Merge tag 'md/4.3' of git://neil.brown.name/md

Pull md updates from Neil Brown:

 - an assortment of little fixes, several for minor races only likely to
   be hit during testing

 - further cluster-md-raid1 development, not ready for real use yet.

 - new RAID6 syndrome code for ARM NEON

 - fix a race where a write can return before failure of one device is
   properly recorded in metadata, so an immediate crash might result in
   that write being lost.

* tag 'md/4.3' of git://neil.brown.name/md: (33 commits)
  md/raid5: ensure device failure recorded before write request returns.
  md/raid5: use bio_list for the list of bios to return.
  md/raid10: ensure device failure recorded before write request returns.
  md/raid1: ensure device failure recorded before write request returns.
  md-cluster: remove inappropriate try_module_get from join()
  md: extend spinlock protection in register_md_cluster_operations
  md-cluster: Read the disk bitmap sb and check if it needs recovery
  md-cluster: only call complete(&cinfo->completion) when node join cluster
  md-cluster: add missed lockres_free
  md-cluster: remove the unused sb_lock
  md-cluster: init suspend_list and suspend_lock early in join
  md-cluster: add the error check if failed to get dlm lock
  md-cluster: init completion within lockres_init
  md-cluster: fix deadlock issue on message lock
  md-cluster: transfer the resync ownership to another node
  md-cluster: split recover_slot for future code reuse
  md-cluster: use %pU to print UUIDs
  md: setup safemode_timer before it's being used
  md/raid5: handle possible race as reshape completes.
  md: sync sync_completed has correct value as recovery finishes.
  ...
diff --git a/CREDITS b/CREDITS
index 4fcf9cd..bcb8efa 100644
--- a/CREDITS
+++ b/CREDITS
@@ -20,6 +20,10 @@
 S: (ask for current address)
 S: Finland
 
+N: Thomas Abraham
+E: thomas.ab@samsung.com
+D: Samsung pin controller driver
+
 N: Dragos Acostachioaie
 E: dragos@iname.com
 W: http://www.arbornet.org/~dragos
diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
index acfe9df..b07e86d 100644
--- a/Documentation/ABI/testing/sysfs-class-cxl
+++ b/Documentation/ABI/testing/sysfs-class-cxl
@@ -223,3 +223,13 @@
                 Writing 1 will issue a PERST to card which may cause the card
                 to reload the FPGA depending on load_image_on_perst.
 Users:		https://github.com/ibm-capi/libcxl
+
+What:		/sys/class/cxl/<card>/perst_reloads_same_image
+Date:		July 2015
+Contact:	linuxppc-dev@lists.ozlabs.org
+Description:	read/write
+		Trust that when an image is reloaded via PERST, it will not
+		have changed.
+		0 = don't trust, the image may be different (default)
+		1 = trust that the image will not change.
+Users:		https://github.com/ibm-capi/libcxl
diff --git a/Documentation/ABI/testing/sysfs-gpio b/Documentation/ABI/testing/sysfs-gpio
index 80f4c94..55ffa2d 100644
--- a/Documentation/ABI/testing/sysfs-gpio
+++ b/Documentation/ABI/testing/sysfs-gpio
@@ -16,7 +16,8 @@
     /sys/class/gpio
 	/export ... asks the kernel to export a GPIO to userspace
 	/unexport ... to return a GPIO to the kernel
-	/gpioN ... for each exported GPIO #N
+	/gpioN ... for each exported GPIO #N OR
+	/<LINE-NAME> ... for a properly named GPIO line
 	    /value ... always readable, writes fail for input GPIOs
 	    /direction ... r/w as: in, out (default low); write: high, low
 	    /edge ... r/w as: none, falling, rising, both
diff --git a/Documentation/DocBook/alsa-driver-api.tmpl b/Documentation/DocBook/alsa-driver-api.tmpl
index 71f9246..e94a10b 100644
--- a/Documentation/DocBook/alsa-driver-api.tmpl
+++ b/Documentation/DocBook/alsa-driver-api.tmpl
@@ -108,7 +108,7 @@
      <sect1><title>ASoC Core API</title>
 !Iinclude/sound/soc.h
 !Esound/soc/soc-core.c
-!Esound/soc/soc-cache.c
+<!-- !Esound/soc/soc-cache.c no docbook comments here -->
 !Esound/soc/soc-devres.c
 !Esound/soc/soc-io.c
 !Esound/soc/soc-pcm.c
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 2fb9a54..9ddf8c6 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -3982,7 +3982,6 @@
         <title>Interrupt Handling</title>
 !Pdrivers/gpu/drm/i915/i915_irq.c interrupt handling
 !Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_init intel_irq_init_hw intel_hpd_init
-!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_fini
 !Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_disable_interrupts
 !Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_enable_interrupts
       </sect2>
@@ -4012,7 +4011,6 @@
         <title>Frontbuffer Tracking</title>
 !Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking
 !Idrivers/gpu/drm/i915/intel_frontbuffer.c
-!Fdrivers/gpu/drm/i915/intel_drv.h intel_frontbuffer_flip
 !Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
       </sect2>
       <sect2>
@@ -4045,6 +4043,11 @@
         </para>
       </sect2>
       <sect2>
+        <title>Hotplug</title>
+!Pdrivers/gpu/drm/i915/intel_hotplug.c Hotplug
+!Idrivers/gpu/drm/i915/intel_hotplug.c
+      </sect2>
+      <sect2>
 	<title>High Definition Audio</title>
 !Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
 !Idrivers/gpu/drm/i915/intel_audio.c
@@ -4195,6 +4198,23 @@
 !Idrivers/gpu/drm/i915/i915_gem_gtt.c
       </sect2>
       <sect2>
+        <title>GTT Fences and Swizzling</title>
+!Idrivers/gpu/drm/i915/i915_gem_fence.c
+        <sect3>
+          <title>Global GTT Fence Handling</title>
+!Pdrivers/gpu/drm/i915/i915_gem_fence.c fence register handling
+        </sect3>
+        <sect3>
+          <title>Hardware Tiling and Swizzling Details</title>
+!Pdrivers/gpu/drm/i915/i915_gem_fence.c tiling swizzling details
+        </sect3>
+      </sect2>
+      <sect2>
+        <title>Object Tiling IOCTLs</title>
+!Idrivers/gpu/drm/i915/i915_gem_tiling.c
+!Pdrivers/gpu/drm/i915/i915_gem_tiling.c buffer object tiling
+      </sect2>
+      <sect2>
         <title>Buffer Object Eviction</title>
 	<para>
 	  This section documents the interface functions for evicting buffer
diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl
index bcdfdb9..6006b63 100644
--- a/Documentation/DocBook/filesystems.tmpl
+++ b/Documentation/DocBook/filesystems.tmpl
@@ -146,36 +146,30 @@
 The journalling layer is easy to use. You need to
 first of all create a journal_t data structure. There are
 two calls to do this, depending on how you decide to allocate the physical
-media on which the journal resides. The journal_init_inode() call
-is for journals stored in filesystem inodes, or the journal_init_dev()
-call can be use for journal stored on a raw device (in a continuous range
+media on which the journal resides. The jbd2_journal_init_inode() call
+is for journals stored in filesystem inodes, or the jbd2_journal_init_dev()
+call can be used for a journal stored on a raw device (in a contiguous range
 of blocks). A journal_t is a typedef for a struct pointer, so when
-you are finally finished make sure you call journal_destroy() on it
+you are finally finished make sure you call jbd2_journal_destroy() on it
 to free up any used kernel memory.
 </para>
 
 <para>
 Once you have got your journal_t object you need to 'mount' or load the journal
-file, unless of course you haven't initialised it yet - in which case you
-need to call journal_create().
+file. The journalling layer expects the space for the journal was already
+allocated and initialized properly by the userspace tools.  When loading the
+journal you must call jbd2_journal_load() to process the journal contents.  If
+the client file system detects that the journal contents do not need to be
+processed (or may not even be valid), it may call jbd2_journal_wipe() to
+clear the journal contents before calling jbd2_journal_load().
 </para>
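+
+<para>
+As a rough sketch of this sequence (illustrative only; journal_inode and
+fs_is_clean are hypothetical names, not part of the JBD2 API):
+</para>
+<programlisting>
+	journal_t *journal;
+	int err;
+
+	/* journal kept in a filesystem inode */
+	journal = jbd2_journal_init_inode(journal_inode);
+	if (!journal)
+		return -EINVAL;
+	if (fs_is_clean)
+		jbd2_journal_wipe(journal, 0);	/* nothing to replay */
+	err = jbd2_journal_load(journal);	/* recovers if necessary */
+</programlisting>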
 
 <para>
-Most of the time however your journal file will already have been created, but
-before you load it you must call journal_wipe() to empty the journal file.
-Hang on, you say , what if the filesystem wasn't cleanly umount()'d . Well, it is the
-job of the client file system to detect this and skip the call to journal_wipe().
-</para>
-
-<para>
-In either case the next call should be to journal_load() which prepares the
-journal file for use. Note that journal_wipe(..,0) calls journal_skip_recovery()
-for you if it detects any outstanding transactions in the journal and similarly
-journal_load() will call journal_recover() if necessary.
-I would advise reading fs/ext3/super.c for examples on this stage.
-[RGG: Why is the journal_wipe() call necessary - doesn't this needlessly
-complicate the API. Or isn't a good idea for the journal layer to hide
-dirty mounts from the client fs]
+Note that jbd2_journal_wipe(..,0) calls jbd2_journal_skip_recovery() for you if
+it detects any outstanding transactions in the journal and similarly
+jbd2_journal_load() will call jbd2_journal_recover() if necessary.  I would
+advise reading ext4_load_journal() in fs/ext4/super.c for examples on this
+stage.
 </para>
 
 <para>
@@ -189,41 +183,41 @@
 is done by wrapping them into transactions. Additionally you
 also need to wrap the modification of each of the buffers
 with calls to the journal layer, so it knows what the modifications
-you are actually making are. To do this use  journal_start() which
+you are actually making are. To do this use jbd2_journal_start() which
 returns a transaction handle.
 </para>
 
 <para>
-journal_start()
-and its counterpart journal_stop(), which indicates the end of a transaction
-are nestable calls, so you can reenter a transaction if necessary,
-but remember you must call journal_stop() the same number of times as
-journal_start() before the transaction is completed (or more accurately
-leaves the update phase). Ext3/VFS makes use of this feature to simplify
-quota support.
+jbd2_journal_start()
+and its counterpart jbd2_journal_stop(), which indicates the end of a
+transaction are nestable calls, so you can reenter a transaction if necessary,
+but remember you must call jbd2_journal_stop() the same number of times as
+jbd2_journal_start() before the transaction is completed (or more accurately
+leaves the update phase). Ext4/VFS makes use of this feature to simplify
+handling of inode dirtying, quota support, etc.
 </para>
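+
+<para>
+A minimal sketch of the nesting rule (error handling elided; the nblocks
+values are purely illustrative):
+</para>
+<programlisting>
+	handle_t *handle;
+
+	handle = jbd2_journal_start(journal, 8);  /* outermost start */
+	/* ... */
+	handle = jbd2_journal_start(journal, 2);  /* nested: same handle */
+	/* ... */
+	jbd2_journal_stop(handle);                /* does not complete yet */
+	/* ... */
+	jbd2_journal_stop(handle);  /* outermost stop completes the handle */
+</programlisting>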
 
 <para>
 Inside each transaction you need to wrap the modifications to the
 individual buffers (blocks). Before you start to modify a buffer you
-need to call journal_get_{create,write,undo}_access() as appropriate,
+need to call jbd2_journal_get_{create,write,undo}_access() as appropriate,
 this allows the journalling layer to copy the unmodified data if it
 needs to. After all, the buffer may be part of a previously uncommitted
 transaction.
 At this point you are at last ready to modify a buffer, and once
-you are have done so you need to call journal_dirty_{meta,}data().
+you have done so you need to call jbd2_journal_dirty_{meta,}data().
 Or if you've asked for access to a buffer you now know is no longer
-required to be pushed back on the device you can call journal_forget()
+required to be pushed back on the device you can call jbd2_journal_forget()
 in much the same way as you might have used bforget() in the past.
 </para>
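+
+<para>
+Schematically, per modified metadata buffer (a sketch against the JBD2
+API; bh is a struct buffer_head the client has already read):
+</para>
+<programlisting>
+	err = jbd2_journal_get_write_access(handle, bh);
+	if (err)
+		goto out;
+	/* ... modify the buffer contents ... */
+	err = jbd2_journal_dirty_metadata(handle, bh);
+</programlisting>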
 
 <para>
-A journal_flush() may be called at any time to commit and checkpoint
+A jbd2_journal_flush() may be called at any time to commit and checkpoint
 all your transactions.
 </para>
 
 <para>
-Then at umount time , in your put_super() you can then call journal_destroy()
+Then at umount time, in your put_super(), you can call jbd2_journal_destroy()
 to clean up your in-core journal object.
 </para>
 
@@ -231,53 +225,68 @@
 Unfortunately there are a couple of ways the journal layer can cause a deadlock.
 The first thing to note is that each task can only have
 a single outstanding transaction at any one time, remember nothing
-commits until the outermost journal_stop(). This means
+commits until the outermost jbd2_journal_stop(). This means
 you must complete the transaction at the end of each file/inode/address
 etc. operation you perform, so that the journalling system isn't re-entered
 on another journal. Since transactions can't be nested/batched
 across differing journals, and another filesystem other than
-yours (say ext3) may be modified in a later syscall.
+yours (say ext4) may be modified in a later syscall.
 </para>
 
 <para>
-The second case to bear in mind is that journal_start() can
+The second case to bear in mind is that jbd2_journal_start() can
 block if there isn't enough space in the journal for your transaction
 (based on the passed nblocks param) - when it blocks it merely(!) needs to
 wait for transactions to complete and be committed from other tasks,
-so essentially we are waiting for journal_stop(). So to avoid
-deadlocks you must treat journal_start/stop() as if they
+so essentially we are waiting for jbd2_journal_stop(). So to avoid
+deadlocks you must treat jbd2_journal_start/stop() as if they
 were semaphores and include them in your semaphore ordering rules to prevent
-deadlocks. Note that journal_extend() has similar blocking behaviour to
-journal_start() so you can deadlock here just as easily as on journal_start().
+deadlocks. Note that jbd2_journal_extend() has similar blocking behaviour to
+jbd2_journal_start() so you can deadlock here just as easily as on
+jbd2_journal_start().
 </para>
 
 <para>
 Try to reserve the right number of blocks the first time. ;-). This will
 be the maximum number of blocks you are going to touch in this transaction.
-I advise having a look at at least ext3_jbd.h to see the basis on which
-ext3 uses to make these decisions.
+I advise having a look at at least ext4_jbd2.h to see the basis on which
+ext4 makes these decisions.
 </para>
 
 <para>
 Another wriggle to watch out for is your on-disk block allocation strategy.
-why? Because, if you undo a delete, you need to ensure you haven't reused any
-of the freed blocks in a later transaction. One simple way of doing this
-is make sure any blocks you allocate only have checkpointed transactions
-listed against them. Ext3 does this in ext3_test_allocatable().
+Why? Because, if you do a delete, you need to ensure you haven't reused any
+of the freed blocks until the transaction freeing these blocks commits. If you
+reused these blocks and a crash happens, there is no way to restore the contents
+of the reallocated blocks at the end of the last fully committed transaction.
+
+One simple way of doing this is to mark blocks as free in internal in-memory
+block allocation structures only after the transaction freeing them commits.
+Ext4 uses the journal commit callback for this purpose.
 </para>
 
 <para>
-Lock is also providing through journal_{un,}lock_updates(),
-ext3 uses this when it wants a window with a clean and stable fs for a moment.
-eg.
+With journal commit callbacks you can ask the journalling layer to call a
+callback function when the transaction is finally committed to disk, so that
+you can do some of your own management. You ask the journalling layer to
+call the callback by simply setting the journal->j_commit_callback function
+pointer; that function is then called after each transaction commit. You can also
+use transaction->t_private_list for attaching entries to a transaction that
+need processing when the transaction commits.
+</para>
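+
+<para>
+A sketch of the hook-up (myfs_commit_callback is a made-up name):
+</para>
+<programlisting>
+	static void myfs_commit_callback(journal_t *journal,
+					 transaction_t *txn)
+	{
+		/* walk txn->t_private_list and finish per-commit work */
+	}
+
+	journal->j_commit_callback = myfs_commit_callback;
+</programlisting>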
+
+<para>
+JBD2 also provides a way to block all transaction updates via
+jbd2_journal_{un,}lock_updates(). Ext4 uses this when it wants a window with a
+clean and stable fs for a moment.  E.g.
 </para>
 
 <programlisting>
 
-	journal_lock_updates() //stop new stuff happening..
-	journal_flush()        // checkpoint everything.
+	jbd2_journal_lock_updates() //stop new stuff happening..
+	jbd2_journal_flush()        // checkpoint everything.
 	..do stuff on stable fs
-	journal_unlock_updates() // carry on with filesystem use.
+	jbd2_journal_unlock_updates() // carry on with filesystem use.
 </programlisting>
 
 <para>
@@ -286,29 +295,6 @@
 calls.
 </para>
 
-<para>
-A new feature of jbd since 2.5.25 is commit callbacks with the new
-journal_callback_set() function you can now ask the journalling layer
-to call you back when the transaction is finally committed to disk, so that
-you can do some of your own management. The key to this is the journal_callback
-struct, this maintains the internal callback information but you can
-extend it like this:-
-</para>
-<programlisting>
-	struct  myfs_callback_s {
-		//Data structure element required by jbd..
-		struct journal_callback for_jbd;
-		// Stuff for myfs allocated together.
-		myfs_inode*    i_commited;
-
-	}
-</programlisting>
-
-<para>
-this would be useful if you needed to know when data was committed to a
-particular inode.
-</para>
-
     </sect2>
 
     <sect2 id="jbd_summary">
@@ -319,36 +305,6 @@
 to tell the journalling layer about them.
 </para>
 
-<para>
-Here is a some pseudo code to give you an idea of how it works, as
-an example.
-</para>
-
-<programlisting>
-  journal_t* my_jnrl = journal_create();
-  journal_init_{dev,inode}(jnrl,...)
-  if (clean) journal_wipe();
-  journal_load();
-
-   foreach(transaction) { /*transactions must be
-                            completed before
-                            a syscall returns to
-                            userspace*/
-
-          handle_t * xct=journal_start(my_jnrl);
-          foreach(bh) {
-                journal_get_{create,write,undo}_access(xact,bh);
-                if ( myfs_modify(bh) ) { /* returns true
-                                        if makes changes */
-                           journal_dirty_{meta,}data(xact,bh);
-                } else {
-                           journal_forget(bh);
-                }
-          }
-          journal_stop(xct);
-   }
-   journal_destroy(my_jrnl);
-</programlisting>
     </sect2>
 
     </sect1>
@@ -357,13 +313,13 @@
      <title>Data Types</title>
      <para>
 	The journalling layer uses typedefs to 'hide' the concrete definitions
-	of the structures used. As a client of the JBD layer you can
+	of the structures used. As a client of the JBD2 layer you can
 	just rely on using the pointer as a magic cookie of some sort.
 
 	Obviously the hiding is not enforced as this is 'C'.
      </para>
 	<sect2 id="structures"><title>Structures</title>
-!Iinclude/linux/jbd.h
+!Iinclude/linux/jbd2.h
 	</sect2>
     </sect1>
 
@@ -375,11 +331,11 @@
 	manage transactions
      </para>
 	<sect2 id="journal_level"><title>Journal Level</title>
-!Efs/jbd/journal.c
-!Ifs/jbd/recovery.c
+!Efs/jbd2/journal.c
+!Ifs/jbd2/recovery.c
 	</sect2>
 	<sect2 id="transaction_level"><title>Transasction Level</title>
-!Efs/jbd/transaction.c
+!Efs/jbd2/transaction.c
 	</sect2>
     </sect1>
     <sect1 id="see_also">
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 1690350..7d9d3c2 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -81,7 +81,7 @@
   u64 res3	= 0;		/* reserved */
   u64 res4	= 0;		/* reserved */
   u32 magic	= 0x644d5241;	/* Magic number, little endian, "ARM\x64" */
-  u32 res5;      		/* reserved (used for PE COFF offset) */
+  u32 res5;			/* reserved (used for PE COFF offset) */
 
 
 Header notes:
@@ -103,7 +103,7 @@
 
 - The flags field (introduced in v3.17) is a little-endian 64-bit field
   composed as follows:
-  Bit 0: 	Kernel endianness.  1 if BE, 0 if LE.
+  Bit 0:	Kernel endianness.  1 if BE, 0 if LE.
   Bits 1-63:	Reserved.
 
 - When image_size is zero, a bootloader should attempt to keep as much
@@ -115,11 +115,14 @@
 address near the start of usable system RAM and called there. Memory
 below that base address is currently unusable by Linux, and therefore it
 is strongly recommended that this location is the start of system RAM.
+The region between the 2 MB aligned base address and the start of the
+image has no special significance to the kernel, and may be used for
+other purposes.
 At least image_size bytes from the start of the image must be free for
 use by the kernel.
 
-Any memory described to the kernel (even that below the 2MB aligned base
-address) which is not marked as reserved from the kernel e.g. with a
+Any memory described to the kernel (even that below the start of the
+image) which is not marked as reserved from the kernel (e.g., with a
 memreserve region in the device tree) will be considered as available to
 the kernel.
 
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index dab6da3..b19fc34 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -266,7 +266,9 @@
 atomic_cmpxchg will only satisfy its atomicity semantics as long as all
 other accesses of *v are performed through atomic_xxx operations.
 
-atomic_cmpxchg must provide explicit memory barriers around the operation.
+atomic_cmpxchg must provide explicit memory barriers around the operation,
+although if the comparison fails then no memory ordering guarantees are
+required.
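+
+For example (an illustrative sketch, not taken from the kernel sources):
+
+	old = atomic_cmpxchg(&v, 1, 2);
+	if (old == 1) {
+		/* succeeded: full barrier semantics around the exchange */
+	} else {
+		/* failed: no memory ordering may be assumed */
+	}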
 
 The semantics for atomic_cmpxchg are the same as those defined for 'cas'
 below.
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index cb12af3..df2d636 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -209,6 +209,37 @@
 	"repair" - Initiate a repair of the array.
 	"reshape"- Currently unsupported (-EINVAL).
 
+
+Discard Support
+---------------
+The implementation of discard support among hardware vendors varies.
+When a block is discarded, some storage devices will return zeroes when
+the block is read.  These devices set the 'discard_zeroes_data'
+attribute.  Other devices will return random data.  Confusingly, some
+devices that advertise 'discard_zeroes_data' will not reliably return
+zeroes when discarded blocks are read!  Since RAID 4/5/6 uses blocks
+from a number of devices to calculate parity blocks and (for performance
+reasons) relies on 'discard_zeroes_data' being reliable, it is important
+that the devices be consistent.  Blocks may be discarded in the middle
+of a RAID 4/5/6 stripe and if subsequent read results are not
+consistent, the parity blocks may be calculated differently at any time,
+making the parity blocks useless for redundancy.  It is important to
+understand how your hardware behaves with discards if you are going to
+enable discards with RAID 4/5/6.
+
+Since the behavior of storage devices is unreliable in this respect,
+even when reporting 'discard_zeroes_data', by default RAID 4/5/6
+discard support is disabled -- this ensures data integrity at the
+expense of losing some performance.
+
+Storage devices that properly support 'discard_zeroes_data' are
+increasingly whitelisted in the kernel and can thus be trusted.
+
+For trusted devices, the following dm-raid module parameter can be set
+to safely enable discard support for RAID 4/5/6:
+    'devices_handle_discard_safely'
+
+
 Version History
 ---------------
 1.0.0	Initial version.  Support for RAID 4/5/6
diff --git a/Documentation/device-mapper/statistics.txt b/Documentation/device-mapper/statistics.txt
index 4919b2d..6f5ef94 100644
--- a/Documentation/device-mapper/statistics.txt
+++ b/Documentation/device-mapper/statistics.txt
@@ -121,6 +121,10 @@
 
 	Output format:
 	  <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
+	        precise_timestamps histogram:n1,n2,n3,...
+
+	The strings "precise_timestamps" and "histogram" are printed only
+	if they were specified when creating the region.
 
     @stats_print <region_id> [<starting_line> <number_of_lines>]
 
diff --git a/Documentation/devicetree/bindings/arm/l2cc.txt b/Documentation/devicetree/bindings/arm/l2cc.txt
index 2251dcc..06c88a4 100644
--- a/Documentation/devicetree/bindings/arm/l2cc.txt
+++ b/Documentation/devicetree/bindings/arm/l2cc.txt
@@ -67,6 +67,12 @@
   disable if zero.
 - arm,prefetch-offset : Override prefetch offset value. Valid values are
   0-7, 15, 23, and 31.
+- arm,shared-override : The default behavior of the pl310 cache controller with
+  respect to the shareable attribute is to transform "normal memory
+  non-cacheable transactions" into "cacheable no allocate" (for reads) or
+  "write through no write allocate" (for writes).
+  On systems where this may cause DMA buffer corruption, this property must be
+  specified to indicate that such transforms are precluded.
 - prefetch-data : Data prefetch. Value: <0> (forcibly disable), <1>
   (forcibly enable), property absent (retain settings set by firmware)
 - prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable),
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
index 3b5f5d1..435251f 100644
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -26,13 +26,19 @@
 
 Optional properties:
 
-- interrupt-affinity : Valid only when using SPIs, specifies a list of phandles
-                       to CPU nodes corresponding directly to the affinity of
+- interrupt-affinity : When using SPIs, specifies a list of phandles to CPU
+                       nodes corresponding directly to the affinity of
 		       the SPIs listed in the interrupts property.
 
-		       This property should be present when there is more than
+                       When using a PPI, specifies a list of phandles to CPU
+		       nodes corresponding to the set of CPUs which have
+		       a PMU of this type signalling the PPI listed in the
+		       interrupts property.
+
+                       This property should be present when there is more than
 		       a single SPI.
 
 - qcom,no-pc-write : Indicates that this PMU doesn't support the 0xc and 0xd
                      events.
 
diff --git a/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt b/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt
new file mode 100644
index 0000000..47cb1d1
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt
@@ -0,0 +1,61 @@
+Analog Devices AXI-DMAC DMA controller
+
+Required properties:
+ - compatible: Must be "adi,axi-dmac-1.00.a".
+ - reg: Specification for the controller's memory mapped register map.
+ - interrupts: Specification for the controller's interrupt.
+ - clocks: Phandle and specifier to the controller's AXI interface clock
+ - #dma-cells: Must be 1.
+
+Required sub-nodes:
+ - adi,channels: This sub-node must contain a sub-node for each DMA channel. For
+   the channel sub-nodes the following bindings apply. They must match the
+   configuration options of the peripheral as it was instantiated.
+
+Required properties for adi,channels sub-node:
+ - #size-cells: Must be 0
+ - #address-cells: Must be 1
+
+Required channel sub-node properties:
+ - reg: Which channel this node refers to.
+ - adi,length-width: Width of the DMA transfer length register.
+ - adi,source-bus-width,
+   adi,destination-bus-width: Width of the source or destination bus in bits.
+ - adi,source-bus-type,
+   adi,destination-bus-type: Type of the source or destination bus. Must be one
+   of the following:
+	0 (AXI_DMAC_TYPE_AXI_MM): Memory mapped AXI interface
+	1 (AXI_DMAC_TYPE_AXI_STREAM): Streaming AXI interface
+	2 (AXI_DMAC_TYPE_AXI_FIFO): FIFO interface
+
+Optional channel properties:
+ - adi,cyclic: Must be set if the channel supports hardware cyclic DMA
+   transfers.
+ - adi,2d: Must be set if the channel supports hardware 2D DMA transfers.
+
+DMA clients connected to the AXI-DMAC DMA controller must use the format
+described in the dma.txt file using a one-cell specifier. The value of the
+specifier refers to the DMA channel index.
+
+Example:
+
+dma: dma@7c420000 {
+	compatible = "adi,axi-dmac-1.00.a";
+	reg = <0x7c420000 0x10000>;
+	interrupts = <0 57 0>;
+	clocks = <&clkc 16>;
+	#dma-cells = <1>;
+
+	adi,channels {
+		#size-cells = <0>;
+		#address-cells = <1>;
+
+		dma-channel@0 {
+			reg = <0>;
+			adi,source-bus-width = <32>;
+			adi,source-bus-type = <AXI_DMAC_TYPE_AXI_MM>;
+			adi,destination-bus-width = <64>;
+			adi,destination-bus-type = <AXI_DMAC_TYPE_AXI_FIFO>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/dma/arm-pl08x.txt b/Documentation/devicetree/bindings/dma/arm-pl08x.txt
new file mode 100644
index 0000000..8a0097a
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/arm-pl08x.txt
@@ -0,0 +1,54 @@
+* ARM PrimeCell PL080 and PL081 and derivatives DMA controller
+
+Required properties:
+- compatible: "arm,pl080", "arm,primecell";
+	      "arm,pl081", "arm,primecell";
+- reg: Address range of the PL08x registers
+- interrupts: The PL08x interrupt number
+- clocks: The clock running the IP core
+- clock-names: Must contain "apb_pclk"
+- lli-bus-interface-ahb1: if AHB master 1 is eligible for fetching LLIs
+- lli-bus-interface-ahb2: if AHB master 2 is eligible for fetching LLIs
+- mem-bus-interface-ahb1: if AHB master 1 is eligible for fetching memory contents
+- mem-bus-interface-ahb2: if AHB master 2 is eligible for fetching memory contents
+- #dma-cells: must be <2>. First cell should contain the DMA request,
+              second cell should contain either 1 or 2 depending on
+              which AHB master is used.
+
+Optional properties:
+- dma-channels: contains the total number of DMA channels supported by the DMAC
+- dma-requests: contains the total number of DMA requests supported by the DMAC
+- memcpy-burst-size: the size of the bursts for memcpy: 1, 4, 8, 16, 32,
+  64, 128 or 256 bytes are legal values
+- memcpy-bus-width: the bus width used for memcpy: 8, 16 or 32 are legal
+  values
+
+Clients
+Required properties:
+- dmas: List of DMA controller phandle, request channel and AHB master id
+- dma-names: Names of the aforementioned requested channels
+
+Example:
+
+dmac0: dma-controller@10130000 {
+	compatible = "arm,pl080", "arm,primecell";
+	reg = <0x10130000 0x1000>;
+	interrupt-parent = <&vica>;
+	interrupts = <15>;
+	clocks = <&hclkdma0>;
+	clock-names = "apb_pclk";
+	lli-bus-interface-ahb1;
+	lli-bus-interface-ahb2;
+	mem-bus-interface-ahb2;
+	memcpy-burst-size = <256>;
+	memcpy-bus-width = <32>;
+	#dma-cells = <2>;
+};
+
+device@40008000 {
+	...
+	dmas = <&dmac0 0 2
+		&dmac0 1 2>;
+	dma-names = "tx", "rx";
+	...
+};
diff --git a/Documentation/devicetree/bindings/dma/lpc1850-dmamux.txt b/Documentation/devicetree/bindings/dma/lpc1850-dmamux.txt
new file mode 100644
index 0000000..87740ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/lpc1850-dmamux.txt
@@ -0,0 +1,54 @@
+NXP LPC18xx/43xx DMA MUX (DMA request router)
+
+Required properties:
+- compatible:	"nxp,lpc1850-dmamux"
+- reg:		Memory map for accessing module
+- #dma-cells:	Should be set to <3>.
+		* 1st cell contain the master dma request signal
+		* 2nd cell contain the mux value (0-3) for the peripheral
+		* 3rd cell contain either 1 or 2 depending on the AHB
+		  master used.
+- dma-requests:	Number of DMA requests for the mux
+- dma-masters:	phandle pointing to the DMA controller
+
+The DMA controller node needs to have the following properties:
+- dma-requests:	Number of DMA requests the controller can handle
+
+Example:
+
+dmac: dma@40002000 {
+	compatible = "nxp,lpc1850-gpdma", "arm,pl080", "arm,primecell";
+	arm,primecell-periphid = <0x00041080>;
+	reg = <0x40002000 0x1000>;
+	interrupts = <2>;
+	clocks = <&ccu1 CLK_CPU_DMA>;
+	clock-names = "apb_pclk";
+	#dma-cells = <2>;
+	dma-channels = <8>;
+	dma-requests = <16>;
+	lli-bus-interface-ahb1;
+	lli-bus-interface-ahb2;
+	mem-bus-interface-ahb1;
+	mem-bus-interface-ahb2;
+	memcpy-burst-size = <256>;
+	memcpy-bus-width = <32>;
+};
+
+dmamux: dma-mux {
+	compatible = "nxp,lpc1850-dmamux";
+	#dma-cells = <3>;
+	dma-requests = <64>;
+	dma-masters = <&dmac>;
+};
+
+uart0: serial@40081000 {
+	compatible = "nxp,lpc1850-uart", "ns16550a";
+	reg = <0x40081000 0x1000>;
+	reg-shift = <2>;
+	interrupts = <24>;
+	clocks = <&ccu2 CLK_APB0_UART0>, <&ccu1 CLK_CPU_UART0>;
+	clock-names = "uartclk", "reg";
+	dmas = <&dmamux 1 1 2
+		&dmamux 2 1 2>;
+	dma-names = "tx", "rx";
+};
diff --git a/Documentation/devicetree/bindings/dma/mv-xor.txt b/Documentation/devicetree/bindings/dma/mv-xor.txt
index cc29c35..276ef81 100644
--- a/Documentation/devicetree/bindings/dma/mv-xor.txt
+++ b/Documentation/devicetree/bindings/dma/mv-xor.txt
@@ -12,10 +12,13 @@
 properties:
 - interrupts: interrupt of the XOR channel
 
-And the following optional properties:
+The sub-nodes used to contain one or several of the following
+properties, but they are now deprecated:
 - dmacap,memcpy to indicate that the XOR channel is capable of memcpy operations
 - dmacap,memset to indicate that the XOR channel is capable of memset operations
 - dmacap,xor to indicate that the XOR channel is capable of xor operations
+- dmacap,interrupt to indicate that the XOR channel is capable of
+  generating interrupts
 
 Example:
 
@@ -28,13 +31,8 @@
 
 	xor00 {
 	      interrupts = <51>;
-	      dmacap,memcpy;
-	      dmacap,xor;
 	};
 	xor01 {
 	      interrupts = <52>;
-	      dmacap,memcpy;
-	      dmacap,xor;
-	      dmacap,memset;
 	};
 };
diff --git a/Documentation/devicetree/bindings/dma/sun4i-dma.txt b/Documentation/devicetree/bindings/dma/sun4i-dma.txt
new file mode 100644
index 0000000..f1634a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/sun4i-dma.txt
@@ -0,0 +1,46 @@
+Allwinner A10 DMA Controller
+
+This driver follows the generic DMA bindings defined in dma.txt.
+
+Required properties:
+
+- compatible:	Must be "allwinner,sun4i-a10-dma"
+- reg:		Should contain the registers base address and length
+- interrupts:	Should contain a reference to the interrupt used by this device
+- clocks:	Should contain a reference to the parent AHB clock
+- #dma-cells:	Should be 2, first cell denoting normal or dedicated dma,
+		second cell holding the request line number.
+
+Example:
+	dma: dma-controller@01c02000 {
+		compatible = "allwinner,sun4i-a10-dma";
+		reg = <0x01c02000 0x1000>;
+		interrupts = <27>;
+		clocks = <&ahb_gates 6>;
+		#dma-cells = <2>;
+	};
+
+Clients:
+
+DMA clients connected to the Allwinner A10 DMA controller must use the
+format described in the dma.txt file, using a three-cell specifier for
+each channel: a phandle plus two integer cells.
+The three cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. Whether it is using normal (0) or dedicated (1) channels
+3. The port ID as specified in the datasheet
+
+Example:
+	spi2: spi@01c17000 {
+		compatible = "allwinner,sun4i-a10-spi";
+		reg = <0x01c17000 0x1000>;
+		interrupts = <0 12 4>;
+		clocks = <&ahb_gates 22>, <&spi2_clk>;
+		clock-names = "ahb", "mod";
+		dmas = <&dma 1 29>, <&dma 1 28>;
+		dma-names = "rx", "tx";
+		status = "disabled";
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
diff --git a/Documentation/devicetree/bindings/dma/zxdma.txt b/Documentation/devicetree/bindings/dma/zxdma.txt
new file mode 100644
index 0000000..3207ceb
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/zxdma.txt
@@ -0,0 +1,38 @@
+* ZTE ZX296702 DMA controller
+
+Required properties:
+- compatible: Should be "zte,zx296702-dma"
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain one interrupt shared by all channels
+- #dma-cells: see dma.txt, should be 1 (the cell holds the request line number)
+- dma-channels: physical channels supported
+- dma-requests: virtual channels supported, each virtual channel
+		has a specific request line
+- clocks: clock required
+
+Example:
+
+Controller:
+	dma: dma-controller@09c00000 {
+		compatible = "zte,zx296702-dma";
+		reg = <0x09c00000 0x1000>;
+		clocks = <&topclk ZX296702_DMA_ACLK>;
+		interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+		#dma-cells = <1>;
+		dma-channels = <24>;
+		dma-requests = <24>;
+	};
+
+Client:
+Use the specific request line passed from the DMA controller.
+For example, the spdif0 tx channel request line is 4:
+	spdif0: spdif0@0b004000 {
+		#sound-dai-cells = <0>;
+		compatible = "zte,zx296702-spdif";
+		reg = <0x0b004000 0x1000>;
+		clocks = <&lsp0clk ZX296702_SPDIF0_DIV>;
+		clock-names = "tx";
+		interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+		dmas = <&dma 4>;
+		dma-names = "tx";
+	}
diff --git a/Documentation/devicetree/bindings/drm/msm/dsi.txt b/Documentation/devicetree/bindings/drm/msm/dsi.txt
index cd8fe6c..d56923c 100644
--- a/Documentation/devicetree/bindings/drm/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/drm/msm/dsi.txt
@@ -30,20 +30,27 @@
 - panel@0: Node of panel connected to this DSI controller.
   See files in Documentation/devicetree/bindings/panel/ for each supported
   panel.
-- qcom,dual-panel-mode: Boolean value indicating if the DSI controller is
+- qcom,dual-dsi-mode: Boolean value indicating if the DSI controller is
   driving a panel which needs 2 DSI links.
-- qcom,master-panel: Boolean value indicating if the DSI controller is driving
+- qcom,master-dsi: Boolean value indicating if the DSI controller is driving
   the master link of the 2-DSI panel.
-- qcom,sync-dual-panel: Boolean value indicating if the DSI controller is
+- qcom,sync-dual-dsi: Boolean value indicating if the DSI controller is
   driving a 2-DSI panel whose 2 links need to receive commands simultaneously.
 - interrupt-parent: phandle to the MDP block if the interrupt signal is routed
   through MDP block
+- pinctrl-names: the pin control state names; should contain "default"
+- pinctrl-0: the default pinctrl state (active)
+- pinctrl-n: the "sleep" pinctrl state
+- port: DSI controller output port. This contains one endpoint subnode, with its
+  remote-endpoint set to the phandle of the connected panel's endpoint.
+  See Documentation/devicetree/bindings/graph.txt for device graph info.
 
 DSI PHY:
 Required properties:
 - compatible: Could be the following
   * "qcom,dsi-phy-28nm-hpm"
   * "qcom,dsi-phy-28nm-lp"
+  * "qcom,dsi-phy-20nm"
 - reg: Physical base address and length of the registers of PLL, PHY and PHY
   regulator
 - reg-names: The names of register regions. The following regions are required:
@@ -59,6 +66,10 @@
   * "iface_clk"
 - vddio-supply: phandle to vdd-io regulator device node
 
+Optional properties:
+- qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY
+  regulator is wanted.
+
 Example:
 	mdss_dsi0: qcom,mdss_dsi@fd922800 {
 		compatible = "qcom,mdss-dsi-ctrl";
@@ -90,9 +101,13 @@
 
 		qcom,dsi-phy = <&mdss_dsi_phy0>;
 
-		qcom,dual-panel-mode;
-		qcom,master-panel;
-		qcom,sync-dual-panel;
+		qcom,dual-dsi-mode;
+		qcom,master-dsi;
+		qcom,sync-dual-dsi;
+
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&mdss_dsi_active>;
+		pinctrl-1 = <&mdss_dsi_suspend>;
 
 		panel: panel@0 {
 			compatible = "sharp,lq101r1sx01";
@@ -101,6 +116,18 @@
 
 			power-supply = <...>;
 			backlight = <...>;
+
+			port {
+				panel_in: endpoint {
+					remote-endpoint = <&dsi0_out>;
+				};
+			};
+		};
+
+		port {
+			dsi0_out: endpoint {
+				remote-endpoint = <&panel_in>;
+			};
 		};
 	};
 
@@ -117,4 +144,6 @@
 		clock-names = "iface_clk";
 		clocks = <&mmcc MDSS_AHB_CLK>;
 		vddio-supply = <&pma8084_l12>;
+
+		qcom,dsi-phy-regulator-ldo-mode;
 	};
diff --git a/Documentation/devicetree/bindings/drm/msm/hdmi.txt b/Documentation/devicetree/bindings/drm/msm/hdmi.txt
index c43aa53..e926239 100644
--- a/Documentation/devicetree/bindings/drm/msm/hdmi.txt
+++ b/Documentation/devicetree/bindings/drm/msm/hdmi.txt
@@ -2,8 +2,9 @@
 
 Required properties:
 - compatible: one of the following
+   * "qcom,hdmi-tx-8994"
    * "qcom,hdmi-tx-8084"
-   * "qcom,hdmi-tx-8074"
+   * "qcom,hdmi-tx-8974"
    * "qcom,hdmi-tx-8660"
    * "qcom,hdmi-tx-8960"
 - reg: Physical base address and length of the controller's registers
diff --git a/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt b/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
index 435f1bc..b405b44 100644
--- a/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
@@ -33,6 +33,13 @@
 - interrupt-parent:
     phandle of the parent interrupt controller
 
+- interrupts-extended:
+    Alternate form of specifying interrupts and parents that allows for
+    multiple parents.  This takes precedence over 'interrupts' and
+    'interrupt-parent'.  Wakeup-capable GPIO controllers often route their
+    wakeup interrupt lines through a different interrupt controller than the
+    primary interrupt line, making this property necessary.
+
 - #interrupt-cells:
     Should be <2>.  The first cell is the GPIO number, the second should specify
     flags.  The following subset of flags is supported:
@@ -47,19 +54,33 @@
 - interrupt-controller:
     Marks the device node as an interrupt controller
 
-- interrupt-names:
-    The name of the IRQ resource used by this controller
+- wakeup-source:
+    GPIOs for this controller can be used as a wakeup source
 
 Example:
 	upg_gio: gpio@f040a700 {
-		#gpio-cells = <0x2>;
-		#interrupt-cells = <0x2>;
+		#gpio-cells = <2>;
+		#interrupt-cells = <2>;
 		compatible = "brcm,bcm7445-gpio", "brcm,brcmstb-gpio";
 		gpio-controller;
 		interrupt-controller;
 		reg = <0xf040a700 0x80>;
-		interrupt-parent = <0xf>;
+		interrupt-parent = <&irq0_intc>;
 		interrupts = <0x6>;
-		interrupt-names = "upg_gio";
-		brcm,gpio-bank-widths = <0x20 0x20 0x20 0x18>;
+		brcm,gpio-bank-widths = <32 32 32 24>;
+	};
+
+	upg_gio_aon: gpio@f04172c0 {
+		#gpio-cells = <2>;
+		#interrupt-cells = <2>;
+		compatible = "brcm,bcm7445-gpio", "brcm,brcmstb-gpio";
+		gpio-controller;
+		interrupt-controller;
+		reg = <0xf04172c0 0x40>;
+		interrupt-parent = <&irq0_aon_intc>;
+		interrupts = <0x6>;
+		interrupts-extended = <&irq0_aon_intc 0x6>,
+			<&aon_pm_l2_intc 0x5>;
+		wakeup-source;
+		brcm,gpio-bank-widths = <18 4>;
 	};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-etraxfs.txt b/Documentation/devicetree/bindings/gpio/gpio-etraxfs.txt
index abf4db7..170194a 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-etraxfs.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-etraxfs.txt
@@ -2,8 +2,9 @@
 
 Required properties:
 
-- compatible:
+- compatible: one of:
   - "axis,etraxfs-gio"
+  - "axis,artpec3-gio"
 - reg: Physical base address and length of the controller's registers.
 - #gpio-cells: Should be 3
   - The first cell is the gpio offset number.
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt b/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
new file mode 100644
index 0000000..805ddcd
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
@@ -0,0 +1,22 @@
+* Freescale MPC512x/MPC8xxx GPIO controller
+
+Required properties:
+- compatible : Should be "fsl,<soc>-gpio"
+  The following <soc>s are known to be supported:
+    mpc5121, mpc5125, mpc8349, mpc8572, mpc8610, pq3, qoriq
+- reg : Address and length of the register set for the device
+- interrupts : Should be the port interrupt shared by all 32 pins.
+- #gpio-cells : Should be two.  The first cell is the pin number and
+  the second cell is used to specify the gpio polarity:
+      0 = active high
+      1 = active low
+
+Example:
+
+gpio0: gpio@1100 {
+	compatible = "fsl,mpc5125-gpio";
+	#gpio-cells = <2>;
+	reg = <0x1100 0x080>;
+	interrupts = <78 0x8>;
+	status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
index 38fb86f..f60e2f4 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+++ b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
@@ -9,6 +9,7 @@
     - "renesas,gpio-r8a7791": for R8A7791 (R-Car M2-W) compatible GPIO controller.
     - "renesas,gpio-r8a7793": for R8A7793 (R-Car M2-N) compatible GPIO controller.
     - "renesas,gpio-r8a7794": for R8A7794 (R-Car E2) compatible GPIO controller.
+    - "renesas,gpio-r8a7795": for R8A7795 (R-Car H3) compatible GPIO controller.
     - "renesas,gpio-rcar": for generic R-Car GPIO controller.
 
   - reg: Base address and length of each memory resource used by the GPIO
diff --git a/Documentation/devicetree/bindings/gpio/zx296702-gpio.txt b/Documentation/devicetree/bindings/gpio/zx296702-gpio.txt
new file mode 100644
index 0000000..0dab156
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/zx296702-gpio.txt
@@ -0,0 +1,24 @@
+ZTE ZX296702 GPIO controller
+
+Required properties:
+- compatible : "zte,zx296702-gpio"
+- #gpio-cells : Should be two. The first cell is the pin number and the
+  second cell is used to specify optional parameters:
+  - bit 0 specifies polarity (0 for normal, 1 for inverted)
+- gpio-controller : Marks the device node as a GPIO controller.
+- interrupts : Interrupt mapping for GPIO IRQ.
+- gpio-ranges : Interaction with the PINCTRL subsystem.
+
+gpio1: gpio@b008040 {
+	compatible = "zte,zx296702-gpio";
+	reg = <0xb008040 0x40>;
+	gpio-controller;
+	#gpio-cells = <2>;
+	gpio-ranges = <&pmx0 0 54 2 &pmx0 2 59 14>;
+	interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+	interrupt-parent = <&intc>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
+	clock-names = "gpio_pclk";
+	clocks = <&lsp0clk ZX296702_GPIO_CLK>;
+};
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
index 009f4bf..e685610 100644
--- a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+++ b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -197,9 +197,11 @@
 - sor: serial output resource
 
   Required properties:
-  - compatible: For Tegra124, must contain "nvidia,tegra124-sor".  Otherwise,
-    must contain '"nvidia,<chip>-sor", "nvidia,tegra124-sor"', where <chip>
-    is tegra132.
+  - compatible: Should be:
+    - "nvidia,tegra124-sor": for Tegra124 and Tegra132
+    - "nvidia,tegra132-sor": for Tegra132
+    - "nvidia,tegra210-sor": for Tegra210
+    - "nvidia,tegra210-sor1": for Tegra210
   - reg: Physical base address and length of the controller's registers.
   - interrupts: The interrupt outputs from the controller.
   - clocks: Must contain an entry for each entry in clock-names.
diff --git a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
index 6b1d75f..a36dfce 100644
--- a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+++ b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
@@ -52,10 +52,9 @@
     See ../reset/reset.txt for details.
   - reset-names: names of the resets listed in resets property in the same
     order.
-  - ranges: to allow probing of subdevices
 
 - sti-hdmi: hdmi output block
-  must be a child of sti-tvout
+  must be a child of sti-display-subsystem
   Required properties:
   - compatible: "st,stih<chip>-hdmi";
   - reg: Physical base address of the IP registers and length of memory mapped region.
@@ -72,7 +71,7 @@
 
 sti-hda:
   Required properties:
-  must be a child of sti-tvout
+  must be a child of sti-display-subsystem
   - compatible: "st,stih<chip>-hda"
   - reg: Physical base address of the IP registers and length of memory mapped region.
   - reg-names: names of the mapped memory regions listed in regs property in
@@ -85,7 +84,7 @@
 
 sti-dvo:
   Required properties:
-  must be a child of sti-tvout
+  must be a child of sti-display-subsystem
   - compatible: "st,stih<chip>-dvo"
   - reg: Physical base address of the IP registers and length of memory mapped region.
   - reg-names: names of the mapped memory regions listed in regs property in
@@ -195,38 +194,37 @@
 			reg-names	= "tvout-reg", "hda-reg", "syscfg";
 			reset-names     = "tvout";
 			resets          = <&softreset STIH416_HDTVOUT_SOFTRESET>;
-			ranges;
+		};
 
-			sti-hdmi@fe85c000 {
-				compatible	= "st,stih416-hdmi";
-				reg		= <0xfe85c000 0x1000>, <0xfe830000 0x10000>;
-				reg-names	= "hdmi-reg", "syscfg";
-				interrupts	= <GIC_SPI 173 IRQ_TYPE_NONE>;
-				interrupt-names	= "irq";
-				clock-names	= "pix", "tmds", "phy", "audio";
-				clocks          = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>;
-			};
+		sti-hdmi@fe85c000 {
+			compatible	= "st,stih416-hdmi";
+			reg		= <0xfe85c000 0x1000>, <0xfe830000 0x10000>;
+			reg-names	= "hdmi-reg", "syscfg";
+			interrupts	= <GIC_SPI 173 IRQ_TYPE_NONE>;
+			interrupt-names	= "irq";
+			clock-names	= "pix", "tmds", "phy", "audio";
+			clocks          = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>;
+		};
 
-			sti-hda@fe85a000 {
-				compatible	= "st,stih416-hda";
-				reg		= <0xfe85a000 0x400>, <0xfe83085c 0x4>;
-				reg-names	= "hda-reg", "video-dacs-ctrl";
-				clock-names	= "pix", "hddac";
-				clocks          = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>;
-			};
+		sti-hda@fe85a000 {
+			compatible	= "st,stih416-hda";
+			reg		= <0xfe85a000 0x400>, <0xfe83085c 0x4>;
+			reg-names	= "hda-reg", "video-dacs-ctrl";
+			clock-names	= "pix", "hddac";
+			clocks          = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>;
+		};
 
-			sti-dvo@8d00400 {
-				compatible	= "st,stih407-dvo";
-				reg		= <0x8d00400 0x200>;
-				reg-names	= "dvo-reg";
-				clock-names	= "dvo_pix", "dvo",
-						  "main_parent", "aux_parent";
-				clocks		= <&clk_s_d2_flexgen CLK_PIX_DVO>, <&clk_s_d2_flexgen CLK_DVO>,
-						  <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>;
-				pinctrl-names	= "default";
-				pinctrl-0	= <&pinctrl_dvo>;
-				sti,panel	= <&panel_dvo>;
-			};
+		sti-dvo@8d00400 {
+			compatible	= "st,stih407-dvo";
+			reg		= <0x8d00400 0x200>;
+			reg-names	= "dvo-reg";
+			clock-names	= "dvo_pix", "dvo",
+					  "main_parent", "aux_parent";
+			clocks		= <&clk_s_d2_flexgen CLK_PIX_DVO>, <&clk_s_d2_flexgen CLK_DVO>,
+					  <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>;
+			pinctrl-names	= "default";
+			pinctrl-0	= <&pinctrl_dvo>;
+			sti,panel	= <&panel_dvo>;
 		};
 
 		sti-hqvdp@9c000000 {
@@ -237,7 +235,7 @@
 				reset-names     = "hqvdp";
 				resets          = <&softreset STIH407_HDQVDP_SOFTRESET>;
 				st,vtg		= <&vtg_main>;
-			};
+		};
 	};
 	...
 };
diff --git a/Documentation/devicetree/bindings/input/ads7846.txt b/Documentation/devicetree/bindings/input/ads7846.txt
index 5f7619c..df8b127 100644
--- a/Documentation/devicetree/bindings/input/ads7846.txt
+++ b/Documentation/devicetree/bindings/input/ads7846.txt
@@ -64,7 +64,7 @@
 					pendown-gpio (u32).
 	pendown-gpio			GPIO handle describing the pin the !PENIRQ
 					line is connected to.
-	linux,wakeup			use any event on touchscreen as wakeup event.
+	wakeup-source			use any event on touchscreen as wakeup event.
 
 
 Example for a TSC2046 chip connected to an McSPI controller of an OMAP SoC::
diff --git a/Documentation/devicetree/bindings/input/cap11xx.txt b/Documentation/devicetree/bindings/input/cap11xx.txt
index 7d0a300..8c67a0b 100644
--- a/Documentation/devicetree/bindings/input/cap11xx.txt
+++ b/Documentation/devicetree/bindings/input/cap11xx.txt
@@ -55,5 +55,24 @@
 				 <105>,		/* KEY_LEFT */
 				 <109>,		/* KEY_PAGEDOWN */
 				 <104>;		/* KEY_PAGEUP */
+
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		usr@0 {
+			label = "cap11xx:green:usr0";
+			reg = <0>;
+		};
+
+		usr@1 {
+			label = "cap11xx:green:usr1";
+			reg = <1>;
+		};
+
+		alive@2 {
+			label = "cap11xx:green:alive";
+			reg = <2>;
+			linux,default-trigger = "heartbeat";
+		};
 	};
 }
diff --git a/Documentation/devicetree/bindings/input/cypress,cyapa.txt b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
new file mode 100644
index 0000000..635a3b0
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
@@ -0,0 +1,44 @@
+Cypress I2C Touchpad
+
+Required properties:
+- compatible: must be "cypress,cyapa".
+- reg: I2C address of the chip.
+- interrupt-parent: a phandle for the interrupt controller (see interrupt
+	binding[0]).
+- interrupts: interrupt to which the chip is connected (see interrupt
+	binding[0]).
+
+Optional properties:
+- wakeup-source: touchpad can be used as a wakeup source.
+- pinctrl-names: should be "default" (see pinctrl binding [1]).
+- pinctrl-0: a phandle pointing to the pin settings for the device (see
+	pinctrl binding [1]).
+- vcc-supply: a phandle for the regulator supplying 3.3V power.
+
+[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+[1]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+Example:
+	&i2c0 {
+		/* ... */
+
+		/* Cypress Gen3 touchpad */
+		touchpad@67 {
+			compatible = "cypress,cyapa";
+			reg = <0x67>;
+			interrupt-parent = <&gpio>;
+			interrupts = <2 IRQ_TYPE_EDGE_FALLING>;	/* GPIO 2 */
+			wakeup-source;
+		};
+
+		/* Cypress Gen5 and later touchpad */
+		touchpad@24 {
+			compatible = "cypress,cyapa";
+			reg = <0x24>;
+			interrupt-parent = <&gpio>;
+			interrupts = <2 IRQ_TYPE_EDGE_FALLING>;	/* GPIO 2 */
+			wakeup-source;
+		};
+
+		/* ... */
+	};
diff --git a/Documentation/devicetree/bindings/input/elants_i2c.txt b/Documentation/devicetree/bindings/input/elants_i2c.txt
index a765232..8a71038 100644
--- a/Documentation/devicetree/bindings/input/elants_i2c.txt
+++ b/Documentation/devicetree/bindings/input/elants_i2c.txt
@@ -13,6 +13,9 @@
 - pinctrl-names: should be "default" (see pinctrl binding [1]).
 - pinctrl-0: a phandle pointing to the pin settings for the device (see
   pinctrl binding [1]).
+- reset-gpios: reset gpio the chip is connected to.
+- vcc33-supply: a phandle for the regulator supplying 3.3V power.
+- vccio-supply: a phandle for the regulator supplying IO power.
 
 [0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
 [1]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
diff --git a/Documentation/devicetree/bindings/input/gpio-keys-polled.txt b/Documentation/devicetree/bindings/input/gpio-keys-polled.txt
index 313abef..5b91f5a 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys-polled.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys-polled.txt
@@ -20,7 +20,7 @@
 	  If not specified defaults to <1> == EV_KEY.
 	- debounce-interval: Debouncing interval time in milliseconds.
 	  If not specified defaults to 5.
-	- gpio-key,wakeup: Boolean, button can wake-up the system.
+	- wakeup-source: Boolean, button can wake-up the system.
 
 Example nodes:
 
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
index 44b7057..072bf75 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys.txt
@@ -23,7 +23,7 @@
 	  If not specified defaults to <1> == EV_KEY.
 	- debounce-interval: Debouncing interval time in milliseconds.
 	  If not specified defaults to 5.
-	- gpio-key,wakeup: Boolean, button can wake-up the system.
+	- wakeup-source: Boolean, button can wake-up the system.
 	- linux,can-disable: Boolean, indicates that button is connected
 	  to dedicated (not shared) interrupt which can be disabled to
 	  suppress events from the button.
diff --git a/Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt b/Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt
index ead641c..4d86059 100644
--- a/Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt
+++ b/Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt
@@ -19,7 +19,7 @@
 
 Optional Properties:
 - linux,no-autorepeat:	do not enable the autorepeat feature.
-- linux,wakeup:		use any event on keypad as wakeup event.
+- wakeup-source:	use any event on keypad as wakeup event.
 - debounce-delay-ms:	debounce interval in milliseconds
 - col-scan-delay-us:	delay, measured in microseconds, that is needed
 			before we can scan keypad after activating column gpio
diff --git a/Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt b/Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt
index 7d8cb92..ee62156 100644
--- a/Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt
+++ b/Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt
@@ -33,7 +33,7 @@
 	Value type: <bool>
 	Definition: don't enable autorepeat feature.
 
-- linux,keypad-wakeup:
+- wakeup-source:
 	Usage: optional
 	Value type: <bool>
 	Definition: use any event on keypad as wakeup event.
diff --git a/Documentation/devicetree/bindings/input/samsung-keypad.txt b/Documentation/devicetree/bindings/input/samsung-keypad.txt
index 942d071..863e77f 100644
--- a/Documentation/devicetree/bindings/input/samsung-keypad.txt
+++ b/Documentation/devicetree/bindings/input/samsung-keypad.txt
@@ -36,9 +36,11 @@
 - pinctrl-0: Should specify pin control groups used for this controller.
 - pinctrl-names: Should contain only one value - "default".
 
+Optional Properties:
+- wakeup-source: use any event on keypad as wakeup event.
+
 Optional Properties specific to linux:
 - linux,keypad-no-autorepeat: do no enable autorepeat feature.
-- linux,keypad-wakeup: use any event on keypad as wakeup event.
 
 
 Example:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/pixcir_i2c_ts.txt b/Documentation/devicetree/bindings/input/touchscreen/pixcir_i2c_ts.txt
index 6e55109..8eb240a 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/pixcir_i2c_ts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/pixcir_i2c_ts.txt
@@ -8,6 +8,9 @@
 - touchscreen-size-x: horizontal resolution of touchscreen (in pixels)
 - touchscreen-size-y: vertical resolution of touchscreen (in pixels)
 
+Optional properties:
+- reset-gpio: GPIO connected to the RESET line of the chip
+
 Example:
 
 	i2c@00000000 {
diff --git a/Documentation/devicetree/bindings/input/touchscreen/zforce_ts.txt b/Documentation/devicetree/bindings/input/touchscreen/zforce_ts.txt
index 80c37df..e3c27c4 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/zforce_ts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/zforce_ts.txt
@@ -4,12 +4,12 @@
 - compatible: must be "neonode,zforce"
 - reg: I2C address of the chip
 - interrupts: interrupt to which the chip is connected
-- gpios: gpios the chip is connected to
-  first one is the interrupt gpio and second one the reset gpio
+- reset-gpios: reset gpio the chip is connected to
 - x-size: horizontal resolution of touchscreen
 - y-size: vertical resolution of touchscreen
 
 Optional properties:
+- irq-gpios: interrupt gpio the chip is connected to
 - vdd-supply: Regulator controlling the controller supply
 
 Example:
@@ -23,8 +23,8 @@
 			interrupts = <2 0>;
 			vdd-supply = <&reg_zforce_vdd>;
 
-			gpios = <&gpio5 6 0>, /* INT */
-				<&gpio5 9 0>; /* RST */
+			reset-gpios = <&gpio5 9 0>; /* RST */
+			irq-gpios = <&gpio5 6 0>; /* IRQ, optional */
 
 			x-size = <800>;
 			y-size = <600>;
diff --git a/Documentation/devicetree/bindings/leds/leds-pm8941-wled.txt b/Documentation/devicetree/bindings/leds/leds-pm8941-wled.txt
deleted file mode 100644
index a85a964..0000000
--- a/Documentation/devicetree/bindings/leds/leds-pm8941-wled.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-Binding for Qualcomm PM8941 WLED driver
-
-Required properties:
-- compatible: should be "qcom,pm8941-wled"
-- reg: slave address
-
-Optional properties:
-- label: The label for this led
-  See Documentation/devicetree/bindings/leds/common.txt
-- linux,default-trigger: Default trigger assigned to the LED
-  See Documentation/devicetree/bindings/leds/common.txt
-- qcom,cs-out: bool; enable current sink output
-- qcom,cabc: bool; enable content adaptive backlight control
-- qcom,ext-gen: bool; use externally generated modulator signal to dim
-- qcom,current-limit: mA; per-string current limit; value from 0 to 25
-	default: 20mA
-- qcom,current-boost-limit: mA; boost current limit; one of:
-	105, 385, 525, 805, 980, 1260, 1400, 1680
-	default: 805mA
-- qcom,switching-freq: kHz; switching frequency; one of:
-	600, 640, 685, 738, 800, 872, 960, 1066, 1200, 1371,
-	1600, 1920, 2400, 3200, 4800, 9600,
-	default: 1600kHz
-- qcom,ovp: V; Over-voltage protection limit; one of:
-	27, 29, 32, 35
-	default: 29V
-- qcom,num-strings: #; number of led strings attached; value from 1 to 3
-	default: 2
-
-Example:
-
-pm8941-wled@d800 {
-	compatible = "qcom,pm8941-wled";
-	reg = <0xd800>;
-	label = "backlight";
-
-	qcom,cs-out;
-	qcom,current-limit = <20>;
-	qcom,current-boost-limit = <805>;
-	qcom,switching-freq = <1600>;
-	qcom,ovp = <29>;
-	qcom,num-strings = <2>;
-};
diff --git a/Documentation/devicetree/bindings/leds/leds-powernv.txt b/Documentation/devicetree/bindings/leds/leds-powernv.txt
new file mode 100644
index 0000000..6665569
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-powernv.txt
@@ -0,0 +1,26 @@
+Device Tree binding for LEDs on IBM Power Systems
+-------------------------------------------------
+
+Required properties:
+- compatible : Should be "ibm,opal-v3-led".
+- led-mode   : Should be "lightpath" or "guidinglight".
+
+Each location code of FRU/Enclosure must be expressed in the
+form of a sub-node.
+
+Required properties for the sub nodes:
+- led-types : Supported LED types (attention/identify/fault) provided
+              in the form of a string array.
+
+Example:
+
+leds {
+	compatible = "ibm,opal-v3-led";
+	led-mode = "lightpath";
+
+	U78C9.001.RST0027-P1-C1 {
+		led-types = "identify", "fault";
+	};
+	...
+	...
+};
diff --git a/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt b/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt
index d5e3704..89427b0 100644
--- a/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt
@@ -18,6 +18,8 @@
               interrupt (NAND_EVTER_STAT).  If there is only one,
               that interrupt reports both types of event.
 
+- little-endian : If this property is absent, big-endian mode will be
+                  used as the default for registers.
 
 - ranges : Each range corresponds to a single chipselect, and covers
            the entire access window as configured.
@@ -34,6 +36,7 @@
 		#size-cells = <1>;
 		reg = <0x0 0xffe1e000 0 0x2000>;
 		interrupts = <16 2 19 2>;
+		little-endian;
 
 		/* NOR, NAND Flashes and CPLD on board */
 		ranges = <0x0 0x0 0x0 0xee000000 0x02000000
diff --git a/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt b/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
index f64de95a..ad5d904 100644
--- a/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
@@ -2,7 +2,11 @@
 
 Required properties:
  - compatible: value should be one of the following:
+   "atmel,at91sam9n12-hlcdc"
+   "atmel,at91sam9x5-hlcdc"
+   "atmel,sama5d2-hlcdc"
    "atmel,sama5d3-hlcdc"
+   "atmel,sama5d4-hlcdc"
  - reg: base address and size of the HLCDC device registers.
  - clock-names: the name of the 3 clocks requested by the HLCDC device.
    Should contain "periph_clk", "sys_clk" and "slow_clk".
diff --git a/Documentation/devicetree/bindings/mfd/axp20x.txt b/Documentation/devicetree/bindings/mfd/axp20x.txt
index 753f14f..4181122 100644
--- a/Documentation/devicetree/bindings/mfd/axp20x.txt
+++ b/Documentation/devicetree/bindings/mfd/axp20x.txt
@@ -1,12 +1,14 @@
 AXP family PMIC device tree bindings
 
 The axp20x family current members :
+axp152 (X-Powers)
 axp202 (X-Powers)
 axp209 (X-Powers)
 axp221 (X-Powers)
 
 Required properties:
-- compatible: "x-powers,axp202", "x-powers,axp209", "x-powers,axp221"
+- compatible: "x-powers,axp152", "x-powers,axp202", "x-powers,axp209",
+	      "x-powers,axp221"
 - reg: The I2C slave address for the AXP chip
 - interrupt-parent: The parent interrupt controller
 - interrupts: SoC NMI / GPIO interrupt connected to the PMIC's IRQ pin
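+
+Example (a minimal sketch; the I2C address and interrupt routing are
+assumptions and must match the board):
+
+	axp152: pmic@30 {
+		compatible = "x-powers,axp152";
+		reg = <0x30>;
+		interrupt-parent = <&nmi_intc>;
+		interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+	};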
diff --git a/Documentation/devicetree/bindings/mfd/da9062.txt b/Documentation/devicetree/bindings/mfd/da9062.txt
new file mode 100644
index 0000000..38802b5
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/da9062.txt
@@ -0,0 +1,88 @@
+* Dialog DA9062 Power Management Integrated Circuit (PMIC)
+
+DA9062 consists of a large and varied group of sub-devices:
+
+Device                   Supply Names    Description
+------                   ------------    -----------
+da9062-regulator        :               : LDOs & BUCKs
+da9062-rtc              :               : Real-Time Clock
+da9062-watchdog         :               : Watchdog Timer
+
+======
+
+Required properties:
+
+- compatible : Should be "dlg,da9062".
+- reg : Specifies the I2C slave address (this defaults to 0x58 but it can be
+  modified to match the chip's OTP settings).
+- interrupt-parent : Specifies the reference to the interrupt controller for
+  the DA9062.
+- interrupts : IRQ line information.
+- interrupt-controller
+
+See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt for
+further information on IRQ bindings.
+
+Sub-nodes:
+
+- regulators : This node defines the settings for the LDOs and BUCKs. The
+  DA9062 regulators are bound using their names listed below:
+
+    buck1    : BUCK_1
+    buck2    : BUCK_2
+    buck3    : BUCK_3
+    buck4    : BUCK_4
+    ldo1     : LDO_1
+    ldo2     : LDO_2
+    ldo3     : LDO_3
+    ldo4     : LDO_4
+
+  The component follows the standard regulator framework and the binding
+  details of the individual regulator devices can be found in:
+  Documentation/devicetree/bindings/regulator/regulator.txt
+
+
+- rtc : This node defines settings required for the Real-Time Clock associated
+  with the DA9062. There are currently no entries in this binding, however
+  compatible = "dlg,da9062-rtc" should be added if a node is created.
+
+- watchdog: This node defines the settings for the watchdog driver associated
+  with the DA9062 PMIC. The compatible = "dlg,da9062-watchdog" should be added
+  if a node is created.
+
+
+Example:
+
+	pmic0: da9062@58 {
+		compatible = "dlg,da9062";
+		reg = <0x58>;
+		interrupt-parent = <&gpio6>;
+		interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-controller;
+
+		rtc {
+			compatible = "dlg,da9062-rtc";
+		};
+
+		watchdog {
+			compatible = "dlg,da9062-watchdog";
+		};
+
+		regulators {
+			DA9062_BUCK1: buck1 {
+				regulator-name = "BUCK1";
+				regulator-min-microvolt = <300000>;
+				regulator-max-microvolt = <1570000>;
+				regulator-min-microamp = <500000>;
+				regulator-max-microamp = <2000000>;
+				regulator-boot-on;
+			};
+			DA9062_LDO1: ldo1 {
+				regulator-name = "LDO_1";
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <3600000>;
+				regulator-boot-on;
+			};
+		};
+	};
+
diff --git a/Documentation/devicetree/bindings/mfd/max77686.txt b/Documentation/devicetree/bindings/mfd/max77686.txt
index 163bd81..741e766 100644
--- a/Documentation/devicetree/bindings/mfd/max77686.txt
+++ b/Documentation/devicetree/bindings/mfd/max77686.txt
@@ -7,8 +7,9 @@
 client while probing.This document describes the binding for mfd device and
 PMIC submodule.
 
-Binding for the built-in 32k clock generator block is defined separately
-in bindings/clk/maxim,max77686.txt file.
+Bindings for the built-in 32k clock generator block and
+regulators are defined in ../clk/maxim,max77686.txt and
+../regulator/max77686.txt respectively.
 
 Required properties:
 - compatible : Must be "maxim,max77686";
@@ -16,67 +17,11 @@
 - interrupts : This i2c device has an IRQ line connected to the main SoC.
 - interrupt-parent : The parent interrupt controller.
 
-Optional node:
-- voltage-regulators : The regulators of max77686 have to be instantiated
-  under subnode named "voltage-regulators" using the following format.
-
-	regulator_name {
-		regulator-compatible = LDOn/BUCKn
-		standard regulator constraints....
-	};
-	refer Documentation/devicetree/bindings/regulator/regulator.txt
-
-  The regulator-compatible property of regulator should initialized with string
-to get matched with their hardware counterparts as follow:
-
-	-LDOn 	:	for LDOs, where n can lie in range 1 to 26.
-		 	example: LDO1, LDO2, LDO26.
-	-BUCKn 	:	for BUCKs, where n can lie in range 1 to 9.
-			example: BUCK1, BUCK5, BUCK9.
-
-  Regulators which can be turned off during system suspend:
-	-LDOn	:	2, 6-8, 10-12, 14-16,
-	-BUCKn	:	1-4.
-  Use standard regulator bindings for it ('regulator-off-in-suspend').
-
-  LDO20, LDO21, LDO22, BUCK8 and BUCK9 can be configured to GPIO enable
-  control. To turn this feature on this property must be added to the regulator
-  sub-node:
-	- maxim,ena-gpios :	one GPIO specifier enable control (the gpio
-				flags are actually ignored and always
-				ACTIVE_HIGH is used)
-
 Example:
 
-	max77686@09 {
+	max77686: pmic@09 {
 		compatible = "maxim,max77686";
 		interrupt-parent = <&wakeup_eint>;
 		interrupts = <26 0>;
 		reg = <0x09>;
-
-		voltage-regulators {
-			ldo11_reg {
-				regulator-compatible = "LDO11";
-				regulator-name = "vdd_ldo11";
-				regulator-min-microvolt = <1900000>;
-				regulator-max-microvolt = <1900000>;
-				regulator-always-on;
-			};
-
-			buck1_reg {
-				regulator-compatible = "BUCK1";
-				regulator-name = "vdd_mif";
-				regulator-min-microvolt = <950000>;
-				regulator-max-microvolt = <1300000>;
-				regulator-always-on;
-				regulator-boot-on;
-			};
-
-			buck9_reg {
-				regulator-compatible = "BUCK9";
-				regulator-name = "CAM_ISP_CORE_1.2V";
-				regulator-min-microvolt = <1000000>;
-				regulator-max-microvolt = <1200000>;
-				maxim,ena-gpios = <&gpm0 3 GPIO_ACTIVE_HIGH>;
-			};
-	}
+	};
diff --git a/Documentation/devicetree/bindings/mfd/max77802.txt b/Documentation/devicetree/bindings/mfd/max77802.txt
new file mode 100644
index 0000000..51fc1a6
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/max77802.txt
@@ -0,0 +1,26 @@
+Maxim MAX77802 multi-function device
+
+The Maxim MAX77802 is a Power Management IC (PMIC) that contains 10 high
+efficiency Buck regulators, 32 Low-DropOut (LDO) regulators used to power
+up application processors and peripherals, 2-channel 32kHz clock outputs,
+a Real-Time Clock (RTC) and an I2C interface to program the individual
+regulators, clock outputs and the RTC.
+
+Bindings for the built-in 32k clock generator block and
+regulators are defined in ../clk/maxim,max77802.txt and
+../regulator/max77802.txt respectively.
+
+Required properties:
+- compatible		: Must be "maxim,max77802"
+- reg			: Specifies the I2C slave address of PMIC block.
+- interrupts		: I2C device IRQ line connected to the main SoC.
+- interrupt-parent	: The parent interrupt controller.
+
+Example:
+
+	max77802: pmic@09 {
+		compatible = "maxim,max77802";
+		interrupt-parent = <&intc>;
+		interrupts = <26 IRQ_TYPE_NONE>;
+		reg = <0x09>;
+	};
diff --git a/Documentation/devicetree/bindings/mfd/tc3589x.txt b/Documentation/devicetree/bindings/mfd/tc3589x.txt
index 6fcedba..37bf7f1 100644
--- a/Documentation/devicetree/bindings/mfd/tc3589x.txt
+++ b/Documentation/devicetree/bindings/mfd/tc3589x.txt
@@ -55,7 +55,7 @@
  - linux,keymap: the definition can be found in
    bindings/input/matrix-keymap.txt
  - linux,no-autorepeat: do no enable autorepeat feature.
- - linux,wakeup: use any event on keypad as wakeup event.
+ - wakeup-source: use any event on keypad as wakeup event.
 
 Example:
 
@@ -84,7 +84,6 @@
 		keypad,num-columns = <8>;
 		keypad,num-rows = <8>;
 		linux,no-autorepeat;
-		linux,wakeup;
 		linux,keymap = <0x0301006b
 				0x04010066
 				0x06040072
@@ -103,5 +102,6 @@
 				0x01030039
 				0x07060069
 				0x050500d9>;
+		wakeup-source;
 	};
 };
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index f0b4cd7..04e6bef 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -44,9 +44,10 @@
 described below.
 
 Optionnal property:
-- link			: Should be a phandle to another switch's DSA port.
+- link			: Should be a list of phandles to other switches' DSA ports.
 			  This property is only used when switches are being
-			  chained/cascaded together.
+			  chained/cascaded together. This port is used as the
+			  outgoing port towards the phandle port, which can be
+			  more than one hop away.
 
 - phy-handle		: Phandle to a PHY on an external MDIO bus, not the
 			  switch internal one. See
@@ -58,6 +59,10 @@
 			  Documentation/devicetree/bindings/net/ethernet.txt
 			  for details.
 
+- mii-bus		: Should be a phandle to a valid MDIO bus device node.
+			  For this switch, this mii-bus will be used in
+			  preference to the global dsa,mii-bus defined above.
+
 Optional subnodes:
 - fixed-link		: Fixed-link subnode describing a link to a non-MDIO
 			  managed entity. See
@@ -96,10 +101,11 @@
 				label = "cpu";
 			};
 
-			switch0uplink: port@6 {
+			switch0port6: port@6 {
 				reg = <6>;
 				label = "dsa";
-				link = <&switch1uplink>;
+				link = <&switch1port0
+				        &switch2port0>;
 			};
 		};
 
@@ -107,11 +113,31 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <17 1>;	/* MDIO address 17, switch 1 in tree */
+			mii-bus = <&mii_bus1>;
 
-			switch1uplink: port@0 {
+			switch1port0: port@0 {
 				reg = <0>;
 				label = "dsa";
-				link = <&switch0uplink>;
+				link = <&switch0port6>;
+			};
+			switch1port1: port@1 {
+				reg = <1>;
+				label = "dsa";
+				link = <&switch2port1>;
+			};
+		};
+
+		switch@2 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <18 2>;	/* MDIO address 18, switch 2 in tree */
+			mii-bus = <&mii_bus1>;
+
+			switch2port0: port@0 {
+				reg = <0>;
+				label = "dsa";
+				link = <&switch1port1
+				        &switch0port6>;
 			};
 		};
 	};
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index 41b3f3f..5d88f37 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -25,7 +25,11 @@
   flow control thresholds.
 - tx-fifo-depth: the size of the controller's transmit fifo in bytes. This
   is used for components that can have configurable fifo sizes.
+- managed: string, specifies the PHY management type. Supported values are:
+  "auto", "in-band-status". "auto" is the default; it uses MDIO for
+  management if fixed-link is not specified.
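+
+  For example, a MAC whose link status is conveyed in-band might use the
+  following (an illustrative sketch; the node name and unit address are
+  assumptions):
+
+	ethernet@f0b00000 {
+		...
+		phy-mode = "sgmii";
+		managed = "in-band-status";
+	};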
 
 Child nodes of the Ethernet controller are typically the individual PHY devices
 connected via the MDIO bus (sometimes the MDIO bus controller is separate).
 They are described in the phy.txt file in this same directory.
+For non-MDIO PHY management see fixed-link.txt.
diff --git a/Documentation/devicetree/bindings/net/fixed-link.txt b/Documentation/devicetree/bindings/net/fixed-link.txt
index 82bf7e0..ec5d889 100644
--- a/Documentation/devicetree/bindings/net/fixed-link.txt
+++ b/Documentation/devicetree/bindings/net/fixed-link.txt
@@ -17,6 +17,8 @@
   enabled.
 * 'asym-pause' (boolean, optional), to indicate that asym_pause should
   be enabled.
+* 'link-gpios' ('gpio-list', optional), to indicate if a gpio can be read
+  to determine if the link is up.
 
 Old, deprecated 'fixed-link' binding:
 
@@ -30,7 +32,7 @@
   - e: asymmetric pause configuration: 0 for no asymmetric pause, 1 for
     asymmetric pause
 
-Example:
+Examples:
 
 ethernet@0 {
 	...
@@ -40,3 +42,13 @@
 	};
 	...
 };
+
+ethernet@1 {
+	...
+	fixed-link {
+	      speed = <1000>;
+	      pause;
+	      link-gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>;
+	};
+	...
+};
diff --git a/Documentation/devicetree/bindings/net/keystone-netcp.txt b/Documentation/devicetree/bindings/net/keystone-netcp.txt
index d0e6fa3..b30ab6b 100644
--- a/Documentation/devicetree/bindings/net/keystone-netcp.txt
+++ b/Documentation/devicetree/bindings/net/keystone-netcp.txt
@@ -130,7 +130,11 @@
 
 Optional properties:
 - efuse-mac:	If this is 1, then the MAC address for the interface is
-		obtained from the device efuse mac address register
+		obtained from the device efuse mac address register.
+		If this is 2, the two DWORDs occupied by the MAC address
+		are swapped; the netcp driver will swap them back to the
+		proper order when it obtains the MAC address from efuse.
 - local-mac-address:	the driver is designed to use the of_get_mac_address api
 			only if efuse-mac is 0. When efuse-mac is 0, the MAC
 			address is obtained from local-mac-address. If this
diff --git a/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt b/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt
new file mode 100644
index 0000000..fb1e75f
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt
@@ -0,0 +1,27 @@
+* Samsung S3FWRN5 NCI NFC Controller
+
+Required properties:
+- compatible: Should be "samsung,s3fwrn5-i2c".
+- reg: address on the bus
+- interrupt-parent: phandle for the interrupt gpio controller
+- interrupts: GPIO interrupt to which the chip is connected
+- s3fwrn5,en-gpios: Output GPIO pin used for enabling/disabling the chip
+- s3fwrn5,fw-gpios: Output GPIO pin used to enter firmware mode and
+  sleep/wakeup control
+
+Example:
+
+&hsi2c_4 {
+	status = "okay";
+	s3fwrn5@27 {
+		compatible = "samsung,s3fwrn5-i2c";
+
+		reg = <0x27>;
+
+		interrupt-parent = <&gpa1>;
+		interrupts = <3 0 0>;
+
+		s3fwrn5,en-gpios = <&gpf1 4 0>;
+		s3fwrn5,fw-gpios = <&gpj0 2 0>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/nfc/st-nci.txt b/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt
similarity index 100%
rename from Documentation/devicetree/bindings/net/nfc/st-nci.txt
rename to Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt
diff --git a/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt b/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt
new file mode 100644
index 0000000..525681b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt
@@ -0,0 +1,31 @@
+* STMicroelectronics SAS. ST NCI NFC Controller
+
+Required properties:
+- compatible: Should be "st,st21nfcb-spi"
+- spi-max-frequency: Maximum SPI frequency (<= 10000000).
+- interrupt-parent: phandle for the interrupt gpio controller
+- interrupts: GPIO interrupt to which the chip is connected
+- reset-gpios: Output GPIO pin used to reset the ST21NFCB
+
+Optional SoC Specific Properties:
+- pinctrl-names: Contains only one value - "default".
+- pinctrl-0: Specifies the pin control groups used for this controller.
+
+Example (for ARM-based BeagleBoard xM with ST21NFCB on SPI4):
+
+&mcspi4 {
+
+	status = "okay";
+
+	st21nfcb: st21nfcb@0 {
+
+		compatible = "st,st21nfcb-spi";
+
+		spi-max-frequency = <4000000>;
+
+		interrupt-parent = <&gpio5>;
+		interrupts = <2 IRQ_TYPE_EDGE_RISING>;
+
+		reset-gpios = <&gpio5 29 GPIO_ACTIVE_HIGH>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
new file mode 100644
index 0000000..51f8d2e
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
@@ -0,0 +1,75 @@
+* Synopsys DWC Ethernet QoS IP version 4.10 driver (GMAC)
+
+
+Required properties:
+- compatible: Should be "snps,dwc-qos-ethernet-4.10"
+- reg: Address and length of the register set for the device
+- clocks: Phandles to the reference clock and the bus clock
+- clock-names: Should be "phy_ref_clk" for the reference clock and "apb_pclk"
+  for the bus clock.
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the core's combined interrupt signal
+- phy-mode: See ethernet.txt file in the same directory
+
+Optional properties:
+- dma-coherent: Present if dma operations are coherent
+- mac-address: See ethernet.txt in the same directory
+- local-mac-address: See ethernet.txt in the same directory
+- snps,en-lpi: If present it enables use of the AXI low-power interface
+- snps,write-requests: Number of write requests that the AXI port can issue.
+  It depends on the SoC configuration.
+- snps,read-requests: Number of read requests that the AXI port can issue.
+  It depends on the SoC configuration.
+- snps,burst-map: Bitmap of allowed AXI burst lengths, with the LSB
+  representing 4, then 8, etc.
+- snps,txpbl: DMA Programmable burst length for the TX DMA
+- snps,rxpbl: DMA Programmable burst length for the RX DMA
+- snps,en-tx-lpi-clockgating: Enable gating of the MAC TX clock during
+  TX low-power mode.
+- phy-handle: See ethernet.txt file in the same directory
+- mdio device tree subnode: When the GMAC has a phy connected to its local
+    mdio, there must be a device tree subnode with the following
+    required properties:
+    - compatible: Must be "snps,dwc-qos-ethernet-mdio".
+    - #address-cells: Must be <1>.
+    - #size-cells: Must be <0>.
+
+    For each phy on the mdio bus, there must be a node with the following
+    fields:
+
+    - reg: phy id used to communicate to phy.
+    - device_type: Must be "ethernet-phy".
+    - fixed-link device tree subnode: see fixed-link.txt in the same directory
+
+Examples:
+ethernet2@40010000 {
+	clock-names = "phy_ref_clk", "apb_pclk";
+	clocks = <&clkc 17>, <&clkc 15>;
+	compatible = "snps,dwc-qos-ethernet-4.10";
+	interrupt-parent = <&intc>;
+	interrupts = <0x0 0x1e 0x4>;
+	reg = <0x40010000 0x4000>;
+	phy-handle = <&phy2>;
+	phy-mode = "gmii";
+
+	snps,en-tx-lpi-clockgating;
+	snps,en-lpi;
+	snps,write-requests = <2>;
+	snps,read-requests = <16>;
+	snps,burst-map = <0x7>;
+	snps,txpbl = <8>;
+	snps,rxpbl = <2>;
+
+	dma-coherent;
+
+	mdio {
+		#address-cells = <0x1>;
+		#size-cells = <0x0>;
+		phy2: phy@1 {
+			compatible = "ethernet-phy-ieee802.3-c22";
+			device_type = "ethernet-phy";
+			reg = <0x1>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/panel/auo,b080uan01.txt b/Documentation/devicetree/bindings/panel/auo,b080uan01.txt
new file mode 100644
index 0000000..bae0e2b
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/auo,b080uan01.txt
@@ -0,0 +1,7 @@
+AU Optronics Corporation 8.0" WUXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,b101ean01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/lg,lg4573.txt b/Documentation/devicetree/bindings/panel/lg,lg4573.txt
new file mode 100644
index 0000000..824441f
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/lg,lg4573.txt
@@ -0,0 +1,19 @@
+LG LG4573 TFT Liquid Crystal Display with SPI control bus
+
+Required properties:
+  - compatible: "lg,lg4573"
+  - reg: address of the panel on the SPI bus
+
+The panel must obey the rules for SPI slave devices specified in document [1].
+
+[1]: Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+
+	lcd_panel: display@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "lg,lg4573";
+		spi-max-frequency = <10000000>;
+		reg = <0>;
+	};
diff --git a/Documentation/devicetree/bindings/panel/nec,nl4827hc19-05b.txt b/Documentation/devicetree/bindings/panel/nec,nl4827hc19-05b.txt
new file mode 100644
index 0000000..8e1914d
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/nec,nl4827hc19-05b.txt
@@ -0,0 +1,7 @@
+NEC LCD Technologies, Ltd. WQVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "nec,nl4827hc19-05b"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/okaya,rs800480t-7x0gp.txt b/Documentation/devicetree/bindings/panel/okaya,rs800480t-7x0gp.txt
new file mode 100644
index 0000000..ddf8e21
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/okaya,rs800480t-7x0gp.txt
@@ -0,0 +1,7 @@
+OKAYA Electric America, Inc. RS800480T-7X0GP 7" WVGA LCD panel
+
+Required properties:
+- compatible: should be "okaya,rs800480t-7x0gp"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
index 9462ab7..3c821cd 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
@@ -48,7 +48,7 @@
 
 Examples:
 
-pinctrl@01c20800 {
+pio: pinctrl@01c20800 {
 	compatible = "allwinner,sun5i-a13-pinctrl";
 	reg = <0x01c20800 0x400>;
 	#address-cells = <1>;
@@ -68,3 +68,38 @@
 		allwinner,pull = <0>;
 	};
 };
+
+
+GPIO and interrupt controller
+-----------------------------
+
+This hardware also acts as a GPIO controller and an interrupt
+controller.
+
+Consumers that want to refer to one or the other (or both) should
+provide, through the usual *-gpios and interrupts properties, a cell
+with 3 arguments: first the number of the bank, then the pin inside
+that bank, and finally the flags for the GPIO/interrupt.
+
+Example:
+
+xio: gpio@38 {
+	compatible = "nxp,pcf8574a";
+	reg = <0x38>;
+
+	gpio-controller;
+	#gpio-cells = <2>;
+
+	interrupt-parent = <&pio>;
+	interrupts = <6 0 IRQ_TYPE_EDGE_FALLING>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
+};
+
+reg_usb1_vbus: usb1-vbus {
+	compatible = "regulator-fixed";
+	regulator-name = "usb1-vbus";
+	regulator-min-microvolt = <5000000>;
+	regulator-max-microvolt = <5000000>;
+	gpio = <&pio 7 6 GPIO_ACTIVE_HIGH>;
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/cnxt,cx92755-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/cnxt,cx92755-pinctrl.txt
new file mode 100644
index 0000000..23ce8dc26
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/cnxt,cx92755-pinctrl.txt
@@ -0,0 +1,86 @@
+Conexant Digicolor CX92755 General Purpose Pin Mapping
+
+This document describes the device tree binding of the pin mapping hardware
+modules in the Conexant Digicolor CX92755 SoCs. The CX92755 is one of the
+Digicolor series of SoCs.
+
+=== Pin Controller Node ===
+
+Required Properties:
+
+- compatible: Must be "cnxt,cx92755-pinctrl"
+- reg: Base address of the General Purpose Pin Mapping register block and the
+  size of the block.
+- gpio-controller: Marks the device node as a GPIO controller.
+- #gpio-cells: Must be <2>. The first cell is the pin number and the
+  second cell is used to specify flags. See include/dt-bindings/gpio/gpio.h
+  for possible values.
+
+For example, the following is the bare minimum node:
+
+	pinctrl: pinctrl@f0000e20 {
+		compatible = "cnxt,cx92755-pinctrl";
+		reg = <0xf0000e20 0x100>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+As a pin controller device, in addition to the required properties, this node
+should also contain the pin configuration nodes that client devices reference,
+if any.
+
+For a general description of GPIO bindings, please refer to ../gpio/gpio.txt.
+
+=== Pin Configuration Node ===
+
+Each pin configuration node is a sub-node of the pin controller node and is a
+container of an arbitrary number of subnodes, called pin group nodes in this
+document.
+
+Please refer to the pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the definition of a
+"pin configuration node".
+
+=== Pin Group Node ===
+
+A pin group node specifies the desired pin mux for an arbitrary number of
+pins. The name of the pin group node is optional and not used.
+
+A pin group node only affects the properties specified in the node, and has no
+effect on any properties that are omitted.
+
+The pin group node accepts a subset of the generic pin config properties. For
+details on the generic pin config properties, please refer to pinctrl-bindings.txt
+and <include/linux/pinctrl/pinconfig-generic.h>.
+
+Required Pin Group Node Properties:
+
+- pins: Multiple strings. Specifies the name(s) of one or more pins to be
+  configured by this node. The format of a pin name string is "GP_xy", where x
+  is an uppercase character from 'A' to 'R', and y is a digit from 0 to 7.
+- function: String. Specifies the pin mux selection. Values must be one of:
+  "gpio", "client_a", "client_b", "client_c"
+
+Example:
+	pinctrl: pinctrl@f0000e20 {
+		compatible = "cnxt,cx92755-pinctrl";
+		reg = <0xf0000e20 0x100>;
+
+		uart0_default: uart0_active {
+			data_signals {
+				pins = "GP_O0", "GP_O1";
+				function = "client_b";
+			};
+		};
+	};
+
+	uart0: uart@f0000740 {
+		compatible = "cnxt,cx92755-usart";
+		...
+		pinctrl-0 = <&uart0_default>;
+		pinctrl-names = "default";
+	};
+
+In the example above, a single pin group configuration node defines the
+"client select" for the Rx and Tx signals of uart0. The uart0 node references
+that pin configuration node using the &uart0_default phandle.
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
index ed19991..d7803a2 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
@@ -7,8 +7,13 @@
 	Usage: required
 	Value type: <string>
 	Definition: Should contain one of:
+		    "qcom,pm8018-mpp",
+		    "qcom,pm8038-mpp",
+		    "qcom,pm8821-mpp",
 		    "qcom,pm8841-mpp",
 		    "qcom,pm8916-mpp",
+		    "qcom,pm8917-mpp",
+		    "qcom,pm8921-mpp",
 		    "qcom,pm8941-mpp",
 		    "qcom,pma8084-mpp",
 
@@ -77,12 +82,9 @@
 	Value type: <string>
 	Definition: Specify the alternative function to be configured for the
 		    specified pins.  Valid values are:
-		    "normal",
-		    "paired",
-		    "dtest1",
-		    "dtest2",
-		    "dtest3",
-		    "dtest4"
+		    "digital",
+		    "analog",
+		    "sink"
 
 - bias-disable:
 	Usage: optional
@@ -127,12 +129,18 @@
 	Definition: Selects the power source for the specified pins. Valid power
 		    sources are defined in <dt-bindings/pinctrl/qcom,pmic-mpp.h>
 
-- qcom,analog-mode:
+- qcom,analog-level:
 	Usage: optional
-	Value type: <none>
-	Definition: Selects Analog mode of operation: combined with input-enable
-		    and/or output-high, output-low MPP could operate as
-		    Bidirectional Logic, Analog Input, Analog Output.
+	Value type: <u32>
+	Definition: Selects the source for analog output. Valid values are
+		    defined in <dt-bindings/pinctrl/qcom,pmic-mpp.h>
+		    PMIC_MPP_AOUT_LVL_*
+
+- qcom,dtest:
+	Usage: optional
+	Value type: <u32>
+	Definition: Selects which dtest rail is to be routed for the various
+		    functions. Valid values are 1-4.
 
 - qcom,amux-route:
 	Usage: optional
@@ -140,6 +148,10 @@
 	Definition: Selects the source for analog input. Valid values are
 		    defined in <dt-bindings/pinctrl/qcom,pmic-mpp.h>
 		    PMIC_MPP_AMUX_ROUTE_CH5, PMIC_MPP_AMUX_ROUTE_CH6...
+- qcom,paired:
+	Usage: optional
+	Value type: <none>
+	Definition: Indicates that the pin should be operating in paired mode.
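+
+For instance, an MPP driven as an analog output in paired mode could be
+described as below (an illustrative sketch; the PMIC_MPP_AOUT_LVL_* constant
+must be picked from the header to match the desired level):
+
+	aout {
+		pins = "mpp4";
+		function = "analog";
+		qcom,analog-level = <PMIC_MPP_AOUT_LVL_1V25>;
+		qcom,paired;
+	};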
 
 Example:
 
@@ -156,7 +168,7 @@
 		pm8841_default: default {
 			gpio {
 				pins = "mpp1", "mpp2", "mpp3", "mpp4";
-				function = "normal";
+				function = "digital";
 				input-enable;
 				power-source = <PM8841_MPP_S3>;
 			};
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
index 51cee44..9496934 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
@@ -58,12 +58,12 @@
 
 Pin Configuration Node Properties:
 
-- renesas,pins : An array of strings, each string containing the name of a pin.
-- renesas,groups : An array of strings, each string containing the name of a pin
+- pins : An array of strings, each string containing the name of a pin.
+- groups : An array of strings, each string containing the name of a pin
   group.
 
-- renesas,function: A string containing the name of the function to mux to the
-  pin group(s) specified by the renesas,groups property
+- function: A string containing the name of the function to mux to the pin
+  group(s) specified by the groups property.
 
   Valid values for pin, group and function names can be found in the group and
   function arrays of the PFC data file corresponding to the SoC
@@ -71,7 +71,9 @@
 
 The pin configuration parameters use the generic pinconf bindings defined in
 pinctrl-bindings.txt in this directory. The supported parameters are
-bias-disable, bias-pull-up and bias-pull-down.
+bias-disable, bias-pull-up, bias-pull-down and power-source. For pins that
+have a configurable I/O voltage, the power-source value should be the
+nominal I/O voltage in millivolts.
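+
+For example, a pin group running at a nominal 1.8V could be described as
+below (an illustrative sketch; the group and function names must come from
+the PFC data file of the target SoC):
+
+	sdhi0_pins_uhs: sd0_uhs {
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
+		power-source = <1800>;
+	};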
 
 
 GPIO
@@ -141,19 +143,19 @@
 
 		mmcif_pins: mmcif {
 			mux {
-				renesas,groups = "mmc0_data8_0", "mmc0_ctrl_0";
-				renesas,function = "mmc0";
+				groups = "mmc0_data8_0", "mmc0_ctrl_0";
+				function = "mmc0";
 			};
 			cfg {
-				renesas,groups = "mmc0_data8_0";
-				renesas,pins = "PORT279";
+				groups = "mmc0_data8_0";
+				pins = "PORT279";
 				bias-pull-up;
 			};
 		};
 
 		scifa4_pins: scifa4 {
-			renesas,groups = "scifa4_data", "scifa4_ctrl";
-			renesas,function = "scifa4";
+			groups = "scifa4_data", "scifa4_ctrl";
+			function = "scifa4";
 		};
 	};
 
diff --git a/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt b/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt
index f63fcb3..2213802 100644
--- a/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt
+++ b/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt
@@ -3,7 +3,9 @@
 Required properties:
 - compatible: "stericsson,db8500-pinctrl", "stericsson,db8540-pinctrl",
               "stericsson,stn8815-pinctrl"
-- reg: Should contain the register physical address and length of the PRCMU.
+- nomadik-gpio-chips: array of phandles to the corresponding GPIO chips
+              (these have the register ranges used by the pin controller).
+- prcm: phandle to the PRCMU managing the back end of this pin controller
 
 Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
@@ -74,7 +76,8 @@
 
 	pinctrl@80157000 {
 		compatible = "stericsson,db8500-pinctrl";
-		reg = <0x80157000 0x2000>;
+		nomadik-gpio-chips = <&gpio0>, <&gpio1>, <&gpio2>, <&gpio3>;
+		prcm = <&prcmu>;
 
 		pinctrl-names = "default";
 
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/scfg.txt b/Documentation/devicetree/bindings/powerpc/fsl/scfg.txt
new file mode 100644
index 0000000..0532c46
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/fsl/scfg.txt
@@ -0,0 +1,18 @@
+Freescale Supplement configuration unit (SCFG)
+
+SCFG is the supplemental configuration unit, which provides SoC-specific
+configuration and status registers for the chip, such as the PEX port
+status.
+
+Required properties:
+
+- compatible: should be "fsl,<chip>-scfg"
+- reg: should contain base address and length of SCFG memory-mapped
+registers
+
+Example:
+
+	scfg: global-utilities@fc000 {
+		compatible = "fsl,t1040-scfg";
+		reg = <0xfc000 0x1000>;
+	};
diff --git a/Documentation/devicetree/bindings/regulator/max77686.txt b/Documentation/devicetree/bindings/regulator/max77686.txt
new file mode 100644
index 0000000..0dded64
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/max77686.txt
@@ -0,0 +1,71 @@
+Binding for Maxim MAX77686 regulators
+
+This is a part of the device tree bindings of the MAX77686 multi-function device.
+More information can be found in ../mfd/max77686.txt file.
+
+The MAX77686 PMIC has 9 high-efficiency Buck and 26 Low-DropOut (LDO)
+regulators that can be controlled over I2C.
+
+The following properties should be present in the main device node of the
+MFD chip.
+
+Optional node:
+- voltage-regulators : The regulators of max77686 have to be instantiated
+  under a subnode named "voltage-regulators" using the following format.
+
+	regulator_name {
+		regulator-compatible = LDOn/BUCKn
+		standard regulator constraints....
+	};
+	refer to Documentation/devicetree/bindings/regulator/regulator.txt
+
+  The regulator node's name should be initialized with a string
+to get matched with its hardware counterpart as follows:
+
+	-LDOn 	:	for LDOs, where n can lie in range 1 to 26.
+			example: LDO1, LDO2, LDO26.
+	-BUCKn 	:	for BUCKs, where n can lie in range 1 to 9.
+			example: BUCK1, BUCK5, BUCK9.
+
+  Regulators which can be turned off during system suspend:
+	-LDOn	:	2, 6-8, 10-12, 14-16,
+	-BUCKn	:	1-4.
+  Use standard regulator bindings for it ('regulator-off-in-suspend').
+
+  LDO20, LDO21, LDO22, BUCK8 and BUCK9 can be configured to GPIO enable
+  control. To turn this feature on this property must be added to the regulator
+  sub-node:
+	- maxim,ena-gpios :	one GPIO specifier enable control (the gpio
+				flags are actually ignored and always
+				ACTIVE_HIGH is used)
+
+Example:
+
+	max77686: pmic@09 {
+		compatible = "maxim,max77686";
+		interrupt-parent = <&wakeup_eint>;
+		interrupts = <26 IRQ_TYPE_NONE>;
+		reg = <0x09>;
+
+		voltage-regulators {
+			ldo11_reg: LDO11 {
+				regulator-name = "vdd_ldo11";
+				regulator-min-microvolt = <1900000>;
+				regulator-max-microvolt = <1900000>;
+				regulator-always-on;
+			};
+
+			buck1_reg: BUCK1 {
+				regulator-name = "vdd_mif";
+				regulator-min-microvolt = <950000>;
+				regulator-max-microvolt = <1300000>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			buck9_reg: BUCK9 {
+				regulator-name = "CAM_ISP_CORE_1.2V";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1200000>;
+				maxim,ena-gpios = <&gpm0 3 GPIO_ACTIVE_HIGH>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/rtc/rtc-st-lpc.txt b/Documentation/devicetree/bindings/rtc/rtc-st-lpc.txt
index 73407f50..daf8826 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-st-lpc.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-st-lpc.txt
@@ -1,20 +1,23 @@
 STMicroelectronics Low Power Controller (LPC) - RTC
 ===================================================
 
-LPC currently supports Watchdog OR Real Time Clock functionality.
+LPC currently supports Watchdog OR Real Time Clock OR Clocksource
+functionality.
 
 [See: ../watchdog/st_lpc_wdt.txt for Watchdog options]
+[See: ../timer/st,stih407-lpc for Clocksource options]
 
 Required properties
 
-- compatible 	: Must be one of: "st,stih407-lpc" "st,stih416-lpc"
-				  "st,stih415-lpc" "st,stid127-lpc"
+- compatible 	: Must be: "st,stih407-lpc"
 - reg		: LPC registers base address + size
 - interrupts    : LPC interrupt line number and associated flags
 - clocks	: Clock used by LPC device (See: ../clock/clock-bindings.txt)
-- st,lpc-mode	: The LPC can run either one of two modes ST_LPC_MODE_RTC [0] or
-		  ST_LPC_MODE_WDT [1].  One (and only one) mode must be
-		  selected.
+- st,lpc-mode	: The LPC can run in one of three modes:
+                  ST_LPC_MODE_RTC    [0]
+                  ST_LPC_MODE_WDT    [1]
+                  ST_LPC_MODE_CLKSRC [2]
+                  One (and only one) mode must be selected.
 
 Example:
 	lpc@fde05000 {
diff --git a/Documentation/devicetree/bindings/sound/cs4349.txt b/Documentation/devicetree/bindings/sound/cs4349.txt
new file mode 100644
index 0000000..54c117b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs4349.txt
@@ -0,0 +1,19 @@
+CS4349 audio CODEC
+
+Required properties:
+
+  - compatible : "cirrus,cs4349"
+
+  - reg : the I2C address of the device for I2C
+
+Optional properties:
+
+  - reset-gpios : a GPIO spec for the reset pin.
+
+Example:
+
+codec: cs4349@48 {
+        compatible = "cirrus,cs4349";
+        reg = <0x48>;
+        reset-gpios = <&gpio 54 0>;
+};
diff --git a/Documentation/devicetree/bindings/sound/ics43432.txt b/Documentation/devicetree/bindings/sound/ics43432.txt
new file mode 100644
index 0000000..b02e3a6
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ics43432.txt
@@ -0,0 +1,17 @@
+Invensense ICS-43432 MEMS microphone with I2S output.
+
+There are no software configuration options for this device; indeed, the only
+host connection is the I2S interface. Apart from the requirements on clock
+frequency (460 kHz to 3.379 MHz according to the data sheet), there must be
+64 clock cycles in each stereo output frame; 24 of the 32 available bits
+contain audio data. A hardware pin determines if the device outputs data
+on the left or right channel of the I2S frame.
+
+Required properties:
+  - compatible : Must be "invensense,ics43432"
+
+Example:
+
+	ics43432: ics43432 {
+		compatible = "invensense,ics43432";
+	};
diff --git a/Documentation/devicetree/bindings/sound/max98357a.txt b/Documentation/devicetree/bindings/sound/max98357a.txt
index a7a149a..28645a2 100644
--- a/Documentation/devicetree/bindings/sound/max98357a.txt
+++ b/Documentation/devicetree/bindings/sound/max98357a.txt
@@ -4,7 +4,11 @@
 
 Required properties:
 - compatible   : "maxim,max98357a"
-- sdmode-gpios : GPIO specifier for the GPIO -> DAC SDMODE pin
+
+Optional properties:
+- sdmode-gpios : GPIO specifier for the chip's SD_MODE pin.
+        If this option is not specified, the driver does not manage
+        the pin state (e.g. the chip is always on).
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index b6b3a78..1173395 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -18,6 +18,12 @@
 - rcar_sound,src		: Should contain SRC feature.
 				  The number of SRC subnode should be same as HW.
 				  see below for detail.
+- rcar_sound,ctu		: Should contain CTU feature.
+				  The number of CTU subnode should be same as HW.
+				  see below for detail.
+- rcar_sound,mix		: Should contain MIX feature.
+				  The number of MIX subnode should be same as HW.
+				  see below for detail.
 - rcar_sound,dvc		: Should contain DVC feature.
 				  The number of DVC subnode should be same as HW.
 				  see below for detail.
@@ -90,6 +96,22 @@
 		};
 	};
 
+	rcar_sound,mix {
+		mix0: mix@0 { };
+		mix1: mix@1 { };
+	};
+
+	rcar_sound,ctu {
+		ctu00: ctu@0 { };
+		ctu01: ctu@1 { };
+		ctu02: ctu@2 { };
+		ctu03: ctu@3 { };
+		ctu10: ctu@4 { };
+		ctu11: ctu@5 { };
+		ctu12: ctu@6 { };
+		ctu13: ctu@7 { };
+	};
+
 	rcar_sound,src {
 		src0: src@0 {
 			interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt b/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt
index c641550..962748a 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt
@@ -6,6 +6,7 @@
 
 - compatible				: "renesas,rsrc-card,<board>"
 					  Examples with soctypes are:
+					    - "renesas,rsrc-card"
 					    - "renesas,rsrc-card,lager"
 					    - "renesas,rsrc-card,koelsch"
 Optional properties:
@@ -29,6 +30,12 @@
 - frame-inversion			: bool property. Add this if the
 					  dai-link uses frame clock inversion.
 - convert-rate				: platform specified sampling rate convert
+- audio-prefix				: see audio-routing
+- audio-routing				: A list of the connections between audio components.
+					  Each entry is a pair of strings, the first being the connection's sink,
+					  the second being the connection's source.
+					  Use audio-prefix if some components use the same sink/source names;
+					  it can be used if the compatible is "renesas,rsrc-card".
 
 Required CPU/CODEC subnodes properties:
 
diff --git a/Documentation/devicetree/bindings/sound/rockchip-max98090.txt b/Documentation/devicetree/bindings/sound/rockchip-max98090.txt
new file mode 100644
index 0000000..a805aa9
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rockchip-max98090.txt
@@ -0,0 +1,19 @@
+ROCKCHIP with MAX98090 CODEC
+
+Required properties:
+- compatible: "rockchip,rockchip-audio-max98090"
+- rockchip,model: The user-visible name of this sound complex
+- rockchip,i2s-controller: The phandle of the Rockchip I2S controller that's
+  connected to the CODEC
+- rockchip,audio-codec: The phandle of the MAX98090 audio codec
+- rockchip,headset-codec: The phandle of the external chip used for jack detection
+
+Example:
+
+sound {
+	compatible = "rockchip,rockchip-audio-max98090";
+	rockchip,model = "ROCKCHIP-I2S";
+	rockchip,i2s-controller = <&i2s>;
+	rockchip,audio-codec = <&max98090>;
+	rockchip,headset-codec = <&headsetcodec>;
+};
diff --git a/Documentation/devicetree/bindings/sound/rockchip-rt5645.txt b/Documentation/devicetree/bindings/sound/rockchip-rt5645.txt
new file mode 100644
index 0000000..411a62b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rockchip-rt5645.txt
@@ -0,0 +1,17 @@
+ROCKCHIP with RT5645/RT5650 CODECS
+
+Required properties:
+- compatible: "rockchip,rockchip-audio-rt5645"
+- rockchip,model: The user-visible name of this sound complex
+- rockchip,i2s-controller: The phandle of the Rockchip I2S controller that's
+  connected to the CODEC
+- rockchip,audio-codec: The phandle of the RT5645/RT5650 audio codec
+
+Example:
+
+sound {
+	compatible = "rockchip,rockchip-audio-rt5645";
+	rockchip,model = "ROCKCHIP-I2S";
+	rockchip,i2s-controller = <&i2s>;
+	rockchip,audio-codec = <&rt5645>;
+};
diff --git a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
new file mode 100644
index 0000000..028fa1c
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
@@ -0,0 +1,155 @@
+STMicroelectronics sti ASoC cards
+
+The sti ASoC Sound Card can be used for all sti SoCs using the internal
+sti-sas codec or external codecs.
+
+The sti sound drivers expose the sti SoC audio interface through the
+generic ASoC simple card. For details about the sound card declaration,
+please refer to Documentation/devicetree/bindings/sound/simple-card.txt.
+
+1) sti-uniperiph-dai: audio dai device.
+---------------------------------------
+
+Required properties:
+  - compatible: "st,sti-uni-player" or "st,sti-uni-reader"
+
+  - st,syscfg: phandle to boot-device system configuration registers
+
+  - clock-names: name of the clocks listed in clocks property in the same order
+
+  - reg: CPU DAI IP base address and size entries, listed in the same
+	 order as the CPU_DAI properties.
+
+  - reg-names: names of the mapped memory regions listed in the reg property
+	       in the same order.
+
+  - interrupts: CPU_DAI interrupt line, listed in the same order as the
+		CPU_DAI properties.
+
+  - dmas: CPU_DAI DMA controller phandle and DMA request line, listed in the
+	 same order as the CPU_DAI properties.
+
+  - dma-names: identifier string for each DMA request line in the dmas property.
+	"tx" for "st,sti-uni-player" compatibility
+	"rx" for "st,sti-uni-reader" compatibility
+
+  - version: IP version integrated in SOC.
+
+  - dai-name: DAI name that describes the IP.
+
+Required properties ("st,sti-uni-player" compatibility only):
+  - clocks: CPU_DAI IP clock source, listed in the same order as the
+	    CPU_DAI properties.
+
+  - uniperiph-id: internal SOC IP instance ID.
+
+  - IP mode: IP working mode depending on the associated codec.
+	"HDMI"  connected to the HDMI codec IP; supports IEC HDMI formats.
+	"SPDIF" connected to the SPDIF codec; supports SPDIF formats.
+	"PCM"   standard PCM mode for I2S or TDM bus.
+
+Optional properties:
+  - pinctrl-0: defined for CPU_DAI@1 and CPU_DAI@4 to describe I2S PIOs for
+	       external codecs connection.
+
+  - pinctrl-names: should contain only one value - "default".
+
+Example:
+
+	sti_uni_player2: sti-uni-player@2 {
+		compatible = "st,sti-uni-player";
+		status = "okay";
+		#sound-dai-cells = <0>;
+		st,syscfg = <&syscfg_core>;
+		clocks = <&clk_s_d0_flexgen CLK_PCM_2>;
+		reg = <0x8D82000 0x158>;
+		interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+		dmas = <&fdma0 4 0 1>;
+		dai-name = "Uni Player #1 (DAC)";
+		dma-names = "tx";
+		uniperiph-id = <2>;
+		version = <5>;
+		mode = "PCM";
+	};
+
+	sti_uni_player3: sti-uni-player@3 {
+		compatible = "st,sti-uni-player";
+		status = "okay";
+		#sound-dai-cells = <0>;
+		st,syscfg = <&syscfg_core>;
+		clocks = <&clk_s_d0_flexgen CLK_SPDIFF>;
+		reg = <0x8D85000 0x158>;
+		interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
+		dmas = <&fdma0 7 0 1>;
+		dma-names = "tx";
+		dai-name = "Uni Player #1 (PIO)";
+		uniperiph-id = <3>;
+		version = <5>;
+		mode = "SPDIF";
+	};
+
+	sti_uni_reader1: sti-uni-reader@1 {
+		compatible = "st,sti-uni-reader";
+		status = "disabled";
+		#sound-dai-cells = <0>;
+		st,syscfg = <&syscfg_core>;
+		reg = <0x8D84000 0x158>;
+		interrupts = <GIC_SPI 88 IRQ_TYPE_NONE>;
+		dmas = <&fdma0 6 0 1>;
+		dma-names = "rx";
+		dai-name = "Uni Reader #1 (HDMI RX)";
+		version = <3>;
+	};
+
+2) sti-sas-codec: internal audio codec IPs driver
+-------------------------------------------------
+
+Required properties:
+  - compatible: "st,sti<chip>-sas-codec" .
+	Should be chip "st,stih416-sas-codec" or "st,stih407-sas-codec"
+
+  - st,syscfg: phandle to boot-device system configuration registers.
+
+  - pinctrl-0: SPDIF PIO description.
+
+  - pinctrl-names: should contain only one value - "default".
+
+Example:
+	sti_sas_codec: sti-sas-codec {
+		compatible = "st,stih407-sas-codec";
+		#sound-dai-cells = <1>;
+		st,syscfg = <&syscfg_core>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_spdif_out>;
+	};
+
+Example of audio card declaration:
+	sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "sti audio card";
+		status = "okay";
+
+		simple-audio-card,dai-link@0 {
+			/* DAC */
+			format = "i2s";
+			dai-tdm-slot-width = <32>;
+			cpu {
+				sound-dai = <&sti_uni_player2>;
+			};
+
+			codec {
+				sound-dai = <&sti_sasg_codec 1>;
+			};
+		};
+		simple-audio-card,dai-link@1 {
+			/* SPDIF */
+			format = "left_j";
+			cpu {
+				sound-dai = <&sti_uni_player3>;
+			};
+
+			codec {
+				sound-dai = <&sti_sasg_codec 0>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/timer/img,pistachio-gptimer.txt b/Documentation/devicetree/bindings/timer/img,pistachio-gptimer.txt
new file mode 100644
index 0000000..7afce80
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/img,pistachio-gptimer.txt
@@ -0,0 +1,28 @@
+* Pistachio general-purpose timer based clocksource
+
+Required properties:
+ - compatible: "img,pistachio-gptimer".
+ - reg: Address range of the timer registers.
+ - interrupts: An interrupt for each of the four timers
+ - clocks: Should contain a clock specifier for each entry in clock-names
+ - clock-names: Should contain the following entries:
+                "sys", interface clock
+                "slow", slow counter clock
+                "fast", fast counter clock
+ - img,cr-periph: Must contain a phandle to the peripheral control
+		  syscon node.
+
+Example:
+	timer: timer@18102000 {
+		compatible = "img,pistachio-gptimer";
+		reg = <0x18102000 0x100>;
+		interrupts = <GIC_SHARED 60 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SHARED 61 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SHARED 62 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SHARED 63 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&clk_periph PERIPH_CLK_COUNTER_FAST>,
+		         <&clk_periph PERIPH_CLK_COUNTER_SLOW>,
+			 <&cr_periph SYS_CLK_TIMER>;
+		clock-names = "fast", "slow", "sys";
+		img,cr-periph = <&cr_periph>;
+	};
diff --git a/Documentation/devicetree/bindings/timer/st,stih407-lpc b/Documentation/devicetree/bindings/timer/st,stih407-lpc
new file mode 100644
index 0000000..72acb48
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/st,stih407-lpc
@@ -0,0 +1,28 @@
+STMicroelectronics Low Power Controller (LPC) - Clocksource
+===========================================================
+
+LPC currently supports Watchdog OR Real Time Clock OR Clocksource
+functionality.
+
+[See: ../watchdog/st_lpc_wdt.txt for Watchdog options]
+[See: ../rtc/rtc-st-lpc.txt for RTC options]
+
+Required properties
+
+- compatible   : Must be: "st,stih407-lpc"
+- reg          : LPC registers base address + size
+- interrupts   : LPC interrupt line number and associated flags
+- clocks       : Clock used by LPC device (See: ../clock/clock-bindings.txt)
+- st,lpc-mode  : The LPC can run in one of three modes:
+                  ST_LPC_MODE_RTC    [0]
+                  ST_LPC_MODE_WDT    [1]
+                  ST_LPC_MODE_CLKSRC [2]
+                  One (and only one) mode must be selected.
+
+Example:
+       lpc@fde05000 {
+               compatible      = "st,stih407-lpc";
+               reg             = <0xfde05000 0x1000>;
+               clocks          = <&clk_s_d3_flexgen CLK_LPC_0>;
+               st,lpc-mode     = <ST_LPC_MODE_CLKSRC>;
+       };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 56a6d4e..ac5f0c3 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -55,6 +55,7 @@
 cosmic	Cosmic Circuits
 crystalfontz	Crystalfontz America, Inc.
 cubietech	Cubietech, Ltd.
+cypress	Cypress Semiconductor Corporation
 dallas	Maxim Integrated Products (formerly Dallas Semiconductor)
 davicom	DAVICOM Semiconductor, Inc.
 delta	Delta Electronics, Inc.
@@ -111,6 +112,7 @@
 innolux	Innolux Corporation
 intel	Intel Corporation
 intercontrol	Inter Control Group
+invensense	InvenSense Inc.
 isee	ISEE 2007 S.L.
 isil	Intersil
 jedec	JEDEC Solid State Technology Association
@@ -143,6 +145,7 @@
 murata	Murata Manufacturing Co., Ltd.
 mxicy	Macronix International Co., Ltd.
 national	National Semiconductor
+nec	NEC LCD Technologies, Ltd.
 neonode		Neonode Inc.
 netgear	NETGEAR
 netlogic	Broadcom Corporation (formerly NetLogic Microsystems)
@@ -150,10 +153,13 @@
 newhaven	Newhaven Display International
 nintendo	Nintendo
 nokia	Nokia
+nuvoton	Nuvoton Technology Corporation
 nvidia	NVIDIA
 nxp	NXP Semiconductors
+okaya	Okaya Electric America, Inc.
 onnn	ON Semiconductor Corp.
 opencores	OpenCores.org
+option	Option NV
 ortustech	Ortus Technology Co., Ltd.
 ovti	OmniVision Technologies
 panasonic	Panasonic Corporation
diff --git a/Documentation/devicetree/bindings/video/backlight/pm8941-wled.txt b/Documentation/devicetree/bindings/video/backlight/pm8941-wled.txt
new file mode 100644
index 0000000..424f844
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/backlight/pm8941-wled.txt
@@ -0,0 +1,40 @@
+Binding for Qualcomm PM8941 WLED driver
+
+Required properties:
+- compatible: should be "qcom,pm8941-wled"
+- reg: slave address
+
+Optional properties:
+- label: The name of the backlight device
+- qcom,cs-out: bool; enable current sink output
+- qcom,cabc: bool; enable content adaptive backlight control
+- qcom,ext-gen: bool; use externally generated modulator signal to dim
+- qcom,current-limit: mA; per-string current limit; value from 0 to 25
+	default: 20mA
+- qcom,current-boost-limit: mA; boost current limit; one of:
+	105, 385, 525, 805, 980, 1260, 1400, 1680
+	default: 805mA
+- qcom,switching-freq: kHz; switching frequency; one of:
+	600, 640, 685, 738, 800, 872, 960, 1066, 1200, 1371,
+	1600, 1920, 2400, 3200, 4800, 9600,
+	default: 1600kHz
+- qcom,ovp: V; Over-voltage protection limit; one of:
+	27, 29, 32, 35
+	default: 29V
+- qcom,num-strings: #; number of LED strings attached; value from 1 to 3
+	default: 2
+
+Example:
+
+pm8941-wled@d800 {
+	compatible = "qcom,pm8941-wled";
+	reg = <0xd800>;
+	label = "backlight";
+
+	qcom,cs-out;
+	qcom,current-limit = <20>;
+	qcom,current-boost-limit = <805>;
+	qcom,switching-freq = <1600>;
+	qcom,ovp = <29>;
+	qcom,num-strings = <2>;
+};
diff --git a/Documentation/devicetree/bindings/video/fsl,dcu.txt b/Documentation/devicetree/bindings/video/fsl,dcu.txt
new file mode 100644
index 0000000..ebf1be9
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/fsl,dcu.txt
@@ -0,0 +1,22 @@
+Device Tree bindings for Freescale DCU DRM Driver
+
+Required properties:
+- compatible:		Should be one of
+	* "fsl,ls1021a-dcu".
+	* "fsl,vf610-dcu".
+
+- reg:			Address and length of the register set for dcu.
+- clocks:		From common clock binding: handle to dcu clock.
+- clock-names:		From common clock binding: Shall be "dcu".
+- big-endian:		Boolean property, LS1021A DCU registers are big-endian.
+- fsl,panel:		The phandle to panel node.
+
+Examples:
+dcu: dcu@2ce0000 {
+	compatible = "fsl,ls1021a-dcu";
+	reg = <0x0 0x2ce0000 0x0 0x10000>;
+	clocks = <&platform_clk 0>;
+	clock-names = "dcu";
+	big-endian;
+	fsl,panel = <&panel>;
+};
diff --git a/Documentation/devicetree/bindings/watchdog/st_lpc_wdt.txt b/Documentation/devicetree/bindings/watchdog/st_lpc_wdt.txt
index 388c88a..039c5ca 100644
--- a/Documentation/devicetree/bindings/watchdog/st_lpc_wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/st_lpc_wdt.txt
@@ -1,9 +1,11 @@
 STMicroelectronics Low Power Controller (LPC) - Watchdog
 ========================================================
 
-LPC currently supports Watchdog OR Real Time Clock functionality.
+LPC currently supports Watchdog OR Real Time Clock OR Clocksource
+functionality.
 
 [See: ../rtc/rtc-st-lpc.txt for RTC options]
+[See: ../timer/st,stih407-lpc for Clocksource options]
 
 Required properties
 
@@ -12,9 +14,11 @@
 - reg		: LPC registers base address + size
 - interrupts    : LPC interrupt line number and associated flags
 - clocks	: Clock used by LPC device (See: ../clock/clock-bindings.txt)
-- st,lpc-mode	: The LPC can run either one of two modes ST_LPC_MODE_RTC [0] or
-		  ST_LPC_MODE_WDT [1].  One (and only one) mode must be
-		  selected.
+- st,lpc-mode	: The LPC can run in one of three modes:
+                  ST_LPC_MODE_RTC    [0]
+                  ST_LPC_MODE_WDT    [1]
+                  ST_LPC_MODE_CLKSRC [2]
+		 One (and only one) mode must be selected.
 
 Required properties [watchdog mode]
 
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
index ca67b0f..67d4ce4 100644
--- a/Documentation/dmaengine/provider.txt
+++ b/Documentation/dmaengine/provider.txt
@@ -345,12 +345,29 @@
       that abstracts it away.
 
   * DMA_CTRL_ACK
-    - If set, the transfer can be reused after being completed.
-    - There is a guarantee the transfer won't be freed until it is acked
-      by async_tx_ack().
+    - If clear, the descriptor cannot be reused by the provider until the
+      client acknowledges receipt, i.e. has had a chance to establish any
+      dependency chains
+    - This can be acked by invoking async_tx_ack()
+    - If set, this does not mean the descriptor can be reused
+
+  * DMA_CTRL_REUSE
+    - If set, the descriptor can be reused after being completed. It should
+      not be freed by the provider if this flag is set.
+    - The descriptor should be prepared for reuse by invoking
+      dmaengine_desc_set_reuse() which will set DMA_CTRL_REUSE.
+    - dmaengine_desc_set_reuse() will succeed only when the channel supports
+      reusable descriptors, as exhibited by its capabilities.
     - As a consequence, if a device driver wants to skip the dma_map_sg() and
       dma_unmap_sg() in between 2 transfers, because the DMA'd data wasn't used,
       it can resubmit the transfer right after its completion.
+    - A descriptor can be freed in a few ways:
+	- Clearing DMA_CTRL_REUSE by invoking dmaengine_desc_clear_reuse()
+	  and submitting it for the last transaction
+	- Explicitly invoking dmaengine_desc_free(); this can succeed only
+	  when DMA_CTRL_REUSE is already set
+	- Terminating the channel
+
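+As an illustration, a minimal sketch of the descriptor reuse flow from a
+client's perspective (channel setup, DMA mapping and error handling elided;
+this is indicative only, not a complete example):
+
+	struct dma_async_tx_descriptor *desc;
+
+	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
+					 DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+	if (dmaengine_desc_set_reuse(desc))
+		/* the channel does not support reusable descriptors */
+		return -EINVAL;
+
+	/* the same descriptor may now be submitted several times */
+	dmaengine_submit(desc);
+	dma_async_issue_pending(chan);
+
+	/* when done with it: clear reuse so it is freed after the last txn */
+	dmaengine_desc_clear_reuse(desc);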
 
 General Design Notes
 --------------------
diff --git a/Documentation/fault-injection/fault-injection.txt b/Documentation/fault-injection/fault-injection.txt
index 4cf1a2a..415484f 100644
--- a/Documentation/fault-injection/fault-injection.txt
+++ b/Documentation/fault-injection/fault-injection.txt
@@ -15,6 +15,10 @@
 
   injects page allocation failures. (alloc_pages(), get_free_pages(), ...)
 
+o fail_futex
+
+  injects futex deadlock and uaddr fault errors.
+
 o fail_make_request
 
   injects disk IO errors on devices permitted by setting
@@ -113,6 +117,12 @@
 	specifies the minimum page allocation order to be injected
 	failures.
 
+- /sys/kernel/debug/fail_futex/ignore-private:
+
+	Format: { 'Y' | 'N' }
+	default is 'N', setting it to 'Y' will disable failure injections
+	when dealing with private (address space) futexes.
+
 o Boot option
 
 In order to inject faults while debugfs is not available (early boot time),
@@ -121,6 +131,7 @@
 	failslab=
 	fail_page_alloc=
 	fail_make_request=
+	fail_futex=
 	mmc_core.fail_request=<interval>,<probability>,<space>,<times>
 
 How to add new fault injection capability
diff --git a/Documentation/features/debug/uprobes/arch-support.txt b/Documentation/features/debug/uprobes/arch-support.txt
index 4efe36c..d605c3f 100644
--- a/Documentation/features/debug/uprobes/arch-support.txt
+++ b/Documentation/features/debug/uprobes/arch-support.txt
@@ -22,7 +22,7 @@
     |        m68k: | TODO |
     |       metag: | TODO |
     |  microblaze: | TODO |
-    |        mips: | TODO |
+    |        mips: |  ok  |
     |     mn10300: | TODO |
     |       nios2: | TODO |
     |    openrisc: | TODO |
diff --git a/Documentation/features/seccomp/seccomp-filter/arch-support.txt b/Documentation/features/seccomp/seccomp-filter/arch-support.txt
index bea8009..76d39d6 100644
--- a/Documentation/features/seccomp/seccomp-filter/arch-support.txt
+++ b/Documentation/features/seccomp/seccomp-filter/arch-support.txt
@@ -32,7 +32,7 @@
     |       score: | TODO |
     |          sh: | TODO |
     |       sparc: | TODO |
-    |        tile: | TODO |
+    |        tile: |  ok  |
     |          um: | TODO |
     |   unicore32: | TODO |
     |         x86: |  ok  |
diff --git a/Documentation/features/vm/TLB/arch-support.txt b/Documentation/features/vm/TLB/arch-support.txt
new file mode 100644
index 0000000..261b92e
--- /dev/null
+++ b/Documentation/features/vm/TLB/arch-support.txt
@@ -0,0 +1,40 @@
+#
+# Feature name:          batch-unmap-tlb-flush
+#         Kconfig:       ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+#         description:   arch supports deferral of TLB flush until multiple pages are unmapped
+#
+    -----------------------
+    |         arch |status|
+    -----------------------
+    |       alpha: | TODO |
+    |         arc: | TODO |
+    |         arm: | TODO |
+    |       arm64: | TODO |
+    |       avr32: |  ..  |
+    |    blackfin: | TODO |
+    |         c6x: |  ..  |
+    |        cris: |  ..  |
+    |         frv: |  ..  |
+    |       h8300: |  ..  |
+    |     hexagon: | TODO |
+    |        ia64: | TODO |
+    |        m32r: | TODO |
+    |        m68k: |  ..  |
+    |       metag: | TODO |
+    |  microblaze: |  ..  |
+    |        mips: | TODO |
+    |     mn10300: | TODO |
+    |       nios2: |  ..  |
+    |    openrisc: |  ..  |
+    |      parisc: | TODO |
+    |     powerpc: | TODO |
+    |        s390: | TODO |
+    |       score: |  ..  |
+    |          sh: | TODO |
+    |       sparc: | TODO |
+    |        tile: | TODO |
+    |          um: |  ..  |
+    |   unicore32: |  ..  |
+    |         x86: |  ok  |
+    |      xtensa: | TODO |
+    -----------------------
diff --git a/Documentation/filesystems/ext2.txt b/Documentation/filesystems/ext2.txt
index b971456..5575539 100644
--- a/Documentation/filesystems/ext2.txt
+++ b/Documentation/filesystems/ext2.txt
@@ -360,8 +360,8 @@
 the time of the crash, then there is no guarantee of consistency for
 the blocks in that transaction so they are discarded (which means any
 filesystem changes they represent are also lost).
-Check Documentation/filesystems/ext3.txt if you want to read more about
-ext3 and journaling.
+Check Documentation/filesystems/ext4.txt if you want to read more about
+ext4 and journaling.
 
 References
 ==========
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index 7ed0d17..58758fb 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -6,210 +6,7 @@
 for the 2.2 branch, and ported to 2.4 kernels by Peter Braam, Andreas Dilger,
 Andrew Morton, Alexander Viro, Ted Ts'o and Stephen Tweedie.
 
-Ext3 is the ext2 filesystem enhanced with journalling capabilities.
+Ext3 is the ext2 filesystem enhanced with journalling capabilities. The
+filesystem is a subset of the ext4 filesystem, so use the ext4 driver to
+access ext3 filesystems.
 
-Options
-=======
-
-When mounting an ext3 filesystem, the following option are accepted:
-(*) == default
-
-ro			Mount filesystem read only. Note that ext3 will replay
-			the journal (and thus write to the partition) even when
-			mounted "read only". Mount options "ro,noload" can be
-			used to prevent writes to the filesystem.
-
-journal=update		Update the ext3 file system's journal to the current
-			format.
-
-journal=inum		When a journal already exists, this option is ignored.
-			Otherwise, it specifies the number of the inode which
-			will represent the ext3 file system's journal file.
-
-journal_path=path
-journal_dev=devnum	When the external journal device's major/minor numbers
-			have changed, these options allow the user to specify
-			the new journal location.  The journal device is
-			identified through either its new major/minor numbers
-			encoded in devnum, or via a path to the device.
-
-norecovery		Don't load the journal on mounting. Note that this forces
-noload			mount of inconsistent filesystem, which can lead to
-			various problems.
-
-data=journal		All data are committed into the journal prior to being
-			written into the main file system.
-
-data=ordered	(*)	All data are forced directly out to the main file
-			system prior to its metadata being committed to the
-			journal.
-
-data=writeback		Data ordering is not preserved, data may be written
-			into the main file system after its metadata has been
-			committed to the journal.
-
-commit=nrsec	(*)	Ext3 can be told to sync all its data and metadata
-			every 'nrsec' seconds. The default value is 5 seconds.
-			This means that if you lose your power, you will lose
-			as much as the latest 5 seconds of work (your
-			filesystem will not be damaged though, thanks to the
-			journaling).  This default value (or any low value)
-			will hurt performance, but it's good for data-safety.
-			Setting it to 0 will have the same effect as leaving
-			it at the default (5 seconds).
-			Setting it to very large values will improve
-			performance.
-
-barrier=<0|1(*)>	This enables/disables the use of write barriers in
-barrier	(*)		the jbd code.  barrier=0 disables, barrier=1 enables.
-nobarrier		This also requires an IO stack which can support
-			barriers, and if jbd gets an error on a barrier
-			write, it will disable again with a warning.
-			Write barriers enforce proper on-disk ordering
-			of journal commits, making volatile disk write caches
-			safe to use, at some performance penalty.  If
-			your disks are battery-backed in one way or another,
-			disabling barriers may safely improve performance.
-			The mount options "barrier" and "nobarrier" can
-			also be used to enable or disable barriers, for
-			consistency with other ext3 mount options.
-
-user_xattr		Enables Extended User Attributes.  Additionally, you
-			need to have extended attribute support enabled in the
-			kernel configuration (CONFIG_EXT3_FS_XATTR).  See the
-			attr(5) manual page and http://acl.bestbits.at/ to
-			learn more about extended attributes.
-
-nouser_xattr		Disables Extended User Attributes.
-
-acl			Enables POSIX Access Control Lists support.
-			Additionally, you need to have ACL support enabled in
-			the kernel configuration (CONFIG_EXT3_FS_POSIX_ACL).
-			See the acl(5) manual page and http://acl.bestbits.at/
-			for more information.
-
-noacl			This option disables POSIX Access Control List
-			support.
-
-reservation
-
-noreservation
-
-bsddf 		(*)	Make 'df' act like BSD.
-minixdf			Make 'df' act like Minix.
-
-check=none		Don't do extra checking of bitmaps on mount.
-nocheck
-
-debug			Extra debugging information is sent to syslog.
-
-errors=remount-ro	Remount the filesystem read-only on an error.
-errors=continue		Keep going on a filesystem error.
-errors=panic		Panic and halt the machine if an error occurs.
-			(These mount options override the errors behavior
-			specified in the superblock, which can be
-			configured using tune2fs.)
-
-data_err=ignore(*)	Just print an error message if an error occurs
-			in a file data buffer in ordered mode.
-data_err=abort		Abort the journal if an error occurs in a file
-			data buffer in ordered mode.
-
-grpid			Give objects the same group ID as their creator.
-bsdgroups
-
-nogrpid		(*)	New objects have the group ID of their creator.
-sysvgroups
-
-resgid=n		The group ID which may use the reserved blocks.
-
-resuid=n		The user ID which may use the reserved blocks.
-
-sb=n			Use alternate superblock at this location.
-
-quota			These options are ignored by the filesystem. They
-noquota			are used only by quota tools to recognize volumes
-grpquota		where quota should be turned on. See documentation
-usrquota		in the quota-tools package for more details
-			(http://sourceforge.net/projects/linuxquota).
-
-jqfmt=<quota type>	These options tell filesystem details about quota
-usrjquota=<file>	so that quota information can be properly updated
-grpjquota=<file>	during journal replay. They replace the above
-			quota options. See documentation in the quota-tools
-			package for more details
-			(http://sourceforge.net/projects/linuxquota).
-
-Specification
-=============
-Ext3 shares all disk implementation with the ext2 filesystem, and adds
-transactions capabilities to ext2.  Journaling is done by the Journaling Block
-Device layer.
-
-Journaling Block Device layer
------------------------------
-The Journaling Block Device layer (JBD) isn't ext3 specific.  It was designed
-to add journaling capabilities to a block device.  The ext3 filesystem code
-will inform the JBD of modifications it is performing (called a transaction).
-The journal supports the transactions start and stop, and in case of a crash,
-the journal can replay the transactions to quickly put the partition back into
-a consistent state.
-
-Handles represent a single atomic update to a filesystem.  JBD can handle an
-external journal on a block device.
-
-Data Mode
----------
-There are 3 different data modes:
-
-* writeback mode
-In data=writeback mode, ext3 does not journal data at all.  This mode provides
-a similar level of journaling as that of XFS, JFS, and ReiserFS in its default
-mode - metadata journaling.  A crash+recovery can cause incorrect data to
-appear in files which were written shortly before the crash.  This mode will
-typically provide the best ext3 performance.
-
-* ordered mode
-In data=ordered mode, ext3 only officially journals metadata, but it logically
-groups metadata and data blocks into a single unit called a transaction.  When
-it's time to write the new metadata out to disk, the associated data blocks
-are written first.  In general, this mode performs slightly slower than
-writeback but significantly faster than journal mode.
-
-* journal mode
-data=journal mode provides full data and metadata journaling.  All new data is
-written to the journal first, and then to its final location.
-In the event of a crash, the journal can be replayed, bringing both data and
-metadata into a consistent state.  This mode is the slowest except when data
-needs to be read from and written to disk at the same time where it
-outperforms all other modes.
-
-Compatibility
--------------
-
-Ext2 partitions can be easily convert to ext3, with `tune2fs -j <dev>`.
-Ext3 is fully compatible with Ext2.  Ext3 partitions can easily be mounted as
-Ext2.
-
-
-External Tools
-==============
-See manual pages to learn more.
-
-tune2fs: 	create a ext3 journal on a ext2 partition with the -j flag.
-mke2fs: 	create a ext3 partition with the -j flag.
-debugfs: 	ext2 and ext3 file system debugger.
-ext2online:	online (mounted) ext2 and ext3 filesystem resizer
-
-
-References
-==========
-
-kernel source:	<file:fs/ext3/>
-		<file:fs/jbd/>
-
-programs: 	http://e2fsprogs.sourceforge.net/
-		http://ext2resize.sourceforge.net
-
-useful links:	http://www.ibm.com/developerworks/library/l-fs7/index.html
-        http://www.ibm.com/developerworks/library/l-fs8/index.html
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index e9e750e..e2d5105 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -143,7 +143,9 @@
 extent_cache           Enable an extent cache based on rb-tree, it can cache
                        as many as extent which map between contiguous logical
                        address and physical address per inode, resulting in
-                       increasing the cache hit ratio.
+                       increasing the cache hit ratio. Set by default.
+noextent_cache         Disable an extent cache based on rb-tree explicitly, see
+                       the above extent_cache mount option.
 noinline_data          Disable the inline data feature, inline data feature is
                        enabled by default.
 
diff --git a/Documentation/filesystems/nfs/nfs-rdma.txt b/Documentation/filesystems/nfs/nfs-rdma.txt
index 95c13aa..906b6c2 100644
--- a/Documentation/filesystems/nfs/nfs-rdma.txt
+++ b/Documentation/filesystems/nfs/nfs-rdma.txt
@@ -138,9 +138,9 @@
   - Build, install, reboot
 
     The NFS/RDMA code will be enabled automatically if NFS and RDMA
-    are turned on. The NFS/RDMA client and server are configured via the
-    SUNRPC_XPRT_RDMA_CLIENT and SUNRPC_XPRT_RDMA_SERVER config options that both
-    depend on SUNRPC and INFINIBAND. The default value of both options will be:
+    are turned on. The NFS/RDMA client and server are configured via the hidden
+    SUNRPC_XPRT_RDMA config option that depends on SUNRPC and INFINIBAND. The
+    value of SUNRPC_XPRT_RDMA will be:
 
      - N if either SUNRPC or INFINIBAND are N, in this case the NFS/RDMA client
        and server will not be built
@@ -238,9 +238,8 @@
 
   - Start the NFS server
 
-    If the NFS/RDMA server was built as a module
-    (CONFIG_SUNRPC_XPRT_RDMA_SERVER=m in kernel config), load the RDMA
-    transport module:
+    If the NFS/RDMA server was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in
+    kernel config), load the RDMA transport module:
 
     $ modprobe svcrdma
 
@@ -259,9 +258,8 @@
 
   - On the client system
 
-    If the NFS/RDMA client was built as a module
-    (CONFIG_SUNRPC_XPRT_RDMA_CLIENT=m in kernel config), load the RDMA client
-    module:
+    If the NFS/RDMA client was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in
+    kernel config), load the RDMA client module:
 
     $ modprobe xprtrdma.ko
 
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 5eb8456..8c6f07a 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -769,7 +769,7 @@
 	to stall to allow flushers a chance to complete some IO. Ordinarily
 	it can use PageDirty and PageWriteback but some filesystems have
 	more complex state (unstable pages in NFS prevent reclaim) or
-	do not set those flags due to locking problems (jbd). This callback
+	do not set those flags due to locking problems. This callback
 	allows a filesystem to indicate to the VM if a page should be
 	treated as dirty or writeback for the purposes of stalling.
 
diff --git a/Documentation/gpio/00-INDEX b/Documentation/gpio/00-INDEX
index 1de43ae..179beb2 100644
--- a/Documentation/gpio/00-INDEX
+++ b/Documentation/gpio/00-INDEX
@@ -6,6 +6,9 @@
 	- How to obtain and use GPIOs in a driver
 driver.txt
 	- How to write a GPIO driver
+drivers-on-gpio.txt:
+	- Drivers in other subsystems that can use GPIO to provide more
+	  complex functionality.
 board.txt
 	- How to assign GPIOs to a consumer device and a function
 sysfs.txt
diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt
index 75542b9..a206639 100644
--- a/Documentation/gpio/consumer.txt
+++ b/Documentation/gpio/consumer.txt
@@ -237,6 +237,39 @@
 should not have to care about the physical line level.
 
 
+The active-low property
+-----------------------
+
+As a driver should not have to care about the physical line level, all of the
+gpiod_set_value_xxx() or gpiod_set_array_value_xxx() functions operate with
+the *logical* value. That is, they take the active-low property into account:
+they check whether the GPIO is configured to be active-low and, if so, invert
+the passed value before the physical line level is driven.
+
+With this, all the gpiod_set_(array)_value_xxx() functions interpret the
+parameter "value" as "active" ("1") or "inactive" ("0"). The physical line
+level will be driven accordingly.
+
+As an example, if the active-low property for a dedicated GPIO is set, and
+gpiod_set_(array)_value_xxx() is passed "active" ("1"), the physical line level
+will be driven low.
+
+To summarize:
+
+Function (example)               active-low property  physical line
+gpiod_set_raw_value(desc, 0);        don't care           low
+gpiod_set_raw_value(desc, 1);        don't care           high
+gpiod_set_value(desc, 0);       default (active-high)     low
+gpiod_set_value(desc, 1);       default (active-high)     high
+gpiod_set_value(desc, 0);             active-low          high
+gpiod_set_value(desc, 1);             active-low          low
+
+Please note again that the set_raw/get_raw functions should be avoided as much
+as possible, especially by drivers which should not care about the actual
+physical line level and worry about the logical value instead.
+
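+As a short, hedged sketch (the "enable" con_id and the surrounding driver
+context are illustrative assumptions), a consumer drives the logical value and
+lets gpiolib apply any active-low inversion:
+
+	struct gpio_desc *enable;
+
+	enable = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+	if (IS_ERR(enable))
+		return PTR_ERR(enable);
+
+	/* logical "active"; the line goes low if marked active-low */
+	gpiod_set_value(enable, 1);
+	/* logical "inactive" */
+	gpiod_set_value(enable, 0);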
+
 Set multiple GPIO outputs with a single function call
 -----------------------------------------------------
 The following functions set the output values of an array of GPIOs:
diff --git a/Documentation/gpio/drivers-on-gpio.txt b/Documentation/gpio/drivers-on-gpio.txt
new file mode 100644
index 0000000..f612132
--- /dev/null
+++ b/Documentation/gpio/drivers-on-gpio.txt
@@ -0,0 +1,95 @@
+Subsystem drivers using GPIO
+============================
+
+Note that standard kernel drivers exist for common GPIO tasks and will provide
+the right in-kernel and userspace APIs/ABIs for the job, and that these
+drivers can quite easily interconnect with other kernel subsystems using
+hardware descriptions such as device tree or ACPI:
+
+- leds-gpio: drivers/leds/leds-gpio.c will handle LEDs connected to GPIO
+  lines, giving you the LED sysfs interface
+
+- ledtrig-gpio: drivers/leds/trigger/ledtrig-gpio.c will provide a LED trigger,
+  i.e. a LED will turn on/off in response to a GPIO line going high or low
+  (and that LED may in turn use the leds-gpio as per above).
+
+- gpio-keys: drivers/input/keyboard/gpio_keys.c is used when your GPIO line
+  can generate interrupts in response to a key press. Also supports debounce.
+
+- gpio-keys-polled: drivers/input/keyboard/gpio_keys_polled.c is used when your
+  GPIO line cannot generate interrupts, so it needs to be periodically polled
+  by a timer.
+
+- gpio_mouse: drivers/input/mouse/gpio_mouse.c is used to provide a mouse with
+  up to three buttons by simply using GPIOs and no mouse port. You can cut the
+  mouse cable and connect the wires to GPIO lines or solder a mouse connector
+  to the lines for a more permanent solution of this type.
+
+- gpio-beeper: drivers/input/misc/gpio-beeper.c is used to provide a beep from
+  an external speaker connected to a GPIO line.
+
+- gpio-tilt-polled: drivers/input/misc/gpio_tilt_polled.c provides tilt
+  detection switches using GPIO, which is useful for your homebrew pinball
+  machine if for nothing else. It can detect different tilt angles of the
+  monitored object.
+
+- extcon-gpio: drivers/extcon/extcon-gpio.c is used when you need to read an
+  external connector status, such as a headset line for an audio driver or an
+  HDMI connector. It will provide a better userspace sysfs interface than GPIO.
+
+- restart-gpio: drivers/power/gpio-restart.c is used to restart/reboot the
+  system by pulling a GPIO line and will register a restart handler so
+  userspace can issue the right system call to restart the system.
+
+- poweroff-gpio: drivers/power/gpio-poweroff.c is used to power the system down
+  by pulling a GPIO line and will register a pm_power_off() callback so that
+  userspace can issue the right system call to power down the system.
+
+- gpio-gate-clock: drivers/clk/clk-gpio-gate.c is used to control a gated clock
+  (off/on) that uses a GPIO, and is integrated with the clock subsystem.
+
+- i2c-gpio: drivers/i2c/busses/i2c-gpio.c is used to drive an I2C bus
+  (two wires, SDA and SCL lines) by hammering (bitbang) two GPIO lines. It will
+  appear as any other I2C bus to the system and makes it possible to connect
+  drivers for the I2C devices on the bus like any other I2C bus driver.
+
+- spi_gpio: drivers/spi/spi-gpio.c is used to drive an SPI bus (variable number
+  of wires, at least SCK and optionally MISO, MOSI and chip select lines) using
+  GPIO hammering (bitbang). It will appear as any other SPI bus on the system
+  and makes it possible to connect drivers for SPI devices on the bus like
+  any other SPI bus driver. For example any MMC/SD card can then be connected
+  to this SPI by using the mmc_spi host from the MMC/SD card subsystem.
+
+- w1-gpio: drivers/w1/masters/w1-gpio.c is used to drive a one-wire bus using
+  a GPIO line, integrating with the W1 subsystem and handling devices on
+  the bus like any other W1 device.
+
+- gpio-fan: drivers/hwmon/gpio-fan.c is used to control a fan for cooling the
+  system, connected to a GPIO line (and optionally a GPIO alarm line),
+  presenting all the right in-kernel and sysfs interfaces to make your system
+  not overheat.
+
+- gpio-regulator: drivers/regulator/gpio-regulator.c is used to control a
+  regulator providing a certain voltage by pulling a GPIO line, integrating
+  with the regulator subsystem and giving you all the right interfaces.
+
+- gpio-wdt: drivers/watchdog/gpio_wdt.c is used to provide a watchdog timer
+  that will periodically "ping" hardware connected to a GPIO line by toggling
+  it from 1-to-0-to-1. If that hardware does not receive its "ping"
+  periodically, it will reset the system.
+
+- gpio-nand: drivers/mtd/nand/gpio.c is used to connect a NAND flash chip to
+  a set of simple GPIO lines: RDY, NCE, ALE, CLE, NWP. It interacts with the
+  NAND flash MTD subsystem and provides chip access and partition parsing like
+  any other NAND driving hardware.
+
+Apart from this there are special GPIO drivers in subsystems like MMC/SD to
+read card detect and write protect GPIO lines, and in the TTY serial subsystem
+to emulate MCTRL (modem control) signals CTS/RTS by using two GPIO lines. The
+MTD NOR flash has add-ons for extra GPIO lines too, though the address bus is
+usually connected directly to the flash.
+
+Use those instead of talking directly to the GPIOs using sysfs; they integrate
+with kernel frameworks better than your userspace code could. Needless to say,
+just using the appropriate kernel drivers will simplify and speed up your
+embedded hacking in particular by providing ready-made components.
diff --git a/Documentation/gpio/sysfs.txt b/Documentation/gpio/sysfs.txt
index 535b6a8..0700b55 100644
--- a/Documentation/gpio/sysfs.txt
+++ b/Documentation/gpio/sysfs.txt
@@ -20,11 +20,10 @@
 standard kernels won't know about. And for some tasks, simple userspace
 GPIO drivers could be all that the system really needs.
 
-Note that standard kernel drivers exist for common "LEDs and Buttons"
-GPIO tasks:  "leds-gpio" and "gpio_keys", respectively. Use those
-instead of talking directly to the GPIOs; they integrate with kernel
-frameworks better than your userspace code could.
-
+DO NOT ABUSE SYSFS TO CONTROL HARDWARE THAT HAS PROPER KERNEL DRIVERS.
+PLEASE READ THE DOCUMENT NAMED "drivers-on-gpio.txt" IN THIS DOCUMENTATION
+DIRECTORY TO AVOID REINVENTING KERNEL WHEELS IN USERSPACE. I MEAN IT.
+REALLY.
 
 Paths in Sysfs
 --------------
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 64df08d..39ac654 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -303,6 +303,7 @@
 0xA3	80-8F	Port ACL		in development:
 					<mailto:tlewis@mindspring.com>
 0xA3	90-9F	linux/dtlk.h
+0xAA	00-3F	linux/uapi/linux/userfaultfd.h
 0xAB	00-1F	linux/nbd.h
 0xAC	00-1F	linux/raw.h
 0xAD	00	Netfilter device	in development:
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b09dc2c..f252928 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1317,6 +1317,10 @@
 			     <bus_id>,<clkrate>
 
 	i8042.debug	[HW] Toggle i8042 debug mode
+	i8042.unmask_kbd_data
+			[HW] Enable printing of interrupt data from the KBD port
+			     (disabled by default, and as a pre-condition
+			     requires that i8042.debug=1 be enabled)
 	i8042.direct	[HW] Put keyboard port into non-translated mode
 	i8042.dumbkbd	[HW] Pretend that controller can only read data from
 			     keyboard and cannot control its state
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index eafa6a5..2ba8461 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -2327,9 +2327,7 @@
 explicit lock operations, described later).  These include:
 
 	xchg();
-	cmpxchg();
 	atomic_xchg();			atomic_long_xchg();
-	atomic_cmpxchg();		atomic_long_cmpxchg();
 	atomic_inc_return();		atomic_long_inc_return();
 	atomic_dec_return();		atomic_long_dec_return();
 	atomic_add_return();		atomic_long_add_return();
@@ -2342,7 +2340,9 @@
 	test_and_clear_bit();
 	test_and_change_bit();
 
-	/* when succeeds (returns 1) */
+	/* when it succeeds */
+	cmpxchg();
+	atomic_cmpxchg();		atomic_long_cmpxchg();
 	atomic_add_unless();		atomic_long_add_unless();
 
 These are used for such things as implementing ACQUIRE-class and RELEASE-class
diff --git a/Documentation/networking/6lowpan.txt b/Documentation/networking/6lowpan.txt
new file mode 100644
index 0000000..a7dc7e9
--- /dev/null
+++ b/Documentation/networking/6lowpan.txt
@@ -0,0 +1,50 @@
+
+Netdev private dataroom for 6lowpan interfaces:
+
+All 6lowpan-able net devices, i.e. all interfaces with ARPHRD_6LOWPAN,
+must have "struct lowpan_priv" placed at the beginning of netdev_priv.
+
+The priv_size of each interface should be calculated by:
+
+ dev->priv_size = LOWPAN_PRIV_SIZE(LL_6LOWPAN_PRIV_DATA);
+
+where LL_6LOWPAN_PRIV_DATA is the size of the link layer's 6lowpan private
+data struct. To access this structure you can cast:
+
+ lowpan_priv(dev)->priv;
+
+to your LL_6LOWPAN_PRIV_DATA structure.
+
+Before registering the lowpan netdev interface you must run:
+
+ lowpan_netdev_setup(dev, LOWPAN_LLTYPE_FOOBAR);
+
+where LOWPAN_LLTYPE_FOOBAR is a define for your 6LoWPAN link layer type from
+enum lowpan_lltypes.
+
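+A minimal, hedged sketch of the allocation and registration sequence (the
+"foobar" names and the setup callback are illustrative placeholders, not
+existing kernel symbols):
+
+ dev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_priv_foobar)),
+		    "lowpan%d", NET_NAME_UNKNOWN, lowpan_foobar_setup);
+ lowpan_netdev_setup(dev, LOWPAN_LLTYPE_FOOBAR);
+ ret = register_netdev(dev);
+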
+For example, to access the link layer private data you can do:
+
+static inline struct lowpan_priv_foobar *
+lowpan_foobar_priv(struct net_device *dev)
+{
+	return (struct lowpan_priv_foobar *)lowpan_priv(dev)->priv;
+}
+
+switch (dev->type) {
+case ARPHRD_6LOWPAN:
+	lowpan_priv = lowpan_priv(dev);
+	/* do great stuff which is ARPHRD_6LOWPAN related */
+	switch (lowpan_priv->lltype) {
+	case LOWPAN_LLTYPE_FOOBAR:
+		/* do 802.15.4 6LoWPAN handling here */
+		lowpan_foobar_priv(dev)->bar = foo;
+		break;
+	...
+	}
+	break;
+...
+}
+
+In case of the generic 6lowpan branch ("net/6lowpan") you can remove the check
+on ARPHRD_6LOWPAN, because you can be sure that these functions are called
+only by ARPHRD_6LOWPAN interfaces.
diff --git a/Documentation/networking/dsa/bcm_sf2.txt b/Documentation/networking/dsa/bcm_sf2.txt
new file mode 100644
index 0000000..d999d0c
--- /dev/null
+++ b/Documentation/networking/dsa/bcm_sf2.txt
@@ -0,0 +1,114 @@
+Broadcom Starfighter 2 Ethernet switch driver
+=============================================
+
+Broadcom's Starfighter 2 Ethernet switch hardware block is commonly found and
+deployed in the following products:
+
+- xDSL gateways such as BCM63138
+- streaming/multimedia Set Top Box such as BCM7445
+- Cable Modem/residential gateways such as BCM7145/BCM3390
+
+The switch is typically deployed in a configuration involving between 5 and 13
+ports, offering a range of built-in and customizable interfaces:
+
+- single integrated Gigabit PHY
+- quad integrated Gigabit PHY
+- quad external Gigabit PHY w/ MDIO multiplexer
+- integrated MoCA PHY
+- several external MII/RevMII/GMII/RGMII interfaces
+
+The switch also supports specific congestion control features which allow MoCA
+fail-over not to lose packets during a MoCA role re-election, as well as out of
+band back-pressure to the host CPU network interface when downstream interfaces
+are connected at a lower speed.
+
+The switch hardware block is typically interfaced using MMIO accesses and
+contains a bunch of sub-blocks/registers:
+
+* SWITCH_CORE: common switch registers
+* SWITCH_REG: external interfaces switch register
+* SWITCH_MDIO: external MDIO bus controller (there is another one in SWITCH_CORE,
+  which is used for indirect PHY accesses)
+* SWITCH_INDIR_RW: 64-bits wide register helper block
+* SWITCH_INTRL2_0/1: Level-2 interrupt controllers
+* SWITCH_ACB: Admission control block
+* SWITCH_FCB: Fail-over control block
+
+Implementation details
+======================
+
+The driver is located in drivers/net/dsa/bcm_sf2.c and is implemented as a DSA
+driver; see Documentation/networking/dsa/dsa.txt for details on the subsystem
+and what it provides.
+
+The SF2 switch is configured to enable a Broadcom specific 4-byte switch tag
+which gets inserted by the switch for every packet forwarded to the CPU
+interface. Conversely, the CPU network interface should insert a similar tag
+for packets entering the CPU port. The tag format is described in
+net/dsa/tag_brcm.c.
+
+Overall, the SF2 driver is a fairly regular DSA driver; there are a few
+specifics covered below.
+
+Device Tree probing
+-------------------
+
+The DSA platform device driver is probed using a specific compatible string
+provided in net/dsa/dsa.c. The reason for that is because the DSA subsystem gets
+registered as a platform device driver currently. DSA will provide the needed
+device_node pointers which are then accessible by the switch driver setup
+function to setup resources such as register ranges and interrupts. This
+currently works very well because none of the of_* functions utilized by the
+driver require a struct device to be bound to a struct device_node, but things
+may change in the future.
+
+MDIO indirect accesses
+----------------------
+
+Due to a limitation in how Broadcom switches have been designed, external
+Broadcom switches connected to a SF2 require the use of the DSA slave MDIO bus
+in order to properly configure them. By default, the SF2 pseudo-PHY address, and
+an external switch pseudo-PHY address will both be snooping for incoming MDIO
+transactions, since they are at the same address (30), resulting in some kind of
+"double" programming. Using DSA, and setting ds->phys_mii_mask accordingly, we
+selectively divert reads and writes towards external Broadcom switches
+pseudo-PHY addresses. Newer revisions of the SF2 hardware have introduced a
+configurable pseudo-PHY address which circumvents the initial design limitation.
+
+Multimedia over CoAxial (MoCA) interfaces
+-----------------------------------------
+
+MoCA interfaces are fairly specific and require the use of a firmware blob which
+gets loaded onto the MoCA processor(s) for packet processing. The switch
+hardware contains logic which will assert/de-assert link states accordingly for
+the MoCA interface whenever the MoCA coaxial cable gets disconnected or the
+firmware gets reloaded. The SF2 driver relies on such events to properly set its
+MoCA interface carrier state and properly report this to the networking stack.
+
+The MoCA interfaces are supported using the PHY library's fixed PHY/emulated PHY
+device and the switch driver registers a fixed_link_update callback for such
+PHYs which reflects the link state obtained from the interrupt handler.
+
+
+Power Management
+----------------
+
+Whenever possible, the SF2 driver tries to minimize the overall switch power
+consumption by applying a combination of:
+
+- turning off internal buffers/memories
+- disabling packet processing logic
+- putting integrated PHYs in IDDQ/low-power
+- reducing the switch core clock based on the active port count
+- enabling and advertising EEE
+- turning off RGMII data processing logic when the link goes down
+
+Wake-on-LAN
+-----------
+
+Wake-on-LAN is currently implemented by utilizing the host processor Ethernet
+MAC controller wake-on logic. Whenever Wake-on-LAN is requested, an intersection
+between the user request and the supported host Ethernet interface WoL
+capabilities is done and the intersection result gets configured. During
+system-wide suspend/resume, only ports not participating in Wake-on-LAN are
+disabled.
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
new file mode 100644
index 0000000..aa9c1f9
--- /dev/null
+++ b/Documentation/networking/dsa/dsa.txt
@@ -0,0 +1,615 @@
+Distributed Switch Architecture
+===============================
+
+Introduction
+============
+
+This document describes the Distributed Switch Architecture (DSA) subsystem
+design principles, limitations, interactions with other subsystems, and how to
+develop drivers for this subsystem as well as a TODO for developers interested
+in joining the effort.
+
+Design principles
+=================
+
+The Distributed Switch Architecture is a subsystem which was primarily designed
+to support Marvell Ethernet switches (MV88E6xxx, a.k.a Linkstreet product line)
+using Linux, but has since evolved to support other vendors as well.
+
+The original philosophy behind this design was to be able to use unmodified
+Linux tools such as bridge, iproute2, ifconfig to work transparently whether
+they configured/queried a switch port network device or a regular network
+device.
+
+An Ethernet switch is typically comprised of multiple front-panel ports, and
+one or more CPU or management ports. The DSA subsystem currently relies on the
+presence of a management port connected to an Ethernet controller capable of
+receiving Ethernet frames from the switch. This is a very common setup for all
+kinds of Ethernet switches found in Small Home and Office products: routers,
+gateways, or even top-of-the rack switches. This host Ethernet controller will
+be later referred to as "master" and "cpu" in DSA terminology and code.
+
+The D in DSA stands for Distributed, because the subsystem has been designed
+with the ability to configure and manage cascaded switches on top of each other
+using upstream and downstream Ethernet links between switches. These specific
+ports are referred to as "dsa" ports in DSA terminology and code. A collection
+of multiple switches connected to each other is called a "switch tree".
+
+For each front-panel port, DSA will create specialized network devices which are
+used as controlling and data-flowing endpoints for use by the Linux networking
+stack. These specialized network interfaces are referred to as "slave" network
+interfaces in DSA terminology and code.
+
+The ideal case for using DSA is when an Ethernet switch supports a "switch tag"
+which is a hardware feature making the switch insert a specific tag for each
+Ethernet frame it receives from/sends to specific ports, to help the management
+interface figure out:
+
+- what port is this frame coming from
+- what was the reason why this frame got forwarded
+- how to send CPU originated traffic to specific ports
+
+The subsystem does support switches not capable of inserting/stripping tags, but
+the features might be slightly limited in that case (traffic separation relies
+on Port-based VLAN IDs).
+
+Note that DSA does not currently create network interfaces for the "cpu" and
+"dsa" ports because:
+
+- the "cpu" port is the Ethernet switch facing side of the management
+  controller, and as such, would create a duplication of feature, since you
+  would get two interfaces for the same conduit: master netdev, and "cpu" netdev
+
+- the "dsa" port(s) are just conduits between two or more switches, and as such
+  cannot really be used as proper network interfaces either, only the
+  downstream, or the top-most upstream interface makes sense with that model
+
+Switch tagging protocols
+------------------------
+
+DSA currently supports 4 different tagging protocols, and a tag-less mode as
+well. The different protocols are implemented in:
+
+net/dsa/tag_trailer.c: Marvell's 4-byte trailer tag mode (legacy)
+net/dsa/tag_dsa.c: Marvell's original DSA tag
+net/dsa/tag_edsa.c: Marvell's enhanced DSA tag
+net/dsa/tag_brcm.c: Broadcom's 4-byte tag
+
+The exact format of the tag protocol is vendor specific, but in general, they
+all contain something which:
+
+- identifies which port the Ethernet frame came from/should be sent to
+- provides a reason why this frame was forwarded to the management interface
+
+Master network devices
+----------------------
+
+Master network devices are regular, unmodified Linux network device drivers for
+the CPU/management Ethernet interface. Such a driver might occasionally need to
+know whether DSA is enabled (e.g.: to enable/disable specific offload features),
+but the DSA subsystem has been proven to work with industry standard drivers:
+e1000e, mv643xx_eth etc. without having to introduce modifications to these
+drivers. Such network devices are also often referred to as conduit network
+devices since they act as a pipe between the host processor and the hardware
+Ethernet switch.
+
+Networking stack hooks
+----------------------
+
+When a master netdev is used with DSA, a small hook is placed in the
+networking stack in order to have the DSA subsystem process the Ethernet
+switch specific tagging protocol. DSA accomplishes this by registering a
+specific (and fake) Ethernet type (later becoming skb->protocol) with the
+networking stack, this is also known as a ptype or packet_type. A typical
+Ethernet Frame receive sequence looks like this:
+
+Master network device (e.g.: e1000e):
+
+Receive interrupt fires:
+- receive function is invoked
+- basic packet processing is done: getting length, status etc.
+- packet is prepared to be processed by the Ethernet layer by calling
+  eth_type_trans
+
+net/ethernet/eth.c:
+
+eth_type_trans(skb, dev)
+	if (dev->dsa_ptr != NULL)
+		-> skb->protocol = ETH_P_XDSA
+
+drivers/net/ethernet/*:
+
+netif_receive_skb(skb)
+	-> iterate over registered packet_type
+		-> invoke handler for ETH_P_XDSA, calls dsa_switch_rcv()
+
+net/dsa/dsa.c:
+	-> dsa_switch_rcv()
+		-> invoke switch tag specific protocol handler in
+		   net/dsa/tag_*.c
+
+net/dsa/tag_*.c:
+	-> inspect and strip switch tag protocol to determine originating port
+	-> locate per-port network device
+	-> invoke eth_type_trans() with the DSA slave network device
+	-> invoke netif_receive_skb()
+
+Past this point, the DSA slave network devices get delivered regular Ethernet
+frames that can be processed by the networking stack.
+
+Slave network devices
+---------------------
+
+Slave network devices created by DSA are stacked on top of their master network
+device, each of these network interfaces will be responsible for being a
+controlling and data-flowing end-point for each front-panel port of the switch.
+These interfaces are specialized in order to:
+
+- insert/remove the switch tag protocol (if it exists) when sending traffic
+  to/from specific switch ports
+- query the switch for ethtool operations: statistics, link state,
+  Wake-on-LAN, register dumps...
+- external/internal PHY management: link, auto-negotiation etc.
+
+These slave network devices have custom net_device_ops and ethtool_ops function
+pointers which allow DSA to introduce a level of layering between the networking
+stack/ethtool, and the switch driver implementation.
+
+Upon frame transmission from these slave network devices, DSA will look up which
+switch tagging protocol is currently registered with these network devices, and
+invoke a specific transmit routine which takes care of adding the relevant
+switch tag in the Ethernet frames.
+
+These frames are then queued for transmission using the master network device
+ndo_start_xmit() function. Since they contain the appropriate switch tag, the
+Ethernet switch will be able to process these incoming frames coming from the
+management interface and deliver them to the physical switch port.
+
+Graphical representation
+------------------------
+
+Summarized, this is basically what DSA looks like from a network device
+perspective:
+
+
+			|---------------------------
+			| CPU network device (eth0)|
+			----------------------------
+			| <tag added by switch     |
+			|                          |
+			|                          |
+			|        tag added by CPU> |
+		|--------------------------------------------|
+		| Switch driver				     |
+		|--------------------------------------------|
+                    ||        ||         ||
+		|-------|  |-------|  |-------|
+		| sw0p0 |  | sw0p1 |  | sw0p2 |
+		|-------|  |-------|  |-------|
+
+Slave MDIO bus
+--------------
+
+In order to be able to read from/write to the PHYs built into a switch, DSA
+creates a slave MDIO bus which allows a specific switch driver to divert and
+intercept MDIO reads/writes towards specific PHY addresses. In most
+MDIO-connected switches, these functions would utilize direct or indirect PHY
+addressing mode to return standard MII registers from the switch builtin PHYs,
+allowing the PHY library to return link status, link partner pages,
+auto-negotiation results etc.
+
+For Ethernet switches which have both external and internal MDIO busses, the
+slave MII bus can be utilized to mux/demux MDIO reads and writes towards either
+internal or external MDIO devices this switch might be connected to: internal
+PHYs, external PHYs, or even external switches.
+
+Data structures
+---------------
+
+DSA data structures are defined in include/net/dsa.h as well as
+net/dsa/dsa_priv.h.
+
+dsa_chip_data: platform data configuration for a given switch device, this
+structure describes a switch device's parent device, its address, as well as
+various properties of its ports: names/labels, and finally a routing table
+indication (when cascading switches)
+
+dsa_platform_data: platform device configuration data which can reference a
+collection of dsa_chip_data structures if multiple switches are cascaded; the
+master network device this switch tree is attached to needs to be referenced
+
+dsa_switch_tree: structure assigned to the master network device under
+"dsa_ptr", this structure references a dsa_platform_data structure as well as
+the tagging protocol supported by the switch tree, and which receive/transmit
+function hooks should be invoked, information about the directly attached switch
+is also provided: CPU port. Finally, a collection of dsa_switch are referenced
+to address individual switches in the tree.
+
+dsa_switch: structure describing a switch device in the tree, referencing a
+dsa_switch_tree as a backpointer, slave network devices, master network device,
+and a reference to the backing dsa_switch_driver
+
+dsa_switch_driver: structure referencing function pointers, see below for a full
+description.
+
+Design limitations
+==================
+
+DSA is a platform device driver
+-------------------------------
+
+DSA is implemented as a DSA platform device driver which is convenient because
+it will register the entire DSA switch tree attached to a master network device
+in one shot, facilitating the device creation and simplifying the device driver
+model a bit. This however comes with a number of limitations:
+
+- building DSA and its switch drivers as modules is currently not working
+- the device driver parenting does not necessarily reflect the original
+  bus/device the switch can be created from
+- supporting non-MDIO and non-MMIO (platform) switches is not possible
+
+Limits on the number of devices and ports
+-----------------------------------------
+
+DSA currently limits the number of maximum switches within a tree to 4
+(DSA_MAX_SWITCHES), and the number of ports per switch to 12 (DSA_MAX_PORTS).
+These limits could be extended to support larger configurations should this
+need arise.
+
+Lack of CPU/DSA network devices
+-------------------------------
+
+DSA does not currently create slave network devices for the CPU or DSA ports, as
+described before. This might be an issue in the following cases:
+
+- inability to fetch switch CPU port statistics counters using ethtool, which
+  can make it harder to debug MDIO switches connected using xMII interfaces
+
+- inability to configure the CPU port link parameters based on the Ethernet
+  controller capabilities attached to it: http://patchwork.ozlabs.org/patch/509806/
+
+- inability to configure specific VLAN IDs / trunking VLANs between switches
+  when using a cascaded setup
+
+Common pitfalls using DSA setups
+--------------------------------
+
+Once a master network device is configured to use DSA (dev->dsa_ptr becomes
+non-NULL), and the switch behind it expects a tagging protocol, this network
+interface can only be used as a conduit interface. Sending packets
+directly through this interface (e.g.: opening a socket using this interface)
+will not make us go through the switch tagging protocol transmit function, so
+the Ethernet switch on the other end, expecting a tag will typically drop this
+frame.
+
+Slave network devices check that the master network device is UP before allowing
+you to administratively bring UP these slave network devices. A common
+configuration mistake is forgetting to bring UP the master network device first.
+
+Interactions with other subsystems
+==================================
+
+DSA currently leverages the following subsystems:
+
+- MDIO/PHY library: drivers/net/phy/phy.c, mdio_bus.c
+- Switchdev: net/switchdev/*
+- Device Tree for various of_* functions
+- HWMON: drivers/hwmon/*
+
+MDIO/PHY library
+----------------
+
+Slave network devices exposed by DSA may or may not be interfacing with PHY
+devices (struct phy_device as defined in include/linux/phy.h), but the DSA
+subsystem deals with all possible combinations:
+
+- internal PHY devices, built into the Ethernet switch hardware
+- external PHY devices, connected via an internal or external MDIO bus
+- internal PHY devices, connected via an internal MDIO bus
+- special, non-autonegotiated or non MDIO-managed PHY devices: SFPs, MoCA; a.k.a
+  fixed PHYs
+
+The PHY configuration is done by the dsa_slave_phy_setup() function and the
+logic basically looks like this:
+
+- if Device Tree is used, the PHY device is looked up using the standard
+  "phy-handle" property, if found, this PHY device is created and registered
+  using of_phy_connect()
+
+- if Device Tree is used, and the PHY device is "fixed", that is, conforms to
+  the definition of a non-MDIO managed PHY as defined in
+  Documentation/devicetree/bindings/net/fixed-link.txt, the PHY is registered
+  and connected transparently using the special fixed MDIO bus driver
+
+- finally, if the PHY is built into the switch, as is very common with
+  standalone switch packages, the PHY is probed using the slave MII bus created
+  by DSA
+
+
+SWITCHDEV
+---------
+
+DSA directly utilizes SWITCHDEV when interfacing with the bridge layer, and
+more specifically with its VLAN filtering portion when configuring VLANs on top
+of per-port slave network devices. Since DSA primarily deals with
+MDIO-connected switches, although not exclusively, SWITCHDEV's
+prepare/abort/commit phases are often simplified into a prepare phase which
+checks whether the operation is supported by the DSA switch driver, and a commit
+phase which applies the changes.
+
+As of today, the only SWITCHDEV objects supported by DSA are the FDB and VLAN
+objects.
+
+Device Tree
+-----------
+
+DSA features a standardized binding which is documented in
+Documentation/devicetree/bindings/net/dsa/dsa.txt. PHY/MDIO library helper
+functions such as of_get_phy_mode(), of_phy_connect() are also used to query
+per-port PHY specific details: interface connection, MDIO bus location etc.
+
+HWMON
+-----
+
+Some switch drivers feature internal temperature sensors which are exposed as
+regular HWMON devices in /sys/class/hwmon/.
+
+Driver development
+==================
+
+DSA switch drivers need to implement a dsa_switch_driver structure which will
+contain the various members described below.
+
+register_switch_driver() registers this dsa_switch_driver in its internal list
+of drivers to probe for. unregister_switch_driver() does the exact opposite.
+
+Unless requested differently by setting the priv_size member accordingly, DSA
+does not allocate any driver private context space.
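+
+As a rough, hedged sketch (names prefixed "example_" are placeholders; the
+exact member prototypes live in include/net/dsa.h and have changed between
+kernel versions), a minimal skeleton using the members documented below could
+look like:
+
+	static char *example_probe(struct device *host_dev, int sw_addr)
+	{
+		/* e.g. read an ID register through the pseudo-PHY and
+		 * return the device name, or NULL if not present */
+		return "example-switch";
+	}
+
+	static int example_setup(struct dsa_switch *ds)
+	{
+		/* reset the switch and isolate all ports from each other */
+		return 0;
+	}
+
+	static int example_set_addr(struct dsa_switch *ds, u8 *addr)
+	{
+		/* program the management interface MAC address if needed */
+		return 0;
+	}
+
+	static struct dsa_switch_driver example_switch_driver = {
+		.tag_protocol	= DSA_TAG_PROTO_NONE,
+		.probe		= example_probe,
+		.setup		= example_setup,
+		.set_addr	= example_set_addr,
+	};
+
+	static int __init example_init(void)
+	{
+		register_switch_driver(&example_switch_driver);
+		return 0;
+	}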
+
+Switch configuration
+--------------------
+
+- priv_size: additional size needed by the switch driver for its private context
+
+- tag_protocol: this is to indicate what kind of tagging protocol is supported,
+  should be a valid value from the dsa_tag_protocol enum
+
+- probe: probe routine which will be invoked by the DSA platform device upon
+  registration to test for the presence/absence of a switch device. For MDIO
+  devices, it is recommended to issue a read towards internal registers using
+  the switch pseudo-PHY and return whether this is a supported device. For other
+  buses, return a non-NULL string
+
+- setup: setup function for the switch, this function is responsible for setting
+  up the dsa_switch_driver private structure with all it needs: register maps,
+  interrupts, mutexes, locks etc. This function is also expected to properly
+  configure the switch to separate all network interfaces from each other, that
+  is, they should be isolated by the switch hardware itself, typically by creating
+  a Port-based VLAN ID for each port and allowing only the CPU port and the
+  specific port to be in the forwarding vector. Ports that are unused by the
+  platform should be disabled. After this function completes, the switch is
+  expected to be fully configured and ready to serve any kind of request. It is
+  recommended to issue a software reset of the switch during this setup
+  function in order to avoid relying on what a previous software agent such as
+  a bootloader/firmware may have configured.
+
+- set_addr: Some switches require the programming of the management interface's
+  Ethernet MAC address, switch drivers can also disable ageing of MAC addresses
+  on the management interface and "hardcode"/"force" this MAC address for the
+  CPU/management interface as an optimization
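+
+A minimal driver skeleton tying these members together might look like the
+following (a hypothetical sketch; the "foo" names are made up and most error
+handling is omitted):
+
+	struct foo_priv {
+		struct mutex	reg_lock;
+	};
+
+	static char *foo_probe(struct device *host_dev, int sw_addr)
+	{
+		/* e.g. read an ID register through the pseudo-PHY and
+		 * return the switch name, or NULL if not recognized
+		 */
+		return "foo-switch";
+	}
+
+	static int foo_setup(struct dsa_switch *ds)
+	{
+		/* software reset, then isolate all ports from each other */
+		return 0;
+	}
+
+	static struct dsa_switch_driver foo_switch_driver = {
+		.tag_protocol	= DSA_TAG_PROTO_NONE,
+		.priv_size	= sizeof(struct foo_priv),
+		.probe		= foo_probe,
+		.setup		= foo_setup,
+	};
+
+	static int __init foo_init(void)
+	{
+		register_switch_driver(&foo_switch_driver);
+		return 0;
+	}
+	module_init(foo_init);
+
+	static void __exit foo_exit(void)
+	{
+		unregister_switch_driver(&foo_switch_driver);
+	}
+	module_exit(foo_exit);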
+
+PHY devices and link management
+-------------------------------
+
+- get_phy_flags: Some switches are interfaced to various kinds of Ethernet PHYs,
+  if the PHY library PHY driver needs to know about information it cannot obtain
+  on its own (e.g.: coming from switch memory mapped registers), this function
+  should return a 32-bit bitmask of "flags" that is private between the switch
+  driver and the Ethernet PHY driver in drivers/net/phy/*.
+
+- phy_read: Function invoked by the DSA slave MDIO bus when attempting to read
+  the switch port MDIO registers. If unavailable, return 0xffff for each read.
+  For builtin switch Ethernet PHYs, this function should allow reading the link
+  status, auto-negotiation results, link partner pages etc..
+
+- phy_write: Function invoked by the DSA slave MDIO bus when attempting to write
+  to the switch port MDIO registers. If unavailable return a negative error
+  code.
+
+- poll_link: Function invoked by DSA to query the link state of the switch
+  builtin Ethernet PHYs, per port. This function is responsible for calling
+  netif_carrier_{on,off} when appropriate, and can be used to poll all ports in a
+  single call. Executes from workqueue context.
+
+- adjust_link: Function invoked by the PHY library when a slave network device
+  is attached to a PHY device. This function is responsible for appropriately
+  configuring the switch port link parameters: speed, duplex and pause, based
+  on what the phy_device is providing (see the sketch after this list).
+
+- fixed_link_update: Function invoked by the PHY library, and specifically by
+  the fixed PHY driver asking the switch driver for link parameters that could
+  not be auto-negotiated, or obtained by reading the PHY registers through MDIO.
+  This is particularly useful for specific kinds of hardware such as QSGMII,
+  MoCA or other kinds of non-MDIO managed PHYs where out of band link
+  information is obtained
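+
+As an illustration, phy_read and adjust_link often reduce to something like
+the following (hypothetical sketch; the foo_* register helpers and the
+FOO_NUM_INTERNAL_PHYS constant are made up):
+
+	static int foo_phy_read(struct dsa_switch *ds, int port, int regnum)
+	{
+		if (port >= FOO_NUM_INTERNAL_PHYS)
+			return 0xffff;	/* no PHY at this address */
+
+		return foo_mdio_read(ds_to_priv(ds), port, regnum);
+	}
+
+	static void foo_adjust_link(struct dsa_switch *ds, int port,
+				    struct phy_device *phydev)
+	{
+		struct foo_priv *priv = ds_to_priv(ds);
+
+		/* mirror what the PHY negotiated into the port MAC */
+		foo_port_set_speed(priv, port, phydev->speed);
+		foo_port_set_duplex(priv, port, phydev->duplex);
+		foo_port_set_pause(priv, port, phydev->pause);
+	}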
+
+Ethtool operations
+------------------
+
+- get_strings: ethtool function used to query the driver's strings, will
+  typically return statistics strings, private flags strings etc. (a sketch
+  of the statistics callbacks follows this list)
+
+- get_ethtool_stats: ethtool function used to query per-port statistics and
+  return their values. DSA overlays slave network devices general statistics:
+  RX/TX counters from the network device, with switch driver specific statistics
+  per port
+
+- get_sset_count: ethtool function used to query the number of statistics items
+
+- get_wol: ethtool function used to obtain Wake-on-LAN settings per-port, this
+  function may, for certain implementations, also query the master network
+  device Wake-on-LAN settings if this interface needs to participate in
+  Wake-on-LAN
+
+- set_wol: ethtool function used to configure Wake-on-LAN settings per-port,
+  direct counterpart to get_wol with similar restrictions
+
+- set_eee: ethtool function which is used to configure a switch port's EEE
+  (Green Ethernet) settings, can optionally invoke the PHY library to enable
+  EEE at the PHY level if relevant. This function should enable EEE at the
+  switch port MAC controller and data-processing logic
+
+- get_eee: ethtool function which is used to query a switch port's EEE
+  settings, this function should return the EEE state of the switch port MAC
+  controller and data-processing logic as well as query the PHY for its
+  currently configured EEE settings
+
+- get_eeprom_len: ethtool function returning for a given switch the EEPROM
+  length/size in bytes
+
+- get_eeprom: ethtool function returning for a given switch the EEPROM contents
+
+- set_eeprom: ethtool function writing specified data to a given switch EEPROM
+
+- get_regs_len: ethtool function returning the register length for a given
+  switch
+
+- get_regs: ethtool function returning the Ethernet switch internal register
+  contents. This function might require user-land code in ethtool to
+  pretty-print register values and registers
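+
+A hypothetical sketch of the statistics trio (the foo_stat_strings table and
+the counter readout helper are made up for the example):
+
+	static const char foo_stat_strings[][ETH_GSTRING_LEN] = {
+		"in_good_octets", "out_good_octets", "in_discards",
+	};
+
+	static int foo_get_sset_count(struct dsa_switch *ds)
+	{
+		return ARRAY_SIZE(foo_stat_strings);
+	}
+
+	static void foo_get_strings(struct dsa_switch *ds, int port,
+				    uint8_t *data)
+	{
+		memcpy(data, foo_stat_strings, sizeof(foo_stat_strings));
+	}
+
+	static void foo_get_ethtool_stats(struct dsa_switch *ds, int port,
+					  uint64_t *data)
+	{
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(foo_stat_strings); i++)
+			data[i] = foo_read_stat(ds_to_priv(ds), port, i);
+	}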
+
+Power management
+----------------
+
+- suspend: function invoked by the DSA platform device when the system goes to
+  suspend, should quiesce all Ethernet switch activities, but keep ports
+  participating in Wake-on-LAN active as well as additional wake-up logic if
+  supported
+
+- resume: function invoked by the DSA platform device when the system resumes,
+  should resume all Ethernet switch activities and re-configure the switch to be
+  in a fully active state
+
+- port_enable: function invoked by the DSA slave network device ndo_open
+  function when a port is administratively brought up, this function should
+  fully enable a given switch port. DSA takes care of marking the port with
+  BR_STATE_BLOCKING if the port is a bridge member, or BR_STATE_FORWARDING if it
+  was not, and propagating these changes down to the hardware
+
+- port_disable: function invoked by the DSA slave network device ndo_close
+  function when a port is administratively brought down, this function should
+  fully disable a given switch port. DSA takes care of marking the port with
+  BR_STATE_DISABLED and propagating changes to the hardware if this port is
+  disabled while being a bridge member
+
+Hardware monitoring
+-------------------
+
+These callbacks are only available if CONFIG_NET_DSA_HWMON is enabled:
+
+- get_temp: this function queries the given switch for its temperature
+
+- get_temp_limit: this function returns the switch current maximum temperature
+  limit
+
+- set_temp_limit: this function configures the maximum temperature limit allowed
+
+- get_temp_alarm: this function returns the critical temperature threshold
+  above which an alarm notification is raised
+
+See Documentation/hwmon/sysfs-interface for details.
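+
+As an illustration, get_temp typically reports degrees Celsius (hypothetical
+sketch; the sensor readout helper is made up):
+
+	static int foo_get_temp(struct dsa_switch *ds, int *temp)
+	{
+		int val = foo_read_temp_sensor(ds_to_priv(ds));
+
+		if (val < 0)
+			return val;	/* propagate the read error */
+
+		*temp = val;		/* degrees C, as HWMON expects */
+		return 0;
+	}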
+
+Bridge layer
+------------
+
+- port_join_bridge: bridge layer function invoked when a given switch port is
+  added to a bridge, this function should do what is necessary at the switch
+  level to permit the joining port to be added to the relevant logical domain,
+  so that it can ingress/egress traffic with the other members of the bridge.
+  DSA does nothing but calculate a bitmask of the switch ports that are
+  currently members of the bridge being joined
+
+- port_leave_bridge: bridge layer function invoked when a given switch port is
+  removed from a bridge, this function should do what is necessary at the
+  switch level to prevent the leaving port from exchanging ingress/egress
+  traffic with the remaining bridge members. When the port leaves the bridge,
+  it should be aged out at the switch hardware so that the switch can (re)learn
+  the MAC addresses behind this port. DSA calculates the bitmask of ports that
+  are still members of the bridge being left
+
+- port_stp_update: bridge layer function invoked when a given switch port STP
+  state is computed by the bridge layer and should be propagated to switch
+  hardware to forward/block/learn traffic. The switch driver is responsible for
+  computing an STP state change based on the current and requested parameters
+  and for performing the relevant ageing based on the intersection of the two
+  (a sketch of this callback follows)
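+
+A hypothetical sketch of such a callback, translating the bridge STP states
+into a made-up port-control register value:
+
+	static int foo_port_stp_update(struct dsa_switch *ds, int port,
+				       u8 state)
+	{
+		u8 hw_state;
+
+		switch (state) {
+		case BR_STATE_DISABLED:
+			hw_state = FOO_PORT_STATE_DISABLED;
+			break;
+		case BR_STATE_LISTENING:
+		case BR_STATE_BLOCKING:
+			hw_state = FOO_PORT_STATE_BLOCKING;
+			break;
+		case BR_STATE_LEARNING:
+			hw_state = FOO_PORT_STATE_LEARNING;
+			break;
+		case BR_STATE_FORWARDING:
+		default:
+			hw_state = FOO_PORT_STATE_FORWARDING;
+			break;
+		}
+
+		return foo_port_set_state(ds_to_priv(ds), port, hw_state);
+	}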
+
+Bridge VLAN filtering
+---------------------
+
+- port_pvid_get: bridge layer function invoked when a Port-based VLAN ID is
+  queried for the given switch port
+
+- port_pvid_set: bridge layer function invoked when a Port-based VLAN ID needs
+  to be configured on the given switch port
+
+- port_vlan_add: bridge layer function invoked when a VLAN is configured
+  (tagged or untagged) for the given switch port
+
+- port_vlan_del: bridge layer function invoked when a VLAN is removed from the
+  given switch port
+
+- vlan_getnext: bridge layer function invoked to query the next configured VLAN
+  in the switch, i.e. returns the bitmaps of members and untagged ports
+
+- port_fdb_add: bridge layer function invoked when the bridge wants to install a
+  Forwarding Database entry, the switch hardware should be programmed with the
+  specified address in the forwarding database associated with the specified
+  VLAN ID (a sketch follows this list)
+
+Note: VLAN ID 0 corresponds to the port private database, which, in the context
+of DSA, would be its port-based VLAN, used by the associated bridge device.
+
+- port_fdb_del: bridge layer function invoked when the bridge wants to remove a
+  Forwarding Database entry, the switch hardware should be programmed to delete
+  the specified MAC address from the specified VLAN ID if it was mapped into
+  this port forwarding database
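+
+A hypothetical sketch of an FDB install callback (the foo_atu_load helper is
+made up; VLAN ID 0 targets the port's private database as noted above):
+
+	static int foo_port_fdb_add(struct dsa_switch *ds, int port,
+				    const unsigned char *addr, u16 vid)
+	{
+		struct foo_priv *priv = ds_to_priv(ds);
+
+		/* program a static entry: MAC + VLAN -> this port */
+		return foo_atu_load(priv, vid, addr, BIT(port));
+	}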
+
+TODO
+====
+
+The platform device problem
+---------------------------
+DSA is currently implemented as a platform device driver which is far from
+ideal, as was discussed in this thread:
+
+http://permalink.gmane.org/gmane.linux.network/329848
+
+This basically prevents the device driver model from being properly used and
+applied, and prevents supporting non-MDIO, non-MMIO Ethernet connected switches.
+
+Another problem with the platform device driver approach is that it prevents the
+use of a modular switch driver build due to a circular dependency, illustrated
+here:
+
+http://comments.gmane.org/gmane.linux.network/345803
+
+An attempt at reworking this was done here:
+
+https://lwn.net/Articles/643149/
+
+Making SWITCHDEV and DSA converge towards a unified codebase
+------------------------------------------------------------
+
+SWITCHDEV properly takes care of abstracting the networking stack with offload
+capable hardware, but does not enforce a strict switch device driver model. On
+the other hand, DSA enforces a fairly strict device driver model, and deals
+with most of the switch-specific details. At some point we should envision a
+merger between these two subsystems and get the best of both worlds.
+
+Other low-hanging fruit
+-----------------------
+
+- making the number of ports fully dynamic and not dependent on DSA_MAX_PORTS
+- allowing more than one CPU/management interface:
+  http://comments.gmane.org/gmane.linux.network/365657
+- porting more drivers from other vendors:
+  http://comments.gmane.org/gmane.linux.network/365510
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 5fae770..ebe94f2 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -586,6 +586,21 @@
 	if available window is too small.
 	Default: 2
 
+tcp_pacing_ss_ratio - INTEGER
+	sk->sk_pacing_rate is set by TCP stack using a ratio applied
+	to current rate. (current_rate = cwnd * mss / srtt)
+	If TCP is in slow start, tcp_pacing_ss_ratio is applied
+	to let TCP probe for bigger speeds, assuming cwnd can be
+	doubled every other RTT.
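+	For example, with cwnd=10, mss=1448 bytes and srtt=100ms the
+	current rate is about 1.16 Mbit/sec, so a tcp_pacing_ss_ratio of
+	200 paces transmissions at roughly 2.3 Mbit/sec.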
+	Default: 200
+
+tcp_pacing_ca_ratio - INTEGER
+	sk->sk_pacing_rate is set by TCP stack using a ratio applied
+	to current rate. (current_rate = cwnd * mss / srtt)
+	If TCP is in congestion avoidance phase, tcp_pacing_ca_ratio
+	is applied to conservatively probe for bigger throughput.
+	Default: 120
+
 tcp_tso_win_divisor - INTEGER
 	This allows control over what percentage of the congestion window
 	can be consumed by a single TSO frame.
@@ -1181,6 +1196,16 @@
 	Allows you to write a number, which can be used as required.
 	Default value is 0.
 
+xfrm4_gc_thresh - INTEGER
+	The threshold at which we will start garbage collecting for IPv4
+	destination cache entries.  At twice this value the system will
+	refuse new allocations.
+
+igmp_link_local_mcast_reports - BOOLEAN
+	Enable IGMP reports for link local multicast groups in the
+	224.0.0.X range.
+	Default TRUE
+
 Alexey Kuznetsov.
 kuznet@ms2.inr.ac.ru
 
@@ -1215,14 +1240,20 @@
 	FALSE: disabled
 	Default: TRUE
 
-auto_flowlabels - BOOLEAN
-	Automatically generate flow labels based based on a flow hash
-	of the packet. This allows intermediate devices, such as routers,
-	to idenfify packet flows for mechanisms like Equal Cost Multipath
+auto_flowlabels - INTEGER
+	Automatically generate flow labels based on a flow hash of the
+	packet. This allows intermediate devices, such as routers, to
+	identify packet flows for mechanisms like Equal Cost Multipath
 	Routing (see RFC 6438).
-	TRUE: enabled
-	FALSE: disabled
-	Default: false
+	0: automatic flow labels are completely disabled
+	1: automatic flow labels are enabled by default, they can be
+	   disabled on a per socket basis using the IPV6_AUTOFLOWLABEL
+	   socket option
+	2: automatic flow labels are allowed, they may be enabled on a
+	   per socket basis using the IPV6_AUTOFLOWLABEL socket option
+	3: automatic flow labels are enabled and enforced, they cannot
+	   be disabled by the socket option
+	Default: 1
 
 flowlabel_state_ranges - BOOLEAN
 	Split the flow label number space into two ranges. 0-0x7FFFF is
@@ -1340,6 +1371,14 @@
 	   disabled if accept_ra_from_local is disabled
                on a specific interface.
 
+accept_ra_min_hop_limit - INTEGER
+	Minimum hop limit Information in Router Advertisement.
+
+	Hop limit Information in Router Advertisement less than this
+	variable shall be ignored.
+
+	Default: 1
+
 accept_ra_pinfo - BOOLEAN
 	Learn Prefix Information in Router Advertisement.
 
@@ -1435,6 +1474,11 @@
 	Default Maximum Transfer Unit
 	Default: 1280 (IPv6 required minimum)
 
+ip_nonlocal_bind - BOOLEAN
+	If set, allows processes to bind() to non-local IPv6 addresses,
+	which can be quite useful - but may break some applications.
+	Default: 0
+
 router_probe_interval - INTEGER
 	Minimum interval (in seconds) between Router Probing described
 	in RFC4191.
@@ -1455,6 +1499,13 @@
 	routers are present.
 	Default: 3
 
+use_oif_addrs_only - BOOLEAN
+	When enabled, the candidate source addresses for destinations
+	routed via this interface are restricted to the set of addresses
+	configured on this interface (vis. RFC 6724, section 4).
+
+	Default: false
+
 use_tempaddr - INTEGER
 	Preference for Privacy Extensions (RFC3041).
 	  <= 0 : disable Privacy Extensions
@@ -1591,6 +1642,11 @@
 	otherwise the minimal space between responses in milliseconds.
 	Default: 1000
 
+xfrm6_gc_thresh - INTEGER
+	The threshold at which we will start garbage collecting for IPv6
+	destination cache entries.  At twice this value the system will
+	refuse new allocations.
+
 
 IPv6 Update by:
 Pekka Savola <pekkas@netcore.fi>
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index e655e24..d64a147 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -135,12 +135,8 @@
 	int maxmtu;
 	void (*fix_mac_speed)(void *priv, unsigned int speed);
 	void (*bus_setup)(void __iomem *ioaddr);
-	void *(*setup)(struct platform_device *pdev);
-	void (*free)(struct platform_device *pdev, void *priv);
 	int (*init)(struct platform_device *pdev, void *priv);
 	void (*exit)(struct platform_device *pdev, void *priv);
-	void *custom_cfg;
-	void *custom_data;
 	void *bsp_priv;
 };
 
@@ -179,15 +175,11 @@
  o bus_setup: perform HW setup of the bus. For example, on some ST platforms
 	     this field is used to configure the AMBA  bridge to generate more
 	     efficient STBus traffic.
- o setup/init/exit: callbacks used for calling a custom initialization;
+ o init/exit: callbacks used for calling a custom initialization;
 	     this is sometime necessary on some platforms (e.g. ST boxes)
 	     where the HW needs to have set some PIO lines or system cfg
-	     registers. setup should return a pointer to private data,
-	     which will be stored in bsp_priv, and then passed to init and
-	     exit callbacks. init/exit callbacks should not use or modify
+	     registers.  init/exit callbacks should not use or modify
 	     platform data.
- o custom_cfg/custom_data: this is a custom configuration that can be passed
-			   while initializing the resources.
  o bsp_priv: another private pointer.
 
 For MDIO bus The we have:
@@ -262,7 +254,7 @@
 
 During the board's device_init we can configure the first
 MAC for fixed_link by calling:
-  fixed_phy_add(PHY_POLL, 1, &stmmac0_fixed_phy_status));)
+  fixed_phy_add(PHY_POLL, 1, &stmmac0_fixed_phy_status, -1);
 and the second one, with a real PHY device attached to the bus,
 by using the stmmac_mdio_bus_data structure (to provide the id, the
 reset procedure etc).
@@ -278,8 +270,6 @@
 Please see the following document:
 	Documentation/devicetree/bindings/net/stmmac.txt
 
-and the stmmac_of_data structure inside the include/linux/stmmac.h header file.
-
 4.11) This is a summary of the content of some relevant files:
  o stmmac_main.c: to implement the main network device driver;
  o stmmac_mdio.c: to provide mdio functions;
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index c5d7ade..476df04 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -279,8 +279,18 @@
 current STP state.  The switch driver, knowing which ports are within which
 vlan L2 domain, can program the switch device for flooding.  The packet should
 also be sent to the port netdev for processing by the bridge driver.  The
-bridge should not reflood the packet to the same ports the device flooded.
-XXX: the mechanism to avoid duplicate flood packets is being discuseed.
+bridge should not reflood the packet to the same ports the device flooded,
+otherwise there will be duplicate packets on the wire.
+
+To avoid duplicate packets, the device/driver should mark a packet as already
+forwarded using skb->offload_fwd_mark.  The same mark is set on the device
+ports in the domain using dev->offload_fwd_mark.  If the skb->offload_fwd_mark
+is non-zero and matches the forwarding egress port's dev->offload_fwd_mark,
+the kernel will drop the skb right before transmit on the egress port, with the
+understanding that the device already forwarded the packet on same egress port.
+The driver can use switchdev_port_fwd_mark_set() to set a globally unique mark
+for port's dev->offload_fwd_mark, based on the port's parent ID (switch ID) and
+a group ifindex.
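+
+A driver-side sketch of the mechanism (illustrative only; the variable names
+are made up):
+
+	/* rx path: tag the skb with the ingress port's mark so the kernel
+	 * can suppress duplicate flooding on same-mark egress ports
+	 */
+	skb->offload_fwd_mark = dev->offload_fwd_mark;
+
+	/* on bridge join/leave: recompute the port's forwarding mark */
+	switchdev_port_fwd_mark_set(port_dev, bridge_dev, joining);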
 
 It is possible for the switch device to not handle flooding and push the
 packets up to the bridge driver for flooding.  This is not ideal as the number
@@ -357,4 +367,5 @@
 
 The driver can monitor for updates to arp_tbl using the netevent notifier
 NETEVENT_NEIGH_UPDATE.  The device can be programmed with resolved nexthops
-for the routes as arp_tbl updates.
+for the routes as arp_tbl updates.  The driver implements ndo_neigh_destroy
+to know when arp_tbl neighbor entries are purged from the port.
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 5f09226..a977339 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -359,6 +359,13 @@
 supported, the driver may time stamp more than just the requested types
 of packets.
 
+Drivers are free to use a more permissive configuration than the requested
+configuration. It is expected that drivers should only implement directly the
+most generic mode that can be supported. For example if the hardware can
+support HWTSTAMP_FILTER_V2_EVENT, then it should generally always upscale
+HWTSTAMP_FILTER_V2_L2_SYNC_MESSAGE, and so forth, as HWTSTAMP_FILTER_V2_EVENT
+is more generic (and more useful to applications).
+
 A driver which supports hardware time stamping shall update the struct
 with the actual, possibly more permissive configuration. If the
 requested packets cannot be time stamped, then nothing should be
diff --git a/Documentation/networking/vxlan.txt b/Documentation/networking/vxlan.txt
index 6d99351..c28f498 100644
--- a/Documentation/networking/vxlan.txt
+++ b/Documentation/networking/vxlan.txt
@@ -1,32 +1,36 @@
 Virtual eXtensible Local Area Networking documentation
 ======================================================
 
-The VXLAN protocol is a tunnelling protocol that is designed to
-solve the problem of limited number of available VLAN's (4096).
-With VXLAN identifier is expanded to 24 bits.
+The VXLAN protocol is a tunnelling protocol designed to solve the
+problem of limited VLAN IDs (4096) in IEEE 802.1q.  With VXLAN the
+size of the identifier is expanded to 24 bits (16777216).
 
-It is a draft RFC standard, that is implemented by Cisco Nexus,
-Vmware and Brocade. The protocol runs over UDP using a single
-destination port (still not standardized by IANA).
-This document describes the Linux kernel tunnel device,
-there is also an implantation of VXLAN for Openvswitch.
+VXLAN is described by IETF RFC 7348, and has been implemented by a
+number of vendors.  The protocol runs over UDP using a single
+destination port.  This document describes the Linux kernel tunnel
+device, there is also a separate implementation of VXLAN for
+Openvswitch.
 
-Unlike most tunnels, a VXLAN is a 1 to N network, not just point
-to point. A VXLAN device can either dynamically learn the IP address
-of the other end, in a manner similar to a learning bridge, or the
-forwarding entries can be configured statically.
+Unlike most tunnels, a VXLAN is a 1 to N network, not just point to
+point. A VXLAN device can learn the IP address of the other endpoint
+either dynamically in a manner similar to a learning bridge, or make
+use of statically-configured forwarding entries.
 
-The management of vxlan is done in a similar fashion to it's
-too closest neighbors GRE and VLAN. Configuring VXLAN requires
-the version of iproute2 that matches the kernel release
-where VXLAN was first merged upstream.
+The management of vxlan is done in a manner similar to its two closest
+neighbors GRE and VLAN. Configuring VXLAN requires the version of
+iproute2 that matches the kernel release where VXLAN was first merged
+upstream.
 
 1. Create vxlan device
-  # ip li add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1
+ # ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1 dstport 4789
 
-This creates a new device (vxlan0). The device uses the
-the multicast group 239.1.1.1 over eth1 to handle packets where
-no entry is in the forwarding table.
+This creates a new device named vxlan0.  The device uses the multicast
+group 239.1.1.1 over eth1 to handle traffic for which there is no
+entry in the forwarding table.  The destination port number is set to
+the IANA-assigned value of 4789.  The Linux implementation of VXLAN
+pre-dates the IANA's selection of a standard destination port number
+and uses the Linux-selected value by default to maintain backwards
+compatibility.
 
 2. Delete vxlan device
   # ip link delete vxlan0
diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt
index c4407a4..f4cb0b2 100644
--- a/Documentation/static-keys.txt
+++ b/Documentation/static-keys.txt
@@ -1,7 +1,22 @@
 			Static Keys
 			-----------
 
-By: Jason Baron <jbaron@redhat.com>
+DEPRECATED API:
+
+The use of 'struct static_key' directly is now DEPRECATED. In addition,
+static_key_{true,false}() is also DEPRECATED. That is, DO NOT use the following:
+
+struct static_key false = STATIC_KEY_INIT_FALSE;
+struct static_key true = STATIC_KEY_INIT_TRUE;
+static_key_true()
+static_key_false()
+
+The updated API replacements are:
+
+DEFINE_STATIC_KEY_TRUE(key);
+DEFINE_STATIC_KEY_FALSE(key);
+static_branch_likely()
+static_branch_unlikely()
 
 0) Abstract
 
@@ -9,22 +24,22 @@
 performance-sensitive fast-path kernel code, via a GCC feature and a code
 patching technique. A quick example:
 
-	struct static_key key = STATIC_KEY_INIT_FALSE;
+	DEFINE_STATIC_KEY_FALSE(key);
 
 	...
 
-        if (static_key_false(&key))
+        if (static_branch_unlikely(&key))
                 do unlikely code
         else
                 do likely code
 
 	...
-	static_key_slow_inc();
+	static_branch_enable(&key);
 	...
-	static_key_slow_inc();
+	static_branch_disable(&key);
 	...
 
-The static_key_false() branch will be generated into the code with as little
+The static_branch_unlikely() branch will be generated into the code with as little
 impact to the likely code path as possible.
 
 
@@ -56,7 +71,7 @@
 
 For example, if we have a simple branch that is disabled by default:
 
-	if (static_key_false(&key))
+	if (static_branch_unlikely(&key))
 		printk("I am the true branch\n");
 
 Thus, by default the 'printk' will not be emitted. And the code generated will
@@ -75,68 +90,55 @@
 
 In order to make use of this optimization you must first define a key:
 
-	struct static_key key;
-
-Which is initialized as:
-
-	struct static_key key = STATIC_KEY_INIT_TRUE;
+	DEFINE_STATIC_KEY_TRUE(key);
 
 or:
 
-	struct static_key key = STATIC_KEY_INIT_FALSE;
+	DEFINE_STATIC_KEY_FALSE(key);
 
-If the key is not initialized, it is default false. The 'struct static_key',
-must be a 'global'. That is, it can't be allocated on the stack or dynamically
+
+The key must be global, that is, it can't be allocated on the stack or dynamically
 allocated at run-time.
 
 The key is then used in code as:
 
-        if (static_key_false(&key))
+        if (static_branch_unlikely(&key))
                 do unlikely code
         else
                 do likely code
 
 Or:
 
-        if (static_key_true(&key))
+        if (static_branch_likely(&key))
                 do likely code
         else
                 do unlikely code
 
-A key that is initialized via 'STATIC_KEY_INIT_FALSE', must be used in a
-'static_key_false()' construct. Likewise, a key initialized via
-'STATIC_KEY_INIT_TRUE' must be used in a 'static_key_true()' construct. A
-single key can be used in many branches, but all the branches must match the
-way that the key has been initialized.
+Keys defined via DEFINE_STATIC_KEY_TRUE(), or DEFINE_STATIC_KEY_FALSE(), may
+be used in either static_branch_likely() or static_branch_unlikely()
+statements.
 
-The branch(es) can then be switched via:
+Branch(es) can be set true via:
 
-	static_key_slow_inc(&key);
+static_branch_enable(&key);
+
+or false via:
+
+static_branch_disable(&key);
+
+The branch(es) can then be switched via reference counts:
+
+	static_branch_inc(&key);
 	...
-	static_key_slow_dec(&key);
+	static_branch_dec(&key);
 
-Thus, 'static_key_slow_inc()' means 'make the branch true', and
-'static_key_slow_dec()' means 'make the branch false' with appropriate
+Thus, 'static_branch_inc()' means 'make the branch true', and
+'static_branch_dec()' means 'make the branch false' with appropriate
 reference counting. For example, if the key is initialized true, a
-static_key_slow_dec(), will switch the branch to false. And a subsequent
-static_key_slow_inc(), will change the branch back to true. Likewise, if the
-key is initialized false, a 'static_key_slow_inc()', will change the branch to
-true. And then a 'static_key_slow_dec()', will again make the branch false.
-
-An example usage in the kernel is the implementation of tracepoints:
-
-        static inline void trace_##name(proto)                          \
-        {                                                               \
-                if (static_key_false(&__tracepoint_##name.key))		\
-                        __DO_TRACE(&__tracepoint_##name,                \
-                                TP_PROTO(data_proto),                   \
-                                TP_ARGS(data_args),                     \
-                                TP_CONDITION(cond));                    \
-        }
-
-Tracepoints are disabled by default, and can be placed in performance critical
-pieces of the kernel. Thus, by using a static key, the tracepoints can have
-absolutely minimal impact when not in use.
+static_branch_dec(), will switch the branch to false. And a subsequent
+static_branch_inc(), will change the branch back to true. Likewise, if the
+key is initialized false, a 'static_branch_inc()', will change the branch to
+true. And then a 'static_branch_dec()', will again make the branch false.
 
 
 4) Architecture level code patching interface, 'jump labels'
@@ -150,9 +152,12 @@
 
 * #define JUMP_LABEL_NOP_SIZE, see: arch/x86/include/asm/jump_label.h
 
-* __always_inline bool arch_static_branch(struct static_key *key), see:
+* __always_inline bool arch_static_branch(struct static_key *key, bool branch), see:
 					arch/x86/include/asm/jump_label.h
 
+* __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch),
+					see: arch/x86/include/asm/jump_label.h
+
 * void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type),
 					see: arch/x86/kernel/jump_label.c
 
@@ -173,7 +178,7 @@
 {
         int pid;
 
-+       if (static_key_false(&key))
++       if (static_branch_unlikely(&key))
 +               printk("I am the true branch\n");
 
         rcu_read_lock();
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index 0e307c9..267f393 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -119,6 +119,7 @@
 
 'x'	- Used by xmon interface on ppc/powerpc platforms.
           Show global PMU Registers on sparc64.
+          Dump all TLB entries on MIPS.
 
 'y'	- Show global CPU Registers [SPARC-64 specific]
 
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 7ddb1e3..87bb4aa 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -346,6 +346,11 @@
 	  x86-tsc: Architectures may define their own clocks. For
 	  	   example, x86 uses its own TSC cycle clock here.
 
+	  ppc-tb: This uses the powerpc timebase register value.
+		  This is in sync across CPUs and can also be used
+		  to correlate events across hypervisor/guest if
+		  tb_offset is known.
+
 	To set a clock, simply echo the clock name into this file.
 
 	  echo global > trace_clock
diff --git a/Documentation/vm/userfaultfd.txt b/Documentation/vm/userfaultfd.txt
new file mode 100644
index 0000000..70a3c94
--- /dev/null
+++ b/Documentation/vm/userfaultfd.txt
@@ -0,0 +1,144 @@
+= Userfaultfd =
+
+== Objective ==
+
+Userfaults allow the implementation of on-demand paging from userland
+and more generally they allow userland to take control of various
+memory page faults, something otherwise only the kernel code could do.
+
+For example, userfaults allow a proper and more optimal implementation
+of the PROT_NONE+SIGSEGV trick.
+
+== Design ==
+
+Userfaults are delivered and resolved through the userfaultfd syscall.
+
+The userfaultfd (aside from registering and unregistering virtual
+memory ranges) provides two primary functionalities:
+
+1) read/POLLIN protocol to notify a userland thread of the faults
+   happening
+
+2) various UFFDIO_* ioctls that can manage the virtual memory regions
+   registered in the userfaultfd that allows userland to efficiently
+   resolve the userfaults it receives via 1) or to manage the virtual
+   memory in the background
+
+The real advantage of userfaults compared to regular virtual memory
+management of mremap/mprotect is that the userfaults in all their
+operations never involve heavyweight structures like vmas (in fact the
+userfaultfd runtime load never takes the mmap_sem for writing).
+
+Vmas are not suitable for page- (or hugepage) granular fault tracking
+when dealing with virtual address spaces that could span
+Terabytes. Too many vmas would be needed for that.
+
+The userfaultfd, once opened by invoking the syscall, can also be
+passed using unix domain sockets to a manager process, so the same
+manager process could handle the userfaults of a multitude of
+different processes without them being aware of what is going on
+(well of course unless they later try to use the userfaultfd
+themselves on the same region the manager is already tracking, which
+is a corner case that would currently return -EBUSY).
+
+== API ==
+
+When first opened the userfaultfd must be enabled invoking the
+UFFDIO_API ioctl specifying a uffdio_api.api value set to UFFD_API (or
+a later API version) which will specify the read/POLLIN protocol
+userland intends to speak on the UFFD and the uffdio_api.features
+userland requires. The UFFDIO_API ioctl if successful (i.e. if the
+requested uffdio_api.api is spoken also by the running kernel and the
+requested features are going to be enabled) will return into
+uffdio_api.features and uffdio_api.ioctls two 64bit bitmasks of,
+respectively, all the available features of the read(2) protocol and
+the generic ioctls available.
+
+Once the userfaultfd has been enabled the UFFDIO_REGISTER ioctl should
+be invoked (if present in the returned uffdio_api.ioctls bitmask) to
+register a memory range in the userfaultfd by setting the
+uffdio_register structure accordingly. The uffdio_register.mode
+bitmask will specify to the kernel which kind of faults to track for
+the range (UFFDIO_REGISTER_MODE_MISSING would track missing
+pages). The UFFDIO_REGISTER ioctl will return the
+uffdio_register.ioctls bitmask of ioctls that are suitable to resolve
+userfaults on the range registered. Not all ioctls will necessarily be
+supported for all memory types depending on the underlying virtual
+memory backend (anonymous memory vs tmpfs vs real filebacked
+mappings).
+
+Userland can use the uffdio_register.ioctls to manage the virtual
+address space in the background (to add or potentially also remove
+memory from the userfaultfd registered range). This means a userfault
+could trigger just before userland maps the user-faulted page in the
+background.
+
+The primary ioctl to resolve userfaults is UFFDIO_COPY. That
+atomically copies a page into the userfault registered range and wakes
+up the blocked userfaults (unless uffdio_copy.mode &
+UFFDIO_COPY_MODE_DONTWAKE is set). Other ioctls work similarly to
+UFFDIO_COPY. They're atomic in the sense of guaranteeing that nothing
+can see a half-copied page, since any access will keep userfaulting
+until the copy has finished.
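+
+A minimal user-space sketch of this flow (illustrative only; area, len,
+page_size and page_with_contents are assumed to be set up elsewhere, and
+error handling is omitted):
+
+	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+	struct uffdio_api api = { .api = UFFD_API };
+	ioctl(uffd, UFFDIO_API, &api);
+
+	struct uffdio_register reg = {
+		.range = { .start = (unsigned long)area, .len = len },
+		.mode  = UFFDIO_REGISTER_MODE_MISSING,
+	};
+	ioctl(uffd, UFFDIO_REGISTER, &reg);
+
+	/* fault handling thread: wait for a fault, then resolve it */
+	struct uffd_msg msg;
+	read(uffd, &msg, sizeof(msg));	/* blocks until a userfault */
+	if (msg.event == UFFD_EVENT_PAGEFAULT) {
+		struct uffdio_copy copy = {
+			.dst = msg.arg.pagefault.address & ~(page_size - 1),
+			.src = (unsigned long)page_with_contents,
+			.len = page_size,
+		};
+		ioctl(uffd, UFFDIO_COPY, &copy);	/* wakes the faulter */
+	}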
+
+== QEMU/KVM ==
+
+QEMU/KVM is using the userfaultfd syscall to implement postcopy live
+migration. Postcopy live migration is one form of memory
+externalization consisting of a virtual machine running with part or
+all of its memory residing on a different node in the cloud. The
+userfaultfd abstraction is generic enough that not a single line of
+KVM kernel code had to be modified in order to add postcopy live
+migration to QEMU.
+
+Guest async page faults, FOLL_NOWAIT and all other GUP features work
+just fine in combination with userfaults. Userfaults trigger async
+page faults in the guest scheduler so those guest processes that
+aren't waiting for userfaults (i.e. network bound) can keep running in
+the guest vcpus.
+
+It is generally beneficial to run one pass of precopy live migration
+just before starting postcopy live migration, in order to avoid
+generating userfaults for readonly guest regions.
+
+The implementation of postcopy live migration currently uses one
+single bidirectional socket but in the future two different sockets
+will be used (to reduce the latency of the userfaults to the minimum
+possible without having to decrease /proc/sys/net/ipv4/tcp_wmem).
+
+The QEMU in the source node writes all pages that it knows are missing
+in the destination node, into the socket, and the migration thread of
+the QEMU running in the destination node runs UFFDIO_COPY|ZEROPAGE
+ioctls on the userfaultfd in order to map the received pages into the
+guest (UFFDIO_ZEROPAGE is used if the source page was a zero page).
+
+A different postcopy thread in the destination node listens with
+poll() to the userfaultfd in parallel. When a POLLIN event is
+generated after a userfault triggers, the postcopy thread reads from
+the userfaultfd and receives the fault address (or -EAGAIN in case the
+userfault was already resolved and woken by a UFFDIO_COPY|ZEROPAGE run
+by the parallel QEMU migration thread).
+
+After the QEMU postcopy thread (running in the destination node) gets
+the userfault address it writes the information about the missing page
+into the socket. The QEMU source node receives the information and
+roughly "seeks" to that page address and continues sending all
+remaining missing pages from that new page offset. Soon after that
+(just the time to flush the tcp_wmem queue through the network) the
+migration thread in the QEMU running in the destination node will
+receive the page that triggered the userfault and it'll map it as
+usual with the UFFDIO_COPY|ZEROPAGE (without actually knowing if it
+was spontaneously sent by the source or if it was an urgent page
+requested through a userfault).
+
+By the time the userfaults start, the QEMU in the destination node
+doesn't need to keep any per-page state bitmap relative to the live
+migration around and a single per-page bitmap has to be maintained in
+the QEMU running in the source node to know which pages are still
+missing in the destination node. The bitmap in the source node is
+checked to find which missing pages to send in round robin and we seek
+over it when receiving incoming userfaults. After sending each page of
+course the bitmap is updated accordingly. It's also useful to avoid
+sending the same page twice (in case the userfault is read by the
+postcopy thread just before UFFDIO_COPY|ZEROPAGE runs in the migration
+thread).
diff --git a/MAINTAINERS b/MAINTAINERS
index 4be7e5e..a9abe32 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -158,6 +158,7 @@
 S:	Maintained
 F:	net/6lowpan/
 F:	include/net/6lowpan.h
+F:	Documentation/networking/6lowpan.txt
 
 6PACK NETWORK DRIVER FOR AX.25
 M:	Andreas Koensgen <ajk@comnets.uni-bremen.de>
@@ -642,9 +643,14 @@
 L:	dri-devel@lists.freedesktop.org
 T:	git git://people.freedesktop.org/~gabbayo/linux.git
 S:	Supported
+F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
 F:	drivers/gpu/drm/amd/amdkfd/
 F:	drivers/gpu/drm/amd/include/cik_structs.h
 F:	drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+F:	drivers/gpu/drm/amd/include/vi_structs.h
 F:	drivers/gpu/drm/radeon/radeon_kfd.c
 F:	drivers/gpu/drm/radeon/radeon_kfd.h
 F:	include/uapi/linux/kfd_ioctl.h
@@ -734,6 +740,12 @@
 F:	drivers/staging/iio/*/ad*
 F:	staging/iio/trigger/iio-trig-bfin-timer.c
 
+ANALOG DEVICES INC DMA DRIVERS
+M:	Lars-Peter Clausen <lars@metafoo.de>
+W:	http://ez.analog.com/community/linux-device-drivers
+S:	Supported
+F:	drivers/dma/dma-axi-dmac.c
+
 ANDROID DRIVERS
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 M:	Arve Hjønnevåg <arve@android.com>
@@ -805,11 +817,13 @@
 ARM PMU PROFILING AND DEBUGGING
 M:	Will Deacon <will.deacon@arm.com>
 S:	Maintained
-F:	arch/arm/kernel/perf_event*
+F:	arch/arm/kernel/perf_*
 F:	arch/arm/oprofile/common.c
-F:	arch/arm/include/asm/pmu.h
 F:	arch/arm/kernel/hw_breakpoint.c
 F:	arch/arm/include/asm/hw_breakpoint.h
+F:	arch/arm/include/asm/perf_event.h
+F:	drivers/perf/arm_pmu.c
+F:	include/linux/perf/arm_pmu.h
 
 ARM PORT
 M:	Russell King <linux@arm.linux.org.uk>
@@ -933,7 +947,7 @@
 M:	Robert Richter <rric@kernel.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
-F:	drivers/net/ethernet/cavium/
+F:	drivers/net/ethernet/cavium/thunder/
 
 ARM/CIRRUS LOGIC CLPS711X ARM ARCHITECTURE
 M:	Alexander Shiyan <shc_work@mail.ru>
@@ -1508,6 +1522,7 @@
 F:	arch/arm/mach-sti/
 F:	arch/arm/boot/dts/sti*
 F:	drivers/clocksource/arm_global_timer.c
+F:	drivers/clocksource/clksrc_st_lpc.c
 F:	drivers/i2c/busses/i2c-st.c
 F:	drivers/media/rc/st_rc.c
 F:	drivers/mmc/host/sdhci-st.c
@@ -1922,6 +1937,14 @@
 S:	Maintained
 F:	drivers/net/wireless/atmel*
 
+ATMEL MAXTOUCH DRIVER
+M:	Nick Dyer <nick.dyer@itdev.co.uk>
+T:	git git://github.com/atmel-maxtouch/linux.git
+S:	Supported
+F:	Documentation/devicetree/bindings/input/atmel,maxtouch.txt
+F:	drivers/input/touchscreen/atmel_mxt_ts.c
+F:	include/linux/platform_data/atmel_mxt_ts.h
+
 ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
 M:	Bradley Grove <linuxdrivers@attotech.com>
 L:	linux-scsi@vger.kernel.org
@@ -2551,7 +2574,6 @@
 L:     netdev@vger.kernel.org
 W:     http://www.cavium.com
 S:     Supported
-F:     drivers/net/ethernet/cavium/
 F:     drivers/net/ethernet/cavium/liquidio/
 
 CC2520 IEEE-802.15.4 RADIO DRIVER
@@ -3560,6 +3582,15 @@
 F:	include/drm/exynos*
 F:	include/uapi/drm/exynos*
 
+DRM DRIVERS FOR FREESCALE DCU
+M:	Jianwei Wang <jianwei.wang.chn@gmail.com>
+M:	Alison Wang <alison.wang@freescale.com>
+L:	dri-devel@lists.freedesktop.org
+S:	Supported
+F:	drivers/gpu/drm/fsl-dcu/
+F:	Documentation/devicetree/bindings/video/fsl,dcu.txt
+F:	Documentation/devicetree/bindings/panel/nec,nl4827hc19_05b.txt
+
 DRM DRIVERS FOR FREESCALE IMX
 M:	Philipp Zabel <p.zabel@pengutronix.de>
 L:	dri-devel@lists.freedesktop.org
@@ -4078,15 +4109,6 @@
 F:	fs/ext2/
 F:	include/linux/ext2*
 
-EXT3 FILE SYSTEM
-M:	Jan Kara <jack@suse.com>
-M:	Andrew Morton <akpm@linux-foundation.org>
-M:	Andreas Dilger <adilger.kernel@dilger.ca>
-L:	linux-ext4@vger.kernel.org
-S:	Maintained
-F:	Documentation/filesystems/ext3.txt
-F:	fs/ext3/
-
 EXT4 FILE SYSTEM
 M:	"Theodore Ts'o" <tytso@mit.edu>
 M:	Andreas Dilger <adilger.kernel@dilger.ca>
@@ -4425,6 +4447,7 @@
 F2FS FILE SYSTEM
 M:	Jaegeuk Kim <jaegeuk@kernel.org>
 M:	Changman Lee <cm224.lee@samsung.com>
+R:	Chao Yu <chao2.yu@samsung.com>
 L:	linux-f2fs-devel@lists.sourceforge.net
 W:	http://en.wikipedia.org/wiki/F2FS
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git
@@ -4433,6 +4456,7 @@
 F:	Documentation/ABI/testing/sysfs-fs-f2fs
 F:	fs/f2fs/
 F:	include/linux/f2fs_fs.h
+F:	include/trace/events/f2fs.h
 
 FUJITSU FR-V (FRV) PORT
 M:	David Howells <dhowells@redhat.com>
@@ -5787,16 +5811,9 @@
 F:	fs/jffs2/
 F:	include/uapi/linux/jffs2.h
 
-JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
-M:	Andrew Morton <akpm@linux-foundation.org>
-M:	Jan Kara <jack@suse.com>
-L:	linux-ext4@vger.kernel.org
-S:	Maintained
-F:	fs/jbd/
-F:	include/linux/jbd.h
-
 JOURNALLING LAYER FOR BLOCK DEVICES (JBD2)
 M:	"Theodore Ts'o" <tytso@mit.edu>
+M:	Jan Kara <jack@suse.com>
 L:	linux-ext4@vger.kernel.org
 S:	Maintained
 F:	fs/jbd2/
@@ -6541,7 +6558,7 @@
 
 MARVELL MWIFIEX WIRELESS DRIVER
 M:	Amitkumar Karwar <akarwar@marvell.com>
-M:	Avinash Patil <patila@marvell.com>
+M:	Nishant Sarmukadam <nishants@marvell.com>
 L:	linux-wireless@vger.kernel.org
 S:	Maintained
 F:	drivers/net/wireless/mwifiex/
@@ -6600,6 +6617,14 @@
 F:	drivers/power/max14577_charger.c
 F:	drivers/power/max77693_charger.c
 
+MAXIM MAX77802 MULTIFUNCTION PMIC DEVICE DRIVERS
+M:	Javier Martinez Canillas <javier@osg.samsung.com>
+L:	linux-kernel@vger.kernel.org
+S:	Supported
+F:	drivers/*/*max77802.c
+F:	Documentation/devicetree/bindings/*/*max77802.txt
+F:	include/dt-bindings/*/*max77802.h
+
 MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
 M:	Chanwoo Choi <cw00.choi@samsung.com>
 M:	Krzysztof Kozlowski <k.kozlowski@samsung.com>
@@ -6613,7 +6638,7 @@
 F:	drivers/rtc/rtc-max77686.c
 F:	drivers/clk/clk-max77686.c
 F:	Documentation/devicetree/bindings/mfd/max14577.txt
-F:	Documentation/devicetree/bindings/mfd/max77686.txt
+F:	Documentation/devicetree/bindings/*/max77686.txt
 F:	Documentation/devicetree/bindings/mfd/max77693.txt
 F:	Documentation/devicetree/bindings/clock/maxim,max77686.txt
 F:	include/linux/mfd/max14577*.h
@@ -6686,6 +6711,15 @@
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
 F:	drivers/net/ethernet/mellanox/mlx4/en_*
 
+MELLANOX ETHERNET SWITCH DRIVERS
+M:	Jiri Pirko <jiri@mellanox.com>
+M:	Ido Schimmel <idosch@mellanox.com>
+L:	netdev@vger.kernel.org
+S:	Supported
+W:	http://www.mellanox.com
+Q:	http://patchwork.ozlabs.org/project/netdev/list/
+F:	drivers/net/ethernet/mellanox/mlxsw/
+
 MEMORY MANAGEMENT
 L:	linux-mm@kvack.org
 W:	http://www.linux-mm.org
@@ -8048,7 +8082,6 @@
 
 PIN CONTROLLER - SAMSUNG
 M:	Tomasz Figa <tomasz.figa@gmail.com>
-M:	Thomas Abraham <thomas.abraham@linaro.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:	Maintained
@@ -8125,6 +8158,15 @@
 F:	drivers/power/
 X:	drivers/power/avs/
 
+POWER STATE COORDINATION INTERFACE (PSCI)
+M:	Mark Rutland <mark.rutland@arm.com>
+M:	Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+L:	linux-arm-kernel@lists.infradead.org
+S:	Maintained
+F:	drivers/firmware/psci.c
+F:	include/linux/psci.h
+F:	include/uapi/linux/psci.h
+
 PNP SUPPORT
 M:	"Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
 S:	Maintained
@@ -8908,6 +8950,12 @@
 S:	Supported
 F:	drivers/media/i2c/s5k5baf.c
 
+SAMSUNG S3FWRN5 NFC DRIVER
+M:	Robert Baldyga <r.baldyga@samsung.com>
+L:	linux-nfc@lists.01.org (moderated for non-subscribers)
+S:	Supported
+F:	drivers/nfc/s3fwrn5
+
 SAMSUNG SOC CLOCK DRIVERS
 M:	Sylwester Nawrocki <s.nawrocki@samsung.com>
 M:	Tomasz Figa <tomasz.figa@gmail.com>
@@ -8958,6 +9006,13 @@
 F:	include/linux/platform_data/dma-dw.h
 F:	drivers/dma/dw/
 
+SYNOPSYS DESIGNWARE ETHERNET QOS 4.10a driver
+M:	Lars Persson <lars.persson@axis.com>
+L:	netdev@vger.kernel.org
+S:	Supported
+F:	Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
+F:	drivers/net/ethernet/synopsys/dwc_eth_qos.c
+
 SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
 M:	Seungwon Jeon <tgih.jun@samsung.com>
 M:	Jaehoon Chung <jh80.chung@samsung.com>
@@ -11064,7 +11119,7 @@
 F:	drivers/input/mouse/vmmouse.h
 
 VMWARE VMXNET3 ETHERNET DRIVER
-M:	Shreyas Bhatewara <sbhatewara@vmware.com>
+M:	Shrikrishna Khare <skhare@vmware.com>
 M:	"VMware, Inc." <pv-drivers@vmware.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
@@ -11089,6 +11144,14 @@
 F:	drivers/regulator/
 F:	include/linux/regulator/
 
+VRF
+M:	David Ahern <dsa@cumulusnetworks.com>
+M:	Shrijeet Mukherjee <shm@cumulusnetworks.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/vrf.c
+F:	include/net/vrf.h
+
 VT1211 HARDWARE MONITOR DRIVER
 M:	Juerg Haefliger <juergh@gmail.com>
 L:	lm-sensors@lm-sensors.org
diff --git a/arch/Kconfig b/arch/Kconfig
index 8a8ea71..a71cdbe 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -71,6 +71,12 @@
 	 ( On 32-bit x86, the necessary options added to the compiler
 	   flags may increase the size of the kernel slightly. )
 
+config STATIC_KEYS_SELFTEST
+	bool "Static key selftest"
+	depends on JUMP_LABEL
+	help
+	  Boot time self-test of the branch patching code.
+
 config OPTPROBES
 	def_bool y
 	depends on KPROBES && HAVE_OPTPROBES
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 8f8eafb..e8c9560 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -29,13 +29,13 @@
  * branch back to restart the operation.
  */
 
-#define ATOMIC_OP(op)							\
+#define ATOMIC_OP(op, asm_op)						\
 static __inline__ void atomic_##op(int i, atomic_t * v)			\
 {									\
 	unsigned long temp;						\
 	__asm__ __volatile__(						\
 	"1:	ldl_l %0,%1\n"						\
-	"	" #op "l %0,%2,%0\n"					\
+	"	" #asm_op " %0,%2,%0\n"					\
 	"	stl_c %0,%1\n"						\
 	"	beq %0,2f\n"						\
 	".subsection 2\n"						\
@@ -45,15 +45,15 @@
 	:"Ir" (i), "m" (v->counter));					\
 }									\
 
-#define ATOMIC_OP_RETURN(op)						\
+#define ATOMIC_OP_RETURN(op, asm_op)					\
 static inline int atomic_##op##_return(int i, atomic_t *v)		\
 {									\
 	long temp, result;						\
 	smp_mb();							\
 	__asm__ __volatile__(						\
 	"1:	ldl_l %0,%1\n"						\
-	"	" #op "l %0,%3,%2\n"					\
-	"	" #op "l %0,%3,%0\n"					\
+	"	" #asm_op " %0,%3,%2\n"					\
+	"	" #asm_op " %0,%3,%0\n"					\
 	"	stl_c %0,%1\n"						\
 	"	beq %0,2f\n"						\
 	".subsection 2\n"						\
@@ -65,13 +65,13 @@
 	return result;							\
 }
 
-#define ATOMIC64_OP(op)							\
+#define ATOMIC64_OP(op, asm_op)						\
 static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
 {									\
 	unsigned long temp;						\
 	__asm__ __volatile__(						\
 	"1:	ldq_l %0,%1\n"						\
-	"	" #op "q %0,%2,%0\n"					\
+	"	" #asm_op " %0,%2,%0\n"					\
 	"	stq_c %0,%1\n"						\
 	"	beq %0,2f\n"						\
 	".subsection 2\n"						\
@@ -81,15 +81,15 @@
 	:"Ir" (i), "m" (v->counter));					\
 }									\
 
-#define ATOMIC64_OP_RETURN(op)						\
+#define ATOMIC64_OP_RETURN(op, asm_op)					\
 static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	\
 {									\
 	long temp, result;						\
 	smp_mb();							\
 	__asm__ __volatile__(						\
 	"1:	ldq_l %0,%1\n"						\
-	"	" #op "q %0,%3,%2\n"					\
-	"	" #op "q %0,%3,%0\n"					\
+	"	" #asm_op " %0,%3,%2\n"					\
+	"	" #asm_op " %0,%3,%0\n"					\
 	"	stq_c %0,%1\n"						\
 	"	beq %0,2f\n"						\
 	".subsection 2\n"						\
@@ -101,15 +101,27 @@
 	return result;							\
 }
 
-#define ATOMIC_OPS(opg)							\
-	ATOMIC_OP(opg)							\
-	ATOMIC_OP_RETURN(opg)						\
-	ATOMIC64_OP(opg)						\
-	ATOMIC64_OP_RETURN(opg)
+#define ATOMIC_OPS(op)							\
+	ATOMIC_OP(op, op##l)						\
+	ATOMIC_OP_RETURN(op, op##l)					\
+	ATOMIC64_OP(op, op##q)						\
+	ATOMIC64_OP_RETURN(op, op##q)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define atomic_andnot atomic_andnot
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(andnot, bic)
+ATOMIC_OP(or, bis)
+ATOMIC_OP(xor, xor)
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(andnot, bic)
+ATOMIC64_OP(or, bis)
+ATOMIC64_OP(xor, xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 87d18ae..c3ecda0 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -172,9 +172,13 @@
 
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
-ATOMIC_OP(and, &=, and)
 
-#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))
+#define atomic_andnot atomic_andnot
+
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(andnot, &= ~, bic)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, xor)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 41cbb4a..0d1b717 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -188,6 +188,9 @@
 config ARCH_HAS_BANDGAP
 	bool
 
+config FIX_EARLYCON_MEM
+	def_bool y if MMU
+
 config GENERIC_HWEIGHT
 	bool
 	default y
@@ -1496,6 +1499,7 @@
 config ARM_PSCI
 	bool "Support for the ARM Power State Coordination Interface (PSCI)"
 	depends on CPU_V7
+	select ARM_PSCI_FW
 	help
 	  Say Y here if you want Linux to communicate with system firmware
 	  implementing the PSCI specification for CPU-centric power
@@ -1700,13 +1704,24 @@
 	  consumed by page tables.  Setting this option will allow
 	  user-space 2nd level page tables to reside in high memory.
 
-config HW_PERF_EVENTS
-	bool "Enable hardware performance counter support for perf events"
-	depends on PERF_EVENTS
+config CPU_SW_DOMAIN_PAN
+	bool "Enable use of CPU domains to implement privileged no-access"
+	depends on MMU && !ARM_LPAE
 	default y
 	help
-	  Enable hardware performance counter support for perf events. If
-	  disabled, perf events will use software events only.
+	  Increase kernel security by ensuring that normal kernel accesses
+	  are unable to access userspace addresses.  This can help prevent
+	  use-after-free bugs becoming an exploitable privilege escalation
+	  by ensuring that magic values (such as LIST_POISON) will always
+	  fault when dereferenced.
+
+	  CPUs with low-vector mappings use a best-efforts implementation.
+	  Their lower 1MB needs to remain accessible for the vectors, but
+	  the remainder of userspace will become appropriately inaccessible.
+
+config HW_PERF_EVENTS
+	def_bool y
+	depends on ARM_PMU
 
 config SYS_SUPPORTS_HUGETLBFS
        def_bool y
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 9117c1a..d23e252 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -717,7 +717,7 @@
 		};
 
 		mac: ethernet@4a100000 {
-			compatible = "ti,cpsw";
+			compatible = "ti,am335x-cpsw","ti,cpsw";
 			ti,hwmods = "cpgmac0";
 			clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
 			clock-names = "fck", "cpts";
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index a0d3d4b..5d65db9 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1418,7 +1418,7 @@
 		};
 
 		mac: ethernet@4a100000 {
-			compatible = "ti,cpsw";
+			compatible = "ti,dra7-cpsw","ti,cpsw";
 			ti,hwmods = "gmac";
 			clocks = <&dpll_gmac_ck>, <&gmac_gmii_ref_clk_div>;
 			clock-names = "fck", "cpts";
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index c5fbde3..50f5e9d 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -294,7 +294,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <0>;
-
+			gpio-ranges = <&pinctrl 0 0 32>;
 			clocks = <&prcc_pclk 1 9>;
 		};
 
@@ -309,7 +309,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <1>;
-
+			gpio-ranges = <&pinctrl 0 32 5>;
 			clocks = <&prcc_pclk 1 9>;
 		};
 
@@ -324,7 +324,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <2>;
-
+			gpio-ranges = <&pinctrl 0 64 32>;
 			clocks = <&prcc_pclk 3 8>;
 		};
 
@@ -339,7 +339,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <3>;
-
+			gpio-ranges = <&pinctrl 0 96 2>;
 			clocks = <&prcc_pclk 3 8>;
 		};
 
@@ -354,7 +354,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <4>;
-
+			gpio-ranges = <&pinctrl 0 128 32>;
 			clocks = <&prcc_pclk 3 8>;
 		};
 
@@ -369,7 +369,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <5>;
-
+			gpio-ranges = <&pinctrl 0 160 12>;
 			clocks = <&prcc_pclk 3 8>;
 		};
 
@@ -384,7 +384,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <6>;
-
+			gpio-ranges = <&pinctrl 0 192 32>;
 			clocks = <&prcc_pclk 2 11>;
 		};
 
@@ -399,7 +399,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <7>;
-
+			gpio-ranges = <&pinctrl 0 224 7>;
 			clocks = <&prcc_pclk 2 11>;
 		};
 
@@ -414,12 +414,15 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <8>;
-
+			gpio-ranges = <&pinctrl 0 256 12>;
 			clocks = <&prcc_pclk 5 1>;
 		};
 
-		pinctrl {
+		pinctrl: pinctrl {
 			compatible = "stericsson,db8500-pinctrl";
+			nomadik-gpio-chips = <&gpio0>, <&gpio1>, <&gpio2>, <&gpio3>,
+						<&gpio4>, <&gpio5>, <&gpio6>, <&gpio7>,
+						<&gpio8>;
 			prcm = <&prcmu>;
 		};
 
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index 176e332..314f59c 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -59,6 +59,7 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		gpio-bank = <0>;
+		gpio-ranges = <&pinctrl 0 0 32>;
 		clocks = <&pclk>;
 	};
 
@@ -72,6 +73,7 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		gpio-bank = <1>;
+		gpio-ranges = <&pinctrl 0 32 32>;
 		clocks = <&pclk>;
 	};
 
@@ -85,12 +87,14 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		gpio-bank = <2>;
+		gpio-ranges = <&pinctrl 0 64 32>;
 		clocks = <&pclk>;
 	};
 
 	gpio3: gpio@101e7000 {
 		compatible = "st,nomadik-gpio";
 		reg =  <0x101e7000 0x80>;
+		ngpio = <28>;
 		interrupt-parent = <&vica>;
 		interrupts = <9>;
 		interrupt-controller;
@@ -98,11 +102,13 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		gpio-bank = <3>;
+		gpio-ranges = <&pinctrl 0 96 28>;
 		clocks = <&pclk>;
 	};
 
-	pinctrl {
+	pinctrl: pinctrl {
 		compatible = "stericsson,stn8815-pinctrl";
+		nomadik-gpio-chips = <&gpio0>, <&gpio1>, <&gpio2>, <&gpio3>;
 		/* Pin configurations */
 		uart1 {
 			uart1_default_mux: uart1_mux {
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 92e54d7..2b25b60 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -65,14 +65,10 @@
 	return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster);
 }
 
-static int mcpm_cpu_disable(unsigned int cpu)
+static bool mcpm_cpu_can_disable(unsigned int cpu)
 {
-	/*
-	 * We assume all CPUs may be shut down.
-	 * This would be the hook to use for eventual Secure
-	 * OS migration requests as described in the PSCI spec.
-	 */
-	return 0;
+	/* We assume all CPUs may be shut down. */
+	return true;
 }
 
 static void mcpm_cpu_die(unsigned int cpu)
@@ -92,7 +88,7 @@
 	.smp_secondary_init	= mcpm_secondary_init,
 #ifdef CONFIG_HOTPLUG_CPU
 	.cpu_kill		= mcpm_cpu_kill,
-	.cpu_disable		= mcpm_cpu_disable,
+	.cpu_can_disable	= mcpm_cpu_can_disable,
 	.cpu_die		= mcpm_cpu_die,
 #endif
 };
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 9504e77..3eaf8fb 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -124,14 +124,14 @@
 CONFIG_REGULATOR_S5M8767=y
 CONFIG_REGULATOR_TPS65090=y
 CONFIG_DRM=y
-CONFIG_DRM_PTN3460=y
-CONFIG_DRM_PS8622=y
+CONFIG_DRM_NXP_PTN3460=y
+CONFIG_DRM_PARADE_PS8622=y
 CONFIG_DRM_EXYNOS=y
 CONFIG_DRM_EXYNOS_FIMD=y
 CONFIG_DRM_EXYNOS_DSI=y
 CONFIG_DRM_EXYNOS_HDMI=y
 CONFIG_DRM_PANEL_SIMPLE=y
-CONFIG_DRM_PANEL_S6E8AA0=y
+CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=y
 CONFIG_FB_SIMPLE=y
 CONFIG_EXYNOS_VIDEO=y
 CONFIG_EXYNOS_MIPI_DSI=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 824a0cf..f84471d 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -440,8 +440,8 @@
 CONFIG_DRM=y
 # CONFIG_DRM_I2C_CH7006 is not set
 # CONFIG_DRM_I2C_SIL164 is not set
-CONFIG_DRM_PTN3460=m
-CONFIG_DRM_PS8622=m
+CONFIG_DRM_NXP_PTN3460=m
+CONFIG_DRM_PARADE_PS8622=m
 CONFIG_DRM_NOUVEAU=m
 CONFIG_DRM_EXYNOS=m
 CONFIG_DRM_EXYNOS_DSI=y
@@ -449,7 +449,7 @@
 CONFIG_DRM_EXYNOS_HDMI=y
 CONFIG_DRM_RCAR_DU=m
 CONFIG_DRM_TEGRA=y
-CONFIG_DRM_PANEL_S6E8AA0=m
+CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
 CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_FB_ARMCLCD=y
 CONFIG_FB_WM8505=y
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 30b3bc1..be648eb 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -12,7 +12,6 @@
 generic-y += kdebug.h
 generic-y += local.h
 generic-y += local64.h
-generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += msgbuf.h
 generic-y += param.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 4abe572..7bbf325 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -108,33 +108,37 @@
 	.endm
 #endif
 
-	.macro asm_trace_hardirqs_off
+	.macro asm_trace_hardirqs_off, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
+	.if \save
 	stmdb   sp!, {r0-r3, ip, lr}
+	.endif
 	bl	trace_hardirqs_off
+	.if \save
 	ldmia	sp!, {r0-r3, ip, lr}
+	.endif
 #endif
 	.endm
 
-	.macro asm_trace_hardirqs_on_cond, cond
+	.macro asm_trace_hardirqs_on, cond=al, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
 	/*
 	 * actually the registers should be pushed and pop'd conditionally, but
 	 * after bl the flags are certainly clobbered
 	 */
+	.if \save
 	stmdb   sp!, {r0-r3, ip, lr}
+	.endif
 	bl\cond	trace_hardirqs_on
+	.if \save
 	ldmia	sp!, {r0-r3, ip, lr}
+	.endif
 #endif
 	.endm
 
-	.macro asm_trace_hardirqs_on
-	asm_trace_hardirqs_on_cond al
-	.endm
-
-	.macro disable_irq
+	.macro disable_irq, save=1
 	disable_irq_notrace
-	asm_trace_hardirqs_off
+	asm_trace_hardirqs_off \save
 	.endm
 
 	.macro enable_irq
@@ -173,7 +177,7 @@
 
 	.macro restore_irqs, oldcpsr
 	tst	\oldcpsr, #PSR_I_BIT
-	asm_trace_hardirqs_on_cond eq
+	asm_trace_hardirqs_on cond=eq
 	restore_irqs_notrace \oldcpsr
 	.endm
 
@@ -445,6 +449,53 @@
 #endif
 	.endm
 
+	.macro	uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_DISABLE
+	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
+	.if	\isb
+	instr_sync
+	.endif
+#endif
+	.endm
+
+	.macro	uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_ENABLE
+	mcr	p15, 0, \tmp, c3, c0, 0
+	.if	\isb
+	instr_sync
+	.endif
+#endif
+	.endm
+
+	.macro	uaccess_save, tmp
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	mrc	p15, 0, \tmp, c3, c0, 0
+	str	\tmp, [sp, #S_FRAME_SIZE]
+#endif
+	.endm
+
+	.macro	uaccess_restore
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	ldr	r0, [sp, #S_FRAME_SIZE]
+	mcr	p15, 0, r0, c3, c0, 0
+#endif
+	.endm
+
+	.macro	uaccess_save_and_disable, tmp
+	uaccess_save \tmp
+	uaccess_disable \tmp
+	.endm
+
 	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
 	.macro	ret\c, reg
 #if __LINUX_ARM_ARCH__ < 6
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index e22c119..fe3ef39 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -57,12 +57,11 @@
 }									\
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
-static inline int atomic_##op##_return(int i, atomic_t *v)		\
+static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 {									\
 	unsigned long tmp;						\
 	int result;							\
 									\
-	smp_mb();							\
 	prefetchw(&v->counter);						\
 									\
 	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
@@ -75,17 +74,17 @@
 	: "r" (&v->counter), "Ir" (i)					\
 	: "cc");							\
 									\
-	smp_mb();							\
-									\
 	return result;							\
 }
 
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+#define atomic_add_return_relaxed	atomic_add_return_relaxed
+#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
+
+static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 {
 	int oldval;
 	unsigned long res;
 
-	smp_mb();
 	prefetchw(&ptr->counter);
 
 	do {
@@ -99,10 +98,9 @@
 		    : "cc");
 	} while (res);
 
-	smp_mb();
-
 	return oldval;
 }
+#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
@@ -194,6 +192,13 @@
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
+#define atomic_andnot atomic_andnot
+
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(andnot, &= ~, bic)
+ATOMIC_OP(or,  |=, orr)
+ATOMIC_OP(xor, ^=, eor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -290,12 +295,12 @@
 }									\
 
 #define ATOMIC64_OP_RETURN(op, op1, op2)				\
-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+static inline long long							\
+atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
 {									\
 	long long result;						\
 	unsigned long tmp;						\
 									\
-	smp_mb();							\
 	prefetchw(&v->counter);						\
 									\
 	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
@@ -309,8 +314,6 @@
 	: "r" (&v->counter), "r" (i)					\
 	: "cc");							\
 									\
-	smp_mb();							\
-									\
 	return result;							\
 }
 
@@ -321,17 +324,26 @@
 ATOMIC64_OPS(add, adds, adc)
 ATOMIC64_OPS(sub, subs, sbc)
 
+#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
+
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC64_OP(and, and, and)
+ATOMIC64_OP(andnot, bic, bic)
+ATOMIC64_OP(or,  orr, orr)
+ATOMIC64_OP(xor, eor, eor)
+
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
-					long long new)
+static inline long long
+atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
 {
 	long long oldval;
 	unsigned long res;
 
-	smp_mb();
 	prefetchw(&ptr->counter);
 
 	do {
@@ -346,17 +358,15 @@
 		: "cc");
 	} while (res);
 
-	smp_mb();
-
 	return oldval;
 }
+#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed
 
-static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
 {
 	long long result;
 	unsigned long tmp;
 
-	smp_mb();
 	prefetchw(&ptr->counter);
 
 	__asm__ __volatile__("@ atomic64_xchg\n"
@@ -368,10 +378,9 @@
 	: "r" (&ptr->counter), "r" (new)
 	: "cc");
 
-	smp_mb();
-
 	return result;
 }
+#define atomic64_xchg_relaxed		atomic64_xchg_relaxed
 
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
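
As an aside on the ordering model: the _relaxed variants above provide atomicity only; the fully ordered atomic_add_return() and friends are reconstructed by the generic atomic layer, which brackets the relaxed operation with smp_mb(). A user-space C11 analogue of that construction (illustrative names, not kernel API):

#include <stdatomic.h>

/* relaxed: atomic, but no ordering against surrounding accesses */
static int add_return_relaxed(_Atomic int *v, int i)
{
	return atomic_fetch_add_explicit(v, i, memory_order_relaxed) + i;
}

/* fully ordered, built the way the generic kernel layer builds it:
 * a full barrier on each side of the relaxed operation */
static int add_return(_Atomic int *v, int i)
{
	int ret;

	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() analogue */
	ret = add_return_relaxed(v, i);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() analogue */
	return ret;
}
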
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 6c2327e..3ff5642 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -2,7 +2,6 @@
 #define __ASM_BARRIER_H
 
 #ifndef __ASSEMBLY__
-#include <asm/outercache.h>
 
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
@@ -37,12 +36,20 @@
 #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
 #endif
 
+#ifdef CONFIG_ARM_HEAVY_MB
+extern void (*soc_mb)(void);
+extern void arm_heavy_mb(void);
+#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0)
+#else
+#define __arm_heavy_mb(x...) dsb(x)
+#endif
+
 #ifdef CONFIG_ARCH_HAS_BARRIERS
 #include <mach/barriers.h>
 #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
-#define mb()		do { dsb(); outer_sync(); } while (0)
+#define mb()		__arm_heavy_mb()
 #define rmb()		dsb()
-#define wmb()		do { dsb(st); outer_sync(); } while (0)
+#define wmb()		__arm_heavy_mb(st)
 #define dma_rmb()	dmb(osh)
 #define dma_wmb()	dmb(oshst)
 #else
@@ -67,12 +74,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
 	___p1;								\
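
The smp_store_release()/smp_load_acquire() pair above implements the classic message-passing idiom: a reader that observes the released store is guaranteed to also observe everything written before it. A minimal user-space C11 sketch of the same guarantee (illustrative only):

#include <stdatomic.h>

static int data;
static _Atomic int flag;

static void producer(void)
{
	data = 42;			/* plain store, published below */
	atomic_store_explicit(&flag, 1, memory_order_release);
					/* ~ smp_store_release(&flag, 1) */
}

static int consumer(void)
{
	if (atomic_load_explicit(&flag, memory_order_acquire))
					/* ~ smp_load_acquire(&flag) */
		return data;		/* guaranteed to see 42 */
	return -1;			/* flag not yet set */
}
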
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 5638099..e943e6c 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -35,9 +35,9 @@
 static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	*p |= mask;
@@ -47,9 +47,9 @@
 static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	*p &= ~mask;
@@ -59,9 +59,9 @@
 static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	*p ^= mask;
@@ -73,9 +73,9 @@
 {
 	unsigned long flags;
 	unsigned int res;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	res = *p;
@@ -90,9 +90,9 @@
 {
 	unsigned long flags;
 	unsigned int res;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	res = *p;
@@ -107,9 +107,9 @@
 {
 	unsigned long flags;
 	unsigned int res;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	res = *p;
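
BIT_MASK() and BIT_WORD() come from linux/bitops.h and expand to (1UL << ((nr) % BITS_PER_LONG)) and ((nr) / BITS_PER_LONG), so on 32-bit ARM they reduce to exactly the open-coded forms being replaced. A standalone check (illustrative):

#include <assert.h>
#include <limits.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

int main(void)
{
	unsigned int bit = 70;		/* any bit number */

	if (BITS_PER_LONG == 32) {	/* 32-bit build */
		assert(BIT_MASK(bit) == 1UL << (bit & 31));
		assert(BIT_WORD(bit) == bit >> 5);
	}
	return 0;
}
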
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 4812cda..d5525bf 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -140,8 +140,6 @@
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_map_area			cpu_cache.dma_map_area
-#define dmac_unmap_area			cpu_cache.dma_unmap_area
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
@@ -161,8 +159,6 @@
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-extern void dmac_map_area(const void *, size_t, int);
-extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
@@ -506,4 +502,21 @@
 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
 			     void *kaddr, unsigned long len);
 
+/**
+ * secure_flush_area - ensure coherency across the secure boundary
+ * @addr: virtual address
+ * @size: size of region
+ *
+ * Ensure that the specified area of memory is coherent across the secure
+ * boundary from the non-secure side.  This is used when calling secure
+ * firmware that does not itself ensure coherency.
+ */
+static inline void secure_flush_area(const void *addr, size_t size)
+{
+	phys_addr_t phys = __pa(addr);
+
+	__cpuc_flush_dcache_area((void *)addr, size);
+	outer_flush_range(phys, phys + size);
+}
+
 #endif
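
A usage sketch for the helper above: a driver pushing a command block to secure firmware by physical address must flush it through both cache levels first. Here invoke_secure_fw() is a hypothetical SMC wrapper, not an existing kernel API:

/* hypothetical firmware-call wrapper taking a physical address */
extern int invoke_secure_fw(phys_addr_t arg);

struct fw_cmd {
	u32 opcode;
	u32 arg;
};

static int send_fw_cmd(struct fw_cmd *cmd)
{
	/* make the command block visible to the secure side */
	secure_flush_area(cmd, sizeof(*cmd));

	return invoke_secure_fw(__pa(cmd));
}
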
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 1692a05..916a274 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -35,7 +35,6 @@
 	unsigned int tmp;
 #endif
 
-	smp_mb();
 	prefetchw((const void *)ptr);
 
 	switch (size) {
@@ -98,12 +97,11 @@
 		__bad_xchg(ptr, size), ret = 0;
 		break;
 	}
-	smp_mb();
 
 	return ret;
 }
 
-#define xchg(ptr, x) ({							\
+#define xchg_relaxed(ptr, x) ({						\
 	(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),		\
 				   sizeof(*(ptr)));			\
 })
@@ -117,6 +115,8 @@
 #error "SMP is not supported on this platform"
 #endif
 
+#define xchg xchg_relaxed
+
 /*
  * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
  * them available.
@@ -194,23 +194,11 @@
 	return oldval;
 }
 
-static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
-					 unsigned long new, int size)
-{
-	unsigned long ret;
-
-	smp_mb();
-	ret = __cmpxchg(ptr, old, new, size);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg(ptr,o,n) ({						\
-	(__typeof__(*(ptr)))__cmpxchg_mb((ptr),				\
-					 (unsigned long)(o),		\
-					 (unsigned long)(n),		\
-					 sizeof(*(ptr)));		\
+#define cmpxchg_relaxed(ptr,o,n) ({					\
+	(__typeof__(*(ptr)))__cmpxchg((ptr),				\
+				      (unsigned long)(o),		\
+				      (unsigned long)(n),		\
+				      sizeof(*(ptr)));			\
 })
 
 static inline unsigned long __cmpxchg_local(volatile void *ptr,
@@ -273,25 +261,6 @@
 
 #define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
 
-static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
-						unsigned long long old,
-						unsigned long long new)
-{
-	unsigned long long ret;
-
-	smp_mb();
-	ret = __cmpxchg64(ptr, old, new);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg64(ptr, o, n) ({						\
-	(__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
-					   (unsigned long long)(o),	\
-					   (unsigned long long)(n));	\
-})
-
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
 
 #endif /* __ASM_ARM_CMPXCHG_H */
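
As with the atomics above, dropping __cmpxchg_mb()/__cmpxchg64_mb() does not lose the fully ordered forms: when an architecture defines only cmpxchg_relaxed(), the generic layer in include/linux/atomic.h synthesizes cmpxchg() by fencing the relaxed operation. A simplified sketch of that fallback:

/* sketch, simplified from the generic fallback in include/linux/atomic.h */
#define cmpxchg(ptr, o, n) ({						\
	__typeof__(*(ptr)) __ret;					\
	smp_mb();	/* order earlier accesses before the cmpxchg */	\
	__ret = cmpxchg_relaxed((ptr), (o), (n));			\
	smp_mb();	/* order the cmpxchg before later accesses */	\
	__ret;								\
})
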
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index b52101d..a68b9d8 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -14,7 +14,7 @@
 #include <xen/xen.h>
 #include <asm/xen/hypervisor.h>
 
-#define DMA_ERROR_CODE	(~0)
+#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
 extern struct dma_map_ops arm_dma_ops;
 extern struct dma_map_ops arm_coherent_dma_ops;
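
The explicit cast matters because a bare ~0 has type int; once dma_addr_t is 64-bit (as with LPAE), the constant's type no longer matches the values it is compared with, which can trip type-checked macros and format strings. A quick illustration, with u64 standing in for a 64-bit dma_addr_t:

#include <stdio.h>

typedef unsigned long long u64;		/* stand-in for 64-bit dma_addr_t */

int main(void)
{
	printf("%zu\n", sizeof(~0));		/* 4: plain int */
	printf("%zu\n", sizeof(~(u64)0));	/* 8: full dma_addr_t width */
	return 0;
}
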
 
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe44..e878129 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -34,15 +34,14 @@
  */
 #ifndef CONFIG_IO_36
 #define DOMAIN_KERNEL	0
-#define DOMAIN_TABLE	0
 #define DOMAIN_USER	1
 #define DOMAIN_IO	2
 #else
 #define DOMAIN_KERNEL	2
-#define DOMAIN_TABLE	2
 #define DOMAIN_USER	1
 #define DOMAIN_IO	0
 #endif
+#define DOMAIN_VECTORS	3
 
 /*
  * Domain types
@@ -55,11 +54,46 @@
 #define DOMAIN_MANAGER	1
 #endif
 
-#define domain_val(dom,type)	((type) << (2*(dom)))
+#define domain_mask(dom)	((3) << (2 * (dom)))
+#define domain_val(dom,type)	((type) << (2 * (dom)))
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+	(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
+#define DACR_INIT \
+	(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+	domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE	\
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE	\
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_CPU_USE_DOMAINS
+static inline unsigned int get_domain(void)
+{
+	unsigned int domain;
+
+	asm(
+	"mrc	p15, 0, %0, c3, c0	@ get domain"
+	 : "=r" (domain));
+
+	return domain;
+}
+
 static inline void set_domain(unsigned val)
 {
 	asm volatile(
@@ -68,17 +102,16 @@
 	isb();
 }
 
+#ifdef CONFIG_CPU_USE_DOMAINS
 #define modify_domain(dom,type)					\
 	do {							\
-	struct thread_info *thread = current_thread_info();	\
-	unsigned int domain = thread->cpu_domain;		\
-	domain &= ~domain_val(dom, DOMAIN_MANAGER);		\
-	thread->cpu_domain = domain | domain_val(dom, type);	\
-	set_domain(thread->cpu_domain);				\
+		unsigned int domain = get_domain();		\
+		domain &= ~domain_mask(dom);			\
+		domain = domain | domain_val(dom, type);	\
+		set_domain(domain);				\
 	} while (0)
 
 #else
-static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type)	{ }
 #endif
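
Each domain is a 2-bit field in the Domain Access Control Register, with the architectural encodings 0 = no access, 1 = client (permissions checked) and 3 = manager (permissions bypassed). DACR_UACCESS_DISABLE and DACR_UACCESS_ENABLE therefore differ only in the two DOMAIN_USER bits. A worked expansion using the non-IO_36 numbering above:

#include <stdio.h>

#define domain_val(dom, type)	((type) << (2 * (dom)))
#define DOMAIN_USER		1	/* non-IO_36 numbering */
#define DOMAIN_NOACCESS		0
#define DOMAIN_CLIENT		1

int main(void)
{
	/* the two DACR values differ only in the user domain field */
	printf("user field, uaccess disabled: %#x\n",
	       domain_val(DOMAIN_USER, DOMAIN_NOACCESS));	/* 0x0 */
	printf("user field, uaccess enabled:  %#x\n",
	       domain_val(DOMAIN_USER, DOMAIN_CLIENT));		/* 0x4 */
	return 0;
}
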
 
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 0415eae..58cfe9f 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -6,9 +6,13 @@
 #define FIXADDR_TOP		(FIXADDR_END - PAGE_SIZE)
 
 #include <asm/kmap_types.h>
+#include <asm/pgtable.h>
 
 enum fixed_addresses {
-	FIX_KMAP_BEGIN,
+	FIX_EARLYCON_MEM_BASE,
+	__end_of_permanent_fixed_addresses,
+
+	FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
 	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
 
 	/* Support writing RO kernel text via kprobes, jump labels, etc. */
@@ -18,7 +22,16 @@
 	__end_of_fixed_addresses
 };
 
+#define FIXMAP_PAGE_COMMON	(L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
+
+#define FIXMAP_PAGE_NORMAL	(FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
+
+/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
+#define FIXMAP_PAGE_IO		(FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
+#define FIXMAP_PAGE_NOCACHE	FIXMAP_PAGE_IO
+
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+void __init early_fixmap_init(void);
 
 #include <asm-generic/fixmap.h>
 
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 5eed828..6795368 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -22,8 +22,11 @@
 #ifdef CONFIG_SMP
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({								\
+	unsigned int __ua_flags;				\
 	smp_mb();						\
 	prefetchw(uaddr);					\
+	__ua_flags = uaccess_save_and_enable();			\
 	__asm__ __volatile__(					\
 	"1:	ldrex	%1, [%3]\n"				\
 	"	" insn "\n"					\
@@ -34,12 +37,15 @@
 	__futex_atomic_ex_table("%5")				\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
-	: "cc", "memory")
+	: "cc", "memory");					\
+	uaccess_restore(__ua_flags);				\
+})
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	unsigned int __ua_flags;
 	int ret;
 	u32 val;
 
@@ -49,6 +55,7 @@
 	smp_mb();
 	/* Prefetching cannot fault */
 	prefetchw(uaddr);
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	ldrex	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -61,6 +68,7 @@
 	: "=&r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);
 	smp_mb();
 
 	*uval = val;
@@ -73,6 +81,8 @@
 #include <asm/domain.h>
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({								\
+	unsigned int __ua_flags = uaccess_save_and_enable();	\
 	__asm__ __volatile__(					\
 	"1:	" TUSER(ldr) "	%1, [%3]\n"			\
 	"	" insn "\n"					\
@@ -81,12 +91,15 @@
 	__futex_atomic_ex_table("%5")				\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
-	: "cc", "memory")
+	: "cc", "memory");					\
+	uaccess_restore(__ua_flags);				\
+})
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	unsigned int __ua_flags;
 	int ret = 0;
 	u32 val;
 
@@ -94,6 +107,7 @@
 		return -EFAULT;
 
 	preempt_disable();
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	" TUSER(ldr) "	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -103,6 +117,7 @@
 	: "+r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);
 
 	*uval = val;
 	preempt_enable();
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index a3c24cd..cab07f6 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -158,8 +158,6 @@
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
 #define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
-#define dmac_map_area			__glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
 #endif
 
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 5f337dc..34f7b69 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -4,23 +4,32 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
+#include <asm/unified.h>
 
 #define JUMP_LABEL_NOP_SIZE 4
 
-#ifdef CONFIG_THUMB2_KERNEL
-#define JUMP_LABEL_NOP	"nop.w"
-#else
-#define JUMP_LABEL_NOP	"nop"
-#endif
-
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:\n\t"
-		 JUMP_LABEL_NOP "\n\t"
+		 WASM(nop) "\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 ".word 1b, %l[l_yes], %c0\n\t"
 		 ".popsection\n\t"
-		 : :  "i" (key) :  : l_yes);
+		 : :  "i" (&((char *)key)[branch]) :  : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+		 WASM(b) " %l[l_yes]\n\t"
+		 ".pushsection __jump_table,  \"aw\"\n\t"
+		 ".word 1b, %l[l_yes], %c0\n\t"
+		 ".popsection\n\t"
+		 : :  "i" (&((char *)key)[branch]) :  : l_yes);
 
 	return false;
 l_yes:
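
The new (key, branch) form backs the static_branch_likely()/static_branch_unlikely() API introduced alongside this change, which decouples the compiled-in default from the key's true/false sense. Typical usage looks like this (do_fast_path()/do_slow_path() are hypothetical):

extern void do_fast_path(void);		/* hypothetical */
extern void do_slow_path(void);		/* hypothetical */

static DEFINE_STATIC_KEY_FALSE(use_fast_path);

void hot_function(void)
{
	/* compiles to a single NOP until the key is switched on */
	if (static_branch_unlikely(&use_fast_path))
		do_fast_path();
	else
		do_slow_path();
}

void enable_fast_path(void)		/* e.g. after feature detection */
{
	static_branch_enable(&use_fast_path);
}
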
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 563b92f..c2bf24f 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -129,21 +129,4 @@
 
 #endif
 
-#ifdef CONFIG_OUTER_CACHE_SYNC
-/**
- * outer_sync - perform a sync point for outer cache
- *
- * Ensure that all outer cache operations are complete and any store
- * buffers are drained.
- */
-static inline void outer_sync(void)
-{
-	if (outer_cache.sync)
-		outer_cache.sync();
-}
-#else
-static inline void outer_sync(void)
-{ }
-#endif
-
 #endif	/* __ASM_OUTERCACHE_H */
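
outer_sync() can go because mb()/wmb() now funnel through arm_heavy_mb(), which performs the outer-cache sync itself and additionally offers the soc_mb hook seen in the barrier.h hunk above. A simplified sketch of its shape (the real definition lives in arch/arm/mm/flush.c):

/* sketch, simplified */
void (*soc_mb)(void);

void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)
		outer_cache.sync();	/* what outer_sync() used to do */
#endif
	if (soc_mb)
		soc_mb();		/* SoC-specific barrier work */
}
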
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5e68278..d0131ee 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -23,6 +23,7 @@
 #define PMD_PXNTABLE		(_AT(pmdval_t, 1) << 2)     /* v7 */
 #define PMD_BIT4		(_AT(pmdval_t, 1) << 4)
 #define PMD_DOMAIN(x)		(_AT(pmdval_t, (x)) << 5)
+#define PMD_DOMAIN_MASK		PMD_DOMAIN(0x0f)
 #define PMD_PROTECTION		(_AT(pmdval_t, 1) << 9)		/* v5 */
 /*
  *   - section
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
deleted file mode 100644
index 3fc87df..0000000
--- a/arch/arm/include/asm/pmu.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- *  linux/arch/arm/include/asm/pmu.h
- *
- *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifndef __ARM_PMU_H__
-#define __ARM_PMU_H__
-
-#include <linux/interrupt.h>
-#include <linux/perf_event.h>
-
-#include <asm/cputype.h>
-
-/*
- * struct arm_pmu_platdata - ARM PMU platform data
- *
- * @handle_irq: an optional handler which will be called from the
- *	interrupt and passed the address of the low level handler,
- *	and can be used to implement any platform specific handling
- *	before or after calling it.
- */
-struct arm_pmu_platdata {
-	irqreturn_t (*handle_irq)(int irq, void *dev,
-				  irq_handler_t pmu_handler);
-};
-
-#ifdef CONFIG_HW_PERF_EVENTS
-
-/*
- * The ARMv7 CPU PMU supports up to 32 event counters.
- */
-#define ARMPMU_MAX_HWEVENTS		32
-
-#define HW_OP_UNSUPPORTED		0xFFFF
-#define C(_x)				PERF_COUNT_HW_CACHE_##_x
-#define CACHE_OP_UNSUPPORTED		0xFFFF
-
-#define PERF_MAP_ALL_UNSUPPORTED					\
-	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
-
-#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
-[0 ... C(MAX) - 1] = {							\
-	[0 ... C(OP_MAX) - 1] = {					\
-		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
-	},								\
-}
-
-/* The events for a given PMU register set. */
-struct pmu_hw_events {
-	/*
-	 * The events that are active on the PMU for the given index.
-	 */
-	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];
-
-	/*
-	 * A 1 bit for an index indicates that the counter is being used for
-	 * an event. A 0 means that the counter can be used.
-	 */
-	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
-
-	/*
-	 * Hardware lock to serialize accesses to PMU registers. Needed for the
-	 * read/modify/write sequences.
-	 */
-	raw_spinlock_t		pmu_lock;
-
-	/*
-	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
-	 * already have to allocate this struct per cpu.
-	 */
-	struct arm_pmu		*percpu_pmu;
-};
-
-struct arm_pmu {
-	struct pmu	pmu;
-	cpumask_t	active_irqs;
-	cpumask_t	supported_cpus;
-	int		*irq_affinity;
-	char		*name;
-	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
-	void		(*enable)(struct perf_event *event);
-	void		(*disable)(struct perf_event *event);
-	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
-					 struct perf_event *event);
-	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
-					 struct perf_event *event);
-	int		(*set_event_filter)(struct hw_perf_event *evt,
-					    struct perf_event_attr *attr);
-	u32		(*read_counter)(struct perf_event *event);
-	void		(*write_counter)(struct perf_event *event, u32 val);
-	void		(*start)(struct arm_pmu *);
-	void		(*stop)(struct arm_pmu *);
-	void		(*reset)(void *);
-	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
-	void		(*free_irq)(struct arm_pmu *);
-	int		(*map_event)(struct perf_event *event);
-	int		num_events;
-	atomic_t	active_events;
-	struct mutex	reserve_mutex;
-	u64		max_period;
-	struct platform_device	*plat_device;
-	struct pmu_hw_events	__percpu *hw_events;
-	struct notifier_block	hotplug_nb;
-};
-
-#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
-
-int armpmu_register(struct arm_pmu *armpmu, int type);
-
-u64 armpmu_event_update(struct perf_event *event);
-
-int armpmu_event_set_period(struct perf_event *event);
-
-int armpmu_map_event(struct perf_event *event,
-		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
-		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
-						[PERF_COUNT_HW_CACHE_OP_MAX]
-						[PERF_COUNT_HW_CACHE_RESULT_MAX],
-		     u32 raw_event_mask);
-
-struct pmu_probe_info {
-	unsigned int cpuid;
-	unsigned int mask;
-	int (*init)(struct arm_pmu *);
-};
-
-#define PMU_PROBE(_cpuid, _mask, _fn)	\
-{					\
-	.cpuid = (_cpuid),		\
-	.mask = (_mask),		\
-	.init = (_fn),			\
-}
-
-#define ARM_PMU_PROBE(_cpuid, _fn) \
-	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
-
-#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
-
-#define XSCALE_PMU_PROBE(_version, _fn) \
-	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
-
-int arm_pmu_device_probe(struct platform_device *pdev,
-			 const struct of_device_id *of_table,
-			 const struct pmu_probe_info *probe_table);
-
-#endif /* CONFIG_HW_PERF_EVENTS */
-
-#endif /* __ARM_PMU_H__ */
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
index c25ef3e..68ee3ce 100644
--- a/arch/arm/include/asm/psci.h
+++ b/arch/arm/include/asm/psci.h
@@ -14,34 +14,11 @@
 #ifndef __ASM_ARM_PSCI_H
 #define __ASM_ARM_PSCI_H
 
-#define PSCI_POWER_STATE_TYPE_STANDBY		0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1
-
-struct psci_power_state {
-	u16	id;
-	u8	type;
-	u8	affinity_level;
-};
-
-struct psci_operations {
-	int (*cpu_suspend)(struct psci_power_state state,
-			   unsigned long entry_point);
-	int (*cpu_off)(struct psci_power_state state);
-	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
-	int (*migrate)(unsigned long cpuid);
-	int (*affinity_info)(unsigned long target_affinity,
-			unsigned long lowest_affinity_level);
-	int (*migrate_info_type)(void);
-};
-
-extern struct psci_operations psci_ops;
 extern struct smp_operations psci_smp_ops;
 
 #ifdef CONFIG_ARM_PSCI
-int psci_init(void);
 bool psci_smp_available(void);
 #else
-static inline int psci_init(void) { return 0; }
 static inline bool psci_smp_available(void) { return false; }
 #endif
 
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 2f3ac1b..ef35665 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -74,7 +74,6 @@
 extern int __cpu_disable(void);
 
 extern void __cpu_die(unsigned int cpu);
-extern void cpu_die(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -105,6 +104,7 @@
 #ifdef CONFIG_HOTPLUG_CPU
 	int  (*cpu_kill)(unsigned int cpu);
 	void (*cpu_die)(unsigned int cpu);
+	bool  (*cpu_can_disable)(unsigned int cpu);
 	int  (*cpu_disable)(unsigned int cpu);
 #endif
 #endif
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 993e522..f908071 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -107,4 +107,13 @@
 extern int platform_can_secondary_boot(void);
 extern int platform_can_cpu_hotplug(void);
 
+#ifdef CONFIG_HOTPLUG_CPU
+extern int platform_can_hotplug_cpu(unsigned int cpu);
+#else
+static inline int platform_can_hotplug_cpu(unsigned int cpu)
+{
+	return 0;
+}
+#endif
+
 #endif
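
One plausible shape for the new hook's consumer, with cpu_can_disable taking precedence over the legacy cpu_disable op (a sketch only; the actual logic is in arch/arm/kernel/smp.c):

/* sketch, not verbatim kernel code */
int platform_can_hotplug_cpu(unsigned int cpu)
{
	/* cpu_die must be provided for hotplug to work at all */
	if (!smp_ops.cpu_die)
		return 0;

	/* the new boolean hook gets the final say when present */
	if (smp_ops.cpu_can_disable)
		return smp_ops.cpu_can_disable(cpu);

	/* default policy assumed for this sketch: CPU0 stays online */
	return cpu != 0;
}
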
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index bd32ede..d0a1119 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -74,9 +74,6 @@
 	.flags		= 0,						\
 	.preempt_count	= INIT_PREEMPT_COUNT,				\
 	.addr_limit	= KERNEL_DS,					\
-	.cpu_domain	= domain_val(DOMAIN_USER, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_IO, DOMAIN_CLIENT),		\
 }
 
 #define init_thread_info	(init_thread_union.thread_info)
@@ -136,22 +133,18 @@
 
 /*
  * thread information flags:
- *  TIF_SYSCALL_TRACE	- syscall trace active
- *  TIF_SYSCAL_AUDIT	- syscall auditing active
- *  TIF_SIGPENDING	- signal pending
- *  TIF_NEED_RESCHED	- rescheduling necessary
- *  TIF_NOTIFY_RESUME	- callback before returning to user
  *  TIF_USEDFPU		- FPU was used by this task this quantum (SMP)
  *  TIF_POLLING_NRFLAG	- true if poll_idle() is polling TIF_NEED_RESCHED
  */
-#define TIF_SIGPENDING		0
-#define TIF_NEED_RESCHED	1
+#define TIF_SIGPENDING		0	/* signal pending */
+#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
-#define TIF_UPROBE		7
-#define TIF_SYSCALL_TRACE	8
-#define TIF_SYSCALL_AUDIT	9
-#define TIF_SYSCALL_TRACEPOINT	10
-#define TIF_SECCOMP		11	/* seccomp syscall filtering active */
+#define TIF_UPROBE		3	/* breakpointed or singlestepping */
+#define TIF_SYSCALL_TRACE	4	/* syscall trace active */
+#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
+#define TIF_SECCOMP		7	/* seccomp syscall filtering active */
+
 #define TIF_NOHZ		12	/* in adaptive nohz mode */
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
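
Packing the work flags into bits 0-7 is presumably what lets the entry code test them all with one instruction, since `tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK` needs the combined mask to be an ARM immediate: an 8-bit value rotated right by an even amount. A small stand-alone encodability check (illustrative helper, not kernel code):

#include <stdio.h>

/* can ARM encode `mask` as an 8-bit value rotated by an even amount? */
static int arm_immediate_ok(unsigned int mask)
{
	int rot;

	for (rot = 0; rot < 32; rot += 2) {
		unsigned int v = rot ? (mask << rot) | (mask >> (32 - rot))
				     : mask;	/* rotate left by rot */
		if ((v & ~0xffu) == 0)
			return 1;
	}
	return 0;
}

int main(void)
{
	/* all work flags now live in bits 0-7 */
	printf("encodable: %d\n", arm_immediate_ok(0xff));	/* 1 */
	return 0;
}
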
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 74b17d0..8cc85a4 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -50,6 +50,35 @@
 extern int fixup_exception(struct pt_regs *regs);
 
 /*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel cannot inadvertently
+ * perform such accesses (e.g. via list poison values) which could then
+ * be exploited for privilege escalation.
+ */
+static inline unsigned int uaccess_save_and_enable(void)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	unsigned int old_domain = get_domain();
+
+	/* Set the current domain access to permit user accesses */
+	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+	return old_domain;
+#else
+	return 0;
+#endif
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/* Restore the user access mask */
+	set_domain(flags);
+#endif
+}
+
+/*
  * These two are intentionally not defined anywhere - if the kernel
  * code generates any references to them, that's a bug.
  */
@@ -165,6 +194,7 @@
 		register typeof(x) __r2 asm("r2");			\
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
+		unsigned int __ua_flags = uaccess_save_and_enable();	\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
 			if (sizeof((x)) >= 8)				\
@@ -192,6 +222,7 @@
 			break;						\
 		default: __e = __get_user_bad(); break;			\
 		}							\
+		uaccess_restore(__ua_flags);				\
 		x = (typeof(*(p))) __r2;				\
 		__e;							\
 	})
@@ -224,6 +255,7 @@
 		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
+		unsigned int __ua_flags = uaccess_save_and_enable();	\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
 			__put_user_x(__r2, __p, __e, __l, 1);		\
@@ -239,6 +271,7 @@
 			break;						\
 		default: __e = __put_user_bad(); break;			\
 		}							\
+		uaccess_restore(__ua_flags);				\
 		__e;							\
 	})
 
@@ -300,20 +333,23 @@
 do {									\
 	unsigned long __gu_addr = (unsigned long)(ptr);			\
 	unsigned long __gu_val;						\
+	unsigned int __ua_flags;					\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
 	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
 	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
 	default: (__gu_val) = __get_user_bad();				\
 	}								\
+	uaccess_restore(__ua_flags);					\
 	(x) = (__typeof__(*(ptr)))__gu_val;				\
 } while (0)
 
-#define __get_user_asm_byte(x, addr, err)			\
+#define __get_user_asm(x, addr, err, instr)			\
 	__asm__ __volatile__(					\
-	"1:	" TUSER(ldrb) "	%1,[%2],#0\n"			\
+	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
 	"2:\n"							\
 	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
@@ -329,6 +365,9 @@
 	: "r" (addr), "i" (-EFAULT)				\
 	: "cc")
 
+#define __get_user_asm_byte(x, addr, err)			\
+	__get_user_asm(x, addr, err, ldrb)
+
 #ifndef __ARMEB__
 #define __get_user_asm_half(x, __gu_addr, err)			\
 ({								\
@@ -348,22 +387,7 @@
 #endif
 
 #define __get_user_asm_word(x, addr, err)			\
-	__asm__ __volatile__(					\
-	"1:	" TUSER(ldr) "	%1,[%2],#0\n"			\
-	"2:\n"							\
-	"	.pushsection .text.fixup,\"ax\"\n"		\
-	"	.align	2\n"					\
-	"3:	mov	%0, %3\n"				\
-	"	mov	%1, #0\n"				\
-	"	b	2b\n"					\
-	"	.popsection\n"					\
-	"	.pushsection __ex_table,\"a\"\n"		\
-	"	.align	3\n"					\
-	"	.long	1b, 3b\n"				\
-	"	.popsection"					\
-	: "+r" (err), "=&r" (x)					\
-	: "r" (addr), "i" (-EFAULT)				\
-	: "cc")
+	__get_user_asm(x, addr, err, ldr)
 
 #define __put_user(x, ptr)						\
 ({									\
@@ -381,9 +405,11 @@
 #define __put_user_err(x, ptr, err)					\
 do {									\
 	unsigned long __pu_addr = (unsigned long)(ptr);			\
+	unsigned int __ua_flags;					\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);	break;	\
 	case 2: __put_user_asm_half(__pu_val, __pu_addr, err);	break;	\
@@ -391,11 +417,12 @@
 	case 8:	__put_user_asm_dword(__pu_val, __pu_addr, err);	break;	\
 	default: __put_user_bad();					\
 	}								\
+	uaccess_restore(__ua_flags);					\
 } while (0)
 
-#define __put_user_asm_byte(x, __pu_addr, err)			\
+#define __put_user_asm(x, __pu_addr, err, instr)		\
 	__asm__ __volatile__(					\
-	"1:	" TUSER(strb) "	%1,[%2],#0\n"			\
+	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
 	"2:\n"							\
 	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
@@ -410,6 +437,9 @@
 	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
 	: "cc")
 
+#define __put_user_asm_byte(x, __pu_addr, err)			\
+	__put_user_asm(x, __pu_addr, err, strb)
+
 #ifndef __ARMEB__
 #define __put_user_asm_half(x, __pu_addr, err)			\
 ({								\
@@ -427,21 +457,7 @@
 #endif
 
 #define __put_user_asm_word(x, __pu_addr, err)			\
-	__asm__ __volatile__(					\
-	"1:	" TUSER(str) "	%1,[%2],#0\n"			\
-	"2:\n"							\
-	"	.pushsection .text.fixup,\"ax\"\n"		\
-	"	.align	2\n"					\
-	"3:	mov	%0, %3\n"				\
-	"	b	2b\n"					\
-	"	.popsection\n"					\
-	"	.pushsection __ex_table,\"a\"\n"		\
-	"	.align	3\n"					\
-	"	.long	1b, 3b\n"				\
-	"	.popsection"					\
-	: "+r" (err)						\
-	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
-	: "cc")
+	__put_user_asm(x, __pu_addr, err, str)
 
 #ifndef __ARMEB__
 #define	__reg_oper0	"%R2"
@@ -474,11 +490,46 @@
 
 
 #ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_from_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_to_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_clear_user(addr, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
 #else
 #define __copy_from_user(to, from, n)	(memcpy(to, (void __force *)from, n), 0)
 #define __copy_to_user(to, from, n)	(memcpy((void __force *)to, from, n), 0)
@@ -511,6 +562,7 @@
 	return n;
 }
 
+/* These are from lib/ code, and use __get_user() and friends */
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
 extern __must_check long strlen_user(const char __user *str);
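
One property worth noting about the save/restore pattern used throughout: because each opener captures the previous state, the regions nest safely, so a wrapper like the sketch below still behaves correctly even though __get_user() performs its own save/enable/restore internally (probe_user_word() is a hypothetical name):

/* sketch: bracketing a user access the same way the wrappers above do */
static long probe_user_word(const void __user *src, unsigned long *dst)
{
	unsigned int ua_flags;
	long err;

	ua_flags = uaccess_save_and_enable();	/* open the user window */
	err = __get_user(*dst, (unsigned long __user *)src);
	uaccess_restore(ua_flags);		/* restore the saved state */

	return err;
}
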
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index e69f7a1..af9e59b 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -71,8 +71,7 @@
 obj-$(CONFIG_CPU_PJ4B)		+= pj4-cp0.o
 obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_regs.o perf_callchain.o
-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o \
-				   perf_event_xscale.o perf_event_v6.o \
+obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event_xscale.o perf_event_v6.o \
 				   perf_event_v7.o
 CFLAGS_pj4-cp0.o		:= -marm
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
@@ -89,7 +88,7 @@
 
 obj-$(CONFIG_ARM_VIRT_EXT)	+= hyp-stub.o
 ifeq ($(CONFIG_ARM_PSCI),y)
-obj-y				+= psci.o psci-call.o
+obj-y				+= psci-call.o
 obj-$(CONFIG_SMP)		+= psci_smp.o
 endif
 
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 5e5a51a..f89811f 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -97,9 +97,9 @@
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);
 
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(arm_copy_from_user);
+EXPORT_SYMBOL(arm_copy_to_user);
+EXPORT_SYMBOL(arm_clear_user);
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index cb4fb1e..3e1c26e 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -149,10 +149,10 @@
 #define SPFIX(code...)
 #endif
 
-	.macro	svc_entry, stack_hole=0, trace=1
+	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
  UNWIND(.fnstart		)
  UNWIND(.save {r0 - pc}		)
-	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+	sub	sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
 #ifdef CONFIG_THUMB2_KERNEL
  SPFIX(	str	r0, [sp]	)	@ temporarily saved
  SPFIX(	mov	r0, sp		)
@@ -167,7 +167,7 @@
 	ldmia	r0, {r3 - r5}
 	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
 	mov	r6, #-1			@  ""  ""      ""       ""
-	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+	add	r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
  SPFIX(	addeq	r2, r2, #4	)
 	str	r3, [sp, #-4]!		@ save the "real" r0 copied
 					@ from the exception stack
@@ -185,6 +185,11 @@
 	@
 	stmia	r7, {r2 - r6}
 
+	uaccess_save r0
+	.if \uaccess
+	uaccess_disable r0
+	.endif
+
 	.if \trace
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
@@ -194,7 +199,7 @@
 
 	.align	5
 __dabt_svc:
-	svc_entry
+	svc_entry uaccess=0
 	mov	r2, sp
 	dabt_helper
  THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
@@ -368,7 +373,7 @@
 #error "sizeof(struct pt_regs) must be a multiple of 8"
 #endif
 
-	.macro	usr_entry, trace=1
+	.macro	usr_entry, trace=1, uaccess=1
  UNWIND(.fnstart	)
  UNWIND(.cantunwind	)	@ don't unwind the user space
 	sub	sp, sp, #S_FRAME_SIZE
@@ -400,6 +405,10 @@
  ARM(	stmdb	r0, {sp, lr}^			)
  THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
 
+	.if \uaccess
+	uaccess_disable ip
+	.endif
+
 	@ Enable the alignment trap while in kernel mode
  ATRAP(	teq	r8, r7)
  ATRAP( mcrne	p15, 0, r8, c1, c0, 0)
@@ -435,7 +444,7 @@
 
 	.align	5
 __dabt_usr:
-	usr_entry
+	usr_entry uaccess=0
 	kuser_cmpxchg_check
 	mov	r2, sp
 	dabt_helper
@@ -458,7 +467,7 @@
 
 	.align	5
 __und_usr:
-	usr_entry
+	usr_entry uaccess=0
 
 	mov	r2, r4
 	mov	r3, r5
@@ -484,6 +493,8 @@
 1:	ldrt	r0, [r4]
  ARM_BE8(rev	r0, r0)				@ little endian instruction
 
+	uaccess_disable ip
+
 	@ r0 = 32-bit ARM instruction which caused the exception
 	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
 	@ r4 = PC value for the faulting instruction
@@ -518,9 +529,10 @@
 2:	ldrht	r5, [r4]
 ARM_BE8(rev16	r5, r5)				@ little endian instruction
 	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
-	blo	__und_usr_fault_16		@ 16bit undefined instruction
+	blo	__und_usr_fault_16_pan		@ 16bit undefined instruction
 3:	ldrht	r0, [r2]
 ARM_BE8(rev16	r0, r0)				@ little endian instruction
+	uaccess_disable ip
 	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
 	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
 	orr	r0, r0, r5, lsl #16
@@ -715,6 +727,8 @@
 __und_usr_fault_32:
 	mov	r1, #4
 	b	1f
+__und_usr_fault_16_pan:
+	uaccess_disable ip
 __und_usr_fault_16:
 	mov	r1, #2
 1:	mov	r0, sp
@@ -770,6 +784,8 @@
 	ldr	r4, [r2, #TI_TP_VALUE]
 	ldr	r5, [r2, #TI_TP_VALUE + 4]
 #ifdef CONFIG_CPU_USE_DOMAINS
+	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
+	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
 	switch_tls r1, r4, r5, r3, r7
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index b48dd4f..30a7228 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -24,35 +24,55 @@
 
 
 	.align	5
+#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
 /*
- * This is the fast syscall return path.  We do as little as
- * possible here, and this includes saving r0 back into the SVC
- * stack.
+ * This is the fast syscall return path.  We do as little as possible here,
+ * such as avoiding writing r0 to the stack.  We only use this path if we
+ * have tracing and context tracking disabled - the overheads from those
+ * features make this path too inefficient.
  */
 ret_fast_syscall:
  UNWIND(.fnstart	)
  UNWIND(.cantunwind	)
-	disable_irq				@ disable interrupts
+	disable_irq_notrace			@ disable interrupts
 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
-	tst	r1, #_TIF_SYSCALL_WORK
-	bne	__sys_trace_return
-	tst	r1, #_TIF_WORK_MASK
+	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
 	bne	fast_work_pending
-	asm_trace_hardirqs_on
 
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
-	ct_user_enter
 
 	restore_user_regs fast = 1, offset = S_OFF
  UNWIND(.fnend		)
+ENDPROC(ret_fast_syscall)
 
-/*
- * Ok, we need to do extra processing, enter the slow path.
- */
+	/* Ok, we need to do extra processing, enter the slow path. */
 fast_work_pending:
 	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
-work_pending:
+	/* fall through to work_pending */
+#else
+/*
+ * The "replacement" ret_fast_syscall for when tracing or context tracking
+ * is enabled.  As we will need to call out to some C functions, we save
+ * r0 first to avoid needing to save registers around each C function call.
+ */
+ret_fast_syscall:
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
+	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
+	disable_irq_notrace			@ disable interrupts
+	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
+	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+	beq	no_work_pending
+ UNWIND(.fnend		)
+ENDPROC(ret_fast_syscall)
+
+	/* Slower path - fall through to work_pending */
+#endif
+
+	tst	r1, #_TIF_SYSCALL_WORK
+	bne	__sys_trace_return_nosave
+slow_work_pending:
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
 	bl	do_work_pending
@@ -65,16 +85,19 @@
 
 /*
  * "slow" syscall return path.  "why" tells us if this was a real syscall.
+ * IRQs may be enabled here, so always disable them.  Note that we use the
+ * "notrace" version to avoid calling into the tracing code unnecessarily.
+ * do_work_pending() will update this state if necessary.
  */
 ENTRY(ret_to_user)
 ret_slow_syscall:
-	disable_irq				@ disable interrupts
+	disable_irq_notrace			@ disable interrupts
 ENTRY(ret_to_user_from_irq)
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
-	bne	work_pending
+	bne	slow_work_pending
 no_work_pending:
-	asm_trace_hardirqs_on
+	asm_trace_hardirqs_on save = 0
 
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
@@ -174,6 +197,8 @@
  USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
 #endif
 
+	uaccess_disable tbl
+
 	adr	tbl, sys_call_table		@ load syscall table pointer
 
 #if defined(CONFIG_OABI_COMPAT)
@@ -252,6 +277,12 @@
 	bl	syscall_trace_exit
 	b	ret_slow_syscall
 
+__sys_trace_return_nosave:
+	enable_irq_notrace
+	mov	r0, sp
+	bl	syscall_trace_exit
+	b	ret_slow_syscall
+
 	.align	5
 #ifdef CONFIG_ALIGNMENT_TRAP
 	.type	__cr_alignment, #object
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 1a0045a..0d22ad2 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -196,7 +196,7 @@
 	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
 	.endm
 
-#ifndef CONFIG_THUMB2_KERNEL
+
 	.macro	svc_exit, rpsr, irq = 0
 	.if	\irq != 0
 	@ IRQs already off
@@ -215,6 +215,10 @@
 	blne	trace_hardirqs_off
 #endif
 	.endif
+	uaccess_restore
+
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode SVC restore
 	msr	spsr_cxsf, \rpsr
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -222,6 +226,20 @@
 	strex	r1, r2, [r0]			@ clear the exclusive monitor
 #endif
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+#else
+	@ Thumb mode SVC restore
+	ldr	lr, [sp, #S_SP]			@ top of the stack
+	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
+
+	@ We must avoid clrex due to Cortex-A15 erratum #830321
+	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor
+
+	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
+	ldmia	sp, {r0 - r12}
+	mov	sp, lr
+	ldr	lr, [sp], #4
+	rfeia	sp!
+#endif
 	.endm
 
 	@
@@ -241,6 +259,9 @@
 	@ on the stack remains correct).
 	@
 	.macro  svc_exit_via_fiq
+	uaccess_restore
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode restore
 	mov	r0, sp
 	ldmib	r0, {r1 - r14}	@ abort is deadly from here onward (it will
 				@ clobber state restored below)
@@ -250,9 +271,27 @@
 	msr	spsr_cxsf, r9
 	ldr	r0, [r0, #S_R0]
 	ldmia	r8, {pc}^
+#else
+	@ Thumb mode restore
+	add	r0, sp, #S_R2
+	ldr	lr, [sp, #S_LR]
+	ldr	sp, [sp, #S_SP] @ abort is deadly from here onward (it will
+			        @ clobber state restored below)
+	ldmia	r0, {r2 - r12}
+	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
+	msr	cpsr_c, r1
+	sub	r0, #S_R2
+	add	r8, r0, #S_PC
+	ldmia	r0, {r0 - r1}
+	rfeia	r8
+#endif
 	.endm
 
+
 	.macro	restore_user_regs, fast = 0, offset = 0
+	uaccess_enable r1, isb=0
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode restore
 	mov	r2, sp
 	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
@@ -270,72 +309,16 @@
 						@ after ldm {}^
 	add	sp, sp, #\offset + S_FRAME_SIZE
 	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-
-#else	/* CONFIG_THUMB2_KERNEL */
-	.macro	svc_exit, rpsr, irq = 0
-	.if	\irq != 0
-	@ IRQs already off
-#ifdef CONFIG_TRACE_IRQFLAGS
-	@ The parent context IRQs must have been enabled to get here in
-	@ the first place, so there's no point checking the PSR I bit.
-	bl	trace_hardirqs_on
-#endif
-	.else
-	@ IRQs off again before pulling preserved data off the stack
-	disable_irq_notrace
-#ifdef CONFIG_TRACE_IRQFLAGS
-	tst	\rpsr, #PSR_I_BIT
-	bleq	trace_hardirqs_on
-	tst	\rpsr, #PSR_I_BIT
-	blne	trace_hardirqs_off
-#endif
-	.endif
-	ldr	lr, [sp, #S_SP]			@ top of the stack
-	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
-
-	@ We must avoid clrex due to Cortex-A15 erratum #830321
-	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor
-
-	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
-	ldmia	sp, {r0 - r12}
-	mov	sp, lr
-	ldr	lr, [sp], #4
-	rfeia	sp!
-	.endm
-
-	@
-	@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
-	@
-	@ For full details see non-Thumb implementation above.
-	@
-	.macro  svc_exit_via_fiq
-	add	r0, sp, #S_R2
-	ldr	lr, [sp, #S_LR]
-	ldr	sp, [sp, #S_SP] @ abort is deadly from here onward (it will
-			        @ clobber state restored below)
-	ldmia	r0, {r2 - r12}
-	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
-	msr	cpsr_c, r1
-	sub	r0, #S_R2
-	add	r8, r0, #S_PC
-	ldmia	r0, {r0 - r1}
-	rfeia	r8
-	.endm
-
-#ifdef CONFIG_CPU_V7M
-	/*
-	 * Note we don't need to do clrex here as clearing the local monitor is
-	 * part of each exception entry and exit sequence.
-	 */
-	.macro	restore_user_regs, fast = 0, offset = 0
+#elif defined(CONFIG_CPU_V7M)
+	@ V7M restore.
+	@ Note that we don't need to do clrex here as clearing the local
+	@ monitor is part of the exception entry and exit sequence.
 	.if	\offset
 	add	sp, #\offset
 	.endif
 	v7m_exception_slow_exit ret_r0 = \fast
-	.endm
-#else	/* ifdef CONFIG_CPU_V7M */
-	.macro	restore_user_regs, fast = 0, offset = 0
+#else
+	@ Thumb mode restore
 	mov	r2, sp
 	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
@@ -353,9 +336,8 @@
 	.endif
 	add	sp, sp, #S_FRAME_SIZE - S_SP
 	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-#endif	/* ifdef CONFIG_CPU_V7M / else */
 #endif	/* !CONFIG_THUMB2_KERNEL */
+	.endm
 
 /*
  * Context tracking subsystem.  Used to instrument transitions
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 29e2991..04286fd 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -464,10 +464,7 @@
 #ifdef CONFIG_ARM_LPAE
 	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
 #else
-	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+	mov	r5, #DACR_INIT
 	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
 	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
 #endif
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index baf8ede..5ff4826 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -39,6 +39,7 @@
 #include <linux/export.h>
 
 #include <asm/hardware/cache-l2x0.h>
+#include <asm/outercache.h>
 #include <asm/exception.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/irq.h>
diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c
index e39cbf4..845a5dd 100644
--- a/arch/arm/kernel/jump_label.c
+++ b/arch/arm/kernel/jump_label.c
@@ -12,7 +12,7 @@
 	void *addr = (void *)entry->code;
 	unsigned int insn;
 
-	if (type == JUMP_LABEL_ENABLE)
+	if (type == JUMP_LABEL_JMP)
 		insn = arm_gen_branch(entry->code, entry->target);
 	else
 		insn = arm_gen_nop();
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
deleted file mode 100644
index 54272e0..0000000
--- a/arch/arm/kernel/perf_event.c
+++ /dev/null
@@ -1,896 +0,0 @@
-#undef DEBUG
-
-/*
- * ARM performance counter support.
- *
- * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
- * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
- *
- * This code is based on the sparc64 perf event code, which is in turn based
- * on the x86 code.
- */
-#define pr_fmt(fmt) "hw perfevents: " fmt
-
-#include <linux/bitmap.h>
-#include <linux/cpumask.h>
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/irq.h>
-#include <linux/irqdesc.h>
-
-#include <asm/cputype.h>
-#include <asm/irq_regs.h>
-#include <asm/pmu.h>
-
-static int
-armpmu_map_cache_event(const unsigned (*cache_map)
-				      [PERF_COUNT_HW_CACHE_MAX]
-				      [PERF_COUNT_HW_CACHE_OP_MAX]
-				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
-		       u64 config)
-{
-	unsigned int cache_type, cache_op, cache_result, ret;
-
-	cache_type = (config >>  0) & 0xff;
-	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
-		return -EINVAL;
-
-	cache_op = (config >>  8) & 0xff;
-	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
-		return -EINVAL;
-
-	cache_result = (config >> 16) & 0xff;
-	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
-		return -EINVAL;
-
-	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
-
-	if (ret == CACHE_OP_UNSUPPORTED)
-		return -ENOENT;
-
-	return ret;
-}
-
-static int
-armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
-{
-	int mapping;
-
-	if (config >= PERF_COUNT_HW_MAX)
-		return -EINVAL;
-
-	mapping = (*event_map)[config];
-	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
-}
-
-static int
-armpmu_map_raw_event(u32 raw_event_mask, u64 config)
-{
-	return (int)(config & raw_event_mask);
-}
-
-int
-armpmu_map_event(struct perf_event *event,
-		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
-		 const unsigned (*cache_map)
-				[PERF_COUNT_HW_CACHE_MAX]
-				[PERF_COUNT_HW_CACHE_OP_MAX]
-				[PERF_COUNT_HW_CACHE_RESULT_MAX],
-		 u32 raw_event_mask)
-{
-	u64 config = event->attr.config;
-	int type = event->attr.type;
-
-	if (type == event->pmu->type)
-		return armpmu_map_raw_event(raw_event_mask, config);
-
-	switch (type) {
-	case PERF_TYPE_HARDWARE:
-		return armpmu_map_hw_event(event_map, config);
-	case PERF_TYPE_HW_CACHE:
-		return armpmu_map_cache_event(cache_map, config);
-	case PERF_TYPE_RAW:
-		return armpmu_map_raw_event(raw_event_mask, config);
-	}
-
-	return -ENOENT;
-}
-
-int armpmu_event_set_period(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hwc = &event->hw;
-	s64 left = local64_read(&hwc->period_left);
-	s64 period = hwc->sample_period;
-	int ret = 0;
-
-	if (unlikely(left <= -period)) {
-		left = period;
-		local64_set(&hwc->period_left, left);
-		hwc->last_period = period;
-		ret = 1;
-	}
-
-	if (unlikely(left <= 0)) {
-		left += period;
-		local64_set(&hwc->period_left, left);
-		hwc->last_period = period;
-		ret = 1;
-	}
-
-	/*
-	 * Limit the maximum period to prevent the counter value
-	 * from overtaking the one we are about to program. In
-	 * effect we are reducing max_period to account for
-	 * interrupt latency (and we are being very conservative).
-	 */
-	if (left > (armpmu->max_period >> 1))
-		left = armpmu->max_period >> 1;
-
-	local64_set(&hwc->prev_count, (u64)-left);
-
-	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
-
-	perf_event_update_userpage(event);
-
-	return ret;
-}
-
-u64 armpmu_event_update(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hwc = &event->hw;
-	u64 delta, prev_raw_count, new_raw_count;
-
-again:
-	prev_raw_count = local64_read(&hwc->prev_count);
-	new_raw_count = armpmu->read_counter(event);
-
-	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
-			     new_raw_count) != prev_raw_count)
-		goto again;
-
-	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
-
-	local64_add(delta, &event->count);
-	local64_sub(delta, &hwc->period_left);
-
-	return new_raw_count;
-}
-
-static void
-armpmu_read(struct perf_event *event)
-{
-	armpmu_event_update(event);
-}
-
-static void
-armpmu_stop(struct perf_event *event, int flags)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hwc = &event->hw;
-
-	/*
-	 * ARM pmu always has to update the counter, so ignore
-	 * PERF_EF_UPDATE, see comments in armpmu_start().
-	 */
-	if (!(hwc->state & PERF_HES_STOPPED)) {
-		armpmu->disable(event);
-		armpmu_event_update(event);
-		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
-	}
-}
-
-static void armpmu_start(struct perf_event *event, int flags)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hwc = &event->hw;
-
-	/*
-	 * ARM pmu always has to reprogram the period, so ignore
-	 * PERF_EF_RELOAD, see the comment below.
-	 */
-	if (flags & PERF_EF_RELOAD)
-		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
-
-	hwc->state = 0;
-	/*
-	 * Set the period again. Some counters can't be stopped, so when we
-	 * were stopped we simply disabled the IRQ source and the counter
-	 * may have been left counting. If we don't do this step then we may
-	 * get an interrupt too soon or *way* too late if the overflow has
-	 * happened since disabling.
-	 */
-	armpmu_event_set_period(event);
-	armpmu->enable(event);
-}
-
-static void
-armpmu_del(struct perf_event *event, int flags)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
-	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
-
-	armpmu_stop(event, PERF_EF_UPDATE);
-	hw_events->events[idx] = NULL;
-	clear_bit(idx, hw_events->used_mask);
-	if (armpmu->clear_event_idx)
-		armpmu->clear_event_idx(hw_events, event);
-
-	perf_event_update_userpage(event);
-}
-
-static int
-armpmu_add(struct perf_event *event, int flags)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
-	struct hw_perf_event *hwc = &event->hw;
-	int idx;
-	int err = 0;
-
-	/* An event following a process won't be stopped earlier */
-	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
-		return -ENOENT;
-
-	perf_pmu_disable(event->pmu);
-
-	/* If we don't have a space for the counter then finish early. */
-	idx = armpmu->get_event_idx(hw_events, event);
-	if (idx < 0) {
-		err = idx;
-		goto out;
-	}
-
-	/*
-	 * If there is an event in the counter we are going to use then make
-	 * sure it is disabled.
-	 */
-	event->hw.idx = idx;
-	armpmu->disable(event);
-	hw_events->events[idx] = event;
-
-	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
-	if (flags & PERF_EF_START)
-		armpmu_start(event, PERF_EF_RELOAD);
-
-	/* Propagate our changes to the userspace mapping. */
-	perf_event_update_userpage(event);
-
-out:
-	perf_pmu_enable(event->pmu);
-	return err;
-}
-
-static int
-validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
-			       struct perf_event *event)
-{
-	struct arm_pmu *armpmu;
-
-	if (is_software_event(event))
-		return 1;
-
-	/*
-	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
-	 * core perf code won't check that the pmu->ctx == leader->ctx
-	 * until after pmu->event_init(event).
-	 */
-	if (event->pmu != pmu)
-		return 0;
-
-	if (event->state < PERF_EVENT_STATE_OFF)
-		return 1;
-
-	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
-		return 1;
-
-	armpmu = to_arm_pmu(event->pmu);
-	return armpmu->get_event_idx(hw_events, event) >= 0;
-}
-
-static int
-validate_group(struct perf_event *event)
-{
-	struct perf_event *sibling, *leader = event->group_leader;
-	struct pmu_hw_events fake_pmu;
-
-	/*
-	 * Initialise the fake PMU. We only need to populate the
-	 * used_mask for the purposes of validation.
-	 */
-	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
-
-	if (!validate_event(event->pmu, &fake_pmu, leader))
-		return -EINVAL;
-
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
-		if (!validate_event(event->pmu, &fake_pmu, sibling))
-			return -EINVAL;
-	}
-
-	if (!validate_event(event->pmu, &fake_pmu, event))
-		return -EINVAL;
-
-	return 0;
-}
-
-static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
-{
-	struct arm_pmu *armpmu;
-	struct platform_device *plat_device;
-	struct arm_pmu_platdata *plat;
-	int ret;
-	u64 start_clock, finish_clock;
-
-	/*
-	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
-	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
-	 * do any necessary shifting, we just need to perform the first
-	 * dereference.
-	 */
-	armpmu = *(void **)dev;
-	plat_device = armpmu->plat_device;
-	plat = dev_get_platdata(&plat_device->dev);
-
-	start_clock = sched_clock();
-	if (plat && plat->handle_irq)
-		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
-	else
-		ret = armpmu->handle_irq(irq, armpmu);
-	finish_clock = sched_clock();
-
-	perf_sample_event_took(finish_clock - start_clock);
-	return ret;
-}
-
-static void
-armpmu_release_hardware(struct arm_pmu *armpmu)
-{
-	armpmu->free_irq(armpmu);
-}
-
-static int
-armpmu_reserve_hardware(struct arm_pmu *armpmu)
-{
-	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
-	if (err) {
-		armpmu_release_hardware(armpmu);
-		return err;
-	}
-
-	return 0;
-}
-
-static void
-hw_perf_event_destroy(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	atomic_t *active_events	 = &armpmu->active_events;
-	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
-
-	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
-		armpmu_release_hardware(armpmu);
-		mutex_unlock(pmu_reserve_mutex);
-	}
-}
-
-static int
-event_requires_mode_exclusion(struct perf_event_attr *attr)
-{
-	return attr->exclude_idle || attr->exclude_user ||
-	       attr->exclude_kernel || attr->exclude_hv;
-}
-
-static int
-__hw_perf_event_init(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hwc = &event->hw;
-	int mapping;
-
-	mapping = armpmu->map_event(event);
-
-	if (mapping < 0) {
-		pr_debug("event %x:%llx not supported\n", event->attr.type,
-			 event->attr.config);
-		return mapping;
-	}
-
-	/*
-	 * We don't assign an index until we actually place the event onto
-	 * hardware. Use -1 to signify that we haven't decided where to put it
-	 * yet. For SMP systems, each core has it's own PMU so we can't do any
-	 * clever allocation or constraints checking at this point.
-	 */
-	hwc->idx		= -1;
-	hwc->config_base	= 0;
-	hwc->config		= 0;
-	hwc->event_base		= 0;
-
-	/*
-	 * Check whether we need to exclude the counter from certain modes.
-	 */
-	if ((!armpmu->set_event_filter ||
-	     armpmu->set_event_filter(hwc, &event->attr)) &&
-	     event_requires_mode_exclusion(&event->attr)) {
-		pr_debug("ARM performance counters do not support "
-			 "mode exclusion\n");
-		return -EOPNOTSUPP;
-	}
-
-	/*
-	 * Store the event encoding into the config_base field.
-	 */
-	hwc->config_base	    |= (unsigned long)mapping;
-
-	if (!is_sampling_event(event)) {
-		/*
-		 * For non-sampling runs, limit the sample_period to half
-		 * of the counter width. That way, the new counter value
-		 * is far less likely to overtake the previous one unless
-		 * you have some serious IRQ latency issues.
-		 */
-		hwc->sample_period  = armpmu->max_period >> 1;
-		hwc->last_period    = hwc->sample_period;
-		local64_set(&hwc->period_left, hwc->sample_period);
-	}
-
-	if (event->group_leader != event) {
-		if (validate_group(event) != 0)
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int armpmu_event_init(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	int err = 0;
-	atomic_t *active_events = &armpmu->active_events;
-
-	/*
-	 * Reject CPU-affine events for CPUs that are of a different class to
-	 * that which this PMU handles. Process-following events (where
-	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
-	 * reject them later (in armpmu_add) if they're scheduled on a
-	 * different class of CPU.
-	 */
-	if (event->cpu != -1 &&
-		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
-		return -ENOENT;
-
-	/* does not support taken branch sampling */
-	if (has_branch_stack(event))
-		return -EOPNOTSUPP;
-
-	if (armpmu->map_event(event) == -ENOENT)
-		return -ENOENT;
-
-	event->destroy = hw_perf_event_destroy;
-
-	if (!atomic_inc_not_zero(active_events)) {
-		mutex_lock(&armpmu->reserve_mutex);
-		if (atomic_read(active_events) == 0)
-			err = armpmu_reserve_hardware(armpmu);
-
-		if (!err)
-			atomic_inc(active_events);
-		mutex_unlock(&armpmu->reserve_mutex);
-	}
-
-	if (err)
-		return err;
-
-	err = __hw_perf_event_init(event);
-	if (err)
-		hw_perf_event_destroy(event);
-
-	return err;
-}
-
-static void armpmu_enable(struct pmu *pmu)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(pmu);
-	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
-	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
-
-	/* For task-bound events we may be called on other CPUs */
-	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
-		return;
-
-	if (enabled)
-		armpmu->start(armpmu);
-}
-
-static void armpmu_disable(struct pmu *pmu)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(pmu);
-
-	/* For task-bound events we may be called on other CPUs */
-	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
-		return;
-
-	armpmu->stop(armpmu);
-}
-
-/*
- * In heterogeneous systems, events are specific to a particular
- * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
- * the same microarchitecture.
- */
-static int armpmu_filter_match(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	unsigned int cpu = smp_processor_id();
-	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
-}
-
-static void armpmu_init(struct arm_pmu *armpmu)
-{
-	atomic_set(&armpmu->active_events, 0);
-	mutex_init(&armpmu->reserve_mutex);
-
-	armpmu->pmu = (struct pmu) {
-		.pmu_enable	= armpmu_enable,
-		.pmu_disable	= armpmu_disable,
-		.event_init	= armpmu_event_init,
-		.add		= armpmu_add,
-		.del		= armpmu_del,
-		.start		= armpmu_start,
-		.stop		= armpmu_stop,
-		.read		= armpmu_read,
-		.filter_match	= armpmu_filter_match,
-	};
-}
-
-int armpmu_register(struct arm_pmu *armpmu, int type)
-{
-	armpmu_init(armpmu);
-	pr_info("enabled with %s PMU driver, %d counters available\n",
-			armpmu->name, armpmu->num_events);
-	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
-}
-
-/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *__oprofile_cpu_pmu;
-
-/*
- * Despite the names, these two functions are CPU-specific and are used
- * by the OProfile/perf code.
- */
-const char *perf_pmu_name(void)
-{
-	if (!__oprofile_cpu_pmu)
-		return NULL;
-
-	return __oprofile_cpu_pmu->name;
-}
-EXPORT_SYMBOL_GPL(perf_pmu_name);
-
-int perf_num_counters(void)
-{
-	int max_events = 0;
-
-	if (__oprofile_cpu_pmu != NULL)
-		max_events = __oprofile_cpu_pmu->num_events;
-
-	return max_events;
-}
-EXPORT_SYMBOL_GPL(perf_num_counters);
-
-static void cpu_pmu_enable_percpu_irq(void *data)
-{
-	int irq = *(int *)data;
-
-	enable_percpu_irq(irq, IRQ_TYPE_NONE);
-}
-
-static void cpu_pmu_disable_percpu_irq(void *data)
-{
-	int irq = *(int *)data;
-
-	disable_percpu_irq(irq);
-}
-
-static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
-{
-	int i, irq, irqs;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
-
-	irqs = min(pmu_device->num_resources, num_possible_cpus());
-
-	irq = platform_get_irq(pmu_device, 0);
-	if (irq >= 0 && irq_is_percpu(irq)) {
-		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
-		free_percpu_irq(irq, &hw_events->percpu_pmu);
-	} else {
-		for (i = 0; i < irqs; ++i) {
-			int cpu = i;
-
-			if (cpu_pmu->irq_affinity)
-				cpu = cpu_pmu->irq_affinity[i];
-
-			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
-				continue;
-			irq = platform_get_irq(pmu_device, i);
-			if (irq >= 0)
-				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
-		}
-	}
-}
-
-static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
-{
-	int i, err, irq, irqs;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
-
-	if (!pmu_device)
-		return -ENODEV;
-
-	irqs = min(pmu_device->num_resources, num_possible_cpus());
-	if (irqs < 1) {
-		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
-		return 0;
-	}
-
-	irq = platform_get_irq(pmu_device, 0);
-	if (irq >= 0 && irq_is_percpu(irq)) {
-		err = request_percpu_irq(irq, handler, "arm-pmu",
-					 &hw_events->percpu_pmu);
-		if (err) {
-			pr_err("unable to request IRQ%d for ARM PMU counters\n",
-				irq);
-			return err;
-		}
-		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
-	} else {
-		for (i = 0; i < irqs; ++i) {
-			int cpu = i;
-
-			err = 0;
-			irq = platform_get_irq(pmu_device, i);
-			if (irq < 0)
-				continue;
-
-			if (cpu_pmu->irq_affinity)
-				cpu = cpu_pmu->irq_affinity[i];
-
-			/*
-			 * If we have a single PMU interrupt that we can't shift,
-			 * assume that we're running on a uniprocessor machine and
-			 * continue. Otherwise, continue without this interrupt.
-			 */
-			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
-				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
-					irq, cpu);
-				continue;
-			}
-
-			err = request_irq(irq, handler,
-					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
-					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
-			if (err) {
-				pr_err("unable to request IRQ%d for ARM PMU counters\n",
-					irq);
-				return err;
-			}
-
-			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
-		}
-	}
-
-	return 0;
-}
-
-/*
- * PMU hardware loses all context when a CPU goes offline.
- * When a CPU is hotplugged back in, since some hardware registers are
- * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
- * junk values out of them.
- */
-static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
-			  void *hcpu)
-{
-	int cpu = (unsigned long)hcpu;
-	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
-
-	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
-		return NOTIFY_DONE;
-
-	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
-		return NOTIFY_DONE;
-
-	if (pmu->reset)
-		pmu->reset(pmu);
-	else
-		return NOTIFY_DONE;
-
-	return NOTIFY_OK;
-}
-
-static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	int err;
-	int cpu;
-	struct pmu_hw_events __percpu *cpu_hw_events;
-
-	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
-	if (!cpu_hw_events)
-		return -ENOMEM;
-
-	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
-	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
-	if (err)
-		goto out_hw_events;
-
-	for_each_possible_cpu(cpu) {
-		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
-		raw_spin_lock_init(&events->pmu_lock);
-		events->percpu_pmu = cpu_pmu;
-	}
-
-	cpu_pmu->hw_events	= cpu_hw_events;
-	cpu_pmu->request_irq	= cpu_pmu_request_irq;
-	cpu_pmu->free_irq	= cpu_pmu_free_irq;
-
-	/* Ensure the PMU has sane values out of reset. */
-	if (cpu_pmu->reset)
-		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
-			 cpu_pmu, 1);
-
-	/* If no interrupts available, set the corresponding capability flag */
-	if (!platform_get_irq(cpu_pmu->plat_device, 0))
-		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
-
-	return 0;
-
-out_hw_events:
-	free_percpu(cpu_hw_events);
-	return err;
-}
-
-static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
-{
-	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
-	free_percpu(cpu_pmu->hw_events);
-}
-
-/*
- * CPU PMU identification and probing.
- */
-static int probe_current_pmu(struct arm_pmu *pmu,
-			     const struct pmu_probe_info *info)
-{
-	int cpu = get_cpu();
-	unsigned int cpuid = read_cpuid_id();
-	int ret = -ENODEV;
-
-	pr_info("probing PMU on CPU %d\n", cpu);
-
-	for (; info->init != NULL; info++) {
-		if ((cpuid & info->mask) != info->cpuid)
-			continue;
-		ret = info->init(pmu);
-		break;
-	}
-
-	put_cpu();
-	return ret;
-}
-
-static int of_pmu_irq_cfg(struct arm_pmu *pmu)
-{
-	int i, irq, *irqs;
-	struct platform_device *pdev = pmu->plat_device;
-
-	/* Don't bother with PPIs; they're already affine */
-	irq = platform_get_irq(pdev, 0);
-	if (irq >= 0 && irq_is_percpu(irq))
-		return 0;
-
-	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
-	if (!irqs)
-		return -ENOMEM;
-
-	for (i = 0; i < pdev->num_resources; ++i) {
-		struct device_node *dn;
-		int cpu;
-
-		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
-				      i);
-		if (!dn) {
-			pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
-				of_node_full_name(pdev->dev.of_node), i);
-			break;
-		}
-
-		for_each_possible_cpu(cpu)
-			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
-				break;
-
-		if (cpu >= nr_cpu_ids) {
-			pr_warn("Failed to find logical CPU for %s\n",
-				dn->name);
-			of_node_put(dn);
-			break;
-		}
-		of_node_put(dn);
-
-		irqs[i] = cpu;
-		cpumask_set_cpu(cpu, &pmu->supported_cpus);
-	}
-
-	if (i == pdev->num_resources) {
-		pmu->irq_affinity = irqs;
-	} else {
-		kfree(irqs);
-		cpumask_setall(&pmu->supported_cpus);
-	}
-
-	return 0;
-}
-
-int arm_pmu_device_probe(struct platform_device *pdev,
-			 const struct of_device_id *of_table,
-			 const struct pmu_probe_info *probe_table)
-{
-	const struct of_device_id *of_id;
-	const int (*init_fn)(struct arm_pmu *);
-	struct device_node *node = pdev->dev.of_node;
-	struct arm_pmu *pmu;
-	int ret = -ENODEV;
-
-	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
-	if (!pmu) {
-		pr_info("failed to allocate PMU device!\n");
-		return -ENOMEM;
-	}
-
-	if (!__oprofile_cpu_pmu)
-		__oprofile_cpu_pmu = pmu;
-
-	pmu->plat_device = pdev;
-
-	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
-		init_fn = of_id->data;
-
-		ret = of_pmu_irq_cfg(pmu);
-		if (!ret)
-			ret = init_fn(pmu);
-	} else {
-		ret = probe_current_pmu(pmu, probe_table);
-		cpumask_setall(&pmu->supported_cpus);
-	}
-
-	if (ret) {
-		pr_info("failed to probe PMU!\n");
-		goto out_free;
-	}
-
-	ret = cpu_pmu_init(pmu);
-	if (ret)
-		goto out_free;
-
-	ret = armpmu_register(pmu, -1);
-	if (ret)
-		goto out_destroy;
-
-	return 0;
-
-out_destroy:
-	cpu_pmu_destroy(pmu);
-out_free:
-	pr_info("failed to register PMU devices!\n");
-	kfree(pmu);
-	return ret;
-}
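
The armpmu core deleted above is not dropped from the kernel: in this series it moves out of arch/arm so the same code can also serve arm64 (it ends up under drivers/perf/). The subtlest piece of it is the wraparound-safe counter arithmetic in armpmu_event_update(): prev_count is programmed to -left, the hardware counter is only max_period wide, and masking the subtraction keeps the delta correct across a counter wrap. A standalone sketch of that arithmetic, as user-space C with a 32-bit counter assumed:

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_PERIOD 0xffffffffULL	/* 32-bit counter, as on ARMv7 */

	/* mirrors the delta computation in armpmu_event_update() */
	static uint64_t counter_delta(uint64_t prev, uint64_t now)
	{
		return (now - prev) & MAX_PERIOD;
	}

	int main(void)
	{
		/* no wrap: 0x4e8 - 0x100 = 1000 events */
		printf("%llu\n", (unsigned long long)counter_delta(0x100, 0x4e8));
		/* wrap: 0xfffffff0 -> 0x10 is still 32 events, not garbage */
		printf("%llu\n", (unsigned long long)counter_delta(0xfffffff0, 0x10));
		return 0;
	}
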
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 09f83e4..09413e7 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -34,9 +34,9 @@
 
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
-#include <asm/pmu.h>
 
 #include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 
 enum armv6_perf_types {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index f9b37f8..126dc67 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -21,11 +21,11 @@
 #include <asm/cp15.h>
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
-#include <asm/pmu.h>
 #include <asm/vfp.h>
 #include "../vfp/vfpinstr.h"
 
 #include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 
 /*
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 304d056..aa0499e 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -16,9 +16,9 @@
 
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
-#include <asm/pmu.h>
 
 #include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 
 enum xscale_perf_types {
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index f192a2a..a3089ba 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -91,13 +91,6 @@
 	ledtrig_cpu(CPU_LED_IDLE_END);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead(void)
-{
-	cpu_die();
-}
-#endif
-
 void __show_regs(struct pt_regs *regs)
 {
 	unsigned long flags;
@@ -129,12 +122,36 @@
 	buf[4] = '\0';
 
 #ifndef CONFIG_CPU_V7M
-	printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
-		buf, interrupts_enabled(regs) ? "n" : "ff",
-		fast_interrupts_enabled(regs) ? "n" : "ff",
-		processor_modes[processor_mode(regs)],
-		isa_modes[isa_mode(regs)],
-		get_fs() == get_ds() ? "kernel" : "user");
+	{
+		unsigned int domain = get_domain();
+		const char *segment;
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+		/*
+		 * Get the domain register for the parent context. In user
+		 * mode, we don't save the DACR, so let's use what it should
+		 * be. For other modes, we place it after the pt_regs struct.
+		 */
+		if (user_mode(regs))
+			domain = DACR_UACCESS_ENABLE;
+		else
+			domain = *(unsigned int *)(regs + 1);
+#endif
+
+		if ((domain & domain_mask(DOMAIN_USER)) ==
+		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+			segment = "none";
+		else if (get_fs() == get_ds())
+			segment = "kernel";
+		else
+			segment = "user";
+
+		printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
+			buf, interrupts_enabled(regs) ? "n" : "ff",
+			fast_interrupts_enabled(regs) ? "n" : "ff",
+			processor_modes[processor_mode(regs)],
+			isa_modes[isa_mode(regs)], segment);
+	}
 #else
 	printk("xPSR: %08lx\n", regs->ARM_cpsr);
 #endif
@@ -146,10 +163,9 @@
 		buf[0] = '\0';
 #ifdef CONFIG_CPU_CP15_MMU
 		{
-			unsigned int transbase, dac;
+			unsigned int transbase, dac = get_domain();
 			asm("mrc p15, 0, %0, c2, c0\n\t"
-			    "mrc p15, 0, %1, c3, c0\n"
-			    : "=r" (transbase), "=r" (dac));
+			    : "=r" (transbase));
 			snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
 			  	transbase, dac);
 		}
@@ -210,6 +226,14 @@
 
 	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
 
+	/*
+	 * Copy the initial value of the domain access control register
+	 * from the current thread: thread->addr_limit will have been
+	 * copied from the current thread via setup_thread_stack() in
+	 * kernel/fork.c
+	 */
+	thread->cpu_domain = get_domain();
+
 	if (likely(!(p->flags & PF_KTHREAD))) {
 		*childregs = *current_pt_regs();
 		childregs->ARM_r0 = 0;
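
The new __show_regs() hunk above derives the printed "Segment" from the domain access control register rather than from get_fs() alone: when DOMAIN_USER is set to no-access (the CONFIG_CPU_SW_DOMAIN_PAN resting state), it reports "none". A user-space sketch of that decision; the DOMAIN_* values here are assumptions taken from arch/arm/include/asm/domain.h, with two bits of access type per domain:

	#include <stdint.h>
	#include <stdio.h>

	#define DOMAIN_USER		1	/* assumed value */
	#define DOMAIN_NOACCESS		0	/* assumed value */
	#define domain_mask(dom)	(3u << (2 * (dom)))
	#define domain_val(dom, type)	((uint32_t)(type) << (2 * (dom)))

	static const char *segment(uint32_t dacr, int fs_is_kernel)
	{
		if ((dacr & domain_mask(DOMAIN_USER)) ==
		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
			return "none";
		return fs_is_kernel ? "kernel" : "user";
	}

	int main(void)
	{
		printf("%s\n", segment(0x01, 0));	/* user bits 00 -> "none" */
		printf("%s\n", segment(0x05, 0));	/* user bits 01 -> "user" */
		return 0;
	}
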
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
deleted file mode 100644
index 2e60243..0000000
--- a/arch/arm/kernel/psci.c
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2012 ARM Limited
- *
- * Author: Will Deacon <will.deacon@arm.com>
- */
-
-#define pr_fmt(fmt) "psci: " fmt
-
-#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/reboot.h>
-#include <linux/pm.h>
-#include <uapi/linux/psci.h>
-
-#include <asm/compiler.h>
-#include <asm/errno.h>
-#include <asm/psci.h>
-#include <asm/system_misc.h>
-
-struct psci_operations psci_ops;
-
-static int (*invoke_psci_fn)(u32, u32, u32, u32);
-typedef int (*psci_initcall_t)(const struct device_node *);
-
-asmlinkage int __invoke_psci_fn_hvc(u32, u32, u32, u32);
-asmlinkage int __invoke_psci_fn_smc(u32, u32, u32, u32);
-
-enum psci_function {
-	PSCI_FN_CPU_SUSPEND,
-	PSCI_FN_CPU_ON,
-	PSCI_FN_CPU_OFF,
-	PSCI_FN_MIGRATE,
-	PSCI_FN_AFFINITY_INFO,
-	PSCI_FN_MIGRATE_INFO_TYPE,
-	PSCI_FN_MAX,
-};
-
-static u32 psci_function_id[PSCI_FN_MAX];
-
-static int psci_to_linux_errno(int errno)
-{
-	switch (errno) {
-	case PSCI_RET_SUCCESS:
-		return 0;
-	case PSCI_RET_NOT_SUPPORTED:
-		return -EOPNOTSUPP;
-	case PSCI_RET_INVALID_PARAMS:
-		return -EINVAL;
-	case PSCI_RET_DENIED:
-		return -EPERM;
-	};
-
-	return -EINVAL;
-}
-
-static u32 psci_power_state_pack(struct psci_power_state state)
-{
-	return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT)
-			& PSCI_0_2_POWER_STATE_ID_MASK) |
-		((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
-		 & PSCI_0_2_POWER_STATE_TYPE_MASK) |
-		((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
-		 & PSCI_0_2_POWER_STATE_AFFL_MASK);
-}
-
-static int psci_get_version(void)
-{
-	int err;
-
-	err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
-	return err;
-}
-
-static int psci_cpu_suspend(struct psci_power_state state,
-			    unsigned long entry_point)
-{
-	int err;
-	u32 fn, power_state;
-
-	fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
-	power_state = psci_power_state_pack(state);
-	err = invoke_psci_fn(fn, power_state, entry_point, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_off(struct psci_power_state state)
-{
-	int err;
-	u32 fn, power_state;
-
-	fn = psci_function_id[PSCI_FN_CPU_OFF];
-	power_state = psci_power_state_pack(state);
-	err = invoke_psci_fn(fn, power_state, 0, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_CPU_ON];
-	err = invoke_psci_fn(fn, cpuid, entry_point, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_migrate(unsigned long cpuid)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_MIGRATE];
-	err = invoke_psci_fn(fn, cpuid, 0, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_affinity_info(unsigned long target_affinity,
-		unsigned long lowest_affinity_level)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_AFFINITY_INFO];
-	err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0);
-	return err;
-}
-
-static int psci_migrate_info_type(void)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE];
-	err = invoke_psci_fn(fn, 0, 0, 0);
-	return err;
-}
-
-static int get_set_conduit_method(struct device_node *np)
-{
-	const char *method;
-
-	pr_info("probing for conduit method from DT.\n");
-
-	if (of_property_read_string(np, "method", &method)) {
-		pr_warn("missing \"method\" property\n");
-		return -ENXIO;
-	}
-
-	if (!strcmp("hvc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_hvc;
-	} else if (!strcmp("smc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_smc;
-	} else {
-		pr_warn("invalid \"method\" property: %s\n", method);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
-{
-	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
-}
-
-static void psci_sys_poweroff(void)
-{
-	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
-}
-
-/*
- * PSCI Function IDs for v0.2+ are well defined so use
- * standard values.
- */
-static int psci_0_2_init(struct device_node *np)
-{
-	int err, ver;
-
-	err = get_set_conduit_method(np);
-
-	if (err)
-		goto out_put_node;
-
-	ver = psci_get_version();
-
-	if (ver == PSCI_RET_NOT_SUPPORTED) {
-		/* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. */
-		pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
-		err = -EOPNOTSUPP;
-		goto out_put_node;
-	} else {
-		pr_info("PSCIv%d.%d detected in firmware.\n",
-				PSCI_VERSION_MAJOR(ver),
-				PSCI_VERSION_MINOR(ver));
-
-		if (PSCI_VERSION_MAJOR(ver) == 0 &&
-				PSCI_VERSION_MINOR(ver) < 2) {
-			err = -EINVAL;
-			pr_err("Conflicting PSCI version detected.\n");
-			goto out_put_node;
-		}
-	}
-
-	pr_info("Using standard PSCI v0.2 function IDs\n");
-	psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_CPU_SUSPEND;
-	psci_ops.cpu_suspend = psci_cpu_suspend;
-
-	psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
-	psci_ops.cpu_off = psci_cpu_off;
-
-	psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_CPU_ON;
-	psci_ops.cpu_on = psci_cpu_on;
-
-	psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_MIGRATE;
-	psci_ops.migrate = psci_migrate;
-
-	psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN_AFFINITY_INFO;
-	psci_ops.affinity_info = psci_affinity_info;
-
-	psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
-		PSCI_0_2_FN_MIGRATE_INFO_TYPE;
-	psci_ops.migrate_info_type = psci_migrate_info_type;
-
-	arm_pm_restart = psci_sys_reset;
-
-	pm_power_off = psci_sys_poweroff;
-
-out_put_node:
-	of_node_put(np);
-	return err;
-}
-
-/*
- * PSCI < v0.2 get PSCI Function IDs via DT.
- */
-static int psci_0_1_init(struct device_node *np)
-{
-	u32 id;
-	int err;
-
-	err = get_set_conduit_method(np);
-
-	if (err)
-		goto out_put_node;
-
-	pr_info("Using PSCI v0.1 Function IDs from DT\n");
-
-	if (!of_property_read_u32(np, "cpu_suspend", &id)) {
-		psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
-		psci_ops.cpu_suspend = psci_cpu_suspend;
-	}
-
-	if (!of_property_read_u32(np, "cpu_off", &id)) {
-		psci_function_id[PSCI_FN_CPU_OFF] = id;
-		psci_ops.cpu_off = psci_cpu_off;
-	}
-
-	if (!of_property_read_u32(np, "cpu_on", &id)) {
-		psci_function_id[PSCI_FN_CPU_ON] = id;
-		psci_ops.cpu_on = psci_cpu_on;
-	}
-
-	if (!of_property_read_u32(np, "migrate", &id)) {
-		psci_function_id[PSCI_FN_MIGRATE] = id;
-		psci_ops.migrate = psci_migrate;
-	}
-
-out_put_node:
-	of_node_put(np);
-	return err;
-}
-
-static const struct of_device_id const psci_of_match[] __initconst = {
-	{ .compatible = "arm,psci", .data = psci_0_1_init},
-	{ .compatible = "arm,psci-0.2", .data = psci_0_2_init},
-	{},
-};
-
-int __init psci_init(void)
-{
-	struct device_node *np;
-	const struct of_device_id *matched_np;
-	psci_initcall_t init_fn;
-
-	np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
-	if (!np)
-		return -ENODEV;
-
-	init_fn = (psci_initcall_t)matched_np->data;
-	return init_fn(np);
-}
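
As with the perf core above, the PSCI client deleted here is consolidated with the arm64 copy rather than removed outright (the shared client code lands under drivers/firmware/ in this series). One detail worth keeping in mind from psci_0_2_init() above: firmware reports the PSCI version as a packed u32, decoded via PSCI_VERSION_MAJOR/MINOR. A sketch of that decoding, with the bit layout assumed from uapi/linux/psci.h:

	#include <stdint.h>
	#include <stdio.h>

	/* assumed layout: major in bits [31:16], minor in bits [15:0] */
	#define PSCI_VERSION_MAJOR(ver)	(((ver) >> 16) & 0xffff)
	#define PSCI_VERSION_MINOR(ver)	((ver) & 0xffff)

	int main(void)
	{
		uint32_t ver = 0x00000002;	/* what a v0.2 implementation reports */

		printf("PSCIv%u.%u detected in firmware.\n",
		       (unsigned)PSCI_VERSION_MAJOR(ver),
		       (unsigned)PSCI_VERSION_MINOR(ver));
		return 0;
	}
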
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index 28a1db4..61c04b0 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -17,6 +17,8 @@
 #include <linux/smp.h>
 #include <linux/of.h>
 #include <linux/delay.h>
+#include <linux/psci.h>
+
 #include <uapi/linux/psci.h>
 
 #include <asm/psci.h>
@@ -51,22 +53,34 @@
 {
 	if (psci_ops.cpu_on)
 		return psci_ops.cpu_on(cpu_logical_map(cpu),
-				       __pa(secondary_startup));
+					virt_to_idmap(&secondary_startup));
 	return -ENODEV;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+int psci_cpu_disable(unsigned int cpu)
+{
+	/* Fail early if we don't have CPU_OFF support */
+	if (!psci_ops.cpu_off)
+		return -EOPNOTSUPP;
+
+	/* Trusted OS will deny CPU_OFF */
+	if (psci_tos_resident_on(cpu))
+		return -EPERM;
+
+	return 0;
+}
+
 void __ref psci_cpu_die(unsigned int cpu)
 {
-       const struct psci_power_state ps = {
-               .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
-       };
+	u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
+		    PSCI_0_2_POWER_STATE_TYPE_SHIFT;
 
-       if (psci_ops.cpu_off)
-               psci_ops.cpu_off(ps);
+	if (psci_ops.cpu_off)
+		psci_ops.cpu_off(state);
 
-       /* We should never return */
-       panic("psci: cpu %d failed to shutdown\n", cpu);
+	/* We should never return */
+	panic("psci: cpu %d failed to shutdown\n", cpu);
 }
 
 int __ref psci_cpu_kill(unsigned int cpu)
@@ -109,6 +123,7 @@
 struct smp_operations __initdata psci_smp_ops = {
 	.smp_boot_secondary	= psci_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
+	.cpu_disable		= psci_cpu_disable,
 	.cpu_die		= psci_cpu_die,
 	.cpu_kill		= psci_cpu_kill,
 #endif
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 36c18b7..20edd34 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -31,12 +31,14 @@
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/sort.h>
+#include <linux/psci.h>
 
 #include <asm/unified.h>
 #include <asm/cp15.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/elf.h>
+#include <asm/fixmap.h>
 #include <asm/procinfo.h>
 #include <asm/psci.h>
 #include <asm/sections.h>
@@ -954,6 +956,9 @@
 	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
 	*cmdline_p = cmd_line;
 
+	if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM))
+		early_fixmap_init();
+
 	parse_early_param();
 
 #ifdef CONFIG_MMU
@@ -972,7 +977,7 @@
 	unflatten_device_tree();
 
 	arm_dt_init_cpu_maps();
-	psci_init();
+	psci_dt_init();
 	xen_early_init();
 #ifdef CONFIG_SMP
 	if (is_smp()) {
@@ -1015,7 +1020,7 @@
 
 	for_each_possible_cpu(cpu) {
 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
-		cpuinfo->cpu.hotpluggable = 1;
+		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
 		register_cpu(&cpuinfo->cpu, cpu);
 	}
 
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 423663e..b6cda06 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -562,6 +562,12 @@
 asmlinkage int
 do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 {
+	/*
+	 * The assembly code enters us with IRQs off, but it hasn't
+	 * informed the tracing code of that for efficiency reasons.
+	 * Update the trace code with the current status.
+	 */
+	trace_hardirqs_off();
 	do {
 		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
 			schedule();
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 3d6b782..ba0063c 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -175,13 +175,26 @@
 	if (smp_ops.cpu_disable)
 		return smp_ops.cpu_disable(cpu);
 
+	return 0;
+}
+
+int platform_can_hotplug_cpu(unsigned int cpu)
+{
+	/* cpu_die must be specified to support hotplug */
+	if (!smp_ops.cpu_die)
+		return 0;
+
+	if (smp_ops.cpu_can_disable)
+		return smp_ops.cpu_can_disable(cpu);
+
 	/*
 	 * By default, allow disabling all CPUs except the first one,
 	 * since this is special on a lot of platforms, e.g. because
 	 * of clock tick interrupts.
 	 */
-	return cpu == 0 ? -EPERM : 0;
+	return cpu != 0;
 }
+
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
@@ -253,7 +266,7 @@
  * of the other hotplug-cpu capable cores, so presumably coming
  * out of idle fixes this.
  */
-void __ref cpu_die(void)
+void arch_cpu_idle_dead(void)
 {
 	unsigned int cpu = smp_processor_id();
 
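
Note the convention flip above: smp_ops.cpu_disable() returned 0 on success or -EPERM, while the new cpu_can_disable()/platform_can_hotplug_cpu() pair are plain predicates, so the default case becomes "cpu != 0". A minimal self-check that the two defaults agree (hypothetical helper names):

	#include <assert.h>
	#include <errno.h>

	/* old default: 0 = may be disabled, -EPERM = may not */
	static int old_default_cpu_disable(unsigned int cpu)
	{
		return cpu == 0 ? -EPERM : 0;
	}

	/* new default: non-zero = hotpluggable */
	static int new_default_can_hotplug(unsigned int cpu)
	{
		return cpu != 0;
	}

	int main(void)
	{
		unsigned int cpu;

		for (cpu = 0; cpu < 4; cpu++)
			assert((old_default_cpu_disable(cpu) == 0) ==
			       (new_default_can_hotplug(cpu) != 0));
		return 0;
	}
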
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 1361756..5b26e7e 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -141,11 +141,14 @@
 
 	while (1) {
 		unsigned long temp;
+		unsigned int __ua_flags;
 
+		__ua_flags = uaccess_save_and_enable();
 		if (type == TYPE_SWPB)
 			__user_swpb_asm(*data, address, res, temp);
 		else
 			__user_swp_asm(*data, address, res, temp);
+		uaccess_restore(__ua_flags);
 
 		if (likely(res != -EAGAIN) || signal_pending(current))
 			break;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index d358226..969f9d9 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -870,7 +870,6 @@
 	kuser_init(vectors_base);
 
 	flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
-	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 #else /* ifndef CONFIG_CPU_V7M */
 	/*
 	 * on V7-M there is no need to copy the vector table to a dedicated
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 1710fd7..970d6c0 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -12,14 +12,14 @@
 
 		.text
 
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
  * Returns  : number of bytes NOT cleared
  */
 ENTRY(__clear_user_std)
-WEAK(__clear_user)
+WEAK(arm_clear_user)
 		stmfd	sp!, {r1, lr}
 		mov	r2, #0
 		cmp	r1, #4
@@ -44,7 +44,7 @@
 USER(		strnebt	r2, [r0])
 		mov	r0, #0
 		ldmfd	sp!, {r1, pc}
-ENDPROC(__clear_user)
+ENDPROC(arm_clear_user)
 ENDPROC(__clear_user_std)
 
 		.pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 7a235b9..1512beb 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *	size_t __copy_from_user(void *to, const void *from, size_t n)
+ *	size_t arm_copy_from_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
@@ -89,11 +89,11 @@
 
 	.text
 
-ENTRY(__copy_from_user)
+ENTRY(arm_copy_from_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_from_user)
+ENDPROC(arm_copy_from_user)
 
 	.pushsection .fixup,"ax"
 	.align 0
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 9648b06..caf5019 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *	size_t __copy_to_user(void *to, const void *from, size_t n)
+ *	size_t arm_copy_to_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
@@ -93,11 +93,11 @@
 	.text
 
 ENTRY(__copy_to_user_std)
-WEAK(__copy_to_user)
+WEAK(arm_copy_to_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_to_user)
+ENDPROC(arm_copy_to_user)
 ENDPROC(__copy_to_user_std)
 
 	.pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1d0957e..1712f13 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -17,6 +17,19 @@
 
 		.text
 
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+		.macro	save_regs
+		mrc	p15, 0, ip, c3, c0, 0
+		stmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		uaccess_enable ip
+		.endm
+
+		.macro	load_regs
+		ldmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		mcr	p15, 0, ip, c3, c0, 0
+		ret	lr
+		.endm
+#else
 		.macro	save_regs
 		stmfd	sp!, {r1, r2, r4 - r8, lr}
 		.endm
@@ -24,6 +37,7 @@
 		.macro	load_regs
 		ldmfd	sp!, {r1, r2, r4 - r8, pc}
 		.endm
+#endif
 
 		.macro	load1b,	reg1
 		ldrusr	\reg1, r0, 1
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 4b39af2..d72b909 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -136,7 +136,7 @@
 }
 
 unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	/*
 	 * This test is stubbed out of the main function above to keep
@@ -190,7 +190,7 @@
 	return n;
 }
 
-unsigned long __clear_user(void __user *addr, unsigned long n)
+unsigned long arm_clear_user(void __user *addr, unsigned long n)
 {
 	/* See rationale for this in __copy_to_user() above. */
 	if (n < 64)
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 265ffeb..80e277c 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -369,7 +369,7 @@
 		return;
 	}
 
-	sram_pool = gen_pool_get(&pdev->dev);
+	sram_pool = gen_pool_get(&pdev->dev, NULL);
 	if (!sram_pool) {
 		pr_warn("%s: sram pool unavailable!\n", __func__);
 		return;
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 231fba0..6050a14 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -28,8 +28,8 @@
 #include <linux/reboot.h>
 #include <linux/amba/bus.h>
 #include <linux/platform_device.h>
+#include <linux/psci.h>
 
-#include <asm/psci.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
diff --git a/arch/arm/mach-highbank/pm.c b/arch/arm/mach-highbank/pm.c
index 7f2bd85..4003116 100644
--- a/arch/arm/mach-highbank/pm.c
+++ b/arch/arm/mach-highbank/pm.c
@@ -16,19 +16,21 @@
 
 #include <linux/cpu_pm.h>
 #include <linux/init.h>
+#include <linux/psci.h>
 #include <linux/suspend.h>
 
 #include <asm/suspend.h>
-#include <asm/psci.h>
+
+#include <uapi/linux/psci.h>
+
+#define HIGHBANK_SUSPEND_PARAM \
+	((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \
+	 (1 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \
+	 (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT))
 
 static int highbank_suspend_finish(unsigned long val)
 {
-	const struct psci_power_state ps = {
-		.type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
-		.affinity_level = 1,
-	};
-
-	return psci_ops.cpu_suspend(ps, __pa(cpu_resume));
+	return psci_ops.cpu_suspend(HIGHBANK_SUSPEND_PARAM, __pa(cpu_resume));
 }
 
 static int highbank_pm_enter(suspend_state_t state)
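
HIGHBANK_SUSPEND_PARAM above open-codes exactly what psci_power_state_pack() (deleted further up) used to build from the struct: id 0, type POWER_DOWN, affinity level 1. A quick self-check of that packing, with the field positions assumed from uapi/linux/psci.h:

	#include <assert.h>
	#include <stdint.h>

	#define PSCI_0_2_POWER_STATE_ID_SHIFT	0	/* assumed */
	#define PSCI_0_2_POWER_STATE_TYPE_SHIFT	16	/* assumed */
	#define PSCI_0_2_POWER_STATE_AFFL_SHIFT	24	/* assumed */
	#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1

	static uint32_t pack(uint32_t id, uint32_t type, uint32_t affl)
	{
		return (id << PSCI_0_2_POWER_STATE_ID_SHIFT) |
		       (type << PSCI_0_2_POWER_STATE_TYPE_SHIFT) |
		       (affl << PSCI_0_2_POWER_STATE_AFFL_SHIFT);
	}

	int main(void)
	{
		/* id 0, power-down, affinity level 1 -> 0x01010000 */
		assert(pack(0, PSCI_POWER_STATE_TYPE_POWER_DOWN, 1) == 0x01010000);
		return 0;
	}
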
diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c
index 1885676..532d4b0 100644
--- a/arch/arm/mach-imx/pm-imx5.c
+++ b/arch/arm/mach-imx/pm-imx5.c
@@ -297,7 +297,7 @@
 		goto put_node;
 	}
 
-	ocram_pool = gen_pool_get(&pdev->dev);
+	ocram_pool = gen_pool_get(&pdev->dev, NULL);
 	if (!ocram_pool) {
 		pr_warn("%s: ocram pool unavailable!\n", __func__);
 		ret = -ENODEV;
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
index 93ecf55..8ff8fc0 100644
--- a/arch/arm/mach-imx/pm-imx6.c
+++ b/arch/arm/mach-imx/pm-imx6.c
@@ -451,7 +451,7 @@
 		goto put_node;
 	}
 
-	ocram_pool = gen_pool_get(&pdev->dev);
+	ocram_pool = gen_pool_get(&pdev->dev, NULL);
 	if (!ocram_pool) {
 		pr_warn("%s: ocram pool unavailable!\n", __func__);
 		ret = -ENODEV;
diff --git a/arch/arm/mach-mmp/pm-pxa910.c b/arch/arm/mach-mmp/pm-pxa910.c
index 04c9daf..7db5870 100644
--- a/arch/arm/mach-mmp/pm-pxa910.c
+++ b/arch/arm/mach-mmp/pm-pxa910.c
@@ -18,6 +18,7 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <asm/mach-types.h>
+#include <asm/outercache.h>
 #include <mach/hardware.h>
 #include <mach/cputype.h>
 #include <mach/addr-map.h>
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 9e2a684..07d2e10 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -29,6 +29,7 @@
 	select HAVE_ARM_SCU if SMP
 	select HAVE_ARM_TWD if SMP
 	select OMAP_INTERCONNECT
+	select OMAP_INTERCONNECT_BARRIER
 	select PL310_ERRATA_588369 if CACHE_L2X0
 	select PL310_ERRATA_727915 if CACHE_L2X0
 	select PM_OPP if PM
@@ -46,6 +47,7 @@
 	select HAVE_ARM_TWD if SMP
 	select HAVE_ARM_ARCH_TIMER
 	select ARM_ERRATA_798181 if SMP
+	select OMAP_INTERCONNECT_BARRIER
 
 config SOC_AM33XX
 	bool "TI AM33XX"
@@ -71,6 +73,7 @@
 	select HAVE_ARM_ARCH_TIMER
 	select IRQ_CROSSBAR
 	select ARM_ERRATA_798181 if SMP
+	select OMAP_INTERCONNECT_BARRIER
 
 config ARCH_OMAP2PLUS
 	bool
@@ -92,6 +95,10 @@
 	help
 	  Systems based on OMAP2, OMAP3, OMAP4 or OMAP5
 
+config OMAP_INTERCONNECT_BARRIER
+	bool
+	select ARM_HEAVY_MB
+
 
 if ARCH_OMAP2PLUS
 
diff --git a/arch/arm/mach-omap2/common.c b/arch/arm/mach-omap2/common.c
index eae6a0e..484cdad 100644
--- a/arch/arm/mach-omap2/common.c
+++ b/arch/arm/mach-omap2/common.c
@@ -30,4 +30,5 @@
 void __init omap_reserve(void)
 {
 	omap_secure_ram_reserve_memblock();
+	omap_barrier_reserve_memblock();
 }
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 749d50b..92e92cf 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -189,6 +189,15 @@
 }
 #endif
 
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
+void omap_barrier_reserve_memblock(void);
+void omap_barriers_init(void);
+#else
+static inline void omap_barrier_reserve_memblock(void)
+{
+}
+#endif
+
 /* This gets called from mach-omap2/io.c, do not call this */
 void __init omap2_set_globals_tap(u32 class, void __iomem *tap);
 
diff --git a/arch/arm/mach-omap2/include/mach/barriers.h b/arch/arm/mach-omap2/include/mach/barriers.h
deleted file mode 100644
index 1c582a8..0000000
--- a/arch/arm/mach-omap2/include/mach/barriers.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * OMAP memory barrier header.
- *
- * Copyright (C) 2011 Texas Instruments, Inc.
- *  Santosh Shilimkar <santosh.shilimkar@ti.com>
- *  Richard Woodruff <r-woodruff2@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __MACH_BARRIERS_H
-#define __MACH_BARRIERS_H
-
-#include <asm/outercache.h>
-
-extern void omap_bus_sync(void);
-
-#define rmb()		dsb()
-#define wmb()		do { dsb(); outer_sync(); omap_bus_sync(); } while (0)
-#define mb()		wmb()
-
-#endif	/* __MACH_BARRIERS_H */
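
The deleted mach/barriers.h relied on ARCH_HAS_BARRIERS to override mb()/wmb() per machine. Its replacement, visible in the Kconfig and omap4-common.c hunks elsewhere in this diff, is the ARM_HEAVY_MB hook: the generic heavy barrier calls through a soc_mb function pointer, which omap_barriers_init() points at omap4_mb(). A self-contained model of that design (the real hook also issues dsb() and drains the outer cache before calling soc_mb):

	#include <stdio.h>

	/* set by the SoC at init time; NULL means no extra drain needed */
	static void (*soc_mb)(void);

	static void generic_heavy_mb(void)
	{
		/* the kernel version issues dsb() and outer_cache.sync() here */
		if (soc_mb)
			soc_mb();
	}

	/* stands in for omap4_mb(), which writes the strongly-ordered page */
	static void model_omap4_mb(void)
	{
		puts("drain MPU->MA->EMIF write path");
	}

	int main(void)
	{
		generic_heavy_mb();		/* before init: nothing extra */
		soc_mb = model_omap4_mb;
		generic_heavy_mb();		/* after init: SoC drain runs */
		return 0;
	}
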
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 6a4822d..980c937 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -352,6 +352,7 @@
 void __init omap4_map_io(void)
 {
 	iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc));
+	omap_barriers_init();
 }
 #endif
 
@@ -359,6 +360,7 @@
 void __init omap5_map_io(void)
 {
 	iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc));
+	omap_barriers_init();
 }
 #endif
 
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 16350ee..949696b 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -51,6 +51,127 @@
 
 #define IRQ_LOCALTIMER		29
 
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
+
+/* Used to implement memory barrier on DRAM path */
+#define OMAP4_DRAM_BARRIER_VA			0xfe600000
+
+static void __iomem *dram_sync, *sram_sync;
+static phys_addr_t dram_sync_paddr;
+static u32 dram_sync_size;
+
+/*
+ * The OMAP4 bus structure contains asynchronous bridges which can buffer
+ * data writes from the MPU. These bridges can be found on the paths
+ * between the MPU and EMIF, and between the MPU and the L3 interconnects.
+ *
+ * We need to be careful about re-ordering which can happen as a result
+ * of different accesses being performed via different paths, and
+ * therefore different asynchronous bridges.
+ */
+
+/*
+ * OMAP4 interconnect barrier which is called for each mb() and wmb().
+ * This is to ensure that normal paths to DRAM (normal memory, cacheable
+ * accesses) are properly synchronised with writes to DMA coherent memory
+ * (normal memory, uncacheable) and device writes.
+ *
+ * The mb() and wmb() barriers operate only on the MPU->MA->EMIF
+ * path, as we need to ensure that data is visible to other system
+ * masters prior to writes to those system masters being seen.
+ *
+ * Note: the SRAM path is not synchronised via mb() and wmb().
+ */
+static void omap4_mb(void)
+{
+	if (dram_sync)
+		writel_relaxed(0, dram_sync);
+}
+
+/*
+ * OMAP4 Errata i688 - asynchronous bridge corruption when entering WFI.
+ *
+ * If data is stalled inside an asynchronous bridge because of back
+ * pressure, it may be accepted multiple times, creating a pointer
+ * misalignment that will corrupt the next transfers on that data path
+ * until the next reset of the system. There is no recovery procedure
+ * once the issue is hit; the path remains consistently broken.
+ *
+ * Async bridges can be found on the paths between the MPU and EMIF, and
+ * between the MPU and the L3 interconnects.
+ *
+ * This situation can happen only when idle is initiated by a Master
+ * Request Disconnection (which is triggered by software when executing
+ * WFI on the CPU).
+ *
+ * The workaround for this erratum needs all the initiators connected
+ * through an async bridge to ensure that the data path is properly
+ * drained before issuing WFI: one strongly ordered access performed to
+ * the target right before executing the WFI meets this condition.
+ *
+ * In the MPU case, the L3 T2ASYNC and DDR T2ASYNC FIFOs need to be
+ * drained. An IO barrier ensures that there is no synchronisation loss
+ * on initiators operating on both interconnect ports simultaneously.
+ *
+ * This is a stronger version of the OMAP4 memory barrier above: it
+ * operates not only on the MPU->MA->EMIF path but also on the MPU->OCP
+ * path, and is necessary prior to executing a WFI.
+ */
+void omap_interconnect_sync(void)
+{
+	if (dram_sync && sram_sync) {
+		writel_relaxed(readl_relaxed(dram_sync), dram_sync);
+		writel_relaxed(readl_relaxed(sram_sync), sram_sync);
+		isb();
+	}
+}
+
+static int __init omap4_sram_init(void)
+{
+	struct device_node *np;
+	struct gen_pool *sram_pool;
+
+	np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
+	if (!np)
+		pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
+			__func__);
+	sram_pool = of_gen_pool_get(np, "sram", 0);
+	if (!sram_pool)
+		pr_warn("%s:Unable to get sram pool needed to handle errata I688\n",
+			__func__);
+	else
+		sram_sync = (void *)gen_pool_alloc(sram_pool, PAGE_SIZE);
+
+	return 0;
+}
+omap_arch_initcall(omap4_sram_init);
+
+/* Steal one page physical memory for barrier implementation */
+void __init omap_barrier_reserve_memblock(void)
+{
+	dram_sync_size = ALIGN(PAGE_SIZE, SZ_1M);
+	dram_sync_paddr = arm_memblock_steal(dram_sync_size, SZ_1M);
+}
+
+void __init omap_barriers_init(void)
+{
+	struct map_desc dram_io_desc[1];
+
+	dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
+	dram_io_desc[0].pfn = __phys_to_pfn(dram_sync_paddr);
+	dram_io_desc[0].length = dram_sync_size;
+	dram_io_desc[0].type = MT_MEMORY_RW_SO;
+	iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
+	dram_sync = (void __iomem *) dram_io_desc[0].virtual;
+
+	pr_info("OMAP4: Map %pa to %p for dram barrier\n",
+		&dram_sync_paddr, dram_sync);
+
+	soc_mb = omap4_mb;
+}
+
+#endif
+
 void gic_dist_disable(void)
 {
 	if (gic_dist_base_addr)
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index ad1bb94..9b09d85 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -333,14 +333,12 @@
 
 #endif	/* defined(CONFIG_SMP) && defined(CONFIG_PM) */
 
-ENTRY(omap_bus_sync)
-	ret	lr
-ENDPROC(omap_bus_sync)
-
 ENTRY(omap_do_wfi)
 	stmfd	sp!, {lr}
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
 	/* Drain interconnect write buffers. */
-	bl omap_bus_sync
+	bl	omap_interconnect_sync
+#endif
 
 	/*
 	 * Execute an ISB instruction to ensure that all of the
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index d99d08e..83e94c9 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -16,6 +16,7 @@
 #include <linux/of_platform.h>
 #include <linux/io.h>
 #include <linux/rtc/sirfsoc_rtciobrg.h>
+#include <asm/outercache.h>
 #include <asm/suspend.h>
 #include <asm/hardware/cache-l2x0.h>
 
diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h
index 476092b..8d27ec5 100644
--- a/arch/arm/mach-shmobile/common.h
+++ b/arch/arm/mach-shmobile/common.h
@@ -13,7 +13,7 @@
 extern void shmobile_smp_sleep(void);
 extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
 			      unsigned long arg);
-extern int shmobile_smp_cpu_disable(unsigned int cpu);
+extern bool shmobile_smp_cpu_can_disable(unsigned int cpu);
 extern void shmobile_boot_scu(void);
 extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
 extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index 3923e09..b23378f 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -31,8 +31,8 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-int shmobile_smp_cpu_disable(unsigned int cpu)
+bool shmobile_smp_cpu_can_disable(unsigned int cpu)
 {
-	return 0; /* Hotplug of any CPU is supported */
+	return true; /* Hotplug of any CPU is supported */
 }
 #endif
diff --git a/arch/arm/mach-shmobile/smp-r8a7790.c b/arch/arm/mach-shmobile/smp-r8a7790.c
index 2ef0054..4b33d43 100644
--- a/arch/arm/mach-shmobile/smp-r8a7790.c
+++ b/arch/arm/mach-shmobile/smp-r8a7790.c
@@ -64,7 +64,7 @@
 	.smp_prepare_cpus	= r8a7790_smp_prepare_cpus,
 	.smp_boot_secondary	= shmobile_smp_apmu_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-	.cpu_disable		= shmobile_smp_cpu_disable,
+	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
 	.cpu_die		= shmobile_smp_apmu_cpu_die,
 	.cpu_kill		= shmobile_smp_apmu_cpu_kill,
 #endif
diff --git a/arch/arm/mach-shmobile/smp-r8a7791.c b/arch/arm/mach-shmobile/smp-r8a7791.c
index 5e2d1db7..b2508c0 100644
--- a/arch/arm/mach-shmobile/smp-r8a7791.c
+++ b/arch/arm/mach-shmobile/smp-r8a7791.c
@@ -58,7 +58,7 @@
 	.smp_prepare_cpus	= r8a7791_smp_prepare_cpus,
 	.smp_boot_secondary	= r8a7791_smp_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-	.cpu_disable		= shmobile_smp_cpu_disable,
+	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
 	.cpu_die		= shmobile_smp_apmu_cpu_die,
 	.cpu_kill		= shmobile_smp_apmu_cpu_kill,
 #endif
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index d03aa11..bc2824a 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -60,7 +60,7 @@
 	.smp_prepare_cpus	= sh73a0_smp_prepare_cpus,
 	.smp_boot_secondary	= sh73a0_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-	.cpu_disable		= shmobile_smp_cpu_disable,
+	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
 	.cpu_die		= shmobile_smp_scu_cpu_die,
 	.cpu_kill		= shmobile_smp_scu_cpu_kill,
 #endif
diff --git a/arch/arm/mach-socfpga/pm.c b/arch/arm/mach-socfpga/pm.c
index 6a4199f..c378ab0 100644
--- a/arch/arm/mach-socfpga/pm.c
+++ b/arch/arm/mach-socfpga/pm.c
@@ -56,7 +56,7 @@
 		goto put_node;
 	}
 
-	ocram_pool = gen_pool_get(&pdev->dev);
+	ocram_pool = gen_pool_get(&pdev->dev, NULL);
 	if (!ocram_pool) {
 		pr_warn("%s: ocram pool unavailable!\n", __func__);
 		ret = -ENODEV;
diff --git a/arch/arm/mach-ux500/cache-l2x0.c b/arch/arm/mach-ux500/cache-l2x0.c
index 7557bed..780bd13 100644
--- a/arch/arm/mach-ux500/cache-l2x0.c
+++ b/arch/arm/mach-ux500/cache-l2x0.c
@@ -8,6 +8,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 
+#include <asm/outercache.h>
 #include <asm/hardware/cache-l2x0.h>
 
 #include "db8500-regs.h"
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index ba708ce..f805603 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -20,10 +20,10 @@
 #include <linux/mfd/dbx500-prcmu.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/regulator/machine.h>
 #include <linux/random.h>
 
-#include <asm/pmu.h>
 #include <asm/mach/map.h>
 
 #include "setup.h"
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7c6b976..df7537f 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -883,6 +883,7 @@
 
 config OUTER_CACHE_SYNC
 	bool
+	select ARM_HEAVY_MB
 	help
 	  The outer cache has an outer_cache_fns.sync function pointer
 	  that can be used to drain the write buffer of the outer cache.
@@ -1031,6 +1032,9 @@
 	  This option allows the use of custom mandatory barriers
 	  included via the mach/barriers.h file.
 
+config ARM_HEAVY_MB
+	bool
+
 config ARCH_SUPPORTS_BIG_ENDIAN
 	bool
 	help
diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S
index 54473cd4..b3b31e3 100644
--- a/arch/arm/mm/abort-ev4.S
+++ b/arch/arm/mm/abort-ev4.S
@@ -19,6 +19,7 @@
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 	ldr	r3, [r4]			@ read aborted ARM instruction
+	uaccess_disable ip			@ disable userspace access
 	bic	r1, r1, #1 << 11 | 1 << 10	@ clear bits 11 and 10 of FSR
 	tst	r3, #1 << 20			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.
diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S
index a0908d4..a6a381a 100644
--- a/arch/arm/mm/abort-ev5t.S
+++ b/arch/arm/mm/abort-ev5t.S
@@ -21,8 +21,10 @@
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
+	uaccess_disable ip			@ disable user access
 	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
-	do_ldrd_abort tmp=ip, insn=r3
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ check write
 	orreq	r1, r1, #1 << 11
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S
index 4006b7a..00ab011 100644
--- a/arch/arm/mm/abort-ev5tj.S
+++ b/arch/arm/mm/abort-ev5tj.S
@@ -24,7 +24,9 @@
 	bne	do_DataAbort
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
-	do_ldrd_abort tmp=ip, insn=r3
+	uaccess_disable ip			@ disable userspace access
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8c48c5c..8801a15 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,16 +26,18 @@
 	ldr	ip, =0x4107b36
 	mrc	p15, 0, r3, c0, c0, 0		@ get processor id
 	teq	ip, r3, lsr #4			@ r0 ARM1136?
-	bne	do_DataAbort
+	bne	1f
 	tst	r5, #PSR_J_BIT			@ Java?
 	tsteq	r5, #PSR_T_BIT			@ Thumb?
-	bne	do_DataAbort
+	bne	1f
 	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
 	ldr	r3, [r4]			@ read aborted ARM instruction
  ARM_BE8(rev	r3, r3)
 
-	do_ldrd_abort tmp=ip, insn=r3
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	1f				@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
 #endif
+1:	uaccess_disable ip			@ disable userspace access
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 4812ad0..e8d0e08 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -15,6 +15,7 @@
 ENTRY(v7_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
+	uaccess_disable ip			@ disable userspace access
 
 	/*
 	 * V6 code adjusts the returned DFSR.
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index f398258..6d8e8e3 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -26,6 +26,7 @@
 #endif
 	bne	.data_thumb_abort
 	ldr	r8, [r4]			@ read arm instruction
+	uaccess_disable ip			@ disable userspace access
 	tst	r8, #1 << 20			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.
 	and	r7, r8, #15 << 24
@@ -155,6 +156,7 @@
 
 .data_thumb_abort:
 	ldrh	r8, [r4]			@ read instruction
+	uaccess_disable ip			@ disable userspace access
 	tst	r8, #1 << 11			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 8			@ yes
 	and	r7, r8, #15 << 12
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 2cbf68e..4509bee 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -13,6 +13,7 @@
 	tst	\psr, #PSR_T_BIT
 	beq	not_thumb
 	ldrh	\tmp, [\pc]			@ Read aborted Thumb instruction
+	uaccess_disable ip			@ disable userspace access
 	and	\tmp, \tmp, # 0xfe00		@ Mask opcode field
 	cmp	\tmp, # 0x5600			@ Is it ldrsb?
 	orreq	\tmp, \tmp, #1 << 11		@ Set L-bit if yes
@@ -29,12 +30,9 @@
  *   [7:4] == 1101
  *    [20] == 0
  */
-	.macro	do_ldrd_abort, tmp, insn
-	tst	\insn, #0x0e100000		@ [27:25,20] == 0
-	bne	not_ldrd
-	and	\tmp, \insn, #0x000000f0	@ [7:4] == 1101
-	cmp	\tmp, #0x000000d0
-	beq	do_DataAbort
-not_ldrd:
+	.macro	teq_ldrd, tmp, insn
+	mov	\tmp, #0x0e100000
+	orr	\tmp, #0x000000f0
+	and	\tmp, \insn, \tmp
+	teq	\tmp, #0x000000d0
 	.endm
-
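
For reference, the new teq_ldrd test is equivalent to the following C predicate — a sketch for illustration only (insn_is_ldrd is not a name used in the tree):

	#include <linux/types.h>

	/* Matches the LDRD encoding checked by teq_ldrd:
	 * [27:25] == 000, [20] == 0 and [7:4] == 1101. */
	static inline bool insn_is_ldrd(u32 insn)
	{
		return (insn & 0x0e1000f0) == 0x000000d0;
	}
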
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 097181e..5c1b7a7 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -368,7 +368,6 @@
 	struct device_node *node;
 	void __iomem *base;
 	bool l2_wt_override = false;
-	struct resource res;
 
 #if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
 	l2_wt_override = true;
@@ -376,10 +375,7 @@
 
 	node = of_find_matching_node(NULL, feroceon_ids);
 	if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
-		if (of_address_to_resource(node, 0, &res))
-			return -ENODEV;
-
-		base = ioremap(res.start, resource_size(&res));
+		base = of_iomap(node, 0);
 		if (!base)
 			return -ENOMEM;
 
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 71b3d33..493692d 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1171,6 +1171,11 @@
 		}
 	}
 
+	if (of_property_read_bool(np, "arm,shared-override")) {
+		*aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
+		*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
+	}
+
 	prefetch = l2x0_saved_regs.prefetch_ctrl;
 
 	ret = of_property_read_u32(np, "arm,double-linefill", &val);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index cba12f3..bf35abc 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -39,6 +39,7 @@
 #include <asm/system_info.h>
 #include <asm/dma-contiguous.h>
 
+#include "dma.h"
 #include "mm.h"
 
 /*
@@ -648,14 +649,18 @@
 	size = PAGE_ALIGN(size);
 	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
-	if (is_coherent || nommu())
+	if (nommu())
+		addr = __alloc_simple_buffer(dev, size, gfp, &page);
+	else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
+		addr = __alloc_from_contiguous(dev, size, prot, &page,
+					       caller, want_vaddr);
+	else if (is_coherent)
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (!(gfp & __GFP_WAIT))
 		addr = __alloc_from_pool(size, &page);
-	else if (!dev_get_cma_area(dev))
-		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
 	else
-		addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
+		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
+					    caller, want_vaddr);
 
 	if (page)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -683,13 +688,12 @@
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	void *memory;
 
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;
 
-	return __dma_alloc(dev, size, handle, gfp, prot, true,
+	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
 			   attrs, __builtin_return_address(0));
 }
 
@@ -753,12 +757,12 @@
 
 	size = PAGE_ALIGN(size);
 
-	if (is_coherent || nommu()) {
+	if (nommu()) {
 		__dma_free_buffer(page, size);
-	} else if (__free_from_pool(cpu_addr, size)) {
+	} else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
 		return;
 	} else if (!dev_get_cma_area(dev)) {
-		if (want_vaddr)
+		if (want_vaddr && !is_coherent)
 			__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
@@ -1520,7 +1524,7 @@
 		return -ENOMEM;
 
 	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
-		phys_addr_t phys = page_to_phys(sg_page(s));
+		phys_addr_t phys = sg_phys(s) & PAGE_MASK;
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
 		if (!is_coherent &&
diff --git a/arch/arm/mm/dma.h b/arch/arm/mm/dma.h
new file mode 100644
index 0000000..70ea6852
--- /dev/null
+++ b/arch/arm/mm/dma.h
@@ -0,0 +1,32 @@
+#ifndef DMA_H
+#define DMA_H
+
+#include <asm/glue-cache.h>
+
+#ifndef MULTI_CACHE
+#define dmac_map_area			__glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area 		__glue(_CACHE,_dma_unmap_area)
+
+/*
+ * These are private to the dma-mapping API.  Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
+
+#else
+
+/*
+ * These are private to the dma-mapping API.  Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+#define dmac_map_area			cpu_cache.dma_map_area
+#define dmac_unmap_area 		cpu_cache.dma_unmap_area
+
+#endif
+
+#endif
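
A minimal sketch of the streaming-DMA pattern these private hooks serve — the sketch_* callers are invented for illustration, not kernel code:

	/* Before the device touches the buffer: clean/invalidate the CPU
	 * cache for the region, depending on the DMA direction. */
	static void sketch_cpu_to_dev(const void *kaddr, size_t size, int dir)
	{
		dmac_map_area(kaddr, size, dir);
	}

	/* After the device is done: make device-written data visible
	 * to the CPU. */
	static void sketch_dev_to_cpu(const void *kaddr, size_t size, int dir)
	{
		dmac_unmap_area(kaddr, size, dir);
	}
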
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 34b66af..1ec8e75 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -21,6 +21,21 @@
 
 #include "mm.h"
 
+#ifdef CONFIG_ARM_HEAVY_MB
+void (*soc_mb)(void);
+
+void arm_heavy_mb(void)
+{
+#ifdef CONFIG_OUTER_CACHE_SYNC
+	if (outer_cache.sync)
+		outer_cache.sync();
+#endif
+	if (soc_mb)
+		soc_mb();
+}
+EXPORT_SYMBOL(arm_heavy_mb);
+#endif
+
 #ifdef CONFIG_CPU_CACHE_VIPT
 
 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
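
A platform needing an extra barrier beyond the outer-cache sync would hook soc_mb from its init code — a sketch under invented names (my_soc_barrier, my_soc_init):

	static void my_soc_barrier(void)
	{
		/* e.g. read back a bus register to drain posted writes */
	}

	static void __init my_soc_init(void)
	{
		soc_mb = my_soc_barrier;	/* called by arm_heavy_mb() above */
	}
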
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index ee8dfa7..9df5f09 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,7 @@
 
 	type = kmap_atomic_idx_push();
 
-	idx = type + KM_TYPE_NR * smp_processor_id();
+	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
@@ -106,7 +106,7 @@
 
 	if (kvaddr >= (void *)FIXADDR_START) {
 		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
+		idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
@@ -138,7 +138,7 @@
 		return page_address(page);
 
 	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR * smp_processor_id();
+	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 870838a..7cd1514 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -291,13 +291,13 @@
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_HIGH_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_USER | L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_MEMORY_RWX] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
@@ -357,6 +357,47 @@
 }
 EXPORT_SYMBOL(get_mem_type);
 
+static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
+
+static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+	__aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
+
+static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
+{
+	return &bm_pte[pte_index(addr)];
+}
+
+static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
+{
+	return pte_offset_kernel(dir, addr);
+}
+
+static inline pmd_t * __init fixmap_pmd(unsigned long addr)
+{
+	pgd_t *pgd = pgd_offset_k(addr);
+	pud_t *pud = pud_offset(pgd, addr);
+	pmd_t *pmd = pmd_offset(pud, addr);
+
+	return pmd;
+}
+
+void __init early_fixmap_init(void)
+{
+	pmd_t *pmd;
+
+	/*
+	 * The early fixmap range spans multiple pmds, for which
+	 * we are not prepared:
+	 */
+	BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+		     != FIXADDR_TOP >> PMD_SHIFT);
+
+	pmd = fixmap_pmd(FIXADDR_TOP);
+	pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
+	pte_offset_fixmap = pte_offset_early_fixmap;
+}
+
 /*
  * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
  * As a result, this can only be called with preemption disabled, as under
@@ -365,7 +406,7 @@
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 {
 	unsigned long vaddr = __fix_to_virt(idx);
-	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+	pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
 
 	/* Make sure fixmap region does not exceed available allocation. */
 	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
@@ -855,7 +896,7 @@
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-	    md->virtual >= PAGE_OFFSET &&
+	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
 	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
 		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
 			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
@@ -1219,10 +1260,10 @@
 
 /*
  * Set up the device mappings.  Since we clear out the page tables for all
- * mappings above VMALLOC_START, we will remove any debug device mappings.
- * This means you have to be careful how you debug this function, or any
- * called function.  This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
+ * mappings above VMALLOC_START, except the early fixmap, we might remove
+ * debug device mappings.  This means earlycon can be used to debug this
+ * function.  Any other function or debugging method which may touch any
+ * device _will_ crash the kernel.
  */
 static void __init devicemaps_init(const struct machine_desc *mdesc)
 {
@@ -1237,7 +1278,10 @@
 
 	early_trap_init(vectors);
 
-	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+	/*
+	 * Clear page table except top pmd used by early fixmaps
+	 */
+	for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
@@ -1489,6 +1533,35 @@
 
 #endif
 
+static void __init early_fixmap_shutdown(void)
+{
+	int i;
+	unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
+
+	pte_offset_fixmap = pte_offset_late_fixmap;
+	pmd_clear(fixmap_pmd(va));
+	local_flush_tlb_kernel_page(va);
+
+	for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
+		pte_t *pte;
+		struct map_desc map;
+
+		map.virtual = fix_to_virt(i);
+		pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
+
+		/* Only I/O device mappings are supported at the moment */
+		if (pte_none(*pte) ||
+		    (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
+			continue;
+
+		map.pfn = pte_pfn(*pte);
+		map.type = MT_DEVICE;
+		map.length = PAGE_SIZE;
+
+		create_mapping(&map);
+	}
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
@@ -1502,6 +1575,7 @@
 	map_lowmem();
 	memblock_set_current_limit(arm_lowmem_limit);
 	dma_contiguous_remap();
+	early_fixmap_shutdown();
 	devicemaps_init(mdesc);
 	kmap_init();
 	tcm_init();
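
As an illustration of what the early fixmap enables, an early consumer such as earlycon can map a device page through __set_fixmap() before paging_init() completes. A sketch — the fixmap slot name and the FIXMAP_PAGE_IO protection are assumptions for illustration:

	static void __iomem *sketch_early_map(phys_addr_t phys)
	{
		__set_fixmap(FIX_EARLYCON_MEM_BASE, phys & PAGE_MASK,
			     FIXMAP_PAGE_IO);
		return (void __iomem *)(fix_to_virt(FIX_EARLYCON_MEM_BASE) +
					(phys & ~PAGE_MASK));
	}
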
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index a3681f1..e683db1 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -84,6 +84,16 @@
 		if (!new_pte)
 			goto no_pte;
 
+#ifndef CONFIG_ARM_LPAE
+		/*
+		 * Modify the PTE pointer to have the correct domain.  This
+		 * needs to be the vectors domain to avoid the low vectors
+		 * being unmapped.
+		 */
+		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
+		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
+#endif
+
 		init_pud = pud_offset(init_pgd, 0);
 		init_pmd = pmd_offset(init_pud, 0);
 		init_pte = pte_offset_map(init_pmd, 0);
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index c011e22..876060b 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -857,7 +857,9 @@
 			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
 			break;
 		case BPF_ANC | SKF_AD_IFINDEX:
+		case BPF_ANC | SKF_AD_HATYPE:
 			/* A = skb->dev->ifindex */
+			/* A = skb->dev->type */
 			ctx->seen |= SEEN_SKB;
 			off = offsetof(struct sk_buff, dev);
 			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
@@ -867,8 +869,24 @@
 
 			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
 						  ifindex) != 4);
-			off = offsetof(struct net_device, ifindex);
-			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
+			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+						  type) != 2);
+
+			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
+				off = offsetof(struct net_device, ifindex);
+				emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
+			} else {
+				/*
+				 * The offset of the "type" field in
+				 * struct net_device exceeds the
+				 * immediate range of the ldrh rd,
+				 * [rn, #imm] instruction, so load the
+				 * offset into a register and use
+				 * ldrh rd, [rn, rm] instead.
+				 */
+				off = offsetof(struct net_device, type);
+				emit_mov_i(ARM_R3, off, ctx);
+				emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
+			}
 			break;
 		case BPF_ANC | SKF_AD_MARK:
 			ctx->seen |= SEEN_SKB;
@@ -895,6 +913,17 @@
 				OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
 			}
 			break;
+		case BPF_ANC | SKF_AD_PKTTYPE:
+			ctx->seen |= SEEN_SKB;
+			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+						  __pkt_type_offset[0]) != 1);
+			off = PKT_TYPE_OFFSET();
+			emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
+			emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
+#ifdef __BIG_ENDIAN_BITFIELD
+			emit(ARM_LSR_I(r_A, r_A, 5), ctx);
+#endif
+			break;
 		case BPF_ANC | SKF_AD_QUEUE:
 			ctx->seen |= SEEN_SKB;
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
@@ -904,6 +933,14 @@
 			off = offsetof(struct sk_buff, queue_mapping);
 			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
 			break;
+		case BPF_ANC | SKF_AD_PAY_OFFSET:
+			ctx->seen |= SEEN_SKB | SEEN_CALL;
+
+			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
+			emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
+			emit_blx_r(ARM_R3, ctx);
+			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
+			break;
 		case BPF_LDX | BPF_W | BPF_ABS:
 			/*
 			 * load a 32bit word from struct seccomp_data.
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index b2d7d92..4b17d5ab 100644
--- a/arch/arm/net/bpf_jit_32.h
+++ b/arch/arm/net/bpf_jit_32.h
@@ -74,6 +74,7 @@
 #define ARM_INST_LDRB_I		0x05d00000
 #define ARM_INST_LDRB_R		0x07d00000
 #define ARM_INST_LDRH_I		0x01d000b0
+#define ARM_INST_LDRH_R		0x019000b0
 #define ARM_INST_LDR_I		0x05900000
 
 #define ARM_INST_LDM		0x08900000
@@ -160,6 +161,8 @@
 				 | (rm))
 #define ARM_LDRH_I(rt, rn, off)	(ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \
 				 | (((off) & 0xf0) << 4) | ((off) & 0xf))
+#define ARM_LDRH_R(rt, rn, rm)	(ARM_INST_LDRH_R | (rt) << 12 | (rn) << 16 \
+				 | (rm))
 
 #define ARM_LDM(rn, regs)	(ARM_INST_LDM | (rn) << 16 | (regs))
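
A worked example of the new register-offset encoding, with values chosen purely for illustration:

	/* Encode ldrh r4, [r1, r3]: */
	u32 insn = ARM_LDRH_R(4, 1, 3);	/* 0x019000b0 | 4<<12 | 1<<16 | 3 */
	/* insn == 0x019140b3; emit() ORs in the always (AL) condition,
	 * giving the final instruction word 0xe19140b3. */
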
 
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b7b9cea..7d95663 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -20,6 +20,7 @@
 	select ARM_GIC_V2M if PCI_MSI
 	select ARM_GIC_V3
 	select ARM_GIC_V3_ITS if PCI_MSI
+	select ARM_PSCI_FW
 	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS
 	select COMMON_CLK
@@ -28,7 +29,7 @@
 	select EDAC_SUPPORT
 	select GENERIC_ALLOCATOR
 	select GENERIC_CLOCKEVENTS
-	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+	select GENERIC_CLOCKEVENTS_BROADCAST
 	select GENERIC_CPU_AUTOPROBE
 	select GENERIC_EARLY_IOREMAP
 	select GENERIC_IRQ_PROBE
@@ -53,6 +54,7 @@
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_CC_STACKPROTECTOR
 	select HAVE_CMPXCHG_DOUBLE
+	select HAVE_CMPXCHG_LOCAL
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
@@ -104,6 +106,10 @@
 config STACKTRACE_SUPPORT
 	def_bool y
 
+config ILLEGAL_POINTER_VALUE
+	hex
+	default 0xdead000000000000
+
 config LOCKDEP_SUPPORT
 	def_bool y
 
@@ -113,6 +119,14 @@
 config RWSEM_XCHGADD_ALGORITHM
 	def_bool y
 
+config GENERIC_BUG
+	def_bool y
+	depends on BUG
+
+config GENERIC_BUG_RELATIVE_POINTERS
+	def_bool y
+	depends on GENERIC_BUG
+
 config GENERIC_HWEIGHT
 	def_bool y
 
@@ -137,6 +151,9 @@
 config NEED_SG_DMA_LENGTH
 	def_bool y
 
+config SMP
+	def_bool y
+
 config SWIOTLB
 	def_bool y
 
@@ -371,22 +388,8 @@
        help
          Say Y if you plan on running a kernel in big-endian mode.
 
-config SMP
-	bool "Symmetric Multi-Processing"
-	help
-	  This enables support for systems with more than one CPU.  If
-	  you say N here, the kernel will run on single and
-	  multiprocessor machines, but will use only one CPU of a
-	  multiprocessor machine. If you say Y here, the kernel will run
-	  on many, but not all, single processor machines. On a single
-	  processor machine, the kernel will run faster if you say N
-	  here.
-
-	  If you don't know what to do here, say N.
-
 config SCHED_MC
 	bool "Multi-core scheduler support"
-	depends on SMP
 	help
 	  Multi-core scheduler support improves the CPU scheduler's decision
 	  making when dealing with multi-core CPU chips at a cost of slightly
@@ -394,7 +397,6 @@
 
 config SCHED_SMT
 	bool "SMT scheduler support"
-	depends on SMP
 	help
 	  Improves the CPU scheduler's decision making when dealing with
 	  MultiThreading at a cost of slightly increased overhead in some
@@ -403,23 +405,17 @@
 config NR_CPUS
 	int "Maximum number of CPUs (2-4096)"
 	range 2 4096
-	depends on SMP
 	# These have to remain sorted largest to smallest
 	default "64"
 
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs"
-	depends on SMP
 	help
 	  Say Y here to experiment with turning CPUs off and on.  CPUs
 	  can be controlled through /sys/devices/system/cpu.
 
 source kernel/Kconfig.preempt
 
-config UP_LATE_INIT
-       def_bool y
-       depends on !SMP
-
 config HZ
 	int
 	default 100
@@ -561,6 +557,53 @@
 	  If unsure, say Y
 endif
 
+menu "ARMv8.1 architectural features"
+
+config ARM64_HW_AFDBM
+	bool "Support for hardware updates of the Access and Dirty page flags"
+	default y
+	help
+	  The ARMv8.1 architecture extensions introduce support for
+	  hardware updates of the access and dirty information in page
+	  table entries. When enabled in TCR_EL1 (HA and HD bits) on
+	  capable processors, accesses to pages with PTE_AF cleared will
+	  set this bit instead of raising an access flag fault.
+	  Similarly, writes to read-only pages with the DBM bit set will
+	  clear the read-only bit (AP[2]) instead of raising a
+	  permission fault.
+
+	  Kernels built with this configuration option enabled continue
+	  to work on pre-ARMv8.1 hardware and the performance impact is
+	  minimal. If unsure, say Y.
+
+config ARM64_PAN
+	bool "Enable support for Privileged Access Never (PAN)"
+	default y
+	help
+	 Privileged Access Never (PAN; part of the ARMv8.1 Extensions)
+	 prevents the kernel or hypervisor from accessing user-space (EL0)
+	 memory directly.
+
+	 Choosing this option will cause any unprotected (not using
+	 copy_to_user et al) memory access to fail with a permission fault.
+
+	 The feature is detected at runtime, and will remain a 'nop'
+	 instruction if the CPU does not implement the feature.
+
+config ARM64_LSE_ATOMICS
+	bool "Atomic instructions"
+	help
+	  As part of the Large System Extensions, ARMv8.1 introduces new
+	  atomic instructions that are designed specifically to scale in
+	  very large systems.
+
+	  Say Y here to make use of these instructions for the in-kernel
+	  atomic routines. This incurs a small overhead on CPUs that do
+	  not support these instructions and requires the kernel to be
+	  built with binutils >= 2.25.
+
+endmenu
+
 endmenu
 
 menu "Boot options"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 4d2a925..15ff5b4 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -17,7 +17,18 @@
 
 KBUILD_DEFCONFIG := defconfig
 
-KBUILD_CFLAGS	+= -mgeneral-regs-only
+# Check for binutils support for specific extensions
+lseinstr := $(call as-instr,.arch_extension lse,-DCONFIG_AS_LSE=1)
+
+ifeq ($(CONFIG_ARM64_LSE_ATOMICS), y)
+  ifeq ($(lseinstr),)
+$(warning LSE atomics not supported by binutils)
+  endif
+endif
+
+KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr)
+KBUILD_AFLAGS	+= $(lseinstr)
+
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS	+= -mbig-endian
 AS		+= -EB
@@ -58,7 +69,10 @@
 
 boot := arch/arm64/boot
 
-Image Image.gz: vmlinux
+Image: vmlinux
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+Image.%: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
 zinstall install: vmlinux
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index 5a0e3ab..abcbba2 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -19,9 +19,21 @@
 $(obj)/Image: vmlinux FORCE
 	$(call if_changed,objcopy)
 
+$(obj)/Image.bz2: $(obj)/Image FORCE
+	$(call if_changed,bzip2)
+
 $(obj)/Image.gz: $(obj)/Image FORCE
 	$(call if_changed,gzip)
 
+$(obj)/Image.lz4: $(obj)/Image FORCE
+	$(call if_changed,lz4)
+
+$(obj)/Image.lzma: $(obj)/Image FORCE
+	$(call if_changed,lzma)
+
+$(obj)/Image.lzo: $(obj)/Image FORCE
+	$(call if_changed,lzo)
+
 install: $(obj)/Image
 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 	$(obj)/Image System.map "$(INSTALL_PATH)"
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 406485e..208cec0 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -12,11 +12,11 @@
 #ifndef _ASM_ACPI_H
 #define _ASM_ACPI_H
 
-#include <linux/mm.h>
 #include <linux/irqchip/arm-gic-acpi.h>
+#include <linux/mm.h>
+#include <linux/psci.h>
 
 #include <asm/cputype.h>
-#include <asm/psci.h>
 #include <asm/smp_plat.h>
 
 /* Macros for consistency checks of the GICC subtable of MADT */
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index c385a0c..d56ec07 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -3,6 +3,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/init.h>
+#include <linux/kconfig.h>
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/stringify.h>
@@ -15,7 +17,7 @@
 	u8  alt_len;		/* size of new instruction(s), <= orig_len */
 };
 
-void apply_alternatives_all(void);
+void __init apply_alternatives_all(void);
 void apply_alternatives(void *start, size_t length);
 void free_alternatives_memory(void);
 
@@ -40,7 +42,8 @@
  * be fixed in a binutils release later than 2.25.51.0.2 (anything
  * containing commit 4e4d08cf7399b606 or c1baaddf8861).
  */
-#define ALTERNATIVE(oldinstr, newinstr, feature)			\
+#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled)	\
+	".if "__stringify(cfg_enabled)" == 1\n"				\
 	"661:\n\t"							\
 	oldinstr "\n"							\
 	"662:\n"							\
@@ -53,7 +56,11 @@
 	"664:\n\t"							\
 	".popsection\n\t"						\
 	".org	. - (664b-663b) + (662b-661b)\n\t"			\
-	".org	. - (662b-661b) + (664b-663b)\n"
+	".org	. - (662b-661b) + (664b-663b)\n"			\
+	".endif\n"
+
+#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...)	\
+	__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
 
 #else
 
@@ -65,7 +72,8 @@
 	.byte \alt_len
 .endm
 
-.macro alternative_insn insn1 insn2 cap
+.macro alternative_insn insn1, insn2, cap, enable = 1
+	.if \enable
 661:	\insn1
 662:	.pushsection .altinstructions, "a"
 	altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
@@ -75,8 +83,70 @@
 664:	.popsection
 	.org	. - (664b-663b) + (662b-661b)
 	.org	. - (662b-661b) + (664b-663b)
+	.endif
 .endm
 
+/*
+ * Begin an alternative code sequence.
+ *
+ * The code that follows this macro will be assembled and linked as
+ * normal. There are no restrictions on this code.
+ */
+.macro alternative_if_not cap, enable = 1
+	.if \enable
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
+	.popsection
+661:
+	.endif
+.endm
+
+/*
+ * Provide the alternative code sequence.
+ *
+ * The code that follows this macro is assembled into a special
+ * section to be used for dynamic patching. Code that follows this
+ * macro must:
+ *
+ * 1. Be exactly the same length (in bytes) as the default code
+ *    sequence.
+ *
+ * 2. Not contain a branch target that is used outside of the
+ *    alternative sequence it is defined in (branches into an
+ *    alternative sequence are not fixed up).
+ */
+.macro alternative_else, enable = 1
+	.if \enable
+662:	.pushsection .altinstr_replacement, "ax"
+663:
+	.endif
+.endm
+
+/*
+ * Complete an alternative code sequence.
+ */
+.macro alternative_endif, enable = 1
+	.if \enable
+664:	.popsection
+	.org	. - (664b-663b) + (662b-661b)
+	.org	. - (662b-661b) + (664b-663b)
+	.endif
+.endm
+
+#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...)	\
+	alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
+
+
 #endif  /*  __ASSEMBLY__  */
 
+/*
+ * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature));
+ *
+ * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO));
+ * N.B. If CONFIG_FOO is specified but not selected, the whole block
+ *      will be omitted, including oldinstr.
+ */
+#define ALTERNATIVE(oldinstr, newinstr, ...)   \
+	_ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
+
 #endif /* __ASM_ALTERNATIVE_H */
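
Example usage of the config-gated form — a sketch with invented names (ARM64_HAS_FOO, CONFIG_ARM64_FOO); when CONFIG_ARM64_FOO is not selected, the whole block, including the default dmb, is omitted:

	asm(ALTERNATIVE("dmb ish",	/* default sequence */
			"nop",		/* patched in when the CPU has FOO */
			ARM64_HAS_FOO, CONFIG_ARM64_FOO));

Both sequences are four bytes, satisfying the rule that the replacement must not be longer than the original.
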
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 144b64a..b51f2cc 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -50,18 +50,6 @@
 	.endm
 
 /*
- * Save/disable and restore interrupts.
- */
-	.macro	save_and_disable_irqs, olddaif
-	mrs	\olddaif, daif
-	disable_irq
-	.endm
-
-	.macro	restore_irqs, olddaif
-	msr	daif, \olddaif
-	.endm
-
-/*
  * Enable and disable debug exceptions.
  */
 	.macro	disable_dbg
@@ -103,9 +91,7 @@
  * SMP data memory barrier
  */
 	.macro	smp_dmb, opt
-#ifdef CONFIG_SMP
 	dmb	\opt
-#endif
 	.endm
 
 #define USER(l, x...)				\
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 7047051..35a6778 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -24,233 +24,72 @@
 #include <linux/types.h>
 
 #include <asm/barrier.h>
-#include <asm/cmpxchg.h>
-
-#define ATOMIC_INIT(i)	{ (i) }
+#include <asm/lse.h>
 
 #ifdef __KERNEL__
 
-/*
- * On ARM, ordinary assignment (str instruction) doesn't clear the local
- * strex/ldrex monitor on some implementations. The reason we can use it for
- * atomic_set() is the clrex or dummy strex done on every exception return.
- */
-#define atomic_read(v)	ACCESS_ONCE((v)->counter)
-#define atomic_set(v,i)	(((v)->counter) = (i))
+#define __ARM64_IN_ATOMIC_IMPL
 
-/*
- * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
- * store exclusive to ensure that these are atomic.  We may loop
- * to ensure that the update happens.
- */
+#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)
+#include <asm/atomic_lse.h>
+#else
+#include <asm/atomic_ll_sc.h>
+#endif
 
-#define ATOMIC_OP(op, asm_op)						\
-static inline void atomic_##op(int i, atomic_t *v)			\
-{									\
-	unsigned long tmp;						\
-	int result;							\
+#undef __ARM64_IN_ATOMIC_IMPL
+
+#include <asm/cmpxchg.h>
+
+#define ___atomic_add_unless(v, a, u, sfx)				\
+({									\
+	typeof((v)->counter) c, old;					\
 									\
-	asm volatile("// atomic_" #op "\n"				\
-"1:	ldxr	%w0, %2\n"						\
-"	" #asm_op "	%w0, %w0, %w3\n"				\
-"	stxr	%w1, %w0, %2\n"						\
-"	cbnz	%w1, 1b"						\
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: "Ir" (i));							\
-}									\
+	c = atomic##sfx##_read(v);					\
+	while (c != (u) &&						\
+	      (old = atomic##sfx##_cmpxchg((v), c, c + (a))) != c)	\
+		c = old;						\
+	c;								\
+ })
 
-#define ATOMIC_OP_RETURN(op, asm_op)					\
-static inline int atomic_##op##_return(int i, atomic_t *v)		\
-{									\
-	unsigned long tmp;						\
-	int result;							\
-									\
-	asm volatile("// atomic_" #op "_return\n"			\
-"1:	ldxr	%w0, %2\n"						\
-"	" #asm_op "	%w0, %w0, %w3\n"				\
-"	stlxr	%w1, %w0, %2\n"						\
-"	cbnz	%w1, 1b"						\
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: "Ir" (i)							\
-	: "memory");							\
-									\
-	smp_mb();							\
-	return result;							\
-}
+#define ATOMIC_INIT(i)	{ (i) }
 
-#define ATOMIC_OPS(op, asm_op)						\
-	ATOMIC_OP(op, asm_op)						\
-	ATOMIC_OP_RETURN(op, asm_op)
+#define atomic_read(v)			READ_ONCE((v)->counter)
+#define atomic_set(v, i)		(((v)->counter) = (i))
+#define atomic_xchg(v, new)		xchg(&((v)->counter), (new))
+#define atomic_cmpxchg(v, old, new)	cmpxchg(&((v)->counter), (old), (new))
 
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, sub)
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
-{
-	unsigned long tmp;
-	int oldval;
-
-	smp_mb();
-
-	asm volatile("// atomic_cmpxchg\n"
-"1:	ldxr	%w1, %2\n"
-"	cmp	%w1, %w3\n"
-"	b.ne	2f\n"
-"	stxr	%w0, %w4, %2\n"
-"	cbnz	%w0, 1b\n"
-"2:"
-	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Ir" (old), "r" (new)
-	: "cc");
-
-	smp_mb();
-	return oldval;
-}
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int c, old;
-
-	c = atomic_read(v);
-	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
-		c = old;
-	return c;
-}
-
-#define atomic_inc(v)		atomic_add(1, v)
-#define atomic_dec(v)		atomic_sub(1, v)
-
-#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v)    (atomic_add_return(1, v))
-#define atomic_dec_return(v)    (atomic_sub_return(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+#define atomic_inc(v)			atomic_add(1, (v))
+#define atomic_dec(v)			atomic_sub(1, (v))
+#define atomic_inc_return(v)		atomic_add_return(1, (v))
+#define atomic_dec_return(v)		atomic_sub_return(1, (v))
+#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
+#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
+#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
+#define atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
+#define __atomic_add_unless(v, a, u)	___atomic_add_unless(v, a, u,)
+#define atomic_andnot			atomic_andnot
 
 /*
  * 64-bit atomic operations.
  */
-#define ATOMIC64_INIT(i) { (i) }
+#define ATOMIC64_INIT			ATOMIC_INIT
+#define atomic64_read			atomic_read
+#define atomic64_set			atomic_set
+#define atomic64_xchg			atomic_xchg
+#define atomic64_cmpxchg		atomic_cmpxchg
 
-#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
-#define atomic64_set(v,i)	(((v)->counter) = (i))
-
-#define ATOMIC64_OP(op, asm_op)						\
-static inline void atomic64_##op(long i, atomic64_t *v)			\
-{									\
-	long result;							\
-	unsigned long tmp;						\
-									\
-	asm volatile("// atomic64_" #op "\n"				\
-"1:	ldxr	%0, %2\n"						\
-"	" #asm_op "	%0, %0, %3\n"					\
-"	stxr	%w1, %0, %2\n"						\
-"	cbnz	%w1, 1b"						\
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: "Ir" (i));							\
-}									\
-
-#define ATOMIC64_OP_RETURN(op, asm_op)					\
-static inline long atomic64_##op##_return(long i, atomic64_t *v)	\
-{									\
-	long result;							\
-	unsigned long tmp;						\
-									\
-	asm volatile("// atomic64_" #op "_return\n"			\
-"1:	ldxr	%0, %2\n"						\
-"	" #asm_op "	%0, %0, %3\n"					\
-"	stlxr	%w1, %0, %2\n"						\
-"	cbnz	%w1, 1b"						\
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: "Ir" (i)							\
-	: "memory");							\
-									\
-	smp_mb();							\
-	return result;							\
-}
-
-#define ATOMIC64_OPS(op, asm_op)					\
-	ATOMIC64_OP(op, asm_op)						\
-	ATOMIC64_OP_RETURN(op, asm_op)
-
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, sub)
-
-#undef ATOMIC64_OPS
-#undef ATOMIC64_OP_RETURN
-#undef ATOMIC64_OP
-
-static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
-{
-	long oldval;
-	unsigned long res;
-
-	smp_mb();
-
-	asm volatile("// atomic64_cmpxchg\n"
-"1:	ldxr	%1, %2\n"
-"	cmp	%1, %3\n"
-"	b.ne	2f\n"
-"	stxr	%w0, %4, %2\n"
-"	cbnz	%w0, 1b\n"
-"2:"
-	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Ir" (old), "r" (new)
-	: "cc");
-
-	smp_mb();
-	return oldval;
-}
-
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-
-static inline long atomic64_dec_if_positive(atomic64_t *v)
-{
-	long result;
-	unsigned long tmp;
-
-	asm volatile("// atomic64_dec_if_positive\n"
-"1:	ldxr	%0, %2\n"
-"	subs	%0, %0, #1\n"
-"	b.mi	2f\n"
-"	stlxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b\n"
-"	dmb	ish\n"
-"2:"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	:
-	: "cc", "memory");
-
-	return result;
-}
-
-static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
-	long c, old;
-
-	c = atomic64_read(v);
-	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
-		c = old;
-
-	return c != u;
-}
-
-#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v)			atomic64_add(1LL, (v))
-#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define atomic64_inc(v)			atomic64_add(1, (v))
+#define atomic64_dec(v)			atomic64_sub(1, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
 #define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v)			atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
+#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
+#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)
+#define atomic64_add_unless(v, a, u)	(___atomic_add_unless(v, a, u, 64) != u)
+#define atomic64_andnot			atomic64_andnot
+
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 
 #endif
 #endif
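
The ___atomic_add_unless() helper above expands, for the 32-bit case, to the classic cmpxchg retry loop — shown here as a plain C sketch, not kernel code:

	static inline int sketch_add_unless(atomic_t *v, int a, int u)
	{
		int c = atomic_read(v), old;

		/* Retry until we observe u or our cmpxchg wins the race. */
		while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
			c = old;
		return c;	/* the value seen before any addition */
	}
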
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
new file mode 100644
index 0000000..b3b5c4a
--- /dev/null
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -0,0 +1,247 @@
+/*
+ * Based on arch/arm/include/asm/atomic.h
+ *
+ * Copyright (C) 1996 Russell King.
+ * Copyright (C) 2002 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ATOMIC_LL_SC_H
+#define __ASM_ATOMIC_LL_SC_H
+
+#ifndef __ARM64_IN_ATOMIC_IMPL
+#error "please don't include this file directly"
+#endif
+
+/*
+ * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
+ * store exclusive to ensure that these are atomic.  We may loop
+ * to ensure that the update happens.
+ *
+ * NOTE: these functions do *not* follow the PCS and must explicitly
+ * save any clobbered registers other than x0 (regardless of return
+ * value).  This is achieved through -fcall-saved-* compiler flags for
+ * this file, which unfortunately don't work on a per-function basis
+ * (the optimize attribute silently ignores these options).
+ */
+
+#define ATOMIC_OP(op, asm_op)						\
+__LL_SC_INLINE void							\
+__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))				\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	asm volatile("// atomic_" #op "\n"				\
+"	prfm	pstl1strm, %2\n"					\
+"1:	ldxr	%w0, %2\n"						\
+"	" #asm_op "	%w0, %w0, %w3\n"				\
+"	stxr	%w1, %w0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i));							\
+}									\
+__LL_SC_EXPORT(atomic_##op);
+
+#define ATOMIC_OP_RETURN(op, asm_op)					\
+__LL_SC_INLINE int							\
+__LL_SC_PREFIX(atomic_##op##_return(int i, atomic_t *v))		\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	asm volatile("// atomic_" #op "_return\n"			\
+"	prfm	pstl1strm, %2\n"					\
+"1:	ldxr	%w0, %2\n"						\
+"	" #asm_op "	%w0, %w0, %w3\n"				\
+"	stlxr	%w1, %w0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i)							\
+	: "memory");							\
+									\
+	smp_mb();							\
+	return result;							\
+}									\
+__LL_SC_EXPORT(atomic_##op##_return);
+
+#define ATOMIC_OPS(op, asm_op)						\
+	ATOMIC_OP(op, asm_op)						\
+	ATOMIC_OP_RETURN(op, asm_op)
+
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(andnot, bic)
+ATOMIC_OP(or, orr)
+ATOMIC_OP(xor, eor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#define ATOMIC64_OP(op, asm_op)						\
+__LL_SC_INLINE void							\
+__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))			\
+{									\
+	long result;							\
+	unsigned long tmp;						\
+									\
+	asm volatile("// atomic64_" #op "\n"				\
+"	prfm	pstl1strm, %2\n"					\
+"1:	ldxr	%0, %2\n"						\
+"	" #asm_op "	%0, %0, %3\n"					\
+"	stxr	%w1, %0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i));							\
+}									\
+__LL_SC_EXPORT(atomic64_##op);
+
+#define ATOMIC64_OP_RETURN(op, asm_op)					\
+__LL_SC_INLINE long							\
+__LL_SC_PREFIX(atomic64_##op##_return(long i, atomic64_t *v))		\
+{									\
+	long result;							\
+	unsigned long tmp;						\
+									\
+	asm volatile("// atomic64_" #op "_return\n"			\
+"	prfm	pstl1strm, %2\n"					\
+"1:	ldxr	%0, %2\n"						\
+"	" #asm_op "	%0, %0, %3\n"					\
+"	stlxr	%w1, %0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i)							\
+	: "memory");							\
+									\
+	smp_mb();							\
+	return result;							\
+}									\
+__LL_SC_EXPORT(atomic64_##op##_return);
+
+#define ATOMIC64_OPS(op, asm_op)					\
+	ATOMIC64_OP(op, asm_op)						\
+	ATOMIC64_OP_RETURN(op, asm_op)
+
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, sub)
+
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(andnot, bic)
+ATOMIC64_OP(or, orr)
+ATOMIC64_OP(xor, eor)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+__LL_SC_INLINE long
+__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
+{
+	long result;
+	unsigned long tmp;
+
+	asm volatile("// atomic64_dec_if_positive\n"
+"	prfm	pstl1strm, %2\n"
+"1:	ldxr	%0, %2\n"
+"	subs	%0, %0, #1\n"
+"	b.lt	2f\n"
+"	stlxr	%w1, %0, %2\n"
+"	cbnz	%w1, 1b\n"
+"	dmb	ish\n"
+"2:"
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	:
+	: "cc", "memory");
+
+	return result;
+}
+__LL_SC_EXPORT(atomic64_dec_if_positive);
+
+#define __CMPXCHG_CASE(w, sz, name, mb, rel, cl)			\
+__LL_SC_INLINE unsigned long						\
+__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,		\
+				     unsigned long old,			\
+				     unsigned long new))		\
+{									\
+	unsigned long tmp, oldval;					\
+									\
+	asm volatile(							\
+	"	prfm	pstl1strm, %[v]\n"				\
+	"1:	ldxr" #sz "\t%" #w "[oldval], %[v]\n"			\
+	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"	\
+	"	cbnz	%" #w "[tmp], 2f\n"				\
+	"	st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"	\
+	"	cbnz	%w[tmp], 1b\n"					\
+	"	" #mb "\n"						\
+	"	mov	%" #w "[oldval], %" #w "[old]\n"		\
+	"2:"								\
+	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
+	  [v] "+Q" (*(unsigned long *)ptr)				\
+	: [old] "Lr" (old), [new] "r" (new)				\
+	: cl);								\
+									\
+	return oldval;							\
+}									\
+__LL_SC_EXPORT(__cmpxchg_case_##name);
+
+__CMPXCHG_CASE(w, b,    1,        ,  ,         )
+__CMPXCHG_CASE(w, h,    2,        ,  ,         )
+__CMPXCHG_CASE(w,  ,    4,        ,  ,         )
+__CMPXCHG_CASE( ,  ,    8,        ,  ,         )
+__CMPXCHG_CASE(w, b, mb_1, dmb ish, l, "memory")
+__CMPXCHG_CASE(w, h, mb_2, dmb ish, l, "memory")
+__CMPXCHG_CASE(w,  , mb_4, dmb ish, l, "memory")
+__CMPXCHG_CASE( ,  , mb_8, dmb ish, l, "memory")
+
+#undef __CMPXCHG_CASE
+
+#define __CMPXCHG_DBL(name, mb, rel, cl)				\
+__LL_SC_INLINE int							\
+__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,		\
+				      unsigned long old2,		\
+				      unsigned long new1,		\
+				      unsigned long new2,		\
+				      volatile void *ptr))		\
+{									\
+	unsigned long tmp, ret;						\
+									\
+	asm volatile("// __cmpxchg_double" #name "\n"			\
+	"	prfm	pstl1strm, %2\n"				\
+	"1:	ldxp	%0, %1, %2\n"					\
+	"	eor	%0, %0, %3\n"					\
+	"	eor	%1, %1, %4\n"					\
+	"	orr	%1, %0, %1\n"					\
+	"	cbnz	%1, 2f\n"					\
+	"	st" #rel "xp	%w0, %5, %6, %2\n"			\
+	"	cbnz	%w0, 1b\n"					\
+	"	" #mb "\n"						\
+	"2:"								\
+	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)	\
+	: "r" (old1), "r" (old2), "r" (new1), "r" (new2)		\
+	: cl);								\
+									\
+	return ret;							\
+}									\
+__LL_SC_EXPORT(__cmpxchg_double##name);
+
+__CMPXCHG_DBL(   ,        ,  ,         )
+__CMPXCHG_DBL(_mb, dmb ish, l, "memory")
+
+#undef __CMPXCHG_DBL
+
+#endif	/* __ASM_ATOMIC_LL_SC_H */
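
For reference, the relaxed 4-byte instantiation __CMPXCHG_CASE(w, , 4, , , ) above generates roughly the following function — a sketch with the __LL_SC_PREFIX/__LL_SC_EXPORT plumbing and the empty barrier/clobber arguments elided:

	static inline unsigned long __cmpxchg_case_4(volatile void *ptr,
						     unsigned long old,
						     unsigned long new)
	{
		unsigned long tmp, oldval;

		asm volatile(
		"	prfm	pstl1strm, %[v]\n"
		"1:	ldxr	%w[oldval], %[v]\n"
		"	eor	%w[tmp], %w[oldval], %w[old]\n"
		"	cbnz	%w[tmp], 2f\n"
		"	stxr	%w[tmp], %w[new], %[v]\n"
		"	cbnz	%w[tmp], 1b\n"
		"	mov	%w[oldval], %w[old]\n"
		"2:"
		: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),
		  [v] "+Q" (*(unsigned long *)ptr)
		: [old] "Lr" (old), [new] "r" (new));

		return oldval;
	}
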
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
new file mode 100644
index 0000000..55d740e
--- /dev/null
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -0,0 +1,391 @@
+/*
+ * Based on arch/arm/include/asm/atomic.h
+ *
+ * Copyright (C) 1996 Russell King.
+ * Copyright (C) 2002 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ATOMIC_LSE_H
+#define __ASM_ATOMIC_LSE_H
+
+#ifndef __ARM64_IN_ATOMIC_IMPL
+#error "please don't include this file directly"
+#endif
+
+#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)
+
+static inline void atomic_andnot(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
+	"	stclr	%w[i], %[v]\n")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic_or(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
+	"	stset	%w[i], %[v]\n")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
+	"	steor	%w[i], %[v]\n")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
+	"	stadd	%w[i], %[v]\n")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC(add_return),
+	/* LSE atomics */
+	"	ldaddal	%w[i], w30, %[v]\n"
+	"	add	%w[i], %w[i], w30")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30", "memory");
+
+	return w0;
+}
+
+static inline void atomic_and(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC(and),
+	/* LSE atomics */
+	"	mvn	%w[i], %w[i]\n"
+	"	stclr	%w[i], %[v]")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC(sub),
+	/* LSE atomics */
+	"	neg	%w[i], %w[i]\n"
+	"	stadd	%w[i], %[v]")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC(sub_return)
+	"	nop",
+	/* LSE atomics */
+	"	neg	%w[i], %w[i]\n"
+	"	ldaddal	%w[i], w30, %[v]\n"
+	"	add	%w[i], %w[i], w30")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30", "memory");
+
+	return w0;
+}
+
+#undef __LL_SC_ATOMIC
+
+#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)
+
+static inline void atomic64_andnot(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
+	"	stclr	%[i], %[v]\n")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
+	"	stset	%[i], %[v]\n")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
+	"	steor	%[i], %[v]\n")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic64_add(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
+	"	stadd	%[i], %[v]\n")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline long atomic64_add_return(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC64(add_return),
+	/* LSE atomics */
+	"	ldaddal	%[i], x30, %[v]\n"
+	"	add	%[i], %[i], x30")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30", "memory");
+
+	return x0;
+}
+
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC64(and),
+	/* LSE atomics */
+	"	mvn	%[i], %[i]\n"
+	"	stclr	%[i], %[v]")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic64_sub(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC64(sub),
+	/* LSE atomics */
+	"	neg	%[i], %[i]\n"
+	"	stadd	%[i], %[v]")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline long atomic64_sub_return(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC64(sub_return)
+	"	nop",
+	/* LSE atomics */
+	"	neg	%[i], %[i]\n"
+	"	ldaddal	%[i], x30, %[v]\n"
+	"	add	%[i], %[i], x30")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30", "memory");
+
+	return x0;
+}
+
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+	register long x0 asm ("x0") = (long)v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC64(dec_if_positive)
+	"	nop\n"
+	"	nop\n"
+	"	nop\n"
+	"	nop\n"
+	"	nop",
+	/* LSE atomics */
+	"1:	ldr	x30, %[v]\n"
+	"	subs	%[ret], x30, #1\n"
+	"	b.lt	2f\n"
+	"	casal	x30, %[ret], %[v]\n"
+	"	sub	x30, x30, #1\n"
+	"	sub	x30, x30, %[ret]\n"
+	"	cbnz	x30, 1b\n"
+	"2:")
+	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
+	:
+	: "x30", "cc", "memory");
+
+	return x0;
+}
+
+#undef __LL_SC_ATOMIC64
+
+#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)
+
+#define __CMPXCHG_CASE(w, sz, name, mb, cl...)				\
+static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,	\
+						  unsigned long old,	\
+						  unsigned long new)	\
+{									\
+	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
+	register unsigned long x1 asm ("x1") = old;			\
+	register unsigned long x2 asm ("x2") = new;			\
+									\
+	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	/* LL/SC */							\
+	"	nop\n"							\
+		__LL_SC_CMPXCHG(name)					\
+	"	nop",							\
+	/* LSE atomics */						\
+	"	mov	" #w "30, %" #w "[old]\n"			\
+	"	cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n"		\
+	"	mov	%" #w "[ret], " #w "30")			\
+	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)		\
+	: [old] "r" (x1), [new] "r" (x2)				\
+	: "x30" , ##cl);						\
+									\
+	return x0;							\
+}
+
+__CMPXCHG_CASE(w, b,    1,   )
+__CMPXCHG_CASE(w, h,    2,   )
+__CMPXCHG_CASE(w,  ,    4,   )
+__CMPXCHG_CASE(x,  ,    8,   )
+__CMPXCHG_CASE(w, b, mb_1, al, "memory")
+__CMPXCHG_CASE(w, h, mb_2, al, "memory")
+__CMPXCHG_CASE(w,  , mb_4, al, "memory")
+__CMPXCHG_CASE(x,  , mb_8, al, "memory")
+
+#undef __LL_SC_CMPXCHG
+#undef __CMPXCHG_CASE
+
+#define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)
+
+#define __CMPXCHG_DBL(name, mb, cl...)					\
+static inline int __cmpxchg_double##name(unsigned long old1,		\
+					 unsigned long old2,		\
+					 unsigned long new1,		\
+					 unsigned long new2,		\
+					 volatile void *ptr)		\
+{									\
+	unsigned long oldval1 = old1;					\
+	unsigned long oldval2 = old2;					\
+	register unsigned long x0 asm ("x0") = old1;			\
+	register unsigned long x1 asm ("x1") = old2;			\
+	register unsigned long x2 asm ("x2") = new1;			\
+	register unsigned long x3 asm ("x3") = new2;			\
+	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
+									\
+	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	/* LL/SC */							\
+	"	nop\n"							\
+	"	nop\n"							\
+	"	nop\n"							\
+	__LL_SC_CMPXCHG_DBL(name),					\
+	/* LSE atomics */						\
+	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
+	"	eor	%[old1], %[old1], %[oldval1]\n"			\
+	"	eor	%[old2], %[old2], %[oldval2]\n"			\
+	"	orr	%[old1], %[old1], %[old2]")			\
+	: [old1] "+r" (x0), [old2] "+r" (x1),				\
+	  [v] "+Q" (*(unsigned long *)ptr)				\
+	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
+	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
+	: "x30" , ##cl);						\
+									\
+	return x0;							\
+}
+
+__CMPXCHG_DBL(   ,   )
+__CMPXCHG_DBL(_mb, al, "memory")
+
+#undef __LL_SC_CMPXCHG_DBL
+#undef __CMPXCHG_DBL
+
+#endif	/* __ASM_ATOMIC_LSE_H */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 0fa47c4..624f967 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -35,28 +35,6 @@
 #define dma_rmb()	dmb(oshld)
 #define dma_wmb()	dmb(oshst)
 
-#ifndef CONFIG_SMP
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-
-#define smp_store_release(p, v)						\
-do {									\
-	compiletime_assert_atomic_type(*p);				\
-	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
-} while (0)
-
-#define smp_load_acquire(p)						\
-({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
-	compiletime_assert_atomic_type(*p);				\
-	barrier();							\
-	___p1;								\
-})
-
-#else
-
 #define smp_mb()	dmb(ish)
 #define smp_rmb()	dmb(ishld)
 #define smp_wmb()	dmb(ishst)
@@ -109,8 +87,6 @@
 	___p1;								\
 })
 
-#endif
-
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
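
With the !SMP variants removed, smp_store_release()/smp_load_acquire() always provide real ordering. A sketch of the classic message-passing pattern they support (flag and data are illustrative variables, not kernel code):

	static int data;
	static int flag;

	static void producer(void)
	{
		data = 42;
		smp_store_release(&flag, 1);	/* data store ordered before flag */
	}

	static int consumer(void)
	{
		if (smp_load_acquire(&flag))	/* flag load ordered before data */
			return data;		/* guaranteed to observe 42 */
		return -1;
	}
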
 
diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
new file mode 100644
index 0000000..4a748ce
--- /dev/null
+++ b/arch/arm64/include/asm/bug.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2015  ARM Limited
+ * Author: Dave Martin <Dave.Martin@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ARCH_ARM64_ASM_BUG_H
+#define _ARCH_ARM64_ASM_BUG_H
+
+#include <asm/debug-monitors.h>
+
+#ifdef CONFIG_GENERIC_BUG
+#define HAVE_ARCH_BUG
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
+#define __BUGVERBOSE_LOCATION(file, line)				\
+		".pushsection .rodata.str,\"aMS\",@progbits,1\n"	\
+	"2:	.string \"" file "\"\n\t"				\
+		".popsection\n\t"					\
+									\
+		".long 2b - 0b\n\t"					\
+		".short " #line "\n\t"
+#else
+#define _BUGVERBOSE_LOCATION(file, line)
+#endif
+
+#define _BUG_FLAGS(flags) __BUG_FLAGS(flags)
+
+#define __BUG_FLAGS(flags) asm volatile (		\
+		".pushsection __bug_table,\"a\"\n\t"	\
+		".align 2\n\t"				\
+	"0:	.long 1f - 0b\n\t"			\
+_BUGVERBOSE_LOCATION(__FILE__, __LINE__)		\
+		".short " #flags "\n\t"			\
+		".popsection\n"				\
+							\
+	"1:	brk %[imm]"				\
+		:: [imm] "i" (BUG_BRK_IMM)		\
+)
+
+#define BUG() do {				\
+	_BUG_FLAGS(0);				\
+	unreachable();				\
+} while (0)
+
+#define __WARN_TAINT(taint) _BUG_FLAGS(BUGFLAG_TAINT(taint))
+
+#endif /* ! CONFIG_GENERIC_BUG */
+
+#include <asm-generic/bug.h>
+
+#endif /* ! _ARCH_ARM64_ASM_BUG_H */
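
The table entries emitted above are PC-relative ("1f - 0b" for the faulting instruction, "2b - 0b" for the file string), so they stay valid however the kernel image is relocated. A hypothetical decoder mirroring that layout — the struct below is illustrative, not the generic bug_entry definition:

    struct rel_bug_entry {
        signed int     bug_addr_disp;  /* ".long 1f - 0b" */
        signed int     file_disp;      /* ".long 2b - 0b" (BUGVERBOSE only) */
        unsigned short line;           /* ".short line" */
        unsigned short flags;          /* ".short flags" */
    };

    /* Recover absolute addresses by adding the entry's own address,
     * which is what makes the format position-independent. */
    static unsigned long rel_bug_addr(const struct rel_bug_entry *e)
    {
        return (unsigned long)&e->bug_addr_disp + e->bug_addr_disp;
    }

    static const char *rel_bug_file(const struct rel_bug_entry *e)
    {
        return (const char *)&e->bug_addr_disp + e->file_disp;
    }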
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index d8c25b7..899e9f1 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -21,7 +21,9 @@
 #include <linux/bug.h>
 #include <linux/mmdebug.h>
 
+#include <asm/atomic.h>
 #include <asm/barrier.h>
+#include <asm/lse.h>
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 {
@@ -29,37 +31,73 @@
 
 	switch (size) {
 	case 1:
-		asm volatile("//	__xchg1\n"
+		asm volatile(ARM64_LSE_ATOMIC_INSN(
+		/* LL/SC */
+		"	prfm	pstl1strm, %2\n"
 		"1:	ldxrb	%w0, %2\n"
 		"	stlxrb	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
+		"	dmb	ish",
+		/* LSE atomics */
+		"	nop\n"
+		"	nop\n"
+		"	swpalb	%w3, %w0, %2\n"
+		"	nop\n"
+		"	nop")
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
 			: "r" (x)
 			: "memory");
 		break;
 	case 2:
-		asm volatile("//	__xchg2\n"
+		asm volatile(ARM64_LSE_ATOMIC_INSN(
+		/* LL/SC */
+		"	prfm	pstl1strm, %2\n"
 		"1:	ldxrh	%w0, %2\n"
 		"	stlxrh	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
+		"	dmb	ish",
+		/* LSE atomics */
+		"	nop\n"
+		"	nop\n"
+		"	swpalh	%w3, %w0, %2\n"
+		"	nop\n"
+		"	nop")
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
 			: "r" (x)
 			: "memory");
 		break;
 	case 4:
-		asm volatile("//	__xchg4\n"
+		asm volatile(ARM64_LSE_ATOMIC_INSN(
+		/* LL/SC */
+		"	prfm	pstl1strm, %2\n"
 		"1:	ldxr	%w0, %2\n"
 		"	stlxr	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
+		"	dmb	ish",
+		/* LSE atomics */
+		"	nop\n"
+		"	nop\n"
+		"	swpal	%w3, %w0, %2\n"
+		"	nop\n"
+		"	nop")
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
 			: "r" (x)
 			: "memory");
 		break;
 	case 8:
-		asm volatile("//	__xchg8\n"
+		asm volatile(ARM64_LSE_ATOMIC_INSN(
+		/* LL/SC */
+		"	prfm	pstl1strm, %2\n"
 		"1:	ldxr	%0, %2\n"
 		"	stlxr	%w1, %3, %2\n"
 		"	cbnz	%w1, 1b\n"
+		"	dmb	ish",
+		/* LSE atomics */
+		"	nop\n"
+		"	nop\n"
+		"	swpal	%3, %0, %2\n"
+		"	nop\n"
+		"	nop")
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
 			: "r" (x)
 			: "memory");
@@ -68,7 +106,6 @@
 		BUILD_BUG();
 	}
 
-	smp_mb();
 	return ret;
 }
 
@@ -83,131 +120,39 @@
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 				      unsigned long new, int size)
 {
-	unsigned long oldval = 0, res;
-
 	switch (size) {
 	case 1:
-		do {
-			asm volatile("// __cmpxchg1\n"
-			"	ldxrb	%w1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%w1, %w3\n"
-			"	b.ne	1f\n"
-			"	stxrb	%w0, %w4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_1(ptr, (u8)old, new);
 	case 2:
-		do {
-			asm volatile("// __cmpxchg2\n"
-			"	ldxrh	%w1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%w1, %w3\n"
-			"	b.ne	1f\n"
-			"	stxrh	%w0, %w4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_2(ptr, (u16)old, new);
 	case 4:
-		do {
-			asm volatile("// __cmpxchg4\n"
-			"	ldxr	%w1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%w1, %w3\n"
-			"	b.ne	1f\n"
-			"	stxr	%w0, %w4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_4(ptr, old, new);
 	case 8:
-		do {
-			asm volatile("// __cmpxchg8\n"
-			"	ldxr	%1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%1, %3\n"
-			"	b.ne	1f\n"
-			"	stxr	%w0, %4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_8(ptr, old, new);
 	default:
 		BUILD_BUG();
 	}
 
-	return oldval;
-}
-
-#define system_has_cmpxchg_double()     1
-
-static inline int __cmpxchg_double(volatile void *ptr1, volatile void *ptr2,
-		unsigned long old1, unsigned long old2,
-		unsigned long new1, unsigned long new2, int size)
-{
-	unsigned long loop, lost;
-
-	switch (size) {
-	case 8:
-		VM_BUG_ON((unsigned long *)ptr2 - (unsigned long *)ptr1 != 1);
-		do {
-			asm volatile("// __cmpxchg_double8\n"
-			"	ldxp	%0, %1, %2\n"
-			"	eor	%0, %0, %3\n"
-			"	eor	%1, %1, %4\n"
-			"	orr	%1, %0, %1\n"
-			"	mov	%w0, #0\n"
-			"	cbnz	%1, 1f\n"
-			"	stxp	%w0, %5, %6, %2\n"
-			"1:\n"
-				: "=&r"(loop), "=&r"(lost), "+Q" (*(u64 *)ptr1)
-				: "r" (old1), "r"(old2), "r"(new1), "r"(new2));
-		} while (loop);
-		break;
-	default:
-		BUILD_BUG();
-	}
-
-	return !lost;
-}
-
-static inline int __cmpxchg_double_mb(volatile void *ptr1, volatile void *ptr2,
-			unsigned long old1, unsigned long old2,
-			unsigned long new1, unsigned long new2, int size)
-{
-	int ret;
-
-	smp_mb();
-	ret = __cmpxchg_double(ptr1, ptr2, old1, old2, new1, new2, size);
-	smp_mb();
-
-	return ret;
+	unreachable();
 }
 
 static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 					 unsigned long new, int size)
 {
-	unsigned long ret;
+	switch (size) {
+	case 1:
+		return __cmpxchg_case_mb_1(ptr, (u8)old, new);
+	case 2:
+		return __cmpxchg_case_mb_2(ptr, (u16)old, new);
+	case 4:
+		return __cmpxchg_case_mb_4(ptr, old, new);
+	case 8:
+		return __cmpxchg_case_mb_8(ptr, old, new);
+	default:
+		BUILD_BUG();
+	}
 
-	smp_mb();
-	ret = __cmpxchg(ptr, old, new, size);
-	smp_mb();
-
-	return ret;
+	unreachable();
 }
 
 #define cmpxchg(ptr, o, n) \
@@ -228,21 +173,32 @@
 	__ret; \
 })
 
+#define system_has_cmpxchg_double()     1
+
+#define __cmpxchg_double_check(ptr1, ptr2)					\
+({										\
+	if (sizeof(*(ptr1)) != 8)						\
+		BUILD_BUG();							\
+	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
+})
+
 #define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
 ({\
 	int __ret;\
-	__ret = __cmpxchg_double_mb((ptr1), (ptr2), (unsigned long)(o1), \
-			(unsigned long)(o2), (unsigned long)(n1), \
-			(unsigned long)(n2), sizeof(*(ptr1)));\
+	__cmpxchg_double_check(ptr1, ptr2); \
+	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
+				     (unsigned long)(n1), (unsigned long)(n2), \
+				     ptr1); \
 	__ret; \
 })
 
 #define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
 ({\
 	int __ret;\
-	__ret = __cmpxchg_double((ptr1), (ptr2), (unsigned long)(o1), \
-			(unsigned long)(o2), (unsigned long)(n1), \
-			(unsigned long)(n2), sizeof(*(ptr1)));\
+	__cmpxchg_double_check(ptr1, ptr2); \
+	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
+				  (unsigned long)(n1), (unsigned long)(n2), \
+				  ptr1); \
 	__ret; \
 })
 
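
The callers' view is unchanged: cmpxchg() still returns the previous value, so the usual optimistic retry loop works on top of the new size-dispatched helpers. A typical pattern, sketched in kernel style with READ_ONCE() and cmpxchg() as defined above:

    /* Add 'inc' to *ctr but never past 'limit'; returns the value that
     * ended up in the counter. Lock-free via the cmpxchg retry loop. */
    static unsigned long add_saturated(unsigned long *ctr,
                                       unsigned long inc,
                                       unsigned long limit)
    {
        unsigned long old, new;

        do {
            old = READ_ONCE(*ctr);
            if (old >= limit)
                return old;             /* already saturated */
            new = old + inc;
            if (new > limit)
                new = limit;
        } while (cmpxchg(ctr, old, new) != old);

        return new;
    }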
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index c104421..1715707 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -25,15 +25,20 @@
 #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE	1
 #define ARM64_WORKAROUND_845719			2
 #define ARM64_HAS_SYSREG_GIC_CPUIF		3
+#define ARM64_HAS_PAN				4
+#define ARM64_HAS_LSE_ATOMICS			5
 
-#define ARM64_NCAPS				4
+#define ARM64_NCAPS				6
 
 #ifndef __ASSEMBLY__
 
+#include <linux/kernel.h>
+
 struct arm64_cpu_capabilities {
 	const char *desc;
 	u16 capability;
 	bool (*matches)(const struct arm64_cpu_capabilities *);
+	void (*enable)(void);
 	union {
 		struct {	/* To be used for erratum handling only */
 			u32 midr_model;
@@ -41,8 +46,8 @@
 		};
 
 		struct {	/* Feature register checking */
-			u64 register_mask;
-			u64 register_value;
+			int field_pos;
+			int min_field_value;
 		};
 	};
 };
@@ -70,6 +75,13 @@
 		__set_bit(num, cpu_hwcaps);
 }
 
+static inline int __attribute_const__ cpuid_feature_extract_field(u64 features,
+								  int field)
+{
+	return (s64)(features << (64 - 4 - field)) >> (64 - 4);
+}
+
+
 void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 			    const char *info);
 void check_local_cpu_errata(void);
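
cpuid_feature_extract_field() shifts the 4-bit ID register field to the top of the word and arithmetic-shifts it back down, so reserved 0xf values come out negative and fail the ">= min_field_value" comparison. A standalone check with assumed field values:

    #include <stdio.h>
    #include <stdint.h>

    static int extract_field(uint64_t features, int field)
    {
        return (int64_t)(features << (64 - 4 - field)) >> (64 - 4);
    }

    int main(void)
    {
        /* the PAN field lives at bit 20 of ID_AA64MMFR1_EL1 */
        uint64_t pan_present = (uint64_t)0x1 << 20;
        uint64_t pan_bogus   = (uint64_t)0xf << 20;

        printf("%d\n", extract_field(pan_present, 20)); /* 1  */
        printf("%d\n", extract_field(pan_bogus, 20));   /* -1 */
        return 0;
    }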
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index a84ec60..ee6403d 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -81,9 +81,6 @@
 #define ID_AA64MMFR0_BIGEND(mmfr0)	\
 	(((mmfr0) & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT)
 
-#define SCTLR_EL1_CP15BEN	(0x1 << 5)
-#define SCTLR_EL1_SED		(0x1 << 8)
-
 #ifndef __ASSEMBLY__
 
 /*
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 40ec68a..279c85b5 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -18,6 +18,12 @@
 
 #ifdef __KERNEL__
 
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <asm/esr.h>
+#include <asm/insn.h>
+#include <asm/ptrace.h>
+
 /* Low-level stepping controls. */
 #define DBG_MDSCR_SS		(1 << 0)
 #define DBG_SPSR_SS		(1 << 21)
@@ -38,12 +44,7 @@
 /*
  * Break point instruction encoding
  */
-#define BREAK_INSTR_SIZE		4
-
-/*
- * ESR values expected for dynamic and compile time BRK instruction
- */
-#define DBG_ESR_VAL_BRK(x)	(0xf2000000 | ((x) & 0xfffff))
+#define BREAK_INSTR_SIZE		AARCH64_INSN_SIZE
 
 /*
  * #imm16 values used for BRK instruction generation
@@ -51,10 +52,12 @@
  * 0x100: for triggering a fault on purpose (reserved)
  * 0x400: for dynamic BRK instruction
  * 0x401: for compile time BRK instruction
+ * 0x800: kernel-mode BUG() and WARN() traps
  */
 #define FAULT_BRK_IMM			0x100
 #define KGDB_DYN_DBG_BRK_IMM		0x400
 #define KGDB_COMPILED_DBG_BRK_IMM	0x401
+#define BUG_BRK_IMM			0x800
 
 /*
  * BRK instruction encoding
@@ -68,25 +71,10 @@
  */
 #define AARCH64_BREAK_FAULT	(AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
 
-/*
- * Extract byte from BRK instruction
- */
-#define KGDB_DYN_DBG_BRK_INS_BYTE(x) \
-	((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
-
-/*
- * Extract byte from BRK #imm16
- */
-#define KGBD_DYN_DBG_BRK_IMM_BYTE(x) \
-	(((((KGDB_DYN_DBG_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
-
-#define KGDB_DYN_DBG_BRK_BYTE(x) \
-	(KGDB_DYN_DBG_BRK_INS_BYTE(x) | KGBD_DYN_DBG_BRK_IMM_BYTE(x))
-
-#define  KGDB_DYN_BRK_INS_BYTE0  KGDB_DYN_DBG_BRK_BYTE(0)
-#define  KGDB_DYN_BRK_INS_BYTE1  KGDB_DYN_DBG_BRK_BYTE(1)
-#define  KGDB_DYN_BRK_INS_BYTE2  KGDB_DYN_DBG_BRK_BYTE(2)
-#define  KGDB_DYN_BRK_INS_BYTE3  KGDB_DYN_DBG_BRK_BYTE(3)
+#define AARCH64_BREAK_KGDB_DYN_DBG	\
+	(AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))
+#define KGDB_DYN_BRK_INS_BYTE(x)	\
+	((AARCH64_BREAK_KGDB_DYN_DBG >> (8 * (x))) & 0xff)
 
 #define CACHE_FLUSH_IS_SAFE		1
 
@@ -127,13 +115,13 @@
 
 u8 debug_monitors_arch(void);
 
-enum debug_el {
+enum dbg_active_el {
 	DBG_ACTIVE_EL0 = 0,
 	DBG_ACTIVE_EL1,
 };
 
-void enable_debug_monitors(enum debug_el el);
-void disable_debug_monitors(enum debug_el el);
+void enable_debug_monitors(enum dbg_active_el el);
+void disable_debug_monitors(enum dbg_active_el el);
 
 void user_rewind_single_step(struct task_struct *task);
 void user_fastforward_single_step(struct task_struct *task);
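
The simplified KGDB_DYN_BRK_INS_BYTE() just slices the encoding of "brk #KGDB_DYN_DBG_BRK_IMM" byte by byte, replacing the four per-byte macros deleted above. A quick standalone check, with AARCH64_BREAK_MON assumed to be the 0xd4200000 BRK base encoding used elsewhere in the tree:

    #include <stdio.h>

    #define AARCH64_BREAK_MON       0xd4200000u  /* assumed, see insn.h */
    #define KGDB_DYN_DBG_BRK_IMM    0x400
    #define AARCH64_BREAK_KGDB_DYN_DBG \
        (AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))
    #define KGDB_DYN_BRK_INS_BYTE(x) \
        ((AARCH64_BREAK_KGDB_DYN_DBG >> (8 * (x))) & 0xff)

    int main(void)
    {
        int i;

        /* prints "00 80 20 d4": brk #0x400, little-endian memory order */
        for (i = 0; i < 4; i++)
            printf("%02x ", KGDB_DYN_BRK_INS_BYTE(i));
        printf("\n");
        return 0;
    }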
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 7052245..77eeb2c 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -18,6 +18,8 @@
 #ifndef __ASM_ESR_H
 #define __ASM_ESR_H
 
+#include <asm/memory.h>
+
 #define ESR_ELx_EC_UNKNOWN	(0x00)
 #define ESR_ELx_EC_WFx		(0x01)
 /* Unallocated EC: 0x02 */
@@ -99,6 +101,13 @@
 #define ESR_ELx_WFx_ISS_WFE	(UL(1) << 0)
 #define ESR_ELx_xVC_IMM_MASK	((1UL << 16) - 1)
 
+/* ESR value templates for specific events */
+
+/* BRK instruction trap from AArch64 state */
+#define ESR_ELx_VAL_BRK64(imm)					\
+	((ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | ESR_ELx_IL |	\
+	 ((imm) & 0xffff))
+
 #ifndef __ASSEMBLY__
 #include <asm/types.h>
 
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 0303705..6cb7e1a 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -18,7 +18,13 @@
 #ifndef __ASM_EXCEPTION_H
 #define __ASM_EXCEPTION_H
 
+#include <linux/ftrace.h>
+
 #define __exception	__attribute__((section(".exception.text")))
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define __exception_irq_entry	__irq_entry
+#else
 #define __exception_irq_entry	__exception
+#endif
 
 #endif	/* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index c0739187..8b9884c 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -8,7 +8,7 @@
  * Copyright (C) 1998 Ingo Molnar
  * Copyright (C) 2013 Mark Salter <msalter@redhat.com>
  *
- * Adapted from arch/x86_64 version.
+ * Adapted from arch/x86 version.
  *
  */
 
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 74069b3..007a69f 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -20,10 +20,17 @@
 
 #include <linux/futex.h>
 #include <linux/uaccess.h>
+
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/errno.h>
+#include <asm/sysreg.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)		\
 	asm volatile(							\
+	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,		\
+		    CONFIG_ARM64_PAN)					\
+"	prfm	pstl1strm, %2\n"					\
 "1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
 "2:	stlxr	%w3, %w0, %2\n"						\
@@ -39,6 +46,8 @@
 "	.align	3\n"							\
 "	.quad	1b, 4b, 2b, 4b\n"					\
 "	.popsection\n"							\
+	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,		\
+		    CONFIG_ARM64_PAN)					\
 	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
 	: "r" (oparg), "Ir" (-EFAULT)					\
 	: "memory")
@@ -112,6 +121,7 @@
 		return -EFAULT;
 
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
+"	prfm	pstl1strm, %2\n"
 "1:	ldxr	%w1, %2\n"
 "	sub	%w3, %w1, %w4\n"
 "	cbnz	%w3, 3f\n"
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 6aae421..2bb7009 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -24,9 +24,7 @@
 
 typedef struct {
 	unsigned int __softirq_pending;
-#ifdef CONFIG_SMP
 	unsigned int ipi_irqs[NR_IPI];
-#endif
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
@@ -34,10 +32,8 @@
 #define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
 #define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)
 
-#ifdef CONFIG_SMP
 u64 smp_irq_stat_cpu(unsigned int cpu);
 #define arch_irq_stat_cpu	smp_irq_stat_cpu
-#endif
 
 #define __ARCH_IRQ_EXIT_IRQS_DISABLED	1
 
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 2fd9b14..bb4052e 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -13,10 +13,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #ifndef __ASM_HUGETLB_H
diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h
index b4f6b19..8e24ef3 100644
--- a/arch/arm64/include/asm/irq_work.h
+++ b/arch/arm64/include/asm/irq_work.h
@@ -1,8 +1,6 @@
 #ifndef __ASM_IRQ_WORK_H
 #define __ASM_IRQ_WORK_H
 
-#ifdef CONFIG_SMP
-
 #include <asm/smp.h>
 
 static inline bool arch_irq_work_has_interrupt(void)
@@ -10,13 +8,4 @@
 	return !!__smp_cross_call;
 }
 
-#else
-
-static inline bool arch_irq_work_has_interrupt(void)
-{
-	return false;
-}
-
-#endif
-
 #endif /* __ASM_IRQ_WORK_H */
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index c0e5165..1b5e0e8 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -26,14 +26,28 @@
 
 #define JUMP_LABEL_NOP_SIZE		AARCH64_INSN_SIZE
 
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm goto("1: nop\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 ".align 3\n\t"
 		 ".quad 1b, %l[l_yes], %c0\n\t"
 		 ".popsection\n\t"
-		 :  :  "i"(key) :  : l_yes);
+		 :  :  "i"(&((char *)key)[branch]) :  : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm goto("1: b %l[l_yes]\n\t"
+		 ".pushsection __jump_table,  \"aw\"\n\t"
+		 ".align 3\n\t"
+		 ".quad 1b, %l[l_yes], %c0\n\t"
+		 ".popsection\n\t"
+		 :  :  "i"(&((char *)key)[branch]) :  : l_yes);
 
 	return false;
 l_yes:
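
Both variants now stash the branch polarity in bit 0 of the key pointer recorded in __jump_table (struct static_key is word-aligned, so the bit is otherwise unused); "&((char *)key)[branch]" is just "(unsigned long)key + branch". The patching core can then split the two back out, roughly:

    struct static_key;

    /* 'key_field' is the third .quad of a __jump_table entry */
    static inline struct static_key *entry_key(unsigned long key_field)
    {
        return (struct static_key *)(key_field & ~1UL);
    }

    static inline int entry_branch(unsigned long key_field)
    {
        return key_field & 1UL;  /* which macro variant emitted the entry */
    }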
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
new file mode 100644
index 0000000..3de42d6
--- /dev/null
+++ b/arch/arm64/include/asm/lse.h
@@ -0,0 +1,53 @@
+#ifndef __ASM_LSE_H
+#define __ASM_LSE_H
+
+#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+
+#include <linux/stringify.h>
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
+
+#ifdef __ASSEMBLER__
+
+.arch_extension	lse
+
+.macro alt_lse, llsc, lse
+	alternative_insn "\llsc", "\lse", ARM64_HAS_LSE_ATOMICS
+.endm
+
+#else	/* __ASSEMBLER__ */
+
+__asm__(".arch_extension	lse");
+
+/* Move the ll/sc atomics out-of-line */
+#define __LL_SC_INLINE
+#define __LL_SC_PREFIX(x)	__ll_sc_##x
+#define __LL_SC_EXPORT(x)	EXPORT_SYMBOL(__LL_SC_PREFIX(x))
+
+/* Macro for constructing calls to out-of-line ll/sc atomics */
+#define __LL_SC_CALL(op)	"bl\t" __stringify(__LL_SC_PREFIX(op)) "\n"
+
+/* In-line patching at runtime */
+#define ARM64_LSE_ATOMIC_INSN(llsc, lse)				\
+	ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
+
+#endif	/* __ASSEMBLER__ */
+#else	/* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+
+#ifdef __ASSEMBLER__
+
+.macro alt_lse, llsc, lse
+	\llsc
+.endm
+
+#else	/* __ASSEMBLER__ */
+
+#define __LL_SC_INLINE		static inline
+#define __LL_SC_PREFIX(x)	x
+#define __LL_SC_EXPORT(x)
+
+#define ARM64_LSE_ATOMIC_INSN(llsc, lse)	llsc
+
+#endif	/* __ASSEMBLER__ */
+#endif	/* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#endif	/* __ASM_LSE_H */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index f800d45..44a59c2 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -114,6 +114,14 @@
 #define PHYS_OFFSET		({ memstart_addr; })
 
 /*
+ * The maximum physical address that the linear direct mapping
+ * of system RAM can cover. (PAGE_OFFSET can be interpreted as
+ * a 2's complement signed quantity and negated to derive the
+ * maximum size of the linear mapping.)
+ */
+#define MAX_MEMBLOCK_ADDR	({ memstart_addr - PAGE_OFFSET - 1; })
+
+/*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
  *
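
Worked example of the new limit, assuming the 39-bit VA configuration where PAGE_OFFSET is 0xffffffc000000000 (so its negation is 0x4000000000, a 256GB linear map):

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_offset   = 0xffffffc000000000ul; /* assumed VA39 */
        unsigned long memstart_addr = 0x80000000ul;         /* example DRAM base */

        /* memstart_addr - PAGE_OFFSET - 1, wrapping modulo 2^64 */
        unsigned long max = memstart_addr - page_offset - 1;

        /* prints 0x407fffffff: RAM above this cannot be linearly mapped */
        printf("MAX_MEMBLOCK_ADDR = %#lx\n", max);
        return 0;
    }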
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 79fcfb0..0302087 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -28,7 +28,6 @@
 #define ASID(mm)	((mm)->context.id & 0xffff)
 
 extern void paging_init(void);
-extern void setup_mm_for_reboot(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 4fde8c1..0a456be 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,8 +16,6 @@
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H
 
-#ifdef CONFIG_SMP
-
 static inline void set_my_cpu_offset(unsigned long off)
 {
 	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -38,12 +36,6 @@
 }
 #define __my_cpu_offset __my_cpu_offset()
 
-#else	/* !CONFIG_SMP */
-
-#define set_my_cpu_offset(x)	do { } while (0)
-
-#endif /* CONFIG_SMP */
-
 #define PERCPU_OP(op, asm_op)						\
 static inline unsigned long __percpu_##op(void *ptr,			\
 			unsigned long val, int size)			\
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 6471773..7bd3cdb 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -17,7 +17,7 @@
 #ifndef __ASM_PERF_EVENT_H
 #define __ASM_PERF_EVENT_H
 
-#ifdef CONFIG_HW_PERF_EVENTS
+#ifdef CONFIG_PERF_EVENTS
 struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 59bfae7..24154b0 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -104,6 +104,7 @@
 #define PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
 #define PTE_AF			(_AT(pteval_t, 1) << 10)	/* Access Flag */
 #define PTE_NG			(_AT(pteval_t, 1) << 11)	/* nG */
+#define PTE_DBM			(_AT(pteval_t, 1) << 51)	/* Dirty Bit Management */
 #define PTE_PXN			(_AT(pteval_t, 1) << 53)	/* Privileged XN */
 #define PTE_UXN			(_AT(pteval_t, 1) << 54)	/* User XN */
 
@@ -168,5 +169,7 @@
 #define TCR_TG1_64K		(UL(3) << 30)
 #define TCR_ASID16		(UL(1) << 36)
 #define TCR_TBI0		(UL(1) << 37)
+#define TCR_HA			(UL(1) << 39)
+#define TCR_HD			(UL(1) << 40)
 
 #endif
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 56283f8..6900b2d9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -16,6 +16,7 @@
 #ifndef __ASM_PGTABLE_H
 #define __ASM_PGTABLE_H
 
+#include <asm/bug.h>
 #include <asm/proc-fns.h>
 
 #include <asm/memory.h>
@@ -27,7 +28,11 @@
 #define PTE_VALID		(_AT(pteval_t, 1) << 0)
 #define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
 #define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
+#ifdef CONFIG_ARM64_HW_AFDBM
+#define PTE_WRITE		(PTE_DBM)		 /* same as DBM */
+#else
 #define PTE_WRITE		(_AT(pteval_t, 1) << 57)
+#endif
 #define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
 
 /*
@@ -48,18 +53,16 @@
 #define FIRST_USER_ADDRESS	0UL
 
 #ifndef __ASSEMBLY__
+
+#include <linux/mmdebug.h>
+
 extern void __pte_error(const char *file, int line, unsigned long val);
 extern void __pmd_error(const char *file, int line, unsigned long val);
 extern void __pud_error(const char *file, int line, unsigned long val);
 extern void __pgd_error(const char *file, int line, unsigned long val);
 
-#ifdef CONFIG_SMP
 #define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#else
-#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF)
-#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
-#endif
 
 #define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
@@ -137,12 +140,20 @@
  * The following only work if pte_present(). Undefined behaviour otherwise.
  */
 #define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
-#define pte_dirty(pte)		(!!(pte_val(pte) & PTE_DIRTY))
 #define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
 #define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 
+#ifdef CONFIG_ARM64_HW_AFDBM
+#define pte_hw_dirty(pte)	(!(pte_val(pte) & PTE_RDONLY))
+#else
+#define pte_hw_dirty(pte)	(0)
+#endif
+#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
+#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
+
+#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
 #define pte_valid_user(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 #define pte_valid_not_user(pte) \
@@ -209,20 +220,49 @@
 	}
 }
 
+struct mm_struct;
+struct vm_area_struct;
+
 extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 
+/*
+ * PTE bits configuration in the presence of hardware Dirty Bit Management
+ * (PTE_WRITE == PTE_DBM):
+ *
+ * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
+ *   0      0      |   1           0          0
+ *   0      1      |   1           1          0
+ *   1      0      |   1           0          1
+ *   1      1      |   0           1          x
+ *
+ * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
+ * the page fault mechanism. Checking the dirty status of a pte becomes:
+ *
+ *   PTE_DIRTY || !PTE_RDONLY
+ */
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
 	if (pte_valid_user(pte)) {
 		if (!pte_special(pte) && pte_exec(pte))
 			__sync_icache_dcache(pte, addr);
-		if (pte_dirty(pte) && pte_write(pte))
+		if (pte_sw_dirty(pte) && pte_write(pte))
 			pte_val(pte) &= ~PTE_RDONLY;
 		else
 			pte_val(pte) |= PTE_RDONLY;
 	}
 
+	/*
+	 * If the existing pte is valid, check for potential race with
+	 * hardware updates of the pte (ptep_set_access_flags safely changes
+	 * valid ptes without going through an invalid entry).
+	 */
+	if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
+	    pte_valid(*ptep)) {
+		BUG_ON(!pte_young(pte));
+		BUG_ON(pte_write(*ptep) && !pte_dirty(pte));
+	}
+
 	set_pte(ptep, pte);
 }
 
@@ -461,6 +501,9 @@
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
 			      PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
+	/* preserve the hardware dirty information */
+	if (pte_hw_dirty(pte))
+		newprot |= PTE_DIRTY;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
@@ -470,6 +513,101 @@
 	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
 }
 
+#ifdef CONFIG_ARM64_HW_AFDBM
+/*
+ * Atomic pte/pmd modifications.
+ */
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pte_t *ptep)
+{
+	pteval_t pteval;
+	unsigned int tmp, res;
+
+	asm volatile("//	ptep_test_and_clear_young\n"
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	ubfx	%w3, %w0, %5, #1	// extract PTE_AF (young)\n"
+	"	and	%0, %0, %4		// clear PTE_AF\n"
+	"	stxr	%w1, %0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)), "=&r" (res)
+	: "L" (~PTE_AF), "I" (ilog2(PTE_AF)));
+
+	return res;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pmd_t *pmdp)
+{
+	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+				       unsigned long address, pte_t *ptep)
+{
+	pteval_t old_pteval;
+	unsigned int tmp;
+
+	asm volatile("//	ptep_get_and_clear\n"
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	stxr	%w1, xzr, %2\n"
+	"	cbnz	%w1, 1b\n"
+	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)));
+
+	return __pte(old_pteval);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+				       unsigned long address, pmd_t *pmdp)
+{
+	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/*
+ * ptep_set_wrprotect - mark read-only while transferring potential hardware
+ * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
+ */
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
+{
+	pteval_t pteval;
+	unsigned long tmp;
+
+	asm volatile("//	ptep_set_wrprotect\n"
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	tst	%0, %4			// check for hw dirty (!PTE_RDONLY)\n"
+	"	csel	%1, %3, xzr, eq		// set PTE_DIRTY|PTE_RDONLY if dirty\n"
+	"	orr	%0, %0, %1		// if !dirty, PTE_RDONLY is already set\n"
+	"	and	%0, %0, %5		// clear PTE_WRITE/PTE_DBM\n"
+	"	stxr	%w1, %0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
+	: "r" (PTE_DIRTY|PTE_RDONLY), "L" (PTE_RDONLY), "L" (~PTE_WRITE)
+	: "cc");
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pmd_t *pmdp)
+{
+	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
+}
+#endif
+#endif	/* CONFIG_ARM64_HW_AFDBM */
+
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
 
@@ -505,6 +643,21 @@
 
 #define pgtable_cache_init() do { } while (0)
 
+/*
+ * On AArch64, the cache coherency is handled via the set_pte_at() function.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+				    unsigned long addr, pte_t *ptep)
+{
+	/*
+	 * set_pte() does not have a DSB for user mappings, so make sure that
+	 * the page table write is visible.
+	 */
+	dsb(ishst);
+}
+
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
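
The truth table in the comment above reduces to two predicates once PTE_WRITE aliases PTE_DBM: hardware reports dirtiness by clearing PTE_RDONLY, and software may additionally have set PTE_DIRTY. A tiny model of that encoding, with the three bits as plain booleans for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    struct model_pte {
        bool rdonly;    /* PTE_RDONLY */
        bool write;     /* PTE_WRITE, i.e. PTE_DBM */
        bool sw_dirty;  /* PTE_DIRTY */
    };

    static bool model_pte_dirty(struct model_pte p)
    {
        bool hw_dirty = !p.rdonly;      /* pte_hw_dirty() */
        return p.sw_dirty || hw_dirty;  /* pte_dirty() */
    }

    int main(void)
    {
        /* row 4 of the table: dirty + writable, sw bit is a don't-care */
        struct model_pte p = { .rdonly = false, .write = true,
                               .sw_dirty = false };

        printf("%d\n", model_pte_dirty(p)); /* 1 */
        return 0;
    }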
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index e4c893e..98f3235 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -186,4 +186,6 @@
 
 #endif
 
+void cpu_enable_pan(void);
+
 #endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
deleted file mode 100644
index 49d7e1a..0000000
--- a/arch/arm64/include/asm/psci.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2013 ARM Limited
- */
-
-#ifndef __ASM_PSCI_H
-#define __ASM_PSCI_H
-
-int __init psci_dt_init(void);
-
-#ifdef CONFIG_ACPI
-int __init psci_acpi_init(void);
-bool __init acpi_psci_present(void);
-bool __init acpi_psci_use_hvc(void);
-#else
-static inline int psci_acpi_init(void) { return 0; }
-static inline bool acpi_psci_present(void) { return false; }
-#endif
-
-#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index d6dd9fd..536274e 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -183,11 +183,7 @@
 
 #define instruction_pointer(regs)	((unsigned long)(regs)->pc)
 
-#ifdef CONFIG_SMP
 extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
 
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index db02be8..d9c3d6a 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -20,10 +20,6 @@
 #include <linux/cpumask.h>
 #include <linux/thread_info.h>
 
-#ifndef CONFIG_SMP
-# error "<asm/smp.h> included in non-SMP build"
-#endif
-
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
 struct seq_file;
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index 7abf757..af58dcd 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -56,6 +56,4 @@
 	return -EINVAL;
 }
 
-void __init do_post_cpus_up_work(void);
-
 #endif /* __ASM_SMP_PLAT_H */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index cee1287..c85e96d 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -16,6 +16,7 @@
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
 
+#include <asm/lse.h>
 #include <asm/spinlock_types.h>
 #include <asm/processor.h>
 
@@ -38,11 +39,21 @@
 
 	asm volatile(
 	/* Atomically increment the next ticket. */
+	ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
 "	prfm	pstl1strm, %3\n"
 "1:	ldaxr	%w0, %3\n"
 "	add	%w1, %w0, %w5\n"
 "	stxr	%w2, %w1, %3\n"
-"	cbnz	%w2, 1b\n"
+"	cbnz	%w2, 1b\n",
+	/* LSE atomics */
+"	mov	%w2, %w5\n"
+"	ldadda	%w2, %w0, %3\n"
+"	nop\n"
+"	nop\n"
+"	nop\n"
+	)
+
 	/* Did we get the lock? */
 "	eor	%w1, %w0, %w0, ror #16\n"
 "	cbz	%w1, 3f\n"
@@ -67,15 +78,25 @@
 	unsigned int tmp;
 	arch_spinlock_t lockval;
 
-	asm volatile(
-"	prfm	pstl1strm, %2\n"
-"1:	ldaxr	%w0, %2\n"
-"	eor	%w1, %w0, %w0, ror #16\n"
-"	cbnz	%w1, 2f\n"
-"	add	%w0, %w0, %3\n"
-"	stxr	%w1, %w0, %2\n"
-"	cbnz	%w1, 1b\n"
-"2:"
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldaxr	%w0, %2\n"
+	"	eor	%w1, %w0, %w0, ror #16\n"
+	"	cbnz	%w1, 2f\n"
+	"	add	%w0, %w0, %3\n"
+	"	stxr	%w1, %w0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	"2:",
+	/* LSE atomics */
+	"	ldr	%w0, %2\n"
+	"	eor	%w1, %w0, %w0, ror #16\n"
+	"	cbnz	%w1, 1f\n"
+	"	add	%w1, %w0, %3\n"
+	"	casa	%w0, %w1, %2\n"
+	"	and	%w1, %w1, #0xffff\n"
+	"	eor	%w1, %w1, %w0, lsr #16\n"
+	"1:")
 	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
 	: "I" (1 << TICKET_SHIFT)
 	: "memory");
@@ -85,10 +106,19 @@
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	asm volatile(
-"	stlrh	%w1, %0\n"
-	: "=Q" (lock->owner)
-	: "r" (lock->owner + 1)
+	unsigned long tmp;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	ldrh	%w1, %0\n"
+	"	add	%w1, %w1, #1\n"
+	"	stlrh	%w1, %0",
+	/* LSE atomics */
+	"	mov	%w1, #1\n"
+	"	nop\n"
+	"	staddlh	%w1, %0")
+	: "=Q" (lock->owner), "=&r" (tmp)
+	:
 	: "memory");
 }
 
@@ -123,13 +153,24 @@
 {
 	unsigned int tmp;
 
-	asm volatile(
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
 	"	sevl\n"
 	"1:	wfe\n"
 	"2:	ldaxr	%w0, %1\n"
 	"	cbnz	%w0, 1b\n"
 	"	stxr	%w0, %w2, %1\n"
 	"	cbnz	%w0, 2b\n"
+	"	nop",
+	/* LSE atomics */
+	"1:	mov	%w0, wzr\n"
+	"2:	casa	%w0, %w2, %1\n"
+	"	cbz	%w0, 3f\n"
+	"	ldxr	%w0, %1\n"
+	"	cbz	%w0, 2b\n"
+	"	wfe\n"
+	"	b	1b\n"
+	"3:")
 	: "=&r" (tmp), "+Q" (rw->lock)
 	: "r" (0x80000000)
 	: "memory");
@@ -139,11 +180,18 @@
 {
 	unsigned int tmp;
 
-	asm volatile(
-	"	ldaxr	%w0, %1\n"
-	"	cbnz	%w0, 1f\n"
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"1:	ldaxr	%w0, %1\n"
+	"	cbnz	%w0, 2f\n"
 	"	stxr	%w0, %w2, %1\n"
-	"1:\n"
+	"	cbnz	%w0, 1b\n"
+	"2:",
+	/* LSE atomics */
+	"	mov	%w0, wzr\n"
+	"	casa	%w0, %w2, %1\n"
+	"	nop\n"
+	"	nop")
 	: "=&r" (tmp), "+Q" (rw->lock)
 	: "r" (0x80000000)
 	: "memory");
@@ -153,9 +201,10 @@
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	asm volatile(
-	"	stlr	%w1, %0\n"
-	: "=Q" (rw->lock) : "r" (0) : "memory");
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	"	stlr	wzr, %0",
+	"	swpl	wzr, wzr, %0")
+	: "=Q" (rw->lock) :: "memory");
 }
 
 /* write_can_lock - would write_trylock() succeed? */
@@ -172,6 +221,10 @@
  *
  * The memory barriers are implicit with the load-acquire and store-release
  * instructions.
+ *
+ * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
+ * and LSE implementations may exhibit different behaviour (although this
+ * will have no effect on lockdep).
  */
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
@@ -179,26 +232,43 @@
 
 	asm volatile(
 	"	sevl\n"
+	ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
 	"1:	wfe\n"
 	"2:	ldaxr	%w0, %2\n"
 	"	add	%w0, %w0, #1\n"
 	"	tbnz	%w0, #31, 1b\n"
 	"	stxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 2b\n"
+	"	nop\n"
+	"	cbnz	%w1, 2b",
+	/* LSE atomics */
+	"1:	wfe\n"
+	"2:	ldxr	%w0, %2\n"
+	"	adds	%w1, %w0, #1\n"
+	"	tbnz	%w1, #31, 1b\n"
+	"	casa	%w0, %w1, %2\n"
+	"	sbc	%w0, %w1, %w0\n"
+	"	cbnz	%w0, 2b")
 	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
-	: "memory");
+	: "cc", "memory");
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned int tmp, tmp2;
 
-	asm volatile(
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
 	"1:	ldxr	%w0, %2\n"
 	"	sub	%w0, %w0, #1\n"
 	"	stlxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 1b\n"
+	"	cbnz	%w1, 1b",
+	/* LSE atomics */
+	"	movn	%w0, #0\n"
+	"	nop\n"
+	"	nop\n"
+	"	staddl	%w0, %2")
 	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
 	: "memory");
@@ -206,17 +276,28 @@
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	unsigned int tmp, tmp2 = 1;
+	unsigned int tmp, tmp2;
 
-	asm volatile(
-	"	ldaxr	%w0, %2\n"
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	mov	%w1, #1\n"
+	"1:	ldaxr	%w0, %2\n"
 	"	add	%w0, %w0, #1\n"
-	"	tbnz	%w0, #31, 1f\n"
+	"	tbnz	%w0, #31, 2f\n"
 	"	stxr	%w1, %w0, %2\n"
-	"1:\n"
-	: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
+	"	cbnz	%w1, 1b\n"
+	"2:",
+	/* LSE atomics */
+	"	ldr	%w0, %2\n"
+	"	adds	%w1, %w0, #1\n"
+	"	tbnz	%w1, #31, 1f\n"
+	"	casa	%w0, %w1, %2\n"
+	"	sbc	%w1, %w1, %w0\n"
+	"	nop\n"
+	"1:")
+	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
-	: "memory");
+	: "cc", "memory");
 
 	return !tmp2;
 }
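
Both the LL/SC and LSE lock paths implement the same ticket discipline: the next ticket lives in the high half-word, the currently served owner in the low half-word, and "eor %wN, %w0, %w0, ror #16" is the owner == next test. A sequential model of that state machine (the ticket grab is a single atomic in the real code):

    #include <stdbool.h>
    #include <stdint.h>

    struct model_ticket_lock {
        uint16_t owner;   /* ticket currently being served */
        uint16_t next;    /* next ticket to hand out */
    };

    static bool model_trylock(struct model_ticket_lock *l)
    {
        if (l->owner != l->next)
            return false;   /* held, or waiters are queued */
        l->next++;          /* taking a ticket == taking the lock */
        return true;
    }

    static void model_lock(struct model_ticket_lock *l)
    {
        uint16_t me = l->next++;    /* atomic increment in the real code */

        while (l->owner != me)
            ;                       /* wfe/spin until our ticket is served */
    }

    static void model_unlock(struct model_ticket_lock *l)
    {
        l->owner++;                 /* hand over to the next waiter, in order */
    }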
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index b8d3836..55be59a 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -20,6 +20,8 @@
 # error "please don't include this file directly"
 #endif
 
+#include <linux/types.h>
+
 #define TICKET_SHIFT	16
 
 typedef struct {
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 5c89df0..a7f3d4b 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -20,8 +20,29 @@
 #ifndef __ASM_SYSREG_H
 #define __ASM_SYSREG_H
 
+#include <asm/opcodes.h>
+
+#define SCTLR_EL1_CP15BEN	(0x1 << 5)
+#define SCTLR_EL1_SED		(0x1 << 8)
+
+/*
+ * ARMv8 ARM reserves the following encoding for system registers:
+ * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
+ *  C5.2, version:ARM DDI 0487A.f)
+ *	[20-19] : Op0
+ *	[18-16] : Op1
+ *	[15-12] : CRn
+ *	[11-8]  : CRm
+ *	[7-5]   : Op2
+ */
 #define sys_reg(op0, op1, crn, crm, op2) \
-	((((op0)-2)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+	((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+
+#define REG_PSTATE_PAN_IMM                     sys_reg(0, 0, 4, 0, 4)
+#define SCTLR_EL1_SPAN                         (1 << 23)
+
+#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
+				     (!!x)<<8 | 0x1f)
 
 #ifdef __ASSEMBLY__
 
@@ -31,11 +52,11 @@
 	.equ	__reg_num_xzr, 31
 
 	.macro	mrs_s, rt, sreg
-	.inst	0xd5300000|(\sreg)|(__reg_num_\rt)
+	.inst	0xd5200000|(\sreg)|(__reg_num_\rt)
 	.endm
 
 	.macro	msr_s, sreg, rt
-	.inst	0xd5100000|(\sreg)|(__reg_num_\rt)
+	.inst	0xd5000000|(\sreg)|(__reg_num_\rt)
 	.endm
 
 #else
@@ -47,14 +68,23 @@
 "	.equ	__reg_num_xzr, 31\n"
 "\n"
 "	.macro	mrs_s, rt, sreg\n"
-"	.inst	0xd5300000|(\\sreg)|(__reg_num_\\rt)\n"
+"	.inst	0xd5200000|(\\sreg)|(__reg_num_\\rt)\n"
 "	.endm\n"
 "\n"
 "	.macro	msr_s, sreg, rt\n"
-"	.inst	0xd5100000|(\\sreg)|(__reg_num_\\rt)\n"
+"	.inst	0xd5000000|(\\sreg)|(__reg_num_\\rt)\n"
 "	.endm\n"
 );
 
+static inline void config_sctlr_el1(u32 clear, u32 set)
+{
+	u32 val;
+
+	asm volatile("mrs %0, sctlr_el1" : "=r" (val));
+	val &= ~clear;
+	val |= set;
+	asm volatile("msr sctlr_el1, %0" : : "r" (val));
+}
 #endif
 
 #endif	/* __ASM_SYSREG_H */
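
Spot-checking the new encodings by hand: sys_reg(0, 0, 4, 0, 4) is 0x4080, so SET_PSTATE_PAN(1) assembles to 0xd500419f — the MSR-immediate form "msr pan, #1". A throwaway verification program (the kernel's __inst_arm() wrapper dropped for the standalone check):

    #include <stdio.h>

    #define sys_reg(op0, op1, crn, crm, op2) \
        ((((op0) & 3) << 19) | ((op1) << 16) | ((crn) << 12) | \
         ((crm) << 8) | ((op2) << 5))

    #define REG_PSTATE_PAN_IMM  sys_reg(0, 0, 4, 0, 4)
    #define SET_PSTATE_PAN(x) \
        (0xd5000000u | REG_PSTATE_PAN_IMM | ((!!(x)) << 8) | 0x1f)

    int main(void)
    {
        printf("%#x\n", SET_PSTATE_PAN(1)); /* 0xd500419f */
        return 0;
    }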
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 3a0242c..d6e6b66 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -41,7 +41,12 @@
 		flush_tlb_mm(tlb->mm);
 	} else {
 		struct vm_area_struct vma = { .vm_mm = tlb->mm, };
-		flush_tlb_range(&vma, tlb->start, tlb->end);
+		/*
+		 * The intermediate page table levels are already handled by
+		 * the __(pte|pmd|pud)_free_tlb() functions, so last level
+		 * TLBI is sufficient here.
+		 */
+		__flush_tlb_range(&vma, tlb->start, tlb->end, true);
 	}
 }
 
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 934815d..7bd2da0 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -87,58 +87,64 @@
 		((unsigned long)ASID(vma->vm_mm) << 48);
 
 	dsb(ishst);
-	asm("tlbi	vae1is, %0" : : "r" (addr));
+	asm("tlbi	vale1is, %0" : : "r" (addr));
 	dsb(ish);
 }
 
-static inline void __flush_tlb_range(struct vm_area_struct *vma,
-				     unsigned long start, unsigned long end)
-{
-	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
-	unsigned long addr;
-	start = asid | (start >> 12);
-	end = asid | (end >> 12);
-
-	dsb(ishst);
-	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-		asm("tlbi vae1is, %0" : : "r"(addr));
-	dsb(ish);
-}
-
-static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	unsigned long addr;
-	start >>= 12;
-	end >>= 12;
-
-	dsb(ishst);
-	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-		asm("tlbi vaae1is, %0" : : "r"(addr));
-	dsb(ish);
-	isb();
-}
-
 /*
  * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
  * necessarily a performance improvement.
  */
 #define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
 
+static inline void __flush_tlb_range(struct vm_area_struct *vma,
+				     unsigned long start, unsigned long end,
+				     bool last_level)
+{
+	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
+	unsigned long addr;
+
+	if ((end - start) > MAX_TLB_RANGE) {
+		flush_tlb_mm(vma->vm_mm);
+		return;
+	}
+
+	start = asid | (start >> 12);
+	end = asid | (end >> 12);
+
+	dsb(ishst);
+	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
+		if (last_level)
+			asm("tlbi vale1is, %0" : : "r"(addr));
+		else
+			asm("tlbi vae1is, %0" : : "r"(addr));
+	}
+	dsb(ish);
+}
+
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
-	if ((end - start) <= MAX_TLB_RANGE)
-		__flush_tlb_range(vma, start, end);
-	else
-		flush_tlb_mm(vma->vm_mm);
+	__flush_tlb_range(vma, start, end, false);
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	if ((end - start) <= MAX_TLB_RANGE)
-		__flush_tlb_kernel_range(start, end);
-	else
+	unsigned long addr;
+
+	if ((end - start) > MAX_TLB_RANGE) {
 		flush_tlb_all();
+		return;
+	}
+
+	start >>= 12;
+	end >>= 12;
+
+	dsb(ishst);
+	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
+		asm("tlbi vaae1is, %0" : : "r"(addr));
+	dsb(ish);
+	isb();
 }
 
 /*
@@ -154,20 +160,6 @@
 	asm("tlbi	vae1is, %0" : : "r" (addr));
 	dsb(ish);
 }
-/*
- * On AArch64, the cache coherency is handled via the set_pte_at() function.
- */
-static inline void update_mmu_cache(struct vm_area_struct *vma,
-				    unsigned long addr, pte_t *ptep)
-{
-	/*
-	 * set_pte() does not have a DSB for user mappings, so make sure that
-	 * the page table write is visible.
-	 */
-	dsb(ishst);
-}
-
-#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
 #endif
 
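
The ">> 12" in these loops builds the TLBI operand: VA[55:12] in the low bits with the ASID in bits [63:48], and the "1 << (PAGE_SHIFT - 12)" stride advances exactly one page per iteration regardless of the configured page size. With an assumed user address and ASID:

    #include <stdio.h>

    int main(void)
    {
        unsigned long asid = 0x42;                 /* example ASID */
        unsigned long va   = 0x0000007fabc12000ul; /* example user VA */

        unsigned long op = (asid << 48) | (va >> 12);

        /* prints 0x42000007fabc12: what "tlbi vae1is" consumes */
        printf("%#lx\n", op);
        return 0;
    }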
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 225ec35..a3e9d6f 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -1,8 +1,6 @@
 #ifndef __ASM_TOPOLOGY_H
 #define __ASM_TOPOLOGY_H
 
-#ifdef CONFIG_SMP
-
 #include <linux/cpumask.h>
 
 struct cpu_topology {
@@ -24,13 +22,6 @@
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
 
-#else
-
-static inline void init_cpu_topology(void) { }
-static inline void store_cpu_topology(unsigned int cpuid) { }
-
-#endif
-
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 232e4ba..0cc2f29 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -34,13 +34,32 @@
 void register_undef_hook(struct undef_hook *hook);
 void unregister_undef_hook(struct undef_hook *hook);
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+	extern char __irqentry_text_start[];
+	extern char __irqentry_text_end[];
+
+	return ptr >= (unsigned long)&__irqentry_text_start &&
+	       ptr < (unsigned long)&__irqentry_text_end;
+}
+#else
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+	return 0;
+}
+#endif
+
 static inline int in_exception_text(unsigned long ptr)
 {
 	extern char __exception_text_start[];
 	extern char __exception_text_end[];
+	int in;
 
-	return ptr >= (unsigned long)&__exception_text_start &&
-	       ptr < (unsigned long)&__exception_text_end;
+	in = ptr >= (unsigned long)&__exception_text_start &&
+	     ptr < (unsigned long)&__exception_text_end;
+
+	return in ? : __in_irqentry_text(ptr);
 }
 
 #endif
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 07e1ba44..b2ede967 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -24,7 +24,10 @@
 #include <linux/string.h>
 #include <linux/thread_info.h>
 
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/ptrace.h>
+#include <asm/sysreg.h>
 #include <asm/errno.h>
 #include <asm/memory.h>
 #include <asm/compiler.h>
@@ -131,6 +134,8 @@
 do {									\
 	unsigned long __gu_val;						\
 	__chk_user_ptr(ptr);						\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
 		__get_user_asm("ldrb", "%w", __gu_val, (ptr), (err));	\
@@ -148,6 +153,8 @@
 		BUILD_BUG();						\
 	}								\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 } while (0)
 
 #define __get_user(x, ptr)						\
@@ -194,6 +201,8 @@
 do {									\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
 		__put_user_asm("strb", "%w", __pu_val, (ptr), (err));	\
@@ -210,6 +219,8 @@
 	default:							\
 		BUILD_BUG();						\
 	}								\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 } while (0)
 
 #define __put_user(x, ptr)						\
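
The paired ALTERNATIVE()s bracket each accessor so that, on PAN-capable CPUs, user memory is only reachable inside the window; everywhere else PSTATE.PAN stays set and a stray kernel dereference of a user pointer faults. The shape of the pattern as pseudocode — the helper names below are illustrative, not kernel API:

    static void pan_window_open(void);   /* SET_PSTATE_PAN(0); nop without PAN */
    static void pan_window_close(void);  /* SET_PSTATE_PAN(1) */
    static int  raw_user_read(int *dst, const int *usrc); /* ldr + fixup */

    static inline int guarded_get_user_int(int *dst, const int *usrc)
    {
        int err;

        pan_window_open();              /* user memory becomes reachable */
        err = raw_user_read(dst, usrc);
        pan_window_close();             /* and is sealed off again */

        return err;
    }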
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 73cf0f5..361c8a8 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -27,5 +27,6 @@
 #define HWCAP_SHA1		(1 << 5)
 #define HWCAP_SHA2		(1 << 6)
 #define HWCAP_CRC32		(1 << 7)
+#define HWCAP_ATOMICS		(1 << 8)
 
 #endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 6913643..208db3d 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -44,6 +44,7 @@
 #define PSR_I_BIT	0x00000080
 #define PSR_A_BIT	0x00000100
 #define PSR_D_BIT	0x00000200
+#define PSR_PAN_BIT	0x00400000
 #define PSR_Q_BIT	0x08000000
 #define PSR_V_BIT	0x10000000
 #define PSR_C_BIT	0x20000000
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 426d076..22dc9bc 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -17,15 +17,15 @@
 			   sys.o stacktrace.o time.o traps.o io.o vdso.o	\
 			   hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o	\
 			   return_address.o cpuinfo.o cpu_errata.o		\
-			   cpufeature.o alternative.o cacheinfo.o
+			   cpufeature.o alternative.o cacheinfo.o		\
+			   smp.o smp_spin_table.o topology.o
 
 arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o 	\
 					   sys_compat.o entry32.o		\
 					   ../../arm/kernel/opcodes.o
 arm64-obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o entry-ftrace.o
 arm64-obj-$(CONFIG_MODULES)		+= arm64ksyms.o module.o
-arm64-obj-$(CONFIG_SMP)			+= smp.o smp_spin_table.o topology.o
-arm64-obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o
+arm64-obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o perf_callchain.o
 arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
 arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 arm64-obj-$(CONFIG_CPU_PM)		+= sleep.o suspend.o
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 221b983..ab9db0e 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -85,7 +85,7 @@
 	return insn;
 }
 
-static int __apply_alternatives(void *alt_region)
+static void __apply_alternatives(void *alt_region)
 {
 	struct alt_instr *alt;
 	struct alt_region *region = alt_region;
@@ -114,19 +114,39 @@
 		flush_icache_range((uintptr_t)origptr,
 				   (uintptr_t)(origptr + nr_inst));
 	}
-
-	return 0;
 }
 
-void apply_alternatives_all(void)
+/*
+ * We might be patching the stop_machine state machine, so implement a
+ * really simple polling protocol here.
+ */
+static int __apply_alternatives_multi_stop(void *unused)
 {
+	static int patched = 0;
 	struct alt_region region = {
 		.begin	= __alt_instructions,
 		.end	= __alt_instructions_end,
 	};
 
+	/* We always have a CPU 0 at this point (__init) */
+	if (smp_processor_id()) {
+		while (!READ_ONCE(patched))
+			cpu_relax();
+		isb();
+	} else {
+		BUG_ON(patched);
+		__apply_alternatives(&region);
+		/* Barriers provided by the cache flushing */
+		WRITE_ONCE(patched, 1);
+	}
+
+	return 0;
+}
+
+void __init apply_alternatives_all(void)
+{
 	/* better not try code patching on a live SMP system */
-	stop_machine(__apply_alternatives, &region, NULL);
+	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
 }
 
 void apply_alternatives(void *start, size_t length)
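
The rendezvous relies on nothing more than a static flag: every online CPU enters the same callback under stop_machine(), CPU 0 patches while the secondaries spin, and the final isb() discards any instructions the secondaries may have prefetched from the patched region. Stripped to its shape, with the patch step left hypothetical:

    static void patch_all_alternatives(void);   /* hypothetical patch step */

    static int patch_rendezvous(void *unused)
    {
        static int done;

        if (smp_processor_id()) {       /* every secondary CPU */
            while (!READ_ONCE(done))
                cpu_relax();
            isb();                      /* resync the instruction stream */
        } else {                        /* the boot CPU does the patching */
            patch_all_alternatives();
            WRITE_ONCE(done, 1);
        }
        return 0;
    }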
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 7922c2e..bcee7ab 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -14,8 +14,11 @@
 #include <linux/slab.h>
 #include <linux/sysctl.h>
 
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/insn.h>
 #include <asm/opcodes.h>
+#include <asm/sysreg.h>
 #include <asm/system_misc.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
@@ -279,6 +282,8 @@
  */
 #define __user_swpX_asm(data, addr, res, temp, B)		\
 	__asm__ __volatile__(					\
+	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+		    CONFIG_ARM64_PAN)				\
 	"	mov		%w2, %w1\n"			\
 	"0:	ldxr"B"		%w1, [%3]\n"			\
 	"1:	stxr"B"		%w0, %w2, [%3]\n"		\
@@ -294,7 +299,9 @@
 	"	.align		3\n"				\
 	"	.quad		0b, 3b\n"			\
 	"	.quad		1b, 3b\n"			\
-	"	.popsection"					\
+	"	.popsection\n"					\
+	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+		CONFIG_ARM64_PAN)				\
 	: "=&r" (res), "+r" (data), "=&r" (temp)		\
 	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT)		\
 	: "memory")
@@ -504,16 +511,6 @@
 	return 0;
 }
 
-static inline void config_sctlr_el1(u32 clear, u32 set)
-{
-	u32 val;
-
-	asm volatile("mrs %0, sctlr_el1" : "=r" (val));
-	val &= ~clear;
-	val |= set;
-	asm volatile("msr sctlr_el1, %0" : : "r" (val));
-}
-
 static int cp15_barrier_set_hw_mode(bool enable)
 {
 	if (enable)
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index 5ea337d..b6bd7d4 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -30,9 +30,7 @@
 const struct cpu_operations *cpu_ops[NR_CPUS];
 
 static const struct cpu_operations *supported_cpu_ops[] __initconst = {
-#ifdef CONFIG_SMP
 	&smp_spin_table_ops,
-#endif
 	&cpu_psci_ops,
 	NULL,
 };
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 5ad86ce..3c9aed3 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -21,24 +21,57 @@
 #include <linux/types.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
+#include <asm/processor.h>
 
 static bool
-has_id_aa64pfr0_feature(const struct arm64_cpu_capabilities *entry)
+feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
 {
-	u64 val;
+	int val = cpuid_feature_extract_field(reg, entry->field_pos);
 
-	val = read_cpuid(id_aa64pfr0_el1);
-	return (val & entry->register_mask) == entry->register_value;
+	return val >= entry->min_field_value;
 }
 
+#define __ID_FEAT_CHK(reg)						\
+static bool __maybe_unused						\
+has_##reg##_feature(const struct arm64_cpu_capabilities *entry)		\
+{									\
+	u64 val;							\
+									\
+	val = read_cpuid(reg##_el1);					\
+	return feature_matches(val, entry);				\
+}
+
+__ID_FEAT_CHK(id_aa64pfr0);
+__ID_FEAT_CHK(id_aa64mmfr1);
+__ID_FEAT_CHK(id_aa64isar0);
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
 		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
 		.matches = has_id_aa64pfr0_feature,
-		.register_mask = (0xf << 24),
-		.register_value = (1 << 24),
+		.field_pos = 24,
+		.min_field_value = 1,
 	},
+#ifdef CONFIG_ARM64_PAN
+	{
+		.desc = "Privileged Access Never",
+		.capability = ARM64_HAS_PAN,
+		.matches = has_id_aa64mmfr1_feature,
+		.field_pos = 20,
+		.min_field_value = 1,
+		.enable = cpu_enable_pan,
+	},
+#endif /* CONFIG_ARM64_PAN */
+#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+	{
+		.desc = "LSE atomic instructions",
+		.capability = ARM64_HAS_LSE_ATOMICS,
+		.matches = has_id_aa64isar0_feature,
+		.field_pos = 20,
+		.min_field_value = 2,
+	},
+#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
 	{},
 };
 
@@ -55,9 +88,15 @@
 			pr_info("%s %s\n", info, caps[i].desc);
 		cpus_set_cap(caps[i].capability);
 	}
+
+	/* second pass allows enable() to consider interacting capabilities */
+	for (i = 0; caps[i].desc; i++) {
+		if (cpus_have_cap(caps[i].capability) && caps[i].enable)
+			caps[i].enable();
+	}
 }
 
 void check_local_cpu_features(void)
 {
-	check_cpu_capabilities(arm64_features, "detected feature");
+	check_cpu_capabilities(arm64_features, "detected feature:");
 }
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index b056369..9b3b62a 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -82,7 +82,7 @@
 static DEFINE_PER_CPU(int, mde_ref_count);
 static DEFINE_PER_CPU(int, kde_ref_count);
 
-void enable_debug_monitors(enum debug_el el)
+void enable_debug_monitors(enum dbg_active_el el)
 {
 	u32 mdscr, enable = 0;
 
@@ -102,7 +102,7 @@
 	}
 }
 
-void disable_debug_monitors(enum debug_el el)
+void disable_debug_monitors(enum dbg_active_el el)
 {
 	u32 mdscr, disable = 0;
 
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index f537406..816120e 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -13,7 +13,7 @@
 #include <asm/efi.h>
 #include <asm/sections.h>
 
-efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table,
+efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
 					unsigned long *image_addr,
 					unsigned long *image_size,
 					unsigned long *reserve_addr,
@@ -23,21 +23,44 @@
 {
 	efi_status_t status;
 	unsigned long kernel_size, kernel_memsize = 0;
+	unsigned long nr_pages;
+	void *old_image_addr = (void *)*image_addr;
 
 	/* Relocate the image, if required. */
 	kernel_size = _edata - _text;
 	if (*image_addr != (dram_base + TEXT_OFFSET)) {
 		kernel_memsize = kernel_size + (_end - _edata);
-		status = efi_low_alloc(sys_table, kernel_memsize + TEXT_OFFSET,
-				       SZ_2M, reserve_addr);
+
+		/*
+		 * First, try a straight allocation at the preferred offset.
+		 * This will work around the issue where, if dram_base == 0x0,
+		 * efi_low_alloc() refuses to allocate at 0x0 (to prevent the
+		 * address of the allocation being mistaken for a FAIL return
+		 * value or a NULL pointer). It will also ensure that, on
+		 * platforms where the [dram_base, dram_base + TEXT_OFFSET)
+		 * interval is partially occupied by the firmware (like on APM
+		 * Mustang), we can still place the kernel at the address
+		 * 'dram_base + TEXT_OFFSET'.
+		 */
+		*image_addr = *reserve_addr = dram_base + TEXT_OFFSET;
+		nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
+			   EFI_PAGE_SIZE;
+		status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
+					EFI_LOADER_DATA, nr_pages,
+					(efi_physical_addr_t *)reserve_addr);
 		if (status != EFI_SUCCESS) {
-			pr_efi_err(sys_table, "Failed to relocate kernel\n");
-			return status;
+			kernel_memsize += TEXT_OFFSET;
+			status = efi_low_alloc(sys_table_arg, kernel_memsize,
+					       SZ_2M, reserve_addr);
+
+			if (status != EFI_SUCCESS) {
+				pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
+				return status;
+			}
+			*image_addr = *reserve_addr + TEXT_OFFSET;
 		}
-		memcpy((void *)*reserve_addr + TEXT_OFFSET, (void *)*image_addr,
-		       kernel_size);
-		*image_addr = *reserve_addr + TEXT_OFFSET;
-		*reserve_size = kernel_memsize + TEXT_OFFSET;
+		memcpy((void *)*image_addr, old_image_addr, kernel_size);
+		*reserve_size = kernel_memsize;
 	}
 
 
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e163518..4306c93 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -116,41 +116,34 @@
 	*/
 	.endm
 
-	.macro	kernel_exit, el, ret = 0
+	.macro	kernel_exit, el
 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
 	.if	\el == 0
 	ct_user_enter
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	msr	sp_el0, x23
-
 #ifdef CONFIG_ARM64_ERRATUM_845719
-
-#undef SEQUENCE_ORG
-#undef SEQUENCE_ALT
-
+alternative_if_not ARM64_WORKAROUND_845719
+	nop
+	nop
 #ifdef CONFIG_PID_IN_CONTEXTIDR
-
-#define SEQUENCE_ORG	"nop ; nop ; nop"
-#define SEQUENCE_ALT	"tbz x22, #4, 1f ; mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:"
-
-#else
-
-#define SEQUENCE_ORG	"nop ; nop"
-#define SEQUENCE_ALT	"tbz x22, #4, 1f ; msr contextidr_el1, xzr; 1:"
-
+	nop
 #endif
-
-	alternative_insn SEQUENCE_ORG, SEQUENCE_ALT, ARM64_WORKAROUND_845719
-
+alternative_else
+	tbz	x22, #4, 1f
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+	mrs	x29, contextidr_el1
+	msr	contextidr_el1, x29
+#else
+	msr contextidr_el1, xzr
+#endif
+1:
+alternative_endif
 #endif
 	.endif
 	msr	elr_el1, x21			// set up the return data
 	msr	spsr_el1, x22
-	.if	\ret
-	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
-	.else
 	ldp	x0, x1, [sp, #16 * 0]
-	.endif
 	ldp	x2, x3, [sp, #16 * 1]
 	ldp	x4, x5, [sp, #16 * 2]
 	ldp	x6, x7, [sp, #16 * 3]
@@ -613,22 +606,21 @@
  */
 ret_fast_syscall:
 	disable_irq				// disable interrupts
+	str	x0, [sp, #S_X0]			// returned x0
 	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
 	and	x2, x1, #_TIF_SYSCALL_WORK
 	cbnz	x2, ret_fast_syscall_trace
 	and	x2, x1, #_TIF_WORK_MASK
-	cbnz	x2, fast_work_pending
+	cbnz	x2, work_pending
 	enable_step_tsk x1, x2
-	kernel_exit 0, ret = 1
+	kernel_exit 0
 ret_fast_syscall_trace:
 	enable_irq				// enable interrupts
-	b	__sys_trace_return
+	b	__sys_trace_return_skipped	// we already saved x0
 
 /*
  * Ok, we need to do extra processing, enter the slow path.
  */
-fast_work_pending:
-	str	x0, [sp, #S_X0]			// returned x0
 work_pending:
 	tbnz	x1, #TIF_NEED_RESCHED, work_resched
 	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
@@ -652,7 +644,7 @@
 	cbnz	x2, work_pending
 	enable_step_tsk x1, x2
 no_work_pending:
-	kernel_exit 0, ret = 0
+	kernel_exit 0
 ENDPROC(ret_to_user)
 
 /*
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 44d6f75..c56956a1 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -158,6 +158,7 @@
 void fpsimd_flush_thread(void)
 {
 	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
+	fpsimd_flush_task_state(current);
 	set_thread_flag(TIF_FOREIGN_FPSTATE);
 }
 
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c0ff3ce..a055be6 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -62,13 +62,8 @@
 /*
  * Initial memory map attributes.
  */
-#ifndef CONFIG_SMP
-#define PTE_FLAGS	PTE_TYPE_PAGE | PTE_AF
-#define PMD_FLAGS	PMD_TYPE_SECT | PMD_SECT_AF
-#else
 #define PTE_FLAGS	PTE_TYPE_PAGE | PTE_AF | PTE_SHARED
 #define PMD_FLAGS	PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S
-#endif
 
 #ifdef CONFIG_ARM64_64K_PAGES
 #define MM_MMUFLAGS	PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS
@@ -574,7 +569,6 @@
 	.long	BOOT_CPU_MODE_EL1
 	.popsection
 
-#ifdef CONFIG_SMP
 	/*
 	 * This provides a "holding pen" for platforms to hold all secondary
 	 * cores until we're ready for them to initialise.
@@ -622,7 +616,6 @@
 	mov	x29, #0
 	b	secondary_start_kernel
 ENDPROC(__secondary_switched)
-#endif	/* CONFIG_SMP */
 
 /*
  * Enable the MMU.
@@ -641,5 +634,13 @@
 	isb
 	msr	sctlr_el1, x0
 	isb
+	/*
+	 * Invalidate the local I-cache so that any instructions fetched
+	 * speculatively from the PoC are discarded, since they may have
+	 * been dynamically patched at the PoU.
+	 */
+	ic	iallu
+	dsb	nsh
+	isb
 	br	x27
 ENDPROC(__enable_mmu)
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 7a1a5da..003bc3d 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -156,7 +156,7 @@
  * Convert a breakpoint privilege level to the corresponding exception
  * level.
  */
-static enum debug_el debug_exception_level(int privilege)
+static enum dbg_active_el debug_exception_level(int privilege)
 {
 	switch (privilege) {
 	case AARCH64_BREAKPOINT_EL0:
@@ -230,7 +230,7 @@
 	struct perf_event **slots;
 	struct debug_info *debug_info = &current->thread.debug;
 	int i, max_slots, ctrl_reg, val_reg, reg_enable;
-	enum debug_el dbg_el = debug_exception_level(info->ctrl.privilege);
+	enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
 	u32 ctrl;
 
 	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
@@ -537,7 +537,7 @@
  * exception level at the register level.
  * This is used when single-stepping after a breakpoint exception.
  */
-static void toggle_bp_registers(int reg, enum debug_el el, int enable)
+static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
 {
 	int i, max_slots, privilege;
 	u32 ctrl;
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index dd9671c..f341866 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -101,9 +101,8 @@
 		return addr;
 
 	BUG_ON(!page);
-	set_fixmap(fixmap, page_to_phys(page));
-
-	return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
+	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
+			(uintaddr & ~PAGE_MASK));
 }
 
 static void __kprobes patch_unmap(int fixmap)
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 463fa2e..11dc3fd 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -33,9 +33,7 @@
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
-#ifdef CONFIG_SMP
 	show_ipi_list(p, prec);
-#endif
 	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
 	return 0;
 }
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index 4f1fec7..c2dd1ad 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -28,7 +28,7 @@
 	void *addr = (void *)entry->code;
 	u32 insn;
 
-	if (type == JUMP_LABEL_ENABLE) {
+	if (type == JUMP_LABEL_JMP) {
 		insn = aarch64_insn_gen_branch_imm(entry->code,
 						   entry->target,
 						   AARCH64_INSN_BRANCH_NOLINK);
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index a0d10c5..bcac81e 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -235,13 +235,13 @@
 
 static struct break_hook kgdb_brkpt_hook = {
 	.esr_mask	= 0xffffffff,
-	.esr_val	= DBG_ESR_VAL_BRK(KGDB_DYN_DBG_BRK_IMM),
+	.esr_val	= (u32)ESR_ELx_VAL_BRK64(KGDB_DYN_DBG_BRK_IMM),
 	.fn		= kgdb_brk_fn
 };
 
 static struct break_hook kgdb_compiled_brkpt_hook = {
 	.esr_mask	= 0xffffffff,
-	.esr_val	= DBG_ESR_VAL_BRK(KGDB_COMPILED_DBG_BRK_IMM),
+	.esr_val	= (u32)ESR_ELx_VAL_BRK64(KGDB_COMPILED_DBG_BRK_IMM),
 	.fn		= kgdb_compiled_brk_fn
 };
 
@@ -328,9 +328,9 @@
  */
 struct kgdb_arch arch_kgdb_ops = {
 	.gdb_bpt_instr = {
-		KGDB_DYN_BRK_INS_BYTE0,
-		KGDB_DYN_BRK_INS_BYTE1,
-		KGDB_DYN_BRK_INS_BYTE2,
-		KGDB_DYN_BRK_INS_BYTE3,
+		KGDB_DYN_BRK_INS_BYTE(0),
+		KGDB_DYN_BRK_INS_BYTE(1),
+		KGDB_DYN_BRK_INS_BYTE(2),
+		KGDB_DYN_BRK_INS_BYTE(3),
 	}
 };
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 4095379..b3d098b 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -38,6 +38,19 @@
 	return res->start;
 }
 
+/**
+ * pcibios_enable_device - Enable I/O and memory.
+ * @dev: PCI device to be enabled
+ * @mask: bitmask of BARs to enable
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+	if (pci_has_flag(PCI_PROBE_ONLY))
+		return 0;
+
+	return pci_enable_resources(dev, mask);
+}
+
 /*
  * Try to assign the IRQ number from DT when adding a new device
  */
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
new file mode 100644
index 0000000..3aa7483
--- /dev/null
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -0,0 +1,196 @@
+/*
+ * arm64 callchain support
+ *
+ * Copyright (C) 2015 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+#include <asm/stacktrace.h>
+
+struct frame_tail {
+	struct frame_tail	__user *fp;
+	unsigned long		lr;
+} __attribute__((packed));
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static struct frame_tail __user *
+user_backtrace(struct frame_tail __user *tail,
+	       struct perf_callchain_entry *entry)
+{
+	struct frame_tail buftail;
+	unsigned long err;
+
+	/* Also check accessibility of one struct frame_tail beyond */
+	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+		return NULL;
+
+	pagefault_disable();
+	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+	pagefault_enable();
+
+	if (err)
+		return NULL;
+
+	perf_callchain_store(entry, buftail.lr);
+
+	/*
+	 * Frame pointers should strictly progress back up the stack
+	 * (towards higher addresses).
+	 */
+	if (tail >= buftail.fp)
+		return NULL;
+
+	return buftail.fp;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct compat_frame_tail *)(xxx->fp)-1
+ *
+ * This code has been adapted from the ARM OProfile support.
+ */
+struct compat_frame_tail {
+	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
+	u32		sp;
+	u32		lr;
+} __attribute__((packed));
+
+static struct compat_frame_tail __user *
+compat_user_backtrace(struct compat_frame_tail __user *tail,
+		      struct perf_callchain_entry *entry)
+{
+	struct compat_frame_tail buftail;
+	unsigned long err;
+
+	/* Also check accessibility of one struct frame_tail beyond */
+	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+		return NULL;
+
+	pagefault_disable();
+	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+	pagefault_enable();
+
+	if (err)
+		return NULL;
+
+	perf_callchain_store(entry, buftail.lr);
+
+	/*
+	 * Frame pointers should strictly progress back up the stack
+	 * (towards higher addresses).
+	 */
+	if (tail + 1 >= (struct compat_frame_tail __user *)
+			compat_ptr(buftail.fp))
+		return NULL;
+
+	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
+}
+#endif /* CONFIG_COMPAT */
+
+void perf_callchain_user(struct perf_callchain_entry *entry,
+			 struct pt_regs *regs)
+{
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		/* We don't support guest os callchain now */
+		return;
+	}
+
+	perf_callchain_store(entry, regs->pc);
+
+	if (!compat_user_mode(regs)) {
+		/* AARCH64 mode */
+		struct frame_tail __user *tail;
+
+		tail = (struct frame_tail __user *)regs->regs[29];
+
+		while (entry->nr < PERF_MAX_STACK_DEPTH &&
+		       tail && !((unsigned long)tail & 0xf))
+			tail = user_backtrace(tail, entry);
+	} else {
+#ifdef CONFIG_COMPAT
+		/* AARCH32 compat mode */
+		struct compat_frame_tail __user *tail;
+
+		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
+
+		while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+			tail && !((unsigned long)tail & 0x3))
+			tail = compat_user_backtrace(tail, entry);
+#endif
+	}
+}
+
+/*
+ * Gets called by walk_stackframe() for every stackframe. This will be called
+ * whilst unwinding the stackframe and is like a subroutine return so we use
+ * the PC.
+ */
+static int callchain_trace(struct stackframe *frame, void *data)
+{
+	struct perf_callchain_entry *entry = data;
+	perf_callchain_store(entry, frame->pc);
+	return 0;
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+			   struct pt_regs *regs)
+{
+	struct stackframe frame;
+
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		/* We don't support guest os callchain now */
+		return;
+	}
+
+	frame.fp = regs->regs[29];
+	frame.sp = regs->sp;
+	frame.pc = regs->pc;
+
+	walk_stackframe(&frame, callchain_trace, entry);
+}
+
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+		return perf_guest_cbs->get_guest_ip();
+
+	return instruction_pointer(regs);
+}
+
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+	int misc = 0;
+
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		if (perf_guest_cbs->is_user_mode())
+			misc |= PERF_RECORD_MISC_GUEST_USER;
+		else
+			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+	} else {
+		if (user_mode(regs))
+			misc |= PERF_RECORD_MISC_USER;
+		else
+			misc |= PERF_RECORD_MISC_KERNEL;
+	}
+
+	return misc;
+}
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index b31e9a4..f9a74d4 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -25,7 +25,7 @@
 #include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/perf_event.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -36,7 +36,6 @@
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
 #include <asm/pmu.h>
-#include <asm/stacktrace.h>
 
 /*
  * ARMv8 supports a maximum of 32 events.
@@ -78,6 +77,16 @@
 
 #define CACHE_OP_UNSUPPORTED		0xFFFF
 
+#define PERF_MAP_ALL_UNSUPPORTED					\
+	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
+
+#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
+[0 ... C(MAX) - 1] = {							\
+	[0 ... C(OP_MAX) - 1] = {					\
+		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
+	},								\
+}
+
 static int
 armpmu_map_cache_event(const unsigned (*cache_map)
 				      [PERF_COUNT_HW_CACHE_MAX]
@@ -435,10 +444,8 @@
 	unsigned int i, irqs;
 	struct platform_device *pmu_device = armpmu->plat_device;
 
-	if (!pmu_device) {
-		pr_err("no PMU device registered\n");
+	if (!pmu_device)
 		return -ENODEV;
-	}
 
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 	if (!irqs) {
@@ -703,118 +710,28 @@
 
 /* PMUv3 HW events mapping. */
 static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
 	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= HW_OP_UNSUPPORTED,
 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 };
 
 static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 						[PERF_COUNT_HW_CACHE_OP_MAX]
 						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-	[C(L1D)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(L1I)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(LL)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(DTLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(ITLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(BPU)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(NODE)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
 };
 
 /*
@@ -1337,7 +1254,7 @@
 		}
 
 		for_each_possible_cpu(cpu)
-			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+			if (dn == of_cpu_device_node_get(cpu))
 				break;
 
 		if (cpu >= nr_cpu_ids) {
@@ -1415,180 +1332,3 @@
 }
 early_initcall(init_hw_perf_events);
 
-/*
- * Callchain handling code.
- */
-struct frame_tail {
-	struct frame_tail	__user *fp;
-	unsigned long		lr;
-} __attribute__((packed));
-
-/*
- * Get the return address for a single stackframe and return a pointer to the
- * next frame tail.
- */
-static struct frame_tail __user *
-user_backtrace(struct frame_tail __user *tail,
-	       struct perf_callchain_entry *entry)
-{
-	struct frame_tail buftail;
-	unsigned long err;
-
-	/* Also check accessibility of one struct frame_tail beyond */
-	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
-		return NULL;
-
-	pagefault_disable();
-	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
-	pagefault_enable();
-
-	if (err)
-		return NULL;
-
-	perf_callchain_store(entry, buftail.lr);
-
-	/*
-	 * Frame pointers should strictly progress back up the stack
-	 * (towards higher addresses).
-	 */
-	if (tail >= buftail.fp)
-		return NULL;
-
-	return buftail.fp;
-}
-
-#ifdef CONFIG_COMPAT
-/*
- * The registers we're interested in are at the end of the variable
- * length saved register structure. The fp points at the end of this
- * structure so the address of this struct is:
- * (struct compat_frame_tail *)(xxx->fp)-1
- *
- * This code has been adapted from the ARM OProfile support.
- */
-struct compat_frame_tail {
-	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
-	u32		sp;
-	u32		lr;
-} __attribute__((packed));
-
-static struct compat_frame_tail __user *
-compat_user_backtrace(struct compat_frame_tail __user *tail,
-		      struct perf_callchain_entry *entry)
-{
-	struct compat_frame_tail buftail;
-	unsigned long err;
-
-	/* Also check accessibility of one struct frame_tail beyond */
-	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
-		return NULL;
-
-	pagefault_disable();
-	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
-	pagefault_enable();
-
-	if (err)
-		return NULL;
-
-	perf_callchain_store(entry, buftail.lr);
-
-	/*
-	 * Frame pointers should strictly progress back up the stack
-	 * (towards higher addresses).
-	 */
-	if (tail + 1 >= (struct compat_frame_tail __user *)
-			compat_ptr(buftail.fp))
-		return NULL;
-
-	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
-}
-#endif /* CONFIG_COMPAT */
-
-void perf_callchain_user(struct perf_callchain_entry *entry,
-			 struct pt_regs *regs)
-{
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-		/* We don't support guest os callchain now */
-		return;
-	}
-
-	perf_callchain_store(entry, regs->pc);
-
-	if (!compat_user_mode(regs)) {
-		/* AARCH64 mode */
-		struct frame_tail __user *tail;
-
-		tail = (struct frame_tail __user *)regs->regs[29];
-
-		while (entry->nr < PERF_MAX_STACK_DEPTH &&
-		       tail && !((unsigned long)tail & 0xf))
-			tail = user_backtrace(tail, entry);
-	} else {
-#ifdef CONFIG_COMPAT
-		/* AARCH32 compat mode */
-		struct compat_frame_tail __user *tail;
-
-		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
-
-		while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
-			tail && !((unsigned long)tail & 0x3))
-			tail = compat_user_backtrace(tail, entry);
-#endif
-	}
-}
-
-/*
- * Gets called by walk_stackframe() for every stackframe. This will be called
- * whist unwinding the stackframe and is like a subroutine return so we use
- * the PC.
- */
-static int callchain_trace(struct stackframe *frame, void *data)
-{
-	struct perf_callchain_entry *entry = data;
-	perf_callchain_store(entry, frame->pc);
-	return 0;
-}
-
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
-			   struct pt_regs *regs)
-{
-	struct stackframe frame;
-
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-		/* We don't support guest os callchain now */
-		return;
-	}
-
-	frame.fp = regs->regs[29];
-	frame.sp = regs->sp;
-	frame.pc = regs->pc;
-
-	walk_stackframe(&frame, callchain_trace, entry);
-}
-
-unsigned long perf_instruction_pointer(struct pt_regs *regs)
-{
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
-		return perf_guest_cbs->get_guest_ip();
-
-	return instruction_pointer(regs);
-}
-
-unsigned long perf_misc_flags(struct pt_regs *regs)
-{
-	int misc = 0;
-
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-		if (perf_guest_cbs->is_user_mode())
-			misc |= PERF_RECORD_MISC_GUEST_USER;
-		else
-			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
-	} else {
-		if (user_mode(regs))
-			misc |= PERF_RECORD_MISC_USER;
-		else
-			misc |= PERF_RECORD_MISC_KERNEL;
-	}
-
-	return misc;
-}
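
The PERF_MAP_ALL_UNSUPPORTED and PERF_CACHE_MAP_ALL_UNSUPPORTED macros introduced above rely on GNU C range-designated initializers: a later designated entry overrides the blanket default, so only the supported events need to be spelled out. A minimal standalone illustration (hypothetical array, not kernel code; needs GCC or Clang):

	#include <stdio.h>

	int main(void)
	{
		/* [0 ... 3] sets the default; the later [1] overrides it */
		int map[4] = { [0 ... 3] = -1, [1] = 42 };

		for (int i = 0; i < 4; i++)
			printf("map[%d] = %d\n", i, map[i]);	/* -1, 42, -1, -1 */
		return 0;
	}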
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 869f202..aa94a88 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -18,23 +18,17 @@
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/smp.h>
-#include <linux/reboot.h>
-#include <linux/pm.h>
 #include <linux/delay.h>
+#include <linux/psci.h>
 #include <linux/slab.h>
+
 #include <uapi/linux/psci.h>
 
 #include <asm/compiler.h>
-#include <asm/cputype.h>
 #include <asm/cpu_ops.h>
 #include <asm/errno.h>
-#include <asm/psci.h>
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
-#include <asm/system_misc.h>
-
-#define PSCI_POWER_STATE_TYPE_STANDBY		0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1
 
 static bool psci_power_state_loses_context(u32 state)
 {
@@ -50,122 +44,8 @@
 	return !(state & ~valid_mask);
 }
 
-/*
- * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
- * calls to its resident CPU, so we must avoid issuing those. We never migrate
- * a Trusted OS even if it claims to be capable of migration -- doing so will
- * require cooperation with a Trusted OS driver.
- */
-static int resident_cpu = -1;
-
-struct psci_operations {
-	int (*cpu_suspend)(u32 state, unsigned long entry_point);
-	int (*cpu_off)(u32 state);
-	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
-	int (*migrate)(unsigned long cpuid);
-	int (*affinity_info)(unsigned long target_affinity,
-			unsigned long lowest_affinity_level);
-	int (*migrate_info_type)(void);
-};
-
-static struct psci_operations psci_ops;
-
-typedef unsigned long (psci_fn)(unsigned long, unsigned long,
-				unsigned long, unsigned long);
-asmlinkage psci_fn __invoke_psci_fn_hvc;
-asmlinkage psci_fn __invoke_psci_fn_smc;
-static psci_fn *invoke_psci_fn;
-
-enum psci_function {
-	PSCI_FN_CPU_SUSPEND,
-	PSCI_FN_CPU_ON,
-	PSCI_FN_CPU_OFF,
-	PSCI_FN_MIGRATE,
-	PSCI_FN_MAX,
-};
-
 static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
 
-static u32 psci_function_id[PSCI_FN_MAX];
-
-static int psci_to_linux_errno(int errno)
-{
-	switch (errno) {
-	case PSCI_RET_SUCCESS:
-		return 0;
-	case PSCI_RET_NOT_SUPPORTED:
-		return -EOPNOTSUPP;
-	case PSCI_RET_INVALID_PARAMS:
-		return -EINVAL;
-	case PSCI_RET_DENIED:
-		return -EPERM;
-	};
-
-	return -EINVAL;
-}
-
-static u32 psci_get_version(void)
-{
-	return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
-}
-
-static int psci_cpu_suspend(u32 state, unsigned long entry_point)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
-	err = invoke_psci_fn(fn, state, entry_point, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_off(u32 state)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_CPU_OFF];
-	err = invoke_psci_fn(fn, state, 0, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_CPU_ON];
-	err = invoke_psci_fn(fn, cpuid, entry_point, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_migrate(unsigned long cpuid)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_MIGRATE];
-	err = invoke_psci_fn(fn, cpuid, 0, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_affinity_info(unsigned long target_affinity,
-		unsigned long lowest_affinity_level)
-{
-	return invoke_psci_fn(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity,
-			      lowest_affinity_level, 0);
-}
-
-static int psci_migrate_info_type(void)
-{
-	return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
-}
-
-static unsigned long psci_migrate_info_up_cpu(void)
-{
-	return invoke_psci_fn(PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU, 0, 0, 0);
-}
-
 static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
 {
 	int i, ret, count = 0;
@@ -230,240 +110,6 @@
 	return ret;
 }
 
-static int get_set_conduit_method(struct device_node *np)
-{
-	const char *method;
-
-	pr_info("probing for conduit method from DT.\n");
-
-	if (of_property_read_string(np, "method", &method)) {
-		pr_warn("missing \"method\" property\n");
-		return -ENXIO;
-	}
-
-	if (!strcmp("hvc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_hvc;
-	} else if (!strcmp("smc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_smc;
-	} else {
-		pr_warn("invalid \"method\" property: %s\n", method);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
-{
-	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
-}
-
-static void psci_sys_poweroff(void)
-{
-	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
-}
-
-/*
- * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
- * return DENIED (which would be fatal).
- */
-static void __init psci_init_migrate(void)
-{
-	unsigned long cpuid;
-	int type, cpu;
-
-	type = psci_ops.migrate_info_type();
-
-	if (type == PSCI_0_2_TOS_MP) {
-		pr_info("Trusted OS migration not required\n");
-		return;
-	}
-
-	if (type == PSCI_RET_NOT_SUPPORTED) {
-		pr_info("MIGRATE_INFO_TYPE not supported.\n");
-		return;
-	}
-
-	if (type != PSCI_0_2_TOS_UP_MIGRATE &&
-	    type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
-		pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
-		return;
-	}
-
-	cpuid = psci_migrate_info_up_cpu();
-	if (cpuid & ~MPIDR_HWID_BITMASK) {
-		pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
-			cpuid);
-		return;
-	}
-
-	cpu = get_logical_index(cpuid);
-	resident_cpu = cpu >= 0 ? cpu : -1;
-
-	pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
-}
-
-static void __init psci_0_2_set_functions(void)
-{
-	pr_info("Using standard PSCI v0.2 function IDs\n");
-	psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
-	psci_ops.cpu_suspend = psci_cpu_suspend;
-
-	psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
-	psci_ops.cpu_off = psci_cpu_off;
-
-	psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON;
-	psci_ops.cpu_on = psci_cpu_on;
-
-	psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
-	psci_ops.migrate = psci_migrate;
-
-	psci_ops.affinity_info = psci_affinity_info;
-
-	psci_ops.migrate_info_type = psci_migrate_info_type;
-
-	arm_pm_restart = psci_sys_reset;
-
-	pm_power_off = psci_sys_poweroff;
-}
-
-/*
- * Probe function for PSCI firmware versions >= 0.2
- */
-static int __init psci_probe(void)
-{
-	u32 ver = psci_get_version();
-
-	pr_info("PSCIv%d.%d detected in firmware.\n",
-			PSCI_VERSION_MAJOR(ver),
-			PSCI_VERSION_MINOR(ver));
-
-	if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
-		pr_err("Conflicting PSCI version detected.\n");
-		return -EINVAL;
-	}
-
-	psci_0_2_set_functions();
-
-	psci_init_migrate();
-
-	return 0;
-}
-
-typedef int (*psci_initcall_t)(const struct device_node *);
-
-/*
- * PSCI init function for PSCI versions >=0.2
- *
- * Probe based on PSCI PSCI_VERSION function
- */
-static int __init psci_0_2_init(struct device_node *np)
-{
-	int err;
-
-	err = get_set_conduit_method(np);
-
-	if (err)
-		goto out_put_node;
-	/*
-	 * Starting with v0.2, the PSCI specification introduced a call
-	 * (PSCI_VERSION) that allows probing the firmware version, so
-	 * that PSCI function IDs and version specific initialization
-	 * can be carried out according to the specific version reported
-	 * by firmware
-	 */
-	err = psci_probe();
-
-out_put_node:
-	of_node_put(np);
-	return err;
-}
-
-/*
- * PSCI < v0.2 get PSCI Function IDs via DT.
- */
-static int __init psci_0_1_init(struct device_node *np)
-{
-	u32 id;
-	int err;
-
-	err = get_set_conduit_method(np);
-
-	if (err)
-		goto out_put_node;
-
-	pr_info("Using PSCI v0.1 Function IDs from DT\n");
-
-	if (!of_property_read_u32(np, "cpu_suspend", &id)) {
-		psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
-		psci_ops.cpu_suspend = psci_cpu_suspend;
-	}
-
-	if (!of_property_read_u32(np, "cpu_off", &id)) {
-		psci_function_id[PSCI_FN_CPU_OFF] = id;
-		psci_ops.cpu_off = psci_cpu_off;
-	}
-
-	if (!of_property_read_u32(np, "cpu_on", &id)) {
-		psci_function_id[PSCI_FN_CPU_ON] = id;
-		psci_ops.cpu_on = psci_cpu_on;
-	}
-
-	if (!of_property_read_u32(np, "migrate", &id)) {
-		psci_function_id[PSCI_FN_MIGRATE] = id;
-		psci_ops.migrate = psci_migrate;
-	}
-
-out_put_node:
-	of_node_put(np);
-	return err;
-}
-
-static const struct of_device_id psci_of_match[] __initconst = {
-	{ .compatible = "arm,psci",	.data = psci_0_1_init},
-	{ .compatible = "arm,psci-0.2",	.data = psci_0_2_init},
-	{},
-};
-
-int __init psci_dt_init(void)
-{
-	struct device_node *np;
-	const struct of_device_id *matched_np;
-	psci_initcall_t init_fn;
-
-	np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
-
-	if (!np)
-		return -ENODEV;
-
-	init_fn = (psci_initcall_t)matched_np->data;
-	return init_fn(np);
-}
-
-#ifdef CONFIG_ACPI
-/*
- * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
- * explicitly clarified in SBBR
- */
-int __init psci_acpi_init(void)
-{
-	if (!acpi_psci_present()) {
-		pr_info("is not implemented in ACPI.\n");
-		return -EOPNOTSUPP;
-	}
-
-	pr_info("probing for conduit method from ACPI.\n");
-
-	if (acpi_psci_use_hvc())
-		invoke_psci_fn = __invoke_psci_fn_hvc;
-	else
-		invoke_psci_fn = __invoke_psci_fn_smc;
-
-	return psci_probe();
-}
-#endif
-
-#ifdef CONFIG_SMP
-
 static int __init cpu_psci_cpu_init(unsigned int cpu)
 {
 	return 0;
@@ -489,11 +135,6 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static bool psci_tos_resident_on(int cpu)
-{
-	return cpu == resident_cpu;
-}
-
 static int cpu_psci_cpu_disable(unsigned int cpu)
 {
 	/* Fail early if we don't have CPU_OFF support */
@@ -550,7 +191,6 @@
 	return -ETIMEDOUT;
 }
 #endif
-#endif
 
 static int psci_suspend_finisher(unsigned long index)
 {
@@ -585,7 +225,6 @@
 	.cpu_init_idle	= cpu_psci_cpu_init_idle,
 	.cpu_suspend	= cpu_psci_cpu_suspend,
 #endif
-#ifdef CONFIG_SMP
 	.cpu_init	= cpu_psci_cpu_init,
 	.cpu_prepare	= cpu_psci_cpu_prepare,
 	.cpu_boot	= cpu_psci_cpu_boot,
@@ -594,6 +233,5 @@
 	.cpu_die	= cpu_psci_cpu_die,
 	.cpu_kill	= cpu_psci_cpu_kill,
 #endif
-#endif
 };
 
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index d882b83..1971f49 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -826,6 +826,30 @@
 	return ret;
 }
 
+static int compat_tls_get(struct task_struct *target,
+			  const struct user_regset *regset, unsigned int pos,
+			  unsigned int count, void *kbuf, void __user *ubuf)
+{
+	compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value;
+	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+}
+
+static int compat_tls_set(struct task_struct *target,
+			  const struct user_regset *regset, unsigned int pos,
+			  unsigned int count, const void *kbuf,
+			  const void __user *ubuf)
+{
+	int ret;
+	compat_ulong_t tls;
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+	if (ret)
+		return ret;
+
+	target->thread.tp_value = tls;
+	return ret;
+}
+
 static const struct user_regset aarch32_regsets[] = {
 	[REGSET_COMPAT_GPR] = {
 		.core_note_type = NT_PRSTATUS,
@@ -850,6 +874,64 @@
 	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
 };
 
+static const struct user_regset aarch32_ptrace_regsets[] = {
+	[REGSET_GPR] = {
+		.core_note_type = NT_PRSTATUS,
+		.n = COMPAT_ELF_NGREG,
+		.size = sizeof(compat_elf_greg_t),
+		.align = sizeof(compat_elf_greg_t),
+		.get = compat_gpr_get,
+		.set = compat_gpr_set
+	},
+	[REGSET_FPR] = {
+		.core_note_type = NT_ARM_VFP,
+		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
+		.size = sizeof(compat_ulong_t),
+		.align = sizeof(compat_ulong_t),
+		.get = compat_vfp_get,
+		.set = compat_vfp_set
+	},
+	[REGSET_TLS] = {
+		.core_note_type = NT_ARM_TLS,
+		.n = 1,
+		.size = sizeof(compat_ulong_t),
+		.align = sizeof(compat_ulong_t),
+		.get = compat_tls_get,
+		.set = compat_tls_set,
+	},
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	[REGSET_HW_BREAK] = {
+		.core_note_type = NT_ARM_HW_BREAK,
+		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
+		.size = sizeof(u32),
+		.align = sizeof(u32),
+		.get = hw_break_get,
+		.set = hw_break_set,
+	},
+	[REGSET_HW_WATCH] = {
+		.core_note_type = NT_ARM_HW_WATCH,
+		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
+		.size = sizeof(u32),
+		.align = sizeof(u32),
+		.get = hw_break_get,
+		.set = hw_break_set,
+	},
+#endif
+	[REGSET_SYSTEM_CALL] = {
+		.core_note_type = NT_ARM_SYSTEM_CALL,
+		.n = 1,
+		.size = sizeof(int),
+		.align = sizeof(int),
+		.get = system_call_get,
+		.set = system_call_set,
+	},
+};
+
+static const struct user_regset_view user_aarch32_ptrace_view = {
+	.name = "aarch32", .e_machine = EM_ARM,
+	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
+};
+
 static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
 				   compat_ulong_t __user *ret)
 {
@@ -1109,8 +1191,16 @@
 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
 {
 #ifdef CONFIG_COMPAT
-	if (is_compat_thread(task_thread_info(task)))
+	/*
+	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
+	 * user_aarch32_view compatible with arm32. Native ptrace requests on
+	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
+	 * access to the TLS register.
+	 */
+	if (is_compat_task())
 		return &user_aarch32_view;
+	else if (is_compat_thread(task_thread_info(task)))
+		return &user_aarch32_ptrace_view;
 #endif
 	return &user_aarch64_view;
 }
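
With the REGSET_TLS entry above, a native 64-bit debugger can read a 32-bit tracee's TLS register through the generic regset interface. A hypothetical tracer-side sketch (assumes a ptrace-stopped 32-bit child; error handling trimmed):

	#include <elf.h>
	#include <stdint.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>

	static long read_compat_tls(pid_t pid, uint32_t *tls)
	{
		struct iovec iov = {
			.iov_base = tls,
			.iov_len  = sizeof(*tls),
		};

		/* NT_ARM_TLS selects the TLS regset added above */
		return ptrace(PTRACE_GETREGSET, pid, NT_ARM_TLS, &iov);
	}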
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 926ae8d..8884788 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -45,6 +45,7 @@
 #include <linux/of_platform.h>
 #include <linux/efi.h>
 #include <linux/personality.h>
+#include <linux/psci.h>
 
 #include <asm/acpi.h>
 #include <asm/fixmap.h>
@@ -60,9 +61,7 @@
 #include <asm/tlbflush.h>
 #include <asm/traps.h>
 #include <asm/memblock.h>
-#include <asm/psci.h>
 #include <asm/efi.h>
-#include <asm/virt.h>
 #include <asm/xen/hypervisor.h>
 
 unsigned long elf_hwcap __read_mostly;
@@ -130,7 +129,6 @@
 }
 
 struct mpidr_hash mpidr_hash;
-#ifdef CONFIG_SMP
 /**
  * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
  *			  level in order to build a linear index from an
@@ -196,35 +194,11 @@
 		pr_warn("Large number of MPIDR hash buckets detected\n");
 	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
 }
-#endif
-
-static void __init hyp_mode_check(void)
-{
-	if (is_hyp_mode_available())
-		pr_info("CPU: All CPU(s) started at EL2\n");
-	else if (is_hyp_mode_mismatched())
-		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
-			   "CPU: CPUs started in inconsistent modes");
-	else
-		pr_info("CPU: All CPU(s) started at EL1\n");
-}
-
-void __init do_post_cpus_up_work(void)
-{
-	hyp_mode_check();
-	apply_alternatives_all();
-}
-
-#ifdef CONFIG_UP_LATE_INIT
-void __init up_late_init(void)
-{
-	do_post_cpus_up_work();
-}
-#endif /* CONFIG_UP_LATE_INIT */
 
 static void __init setup_processor(void)
 {
-	u64 features, block;
+	u64 features;
+	s64 block;
 	u32 cwg;
 	int cls;
 
@@ -254,8 +228,8 @@
 	 * for non-negative values. Negative values are reserved.
 	 */
 	features = read_cpuid(ID_AA64ISAR0_EL1);
-	block = (features >> 4) & 0xf;
-	if (!(block & 0x8)) {
+	block = cpuid_feature_extract_field(features, 4);
+	if (block > 0) {
 		switch (block) {
 		default:
 		case 2:
@@ -267,26 +241,36 @@
 		}
 	}
 
-	block = (features >> 8) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 8) > 0)
 		elf_hwcap |= HWCAP_SHA1;
 
-	block = (features >> 12) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 12) > 0)
 		elf_hwcap |= HWCAP_SHA2;
 
-	block = (features >> 16) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 16) > 0)
 		elf_hwcap |= HWCAP_CRC32;
 
+	block = cpuid_feature_extract_field(features, 20);
+	if (block > 0) {
+		switch (block) {
+		default:
+		case 2:
+			elf_hwcap |= HWCAP_ATOMICS;
+		case 1:
+			/* RESERVED */
+		case 0:
+			break;
+		}
+	}
+
 #ifdef CONFIG_COMPAT
 	/*
 	 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
-	 * the Aarch32 32-bit execution state.
+	 * the AArch32 32-bit execution state.
 	 */
 	features = read_cpuid(ID_ISAR5_EL1);
-	block = (features >> 4) & 0xf;
-	if (!(block & 0x8)) {
+	block = cpuid_feature_extract_field(features, 4);
+	if (block > 0) {
 		switch (block) {
 		default:
 		case 2:
@@ -298,16 +282,13 @@
 		}
 	}
 
-	block = (features >> 8) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 8) > 0)
 		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
 
-	block = (features >> 12) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 12) > 0)
 		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
 
-	block = (features >> 16) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 16) > 0)
 		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
 #endif
 }
@@ -404,10 +385,8 @@
 	xen_early_init();
 
 	cpu_read_bootcpu_ops();
-#ifdef CONFIG_SMP
 	smp_init_cpus();
 	smp_build_mpidr_hash();
-#endif
 
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
@@ -426,8 +405,13 @@
 
 static int __init arm64_device_init(void)
 {
-	of_iommu_init();
-	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+	if (of_have_populated_dt()) {
+		of_iommu_init();
+		of_platform_populate(NULL, of_default_bus_match_table,
+				     NULL, NULL);
+	} else if (acpi_disabled) {
+		pr_crit("Device tree not populated\n");
+	}
 	return 0;
 }
 arch_initcall_sync(arm64_device_init);
@@ -455,6 +439,7 @@
 	"sha1",
 	"sha2",
 	"crc32",
+	"atomics",
 	NULL
 };
 
@@ -507,9 +492,7 @@
 		 * online processors, looking for lines beginning with
 		 * "processor".  Give glibc what it expects.
 		 */
-#ifdef CONFIG_SMP
 		seq_printf(m, "processor\t: %d\n", i);
-#endif
 
 		/*
 		 * Dump out the common processor features in a single line.
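
All of the conversions in this hunk funnel through cpuid_feature_extract_field(), which sign-extends the 4-bit ID register field so that reserved encodings (top bit set) come out negative and fail the '> 0' checks. A sketch of that helper as introduced by this series (attributes trimmed):

	static inline int cpuid_feature_extract_field(u64 features, int field)
	{
		/*
		 * Shift the field to the top of the register, then
		 * arithmetic-shift it back down so bit 3 becomes the
		 * sign bit.
		 */
		return (s64)(features << (64 - 4 - field)) >> 60;
	}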
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 803cfea..f586f7c 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -82,7 +82,6 @@
 	str	x2, [x0, #CPU_CTX_SP]
 	ldr	x1, =sleep_save_sp
 	ldr	x1, [x1, #SLEEP_SAVE_SP_VIRT]
-#ifdef CONFIG_SMP
 	mrs	x7, mpidr_el1
 	ldr	x9, =mpidr_hash
 	ldr	x10, [x9, #MPIDR_HASH_MASK]
@@ -94,7 +93,6 @@
 	ldp	w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
 	compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
 	add	x1, x1, x8, lsl #3
-#endif
 	bl	__cpu_suspend_save
 	/*
 	 * Grab suspend finisher in x20 and its argument in x19
@@ -135,6 +133,14 @@
 	ldr	x3, =cpu_resume_after_mmu
 	msr	sctlr_el1, x0		// restore sctlr_el1
 	isb
+	/*
+	 * Invalidate the local I-cache so that any instructions fetched
+	 * speculatively from the PoC are discarded, since they may have
+	 * been dynamically patched at the PoU.
+	 */
+	ic	iallu
+	dsb	nsh
+	isb
 	br	x3			// global jump to virtual address
 ENDPROC(cpu_resume_mmu)
 	.popsection
@@ -151,7 +157,6 @@
 
 ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
-#ifdef CONFIG_SMP
 	mrs	x1, mpidr_el1
 	adrp	x8, mpidr_hash
 	add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
@@ -161,9 +166,6 @@
 	ldp	w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
 	compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
         /* x7 contains hash index, let's use it to grab context pointer */
-#else
-	mov	x7, xzr
-#endif
 	ldr_l	x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
 	ldr	x0, [x0, x7, lsl #3]
 	/* load sp from context */
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 50fb469..dbdaacd 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -52,6 +52,7 @@
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
+#include <asm/virt.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
@@ -310,10 +311,22 @@
 }
 #endif
 
+static void __init hyp_mode_check(void)
+{
+	if (is_hyp_mode_available())
+		pr_info("CPU: All CPU(s) started at EL2\n");
+	else if (is_hyp_mode_mismatched())
+		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
+			   "CPU: CPUs started in inconsistent modes");
+	else
+		pr_info("CPU: All CPU(s) started at EL1\n");
+}
+
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
-	do_post_cpus_up_work();
+	hyp_mode_check();
+	apply_alternatives_all();
 }
 
 void __init smp_prepare_boot_cpu(void)
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 42f9195..149151fb 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -42,7 +42,6 @@
 #include <asm/thread_info.h>
 #include <asm/stacktrace.h>
 
-#ifdef CONFIG_SMP
 unsigned long profile_pc(struct pt_regs *regs)
 {
 	struct stackframe frame;
@@ -62,7 +61,6 @@
 	return frame.pc;
 }
 EXPORT_SYMBOL(profile_pc);
-#endif
 
 void __init time_init(void)
 {
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index fcb8f7b..694f6de 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -300,6 +300,6 @@
 	 * Discard anything that was parsed if we hit an error so we
 	 * don't use partial information.
 	 */
-	if (parse_dt_topology())
+	if (of_have_populated_dt() && parse_dt_topology())
 		reset_cpu_topology();
 }
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 566bc4c..f93aae5 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -17,6 +17,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/bug.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
 #include <linux/kallsyms.h>
@@ -32,8 +33,10 @@
 #include <linux/syscalls.h>
 
 #include <asm/atomic.h>
+#include <asm/bug.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
+#include <asm/insn.h>
 #include <asm/traps.h>
 #include <asm/stacktrace.h>
 #include <asm/exception.h>
@@ -52,11 +55,12 @@
  * Dump out the contents of some memory nicely...
  */
 static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
-		     unsigned long top)
+		     unsigned long top, bool compat)
 {
 	unsigned long first;
 	mm_segment_t fs;
 	int i;
+	unsigned int width = compat ? 4 : 8;
 
 	/*
 	 * We need to switch to kernel mode so that we can use __get_user
@@ -75,13 +79,22 @@
 		memset(str, ' ', sizeof(str));
 		str[sizeof(str) - 1] = '\0';
 
-		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
+		for (p = first, i = 0; i < (32 / width)
+					&& p < top; i++, p += width) {
 			if (p >= bottom && p < top) {
-				unsigned int val;
-				if (__get_user(val, (unsigned int *)p) == 0)
-					sprintf(str + i * 9, " %08x", val);
-				else
-					sprintf(str + i * 9, " ????????");
+				unsigned long val;
+
+				if (width == 8) {
+					if (__get_user(val, (unsigned long *)p) == 0)
+						sprintf(str + i * 17, " %016lx", val);
+					else
+						sprintf(str + i * 17, " ????????????????");
+				} else {
+					if (__get_user(val, (unsigned int *)p) == 0)
+						sprintf(str + i * 9, " %08lx", val);
+					else
+						sprintf(str + i * 9, " ????????");
+				}
 			}
 		}
 		printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
@@ -95,7 +108,7 @@
 	print_ip_sym(where);
 	if (in_exception_text(where))
 		dump_mem("", "Exception stack", stack,
-			 stack + sizeof(struct pt_regs));
+			 stack + sizeof(struct pt_regs), false);
 }
 
 static void dump_instr(const char *lvl, struct pt_regs *regs)
@@ -179,11 +192,7 @@
 #else
 #define S_PREEMPT ""
 #endif
-#ifdef CONFIG_SMP
 #define S_SMP " SMP"
-#else
-#define S_SMP ""
-#endif
 
 static int __die(const char *str, int err, struct thread_info *thread,
 		 struct pt_regs *regs)
@@ -207,7 +216,8 @@
 
 	if (!user_mode(regs) || in_interrupt()) {
 		dump_mem(KERN_EMERG, "Stack: ", regs->sp,
-			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
+			 THREAD_SIZE + (unsigned long)task_stack_page(tsk),
+			 compat_user_mode(regs));
 		dump_backtrace(regs, tsk);
 		dump_instr(KERN_EMERG, regs);
 	}
@@ -459,7 +469,63 @@
 	pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val);
 }
 
+/* GENERIC_BUG traps */
+
+int is_valid_bugaddr(unsigned long addr)
+{
+	/*
+	 * bug_handler() is only called for BRK #BUG_BRK_IMM.
+	 * So the answer is trivial -- any spurious instances with no
+	 * bug table entry will be rejected by report_bug() and passed
+	 * back to the debug-monitors code and handled as a fatal
+	 * unexpected debug exception.
+	 */
+	return 1;
+}
+
+static int bug_handler(struct pt_regs *regs, unsigned int esr)
+{
+	if (user_mode(regs))
+		return DBG_HOOK_ERROR;
+
+	switch (report_bug(regs->pc, regs)) {
+	case BUG_TRAP_TYPE_BUG:
+		die("Oops - BUG", regs, 0);
+		break;
+
+	case BUG_TRAP_TYPE_WARN:
+		/* Ideally, report_bug() should backtrace for us... but no. */
+		dump_backtrace(regs, NULL);
+		break;
+
+	default:
+		/* unknown/unrecognised bug trap type */
+		return DBG_HOOK_ERROR;
+	}
+
+	/* If thread survives, skip over the BUG instruction and continue: */
+	regs->pc += AARCH64_INSN_SIZE;
+	return DBG_HOOK_HANDLED;
+}
+
+static struct break_hook bug_break_hook = {
+	.esr_val = 0xf2000000 | BUG_BRK_IMM,
+	.esr_mask = 0xffffffff,
+	.fn = bug_handler,
+};
+
+/*
+ * Initial handler for AArch64 BRK exceptions
+ * This handler only used until debug_traps_init().
+ */
+int __init early_brk64(unsigned long addr, unsigned int esr,
+		struct pt_regs *regs)
+{
+	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
+}
+
+/* This registration must happen early, before debug_traps_init(). */
 void __init trap_init(void)
 {
-	return;
+	register_break_hook(&bug_break_hook);
 }
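
For reference, the BRK-based BUG() that bug_handler() services looks roughly like the following on arm64; this is a simplified sketch (the real macro additionally records a __bug_table entry, which report_bug() consults to distinguish BUG from WARN and to locate file/line information):

	#define MY_BUG()						\
	do {								\
		/* BRK #BUG_BRK_IMM traps into bug_handler() */		\
		asm volatile("brk %[imm]" : : [imm] "i" (BUG_BRK_IMM));	\
		unreachable();						\
	} while (0)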
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 17a8fb1..10915aa 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -810,7 +810,11 @@
  * Call into the vgic backend for state saving
  */
 .macro save_vgic_state
-	alternative_insn "bl __save_vgic_v2_state", "bl __save_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+	bl	__save_vgic_v2_state
+alternative_else
+	bl	__save_vgic_v3_state
+alternative_endif
 	mrs	x24, hcr_el2
 	mov	x25, #HCR_INT_OVERRIDE
 	neg	x25, x25
@@ -827,7 +831,11 @@
 	orr	x24, x24, #HCR_INT_OVERRIDE
 	orr	x24, x24, x25
 	msr	hcr_el2, x24
-	alternative_insn "bl __restore_vgic_v2_state", "bl __restore_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+	bl	__restore_vgic_v2_state
+alternative_else
+	bl	__restore_vgic_v3_state
+alternative_endif
 .endm
 
 .macro save_timer_state
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index d98d3e3..1a811ec 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -3,3 +3,16 @@
 		   clear_page.o memchr.o memcpy.o memmove.o memset.o	\
 		   memcmp.o strcmp.o strncmp.o strlen.o strnlen.o	\
 		   strchr.o strrchr.o
+
+# Tell the compiler to treat all general purpose registers as
+# callee-saved, which allows for efficient runtime patching of the bl
+# instruction in the caller with an atomic instruction when supported by
+# the CPU. Result and argument registers are handled correctly, based on
+# the function prototype.
+lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
+CFLAGS_atomic_ll_sc.o	:= -fcall-used-x0 -ffixed-x1 -ffixed-x2		\
+		   -ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6		\
+		   -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9		\
+		   -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12	\
+		   -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15	\
+		   -fcall-saved-x16 -fcall-saved-x17 -fcall-saved-x18
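
To see why this register discipline matters: each atomic call site embeds a single 'bl' to the out-of-line LL/SC routine, and the alternatives framework patches that one instruction into an LSE atomic on capable CPUs. Because the compiler only reloads the registers listed as clobbered, the out-of-line code must preserve everything else. A simplified, illustrative call site (not the kernel's exact macros):

	static inline void my_atomic_add(int i, atomic_t *v)
	{
		register int w0 asm ("w0") = i;
		register atomic_t *x1 asm ("x1") = v;

		asm volatile(
		ALTERNATIVE("bl	__ll_sc_my_atomic_add",	/* LL/SC fallback */
			    "stadd	%w[i], %[v]",	/* one LSE instruction */
			    ARM64_HAS_LSE_ATOMICS)
		: [i] "+r" (w0), [v] "+Q" (v->counter)
		: "r" (x1)
		: "x30", "memory");	/* bl clobbers the link register */
	}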
diff --git a/arch/arm64/lib/atomic_ll_sc.c b/arch/arm64/lib/atomic_ll_sc.c
new file mode 100644
index 0000000..b0c538b
--- /dev/null
+++ b/arch/arm64/lib/atomic_ll_sc.c
@@ -0,0 +1,3 @@
+#include <asm/atomic.h>
+#define __ARM64_IN_ATOMIC_IMPL
+#include <asm/atomic_ll_sc.h>
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index 7dac371..43ac736 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -18,52 +18,59 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/lse.h>
 
 /*
  * x0: bits 5:0  bit offset
  *     bits 31:6 word offset
  * x1: address
  */
-	.macro	bitop, name, instr
+	.macro	bitop, name, llsc, lse
 ENTRY(	\name	)
 	and	w3, w0, #63		// Get bit offset
 	eor	w0, w0, w3		// Clear low bits
 	mov	x2, #1
 	add	x1, x1, x0, lsr #3	// Get word offset
+alt_lse "	prfm	pstl1strm, [x1]",	"nop"
 	lsl	x3, x2, x3		// Create mask
-1:	ldxr	x2, [x1]
-	\instr	x2, x2, x3
-	stxr	w0, x2, [x1]
-	cbnz	w0, 1b
+
+alt_lse	"1:	ldxr	x2, [x1]",		"\lse	x3, [x1]"
+alt_lse	"	\llsc	x2, x2, x3",		"nop"
+alt_lse	"	stxr	w0, x2, [x1]",		"nop"
+alt_lse	"	cbnz	w0, 1b",		"nop"
+
 	ret
 ENDPROC(\name	)
 	.endm
 
-	.macro	testop, name, instr
+	.macro	testop, name, llsc, lse
 ENTRY(	\name	)
 	and	w3, w0, #63		// Get bit offset
 	eor	w0, w0, w3		// Clear low bits
 	mov	x2, #1
 	add	x1, x1, x0, lsr #3	// Get word offset
+alt_lse "	prfm	pstl1strm, [x1]",	"nop"
 	lsl	x4, x2, x3		// Create mask
-1:	ldxr	x2, [x1]
-	lsr	x0, x2, x3		// Save old value of bit
-	\instr	x2, x2, x4		// toggle bit
-	stlxr	w5, x2, [x1]
-	cbnz	w5, 1b
-	dmb	ish
+
+alt_lse	"1:	ldxr	x2, [x1]",		"\lse	x4, x2, [x1]"
+	lsr	x0, x2, x3
+alt_lse	"	\llsc	x2, x2, x4",		"nop"
+alt_lse	"	stlxr	w5, x2, [x1]",		"nop"
+alt_lse	"	cbnz	w5, 1b",		"nop"
+alt_lse	"	dmb	ish",			"nop"
+
 	and	x0, x0, #1
-3:	ret
+	ret
 ENDPROC(\name	)
 	.endm
 
 /*
  * Atomic bit operations.
  */
-	bitop	change_bit, eor
-	bitop	clear_bit, bic
-	bitop	set_bit, orr
+	bitop	change_bit, eor, steor
+	bitop	clear_bit, bic, stclr
+	bitop	set_bit, orr, stset
 
-	testop	test_and_change_bit, eor
-	testop	test_and_clear_bit, bic
-	testop	test_and_set_bit, orr
+	testop	test_and_change_bit, eor, ldeoral
+	testop	test_and_clear_bit, bic, ldclral
+	testop	test_and_set_bit, orr, ldsetal
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index c17967f..a9723c7 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -16,7 +16,11 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 	.text
 
@@ -29,6 +33,8 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
 	b.mi	2f
@@ -48,6 +54,8 @@
 	b.mi	5f
 USER(9f, strb	wzr, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__clear_user)
 
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 5e27add..1be9ef2 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -15,7 +15,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy from user space to a kernel buffer (alignment handled by the hardware)
@@ -28,14 +32,21 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_from_user)
-	add	x4, x1, x2			// upper user buffer boundary
-	subs	x2, x2, #8
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
+	add	x5, x1, x2			// upper user buffer boundary
+	subs	x2, x2, #16
+	b.mi	1f
+0:
+USER(9f, ldp	x3, x4, [x1], #16)
+	subs	x2, x2, #16
+	stp	x3, x4, [x0], #16
+	b.pl	0b
+1:	adds	x2, x2, #8
 	b.mi	2f
-1:
 USER(9f, ldr	x3, [x1], #8	)
-	subs	x2, x2, #8
+	sub	x2, x2, #8
 	str	x3, [x0], #8
-	b.pl	1b
 2:	adds	x2, x2, #4
 	b.mi	3f
 USER(9f, ldr	w3, [x1], #4	)
@@ -51,12 +62,14 @@
 USER(9f, ldrb	w3, [x1]	)
 	strb	w3, [x0]
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_from_user)
 
 	.section .fixup,"ax"
 	.align	2
-9:	sub	x2, x4, x1
+9:	sub	x2, x5, x1
 	mov	x3, x2
 10:	strb	wzr, [x0], #1			// zero remaining buffer space
 	subs	x3, x3, #1
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 84b6c9b..1b94661e 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -17,7 +17,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy from user space to user space (alignment handled by the hardware)
@@ -30,14 +34,21 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_in_user)
-	add	x4, x0, x2			// upper user buffer boundary
-	subs	x2, x2, #8
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
+	add	x5, x0, x2			// upper user buffer boundary
+	subs	x2, x2, #16
+	b.mi	1f
+0:
+USER(9f, ldp	x3, x4, [x1], #16)
+	subs	x2, x2, #16
+USER(9f, stp	x3, x4, [x0], #16)
+	b.pl	0b
+1:	adds	x2, x2, #8
 	b.mi	2f
-1:
 USER(9f, ldr	x3, [x1], #8	)
-	subs	x2, x2, #8
+	sub	x2, x2, #8
 USER(9f, str	x3, [x0], #8	)
-	b.pl	1b
 2:	adds	x2, x2, #4
 	b.mi	3f
 USER(9f, ldr	w3, [x1], #4	)
@@ -53,11 +64,13 @@
 USER(9f, ldrb	w3, [x1]	)
 USER(9f, strb	w3, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_in_user)
 
 	.section .fixup,"ax"
 	.align	2
-9:	sub	x0, x4, x0			// bytes not copied
+9:	sub	x0, x5, x0			// bytes not copied
 	ret
 	.previous
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index a0aeeb9..a257b47 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -15,7 +15,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy to user space from a kernel buffer (alignment handled by the hardware)
@@ -28,14 +32,21 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_to_user)
-	add	x4, x0, x2			// upper user buffer boundary
-	subs	x2, x2, #8
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
+	add	x5, x0, x2			// upper user buffer boundary
+	subs	x2, x2, #16
+	b.mi	1f
+0:
+	ldp	x3, x4, [x1], #16
+	subs	x2, x2, #16
+USER(9f, stp	x3, x4, [x0], #16)
+	b.pl	0b
+1:	adds	x2, x2, #8
 	b.mi	2f
-1:
 	ldr	x3, [x1], #8
-	subs	x2, x2, #8
+	sub	x2, x2, #8
 USER(9f, str	x3, [x0], #8	)
-	b.pl	1b
 2:	adds	x2, x2, #4
 	b.mi	3f
 	ldr	w3, [x1], #4
@@ -51,11 +62,13 @@
 	ldrb	w3, [x1]
 USER(9f, strb	w3, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_to_user)
 
 	.section .fixup,"ax"
 	.align	2
-9:	sub	x0, x4, x0			// bytes not copied
+9:	sub	x0, x5, x0			// bytes not copied
 	ret
 	.previous
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index bdeb5d3..eb48d5d 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -143,7 +143,12 @@
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	bic	x0, x0, x3
-1:	alternative_insn "dc cvac, x0", "dc civac, x0", ARM64_WORKAROUND_CLEAN_CACHE
+1:
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+	dc	cvac, x0
+alternative_else
+	dc	civac, x0
+alternative_endif
 	add	x0, x0, x2
 	cmp	x0, x1
 	b.lo	1b
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 76c1e6c..d70ff14 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -53,8 +53,6 @@
 		__flush_icache_all();
 }
 
-#ifdef CONFIG_SMP
-
 static void set_mm_context(struct mm_struct *mm, unsigned int asid)
 {
 	unsigned long flags;
@@ -110,23 +108,12 @@
 	cpu_switch_mm(mm->pgd, mm);
 }
 
-#else
-
-static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
-{
-	mm->context.id = asid;
-	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
-}
-
-#endif
-
 void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 	unsigned int bits = asid_bits();
 
 	raw_spin_lock(&cpu_asid_lock);
-#ifdef CONFIG_SMP
 	/*
 	 * Check the ASID again, in case the change was broadcast from another
 	 * CPU before we acquired the lock.
@@ -136,7 +123,6 @@
 		raw_spin_unlock(&cpu_asid_lock);
 		return;
 	}
-#endif
 	/*
 	 * At this point, it is guaranteed that the current mm (with an old
 	 * ASID) isn't active on any other CPU since the ASIDs are changed
@@ -155,10 +141,8 @@
 			cpu_last_asid = ASID_FIRST_VERSION;
 		asid = cpu_last_asid + smp_processor_id();
 		flush_context();
-#ifdef CONFIG_SMP
 		smp_wmb();
 		smp_call_function(reset_context, NULL, 1);
-#endif
 		cpu_last_asid += NR_CPUS - 1;
 	}
 
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d16a1ce..0bcc4bc 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -144,6 +144,7 @@
 	struct page *page;
 	void *ptr, *coherent_ptr;
 	bool coherent = is_device_dma_coherent(dev);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
 
 	size = PAGE_ALIGN(size);
 
@@ -171,9 +172,7 @@
 	/* create a coherent mapping */
 	page = virt_to_page(ptr);
 	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
-				__get_dma_pgprot(attrs,
-					__pgprot(PROT_NORMAL_NC), false),
-					NULL);
+						   prot, NULL);
 	if (!coherent_ptr)
 		goto no_map;
 
@@ -303,9 +302,10 @@
 				       sg->length, dir);
 }
 
-/* vma->vm_page_prot must be set appropriately before calling this function */
-static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-			     void *cpu_addr, dma_addr_t dma_addr, size_t size)
+static int __swiotlb_mmap(struct device *dev,
+			  struct vm_area_struct *vma,
+			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			  struct dma_attrs *attrs)
 {
 	int ret = -ENXIO;
 	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
@@ -314,6 +314,9 @@
 	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
 
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					     is_device_dma_coherent(dev));
+
 	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
@@ -327,20 +330,24 @@
 	return ret;
 }
 
-static int __swiotlb_mmap(struct device *dev,
-			  struct vm_area_struct *vma,
-			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			  struct dma_attrs *attrs)
+static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+				 void *cpu_addr, dma_addr_t handle, size_t size,
+				 struct dma_attrs *attrs)
 {
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
-					     is_device_dma_coherent(dev));
-	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+
+	if (!ret)
+		sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
+			    PAGE_ALIGN(size), 0);
+
+	return ret;
 }
 
 static struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,
 	.mmap = __swiotlb_mmap,
+	.get_sgtable = __swiotlb_get_sgtable,
 	.map_page = __swiotlb_map_page,
 	.unmap_page = __swiotlb_unmap_page,
 	.map_sg = __swiotlb_map_sg_attrs,
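
With .get_sgtable wired up, users of the generic DMA API (dma-buf exporters
being the usual case) can describe a coherent allocation as a one-entry
scatterlist. A caller-side sketch, assuming only the generic API:

	/* Sketch: consuming the new hook via dma_get_sgtable(). */
	struct sg_table sgt;
	int ret = dma_get_sgtable(dev, &sgt, cpu_addr, handle, size);

	if (!ret) {
		/* sgt.sgl now holds a single entry spanning the buffer;
		 * hand it to the importer, then sg_free_table(&sgt). */
	}
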
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 94d98cd..aba9ead 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -30,9 +30,11 @@
 #include <linux/highmem.h>
 #include <linux/perf_event.h>
 
+#include <asm/cpufeature.h>
 #include <asm/exception.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
+#include <asm/sysreg.h>
 #include <asm/system_misc.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -224,6 +226,13 @@
 	}
 
 	/*
+	 * PAN bit set implies the fault happened in kernel space, but not
+	 * in the arch's user access functions.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_PAN) && (regs->pstate & PSR_PAN_BIT))
+		goto no_context;
+
+	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,
 	 * we can bug out early if this is from code which shouldn't.
@@ -492,14 +501,22 @@
 	arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
 }
 
-static struct fault_info debug_fault_info[] = {
+int __init early_brk64(unsigned long addr, unsigned int esr,
+		       struct pt_regs *regs);
+
+/*
+ * __refdata because early_brk64 is __init, but the reference to it is
+ * clobbered at arch_initcall time.
+ * See traps.c and debug-monitors.c:debug_traps_init().
+ */
+static struct fault_info __refdata debug_fault_info[] = {
 	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware breakpoint"	},
 	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware single-step"	},
 	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware watchpoint"	},
 	{ do_bad,	SIGBUS,		0,		"unknown 3"		},
 	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch32 BKPT"		},
 	{ do_bad,	SIGTRAP,	0,		"aarch32 vector catch"	},
-	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK"		},
+	{ early_brk64,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK"		},
 	{ do_bad,	SIGBUS,		0,		"unknown 7"		},
 };
 
@@ -536,3 +553,10 @@
 
 	return 0;
 }
+
+#ifdef CONFIG_ARM64_PAN
+void cpu_enable_pan(void)
+{
+	config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+}
+#endif /* CONFIG_ARM64_PAN */
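
cpu_enable_pan() leans on config_sctlr_el1() being a plain read-modify-write
of SCTLR_EL1: clearing the SPAN bit makes the hardware set PSTATE.PAN
automatically on every exception taken to EL1. A sketch of the assumed shape
(the helper's real definition lives in sysreg.h, outside this hunk):

	/* Assumed shape of config_sctlr_el1(clear, set) - a sketch. */
	static inline void config_sctlr_el1_sketch(unsigned long clear,
						   unsigned long set)
	{
		unsigned long val;

		asm volatile("mrs %0, sctlr_el1" : "=r" (val));
		val &= ~clear;		/* cpu_enable_pan() clears SCTLR_EL1_SPAN */
		val |= set;
		asm volatile("msr sctlr_el1, %0" : : "r" (val));
	}
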
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 4dfa397..c26b804 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -60,14 +60,10 @@
 		       unsigned long uaddr, void *dst, const void *src,
 		       unsigned long len)
 {
-#ifdef CONFIG_SMP
 	preempt_disable();
-#endif
 	memcpy(dst, src, len);
 	flush_ptrace_access(vma, page, uaddr, dst, len);
-#ifdef CONFIG_SMP
 	preempt_enable();
-#endif
 }
 
 void __sync_icache_dcache(pte_t pte, unsigned long addr)
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 831ec53..383b03f 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -13,10 +13,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #include <linux/init.h>
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index ad87ce8..f5c0680 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -358,9 +358,9 @@
 
 #ifdef CONFIG_BLK_DEV_INITRD
 
-static int keep_initrd;
+static int keep_initrd __initdata;
 
-void free_initrd_mem(unsigned long start, unsigned long end)
+void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
 	if (!keep_initrd)
 		free_reserved_area((void *)start, (void *)end, 0, "initrd");
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a4ede4e..9211b85 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -267,7 +267,7 @@
 	return ptr;
 }
 
-static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
+static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 				  phys_addr_t size, pgprot_t prot)
 {
 	if (virt < VMALLOC_START) {
@@ -461,17 +461,6 @@
 }
 
 /*
- * Enable the identity mapping to allow the MMU disabling.
- */
-void setup_mm_for_reboot(void)
-{
-	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
-	cpu_set_idmap_tcr_t0sz();
-	cpu_switch_mm(idmap_pg_dir, &init_mm);
-}
-
-/*
  * Check whether a kernel address is valid (derived from arch/x86/).
  */
 int kern_addr_valid(unsigned long addr)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 39139a3..e4ee7bd 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -34,11 +34,7 @@
 #define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
 #endif
 
-#ifdef CONFIG_SMP
 #define TCR_SMP_FLAGS	TCR_SHARED
-#else
-#define TCR_SMP_FLAGS	0
-#endif
 
 /* PTWs cacheable, inner/outer WBWA */
 #define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA
@@ -150,13 +146,13 @@
  *	value of the SCTLR_EL1 register.
  */
 ENTRY(__cpu_setup)
-	ic	iallu				// I+BTB cache invalidate
 	tlbi	vmalle1is			// invalidate I + D TLBs
 	dsb	ish
 
 	mov	x0, #3 << 20
 	msr	cpacr_el1, x0			// Enable FP/ASIMD
-	msr	mdscr_el1, xzr			// Reset mdscr_el1
+	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
+	msr	mdscr_el1, x0			// access to the DCC from EL0
 	/*
 	 * Memory region attributes for LPAE:
 	 *
@@ -196,6 +192,19 @@
 	 */
 	mrs	x9, ID_AA64MMFR0_EL1
 	bfi	x10, x9, #32, #3
+#ifdef CONFIG_ARM64_HW_AFDBM
+	/*
+	 * Hardware update of the Access and Dirty bits.
+	 */
+	mrs	x9, ID_AA64MMFR1_EL1
+	and	x9, x9, #0xf
+	cbz	x9, 2f
+	cmp	x9, #2
+	b.lt	1f
+	orr	x10, x10, #TCR_HD		// hardware Dirty flag update
+1:	orr	x10, x10, #TCR_HA		// hardware Access flag update
+2:
+#endif	/* CONFIG_ARM64_HW_AFDBM */
 	msr	tcr_el1, x10
 	ret					// return to head.S
 ENDPROC(__cpu_setup)
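
The AFDBM hunk decodes the bottom field of ID_AA64MMFR1_EL1: 0 means no
hardware Access/Dirty bit management, 1 means Access flag only, 2 (or
higher) means both. The same decode restated in C (a sketch; TCR_HA/TCR_HD
are the TCR_EL1 hardware-update enable bits used above):

	/* Sketch: the ID_AA64MMFR1_EL1 decode performed above in assembly. */
	unsigned long mmfr1, hafdbs, tcr_extra = 0;

	asm volatile("mrs %0, id_aa64mmfr1_el1" : "=r" (mmfr1));
	hafdbs = mmfr1 & 0xf;		/* 0: none, 1: AF only, >= 2: AF + DBM */
	if (hafdbs >= 1) {
		tcr_extra |= TCR_HA;	/* hardware Access flag update */
		if (hafdbs >= 2)
			tcr_extra |= TCR_HD;	/* hardware Dirty flag update */
	}
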
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index 2d07ce1..97c9bdf 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -44,6 +44,18 @@
 ATOMIC_OP_RETURN(sub, sub, rKs21)
 ATOMIC_OP_RETURN(add, add, r)
 
+#define ATOMIC_OP(op, asm_op)						\
+ATOMIC_OP_RETURN(op, asm_op, r)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	(void)__atomic_##op##_return(i, v);				\
+}
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(or, or)
+ATOMIC_OP(xor, eor)
+
+#undef ATOMIC_OP
 #undef ATOMIC_OP_RETURN
 
 /*
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index a107a98..1c1c423 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -16,19 +16,21 @@
 #include <linux/types.h>
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
-asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 
 #define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
 
-#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i)
-#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i))
+#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
+#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
 
-#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m)
-#define atomic_set_mask(m, v)   __raw_atomic_set_asm(&(v)->counter, m)
+#define atomic_or(i, v)  (void)__raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
 
 #endif
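
The old mask helpers map onto the new ops mechanically; the only subtlety is
that the clear-mask inversion now happens at the call site, which is why
___raw_bit_clear_asm below builds ~(1 << nr) itself. Equivalent shims for
out-of-tree callers (a sketch of the rewrite rule, not kernel API):

	#define atomic_clear_mask(mask, v)	atomic_and(~(mask), (v))
	#define atomic_set_mask(mask, v)	atomic_or((mask), (v))
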
 
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index c446591..a401c27 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -83,11 +83,12 @@
 EXPORT_SYMBOL(insl_16);
 
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(__raw_atomic_update_asm);
-EXPORT_SYMBOL(__raw_atomic_clear_asm);
-EXPORT_SYMBOL(__raw_atomic_set_asm);
+EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_and_asm);
+EXPORT_SYMBOL(__raw_atomic_or_asm);
 EXPORT_SYMBOL(__raw_atomic_xor_asm);
 EXPORT_SYMBOL(__raw_atomic_test_asm);
+
 EXPORT_SYMBOL(__raw_xchg_1_asm);
 EXPORT_SYMBOL(__raw_xchg_2_asm);
 EXPORT_SYMBOL(__raw_xchg_4_asm);
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index 2a08df8..26fccb5 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -587,10 +587,10 @@
  * r0 = ptr
  * r1 = value
  *
- * Add a signed value to a 32bit word and return the new value atomically.
+ * ADD a signed value to a 32bit word and return the new value atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_update_asm)
+ENTRY(___raw_atomic_add_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -603,19 +603,19 @@
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_update_asm)
+ENDPROC(___raw_atomic_add_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
  *
- * Clear the mask bits from a 32bit word and return the old 32bit value
+ * AND the mask bits into a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_clear_asm)
+ENTRY(___raw_atomic_and_asm)
 	p1 = r0;
-	r3 = ~r1;
+	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
 	r2 = [p1];
@@ -627,17 +627,17 @@
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_clear_asm)
+ENDPROC(___raw_atomic_and_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
  *
- * Set the mask bits into a 32bit word and return the old 32bit value
+ * OR the mask bits into a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_set_asm)
+ENTRY(___raw_atomic_or_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -651,7 +651,7 @@
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_set_asm)
+ENDPROC(___raw_atomic_or_asm)
 
 /*
  * r0 = ptr
@@ -787,7 +787,7 @@
 	r2 = r1;
 	r1 = 1;
 	r1 <<= r2;
-	jump ___raw_atomic_set_asm
+	jump ___raw_atomic_or_asm
 ENDPROC(___raw_bit_set_asm)
 
 /*
@@ -798,10 +798,10 @@
  * Clobbers: r3:0, p1:0
  */
 ENTRY(___raw_bit_clear_asm)
-	r2 = r1;
-	r1 = 1;
-	r1 <<= r2;
-	jump ___raw_atomic_clear_asm
+	r2 = 1;
+	r2 <<= r1;
+	r1 = ~r2;
+	jump ___raw_atomic_and_asm
 ENDPROC(___raw_bit_clear_asm)
 
 /*
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 1c72595..0030e21 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -195,7 +195,7 @@
 	local_irq_save(flags);
 	for_each_cpu(cpu, cpumask) {
 		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
-		atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
+		atomic_or((1 << msg), &bfin_ipi_data->bits);
 		atomic_inc(&bfin_ipi_data->count);
 	}
 	local_irq_restore(flags);
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 102190a..0da689d 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -15,7 +15,6 @@
 #define _ASM_ATOMIC_H
 
 #include <linux/types.h>
-#include <asm/spr-regs.h>
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
@@ -23,6 +22,8 @@
 #error not SMP safe
 #endif
 
+#include <asm/atomic_defs.h>
+
 /*
  * Atomic operations that C can't guarantee us.  Useful for
  * resource counting etc..
@@ -34,56 +35,26 @@
 #define atomic_read(v)		ACCESS_ONCE((v)->counter)
 #define atomic_set(v, i)	(((v)->counter) = (i))
 
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+static inline int atomic_inc_return(atomic_t *v)
+{
+	return __atomic_add_return(1, &v->counter);
+}
+
+static inline int atomic_dec_return(atomic_t *v)
+{
+	return __atomic_sub_return(1, &v->counter);
+}
+
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	unsigned long val;
-
-	asm("0:						\n"
-	    "	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-	    "	ckeq		icc3,cc7		\n"
-	    "	ld.p		%M0,%1			\n"	/* LD.P/ORCR must be atomic */
-	    "	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-	    "	add%I2		%1,%2,%1		\n"
-	    "	cst.p		%1,%M0		,cc3,#1	\n"
-	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
-	    "	beq		icc3,#0,0b		\n"
-	    : "+U"(v->counter), "=&r"(val)
-	    : "NPr"(i)
-	    : "memory", "cc7", "cc3", "icc3"
-	    );
-
-	return val;
+	return __atomic_add_return(i, &v->counter);
 }
 
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
-	unsigned long val;
-
-	asm("0:						\n"
-	    "	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-	    "	ckeq		icc3,cc7		\n"
-	    "	ld.p		%M0,%1			\n"	/* LD.P/ORCR must be atomic */
-	    "	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-	    "	sub%I2		%1,%2,%1		\n"
-	    "	cst.p		%1,%M0		,cc3,#1	\n"
-	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
-	    "	beq		icc3,#0,0b		\n"
-	    : "+U"(v->counter), "=&r"(val)
-	    : "NPr"(i)
-	    : "memory", "cc7", "cc3", "icc3"
-	    );
-
-	return val;
+	return __atomic_sub_return(i, &v->counter);
 }
 
-#else
-
-extern int atomic_add_return(int i, atomic_t *v);
-extern int atomic_sub_return(int i, atomic_t *v);
-
-#endif
-
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	return atomic_add_return(i, v) < 0;
@@ -101,17 +72,14 @@
 
 static inline void atomic_inc(atomic_t *v)
 {
-	atomic_add_return(1, v);
+	atomic_inc_return(v);
 }
 
 static inline void atomic_dec(atomic_t *v)
 {
-	atomic_sub_return(1, v);
+	atomic_dec_return(v);
 }
 
-#define atomic_dec_return(v)		atomic_sub_return(1, (v))
-#define atomic_inc_return(v)		atomic_add_return(1, (v))
-
 #define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
 #define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
 #define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
@@ -120,18 +88,19 @@
  * 64-bit atomic ops
  */
 typedef struct {
-	volatile long long counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
-static inline long long atomic64_read(atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
 	long long counter;
 
 	asm("ldd%I1 %M1,%0"
 	    : "=e"(counter)
 	    : "m"(v->counter));
+
 	return counter;
 }
 
@@ -142,10 +111,25 @@
 		     : "e"(i));
 }
 
-extern long long atomic64_inc_return(atomic64_t *v);
-extern long long atomic64_dec_return(atomic64_t *v);
-extern long long atomic64_add_return(long long i, atomic64_t *v);
-extern long long atomic64_sub_return(long long i, atomic64_t *v);
+static inline long long atomic64_inc_return(atomic64_t *v)
+{
+	return __atomic64_add_return(1, &v->counter);
+}
+
+static inline long long atomic64_dec_return(atomic64_t *v)
+{
+	return __atomic64_sub_return(1, &v->counter);
+}
+
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+	return __atomic64_add_return(i, &v->counter);
+}
+
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+	return __atomic64_sub_return(i, &v->counter);
+}
 
 static inline long long atomic64_add_negative(long long i, atomic64_t *v)
 {
@@ -176,6 +160,7 @@
 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
 #define atomic64_inc_and_test(v)	(atomic64_inc_return((v)) == 0)
 
+
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&(v)->counter, old, new))
 #define atomic_xchg(v, new)		(xchg(&(v)->counter, new))
 #define atomic64_cmpxchg(v, old, new)	(__cmpxchg_64(old, new, &(v)->counter))
@@ -196,5 +181,21 @@
 	return c;
 }
 
+#define ATOMIC_OP(op)							\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	(void)__atomic32_fetch_##op(i, &v->counter);			\
+}									\
+									\
+static inline void atomic64_##op(long long i, atomic64_t *v)		\
+{									\
+	(void)__atomic64_fetch_##op(i, &v->counter);			\
+}
+
+ATOMIC_OP(or)
+ATOMIC_OP(and)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
 
 #endif /* _ASM_ATOMIC_H */
diff --git a/arch/frv/include/asm/atomic_defs.h b/arch/frv/include/asm/atomic_defs.h
new file mode 100644
index 0000000..36e126d
--- /dev/null
+++ b/arch/frv/include/asm/atomic_defs.h
@@ -0,0 +1,172 @@
+
+#include <asm/spr-regs.h>
+
+#ifdef __ATOMIC_LIB__
+
+#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define ATOMIC_QUALS
+#define ATOMIC_EXPORT(x)	EXPORT_SYMBOL(x)
+
+#else /* !OUTOFLINE && LIB */
+
+#define ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)
+
+#endif /* OUTOFLINE */
+
+#else /* !__ATOMIC_LIB__ */
+
+#define ATOMIC_EXPORT(x)
+
+#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define ATOMIC_OP_RETURN(op)						\
+extern int __atomic_##op##_return(int i, int *v);			\
+extern long long __atomic64_##op##_return(long long i, long long *v);
+
+#define ATOMIC_FETCH_OP(op)						\
+extern int __atomic32_fetch_##op(int i, int *v);			\
+extern long long __atomic64_fetch_##op(long long i, long long *v);
+
+#else /* !OUTOFLINE && !LIB */
+
+#define ATOMIC_QUALS	static inline
+
+#endif /* OUTOFLINE */
+#endif /* __ATOMIC_LIB__ */
+
+
+/*
+ * Note on the 64-bit inline asm variants...
+ *
+ * CSTD is a conditional instruction and needs a constrained memory reference.
+ * Normally 'U' provides the correct constraints for conditional instructions,
+ * and this is used for the 32-bit version; however, 'U' does not appear to
+ * work for 64-bit values (gcc-4.9).
+ *
+ * The exact constraint is that conditional instructions cannot deal with an
+ * immediate displacement in the memory reference, so we read the address
+ * through a volatile cast into a local variable to ensure we _have_ to
+ * compute the correct address without a displacement. This allows us to use
+ * the regular 'm' constraint for the memory address.
+ *
+ * Furthermore, the %Ln operand, which prints the low word register (r+1),
+ * really only works for registers; this means we cannot allow immediate
+ * values for the 64-bit versions, like we do for the 32-bit ones.
+ *
+ */
+
+#ifndef ATOMIC_OP_RETURN
+#define ATOMIC_OP_RETURN(op)						\
+ATOMIC_QUALS int __atomic_##op##_return(int i, int *v)			\
+{									\
+	int val;							\
+									\
+	asm volatile(							\
+	    "0:						\n"		\
+	    "	orcc		gr0,gr0,gr0,icc3	\n"		\
+	    "	ckeq		icc3,cc7		\n"		\
+	    "	ld.p		%M0,%1			\n"		\
+	    "	orcr		cc7,cc7,cc3		\n"		\
+	    "   "#op"%I2	%1,%2,%1		\n"		\
+	    "	cst.p		%1,%M0		,cc3,#1	\n"		\
+	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"		\
+	    "	beq		icc3,#0,0b		\n"		\
+	    : "+U"(*v), "=&r"(val)					\
+	    : "NPr"(i)							\
+	    : "memory", "cc7", "cc3", "icc3"				\
+	    );								\
+									\
+	return val;							\
+}									\
+ATOMIC_EXPORT(__atomic_##op##_return);					\
+									\
+ATOMIC_QUALS long long __atomic64_##op##_return(long long i, long long *v)	\
+{									\
+	long long *__v = READ_ONCE(v);					\
+	long long val;							\
+									\
+	asm volatile(							\
+	    "0:						\n"		\
+	    "	orcc		gr0,gr0,gr0,icc3	\n"		\
+	    "	ckeq		icc3,cc7		\n"		\
+	    "	ldd.p		%M0,%1			\n"		\
+	    "	orcr		cc7,cc7,cc3		\n"		\
+	    "   "#op"cc		%L1,%L2,%L1,icc0	\n"		\
+	    "   "#op"x		%1,%2,%1,icc0		\n"		\
+	    "	cstd.p		%1,%M0		,cc3,#1	\n"		\
+	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"		\
+	    "	beq		icc3,#0,0b		\n"		\
+	    : "+m"(*__v), "=&e"(val)					\
+	    : "e"(i)							\
+	    : "memory", "cc7", "cc3", "icc0", "icc3"			\
+	    );								\
+									\
+	return val;							\
+}									\
+ATOMIC_EXPORT(__atomic64_##op##_return);
+#endif
+
+#ifndef ATOMIC_FETCH_OP
+#define ATOMIC_FETCH_OP(op)						\
+ATOMIC_QUALS int __atomic32_fetch_##op(int i, int *v)			\
+{									\
+	int old, tmp;							\
+									\
+	asm volatile(							\
+		"0:						\n"	\
+		"	orcc		gr0,gr0,gr0,icc3	\n"	\
+		"	ckeq		icc3,cc7		\n"	\
+		"	ld.p		%M0,%1			\n"	\
+		"	orcr		cc7,cc7,cc3		\n"	\
+		"	"#op"%I3	%1,%3,%2		\n"	\
+		"	cst.p		%2,%M0		,cc3,#1	\n"	\
+		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	\
+		"	beq		icc3,#0,0b		\n"	\
+		: "+U"(*v), "=&r"(old), "=r"(tmp)			\
+		: "NPr"(i)						\
+		: "memory", "cc7", "cc3", "icc3"			\
+		);							\
+									\
+	return old;							\
+}									\
+ATOMIC_EXPORT(__atomic32_fetch_##op);					\
+									\
+ATOMIC_QUALS long long __atomic64_fetch_##op(long long i, long long *v)	\
+{									\
+	long long *__v = READ_ONCE(v);					\
+	long long old, tmp;						\
+									\
+	asm volatile(							\
+		"0:						\n"	\
+		"	orcc		gr0,gr0,gr0,icc3	\n"	\
+		"	ckeq		icc3,cc7		\n"	\
+		"	ldd.p		%M0,%1			\n"	\
+		"	orcr		cc7,cc7,cc3		\n"	\
+		"	"#op"		%L1,%L3,%L2		\n"	\
+		"	"#op"		%1,%3,%2		\n"	\
+		"	cstd.p		%2,%M0		,cc3,#1	\n"	\
+		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	\
+		"	beq		icc3,#0,0b		\n"	\
+		: "+m"(*__v), "=&e"(old), "=e"(tmp)			\
+		: "e"(i)						\
+		: "memory", "cc7", "cc3", "icc3"			\
+		);							\
+									\
+	return old;							\
+}									\
+ATOMIC_EXPORT(__atomic64_fetch_##op);
+#endif
+
+ATOMIC_FETCH_OP(or)
+ATOMIC_FETCH_OP(and)
+ATOMIC_FETCH_OP(xor)
+
+ATOMIC_OP_RETURN(add)
+ATOMIC_OP_RETURN(sub)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_QUALS
+#undef ATOMIC_EXPORT
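
atomic_defs.h is built to be included twice: once from atomic.h for the
declaration/inline side, and once from the new lib/atomic-lib.c (which
defines __ATOMIC_LIB__ first) for the exported out-of-line bodies. A minimal
standalone demo of the trick, simplified to one op and one build knob, with
illustrative names rather than the kernel's:

	/* ops.h: include normally for use; define OPS_LIB first in exactly
	 * one .c file to emit the exported out-of-line definitions. */
	#if defined(OPS_LIB)
	#define OPS_QUALS			/* real external definitions */
	#elif defined(OPS_OUT_OF_LINE)
	extern int ops_add_return(int i, int *v);
	#define OPS_NO_BODY
	#else
	#define OPS_QUALS static inline		/* all-inline build */
	#endif

	#ifndef OPS_NO_BODY
	OPS_QUALS int ops_add_return(int i, int *v)
	{
		return *v += i;			/* shape only; not atomic */
	}
	#endif
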
diff --git a/arch/frv/include/asm/bitops.h b/arch/frv/include/asm/bitops.h
index 96de220..0df8e95 100644
--- a/arch/frv/include/asm/bitops.h
+++ b/arch/frv/include/asm/bitops.h
@@ -25,109 +25,30 @@
 
 #include <asm-generic/bitops/ffz.h>
 
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-static inline
-unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
-{
-	unsigned long old, tmp;
-
-	asm volatile(
-		"0:						\n"
-		"	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-		"	ckeq		icc3,cc7		\n"
-		"	ld.p		%M0,%1			\n"	/* LD.P/ORCR are atomic */
-		"	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-		"	and%I3		%1,%3,%2		\n"
-		"	cst.p		%2,%M0		,cc3,#1	\n"	/* if store happens... */
-		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
-		"	beq		icc3,#0,0b		\n"
-		: "+U"(*v), "=&r"(old), "=r"(tmp)
-		: "NPr"(~mask)
-		: "memory", "cc7", "cc3", "icc3"
-		);
-
-	return old;
-}
-
-static inline
-unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v)
-{
-	unsigned long old, tmp;
-
-	asm volatile(
-		"0:						\n"
-		"	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-		"	ckeq		icc3,cc7		\n"
-		"	ld.p		%M0,%1			\n"	/* LD.P/ORCR are atomic */
-		"	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-		"	or%I3		%1,%3,%2		\n"
-		"	cst.p		%2,%M0		,cc3,#1	\n"	/* if store happens... */
-		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
-		"	beq		icc3,#0,0b		\n"
-		: "+U"(*v), "=&r"(old), "=r"(tmp)
-		: "NPr"(mask)
-		: "memory", "cc7", "cc3", "icc3"
-		);
-
-	return old;
-}
-
-static inline
-unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v)
-{
-	unsigned long old, tmp;
-
-	asm volatile(
-		"0:						\n"
-		"	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-		"	ckeq		icc3,cc7		\n"
-		"	ld.p		%M0,%1			\n"	/* LD.P/ORCR are atomic */
-		"	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-		"	xor%I3		%1,%3,%2		\n"
-		"	cst.p		%2,%M0		,cc3,#1	\n"	/* if store happens... */
-		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
-		"	beq		icc3,#0,0b		\n"
-		: "+U"(*v), "=&r"(old), "=r"(tmp)
-		: "NPr"(mask)
-		: "memory", "cc7", "cc3", "icc3"
-		);
-
-	return old;
-}
-
-#else
-
-extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
-extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
-extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
-
-#endif
-
-#define atomic_clear_mask(mask, v)	atomic_test_and_ANDNOT_mask((mask), (v))
-#define atomic_set_mask(mask, v)	atomic_test_and_OR_mask((mask), (v))
+#include <asm/atomic.h>
 
 static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
 {
-	volatile unsigned long *ptr = addr;
-	unsigned long mask = 1UL << (nr & 31);
+	unsigned int *ptr = (void *)addr;
+	unsigned int mask = 1UL << (nr & 31);
 	ptr += nr >> 5;
-	return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0;
+	return (__atomic32_fetch_and(~mask, ptr) & mask) != 0;
 }
 
 static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
 {
-	volatile unsigned long *ptr = addr;
-	unsigned long mask = 1UL << (nr & 31);
+	unsigned int *ptr = (void *)addr;
+	unsigned int mask = 1UL << (nr & 31);
 	ptr += nr >> 5;
-	return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0;
+	return (__atomic32_fetch_or(mask, ptr) & mask) != 0;
 }
 
 static inline int test_and_change_bit(unsigned long nr, volatile void *addr)
 {
-	volatile unsigned long *ptr = addr;
-	unsigned long mask = 1UL << (nr & 31);
+	unsigned int *ptr = (void *)addr;
+	unsigned int mask = 1UL << (nr & 31);
 	ptr += nr >> 5;
-	return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0;
+	return (__atomic32_fetch_xor(mask, ptr) & mask) != 0;
 }
 
 static inline void clear_bit(unsigned long nr, volatile void *addr)
diff --git a/arch/frv/kernel/dma.c b/arch/frv/kernel/dma.c
index 156184e..370dc9f 100644
--- a/arch/frv/kernel/dma.c
+++ b/arch/frv/kernel/dma.c
@@ -109,13 +109,13 @@
 
 static DEFINE_RWLOCK(frv_dma_channels_lock);
 
-unsigned long frv_dma_inprogress;
+unsigned int frv_dma_inprogress;
 
 #define frv_clear_dma_inprogress(channel) \
-	atomic_clear_mask(1 << (channel), &frv_dma_inprogress);
+	(void)__atomic32_fetch_and(~(1 << (channel)), &frv_dma_inprogress);
 
 #define frv_set_dma_inprogress(channel) \
-	atomic_set_mask(1 << (channel), &frv_dma_inprogress);
+	(void)__atomic32_fetch_or(1 << (channel), &frv_dma_inprogress);
 
 /*****************************************************************************/
 /*
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index 86c516d..cdb4ce9 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -58,11 +58,6 @@
 EXPORT_SYMBOL(__insl_ns);
 
 #ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask);
-EXPORT_SYMBOL(atomic_test_and_OR_mask);
-EXPORT_SYMBOL(atomic_test_and_XOR_mask);
-EXPORT_SYMBOL(atomic_add_return);
-EXPORT_SYMBOL(atomic_sub_return);
 EXPORT_SYMBOL(__xchg_32);
 EXPORT_SYMBOL(__cmpxchg_32);
 #endif
diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile
index 4ff2fb1..970e8b4 100644
--- a/arch/frv/lib/Makefile
+++ b/arch/frv/lib/Makefile
@@ -5,4 +5,4 @@
 lib-y := \
 	__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
 	checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
-	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o
+	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o atomic-lib.o
diff --git a/arch/frv/lib/atomic-lib.c b/arch/frv/lib/atomic-lib.c
new file mode 100644
index 0000000..4d1b887
--- /dev/null
+++ b/arch/frv/lib/atomic-lib.c
@@ -0,0 +1,7 @@
+
+#include <linux/export.h>
+#include <asm/atomic.h>
+
+#define __ATOMIC_LIB__
+
+#include <asm/atomic_defs.h>
diff --git a/arch/frv/lib/atomic-ops.S b/arch/frv/lib/atomic-ops.S
index 5e9e6ab..b7439a9 100644
--- a/arch/frv/lib/atomic-ops.S
+++ b/arch/frv/lib/atomic-ops.S
@@ -19,116 +19,6 @@
 
 ###############################################################################
 #
-# unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
-	.globl		atomic_test_and_ANDNOT_mask
-        .type		atomic_test_and_ANDNOT_mask,@function
-atomic_test_and_ANDNOT_mask:
-	not.p		gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	and		gr8,gr10,gr11
-	cst.p		gr11,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_test_and_ANDNOT_mask, .-atomic_test_and_ANDNOT_mask
-
-###############################################################################
-#
-# unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
-	.globl		atomic_test_and_OR_mask
-        .type		atomic_test_and_OR_mask,@function
-atomic_test_and_OR_mask:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	or		gr8,gr10,gr11
-	cst.p		gr11,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_test_and_OR_mask, .-atomic_test_and_OR_mask
-
-###############################################################################
-#
-# unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
-	.globl		atomic_test_and_XOR_mask
-        .type		atomic_test_and_XOR_mask,@function
-atomic_test_and_XOR_mask:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	xor		gr8,gr10,gr11
-	cst.p		gr11,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_test_and_XOR_mask, .-atomic_test_and_XOR_mask
-
-###############################################################################
-#
-# int atomic_add_return(int i, atomic_t *v)
-#
-###############################################################################
-	.globl		atomic_add_return
-        .type		atomic_add_return,@function
-atomic_add_return:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	add		gr8,gr10,gr8
-	cst.p		gr8,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_add_return, .-atomic_add_return
-
-###############################################################################
-#
-# int atomic_sub_return(int i, atomic_t *v)
-#
-###############################################################################
-	.globl		atomic_sub_return
-        .type		atomic_sub_return,@function
-atomic_sub_return:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	sub		gr8,gr10,gr8
-	cst.p		gr8,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_sub_return, .-atomic_sub_return
-
-###############################################################################
-#
 # uint32_t __xchg_32(uint32_t i, uint32_t *v)
 #
 ###############################################################################
diff --git a/arch/frv/lib/atomic64-ops.S b/arch/frv/lib/atomic64-ops.S
index b6194ee..c4c4723 100644
--- a/arch/frv/lib/atomic64-ops.S
+++ b/arch/frv/lib/atomic64-ops.S
@@ -20,100 +20,6 @@
 
 ###############################################################################
 #
-# long long atomic64_inc_return(atomic64_t *v)
-#
-###############################################################################
-	.globl		atomic64_inc_return
-        .type		atomic64_inc_return,@function
-atomic64_inc_return:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	addicc		gr9,#1,gr9,icc0
-	addxi		gr8,#0,gr8,icc0
-	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic64_inc_return, .-atomic64_inc_return
-
-###############################################################################
-#
-# long long atomic64_dec_return(atomic64_t *v)
-#
-###############################################################################
-	.globl		atomic64_dec_return
-        .type		atomic64_dec_return,@function
-atomic64_dec_return:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	subicc		gr9,#1,gr9,icc0
-	subxi		gr8,#0,gr8,icc0
-	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic64_dec_return, .-atomic64_dec_return
-
-###############################################################################
-#
-# long long atomic64_add_return(long long i, atomic64_t *v)
-#
-###############################################################################
-	.globl		atomic64_add_return
-        .type		atomic64_add_return,@function
-atomic64_add_return:
-	or.p		gr8,gr8,gr4
-	or		gr9,gr9,gr5
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	addcc		gr9,gr5,gr9,icc0
-	addx		gr8,gr4,gr8,icc0
-	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic64_add_return, .-atomic64_add_return
-
-###############################################################################
-#
-# long long atomic64_sub_return(long long i, atomic64_t *v)
-#
-###############################################################################
-	.globl		atomic64_sub_return
-        .type		atomic64_sub_return,@function
-atomic64_sub_return:
-	or.p		gr8,gr8,gr4
-	or		gr9,gr9,gr5
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	subcc		gr9,gr5,gr9,icc0
-	subx		gr8,gr4,gr8,icc0
-	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic64_sub_return, .-atomic64_sub_return
-
-###############################################################################
-#
 # uint64_t __xchg_64(uint64_t i, uint64_t *v)
 #
 ###############################################################################
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index 7ca73f8..702ee53 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -16,83 +16,52 @@
 
 #include <linux/kernel.h>
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	ret = v->counter += i;
-	arch_local_irq_restore(flags);
-	return ret;
+#define ATOMIC_OP_RETURN(op, c_op)				\
+static inline int atomic_##op##_return(int i, atomic_t *v)	\
+{								\
+	h8300flags flags;					\
+	int ret;						\
+								\
+	flags = arch_local_irq_save();				\
+	ret = v->counter c_op i;				\
+	arch_local_irq_restore(flags);				\
+	return ret;						\
 }
 
-#define atomic_add(i, v) atomic_add_return(i, v)
+#define ATOMIC_OP(op, c_op)					\
+static inline void atomic_##op(int i, atomic_t *v)		\
+{								\
+	h8300flags flags;					\
+								\
+	flags = arch_local_irq_save();				\
+	v->counter c_op i;					\
+	arch_local_irq_restore(flags);				\
+}
+
+ATOMIC_OP_RETURN(add, +=)
+ATOMIC_OP_RETURN(sub, -=)
+
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or,  |=)
+ATOMIC_OP(xor, ^=)
+
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#define atomic_add(i, v)		(void)atomic_add_return(i, v)
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
+#define atomic_sub(i, v)		(void)atomic_sub_return(i, v)
+#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)
 
-	flags = arch_local_irq_save();
-	ret = v->counter -= i;
-	arch_local_irq_restore(flags);
-	return ret;
-}
+#define atomic_inc_return(v)		atomic_add_return(1, v)
+#define atomic_dec_return(v)		atomic_sub_return(1, v)
 
-#define atomic_sub(i, v) atomic_sub_return(i, v)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+#define atomic_inc(v)			(void)atomic_inc_return(v)
+#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
 
-static inline int atomic_inc_return(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	v->counter++;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret;
-}
-
-#define atomic_inc(v) atomic_inc_return(v)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-static inline int atomic_dec_return(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	--v->counter;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret;
-}
-
-#define atomic_dec(v) atomic_dec_return(v)
-
-static inline int atomic_dec_and_test(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	--v->counter;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret == 0;
-}
+#define atomic_dec(v)			(void)atomic_dec_return(v)
+#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -120,40 +89,4 @@
 	return ret;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
-{
-	unsigned char ccr;
-	unsigned long tmp;
-
-	__asm__ __volatile__("stc ccr,%w3\n\t"
-			     "orc #0x80,ccr\n\t"
-			     "mov.l %0,%1\n\t"
-			     "and.l %2,%1\n\t"
-			     "mov.l %1,%0\n\t"
-			     "ldc %w3,ccr"
-			     : "=m"(*v), "=r"(tmp)
-			     : "g"(~(mask)), "r"(ccr));
-}
-
-static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
-{
-	unsigned char ccr;
-	unsigned long tmp;
-
-	__asm__ __volatile__("stc ccr,%w3\n\t"
-			     "orc #0x80,ccr\n\t"
-			     "mov.l %0,%1\n\t"
-			     "or.l %2,%1\n\t"
-			     "mov.l %1,%0\n\t"
-			     "ldc %w3,ccr"
-			     : "=m"(*v), "=r"(tmp)
-			     : "g"(~(mask)), "r"(ccr));
-}
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()    barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc()    barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif /* __ARCH_H8300_ATOMIC __ */
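
On this UP-only port "atomic" just means "with interrupts masked", so the
whole API reduces to two macro templates. Expanded by hand,
ATOMIC_OP_RETURN(add, +=) generates exactly:

	static inline int atomic_add_return(int i, atomic_t *v)
	{
		h8300flags flags;
		int ret;

		flags = arch_local_irq_save();	/* UP: masking IRQs suffices */
		ret = v->counter += i;
		arch_local_irq_restore(flags);
		return ret;
	}
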
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 93d0702..811d61f 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -132,6 +132,10 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 0bf0350..be4beeb 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -45,8 +45,6 @@
 ATOMIC_OP(add, +)
 ATOMIC_OP(sub, -)
 
-#undef ATOMIC_OP
-
 #define atomic_add_return(i,v)						\
 ({									\
 	int __ia64_aar_i = (i);						\
@@ -71,6 +69,16 @@
 		: ia64_atomic_sub(__ia64_asr_i, v);			\
 })
 
+ATOMIC_OP(and, &)
+ATOMIC_OP(or, |)
+ATOMIC_OP(xor, ^)
+
+#define atomic_and(i,v)	(void)ia64_atomic_and(i,v)
+#define atomic_or(i,v)	(void)ia64_atomic_or(i,v)
+#define atomic_xor(i,v)	(void)ia64_atomic_xor(i,v)
+
+#undef ATOMIC_OP
+
 #define ATOMIC64_OP(op, c_op)						\
 static __inline__ long							\
 ia64_atomic64_##op (__s64 i, atomic64_t *v)				\
@@ -89,8 +97,6 @@
 ATOMIC64_OP(add, +)
 ATOMIC64_OP(sub, -)
 
-#undef ATOMIC64_OP
-
 #define atomic64_add_return(i,v)					\
 ({									\
 	long __ia64_aar_i = (i);					\
@@ -115,6 +121,16 @@
 		: ia64_atomic64_sub(__ia64_asr_i, v);			\
 })
 
+ATOMIC64_OP(and, &)
+ATOMIC64_OP(or, |)
+ATOMIC64_OP(xor, ^)
+
+#define atomic64_and(i,v)	(void)ia64_atomic64_and(i,v)
+#define atomic64_or(i,v)	(void)ia64_atomic64_or(i,v)
+#define atomic64_xor(i,v)	(void)ia64_atomic64_xor(i,v)
+
+#undef ATOMIC64_OP
+
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 843ba43..df896a1 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -66,12 +66,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	___p1;								\
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 31bb74a..025e2a1 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -94,6 +94,10 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -239,45 +243,4 @@
 	return c;
 }
 
-
-static __inline__ void atomic_clear_mask(unsigned long  mask, atomic_t *addr)
-{
-	unsigned long flags;
-	unsigned long tmp;
-
-	local_irq_save(flags);
-	__asm__ __volatile__ (
-		"# atomic_clear_mask		\n\t"
-		DCACHE_CLEAR("%0", "r5", "%1")
-		M32R_LOCK" %0, @%1;		\n\t"
-		"and	%0, %2;			\n\t"
-		M32R_UNLOCK" %0, @%1;		\n\t"
-		: "=&r" (tmp)
-		: "r" (addr), "r" (~mask)
-		: "memory"
-		__ATOMIC_CLOBBER
-	);
-	local_irq_restore(flags);
-}
-
-static __inline__ void atomic_set_mask(unsigned long  mask, atomic_t *addr)
-{
-	unsigned long flags;
-	unsigned long tmp;
-
-	local_irq_save(flags);
-	__asm__ __volatile__ (
-		"# atomic_set_mask		\n\t"
-		DCACHE_CLEAR("%0", "r5", "%1")
-		M32R_LOCK" %0, @%1;		\n\t"
-		"or	%0, %2;			\n\t"
-		M32R_UNLOCK" %0, @%1;		\n\t"
-		: "=&r" (tmp)
-		: "r" (addr), "r" (mask)
-		: "memory"
-		__ATOMIC_CLOBBER
-	);
-	local_irq_restore(flags);
-}
-
 #endif	/* _ASM_M32R_ATOMIC_H */
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index c18ddc7..62d6961 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -156,7 +156,7 @@
 	cpumask_clear_cpu(smp_processor_id(), &cpumask);
 	spin_lock(&flushcache_lock);
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
+	atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
 	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
@@ -407,7 +407,7 @@
 	flush_vma = vma;
 	flush_va = va;
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
+	atomic_or(*mask, (atomic_t *)&flush_cpumask);
 
 	/*
 	 * We have to send the IPI only to
diff --git a/arch/m68k/coldfire/m5272.c b/arch/m68k/coldfire/m5272.c
index b15219e..c525e4c 100644
--- a/arch/m68k/coldfire/m5272.c
+++ b/arch/m68k/coldfire/m5272.c
@@ -126,7 +126,7 @@
 static int __init init_BSP(void)
 {
 	m5272_uarts_init();
-	fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status);
+	fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status, -1);
 	return 0;
 }
 
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index e85f047..039fac1 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -77,6 +77,10 @@
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, eor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -170,16 +174,6 @@
 	return c != 0;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
-{
-	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
-}
-
-static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
-{
-	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
-}
-
 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
index 948d868..21c4c26 100644
--- a/arch/metag/include/asm/atomic_lnkget.h
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -74,44 +74,14 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	int temp;
-
-	asm volatile (
-		"1:	LNKGETD %0, [%1]\n"
-		"	AND	%0, %0, %2\n"
-		"	LNKSETD	[%1] %0\n"
-		"	DEFR	%0, TXSTAT\n"
-		"	ANDT	%0, %0, #HI(0x3f000000)\n"
-		"	CMPT	%0, #HI(0x02000000)\n"
-		"	BNZ	1b\n"
-		: "=&d" (temp)
-		: "da" (&v->counter), "bd" (~mask)
-		: "cc");
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	int temp;
-
-	asm volatile (
-		"1:	LNKGETD %0, [%1]\n"
-		"	OR	%0, %0, %2\n"
-		"	LNKSETD	[%1], %0\n"
-		"	DEFR	%0, TXSTAT\n"
-		"	ANDT	%0, %0, #HI(0x3f000000)\n"
-		"	CMPT	%0, #HI(0x02000000)\n"
-		"	BNZ	1b\n"
-		: "=&d" (temp)
-		: "da" (&v->counter), "bd" (mask)
-		: "cc");
-}
-
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int result, temp;
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
index f5d5898..f8efe38 100644
--- a/arch/metag/include/asm/atomic_lock1.h
+++ b/arch/metag/include/asm/atomic_lock1.h
@@ -68,31 +68,14 @@
 
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	__global_lock1(flags);
-	fence();
-	v->counter &= ~mask;
-	__global_unlock1(flags);
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	__global_lock1(flags);
-	fence();
-	v->counter |= mask;
-	__global_unlock1(flags);
-}
-
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int ret;
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index 5a696e5..172b7e5 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -90,12 +90,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
 	___p1;								\
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index bf4dec2..c89da63 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -61,8 +61,7 @@
 	/* FIXME this part of code is untested */
 	for_each_sg(sgl, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
-		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
-							sg->length, direction);
+		__dma_sync(sg_phys(sg), sg->length, direction);
 	}
 
 	return nents;
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4ab9a79..752acca 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1,8 +1,10 @@
 config MIPS
 	bool
 	default y
+	select ARCH_SUPPORTS_UPROBES
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
+	select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_IDE
@@ -13,7 +15,6 @@
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_BPF_JIT if !CPU_MICROMIPS
-	select ARCH_HAVE_CUSTOM_GPIO_H
 	select HAVE_FUNCTION_TRACER
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
@@ -409,6 +410,7 @@
 	select CEVT_R4K
 	select CSRC_R4K
 	select CLKSRC_MIPS_GIC
+	select COMMON_CLK
 	select DMA_MAYBE_COHERENT
 	select GENERIC_ISA_DMA
 	select HAVE_PCSPKR_PLATFORM
@@ -459,6 +461,7 @@
 	select CEVT_R4K
 	select CSRC_R4K
 	select CLKSRC_MIPS_GIC
+	select COMMON_CLK
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select DMA_NONCOHERENT
@@ -899,6 +902,7 @@
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_64BIT_KERNEL
 	select ARCH_PHYS_ADDR_T_64BIT
+	select ARCH_REQUIRE_GPIOLIB
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_SUPPORTS_HIGHMEM
@@ -948,6 +952,7 @@
 source "arch/mips/jz4740/Kconfig"
 source "arch/mips/lantiq/Kconfig"
 source "arch/mips/lasat/Kconfig"
+source "arch/mips/pistachio/Kconfig"
 source "arch/mips/pmcs-msp71xx/Kconfig"
 source "arch/mips/ralink/Kconfig"
 source "arch/mips/sgi-ip27/Kconfig"
@@ -1041,6 +1046,9 @@
 config ARCH_DMA_ADDR_T_64BIT
 	def_bool (HIGHMEM && ARCH_PHYS_ADDR_T_64BIT) || 64BIT
 
+config ARCH_SUPPORTS_UPROBES
+	bool
+
 config DMA_MAYBE_COHERENT
 	select DMA_NONCOHERENT
 	bool
@@ -1364,7 +1372,7 @@
 	  otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system.
 
 config CPU_MIPS32_R6
-	bool "MIPS32 Release 6 (EXPERIMENTAL)"
+	bool "MIPS32 Release 6"
 	depends on SYS_HAS_CPU_MIPS32_R6
 	select CPU_HAS_PREFETCH
 	select CPU_SUPPORTS_32BIT_KERNEL
@@ -1415,7 +1423,7 @@
 	  otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system.
 
 config CPU_MIPS64_R6
-	bool "MIPS64 Release 6 (EXPERIMENTAL)"
+	bool "MIPS64 Release 6"
 	depends on SYS_HAS_CPU_MIPS64_R6
 	select CPU_HAS_PREFETCH
 	select CPU_SUPPORTS_32BIT_KERNEL
@@ -1965,6 +1973,7 @@
 	select TRAD_SIGNALS
 	help
 	  Select this option if you want to build a 32-bit kernel.
+
 config 64BIT
 	bool "64-bit kernel"
 	depends on CPU_SUPPORTS_64BIT_KERNEL && SYS_SUPPORTS_64BIT_KERNEL
@@ -2110,7 +2119,7 @@
 
 config MIPS_MT_SMP
 	bool "MIPS MT SMP support (1 TC on each available VPE)"
-	depends on SYS_SUPPORTS_MULTITHREADING
+	depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select SYNC_R4K
@@ -2211,7 +2220,7 @@
 
 config MIPS_CMP
 	bool "MIPS CMP framework support (DEPRECATED)"
-	depends on SYS_SUPPORTS_MIPS_CMP
+	depends on SYS_SUPPORTS_MIPS_CMP && !CPU_MIPSR6
 	select MIPS_GIC_IPI
 	select SMP
 	select SYNC_R4K
@@ -2228,7 +2237,7 @@
 
 config MIPS_CPS
 	bool "MIPS Coherent Processing System support"
-	depends on SYS_SUPPORTS_MIPS_CPS
+	depends on SYS_SUPPORTS_MIPS_CPS && !CPU_MIPSR6
 	select MIPS_CM
 	select MIPS_CPC
 	select MIPS_CPS_PM if HOTPLUG_CPU
@@ -2303,7 +2312,7 @@
 endchoice
 
 config CPU_HAS_MSA
-	bool "Support for the MIPS SIMD Architecture (EXPERIMENTAL)"
+	bool "Support for the MIPS SIMD Architecture"
 	depends on CPU_SUPPORTS_MSA
 	depends on 64BIT || MIPS_O32_FP64_SUPPORT
 	help
@@ -2643,7 +2652,7 @@
 	  If unsure, say Y. Only embedded should say N here.
 
 config MIPS_O32_FP64_SUPPORT
-	bool "Support for O32 binaries using 64-bit FP (EXPERIMENTAL)"
+	bool "Support for O32 binaries using 64-bit FP"
 	depends on 32BIT || MIPS32_O32
 	help
 	  When this is enabled, the kernel will support use of 64-bit floating
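One note on the `ARCH_USE_CMPXCHG_LOCKREF if 64BIT` select near the top of this Kconfig hunk: the generic lockref code updates a spinlock/refcount pair with a single 64-bit cmpxchg instead of taking the lock, which is why the select is gated on 64BIT. Conceptual layout, paraphrased from include/linux/lockref.h:

```c
#include <linux/spinlock.h>
#include <linux/types.h>

/* Paraphrase of struct lockref: the union lets lib/lockref.c attempt a
 * lockless cmpxchg over lock+count as one 64-bit word, falling back to
 * taking the spinlock when the cmpxchg loop fails. */
struct lockref_shape {
	union {
		__u64 lock_count;	/* 64-bit cmpxchg target */
		struct {
			spinlock_t lock;
			int count;
		};
	};
};
```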
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 3a2b775..e250524 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -87,15 +87,6 @@
 	  Select compile flags that produce code that can be processed by the
 	  Corelis mksym utility and UDB Emulator.
 
-config RUNTIME_DEBUG
-	bool "Enable run-time debugging"
-	depends on DEBUG_KERNEL
-	help
-	  If you say Y here, some debugging macros will do run-time checking.
-	  If you say N here, those macros will mostly turn to no-ops.  See
-	  arch/mips/include/asm/debug.h for debugging macros.
-	  If unsure, say N.
-
 config DEBUG_ZBOOT
 	bool "Enable compressed kernel support debugging"
 	depends on DEBUG_KERNEL && SYS_SUPPORTS_ZBOOT
diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig
index b962898..7fa2488 100644
--- a/arch/mips/alchemy/Kconfig
+++ b/arch/mips/alchemy/Kconfig
@@ -6,13 +6,6 @@
 config ALCHEMY_GPIOINT_AU1300
 	bool
 
-# select this in your board config if you don't want to use the gpio
-# namespace as documented in the manuals.  In this case however you need
-# to create the necessary gpio_* functions in your board code/headers!
-# see arch/mips/include/asm/mach-au1x00/gpio.h   for more information.
-config ALCHEMY_GPIO_INDIRECT
-	def_bool n
-
 choice
 	prompt "Machine type"
 	depends on MIPS_ALCHEMY
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c
index acf9a2a..79efe4c 100644
--- a/arch/mips/alchemy/board-gpr.c
+++ b/arch/mips/alchemy/board-gpr.c
@@ -34,6 +34,7 @@
 #include <asm/idle.h>
 #include <asm/reboot.h>
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <prom.h>
 
 const char *get_system_type(void)
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index 1e3b102..85bb756 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -32,6 +32,7 @@
 #include <asm/bootinfo.h>
 #include <asm/reboot.h>
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <asm/mach-au1x00/au1xxx_eth.h>
 #include <prom.h>
 
diff --git a/arch/mips/alchemy/common/Makefile b/arch/mips/alchemy/common/Makefile
index f64744f..23800b8 100644
--- a/arch/mips/alchemy/common/Makefile
+++ b/arch/mips/alchemy/common/Makefile
@@ -5,10 +5,5 @@
 # Makefile for the Alchemy Au1xx0 CPUs, generic files.
 #
 
-obj-y += prom.o time.o clock.o platform.o power.o \
+obj-y += prom.o time.o clock.o platform.o power.o gpiolib.o \
 	 setup.o sleeper.o dma.o dbdma.o vss.o irq.o usb.o
-
-# optional gpiolib support
-ifeq ($(CONFIG_ALCHEMY_GPIO_INDIRECT),)
- obj-$(CONFIG_GPIOLIB) += gpiolib.o
-endif
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
index 6cb60ab..4c496c5 100644
--- a/arch/mips/alchemy/common/irq.c
+++ b/arch/mips/alchemy/common/irq.c
@@ -491,7 +491,7 @@
 	default:
 		ret = -EINVAL;
 	}
-	__irq_set_chip_handler_name_locked(d->irq, chip, handler, name);
+	irq_set_chip_handler_name_locked(d, chip, handler, name);
 
 	wmb();
 
@@ -703,7 +703,7 @@
 		return -EINVAL;
 	}
 
-	__irq_set_chip_handler_name_locked(d->irq, &au1300_gpic, hdl, name);
+	irq_set_chip_handler_name_locked(d, &au1300_gpic, hdl, name);
 
 	au1300_gpic_chgcfg(d->irq - ALCHEMY_GPIC_INT_BASE, GPIC_CFG_IC_MASK, s);
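The two hunks above track a genirq API change: the old `__irq_set_chip_handler_name_locked(irq, ...)` took a descriptor number and had to look the descriptor up again, while the replacement operates directly on the `struct irq_data` the callback already holds. A hedged sketch of the call-site pattern:

```c
#include <linux/irq.h>

static struct irq_chip example_chip;	/* placeholder chip */

/* Illustrative irq_set_type() callback using the new locked helper;
 * the chip and handler choices here are placeholders. */
static int example_set_type(struct irq_data *d, unsigned int type)
{
	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_chip_handler_name_locked(d, &example_chip,
						 handle_level_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &example_chip,
						 handle_edge_irq, NULL);
	return IRQ_SET_MASK_OK;
}
```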
 
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index 50e17e1..f99d3ec 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -69,11 +69,6 @@
 	return 0;
 }
 
-static void au1x_rtcmatch2_set_mode(enum clock_event_mode mode,
-				    struct clock_event_device *cd)
-{
-}
-
 static irqreturn_t au1x_rtcmatch2_irq(int irq, void *dev_id)
 {
 	struct clock_event_device *cd = dev_id;
@@ -86,7 +81,6 @@
 	.features	= CLOCK_EVT_FEAT_ONESHOT,
 	.rating		= 1500,
 	.set_next_event = au1x_rtcmatch2_set_next_event,
-	.set_mode	= au1x_rtcmatch2_set_mode,
 	.cpumask	= cpu_all_mask,
 };
 
diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c
index c98c9ea..324ad72 100644
--- a/arch/mips/alchemy/devboards/bcsr.c
+++ b/arch/mips/alchemy/devboards/bcsr.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/irq.h>
@@ -88,10 +89,11 @@
 static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
 {
 	unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
+	struct irq_chip *chip = irq_desc_get_chip(d);
 
-	disable_irq_nosync(irq);
+	chained_irq_enter(chip, d);
 	generic_handle_irq(bcsr_csc_base + __ffs(bisr));
-	enable_irq(irq);
+	chained_irq_exit(chip, d);
 }
 
 static void bcsr_irq_mask(struct irq_data *d)
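The `chained_irq_enter()`/`chained_irq_exit()` pair is the canonical bracket for a demultiplexing handler: it masks and acks the parent interrupt through its own irq_chip callbacks instead of the heavyweight `disable_irq_nosync()`/`enable_irq()`, which can lose edges or recurse on some controllers. Generic shape of such a handler (the status read and child base are placeholders):

```c
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/bitops.h>

extern unsigned long example_read_status(void);	/* placeholder */
extern unsigned int example_child_base;		/* placeholder */

/* Hypothetical demux handler with the parent properly bracketed. */
static void example_demux(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long status = example_read_status();
	unsigned int bit;

	chained_irq_enter(chip, desc);
	for_each_set_bit(bit, &status, 16)
		generic_handle_irq(example_child_base + bit);
	chained_irq_exit(chip, desc);
}
```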
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index 001102e..bdeed9d 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -33,6 +33,7 @@
 #include <linux/spi/spi_gpio.h>
 #include <linux/spi/ads7846.h>
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <asm/mach-au1x00/au1000_dma.h>
 #include <asm/mach-au1x00/au1100_mmc.h>
 #include <asm/mach-db1x00/bcsr.h>
diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c
index 1c64fdb..b580770 100644
--- a/arch/mips/alchemy/devboards/db1300.c
+++ b/arch/mips/alchemy/devboards/db1300.c
@@ -24,6 +24,7 @@
 #include <linux/wm97xx.h>
 
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1300.h>
 #include <asm/mach-au1x00/au1100_mmc.h>
 #include <asm/mach-au1x00/au1200fb.h>
 #include <asm/mach-au1x00/au1xxx_dbdma.h>
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
index 0fd5177..5740bcf 100644
--- a/arch/mips/alchemy/devboards/db1550.c
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -20,6 +20,7 @@
 #include <linux/spi/flash.h>
 #include <asm/bootinfo.h>
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <asm/mach-au1x00/au1xxx_eth.h>
 #include <asm/mach-au1x00/au1xxx_dbdma.h>
 #include <asm/mach-au1x00/au1xxx_psc.h>
diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
index bfeb8f3..93024dc 100644
--- a/arch/mips/alchemy/devboards/pm.c
+++ b/arch/mips/alchemy/devboards/pm.c
@@ -9,7 +9,7 @@
 #include <linux/suspend.h>
 #include <linux/sysfs.h>
 #include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-au1x00/gpio.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <asm/mach-db1x00/bcsr.h>
 
 /*
diff --git a/arch/mips/ar7/gpio.c b/arch/mips/ar7/gpio.c
index d8dbd8f..f493045 100644
--- a/arch/mips/ar7/gpio.c
+++ b/arch/mips/ar7/gpio.c
@@ -21,7 +21,10 @@
 #include <linux/module.h>
 #include <linux/gpio.h>
 
-#include <asm/mach-ar7/gpio.h>
+#include <asm/mach-ar7/ar7.h>
+
+#define AR7_GPIO_MAX 32
+#define TITAN_GPIO_MAX 51
 
 struct ar7_gpio_chip {
 	void __iomem		*regs;
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index be9ff16..58fca9a 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -39,7 +39,6 @@
 
 #include <asm/addrspace.h>
 #include <asm/mach-ar7/ar7.h>
-#include <asm/mach-ar7/gpio.h>
 #include <asm/mach-ar7/prom.h>
 
 /*****************************************************************************
@@ -679,7 +678,8 @@
 	}
 
 	if (ar7_has_high_cpmac()) {
-		res = fixed_phy_add(PHY_POLL, cpmac_high.id, &fixed_phy_status);
+		res = fixed_phy_add(PHY_POLL, cpmac_high.id,
+				    &fixed_phy_status, -1);
 		if (!res) {
 			cpmac_get_mac(1, cpmac_high_data.dev_addr);
 
@@ -692,7 +692,7 @@
 	} else
 		cpmac_low_data.phy_mask = 0xffffffff;
 
-	res = fixed_phy_add(PHY_POLL, cpmac_low.id, &fixed_phy_status);
+	res = fixed_phy_add(PHY_POLL, cpmac_low.id, &fixed_phy_status, -1);
 	if (!res) {
 		cpmac_get_mac(0, cpmac_low_data.dev_addr);
 		res = platform_device_register(&cpmac_low);
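Both `fixed_phy_add()` call sites gain a fourth argument because the fixed-PHY core grew an optional link GPIO in this cycle; passing -1 preserves the old polled behaviour. The updated prototype, as I read it from include/linux/phy_fixed.h in this series:

```c
/* link_gpio == -1 means "no GPIO reports link state"; otherwise the
 * fixed PHY samples the given GPIO to decide link up/down. */
int fixed_phy_add(unsigned int irq, int phy_addr,
		  struct fixed_phy_status *status,
		  int link_gpio);
```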
diff --git a/arch/mips/ar7/setup.c b/arch/mips/ar7/setup.c
index 820b7a3..7bb9a67 100644
--- a/arch/mips/ar7/setup.c
+++ b/arch/mips/ar7/setup.c
@@ -23,7 +23,6 @@
 #include <asm/reboot.h>
 #include <asm/mach-ar7/ar7.h>
 #include <asm/mach-ar7/prom.h>
-#include <asm/mach-ar7/gpio.h>
 
 static void ar7_machine_restart(char *command)
 {
diff --git a/arch/mips/ath79/Makefile b/arch/mips/ath79/Makefile
index 5c9ff69..fcc382c 100644
--- a/arch/mips/ath79/Makefile
+++ b/arch/mips/ath79/Makefile
@@ -8,7 +8,7 @@
 # under the terms of the GNU General Public License version 2 as published
 # by the Free Software Foundation.
 
-obj-y	:= prom.o setup.o irq.o common.o clock.o gpio.o
+obj-y	:= prom.o setup.o irq.o common.o clock.o
 
 obj-$(CONFIG_EARLY_PRINTK)		+= early_printk.o
 obj-$(CONFIG_PCI)			+= pci.o
diff --git a/arch/mips/ath79/common.h b/arch/mips/ath79/common.h
index e5ea712..ca7cc19 100644
--- a/arch/mips/ath79/common.h
+++ b/arch/mips/ath79/common.h
@@ -25,9 +25,6 @@
 void ath79_ddr_ctrl_init(void);
 void ath79_ddr_wb_flush(unsigned int reg);
 
-void ath79_gpio_function_enable(u32 mask);
-void ath79_gpio_function_disable(u32 mask);
-void ath79_gpio_function_setup(u32 set, u32 clear);
 void ath79_gpio_init(void);
 
 #endif /* __ATH79_COMMON_H */
diff --git a/arch/mips/ath79/gpio.c b/arch/mips/ath79/gpio.c
deleted file mode 100644
index f59ccb2..0000000
--- a/arch/mips/ath79/gpio.c
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- *  Atheros AR71XX/AR724X/AR913X GPIO API support
- *
- *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
- *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
- *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/gpio.h>
-#include <linux/platform_data/gpio-ath79.h>
-#include <linux/of_device.h>
-
-#include <asm/mach-ath79/ar71xx_regs.h>
-#include <asm/mach-ath79/ath79.h>
-#include "common.h"
-
-static void __iomem *ath79_gpio_base;
-static u32 ath79_gpio_count;
-static DEFINE_SPINLOCK(ath79_gpio_lock);
-
-static void __ath79_gpio_set_value(unsigned gpio, int value)
-{
-	void __iomem *base = ath79_gpio_base;
-
-	if (value)
-		__raw_writel(1 << gpio, base + AR71XX_GPIO_REG_SET);
-	else
-		__raw_writel(1 << gpio, base + AR71XX_GPIO_REG_CLEAR);
-}
-
-static int __ath79_gpio_get_value(unsigned gpio)
-{
-	return (__raw_readl(ath79_gpio_base + AR71XX_GPIO_REG_IN) >> gpio) & 1;
-}
-
-static int ath79_gpio_get_value(struct gpio_chip *chip, unsigned offset)
-{
-	return __ath79_gpio_get_value(offset);
-}
-
-static void ath79_gpio_set_value(struct gpio_chip *chip,
-				  unsigned offset, int value)
-{
-	__ath79_gpio_set_value(offset, value);
-}
-
-static int ath79_gpio_direction_input(struct gpio_chip *chip,
-				       unsigned offset)
-{
-	void __iomem *base = ath79_gpio_base;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ath79_gpio_lock, flags);
-
-	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
-		     base + AR71XX_GPIO_REG_OE);
-
-	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
-
-	return 0;
-}
-
-static int ath79_gpio_direction_output(struct gpio_chip *chip,
-					unsigned offset, int value)
-{
-	void __iomem *base = ath79_gpio_base;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ath79_gpio_lock, flags);
-
-	if (value)
-		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
-	else
-		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
-
-	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
-		     base + AR71XX_GPIO_REG_OE);
-
-	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
-
-	return 0;
-}
-
-static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-	void __iomem *base = ath79_gpio_base;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ath79_gpio_lock, flags);
-
-	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
-		     base + AR71XX_GPIO_REG_OE);
-
-	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
-
-	return 0;
-}
-
-static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
-					int value)
-{
-	void __iomem *base = ath79_gpio_base;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ath79_gpio_lock, flags);
-
-	if (value)
-		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
-	else
-		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
-
-	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
-		     base + AR71XX_GPIO_REG_OE);
-
-	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
-
-	return 0;
-}
-
-static struct gpio_chip ath79_gpio_chip = {
-	.label			= "ath79",
-	.get			= ath79_gpio_get_value,
-	.set			= ath79_gpio_set_value,
-	.direction_input	= ath79_gpio_direction_input,
-	.direction_output	= ath79_gpio_direction_output,
-	.base			= 0,
-};
-
-static void __iomem *ath79_gpio_get_function_reg(void)
-{
-	u32 reg = 0;
-
-	if (soc_is_ar71xx() ||
-	    soc_is_ar724x() ||
-	    soc_is_ar913x() ||
-	    soc_is_ar933x())
-		reg = AR71XX_GPIO_REG_FUNC;
-	else if (soc_is_ar934x())
-		reg = AR934X_GPIO_REG_FUNC;
-	else
-		BUG();
-
-	return ath79_gpio_base + reg;
-}
-
-void ath79_gpio_function_setup(u32 set, u32 clear)
-{
-	void __iomem *reg = ath79_gpio_get_function_reg();
-	unsigned long flags;
-
-	spin_lock_irqsave(&ath79_gpio_lock, flags);
-
-	__raw_writel((__raw_readl(reg) & ~clear) | set, reg);
-	/* flush write */
-	__raw_readl(reg);
-
-	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
-}
-
-void ath79_gpio_function_enable(u32 mask)
-{
-	ath79_gpio_function_setup(mask, 0);
-}
-
-void ath79_gpio_function_disable(u32 mask)
-{
-	ath79_gpio_function_setup(0, mask);
-}
-
-static const struct of_device_id ath79_gpio_of_match[] = {
-	{ .compatible = "qca,ar7100-gpio" },
-	{ .compatible = "qca,ar9340-gpio" },
-	{},
-};
-
-static int ath79_gpio_probe(struct platform_device *pdev)
-{
-	struct ath79_gpio_platform_data *pdata = pdev->dev.platform_data;
-	struct device_node *np = pdev->dev.of_node;
-	struct resource *res;
-	bool oe_inverted;
-	int err;
-
-	if (np) {
-		err = of_property_read_u32(np, "ngpios", &ath79_gpio_count);
-		if (err) {
-			dev_err(&pdev->dev, "ngpios property is not valid\n");
-			return err;
-		}
-		if (ath79_gpio_count >= 32) {
-			dev_err(&pdev->dev, "ngpios must be less than 32\n");
-			return -EINVAL;
-		}
-		oe_inverted = of_device_is_compatible(np, "qca,ar9340-gpio");
-	} else if (pdata) {
-		ath79_gpio_count = pdata->ngpios;
-		oe_inverted = pdata->oe_inverted;
-	} else {
-		dev_err(&pdev->dev, "No DT node or platform data found\n");
-		return -EINVAL;
-	}
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	ath79_gpio_base = devm_ioremap_nocache(
-		&pdev->dev, res->start, resource_size(res));
-	if (!ath79_gpio_base)
-		return -ENOMEM;
-
-	ath79_gpio_chip.dev = &pdev->dev;
-	ath79_gpio_chip.ngpio = ath79_gpio_count;
-	if (oe_inverted) {
-		ath79_gpio_chip.direction_input = ar934x_gpio_direction_input;
-		ath79_gpio_chip.direction_output = ar934x_gpio_direction_output;
-	}
-
-	err = gpiochip_add(&ath79_gpio_chip);
-	if (err) {
-		dev_err(&pdev->dev,
-			"cannot add AR71xx GPIO chip, error=%d", err);
-		return err;
-	}
-
-	return 0;
-}
-
-static struct platform_driver ath79_gpio_driver = {
-	.driver = {
-		.name = "ath79-gpio",
-		.of_match_table	= ath79_gpio_of_match,
-	},
-	.probe = ath79_gpio_probe,
-};
-
-module_platform_driver(ath79_gpio_driver);
-
-int gpio_get_value(unsigned gpio)
-{
-	if (gpio < ath79_gpio_count)
-		return __ath79_gpio_get_value(gpio);
-
-	return __gpio_get_value(gpio);
-}
-EXPORT_SYMBOL(gpio_get_value);
-
-void gpio_set_value(unsigned gpio, int value)
-{
-	if (gpio < ath79_gpio_count)
-		__ath79_gpio_set_value(gpio, value);
-	else
-		__gpio_set_value(gpio, value);
-}
-EXPORT_SYMBOL(gpio_set_value);
-
-int gpio_to_irq(unsigned gpio)
-{
-	/* FIXME */
-	return -EINVAL;
-}
-EXPORT_SYMBOL(gpio_to_irq);
-
-int irq_to_gpio(unsigned irq)
-{
-	/* FIXME */
-	return -EINVAL;
-}
-EXPORT_SYMBOL(irq_to_gpio);
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index 2021be2..807132b 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -123,8 +123,6 @@
 {
 	u32 status;
 
-	disable_irq_nosync(irq);
-
 	status = ath79_reset_rr(AR934X_RESET_REG_PCIE_WMAC_INT_STATUS);
 
 	if (status & AR934X_PCIE_WMAC_INT_PCIE_ALL) {
@@ -136,8 +134,6 @@
 	} else {
 		spurious_interrupt();
 	}
-
-	enable_irq(irq);
 }
 
 static void ar934x_ip2_irq_init(void)
@@ -156,14 +152,12 @@
 {
 	u32 status;
 
-	disable_irq_nosync(irq);
-
 	status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
 	status &= QCA955X_EXT_INT_PCIE_RC1_ALL | QCA955X_EXT_INT_WMAC_ALL;
 
 	if (status == 0) {
 		spurious_interrupt();
-		goto enable;
+		return;
 	}
 
 	if (status & QCA955X_EXT_INT_PCIE_RC1_ALL) {
@@ -175,17 +169,12 @@
 		/* TODO: flush DDR? */
 		generic_handle_irq(ATH79_IP2_IRQ(1));
 	}
-
-enable:
-	enable_irq(irq);
 }
 
 static void qca955x_ip3_irq_dispatch(unsigned int irq, struct irq_desc *desc)
 {
 	u32 status;
 
-	disable_irq_nosync(irq);
-
 	status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
 	status &= QCA955X_EXT_INT_PCIE_RC2_ALL |
 		  QCA955X_EXT_INT_USB1 |
@@ -193,7 +182,7 @@
 
 	if (status == 0) {
 		spurious_interrupt();
-		goto enable;
+		return;
 	}
 
 	if (status & QCA955X_EXT_INT_USB1) {
@@ -210,9 +199,6 @@
 		/* TODO: flush DDR? */
 		generic_handle_irq(ATH79_IP3_IRQ(2));
 	}
-
-enable:
-	enable_irq(irq);
 }
 
 static void qca955x_irq_init(void)
diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c
index 08a4abf..52caa75 100644
--- a/arch/mips/bcm47xx/buttons.c
+++ b/arch/mips/bcm47xx/buttons.c
@@ -396,10 +396,9 @@
 {
 	size_t size = nbuttons * sizeof(*buttons);
 
-	bcm47xx_button_pdata.buttons = kmalloc(size, GFP_KERNEL);
+	bcm47xx_button_pdata.buttons = kmemdup(buttons, size, GFP_KERNEL);
 	if (!bcm47xx_button_pdata.buttons)
 		return -ENOMEM;
-	memcpy(bcm47xx_button_pdata.buttons, buttons, size);
 	bcm47xx_button_pdata.nbuttons = nbuttons;
 
 	return 0;
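`kmemdup()` is exactly the allocate-then-copy pattern it replaces, so the conversion above is behaviour-preserving. Its effective implementation (restated from mm/util.c):

```c
#include <linux/slab.h>
#include <linux/string.h>

/* Restatement of kmemdup() for reference. */
static void *kmemdup_equivalent(const void *src, size_t len, gfp_t gfp)
{
	void *p = kmalloc_track_caller(len, gfp);

	if (p)
		memcpy(p, src, len);
	return p;
}
```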
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 98c075f..17503a0 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -263,7 +263,7 @@
 	bcm47xx_leds_register();
 	bcm47xx_workarounds();
 
-	fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status);
+	fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status, -1);
 	return 0;
 }
 device_initcall(bcm47xx_register_bus_complete);
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index e3e808a..1a47ec2 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -60,7 +60,7 @@
 	if (m)
 		enable &= cpumask_test_cpu(cpu, m);
 	else if (irqd_affinity_was_set(d))
-		enable &= cpumask_test_cpu(cpu, d->affinity);
+		enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
 #endif
 	return enable;
 }
@@ -365,9 +365,9 @@
 
 	irqd_set_trigger_type(d, flow_type);
 	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 	return IRQ_SET_MASK_OK_NOCOPY;
 }
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index dc91bde..d5bdee1 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -78,7 +78,7 @@
 
 vmlinuzobjs-y += $(obj)/piggy.o
 
-quiet_cmd_zld = LD	$@
+quiet_cmd_zld = LD      $@
       cmd_zld = $(LD) $(LDFLAGS) -Ttext $(VMLINUZ_LOAD_ADDRESS) -T $< $(vmlinuzobjs-y) -o $@
 quiet_cmd_strip = STRIP	  $@
       cmd_strip = $(STRIP) -s $@
diff --git a/arch/mips/boot/dts/netlogic/xlp_evp.dts b/arch/mips/boot/dts/netlogic/xlp_evp.dts
index 89ad048..ec16ec2d 100644
--- a/arch/mips/boot/dts/netlogic/xlp_evp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_evp.dts
@@ -110,6 +110,18 @@
 				read-only;
 			};
 		};
+
+		gpio: xlp_gpio@34100 {
+			compatible = "netlogic,xlp832-gpio";
+			reg = <0 0x34100 0x1000>;
+			#gpio-cells = <2>;
+			gpio-controller;
+
+			#interrupt-cells = <2>;
+			interrupt-parent = <&pic>;
+			interrupts = <39>;
+			interrupt-controller;
+		};
 	};
 
 	chosen {
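The same two-cell GPIO controller node is added to each XLP board file below, varying only the compatible string and base address. With `#gpio-cells = <2>`, a consumer node references a line as `<&gpio N FLAGS>`; on the kernel side, a driver would pick it up through gpiolib, e.g. (hedged consumer sketch, names invented):

```c
#include <linux/device.h>
#include <linux/gpio/consumer.h>

/* Hypothetical consumer: request a "reset" GPIO that a board DTS
 * wired to the xlp_gpio controller declared above. */
static int example_get_reset(struct device *dev)
{
	struct gpio_desc *gd = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);

	if (IS_ERR(gd))
		return PTR_ERR(gd);
	gpiod_set_value(gd, 1);		/* assert reset */
	return 0;
}
```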
diff --git a/arch/mips/boot/dts/netlogic/xlp_fvp.dts b/arch/mips/boot/dts/netlogic/xlp_fvp.dts
index 63e62b7..4bcebe6 100644
--- a/arch/mips/boot/dts/netlogic/xlp_fvp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_fvp.dts
@@ -110,6 +110,18 @@
 				read-only;
 			};
 		};
+
+		gpio: xlp_gpio@34100 {
+			compatible = "netlogic,xlp208-gpio";
+			reg = <0 0x34100 0x1000>;
+			#gpio-cells = <2>;
+			gpio-controller;
+
+			#interrupt-cells = <2>;
+			interrupt-parent = <&pic>;
+			interrupts = <39>;
+			interrupt-controller;
+		};
 	};
 
 	chosen {
diff --git a/arch/mips/boot/dts/netlogic/xlp_gvp.dts b/arch/mips/boot/dts/netlogic/xlp_gvp.dts
index bb4ecd1..b3ccb82 100644
--- a/arch/mips/boot/dts/netlogic/xlp_gvp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_gvp.dts
@@ -69,6 +69,17 @@
 			};
 		};
 
+		gpio: xlp_gpio@114100 {
+			compatible = "netlogic,xlp980-gpio";
+			reg = <0 0x114100 0x1000>;
+			#gpio-cells = <2>;
+			gpio-controller;
+
+			#interrupt-cells = <2>;
+			interrupt-parent = <&pic>;
+			interrupts = <39>;
+			interrupt-controller;
+		};
 	};
 
 	chosen {
diff --git a/arch/mips/boot/dts/netlogic/xlp_rvp.dts b/arch/mips/boot/dts/netlogic/xlp_rvp.dts
index 7188aed..3783639a 100644
--- a/arch/mips/boot/dts/netlogic/xlp_rvp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_rvp.dts
@@ -69,6 +69,17 @@
 			};
 		};
 
+		gpio: xlp_gpio@114100 {
+			compatible = "netlogic,xlp532-gpio";
+			reg = <0 0x114100 0x1000>;
+			#gpio-cells = <2>;
+			gpio-controller;
+
+			#interrupt-cells = <2>;
+			interrupt-parent = <&pic>;
+			interrupts = <39>;
+			interrupt-controller;
+		};
 	};
 
 	chosen {
diff --git a/arch/mips/boot/dts/netlogic/xlp_svp.dts b/arch/mips/boot/dts/netlogic/xlp_svp.dts
index 1ebd00ed..44d6640 100644
--- a/arch/mips/boot/dts/netlogic/xlp_svp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_svp.dts
@@ -110,6 +110,18 @@
 				read-only;
 			};
 		};
+
+		gpio: xlp_gpio@34100 {
+			compatible = "netlogic,xlp316-gpio";
+			reg = <0 0x34100 0x1000>;
+			#gpio-cells = <2>;
+			gpio-controller;
+
+			#interrupt-cells = <2>;
+			interrupt-parent = <&pic>;
+			interrupts = <39>;
+			interrupt-controller;
+		};
 	};
 
 	chosen {
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index 9eb0fee..36e30d6 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -195,6 +195,12 @@
 			return 8;
 		else
 			return -1;
+	case CVMX_BOARD_TYPE_KONTRON_S1901:
+		if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
+			return 1;
+		else
+			return -1;
+
 	}
 
 	/* Some unknown board. Somebody forgot to update this function... */
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-util.c b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
index 453d7f6..b45b297 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
@@ -95,9 +95,9 @@
 	uint8_t *data_address;
 	uint8_t *end_of_data;
 
-	cvmx_dprintf("Packet Length:   %u\n", work->len);
-	cvmx_dprintf("	  Input Port:  %u\n", work->ipprt);
-	cvmx_dprintf("	  QoS:	       %u\n", work->qos);
+	cvmx_dprintf("Packet Length:   %u\n", work->word1.len);
+	cvmx_dprintf("	  Input Port:  %u\n", cvmx_wqe_get_port(work));
+	cvmx_dprintf("	  QoS:	       %u\n", cvmx_wqe_get_qos(work));
 	cvmx_dprintf("	  Buffers:     %u\n", work->word2.s.bufs);
 
 	if (work->word2.s.bufs == 0) {
@@ -127,7 +127,7 @@
 		}
 	} else
 		buffer_ptr = work->packet_ptr;
-	remaining_bytes = work->len;
+	remaining_bytes = work->word1.len;
 
 	while (remaining_bytes) {
 		start_of_buffer =
@@ -382,6 +382,10 @@
 		return port + 32;
 	case 3:
 		return port + 36;
+	case 4:
+		return port + 40;
+	case 5:
+		return port + 44;
 	}
 	return -1;
 }
@@ -404,6 +408,10 @@
 		return 2;
 	else if (ipd_port < 40)
 		return 3;
+	else if (ipd_port < 44)
+		return 4;
+	else if (ipd_port < 48)
+		return 5;
 	else
 		cvmx_dprintf("cvmx_helper_get_interface_num: Illegal IPD "
 			     "port number\n");
@@ -428,6 +436,10 @@
 		return ipd_port & 3;
 	else if (ipd_port < 40)
 		return ipd_port & 3;
+	else if (ipd_port < 44)
+		return ipd_port & 3;
+	else if (ipd_port < 48)
+		return ipd_port & 3;
 	else
 		cvmx_dprintf("cvmx_helper_get_interface_index_num: "
 			     "Illegal IPD port number\n");
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
index 7653b7e..a56ee59 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
@@ -124,6 +124,13 @@
 	union cvmx_gmxx_tx_int_en gmx_tx_int_en;
 	union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
 
+	/* Setup PKND */
+	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+		gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+		gmx_cfg.s.pknd = cvmx_helper_get_ipd_port(interface, 0);
+		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+	}
+
 	/* (1) Interface has already been enabled. */
 
 	/* (2) Disable GMX. */
@@ -151,7 +158,12 @@
 	/* (4)c Apply reset sequence */
 	xauiCtl.u64 = cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
 	xauiCtl.s.lo_pwr = 0;
-	xauiCtl.s.reset = 1;
+
+	/* Issuing a reset here seems to hang some CN68XX chips. */
+	if (!OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X) &&
+	    !OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X))
+		xauiCtl.s.reset = 1;
+
 	cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface), xauiCtl.u64);
 
 	/* Wait for PCS to come out of reset */
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index 7e5cf7a..376701f 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -83,6 +83,8 @@
  */
 int cvmx_helper_get_number_of_interfaces(void)
 {
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return 9;
 	if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
 		return 4;
 	else
@@ -656,6 +658,21 @@
 	fau_to.s.tout_val = 0xfff;
 	fau_to.s.tout_enb = 0;
 	cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		union cvmx_pko_reg_min_pkt min_pkt;
+
+		min_pkt.u64 = 0;
+		min_pkt.s.size1 = 59;
+		min_pkt.s.size2 = 59;
+		min_pkt.s.size3 = 59;
+		min_pkt.s.size4 = 59;
+		min_pkt.s.size5 = 59;
+		min_pkt.s.size6 = 59;
+		min_pkt.s.size7 = 59;
+		cvmx_write_csr(CVMX_PKO_REG_MIN_PKT, min_pkt.u64);
+	}
+
 	return 0;
 }
 
diff --git a/arch/mips/cavium-octeon/executive/cvmx-pko.c b/arch/mips/cavium-octeon/executive/cvmx-pko.c
index 008b881..87be167 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-pko.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-pko.c
@@ -39,6 +39,143 @@
  * Internal state of packet output
  */
 
+static int __cvmx_pko_int(int interface, int index)
+{
+	switch (interface) {
+	case 0:
+		return index;
+	case 1:
+		return 4;
+	case 2:
+		return index + 0x08;
+	case 3:
+		return index + 0x0c;
+	case 4:
+		return index + 0x10;
+	case 5:
+		return 0x1c;
+	case 6:
+		return 0x1d;
+	case 7:
+		return 0x1e;
+	case 8:
+		return 0x1f;
+	default:
+		return -1;
+	}
+}
+
+static void __cvmx_pko_iport_config(int pko_port)
+{
+	int queue;
+	const int num_queues = 1;
+	const int base_queue = pko_port;
+	const int static_priority_end = 1;
+	const int static_priority_base = 1;
+
+	for (queue = 0; queue < num_queues; queue++) {
+		union cvmx_pko_mem_iqueue_ptrs config;
+		cvmx_cmd_queue_result_t cmd_res;
+		uint64_t *buf_ptr;
+
+		config.u64		= 0;
+		config.s.index		= queue;
+		config.s.qid		= base_queue + queue;
+		config.s.ipid		= pko_port;
+		config.s.tail		= (queue == (num_queues - 1));
+		config.s.s_tail		= (queue == static_priority_end);
+		config.s.static_p	= (static_priority_base >= 0);
+		config.s.static_q	= (queue <= static_priority_end);
+		config.s.qos_mask	= 0xff;
+
+		cmd_res = cvmx_cmd_queue_initialize(
+				CVMX_CMD_QUEUE_PKO(base_queue + queue),
+				CVMX_PKO_MAX_QUEUE_DEPTH,
+				CVMX_FPA_OUTPUT_BUFFER_POOL,
+				(CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE -
+				 CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST * 8));
+
+		WARN(cmd_res,
+		     "%s: cmd_res=%d pko_port=%d base_queue=%d num_queues=%d queue=%d\n",
+			__func__, (int)cmd_res, pko_port, base_queue,
+			num_queues, queue);
+
+		buf_ptr = (uint64_t *)cvmx_cmd_queue_buffer(
+				CVMX_CMD_QUEUE_PKO(base_queue + queue));
+		config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr) >> 7;
+		CVMX_SYNCWS;
+		cvmx_write_csr(CVMX_PKO_MEM_IQUEUE_PTRS, config.u64);
+	}
+}
+
+static void __cvmx_pko_queue_alloc_o68(void)
+{
+	int port;
+
+	for (port = 0; port < 48; port++)
+		__cvmx_pko_iport_config(port);
+}
+
+static void __cvmx_pko_port_map_o68(void)
+{
+	int port;
+	int interface, index;
+	cvmx_helper_interface_mode_t mode;
+	union cvmx_pko_mem_iport_ptrs config;
+
+	/*
+	 * Initialize every iport with the invalid eid.
+	 */
+	config.u64 = 0;
+	config.s.eid = 31; /* Invalid */
+	for (port = 0; port < 128; port++) {
+		config.s.ipid = port;
+		cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
+	}
+
+	/*
+	 * Set up PKO_MEM_IPORT_PTRS
+	 */
+	for (port = 0; port < 48; port++) {
+		interface = cvmx_helper_get_interface_num(port);
+		index = cvmx_helper_get_interface_index_num(port);
+		mode = cvmx_helper_interface_get_mode(interface);
+		if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
+			continue;
+
+		config.s.ipid = port;
+		config.s.qos_mask = 0xff;
+		config.s.crc = 1;
+		config.s.min_pkt = 1;
+		config.s.intr = __cvmx_pko_int(interface, index);
+		config.s.eid = config.s.intr;
+		config.s.pipe = (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) ?
+			index : port;
+		cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
+	}
+}
+
+static void __cvmx_pko_chip_init(void)
+{
+	int i;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		__cvmx_pko_port_map_o68();
+		__cvmx_pko_queue_alloc_o68();
+		return;
+	}
+
+	/*
+	 * Initialize queues
+	 */
+	for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++) {
+		const uint64_t priority = 8;
+
+		cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
+				     &priority);
+	}
+}
+
 /**
  * Call before any other calls to initialize the packet
  * output system.  This does chip global config, and should only be
@@ -47,8 +184,6 @@
 
 void cvmx_pko_initialize_global(void)
 {
-	int i;
-	uint64_t priority = 8;
 	union cvmx_pko_reg_cmd_buf config;
 
 	/*
@@ -62,9 +197,10 @@
 
 	cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);
 
-	for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++)
-		cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
-				     &priority);
+	/*
+	 * Chip-specific setup.
+	 */
+	__cvmx_pko_chip_init();
 
 	/*
 	 * If we aren't using all of the queues optimize PKO's
@@ -212,6 +348,9 @@
 	int static_priority_base = -1;
 	int static_priority_end = -1;
 
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return CVMX_PKO_SUCCESS;
+
 	if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
 	    && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
 		cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index d8124a3..f26c3c6 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -225,13 +225,14 @@
 
 #ifdef CONFIG_SMP
 	int cpu;
-	int weight = cpumask_weight(data->affinity);
+	struct cpumask *mask = irq_data_get_affinity_mask(data);
+	int weight = cpumask_weight(mask);
 	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
 
 	if (weight > 1) {
 		cpu = cd->current_cpu;
 		for (;;) {
-			cpu = cpumask_next(cpu, data->affinity);
+			cpu = cpumask_next(cpu, mask);
 			if (cpu >= nr_cpu_ids) {
 				cpu = -1;
 				continue;
@@ -240,7 +241,7 @@
 			}
 		}
 	} else if (weight == 1) {
-		cpu = cpumask_first(data->affinity);
+		cpu = cpumask_first(mask);
 	} else {
 		cpu = smp_processor_id();
 	}
@@ -662,6 +663,11 @@
 	irqd_set_trigger_type(data, t);
 	octeon_irq_gpio_setup(data);
 
+	if (irqd_get_trigger_type(data) & IRQ_TYPE_EDGE_BOTH)
+		irq_set_handler_locked(data, handle_edge_irq);
+	else
+		irq_set_handler_locked(data, handle_level_irq);
+
 	return IRQ_SET_MASK_OK;
 }
 
@@ -696,32 +702,23 @@
 	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
 }
 
-static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc)
-{
-	struct irq_data *data = irq_desc_get_irq_data(desc);
-
-	if (irqd_get_trigger_type(data) & IRQ_TYPE_EDGE_BOTH)
-		handle_edge_irq(irq, desc);
-	else
-		handle_level_irq(irq, desc);
-}
-
 #ifdef CONFIG_SMP
 
 static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
 {
 	int cpu = smp_processor_id();
 	cpumask_t new_affinity;
+	struct cpumask *mask = irq_data_get_affinity_mask(data);
 
-	if (!cpumask_test_cpu(cpu, data->affinity))
+	if (!cpumask_test_cpu(cpu, mask))
 		return;
 
-	if (cpumask_weight(data->affinity) > 1) {
+	if (cpumask_weight(mask) > 1) {
 		/*
 		 * It has multi CPU affinity, just remove this CPU
 		 * from the affinity set.
 		 */
-		cpumask_copy(&new_affinity, data->affinity);
+		cpumask_copy(&new_affinity, mask);
 		cpumask_clear_cpu(cpu, &new_affinity);
 	} else {
 		/* Otherwise, put it on lowest numbered online CPU. */
@@ -1227,8 +1224,13 @@
 		octeon_irq_ciu_to_irq[line][bit] != 0)
 		return -EINVAL;
 
+	/*
+	 * Default to handle_level_irq. If the DT contains a different
+	 * trigger type, it will call the irq_set_type callback and
+	 * the handler gets updated.
+	 */
 	r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
-		octeon_irq_gpio_chip, octeon_irq_handle_trigger);
+				       octeon_irq_gpio_chip, handle_level_irq);
 	return r;
 }
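The `data->affinity` to `irq_data_get_affinity_mask(data)` substitutions throughout this file follow the 4.3 genirq change that moved the affinity mask into `struct irq_common_data`; the accessor keeps chip drivers source-compatible. Typical use, restated:

```c
#include <linux/irq.h>
#include <linux/cpumask.h>

/* Pick a target CPU from an irq's affinity mask via the accessor. */
static int example_pick_cpu(struct irq_data *data)
{
	struct cpumask *mask = irq_data_get_affinity_mask(data);

	if (cpumask_weight(mask) >= 1)
		return cpumask_first(mask);
	return smp_processor_id();
}
```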
 
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 1f85460..40ec4ca 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -16,6 +16,5 @@
 generic-y += segment.h
 generic-y += serial.h
 generic-y += trace_clock.h
-generic-y += ucontext.h
 generic-y += user.h
 generic-y += xor.h
diff --git a/arch/mips/include/asm/abi.h b/arch/mips/include/asm/abi.h
index 7186bb5..37f8407 100644
--- a/arch/mips/include/asm/abi.h
+++ b/arch/mips/include/asm/abi.h
@@ -20,6 +20,10 @@
 				     struct pt_regs *regs, sigset_t *set);
 	const unsigned long	rt_signal_return_offset;
 	const unsigned long	restart;
+
+	unsigned	off_sc_fpregs;
+	unsigned	off_sc_fpc_csr;
+	unsigned	off_sc_used_math;
 };
 
 #endif /* _ASM_ABI_H */
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 76317a7..867f924 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -232,6 +232,30 @@
 	.set	pop
 	.endm
 
+	.macro	ld_b	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	ld.b	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
+	.macro	ld_h	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	ld.h	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
+	.macro	ld_w	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	ld.w	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
 	.macro	ld_d	wd, off, base
 	.set	push
 	.set	mips32r2
@@ -241,6 +265,30 @@
 	.set	pop
 	.endm
 
+	.macro	st_b	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	st.b	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
+	.macro	st_h	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	st.h	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
+	.macro	st_w	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	st.w	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
 	.macro	st_d	wd, off, base
 	.set	push
 	.set	mips32r2
@@ -290,7 +338,13 @@
 #ifdef CONFIG_CPU_MICROMIPS
 #define CFC_MSA_INSN		0x587e0056
 #define CTC_MSA_INSN		0x583e0816
+#define LDB_MSA_INSN		0x58000807
+#define LDH_MSA_INSN		0x58000817
+#define LDW_MSA_INSN		0x58000827
 #define LDD_MSA_INSN		0x58000837
+#define STB_MSA_INSN		0x5800080f
+#define STH_MSA_INSN		0x5800081f
+#define STW_MSA_INSN		0x5800082f
 #define STD_MSA_INSN		0x5800083f
 #define COPY_UW_MSA_INSN	0x58f00056
 #define COPY_UD_MSA_INSN	0x58f80056
@@ -299,7 +353,13 @@
 #else
 #define CFC_MSA_INSN		0x787e0059
 #define CTC_MSA_INSN		0x783e0819
+#define LDB_MSA_INSN		0x78000820
+#define LDH_MSA_INSN		0x78000821
+#define LDW_MSA_INSN		0x78000822
 #define LDD_MSA_INSN		0x78000823
+#define STB_MSA_INSN		0x78000824
+#define STH_MSA_INSN		0x78000825
+#define STW_MSA_INSN		0x78000826
 #define STD_MSA_INSN		0x78000827
 #define COPY_UW_MSA_INSN	0x78f00059
 #define COPY_UD_MSA_INSN	0x78f80059
@@ -329,6 +389,33 @@
 	.set	pop
 	.endm
 
+	.macro	ld_b	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	LDB_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
+	.macro	ld_h	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	LDH_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
+	.macro	ld_w	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	LDW_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
 	.macro	ld_d	wd, off, base
 	.set	push
 	.set	noat
@@ -338,6 +425,33 @@
 	.set	pop
 	.endm
 
+	.macro	st_b	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	STB_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
+	.macro	st_h	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	STH_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
+	.macro	st_w	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	STW_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
 	.macro	st_d	wd, off, base
 	.set	push
 	.set	noat
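Toolchains without MSA support cannot assemble `ld.b`/`st.b` and friends, so the fallback macros emit the instruction words directly, OR-ing the vector register number into bits [10:6]. The encoding used by the `.word` forms, restated in C:

```c
/* wd (0..31) lands in bits [10:6] of the opcode word; the base opcodes
 * are the LDx/STx_MSA_INSN constants defined above (the non-microMIPS
 * value is shown here). */
static inline unsigned int msa_ld_b_word(unsigned int wd)
{
	return 0x78000820 | (wd << 6);	/* LDB_MSA_INSN | (wd << 6) */
}
```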
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 26d4363..4c42fd9 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -137,6 +137,10 @@
 ATOMIC_OPS(add, +=, addu)
 ATOMIC_OPS(sub, -=, subu)
 
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -416,6 +420,9 @@
 
 ATOMIC64_OPS(add, +=, daddu)
 ATOMIC64_OPS(sub, -=, dsubu)
+ATOMIC64_OP(and, &=, and)
+ATOMIC64_OP(or, |=, or)
+ATOMIC64_OP(xor, ^=, xor)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
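These hunks wire MIPS into the 4.3 series that made `atomic_{and,or,xor}()` mandatory on all architectures. Each `ATOMIC_OP(op, c_op, asm_op)` expansion generates a `void atomic_##op(int i, atomic_t *v)`; on the non-LL/SC fallback path that amounts to (hedged restatement):

```c
#include <linux/atomic.h>
#include <linux/irqflags.h>

/* Shape of the generated atomic_and() on the irq-disable fallback path;
 * LL/SC-capable CPUs use an inline-asm loop with the "and" opcode. */
static inline void atomic_and_shape(int i, atomic_t *v)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	v->counter &= i;
	raw_local_irq_restore(flags);
}
```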
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 7ecba84..752e0b8 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -133,12 +133,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
 	___p1;								\
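`ACCESS_ONCE()` relied on a volatile cast that is only sound for scalar types; `READ_ONCE()`/`WRITE_ONCE()` handle aggregates as well and are the preferred spelling, so the acquire/release barriers switch over. Call-site usage is unchanged; a minimal paired sketch:

```c
#include <linux/compiler.h>

/* Publisher/consumer pair built on the accessors and the barrier
 * macros defined above. */
static void example_publish(int *flag, int *data, int v)
{
	WRITE_ONCE(*data, v);
	smp_store_release(flag, 1);
}

static int example_consume(int *flag, int *data)
{
	if (smp_load_acquire(flag))	/* pairs with the release above */
		return READ_ONCE(*data);
	return -1;
}
```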
diff --git a/arch/mips/include/asm/cdmm.h b/arch/mips/include/asm/cdmm.h
index 16e22ce..bece206 100644
--- a/arch/mips/include/asm/cdmm.h
+++ b/arch/mips/include/asm/cdmm.h
@@ -53,7 +53,7 @@
  * mips_cdmm_phys_base() - Choose a physical base address for CDMM region.
  *
  * Picking a suitable physical address at which to map the CDMM region is
- * platform specific, so this weak function can be defined by platform code to
+ * platform specific, so this function can be defined by platform code to
  * pick a suitable value if none is configured by the bootloader.
  *
  * This address must be 32kB aligned, and the region occupies a maximum of 32kB
@@ -61,7 +61,7 @@
  *
  * Returns:	Physical base address for CDMM region, or 0 on failure.
  */
-phys_addr_t __weak mips_cdmm_phys_base(void);
+phys_addr_t mips_cdmm_phys_base(void);
 
 extern struct bus_type mips_cdmm_bustype;
 void __iomem *mips_cdmm_early_probe(unsigned int dev_type);
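Dropping `__weak` from the declaration here (and in the irq.h and maar.h hunks further down, along with the stray `#define __weak` in linkage.h) follows the rule that weak linkage belongs on the default *definition*: a weak declaration makes every definition weak, and the linker may then keep an arbitrary one. The intended pattern:

```c
#include <linux/compiler.h>
#include <linux/types.h>

/* Header: plain declaration. */
phys_addr_t mips_cdmm_phys_base(void);

/* Generic default definition, overridable by a platform's strong one. */
phys_addr_t __weak mips_cdmm_phys_base(void)
{
	return 0;	/* no suitable CDMM base known */
}
```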
diff --git a/arch/mips/include/asm/cevt-r4k.h b/arch/mips/include/asm/cevt-r4k.h
index f0edf6f..2e13a03 100644
--- a/arch/mips/include/asm/cevt-r4k.h
+++ b/arch/mips/include/asm/cevt-r4k.h
@@ -21,7 +21,6 @@
 
 void mips_event_handler(struct clock_event_device *dev);
 int c0_compare_int_usable(void);
-void mips_set_clock_mode(enum clock_event_mode, struct clock_event_device *);
 irqreturn_t c0_compare_interrupt(int, void *);
 
 extern struct irqaction c0_compare_irqaction;
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index f25de77..9801ac9 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -411,4 +411,8 @@
 # define cpu_has_cdmm		(cpu_data[0].options & MIPS_CPU_CDMM)
 #endif
 
+#ifndef cpu_has_small_pages
+# define cpu_has_small_pages	(cpu_data[0].options & MIPS_CPU_SP)
+#endif
+
 #endif /* __ASM_CPU_FEATURES_H */
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index d41e8e2..abee2bf 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -77,6 +77,10 @@
 	 */
 #endif
 
+#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R6
+	case CPU_I6400:
+#endif
+
 #ifdef CONFIG_SYS_HAS_CPU_R3000
 	case CPU_R2000:
 	case CPU_R3000:
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index e46e406..cd89e98 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -120,6 +120,7 @@
 #define PRID_IMP_PROAPTIV_MP	0xa300
 #define PRID_IMP_M5150		0xa700
 #define PRID_IMP_P5600		0xa800
+#define PRID_IMP_I6400		0xa900
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -307,6 +308,7 @@
 	CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
 	CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC,
 	CPU_M14KEC, CPU_INTERAPTIV, CPU_P5600, CPU_PROAPTIV, CPU_1074K, CPU_M5150,
+	CPU_I6400,
 
 	/*
 	 * MIPS64 class processors
@@ -382,6 +384,7 @@
 #define MIPS_CPU_XPA		0x2000000000ull /* CPU supports Extended Physical Addressing */
 #define MIPS_CPU_CDMM		0x4000000000ull	/* CPU has Common Device Memory Map */
 #define MIPS_CPU_BP_GHIST	0x8000000000ull /* R12K+ Branch Prediction Global History */
+#define MIPS_CPU_SP		0x10000000000ull /* Small (1KB) page support */
 
 /*
  * CPU ASE encodings
diff --git a/arch/mips/include/asm/debug.h b/arch/mips/include/asm/debug.h
deleted file mode 100644
index 1fd5a2b..0000000
--- a/arch/mips/include/asm/debug.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Debug macros for run-time debugging.
- * Turned on/off with CONFIG_RUNTIME_DEBUG option.
- *
- * Copyright (C) 2001 MontaVista Software Inc.
- * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef _ASM_DEBUG_H
-#define _ASM_DEBUG_H
-
-
-/*
- * run-time macros for catching spurious errors.  Eable CONFIG_RUNTIME_DEBUG in
- * kernel hacking config menu to use them.
- *
- * Use them as run-time debugging aid.  NEVER USE THEM AS ERROR HANDLING CODE!!!
- */
-
-#ifdef CONFIG_RUNTIME_DEBUG
-
-#include <linux/kernel.h>
-
-#define db_assert(x)  if (!(x)) { \
-	panic("assertion failed at %s:%d: %s", __FILE__, __LINE__, #x); }
-#define db_warn(x)  if (!(x)) { \
-	printk(KERN_WARNING "warning at %s:%d: %s", __FILE__, __LINE__, #x); }
-#define db_verify(x, y) db_assert(x y)
-#define db_verify_warn(x, y) db_warn(x y)
-#define db_run(x)  do { x; } while (0)
-
-#else
-
-#define db_assert(x)
-#define db_warn(x)
-#define db_verify(x, y) x
-#define db_verify_warn(x, y) x
-#define db_run(x)
-
-#endif
-
-#endif /* _ASM_DEBUG_H */
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index f19e890..53b2693 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -382,7 +382,9 @@
    instruction set this cpu supports.  This could be done in userspace,
    but it's not easy, and we've already done it here.  */
 
-#define ELF_HWCAP	(0)
+#define ELF_HWCAP	(elf_hwcap)
+extern unsigned int elf_hwcap;
+#include <asm/hwcap.h>
 
 /*
  * This yields a string that ld.so will use to load implementation
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 1b06251..9cbf383 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -164,25 +164,30 @@
 	return ret;
 }
 
-static inline void lose_fpu(int save)
+static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
 {
-	preempt_disable();
 	if (is_msa_enabled()) {
 		if (save) {
-			save_msa(current);
-			current->thread.fpu.fcr31 =
+			save_msa(tsk);
+			tsk->thread.fpu.fcr31 =
 					read_32bit_cp1_register(CP1_STATUS);
 		}
 		disable_msa();
-		clear_thread_flag(TIF_USEDMSA);
+		clear_tsk_thread_flag(tsk, TIF_USEDMSA);
 		__disable_fpu();
 	} else if (is_fpu_owner()) {
 		if (save)
-			_save_fp(current);
+			_save_fp(tsk);
 		__disable_fpu();
 	}
-	KSTK_STATUS(current) &= ~ST0_CU1;
-	clear_thread_flag(TIF_USEDFPU);
+	KSTK_STATUS(tsk) &= ~ST0_CU1;
+	clear_tsk_thread_flag(tsk, TIF_USEDFPU);
+}
+
+static inline void lose_fpu(int save)
+{
+	preempt_disable();
+	lose_fpu_inatomic(save, current);
 	preempt_enable();
 }
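Splitting out `lose_fpu_inatomic()` lets code that already runs non-preemptibly, or that operates on a task other than `current`, drop FPU/MSA ownership without the wrapper's preempt_disable/enable pair; `lose_fpu()` keeps its old semantics as a thin wrapper, as shown. A hedged caller sketch:

```c
#include <linux/sched.h>

/* Hypothetical caller discarding another task's FPU context; in real
 * code the appropriate locking against that task must already be held. */
static void example_discard_fpu(struct task_struct *tsk)
{
	preempt_disable();
	lose_fpu_inatomic(0, tsk);	/* 0: discard, don't save */
	preempt_enable();
}
```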
 
diff --git a/arch/mips/include/asm/gpio.h b/arch/mips/include/asm/gpio.h
deleted file mode 100644
index 06e46fa..0000000
--- a/arch/mips/include/asm/gpio.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_MIPS_GPIO_H
-#define __ASM_MIPS_GPIO_H
-
-#include <gpio.h>
-
-#endif /* __ASM_MIPS_GPIO_H */
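This begins the removal of the per-platform <asm/gpio.h> wrapper headers, made possible by dropping ARCH_HAVE_CUSTOM_GPIO_H in the Kconfig hunk near the top: MIPS now always goes through generic gpiolib, and board code uses the standard request/set API instead of platform macro aliases. For illustration:

```c
#include <linux/gpio.h>

/* Ordinary gpiolib usage that now works uniformly across MIPS boards;
 * the GPIO number and label are placeholders. */
static int example_claim_led(unsigned int gpio)
{
	int err = gpio_request_one(gpio, GPIOF_OUT_INIT_LOW, "example-led");

	if (err)
		return err;
	gpio_set_value(gpio, 1);
	return 0;
}
```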
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index f0db99f..15e0fec 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -49,7 +49,7 @@
 extern int cp0_perfcount_irq;
 extern int cp0_fdc_irq;
 
-extern int __weak get_c0_fdc_int(void);
+extern int get_c0_fdc_int(void);
 
 void arch_trigger_all_cpu_backtrace(bool);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 608aa57..e776725 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -26,14 +26,29 @@
 #define NOP_INSN "nop"
 #endif
 
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:\t" NOP_INSN "\n\t"
 		"nop\n\t"
 		".pushsection __jump_table,  \"aw\"\n\t"
 		WORD_INSN " 1b, %l[l_yes], %0\n\t"
 		".popsection\n\t"
-		: :  "i" (key) : : l_yes);
+		: :  "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\tj %l[l_yes]\n\t"
+		"nop\n\t"
+		".pushsection __jump_table,  \"aw\"\n\t"
+		WORD_INSN " 1b, %l[l_yes], %0\n\t"
+		".popsection\n\t"
+		: :  "i" (&((char *)key)[branch]) : : l_yes);
+
 	return false;
 l_yes:
 	return true;
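The extra `branch` parameter comes from the 4.3 static-key rework: the initial branch polarity is folded into bit 0 of the key pointer stored in `__jump_table` via `&((char *)key)[branch]`. The core later separates the two halves, roughly:

```c
#include <linux/jump_label.h>

/* How kernel/jump_label.c recovers key and polarity from a table entry
 * (restated; compare jump_entry_key()/jump_entry_branch()). */
static inline struct static_key *entry_key(unsigned long key_field)
{
	return (struct static_key *)(key_field & ~1UL);
}

static inline bool entry_branch(unsigned long key_field)
{
	return key_field & 1UL;
}
```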
diff --git a/arch/mips/include/asm/kdebug.h b/arch/mips/include/asm/kdebug.h
index cba22ab..8e3d08e 100644
--- a/arch/mips/include/asm/kdebug.h
+++ b/arch/mips/include/asm/kdebug.h
@@ -11,7 +11,9 @@
 	DIE_PAGE_FAULT,
 	DIE_BREAK,
 	DIE_SSTEPBP,
-	DIE_MSAFP
+	DIE_MSAFP,
+	DIE_UPROBE,
+	DIE_UPROBE_XOL,
 };
 
 #endif /* _ASM_MIPS_KDEBUG_H */
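`DIE_UPROBE`/`DIE_UPROBE_XOL` pair with the new `ARCH_SUPPORTS_UPROBES` select at the top of this series: the MIPS trap handlers report uprobe breakpoints, and the single-step of the out-of-line instruction copy, through the die notifier chain. A consumer would match them roughly like this (sketch):

```c
#include <linux/kdebug.h>
#include <linux/notifier.h>

/* Hypothetical die-chain consumer filtering the new uprobe events. */
static int example_die_notify(struct notifier_block *nb,
			      unsigned long val, void *data)
{
	switch (val) {
	case DIE_UPROBE:	/* breakpoint at the probed address */
	case DIE_UPROBE_XOL:	/* trap after out-of-line single-step */
		return NOTIFY_STOP;
	default:
		return NOTIFY_DONE;
	}
}
```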
diff --git a/arch/mips/include/asm/linkage.h b/arch/mips/include/asm/linkage.h
index 2767dda..99651b0 100644
--- a/arch/mips/include/asm/linkage.h
+++ b/arch/mips/include/asm/linkage.h
@@ -5,7 +5,6 @@
 #include <asm/asm.h>
 #endif
 
-#define __weak __attribute__((weak))
 #define cond_syscall(x) asm(".weak\t" #x "\n" #x "\t=\tsys_ni_syscall")
 #define SYSCALL_ALIAS(alias, name)					\
 	asm ( #alias " = " #name "\n\t.globl " #alias)
diff --git a/arch/mips/include/asm/maar.h b/arch/mips/include/asm/maar.h
index 6c62b0f..b02891f 100644
--- a/arch/mips/include/asm/maar.h
+++ b/arch/mips/include/asm/maar.h
@@ -26,7 +26,7 @@
  *
  * Return:	The number of MAAR pairs configured.
  */
-unsigned __weak platform_maar_init(unsigned num_pairs);
+unsigned platform_maar_init(unsigned num_pairs);
 
 /**
  * write_maar_pair() - write to a pair of MAARs
diff --git a/arch/mips/include/asm/mach-ar7/ar7.h b/arch/mips/include/asm/mach-ar7/ar7.h
index a47ea0c..468cbd6 100644
--- a/arch/mips/include/asm/mach-ar7/ar7.h
+++ b/arch/mips/include/asm/mach-ar7/ar7.h
@@ -203,4 +203,8 @@
 int __init ar7_gpio_init(void);
 void __init ar7_init_clocks(void);
 
+/* Board specific GPIO functions */
+int ar7_gpio_enable(unsigned gpio);
+int ar7_gpio_disable(unsigned gpio);
+
 #endif /* __AR7_H__ */
diff --git a/arch/mips/include/asm/mach-ar7/gpio.h b/arch/mips/include/asm/mach-ar7/gpio.h
deleted file mode 100644
index c177cd1..0000000
--- a/arch/mips/include/asm/mach-ar7/gpio.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2007-2009 Florian Fainelli <florian@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#ifndef __AR7_GPIO_H__
-#define __AR7_GPIO_H__
-
-#include <asm/mach-ar7/ar7.h>
-
-#define AR7_GPIO_MAX 32
-#define TITAN_GPIO_MAX	51
-#define NR_BUILTIN_GPIO TITAN_GPIO_MAX
-
-#define gpio_to_irq(gpio)	-1
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-
-#define gpio_cansleep __gpio_cansleep
-
-/* Board specific GPIO functions */
-int ar7_gpio_enable(unsigned gpio);
-int ar7_gpio_disable(unsigned gpio);
-
-#include <asm-generic/gpio.h>
-
-#endif
diff --git a/arch/mips/include/asm/mach-ath25/gpio.h b/arch/mips/include/asm/mach-ath25/gpio.h
deleted file mode 100644
index 713564b..0000000
--- a/arch/mips/include/asm/mach-ath25/gpio.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __ASM_MACH_ATH25_GPIO_H
-#define __ASM_MACH_ATH25_GPIO_H
-
-#include <asm-generic/gpio.h>
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-#define gpio_cansleep __gpio_cansleep
-#define gpio_to_irq __gpio_to_irq
-
-static inline int irq_to_gpio(unsigned irq)
-{
-	return -EINVAL;
-}
-
-#endif	/* __ASM_MACH_ATH25_GPIO_H */
diff --git a/arch/mips/include/asm/mach-ath79/gpio.h b/arch/mips/include/asm/mach-ath79/gpio.h
deleted file mode 100644
index 60dcb62..0000000
--- a/arch/mips/include/asm/mach-ath79/gpio.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- *  Atheros AR71XX/AR724X/AR913X GPIO API definitions
- *
- *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
- *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- */
-
-#ifndef __ASM_MACH_ATH79_GPIO_H
-#define __ASM_MACH_ATH79_GPIO_H
-
-#define ARCH_NR_GPIOS	64
-#include <asm-generic/gpio.h>
-
-int gpio_to_irq(unsigned gpio);
-int irq_to_gpio(unsigned irq);
-int gpio_get_value(unsigned gpio);
-void gpio_set_value(unsigned gpio, int value);
-
-#define gpio_cansleep	__gpio_cansleep
-
-#endif /* __ASM_MACH_ATH79_GPIO_H */
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
index 9785e4e..adde1fa 100644
--- a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
@@ -266,6 +266,17 @@
 	return -ENXIO;
 }
 
+/* On Au1000, Au1500 and Au1100 GPIOs won't work as inputs before
+ * SYS_PININPUTEN is written to at least once.  On Au1550/Au1200/Au1300 this
+ * register enables use of GPIOs as wake source.
+ */
+static inline void alchemy_gpio1_input_enable(void)
+{
+	void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR);
+	__raw_writel(0, base + 0x110);		/* the write op is key */
+	wmb();
+}
+
 /*
  * GPIO2 block macros for common linux GPIO functions. The 'gpio'
  * parameter must be in range of ALCHEMY_GPIO2_BASE..ALCHEMY_GPIO2_MAX.
@@ -518,141 +529,4 @@
 	return -ENXIO;
 }
 
-/**********************************************************************/
-
-/* Linux gpio framework integration.
- *
- * 4 use cases of Au1000-Au1200 GPIOS:
- *(1) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=y:
- *	Board must register gpiochips.
- *(2) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=n:
- *	2 (1 for Au1000) gpio_chips are registered.
- *
- *(3) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=y:
- *	the boards' gpio.h must provide the linux gpio wrapper functions,
- *
- *(4) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=n:
- *	inlinable gpio functions are provided which enable access to the
- *	Au1000 gpios only by using the numbers straight out of the data-
- *	sheets.
-
- * Cases 1 and 3 are intended for boards which want to provide their own
- * GPIO namespace and -operations (i.e. for example you have 8 GPIOs
- * which are in part provided by spare Au1000 GPIO pins and in part by
- * an external FPGA but you still want them to be accssible in linux
- * as gpio0-7. The board can of course use the alchemy_gpioX_* functions
- * as required).
- */
-
-#ifndef CONFIG_GPIOLIB
-
-#ifdef CONFIG_ALCHEMY_GPIOINT_AU1000
-
-#ifndef CONFIG_ALCHEMY_GPIO_INDIRECT	/* case (4) */
-
-static inline int gpio_direction_input(int gpio)
-{
-	return alchemy_gpio_direction_input(gpio);
-}
-
-static inline int gpio_direction_output(int gpio, int v)
-{
-	return alchemy_gpio_direction_output(gpio, v);
-}
-
-static inline int gpio_get_value(int gpio)
-{
-	return alchemy_gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(int gpio, int v)
-{
-	alchemy_gpio_set_value(gpio, v);
-}
-
-static inline int gpio_get_value_cansleep(unsigned gpio)
-{
-	return gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value_cansleep(unsigned gpio, int value)
-{
-	gpio_set_value(gpio, value);
-}
-
-static inline int gpio_is_valid(int gpio)
-{
-	return alchemy_gpio_is_valid(gpio);
-}
-
-static inline int gpio_cansleep(int gpio)
-{
-	return alchemy_gpio_cansleep(gpio);
-}
-
-static inline int gpio_to_irq(int gpio)
-{
-	return alchemy_gpio_to_irq(gpio);
-}
-
-static inline int irq_to_gpio(int irq)
-{
-	return alchemy_irq_to_gpio(irq);
-}
-
-static inline int gpio_request(unsigned gpio, const char *label)
-{
-	return 0;
-}
-
-static inline int gpio_request_one(unsigned gpio,
-					unsigned long flags, const char *label)
-{
-	return 0;
-}
-
-static inline int gpio_request_array(struct gpio *array, size_t num)
-{
-	return 0;
-}
-
-static inline void gpio_free(unsigned gpio)
-{
-}
-
-static inline void gpio_free_array(struct gpio *array, size_t num)
-{
-}
-
-static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
-{
-	return -ENOSYS;
-}
-
-static inline int gpio_export(unsigned gpio, bool direction_may_change)
-{
-	return -ENOSYS;
-}
-
-static inline int gpio_export_link(struct device *dev, const char *name,
-				   unsigned gpio)
-{
-	return -ENOSYS;
-}
-
-static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
-{
-	return -ENOSYS;
-}
-
-static inline void gpio_unexport(unsigned gpio)
-{
-}
-
-#endif	/* !CONFIG_ALCHEMY_GPIO_INDIRECT */
-
-#endif	/* CONFIG_ALCHEMY_GPIOINT_AU1000 */
-
-#endif	/* !CONFIG_GPIOLIB */
-
 #endif /* _ALCHEMY_GPIO_AU1000_H_ */
diff --git a/arch/mips/include/asm/mach-au1x00/gpio.h b/arch/mips/include/asm/mach-au1x00/gpio.h
deleted file mode 100644
index 22e7ff1..0000000
--- a/arch/mips/include/asm/mach-au1x00/gpio.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Alchemy GPIO support.
- *
- * With CONFIG_GPIOLIB=y different types of on-chip GPIO can be supported within
- *  the same kernel image.
- * With CONFIG_GPIOLIB=n, your board must select ALCHEMY_GPIOINT_AU1XXX for the
- *  appropriate CPU type (AU1000 currently).
- */
-
-#ifndef _ALCHEMY_GPIO_H_
-#define _ALCHEMY_GPIO_H_
-
-#include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-au1x00/gpio-au1000.h>
-#include <asm/mach-au1x00/gpio-au1300.h>
-
-/* On Au1000, Au1500 and Au1100 GPIOs won't work as inputs before
- * SYS_PININPUTEN is written to at least once.  On Au1550/Au1200/Au1300 this
- * register enables use of GPIOs as wake source.
- */
-static inline void alchemy_gpio1_input_enable(void)
-{
-	void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR);
-	__raw_writel(0, base + 0x110);		/* the write op is key */
-	wmb();
-}
-
-
-/* Linux gpio framework integration.
-*
-* 4 use cases of Alchemy GPIOS:
-*(1) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=y:
-*	Board must register gpiochips.
-*(2) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=n:
-*	A gpiochip for the 75 GPIOs is registered.
-*
-*(3) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=y:
-*	the boards' gpio.h must provide	the linux gpio wrapper functions,
-*
-*(4) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=n:
-*	inlinable gpio functions are provided which enable access to the
-*	Au1300 gpios only by using the numbers straight out of the data-
-*	sheets.
-
-* Cases 1 and 3 are intended for boards which want to provide their own
-* GPIO namespace and -operations (i.e. for example you have 8 GPIOs
-* which are in part provided by spare Au1300 GPIO pins and in part by
-* an external FPGA but you still want them to be accssible in linux
-* as gpio0-7. The board can of course use the alchemy_gpioX_* functions
-* as required).
-*/
-
-#ifdef CONFIG_GPIOLIB
-
-/* wraps the cpu-dependent irq_to_gpio functions */
-/* FIXME: gpiolib needs an irq_to_gpio hook */
-static inline int __au_irq_to_gpio(unsigned int irq)
-{
-	switch (alchemy_get_cputype()) {
-	case ALCHEMY_CPU_AU1000...ALCHEMY_CPU_AU1200:
-		return alchemy_irq_to_gpio(irq);
-	case ALCHEMY_CPU_AU1300:
-		return au1300_irq_to_gpio(irq);
-	}
-	return -EINVAL;
-}
-
-
-/* using gpiolib to provide up to 2 gpio_chips for on-chip gpios */
-#ifndef CONFIG_ALCHEMY_GPIO_INDIRECT	/* case (2) */
-
-/* get everything through gpiolib */
-#define gpio_to_irq	__gpio_to_irq
-#define gpio_get_value	__gpio_get_value
-#define gpio_set_value	__gpio_set_value
-#define gpio_cansleep	__gpio_cansleep
-#define irq_to_gpio	__au_irq_to_gpio
-
-#include <asm-generic/gpio.h>
-
-#endif	/* !CONFIG_ALCHEMY_GPIO_INDIRECT */
-
-
-#endif	/* CONFIG_GPIOLIB */
-
-#endif	/* _ALCHEMY_GPIO_H_ */
diff --git a/arch/mips/include/asm/mach-bcm47xx/gpio.h b/arch/mips/include/asm/mach-bcm47xx/gpio.h
deleted file mode 100644
index 90daefa..0000000
--- a/arch/mips/include/asm/mach-bcm47xx/gpio.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __ASM_MIPS_MACH_BCM47XX_GPIO_H
-#define __ASM_MIPS_MACH_BCM47XX_GPIO_H
-
-#include <asm-generic/gpio.h>
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-
-#define gpio_cansleep __gpio_cansleep
-#define gpio_to_irq __gpio_to_irq
-
-static inline int irq_to_gpio(unsigned int irq)
-{
-	return -EINVAL;
-}
-
-#endif
diff --git a/arch/mips/include/asm/mach-bcm63xx/gpio.h b/arch/mips/include/asm/mach-bcm63xx/gpio.h
deleted file mode 100644
index 1eb534d..0000000
--- a/arch/mips/include/asm/mach-bcm63xx/gpio.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef __ASM_MIPS_MACH_BCM63XX_GPIO_H
-#define __ASM_MIPS_MACH_BCM63XX_GPIO_H
-
-#include <bcm63xx_gpio.h>
-
-#define gpio_to_irq(gpio)	-1
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-
-#define gpio_cansleep __gpio_cansleep
-
-#include <asm-generic/gpio.h>
-
-#endif /* __ASM_MIPS_MACH_BCM63XX_GPIO_H */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/gpio.h b/arch/mips/include/asm/mach-cavium-octeon/gpio.h
deleted file mode 100644
index 34e9f7a..0000000
--- a/arch/mips/include/asm/mach-cavium-octeon/gpio.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef __ASM_MACH_CAVIUM_OCTEON_GPIO_H
-#define __ASM_MACH_CAVIUM_OCTEON_GPIO_H
-
-#ifdef CONFIG_GPIOLIB
-#define gpio_get_value	__gpio_get_value
-#define gpio_set_value	__gpio_set_value
-#define gpio_cansleep	__gpio_cansleep
-#else
-int gpio_request(unsigned gpio, const char *label);
-void gpio_free(unsigned gpio);
-int gpio_direction_input(unsigned gpio);
-int gpio_direction_output(unsigned gpio, int value);
-int gpio_get_value(unsigned gpio);
-void gpio_set_value(unsigned gpio, int value);
-#endif
-
-#include <asm-generic/gpio.h>
-
-#define gpio_to_irq	__gpio_to_irq
-
-#endif /* __ASM_MACH_GENERIC_GPIO_H */
diff --git a/arch/mips/include/asm/mach-generic/gpio.h b/arch/mips/include/asm/mach-generic/gpio.h
deleted file mode 100644
index b4e7020..0000000
--- a/arch/mips/include/asm/mach-generic/gpio.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef __ASM_MACH_GENERIC_GPIO_H
-#define __ASM_MACH_GENERIC_GPIO_H
-
-#ifdef CONFIG_GPIOLIB
-#define gpio_get_value	__gpio_get_value
-#define gpio_set_value	__gpio_set_value
-#define gpio_cansleep	__gpio_cansleep
-#else
-int gpio_request(unsigned gpio, const char *label);
-void gpio_free(unsigned gpio);
-int gpio_direction_input(unsigned gpio);
-int gpio_direction_output(unsigned gpio, int value);
-int gpio_get_value(unsigned gpio);
-void gpio_set_value(unsigned gpio, int value);
-#endif
-int gpio_to_irq(unsigned gpio);
-int irq_to_gpio(unsigned irq);
-
-#include <asm-generic/gpio.h>		/* cansleep wrappers */
-
-#endif /* __ASM_MACH_GENERIC_GPIO_H */
diff --git a/arch/mips/include/asm/mach-jz4740/gpio.h b/arch/mips/include/asm/mach-jz4740/gpio.h
index eaacba7..bf8c3e1 100644
--- a/arch/mips/include/asm/mach-jz4740/gpio.h
+++ b/arch/mips/include/asm/mach-jz4740/gpio.h
@@ -73,8 +73,6 @@
 void jz_gpio_port_set_value(int port, uint32_t value, uint32_t mask);
 uint32_t jz_gpio_port_get_value(int port, uint32_t mask);
 
-#include <asm/mach-generic/gpio.h>
-
 #define JZ_GPIO_PORTA(x) ((x) + 32 * 0)
 #define JZ_GPIO_PORTB(x) ((x) + 32 * 1)
 #define JZ_GPIO_PORTC(x) ((x) + 32 * 2)
diff --git a/arch/mips/include/asm/mach-lantiq/gpio.h b/arch/mips/include/asm/mach-lantiq/gpio.h
deleted file mode 100644
index 9ba1cae..0000000
--- a/arch/mips/include/asm/mach-lantiq/gpio.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_MIPS_MACH_LANTIQ_GPIO_H
-#define __ASM_MIPS_MACH_LANTIQ_GPIO_H
-
-#define gpio_to_irq __gpio_to_irq
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-
-#define gpio_cansleep __gpio_cansleep
-
-#include <asm-generic/gpio.h>
-
-#endif
diff --git a/arch/mips/include/asm/mach-loongson64/gpio.h b/arch/mips/include/asm/mach-loongson64/gpio.h
deleted file mode 100644
index b3b2169..0000000
--- a/arch/mips/include/asm/mach-loongson64/gpio.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Loongson GPIO Support
- *
- * Copyright (c) 2008  Richard Liu, STMicroelectronics <richard.liu@st.com>
- * Copyright (c) 2008-2010  Arnaud Patard <apatard@mandriva.com>
- * Copyright (c) 2014  Huacai Chen <chenhc@lemote.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __LOONGSON_GPIO_H
-#define __LOONGSON_GPIO_H
-
-#include <asm-generic/gpio.h>
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-#define gpio_cansleep __gpio_cansleep
-
-/* The chip can do interrupt
- * but it has not been tested and doc not clear
- */
-static inline int gpio_to_irq(int gpio)
-{
-	return -EINVAL;
-}
-
-static inline int irq_to_gpio(int gpio)
-{
-	return -EINVAL;
-}
-
-#endif	/* __LOONGSON_GPIO_H */
diff --git a/arch/mips/include/asm/mach-pistachio/gpio.h b/arch/mips/include/asm/mach-pistachio/gpio.h
deleted file mode 100644
index 6c1649c..0000000
--- a/arch/mips/include/asm/mach-pistachio/gpio.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Pistachio IRQ setup
- *
- * Copyright (C) 2014 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- */
-
-#ifndef __ASM_MACH_PISTACHIO_GPIO_H
-#define __ASM_MACH_PISTACHIO_GPIO_H
-
-#include <asm-generic/gpio.h>
-
-#define gpio_get_value	__gpio_get_value
-#define gpio_set_value	__gpio_set_value
-#define gpio_cansleep	__gpio_cansleep
-#define gpio_to_irq	__gpio_to_irq
-
-#endif /* __ASM_MACH_PISTACHIO_GPIO_H */
diff --git a/arch/mips/include/asm/mach-rc32434/gpio.h b/arch/mips/include/asm/mach-rc32434/gpio.h
index 4dee0a3..db21121 100644
--- a/arch/mips/include/asm/mach-rc32434/gpio.h
+++ b/arch/mips/include/asm/mach-rc32434/gpio.h
@@ -13,18 +13,6 @@
 #ifndef _RC32434_GPIO_H_
 #define _RC32434_GPIO_H_
 
-#include <linux/types.h>
-#include <asm-generic/gpio.h>
-
-#define NR_BUILTIN_GPIO		32
-
-#define gpio_get_value	__gpio_get_value
-#define gpio_set_value	__gpio_set_value
-#define gpio_cansleep	__gpio_cansleep
-
-#define gpio_to_irq(gpio)	(8 + 4 * 32 + gpio)
-#define irq_to_gpio(irq)	(irq - (8 + 4 * 32))
-
 struct rb532_gpio_reg {
 	u32   gpiofunc;	  /* GPIO Function Register
 			   * gpiofunc[x]==0 bit = gpio
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index edc7ee9..d75b75e 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -33,6 +33,29 @@
  */
 extern phys_addr_t __mips_cm_phys_base(void);
 
+/*
+ * mips_cm_is64 - determine CM register width
+ *
+ * The CM register width is processor and CM specific. A 64-bit processor
+ * usually has a 64-bit CM and a 32-bit one has a 32-bit CM, but a 64-bit
+ * processor could come with a 32-bit CM. Moreover, accesses on 64-bit CMs
+ * can be done either using regular 64-bit load/store instructions, or 32-bit
+ * load/store instructions on 32-bit register pairs. We opt for 64-bit
+ * accesses on 64-bit CMs and kernels, and 32-bit accesses in any other case.
+ *
+ * It's set to 0 for 32-bit accesses and 1 for 64-bit accesses.
+ */
+extern int mips_cm_is64;
+
+/**
+ * mips_cm_error_report - Report CM cache errors
+ */
+#ifdef CONFIG_MIPS_CM
+extern void mips_cm_error_report(void);
+#else
+static inline void mips_cm_error_report(void) {}
+#endif
+
 /**
  * mips_cm_probe - probe for a Coherence Manager
  *
@@ -90,20 +113,46 @@
 
 /* Macros to ease the creation of register access functions */
 #define BUILD_CM_R_(name, off)					\
-static inline u32 __iomem *addr_gcr_##name(void)		\
+static inline unsigned long __iomem *addr_gcr_##name(void)	\
 {								\
-	return (u32 __iomem *)(mips_cm_base + (off));		\
+	return (unsigned long __iomem *)(mips_cm_base + (off));	\
 }								\
 								\
-static inline u32 read_gcr_##name(void)				\
+static inline u32 read32_gcr_##name(void)			\
 {								\
 	return __raw_readl(addr_gcr_##name());			\
+}								\
+								\
+static inline u64 read64_gcr_##name(void)			\
+{								\
+	return __raw_readq(addr_gcr_##name());			\
+}								\
+								\
+static inline unsigned long read_gcr_##name(void)		\
+{								\
+	if (mips_cm_is64)					\
+		return read64_gcr_##name();			\
+	else							\
+		return read32_gcr_##name();			\
 }
 
 #define BUILD_CM__W(name, off)					\
-static inline void write_gcr_##name(u32 value)			\
+static inline void write32_gcr_##name(u32 value)		\
 {								\
 	__raw_writel(value, addr_gcr_##name());			\
+}								\
+								\
+static inline void write64_gcr_##name(u64 value)		\
+{								\
+	__raw_writeq(value, addr_gcr_##name());			\
+}								\
+								\
+static inline void write_gcr_##name(unsigned long value)	\
+{								\
+	if (mips_cm_is64)					\
+		write64_gcr_##name(value);			\
+	else							\
+		write32_gcr_##name(value);			\
 }
 
 #define BUILD_CM_RW(name, off)					\
@@ -144,6 +193,7 @@
 BUILD_CM_RW(reg3_mask,		MIPS_CM_GCB_OFS + 0xc8)
 BUILD_CM_R_(gic_status,		MIPS_CM_GCB_OFS + 0xd0)
 BUILD_CM_R_(cpc_status,		MIPS_CM_GCB_OFS + 0xf0)
+BUILD_CM_RW(l2_config,		MIPS_CM_GCB_OFS + 0x130)
 
 /* Core Local & Core Other register accessor functions */
 BUILD_CM_Cx_RW(reset_release,	0x00)
@@ -189,6 +239,13 @@
 #define CM_GCR_REV_MINOR_SHF			0
 #define CM_GCR_REV_MINOR_MSK			(_ULCAST_(0xff) << 0)
 
+#define CM_ENCODE_REV(major, minor) \
+		(((major) << CM_GCR_REV_MAJOR_SHF) | \
+		 ((minor) << CM_GCR_REV_MINOR_SHF))
+
+#define CM_REV_CM2				CM_ENCODE_REV(6, 0)
+#define CM_REV_CM3				CM_ENCODE_REV(8, 0)
+
 /* GCR_ERROR_CAUSE register fields */
 #define CM_GCR_ERROR_CAUSE_ERRTYPE_SHF		27
 #define CM_GCR_ERROR_CAUSE_ERRTYPE_MSK		(_ULCAST_(0x1f) << 27)
@@ -249,6 +306,16 @@
 #define CM_GCR_CPC_STATUS_EX_SHF		0
 #define CM_GCR_CPC_STATUS_EX_MSK		(_ULCAST_(0x1) << 0)
 
+/* GCR_L2_CONFIG register fields */
+#define CM_GCR_L2_CONFIG_BYPASS_SHF		20
+#define CM_GCR_L2_CONFIG_BYPASS_MSK		(_ULCAST_(0x1) << 20)
+#define CM_GCR_L2_CONFIG_SET_SIZE_SHF		12
+#define CM_GCR_L2_CONFIG_SET_SIZE_MSK		(_ULCAST_(0xf) << 12)
+#define CM_GCR_L2_CONFIG_LINE_SIZE_SHF		8
+#define CM_GCR_L2_CONFIG_LINE_SIZE_MSK		(_ULCAST_(0xf) << 8)
+#define CM_GCR_L2_CONFIG_ASSOC_SHF		0
+#define CM_GCR_L2_CONFIG_ASSOC_MSK		(_ULCAST_(0xff) << 0)
+
 /* GCR_Cx_COHERENCE register fields */
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF	0
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK	(_ULCAST_(0xff) << 0)
@@ -324,4 +391,18 @@
 	return 0;
 }
 
+/**
+ * mips_cm_revision() - return CM revision
+ *
+ * Return: The revision of the CM, from GCR_REV, or 0 if no CM is present. The
+ * return value should be checked against the CM_REV_* macros.
+ */
+static inline int mips_cm_revision(void)
+{
+	if (!mips_cm_present())
+		return 0;
+
+	return read_gcr_rev();
+}
+
 #endif /* __MIPS_ASM_MIPS_CM_H__ */
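
The width dispatch that BUILD_CM_R_/BUILD_CM__W generate per register can be
hard to see through the macro expansion. Below is a minimal sketch of the same
pattern with cm_is64 and gcr_base standing in for mips_cm_is64 and
mips_cm_base, assuming a build where __raw_readq/__raw_writeq are available:

#include <linux/io.h>
#include <linux/types.h>

static int cm_is64;		/* 0: 32-bit CM accesses, 1: 64-bit */
static void __iomem *gcr_base;	/* mapped GCR register block */

/* What read_gcr_<name>() expands to, minus the per-register naming. */
static unsigned long cm_read_gcr(unsigned int off)
{
	if (cm_is64)
		return __raw_readq(gcr_base + off);
	return __raw_readl(gcr_base + off);
}

/* What write_gcr_<name>() expands to. */
static void cm_write_gcr(unsigned int off, unsigned long val)
{
	if (cm_is64)
		__raw_writeq(val, gcr_base + off);
	else
		__raw_writel(val, gcr_base + off);
}

With mips_cm_revision() returning the raw GCR_REV value, callers can then gate
behaviour with a comparison such as mips_cm_revision() >= CM_REV_CM3.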
diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h
index 1cebe8c..f386f32 100644
--- a/arch/mips/include/asm/mips-cpc.h
+++ b/arch/mips/include/asm/mips-cpc.h
@@ -28,16 +28,6 @@
 extern phys_addr_t mips_cpc_default_phys_base(void);
 
 /**
- * mips_cpc_phys_base - retrieve the physical base address of the CPC
- *
- * This function returns the physical base address of the Cluster Power
- * Controller memory mapped registers, or 0 if no Cluster Power Controller
- * is present. It may be overriden by individual platforms which determine
- * this address in a different way.
- */
-extern phys_addr_t __weak mips_cpc_phys_base(void);
-
-/**
  * mips_cpc_probe - probe for a Cluster Power Controller
  *
  * Attempt to detect the presence of a Cluster Power Controller. Returns 0 if
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index c5b0956..d3cd8ea 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -112,6 +112,30 @@
 #define CP0_TX39_CACHE	$7
 
 
+/* Generic EntryLo bit definitions */
+#define ENTRYLO_G		(_ULCAST_(1) << 0)
+#define ENTRYLO_V		(_ULCAST_(1) << 1)
+#define ENTRYLO_D		(_ULCAST_(1) << 2)
+#define ENTRYLO_C_SHIFT		3
+#define ENTRYLO_C		(_ULCAST_(7) << ENTRYLO_C_SHIFT)
+
+/* R3000 EntryLo bit definitions */
+#define R3K_ENTRYLO_G		(_ULCAST_(1) << 8)
+#define R3K_ENTRYLO_V		(_ULCAST_(1) << 9)
+#define R3K_ENTRYLO_D		(_ULCAST_(1) << 10)
+#define R3K_ENTRYLO_N		(_ULCAST_(1) << 11)
+
+/* MIPS32/64 EntryLo bit definitions */
+#ifdef CONFIG_64BIT
+/* as read by dmfc0 */
+#define MIPS_ENTRYLO_XI		(_ULCAST_(1) << 62)
+#define MIPS_ENTRYLO_RI		(_ULCAST_(1) << 63)
+#else
+/* as read by mfc0 */
+#define MIPS_ENTRYLO_XI		(_ULCAST_(1) << 30)
+#define MIPS_ENTRYLO_RI		(_ULCAST_(1) << 31)
+#endif
+
 /*
  * Values for PageMask register
  */
@@ -203,6 +227,9 @@
 #define PG_ESP		(_ULCAST_(1) <<	 28)
 #define PG_IEC		(_ULCAST_(1) <<  27)
 
+/* MIPS32/64 EntryHI bit definitions */
+#define MIPS_ENTRYHI_EHINV	(_ULCAST_(1) << 10)
+
 /*
  * R4x00 interrupt enable / cause bits
  */
@@ -579,6 +606,8 @@
 
 #define MIPS_CONF7_IAR		(_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR		(_ULCAST_(1) << 16)
+/* FTLB probability bits for R6 */
+#define MIPS_CONF7_FTLBP_SHIFT	(18)
 
 /* MAAR bit definitions */
 #define MIPS_MAAR_ADDR		((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12)
@@ -586,31 +615,6 @@
 #define MIPS_MAAR_S		(_ULCAST_(1) << 1)
 #define MIPS_MAAR_V		(_ULCAST_(1) << 0)
 
-/*  EntryHI bit definition */
-#define MIPS_ENTRYHI_EHINV	(_ULCAST_(1) << 10)
-
-/* R3000 EntryLo bit definitions */
-#define R3K_ENTRYLO_G		(_ULCAST_(1) << 8)
-#define R3K_ENTRYLO_V		(_ULCAST_(1) << 9)
-#define R3K_ENTRYLO_D		(_ULCAST_(1) << 10)
-#define R3K_ENTRYLO_N		(_ULCAST_(1) << 11)
-
-/* R4000 compatible EntryLo bit definitions */
-#define MIPS_ENTRYLO_G		(_ULCAST_(1) << 0)
-#define MIPS_ENTRYLO_V		(_ULCAST_(1) << 1)
-#define MIPS_ENTRYLO_D		(_ULCAST_(1) << 2)
-#define MIPS_ENTRYLO_C_SHIFT	3
-#define MIPS_ENTRYLO_C		(_ULCAST_(7) << MIPS_ENTRYLO_C_SHIFT)
-#ifdef CONFIG_64BIT
-/* as read by dmfc0 */
-#define MIPS_ENTRYLO_XI		(_ULCAST_(1) << 62)
-#define MIPS_ENTRYLO_RI		(_ULCAST_(1) << 63)
-#else
-/* as read by mfc0 */
-#define MIPS_ENTRYLO_XI		(_ULCAST_(1) << 30)
-#define MIPS_ENTRYLO_RI		(_ULCAST_(1) << 31)
-#endif
-
 /* CMGCRBase bit definitions */
 #define MIPS_CMGCRB_BASE	11
 #define MIPS_CMGCRF_BASE	(~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))
@@ -932,7 +936,7 @@
  */
 
 #define __read_32bit_c0_register(source, sel)				\
-({ int __res;								\
+({ unsigned int __res;							\
 	if (sel == 0)							\
 		__asm__ __volatile__(					\
 			"mfc0\t%0, " #source "\n\t"			\
@@ -1014,7 +1018,7 @@
  * On RM7000/RM9000 these are uses to access cop0 set 1 registers
  */
 #define __read_32bit_c0_ctrl_register(source)				\
-({ int __res;								\
+({ unsigned int __res;							\
 	__asm__ __volatile__(						\
 		"cfc0\t%0, " #source "\n\t"				\
 		: "=r" (__res));					\
@@ -1471,7 +1475,7 @@
  */
 #define _read_32bit_cp1_register(source, gas_hardfloat)			\
 ({									\
-	int __res;							\
+	unsigned int __res;						\
 									\
 	__asm__ __volatile__(						\
 	"	.set	push					\n"	\
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index af5638b..bbb85fe 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -14,10 +14,90 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/inst.h>
+
 extern void _save_msa(struct task_struct *);
 extern void _restore_msa(struct task_struct *);
 extern void _init_msa_upper(void);
 
+extern void read_msa_wr_b(unsigned idx, union fpureg *to);
+extern void read_msa_wr_h(unsigned idx, union fpureg *to);
+extern void read_msa_wr_w(unsigned idx, union fpureg *to);
+extern void read_msa_wr_d(unsigned idx, union fpureg *to);
+
+/**
+ * read_msa_wr() - Read a single MSA vector register
+ * @idx:	The index of the vector register to read
+ * @to:		The FPU register union to store the register's value in
+ * @fmt:	The format of the data in the vector register
+ *
+ * Read the value of MSA vector register idx into the FPU register
+ * union to, using the format fmt.
+ */
+static inline void read_msa_wr(unsigned idx, union fpureg *to,
+			       enum msa_2b_fmt fmt)
+{
+	switch (fmt) {
+	case msa_fmt_b:
+		read_msa_wr_b(idx, to);
+		break;
+
+	case msa_fmt_h:
+		read_msa_wr_h(idx, to);
+		break;
+
+	case msa_fmt_w:
+		read_msa_wr_w(idx, to);
+		break;
+
+	case msa_fmt_d:
+		read_msa_wr_d(idx, to);
+		break;
+
+	default:
+		BUG();
+	}
+}
+
+extern void write_msa_wr_b(unsigned idx, union fpureg *from);
+extern void write_msa_wr_h(unsigned idx, union fpureg *from);
+extern void write_msa_wr_w(unsigned idx, union fpureg *from);
+extern void write_msa_wr_d(unsigned idx, union fpureg *from);
+
+/**
+ * write_msa_wr() - Write a single MSA vector register
+ * @idx:	The index of the vector register to write
+ * @from:	The FPU register union to take the register's value from
+ * @fmt:	The format of the data in the vector register
+ *
+ * Write the value from the FPU register union from into MSA vector
+ * register idx, using the format fmt.
+ */
+static inline void write_msa_wr(unsigned idx, union fpureg *from,
+				enum msa_2b_fmt fmt)
+{
+	switch (fmt) {
+	case msa_fmt_b:
+		write_msa_wr_b(idx, from);
+		break;
+
+	case msa_fmt_h:
+		write_msa_wr_h(idx, from);
+		break;
+
+	case msa_fmt_w:
+		write_msa_wr_w(idx, from);
+		break;
+
+	case msa_fmt_d:
+		write_msa_wr_d(idx, from);
+		break;
+
+	default:
+		BUG();
+	}
+}
+
 static inline void enable_msa(void)
 {
 	if (cpu_has_msa) {
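
As a hedged illustration of how these format-dispatching helpers combine with
the MSA instruction formats added to uapi/asm/inst.h below, consider an
emulator-style caller; emulate_msa_copy() is a hypothetical name, not an
in-tree function:

#include <asm/inst.h>		/* union mips_instruction, enum msa_2b_fmt */
#include <asm/msa.h>		/* read_msa_wr() */
#include <asm/processor.h>	/* union fpureg */

/* Sketch: pull vector register wd into a scratch fpureg using the
 * element width encoded in a decoded MSA MI10 load/store instruction.
 */
static void emulate_msa_copy(union mips_instruction inst,
			     union fpureg *scratch)
{
	unsigned int wd = inst.msa_mi10_format.wd;
	enum msa_2b_fmt df = inst.msa_mi10_format.df;

	read_msa_wr(wd, scratch, df);	/* b/h/w/d chosen by df */
	/* ...a real emulator would now copy 'scratch' to memory... */
}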
diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
index c373d95..d92cf59 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
@@ -284,6 +284,7 @@
 	CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001,
 	CVMX_BOARD_TYPE_UBNT_E100 = 20002,
 	CVMX_BOARD_TYPE_CUST_DSR1000N = 20006,
+	CVMX_BOARD_TYPE_KONTRON_S1901 = 21901,
 	CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000,
 
 	/* The remaining range is reserved for future use. */
@@ -384,6 +385,7 @@
 		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MIN)
 		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E100)
 		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DSR1000N)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KONTRON_S1901)
 		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX)
 	}
 	return "Unsupported Board";
diff --git a/arch/mips/include/asm/octeon/cvmx-pip.h b/arch/mips/include/asm/octeon/cvmx-pip.h
index df69bfd..c210154 100644
--- a/arch/mips/include/asm/octeon/cvmx-pip.h
+++ b/arch/mips/include/asm/octeon/cvmx-pip.h
@@ -37,7 +37,7 @@
 #include <asm/octeon/cvmx-fpa.h>
 #include <asm/octeon/cvmx-pip-defs.h>
 
-#define CVMX_PIP_NUM_INPUT_PORTS		40
+#define CVMX_PIP_NUM_INPUT_PORTS		48
 #define CVMX_PIP_NUM_WATCHERS			4
 
 /*
diff --git a/arch/mips/include/asm/octeon/cvmx-pko.h b/arch/mips/include/asm/octeon/cvmx-pko.h
index 3da59bb..5f47f76 100644
--- a/arch/mips/include/asm/octeon/cvmx-pko.h
+++ b/arch/mips/include/asm/octeon/cvmx-pko.h
@@ -542,6 +542,9 @@
  */
 static inline int cvmx_pko_get_base_queue(int port)
 {
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return port;
+
 	return cvmx_pko_get_base_queue_per_core(port, 0);
 }
 
diff --git a/arch/mips/include/asm/octeon/cvmx-pow-defs.h b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
index 9020ef4..6a3db4b 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
@@ -52,6 +52,12 @@
 #define CVMX_POW_WQ_INT_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000000080ull) + ((offset) & 15) * 8)
 #define CVMX_POW_WS_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000000280ull) + ((offset) & 15) * 8)
 
+#define CVMX_SSO_WQ_INT (CVMX_ADD_IO_SEG(0x0001670000001000ull))
+#define CVMX_SSO_WQ_IQ_DIS (CVMX_ADD_IO_SEG(0x0001670000001010ull))
+#define CVMX_SSO_WQ_INT_PC (CVMX_ADD_IO_SEG(0x0001670000001020ull))
+#define CVMX_SSO_PPX_GRP_MSK(offset) (CVMX_ADD_IO_SEG(0x0001670000006000ull) + ((offset) & 31) * 8)
+#define CVMX_SSO_WQ_INT_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000007000ull) + ((offset) & 63) * 8)
+
 union cvmx_pow_bist_stat {
 	uint64_t u64;
 	struct cvmx_pow_bist_stat_s {
@@ -1286,4 +1292,27 @@
 	struct cvmx_pow_ws_pcx_s cnf71xx;
 };
 
+union cvmx_sso_wq_int_thrx {
+	uint64_t u64;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_33_63:31;
+		uint64_t tc_en:1;
+		uint64_t tc_thr:4;
+		uint64_t reserved_26_27:2;
+		uint64_t ds_thr:12;
+		uint64_t reserved_12_13:2;
+		uint64_t iq_thr:12;
+#else
+		uint64_t iq_thr:12;
+		uint64_t reserved_12_13:2;
+		uint64_t ds_thr:12;
+		uint64_t reserved_26_27:2;
+		uint64_t tc_thr:4;
+		uint64_t tc_en:1;
+		uint64_t reserved_33_63:31;
+#endif
+	} s;
+};
+
 #endif
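
A hedged sketch of programming one of the new per-group SSO threshold
registers through this union, assuming cvmx_write_csr() — the usual Octeon
CSR write helper — is in scope:

/* Sketch: raise an interrupt once SSO group 'grp' queues 'thr' entries. */
static void sso_set_iq_threshold(int grp, unsigned int thr)
{
	union cvmx_sso_wq_int_thrx thrx;

	thrx.u64 = 0;
	thrx.s.iq_thr = thr;	/* input-queue depth threshold */
	cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(grp), thrx.u64);
}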
diff --git a/arch/mips/include/asm/octeon/cvmx-pow.h b/arch/mips/include/asm/octeon/cvmx-pow.h
index d5565d7..5153156 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow.h
@@ -1810,10 +1810,11 @@
 	cvmx_addr_t ptr;
 	cvmx_pow_tag_req_t tag_req;
 
-	wqp->qos = qos;
-	wqp->tag = tag;
-	wqp->tag_type = tag_type;
-	wqp->grp = grp;
+	wqp->word1.tag = tag;
+	wqp->word1.tag_type = tag_type;
+
+	cvmx_wqe_set_qos(wqp, qos);
+	cvmx_wqe_set_grp(wqp, grp);
 
 	tag_req.u64 = 0;
 	tag_req.s.op = CVMX_POW_TAG_OP_ADDWQ;
diff --git a/arch/mips/include/asm/octeon/cvmx-wqe.h b/arch/mips/include/asm/octeon/cvmx-wqe.h
index 2d6d0c7..0d697aa 100644
--- a/arch/mips/include/asm/octeon/cvmx-wqe.h
+++ b/arch/mips/include/asm/octeon/cvmx-wqe.h
@@ -193,6 +193,53 @@
 	        uint64_t bufs:8;
 #endif
 	} s;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t bufs:8;
+		uint64_t ip_offset:8;
+		uint64_t vlan_valid:1;
+		uint64_t vlan_stacked:1;
+		uint64_t unassigned:1;
+		uint64_t vlan_cfi:1;
+		uint64_t vlan_id:12;
+		uint64_t port:12;		/* MAC/PIP port number. */
+		uint64_t dec_ipcomp:1;
+		uint64_t tcp_or_udp:1;
+		uint64_t dec_ipsec:1;
+		uint64_t is_v6:1;
+		uint64_t software:1;
+		uint64_t L4_error:1;
+		uint64_t is_frag:1;
+		uint64_t IP_exc:1;
+		uint64_t is_bcast:1;
+		uint64_t is_mcast:1;
+		uint64_t not_IP:1;
+		uint64_t rcv_error:1;
+		uint64_t err_code:8;
+#else
+		uint64_t err_code:8;
+		uint64_t rcv_error:1;
+		uint64_t not_IP:1;
+		uint64_t is_mcast:1;
+		uint64_t is_bcast:1;
+		uint64_t IP_exc:1;
+		uint64_t is_frag:1;
+		uint64_t L4_error:1;
+		uint64_t software:1;
+		uint64_t is_v6:1;
+		uint64_t dec_ipsec:1;
+		uint64_t tcp_or_udp:1;
+		uint64_t dec_ipcomp:1;
+		uint64_t port:12;
+		uint64_t vlan_id:12;
+		uint64_t vlan_cfi:1;
+		uint64_t unassigned:1;
+		uint64_t vlan_stacked:1;
+		uint64_t vlan_valid:1;
+		uint64_t ip_offset:8;
+		uint64_t bufs:8;
+#endif
+	} s_cn68xx;
 
 	/* use this to get at the 16 vlan bits */
 	struct {
@@ -355,6 +402,146 @@
 
 } cvmx_pip_wqe_word2;
 
+union cvmx_pip_wqe_word0 {
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		/**
+		 * raw chksum result generated by the HW
+		 */
+		uint16_t hw_chksum;
+		/**
+		 * Field unused by hardware - available for software
+		 */
+		uint8_t unused;
+		/**
+		 * Next pointer used by hardware for list maintenance.
+		 * May be written/read by HW before the work queue
+		 * entry is scheduled to a PP (Only 36 bits used in
+		 * Octeon 1)
+		 */
+		uint64_t next_ptr:40;
+#else
+		uint64_t next_ptr:40;
+		uint8_t unused;
+		uint16_t hw_chksum;
+#endif
+	} cn38xx;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t l4ptr:8;       /* 56..63 */
+		uint64_t unused0:8;     /* 48..55 */
+		uint64_t l3ptr:8;       /* 40..47 */
+		uint64_t l2ptr:8;       /* 32..39 */
+		uint64_t unused1:18;    /* 14..31 */
+		uint64_t bpid:6;        /* 8..13 */
+		uint64_t unused2:2;     /* 6..7 */
+		uint64_t pknd:6;        /* 0..5 */
+#else
+		uint64_t pknd:6;        /* 0..5 */
+		uint64_t unused2:2;     /* 6..7 */
+		uint64_t bpid:6;        /* 8..13 */
+		uint64_t unused1:18;    /* 14..31 */
+		uint64_t l2ptr:8;       /* 32..39 */
+		uint64_t l3ptr:8;       /* 40..47 */
+		uint64_t unused0:8;     /* 48..55 */
+		uint64_t l4ptr:8;       /* 56..63 */
+#endif
+	} cn68xx;
+};
+
+union cvmx_wqe_word0 {
+	uint64_t u64;
+	union cvmx_pip_wqe_word0 pip;
+};
+
+union cvmx_wqe_word1 {
+	uint64_t u64;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t len:16;
+		uint64_t varies:14;
+		/**
+		 * the type of the tag (ORDERED, ATOMIC, NULL)
+		 */
+		uint64_t tag_type:2;
+		uint64_t tag:32;
+#else
+		uint64_t tag:32;
+		uint64_t tag_type:2;
+		uint64_t varies:14;
+		uint64_t len:16;
+#endif
+	};
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t len:16;
+		uint64_t zero_0:1;
+		/**
+		 * HW sets this to what it thought the priority of
+		 * the input packet was
+		 */
+		uint64_t qos:3;
+
+		uint64_t zero_1:1;
+		/**
+		 * the group that the work queue entry will be scheduled to
+		 */
+		uint64_t grp:6;
+		uint64_t zero_2:3;
+		uint64_t tag_type:2;
+		uint64_t tag:32;
+#else
+		uint64_t tag:32;
+		uint64_t tag_type:2;
+		uint64_t zero_2:3;
+		uint64_t grp:6;
+		uint64_t zero_1:1;
+		uint64_t qos:3;
+		uint64_t zero_0:1;
+		uint64_t len:16;
+#endif
+	} cn68xx;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		/**
+		 * HW sets to the total number of bytes in the packet
+		 */
+		uint64_t len:16;
+		/**
+		 * HW sets this to input physical port
+		 */
+		uint64_t ipprt:6;
+
+		/**
+		 * HW sets this to what it thought the priority of
+		 * the input packet was
+		 */
+		uint64_t qos:3;
+
+		/**
+		 * the group that the work queue entry will be scheduled to
+		 */
+		uint64_t grp:4;
+		/**
+		 * the type of the tag (ORDERED, ATOMIC, NULL)
+		 */
+		uint64_t tag_type:3;
+		/**
+		 * the synchronization/ordering tag
+		 */
+		uint64_t tag:32;
+#else
+		uint64_t tag:32;
+		uint64_t tag_type:2;
+		uint64_t zero_2:1;
+		uint64_t grp:4;
+		uint64_t qos:3;
+		uint64_t ipprt:6;
+		uint64_t len:16;
+#endif
+	} cn38xx;
+};
+
 /**
  * Work queue entry format
  *
@@ -366,70 +553,13 @@
      * WORD 0
      *	HW WRITE: the following 64 bits are filled by HW when a packet arrives
      */
-
-#ifdef __BIG_ENDIAN_BITFIELD
-    /**
-     * raw chksum result generated by the HW
-     */
-	uint16_t hw_chksum;
-    /**
-     * Field unused by hardware - available for software
-     */
-	uint8_t unused;
-    /**
-     * Next pointer used by hardware for list maintenance.
-     * May be written/read by HW before the work queue
-     *		 entry is scheduled to a PP
-     * (Only 36 bits used in Octeon 1)
-     */
-	uint64_t next_ptr:40;
-#else
-	uint64_t next_ptr:40;
-	uint8_t unused;
-	uint16_t hw_chksum;
-#endif
+	union cvmx_wqe_word0 word0;
 
     /*****************************************************************
      * WORD 1
      *	HW WRITE: the following 64 bits are filled by HW when a packet arrives
      */
-
-#ifdef __BIG_ENDIAN_BITFIELD
-    /**
-     * HW sets to the total number of bytes in the packet
-     */
-	uint64_t len:16;
-    /**
-     * HW sets this to input physical port
-     */
-	uint64_t ipprt:6;
-
-    /**
-     * HW sets this to what it thought the priority of the input packet was
-     */
-	uint64_t qos:3;
-
-    /**
-     * the group that the work queue entry will be scheduled to
-     */
-	uint64_t grp:4;
-    /**
-     * the type of the tag (ORDERED, ATOMIC, NULL)
-     */
-	uint64_t tag_type:3;
-    /**
-     * the synchronization/ordering tag
-     */
-	uint64_t tag:32;
-#else
-	uint64_t tag:32;
-	uint64_t tag_type:2;
-	uint64_t zero_2:1;
-	uint64_t grp:4;
-	uint64_t qos:3;
-	uint64_t ipprt:6;
-	uint64_t len:16;
-#endif
+	union cvmx_wqe_word1 word1;
 
     /**
      * WORD 2 HW WRITE: the following 64-bits are filled in by
@@ -465,4 +595,64 @@
 
 } CVMX_CACHE_LINE_ALIGNED cvmx_wqe_t;
 
+static inline int cvmx_wqe_get_port(cvmx_wqe_t *work)
+{
+	int port;
+
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		port = work->word2.s_cn68xx.port;
+	else
+		port = work->word1.cn38xx.ipprt;
+
+	return port;
+}
+
+static inline void cvmx_wqe_set_port(cvmx_wqe_t *work, int port)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		work->word2.s_cn68xx.port = port;
+	else
+		work->word1.cn38xx.ipprt = port;
+}
+
+static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)
+{
+	int grp;
+
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		grp = work->word1.cn68xx.grp;
+	else
+		grp = work->word1.cn38xx.grp;
+
+	return grp;
+}
+
+static inline void cvmx_wqe_set_grp(cvmx_wqe_t *work, int grp)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		work->word1.cn68xx.grp = grp;
+	else
+		work->word1.cn38xx.grp = grp;
+}
+
+static inline int cvmx_wqe_get_qos(cvmx_wqe_t *work)
+{
+	int qos;
+
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		qos = work->word1.cn68xx.qos;
+	else
+		qos = work->word1.cn38xx.qos;
+
+	return qos;
+}
+
+static inline void cvmx_wqe_set_qos(cvmx_wqe_t *work, int qos)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		work->word1.cn68xx.qos = qos;
+	else
+		work->word1.cn38xx.qos = qos;
+}
+
 #endif /* __CVMX_WQE_H__ */
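
Because word1 is now a union of per-family layouts, consumers must go through
these accessors rather than touching wqp->grp and friends directly — exactly
the change made to cvmx_pow_work_submit() in cvmx-pow.h above. A short
consumer-side sketch:

/* Sketch: inspect and re-target a received work queue entry without
 * knowing whether it uses the CN68XX or CN38XX word1 layout.
 */
static void requeue_work(cvmx_wqe_t *work, int new_grp)
{
	int port = cvmx_wqe_get_port(work);	/* input port */
	int qos = cvmx_wqe_get_qos(work);	/* HW priority estimate */

	cvmx_wqe_set_grp(work, new_grp);	/* layout chosen at run time */
	(void)port;
	(void)qos;
}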
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index c28a849..ff7ad91 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -133,20 +133,13 @@
 #define _PAGE_HUGE		(1 << _PAGE_HUGE_SHIFT)
 #define _PAGE_SPLITTING_SHIFT	(_PAGE_HUGE_SHIFT + 1)
 #define _PAGE_SPLITTING		(1 << _PAGE_SPLITTING_SHIFT)
-
-/* Only R2 or newer cores have the XI bit */
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
-#define _PAGE_NO_EXEC_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
-#else
-#define _PAGE_GLOBAL_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
-#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
-#endif	/* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
-
 #endif	/* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */
 
 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 /* XI - page cannot be executed */
-#ifndef _PAGE_NO_EXEC_SHIFT
+#ifdef _PAGE_SPLITTING_SHIFT
+#define _PAGE_NO_EXEC_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
+#else
 #define _PAGE_NO_EXEC_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
 #endif
 #define _PAGE_NO_EXEC		(cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
@@ -156,14 +149,16 @@
 #define _PAGE_READ		(cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT))
 #define _PAGE_NO_READ_SHIFT	_PAGE_READ_SHIFT
 #define _PAGE_NO_READ		(cpu_has_rixi ? (1 << _PAGE_READ_SHIFT) : 0)
+#endif	/* defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) */
 
+#if defined(_PAGE_NO_READ_SHIFT)
 #define _PAGE_GLOBAL_SHIFT	(_PAGE_NO_READ_SHIFT + 1)
-#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
-
-#else	/* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
+#elif defined(_PAGE_SPLITTING_SHIFT)
+#define _PAGE_GLOBAL_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
+#else
 #define _PAGE_GLOBAL_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
+#endif
 #define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
-#endif	/* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 
 #define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
 #define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
@@ -249,7 +244,7 @@
 #define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)  /* LOONGSON       */
 #define _CACHE_CACHABLE_COHERENT    (3<<_CACHE_SHIFT)  /* LOONGSON-3     */
 
-#elif defined(CONFIG_MACH_JZ4740)
+#elif defined(CONFIG_MACH_INGENIC)
 
 /* Ingenic uses the WA bit to achieve write-combine memory writes */
 #define _CACHE_UNCACHED_ACCELERATED (1<<_CACHE_SHIFT)
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index ae85694..8957f15 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -393,6 +393,8 @@
 	return __pgprot(prot);
 }
 
+#define pgprot_writecombine pgprot_writecombine
+
 static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
 {
 	unsigned long prot = pgprot_val(_prot);
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 9b3b48e..59ee6dc 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -275,6 +275,7 @@
 	unsigned long cp0_badvaddr;	/* Last user fault */
 	unsigned long cp0_baduaddr;	/* Last kernel fault accessing USEG */
 	unsigned long error_code;
+	unsigned long trap_nr;
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 	struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128)));
 	struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128)));
@@ -341,6 +342,7 @@
 	.cp0_badvaddr		= 0,				\
 	.cp0_baduaddr		= 0,				\
 	.error_code		= 0,				\
+	.trap_nr		= 0,				\
 	/*							\
 	 * Platform specific cop2 registers(null if no COP2)	\
 	 */							\
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index ffc3203..f6fc6aa 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -14,11 +14,16 @@
 #include <linux/linkage.h>
 #include <linux/types.h>
 #include <asm/isadep.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
 #include <uapi/asm/ptrace.h>
 
 /*
  * This struct defines the way the registers are stored on the stack during a
  * system call/exception. As usual the registers k0/k1 aren't being saved.
+ *
+ * If you add a register here, also add it to regoffset_table[] in
+ * arch/mips/kernel/ptrace.c.
  */
 struct pt_regs {
 #ifdef CONFIG_32BIT
@@ -43,8 +48,83 @@
 	unsigned long long mpl[6];        /* MTM{0-5} */
 	unsigned long long mtp[6];        /* MTP{0-5} */
 #endif
+	unsigned long __last[0];
 } __aligned(8);
 
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+	return regs->regs[29];	/* $29 is the stack pointer */
+}
+
+/*
+ * Don't use asm-generic/ptrace.h; it defines FP accessors that don't make
+ * sense on MIPS.  We would rather get an error if they are invoked.
+ */
+
+static inline void instruction_pointer_set(struct pt_regs *regs,
+                                           unsigned long val)
+{
+	regs->cp0_epc = val;
+}
+
+/* Query offset/name of register from its name/offset */
+extern int regs_query_register_offset(const char *name);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs:       pt_regs from which register value is gotten.
+ * @offset:     offset number of the register.
+ *
+ * regs_get_register returns the value of a register. The @offset is the
+ * offset of the register in the struct pt_regs pointed to by @regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+                                              unsigned int offset)
+{
+	if (unlikely(offset > MAX_REG_OFFSET))
+		return 0;
+
+	return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs:       pt_regs which contains kernel stack pointer.
+ * @addr:       address which is checked.
+ *
+ * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true. If not, returns false.
+ */
+static inline int regs_within_kernel_stack(struct pt_regs *regs,
+                                           unsigned long addr)
+{
+	return ((addr & ~(THREAD_SIZE - 1))  ==
+		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs:       pt_regs which contains kernel stack pointer.
+ * @n:          stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
+ * specified by @regs. If the @n th entry is NOT within the kernel stack,
+ * this returns 0.
+ */
+static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+                                                      unsigned int n)
+{
+	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+	addr += n;
+	if (regs_within_kernel_stack(regs, (unsigned long)addr))
+		return *addr;
+	else
+		return 0;
+}
+
 struct task_struct;
 
 extern int ptrace_getregs(struct task_struct *child,
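
These helpers exist so generic probe code (kprobes/uprobes event fetching) can
read saved registers and stack slots by offset. A hedged sketch of a consumer
follows; the register name "r4" ($a0) is an assumption about the naming used
by regoffset_table[] in arch/mips/kernel/ptrace.c:

/* Sketch: fetch a probed function's first argument and the n-th
 * kernel stack slot. "r4" is an assumed regoffset_table[] name.
 */
static unsigned long fetch_first_arg(struct pt_regs *regs)
{
	int off = regs_query_register_offset("r4");	/* $a0 */

	return (off < 0) ? 0 : regs_get_register(regs, off);
}

static unsigned long fetch_stack_slot(struct pt_regs *regs, unsigned int n)
{
	/* Bounded: returns 0 when slot n falls off the kernel stack. */
	return regs_get_kernel_stack_nth(regs, n);
}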
diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h
index 8efe5a9..003e273 100644
--- a/arch/mips/include/asm/signal.h
+++ b/arch/mips/include/asm/signal.h
@@ -23,4 +23,7 @@
 
 #define __ARCH_HAS_IRIX_SIGACTION
 
+extern int protected_save_fp_context(void __user *sc);
+extern int protected_restore_fp_context(void __user *sc);
+
 #endif /* _ASM_SIGNAL_H */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 9de4ba4..40196be 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -42,6 +42,11 @@
 	return ((counters >> 16) ^ counters) & 0xffff;
 }
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.h.serving_now == lock.h.ticket;
+}
+
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 #define arch_spin_unlock_wait(x) \
 	while (arch_spin_is_locked(x)) { cpu_relax(); }
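
arch_spin_value_unlocked() takes the lock by value so callers (notably the
lockref code) can test a snapshot without re-reading the lock word. A
standalone sketch of the ticket-lock invariant it checks, with simplified
stand-in types:

/* Sketch: a ticket lock is free exactly when the next ticket to hand
 * out equals the ticket being served. Stand-in for arch_spinlock_t.
 */
struct ticket_lock_snapshot {
	unsigned short serving_now;	/* ticket of the current holder */
	unsigned short ticket;		/* next ticket to be handed out */
};

static int snapshot_unlocked(struct ticket_lock_snapshot lock)
{
	return lock.serving_now == lock.ticket;
}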
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 9733cd0..28b5d84a 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -16,29 +16,21 @@
 #include <asm/watch.h>
 #include <asm/dsp.h>
 #include <asm/cop2.h>
-#include <asm/msa.h>
+#include <asm/fpu.h>
 
 struct task_struct;
 
-enum {
-	FP_SAVE_NONE	= 0,
-	FP_SAVE_VECTOR	= -1,
-	FP_SAVE_SCALAR	= 1,
-};
-
 /**
  * resume - resume execution of a task
  * @prev:	The task previously executed.
  * @next:	The task to begin executing.
  * @next_ti:	task_thread_info(next).
- * @fp_save:	Which, if any, FP context to save for prev.
  *
  * This function is used whilst scheduling to save the context of prev & load
  * the context of next. Returns prev.
  */
 extern asmlinkage struct task_struct *resume(struct task_struct *prev,
-		struct task_struct *next, struct thread_info *next_ti,
-		s32 fp_save);
+		struct task_struct *next, struct thread_info *next_ti);
 
 extern unsigned int ll_bit;
 extern struct task_struct *ll_task;
@@ -91,8 +83,8 @@
  */
 #define switch_to(prev, next, last)					\
 do {									\
-	s32 __fpsave = FP_SAVE_NONE;					\
 	__mips_mt_fpaff_switch_to(prev);				\
+	lose_fpu_inatomic(1, prev);					\
 	if (cpu_has_dsp) {						\
 		__save_dsp(prev);					\
 		__restore_dsp(next);					\
@@ -111,15 +103,10 @@
 		clear_c0_status(ST0_CU2);				\
 	}								\
 	__clear_software_ll_bit();					\
-	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU))		\
-		__fpsave = FP_SAVE_SCALAR;				\
-	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA))		\
-		__fpsave = FP_SAVE_VECTOR;				\
 	if (cpu_has_userlocal)						\
 		write_c0_userlocal(task_thread_info(next)->tp_value);	\
 	__restore_watch();						\
-	disable_msa();							\
-	(last) = resume(prev, next, task_thread_info(next), __fpsave);	\
+	(last) = resume(prev, next, task_thread_info(next));		\
 } while (0)
 
 #endif /* _ASM_SWITCH_TO_H */
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 9c0014e..e309d8f 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -99,6 +99,7 @@
 #define TIF_SYSCALL_AUDIT	3	/* syscall auditing active */
 #define TIF_SECCOMP		4	/* secure computing */
 #define TIF_NOTIFY_RESUME	5	/* callback before returning to user */
+#define TIF_UPROBE		6	/* breakpointed or singlestepping */
 #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */
 #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
@@ -122,6 +123,7 @@
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
+#define _TIF_UPROBE		(1<<TIF_UPROBE)
 #define _TIF_USEDFPU		(1<<TIF_USEDFPU)
 #define _TIF_NOHZ		(1<<TIF_NOHZ)
 #define _TIF_FIXADE		(1<<TIF_FIXADE)
@@ -146,7 +148,8 @@
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK		\
-	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
+	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME |	\
+	 _TIF_UPROBE)
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK	(_TIF_NOHZ | _TIF_WORK_MASK |		\
 				 _TIF_WORK_SYSCALL_EXIT |		\
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index 8ab2874..17d4cd2 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -51,7 +51,7 @@
 /*
  * Initialize the calling CPU's compare interrupt as clockevent device
  */
-extern unsigned int __weak get_c0_compare_int(void);
+extern unsigned int get_c0_compare_int(void);
 extern int r4k_clockevent_init(void);
 
 static inline int mips_clockevent_init(void)
diff --git a/arch/mips/include/asm/tlbdebug.h b/arch/mips/include/asm/tlbdebug.h
index bb8f5c2..3a25a87 100644
--- a/arch/mips/include/asm/tlbdebug.h
+++ b/arch/mips/include/asm/tlbdebug.h
@@ -11,6 +11,7 @@
 /*
  * TLB debugging functions:
  */
+extern void dump_tlb_regs(void);
 extern void dump_tlb_all(void);
 
 #endif /* __ASM_TLBDEBUG_H */
diff --git a/arch/mips/include/asm/uprobes.h b/arch/mips/include/asm/uprobes.h
new file mode 100644
index 0000000..34c325c
--- /dev/null
+++ b/arch/mips/include/asm/uprobes.h
@@ -0,0 +1,58 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_UPROBES_H
+#define __ASM_UPROBES_H
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+#include <asm/break.h>
+#include <asm/inst.h>
+
+/*
+ * We want this to be defined as union mips_instruction but that makes the
+ * generic code blow up.
+ */
+typedef u32 uprobe_opcode_t;
+
+/*
+ * Classic MIPS (note this implementation doesn't consider microMIPS yet)
+ * instructions are always 4 bytes, but in order to deal with branches and
+ * their delay slots, we treat instructions as having 8 bytes maximum.
+ */
+#define MAX_UINSN_BYTES			8
+#define UPROBE_XOL_SLOT_BYTES		128	/* Max. cache line size */
+
+#define UPROBE_BRK_UPROBE		0x000d000d	/* break 13 */
+#define UPROBE_BRK_UPROBE_XOL		0x000e000d	/* break 14 */
+
+#define UPROBE_SWBP_INSN		UPROBE_BRK_UPROBE
+#define UPROBE_SWBP_INSN_SIZE		4
+
+struct arch_uprobe {
+	unsigned long	resume_epc;
+	u32	insn[2];
+	u32	ixol[2];
+	union	mips_instruction orig_inst[MAX_UINSN_BYTES / 4];
+};
+
+struct arch_uprobe_task {
+	unsigned long saved_trap_nr;
+};
+
+extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
+	struct mm_struct *mm, unsigned long addr);
+extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
+extern int arch_uprobe_exception_notify(struct notifier_block *self,
+	unsigned long val, void *data);
+extern void arch_uprobe_abort_xol(struct arch_uprobe *aup,
+	struct pt_regs *regs);
+extern unsigned long arch_uretprobe_hijack_return_addr(
+	unsigned long trampoline_vaddr, struct pt_regs *regs);
+
+#endif /* __ASM_UPROBES_H */
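
The two break codes let the trap handler tell a breakpoint hit apart from
completion of an execute-out-of-line (XOL) single step. A purely illustrative
dispatch sketch — the in-tree logic lives in the break exception handler and
arch_uprobe_exception_notify():

#include <asm/break.h>
#include <asm/uprobes.h>

/* Sketch: classify a trapping break instruction word. Illustrative
 * helper only; not the kernel's actual dispatch path.
 */
static int classify_uprobe_break(u32 insn_word)
{
	switch (insn_word) {
	case UPROBE_BRK_UPROBE:		/* break 13: breakpoint hit */
		return BRK_UPROBE;
	case UPROBE_BRK_UPROBE_XOL:	/* break 14: XOL step complete */
		return BRK_UPROBE_XOL;
	default:
		return -1;		/* not a uprobes break */
	}
}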
diff --git a/arch/mips/include/asm/vpe.h b/arch/mips/include/asm/vpe.h
index 7849f39..80e70db 100644
--- a/arch/mips/include/asm/vpe.h
+++ b/arch/mips/include/asm/vpe.h
@@ -122,7 +122,7 @@
 void *alloc_progmem(unsigned long len);
 void release_progmem(void *ptr);
 
-int __weak vpe_run(struct vpe *v);
+int vpe_run(struct vpe *v);
 void cleanup_tc(struct tc *tc);
 
 int __init vpe_module_init(void);
diff --git a/arch/mips/include/uapi/asm/break.h b/arch/mips/include/uapi/asm/break.h
index 002c39e..9c4265c 100644
--- a/arch/mips/include/uapi/asm/break.h
+++ b/arch/mips/include/uapi/asm/break.h
@@ -21,6 +21,8 @@
 #define BRK_DIVZERO	7	/* Divide by zero check */
 #define BRK_RANGE	8	/* Range error check */
 #define BRK_BUG		12	/* Used by BUG() */
+#define BRK_UPROBE	13	/* See <asm/uprobes.h> */
+#define BRK_UPROBE_XOL	14	/* See <asm/uprobes.h> */
 #define BRK_MEMU	514	/* Used by FPU emulator */
 #define BRK_KPROBE_BP	515	/* Kprobe break */
 #define BRK_KPROBE_SSTEPBP 516	/* Kprobe single step software implementation */
diff --git a/arch/mips/include/uapi/asm/hwcap.h b/arch/mips/include/uapi/asm/hwcap.h
new file mode 100644
index 0000000..c7484a7
--- /dev/null
+++ b/arch/mips/include/uapi/asm/hwcap.h
@@ -0,0 +1,8 @@
+#ifndef _UAPI_ASM_HWCAP_H
+#define _UAPI_ASM_HWCAP_H
+
+/* HWCAP flags */
+#define HWCAP_MIPS_R6		(1 << 0)
+#define HWCAP_MIPS_MSA		(1 << 1)
+
+#endif /* _UAPI_ASM_HWCAP_H */
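
Userland reads these flags from the AT_HWCAP auxiliary vector. A minimal test
program using glibc's getauxval(), with the bit values mirrored locally for
self-containment:

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_MIPS_R6	(1 << 0)	/* mirrors asm/hwcap.h above */
#define HWCAP_MIPS_MSA	(1 << 1)

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("MIPS r6: %s\n", (hwcap & HWCAP_MIPS_R6) ? "yes" : "no");
	printf("MSA:     %s\n", (hwcap & HWCAP_MIPS_MSA) ? "yes" : "no");
	return 0;
}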
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index fc0cf5a..9b44d5a 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -26,7 +26,7 @@
 	cop0_op, cop1_op, cop2_op, cop1x_op,
 	beql_op, bnel_op, blezl_op, bgtzl_op,
 	daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op,
-	spec2_op, jalx_op, mdmx_op, spec3_op,
+	spec2_op, jalx_op, mdmx_op, msa_op = mdmx_op, spec3_op,
 	lb_op, lh_op, lwl_op, lw_op,
 	lbu_op, lhu_op, lwr_op, lwu_op,
 	sb_op, sh_op, swl_op, sw_op,
@@ -167,8 +167,13 @@
 	fround_op    =	0x0c, ftrunc_op	   =  0x0d,
 	fceil_op     =	0x0e, ffloor_op	   =  0x0f,
 	fmovc_op     =	0x11, fmovz_op	   =  0x12,
-	fmovn_op     =	0x13, frecip_op	   =  0x15,
-	frsqrt_op    =	0x16, fcvts_op	   =  0x20,
+	fmovn_op     =	0x13, fseleqz_op   =  0x14,
+	frecip_op    =  0x15, frsqrt_op    =  0x16,
+	fselnez_op   =  0x17, fmaddf_op    =  0x18,
+	fmsubf_op    =  0x19, frint_op     =  0x1a,
+	fclass_op    =  0x1b, fmin_op      =  0x1c,
+	fmina_op     =  0x1d, fmax_op      =  0x1e,
+	fmaxa_op     =  0x1f, fcvts_op     =  0x20,
 	fcvtd_op     =	0x21, fcvte_op	   =  0x22,
 	fcvtw_op     =	0x24, fcvtl_op	   =  0x25,
 	fcmp_op	     =	0x30
@@ -221,6 +226,24 @@
 };
 
 /*
+ * func field for MSA MI10 format.
+ */
+enum msa_mi10_func {
+	msa_ld_op = 8,
+	msa_st_op = 9,
+};
+
+/*
+ * MSA 2 bit format fields.
+ */
+enum msa_2b_fmt {
+	msa_fmt_b = 0,
+	msa_fmt_h = 1,
+	msa_fmt_w = 2,
+	msa_fmt_d = 3,
+};
+
+/*
  * (microMIPS) Major opcodes.
  */
 enum mm_major_op {
@@ -611,6 +634,16 @@
 	;)))))))
 };
 
+struct msa_mi10_format {		/* MSA MI10 */
+	__BITFIELD_FIELD(unsigned int opcode : 6,
+	__BITFIELD_FIELD(signed int s10 : 10,
+	__BITFIELD_FIELD(unsigned int rs : 5,
+	__BITFIELD_FIELD(unsigned int wd : 5,
+	__BITFIELD_FIELD(unsigned int func : 4,
+	__BITFIELD_FIELD(unsigned int df : 2,
+	;))))))
+};
+
 struct spec3_format {   /* SPEC3 */
 	__BITFIELD_FIELD(unsigned int opcode:6,
 	__BITFIELD_FIELD(unsigned int rs:5,
@@ -888,6 +921,7 @@
 	struct p_format p_format;
 	struct f_format f_format;
 	struct ma_format ma_format;
+	struct msa_mi10_format msa_mi10_format;
 	struct b_format b_format;
 	struct ps_format ps_format;
 	struct v_format v_format;
diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
index 9081d88..5cbd9ae 100644
--- a/arch/mips/include/uapi/asm/sigcontext.h
+++ b/arch/mips/include/uapi/asm/sigcontext.h
@@ -12,6 +12,18 @@
 #include <linux/types.h>
 #include <asm/sgidefs.h>
 
+/* scalar FP context was used */
+#define USED_FP			(1 << 0)
+
+/* the value of Status.FR when context was saved */
+#define USED_FR1		(1 << 1)
+
+/* FR=1, but with odd singles in bits 63:32 of preceding even double */
+#define USED_HYBRID_FPRS	(1 << 2)
+
+/* extended context was used, see struct extcontext for details */
+#define USED_EXTCONTEXT		(1 << 3)
+
 #if _MIPS_SIM == _MIPS_SIM_ABI32
 
 /*
diff --git a/arch/mips/include/uapi/asm/swab.h b/arch/mips/include/uapi/asm/swab.h
index 8f2d184..c4ddc4f 100644
--- a/arch/mips/include/uapi/asm/swab.h
+++ b/arch/mips/include/uapi/asm/swab.h
@@ -16,11 +16,13 @@
 #if (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) ||		\
     defined(_MIPS_ARCH_LOONGSON3A)
 
-static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+static inline __attribute__((nomips16)) __attribute_const__
+		__u16 __arch_swab16(__u16 x)
 {
 	__asm__(
 	"	.set	push			\n"
 	"	.set	arch=mips32r2		\n"
+	"	.set	nomips16		\n"
 	"	wsbh	%0, %1			\n"
 	"	.set	pop			\n"
 	: "=r" (x)
@@ -30,11 +32,13 @@
 }
 #define __arch_swab16 __arch_swab16
 
-static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+static inline __attribute__((nomips16)) __attribute_const__
+		__u32 __arch_swab32(__u32 x)
 {
 	__asm__(
 	"	.set	push			\n"
 	"	.set	arch=mips32r2		\n"
+	"	.set	nomips16		\n"
 	"	wsbh	%0, %1			\n"
 	"	rotr	%0, %0, 16		\n"
 	"	.set	pop			\n"
@@ -50,11 +54,13 @@
  * 64-bit kernel on r2 CPUs.
  */
 #ifdef __mips64
-static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
+static inline __attribute__((nomips16)) __attribute_const__
+		__u64 __arch_swab64(__u64 x)
 {
 	__asm__(
 	"	.set	push			\n"
 	"	.set	arch=mips64r2		\n"
+	"	.set	nomips16		\n"
 	"	dsbh	%0, %1			\n"
 	"	dshd	%0, %0			\n"
 	"	.set	pop			\n"
diff --git a/arch/mips/include/uapi/asm/ucontext.h b/arch/mips/include/uapi/asm/ucontext.h
new file mode 100644
index 0000000..2320144
--- /dev/null
+++ b/arch/mips/include/uapi/asm/ucontext.h
@@ -0,0 +1,65 @@
+#ifndef __MIPS_UAPI_ASM_UCONTEXT_H
+#define __MIPS_UAPI_ASM_UCONTEXT_H
+
+/**
+ * struct extcontext - extended context header structure
+ * @magic:	magic value identifying the type of extended context
+ * @size:	the size in bytes of the enclosing structure
+ *
+ * Extended context structures provide context which does not fit within struct
+ * sigcontext. They are placed sequentially in memory at the end of struct
+ * ucontext and struct sigframe, with each extended context structure beginning
+ * with a header defined by this struct. The type of context represented is
+ * indicated by the magic field. Userland may check each extended context
+ * structure against magic values that it recognises. The size field lets any
+ * unrecognised context be skipped, allowing for future expansion. The end
+ * of the extended context data is indicated by the magic value
+ * END_EXTCONTEXT_MAGIC.
+ */
+struct extcontext {
+	unsigned int		magic;
+	unsigned int		size;
+};
+
+/**
+ * struct msa_extcontext - MSA extended context structure
+ * @ext:	the extended context header, with magic == MSA_EXTCONTEXT_MAGIC
+ * @wr:		the most significant 64 bits of each MSA vector register
+ * @csr:	the value of the MSA control & status register
+ *
+ * If MSA context is live for a task at the time a signal is delivered to it,
+ * this structure will hold the MSA context of the task as it was prior to the
+ * signal delivery.
+ */
+struct msa_extcontext {
+	struct extcontext	ext;
+#define MSA_EXTCONTEXT_MAGIC	0x784d5341	/* xMSA */
+
+	unsigned long long	wr[32];
+	unsigned int		csr;
+};
+
+#define END_EXTCONTEXT_MAGIC	0x78454e44	/* xEND */
+
+/**
+ * struct ucontext - user context structure
+ * @uc_flags:
+ * @uc_link:
+ * @uc_stack:
+ * @uc_mcontext:	holds basic processor state
+ * @uc_sigmask:
+ * @uc_extcontext:	holds extended processor state
+ */
+struct ucontext {
+	/* Historic fields matching asm-generic */
+	unsigned long		uc_flags;
+	struct ucontext		*uc_link;
+	stack_t			uc_stack;
+	struct sigcontext	uc_mcontext;
+	sigset_t		uc_sigmask;
+
+	/* Extended context structures may follow ucontext */
+	unsigned long long	uc_extcontext[0];
+};
+
+#endif /* __MIPS_UAPI_ASM_UCONTEXT_H */
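
Since each record leads with magic and size, userland can scan uc_extcontext
without understanding every record type. A hedged sketch of such a walker over
a signal-delivered ucontext, assuming the definitions above are in scope:

/* Sketch: find the MSA record in the extended context area, skipping
 * unrecognised records via their size field.
 */
static struct msa_extcontext *find_msa_extcontext(struct ucontext *uc)
{
	char *p = (char *)uc->uc_extcontext;

	for (;;) {
		struct extcontext *ext = (struct extcontext *)p;

		if (ext->magic == END_EXTCONTEXT_MAGIC)
			return NULL;		/* no MSA record present */
		if (ext->magic == MSA_EXTCONTEXT_MAGIC)
			return (struct msa_extcontext *)ext;
		p += ext->size;			/* skip unknown record */
	}
}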
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index e1ea4f6..5d6828b 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -110,18 +110,11 @@
 	}
 }
 
-static void r4030_set_mode(enum clock_event_mode mode,
-			   struct clock_event_device *evt)
-{
-	/* Nothing to do ...  */
-}
-
 struct clock_event_device r4030_clockevent = {
 	.name		= "r4030",
 	.features	= CLOCK_EVT_FEAT_PERIODIC,
 	.rating		= 300,
 	.irq		= JAZZ_TIMER_IRQ,
-	.set_mode	= r4030_set_mode,
 };
 
 static irqreturn_t r4030_timer_interrupt(int irq, void *dev_id)
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index 54c80d4..6cd69fd 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -231,6 +231,13 @@
 	return 0;
 }
 
+static int jz_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+	struct jz_gpio_chip *jz_gpio = gpio_chip_to_jz_gpio_chip(chip);
+
+	return jz_gpio->irq_base + gpio;
+}
+
 int jz_gpio_port_direction_input(int port, uint32_t mask)
 {
 	writel(mask, GPIO_TO_REG(port, JZ_REG_GPIO_DIRECTION_CLEAR));
@@ -262,18 +269,6 @@
 }
 EXPORT_SYMBOL(jz_gpio_port_get_value);
 
-int gpio_to_irq(unsigned gpio)
-{
-	return JZ4740_IRQ_GPIO(0) + gpio;
-}
-EXPORT_SYMBOL_GPL(gpio_to_irq);
-
-int irq_to_gpio(unsigned irq)
-{
-	return irq - JZ4740_IRQ_GPIO(0);
-}
-EXPORT_SYMBOL_GPL(irq_to_gpio);
-
 #define IRQ_TO_BIT(irq) BIT(irq_to_gpio(irq) & 0x1f)
 
 static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int irq)
@@ -403,6 +398,7 @@
 		.get = jz_gpio_get_value, \
 		.direction_output = jz_gpio_direction_output, \
 		.direction_input = jz_gpio_direction_input, \
+		.to_irq = jz_gpio_to_irq, \
 		.base = JZ4740_GPIO_BASE_ ## _bank, \
 		.ngpio = JZ4740_GPIO_NUM_ ## _bank, \
 	}, \
@@ -423,8 +419,8 @@
 	chip->base = ioremap(JZ4740_GPIO_BASE_ADDR + (id * 0x100), 0x100);
 
 	chip->irq = JZ4740_IRQ_INTC_GPIO(id);
-	irq_set_handler_data(chip->irq, chip);
-	irq_set_chained_handler(chip->irq, jz_gpio_irq_demux_handler);
+	irq_set_chained_handler_and_data(chip->irq,
+					 jz_gpio_irq_demux_handler, chip);
 
 	gc = irq_alloc_generic_chip(chip->gpio_chip.label, 1, chip->irq_base,
 		chip->base, handle_level_irq);
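
With the .to_irq hook wired into the gpio_chip, callers reach the mapping
through gpiolib instead of the removed exported gpio_to_irq()/irq_to_gpio()
pair. A usage sketch with a hypothetical handler:

#include <linux/gpio.h>
#include <linux/interrupt.h>

/* Sketch: claim the IRQ behind GPIO 'n'; gpio_to_irq() now dispatches
 * to the chip's .to_irq (jz_gpio_to_irq) via gpiolib.
 */
static int request_gpio_irq(unsigned int n, irq_handler_t handler, void *data)
{
	int irq = gpio_to_irq(n);

	if (irq < 0)
		return irq;
	return request_irq(irq, handler, IRQF_TRIGGER_FALLING,
			   "example-gpio", data);
}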
diff --git a/arch/mips/jz4740/time.c b/arch/mips/jz4740/time.c
index 7ab47fe..1f7ca2c 100644
--- a/arch/mips/jz4740/time.c
+++ b/arch/mips/jz4740/time.c
@@ -58,7 +58,7 @@
 
 	jz4740_timer_ack_full(TIMER_CLOCKEVENT);
 
-	if (cd->mode != CLOCK_EVT_MODE_PERIODIC)
+	if (!clockevent_state_periodic(cd))
 		jz4740_timer_disable(TIMER_CLOCKEVENT);
 
 	cd->event_handler(cd);
@@ -66,24 +66,29 @@
 	return IRQ_HANDLED;
 }
 
-static void jz4740_clockevent_set_mode(enum clock_event_mode mode,
-	struct clock_event_device *cd)
+static int jz4740_clockevent_set_periodic(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		jz4740_timer_set_count(TIMER_CLOCKEVENT, 0);
-		jz4740_timer_set_period(TIMER_CLOCKEVENT, jz4740_jiffies_per_tick);
-	case CLOCK_EVT_MODE_RESUME:
-		jz4740_timer_irq_full_enable(TIMER_CLOCKEVENT);
-		jz4740_timer_enable(TIMER_CLOCKEVENT);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		jz4740_timer_disable(TIMER_CLOCKEVENT);
-		break;
-	default:
-		break;
-	}
+	jz4740_timer_set_count(TIMER_CLOCKEVENT, 0);
+	jz4740_timer_set_period(TIMER_CLOCKEVENT, jz4740_jiffies_per_tick);
+	jz4740_timer_irq_full_enable(TIMER_CLOCKEVENT);
+	jz4740_timer_enable(TIMER_CLOCKEVENT);
+
+	return 0;
+}
+
+static int jz4740_clockevent_resume(struct clock_event_device *evt)
+{
+	jz4740_timer_irq_full_enable(TIMER_CLOCKEVENT);
+	jz4740_timer_enable(TIMER_CLOCKEVENT);
+
+	return 0;
+}
+
+static int jz4740_clockevent_shutdown(struct clock_event_device *evt)
+{
+	jz4740_timer_disable(TIMER_CLOCKEVENT);
+
+	return 0;
 }
 
 static int jz4740_clockevent_set_next(unsigned long evt,
@@ -100,7 +105,10 @@
 	.name = "jz4740-timer",
 	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.set_next_event = jz4740_clockevent_set_next,
-	.set_mode = jz4740_clockevent_set_mode,
+	.set_state_shutdown = jz4740_clockevent_shutdown,
+	.set_state_periodic = jz4740_clockevent_set_periodic,
+	.set_state_oneshot = jz4740_clockevent_shutdown,
+	.tick_resume = jz4740_clockevent_resume,
 	.rating = 200,
 #ifdef CONFIG_MACH_JZ4740
 	.irq = JZ4740_IRQ_TCU0,
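
This is the first of several cevt-* conversions in this pull that replace
the single set_mode() switch with per-state callbacks returning int. The
shape they all share, sketched with placeholder hardware hooks (the
callback names and struct fields are the real clockevents API; everything
prefixed "my_" is illustrative):

#include <linux/clockchips.h>

static int my_timer_shutdown(struct clock_event_device *evt)
{
	/* stop the hardware timer */
	return 0;
}

static int my_timer_set_periodic(struct clock_event_device *evt)
{
	/* program the reload value, enable the interrupt, start */
	return 0;
}

static struct clock_event_device my_clockevent = {
	.name			= "my-timer",
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown	= my_timer_shutdown,
	.set_state_periodic	= my_timer_set_periodic,
	.set_state_oneshot	= my_timer_shutdown,	/* armed later */
	.tick_resume		= my_timer_shutdown,
};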
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 3156c8d..d982be1 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -99,6 +99,7 @@
 obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event_mipsxx.o
 
 obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o
+obj-$(CONFIG_UPROBES)		+= uprobes.o
 
 obj-$(CONFIG_MIPS_CM)		+= mips-cm.o
 obj-$(CONFIG_MIPS_CPC)		+= mips-cpc.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 072fab1..154e203 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -128,6 +128,7 @@
 	       thread.cp0_baduaddr);
 	OFFSET(THREAD_ECODE, task_struct, \
 	       thread.error_code);
+	OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr);
 	BLANK();
 }
 
@@ -245,17 +246,6 @@
 }
 #endif
 
-#ifdef CONFIG_MIPS32_COMPAT
-void output_sc32_defines(void)
-{
-	COMMENT("Linux 32-bit sigcontext offsets.");
-	OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
-	OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
-	OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
-	BLANK();
-}
-#endif
-
 void output_signal_defined(void)
 {
 	COMMENT("Linux signal numbers.");
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index 7976457..940ac00 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -40,8 +40,8 @@
  * The general purpose timer ticks at 1MHz independent of
  * the rest of the system.
  */
-static void sibyte_set_mode(enum clock_event_mode mode,
-			   struct clock_event_device *evt)
+
+static int sibyte_set_periodic(struct clock_event_device *evt)
 {
 	unsigned int cpu = smp_processor_id();
 	void __iomem *cfg, *init;
@@ -49,24 +49,22 @@
 	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
 	init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		__raw_writeq(0, cfg);
-		__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
-		__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
-			     cfg);
-		break;
+	__raw_writeq(0, cfg);
+	__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+	__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg);
+	return 0;
+}
 
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* Stop the timer until we actually program a shot */
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		__raw_writeq(0, cfg);
-		break;
+static int sibyte_shutdown(struct clock_event_device *evt)
+{
+	unsigned int cpu = smp_processor_id();
+	void __iomem *cfg;
 
-	case CLOCK_EVT_MODE_UNUSED:	/* shuddup gcc */
-	case CLOCK_EVT_MODE_RESUME:
-		;
-	}
+	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+
+	/* Stop the timer until we actually program a shot */
+	__raw_writeq(0, cfg);
+	return 0;
 }
 
 static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
@@ -91,7 +89,7 @@
 	void __iomem *cfg;
 	unsigned long tmode;
 
-	if (cd->mode == CLOCK_EVT_MODE_PERIODIC)
+	if (clockevent_state_periodic(cd))
 		tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
 	else
 		tmode = 0;
@@ -130,7 +128,9 @@
 	cd->irq			= irq;
 	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= sibyte_next_event;
-	cd->set_mode		= sibyte_set_mode;
+	cd->set_state_shutdown	= sibyte_shutdown;
+	cd->set_state_periodic	= sibyte_set_periodic;
+	cd->set_state_oneshot	= sibyte_shutdown;
 	clockevents_register_device(cd);
 
 	bcm1480_mask_irq(cpu, irq);
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
index ff1f01b..77a5ddf 100644
--- a/arch/mips/kernel/cevt-ds1287.c
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -59,27 +59,32 @@
 	return -EINVAL;
 }
 
-static void ds1287_set_mode(enum clock_event_mode mode,
-			    struct clock_event_device *evt)
+static int ds1287_shutdown(struct clock_event_device *evt)
 {
 	u8 val;
 
 	spin_lock(&rtc_lock);
 
 	val = CMOS_READ(RTC_REG_B);
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		val |= RTC_PIE;
-		break;
-	default:
-		val &= ~RTC_PIE;
-		break;
-	}
-
+	val &= ~RTC_PIE;
 	CMOS_WRITE(val, RTC_REG_B);
 
 	spin_unlock(&rtc_lock);
+	return 0;
+}
+
+static int ds1287_set_periodic(struct clock_event_device *evt)
+{
+	u8 val;
+
+	spin_lock(&rtc_lock);
+
+	val = CMOS_READ(RTC_REG_B);
+	val |= RTC_PIE;
+	CMOS_WRITE(val, RTC_REG_B);
+
+	spin_unlock(&rtc_lock);
+	return 0;
 }
 
 static void ds1287_event_handler(struct clock_event_device *dev)
@@ -87,11 +92,13 @@
 }
 
 static struct clock_event_device ds1287_clockevent = {
-	.name		= "ds1287",
-	.features	= CLOCK_EVT_FEAT_PERIODIC,
-	.set_next_event = ds1287_set_next_event,
-	.set_mode	= ds1287_set_mode,
-	.event_handler	= ds1287_event_handler,
+	.name			= "ds1287",
+	.features		= CLOCK_EVT_FEAT_PERIODIC,
+	.set_next_event		= ds1287_set_next_event,
+	.set_state_shutdown	= ds1287_shutdown,
+	.set_state_periodic	= ds1287_set_periodic,
+	.tick_resume		= ds1287_shutdown,
+	.event_handler		= ds1287_event_handler,
 };
 
 static irqreturn_t ds1287_interrupt(int irq, void *dev_id)
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
index f069460..6604005 100644
--- a/arch/mips/kernel/cevt-gt641xx.c
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -64,8 +64,7 @@
 	return 0;
 }
 
-static void gt641xx_timer0_set_mode(enum clock_event_mode mode,
-				    struct clock_event_device *evt)
+static int gt641xx_timer0_shutdown(struct clock_event_device *evt)
 {
 	u32 ctrl;
 
@@ -73,21 +72,39 @@
 
 	ctrl = GT_READ(GT_TC_CONTROL_OFS);
 	ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK;
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		ctrl |= GT_TC_CONTROL_ENTC0_MSK;
-		break;
-	default:
-		break;
-	}
-
 	GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
 
 	raw_spin_unlock(&gt641xx_timer_lock);
+	return 0;
+}
+
+static int gt641xx_timer0_set_oneshot(struct clock_event_device *evt)
+{
+	u32 ctrl;
+
+	raw_spin_lock(&gt641xx_timer_lock);
+
+	ctrl = GT_READ(GT_TC_CONTROL_OFS);
+	ctrl &= ~GT_TC_CONTROL_SELTC0_MSK;
+	ctrl |= GT_TC_CONTROL_ENTC0_MSK;
+	GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+	raw_spin_unlock(&gt641xx_timer_lock);
+	return 0;
+}
+
+static int gt641xx_timer0_set_periodic(struct clock_event_device *evt)
+{
+	u32 ctrl;
+
+	raw_spin_lock(&gt641xx_timer_lock);
+
+	ctrl = GT_READ(GT_TC_CONTROL_OFS);
+	ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK;
+	GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+	raw_spin_unlock(&gt641xx_timer_lock);
+	return 0;
 }
 
 static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
@@ -95,12 +112,16 @@
 }
 
 static struct clock_event_device gt641xx_timer0_clockevent = {
-	.name		= "gt641xx-timer0",
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.irq		= GT641XX_TIMER0_IRQ,
-	.set_next_event = gt641xx_timer0_set_next_event,
-	.set_mode	= gt641xx_timer0_set_mode,
-	.event_handler	= gt641xx_timer0_event_handler,
+	.name			= "gt641xx-timer0",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.irq			= GT641XX_TIMER0_IRQ,
+	.set_next_event		= gt641xx_timer0_set_next_event,
+	.set_state_shutdown	= gt641xx_timer0_shutdown,
+	.set_state_periodic	= gt641xx_timer0_set_periodic,
+	.set_state_oneshot	= gt641xx_timer0_set_oneshot,
+	.tick_resume		= gt641xx_timer0_shutdown,
+	.event_handler		= gt641xx_timer0_event_handler,
 };
 
 static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id)
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index d70c4d8..8dfe6a6 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -28,12 +28,6 @@
 	return res;
 }
 
-void mips_set_clock_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
-{
-	/* Nothing to do ...  */
-}
-
 DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
 int cp0_timer_irq_installed;
 
@@ -174,6 +168,11 @@
 	return 1;
 }
 
+unsigned int __weak get_c0_compare_int(void)
+{
+	return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+}
+
 int r4k_clockevent_init(void)
 {
 	unsigned int cpu = smp_processor_id();
@@ -189,11 +188,9 @@
 	/*
 	 * With vectored interrupts things are getting platform specific.
 	 * get_c0_compare_int is a hook to allow a platform to return the
-	 * interrupt number of it's liking.
+	 * interrupt number of its liking.
 	 */
-	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
-	if (get_c0_compare_int)
-		irq = get_c0_compare_int();
+	irq = get_c0_compare_int();
 
 	cd = &per_cpu(mips_clockevent_device, cpu);
 
@@ -212,7 +209,6 @@
 	cd->irq			= irq;
 	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= mips_next_event;
-	cd->set_mode		= mips_set_clock_mode;
 	cd->event_handler	= mips_event_handler;
 
 	clockevents_register_device(cd);
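
The r4k change above turns get_c0_compare_int() from an optional hook
behind a NULL check into a __weak default, so callers no longer test for
it. A sketch of the pattern; the platform function and IRQ macro below are
hypothetical:

/* generic code (as in the hunk above): */
unsigned int __weak get_c0_compare_int(void)
{
	return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
}

/* a platform file overrides the weak symbol at link time: */
unsigned int get_c0_compare_int(void)
{
	return MY_PLATFORM_TIMER_IRQ;	/* assumed platform IRQ */
}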
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index 5ea6d6b..3d860ef 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -38,8 +38,20 @@
  * The general purpose timer ticks at 1MHz independent of
  * the rest of the system.
  */
-static void sibyte_set_mode(enum clock_event_mode mode,
-			   struct clock_event_device *evt)
+
+static int sibyte_shutdown(struct clock_event_device *evt)
+{
+	void __iomem *cfg;
+
+	cfg = IOADDR(A_SCD_TIMER_REGISTER(smp_processor_id(), R_SCD_TIMER_CFG));
+
+	/* Stop the timer until we actually program a shot */
+	__raw_writeq(0, cfg);
+
+	return 0;
+}
+
+static int sibyte_set_periodic(struct clock_event_device *evt)
 {
 	unsigned int cpu = smp_processor_id();
 	void __iomem *cfg, *init;
@@ -47,24 +59,11 @@
 	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
 	init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		__raw_writeq(0, cfg);
-		__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
-		__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
-			     cfg);
-		break;
+	__raw_writeq(0, cfg);
+	__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+	__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg);
 
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* Stop the timer until we actually program a shot */
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		__raw_writeq(0, cfg);
-		break;
-
-	case CLOCK_EVT_MODE_UNUSED:	/* shuddup gcc */
-	case CLOCK_EVT_MODE_RESUME:
-		;
-	}
+	return 0;
 }
 
 static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
@@ -89,7 +88,7 @@
 	void __iomem *cfg;
 	unsigned long tmode;
 
-	if (cd->mode == CLOCK_EVT_MODE_PERIODIC)
+	if (clockevent_state_periodic(cd))
 		tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
 	else
 		tmode = 0;
@@ -129,7 +128,9 @@
 	cd->irq			= irq;
 	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= sibyte_next_event;
-	cd->set_mode		= sibyte_set_mode;
+	cd->set_state_shutdown	= sibyte_shutdown;
+	cd->set_state_periodic	= sibyte_set_periodic;
+	cd->set_state_oneshot	= sibyte_shutdown;
 	clockevents_register_device(cd);
 
 	sb1250_mask_irq(cpu, irq);
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index 7239324..537eefd 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -85,36 +85,54 @@
 	__raw_writel(0, &tmrptr->tisr);
 }
 
-static void txx9tmr_set_mode(enum clock_event_mode mode,
-			     struct clock_event_device *evt)
+static int txx9tmr_set_state_periodic(struct clock_event_device *evt)
 {
 	struct txx9_clock_event_device *txx9_cd =
 		container_of(evt, struct txx9_clock_event_device, cd);
 	struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
 
 	txx9tmr_stop_and_clear(tmrptr);
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		__raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE,
-			     &tmrptr->itmr);
-		/* start timer */
-		__raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >>
-			     evt->shift,
-			     &tmrptr->cpra);
-		__raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		__raw_writel(0, &tmrptr->itmr);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		__raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		__raw_writel(TIMER_CCD, &tmrptr->ccdr);
-		__raw_writel(0, &tmrptr->itmr);
-		break;
-	}
+
+	__raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE, &tmrptr->itmr);
+	/* start timer */
+	__raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >> evt->shift,
+		     &tmrptr->cpra);
+	__raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+	return 0;
+}
+
+static int txx9tmr_set_state_oneshot(struct clock_event_device *evt)
+{
+	struct txx9_clock_event_device *txx9_cd =
+		container_of(evt, struct txx9_clock_event_device, cd);
+	struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+	txx9tmr_stop_and_clear(tmrptr);
+	__raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr);
+	return 0;
+}
+
+static int txx9tmr_set_state_shutdown(struct clock_event_device *evt)
+{
+	struct txx9_clock_event_device *txx9_cd =
+		container_of(evt, struct txx9_clock_event_device, cd);
+	struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+	txx9tmr_stop_and_clear(tmrptr);
+	__raw_writel(0, &tmrptr->itmr);
+	return 0;
+}
+
+static int txx9tmr_tick_resume(struct clock_event_device *evt)
+{
+	struct txx9_clock_event_device *txx9_cd =
+		container_of(evt, struct txx9_clock_event_device, cd);
+	struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+	txx9tmr_stop_and_clear(tmrptr);
+	__raw_writel(TIMER_CCD, &tmrptr->ccdr);
+	__raw_writel(0, &tmrptr->itmr);
+	return 0;
 }
 
 static int txx9tmr_set_next_event(unsigned long delta,
@@ -133,12 +151,15 @@
 
 static struct txx9_clock_event_device txx9_clock_event_device = {
 	.cd = {
-		.name		= "TXx9",
-		.features	= CLOCK_EVT_FEAT_PERIODIC |
-				  CLOCK_EVT_FEAT_ONESHOT,
-		.rating		= 200,
-		.set_mode	= txx9tmr_set_mode,
-		.set_next_event = txx9tmr_set_next_event,
+		.name			= "TXx9",
+		.features		= CLOCK_EVT_FEAT_PERIODIC |
+					  CLOCK_EVT_FEAT_ONESHOT,
+		.rating			= 200,
+		.set_state_shutdown	= txx9tmr_set_state_shutdown,
+		.set_state_periodic	= txx9tmr_set_state_periodic,
+		.set_state_oneshot	= txx9tmr_set_state_oneshot,
+		.tick_resume		= txx9tmr_tick_resume,
+		.set_next_event		= txx9tmr_set_next_event,
 	},
 };
 
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 1b6ca63..9f71c06 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -152,7 +152,7 @@
 
 	/* Enter the coherent domain */
 	li	t0, 0xff
-	PTR_S	t0, GCR_CL_COHERENCE_OFS(v1)
+	sw	t0, GCR_CL_COHERENCE_OFS(v1)
 	ehb
 
 	/* Jump to kseg0 */
@@ -302,7 +302,7 @@
 	PTR_L	t0, 0(t0)
 
 	/* Calculate a pointer to this core's struct core_boot_config */
-	PTR_L	t0, GCR_CL_ID_OFS(t0)
+	lw	t0, GCR_CL_ID_OFS(t0)
 	li	t1, COREBOOTCFG_SIZE
 	mul	t0, t0, t1
 	PTR_LA	t1, mips_cps_core_bootcfg
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index dbe0792..571a8e6 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -32,6 +32,9 @@
 #include <asm/spram.h>
 #include <asm/uaccess.h>
 
+/* Hardware capabilities */
+unsigned int elf_hwcap __read_mostly;
+
 /*
  * Get the FPU Implementation/Revision.
  */
@@ -188,7 +191,7 @@
 static int mips_ftlb_disabled;
 static int mips_has_ftlb_configured;
 
-static void set_ftlb_enable(struct cpuinfo_mips *c, int enable);
+static int set_ftlb_enable(struct cpuinfo_mips *c, int enable);
 
 static int __init ftlb_disable(char *s)
 {
@@ -202,7 +205,10 @@
 		return 1;
 
 	/* Disable it in the boot cpu */
-	set_ftlb_enable(&cpu_data[0], 0);
+	if (set_ftlb_enable(&cpu_data[0], 0)) {
+		pr_warn("Can't turn FTLB off\n");
+		return 1;
+	}
 
 	back_to_back_c0_hazard();
 
@@ -364,30 +370,41 @@
 		return 3;
 }
 
-static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
+static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
 {
-	unsigned int config6;
+	unsigned int config;
 
 	/* It's implementation dependent how the FTLB can be enabled */
 	switch (c->cputype) {
 	case CPU_PROAPTIV:
 	case CPU_P5600:
 		/* proAptiv & related cores use Config6 to enable the FTLB */
-		config6 = read_c0_config6();
+		config = read_c0_config6();
 		/* Clear the old probability value */
-		config6 &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
+		config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
 		if (enable)
 			/* Enable FTLB */
-			write_c0_config6(config6 |
+			write_c0_config6(config |
 					 (calculate_ftlb_probability(c)
 					  << MIPS_CONF6_FTLBP_SHIFT)
 					 | MIPS_CONF6_FTLBEN);
 		else
 			/* Disable FTLB */
-			write_c0_config6(config6 &  ~MIPS_CONF6_FTLBEN);
-		back_to_back_c0_hazard();
+			write_c0_config6(config &  ~MIPS_CONF6_FTLBEN);
 		break;
+	case CPU_I6400:
+		/* I6400 & related cores use Config7 to configure FTLB */
+		config = read_c0_config7();
+		/* Clear the old probability value */
+		config &= ~(3 << MIPS_CONF7_FTLBP_SHIFT);
+		write_c0_config7(config | (calculate_ftlb_probability(c)
+					   << MIPS_CONF7_FTLBP_SHIFT));
+		break;
+	default:
+		return 1;
 	}
+
+	return 0;
 }
 
 static inline unsigned int decode_config0(struct cpuinfo_mips *c)
@@ -524,6 +541,8 @@
 	}
 	if (config3 & MIPS_CONF3_CDMM)
 		c->options |= MIPS_CPU_CDMM;
+	if (config3 & MIPS_CONF3_SP)
+		c->options |= MIPS_CPU_SP;
 
 	return config3 & MIPS_CONF_M;
 }
@@ -540,7 +559,16 @@
 	if (cpu_has_tlb) {
 		if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
 			c->options |= MIPS_CPU_TLBINV;
-		mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+		/*
+		 * This is a bit ugly. R6 has dropped that field from
+		 * config4 and the only valid configuration is VTLB+FTLB so
+		 * set a good value for mmuextdef for that case.
+		 */
+		if (cpu_has_mips_r6)
+			mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT;
+		else
+			mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+
 		switch (mmuextdef) {
 		case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
 			c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
@@ -1121,6 +1149,10 @@
 		c->cputype = CPU_P5600;
 		__cpu_name[cpu] = "MIPS P5600";
 		break;
+	case PRID_IMP_I6400:
+		c->cputype = CPU_I6400;
+		__cpu_name[cpu] = "MIPS I6400";
+		break;
 	case PRID_IMP_M5150:
 		c->cputype = CPU_M5150;
 		__cpu_name[cpu] = "MIPS M5150";
@@ -1492,10 +1524,14 @@
 	else
 		c->srsets = 1;
 
+	if (cpu_has_mips_r6)
+		elf_hwcap |= HWCAP_MIPS_R6;
+
 	if (cpu_has_msa) {
 		c->msa_id = cpu_get_msa_id();
 		WARN(c->msa_id & MSA_IR_WRPF,
 		     "Vector register partitioning unimplemented!");
+		elf_hwcap |= HWCAP_MIPS_MSA;
 	}
 
 	cpu_probe_vmbits(c);
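
With elf_hwcap now populated, userland can test for R6 or MSA via the
auxiliary vector instead of parsing /proc/cpuinfo. A sketch, assuming the
HWCAP_MIPS_* bits from the UAPI hwcap header added in this cycle:

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	if (hwcap & HWCAP_MIPS_MSA)
		printf("MSA vector unit available\n");
	if (hwcap & HWCAP_MIPS_R6)
		printf("MIPS R6 ISA\n");
	return 0;
}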
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index e4f62b7..ab1478d 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -196,6 +196,7 @@
 	case CPU_INTERAPTIV:
 	case CPU_M5150:
 	case CPU_QEMU_GENERIC:
+	case CPU_I6400:
 		cpu_wait = r4k_wait;
 		if (read_c0_config7() & MIPS_CONF7_WII)
 			cpu_wait = r4k_wait_irqoff;
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
index dda800e..3e586da 100644
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -51,7 +51,7 @@
 	/* Target must have the right alignment and ISA must be preserved. */
 	BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);
 
-	if (type == JUMP_LABEL_ENABLE) {
+	if (type == JUMP_LABEL_JMP) {
 		insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
 		insn.j_format.target = e->target >> J_RANGE_SHIFT;
 	} else {
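
For context on the rename: JUMP_LABEL_JMP is the transform type the core
passes when a static-key site must be patched from a nop into a jump. A
minimal consumer sketch using the long-standing static_key API; the key
and the functions prefixed "my_" are made up:

#include <linux/jump_label.h>

static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

static void my_hot_path(void)
{
	if (static_key_false(&my_feature_key)) {
		/* rarely-enabled path, reached via the patched jump */
	}
}

static void my_enable_feature(void)
{
	static_key_slow_inc(&my_feature_key);	/* repatches all sites */
}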
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 85bbe9b..b8ceee5 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -15,11 +15,131 @@
 
 void __iomem *mips_cm_base;
 void __iomem *mips_cm_l2sync_base;
+int mips_cm_is64;
+
+static char *cm2_tr[8] = {
+	"mem",	"gcr",	"gic",	"mmio",
+	"0x04", "cpc", "0x06", "0x07"
+};
+
+/* CM3 Tag ECC transaction type */
+static char *cm3_tr[16] = {
+	[0x0] = "ReqNoData",
+	[0x1] = "0x1",
+	[0x2] = "ReqWData",
+	[0x3] = "0x3",
+	[0x4] = "IReqNoResp",
+	[0x5] = "IReqWResp",
+	[0x6] = "IReqNoRespDat",
+	[0x7] = "IReqWRespDat",
+	[0x8] = "RespNoData",
+	[0x9] = "RespDataFol",
+	[0xa] = "RespWData",
+	[0xb] = "RespDataOnly",
+	[0xc] = "IRespNoData",
+	[0xd] = "IRespDataFol",
+	[0xe] = "IRespWData",
+	[0xf] = "IRespDataOnly"
+};
+
+static char *cm2_cmd[32] = {
+	[0x00] = "0x00",
+	[0x01] = "Legacy Write",
+	[0x02] = "Legacy Read",
+	[0x03] = "0x03",
+	[0x04] = "0x04",
+	[0x05] = "0x05",
+	[0x06] = "0x06",
+	[0x07] = "0x07",
+	[0x08] = "Coherent Read Own",
+	[0x09] = "Coherent Read Share",
+	[0x0a] = "Coherent Read Discard",
+	[0x0b] = "Coherent Read Share Always",
+	[0x0c] = "Coherent Upgrade",
+	[0x0d] = "Coherent Writeback",
+	[0x0e] = "0x0e",
+	[0x0f] = "0x0f",
+	[0x10] = "Coherent Copyback",
+	[0x11] = "Coherent Copyback Invalidate",
+	[0x12] = "Coherent Invalidate",
+	[0x13] = "Coherent Write Invalidate",
+	[0x14] = "Coherent Completion Sync",
+	[0x15] = "0x15",
+	[0x16] = "0x16",
+	[0x17] = "0x17",
+	[0x18] = "0x18",
+	[0x19] = "0x19",
+	[0x1a] = "0x1a",
+	[0x1b] = "0x1b",
+	[0x1c] = "0x1c",
+	[0x1d] = "0x1d",
+	[0x1e] = "0x1e",
+	[0x1f] = "0x1f"
+};
+
+/* CM3 Tag ECC command type */
+static char *cm3_cmd[16] = {
+	[0x0] = "Legacy Read",
+	[0x1] = "Legacy Write",
+	[0x2] = "Coherent Read Own",
+	[0x3] = "Coherent Read Share",
+	[0x4] = "Coherent Read Discard",
+	[0x5] = "Coherent Evicted",
+	[0x6] = "Coherent Upgrade",
+	[0x7] = "Coherent Upgrade for Store Conditional",
+	[0x8] = "Coherent Writeback",
+	[0x9] = "Coherent Write Invalidate",
+	[0xa] = "0xa",
+	[0xb] = "0xb",
+	[0xc] = "0xc",
+	[0xd] = "0xd",
+	[0xe] = "0xe",
+	[0xf] = "0xf"
+};
+
+/* CM3 Tag ECC command group */
+static char *cm3_cmd_group[8] = {
+	[0x0] = "Normal",
+	[0x1] = "Registers",
+	[0x2] = "TLB",
+	[0x3] = "0x3",
+	[0x4] = "L1I",
+	[0x5] = "L1D",
+	[0x6] = "L3",
+	[0x7] = "L2"
+};
+
+static char *cm2_core[8] = {
+	"Invalid/OK",	"Invalid/Data",
+	"Shared/OK",	"Shared/Data",
+	"Modified/OK",	"Modified/Data",
+	"Exclusive/OK", "Exclusive/Data"
+};
+
+static char *cm2_causes[32] = {
+	"None", "GC_WR_ERR", "GC_RD_ERR", "COH_WR_ERR",
+	"COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
+	"0x08", "0x09", "0x0a", "0x0b",
+	"0x0c", "0x0d", "0x0e", "0x0f",
+	"0x10", "0x11", "0x12", "0x13",
+	"0x14", "0x15", "0x16", "INTVN_WR_ERR",
+	"INTVN_RD_ERR", "0x19", "0x1a", "0x1b",
+	"0x1c", "0x1d", "0x1e", "0x1f"
+};
+
+static char *cm3_causes[32] = {
+	"0x0", "MP_CORRECTABLE_ECC_ERR", "MP_REQUEST_DECODE_ERR",
+	"MP_UNCORRECTABLE_ECC_ERR", "MP_PARITY_ERR", "MP_COHERENCE_ERR",
+	"CMBIU_REQUEST_DECODE_ERR", "CMBIU_PARITY_ERR", "CMBIU_AXI_RESP_ERR",
+	"0x9", "RBI_BUS_ERR", "0xb", "0xc", "0xd", "0xe", "0xf", "0x10",
+	"0x11", "0x12", "0x13", "0x14", "0x15", "0x16", "0x17", "0x18",
+	"0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f"
+};
 
 phys_addr_t __mips_cm_phys_base(void)
 {
 	u32 config3 = read_c0_config3();
-	u32 cmgcr;
+	unsigned long cmgcr;
 
 	/* Check the CMGCRBase register is implemented */
 	if (!(config3 & MIPS_CONF3_CMGCR))
@@ -81,6 +201,13 @@
 	phys_addr_t addr;
 	u32 base_reg;
 
+	/*
+	 * No need to probe again if we have already been
+	 * here before.
+	 */
+	if (mips_cm_base)
+		return 0;
+
 	addr = mips_cm_phys_base();
 	BUG_ON((addr & CM_GCR_BASE_GCRBASE_MSK) != addr);
 	if (!addr)
@@ -117,5 +244,133 @@
 	/* probe for an L2-only sync region */
 	mips_cm_probe_l2sync();
 
+	/* determine register width for this CM */
+	mips_cm_is64 = config_enabled(CONFIG_64BIT) &&
+		       (mips_cm_revision() >= CM_REV_CM3);
+
 	return 0;
 }
+
+void mips_cm_error_report(void)
+{
+	unsigned long revision = mips_cm_revision();
+	/*
+	 * CM3 has a 64-bit Error cause register with bits 57:0 containing
+	 * the error info and bits 63:58 the error type. For older CMs,
+	 * everything is contained in a single 32-bit register (bits 26:0
+	 * and 31:27 respectively). Even though cm_error is u64, we simply
+	 * ignore the upper word for CM2.
+	 */
+	u64 cm_error = read_gcr_error_cause();
+	int cm_error_cause_sft = CM_GCR_ERROR_CAUSE_ERRTYPE_SHF +
+				 ((revision >= CM_REV_CM3) ? 31 : 0);
+	unsigned long cm_addr = read_gcr_error_addr();
+	unsigned long cm_other = read_gcr_error_mult();
+	int ocause, cause;
+	char buf[256];
+
+	if (!mips_cm_present())
+		return;
+
+	cause = cm_error >> cm_error_cause_sft;
+
+	if (!cause)
+		/* All good */
+		return;
+
+	ocause = cm_other >> CM_GCR_ERROR_MULT_ERR2ND_SHF;
+	if (revision < CM_REV_CM3) { /* CM2 */
+		if (cause < 16) {
+			unsigned long cca_bits = (cm_error >> 15) & 7;
+			unsigned long tr_bits = (cm_error >> 12) & 7;
+			unsigned long cmd_bits = (cm_error >> 7) & 0x1f;
+			unsigned long stag_bits = (cm_error >> 3) & 15;
+			unsigned long sport_bits = (cm_error >> 0) & 7;
+
+			snprintf(buf, sizeof(buf),
+				 "CCA=%lu TR=%s MCmd=%s STag=%lu "
+				 "SPort=%lu\n", cca_bits, cm2_tr[tr_bits],
+				 cm2_cmd[cmd_bits], stag_bits, sport_bits);
+		} else {
+			/* glob state & sresp together */
+			unsigned long c3_bits = (cm_error >> 18) & 7;
+			unsigned long c2_bits = (cm_error >> 15) & 7;
+			unsigned long c1_bits = (cm_error >> 12) & 7;
+			unsigned long c0_bits = (cm_error >> 9) & 7;
+			unsigned long sc_bit = (cm_error >> 8) & 1;
+			unsigned long cmd_bits = (cm_error >> 3) & 0x1f;
+			unsigned long sport_bits = (cm_error >> 0) & 7;
+
+			snprintf(buf, sizeof(buf),
+				 "C3=%s C2=%s C1=%s C0=%s SC=%s "
+				 "MCmd=%s SPort=%lu\n",
+				 cm2_core[c3_bits], cm2_core[c2_bits],
+				 cm2_core[c1_bits], cm2_core[c0_bits],
+				 sc_bit ? "True" : "False",
+				 cm2_cmd[cmd_bits], sport_bits);
+		}
+		pr_err("CM_ERROR=%08llx %s <%s>\n", cm_error,
+		       cm2_causes[cause], buf);
+		pr_err("CM_ADDR =%08lx\n", cm_addr);
+		pr_err("CM_OTHER=%08lx %s\n", cm_other, cm2_causes[ocause]);
+	} else { /* CM3 */
+		/* Used by cause == {1,2,3} */
+		unsigned long core_id_bits = (cm_error >> 22) & 0xf;
+		unsigned long vp_id_bits = (cm_error >> 18) & 0xf;
+		unsigned long cmd_bits = (cm_error >> 14) & 0xf;
+		unsigned long cmd_group_bits = (cm_error >> 11) & 0xf;
+		unsigned long cm3_cca_bits = (cm_error >> 8) & 7;
+		unsigned long mcp_bits = (cm_error >> 5) & 0xf;
+		unsigned long cm3_tr_bits = (cm_error >> 1) & 0xf;
+		unsigned long sched_bit = cm_error & 0x1;
+
+		if (cause == 1 || cause == 3) { /* Tag ECC */
+			unsigned long tag_ecc = (cm_error >> 57) & 0x1;
+			unsigned long tag_way_bits = (cm_error >> 29) & 0xffff;
+			unsigned long dword_bits = (cm_error >> 49) & 0xff;
+			unsigned long data_way_bits = (cm_error >> 45) & 0xf;
+			unsigned long data_sets_bits = (cm_error >> 29) & 0xfff;
+			unsigned long bank_bit = (cm_error >> 28) & 0x1;
+			snprintf(buf, sizeof(buf),
+				 "%s ECC Error: Way=%lu (DWORD=%lu, Sets=%lu) "
+				 "Bank=%lu CoreID=%lu VPID=%lu Command=%s "
+				 "Command Group=%s CCA=%lu MCP=%d "
+				 "Transaction type=%s Scheduler=%lu\n",
+				 tag_ecc ? "TAG" : "DATA",
+				 tag_ecc ? (unsigned long)ffs(tag_way_bits) - 1 :
+				 data_way_bits, dword_bits, data_sets_bits,
+				 bank_bit,
+				 core_id_bits, vp_id_bits,
+				 cm3_cmd[cmd_bits],
+				 cm3_cmd_group[cmd_group_bits],
+				 cm3_cca_bits, 1 << mcp_bits,
+				 cm3_tr[cm3_tr_bits], sched_bit);
+		} else if (cause == 2) {
+			unsigned long data_error_type = (cm_error >> 41) & 0xfff;
+			unsigned long data_decode_cmd = (cm_error >> 37) & 0xf;
+			unsigned long data_decode_group = (cm_error >> 34) & 0x7;
+			unsigned long data_decode_destination_id = (cm_error >> 28) & 0x3f;
+
+			snprintf(buf, sizeof(buf),
+				 "Decode Request Error: Type=%lu, Command=%lu "
+				 "Command Group=%lu Destination ID=%lu "
+				 "CoreID=%lu VPID=%lu Command=%s "
+				 "Command Group=%s CCA=%lu MCP=%d "
+				 "Transaction type=%s Scheduler=%lu\n",
+				 data_error_type, data_decode_cmd,
+				 data_decode_group, data_decode_destination_id,
+				 core_id_bits, vp_id_bits,
+				 cm3_cmd[cmd_bits],
+				 cm3_cmd_group[cmd_group_bits],
+				 cm3_cca_bits, 1 << mcp_bits,
+				 cm3_tr[cm3_tr_bits], sched_bit);
+		}
+
+		pr_err("CM_ERROR=%llx %s <%s>\n", cm_error,
+		       cm3_causes[cause], buf);
+		pr_err("CM_ADDR =%lx\n", cm_addr);
+		pr_err("CM_OTHER=%lx %s\n", cm_other, cm3_causes[ocause]);
+	}
+
+	/* reprime cause register */
+	write_gcr_error_cause(0);
+}
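
The decoding in mips_cm_error_report() is open-coded shift-and-mask
extraction of CM_ERROR fields. The same idea expressed as a small helper,
shown as a sketch (field positions copied from the CM2 branch above):

#include <linux/types.h>

static inline unsigned long cm_err_field(u64 err, unsigned int shift,
					 unsigned long mask)
{
	return (err >> shift) & mask;
}

/*
 * e.g. for a CM2 cause below 16:
 *	cca  = cm_err_field(cm_error, 15, 0x7);
 *	tr   = cm_err_field(cm_error, 12, 0x7);
 *	mcmd = cm_err_field(cm_error,  7, 0x1f);
 */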
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 1196450..8af4d62 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -21,9 +21,16 @@
 
 static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
 
-phys_addr_t __weak mips_cpc_phys_base(void)
+/**
+ * mips_cpc_phys_base - retrieve the physical base address of the CPC
+ *
+ * This function returns the physical base address of the Cluster Power
+ * Controller memory mapped registers, or 0 if no Cluster Power Controller
+ * is present.
+ */
+static phys_addr_t mips_cpc_phys_base(void)
 {
-	u32 cpc_base;
+	unsigned long cpc_base;
 
 	if (!mips_cm_present())
 		return 0;
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index cc1b6fa..d7b8dd4 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1556,6 +1556,7 @@
 #endif
 		break;
 	case CPU_P5600:
+	case CPU_I6400:
 		/* 8-bit event numbers */
 		raw_id = config & 0x1ff;
 		base_id = raw_id & 0xff;
@@ -1717,6 +1718,11 @@
 		mipspmu.general_event_map = &mipsxxcore_event_map2;
 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
 		break;
+	case CPU_I6400:
+		mipspmu.name = "mips/I6400";
+		mipspmu.general_event_map = &mipsxxcore_event_map2;
+		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+		break;
 	case CPU_1004K:
 		mipspmu.name = "mips/1004K";
 		mipspmu.general_event_map = &mipsxxcore_event_map;
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 0614717..f63a289 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -267,6 +267,7 @@
 
 	/* CPUs which do not require the workaround */
 	case CPU_P5600:
+	case CPU_I6400:
 		return 0;
 
 	default:
@@ -671,6 +672,7 @@
 	case CPU_PROAPTIV:
 	case CPU_M5150:
 	case CPU_P5600:
+	case CPU_I6400:
 		stype_intervention = 0x2;
 		stype_memory = 0x3;
 		stype_ordering = 0x10;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index e933a30..4f0ac78 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -25,6 +25,7 @@
 #include <linux/regset.h>
 #include <linux/smp.h>
 #include <linux/security.h>
+#include <linux/stddef.h>
 #include <linux/tracehook.h>
 #include <linux/audit.h>
 #include <linux/seccomp.h>
@@ -490,6 +491,93 @@
 	REGSET_FPR,
 };
 
+struct pt_regs_offset {
+	const char *name;
+	int offset;
+};
+
+#define REG_OFFSET_NAME(reg, r) {					\
+	.name = #reg,							\
+	.offset = offsetof(struct pt_regs, r)				\
+}
+
+#define REG_OFFSET_END {						\
+	.name = NULL,							\
+	.offset = 0							\
+}
+
+static const struct pt_regs_offset regoffset_table[] = {
+	REG_OFFSET_NAME(r0, regs[0]),
+	REG_OFFSET_NAME(r1, regs[1]),
+	REG_OFFSET_NAME(r2, regs[2]),
+	REG_OFFSET_NAME(r3, regs[3]),
+	REG_OFFSET_NAME(r4, regs[4]),
+	REG_OFFSET_NAME(r5, regs[5]),
+	REG_OFFSET_NAME(r6, regs[6]),
+	REG_OFFSET_NAME(r7, regs[7]),
+	REG_OFFSET_NAME(r8, regs[8]),
+	REG_OFFSET_NAME(r9, regs[9]),
+	REG_OFFSET_NAME(r10, regs[10]),
+	REG_OFFSET_NAME(r11, regs[11]),
+	REG_OFFSET_NAME(r12, regs[12]),
+	REG_OFFSET_NAME(r13, regs[13]),
+	REG_OFFSET_NAME(r14, regs[14]),
+	REG_OFFSET_NAME(r15, regs[15]),
+	REG_OFFSET_NAME(r16, regs[16]),
+	REG_OFFSET_NAME(r17, regs[17]),
+	REG_OFFSET_NAME(r18, regs[18]),
+	REG_OFFSET_NAME(r19, regs[19]),
+	REG_OFFSET_NAME(r20, regs[20]),
+	REG_OFFSET_NAME(r21, regs[21]),
+	REG_OFFSET_NAME(r22, regs[22]),
+	REG_OFFSET_NAME(r23, regs[23]),
+	REG_OFFSET_NAME(r24, regs[24]),
+	REG_OFFSET_NAME(r25, regs[25]),
+	REG_OFFSET_NAME(r26, regs[26]),
+	REG_OFFSET_NAME(r27, regs[27]),
+	REG_OFFSET_NAME(r28, regs[28]),
+	REG_OFFSET_NAME(r29, regs[29]),
+	REG_OFFSET_NAME(r30, regs[30]),
+	REG_OFFSET_NAME(r31, regs[31]),
+	REG_OFFSET_NAME(c0_status, cp0_status),
+	REG_OFFSET_NAME(hi, hi),
+	REG_OFFSET_NAME(lo, lo),
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+	REG_OFFSET_NAME(acx, acx),
+#endif
+	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
+	REG_OFFSET_NAME(c0_cause, cp0_cause),
+	REG_OFFSET_NAME(c0_epc, cp0_epc),
+#ifdef CONFIG_MIPS_MT_SMTC
+	REG_OFFSET_NAME(c0_tcstatus, cp0_tcstatus),
+#endif
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	REG_OFFSET_NAME(mpl0, mpl[0]),
+	REG_OFFSET_NAME(mpl1, mpl[1]),
+	REG_OFFSET_NAME(mpl2, mpl[2]),
+	REG_OFFSET_NAME(mtp0, mtp[0]),
+	REG_OFFSET_NAME(mtp1, mtp[1]),
+	REG_OFFSET_NAME(mtp2, mtp[2]),
+#endif
+	REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name:       the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+	const struct pt_regs_offset *roff;
+
+	for (roff = regoffset_table; roff->name != NULL; roff++)
+		if (!strcmp(roff->name, name))
+			return roff->offset;
+	return -EINVAL;
+}
+
 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 
 static const struct user_regset mips_regsets[] = {
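
regs_query_register_offset() gives name-based access to pt_regs slots, as
used by kprobe-style event parsers. A usage sketch; the helper around it is
hypothetical, only regs_query_register_offset() comes from the hunk above:

#include <linux/ptrace.h>

static unsigned long read_named_reg(struct pt_regs *regs,
				    const char *name)
{
	int off = regs_query_register_offset(name);

	if (off < 0)
		return 0;	/* unknown register name */

	return *(unsigned long *)((char *)regs + off);
}

/* read_named_reg(regs, "r4") fetches the first argument register */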
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 1d88af2..f09546e 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -13,6 +13,7 @@
  * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
  */
 #include <asm/asm.h>
+#include <asm/asmmacro.h>
 #include <asm/errno.h>
 #include <asm/fpregdef.h>
 #include <asm/mipsregs.h>
@@ -35,6 +36,14 @@
 
 	.set	noreorder
 
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
 LEAF(_save_fp_context)
 	.set	push
 	SET_HARDFLOAT
@@ -54,117 +63,60 @@
 	 nop
 #endif
 	/* Store the 16 odd double precision registers */
-	EX	sdc1 $f1, SC_FPREGS+8(a0)
-	EX	sdc1 $f3, SC_FPREGS+24(a0)
-	EX	sdc1 $f5, SC_FPREGS+40(a0)
-	EX	sdc1 $f7, SC_FPREGS+56(a0)
-	EX	sdc1 $f9, SC_FPREGS+72(a0)
-	EX	sdc1 $f11, SC_FPREGS+88(a0)
-	EX	sdc1 $f13, SC_FPREGS+104(a0)
-	EX	sdc1 $f15, SC_FPREGS+120(a0)
-	EX	sdc1 $f17, SC_FPREGS+136(a0)
-	EX	sdc1 $f19, SC_FPREGS+152(a0)
-	EX	sdc1 $f21, SC_FPREGS+168(a0)
-	EX	sdc1 $f23, SC_FPREGS+184(a0)
-	EX	sdc1 $f25, SC_FPREGS+200(a0)
-	EX	sdc1 $f27, SC_FPREGS+216(a0)
-	EX	sdc1 $f29, SC_FPREGS+232(a0)
-	EX	sdc1 $f31, SC_FPREGS+248(a0)
+	EX	sdc1 $f1, 8(a0)
+	EX	sdc1 $f3, 24(a0)
+	EX	sdc1 $f5, 40(a0)
+	EX	sdc1 $f7, 56(a0)
+	EX	sdc1 $f9, 72(a0)
+	EX	sdc1 $f11, 88(a0)
+	EX	sdc1 $f13, 104(a0)
+	EX	sdc1 $f15, 120(a0)
+	EX	sdc1 $f17, 136(a0)
+	EX	sdc1 $f19, 152(a0)
+	EX	sdc1 $f21, 168(a0)
+	EX	sdc1 $f23, 184(a0)
+	EX	sdc1 $f25, 200(a0)
+	EX	sdc1 $f27, 216(a0)
+	EX	sdc1 $f29, 232(a0)
+	EX	sdc1 $f31, 248(a0)
 1:	.set	pop
 #endif
 
 	.set push
 	SET_HARDFLOAT
 	/* Store the 16 even double precision registers */
-	EX	sdc1 $f0, SC_FPREGS+0(a0)
-	EX	sdc1 $f2, SC_FPREGS+16(a0)
-	EX	sdc1 $f4, SC_FPREGS+32(a0)
-	EX	sdc1 $f6, SC_FPREGS+48(a0)
-	EX	sdc1 $f8, SC_FPREGS+64(a0)
-	EX	sdc1 $f10, SC_FPREGS+80(a0)
-	EX	sdc1 $f12, SC_FPREGS+96(a0)
-	EX	sdc1 $f14, SC_FPREGS+112(a0)
-	EX	sdc1 $f16, SC_FPREGS+128(a0)
-	EX	sdc1 $f18, SC_FPREGS+144(a0)
-	EX	sdc1 $f20, SC_FPREGS+160(a0)
-	EX	sdc1 $f22, SC_FPREGS+176(a0)
-	EX	sdc1 $f24, SC_FPREGS+192(a0)
-	EX	sdc1 $f26, SC_FPREGS+208(a0)
-	EX	sdc1 $f28, SC_FPREGS+224(a0)
-	EX	sdc1 $f30, SC_FPREGS+240(a0)
-	EX	sw t1, SC_FPC_CSR(a0)
+	EX	sdc1 $f0, 0(a0)
+	EX	sdc1 $f2, 16(a0)
+	EX	sdc1 $f4, 32(a0)
+	EX	sdc1 $f6, 48(a0)
+	EX	sdc1 $f8, 64(a0)
+	EX	sdc1 $f10, 80(a0)
+	EX	sdc1 $f12, 96(a0)
+	EX	sdc1 $f14, 112(a0)
+	EX	sdc1 $f16, 128(a0)
+	EX	sdc1 $f18, 144(a0)
+	EX	sdc1 $f20, 160(a0)
+	EX	sdc1 $f22, 176(a0)
+	EX	sdc1 $f24, 192(a0)
+	EX	sdc1 $f26, 208(a0)
+	EX	sdc1 $f28, 224(a0)
+	EX	sdc1 $f30, 240(a0)
+	EX	sw t1, 0(a1)
 	jr	ra
 	 li	v0, 0					# success
 	.set pop
 	END(_save_fp_context)
 
-#ifdef CONFIG_MIPS32_COMPAT
-	/* Save 32-bit process floating point context */
-LEAF(_save_fp_context32)
-	.set push
-	.set MIPS_ISA_ARCH_LEVEL_RAW
-	SET_HARDFLOAT
-	cfc1	t1, fcr31
-
-#ifndef CONFIG_CPU_MIPS64_R6
-	mfc0	t0, CP0_STATUS
-	sll	t0, t0, 5
-	bgez	t0, 1f			# skip storing odd if FR=0
-	 nop
-#endif
-
-	/* Store the 16 odd double precision registers */
-	EX      sdc1 $f1, SC32_FPREGS+8(a0)
-	EX      sdc1 $f3, SC32_FPREGS+24(a0)
-	EX      sdc1 $f5, SC32_FPREGS+40(a0)
-	EX      sdc1 $f7, SC32_FPREGS+56(a0)
-	EX      sdc1 $f9, SC32_FPREGS+72(a0)
-	EX      sdc1 $f11, SC32_FPREGS+88(a0)
-	EX      sdc1 $f13, SC32_FPREGS+104(a0)
-	EX      sdc1 $f15, SC32_FPREGS+120(a0)
-	EX      sdc1 $f17, SC32_FPREGS+136(a0)
-	EX      sdc1 $f19, SC32_FPREGS+152(a0)
-	EX      sdc1 $f21, SC32_FPREGS+168(a0)
-	EX      sdc1 $f23, SC32_FPREGS+184(a0)
-	EX      sdc1 $f25, SC32_FPREGS+200(a0)
-	EX      sdc1 $f27, SC32_FPREGS+216(a0)
-	EX      sdc1 $f29, SC32_FPREGS+232(a0)
-	EX      sdc1 $f31, SC32_FPREGS+248(a0)
-
-	/* Store the 16 even double precision registers */
-1:	EX	sdc1 $f0, SC32_FPREGS+0(a0)
-	EX	sdc1 $f2, SC32_FPREGS+16(a0)
-	EX	sdc1 $f4, SC32_FPREGS+32(a0)
-	EX	sdc1 $f6, SC32_FPREGS+48(a0)
-	EX	sdc1 $f8, SC32_FPREGS+64(a0)
-	EX	sdc1 $f10, SC32_FPREGS+80(a0)
-	EX	sdc1 $f12, SC32_FPREGS+96(a0)
-	EX	sdc1 $f14, SC32_FPREGS+112(a0)
-	EX	sdc1 $f16, SC32_FPREGS+128(a0)
-	EX	sdc1 $f18, SC32_FPREGS+144(a0)
-	EX	sdc1 $f20, SC32_FPREGS+160(a0)
-	EX	sdc1 $f22, SC32_FPREGS+176(a0)
-	EX	sdc1 $f24, SC32_FPREGS+192(a0)
-	EX	sdc1 $f26, SC32_FPREGS+208(a0)
-	EX	sdc1 $f28, SC32_FPREGS+224(a0)
-	EX	sdc1 $f30, SC32_FPREGS+240(a0)
-	EX	sw t1, SC32_FPC_CSR(a0)
-	cfc1	t0, $0				# implementation/version
-	EX	sw t0, SC32_FPC_EIR(a0)
-	.set pop
-
-	jr	ra
-	 li	v0, 0					# success
-	END(_save_fp_context32)
-#endif
-
-/*
- * Restore FPU state:
- *  - fp gp registers
- *  - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
  */
 LEAF(_restore_fp_context)
-	EX	lw t1, SC_FPC_CSR(a0)
+	EX	lw t1, 0(a1)
 
 #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)  || \
 		defined(CONFIG_CPU_MIPS32_R6)
@@ -178,101 +130,231 @@
 	bgez	t0, 1f			# skip loading odd if FR=0
 	 nop
 #endif
-	EX	ldc1 $f1, SC_FPREGS+8(a0)
-	EX	ldc1 $f3, SC_FPREGS+24(a0)
-	EX	ldc1 $f5, SC_FPREGS+40(a0)
-	EX	ldc1 $f7, SC_FPREGS+56(a0)
-	EX	ldc1 $f9, SC_FPREGS+72(a0)
-	EX	ldc1 $f11, SC_FPREGS+88(a0)
-	EX	ldc1 $f13, SC_FPREGS+104(a0)
-	EX	ldc1 $f15, SC_FPREGS+120(a0)
-	EX	ldc1 $f17, SC_FPREGS+136(a0)
-	EX	ldc1 $f19, SC_FPREGS+152(a0)
-	EX	ldc1 $f21, SC_FPREGS+168(a0)
-	EX	ldc1 $f23, SC_FPREGS+184(a0)
-	EX	ldc1 $f25, SC_FPREGS+200(a0)
-	EX	ldc1 $f27, SC_FPREGS+216(a0)
-	EX	ldc1 $f29, SC_FPREGS+232(a0)
-	EX	ldc1 $f31, SC_FPREGS+248(a0)
+	EX	ldc1 $f1, 8(a0)
+	EX	ldc1 $f3, 24(a0)
+	EX	ldc1 $f5, 40(a0)
+	EX	ldc1 $f7, 56(a0)
+	EX	ldc1 $f9, 72(a0)
+	EX	ldc1 $f11, 88(a0)
+	EX	ldc1 $f13, 104(a0)
+	EX	ldc1 $f15, 120(a0)
+	EX	ldc1 $f17, 136(a0)
+	EX	ldc1 $f19, 152(a0)
+	EX	ldc1 $f21, 168(a0)
+	EX	ldc1 $f23, 184(a0)
+	EX	ldc1 $f25, 200(a0)
+	EX	ldc1 $f27, 216(a0)
+	EX	ldc1 $f29, 232(a0)
+	EX	ldc1 $f31, 248(a0)
 1:	.set pop
 #endif
 	.set push
 	SET_HARDFLOAT
-	EX	ldc1 $f0, SC_FPREGS+0(a0)
-	EX	ldc1 $f2, SC_FPREGS+16(a0)
-	EX	ldc1 $f4, SC_FPREGS+32(a0)
-	EX	ldc1 $f6, SC_FPREGS+48(a0)
-	EX	ldc1 $f8, SC_FPREGS+64(a0)
-	EX	ldc1 $f10, SC_FPREGS+80(a0)
-	EX	ldc1 $f12, SC_FPREGS+96(a0)
-	EX	ldc1 $f14, SC_FPREGS+112(a0)
-	EX	ldc1 $f16, SC_FPREGS+128(a0)
-	EX	ldc1 $f18, SC_FPREGS+144(a0)
-	EX	ldc1 $f20, SC_FPREGS+160(a0)
-	EX	ldc1 $f22, SC_FPREGS+176(a0)
-	EX	ldc1 $f24, SC_FPREGS+192(a0)
-	EX	ldc1 $f26, SC_FPREGS+208(a0)
-	EX	ldc1 $f28, SC_FPREGS+224(a0)
-	EX	ldc1 $f30, SC_FPREGS+240(a0)
+	EX	ldc1 $f0, 0(a0)
+	EX	ldc1 $f2, 16(a0)
+	EX	ldc1 $f4, 32(a0)
+	EX	ldc1 $f6, 48(a0)
+	EX	ldc1 $f8, 64(a0)
+	EX	ldc1 $f10, 80(a0)
+	EX	ldc1 $f12, 96(a0)
+	EX	ldc1 $f14, 112(a0)
+	EX	ldc1 $f16, 128(a0)
+	EX	ldc1 $f18, 144(a0)
+	EX	ldc1 $f20, 160(a0)
+	EX	ldc1 $f22, 176(a0)
+	EX	ldc1 $f24, 192(a0)
+	EX	ldc1 $f26, 208(a0)
+	EX	ldc1 $f28, 224(a0)
+	EX	ldc1 $f30, 240(a0)
 	ctc1	t1, fcr31
 	.set pop
 	jr	ra
 	 li	v0, 0					# success
 	END(_restore_fp_context)
 
-#ifdef CONFIG_MIPS32_COMPAT
-LEAF(_restore_fp_context32)
-	/* Restore an o32 sigcontext.  */
-	.set push
-	SET_HARDFLOAT
-	EX	lw t1, SC32_FPC_CSR(a0)
+#ifdef CONFIG_CPU_HAS_MSA
 
-#ifndef CONFIG_CPU_MIPS64_R6
-	mfc0	t0, CP0_STATUS
-	sll	t0, t0, 5
-	bgez	t0, 1f			# skip loading odd if FR=0
-	 nop
-#endif
-
-	EX      ldc1 $f1, SC32_FPREGS+8(a0)
-	EX      ldc1 $f3, SC32_FPREGS+24(a0)
-	EX      ldc1 $f5, SC32_FPREGS+40(a0)
-	EX      ldc1 $f7, SC32_FPREGS+56(a0)
-	EX      ldc1 $f9, SC32_FPREGS+72(a0)
-	EX      ldc1 $f11, SC32_FPREGS+88(a0)
-	EX      ldc1 $f13, SC32_FPREGS+104(a0)
-	EX      ldc1 $f15, SC32_FPREGS+120(a0)
-	EX      ldc1 $f17, SC32_FPREGS+136(a0)
-	EX      ldc1 $f19, SC32_FPREGS+152(a0)
-	EX      ldc1 $f21, SC32_FPREGS+168(a0)
-	EX      ldc1 $f23, SC32_FPREGS+184(a0)
-	EX      ldc1 $f25, SC32_FPREGS+200(a0)
-	EX      ldc1 $f27, SC32_FPREGS+216(a0)
-	EX      ldc1 $f29, SC32_FPREGS+232(a0)
-	EX      ldc1 $f31, SC32_FPREGS+248(a0)
-
-1:	EX	ldc1 $f0, SC32_FPREGS+0(a0)
-	EX	ldc1 $f2, SC32_FPREGS+16(a0)
-	EX	ldc1 $f4, SC32_FPREGS+32(a0)
-	EX	ldc1 $f6, SC32_FPREGS+48(a0)
-	EX	ldc1 $f8, SC32_FPREGS+64(a0)
-	EX	ldc1 $f10, SC32_FPREGS+80(a0)
-	EX	ldc1 $f12, SC32_FPREGS+96(a0)
-	EX	ldc1 $f14, SC32_FPREGS+112(a0)
-	EX	ldc1 $f16, SC32_FPREGS+128(a0)
-	EX	ldc1 $f18, SC32_FPREGS+144(a0)
-	EX	ldc1 $f20, SC32_FPREGS+160(a0)
-	EX	ldc1 $f22, SC32_FPREGS+176(a0)
-	EX	ldc1 $f24, SC32_FPREGS+192(a0)
-	EX	ldc1 $f26, SC32_FPREGS+208(a0)
-	EX	ldc1 $f28, SC32_FPREGS+224(a0)
-	EX	ldc1 $f30, SC32_FPREGS+240(a0)
-	ctc1	t1, fcr31
+	.macro	op_one_wr	op, idx, base
+	.align	4
+\idx:	\op	\idx, 0, \base
 	jr	ra
-	 li	v0, 0					# success
-	.set pop
-	END(_restore_fp_context32)
+	 nop
+	.endm
+
+	.macro	op_msa_wr	name, op
+LEAF(\name)
+	.set		push
+	.set		noreorder
+	sll		t0, a0, 4
+	PTR_LA		t1, 0f
+	PTR_ADDU	t0, t0, t1
+	jr		t0
+	  nop
+	op_one_wr	\op, 0, a1
+	op_one_wr	\op, 1, a1
+	op_one_wr	\op, 2, a1
+	op_one_wr	\op, 3, a1
+	op_one_wr	\op, 4, a1
+	op_one_wr	\op, 5, a1
+	op_one_wr	\op, 6, a1
+	op_one_wr	\op, 7, a1
+	op_one_wr	\op, 8, a1
+	op_one_wr	\op, 9, a1
+	op_one_wr	\op, 10, a1
+	op_one_wr	\op, 11, a1
+	op_one_wr	\op, 12, a1
+	op_one_wr	\op, 13, a1
+	op_one_wr	\op, 14, a1
+	op_one_wr	\op, 15, a1
+	op_one_wr	\op, 16, a1
+	op_one_wr	\op, 17, a1
+	op_one_wr	\op, 18, a1
+	op_one_wr	\op, 19, a1
+	op_one_wr	\op, 20, a1
+	op_one_wr	\op, 21, a1
+	op_one_wr	\op, 22, a1
+	op_one_wr	\op, 23, a1
+	op_one_wr	\op, 24, a1
+	op_one_wr	\op, 25, a1
+	op_one_wr	\op, 26, a1
+	op_one_wr	\op, 27, a1
+	op_one_wr	\op, 28, a1
+	op_one_wr	\op, 29, a1
+	op_one_wr	\op, 30, a1
+	op_one_wr	\op, 31, a1
+	.set		pop
+	END(\name)
+	.endm
+
+	op_msa_wr	read_msa_wr_b, st_b
+	op_msa_wr	read_msa_wr_h, st_h
+	op_msa_wr	read_msa_wr_w, st_w
+	op_msa_wr	read_msa_wr_d, st_d
+
+	op_msa_wr	write_msa_wr_b, ld_b
+	op_msa_wr	write_msa_wr_h, ld_h
+	op_msa_wr	write_msa_wr_w, ld_w
+	op_msa_wr	write_msa_wr_d, ld_d
+
+#endif /* CONFIG_CPU_HAS_MSA */
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+	.macro	save_msa_upper	wr, off, base
+	.set	push
+	.set	noat
+#ifdef CONFIG_64BIT
+	copy_u_d \wr, 1
+	EX sd	$1, \off(\base)
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+	copy_u_w \wr, 2
+	EX sw	$1, \off(\base)
+	copy_u_w \wr, 3
+	EX sw	$1, (\off+4)(\base)
+#else /* CONFIG_CPU_BIG_ENDIAN */
+	copy_u_w \wr, 2
+	EX sw	$1, (\off+4)(\base)
+	copy_u_w \wr, 3
+	EX sw	$1, \off(\base)
 #endif
+	.set	pop
+	.endm
+
+LEAF(_save_msa_all_upper)
+	save_msa_upper	0, 0x00, a0
+	save_msa_upper	1, 0x08, a0
+	save_msa_upper	2, 0x10, a0
+	save_msa_upper	3, 0x18, a0
+	save_msa_upper	4, 0x20, a0
+	save_msa_upper	5, 0x28, a0
+	save_msa_upper	6, 0x30, a0
+	save_msa_upper	7, 0x38, a0
+	save_msa_upper	8, 0x40, a0
+	save_msa_upper	9, 0x48, a0
+	save_msa_upper	10, 0x50, a0
+	save_msa_upper	11, 0x58, a0
+	save_msa_upper	12, 0x60, a0
+	save_msa_upper	13, 0x68, a0
+	save_msa_upper	14, 0x70, a0
+	save_msa_upper	15, 0x78, a0
+	save_msa_upper	16, 0x80, a0
+	save_msa_upper	17, 0x88, a0
+	save_msa_upper	18, 0x90, a0
+	save_msa_upper	19, 0x98, a0
+	save_msa_upper	20, 0xa0, a0
+	save_msa_upper	21, 0xa8, a0
+	save_msa_upper	22, 0xb0, a0
+	save_msa_upper	23, 0xb8, a0
+	save_msa_upper	24, 0xc0, a0
+	save_msa_upper	25, 0xc8, a0
+	save_msa_upper	26, 0xd0, a0
+	save_msa_upper	27, 0xd8, a0
+	save_msa_upper	28, 0xe0, a0
+	save_msa_upper	29, 0xe8, a0
+	save_msa_upper	30, 0xf0, a0
+	save_msa_upper	31, 0xf8, a0
+	jr	ra
+	 li	v0, 0
+	END(_save_msa_all_upper)
+
+	.macro	restore_msa_upper	wr, off, base
+	.set	push
+	.set	noat
+#ifdef CONFIG_64BIT
+	EX ld	$1, \off(\base)
+	insert_d \wr, 1
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+	EX lw	$1, \off(\base)
+	insert_w \wr, 2
+	EX lw	$1, (\off+4)(\base)
+	insert_w \wr, 3
+#else /* CONFIG_CPU_BIG_ENDIAN */
+	EX lw	$1, (\off+4)(\base)
+	insert_w \wr, 2
+	EX lw	$1, \off(\base)
+	insert_w \wr, 3
+#endif
+	.set	pop
+	.endm
+
+LEAF(_restore_msa_all_upper)
+	restore_msa_upper	0, 0x00, a0
+	restore_msa_upper	1, 0x08, a0
+	restore_msa_upper	2, 0x10, a0
+	restore_msa_upper	3, 0x18, a0
+	restore_msa_upper	4, 0x20, a0
+	restore_msa_upper	5, 0x28, a0
+	restore_msa_upper	6, 0x30, a0
+	restore_msa_upper	7, 0x38, a0
+	restore_msa_upper	8, 0x40, a0
+	restore_msa_upper	9, 0x48, a0
+	restore_msa_upper	10, 0x50, a0
+	restore_msa_upper	11, 0x58, a0
+	restore_msa_upper	12, 0x60, a0
+	restore_msa_upper	13, 0x68, a0
+	restore_msa_upper	14, 0x70, a0
+	restore_msa_upper	15, 0x78, a0
+	restore_msa_upper	16, 0x80, a0
+	restore_msa_upper	17, 0x88, a0
+	restore_msa_upper	18, 0x90, a0
+	restore_msa_upper	19, 0x98, a0
+	restore_msa_upper	20, 0xa0, a0
+	restore_msa_upper	21, 0xa8, a0
+	restore_msa_upper	22, 0xb0, a0
+	restore_msa_upper	23, 0xb8, a0
+	restore_msa_upper	24, 0xc0, a0
+	restore_msa_upper	25, 0xc8, a0
+	restore_msa_upper	26, 0xd0, a0
+	restore_msa_upper	27, 0xd8, a0
+	restore_msa_upper	28, 0xe0, a0
+	restore_msa_upper	29, 0xe8, a0
+	restore_msa_upper	30, 0xf0, a0
+	restore_msa_upper	31, 0xf8, a0
+	jr	ra
+	 li	v0, 0
+	END(_restore_msa_all_upper)
+
+#endif /* CONFIG_CPU_HAS_MSA */
 
 	.set	reorder
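
Why op_msa_wr builds a jump table: MSA load/store and copy/insert
instructions encode the vector register number as an immediate, so it
cannot be chosen at run time. The assembly aligns each stub to 16 bytes and
jumps to entry (index << 4). The C-level equivalent of that dispatch is
simply a switch with one fixed-register case per entry; a sketch:

int read_msa_wr_sketch(unsigned int idx, void *buf)
{
	switch (idx) {	/* each case compiles to a fixed-$wN store */
	case 0:		/* st.d $w0, 0(buf) */
		break;
	case 1:		/* st.d $w1, 0(buf) */
		break;
	/* ... cases 2..31 follow the same shape ... */
	default:
		return -1;	/* no such vector register */
	}
	return 0;
}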
 
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 04cbbde3..92cd051 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -34,7 +34,7 @@
 #ifndef USE_ALTERNATE_RESUME_IMPL
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *		       struct thread_info *next_ti, s32 fp_save)
+ *		       struct thread_info *next_ti)
  */
 	.align	5
 	LEAF(resume)
@@ -43,45 +43,6 @@
 	cpu_save_nonscratch a0
 	LONG_S	ra, THREAD_REG31(a0)
 
-	/*
-	 * Check whether we need to save any FP context. FP context is saved
-	 * iff the process has used the context with the scalar FPU or the MSA
-	 * ASE in the current time slice, as indicated by _TIF_USEDFPU and
-	 * _TIF_USEDMSA respectively. switch_to will have set fp_save
-	 * accordingly to an FP_SAVE_ enum value.
-	 */
-	beqz	a3, 2f
-
-	/*
-	 * We do. Clear the saved CU1 bit for prev, such that next time it is
-	 * scheduled it will start in userland with the FPU disabled. If the
-	 * task uses the FPU then it will be enabled again via the do_cpu trap.
-	 * This allows us to lazily restore the FP context.
-	 */
-	PTR_L	t3, TASK_THREAD_INFO(a0)
-	LONG_L	t0, ST_OFF(t3)
-	li	t1, ~ST0_CU1
-	and	t0, t0, t1
-	LONG_S	t0, ST_OFF(t3)
-
-	/* Check whether we're saving scalar or vector context. */
-	bgtz	a3, 1f
-
-	/* Save 128b MSA vector context + scalar FP control & status. */
-	.set push
-	SET_HARDFLOAT
-	cfc1	t1, fcr31
-	msa_save_all	a0
-	.set pop	/* SET_HARDFLOAT */
-
-	sw	t1, THREAD_FCR31(a0)
-	b	2f
-
-1:	/* Save 32b/64b scalar FP context. */
-	fpu_save_double a0 t0 t1		# c0_status passed in t0
-						# clobbers t1
-2:
-
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	PTR_LA	t8, __stack_chk_guard
 	LONG_L	t9, TASK_STACK_CANARY(a1)
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index 0b85f82..f50d484 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -31,4 +31,13 @@
 #define lock_fpu_owner()	({ preempt_disable(); pagefault_disable(); })
 #define unlock_fpu_owner()	({ pagefault_enable(); preempt_enable(); })
 
+/* Assembly functions to move context to/from the FPU */
+extern asmlinkage int
+_save_fp_context(void __user *fpregs, void __user *csr);
+extern asmlinkage int
+_restore_fp_context(void __user *fpregs, void __user *csr);
+
+extern asmlinkage int _save_msa_all_upper(void __user *buf);
+extern asmlinkage int _restore_msa_all_upper(void __user *buf);
+
 #endif	/* __SIGNAL_COMMON_H */
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 6a28c79..2fec67b 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -21,6 +21,7 @@
 #include <linux/wait.h>
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
+#include <linux/uprobes.h>
 #include <linux/compiler.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
@@ -38,20 +39,21 @@
 #include <asm/vdso.h>
 #include <asm/dsp.h>
 #include <asm/inst.h>
+#include <asm/msa.h>
 
 #include "signal-common.h"
 
-static int (*save_fp_context)(struct sigcontext __user *sc);
-static int (*restore_fp_context)(struct sigcontext __user *sc);
-
-extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
-extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
+static int (*save_fp_context)(void __user *sc);
+static int (*restore_fp_context)(void __user *sc);
 
 struct sigframe {
 	u32 sf_ass[4];		/* argument save space for o32 */
 	u32 sf_pad[2];		/* Was: signal trampoline */
+
+	/* Matches struct ucontext from its uc_mcontext field onwards */
 	struct sigcontext sf_sc;
 	sigset_t sf_mask;
+	unsigned long long sf_extcontext[0];
 };
 
 struct rt_sigframe {
@@ -65,43 +67,255 @@
  * Thread saved context copy to/from a signal context presumed to be on the
  * user stack, and therefore accessed with appropriate macros from uaccess.h.
  */
-static int copy_fp_to_sigcontext(struct sigcontext __user *sc)
+static int copy_fp_to_sigcontext(void __user *sc)
 {
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
 	int i;
 	int err = 0;
+	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
 
-	for (i = 0; i < NUM_FPU_REGS; i++) {
+	for (i = 0; i < NUM_FPU_REGS; i += inc) {
 		err |=
 		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
-			       &sc->sc_fpregs[i]);
+			       &fpregs[i]);
 	}
-	err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+	err |= __put_user(current->thread.fpu.fcr31, csr);
 
 	return err;
 }
 
-static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
+static int copy_fp_from_sigcontext(void __user *sc)
 {
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
 	int i;
 	int err = 0;
+	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
 	u64 fpr_val;
 
-	for (i = 0; i < NUM_FPU_REGS; i++) {
-		err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
+	for (i = 0; i < NUM_FPU_REGS; i += inc) {
+		err |= __get_user(fpr_val, &fpregs[i]);
 		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
 	}
-	err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+	err |= __get_user(current->thread.fpu.fcr31, csr);
 
 	return err;
 }
 
 /*
+ * Wrappers for the assembly _{save,restore}_fp_context functions.
+ */
+static int save_hw_fp_context(void __user *sc)
+{
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+
+	return _save_fp_context(fpregs, csr);
+}
+
+static int restore_hw_fp_context(void __user *sc)
+{
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+
+	return _restore_fp_context(fpregs, csr);
+}
+
+/*
+ * Extended context handling.
+ */
+
+static inline void __user *sc_to_extcontext(void __user *sc)
+{
+	struct ucontext __user *uc;
+
+	/*
+	 * We can just pretend the sigcontext is always embedded in a struct
+	 * ucontext here, because the offset from sigcontext to extended
+	 * context is the same in the struct sigframe case.
+	 */
+	uc = container_of(sc, struct ucontext, uc_mcontext);
+	return &uc->uc_extcontext;
+}
+
+static int save_msa_extcontext(void __user *buf)
+{
+	struct msa_extcontext __user *msa = buf;
+	uint64_t val;
+	int i, err;
+
+	if (!thread_msa_context_live())
+		return 0;
+
+	/*
+	 * Ensure that we can't lose the live MSA context between checking
+	 * for it & writing it to memory.
+	 */
+	preempt_disable();
+
+	if (is_msa_enabled()) {
+		/*
+		 * There are no EVA versions of the vector register load/store
+		 * instructions, so MSA context has to be saved to kernel memory
+		 * and then copied to user memory. The save to kernel memory
+		 * should already have been done when handling scalar FP
+		 * context.
+		 */
+		BUG_ON(config_enabled(CONFIG_EVA));
+
+		err = __put_user(read_msa_csr(), &msa->csr);
+		err |= _save_msa_all_upper(&msa->wr);
+
+		preempt_enable();
+	} else {
+		preempt_enable();
+
+		err = __put_user(current->thread.fpu.msacsr, &msa->csr);
+
+		for (i = 0; i < NUM_FPU_REGS; i++) {
+			val = get_fpr64(&current->thread.fpu.fpr[i], 1);
+			err |= __put_user(val, &msa->wr[i]);
+		}
+	}
+
+	err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic);
+	err |= __put_user(sizeof(*msa), &msa->ext.size);
+
+	return err ? -EFAULT : sizeof(*msa);
+}
+
+static int restore_msa_extcontext(void __user *buf, unsigned int size)
+{
+	struct msa_extcontext __user *msa = buf;
+	unsigned long long val;
+	unsigned int csr;
+	int i, err;
+
+	if (size != sizeof(*msa))
+		return -EINVAL;
+
+	err = get_user(csr, &msa->csr);
+	if (err)
+		return err;
+
+	preempt_disable();
+
+	if (is_msa_enabled()) {
+		/*
+		 * There are no EVA versions of the vector register load/store
+		 * instructions, so MSA context has to be copied to kernel
+		 * memory and later loaded to registers. The same is true of
+		 * scalar FP context, so FPU & MSA should have already been
+		 * disabled whilst handling scalar FP context.
+		 */
+		BUG_ON(config_enabled(CONFIG_EVA));
+
+		write_msa_csr(csr);
+		err |= _restore_msa_all_upper(&msa->wr);
+		preempt_enable();
+	} else {
+		preempt_enable();
+
+		current->thread.fpu.msacsr = csr;
+
+		for (i = 0; i < NUM_FPU_REGS; i++) {
+			err |= __get_user(val, &msa->wr[i]);
+			set_fpr64(&current->thread.fpu.fpr[i], 1, val);
+		}
+	}
+
+	return err;
+}
+
+static int save_extcontext(void __user *buf)
+{
+	int sz;
+
+	sz = save_msa_extcontext(buf);
+	if (sz < 0)
+		return sz;
+	buf += sz;
+
+	/* If no context was saved then trivially return */
+	if (!sz)
+		return 0;
+
+	/* Write the end marker */
+	if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf))
+		return -EFAULT;
+
+	sz += sizeof(((struct extcontext *)NULL)->magic);
+	return sz;
+}
+
+static int restore_extcontext(void __user *buf)
+{
+	struct extcontext ext;
+	int err;
+
+	while (1) {
+		err = __get_user(ext.magic, (unsigned int *)buf);
+		if (err)
+			return err;
+
+		if (ext.magic == END_EXTCONTEXT_MAGIC)
+			return 0;
+
+		err = __get_user(ext.size, (unsigned int *)(buf
+			+ offsetof(struct extcontext, size)));
+		if (err)
+			return err;
+
+		switch (ext.magic) {
+		case MSA_EXTCONTEXT_MAGIC:
+			err = restore_msa_extcontext(buf, ext.size);
+			break;
+
+		default:
+			err = -EINVAL;
+			break;
+		}
+
+		if (err)
+			return err;
+
+		buf += ext.size;
+	}
+}
+
+/*
  * Helper routines
  */
-static int protected_save_fp_context(struct sigcontext __user *sc)
+int protected_save_fp_context(void __user *sc)
 {
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+	uint32_t __user *used_math = sc + abi->off_sc_used_math;
+	unsigned int used, ext_sz;
 	int err;
-#ifndef CONFIG_EVA
+
+	used = used_math() ? USED_FP : 0;
+	if (!used)
+		goto fp_done;
+
+	if (!test_thread_flag(TIF_32BIT_FPREGS))
+		used |= USED_FR1;
+	if (test_thread_flag(TIF_HYBRID_FPREGS))
+		used |= USED_HYBRID_FPRS;
+
+	/*
+	 * EVA does not have userland equivalents of ldc1 or sdc1, so
+	 * save to the kernel FP context & copy that to userland below.
+	 */
+	if (config_enabled(CONFIG_EVA))
+		lose_fpu(1);
+
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
@@ -114,27 +328,57 @@
 		if (likely(!err))
 			break;
 		/* touch the sigcontext and try again */
-		err = __put_user(0, &sc->sc_fpregs[0]) |
-			__put_user(0, &sc->sc_fpregs[31]) |
-			__put_user(0, &sc->sc_fpc_csr);
+		err = __put_user(0, &fpregs[0]) |
+			__put_user(0, &fpregs[31]) |
+			__put_user(0, csr);
 		if (err)
-			break;	/* really bad sigcontext */
+			return err;	/* really bad sigcontext */
 	}
-#else
-	/*
-	 * EVA does not have FPU EVA instructions so saving fpu context directly
-	 * does not work.
-	 */
-	lose_fpu(1);
-	err = save_fp_context(sc); /* this might fail */
-#endif
-	return err;
+
+fp_done:
+	ext_sz = err = save_extcontext(sc_to_extcontext(sc));
+	if (err < 0)
+		return err;
+	used |= ext_sz ? USED_EXTCONTEXT : 0;
+
+	return __put_user(used, used_math);
 }
 
-static int protected_restore_fp_context(struct sigcontext __user *sc)
+int protected_restore_fp_context(void __user *sc)
 {
-	int err, tmp __maybe_unused;
-#ifndef CONFIG_EVA
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+	uint32_t __user *used_math = sc + abi->off_sc_used_math;
+	unsigned int used;
+	int err, sig = 0, tmp __maybe_unused;
+
+	err = __get_user(used, used_math);
+	conditional_used_math(used & USED_FP);
+
+	/*
+	 * The signal handler may have used FPU; give it up if the program
+	 * doesn't want it following sigreturn.
+	 */
+	if (err || !(used & USED_FP))
+		lose_fpu(0);
+	if (err)
+		return err;
+	if (!(used & USED_FP))
+		goto fp_done;
+
+	err = sig = fpcsr_pending(csr);
+	if (err < 0)
+		return err;
+
+	/*
+	 * EVA does not have userland equivalents of ldc1 or sdc1, so we
+	 * disable the FPU here such that the code below simply copies to
+	 * the kernel FP context.
+	 */
+	if (config_enabled(CONFIG_EVA))
+		lose_fpu(0);
+
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
@@ -147,28 +391,24 @@
 		if (likely(!err))
 			break;
 		/* touch the sigcontext and try again */
-		err = __get_user(tmp, &sc->sc_fpregs[0]) |
-			__get_user(tmp, &sc->sc_fpregs[31]) |
-			__get_user(tmp, &sc->sc_fpc_csr);
+		err = __get_user(tmp, &fpregs[0]) |
+			__get_user(tmp, &fpregs[31]) |
+			__get_user(tmp, csr);
 		if (err)
 			break;	/* really bad sigcontext */
 	}
-#else
-	/*
-	 * EVA does not have FPU EVA instructions so restoring fpu context
-	 * directly does not work.
-	 */
-	lose_fpu(0);
-	err = restore_fp_context(sc); /* this might fail */
-#endif
-	return err;
+
+fp_done:
+	if (used & USED_EXTCONTEXT)
+		err |= restore_extcontext(sc_to_extcontext(sc));
+
+	return err ?: sig;
 }
 
 int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 {
 	int err = 0;
 	int i;
-	unsigned int used_math;
 
 	err |= __put_user(regs->cp0_epc, &sc->sc_pc);
 
@@ -191,19 +431,38 @@
 		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
 	}
 
-	used_math = !!used_math();
-	err |= __put_user(used_math, &sc->sc_used_math);
 
-	if (used_math) {
-		/*
-		 * Save FPU state to signal context. Signal handler
-		 * will "inherit" current FPU state.
-		 */
-		err |= protected_save_fp_context(sc);
-	}
+	/*
+	 * Save FPU state to signal context. Signal handler
+	 * will "inherit" current FPU state.
+	 */
+	err |= protected_save_fp_context(sc);
+
 	return err;
 }
 
+static size_t extcontext_max_size(void)
+{
+	size_t sz = 0;
+
+	/*
+	 * The assumption here is that between this point & the point at which
+	 * the extended context is saved the size of the context should only
+	 * ever be able to shrink (if the task is preempted), but never grow.
+	 * That is, what this function returns is an upper bound on the size of
+	 * the extended context for the current task at the current time.
+	 */
+
+	if (thread_msa_context_live())
+		sz += sizeof(struct msa_extcontext);
+
+	/* If any context is saved then we'll append the end marker */
+	if (sz)
+		sz += sizeof(((struct extcontext *)NULL)->magic);
+
+	return sz;
+}
+
 int fpcsr_pending(unsigned int __user *fpcsr)
 {
 	int err, sig = 0;
@@ -223,21 +482,8 @@
 	return err ?: sig;
 }
 
-static int
-check_and_restore_fp_context(struct sigcontext __user *sc)
-{
-	int err, sig;
-
-	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
-	if (err > 0)
-		err = 0;
-	err |= protected_restore_fp_context(sc);
-	return err ?: sig;
-}
-
 int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 {
-	unsigned int used_math;
 	unsigned long treg;
 	int err = 0;
 	int i;
@@ -265,19 +511,7 @@
 	for (i = 1; i < 32; i++)
 		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
 
-	err |= __get_user(used_math, &sc->sc_used_math);
-	conditional_used_math(used_math);
-
-	if (used_math) {
-		/* restore fpu context if we have used it before */
-		if (!err)
-			err = check_and_restore_fp_context(sc);
-	} else {
-		/* signal handler may have used FPU.  Give it up. */
-		lose_fpu(0);
-	}
-
-	return err;
+	return err ?: protected_restore_fp_context(sc);
 }
 
 void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
@@ -285,6 +519,9 @@
 {
 	unsigned long sp;
 
+	/* Leave space for potential extended context */
+	frame_size += extcontext_max_size();
+
 	/* Default to using normal stack */
 	sp = regs->regs[29];
 
@@ -520,7 +757,11 @@
 	.setup_rt_frame = setup_rt_frame,
 	.rt_signal_return_offset =
 		offsetof(struct mips_vdso, rt_signal_trampoline),
-	.restart	= __NR_restart_syscall
+	.restart	= __NR_restart_syscall,
+
+	.off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
+	.off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
+	.off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
 };
 
 static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
@@ -616,6 +857,9 @@
 
 	user_exit();
 
+	if (thread_info_flags & _TIF_UPROBE)
+		uprobe_notify_resume(regs);
+
 	/* deal with pending signal delivery */
 	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs);
@@ -629,43 +873,46 @@
 }
 
 #ifdef CONFIG_SMP
-#ifndef CONFIG_EVA
-static int smp_save_fp_context(struct sigcontext __user *sc)
+static int smp_save_fp_context(void __user *sc)
 {
 	return raw_cpu_has_fpu
-	       ? _save_fp_context(sc)
+	       ? save_hw_fp_context(sc)
 	       : copy_fp_to_sigcontext(sc);
 }
 
-static int smp_restore_fp_context(struct sigcontext __user *sc)
+static int smp_restore_fp_context(void __user *sc)
 {
 	return raw_cpu_has_fpu
-	       ? _restore_fp_context(sc)
+	       ? restore_hw_fp_context(sc)
 	       : copy_fp_from_sigcontext(sc);
 }
-#endif /* CONFIG_EVA */
 #endif
 
 static int signal_setup(void)
 {
-#ifndef CONFIG_EVA
+	/*
+	 * The offset from sigcontext to extended context should be the same
+	 * regardless of the type of signal, such that userland can always know
+	 * where to look if it wishes to find the extended context structures.
+	 */
+	BUILD_BUG_ON((offsetof(struct sigframe, sf_extcontext) -
+		      offsetof(struct sigframe, sf_sc)) !=
+		     (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) -
+		      offsetof(struct rt_sigframe, rs_uc.uc_mcontext)));
+
 #ifdef CONFIG_SMP
 	/* For now just do the cpu_has_fpu check when the functions are invoked */
 	save_fp_context = smp_save_fp_context;
 	restore_fp_context = smp_restore_fp_context;
 #else
 	if (cpu_has_fpu) {
-		save_fp_context = _save_fp_context;
-		restore_fp_context = _restore_fp_context;
+		save_fp_context = save_hw_fp_context;
+		restore_fp_context = restore_hw_fp_context;
 	} else {
 		save_fp_context = copy_fp_to_sigcontext;
 		restore_fp_context = copy_fp_from_sigcontext;
 	}
 #endif /* CONFIG_SMP */
-#else
-	save_fp_context = copy_fp_to_sigcontext;
-	restore_fp_context = copy_fp_from_sigcontext;
-#endif
 
 	return 0;
 }
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 5d7f263..f7e89524 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -36,12 +36,6 @@
 
 #include "signal-common.h"
 
-static int (*save_fp_context32)(struct sigcontext32 __user *sc);
-static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
-
-extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
-
 /*
  * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
  */
@@ -74,99 +68,11 @@
 	struct ucontext32 rs_uc;
 };
 
-/*
- * Thread saved context copy to/from a signal context presumed to be on the
- * user stack, and therefore accessed with appropriate macros from uaccess.h.
- */
-static int copy_fp_to_sigcontext32(struct sigcontext32 __user *sc)
-{
-	int i;
-	int err = 0;
-	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
-
-	for (i = 0; i < NUM_FPU_REGS; i += inc) {
-		err |=
-		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
-			       &sc->sc_fpregs[i]);
-	}
-	err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
-	return err;
-}
-
-static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
-{
-	int i;
-	int err = 0;
-	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
-	u64 fpr_val;
-
-	for (i = 0; i < NUM_FPU_REGS; i += inc) {
-		err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
-		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
-	}
-	err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
-	return err;
-}
-
-/*
- * sigcontext handlers
- */
-static int protected_save_fp_context32(struct sigcontext32 __user *sc)
-{
-	int err;
-	while (1) {
-		lock_fpu_owner();
-		if (is_fpu_owner()) {
-			err = save_fp_context32(sc);
-			unlock_fpu_owner();
-		} else {
-			unlock_fpu_owner();
-			err = copy_fp_to_sigcontext32(sc);
-		}
-		if (likely(!err))
-			break;
-		/* touch the sigcontext and try again */
-		err = __put_user(0, &sc->sc_fpregs[0]) |
-			__put_user(0, &sc->sc_fpregs[31]) |
-			__put_user(0, &sc->sc_fpc_csr);
-		if (err)
-			break;	/* really bad sigcontext */
-	}
-	return err;
-}
-
-static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
-{
-	int err, tmp __maybe_unused;
-	while (1) {
-		lock_fpu_owner();
-		if (is_fpu_owner()) {
-			err = restore_fp_context32(sc);
-			unlock_fpu_owner();
-		} else {
-			unlock_fpu_owner();
-			err = copy_fp_from_sigcontext32(sc);
-		}
-		if (likely(!err))
-			break;
-		/* touch the sigcontext and try again */
-		err = __get_user(tmp, &sc->sc_fpregs[0]) |
-			__get_user(tmp, &sc->sc_fpregs[31]) |
-			__get_user(tmp, &sc->sc_fpc_csr);
-		if (err)
-			break;	/* really bad sigcontext */
-	}
-	return err;
-}
-
 static int setup_sigcontext32(struct pt_regs *regs,
 			      struct sigcontext32 __user *sc)
 {
 	int err = 0;
 	int i;
-	u32 used_math;
 
 	err |= __put_user(regs->cp0_epc, &sc->sc_pc);
 
@@ -186,35 +92,18 @@
 		err |= __put_user(mflo3(), &sc->sc_lo3);
 	}
 
-	used_math = !!used_math();
-	err |= __put_user(used_math, &sc->sc_used_math);
+	/*
+	 * Save FPU state to signal context.  Signal handler
+	 * will "inherit" current FPU state.
+	 */
+	err |= protected_save_fp_context(sc);
 
-	if (used_math) {
-		/*
-		 * Save FPU state to signal context.  Signal handler
-		 * will "inherit" current FPU state.
-		 */
-		err |= protected_save_fp_context32(sc);
-	}
 	return err;
 }
 
-static int
-check_and_restore_fp_context32(struct sigcontext32 __user *sc)
-{
-	int err, sig;
-
-	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
-	if (err > 0)
-		err = 0;
-	err |= protected_restore_fp_context32(sc);
-	return err ?: sig;
-}
-
 static int restore_sigcontext32(struct pt_regs *regs,
 				struct sigcontext32 __user *sc)
 {
-	u32 used_math;
 	int err = 0;
 	s32 treg;
 	int i;
@@ -238,70 +127,7 @@
 	for (i = 1; i < 32; i++)
 		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
 
-	err |= __get_user(used_math, &sc->sc_used_math);
-	conditional_used_math(used_math);
-
-	if (used_math) {
-		/* restore fpu context if we have used it before */
-		if (!err)
-			err = check_and_restore_fp_context32(sc);
-	} else {
-		/* signal handler may have used FPU.  Give it up. */
-		lose_fpu(0);
-	}
-
-	return err;
-}
-
-/*
- *
- */
-extern void __put_sigset_unknown_nsig(void);
-extern void __get_sigset_unknown_nsig(void);
-
-static inline int put_sigset(const sigset_t *kbuf, compat_sigset_t __user *ubuf)
-{
-	int err = 0;
-
-	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)))
-		return -EFAULT;
-
-	switch (_NSIG_WORDS) {
-	default:
-		__put_sigset_unknown_nsig();
-	case 2:
-		err |= __put_user(kbuf->sig[1] >> 32, &ubuf->sig[3]);
-		err |= __put_user(kbuf->sig[1] & 0xffffffff, &ubuf->sig[2]);
-	case 1:
-		err |= __put_user(kbuf->sig[0] >> 32, &ubuf->sig[1]);
-		err |= __put_user(kbuf->sig[0] & 0xffffffff, &ubuf->sig[0]);
-	}
-
-	return err;
-}
-
-static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t __user *ubuf)
-{
-	int err = 0;
-	unsigned long sig[4];
-
-	if (!access_ok(VERIFY_READ, ubuf, sizeof(*ubuf)))
-		return -EFAULT;
-
-	switch (_NSIG_WORDS) {
-	default:
-		__get_sigset_unknown_nsig();
-	case 2:
-		err |= __get_user(sig[3], &ubuf->sig[3]);
-		err |= __get_user(sig[2], &ubuf->sig[2]);
-		kbuf->sig[1] = sig[2] | (sig[3] << 32);
-	case 1:
-		err |= __get_user(sig[1], &ubuf->sig[1]);
-		err |= __get_user(sig[0], &ubuf->sig[0]);
-		kbuf->sig[0] = sig[0] | (sig[1] << 32);
-	}
-
-	return err;
+	return err ?: protected_restore_fp_context(sc);
 }
 
 /*
@@ -585,20 +411,9 @@
 	.setup_rt_frame = setup_rt_frame_32,
 	.rt_signal_return_offset =
 		offsetof(struct mips_vdso, o32_rt_signal_trampoline),
-	.restart	= __NR_O32_restart_syscall
+	.restart	= __NR_O32_restart_syscall,
+
+	.off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs),
+	.off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr),
+	.off_sc_used_math = offsetof(struct sigcontext32, sc_used_math),
 };
-
-static int signal32_init(void)
-{
-	if (cpu_has_fpu) {
-		save_fp_context32 = _save_fp_context32;
-		restore_fp_context32 = _restore_fp_context32;
-	} else {
-		save_fp_context32 = copy_fp_to_sigcontext32;
-		restore_fp_context32 = copy_fp_from_sigcontext32;
-	}
-
-	return 0;
-}
-
-arch_initcall(signal32_init);
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index f1d4751..0d017fd 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -153,5 +153,9 @@
 	.setup_rt_frame = setup_rt_frame_n32,
 	.rt_signal_return_offset =
 		offsetof(struct mips_vdso, n32_rt_signal_trampoline),
-	.restart	= __NR_N32_restart_syscall
+	.restart	= __NR_N32_restart_syscall,
+
+	.off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
+	.off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
+	.off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
 };
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index d1168d7..8489c88 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -209,6 +209,7 @@
 	case CPU_PROAPTIV:
 	case CPU_P5600:
 	case CPU_QEMU_GENERIC:
+	case CPU_I6400:
 		config0 = read_c0_config();
 		/* FIXME: addresses are Malta specific */
 		if (config0 & (1<<24)) {
diff --git a/arch/mips/kernel/sysrq.c b/arch/mips/kernel/sysrq.c
index 5b539f5..5f05539 100644
--- a/arch/mips/kernel/sysrq.c
+++ b/arch/mips/kernel/sysrq.c
@@ -21,24 +21,12 @@
 
 static void sysrq_tlbdump_single(void *dummy)
 {
-	const int field = 2 * sizeof(unsigned long);
 	unsigned long flags;
 
 	spin_lock_irqsave(&show_lock, flags);
 
 	pr_info("CPU%d:\n", smp_processor_id());
-	pr_info("Index	: %0x\n", read_c0_index());
-	pr_info("Pagemask: %0x\n", read_c0_pagemask());
-	pr_info("EntryHi : %0*lx\n", field, read_c0_entryhi());
-	pr_info("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
-	pr_info("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
-	pr_info("Wired   : %0x\n", read_c0_wired());
-	pr_info("Pagegrain: %0x\n", read_c0_pagegrain());
-	if (cpu_has_htw) {
-		pr_info("PWField : %0*lx\n", field, read_c0_pwfield());
-		pr_info("PWSize  : %0*lx\n", field, read_c0_pwsize());
-		pr_info("PWCtl   : %0x\n", read_c0_pwctl());
-	}
+	dump_tlb_regs();
 	pr_info("\n");
 	dump_tlb_all();
 	pr_info("\n");
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 8ea28e6..fdb392b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -370,11 +370,6 @@
 	set_fs(old_fs);
 }
 
-static int regs_to_trapnr(struct pt_regs *regs)
-{
-	return (regs->cp0_cause >> 2) & 0x1f;
-}
-
 static DEFINE_RAW_SPINLOCK(die_lock);
 
 void __noreturn die(const char *str, struct pt_regs *regs)
@@ -384,7 +379,7 @@
 
 	oops_enter();
 
-	if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs),
+	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
 		       SIGSEGV) == NOTIFY_STOP)
 		sig = 0;
 
@@ -470,7 +465,7 @@
 	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
 	       data ? "Data" : "Instruction",
 	       field, regs->cp0_epc, field, regs->regs[31]);
-	if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs),
+	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
 		       SIGBUS) == NOTIFY_STOP)
 		goto out;
 
@@ -826,7 +821,7 @@
 	int sig;
 
 	prev_state = exception_enter();
-	if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
+	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
 		       SIGFPE) == NOTIFY_STOP)
 		goto out;
 
@@ -882,11 +877,12 @@
 	char b[40];
 
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
-	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
+			 SIGTRAP) == NOTIFY_STOP)
 		return;
 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 
-	if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs),
+	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 		       SIGTRAP) == NOTIFY_STOP)
 		return;
 
@@ -948,6 +944,7 @@
 		set_fs(KERNEL_DS);
 
 	prev_state = exception_enter();
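+	/* Record the exception code (Cause.ExcCode) for the die notifier chain */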
+	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 	if (get_isa16_mode(regs->cp0_epc)) {
 		u16 instr[2];
 
@@ -987,15 +984,27 @@
 	 * pertain to them.
 	 */
 	switch (bcode) {
+	case BRK_UPROBE:
+		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
+			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+			goto out;
+		else
+			break;
+	case BRK_UPROBE_XOL:
+		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
+			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+			goto out;
+		else
+			break;
 	case BRK_KPROBE_BP:
 		if (notify_die(DIE_BREAK, "debug", regs, bcode,
-			       regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
 			goto out;
 		else
 			break;
 	case BRK_KPROBE_SSTEPBP:
 		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
-			       regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
 			goto out;
 		else
 			break;
@@ -1028,6 +1037,7 @@
 		set_fs(get_ds());
 
 	prev_state = exception_enter();
+	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 	if (get_isa16_mode(regs->cp0_epc)) {
 		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
 		    __get_user(instr[1], (u16 __user *)(epc + 2)))
@@ -1094,8 +1104,9 @@
 no_r2_instr:
 
 	prev_state = exception_enter();
+	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 
-	if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
+	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
 		       SIGILL) == NOTIFY_STOP)
 		goto out;
 
@@ -1444,8 +1455,9 @@
 	enum ctx_state prev_state;
 
 	prev_state = exception_enter();
+	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
-		       regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP)
+		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
 		goto out;
 
 	/* Clear MSACSR.Cause before enabling interrupts */
@@ -1523,7 +1535,6 @@
 
 asmlinkage void do_mcheck(struct pt_regs *regs)
 {
-	const int field = 2 * sizeof(unsigned long);
 	int multi_match = regs->cp0_status & ST0_TS;
 	enum ctx_state prev_state;
 	mm_segment_t old_fs = get_fs();
@@ -1532,19 +1543,8 @@
 	show_regs(regs);
 
 	if (multi_match) {
-		pr_err("Index	: %0x\n", read_c0_index());
-		pr_err("Pagemask: %0x\n", read_c0_pagemask());
-		pr_err("EntryHi : %0*lx\n", field, read_c0_entryhi());
-		pr_err("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
-		pr_err("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
-		pr_err("Wired   : %0x\n", read_c0_wired());
-		pr_err("Pagegrain: %0x\n", read_c0_pagegrain());
-		if (cpu_has_htw) {
-			pr_err("PWField : %0*lx\n", field, read_c0_pwfield());
-			pr_err("PWSize  : %0*lx\n", field, read_c0_pwsize());
-			pr_err("PWCtl   : %0x\n", read_c0_pwctl());
-		}
-		pr_err("\n");
+		dump_tlb_regs();
+		pr_info("\n");
 		dump_tlb_all();
 	}
 
@@ -1651,6 +1651,7 @@
 	case CPU_PROAPTIV:
 	case CPU_P5600:
 	case CPU_QEMU_GENERIC:
+	case CPU_I6400:
 		{
 #define ERRCTL_PE	0x80000000
 #define ERRCTL_L2P	0x00800000
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index eb3efd1..990354d 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -891,6 +891,9 @@
 #ifdef	CONFIG_EVA
 	mm_segment_t seg;
 #endif
+	union fpureg *fpr;
+	enum msa_2b_fmt df;
+	unsigned int wd;
 	origpc = (unsigned long)pc;
 	orig31 = regs->regs[31];
 
@@ -1202,6 +1205,75 @@
 			break;
 		return;
 
+	case msa_op:
+		if (!cpu_has_msa)
+			goto sigill;
+
+		/*
+		 * If we've reached this point then userland should have taken
+		 * the MSA disabled exception & initialised vector context at
+		 * some point in the past.
+		 */
+		BUG_ON(!thread_msa_context_live());
+
+		df = insn.msa_mi10_format.df;
+		wd = insn.msa_mi10_format.wd;
+		fpr = &current->thread.fpu.fpr[wd];
+
+		switch (insn.msa_mi10_format.func) {
+		case msa_ld_op:
+			if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
+				goto sigbus;
+
+			/*
+			 * Disable preemption to avoid a race between copying
+			 * state from userland, migrating to another CPU and
+			 * updating the hardware vector register below.
+			 */
+			preempt_disable();
+
+			res = __copy_from_user_inatomic(fpr, addr,
+							sizeof(*fpr));
+			if (res)
+				goto fault;
+
+			/*
+			 * Update the hardware register if it is in use by the
+			 * task in this quantum, in order to avoid having to
+			 * save & restore the whole vector context.
+			 */
+			if (test_thread_flag(TIF_USEDMSA))
+				write_msa_wr(wd, fpr, df);
+
+			preempt_enable();
+			break;
+
+		case msa_st_op:
+			if (!access_ok(VERIFY_WRITE, addr, sizeof(*fpr)))
+				goto sigbus;
+
+			/*
+			 * Update from the hardware register if it is in use by
+			 * the task in this quantum, in order to avoid having to
+			 * save & restore the whole vector context.
+			 */
+			preempt_disable();
+			if (test_thread_flag(TIF_USEDMSA))
+				read_msa_wr(wd, fpr, df);
+			preempt_enable();
+
+			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
+			if (res)
+				goto fault;
+			break;
+
+		default:
+			goto sigbus;
+		}
+
+		compute_return_epc(regs);
+		break;
+
 #ifndef CONFIG_CPU_MIPSR6
 	/*
 	 * COP2 is available to implementor for application specific use.
@@ -2240,5 +2312,5 @@
 		return -ENOMEM;
 	return 0;
 }
-__initcall(debugfs_unaligned);
+arch_initcall(debugfs_unaligned);
 #endif
diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c
new file mode 100644
index 0000000..8452d93
--- /dev/null
+++ b/arch/mips/kernel/uprobes.c
@@ -0,0 +1,341 @@
+#include <linux/highmem.h>
+#include <linux/kdebug.h>
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <linux/sched.h>
+#include <linux/uprobes.h>
+
+#include <asm/branch.h>
+#include <asm/cpu-features.h>
+#include <asm/ptrace.h>
+#include <asm/inst.h>
+
+static inline int insn_has_delay_slot(const union mips_instruction insn)
+{
+	switch (insn.i_format.opcode) {
+	/*
+	 * jr and jalr are in r_format format.
+	 */
+	case spec_op:
+		switch (insn.r_format.func) {
+		case jalr_op:
+		case jr_op:
+			return 1;
+		}
+		break;
+
+	/*
+	 * This group contains:
+	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
+	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+	 */
+	case bcond_op:
+		switch (insn.i_format.rt) {
+		case bltz_op:
+		case bltzl_op:
+		case bgez_op:
+		case bgezl_op:
+		case bltzal_op:
+		case bltzall_op:
+		case bgezal_op:
+		case bgezall_op:
+		case bposge32_op:
+			return 1;
+		}
+		break;
+
+	/*
+	 * j and jal are unconditional jumps in j_format; the rest are
+	 * conditional branches.
+	 */
+	case jal_op:
+	case j_op:
+	case beq_op:
+	case beql_op:
+	case bne_op:
+	case bnel_op:
+	case blez_op: /* not really i_format */
+	case blezl_op:
+	case bgtz_op:
+	case bgtzl_op:
+		return 1;
+
+	/*
+	 * And now the FPA/cp1 branch instructions.
+	 */
+	case cop1_op:
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	case lwc2_op: /* This is bbit0 on Octeon */
+	case ldc2_op: /* This is bbit032 on Octeon */
+	case swc2_op: /* This is bbit1 on Octeon */
+	case sdc2_op: /* This is bbit132 on Octeon */
+#endif
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
+ * @aup: the probepoint information.
+ * @mm: the probed address space.
+ * @addr: virtual address at which to install the probepoint.
+ * Return 0 on success or a negative errno on error.
+ */
+int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
+	struct mm_struct *mm, unsigned long addr)
+{
+	union mips_instruction inst;
+
+	/*
+	 * For the time being this also blocks attempts to use uprobes with
+	 * MIPS16 and microMIPS.
+	 */
+	if (addr & 0x03)
+		return -EINVAL;
+
+	inst.word = aup->insn[0];
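+
+	/*
+	 * If the instruction has a delay slot, single-step the delay-slot
+	 * instruction out of line; the branch itself is emulated in
+	 * arch_uprobe_pre_xol().
+	 */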
+	aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)];
+	aup->ixol[1] = UPROBE_BRK_UPROBE_XOL;		/* break back into kernel */
+
+	return 0;
+}
+
+/**
+ * is_trap_insn - check if the instruction is a trap variant
+ * @insn: instruction to be checked.
+ * Returns true if @insn is a trap variant.
+ *
+ * This definition overrides the weak definition in kernel/events/uprobes.c
+ * and is needed for the case where an architecture has multiple trap
+ * instructions (like PowerPC or MIPS).  We treat BREAK just like the more
+ * modern conditional trap instructions.
+ */
+bool is_trap_insn(uprobe_opcode_t *insn)
+{
+	union mips_instruction inst;
+
+	inst.word = *insn;
+
+	switch (inst.i_format.opcode) {
+	case spec_op:
+		switch (inst.r_format.func) {
+		case break_op:
+		case teq_op:
+		case tge_op:
+		case tgeu_op:
+		case tlt_op:
+		case tltu_op:
+		case tne_op:
+			return 1;
+		}
+		break;
+
+	case bcond_op:	/* Yes, really ...  */
+		switch (inst.u_format.rt) {
+		case teqi_op:
+		case tgei_op:
+		case tgeiu_op:
+		case tlti_op:
+		case tltiu_op:
+		case tnei_op:
+			return 1;
+		}
+		break;
+	}
+
+	return 0;
+}
+
+#define UPROBE_TRAP_NR	ULONG_MAX
+
+/*
+ * arch_uprobe_pre_xol - prepare to execute out of line.
+ * @aup: the probepoint information.
+ * @regs: reflects the saved user state of current task.
+ */
+int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+	union mips_instruction insn;
+
+	/*
+	 * Now find the EPC where to resume after the breakpoint has been
+	 * dealt with.  This may require emulation of a branch.
+	 */
+	aup->resume_epc = regs->cp0_epc + 4;
+	insn.word = aup->insn[0];
+	if (insn_has_delay_slot(insn)) {
+		__compute_return_epc_for_insn(regs, insn);
+		aup->resume_epc = regs->cp0_epc;
+	}
+
+	utask->autask.saved_trap_nr = current->thread.trap_nr;
+	current->thread.trap_nr = UPROBE_TRAP_NR;
+	regs->cp0_epc = current->utask->xol_vaddr;
+
+	return 0;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+
+	current->thread.trap_nr = utask->autask.saved_trap_nr;
+	regs->cp0_epc = aup->resume_epc;
+
+	return 0;
+}
+
+/*
+ * If the xol insn itself traps and generates a signal (say,
+ * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
+ * instruction jumps back to its own address. It is assumed that anything
+ * like do_page_fault/do_trap/etc sets thread.trap_nr to something other
+ * than UPROBE_TRAP_NR.
+ *
+ * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
+ * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal
+ * to the UPROBE_TRAP_NR sentinel set by arch_uprobe_pre_xol().
+ */
+bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
+{
+	if (tsk->thread.trap_nr != UPROBE_TRAP_NR)
+		return true;
+
+	return false;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self,
+	unsigned long val, void *data)
+{
+	struct die_args *args = data;
+	struct pt_regs *regs = args->regs;
+
+	/* regs == NULL is a kernel bug */
+	if (WARN_ON(!regs))
+		return NOTIFY_DONE;
+
+	/* We are only interested in userspace traps */
+	if (!user_mode(regs))
+		return NOTIFY_DONE;
+
+	switch (val) {
+	case DIE_BREAK:
+		if (uprobe_pre_sstep_notifier(regs))
+			return NOTIFY_STOP;
+		break;
+	case DIE_UPROBE_XOL:
+		if (uprobe_post_sstep_notifier(regs))
+			return NOTIFY_STOP;
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * This function gets called when XOL instruction either gets trapped or
+ * the thread has a fatal signal. Reset the instruction pointer to its
+ * probed address for the potential restart or for post mortem analysis.
+ */
+void arch_uprobe_abort_xol(struct arch_uprobe *aup,
+	struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+
+	instruction_pointer_set(regs, utask->vaddr);
+}
+
+unsigned long arch_uretprobe_hijack_return_addr(
+	unsigned long trampoline_vaddr, struct pt_regs *regs)
+{
+	unsigned long ra;
+
+	ra = regs->regs[31];
+
+	/* Replace the return address with the trampoline address */
+	regs->regs[31] = trampoline_vaddr;
+
+	return ra;
+}
+
+/**
+ * set_swbp - store breakpoint at a given address.
+ * @auprobe: arch specific probepoint information.
+ * @mm: the probed process address space.
+ * @vaddr: the virtual address to insert the opcode.
+ *
+ * For mm @mm, store the breakpoint instruction at @vaddr.
+ * Return 0 (success) or a negative errno.
+ *
+ * This version overrides the weak version in kernel/events/uprobes.c.
+ * It is required to handle MIPS16 and microMIPS.
+ */
+int set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
+	unsigned long vaddr)
+{
+	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
+}
+
+/**
+ * set_orig_insn - Restore the original instruction.
+ * @mm: the probed process address space.
+ * @auprobe: arch specific probepoint information.
+ * @vaddr: the virtual address to insert the opcode.
+ *
+ * For mm @mm, restore the original instruction at @vaddr.
+ * Return 0 (success) or a negative errno.
+ *
+ * This overrides the weak version in kernel/events/uprobes.c.
+ */
+int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+		 unsigned long vaddr)
+{
+	return uprobe_write_opcode(mm, vaddr,
+			*(uprobe_opcode_t *)&auprobe->orig_inst[0].word);
+}
+
+void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+				  void *src, unsigned long len)
+{
+	void *kaddr;
+
+	/* Initialize the slot */
+	kaddr = kmap_atomic(page);
+	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
+	kunmap_atomic(kaddr);
+
+	/*
+	 * The MIPS version of flush_icache_range will operate safely on
+	 * user space addresses and more importantly, it doesn't require a
+	 * VMA argument.
+	 */
+	flush_icache_range(vaddr, vaddr + len);
+}
+
+/**
+ * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
+ * @regs: Reflects the saved state of the task after it has hit a breakpoint
+ * instruction.
+ * Return the address of the breakpoint instruction.
+ *
+ * This overrides the weak version in kernel/events/uprobes.c.
+ */
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+	return instruction_pointer(regs);
+}
+
+/*
+ * See if the instruction can be emulated.
+ * Returns true if instruction was emulated, false otherwise.
+ *
+ * For now we never emulate, so this function always returns false.
+ */
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	return false;
+}
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 11da314..9067b65 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -817,6 +817,7 @@
 
 static int vpe_release(struct inode *inode, struct file *filp)
 {
+#if defined(CONFIG_MIPS_VPE_LOADER_MT) || defined(CONFIG_MIPS_VPE_LOADER_CMP)
 	struct vpe *v;
 	Elf_Ehdr *hdr;
 	int ret = 0;
@@ -827,7 +828,7 @@
 
 	hdr = (Elf_Ehdr *) v->pbuffer;
 	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
-		if ((vpe_elfload(v) >= 0) && vpe_run) {
+		if (vpe_elfload(v) >= 0) {
 			vpe_run(v);
 		} else {
 			pr_warn("VPE loader: ELF load failed.\n");
@@ -850,6 +851,10 @@
 	v->plen = 0;
 
 	return ret;
+#else
+	pr_warn("VPE loader: ELF load failed.\n");
+	return -ENOEXEC;
+#endif
 }
 
 static ssize_t vpe_write(struct file *file, const char __user *buffer,
diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c
index a57959e..c710d96 100644
--- a/arch/mips/lasat/sysctl.c
+++ b/arch/mips/lasat/sysctl.c
@@ -270,4 +270,4 @@
 	return 0;
 }
 
-__initcall(lasat_register_sysctl);
+arch_initcall(lasat_register_sysctl);
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 167f3563..92a3731 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -13,6 +13,33 @@
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
 
+void dump_tlb_regs(void)
+{
+	const int field = 2 * sizeof(unsigned long);
+
+	pr_info("Index    : %0x\n", read_c0_index());
+	pr_info("PageMask : %0x\n", read_c0_pagemask());
+	pr_info("EntryHi  : %0*lx\n", field, read_c0_entryhi());
+	pr_info("EntryLo0 : %0*lx\n", field, read_c0_entrylo0());
+	pr_info("EntryLo1 : %0*lx\n", field, read_c0_entrylo1());
+	pr_info("Wired    : %0x\n", read_c0_wired());
+	switch (current_cpu_type()) {
+	case CPU_R10000:
+	case CPU_R12000:
+	case CPU_R14000:
+	case CPU_R16000:
+		pr_info("FrameMask: %0x\n", read_c0_framemask());
+		break;
+	}
+	if (cpu_has_small_pages || cpu_has_rixi || cpu_has_xpa)
+		pr_info("PageGrain: %0x\n", read_c0_pagegrain());
+	if (cpu_has_htw) {
+		pr_info("PWField  : %0*lx\n", field, read_c0_pwfield());
+		pr_info("PWSize   : %0*lx\n", field, read_c0_pwsize());
+		pr_info("PWCtl    : %0x\n", read_c0_pwctl());
+	}
+}
+
 static inline const char *msk2str(unsigned int mask)
 {
 	switch (mask) {
@@ -87,7 +114,7 @@
 		 * leave only a single G bit set after a machine check exception
 		 * due to duplicate TLB entry.
 		 */
-		if (!((entrylo0 | entrylo1) & MIPS_ENTRYLO_G) &&
+		if (!((entrylo0 | entrylo1) & ENTRYLO_G) &&
 		    (entryhi & 0xff) != asid)
 			continue;
 
@@ -96,8 +123,8 @@
 		 */
 		printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));
 
-		c0 = (entrylo0 & MIPS_ENTRYLO_C) >> MIPS_ENTRYLO_C_SHIFT;
-		c1 = (entrylo1 & MIPS_ENTRYLO_C) >> MIPS_ENTRYLO_C_SHIFT;
+		c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
+		c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
 
 		printk("va=%0*lx asid=%02lx\n",
 		       vwidth, (entryhi & ~0x1fffUL),
@@ -114,9 +141,9 @@
 			       (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
 		printk("pa=%0*llx c=%d d=%d v=%d g=%d] [",
 		       pwidth, pa, c0,
-		       (entrylo0 & MIPS_ENTRYLO_D) ? 1 : 0,
-		       (entrylo0 & MIPS_ENTRYLO_V) ? 1 : 0,
-		       (entrylo0 & MIPS_ENTRYLO_G) ? 1 : 0);
+		       (entrylo0 & ENTRYLO_D) ? 1 : 0,
+		       (entrylo0 & ENTRYLO_V) ? 1 : 0,
+		       (entrylo0 & ENTRYLO_G) ? 1 : 0);
 		/* RI/XI are in awkward places, so mask them off separately */
 		pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
 		if (xpa)
@@ -128,9 +155,9 @@
 			       (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
 		printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
 		       pwidth, pa, c1,
-		       (entrylo1 & MIPS_ENTRYLO_D) ? 1 : 0,
-		       (entrylo1 & MIPS_ENTRYLO_V) ? 1 : 0,
-		       (entrylo1 & MIPS_ENTRYLO_G) ? 1 : 0);
+		       (entrylo1 & ENTRYLO_D) ? 1 : 0,
+		       (entrylo1 & ENTRYLO_V) ? 1 : 0,
+		       (entrylo1 & ENTRYLO_G) ? 1 : 0);
 	}
 	printk("\n");
 
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 8e0d3cf..cfcbb52 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -14,6 +14,17 @@
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
 
+extern int r3k_have_wired_reg;
+
+void dump_tlb_regs(void)
+{
+	pr_info("Index    : %0x\n", read_c0_index());
+	pr_info("EntryHi  : %0lx\n", read_c0_entryhi());
+	pr_info("EntryLo  : %0lx\n", read_c0_entrylo0());
+	if (r3k_have_wired_reg)
+		pr_info("Wired    : %0x\n", read_c0_wired());
+}
+
 static void dump_tlb(int first, int last)
 {
 	int	i;
diff --git a/arch/mips/loongson32/common/time.c b/arch/mips/loongson32/common/time.c
index df0f850..0996b02 100644
--- a/arch/mips/loongson32/common/time.c
+++ b/arch/mips/loongson32/common/time.c
@@ -126,26 +126,34 @@
 	return IRQ_HANDLED;
 }
 
-static void ls1x_clockevent_set_mode(enum clock_event_mode mode,
-				     struct clock_event_device *cd)
+static int ls1x_clockevent_set_state_periodic(struct clock_event_device *cd)
 {
 	raw_spin_lock(&ls1x_timer_lock);
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
-		ls1x_pwmtimer_restart();
-	case CLOCK_EVT_MODE_RESUME:
-		__raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		__raw_writel(__raw_readl(timer_base + PWM_CTRL) & ~CNT_EN,
-			     timer_base + PWM_CTRL);
-		break;
-	default:
-		break;
-	}
+	ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
+	ls1x_pwmtimer_restart();
+	__raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
 	raw_spin_unlock(&ls1x_timer_lock);
+
+	return 0;
+}
+
+static int ls1x_clockevent_tick_resume(struct clock_event_device *cd)
+{
+	raw_spin_lock(&ls1x_timer_lock);
+	__raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
+	raw_spin_unlock(&ls1x_timer_lock);
+
+	return 0;
+}
+
+static int ls1x_clockevent_set_state_shutdown(struct clock_event_device *cd)
+{
+	raw_spin_lock(&ls1x_timer_lock);
+	__raw_writel(__raw_readl(timer_base + PWM_CTRL) & ~CNT_EN,
+		     timer_base + PWM_CTRL);
+	raw_spin_unlock(&ls1x_timer_lock);
+
+	return 0;
 }
 
 static int ls1x_clockevent_set_next(unsigned long evt,
@@ -160,12 +168,15 @@
 }
 
 static struct clock_event_device ls1x_clockevent = {
-	.name		= "ls1x-pwmtimer",
-	.features	= CLOCK_EVT_FEAT_PERIODIC,
-	.rating		= 300,
-	.irq		= LS1X_TIMER_IRQ,
-	.set_next_event	= ls1x_clockevent_set_next,
-	.set_mode	= ls1x_clockevent_set_mode,
+	.name			= "ls1x-pwmtimer",
+	.features		= CLOCK_EVT_FEAT_PERIODIC,
+	.rating			= 300,
+	.irq			= LS1X_TIMER_IRQ,
+	.set_next_event		= ls1x_clockevent_set_next,
+	.set_state_shutdown	= ls1x_clockevent_set_state_shutdown,
+	.set_state_periodic	= ls1x_clockevent_set_state_periodic,
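+	/* oneshot is not advertised in features; stopping the timer is a safe fallback */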
+	.set_state_oneshot	= ls1x_clockevent_set_state_shutdown,
+	.tick_resume		= ls1x_clockevent_tick_resume,
 };
 
 static struct irqaction ls1x_pwmtimer_irqaction = {
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
index 8750370..da77d41 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
+++ b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
@@ -51,40 +51,36 @@
 }
 EXPORT_SYMBOL(enable_mfgpt0_counter);
 
-static void init_mfgpt_timer(enum clock_event_mode mode,
-			     struct clock_event_device *evt)
+static int mfgpt_timer_set_periodic(struct clock_event_device *evt)
 {
 	raw_spin_lock(&mfgpt_lock);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		outw(COMPARE, MFGPT0_CMP2);	/* set comparator2 */
-		outw(0, MFGPT0_CNT);	/* set counter to 0 */
-		enable_mfgpt0_counter();
-		break;
+	outw(COMPARE, MFGPT0_CMP2);	/* set comparator2 */
+	outw(0, MFGPT0_CNT);		/* set counter to 0 */
+	enable_mfgpt0_counter();
 
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
-		    evt->mode == CLOCK_EVT_MODE_ONESHOT)
-			disable_mfgpt0_counter();
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* The oneshot mode have very high deviation, Not use it! */
-		break;
-
-	case CLOCK_EVT_MODE_RESUME:
-		/* Nothing to do here */
-		break;
-	}
 	raw_spin_unlock(&mfgpt_lock);
+	return 0;
+}
+
+static int mfgpt_timer_shutdown(struct clock_event_device *evt)
+{
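+	/* Only stop the counter if it was actually running */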
+	if (clockevent_state_periodic(evt) || clockevent_state_oneshot(evt)) {
+		raw_spin_lock(&mfgpt_lock);
+		disable_mfgpt0_counter();
+		raw_spin_unlock(&mfgpt_lock);
+	}
+
+	return 0;
 }
 
 static struct clock_event_device mfgpt_clockevent = {
 	.name = "mfgpt",
 	.features = CLOCK_EVT_FEAT_PERIODIC,
-	.set_mode = init_mfgpt_timer,
+
+	/* The oneshot mode has very high deviation, don't use it! */
+	.set_state_shutdown = mfgpt_timer_shutdown,
+	.set_state_periodic = mfgpt_timer_set_periodic,
 	.irq = CS5536_MFGPT_INTR,
 };
 
diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
index 5c21cd3..bf9f1a7 100644
--- a/arch/mips/loongson64/loongson-3/hpet.c
+++ b/arch/mips/loongson64/loongson-3/hpet.c
@@ -78,55 +78,77 @@
 	/* Do nothing on Loongson-3 */
 }
 
-static void hpet_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int hpet_set_state_periodic(struct clock_event_device *evt)
 {
-	int cfg = 0;
+	int cfg;
 
 	spin_lock(&hpet_lock);
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		pr_info("set clock event to periodic mode!\n");
-		/* stop counter */
-		hpet_stop_counter();
 
-		/* enables the timer0 to generate a periodic interrupt */
-		cfg = hpet_read(HPET_T0_CFG);
-		cfg &= ~HPET_TN_LEVEL;
-		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
-				HPET_TN_SETVAL | HPET_TN_32BIT;
-		hpet_write(HPET_T0_CFG, cfg);
+	pr_info("set clock event to periodic mode!\n");
+	/* stop counter */
+	hpet_stop_counter();
 
-		/* set the comparator */
-		hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
-		udelay(1);
-		hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
+	/* enable timer0 to generate a periodic interrupt */
+	cfg = hpet_read(HPET_T0_CFG);
+	cfg &= ~HPET_TN_LEVEL;
+	cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
+		HPET_TN_32BIT;
+	hpet_write(HPET_T0_CFG, cfg);
 
-		/* start counter */
-		hpet_start_counter();
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		cfg = hpet_read(HPET_T0_CFG);
-		cfg &= ~HPET_TN_ENABLE;
-		hpet_write(HPET_T0_CFG, cfg);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		pr_info("set clock event to one shot mode!\n");
-		cfg = hpet_read(HPET_T0_CFG);
-		/* set timer0 type
-		 * 1 : periodic interrupt
-		 * 0 : non-periodic(oneshot) interrupt
-		 */
-		cfg &= ~HPET_TN_PERIODIC;
-		cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
-		hpet_write(HPET_T0_CFG, cfg);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		hpet_enable_legacy_int();
-		break;
-	}
+	/* set the comparator */
+	hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
+	udelay(1);
+	hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
+
+	/* start counter */
+	hpet_start_counter();
+
 	spin_unlock(&hpet_lock);
+	return 0;
+}
+
+static int hpet_set_state_shutdown(struct clock_event_device *evt)
+{
+	int cfg;
+
+	spin_lock(&hpet_lock);
+
+	cfg = hpet_read(HPET_T0_CFG);
+	cfg &= ~HPET_TN_ENABLE;
+	hpet_write(HPET_T0_CFG, cfg);
+
+	spin_unlock(&hpet_lock);
+	return 0;
+}
+
+static int hpet_set_state_oneshot(struct clock_event_device *evt)
+{
+	int cfg;
+
+	spin_lock(&hpet_lock);
+
+	pr_info("set clock event to one shot mode!\n");
+	cfg = hpet_read(HPET_T0_CFG);
+	/*
+	 * set timer0 type
+	 * 1 : periodic interrupt
+	 * 0 : non-periodic(oneshot) interrupt
+	 */
+	cfg &= ~HPET_TN_PERIODIC;
+	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
+	hpet_write(HPET_T0_CFG, cfg);
+
+	spin_unlock(&hpet_lock);
+	return 0;
+}
+
+static int hpet_tick_resume(struct clock_event_device *evt)
+{
+	spin_lock(&hpet_lock);
+	hpet_enable_legacy_int();
+	spin_unlock(&hpet_lock);
+
+	return 0;
 }
 
 static int hpet_next_event(unsigned long delta,
@@ -206,7 +228,10 @@
 	cd->name = "hpet";
 	cd->rating = 320;
 	cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
-	cd->set_mode = hpet_set_mode;
+	cd->set_state_shutdown = hpet_set_state_shutdown;
+	cd->set_state_periodic = hpet_set_state_periodic;
+	cd->set_state_oneshot = hpet_set_state_oneshot;
+	cd->tick_resume = hpet_tick_resume;
 	cd->set_next_event = hpet_next_event;
 	cd->irq = HPET_T0_IRQ;
 	cd->cpumask = cpumask_of(cpu);
diff --git a/arch/mips/math-emu/Makefile b/arch/mips/math-emu/Makefile
index 2e5f962..a19641d 100644
--- a/arch/mips/math-emu/Makefile
+++ b/arch/mips/math-emu/Makefile
@@ -4,9 +4,9 @@
 
 obj-y	+= cp1emu.o ieee754dp.o ieee754sp.o ieee754.o \
 	   dp_div.o dp_mul.o dp_sub.o dp_add.o dp_fsp.o dp_cmp.o dp_simple.o \
-	   dp_tint.o dp_fint.o \
+	   dp_tint.o dp_fint.o dp_maddf.o dp_msubf.o dp_2008class.o dp_fmin.o dp_fmax.o \
 	   sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_simple.o \
-	   sp_tint.o sp_fint.o \
+	   sp_tint.o sp_fint.o sp_maddf.o sp_msubf.o sp_2008class.o sp_fmin.o sp_fmax.o \
 	   dsemul.o
 
 lib-y	+= ieee754d.o \
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 712f17a..32f0e19 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -1137,7 +1137,7 @@
 			break;
 
 		case mfhc_op:
-			if (!cpu_has_mips_r2)
+			if (!cpu_has_mips_r2_r6)
 				goto sigill;
 
 			/* copregister rd -> gpr[rt] */
@@ -1148,7 +1148,7 @@
 			break;
 
 		case mthc_op:
-			if (!cpu_has_mips_r2)
+			if (!cpu_has_mips_r2_r6)
 				goto sigill;
 
 			/* copregister rd <- gpr[rt] */
@@ -1181,6 +1181,24 @@
 			}
 			break;
 
+		case bc1eqz_op:
+		case bc1nez_op:
+			if (!cpu_has_mips_r6 || delay_slot(xcp))
+				return SIGILL;
+
+			cond = likely = 0;
+			switch (MIPSInst_RS(ir)) {
+			case bc1eqz_op:
+				/* BC1EQZ: branch if bit 0 of FPR[rt] is clear */
+				if (!(get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1))
+					cond = 1;
+				break;
+			case bc1nez_op:
+				/* BC1NEZ: branch if bit 0 of FPR[rt] is set */
+				if (get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1)
+					cond = 1;
+				break;
+			}
+			goto branch_common;
+
 		case bc_op:
 			if (delay_slot(xcp))
 				return SIGILL;
@@ -1207,7 +1225,7 @@
 			case bct_op:
 				break;
 			}
-
+branch_common:
 			set_delay_slot(xcp);
 			if (cond) {
 				/*
@@ -1376,6 +1394,14 @@
 	IEEE754_CLT | IEEE754_CEQ | IEEE754_CUN,	/* cmp_ule (sig) cmp_ngt */
 };
 
+static const unsigned char negative_cmptab[8] = {
+	0, /* Reserved */
+	IEEE754_CLT | IEEE754_CGT | IEEE754_CEQ,
+	IEEE754_CLT | IEEE754_CGT | IEEE754_CUN,
+	IEEE754_CLT | IEEE754_CGT,
+	/* Reserved */
+};
+
 
 /*
  * Additional MIPS4 instructions
@@ -1717,6 +1743,126 @@
 			SPFROMREG(rv.s, MIPSInst_FS(ir));
 			break;
 
+		case fseleqz_op:
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(rv.s, MIPSInst_FT(ir));
+			if (rv.w & 0x1)
+				rv.w = 0;
+			else
+				SPFROMREG(rv.s, MIPSInst_FS(ir));
+			break;
+
+		case fselnez_op:
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(rv.s, MIPSInst_FT(ir));
+			if (rv.w & 0x1)
+				SPFROMREG(rv.s, MIPSInst_FS(ir));
+			else
+				rv.w = 0;
+			break;
+
+		case fmaddf_op: {
+			union ieee754sp ft, fs, fd;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			SPFROMREG(fd, MIPSInst_FD(ir));
+			rv.s = ieee754sp_maddf(fd, fs, ft);
+			break;
+		}
+
+		case fmsubf_op: {
+			union ieee754sp ft, fs, fd;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			SPFROMREG(fd, MIPSInst_FD(ir));
+			rv.s = ieee754sp_msubf(fd, fs, ft);
+			break;
+		}
+
+		case frint_op: {
+			union ieee754sp fs;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.l = ieee754sp_tlong(fs);
+			rv.s = ieee754sp_flong(rv.l);
+			goto copcsr;
+		}
+
+		case fclass_op: {
+			union ieee754sp fs;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.w = ieee754sp_2008class(fs);
+			rfmt = w_fmt;
+			break;
+		}
+
+		case fmin_op: {
+			union ieee754sp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.s = ieee754sp_fmin(fs, ft);
+			break;
+		}
+
+		case fmina_op: {
+			union ieee754sp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.s = ieee754sp_fmina(fs, ft);
+			break;
+		}
+
+		case fmax_op: {
+			union ieee754sp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.s = ieee754sp_fmax(fs, ft);
+			break;
+		}
+
+		case fmaxa_op: {
+			union ieee754sp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.s = ieee754sp_fmaxa(fs, ft);
+			break;
+		}
+
 		case fabs_op:
 			handler.u = ieee754sp_abs;
 			goto scopuop;
@@ -1820,7 +1966,7 @@
 			goto copcsr;
 
 		default:
-			if (MIPSInst_FUNC(ir) >= fcmp_op) {
+			if (!NO_R6EMU && MIPSInst_FUNC(ir) >= fcmp_op) {
 				unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
 				union ieee754sp fs, ft;
 
@@ -1914,6 +2060,127 @@
 				return 0;
 			DPFROMREG(rv.d, MIPSInst_FS(ir));
 			break;
+
+		case fseleqz_op:
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(rv.d, MIPSInst_FT(ir));
+			if (rv.l & 0x1)
+				rv.l = 0;
+			else
+				DPFROMREG(rv.d, MIPSInst_FS(ir));
+			break;
+
+		case fselnez_op:
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(rv.d, MIPSInst_FT(ir));
+			if (rv.l & 0x1)
+				DPFROMREG(rv.d, MIPSInst_FS(ir));
+			else
+				rv.l = 0;
+			break;
+
+		case fmaddf_op: {
+			union ieee754dp ft, fs, fd;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			DPFROMREG(fd, MIPSInst_FD(ir));
+			rv.d = ieee754dp_maddf(fd, fs, ft);
+			break;
+		}
+
+		case fmsubf_op: {
+			union ieee754dp ft, fs, fd;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			DPFROMREG(fd, MIPSInst_FD(ir));
+			rv.d = ieee754dp_msubf(fd, fs, ft);
+			break;
+		}
+
+		case frint_op: {
+			union ieee754dp fs;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.l = ieee754dp_tlong(fs);
+			rv.d = ieee754dp_flong(rv.l);
+			goto copcsr;
+		}
+
+		case fclass_op: {
+			union ieee754dp fs;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.w = ieee754dp_2008class(fs);
+			rfmt = w_fmt;
+			break;
+		}
+
+		case fmin_op: {
+			union ieee754dp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.d = ieee754dp_fmin(fs, ft);
+			break;
+		}
+
+		case fmina_op: {
+			union ieee754dp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.d = ieee754dp_fmina(fs, ft);
+			break;
+		}
+
+		case fmax_op: {
+			union ieee754dp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.d = ieee754dp_fmax(fs, ft);
+			break;
+		}
+
+		case fmaxa_op: {
+			union ieee754dp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.d = ieee754dp_fmaxa(fs, ft);
+			break;
+		}
+
 		case fabs_op:
 			handler.u = ieee754dp_abs;
 			goto dcopuop;
@@ -1997,7 +2264,7 @@
 			goto copcsr;
 
 		default:
-			if (MIPSInst_FUNC(ir) >= fcmp_op) {
+			if (!NO_R6EMU && MIPSInst_FUNC(ir) >= fcmp_op) {
 				unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
 				union ieee754dp fs, ft;
 
@@ -2021,8 +2288,11 @@
 			break;
 		}
 		break;
+	}
 
-	case w_fmt:
+	case w_fmt: {
+		union ieee754dp fs;
+
 		switch (MIPSInst_FUNC(ir)) {
 		case fcvts_op:
 			/* convert word to single precision real */
@@ -2036,10 +2306,65 @@
 			rv.d = ieee754dp_fint(fs.bits);
 			rfmt = d_fmt;
 			goto copcsr;
-		default:
-			return SIGILL;
+		default: {
+			/* Emulating the new CMP.condn.fmt R6 instruction */
+#define CMPOP_MASK	0x7
+#define SIGN_BIT	(0x1 << 3)
+#define PREDICATE_BIT	(0x1 << 4)
+
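+			/*
+			 * FUNC field layout: bits 2..0 select the comparison,
+			 * bit 3 the signalling variant, bit 4 the negated
+			 * predicates; bit 5 must be zero.
+			 */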
+			int cmpop = MIPSInst_FUNC(ir) & CMPOP_MASK;
+			int sig = MIPSInst_FUNC(ir) & SIGN_BIT;
+			union ieee754sp fs, ft;
+
+			/* This is an R6 only instruction */
+			if (!cpu_has_mips_r6 ||
+			    (MIPSInst_FUNC(ir) & 0x20))
+				return SIGILL;
+
+			/* fmt is w_fmt for single precision so fix it */
+			rfmt = s_fmt;
+			/* default to false */
+			rv.w = 0;
+
+			/* CMP.condn.S */
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			SPFROMREG(ft, MIPSInst_FT(ir));
+
+			/* positive predicates */
+			if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
+				if (ieee754sp_cmp(fs, ft, cmptab[cmpop],
+						  sig))
+					rv.w = -1; /* true, all 1s */
+				if (sig &&
+				    ieee754_cxtest(IEEE754_INVALID_OPERATION))
+					rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+				else
+					goto copcsr;
+			} else {
+				/* negative predicates */
+				switch (cmpop) {
+				case 1:
+				case 2:
+				case 3:
+					if (ieee754sp_cmp(fs, ft,
+							  negative_cmptab[cmpop],
+							  sig))
+						rv.w = -1; /* true, all 1s */
+					if (sig &&
+					    ieee754_cxtest(IEEE754_INVALID_OPERATION))
+						rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+					else
+						goto copcsr;
+					break;
+				default:
+					/* Reserved R6 ops */
+					pr_err("Reserved MIPS R6 CMP.condn.S operation\n");
+					return SIGILL;
+				}
+			}
+			break;
+			}
 		}
-		break;
 	}
 
 	case l_fmt:
@@ -2060,11 +2385,60 @@
 			rv.d = ieee754dp_flong(bits);
 			rfmt = d_fmt;
 			goto copcsr;
-		default:
-			return SIGILL;
-		}
-		break;
+		default: {
+			/* Emulating the new CMP.condn.fmt R6 instruction */
+			int cmpop = MIPSInst_FUNC(ir) & CMPOP_MASK;
+			int sig = MIPSInst_FUNC(ir) & SIGN_BIT;
+			union ieee754dp fs, ft;
 
+			if (!cpu_has_mips_r6 ||
+			    (MIPSInst_FUNC(ir) & 0x20))
+				return SIGILL;
+
+			/* fmt is l_fmt for double precision so fix it */
+			rfmt = d_fmt;
+			/* default to false */
+			rv.l = 0;
+
+			/* CMP.condn.D */
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			DPFROMREG(ft, MIPSInst_FT(ir));
+
+			/* positive predicates */
+			if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
+				if (ieee754dp_cmp(fs, ft,
+						  cmptab[cmpop], sig))
+					rv.l = -1LL; /* true, all 1s */
+				if (sig &&
+				    ieee754_cxtest(IEEE754_INVALID_OPERATION))
+					rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+				else
+					goto copcsr;
+			} else {
+				/* negative predicates */
+				switch (cmpop) {
+				case 1:
+				case 2:
+				case 3:
+					if (ieee754dp_cmp(fs, ft,
+							  negative_cmptab[cmpop],
+							  sig))
+						rv.l = -1LL; /* true, all 1s */
+					if (sig &&
+					    ieee754_cxtest(IEEE754_INVALID_OPERATION))
+						rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+					else
+						goto copcsr;
+					break;
+				default:
+					/* Reserved R6 ops */
+					pr_err("Reserved MIPS R6 CMP.condn.D operation\n");
+					return SIGILL;
+				}
+			}
+			break;
+			}
+		}
 	default:
 		return SIGILL;
 	}
diff --git a/arch/mips/math-emu/dp_2008class.c b/arch/mips/math-emu/dp_2008class.c
new file mode 100644
index 0000000..9dc39fc
--- /dev/null
+++ b/arch/mips/math-emu/dp_2008class.c
@@ -0,0 +1,55 @@
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: CLASS.f
+ * FPR[fd] = class(FPR[fs])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754dp.h"
+
+int ieee754dp_2008class(union ieee754dp x)
+{
+	COMPXDP;
+
+	EXPLODEXDP;
+
+	/*
+	 * 10 bit mask as follows:
+	 *
+	 * bit0 = SNAN
+	 * bit1 = QNAN
+	 * bit2 = -INF
+	 * bit3 = -NORM
+	 * bit4 = -DNORM
+	 * bit5 = -ZERO
+	 * bit6 = INF
+	 * bit7 = NORM
+	 * bit8 = DNORM
+	 * bit9 = ZERO
+	 */
+
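+	/* Positive classes sit four bits above their negative counterparts */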
+	switch (xc) {
+	case IEEE754_CLASS_SNAN:
+		return 0x01;
+	case IEEE754_CLASS_QNAN:
+		return 0x02;
+	case IEEE754_CLASS_INF:
+		return 0x04 << (xs ? 0 : 4);
+	case IEEE754_CLASS_NORM:
+		return 0x08 << (xs ? 0 : 4);
+	case IEEE754_CLASS_DNORM:
+		return 0x10 << (xs ? 0 : 4);
+	case IEEE754_CLASS_ZERO:
+		return 0x20 << (xs ? 0 : 4);
+	default:
+		pr_err("Unknown class: %d\n", xc);
+		return 0;
+	}
+}
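
ieee754dp_2008class() above maps each operand class onto the 10-bit mask documented in the comment, with the negative variants sitting four bit positions below the positive ones. A host-side sketch of the same layout, using the C99 classification macros (host NaNs are treated as quiet here):

	#include <math.h>

	/* Sketch: the same 10-bit CLASS.fmt mask, computed on host doubles. */
	static int class2008(double x)
	{
		int shift = signbit(x) ? 0 : 4;	/* negative classes use the low bits */

		switch (fpclassify(x)) {
		case FP_NAN:		return 0x02;		/* quiet NaN */
		case FP_INFINITE:	return 0x04 << shift;
		case FP_NORMAL:		return 0x08 << shift;
		case FP_SUBNORMAL:	return 0x10 << shift;
		default:		return 0x20 << shift;	/* FP_ZERO */
		}
	}
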
diff --git a/arch/mips/math-emu/dp_fmax.c b/arch/mips/math-emu/dp_fmax.c
new file mode 100644
index 0000000..fd71b8d
--- /dev/null
+++ b/arch/mips/math-emu/dp_fmax.c
@@ -0,0 +1,213 @@
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: MAX{,A}.f
+ * MAX : Scalar Floating-Point Maximum
+ * MAXA: Scalar Floating-Point argument with Maximum Absolute Value
+ *
+ * MAX.D : FPR[fd] = maxNum(FPR[fs],FPR[ft])
+ * MAXA.D: FPR[fd] = maxNumMag(FPR[fs],FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
+{
+	COMPXDP;
+	COMPYDP;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+
+	FLUSHXDP;
+	FLUSHYDP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return xs ? y : x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return ys ? x : y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754dp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		DPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	/* Compare signs */
+	if (xs > ys)
+		return y;
+	else if (xs < ys)
+		return x;
+
+	/* Compare exponent */
+	if (xe > ye)
+		return x;
+	else if (xe < ye)
+		return y;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return y;
+	return x;
+}
+
+union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+{
+	COMPXDP;
+	COMPYDP;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+
+	FLUSHXDP;
+	FLUSHYDP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754dp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		DPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	/* Compare exponent */
+	if (xe > ye)
+		return x;
+	else if (xe < ye)
+		return y;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return y;
+	return x;
+}
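
The tables of CLPAIR cases above implement the IEEE 754-2008 maxNum rule: a quiet NaN loses to any number, and only a NaN/NaN pair propagates a NaN. The same rule on host doubles, as a sketch (C99 fmax() behaves this way too):

	#include <math.h>

	/* Sketch of IEEE 754-2008 maxNum: numbers are preferred to NaNs. */
	static double max_num(double x, double y)
	{
		if (isnan(x))
			return y;	/* may itself be NaN if both are */
		if (isnan(y))
			return x;
		return x > y ? x : y;
	}
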
diff --git a/arch/mips/math-emu/dp_fmin.c b/arch/mips/math-emu/dp_fmin.c
new file mode 100644
index 0000000..c1072b0
--- /dev/null
+++ b/arch/mips/math-emu/dp_fmin.c
@@ -0,0 +1,213 @@
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: MIN{,A}.f
+ * MIN : Scalar Floating-Point Minimum
+ * MINA: Scalar Floating-Point argument with Minimum Absolute Value
+ *
+ * MIN.D : FPR[fd] = minNum(FPR[fs],FPR[ft])
+ * MINA.D: FPR[fd] = minNumMag(FPR[fs],FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
+{
+	COMPXDP;
+	COMPYDP;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+
+	FLUSHXDP;
+	FLUSHYDP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return xs ? x : y;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return ys ? y : x;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754dp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		DPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	/* Compare signs */
+	if (xs > ys)
+		return x;
+	else if (xs < ys)
+		return y;
+
+	/* Compare exponent */
+	if (xe > ye)
+		return y;
+	else if (xe < ye)
+		return x;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return x;
+	return y;
+}
+
+union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
+{
+	COMPXDP;
+	COMPYDP;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+
+	FLUSHXDP;
+	FLUSHYDP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754dp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		DPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	/* Compare exponent */
+	if (xe > ye)
+		return y;
+	else if (xe < ye)
+		return x;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return x;
+	return y;
+}
diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
new file mode 100644
index 0000000..119eda9
--- /dev/null
+++ b/arch/mips/math-emu/dp_maddf.c
@@ -0,0 +1,265 @@
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: MADDF.f (Fused Multiply Add)
+ * MADDF.fmt: FPR[fd] = FPR[fd] + (FPR[fs] x FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y)
+{
+	int re;
+	int rs;
+	u64 rm;
+	unsigned lxm;
+	unsigned hxm;
+	unsigned lym;
+	unsigned hym;
+	u64 lrm;
+	u64 hrm;
+	u64 t;
+	u64 at;
+	int s;
+
+	COMPXDP;
+	COMPYDP;
+
+	u64 zm; int ze; int zs __maybe_unused; int zc;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+	EXPLODEDP(z, zc, zs, ze, zm)
+
+	FLUSHXDP;
+	FLUSHYDP;
+	FLUSHDP(z, zc, zs, ze, zm);
+
+	ieee754_clearcx();
+
+	switch (zc) {
+	case IEEE754_CLASS_SNAN:
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754dp_nanxcpt(z);
+	case IEEE754_CLASS_DNORM:
+		DPDNORMx(zm, ze);
+	/* QNAN is handled separately below */
+	}
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return x;
+
+
+	/*
+	 * Infinity handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754dp_indef();
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		return ieee754dp_inf(xs ^ ys);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		/* Multiplication is 0 so just return z */
+		return z;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		DPDNORMX;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		/* fall through to real computations */
+	}
+
+	/* Finally get to do some computation */
+
+	/*
+	 * Do the multiplication bit first
+	 *
+	 * rm = xm * ym, re = xe + ye basically
+	 *
+	 * At this point xm and ym should have been normalized.
+	 */
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	re = xe + ye;
+	rs = xs ^ ys;
+
+	/* shunt to top of word */
+	xm <<= 64 - (DP_FBITS + 1);
+	ym <<= 64 - (DP_FBITS + 1);
+
+	/*
+	 * Multiply 64 bit xm, ym to give the high 64 bits of the product
+	 * in rm, with the discarded low bits folded in as stickiness.
+	 */
+
+	/* 32 * 32 => 64 */
+#define DPXMULT(x, y)	((u64)(x) * (u64)(y))
+
+	lxm = xm;
+	hxm = xm >> 32;
+	lym = ym;
+	hym = ym >> 32;
+
+	lrm = DPXMULT(lxm, lym);
+	hrm = DPXMULT(hxm, hym);
+
+	t = DPXMULT(lxm, hym);
+
+	at = lrm + (t << 32);
+	hrm += at < lrm;
+	lrm = at;
+
+	hrm = hrm + (t >> 32);
+
+	t = DPXMULT(hxm, lym);
+
+	at = lrm + (t << 32);
+	hrm += at < lrm;
+	lrm = at;
+
+	hrm = hrm + (t >> 32);
+
+	rm = hrm | (lrm != 0);
+
+	/*
+	 * Sticky shift down to normal rounding precision.
+	 */
+	if ((s64) rm < 0) {
+		rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
+		     ((rm << (DP_FBITS + 1 + 3)) != 0);
+		re++;
+	} else {
+		rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
+		     ((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
+	}
+	assert(rm & (DP_HIDDEN_BIT << 3));
+
+	/* And now the addition */
+	assert(zm & DP_HIDDEN_BIT);
+
+	/*
+	 * Provide guard,round and stick bit space.
+	 */
+	zm <<= 3;
+
+	if (ze > re) {
+		/*
+		 * Have to shift the product fraction right to align.
+		 */
+		s = ze - re;
+		rm = XDPSRS(rm, s);
+		re += s;
+	} else if (re > ze) {
+		/*
+		 * Have to shift z fraction right to align.
+		 */
+		s = re - ze;
+		zm = XDPSRS(zm, s);
+		ze += s;
+	}
+	assert(ze == re);
+	assert(ze <= DP_EMAX);
+
+	if (zs == rs) {
+		/*
+		 * Generate 28 bit result of adding two 27 bit numbers
+		 * leaving result in zm, zs and ze.
+		 */
+		zm = zm + rm;
+
+		if (zm >> (DP_FBITS + 1 + 3)) { /* carry out */
+			zm = XDPSRS1(zm);
+			ze++;
+		}
+	} else {
+		if (zm >= rm) {
+			zm = zm - rm;
+		} else {
+			zm = rm - zm;
+			zs = rs;
+		}
+		if (zm == 0)
+			return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+		/*
+		 * Normalize to rounding precision.
+		 */
+		while ((zm >> (DP_FBITS + 3)) == 0) {
+			zm <<= 1;
+			ze--;
+		}
+	}
+
+	return ieee754dp_format(zs, ze, zm);
+}
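
The multiplication step above builds a 64x64 -> 128 bit product out of four 32x32 partial products and then folds the low half into a single sticky bit ("rm = hrm | (lrm != 0)"). The same scheme in isolation, as a sketch:

	#include <stdint.h>

	/* Sketch of the DPXMULT scheme: high 64 bits of a 64x64 product,
	 * with the low 64 bits surviving only as a sticky bit. */
	static uint64_t mul64_high_sticky(uint64_t x, uint64_t y)
	{
		uint64_t lx = (uint32_t)x, hx = x >> 32;
		uint64_t ly = (uint32_t)y, hy = y >> 32;
		uint64_t lo = lx * ly;
		uint64_t hi = hx * hy;
		uint64_t t, at;

		t = lx * hy;
		at = lo + (t << 32);
		hi += at < lo;		/* carry out of the low half */
		lo = at;
		hi += t >> 32;

		t = hx * ly;
		at = lo + (t << 32);
		hi += at < lo;
		lo = at;
		hi += t >> 32;

		return hi | (lo != 0);	/* OR the lost bits into bit 0 */
	}
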
diff --git a/arch/mips/math-emu/dp_msubf.c b/arch/mips/math-emu/dp_msubf.c
new file mode 100644
index 0000000..1224126
--- /dev/null
+++ b/arch/mips/math-emu/dp_msubf.c
@@ -0,0 +1,269 @@
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: MSUBF.f (Fused Multiply Subtract)
+ * MSUBF.fmt: FPR[fd] = FPR[fd] - (FPR[fs] x FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y)
+{
+	int re;
+	int rs;
+	u64 rm;
+	unsigned lxm;
+	unsigned hxm;
+	unsigned lym;
+	unsigned hym;
+	u64 lrm;
+	u64 hrm;
+	u64 t;
+	u64 at;
+	int s;
+
+	COMPXDP;
+	COMPYDP;
+
+	u64 zm; int ze; int zs __maybe_unused; int zc;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+	EXPLODEDP(z, zc, zs, ze, zm)
+
+	FLUSHXDP;
+	FLUSHYDP;
+	FLUSHDP(z, zc, zs, ze, zm);
+
+	ieee754_clearcx();
+
+	switch (zc) {
+	case IEEE754_CLASS_SNAN:
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754dp_nanxcpt(z);
+	case IEEE754_CLASS_DNORM:
+		DPDNORMx(zm, ze);
+	/* QNAN is handled separately below */
+	}
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return x;
+
+
+	/*
+	 * Infinity handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754dp_indef();
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		return ieee754dp_inf(xs ^ ys);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		/* Multiplication is 0 so just return z */
+		return z;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		DPDNORMX;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		/* fall through to real computations */
+	}
+
+	/* Finally get to do some computation */
+
+	/*
+	 * Do the multiplication bit first
+	 *
+	 * rm = xm * ym, re = xe + ye basically
+	 *
+	 * At this point xm and ym should have been normalized.
+	 */
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	re = xe + ye;
+	rs = xs ^ ys;
+
+	/* shunt to top of word */
+	xm <<= 64 - (DP_FBITS + 1);
+	ym <<= 64 - (DP_FBITS + 1);
+
+	/*
+	 * Multiply 64 bit xm, ym to give the high 64 bits of the product
+	 * in rm, with the discarded low bits folded in as stickiness.
+	 */
+
+	/* 32 * 32 => 64 */
+#define DPXMULT(x, y)	((u64)(x) * (u64)(y))
+
+	lxm = xm;
+	hxm = xm >> 32;
+	lym = ym;
+	hym = ym >> 32;
+
+	lrm = DPXMULT(lxm, lym);
+	hrm = DPXMULT(hxm, hym);
+
+	t = DPXMULT(lxm, hym);
+
+	at = lrm + (t << 32);
+	hrm += at < lrm;
+	lrm = at;
+
+	hrm = hrm + (t >> 32);
+
+	t = DPXMULT(hxm, lym);
+
+	at = lrm + (t << 32);
+	hrm += at < lrm;
+	lrm = at;
+
+	hrm = hrm + (t >> 32);
+
+	rm = hrm | (lrm != 0);
+
+	/*
+	 * Sticky shift down to normal rounding precision.
+	 */
+	if ((s64) rm < 0) {
+		rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
+		     ((rm << (DP_FBITS + 1 + 3)) != 0);
+		re++;
+	} else {
+		rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
+		     ((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
+	}
+	assert(rm & (DP_HIDDEN_BIT << 3));
+
+	/* And now the subtraction */
+
+	/* flip sign of r and handle as add */
+	rs ^= 1;
+
+	assert(zm & DP_HIDDEN_BIT);
+
+	/*
+	 * Provide guard,round and stick bit space.
+	 */
+	zm <<= 3;
+
+	if (ze > re) {
+		/*
+		 * Have to shift the product fraction right to align.
+		 */
+		s = ze - re;
+		rm = XDPSRS(rm, s);
+		re += s;
+	} else if (re > ze) {
+		/*
+		 * Have to shift z fraction right to align.
+		 */
+		s = re - ze;
+		zm = XDPSRS(zm, s);
+		ze += s;
+	}
+	assert(ze == re);
+	assert(ze <= DP_EMAX);
+
+	if (zs == rs) {
+		/*
+		 * Generate 28 bit result of adding two 27 bit numbers
+		 * leaving result in zm, zs and ze.
+		 */
+		zm = zm + rm;
+
+		if (zm >> (DP_FBITS + 1 + 3)) { /* carry out */
+			zm = XDPSRS1(zm);
+			ze++;
+		}
+	} else {
+		if (zm >= rm) {
+			zm = zm - rm;
+		} else {
+			zm = rm - zm;
+			zs = rs;
+		}
+		if (zm == 0)
+			return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+		/*
+		 * Normalize to rounding precision.
+		 */
+		while ((zm >> (DP_FBITS + 3)) == 0) {
+			zm <<= 1;
+			ze--;
+		}
+	}
+
+	return ieee754dp_format(zs, ze, zm);
+}
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index e0b5cc2..cbb36c1 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -33,7 +33,6 @@
 
 int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
 {
-	extern asmlinkage void handle_dsemulret(void);
 	struct emuframe __user *fr;
 	int err;
 
diff --git a/arch/mips/math-emu/ieee754.h b/arch/mips/math-emu/ieee754.h
index a5ca108..df94720 100644
--- a/arch/mips/math-emu/ieee754.h
+++ b/arch/mips/math-emu/ieee754.h
@@ -75,6 +75,16 @@
 
 union ieee754sp ieee754sp_sqrt(union ieee754sp x);
 
+union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y);
+union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y);
+int ieee754sp_2008class(union ieee754sp x);
+union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y);
+union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y);
+union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y);
+union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y);
+
 /*
  * double precision (often aka double)
 */
@@ -99,6 +109,15 @@
 
 union ieee754dp ieee754dp_sqrt(union ieee754dp x);
 
+union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y);
+union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y);
+int ieee754dp_2008class(union ieee754dp x);
+union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y);
+union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y);
+union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y);
+union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y);
 
 
 /* 5 types of floating point number
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index 05389d5..6383e2c 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -65,8 +65,8 @@
 			vc = IEEE754_CLASS_INF;				\
 		else if (vm & SP_MBIT(SP_FBITS-1))			\
 			vc = IEEE754_CLASS_SNAN;			\
-	else								\
-		vc = IEEE754_CLASS_QNAN;				\
+		else							\
+			vc = IEEE754_CLASS_QNAN;			\
 	} else if (ve == SP_EMIN-1+SP_EBIAS) {				\
 		if (vm) {						\
 			ve = SP_EMIN;					\
@@ -105,8 +105,8 @@
 		if (vm) {						\
 			ve = DP_EMIN;					\
 			vc = IEEE754_CLASS_DNORM;			\
-	} else								\
-		vc = IEEE754_CLASS_ZERO;				\
+		} else							\
+			vc = IEEE754_CLASS_ZERO;			\
 	} else {							\
 		ve -= DP_EBIAS;						\
 		vm |= DP_HIDDEN_BIT;					\
diff --git a/arch/mips/math-emu/me-debugfs.c b/arch/mips/math-emu/me-debugfs.c
index f308e0f..506a67a 100644
--- a/arch/mips/math-emu/me-debugfs.c
+++ b/arch/mips/math-emu/me-debugfs.c
@@ -65,4 +65,4 @@
 
 	return 0;
 }
-__initcall(debugfs_fpuemu);
+arch_initcall(debugfs_fpuemu);
diff --git a/arch/mips/math-emu/sp_2008class.c b/arch/mips/math-emu/sp_2008class.c
new file mode 100644
index 0000000..ff62606
--- /dev/null
+++ b/arch/mips/math-emu/sp_2008class.c
@@ -0,0 +1,55 @@
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: CLASS.f
+ * FPR[fd] = class(FPR[fs])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754sp.h"
+
+int ieee754sp_2008class(union ieee754sp x)
+{
+	COMPXSP;
+
+	EXPLODEXSP;
+
+	/*
+	 * 10 bit mask as follows:
+	 *
+	 * bit0 = SNAN
+	 * bit1 = QNAN
+	 * bit2 = -INF
+	 * bit3 = -NORM
+	 * bit4 = -DNORM
+	 * bit5 = -ZERO
+	 * bit6 = INF
+	 * bit7 = NORM
+	 * bit8 = DNORM
+	 * bit9 = ZERO
+	 */
+
+	switch (xc) {
+	case IEEE754_CLASS_SNAN:
+		return 0x01;
+	case IEEE754_CLASS_QNAN:
+		return 0x02;
+	case IEEE754_CLASS_INF:
+		return 0x04 << (xs ? 0 : 4);
+	case IEEE754_CLASS_NORM:
+		return 0x08 << (xs ? 0 : 4);
+	case IEEE754_CLASS_DNORM:
+		return 0x10 << (xs ? 0 : 4);
+	case IEEE754_CLASS_ZERO:
+		return 0x20 << (xs ? 0 : 4);
+	default:
+		pr_err("Unknown class: %d\n", xc);
+		return 0;
+	}
+}
diff --git a/arch/mips/math-emu/sp_fmax.c b/arch/mips/math-emu/sp_fmax.c
new file mode 100644
index 0000000..4d00084
--- /dev/null
+++ b/arch/mips/math-emu/sp_fmax.c
@@ -0,0 +1,213 @@
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: MAX{,A}.f
+ * MAX : Scalar Floating-Point Maximum
+ * MAXA: Scalar Floating-Point argument with Maximum Absolute Value
+ *
+ * MAX.S : FPR[fd] = maxNum(FPR[fs],FPR[ft])
+ * MAXA.S: FPR[fd] = maxNumMag(FPR[fs],FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
+{
+	COMPXSP;
+	COMPYSP;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+
+	FLUSHXSP;
+	FLUSHYSP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return xs ? y : x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return ys ? x : y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754sp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		SPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	/* Compare signs */
+	if (xs > ys)
+		return y;
+	else if (xs < ys)
+		return x;
+
+	/* Compare exponent */
+	if (xe > ye)
+		return x;
+	else if (xe < ye)
+		return y;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return y;
+	return x;
+}
+
+union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+{
+	COMPXSP;
+	COMPYSP;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+
+	FLUSHXSP;
+	FLUSHYSP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754sp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		SPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	/* Compare exponent */
+	if (xe > ye)
+		return x;
+	else if (xe < ye)
+		return y;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return y;
+	return x;
+}
diff --git a/arch/mips/math-emu/sp_fmin.c b/arch/mips/math-emu/sp_fmin.c
new file mode 100644
index 0000000..4eb1bb9
--- /dev/null
+++ b/arch/mips/math-emu/sp_fmin.c
@@ -0,0 +1,213 @@
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: MIN{,A}.f
+ * MIN : Scalar Floating-Point Minimum
+ * MINA: Scalar Floating-Point argument with Minimum Absolute Value
+ *
+ * MIN.S : FPR[fd] = minNum(FPR[fs],FPR[ft])
+ * MINA.S: FPR[fd] = minNumMag(FPR[fs],FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
+{
+	COMPXSP;
+	COMPYSP;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+
+	FLUSHXSP;
+	FLUSHYSP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return xs ? x : y;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return ys ? y : x;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754sp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		SPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	/* Compare signs */
+	if (xs > ys)
+		return x;
+	else if (xs < ys)
+		return y;
+
+	/* Compare exponent */
+	if (xe > ye)
+		return y;
+	else if (xe < ye)
+		return x;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return x;
+	return y;
+}
+
+union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
+{
+	COMPXSP;
+	COMPYSP;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+
+	FLUSHXSP;
+	FLUSHYSP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754sp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		SPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	/* Compare exponent */
+	if (xe > ye)
+		return y;
+	else if (xe < ye)
+		return x;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return x;
+	return y;
+}
diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
new file mode 100644
index 0000000..dd1dd83
--- /dev/null
+++ b/arch/mips/math-emu/sp_maddf.c
@@ -0,0 +1,255 @@
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: MADDF.f (Fused Multiply Add)
+ * MADDF.fmt: FPR[fd] = FPR[fd] + (FPR[fs] x FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y)
+{
+	int re;
+	int rs;
+	unsigned rm;
+	unsigned short lxm;
+	unsigned short hxm;
+	unsigned short lym;
+	unsigned short hym;
+	unsigned lrm;
+	unsigned hrm;
+	unsigned t;
+	unsigned at;
+	int s;
+
+	COMPXSP;
+	COMPYSP;
+	u32 zm; int ze; int zs __maybe_unused; int zc;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+	EXPLODESP(z, zc, zs, ze, zm)
+
+	FLUSHXSP;
+	FLUSHYSP;
+	FLUSHSP(z, zc, zs, ze, zm);
+
+	ieee754_clearcx();
+
+	switch (zc) {
+	case IEEE754_CLASS_SNAN:
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754sp_nanxcpt(z);
+	case IEEE754_CLASS_DNORM:
+		SPDNORMx(zm, ze);
+	/* QNAN is handled separately below */
+	}
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return x;
+
+	/*
+	 * Infinity handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754sp_indef();
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		return ieee754sp_inf(xs ^ ys);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		/* Multiplication is 0 so just return z */
+		return z;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		SPDNORMX;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		/* fall through to real computations */
+	}
+
+	/* Finally get to do some computation */
+
+	/*
+	 * Do the multiplication bit first
+	 *
+	 * rm = xm * ym, re = xe + ye basically
+	 *
+	 * At this point xm and ym should have been normalized.
+	 */
+
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	re = xe + ye;
+	rs = xs ^ ys;
+
+	/* shunt to top of word */
+	xm <<= 32 - (SP_FBITS + 1);
+	ym <<= 32 - (SP_FBITS + 1);
+
+	/*
+	 * Multiply 32 bits xm, ym to give high 32 bits rm with stickiness.
+	 */
+	lxm = xm & 0xffff;
+	hxm = xm >> 16;
+	lym = ym & 0xffff;
+	hym = ym >> 16;
+
+	lrm = lxm * lym;	/* 16 * 16 => 32 */
+	hrm = hxm * hym;	/* 16 * 16 => 32 */
+
+	t = lxm * hym; /* 16 * 16 => 32 */
+	at = lrm + (t << 16);
+	hrm += at < lrm;
+	lrm = at;
+	hrm = hrm + (t >> 16);
+
+	t = hxm * lym; /* 16 * 16 => 32 */
+	at = lrm + (t << 16);
+	hrm += at < lrm;
+	lrm = at;
+	hrm = hrm + (t >> 16);
+
+	rm = hrm | (lrm != 0);
+
+	/*
+	 * Sticky shift down to normal rounding precision.
+	 */
+	if ((int) rm < 0) {
+		rm = (rm >> (32 - (SP_FBITS + 1 + 3))) |
+		    ((rm << (SP_FBITS + 1 + 3)) != 0);
+		re++;
+	} else {
+		rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) |
+		     ((rm << (SP_FBITS + 1 + 3 + 1)) != 0);
+	}
+	assert(rm & (SP_HIDDEN_BIT << 3));
+
+	/* And now the addition */
+
+	assert(zm & SP_HIDDEN_BIT);
+
+	/*
+	 * Provide guard,round and stick bit space.
+	 */
+	zm <<= 3;
+
+	if (ze > re) {
+		/*
+		 * Have to shift the product fraction right to align.
+		 */
+		s = ze - re;
+		rm = XSPSRS(rm, s);
+		re += s;
+	} else if (re > ze) {
+		/*
+		 * Have to shift z fraction right to align.
+		 */
+		s = re - ze;
+		zm = XSPSRS(zm, s);
+		ze += s;
+	}
+	assert(ze == re);
+	assert(ze <= SP_EMAX);
+
+	if (zs == rs) {
+		/*
+		 * Generate 28 bit result of adding two 27 bit numbers
+		 * leaving result in zm, zs and ze.
+		 */
+		zm = zm + rm;
+
+		if (zm >> (SP_FBITS + 1 + 3)) { /* carry out */
+			zm = XSPSRS1(zm);
+			ze++;
+		}
+	} else {
+		if (zm >= rm) {
+			zm = zm - rm;
+		} else {
+			zm = rm - zm;
+			zs = rs;
+		}
+		if (zm == 0)
+			return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+		/*
+		 * Normalize in extended single precision
+		 */
+		while ((zm >> (SP_MBITS + 3)) == 0) {
+			zm <<= 1;
+			ze--;
+		}
+
+	}
+	return ieee754sp_format(zs, ze, zm);
+}
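
Aligning the two fractions above depends on a sticky right shift: bits shifted out must not simply vanish, or rounding would lose information, so they are ORed back into the lowest surviving bit. A stand-alone sketch of such a shift for 32-bit fractions:

	#include <stdint.h>

	/* Sketch: right shift that preserves shifted-out bits as stickiness. */
	static uint32_t sticky_shift_right(uint32_t v, int s)
	{
		if (s <= 0)
			return v;
		if (s >= 32)
			return v != 0;	/* everything shifted out: pure sticky */
		return (v >> s) | ((v << (32 - s)) != 0);
	}
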
diff --git a/arch/mips/math-emu/sp_msubf.c b/arch/mips/math-emu/sp_msubf.c
new file mode 100644
index 0000000..81c38b980
--- /dev/null
+++ b/arch/mips/math-emu/sp_msubf.c
@@ -0,0 +1,258 @@
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: MSUBF.f (Fused Multiply Subtract)
+ * MSUBF.fmt: FPR[fd] = FPR[fd] - (FPR[fs] x FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y)
+{
+	int re;
+	int rs;
+	unsigned rm;
+	unsigned short lxm;
+	unsigned short hxm;
+	unsigned short lym;
+	unsigned short hym;
+	unsigned lrm;
+	unsigned hrm;
+	unsigned t;
+	unsigned at;
+	int s;
+
+	COMPXSP;
+	COMPYSP;
+	u32 zm; int ze; int zs __maybe_unused; int zc;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+	EXPLODESP(z, zc, zs, ze, zm)
+
+	FLUSHXSP;
+	FLUSHYSP;
+	FLUSHSP(z, zc, zs, ze, zm);
+
+	ieee754_clearcx();
+
+	switch (zc) {
+	case IEEE754_CLASS_SNAN:
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754sp_nanxcpt(z);
+	case IEEE754_CLASS_DNORM:
+		SPDNORMx(zm, ze);
+	/* QNAN is handled separately below */
+	}
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return x;
+
+	/*
+	 * Infinity handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754sp_indef();
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		return ieee754sp_inf(xs ^ ys);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		/* Multiplication is 0 so just return z */
+		return z;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		SPDNORMX;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		/* fall through to real computation */
+	}
+
+	/* Finally get to do some computation */
+
+	/*
+	 * Do the multiplication bit first
+	 *
+	 * rm = xm * ym, re = xe + ye basically
+	 *
+	 * At this point xm and ym should have been normalized.
+	 */
+
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	re = xe + ye;
+	rs = xs ^ ys;
+
+	/* shunt to top of word */
+	xm <<= 32 - (SP_FBITS + 1);
+	ym <<= 32 - (SP_FBITS + 1);
+
+	/*
+	 * Multiply 32 bits xm, ym to give high 32 bits rm with stickiness.
+	 */
+	lxm = xm & 0xffff;
+	hxm = xm >> 16;
+	lym = ym & 0xffff;
+	hym = ym >> 16;
+
+	lrm = lxm * lym;	/* 16 * 16 => 32 */
+	hrm = hxm * hym;	/* 16 * 16 => 32 */
+
+	t = lxm * hym; /* 16 * 16 => 32 */
+	at = lrm + (t << 16);
+	hrm += at < lrm;
+	lrm = at;
+	hrm = hrm + (t >> 16);
+
+	t = hxm * lym; /* 16 * 16 => 32 */
+	at = lrm + (t << 16);
+	hrm += at < lrm;
+	lrm = at;
+	hrm = hrm + (t >> 16);
+
+	rm = hrm | (lrm != 0);
+
+	/*
+	 * Sticky shift down to normal rounding precision.
+	 */
+	if ((int) rm < 0) {
+		rm = (rm >> (32 - (SP_FBITS + 1 + 3))) |
+		    ((rm << (SP_FBITS + 1 + 3)) != 0);
+		re++;
+	} else {
+		rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) |
+		     ((rm << (SP_FBITS + 1 + 3 + 1)) != 0);
+	}
+	assert(rm & (SP_HIDDEN_BIT << 3));
+
+	/* And now the subtraction */
+
+	/* Flip sign of r and handle as add */
+	rs ^= 1;
+
+	assert(zm & SP_HIDDEN_BIT);
+
+	/*
+	 * Provide guard,round and stick bit space.
+	 */
+	zm <<= 3;
+
+	if (ze > re) {
+		/*
+		 * Have to shift the product fraction right to align.
+		 */
+		s = ze - re;
+		rm = XSPSRS(rm, s);
+		re += s;
+	} else if (re > ze) {
+		/*
+		 * Have to shift z fraction right to align.
+		 */
+		s = re - ze;
+		zm = XSPSRS(zm, s);
+		ze += s;
+	}
+	assert(ze == re);
+	assert(ze <= SP_EMAX);
+
+	if (zs == rs) {
+		/*
+		 * Generate 28 bit result of adding two 27 bit numbers
+		 * leaving result in zm, zs and ze.
+		 */
+		zm = zm + rm;
+
+		if (zm >> (SP_FBITS + 1 + 3)) { /* carry out */
+			zm = XSPSRS1(zm); /* shift preserving sticky */
+			ze++;
+		}
+	} else {
+		if (zm >= rm) {
+			zm = zm - rm;
+		} else {
+			zm = rm - zm;
+			zs = rs;
+		}
+		if (zm == 0)
+			return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+		/*
+		 * Normalize in extended single precision
+		 */
+		while ((zm >> (SP_MBITS + 3)) == 0) {
+			zm <<= 1;
+			ze--;
+		}
+
+	}
+	return ieee754sp_format(zs, ze, zm);
+}
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index fbea443..5d3a25e 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1276,6 +1276,7 @@
 	case CPU_PROAPTIV:
 	case CPU_M5150:
 	case CPU_QEMU_GENERIC:
+	case CPU_I6400:
 		if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
 		    (c->icache.waysize > PAGE_SIZE))
 			c->icache.flags |= MIPS_CACHE_ALIASES;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index eeaf024..8f23cf0 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -194,6 +194,40 @@
 		__free_pages(page, get_order(size));
 }
 
+static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+	void *cpu_addr, dma_addr_t dma_addr, size_t size,
+	struct dma_attrs *attrs)
+{
+	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long addr = (unsigned long)cpu_addr;
+	unsigned long off = vma->vm_pgoff;
+	unsigned long pfn;
+	int ret = -ENXIO;
+
+	if (!plat_device_is_coherent(dev) && !hw_coherentio)
+		addr = CAC_ADDR(addr);
+
+	pfn = page_to_pfn(virt_to_page((void *)addr));
+
+	if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	else
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < count && user_count <= (count - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      user_count << PAGE_SHIFT,
+				      vma->vm_page_prot);
+	}
+
+	return ret;
+}
+
 static inline void __dma_sync_virtual(void *addr, size_t size,
 	enum dma_data_direction direction)
 {
@@ -380,6 +414,7 @@
 static struct dma_map_ops mips_default_dma_map_ops = {
 	.alloc = mips_dma_alloc_coherent,
 	.free = mips_dma_free_coherent,
+	.mmap = mips_dma_mmap,
 	.map_page = mips_dma_map_page,
 	.unmap_page = mips_dma_unmap_page,
 	.map_sg = mips_dma_map_sg,
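
For noncoherent devices mips_dma_alloc_coherent() hands out an uncached
(KSEG1) alias, so the new mips_dma_mmap() first converts back to the
cached address with CAC_ADDR() before looking up the page frame. The
bounds test then ensures the window userspace asked for lies entirely
inside the buffer; a sketch of just that check, with hypothetical names:

	#include <stdbool.h>
	#include <stddef.h>

	/*
	 * Sketch of the range check in mips_dma_mmap() above: a user
	 * mapping of 'user_pages' pages starting at page offset 'off'
	 * must fit inside a 'buf_pages'-page coherent buffer.
	 * Hypothetical helper, not a kernel API.
	 */
	static bool dma_mmap_range_ok(size_t off, size_t user_pages,
				      size_t buf_pages)
	{
		return off < buf_pages && user_pages <= buf_pages - off;
	}
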
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 852a41c..4b88fa03 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -57,12 +57,10 @@
 
 #ifdef CONFIG_KPROBES
 	/*
-	 * This is to notify the fault handler of the kprobes.	The
-	 * exception code is redundant as it is also carried in REGS,
-	 * but we pass it anyhow.
+	 * This is to notify the fault handler of kprobes.
 	 */
 	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
-		       (regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
+		       current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
 		return;
 #endif
 
@@ -224,6 +222,7 @@
 			print_vma_addr(" ", regs->regs[31]);
 			pr_info("\n");
 		}
+		current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 		info.si_signo = SIGSEGV;
 		info.si_errno = 0;
 		/* info.si_code has been set above */
@@ -282,6 +281,7 @@
 		       field, (unsigned long) regs->cp0_epc,
 		       field, (unsigned long) regs->regs[31]);
 #endif
+	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 	tsk->thread.cp0_badvaddr = address;
 	info.si_signo = SIGBUS;
 	info.si_errno = 0;
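
Both hunks cache the ExcCode field of the CP0 Cause register in
current->thread.trap_nr before raising the signal, so consumers (the
kprobes notifier above, signal delivery) no longer recompute it from
cp0_cause. The extraction itself is just a shift and mask:

	#include <stdint.h>

	/* ExcCode lives in bits 6..2 of CP0 Cause; e.g. 2 = TLBL
	 * (load/fetch TLB miss), 3 = TLBS (store TLB miss). */
	static inline unsigned int cause_exccode(uint32_t cp0_cause)
	{
		return (cp0_cause >> 2) & 0x1f;
	}
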
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 198a314..66d0f49 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -37,6 +37,7 @@
 #include <asm/cpu.h>
 #include <asm/dma.h>
 #include <asm/kmap_types.h>
+#include <asm/maar.h>
 #include <asm/mmu_context.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
@@ -333,9 +334,40 @@
 #endif
 }
 
-unsigned __weak platform_maar_init(unsigned num_maars)
+unsigned __weak platform_maar_init(unsigned num_pairs)
 {
-	return 0;
+	struct maar_config cfg[BOOT_MEM_MAP_MAX];
+	unsigned i, num_configured, num_cfg = 0;
+	phys_addr_t skip;
+
+	for (i = 0; i < boot_mem_map.nr_map; i++) {
+		switch (boot_mem_map.map[i].type) {
+		case BOOT_MEM_RAM:
+		case BOOT_MEM_INIT_RAM:
+			break;
+		default:
+			continue;
+		}
+
+		skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
+
+		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
+		cfg[num_cfg].lower += skip;
+
+		cfg[num_cfg].upper = cfg[num_cfg].lower;
+		cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
+		cfg[num_cfg].upper -= skip;
+
+		cfg[num_cfg].attrs = MIPS_MAAR_S;
+		num_cfg++;
+	}
+
+	num_configured = maar_config(cfg, num_cfg, num_pairs);
+	if (num_configured < num_cfg)
+		pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
+			num_pairs, num_cfg);
+
+	return num_configured;
 }
 
 static void maar_init(void)
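
MAAR bounds have 64 KiB granularity, so the new default
platform_maar_init() bumps each bootmem region's lower bound up to the
next 64 KiB boundary via the 'skip' term (even when the address is
already aligned); note that skip cancels out of the upper-bound
computation, which lands on the region's last byte. A standalone
restatement of that arithmetic (hypothetical helper, illustration only):

	#include <stdint.h>

	typedef uint64_t phys_addr_t;

	/*
	 * lower = addr + skip is 64K-aligned (e.g. 0x12340 -> 0x20000);
	 * upper = (lower + size - 1) - skip collapses to addr + size - 1.
	 */
	static void maar_region_bounds(phys_addr_t addr, phys_addr_t size,
				       phys_addr_t *lower, phys_addr_t *upper)
	{
		phys_addr_t skip = 0x10000 - (addr & 0xffff);

		*lower = addr + skip;
		*upper = addr + size - 1;
	}
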
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 4ceafd1..53ea839 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -14,6 +14,7 @@
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 #include <asm/r4kcache.h>
+#include <asm/mips-cm.h>
 
 /*
  * MIPS32/MIPS64 L2 cache handling
@@ -94,6 +95,38 @@
 	return 1;
 }
 
+static int __init mips_sc_probe_cm3(void)
+{
+	struct cpuinfo_mips *c = &current_cpu_data;
+	unsigned long cfg = read_gcr_l2_config();
+	unsigned long sets, line_sz, assoc;
+
+	if (cfg & CM_GCR_L2_CONFIG_BYPASS_MSK)
+		return 0;
+
+	sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK;
+	sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF;
+	c->scache.sets = 64 << sets;
+
+	line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK;
+	line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF;
+	c->scache.linesz = 2 << line_sz;
+
+	assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK;
+	assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF;
+	c->scache.ways = assoc + 1;
+	c->scache.waysize = c->scache.sets * c->scache.linesz;
+	c->scache.waybit = __ffs(c->scache.waysize);
+
+	c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+
+	return 1;
+}
+
+void __weak platform_early_l2_init(void)
+{
+}
+
 static inline int __init mips_sc_probe(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -103,6 +136,15 @@
 	/* Mark as not present until probe completed */
 	c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
 
+	/*
+	 * Do we need some platform specific probing before
+	 * we configure L2?
+	 */
+	platform_early_l2_init();
+
+	if (mips_cm_revision() >= CM_REV_CM3)
+		return mips_sc_probe_cm3();
+
 	/* Ignore anything but MIPSxx processors */
 	if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
 			      MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
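
With a CM3 present, L2 geometry now comes straight from the
GCR_L2_CONFIG register instead of the Config2 probing below: sets =
64 << field, line size = 2 << field, ways = field + 1. A worked decode
for an assumed set of field values (the real masks and shifts live in
asm/mips-cm.h):

	#include <stdio.h>

	/* Worked example of the mips_sc_probe_cm3() decode above,
	 * for assumed field values. */
	int main(void)
	{
		unsigned sets_field = 5, line_field = 5, assoc_field = 7;

		unsigned sets = 64 << sets_field;	/* 2048 sets     */
		unsigned linesz = 2 << line_field;	/* 64-byte lines */
		unsigned ways = assoc_field + 1;	/* 8 ways        */

		printf("L2: %u ways x %u sets x %uB lines = %u KiB\n",
		       ways, sets, linesz, ways * sets * linesz / 1024);
		return 0;
	}
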
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 2b75b8f..b4f366f 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -36,7 +36,7 @@
 		"nop\n\t"		\
 		".set	pop\n\t")
 
-static int r3k_have_wired_reg;			/* Should be in cpu_data? */
+int r3k_have_wired_reg;			/* Should be in cpu_data? */
 
 /* TLB operations. */
 static void local_flush_tlb_from(int entry)
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index cec3e18..53c2478 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -303,3 +303,10 @@
 	if (!register_vsmp_smp_ops())
 		return;
 }
+
+void platform_early_l2_init(void)
+{
+	/* L2 configuration lives in the CM3 */
+	if (mips_cm_revision() >= CM_REV_CM3)
+		mips_cm_probe();
+}
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index fa8f591..c6a6c7a 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -382,122 +382,12 @@
 	/* Could change CM error mask register. */
 }
 
-
-static char *tr[8] = {
-	"mem",	"gcr",	"gic",	"mmio",
-	"0x04", "0x05", "0x06", "0x07"
-};
-
-static char *mcmd[32] = {
-	[0x00] = "0x00",
-	[0x01] = "Legacy Write",
-	[0x02] = "Legacy Read",
-	[0x03] = "0x03",
-	[0x04] = "0x04",
-	[0x05] = "0x05",
-	[0x06] = "0x06",
-	[0x07] = "0x07",
-	[0x08] = "Coherent Read Own",
-	[0x09] = "Coherent Read Share",
-	[0x0a] = "Coherent Read Discard",
-	[0x0b] = "Coherent Ready Share Always",
-	[0x0c] = "Coherent Upgrade",
-	[0x0d] = "Coherent Writeback",
-	[0x0e] = "0x0e",
-	[0x0f] = "0x0f",
-	[0x10] = "Coherent Copyback",
-	[0x11] = "Coherent Copyback Invalidate",
-	[0x12] = "Coherent Invalidate",
-	[0x13] = "Coherent Write Invalidate",
-	[0x14] = "Coherent Completion Sync",
-	[0x15] = "0x15",
-	[0x16] = "0x16",
-	[0x17] = "0x17",
-	[0x18] = "0x18",
-	[0x19] = "0x19",
-	[0x1a] = "0x1a",
-	[0x1b] = "0x1b",
-	[0x1c] = "0x1c",
-	[0x1d] = "0x1d",
-	[0x1e] = "0x1e",
-	[0x1f] = "0x1f"
-};
-
-static char *core[8] = {
-	"Invalid/OK",	"Invalid/Data",
-	"Shared/OK",	"Shared/Data",
-	"Modified/OK",	"Modified/Data",
-	"Exclusive/OK", "Exclusive/Data"
-};
-
-static char *causes[32] = {
-	"None", "GC_WR_ERR", "GC_RD_ERR", "COH_WR_ERR",
-	"COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
-	"0x08", "0x09", "0x0a", "0x0b",
-	"0x0c", "0x0d", "0x0e", "0x0f",
-	"0x10", "0x11", "0x12", "0x13",
-	"0x14", "0x15", "0x16", "INTVN_WR_ERR",
-	"INTVN_RD_ERR", "0x19", "0x1a", "0x1b",
-	"0x1c", "0x1d", "0x1e", "0x1f"
-};
-
 int malta_be_handler(struct pt_regs *regs, int is_fixup)
 {
 	/* This duplicates the handling in do_be which seems wrong */
 	int retval = is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
 
-	if (mips_cm_present()) {
-		unsigned long cm_error = read_gcr_error_cause();
-		unsigned long cm_addr = read_gcr_error_addr();
-		unsigned long cm_other = read_gcr_error_mult();
-		unsigned long cause, ocause;
-		char buf[256];
-
-		cause = cm_error & CM_GCR_ERROR_CAUSE_ERRTYPE_MSK;
-		if (cause != 0) {
-			cause >>= CM_GCR_ERROR_CAUSE_ERRTYPE_SHF;
-			if (cause < 16) {
-				unsigned long cca_bits = (cm_error >> 15) & 7;
-				unsigned long tr_bits = (cm_error >> 12) & 7;
-				unsigned long cmd_bits = (cm_error >> 7) & 0x1f;
-				unsigned long stag_bits = (cm_error >> 3) & 15;
-				unsigned long sport_bits = (cm_error >> 0) & 7;
-
-				snprintf(buf, sizeof(buf),
-					 "CCA=%lu TR=%s MCmd=%s STag=%lu "
-					 "SPort=%lu\n",
-					 cca_bits, tr[tr_bits], mcmd[cmd_bits],
-					 stag_bits, sport_bits);
-			} else {
-				/* glob state & sresp together */
-				unsigned long c3_bits = (cm_error >> 18) & 7;
-				unsigned long c2_bits = (cm_error >> 15) & 7;
-				unsigned long c1_bits = (cm_error >> 12) & 7;
-				unsigned long c0_bits = (cm_error >> 9) & 7;
-				unsigned long sc_bit = (cm_error >> 8) & 1;
-				unsigned long cmd_bits = (cm_error >> 3) & 0x1f;
-				unsigned long sport_bits = (cm_error >> 0) & 7;
-				snprintf(buf, sizeof(buf),
-					 "C3=%s C2=%s C1=%s C0=%s SC=%s "
-					 "MCmd=%s SPort=%lu\n",
-					 core[c3_bits], core[c2_bits],
-					 core[c1_bits], core[c0_bits],
-					 sc_bit ? "True" : "False",
-					 mcmd[cmd_bits], sport_bits);
-			}
-
-			ocause = (cm_other & CM_GCR_ERROR_MULT_ERR2ND_MSK) >>
-				 CM_GCR_ERROR_MULT_ERR2ND_SHF;
-
-			pr_err("CM_ERROR=%08lx %s <%s>\n", cm_error,
-			       causes[cause], buf);
-			pr_err("CM_ADDR =%08lx\n", cm_addr);
-			pr_err("CM_OTHER=%08lx %s\n", cm_other, causes[ocause]);
-
-			/* reprime cause register */
-			write_gcr_error_cause(0);
-		}
-	}
+	mips_cm_error_report();
 
 	return retval;
 }
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index b769657..dadeb83 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -179,31 +179,6 @@
 	}
 }
 
-unsigned platform_maar_init(unsigned num_pairs)
-{
-	phys_addr_t mem_end = (physical_memsize & ~0xffffull) - 1;
-	struct maar_config cfg[] = {
-		/* DRAM preceding I/O */
-		{ 0x00000000, 0x0fffffff, MIPS_MAAR_S },
-
-		/* DRAM following I/O */
-		{ 0x20000000, mem_end, MIPS_MAAR_S },
-
-		/* DRAM alias in upper half of physical */
-		{ 0x80000000, 0x80000000 + mem_end, MIPS_MAAR_S },
-	};
-	unsigned i, num_cfg = ARRAY_SIZE(cfg);
-
-	/* If DRAM fits before I/O, drop the region following it */
-	if (physical_memsize <= 0x10000000) {
-		num_cfg--;
-		for (i = 1; i < num_cfg; i++)
-			cfg[i] = cfg[i + 1];
-	}
-
-	return maar_config(cfg, num_cfg, num_pairs);
-}
-
 phys_addr_t mips_cdmm_phys_base(void)
 {
 	/* This address is "typically unused" */
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index 5f5d18b..3660dc6 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -87,7 +87,7 @@
 static void xlp_pic_enable(struct irq_data *d)
 {
 	unsigned long flags;
-	struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
+	struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
 
 	BUG_ON(!pd);
 	spin_lock_irqsave(&pd->node->piclock, flags);
@@ -97,7 +97,7 @@
 
 static void xlp_pic_disable(struct irq_data *d)
 {
-	struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
+	struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
 	unsigned long flags;
 
 	BUG_ON(!pd);
@@ -108,7 +108,7 @@
 
 static void xlp_pic_mask_ack(struct irq_data *d)
 {
-	struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
+	struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
 
 	clear_c0_eimr(pd->picirq);
 	ack_c0_eirr(pd->picirq);
@@ -116,7 +116,7 @@
 
 static void xlp_pic_unmask(struct irq_data *d)
 {
-	struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
+	struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
 
 	BUG_ON(!pd);
 
@@ -193,7 +193,7 @@
 	pic_data->picirq = picirq;
 	pic_data->node = nlm_get_node(node);
 	irq_set_chip_and_handler(xirq, &xlp_pic, handle_level_irq);
-	irq_set_handler_data(xirq, pic_data);
+	irq_set_chip_data(xirq, pic_data);
 }
 
 void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *))
@@ -202,7 +202,7 @@
 	int xirq;
 
 	xirq = nlm_irq_to_xirq(node, irq);
-	pic_data = irq_get_handler_data(xirq);
+	pic_data = irq_get_chip_data(xirq);
 	if (WARN_ON(!pic_data))
 		return;
 	pic_data->extra_ack = xack;
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index f5fff22..0136b4f 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -82,8 +82,9 @@
 }
 
 /* IRQ_IPI_SMP_FUNCTION Handler */
-void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
+void nlm_smp_function_ipi_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	clear_c0_eimr(irq);
 	ack_c0_eirr(irq);
 	generic_smp_call_function_interrupt();
@@ -91,8 +92,9 @@
 }
 
 /* IRQ_IPI_SMP_RESCHEDULE  handler */
-void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
+void nlm_smp_resched_ipi_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	clear_c0_eimr(irq);
 	ack_c0_eirr(irq);
 	scheduler_ipi();
diff --git a/arch/mips/netlogic/xlp/ahci-init-xlp2.c b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
index 7b066a4..c11b9c7 100644
--- a/arch/mips/netlogic/xlp/ahci-init-xlp2.c
+++ b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
@@ -152,7 +152,7 @@
 	0xC9, 0xC9, 0x07, 0x07, 0x18, 0x18, 0x01, 0x01, 0x22, 0x00
 };
 
-/* SATA PHY config for register block 2 0x0x8065 .. 0x0x80A4 */
+/* SATA PHY config for register block 2 0x8065 .. 0x80A4 */
 static const u8 sata_phy_config2[]  = {
 	0xAA, 0x00, 0x4C, 0xC9, 0xC9, 0x07, 0x07, 0x18,
 	0x18, 0x05, 0x0C, 0x10, 0x00, 0x10, 0x00, 0xFF,
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index a8f4144..80ec929 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -91,6 +91,8 @@
 		return 134;
 	case PIC_SATA_IRQ:
 		return 143;
+	case PIC_NAND_IRQ:
+		return 151;
 	case PIC_SPI_IRQ:
 		return 152;
 	case PIC_MMC_IRQ:
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index 81f5895..3c9ec3d 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -91,6 +91,7 @@
 	case CPU_INTERAPTIV:
 	case CPU_PROAPTIV:
 	case CPU_P5600:
+	case CPU_I6400:
 	case CPU_M5150:
 	case CPU_LOONGSON1:
 	case CPU_SB1:
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 6a6e2cc..8f988a6 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -392,6 +392,10 @@
 		op_model_mipsxx_ops.cpu_type = "mips/P5600";
 		break;
 
+	case CPU_I6400:
+		op_model_mipsxx_ops.cpu_type = "mips/I6400";
+		break;
+
 	case CPU_M5150:
 		op_model_mipsxx_ops.cpu_type = "mips/M5150";
 		break;
diff --git a/arch/mips/pci/msi-xlp.c b/arch/mips/pci/msi-xlp.c
index 3407495..bb14335f 100644
--- a/arch/mips/pci/msi-xlp.c
+++ b/arch/mips/pci/msi-xlp.c
@@ -131,7 +131,7 @@
  */
 static void xlp_msi_enable(struct irq_data *d)
 {
-	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
+	struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
 	unsigned long flags;
 	int vec;
 
@@ -148,7 +148,7 @@
 
 static void xlp_msi_disable(struct irq_data *d)
 {
-	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
+	struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
 	unsigned long flags;
 	int vec;
 
@@ -165,7 +165,7 @@
 
 static void xlp_msi_mask_ack(struct irq_data *d)
 {
-	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
+	struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
 	int link, vec;
 
 	link = nlm_irq_msilink(d->irq);
@@ -211,7 +211,7 @@
 	msixvec = nlm_irq_msixvec(d->irq);
 	link = nlm_irq_msixlink(msixvec);
 	pci_msi_mask_irq(d);
-	md = irq_data_get_irq_handler_data(d);
+	md = irq_data_get_irq_chip_data(d);
 
 	/* Ack MSI on bridge */
 	if (cpu_is_xlp9xx()) {
@@ -302,7 +302,7 @@
 	/* Get MSI data for the link */
 	lirq = PIC_PCIE_LINK_MSI_IRQ(link);
 	xirq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
-	md = irq_get_handler_data(xirq);
+	md = irq_get_chip_data(xirq);
 	msiaddr = MSI_LINK_ADDR(node, link);
 
 	spin_lock_irqsave(&md->msi_lock, flags);
@@ -409,7 +409,7 @@
 	/* Get MSI data for the link */
 	lirq = PIC_PCIE_MSIX_IRQ(link);
 	xirq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0));
-	md = irq_get_handler_data(xirq);
+	md = irq_get_chip_data(xirq);
 	msixaddr = MSIX_LINK_ADDR(node, link);
 
 	spin_lock_irqsave(&md->msi_lock, flags);
@@ -485,7 +485,7 @@
 	irq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
 	for (i = irq; i < irq + XLP_MSIVEC_PER_LINK; i++) {
 		irq_set_chip_and_handler(i, &xlp_msi_chip, handle_level_irq);
-		irq_set_handler_data(i, md);
+		irq_set_chip_data(i, md);
 	}
 
 	for (i = 0; i < XLP_MSIXVEC_PER_LINK ; i++) {
@@ -508,7 +508,7 @@
 		/* Initialize MSI-X extended irq space for the link  */
 		irq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, i));
 		irq_set_chip_and_handler(irq, &xlp_msix_chip, handle_level_irq);
-		irq_set_handler_data(irq, md);
+		irq_set_chip_data(irq, md);
 	}
 }
 
@@ -520,7 +520,7 @@
 
 	link = lirq - PIC_PCIE_LINK_MSI_IRQ_BASE;
 	irqbase = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
-	md = irq_get_handler_data(irqbase);
+	md = irq_get_chip_data(irqbase);
 	if (cpu_is_xlp9xx())
 		status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSI_STATUS) &
 						md->msi_enabled_mask;
@@ -550,7 +550,7 @@
 
 	link = lirq - PIC_PCIE_MSIX_IRQ_BASE;
 	irqbase = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0));
-	md = irq_get_handler_data(irqbase);
+	md = irq_get_chip_data(irqbase);
 	if (cpu_is_xlp9xx())
 		status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSIX_STATUSX(link));
 	else
diff --git a/arch/mips/pci/ops-emma2rh.c b/arch/mips/pci/ops-emma2rh.c
index 710aef5..2dc97c4 100644
--- a/arch/mips/pci/ops-emma2rh.c
+++ b/arch/mips/pci/ops-emma2rh.c
@@ -25,7 +25,6 @@
 #include <linux/types.h>
 
 #include <asm/addrspace.h>
-#include <asm/debug.h>
 
 #include <asm/emma/emma2rh.h>
 
@@ -40,10 +39,9 @@
 static int check_args(struct pci_bus *bus, u32 devfn, u32 * bus_num)
 {
 	/* check if the bus is top-level */
-	if (bus->parent != NULL) {
+	if (bus->parent != NULL)
 		*bus_num = bus->number;
-		db_assert(bus_num != NULL);
-	} else
+	else
 		*bus_num = 0;
 
 	if (*bus_num == 0) {
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index 283157f..ad35a5e 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -312,8 +312,8 @@
 		irq_set_chip_data(i, apc);
 	}
 
-	irq_set_handler_data(apc->irq, apc);
-	irq_set_chained_handler(apc->irq, ar71xx_pci_irq_handler);
+	irq_set_chained_handler_and_data(apc->irq, ar71xx_pci_irq_handler,
+					 apc);
 }
 
 static void ar71xx_pci_reset(void)
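
This and the ar724x/rt3883 conversions below replace the
irq_set_handler_data() + irq_set_chained_handler() pair with the
combined irq_set_chained_handler_and_data() call, which installs
handler and data together instead of leaving a window in which the
chained handler could run with a NULL pointer. The pattern, sketched
with hypothetical names:

	#include <linux/irq.h>

	/* Hypothetical setup helper illustrating the conversion. */
	static void my_setup_chained_irq(unsigned int irq, void *ctrl,
					 irq_flow_handler_t handler)
	{
		/* old, racy ordering:
		 *	irq_set_handler_data(irq, ctrl);
		 *	irq_set_chained_handler(irq, handler);
		 */
		irq_set_chained_handler_and_data(irq, handler, ctrl);
	}
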
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 0af362b..907d11d 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -321,8 +321,8 @@
 		irq_set_chip_data(i, apc);
 	}
 
-	irq_set_handler_data(apc->irq, apc);
-	irq_set_chained_handler(apc->irq, ar724x_pci_irq_handler);
+	irq_set_chained_handler_and_data(apc->irq, ar724x_pci_irq_handler,
+					 apc);
 }
 
 static int ar724x_pci_probe(struct platform_device *pdev)
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index c5347d9..6a15dbd 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -20,7 +20,6 @@
 #include <linux/of_irq.h>
 #include <linux/of_pci.h>
 
-#include <asm/gpio.h>
 #include <asm/addrspace.h>
 
 #include <lantiq_soc.h>
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index 80fafe6..53c8efa 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -129,7 +129,7 @@
 	rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA);
 }
 
-static void rt3883_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void rt3883_pci_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
 	struct rt3883_pci_controller *rpc;
 	u32 pending;
@@ -145,7 +145,7 @@
 	}
 
 	while (pending) {
-		unsigned bit = __ffs(pending);
+		unsigned irq, bit = __ffs(pending);
 
 		irq = irq_find_mapping(rpc->irq_domain, bit);
 		generic_handle_irq(irq);
@@ -225,8 +225,7 @@
 		return -ENODEV;
 	}
 
-	irq_set_handler_data(irq, rpc);
-	irq_set_chained_handler(irq, rt3883_pci_irq_handler);
+	irq_set_chained_handler_and_data(irq, rt3883_pci_irq_handler, rpc);
 
 	return 0;
 }
diff --git a/arch/mips/pistachio/Kconfig b/arch/mips/pistachio/Kconfig
new file mode 100644
index 0000000..97731ea
--- /dev/null
+++ b/arch/mips/pistachio/Kconfig
@@ -0,0 +1,13 @@
+config PISTACHIO_GPTIMER_CLKSRC
+	bool "Enable General Purpose Timer based clocksource"
+	depends on MACH_PISTACHIO
+	select CLKSRC_PISTACHIO
+	select MIPS_EXTERNAL_TIMER
+	help
+	  This option enables a clocksource driver based on the Pistachio
+	  SoC's general-purpose external timer.
+
+	  If you want to enable CPUFreq, you need to enable
+	  this option.
+
+	  If you don't want to enable CPUFreq, you can leave this disabled.
diff --git a/arch/mips/pmcs-msp71xx/msp_irq_cic.c b/arch/mips/pmcs-msp71xx/msp_irq_cic.c
index 1207ec4..8b9cf64 100644
--- a/arch/mips/pmcs-msp71xx/msp_irq_cic.c
+++ b/arch/mips/pmcs-msp71xx/msp_irq_cic.c
@@ -88,7 +88,8 @@
 	* Make sure we have IRQ affinity.  It may have changed while
 	* we were processing the IRQ.
 	*/
-	if (!cpumask_test_cpu(smp_processor_id(), d->affinity))
+	if (!cpumask_test_cpu(smp_processor_id(),
+			      irq_data_get_affinity_mask(d)))
 		return;
 #endif
 
diff --git a/arch/mips/ralink/cevt-rt3352.c b/arch/mips/ralink/cevt-rt3352.c
index 24bf057..a8e70a9 100644
--- a/arch/mips/ralink/cevt-rt3352.c
+++ b/arch/mips/ralink/cevt-rt3352.c
@@ -36,8 +36,8 @@
 	int freq_scale;
 };
 
-static void systick_set_clock_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt);
+static int systick_set_oneshot(struct clock_event_device *evt);
+static int systick_shutdown(struct clock_event_device *evt);
 
 static int systick_next_event(unsigned long delta,
 				struct clock_event_device *evt)
@@ -73,11 +73,12 @@
 		 * cevt-r4k uses 300, make sure systick
 		 * gets used if available
 		 */
-		.rating		= 310,
-		.features	= CLOCK_EVT_FEAT_ONESHOT,
-		.set_next_event	= systick_next_event,
-		.set_mode	= systick_set_clock_mode,
-		.event_handler	= systick_event_handler,
+		.rating			= 310,
+		.features		= CLOCK_EVT_FEAT_ONESHOT,
+		.set_next_event		= systick_next_event,
+		.set_state_shutdown	= systick_shutdown,
+		.set_state_oneshot	= systick_set_oneshot,
+		.event_handler		= systick_event_handler,
 	},
 };
 
@@ -87,33 +88,33 @@
 	.dev_id = &systick.dev,
 };
 
-static void systick_set_clock_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int systick_shutdown(struct clock_event_device *evt)
 {
 	struct systick_device *sdev;
 
 	sdev = container_of(evt, struct systick_device, dev);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-		if (!sdev->irq_requested)
-			setup_irq(systick.dev.irq, &systick_irqaction);
-		sdev->irq_requested = 1;
-		iowrite32(CFG_EXT_STK_EN | CFG_CNT_EN,
-				systick.membase + SYSTICK_CONFIG);
-		break;
+	if (sdev->irq_requested)
+		free_irq(systick.dev.irq, &systick_irqaction);
+	sdev->irq_requested = 0;
+	iowrite32(0, systick.membase + SYSTICK_CONFIG);
 
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		if (sdev->irq_requested)
-			free_irq(systick.dev.irq, &systick_irqaction);
-		sdev->irq_requested = 0;
-		iowrite32(0, systick.membase + SYSTICK_CONFIG);
-		break;
+	return 0;
+}
 
-	default:
-		pr_err("%s: Unhandeled mips clock_mode\n", systick.dev.name);
-		break;
-	}
+static int systick_set_oneshot(struct clock_event_device *evt)
+{
+	struct systick_device *sdev;
+
+	sdev = container_of(evt, struct systick_device, dev);
+
+	if (!sdev->irq_requested)
+		setup_irq(systick.dev.irq, &systick_irqaction);
+	sdev->irq_requested = 1;
+	iowrite32(CFG_EXT_STK_EN | CFG_CNT_EN,
+		  systick.membase + SYSTICK_CONFIG);
+
+	return 0;
 }
 
 static void __init ralink_systick_init(struct device_node *np)
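
This is part of the tree-wide move (also sgi-ip27 and sni below) from
the multiplexed set_mode() callback to per-state set_state_*() hooks
returning int; states a driver does not support are simply left NULL
instead of needing a default: case. A skeleton of the new-style
registration, with hypothetical names and the callback bodies elided:

	#include <linux/clockchips.h>

	static int my_timer_shutdown(struct clock_event_device *evt)
	{
		/* stop the counter, release the interrupt */
		return 0;
	}

	static int my_timer_set_oneshot(struct clock_event_device *evt)
	{
		/* arm the counter for one-shot operation */
		return 0;
	}

	static struct clock_event_device my_timer_clockevent = {
		.name			= "my-timer",
		.features		= CLOCK_EVT_FEAT_ONESHOT,
		.rating			= 310,
		.set_state_shutdown	= my_timer_shutdown,
		.set_state_oneshot	= my_timer_set_oneshot,
	};
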
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index e31e8cd..9bd7a2d 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -23,6 +23,7 @@
 #include <linux/mtd/nand.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
+#include <linux/gpio.h>
 #include <linux/gpio_keys.h>
 #include <linux/input.h>
 #include <linux/serial_8250.h>
diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c
index 5aa3df8..650d5d3 100644
--- a/arch/mips/rb532/gpio.c
+++ b/arch/mips/rb532/gpio.c
@@ -140,6 +140,11 @@
 	return 0;
 }
 
+static int rb532_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+	return 8 + 4 * 32 + gpio;
+}
+
 static struct rb532_gpio_chip rb532_gpio_chip[] = {
 	[0] = {
 		.chip = {
@@ -148,6 +153,7 @@
 			.direction_output	= rb532_gpio_direction_output,
 			.get			= rb532_gpio_get,
 			.set			= rb532_gpio_set,
+			.to_irq			= rb532_gpio_to_irq,
 			.base			= 0,
 			.ngpio			= 32,
 		},
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index a6d10f6..42d6cb9 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -64,12 +64,6 @@
 	return LOCAL_HUB_L(PI_RT_COUNT) >= cnt ? -ETIME : 0;
 }
 
-static void rt_set_mode(enum clock_event_mode mode,
-		struct clock_event_device *evt)
-{
-	/* Nothing to do ...  */
-}
-
 unsigned int rt_timer_irq;
 
 static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
@@ -124,7 +118,6 @@
 	cd->irq			= irq;
 	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= rt_next_event;
-	cd->set_mode		= rt_set_mode;
 	clockevents_register_device(cd);
 }
 
diff --git a/arch/mips/sibyte/common/bus_watcher.c b/arch/mips/sibyte/common/bus_watcher.c
index 41a1d22..a4e55999 100644
--- a/arch/mips/sibyte/common/bus_watcher.c
+++ b/arch/mips/sibyte/common/bus_watcher.c
@@ -250,4 +250,4 @@
 	return 0;
 }
 
-__initcall(sibyte_bus_watcher);
+device_initcall(sibyte_bus_watcher);
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
index cf8ec56..fb4b352 100644
--- a/arch/mips/sni/time.c
+++ b/arch/mips/sni/time.c
@@ -14,44 +14,33 @@
 #define SNI_COUNTER2_DIV	64
 #define SNI_COUNTER0_DIV	((SNI_CLOCK_TICK_RATE / SNI_COUNTER2_DIV) / HZ)
 
-static void a20r_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
+static int a20r_set_periodic(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0x34;
-		wmb();
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE +  0) = SNI_COUNTER0_DIV;
-		wmb();
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE +  0) = SNI_COUNTER0_DIV >> 8;
-		wmb();
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0x34;
+	wmb();
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV;
+	wmb();
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV >> 8;
+	wmb();
 
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0xb4;
-		wmb();
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE +  8) = SNI_COUNTER2_DIV;
-		wmb();
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE +  8) = SNI_COUNTER2_DIV >> 8;
-		wmb();
-
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0xb4;
+	wmb();
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV;
+	wmb();
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV >> 8;
+	wmb();
+	return 0;
 }
 
 static struct clock_event_device a20r_clockevent_device = {
-	.name		= "a20r-timer",
-	.features	= CLOCK_EVT_FEAT_PERIODIC,
+	.name			= "a20r-timer",
+	.features		= CLOCK_EVT_FEAT_PERIODIC,
 
 	/* .mult, .shift, .max_delta_ns and .min_delta_ns left uninitialized */
 
-	.rating		= 300,
-	.irq		= SNI_A20R_IRQ_TIMER,
-	.set_mode	= a20r_set_mode,
+	.rating			= 300,
+	.irq			= SNI_A20R_IRQ_TIMER,
+	.set_state_periodic	= a20r_set_periodic,
 };
 
 static irqreturn_t a20r_interrupt(int irq, void *dev_id)
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 2791b86..9d9962a 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -117,22 +117,6 @@
 }
 EXPORT_SYMBOL(clk_put);
 
-/* GPIO support */
-
-#ifdef CONFIG_GPIOLIB
-int gpio_to_irq(unsigned gpio)
-{
-	return -EINVAL;
-}
-EXPORT_SYMBOL(gpio_to_irq);
-
-int irq_to_gpio(unsigned irq)
-{
-	return -EINVAL;
-}
-EXPORT_SYMBOL(irq_to_gpio);
-#endif
-
 #define BOARD_VEC(board)	extern struct txx9_board_vec board;
 #include <asm/txx9/boards.h>
 #undef BOARD_VEC
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index 5be655e..375e591 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -89,6 +89,10 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -127,73 +131,6 @@
 #define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
 
-/**
- * atomic_clear_mask - Atomically clear bits in memory
- * @mask: Mask of the bits to be cleared
- * @v: pointer to word in memory
- *
- * Atomically clears the bits set in mask from the memory word specified.
- */
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-#ifdef CONFIG_SMP
-	int status;
-
-	asm volatile(
-		"1:	mov	%3,(_AAR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"
-		"	and	%4,%0		\n"
-		"	mov	%0,(_ADR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"	/* flush */
-		"	mov	(_ASR,%2),%0	\n"
-		"	or	%0,%0		\n"
-		"	bne	1b		\n"
-		: "=&r"(status), "=m"(*addr)
-		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
-		: "memory", "cc");
-#else
-	unsigned long flags;
-
-	mask = ~mask;
-	flags = arch_local_cli_save();
-	*addr &= mask;
-	arch_local_irq_restore(flags);
-#endif
-}
-
-/**
- * atomic_set_mask - Atomically set bits in memory
- * @mask: Mask of the bits to be set
- * @v: pointer to word in memory
- *
- * Atomically sets the bits set in mask from the memory word specified.
- */
-static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
-{
-#ifdef CONFIG_SMP
-	int status;
-
-	asm volatile(
-		"1:	mov	%3,(_AAR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"
-		"	or	%4,%0		\n"
-		"	mov	%0,(_ADR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"	/* flush */
-		"	mov	(_ASR,%2),%0	\n"
-		"	or	%0,%0		\n"
-		"	bne	1b		\n"
-		: "=&r"(status), "=m"(*addr)
-		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
-		: "memory", "cc");
-#else
-	unsigned long flags;
-
-	flags = arch_local_cli_save();
-	*addr |= mask;
-	arch_local_irq_restore(flags);
-#endif
-}
-
 #endif /* __KERNEL__ */
 #endif /* CONFIG_SMP */
 #endif /* _ASM_ATOMIC_H */
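
The open-coded atomic_set_mask()/atomic_clear_mask() go away because
the ATOMIC_OP() template now also stamps out atomic_and(), atomic_or()
and atomic_xor(); the tlb-smp.c hunk below converts the one caller to
atomic_or(). A userspace sketch of the template idea, assuming C11
atomics in place of the mn10300 asm loops:

	#include <stdatomic.h>

	typedef struct { atomic_int counter; } my_atomic_t;

	/* One macro generates the whole family of bitwise atomics. */
	#define MY_ATOMIC_OP(op)					\
	static inline void my_atomic_##op(int i, my_atomic_t *v)	\
	{								\
		atomic_fetch_##op(&v->counter, i);			\
	}

	MY_ATOMIC_OP(and)
	MY_ATOMIC_OP(or)
	MY_ATOMIC_OP(xor)

	/* e.g. replacing atomic_set_mask(mask, addr):
	 *	my_atomic_or(mask, (my_atomic_t *)addr);
	 */
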
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
index e5d0ef7..9a39ea9 100644
--- a/arch/mn10300/mm/tlb-smp.c
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -119,7 +119,7 @@
 	flush_mm = mm;
 	flush_va = va;
 #if NR_CPUS <= BITS_PER_LONG
-	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+	atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]);
 #else
 #error Not supported.
 #endif
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index 269c23d..1a8f6f95 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -242,7 +242,6 @@
 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_RT_MUTEX_TESTER=y
 CONFIG_PROVE_RCU_DELAY=y
 CONFIG_DEBUG_BLOCK_EXT_DEVT=y
 CONFIG_LATENCYTOP=y
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index 33b148f..0ffb08f 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -295,7 +295,6 @@
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_RT_MUTEX_TESTER=y
 CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_RCU_CPU_STALL_INFO=y
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 226f8ca9..2536965 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -126,6 +126,10 @@
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
 
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -185,6 +189,9 @@
 
 ATOMIC64_OPS(add, +=)
 ATOMIC64_OPS(sub, -=)
+ATOMIC64_OP(and, &=)
+ATOMIC64_OP(or, |=)
+ATOMIC64_OP(xor, ^=)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 5ef2711..b447918 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -82,6 +82,9 @@
 	bool
 	default y
 
+config ARCH_HAS_DMA_SET_COHERENT_MASK
+        bool
+
 config PPC
 	bool
 	default y
@@ -155,6 +158,8 @@
 	select HAVE_PERF_EVENTS_NMI if PPC64
 	select EDAC_SUPPORT
 	select EDAC_ATOMIC_SCRUB
+	select ARCH_HAS_DMA_SET_COHERENT_MASK
+	select HAVE_ARCH_SECCOMP_FILTER
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
@@ -514,11 +519,6 @@
 	def_bool y
 	depends on NEED_MULTIPLE_NODES
 
-config PPC_HAS_HASH_64K
-	bool
-	depends on PPC64
-	default n
-
 config STDBINUTILS
 	bool "Using standard binutils settings"
 	depends on 44x
@@ -560,16 +560,16 @@
 	bool "4k page size"
 
 config PPC_16K_PAGES
-	bool "16k page size" if 44x || PPC_8xx
+	bool "16k page size"
+	depends on 44x || PPC_8xx
 
 config PPC_64K_PAGES
-	bool "64k page size" if 44x || PPC_STD_MMU_64 || PPC_BOOK3E_64
-	depends on !PPC_FSL_BOOK3E
-	select PPC_HAS_HASH_64K if PPC_STD_MMU_64
+	bool "64k page size"
+	depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64)
 
 config PPC_256K_PAGES
-	bool "256k page size" if 44x
-	depends on !STDBINUTILS
+	bool "256k page size"
+	depends on 44x && !STDBINUTILS
 	help
 	  Make the page size 256k.
 
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 05f464e..4ca54fd 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -288,6 +288,26 @@
 pseries_le_defconfig:
 	$(call merge_into_defconfig,pseries_defconfig,le)
 
+PHONY += mpc85xx_defconfig
+mpc85xx_defconfig:
+	$(call merge_into_defconfig,mpc85xx_basic_defconfig,\
+		85xx-32bit 85xx-hw fsl-emb-nonhw)
+
+PHONY += mpc85xx_smp_defconfig
+mpc85xx_smp_defconfig:
+	$(call merge_into_defconfig,mpc85xx_basic_defconfig,\
+		85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw)
+
+PHONY += corenet32_smp_defconfig
+corenet32_smp_defconfig:
+	$(call merge_into_defconfig,corenet_basic_defconfig,\
+		85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw)
+
+PHONY += corenet64_smp_defconfig
+corenet64_smp_defconfig:
+	$(call merge_into_defconfig,corenet_basic_defconfig,\
+		85xx-64bit 85xx-smp altivec 85xx-hw fsl-emb-nonhw)
+
 define archhelp
   @echo '* zImage          - Build default images selected by kernel config'
   @echo '  zImage.*        - Compressed kernel image (arch/$(ARCH)/boot/zImage.*)'
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
index ebf2022..426bf41 100644
--- a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
@@ -175,7 +175,7 @@
 
 /include/ "pq3-gpio-0.dtsi"
 
-	display@10000 {
+	display: display@10000 {
 		compatible = "fsl,diu", "fsl,p1022-diu";
 		reg = <0x10000 1000>;
 		interrupts = <64 2 0 0>;
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi
index 1956dea..de76ae8 100644
--- a/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi
@@ -50,6 +50,8 @@
 		pci0 = &pci0;
 		pci1 = &pci1;
 		pci2 = &pci2;
+		vga = &display;
+		display = &display;
 	};
 
 	cpus {
diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
index 9e9f7e2..9770d02 100644
--- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
@@ -484,6 +484,11 @@
 		reg	   = <0xea000 0x4000>;
 	};
 
+	scfg: global-utilities@fc000 {
+		compatible = "fsl,t1040-scfg";
+		reg = <0xfc000 0x1000>;
+	};
+
 /include/ "elo3-dma-0.dtsi"
 /include/ "elo3-dma-1.dtsi"
 /include/ "qoriq-espi-0.dtsi"
diff --git a/arch/powerpc/boot/dts/t1023rdb.dts b/arch/powerpc/boot/dts/t1023rdb.dts
index 06b090a..d3fa829 100644
--- a/arch/powerpc/boot/dts/t1023rdb.dts
+++ b/arch/powerpc/boot/dts/t1023rdb.dts
@@ -60,7 +60,7 @@
 			#address-cells = <1>;
 			#size-cells = <1>;
 			compatible = "fsl,ifc-nand";
-			reg = <0x2 0x0 0x10000>;
+			reg = <0x1 0x0 0x10000>;
 		};
 	};
 
@@ -99,6 +99,17 @@
 		};
 
 		i2c@118100 {
+			current-sensor@40 {
+				compatible = "ti,ina220";
+				reg = <0x40>;
+				shunt-resistor = <1000>;
+			};
+
+			current-sensor@41 {
+				compatible = "ti,ina220";
+				reg = <0x41>;
+				shunt-resistor = <1000>;
+			};
 		};
 	};
 
diff --git a/arch/powerpc/boot/dts/t1024rdb.dts b/arch/powerpc/boot/dts/t1024rdb.dts
index 733e723..bf05e32 100644
--- a/arch/powerpc/boot/dts/t1024rdb.dts
+++ b/arch/powerpc/boot/dts/t1024rdb.dts
@@ -114,6 +114,12 @@
 				reg = <0x4c>;
 			};
 
+			current-sensor@40 {
+				compatible = "ti,ina220";
+				reg = <0x40>;
+				shunt-resistor = <1000>;
+			};
+
 			eeprom@50 {
 				compatible = "atmel,24c256";
 				reg = <0x50>;
diff --git a/arch/powerpc/boot/dts/t1040d4rdb.dts b/arch/powerpc/boot/dts/t1040d4rdb.dts
new file mode 100644
index 0000000..2d1315a
--- /dev/null
+++ b/arch/powerpc/boot/dts/t1040d4rdb.dts
@@ -0,0 +1,46 @@
+/*
+ * T1040D4RDB Device Tree Source
+ *
+ * Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/t104xsi-pre.dtsi"
+/include/ "t104xd4rdb.dtsi"
+
+/ {
+	model = "fsl,T1040D4RDB";
+	compatible = "fsl,T1040D4RDB";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+};
+
+/include/ "fsl/t1040si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/t1042d4rdb.dts b/arch/powerpc/boot/dts/t1042d4rdb.dts
new file mode 100644
index 0000000..846f8c8
--- /dev/null
+++ b/arch/powerpc/boot/dts/t1042d4rdb.dts
@@ -0,0 +1,53 @@
+/*
+ * T1042D4RDB Device Tree Source
+ *
+ * Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/t104xsi-pre.dtsi"
+/include/ "t104xd4rdb.dtsi"
+
+/ {
+	model = "fsl,T1042D4RDB";
+	compatible = "fsl,T1042D4RDB";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	ifc: localbus@ffe124000 {
+		cpld@3,0 {
+			compatible = "fsl,t1040d4rdb-cpld",
+					"fsl,deepsleep-cpld";
+		};
+	};
+};
+
+/include/ "fsl/t1040si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/t104xd4rdb.dtsi b/arch/powerpc/boot/dts/t104xd4rdb.dtsi
new file mode 100644
index 0000000..491367b
--- /dev/null
+++ b/arch/powerpc/boot/dts/t104xd4rdb.dtsi
@@ -0,0 +1,205 @@
+/*
+ * T1040D4RDB/T1042D4RDB Device Tree Source
+ *
+ * Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/ {
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		bman_fbpr: bman-fbpr {
+			size = <0 0x1000000>;
+			alignment = <0 0x1000000>;
+		};
+		qman_fqd: qman-fqd {
+			size = <0 0x400000>;
+			alignment = <0 0x400000>;
+		};
+		qman_pfdr: qman-pfdr {
+			size = <0 0x2000000>;
+			alignment = <0 0x2000000>;
+		};
+	};
+
+	ifc: localbus@ffe124000 {
+		reg = <0xf 0xfe124000 0 0x2000>;
+		ranges = <0 0 0xf 0xe8000000 0x08000000
+			  2 0 0xf 0xff800000 0x00010000
+			  3 0 0xf 0xffdf0000 0x00008000>;
+
+		nor@0,0 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "cfi-flash";
+			reg = <0x0 0x0 0x8000000>;
+			bank-width = <2>;
+			device-width = <1>;
+		};
+
+		nand@2,0 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "fsl,ifc-nand";
+			reg = <0x2 0x0 0x10000>;
+		};
+
+		cpld@3,0 {
+			compatible = "fsl,t1040d4rdb-cpld";
+			reg = <3 0 0x300>;
+		};
+	};
+
+	memory {
+		device_type = "memory";
+	};
+
+	dcsr: dcsr@f00000000 {
+		ranges = <0x00000000 0xf 0x00000000 0x01072000>;
+	};
+
+	bportals: bman-portals@ff4000000 {
+		ranges = <0x0 0xf 0xf4000000 0x2000000>;
+	};
+
+	qportals: qman-portals@ff6000000 {
+		ranges = <0x0 0xf 0xf6000000 0x2000000>;
+	};
+
+	soc: soc@ffe000000 {
+		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+		reg = <0xf 0xfe000000 0 0x00001000>;
+
+		spi@110000 {
+			flash@0 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				compatible = "micron,n25q512ax3";
+				reg = <0>;
+				/* input clock */
+				spi-max-frequency = <10000000>;
+			};
+		};
+		i2c@118000 {
+			hwmon@4c {
+				compatible = "adi,adt7461";
+				reg = <0x4c>;
+			};
+
+			rtc@68 {
+				compatible = "dallas,ds1337";
+				reg = <0x68>;
+				interrupts = <0x2 0x1 0 0>;
+			};
+		};
+
+		i2c@118100 {
+			mux@77 {
+				/*
+				 * Child nodes of mux depend on which i2c
+				 * devices are connected via the mini PCI
+				 * connector slot1, the mini PCI connector
+				 * slot2, the HDMI connector, and the PEX
+				 * slot. Systems with such devices attached
+				 * should provide a wrapper .dts file that
+				 * includes this one, and adds those nodes
+				 */
+				compatible = "nxp,pca9546";
+				reg = <0x77>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+			};
+		};
+
+	};
+
+	pci0: pcie@ffe240000 {
+		reg = <0xf 0xfe240000 0 0x10000>;
+		ranges = <0x02000000 0 0xe0000000 0xc 0x0 0x0 0x10000000
+			  0x01000000 0 0x0 0xf 0xf8000000 0x0 0x00010000>;
+		pcie@0 {
+			ranges = <0x02000000 0 0xe0000000
+				  0x02000000 0 0xe0000000
+				  0 0x10000000
+
+				  0x01000000 0 0x00000000
+				  0x01000000 0 0x00000000
+				  0 0x00010000>;
+		};
+	};
+
+	pci1: pcie@ffe250000 {
+		reg = <0xf 0xfe250000 0 0x10000>;
+		ranges = <0x02000000 0 0xe0000000 0xc 0x10000000 0 0x10000000
+			  0x01000000 0 0 0xf 0xf8010000 0 0x00010000>;
+		pcie@0 {
+			ranges = <0x02000000 0 0xe0000000
+				  0x02000000 0 0xe0000000
+				  0 0x10000000
+
+				  0x01000000 0 0x00000000
+				  0x01000000 0 0x00000000
+				  0 0x00010000>;
+		};
+	};
+
+	pci2: pcie@ffe260000 {
+		reg = <0xf 0xfe260000 0 0x10000>;
+		ranges = <0x02000000 0 0xe0000000 0xc 0x20000000 0 0x10000000
+			  0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+		pcie@0 {
+			ranges = <0x02000000 0 0xe0000000
+				  0x02000000 0 0xe0000000
+				  0 0x10000000
+
+				  0x01000000 0 0x00000000
+				  0x01000000 0 0x00000000
+				  0 0x00010000>;
+		};
+	};
+
+	pci3: pcie@ffe270000 {
+		reg = <0xf 0xfe270000 0 0x10000>;
+		ranges = <0x02000000 0 0xe0000000 0xc 0x30000000 0 0x10000000
+			  0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
+		pcie@0 {
+			ranges = <0x02000000 0 0xe0000000
+				  0x02000000 0 0xe0000000
+				  0 0x10000000
+
+				  0x01000000 0 0x00000000
+				  0x01000000 0 0x00000000
+				  0 0x00010000>;
+		};
+	};
+};
diff --git a/arch/powerpc/configs/85xx-32bit.config b/arch/powerpc/configs/85xx-32bit.config
new file mode 100644
index 0000000..6b8894d
--- /dev/null
+++ b/arch/powerpc/configs/85xx-32bit.config
@@ -0,0 +1,5 @@
+CONFIG_HIGHMEM=y
+CONFIG_KEXEC=y
+CONFIG_PPC_85xx=y
+CONFIG_PROC_KCORE=y
+CONFIG_PHYS_64BIT=y
diff --git a/arch/powerpc/configs/85xx-64bit.config b/arch/powerpc/configs/85xx-64bit.config
new file mode 100644
index 0000000..4aba812
--- /dev/null
+++ b/arch/powerpc/configs/85xx-64bit.config
@@ -0,0 +1,4 @@
+CONFIG_MATH_EMULATION=y
+CONFIG_MATH_EMULATION_HW_UNIMPLEMENTED=y
+CONFIG_PPC64=y
+CONFIG_PPC_BOOK3E_64=y
diff --git a/arch/powerpc/configs/85xx-hw.config b/arch/powerpc/configs/85xx-hw.config
new file mode 100644
index 0000000..528ff0e
--- /dev/null
+++ b/arch/powerpc/configs/85xx-hw.config
@@ -0,0 +1,142 @@
+CONFIG_AQUANTIA_PHY=y
+CONFIG_AT803X_PHY=y
+CONFIG_ATA=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_C293_PCIE=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_CICADA_PHY=y
+CONFIG_CLK_QORIQ=y
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
+CONFIG_CRYPTO_DEV_TALITOS=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_DMADEVICES=y
+CONFIG_E1000E=y
+CONFIG_E1000=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_MPC85XX=y
+CONFIG_EDAC=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_LEGACY=y
+CONFIG_FB_FSL_DIU=y
+CONFIG_FS_ENET=y
+CONFIG_FSL_CORENET_CF=y
+CONFIG_FSL_DMA=y
+CONFIG_FSL_HV_MANAGER=y
+CONFIG_FSL_PQ_MDIO=y
+CONFIG_FSL_RIO=y
+CONFIG_FSL_XGMAC_MDIO=y
+CONFIG_GIANFAR=y
+CONFIG_GPIO_MPC8XXX=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_CPM=m
+CONFIG_I2C_MPC=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C=y
+CONFIG_IGB=y
+CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_MARVELL_PHY=y
+CONFIG_MDIO_BUS_MUX_GPIO=y
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND_FSL_ELBC=y
+CONFIG_MTD_NAND_FSL_IFC=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PLATRAM=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_NETDEVICES=y
+CONFIG_NVRAM=y
+CONFIG_PATA_ALI=y
+CONFIG_PATA_SIL680=y
+CONFIG_PATA_VIA=y
+# CONFIG_PCIEASPM is not set
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI=y
+CONFIG_PPC_EPAPR_HV_BYTECHAN=y
+# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
+CONFIG_QE_GPIO=y
+CONFIG_QUICC_ENGINE=y
+CONFIG_RAPIDIO=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_RTC_DRV_DS1374=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_FSL=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_SIL=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SENSORS_INA2XX=y
+CONFIG_SENSORS_LM90=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_NR_UARTS=6
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_8250_RUNTIME_UARTS=6
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_QE=m
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_INTEL8X0=y
+CONFIG_SND_POWERPC_SOC=y
+# CONFIG_SND_PPC is not set
+CONFIG_SND_SOC=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_USB is not set
+CONFIG_SND=y
+CONFIG_SOUND=y
+CONFIG_SPI_FSL_ESPI=y
+CONFIG_SPI_FSL_SPI=y
+CONFIG_SPI_GPIO=y
+CONFIG_SPI=y
+CONFIG_TERANETICS_PHY=y
+CONFIG_UCC_GETH=y
+CONFIG_USB_EHCI_FSL=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_HID=m
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
+CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_VIRT_DRIVERS=y
+CONFIG_VITESSE_PHY=y
diff --git a/arch/powerpc/configs/85xx-smp.config b/arch/powerpc/configs/85xx-smp.config
new file mode 100644
index 0000000..3b4d1e5
--- /dev/null
+++ b/arch/powerpc/configs/85xx-smp.config
@@ -0,0 +1,2 @@
+CONFIG_NR_CPUS=24
+CONFIG_SMP=y
diff --git a/arch/powerpc/configs/altivec.config b/arch/powerpc/configs/altivec.config
new file mode 100644
index 0000000..58a697c
--- /dev/null
+++ b/arch/powerpc/configs/altivec.config
@@ -0,0 +1 @@
+CONFIG_ALTIVEC=y
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
deleted file mode 100644
index 3765993..0000000
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ /dev/null
@@ -1,185 +0,0 @@
-CONFIG_PPC_85xx=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=8
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_EMBEDDED=y
-CONFIG_PERF_EVENTS=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_CORENET_GENERIC=y
-CONFIG_HIGHMEM=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_BINFMT_MISC=m
-CONFIG_KEXEC=y
-CONFIG_FORCE_MAX_ZONEORDER=13
-CONFIG_PCI=y
-CONFIG_PCIEPORTBUS=y
-# CONFIG_PCIEASPM is not set
-CONFIG_PCI_MSI=y
-CONFIG_RAPIDIO=y
-CONFIG_FSL_RIO=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_XFRM_SUB_POLICY=y
-CONFIG_XFRM_STATISTICS=y
-CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_INET_AH=y
-CONFIG_INET_ESP=y
-CONFIG_INET_IPCOMP=y
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
-CONFIG_IP_SCTP=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_ATA=y
-CONFIG_SATA_AHCI=y
-CONFIG_SATA_FSL=y
-CONFIG_SATA_SIL24=y
-CONFIG_SATA_SIL=y
-CONFIG_PATA_SIL680=y
-CONFIG_NETDEVICES=y
-CONFIG_FSL_PQ_MDIO=y
-CONFIG_FSL_XGMAC_MDIO=y
-CONFIG_E1000=y
-CONFIG_E1000E=y
-CONFIG_AT803X_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_FIXED_PHY=y
-CONFIG_MDIO_BUS_MUX_GPIO=y
-CONFIG_MDIO_BUS_MUX_MMIOREG=y
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIO_LIBPS2=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_PPC_EPAPR_HV_BYTECHAN=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-CONFIG_NVRAM=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_MPC=y
-CONFIG_I2C_MUX=y
-CONFIG_I2C_MUX_PCA954x=y
-CONFIG_SPI=y
-CONFIG_SPI_GPIO=y
-CONFIG_SPI_FSL_SPI=y
-CONFIG_SPI_FSL_ESPI=y
-CONFIG_SENSORS_LM90=y
-CONFIG_SENSORS_INA2XX=y
-CONFIG_USB_HID=m
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_FSL=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
-CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_SDHCI=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
-CONFIG_EDAC_MPC85XX=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_DS1374=y
-CONFIG_RTC_DRV_DS3232=y
-CONFIG_UIO=y
-CONFIG_VIRT_DRIVERS=y
-CONFIG_FSL_HV_MANAGER=y
-CONFIG_STAGING=y
-CONFIG_FSL_CORENET_CF=y
-CONFIG_CLK_QORIQ=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=m
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_UTF8=m
-CONFIG_DEBUG_INFO=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SHIRQ=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_RCU_TRACE=y
-CONFIG_CRYPTO_NULL=y
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_MD4=y
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_FSL_CAAM=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
deleted file mode 100644
index 33cd1df..0000000
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ /dev/null
@@ -1,176 +0,0 @@
-CONFIG_PPC64=y
-CONFIG_PPC_BOOK3E_64=y
-CONFIG_ALTIVEC=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=24
-CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_CGROUPS=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_CORENET_GENERIC=y
-# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
-CONFIG_BINFMT_MISC=m
-CONFIG_MATH_EMULATION=y
-CONFIG_MATH_EMULATION_HW_UNIMPLEMENTED=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_PCI_MSI=y
-CONFIG_RAPIDIO=y
-CONFIG_FSL_RIO=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_INET_ESP=y
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
-CONFIG_IP_SCTP=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_FTL=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_MTD_UBI=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_EEPROM_LEGACY=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_ATA=y
-CONFIG_SATA_FSL=y
-CONFIG_SATA_SIL24=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-CONFIG_FSL_PQ_MDIO=y
-CONFIG_FSL_XGMAC_MDIO=y
-CONFIG_E1000E=y
-CONFIG_VITESSE_PHY=y
-CONFIG_FIXED_PHY=y
-CONFIG_MDIO_BUS_MUX_GPIO=y
-CONFIG_MDIO_BUS_MUX_MMIOREG=y
-CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_PPC_EPAPR_HV_BYTECHAN=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_MPC=y
-CONFIG_I2C_MUX=y
-CONFIG_I2C_MUX_PCA954x=y
-CONFIG_SPI=y
-CONFIG_SPI_GPIO=y
-CONFIG_SPI_FSL_SPI=y
-CONFIG_SPI_FSL_ESPI=y
-CONFIG_SENSORS_LM90=y
-CONFIG_SENSORS_INA2XX=y
-CONFIG_USB_HID=m
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_FSL=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_SDHCI=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_DS1374=y
-CONFIG_RTC_DRV_DS3232=y
-CONFIG_DMADEVICES=y
-CONFIG_FSL_DMA=y
-CONFIG_VIRT_DRIVERS=y
-CONFIG_FSL_HV_MANAGER=y
-CONFIG_CLK_QORIQ=y
-CONFIG_FSL_CORENET_CF=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_DEBUG=1
-CONFIG_UBIFS_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=m
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_UTF8=m
-CONFIG_CRC_T10DIF=y
-CONFIG_DEBUG_INFO=y
-CONFIG_FRAME_WARN=1024
-CONFIG_DEBUG_FS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SHIRQ=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_CRYPTO_NULL=y
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_MD4=y
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_FSL_CAAM=y
diff --git a/arch/powerpc/configs/corenet_basic_defconfig b/arch/powerpc/configs/corenet_basic_defconfig
new file mode 100644
index 0000000..b568d46
--- /dev/null
+++ b/arch/powerpc/configs/corenet_basic_defconfig
@@ -0,0 +1 @@
+CONFIG_CORENET_GENERIC=y
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
new file mode 100644
index 0000000..41e4d35
--- /dev/null
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -0,0 +1,126 @@
+CONFIG_ADFS_FS=m
+CONFIG_AFFS_FS=m
+CONFIG_AUDIT=y
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_BINFMT_MISC=m
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=y
+CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CRC_T10DIF=y
+CONFIG_CPUSETS=y
+CONFIG_CRAMFS=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DEVTMPFS=y
+CONFIG_DUMMY=y
+CONFIG_EFS_FS=m
+CONFIG_EXPERT=y
+CONFIG_EXT2_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS=y
+CONFIG_FB=y
+CONFIG_FHANDLE=y
+CONFIG_FIXED_PHY=y
+CONFIG_FONT_8x16=y
+CONFIG_FONT_8x8=y
+CONFIG_FONTS=y
+CONFIG_FORCE_MAX_ZONEORDER=13
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAME_WARN=1024
+CONFIG_FTL=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HPFS_FS=m
+CONFIG_HUGETLBFS=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IKCONFIG=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_IP_PNP=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_SCTP=m
+CONFIG_IPV6=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_ISO9660_FS=m
+CONFIG_JFFS2_FS_DEBUG=1
+CONFIG_JFFS2_FS=y
+CONFIG_JOLIET=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_MAC_PARTITION=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MSDOS_FS=m
+CONFIG_MTD_UBI=y
+CONFIG_MTD=y
+CONFIG_NET_IPIP=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_NET_KEY=y
+CONFIG_NET=y
+CONFIG_NFSD=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=m
+CONFIG_NO_HZ=y
+CONFIG_NTFS_FS=y
+CONFIG_PACKET=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PERF_EVENTS=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_QNX4FS_FS=m
+CONFIG_RCU_TRACE=y
+CONFIG_ROOT_NFS=y
+CONFIG_SYSV_FS=m
+CONFIG_SYSVIPC=y
+CONFIG_TMPFS=y
+CONFIG_UBIFS_FS=y
+CONFIG_UDF_FS=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_UFS_FS=m
+CONFIG_UIO=y
+CONFIG_UNIX=y
+CONFIG_VFAT_FS=y
+CONFIG_VXFS_FS=m
+CONFIG_XFRM_STATISTICS=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_USER=y
+CONFIG_ZISOFS=y
diff --git a/arch/powerpc/configs/mpc85xx_basic_defconfig b/arch/powerpc/configs/mpc85xx_basic_defconfig
new file mode 100644
index 0000000..850bd19
--- /dev/null
+++ b/arch/powerpc/configs/mpc85xx_basic_defconfig
@@ -0,0 +1,23 @@
+CONFIG_MATH_EMULATION=y
+CONFIG_MPC8536_DS=y
+CONFIG_MPC8540_ADS=y
+CONFIG_MPC8560_ADS=y
+CONFIG_MPC85xx_CDS=y
+CONFIG_MPC85xx_DS=y
+CONFIG_MPC85xx_MDS=y
+CONFIG_MPC85xx_RDB=y
+CONFIG_KSI8560=y
+CONFIG_MVME2500=y
+CONFIG_P1010_RDB=y
+CONFIG_P1022_DS=y
+CONFIG_P1022_RDK=y
+CONFIG_P1023_RDB=y
+CONFIG_SBC8548=y
+CONFIG_SOCRATES=y
+CONFIG_STX_GP3=y
+CONFIG_TQM8540=y
+CONFIG_TQM8541=y
+CONFIG_TQM8548=y
+CONFIG_TQM8555=y
+CONFIG_TQM8560=y
+CONFIG_XES_MPC85xx=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
deleted file mode 100644
index 6ecf7bd..0000000
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ /dev/null
@@ -1,252 +0,0 @@
-CONFIG_PPC_85xx=y
-CONFIG_PHYS_64BIT=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_AUDIT=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_C293_PCIE=y
-CONFIG_MPC8540_ADS=y
-CONFIG_MPC8560_ADS=y
-CONFIG_MPC85xx_CDS=y
-CONFIG_MPC85xx_MDS=y
-CONFIG_MPC8536_DS=y
-CONFIG_MPC85xx_DS=y
-CONFIG_MPC85xx_RDB=y
-CONFIG_P1010_RDB=y
-CONFIG_P1022_DS=y
-CONFIG_P1022_RDK=y
-CONFIG_P1023_RDB=y
-CONFIG_SOCRATES=y
-CONFIG_KSI8560=y
-CONFIG_XES_MPC85xx=y
-CONFIG_STX_GP3=y
-CONFIG_TQM8540=y
-CONFIG_TQM8541=y
-CONFIG_TQM8548=y
-CONFIG_TQM8555=y
-CONFIG_TQM8560=y
-CONFIG_SBC8548=y
-CONFIG_MVME2500=y
-CONFIG_QUICC_ENGINE=y
-CONFIG_QE_GPIO=y
-CONFIG_HIGHMEM=y
-CONFIG_BINFMT_MISC=m
-CONFIG_MATH_EMULATION=y
-CONFIG_FORCE_MAX_ZONEORDER=12
-CONFIG_PCI=y
-CONFIG_PCIEPORTBUS=y
-# CONFIG_PCIEASPM is not set
-CONFIG_PCI_MSI=y
-CONFIG_RAPIDIO=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_INET_ESP=y
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
-CONFIG_IP_SCTP=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_FTL=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_PLATRAM=y
-CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_MTD_UBI=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_NBD=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_EEPROM_AT24=y
-CONFIG_EEPROM_LEGACY=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_ATA=y
-CONFIG_SATA_AHCI=y
-CONFIG_SATA_FSL=y
-CONFIG_SATA_SIL24=y
-CONFIG_PATA_ALI=y
-CONFIG_PATA_VIA=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-CONFIG_FS_ENET=y
-CONFIG_UCC_GETH=y
-CONFIG_GIANFAR=y
-CONFIG_E1000=y
-CONFIG_E1000E=y
-CONFIG_IGB=y
-CONFIG_AT803X_PHY=y
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_BROADCOM_PHY=y
-CONFIG_FIXED_PHY=y
-CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=6
-CONFIG_SERIAL_8250_RUNTIME_UARTS=6
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-CONFIG_SERIAL_QE=m
-CONFIG_NVRAM=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_CPM=m
-CONFIG_I2C_MPC=y
-CONFIG_SPI=y
-CONFIG_SPI_FSL_SPI=y
-CONFIG_SPI_FSL_ESPI=y
-CONFIG_GPIO_MPC8XXX=y
-CONFIG_SENSORS_LM90=y
-CONFIG_FB=y
-CONFIG_FB_FSL_DIU=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
-# CONFIG_SND_DRIVERS is not set
-CONFIG_SND_INTEL8X0=y
-# CONFIG_SND_PPC is not set
-# CONFIG_SND_USB is not set
-CONFIG_SND_SOC=y
-CONFIG_SND_POWERPC_SOC=y
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_GYRATION=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_FSL=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
-CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_PLTFM=y
-CONFIG_MMC_SDHCI_OF_ESDHC=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
-CONFIG_EDAC_MPC85XX=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_DS1374=y
-CONFIG_RTC_DRV_DS3232=y
-CONFIG_RTC_DRV_CMOS=y
-CONFIG_DMADEVICES=y
-CONFIG_FSL_DMA=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_ADFS_FS=m
-CONFIG_AFFS_FS=m
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_BEFS_FS=m
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_DEBUG=1
-CONFIG_UBIFS_FS=y
-CONFIG_CRAMFS=y
-CONFIG_VXFS_FS=m
-CONFIG_HPFS_FS=m
-CONFIG_QNX4FS_FS=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_T10DIF=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_FSL_CAAM=y
-CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
deleted file mode 100644
index b6c7111..0000000
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ /dev/null
@@ -1,244 +0,0 @@
-CONFIG_PPC_85xx=y
-CONFIG_PHYS_64BIT=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=8
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_AUDIT=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_C293_PCIE=y
-CONFIG_MPC8540_ADS=y
-CONFIG_MPC8560_ADS=y
-CONFIG_MPC85xx_CDS=y
-CONFIG_MPC85xx_MDS=y
-CONFIG_MPC8536_DS=y
-CONFIG_MPC85xx_DS=y
-CONFIG_MPC85xx_RDB=y
-CONFIG_P1010_RDB=y
-CONFIG_P1022_DS=y
-CONFIG_P1022_RDK=y
-CONFIG_P1023_RDB=y
-CONFIG_SOCRATES=y
-CONFIG_KSI8560=y
-CONFIG_XES_MPC85xx=y
-CONFIG_STX_GP3=y
-CONFIG_TQM8540=y
-CONFIG_TQM8541=y
-CONFIG_TQM8548=y
-CONFIG_TQM8555=y
-CONFIG_TQM8560=y
-CONFIG_SBC8548=y
-CONFIG_QUICC_ENGINE=y
-CONFIG_QE_GPIO=y
-CONFIG_HIGHMEM=y
-CONFIG_BINFMT_MISC=m
-CONFIG_MATH_EMULATION=y
-CONFIG_FORCE_MAX_ZONEORDER=12
-CONFIG_PCI=y
-CONFIG_PCI_MSI=y
-CONFIG_RAPIDIO=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_INET_ESP=y
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
-CONFIG_IP_SCTP=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_FTL=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_MTD_UBI=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_NBD=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_EEPROM_AT24=y
-CONFIG_EEPROM_LEGACY=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_ATA=y
-CONFIG_SATA_AHCI=y
-CONFIG_SATA_FSL=y
-CONFIG_SATA_SIL24=y
-CONFIG_PATA_ALI=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-CONFIG_FS_ENET=y
-CONFIG_UCC_GETH=y
-CONFIG_GIANFAR=y
-CONFIG_E1000E=y
-CONFIG_AT803X_PHY=y
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_FIXED_PHY=y
-CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-CONFIG_SERIAL_QE=m
-CONFIG_NVRAM=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_CPM=m
-CONFIG_I2C_MPC=y
-CONFIG_SPI=y
-CONFIG_SPI_FSL_SPI=y
-CONFIG_SPI_FSL_ESPI=y
-CONFIG_GPIO_MPC8XXX=y
-CONFIG_SENSORS_LM90=y
-CONFIG_FB=y
-CONFIG_FB_FSL_DIU=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
-# CONFIG_SND_DRIVERS is not set
-CONFIG_SND_INTEL8X0=y
-# CONFIG_SND_PPC is not set
-# CONFIG_SND_USB is not set
-CONFIG_SND_SOC=y
-CONFIG_SND_POWERPC_SOC=y
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_GYRATION=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_FSL=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
-CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_PLTFM=y
-CONFIG_MMC_SDHCI_OF_ESDHC=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_DS1374=y
-CONFIG_RTC_DRV_DS3232=y
-CONFIG_RTC_DRV_CMOS=y
-CONFIG_DMADEVICES=y
-CONFIG_FSL_DMA=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_ADFS_FS=m
-CONFIG_AFFS_FS=m
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_BEFS_FS=m
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_DEBUG=1
-CONFIG_UBIFS_FS=y
-CONFIG_CRAMFS=y
-CONFIG_VXFS_FS=m
-CONFIG_HPFS_FS=m
-CONFIG_QNX4FS_FS=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_T10DIF=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_FSL_CAAM=y
-CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index a97efc2..6bc0ee4 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -355,3 +355,6 @@
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM_BOOK3S_64=m
 CONFIG_KVM_BOOK3S_64_HV=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_POWERNV=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 0d9efce..7991f37 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -190,7 +190,8 @@
 CONFIG_HVCS=m
 CONFIG_VIRTIO_CONSOLE=m
 CONFIG_IBM_BSR=m
-CONFIG_GEN_RTC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=y
 CONFIG_RAW_DRIVER=y
 CONFIG_MAX_RAW_DEVS=1024
 CONFIG_FB=y
@@ -319,3 +320,6 @@
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM_BOOK3S_64=m
 CONFIG_KVM_BOOK3S_64_HV=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_POWERNV=m
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 050712e..ab9f4e0 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -6,5 +6,4 @@
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
 generic-y += rwsem.h
-generic-y += trace_clock.h
 generic-y += vtime.h
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
index 0cc6eed..85e88f7 100644
--- a/arch/powerpc/include/asm/archrandom.h
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -7,14 +7,23 @@
 
 static inline int arch_get_random_long(unsigned long *v)
 {
-	if (ppc_md.get_random_long)
-		return ppc_md.get_random_long(v);
-
 	return 0;
 }
 
 static inline int arch_get_random_int(unsigned int *v)
 {
+	return 0;
+}
+
+static inline int arch_get_random_seed_long(unsigned long *v)
+{
+	if (ppc_md.get_random_seed)
+		return ppc_md.get_random_seed(v);
+
+	return 0;
+}
+static inline int arch_get_random_seed_int(unsigned int *v)
+{
 	unsigned long val;
 	int rc;
 
@@ -27,22 +36,13 @@
 
 static inline int arch_has_random(void)
 {
-	return !!ppc_md.get_random_long;
+	return 0;
 }
 
-static inline int arch_get_random_seed_long(unsigned long *v)
-{
-	return 0;
-}
-static inline int arch_get_random_seed_int(unsigned int *v)
-{
-	return 0;
-}
 static inline int arch_has_random_seed(void)
 {
-	return 0;
+	return !!ppc_md.get_random_seed;
 }
-
 #endif /* CONFIG_ARCH_RANDOM */
 
 #ifdef CONFIG_PPC_POWERNV
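[Editor's note on the archrandom.h hunk above: arch_get_random_long()/arch_get_random_int() now always return 0 (no fast hardware RNG is claimed), and firmware entropy is exposed only through the arch_get_random_seed_*() hooks backed by ppc_md.get_random_seed. A minimal sketch of a consumer; the wrapper is hypothetical, the hooks are the ones defined above:

	/* Hypothetical helper: prefer firmware entropy, report failure otherwise. */
	static int get_hw_seed(unsigned long *seed)
	{
		if (!arch_has_random_seed())
			return -ENODEV;		/* no ppc_md.get_random_seed hook */
		if (!arch_get_random_seed_long(seed))
			return -EAGAIN;		/* hook present but gave no data */
		return 0;
	}
]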
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 512d278..55f106e 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -67,6 +67,10 @@
 ATOMIC_OPS(add, add)
 ATOMIC_OPS(sub, subf)
 
+ATOMIC_OP(and, and)
+ATOMIC_OP(or, or)
+ATOMIC_OP(xor, xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -304,6 +308,9 @@
 
 ATOMIC64_OPS(add, add)
 ATOMIC64_OPS(sub, subf)
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(or, or)
+ATOMIC64_OP(xor, xor)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
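[Editor's note: for readers without the ATOMIC_OP() generator in their head, the new ATOMIC_OP(and, and) line expands to roughly the following larx/stcx. loop (a sketch of the expansion, PPC405 erratum workaround elided; atomic64_and() is the same pattern with ldarx/stdcx.):

	static __inline__ void atomic_and(int a, atomic_t *v)
	{
		int t;

		__asm__ __volatile__(
	"1:	lwarx	%0,0,%3		# atomic_and\n"
	"	and	%0,%2,%0\n"
	"	stwcx.	%0,0,%3\n"
	"	bne-	1b\n"
		: "=&r" (t), "+m" (v->counter)
		: "r" (a), "r" (&v->counter)
		: "cc");
	}
]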
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 51ccc72..0eca6ef 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -76,12 +76,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_lwsync();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_lwsync();							\
 	___p1;								\
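[Editor's note: the ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE() conversion is mechanical, but the acquire/release pairing these macros implement is worth one sketch (a hypothetical flag/data pair, the standard message-passing idiom):

	static int data, flag;

	/* CPU 0: publish */
	data = 42;
	smp_store_release(&flag, 1);		/* lwsync, then WRITE_ONCE() */

	/* CPU 1: consume */
	while (!smp_load_acquire(&flag))	/* READ_ONCE(), then lwsync */
		cpu_relax();
	BUG_ON(data != 42);			/* the barriers make this unreachable */
]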
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 30b35ff..6229e6b 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -40,7 +40,12 @@
 extern void flush_dcache_icache_page(struct page *page);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
 extern void __flush_dcache_icache_phys(unsigned long physaddr);
-#endif /* CONFIG_PPC32 && !CONFIG_BOOKE */
+#else
+static inline void __flush_dcache_icache_phys(unsigned long physaddr)
+{
+	BUG();
+}
+#endif
 
 extern void flush_dcache_range(unsigned long start, unsigned long stop);
 #ifdef CONFIG_PPC32
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 8251a3b..e8d9ef4 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -20,15 +20,6 @@
 extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-					unsigned short len,
-					unsigned short proto,
-					__wsum sum);
-
-/*
  * computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit)
  *
@@ -127,6 +118,34 @@
 #endif
 }
 
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+					unsigned short len,
+					unsigned short proto,
+					__wsum sum)
+{
+	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+#ifdef __powerpc64__
+	u64 res = (__force u64)csum;
+
+	res += (__force u64)addend;
+	return (__force __wsum)((u32)res + (res >> 32));
+#else
+	asm("addc %0,%0,%1;"
+	    "addze %0,%0;"
+	    : "+r" (csum) : "r" (addend));
+	return csum;
+#endif
+}
+
 #endif
 #endif /* __KERNEL__ */
 #endif
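[Editor's note: the 64-bit branch of the new csum_add() folds the carry back into the low word; a worked example with illustrative values:

	u64 res = 0xffffffffULL + 0x00000002ULL;	/* = 0x100000001 */
	u32 sum = (u32)res + (res >> 32);		/* 0x00000001 + 1 = 0x00000002 */

which is exactly 32-bit one's-complement addition with end-around carry, the same thing the addc/addze pair computes on 32-bit builds.]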
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index b142b8e..4f2df58 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -174,6 +174,13 @@
 			int _band;	/* POLL_IN, POLL_OUT, POLL_MSG */
 			int _fd;
 		} _sigpoll;
+
+		/* SIGSYS */
+		struct {
+			unsigned int _call_addr; /* calling insn */
+			int _syscall;		 /* triggering system call number */
+			unsigned int _arch;	 /* AUDIT_ARCH_* of syscall */
+		} _sigsys;
 	} _sifields;
 } compat_siginfo_t;
 
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index e9bdda8..406c2b1 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -10,6 +10,7 @@
 struct device_node;
 #ifdef CONFIG_PPC64
 struct pci_dn;
+struct iommu_table;
 #endif
 
 /*
@@ -23,13 +24,15 @@
 	struct dma_map_ops	*dma_ops;
 
 	/*
-	 * When an iommu is in use, dma_data is used as a ptr to the base of the
-	 * iommu_table.  Otherwise, it is a simple numerical offset.
+	 * These two used to be a union. However, with the hybrid ops we need
+	 * both, so we store a DMA offset for direct mappings and an
+	 * iommu_table for remapped DMA here.
 	 */
-	union {
-		dma_addr_t	dma_offset;
-		void		*iommu_table_base;
-	} dma_data;
+	dma_addr_t		dma_offset;
+
+#ifdef CONFIG_PPC64
+	struct iommu_table	*iommu_table_base;
+#endif
 
 #ifdef CONFIG_IOMMU_API
 	void			*iommu_domain;
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 9103687..710f60e 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -21,12 +21,12 @@
 #define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
 
 /* Some dma direct funcs must be visible for use in other dma_ops */
-extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag,
+extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
+					 dma_addr_t *dma_handle, gfp_t flag,
+					 struct dma_attrs *attrs);
+extern void __dma_direct_free_coherent(struct device *dev, size_t size,
+				       void *vaddr, dma_addr_t dma_handle,
 				       struct dma_attrs *attrs);
-extern void dma_direct_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle,
-				     struct dma_attrs *attrs);
 extern int dma_direct_mmap_coherent(struct device *dev,
 				    struct vm_area_struct *vma,
 				    void *cpu_addr, dma_addr_t handle,
@@ -106,7 +106,7 @@
 static inline dma_addr_t get_dma_offset(struct device *dev)
 {
 	if (dev)
-		return dev->archdata.dma_data.dma_offset;
+		return dev->archdata.dma_offset;
 
 	return PCI_DRAM_OFFSET;
 }
@@ -114,7 +114,7 @@
 static inline void set_dma_offset(struct device *dev, dma_addr_t off)
 {
 	if (dev)
-		dev->archdata.dma_data.dma_offset = off;
+		dev->archdata.dma_offset = off;
 }
 
 /* this will be removed soon */
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index ca18cff..7b87bab 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -2,17 +2,17 @@
  * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
  * Rewrite, cleanup:
  * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
@@ -131,16 +131,21 @@
 
 struct scatterlist;
 
-static inline void set_iommu_table_base(struct device *dev, void *base)
+#ifdef CONFIG_PPC64
+
+static inline void set_iommu_table_base(struct device *dev,
+					struct iommu_table *base)
 {
-	dev->archdata.dma_data.iommu_table_base = base;
+	dev->archdata.iommu_table_base = base;
 }
 
 static inline void *get_iommu_table_base(struct device *dev)
 {
-	return dev->archdata.dma_data.iommu_table_base;
+	return dev->archdata.iommu_table_base;
 }
 
+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
+
 /* Frees table for an individual device node */
 extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
 
@@ -225,6 +230,20 @@
 }
 #endif /* !CONFIG_IOMMU_API */
 
+#else
+
+static inline void *get_iommu_table_base(struct device *dev)
+{
+	return NULL;
+}
+
+static inline int dma_iommu_dma_supported(struct device *dev, u64 mask)
+{
+	return 0;
+}
+
+#endif /* CONFIG_PPC64 */
+
 extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			    struct scatterlist *sglist, int nelems,
 			    unsigned long mask,
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index efbf9a3..47e155f 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -18,14 +18,29 @@
 #define JUMP_ENTRY_TYPE		stringify_in_c(FTR_ENTRY_LONG)
 #define JUMP_LABEL_NOP_SIZE	4
 
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:\n\t"
 		 "nop\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
 		 ".popsection \n\t"
-		 : :  "i" (key) : : l_yes);
+		 : :  "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+		 "b %l[l_yes]\n\t"
+		 ".pushsection __jump_table,  \"aw\"\n\t"
+		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+		 ".popsection \n\t"
+		 : :  "i" (&((char *)key)[branch]) : : l_yes);
+
 	return false;
 l_yes:
 	return true;
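[Editor's note: the only non-obvious part of the new helpers is the "i" (&((char *)key)[branch]) operand: struct static_key is word aligned, so adding 0 or 1 to its address hides the branch polarity in bit 0 of the jump-table entry's key field, where the generic jump-label code recovers it by masking bit 0. In effect each entry encodes (a sketch, field names per the generic struct jump_entry):

	entry->code   = 1b;			/* address of the nop or branch */
	entry->target = l_yes;			/* where the branch would go */
	entry->key    = (long)key | branch;	/* bit 0 = branch polarity */
]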
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 952579f..cab6753 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -249,7 +249,7 @@
 #endif
 
 #ifdef CONFIG_ARCH_RANDOM
-	int (*get_random_long)(unsigned long *v);
+	int (*get_random_seed)(unsigned long *v);
 #endif
 };
 
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 64dc9f5..8374afe 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -154,7 +154,10 @@
 #define OPAL_FLASH_WRITE			111
 #define OPAL_FLASH_ERASE			112
 #define OPAL_PRD_MSG				113
-#define OPAL_LAST				113
+#define OPAL_LEDS_GET_INDICATOR			114
+#define OPAL_LEDS_SET_INDICATOR			115
+#define OPAL_CEC_REBOOT2			116
+#define OPAL_LAST				116
 
 /* Device tree flags */
 
@@ -340,6 +343,18 @@
 	OPAL_ASSERT_RESET   = 1
 };
 
+enum OpalSlotLedType {
+	OPAL_SLOT_LED_TYPE_ID = 0,	/* IDENTIFY LED */
+	OPAL_SLOT_LED_TYPE_FAULT = 1,	/* FAULT LED */
+	OPAL_SLOT_LED_TYPE_ATTN = 2,	/* System Attention LED */
+	OPAL_SLOT_LED_TYPE_MAX = 3
+};
+
+enum OpalSlotLedState {
+	OPAL_SLOT_LED_STATE_OFF = 0,	/* LED is OFF */
+	OPAL_SLOT_LED_STATE_ON = 1	/* LED is ON */
+};
+
 /*
  * Address cycle types for LPC accesses. These also correspond
  * to the content of the first cell of the "reg" property for
@@ -438,6 +453,7 @@
 /* HMI interrupt event */
 enum OpalHMI_Version {
 	OpalHMIEvt_V1 = 1,
+	OpalHMIEvt_V2 = 2,
 };
 
 enum OpalHMI_Severity {
@@ -468,6 +484,49 @@
 	OpalHMI_ERROR_CAPP_RECOVERY,
 };
 
+enum OpalHMI_XstopType {
+	CHECKSTOP_TYPE_UNKNOWN	=	0,
+	CHECKSTOP_TYPE_CORE	=	1,
+	CHECKSTOP_TYPE_NX	=	2,
+};
+
+enum OpalHMI_CoreXstopReason {
+	CORE_CHECKSTOP_IFU_REGFILE		= 0x00000001,
+	CORE_CHECKSTOP_IFU_LOGIC		= 0x00000002,
+	CORE_CHECKSTOP_PC_DURING_RECOV		= 0x00000004,
+	CORE_CHECKSTOP_ISU_REGFILE		= 0x00000008,
+	CORE_CHECKSTOP_ISU_LOGIC		= 0x00000010,
+	CORE_CHECKSTOP_FXU_LOGIC		= 0x00000020,
+	CORE_CHECKSTOP_VSU_LOGIC		= 0x00000040,
+	CORE_CHECKSTOP_PC_RECOV_IN_MAINT_MODE	= 0x00000080,
+	CORE_CHECKSTOP_LSU_REGFILE		= 0x00000100,
+	CORE_CHECKSTOP_PC_FWD_PROGRESS		= 0x00000200,
+	CORE_CHECKSTOP_LSU_LOGIC		= 0x00000400,
+	CORE_CHECKSTOP_PC_LOGIC			= 0x00000800,
+	CORE_CHECKSTOP_PC_HYP_RESOURCE		= 0x00001000,
+	CORE_CHECKSTOP_PC_HANG_RECOV_FAILED	= 0x00002000,
+	CORE_CHECKSTOP_PC_AMBI_HANG_DETECTED	= 0x00004000,
+	CORE_CHECKSTOP_PC_DEBUG_TRIG_ERR_INJ	= 0x00008000,
+	CORE_CHECKSTOP_PC_SPRD_HYP_ERR_INJ	= 0x00010000,
+};
+
+enum OpalHMI_NestAccelXstopReason {
+	NX_CHECKSTOP_SHM_INVAL_STATE_ERR	= 0x00000001,
+	NX_CHECKSTOP_DMA_INVAL_STATE_ERR_1	= 0x00000002,
+	NX_CHECKSTOP_DMA_INVAL_STATE_ERR_2	= 0x00000004,
+	NX_CHECKSTOP_DMA_CH0_INVAL_STATE_ERR	= 0x00000008,
+	NX_CHECKSTOP_DMA_CH1_INVAL_STATE_ERR	= 0x00000010,
+	NX_CHECKSTOP_DMA_CH2_INVAL_STATE_ERR	= 0x00000020,
+	NX_CHECKSTOP_DMA_CH3_INVAL_STATE_ERR	= 0x00000040,
+	NX_CHECKSTOP_DMA_CH4_INVAL_STATE_ERR	= 0x00000080,
+	NX_CHECKSTOP_DMA_CH5_INVAL_STATE_ERR	= 0x00000100,
+	NX_CHECKSTOP_DMA_CH6_INVAL_STATE_ERR	= 0x00000200,
+	NX_CHECKSTOP_DMA_CH7_INVAL_STATE_ERR	= 0x00000400,
+	NX_CHECKSTOP_DMA_CRB_UE			= 0x00000800,
+	NX_CHECKSTOP_DMA_CRB_SUE		= 0x00001000,
+	NX_CHECKSTOP_PBI_ISN_UE			= 0x00002000,
+};
+
 struct OpalHMIEvent {
 	uint8_t		version;	/* 0x00 */
 	uint8_t		severity;	/* 0x01 */
@@ -478,6 +537,23 @@
 	__be64		hmer;
 	/* TFMR register. Valid only for TFAC and TFMR_PARITY error type. */
 	__be64		tfmr;
+
+	/* version 2 and later */
+	union {
+		/*
+		 * checkstop info (Core/NX).
+		 * Valid for OpalHMI_ERROR_MALFUNC_ALERT.
+		 */
+		struct {
+			uint8_t	xstop_type;	/* enum OpalHMI_XstopType */
+			uint8_t reserved_1[3];
+			__be32  xstop_reason;
+			union {
+				__be32 pir;	/* for CHECKSTOP_TYPE_CORE */
+				__be32 chip_id;	/* for CHECKSTOP_TYPE_NX */
+			} u;
+		} xstop_error;
+	} u;
 };
 
 enum {
@@ -768,6 +844,52 @@
 	__be64 buffer_ra;		/* Buffer real address */
 };
 
+/*
+ * EPOW status sharing (OPAL and the host)
+ *
+ * The host passes OPAL a buffer of length OPAL_SYSEPOW_MAX, with
+ * individual elements being 16 bits wide, to fetch the system-wide
+ * EPOW status. Each element in the buffer carries the EPOW status,
+ * in its bit representation, for a particular EPOW sub class as
+ * defined here. So multiple detailed EPOW status bits specific to
+ * any sub class can be represented in a single buffer element as
+ * its bit representation.
+ */
+
+/* System EPOW type */
+enum OpalSysEpow {
+	OPAL_SYSEPOW_POWER	= 0,	/* Power EPOW */
+	OPAL_SYSEPOW_TEMP	= 1,	/* Temperature EPOW */
+	OPAL_SYSEPOW_COOLING	= 2,	/* Cooling EPOW */
+	OPAL_SYSEPOW_MAX	= 3,	/* Max EPOW categories */
+};
+
+/* Power EPOW */
+enum OpalSysPower {
+	OPAL_SYSPOWER_UPS	= 0x0001, /* System on UPS power */
+	OPAL_SYSPOWER_CHNG	= 0x0002, /* System power config change */
+	OPAL_SYSPOWER_FAIL	= 0x0004, /* System impending power failure */
+	OPAL_SYSPOWER_INCL	= 0x0008, /* System incomplete power */
+};
+
+/* Temperature EPOW */
+enum OpalSysTemp {
+	OPAL_SYSTEMP_AMB	= 0x0001, /* System over ambient temperature */
+	OPAL_SYSTEMP_INT	= 0x0002, /* System over internal temperature */
+	OPAL_SYSTEMP_HMD	= 0x0004, /* System over ambient humidity */
+};
+
+/* Cooling EPOW */
+enum OpalSysCooling {
+	OPAL_SYSCOOL_INSF	= 0x0001, /* System insufficient cooling */
+};
+
+/* Argument to OPAL_CEC_REBOOT2() */
+enum {
+	OPAL_REBOOT_NORMAL		= 0,
+	OPAL_REBOOT_PLATFORM_ERROR	= 1,
+};
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __OPAL_API_H */
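[Editor's note: a minimal sketch of how a host-side consumer might decode the EPOW buffer described above (error handling trimmed; opal_get_epow_status() gets its new two-argument form in the opal.h hunk below):

	__be16 epow[OPAL_SYSEPOW_MAX];
	__be16 nclasses = cpu_to_be16(OPAL_SYSEPOW_MAX);

	if (opal_get_epow_status(epow, &nclasses) == OPAL_SUCCESS &&
	    be16_to_cpu(nclasses) > OPAL_SYSEPOW_POWER &&
	    (be16_to_cpu(epow[OPAL_SYSEPOW_POWER]) & OPAL_SYSPOWER_UPS))
		pr_info("System is running on UPS power\n");
]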
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 958e941..8001159 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -44,6 +44,7 @@
 		       uint32_t hour_min);
 int64_t opal_cec_power_down(uint64_t request);
 int64_t opal_cec_reboot(void);
+int64_t opal_cec_reboot2(uint32_t reboot_type, char *diag);
 int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
 int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
 int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask);
@@ -141,7 +142,8 @@
 int64_t opal_pci_reinit(uint64_t phb_id, uint64_t reinit_scope, uint64_t data);
 int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
 int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
-int64_t opal_get_epow_status(__be64 *status);
+int64_t opal_get_epow_status(__be16 *epow_status, __be16 *num_epow_classes);
+int64_t opal_get_dpo_status(__be64 *dpo_timeout);
 int64_t opal_set_system_attention_led(uint8_t led_action);
 int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe,
 			    __be16 *pci_error_type, __be16 *severity);
@@ -195,6 +197,10 @@
 int64_t opal_i2c_request(uint64_t async_token, uint32_t bus_id,
 			 struct opal_i2c_request *oreq);
 int64_t opal_prd_msg(struct opal_prd_msg *msg);
+int64_t opal_leds_get_ind(char *loc_code, __be64 *led_mask,
+			  __be64 *led_value, __be64 *max_led_type);
+int64_t opal_leds_set_ind(uint64_t token, char *loc_code, const u64 led_mask,
+			  const u64 led_value, __be64 *max_led_type);
 
 int64_t opal_flash_read(uint64_t id, uint64_t offset, uint64_t buf,
 		uint64_t size, uint64_t token);
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 712add5..37fc535 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -42,6 +42,7 @@
 #endif
 
 	int             (*dma_set_mask)(struct pci_dev *dev, u64 dma_mask);
+	u64		(*dma_get_required_mask)(struct pci_dev *dev);
 
 	void		(*shutdown)(struct pci_controller *);
 };
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 3bb7488..fa1dfb7 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -134,11 +134,11 @@
 
 #define pte_iterate_hashed_end() } while(0)
 
-#ifdef CONFIG_PPC_HAS_HASH_64K
-#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
-#else
+/*
+ * We expect this to be called only for user addresses or kernel virtual
+ * addresses other than the linear mapping.
+ */
 #define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
-#endif
 
 #endif /* __real_pte */
 
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 11a3863..0717693 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -169,6 +169,17 @@
 	 * cases, and 32-bit non-hash with 32-bit PTEs.
 	 */
 	*ptep = pte;
+
+#ifdef CONFIG_PPC_BOOK3E_64
+	/*
+	 * With hardware tablewalk, a sync is needed to ensure that
+	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
+	 * mappings, we can't tolerate spurious faults, so make sure
+	 * the new PTE will be seen the first time.
+	 */
+	if (is_kernel_addr(addr))
+		mb();
+#endif
 #endif
 }
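[Editor's note, making the hazard above concrete: the sync matters for sequences such as an ioremap() followed immediately by an access, where the Book3E hardware tablewalker must observe the just-written kernel PTE (illustrative):

	void __iomem *p = ioremap(phys, size);	/* writes a kernel PTE */
	val = in_be32(p);			/* must not take a spurious fault */

Userspace mappings can simply retry after a spurious fault; kernel accesses cannot, hence the mb() for is_kernel_addr() addresses.]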
 
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 4122a86..ca0c5bf 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -61,6 +61,7 @@
 int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);
 void eeh_pe_state_mark(struct eeh_pe *pe, int state);
 void eeh_pe_state_clear(struct eeh_pe *pe, int state);
+void eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state);
 void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode);
 
 void eeh_sysfs_add_device(struct pci_dev *pdev);
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 28ded5d..5afea36 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -264,7 +264,6 @@
 	u64		tm_tfhar;	/* Transaction fail handler addr */
 	u64		tm_texasr;	/* Transaction exception & summary */
 	u64		tm_tfiar;	/* Transaction fail instr address reg */
-	unsigned long	tm_orig_msr;	/* Thread's MSR on ctx switch */
 	struct pt_regs	ckpt_regs;	/* Checkpointed registers */
 
 	unsigned long	tm_tar;
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index b7c8d07..71537a3 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -109,7 +109,8 @@
  * the processor might need it for DMA coherency.
  */
 #define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
-#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU) || \
+	defined(CONFIG_PPC_E500MC)
 #define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
 #else
 #define _PAGE_BASE	(_PAGE_BASE_NC)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index af56b5c..aa1cc5f0 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1193,8 +1193,7 @@
 #ifdef CONFIG_PPC_BOOK3S_64
 #define __mtmsrd(v, l)	asm volatile("mtmsrd %0," __stringify(l) \
 				     : : "r" (v) : "memory")
-#define mtmsrd(v)	__mtmsrd((v), 0)
-#define mtmsr(v)	mtmsrd(v)
+#define mtmsr(v)	__mtmsrd((v), 0)
 #else
 #define mtmsr(v)	asm volatile("mtmsr %0" : \
 				     : "r" ((unsigned long)(v)) \
@@ -1281,6 +1280,15 @@
 
 extern void ppc_save_regs(struct pt_regs *regs);
 
+static inline void update_power8_hid0(unsigned long hid0)
+{
+	/*
+	 *  The HID0 update on Power8 should at the very least be
+	 *  preceded by a SYNC instruction followed by an ISYNC
+	 *  instruction.
+	 */
+	asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0));
+}
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_REG_H */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 7a4ede1..b77ef36 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -343,6 +343,7 @@
 extern void rtas_halt(void);
 extern void rtas_os_term(char *str);
 extern int rtas_get_sensor(int sensor, int index, int *state);
+extern int rtas_get_sensor_fast(int sensor, int index, int *state);
 extern int rtas_get_power_level(int powerdomain, int *level);
 extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
 extern bool rtas_indicator_present(int token, int *maxindex);
diff --git a/arch/powerpc/include/asm/spu_csa.h b/arch/powerpc/include/asm/spu_csa.h
index a40fd49..51f80b4 100644
--- a/arch/powerpc/include/asm/spu_csa.h
+++ b/arch/powerpc/include/asm/spu_csa.h
@@ -241,12 +241,6 @@
  */
 struct spu_state {
 	struct spu_lscsa *lscsa;
-#ifdef CONFIG_SPU_FS_64K_LS
-	int		use_big_pages;
-	/* One struct page per 64k page */
-#define SPU_LSCSA_NUM_BIG_PAGES	(sizeof(struct spu_lscsa) / 0x10000)
-	struct page	*lscsa_pages[SPU_LSCSA_NUM_BIG_PAGES];
-#endif
 	struct spu_problem_collapsed prob;
 	struct spu_priv1_collapsed priv1;
 	struct spu_priv2_collapsed priv2;
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index ff21b7a..ab9f3f0 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -22,10 +22,15 @@
 extern const unsigned long sys_call_table[];
 #endif /* CONFIG_FTRACE_SYSCALLS */
 
-static inline long syscall_get_nr(struct task_struct *task,
-				  struct pt_regs *regs)
+static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
 {
-	return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1L;
+	/*
+	 * Note that we are returning an int here. That means 0xffffffff, ie.
+	 * 32-bit negative 1, will be interpreted as -1 on a 64-bit kernel.
+	 * This is important for seccomp so that compat tasks can set r0 = -1
+	 * to reject the syscall.
+	 */
+	return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1;
 }
 
 static inline void syscall_rollback(struct task_struct *task,
@@ -34,12 +39,6 @@
 	regs->gpr[3] = regs->orig_gpr3;
 }
 
-static inline long syscall_get_error(struct task_struct *task,
-				     struct pt_regs *regs)
-{
-	return (regs->ccr & 0x10000000) ? -regs->gpr[3] : 0;
-}
-
 static inline long syscall_get_return_value(struct task_struct *task,
 					    struct pt_regs *regs)
 {
@@ -50,9 +49,15 @@
 					    struct pt_regs *regs,
 					    int error, long val)
 {
+	/*
+	 * In the general case it's not obvious that we must deal with CCR
+	 * here, as the syscall exit path will also do that for us. However
+	 * there are some places, eg. the signal code, which check ccr to
+	 * decide if the value in r3 is actually an error.
+	 */
 	if (error) {
 		regs->ccr |= 0x10000000L;
-		regs->gpr[3] = -error;
+		regs->gpr[3] = error;
 	} else {
 		regs->ccr &= ~0x10000000L;
 		regs->gpr[3] = val;
@@ -64,19 +69,22 @@
 					 unsigned int i, unsigned int n,
 					 unsigned long *args)
 {
+	unsigned long val, mask = -1UL;
+
 	BUG_ON(i + n > 6);
-#ifdef CONFIG_PPC64
-	if (test_tsk_thread_flag(task, TIF_32BIT)) {
-		/*
-		 * Zero-extend 32-bit argument values.  The high bits are
-		 * garbage ignored by the actual syscall dispatch.
-		 */
-		while (n-- > 0)
-			args[n] = (u32) regs->gpr[3 + i + n];
-		return;
-	}
+
+#ifdef CONFIG_COMPAT
+	if (test_tsk_thread_flag(task, TIF_32BIT))
+		mask = 0xffffffff;
 #endif
-	memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0]));
+	while (n--) {
+		if (n == 0 && i == 0)
+			val = regs->orig_gpr3;
+		else
+			val = regs->gpr[3 + i + n];
+
+		args[n] = val & mask;
+	}
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
@@ -86,6 +94,10 @@
 {
 	BUG_ON(i + n > 6);
 	memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+
+	/* Also copy the first argument into orig_gpr3 */
+	if (i == 0 && n > 0)
+		regs->orig_gpr3 = args[0];
 }
 
 static inline int syscall_get_arch(void)
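[Editor's note: two subtleties in the rewritten syscall_get_arguments() merit a worked example: argument 0 is read from orig_gpr3 (gpr[3] may already hold a return value when a tracer asks), and compat tasks have their arguments masked down to 32 bits. For a 32-bit task whose second argument arrived sign-extended (hypothetical value):

	/* i == 1, n == 1: fetch argument 1 of a compat task */
	val = regs->gpr[4];		/* e.g. 0xffffffffdeadbeef */
	args[0] = val & 0xffffffff;	/* the tracer sees 0xdeadbeef */
]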
diff --git a/arch/powerpc/include/asm/trace_clock.h b/arch/powerpc/include/asm/trace_clock.h
new file mode 100644
index 0000000..cf1ee75
--- /dev/null
+++ b/arch/powerpc/include/asm/trace_clock.h
@@ -0,0 +1,19 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2015 Naveen N. Rao, IBM Corporation
+ */
+
+#ifndef _ASM_PPC_TRACE_CLOCK_H
+#define _ASM_PPC_TRACE_CLOCK_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+extern u64 notrace trace_clock_ppc_tb(void);
+
+#define ARCH_TRACE_CLOCKS { trace_clock_ppc_tb, "ppc-tb", 0 },
+
+#endif  /* _ASM_PPC_TRACE_CLOCK_H */
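[Editor's note: clocks registered through ARCH_TRACE_CLOCKS become selectable in ftrace's clock list, so switching to the new timebase clock is a one-liner (path assumes debugfs is mounted at the usual location):

	# echo ppc-tb > /sys/kernel/debug/tracing/trace_clock
]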
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index f44a0278..dab3717 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -6,6 +6,7 @@
 header-y += bootx.h
 header-y += byteorder.h
 header-y += cputable.h
+header-y += eeh.h
 header-y += elf.h
 header-y += epapr_hcalls.h
 header-y += errno.h
diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h
index 8c145fd..e8b6b5f 100644
--- a/arch/powerpc/include/uapi/asm/errno.h
+++ b/arch/powerpc/include/uapi/asm/errno.h
@@ -6,6 +6,4 @@
 #undef	EDEADLOCK
 #define	EDEADLOCK	58	/* File locking deadlock error */
 
-#define _LAST_ERRNO	516
-
 #endif	/* _ASM_POWERPC_ERRNO_H */
diff --git a/arch/powerpc/include/uapi/asm/sigcontext.h b/arch/powerpc/include/uapi/asm/sigcontext.h
index 9c1f24f..3ad0c7f 100644
--- a/arch/powerpc/include/uapi/asm/sigcontext.h
+++ b/arch/powerpc/include/uapi/asm/sigcontext.h
@@ -28,7 +28,7 @@
 /*
  * To maintain compatibility with current implementations the sigcontext is
  * extended by appending a pointer (v_regs) to a quadword type (elf_vrreg_t)
- * followed by an unstructured (vmx_reserve) field of 69 doublewords.  This
+ * followed by an unstructured (vmx_reserve) field of 101 doublewords. This
  * allows the array of vector registers to be quadword aligned independent of
  * the alignment of the containing sigcontext or ucontext. It is the
  * responsibility of the code setting the sigcontext to set this pointer to
@@ -80,7 +80,7 @@
  * registers and vscr/vrsave.
  */
 	elf_vrreg_t	__user *v_regs;
-	long		vmx_reserve[ELF_NVRREG+ELF_NVRREG+32+1];
+	long		vmx_reserve[ELF_NVRREG + ELF_NVRREG + 1 + 32];
 #endif
 };
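[Editor's note: the corrected comment and the reordered expression agree arithmetically. Assuming the usual ppc64 value ELF_NVRREG = 34 (32 vector registers plus vscr and vrsave),

	ELF_NVRREG + ELF_NVRREG + 1 + 32 = 34 + 34 + 1 + 32 = 101

doublewords: two doublewords per quadword VMX entry, one doubleword of alignment slack, and 32 doublewords for the VSX low halves. The array size itself is unchanged; only the stale "69" in the old comment was wrong.]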
 
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 12868b1..ba33693 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -118,6 +118,7 @@
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
+obj-$(CONFIG_TRACING)		+= trace_clock.o
 
 ifneq ($(CONFIG_PPC_INDIRECT_PIO),y)
 obj-y				+= iomap.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9823057..810f433 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -213,7 +213,6 @@
 		offsetof(struct tlb_core_data, esel_max));
 	DEFINE(TCD_ESEL_FIRST,
 		offsetof(struct tlb_core_data, esel_first));
-	DEFINE(TCD_LOCK, offsetof(struct tlb_core_data, lock));
 #endif /* CONFIG_PPC_BOOK3E */
 
 #ifdef CONFIG_PPC_STD_MMU_64
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 4c68bfe..41a7d9d 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -73,7 +73,7 @@
 }
 
 /* We support DMA to/from any memory page via the iommu */
-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
+int dma_iommu_dma_supported(struct device *dev, u64 mask)
 {
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 6e8d764..c6689f6 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -47,8 +47,8 @@
  * for everything else.
  */
 struct dma_map_ops swiotlb_dma_ops = {
-	.alloc = dma_direct_alloc_coherent,
-	.free = dma_direct_free_coherent,
+	.alloc = __dma_direct_alloc_coherent,
+	.free = __dma_direct_free_coherent,
 	.mmap = dma_direct_mmap_coherent,
 	.map_sg = swiotlb_map_sg_attrs,
 	.unmap_sg = swiotlb_unmap_sg_attrs,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 35e4dcc..59503ed 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -16,6 +16,7 @@
 #include <asm/bug.h>
 #include <asm/machdep.h>
 #include <asm/swiotlb.h>
+#include <asm/iommu.h>
 
 /*
  * Generic direct DMA implementation
@@ -39,9 +40,31 @@
 	return pfn;
 }
 
-void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag,
-				struct dma_attrs *attrs)
+static int dma_direct_dma_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_PPC64
+	u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
+
+	/* Limit fits in the mask, we are good */
+	if (mask >= limit)
+		return 1;
+
+#ifdef CONFIG_FSL_SOC
+	/* Freescale gets another chance via ZONE_DMA/ZONE_DMA32, though
+	 * that will have to be refined if/when they support iommus.
+	 */
+	return 1;
+#endif
+	/* Sorry ... */
+	return 0;
+#else
+	return 1;
+#endif
+}
+
+void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
+				  dma_addr_t *dma_handle, gfp_t flag,
+				  struct dma_attrs *attrs)
 {
 	void *ret;
 #ifdef CONFIG_NOT_COHERENT_CACHE
@@ -96,9 +119,9 @@
 #endif
 }
 
-void dma_direct_free_coherent(struct device *dev, size_t size,
-			      void *vaddr, dma_addr_t dma_handle,
-			      struct dma_attrs *attrs)
+void __dma_direct_free_coherent(struct device *dev, size_t size,
+				void *vaddr, dma_addr_t dma_handle,
+				struct dma_attrs *attrs)
 {
 #ifdef CONFIG_NOT_COHERENT_CACHE
 	__dma_free_coherent(size, vaddr);
@@ -107,6 +130,51 @@
 #endif
 }
 
+static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag,
+				       struct dma_attrs *attrs)
+{
+	struct iommu_table *iommu;
+
+	/* The coherent mask may be smaller than the real mask, check if
+	 * we can really use the direct ops
+	 */
+	if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
+		return __dma_direct_alloc_coherent(dev, size, dma_handle,
+						   flag, attrs);
+
+	/* Ok we can't ... do we have an iommu ? If not, fail */
+	iommu = get_iommu_table_base(dev);
+	if (!iommu)
+		return NULL;
+
+	/* Try to use the iommu */
+	return iommu_alloc_coherent(dev, iommu, size, dma_handle,
+				    dev->coherent_dma_mask, flag,
+				    dev_to_node(dev));
+}
+
+static void dma_direct_free_coherent(struct device *dev, size_t size,
+				     void *vaddr, dma_addr_t dma_handle,
+				     struct dma_attrs *attrs)
+{
+	struct iommu_table *iommu;
+
+	/* See comments in dma_direct_alloc_coherent() */
+	if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
+		return __dma_direct_free_coherent(dev, size, vaddr, dma_handle,
+						  attrs);
+	/* Maybe we used an iommu ... */
+	iommu = get_iommu_table_base(dev);
+
+	/* If we hit this we should never have allocated in the first
+	 * place, so why are we freeing?
+	 */
+	if (WARN_ON(!iommu))
+		return;
+	iommu_free_coherent(iommu, size, vaddr, dma_handle);
+}
+
 int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
 			     void *cpu_addr, dma_addr_t handle, size_t size,
 			     struct dma_attrs *attrs)
@@ -147,18 +215,6 @@
 {
 }
 
-static int dma_direct_dma_supported(struct device *dev, u64 mask)
-{
-#ifdef CONFIG_PPC64
-	/* Could be improved so platforms can set the limit in case
-	 * they have limited DMA windows
-	 */
-	return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
-#else
-	return 1;
-#endif
-}
-
 static u64 dma_direct_get_required_mask(struct device *dev)
 {
 	u64 end, mask;
@@ -230,6 +286,25 @@
 };
 EXPORT_SYMBOL(dma_direct_ops);
 
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+	if (!dma_supported(dev, mask)) {
+		/*
+		 * We need to special case the direct DMA ops which can
+		 * support a fallback for coherent allocations. There
+		 * is no dma_op->set_coherent_mask() so we have to do
+		 * things the hard way:
+		 */
+		if (get_dma_ops(dev) != &dma_direct_ops ||
+		    get_iommu_table_base(dev) == NULL ||
+		    !dma_iommu_dma_supported(dev, mask))
+			return -EIO;
+	}
+	dev->coherent_dma_mask = mask;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dma_set_coherent_mask);
+
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
 
 int __dma_set_mask(struct device *dev, u64 dma_mask)
@@ -278,6 +353,13 @@
 	if (ppc_md.dma_get_required_mask)
 		return ppc_md.dma_get_required_mask(dev);
 
+	if (dev_is_pci(dev)) {
+		struct pci_dev *pdev = to_pci_dev(dev);
+		struct pci_controller *phb = pci_bus_to_host(pdev->bus);
+		if (phb->controller_ops.dma_get_required_mask)
+			return phb->controller_ops.dma_get_required_mask(pdev);
+	}
+
 	return __dma_get_required_mask(dev);
 }
 EXPORT_SYMBOL_GPL(dma_get_required_mask);
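
The two changes above implement one policy: coherent allocations use the
direct ops only when the coherent DMA mask covers all of RAM, and otherwise
fall back to the IOMMU. A minimal userspace sketch of that dispatch, where
RAM_TOP stands in for memblock_end_of_DRAM() and a boolean stands in for
get_iommu_table_base():

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define RAM_TOP 0x200000000ULL		/* pretend 8 GB of RAM */

static bool direct_dma_supported(uint64_t mask)
{
	/* Direct ops work only if the mask covers every RAM address */
	return mask >= RAM_TOP - 1;
}

static const char *alloc_coherent(uint64_t coherent_mask, bool have_iommu)
{
	if (direct_dma_supported(coherent_mask))
		return "direct allocation";
	if (!have_iommu)
		return "failure (NULL)";
	return "iommu_alloc_coherent() fallback";
}

int main(void)
{
	printf("64-bit mask:            %s\n", alloc_coherent(~0ULL, true));
	printf("31-bit mask:            %s\n",
	       alloc_coherent((1ULL << 31) - 1, true));
	printf("31-bit mask, no iommu:  %s\n",
	       alloc_coherent((1ULL << 31) - 1, false));
	return 0;
}

The same reasoning drives the new dma_set_coherent_mask(): a mask that the
direct ops reject is still acceptable if an IOMMU can serve the fallback.
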
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index af9b597..e968533 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -308,11 +308,26 @@
 	if (!(pe->type & EEH_PE_PHB)) {
 		if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
 			eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
-		eeh_ops->configure_bridge(pe);
-		eeh_pe_restore_bars(pe);
 
-		pci_regs_buf[0] = 0;
-		eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
+		/*
+		 * The config space of some PCI devices can't be accessed
+		 * while their PEs are in the frozen state, or a fenced
+		 * PHB may result. Those PEs are identified with the flag
+		 * EEH_PE_CFG_RESTRICTED, indicating that EEH_PE_CFG_BLOCKED
+		 * is set automatically when the PE is put into EEH_PE_ISOLATED.
+		 *
+		 * Restoring BARs can trigger PCI config accesses in (OPAL)
+		 * firmware, which can in turn fence the PHB. If PCI config
+		 * access is blocked with flag EEH_PE_CFG_BLOCKED, it's
+		 * pointless to restore BARs and dump the config space.
+		 */
+		eeh_ops->configure_bridge(pe);
+		if (!(pe->state & EEH_PE_CFG_BLOCKED)) {
+			eeh_pe_restore_bars(pe);
+
+			pci_regs_buf[0] = 0;
+			eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
+		}
 	}
 
 	eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
@@ -750,14 +765,14 @@
 		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
 		break;
 	case pcie_hot_reset:
-		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+		eeh_pe_state_mark_with_cfg(pe, EEH_PE_ISOLATED);
 		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
 		eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
 		eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
 		eeh_ops->reset(pe, EEH_RESET_HOT);
 		break;
 	case pcie_warm_reset:
-		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+		eeh_pe_state_mark_with_cfg(pe, EEH_PE_ISOLATED);
 		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
 		eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
 		eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
@@ -1116,9 +1131,6 @@
 		return;
 	}
 
-	if (eeh_has_flag(EEH_PROBE_MODE_DEV))
-		eeh_ops->probe(pdn, NULL);
-
 	/*
 	 * The EEH cache might not be removed correctly because of
 	 * unbalanced kref to the device during unplug time, which
@@ -1142,6 +1154,9 @@
 		dev->dev.archdata.edev = NULL;
 	}
 
+	if (eeh_has_flag(EEH_PROBE_MODE_DEV))
+		eeh_ops->probe(pdn, NULL);
+
 	edev->pdev = dev;
 	dev->dev.archdata.edev = edev;
 
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 35f0b62..8654cb1 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -657,6 +657,28 @@
 	eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
 }
 
+/**
+ * eeh_pe_state_mark_with_cfg - Mark PE state with unblocked config space
+ * @pe: PE
+ * @state: PE state to be set
+ *
+ * Set the specified flag on the PE and its child PEs. The PCI config
+ * space of some PEs is blocked automatically when EEH_PE_ISOLATED is
+ * set, which isn't needed in some situations. This function sets the
+ * specified flag on the indicated PEs without blocking their PCI
+ * config space.
+ */
+void eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state)
+{
+	eeh_pe_traverse(pe, __eeh_pe_state_mark, &state);
+	if (!(state & EEH_PE_ISOLATED))
+		return;
+
+	/* Clear EEH_PE_CFG_BLOCKED, which might be set just now */
+	state = EEH_PE_CFG_BLOCKED;
+	eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
+}
+
 /*
  * Some PCI bridges (e.g. PLX bridges) have primary/secondary
  * buses assigned explicitly by firmware, and we probably have
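
For reference, the mark-then-unblock sequence can be modelled in a few
lines of C. The flag values and the flat array of PEs below are invented
for the sketch; the kernel walks a PE tree via eeh_pe_traverse() and only
blocks config space for EEH_PE_CFG_RESTRICTED PEs:

#include <stdio.h>

#define EEH_PE_ISOLATED    0x1
#define EEH_PE_CFG_BLOCKED 0x2	/* set implicitly when ISOLATED is marked */

static void mark(int *pe_state, int n, int state)
{
	for (int i = 0; i < n; i++) {
		pe_state[i] |= state;
		if (state & EEH_PE_ISOLATED)
			pe_state[i] |= EEH_PE_CFG_BLOCKED; /* side effect */
	}
}

static void clear(int *pe_state, int n, int state)
{
	for (int i = 0; i < n; i++)
		pe_state[i] &= ~state;
}

static void mark_with_cfg(int *pe_state, int n, int state)
{
	mark(pe_state, n, state);
	if (!(state & EEH_PE_ISOLATED))
		return;
	/* Undo the implicit config-space blocking */
	clear(pe_state, n, EEH_PE_CFG_BLOCKED);
}

int main(void)
{
	int pes[3] = { 0 };

	mark_with_cfg(pes, 3, EEH_PE_ISOLATED);
	printf("PE0 state: %#x (ISOLATED set, CFG_BLOCKED cleared)\n", pes[0]);
	return 0;
}
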
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 46fc0f4..2405631 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -20,6 +20,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/err.h>
 #include <linux/sys.h>
 #include <linux/threads.h>
 #include <asm/reg.h>
@@ -354,7 +355,7 @@
 	SYNC
 	MTMSRD(r10)
 	lwz	r9,TI_FLAGS(r12)
-	li	r8,-_LAST_ERRNO
+	li	r8,-MAX_ERRNO
 	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 	bne-	syscall_exit_work
 	cmplw	0,r3,r8
@@ -457,6 +458,10 @@
 	lwz	r7,GPR7(r1)
 	lwz	r8,GPR8(r1)
 	REST_NVGPRS(r1)
+
+	cmplwi	r0,NR_syscalls
+	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
+	bge-	ret_from_syscall
 	b	syscall_dotrace_cont
 
 syscall_exit_work:
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 579e0f9..a94f155 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -19,6 +19,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/err.h>
 #include <asm/unistd.h>
 #include <asm/processor.h>
 #include <asm/page.h>
@@ -150,8 +151,7 @@
 	CURRENT_THREAD_INFO(r11, r1)
 	ld	r10,TI_FLAGS(r11)
 	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
-	bne	syscall_dotrace
-.Lsyscall_dotrace_cont:
+	bne	syscall_dotrace		/* does not return */
 	cmpldi	0,r0,NR_syscalls
 	bge-	syscall_enosys
 
@@ -207,7 +207,7 @@
 #endif /* CONFIG_PPC_BOOK3E */
 
 	ld	r9,TI_FLAGS(r12)
-	li	r11,-_LAST_ERRNO
+	li	r11,-MAX_ERRNO
 	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 	bne-	syscall_exit_work
 	cmpld	r3,r11
@@ -245,22 +245,34 @@
 	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_syscall_trace_enter
+
 	/*
-	 * Restore argument registers possibly just changed.
-	 * We use the return value of do_syscall_trace_enter
-	 * for the call number to look up in the table (r0).
+	 * We use the return value of do_syscall_trace_enter() as the syscall
+	 * number. If the syscall was rejected for any reason, do_syscall_trace_enter()
+	 * returns an invalid syscall number and the test below against
+	 * NR_syscalls will fail.
 	 */
 	mr	r0,r3
+
+	/* Restore argument registers just clobbered and/or possibly changed. */
 	ld	r3,GPR3(r1)
 	ld	r4,GPR4(r1)
 	ld	r5,GPR5(r1)
 	ld	r6,GPR6(r1)
 	ld	r7,GPR7(r1)
 	ld	r8,GPR8(r1)
+
+	/* Repopulate r9 and r10 for the system_call path */
 	addi	r9,r1,STACK_FRAME_OVERHEAD
 	CURRENT_THREAD_INFO(r10, r1)
 	ld	r10,TI_FLAGS(r10)
-	b	.Lsyscall_dotrace_cont
+
+	cmpldi	r0,NR_syscalls
+	blt+	system_call
+
+	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
+	b	.Lsyscall_exit
+
 
 syscall_enosys:
 	li	r3,-ENOSYS
@@ -277,7 +289,7 @@
 	beq+	0f
 	REST_NVGPRS(r1)
 	b	2f
-0:	cmpld	r3,r11		/* r10 is -LAST_ERRNO */
+0:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
 	blt+	1f
 	andi.	r0,r9,_TIF_NOERROR
 	bne-	1f
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 3e68d1c..f3bd5e7 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1313,11 +1313,14 @@
 	sync
 	isync
 
-/* The mapping only needs to be cache-coherent on SMP */
-#ifdef CONFIG_SMP
-#define M_IF_SMP	MAS2_M
+/*
+ * The mapping only needs to be cache-coherent on SMP, except on
+ * Freescale e500mc derivatives where it's also needed for coherent DMA.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
+#define M_IF_NEEDED	MAS2_M
 #else
-#define M_IF_SMP	0
+#define M_IF_NEEDED	0
 #endif
 
 /* 6. Setup KERNELBASE mapping in TLB[0]
@@ -1332,7 +1335,7 @@
 	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
 	mtspr	SPRN_MAS1,r6
 
-	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | M_IF_SMP)
+	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | M_IF_NEEDED)
 	mtspr	SPRN_MAS2,r6
 
 	rlwinm	r5,r5,0,0,25
diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
index f22e7e4..83dd0f6 100644
--- a/arch/powerpc/kernel/fsl_booke_entry_mapping.S
+++ b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
@@ -152,11 +152,14 @@
 	tlbivax 0,r9
 	TLBSYNC
 
-/* The mapping only needs to be cache-coherent on SMP */
-#ifdef CONFIG_SMP
-#define M_IF_SMP	MAS2_M
+/*
+ * The mapping only needs to be cache-coherent on SMP, except on
+ * Freescale e500mc derivatives where it's also needed for coherent DMA.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
+#define M_IF_NEEDED	MAS2_M
 #else
-#define M_IF_SMP	0
+#define M_IF_NEEDED	0
 #endif
 
 #if defined(ENTRY_MAPPING_BOOT_SETUP)
@@ -167,8 +170,8 @@
 	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
 	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
 	mtspr	SPRN_MAS1,r6
-	lis	r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@h
-	ori	r6,r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@l
+	lis	r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_NEEDED)@h
+	ori	r6,r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_NEEDED)@l
 	mtspr	SPRN_MAS2,r6
 	mtspr	SPRN_MAS3,r8
 	tlbwe
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index a1ed8a8..6472472 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -17,7 +17,7 @@
 {
 	u32 *addr = (u32 *)(unsigned long)entry->code;
 
-	if (type == JUMP_LABEL_ENABLE)
+	if (type == JUMP_LABEL_JMP)
 		patch_branch(addr, entry->target, 0);
 	else
 		patch_instruction(addr, PPC_INST_NOP);
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 33aa4dd..9ad37f8 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -649,7 +649,6 @@
 			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
 		}
 		break;
-		break;
 #endif
 	}
 
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 7c6bb4b..ed3ab50 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -596,25 +596,6 @@
 	b	2b
 
 /*
- * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
- * void atomic_set_mask(atomic_t mask, atomic_t *addr);
- */
-_GLOBAL(atomic_clear_mask)
-10:	lwarx	r5,0,r4
-	andc	r5,r5,r3
-	PPC405_ERR77(0,r4)
-	stwcx.	r5,0,r4
-	bne-	10b
-	blr
-_GLOBAL(atomic_set_mask)
-10:	lwarx	r5,0,r4
-	or	r5,r5,r3
-	PPC405_ERR77(0,r4)
-	stwcx.	r5,0,r4
-	bne-	10b
-	blr
-
-/*
  * Extended precision shifts.
  *
  * Updated to be valid for shift counts from 0 to 63 inclusive.
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 4e314b9..6e4168c 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -475,9 +475,18 @@
 #ifdef CONFIG_KEXEC		/* use no memory without kexec */
 	lwz	r4,0(r5)
 	cmpwi	0,r4,0
-	bnea	0x60
+	beq	99b
+#ifdef CONFIG_PPC_BOOK3S_64
+	li	r10,0x60
+	mfmsr	r11
+	clrrdi	r11,r11,1	/* Clear MSR_LE */
+	mtsrr0	r10
+	mtsrr1	r11
+	rfid
+#else
+	ba	0x60
 #endif
-	b	99b
+#endif
 
 /* this can be in text because we won't change it until we are
  * running in real anyways
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 1e703f8..98ba106 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -541,10 +541,9 @@
 			time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
 			time->tv_nsec = 0;
 		}
-		*buf = kmalloc(length, GFP_KERNEL);
+		*buf = kmemdup(buff + hdr_size, length, GFP_KERNEL);
 		if (*buf == NULL)
 			return -ENOMEM;
-		memcpy(*buf, buff + hdr_size, length);
 		kfree(buff);
 
 		if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
@@ -582,9 +581,10 @@
 	spin_lock_init(&nvram_pstore_info.buf_lock);
 
 	rc = pstore_register(&nvram_pstore_info);
-	if (rc != 0)
-		pr_err("nvram: pstore_register() failed, defaults to "
-				"kmsg_dump; returned %d\n", rc);
+	if (rc && (rc != -EPERM))
+		/* Print error only when pstore.backend == nvram */
+		pr_err("nvram: pstore_register() failed, returned %d. "
+				"Defaults to kmsg_dump\n", rc);
 
 	return rc;
 }
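
The kmalloc()+memcpy() pair above collapses into kmemdup(), which is just
allocate-then-copy. A userspace equivalent (memdup() here is a made-up
helper, not a libc function):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);	/* copy only on successful alloc */
	return p;
}

int main(void)
{
	const char msg[] = "oops log";
	char *copy = memdup(msg, sizeof(msg));

	if (copy)
		printf("%s\n", copy);
	free(copy);
	return 0;
}
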
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 02c1d5d..a1d0632 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -823,23 +823,15 @@
 		    (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
 			/* Only print message if not re-assigning */
 			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
-				pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
-					 "is unassigned\n",
-					 pci_name(dev), i,
-					 (unsigned long long)res->start,
-					 (unsigned long long)res->end,
-					 (unsigned int)res->flags);
+				pr_debug("PCI:%s Resource %d %pR is unassigned\n",
+					 pci_name(dev), i, res);
 			res->end -= res->start;
 			res->start = 0;
 			res->flags |= IORESOURCE_UNSET;
 			continue;
 		}
 
-		pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
-			 pci_name(dev), i,
-			 (unsigned long long)res->start,\
-			 (unsigned long long)res->end,
-			 (unsigned int)res->flags);
+		pr_debug("PCI:%s Resource %d %pR\n", pci_name(dev), i, res);
 	}
 
 	/* Call machine specific resource fixup */
@@ -943,11 +935,7 @@
 			continue;
 		}
 
-		pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x]\n",
-			 pci_name(dev), i,
-			 (unsigned long long)res->start,\
-			 (unsigned long long)res->end,
-			 (unsigned int)res->flags);
+		pr_debug("PCI:%s Bus rsrc %d %pR\n", pci_name(dev), i, res);
 
 		/* Try to detect uninitialized P2P bridge resources,
 		 * and clear them out so they get re-assigned later
@@ -1126,10 +1114,8 @@
 	*pp = NULL;
 	for (p = res->child; p != NULL; p = p->sibling) {
 		p->parent = res;
-		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
-			 p->name,
-			 (unsigned long long)p->start,
-			 (unsigned long long)p->end, res->name);
+		pr_debug("PCI: Reparented %s %pR under %s\n",
+			 p->name, p, res->name);
 	}
 	return 0;
 }
@@ -1198,14 +1184,9 @@
 			}
 		}
 
-		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
-			 "[0x%x], parent %p (%s)\n",
-			 bus->self ? pci_name(bus->self) : "PHB",
-			 bus->number, i,
-			 (unsigned long long)res->start,
-			 (unsigned long long)res->end,
-			 (unsigned int)res->flags,
-			 pr, (pr && pr->name) ? pr->name : "nil");
+		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %pR, parent %p (%s)\n",
+			 bus->self ? pci_name(bus->self) : "PHB", bus->number,
+			 i, res, pr, (pr && pr->name) ? pr->name : "nil");
 
 		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
 			struct pci_dev *dev = bus->self;
@@ -1247,11 +1228,8 @@
 {
 	struct resource *pr, *r = &dev->resource[idx];
 
-	pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
-		 pci_name(dev), idx,
-		 (unsigned long long)r->start,
-		 (unsigned long long)r->end,
-		 (unsigned int)r->flags);
+	pr_debug("PCI: Allocating %s: Resource %d: %pR\n",
+		 pci_name(dev), idx, r);
 
 	pr = pci_find_parent_resource(dev, r);
 	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
@@ -1259,11 +1237,7 @@
 		printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
 		       " of device %s, will remap\n", idx, pci_name(dev));
 		if (pr)
-			pr_debug("PCI:  parent is %p: %016llx-%016llx [%x]\n",
-				 pr,
-				 (unsigned long long)pr->start,
-				 (unsigned long long)pr->end,
-				 (unsigned int)pr->flags);
+			pr_debug("PCI:  parent is %p: %pR\n", pr, pr);
 		/* We'll assign a new address later */
 		r->flags |= IORESOURCE_UNSET;
 		r->end -= r->start;
@@ -1425,12 +1399,8 @@
 			if (r->parent || !r->start || !r->flags)
 				continue;
 
-			pr_debug("PCI: Claiming %s: "
-				 "Resource %d: %016llx..%016llx [%x]\n",
-				 pci_name(dev), i,
-				 (unsigned long long)r->start,
-				 (unsigned long long)r->end,
-				 (unsigned int)r->flags);
+			pr_debug("PCI: Claiming %s: Resource %d: %pR\n",
+				 pci_name(dev), i, r);
 
 			if (pci_claim_resource(dev, i) == 0)
 				continue;
@@ -1514,11 +1484,8 @@
 	} else {
 		offset = pcibios_io_space_offset(hose);
 
-		pr_debug("PCI: PHB IO resource    = %08llx-%08llx [%lx] off 0x%08llx\n",
-			 (unsigned long long)res->start,
-			 (unsigned long long)res->end,
-			 (unsigned long)res->flags,
-			 (unsigned long long)offset);
+		pr_debug("PCI: PHB IO resource    = %pR off 0x%08llx\n",
+			 res, (unsigned long long)offset);
 		pci_add_resource_offset(resources, res, offset);
 	}
 
@@ -1535,11 +1502,8 @@
 		offset = hose->mem_offset[i];
 
 
-		pr_debug("PCI: PHB MEM resource %d = %08llx-%08llx [%lx] off 0x%08llx\n", i,
-			 (unsigned long long)res->start,
-			 (unsigned long long)res->end,
-			 (unsigned long)res->flags,
-			 (unsigned long long)offset);
+		pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i,
+			 res, (unsigned long long)offset);
 
 		pci_add_resource_offset(resources, res, offset);
 	}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 64e6e9d..75b6676 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -86,7 +86,7 @@
 	if (tsk == current && tsk->thread.regs &&
 	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
 	    !test_thread_flag(TIF_RESTORE_TM)) {
-		tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
+		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
 		set_thread_flag(TIF_RESTORE_TM);
 	}
 
@@ -104,7 +104,7 @@
 	if (tsk == current && tsk->thread.regs &&
 	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
 	    !test_thread_flag(TIF_RESTORE_TM)) {
-		tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
+		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
 		set_thread_flag(TIF_RESTORE_TM);
 	}
 
@@ -540,7 +540,7 @@
 	 * the thread will no longer be transactional.
 	 */
 	if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
-		msr_diff = thr->tm_orig_msr & ~thr->regs->msr;
+		msr_diff = thr->ckpt_regs.msr & ~thr->regs->msr;
 		if (msr_diff & MSR_FP)
 			memcpy(&thr->transact_fp, &thr->fp_state,
 			       sizeof(struct thread_fp_state));
@@ -591,10 +591,10 @@
 	/* Stash the original thread MSR, as giveup_fpu et al will
 	 * modify it.  We hold onto it to see whether the task used
 	 * FP & vector regs.  If the TIF_RESTORE_TM flag is set,
-	 * tm_orig_msr is already set.
+	 * ckpt_regs.msr is already set.
 	 */
 	if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
-		thr->tm_orig_msr = thr->regs->msr;
+		thr->ckpt_regs.msr = thr->regs->msr;
 
 	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
 		 "ccr=%lx, msr=%lx, trap=%lx)\n",
@@ -663,7 +663,7 @@
 		tm_restore_sprs(&new->thread);
 		return;
 	}
-	msr = new->thread.tm_orig_msr;
+	msr = new->thread.ckpt_regs.msr;
 	/* Recheckpoint to restore original checkpointed register state. */
 	TM_DEBUG("*** tm_recheckpoint of pid %d "
 		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
@@ -723,7 +723,7 @@
 	if (!MSR_TM_ACTIVE(regs->msr))
 		return;
 
-	msr_diff = current->thread.tm_orig_msr & ~regs->msr;
+	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
 	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
 	if (msr_diff & MSR_FP) {
 		fp_enable();
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 8b888b1..bef76c5 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -218,22 +218,18 @@
 }
 
 #ifdef CONFIG_PPC_STD_MMU_64
-static void __init check_cpu_slb_size(unsigned long node)
+static void __init init_mmu_slb_size(unsigned long node)
 {
 	const __be32 *slb_size_ptr;
 
-	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
-	if (slb_size_ptr != NULL) {
+	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
+			of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
+
+	if (slb_size_ptr)
 		mmu_slb_size = be32_to_cpup(slb_size_ptr);
-		return;
-	}
-	slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
-	if (slb_size_ptr != NULL) {
-		mmu_slb_size = be32_to_cpup(slb_size_ptr);
-	}
 }
 #else
-#define check_cpu_slb_size(node) do { } while(0)
+#define init_mmu_slb_size(node) do { } while(0)
 #endif
 
 static struct feature_property {
@@ -380,7 +376,7 @@
 
 	check_cpu_feature_properties(node);
 	check_cpu_pa_features(node);
-	check_cpu_slb_size(node);
+	init_mmu_slb_size(node);
 
 #ifdef CONFIG_PPC64
 	if (nthreads > 1)
@@ -476,9 +472,10 @@
 		flags = of_read_number(&dm[3], 1);
 		/* skip DRC index, pad, assoc. list index, flags */
 		dm += 4;
-		/* skip this block if the reserved bit is set in flags (0x80)
-		   or if the block is not assigned to this partition (0x8) */
-		if ((flags & 0x80) || !(flags & 0x8))
+		/* skip this block if the reserved bit is set in flags
+		   or if the block is not assigned to this partition */
+		if ((flags & DRCONF_MEM_RESERVED) ||
+				!(flags & DRCONF_MEM_ASSIGNED))
 			continue;
 		size = memblock_size;
 		rngs = 1;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index fcca807..15099c4 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -641,6 +641,15 @@
 #define W(x)	((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
 		((x) >> 8) & 0xff, (x) & 0xff
 
+/* Firmware expects the value to be n - 1, where n is the # of vectors */
+#define NUM_VECTORS(n)		((n) - 1)
+
+/*
+ * Firmware expects 1 + n - 2, where n is the length of the option vector in
+ * bytes. The 1 accounts for the length byte itself; the reason for
+ * the - 2 is not obvious.
+ */
+#define VECTOR_LENGTH(n)	(1 + (n) - 2)
+
 unsigned char ibm_architecture_vec[] = {
 	W(0xfffe0000), W(0x003a0000),	/* POWER5/POWER5+ */
 	W(0xffff0000), W(0x003e0000),	/* POWER6 */
@@ -651,16 +660,16 @@
 	W(0xffffffff), W(0x0f000003),	/* all 2.06-compliant */
 	W(0xffffffff), W(0x0f000002),	/* all 2.05-compliant */
 	W(0xfffffffe), W(0x0f000001),	/* all 2.04-compliant and earlier */
-	6 - 1,				/* 6 option vectors */
+	NUM_VECTORS(6),			/* 6 option vectors */
 
 	/* option vector 1: processor architectures supported */
-	3 - 2,				/* length */
+	VECTOR_LENGTH(2),		/* length */
 	0,				/* don't ignore, don't halt */
 	OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
 	OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
 
 	/* option vector 2: Open Firmware options supported */
-	34 - 2,				/* length */
+	VECTOR_LENGTH(33),		/* length */
 	OV2_REAL_MODE,
 	0, 0,
 	W(0xffffffff),			/* real_base */
@@ -674,17 +683,17 @@
 	48,				/* max log_2(hash table size) */
 
 	/* option vector 3: processor options supported */
-	3 - 2,				/* length */
+	VECTOR_LENGTH(2),		/* length */
 	0,				/* don't ignore, don't halt */
 	OV3_FP | OV3_VMX | OV3_DFP,
 
 	/* option vector 4: IBM PAPR implementation */
-	3 - 2,				/* length */
+	VECTOR_LENGTH(2),		/* length */
 	0,				/* don't halt */
 	OV4_MIN_ENT_CAP,		/* minimum VP entitled capacity */
 
 	/* option vector 5: PAPR/OF options */
-	19 - 2,				/* length */
+	VECTOR_LENGTH(18),		/* length */
 	0,				/* don't ignore, don't halt */
 	OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
 	OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
@@ -717,12 +726,12 @@
 	OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
 	OV5_FEAT(OV5_PFO_HW_842),
 	OV5_FEAT(OV5_SUB_PROCESSORS),
+
 	/* option vector 6: IBM PAPR hints */
-	4 - 2,				/* length */
+	VECTOR_LENGTH(3),		/* length */
 	0,
 	0,
 	OV6_LINUX,
-
 };
 
 /* Old method - ELF header with PT_NOTE sections only works on BE */
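
As a sanity check, the new macros evaluate to exactly the literals they
replace; n is the vector count for NUM_VECTORS() and the option-byte count
for VECTOR_LENGTH():

#include <assert.h>

#define NUM_VECTORS(n)		((n) - 1)
#define VECTOR_LENGTH(n)	(1 + (n) - 2)

int main(void)
{
	assert(NUM_VECTORS(6) == 6 - 1);	/* 5 */
	assert(VECTOR_LENGTH(2) == 3 - 2);	/* 1: vectors 1, 3, 4 */
	assert(VECTOR_LENGTH(33) == 34 - 2);	/* 32: vector 2 */
	assert(VECTOR_LENGTH(18) == 19 - 2);	/* 17: vector 5 */
	assert(VECTOR_LENGTH(3) == 4 - 2);	/* 2: vector 6 */
	return 0;
}
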
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index f21897b..737c0d0 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1762,26 +1762,81 @@
 	return ret;
 }
 
-/*
- * We must return the syscall number to actually look up in the table.
- * This can be -1L to skip running any syscall at all.
+#ifdef CONFIG_SECCOMP
+static int do_seccomp(struct pt_regs *regs)
+{
+	if (!test_thread_flag(TIF_SECCOMP))
+		return 0;
+
+	/*
+	 * The ABI we present to seccomp tracers is that r3 contains
+	 * the syscall return value and orig_gpr3 contains the first
+	 * syscall parameter. This is different to the ptrace ABI where
+	 * both r3 and orig_gpr3 contain the first syscall parameter.
+	 */
+	regs->gpr[3] = -ENOSYS;
+
+	/*
+	 * We use the __ version here because we have already checked
+	 * TIF_SECCOMP. If this fails, there is nothing left to do: we
+	 * have already loaded -ENOSYS into r3, or seccomp has put
+	 * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
+	 */
+	if (__secure_computing())
+		return -1;
+
+	/*
+	 * The syscall was allowed by seccomp, restore the register
+	 * state to what ptrace and audit expect.
+	 * Note that we use orig_gpr3, which means a seccomp tracer can
+	 * modify the first syscall parameter (in orig_gpr3) and also
+	 * allow the syscall to proceed.
+	 */
+	regs->gpr[3] = regs->orig_gpr3;
+
+	return 0;
+}
+#else
+static inline int do_seccomp(struct pt_regs *regs) { return 0; }
+#endif /* CONFIG_SECCOMP */
+
+/**
+ * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
+ * @regs: the pt_regs of the task to trace (current)
+ *
+ * Performs various types of tracing on syscall entry. This includes seccomp,
+ * ptrace, syscall tracepoints and audit.
+ *
+ * The pt_regs are potentially visible to userspace via ptrace, so their
+ * contents are ABI.
+ *
+ * One or more of the tracers may modify the contents of pt_regs, in particular
+ * to modify arguments or even the syscall number itself.
+ *
+ * It's also possible that a tracer can choose to reject the system call. In
+ * that case, this function returns an invalid syscall number and puts
+ * an appropriate return value in regs->r3.
+ *
+ * Return: the (possibly changed) syscall number.
  */
 long do_syscall_trace_enter(struct pt_regs *regs)
 {
-	long ret = 0;
+	bool abort = false;
 
 	user_exit();
 
-	secure_computing_strict(regs->gpr[0]);
+	if (do_seccomp(regs))
+		return -1;
 
-	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
-	    tracehook_report_syscall_entry(regs))
+	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
 		/*
-		 * Tracing decided this syscall should not happen.
-		 * We'll return a bogus call number to get an ENOSYS
-		 * error, but leave the original number in regs->gpr[0].
+		 * The tracer may decide to abort the syscall, if so tracehook
+		 * will return !0. Note that the tracer may also just change
+		 * regs->gpr[0] to an invalid syscall number, that is handled
+		 * below on the exit path.
 		 */
-		ret = -1L;
+		abort = tracehook_report_syscall_entry(regs) != 0;
+	}
 
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
 		trace_sys_enter(regs, regs->gpr[0]);
@@ -1798,7 +1853,17 @@
 				    regs->gpr[5] & 0xffffffff,
 				    regs->gpr[6] & 0xffffffff);
 
-	return ret ?: regs->gpr[0];
+	if (abort || regs->gpr[0] >= NR_syscalls) {
+		/*
+		 * If we are aborting explicitly, or if the syscall number is
+		 * now invalid, set the return value to -ENOSYS.
+		 */
+		regs->gpr[3] = -ENOSYS;
+		return -1;
+	}
+
+	/* Return the possibly modified but valid syscall number */
+	return regs->gpr[0];
 }
 
 void do_syscall_trace_leave(struct pt_regs *regs)
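
Together with the entry_32.S/entry_64.S hunks earlier, this establishes a
simple contract: do_syscall_trace_enter() returns either a valid (possibly
modified) syscall number, or -1 with the user-visible result already in r3.
A compact model of that contract (NR_SYSCALLS and the regs struct are
simplified stand-ins):

#include <errno.h>
#include <stdio.h>

#define NR_SYSCALLS 379		/* illustrative */

struct regs { long gpr0, gpr3; };

static long trace_enter(struct regs *r, int tracer_rejects)
{
	if (tracer_rejects || (unsigned long)r->gpr0 >= NR_SYSCALLS) {
		r->gpr3 = -ENOSYS;	/* result for userspace */
		return -1;		/* tells asm to skip the table */
	}
	return r->gpr0;			/* possibly modified but valid */
}

int main(void)
{
	struct regs r = { .gpr0 = 99999 };
	long nr = trace_enter(&r, 0);

	if (nr < 0)
		printf("skip syscall, return %ld to userspace\n", r.gpr3);
	return 0;
}
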
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 7a488c1..84bf934 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -478,8 +478,9 @@
 
 	if (status == RTAS_BUSY) {
 		ms = 1;
-	} else if (status >= 9900 && status <= 9905) {
-		order = status - 9900;
+	} else if (status >= RTAS_EXTENDED_DELAY_MIN &&
+		   status <= RTAS_EXTENDED_DELAY_MAX) {
+		order = status - RTAS_EXTENDED_DELAY_MIN;
 		for (ms = 1; order > 0; order--)
 			ms *= 10;
 	}
@@ -584,6 +585,23 @@
 }
 EXPORT_SYMBOL(rtas_get_sensor);
 
+int rtas_get_sensor_fast(int sensor, int index, int *state)
+{
+	int token = rtas_token("get-sensor-state");
+	int rc;
+
+	if (token == RTAS_UNKNOWN_SERVICE)
+		return -ENOENT;
+
+	rc = rtas_call(token, 2, 2, state, sensor, index);
+	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
+				    rc <= RTAS_EXTENDED_DELAY_MAX));
+
+	if (rc < 0)
+		return rtas_error_rc(rc);
+	return rc;
+}
+
 bool rtas_indicator_present(int token, int *maxindex)
 {
 	int proplen, count, i;
@@ -641,7 +659,8 @@
 
 	rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
 
-	WARN_ON(rc == -2 || (rc >= 9900 && rc <= 9905));
+	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
+				    rc <= RTAS_EXTENDED_DELAY_MAX));
 
 	if (rc < 0)
 		return rtas_error_rc(rc);
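
The extended-delay statuses this code now names were previously the bare
constants 9900-9905; status 9900 + k asks the OS to wait 10^k milliseconds
before retrying. A standalone version of the conversion shown above:

#include <stdio.h>

#define RTAS_BUSY		-2
#define RTAS_EXTENDED_DELAY_MIN	9900
#define RTAS_EXTENDED_DELAY_MAX	9905

static unsigned int busy_delay_ms(int status)
{
	unsigned int ms = 0;

	if (status == RTAS_BUSY) {
		ms = 1;
	} else if (status >= RTAS_EXTENDED_DELAY_MIN &&
		   status <= RTAS_EXTENDED_DELAY_MAX) {
		int order = status - RTAS_EXTENDED_DELAY_MIN;

		for (ms = 1; order > 0; order--)
			ms *= 10;	/* 1, 10, ..., 100000 ms */
	}
	return ms;
}

int main(void)
{
	for (int s = RTAS_EXTENDED_DELAY_MIN; s <= RTAS_EXTENDED_DELAY_MAX; s++)
		printf("status %d -> %u ms\n", s, busy_delay_ms(s));
	return 0;
}

This also explains why rtas_get_sensor_fast() warns on those codes: a
caller that can't sleep must never see a status asking it to wait.
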
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index da50e0c..0dbee46 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -949,6 +949,11 @@
 		err |= __put_user(s->si_overrun, &d->si_overrun);
 		err |= __put_user(s->si_int, &d->si_int);
 		break;
+	case __SI_SYS >> 16:
+		err |= __put_user(ptr_to_compat(s->si_call_addr), &d->si_call_addr);
+		err |= __put_user(s->si_syscall, &d->si_syscall);
+		err |= __put_user(s->si_arch, &d->si_arch);
+		break;
 	case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
 	case __SI_MESGQ >> 16:
 		err |= __put_user(s->si_int, &d->si_int);
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index c7c24d2..20756df 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -74,6 +74,19 @@
 	"%s[%d]: bad frame in %s: %016lx nip %016lx lr %016lx\n";
 
 /*
+ * This computes a quad word aligned pointer inside the vmx_reserve array
+ * element. For historical reasons, sigcontext might not be quad word aligned,
+ * but the location we write the VMX regs to must be. See the comment in
+ * sigcontext for more detail.
+ */
+#ifdef CONFIG_ALTIVEC
+static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc)
+{
+	return (elf_vrreg_t __user *) (((unsigned long)sc->vmx_reserve + 15) & ~0xful);
+}
+#endif
+
+/*
  * Set up the sigcontext for the signal frame.
  */
 
@@ -90,7 +103,7 @@
 	 * v_regs pointer or not
 	 */
 #ifdef CONFIG_ALTIVEC
-	elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
+	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
 #endif
 	unsigned long msr = regs->msr;
 	long err = 0;
@@ -181,10 +194,8 @@
 	 * v_regs pointer or not.
 	 */
 #ifdef CONFIG_ALTIVEC
-	elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)
-		(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
-	elf_vrreg_t __user *tm_v_regs = (elf_vrreg_t __user *)
-		(((unsigned long)tm_sc->vmx_reserve + 15) & ~0xful);
+	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
+	elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
 #endif
 	unsigned long msr = regs->msr;
 	long err = 0;
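
sigcontext_vmx_regs() is the usual round-up-to-alignment idiom,
(p + 15) & ~0xf, hoisted into one place. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

static uintptr_t align16(uintptr_t p)
{
	return (p + 15) & ~(uintptr_t)0xf;	/* next 16-byte boundary */
}

int main(void)
{
	printf("%#lx -> %#lx\n", (unsigned long)0x1001,
	       (unsigned long)align16(0x1001));	/* 0x1010 */
	printf("%#lx -> %#lx\n", (unsigned long)0x1010,
	       (unsigned long)align16(0x1010));	/* already aligned */
	return 0;
}
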
diff --git a/arch/powerpc/kernel/trace_clock.c b/arch/powerpc/kernel/trace_clock.c
new file mode 100644
index 0000000..4917069
--- /dev/null
+++ b/arch/powerpc/kernel/trace_clock.c
@@ -0,0 +1,15 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2015 Naveen N. Rao, IBM Corporation
+ */
+
+#include <asm/trace_clock.h>
+#include <asm/time.h>
+
+u64 notrace trace_clock_ppc_tb(void)
+{
+	return get_tb();
+}
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
index 7874e8a..6d67e05 100644
--- a/arch/powerpc/lib/checksum_32.S
+++ b/arch/powerpc/lib/checksum_32.S
@@ -41,22 +41,6 @@
 	blr
 
 /*
- * Compute checksum of TCP or UDP pseudo-header:
- *   csum_tcpudp_magic(saddr, daddr, len, proto, sum)
- */	
-_GLOBAL(csum_tcpudp_magic)
-	rlwimi	r5,r6,16,0,15	/* put proto in upper half of len */
-	addc	r0,r3,r4	/* add 4 32-bit words together */
-	adde	r0,r0,r5
-	adde	r0,r0,r7
-	addze	r0,r0		/* add in final carry */
-	rlwinm	r3,r0,16,0,31	/* fold two halves together */
-	add	r3,r0,r3
-	not	r3,r3
-	srwi	r3,r3,16
-	blr
-
-/*
  * computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit)
  *
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 57a0720..f3ef354 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -45,27 +45,6 @@
 	blr
 
 /*
- * Compute checksum of TCP or UDP pseudo-header:
- *   csum_tcpudp_magic(r3=saddr, r4=daddr, r5=len, r6=proto, r7=sum)
- * No real gain trying to do this specially for 64 bit, but
- * the 32 bit addition may spill into the upper bits of
- * the doubleword so we still must fold it down from 64.
- */	
-_GLOBAL(csum_tcpudp_magic)
-	rlwimi	r5,r6,16,0,15	/* put proto in upper half of len */
-	addc	r0,r3,r4	/* add 4 32-bit words together */
-	adde	r0,r0,r5
-	adde	r0,r0,r7
-        rldicl  r4,r0,32,0      /* fold 64 bit value */
-        add     r0,r4,r0
-        srdi    r0,r0,32
-	rlwinm	r3,r0,16,0,31	/* fold two halves together */
-	add	r3,r0,r3
-	not	r3,r3
-	srwi	r3,r3,16
-	blr
-
-/*
  * Computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit).
  *
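
The removed csum_tcpudp_magic() routines (here and in checksum_32.S above)
computed the TCP/UDP pseudo-header checksum, now supplied by generic code.
A C sketch of the same computation: sum the two addresses, the
protocol/length word that the rlwimi built, and the running sum, then fold
to a 16-bit ones-complement result. Byte order is glossed over; the asm
operated on values already in the right order.

#include <stdint.h>
#include <stdio.h>

/* Fold a 64-bit accumulator down to a 16-bit ones-complement sum */
static uint16_t fold(uint64_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* end-around carry */
	return (uint16_t)~sum;
}

static uint16_t csum_tcpudp(uint32_t saddr, uint32_t daddr,
			    uint16_t len, uint8_t proto, uint32_t sum)
{
	uint64_t s = sum;

	s += saddr;
	s += daddr;
	s += ((uint32_t)proto << 16) | len;	/* rlwimi r5,r6,16,0,15 */
	return fold(s);
}

int main(void)
{
	printf("%#06x\n", csum_tcpudp(0xc0a80001, 0xc0a80002, 40, 6, 0));
	return 0;
}
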
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index 6813f80..2ef50c6 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -69,9 +69,15 @@
 LG_CACHELINE_BYTES = L1_CACHE_SHIFT
 CACHELINE_MASK = (L1_CACHE_BYTES-1)
 
+/*
+ * Use dcbz on the complete cache lines in the destination
+ * to set them to zero.  This requires that the destination
+ * area is cacheable.  -- paulus
+ */
 _GLOBAL(memset)
 	rlwimi	r4,r4,8,16,23
 	rlwimi	r4,r4,16,0,15
+
 	addi	r6,r3,-4
 	cmplwi	0,r5,4
 	blt	7f
@@ -80,7 +86,29 @@
 	andi.	r0,r6,3
 	add	r5,r0,r5
 	subf	r6,r0,r6
-	srwi	r0,r5,2
+	cmplwi	0,r4,0
+	bne	2f	/* Use normal procedure if r4 is not zero */
+
+	clrlwi	r7,r6,32-LG_CACHELINE_BYTES
+	add	r8,r7,r5
+	srwi	r9,r8,LG_CACHELINE_BYTES
+	addic.	r9,r9,-1	/* total number of complete cachelines */
+	ble	2f
+	xori	r0,r7,CACHELINE_MASK & ~3
+	srwi.	r0,r0,2
+	beq	3f
+	mtctr	r0
+4:	stwu	r4,4(r6)
+	bdnz	4b
+3:	mtctr	r9
+	li	r7,4
+10:	dcbz	r7,r6
+	addi	r6,r6,CACHELINE_BYTES
+	bdnz	10b
+	clrlwi	r5,r8,32-LG_CACHELINE_BYTES
+	addi	r5,r5,4
+
+2:	srwi	r0,r5,2
 	mtctr	r0
 	bdz	6f
 1:	stwu	r4,4(r6)
@@ -94,12 +122,91 @@
 	bdnz	8b
 	blr
 
+/*
+ * This version uses dcbz on the complete cache lines in the
+ * destination area to reduce memory traffic.  This requires that
+ * the destination area is cacheable.
+ * We only use this version if the source and dest don't overlap.
+ * -- paulus.
+ */
 _GLOBAL(memmove)
 	cmplw	0,r3,r4
 	bgt	backwards_memcpy
 	/* fall through */
 
 _GLOBAL(memcpy)
+	add	r7,r3,r5		/* test if the src & dst overlap */
+	add	r8,r4,r5
+	cmplw	0,r4,r7
+	cmplw	1,r3,r8
+	crand	0,0,4			/* cr0.lt &= cr1.lt */
+	blt	generic_memcpy		/* if regions overlap */
+
+	addi	r4,r4,-4
+	addi	r6,r3,-4
+	neg	r0,r3
+	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
+	beq	58f
+
+	cmplw	0,r5,r0			/* is this more than total to do? */
+	blt	63f			/* if not much to do */
+	andi.	r8,r0,3			/* get it word-aligned first */
+	subf	r5,r0,r5
+	mtctr	r8
+	beq+	61f
+70:	lbz	r9,4(r4)		/* do some bytes */
+	addi	r4,r4,1
+	addi	r6,r6,1
+	stb	r9,3(r6)
+	bdnz	70b
+61:	srwi.	r0,r0,2
+	mtctr	r0
+	beq	58f
+72:	lwzu	r9,4(r4)		/* do some words */
+	stwu	r9,4(r6)
+	bdnz	72b
+
+58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
+	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
+	li	r11,4
+	mtctr	r0
+	beq	63f
+53:
+	dcbz	r11,r6
+	COPY_16_BYTES
+#if L1_CACHE_BYTES >= 32
+	COPY_16_BYTES
+#if L1_CACHE_BYTES >= 64
+	COPY_16_BYTES
+	COPY_16_BYTES
+#if L1_CACHE_BYTES >= 128
+	COPY_16_BYTES
+	COPY_16_BYTES
+	COPY_16_BYTES
+	COPY_16_BYTES
+#endif
+#endif
+#endif
+	bdnz	53b
+
+63:	srwi.	r0,r5,2
+	mtctr	r0
+	beq	64f
+30:	lwzu	r0,4(r4)
+	stwu	r0,4(r6)
+	bdnz	30b
+
+64:	andi.	r0,r5,3
+	mtctr	r0
+	beq+	65f
+	addi	r4,r4,3
+	addi	r6,r6,3
+40:	lbzu	r0,1(r4)
+	stbu	r0,1(r6)
+	bdnz	40b
+65:	blr
+
+_GLOBAL(generic_memcpy)
 	srwi.	r7,r5,3
 	addi	r6,r3,-4
 	addi	r4,r4,-4
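
Both the new memset() zero path and the new memcpy() body follow the same
shape: handle unaligned leading bytes, process whole cache lines (dcbz
establishes a zeroed line without reading memory), then finish the tail. A
C model of the zeroing variant, with cacheline_zero() standing in for dcbz:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define LINE 32		/* L1_CACHE_BYTES on many 32-bit parts */

static void cacheline_zero(void *p)
{
	memset(p, 0, LINE);	/* hardware does this in one dcbz */
}

static void memset0(uint8_t *dst, size_t len)
{
	/* head: bytes up to the first cache-line boundary */
	while (len && ((uintptr_t)dst & (LINE - 1))) {
		*dst++ = 0;
		len--;
	}
	/* body: whole cache lines */
	while (len >= LINE) {
		cacheline_zero(dst);
		dst += LINE;
		len -= LINE;
	}
	/* tail */
	while (len--)
		*dst++ = 0;
}

int main(void)
{
	uint8_t buf[200];

	memset(buf, 0xff, sizeof(buf));
	memset0(buf + 3, 150);
	printf("buf[3]=%u buf[152]=%u buf[153]=%u\n",
	       buf[3], buf[152], buf[153]);	/* 0 0 255 */
	return 0;
}

This is also why the memcpy() version first falls back to generic_memcpy()
when the regions overlap: dcbz would destroy not-yet-copied source bytes.
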
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 9c90e66..354ba3c 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -112,7 +112,7 @@
 
 	tsize = __ilog2(size) - 10;
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
 	if ((flags & _PAGE_NO_CACHE) == 0)
 		flags |= _PAGE_COHERENT;
 #endif
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 463174a..3b49e32 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -701,7 +701,7 @@
 
 #endif /* CONFIG_PPC_64K_PAGES */
 
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
 
 /*****************************************************************************
  *                                                                           *
@@ -993,7 +993,7 @@
 	b	ht64_bail
 
 
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
 
 
 /*****************************************************************************
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5ec987f..aee7017 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -640,7 +640,7 @@
 
 static void __init htab_finish_init(void)
 {
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
 	patch_branch(ht64_call_hpte_insert1,
 		ppc_function_entry(ppc_md.hpte_insert),
 		BRANCH_SET_LINK);
@@ -653,7 +653,7 @@
 	patch_branch(ht64_call_hpte_updatepp,
 		ppc_function_entry(ppc_md.hpte_updatepp),
 		BRANCH_SET_LINK);
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
 
 	patch_branch(htab_call_hpte_insert1,
 		ppc_function_entry(ppc_md.hpte_insert),
@@ -1151,12 +1151,12 @@
 		check_paca_psize(ea, mm, psize, user_region);
 #endif /* CONFIG_PPC_64K_PAGES */
 
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
 	if (psize == MMU_PAGE_64K)
 		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
 				     flags, ssize);
 	else
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
 	{
 		int spp = subpage_protection(mm, ea);
 		if (access & spp)
@@ -1264,12 +1264,12 @@
 		update_flags |= HPTE_LOCAL_UPDATE;
 
 	/* Hash it in */
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
 	if (mm->context.user_psize == MMU_PAGE_64K)
 		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
 				     update_flags, ssize);
 	else
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
 		rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags,
 				    ssize, subpage_protection(mm, ea));
 
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index bb0bd70..06c1452 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -808,14 +808,6 @@
 	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
 		return -EINVAL;
 
-#ifdef CONFIG_SPU_FS_64K_LS
-	/* Disable support for 64K huge pages when 64K SPU local store
-	 * support is enabled as the current implementation conflicts.
-	 */
-	if (shift == PAGE_SHIFT_64K)
-		return -EINVAL;
-#endif /* CONFIG_SPU_FS_64K_LS */
-
 	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);
 
 	/* Return if huge page size has already been setup */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0f11819..e1fe333 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -414,17 +414,17 @@
 		return;
 	}
 #endif
-#ifdef CONFIG_BOOKE
-	{
+#if defined(CONFIG_8xx) || defined(CONFIG_PPC64)
+	/* On 8xx there is no need to kmap since highmem is not supported */
+	__flush_dcache_icache(page_address(page));
+#else
+	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
 		void *start = kmap_atomic(page);
 		__flush_dcache_icache(start);
 		kunmap_atomic(start);
+	} else {
+		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 	}
-#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
-	/* On 8xx there is no need to kmap since highmem is not supported */
-	__flush_dcache_icache(page_address(page)); 
-#else
-	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 #endif
 }
 EXPORT_SYMBOL(flush_dcache_icache_page);
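
The flush_dcache_icache_page() rework trades an #ifdef for IS_ENABLED(),
the kernel macro that folds a config option to a compile-time 0 or 1 so
dead branches are discarded yet still type-checked. A crude userspace
imitation (the real macro also tolerates undefined config symbols, and
CONFIG_BOOKE_ENABLED is invented for the sketch):

#include <stdio.h>

#define CONFIG_BOOKE_ENABLED 0	/* flip to 1 to model a BookE build */
#define IS_ENABLED(x) (x)

int main(void)
{
	/* models: IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *) */
	if (IS_ENABLED(CONFIG_BOOKE_ENABLED) ||
	    sizeof(long long) > sizeof(void *))
		printf("kmap path (page may lack a permanent mapping)\n");
	else
		printf("physical-address flush path\n");
	return 0;
}
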
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 5e80621..8b9502a 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -225,7 +225,7 @@
 	for (i = 0; i < distance_ref_points_depth; i++) {
 		const __be32 *entry;
 
-		entry = &associativity[be32_to_cpu(distance_ref_points[i])];
+		entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
 		distance_lookup_table[nid][i] = of_read_number(entry, 1);
 	}
 }
@@ -248,8 +248,12 @@
 		nid = -1;
 
 	if (nid > 0 &&
-	    of_read_number(associativity, 1) >= distance_ref_points_depth)
-		initialize_distance_lookup_table(nid, associativity);
+		of_read_number(associativity, 1) >= distance_ref_points_depth) {
+		/*
+		 * Skip the length field and pass the start of the associativity array
+		 */
+		initialize_distance_lookup_table(nid, associativity + 1);
+	}
 
 out:
 	return nid;
@@ -507,6 +511,12 @@
 
 		if (nid == 0xffff || nid >= MAX_NUMNODES)
 			nid = default_nid;
+
+		if (nid > 0) {
+			index = drmem->aa_index * aa->array_sz;
+			initialize_distance_lookup_table(nid,
+							&aa->arrays[index]);
+		}
 	}
 
 	return nid;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 876232d..e92cb21 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -149,17 +149,7 @@
 #endif /* !CONFIG_PPC_MMU_NOHASH */
 	}
 
-#ifdef CONFIG_PPC_BOOK3E_64
-	/*
-	 * With hardware tablewalk, a sync is needed to ensure that
-	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
-	 * mappings, we can't tolerate spurious faults, so make sure
-	 * the new PTE will be seen the first time.
-	 */
-	mb();
-#else
 	smp_wmb();
-#endif
 	return 0;
 }
 
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 6e450ca..8a32a2b 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -41,9 +41,9 @@
 	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
 
 static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
-					 unsigned long slot)
+					 unsigned long entry)
 {
-	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
+	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | entry;
 }
 
 static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
@@ -249,11 +249,24 @@
 static inline void patch_slb_encoding(unsigned int *insn_addr,
 				      unsigned int immed)
 {
-	int insn = (*insn_addr & 0xffff0000) | immed;
+
+	/*
+	 * This function patches either an li or a cmpldi instruction with
+	 * a new immediate value. This relies on the fact that both li
+	 * (which is actually addi) and cmpldi take a 16-bit immediate
+	 * value, and it is situated in the same location in the instruction,
+	 * ie. bits 16-31 (Big endian bit order) or the lower 16 bits.
+	 * The signedness of the immediate operand differs between the two
+	 * instructions; however, this code only ever patches a small value,
+	 * much less than 1 << 15, so we can get away with it.
+	 * To patch the value we read the existing instruction, clear the
+	 * immediate value, and or in our new value, then write the instruction
+	 * back.
+	 */
+	unsigned int insn = (*insn_addr & 0xffff0000) | immed;
 	patch_instruction(insn_addr, insn);
 }
 
-extern u32 slb_compare_rr_to_size[];
 extern u32 slb_miss_kernel_load_linear[];
 extern u32 slb_miss_kernel_load_io[];
 extern u32 slb_compare_rr_to_size[];
@@ -309,12 +322,11 @@
 	lflags = SLB_VSID_KERNEL | linear_llp;
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
-	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
+	/* Invalidate the entire SLB (even entry 0) & all the ERATS */
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
 	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
-
 	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
 	/* For the boot cpu, we're running on the stack in init_thread_union,
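
patch_slb_encoding() relies on li and cmpldi keeping their immediate in
the low 16 bits of the instruction word, so patching is a mask-and-or. A
standalone check with a hand-assembled li r10,0 (0x39400000):

#include <stdint.h>
#include <stdio.h>

static uint32_t patch_imm(uint32_t insn, uint16_t immed)
{
	/* keep opcode and register fields, replace the immediate */
	return (insn & 0xffff0000) | immed;
}

int main(void)
{
	uint32_t li_r10 = 0x39400000;	/* li r10,0 == addi r10,0,0 */

	printf("%#010x\n", patch_imm(li_r10, 0x0100));	/* li r10,256 */
	return 0;
}
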
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 765b419..e418558 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -308,11 +308,11 @@
 	 *
 	 * MAS6:IND should be already set based on MAS4
 	 */
-1:	lbarx	r15,0,r11
 	lhz	r10,PACAPACAINDEX(r13)
-	cmpdi	r15,0
-	cmpdi	cr1,r15,1	/* set cr1.eq = 0 for non-recursive */
 	addi	r10,r10,1
+	crclr	cr1*4+eq	/* set cr1.eq = 0 for non-recursive */
+1:	lbarx	r15,0,r11
+	cmpdi	r15,0
 	bne	2f
 	stbcx.	r10,0,r11
 	bne	1b
@@ -320,9 +320,9 @@
 	.subsection 1
 2:	cmpd	cr1,r15,r10	/* recursive lock due to mcheck/crit/etc? */
 	beq	cr1,3b		/* unlock will happen if cr1.eq = 0 */
-	lbz	r15,0(r11)
+10:	lbz	r15,0(r11)
 	cmpdi	r15,0
-	bne	2b
+	bne	10b
 	b	1b
 	.previous
 
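The reordering hoists the PACA-index load, the ticket computation and the
crclr out of the lbarx/stbcx. sequence, keeping unrelated loads out of the
reservation window. The byte stored is (cpu_index + 1), so a machine-check
or critical interrupt that re-enters the handler can recognise its own
lock instead of deadlocking. A rough C11-atomics model of that recursion
check (the spin/ticket details are simplified):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool tlb_lock(atomic_char *lock, char ticket)
{
	char expected = 0;

	while (!atomic_compare_exchange_weak(lock, &expected, ticket)) {
		if (expected == ticket)
			return false;	/* recursive entry: don't unlock later */
		expected = 0;		/* spin until the lock is free */
	}
	return true;			/* we own the lock and must unlock */
}

int main(void)
{
	atomic_char lock = 0;
	char ticket = 1;	/* cpu 0 */

	printf("first take: %d\n", tlb_lock(&lock, ticket));	/* 1 */
	printf("recursive : %d\n", tlb_lock(&lock, ticket));	/* 0 */
	return 0;
}
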
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 962fe7b..4b32e94 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -207,7 +207,7 @@
 	unsigned int mmcr0;
 
 	/* set the PMM bit (see comment below) */
-	mtmsrd(mfmsr() | MSR_PMM);
+	mtmsr(mfmsr() | MSR_PMM);
 
 	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
 		if (ctr[i].enabled) {
@@ -377,7 +377,7 @@
 	is_kernel = get_kernel(pc, mmcra);
 
 	/* set the PMM bit (see comment below) */
-	mtmsrd(mfmsr() | MSR_PMM);
+	mtmsr(mfmsr() | MSR_PMM);
 
 	/* Check that the SIAR  valid bit in MMCRA is set to 1. */
 	if ((mmcra & MMCRA_SIAR_VALID_MASK) == MMCRA_SIAR_VALID_MASK)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index d90893b..b0382f3 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -53,7 +53,7 @@
 
 	/* BHRB bits */
 	u64				bhrb_filter;	/* BHRB HW branch filter */
-	int				bhrb_users;
+	unsigned int			bhrb_users;
 	void				*bhrb_context;
 	struct	perf_branch_stack	bhrb_stack;
 	struct	perf_branch_entry	bhrb_entries[BHRB_MAX_ENTRIES];
@@ -369,8 +369,8 @@
 	if (!ppmu->bhrb_nr)
 		return;
 
+	WARN_ON_ONCE(!cpuhw->bhrb_users);
 	cpuhw->bhrb_users--;
-	WARN_ON_ONCE(cpuhw->bhrb_users < 0);
 	perf_sched_cb_dec(event->ctx->pmu);
 
 	if (!cpuhw->disabled && !cpuhw->bhrb_users) {
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index df95629..527c8b9 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -416,7 +416,7 @@
 }
 
 static struct attribute *event_to_desc_attr(struct hv_24x7_event_data *event,
-				int nonce)
+					    int nonce)
 {
 	int nl, dl;
 	char *name = event_name(event, &nl);
@@ -444,7 +444,7 @@
 }
 
 static ssize_t event_data_to_attrs(unsigned ix, struct attribute **attrs,
-		struct hv_24x7_event_data *event, int nonce)
+				   struct hv_24x7_event_data *event, int nonce)
 {
 	unsigned i;
 
@@ -512,7 +512,7 @@
 }
 
 static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2,
-					size_t s2, unsigned d2)
+		       size_t s2, unsigned d2)
 {
 	int r = memord(v1, s1, v2, s2);
 
@@ -526,7 +526,7 @@
 }
 
 static int event_uniq_add(struct rb_root *root, const char *name, int nl,
-				unsigned domain)
+			  unsigned domain)
 {
 	struct rb_node **new = &(root->rb_node), *parent = NULL;
 	struct event_uniq *data;
@@ -650,8 +650,8 @@
 #define MAX_4K (SIZE_MAX / 4096)
 
 static int create_events_from_catalog(struct attribute ***events_,
-		struct attribute ***event_descs_,
-		struct attribute ***event_long_descs_)
+				      struct attribute ***event_descs_,
+				      struct attribute ***event_long_descs_)
 {
 	unsigned long hret;
 	size_t catalog_len, catalog_page_len, event_entry_count,
@@ -1008,8 +1008,8 @@
 };
 
 static void log_24x7_hcall(struct hv_24x7_request_buffer *request_buffer,
-			struct hv_24x7_data_result_buffer *result_buffer,
-			unsigned long ret)
+			   struct hv_24x7_data_result_buffer *result_buffer,
+			   unsigned long ret)
 {
 	struct hv_24x7_request *req;
 
@@ -1026,7 +1026,7 @@
  * Start the process for a new H_GET_24x7_DATA hcall.
  */
 static void init_24x7_request(struct hv_24x7_request_buffer *request_buffer,
-			struct hv_24x7_data_result_buffer *result_buffer)
+			      struct hv_24x7_data_result_buffer *result_buffer)
 {
 
 	memset(request_buffer, 0, 4096);
@@ -1041,7 +1041,7 @@
  * by 'init_24x7_request()' and 'add_event_to_24x7_request()'.
  */
 static int make_24x7_request(struct hv_24x7_request_buffer *request_buffer,
-			struct hv_24x7_data_result_buffer *result_buffer)
+			     struct hv_24x7_data_result_buffer *result_buffer)
 {
 	unsigned long ret;
 
@@ -1104,7 +1104,6 @@
 	unsigned long ret;
 	struct hv_24x7_request_buffer *request_buffer;
 	struct hv_24x7_data_result_buffer *result_buffer;
-	struct hv_24x7_result *resb;
 
 	BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
 	BUILD_BUG_ON(sizeof(*result_buffer) > 4096);
@@ -1125,8 +1124,7 @@
 	}
 
 	/* process result from hcall */
-	resb = &result_buffer->results[0];
-	*count = be64_to_cpu(resb->elements[0].element_data[0]);
+	*count = be64_to_cpu(result_buffer->results[0].elements[0].element_data[0]);
 
 out:
 	put_cpu_var(hv_24x7_reqb);
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index 5aa3f4b..48bf38d 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -7,8 +7,8 @@
 	select PPC_PCI_CHOICE
 	select FSL_PCI if PCI
 	select ARCH_WANT_OPTIONAL_GPIOLIB
-	select USB_EHCI_BIG_ENDIAN_MMIO
-	select USB_EHCI_BIG_ENDIAN_DESC
+	select USB_EHCI_BIG_ENDIAN_MMIO if USB_EHCI_HCD
+	select USB_EHCI_BIG_ENDIAN_DESC if USB_EHCI_HCD
 
 config MPC5121_ADS
 	bool "Freescale MPC5121E ADS"
diff --git a/arch/powerpc/platforms/85xx/c293pcie.c b/arch/powerpc/platforms/85xx/c293pcie.c
index 84476b6..61bc851 100644
--- a/arch/powerpc/platforms/85xx/c293pcie.c
+++ b/arch/powerpc/platforms/85xx/c293pcie.c
@@ -66,10 +66,6 @@
 	.probe			= c293_pcie_probe,
 	.setup_arch		= c293_pcie_setup_arch,
 	.init_IRQ		= c293_pcie_pic_init,
-#ifdef CONFIG_PCI
-	.pcibios_fixup_bus	= fsl_pcibios_fixup_bus,
-	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
-#endif
 	.get_irq		= mpic_get_irq,
 	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index bd839dc..b395571 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -153,6 +153,8 @@
 	"fsl,T1023RDB",
 	"fsl,T1024QDS",
 	"fsl,T1024RDB",
+	"fsl,T1040D4RDB",
+	"fsl,T1042D4RDB",
 	"fsl,T1040QDS",
 	"fsl,T1042QDS",
 	"fsl,T1040RDB",
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 2f23133..b0ac177 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -57,21 +57,6 @@
 	  Units on machines implementing the Broadband Processor
 	  Architecture.
 
-config SPU_FS_64K_LS
-	bool "Use 64K pages to map SPE local  store"
-	# we depend on PPC_MM_SLICES for now rather than selecting
-	# it because we depend on hugetlbfs hooks being present. We
-	# will fix that when the generic code has been improved to
-	# not require hijacking hugetlbfs hooks.
-	depends on SPU_FS && PPC_MM_SLICES && !PPC_64K_PAGES
-	default y
-	select PPC_HAS_HASH_64K
-	help
-	  This option causes SPE local stores to be mapped in process
-	  address spaces using 64K pages while the rest of the kernel
-	  uses 4K pages. This can improve performances of applications
-	  using multiple SPEs by lowering the TLB pressure on them.
-
 config SPU_BASE
 	bool
 	default n
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index d966bbe..5038fd5 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -239,23 +239,6 @@
 	unsigned long address = (unsigned long)vmf->virtual_address;
 	unsigned long pfn, offset;
 
-#ifdef CONFIG_SPU_FS_64K_LS
-	struct spu_state *csa = &ctx->csa;
-	int psize;
-
-	/* Check what page size we are using */
-	psize = get_slice_psize(vma->vm_mm, address);
-
-	/* Some sanity checking */
-	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));
-
-	/* Wow, 64K, cool, we need to align the address though */
-	if (csa->use_big_pages) {
-		BUG_ON(vma->vm_start & 0xffff);
-		address &= ~0xfffful;
-	}
-#endif /* CONFIG_SPU_FS_64K_LS */
-
 	offset = vmf->pgoff << PAGE_SHIFT;
 	if (offset >= LS_SIZE)
 		return VM_FAULT_SIGBUS;
@@ -310,22 +293,6 @@
 
 static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
 {
-#ifdef CONFIG_SPU_FS_64K_LS
-	struct spu_context	*ctx = file->private_data;
-	struct spu_state	*csa = &ctx->csa;
-
-	/* Sanity check VMA alignment */
-	if (csa->use_big_pages) {
-		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
-			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
-			 vma->vm_pgoff);
-		if (vma->vm_start & 0xffff)
-			return -EINVAL;
-		if (vma->vm_pgoff & 0xf)
-			return -EINVAL;
-	}
-#endif /* CONFIG_SPU_FS_64K_LS */
-
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
@@ -336,25 +303,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_SPU_FS_64K_LS
-static unsigned long spufs_get_unmapped_area(struct file *file,
-		unsigned long addr, unsigned long len, unsigned long pgoff,
-		unsigned long flags)
-{
-	struct spu_context	*ctx = file->private_data;
-	struct spu_state	*csa = &ctx->csa;
-
-	/* If not using big pages, fallback to normal MM g_u_a */
-	if (!csa->use_big_pages)
-		return current->mm->get_unmapped_area(file, addr, len,
-						      pgoff, flags);
-
-	/* Else, try to obtain a 64K pages slice */
-	return slice_get_unmapped_area(addr, len, flags,
-				       MMU_PAGE_64K, 1);
-}
-#endif /* CONFIG_SPU_FS_64K_LS */
-
 static const struct file_operations spufs_mem_fops = {
 	.open			= spufs_mem_open,
 	.release		= spufs_mem_release,
@@ -362,9 +310,6 @@
 	.write			= spufs_mem_write,
 	.llseek			= generic_file_llseek,
 	.mmap			= spufs_mem_mmap,
-#ifdef CONFIG_SPU_FS_64K_LS
-	.get_unmapped_area	= spufs_get_unmapped_area,
-#endif
 };
 
 static int spufs_ps_fault(struct vm_area_struct *vma,
diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
index 1470699..b847e94 100644
--- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
+++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
@@ -31,7 +31,7 @@
 
 #include "spufs.h"
 
-static int spu_alloc_lscsa_std(struct spu_state *csa)
+int spu_alloc_lscsa(struct spu_state *csa)
 {
 	struct spu_lscsa *lscsa;
 	unsigned char *p;
@@ -48,7 +48,7 @@
 	return 0;
 }
 
-static void spu_free_lscsa_std(struct spu_state *csa)
+void spu_free_lscsa(struct spu_state *csa)
 {
 	/* Clear reserved bit before vfree. */
 	unsigned char *p;
@@ -61,123 +61,3 @@
 
 	vfree(csa->lscsa);
 }
-
-#ifdef CONFIG_SPU_FS_64K_LS
-
-#define SPU_64K_PAGE_SHIFT	16
-#define SPU_64K_PAGE_ORDER	(SPU_64K_PAGE_SHIFT - PAGE_SHIFT)
-#define SPU_64K_PAGE_COUNT	(1ul << SPU_64K_PAGE_ORDER)
-
-int spu_alloc_lscsa(struct spu_state *csa)
-{
-	struct page	**pgarray;
-	unsigned char	*p;
-	int		i, j, n_4k;
-
-	/* Check availability of 64K pages */
-	if (!spu_64k_pages_available())
-		goto fail;
-
-	csa->use_big_pages = 1;
-
-	pr_debug("spu_alloc_lscsa(csa=0x%p), trying to allocate 64K pages\n",
-		 csa);
-
-	/* First try to allocate our 64K pages. We need 5 of them
-	 * with the current implementation. In the future, we should try
-	 * to separate the lscsa with the actual local store image, thus
-	 * allowing us to require only 4 64K pages per context
-	 */
-	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) {
-		/* XXX This is likely to fail, we should use a special pool
-		 *     similar to what hugetlbfs does.
-		 */
-		csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL,
-						  SPU_64K_PAGE_ORDER);
-		if (csa->lscsa_pages[i] == NULL)
-			goto fail;
-	}
-
-	pr_debug(" success ! creating vmap...\n");
-
-	/* Now we need to create a vmalloc mapping of these for the kernel
-	 * and SPU context switch code to use. Currently, we stick to a
-	 * normal kernel vmalloc mapping, which in our case will be 4K
-	 */
-	n_4k = SPU_64K_PAGE_COUNT * SPU_LSCSA_NUM_BIG_PAGES;
-	pgarray = kmalloc(sizeof(struct page *) * n_4k, GFP_KERNEL);
-	if (pgarray == NULL)
-		goto fail;
-	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
-		for (j = 0; j < SPU_64K_PAGE_COUNT; j++)
-			/* We assume all the struct page's are contiguous
-			 * which should be hopefully the case for an order 4
-			 * allocation..
-			 */
-			pgarray[i * SPU_64K_PAGE_COUNT + j] =
-				csa->lscsa_pages[i] + j;
-	csa->lscsa = vmap(pgarray, n_4k, VM_USERMAP, PAGE_KERNEL);
-	kfree(pgarray);
-	if (csa->lscsa == NULL)
-		goto fail;
-
-	memset(csa->lscsa, 0, sizeof(struct spu_lscsa));
-
-	/* Set LS pages reserved to allow for user-space mapping.
-	 *
-	 * XXX isn't that a bit obsolete ? I think we should just
-	 * make sure the page count is high enough. Anyway, won't harm
-	 * for now
-	 */
-	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
-		SetPageReserved(vmalloc_to_page(p));
-
-	pr_debug(" all good !\n");
-
-	return 0;
-fail:
-	pr_debug("spufs: failed to allocate lscsa 64K pages, falling back\n");
-	spu_free_lscsa(csa);
-	return spu_alloc_lscsa_std(csa);
-}
-
-void spu_free_lscsa(struct spu_state *csa)
-{
-	unsigned char *p;
-	int i;
-
-	if (!csa->use_big_pages) {
-		spu_free_lscsa_std(csa);
-		return;
-	}
-	csa->use_big_pages = 0;
-
-	if (csa->lscsa == NULL)
-		goto free_pages;
-
-	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
-		ClearPageReserved(vmalloc_to_page(p));
-
-	vunmap(csa->lscsa);
-	csa->lscsa = NULL;
-
- free_pages:
-
-	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
-		if (csa->lscsa_pages[i])
-			__free_pages(csa->lscsa_pages[i], SPU_64K_PAGE_ORDER);
-}
-
-#else /* CONFIG_SPU_FS_64K_LS */
-
-int spu_alloc_lscsa(struct spu_state *csa)
-{
-	return spu_alloc_lscsa_std(csa);
-}
-
-void spu_free_lscsa(struct spu_state *csa)
-{
-	spu_free_lscsa_std(csa);
-}
-
-#endif /* !defined(CONFIG_SPU_FS_64K_LS) */
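For reference, the removed 64K path glued several order-4 allocations into one contiguous kernel virtual range. A minimal kernel-C sketch of that vmap() pattern — the helper name and parameters here are invented for illustration, not kernel API:

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /* Map nbig order-`order` allocations as one virtually contiguous range. */
    static void *map_big_pages(struct page **big, int nbig, int order)
    {
            unsigned int per = 1u << order;     /* 4K pages per big allocation */
            unsigned int n4k = nbig * per;
            struct page **arr;
            void *va;
            int i, j;

            arr = kmalloc_array(n4k, sizeof(*arr), GFP_KERNEL);
            if (!arr)
                    return NULL;
            for (i = 0; i < nbig; i++)
                    for (j = 0; j < per; j++)
                            /* struct pages within one allocation are contiguous */
                            arr[i * per + j] = big[i] + j;
            va = vmap(arr, n4k, VM_USERMAP, PAGE_KERNEL);
            kfree(arr);     /* the page array is only needed while mapping */
            return va;
    }

As in the removed code, the page array can be freed immediately after vmap() succeeds; only the pages themselves must stay allocated until vunmap().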
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 7cf0df8..3bb6acb 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -1394,11 +1394,19 @@
 			 */
 			if (pnv_eeh_get_pe(hose,
 				be64_to_cpu(frozen_pe_no), pe)) {
-				/* Try best to clear it */
 				pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
-					hose->global_number, frozen_pe_no);
+					hose->global_number, be64_to_cpu(frozen_pe_no));
 				pr_info("EEH: PHB location: %s\n",
 					eeh_pe_loc_get(phb_pe));
+
+				/* Dump PHB diag-data */
+				rc = opal_pci_get_phb_diag_data2(phb->opal_id,
+					phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
+				if (rc == OPAL_SUCCESS)
+					pnv_pci_dump_phb_diag_data(hose,
+							phb->diag.blob);
+
+				/* Try best to clear it */
 				opal_pci_eeh_freeze_clear(phb->opal_id,
 					frozen_pe_no,
 					OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c
index a8f49d3..d000f4e 100644
--- a/arch/powerpc/platforms/powernv/opal-hmi.c
+++ b/arch/powerpc/platforms/powernv/opal-hmi.c
@@ -35,9 +35,134 @@
 	struct list_head list;
 	struct OpalHMIEvent hmi_evt;
 };
+
+struct xstop_reason {
+	uint32_t xstop_reason;
+	const char *unit_failed;
+	const char *description;
+};
+
 static LIST_HEAD(opal_hmi_evt_list);
 static DEFINE_SPINLOCK(opal_hmi_evt_lock);
 
+static void print_core_checkstop_reason(const char *level,
+					struct OpalHMIEvent *hmi_evt)
+{
+	int i;
+	static const struct xstop_reason xstop_reason[] = {
+		{ CORE_CHECKSTOP_IFU_REGFILE, "IFU",
+				"RegFile core check stop" },
+		{ CORE_CHECKSTOP_IFU_LOGIC, "IFU", "Logic core check stop" },
+		{ CORE_CHECKSTOP_PC_DURING_RECOV, "PC",
+				"Core checkstop during recovery" },
+		{ CORE_CHECKSTOP_ISU_REGFILE, "ISU",
+				"RegFile core check stop (mapper error)" },
+		{ CORE_CHECKSTOP_ISU_LOGIC, "ISU", "Logic core check stop" },
+		{ CORE_CHECKSTOP_FXU_LOGIC, "FXU", "Logic core check stop" },
+		{ CORE_CHECKSTOP_VSU_LOGIC, "VSU", "Logic core check stop" },
+		{ CORE_CHECKSTOP_PC_RECOV_IN_MAINT_MODE, "PC",
+				"Recovery in maintenance mode" },
+		{ CORE_CHECKSTOP_LSU_REGFILE, "LSU",
+				"RegFile core check stop" },
+		{ CORE_CHECKSTOP_PC_FWD_PROGRESS, "PC",
+				"Forward Progress Error" },
+		{ CORE_CHECKSTOP_LSU_LOGIC, "LSU", "Logic core check stop" },
+		{ CORE_CHECKSTOP_PC_LOGIC, "PC", "Logic core check stop" },
+		{ CORE_CHECKSTOP_PC_HYP_RESOURCE, "PC",
+				"Hypervisor Resource error - core check stop" },
+		{ CORE_CHECKSTOP_PC_HANG_RECOV_FAILED, "PC",
+				"Hang Recovery Failed (core check stop)" },
+		{ CORE_CHECKSTOP_PC_AMBI_HANG_DETECTED, "PC",
+				"Ambiguous Hang Detected (unknown source)" },
+		{ CORE_CHECKSTOP_PC_DEBUG_TRIG_ERR_INJ, "PC",
+				"Debug Trigger Error inject" },
+		{ CORE_CHECKSTOP_PC_SPRD_HYP_ERR_INJ, "PC",
+				"Hypervisor check stop via SPRC/SPRD" },
+	};
+
+	/* Validity check */
+	if (!hmi_evt->u.xstop_error.xstop_reason) {
+		printk("%s	Unknown Core check stop.\n", level);
+		return;
+	}
+
+	printk("%s	CPU PIR: %08x\n", level,
+			be32_to_cpu(hmi_evt->u.xstop_error.u.pir));
+	for (i = 0; i < ARRAY_SIZE(xstop_reason); i++)
+		if (be32_to_cpu(hmi_evt->u.xstop_error.xstop_reason) &
+					xstop_reason[i].xstop_reason)
+			printk("%s	[Unit: %-3s] %s\n", level,
+					xstop_reason[i].unit_failed,
+					xstop_reason[i].description);
+}
+
+static void print_nx_checkstop_reason(const char *level,
+					struct OpalHMIEvent *hmi_evt)
+{
+	int i;
+	static const struct xstop_reason xstop_reason[] = {
+		{ NX_CHECKSTOP_SHM_INVAL_STATE_ERR, "DMA & Engine",
+					"SHM invalid state error" },
+		{ NX_CHECKSTOP_DMA_INVAL_STATE_ERR_1, "DMA & Engine",
+					"DMA invalid state error bit 15" },
+		{ NX_CHECKSTOP_DMA_INVAL_STATE_ERR_2, "DMA & Engine",
+					"DMA invalid state error bit 16" },
+		{ NX_CHECKSTOP_DMA_CH0_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 0 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH1_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 1 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH2_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 2 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH3_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 3 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH4_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 4 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH5_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 5 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH6_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 6 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH7_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 7 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CRB_UE, "DMA & Engine",
+					"UE error on CRB(CSB address, CCB)" },
+		{ NX_CHECKSTOP_DMA_CRB_SUE, "DMA & Engine",
+					"SUE error on CRB(CSB address, CCB)" },
+		{ NX_CHECKSTOP_PBI_ISN_UE, "PowerBus Interface",
+		"CRB Kill ISN received while holding ISN with UE error" },
+	};
+
+	/* Validity check */
+	if (!hmi_evt->u.xstop_error.xstop_reason) {
+		printk("%s	Unknown NX check stop.\n", level);
+		return;
+	}
+
+	printk("%s	NX checkstop on CHIP ID: %x\n", level,
+			be32_to_cpu(hmi_evt->u.xstop_error.u.chip_id));
+	for (i = 0; i < ARRAY_SIZE(xstop_reason); i++)
+		if (be32_to_cpu(hmi_evt->u.xstop_error.xstop_reason) &
+					xstop_reason[i].xstop_reason)
+			printk("%s	[Unit: %-3s] %s\n", level,
+					xstop_reason[i].unit_failed,
+					xstop_reason[i].description);
+}
+
+static void print_checkstop_reason(const char *level,
+					struct OpalHMIEvent *hmi_evt)
+{
+	switch (hmi_evt->u.xstop_error.xstop_type) {
+	case CHECKSTOP_TYPE_CORE:
+		print_core_checkstop_reason(level, hmi_evt);
+		break;
+	case CHECKSTOP_TYPE_NX:
+		print_nx_checkstop_reason(level, hmi_evt);
+		break;
+	case CHECKSTOP_TYPE_UNKNOWN:
+		printk("%s	Unknown Malfunction Alert.\n", level);
+		break;
+	}
+}
+
 static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt)
 {
 	const char *level, *sevstr, *error_info;
@@ -95,6 +220,13 @@
 		(hmi_evt->type == OpalHMI_ERROR_TFMR_PARITY))
 		printk("%s	TFMR: %016llx\n", level,
 						be64_to_cpu(hmi_evt->tfmr));
+
+	if (hmi_evt->version < OpalHMIEvt_V2)
+		return;
+
+	/* OpalHMIEvt_V2 and above provides reason for malfunction alert. */
+	if (hmi_evt->type == OpalHMI_ERROR_MALFUNC_ALERT)
+		print_checkstop_reason(level, hmi_evt);
 }
 
 static void hmi_event_handler(struct work_struct *work)
@@ -103,6 +235,8 @@
 	struct OpalHMIEvent *hmi_evt;
 	struct OpalHmiEvtNode *msg_node;
 	uint8_t disposition;
+	struct opal_msg msg;
+	int unrecoverable = 0;
 
 	spin_lock_irqsave(&opal_hmi_evt_lock, flags);
 	while (!list_empty(&opal_hmi_evt_list)) {
@@ -118,14 +252,53 @@
 
 		/*
 		 * Check if HMI event has been recovered or not. If not
-		 * then we can't continue, invoke panic.
+		 * then the kernel can't continue; we need to panic.
+		 * But before we do that, display every HMI event
+		 * on the list and set the unrecoverable flag to 1.
 		 */
 		if (disposition != OpalHMI_DISPOSITION_RECOVERED)
-			panic("Unrecoverable HMI exception");
+			unrecoverable = 1;
 
 		spin_lock_irqsave(&opal_hmi_evt_lock, flags);
 	}
 	spin_unlock_irqrestore(&opal_hmi_evt_lock, flags);
+
+	if (unrecoverable) {
+		int ret;
+
+		/* Pull all HMI events from OPAL before we panic. */
+		while (opal_get_msg(__pa(&msg), sizeof(msg)) == OPAL_SUCCESS) {
+			u32 type;
+
+			type = be32_to_cpu(msg.msg_type);
+
+			/* skip if not HMI event */
+			if (type != OPAL_MSG_HMI_EVT)
+				continue;
+
+			/* HMI event info starts from param[0] */
+			hmi_evt = (struct OpalHMIEvent *)&msg.params[0];
+			print_hmi_event_info(hmi_evt);
+		}
+
+		/*
+		 * Unrecoverable HMI exception. We need to inform BMC/OCC
+		 * about this error so that it can collect relevant data
+		 * for error analysis before rebooting.
+		 */
+		ret = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR,
+			"Unrecoverable HMI exception");
+		if (ret == OPAL_UNSUPPORTED) {
+			pr_emerg("Reboot type %d not supported\n",
+						OPAL_REBOOT_PLATFORM_ERROR);
+		}
+
+		/*
+		 * Fall through and panic if opal_cec_reboot2() returns
+		 * OPAL_UNSUPPORTED.
+		 */
+		panic("Unrecoverable HMI exception");
+	}
 }
 
 static DECLARE_WORK(hmi_event_work, hmi_event_handler);
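The decode added above is a plain table walk: one event may carry several reason bits, and every set bit is reported. A standalone sketch of the same pattern, with invented reason bits and strings:

    #include <stdint.h>
    #include <stdio.h>

    struct reason { uint32_t bit; const char *unit; const char *desc; };

    static const struct reason reasons[] = {
            { 0x1, "IFU", "Logic core check stop" },
            { 0x2, "PC",  "Core checkstop during recovery" },
            { 0x4, "LSU", "RegFile core check stop" },
    };

    static void print_reasons(uint32_t mask)
    {
            size_t i;

            if (!mask) {                    /* mirrors the validity check */
                    puts("Unknown core check stop");
                    return;
            }
            for (i = 0; i < sizeof(reasons) / sizeof(reasons[0]); i++)
                    if (mask & reasons[i].bit)
                            printf("[Unit: %-3s] %s\n",
                                   reasons[i].unit, reasons[i].desc);
    }

    int main(void)
    {
            print_reasons(0x1 | 0x4);       /* two reasons, two lines */
            return 0;
    }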
diff --git a/arch/powerpc/platforms/powernv/opal-power.c b/arch/powerpc/platforms/powernv/opal-power.c
index ac46c2c..58dc330 100644
--- a/arch/powerpc/platforms/powernv/opal-power.c
+++ b/arch/powerpc/platforms/powernv/opal-power.c
@@ -9,9 +9,12 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt)	"opal-power: "	fmt
+
 #include <linux/kernel.h>
 #include <linux/reboot.h>
 #include <linux/notifier.h>
+#include <linux/of.h>
 
 #include <asm/opal.h>
 #include <asm/machdep.h>
@@ -19,30 +22,116 @@
 #define SOFT_OFF 0x00
 #define SOFT_REBOOT 0x01
 
-static int opal_power_control_event(struct notifier_block *nb,
-				    unsigned long msg_type, void *msg)
+/* Detect EPOW event */
+static bool detect_epow(void)
 {
-	struct opal_msg *power_msg = msg;
+	u16 epow;
+	int i, rc;
+	__be16 epow_classes;
+	__be16 opal_epow_status[OPAL_SYSEPOW_MAX] = {0};
+
+	/*
+	 * Check for an EPOW event. The kernel sends the supported EPOW
+	 * classes to OPAL; OPAL returns EPOW information along with the
+	 * classes present.
+	 */
+	epow_classes = cpu_to_be16(OPAL_SYSEPOW_MAX);
+	rc = opal_get_epow_status(opal_epow_status, &epow_classes);
+	if (rc != OPAL_SUCCESS) {
+		pr_err("Failed to get EPOW event information\n");
+		return false;
+	}
+
+	/* Look for EPOW events present */
+	for (i = 0; i < be16_to_cpu(epow_classes); i++) {
+		epow = be16_to_cpu(opal_epow_status[i]);
+
+		/* Filter events which do not need shutdown. */
+		if (i == OPAL_SYSEPOW_POWER)
+			epow &= ~(OPAL_SYSPOWER_CHNG | OPAL_SYSPOWER_FAIL |
+					OPAL_SYSPOWER_INCL);
+		if (epow)
+			return true;
+	}
+
+	return false;
+}
+
+/* Check for existing EPOW, DPO events */
+static bool poweroff_pending(void)
+{
+	int rc;
+	__be64 opal_dpo_timeout;
+
+	/* Check for DPO event */
+	rc = opal_get_dpo_status(&opal_dpo_timeout);
+	if (rc == OPAL_SUCCESS) {
+		pr_info("Existing DPO event detected.\n");
+		return true;
+	}
+
+	/* Check for EPOW event */
+	if (detect_epow()) {
+		pr_info("Existing EPOW event detected.\n");
+		return true;
+	}
+
+	return false;
+}
+
+/* OPAL power-control events notifier */
+static int opal_power_control_event(struct notifier_block *nb,
+					unsigned long msg_type, void *msg)
+{
 	uint64_t type;
 
-	type = be64_to_cpu(power_msg->params[0]);
-
-	switch (type) {
-	case SOFT_REBOOT:
-		pr_info("OPAL: reboot requested\n");
-		orderly_reboot();
+	switch (msg_type) {
+	case OPAL_MSG_EPOW:
+		if (detect_epow()) {
+			pr_info("EPOW msg received. Powering off system\n");
+			orderly_poweroff(true);
+		}
 		break;
-	case SOFT_OFF:
-		pr_info("OPAL: poweroff requested\n");
+	case OPAL_MSG_DPO:
+		pr_info("DPO msg received. Powering off system\n");
 		orderly_poweroff(true);
 		break;
+	case OPAL_MSG_SHUTDOWN:
+		type = be64_to_cpu(((struct opal_msg *)msg)->params[0]);
+		switch (type) {
+		case SOFT_REBOOT:
+			pr_info("Reboot requested\n");
+			orderly_reboot();
+			break;
+		case SOFT_OFF:
+			pr_info("Poweroff requested\n");
+			orderly_poweroff(true);
+			break;
+		default:
+			pr_err("Unknown power-control type %llu\n", type);
+		}
+		break;
 	default:
-		pr_err("OPAL: power control type unexpected %016llx\n", type);
+		pr_err("Unknown OPAL message type %lu\n", msg_type);
 	}
 
 	return 0;
 }
 
+/* OPAL EPOW event notifier block */
+static struct notifier_block opal_epow_nb = {
+	.notifier_call	= opal_power_control_event,
+	.next		= NULL,
+	.priority	= 0,
+};
+
+/* OPAL DPO event notifier block */
+static struct notifier_block opal_dpo_nb = {
+	.notifier_call	= opal_power_control_event,
+	.next		= NULL,
+	.priority	= 0,
+};
+
+/* OPAL power-control event notifier block */
 static struct notifier_block opal_power_control_nb = {
 	.notifier_call	= opal_power_control_event,
 	.next		= NULL,
@@ -51,16 +140,40 @@
 
 static int __init opal_power_control_init(void)
 {
-	int ret;
+	int ret, supported = 0;
+	struct device_node *np;
 
+	/* Register OPAL power-control events notifier */
 	ret = opal_message_notifier_register(OPAL_MSG_SHUTDOWN,
-					     &opal_power_control_nb);
-	if (ret) {
-		pr_err("%s: Can't register OPAL event notifier (%d)\n",
-				__func__, ret);
-		return ret;
+						&opal_power_control_nb);
+	if (ret)
+		pr_err("Failed to register SHUTDOWN notifier, ret = %d\n", ret);
+
+	/* Determine OPAL EPOW, DPO support */
+	np = of_find_node_by_path("/ibm,opal/epow");
+	if (np) {
+		supported = of_device_is_compatible(np, "ibm,opal-v3-epow");
+		of_node_put(np);
 	}
 
+	if (!supported)
+		return 0;
+	pr_info("OPAL EPOW, DPO support detected.\n");
+
+	/* Register EPOW event notifier */
+	ret = opal_message_notifier_register(OPAL_MSG_EPOW, &opal_epow_nb);
+	if (ret)
+		pr_err("Failed to register EPOW notifier, ret = %d\n", ret);
+
+	/* Register DPO event notifier */
+	ret = opal_message_notifier_register(OPAL_MSG_DPO, &opal_dpo_nb);
+	if (ret)
+		pr_err("Failed to register DPO notifier, ret = %d\n", ret);
+
+	/* Check for any pending EPOW or DPO events. */
+	if (poweroff_pending())
+		orderly_poweroff(true);
+
 	return 0;
 }
 machine_subsys_initcall(powernv, opal_power_control_init);
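One callback now serves three registrations, switching on the message type. A notifier_block embeds its own chain linkage (.next), so a single instance can sit on only one chain at a time — which is why the patch declares three blocks sharing one handler. A minimal sketch of the shape, with invented event names standing in for OPAL_MSG_*:

    #include <linux/notifier.h>

    enum { EV_EPOW = 1, EV_DPO, EV_SHUTDOWN };  /* stand-ins for OPAL_MSG_* */

    static int power_event(struct notifier_block *nb,
                           unsigned long msg_type, void *msg)
    {
            switch (msg_type) {
            case EV_EPOW:
                    /* re-check the condition before shutting down */
                    break;
            case EV_DPO:
                    /* power off unconditionally */
                    break;
            case EV_SHUTDOWN:
                    /* decode the payload, then reboot or power off */
                    break;
            default:
                    break;
            }
            return 0;
    }

    /* One block per registration; each can sit on only one chain. */
    static struct notifier_block epow_nb     = { .notifier_call = power_event };
    static struct notifier_block dpo_nb      = { .notifier_call = power_event };
    static struct notifier_block shutdown_nb = { .notifier_call = power_event };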
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index d6a7b82..b7a464f 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -202,6 +202,7 @@
 OPAL_CALL(opal_rtc_write,			OPAL_RTC_WRITE);
 OPAL_CALL(opal_cec_power_down,			OPAL_CEC_POWER_DOWN);
 OPAL_CALL(opal_cec_reboot,			OPAL_CEC_REBOOT);
+OPAL_CALL(opal_cec_reboot2,			OPAL_CEC_REBOOT2);
 OPAL_CALL(opal_read_nvram,			OPAL_READ_NVRAM);
 OPAL_CALL(opal_write_nvram,			OPAL_WRITE_NVRAM);
 OPAL_CALL(opal_handle_interrupt,		OPAL_HANDLE_INTERRUPT);
@@ -249,6 +250,7 @@
 OPAL_CALL(opal_pci_mask_pe_error,		OPAL_PCI_MASK_PE_ERROR);
 OPAL_CALL(opal_set_slot_led_status,		OPAL_SET_SLOT_LED_STATUS);
 OPAL_CALL(opal_get_epow_status,			OPAL_GET_EPOW_STATUS);
+OPAL_CALL(opal_get_dpo_status,			OPAL_GET_DPO_STATUS);
 OPAL_CALL(opal_set_system_attention_led,	OPAL_SET_SYSTEM_ATTENTION_LED);
 OPAL_CALL(opal_pci_next_error,			OPAL_PCI_NEXT_ERROR);
 OPAL_CALL(opal_pci_poll,			OPAL_PCI_POLL);
@@ -297,3 +299,5 @@
 OPAL_CALL(opal_flash_write,			OPAL_FLASH_WRITE);
 OPAL_CALL(opal_flash_erase,			OPAL_FLASH_ERASE);
 OPAL_CALL(opal_prd_msg,				OPAL_PRD_MSG);
+OPAL_CALL(opal_leds_get_ind,			OPAL_LEDS_GET_INDICATOR);
+OPAL_CALL(opal_leds_set_ind,			OPAL_LEDS_SET_INDICATOR);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index f084afa..230f3a7 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -441,6 +441,7 @@
 int opal_machine_check(struct pt_regs *regs)
 {
 	struct machine_check_event evt;
+	int ret;
 
 	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
 		return 0;
@@ -455,6 +456,40 @@
 
 	if (opal_recover_mce(regs, &evt))
 		return 1;
+
+	/*
+	 * Unrecovered machine check; we are heading down the panic path.
+	 *
+	 * We may have hit this MCE at a very early stage of kernel
+	 * initialization, even before opal-prd has started running. In
+	 * that case this MCE error may go unnoticed or unanalyzed if we
+	 * go down the panic path. We need to inform the BMC/OCC about
+	 * this error so that they can collect relevant data for error
+	 * analysis before rebooting.
+	 * Use opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR) to do so.
+	 * This function may not return on BMC-based systems.
+	 */
+	ret = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR,
+			"Unrecoverable Machine Check exception");
+	if (ret == OPAL_UNSUPPORTED) {
+		pr_emerg("Reboot type %d not supported\n",
+					OPAL_REBOOT_PLATFORM_ERROR);
+	}
+
+	/*
+	 * If we reached here, there are three possibilities:
+	 * 1. We are running on a firmware level that does not support
+	 *    opal_cec_reboot2().
+	 * 2. We are running on a firmware level that does not support
+	 *    the OPAL_REBOOT_PLATFORM_ERROR reboot type.
+	 * 3. We are running on an FSP-based system that does not need
+	 *    OPAL to trigger a checkstop explicitly for error analysis;
+	 *    the FSP PRD component would already have been notified of
+	 *    this error through other channels.
+	 *
+	 * In any case, just fall through; we are heading down the
+	 * panic path anyway.
+	 */
 	return 0;
 }
 
@@ -648,7 +683,7 @@
 
 static int __init opal_init(void)
 {
-	struct device_node *np, *consoles;
+	struct device_node *np, *consoles, *leds;
 	int rc;
 
 	opal_node = of_find_node_by_path("/ibm,opal");
@@ -689,6 +724,13 @@
 	/* Setup a heartbeat thread if requested by OPAL */
 	opal_init_heartbeat();
 
+	/* Create leds platform devices */
+	leds = of_find_node_by_path("/ibm,opal/leds");
+	if (leds) {
+		of_platform_device_create(leds, "opal_leds", NULL);
+		of_node_put(leds);
+	}
+
 	/* Create "opal" kobject under /sys/firmware */
 	rc = opal_sysfs_init();
 	if (rc == 0) {
@@ -841,3 +883,6 @@
 EXPORT_SYMBOL_GPL(opal_tpo_read);
 EXPORT_SYMBOL_GPL(opal_tpo_write);
 EXPORT_SYMBOL_GPL(opal_i2c_request);
+/* Export these symbols for PowerNV LED class driver */
+EXPORT_SYMBOL_GPL(opal_leds_get_ind);
+EXPORT_SYMBOL_GPL(opal_leds_set_ind);
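The leds hunk above follows the usual lookup/create/put sequence for turning a device-tree node into a platform device. A sketch of that sequence in isolation (the function name is invented):

    #include <linux/of.h>
    #include <linux/of_platform.h>

    static void create_from_dt(const char *path, const char *bus_id)
    {
            struct device_node *np;

            np = of_find_node_by_path(path);        /* takes a reference */
            if (!np)
                    return;         /* node absent: firmware lacks the feature */
            of_platform_device_create(np, bus_id, NULL);
            of_node_put(np);        /* the lookup reference is no longer needed */
    }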
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 85cbc96..2927cd5 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -140,11 +140,9 @@
 		return;
 	}
 
-	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc)) {
-		pr_warn("%s: PE %d was assigned on PHB#%x\n",
-			__func__, pe_no, phb->hose->global_number);
-		return;
-	}
+	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
+		pr_debug("%s: PE %d was reserved on PHB#%x\n",
+			 __func__, pe_no, phb->hose->global_number);
 
 	phb->ioda.pe_array[pe_no].phb = phb;
 	phb->ioda.pe_array[pe_no].pe_number = pe_no;
@@ -231,61 +229,60 @@
 	return -EIO;
 }
 
-static void pnv_ioda2_reserve_m64_pe(struct pnv_phb *phb)
+static void pnv_ioda2_reserve_dev_m64_pe(struct pci_dev *pdev,
+					 unsigned long *pe_bitmap)
 {
-	resource_size_t sgsz = phb->ioda.m64_segsize;
-	struct pci_dev *pdev;
+	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+	struct pnv_phb *phb = hose->private_data;
 	struct resource *r;
-	int base, step, i;
+	resource_size_t base, sgsz, start, end;
+	int segno, i;
 
-	/*
-	 * Root bus always has full M64 range and root port has
-	 * M64 range used in reality. So we're checking root port
-	 * instead of root bus.
-	 */
-	list_for_each_entry(pdev, &phb->hose->bus->devices, bus_list) {
-		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
-			r = &pdev->resource[PCI_BRIDGE_RESOURCES + i];
-			if (!r->parent ||
-			    !pnv_pci_is_mem_pref_64(r->flags))
-				continue;
+	base = phb->ioda.m64_base;
+	sgsz = phb->ioda.m64_segsize;
+	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+		r = &pdev->resource[i];
+		if (!r->parent || !pnv_pci_is_mem_pref_64(r->flags))
+			continue;
 
-			base = (r->start - phb->ioda.m64_base) / sgsz;
-			for (step = 0; step < resource_size(r) / sgsz; step++)
-				pnv_ioda_reserve_pe(phb, base + step);
+		start = _ALIGN_DOWN(r->start - base, sgsz);
+		end = _ALIGN_UP(r->end - base, sgsz);
+		for (segno = start / sgsz; segno < end / sgsz; segno++) {
+			if (pe_bitmap)
+				set_bit(segno, pe_bitmap);
+			else
+				pnv_ioda_reserve_pe(phb, segno);
 		}
 	}
 }
 
-static int pnv_ioda2_pick_m64_pe(struct pnv_phb *phb,
-				 struct pci_bus *bus, int all)
+static void pnv_ioda2_reserve_m64_pe(struct pci_bus *bus,
+				     unsigned long *pe_bitmap,
+				     bool all)
 {
-	resource_size_t segsz = phb->ioda.m64_segsize;
 	struct pci_dev *pdev;
-	struct resource *r;
+
+	list_for_each_entry(pdev, &bus->devices, bus_list) {
+		pnv_ioda2_reserve_dev_m64_pe(pdev, pe_bitmap);
+
+		if (all && pdev->subordinate)
+			pnv_ioda2_reserve_m64_pe(pdev->subordinate,
+						 pe_bitmap, all);
+	}
+}
+
+static int pnv_ioda2_pick_m64_pe(struct pci_bus *bus, bool all)
+{
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	struct pnv_phb *phb = hose->private_data;
 	struct pnv_ioda_pe *master_pe, *pe;
 	unsigned long size, *pe_alloc;
-	bool found;
-	int start, i, j;
+	int i;
 
 	/* Root bus shouldn't use M64 */
 	if (pci_is_root_bus(bus))
 		return IODA_INVALID_PE;
 
-	/* We support only one M64 window on each bus */
-	found = false;
-	pci_bus_for_each_resource(bus, r, i) {
-		if (r && r->parent &&
-		    pnv_pci_is_mem_pref_64(r->flags)) {
-			found = true;
-			break;
-		}
-	}
-
-	/* No M64 window found ? */
-	if (!found)
-		return IODA_INVALID_PE;
-
 	/* Allocate bitmap */
 	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
 	pe_alloc = kzalloc(size, GFP_KERNEL);
@@ -295,35 +292,8 @@
 		return IODA_INVALID_PE;
 	}
 
-	/*
-	 * Figure out reserved PE numbers by the PE
-	 * the its child PEs.
-	 */
-	start = (r->start - phb->ioda.m64_base) / segsz;
-	for (i = 0; i < resource_size(r) / segsz; i++)
-		set_bit(start + i, pe_alloc);
-
-	if (all)
-		goto done;
-
-	/*
-	 * If the PE doesn't cover all subordinate buses,
-	 * we need subtract from reserved PEs for children.
-	 */
-	list_for_each_entry(pdev, &bus->devices, bus_list) {
-		if (!pdev->subordinate)
-			continue;
-
-		pci_bus_for_each_resource(pdev->subordinate, r, i) {
-			if (!r || !r->parent ||
-			    !pnv_pci_is_mem_pref_64(r->flags))
-				continue;
-
-			start = (r->start - phb->ioda.m64_base) / segsz;
-			for (j = 0; j < resource_size(r) / segsz ; j++)
-				clear_bit(start + j, pe_alloc);
-                }
-        }
+	/* Figure out reserved PE numbers by the PE */
+	pnv_ioda2_reserve_m64_pe(bus, pe_alloc, all);
 
 	/*
 	 * the current bus might not own M64 window and that's all
@@ -339,7 +309,6 @@
 	 * Figure out the master PE and put all slave PEs to master
 	 * PE's list to form compound PE.
 	 */
-done:
 	master_pe = NULL;
 	i = -1;
 	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe, i + 1)) <
@@ -653,7 +622,7 @@
 		pdev = pe->pdev->bus->self;
 #ifdef CONFIG_PCI_IOV
 	else if (pe->flags & PNV_IODA_PE_VF)
-		pdev = pe->parent_dev->bus->self;
+		pdev = pe->parent_dev;
 #endif /* CONFIG_PCI_IOV */
 	while (pdev) {
 		struct pci_dn *pdn = pci_get_pdn(pdev);
@@ -732,7 +701,7 @@
 		parent = parent->bus->self;
 	}
 
-	opal_pci_eeh_freeze_set(phb->opal_id, pe->pe_number,
+	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
 				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
 
 	/* Disassociate PE in PELT */
@@ -946,8 +915,9 @@
 		res2 = *res;
 		res->start += size * offset;
 
-		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (enabling %d VFs shifted by %d)\n",
-			 i, &res2, res, num_vfs, offset);
+		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n",
+			 i, &res2, res, (offset > 0) ? "En" : "Dis",
+			 num_vfs, offset);
 		pci_update_resource(dev, i + PCI_IOV_RESOURCES);
 	}
 	return 0;
@@ -1050,7 +1020,7 @@
  * subordinate PCI devices and buses. The second type of PE is normally
  * originated by PCIe-to-PCI bridges or PLX switch downstream ports.
  */
-static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
+static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
 {
 	struct pci_controller *hose = pci_bus_to_host(bus);
 	struct pnv_phb *phb = hose->private_data;
@@ -1059,7 +1029,7 @@
 
 	/* Check if PE is determined by M64 */
 	if (phb->pick_m64_pe)
-		pe_num = phb->pick_m64_pe(phb, bus, all);
+		pe_num = phb->pick_m64_pe(bus, all);
 
 	/* The PE number isn't pinned by M64 */
 	if (pe_num == IODA_INVALID_PE)
@@ -1117,12 +1087,12 @@
 {
 	struct pci_dev *dev;
 
-	pnv_ioda_setup_bus_PE(bus, 0);
+	pnv_ioda_setup_bus_PE(bus, false);
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		if (dev->subordinate) {
 			if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
-				pnv_ioda_setup_bus_PE(dev->subordinate, 1);
+				pnv_ioda_setup_bus_PE(dev->subordinate, true);
 			else
 				pnv_ioda_setup_PEs(dev->subordinate);
 		}
@@ -1147,7 +1117,7 @@
 
 		/* M64 layout might affect PE allocation */
 		if (phb->reserve_m64_pe)
-			phb->reserve_m64_pe(phb);
+			phb->reserve_m64_pe(hose->bus, NULL, true);
 
 		pnv_ioda_setup_PEs(hose->bus);
 	}
@@ -1590,6 +1560,7 @@
 
 	pe = &phb->ioda.pe_array[pdn->pe_number];
 	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
+	set_dma_offset(&pdev->dev, pe->tce_bypass_base);
 	set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
 	/*
 	 * Note: iommu_add_device() will fail here as
@@ -1620,19 +1591,18 @@
 	if (bypass) {
 		dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
 		set_dma_ops(&pdev->dev, &dma_direct_ops);
-		set_dma_offset(&pdev->dev, pe->tce_bypass_base);
 	} else {
 		dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
 		set_dma_ops(&pdev->dev, &dma_iommu_ops);
-		set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
 	}
 	*pdev->dev.dma_mask = dma_mask;
 	return 0;
 }
 
-static u64 pnv_pci_ioda_dma_get_required_mask(struct pnv_phb *phb,
-					      struct pci_dev *pdev)
+static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
 {
+	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+	struct pnv_phb *phb = hose->private_data;
 	struct pci_dn *pdn = pci_get_pdn(pdev);
 	struct pnv_ioda_pe *pe;
 	u64 end, mask;
@@ -1659,6 +1629,7 @@
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
+		set_dma_offset(&dev->dev, pe->tce_bypass_base);
 		iommu_add_device(&dev->dev);
 
 		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
@@ -3057,6 +3028,7 @@
        .window_alignment = pnv_pci_window_alignment,
        .reset_secondary_bus = pnv_pci_reset_secondary_bus,
        .dma_set_mask = pnv_pci_ioda_dma_set_mask,
+       .dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask,
        .shutdown = pnv_pci_ioda_shutdown,
 };
 
@@ -3203,7 +3175,6 @@
 
 	/* Setup TCEs */
 	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
-	phb->dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask;
 
 	/* Setup MSI support */
 	pnv_pci_init_ioda_msis(phb);
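pnv_ioda2_reserve_dev_m64_pe() above converts a resource window into a run of M64 segment numbers by aligning the window start down and the end up to the segment size, so a partially covered segment is still reserved. A standalone sketch of that arithmetic with invented values:

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_DOWN_(x, a) ((x) & ~((a) - 1))
    #define ALIGN_UP_(x, a)   (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            uint64_t m64_base = 0x100000000ull;   /* window base */
            uint64_t sgsz     = 0x02000000ull;    /* 32 MiB segments */
            uint64_t r_start  = 0x103000000ull;   /* resource inside window */
            uint64_t r_end    = 0x106ffffffull;

            uint64_t start = ALIGN_DOWN_(r_start - m64_base, sgsz);
            uint64_t end   = ALIGN_UP_(r_end - m64_base, sgsz);

            /* prints segments 1, 2, 3 for these values */
            for (uint64_t seg = start / sgsz; seg < end / sgsz; seg++)
                    printf("reserve PE segment %llu\n",
                           (unsigned long long)seg);
            return 0;
    }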
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index bc6d4e0..9b2480b 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -761,17 +761,6 @@
 		phb->dma_dev_setup(phb, pdev);
 }
 
-u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
-{
-	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
-	struct pnv_phb *phb = hose->private_data;
-
-	if (phb && phb->dma_get_required_mask)
-		return phb->dma_get_required_mask(phb, pdev);
-
-	return __dma_get_required_mask(&pdev->dev);
-}
-
 void pnv_pci_shutdown(void)
 {
 	struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 8ef2d28..c8ff50e 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -105,13 +105,12 @@
 			 unsigned int hwirq, unsigned int virq,
 			 unsigned int is_64, struct msi_msg *msg);
 	void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
-	u64 (*dma_get_required_mask)(struct pnv_phb *phb,
-				     struct pci_dev *pdev);
 	void (*fixup_phb)(struct pci_controller *hose);
 	u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
 	int (*init_m64)(struct pnv_phb *phb);
-	void (*reserve_m64_pe)(struct pnv_phb *phb);
-	int (*pick_m64_pe)(struct pnv_phb *phb, struct pci_bus *bus, int all);
+	void (*reserve_m64_pe)(struct pci_bus *bus,
+			       unsigned long *pe_bitmap, bool all);
+	int (*pick_m64_pe)(struct pci_bus *bus, bool all);
 	int (*get_pe_state)(struct pnv_phb *phb, int pe_no);
 	void (*freeze_pe)(struct pnv_phb *phb, int pe_no);
 	int (*unfreeze_pe)(struct pnv_phb *phb, int pe_no, int opt);
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 9269e30..6dbc0a1 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -12,15 +12,9 @@
 #ifdef CONFIG_PCI
 extern void pnv_pci_init(void);
 extern void pnv_pci_shutdown(void);
-extern u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev);
 #else
 static inline void pnv_pci_init(void) { }
 static inline void pnv_pci_shutdown(void) { }
-
-static inline u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
-{
-	return 0;
-}
 #endif
 
 extern u32 pnv_get_supported_cpuidle_states(void);
diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
index 6eb808f..5dcbdea 100644
--- a/arch/powerpc/platforms/powernv/rng.c
+++ b/arch/powerpc/platforms/powernv/rng.c
@@ -128,7 +128,7 @@
 
 	pr_info_once("Registering arch random hook.\n");
 
-	ppc_md.get_random_long = powernv_get_random_long;
+	ppc_md.get_random_seed = powernv_get_random_long;
 
 	return 0;
 }
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 53737e0..685b3cb 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -165,14 +165,6 @@
 {
 }
 
-static u64 pnv_dma_get_required_mask(struct device *dev)
-{
-	if (dev_is_pci(dev))
-		return pnv_pci_dma_get_required_mask(to_pci_dev(dev));
-
-	return __dma_get_required_mask(dev);
-}
-
 static void pnv_shutdown(void)
 {
 	/* Let the PCI code clear up IODA tables */
@@ -243,6 +235,13 @@
 	} else {
 		/* Primary waits for the secondaries to have reached OPAL */
 		pnv_kexec_wait_secondaries_down();
+
+		/*
+		 * We might be running as little-endian - now that interrupts
+		 * are disabled, reset the HILE bit to big-endian so we don't
+		 * take interrupts in the wrong endianness later.
+		 */
+		opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
 	}
 }
 #endif /* CONFIG_KEXEC */
@@ -314,7 +313,6 @@
 	.machine_shutdown	= pnv_shutdown,
 	.power_save             = power7_idle,
 	.calibrate_decr		= generic_calibrate_decr,
-	.dma_get_required_mask	= pnv_dma_get_required_mask,
 #ifdef CONFIG_KEXEC
 	.kexec_cpu_down		= pnv_kexec_cpu_down,
 #endif
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index f60f80a..503a73f 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -190,7 +190,7 @@
 
 	hid0 = mfspr(SPRN_HID0);
 	hid0 &= ~HID0_POWER8_DYNLPARDIS;
-	mtspr(SPRN_HID0, hid0);
+	update_power8_hid0(hid0);
 	update_hid_in_slw(hid0);
 
 	while (mfspr(SPRN_HID0) & mask)
@@ -227,7 +227,7 @@
 	/* Write new mode */
 	hid0  = mfspr(SPRN_HID0);
 	hid0 |= HID0_POWER8_DYNLPARDIS | split_parms[i].value;
-	mtspr(SPRN_HID0, hid0);
+	update_power8_hid0(hid0);
 	update_hid_in_slw(hid0);
 
 	/* Wait for it to happen */
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 0ced387..e9ff44c 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -92,13 +92,12 @@
 		return NULL;
 
 	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
-	new_prop->value = kmalloc(prop->length, GFP_KERNEL);
+	new_prop->value = kmemdup(prop->value, prop->length, GFP_KERNEL);
 	if (!new_prop->name || !new_prop->value) {
 		dlpar_free_drconf_property(new_prop);
 		return NULL;
 	}
 
-	memcpy(new_prop->value, prop->value, prop->length);
 	new_prop->length = prop->length;
 
 	/* Convert the property to cpu endian-ness */
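kmemdup() folds the former kmalloc()+memcpy() pair into one call whose failure is checked the same way as the allocation's. A sketch of the idiom, with an invented struct and function:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct property_copy {
            char *name;
            void *value;
            int length;
    };

    static int copy_prop(struct property_copy *dst, const char *name,
                         const void *value, int length)
    {
            dst->name = kstrdup(name, GFP_KERNEL);
            dst->value = kmemdup(value, length, GFP_KERNEL);
            if (!dst->name || !dst->value) {
                    kfree(dst->name);       /* kfree(NULL) is a no-op */
                    kfree(dst->value);
                    return -ENOMEM;
            }
            dst->length = length;
            return 0;
    }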
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 10510de..0946b98 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -1253,11 +1253,10 @@
 		}
 	}
 
-	/* fall back on iommu ops, restore table pointer with ops */
+	/* fall back on iommu ops */
 	if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
 		dev_info(dev, "Restoring 32-bit DMA via iommu\n");
 		set_dma_ops(dev, &dma_iommu_ops);
-		pci_dma_dev_setup_pSeriesLP(pdev);
 	}
 
 check_mask:
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 02e4a17..3b6647e 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -189,7 +189,8 @@
 	int state;
 	int critical;
 
-	status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
+	status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
+				      &state);
 
 	if (state > 3)
 		critical = 1;		/* Time Critical */
diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
index e096087..31ca557 100644
--- a/arch/powerpc/platforms/pseries/rng.c
+++ b/arch/powerpc/platforms/pseries/rng.c
@@ -38,7 +38,7 @@
 
 	pr_info("Registering arch random hook.\n");
 
-	ppc_md.get_random_long = pseries_get_random_long;
+	ppc_md.get_random_seed = pseries_get_random_long;
 
 	return 0;
 }
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index df6a704..39a74fa 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -254,19 +254,26 @@
 static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
 {
 	struct of_reconfig_data *rd = data;
-	struct device_node *np = rd->dn;
-	struct pci_dn *pci = NULL;
+	struct device_node *parent, *np = rd->dn;
+	struct pci_dn *pdn;
 	int err = NOTIFY_OK;
 
 	switch (action) {
 	case OF_RECONFIG_ATTACH_NODE:
-		pci = np->parent->data;
-		if (pci) {
-			update_dn_pci_info(np, pci->phb);
-
-			/* Create EEH device for the OF node */
-			eeh_dev_init(PCI_DN(np), pci->phb);
+		parent = of_get_parent(np);
+		pdn = parent ? PCI_DN(parent) : NULL;
+		if (pdn) {
+			/* Create pdn and EEH device */
+			update_dn_pci_info(np, pdn->phb);
+			eeh_dev_init(PCI_DN(np), pdn->phb);
 		}
+
+		of_node_put(parent);
+		break;
+	case OF_RECONFIG_DETACH_NODE:
+		pdn = PCI_DN(np);
+		if (pdn)
+			list_del(&pdn->list);
 		break;
 	default:
 		err = NOTIFY_DONE;
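The ATTACH_NODE case above balances the reference taken by of_get_parent() on every path. A sketch of that refcount discipline (the helper is invented; of_node_put(NULL) is a no-op, which keeps the error path simple):

    #include <linux/of.h>

    static void *parent_data(struct device_node *np)
    {
            struct device_node *parent;
            void *data = NULL;

            parent = of_get_parent(np);     /* refcounted; may return NULL */
            if (parent)
                    data = parent->data;
            of_node_put(parent);            /* safe even when parent is NULL */
            return data;
    }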
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 4f78695..e2ea519 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -147,7 +147,7 @@
 	spin_lock_irqsave(&cpm_muram_lock, flags);
 	cpm_muram_info.alignment = align;
 	start = rh_alloc(&cpm_muram_info, size, "commproc");
-	memset(cpm_muram_addr(start), 0, size);
+	memset_io(cpm_muram_addr(start), 0, size);
 	spin_unlock_irqrestore(&cpm_muram_lock, flags);
 
 	return start;
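MURAM returned by cpm_muram_addr() is mapped I/O space, so clearing it has to go through the I/O accessors; plain memset() is only defined for normal cacheable memory and can fault or misbehave on some platforms. A sketch (the helper is invented):

    #include <linux/io.h>

    static void clear_device_ram(void __iomem *base, size_t size)
    {
            /* memset_io() issues the byte stores through I/O accessors,
             * which is the only correct way to write __iomem memory. */
            memset_io(base, 0, size);
    }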
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 90bcdfe..b734863 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -313,20 +313,11 @@
 	set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map);
 }
 
-static void dma_dev_setup_dart(struct device *dev)
-{
-	/* We only have one iommu table on the mac for now, which makes
-	 * things simple. Setup all PCI devices to point to this table
-	 */
-	if (get_dma_ops(dev) == &dma_direct_ops)
-		set_dma_offset(dev, DART_U4_BYPASS_BASE);
-	else
-		set_iommu_table_base(dev, &iommu_table_dart);
-}
-
 static void pci_dma_dev_setup_dart(struct pci_dev *dev)
 {
-	dma_dev_setup_dart(&dev->dev);
+	if (dart_is_u4)
+		set_dma_offset(&dev->dev, DART_U4_BYPASS_BASE);
+	set_iommu_table_base(&dev->dev, &iommu_table_dart);
 }
 
 static void pci_dma_bus_setup_dart(struct pci_bus *bus)
@@ -370,7 +361,6 @@
 		dev_info(dev, "Using 32-bit DMA via iommu\n");
 		set_dma_ops(dev, &dma_iommu_ops);
 	}
-	dma_dev_setup_dart(dev);
 
 	*dev->dma_mask = dma_mask;
 	return 0;
diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
index af3c144..52a93dc 100644
--- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
@@ -132,7 +132,7 @@
 	struct pci_controller *phb;
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (IS_ERR(mem)) {
+	if (!mem) {
 		dev_err(dev, "Unable to get mmio space\n");
 		return -EINVAL;
 	}
@@ -157,7 +157,7 @@
 		goto out;
 
 	ppc4xx_hsta_msi.irq_map = kmalloc(sizeof(int) * irq_count, GFP_KERNEL);
-	if (IS_ERR(ppc4xx_hsta_msi.irq_map)) {
+	if (!ppc4xx_hsta_msi.irq_map) {
 		ret = -ENOMEM;
 		goto out1;
 	}
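Both fixes above correct the failure check: platform_get_resource() and kmalloc() report failure with NULL, never with an ERR_PTR(), so the old IS_ERR() tests always evaluated false. A sketch of the correct check (the wrapper function is invented):

    #include <linux/errno.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    static int get_mmio(struct platform_device *pdev, struct resource **out)
    {
            struct resource *mem;

            mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!mem)               /* returns NULL on failure, never ERR_PTR */
                    return -EINVAL;
            *out = mem;
            return 0;
    }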
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index e599259..6ef1231 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1987,7 +1987,6 @@
 			case '^':
 				adrs -= size;
 				break;
-				break;
 			case '/':
 				if (nslash > 0)
 					adrs -= 1 << nslash;
@@ -2731,7 +2730,7 @@
 void dump_segments(void)
 {
 	int i;
-	unsigned long esid,vsid,valid;
+	unsigned long esid,vsid;
 	unsigned long llp;
 
 	printf("SLB contents of cpu 0x%x\n", smp_processor_id());
@@ -2739,10 +2738,9 @@
 	for (i = 0; i < mmu_slb_size; i++) {
 		asm volatile("slbmfee  %0,%1" : "=r" (esid) : "r" (i));
 		asm volatile("slbmfev  %0,%1" : "=r" (vsid) : "r" (i));
-		valid = (esid & SLB_ESID_V);
-		if (valid | esid | vsid) {
+		if (esid || vsid) {
 			printf("%02d %016lx %016lx", i, esid, vsid);
-			if (valid) {
+			if (esid & SLB_ESID_V) {
 				llp = vsid & SLB_VSID_LLP;
 				if (vsid & SLB_VSID_B_1T) {
 					printf("  1T  ESID=%9lx  VSID=%13lx LLP:%3lx \n",
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index adbe380..117fa5c 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -27,6 +27,7 @@
 #define __ATOMIC_OR	"lao"
 #define __ATOMIC_AND	"lan"
 #define __ATOMIC_ADD	"laa"
+#define __ATOMIC_XOR	"lax"
 #define __ATOMIC_BARRIER "bcr	14,0\n"
 
 #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -49,6 +50,7 @@
 #define __ATOMIC_OR	"or"
 #define __ATOMIC_AND	"nr"
 #define __ATOMIC_ADD	"ar"
+#define __ATOMIC_XOR	"xr"
 #define __ATOMIC_BARRIER "\n"
 
 #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -118,15 +120,17 @@
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
+#define ATOMIC_OP(op, OP)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER);	\
 }
 
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
-}
+ATOMIC_OP(and, AND)
+ATOMIC_OP(or, OR)
+ATOMIC_OP(xor, XOR)
+
+#undef ATOMIC_OP
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
@@ -167,6 +171,7 @@
 #define __ATOMIC64_OR	"laog"
 #define __ATOMIC64_AND	"lang"
 #define __ATOMIC64_ADD	"laag"
+#define __ATOMIC64_XOR	"laxg"
 #define __ATOMIC64_BARRIER "bcr	14,0\n"
 
 #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -189,6 +194,7 @@
 #define __ATOMIC64_OR	"ogr"
 #define __ATOMIC64_AND	"ngr"
 #define __ATOMIC64_ADD	"agr"
+#define __ATOMIC64_XOR	"xgr"
 #define __ATOMIC64_BARRIER "\n"
 
 #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -247,16 +253,6 @@
 	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
 }
 
-static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
-{
-	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
-}
-
-static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
-{
-	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
-}
-
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
 static inline long long atomic64_cmpxchg(atomic64_t *v,
@@ -270,6 +266,17 @@
 	return old;
 }
 
+#define ATOMIC64_OP(op, OP)						\
+static inline void atomic64_##op(long i, atomic64_t *v)			\
+{									\
+	__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER);	\
+}
+
+ATOMIC64_OP(and, AND)
+ATOMIC64_OP(or, OR)
+ATOMIC64_OP(xor, XOR)
+
+#undef ATOMIC64_OP
 #undef __ATOMIC64_LOOP
 
 static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
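Callers elsewhere in this series move from the removed mask helpers to the generated bitwise ops; the mapping is mechanical. A sketch, assuming the atomic_andnot() this series introduces alongside atomic_or():

    #include <linux/atomic.h>

    static void set_flag(atomic_t *flags, int bit)
    {
            atomic_or(bit, flags);          /* was atomic_set_mask(bit, flags) */
    }

    static void clear_flag(atomic_t *flags, int bit)
    {
            atomic_andnot(bit, flags);      /* was atomic_clear_mask(bit, flags) */
    }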
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index e6f8615..d48fe01 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -42,12 +42,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	___p1;								\
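WRITE_ONCE()/READ_ONCE() are the typed replacements for ACCESS_ONCE() used above; each forces a single, non-torn access that the compiler may not merge, split, or refetch. A minimal sketch:

    #include <linux/compiler.h>

    static int published;

    static void publish(int v)
    {
            WRITE_ONCE(published, v);       /* exactly one store, not torn */
    }

    static int consume(void)
    {
            return READ_ONCE(published);    /* exactly one load, not refetched */
    }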
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 69972b7..7f9fd5e 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -12,14 +12,29 @@
  * We use a brcl 0,2 instruction for jump labels at compile time so it
  * can be easily distinguished from a hotpatch generated instruction.
  */
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("0:	brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
 		".pushsection __jump_table, \"aw\"\n"
 		".balign 8\n"
 		".quad 0b, %l[label], %0\n"
 		".popsection\n"
-		: : "X" (key) : : label);
+		: : "X" (&((char *)key)[branch]) : : label);
+
+	return false;
+label:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("0:	brcl 15, %l[label]\n"
+		".pushsection __jump_table, \"aw\"\n"
+		".balign 8\n"
+		".quad 0b, %l[label], %0\n"
+		".popsection\n"
+		: : "X" (&((char *)key)[branch]) : : label);
+
 	return false;
 label:
 	return true;
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index c9dac21..083b05f 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -61,7 +61,7 @@
 {
 	struct insn old, new;
 
-	if (type == JUMP_LABEL_ENABLE) {
+	if (type == JUMP_LABEL_JMP) {
 		jump_label_make_nop(entry, &old);
 		jump_label_make_branch(entry, &new);
 	} else {
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 52524b9..017c3a9 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -378,7 +378,7 @@
 	 * increase the "sequence" counter to avoid the race of an
 	 * etr event and the complete recovery against get_sync_clock.
 	 */
-	atomic_clear_mask(0x80000000, sw_ptr);
+	atomic_andnot(0x80000000, sw_ptr);
 	atomic_inc(sw_ptr);
 }
 
@@ -389,7 +389,7 @@
 static void enable_sync_clock(void)
 {
 	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
-	atomic_set_mask(0x80000000, sw_ptr);
+	atomic_or(0x80000000, sw_ptr);
 }
 
 /*
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index b277d50..5c2c169 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -173,20 +173,20 @@
 
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
-			  &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+		    &vcpu->arch.sie_block->cpuflags);
 	vcpu->arch.sie_block->lctl = 0x0000;
 	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
 
@@ -199,7 +199,7 @@
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 {
-	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
@@ -928,7 +928,7 @@
 	spin_unlock(&li->lock);
 
 	/* clear pending external calls set by sigp interpretation facility */
-	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
+	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
 	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
 }
 
@@ -1026,7 +1026,7 @@
 
 	li->irq.ext = irq->u.ext;
 	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1041,7 +1041,7 @@
 		/* another external call is pending */
 		return -EBUSY;
 	}
-	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
 	return 0;
 }
 
@@ -1067,7 +1067,7 @@
 	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
 		return -EBUSY;
 	*extcall = irq->u.extcall;
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1139,7 +1139,7 @@
 
 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1183,7 +1183,7 @@
 				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1196,7 +1196,7 @@
 				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1375,13 +1375,13 @@
 	spin_lock(&li->lock);
 	switch (type) {
 	case KVM_S390_MCHK:
-		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
 		break;
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
 		break;
 	default:
-		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 		break;
 	}
 	spin_unlock(&li->lock);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 98df53c..c91eb94 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1333,12 +1333,12 @@
 	save_access_regs(vcpu->arch.host_acrs);
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.gmap);
-	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 
 	save_fpu_regs();
@@ -1443,9 +1443,9 @@
 						    CPUSTAT_STOPPED);
 
 	if (test_kvm_facility(vcpu->kvm, 78))
-		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
 	else if (test_kvm_facility(vcpu->kvm, 8))
-		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
 
 	kvm_s390_vcpu_setup_model(vcpu);
 
@@ -1557,24 +1557,24 @@
 
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 }
 
 /*
@@ -1583,7 +1583,7 @@
  * return immediately. */
 void exit_sie(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
 		cpu_relax();
 }
@@ -1807,19 +1807,19 @@
 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
 		vcpu->guest_debug = dbg->control;
 		/* enforce guest PER */
-		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 
 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
 			rc = kvm_s390_import_bp_data(vcpu, dbg);
 	} else {
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 		vcpu->arch.guestdbg.last_bp = 0;
 	}
 
 	if (rc) {
 		vcpu->guest_debug = 0;
 		kvm_s390_clear_bp_data(vcpu);
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 	}
 
 	return rc;
@@ -1894,7 +1894,7 @@
 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
 		if (!ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
-			atomic_set_mask(CPUSTAT_IBS,
+			atomic_or(CPUSTAT_IBS,
 					&vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -1903,7 +1903,7 @@
 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
 		if (ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
-			atomic_clear_mask(CPUSTAT_IBS,
+			atomic_andnot(CPUSTAT_IBS,
 					  &vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -2419,7 +2419,7 @@
 		__disable_ibs_on_all_vcpus(vcpu->kvm);
 	}
 
-	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	/*
 	 * Another VCPU might have used IBS while we were offline.
 	 * Let's play safe and flush the VCPU at startup.
@@ -2445,7 +2445,7 @@
 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
 	kvm_s390_clear_stop_irq(vcpu);
 
-	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	__disable_ibs_on_vcpu(vcpu);
 
 	for (i = 0; i < online_vcpus; i++) {
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 0d002a7..ae4de55 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -15,7 +15,7 @@
 #include <asm/mmu_context.h>
 #include <asm/facility.h>
 
-static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
+static DEFINE_STATIC_KEY_FALSE(have_mvcos);
 
 static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
 						 unsigned long size)
@@ -104,7 +104,7 @@
 
 unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (static_key_false(&have_mvcos))
+	if (static_branch_likely(&have_mvcos))
 		return copy_from_user_mvcos(to, from, n);
 	return copy_from_user_mvcp(to, from, n);
 }
@@ -177,7 +177,7 @@
 
 unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (static_key_false(&have_mvcos))
+	if (static_branch_likely(&have_mvcos))
 		return copy_to_user_mvcos(to, from, n);
 	return copy_to_user_mvcs(to, from, n);
 }
@@ -240,7 +240,7 @@
 
 unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
-	if (static_key_false(&have_mvcos))
+	if (static_branch_likely(&have_mvcos))
 		return copy_in_user_mvcos(to, from, n);
 	return copy_in_user_mvc(to, from, n);
 }
@@ -312,7 +312,7 @@
 
 unsigned long __clear_user(void __user *to, unsigned long size)
 {
-	if (static_key_false(&have_mvcos))
+	if (static_branch_likely(&have_mvcos))
 			return clear_user_mvcos(to, size);
 	return clear_user_xc(to, size);
 }
@@ -373,7 +373,7 @@
 static int __init uaccess_init(void)
 {
 	if (test_facility(27))
-		static_key_slow_inc(&have_mvcos);
+		static_branch_enable(&have_mvcos);
 	return 0;
 }
 early_initcall(uaccess_init);
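The conversion above is the standard static-key upgrade: a DEFINE_STATIC_KEY_FALSE key patched once at boot, then queried on the hot path with static_branch_likely(), which compiles to a patchable branch instead of a load-and-test. A sketch using the same s390 facility probe (function names invented):

    #include <linux/init.h>
    #include <linux/jump_label.h>
    #include <asm/facility.h>

    static DEFINE_STATIC_KEY_FALSE(have_fast_copy);

    static unsigned long my_copy(void *to, const void *from, unsigned long n)
    {
            if (static_branch_likely(&have_fast_copy))
                    return 0;       /* fast variant would go here */
            return n;               /* fallback variant */
    }

    static int __init my_copy_init(void)
    {
            if (test_facility(27))  /* same probe uaccess_init() uses */
                    static_branch_enable(&have_fast_copy);
            return 0;
    }
    early_initcall(my_copy_init);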
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index f6498ee..f010c93 100644
--- a/arch/s390/net/bpf_jit.h
+++ b/arch/s390/net/bpf_jit.h
@@ -36,6 +36,8 @@
  *	      |   BPF stack   |     |
  *	      |		      |     |
  *	      +---------------+     |
+ *	      | 8 byte skbp   |     |
+ * R15+170 -> +---------------+     |
  *	      | 8 byte hlen   |     |
  * R15+168 -> +---------------+     |
  *	      | 4 byte align  |     |
@@ -51,11 +53,12 @@
  * We get 160 bytes of stack space from the calling function, but only
  * use 12 * 8 bytes for the old backchain, r15..r6, and tail_call_cnt.
  */
-#define STK_SPACE	(MAX_BPF_STACK + 8 + 4 + 4 + 160)
+#define STK_SPACE	(MAX_BPF_STACK + 8 + 8 + 4 + 4 + 160)
 #define STK_160_UNUSED	(160 - 12 * 8)
 #define STK_OFF		(STK_SPACE - STK_160_UNUSED)
 #define STK_OFF_TMP	160	/* Offset of tmp buffer on stack */
 #define STK_OFF_HLEN	168	/* Offset of SKB header length on stack */
+#define STK_OFF_SKBP	170	/* Offset of SKB pointer on stack */
 
 #define STK_OFF_R6	(160 - 11 * 8)	/* Offset of r6 on stack */
 #define STK_OFF_TCCNT	(160 - 12 * 8)	/* Offset of tail_call_cnt on stack */
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 8d2e516..eeda051 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -45,7 +45,7 @@
 	int labels[1];		/* Labels for local jumps */
 };
 
-#define BPF_SIZE_MAX	4096	/* Max size for program */
+#define BPF_SIZE_MAX	0x7ffff	/* Max size for program (20 bit signed displ) */
 
 #define SEEN_SKB	1	/* skb access */
 #define SEEN_MEM	2	/* use mem[] for temporary storage */
@@ -53,6 +53,7 @@
 #define SEEN_LITERAL	8	/* code uses literals */
 #define SEEN_FUNC	16	/* calls C functions */
 #define SEEN_TAIL_CALL	32	/* code uses tail calls */
+#define SEEN_SKB_CHANGE	64	/* code changes skb data */
 #define SEEN_STACK	(SEEN_FUNC | SEEN_MEM | SEEN_SKB)
 
 /*
@@ -203,19 +204,11 @@
 	_EMIT6(op1 | __disp, op2);				\
 })
 
-#define EMIT6_DISP(op1, op2, b1, b2, b3, disp)			\
-({								\
-	_EMIT6_DISP(op1 | reg(b1, b2) << 16 |			\
-		    reg_high(b3) << 8, op2, disp);		\
-	REG_SET_SEEN(b1);					\
-	REG_SET_SEEN(b2);					\
-	REG_SET_SEEN(b3);					\
-})
-
 #define _EMIT6_DISP_LH(op1, op2, disp)				\
 ({								\
-	unsigned int __disp_h = ((u32)disp) & 0xff000;		\
-	unsigned int __disp_l = ((u32)disp) & 0x00fff;		\
+	u32 _disp = (u32) disp;					\
+	unsigned int __disp_h = _disp & 0xff000;		\
+	unsigned int __disp_l = _disp & 0x00fff;		\
 	_EMIT6(op1 | __disp_l, op2 | __disp_h >> 4);		\
 })
 
@@ -390,12 +383,32 @@
 }
 
 /*
+ * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
+ * we store the SKB header length on the stack and the SKB data
+ * pointer in REG_SKB_DATA.
+ */
+static void emit_load_skb_data_hlen(struct bpf_jit *jit)
+{
+	/* Header length: llgf %w1,<len>(%b1) */
+	EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
+		      offsetof(struct sk_buff, len));
+	/* s %w1,<data_len>(%b1) */
+	EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
+		   offsetof(struct sk_buff, data_len));
+	/* stg %w1,ST_OFF_HLEN(%r0,%r15) */
+	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, STK_OFF_HLEN);
+	/* lg %skb_data,data_off(%b1) */
+	EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
+		      BPF_REG_1, offsetof(struct sk_buff, data));
+}
+
+/*
  * Emit function prologue
  *
  * Save registers and create stack frame if necessary.
  * See stack frame layout description in "bpf_jit.h"!
  */
-static void bpf_jit_prologue(struct bpf_jit *jit)
+static void bpf_jit_prologue(struct bpf_jit *jit, bool is_classic)
 {
 	if (jit->seen & SEEN_TAIL_CALL) {
 		/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
@@ -429,32 +442,21 @@
 			EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
 				      REG_15, 152);
 	}
-	/*
-	 * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
-	 * we store the SKB header length on the stack and the SKB data
-	 * pointer in REG_SKB_DATA.
-	 */
-	if (jit->seen & SEEN_SKB) {
-		/* Header length: llgf %w1,<len>(%b1) */
-		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
-			      offsetof(struct sk_buff, len));
-		/* s %w1,<data_len>(%b1) */
-		EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
-			   offsetof(struct sk_buff, data_len));
-		/* stg %w1,ST_OFF_HLEN(%r0,%r15) */
+	if (jit->seen & SEEN_SKB)
+		emit_load_skb_data_hlen(jit);
+	if (jit->seen & SEEN_SKB_CHANGE)
+		/* stg %b1,ST_OFF_SKBP(%r0,%r15) */
 		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
-			      STK_OFF_HLEN);
-		/* lg %skb_data,data_off(%b1) */
-		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
-			      BPF_REG_1, offsetof(struct sk_buff, data));
+			      STK_OFF_SKBP);
+	/* Clear A (%b0) and X (%b7) registers for converted BPF programs */
+	if (is_classic) {
+		if (REG_SEEN(BPF_REG_A))
+			/* lghi %ba,0 */
+			EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
+		if (REG_SEEN(BPF_REG_X))
+			/* lghi %bx,0 */
+			EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
 	}
-	/* BPF compatibility: clear A (%b0) and X (%b7) registers */
-	if (REG_SEEN(BPF_REG_A))
-		/* lghi %ba,0 */
-		EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
-	if (REG_SEEN(BPF_REG_X))
-		/* lghi %bx,0 */
-		EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
 }
 
 /*
@@ -976,12 +978,19 @@
 		REG_SET_SEEN(BPF_REG_5);
 		jit->seen |= SEEN_FUNC;
 		/* lg %w1,<d(imm)>(%l) */
-		EMIT6_DISP(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
-			   EMIT_CONST_U64(func));
+		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
+			      EMIT_CONST_U64(func));
 		/* basr %r14,%w1 */
 		EMIT2(0x0d00, REG_14, REG_W1);
 		/* lgr %b0,%r2: load return value into %b0 */
 		EMIT4(0xb9040000, BPF_REG_0, REG_2);
+		if (bpf_helper_changes_skb_data((void *)func)) {
+			jit->seen |= SEEN_SKB_CHANGE;
+			/* lg %b1,ST_OFF_SKBP(%r15) */
+			EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
+				      REG_15, STK_OFF_SKBP);
+			emit_load_skb_data_hlen(jit);
+		}
 		break;
 	}
 	case BPF_JMP | BPF_CALL | BPF_X:
@@ -1023,7 +1032,7 @@
 				      MAX_TAIL_CALL_CNT, 0, 0x2);
 
 		/*
-		 * prog = array->prog[index];
+		 * prog = array->ptrs[index];
 		 * if (prog == NULL)
 		 *         goto out;
 		 */
@@ -1032,7 +1041,7 @@
 		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
 		/* lg %r1,prog(%b2,%r1) */
 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
-			      REG_1, offsetof(struct bpf_array, prog));
+			      REG_1, offsetof(struct bpf_array, ptrs));
 		/* clgij %r1,0,0x8,label0 */
 		EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8);
 
@@ -1236,7 +1245,7 @@
 	jit->lit = jit->lit_start;
 	jit->prg = 0;
 
-	bpf_jit_prologue(jit);
+	bpf_jit_prologue(jit, bpf_prog_was_classic(fp));
 	for (i = 0; i < fp->len; i += insn_count) {
 		insn_count = bpf_jit_insn(jit, fp, i);
 		if (insn_count < 0)
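
The SEEN_SKB_CHANGE machinery above exists because BPF helpers such as
bpf_skb_store_bytes() may reallocate the skb's data buffer, invalidating the
header length and data pointer the JIT caches in its prologue. Saving the skb
pointer at STK_OFF_SKBP lets the JIT reload it and re-run
emit_load_skb_data_hlen() after any such call. A minimal C sketch of the
invariant being maintained (the struct and names are illustrative, not the
JIT's actual state):

	#include <linux/skbuff.h>

	/* Illustrative mirror of what the JIT caches per program. */
	struct skb_cache {
		u8 *data;	/* mirrors REG_SKB_DATA */
		u32 hlen;	/* skb->len - skb->data_len, kept on the stack */
	};

	static void skb_cache_reload(struct skb_cache *c, struct sk_buff *skb)
	{
		/* Must be redone after any helper that changes skb data,
		 * because skb->data may now point at a different buffer.
		 */
		c->data = skb->data;
		c->hlen = skb->len - skb->data_len;
	}
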
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index 97a5fda..b94df40 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -48,47 +48,12 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	int tmp;
-	unsigned int _mask = ~mask;
-
-	__asm__ __volatile__ (
-		"   .align 2              \n\t"
-		"   mova    1f,   r0      \n\t" /* r0 = end point */
-		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
-		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
-		"   mov.l  @%1,   %0      \n\t" /* load  old value */
-		"   and     %2,   %0      \n\t" /* add */
-		"   mov.l   %0,   @%1     \n\t" /* store new value */
-		"1: mov     r1,   r15     \n\t" /* LOGOUT */
-		: "=&r" (tmp),
-		  "+r"  (v)
-		: "r"   (_mask)
-		: "memory" , "r0", "r1");
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	int tmp;
-
-	__asm__ __volatile__ (
-		"   .align 2              \n\t"
-		"   mova    1f,   r0      \n\t" /* r0 = end point */
-		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
-		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
-		"   mov.l  @%1,   %0      \n\t" /* load  old value */
-		"   or      %2,   %0      \n\t" /* or */
-		"   mov.l   %0,   @%1     \n\t" /* store new value */
-		"1: mov     r1,   r15     \n\t" /* LOGOUT */
-		: "=&r" (tmp),
-		  "+r"  (v)
-		: "r"   (mask)
-		: "memory" , "r0", "r1");
-}
-
 #endif /* __ASM_SH_ATOMIC_GRB_H */
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h
index 61d1075..23fcdad 100644
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -37,27 +37,12 @@
 
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags);
-	v->counter &= ~mask;
-	raw_local_irq_restore(flags);
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags);
-	v->counter |= mask;
-	raw_local_irq_restore(flags);
-}
-
 #endif /* __ASM_SH_ATOMIC_IRQ_H */
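
This removal, repeated for several architectures in this series, is part of a
tree-wide switch from the atomic_clear_mask()/atomic_set_mask() helpers to the
generic atomic_and()/atomic_or()/atomic_xor() operations now generated by
ATOMIC_OP(). Call sites convert mechanically; a sketch of the equivalence
(illustrative wrappers, not taken from an actual caller):

	#include <linux/atomic.h>

	static inline void example_clear_mask(unsigned int mask, atomic_t *v)
	{
		atomic_and(~mask, v);	/* was: atomic_clear_mask(mask, v) */
	}

	static inline void example_set_mask(unsigned int mask, atomic_t *v)
	{
		atomic_or(mask, v);	/* was: atomic_set_mask(mask, v) */
	}
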
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 8575dcc..33d34b1 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -52,37 +52,12 @@
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
-"	and	%1, %0					\n"
-"	movco.l	%0, @%2					\n"
-"	bf	1b					\n"
-	: "=&z" (tmp)
-	: "r" (~mask), "r" (&v->counter)
-	: "t");
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1:	movli.l @%2, %0		! atomic_set_mask	\n"
-"	or	%1, %0					\n"
-"	movco.l	%0, @%2					\n"
-"	bf	1b					\n"
-	: "=&z" (tmp)
-	: "r" (mask), "r" (&v->counter)
-	: "t");
-}
-
 #endif /* __ASM_SH_ATOMIC_LLSC_H */
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 2790b6a..17f4862 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -488,7 +488,7 @@
 int arch_add_memory(int nid, u64 start, u64 size)
 {
 	pg_data_t *pgdat;
-	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
@@ -517,7 +517,7 @@
 #ifdef CONFIG_MEMORY_HOTREMOVE
 int arch_remove_memory(u64 start, u64 size)
 {
-	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	struct zone *zone;
 	int ret;
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index bce52ba..05713d1 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -33,8 +33,8 @@
 	/* Don't allow bogus node assignment */
 	BUG_ON(nid >= MAX_NUMNODES || nid <= 0);
 
-	start_pfn = start >> PAGE_SHIFT;
-	end_pfn = end >> PAGE_SHIFT;
+	start_pfn = PFN_DOWN(start);
+	end_pfn = PFN_DOWN(end);
 
 	pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
 			 PAGE_KERNEL);
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 0e69b7e..7dcbebb 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -17,10 +17,12 @@
 #include <asm/barrier.h>
 #include <asm-generic/atomic64.h>
 
-
 #define ATOMIC_INIT(i)  { (i) }
 
 int atomic_add_return(int, atomic_t *);
+void atomic_and(int, atomic_t *);
+void atomic_or(int, atomic_t *);
+void atomic_xor(int, atomic_t *);
 int atomic_cmpxchg(atomic_t *, int, int);
 int atomic_xchg(atomic_t *, int);
 int __atomic_add_unless(atomic_t *, int, int);
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 4082749..917084a 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -33,6 +33,10 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 809941e..14a9286 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -60,12 +60,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	___p1;								\
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index cc9b04a..62d0354 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -7,16 +7,33 @@
 
 #define JUMP_LABEL_NOP_SIZE 4
 
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-		asm_volatile_goto("1:\n\t"
-			 "nop\n\t"
-			 "nop\n\t"
-			 ".pushsection __jump_table,  \"aw\"\n\t"
-			 ".align 4\n\t"
-			 ".word 1b, %l[l_yes], %c0\n\t"
-			 ".popsection \n\t"
-			 : :  "i" (key) : : l_yes);
+	asm_volatile_goto("1:\n\t"
+		 "nop\n\t"
+		 "nop\n\t"
+		 ".pushsection __jump_table,  \"aw\"\n\t"
+		 ".align 4\n\t"
+		 ".word 1b, %l[l_yes], %c0\n\t"
+		 ".popsection \n\t"
+		 : :  "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+		 "b %l[l_yes]\n\t"
+		 "nop\n\t"
+		 ".pushsection __jump_table,  \"aw\"\n\t"
+		 ".align 4\n\t"
+		 ".word 1b, %l[l_yes], %c0\n\t"
+		 ".popsection \n\t"
+		 : :  "i" (&((char *)key)[branch]) : : l_yes);
+
 	return false;
 l_yes:
 	return true;
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 48565c1..59bbeff 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -16,7 +16,7 @@
 	u32 val;
 	u32 *insn = (u32 *) (unsigned long) entry->code;
 
-	if (type == JUMP_LABEL_ENABLE) {
+	if (type == JUMP_LABEL_JMP) {
 		s32 off = (s32)entry->target - (s32)entry->code;
 
 #ifdef CONFIG_SPARC64
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 71cd65a..b9d63c0 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -27,22 +27,38 @@
 
 #endif /* SMP */
 
-#define ATOMIC_OP(op, cop)						\
+#define ATOMIC_OP_RETURN(op, c_op)					\
 int atomic_##op##_return(int i, atomic_t *v)				\
 {									\
 	int ret;							\
 	unsigned long flags;						\
 	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
 									\
-	ret = (v->counter cop i);					\
+	ret = (v->counter c_op i);					\
 									\
 	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
 	return ret;							\
 }									\
 EXPORT_SYMBOL(atomic_##op##_return);
 
-ATOMIC_OP(add, +=)
+#define ATOMIC_OP(op, c_op)						\
+void atomic_##op(int i, atomic_t *v)					\
+{									\
+	unsigned long flags;						\
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
+									\
+	v->counter c_op i;						\
+									\
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
+}									\
+EXPORT_SYMBOL(atomic_##op);
 
+ATOMIC_OP_RETURN(add, +=)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
+
+#undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
 int atomic_xchg(atomic_t *v, int new)
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 05dac43..d6b0363 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -47,6 +47,9 @@
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
@@ -84,6 +87,9 @@
 
 ATOMIC64_OPS(add)
 ATOMIC64_OPS(sub)
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 8069ce1..8eb454c 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -111,6 +111,9 @@
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 7931eee..f8b9f71 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -807,7 +807,7 @@
 	}
 
 	if (bpf_jit_enable > 1)
-		bpf_jit_dump(flen, proglen, pass, image);
+		bpf_jit_dump(flen, proglen, pass + 1, image);
 
 	if (image) {
 		bpf_flush_icache(image, image + proglen);
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 9def1f5..2ba12d7 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -32,6 +32,7 @@
 	select EDAC_SUPPORT
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
+	select HAVE_ARCH_SECCOMP_FILTER
 
 # FIXME: investigate whether we need/want these options.
 #	select HAVE_IOREMAP_PROT
@@ -221,6 +222,22 @@
 	  If enabled, the kernel will support running TILE-Gx binaries
 	  that were built with the -m32 option.
 
+config SECCOMP
+	bool "Enable seccomp to safely compute untrusted bytecode"
+	depends on PROC_FS
+	help
+	  This kernel feature is useful for number crunching applications
+	  that may need to compute untrusted bytecode during their
+	  execution. By using pipes or other transports made available to
+	  the process as file descriptors supporting the read/write
+	  syscalls, it's possible to isolate those applications in
+	  their own address space using seccomp. Once seccomp is
+	  enabled via prctl, it cannot be disabled and the task is only
+	  allowed to execute a few safe syscalls defined by each seccomp
+	  mode.
+
+	  If unsure, say N.
+
 config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC
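
The help text above describes strict-mode seccomp, which a task enables
through prctl() and which then limits it to a handful of syscalls for the rest
of its life. A minimal userspace sketch of that flow (untested, not part of
this series):

	#include <sys/prctl.h>
	#include <linux/seccomp.h>
	#include <unistd.h>

	int main(void)
	{
		if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) != 0)
			return 1;
		/* Only read(), write(), _exit() and sigreturn() are allowed
		 * from here on; any other syscall kills the task.
		 */
		write(1, "sandboxed\n", 10);
		_exit(0);
	}
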
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index d8a8431..ba35c41 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -28,6 +28,7 @@
 generic-y += posix_types.h
 generic-y += preempt.h
 generic-y += resource.h
+generic-y += seccomp.h
 generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 1b109fa..d320ce2 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -34,6 +34,19 @@
 	_atomic_xchg_add(&v->counter, i);
 }
 
+#define ATOMIC_OP(op)							\
+unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	_atomic_##op((unsigned long *)&v->counter, i);			\
+}
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
 /**
  * atomic_add_return - add integer and return
  * @v: pointer of type atomic_t
@@ -113,6 +126,17 @@
 	_atomic64_xchg_add(&v->counter, i);
 }
 
+#define ATOMIC64_OP(op)						\
+long long _atomic64_##op(long long *v, long long n);		\
+static inline void atomic64_##op(long long i, atomic64_t *v)	\
+{								\
+	_atomic64_##op(&v->counter, i);				\
+}
+
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
 /**
  * atomic64_add_return - add integer and return
  * @v: pointer of type atomic64_t
@@ -225,6 +249,7 @@
 extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 						  int *lock, int o, int n);
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
 extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
@@ -234,6 +259,9 @@
 					long long n);
 extern long long __atomic64_xchg_add_unless(volatile long long *p,
 					int *lock, long long o, long long n);
+extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 0496970..096a56d 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -58,6 +58,26 @@
 	return oldval;
 }
 
+static inline void atomic_and(int i, atomic_t *v)
+{
+	__insn_fetchand4((void *)&v->counter, i);
+}
+
+static inline void atomic_or(int i, atomic_t *v)
+{
+	__insn_fetchor4((void *)&v->counter, i);
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+	int guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
+	} while (guess != oldval);
+}
+
 /* Now the true 64-bit operations. */
 
 #define ATOMIC64_INIT(i)	{ (i) }
@@ -91,6 +111,26 @@
 	return oldval != u;
 }
 
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+	__insn_fetchand((void *)&v->counter, i);
+}
+
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+	__insn_fetchor((void *)&v->counter, i);
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+	long guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch(&v->counter, guess ^ i);
+	} while (guess != oldval);
+}
+
 #define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
 #define atomic64_sub(i, v)		atomic64_add(-(i), (v))
 #define atomic64_inc_return(v)		atomic64_add_return(1, (v))
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
index 41d9878..c505d77 100644
--- a/arch/tile/include/asm/elf.h
+++ b/arch/tile/include/asm/elf.h
@@ -22,6 +22,7 @@
 #include <arch/chip.h>
 
 #include <linux/ptrace.h>
+#include <linux/elf-em.h>
 #include <asm/byteorder.h>
 #include <asm/page.h>
 
@@ -30,9 +31,6 @@
 #define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
-#define EM_TILEPRO 188
-#define EM_TILEGX  191
-
 /* Provide a nominal data structure. */
 #define ELF_NFPREG	0
 typedef double elf_fpreg_t;
diff --git a/arch/tile/include/asm/syscall.h b/arch/tile/include/asm/syscall.h
index 9644b88..373d730 100644
--- a/arch/tile/include/asm/syscall.h
+++ b/arch/tile/include/asm/syscall.h
@@ -20,6 +20,8 @@
 
 #include <linux/sched.h>
 #include <linux/err.h>
+#include <linux/audit.h>
+#include <linux/compat.h>
 #include <arch/abi.h>
 
 /* The array of function pointers for syscalls. */
@@ -61,7 +63,15 @@
 					    struct pt_regs *regs,
 					    int error, long val)
 {
-	regs->regs[0] = (long) error ?: val;
+	if (error) {
+		/* R0 is the passed-in negative error, R1 is positive. */
+		regs->regs[0] = error;
+		regs->regs[1] = -error;
+	} else {
+		/* R1 set to zero to indicate no error. */
+		regs->regs[0] = val;
+		regs->regs[1] = 0;
+	}
 }
 
 static inline void syscall_get_arguments(struct task_struct *task,
@@ -82,4 +92,20 @@
 	memcpy(&regs[i], args, n * sizeof(args[0]));
 }
 
+/*
+ * We don't care about endianness (__AUDIT_ARCH_LE bit) here because
+ * tile has the same system calls both on little- and big- endian.
+ */
+static inline int syscall_get_arch(void)
+{
+	if (is_compat_task())
+		return AUDIT_ARCH_TILEGX32;
+
+#ifdef CONFIG_TILEGX
+	return AUDIT_ARCH_TILEGX;
+#else
+	return AUDIT_ARCH_TILEPRO;
+#endif
+}
+
 #endif	/* _ASM_TILE_SYSCALL_H */
diff --git a/arch/tile/include/uapi/arch/opcode_tilegx.h b/arch/tile/include/uapi/arch/opcode_tilegx.h
index d76ff2d..9e46eaa 100644
--- a/arch/tile/include/uapi/arch/opcode_tilegx.h
+++ b/arch/tile/include/uapi/arch/opcode_tilegx.h
@@ -830,11 +830,11 @@
   ADDX_RRR_0_OPCODE_X0 = 2,
   ADDX_RRR_0_OPCODE_X1 = 2,
   ADDX_RRR_0_OPCODE_Y0 = 0,
-  ADDX_SPECIAL_0_OPCODE_Y1 = 0,
+  ADDX_RRR_0_OPCODE_Y1 = 0,
   ADD_RRR_0_OPCODE_X0 = 3,
   ADD_RRR_0_OPCODE_X1 = 3,
   ADD_RRR_0_OPCODE_Y0 = 1,
-  ADD_SPECIAL_0_OPCODE_Y1 = 1,
+  ADD_RRR_0_OPCODE_Y1 = 1,
   ANDI_IMM8_OPCODE_X0 = 3,
   ANDI_IMM8_OPCODE_X1 = 3,
   ANDI_OPCODE_Y0 = 2,
@@ -995,6 +995,7 @@
   LD4U_ADD_IMM8_OPCODE_X1 = 12,
   LD4U_OPCODE_Y2 = 2,
   LD4U_UNARY_OPCODE_X1 = 20,
+  LDNA_ADD_IMM8_OPCODE_X1 = 21,
   LDNA_UNARY_OPCODE_X1 = 21,
   LDNT1S_ADD_IMM8_OPCODE_X1 = 13,
   LDNT1S_UNARY_OPCODE_X1 = 22,
@@ -1015,7 +1016,6 @@
   LD_UNARY_OPCODE_X1 = 29,
   LNK_UNARY_OPCODE_X1 = 30,
   LNK_UNARY_OPCODE_Y1 = 14,
-  LWNA_ADD_IMM8_OPCODE_X1 = 21,
   MFSPR_IMM8_OPCODE_X1 = 22,
   MF_UNARY_OPCODE_X1 = 31,
   MM_BF_OPCODE_X0 = 7,
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index cdbda45..fbbe2ea 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -1224,6 +1224,7 @@
 	 jal    do_syscall_trace_enter
 	}
 	FEEDBACK_REENTER(handle_syscall)
+	blz     r0, .Lsyscall_sigreturn_skip
 
 	/*
 	 * We always reload our registers from the stack at this
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 800b91d..58964d2 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -1247,6 +1247,7 @@
 	 jal    do_syscall_trace_enter
 	}
 	FEEDBACK_REENTER(handle_syscall)
+	bltz    r0, .Lsyscall_sigreturn_skip
 
 	/*
 	 * We always reload our registers from the stack at this
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index f84eed8..bdc126f 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -262,6 +262,9 @@
 	if (work & _TIF_NOHZ)
 		user_exit();
 
+	if (secure_computing() == -1)
+		return -1;
+
 	if (work & _TIF_SYSCALL_TRACE) {
 		if (tracehook_report_syscall_entry(regs))
 			regs->regs[TREG_SYSCALL_NR] = -1;
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 00178ec..178989e 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -140,10 +140,10 @@
  * Whenever anyone tries to change modes, we just mask interrupts
  * and wait for the next event to get set.
  */
-static void tile_timer_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int tile_timer_shutdown(struct clock_event_device *evt)
 {
 	arch_local_irq_mask_now(INT_TILE_TIMER);
+	return 0;
 }
 
 /*
@@ -157,7 +157,9 @@
 	.rating = 100,
 	.irq = -1,
 	.set_next_event = tile_timer_set_next_event,
-	.set_mode = tile_timer_set_mode,
+	.set_state_shutdown = tile_timer_shutdown,
+	.set_state_oneshot = tile_timer_shutdown,
+	.tick_resume = tile_timer_shutdown,
 };
 
 void setup_tile_timer(void)
diff --git a/arch/tile/kernel/vdso/Makefile b/arch/tile/kernel/vdso/Makefile
index a025f63..c54fff3 100644
--- a/arch/tile/kernel/vdso/Makefile
+++ b/arch/tile/kernel/vdso/Makefile
@@ -54,7 +54,7 @@
 $(obj)/built-in.o: ld_flags += -R $(obj)/vdso-syms.o
 
 SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-                            $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+                            $(call cc-ldoption, -Wl$(comma)--hash-style=both)
 SYSCFLAGS_vdso_syms.o = -r
 $(obj)/vdso-syms.o: $(src)/vdso.lds $(obj)/vrt_sigreturn.o FORCE
 	$(call if_changed,vdsold)
@@ -113,6 +113,6 @@
 $(obj)/vdso32.o: $(obj)/vdso32.so
 
 SYSCFLAGS_vdso32.so.dbg = -m32 -shared -s -Wl,-soname=linux-vdso32.so.1 \
-			    $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+			    $(call cc-ldoption, -Wl$(comma)--hash-style=both)
 $(obj)/vdso32.so.dbg: $(src)/vdso.lds $(obj-vdso32)
 	$(call if_changed,vdsold)
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index c89b211..298df1e 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -94,6 +94,12 @@
 }
 EXPORT_SYMBOL(_atomic_or);
 
+unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
+{
+	return __atomic_and((int *)p, __atomic_setup(p), mask).val;
+}
+EXPORT_SYMBOL(_atomic_and);
+
 unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
 {
 	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
@@ -136,6 +142,23 @@
 }
 EXPORT_SYMBOL(_atomic64_cmpxchg);
 
+long long _atomic64_and(long long *v, long long n)
+{
+	return __atomic64_and(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_and);
+
+long long _atomic64_or(long long *v, long long n)
+{
+	return __atomic64_or(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_or);
+
+long long _atomic64_xor(long long *v, long long n)
+{
+	return __atomic64_xor(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_xor);
 
 /*
  * If any of the atomic or futex routines hit a bad address (not in
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 6bda313..f611265 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -178,6 +178,7 @@
 atomic_op _xchg_add_unless, 32, \
 	"sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
 atomic_op _or, 32, "or r24, r22, r2"
+atomic_op _and, 32, "and r24, r22, r2"
 atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2"
 atomic_op _xor, 32, "xor r24, r22, r2"
 
@@ -191,6 +192,9 @@
 	{ bbns r26, 3f; add r24, r22, r4 }; \
 	{ bbns r27, 3f; add r25, r23, r5 }; \
 	slt_u r26, r24, r22; add r25, r25, r26"
+atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
+atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
+atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
 
 	jrp     lr              /* happy backtracer */
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 48f7433..117e2f3 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -41,6 +41,7 @@
 	select ARCH_USE_CMPXCHG_LOCKREF		if X86_64
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
+	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
 	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_IPC_PARSE_VERSION	if X86_32
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 962297d..cb5b3ab 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -208,7 +208,6 @@
 CONFIG_AGP_INTEL=y
 CONFIG_DRM=y
 CONFIG_DRM_I915=y
-CONFIG_DRM_I915_KMS=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_FB_TILEBLITTING=y
 CONFIG_FB_EFI=y
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 25e3cf1..477bfa6 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -380,3 +380,4 @@
 371	i386	recvfrom		sys_recvfrom			compat_sys_recvfrom
 372	i386	recvmsg			sys_recvmsg			compat_sys_recvmsg
 373	i386	shutdown		sys_shutdown
+374	i386	userfaultfd		sys_userfaultfd
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 9ef32d5..81c4906 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -329,6 +329,7 @@
 320	common	kexec_file_load		sys_kexec_file_load
 321	common	bpf			sys_bpf
 322	64	execveat		stub_execveat
+323	common	userfaultfd		sys_userfaultfd
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index e916895..fb52aa6 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -182,6 +182,21 @@
 	return xchg(&v->counter, new);
 }
 
+#define ATOMIC_OP(op)							\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	asm volatile(LOCK_PREFIX #op"l %1,%0"				\
+			: "+m" (v->counter)				\
+			: "ir" (i)					\
+			: "memory");					\
+}
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
 /**
  * __atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
@@ -219,16 +234,6 @@
 	return *v;
 }
 
-/* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr)				\
-	asm volatile(LOCK_PREFIX "andl %0,%1"			\
-		     : : "r" (~(mask)), "m" (*(addr)) : "memory")
-
-#define atomic_set_mask(mask, addr)				\
-	asm volatile(LOCK_PREFIX "orl %0,%1"			\
-		     : : "r" ((unsigned)(mask)), "m" (*(addr))	\
-		     : "memory")
-
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
 #else
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index b154de7..a11c30b 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -313,4 +313,18 @@
 #undef alternative_atomic64
 #undef __alternative_atomic64
 
+#define ATOMIC64_OP(op, c_op)						\
+static inline void atomic64_##op(long long i, atomic64_t *v)		\
+{									\
+	long long old, c = 0;						\
+	while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)		\
+		c = old;						\
+}
+
+ATOMIC64_OP(and, &)
+ATOMIC64_OP(or, |)
+ATOMIC64_OP(xor, ^)
+
+#undef ATOMIC64_OP
+
 #endif /* _ASM_X86_ATOMIC64_32_H */
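
32-bit x86 has no single locked instruction for a 64-bit logical operation,
so ATOMIC64_OP() above falls back to a cmpxchg retry loop. Written out,
ATOMIC64_OP(and, &) expands to:

	static inline void atomic64_and(long long i, atomic64_t *v)
	{
		long long old, c = 0;

		/* Retry until the value the update was computed from is
		 * still the current one; atomic64_cmpxchg() returns the
		 * value that was in memory before the exchange attempt.
		 */
		while ((old = atomic64_cmpxchg(v, c, c & i)) != c)
			c = old;
	}
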
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index b965f9e..50e33ef 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -220,4 +220,19 @@
 	return dec;
 }
 
+#define ATOMIC64_OP(op)							\
+static inline void atomic64_##op(long i, atomic64_t *v)			\
+{									\
+	asm volatile(LOCK_PREFIX #op"q %1,%0"				\
+			: "+m" (v->counter)				\
+			: "er" (i)					\
+			: "memory");					\
+}
+
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
+#undef ATOMIC64_OP
+
 #endif /* _ASM_X86_ATOMIC64_64_H */
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 818cb87..0681d25 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -57,12 +57,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
 	___p1;								\
@@ -74,12 +74,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	___p1;								\
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index a4c1cf7..5daeca3 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -16,15 +16,32 @@
 # define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC
 #endif
 
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:"
 		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
 		".pushsection __jump_table,  \"aw\" \n\t"
 		_ASM_ALIGN "\n\t"
-		_ASM_PTR "1b, %l[l_yes], %c0 \n\t"
+		_ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
 		".popsection \n\t"
-		: :  "i" (key) : : l_yes);
+		: :  "i" (key), "i" (branch) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:"
+		".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
+		"2:\n\t"
+		".pushsection __jump_table,  \"aw\" \n\t"
+		_ASM_ALIGN "\n\t"
+		_ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
+		".popsection \n\t"
+		: :  "i" (key), "i" (branch) : : l_yes);
+
 	return false;
 l_yes:
 	return true;
diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
index ae0e241..c537cbb 100644
--- a/arch/x86/include/asm/qrwlock.h
+++ b/arch/x86/include/asm/qrwlock.h
@@ -2,16 +2,6 @@
 #define _ASM_X86_QRWLOCK_H
 
 #include <asm-generic/qrwlock_types.h>
-
-#ifndef CONFIG_X86_PPRO_FENCE
-#define queue_write_unlock queue_write_unlock
-static inline void queue_write_unlock(struct qrwlock *lock)
-{
-        barrier();
-        ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
-}
-#endif
-
 #include <asm-generic/qrwlock.h>
 
 #endif /* _ASM_X86_QRWLOCK_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index cd79194..6df2029 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -261,6 +261,12 @@
 
 #endif	/* SMP */
 
+/* Not inlined due to inc_irq_stat not being defined yet */
+#define flush_tlb_local() {		\
+	inc_irq_stat(irq_tlb_count);	\
+	local_flush_tlb();		\
+}
+
 #ifndef CONFIG_PARAVIRT
 #define flush_tlb_others(mask, mm, start, end)	\
 	native_flush_tlb_others(mask, mm, start, end)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 3f124d5..cd9b6d0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -12,7 +12,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/export.h>
-#include <linux/watchdog.h>
+#include <linux/nmi.h>
 
 #include <asm/cpufeature.h>
 #include <asm/hardirq.h>
@@ -3627,7 +3627,10 @@
 		return 0;
 	}
 
-	watchdog_nmi_disable_all();
+	if (lockup_detector_suspend() != 0) {
+		pr_debug("failed to disable PMU erratum BJ122, BV98, HSD29 workaround\n");
+		return 0;
+	}
 
 	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
 
@@ -3635,7 +3638,7 @@
 	x86_pmu.commit_scheduling = NULL;
 	x86_pmu.stop_scheduling = NULL;
 
-	watchdog_nmi_enable_all();
+	lockup_detector_resume();
 
 	get_online_cpus();
 
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 26d5a55..e565e0e 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -45,7 +45,7 @@
 	const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
 	const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
 
-	if (type == JUMP_LABEL_ENABLE) {
+	if (type == JUMP_LABEL_JMP) {
 		if (init) {
 			/*
 			 * Jump label is enabled for the first time.
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 79055cf..c8d52cb 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -38,7 +38,7 @@
    erroneous rdtsc usage on !cpu_has_tsc processors */
 static int __read_mostly tsc_disabled = -1;
 
-static struct static_key __use_tsc = STATIC_KEY_INIT;
+static DEFINE_STATIC_KEY_FALSE(__use_tsc);
 
 int tsc_clocksource_reliable;
 
@@ -274,7 +274,12 @@
  */
 u64 native_sched_clock(void)
 {
-	u64 tsc_now;
+	if (static_branch_likely(&__use_tsc)) {
+		u64 tsc_now = rdtsc();
+
+		/* return the value in ns */
+		return cycles_2_ns(tsc_now);
+	}
 
 	/*
 	 * Fall back to jiffies if there's no TSC available:
@@ -284,16 +289,9 @@
 	 *   very important for it to be as fast as the platform
 	 *   can achieve it. )
 	 */
-	if (!static_key_false(&__use_tsc)) {
-		/* No locking but a rare wrong value is not a big deal: */
-		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
-	}
 
-	/* read the Time Stamp Counter: */
-	tsc_now = rdtsc();
-
-	/* return the value in ns */
-	return cycles_2_ns(tsc_now);
+	/* No locking but a rare wrong value is not a big deal: */
+	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 }
 
 /*
@@ -1212,7 +1210,7 @@
 	/* now allow native_sched_clock() to use rdtsc */
 
 	tsc_disabled = 0;
-	static_key_slow_inc(&__use_tsc);
+	static_branch_enable(&__use_tsc);
 
 	if (!no_sched_irq_time)
 		enable_sched_clock_irqtime();
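
The tsc.c hunks above are an instance of the static-key API migration that
accompanies the two-argument arch_static_branch() changes earlier in this
series: STATIC_KEY_INIT with static_key_false()/static_key_slow_inc() becomes
DEFINE_STATIC_KEY_FALSE() with static_branch_likely()/static_branch_enable().
The general pattern, sketched with an illustrative key name:

	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(my_feature);	/* starts disabled */

	static int do_work(void)
	{
		if (static_branch_likely(&my_feature))
			return 1;	/* becomes a fall-through once enabled */
		return 0;		/* taken while the key is disabled */
	}

	static void my_feature_setup(void)
	{
		static_branch_enable(&my_feature); /* was static_key_slow_inc() */
	}
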
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 90b924a..8ddb5d0 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -140,6 +140,7 @@
 	info.flush_end = end;
 
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
+	trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start);
 	if (is_uv_system()) {
 		unsigned int cpu;
 
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index be2e7a2..70efcd0 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -246,7 +246,7 @@
  *     goto out;
  *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
  *     goto out;
- *   prog = array->prog[index];
+ *   prog = array->ptrs[index];
  *   if (prog == NULL)
  *     goto out;
  *   goto *(prog->bpf_func + prologue_size);
@@ -284,9 +284,9 @@
 	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
 	EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
 
-	/* prog = array->prog[index]; */
+	/* prog = array->ptrs[index]; */
 	EMIT4_off32(0x48, 0x8D, 0x84, 0xD6,       /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
-		    offsetof(struct bpf_array, prog));
+		    offsetof(struct bpf_array, ptrs));
 	EMIT3(0x48, 0x8B, 0x00);                  /* mov rax, qword ptr [rax] */
 
 	/* if (prog == NULL)
@@ -315,6 +315,26 @@
 	*pprog = prog;
 }
 
+
+static void emit_load_skb_data_hlen(u8 **pprog)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	/* r9d = skb->len - skb->data_len (headlen)
+	 * r10 = skb->data
+	 */
+	/* mov %r9d, off32(%rdi) */
+	EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len));
+
+	/* sub %r9d, off32(%rdi) */
+	EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len));
+
+	/* mov %r10, off32(%rdi) */
+	EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data));
+	*pprog = prog;
+}
+
 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		  int oldproglen, struct jit_context *ctx)
 {
@@ -329,36 +349,8 @@
 
 	emit_prologue(&prog);
 
-	if (seen_ld_abs) {
-		/* r9d : skb->len - skb->data_len (headlen)
-		 * r10 : skb->data
-		 */
-		if (is_imm8(offsetof(struct sk_buff, len)))
-			/* mov %r9d, off8(%rdi) */
-			EMIT4(0x44, 0x8b, 0x4f,
-			      offsetof(struct sk_buff, len));
-		else
-			/* mov %r9d, off32(%rdi) */
-			EMIT3_off32(0x44, 0x8b, 0x8f,
-				    offsetof(struct sk_buff, len));
-
-		if (is_imm8(offsetof(struct sk_buff, data_len)))
-			/* sub %r9d, off8(%rdi) */
-			EMIT4(0x44, 0x2b, 0x4f,
-			      offsetof(struct sk_buff, data_len));
-		else
-			EMIT3_off32(0x44, 0x2b, 0x8f,
-				    offsetof(struct sk_buff, data_len));
-
-		if (is_imm8(offsetof(struct sk_buff, data)))
-			/* mov %r10, off8(%rdi) */
-			EMIT4(0x4c, 0x8b, 0x57,
-			      offsetof(struct sk_buff, data));
-		else
-			/* mov %r10, off32(%rdi) */
-			EMIT3_off32(0x4c, 0x8b, 0x97,
-				    offsetof(struct sk_buff, data));
-	}
+	if (seen_ld_abs)
+		emit_load_skb_data_hlen(&prog);
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
 		const s32 imm32 = insn->imm;
@@ -367,6 +359,7 @@
 		u8 b1 = 0, b2 = 0, b3 = 0;
 		s64 jmp_offset;
 		u8 jmp_cond;
+		bool reload_skb_data;
 		int ilen;
 		u8 *func;
 
@@ -818,12 +811,18 @@
 			func = (u8 *) __bpf_call_base + imm32;
 			jmp_offset = func - (image + addrs[i]);
 			if (seen_ld_abs) {
-				EMIT2(0x41, 0x52); /* push %r10 */
-				EMIT2(0x41, 0x51); /* push %r9 */
-				/* need to adjust jmp offset, since
-				 * pop %r9, pop %r10 take 4 bytes after call insn
-				 */
-				jmp_offset += 4;
+				reload_skb_data = bpf_helper_changes_skb_data(func);
+				if (reload_skb_data) {
+					EMIT1(0x57); /* push %rdi */
+					jmp_offset += 22; /* pop, mov, sub, mov */
+				} else {
+					EMIT2(0x41, 0x52); /* push %r10 */
+					EMIT2(0x41, 0x51); /* push %r9 */
+					/* need to adjust jmp offset, since
+					 * pop %r9, pop %r10 take 4 bytes after call insn
+					 */
+					jmp_offset += 4;
+				}
 			}
 			if (!imm32 || !is_simm32(jmp_offset)) {
 				pr_err("unsupported bpf func %d addr %p image %p\n",
@@ -832,8 +831,13 @@
 			}
 			EMIT1_off32(0xE8, jmp_offset);
 			if (seen_ld_abs) {
-				EMIT2(0x41, 0x59); /* pop %r9 */
-				EMIT2(0x41, 0x5A); /* pop %r10 */
+				if (reload_skb_data) {
+					EMIT1(0x5F); /* pop %rdi */
+					emit_load_skb_data_hlen(&prog);
+				} else {
+					EMIT2(0x41, 0x59); /* pop %r9 */
+					EMIT2(0x41, 0x5A); /* pop %r10 */
+				}
 			}
 			break;
 
@@ -1099,7 +1103,7 @@
 	}
 
 	if (bpf_jit_enable > 1)
-		bpf_jit_dump(prog->len, proglen, 0, image);
+		bpf_jit_dump(prog->len, proglen, pass + 1, image);
 
 	if (image) {
 		bpf_flush_icache(header, image + proglen);
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index e4d193e..f3dfe0d 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -616,7 +616,6 @@
 # CONFIG_SLUB_DEBUG_ON is not set
 # CONFIG_SLUB_STATS is not set
 # CONFIG_DEBUG_RT_MUTEXES is not set
-# CONFIG_RT_MUTEX_TESTER is not set
 # CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_DEBUG_MUTEXES is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index ebcd1f6..93795d0 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -145,6 +145,10 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -250,75 +254,6 @@
 	return c;
 }
 
-
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %3, 0\n"
-			"       wsr     %1, scompare1\n"
-			"       and     %0, %1, %2\n"
-			"       s32c1i  %0, %3, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (~mask), "a" (v)
-			: "memory"
-			);
-#else
-	unsigned int all_f = -1;
-	unsigned int vval;
-
-	__asm__ __volatile__(
-			"       rsil    a15,"__stringify(TOPLEVEL)"\n"
-			"       l32i    %0, %2, 0\n"
-			"       xor     %1, %4, %3\n"
-			"       and     %0, %0, %4\n"
-			"       s32i    %0, %2, 0\n"
-			"       wsr     a15, ps\n"
-			"       rsync\n"
-			: "=&a" (vval), "=a" (mask)
-			: "a" (v), "a" (all_f), "1" (mask)
-			: "a15", "memory"
-			);
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %3, 0\n"
-			"       wsr     %1, scompare1\n"
-			"       or      %0, %1, %2\n"
-			"       s32c1i  %0, %3, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (mask), "a" (v)
-			: "memory"
-			);
-#else
-	unsigned int vval;
-
-	__asm__ __volatile__(
-			"       rsil    a15,"__stringify(TOPLEVEL)"\n"
-			"       l32i    %0, %2, 0\n"
-			"       or      %0, %0, %1\n"
-			"       s32i    %0, %2, 0\n"
-			"       wsr     a15, ps\n"
-			"       rsync\n"
-			: "=&a" (vval)
-			: "a" (mask), "a" (v)
-			: "a15", "memory"
-			);
-#endif
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _XTENSA_ATOMIC_H */
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b262527..d088cff 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -393,7 +393,7 @@
 		if (rq->cmd_flags & REQ_WRITE)
 			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
-		sg->page_link &= ~0x02;
+		sg_unmark_end(sg);
 		sg = sg_next(sg);
 		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
 			    q->dma_drain_size,
diff --git a/block/bounce.c b/block/bounce.c
index 2c310ea..0611aea 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -177,26 +177,8 @@
 	__bounce_end_io_read(bio, isa_page_pool);
 }
 
-#ifdef CONFIG_NEED_BOUNCE_POOL
-static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
-{
-	if (bio_data_dir(bio) != WRITE)
-		return 0;
-
-	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
-		return 0;
-
-	return bio_flagged(bio, BIO_SNAP_STABLE);
-}
-#else
-static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
-{
-	return 0;
-}
-#endif /* CONFIG_NEED_BOUNCE_POOL */
-
 static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
-			       mempool_t *pool, int force)
+			       mempool_t *pool)
 {
 	struct bio *bio;
 	int rw = bio_data_dir(*bio_orig);
@@ -204,8 +186,6 @@
 	struct bvec_iter iter;
 	unsigned i;
 
-	if (force)
-		goto bounce;
 	bio_for_each_segment(from, *bio_orig, iter)
 		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
 			goto bounce;
@@ -217,7 +197,7 @@
 	bio_for_each_segment_all(to, bio, i) {
 		struct page *page = to->bv_page;
 
-		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
+		if (page_to_pfn(page) <= queue_bounce_pfn(q))
 			continue;
 
 		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
@@ -255,7 +235,6 @@
 
 void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 {
-	int must_bounce;
 	mempool_t *pool;
 
 	/*
@@ -264,15 +243,13 @@
 	if (!bio_has_data(*bio_orig))
 		return;
 
-	must_bounce = must_snapshot_stable_pages(q, *bio_orig);
-
 	/*
 	 * for non-isa bounce case, just check if the bounce pfn is equal
 	 * to or bigger than the highest pfn in the system -- in that case,
 	 * don't waste time iterating over bio segments
 	 */
 	if (!(q->bounce_gfp & GFP_DMA)) {
-		if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
+		if (queue_bounce_pfn(q) >= blk_max_pfn)
 			return;
 		pool = page_pool;
 	} else {
@@ -283,7 +260,7 @@
 	/*
 	 * slow path
 	 */
-	__blk_queue_bounce(q, bio_orig, pool, must_bounce);
+	__blk_queue_bounce(q, bio_orig, pool);
 }
 
 EXPORT_SYMBOL(blk_queue_bounce);
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 9450752..af31a0e 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -145,7 +145,7 @@
 		sgl->cur = 0;
 
 		if (sg)
-			scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
 
 		list_add_tail(&sgl->list, &ctx->tsgl);
 	}
diff --git a/crypto/gcm.c b/crypto/gcm.c
index ddb4f29..bec329b 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -206,14 +206,14 @@
 	sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
 	sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
 	if (sg != pctx->src + 1)
-		scatterwalk_sg_chain(pctx->src, 2, sg);
+		sg_chain(pctx->src, 2, sg);
 
 	if (req->src != req->dst) {
 		sg_init_table(pctx->dst, 3);
 		sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
 		sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
 		if (sg != pctx->dst + 1)
-			scatterwalk_sg_chain(pctx->dst, 2, sg);
+			sg_chain(pctx->dst, 2, sg);
 	}
 }
 
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 4e2e6aa..46b4a8e 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -176,6 +176,8 @@
 
 source "drivers/mcb/Kconfig"
 
+source "drivers/perf/Kconfig"
+
 source "drivers/ras/Kconfig"
 
 source "drivers/thunderbolt/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 4c270f5..b250b36 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -11,7 +11,7 @@
 obj-$(CONFIG_GENERIC_PHY)	+= phy/
 
 # GPIO must come after pinctrl as gpios may need to mux pins etc
-obj-y				+= pinctrl/
+obj-$(CONFIG_PINCTRL)		+= pinctrl/
 obj-y				+= gpio/
 obj-y				+= pwm/
 obj-$(CONFIG_PCI)		+= pci/
@@ -161,6 +161,7 @@
 obj-$(CONFIG_FMC)		+= fmc/
 obj-$(CONFIG_POWERCAP)		+= powercap/
 obj-$(CONFIG_MCB)		+= mcb/
+obj-$(CONFIG_PERF_EVENTS)	+= perf/
 obj-$(CONFIG_RAS)		+= ras/
 obj-$(CONFIG_THUNDERBOLT)	+= thunderbolt/
 obj-$(CONFIG_CORESIGHT)		+= hwtracing/coresight/
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 6d08446..12fe0f3 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -27,12 +27,11 @@
 #include <linux/io.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 
 #include <linux/libata.h>
 #include <scsi/scsi_host.h>
 
-#include <asm/gpio.h>
-
 #define DRV_NAME	"pata-rb532-cf"
 #define DRV_VERSION	"0.1.0"
 #define DRV_DESC	"PATA driver for RouterBOARD 532 Compact Flash"
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 31df474d..560751b 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -392,6 +392,16 @@
 	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
 		int page_nid;
 
+		/*
+		 * A memory block can have several absent sections from its
+		 * start; skip the pfn range of any absent section.
+		 */
+		if (!pfn_present(pfn)) {
+			pfn = round_down(pfn + PAGES_PER_SECTION,
+					 PAGES_PER_SECTION) - 1;
+			continue;
+		}
+
 		page_nid = get_nid_for_pfn(pfn);
 		if (page_nid < 0)
 			continue;
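
The round_down() arithmetic above relies on PAGES_PER_SECTION being a power of
two: it moves pfn to the last page of the current (absent) section, so the
loop's pfn++ lands on the first page of the next section. For example, with
PAGES_PER_SECTION = 0x8000 and pfn = 0x8123, round_down(0x8123 + 0x8000,
0x8000) - 1 = 0xffff, and the increment resumes the scan at 0x10000.
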
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 841b15c..ff03f23 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -16,6 +16,8 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/property.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
 
 /**
  * device_add_property_set - Add a collection of properties to a device object.
@@ -154,6 +156,7 @@
  *	   %-ENODATA if the property does not have a value,
  *	   %-EPROTO if the property is not an array of numbers,
  *	   %-EOVERFLOW if the size of the property is not as expected.
+ *	   %-ENXIO if no suitable firmware interface is present.
  */
 int device_property_read_u8_array(struct device *dev, const char *propname,
 				  u8 *val, size_t nval)
@@ -178,6 +181,7 @@
  *	   %-ENODATA if the property does not have a value,
  *	   %-EPROTO if the property is not an array of numbers,
  *	   %-EOVERFLOW if the size of the property is not as expected.
+ *	   %-ENXIO if no suitable firmware interface is present.
  */
 int device_property_read_u16_array(struct device *dev, const char *propname,
 				   u16 *val, size_t nval)
@@ -202,6 +206,7 @@
  *	   %-ENODATA if the property does not have a value,
  *	   %-EPROTO if the property is not an array of numbers,
  *	   %-EOVERFLOW if the size of the property is not as expected.
+ *	   %-ENXIO if no suitable firmware interface is present.
  */
 int device_property_read_u32_array(struct device *dev, const char *propname,
 				   u32 *val, size_t nval)
@@ -226,6 +231,7 @@
  *	   %-ENODATA if the property does not have a value,
  *	   %-EPROTO if the property is not an array of numbers,
  *	   %-EOVERFLOW if the size of the property is not as expected.
+ *	   %-ENXIO if no suitable firmware interface is present.
  */
 int device_property_read_u64_array(struct device *dev, const char *propname,
 				   u64 *val, size_t nval)
@@ -250,6 +256,7 @@
  *	   %-ENODATA if the property does not have a value,
  *	   %-EPROTO or %-EILSEQ if the property is not an array of strings,
  *	   %-EOVERFLOW if the size of the property is not as expected.
+ *	   %-ENXIO if no suitable firmware interface is present.
  */
 int device_property_read_string_array(struct device *dev, const char *propname,
 				      const char **val, size_t nval)
@@ -271,6 +278,7 @@
  *	   %-EINVAL if given arguments are not valid,
  *	   %-ENODATA if the property does not have a value,
  *	   %-EPROTO or %-EILSEQ if the property type is not a string.
+ *	   %-ENXIO if no suitable firmware interface is present.
  */
 int device_property_read_string(struct device *dev, const char *propname,
 				const char **val)
@@ -292,9 +300,11 @@
 	else if (is_acpi_node(_fwnode_)) \
 		_ret_ = acpi_dev_prop_read(to_acpi_node(_fwnode_), _propname_, \
 					   _proptype_, _val_, _nval_); \
-	else \
+	else if (is_pset(_fwnode_)) \
 		_ret_ = pset_prop_read_array(to_pset(_fwnode_), _propname_, \
 					     _proptype_, _val_, _nval_); \
+	else \
+		_ret_ = -ENXIO; \
 	_ret_; \
 })
 
@@ -432,9 +442,10 @@
 	else if (is_acpi_node(fwnode))
 		return acpi_dev_prop_read(to_acpi_node(fwnode), propname,
 					  DEV_PROP_STRING, val, nval);
-
-	return pset_prop_read_array(to_pset(fwnode), propname,
-				    DEV_PROP_STRING, val, nval);
+	else if (is_pset(fwnode))
+		return pset_prop_read_array(to_pset(fwnode), propname,
+					    DEV_PROP_STRING, val, nval);
+	return -ENXIO;
 }
 EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
 
@@ -535,3 +546,79 @@
 	return coherent;
 }
 EXPORT_SYMBOL_GPL(device_dma_is_coherent);
+
+/**
+ * device_get_phy_mode - Get phy mode for given device
+ * @dev:	Pointer to the given device
+ *
+ * The function gets the phy interface string from the 'phy-mode' or
+ * 'phy-connection-type' property and returns its index in the phy_modes
+ * table, or a negative errno on error.
+ */
+int device_get_phy_mode(struct device *dev)
+{
+	const char *pm;
+	int err, i;
+
+	err = device_property_read_string(dev, "phy-mode", &pm);
+	if (err < 0)
+		err = device_property_read_string(dev,
+						  "phy-connection-type", &pm);
+	if (err < 0)
+		return err;
+
+	for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
+		if (!strcasecmp(pm, phy_modes(i)))
+			return i;
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(device_get_phy_mode);
+
+static void *device_get_mac_addr(struct device *dev,
+				 const char *name, char *addr,
+				 int alen)
+{
+	int ret = device_property_read_u8_array(dev, name, addr, alen);
+
+	if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
+		return addr;
+	return NULL;
+}
+
+/**
+ * device_get_mac_address - Get the MAC for a given device
+ * @dev:	Pointer to the device
+ * @addr:	Address of buffer to store the MAC in
+ * @alen:	Length of the buffer pointed to by addr, should be ETH_ALEN
+ *
+ * Search the firmware node for the best MAC address to use.  'mac-address' is
+ * checked first, because that is supposed to contain the "most recent" MAC
+ * address. If that isn't set, then 'local-mac-address' is checked next,
+ * because that is the default address.  If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree.
+ *
+ * Note that the 'address' property is supposed to contain a virtual address of
+ * the register set, but some DTS files have redefined that property to be the
+ * MAC address.
+ *
+ * All-zero MAC addresses are rejected, because those could be properties that
+ * exist in the firmware tables, but were not updated by the firmware.  For
+ * example, the DTS could define 'mac-address' and 'local-mac-address', with
+ * zero MAC addresses.  Some older U-Boots only initialized 'local-mac-address'.
+ * In this case, the real MAC is in 'local-mac-address', and 'mac-address'
+ * exists but is all zeros.
+ */
+void *device_get_mac_address(struct device *dev, char *addr, int alen)
+{
+	addr = device_get_mac_addr(dev, "mac-address", addr, alen);
+	if (addr)
+		return addr;
+
+	addr = device_get_mac_addr(dev, "local-mac-address", addr, alen);
+	if (addr)
+		return addr;
+
+	return device_get_mac_addr(dev, "address", addr, alen);
+}
+EXPORT_SYMBOL(device_get_mac_address);
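
device_get_phy_mode() and device_get_mac_address() added above are
firmware-agnostic counterparts to of_get_phy_mode() and of_get_mac_address():
by going through the unified property API they work on both DT and ACPI
systems. A hypothetical ethernet probe fragment (the driver, 'dev' and 'ndev'
are illustrative):

	#include <linux/etherdevice.h>
	#include <linux/phy.h>
	#include <linux/property.h>

	static int example_eth_probe(struct device *dev, struct net_device *ndev)
	{
		char addr[ETH_ALEN];
		int phy_mode;

		phy_mode = device_get_phy_mode(dev);	/* DT or ACPI */
		if (phy_mode < 0)
			return phy_mode;

		if (device_get_mac_address(dev, addr, ETH_ALEN))
			memcpy(ndev->dev_addr, addr, ETH_ALEN);
		else
			eth_hw_addr_random(ndev);  /* no valid firmware MAC */

		return 0;
	}
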
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index b2b2849..873ddf9 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -136,7 +136,7 @@
 	/* if set, the HW registers are known to match map->reg_defaults */
 	bool no_sync_defaults;
 
-	struct reg_default *patch;
+	struct reg_sequence *patch;
 	int patch_regs;
 
 	/* if set, converts bulk rw to single rw */
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 7111d04..0a849ee 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -34,7 +34,7 @@
 
 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 			       unsigned int mask, unsigned int val,
-			       bool *change);
+			       bool *change, bool force_write);
 
 static int _regmap_bus_reg_read(void *context, unsigned int reg,
 				unsigned int *val);
@@ -1178,7 +1178,7 @@
 		ret = _regmap_update_bits(map, range->selector_reg,
 					  range->selector_mask,
 					  win_page << range->selector_shift,
-					  &page_chg);
+					  &page_chg, false);
 
 		map->work_buf = orig_work_buf;
 
@@ -1624,6 +1624,18 @@
 }
 EXPORT_SYMBOL_GPL(regmap_fields_write);
 
+int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
+			unsigned int val)
+{
+	if (id >= field->id_size)
+		return -EINVAL;
+
+	return regmap_write_bits(field->regmap,
+				  field->reg + (field->id_offset * id),
+				  field->mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_force_write);
+
 /**
  * regmap_fields_update_bits():	Perform a read/modify/write cycle
  *                              on the register field
@@ -1743,7 +1755,7 @@
 * relative. The page register has been written if that was necessary.
  */
 static int _regmap_raw_multi_reg_write(struct regmap *map,
-				       const struct reg_default *regs,
+				       const struct reg_sequence *regs,
 				       size_t num_regs)
 {
 	int ret;
@@ -1800,12 +1812,12 @@
 }
 
 static int _regmap_range_multi_paged_reg_write(struct regmap *map,
-					       struct reg_default *regs,
+					       struct reg_sequence *regs,
 					       size_t num_regs)
 {
 	int ret;
 	int i, n;
-	struct reg_default *base;
+	struct reg_sequence *base;
 	unsigned int this_page = 0;
 	/*
 	 * the set of registers is not necessarily in order, but
@@ -1843,7 +1855,7 @@
 }
 
 static int _regmap_multi_reg_write(struct regmap *map,
-				   const struct reg_default *regs,
+				   const struct reg_sequence *regs,
 				   size_t num_regs)
 {
 	int i;
@@ -1895,8 +1907,8 @@
 		struct regmap_range_node *range;
 		range = _regmap_range_lookup(map, reg);
 		if (range) {
-			size_t len = sizeof(struct reg_default)*num_regs;
-			struct reg_default *base = kmemdup(regs, len,
+			size_t len = sizeof(struct reg_sequence)*num_regs;
+			struct reg_sequence *base = kmemdup(regs, len,
 							   GFP_KERNEL);
 			if (!base)
 				return -ENOMEM;
@@ -1929,7 +1941,7 @@
  * A value of zero will be returned on success, a negative errno will be
  * returned in error cases.
  */
-int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
 			   int num_regs)
 {
 	int ret;
@@ -1962,7 +1974,7 @@
  * be returned in error cases.
  */
 int regmap_multi_reg_write_bypassed(struct regmap *map,
-				    const struct reg_default *regs,
+				    const struct reg_sequence *regs,
 				    int num_regs)
 {
 	int ret;
@@ -2327,7 +2339,7 @@
 
 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 			       unsigned int mask, unsigned int val,
-			       bool *change)
+			       bool *change, bool force_write)
 {
 	int ret;
 	unsigned int tmp, orig;
@@ -2339,7 +2351,7 @@
 	tmp = orig & ~mask;
 	tmp |= val & mask;
 
-	if (tmp != orig) {
+	if (force_write || (tmp != orig)) {
 		ret = _regmap_write(map, reg, tmp);
 		if (change)
 			*change = true;
@@ -2367,7 +2379,7 @@
 	int ret;
 
 	map->lock(map->lock_arg);
-	ret = _regmap_update_bits(map, reg, mask, val, NULL);
+	ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
 	map->unlock(map->lock_arg);
 
 	return ret;
@@ -2375,6 +2387,29 @@
 EXPORT_SYMBOL_GPL(regmap_update_bits);
 
 /**
+ * regmap_write_bits: Perform a read/modify/write cycle on the register map
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_write_bits(struct regmap *map, unsigned int reg,
+		      unsigned int mask, unsigned int val)
+{
+	int ret;
+
+	map->lock(map->lock_arg);
+	ret = _regmap_update_bits(map, reg, mask, val, NULL, true);
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write_bits);
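For contrast with regmap_update_bits(): the forced variant writes even when the new value equals the cached one, which matters for self-clearing or write-one-to-clear bits. A minimal sketch, assuming a hypothetical register layout and <linux/bitops.h>:

#define REG_IRQ_ACK	0x10	/* hypothetical write-one-to-clear register */
#define IRQ_DONE	BIT(0)

static int foo_ack_irq(struct regmap *map)
{
	/* regmap_update_bits() would skip the write if the cache already
	 * holds IRQ_DONE; regmap_write_bits() always hits the hardware. */
	return regmap_write_bits(map, REG_IRQ_ACK, IRQ_DONE, IRQ_DONE);
}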
+
+/**
  * regmap_update_bits_async: Perform a read/modify/write cycle on the register
  *                           map asynchronously
  *
@@ -2398,7 +2433,7 @@
 
 	map->async = true;
 
-	ret = _regmap_update_bits(map, reg, mask, val, NULL);
+	ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
 
 	map->async = false;
 
@@ -2427,7 +2462,7 @@
 	int ret;
 
 	map->lock(map->lock_arg);
-	ret = _regmap_update_bits(map, reg, mask, val, change);
+	ret = _regmap_update_bits(map, reg, mask, val, change, false);
 	map->unlock(map->lock_arg);
 	return ret;
 }
@@ -2460,7 +2495,7 @@
 
 	map->async = true;
 
-	ret = _regmap_update_bits(map, reg, mask, val, change);
+	ret = _regmap_update_bits(map, reg, mask, val, change, false);
 
 	map->async = false;
 
@@ -2552,10 +2587,10 @@
  * The caller must ensure that this function cannot be called
  * concurrently with either itself or regcache_sync().
  */
-int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
+int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
 			  int num_regs)
 {
-	struct reg_default *p;
+	struct reg_sequence *p;
 	int ret;
 	bool bypass;
 
@@ -2564,7 +2599,7 @@
 		return 0;
 
 	p = krealloc(map->patch,
-		     sizeof(struct reg_default) * (map->patch_regs + num_regs),
+		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
 		     GFP_KERNEL);
 	if (p) {
 		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
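To illustrate the rename above: sequences that are written to the device now use struct reg_sequence rather than struct reg_default, which stays reserved for cached defaults. A hedged sketch with invented register values:

static const struct reg_sequence foo_init_seq[] = {
	{ 0x00, 0x01 },	/* hypothetical: enable core  */
	{ 0x01, 0xff },	/* hypothetical: unmask all   */
	{ 0x02, 0x80 },	/* hypothetical: start engine */
};

static int foo_init(struct regmap *map)
{
	return regmap_multi_reg_write(map, foo_init_seq,
				      ARRAY_SIZE(foo_init_seq));
}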
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index be5fffb..023d448 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -92,7 +92,7 @@
 config BCMA_DRIVER_GPIO
 	bool "BCMA GPIO driver"
 	depends on BCMA && GPIOLIB
-	select IRQ_DOMAIN if BCMA_HOST_SOC
+	select GPIOLIB_IRQCHIP if BCMA_HOST_SOC
 	help
 	  Driver to provide access to the GPIO pins of the bcma bus.
 
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 15f2b2e..38f1567 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -34,6 +34,7 @@
 int bcma_bus_suspend(struct bcma_bus *bus);
 int bcma_bus_resume(struct bcma_bus *bus);
 #endif
+struct device *bcma_bus_get_host_dev(struct bcma_bus *bus);
 
 /* scan.c */
 void bcma_detect_chip(struct bcma_bus *bus);
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 5f6018e..504899a7 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -8,10 +8,8 @@
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
-#include <linux/gpio.h>
-#include <linux/irq.h>
+#include <linux/gpio/driver.h>
 #include <linux/interrupt.h>
-#include <linux/irqdomain.h>
 #include <linux/export.h>
 #include <linux/bcma/bcma.h>
 
@@ -79,19 +77,11 @@
 }
 
 #if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
-static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
-{
-	struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
-
-	if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
-		return irq_find_mapping(cc->irq_domain, gpio);
-	else
-		return -EINVAL;
-}
 
 static void bcma_gpio_irq_unmask(struct irq_data *d)
 {
-	struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc);
 	int gpio = irqd_to_hwirq(d);
 	u32 val = bcma_chipco_gpio_in(cc, BIT(gpio));
 
@@ -101,7 +91,8 @@
 
 static void bcma_gpio_irq_mask(struct irq_data *d)
 {
-	struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc);
 	int gpio = irqd_to_hwirq(d);
 
 	bcma_chipco_gpio_intmask(cc, BIT(gpio), 0);
@@ -116,6 +107,7 @@
 static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
 {
 	struct bcma_drv_cc *cc = dev_id;
+	struct gpio_chip *gc = &cc->gpio;
 	u32 val = bcma_cc_read32(cc, BCMA_CC_GPIOIN);
 	u32 mask = bcma_cc_read32(cc, BCMA_CC_GPIOIRQ);
 	u32 pol = bcma_cc_read32(cc, BCMA_CC_GPIOPOL);
@@ -125,81 +117,58 @@
 	if (!irqs)
 		return IRQ_NONE;
 
-	for_each_set_bit(gpio, &irqs, cc->gpio.ngpio)
-		generic_handle_irq(bcma_gpio_to_irq(&cc->gpio, gpio));
+	for_each_set_bit(gpio, &irqs, gc->ngpio)
+		generic_handle_irq(irq_find_mapping(gc->irqdomain, gpio));
 	bcma_chipco_gpio_polarity(cc, irqs, val & irqs);
 
 	return IRQ_HANDLED;
 }
 
-static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
 {
 	struct gpio_chip *chip = &cc->gpio;
-	int gpio, hwirq, err;
+	int hwirq, err;
 
 	if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
 		return 0;
 
-	cc->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
-					       &irq_domain_simple_ops, cc);
-	if (!cc->irq_domain) {
-		err = -ENODEV;
-		goto err_irq_domain;
-	}
-	for (gpio = 0; gpio < chip->ngpio; gpio++) {
-		int irq = irq_create_mapping(cc->irq_domain, gpio);
-
-		irq_set_chip_data(irq, cc);
-		irq_set_chip_and_handler(irq, &bcma_gpio_irq_chip,
-					 handle_simple_irq);
-	}
-
 	hwirq = bcma_core_irq(cc->core, 0);
 	err = request_irq(hwirq, bcma_gpio_irq_handler, IRQF_SHARED, "gpio",
 			  cc);
 	if (err)
-		goto err_req_irq;
+		return err;
 
 	bcma_chipco_gpio_intmask(cc, ~0, 0);
 	bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO);
 
-	return 0;
-
-err_req_irq:
-	for (gpio = 0; gpio < chip->ngpio; gpio++) {
-		int irq = irq_find_mapping(cc->irq_domain, gpio);
-
-		irq_dispose_mapping(irq);
+	err = gpiochip_irqchip_add(chip,
+				    &bcma_gpio_irq_chip,
+				    0,
+				    handle_simple_irq,
+				    IRQ_TYPE_NONE);
+	if (err) {
+		free_irq(hwirq, cc);
+		return err;
 	}
-	irq_domain_remove(cc->irq_domain);
-err_irq_domain:
-	return err;
+
+	return 0;
 }
 
-static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
 {
-	struct gpio_chip *chip = &cc->gpio;
-	int gpio;
-
 	if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
 		return;
 
 	bcma_cc_mask32(cc, BCMA_CC_IRQMASK, ~BCMA_CC_IRQ_GPIO);
 	free_irq(bcma_core_irq(cc->core, 0), cc);
-	for (gpio = 0; gpio < chip->ngpio; gpio++) {
-		int irq = irq_find_mapping(cc->irq_domain, gpio);
-
-		irq_dispose_mapping(irq);
-	}
-	irq_domain_remove(cc->irq_domain);
 }
 #else
-static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
 {
 	return 0;
 }
 
-static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
 {
 }
 #endif
@@ -218,9 +187,8 @@
 	chip->set		= bcma_gpio_set_value;
 	chip->direction_input	= bcma_gpio_direction_input;
 	chip->direction_output	= bcma_gpio_direction_output;
-#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
-	chip->to_irq		= bcma_gpio_to_irq;
-#endif
+	chip->owner		= THIS_MODULE;
+	chip->dev		= bcma_bus_get_host_dev(bus);
 #if IS_BUILTIN(CONFIG_OF)
 	if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
 		chip->of_node	= cc->core->dev.of_node;
@@ -248,13 +216,13 @@
 	else
 		chip->base		= -1;
 
-	err = bcma_gpio_irq_domain_init(cc);
+	err = gpiochip_add(chip);
 	if (err)
 		return err;
 
-	err = gpiochip_add(chip);
+	err = bcma_gpio_irq_init(cc);
 	if (err) {
-		bcma_gpio_irq_domain_exit(cc);
+		gpiochip_remove(chip);
 		return err;
 	}
 
@@ -263,7 +231,7 @@
 
 int bcma_gpio_unregister(struct bcma_drv_cc *cc)
 {
-	bcma_gpio_irq_domain_exit(cc);
+	bcma_gpio_irq_exit(cc);
 	gpiochip_remove(&cc->gpio);
 	return 0;
 }
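With GPIOLIB_IRQCHIP, the mapping that the removed bcma_gpio_to_irq() used to provide now comes from gpiolib itself, so consumers keep using the generic helper. A hypothetical consumer sketch (assumes <linux/gpio.h> and <linux/interrupt.h>):

static int foo_request_button_irq(unsigned int gpio, irq_handler_t handler,
				  void *data)
{
	int irq = gpio_to_irq(gpio);	/* resolved via the gpiolib irqdomain */

	if (irq < 0)
		return irq;

	return request_irq(irq, handler, IRQF_TRIGGER_FALLING, "foo-btn", data);
}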
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 9635f10..24882c1 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -7,11 +7,14 @@
 
 #include "bcma_private.h"
 #include <linux/module.h>
+#include <linux/mmc/sdio_func.h>
 #include <linux/platform_device.h>
+#include <linux/pci.h>
 #include <linux/bcma/bcma.h>
 #include <linux/slab.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/of_platform.h>
 
 MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
 MODULE_LICENSE("GPL");
@@ -268,6 +271,28 @@
 	}
 }
 
+struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
+{
+	switch (bus->hosttype) {
+	case BCMA_HOSTTYPE_PCI:
+		if (bus->host_pci)
+			return &bus->host_pci->dev;
+		else
+			return NULL;
+	case BCMA_HOSTTYPE_SOC:
+		if (bus->host_pdev)
+			return &bus->host_pdev->dev;
+		else
+			return NULL;
+	case BCMA_HOSTTYPE_SDIO:
+		if (bus->host_sdio)
+			return &bus->host_sdio->dev;
+		else
+			return NULL;
+	}
+	return NULL;
+}
+
 void bcma_init_bus(struct bcma_bus *bus)
 {
 	mutex_lock(&bcma_buses_mutex);
@@ -387,6 +412,7 @@
 {
 	int err;
 	struct bcma_device *core;
+	struct device *dev;
 
 	/* Scan for devices (cores) */
 	err = bcma_bus_scan(bus);
@@ -409,6 +435,16 @@
 		bcma_core_pci_early_init(&bus->drv_pci[0]);
 	}
 
+	dev = bcma_bus_get_host_dev(bus);
+	/* TODO: remove the IS_BUILTIN(CONFIG_BCMA) check when
+	 * of_default_bus_match_table is exported or in some other way
+	 * accessible. This is just a temporary workaround.
+	 */
+	if (IS_BUILTIN(CONFIG_BCMA) && dev) {
+		of_platform_populate(dev->of_node, of_default_bus_match_table,
+				     NULL, dev);
+	}
+
 	/* Cores providing flash access go before SPROM init */
 	list_for_each_entry(core, &bus->cores, list) {
 		if (bcma_is_core_needed_early(core->id.id))
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index f169faf..293495a 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -33,6 +33,7 @@
 #include <linux/net.h>
 #include <linux/kthread.h>
 #include <linux/types.h>
+#include <linux/debugfs.h>
 
 #include <asm/uaccess.h>
 #include <asm/types.h>
@@ -40,8 +41,7 @@
 #include <linux/nbd.h>
 
 struct nbd_device {
-	int flags;
-	int harderror;		/* Code of hard error			*/
+	u32 flags;
 	struct socket * sock;	/* If == NULL, device is not ready, yet	*/
 	int magic;
 
@@ -56,11 +56,24 @@
 	struct gendisk *disk;
 	int blksize;
 	loff_t bytesize;
-	pid_t pid; /* pid of nbd-client, if attached */
 	int xmit_timeout;
-	int disconnect; /* a disconnect has been requested by user */
+	bool disconnect; /* a disconnect has been requested by user */
+
+	struct timer_list timeout_timer;
+	struct task_struct *task_recv;
+	struct task_struct *task_send;
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	struct dentry *dbg_dir;
+#endif
 };
 
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static struct dentry *nbd_dbg_dir;
+#endif
+
+#define nbd_name(nbd) ((nbd)->disk->disk_name)
+
 #define NBD_MAGIC 0x68797548
 
 static unsigned int nbds_max = 16;
@@ -113,26 +126,36 @@
 /*
  * Forcibly shutdown the socket causing all listeners to error
  */
-static void sock_shutdown(struct nbd_device *nbd, int lock)
+static void sock_shutdown(struct nbd_device *nbd)
 {
-	if (lock)
-		mutex_lock(&nbd->tx_lock);
-	if (nbd->sock) {
-		dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
-		kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
-		nbd->sock = NULL;
-	}
-	if (lock)
-		mutex_unlock(&nbd->tx_lock);
+	if (!nbd->sock)
+		return;
+
+	dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
+	kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
+	nbd->sock = NULL;
+	del_timer_sync(&nbd->timeout_timer);
 }
 
 static void nbd_xmit_timeout(unsigned long arg)
 {
-	struct task_struct *task = (struct task_struct *)arg;
+	struct nbd_device *nbd = (struct nbd_device *)arg;
+	struct task_struct *task;
 
-	printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n",
-		task->comm, task->pid);
-	force_sig(SIGKILL, task);
+	if (list_empty(&nbd->queue_head))
+		return;
+
+	nbd->disconnect = true;
+
+	task = READ_ONCE(nbd->task_recv);
+	if (task)
+		force_sig(SIGKILL, task);
+
+	task = READ_ONCE(nbd->task_send);
+	if (task)
+		force_sig(SIGKILL, task);
+
+	dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
 }
 
 /*
@@ -171,33 +194,12 @@
 		msg.msg_controllen = 0;
 		msg.msg_flags = msg_flags | MSG_NOSIGNAL;
 
-		if (send) {
-			struct timer_list ti;
-
-			if (nbd->xmit_timeout) {
-				init_timer(&ti);
-				ti.function = nbd_xmit_timeout;
-				ti.data = (unsigned long)current;
-				ti.expires = jiffies + nbd->xmit_timeout;
-				add_timer(&ti);
-			}
+		if (send)
 			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
-			if (nbd->xmit_timeout)
-				del_timer_sync(&ti);
-		} else
+		else
 			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
 						msg.msg_flags);
 
-		if (signal_pending(current)) {
-			siginfo_t info;
-			printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
-				task_pid_nr(current), current->comm,
-				dequeue_signal_lock(current, &current->blocked, &info));
-			result = -EINTR;
-			sock_shutdown(nbd, !send);
-			break;
-		}
-
 		if (result <= 0) {
 			if (result == 0)
 				result = -EPIPE; /* short read */
@@ -210,6 +212,9 @@
 	sigprocmask(SIG_SETMASK, &oldset, NULL);
 	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 
+	if (!send && nbd->xmit_timeout)
+		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);
+
 	return result;
 }
 
@@ -333,26 +338,24 @@
 	if (result <= 0) {
 		dev_err(disk_to_dev(nbd->disk),
 			"Receive control failed (result %d)\n", result);
-		goto harderror;
+		return ERR_PTR(result);
 	}
 
 	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
 		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
 				(unsigned long)ntohl(reply.magic));
-		result = -EPROTO;
-		goto harderror;
+		return ERR_PTR(-EPROTO);
 	}
 
 	req = nbd_find_request(nbd, *(struct request **)reply.handle);
 	if (IS_ERR(req)) {
 		result = PTR_ERR(req);
 		if (result != -ENOENT)
-			goto harderror;
+			return ERR_PTR(result);
 
 		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
 			reply.handle);
-		result = -EBADR;
-		goto harderror;
+		return ERR_PTR(-EBADR);
 	}
 
 	if (ntohl(reply.error)) {
@@ -380,18 +383,15 @@
 		}
 	}
 	return req;
-harderror:
-	nbd->harderror = result;
-	return NULL;
 }
 
 static ssize_t pid_show(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
 	struct gendisk *disk = dev_to_disk(dev);
+	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
 
-	return sprintf(buf, "%ld\n",
-		(long) ((struct nbd_device *)disk->private_data)->pid);
+	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
 }
 
 static struct device_attribute pid_attr = {
@@ -399,7 +399,7 @@
 	.show = pid_show,
 };
 
-static int nbd_do_it(struct nbd_device *nbd)
+static int nbd_thread_recv(struct nbd_device *nbd)
 {
 	struct request *req;
 	int ret;
@@ -407,20 +407,43 @@
 	BUG_ON(nbd->magic != NBD_MAGIC);
 
 	sk_set_memalloc(nbd->sock->sk);
-	nbd->pid = task_pid_nr(current);
+
+	nbd->task_recv = current;
+
 	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
 	if (ret) {
 		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
-		nbd->pid = 0;
+		nbd->task_recv = NULL;
 		return ret;
 	}
 
-	while ((req = nbd_read_stat(nbd)) != NULL)
+	while (1) {
+		req = nbd_read_stat(nbd);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			break;
+		}
+
 		nbd_end_request(nbd, req);
+	}
 
 	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
-	nbd->pid = 0;
-	return 0;
+
+	nbd->task_recv = NULL;
+
+	if (signal_pending(current)) {
+		siginfo_t info;
+
+		ret = dequeue_signal_lock(current, &current->blocked, &info);
+		dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
+			 task_pid_nr(current), current->comm, ret);
+		mutex_lock(&nbd->tx_lock);
+		sock_shutdown(nbd);
+		mutex_unlock(&nbd->tx_lock);
+		ret = -ETIMEDOUT;
+	}
+
+	return ret;
 }
 
 static void nbd_clear_que(struct nbd_device *nbd)
@@ -455,6 +478,7 @@
 		req->errors++;
 		nbd_end_request(nbd, req);
 	}
+	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
 }
 
 
@@ -482,6 +506,9 @@
 
 	nbd->active_req = req;
 
+	if (nbd->xmit_timeout && list_empty_careful(&nbd->queue_head))
+		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);
+
 	if (nbd_send_req(nbd, req) != 0) {
 		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
 		req->errors++;
@@ -503,11 +530,13 @@
 	nbd_end_request(nbd, req);
 }
 
-static int nbd_thread(void *data)
+static int nbd_thread_send(void *data)
 {
 	struct nbd_device *nbd = data;
 	struct request *req;
 
+	nbd->task_send = current;
+
 	set_user_nice(current, MIN_NICE);
 	while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
 		/* wait for something to do */
@@ -515,6 +544,20 @@
 					 kthread_should_stop() ||
 					 !list_empty(&nbd->waiting_queue));
 
+		if (signal_pending(current)) {
+			siginfo_t info;
+			int ret;
+
+			ret = dequeue_signal_lock(current, &current->blocked,
+						  &info);
+			dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
+				 task_pid_nr(current), current->comm, ret);
+			mutex_lock(&nbd->tx_lock);
+			sock_shutdown(nbd);
+			mutex_unlock(&nbd->tx_lock);
+			break;
+		}
+
 		/* extract request */
 		if (list_empty(&nbd->waiting_queue))
 			continue;
@@ -528,6 +571,9 @@
 		/* handle request */
 		nbd_handle_req(nbd, req);
 	}
+
+	nbd->task_send = NULL;
+
 	return 0;
 }
 
@@ -538,7 +584,7 @@
  *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
  */
 
-static void do_nbd_request(struct request_queue *q)
+static void nbd_request_handler(struct request_queue *q)
 		__releases(q->queue_lock) __acquires(q->queue_lock)
 {
 	struct request *req;
@@ -574,6 +620,9 @@
 	}
 }
 
+static int nbd_dev_dbg_init(struct nbd_device *nbd);
+static void nbd_dev_dbg_close(struct nbd_device *nbd);
+
 /* Must be called with tx_lock held */
 
 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
@@ -597,7 +646,7 @@
 		if (!nbd->sock)
 			return -EINVAL;
 
-		nbd->disconnect = 1;
+		nbd->disconnect = true;
 
 		nbd_send_req(nbd, &sreq);
 		return 0;
@@ -625,7 +674,7 @@
 			nbd->sock = sock;
 			if (max_part > 0)
 				bdev->bd_invalidated = 1;
-			nbd->disconnect = 0; /* we're connected now */
+			nbd->disconnect = false; /* we're connected now */
 			return 0;
 		}
 		return -EINVAL;
@@ -648,6 +697,12 @@
 
 	case NBD_SET_TIMEOUT:
 		nbd->xmit_timeout = arg * HZ;
+		if (arg)
+			mod_timer(&nbd->timeout_timer,
+				  jiffies + nbd->xmit_timeout);
+		else
+			del_timer_sync(&nbd->timeout_timer);
+
 		return 0;
 
 	case NBD_SET_FLAGS:
@@ -666,7 +721,7 @@
 		struct socket *sock;
 		int error;
 
-		if (nbd->pid)
+		if (nbd->task_recv)
 			return -EBUSY;
 		if (!nbd->sock)
 			return -EINVAL;
@@ -683,24 +738,24 @@
 		else
 			blk_queue_flush(nbd->disk->queue, 0);
 
-		thread = kthread_run(nbd_thread, nbd, "%s",
-				     nbd->disk->disk_name);
+		thread = kthread_run(nbd_thread_send, nbd, "%s",
+				     nbd_name(nbd));
 		if (IS_ERR(thread)) {
 			mutex_lock(&nbd->tx_lock);
 			return PTR_ERR(thread);
 		}
 
-		error = nbd_do_it(nbd);
+		nbd_dev_dbg_init(nbd);
+		error = nbd_thread_recv(nbd);
+		nbd_dev_dbg_close(nbd);
 		kthread_stop(thread);
 
 		mutex_lock(&nbd->tx_lock);
-		if (error)
-			return error;
-		sock_shutdown(nbd, 0);
+
+		sock_shutdown(nbd);
 		sock = nbd->sock;
 		nbd->sock = NULL;
 		nbd_clear_que(nbd);
-		dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
 		kill_bdev(bdev);
 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 		set_device_ro(bdev, false);
@@ -714,7 +769,7 @@
 			blkdev_reread_part(bdev);
 		if (nbd->disconnect) /* user requested, ignore socket errors */
 			return 0;
-		return nbd->harderror;
+		return error;
 	}
 
 	case NBD_CLEAR_QUE:
@@ -758,6 +813,161 @@
 	.ioctl =	nbd_ioctl,
 };
 
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+
+static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
+{
+	struct nbd_device *nbd = s->private;
+
+	if (nbd->task_recv)
+		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
+	if (nbd->task_send)
+		seq_printf(s, "send: %d\n", task_pid_nr(nbd->task_send));
+
+	return 0;
+}
+
+static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
+}
+
+static const struct file_operations nbd_dbg_tasks_ops = {
+	.open = nbd_dbg_tasks_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
+{
+	struct nbd_device *nbd = s->private;
+	u32 flags = nbd->flags;
+
+	seq_printf(s, "Hex: 0x%08x\n\n", flags);
+
+	seq_puts(s, "Known flags:\n");
+
+	if (flags & NBD_FLAG_HAS_FLAGS)
+		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
+	if (flags & NBD_FLAG_READ_ONLY)
+		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
+	if (flags & NBD_FLAG_SEND_FLUSH)
+		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
+	if (flags & NBD_FLAG_SEND_TRIM)
+		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
+
+	return 0;
+}
+
+static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, nbd_dbg_flags_show, inode->i_private);
+}
+
+static const struct file_operations nbd_dbg_flags_ops = {
+	.open = nbd_dbg_flags_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int nbd_dev_dbg_init(struct nbd_device *nbd)
+{
+	struct dentry *dir;
+	struct dentry *f;
+
+	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
+	if (IS_ERR_OR_NULL(dir)) {
+		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s' (%ld)\n",
+			nbd_name(nbd), PTR_ERR(dir));
+		return PTR_ERR(dir);
+	}
+	nbd->dbg_dir = dir;
+
+	f = debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
+	if (IS_ERR_OR_NULL(f)) {
+		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'tasks', %ld\n",
+			PTR_ERR(f));
+		return PTR_ERR(f);
+	}
+
+	f = debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
+	if (IS_ERR_OR_NULL(f)) {
+		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'size_bytes', %ld\n",
+			PTR_ERR(f));
+		return PTR_ERR(f);
+	}
+
+	f = debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
+	if (IS_ERR_OR_NULL(f)) {
+		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'timeout', %ld\n",
+			PTR_ERR(f));
+		return PTR_ERR(f);
+	}
+
+	f = debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
+	if (IS_ERR_OR_NULL(f)) {
+		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'blocksize', %ld\n",
+			PTR_ERR(f));
+		return PTR_ERR(f);
+	}
+
+	f = debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
+	if (IS_ERR_OR_NULL(f)) {
+		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'flags', %ld\n",
+			PTR_ERR(f));
+		return PTR_ERR(f);
+	}
+
+	return 0;
+}
+
+static void nbd_dev_dbg_close(struct nbd_device *nbd)
+{
+	debugfs_remove_recursive(nbd->dbg_dir);
+}
+
+static int nbd_dbg_init(void)
+{
+	struct dentry *dbg_dir;
+
+	dbg_dir = debugfs_create_dir("nbd", NULL);
+	if (IS_ERR(dbg_dir))
+		return PTR_ERR(dbg_dir);
+
+	nbd_dbg_dir = dbg_dir;
+
+	return 0;
+}
+
+static void nbd_dbg_close(void)
+{
+	debugfs_remove_recursive(nbd_dbg_dir);
+}
+
+#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */
+
+static int nbd_dev_dbg_init(struct nbd_device *nbd)
+{
+	return 0;
+}
+
+static void nbd_dev_dbg_close(struct nbd_device *nbd)
+{
+}
+
+static int nbd_dbg_init(void)
+{
+	return 0;
+}
+
+static void nbd_dbg_close(void)
+{
+}
+
+#endif
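
A userspace sketch (not part of this patch) for reading the new per-device file, assuming debugfs is mounted at /sys/kernel/debug and the device is nbd0:

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/kernel/debug/nbd/nbd0/tasks", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "recv: 1234" and "send: 1235" */
	fclose(f);
	return 0;
}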
+
 /*
  * And here should be modules and kernel interface 
  *  (Just smiley confuses emacs :-)
@@ -811,7 +1021,7 @@
 		 * every gendisk to have its very own request_queue struct.
 		 * These structs are big so we dynamically allocate them.
 		 */
-		disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
+		disk->queue = blk_init_queue(nbd_request_handler, &nbd_lock);
 		if (!disk->queue) {
 			put_disk(disk);
 			goto out;
@@ -835,6 +1045,8 @@
 
 	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
 
+	nbd_dbg_init();
+
 	for (i = 0; i < nbds_max; i++) {
 		struct gendisk *disk = nbd_dev[i].disk;
 		nbd_dev[i].magic = NBD_MAGIC;
@@ -842,6 +1054,9 @@
 		spin_lock_init(&nbd_dev[i].queue_lock);
 		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
 		mutex_init(&nbd_dev[i].tx_lock);
+		init_timer(&nbd_dev[i].timeout_timer);
+		nbd_dev[i].timeout_timer.function = nbd_xmit_timeout;
+		nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i];
 		init_waitqueue_head(&nbd_dev[i].active_wq);
 		init_waitqueue_head(&nbd_dev[i].waiting_wq);
 		nbd_dev[i].blksize = 1024;
@@ -868,6 +1083,9 @@
 static void __exit nbd_cleanup(void)
 {
 	int i;
+
+	nbd_dbg_close();
+
 	for (i = 0; i < nbds_max; i++) {
 		struct gendisk *disk = nbd_dev[i].disk;
 		nbd_dev[i].magic = 0;
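The per-send timers are gone; the single per-device timer is armed via NBD_SET_TIMEOUT. A hypothetical userspace sketch (nbd-client normally does this; the device path and timeout value are illustrative):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nbd.h>

int nbd_arm_timeout(const char *dev, unsigned long secs)
{
	int fd = open(dev, O_RDWR);	/* e.g. "/dev/nbd0" */
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, NBD_SET_TIMEOUT, secs);	/* 0 deletes the timer */
	close(fd);
	return ret;
}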
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 2f694d7..b97fc3f 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -72,6 +72,10 @@
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
+static bool use_cmb_sqes = true;
+module_param(use_cmb_sqes, bool, 0644);
+MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
+
 static DEFINE_SPINLOCK(dev_list_lock);
 static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
@@ -103,6 +107,7 @@
 	char irqname[24];	/* nvme4294967295-65535\0 */
 	spinlock_t q_lock;
 	struct nvme_command *sq_cmds;
+	struct nvme_command __iomem *sq_cmds_io;
 	volatile struct nvme_completion *cqes;
 	struct blk_mq_tags **tags;
 	dma_addr_t sq_dma_addr;
@@ -379,27 +384,28 @@
  *
  * Safe to use from interrupt context
  */
-static int __nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
+						struct nvme_command *cmd)
 {
 	u16 tail = nvmeq->sq_tail;
 
-	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+	if (nvmeq->sq_cmds_io)
+		memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
+	else
+		memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+
 	if (++tail == nvmeq->q_depth)
 		tail = 0;
 	writel(tail, nvmeq->q_db);
 	nvmeq->sq_tail = tail;
-
-	return 0;
 }
 
-static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
 {
 	unsigned long flags;
-	int ret;
 	spin_lock_irqsave(&nvmeq->q_lock, flags);
-	ret = __nvme_submit_cmd(nvmeq, cmd);
+	__nvme_submit_cmd(nvmeq, cmd);
 	spin_unlock_irqrestore(&nvmeq->q_lock, flags);
-	return ret;
 }
 
 static __le64 **iod_list(struct nvme_iod *iod)
@@ -730,18 +736,16 @@
 static void nvme_submit_priv(struct nvme_queue *nvmeq, struct request *req,
 		struct nvme_iod *iod)
 {
-	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+	struct nvme_command cmnd;
 
-	memcpy(cmnd, req->cmd, sizeof(struct nvme_command));
-	cmnd->rw.command_id = req->tag;
+	memcpy(&cmnd, req->cmd, sizeof(cmnd));
+	cmnd.rw.command_id = req->tag;
 	if (req->nr_phys_segments) {
-		cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-		cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
+		cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+		cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
 	}
 
-	if (++nvmeq->sq_tail == nvmeq->q_depth)
-		nvmeq->sq_tail = 0;
-	writel(nvmeq->sq_tail, nvmeq->q_db);
+	__nvme_submit_cmd(nvmeq, &cmnd);
 }
 
 /*
@@ -754,45 +758,41 @@
 {
 	struct nvme_dsm_range *range =
 				(struct nvme_dsm_range *)iod_list(iod)[0];
-	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+	struct nvme_command cmnd;
 
 	range->cattr = cpu_to_le32(0);
 	range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
 	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
 
-	memset(cmnd, 0, sizeof(*cmnd));
-	cmnd->dsm.opcode = nvme_cmd_dsm;
-	cmnd->dsm.command_id = req->tag;
-	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
-	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
-	cmnd->dsm.nr = 0;
-	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+	memset(&cmnd, 0, sizeof(cmnd));
+	cmnd.dsm.opcode = nvme_cmd_dsm;
+	cmnd.dsm.command_id = req->tag;
+	cmnd.dsm.nsid = cpu_to_le32(ns->ns_id);
+	cmnd.dsm.prp1 = cpu_to_le64(iod->first_dma);
+	cmnd.dsm.nr = 0;
+	cmnd.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
-	if (++nvmeq->sq_tail == nvmeq->q_depth)
-		nvmeq->sq_tail = 0;
-	writel(nvmeq->sq_tail, nvmeq->q_db);
+	__nvme_submit_cmd(nvmeq, &cmnd);
 }
 
 static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 								int cmdid)
 {
-	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+	struct nvme_command cmnd;
 
-	memset(cmnd, 0, sizeof(*cmnd));
-	cmnd->common.opcode = nvme_cmd_flush;
-	cmnd->common.command_id = cmdid;
-	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+	memset(&cmnd, 0, sizeof(cmnd));
+	cmnd.common.opcode = nvme_cmd_flush;
+	cmnd.common.command_id = cmdid;
+	cmnd.common.nsid = cpu_to_le32(ns->ns_id);
 
-	if (++nvmeq->sq_tail == nvmeq->q_depth)
-		nvmeq->sq_tail = 0;
-	writel(nvmeq->sq_tail, nvmeq->q_db);
+	__nvme_submit_cmd(nvmeq, &cmnd);
 }
 
 static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 							struct nvme_ns *ns)
 {
 	struct request *req = iod_get_private(iod);
-	struct nvme_command *cmnd;
+	struct nvme_command cmnd;
 	u16 control = 0;
 	u32 dsmgmt = 0;
 
@@ -804,19 +804,16 @@
 	if (req->cmd_flags & REQ_RAHEAD)
 		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
 
-	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
-	memset(cmnd, 0, sizeof(*cmnd));
+	memset(&cmnd, 0, sizeof(cmnd));
+	cmnd.rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+	cmnd.rw.command_id = req->tag;
+	cmnd.rw.nsid = cpu_to_le32(ns->ns_id);
+	cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+	cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
+	cmnd.rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+	cmnd.rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
 
-	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
-	cmnd->rw.command_id = req->tag;
-	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
-	cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-	cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
-	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
-	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
-
-	if (blk_integrity_rq(req)) {
-		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
+	if (ns->ms) {
 		switch (ns->pi_type) {
 		case NVME_NS_DPS_PI_TYPE3:
 			control |= NVME_RW_PRINFO_PRCHK_GUARD;
@@ -825,19 +822,21 @@
 		case NVME_NS_DPS_PI_TYPE2:
 			control |= NVME_RW_PRINFO_PRCHK_GUARD |
 					NVME_RW_PRINFO_PRCHK_REF;
-			cmnd->rw.reftag = cpu_to_le32(
+			cmnd.rw.reftag = cpu_to_le32(
 					nvme_block_nr(ns, blk_rq_pos(req)));
 			break;
 		}
-	} else if (ns->ms)
-		control |= NVME_RW_PRINFO_PRACT;
+		if (blk_integrity_rq(req))
+			cmnd.rw.metadata =
+				cpu_to_le64(sg_dma_address(iod->meta_sg));
+		else
+			control |= NVME_RW_PRINFO_PRACT;
+	}
 
-	cmnd->rw.control = cpu_to_le16(control);
-	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+	cmnd.rw.control = cpu_to_le16(control);
+	cmnd.rw.dsmgmt = cpu_to_le32(dsmgmt);
 
-	if (++nvmeq->sq_tail == nvmeq->q_depth)
-		nvmeq->sq_tail = 0;
-	writel(nvmeq->sq_tail, nvmeq->q_db);
+	__nvme_submit_cmd(nvmeq, &cmnd);
 
 	return 0;
 }
@@ -1080,7 +1079,8 @@
 	c.common.command_id = req->tag;
 
 	blk_mq_free_request(req);
-	return __nvme_submit_cmd(nvmeq, &c);
+	__nvme_submit_cmd(nvmeq, &c);
+	return 0;
 }
 
 static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
@@ -1103,7 +1103,8 @@
 
 	cmd->common.command_id = req->tag;
 
-	return nvme_submit_cmd(nvmeq, cmd);
+	nvme_submit_cmd(nvmeq, cmd);
+	return 0;
 }
 
 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -1315,12 +1316,7 @@
 
 	dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag,
 							nvmeq->qid);
-	if (nvme_submit_cmd(dev->queues[0], &cmd) < 0) {
-		dev_warn(nvmeq->q_dmadev,
-				"Could not abort I/O %d QID %d",
-				req->tag, nvmeq->qid);
-		blk_mq_free_request(abort_req);
-	}
+	nvme_submit_cmd(dev->queues[0], &cmd);
 }
 
 static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
@@ -1374,7 +1370,8 @@
 {
 	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
 				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
-	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+	if (nvmeq->sq_cmds)
+		dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
 					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
 	kfree(nvmeq);
 }
@@ -1447,6 +1444,47 @@
 	spin_unlock_irq(&nvmeq->q_lock);
 }
 
+static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
+				int entry_size)
+{
+	int q_depth = dev->q_depth;
+	unsigned q_size_aligned = roundup(q_depth * entry_size, dev->page_size);
+
+	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
+		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
+		mem_per_q = round_down(mem_per_q, dev->page_size);
+		q_depth = div_u64(mem_per_q, entry_size);
+
+		/*
+		 * Ensure the reduced q_depth is above some threshold where it
+		 * would be better to map queues in system memory with the
+		 * original depth
+		 */
+		if (q_depth < 64)
+			return -ENOMEM;
+	}
+
+	return q_depth;
+}
+
+static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+				int qid, int depth)
+{
+	if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
+		unsigned offset = (qid - 1) *
+					roundup(SQ_SIZE(depth), dev->page_size);
+		nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
+		nvmeq->sq_cmds_io = dev->cmb + offset;
+	} else {
+		nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+					&nvmeq->sq_dma_addr, GFP_KERNEL);
+		if (!nvmeq->sq_cmds)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 							int depth)
 {
@@ -1459,9 +1497,7 @@
 	if (!nvmeq->cqes)
 		goto free_nvmeq;
 
-	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
-					&nvmeq->sq_dma_addr, GFP_KERNEL);
-	if (!nvmeq->sq_cmds)
+	if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
 		goto free_cqdma;
 
 	nvmeq->q_dmadev = dev->dev;
@@ -1696,6 +1732,12 @@
 		page_shift = dev_page_max;
 	}
 
+	dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
+						NVME_CAP_NSSRC(cap) : 0;
+
+	if (dev->subsystem && (readl(&dev->bar->csts) & NVME_CSTS_NSSRO))
+		writel(NVME_CSTS_NSSRO, &dev->bar->csts);
+
 	result = nvme_disable_ctrl(dev, cap);
 	if (result < 0)
 		return result;
@@ -1856,6 +1898,15 @@
 	return status;
 }
 
+static int nvme_subsys_reset(struct nvme_dev *dev)
+{
+	if (!dev->subsystem)
+		return -ENOTTY;
+
+	writel(0x4E564D65, &dev->bar->nssr); /* "NVMe" */
+	return 0;
+}
+
 static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
 							unsigned long arg)
 {
@@ -1989,7 +2040,7 @@
 								!ns->ext)
 		nvme_init_integrity(ns);
 
-	if (ns->ms && !blk_get_integrity(disk))
+	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
 		set_capacity(disk, 0);
 	else
 		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
@@ -2020,7 +2071,10 @@
 		spin_lock(&dev_list_lock);
 		list_for_each_entry_safe(dev, next, &dev_list, node) {
 			int i;
-			if (readl(&dev->bar->csts) & NVME_CSTS_CFS) {
+			u32 csts = readl(&dev->bar->csts);
+
+			if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
+							csts & NVME_CSTS_CFS) {
 				if (work_busy(&dev->reset_work))
 					continue;
 				list_del_init(&dev->node);
@@ -2080,8 +2134,11 @@
 	list_add_tail(&ns->list, &dev->namespaces);
 
 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
-	if (dev->max_hw_sectors)
+	if (dev->max_hw_sectors) {
 		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
+		blk_queue_max_segments(ns->queue,
+			((dev->max_hw_sectors << 9) / dev->page_size) + 1);
+	}
 	if (dev->stripe_size)
 		blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
 	if (dev->vwc & NVME_CTRL_VWC_PRESENT)
@@ -2159,6 +2216,58 @@
 	return min(result & 0xffff, result >> 16) + 1;
 }
 
+static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
+{
+	u64 szu, size, offset;
+	u32 cmbloc;
+	resource_size_t bar_size;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+	void __iomem *cmb;
+	dma_addr_t dma_addr;
+
+	if (!use_cmb_sqes)
+		return NULL;
+
+	dev->cmbsz = readl(&dev->bar->cmbsz);
+	if (!(NVME_CMB_SZ(dev->cmbsz)))
+		return NULL;
+
+	cmbloc = readl(&dev->bar->cmbloc);
+
+	szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
+	size = szu * NVME_CMB_SZ(dev->cmbsz);
+	offset = szu * NVME_CMB_OFST(cmbloc);
+	bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc));
+
+	if (offset > bar_size)
+		return NULL;
+
+	/*
+	 * Controllers may support a CMB size larger than their BAR,
+	 * for example, due to being behind a bridge. Reduce the CMB to
+	 * the reported size of the BAR
+	 */
+	if (size > bar_size - offset)
+		size = bar_size - offset;
+
+	dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset;
+	cmb = ioremap_wc(dma_addr, size);
+	if (!cmb)
+		return NULL;
+
+	dev->cmb_dma_addr = dma_addr;
+	dev->cmb_size = size;
+	return cmb;
+}
+
+static inline void nvme_release_cmb(struct nvme_dev *dev)
+{
+	if (dev->cmb) {
+		iounmap(dev->cmb);
+		dev->cmb = NULL;
+	}
+}
+
 static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
 {
 	return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
@@ -2177,6 +2286,15 @@
 	if (result < nr_io_queues)
 		nr_io_queues = result;
 
+	if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
+		result = nvme_cmb_qdepth(dev, nr_io_queues,
+				sizeof(struct nvme_command));
+		if (result > 0)
+			dev->q_depth = result;
+		else
+			nvme_release_cmb(dev);
+	}
+
 	size = db_bar_size(dev, nr_io_queues);
 	if (size > 8192) {
 		iounmap(dev->bar);
@@ -2344,7 +2462,6 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int res;
-	unsigned nn;
 	struct nvme_id_ctrl *ctrl;
 	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
 
@@ -2354,7 +2471,6 @@
 		return -EIO;
 	}
 
-	nn = le32_to_cpup(&ctrl->nn);
 	dev->oncs = le16_to_cpup(&ctrl->oncs);
 	dev->abort_limit = ctrl->acl + 1;
 	dev->vwc = ctrl->vwc;
@@ -2440,6 +2556,8 @@
 	dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
 	dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
 	dev->dbs = ((void __iomem *)dev->bar) + 4096;
+	if (readl(&dev->bar->vs) >= NVME_VS(1, 2))
+		dev->cmb = nvme_map_cmb(dev);
 
 	return 0;
 
@@ -2820,6 +2938,8 @@
 	case NVME_IOCTL_RESET:
 		dev_warn(dev->dev, "resetting controller\n");
 		return nvme_reset(dev);
+	case NVME_IOCTL_SUBSYS_RESET:
+		return nvme_subsys_reset(dev);
 	default:
 		return -ENOTTY;
 	}
@@ -3145,6 +3265,7 @@
 	nvme_dev_remove_admin(dev);
 	device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
 	nvme_free_queues(dev, 0);
+	nvme_release_cmb(dev);
 	nvme_release_prp_pools(dev);
 	kref_put(&dev->kref, nvme_free_dev);
 }
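A worked example of the nvme_cmb_qdepth() clamp, with illustrative numbers rather than values from a real controller: eight I/O queues sharing a 256 KiB CMB, 64-byte submission entries, 4 KiB pages. Standalone C mirroring the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long cmb_size = 256 * 1024, page_size = 4096;
	unsigned long entry_size = 64, q_depth = 1024, nr_io_queues = 8;
	/* roundup(q_depth * entry_size, page_size): 64 KiB per queue */
	unsigned long q_size_aligned =
		((q_depth * entry_size + page_size - 1) / page_size) * page_size;

	if (q_size_aligned * nr_io_queues > cmb_size) {
		unsigned long mem_per_q = cmb_size / nr_io_queues; /* 32 KiB */
		mem_per_q -= mem_per_q % page_size;	/* round_down to page */
		q_depth = mem_per_q / entry_size;	/* 512 entries       */
	}
	/* 512 >= 64, so the queues stay in the CMB at the reduced depth */
	printf("q_depth = %lu\n", q_depth);
	return 0;
}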
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 2e77707..0bd88c9 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -13,6 +13,10 @@
 	tristate
 	select FW_LOADER
 
+config BT_QCA
+	tristate
+	select FW_LOADER
+
 config BT_HCIBTUSB
 	tristate "HCI USB driver"
 	depends on USB
@@ -132,6 +136,7 @@
 config BT_HCIUART_INTEL
 	bool "Intel protocol support"
 	depends on BT_HCIUART
+	select BT_HCIUART_H4
 	select BT_INTEL
 	help
 	  The Intel protocol support enables Bluetooth HCI over serial
@@ -150,6 +155,19 @@
 
 	  Say Y here to compile support for Broadcom protocol.
 
+config BT_HCIUART_QCA
+	bool "Qualcomm Atheros protocol support"
+	depends on BT_HCIUART
+	select BT_HCIUART_H4
+	select BT_QCA
+	help
+	  The Qualcomm Atheros protocol supports the HCI In-Band Sleep
+	  feature over the serial port interface (H4) between the controller
+	  and the host. This protocol is required for UART clock control
+	  on QCA Bluetooth devices.
+
+	  Say Y here to compile support for QCA protocol.
+
 config BT_HCIBCM203X
 	tristate "HCI BCM203x USB driver"
 	depends on USB
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index f40e194..07c9cf3 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -22,6 +22,7 @@
 obj-$(CONFIG_BT_WILINK)		+= btwilink.o
 obj-$(CONFIG_BT_BCM)		+= btbcm.o
 obj-$(CONFIG_BT_RTL)		+= btrtl.o
+obj-$(CONFIG_BT_QCA)		+= btqca.o
 
 btmrvl-y			:= btmrvl_main.o
 btmrvl-$(CONFIG_DEBUG_FS)	+= btmrvl_debugfs.o
@@ -34,6 +35,7 @@
 hci_uart-$(CONFIG_BT_HCIUART_3WIRE)	+= hci_h5.o
 hci_uart-$(CONFIG_BT_HCIUART_INTEL)	+= hci_intel.o
 hci_uart-$(CONFIG_BT_HCIUART_BCM)	+= hci_bcm.o
+hci_uart-$(CONFIG_BT_HCIUART_QCA)	+= hci_qca.o
 hci_uart-objs				:= $(hci_uart-y)
 
 ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index fcfb72e..a5c4d05 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -492,7 +492,7 @@
 	case HCI_SCODATA_PKT:
 		hdev->stat.sco_tx++;
 		break;
-	};
+	}
 
 	/* Prepend skb with frame type */
 	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 7aab654..a00bb82 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -427,7 +427,7 @@
 	case HCI_SCODATA_PKT:
 		hdev->stat.sco_tx++;
 		break;
-	};
+	}
 
 	/* Prepend skb with frame type */
 	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 9ceb8ac..02ed816 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -34,6 +34,7 @@
 
 #define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
 #define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
+#define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}})
 
 int btbcm_check_bdaddr(struct hci_dev *hdev)
 {
@@ -66,9 +67,13 @@
 	 *
 	 * The address 43:24:B3:00:00:00 indicates a BCM4324B3 controller
 	 * with waiting for configuration state.
+	 *
+	 * The address 43:30:B1:00:00:00 indicates a BCM4330B1 controller
+	 * with waiting for configuration state.
 	 */
 	if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0) ||
-	    !bacmp(&bda->bdaddr, BDADDR_BCM4324B3)) {
+	    !bacmp(&bda->bdaddr, BDADDR_BCM4324B3) ||
+	    !bacmp(&bda->bdaddr, BDADDR_BCM4330B1)) {
 		BT_INFO("%s: BCM: Using default device address (%pMR)",
 			hdev->name, &bda->bdaddr);
 		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
@@ -241,6 +246,7 @@
 	u16 subver;
 	const char *name;
 } bcm_uart_subver_table[] = {
+	{ 0x4103, "BCM4330B1"	},	/* 002.001.003 */
 	{ 0x410e, "BCM43341B0"	},	/* 002.001.014 */
 	{ 0x4406, "BCM4324B3"	},	/* 002.004.006 */
 	{ 0x610c, "BCM4354"	},	/* 003.001.012 */
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 828f2f8..048423f 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -89,7 +89,89 @@
 }
 EXPORT_SYMBOL_GPL(btintel_set_bdaddr);
 
+void btintel_hw_error(struct hci_dev *hdev, u8 code)
+{
+	struct sk_buff *skb;
+	u8 type = 0x00;
+
+	BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
+
+	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		BT_ERR("%s: Reset after hardware error failed (%ld)",
+		       hdev->name, PTR_ERR(skb));
+		return;
+	}
+	kfree_skb(skb);
+
+	skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
+		       hdev->name, PTR_ERR(skb));
+		return;
+	}
+
+	if (skb->len != 13) {
+		BT_ERR("%s: Exception info size mismatch", hdev->name);
+		kfree_skb(skb);
+		return;
+	}
+
+	BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
+
+	kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(btintel_hw_error);
+
+void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
+{
+	const char *variant;
+
+	switch (ver->fw_variant) {
+	case 0x06:
+		variant = "Bootloader";
+		break;
+	case 0x23:
+		variant = "Firmware";
+		break;
+	default:
+		return;
+	}
+
+	BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name,
+		variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
+		ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy);
+}
+EXPORT_SYMBOL_GPL(btintel_version_info);
+
+int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
+			const void *param)
+{
+	while (plen > 0) {
+		struct sk_buff *skb;
+		u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;
+
+		cmd_param[0] = fragment_type;
+		memcpy(cmd_param + 1, param, fragment_len);
+
+		skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
+				     cmd_param, HCI_INIT_TIMEOUT);
+		if (IS_ERR(skb))
+			return PTR_ERR(skb);
+
+		kfree_skb(skb);
+
+		plen -= fragment_len;
+		param += fragment_len;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_secure_send);
+
 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
 MODULE_VERSION(VERSION);
 MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("intel/ibt-11-5.sfi");
+MODULE_FIRMWARE("intel/ibt-11-5.ddc");
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 4bda6ab..b278d14 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -73,6 +73,11 @@
 
 int btintel_check_bdaddr(struct hci_dev *hdev);
 int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+void btintel_hw_error(struct hci_dev *hdev, u8 code);
+
+void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver);
+int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
+			const void *param);
 
 #else
 
@@ -86,4 +91,18 @@
 	return -EOPNOTSUPP;
 }
 
+static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
+{
+}
+
+static inline void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
+{
+}
+
+static inline int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type,
+				      u32 plen, const void *param)
+{
+	return -EOPNOTSUPP;
+}
+
 #endif
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 086f0ec..27a9aac 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -95,10 +95,10 @@
 	struct btmrvl_device btmrvl_dev;
 	struct btmrvl_adapter *adapter;
 	struct btmrvl_thread main_thread;
-	int (*hw_host_to_card) (struct btmrvl_private *priv,
+	int (*hw_host_to_card)(struct btmrvl_private *priv,
 				u8 *payload, u16 nb);
-	int (*hw_wakeup_firmware) (struct btmrvl_private *priv);
-	int (*hw_process_int_status) (struct btmrvl_private *priv);
+	int (*hw_wakeup_firmware)(struct btmrvl_private *priv);
+	int (*hw_process_int_status)(struct btmrvl_private *priv);
 	void (*firmware_dump)(struct btmrvl_private *priv);
 	spinlock_t driver_lock;		/* spinlock used by driver */
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index b9a8119..b9978a7 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -1071,8 +1071,6 @@
 		}
 	}
 
-	sdio_release_host(card->func);
-
 	/*
 	 * winner or not, with this test the FW synchronizes when the
 	 * module can continue its initialization
@@ -1082,6 +1080,8 @@
 		return -ETIMEDOUT;
 	}
 
+	sdio_release_host(card->func);
+
 	return 0;
 
 done:
@@ -1376,8 +1376,7 @@
 
 	/* fw_dump_data will be freed in the device coredump release
 	   function after 5 min */
-	dev_coredumpv(&priv->btmrvl_dev.hcidev->dev, fw_dump_data,
-		      fw_dump_len, GFP_KERNEL);
+	dev_coredumpv(&card->func->dev, fw_dump_data, fw_dump_len, GFP_KERNEL);
 	BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump end");
 }
 
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
new file mode 100644
index 0000000..4a62081
--- /dev/null
+++ b/drivers/bluetooth/btqca.c
@@ -0,0 +1,392 @@
+/*
+ *  Bluetooth support for Qualcomm Atheros chips
+ *
+ *  Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btqca.h"
+
+#define VERSION "0.1"
+
+static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
+{
+	struct sk_buff *skb;
+	struct edl_event_hdr *edl;
+	struct rome_version *ver;
+	char cmd;
+	int err = 0;
+
+	BT_DBG("%s: ROME Patch Version Request", hdev->name);
+
+	cmd = EDL_PATCH_VER_REQ_CMD;
+	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
+				&cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		BT_ERR("%s: Failed to read version of ROME (%d)", hdev->name,
+		       err);
+		return err;
+	}
+
+	if (skb->len != sizeof(*edl) + sizeof(*ver)) {
+		BT_ERR("%s: Version size mismatch len %d", hdev->name,
+		       skb->len);
+		err = -EILSEQ;
+		goto out;
+	}
+
+	edl = (struct edl_event_hdr *)(skb->data);
+	if (!edl || !edl->data) {
+		BT_ERR("%s: TLV with no header or no data", hdev->name);
+		err = -EILSEQ;
+		goto out;
+	}
+
+	if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+	    edl->rtype != EDL_APP_VER_RES_EVT) {
+		BT_ERR("%s: Wrong packet received %d %d", hdev->name,
+		       edl->cresp, edl->rtype);
+		err = -EIO;
+		goto out;
+	}
+
+	ver = (struct rome_version *)(edl->data);
+
+	BT_DBG("%s: Product:0x%08x", hdev->name, le32_to_cpu(ver->product_id));
+	BT_DBG("%s: Patch  :0x%08x", hdev->name, le16_to_cpu(ver->patch_ver));
+	BT_DBG("%s: ROM    :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver));
+	BT_DBG("%s: SOC    :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));
+
+	/* The ROME chipset version is composed from the SoC and ROM
+	 * versions: the upper two bytes come from the SoC ID and the
+	 * lower two bytes from the ROM version.
+	 */
+	*rome_version = (le32_to_cpu(ver->soc_id) << 16) |
+		        (le16_to_cpu(ver->rome_ver) & 0x0000ffff);
+
+out:
+	kfree_skb(skb);
+
+	return err;
+}
+
+static int rome_reset(struct hci_dev *hdev)
+{
+	struct sk_buff *skb;
+	int err;
+
+	BT_DBG("%s: ROME HCI_RESET", hdev->name);
+
+	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		BT_ERR("%s: Reset failed (%d)", hdev->name, err);
+		return err;
+	}
+
+	kfree_skb(skb);
+
+	return 0;
+}
+
+static void rome_tlv_check_data(struct rome_config *config,
+				const struct firmware *fw)
+{
+	const u8 *data;
+	u32 type_len;
+	u16 tag_id, tag_len;
+	int idx, length;
+	struct tlv_type_hdr *tlv;
+	struct tlv_type_patch *tlv_patch;
+	struct tlv_type_nvm *tlv_nvm;
+
+	tlv = (struct tlv_type_hdr *)fw->data;
+
+	type_len = le32_to_cpu(tlv->type_len);
+	length = (type_len >> 8) & 0x00ffffff;
+
+	BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
+	BT_DBG("Length\t\t : %d bytes", length);
+
+	switch (config->type) {
+	case TLV_TYPE_PATCH:
+		tlv_patch = (struct tlv_type_patch *)tlv->data;
+		BT_DBG("Total Length\t\t : %d bytes",
+		       le32_to_cpu(tlv_patch->total_size));
+		BT_DBG("Patch Data Length\t : %d bytes",
+		       le32_to_cpu(tlv_patch->data_length));
+		BT_DBG("Signing Format Version : 0x%x",
+		       tlv_patch->format_version);
+		BT_DBG("Signature Algorithm\t : 0x%x",
+		       tlv_patch->signature);
+		BT_DBG("Reserved\t\t : 0x%x",
+		       le16_to_cpu(tlv_patch->reserved1));
+		BT_DBG("Product ID\t\t : 0x%04x",
+		       le16_to_cpu(tlv_patch->product_id));
+		BT_DBG("Rom Build Version\t : 0x%04x",
+		       le16_to_cpu(tlv_patch->rom_build));
+		BT_DBG("Patch Version\t\t : 0x%04x",
+		       le16_to_cpu(tlv_patch->patch_version));
+		BT_DBG("Reserved\t\t : 0x%x",
+		       le16_to_cpu(tlv_patch->reserved2));
+		BT_DBG("Patch Entry Address\t : 0x%x",
+		       le32_to_cpu(tlv_patch->entry));
+		break;
+
+	case TLV_TYPE_NVM:
+		idx = 0;
+		data = tlv->data;
+		while (idx < length) {
+			tlv_nvm = (struct tlv_type_nvm *)(data + idx);
+
+			tag_id = le16_to_cpu(tlv_nvm->tag_id);
+			tag_len = le16_to_cpu(tlv_nvm->tag_len);
+
+			/* Update NVM tags as needed */
+			switch (tag_id) {
+			case EDL_TAG_ID_HCI:
+				/* HCI transport layer parameters
+				 * enabling software inband sleep
+				 * onto controller side.
+				 */
+				tlv_nvm->data[0] |= 0x80;
+
+				/* UART Baud Rate */
+				tlv_nvm->data[2] = config->user_baud_rate;
+
+				break;
+
+			case EDL_TAG_ID_DEEP_SLEEP:
+				/* Sleep enable mask
+				 * enabling deep sleep feature on controller.
+				 */
+				tlv_nvm->data[0] |= 0x01;
+
+				break;
+			}
+
+			idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len);
+		}
+		break;
+
+	default:
+		BT_ERR("Unknown TLV type %d", config->type);
+		break;
+	}
+}
+
+static int rome_tlv_send_segment(struct hci_dev *hdev, int idx, int seg_size,
+				 const u8 *data)
+{
+	struct sk_buff *skb;
+	struct edl_event_hdr *edl;
+	struct tlv_seg_resp *tlv_resp;
+	u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2];
+	int err = 0;
+
+	BT_DBG("%s: Download segment #%d size %d", hdev->name, idx, seg_size);
+
+	cmd[0] = EDL_PATCH_TLV_REQ_CMD;
+	cmd[1] = seg_size;
+	memcpy(cmd + 2, data, seg_size);
+
+	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
+				HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		BT_ERR("%s: Failed to send TLV segment (%d)", hdev->name, err);
+		return err;
+	}
+
+	if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) {
+		BT_ERR("%s: TLV response size mismatch", hdev->name);
+		err = -EILSEQ;
+		goto out;
+	}
+
+	edl = (struct edl_event_hdr *)(skb->data);
+	if (!edl || !edl->data) {
+		BT_ERR("%s: TLV with no header or no data", hdev->name);
+		err = -EILSEQ;
+		goto out;
+	}
+
+	tlv_resp = (struct tlv_seg_resp *)(edl->data);
+
+	if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+	    edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) {
+		BT_ERR("%s: TLV with error stat 0x%x rtype 0x%x (0x%x)",
+		       hdev->name, edl->cresp, edl->rtype, tlv_resp->result);
+		err = -EIO;
+	}
+
+out:
+	kfree_skb(skb);
+
+	return err;
+}
+
+static int rome_tlv_download_request(struct hci_dev *hdev,
+				     const struct firmware *fw)
+{
+	const u8 *buffer, *data;
+	int total_segment, remain_size;
+	int ret, i;
+
+	if (!fw || !fw->data)
+		return -EINVAL;
+
+	total_segment = fw->size / MAX_SIZE_PER_TLV_SEGMENT;
+	remain_size = fw->size % MAX_SIZE_PER_TLV_SEGMENT;
+
+	BT_DBG("%s: Total segment num %d remain size %d total size %zu",
+	       hdev->name, total_segment, remain_size, fw->size);
+
+	data = fw->data;
+	for (i = 0; i < total_segment; i++) {
+		buffer = data + i * MAX_SIZE_PER_TLV_SEGMENT;
+		ret = rome_tlv_send_segment(hdev, i, MAX_SIZE_PER_TLV_SEGMENT,
+					    buffer);
+		if (ret < 0)
+			return -EIO;
+	}
+
+	if (remain_size) {
+		buffer = data + total_segment * MAX_SIZE_PER_TLV_SEGMENT;
+		ret = rome_tlv_send_segment(hdev, total_segment, remain_size,
+					    buffer);
+		if (ret < 0)
+			return -EIO;
+	}
+
+	return 0;
+}
+
+static int rome_download_firmware(struct hci_dev *hdev,
+				  struct rome_config *config)
+{
+	const struct firmware *fw;
+	int ret;
+
+	BT_INFO("%s: ROME Downloading %s", hdev->name, config->fwname);
+
+	ret = request_firmware(&fw, config->fwname, &hdev->dev);
+	if (ret) {
+		BT_ERR("%s: Failed to request file: %s (%d)", hdev->name,
+		       config->fwname, ret);
+		return ret;
+	}
+
+	rome_tlv_check_data(config, fw);
+
+	ret = rome_tlv_download_request(hdev, fw);
+	if (ret) {
+		BT_ERR("%s: Failed to download file: %s (%d)", hdev->name,
+		       config->fwname, ret);
+	}
+
+	release_firmware(fw);
+
+	return ret;
+}
+
+int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+	struct sk_buff *skb;
+	u8 cmd[9];
+	int err;
+
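+	/* NVM access set request: 1-byte command, 1-byte tag ID,
+	 * 1-byte length and the 6-byte BD address (9 bytes total).
+	 */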
+	cmd[0] = EDL_NVM_ACCESS_SET_REQ_CMD;
+	cmd[1] = 0x02;		/* TAG ID */
+	cmd[2] = sizeof(bdaddr_t);	/* size */
+	memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t));
+	skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd,
+				HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		BT_ERR("%s: Change address command failed (%d)",
+		       hdev->name, err);
+		return err;
+	}
+
+	kfree_skb(skb);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
+
+int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
+{
+	u32 rome_ver = 0;
+	struct rome_config config;
+	int err;
+
+	BT_DBG("%s: ROME setup on UART", hdev->name);
+
+	config.user_baud_rate = baudrate;
+
+	/* Get ROME version information */
+	err = rome_patch_ver_req(hdev, &rome_ver);
+	if (err < 0 || rome_ver == 0) {
+		BT_ERR("%s: Failed to get version 0x%x", hdev->name, err);
+		return err;
+	}
+
+	BT_INFO("%s: ROME controller version 0x%08x", hdev->name, rome_ver);
+
+	/* Download rampatch file */
+	config.type = TLV_TYPE_PATCH;
+	snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin",
+		 rome_ver);
+	err = rome_download_firmware(hdev, &config);
+	if (err < 0) {
+		BT_ERR("%s: Failed to download patch (%d)", hdev->name, err);
+		return err;
+	}
+
+	/* Download NVM configuration */
+	config.type = TLV_TYPE_NVM;
+	snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin",
+		 rome_ver);
+	err = rome_download_firmware(hdev, &config);
+	if (err < 0) {
+		BT_ERR("%s: Failed to download NVM (%d)", hdev->name, err);
+		return err;
+	}
+
+	/* Perform HCI reset */
+	err = rome_reset(hdev);
+	if (err < 0) {
+		BT_ERR("%s: Failed to run HCI_RESET (%d)", hdev->name, err);
+		return err;
+	}
+
+	BT_INFO("%s: ROME setup on UART is completed", hdev->name);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qca_uart_setup_rome);
+
+MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
+MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
new file mode 100644
index 0000000..65e994b
--- /dev/null
+++ b/drivers/bluetooth/btqca.h
@@ -0,0 +1,135 @@
+/*
+ *  Bluetooth support for Qualcomm Atheros ROME chips
+ *
+ *  Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#define EDL_PATCH_CMD_OPCODE		(0xFC00)
+#define EDL_NVM_ACCESS_OPCODE		(0xFC0B)
+#define EDL_PATCH_CMD_LEN		(1)
+#define EDL_PATCH_VER_REQ_CMD		(0x19)
+#define EDL_PATCH_TLV_REQ_CMD		(0x1E)
+#define EDL_NVM_ACCESS_SET_REQ_CMD	(0x01)
+#define MAX_SIZE_PER_TLV_SEGMENT	(243)
+
+#define EDL_CMD_REQ_RES_EVT		(0x00)
+#define EDL_PATCH_VER_RES_EVT		(0x19)
+#define EDL_APP_VER_RES_EVT		(0x02)
+#define EDL_TVL_DNLD_RES_EVT		(0x04)
+#define EDL_CMD_EXE_STATUS_EVT		(0x00)
+#define EDL_SET_BAUDRATE_RSP_EVT	(0x92)
+#define EDL_NVM_ACCESS_CODE_EVT		(0x0B)
+
+#define EDL_TAG_ID_HCI			(17)
+#define EDL_TAG_ID_DEEP_SLEEP		(27)
+
+enum qca_baudrate {
+	QCA_BAUDRATE_115200	= 0,
+	QCA_BAUDRATE_57600,
+	QCA_BAUDRATE_38400,
+	QCA_BAUDRATE_19200,
+	QCA_BAUDRATE_9600,
+	QCA_BAUDRATE_230400,
+	QCA_BAUDRATE_250000,
+	QCA_BAUDRATE_460800,
+	QCA_BAUDRATE_500000,
+	QCA_BAUDRATE_720000,
+	QCA_BAUDRATE_921600,
+	QCA_BAUDRATE_1000000,
+	QCA_BAUDRATE_1250000,
+	QCA_BAUDRATE_2000000,
+	QCA_BAUDRATE_3000000,
+	QCA_BAUDRATE_4000000,
+	QCA_BAUDRATE_1600000,
+	QCA_BAUDRATE_3200000,
+	QCA_BAUDRATE_3500000,
+	QCA_BAUDRATE_AUTO	= 0xFE,
+	QCA_BAUDRATE_RESERVED
+};
+
+enum rome_tlv_type {
+	TLV_TYPE_PATCH = 1,
+	TLV_TYPE_NVM
+};
+
+struct rome_config {
+	u8 type;
+	char fwname[64];
+	uint8_t user_baud_rate;
+};
+
+struct edl_event_hdr {
+	__u8 cresp;
+	__u8 rtype;
+	__u8 data[0];
+} __packed;
+
+struct rome_version {
+	__le32 product_id;
+	__le16 patch_ver;
+	__le16 rome_ver;
+	__le32 soc_id;
+} __packed;
+
+struct tlv_seg_resp {
+	__u8 result;
+} __packed;
+
+struct tlv_type_patch {
+	__le32 total_size;
+	__le32 data_length;
+	__u8   format_version;
+	__u8   signature;
+	__le16 reserved1;
+	__le16 product_id;
+	__le16 rom_build;
+	__le16 patch_version;
+	__le16 reserved2;
+	__le32 entry;
+} __packed;
+
+struct tlv_type_nvm {
+	__le16 tag_id;
+	__le16 tag_len;
+	__le32 reserve1;
+	__le32 reserve2;
+	__u8   data[0];
+} __packed;
+
+struct tlv_type_hdr {
+	__le32 type_len;
+	__u8   data[0];
+} __packed;
+
+#if IS_ENABLED(CONFIG_BT_QCA)
+
+int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate);
+
+#else
+
+static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
+{
+	return -EOPNOTSUPP;
+}
+
+#endif
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index b4cf8d9..b6aceaf 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -68,6 +68,9 @@
 	/* Generic Bluetooth AMP device */
 	{ USB_DEVICE_INFO(0xe0, 0x01, 0x04), .driver_info = BTUSB_AMP },
 
+	/* Generic Bluetooth USB interface */
+	{ USB_INTERFACE_INFO(0xe0, 0x01, 0x01) },
+
 	/* Apple-specific (Broadcom) devices */
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01),
 	  .driver_info = BTUSB_BCM_APPLE },
@@ -319,6 +322,9 @@
 	{ USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
 
+	/* Silicon Wave based devices */
+	{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
+
 	{ }	/* Terminating entry */
 };
 
@@ -1575,7 +1581,7 @@
 
 	/* fw_patch_num indicates the version of patch the device currently
 	 * have. If there is no patch data in the device, it is always 0x00.
-	 * So, if it is other than 0x00, no need to patch the deivce again.
+	 * So, if it is other than 0x00, no need to patch the device again.
 	 */
 	if (ver->fw_patch_num) {
 		BT_INFO("%s: Intel device is already patched. patch num: %02x",
@@ -1878,51 +1884,6 @@
 	return -EILSEQ;
 }
 
-static int btusb_intel_secure_send(struct hci_dev *hdev, u8 fragment_type,
-				   u32 plen, const void *param)
-{
-	while (plen > 0) {
-		struct sk_buff *skb;
-		u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;
-
-		cmd_param[0] = fragment_type;
-		memcpy(cmd_param + 1, param, fragment_len);
-
-		skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
-				     cmd_param, HCI_INIT_TIMEOUT);
-		if (IS_ERR(skb))
-			return PTR_ERR(skb);
-
-		kfree_skb(skb);
-
-		plen -= fragment_len;
-		param += fragment_len;
-	}
-
-	return 0;
-}
-
-static void btusb_intel_version_info(struct hci_dev *hdev,
-				     struct intel_version *ver)
-{
-	const char *variant;
-
-	switch (ver->fw_variant) {
-	case 0x06:
-		variant = "Bootloader";
-		break;
-	case 0x23:
-		variant = "Firmware";
-		break;
-	default:
-		return;
-	}
-
-	BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name,
-		variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
-		ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy);
-}
-
 static int btusb_setup_intel_new(struct hci_dev *hdev)
 {
 	static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
@@ -1984,7 +1945,7 @@
 		return -EINVAL;
 	}
 
-	btusb_intel_version_info(hdev, ver);
+	btintel_version_info(hdev, ver);
 
 	/* The firmware variant determines if the device is in bootloader
 	 * mode or is running operational firmware. The value 0x06 identifies
@@ -2104,7 +2065,7 @@
 	/* Start the firmware download transaction with the Init fragment
 	 * represented by the 128 bytes of CSS header.
 	 */
-	err = btusb_intel_secure_send(hdev, 0x00, 128, fw->data);
+	err = btintel_secure_send(hdev, 0x00, 128, fw->data);
 	if (err < 0) {
 		BT_ERR("%s: Failed to send firmware header (%d)",
 		       hdev->name, err);
@@ -2114,7 +2075,7 @@
 	/* Send the 256 bytes of public key information from the firmware
 	 * as the PKey fragment.
 	 */
-	err = btusb_intel_secure_send(hdev, 0x03, 256, fw->data + 128);
+	err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
 	if (err < 0) {
 		BT_ERR("%s: Failed to send firmware public key (%d)",
 		       hdev->name, err);
@@ -2124,7 +2085,7 @@
 	/* Send the 256 bytes of signature information from the firmware
 	 * as the Sign fragment.
 	 */
-	err = btusb_intel_secure_send(hdev, 0x02, 256, fw->data + 388);
+	err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
 	if (err < 0) {
 		BT_ERR("%s: Failed to send firmware signature (%d)",
 		       hdev->name, err);
@@ -2139,7 +2100,7 @@
 
 		frag_len += sizeof(*cmd) + cmd->plen;
 
-		/* The paramter length of the secure send command requires
+		/* The parameter length of the secure send command requires
 		 * a 4 byte alignment. It happens so that the firmware file
 		 * contains proper Intel_NOP commands to align the fragments
 		 * as needed.
@@ -2148,8 +2109,7 @@
 		 * firmware data buffer as a single Data fragement.
 		 */
 		if (!(frag_len % 4)) {
-			err = btusb_intel_secure_send(hdev, 0x01, frag_len,
-						      fw_ptr);
+			err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
 			if (err < 0) {
 				BT_ERR("%s: Failed to send firmware data (%d)",
 				       hdev->name, err);
@@ -2291,39 +2251,6 @@
 	return 0;
 }
 
-static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code)
-{
-	struct sk_buff *skb;
-	u8 type = 0x00;
-
-	BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
-
-	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
-	if (IS_ERR(skb)) {
-		BT_ERR("%s: Reset after hardware error failed (%ld)",
-		       hdev->name, PTR_ERR(skb));
-		return;
-	}
-	kfree_skb(skb);
-
-	skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
-	if (IS_ERR(skb)) {
-		BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
-		       hdev->name, PTR_ERR(skb));
-		return;
-	}
-
-	if (skb->len != 13) {
-		BT_ERR("%s: Exception info size mismatch", hdev->name);
-		kfree_skb(skb);
-		return;
-	}
-
-	BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
-
-	kfree_skb(skb);
-}
-
 static int btusb_shutdown_intel(struct hci_dev *hdev)
 {
 	struct sk_buff *skb;
@@ -2783,7 +2710,7 @@
 	if (id->driver_info & BTUSB_INTEL_NEW) {
 		hdev->send = btusb_send_frame_intel;
 		hdev->setup = btusb_setup_intel_new;
-		hdev->hw_error = btusb_hw_error_intel;
+		hdev->hw_error = btintel_hw_error;
 		hdev->set_bdaddr = btintel_set_bdaddr;
 		set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
 	}
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 78e10f0..84135c5 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -182,9 +182,9 @@
 	int i;
 
 	printk(KERN_INFO "Bluetooth: Nokia control data =");
-	for (i = 0; i < skb->len; i++) {
+	for (i = 0; i < skb->len; i++)
 		printk(" %02x", skb->data[i]);
-	}
+
 	printk("\n");
 
 	/* transition to active state */
@@ -406,7 +406,7 @@
 		break;
 	default:
 		return -EILSEQ;
-	};
+	}
 
 	nsh.zero = 0;
 	nsh.len = skb->len;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 23523e1..835bfab 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -25,6 +25,12 @@
 #include <linux/errno.h>
 #include <linux/skbuff.h>
 #include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/tty.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -32,11 +38,37 @@
 #include "btbcm.h"
 #include "hci_uart.h"
 
-struct bcm_data {
-	struct sk_buff *rx_skb;
-	struct sk_buff_head txq;
+struct bcm_device {
+	struct list_head	list;
+
+	struct platform_device	*pdev;
+
+	const char		*name;
+	struct gpio_desc	*device_wakeup;
+	struct gpio_desc	*shutdown;
+
+	struct clk		*clk;
+	bool			clk_enabled;
+
+	u32			init_speed;
+
+#ifdef CONFIG_PM_SLEEP
+	struct hci_uart		*hu;
+	bool			is_suspended; /* suspend/resume flag */
+#endif
 };
 
+struct bcm_data {
+	struct sk_buff		*rx_skb;
+	struct sk_buff_head	txq;
+
+	struct bcm_device	*dev;
+};
+
+/* List of BCM BT UART devices */
+static DEFINE_SPINLOCK(bcm_device_lock);
+static LIST_HEAD(bcm_device_list);
+
 static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
 {
 	struct hci_dev *hdev = hu->hdev;
@@ -86,9 +118,41 @@
 	return 0;
 }
 
+/* bcm_device_exists should be protected by bcm_device_lock */
+static bool bcm_device_exists(struct bcm_device *device)
+{
+	struct list_head *p;
+
+	list_for_each(p, &bcm_device_list) {
+		struct bcm_device *dev = list_entry(p, struct bcm_device, list);
+
+		if (device == dev)
+			return true;
+	}
+
+	return false;
+}
+
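+/* Power sequencing: enable the reference clock before raising the
+ * shutdown/device-wakeup GPIOs, and disable it only after lowering
+ * them on power down.
+ */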
+static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
+{
+	if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
+		clk_enable(dev->clk);
+
+	gpiod_set_value(dev->shutdown, powered);
+	gpiod_set_value(dev->device_wakeup, powered);
+
+	if (!powered && !IS_ERR(dev->clk) && dev->clk_enabled)
+		clk_disable(dev->clk);
+
+	dev->clk_enabled = powered;
+
+	return 0;
+}
+
 static int bcm_open(struct hci_uart *hu)
 {
 	struct bcm_data *bcm;
+	struct list_head *p;
 
 	BT_DBG("hu %p", hu);
 
@@ -99,6 +163,30 @@
 	skb_queue_head_init(&bcm->txq);
 
 	hu->priv = bcm;
+
+	spin_lock(&bcm_device_lock);
+	list_for_each(p, &bcm_device_list) {
+		struct bcm_device *dev = list_entry(p, struct bcm_device, list);
+
+		/* Retrieve saved bcm_device based on parent of the
+		 * platform device (saved during device probe) and
+		 * parent of tty device used by hci_uart
+		 */
+		if (hu->tty->dev->parent == dev->pdev->dev.parent) {
+			bcm->dev = dev;
+			hu->init_speed = dev->init_speed;
+#ifdef CONFIG_PM_SLEEP
+			dev->hu = hu;
+#endif
+			break;
+		}
+	}
+
+	if (bcm->dev)
+		bcm_gpio_set_power(bcm->dev, true);
+
+	spin_unlock(&bcm_device_lock);
+
 	return 0;
 }
 
@@ -108,6 +196,16 @@
 
 	BT_DBG("hu %p", hu);
 
+	/* Protect bcm->dev against removal of the device or driver */
+	spin_lock(&bcm_device_lock);
+	if (bcm_device_exists(bcm->dev)) {
+		bcm_gpio_set_power(bcm->dev, false);
+#ifdef CONFIG_PM_SLEEP
+		bcm->dev->hu = NULL;
+#endif
+	}
+	spin_unlock(&bcm_device_lock);
+
 	skb_queue_purge(&bcm->txq);
 	kfree_skb(bcm->rx_skb);
 	kfree(bcm);
@@ -232,6 +330,204 @@
 	return skb_dequeue(&bcm->txq);
 }
 
+#ifdef CONFIG_PM_SLEEP
+/* Platform suspend callback */
+static int bcm_suspend(struct device *dev)
+{
+	struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+
+	BT_DBG("suspend (%p): is_suspended %d", bdev, bdev->is_suspended);
+
+	spin_lock(&bcm_device_lock);
+
+	if (!bdev->hu)
+		goto unlock;
+
+	if (!bdev->is_suspended) {
+		hci_uart_set_flow_control(bdev->hu, true);
+
+		/* Once this callback returns, driver suspends BT via GPIO */
+		bdev->is_suspended = true;
+	}
+
+	/* Suspend the device */
+	if (bdev->device_wakeup) {
+		gpiod_set_value(bdev->device_wakeup, false);
+		BT_DBG("suspend, delaying 15 ms");
+		mdelay(15);
+	}
+
+unlock:
+	spin_unlock(&bcm_device_lock);
+
+	return 0;
+}
+
+/* Platform resume callback */
+static int bcm_resume(struct device *dev)
+{
+	struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+
+	BT_DBG("resume (%p): is_suspended %d", bdev, bdev->is_suspended);
+
+	spin_lock(&bcm_device_lock);
+
+	if (!bdev->hu)
+		goto unlock;
+
+	if (bdev->device_wakeup) {
+		gpiod_set_value(bdev->device_wakeup, true);
+		BT_DBG("resume, delaying 15 ms");
+		mdelay(15);
+	}
+
+	/* When this callback executes, the device has woken up already */
+	if (bdev->is_suspended) {
+		bdev->is_suspended = false;
+
+		hci_uart_set_flow_control(bdev->hu, false);
+	}
+
+unlock:
+	spin_unlock(&bcm_device_lock);
+
+	return 0;
+}
+#endif
+
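+/* acpi_gpio_params initializers are { CRS entry index, line index
+ * within that entry, active_low }.
+ */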
+static const struct acpi_gpio_params device_wakeup_gpios = { 0, 0, false };
+static const struct acpi_gpio_params shutdown_gpios = { 1, 0, false };
+
+static const struct acpi_gpio_mapping acpi_bcm_default_gpios[] = {
+	{ "device-wakeup-gpios", &device_wakeup_gpios, 1 },
+	{ "shutdown-gpios", &shutdown_gpios, 1 },
+	{ },
+};
+
+#ifdef CONFIG_ACPI
+static int bcm_resource(struct acpi_resource *ares, void *data)
+{
+	struct bcm_device *dev = data;
+
+	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+		struct acpi_resource_uart_serialbus *sb;
+
+		sb = &ares->data.uart_serial_bus;
+		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART)
+			dev->init_speed = sb->default_baud_rate;
+	}
+
+	/* Always tell the ACPI core to skip this resource */
+	return 1;
+}
+
+static int bcm_acpi_probe(struct bcm_device *dev)
+{
+	struct platform_device *pdev = dev->pdev;
+	const struct acpi_device_id *id;
+	struct acpi_device *adev;
+	LIST_HEAD(resources);
+	int ret;
+
+	id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
+	if (!id)
+		return -ENODEV;
+
+	/* Retrieve GPIO data */
+	dev->name = dev_name(&pdev->dev);
+	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
+					acpi_bcm_default_gpios);
+	if (ret)
+		return ret;
+
+	dev->clk = devm_clk_get(&pdev->dev, NULL);
+
+	dev->device_wakeup = devm_gpiod_get_optional(&pdev->dev,
+						     "device-wakeup",
+						     GPIOD_OUT_LOW);
+	if (IS_ERR(dev->device_wakeup))
+		return PTR_ERR(dev->device_wakeup);
+
+	dev->shutdown = devm_gpiod_get_optional(&pdev->dev, "shutdown",
+						GPIOD_OUT_LOW);
+	if (IS_ERR(dev->shutdown))
+		return PTR_ERR(dev->shutdown);
+
+	/* Make sure at least one of the GPIOs is defined and that
+	 * a name is specified for this instance
+	 */
+	if ((!dev->device_wakeup && !dev->shutdown) || !dev->name) {
+		dev_err(&pdev->dev, "invalid platform data\n");
+		return -EINVAL;
+	}
+
+	/* Retrieve UART ACPI info */
+	adev = ACPI_COMPANION(&dev->pdev->dev);
+	if (!adev)
+		return 0;
+
+	acpi_dev_get_resources(adev, &resources, bcm_resource, dev);
+
+	return 0;
+}
+#else
+static int bcm_acpi_probe(struct bcm_device *dev)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_ACPI */
+
+static int bcm_probe(struct platform_device *pdev)
+{
+	struct bcm_device *dev;
+	struct acpi_device_id *pdata = pdev->dev.platform_data;
+	int ret;
+
+	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->pdev = pdev;
+
+	if (ACPI_HANDLE(&pdev->dev)) {
+		ret = bcm_acpi_probe(dev);
+		if (ret)
+			return ret;
+	} else if (pdata) {
+		dev->name = pdata->id;
+	} else {
+		return -ENODEV;
+	}
+
+	platform_set_drvdata(pdev, dev);
+
+	dev_info(&pdev->dev, "%s device registered.\n", dev->name);
+
+	/* Place this instance on the device list */
+	spin_lock(&bcm_device_lock);
+	list_add_tail(&dev->list, &bcm_device_list);
+	spin_unlock(&bcm_device_lock);
+
+	bcm_gpio_set_power(dev, false);
+
+	return 0;
+}
+
+static int bcm_remove(struct platform_device *pdev)
+{
+	struct bcm_device *dev = platform_get_drvdata(pdev);
+
+	spin_lock(&bcm_device_lock);
+	list_del(&dev->list);
+	spin_unlock(&bcm_device_lock);
+
+	acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
+
+	dev_info(&pdev->dev, "%s device unregistered.\n", dev->name);
+
+	return 0;
+}
+
 static const struct hci_uart_proto bcm_proto = {
 	.id		= HCI_UART_BCM,
 	.name		= "BCM",
@@ -247,12 +543,38 @@
 	.dequeue	= bcm_dequeue,
 };
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id bcm_acpi_match[] = {
+	{ "BCM2E39", 0 },
+	{ "BCM2E67", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, bcm_acpi_match);
+#endif
+
+/* Platform suspend and resume callbacks */
+static SIMPLE_DEV_PM_OPS(bcm_pm_ops, bcm_suspend, bcm_resume);
+
+static struct platform_driver bcm_driver = {
+	.probe = bcm_probe,
+	.remove = bcm_remove,
+	.driver = {
+		.name = "hci_bcm",
+		.acpi_match_table = ACPI_PTR(bcm_acpi_match),
+		.pm = &bcm_pm_ops,
+	},
+};
+
 int __init bcm_init(void)
 {
+	platform_driver_register(&bcm_driver);
+
 	return hci_uart_register_proto(&bcm_proto);
 }
 
 int __exit bcm_deinit(void)
 {
+	platform_driver_unregister(&bcm_driver);
+
 	return hci_uart_unregister_proto(&bcm_proto);
 }
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 57faddc..eec3f28 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -223,8 +223,7 @@
 			switch ((&pkts[i])->lsize) {
 			case 0:
 				/* No variable data length */
-				(&pkts[i])->recv(hdev, skb);
-				skb = NULL;
+				dlen = 0;
 				break;
 			case 1:
 				/* Single octet variable length */
@@ -252,6 +251,12 @@
 				kfree_skb(skb);
 				return ERR_PTR(-EILSEQ);
 			}
+
+			if (!dlen) {
+				/* No more data, complete frame */
+				(&pkts[i])->recv(hdev, skb);
+				skb = NULL;
+			}
 		} else {
 			/* Complete frame */
 			(&pkts[i])->recv(hdev, skb);
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 3455cec..b35b238 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -75,7 +75,7 @@
 	size_t			rx_pending;	/* Expecting more bytes */
 	u8			rx_ack;		/* Last ack number received */
 
-	int			(*rx_func) (struct hci_uart *hu, u8 c);
+	int			(*rx_func)(struct hci_uart *hu, u8 c);
 
 	struct timer_list	timer;		/* Retransmission timer */
 
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 5dd07bf..cf07d11 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -24,8 +24,864 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/tty.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/acpi.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
 #include "hci_uart.h"
+#include "btintel.h"
+
+#define STATE_BOOTLOADER	0
+#define STATE_DOWNLOADING	1
+#define STATE_FIRMWARE_LOADED	2
+#define STATE_FIRMWARE_FAILED	3
+#define STATE_BOOTING		4
+
+struct intel_device {
+	struct list_head list;
+	struct platform_device *pdev;
+	struct gpio_desc *reset;
+};
+
+static LIST_HEAD(intel_device_list);
+static DEFINE_SPINLOCK(intel_device_list_lock);
+
+struct intel_data {
+	struct sk_buff *rx_skb;
+	struct sk_buff_head txq;
+	unsigned long flags;
+};
+
+static u8 intel_convert_speed(unsigned int speed)
+{
+	switch (speed) {
+	case 9600:
+		return 0x00;
+	case 19200:
+		return 0x01;
+	case 38400:
+		return 0x02;
+	case 57600:
+		return 0x03;
+	case 115200:
+		return 0x04;
+	case 230400:
+		return 0x05;
+	case 460800:
+		return 0x06;
+	case 921600:
+		return 0x07;
+	case 1843200:
+		return 0x08;
+	case 3250000:
+		return 0x09;
+	case 2000000:
+		return 0x0a;
+	case 3000000:
+		return 0x0b;
+	default:
+		return 0xff;
+	}
+}
+
+static int intel_wait_booting(struct hci_uart *hu)
+{
+	struct intel_data *intel = hu->priv;
+	int err;
+
+	err = wait_on_bit_timeout(&intel->flags, STATE_BOOTING,
+				  TASK_INTERRUPTIBLE,
+				  msecs_to_jiffies(1000));
+
+	if (err == 1) {
+		BT_ERR("%s: Device boot interrupted", hu->hdev->name);
+		return -EINTR;
+	}
+
+	if (err) {
+		BT_ERR("%s: Device boot timeout", hu->hdev->name);
+		return -ETIMEDOUT;
+	}
+
+	return err;
+}
+
+static int intel_set_power(struct hci_uart *hu, bool powered)
+{
+	struct list_head *p;
+	int err = -ENODEV;
+
+	spin_lock(&intel_device_list_lock);
+
+	list_for_each(p, &intel_device_list) {
+		struct intel_device *idev = list_entry(p, struct intel_device,
+						       list);
+
+		/* tty device and pdev device should share the same parent
+		 * which is the UART port.
+		 */
+		if (hu->tty->dev->parent != idev->pdev->dev.parent)
+			continue;
+
+		if (!idev->reset) {
+			err = -ENOTSUPP;
+			break;
+		}
+
+		BT_INFO("hu %p, Switching compatible pm device (%s) to %u",
+			hu, dev_name(&idev->pdev->dev), powered);
+
+		gpiod_set_value(idev->reset, powered);
+	}
+
+	spin_unlock(&intel_device_list_lock);
+
+	return err;
+}
+
+static int intel_open(struct hci_uart *hu)
+{
+	struct intel_data *intel;
+
+	BT_DBG("hu %p", hu);
+
+	intel = kzalloc(sizeof(*intel), GFP_KERNEL);
+	if (!intel)
+		return -ENOMEM;
+
+	skb_queue_head_init(&intel->txq);
+
+	hu->priv = intel;
+
+	if (!intel_set_power(hu, true))
+		set_bit(STATE_BOOTING, &intel->flags);
+
+	return 0;
+}
+
+static int intel_close(struct hci_uart *hu)
+{
+	struct intel_data *intel = hu->priv;
+
+	BT_DBG("hu %p", hu);
+
+	intel_set_power(hu, false);
+
+	skb_queue_purge(&intel->txq);
+	kfree_skb(intel->rx_skb);
+	kfree(intel);
+
+	hu->priv = NULL;
+	return 0;
+}
+
+static int intel_flush(struct hci_uart *hu)
+{
+	struct intel_data *intel = hu->priv;
+
+	BT_DBG("hu %p", hu);
+
+	skb_queue_purge(&intel->txq);
+
+	return 0;
+}
+
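+/* Synthesize an HCI Command Complete event for a vendor command that
+ * the controller never answers, so the core's command flow control is
+ * not left waiting.
+ */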
+static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
+{
+	struct sk_buff *skb;
+	struct hci_event_hdr *hdr;
+	struct hci_ev_cmd_complete *evt;
+
+	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
+	hdr->evt = HCI_EV_CMD_COMPLETE;
+	hdr->plen = sizeof(*evt) + 1;
+
+	evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
+	evt->ncmd = 0x01;
+	evt->opcode = cpu_to_le16(opcode);
+
+	*skb_put(skb, 1) = 0x00;
+
+	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+
+	return hci_recv_frame(hdev, skb);
+}
+
+static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed)
+{
+	struct intel_data *intel = hu->priv;
+	struct hci_dev *hdev = hu->hdev;
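+	/* Vendor speed change command: opcode 0xfc06 (little endian)
+	 * with one parameter byte carrying the encoded baud rate.
+	 */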
+	u8 speed_cmd[] = { 0x06, 0xfc, 0x01, 0x00 };
+	struct sk_buff *skb;
+	int err;
+
+	/* This can be the first command sent to the chip, check
+	 * that the controller is ready.
+	 */
+	err = intel_wait_booting(hu);
+
+	clear_bit(STATE_BOOTING, &intel->flags);
+
+	/* In case of timeout, try to continue anyway */
+	if (err && err != -ETIMEDOUT)
+		return err;
+
+	BT_INFO("%s: Change controller speed to %d", hdev->name, speed);
+
+	speed_cmd[3] = intel_convert_speed(speed);
+	if (speed_cmd[3] == 0xff) {
+		BT_ERR("%s: Unsupported speed", hdev->name);
+		return -EINVAL;
+	}
+
+	/* Device will not accept speed change if Intel version has not been
+	 * previously requested.
+	 */
+	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		BT_ERR("%s: Reading Intel version information failed (%ld)",
+		       hdev->name, PTR_ERR(skb));
+		return PTR_ERR(skb);
+	}
+	kfree_skb(skb);
+
+	skb = bt_skb_alloc(sizeof(speed_cmd), GFP_KERNEL);
+	if (!skb) {
+		BT_ERR("%s: Failed to allocate memory for baudrate packet",
+		       hdev->name);
+		return -ENOMEM;
+	}
+
+	memcpy(skb_put(skb, sizeof(speed_cmd)), speed_cmd, sizeof(speed_cmd));
+	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+
+	hci_uart_set_flow_control(hu, true);
+
+	skb_queue_tail(&intel->txq, skb);
+	hci_uart_tx_wakeup(hu);
+
+	/* wait 100ms to change baudrate on controller side */
+	msleep(100);
+
+	hci_uart_set_baudrate(hu, speed);
+	hci_uart_set_flow_control(hu, false);
+
+	return 0;
+}
+
+static int intel_setup(struct hci_uart *hu)
+{
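+	/* Intel Reset (0xfc01) parameters; by our reading these encode
+	 * reset type, patch enable, DDC reload, boot option and a
+	 * 32-bit boot address.
+	 */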
+	static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
+					  0x00, 0x08, 0x04, 0x00 };
+	struct intel_data *intel = hu->priv;
+	struct hci_dev *hdev = hu->hdev;
+	struct sk_buff *skb;
+	struct intel_version *ver;
+	struct intel_boot_params *params;
+	const struct firmware *fw;
+	const u8 *fw_ptr;
+	char fwname[64];
+	u32 frag_len;
+	ktime_t calltime, delta, rettime;
+	unsigned long long duration;
+	unsigned int init_speed, oper_speed;
+	int speed_change = 0;
+	int err;
+
+	BT_DBG("%s", hdev->name);
+
+	hu->hdev->set_bdaddr = btintel_set_bdaddr;
+
+	calltime = ktime_get();
+
+	if (hu->init_speed)
+		init_speed = hu->init_speed;
+	else
+		init_speed = hu->proto->init_speed;
+
+	if (hu->oper_speed)
+		oper_speed = hu->oper_speed;
+	else
+		oper_speed = hu->proto->oper_speed;
+
+	if (oper_speed && init_speed && oper_speed != init_speed)
+		speed_change = 1;
+
+	/* Check that the controller is ready */
+	err = intel_wait_booting(hu);
+
+	clear_bit(STATE_BOOTING, &intel->flags);
+
+	/* In case of timeout, try to continue anyway */
+	if (err && err != -ETIMEDOUT)
+		return err;
+
+	set_bit(STATE_BOOTLOADER, &intel->flags);
+
+	/* Read the Intel version information to determine if the device
+	 * is in bootloader mode or if it already has operational firmware
+	 * loaded.
+	 */
+	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		BT_ERR("%s: Reading Intel version information failed (%ld)",
+		       hdev->name, PTR_ERR(skb));
+		return PTR_ERR(skb);
+	}
+
+	if (skb->len != sizeof(*ver)) {
+		BT_ERR("%s: Intel version event size mismatch", hdev->name);
+		kfree_skb(skb);
+		return -EILSEQ;
+	}
+
+	ver = (struct intel_version *)skb->data;
+	if (ver->status) {
+		BT_ERR("%s: Intel version command failure (%02x)",
+		       hdev->name, ver->status);
+		err = -bt_to_errno(ver->status);
+		kfree_skb(skb);
+		return err;
+	}
+
+	/* The hardware platform number has a fixed value of 0x37 and
+	 * for now only accept this single value.
+	 */
+	if (ver->hw_platform != 0x37) {
+		BT_ERR("%s: Unsupported Intel hardware platform (%u)",
+		       hdev->name, ver->hw_platform);
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	/* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
+	 * supported by this firmware loading method. This check has been
+	 * put in place to ensure correct forward compatibility options
+	 * when newer hardware variants come along.
+	 */
+	if (ver->hw_variant != 0x0b) {
+		BT_ERR("%s: Unsupported Intel hardware variant (%u)",
+		       hdev->name, ver->hw_variant);
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	btintel_version_info(hdev, ver);
+
+	/* The firmware variant determines if the device is in bootloader
+	 * mode or is running operational firmware. The value 0x06 identifies
+	 * the bootloader and the value 0x23 identifies the operational
+	 * firmware.
+	 *
+	 * When the operational firmware is already present, then only
+	 * the check for valid Bluetooth device address is needed. This
+	 * determines if the device will be added as configured or
+	 * unconfigured controller.
+	 *
+	 * It is not possible to use the Secure Boot Parameters in this
+	 * case since that command is only available in bootloader mode.
+	 */
+	if (ver->fw_variant == 0x23) {
+		kfree_skb(skb);
+		clear_bit(STATE_BOOTLOADER, &intel->flags);
+		btintel_check_bdaddr(hdev);
+		return 0;
+	}
+
+	/* If the device is not in bootloader mode, then the only possible
+	 * choice is to return an error and abort the device initialization.
+	 */
+	if (ver->fw_variant != 0x06) {
+		BT_ERR("%s: Unsupported Intel firmware variant (%u)",
+		       hdev->name, ver->fw_variant);
+		kfree_skb(skb);
+		return -ENODEV;
+	}
+
+	kfree_skb(skb);
+
+	/* Read the secure boot parameters to identify the operating
+	 * details of the bootloader.
+	 */
+	skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		BT_ERR("%s: Reading Intel boot parameters failed (%ld)",
+		       hdev->name, PTR_ERR(skb));
+		return PTR_ERR(skb);
+	}
+
+	if (skb->len != sizeof(*params)) {
+		BT_ERR("%s: Intel boot parameters size mismatch", hdev->name);
+		kfree_skb(skb);
+		return -EILSEQ;
+	}
+
+	params = (struct intel_boot_params *)skb->data;
+	if (params->status) {
+		BT_ERR("%s: Intel boot parameters command failure (%02x)",
+		       hdev->name, params->status);
+		err = -bt_to_errno(params->status);
+		kfree_skb(skb);
+		return err;
+	}
+
+	BT_INFO("%s: Device revision is %u", hdev->name,
+		le16_to_cpu(params->dev_revid));
+
+	BT_INFO("%s: Secure boot is %s", hdev->name,
+		params->secure_boot ? "enabled" : "disabled");
+
+	BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name,
+		params->min_fw_build_nn, params->min_fw_build_cw,
+		2000 + params->min_fw_build_yy);
+
+	/* It is required that every single firmware fragment is acknowledged
+	 * with a command complete event. If the boot parameters indicate
+	 * that this bootloader does not send them, then abort the setup.
+	 */
+	if (params->limited_cce != 0x00) {
+		BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
+		       hdev->name, params->limited_cce);
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	/* If the OTP has no valid Bluetooth device address, then there will
+	 * also be no valid address for the operational firmware.
+	 */
+	if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
+		BT_INFO("%s: No device address configured", hdev->name);
+		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+	}
+
+	/* With this Intel bootloader only the hardware variant and device
+	 * revision information are used to select the right firmware.
+	 *
+	 * Currently this bootloader support is limited to hardware variant
+	 * iBT 3.0 (LnP/SfP) which is identified by the value 11 (0x0b).
+	 */
+	snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.sfi",
+		 le16_to_cpu(params->dev_revid));
+
+	err = request_firmware(&fw, fwname, &hdev->dev);
+	if (err < 0) {
+		BT_ERR("%s: Failed to load Intel firmware file (%d)",
+		       hdev->name, err);
+		kfree_skb(skb);
+		return err;
+	}
+
+	BT_INFO("%s: Found device firmware: %s", hdev->name, fwname);
+
+	kfree_skb(skb);
+
+	if (fw->size < 644) {
+		BT_ERR("%s: Invalid size of firmware file (%zu)",
+		       hdev->name, fw->size);
+		err = -EBADF;
+		goto done;
+	}
+
+	set_bit(STATE_DOWNLOADING, &intel->flags);
+
+	/* Start the firmware download transaction with the Init fragment
+	 * represented by the 128 bytes of CSS header.
+	 */
+	err = btintel_secure_send(hdev, 0x00, 128, fw->data);
+	if (err < 0) {
+		BT_ERR("%s: Failed to send firmware header (%d)",
+		       hdev->name, err);
+		goto done;
+	}
+
+	/* Send the 256 bytes of public key information from the firmware
+	 * as the PKey fragment.
+	 */
+	err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
+	if (err < 0) {
+		BT_ERR("%s: Failed to send firmware public key (%d)",
+		       hdev->name, err);
+		goto done;
+	}
+
+	/* Send the 256 bytes of signature information from the firmware
+	 * as the Sign fragment.
+	 */
+	err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
+	if (err < 0) {
+		BT_ERR("%s: Failed to send firmware signature (%d)",
+		       hdev->name, err);
+		goto done;
+	}
+
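+	/* The remaining command stream starts after the 128-byte CSS
+	 * header, 256-byte public key and 256-byte signature (the
+	 * signature begins at offset 388), i.e. at offset 644.
+	 */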
+	fw_ptr = fw->data + 644;
+	frag_len = 0;
+
+	while (fw_ptr - fw->data < fw->size) {
+		struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
+
+		frag_len += sizeof(*cmd) + cmd->plen;
+
+		BT_DBG("%s: patching %td/%zu", hdev->name,
+		       (fw_ptr - fw->data), fw->size);
+
+		/* The parameter length of the secure send command requires
+		 * a 4 byte alignment. It happens so that the firmware file
+		 * contains proper Intel_NOP commands to align the fragments
+		 * as needed.
+		 *
+		 * Send set of commands with 4 byte alignment from the
+		 * firmware data buffer as a single Data fragment.
+		 */
+		if (frag_len % 4)
+			continue;
+
+		/* Send each command from the firmware data buffer as
+		 * a single Data fragment.
+		 */
+		err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
+		if (err < 0) {
+			BT_ERR("%s: Failed to send firmware data (%d)",
+			       hdev->name, err);
+			goto done;
+		}
+
+		fw_ptr += frag_len;
+		frag_len = 0;
+	}
+
+	set_bit(STATE_FIRMWARE_LOADED, &intel->flags);
+
+	BT_INFO("%s: Waiting for firmware download to complete", hdev->name);
+
+	/* Before switching the device into operational mode and with that
+	 * booting the loaded firmware, wait for the bootloader notification
+	 * that all fragments have been successfully received.
+	 *
+	 * When the event processing receives the notification, then the
+	 * STATE_DOWNLOADING flag will be cleared.
+	 *
+	 * The firmware loading should not take longer than 5 seconds
+	 * and thus just timeout if that happens and fail the setup
+	 * of this device.
+	 */
+	err = wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING,
+				  TASK_INTERRUPTIBLE,
+				  msecs_to_jiffies(5000));
+	if (err == 1) {
+		BT_ERR("%s: Firmware loading interrupted", hdev->name);
+		err = -EINTR;
+		goto done;
+	}
+
+	if (err) {
+		BT_ERR("%s: Firmware loading timeout", hdev->name);
+		err = -ETIMEDOUT;
+		goto done;
+	}
+
+	if (test_bit(STATE_FIRMWARE_FAILED, &intel->flags)) {
+		BT_ERR("%s: Firmware loading failed", hdev->name);
+		err = -ENOEXEC;
+		goto done;
+	}
+
+	rettime = ktime_get();
+	delta = ktime_sub(rettime, calltime);
+	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+	BT_INFO("%s: Firmware loaded in %llu usecs", hdev->name, duration);
+
+done:
+	release_firmware(fw);
+
+	if (err < 0)
+		return err;
+
+	/* We need to restore the default speed before Intel reset */
+	if (speed_change) {
+		err = intel_set_baudrate(hu, init_speed);
+		if (err)
+			return err;
+	}
+
+	calltime = ktime_get();
+
+	set_bit(STATE_BOOTING, &intel->flags);
+
+	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param,
+			     HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	kfree_skb(skb);
+
+	/* The bootloader will not indicate when the device is ready. This
+	 * is done by the operational firmware sending bootup notification.
+	 *
+	 * Booting into operational firmware should not take longer than
+	 * 1 second. However if that happens, then just fail the setup
+	 * since something went wrong.
+	 */
+	BT_INFO("%s: Waiting for device to boot", hdev->name);
+
+	err = intel_wait_booting(hu);
+	if (err)
+		return err;
+
+	clear_bit(STATE_BOOTING, &intel->flags);
+
+	rettime = ktime_get();
+	delta = ktime_sub(rettime, calltime);
+	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+	BT_INFO("%s: Device booted in %llu usecs", hdev->name, duration);
+
+	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+	kfree_skb(skb);
+
+	if (speed_change) {
+		err = intel_set_baudrate(hu, oper_speed);
+		if (err)
+			return err;
+	}
+
+	BT_INFO("%s: Setup complete", hdev->name);
+
+	clear_bit(STATE_BOOTLOADER, &intel->flags);
+
+	return 0;
+}
+
+static int intel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+	struct intel_data *intel = hu->priv;
+	struct hci_event_hdr *hdr;
+
+	if (!test_bit(STATE_BOOTLOADER, &intel->flags) &&
+	    !test_bit(STATE_BOOTING, &intel->flags))
+		goto recv;
+
+	hdr = (void *)skb->data;
+
+	/* When the firmware loading completes the device sends
+	 * out a vendor specific event indicating the result of
+	 * the firmware loading.
+	 */
+	if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 &&
+	    skb->data[2] == 0x06) {
+		if (skb->data[3] != 0x00)
+			set_bit(STATE_FIRMWARE_FAILED, &intel->flags);
+
+		if (test_and_clear_bit(STATE_DOWNLOADING, &intel->flags) &&
+		    test_bit(STATE_FIRMWARE_LOADED, &intel->flags)) {
+			smp_mb__after_atomic();
+			wake_up_bit(&intel->flags, STATE_DOWNLOADING);
+		}
+
+	/* When switching to the operational firmware the device
+	 * sends a vendor specific event indicating that the bootup
+	 * completed.
+	 */
+	} else if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
+		   skb->data[2] == 0x02) {
+		if (test_and_clear_bit(STATE_BOOTING, &intel->flags)) {
+			smp_mb__after_atomic();
+			wake_up_bit(&intel->flags, STATE_BOOTING);
+		}
+	}
+recv:
+	return hci_recv_frame(hdev, skb);
+}
+
+static const struct h4_recv_pkt intel_recv_pkts[] = {
+	{ H4_RECV_ACL,   .recv = hci_recv_frame },
+	{ H4_RECV_SCO,   .recv = hci_recv_frame },
+	{ H4_RECV_EVENT, .recv = intel_recv_event },
+};
+
+static int intel_recv(struct hci_uart *hu, const void *data, int count)
+{
+	struct intel_data *intel = hu->priv;
+
+	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+		return -EUNATCH;
+
+	intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count,
+				    intel_recv_pkts,
+				    ARRAY_SIZE(intel_recv_pkts));
+	if (IS_ERR(intel->rx_skb)) {
+		int err = PTR_ERR(intel->rx_skb);
+		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+		intel->rx_skb = NULL;
+		return err;
+	}
+
+	return count;
+}
+
+static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+	struct intel_data *intel = hu->priv;
+
+	BT_DBG("hu %p skb %p", hu, skb);
+
+	skb_queue_tail(&intel->txq, skb);
+
+	return 0;
+}
+
+static struct sk_buff *intel_dequeue(struct hci_uart *hu)
+{
+	struct intel_data *intel = hu->priv;
+	struct sk_buff *skb;
+
+	skb = skb_dequeue(&intel->txq);
+	if (!skb)
+		return skb;
+
+	if (test_bit(STATE_BOOTLOADER, &intel->flags) &&
+	    (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT)) {
+		struct hci_command_hdr *cmd = (void *)skb->data;
+		__u16 opcode = le16_to_cpu(cmd->opcode);
+
+		/* When the 0xfc01 command is issued to boot into
+		 * the operational firmware, it will actually not
+		 * send a command complete event. To keep the flow
+		 * control working inject that event here.
+		 */
+		if (opcode == 0xfc01)
+			inject_cmd_complete(hu->hdev, opcode);
+	}
+
+	/* Prepend skb with frame type */
+	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+
+	return skb;
+}
+
+static const struct hci_uart_proto intel_proto = {
+	.id		= HCI_UART_INTEL,
+	.name		= "Intel",
+	.init_speed	= 115200,
+	.oper_speed	= 3000000,
+	.open		= intel_open,
+	.close		= intel_close,
+	.flush		= intel_flush,
+	.setup		= intel_setup,
+	.set_baudrate	= intel_set_baudrate,
+	.recv		= intel_recv,
+	.enqueue	= intel_enqueue,
+	.dequeue	= intel_dequeue,
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id intel_acpi_match[] = {
+	{ "INT33E1", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, intel_acpi_match);
+
+static int intel_acpi_probe(struct intel_device *idev)
+{
+	const struct acpi_device_id *id;
+
+	id = acpi_match_device(intel_acpi_match, &idev->pdev->dev);
+	if (!id)
+		return -ENODEV;
+
+	return 0;
+}
+#else
+static int intel_acpi_probe(struct intel_device *idev)
+{
+	return -ENODEV;
+}
+#endif
+
+static int intel_probe(struct platform_device *pdev)
+{
+	struct intel_device *idev;
+
+	idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
+	if (!idev)
+		return -ENOMEM;
+
+	idev->pdev = pdev;
+
+	if (ACPI_HANDLE(&pdev->dev)) {
+		int err = intel_acpi_probe(idev);
+		if (err)
+			return err;
+	} else {
+		return -ENODEV;
+	}
+
+	idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset",
+					      GPIOD_OUT_LOW);
+	if (IS_ERR(idev->reset)) {
+		dev_err(&pdev->dev, "Unable to retrieve gpio\n");
+		return PTR_ERR(idev->reset);
+	}
+
+	platform_set_drvdata(pdev, idev);
+
+	/* Place this instance on the device list */
+	spin_lock(&intel_device_list_lock);
+	list_add_tail(&idev->list, &intel_device_list);
+	spin_unlock(&intel_device_list_lock);
+
+	dev_info(&pdev->dev, "registered.\n");
+
+	return 0;
+}
+
+static int intel_remove(struct platform_device *pdev)
+{
+	struct intel_device *idev = platform_get_drvdata(pdev);
+
+	spin_lock(&intel_device_list_lock);
+	list_del(&idev->list);
+	spin_unlock(&intel_device_list_lock);
+
+	dev_info(&pdev->dev, "unregistered.\n");
+
+	return 0;
+}
+
+static struct platform_driver intel_driver = {
+	.probe = intel_probe,
+	.remove = intel_remove,
+	.driver = {
+		.name = "hci_intel",
+		.acpi_match_table = ACPI_PTR(intel_acpi_match),
+	},
+};
+
+int __init intel_init(void)
+{
+	platform_driver_register(&intel_driver);
+
+	return hci_uart_register_proto(&intel_proto);
+}
+
+int __exit intel_deinit(void)
+{
+	platform_driver_unregister(&intel_driver);
+
+	return hci_uart_unregister_proto(&intel_proto);
+}
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 177dd69..0d5a05a 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -770,7 +770,7 @@
 
 	/* Register the tty discipline */
 
-	memset(&hci_uart_ldisc, 0, sizeof (hci_uart_ldisc));
+	memset(&hci_uart_ldisc, 0, sizeof(hci_uart_ldisc));
 	hci_uart_ldisc.magic		= TTY_LDISC_MAGIC;
 	hci_uart_ldisc.name		= "n_hci";
 	hci_uart_ldisc.open		= hci_uart_tty_open;
@@ -804,9 +804,15 @@
 #ifdef CONFIG_BT_HCIUART_3WIRE
 	h5_init();
 #endif
+#ifdef CONFIG_BT_HCIUART_INTEL
+	intel_init();
+#endif
 #ifdef CONFIG_BT_HCIUART_BCM
 	bcm_init();
 #endif
+#ifdef CONFIG_BT_HCIUART_QCA
+	qca_init();
+#endif
 
 	return 0;
 }
@@ -830,9 +836,15 @@
 #ifdef CONFIG_BT_HCIUART_3WIRE
 	h5_deinit();
 #endif
+#ifdef CONFIG_BT_HCIUART_INTEL
+	intel_deinit();
+#endif
 #ifdef CONFIG_BT_HCIUART_BCM
 	bcm_deinit();
 #endif
+#ifdef CONFIG_BT_HCIUART_QCA
+	qca_deinit();
+#endif
 
 	/* Release tty registration of line discipline */
 	err = tty_unregister_ldisc(N_HCI);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
new file mode 100644
index 0000000..6b9b912
--- /dev/null
+++ b/drivers/bluetooth/hci_qca.c
@@ -0,0 +1,969 @@
+/*
+ *  Bluetooth Software UART Qualcomm protocol
+ *
+ *  HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
+ *  protocol extension to H4.
+ *
+ *  Copyright (C) 2007 Texas Instruments, Inc.
+ *  Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved.
+ *
+ *  Acknowledgements:
+ *  This file is based on hci_ll.c, which was...
+ *  Written by Ohad Ben-Cohen <ohad@bencohen.org>
+ *  which was in turn based on hci_h4.c, which was written
+ *  by Maxim Krasnyansky and Marcel Holtmann.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+#include "btqca.h"
+
+/* HCI_IBS protocol messages */
+#define HCI_IBS_SLEEP_IND	0xFE
+#define HCI_IBS_WAKE_IND	0xFD
+#define HCI_IBS_WAKE_ACK	0xFC
+#define HCI_MAX_IBS_SIZE	10
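+
+/* Wake handshake: a side that wants to transmit sends HCI_IBS_WAKE_IND
+ * and waits for HCI_IBS_WAKE_ACK; HCI_IBS_SLEEP_IND announces that the
+ * sender is about to go to sleep.
+ */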
+
+/* Controller states */
+#define STATE_IN_BAND_SLEEP_ENABLED	1
+
+#define IBS_WAKE_RETRANS_TIMEOUT_MS	100
+#define IBS_TX_IDLE_TIMEOUT_MS		2000
+#define BAUDRATE_SETTLE_TIMEOUT_MS	300
+
+/* HCI_IBS transmit side sleep protocol states */
+enum tx_ibs_states {
+	HCI_IBS_TX_ASLEEP,
+	HCI_IBS_TX_WAKING,
+	HCI_IBS_TX_AWAKE,
+};
+
+/* HCI_IBS receive side sleep protocol states */
+enum rx_states {
+	HCI_IBS_RX_ASLEEP,
+	HCI_IBS_RX_AWAKE,
+};
+
+/* HCI_IBS transmit and receive side clock state vote */
+enum hci_ibs_clock_state_vote {
+	HCI_IBS_VOTE_STATS_UPDATE,
+	HCI_IBS_TX_VOTE_CLOCK_ON,
+	HCI_IBS_TX_VOTE_CLOCK_OFF,
+	HCI_IBS_RX_VOTE_CLOCK_ON,
+	HCI_IBS_RX_VOTE_CLOCK_OFF,
+};
+
+struct qca_data {
+	struct hci_uart *hu;
+	struct sk_buff *rx_skb;
+	struct sk_buff_head txq;
+	struct sk_buff_head tx_wait_q;	/* HCI_IBS wait queue	*/
+	spinlock_t hci_ibs_lock;	/* HCI_IBS state lock	*/
+	u8 tx_ibs_state;	/* HCI_IBS transmit side power state*/
+	u8 rx_ibs_state;	/* HCI_IBS receive side power state */
+	u32 tx_vote;		/* Clock must be on for TX */
+	u32 rx_vote;		/* Clock must be on for RX */
+	struct timer_list tx_idle_timer;
+	u32 tx_idle_delay;
+	struct timer_list wake_retrans_timer;
+	u32 wake_retrans;
+	struct workqueue_struct *workqueue;
+	struct work_struct ws_awake_rx;
+	struct work_struct ws_awake_device;
+	struct work_struct ws_rx_vote_off;
+	struct work_struct ws_tx_vote_off;
+	unsigned long flags;
+
+	/* For debugging purpose */
+	u64 ibs_sent_wacks;
+	u64 ibs_sent_slps;
+	u64 ibs_sent_wakes;
+	u64 ibs_recv_wacks;
+	u64 ibs_recv_slps;
+	u64 ibs_recv_wakes;
+	u64 vote_last_jif;
+	u32 vote_on_ms;
+	u32 vote_off_ms;
+	u64 tx_votes_on;
+	u64 rx_votes_on;
+	u64 tx_votes_off;
+	u64 rx_votes_off;
+	u64 votes_on;
+	u64 votes_off;
+};
+
+static void __serial_clock_on(struct tty_struct *tty)
+{
+	/* TODO: Some chipsets require the UART clock to be enabled on
+	 * the host side to save power, which may need manual work.
+	 * Put UART clock enable code here if needed.
+	 */
+}
+
+static void __serial_clock_off(struct tty_struct *tty)
+{
+	/* TODO: Some chipsets require the UART clock to be disabled on
+	 * the host side to save power, which may need manual work.
+	 * Put UART clock disable code here if needed.
+	 */
+}
+
+/* serial_clock_vote needs to be called with the ibs lock held */
+static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
+{
+	struct qca_data *qca = hu->priv;
+	unsigned int diff;
+
+	bool old_vote = (qca->tx_vote | qca->rx_vote);
+	bool new_vote;
+
+	switch (vote) {
+	case HCI_IBS_VOTE_STATS_UPDATE:
+		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
+
+		if (old_vote)
+			qca->vote_off_ms += diff;
+		else
+			qca->vote_on_ms += diff;
+		return;
+
+	case HCI_IBS_TX_VOTE_CLOCK_ON:
+		qca->tx_vote = true;
+		qca->tx_votes_on++;
+		new_vote = true;
+		break;
+
+	case HCI_IBS_RX_VOTE_CLOCK_ON:
+		qca->rx_vote = true;
+		qca->rx_votes_on++;
+		new_vote = true;
+		break;
+
+	case HCI_IBS_TX_VOTE_CLOCK_OFF:
+		qca->tx_vote = false;
+		qca->tx_votes_off++;
+		new_vote = qca->rx_vote | qca->tx_vote;
+		break;
+
+	case HCI_IBS_RX_VOTE_CLOCK_OFF:
+		qca->rx_vote = false;
+		qca->rx_votes_off++;
+		new_vote = qca->rx_vote | qca->tx_vote;
+		break;
+
+	default:
+		BT_ERR("Voting irregularity");
+		return;
+	}
+
+	if (new_vote != old_vote) {
+		if (new_vote)
+			__serial_clock_on(hu->tty);
+		else
+			__serial_clock_off(hu->tty);
+
+		BT_DBG("Vote serial clock %s(%s)", new_vote? "true" : "false",
+		       vote? "true" : "false");
+
+		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
+
+		if (new_vote) {
+			qca->votes_on++;
+			qca->vote_off_ms += diff;
+		} else {
+			qca->votes_off++;
+			qca->vote_on_ms += diff;
+		}
+		qca->vote_last_jif = jiffies;
+	}
+}
+
+/* Builds and queues an HCI_IBS command packet; the caller is expected
+ * to kick the transmit path. These are very simple packets with only
+ * 1 cmd byte.
+ */
+static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
+{
+	int err = 0;
+	struct sk_buff *skb = NULL;
+	struct qca_data *qca = hu->priv;
+
+	BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);
+
+	skb = bt_skb_alloc(1, GFP_ATOMIC);
+	if (!skb) {
+		BT_ERR("Failed to allocate memory for HCI_IBS packet");
+		return -ENOMEM;
+	}
+
+	/* Assign HCI_IBS type */
+	*skb_put(skb, 1) = cmd;
+
+	skb_queue_tail(&qca->txq, skb);
+
+	return err;
+}
+
+static void qca_wq_awake_device(struct work_struct *work)
+{
+	struct qca_data *qca = container_of(work, struct qca_data,
+					    ws_awake_device);
+	struct hci_uart *hu = qca->hu;
+	unsigned long retrans_delay;
+
+	BT_DBG("hu %p wq awake device", hu);
+
+	/* Vote for serial clock */
+	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
+
+	spin_lock(&qca->hci_ibs_lock);
+
+	/* Send wake indication to device */
+	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
+		BT_ERR("Failed to send WAKE to device");
+
+	qca->ibs_sent_wakes++;
+
+	/* Start retransmit timer */
+	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
+	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
+
+	spin_unlock(&qca->hci_ibs_lock);
+
+	/* Actually send the packets */
+	hci_uart_tx_wakeup(hu);
+}
+
+static void qca_wq_awake_rx(struct work_struct *work)
+{
+	struct qca_data *qca = container_of(work, struct qca_data,
+					    ws_awake_rx);
+	struct hci_uart *hu = qca->hu;
+
+	BT_DBG("hu %p wq awake rx", hu);
+
+	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
+
+	spin_lock(&qca->hci_ibs_lock);
+	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
+
+	/* Always acknowledge the device wake up;
+	 * sending an IBS message doesn't count as TX ON.
+	 */
+	if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
+		BT_ERR("Failed to acknowledge device wake up");
+
+	qca->ibs_sent_wacks++;
+
+	spin_unlock(&qca->hci_ibs_lock);
+
+	/* Actually send the packets */
+	hci_uart_tx_wakeup(hu);
+}
+
+static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
+{
+	struct qca_data *qca = container_of(work, struct qca_data,
+					    ws_rx_vote_off);
+	struct hci_uart *hu = qca->hu;
+
+	BT_DBG("hu %p rx clock vote off", hu);
+
+	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
+}
+
+static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
+{
+	struct qca_data *qca = container_of(work, struct qca_data,
+					    ws_tx_vote_off);
+	struct hci_uart *hu = qca->hu;
+
+	BT_DBG("hu %p tx clock vote off", hu);
+
+	/* Run HCI tx handling unlocked */
+	hci_uart_tx_wakeup(hu);
+
+	/* Now that the message is queued to the tty driver, vote for the
+	 * tty clocks to go off. It is up to the tty driver to hold off
+	 * the clocks until tx is done.
+	 */
+	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
+}
+
+static void hci_ibs_tx_idle_timeout(unsigned long arg)
+{
+	struct hci_uart *hu = (struct hci_uart *)arg;
+	struct qca_data *qca = hu->priv;
+	unsigned long flags;
+
+	BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);
+
+	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+				 flags, SINGLE_DEPTH_NESTING);
+
+	switch (qca->tx_ibs_state) {
+	case HCI_IBS_TX_AWAKE:
+		/* TX_IDLE, go to SLEEP */
+		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
+			BT_ERR("Failed to send SLEEP to device");
+			break;
+		}
+		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
+		qca->ibs_sent_slps++;
+		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
+		break;
+
+	case HCI_IBS_TX_ASLEEP:
+	case HCI_IBS_TX_WAKING:
+		/* Fall through */
+
+	default:
+		BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
+		break;
+	}
+
+	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+}
+
+static void hci_ibs_wake_retrans_timeout(unsigned long arg)
+{
+	struct hci_uart *hu = (struct hci_uart *)arg;
+	struct qca_data *qca = hu->priv;
+	unsigned long flags, retrans_delay;
+	unsigned long retransmit = 0;
+
+	BT_DBG("hu %p wake retransmit timeout in %d state",
+		hu, qca->tx_ibs_state);
+
+	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+				 flags, SINGLE_DEPTH_NESTING);
+
+	switch (qca->tx_ibs_state) {
+	case HCI_IBS_TX_WAKING:
+		/* No WAKE_ACK, retransmit WAKE */
+		retransmit = 1;
+		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
+			BT_ERR("Failed to acknowledge device wake up");
+			break;
+		}
+		qca->ibs_sent_wakes++;
+		retrans_delay = msecs_to_jiffies(qca->wake_retrans);
+		mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
+		break;
+
+	case HCI_IBS_TX_ASLEEP:
+	case HCI_IBS_TX_AWAKE:
+		/* Fall through */
+
+	default:
+		BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
+		break;
+	}
+
+	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+	if (retransmit)
+		hci_uart_tx_wakeup(hu);
+}
+
+/* Initialize protocol */
+static int qca_open(struct hci_uart *hu)
+{
+	struct qca_data *qca;
+
+	BT_DBG("hu %p qca_open", hu);
+
+	qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC);
+	if (!qca)
+		return -ENOMEM;
+
+	skb_queue_head_init(&qca->txq);
+	skb_queue_head_init(&qca->tx_wait_q);
+	spin_lock_init(&qca->hci_ibs_lock);
+	qca->workqueue = create_singlethread_workqueue("qca_wq");
+	if (!qca->workqueue) {
+		BT_ERR("QCA Workqueue not initialized properly");
+		kfree(qca);
+		return -ENOMEM;
+	}
+
+	INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
+	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
+	INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
+	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
+
+	qca->hu = hu;
+
+	/* Assume we start with both sides asleep -- extra wakes OK */
+	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
+	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
+
+	/* clocks actually on, but we start votes off */
+	qca->tx_vote = false;
+	qca->rx_vote = false;
+	qca->flags = 0;
+
+	qca->ibs_sent_wacks = 0;
+	qca->ibs_sent_slps = 0;
+	qca->ibs_sent_wakes = 0;
+	qca->ibs_recv_wacks = 0;
+	qca->ibs_recv_slps = 0;
+	qca->ibs_recv_wakes = 0;
+	qca->vote_last_jif = jiffies;
+	qca->vote_on_ms = 0;
+	qca->vote_off_ms = 0;
+	qca->votes_on = 0;
+	qca->votes_off = 0;
+	qca->tx_votes_on = 0;
+	qca->tx_votes_off = 0;
+	qca->rx_votes_on = 0;
+	qca->rx_votes_off = 0;
+
+	hu->priv = qca;
+
+	init_timer(&qca->wake_retrans_timer);
+	qca->wake_retrans_timer.function = hci_ibs_wake_retrans_timeout;
+	qca->wake_retrans_timer.data = (u_long)hu;
+	qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
+
+	init_timer(&qca->tx_idle_timer);
+	qca->tx_idle_timer.function = hci_ibs_tx_idle_timeout;
+	qca->tx_idle_timer.data = (u_long)hu;
+	qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
+
+	BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
+	       qca->tx_idle_delay, qca->wake_retrans);
+
+	return 0;
+}
+
+static void qca_debugfs_init(struct hci_dev *hdev)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+	struct qca_data *qca = hu->priv;
+	struct dentry *ibs_dir;
+	umode_t mode;
+
+	if (!hdev->debugfs)
+		return;
+
+	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);
+
+	/* read only */
+	mode = S_IRUGO;
+	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
+	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
+	debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
+			   &qca->ibs_sent_slps);
+	debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
+			   &qca->ibs_sent_wakes);
+	debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
+			   &qca->ibs_sent_wacks);
+	debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
+			   &qca->ibs_recv_slps);
+	debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
+			   &qca->ibs_recv_wakes);
+	debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
+			   &qca->ibs_recv_wacks);
+	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
+	debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
+	debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
+	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
+	debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
+	debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
+	debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
+	debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
+	debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
+	debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);
+
+	/* read/write */
+	mode = S_IRUGO | S_IWUSR;
+	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
+	debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
+			   &qca->tx_idle_delay);
+}
+
+/* Flush protocol data */
+static int qca_flush(struct hci_uart *hu)
+{
+	struct qca_data *qca = hu->priv;
+
+	BT_DBG("hu %p qca flush", hu);
+
+	skb_queue_purge(&qca->tx_wait_q);
+	skb_queue_purge(&qca->txq);
+
+	return 0;
+}
+
+/* Close protocol */
+static int qca_close(struct hci_uart *hu)
+{
+	struct qca_data *qca = hu->priv;
+
+	BT_DBG("hu %p qca close", hu);
+
+	serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);
+
+	skb_queue_purge(&qca->tx_wait_q);
+	skb_queue_purge(&qca->txq);
+	del_timer(&qca->tx_idle_timer);
+	del_timer(&qca->wake_retrans_timer);
+	destroy_workqueue(qca->workqueue);
+	qca->hu = NULL;
+
+	kfree_skb(qca->rx_skb);
+
+	hu->priv = NULL;
+
+	kfree(qca);
+
+	return 0;
+}
+
+/* Called upon a wake-up-indication from the device.
+ */
+static void device_want_to_wakeup(struct hci_uart *hu)
+{
+	unsigned long flags;
+	struct qca_data *qca = hu->priv;
+
+	BT_DBG("hu %p want to wake up", hu);
+
+	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+	qca->ibs_recv_wakes++;
+
+	switch (qca->rx_ibs_state) {
+	case HCI_IBS_RX_ASLEEP:
+		/* Make sure the clock is on - we may have turned it off since
+		 * receiving the wake-up indicator - so wake the rx path from
+		 * the workqueue.
+		 */
+		queue_work(qca->workqueue, &qca->ws_awake_rx);
+		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+		return;
+
+	case HCI_IBS_RX_AWAKE:
+		/* Always acknowledge device wake up;
+		 * sending an IBS message doesn't count as TX ON.
+		 */
+		if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
+			BT_ERR("Failed to acknowledge device wake up");
+			break;
+		}
+		qca->ibs_sent_wacks++;
+		break;
+
+	default:
+		/* Any other state is illegal */
+		BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
+		       qca->rx_ibs_state);
+		break;
+	}
+
+	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+	/* Actually send the packets */
+	hci_uart_tx_wakeup(hu);
+}
+
+/* Called upon a sleep-indication from the device.
+ */
+static void device_want_to_sleep(struct hci_uart *hu)
+{
+	unsigned long flags;
+	struct qca_data *qca = hu->priv;
+
+	BT_DBG("hu %p want to sleep", hu);
+
+	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+	qca->ibs_recv_slps++;
+
+	switch (qca->rx_ibs_state) {
+	case HCI_IBS_RX_AWAKE:
+		/* Update state */
+		qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
+		/* Vote off rx clock under workqueue */
+		queue_work(qca->workqueue, &qca->ws_rx_vote_off);
+		break;
+
+	case HCI_IBS_RX_ASLEEP:
+		/* Fall through */
+
+	default:
+		/* Any other state is illegal */
+		BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
+		       qca->rx_ibs_state);
+		break;
+	}
+
+	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+}
+
+/* Called upon wake-up-acknowledgement from the device
+ */
+static void device_woke_up(struct hci_uart *hu)
+{
+	unsigned long flags, idle_delay;
+	struct qca_data *qca = hu->priv;
+	struct sk_buff *skb = NULL;
+
+	BT_DBG("hu %p woke up", hu);
+
+	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+	qca->ibs_recv_wacks++;
+
+	switch (qca->tx_ibs_state) {
+	case HCI_IBS_TX_AWAKE:
+		/* Expected if we retransmitted WAKE and get a second ack */
+		BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
+		       qca->tx_ibs_state);
+		break;
+
+	case HCI_IBS_TX_WAKING:
+		/* Send pending packets */
+		while ((skb = skb_dequeue(&qca->tx_wait_q)))
+			skb_queue_tail(&qca->txq, skb);
+
+		/* Switch timers and change state to HCI_IBS_TX_AWAKE */
+		del_timer(&qca->wake_retrans_timer);
+		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
+		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
+		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
+		break;
+
+	case HCI_IBS_TX_ASLEEP:
+		/* Fall through */
+
+	default:
+		BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
+		       qca->tx_ibs_state);
+		break;
+	}
+
+	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+	/* Actually send the packets */
+	hci_uart_tx_wakeup(hu);
+}
+
+/* Enqueue frame for transmission (padding, crc, etc); may be called from
+ * two simultaneous tasklets.
+ */
+static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+	unsigned long flags = 0, idle_delay;
+	struct qca_data *qca = hu->priv;
+
+	BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
+	       qca->tx_ibs_state);
+
+	/* Prepend skb with frame type */
+	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+
+	/* Don't go to sleep in the middle of a patch download, or when
+	 * Out-Of-Band (GPIO-controlled) sleep is selected.
+	 */
+	if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
+		skb_queue_tail(&qca->txq, skb);
+		return 0;
+	}
+
+	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+	/* Act according to current state */
+	switch (qca->tx_ibs_state) {
+	case HCI_IBS_TX_AWAKE:
+		BT_DBG("Device awake, sending normally");
+		skb_queue_tail(&qca->txq, skb);
+		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
+		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
+		break;
+
+	case HCI_IBS_TX_ASLEEP:
+		BT_DBG("Device asleep, waking up and queueing packet");
+		/* Save packet for later */
+		skb_queue_tail(&qca->tx_wait_q, skb);
+
+		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
+		/* Schedule a work queue to wake up device */
+		queue_work(qca->workqueue, &qca->ws_awake_device);
+		break;
+
+	case HCI_IBS_TX_WAKING:
+		BT_DBG("Device waking up, queueing packet");
+		/* Transient state; just keep packet for later */
+		skb_queue_tail(&qca->tx_wait_q, skb);
+		break;
+
+	default:
+		BT_ERR("Illegal tx state: %d (losing packet)",
+		       qca->tx_ibs_state);
+		kfree_skb(skb);
+		break;
+	}
+
+	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+	return 0;
+}
+
+static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+
+	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);
+
+	device_want_to_sleep(hu);
+
+	kfree_skb(skb);
+	return 0;
+}
+
+static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+
+	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);
+
+	device_want_to_wakeup(hu);
+
+	kfree_skb(skb);
+	return 0;
+}
+
+static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+
+	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);
+
+	device_woke_up(hu);
+
+	kfree_skb(skb);
+	return 0;
+}
+
+#define QCA_IBS_SLEEP_IND_EVENT \
+	.type = HCI_IBS_SLEEP_IND, \
+	.hlen = 0, \
+	.loff = 0, \
+	.lsize = 0, \
+	.maxlen = HCI_MAX_IBS_SIZE
+
+#define QCA_IBS_WAKE_IND_EVENT \
+	.type = HCI_IBS_WAKE_IND, \
+	.hlen = 0, \
+	.loff = 0, \
+	.lsize = 0, \
+	.maxlen = HCI_MAX_IBS_SIZE
+
+#define QCA_IBS_WAKE_ACK_EVENT \
+	.type = HCI_IBS_WAKE_ACK, \
+	.hlen = 0, \
+	.loff = 0, \
+	.lsize = 0, \
+	.maxlen = HCI_MAX_IBS_SIZE
+
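+/* IBS messages are bare single-byte packets - no header and no length
+ * field (hlen/loff/lsize are all zero) - so they are matched on the
+ * packet type byte alone.
+ */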
+static const struct h4_recv_pkt qca_recv_pkts[] = {
+	{ H4_RECV_ACL,             .recv = hci_recv_frame    },
+	{ H4_RECV_SCO,             .recv = hci_recv_frame    },
+	{ H4_RECV_EVENT,           .recv = hci_recv_frame    },
+	{ QCA_IBS_WAKE_IND_EVENT,  .recv = qca_ibs_wake_ind  },
+	{ QCA_IBS_WAKE_ACK_EVENT,  .recv = qca_ibs_wake_ack  },
+	{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
+};
+
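+/* Feed received bytes through the shared H:4 reassembler: complete HCI
+ * packets go to the core via hci_recv_frame(), IBS bytes are consumed by
+ * the qca_ibs_* handlers above.
+ */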
+static int qca_recv(struct hci_uart *hu, const void *data, int count)
+{
+	struct qca_data *qca = hu->priv;
+
+	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+		return -EUNATCH;
+
+	qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
+				  qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
+	if (IS_ERR(qca->rx_skb)) {
+		int err = PTR_ERR(qca->rx_skb);
+		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+		qca->rx_skb = NULL;
+		return err;
+	}
+
+	return count;
+}
+
+static struct sk_buff *qca_dequeue(struct hci_uart *hu)
+{
+	struct qca_data *qca = hu->priv;
+
+	return skb_dequeue(&qca->txq);
+}
+
+static uint8_t qca_get_baudrate_value(int speed)
+{
+	switch (speed) {
+	case 9600:
+		return QCA_BAUDRATE_9600;
+	case 19200:
+		return QCA_BAUDRATE_19200;
+	case 38400:
+		return QCA_BAUDRATE_38400;
+	case 57600:
+		return QCA_BAUDRATE_57600;
+	case 115200:
+		return QCA_BAUDRATE_115200;
+	case 230400:
+		return QCA_BAUDRATE_230400;
+	case 460800:
+		return QCA_BAUDRATE_460800;
+	case 500000:
+		return QCA_BAUDRATE_500000;
+	case 921600:
+		return QCA_BAUDRATE_921600;
+	case 1000000:
+		return QCA_BAUDRATE_1000000;
+	case 2000000:
+		return QCA_BAUDRATE_2000000;
+	case 3000000:
+		return QCA_BAUDRATE_3000000;
+	case 3500000:
+		return QCA_BAUDRATE_3500000;
+	default:
+		return QCA_BAUDRATE_115200;
+	}
+}
+
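+/* Request a controller baudrate change via the QCA vendor-specific HCI
+ * command (opcode 0xfc48, little-endian in cmd[1..2]); cmd[4] carries
+ * the QCA_BAUDRATE_* code.
+ */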
+static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+	struct qca_data *qca = hu->priv;
+	struct sk_buff *skb;
+	u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
+
+	if (baudrate > QCA_BAUDRATE_3000000)
+		return -EINVAL;
+
+	cmd[4] = baudrate;
+
+	skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
+	if (!skb) {
+		BT_ERR("Failed to allocate memory for baudrate packet");
+		return -ENOMEM;
+	}
+
+	/* Assign commands to change baudrate and packet type. */
+	memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
+	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+
+	skb_queue_tail(&qca->txq, skb);
+	hci_uart_tx_wakeup(hu);
+
+	/* Wait 300ms for the controller to apply the new baudrate: it
+	 * responds only after processing this HCI command, after which the
+	 * host can talk to it at the new rate.
+	 */
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
+	set_current_state(TASK_RUNNING);
+
+	return 0;
+}
+
+static int qca_setup(struct hci_uart *hu)
+{
+	struct hci_dev *hdev = hu->hdev;
+	struct qca_data *qca = hu->priv;
+	unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
+	int ret;
+
+	BT_INFO("%s: ROME setup", hdev->name);
+
+	/* Patch downloading has to be done without IBS mode */
+	clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+
+	/* Setup initial baudrate */
+	speed = 0;
+	if (hu->init_speed)
+		speed = hu->init_speed;
+	else if (hu->proto->init_speed)
+		speed = hu->proto->init_speed;
+
+	if (speed)
+		hci_uart_set_baudrate(hu, speed);
+
+	/* Setup user speed if needed */
+	speed = 0;
+	if (hu->oper_speed)
+		speed = hu->oper_speed;
+	else if (hu->proto->oper_speed)
+		speed = hu->proto->oper_speed;
+
+	if (speed) {
+		qca_baudrate = qca_get_baudrate_value(speed);
+
+		BT_INFO("%s: Set UART speed to %d", hdev->name, speed);
+		ret = qca_set_baudrate(hdev, qca_baudrate);
+		if (ret) {
+			BT_ERR("%s: Failed to change the baud rate (%d)",
+			       hdev->name, ret);
+			return ret;
+		}
+		hci_uart_set_baudrate(hu, speed);
+	}
+
+	/* Setup patch / NVM configurations */
+	ret = qca_uart_setup_rome(hdev, qca_baudrate);
+	if (!ret) {
+		set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+		qca_debugfs_init(hdev);
+	}
+
+	/* Setup bdaddr */
+	hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
+
+	return ret;
+}
+
+static struct hci_uart_proto qca_proto = {
+	.id		= HCI_UART_QCA,
+	.name		= "QCA",
+	.init_speed	= 115200,
+	.oper_speed	= 3000000,
+	.open		= qca_open,
+	.close		= qca_close,
+	.flush		= qca_flush,
+	.setup		= qca_setup,
+	.recv		= qca_recv,
+	.enqueue	= qca_enqueue,
+	.dequeue	= qca_dequeue,
+};
+
+int __init qca_init(void)
+{
+	return hci_uart_register_proto(&qca_proto);
+}
+
+int __exit qca_deinit(void)
+{
+	return hci_uart_unregister_proto(&qca_proto);
+}
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index ce9c670..495b9ef 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -35,7 +35,7 @@
 #define HCIUARTGETFLAGS		_IOR('U', 204, int)
 
 /* UART protocols */
-#define HCI_UART_MAX_PROTO	8
+#define HCI_UART_MAX_PROTO	9
 
 #define HCI_UART_H4	0
 #define HCI_UART_BCSP	1
@@ -45,6 +45,7 @@
 #define HCI_UART_ATH3K	5
 #define HCI_UART_INTEL	6
 #define HCI_UART_BCM	7
+#define HCI_UART_QCA	8
 
 #define HCI_UART_RAW_DEVICE	0
 #define HCI_UART_RESET_ON_INIT	1
@@ -167,7 +168,17 @@
 int h5_deinit(void);
 #endif
 
+#ifdef CONFIG_BT_HCIUART_INTEL
+int intel_init(void);
+int intel_deinit(void);
+#endif
+
 #ifdef CONFIG_BT_HCIUART_BCM
 int bcm_init(void);
 int bcm_deinit(void);
 #endif
+
+#ifdef CONFIG_BT_HCIUART_QCA
+int qca_init(void);
+int qca_deinit(void);
+#endif
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index ab3bde1..1c543ef 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -332,6 +332,18 @@
 }
 
 /**
+ * mips_cdmm_phys_base() - Choose a physical base address for CDMM region.
+ *
+ * Picking a suitable physical address at which to map the CDMM region is
+ * platform specific, so this weak function can be overridden by platform
+ * code to pick a suitable value if none is configured by the bootloader.
+ */
+phys_addr_t __weak mips_cdmm_phys_base(void)
+{
+	return 0;
+}
+
+/**
  * mips_cdmm_setup() - Ensure the CDMM bus is initialised and usable.
  * @bus:	Pointer to bus information for current CPU.
  *		IS_ERR(bus) is checked, so no need for caller to check.
@@ -368,7 +380,7 @@
 	if (!bus->phys)
 		bus->phys = mips_cdmm_cur_base();
 	/* Otherwise, ask platform code for suggestions */
-	if (!bus->phys && mips_cdmm_phys_base)
+	if (!bus->phys)
 		bus->phys = mips_cdmm_phys_base();
 	/* Otherwise, copy what other CPUs have done */
 	if (!bus->phys)
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index c6dea3f..1341a94 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1408,8 +1408,8 @@
 }
 EXPORT_SYMBOL(intel_gmch_probe);
 
-void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
-		   phys_addr_t *mappable_base, unsigned long *mappable_end)
+void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
+		   phys_addr_t *mappable_base, u64 *mappable_end)
 {
 	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
 	*stolen_size = intel_private.stolen_size;
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 0df5bae..9040878 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -780,6 +780,7 @@
 	"aclk_cpu",
 	"aclk_peri",
 	"hclk_peri",
+	"pclk_pd_pmu",
 };
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index c03f04d8..a7726db 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -111,6 +111,10 @@
 	select CLKSRC_MMIO
 	select CLKSRC_OF
 
+config CLKSRC_PISTACHIO
+	bool
+	select CLKSRC_OF
+
 config CLKSRC_STM32
 	bool "Clocksource for STM32 SoCs" if !ARCH_STM32
 	depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST)
@@ -293,4 +297,12 @@
 	depends on ARM && CLKDEV_LOOKUP
 	select CLKSRC_MMIO
 
+config CLKSRC_ST_LPC
+	bool
+	depends on ARCH_STI
+	select CLKSRC_OF if OF
+	help
+	  Enable this option to use the Low Power controller timer
+	  as a clocksource.
+
 endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index f228354..5c00863 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -44,6 +44,7 @@
 obj-$(CONFIG_VF_PIT_TIMER)	+= vf_pit_timer.o
 obj-$(CONFIG_CLKSRC_QCOM)	+= qcom-timer.o
 obj-$(CONFIG_MTK_TIMER)		+= mtk_timer.o
+obj-$(CONFIG_CLKSRC_PISTACHIO)	+= time-pistachio.o
 
 obj-$(CONFIG_ARM_ARCH_TIMER)		+= arm_arch_timer.o
 obj-$(CONFIG_ARM_GLOBAL_TIMER)		+= arm_global_timer.o
@@ -60,3 +61,4 @@
 obj-$(CONFIG_H8300)			+= h8300_timer8.o
 obj-$(CONFIG_H8300_TMR16)		+= h8300_timer16.o
 obj-$(CONFIG_H8300_TPU)			+= h8300_tpu.o
+obj-$(CONFIG_CLKSRC_ST_LPC)		+= clksrc_st_lpc.o
diff --git a/drivers/clocksource/clksrc_st_lpc.c b/drivers/clocksource/clksrc_st_lpc.c
new file mode 100644
index 0000000..65ec467
--- /dev/null
+++ b/drivers/clocksource/clksrc_st_lpc.c
@@ -0,0 +1,131 @@
+/*
+ * Clocksource using the Low Power Timer found in the Low Power Controller (LPC)
+ *
+ * Copyright (C) 2015 STMicroelectronics – All Rights Reserved
+ *
+ * Author(s): Francesco Virlinzi <francesco.virlinzi@st.com>
+ *	      Ajit Pal Singh <ajitpal.singh@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/mfd/st-lpc.h>
+
+/* Low Power Timer */
+#define LPC_LPT_LSB_OFF		0x400
+#define LPC_LPT_MSB_OFF		0x404
+#define LPC_LPT_START_OFF	0x408
+
+static struct st_clksrc_ddata {
+	struct clk		*clk;
+	void __iomem		*base;
+} ddata;
+
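+/* Stop the LPT, clear both counter words, then restart counting from 0. */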
+static void __init st_clksrc_reset(void)
+{
+	writel_relaxed(0, ddata.base + LPC_LPT_START_OFF);
+	writel_relaxed(0, ddata.base + LPC_LPT_MSB_OFF);
+	writel_relaxed(0, ddata.base + LPC_LPT_LSB_OFF);
+	writel_relaxed(1, ddata.base + LPC_LPT_START_OFF);
+}
+
+static u64 notrace st_clksrc_sched_clock_read(void)
+{
+	return (u64)readl_relaxed(ddata.base + LPC_LPT_LSB_OFF);
+}
+
+static int __init st_clksrc_init(void)
+{
+	unsigned long rate;
+	int ret;
+
+	st_clksrc_reset();
+
+	rate = clk_get_rate(ddata.clk);
+
+	sched_clock_register(st_clksrc_sched_clock_read, 32, rate);
+
+	ret = clocksource_mmio_init(ddata.base + LPC_LPT_LSB_OFF,
+				    "clksrc-st-lpc", rate, 300, 32,
+				    clocksource_mmio_readl_up);
+	if (ret) {
+		pr_err("clksrc-st-lpc: Failed to register clocksource\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int __init st_clksrc_setup_clk(struct device_node *np)
+{
+	struct clk *clk;
+
+	clk = of_clk_get(np, 0);
+	if (IS_ERR(clk)) {
+		pr_err("clksrc-st-lpc: Failed to get LPC clock\n");
+		return PTR_ERR(clk);
+	}
+
+	if (clk_prepare_enable(clk)) {
+		pr_err("clksrc-st-lpc: Failed to enable LPC clock\n");
+		return -EINVAL;
+	}
+
+	if (!clk_get_rate(clk)) {
+		pr_err("clksrc-st-lpc: Failed to get LPC clock rate\n");
+		clk_disable_unprepare(clk);
+		return -EINVAL;
+	}
+
+	ddata.clk = clk;
+
+	return 0;
+}
+
+static void __init st_clksrc_of_register(struct device_node *np)
+{
+	int ret;
+	uint32_t mode;
+
+	ret = of_property_read_u32(np, "st,lpc-mode", &mode);
+	if (ret) {
+		pr_err("clksrc-st-lpc: An LPC mode must be provided\n");
+		return;
+	}
+
+	/* LPC can either run as a Clocksource or in RTC or WDT mode */
+	if (mode != ST_LPC_MODE_CLKSRC)
+		return;
+
+	ddata.base = of_iomap(np, 0);
+	if (!ddata.base) {
+		pr_err("clksrc-st-lpc: Unable to map iomem\n");
+		return;
+	}
+
+	if (st_clksrc_setup_clk(np)) {
+		iounmap(ddata.base);
+		return;
+	}
+
+	if (st_clksrc_init()) {
+		clk_disable_unprepare(ddata.clk);
+		clk_put(ddata.clk);
+		iounmap(ddata.base);
+		return;
+	}
+
+	pr_info("clksrc-st-lpc: clocksource initialised - running @ %luHz\n",
+		clk_get_rate(ddata.clk));
+}
+CLOCKSOURCE_OF_DECLARE(ddata, "st,stih407-lpc", st_clksrc_of_register);
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index c3810b6..02a1945 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -72,6 +72,13 @@
 	disable_percpu_irq(gic_timer_irq);
 }
 
+static void gic_update_frequency(void *data)
+{
+	unsigned long rate = (unsigned long)data;
+
+	clockevents_update_freq(this_cpu_ptr(&gic_clockevent_device), rate);
+}
+
 static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action,
 				void *data)
 {
@@ -87,18 +94,40 @@
 	return NOTIFY_OK;
 }
 
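+/* Refresh each CPU's clockevent frequency after a clock rate change. */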
+static int gic_clk_notifier(struct notifier_block *nb, unsigned long action,
+			    void *data)
+{
+	struct clk_notifier_data *cnd = data;
+
+	if (action == POST_RATE_CHANGE)
+		on_each_cpu(gic_update_frequency, (void *)cnd->new_rate, 1);
+
+	return NOTIFY_OK;
+}
+
 static struct notifier_block gic_cpu_nb = {
 	.notifier_call = gic_cpu_notifier,
 };
 
+static struct notifier_block gic_clk_nb = {
+	.notifier_call = gic_clk_notifier,
+};
+
 static int gic_clockevent_init(void)
 {
+	int ret;
+
 	if (!cpu_has_counter || !gic_frequency)
 		return -ENXIO;
 
-	setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction);
+	ret = setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction);
+	if (ret < 0)
+		return ret;
 
-	register_cpu_notifier(&gic_cpu_nb);
+	ret = register_cpu_notifier(&gic_cpu_nb);
+	if (ret < 0)
+		pr_warn("GIC: Unable to register CPU notifier\n");
 
 	gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
 
@@ -118,18 +147,17 @@
 
 static void __init __gic_clocksource_init(void)
 {
+	int ret;
+
 	/* Set clocksource mask. */
 	gic_clocksource.mask = CLOCKSOURCE_MASK(gic_get_count_width());
 
 	/* Calculate a somewhat reasonable rating value. */
 	gic_clocksource.rating = 200 + gic_frequency / 10000000;
 
-	clocksource_register_hz(&gic_clocksource, gic_frequency);
-
-	gic_clockevent_init();
-
-	/* And finally start the counter */
-	gic_start_count();
+	ret = clocksource_register_hz(&gic_clocksource, gic_frequency);
+	if (ret < 0)
+		pr_warn("GIC: Unable to register clocksource\n");
 }
 
 void __init gic_clocksource_init(unsigned int frequency)
@@ -139,11 +167,16 @@
 		GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_COMPARE);
 
 	__gic_clocksource_init();
+	gic_clockevent_init();
+
+	/* And finally start the counter */
+	gic_start_count();
 }
 
 static void __init gic_clocksource_of_init(struct device_node *node)
 {
 	struct clk *clk;
+	int ret;
 
 	if (WARN_ON(!gic_present || !node->parent ||
 		    !of_device_is_compatible(node->parent, "mti,gic")))
@@ -151,8 +184,13 @@
 
 	clk = of_clk_get(node, 0);
 	if (!IS_ERR(clk)) {
+		if (clk_prepare_enable(clk) < 0) {
+			pr_err("GIC failed to enable clock\n");
+			clk_put(clk);
+			return;
+		}
+
 		gic_frequency = clk_get_rate(clk);
-		clk_put(clk);
 	} else if (of_property_read_u32(node, "clock-frequency",
 					&gic_frequency)) {
 		pr_err("GIC frequency not specified.\n");
@@ -165,6 +203,15 @@
 	}
 
 	__gic_clocksource_init();
+
+	ret = gic_clockevent_init();
+	if (!ret && !IS_ERR(clk)) {
+		if (clk_notifier_register(clk, &gic_clk_nb) < 0)
+			pr_warn("GIC: Unable to register clock notifier\n");
+	}
+
+	/* And finally start the counter */
+	gic_start_count();
 }
 CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer",
 		       gic_clocksource_of_init);
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c
new file mode 100644
index 0000000..18d4266
--- /dev/null
+++ b/drivers/clocksource/time-pistachio.c
@@ -0,0 +1,217 @@
+/*
+ * Pistachio clocksource based on general-purpose timers
+ *
+ * Copyright (C) 2015 Imagination Technologies
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sched_clock.h>
+#include <linux/time.h>
+
+/* Top level reg */
+#define CR_TIMER_CTRL_CFG		0x00
+#define TIMER_ME_GLOBAL			BIT(0)
+#define CR_TIMER_REV			0x10
+
+/* Timer specific registers */
+#define TIMER_CFG			0x20
+#define TIMER_ME_LOCAL			BIT(0)
+#define TIMER_RELOAD_VALUE		0x24
+#define TIMER_CURRENT_VALUE		0x28
+#define TIMER_CURRENT_OVERFLOW_VALUE	0x2C
+#define TIMER_IRQ_STATUS		0x30
+#define TIMER_IRQ_CLEAR			0x34
+#define TIMER_IRQ_MASK			0x38
+
+#define PERIP_TIMER_CONTROL		0x90
+
+/* Timer specific configuration Values */
+#define RELOAD_VALUE			0xffffffff
+
+struct pistachio_clocksource {
+	void __iomem *base;
+	raw_spinlock_t lock;
+	struct clocksource cs;
+};
+
+static struct pistachio_clocksource pcs_gpt;
+
+#define to_pistachio_clocksource(cs)	\
+	container_of(cs, struct pistachio_clocksource, cs)
+
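+/* Per-timer registers repeat every 0x20 bytes; gpt_id selects the timer. */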
+static inline u32 gpt_readl(void __iomem *base, u32 offset, u32 gpt_id)
+{
+	return readl(base + 0x20 * gpt_id + offset);
+}
+
+static inline void gpt_writel(void __iomem *base, u32 value, u32 offset,
+		u32 gpt_id)
+{
+	writel(value, base + 0x20 * gpt_id + offset);
+}
+
+static cycle_t pistachio_clocksource_read_cycles(struct clocksource *cs)
+{
+	struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
+	u32 counter, overflw;
+	unsigned long flags;
+
+	/*
+	 * The counter value is only refreshed after the overflow value is
+	 * read, and the two must be read in strict order; hence the raw
+	 * spinlock.
+	 */
+
+	raw_spin_lock_irqsave(&pcs->lock, flags);
+	overflw = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0);
+	counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0);
+	raw_spin_unlock_irqrestore(&pcs->lock, flags);
+
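+	/* The GPT is loaded with RELOAD_VALUE and counts down; invert the
+	 * value so the clocksource sees a monotonically increasing counter.
+	 */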
+	return ~(cycle_t)counter;
+}
+
+static u64 notrace pistachio_read_sched_clock(void)
+{
+	return pistachio_clocksource_read_cycles(&pcs_gpt.cs);
+}
+
+static void pistachio_clksrc_set_mode(struct clocksource *cs, int timeridx,
+			int enable)
+{
+	struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
+	u32 val;
+
+	val = gpt_readl(pcs->base, TIMER_CFG, timeridx);
+	if (enable)
+		val |= TIMER_ME_LOCAL;
+	else
+		val &= ~TIMER_ME_LOCAL;
+
+	gpt_writel(pcs->base, val, TIMER_CFG, timeridx);
+}
+
+static void pistachio_clksrc_enable(struct clocksource *cs, int timeridx)
+{
+	struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
+
+	/* Disable GPT local before loading reload value */
+	pistachio_clksrc_set_mode(cs, timeridx, false);
+	gpt_writel(pcs->base, RELOAD_VALUE, TIMER_RELOAD_VALUE, timeridx);
+	pistachio_clksrc_set_mode(cs, timeridx, true);
+}
+
+static void pistachio_clksrc_disable(struct clocksource *cs, int timeridx)
+{
+	/* Disable GPT local */
+	pistachio_clksrc_set_mode(cs, timeridx, false);
+}
+
+static int pistachio_clocksource_enable(struct clocksource *cs)
+{
+	pistachio_clksrc_enable(cs, 0);
+	return 0;
+}
+
+static void pistachio_clocksource_disable(struct clocksource *cs)
+{
+	pistachio_clksrc_disable(cs, 0);
+}
+
+/* Preferred clocksource for the Pistachio platform */
+static struct pistachio_clocksource pcs_gpt = {
+	.cs =	{
+		.name		= "gptimer",
+		.rating		= 300,
+		.enable		= pistachio_clocksource_enable,
+		.disable	= pistachio_clocksource_disable,
+		.read		= pistachio_clocksource_read_cycles,
+		.mask		= CLOCKSOURCE_MASK(32),
+		.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
+				  CLOCK_SOURCE_SUSPEND_NONSTOP,
+		},
+};
+
+static void __init pistachio_clksrc_of_init(struct device_node *node)
+{
+	struct clk *sys_clk, *fast_clk;
+	struct regmap *periph_regs;
+	unsigned long rate;
+	int ret;
+
+	pcs_gpt.base = of_iomap(node, 0);
+	if (!pcs_gpt.base) {
+		pr_err("cannot iomap\n");
+		return;
+	}
+
+	periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph");
+	if (IS_ERR(periph_regs)) {
+		pr_err("cannot get peripheral regmap (%lu)\n",
+		       PTR_ERR(periph_regs));
+		return;
+	}
+
+	/* Switch to using the fast counter clock */
+	ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL,
+				 0xf, 0x0);
+	if (ret)
+		return;
+
+	sys_clk = of_clk_get_by_name(node, "sys");
+	if (IS_ERR(sys_clk)) {
+		pr_err("clock get failed (%lu)\n", PTR_ERR(sys_clk));
+		return;
+	}
+
+	fast_clk = of_clk_get_by_name(node, "fast");
+	if (IS_ERR(fast_clk)) {
+		pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk));
+		return;
+	}
+
+	ret = clk_prepare_enable(sys_clk);
+	if (ret < 0) {
+		pr_err("failed to enable clock (%d)\n", ret);
+		return;
+	}
+
+	ret = clk_prepare_enable(fast_clk);
+	if (ret < 0) {
+		pr_err("failed to enable clock (%d)\n", ret);
+		clk_disable_unprepare(sys_clk);
+		return;
+	}
+
+	rate = clk_get_rate(fast_clk);
+
+	/* Disable irq's for clocksource usage */
+	gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
+	gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
+	gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
+	gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
+
+	/* Enable timer block */
+	writel(TIMER_ME_GLOBAL, pcs_gpt.base);
+
+	raw_spin_lock_init(&pcs_gpt.lock);
+	sched_clock_register(pistachio_read_sched_clock, 32, rate);
+	clocksource_register_hz(&pcs_gpt.cs, rate);
+}
+CLOCKSOURCE_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer",
+		       pistachio_clksrc_of_init);
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index c13feec..ea9728f 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -25,16 +25,21 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/platform_device.h>
+#include <linux/psci.h>
+
 #include <asm/cpuidle.h>
 #include <asm/suspend.h>
-#include <asm/psci.h>
+
+#include <uapi/linux/psci.h>
+
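+/* PSCI 0.2 power_state parameter: state ID 0, affinity level 0,
+ * type POWER_DOWN - replaces the old struct psci_power_state.
+ */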
+#define CALXEDA_IDLE_PARAM \
+	((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \
+	 (0 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \
+	 (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT))
 
 static int calxeda_idle_finish(unsigned long val)
 {
-	const struct psci_power_state ps = {
-		.type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
-	};
-	return psci_ops.cpu_suspend(ps, __pa(cpu_resume));
+	return psci_ops.cpu_suspend(CALXEDA_IDLE_PARAM, __pa(cpu_resume));
 }
 
 static int calxeda_pwrdown_idle(struct cpuidle_device *dev,
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index d9af940..2f0b333 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -370,8 +370,7 @@
 			sg_init_table(ctx->bufsl, nsg);
 			sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
 			if (nsg > 1)
-				scatterwalk_sg_chain(ctx->bufsl, nsg,
-						req->src);
+				sg_chain(ctx->bufsl, nsg, req->src);
 			ctx->sg = ctx->bufsl;
 		} else
 			ctx->sg = req->src;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index b2024c95..48adb2a 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -588,7 +588,7 @@
 		 * the dmaengine may try to DMA the incorrect amount of data.
 		 */
 		sg_init_table(&ctx->sgl, 1);
-		ctx->sgl.page_link = ctx->sg->page_link;
+		sg_assign_page(&ctx->sgl, sg_page(ctx->sg));
 		ctx->sgl.offset = ctx->sg->offset;
 		sg_dma_len(&ctx->sgl) = len32;
 		sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 5c5df1d..be2f504 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -296,7 +296,7 @@
 	if (rctx->buflen) {
 		sg_init_table(rctx->sg, 2);
 		sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
-		scatterwalk_sg_chain(rctx->sg, 2, req->src);
+		sg_chain(rctx->sg, 2, req->src);
 		req->src = rctx->sg;
 	}
 
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 1c19e44..820dc3a 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -999,7 +999,7 @@
 		sg_init_table(rctx->in_sg_chain, 2);
 		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
 
-		scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);
+		sg_chain(rctx->in_sg_chain, 2, req->src);
 
 		rctx->total = req->nbytes + rctx->buf_cnt;
 		rctx->in_sg = rctx->in_sg_chain;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index cd77453..3b20a1b 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1929,7 +1929,7 @@
 		sg_init_table(req_ctx->bufsl, nsg);
 		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
 		if (nsg > 1)
-			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
+			sg_chain(req_ctx->bufsl, 2, areq->src);
 		req_ctx->psrc = req_ctx->bufsl;
 	} else
 		req_ctx->psrc = areq->src;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index bdbbe5b..b458475 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -33,27 +33,29 @@
 
 comment "DMA Devices"
 
-config INTEL_MIC_X100_DMA
-	tristate "Intel MIC X100 DMA Driver"
-	depends on 64BIT && X86 && INTEL_MIC_BUS
-	select DMA_ENGINE
-	help
-	  This enables DMA support for the Intel Many Integrated Core
-	  (MIC) family of PCIe form factor coprocessor X100 devices that
-	  run a 64 bit Linux OS. This driver will be used by both MIC
-	  host and card drivers.
-
-	  If you are building host kernel with a MIC device or a card
-	  kernel for a MIC device, then say M (recommended) or Y, else
-	  say N. If unsure say N.
-
-	  More information about the Intel MIC family as well as the Linux
-	  OS and tools for MIC to use with this driver are available from
-	  <http://software.intel.com/en-us/mic-developer>.
-
+#core
 config ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	bool
 
+config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+	bool
+
+config DMA_ENGINE
+	bool
+
+config DMA_VIRTUAL_CHANNELS
+	tristate
+
+config DMA_ACPI
+	def_bool y
+	depends on ACPI
+
+config DMA_OF
+	def_bool y
+	depends on OF
+	select DMA_ENGINE
+
+#devices
 config AMBA_PL08X
 	bool "ARM PrimeCell PL080 or PL081 support"
 	depends on ARM_AMBA
@@ -63,9 +65,181 @@
 	  Platform has a PL08x DMAC device
 	  which can provide DMA engine support
 
+config AMCC_PPC440SPE_ADMA
+	tristate "AMCC PPC440SPe ADMA support"
+	depends on 440SPe || 440SP
+	select DMA_ENGINE
+	select DMA_ENGINE_RAID
+	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+	help
+	  Enable support for the AMCC PPC440SPe RAID engines.
+
+config AT_HDMAC
+	tristate "Atmel AHB DMA support"
+	depends on ARCH_AT91
+	select DMA_ENGINE
+	help
+	  Support the Atmel AHB DMA controller.
+
+config AT_XDMAC
+	tristate "Atmel XDMA support"
+	depends on ARCH_AT91
+	select DMA_ENGINE
+	help
+	  Support the Atmel XDMA controller.
+
+config AXI_DMAC
+	tristate "Analog Devices AXI-DMAC DMA support"
+	depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_SOCFPGA || COMPILE_TEST
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the Analog Devices AXI-DMAC peripheral. This DMA
+	  controller is often used in Analog Devices' reference designs for FPGA
+	  platforms.
+
+config COH901318
+	bool "ST-Ericsson COH901318 DMA support"
+	select DMA_ENGINE
+	depends on ARCH_U300
+	help
+	  Enable support for ST-Ericsson COH 901 318 DMA.
+
+config DMA_BCM2835
+	tristate "BCM2835 DMA engine support"
+	depends on ARCH_BCM2835
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
+config DMA_JZ4740
+	tristate "JZ4740 DMA support"
+	depends on MACH_JZ4740
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
+config DMA_JZ4780
+	tristate "JZ4780 DMA support"
+	depends on MACH_JZ4780
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  This selects support for the DMA controller in Ingenic JZ4780 SoCs.
+	  If you have a board based on such a SoC and wish to use DMA for
+	  devices which can use the DMA controller, say Y or M here.
+
+config DMA_OMAP
+	tristate "OMAP DMA support"
+	depends on ARCH_OMAP
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	select TI_DMA_CROSSBAR if SOC_DRA7XX
+
+config DMA_SA11X0
+	tristate "SA-11x0 DMA support"
+	depends on ARCH_SA1100
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support the DMA engine found on Intel StrongARM SA-1100 and
+	  SA-1110 SoCs.  This DMA engine can only be used with on-chip
+	  devices.
+
+config DMA_SUN4I
+	tristate "Allwinner A10 DMA SoCs support"
+	depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I
+	default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I)
+	select DMA_ENGINE
+	select DMA_OF
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the DMA controller present in the sun4i,
+	  sun5i and sun7i Allwinner ARM SoCs.
+
+config DMA_SUN6I
+	tristate "Allwinner A31 SoCs DMA support"
+	depends on MACH_SUN6I || MACH_SUN8I || COMPILE_TEST
+	depends on RESET_CONTROLLER
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support for the DMA engine first found in Allwinner A31 SoCs.
+
+config EP93XX_DMA
+	bool "Cirrus Logic EP93xx DMA support"
+	depends on ARCH_EP93XX
+	select DMA_ENGINE
+	help
+	  Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
+
+config FSL_DMA
+	tristate "Freescale Elo series DMA support"
+	depends on FSL_SOC
+	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+	---help---
+	  Enable support for the Freescale Elo series DMA controllers.
+	  The Elo is the DMA controller on some mpc82xx and mpc83xx parts, the
+	  EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
+	  some Txxx and Bxxx parts.
+
+config FSL_EDMA
+	tristate "Freescale eDMA engine support"
+	depends on OF
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support the Freescale eDMA engine with programmable channel
+	  multiplexing capability for DMA request sources (slots).
+	  This module can be found on Freescale Vybrid and LS-1 SoCs.
+
+config FSL_RAID
+	tristate "Freescale RAID engine Support"
+	depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
+	select DMA_ENGINE
+	select DMA_ENGINE_RAID
+	---help---
+	  Enable support for Freescale RAID Engine. RAID Engine is
+	  available on some QorIQ SoCs (like P5020/P5040). It has
+	  the capability to offload memcpy, xor and pq computation
+	  for raid5/6.
+
+config IMG_MDC_DMA
+	tristate "IMG MDC support"
+	depends on MIPS || COMPILE_TEST
+	depends on MFD_SYSCON
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the IMG multi-threaded DMA controller (MDC).
+
+config IMX_DMA
+	tristate "i.MX DMA support"
+	depends on ARCH_MXC
+	select DMA_ENGINE
+	help
+	  Support the i.MX DMA engine. This engine is integrated into
+	  Freescale i.MX1/21/27 chips.
+
+config IMX_SDMA
+	tristate "i.MX SDMA support"
+	depends on ARCH_MXC
+	select DMA_ENGINE
+	help
+	  Support the i.MX SDMA engine. This engine is integrated into
+	  Freescale i.MX25/31/35/51/53/6 chips.
+
+config IDMA64
+	tristate "Intel integrated DMA 64-bit support"
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable DMA support for Intel Low Power Subsystem such as found on
+	  Intel Skylake PCH.
+
 config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
-	depends on PCI && X86
+	depends on PCI && X86_64
 	select DMA_ENGINE
 	select DMA_ENGINE_RAID
 	select DCA
@@ -85,53 +259,69 @@
 	help
 	  Enable support for the Intel(R) IOP Series RAID engines.
 
-config IDMA64
-	tristate "Intel integrated DMA 64-bit support"
+config INTEL_MIC_X100_DMA
+	tristate "Intel MIC X100 DMA Driver"
+	depends on 64BIT && X86 && INTEL_MIC_BUS
+	select DMA_ENGINE
+	help
+	  This enables DMA support for the Intel Many Integrated Core
+	  (MIC) family of PCIe form factor coprocessor X100 devices that
+	  run a 64 bit Linux OS. This driver will be used by both MIC
+	  host and card drivers.
+
+	  If you are building host kernel with a MIC device or a card
+	  kernel for a MIC device, then say M (recommended) or Y, else
+	  say N. If unsure say N.
+
+	  More information about the Intel MIC family as well as the Linux
+	  OS and tools for MIC to use with this driver are available from
+	  <http://software.intel.com/en-us/mic-developer>.
+
+config K3_DMA
+	tristate "Hisilicon K3 DMA support"
+	depends on ARCH_HI3xxx
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
-	  Enable DMA support for Intel Low Power Subsystem such as found on
-	  Intel Skylake PCH.
+	  Support the DMA engine for Hisilicon K3 platform
+	  devices.
 
-source "drivers/dma/dw/Kconfig"
+config LPC18XX_DMAMUX
+	bool "NXP LPC18xx/43xx DMA MUX for PL080"
+	depends on ARCH_LPC18XX || COMPILE_TEST
+	depends on OF && AMBA_PL08X
+	select MFD_SYSCON
+	help
+	  Enable support for DMA on NXP LPC18xx/43xx platforms
+	  with PL080 and multiplexed DMA request lines.
 
-config AT_HDMAC
-	tristate "Atmel AHB DMA support"
-	depends on ARCH_AT91
+config MMP_PDMA
+	bool "MMP PDMA support"
+	depends on (ARCH_MMP || ARCH_PXA)
 	select DMA_ENGINE
 	help
-	  Support the Atmel AHB DMA controller.
+	  Support the MMP PDMA engine for PXA and MMP platform.
 
-config AT_XDMAC
-	tristate "Atmel XDMA support"
-	depends on ARCH_AT91
+config MMP_TDMA
+	bool "MMP Two-Channel DMA support"
+	depends on ARCH_MMP
 	select DMA_ENGINE
+	select MMP_SRAM
 	help
-	  Support the Atmel XDMA controller.
+	  Support the MMP Two-Channel DMA engine.
+	  This engine is used for MMP Audio DMA and pxa910 SQU.
+	  It needs the sram driver under mach-mmp.
+
+	  Say Y here if you enabled MMP ADMA, otherwise say N.
 
-config FSL_DMA
-	tristate "Freescale Elo series DMA support"
-	depends on FSL_SOC
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
 	select DMA_ENGINE
-	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
-	---help---
-	  Enable support for the Freescale Elo series DMA controllers.
-	  The Elo is the DMA controller on some mpc82xx and mpc83xx parts, the
-	  EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
-	  some Txxx and Bxxx parts.
-
-config FSL_RAID
-        tristate "Freescale RAID engine Support"
-        depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
-        select DMA_ENGINE
-        select DMA_ENGINE_RAID
-        ---help---
-          Enable support for Freescale RAID Engine. RAID Engine is
-          available on some QorIQ SoCs (like P5020/P5040). It has
-          the capability to offload memcpy, xor and pq computation
-	  for raid5/6.
-
-source "drivers/dma/hsu/Kconfig"
+	select DMA_OF
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
 
 config MPC512X_DMA
 	tristate "Freescale MPC512x built-in DMA engine support"
@@ -140,8 +330,6 @@
 	---help---
 	  Enable support for the Freescale MPC512x built-in DMA engine.
 
-source "drivers/dma/bestcomm/Kconfig"
-
 config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
@@ -151,6 +339,15 @@
 	---help---
 	  Enable support for the Marvell XOR engine.
 
+config MXS_DMA
+	bool "MXS DMA support"
+	depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
+	select STMP_DEVICE
+	select DMA_ENGINE
+	help
+	  Support the MXS DMA engine. This engine including APBH-DMA
+	  and APBX-DMA is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips.
+
 config MX3_IPU
 	bool "MX3x Image Processing Unit support"
 	depends on ARCH_MXC
@@ -170,6 +367,36 @@
 	  To avoid bloating the irq_desc[] array we allocate a sufficient
 	  number of IRQ slots and map them dynamically to specific sources.
 
+config NBPFAXI_DMA
+	tristate "Renesas Type-AXI NBPF DMA support"
+	select DMA_ENGINE
+	depends on ARM || COMPILE_TEST
+	help
+	  Support for "Type-AXI" NBPF DMA IPs from Renesas
+
+config PCH_DMA
+	tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
+	depends on PCI && (X86_32 || COMPILE_TEST)
+	select DMA_ENGINE
+	help
+	  Enable support for Intel EG20T PCH DMA engine.
+
+	  This driver also can be used for LAPIS Semiconductor IOH(Input/
+	  Output Hub), ML7213, ML7223 and ML7831.
+	  ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
+	  for MP(Media Phone) use and ML7831 IOH is for general purpose use.
+	  ML7213/ML7223/ML7831 is a companion chip for Intel Atom E6xx series.
+	  ML7213/ML7223/ML7831 is completely compatible with Intel EG20T PCH.
+
+config PL330_DMA
+	tristate "DMA API Driver for PL330"
+	select DMA_ENGINE
+	depends on ARM_AMBA
+	help
+	  Select if your platform has one or more PL330 DMACs.
+	  You need to provide platform specific settings via
+	  platform_data for a dma-pl330 device.
+
 config PXA_DMA
 	bool "PXA DMA support"
 	depends on (ARCH_MMP || ARCH_PXA)
@@ -181,6 +408,41 @@
 	  16 to 32 channels for peripheral to memory or memory to memory
 	  transfers.
 
+config QCOM_BAM_DMA
+	tristate "QCOM BAM DMA support"
+	depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	---help---
+	  Enable support for the QCOM BAM DMA controller.  This controller
+	  provides DMA capabilities for a variety of on-chip devices.
+
+config SIRF_DMA
+	tristate "CSR SiRFprimaII/SiRFmarco DMA support"
+	depends on ARCH_SIRF
+	select DMA_ENGINE
+	help
+	  Enable support for the CSR SiRFprimaII DMA engine.
+
+config STE_DMA40
+	bool "ST-Ericsson DMA40 support"
+	depends on ARCH_U8500
+	select DMA_ENGINE
+	help
+	  Support for ST-Ericsson DMA40 controller
+
+config S3C24XX_DMAC
+	tristate "Samsung S3C24XX DMA support"
+	depends on ARCH_S3C24XX
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support for the Samsung S3C24XX DMA controller driver. The
+	  DMA controller has multiple DMA channels which can be
+	  configured for different peripherals like audio, UART, SPI.
+	  The DMA controller can transfer data from memory to peripheral,
+	  peripheral to memory, peripheral to peripheral and memory to memory.
+
 config TXX9_DMAC
 	tristate "Toshiba TXx9 SoC DMA support"
 	depends on MACH_TX49XX || MACH_TX39XX
@@ -201,44 +463,6 @@
 	  This DMA controller transfers data from memory to peripheral fifo
 	  or vice versa. It does not support memory to memory data transfer.
 
-config S3C24XX_DMAC
-	tristate "Samsung S3C24XX DMA support"
-	depends on ARCH_S3C24XX
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Support for the Samsung S3C24XX DMA controller driver. The
-	  DMA controller is having multiple DMA channels which can be
-	  configured for different peripherals like audio, UART, SPI.
-	  The DMA controller can transfer data from memory to peripheral,
-	  periphal to memory, periphal to periphal and memory to memory.
-
-source "drivers/dma/sh/Kconfig"
-
-config COH901318
-	bool "ST-Ericsson COH901318 DMA support"
-	select DMA_ENGINE
-	depends on ARCH_U300
-	help
-	  Enable support for ST-Ericsson COH 901 318 DMA.
-
-config STE_DMA40
-	bool "ST-Ericsson DMA40 support"
-	depends on ARCH_U8500
-	select DMA_ENGINE
-	help
-	  Support for ST-Ericsson DMA40 controller
-
-config AMCC_PPC440SPE_ADMA
-	tristate "AMCC PPC440SPe ADMA support"
-	depends on 440SPe || 440SP
-	select DMA_ENGINE
-	select DMA_ENGINE_RAID
-	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
-	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
-	help
-	  Enable support for the AMCC PPC440SPe RAID engines.
-
 config TIMB_DMA
 	tristate "Timberdale FPGA DMA support"
 	depends on MFD_TIMBERDALE
@@ -246,12 +470,16 @@
 	help
 	  Enable support for the Timberdale FPGA DMA engine.
 
-config SIRF_DMA
-	tristate "CSR SiRFprimaII/SiRFmarco DMA support"
-	depends on ARCH_SIRF
+config TI_CPPI41
+	tristate "AM33xx CPPI41 DMA support"
+	depends on ARCH_OMAP
 	select DMA_ENGINE
 	help
-	  Enable support for the CSR SiRFprimaII DMA engine.
+	  The Communications Port Programming Interface (CPPI) 4.1 DMA engine
+	  is currently used by the USB driver on AM335x platforms.
+
+config TI_DMA_CROSSBAR
+	bool
 
 config TI_EDMA
 	bool "TI EDMA support"
@@ -264,160 +492,14 @@
 	  Enable support for the TI EDMA controller. This DMA
 	  engine is found on TI DaVinci and AM33xx parts.
 
-config TI_DMA_CROSSBAR
-	bool
-
-config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
-	bool
-
-config PL330_DMA
-	tristate "DMA API Driver for PL330"
+config XGENE_DMA
+	tristate "APM X-Gene DMA support"
+	depends on ARCH_XGENE || COMPILE_TEST
 	select DMA_ENGINE
-	depends on ARM_AMBA
+	select DMA_ENGINE_RAID
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
-	  Select if your platform has one or more PL330 DMACs.
-	  You need to provide platform specific settings via
-	  platform_data for a dma-pl330 device.
-
-config PCH_DMA
-	tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
-	depends on PCI && (X86_32 || COMPILE_TEST)
-	select DMA_ENGINE
-	help
-	  Enable support for Intel EG20T PCH DMA engine.
-
-	  This driver also can be used for LAPIS Semiconductor IOH(Input/
-	  Output Hub), ML7213, ML7223 and ML7831.
-	  ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
-	  for MP(Media Phone) use and ML7831 IOH is for general purpose use.
-	  ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
-	  ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
-
-config IMX_SDMA
-	tristate "i.MX SDMA support"
-	depends on ARCH_MXC
-	select DMA_ENGINE
-	help
-	  Support the i.MX SDMA engine. This engine is integrated into
-	  Freescale i.MX25/31/35/51/53/6 chips.
-
-config IMX_DMA
-	tristate "i.MX DMA support"
-	depends on ARCH_MXC
-	select DMA_ENGINE
-	help
-	  Support the i.MX DMA engine. This engine is integrated into
-	  Freescale i.MX1/21/27 chips.
-
-config MXS_DMA
-	bool "MXS DMA support"
-	depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
-	select STMP_DEVICE
-	select DMA_ENGINE
-	help
-	  Support the MXS DMA engine. This engine including APBH-DMA
-	  and APBX-DMA is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips.
-
-config EP93XX_DMA
-	bool "Cirrus Logic EP93xx DMA support"
-	depends on ARCH_EP93XX
-	select DMA_ENGINE
-	help
-	  Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
-
-config DMA_SA11X0
-	tristate "SA-11x0 DMA support"
-	depends on ARCH_SA1100
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Support the DMA engine found on Intel StrongARM SA-1100 and
-	  SA-1110 SoCs.  This DMA engine can only be used with on-chip
-	  devices.
-
-config MMP_TDMA
-	bool "MMP Two-Channel DMA support"
-	depends on ARCH_MMP
-	select DMA_ENGINE
-	select MMP_SRAM
-	help
-	  Support the MMP Two-Channel DMA engine.
-	  This engine used for MMP Audio DMA and pxa910 SQU.
-	  It needs sram driver under mach-mmp.
-
-	  Say Y here if you enabled MMP ADMA, otherwise say N.
-
-config DMA_OMAP
-	tristate "OMAP DMA support"
-	depends on ARCH_OMAP
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	select TI_DMA_CROSSBAR if SOC_DRA7XX
-
-config DMA_BCM2835
-	tristate "BCM2835 DMA engine support"
-	depends on ARCH_BCM2835
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-
-config TI_CPPI41
-	tristate "AM33xx CPPI41 DMA support"
-	depends on ARCH_OMAP
-	select DMA_ENGINE
-	help
-	  The Communications Port Programming Interface (CPPI) 4.1 DMA engine
-	  is currently used by the USB driver on AM335x platforms.
-
-config MMP_PDMA
-	bool "MMP PDMA support"
-	depends on (ARCH_MMP || ARCH_PXA)
-	select DMA_ENGINE
-	help
-	  Support the MMP PDMA engine for PXA and MMP platform.
-
-config DMA_JZ4740
-	tristate "JZ4740 DMA support"
-	depends on MACH_JZ4740
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-
-config DMA_JZ4780
-	tristate "JZ4780 DMA support"
-	depends on MACH_JZ4780
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  This selects support for the DMA controller in Ingenic JZ4780 SoCs.
-	  If you have a board based on such a SoC and wish to use DMA for
-	  devices which can use the DMA controller, say Y or M here.
-
-config K3_DMA
-	tristate "Hisilicon K3 DMA support"
-	depends on ARCH_HI3xxx
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Support the DMA engine for Hisilicon K3 platform
-	  devices.
-
-config MOXART_DMA
-	tristate "MOXART DMA support"
-	depends on ARCH_MOXART
-	select DMA_ENGINE
-	select DMA_OF
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Enable support for the MOXA ART SoC DMA controller.
- 
-config FSL_EDMA
-	tristate "Freescale eDMA engine support"
-	depends on OF
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Support the Freescale eDMA engine with programmable channel
-	  multiplexing capability for DMA request sources(slot).
-	  This module can be found on Freescale Vybrid and LS-1 SoCs.
+	  Enable support for the APM X-Gene SoC DMA engine.
 
 config XILINX_VDMA
 	tristate "Xilinx AXI VDMA Engine"
@@ -433,55 +515,25 @@
 	  channels, Memory Mapped to Stream (MM2S) and Stream to
 	  Memory Mapped (S2MM) for the data transfers.
 
-config DMA_SUN6I
-	tristate "Allwinner A31 SoCs DMA support"
-	depends on MACH_SUN6I || MACH_SUN8I || COMPILE_TEST
-	depends on RESET_CONTROLLER
+config ZX_DMA
+	tristate "ZTE ZX296702 DMA support"
+	depends on ARCH_ZX
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
-	  Support for the DMA engine first found in Allwinner A31 SoCs.
+	  Support the DMA engine for ZTE ZX296702 platform devices.
 
-config NBPFAXI_DMA
-	tristate "Renesas Type-AXI NBPF DMA support"
-	select DMA_ENGINE
-	depends on ARM || COMPILE_TEST
-	help
-	  Support for "Type-AXI" NBPF DMA IPs from Renesas
 
-config IMG_MDC_DMA
-	tristate "IMG MDC support"
-	depends on MIPS || COMPILE_TEST
-	depends on MFD_SYSCON
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Enable support for the IMG multi-threaded DMA controller (MDC).
+# driver files
+source "drivers/dma/bestcomm/Kconfig"
 
-config XGENE_DMA
-	tristate "APM X-Gene DMA support"
-	depends on ARCH_XGENE || COMPILE_TEST
-	select DMA_ENGINE
-	select DMA_ENGINE_RAID
-	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
-	help
-	  Enable support for the APM X-Gene SoC DMA engine.
+source "drivers/dma/dw/Kconfig"
 
-config DMA_ENGINE
-	bool
+source "drivers/dma/hsu/Kconfig"
 
-config DMA_VIRTUAL_CHANNELS
-	tristate
+source "drivers/dma/sh/Kconfig"
 
-config DMA_ACPI
-	def_bool y
-	depends on ACPI
-
-config DMA_OF
-	def_bool y
-	depends on OF
-	select DMA_ENGINE
-
+# clients
 comment "DMA Clients"
 	depends on DMA_ENGINE
 
@@ -506,13 +558,4 @@
 config DMA_ENGINE_RAID
 	bool
 
-config QCOM_BAM_DMA
-	tristate "QCOM BAM DMA support"
-	depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	---help---
-	  Enable support for the QCOM BAM DMA controller.  This controller
-	  provides DMA capabilities for a variety of on-chip devices.
-
 endif
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 56ff8c7..7711a71 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,59 +1,69 @@
+#dmaengine debug flags
 subdir-ccflags-$(CONFIG_DMADEVICES_DEBUG)  := -DDEBUG
 subdir-ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
 
+#core
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
 obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
 obj-$(CONFIG_DMA_OF) += of-dma.o
 
+#dmatest
 obj-$(CONFIG_DMATEST) += dmatest.o
-obj-$(CONFIG_INTEL_IOATDMA) += ioat/
-obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
-obj-$(CONFIG_FSL_DMA) += fsldma.o
-obj-$(CONFIG_HSU_DMA) += hsu/
-obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
-obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
-obj-$(CONFIG_MV_XOR) += mv_xor.o
-obj-$(CONFIG_IDMA64) += idma64.o
-obj-$(CONFIG_DW_DMAC_CORE) += dw/
+
+#devices
+obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
+obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
-obj-$(CONFIG_MX3_IPU) += ipu/
-obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
-obj-$(CONFIG_RENESAS_DMA) += sh/
+obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
-obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
-obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
-obj-$(CONFIG_IMX_DMA) += imx-dma.o
-obj-$(CONFIG_MXS_DMA) += mxs-dma.o
-obj-$(CONFIG_PXA_DMA) += pxa_dma.o
-obj-$(CONFIG_TIMB_DMA) += timb_dma.o
-obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
-obj-$(CONFIG_TI_EDMA) += edma.o
-obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
-obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
-obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
-obj-$(CONFIG_PL330_DMA) += pl330.o
-obj-$(CONFIG_PCH_DMA) += pch_dma.o
-obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
-obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
-obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
-obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
-obj-$(CONFIG_DMA_OMAP) += omap-dma.o
-obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
 obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
-obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
-obj-$(CONFIG_TI_CPPI41) += cppi41.o
-obj-$(CONFIG_K3_DMA) += k3dma.o
-obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
-obj-$(CONFIG_FSL_RAID) += fsl_raid.o
-obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
-obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
-obj-y += xilinx/
-obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
-obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
+obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
+obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o
 obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
+obj-$(CONFIG_DW_DMAC_CORE) += dw/
+obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
+obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_FSL_RAID) += fsl_raid.o
+obj-$(CONFIG_HSU_DMA) += hsu/
 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
+obj-$(CONFIG_IMX_DMA) += imx-dma.o
+obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
+obj-$(CONFIG_IDMA64) += idma64.o
+obj-$(CONFIG_INTEL_IOATDMA) += ioat/
+obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
+obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
+obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
+obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
+obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
+obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
+obj-$(CONFIG_MV_XOR) += mv_xor.o
+obj-$(CONFIG_MXS_DMA) += mxs-dma.o
+obj-$(CONFIG_MX3_IPU) += ipu/
+obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
+obj-$(CONFIG_PCH_DMA) += pch_dma.o
+obj-$(CONFIG_PL330_DMA) += pl330.o
+obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
+obj-$(CONFIG_PXA_DMA) += pxa_dma.o
+obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
+obj-$(CONFIG_RENESAS_DMA) += sh/
+obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
+obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
+obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
+obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
+obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
+obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_TI_CPPI41) += cppi41.o
+obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
+obj-$(CONFIG_TI_EDMA) += edma.o
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
+obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+
+obj-y += xilinx/
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 5de3cf4..9b42c05 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -83,6 +83,8 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
 #include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -2030,10 +2032,188 @@
 }
 #endif
 
+#ifdef CONFIG_OF
+static struct dma_chan *pl08x_find_chan_id(struct pl08x_driver_data *pl08x,
+					 u32 id)
+{
+	struct pl08x_dma_chan *chan;
+
+	list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
+		if (chan->signal == id)
+			return &chan->vc.chan;
+	}
+
+	return NULL;
+}
+
+static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec,
+				       struct of_dma *ofdma)
+{
+	struct pl08x_driver_data *pl08x = ofdma->of_dma_data;
+	struct pl08x_channel_data *data;
+	struct pl08x_dma_chan *chan;
+	struct dma_chan *dma_chan;
+
+	if (!pl08x)
+		return NULL;
+
+	if (dma_spec->args_count != 2)
+		return NULL;
+
+	dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]);
+	if (dma_chan)
+		return dma_get_slave_channel(dma_chan);
+
+	chan = devm_kzalloc(pl08x->slave.dev, sizeof(*chan) + sizeof(*data),
+			    GFP_KERNEL);
+	if (!chan)
+		return NULL;
+
+	data = (void *)&chan[1];
+	data->bus_id = "(none)";
+	data->periph_buses = dma_spec->args[1];
+
+	chan->cd = data;
+	chan->host = pl08x;
+	chan->slave = true;
+	chan->name = data->bus_id;
+	chan->state = PL08X_CHAN_IDLE;
+	chan->signal = dma_spec->args[0];
+	chan->vc.desc_free = pl08x_desc_free;
+
+	vchan_init(&chan->vc, &pl08x->slave);
+
+	return dma_get_slave_channel(&chan->vc.chan);
+}
+
+static int pl08x_of_probe(struct amba_device *adev,
+			  struct pl08x_driver_data *pl08x,
+			  struct device_node *np)
+{
+	struct pl08x_platform_data *pd;
+	u32 cctl_memcpy = 0;
+	u32 val;
+	int ret;
+
+	pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return -ENOMEM;
+
+	/* Eligible bus masters for fetching LLIs */
+	if (of_property_read_bool(np, "lli-bus-interface-ahb1"))
+		pd->lli_buses |= PL08X_AHB1;
+	if (of_property_read_bool(np, "lli-bus-interface-ahb2"))
+		pd->lli_buses |= PL08X_AHB2;
+	if (!pd->lli_buses) {
+		dev_info(&adev->dev, "no bus masters for LLIs stated, assume all\n");
+		pd->lli_buses |= PL08X_AHB1 | PL08X_AHB2;
+	}
+
+	/* Eligible bus masters for memory access */
+	if (of_property_read_bool(np, "mem-bus-interface-ahb1"))
+		pd->mem_buses |= PL08X_AHB1;
+	if (of_property_read_bool(np, "mem-bus-interface-ahb2"))
+		pd->mem_buses |= PL08X_AHB2;
+	if (!pd->mem_buses) {
+		dev_info(&adev->dev, "no bus masters for memory stated, assume all\n");
+		pd->mem_buses |= PL08X_AHB1 | PL08X_AHB2;
+	}
+
+	/* Parse the memcpy channel properties */
+	ret = of_property_read_u32(np, "memcpy-burst-size", &val);
+	if (ret) {
+		dev_info(&adev->dev, "no memcpy burst size specified, using 1 byte\n");
+		val = 1;
+	}
+	switch (val) {
+	default:
+		dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n");
+		/* Fall through */
+	case 1:
+		cctl_memcpy |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 4:
+		cctl_memcpy |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 8:
+		cctl_memcpy |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 16:
+		cctl_memcpy |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 32:
+		cctl_memcpy |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 64:
+		cctl_memcpy |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 128:
+		cctl_memcpy |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 256:
+		cctl_memcpy |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	}
+
+	ret = of_property_read_u32(np, "memcpy-bus-width", &val);
+	if (ret) {
+		dev_info(&adev->dev, "no memcpy bus width specified, using 8 bits\n");
+		val = 8;
+	}
+	switch (val) {
+	default:
+		dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n");
+		/* Fall through */
+	case 8:
+		cctl_memcpy |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT |
+			       PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
+		break;
+	case 16:
+		cctl_memcpy |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT |
+			       PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
+		break;
+	case 32:
+		cctl_memcpy |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
+			       PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
+		break;
+	}
+
+	/* This is currently the only setting that makes sense */
+	cctl_memcpy |= PL080_CONTROL_PROT_SYS;
+
+	/* Set up memcpy channel */
+	pd->memcpy_channel.bus_id = "memcpy";
+	pd->memcpy_channel.cctl_memcpy = cctl_memcpy;
+	/* Use the buses that can access memory, obviously */
+	pd->memcpy_channel.periph_buses = pd->mem_buses;
+
+	pl08x->pd = pd;
+
+	return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate,
+					  pl08x);
+}
+#else
+static inline int pl08x_of_probe(struct amba_device *adev,
+				 struct pl08x_driver_data *pl08x,
+				 struct device_node *np)
+{
+	return -EINVAL;
+}
+#endif
+
 static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 {
 	struct pl08x_driver_data *pl08x;
 	const struct vendor_data *vd = id->data;
+	struct device_node *np = adev->dev.of_node;
 	u32 tsfr_size;
 	int ret = 0;
 	int i;
@@ -2093,9 +2273,15 @@
 	/* Get the platform data */
 	pl08x->pd = dev_get_platdata(&adev->dev);
 	if (!pl08x->pd) {
-		dev_err(&adev->dev, "no platform data supplied\n");
-		ret = -EINVAL;
-		goto out_no_platdata;
+		if (np) {
+			ret = pl08x_of_probe(adev, pl08x, np);
+			if (ret)
+				goto out_no_platdata;
+		} else {
+			dev_err(&adev->dev, "no platform data supplied\n");
+			ret = -EINVAL;
+			goto out_no_platdata;
+		}
 	}
 
 	/* Assign useful pointers to the driver state */
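
With the OF support above, a client no longer needs board platform data to
reach a PL08x channel; the generic dmaengine slave API resolves it through
pl08x_of_xlate(). A minimal sketch, assuming a hypothetical client node with
dmas = <&dmac 13 0x1> (signal 13, peripheral on AHB1) and dma-names = "rx":

	#include <linux/dmaengine.h>

	/* Hypothetical probe fragment; "rx" must match a dma-names entry. */
	static int client_request_rx_chan(struct device *dev,
					  struct dma_chan **out)
	{
		struct dma_chan *chan;

		chan = dma_request_slave_channel(dev, "rx");
		if (!chan)
			return -EPROBE_DEFER;	/* no matching channel (yet) */

		*out = chan;	/* next step: dmaengine_slave_config() */
		return 0;
	}
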
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index d3629b7..58d4062 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -448,6 +448,7 @@
 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 {
 	struct dma_async_tx_descriptor	*txd = &desc->txd;
+	struct at_dma			*atdma = to_at_dma(atchan->chan_common.device);
 
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"descriptor %u complete\n", txd->cookie);
@@ -456,6 +457,13 @@
 	if (!atc_chan_is_cyclic(atchan))
 		dma_cookie_complete(txd);
 
+	/* If the transfer was a memset, free our temporary buffer */
+	if (desc->memset) {
+		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
+			      desc->memset_paddr);
+		desc->memset = false;
+	}
+
 	/* move children to free_list */
 	list_splice_init(&desc->tx_list, &atchan->free_list);
 	/* move myself to free_list */
@@ -717,14 +725,14 @@
 	size_t			len = 0;
 	int			i;
 
+	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
+		return NULL;
+
 	dev_info(chan2dev(chan),
 		 "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
 		__func__, xt->src_start, xt->dst_start, xt->numf,
 		xt->frame_size, flags);
 
-	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
-		return NULL;
-
 	/*
 	 * The controller can only "skip" X bytes every Y bytes, so we
 	 * need to make sure we are given a template that fit that
@@ -873,6 +881,93 @@
 	return NULL;
 }
 
+/**
+ * atc_prep_dma_memset - prepare a memset operation
+ * @chan: the channel to prepare operation on
+ * @dest: operation destination address
+ * @value: value to set memory buffer to
+ * @len: operation length
+ * @flags: tx descriptor status flags
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
+		    size_t len, unsigned long flags)
+{
+	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	struct at_dma		*atdma = to_at_dma(chan->device);
+	struct at_desc		*desc = NULL;
+	size_t			xfer_count;
+	u32			ctrla;
+	u32			ctrlb;
+
+	dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__,
+		dest, value, len, flags);
+
+	if (unlikely(!len)) {
+		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
+		return NULL;
+	}
+
+	if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
+		dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
+			__func__);
+		return NULL;
+	}
+
+	xfer_count = len >> 2;
+	if (xfer_count > ATC_BTSIZE_MAX) {
+		dev_err(chan2dev(chan), "%s: buffer is too big\n",
+			__func__);
+		return NULL;
+	}
+
+	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
+		| ATC_SRC_ADDR_MODE_FIXED
+		| ATC_DST_ADDR_MODE_INCR
+		| ATC_FC_MEM2MEM;
+
+	ctrla = ATC_SRC_WIDTH(2) |
+		ATC_DST_WIDTH(2);
+
+	desc = atc_desc_get(atchan);
+	if (!desc) {
+		dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
+			__func__);
+		return NULL;
+	}
+
+	desc->memset_vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC,
+					    &desc->memset_paddr);
+	if (!desc->memset_vaddr) {
+		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
+			__func__);
+		goto err_put_desc;
+	}
+
+	*desc->memset_vaddr = value;
+	desc->memset = true;
+
+	desc->lli.saddr = desc->memset_paddr;
+	desc->lli.daddr = dest;
+	desc->lli.ctrla = ctrla | xfer_count;
+	desc->lli.ctrlb = ctrlb;
+
+	desc->txd.cookie = -EBUSY;
+	desc->len = len;
+	desc->total_len = len;
+
+	/* set end-of-link on the descriptor */
+	set_desc_eol(desc);
+
+	desc->txd.flags = flags;
+
+	return &desc->txd;
+
+err_put_desc:
+	atc_desc_put(atchan, desc);
+	return NULL;
+}
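+
+/*
+ * Usage sketch (hypothetical values, not part of this patch) for the new
+ * memset capability. Assuming "chan" is a channel whose device advertises
+ * DMA_MEMSET, and that dest/len honour fill_align (4 bytes here):
+ *
+ *	struct dma_async_tx_descriptor *tx;
+ *
+ *	tx = chan->device->device_prep_dma_memset(chan, dest, 0, len,
+ *						   DMA_PREP_INTERRUPT);
+ *	if (tx) {
+ *		dmaengine_submit(tx);
+ *		dma_async_issue_pending(chan);
+ *	}
+ */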
+
 
 /**
  * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
@@ -1755,6 +1850,8 @@
 	dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
 	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
+	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
+	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
 
@@ -1818,7 +1915,16 @@
 	if (!atdma->dma_desc_pool) {
 		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
 		err = -ENOMEM;
-		goto err_pool_create;
+		goto err_desc_pool_create;
+	}
+
+	/* create a pool of consistent memory blocks for memset blocks */
+	atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
+					     &pdev->dev, sizeof(int), 4, 0);
+	if (!atdma->memset_pool) {
+		dev_err(&pdev->dev, "No memory for memset dma pool\n");
+		err = -ENOMEM;
+		goto err_memset_pool_create;
 	}
 
 	/* clear any pending interrupt */
@@ -1864,6 +1970,11 @@
 	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
 
+	if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
+		atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
+		atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
+	}
+
 	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
 		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
 		/* controller can do slave DMA: can trigger cyclic transfers */
@@ -1884,8 +1995,9 @@
 
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
 
-	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
+	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
 	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
+	  dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
 	  dma_has_cap(DMA_SG, atdma->dma_common.cap_mask)  ? "sg-cpy " : "",
 	  plat_dat->nr_channels);
@@ -1910,8 +2022,10 @@
 
 err_of_dma_controller_register:
 	dma_async_device_unregister(&atdma->dma_common);
+	dma_pool_destroy(atdma->memset_pool);
+err_memset_pool_create:
 	dma_pool_destroy(atdma->dma_desc_pool);
-err_pool_create:
+err_desc_pool_create:
 	free_irq(platform_get_irq(pdev, 0), atdma);
 err_irq:
 	clk_disable_unprepare(atdma->clk);
@@ -1936,6 +2050,7 @@
 	at_dma_off(atdma);
 	dma_async_device_unregister(&atdma->dma_common);
 
+	dma_pool_destroy(atdma->memset_pool);
 	dma_pool_destroy(atdma->dma_desc_pool);
 	free_irq(platform_get_irq(pdev, 0), atdma);
 
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 7f5a082..c3bebbe 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -200,6 +200,11 @@
 	size_t				boundary;
 	size_t				dst_hole;
 	size_t				src_hole;
+
+	/* Memset temporary buffer */
+	bool				memset;
+	dma_addr_t			memset_paddr;
+	int				*memset_vaddr;
 };
 
 static inline struct at_desc *
@@ -330,6 +335,7 @@
 	u8			all_chan_mask;
 
 	struct dma_pool		*dma_desc_pool;
+	struct dma_pool		*memset_pool;
 	/* AT THE END channels table */
 	struct at_dma_chan	chan[0];
 };
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 40afa2a..a165b4b 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -625,12 +625,12 @@
 		       unsigned int sg_len, enum dma_transfer_direction direction,
 		       unsigned long flags, void *context)
 {
-	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
-	struct at_xdmac_desc	*first = NULL, *prev = NULL;
-	struct scatterlist	*sg;
-	int			i;
-	unsigned int		xfer_size = 0;
-	unsigned long		irqflags;
+	struct at_xdmac_chan		*atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac_desc		*first = NULL, *prev = NULL;
+	struct scatterlist		*sg;
+	int				i;
+	unsigned int			xfer_size = 0;
+	unsigned long			irqflags;
 	struct dma_async_tx_descriptor	*ret = NULL;
 
 	if (!sgl)
@@ -797,10 +797,7 @@
 		list_add_tail(&desc->desc_node, &first->descs_list);
 	}
 
-	prev->lld.mbr_nda = first->tx_dma_desc.phys;
-	dev_dbg(chan2dev(chan),
-		"%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
-		__func__, prev, &prev->lld.mbr_nda);
+	at_xdmac_queue_desc(chan, prev, first);
 	first->tx_dma_desc.flags = flags;
 	first->xfer_size = buf_len;
 	first->direction = direction;
@@ -1135,7 +1132,7 @@
 	 * SAMA5D4x), so we can use the same interface for source and dest,
 	 * that solves the fact we don't know the direction.
 	 */
-	u32			chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM
+	u32			chan_cc = AT_XDMAC_CC_DAM_UBS_AM
 					| AT_XDMAC_CC_SAM_INCREMENTED_AM
 					| AT_XDMAC_CC_DIF(0)
 					| AT_XDMAC_CC_SIF(0)
@@ -1203,6 +1200,168 @@
 	return &desc->tx_dma_desc;
 }
 
+static struct dma_async_tx_descriptor *
+at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
+			    unsigned int sg_len, int value,
+			    unsigned long flags)
+{
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac_desc	*desc, *pdesc = NULL,
+				*ppdesc = NULL, *first = NULL;
+	struct scatterlist	*sg, *psg = NULL, *ppsg = NULL;
+	size_t			stride = 0, pstride = 0, len = 0;
+	int			i;
+
+	if (!sgl)
+		return NULL;
+
+	dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
+		__func__, sg_len, value, flags);
+
+	/* Prepare descriptors. */
+	for_each_sg(sgl, sg, sg_len, i) {
+		dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n",
+			__func__, sg_dma_address(sg), sg_dma_len(sg),
+			value, flags);
+		desc = at_xdmac_memset_create_desc(chan, atchan,
+						   sg_dma_address(sg),
+						   sg_dma_len(sg),
+						   value);
+		if (!desc && first)
+			list_splice_init(&first->descs_list,
+					 &atchan->free_descs_list);
+
+		if (!first)
+			first = desc;
+
+		/* Update our strides */
+		pstride = stride;
+		if (psg)
+			stride = sg_dma_address(sg) -
+				(sg_dma_address(psg) + sg_dma_len(psg));
+
+		/*
+		 * The scatterlist API gives us only the address and
+		 * length of each element.
+		 *
+		 * Unfortunately, we don't have the stride, which we
+		 * will need to compute.
+		 *
+		 * That makes us end up in a situation like this one:
+		 *    len    stride    len    stride    len
+		 * +-------+        +-------+        +-------+
+		 * |  N-2  |        |  N-1  |        |   N   |
+		 * +-------+        +-------+        +-------+
+		 *
+		 * We need all three elements (N-2, N-1 and N) to decide
+		 * whether to queue N-1 or reuse N-2.
+		 *
+		 * We will only consider N if it is the last element.
+		 */
+		if (ppdesc && pdesc) {
+			if ((stride == pstride) &&
+			    (sg_dma_len(ppsg) == sg_dma_len(psg))) {
+				dev_dbg(chan2dev(chan),
+					"%s: desc 0x%p can be merged with desc 0x%p\n",
+					__func__, pdesc, ppdesc);
+
+				/*
+				 * Increment the block count of the
+				 * N-2 descriptor
+				 */
+				at_xdmac_increment_block_count(chan, ppdesc);
+				ppdesc->lld.mbr_dus = stride;
+
+				/*
+				 * Put back the N-1 descriptor in the
+				 * free descriptor list
+				 */
+				list_add_tail(&pdesc->desc_node,
+					      &atchan->free_descs_list);
+
+				/*
+				 * Make our N-1 descriptor pointer
+				 * point to the N-2 since they were
+				 * actually merged.
+				 */
+				pdesc = ppdesc;
+
+			/*
+			 * Rule out the case where we don't have
+			 * pstride computed yet (our second sg
+			 * element).
+			 *
+			 * We also want to catch the case where there
+			 * would be a negative stride.
+			 */
+			} else if (pstride ||
+				   sg_dma_address(sg) < sg_dma_address(psg)) {
+				/*
+				 * Queue the N-1 descriptor after the
+				 * N-2
+				 */
+				at_xdmac_queue_desc(chan, ppdesc, pdesc);
+
+				/*
+				 * Add the N-1 descriptor to the list
+				 * of the descriptors used for this
+				 * transfer
+				 */
+				list_add_tail(&desc->desc_node,
+					      &first->descs_list);
+				dev_dbg(chan2dev(chan),
+					"%s: add desc 0x%p to descs_list 0x%p\n",
+					__func__, desc, first);
+			}
+		}
+
+		/*
+		 * If this is the last element, just see if it has the
+		 * same size as the previous element.
+		 *
+		 * If so, we can merge it with the previous descriptor
+		 * since we don't care about the stride anymore.
+		 */
+		if ((i == (sg_len - 1)) &&
+		    sg_dma_len(ppsg) == sg_dma_len(psg)) {
+			dev_dbg(chan2dev(chan),
+				"%s: desc 0x%p can be merged with desc 0x%p\n",
+				__func__, desc, pdesc);
+
+			/*
+			 * Increment the block count of the N-1
+			 * descriptor
+			 */
+			at_xdmac_increment_block_count(chan, pdesc);
+			pdesc->lld.mbr_dus = stride;
+
+			/*
+			 * Put back the N descriptor in the free
+			 * descriptor list
+			 */
+			list_add_tail(&desc->desc_node,
+				      &atchan->free_descs_list);
+		}
+
+		/* Update our descriptors */
+		ppdesc = pdesc;
+		pdesc = desc;
+
+		/* Update our scatter pointers */
+		ppsg = psg;
+		psg = sg;
+
+		len += sg_dma_len(sg);
+	}
+
+	first->tx_dma_desc.cookie = -EBUSY;
+	first->tx_dma_desc.flags = flags;
+	first->xfer_size = len;
+
+	return &first->tx_dma_desc;
+}
+
 static enum dma_status
 at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		struct dma_tx_state *txstate)
@@ -1736,6 +1895,7 @@
 	dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
 	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
 	dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
+	dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
 	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
 	/*
 	 * Without DMA_PRIVATE the driver is not able to allocate more than
@@ -1751,6 +1911,7 @@
 	atxdmac->dma.device_prep_interleaved_dma	= at_xdmac_prep_interleaved;
 	atxdmac->dma.device_prep_dma_memcpy		= at_xdmac_prep_dma_memcpy;
 	atxdmac->dma.device_prep_dma_memset		= at_xdmac_prep_dma_memset;
+	atxdmac->dma.device_prep_dma_memset_sg		= at_xdmac_prep_dma_memset_sg;
 	atxdmac->dma.device_prep_slave_sg		= at_xdmac_prep_slave_sg;
 	atxdmac->dma.device_config			= at_xdmac_device_config;
 	atxdmac->dma.device_pause			= at_xdmac_device_pause;
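
The descriptor-merging logic in at_xdmac_prep_dma_memset_sg() above reduces
to two small rules. A standalone sketch of them, with hypothetical helper
names:

	#include <linux/scatterlist.h>

	/* Gap between two consecutive scatterlist entries. */
	static size_t sg_stride(struct scatterlist *prev, struct scatterlist *cur)
	{
		return sg_dma_address(cur) -
		       (sg_dma_address(prev) + sg_dma_len(prev));
	}

	/*
	 * Two chunks can share one hardware descriptor (by bumping its
	 * block count) when they are equally long and equally spaced.
	 */
	static bool can_merge(size_t stride, size_t pstride,
			      struct scatterlist *ppsg, struct scatterlist *psg)
	{
		return stride == pstride &&
		       sg_dma_len(ppsg) == sg_dma_len(psg);
	}
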
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index fd22dd3..c340ca9 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2730,7 +2730,7 @@
 	 * This controller can only access address at even 32bit boundaries,
 	 * i.e. 2^2
 	 */
-	base->dma_memcpy.copy_align = 2;
+	base->dma_memcpy.copy_align = DMAENGINE_ALIGN_4_BYTES;
 	err = dma_async_device_register(&base->dma_memcpy);
 
 	if (err)
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
new file mode 100644
index 0000000..5b2395e
--- /dev/null
+++ b/drivers/dma/dma-axi-dmac.c
@@ -0,0 +1,691 @@
+/*
+ * Driver for the Analog Devices AXI-DMAC core
+ *
+ * Copyright 2013-2015 Analog Devices Inc.
+ *  Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/dma/axi-dmac.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/*
+ * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
+ * various instantiation parameters which decide the exact feature set
+ * supported by the core.
+ *
+ * Each channel of the core has a source interface and a destination interface.
+ * The number of channels and the type of the channel interfaces are selected
+ * at configuration time. An interface can either be connected to a central
+ * memory interconnect, which allows access to system memory, or it can be
+ * connected to a dedicated bus which is directly connected to a data port on
+ * a peripheral. Given that those are configuration options of the core that
+ * are selected when it is instantiated, they can not be changed by software
+ * at runtime. By extension this means that each channel is uni-directional:
+ * it can either be device to memory or memory to device, but not both. Also,
+ * since the device side is a dedicated data bus connected only to a single
+ * peripheral, there is no address that can or needs to be configured for the
+ * device side.
+ */
+
+#define AXI_DMAC_REG_IRQ_MASK		0x80
+#define AXI_DMAC_REG_IRQ_PENDING	0x84
+#define AXI_DMAC_REG_IRQ_SOURCE		0x88
+
+#define AXI_DMAC_REG_CTRL		0x400
+#define AXI_DMAC_REG_TRANSFER_ID	0x404
+#define AXI_DMAC_REG_START_TRANSFER	0x408
+#define AXI_DMAC_REG_FLAGS		0x40c
+#define AXI_DMAC_REG_DEST_ADDRESS	0x410
+#define AXI_DMAC_REG_SRC_ADDRESS	0x414
+#define AXI_DMAC_REG_X_LENGTH		0x418
+#define AXI_DMAC_REG_Y_LENGTH		0x41c
+#define AXI_DMAC_REG_DEST_STRIDE	0x420
+#define AXI_DMAC_REG_SRC_STRIDE		0x424
+#define AXI_DMAC_REG_TRANSFER_DONE	0x428
+#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID 0x42c
+#define AXI_DMAC_REG_STATUS		0x430
+#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
+#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438
+
+#define AXI_DMAC_CTRL_ENABLE		BIT(0)
+#define AXI_DMAC_CTRL_PAUSE		BIT(1)
+
+#define AXI_DMAC_IRQ_SOT		BIT(0)
+#define AXI_DMAC_IRQ_EOT		BIT(1)
+
+#define AXI_DMAC_FLAG_CYCLIC		BIT(0)
+
+struct axi_dmac_sg {
+	dma_addr_t src_addr;
+	dma_addr_t dest_addr;
+	unsigned int x_len;
+	unsigned int y_len;
+	unsigned int dest_stride;
+	unsigned int src_stride;
+	unsigned int id;
+};
+
+struct axi_dmac_desc {
+	struct virt_dma_desc vdesc;
+	bool cyclic;
+
+	unsigned int num_submitted;
+	unsigned int num_completed;
+	unsigned int num_sgs;
+	struct axi_dmac_sg sg[];
+};
+
+struct axi_dmac_chan {
+	struct virt_dma_chan vchan;
+
+	struct axi_dmac_desc *next_desc;
+	struct list_head active_descs;
+	enum dma_transfer_direction direction;
+
+	unsigned int src_width;
+	unsigned int dest_width;
+	unsigned int src_type;
+	unsigned int dest_type;
+
+	unsigned int max_length;
+	unsigned int align_mask;
+
+	bool hw_cyclic;
+	bool hw_2d;
+};
+
+struct axi_dmac {
+	void __iomem *base;
+	int irq;
+
+	struct clk *clk;
+
+	struct dma_device dma_dev;
+	struct axi_dmac_chan chan;
+
+	struct device_dma_parameters dma_parms;
+};
+
+static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
+{
+	return container_of(chan->vchan.chan.device, struct axi_dmac,
+		dma_dev);
+}
+
+static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
+{
+	return container_of(c, struct axi_dmac_chan, vchan.chan);
+}
+
+static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
+{
+	return container_of(vdesc, struct axi_dmac_desc, vdesc);
+}
+
+static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
+	unsigned int val)
+{
+	writel(val, axi_dmac->base + reg);
+}
+
+static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
+{
+	return readl(axi_dmac->base + reg);
+}
+
+static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
+{
+	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
+}
+
+static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
+{
+	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
+}
+
+static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
+{
+	if (len == 0 || len > chan->max_length)
+		return false;
+	if ((len & chan->align_mask) != 0) /* Not aligned */
+		return false;
+	return true;
+}
+
+static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
+{
+	if ((addr & chan->align_mask) != 0) /* Not aligned */
+		return false;
+	return true;
+}
+
+static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
+{
+	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+	struct virt_dma_desc *vdesc;
+	struct axi_dmac_desc *desc;
+	struct axi_dmac_sg *sg;
+	unsigned int flags = 0;
+	unsigned int val;
+
+	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
+	if (val) /* Queue is full, wait for the next SOT IRQ */
+		return;
+
+	desc = chan->next_desc;
+
+	if (!desc) {
+		vdesc = vchan_next_desc(&chan->vchan);
+		if (!vdesc)
+			return;
+		list_move_tail(&vdesc->node, &chan->active_descs);
+		desc = to_axi_dmac_desc(vdesc);
+	}
+	sg = &desc->sg[desc->num_submitted];
+
+	desc->num_submitted++;
+	if (desc->num_submitted == desc->num_sgs)
+		chan->next_desc = NULL;
+	else
+		chan->next_desc = desc;
+
+	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
+
+	if (axi_dmac_dest_is_mem(chan)) {
+		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
+		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
+	}
+
+	if (axi_dmac_src_is_mem(chan)) {
+		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
+		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
+	}
+
+	/*
+	 * If the hardware supports cyclic transfers and there is no callback to
+	 * call, enable hw cyclic mode to avoid unnecessary interrupts.
+	 */
+	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback)
+		flags |= AXI_DMAC_FLAG_CYCLIC;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
+	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
+	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
+	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
+}
+
+static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
+{
+	return list_first_entry_or_null(&chan->active_descs,
+		struct axi_dmac_desc, vdesc.node);
+}
+
+static void axi_dmac_transfer_done(struct axi_dmac_chan *chan,
+	unsigned int completed_transfers)
+{
+	struct axi_dmac_desc *active;
+	struct axi_dmac_sg *sg;
+
+	active = axi_dmac_active_desc(chan);
+	if (!active)
+		return;
+
+	if (active->cyclic) {
+		vchan_cyclic_callback(&active->vdesc);
+	} else {
+		do {
+			sg = &active->sg[active->num_completed];
+			if (!(BIT(sg->id) & completed_transfers))
+				break;
+			active->num_completed++;
+			if (active->num_completed == active->num_sgs) {
+				list_del(&active->vdesc.node);
+				vchan_cookie_complete(&active->vdesc);
+				active = axi_dmac_active_desc(chan);
+			}
+		} while (active);
+	}
+}
+
+static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
+{
+	struct axi_dmac *dmac = devid;
+	unsigned int pending;
+
+	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
+	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);
+
+	spin_lock(&dmac->chan.vchan.lock);
+	/* One or more transfers have finished */
+	if (pending & AXI_DMAC_IRQ_EOT) {
+		unsigned int completed;
+
+		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
+		axi_dmac_transfer_done(&dmac->chan, completed);
+	}
+	/* Space has become available in the descriptor queue */
+	if (pending & AXI_DMAC_IRQ_SOT)
+		axi_dmac_start_transfer(&dmac->chan);
+	spin_unlock(&dmac->chan.vchan.lock);
+
+	return IRQ_HANDLED;
+}
+
+static int axi_dmac_terminate_all(struct dma_chan *c)
+{
+	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
+	chan->next_desc = NULL;
+	vchan_get_all_descriptors(&chan->vchan, &head);
+	list_splice_tail_init(&chan->active_descs, &head);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&chan->vchan, &head);
+
+	return 0;
+}
+
+static void axi_dmac_issue_pending(struct dma_chan *c)
+{
+	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+	unsigned long flags;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	if (vchan_issue_pending(&chan->vchan))
+		axi_dmac_start_transfer(chan);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
+{
+	struct axi_dmac_desc *desc;
+
+	desc = kzalloc(sizeof(struct axi_dmac_desc) +
+		sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
+	if (!desc)
+		return NULL;
+
+	desc->num_sgs = num_sgs;
+
+	return desc;
+}
+
+static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
+	struct dma_chan *c, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+	struct axi_dmac_desc *desc;
+	struct scatterlist *sg;
+	unsigned int i;
+
+	if (direction != chan->direction)
+		return NULL;
+
+	desc = axi_dmac_alloc_desc(sg_len);
+	if (!desc)
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
+		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
+			kfree(desc);
+			return NULL;
+		}
+
+		if (direction == DMA_DEV_TO_MEM)
+			desc->sg[i].dest_addr = sg_dma_address(sg);
+		else
+			desc->sg[i].src_addr = sg_dma_address(sg);
+		desc->sg[i].x_len = sg_dma_len(sg);
+		desc->sg[i].y_len = 1;
+	}
+
+	desc->cyclic = false;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
+	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags)
+{
+	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+	struct axi_dmac_desc *desc;
+	unsigned int num_periods, i;
+
+	if (direction != chan->direction)
+		return NULL;
+
+	if (!axi_dmac_check_len(chan, buf_len) ||
+	    !axi_dmac_check_addr(chan, buf_addr))
+		return NULL;
+
+	if (period_len == 0 || buf_len % period_len)
+		return NULL;
+
+	num_periods = buf_len / period_len;
+
+	desc = axi_dmac_alloc_desc(num_periods);
+	if (!desc)
+		return NULL;
+
+	for (i = 0; i < num_periods; i++) {
+		if (direction == DMA_DEV_TO_MEM)
+			desc->sg[i].dest_addr = buf_addr;
+		else
+			desc->sg[i].src_addr = buf_addr;
+		desc->sg[i].x_len = period_len;
+		desc->sg[i].y_len = 1;
+		buf_addr += period_len;
+	}
+
+	desc->cyclic = true;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
+	struct dma_chan *c, struct dma_interleaved_template *xt,
+	unsigned long flags)
+{
+	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+	struct axi_dmac_desc *desc;
+	size_t dst_icg, src_icg;
+
+	if (xt->frame_size != 1)
+		return NULL;
+
+	if (xt->dir != chan->direction)
+		return NULL;
+
+	if (axi_dmac_src_is_mem(chan)) {
+		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
+			return NULL;
+	}
+
+	if (axi_dmac_dest_is_mem(chan)) {
+		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
+			return NULL;
+	}
+
+	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
+	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+
+	if (chan->hw_2d) {
+		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
+		    !axi_dmac_check_len(chan, xt->numf))
+			return NULL;
+		if (xt->sgl[0].size + dst_icg > chan->max_length ||
+		    xt->sgl[0].size + src_icg > chan->max_length)
+			return NULL;
+	} else {
+		if (dst_icg != 0 || src_icg != 0)
+			return NULL;
+		if (chan->max_length / xt->sgl[0].size < xt->numf)
+			return NULL;
+		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
+			return NULL;
+	}
+
+	desc = axi_dmac_alloc_desc(1);
+	if (!desc)
+		return NULL;
+
+	if (axi_dmac_src_is_mem(chan)) {
+		desc->sg[0].src_addr = xt->src_start;
+		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
+	}
+
+	if (axi_dmac_dest_is_mem(chan)) {
+		desc->sg[0].dest_addr = xt->dst_start;
+		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
+	}
+
+	if (chan->hw_2d) {
+		desc->sg[0].x_len = xt->sgl[0].size;
+		desc->sg[0].y_len = xt->numf;
+	} else {
+		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
+		desc->sg[0].y_len = 1;
+	}
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static void axi_dmac_free_chan_resources(struct dma_chan *c)
+{
+	vchan_free_chan_resources(to_virt_chan(c));
+}
+
+static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
+{
+	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
+}
+
+/*
+ * The configuration stored in the devicetree matches the configuration
+ * parameters of the peripheral instance and allows the driver to know which
+ * features are implemented and how it should behave.
+ */
+static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
+	struct axi_dmac_chan *chan)
+{
+	u32 val;
+	int ret;
+
+	ret = of_property_read_u32(of_chan, "reg", &val);
+	if (ret)
+		return ret;
+
+	/* We only support 1 channel for now */
+	if (val != 0)
+		return -EINVAL;
+
+	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
+	if (ret)
+		return ret;
+	if (val > AXI_DMAC_BUS_TYPE_FIFO)
+		return -EINVAL;
+	chan->src_type = val;
+
+	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
+	if (ret)
+		return ret;
+	if (val > AXI_DMAC_BUS_TYPE_FIFO)
+		return -EINVAL;
+	chan->dest_type = val;
+
+	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
+	if (ret)
+		return ret;
+	chan->src_width = val / 8;
+
+	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
+	if (ret)
+		return ret;
+	chan->dest_width = val / 8;
+
+	ret = of_property_read_u32(of_chan, "adi,length-width", &val);
+	if (ret)
+		return ret;
+
+	if (val >= 32)
+		chan->max_length = UINT_MAX;
+	else
+		chan->max_length = (1ULL << val) - 1;
+
+	chan->align_mask = max(chan->dest_width, chan->src_width) - 1;
+
+	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
+		chan->direction = DMA_MEM_TO_MEM;
+	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
+		chan->direction = DMA_MEM_TO_DEV;
+	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
+		chan->direction = DMA_DEV_TO_MEM;
+	else
+		chan->direction = DMA_DEV_TO_DEV;
+
+	chan->hw_cyclic = of_property_read_bool(of_chan, "adi,cyclic");
+	chan->hw_2d = of_property_read_bool(of_chan, "adi,2d");
+
+	return 0;
+}
+
+static int axi_dmac_probe(struct platform_device *pdev)
+{
+	struct device_node *of_channels, *of_chan;
+	struct dma_device *dma_dev;
+	struct axi_dmac *dmac;
+	struct resource *res;
+	int ret;
+
+	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
+	if (!dmac)
+		return -ENOMEM;
+
+	dmac->irq = platform_get_irq(pdev, 0);
+	if (dmac->irq <= 0)
+		return -EINVAL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dmac->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dmac->base))
+		return PTR_ERR(dmac->base);
+
+	dmac->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(dmac->clk))
+		return PTR_ERR(dmac->clk);
+
+	INIT_LIST_HEAD(&dmac->chan.active_descs);
+
+	of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
+	if (of_channels == NULL)
+		return -ENODEV;
+
+	for_each_child_of_node(of_channels, of_chan) {
+		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
+		if (ret) {
+			of_node_put(of_chan);
+			of_node_put(of_channels);
+			return -EINVAL;
+		}
+	}
+	of_node_put(of_channels);
+
+	pdev->dev.dma_parms = &dmac->dma_parms;
+	dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length);
+
+	dma_dev = &dmac->dma_dev;
+	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
+	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
+	dma_dev->device_tx_status = dma_cookie_status;
+	dma_dev->device_issue_pending = axi_dmac_issue_pending;
+	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
+	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
+	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
+	dma_dev->device_terminate_all = axi_dmac_terminate_all;
+	dma_dev->dev = &pdev->dev;
+	dma_dev->chancnt = 1;
+	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
+	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
+	dma_dev->directions = BIT(dmac->chan.direction);
+	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	INIT_LIST_HEAD(&dma_dev->channels);
+
+	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
+	vchan_init(&dmac->chan.vchan, dma_dev);
+
+	ret = clk_prepare_enable(dmac->clk);
+	if (ret < 0)
+		return ret;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
+
+	ret = dma_async_device_register(dma_dev);
+	if (ret)
+		goto err_clk_disable;
+
+	ret = of_dma_controller_register(pdev->dev.of_node,
+		of_dma_xlate_by_chan_id, dma_dev);
+	if (ret)
+		goto err_unregister_device;
+
+	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, 0,
+		dev_name(&pdev->dev), dmac);
+	if (ret)
+		goto err_unregister_of;
+
+	platform_set_drvdata(pdev, dmac);
+
+	return 0;
+
+err_unregister_of:
+	of_dma_controller_free(pdev->dev.of_node);
+err_unregister_device:
+	dma_async_device_unregister(&dmac->dma_dev);
+err_clk_disable:
+	clk_disable_unprepare(dmac->clk);
+
+	return ret;
+}
+
+static int axi_dmac_remove(struct platform_device *pdev)
+{
+	struct axi_dmac *dmac = platform_get_drvdata(pdev);
+
+	of_dma_controller_free(pdev->dev.of_node);
+	free_irq(dmac->irq, dmac);
+	tasklet_kill(&dmac->chan.vchan.task);
+	dma_async_device_unregister(&dmac->dma_dev);
+	clk_disable_unprepare(dmac->clk);
+
+	return 0;
+}
+
+static const struct of_device_id axi_dmac_of_match_table[] = {
+	{ .compatible = "adi,axi-dmac-1.00.a" },
+	{ },
+};
+
+static struct platform_driver axi_dmac_driver = {
+	.driver = {
+		.name = "dma-axi-dmac",
+		.of_match_table = axi_dmac_of_match_table,
+	},
+	.probe = axi_dmac_probe,
+	.remove = axi_dmac_remove,
+};
+module_platform_driver(axi_dmac_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
+MODULE_LICENSE("GPL v2");
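
Since the new driver advertises only DMA_SLAVE and DMA_CYCLIC, its natural
consumer is a streaming client feeding or draining the FPGA data port. A
minimal sketch of a cyclic capture setup, assuming a DMA_DEV_TO_MEM channel
and a DMA-mapped buffer whose length is a multiple of period_len:

	static int start_capture(struct dma_chan *chan, dma_addr_t buf,
				 size_t buf_len, size_t period_len)
	{
		struct dma_async_tx_descriptor *tx;

		/* direction must match the channel's fixed direction */
		tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					       DMA_DEV_TO_MEM,
					       DMA_PREP_INTERRUPT);
		if (!tx)
			return -EINVAL;

		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		return 0;
	}
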
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 26d2f0e..dade7c4 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -145,7 +145,8 @@
 	struct jz4780_dma_chan chan[JZ_DMA_NR_CHANNELS];
 };
 
-struct jz4780_dma_data {
+struct jz4780_dma_filter_data {
+	struct device_node *of_node;
 	uint32_t transfer_type;
 	int channel;
 };
@@ -214,11 +215,25 @@
 	kfree(desc);
 }
 
-static uint32_t jz4780_dma_transfer_size(unsigned long val, int *ord)
+static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
 {
-	*ord = ffs(val) - 1;
+	int ord = ffs(val) - 1;
 
-	switch (*ord) {
+	/*
+	 * 8 byte transfer sizes are unsupported, so fall back on 4. If it's larger
+	 * than the maximum, just limit it. It is perfectly safe to fall back
+	 * in this way since we won't exceed the maximum burst size supported
+	 * by the device, the only effect is reduced efficiency. This is better
+	 * than refusing to perform the request at all.
+	 */
+	if (ord == 3)
+		ord = 2;
+	else if (ord > 7)
+		ord = 7;
+
+	*shift = ord;
+
+	switch (ord) {
 	case 0:
 		return JZ_DMA_SIZE_1_BYTE;
 	case 1:
@@ -231,20 +246,17 @@
 		return JZ_DMA_SIZE_32_BYTE;
 	case 6:
 		return JZ_DMA_SIZE_64_BYTE;
-	case 7:
-		return JZ_DMA_SIZE_128_BYTE;
 	default:
-		return -EINVAL;
+		return JZ_DMA_SIZE_128_BYTE;
 	}
 }
 
-static uint32_t jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
+static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
 	struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
 	enum dma_transfer_direction direction)
 {
 	struct dma_slave_config *config = &jzchan->config;
 	uint32_t width, maxburst, tsz;
-	int ord;
 
 	if (direction == DMA_MEM_TO_DEV) {
 		desc->dcm = JZ_DMA_DCM_SAI;
@@ -271,8 +283,8 @@
 	 * divisible by the transfer size, and we must not use more than the
 	 * maximum burst specified by the user.
 	 */
-	tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst), &ord);
-	jzchan->transfer_shift = ord;
+	tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst),
+				       &jzchan->transfer_shift);
 
 	switch (width) {
 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -289,12 +301,14 @@
 	desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
 	desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;
 
-	desc->dtc = len >> ord;
+	desc->dtc = len >> jzchan->transfer_shift;
+	return 0;
 }
 
 static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
 	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
-	enum dma_transfer_direction direction, unsigned long flags)
+	enum dma_transfer_direction direction, unsigned long flags,
+	void *context)
 {
 	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
 	struct jz4780_dma_desc *desc;
@@ -307,12 +321,11 @@
 
 	for (i = 0; i < sg_len; i++) {
 		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
-					sg_dma_address(&sgl[i]),
-					sg_dma_len(&sgl[i]),
-					direction);
+					      sg_dma_address(&sgl[i]),
+					      sg_dma_len(&sgl[i]),
+					      direction);
 		if (err < 0)
-			return ERR_PTR(err);
-
+			return NULL;
 
 		desc->desc[i].dcm |= JZ_DMA_DCM_TIE;
 
@@ -354,9 +367,9 @@
 
 	for (i = 0; i < periods; i++) {
 		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
-					period_len, direction);
+					      period_len, direction);
 		if (err < 0)
-			return ERR_PTR(err);
+			return NULL;
 
 		buf_addr += period_len;
 
@@ -390,15 +403,13 @@
 	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
 	struct jz4780_dma_desc *desc;
 	uint32_t tsz;
-	int ord;
 
 	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
 	if (!desc)
 		return NULL;
 
-	tsz = jz4780_dma_transfer_size(dest | src | len, &ord);
-	if (tsz < 0)
-		return ERR_PTR(tsz);
+	tsz = jz4780_dma_transfer_size(dest | src | len,
+				       &jzchan->transfer_shift);
 
 	desc->desc[0].dsa = src;
 	desc->desc[0].dta = dest;
@@ -407,7 +418,7 @@
 			    tsz << JZ_DMA_DCM_TSZ_SHIFT |
 			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
 			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
-	desc->desc[0].dtc = len >> ord;
+	desc->desc[0].dtc = len >> jzchan->transfer_shift;
 
 	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
 }
@@ -484,8 +495,9 @@
 	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
 }
 
-static int jz4780_dma_terminate_all(struct jz4780_dma_chan *jzchan)
+static int jz4780_dma_terminate_all(struct dma_chan *chan)
 {
+	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
 	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
 	unsigned long flags;
 	LIST_HEAD(head);
@@ -507,9 +519,11 @@
 	return 0;
 }
 
-static int jz4780_dma_slave_config(struct jz4780_dma_chan *jzchan,
-	const struct dma_slave_config *config)
+static int jz4780_dma_config(struct dma_chan *chan,
+	struct dma_slave_config *config)
 {
+	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+
 	if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
 	   || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
 		return -EINVAL;
@@ -567,8 +581,8 @@
 		txstate->residue = 0;
 
 	if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
-		&& jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
-			status = DMA_ERROR;
+	    && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
+		status = DMA_ERROR;
 
 	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
 	return status;
@@ -671,7 +685,10 @@
 {
 	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
 	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
-	struct jz4780_dma_data *data = param;
+	struct jz4780_dma_filter_data *data = param;
+
+	if (jzdma->dma_device.dev->of_node != data->of_node)
+		return false;
 
 	if (data->channel > -1) {
 		if (data->channel != jzchan->id)
@@ -690,11 +707,12 @@
 {
 	struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
 	dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
-	struct jz4780_dma_data data;
+	struct jz4780_dma_filter_data data;
 
 	if (dma_spec->args_count != 2)
 		return NULL;
 
+	data.of_node = ofdma->of_node;
 	data.transfer_type = dma_spec->args[0];
 	data.channel = dma_spec->args[1];
 
@@ -713,9 +731,14 @@
 				data.channel);
 			return NULL;
 		}
-	}
 
-	return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
+		jzdma->chan[data.channel].transfer_type = data.transfer_type;
+
+		return dma_get_slave_channel(
+			&jzdma->chan[data.channel].vchan.chan);
+	} else {
+		return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
+	}
 }
 
 static int jz4780_dma_probe(struct platform_device *pdev)
@@ -743,23 +766,26 @@
 	if (IS_ERR(jzdma->base))
 		return PTR_ERR(jzdma->base);
 
-	jzdma->irq = platform_get_irq(pdev, 0);
-	if (jzdma->irq < 0) {
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
 		dev_err(dev, "failed to get IRQ: %d\n", ret);
-		return jzdma->irq;
+		return ret;
 	}
 
-	ret = devm_request_irq(dev, jzdma->irq, jz4780_dma_irq_handler, 0,
-			       dev_name(dev), jzdma);
+	jzdma->irq = ret;
+
+	ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
+			  jzdma);
 	if (ret) {
 		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
-		return -EINVAL;
+		return ret;
 	}
 
 	jzdma->clk = devm_clk_get(dev, NULL);
 	if (IS_ERR(jzdma->clk)) {
 		dev_err(dev, "failed to get clock\n");
-		return PTR_ERR(jzdma->clk);
+		ret = PTR_ERR(jzdma->clk);
+		goto err_free_irq;
 	}
 
 	clk_prepare_enable(jzdma->clk);
@@ -775,13 +801,13 @@
 	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
 
 	dd->dev = dev;
-	dd->copy_align = 2; /* 2^2 = 4 byte alignment */
+	dd->copy_align = DMAENGINE_ALIGN_4_BYTES;
 	dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
 	dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
 	dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
 	dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
 	dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
-	dd->device_config = jz4780_dma_slave_config;
+	dd->device_config = jz4780_dma_config;
 	dd->device_terminate_all = jz4780_dma_terminate_all;
 	dd->device_tx_status = jz4780_dma_tx_status;
 	dd->device_issue_pending = jz4780_dma_issue_pending;
@@ -790,7 +816,6 @@
 	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 
-
 	/*
 	 * Enable DMA controller, mark all channels as not programmable.
 	 * Also set the FMSC bit - it increases MSC performance, so it makes
@@ -832,15 +857,24 @@
 
 err_disable_clk:
 	clk_disable_unprepare(jzdma->clk);
+
+err_free_irq:
+	free_irq(jzdma->irq, jzdma);
 	return ret;
 }
 
 static int jz4780_dma_remove(struct platform_device *pdev)
 {
 	struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
+	int i;
 
 	of_dma_controller_free(pdev->dev.of_node);
-	devm_free_irq(&pdev->dev, jzdma->irq, jzdma);
+
+	free_irq(jzdma->irq, jzdma);
+
+	for (i = 0; i < JZ_DMA_NR_CHANNELS; i++)
+		tasklet_kill(&jzdma->chan[i].vchan.task);
+
 	dma_async_device_unregister(&jzdma->dma_device);
 	return 0;
 }
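
The OR-trick fed to jz4780_dma_transfer_size() above works because the lowest
set bit of addr | len | (width * maxburst) is the largest power of two that
divides all three constraints at once, so a single ffs() yields the widest
legal transfer unit. A standalone illustration:

	#include <linux/bitops.h>

	/* Largest power-of-two shift dividing address, length and burst. */
	static unsigned int max_pow2_shift(unsigned long addr, unsigned long len,
					   unsigned long burst_bytes)
	{
		return ffs(addr | len | burst_bytes) - 1;
	}

	/* e.g. addr=0x1008, len=0x30, burst=16: OR=0x1038 -> shift 3 (8 bytes) */
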
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index 36e02f0..e00c9b0 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -6,6 +6,9 @@
 	tristate
 	select DMA_ENGINE
 
+config DW_DMAC_BIG_ENDIAN_IO
+	bool
+
 config DW_DMAC
 	tristate "Synopsys DesignWare AHB DMA platform driver"
 	select DW_DMAC_CORE
@@ -23,6 +26,3 @@
 	  Support the Synopsys DesignWare AHB DMA controller on the
 	  platforms that enumerate it as a PCI device. For example,
 	  Intel Medfield has integrated this GPDMA controller.
-
-config DW_DMAC_BIG_ENDIAN_IO
-	bool
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 88853af..3e5d4f1 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1000,7 +1000,7 @@
 	 * code using dma memcpy must make sure alignment of
 	 * length is at dma->copy_align boundary.
 	 */
-	dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	dma->copy_align = DMAENGINE_ALIGN_4_BYTES;
 
 	INIT_LIST_HEAD(&dma->channels);
 }
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index f42f71e..7669c7d 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -99,21 +99,13 @@
 
 static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
 	hsu_chan_disable(hsuc);
 	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
 }
 
 static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
 	hsu_dma_chan_start(hsuc);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
 }
 
 static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
@@ -139,9 +131,9 @@
 	unsigned long flags;
 	u32 sr;
 
-	spin_lock_irqsave(&hsuc->lock, flags);
+	spin_lock_irqsave(&hsuc->vchan.lock, flags);
 	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
+	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 
 	return sr;
 }
@@ -273,14 +265,11 @@
 	struct hsu_dma_desc *desc = hsuc->desc;
 	size_t bytes = hsu_dma_desc_size(desc);
 	int i;
-	unsigned long flags;
 
-	spin_lock_irqsave(&hsuc->lock, flags);
 	i = desc->active % HSU_DMA_CHAN_NR_DESC;
 	do {
 		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
 	} while (--i >= 0);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
 
 	return bytes;
 }
@@ -327,24 +316,6 @@
 	return 0;
 }
 
-static void hsu_dma_chan_deactivate(struct hsu_dma_chan *hsuc)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
-	hsu_chan_disable(hsuc);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
-}
-
-static void hsu_dma_chan_activate(struct hsu_dma_chan *hsuc)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
-	hsu_chan_enable(hsuc);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
-}
-
 static int hsu_dma_pause(struct dma_chan *chan)
 {
 	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
@@ -352,7 +323,7 @@
 
 	spin_lock_irqsave(&hsuc->vchan.lock, flags);
 	if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
-		hsu_dma_chan_deactivate(hsuc);
+		hsu_chan_disable(hsuc);
 		hsuc->desc->status = DMA_PAUSED;
 	}
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
@@ -368,7 +339,7 @@
 	spin_lock_irqsave(&hsuc->vchan.lock, flags);
 	if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
 		hsuc->desc->status = DMA_IN_PROGRESS;
-		hsu_dma_chan_activate(hsuc);
+		hsu_chan_enable(hsuc);
 	}
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 
@@ -441,8 +412,6 @@
 
 		hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
 		hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
-
-		spin_lock_init(&hsuc->lock);
 	}
 
 	dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 0275233..eeb9fff 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -78,7 +78,6 @@
 	struct virt_dma_chan vchan;
 
 	void __iomem *reg;
-	spinlock_t lock;
 
 	/* hardware configuration */
 	enum dma_transfer_direction direction;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 865501fc..48d85f8 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -1083,8 +1083,12 @@
 	if (IS_ERR(imxdma->dma_ahb))
 		return PTR_ERR(imxdma->dma_ahb);
 
-	clk_prepare_enable(imxdma->dma_ipg);
-	clk_prepare_enable(imxdma->dma_ahb);
+	ret = clk_prepare_enable(imxdma->dma_ipg);
+	if (ret)
+		return ret;
+	ret = clk_prepare_enable(imxdma->dma_ahb);
+	if (ret)
+		goto disable_dma_ipg_clk;
 
 	/* reset DMA module */
 	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
@@ -1094,20 +1098,20 @@
 				       dma_irq_handler, 0, "DMA", imxdma);
 		if (ret) {
 			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
-			goto err;
+			goto disable_dma_ahb_clk;
 		}
 
 		irq_err = platform_get_irq(pdev, 1);
 		if (irq_err < 0) {
 			ret = irq_err;
-			goto err;
+			goto disable_dma_ahb_clk;
 		}
 
 		ret = devm_request_irq(&pdev->dev, irq_err,
 				       imxdma_err_handler, 0, "DMA", imxdma);
 		if (ret) {
 			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
-			goto err;
+			goto disable_dma_ahb_clk;
 		}
 	}
 
@@ -1144,7 +1148,7 @@
 				dev_warn(imxdma->dev, "Can't register IRQ %d "
 					 "for DMA channel %d\n",
 					 irq + i, i);
-				goto err;
+				goto disable_dma_ahb_clk;
 			}
 			init_timer(&imxdmac->watchdog);
 			imxdmac->watchdog.function = &imxdma_watchdog;
@@ -1183,14 +1187,14 @@
 
 	platform_set_drvdata(pdev, imxdma);
 
-	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
+	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
 	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
 	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
 
 	ret = dma_async_device_register(&imxdma->dma_device);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to register\n");
-		goto err;
+		goto disable_dma_ahb_clk;
 	}
 
 	if (pdev->dev.of_node) {
@@ -1206,9 +1210,10 @@
 
 err_of_dma_controller:
 	dma_async_device_unregister(&imxdma->dma_device);
-err:
-	clk_disable_unprepare(imxdma->dma_ipg);
+disable_dma_ahb_clk:
 	clk_disable_unprepare(imxdma->dma_ahb);
+disable_dma_ipg_clk:
+	clk_disable_unprepare(imxdma->dma_ipg);
 	return ret;
 }
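
The imx-dma hunk above establishes the usual probe unwind pattern: check
clk_prepare_enable() for failure, and on any later error disable the clocks in
the reverse order they were enabled, each goto label undoing only what has
already succeeded. A hedged sketch of the shape (names are illustrative):

	static int example_enable_clks(struct clk *ipg, struct clk *ahb)
	{
		int ret;

		ret = clk_prepare_enable(ipg);
		if (ret)
			return ret;		/* nothing to unwind yet */

		ret = clk_prepare_enable(ahb);
		if (ret)
			goto disable_ipg;

		return 0;

	disable_ipg:
		clk_disable_unprepare(ipg);
		return ret;
	}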
 
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 77b6aab..9d375bc 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -35,12 +35,16 @@
 #include <linux/platform_device.h>
 #include <linux/dmaengine.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 
 #include <asm/irq.h>
 #include <linux/platform_data/dma-imx-sdma.h>
 #include <linux/platform_data/dma-imx.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 
 #include "dmaengine.h"
 
@@ -124,6 +128,56 @@
 #define CHANGE_ENDIANNESS   0x80
 
 /*
+ *  p_2_p watermark_level description
+ *	Bits		Name			Description
+ *	0-7		Lower WML		Lower watermark level
+ *	8		PS			1: Pad Swallowing
+ *						0: No Pad Swallowing
+ *	9		PA			1: Pad Adding
+ *						0: No Pad Adding
+ *	10		SPDIF			If this bit is set both source
+ *						and destination are on SPBA
+ *	11		Source Bit(SP)		1: Source on SPBA
+ *						0: Source on AIPS
+ *	12		Destination Bit(DP)	1: Destination on SPBA
+ *						0: Destination on AIPS
+ *	13-15		---------		MUST BE 0
+ *	16-23		Higher WML		HWML
+ *	24-27		N			Total number of samples after
+ *						which Pad adding/Swallowing
+ *						must be done. It must be odd.
+ *	28		Lower WML Event(LWE)	SDMA events reg to check for
+ *						LWML event mask
+ *						0: LWE in EVENTS register
+ *						1: LWE in EVENTS2 register
+ *	29		Higher WML Event(HWE)	SDMA events reg to check for
+ *						HWML event mask
+ *						0: HWE in EVENTS register
+ *						1: HWE in EVENTS2 register
+ *	30		---------		MUST BE 0
+ *	31		CONT			1: Amount of samples to be
+ *						transferred is unknown and
+ *						script will keep on
+ *						transferring samples as long as
+ *						both events are detected and
+ *						script must be manually stopped
+ *						by the application
+ *						0: The amount of samples to be
+ *						transferred is equal to the
+ *						count field of mode word
+ */
+#define SDMA_WATERMARK_LEVEL_LWML	0xFF
+#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
+#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
+#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
+#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
+#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
+#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
+#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
+#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
+#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
+
+/*
  * Mode/Count of data node descriptors - IPCv2
  */
 struct sdma_mode_count {
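
Read against the bit layout above, the two watermark levels are simply the
source and destination burst sizes packed into bits 0-7 and 16-23. An
illustrative helper (the function name is made up; the driver open-codes this
in its slave-config path):

	static unsigned long sdma_pack_p2p_wml(u32 src_maxburst, u32 dst_maxburst)
	{
		unsigned long wml;

		wml  = src_maxburst & SDMA_WATERMARK_LEVEL_LWML;	 /* bits 0-7 */
		wml |= (dst_maxburst << 16) & SDMA_WATERMARK_LEVEL_HWML; /* bits 16-23 */

		return wml;
	}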
@@ -259,8 +313,9 @@
 	struct sdma_buffer_descriptor	*bd;
 	dma_addr_t			bd_phys;
 	unsigned int			pc_from_device, pc_to_device;
+	unsigned int			device_to_device;
 	unsigned long			flags;
-	dma_addr_t			per_address;
+	dma_addr_t			per_address, per_address2;
 	unsigned long			event_mask[2];
 	unsigned long			watermark_level;
 	u32				shp_addr, per_addr;
@@ -328,6 +383,8 @@
 	u32				script_number;
 	struct sdma_script_start_addrs	*script_addrs;
 	const struct sdma_driver_data	*drvdata;
+	u32				spba_start_addr;
+	u32				spba_end_addr;
 };
 
 static struct sdma_driver_data sdma_imx31 = {
@@ -705,6 +762,7 @@
 
 	sdmac->pc_from_device = 0;
 	sdmac->pc_to_device = 0;
+	sdmac->device_to_device = 0;
 
 	switch (peripheral_type) {
 	case IMX_DMATYPE_MEMORY:
@@ -780,6 +838,7 @@
 
 	sdmac->pc_from_device = per_2_emi;
 	sdmac->pc_to_device = emi_2_per;
+	sdmac->device_to_device = per_2_per;
 }
 
 static int sdma_load_context(struct sdma_channel *sdmac)
@@ -792,11 +851,12 @@
 	int ret;
 	unsigned long flags;
 
-	if (sdmac->direction == DMA_DEV_TO_MEM) {
+	if (sdmac->direction == DMA_DEV_TO_MEM)
 		load_address = sdmac->pc_from_device;
-	} else {
+	else if (sdmac->direction == DMA_DEV_TO_DEV)
+		load_address = sdmac->device_to_device;
+	else
 		load_address = sdmac->pc_to_device;
-	}
 
 	if (load_address < 0)
 		return load_address;
@@ -851,6 +911,46 @@
 	return 0;
 }
 
+static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
+{
+	struct sdma_engine *sdma = sdmac->sdma;
+
+	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
+	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;
+
+	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
+	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);
+
+	if (sdmac->event_id0 > 31)
+		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;
+
+	if (sdmac->event_id1 > 31)
+		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;
+
+	/*
+	 * If LWML(src_maxburst) > HWML(dst_maxburst), we need
+	 * swap LWML and HWML of INFO(A.3.2.5.1), also need swap
+	 * r0(event_mask[1]) and r1(event_mask[0]).
+	 */
+	if (lwml > hwml) {
+		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
+						SDMA_WATERMARK_LEVEL_HWML);
+		sdmac->watermark_level |= hwml;
+		sdmac->watermark_level |= lwml << 16;
+		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
+	}
+
+	if (sdmac->per_address2 >= sdma->spba_start_addr &&
+			sdmac->per_address2 <= sdma->spba_end_addr)
+		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;
+
+	if (sdmac->per_address >= sdma->spba_start_addr &&
+			sdmac->per_address <= sdma->spba_end_addr)
+		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
+
+	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
+}
+
 static int sdma_config_channel(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
@@ -869,6 +969,12 @@
 		sdma_event_enable(sdmac, sdmac->event_id0);
 	}
 
+	if (sdmac->event_id1) {
+		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
+			return -EINVAL;
+		sdma_event_enable(sdmac, sdmac->event_id1);
+	}
+
 	switch (sdmac->peripheral_type) {
 	case IMX_DMATYPE_DSP:
 		sdma_config_ownership(sdmac, false, true, true);
@@ -887,19 +993,17 @@
 			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
 		/* Handle multiple event channels differently */
 		if (sdmac->event_id1) {
-			sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32);
-			if (sdmac->event_id1 > 31)
-				__set_bit(31, &sdmac->watermark_level);
-			sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32);
-			if (sdmac->event_id0 > 31)
-				__set_bit(30, &sdmac->watermark_level);
-		} else {
+			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
+			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
+				sdma_set_watermarklevel_for_p2p(sdmac);
+		} else
 			__set_bit(sdmac->event_id0, sdmac->event_mask);
-		}
+
 		/* Watermark Level */
 		sdmac->watermark_level |= sdmac->watermark_level;
 		/* Address */
 		sdmac->shp_addr = sdmac->per_address;
+		sdmac->per_addr = sdmac->per_address2;
 	} else {
 		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
 	}
@@ -987,17 +1091,22 @@
 
 	sdmac->peripheral_type = data->peripheral_type;
 	sdmac->event_id0 = data->dma_request;
+	sdmac->event_id1 = data->dma_request2;
 
-	clk_enable(sdmac->sdma->clk_ipg);
-	clk_enable(sdmac->sdma->clk_ahb);
+	ret = clk_enable(sdmac->sdma->clk_ipg);
+	if (ret)
+		return ret;
+	ret = clk_enable(sdmac->sdma->clk_ahb);
+	if (ret)
+		goto disable_clk_ipg;
 
 	ret = sdma_request_channel(sdmac);
 	if (ret)
-		return ret;
+		goto disable_clk_ahb;
 
 	ret = sdma_set_channel_priority(sdmac, prio);
 	if (ret)
-		return ret;
+		goto disable_clk_ahb;
 
 	dma_async_tx_descriptor_init(&sdmac->desc, chan);
 	sdmac->desc.tx_submit = sdma_tx_submit;
@@ -1005,6 +1114,12 @@
 	sdmac->desc.flags = DMA_CTRL_ACK;
 
 	return 0;
+
+disable_clk_ahb:
+	clk_disable(sdmac->sdma->clk_ahb);
+disable_clk_ipg:
+	clk_disable(sdmac->sdma->clk_ipg);
+	return ret;
 }
 
 static void sdma_free_chan_resources(struct dma_chan *chan)
@@ -1221,6 +1336,14 @@
 		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
 			dmaengine_cfg->src_addr_width;
 		sdmac->word_size = dmaengine_cfg->src_addr_width;
+	} else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
+		sdmac->per_address2 = dmaengine_cfg->src_addr;
+		sdmac->per_address = dmaengine_cfg->dst_addr;
+		sdmac->watermark_level = dmaengine_cfg->src_maxburst &
+			SDMA_WATERMARK_LEVEL_LWML;
+		sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
+			SDMA_WATERMARK_LEVEL_HWML;
+		sdmac->word_size = dmaengine_cfg->dst_addr_width;
 	} else {
 		sdmac->per_address = dmaengine_cfg->dst_addr;
 		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
@@ -1337,6 +1460,72 @@
 	release_firmware(fw);
 }
 
+#define EVENT_REMAP_CELLS 3
+
+static int __init sdma_event_remap(struct sdma_engine *sdma)
+{
+	struct device_node *np = sdma->dev->of_node;
+	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
+	struct property *event_remap;
+	struct regmap *gpr;
+	char propname[] = "fsl,sdma-event-remap";
+	u32 reg, val, shift, num_map, i;
+	int ret = 0;
+
+	if (IS_ERR(np) || IS_ERR(gpr_np))
+		goto out;
+
+	event_remap = of_find_property(np, propname, NULL);
+	num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
+	if (!num_map) {
+		dev_warn(sdma->dev, "no event needs to be remapped\n");
+		goto out;
+	} else if (num_map % EVENT_REMAP_CELLS) {
+		dev_err(sdma->dev, "the property %s must be a multiple of %d\n",
+				propname, EVENT_REMAP_CELLS);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	gpr = syscon_node_to_regmap(gpr_np);
+	if (IS_ERR(gpr)) {
+		dev_err(sdma->dev, "failed to get gpr regmap\n");
+		ret = PTR_ERR(gpr);
+		goto out;
+	}
+
+	for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
+		ret = of_property_read_u32_index(np, propname, i, &reg);
+		if (ret) {
+			dev_err(sdma->dev, "failed to read property %s index %d\n",
+					propname, i);
+			goto out;
+		}
+
+		ret = of_property_read_u32_index(np, propname, i + 1, &shift);
+		if (ret) {
+			dev_err(sdma->dev, "failed to read property %s index %d\n",
+					propname, i + 1);
+			goto out;
+		}
+
+		ret = of_property_read_u32_index(np, propname, i + 2, &val);
+		if (ret) {
+			dev_err(sdma->dev, "failed to read property %s index %d\n",
+					propname, i + 2);
+			goto out;
+		}
+
+		regmap_update_bits(gpr, reg, BIT(shift), val << shift);
+	}
+
+out:
+	if (!IS_ERR(gpr_np))
+		of_node_put(gpr_np);
+
+	return ret;
+}
+
 static int sdma_get_firmware(struct sdma_engine *sdma,
 		const char *fw_name)
 {
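
Each "fsl,sdma-event-remap" triplet consumed by sdma_event_remap() above is
<reg shift val>: a GPR register offset, a bit position, and the value to put
there, so applying one triplet reduces to a single masked update. The numbers
below are hypothetical, not taken from a real devicetree, and gpr is the
regmap returned by syscon_node_to_regmap() above:

	u32 reg = 0x0c, shift = 13, val = 1;	/* hypothetical example triplet */

	/* sets (or clears, for val == 0) exactly one bit in the GPR register */
	regmap_update_bits(gpr, reg, BIT(shift), val << shift);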
@@ -1354,8 +1543,12 @@
 	int i, ret;
 	dma_addr_t ccb_phys;
 
-	clk_enable(sdma->clk_ipg);
-	clk_enable(sdma->clk_ahb);
+	ret = clk_enable(sdma->clk_ipg);
+	if (ret)
+		return ret;
+	ret = clk_enable(sdma->clk_ahb);
+	if (ret)
+		goto disable_clk_ipg;
 
 	/* Be sure SDMA has not started yet */
 	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
@@ -1411,8 +1604,9 @@
 	return 0;
 
 err_dma_alloc:
-	clk_disable(sdma->clk_ipg);
 	clk_disable(sdma->clk_ahb);
+disable_clk_ipg:
+	clk_disable(sdma->clk_ipg);
 	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
 	return ret;
 }
@@ -1444,6 +1638,14 @@
 	data.dma_request = dma_spec->args[0];
 	data.peripheral_type = dma_spec->args[1];
 	data.priority = dma_spec->args[2];
+	/*
+	 * Initialize dma_request2 to zero; it is not used by the dts.
+	 * For P2P, dma_request2 is initialized via dma_request_channel():
+	 * chan->private will point to the imx_dma_data, and in
+	 * device_alloc_chan_resources() imx_dma_data.dma_request2 will
+	 * be copied to sdmac->event_id1.
+	 */
+	data.dma_request2 = 0;
 
 	return dma_request_channel(mask, sdma_filter_fn, &data);
 }
@@ -1453,10 +1655,12 @@
 	const struct of_device_id *of_id =
 			of_match_device(sdma_dt_ids, &pdev->dev);
 	struct device_node *np = pdev->dev.of_node;
+	struct device_node *spba_bus;
 	const char *fw_name;
 	int ret;
 	int irq;
 	struct resource *iores;
+	struct resource spba_res;
 	struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	int i;
 	struct sdma_engine *sdma;
@@ -1551,6 +1755,10 @@
 	if (ret)
 		goto err_init;
 
+	ret = sdma_event_remap(sdma);
+	if (ret)
+		goto err_init;
+
 	if (sdma->drvdata->script_addrs)
 		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
 	if (pdata && pdata->script_addrs)
@@ -1608,6 +1816,14 @@
 			dev_err(&pdev->dev, "failed to register controller\n");
 			goto err_register;
 		}
+
+		spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
+		ret = of_address_to_resource(spba_bus, 0, &spba_res);
+		if (!ret) {
+			sdma->spba_start_addr = spba_res.start;
+			sdma->spba_end_addr = spba_res.end;
+		}
+		of_node_put(spba_bus);
 	}
 
 	dev_info(sdma->dev, "initialized\n");
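
From a client's point of view the new DMA_DEV_TO_DEV path is driven through
the standard dmaengine slave-config call: per the config hunk above,
src_maxburst becomes the lower watermark and dst_maxburst the higher one. A
hedged sketch, where chan is a channel previously obtained from the dmaengine
core and the addresses and burst sizes are placeholders:

	int ret;
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_DEV,
		.src_addr	= 0x02024000,	/* hypothetical SPBA peripheral FIFO */
		.dst_addr	= 0x02028000,	/* hypothetical SPBA peripheral FIFO */
		.src_maxburst	= 4,		/* packed into the lower WML */
		.dst_maxburst	= 8,		/* packed into the higher WML */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};

	ret = dmaengine_slave_config(chan, &cfg);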
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile
index 0ff7270..cf5fedb 100644
--- a/drivers/dma/ioat/Makefile
+++ b/drivers/dma/ioat/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
-ioatdma-y := pci.o dma.o dma_v2.o dma_v3.o dca.o
+ioatdma-y := init.o dma.o prep.o dca.o sysfs.o
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index ea1e107..2cb7c30 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -31,7 +31,6 @@
 
 #include "dma.h"
 #include "registers.h"
-#include "dma_v2.h"
 
 /*
  * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
@@ -71,14 +70,6 @@
 #define APICID_BIT(x)		(DCA_TAG_MAP_VALID | (x))
 #define IOAT_TAG_MAP_LEN	8
 
-static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = {
-	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
-static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = {
-	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
-static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = {
-	1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
-static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 };
-
 /* pack PCI B/D/F into a u16 */
 static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
 {
@@ -126,96 +117,6 @@
 	struct ioat_dca_slot 	 req_slots[0];
 };
 
-/* 5000 series chipset DCA Port Requester ID Table Entry Format
- * [15:8]	PCI-Express Bus Number
- * [7:3]	PCI-Express Device Number
- * [2:0]	PCI-Express Function Number
- *
- * 5000 series chipset DCA control register format
- * [7:1]	Reserved (0)
- * [0]		Ignore Function Number
- */
-
-static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	struct pci_dev *pdev;
-	int i;
-	u16 id;
-
-	/* This implementation only supports PCI-Express */
-	if (!dev_is_pci(dev))
-		return -ENODEV;
-	pdev = to_pci_dev(dev);
-	id = dcaid_from_pcidev(pdev);
-
-	if (ioatdca->requester_count == ioatdca->max_requesters)
-		return -ENODEV;
-
-	for (i = 0; i < ioatdca->max_requesters; i++) {
-		if (ioatdca->req_slots[i].pdev == NULL) {
-			/* found an empty slot */
-			ioatdca->requester_count++;
-			ioatdca->req_slots[i].pdev = pdev;
-			ioatdca->req_slots[i].rid = id;
-			writew(id, ioatdca->dca_base + (i * 4));
-			/* make sure the ignore function bit is off */
-			writeb(0, ioatdca->dca_base + (i * 4) + 2);
-			return i;
-		}
-	}
-	/* Error, ioatdma->requester_count is out of whack */
-	return -EFAULT;
-}
-
-static int ioat_dca_remove_requester(struct dca_provider *dca,
-				     struct device *dev)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	struct pci_dev *pdev;
-	int i;
-
-	/* This implementation only supports PCI-Express */
-	if (!dev_is_pci(dev))
-		return -ENODEV;
-	pdev = to_pci_dev(dev);
-
-	for (i = 0; i < ioatdca->max_requesters; i++) {
-		if (ioatdca->req_slots[i].pdev == pdev) {
-			writew(0, ioatdca->dca_base + (i * 4));
-			ioatdca->req_slots[i].pdev = NULL;
-			ioatdca->req_slots[i].rid = 0;
-			ioatdca->requester_count--;
-			return i;
-		}
-	}
-	return -ENODEV;
-}
-
-static u8 ioat_dca_get_tag(struct dca_provider *dca,
-			   struct device *dev,
-			   int cpu)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	int i, apic_id, bit, value;
-	u8 entry, tag;
-
-	tag = 0;
-	apic_id = cpu_physical_id(cpu);
-
-	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
-		entry = ioatdca->tag_map[i];
-		if (entry & DCA_TAG_MAP_VALID) {
-			bit = entry & ~DCA_TAG_MAP_VALID;
-			value = (apic_id & (1 << bit)) ? 1 : 0;
-		} else {
-			value = entry ? 1 : 0;
-		}
-		tag |= (value << i);
-	}
-	return tag;
-}
-
 static int ioat_dca_dev_managed(struct dca_provider *dca,
 				struct device *dev)
 {
@@ -231,260 +132,7 @@
 	return 0;
 }
 
-static struct dca_ops ioat_dca_ops = {
-	.add_requester		= ioat_dca_add_requester,
-	.remove_requester	= ioat_dca_remove_requester,
-	.get_tag		= ioat_dca_get_tag,
-	.dev_managed		= ioat_dca_dev_managed,
-};
-
-
-struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
-{
-	struct dca_provider *dca;
-	struct ioat_dca_priv *ioatdca;
-	u8 *tag_map = NULL;
-	int i;
-	int err;
-	u8 version;
-	u8 max_requesters;
-
-	if (!system_has_dca_enabled(pdev))
-		return NULL;
-
-	/* I/OAT v1 systems must have a known tag_map to support DCA */
-	switch (pdev->vendor) {
-	case PCI_VENDOR_ID_INTEL:
-		switch (pdev->device) {
-		case PCI_DEVICE_ID_INTEL_IOAT:
-			tag_map = ioat_tag_map_BNB;
-			break;
-		case PCI_DEVICE_ID_INTEL_IOAT_CNB:
-			tag_map = ioat_tag_map_CNB;
-			break;
-		case PCI_DEVICE_ID_INTEL_IOAT_SCNB:
-			tag_map = ioat_tag_map_SCNB;
-			break;
-		}
-		break;
-	case PCI_VENDOR_ID_UNISYS:
-		switch (pdev->device) {
-		case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR:
-			tag_map = ioat_tag_map_UNISYS;
-			break;
-		}
-		break;
-	}
-	if (tag_map == NULL)
-		return NULL;
-
-	version = readb(iobase + IOAT_VER_OFFSET);
-	if (version == IOAT_VER_3_0)
-		max_requesters = IOAT3_DCA_MAX_REQ;
-	else
-		max_requesters = IOAT_DCA_MAX_REQ;
-
-	dca = alloc_dca_provider(&ioat_dca_ops,
-			sizeof(*ioatdca) +
-			(sizeof(struct ioat_dca_slot) * max_requesters));
-	if (!dca)
-		return NULL;
-
-	ioatdca = dca_priv(dca);
-	ioatdca->max_requesters = max_requesters;
-	ioatdca->dca_base = iobase + 0x54;
-
-	/* copy over the APIC ID to DCA tag mapping */
-	for (i = 0; i < IOAT_TAG_MAP_LEN; i++)
-		ioatdca->tag_map[i] = tag_map[i];
-
-	err = register_dca_provider(dca, &pdev->dev);
-	if (err) {
-		free_dca_provider(dca);
-		return NULL;
-	}
-
-	return dca;
-}
-
-
-static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	struct pci_dev *pdev;
-	int i;
-	u16 id;
-	u16 global_req_table;
-
-	/* This implementation only supports PCI-Express */
-	if (!dev_is_pci(dev))
-		return -ENODEV;
-	pdev = to_pci_dev(dev);
-	id = dcaid_from_pcidev(pdev);
-
-	if (ioatdca->requester_count == ioatdca->max_requesters)
-		return -ENODEV;
-
-	for (i = 0; i < ioatdca->max_requesters; i++) {
-		if (ioatdca->req_slots[i].pdev == NULL) {
-			/* found an empty slot */
-			ioatdca->requester_count++;
-			ioatdca->req_slots[i].pdev = pdev;
-			ioatdca->req_slots[i].rid = id;
-			global_req_table =
-			      readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
-			writel(id | IOAT_DCA_GREQID_VALID,
-			       ioatdca->iobase + global_req_table + (i * 4));
-			return i;
-		}
-	}
-	/* Error, ioatdma->requester_count is out of whack */
-	return -EFAULT;
-}
-
-static int ioat2_dca_remove_requester(struct dca_provider *dca,
-				      struct device *dev)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	struct pci_dev *pdev;
-	int i;
-	u16 global_req_table;
-
-	/* This implementation only supports PCI-Express */
-	if (!dev_is_pci(dev))
-		return -ENODEV;
-	pdev = to_pci_dev(dev);
-
-	for (i = 0; i < ioatdca->max_requesters; i++) {
-		if (ioatdca->req_slots[i].pdev == pdev) {
-			global_req_table =
-			      readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
-			writel(0, ioatdca->iobase + global_req_table + (i * 4));
-			ioatdca->req_slots[i].pdev = NULL;
-			ioatdca->req_slots[i].rid = 0;
-			ioatdca->requester_count--;
-			return i;
-		}
-	}
-	return -ENODEV;
-}
-
-static u8 ioat2_dca_get_tag(struct dca_provider *dca,
-			    struct device *dev,
-			    int cpu)
-{
-	u8 tag;
-
-	tag = ioat_dca_get_tag(dca, dev, cpu);
-	tag = (~tag) & 0x1F;
-	return tag;
-}
-
-static struct dca_ops ioat2_dca_ops = {
-	.add_requester		= ioat2_dca_add_requester,
-	.remove_requester	= ioat2_dca_remove_requester,
-	.get_tag		= ioat2_dca_get_tag,
-	.dev_managed		= ioat_dca_dev_managed,
-};
-
-static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
-{
-	int slots = 0;
-	u32 req;
-	u16 global_req_table;
-
-	global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
-	if (global_req_table == 0)
-		return 0;
-	do {
-		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
-		slots++;
-	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);
-
-	return slots;
-}
-
-struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
-{
-	struct dca_provider *dca;
-	struct ioat_dca_priv *ioatdca;
-	int slots;
-	int i;
-	int err;
-	u32 tag_map;
-	u16 dca_offset;
-	u16 csi_fsb_control;
-	u16 pcie_control;
-	u8 bit;
-
-	if (!system_has_dca_enabled(pdev))
-		return NULL;
-
-	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
-	if (dca_offset == 0)
-		return NULL;
-
-	slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
-	if (slots == 0)
-		return NULL;
-
-	dca = alloc_dca_provider(&ioat2_dca_ops,
-				 sizeof(*ioatdca)
-				      + (sizeof(struct ioat_dca_slot) * slots));
-	if (!dca)
-		return NULL;
-
-	ioatdca = dca_priv(dca);
-	ioatdca->iobase = iobase;
-	ioatdca->dca_base = iobase + dca_offset;
-	ioatdca->max_requesters = slots;
-
-	/* some bios might not know to turn these on */
-	csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
-	if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
-		csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
-		writew(csi_fsb_control,
-		       ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
-	}
-	pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
-	if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
-		pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
-		writew(pcie_control,
-		       ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
-	}
-
-
-	/* TODO version, compatibility and configuration checks */
-
-	/* copy out the APIC to DCA tag map */
-	tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
-	for (i = 0; i < 5; i++) {
-		bit = (tag_map >> (4 * i)) & 0x0f;
-		if (bit < 8)
-			ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
-		else
-			ioatdca->tag_map[i] = 0;
-	}
-
-	if (!dca2_tag_map_valid(ioatdca->tag_map)) {
-		WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
-				"%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
-				dev_driver_string(&pdev->dev),
-				dev_name(&pdev->dev));
-		free_dca_provider(dca);
-		return NULL;
-	}
-
-	err = register_dca_provider(dca, &pdev->dev);
-	if (err) {
-		free_dca_provider(dca);
-		return NULL;
-	}
-
-	return dca;
-}
-
-static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
+static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
 {
 	struct ioat_dca_priv *ioatdca = dca_priv(dca);
 	struct pci_dev *pdev;
@@ -518,7 +166,7 @@
 	return -EFAULT;
 }
 
-static int ioat3_dca_remove_requester(struct dca_provider *dca,
+static int ioat_dca_remove_requester(struct dca_provider *dca,
 				      struct device *dev)
 {
 	struct ioat_dca_priv *ioatdca = dca_priv(dca);
@@ -545,7 +193,7 @@
 	return -ENODEV;
 }
 
-static u8 ioat3_dca_get_tag(struct dca_provider *dca,
+static u8 ioat_dca_get_tag(struct dca_provider *dca,
 			    struct device *dev,
 			    int cpu)
 {
@@ -576,14 +224,14 @@
 	return tag;
 }
 
-static struct dca_ops ioat3_dca_ops = {
-	.add_requester		= ioat3_dca_add_requester,
-	.remove_requester	= ioat3_dca_remove_requester,
-	.get_tag		= ioat3_dca_get_tag,
+static struct dca_ops ioat_dca_ops = {
+	.add_requester		= ioat_dca_add_requester,
+	.remove_requester	= ioat_dca_remove_requester,
+	.get_tag		= ioat_dca_get_tag,
 	.dev_managed		= ioat_dca_dev_managed,
 };
 
-static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
+static int ioat_dca_count_dca_slots(void *iobase, u16 dca_offset)
 {
 	int slots = 0;
 	u32 req;
@@ -618,7 +266,7 @@
 		(tag_map[4] == DCA_TAG_MAP_VALID));
 }
 
-struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
 {
 	struct dca_provider *dca;
 	struct ioat_dca_priv *ioatdca;
@@ -645,11 +293,11 @@
 	if (dca_offset == 0)
 		return NULL;
 
-	slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
+	slots = ioat_dca_count_dca_slots(iobase, dca_offset);
 	if (slots == 0)
 		return NULL;
 
-	dca = alloc_dca_provider(&ioat3_dca_ops,
+	dca = alloc_dca_provider(&ioat_dca_ops,
 				 sizeof(*ioatdca)
 				      + (sizeof(struct ioat_dca_slot) * slots));
 	if (!dca)
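
The deleted first-generation ioat_dca_get_tag() above shows the tag
computation in its simplest form: each tag-map entry either selects one
APIC-ID bit (when the valid bit is set) or contributes a constant, and the
renamed variant that survives follows the same scheme. A simplified
restatement of that loop (the helper name is made up):

	static u8 dca_tag_from_apic_id(const u8 *tag_map, int apic_id)
	{
		u8 tag = 0;
		int i;

		for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
			u8 entry = tag_map[i];
			int value;

			if (entry & DCA_TAG_MAP_VALID)
				/* pick out the selected APIC-ID bit */
				value = (apic_id >> (entry & ~DCA_TAG_MAP_VALID)) & 1;
			else
				/* non-valid entries contribute a constant */
				value = entry ? 1 : 0;

			tag |= value << i;
		}

		return tag;
	}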
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index ee0aa9f..f66b7e6 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -1,6 +1,6 @@
 /*
  * Intel I/OAT DMA Linux driver
- * Copyright(c) 2004 - 2009 Intel Corporation.
+ * Copyright(c) 2004 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -31,31 +31,23 @@
 #include <linux/dma-mapping.h>
 #include <linux/workqueue.h>
 #include <linux/prefetch.h>
-#include <linux/i7300_idle.h>
 #include "dma.h"
 #include "registers.h"
 #include "hw.h"
 
 #include "../dmaengine.h"
 
-int ioat_pending_level = 4;
-module_param(ioat_pending_level, int, 0644);
-MODULE_PARM_DESC(ioat_pending_level,
-		 "high-water mark for pushing ioat descriptors (default: 4)");
-
-/* internal functions */
-static void ioat1_cleanup(struct ioat_dma_chan *ioat);
-static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
+static void ioat_eh(struct ioatdma_chan *ioat_chan);
 
 /**
  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
  * @irq: interrupt id
  * @data: interrupt data
  */
-static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
+irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
 {
 	struct ioatdma_device *instance = data;
-	struct ioat_chan_common *chan;
+	struct ioatdma_chan *ioat_chan;
 	unsigned long attnstatus;
 	int bit;
 	u8 intrctrl;
@@ -72,9 +64,9 @@
 
 	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
 	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
-		chan = ioat_chan_by_index(instance, bit);
-		if (test_bit(IOAT_RUN, &chan->state))
-			tasklet_schedule(&chan->cleanup_task);
+		ioat_chan = ioat_chan_by_index(instance, bit);
+		if (test_bit(IOAT_RUN, &ioat_chan->state))
+			tasklet_schedule(&ioat_chan->cleanup_task);
 	}
 
 	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
@@ -86,294 +78,32 @@
  * @irq: interrupt id
  * @data: interrupt data
  */
-static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
+irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
 {
-	struct ioat_chan_common *chan = data;
+	struct ioatdma_chan *ioat_chan = data;
 
-	if (test_bit(IOAT_RUN, &chan->state))
-		tasklet_schedule(&chan->cleanup_task);
+	if (test_bit(IOAT_RUN, &ioat_chan->state))
+		tasklet_schedule(&ioat_chan->cleanup_task);
 
 	return IRQ_HANDLED;
 }
 
-/* common channel initialization */
-void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
+void ioat_stop(struct ioatdma_chan *ioat_chan)
 {
-	struct dma_device *dma = &device->common;
-	struct dma_chan *c = &chan->common;
-	unsigned long data = (unsigned long) c;
-
-	chan->device = device;
-	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
-	spin_lock_init(&chan->cleanup_lock);
-	chan->common.device = dma;
-	dma_cookie_init(&chan->common);
-	list_add_tail(&chan->common.device_node, &dma->channels);
-	device->idx[idx] = chan;
-	init_timer(&chan->timer);
-	chan->timer.function = device->timer_fn;
-	chan->timer.data = data;
-	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
-}
-
-/**
- * ioat1_dma_enumerate_channels - find and initialize the device's channels
- * @device: the device to be enumerated
- */
-static int ioat1_enumerate_channels(struct ioatdma_device *device)
-{
-	u8 xfercap_scale;
-	u32 xfercap;
-	int i;
-	struct ioat_dma_chan *ioat;
-	struct device *dev = &device->pdev->dev;
-	struct dma_device *dma = &device->common;
-
-	INIT_LIST_HEAD(&dma->channels);
-	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
-	dma->chancnt &= 0x1f; /* bits [4:0] valid */
-	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
-		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
-			 dma->chancnt, ARRAY_SIZE(device->idx));
-		dma->chancnt = ARRAY_SIZE(device->idx);
-	}
-	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
-	xfercap_scale &= 0x1f; /* bits [4:0] valid */
-	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
-	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);
-
-#ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
-	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
-		dma->chancnt--;
-#endif
-	for (i = 0; i < dma->chancnt; i++) {
-		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
-		if (!ioat)
-			break;
-
-		ioat_init_channel(device, &ioat->base, i);
-		ioat->xfercap = xfercap;
-		spin_lock_init(&ioat->desc_lock);
-		INIT_LIST_HEAD(&ioat->free_desc);
-		INIT_LIST_HEAD(&ioat->used_desc);
-	}
-	dma->chancnt = i;
-	return i;
-}
-
-/**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
- *                                 descriptors to hw
- * @chan: DMA channel handle
- */
-static inline void
-__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
-{
-	void __iomem *reg_base = ioat->base.reg_base;
-
-	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
-		__func__, ioat->pending);
-	ioat->pending = 0;
-	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
-}
-
-static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(chan);
-
-	if (ioat->pending > 0) {
-		spin_lock_bh(&ioat->desc_lock);
-		__ioat1_dma_memcpy_issue_pending(ioat);
-		spin_unlock_bh(&ioat->desc_lock);
-	}
-}
-
-/**
- * ioat1_reset_channel - restart a channel
- * @ioat: IOAT DMA channel handle
- */
-static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	void __iomem *reg_base = chan->reg_base;
-	u32 chansts, chanerr;
-
-	dev_warn(to_dev(chan), "reset\n");
-	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
-	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
-	if (chanerr) {
-		dev_err(to_dev(chan),
-			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
-			chan_num(chan), chansts, chanerr);
-		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
-	}
-
-	/*
-	 * whack it upside the head with a reset
-	 * and wait for things to settle out.
-	 * force the pending count to a really big negative
-	 * to make sure no one forces an issue_pending
-	 * while we're waiting.
-	 */
-
-	ioat->pending = INT_MIN;
-	writeb(IOAT_CHANCMD_RESET,
-	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
-	set_bit(IOAT_RESET_PENDING, &chan->state);
-	mod_timer(&chan->timer, jiffies + RESET_DELAY);
-}
-
-static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-	struct dma_chan *c = tx->chan;
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioat_desc_sw *first;
-	struct ioat_desc_sw *chain_tail;
-	dma_cookie_t cookie;
-
-	spin_lock_bh(&ioat->desc_lock);
-	/* cookie incr and addition to used_list must be atomic */
-	cookie = dma_cookie_assign(tx);
-	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
-
-	/* write address into NextDescriptor field of last desc in chain */
-	first = to_ioat_desc(desc->tx_list.next);
-	chain_tail = to_ioat_desc(ioat->used_desc.prev);
-	/* make descriptor updates globally visible before chaining */
-	wmb();
-	chain_tail->hw->next = first->txd.phys;
-	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
-	dump_desc_dbg(ioat, chain_tail);
-	dump_desc_dbg(ioat, first);
-
-	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	ioat->active += desc->hw->tx_cnt;
-	ioat->pending += desc->hw->tx_cnt;
-	if (ioat->pending >= ioat_pending_level)
-		__ioat1_dma_memcpy_issue_pending(ioat);
-	spin_unlock_bh(&ioat->desc_lock);
-
-	return cookie;
-}
-
-/**
- * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
- * @ioat: the channel supplying the memory pool for the descriptors
- * @flags: allocation flags
- */
-static struct ioat_desc_sw *
-ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
-{
-	struct ioat_dma_descriptor *desc;
-	struct ioat_desc_sw *desc_sw;
-	struct ioatdma_device *ioatdma_device;
-	dma_addr_t phys;
-
-	ioatdma_device = ioat->base.device;
-	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
-	if (unlikely(!desc))
-		return NULL;
-
-	desc_sw = kzalloc(sizeof(*desc_sw), flags);
-	if (unlikely(!desc_sw)) {
-		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
-		return NULL;
-	}
-
-	memset(desc, 0, sizeof(*desc));
-
-	INIT_LIST_HEAD(&desc_sw->tx_list);
-	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
-	desc_sw->txd.tx_submit = ioat1_tx_submit;
-	desc_sw->hw = desc;
-	desc_sw->txd.phys = phys;
-	set_desc_id(desc_sw, -1);
-
-	return desc_sw;
-}
-
-static int ioat_initial_desc_count = 256;
-module_param(ioat_initial_desc_count, int, 0644);
-MODULE_PARM_DESC(ioat_initial_desc_count,
-		 "ioat1: initial descriptors per channel (default: 256)");
-/**
- * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
- * @chan: the channel to be filled out
- */
-static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioat_desc_sw *desc;
-	u32 chanerr;
-	int i;
-	LIST_HEAD(tmp_list);
-
-	/* have we already been set up? */
-	if (!list_empty(&ioat->free_desc))
-		return ioat->desccount;
-
-	/* Setup register to interrupt and write completion status on error */
-	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
-
-	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-	if (chanerr) {
-		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
-		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-	}
-
-	/* Allocate descriptors */
-	for (i = 0; i < ioat_initial_desc_count; i++) {
-		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
-		if (!desc) {
-			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
-			break;
-		}
-		set_desc_id(desc, i);
-		list_add_tail(&desc->node, &tmp_list);
-	}
-	spin_lock_bh(&ioat->desc_lock);
-	ioat->desccount = i;
-	list_splice(&tmp_list, &ioat->free_desc);
-	spin_unlock_bh(&ioat->desc_lock);
-
-	/* allocate a completion writeback area */
-	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
-	chan->completion = pci_pool_alloc(chan->device->completion_pool,
-					  GFP_KERNEL, &chan->completion_dma);
-	memset(chan->completion, 0, sizeof(*chan->completion));
-	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
-	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
-	writel(((u64) chan->completion_dma) >> 32,
-	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
-
-	set_bit(IOAT_RUN, &chan->state);
-	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
-	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
-		__func__, ioat->desccount);
-	return ioat->desccount;
-}
-
-void ioat_stop(struct ioat_chan_common *chan)
-{
-	struct ioatdma_device *device = chan->device;
-	struct pci_dev *pdev = device->pdev;
-	int chan_id = chan_num(chan);
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct pci_dev *pdev = ioat_dma->pdev;
+	int chan_id = chan_num(ioat_chan);
 	struct msix_entry *msix;
 
 	/* 1/ stop irq from firing tasklets
 	 * 2/ stop the tasklet from re-arming irqs
 	 */
-	clear_bit(IOAT_RUN, &chan->state);
+	clear_bit(IOAT_RUN, &ioat_chan->state);
 
 	/* flush inflight interrupts */
-	switch (device->irq_mode) {
+	switch (ioat_dma->irq_mode) {
 	case IOAT_MSIX:
-		msix = &device->msix_entries[chan_id];
+		msix = &ioat_dma->msix_entries[chan_id];
 		synchronize_irq(msix->vector);
 		break;
 	case IOAT_MSI:
@@ -385,398 +115,67 @@
 	}
 
 	/* flush inflight timers */
-	del_timer_sync(&chan->timer);
+	del_timer_sync(&ioat_chan->timer);
 
 	/* flush inflight tasklet runs */
-	tasklet_kill(&chan->cleanup_task);
+	tasklet_kill(&ioat_chan->cleanup_task);
 
 	/* final cleanup now that everything is quiesced and can't re-arm */
-	device->cleanup_fn((unsigned long) &chan->common);
+	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
 }
 
-/**
- * ioat1_dma_free_chan_resources - release all the descriptors
- * @chan: the channel to be cleaned
- */
-static void ioat1_dma_free_chan_resources(struct dma_chan *c)
+static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
 {
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *ioatdma_device = chan->device;
-	struct ioat_desc_sw *desc, *_desc;
-	int in_use_descs = 0;
+	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
+	ioat_chan->issued = ioat_chan->head;
+	writew(ioat_chan->dmacount,
+	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+	dev_dbg(to_dev(ioat_chan),
+		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail,
+		ioat_chan->issued, ioat_chan->dmacount);
+}
 
-	/* Before freeing channel resources first check
-	 * if they have been previously allocated for this channel.
-	 */
-	if (ioat->desccount == 0)
-		return;
+void ioat_issue_pending(struct dma_chan *c)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
 
-	ioat_stop(chan);
-
-	/* Delay 100ms after reset to allow internal DMA logic to quiesce
-	 * before removing DMA descriptor resources.
-	 */
-	writeb(IOAT_CHANCMD_RESET,
-	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
-	mdelay(100);
-
-	spin_lock_bh(&ioat->desc_lock);
-	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
-		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
-			__func__, desc_id(desc));
-		dump_desc_dbg(ioat, desc);
-		in_use_descs++;
-		list_del(&desc->node);
-		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-			      desc->txd.phys);
-		kfree(desc);
+	if (ioat_ring_pending(ioat_chan)) {
+		spin_lock_bh(&ioat_chan->prep_lock);
+		__ioat_issue_pending(ioat_chan);
+		spin_unlock_bh(&ioat_chan->prep_lock);
 	}
-	list_for_each_entry_safe(desc, _desc,
-				 &ioat->free_desc, node) {
-		list_del(&desc->node);
-		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-			      desc->txd.phys);
-		kfree(desc);
-	}
-	spin_unlock_bh(&ioat->desc_lock);
-
-	pci_pool_free(ioatdma_device->completion_pool,
-		      chan->completion,
-		      chan->completion_dma);
-
-	/* one is ok since we left it on there on purpose */
-	if (in_use_descs > 1)
-		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
-			in_use_descs - 1);
-
-	chan->last_completion = 0;
-	chan->completion_dma = 0;
-	ioat->pending = 0;
-	ioat->desccount = 0;
 }
 
 /**
- * ioat1_dma_get_next_descriptor - return the next available descriptor
- * @ioat: IOAT DMA channel handle
+ * ioat_update_pending - log pending descriptors
+ * @ioat_chan: ioat channel
  *
- * Gets the next descriptor from the chain, and must be called with the
- * channel's desc_lock held.  Allocates more descriptors if the channel
- * has run out.
+ * Check if the number of unsubmitted descriptors has exceeded the
+ * watermark.  Called with prep_lock held
  */
-static struct ioat_desc_sw *
-ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
+static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
 {
-	struct ioat_desc_sw *new;
-
-	if (!list_empty(&ioat->free_desc)) {
-		new = to_ioat_desc(ioat->free_desc.next);
-		list_del(&new->node);
-	} else {
-		/* try to get another desc */
-		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
-		if (!new) {
-			dev_err(to_dev(&ioat->base), "alloc failed\n");
-			return NULL;
-		}
-	}
-	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
-		__func__, desc_id(new));
-	prefetch(new->hw);
-	return new;
+	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
+		__ioat_issue_pending(ioat_chan);
 }
 
-static struct dma_async_tx_descriptor *
-ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
-		      dma_addr_t dma_src, size_t len, unsigned long flags)
+static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
 {
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-	struct ioat_desc_sw *desc;
-	size_t copy;
-	LIST_HEAD(chain);
-	dma_addr_t src = dma_src;
-	dma_addr_t dest = dma_dest;
-	size_t total_len = len;
-	struct ioat_dma_descriptor *hw = NULL;
-	int tx_cnt = 0;
-
-	spin_lock_bh(&ioat->desc_lock);
-	desc = ioat1_dma_get_next_descriptor(ioat);
-	do {
-		if (!desc)
-			break;
-
-		tx_cnt++;
-		copy = min_t(size_t, len, ioat->xfercap);
-
-		hw = desc->hw;
-		hw->size = copy;
-		hw->ctl = 0;
-		hw->src_addr = src;
-		hw->dst_addr = dest;
-
-		list_add_tail(&desc->node, &chain);
-
-		len -= copy;
-		dest += copy;
-		src += copy;
-		if (len) {
-			struct ioat_desc_sw *next;
-
-			async_tx_ack(&desc->txd);
-			next = ioat1_dma_get_next_descriptor(ioat);
-			hw->next = next ? next->txd.phys : 0;
-			dump_desc_dbg(ioat, desc);
-			desc = next;
-		} else
-			hw->next = 0;
-	} while (len);
-
-	if (!desc) {
-		struct ioat_chan_common *chan = &ioat->base;
-
-		dev_err(to_dev(chan),
-			"chan%d - get_next_desc failed\n", chan_num(chan));
-		list_splice(&chain, &ioat->free_desc);
-		spin_unlock_bh(&ioat->desc_lock);
-		return NULL;
-	}
-	spin_unlock_bh(&ioat->desc_lock);
-
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	list_splice(&chain, &desc->tx_list);
-	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	hw->ctl_f.compl_write = 1;
-	hw->tx_cnt = tx_cnt;
-	dump_desc_dbg(ioat, desc);
-
-	return &desc->txd;
-}
-
-static void ioat1_cleanup_event(unsigned long data)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-
-	ioat1_cleanup(ioat);
-	if (!test_bit(IOAT_RUN, &chan->state))
-		return;
-	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
-}
-
-dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
-{
-	dma_addr_t phys_complete;
-	u64 completion;
-
-	completion = *chan->completion;
-	phys_complete = ioat_chansts_to_addr(completion);
-
-	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
-		(unsigned long long) phys_complete);
-
-	if (is_ioat_halted(completion)) {
-		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
-			chanerr);
-
-		/* TODO do something to salvage the situation */
-	}
-
-	return phys_complete;
-}
-
-bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   dma_addr_t *phys_complete)
-{
-	*phys_complete = ioat_get_current_completion(chan);
-	if (*phys_complete == chan->last_completion)
-		return false;
-	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
-	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	return true;
-}
-
-static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct list_head *_desc, *n;
-	struct dma_async_tx_descriptor *tx;
-
-	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
-		 __func__, (unsigned long long) phys_complete);
-	list_for_each_safe(_desc, n, &ioat->used_desc) {
-		struct ioat_desc_sw *desc;
-
-		prefetch(n);
-		desc = list_entry(_desc, typeof(*desc), node);
-		tx = &desc->txd;
-		/*
-		 * Incoming DMA requests may use multiple descriptors,
-		 * due to exceeding xfercap, perhaps. If so, only the
-		 * last one will have a cookie, and require unmapping.
-		 */
-		dump_desc_dbg(ioat, desc);
-		if (tx->cookie) {
-			dma_cookie_complete(tx);
-			dma_descriptor_unmap(tx);
-			ioat->active -= desc->hw->tx_cnt;
-			if (tx->callback) {
-				tx->callback(tx->callback_param);
-				tx->callback = NULL;
-			}
-		}
-
-		if (tx->phys != phys_complete) {
-			/*
-			 * a completed entry, but not the last, so clean
-			 * up if the client is done with the descriptor
-			 */
-			if (async_tx_test_ack(tx))
-				list_move_tail(&desc->node, &ioat->free_desc);
-		} else {
-			/*
-			 * last used desc. Do not remove, so we can
-			 * append from it.
-			 */
-
-			/* if nothing else is pending, cancel the
-			 * completion timeout
-			 */
-			if (n == &ioat->used_desc) {
-				dev_dbg(to_dev(chan),
-					"%s cancel completion timeout\n",
-					__func__);
-				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
-			}
-
-			/* TODO check status bits? */
-			break;
-		}
-	}
-
-	chan->last_completion = phys_complete;
-}
-
-/**
- * ioat1_cleanup - cleanup up finished descriptors
- * @chan: ioat channel to be cleaned up
- *
- * To prevent lock contention we defer cleanup when the locks are
- * contended with a terminal timeout that forces cleanup and catches
- * completion notification errors.
- */
-static void ioat1_cleanup(struct ioat_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-
-	prefetch(chan->completion);
-
-	if (!spin_trylock_bh(&chan->cleanup_lock))
-		return;
-
-	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
-		spin_unlock_bh(&chan->cleanup_lock);
-		return;
-	}
-
-	if (!spin_trylock_bh(&ioat->desc_lock)) {
-		spin_unlock_bh(&chan->cleanup_lock);
-		return;
-	}
-
-	__cleanup(ioat, phys_complete);
-
-	spin_unlock_bh(&ioat->desc_lock);
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static void ioat1_timer_event(unsigned long data)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-
-	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
-
-	spin_lock_bh(&chan->cleanup_lock);
-	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
-		struct ioat_desc_sw *desc;
-
-		spin_lock_bh(&ioat->desc_lock);
-
-		/* restart active descriptors */
-		desc = to_ioat_desc(ioat->used_desc.prev);
-		ioat_set_chainaddr(ioat, desc->txd.phys);
-		ioat_start(chan);
-
-		ioat->pending = 0;
-		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		spin_unlock_bh(&ioat->desc_lock);
-	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		dma_addr_t phys_complete;
-
-		spin_lock_bh(&ioat->desc_lock);
-		/* if we haven't made progress and we have already
-		 * acknowledged a pending completion once, then be more
-		 * forceful with a restart
-		 */
-		if (ioat_cleanup_preamble(chan, &phys_complete))
-			__cleanup(ioat, phys_complete);
-		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
-			ioat1_reset_channel(ioat);
-		else {
-			u64 status = ioat_chansts(chan);
-
-			/* manually update the last completion address */
-			if (ioat_chansts_to_addr(status) != 0)
-				*chan->completion = status;
-
-			set_bit(IOAT_COMPLETION_ACK, &chan->state);
-			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		}
-		spin_unlock_bh(&ioat->desc_lock);
-	}
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-enum dma_status
-ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
-		   struct dma_tx_state *txstate)
-{
-	struct ioat_chan_common *chan = to_chan_common(c);
-	struct ioatdma_device *device = chan->device;
-	enum dma_status ret;
-
-	ret = dma_cookie_status(c, cookie, txstate);
-	if (ret == DMA_COMPLETE)
-		return ret;
-
-	device->cleanup_fn((unsigned long) c);
-
-	return dma_cookie_status(c, cookie, txstate);
-}
-
-static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioat_desc_sw *desc;
+	struct ioat_ring_ent *desc;
 	struct ioat_dma_descriptor *hw;
 
-	spin_lock_bh(&ioat->desc_lock);
-
-	desc = ioat1_dma_get_next_descriptor(ioat);
-
-	if (!desc) {
-		dev_err(to_dev(chan),
-			"Unable to start null desc - get next desc failed\n");
-		spin_unlock_bh(&ioat->desc_lock);
+	if (ioat_ring_space(ioat_chan) < 1) {
+		dev_err(to_dev(ioat_chan),
+			"Unable to start null desc - ring full\n");
 		return;
 	}
 
+	dev_dbg(to_dev(ioat_chan),
+		"%s: head: %#x tail: %#x issued: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
+	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
+
 	hw = desc->hw;
 	hw->ctl = 0;
 	hw->ctl_f.null = 1;
@@ -787,460 +186,804 @@
 	hw->src_addr = 0;
 	hw->dst_addr = 0;
 	async_tx_ack(&desc->txd);
-	hw->next = 0;
-	list_add_tail(&desc->node, &ioat->used_desc);
-	dump_desc_dbg(ioat, desc);
-
-	ioat_set_chainaddr(ioat, desc->txd.phys);
-	ioat_start(chan);
-	spin_unlock_bh(&ioat->desc_lock);
+	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
+	dump_desc_dbg(ioat_chan, desc);
+	/* make sure descriptors are written before we submit */
+	wmb();
+	ioat_chan->head += 1;
+	__ioat_issue_pending(ioat_chan);
 }
 
-/*
- * Perform a IOAT transaction to verify the HW works.
- */
-#define IOAT_TEST_SIZE 2000
-
-static void ioat_dma_test_callback(void *dma_async_param)
+void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
 {
-	struct completion *cmp = dma_async_param;
-
-	complete(cmp);
+	spin_lock_bh(&ioat_chan->prep_lock);
+	__ioat_start_null_desc(ioat_chan);
+	spin_unlock_bh(&ioat_chan->prep_lock);
 }
 
-/**
- * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works.
- * @device: device to be tested
- */
-int ioat_dma_self_test(struct ioatdma_device *device)
+static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
 {
-	int i;
-	u8 *src;
-	u8 *dest;
-	struct dma_device *dma = &device->common;
-	struct device *dev = &device->pdev->dev;
-	struct dma_chan *dma_chan;
-	struct dma_async_tx_descriptor *tx;
-	dma_addr_t dma_dest, dma_src;
-	dma_cookie_t cookie;
+	/* set the tail to be re-issued */
+	ioat_chan->issued = ioat_chan->tail;
+	ioat_chan->dmacount = 0;
+	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+	dev_dbg(to_dev(ioat_chan),
+		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail,
+		ioat_chan->issued, ioat_chan->dmacount);
+
+	if (ioat_ring_pending(ioat_chan)) {
+		struct ioat_ring_ent *desc;
+
+		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
+		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
+		__ioat_issue_pending(ioat_chan);
+	} else
+		__ioat_start_null_desc(ioat_chan);
+}
+
+static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
+{
+	unsigned long end = jiffies + tmo;
 	int err = 0;
-	struct completion cmp;
-	unsigned long tmo;
-	unsigned long flags;
+	u32 status;
 
-	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
-	if (!src)
-		return -ENOMEM;
-	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
-	if (!dest) {
-		kfree(src);
-		return -ENOMEM;
+	status = ioat_chansts(ioat_chan);
+	if (is_ioat_active(status) || is_ioat_idle(status))
+		ioat_suspend(ioat_chan);
+	while (is_ioat_active(status) || is_ioat_idle(status)) {
+		if (tmo && time_after(jiffies, end)) {
+			err = -ETIMEDOUT;
+			break;
+		}
+		status = ioat_chansts(ioat_chan);
+		cpu_relax();
 	}
 
-	/* Fill in src buffer */
-	for (i = 0; i < IOAT_TEST_SIZE; i++)
-		src[i] = (u8)i;
-
-	/* Start copy, using first DMA channel */
-	dma_chan = container_of(dma->channels.next, struct dma_chan,
-				device_node);
-	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
-		dev_err(dev, "selftest cannot allocate chan resource\n");
-		err = -ENODEV;
-		goto out;
-	}
-
-	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
-	if (dma_mapping_error(dev, dma_src)) {
-		dev_err(dev, "mapping src buffer failed\n");
-		goto free_resources;
-	}
-	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dev, dma_dest)) {
-		dev_err(dev, "mapping dest buffer failed\n");
-		goto unmap_src;
-	}
-	flags = DMA_PREP_INTERRUPT;
-	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
-						   IOAT_TEST_SIZE, flags);
-	if (!tx) {
-		dev_err(dev, "Self-test prep failed, disabling\n");
-		err = -ENODEV;
-		goto unmap_dma;
-	}
-
-	async_tx_ack(tx);
-	init_completion(&cmp);
-	tx->callback = ioat_dma_test_callback;
-	tx->callback_param = &cmp;
-	cookie = tx->tx_submit(tx);
-	if (cookie < 0) {
-		dev_err(dev, "Self-test setup failed, disabling\n");
-		err = -ENODEV;
-		goto unmap_dma;
-	}
-	dma->device_issue_pending(dma_chan);
-
-	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
-	if (tmo == 0 ||
-	    dma->device_tx_status(dma_chan, cookie, NULL)
-					!= DMA_COMPLETE) {
-		dev_err(dev, "Self-test copy timed out, disabling\n");
-		err = -ENODEV;
-		goto unmap_dma;
-	}
-	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
-		dev_err(dev, "Self-test copy failed compare, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-unmap_dma:
-	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
-unmap_src:
-	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
-free_resources:
-	dma->device_free_chan_resources(dma_chan);
-out:
-	kfree(src);
-	kfree(dest);
 	return err;
 }
 
-static char ioat_interrupt_style[32] = "msix";
-module_param_string(ioat_interrupt_style, ioat_interrupt_style,
-		    sizeof(ioat_interrupt_style), 0644);
-MODULE_PARM_DESC(ioat_interrupt_style,
-		 "set ioat interrupt style: msix (default), msi, intx");
+static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
+{
+	unsigned long end = jiffies + tmo;
+	int err = 0;
+
+	ioat_reset(ioat_chan);
+	while (ioat_reset_pending(ioat_chan)) {
+		if (end && time_after(jiffies, end)) {
+			err = -ETIMEDOUT;
+			break;
+		}
+		cpu_relax();
+	}
+
+	return err;
+}
+
+static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
+	__releases(&ioat_chan->prep_lock)
+{
+	struct dma_chan *c = tx->chan;
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	dma_cookie_t cookie;
+
+	cookie = dma_cookie_assign(tx);
+	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);
+
+	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
+		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+	/* make descriptor updates visible before advancing ioat->head,
+	 * this is purposefully not smp_wmb() since we are also
+	 * publishing the descriptor updates to a dma device
+	 */
+	wmb();
+
+	ioat_chan->head += ioat_chan->produce;
+
+	ioat_update_pending(ioat_chan);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+
+	return cookie;
+}
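
The __releases() annotation above encodes an asymmetric locking protocol:
prep_lock is taken by ioat_check_space_lock() (further down in this file),
held while the prep routine fills ring slots, and only dropped here at submit
time. A hedged sketch of the producer side, with error handling elided and
desc/num_descs standing in for real prep-path variables:

	dma_cookie_t cookie;

	if (ioat_check_space_lock(ioat_chan, num_descs) == 0) {
		/* fill num_descs ring slots here, still under prep_lock */
		cookie = dmaengine_submit(&desc->txd);	/* tx_submit drops prep_lock */
	}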
+
+static struct ioat_ring_ent *
+ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
+{
+	struct ioat_dma_descriptor *hw;
+	struct ioat_ring_ent *desc;
+	struct ioatdma_device *ioat_dma;
+	dma_addr_t phys;
+
+	ioat_dma = to_ioatdma_device(chan->device);
+	hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
+	if (!hw)
+		return NULL;
+	memset(hw, 0, sizeof(*hw));
+
+	desc = kmem_cache_zalloc(ioat_cache, flags);
+	if (!desc) {
+		pci_pool_free(ioat_dma->dma_pool, hw, phys);
+		return NULL;
+	}
+
+	dma_async_tx_descriptor_init(&desc->txd, chan);
+	desc->txd.tx_submit = ioat_tx_submit_unlock;
+	desc->hw = hw;
+	desc->txd.phys = phys;
+	return desc;
+}
+
+void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
+{
+	struct ioatdma_device *ioat_dma;
+
+	ioat_dma = to_ioatdma_device(chan->device);
+	pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
+	kmem_cache_free(ioat_cache, desc);
+}
+
+struct ioat_ring_ent **
+ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
+{
+	struct ioat_ring_ent **ring;
+	int descs = 1 << order;
+	int i;
+
+	if (order > ioat_get_max_alloc_order())
+		return NULL;
+
+	/* allocate the array to hold the software ring */
+	ring = kcalloc(descs, sizeof(*ring), flags);
+	if (!ring)
+		return NULL;
+	for (i = 0; i < descs; i++) {
+		ring[i] = ioat_alloc_ring_ent(c, flags);
+		if (!ring[i]) {
+			while (i--)
+				ioat_free_ring_ent(ring[i], c);
+			kfree(ring);
+			return NULL;
+		}
+		set_desc_id(ring[i], i);
+	}
+
+	/* link descs */
+	for (i = 0; i < descs-1; i++) {
+		struct ioat_ring_ent *next = ring[i+1];
+		struct ioat_dma_descriptor *hw = ring[i]->hw;
+
+		hw->next = next->txd.phys;
+	}
+	ring[i]->hw->next = ring[0]->txd.phys;
+
+	return ring;
+}
+
+static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
+{
+	/* reshape differs from normal ring allocation in that we want
+	 * to allocate a new software ring while only
+	 * extending/truncating the hardware ring
+	 */
+	struct dma_chan *c = &ioat_chan->dma_chan;
+	const u32 curr_size = ioat_ring_size(ioat_chan);
+	const u16 active = ioat_ring_active(ioat_chan);
+	const u32 new_size = 1 << order;
+	struct ioat_ring_ent **ring;
+	u32 i;
+
+	if (order > ioat_get_max_alloc_order())
+		return false;
+
+	/* double check that we have at least 1 free descriptor */
+	if (active == curr_size)
+		return false;
+
+	/* when shrinking, verify that we can hold the current active
+	 * set in the new ring
+	 */
+	if (active >= new_size)
+		return false;
+
+	/* allocate the array to hold the software ring */
+	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
+	if (!ring)
+		return false;
+
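+	/* sizes are powers of two, so "& (size - 1)" below is index mod size */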
+	/* allocate/trim descriptors as needed */
+	if (new_size > curr_size) {
+		/* copy current descriptors to the new ring */
+		for (i = 0; i < curr_size; i++) {
+			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
+			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+
+			ring[new_idx] = ioat_chan->ring[curr_idx];
+			set_desc_id(ring[new_idx], new_idx);
+		}
+
+		/* add new descriptors to the ring */
+		for (i = curr_size; i < new_size; i++) {
+			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+
+			ring[new_idx] = ioat_alloc_ring_ent(c, GFP_NOWAIT);
+			if (!ring[new_idx]) {
+				while (i--) {
+					u16 new_idx = (ioat_chan->tail+i) &
+						       (new_size-1);
+
+					ioat_free_ring_ent(ring[new_idx], c);
+				}
+				kfree(ring);
+				return false;
+			}
+			set_desc_id(ring[new_idx], new_idx);
+		}
+
+		/* hw link new descriptors */
+		for (i = curr_size-1; i < new_size; i++) {
+			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+			struct ioat_ring_ent *next =
+				ring[(new_idx+1) & (new_size-1)];
+			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
+
+			hw->next = next->txd.phys;
+		}
+	} else {
+		struct ioat_dma_descriptor *hw;
+		struct ioat_ring_ent *next;
+
+		/* copy current descriptors to the new ring, dropping the
+		 * removed descriptors
+		 */
+		for (i = 0; i < new_size; i++) {
+			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
+			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+
+			ring[new_idx] = ioat_chan->ring[curr_idx];
+			set_desc_id(ring[new_idx], new_idx);
+		}
+
+		/* free deleted descriptors */
+		for (i = new_size; i < curr_size; i++) {
+			struct ioat_ring_ent *ent;
+
+			ent = ioat_get_ring_ent(ioat_chan, ioat_chan->tail+i);
+			ioat_free_ring_ent(ent, c);
+		}
+
+		/* fix up hardware ring */
+		hw = ring[(ioat_chan->tail+new_size-1) & (new_size-1)]->hw;
+		next = ring[(ioat_chan->tail+new_size) & (new_size-1)];
+		hw->next = next->txd.phys;
+	}
+
+	dev_dbg(to_dev(ioat_chan), "%s: allocated %d descriptors\n",
+		__func__, new_size);
+
+	kfree(ioat_chan->ring);
+	ioat_chan->ring = ring;
+	ioat_chan->alloc_order = order;
+
+	return true;
+}
 
 /**
- * ioat_dma_setup_interrupts - setup interrupt handler
- * @device: ioat device
+ * ioat_check_space_lock - verify space and grab ring producer lock
+ * @ioat_chan: ioat channel (ring) to operate on
+ * @num_descs: allocation length
  */
-int ioat_dma_setup_interrupts(struct ioatdma_device *device)
+int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
+	__acquires(&ioat_chan->prep_lock)
 {
-	struct ioat_chan_common *chan;
-	struct pci_dev *pdev = device->pdev;
-	struct device *dev = &pdev->dev;
-	struct msix_entry *msix;
-	int i, j, msixcnt;
-	int err = -EINVAL;
-	u8 intrctrl = 0;
+	bool retry;
 
-	if (!strcmp(ioat_interrupt_style, "msix"))
-		goto msix;
-	if (!strcmp(ioat_interrupt_style, "msi"))
-		goto msi;
-	if (!strcmp(ioat_interrupt_style, "intx"))
-		goto intx;
-	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
-	goto err_no_irq;
+ retry:
+	spin_lock_bh(&ioat_chan->prep_lock);
+	/* never allow the last descriptor to be consumed; we need at
+	 * least one free at all times to allow for on-the-fly ring
+	 * resizing.
+	 */
+	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
+		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
+			__func__, num_descs, ioat_chan->head,
+			ioat_chan->tail, ioat_chan->issued);
+		ioat_chan->produce = num_descs;
+		return 0;  /* with ioat_chan->prep_lock held */
+	}
+	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
 
-msix:
-	/* The number of MSI-X vectors should equal the number of channels */
-	msixcnt = device->common.chancnt;
-	for (i = 0; i < msixcnt; i++)
-		device->msix_entries[i].entry = i;
+	/* is another cpu already trying to expand the ring? */
+	if (retry)
+		goto retry;
 
-	err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
-	if (err)
-		goto msi;
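+	/* take cleanup_lock before prep_lock, the lock order used driver-wide */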
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+	spin_lock_bh(&ioat_chan->prep_lock);
+	retry = reshape_ring(ioat_chan, ioat_chan->alloc_order + 1);
+	clear_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
 
-	for (i = 0; i < msixcnt; i++) {
-		msix = &device->msix_entries[i];
-		chan = ioat_chan_by_index(device, i);
-		err = devm_request_irq(dev, msix->vector,
-				       ioat_dma_do_interrupt_msix, 0,
-				       "ioat-msix", chan);
-		if (err) {
-			for (j = 0; j < i; j++) {
-				msix = &device->msix_entries[j];
-				chan = ioat_chan_by_index(device, j);
-				devm_free_irq(dev, msix->vector, chan);
+	/* if we were able to expand the ring retry the allocation */
+	if (retry)
+		goto retry;
+
+	dev_dbg_ratelimited(to_dev(ioat_chan),
+			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
+			    __func__, num_descs, ioat_chan->head,
+			    ioat_chan->tail, ioat_chan->issued);
+
+	/* make progress on reclaim in the allocation failure case; we
+	 * may be called with bottom halves disabled, so trigger the
+	 * timer event directly
+	 */
+	if (time_is_before_jiffies(ioat_chan->timer.expires)
+	    && timer_pending(&ioat_chan->timer)) {
+		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+		ioat_timer_event((unsigned long)ioat_chan);
+	}
+
+	return -ENOMEM;
+}
+
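+/* xor beyond 5 sources or pq beyond 3 spills into an extension descriptor */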
+static bool desc_has_ext(struct ioat_ring_ent *desc)
+{
+	struct ioat_dma_descriptor *hw = desc->hw;
+
+	if (hw->ctl_f.op == IOAT_OP_XOR ||
+	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
+		struct ioat_xor_descriptor *xor = desc->xor;
+
+		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
+			return true;
+	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
+		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
+		struct ioat_pq_descriptor *pq = desc->pq;
+
+		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
+			return true;
+	}
+
+	return false;
+}
+
+static void
+ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
+{
+	if (!sed)
+		return;
+
+	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
+	kmem_cache_free(ioat_sed_cache, sed);
+}
+
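+/* read the writeback area and extract the last completed descriptor address */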
+static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
+{
+	u64 phys_complete;
+	u64 completion;
+
+	completion = *ioat_chan->completion;
+	phys_complete = ioat_chansts_to_addr(completion);
+
+	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
+		(unsigned long long) phys_complete);
+
+	return phys_complete;
+}
+
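+/* true when hw has advanced past last_completion; rearms the watchdog */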
+static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
+				   u64 *phys_complete)
+{
+	*phys_complete = ioat_get_current_completion(ioat_chan);
+	if (*phys_complete == ioat_chan->last_completion)
+		return false;
+
+	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
+	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+	return true;
+}
+
+static void
+desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
+{
+	struct ioat_dma_descriptor *hw = desc->hw;
+
+	switch (hw->ctl_f.op) {
+	case IOAT_OP_PQ_VAL:
+	case IOAT_OP_PQ_VAL_16S:
+	{
+		struct ioat_pq_descriptor *pq = desc->pq;
+
+		/* check if there's error written */
+		if (!pq->dwbes_f.wbes)
+			return;
+
+		/* record a chanerr value here so it can be checked and cleared later */
+
+		if (pq->dwbes_f.p_val_err)
+			*desc->result |= SUM_CHECK_P_RESULT;
+
+		if (pq->dwbes_f.q_val_err)
+			*desc->result |= SUM_CHECK_Q_RESULT;
+
+		return;
+	}
+	default:
+		return;
+	}
+}
+
+/**
+ * __cleanup - reclaim used descriptors
+ * @ioat_chan: channel (ring) to clean
+ * @phys_complete: completion address written back by the hardware
+ */
+static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
+{
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct ioat_ring_ent *desc;
+	bool seen_current = false;
+	int idx = ioat_chan->tail, i;
+	u16 active;
+
+	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
+
+	/*
+	 * At restart of the channel, the completion address and the
+	 * channel status will be 0 due to starting a new chain. Since
+	 * it's a new chain and the first descriptor "fails", there is
+	 * nothing to clean up. We do not want to reap the entire submitted
+	 * chain due to this 0 address value and then BUG.
+	 */
+	if (!phys_complete)
+		return;
+
+	active = ioat_ring_active(ioat_chan);
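+	/* reap from tail until the descriptor hw reported complete is processed */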
+	for (i = 0; i < active && !seen_current; i++) {
+		struct dma_async_tx_descriptor *tx;
+
+		smp_read_barrier_depends();
+		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		dump_desc_dbg(ioat_chan, desc);
+
+		/* set err stat if we are using dwbes */
+		if (ioat_dma->cap & IOAT_CAP_DWBES)
+			desc_get_errstat(ioat_chan, desc);
+
+		tx = &desc->txd;
+		if (tx->cookie) {
+			dma_cookie_complete(tx);
+			dma_descriptor_unmap(tx);
+			if (tx->callback) {
+				tx->callback(tx->callback_param);
+				tx->callback = NULL;
 			}
-			goto msi;
+		}
+
+		if (tx->phys == phys_complete)
+			seen_current = true;
+
+		/* skip extended descriptors */
+		if (desc_has_ext(desc)) {
+			BUG_ON(i + 1 >= active);
+			i++;
+		}
+
+		/* cleanup super extended descriptors */
+		if (desc->sed) {
+			ioat_free_sed(ioat_dma, desc->sed);
+			desc->sed = NULL;
 		}
 	}
-	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
-	device->irq_mode = IOAT_MSIX;
-	goto done;
 
-msi:
-	err = pci_enable_msi(pdev);
-	if (err)
-		goto intx;
+	/* finish all descriptor reads before incrementing tail */
+	smp_mb();
+	ioat_chan->tail = idx + i;
+	/* no active descs have written a completion? */
+	BUG_ON(active && !seen_current);
+	ioat_chan->last_completion = phys_complete;
 
-	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
-			       "ioat-msi", device);
-	if (err) {
+	if (active - i == 0) {
+		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
+			__func__);
+		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+	}
+
+	/* 5 microsecond delay per pending descriptor */
+	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
+	       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
+}
+
+static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
+{
+	u64 phys_complete;
+
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+
+	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+		__cleanup(ioat_chan, phys_complete);
+
+	if (is_ioat_halted(*ioat_chan->completion)) {
+		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+
+		if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
+			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+			ioat_eh(ioat_chan);
+		}
+	}
+
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
+}
+
+void ioat_cleanup_event(unsigned long data)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
+
+	ioat_cleanup(ioat_chan);
+	if (!test_bit(IOAT_RUN, &ioat_chan->state))
+		return;
+	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
+}
+
+static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
+{
+	u64 phys_complete;
+
+	ioat_quiesce(ioat_chan, 0);
+	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+		__cleanup(ioat_chan, phys_complete);
+
+	__ioat_restart_chan(ioat_chan);
+}
+
+static void ioat_eh(struct ioatdma_chan *ioat_chan)
+{
+	struct pci_dev *pdev = to_pdev(ioat_chan);
+	struct ioat_dma_descriptor *hw;
+	struct dma_async_tx_descriptor *tx;
+	u64 phys_complete;
+	struct ioat_ring_ent *desc;
+	u32 err_handled = 0;
+	u32 chanerr_int;
+	u32 chanerr;
+
+	/* cleanup so tail points to descriptor that caused the error */
+	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+		__cleanup(ioat_chan, phys_complete);
+
+	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
+
+	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
+		__func__, chanerr, chanerr_int);
+
+	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
+	hw = desc->hw;
+	dump_desc_dbg(ioat_chan, desc);
+
+	switch (hw->ctl_f.op) {
+	case IOAT_OP_XOR_VAL:
+		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
+			*desc->result |= SUM_CHECK_P_RESULT;
+			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
+		}
+		break;
+	case IOAT_OP_PQ_VAL:
+	case IOAT_OP_PQ_VAL_16S:
+		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
+			*desc->result |= SUM_CHECK_P_RESULT;
+			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
+		}
+		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
+			*desc->result |= SUM_CHECK_Q_RESULT;
+			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
+		}
+		break;
+	}
+
+	/* fault on unhandled error or spurious halt */
+	if (chanerr ^ err_handled || chanerr == 0) {
+		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
+			__func__, chanerr, err_handled);
+		BUG();
+	} else { /* cleanup the faulty descriptor */
+		tx = &desc->txd;
+		if (tx->cookie) {
+			dma_cookie_complete(tx);
+			dma_descriptor_unmap(tx);
+			if (tx->callback) {
+				tx->callback(tx->callback_param);
+				tx->callback = NULL;
+			}
+		}
+	}
+
+	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
+
+	/* mark faulting descriptor as complete */
+	*ioat_chan->completion = desc->txd.phys;
+
+	spin_lock_bh(&ioat_chan->prep_lock);
+	ioat_restart_channel(ioat_chan);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+}
+
+static void check_active(struct ioatdma_chan *ioat_chan)
+{
+	if (ioat_ring_active(ioat_chan)) {
+		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+		return;
+	}
+
+	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
+		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+	else if (ioat_chan->alloc_order > ioat_get_alloc_order()) {
+		/* if the ring is idle, empty, and oversized try to step
+		 * down the size
+		 */
+		reshape_ring(ioat_chan, ioat_chan->alloc_order - 1);
+
+		/* keep shrinking until we get back to our minimum
+		 * default size
+		 */
+		if (ioat_chan->alloc_order > ioat_get_alloc_order())
+			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+	}
+}
+
+void ioat_timer_event(unsigned long data)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
+	dma_addr_t phys_complete;
+	u64 status;
+
+	status = ioat_chansts(ioat_chan);
+
+	/* when halted due to errors, check for channel
+	 * programming errors before advancing the completion state
+	 */
+	if (is_ioat_halted(status)) {
+		u32 chanerr;
+
+		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
+			__func__, chanerr);
+		if (test_bit(IOAT_RUN, &ioat_chan->state))
+			BUG_ON(is_ioat_bug(chanerr));
+		else /* we never got off the ground */
+			return;
+	}
+
+	/* if we haven't made progress and we have already
+	 * acknowledged a pending completion once, then be more
+	 * forceful with a restart
+	 */
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+		__cleanup(ioat_chan, phys_complete);
+	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
+		spin_lock_bh(&ioat_chan->prep_lock);
+		ioat_restart_channel(ioat_chan);
+		spin_unlock_bh(&ioat_chan->prep_lock);
+		spin_unlock_bh(&ioat_chan->cleanup_lock);
+		return;
+	} else {
+		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
+		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+	}
+
+	if (ioat_ring_active(ioat_chan))
+		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+	else {
+		spin_lock_bh(&ioat_chan->prep_lock);
+		check_active(ioat_chan);
+		spin_unlock_bh(&ioat_chan->prep_lock);
+	}
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
+}
+
+enum dma_status
+ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+		struct dma_tx_state *txstate)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	enum dma_status ret;
+
+	ret = dma_cookie_status(c, cookie, txstate);
+	if (ret == DMA_COMPLETE)
+		return ret;
+
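+	/* reap finished descriptors, then re-check the cookie state */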
+	ioat_cleanup(ioat_chan);
+
+	return dma_cookie_status(c, cookie, txstate);
+}
+
+static int ioat_irq_reinit(struct ioatdma_device *ioat_dma)
+{
+	struct pci_dev *pdev = ioat_dma->pdev;
+	int irq = pdev->irq, i;
+
+	if (!is_bwd_ioat(pdev))
+		return 0;
+
+	switch (ioat_dma->irq_mode) {
+	case IOAT_MSIX:
+		for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) {
+			struct msix_entry *msix = &ioat_dma->msix_entries[i];
+			struct ioatdma_chan *ioat_chan;
+
+			ioat_chan = ioat_chan_by_index(ioat_dma, i);
+			devm_free_irq(&pdev->dev, msix->vector, ioat_chan);
+		}
+
+		pci_disable_msix(pdev);
+		break;
+	case IOAT_MSI:
 		pci_disable_msi(pdev);
-		goto intx;
+		/* fall through */
+	case IOAT_INTX:
+		devm_free_irq(&pdev->dev, irq, ioat_dma);
+		break;
+	default:
+		return 0;
 	}
-	device->irq_mode = IOAT_MSI;
-	goto done;
+	ioat_dma->irq_mode = IOAT_NOIRQ;
 
-intx:
-	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
-			       IRQF_SHARED, "ioat-intx", device);
-	if (err)
-		goto err_no_irq;
-
-	device->irq_mode = IOAT_INTX;
-done:
-	if (device->intr_quirk)
-		device->intr_quirk(device);
-	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
-	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
-	return 0;
-
-err_no_irq:
-	/* Disable all interrupt generation */
-	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
-	device->irq_mode = IOAT_NOIRQ;
-	dev_err(dev, "no usable interrupts\n");
-	return err;
-}
-EXPORT_SYMBOL(ioat_dma_setup_interrupts);
-
-static void ioat_disable_interrupts(struct ioatdma_device *device)
-{
-	/* Disable all interrupt generation */
-	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+	return ioat_dma_setup_interrupts(ioat_dma);
 }
 
-int ioat_probe(struct ioatdma_device *device)
+int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
 {
-	int err = -ENODEV;
-	struct dma_device *dma = &device->common;
-	struct pci_dev *pdev = device->pdev;
-	struct device *dev = &pdev->dev;
-
-	/* DMA coherent memory pool for DMA descriptor allocations */
-	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
-					   sizeof(struct ioat_dma_descriptor),
-					   64, 0);
-	if (!device->dma_pool) {
-		err = -ENOMEM;
-		goto err_dma_pool;
-	}
-
-	device->completion_pool = pci_pool_create("completion_pool", pdev,
-						  sizeof(u64), SMP_CACHE_BYTES,
-						  SMP_CACHE_BYTES);
-
-	if (!device->completion_pool) {
-		err = -ENOMEM;
-		goto err_completion_pool;
-	}
-
-	device->enumerate_channels(device);
-
-	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
-	dma->dev = &pdev->dev;
-
-	if (!dma->chancnt) {
-		dev_err(dev, "channel enumeration error\n");
-		goto err_setup_interrupts;
-	}
-
-	err = ioat_dma_setup_interrupts(device);
-	if (err)
-		goto err_setup_interrupts;
-
-	err = device->self_test(device);
-	if (err)
-		goto err_self_test;
-
-	return 0;
-
-err_self_test:
-	ioat_disable_interrupts(device);
-err_setup_interrupts:
-	pci_pool_destroy(device->completion_pool);
-err_completion_pool:
-	pci_pool_destroy(device->dma_pool);
-err_dma_pool:
-	return err;
-}
-
-int ioat_register(struct ioatdma_device *device)
-{
-	int err = dma_async_device_register(&device->common);
-
-	if (err) {
-		ioat_disable_interrupts(device);
-		pci_pool_destroy(device->completion_pool);
-		pci_pool_destroy(device->dma_pool);
-	}
-
-	return err;
-}
-
-/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
-static void ioat1_intr_quirk(struct ioatdma_device *device)
-{
-	struct pci_dev *pdev = device->pdev;
-	u32 dmactrl;
-
-	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
-	if (pdev->msi_enabled)
-		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
-	else
-		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
-	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
-}
-
-static ssize_t ring_size_show(struct dma_chan *c, char *page)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-
-	return sprintf(page, "%d\n", ioat->desccount);
-}
-static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
-
-static ssize_t ring_active_show(struct dma_chan *c, char *page)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-
-	return sprintf(page, "%d\n", ioat->active);
-}
-static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
-
-static ssize_t cap_show(struct dma_chan *c, char *page)
-{
-	struct dma_device *dma = c->device;
-
-	return sprintf(page, "copy%s%s%s%s%s\n",
-		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
-		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
-		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
-		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
-		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
-
-}
-struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
-
-static ssize_t version_show(struct dma_chan *c, char *page)
-{
-	struct dma_device *dma = c->device;
-	struct ioatdma_device *device = to_ioatdma_device(dma);
-
-	return sprintf(page, "%d.%d\n",
-		       device->version >> 4, device->version & 0xf);
-}
-struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
-
-static struct attribute *ioat1_attrs[] = {
-	&ring_size_attr.attr,
-	&ring_active_attr.attr,
-	&ioat_cap_attr.attr,
-	&ioat_version_attr.attr,
-	NULL,
-};
-
-static ssize_t
-ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-	struct ioat_sysfs_entry *entry;
-	struct ioat_chan_common *chan;
-
-	entry = container_of(attr, struct ioat_sysfs_entry, attr);
-	chan = container_of(kobj, struct ioat_chan_common, kobj);
-
-	if (!entry->show)
-		return -EIO;
-	return entry->show(&chan->common, page);
-}
-
-const struct sysfs_ops ioat_sysfs_ops = {
-	.show	= ioat_attr_show,
-};
-
-static struct kobj_type ioat1_ktype = {
-	.sysfs_ops = &ioat_sysfs_ops,
-	.default_attrs = ioat1_attrs,
-};
-
-void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
-{
-	struct dma_device *dma = &device->common;
-	struct dma_chan *c;
-
-	list_for_each_entry(c, &dma->channels, device_node) {
-		struct ioat_chan_common *chan = to_chan_common(c);
-		struct kobject *parent = &c->dev->device.kobj;
-		int err;
-
-		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
-		if (err) {
-			dev_warn(to_dev(chan),
-				 "sysfs init error (%d), continuing...\n", err);
-			kobject_put(&chan->kobj);
-			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
-		}
-	}
-}
-
-void ioat_kobject_del(struct ioatdma_device *device)
-{
-	struct dma_device *dma = &device->common;
-	struct dma_chan *c;
-
-	list_for_each_entry(c, &dma->channels, device_node) {
-		struct ioat_chan_common *chan = to_chan_common(c);
-
-		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
-			kobject_del(&chan->kobj);
-			kobject_put(&chan->kobj);
-		}
-	}
-}
-
-int ioat1_dma_probe(struct ioatdma_device *device, int dca)
-{
-	struct pci_dev *pdev = device->pdev;
-	struct dma_device *dma;
+	/* throw away whatever the channel was doing and get it
+	 * initialized, with ioat3-specific workarounds
+	 */
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct pci_dev *pdev = ioat_dma->pdev;
+	u32 chanerr;
+	u16 dev_id;
 	int err;
 
-	device->intr_quirk = ioat1_intr_quirk;
-	device->enumerate_channels = ioat1_enumerate_channels;
-	device->self_test = ioat_dma_self_test;
-	device->timer_fn = ioat1_timer_event;
-	device->cleanup_fn = ioat1_cleanup_event;
-	dma = &device->common;
-	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
-	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
-	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
-	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
-	dma->device_tx_status = ioat_dma_tx_status;
+	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));
 
-	err = ioat_probe(device);
-	if (err)
-		return err;
-	err = ioat_register(device);
-	if (err)
-		return err;
-	ioat_kobject_add(device, &ioat1_ktype);
+	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 
-	if (dca)
-		device->dca = ioat_dca_init(pdev, device->reg_base);
+	if (ioat_dma->version < IOAT_VER_3_3) {
+		/* clear any pending errors */
+		err = pci_read_config_dword(pdev,
+				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+		if (err) {
+			dev_err(&pdev->dev,
+				"channel error register unreachable\n");
+			return err;
+		}
+		pci_write_config_dword(pdev,
+				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
+
+		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+		 * (workaround for spurious config parity error after restart)
+		 */
+		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
+			pci_write_config_dword(pdev,
+					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
+					       0x10);
+		}
+	}
+
+	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
+	if (!err)
+		err = ioat_irq_reinit(ioat_dma);
+
+	if (err)
+		dev_err(&pdev->dev, "Failed to reset: %d\n", err);
 
 	return err;
 }
-
-void ioat_dma_remove(struct ioatdma_device *device)
-{
-	struct dma_device *dma = &device->common;
-
-	ioat_disable_interrupts(device);
-
-	ioat_kobject_del(device);
-
-	dma_async_device_unregister(dma);
-
-	pci_pool_destroy(device->dma_pool);
-	pci_pool_destroy(device->completion_pool);
-
-	INIT_LIST_HEAD(&dma->channels);
-}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 30f5c7e..1bc08498 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -18,26 +18,32 @@
 #define IOATDMA_H
 
 #include <linux/dmaengine.h>
-#include "hw.h"
-#include "registers.h"
 #include <linux/init.h>
 #include <linux/dmapool.h>
 #include <linux/cache.h>
 #include <linux/pci_ids.h>
-#include <net/tcp.h>
+#include <linux/circ_buf.h>
+#include <linux/interrupt.h>
+#include "registers.h"
+#include "hw.h"
 
 #define IOAT_DMA_VERSION  "4.00"
 
-#define IOAT_LOW_COMPLETION_MASK	0xffffffc0
 #define IOAT_DMA_DCA_ANY_CPU		~0
 
-#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
-#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
-#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd)
-#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)
-#define to_pdev(ioat_chan) ((ioat_chan)->device->pdev)
+#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
+#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
+#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)
 
-#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
+#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)
+
+/* ioat hardware assumes at least two sources for raid operations */
+#define src_cnt_to_sw(x) ((x) + 2)
+#define src_cnt_to_hw(x) ((x) - 2)
+#define ndest_to_sw(x) ((x) + 1)
+#define ndest_to_hw(x) ((x) - 1)
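+/* the 16-source pq descriptor format biases its source count by 9 */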
+#define src16_cnt_to_sw(x) ((x) + 9)
+#define src16_cnt_to_hw(x) ((x) - 9)
 
 /*
  * workaround for IOAT ver.3.0 null descriptor issue
@@ -57,19 +63,15 @@
  * @pdev: PCI-Express device
  * @reg_base: MMIO register space base address
  * @dma_pool: for allocating DMA descriptors
- * @common: embedded struct dma_device
+ * @completion_pool: DMA buffers for completion ops
+ * @sed_hw_pool: DMA super descriptor pools
+ * @dma_dev: embedded struct dma_device
  * @version: version of ioatdma device
  * @msix_entries: irq handlers
  * @idx: per channel data
  * @dca: direct cache access context
- * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
- * @enumerate_channels: hw version specific channel enumeration
- * @reset_hw: hw version specific channel (re)initialization
- * @cleanup_fn: select between the v2 and v3 cleanup routines
- * @timer_fn: select between the v2 and v3 timer watchdog routines
- * @self_test: hardware version specific self test for each supported op type
- *
- * Note: the v3 cleanup routine supports raid operations
+ * @irq_mode: interrupt mode (INTX, MSI, MSIX)
+ * @cap: read DMA capabilities register
  */
 struct ioatdma_device {
 	struct pci_dev *pdev;
@@ -78,28 +80,21 @@
 	struct pci_pool *completion_pool;
 #define MAX_SED_POOLS	5
 	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
-	struct dma_device common;
+	struct dma_device dma_dev;
 	u8 version;
 	struct msix_entry msix_entries[4];
-	struct ioat_chan_common *idx[4];
+	struct ioatdma_chan *idx[4];
 	struct dca_provider *dca;
 	enum ioat_irq_mode irq_mode;
 	u32 cap;
-	void (*intr_quirk)(struct ioatdma_device *device);
-	int (*enumerate_channels)(struct ioatdma_device *device);
-	int (*reset_hw)(struct ioat_chan_common *chan);
-	void (*cleanup_fn)(unsigned long data);
-	void (*timer_fn)(unsigned long data);
-	int (*self_test)(struct ioatdma_device *device);
 };
 
-struct ioat_chan_common {
-	struct dma_chan common;
+struct ioatdma_chan {
+	struct dma_chan dma_chan;
 	void __iomem *reg_base;
 	dma_addr_t last_completion;
 	spinlock_t cleanup_lock;
 	unsigned long state;
-	#define IOAT_COMPLETION_PENDING 0
 	#define IOAT_COMPLETION_ACK 1
 	#define IOAT_RESET_PENDING 2
 	#define IOAT_KOBJ_INIT_FAIL 3
@@ -110,11 +105,32 @@
 	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
 	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
 	#define RESET_DELAY msecs_to_jiffies(100)
-	struct ioatdma_device *device;
+	struct ioatdma_device *ioat_dma;
 	dma_addr_t completion_dma;
 	u64 *completion;
 	struct tasklet_struct cleanup_task;
 	struct kobject kobj;
+
+/* ioat v2 / v3 channel attributes
+ * @xfercap_log: log2 of channel max transfer length (for fast division)
+ * @head: allocated index
+ * @issued: hardware notification point
+ * @tail: cleanup index
+ * @dmacount: identical to 'head' except for occasionally resetting to zero
+ * @alloc_order: log2 of the number of allocated descriptors
+ * @produce: number of descriptors to produce at submit time
+ * @ring: software ring buffer implementation of hardware ring
+ * @prep_lock: serializes descriptor preparation (producers)
+ */
+	size_t xfercap_log;
+	u16 head;
+	u16 issued;
+	u16 tail;
+	u16 dmacount;
+	u16 alloc_order;
+	u16 produce;
+	struct ioat_ring_ent **ring;
+	spinlock_t prep_lock;
 };
 
 struct ioat_sysfs_entry {
@@ -123,28 +139,11 @@
 };
 
 /**
- * struct ioat_dma_chan - internal representation of a DMA channel
- */
-struct ioat_dma_chan {
-	struct ioat_chan_common base;
-
-	size_t xfercap;	/* XFERCAP register value expanded out */
-
-	spinlock_t desc_lock;
-	struct list_head free_desc;
-	struct list_head used_desc;
-
-	int pending;
-	u16 desccount;
-	u16 active;
-};
-
-/**
  * struct ioat_sed_ent - wrapper around super extended hardware descriptor
  * @hw: hardware SED
- * @sed_dma: dma address for the SED
- * @list: list member
+ * @dma: dma address for the SED
  * @parent: point to the dma descriptor that's the parent
+ * @hw_pool: descriptor pool index
  */
 struct ioat_sed_ent {
 	struct ioat_sed_raw_descriptor *hw;
@@ -153,39 +152,57 @@
 	unsigned int hw_pool;
 };
 
-static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c)
-{
-	return container_of(c, struct ioat_chan_common, common);
-}
-
-static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
-{
-	struct ioat_chan_common *chan = to_chan_common(c);
-
-	return container_of(chan, struct ioat_dma_chan, base);
-}
-
-/* wrapper around hardware descriptor format + additional software fields */
-
 /**
- * struct ioat_desc_sw - wrapper around hardware descriptor
+ * struct ioat_ring_ent - wrapper around hardware descriptor
  * @hw: hardware DMA descriptor (for memcpy)
- * @node: this descriptor will either be on the free list,
- *     or attached to a transaction list (tx_list)
+ * @xor: hardware xor descriptor
+ * @xor_ex: hardware xor extension descriptor
+ * @pq: hardware pq descriptor
+ * @pq_ex: hardware pq extension descriptor
+ * @pqu: hardware pq update descriptor
+ * @raw: hardware raw (un-typed) descriptor
  * @txd: the generic software descriptor for all engines
+ * @len: total transaction length for unmap
+ * @result: asynchronous result of validate operations
  * @id: identifier for debug
+ * @sed: pointer to super extended descriptor sw desc
  */
-struct ioat_desc_sw {
-	struct ioat_dma_descriptor *hw;
-	struct list_head node;
+struct ioat_ring_ent {
+	union {
+		struct ioat_dma_descriptor *hw;
+		struct ioat_xor_descriptor *xor;
+		struct ioat_xor_ext_descriptor *xor_ex;
+		struct ioat_pq_descriptor *pq;
+		struct ioat_pq_ext_descriptor *pq_ex;
+		struct ioat_pq_update_descriptor *pqu;
+		struct ioat_raw_descriptor *raw;
+	};
 	size_t len;
-	struct list_head tx_list;
 	struct dma_async_tx_descriptor txd;
+	enum sum_check_flags *result;
 	#ifdef DEBUG
 	int id;
 	#endif
+	struct ioat_sed_ent *sed;
 };
 
+extern const struct sysfs_ops ioat_sysfs_ops;
+extern struct ioat_sysfs_entry ioat_version_attr;
+extern struct ioat_sysfs_entry ioat_cap_attr;
+extern int ioat_pending_level;
+extern int ioat_ring_alloc_order;
+extern struct kobj_type ioat_ktype;
+extern struct kmem_cache *ioat_cache;
+extern int ioat_ring_max_alloc_order;
+extern struct kmem_cache *ioat_sed_cache;
+
+static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
+{
+	return container_of(c, struct ioatdma_chan, dma_chan);
+}
+
+/* wrapper around hardware descriptor format + additional software fields */
 #ifdef DEBUG
 #define set_desc_id(desc, i) ((desc)->id = (i))
 #define desc_id(desc) ((desc)->id)
@@ -195,10 +212,10 @@
 #endif
 
 static inline void
-__dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
+__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
 		struct dma_async_tx_descriptor *tx, int id)
 {
-	struct device *dev = to_dev(chan);
+	struct device *dev = to_dev(ioat_chan);
 
 	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
 		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
@@ -208,25 +225,25 @@
 }
 
 #define dump_desc_dbg(c, d) \
-	({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
+	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })
 
-static inline struct ioat_chan_common *
-ioat_chan_by_index(struct ioatdma_device *device, int index)
+static inline struct ioatdma_chan *
+ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
 {
-	return device->idx[index];
+	return ioat_dma->idx[index];
 }
 
-static inline u64 ioat_chansts_32(struct ioat_chan_common *chan)
+static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 	u64 status;
 	u32 status_lo;
 
 	/* We need to read the low address first as this causes the
 	 * chipset to latch the upper bits for the subsequent read
 	 */
-	status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
-	status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
+	status_lo = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
+	status = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
 	status <<= 32;
 	status |= status_lo;
 
@@ -235,16 +252,16 @@
 
 #if BITS_PER_LONG == 64
 
-static inline u64 ioat_chansts(struct ioat_chan_common *chan)
+static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 	u64 status;
 
 	 /* With IOAT v3.3 the status register is 64bit.  */
 	if (ver >= IOAT_VER_3_3)
-		status = readq(chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
+		status = readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
 	else
-		status = ioat_chansts_32(chan);
+		status = ioat_chansts_32(ioat_chan);
 
 	return status;
 }
@@ -253,56 +270,41 @@
 #define ioat_chansts ioat_chansts_32
 #endif
 
-static inline void ioat_start(struct ioat_chan_common *chan)
-{
-	u8 ver = chan->device->version;
-
-	writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
-}
-
 static inline u64 ioat_chansts_to_addr(u64 status)
 {
 	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
 }
 
-static inline u32 ioat_chanerr(struct ioat_chan_common *chan)
+static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
 {
-	return readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 }
 
-static inline void ioat_suspend(struct ioat_chan_common *chan)
+static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 
-	writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+	writeb(IOAT_CHANCMD_SUSPEND,
+	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 }
 
-static inline void ioat_reset(struct ioat_chan_common *chan)
+static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 
-	writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+	writeb(IOAT_CHANCMD_RESET,
+	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 }
 
-static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
+static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 	u8 cmd;
 
-	cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
 }
 
-static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	writel(addr & 0x00000000FFFFFFFF,
-	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
-	writel(addr >> 32,
-	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
-}
-
 static inline bool is_ioat_active(unsigned long status)
 {
 	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
@@ -329,24 +331,111 @@
 	return !!err;
 }
 
-int ioat_probe(struct ioatdma_device *device);
-int ioat_register(struct ioatdma_device *device);
-int ioat1_dma_probe(struct ioatdma_device *dev, int dca);
-int ioat_dma_self_test(struct ioatdma_device *device);
-void ioat_dma_remove(struct ioatdma_device *device);
+#define IOAT_MAX_ORDER 16
+#define ioat_get_alloc_order() \
+	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
+#define ioat_get_max_alloc_order() \
+	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
+
+static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
+{
+	return 1 << ioat_chan->alloc_order;
+}
+
+/* count of descriptors in flight with the engine */
+static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
+{
+	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
+			ioat_ring_size(ioat_chan));
+}
+
+/* count of descriptors pending submission to hardware */
+static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
+{
+	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
+			ioat_ring_size(ioat_chan));
+}
+
+static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
+{
+	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
+}
+
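+/* hw descriptors needed to cover len: ceil(len / max transfer size) */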
+static inline u16
+ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
+{
+	u16 num_descs = len >> ioat_chan->xfercap_log;
+
+	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
+	return num_descs;
+}
+
+static inline struct ioat_ring_ent *
+ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
+{
+	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
+}
+
+static inline void
+ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
+{
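+	/* program the chain address as two 32-bit mmio writes, low then high */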
+	writel(addr & 0x00000000FFFFFFFF,
+	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+	writel(addr >> 32,
+	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+}
+
+/* IOAT Prep functions */
+struct dma_async_tx_descriptor *
+ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
+			   dma_addr_t dma_src, size_t len, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+	       unsigned int src_cnt, size_t len, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
+		    unsigned int src_cnt, size_t len,
+		    enum sum_check_flags *result, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+	      unsigned int src_cnt, const unsigned char *scf, size_t len,
+	      unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+		  unsigned int src_cnt, const unsigned char *scf, size_t len,
+		  enum sum_check_flags *pqres, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
+		 unsigned int src_cnt, size_t len, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
+		     unsigned int src_cnt, size_t len,
+		     enum sum_check_flags *result, unsigned long flags);
+
+/* IOAT Operation functions */
+irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
+irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
+struct ioat_ring_ent **
+ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
+void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
+void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
+int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
+enum dma_status
+ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+		struct dma_tx_state *txstate);
+void ioat_cleanup_event(unsigned long data);
+void ioat_timer_event(unsigned long data);
+int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
+void ioat_issue_pending(struct dma_chan *chan);
+
+/* IOAT Init functions */
+bool is_bwd_ioat(struct pci_dev *pdev);
 struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
-void ioat_init_channel(struct ioatdma_device *device,
-		       struct ioat_chan_common *chan, int idx);
-enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
-				   struct dma_tx_state *txstate);
-bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   dma_addr_t *phys_complete);
-void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
-void ioat_kobject_del(struct ioatdma_device *device);
-int ioat_dma_setup_interrupts(struct ioatdma_device *device);
-void ioat_stop(struct ioat_chan_common *chan);
-extern const struct sysfs_ops ioat_sysfs_ops;
-extern struct ioat_sysfs_entry ioat_version_attr;
-extern struct ioat_sysfs_entry ioat_cap_attr;
+void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
+void ioat_kobject_del(struct ioatdma_device *ioat_dma);
+int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
+void ioat_stop(struct ioatdma_chan *ioat_chan);
 #endif /* IOATDMA_H */
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
deleted file mode 100644
index 69c7dfc..0000000
--- a/drivers/dma/ioat/dma_v2.c
+++ /dev/null
@@ -1,916 +0,0 @@
-/*
- * Intel I/OAT DMA Linux driver
- * Copyright(c) 2004 - 2009 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- */
-
-/*
- * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
- * does asynchronous data movement and checksumming operations.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/workqueue.h>
-#include <linux/prefetch.h>
-#include <linux/i7300_idle.h>
-#include "dma.h"
-#include "dma_v2.h"
-#include "registers.h"
-#include "hw.h"
-
-#include "../dmaengine.h"
-
-int ioat_ring_alloc_order = 8;
-module_param(ioat_ring_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_alloc_order,
-		 "ioat2+: allocate 2^n descriptors per channel"
-		 " (default: 8 max: 16)");
-static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
-module_param(ioat_ring_max_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_max_alloc_order,
-		 "ioat2+: upper limit for ring size (default: 16)");
-
-void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	ioat->dmacount += ioat2_ring_pending(ioat);
-	ioat->issued = ioat->head;
-	writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-	dev_dbg(to_dev(chan),
-		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
-		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
-}
-
-void ioat2_issue_pending(struct dma_chan *c)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-
-	if (ioat2_ring_pending(ioat)) {
-		spin_lock_bh(&ioat->prep_lock);
-		__ioat2_issue_pending(ioat);
-		spin_unlock_bh(&ioat->prep_lock);
-	}
-}
-
-/**
- * ioat2_update_pending - log pending descriptors
- * @ioat: ioat2+ channel
- *
- * Check if the number of unsubmitted descriptors has exceeded the
- * watermark.  Called with prep_lock held
- */
-static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
-{
-	if (ioat2_ring_pending(ioat) > ioat_pending_level)
-		__ioat2_issue_pending(ioat);
-}
-
-static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_ring_ent *desc;
-	struct ioat_dma_descriptor *hw;
-
-	if (ioat2_ring_space(ioat) < 1) {
-		dev_err(to_dev(&ioat->base),
-			"Unable to start null desc - ring full\n");
-		return;
-	}
-
-	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
-		__func__, ioat->head, ioat->tail, ioat->issued);
-	desc = ioat2_get_ring_ent(ioat, ioat->head);
-
-	hw = desc->hw;
-	hw->ctl = 0;
-	hw->ctl_f.null = 1;
-	hw->ctl_f.int_en = 1;
-	hw->ctl_f.compl_write = 1;
-	/* set size to non-zero value (channel returns error when size is 0) */
-	hw->size = NULL_DESC_BUFFER_SIZE;
-	hw->src_addr = 0;
-	hw->dst_addr = 0;
-	async_tx_ack(&desc->txd);
-	ioat2_set_chainaddr(ioat, desc->txd.phys);
-	dump_desc_dbg(ioat, desc);
-	wmb();
-	ioat->head += 1;
-	__ioat2_issue_pending(ioat);
-}
-
-static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
-{
-	spin_lock_bh(&ioat->prep_lock);
-	__ioat2_start_null_desc(ioat);
-	spin_unlock_bh(&ioat->prep_lock);
-}
-
-static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct dma_async_tx_descriptor *tx;
-	struct ioat_ring_ent *desc;
-	bool seen_current = false;
-	u16 active;
-	int idx = ioat->tail, i;
-
-	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
-		__func__, ioat->head, ioat->tail, ioat->issued);
-
-	active = ioat2_ring_active(ioat);
-	for (i = 0; i < active && !seen_current; i++) {
-		smp_read_barrier_depends();
-		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		tx = &desc->txd;
-		dump_desc_dbg(ioat, desc);
-		if (tx->cookie) {
-			dma_descriptor_unmap(tx);
-			dma_cookie_complete(tx);
-			if (tx->callback) {
-				tx->callback(tx->callback_param);
-				tx->callback = NULL;
-			}
-		}
-
-		if (tx->phys == phys_complete)
-			seen_current = true;
-	}
-	smp_mb(); /* finish all descriptor reads before incrementing tail */
-	ioat->tail = idx + i;
-	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
-
-	chan->last_completion = phys_complete;
-	if (active - i == 0) {
-		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
-			__func__);
-		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
-		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	}
-}
-
-/**
- * ioat2_cleanup - clean finished descriptors (advance tail pointer)
- * @chan: ioat channel to be cleaned up
- */
-static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-
-	spin_lock_bh(&chan->cleanup_lock);
-	if (ioat_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-void ioat2_cleanup_event(unsigned long data)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-
-	ioat2_cleanup(ioat);
-	if (!test_bit(IOAT_RUN, &chan->state))
-		return;
-	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
-}
-
-void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	/* set the tail to be re-issued */
-	ioat->issued = ioat->tail;
-	ioat->dmacount = 0;
-	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
-	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	dev_dbg(to_dev(chan),
-		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
-		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
-
-	if (ioat2_ring_pending(ioat)) {
-		struct ioat_ring_ent *desc;
-
-		desc = ioat2_get_ring_ent(ioat, ioat->tail);
-		ioat2_set_chainaddr(ioat, desc->txd.phys);
-		__ioat2_issue_pending(ioat);
-	} else
-		__ioat2_start_null_desc(ioat);
-}
-
-int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
-{
-	unsigned long end = jiffies + tmo;
-	int err = 0;
-	u32 status;
-
-	status = ioat_chansts(chan);
-	if (is_ioat_active(status) || is_ioat_idle(status))
-		ioat_suspend(chan);
-	while (is_ioat_active(status) || is_ioat_idle(status)) {
-		if (tmo && time_after(jiffies, end)) {
-			err = -ETIMEDOUT;
-			break;
-		}
-		status = ioat_chansts(chan);
-		cpu_relax();
-	}
-
-	return err;
-}
-
-int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
-{
-	unsigned long end = jiffies + tmo;
-	int err = 0;
-
-	ioat_reset(chan);
-	while (ioat_reset_pending(chan)) {
-		if (end && time_after(jiffies, end)) {
-			err = -ETIMEDOUT;
-			break;
-		}
-		cpu_relax();
-	}
-
-	return err;
-}
-
-static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-
-	ioat2_quiesce(chan, 0);
-	if (ioat_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-
-	__ioat2_restart_chan(ioat);
-}
-
-static void check_active(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	if (ioat2_ring_active(ioat)) {
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		return;
-	}
-
-	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
-		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	else if (ioat->alloc_order > ioat_get_alloc_order()) {
-		/* if the ring is idle, empty, and oversized try to step
-		 * down the size
-		 */
-		reshape_ring(ioat, ioat->alloc_order - 1);
-
-		/* keep shrinking until we get back to our minimum
-		 * default size
-		 */
-		if (ioat->alloc_order > ioat_get_alloc_order())
-			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	}
-
-}
-
-void ioat2_timer_event(unsigned long data)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-	u64 status;
-
-	status = ioat_chansts(chan);
-
-	/* when halted due to errors check for channel
-	 * programming errors before advancing the completion state
-	 */
-	if (is_ioat_halted(status)) {
-		u32 chanerr;
-
-		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
-			__func__, chanerr);
-		if (test_bit(IOAT_RUN, &chan->state))
-			BUG_ON(is_ioat_bug(chanerr));
-		else /* we never got off the ground */
-			return;
-	}
-
-	/* if we haven't made progress and we have already
-	 * acknowledged a pending completion once, then be more
-	 * forceful with a restart
-	 */
-	spin_lock_bh(&chan->cleanup_lock);
-	if (ioat_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
-		spin_lock_bh(&ioat->prep_lock);
-		ioat2_restart_channel(ioat);
-		spin_unlock_bh(&ioat->prep_lock);
-		spin_unlock_bh(&chan->cleanup_lock);
-		return;
-	} else {
-		set_bit(IOAT_COMPLETION_ACK, &chan->state);
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-	}
-
-
-	if (ioat2_ring_active(ioat))
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-	else {
-		spin_lock_bh(&ioat->prep_lock);
-		check_active(ioat);
-		spin_unlock_bh(&ioat->prep_lock);
-	}
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static int ioat2_reset_hw(struct ioat_chan_common *chan)
-{
-	/* throw away whatever the channel was doing and get it initialized */
-	u32 chanerr;
-
-	ioat2_quiesce(chan, msecs_to_jiffies(100));
-
-	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-
-	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
-}
-
-/**
- * ioat2_enumerate_channels - find and initialize the device's channels
- * @device: the device to be enumerated
- */
-int ioat2_enumerate_channels(struct ioatdma_device *device)
-{
-	struct ioat2_dma_chan *ioat;
-	struct device *dev = &device->pdev->dev;
-	struct dma_device *dma = &device->common;
-	u8 xfercap_log;
-	int i;
-
-	INIT_LIST_HEAD(&dma->channels);
-	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
-	dma->chancnt &= 0x1f; /* bits [4:0] valid */
-	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
-		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
-			 dma->chancnt, ARRAY_SIZE(device->idx));
-		dma->chancnt = ARRAY_SIZE(device->idx);
-	}
-	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
-	xfercap_log &= 0x1f; /* bits [4:0] valid */
-	if (xfercap_log == 0)
-		return 0;
-	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
-
-	/* FIXME which i/oat version is i7300? */
-#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
-	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
-		dma->chancnt--;
-#endif
-	for (i = 0; i < dma->chancnt; i++) {
-		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
-		if (!ioat)
-			break;
-
-		ioat_init_channel(device, &ioat->base, i);
-		ioat->xfercap_log = xfercap_log;
-		spin_lock_init(&ioat->prep_lock);
-		if (device->reset_hw(&ioat->base)) {
-			i = 0;
-			break;
-		}
-	}
-	dma->chancnt = i;
-	return i;
-}
-
-static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
-{
-	struct dma_chan *c = tx->chan;
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_cookie_t cookie;
-
-	cookie = dma_cookie_assign(tx);
-	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
-
-	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state))
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	/* make descriptor updates visible before advancing ioat->head,
-	 * this is purposefully not smp_wmb() since we are also
-	 * publishing the descriptor updates to a dma device
-	 */
-	wmb();
-
-	ioat->head += ioat->produce;
-
-	ioat2_update_pending(ioat);
-	spin_unlock_bh(&ioat->prep_lock);
-
-	return cookie;
-}
-
-static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
-{
-	struct ioat_dma_descriptor *hw;
-	struct ioat_ring_ent *desc;
-	struct ioatdma_device *dma;
-	dma_addr_t phys;
-
-	dma = to_ioatdma_device(chan->device);
-	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
-	if (!hw)
-		return NULL;
-	memset(hw, 0, sizeof(*hw));
-
-	desc = kmem_cache_zalloc(ioat2_cache, flags);
-	if (!desc) {
-		pci_pool_free(dma->dma_pool, hw, phys);
-		return NULL;
-	}
-
-	dma_async_tx_descriptor_init(&desc->txd, chan);
-	desc->txd.tx_submit = ioat2_tx_submit_unlock;
-	desc->hw = hw;
-	desc->txd.phys = phys;
-	return desc;
-}
-
-static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
-{
-	struct ioatdma_device *dma;
-
-	dma = to_ioatdma_device(chan->device);
-	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
-	kmem_cache_free(ioat2_cache, desc);
-}
-
-static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
-{
-	struct ioat_ring_ent **ring;
-	int descs = 1 << order;
-	int i;
-
-	if (order > ioat_get_max_alloc_order())
-		return NULL;
-
-	/* allocate the array to hold the software ring */
-	ring = kcalloc(descs, sizeof(*ring), flags);
-	if (!ring)
-		return NULL;
-	for (i = 0; i < descs; i++) {
-		ring[i] = ioat2_alloc_ring_ent(c, flags);
-		if (!ring[i]) {
-			while (i--)
-				ioat2_free_ring_ent(ring[i], c);
-			kfree(ring);
-			return NULL;
-		}
-		set_desc_id(ring[i], i);
-	}
-
-	/* link descs */
-	for (i = 0; i < descs-1; i++) {
-		struct ioat_ring_ent *next = ring[i+1];
-		struct ioat_dma_descriptor *hw = ring[i]->hw;
-
-		hw->next = next->txd.phys;
-	}
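-	/* close the hw chain into a ring: last descriptor points back to the first */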
-	ring[i]->hw->next = ring[0]->txd.phys;
-
-	return ring;
-}
-
-void ioat2_free_chan_resources(struct dma_chan *c);
-
-/**
- * ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
- * @c: channel to be initialized
- */
-int ioat2_alloc_chan_resources(struct dma_chan *c)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioat_ring_ent **ring;
-	u64 status;
-	int order;
-	int i = 0;
-
-	/* have we already been set up? */
-	if (ioat->ring)
-		return 1 << ioat->alloc_order;
-
-	/* Set up the register to interrupt and write completion status on error */
-	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
-
-	/* allocate a completion writeback area */
-	/* doing two 32-bit MMIO writes since one 64-bit write doesn't work */
-	chan->completion = pci_pool_alloc(chan->device->completion_pool,
-					  GFP_KERNEL, &chan->completion_dma);
-	if (!chan->completion)
-		return -ENOMEM;
-
-	memset(chan->completion, 0, sizeof(*chan->completion));
-	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
-	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
-	writel(((u64) chan->completion_dma) >> 32,
-	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
-
-	order = ioat_get_alloc_order();
-	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
-	if (!ring)
-		return -ENOMEM;
-
-	spin_lock_bh(&chan->cleanup_lock);
-	spin_lock_bh(&ioat->prep_lock);
-	ioat->ring = ring;
-	ioat->head = 0;
-	ioat->issued = 0;
-	ioat->tail = 0;
-	ioat->alloc_order = order;
-	set_bit(IOAT_RUN, &chan->state);
-	spin_unlock_bh(&ioat->prep_lock);
-	spin_unlock_bh(&chan->cleanup_lock);
-
-	ioat2_start_null_desc(ioat);
-
-	/* check that we got off the ground */
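-	/* poll for up to ~20 us for the channel to report active or idle */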
-	do {
-		udelay(1);
-		status = ioat_chansts(chan);
-	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
-
-	if (is_ioat_active(status) || is_ioat_idle(status)) {
-		return 1 << ioat->alloc_order;
-	} else {
-		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-
-		dev_WARN(to_dev(chan),
-			"failed to start channel chanerr: %#x\n", chanerr);
-		ioat2_free_chan_resources(c);
-		return -EFAULT;
-	}
-}
-
-bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
-{
-	/* reshape differs from normal ring allocation in that we want
-	 * to allocate a new software ring while only
-	 * extending/truncating the hardware ring
-	 */
-	struct ioat_chan_common *chan = &ioat->base;
-	struct dma_chan *c = &chan->common;
-	const u32 curr_size = ioat2_ring_size(ioat);
-	const u16 active = ioat2_ring_active(ioat);
-	const u32 new_size = 1 << order;
-	struct ioat_ring_ent **ring;
-	u16 i;
-
-	if (order > ioat_get_max_alloc_order())
-		return false;
-
-	/* double check that we have at least 1 free descriptor */
-	if (active == curr_size)
-		return false;
-
-	/* when shrinking, verify that we can hold the current active
-	 * set in the new ring
-	 */
-	if (active >= new_size)
-		return false;
-
-	/* allocate the array to hold the software ring */
-	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
-	if (!ring)
-		return false;
-
-	/* allocate/trim descriptors as needed */
-	if (new_size > curr_size) {
-		/* copy current descriptors to the new ring */
-		for (i = 0; i < curr_size; i++) {
-			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
-			u16 new_idx = (ioat->tail+i) & (new_size-1);
-
-			ring[new_idx] = ioat->ring[curr_idx];
-			set_desc_id(ring[new_idx], new_idx);
-		}
-
-		/* add new descriptors to the ring */
-		for (i = curr_size; i < new_size; i++) {
-			u16 new_idx = (ioat->tail+i) & (new_size-1);
-
-			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
-			if (!ring[new_idx]) {
-				while (i--) {
-					u16 new_idx = (ioat->tail+i) & (new_size-1);
-
-					ioat2_free_ring_ent(ring[new_idx], c);
-				}
-				kfree(ring);
-				return false;
-			}
-			set_desc_id(ring[new_idx], new_idx);
-		}
-
-		/* hw link new descriptors */
-		for (i = curr_size-1; i < new_size; i++) {
-			u16 new_idx = (ioat->tail+i) & (new_size-1);
-			struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
-			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
-
-			hw->next = next->txd.phys;
-		}
-	} else {
-		struct ioat_dma_descriptor *hw;
-		struct ioat_ring_ent *next;
-
-		/* copy current descriptors to the new ring, dropping the
-		 * removed descriptors
-		 */
-		for (i = 0; i < new_size; i++) {
-			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
-			u16 new_idx = (ioat->tail+i) & (new_size-1);
-
-			ring[new_idx] = ioat->ring[curr_idx];
-			set_desc_id(ring[new_idx], new_idx);
-		}
-
-		/* free deleted descriptors */
-		for (i = new_size; i < curr_size; i++) {
-			struct ioat_ring_ent *ent;
-
-			ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
-			ioat2_free_ring_ent(ent, c);
-		}
-
-		/* fix up hardware ring */
-		hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
-		next = ring[(ioat->tail+new_size) & (new_size-1)];
-		hw->next = next->txd.phys;
-	}
-
-	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
-		__func__, new_size);
-
-	kfree(ioat->ring);
-	ioat->ring = ring;
-	ioat->alloc_order = order;
-
-	return true;
-}
-
-/**
- * ioat2_check_space_lock - verify space and grab ring producer lock
- * @ioat: ioat2,3 channel (ring) to operate on
- * @num_descs: allocation length
- */
-int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	bool retry;
-
- retry:
-	spin_lock_bh(&ioat->prep_lock);
-	/* never allow the last descriptor to be consumed, we need at
-	 * least one free at all times to allow for on-the-fly ring
-	 * resizing.
-	 */
-	if (likely(ioat2_ring_space(ioat) > num_descs)) {
-		dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
-			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);
-		ioat->produce = num_descs;
-		return 0;  /* with ioat->prep_lock held */
-	}
-	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
-	spin_unlock_bh(&ioat->prep_lock);
-
-	/* is another cpu already trying to expand the ring? */
-	if (retry)
-		goto retry;
-
-	spin_lock_bh(&chan->cleanup_lock);
-	spin_lock_bh(&ioat->prep_lock);
-	retry = reshape_ring(ioat, ioat->alloc_order + 1);
-	clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
-	spin_unlock_bh(&ioat->prep_lock);
-	spin_unlock_bh(&chan->cleanup_lock);
-
-	/* if we were able to expand the ring retry the allocation */
-	if (retry)
-		goto retry;
-
-	if (printk_ratelimit())
-		dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
-			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);
-
-	/* Progress reclaim in the allocation failure case. We may be
-	 * called under bh_disabled, so we need to trigger the timer
-	 * event directly.
-	 */
-	if (time_is_before_jiffies(chan->timer.expires)
-	    && timer_pending(&chan->timer)) {
-		struct ioatdma_device *device = chan->device;
-
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		device->timer_fn((unsigned long) &chan->common);
-	}
-
-	return -ENOMEM;
-}
-
-struct dma_async_tx_descriptor *
-ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
-			   dma_addr_t dma_src, size_t len, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_dma_descriptor *hw;
-	struct ioat_ring_ent *desc;
-	dma_addr_t dst = dma_dest;
-	dma_addr_t src = dma_src;
-	size_t total_len = len;
-	int num_descs, idx, i;
-
-	num_descs = ioat2_xferlen_to_descs(ioat, len);
-	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
-		idx = ioat->head;
-	else
-		return NULL;
-	i = 0;
-	do {
-		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);
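-		/* each hw descriptor moves at most 1 << xfercap_log bytes */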
-
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		hw = desc->hw;
-
-		hw->size = copy;
-		hw->ctl = 0;
-		hw->src_addr = src;
-		hw->dst_addr = dst;
-
-		len -= copy;
-		dst += copy;
-		src += copy;
-		dump_desc_dbg(ioat, desc);
-	} while (++i < num_descs);
-
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-	hw->ctl_f.compl_write = 1;
-	dump_desc_dbg(ioat, desc);
-	/* we leave the channel locked to ensure in order submission */
-
-	return &desc->txd;
-}
-
-/**
- * ioat2_free_chan_resources - release all the descriptors
- * @c: the channel to be cleaned
- */
-void ioat2_free_chan_resources(struct dma_chan *c)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *device = chan->device;
-	struct ioat_ring_ent *desc;
-	const u16 total_descs = 1 << ioat->alloc_order;
-	int descs;
-	int i;
-
-	/* Before freeing channel resources first check
-	 * if they have been previously allocated for this channel.
-	 */
-	if (!ioat->ring)
-		return;
-
-	ioat_stop(chan);
-	device->reset_hw(chan);
-
-	spin_lock_bh(&chan->cleanup_lock);
-	spin_lock_bh(&ioat->prep_lock);
-	descs = ioat2_ring_space(ioat);
-	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
-	for (i = 0; i < descs; i++) {
-		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
-		ioat2_free_ring_ent(desc, c);
-	}
-
-	if (descs < total_descs)
-		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
-			total_descs - descs);
-
-	for (i = 0; i < total_descs - descs; i++) {
-		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
-		dump_desc_dbg(ioat, desc);
-		ioat2_free_ring_ent(desc, c);
-	}
-
-	kfree(ioat->ring);
-	ioat->ring = NULL;
-	ioat->alloc_order = 0;
-	pci_pool_free(device->completion_pool, chan->completion,
-		      chan->completion_dma);
-	spin_unlock_bh(&ioat->prep_lock);
-	spin_unlock_bh(&chan->cleanup_lock);
-
-	chan->last_completion = 0;
-	chan->completion_dma = 0;
-	ioat->dmacount = 0;
-}
-
-static ssize_t ring_size_show(struct dma_chan *c, char *page)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-
-	return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
-}
-static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
-
-static ssize_t ring_active_show(struct dma_chan *c, char *page)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-
-	/* ...taken outside the lock, no need to be precise */
-	return sprintf(page, "%d\n", ioat2_ring_active(ioat));
-}
-static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
-
-static struct attribute *ioat2_attrs[] = {
-	&ring_size_attr.attr,
-	&ring_active_attr.attr,
-	&ioat_cap_attr.attr,
-	&ioat_version_attr.attr,
-	NULL,
-};
-
-struct kobj_type ioat2_ktype = {
-	.sysfs_ops = &ioat_sysfs_ops,
-	.default_attrs = ioat2_attrs,
-};
-
-int ioat2_dma_probe(struct ioatdma_device *device, int dca)
-{
-	struct pci_dev *pdev = device->pdev;
-	struct dma_device *dma;
-	struct dma_chan *c;
-	struct ioat_chan_common *chan;
-	int err;
-
-	device->enumerate_channels = ioat2_enumerate_channels;
-	device->reset_hw = ioat2_reset_hw;
-	device->cleanup_fn = ioat2_cleanup_event;
-	device->timer_fn = ioat2_timer_event;
-	device->self_test = ioat_dma_self_test;
-	dma = &device->common;
-	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
-	dma->device_issue_pending = ioat2_issue_pending;
-	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
-	dma->device_free_chan_resources = ioat2_free_chan_resources;
-	dma->device_tx_status = ioat_dma_tx_status;
-
-	err = ioat_probe(device);
-	if (err)
-		return err;
-
-	list_for_each_entry(c, &dma->channels, device_node) {
-		chan = to_chan_common(c);
-		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
-		       chan->reg_base + IOAT_DCACTRL_OFFSET);
-	}
-
-	err = ioat_register(device);
-	if (err)
-		return err;
-
-	ioat_kobject_add(device, &ioat2_ktype);
-
-	if (dca)
-		device->dca = ioat2_dca_init(pdev, device->reg_base);
-
-	return err;
-}
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
deleted file mode 100644
index bf24ebe..0000000
--- a/drivers/dma/ioat/dma_v2.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-#ifndef IOATDMA_V2_H
-#define IOATDMA_V2_H
-
-#include <linux/dmaengine.h>
-#include <linux/circ_buf.h>
-#include "dma.h"
-#include "hw.h"
-
-extern int ioat_pending_level;
-extern int ioat_ring_alloc_order;
-
-/*
- * workaround for IOAT ver.3.0 null descriptor issue
- * (channel returns error when size is 0)
- */
-#define NULL_DESC_BUFFER_SIZE 1
-
-#define IOAT_MAX_ORDER 16
-#define ioat_get_alloc_order() \
-	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
-#define ioat_get_max_alloc_order() \
-	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
-
-/* struct ioat2_dma_chan - ioat v2 / v3 channel attributes
- * @base: common ioat channel parameters
- * @xfercap_log: log2 of channel max transfer length (for fast division)
- * @head: allocated index
- * @issued: hardware notification point
- * @tail: cleanup index
- * @dmacount: identical to 'head' except for occasionally resetting to zero
- * @alloc_order: log2 of the number of allocated descriptors
- * @produce: number of descriptors to produce at submit time
- * @ring: software ring buffer implementation of hardware ring
- * @prep_lock: serializes descriptor preparation (producers)
- */
-struct ioat2_dma_chan {
-	struct ioat_chan_common base;
-	size_t xfercap_log;
-	u16 head;
-	u16 issued;
-	u16 tail;
-	u16 dmacount;
-	u16 alloc_order;
-	u16 produce;
-	struct ioat_ring_ent **ring;
-	spinlock_t prep_lock;
-};
-
-static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
-{
-	struct ioat_chan_common *chan = to_chan_common(c);
-
-	return container_of(chan, struct ioat2_dma_chan, base);
-}
-
-static inline u32 ioat2_ring_size(struct ioat2_dma_chan *ioat)
-{
-	return 1 << ioat->alloc_order;
-}
-
-/* count of descriptors in flight with the engine */
-static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
-{
-	return CIRC_CNT(ioat->head, ioat->tail, ioat2_ring_size(ioat));
-}
-
-/* count of descriptors pending submission to hardware */
-static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
-{
-	return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
-}
-
-static inline u32 ioat2_ring_space(struct ioat2_dma_chan *ioat)
-{
-	return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
-}
-
-static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
-{
-	u16 num_descs = len >> ioat->xfercap_log;
-
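-	/* round up for a partial-xfercap tail, i.e. DIV_ROUND_UP(len, 1 << xfercap_log) */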
-	num_descs += !!(len & ((1 << ioat->xfercap_log) - 1));
-	return num_descs;
-}
-
-/**
- * struct ioat_ring_ent - wrapper around hardware descriptor
- * @hw: hardware DMA descriptor (for memcpy)
- * @xor: hardware xor descriptor
- * @xor_ex: hardware xor extension descriptor
- * @pq: hardware pq descriptor
- * @pq_ex: hardware pq extension descriptor
- * @pqu: hardware pq update descriptor
- * @raw: hardware raw (un-typed) descriptor
- * @txd: the generic software descriptor for all engines
- * @len: total transaction length for unmap
- * @result: asynchronous result of validate operations
- * @id: identifier for debug
- * @sed: pointer to super extended descriptor sw desc
- */
-struct ioat_ring_ent {
-	union {
-		struct ioat_dma_descriptor *hw;
-		struct ioat_xor_descriptor *xor;
-		struct ioat_xor_ext_descriptor *xor_ex;
-		struct ioat_pq_descriptor *pq;
-		struct ioat_pq_ext_descriptor *pq_ex;
-		struct ioat_pq_update_descriptor *pqu;
-		struct ioat_raw_descriptor *raw;
-	};
-	size_t len;
-	struct dma_async_tx_descriptor txd;
-	enum sum_check_flags *result;
-	#ifdef DEBUG
-	int id;
-	#endif
-	struct ioat_sed_ent *sed;
-};
-
-static inline struct ioat_ring_ent *
-ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
-{
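-	/* ring size is a power of 2, so masking gives cheap modulo indexing */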
-	return ioat->ring[idx & (ioat2_ring_size(ioat) - 1)];
-}
-
-static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
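-	/* program the 64-bit chain address as two 32-bit MMIO writes */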
-	writel(addr & 0x00000000FFFFFFFF,
-	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
-	writel(addr >> 32,
-	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
-}
-
-int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
-int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
-struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
-int ioat2_enumerate_channels(struct ioatdma_device *device);
-struct dma_async_tx_descriptor *
-ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
-			   dma_addr_t dma_src, size_t len, unsigned long flags);
-void ioat2_issue_pending(struct dma_chan *chan);
-int ioat2_alloc_chan_resources(struct dma_chan *c);
-void ioat2_free_chan_resources(struct dma_chan *c);
-void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
-bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
-void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
-void ioat2_cleanup_event(unsigned long data);
-void ioat2_timer_event(unsigned long data);
-int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
-int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
-extern struct kobj_type ioat2_ktype;
-extern struct kmem_cache *ioat2_cache;
-#endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
deleted file mode 100644
index 64790a4..0000000
--- a/drivers/dma/ioat/dma_v3.c
+++ /dev/null
@@ -1,1717 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * BSD LICENSE
- *
- * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Support routines for v3+ hardware
- */
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/prefetch.h>
-#include "../dmaengine.h"
-#include "registers.h"
-#include "hw.h"
-#include "dma.h"
-#include "dma_v2.h"
-
-extern struct kmem_cache *ioat3_sed_cache;
-
-/* ioat hardware assumes at least two sources for raid operations */
-#define src_cnt_to_sw(x) ((x) + 2)
-#define src_cnt_to_hw(x) ((x) - 2)
-#define ndest_to_sw(x) ((x) + 1)
-#define ndest_to_hw(x) ((x) - 1)
-#define src16_cnt_to_sw(x) ((x) + 9)
-#define src16_cnt_to_hw(x) ((x) - 9)
-
-/* provide a lookup table for setting the source address in the base or
- * extended descriptor of an xor or pq descriptor
- */
-static const u8 xor_idx_to_desc = 0xe0;
-static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
-static const u8 pq_idx_to_desc = 0xf8;
-static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
-				       2, 2, 2, 2, 2, 2, 2 };
-static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
-static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
-					0, 1, 2, 3, 4, 5, 6 };
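-/* worked example: for xor source index 6, bit 6 of xor_idx_to_desc (0xe0)
- * is set, so the address lands in the extended descriptor, in raw field
- * xor_idx_to_field[6] == 1
- */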
-
-static void ioat3_eh(struct ioat2_dma_chan *ioat);
-
-static void xor_set_src(struct ioat_raw_descriptor *descs[2],
-			dma_addr_t addr, u32 offset, int idx)
-{
-	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
-
-	raw->field[xor_idx_to_field[idx]] = addr + offset;
-}
-
-static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
-{
-	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
-
-	return raw->field[pq_idx_to_field[idx]];
-}
-
-static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
-{
-	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
-
-	return raw->field[pq16_idx_to_field[idx]];
-}
-
-static void pq_set_src(struct ioat_raw_descriptor *descs[2],
-		       dma_addr_t addr, u32 offset, u8 coef, int idx)
-{
-	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
-	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
-
-	raw->field[pq_idx_to_field[idx]] = addr + offset;
-	pq->coef[idx] = coef;
-}
-
-static bool is_jf_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static bool is_snb_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static bool is_ivb_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static bool is_hsw_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static bool is_xeon_cb32(struct pci_dev *pdev)
-{
-	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
-		is_hsw_ioat(pdev);
-}
-
-static bool is_bwd_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
-	/* even though not Atom, BDX-DE has the same DMA silicon */
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static bool is_bwd_noraid(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
-			dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
-{
-	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
-	struct ioat_pq16a_descriptor *pq16 =
-		(struct ioat_pq16a_descriptor *)desc[1];
-	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
-
-	raw->field[pq16_idx_to_field[idx]] = addr + offset;
-
-	if (idx < 8)
-		pq->coef[idx] = coef;
-	else
-		pq16->coef[idx - 8] = coef;
-}
-
-static struct ioat_sed_ent *
-ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
-{
-	struct ioat_sed_ent *sed;
-	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
-
-	sed = kmem_cache_alloc(ioat3_sed_cache, flags);
-	if (!sed)
-		return NULL;
-
-	sed->hw_pool = hw_pool;
-	sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
-				 flags, &sed->dma);
-	if (!sed->hw) {
-		kmem_cache_free(ioat3_sed_cache, sed);
-		return NULL;
-	}
-
-	return sed;
-}
-
-static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
-{
-	if (!sed)
-		return;
-
-	dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
-	kmem_cache_free(ioat3_sed_cache, sed);
-}
-
-static bool desc_has_ext(struct ioat_ring_ent *desc)
-{
-	struct ioat_dma_descriptor *hw = desc->hw;
-
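-	/* xor ops with more than 5 sources and pq ops with more than 3
-	 * sources spill into an extended descriptor in the next ring slot
-	 */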
-	if (hw->ctl_f.op == IOAT_OP_XOR ||
-	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
-		struct ioat_xor_descriptor *xor = desc->xor;
-
-		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
-			return true;
-	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
-		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
-		struct ioat_pq_descriptor *pq = desc->pq;
-
-		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
-			return true;
-	}
-
-	return false;
-}
-
-static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
-{
-	u64 phys_complete;
-	u64 completion;
-
-	completion = *chan->completion;
-	phys_complete = ioat_chansts_to_addr(completion);
-
-	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
-		(unsigned long long) phys_complete);
-
-	return phys_complete;
-}
-
-static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
-				   u64 *phys_complete)
-{
-	*phys_complete = ioat3_get_current_completion(chan);
-	if (*phys_complete == chan->last_completion)
-		return false;
-
-	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
-	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	return true;
-}
-
-static void
-desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
-{
-	struct ioat_dma_descriptor *hw = desc->hw;
-
-	switch (hw->ctl_f.op) {
-	case IOAT_OP_PQ_VAL:
-	case IOAT_OP_PQ_VAL_16S:
-	{
-		struct ioat_pq_descriptor *pq = desc->pq;
-
-		/* check if an error status was written back */
-		if (!pq->dwbes_f.wbes)
-			return;
-
-		/* need to set a chanerr var for checking to clear later */
-
-		if (pq->dwbes_f.p_val_err)
-			*desc->result |= SUM_CHECK_P_RESULT;
-
-		if (pq->dwbes_f.q_val_err)
-			*desc->result |= SUM_CHECK_Q_RESULT;
-
-		return;
-	}
-	default:
-		return;
-	}
-}
-
-/**
- * __cleanup - reclaim used descriptors
- * @ioat: channel (ring) to clean
- * @phys_complete: hardware-reported completion address to clean up to
- *
- * The difference from the dma_v2.c __cleanup() is that this routine
- * handles extended descriptors and dma-unmapping raid operations.
- */
-static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *device = chan->device;
-	struct ioat_ring_ent *desc;
-	bool seen_current = false;
-	int idx = ioat->tail, i;
-	u16 active;
-
-	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
-		__func__, ioat->head, ioat->tail, ioat->issued);
-
-	/*
-	 * At restart of the channel, the completion address and the
-	 * channel status will be 0 due to starting a new chain. Since
- * it's a new chain and the first descriptor "fails", there is
-	 * nothing to clean up. We do not want to reap the entire submitted
-	 * chain due to this 0 address value and then BUG.
-	 */
-	if (!phys_complete)
-		return;
-
-	active = ioat2_ring_active(ioat);
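-	/* reap [tail, tail + active) until we pass the descriptor whose
-	 * address the hardware last wrote to the completion area
-	 */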
-	for (i = 0; i < active && !seen_current; i++) {
-		struct dma_async_tx_descriptor *tx;
-
-		smp_read_barrier_depends();
-		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		dump_desc_dbg(ioat, desc);
-
-		/* set err stat if we are using dwbes */
-		if (device->cap & IOAT_CAP_DWBES)
-			desc_get_errstat(ioat, desc);
-
-		tx = &desc->txd;
-		if (tx->cookie) {
-			dma_cookie_complete(tx);
-			dma_descriptor_unmap(tx);
-			if (tx->callback) {
-				tx->callback(tx->callback_param);
-				tx->callback = NULL;
-			}
-		}
-
-		if (tx->phys == phys_complete)
-			seen_current = true;
-
-		/* skip extended descriptors */
-		if (desc_has_ext(desc)) {
-			BUG_ON(i + 1 >= active);
-			i++;
-		}
-
-		/* cleanup super extended descriptors */
-		if (desc->sed) {
-			ioat3_free_sed(device, desc->sed);
-			desc->sed = NULL;
-		}
-	}
-	smp_mb(); /* finish all descriptor reads before incrementing tail */
-	ioat->tail = idx + i;
-	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
-	chan->last_completion = phys_complete;
-
-	if (active - i == 0) {
-		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
-			__func__);
-		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
-		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	}
-	/* 5 microsecond delay per pending descriptor */
-	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
-	       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
-}
-
-static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	u64 phys_complete;
-
-	spin_lock_bh(&chan->cleanup_lock);
-
-	if (ioat3_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-
-	if (is_ioat_halted(*chan->completion)) {
-		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-
-		if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
-			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-			ioat3_eh(ioat);
-		}
-	}
-
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static void ioat3_cleanup_event(unsigned long data)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-
-	ioat3_cleanup(ioat);
-	if (!test_bit(IOAT_RUN, &chan->state))
-		return;
-	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
-}
-
-static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	u64 phys_complete;
-
-	ioat2_quiesce(chan, 0);
-	if (ioat3_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-
-	__ioat2_restart_chan(ioat);
-}
-
-static void ioat3_eh(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct pci_dev *pdev = to_pdev(chan);
-	struct ioat_dma_descriptor *hw;
-	struct dma_async_tx_descriptor *tx;
-	u64 phys_complete;
-	struct ioat_ring_ent *desc;
-	u32 err_handled = 0;
-	u32 chanerr_int;
-	u32 chanerr;
-
-	/* cleanup so tail points to descriptor that caused the error */
-	if (ioat3_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-
-	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
-
-	dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
-		__func__, chanerr, chanerr_int);
-
-	desc = ioat2_get_ring_ent(ioat, ioat->tail);
-	hw = desc->hw;
-	dump_desc_dbg(ioat, desc);
-
-	switch (hw->ctl_f.op) {
-	case IOAT_OP_XOR_VAL:
-		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
-			*desc->result |= SUM_CHECK_P_RESULT;
-			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
-		}
-		break;
-	case IOAT_OP_PQ_VAL:
-	case IOAT_OP_PQ_VAL_16S:
-		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
-			*desc->result |= SUM_CHECK_P_RESULT;
-			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
-		}
-		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
-			*desc->result |= SUM_CHECK_Q_RESULT;
-			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
-		}
-		break;
-	}
-
-	/* fault on unhandled error or spurious halt */
-	if (chanerr ^ err_handled || chanerr == 0) {
-		dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
-			__func__, chanerr, err_handled);
-		BUG();
-	} else { /* cleanup the faulty descriptor */
-		tx = &desc->txd;
-		if (tx->cookie) {
-			dma_cookie_complete(tx);
-			dma_descriptor_unmap(tx);
-			if (tx->callback) {
-				tx->callback(tx->callback_param);
-				tx->callback = NULL;
-			}
-		}
-	}
-
-	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
-
-	/* mark faulting descriptor as complete */
-	*chan->completion = desc->txd.phys;
-
-	spin_lock_bh(&ioat->prep_lock);
-	ioat3_restart_channel(ioat);
-	spin_unlock_bh(&ioat->prep_lock);
-}
-
-static void check_active(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	if (ioat2_ring_active(ioat)) {
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		return;
-	}
-
-	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
-		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	else if (ioat->alloc_order > ioat_get_alloc_order()) {
-		/* if the ring is idle, empty, and oversized try to step
-		 * down the size
-		 */
-		reshape_ring(ioat, ioat->alloc_order - 1);
-
-		/* keep shrinking until we get back to our minimum
-		 * default size
-		 */
-		if (ioat->alloc_order > ioat_get_alloc_order())
-			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	}
-}
-
-static void ioat3_timer_event(unsigned long data)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-	u64 status;
-
-	status = ioat_chansts(chan);
-
-	/* when halted due to errors check for channel
-	 * programming errors before advancing the completion state
-	 */
-	if (is_ioat_halted(status)) {
-		u32 chanerr;
-
-		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
-			__func__, chanerr);
-		if (test_bit(IOAT_RUN, &chan->state))
-			BUG_ON(is_ioat_bug(chanerr));
-		else /* we never got off the ground */
-			return;
-	}
-
-	/* if we haven't made progress and we have already
-	 * acknowledged a pending completion once, then be more
-	 * forceful with a restart
-	 */
-	spin_lock_bh(&chan->cleanup_lock);
-	if (ioat_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
-		spin_lock_bh(&ioat->prep_lock);
-		ioat3_restart_channel(ioat);
-		spin_unlock_bh(&ioat->prep_lock);
-		spin_unlock_bh(&chan->cleanup_lock);
-		return;
-	} else {
-		set_bit(IOAT_COMPLETION_ACK, &chan->state);
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-	}
-
-	if (ioat2_ring_active(ioat))
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-	else {
-		spin_lock_bh(&ioat->prep_lock);
-		check_active(ioat);
-		spin_unlock_bh(&ioat->prep_lock);
-	}
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static enum dma_status
-ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
-		struct dma_tx_state *txstate)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	enum dma_status ret;
-
-	ret = dma_cookie_status(c, cookie, txstate);
-	if (ret == DMA_COMPLETE)
-		return ret;
-
-	ioat3_cleanup(ioat);
-
-	return dma_cookie_status(c, cookie, txstate);
-}
-
-static struct dma_async_tx_descriptor *
-__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
-		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
-		      size_t len, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_ring_ent *compl_desc;
-	struct ioat_ring_ent *desc;
-	struct ioat_ring_ent *ext;
-	size_t total_len = len;
-	struct ioat_xor_descriptor *xor;
-	struct ioat_xor_ext_descriptor *xor_ex = NULL;
-	struct ioat_dma_descriptor *hw;
-	int num_descs, with_ext, idx, i;
-	u32 offset = 0;
-	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
-
-	BUG_ON(src_cnt < 2);
-
-	num_descs = ioat2_xferlen_to_descs(ioat, len);
-	/* we need 2x the number of descriptors to cover greater than 5
-	 * sources
-	 */
-	if (src_cnt > 5) {
-		with_ext = 1;
-		num_descs *= 2;
-	} else
-		with_ext = 0;
-
-	/* completion writes from the raid engine may pass completion
-	 * writes from the legacy engine, so we need one extra null
-	 * (legacy) descriptor to ensure all completion writes arrive in
-	 * order.
-	 */
-	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
-		idx = ioat->head;
-	else
-		return NULL;
-	i = 0;
-	do {
-		struct ioat_raw_descriptor *descs[2];
-		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
-		int s;
-
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		xor = desc->xor;
-
-		/* save a branch by unconditionally retrieving the
-		 * extended descriptor xor_set_src() knows to not write
-		 * to it in the single descriptor case
-		 */
-		ext = ioat2_get_ring_ent(ioat, idx + i + 1);
-		xor_ex = ext->xor_ex;
-
-		descs[0] = (struct ioat_raw_descriptor *) xor;
-		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
-		for (s = 0; s < src_cnt; s++)
-			xor_set_src(descs, src[s], offset, s);
-		xor->size = xfer_size;
-		xor->dst_addr = dest + offset;
-		xor->ctl = 0;
-		xor->ctl_f.op = op;
-		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);
-
-		len -= xfer_size;
-		offset += xfer_size;
-		dump_desc_dbg(ioat, desc);
-	} while ((i += 1 + with_ext) < num_descs);
-
-	/* last xor descriptor carries the unmap parameters and fence bit */
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	if (result)
-		desc->result = result;
-	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-
-	/* completion descriptor carries interrupt bit */
-	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
-	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
-	hw = compl_desc->hw;
-	hw->ctl = 0;
-	hw->ctl_f.null = 1;
-	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	hw->ctl_f.compl_write = 1;
-	hw->size = NULL_DESC_BUFFER_SIZE;
-	dump_desc_dbg(ioat, compl_desc);
-
-	/* we leave the channel locked to ensure in order submission */
-	return &compl_desc->txd;
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
-	       unsigned int src_cnt, size_t len, unsigned long flags)
-{
-	return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
-		    unsigned int src_cnt, size_t len,
-		    enum sum_check_flags *result, unsigned long flags)
-{
-	/* the cleanup routine only sets bits on validate failure, it
-	 * does not clear bits on validate success... so clear it here
-	 */
-	*result = 0;
-
-	return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
-				     src_cnt - 1, len, flags);
-}
-
-static void
-dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
-{
-	struct device *dev = to_dev(&ioat->base);
-	struct ioat_pq_descriptor *pq = desc->pq;
-	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
-	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
-	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
-	int i;
-
-	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
-		" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
-		" src_cnt: %d)\n",
-		desc_id(desc), (unsigned long long) desc->txd.phys,
-		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
-		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
-		pq->ctl_f.compl_write,
-		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
-		pq->ctl_f.src_cnt);
-	for (i = 0; i < src_cnt; i++)
-		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
-			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
-	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
-	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
-	dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
-}
-
-static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
-			       struct ioat_ring_ent *desc)
-{
-	struct device *dev = to_dev(&ioat->base);
-	struct ioat_pq_descriptor *pq = desc->pq;
-	struct ioat_raw_descriptor *descs[] = { (void *)pq,
-						(void *)pq,
-						(void *)pq };
-	int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
-	int i;
-
-	if (desc->sed) {
-		descs[1] = (void *)desc->sed->hw;
-		descs[2] = (void *)desc->sed->hw + 64;
-	}
-
-	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
-		" sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
-		" src_cnt: %d)\n",
-		desc_id(desc), (unsigned long long) desc->txd.phys,
-		(unsigned long long) pq->next,
-		desc->txd.flags, pq->size, pq->ctl,
-		pq->ctl_f.op, pq->ctl_f.int_en,
-		pq->ctl_f.compl_write,
-		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
-		pq->ctl_f.src_cnt);
-	for (i = 0; i < src_cnt; i++) {
-		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
-			(unsigned long long) pq16_get_src(descs, i),
-			pq->coef[i]);
-	}
-	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
-	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
-}
-
-static struct dma_async_tx_descriptor *
-__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
-		     const dma_addr_t *dst, const dma_addr_t *src,
-		     unsigned int src_cnt, const unsigned char *scf,
-		     size_t len, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *device = chan->device;
-	struct ioat_ring_ent *compl_desc;
-	struct ioat_ring_ent *desc;
-	struct ioat_ring_ent *ext;
-	size_t total_len = len;
-	struct ioat_pq_descriptor *pq;
-	struct ioat_pq_ext_descriptor *pq_ex = NULL;
-	struct ioat_dma_descriptor *hw;
-	u32 offset = 0;
-	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
-	int i, s, idx, with_ext, num_descs;
-	int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;
-
-	dev_dbg(to_dev(chan), "%s\n", __func__);
-	/* the engine requires at least two sources (we provide
-	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
-	 */
-	BUG_ON(src_cnt + dmaf_continue(flags) < 2);
-
-	num_descs = ioat2_xferlen_to_descs(ioat, len);
-	/* we need 2x the number of descriptors to cover greater than 3
-	 * sources (we need 1 extra source in the q-only continuation
- * case and 3 extra sources in the p+q continuation case).
-	 */
-	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
-	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
-		with_ext = 1;
-		num_descs *= 2;
-	} else
-		with_ext = 0;
-
-	/* completion writes from the raid engine may pass completion
-	 * writes from the legacy engine, so we need one extra null
-	 * (legacy) descriptor to ensure all completion writes arrive in
-	 * order.
-	 */
-	if (likely(num_descs) &&
-	    ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
-		idx = ioat->head;
-	else
-		return NULL;
-	i = 0;
-	do {
-		struct ioat_raw_descriptor *descs[2];
-		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
-
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		pq = desc->pq;
-
-		/* save a branch by unconditionally retrieving the
-		 * extended descriptor pq_set_src() knows to not write
-		 * to it in the single descriptor case
-		 */
-		ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
-		pq_ex = ext->pq_ex;
-
-		descs[0] = (struct ioat_raw_descriptor *) pq;
-		descs[1] = (struct ioat_raw_descriptor *) pq_ex;
-
-		for (s = 0; s < src_cnt; s++)
-			pq_set_src(descs, src[s], offset, scf[s], s);
-
-		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
-		if (dmaf_p_disabled_continue(flags))
-			pq_set_src(descs, dst[1], offset, 1, s++);
-		else if (dmaf_continue(flags)) {
-			pq_set_src(descs, dst[0], offset, 0, s++);
-			pq_set_src(descs, dst[1], offset, 1, s++);
-			pq_set_src(descs, dst[1], offset, 0, s++);
-		}
-		pq->size = xfer_size;
-		pq->p_addr = dst[0] + offset;
-		pq->q_addr = dst[1] + offset;
-		pq->ctl = 0;
-		pq->ctl_f.op = op;
-		/* we turn on descriptor write back error status */
-		if (device->cap & IOAT_CAP_DWBES)
-			pq->ctl_f.wb_en = result ? 1 : 0;
-		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
-		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
-		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
-
-		len -= xfer_size;
-		offset += xfer_size;
-	} while ((i += 1 + with_ext) < num_descs);
-
-	/* last pq descriptor carries the unmap parameters and fence bit */
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	if (result)
-		desc->result = result;
-	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-	dump_pq_desc_dbg(ioat, desc, ext);
-
-	if (!cb32) {
-		pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-		pq->ctl_f.compl_write = 1;
-		compl_desc = desc;
-	} else {
-		/* completion descriptor carries interrupt bit */
-		compl_desc = ioat2_get_ring_ent(ioat, idx + i);
-		compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
-		hw = compl_desc->hw;
-		hw->ctl = 0;
-		hw->ctl_f.null = 1;
-		hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-		hw->ctl_f.compl_write = 1;
-		hw->size = NULL_DESC_BUFFER_SIZE;
-		dump_desc_dbg(ioat, compl_desc);
-	}
-
-	/* we leave the channel locked to ensure in order submission */
-	return &compl_desc->txd;
-}
-
-static struct dma_async_tx_descriptor *
-__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
-		       const dma_addr_t *dst, const dma_addr_t *src,
-		       unsigned int src_cnt, const unsigned char *scf,
-		       size_t len, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *device = chan->device;
-	struct ioat_ring_ent *desc;
-	size_t total_len = len;
-	struct ioat_pq_descriptor *pq;
-	u32 offset = 0;
-	u8 op;
-	int i, s, idx, num_descs;
-
-	/* this function is only called with 9-16 sources */
-	op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
-
-	dev_dbg(to_dev(chan), "%s\n", __func__);
-
-	num_descs = ioat2_xferlen_to_descs(ioat, len);
-
-	/*
-	 * 16 source pq is only available on cb3.3 and has no completion
-	 * write hw bug.
-	 */
-	if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
-		idx = ioat->head;
-	else
-		return NULL;
-
-	i = 0;
-
-	do {
-		struct ioat_raw_descriptor *descs[4];
-		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
-
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		pq = desc->pq;
-
-		descs[0] = (struct ioat_raw_descriptor *) pq;
-
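-		/* (src_cnt - 2) >> 3 picks the sed hw pool: pool 0 for up to
-		 * 9 sources, pool 1 for 10..16
-		 */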
-		desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
-		if (!desc->sed) {
-			dev_err(to_dev(chan),
-				"%s: no free sed entries\n", __func__);
-			return NULL;
-		}
-
-		pq->sed_addr = desc->sed->dma;
-		desc->sed->parent = desc;
-
-		descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
-		descs[2] = (void *)descs[1] + 64;
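-		/* the sed block is addressed as two consecutive 64-byte raw
-		 * descriptors
-		 */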
-
-		for (s = 0; s < src_cnt; s++)
-			pq16_set_src(descs, src[s], offset, scf[s], s);
-
-		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
-		if (dmaf_p_disabled_continue(flags))
-			pq16_set_src(descs, dst[1], offset, 1, s++);
-		else if (dmaf_continue(flags)) {
-			pq16_set_src(descs, dst[0], offset, 0, s++);
-			pq16_set_src(descs, dst[1], offset, 1, s++);
-			pq16_set_src(descs, dst[1], offset, 0, s++);
-		}
-
-		pq->size = xfer_size;
-		pq->p_addr = dst[0] + offset;
-		pq->q_addr = dst[1] + offset;
-		pq->ctl = 0;
-		pq->ctl_f.op = op;
-		pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
-		/* we turn on descriptor write back error status */
-		if (device->cap & IOAT_CAP_DWBES)
-			pq->ctl_f.wb_en = result ? 1 : 0;
-		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
-		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
-
-		len -= xfer_size;
-		offset += xfer_size;
-	} while (++i < num_descs);
-
-	/* last pq descriptor carries the unmap parameters and fence bit */
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	if (result)
-		desc->result = result;
-	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-
-	/* with cb3.3 we should be able to do completion w/o a null desc */
-	pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	pq->ctl_f.compl_write = 1;
-
-	dump_pq16_desc_dbg(ioat, desc);
-
-	/* we leave the channel locked to ensure in order submission */
-	return &desc->txd;
-}
-
-static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
-{
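-	/* account for the implied sources added on the continuation paths
-	 * (see the dst re-use in __ioat3_prep_pq_lock above)
-	 */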
-	if (dmaf_p_disabled_continue(flags))
-		return src_cnt + 1;
-	else if (dmaf_continue(flags))
-		return src_cnt + 3;
-	else
-		return src_cnt;
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
-	      unsigned int src_cnt, const unsigned char *scf, size_t len,
-	      unsigned long flags)
-{
-	/* specify valid address for disabled result */
-	if (flags & DMA_PREP_PQ_DISABLE_P)
-		dst[0] = dst[1];
-	if (flags & DMA_PREP_PQ_DISABLE_Q)
-		dst[1] = dst[0];
-
-	/* handle the single source multiply case from the raid6
-	 * recovery path
-	 */
-	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
-		dma_addr_t single_source[2];
-		unsigned char single_source_coef[2];
-
-		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
-		single_source[0] = src[0];
-		single_source[1] = src[0];
-		single_source_coef[0] = scf[0];
-		single_source_coef[1] = 0;
-
-		return src_cnt_flags(src_cnt, flags) > 8 ?
-			__ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
-					       2, single_source_coef, len,
-					       flags) :
-			__ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
-					     single_source_coef, len, flags);
-
-	} else {
-		return src_cnt_flags(src_cnt, flags) > 8 ?
-			__ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
-					       scf, len, flags) :
-			__ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
-					     scf, len, flags);
-	}
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
-		  unsigned int src_cnt, const unsigned char *scf, size_t len,
-		  enum sum_check_flags *pqres, unsigned long flags)
-{
-	/* specify valid address for disabled result */
-	if (flags & DMA_PREP_PQ_DISABLE_P)
-		pq[0] = pq[1];
-	if (flags & DMA_PREP_PQ_DISABLE_Q)
-		pq[1] = pq[0];
-
-	/* the cleanup routine only sets bits on validate failure, it
-	 * does not clear bits on validate success... so clear it here
-	 */
-	*pqres = 0;
-
-	return src_cnt_flags(src_cnt, flags) > 8 ?
-		__ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
-				       flags) :
-		__ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
-				     flags);
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
-		 unsigned int src_cnt, size_t len, unsigned long flags)
-{
-	unsigned char scf[src_cnt];
-	dma_addr_t pq[2];
-
-	memset(scf, 0, src_cnt);
-	pq[0] = dst;
-	flags |= DMA_PREP_PQ_DISABLE_Q;
-	pq[1] = dst; /* specify valid address for disabled result */
-
-	return src_cnt_flags(src_cnt, flags) > 8 ?
-		__ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
-				       flags) :
-		__ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
-				     flags);
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
-		     unsigned int src_cnt, size_t len,
-		     enum sum_check_flags *result, unsigned long flags)
-{
-	unsigned char scf[src_cnt];
-	dma_addr_t pq[2];
-
-	/* the cleanup routine only sets bits on validate failure, it
-	 * does not clear bits on validate success... so clear it here
-	 */
-	*result = 0;
-
-	memset(scf, 0, src_cnt);
-	pq[0] = src[0];
-	flags |= DMA_PREP_PQ_DISABLE_Q;
-	pq[1] = pq[0]; /* specify valid address for disabled result */
-
-	return src_cnt_flags(src_cnt, flags) > 8 ?
-		__ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
-				       scf, len, flags) :
-		__ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
-				     scf, len, flags);
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_ring_ent *desc;
-	struct ioat_dma_descriptor *hw;
-
-	if (ioat2_check_space_lock(ioat, 1) == 0)
-		desc = ioat2_get_ring_ent(ioat, ioat->head);
-	else
-		return NULL;
-
-	hw = desc->hw;
-	hw->ctl = 0;
-	hw->ctl_f.null = 1;
-	hw->ctl_f.int_en = 1;
-	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-	hw->ctl_f.compl_write = 1;
-	hw->size = NULL_DESC_BUFFER_SIZE;
-	hw->src_addr = 0;
-	hw->dst_addr = 0;
-
-	desc->txd.flags = flags;
-	desc->len = 1;
-
-	dump_desc_dbg(ioat, desc);
-
-	/* we leave the channel locked to ensure in order submission */
-	return &desc->txd;
-}
-
-static void ioat3_dma_test_callback(void *dma_async_param)
-{
-	struct completion *cmp = dma_async_param;
-
-	complete(cmp);
-}
-
-#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
-static int ioat_xor_val_self_test(struct ioatdma_device *device)
-{
-	int i, src_idx;
-	struct page *dest;
-	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
-	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
-	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
-	dma_addr_t dest_dma;
-	struct dma_async_tx_descriptor *tx;
-	struct dma_chan *dma_chan;
-	dma_cookie_t cookie;
-	u8 cmp_byte = 0;
-	u32 cmp_word;
-	u32 xor_val_result;
-	int err = 0;
-	struct completion cmp;
-	unsigned long tmo;
-	struct device *dev = &device->pdev->dev;
-	struct dma_device *dma = &device->common;
-	u8 op = 0;
-
-	dev_dbg(dev, "%s\n", __func__);
-
-	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
-		return 0;
-
-	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
-		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
-		if (!xor_srcs[src_idx]) {
-			while (src_idx--)
-				__free_page(xor_srcs[src_idx]);
-			return -ENOMEM;
-		}
-	}
-
-	dest = alloc_page(GFP_KERNEL);
-	if (!dest) {
-		while (src_idx--)
-			__free_page(xor_srcs[src_idx]);
-		return -ENOMEM;
-	}
-
-	/* Fill in src buffers */
-	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
-		u8 *ptr = page_address(xor_srcs[src_idx]);
-		for (i = 0; i < PAGE_SIZE; i++)
-			ptr[i] = (1 << src_idx);
-	}
-
-	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
-		cmp_byte ^= (u8) (1 << src_idx);
-
-	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
-			(cmp_byte << 8) | cmp_byte;
-
-	memset(page_address(dest), 0, PAGE_SIZE);
-
-	dma_chan = container_of(dma->channels.next, struct dma_chan,
-				device_node);
-	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
-		err = -ENODEV;
-		goto out;
-	}
-
-	/* test xor */
-	op = IOAT_OP_XOR;
-
-	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dev, dest_dma))
-		goto dma_unmap;
-
-	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
-		dma_srcs[i] = DMA_ERROR_CODE;
-	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
-		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
-					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
-			goto dma_unmap;
-	}
-	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
-				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
-				      DMA_PREP_INTERRUPT);
-
-	if (!tx) {
-		dev_err(dev, "Self-test xor prep failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	async_tx_ack(tx);
-	init_completion(&cmp);
-	tx->callback = ioat3_dma_test_callback;
-	tx->callback_param = &cmp;
-	cookie = tx->tx_submit(tx);
-	if (cookie < 0) {
-		dev_err(dev, "Self-test xor setup failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-	dma->device_issue_pending(dma_chan);
-
-	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
-	if (tmo == 0 ||
-	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
-		dev_err(dev, "Self-test xor timed out\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
-		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
-
-	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
-	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
-		u32 *ptr = page_address(dest);
-		if (ptr[i] != cmp_word) {
-			dev_err(dev, "Self-test xor failed compare\n");
-			err = -ENODEV;
-			goto free_resources;
-		}
-	}
-	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
-
-	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
-
-	/* skip validate if the capability is not present */
-	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
-		goto free_resources;
-
-	op = IOAT_OP_XOR_VAL;
-
-	/* validate the sources with the destintation page */
-	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
-		xor_val_srcs[i] = xor_srcs[i];
-	xor_val_srcs[i] = dest;
-
-	xor_val_result = 1;
-
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-		dma_srcs[i] = DMA_ERROR_CODE;
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
-		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
-					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
-			goto dma_unmap;
-	}
-	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
-					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
-					  &xor_val_result, DMA_PREP_INTERRUPT);
-	if (!tx) {
-		dev_err(dev, "Self-test zero prep failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	async_tx_ack(tx);
-	init_completion(&cmp);
-	tx->callback = ioat3_dma_test_callback;
-	tx->callback_param = &cmp;
-	cookie = tx->tx_submit(tx);
-	if (cookie < 0) {
-		dev_err(dev, "Self-test zero setup failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-	dma->device_issue_pending(dma_chan);
-
-	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
-	if (tmo == 0 ||
-	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
-		dev_err(dev, "Self-test validate timed out\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
-
-	if (xor_val_result != 0) {
-		dev_err(dev, "Self-test validate failed compare\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	memset(page_address(dest), 0, PAGE_SIZE);
-
-	/* test for non-zero parity sum */
-	op = IOAT_OP_XOR_VAL;
-
-	xor_val_result = 0;
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-		dma_srcs[i] = DMA_ERROR_CODE;
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
-		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
-					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
-			goto dma_unmap;
-	}
-	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
-					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
-					  &xor_val_result, DMA_PREP_INTERRUPT);
-	if (!tx) {
-		dev_err(dev, "Self-test 2nd zero prep failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	async_tx_ack(tx);
-	init_completion(&cmp);
-	tx->callback = ioat3_dma_test_callback;
-	tx->callback_param = &cmp;
-	cookie = tx->tx_submit(tx);
-	if (cookie < 0) {
-		dev_err(dev, "Self-test  2nd zero setup failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-	dma->device_issue_pending(dma_chan);
-
-	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
-	if (tmo == 0 ||
-	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
-		dev_err(dev, "Self-test 2nd validate timed out\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	if (xor_val_result != SUM_CHECK_P_RESULT) {
-		dev_err(dev, "Self-test validate failed compare\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
-
-	goto free_resources;
-dma_unmap:
-	if (op == IOAT_OP_XOR) {
-		if (dest_dma != DMA_ERROR_CODE)
-			dma_unmap_page(dev, dest_dma, PAGE_SIZE,
-				       DMA_FROM_DEVICE);
-		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
-			if (dma_srcs[i] != DMA_ERROR_CODE)
-				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
-					       DMA_TO_DEVICE);
-	} else if (op == IOAT_OP_XOR_VAL) {
-		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-			if (dma_srcs[i] != DMA_ERROR_CODE)
-				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
-					       DMA_TO_DEVICE);
-	}
-free_resources:
-	dma->device_free_chan_resources(dma_chan);
-out:
-	src_idx = IOAT_NUM_SRC_TEST;
-	while (src_idx--)
-		__free_page(xor_srcs[src_idx]);
-	__free_page(dest);
-	return err;
-}
-
-static int ioat3_dma_self_test(struct ioatdma_device *device)
-{
-	int rc = ioat_dma_self_test(device);
-
-	if (rc)
-		return rc;
-
-	rc = ioat_xor_val_self_test(device);
-	if (rc)
-		return rc;
-
-	return 0;
-}
-
-static int ioat3_irq_reinit(struct ioatdma_device *device)
-{
-	struct pci_dev *pdev = device->pdev;
-	int irq = pdev->irq, i;
-
-	if (!is_bwd_ioat(pdev))
-		return 0;
-
-	switch (device->irq_mode) {
-	case IOAT_MSIX:
-		for (i = 0; i < device->common.chancnt; i++) {
-			struct msix_entry *msix = &device->msix_entries[i];
-			struct ioat_chan_common *chan;
-
-			chan = ioat_chan_by_index(device, i);
-			devm_free_irq(&pdev->dev, msix->vector, chan);
-		}
-
-		pci_disable_msix(pdev);
-		break;
-	case IOAT_MSI:
-		pci_disable_msi(pdev);
-		/* fall through */
-	case IOAT_INTX:
-		devm_free_irq(&pdev->dev, irq, device);
-		break;
-	default:
-		return 0;
-	}
-	device->irq_mode = IOAT_NOIRQ;
-
-	return ioat_dma_setup_interrupts(device);
-}
-
-static int ioat3_reset_hw(struct ioat_chan_common *chan)
-{
-	/* throw away whatever the channel was doing and get it
-	 * initialized, with ioat3 specific workarounds
-	 */
-	struct ioatdma_device *device = chan->device;
-	struct pci_dev *pdev = device->pdev;
-	u32 chanerr;
-	u16 dev_id;
-	int err;
-
-	ioat2_quiesce(chan, msecs_to_jiffies(100));
-
-	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-
-	if (device->version < IOAT_VER_3_3) {
-		/* clear any pending errors */
-		err = pci_read_config_dword(pdev,
-				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
-		if (err) {
-			dev_err(&pdev->dev,
-				"channel error register unreachable\n");
-			return err;
-		}
-		pci_write_config_dword(pdev,
-				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
-
-		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
-		 * (workaround for spurious config parity error after restart)
-		 */
-		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
-		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
-			pci_write_config_dword(pdev,
-					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
-					       0x10);
-		}
-	}
-
-	err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
-	if (!err)
-		err = ioat3_irq_reinit(device);
-
-	if (err)
-		dev_err(&pdev->dev, "Failed to reset: %d\n", err);
-
-	return err;
-}
-
-static void ioat3_intr_quirk(struct ioatdma_device *device)
-{
-	struct dma_device *dma;
-	struct dma_chan *c;
-	struct ioat_chan_common *chan;
-	u32 errmask;
-
-	dma = &device->common;
-
-	/*
-	 * if we have descriptor write back error status, we mask the
-	 * error interrupts
-	 */
-	if (device->cap & IOAT_CAP_DWBES) {
-		list_for_each_entry(c, &dma->channels, device_node) {
-			chan = to_chan_common(c);
-			errmask = readl(chan->reg_base +
-					IOAT_CHANERR_MASK_OFFSET);
-			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
-				   IOAT_CHANERR_XOR_Q_ERR;
-			writel(errmask, chan->reg_base +
-					IOAT_CHANERR_MASK_OFFSET);
-		}
-	}
-}
-
-int ioat3_dma_probe(struct ioatdma_device *device, int dca)
-{
-	struct pci_dev *pdev = device->pdev;
-	int dca_en = system_has_dca_enabled(pdev);
-	struct dma_device *dma;
-	struct dma_chan *c;
-	struct ioat_chan_common *chan;
-	bool is_raid_device = false;
-	int err;
-
-	device->enumerate_channels = ioat2_enumerate_channels;
-	device->reset_hw = ioat3_reset_hw;
-	device->self_test = ioat3_dma_self_test;
-	device->intr_quirk = ioat3_intr_quirk;
-	dma = &device->common;
-	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
-	dma->device_issue_pending = ioat2_issue_pending;
-	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
-	dma->device_free_chan_resources = ioat2_free_chan_resources;
-
-	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
-	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
-
-	device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
-
-	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
-		device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
-
-	/* dca is incompatible with raid operations */
-	if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
-		device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
-
-	if (device->cap & IOAT_CAP_XOR) {
-		is_raid_device = true;
-		dma->max_xor = 8;
-
-		dma_cap_set(DMA_XOR, dma->cap_mask);
-		dma->device_prep_dma_xor = ioat3_prep_xor;
-
-		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
-		dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
-	}
-
-	if (device->cap & IOAT_CAP_PQ) {
-		is_raid_device = true;
-
-		dma->device_prep_dma_pq = ioat3_prep_pq;
-		dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
-		dma_cap_set(DMA_PQ, dma->cap_mask);
-		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
-
-		if (device->cap & IOAT_CAP_RAID16SS) {
-			dma_set_maxpq(dma, 16, 0);
-		} else {
-			dma_set_maxpq(dma, 8, 0);
-		}
-
-		if (!(device->cap & IOAT_CAP_XOR)) {
-			dma->device_prep_dma_xor = ioat3_prep_pqxor;
-			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
-			dma_cap_set(DMA_XOR, dma->cap_mask);
-			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
-
-			if (device->cap & IOAT_CAP_RAID16SS) {
-				dma->max_xor = 16;
-			} else {
-				dma->max_xor = 8;
-			}
-		}
-	}
-
-	dma->device_tx_status = ioat3_tx_status;
-	device->cleanup_fn = ioat3_cleanup_event;
-	device->timer_fn = ioat3_timer_event;
-
-	/* starting with CB3.3 super extended descriptors are supported */
-	if (device->cap & IOAT_CAP_RAID16SS) {
-		char pool_name[14];
-		int i;
-
-		for (i = 0; i < MAX_SED_POOLS; i++) {
-			snprintf(pool_name, 14, "ioat_hw%d_sed", i);
-
-			/* allocate SED DMA pool */
-			device->sed_hw_pool[i] = dmam_pool_create(pool_name,
-					&pdev->dev,
-					SED_SIZE * (i + 1), 64, 0);
-			if (!device->sed_hw_pool[i])
-				return -ENOMEM;
-
-		}
-	}
-
-	err = ioat_probe(device);
-	if (err)
-		return err;
-
-	list_for_each_entry(c, &dma->channels, device_node) {
-		chan = to_chan_common(c);
-		writel(IOAT_DMA_DCA_ANY_CPU,
-		       chan->reg_base + IOAT_DCACTRL_OFFSET);
-	}
-
-	err = ioat_register(device);
-	if (err)
-		return err;
-
-	ioat_kobject_add(device, &ioat2_ktype);
-
-	if (dca)
-		device->dca = ioat3_dca_init(pdev, device->reg_base);
-
-	return 0;
-}
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index a3e731e..690e3b4 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -21,11 +21,6 @@
 #define IOAT_MMIO_BAR		0
 
 /* CB device ID's */
-#define IOAT_PCI_DID_5000       0x1A38
-#define IOAT_PCI_DID_CNB        0x360B
-#define IOAT_PCI_DID_SCNB       0x65FF
-#define IOAT_PCI_DID_SNB        0x402F
-
 #define PCI_DEVICE_ID_INTEL_IOAT_IVB0	0x0e20
 #define PCI_DEVICE_ID_INTEL_IOAT_IVB1	0x0e21
 #define PCI_DEVICE_ID_INTEL_IOAT_IVB2	0x0e22
@@ -58,6 +53,17 @@
 #define PCI_DEVICE_ID_INTEL_IOAT_BDXDE2	0x6f52
 #define PCI_DEVICE_ID_INTEL_IOAT_BDXDE3	0x6f53
 
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX0	0x6f20
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX1	0x6f21
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX2	0x6f22
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX3	0x6f23
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX4	0x6f24
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX5	0x6f25
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX6	0x6f26
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX7	0x6f27
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX8	0x6f2e
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX9	0x6f2f
+
 #define IOAT_VER_1_2            0x12    /* Version 1.2 */
 #define IOAT_VER_2_0            0x20    /* Version 2.0 */
 #define IOAT_VER_3_0            0x30    /* Version 3.0 */
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
new file mode 100644
index 0000000..1c3c9b0
--- /dev/null
+++ b/drivers/dma/ioat/init.c
@@ -0,0 +1,1314 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/prefetch.h>
+#include <linux/dca.h>
+#include "dma.h"
+#include "registers.h"
+#include "hw.h"
+
+#include "../dmaengine.h"
+
+MODULE_VERSION(IOAT_DMA_VERSION);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+static struct pci_device_id ioat_pci_tbl[] = {
+	/* I/OAT v3 platforms */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
+
+	/* I/OAT v3.2 platforms */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },
+
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
+
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },
+
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },
+
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },
+
+	/* I/OAT v3.3 platforms */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
+
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
+
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
+
+static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static void ioat_remove(struct pci_dev *pdev);
+static void
+ioat_init_channel(struct ioatdma_device *ioat_dma,
+		  struct ioatdma_chan *ioat_chan, int idx);
+static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
+static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
+static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
+
+static int ioat_dca_enabled = 1;
+module_param(ioat_dca_enabled, int, 0644);
+MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
+int ioat_pending_level = 4;
+module_param(ioat_pending_level, int, 0644);
+MODULE_PARM_DESC(ioat_pending_level,
+		 "high-water mark for pushing ioat descriptors (default: 4)");
+int ioat_ring_alloc_order = 8;
+module_param(ioat_ring_alloc_order, int, 0644);
+MODULE_PARM_DESC(ioat_ring_alloc_order,
+		 "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)");
+int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
+module_param(ioat_ring_max_alloc_order, int, 0644);
+MODULE_PARM_DESC(ioat_ring_max_alloc_order,
+		 "ioat+: upper limit for ring size (default: 16)");
+static char ioat_interrupt_style[32] = "msix";
+module_param_string(ioat_interrupt_style, ioat_interrupt_style,
+		    sizeof(ioat_interrupt_style), 0644);
+MODULE_PARM_DESC(ioat_interrupt_style,
+		 "set ioat interrupt style: msix (default), msi, intx");
+
+struct kmem_cache *ioat_cache;
+struct kmem_cache *ioat_sed_cache;
+
+static bool is_jf_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_snb_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_ivb_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_hsw_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_bdx_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX0:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX1:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX3:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX4:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX5:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX6:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX7:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX8:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_xeon_cb32(struct pci_dev *pdev)
+{
+	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
+		is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
+}
+
+bool is_bwd_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+	/* even though not Atom, BDX-DE has the same DMA silicon */
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_bwd_noraid(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/*
+ * Perform an IOAT transaction to verify the HW works.
+ */
+#define IOAT_TEST_SIZE 2000
+
+static void ioat_dma_test_callback(void *dma_async_param)
+{
+	struct completion *cmp = dma_async_param;
+
+	complete(cmp);
+}
+
+/**
+ * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
+ * @ioat_dma: dma device to be tested
+ */
+static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
+{
+	int i;
+	u8 *src;
+	u8 *dest;
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct device *dev = &ioat_dma->pdev->dev;
+	struct dma_chan *dma_chan;
+	struct dma_async_tx_descriptor *tx;
+	dma_addr_t dma_dest, dma_src;
+	dma_cookie_t cookie;
+	int err = 0;
+	struct completion cmp;
+	unsigned long tmo;
+	unsigned long flags;
+
+	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
+	if (!src)
+		return -ENOMEM;
+	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
+	if (!dest) {
+		kfree(src);
+		return -ENOMEM;
+	}
+
+	/* Fill in src buffer */
+	for (i = 0; i < IOAT_TEST_SIZE; i++)
+		src[i] = (u8)i;
+
+	/* Start copy, using first DMA channel */
+	dma_chan = container_of(dma->channels.next, struct dma_chan,
+				device_node);
+	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
+		dev_err(dev, "selftest cannot allocate chan resource\n");
+		err = -ENODEV;
+		goto out;
+	}
+
+	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, dma_src)) {
+		dev_err(dev, "mapping src buffer failed\n");
+		goto free_resources;
+	}
+	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, dma_dest)) {
+		dev_err(dev, "mapping dest buffer failed\n");
+		goto unmap_src;
+	}
+	flags = DMA_PREP_INTERRUPT;
+	tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
+						      dma_src, IOAT_TEST_SIZE,
+						      flags);
+	if (!tx) {
+		dev_err(dev, "Self-test prep failed, disabling\n");
+		err = -ENODEV;
+		goto unmap_dma;
+	}
+
+	async_tx_ack(tx);
+	init_completion(&cmp);
+	tx->callback = ioat_dma_test_callback;
+	tx->callback_param = &cmp;
+	cookie = tx->tx_submit(tx);
+	if (cookie < 0) {
+		dev_err(dev, "Self-test setup failed, disabling\n");
+		err = -ENODEV;
+		goto unmap_dma;
+	}
+	dma->device_issue_pending(dma_chan);
+
+	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+		dev_err(dev, "Self-test copy timed out, disabling\n");
+		err = -ENODEV;
+		goto unmap_dma;
+	}
+	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
+		dev_err(dev, "Self-test copy failed compare, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
+unmap_dma:
+	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+unmap_src:
+	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+free_resources:
+	dma->device_free_chan_resources(dma_chan);
+out:
+	kfree(src);
+	kfree(dest);
+	return err;
+}
+
+/**
+ * ioat_dma_setup_interrupts - setup interrupt handler
+ * @ioat_dma: ioat dma device
+ */
+int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
+{
+	struct ioatdma_chan *ioat_chan;
+	struct pci_dev *pdev = ioat_dma->pdev;
+	struct device *dev = &pdev->dev;
+	struct msix_entry *msix;
+	int i, j, msixcnt;
+	int err = -EINVAL;
+	u8 intrctrl = 0;
+
+	if (!strcmp(ioat_interrupt_style, "msix"))
+		goto msix;
+	if (!strcmp(ioat_interrupt_style, "msi"))
+		goto msi;
+	if (!strcmp(ioat_interrupt_style, "intx"))
+		goto intx;
+	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
+	goto err_no_irq;
+
+msix:
+	/* The number of MSI-X vectors should equal the number of channels */
+	msixcnt = ioat_dma->dma_dev.chancnt;
+	for (i = 0; i < msixcnt; i++)
+		ioat_dma->msix_entries[i].entry = i;
+
+	err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
+	if (err)
+		goto msi;
+
+	for (i = 0; i < msixcnt; i++) {
+		msix = &ioat_dma->msix_entries[i];
+		ioat_chan = ioat_chan_by_index(ioat_dma, i);
+		err = devm_request_irq(dev, msix->vector,
+				       ioat_dma_do_interrupt_msix, 0,
+				       "ioat-msix", ioat_chan);
+		if (err) {
+			for (j = 0; j < i; j++) {
+				msix = &ioat_dma->msix_entries[j];
+				ioat_chan = ioat_chan_by_index(ioat_dma, j);
+				devm_free_irq(dev, msix->vector, ioat_chan);
+			}
+			goto msi;
+		}
+	}
+	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
+	ioat_dma->irq_mode = IOAT_MSIX;
+	goto done;
+
+msi:
+	err = pci_enable_msi(pdev);
+	if (err)
+		goto intx;
+
+	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
+			       "ioat-msi", ioat_dma);
+	if (err) {
+		pci_disable_msi(pdev);
+		goto intx;
+	}
+	ioat_dma->irq_mode = IOAT_MSI;
+	goto done;
+
+intx:
+	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
+			       IRQF_SHARED, "ioat-intx", ioat_dma);
+	if (err)
+		goto err_no_irq;
+
+	ioat_dma->irq_mode = IOAT_INTX;
+done:
+	if (is_bwd_ioat(pdev))
+		ioat_intr_quirk(ioat_dma);
+	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
+	writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
+	return 0;
+
+err_no_irq:
+	/* Disable all interrupt generation */
+	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
+	ioat_dma->irq_mode = IOAT_NOIRQ;
+	dev_err(dev, "no usable interrupts\n");
+	return err;
+}
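
The routine above walks a strict fallback ladder: MSI-X with one vector per channel first, a single plain MSI vector next, and shared legacy INTx last, with each failed rung undoing its own setup before the next is tried. A minimal sketch of the same ladder for a generic PCI device follows; demo_isr and demo_setup_irq are hypothetical names used only for illustration, and per-vector bookkeeping is trimmed down to the shape of the pattern.

#include <linux/pci.h>
#include <linux/interrupt.h>

/* Hypothetical handler, present only so the sketch is complete. */
static irqreturn_t demo_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_setup_irq(struct pci_dev *pdev, struct msix_entry *entries,
			  int nvec)
{
	int i, err;

	for (i = 0; i < nvec; i++)
		entries[i].entry = i;

	/* Rung 1: one MSI-X vector per channel. */
	err = pci_enable_msix_exact(pdev, entries, nvec);
	if (!err)
		return 0;	/* caller then requests one IRQ per vector */

	/* Rung 2: a single MSI vector. */
	if (!pci_enable_msi(pdev)) {
		if (!request_irq(pdev->irq, demo_isr, 0, "demo-msi", pdev))
			return 0;
		pci_disable_msi(pdev);	/* undo this rung before falling */
	}

	/* Rung 3: legacy INTx, which may be shared with other devices. */
	return request_irq(pdev->irq, demo_isr, IRQF_SHARED, "demo-intx", pdev);
}
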
+
+static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
+{
+	/* Disable all interrupt generation */
+	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
+}
+
+static int ioat_probe(struct ioatdma_device *ioat_dma)
+{
+	int err = -ENODEV;
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct pci_dev *pdev = ioat_dma->pdev;
+	struct device *dev = &pdev->dev;
+
+	/* DMA coherent memory pool for DMA descriptor allocations */
+	ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
+					     sizeof(struct ioat_dma_descriptor),
+					     64, 0);
+	if (!ioat_dma->dma_pool) {
+		err = -ENOMEM;
+		goto err_dma_pool;
+	}
+
+	ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
+						    sizeof(u64),
+						    SMP_CACHE_BYTES,
+						    SMP_CACHE_BYTES);
+
+	if (!ioat_dma->completion_pool) {
+		err = -ENOMEM;
+		goto err_completion_pool;
+	}
+
+	ioat_enumerate_channels(ioat_dma);
+
+	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+	dma->dev = &pdev->dev;
+
+	if (!dma->chancnt) {
+		dev_err(dev, "channel enumeration error\n");
+		goto err_setup_interrupts;
+	}
+
+	err = ioat_dma_setup_interrupts(ioat_dma);
+	if (err)
+		goto err_setup_interrupts;
+
+	err = ioat3_dma_self_test(ioat_dma);
+	if (err)
+		goto err_self_test;
+
+	return 0;
+
+err_self_test:
+	ioat_disable_interrupts(ioat_dma);
+err_setup_interrupts:
+	pci_pool_destroy(ioat_dma->completion_pool);
+err_completion_pool:
+	pci_pool_destroy(ioat_dma->dma_pool);
+err_dma_pool:
+	return err;
+}
+
+static int ioat_register(struct ioatdma_device *ioat_dma)
+{
+	int err = dma_async_device_register(&ioat_dma->dma_dev);
+
+	if (err) {
+		ioat_disable_interrupts(ioat_dma);
+		pci_pool_destroy(ioat_dma->completion_pool);
+		pci_pool_destroy(ioat_dma->dma_pool);
+	}
+
+	return err;
+}
+
+static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
+{
+	struct dma_device *dma = &ioat_dma->dma_dev;
+
+	ioat_disable_interrupts(ioat_dma);
+
+	ioat_kobject_del(ioat_dma);
+
+	dma_async_device_unregister(dma);
+
+	pci_pool_destroy(ioat_dma->dma_pool);
+	pci_pool_destroy(ioat_dma->completion_pool);
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+/**
+ * ioat_enumerate_channels - find and initialize the device's channels
+ * @ioat_dma: the ioat dma device to be enumerated
+ */
+static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
+{
+	struct ioatdma_chan *ioat_chan;
+	struct device *dev = &ioat_dma->pdev->dev;
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	u8 xfercap_log;
+	int i;
+
+	INIT_LIST_HEAD(&dma->channels);
+	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
+	dma->chancnt &= 0x1f; /* bits [4:0] valid */
+	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
+		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
+			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
+		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
+	}
+	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
+	xfercap_log &= 0x1f; /* bits [4:0] valid */
+	if (xfercap_log == 0)
+		return 0;
+	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
+
+	for (i = 0; i < dma->chancnt; i++) {
+		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
+		if (!ioat_chan)
+			break;
+
+		ioat_init_channel(ioat_dma, ioat_chan, i);
+		ioat_chan->xfercap_log = xfercap_log;
+		spin_lock_init(&ioat_chan->prep_lock);
+		if (ioat_reset_hw(ioat_chan)) {
+			i = 0;
+			break;
+		}
+	}
+	dma->chancnt = i;
+	return i;
+}
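
Two registers drive the enumeration above: CHANCNT, whose low five bits give the channel count (clamped to the size of the idx[] array), and XFERCAP, whose low five bits encode log2 of the largest transfer a single descriptor can carry. A one-line sketch of the decode, with 20 used purely as an example reading:

#include <linux/types.h>

/* xfercap_log holds log2(max transfer size); bits [4:0] are valid. */
static inline u64 demo_xfercap_bytes(u8 xfercap_log)
{
	return 1ULL << (xfercap_log & 0x1f);
}

/* e.g. a reading of 20 means 1 MiB per descriptor: 1ULL << 20 == 1048576 */
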
+
+/**
+ * ioat_free_chan_resources - release all the descriptors
+ * @c: the channel to be cleaned
+ */
+static void ioat_free_chan_resources(struct dma_chan *c)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct ioat_ring_ent *desc;
+	const int total_descs = 1 << ioat_chan->alloc_order;
+	int descs;
+	int i;
+
+	/* Before freeing channel resources, first check whether they
+	 * were previously allocated for this channel.
+	 */
+	if (!ioat_chan->ring)
+		return;
+
+	ioat_stop(ioat_chan);
+	ioat_reset_hw(ioat_chan);
+
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+	spin_lock_bh(&ioat_chan->prep_lock);
+	descs = ioat_ring_space(ioat_chan);
+	dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
+	for (i = 0; i < descs; i++) {
+		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
+		ioat_free_ring_ent(desc, c);
+	}
+
+	if (descs < total_descs)
+		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
+			total_descs - descs);
+
+	for (i = 0; i < total_descs - descs; i++) {
+		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
+		dump_desc_dbg(ioat_chan, desc);
+		ioat_free_ring_ent(desc, c);
+	}
+
+	kfree(ioat_chan->ring);
+	ioat_chan->ring = NULL;
+	ioat_chan->alloc_order = 0;
+	pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
+		      ioat_chan->completion_dma);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
+
+	ioat_chan->last_completion = 0;
+	ioat_chan->completion_dma = 0;
+	ioat_chan->dmacount = 0;
+}
+
+/**
+ * ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
+ * @c: channel to be initialized
+ */
+static int ioat_alloc_chan_resources(struct dma_chan *c)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioat_ring_ent **ring;
+	u64 status;
+	int order;
+	int i = 0;
+	u32 chanerr;
+
+	/* have we already been set up? */
+	if (ioat_chan->ring)
+		return 1 << ioat_chan->alloc_order;
+
+	/* Setup register to interrupt and write completion status on error */
+	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
+
+	/* allocate a completion writeback area */
+	/* do two 32-bit MMIO writes since one 64-bit write doesn't work */
+	ioat_chan->completion =
+		pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
+			       GFP_KERNEL, &ioat_chan->completion_dma);
+	if (!ioat_chan->completion)
+		return -ENOMEM;
+
+	memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion));
+	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
+	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
+	writel(((u64)ioat_chan->completion_dma) >> 32,
+	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
+	order = ioat_get_alloc_order();
+	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
+	if (!ring)
+		return -ENOMEM;
+
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+	spin_lock_bh(&ioat_chan->prep_lock);
+	ioat_chan->ring = ring;
+	ioat_chan->head = 0;
+	ioat_chan->issued = 0;
+	ioat_chan->tail = 0;
+	ioat_chan->alloc_order = order;
+	set_bit(IOAT_RUN, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
+
+	ioat_start_null_desc(ioat_chan);
+
+	/* check that we got off the ground */
+	do {
+		udelay(1);
+		status = ioat_chansts(ioat_chan);
+	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
+
+	if (is_ioat_active(status) || is_ioat_idle(status))
+		return 1 << ioat_chan->alloc_order;
+
+	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+
+	dev_WARN(to_dev(ioat_chan),
+		 "failed to start channel chanerr: %#x\n", chanerr);
+	ioat_free_chan_resources(c);
+	return -EFAULT;
+}
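
The completion writeback setup above programs a 64-bit DMA address with two 32-bit register writes because, per the in-line comment, a single 64-bit MMIO store does not work on this hardware. A sketch of just that split, assuming reg_base points at the channel's registers; the LOW/HIGH offsets are placeholders standing in for the driver's real IOAT_CHANCMP_OFFSET_* definitions:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Placeholder register offsets, for illustration only. */
#define DEMO_CHANCMP_LOW	0x18
#define DEMO_CHANCMP_HIGH	0x1c

static void demo_write_completion_addr(void __iomem *reg_base, dma_addr_t addr)
{
	/* low half first, then high: two 32-bit stores, never one 64-bit */
	writel(lower_32_bits(addr), reg_base + DEMO_CHANCMP_LOW);
	writel(upper_32_bits(addr), reg_base + DEMO_CHANCMP_HIGH);
}
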
+
+/* common channel initialization */
+static void
+ioat_init_channel(struct ioatdma_device *ioat_dma,
+		  struct ioatdma_chan *ioat_chan, int idx)
+{
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct dma_chan *c = &ioat_chan->dma_chan;
+	unsigned long data = (unsigned long) c;
+
+	ioat_chan->ioat_dma = ioat_dma;
+	ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
+	spin_lock_init(&ioat_chan->cleanup_lock);
+	ioat_chan->dma_chan.device = dma;
+	dma_cookie_init(&ioat_chan->dma_chan);
+	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
+	ioat_dma->idx[idx] = ioat_chan;
+	init_timer(&ioat_chan->timer);
+	ioat_chan->timer.function = ioat_timer_event;
+	ioat_chan->timer.data = data;
+	tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
+}
+
+#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
+static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
+{
+	int i, src_idx;
+	struct page *dest;
+	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
+	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
+	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
+	dma_addr_t dest_dma;
+	struct dma_async_tx_descriptor *tx;
+	struct dma_chan *dma_chan;
+	dma_cookie_t cookie;
+	u8 cmp_byte = 0;
+	u32 cmp_word;
+	u32 xor_val_result;
+	int err = 0;
+	struct completion cmp;
+	unsigned long tmo;
+	struct device *dev = &ioat_dma->pdev->dev;
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	u8 op = 0;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
+		return 0;
+
+	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
+		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
+		if (!xor_srcs[src_idx]) {
+			while (src_idx--)
+				__free_page(xor_srcs[src_idx]);
+			return -ENOMEM;
+		}
+	}
+
+	dest = alloc_page(GFP_KERNEL);
+	if (!dest) {
+		while (src_idx--)
+			__free_page(xor_srcs[src_idx]);
+		return -ENOMEM;
+	}
+
+	/* Fill in src buffers */
+	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
+		u8 *ptr = page_address(xor_srcs[src_idx]);
+
+		for (i = 0; i < PAGE_SIZE; i++)
+			ptr[i] = (1 << src_idx);
+	}
+
+	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
+		cmp_byte ^= (u8) (1 << src_idx);
+
+	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
+			(cmp_byte << 8) | cmp_byte;
+
+	memset(page_address(dest), 0, PAGE_SIZE);
+
+	dma_chan = container_of(dma->channels.next, struct dma_chan,
+				device_node);
+	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	/* test xor */
+	op = IOAT_OP_XOR;
+
+	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, dest_dma))
+		goto dma_unmap;
+
+	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
+		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
+	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
+				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
+				      DMA_PREP_INTERRUPT);
+
+	if (!tx) {
+		dev_err(dev, "Self-test xor prep failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	async_tx_ack(tx);
+	init_completion(&cmp);
+	tx->callback = ioat_dma_test_callback;
+	tx->callback_param = &cmp;
+	cookie = tx->tx_submit(tx);
+	if (cookie < 0) {
+		dev_err(dev, "Self-test xor setup failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+	dma->device_issue_pending(dma_chan);
+
+	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+		dev_err(dev, "Self-test xor timed out\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
+	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
+		u32 *ptr = page_address(dest);
+
+		if (ptr[i] != cmp_word) {
+			dev_err(dev, "Self-test xor failed compare\n");
+			err = -ENODEV;
+			goto free_resources;
+		}
+	}
+	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+	/* skip validate if the capability is not present */
+	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
+		goto free_resources;
+
+	op = IOAT_OP_XOR_VAL;
+
+	/* validate the sources with the destination page */
+	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+		xor_val_srcs[i] = xor_srcs[i];
+	xor_val_srcs[i] = dest;
+
+	xor_val_result = 1;
+
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
+		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
+	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
+					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
+					  &xor_val_result, DMA_PREP_INTERRUPT);
+	if (!tx) {
+		dev_err(dev, "Self-test zero prep failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	async_tx_ack(tx);
+	init_completion(&cmp);
+	tx->callback = ioat_dma_test_callback;
+	tx->callback_param = &cmp;
+	cookie = tx->tx_submit(tx);
+	if (cookie < 0) {
+		dev_err(dev, "Self-test zero setup failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+	dma->device_issue_pending(dma_chan);
+
+	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+		dev_err(dev, "Self-test validate timed out\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
+	if (xor_val_result != 0) {
+		dev_err(dev, "Self-test validate failed compare\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
+	memset(page_address(dest), 0, PAGE_SIZE);
+
+	/* test for non-zero parity sum */
+	op = IOAT_OP_XOR_VAL;
+
+	xor_val_result = 0;
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
+		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
+	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
+					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
+					  &xor_val_result, DMA_PREP_INTERRUPT);
+	if (!tx) {
+		dev_err(dev, "Self-test 2nd zero prep failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	async_tx_ack(tx);
+	init_completion(&cmp);
+	tx->callback = ioat_dma_test_callback;
+	tx->callback_param = &cmp;
+	cookie = tx->tx_submit(tx);
+	if (cookie < 0) {
+		dev_err(dev, "Self-test  2nd zero setup failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+	dma->device_issue_pending(dma_chan);
+
+	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+		dev_err(dev, "Self-test 2nd validate timed out\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	if (xor_val_result != SUM_CHECK_P_RESULT) {
+		dev_err(dev, "Self-test validate failed compare\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
+	goto free_resources;
+dma_unmap:
+	if (op == IOAT_OP_XOR) {
+		if (dest_dma != DMA_ERROR_CODE)
+			dma_unmap_page(dev, dest_dma, PAGE_SIZE,
+				       DMA_FROM_DEVICE);
+		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+			if (dma_srcs[i] != DMA_ERROR_CODE)
+				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+					       DMA_TO_DEVICE);
+	} else if (op == IOAT_OP_XOR_VAL) {
+		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+			if (dma_srcs[i] != DMA_ERROR_CODE)
+				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+					       DMA_TO_DEVICE);
+	}
+free_resources:
+	dma->device_free_chan_resources(dma_chan);
+out:
+	src_idx = IOAT_NUM_SRC_TEST;
+	while (src_idx--)
+		__free_page(xor_srcs[src_idx]);
+	__free_page(dest);
+	return err;
+}
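
The expected pattern in the XOR leg above can be verified by hand: each of the six sources is filled with a distinct single-bit byte (1 << src_idx), so their XOR is 0x01 ^ 0x02 ^ 0x04 ^ 0x08 ^ 0x10 ^ 0x20 = 0x3f, and cmp_word replicates that byte four times to 0x3f3f3f3f. A standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t cmp_byte = 0;
	uint32_t cmp_word;
	int i;

	for (i = 0; i < 6; i++)		/* IOAT_NUM_SRC_TEST sources */
		cmp_byte ^= (uint8_t)(1 << i);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		   (cmp_byte << 8) | cmp_byte;

	/* prints: cmp_byte=0x3f cmp_word=0x3f3f3f3f */
	printf("cmp_byte=0x%02x cmp_word=0x%08x\n", cmp_byte, cmp_word);
	return 0;
}
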
+
+static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma)
+{
+	int rc;
+
+	rc = ioat_dma_self_test(ioat_dma);
+	if (rc)
+		return rc;
+
+	rc = ioat_xor_val_self_test(ioat_dma);
+
+	return rc;
+}
+
+static void ioat_intr_quirk(struct ioatdma_device *ioat_dma)
+{
+	struct dma_device *dma;
+	struct dma_chan *c;
+	struct ioatdma_chan *ioat_chan;
+	u32 errmask;
+
+	dma = &ioat_dma->dma_dev;
+
+	/*
+	 * if we have descriptor write back error status, we mask the
+	 * error interrupts
+	 */
+	if (ioat_dma->cap & IOAT_CAP_DWBES) {
+		list_for_each_entry(c, &dma->channels, device_node) {
+			ioat_chan = to_ioat_chan(c);
+			errmask = readl(ioat_chan->reg_base +
+					IOAT_CHANERR_MASK_OFFSET);
+			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
+				   IOAT_CHANERR_XOR_Q_ERR;
+			writel(errmask, ioat_chan->reg_base +
+					IOAT_CHANERR_MASK_OFFSET);
+		}
+	}
+}
+
+static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
+{
+	struct pci_dev *pdev = ioat_dma->pdev;
+	int dca_en = system_has_dca_enabled(pdev);
+	struct dma_device *dma;
+	struct dma_chan *c;
+	struct ioatdma_chan *ioat_chan;
+	bool is_raid_device = false;
+	int err;
+
+	dma = &ioat_dma->dma_dev;
+	dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
+	dma->device_issue_pending = ioat_issue_pending;
+	dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
+	dma->device_free_chan_resources = ioat_free_chan_resources;
+
+	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
+	dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;
+
+	ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);
+
+	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
+		ioat_dma->cap &=
+			~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
+
+	/* dca is incompatible with raid operations */
+	if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
+		ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
+
+	if (ioat_dma->cap & IOAT_CAP_XOR) {
+		is_raid_device = true;
+		dma->max_xor = 8;
+
+		dma_cap_set(DMA_XOR, dma->cap_mask);
+		dma->device_prep_dma_xor = ioat_prep_xor;
+
+		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
+		dma->device_prep_dma_xor_val = ioat_prep_xor_val;
+	}
+
+	if (ioat_dma->cap & IOAT_CAP_PQ) {
+		is_raid_device = true;
+
+		dma->device_prep_dma_pq = ioat_prep_pq;
+		dma->device_prep_dma_pq_val = ioat_prep_pq_val;
+		dma_cap_set(DMA_PQ, dma->cap_mask);
+		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
+
+		if (ioat_dma->cap & IOAT_CAP_RAID16SS)
+			dma_set_maxpq(dma, 16, 0);
+		else
+			dma_set_maxpq(dma, 8, 0);
+
+		if (!(ioat_dma->cap & IOAT_CAP_XOR)) {
+			dma->device_prep_dma_xor = ioat_prep_pqxor;
+			dma->device_prep_dma_xor_val = ioat_prep_pqxor_val;
+			dma_cap_set(DMA_XOR, dma->cap_mask);
+			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
+
+			if (ioat_dma->cap & IOAT_CAP_RAID16SS)
+				dma->max_xor = 16;
+			else
+				dma->max_xor = 8;
+		}
+	}
+
+	dma->device_tx_status = ioat_tx_status;
+
+	/* starting with CB3.3 super extended descriptors are supported */
+	if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
+		char pool_name[14];
+		int i;
+
+		for (i = 0; i < MAX_SED_POOLS; i++) {
+			snprintf(pool_name, 14, "ioat_hw%d_sed", i);
+
+			/* allocate SED DMA pool */
+			ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
+					&pdev->dev,
+					SED_SIZE * (i + 1), 64, 0);
+			if (!ioat_dma->sed_hw_pool[i])
+				return -ENOMEM;
+		}
+	}
+
+	if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
+		dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+
+	err = ioat_probe(ioat_dma);
+	if (err)
+		return err;
+
+	list_for_each_entry(c, &dma->channels, device_node) {
+		ioat_chan = to_ioat_chan(c);
+		writel(IOAT_DMA_DCA_ANY_CPU,
+		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+	}
+
+	err = ioat_register(ioat_dma);
+	if (err)
+		return err;
+
+	ioat_kobject_add(ioat_dma, &ioat_ktype);
+
+	if (dca)
+		ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);
+
+	return 0;
+}
+
+#define DRV_NAME "ioatdma"
+
+static struct pci_driver ioat_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= ioat_pci_tbl,
+	.probe		= ioat_pci_probe,
+	.remove		= ioat_remove,
+};
+
+static struct ioatdma_device *
+alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
+{
+	struct device *dev = &pdev->dev;
+	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+
+	if (!d)
+		return NULL;
+	d->pdev = pdev;
+	d->reg_base = iobase;
+	return d;
+}
+
+static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	void __iomem * const *iomap;
+	struct device *dev = &pdev->dev;
+	struct ioatdma_device *device;
+	int err;
+
+	err = pcim_enable_device(pdev);
+	if (err)
+		return err;
+
+	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
+	if (err)
+		return err;
+	iomap = pcim_iomap_table(pdev);
+	if (!iomap)
+		return -ENOMEM;
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err)
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (err)
+		return err;
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err)
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (err)
+		return err;
+
+	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
+	if (!device)
+		return -ENOMEM;
+	pci_set_master(pdev);
+	pci_set_drvdata(pdev, device);
+
+	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+	if (device->version >= IOAT_VER_3_0)
+		err = ioat3_dma_probe(device, ioat_dca_enabled);
+	else
+		return -ENODEV;
+
+	if (err) {
+		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
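
The probe above requests 64-bit streaming and coherent DMA masks and drops to 32-bit if either is refused. Since the two masks are requested identically, the same dance can be written with the combined helper dma_set_mask_and_coherent(); a minimal sketch, assuming both masks are indeed meant to match:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int demo_set_dma_masks(struct pci_dev *pdev)
{
	int err;

	/* Prefer the full 64-bit address space... */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		/* ...and settle for 32-bit addressing if the platform refuses. */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	return err;
}
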
+
+static void ioat_remove(struct pci_dev *pdev)
+{
+	struct ioatdma_device *device = pci_get_drvdata(pdev);
+
+	if (!device)
+		return;
+
+	dev_err(&pdev->dev, "Removing dma and dca services\n");
+	if (device->dca) {
+		unregister_dca_provider(device->dca, &pdev->dev);
+		free_dca_provider(device->dca);
+		device->dca = NULL;
+	}
+	ioat_dma_remove(device);
+}
+
+static int __init ioat_init_module(void)
+{
+	int err = -ENOMEM;
+
+	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
+		DRV_NAME, IOAT_DMA_VERSION);
+
+	ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
+					0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!ioat_cache)
+		return -ENOMEM;
+
+	ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
+	if (!ioat_sed_cache)
+		goto err_ioat_cache;
+
+	err = pci_register_driver(&ioat_pci_driver);
+	if (err)
+		goto err_ioat3_cache;
+
+	return 0;
+
+ err_ioat3_cache:
+	kmem_cache_destroy(ioat_sed_cache);
+
+ err_ioat_cache:
+	kmem_cache_destroy(ioat_cache);
+
+	return err;
+}
+module_init(ioat_init_module);
+
+static void __exit ioat_exit_module(void)
+{
+	pci_unregister_driver(&ioat_pci_driver);
+	kmem_cache_destroy(ioat_cache);
+	/* the SED cache must be torn down too, or it leaks on unload */
+	kmem_cache_destroy(ioat_sed_cache);
+}
+module_exit(ioat_exit_module);
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
deleted file mode 100644
index 76f0dc6..0000000
--- a/drivers/dma/ioat/pci.c
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Intel I/OAT DMA Linux driver
- * Copyright(c) 2007 - 2009 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- */
-
-/*
- * This driver supports an Intel I/OAT DMA engine, which does asynchronous
- * copy operations.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/dca.h>
-#include <linux/slab.h>
-#include "dma.h"
-#include "dma_v2.h"
-#include "registers.h"
-#include "hw.h"
-
-MODULE_VERSION(IOAT_DMA_VERSION);
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel Corporation");
-
-static struct pci_device_id ioat_pci_tbl[] = {
-	/* I/OAT v1 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB)  },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
-	{ PCI_VDEVICE(UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
-
-	/* I/OAT v2 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
-
-	/* I/OAT v3 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
-
-	/* I/OAT v3.2 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },
-
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
-
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },
-
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },
-
-	/* I/OAT v3.3 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
-
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
-
-	{ 0, }
-};
-MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
-
-static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
-static void ioat_remove(struct pci_dev *pdev);
-
-static int ioat_dca_enabled = 1;
-module_param(ioat_dca_enabled, int, 0644);
-MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
-
-struct kmem_cache *ioat2_cache;
-struct kmem_cache *ioat3_sed_cache;
-
-#define DRV_NAME "ioatdma"
-
-static struct pci_driver ioat_pci_driver = {
-	.name		= DRV_NAME,
-	.id_table	= ioat_pci_tbl,
-	.probe		= ioat_pci_probe,
-	.remove		= ioat_remove,
-};
-
-static struct ioatdma_device *
-alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
-{
-	struct device *dev = &pdev->dev;
-	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
-
-	if (!d)
-		return NULL;
-	d->pdev = pdev;
-	d->reg_base = iobase;
-	return d;
-}
-
-static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
-	void __iomem * const *iomap;
-	struct device *dev = &pdev->dev;
-	struct ioatdma_device *device;
-	int err;
-
-	err = pcim_enable_device(pdev);
-	if (err)
-		return err;
-
-	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
-	if (err)
-		return err;
-	iomap = pcim_iomap_table(pdev);
-	if (!iomap)
-		return -ENOMEM;
-
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err)
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (err)
-		return err;
-
-	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err)
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (err)
-		return err;
-
-	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
-	if (!device)
-		return -ENOMEM;
-	pci_set_master(pdev);
-	pci_set_drvdata(pdev, device);
-
-	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
-	if (device->version == IOAT_VER_1_2)
-		err = ioat1_dma_probe(device, ioat_dca_enabled);
-	else if (device->version == IOAT_VER_2_0)
-		err = ioat2_dma_probe(device, ioat_dca_enabled);
-	else if (device->version >= IOAT_VER_3_0)
-		err = ioat3_dma_probe(device, ioat_dca_enabled);
-	else
-		return -ENODEV;
-
-	if (err) {
-		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-static void ioat_remove(struct pci_dev *pdev)
-{
-	struct ioatdma_device *device = pci_get_drvdata(pdev);
-
-	if (!device)
-		return;
-
-	dev_err(&pdev->dev, "Removing dma and dca services\n");
-	if (device->dca) {
-		unregister_dca_provider(device->dca, &pdev->dev);
-		free_dca_provider(device->dca);
-		device->dca = NULL;
-	}
-	ioat_dma_remove(device);
-}
-
-static int __init ioat_init_module(void)
-{
-	int err = -ENOMEM;
-
-	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
-		DRV_NAME, IOAT_DMA_VERSION);
-
-	ioat2_cache = kmem_cache_create("ioat2", sizeof(struct ioat_ring_ent),
-					0, SLAB_HWCACHE_ALIGN, NULL);
-	if (!ioat2_cache)
-		return -ENOMEM;
-
-	ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
-	if (!ioat3_sed_cache)
-		goto err_ioat2_cache;
-
-	err = pci_register_driver(&ioat_pci_driver);
-	if (err)
-		goto err_ioat3_cache;
-
-	return 0;
-
- err_ioat3_cache:
-	kmem_cache_destroy(ioat3_sed_cache);
-
- err_ioat2_cache:
-	kmem_cache_destroy(ioat2_cache);
-
-	return err;
-}
-module_init(ioat_init_module);
-
-static void __exit ioat_exit_module(void)
-{
-	pci_unregister_driver(&ioat_pci_driver);
-	kmem_cache_destroy(ioat2_cache);
-}
-module_exit(ioat_exit_module);
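
The probe in the block above negotiates DMA masks with the usual
64-bit-first, 32-bit-fallback dance, once for the streaming mask and once
for the coherent mask. Where dma_set_mask_and_coherent() is available it
collapses each pair into a single call; a minimal sketch of the
equivalent, assuming the same managed PCI device:

	/* prefer 64-bit addressing, fall back to 32-bit if unsupported */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;
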
diff --git a/drivers/dma/ioat/prep.c b/drivers/dma/ioat/prep.c
new file mode 100644
index 0000000..ad4fb41
--- /dev/null
+++ b/drivers/dma/ioat/prep.c
@@ -0,0 +1,715 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/gfp.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/prefetch.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "hw.h"
+#include "dma.h"
+
+#define MAX_SCF	1024
+
+/* provide a lookup table for setting the source address in the base or
+ * extended descriptor of an xor or pq descriptor
+ */
+static const u8 xor_idx_to_desc = 0xe0;
+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
+static const u8 pq_idx_to_desc = 0xf8;
+static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
+				       2, 2, 2, 2, 2, 2, 2 };
+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
+static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
+					0, 1, 2, 3, 4, 5, 6 };
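+
+/*
+ * Worked example: xor_idx_to_desc is 0xe0 (binary 11100000), so
+ * (xor_idx_to_desc >> idx) & 1 selects descs[1] (the extended
+ * descriptor) for source indexes 5-7 and descs[0] (the base
+ * descriptor) otherwise; xor_idx_to_field[] then gives the field slot
+ * within that descriptor. pq_idx_to_desc (0xf8) works the same way
+ * with the split at index 3.
+ */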
+
+static void xor_set_src(struct ioat_raw_descriptor *descs[2],
+			dma_addr_t addr, u32 offset, int idx)
+{
+	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
+
+	raw->field[xor_idx_to_field[idx]] = addr + offset;
+}
+
+static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
+{
+	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
+
+	return raw->field[pq_idx_to_field[idx]];
+}
+
+static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
+{
+	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+	return raw->field[pq16_idx_to_field[idx]];
+}
+
+static void pq_set_src(struct ioat_raw_descriptor *descs[2],
+		       dma_addr_t addr, u32 offset, u8 coef, int idx)
+{
+	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
+	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
+
+	raw->field[pq_idx_to_field[idx]] = addr + offset;
+	pq->coef[idx] = coef;
+}
+
+static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
+			dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
+{
+	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
+	struct ioat_pq16a_descriptor *pq16 =
+		(struct ioat_pq16a_descriptor *)desc[1];
+	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+	raw->field[pq16_idx_to_field[idx]] = addr + offset;
+
+	if (idx < 8)
+		pq->coef[idx] = coef;
+	else
+		pq16->coef[idx - 8] = coef;
+}
+
+static struct ioat_sed_ent *
+ioat3_alloc_sed(struct ioatdma_device *ioat_dma, unsigned int hw_pool)
+{
+	struct ioat_sed_ent *sed;
+	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
+
+	sed = kmem_cache_alloc(ioat_sed_cache, flags);
+	if (!sed)
+		return NULL;
+
+	sed->hw_pool = hw_pool;
+	sed->hw = dma_pool_alloc(ioat_dma->sed_hw_pool[hw_pool],
+				 flags, &sed->dma);
+	if (!sed->hw) {
+		kmem_cache_free(ioat_sed_cache, sed);
+		return NULL;
+	}
+
+	return sed;
+}
+
+struct dma_async_tx_descriptor *
+ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
+			   dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioat_dma_descriptor *hw;
+	struct ioat_ring_ent *desc;
+	dma_addr_t dst = dma_dest;
+	dma_addr_t src = dma_src;
+	size_t total_len = len;
+	int num_descs, idx, i;
+
+	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
+	if (likely(num_descs) &&
+	    ioat_check_space_lock(ioat_chan, num_descs) == 0)
+		idx = ioat_chan->head;
+	else
+		return NULL;
+	i = 0;
+	do {
+		size_t copy = min_t(size_t, len, 1 << ioat_chan->xfercap_log);
+
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		hw = desc->hw;
+
+		hw->size = copy;
+		hw->ctl = 0;
+		hw->src_addr = src;
+		hw->dst_addr = dst;
+
+		len -= copy;
+		dst += copy;
+		src += copy;
+		dump_desc_dbg(ioat_chan, desc);
+	} while (++i < num_descs);
+
+	desc->txd.flags = flags;
+	desc->len = total_len;
+	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+	hw->ctl_f.compl_write = 1;
+	dump_desc_dbg(ioat_chan, desc);
+	/* we leave the channel locked to ensure in order submission */
+
+	return &desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
+__ioat_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
+		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
+		      size_t len, unsigned long flags)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioat_ring_ent *compl_desc;
+	struct ioat_ring_ent *desc;
+	struct ioat_ring_ent *ext;
+	size_t total_len = len;
+	struct ioat_xor_descriptor *xor;
+	struct ioat_xor_ext_descriptor *xor_ex = NULL;
+	struct ioat_dma_descriptor *hw;
+	int num_descs, with_ext, idx, i;
+	u32 offset = 0;
+	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
+
+	BUG_ON(src_cnt < 2);
+
+	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
+	/* we need 2x the number of descriptors to cover more than 5
+	 * sources
+	 */
+	if (src_cnt > 5) {
+		with_ext = 1;
+		num_descs *= 2;
+	} else
+		with_ext = 0;
+
+	/* completion writes from the raid engine may pass completion
+	 * writes from the legacy engine, so we need one extra null
+	 * (legacy) descriptor to ensure all completion writes arrive in
+	 * order.
+	 */
+	if (likely(num_descs) &&
+	    ioat_check_space_lock(ioat_chan, num_descs+1) == 0)
+		idx = ioat_chan->head;
+	else
+		return NULL;
+	i = 0;
+	do {
+		struct ioat_raw_descriptor *descs[2];
+		size_t xfer_size = min_t(size_t,
+					 len, 1 << ioat_chan->xfercap_log);
+		int s;
+
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		xor = desc->xor;
+
+		/* save a branch by unconditionally retrieving the
+		 * extended descriptor; xor_set_src() knows not to
+		 * write to it in the single-descriptor case
+		 */
+		ext = ioat_get_ring_ent(ioat_chan, idx + i + 1);
+		xor_ex = ext->xor_ex;
+
+		descs[0] = (struct ioat_raw_descriptor *) xor;
+		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
+		for (s = 0; s < src_cnt; s++)
+			xor_set_src(descs, src[s], offset, s);
+		xor->size = xfer_size;
+		xor->dst_addr = dest + offset;
+		xor->ctl = 0;
+		xor->ctl_f.op = op;
+		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);
+
+		len -= xfer_size;
+		offset += xfer_size;
+		dump_desc_dbg(ioat_chan, desc);
+	} while ((i += 1 + with_ext) < num_descs);
+
+	/* last xor descriptor carries the unmap parameters and fence bit */
+	desc->txd.flags = flags;
+	desc->len = total_len;
+	if (result)
+		desc->result = result;
+	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+
+	/* completion descriptor carries interrupt bit */
+	compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
+	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
+	hw = compl_desc->hw;
+	hw->ctl = 0;
+	hw->ctl_f.null = 1;
+	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+	hw->ctl_f.compl_write = 1;
+	hw->size = NULL_DESC_BUFFER_SIZE;
+	dump_desc_dbg(ioat_chan, compl_desc);
+
+	/* we leave the channel locked to ensure in order submission */
+	return &compl_desc->txd;
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+	       unsigned int src_cnt, size_t len, unsigned long flags)
+{
+	return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
+		    unsigned int src_cnt, size_t len,
+		    enum sum_check_flags *result, unsigned long flags)
+{
+	/* the cleanup routine only sets bits on validate failure; it
+	 * does not clear bits on validate success, so clear the result
+	 * here
+	 */
+	*result = 0;
+
+	return __ioat_prep_xor_lock(chan, result, src[0], &src[1],
+				     src_cnt - 1, len, flags);
+}
+
+static void
+dump_pq_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc,
+		 struct ioat_ring_ent *ext)
+{
+	struct device *dev = to_dev(ioat_chan);
+	struct ioat_pq_descriptor *pq = desc->pq;
+	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
+	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
+	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
+	int i;
+
+	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
+		" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
+		" src_cnt: %d)\n",
+		desc_id(desc), (unsigned long long) desc->txd.phys,
+		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
+		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op,
+		pq->ctl_f.int_en, pq->ctl_f.compl_write,
+		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
+		pq->ctl_f.src_cnt);
+	for (i = 0; i < src_cnt; i++)
+		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
+			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
+	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
+	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
+	dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
+}
+
+static void dump_pq16_desc_dbg(struct ioatdma_chan *ioat_chan,
+			       struct ioat_ring_ent *desc)
+{
+	struct device *dev = to_dev(ioat_chan);
+	struct ioat_pq_descriptor *pq = desc->pq;
+	struct ioat_raw_descriptor *descs[] = { (void *)pq,
+						(void *)pq,
+						(void *)pq };
+	int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
+	int i;
+
+	if (desc->sed) {
+		descs[1] = (void *)desc->sed->hw;
+		descs[2] = (void *)desc->sed->hw + 64;
+	}
+
+	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
+		" sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
+		" src_cnt: %d)\n",
+		desc_id(desc), (unsigned long long) desc->txd.phys,
+		(unsigned long long) pq->next,
+		desc->txd.flags, pq->size, pq->ctl,
+		pq->ctl_f.op, pq->ctl_f.int_en,
+		pq->ctl_f.compl_write,
+		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
+		pq->ctl_f.src_cnt);
+	for (i = 0; i < src_cnt; i++) {
+		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
+			(unsigned long long) pq16_get_src(descs, i),
+			pq->coef[i]);
+	}
+	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
+	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
+}
+
+static struct dma_async_tx_descriptor *
+__ioat_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
+		     const dma_addr_t *dst, const dma_addr_t *src,
+		     unsigned int src_cnt, const unsigned char *scf,
+		     size_t len, unsigned long flags)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct ioat_ring_ent *compl_desc;
+	struct ioat_ring_ent *desc;
+	struct ioat_ring_ent *ext;
+	size_t total_len = len;
+	struct ioat_pq_descriptor *pq;
+	struct ioat_pq_ext_descriptor *pq_ex = NULL;
+	struct ioat_dma_descriptor *hw;
+	u32 offset = 0;
+	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
+	int i, s, idx, with_ext, num_descs;
+	int cb32 = (ioat_dma->version < IOAT_VER_3_3) ? 1 : 0;
+
+	dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
+	/* the engine requires at least two sources (we provide
+	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
+	 */
+	BUG_ON(src_cnt + dmaf_continue(flags) < 2);
+
+	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
+	/* we need 2x the number of descriptors to cover more than 3
+	 * sources (we need 1 extra source in the q-only continuation
+	 * case and 3 extra sources in the p+q continuation case).
+	 */
+	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
+	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
+		with_ext = 1;
+		num_descs *= 2;
+	} else
+		with_ext = 0;
+
+	/* completion writes from the raid engine may pass completion
+	 * writes from the legacy engine, so we need one extra null
+	 * (legacy) descriptor to ensure all completion writes arrive in
+	 * order.
+	 */
+	if (likely(num_descs) &&
+	    ioat_check_space_lock(ioat_chan, num_descs + cb32) == 0)
+		idx = ioat_chan->head;
+	else
+		return NULL;
+	i = 0;
+	do {
+		struct ioat_raw_descriptor *descs[2];
+		size_t xfer_size = min_t(size_t, len,
+					 1 << ioat_chan->xfercap_log);
+
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		pq = desc->pq;
+
+		/* save a branch by unconditionally retrieving the
+		 * extended descriptor; pq_set_src() knows not to
+		 * write to it in the single-descriptor case
+		 */
+		ext = ioat_get_ring_ent(ioat_chan, idx + i + with_ext);
+		pq_ex = ext->pq_ex;
+
+		descs[0] = (struct ioat_raw_descriptor *) pq;
+		descs[1] = (struct ioat_raw_descriptor *) pq_ex;
+
+		for (s = 0; s < src_cnt; s++)
+			pq_set_src(descs, src[s], offset, scf[s], s);
+
+		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
+		if (dmaf_p_disabled_continue(flags))
+			pq_set_src(descs, dst[1], offset, 1, s++);
+		else if (dmaf_continue(flags)) {
+			pq_set_src(descs, dst[0], offset, 0, s++);
+			pq_set_src(descs, dst[1], offset, 1, s++);
+			pq_set_src(descs, dst[1], offset, 0, s++);
+		}
+		pq->size = xfer_size;
+		pq->p_addr = dst[0] + offset;
+		pq->q_addr = dst[1] + offset;
+		pq->ctl = 0;
+		pq->ctl_f.op = op;
+		/* we turn on descriptor write back error status */
+		if (ioat_dma->cap & IOAT_CAP_DWBES)
+			pq->ctl_f.wb_en = result ? 1 : 0;
+		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
+		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
+		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
+
+		len -= xfer_size;
+		offset += xfer_size;
+	} while ((i += 1 + with_ext) < num_descs);
+
+	/* last pq descriptor carries the unmap parameters and fence bit */
+	desc->txd.flags = flags;
+	desc->len = total_len;
+	if (result)
+		desc->result = result;
+	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+	dump_pq_desc_dbg(ioat_chan, desc, ext);
+
+	if (!cb32) {
+		pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+		pq->ctl_f.compl_write = 1;
+		compl_desc = desc;
+	} else {
+		/* completion descriptor carries interrupt bit */
+		compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
+		hw = compl_desc->hw;
+		hw->ctl = 0;
+		hw->ctl_f.null = 1;
+		hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+		hw->ctl_f.compl_write = 1;
+		hw->size = NULL_DESC_BUFFER_SIZE;
+		dump_desc_dbg(ioat_chan, compl_desc);
+	}
+
+	/* we leave the channel locked to ensure in order submission */
+	return &compl_desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
+__ioat_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
+		       const dma_addr_t *dst, const dma_addr_t *src,
+		       unsigned int src_cnt, const unsigned char *scf,
+		       size_t len, unsigned long flags)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct ioat_ring_ent *desc;
+	size_t total_len = len;
+	struct ioat_pq_descriptor *pq;
+	u32 offset = 0;
+	u8 op;
+	int i, s, idx, num_descs;
+
+	/* this function is only called with 9-16 sources */
+	op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
+
+	dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
+
+	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
+
+	/*
+	 * 16 source pq is only available on cb3.3 and has no completion
+	 * write hw bug.
+	 */
+	if (num_descs && ioat_check_space_lock(ioat_chan, num_descs) == 0)
+		idx = ioat_chan->head;
+	else
+		return NULL;
+
+	i = 0;
+
+	do {
+		struct ioat_raw_descriptor *descs[4];
+		size_t xfer_size = min_t(size_t, len,
+					 1 << ioat_chan->xfercap_log);
+
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		pq = desc->pq;
+
+		descs[0] = (struct ioat_raw_descriptor *) pq;
+
+		desc->sed = ioat3_alloc_sed(ioat_dma, (src_cnt-2) >> 3);
+		if (!desc->sed) {
+			dev_err(to_dev(ioat_chan),
+				"%s: no free sed entries\n", __func__);
+			return NULL;
+		}
+
+		pq->sed_addr = desc->sed->dma;
+		desc->sed->parent = desc;
+
+		descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
+		descs[2] = (void *)descs[1] + 64;
+
+		for (s = 0; s < src_cnt; s++)
+			pq16_set_src(descs, src[s], offset, scf[s], s);
+
+		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
+		if (dmaf_p_disabled_continue(flags))
+			pq16_set_src(descs, dst[1], offset, 1, s++);
+		else if (dmaf_continue(flags)) {
+			pq16_set_src(descs, dst[0], offset, 0, s++);
+			pq16_set_src(descs, dst[1], offset, 1, s++);
+			pq16_set_src(descs, dst[1], offset, 0, s++);
+		}
+
+		pq->size = xfer_size;
+		pq->p_addr = dst[0] + offset;
+		pq->q_addr = dst[1] + offset;
+		pq->ctl = 0;
+		pq->ctl_f.op = op;
+		pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
+		/* we turn on descriptor write back error status */
+		if (ioat_dma->cap & IOAT_CAP_DWBES)
+			pq->ctl_f.wb_en = result ? 1 : 0;
+		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
+		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
+
+		len -= xfer_size;
+		offset += xfer_size;
+	} while (++i < num_descs);
+
+	/* last pq descriptor carries the unmap parameters and fence bit */
+	desc->txd.flags = flags;
+	desc->len = total_len;
+	if (result)
+		desc->result = result;
+	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+
+	/* with cb3.3 we should be able to do completion w/o a null desc */
+	pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+	pq->ctl_f.compl_write = 1;
+
+	dump_pq16_desc_dbg(ioat_chan, desc);
+
+	/* we leave the channel locked to ensure in order submission */
+	return &desc->txd;
+}
+
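+/*
+ * Fold the implied continuation sources into the count: a q-only
+ * continuation adds 1 source and a p+q continuation adds 3, which is
+ * what selects between the 8-source and 16-source descriptor formats
+ * in the callers below.
+ */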
+static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
+{
+	if (dmaf_p_disabled_continue(flags))
+		return src_cnt + 1;
+	else if (dmaf_continue(flags))
+		return src_cnt + 3;
+	else
+		return src_cnt;
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+	      unsigned int src_cnt, const unsigned char *scf, size_t len,
+	      unsigned long flags)
+{
+	/* specify valid address for disabled result */
+	if (flags & DMA_PREP_PQ_DISABLE_P)
+		dst[0] = dst[1];
+	if (flags & DMA_PREP_PQ_DISABLE_Q)
+		dst[1] = dst[0];
+
+	/* handle the single source multiply case from the raid6
+	 * recovery path
+	 */
+	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
+		dma_addr_t single_source[2];
+		unsigned char single_source_coef[2];
+
+		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
+		single_source[0] = src[0];
+		single_source[1] = src[0];
+		single_source_coef[0] = scf[0];
+		single_source_coef[1] = 0;
+
+		return src_cnt_flags(src_cnt, flags) > 8 ?
+			__ioat_prep_pq16_lock(chan, NULL, dst, single_source,
+					       2, single_source_coef, len,
+					       flags) :
+			__ioat_prep_pq_lock(chan, NULL, dst, single_source, 2,
+					     single_source_coef, len, flags);
+
+	} else {
+		return src_cnt_flags(src_cnt, flags) > 8 ?
+			__ioat_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
+					       scf, len, flags) :
+			__ioat_prep_pq_lock(chan, NULL, dst, src, src_cnt,
+					     scf, len, flags);
+	}
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+		  unsigned int src_cnt, const unsigned char *scf, size_t len,
+		  enum sum_check_flags *pqres, unsigned long flags)
+{
+	/* specify valid address for disabled result */
+	if (flags & DMA_PREP_PQ_DISABLE_P)
+		pq[0] = pq[1];
+	if (flags & DMA_PREP_PQ_DISABLE_Q)
+		pq[1] = pq[0];
+
+	/* the cleanup routine only sets bits on validate failure; it
+	 * does not clear bits on validate success, so clear the result
+	 * here
+	 */
+	*pqres = 0;
+
+	return src_cnt_flags(src_cnt, flags) > 8 ?
+		__ioat_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
+				       flags) :
+		__ioat_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
+				     flags);
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
+		 unsigned int src_cnt, size_t len, unsigned long flags)
+{
+	unsigned char scf[MAX_SCF];
+	dma_addr_t pq[2];
+
+	if (src_cnt > MAX_SCF)
+		return NULL;
+
+	memset(scf, 0, src_cnt);
+	pq[0] = dst;
+	flags |= DMA_PREP_PQ_DISABLE_Q;
+	pq[1] = dst; /* specify valid address for disabled result */
+
+	return src_cnt_flags(src_cnt, flags) > 8 ?
+		__ioat_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
+				       flags) :
+		__ioat_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
+				     flags);
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
+		     unsigned int src_cnt, size_t len,
+		     enum sum_check_flags *result, unsigned long flags)
+{
+	unsigned char scf[MAX_SCF];
+	dma_addr_t pq[2];
+
+	if (src_cnt > MAX_SCF)
+		return NULL;
+
+	/* the cleanup routine only sets bits on validate failure; it
+	 * does not clear bits on validate success, so clear the result
+	 * here
+	 */
+	*result = 0;
+
+	memset(scf, 0, src_cnt);
+	pq[0] = src[0];
+	flags |= DMA_PREP_PQ_DISABLE_Q;
+	pq[1] = pq[0]; /* specify valid address for disabled result */
+
+	return src_cnt_flags(src_cnt, flags) > 8 ?
+		__ioat_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
+				       scf, len, flags) :
+		__ioat_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
+				     scf, len, flags);
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioat_ring_ent *desc;
+	struct ioat_dma_descriptor *hw;
+
+	if (ioat_check_space_lock(ioat_chan, 1) == 0)
+		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
+	else
+		return NULL;
+
+	hw = desc->hw;
+	hw->ctl = 0;
+	hw->ctl_f.null = 1;
+	hw->ctl_f.int_en = 1;
+	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+	hw->ctl_f.compl_write = 1;
+	hw->size = NULL_DESC_BUFFER_SIZE;
+	hw->src_addr = 0;
+	hw->dst_addr = 0;
+
+	desc->txd.flags = flags;
+	desc->len = 1;
+
+	dump_desc_dbg(ioat_chan, desc);
+
+	/* we leave the channel locked to ensure in order submission */
+	return &desc->txd;
+}
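
Every prep routine above carves a transfer into ring descriptors of at
most 1 << xfercap_log bytes and sizes its ring reservation with
ioat_xferlen_to_descs(). The helper itself lives elsewhere in the driver;
a sketch of the ceiling division the copy loops rely on, with the field
names assumed from the code above:

	/* descriptors needed for 'len' bytes at 2^xfercap_log bytes each */
	static inline int ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan,
						size_t len)
	{
		size_t cap = 1 << ioat_chan->xfercap_log;

		return (len + cap - 1) >> ioat_chan->xfercap_log;
	}
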
diff --git a/drivers/dma/ioat/sysfs.c b/drivers/dma/ioat/sysfs.c
new file mode 100644
index 0000000..cb4a857
--- /dev/null
+++ b/drivers/dma/ioat/sysfs.c
@@ -0,0 +1,135 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/pci.h>
+#include "dma.h"
+#include "registers.h"
+#include "hw.h"
+
+#include "../dmaengine.h"
+
+static ssize_t cap_show(struct dma_chan *c, char *page)
+{
+	struct dma_device *dma = c->device;
+
+	return sprintf(page, "copy%s%s%s%s%s\n",
+		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
+		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
+		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
+		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
+		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
+}
+struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
+
+static ssize_t version_show(struct dma_chan *c, char *page)
+{
+	struct dma_device *dma = c->device;
+	struct ioatdma_device *ioat_dma = to_ioatdma_device(dma);
+
+	return sprintf(page, "%d.%d\n",
+		       ioat_dma->version >> 4, ioat_dma->version & 0xf);
+}
+struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
+
+static ssize_t
+ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+	struct ioat_sysfs_entry *entry;
+	struct ioatdma_chan *ioat_chan;
+
+	entry = container_of(attr, struct ioat_sysfs_entry, attr);
+	ioat_chan = container_of(kobj, struct ioatdma_chan, kobj);
+
+	if (!entry->show)
+		return -EIO;
+	return entry->show(&ioat_chan->dma_chan, page);
+}
+
+const struct sysfs_ops ioat_sysfs_ops = {
+	.show	= ioat_attr_show,
+};
+
+void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type)
+{
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct dma_chan *c;
+
+	list_for_each_entry(c, &dma->channels, device_node) {
+		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+		struct kobject *parent = &c->dev->device.kobj;
+		int err;
+
+		err = kobject_init_and_add(&ioat_chan->kobj, type,
+					   parent, "quickdata");
+		if (err) {
+			dev_warn(to_dev(ioat_chan),
+				 "sysfs init error (%d), continuing...\n", err);
+			kobject_put(&ioat_chan->kobj);
+			set_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state);
+		}
+	}
+}
+
+void ioat_kobject_del(struct ioatdma_device *ioat_dma)
+{
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct dma_chan *c;
+
+	list_for_each_entry(c, &dma->channels, device_node) {
+		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state)) {
+			kobject_del(&ioat_chan->kobj);
+			kobject_put(&ioat_chan->kobj);
+		}
+	}
+}
+
+static ssize_t ring_size_show(struct dma_chan *c, char *page)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+	return sprintf(page, "%d\n", (1 << ioat_chan->alloc_order) & ~1);
+}
+static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
+
+static ssize_t ring_active_show(struct dma_chan *c, char *page)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+	/* ...taken outside the lock, no need to be precise */
+	return sprintf(page, "%d\n", ioat_ring_active(ioat_chan));
+}
+static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
+
+static struct attribute *ioat_attrs[] = {
+	&ring_size_attr.attr,
+	&ring_active_attr.attr,
+	&ioat_cap_attr.attr,
+	&ioat_version_attr.attr,
+	NULL,
+};
+
+struct kobj_type ioat_ktype = {
+	.sysfs_ops = &ioat_sysfs_ops,
+	.default_attrs = ioat_attrs,
+};
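
The error path in ioat_kobject_add() follows the kobject contract:
kobject_init_and_add() leaves the object initialized even when the add
fails, so the one legal cleanup is kobject_put(), and the
IOAT_KOBJ_INIT_FAIL bit then keeps ioat_kobject_del() from calling
kobject_del() on an object that was never added. The generic shape of
that contract (obj, obj_ktype and the name are illustrative):

	err = kobject_init_and_add(&obj->kobj, &obj_ktype, parent, "%s", name);
	if (err) {
		/* the kobject took a reference even on failure; drop it */
		kobject_put(&obj->kobj);
		return err;
	}
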
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index 2e284a4..4768a82 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -265,10 +265,10 @@
 	return ret;
 }
 
-/* Chained IRQ handler for IPU error interrupt */
-static void ipu_irq_err(unsigned int irq, struct irq_desc *desc)
+/* Chained IRQ handler for IPU function and error interrupt */
+static void ipu_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
-	struct ipu *ipu = irq_get_handler_data(irq);
+	struct ipu *ipu = irq_desc_get_handler_data(desc);
 	u32 status;
 	int i, line;
 
@@ -286,43 +286,7 @@
 		raw_spin_unlock(&bank_lock);
 		while ((line = ffs(status))) {
 			struct ipu_irq_map *map;
-
-			line--;
-			status &= ~(1UL << line);
-
-			raw_spin_lock(&bank_lock);
-			map = src2map(32 * i + line);
-			if (map)
-				irq = map->irq;
-			raw_spin_unlock(&bank_lock);
-
-			if (!map) {
-				pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
-				       line, i);
-				continue;
-			}
-			generic_handle_irq(irq);
-		}
-	}
-}
-
-/* Chained IRQ handler for IPU function interrupt */
-static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc)
-{
-	struct ipu *ipu = irq_desc_get_handler_data(desc);
-	u32 status;
-	int i, line;
-
-	for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) {
-		struct ipu_irq_bank *bank = irq_bank + i;
-
-		raw_spin_lock(&bank_lock);
-		status = ipu_read_reg(ipu, bank->status);
-		/* Not clearing all interrupts, see above */
-		status &= ipu_read_reg(ipu, bank->control);
-		raw_spin_unlock(&bank_lock);
-		while ((line = ffs(status))) {
-			struct ipu_irq_map *map;
+			unsigned int irq = NO_IRQ;
 
 			line--;
 			status &= ~(1UL << line);
@@ -377,16 +341,12 @@
 		irq_map[i].irq = irq;
 		irq_map[i].source = -EINVAL;
 		irq_set_handler(irq, handle_level_irq);
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-#endif
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
-	irq_set_handler_data(ipu->irq_fn, ipu);
-	irq_set_chained_handler(ipu->irq_fn, ipu_irq_fn);
+	irq_set_chained_handler_and_data(ipu->irq_fn, ipu_irq_handler, ipu);
 
-	irq_set_handler_data(ipu->irq_err, ipu);
-	irq_set_chained_handler(ipu->irq_err, ipu_irq_err);
+	irq_set_chained_handler_and_data(ipu->irq_err, ipu_irq_handler, ipu);
 
 	ipu->irq_base = irq_base;
 
@@ -399,16 +359,12 @@
 
 	irq_base = ipu->irq_base;
 
-	irq_set_chained_handler(ipu->irq_fn, NULL);
-	irq_set_handler_data(ipu->irq_fn, NULL);
+	irq_set_chained_handler_and_data(ipu->irq_fn, NULL, NULL);
 
-	irq_set_chained_handler(ipu->irq_err, NULL);
-	irq_set_handler_data(ipu->irq_err, NULL);
+	irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL);
 
 	for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) {
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, 0);
-#endif
+		irq_set_status_flags(irq, IRQ_NOREQUEST);
 		irq_set_chip(irq, NULL);
 		irq_set_chip_data(irq, NULL);
 	}
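
The two nearly identical chained handlers above collapse into one, and
each irq_set_handler_data() + irq_set_chained_handler() pair becomes a
single irq_set_chained_handler_and_data() call, which installs handler
and data together so the handler can never run against half-installed
state. Setup and teardown now mirror each other:

	/* install handler and handler data in one step */
	irq_set_chained_handler_and_data(irq, ipu_irq_handler, ipu);
	/* ... and tear both down the same way */
	irq_set_chained_handler_and_data(irq, NULL, NULL);
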
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 647e362..1ba2fd7 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -24,7 +24,6 @@
 #include "virt-dma.h"
 
 #define DRIVER_NAME		"k3-dma"
-#define DMA_ALIGN		3
 #define DMA_MAX_SIZE		0x1ffc
 
 #define INT_STAT		0x00
@@ -732,7 +731,7 @@
 	d->slave.device_pause = k3_dma_transfer_pause;
 	d->slave.device_resume = k3_dma_transfer_resume;
 	d->slave.device_terminate_all = k3_dma_terminate_all;
-	d->slave.copy_align = DMA_ALIGN;
+	d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
 
 	/* init virtual channel */
 	d->chans = devm_kzalloc(&op->dev,
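
DMA_ALIGN above was a bare shift count (3, i.e. 2^3 = 8-byte alignment).
This series replaces such magic numbers in the drivers below with the
dmaengine_alignment enum added to include/linux/dmaengine.h this cycle;
the values remain log2 of the byte alignment, which is why mic_x100 can
keep using one directly as a shift:

	enum dmaengine_alignment {
		DMAENGINE_ALIGN_1_BYTE = 0,
		DMAENGINE_ALIGN_2_BYTES = 1,
		DMAENGINE_ALIGN_4_BYTES = 2,
		DMAENGINE_ALIGN_8_BYTES = 3,
		DMAENGINE_ALIGN_16_BYTES = 4,
		DMAENGINE_ALIGN_32_BYTES = 5,
		DMAENGINE_ALIGN_64_BYTES = 6,
	};
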
diff --git a/drivers/dma/lpc18xx-dmamux.c b/drivers/dma/lpc18xx-dmamux.c
new file mode 100644
index 0000000..761f326
--- /dev/null
+++ b/drivers/dma/lpc18xx-dmamux.c
@@ -0,0 +1,183 @@
+/*
+ * DMA Router driver for LPC18xx/43xx DMA MUX
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on TI DMA Crossbar driver by:
+ *   Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+ *   Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+/* CREG register offset and macros for mux manipulation */
+#define LPC18XX_CREG_DMAMUX		0x11c
+#define LPC18XX_DMAMUX_VAL(v, n)	((v) << (n * 2))
+#define LPC18XX_DMAMUX_MASK(n)		(0x3 << (n * 2))
+#define LPC18XX_DMAMUX_MAX_VAL		0x3
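+
+/*
+ * Each master request line owns a 2-bit field in the CREG_DMAMUX word:
+ * for request n = 3 and mux value v = 2, LPC18XX_DMAMUX_MASK(3) is
+ * 0x3 << 6 and LPC18XX_DMAMUX_VAL(2, 3) is 2 << 6.
+ */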
+
+struct lpc18xx_dmamux {
+	u32 value;
+	bool busy;
+};
+
+struct lpc18xx_dmamux_data {
+	struct dma_router dmarouter;
+	struct lpc18xx_dmamux *muxes;
+	u32 dma_master_requests;
+	u32 dma_mux_requests;
+	struct regmap *reg;
+	spinlock_t lock;
+};
+
+static void lpc18xx_dmamux_free(struct device *dev, void *route_data)
+{
+	struct lpc18xx_dmamux_data *dmamux = dev_get_drvdata(dev);
+	struct lpc18xx_dmamux *mux = route_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dmamux->lock, flags);
+	mux->busy = false;
+	spin_unlock_irqrestore(&dmamux->lock, flags);
+}
+
+static void *lpc18xx_dmamux_reserve(struct of_phandle_args *dma_spec,
+				    struct of_dma *ofdma)
+{
+	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+	struct lpc18xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
+	unsigned long flags;
+	unsigned mux;
+
+	if (dma_spec->args_count != 3) {
+		dev_err(&pdev->dev, "invalid number of dma mux args\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	mux = dma_spec->args[0];
+	if (mux >= dmamux->dma_master_requests) {
+		dev_err(&pdev->dev, "invalid mux number: %d\n",
+			dma_spec->args[0]);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (dma_spec->args[1] > LPC18XX_DMAMUX_MAX_VAL) {
+		dev_err(&pdev->dev, "invalid dma mux value: %d\n",
+			dma_spec->args[1]);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* The of_node_put() will be done in the core for the node */
+	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
+	if (!dma_spec->np) {
+		dev_err(&pdev->dev, "can't get dma master\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	spin_lock_irqsave(&dmamux->lock, flags);
+	if (dmamux->muxes[mux].busy) {
+		spin_unlock_irqrestore(&dmamux->lock, flags);
+		dev_err(&pdev->dev, "dma request %u busy with %u.%u\n",
+			mux, mux, dmamux->muxes[mux].value);
+		of_node_put(dma_spec->np);
+		return ERR_PTR(-EBUSY);
+	}
+
+	dmamux->muxes[mux].busy = true;
+	dmamux->muxes[mux].value = dma_spec->args[1];
+
+	regmap_update_bits(dmamux->reg, LPC18XX_CREG_DMAMUX,
+			   LPC18XX_DMAMUX_MASK(mux),
+			   LPC18XX_DMAMUX_VAL(dmamux->muxes[mux].value, mux));
+	spin_unlock_irqrestore(&dmamux->lock, flags);
+
+	dma_spec->args[1] = dma_spec->args[2];
+	dma_spec->args_count = 2;
+
+	dev_dbg(&pdev->dev, "mapping dmamux %u.%u to dma request %u\n", mux,
+		dmamux->muxes[mux].value, mux);
+
+	return &dmamux->muxes[mux];
+}
+
+static int lpc18xx_dmamux_probe(struct platform_device *pdev)
+{
+	struct device_node *dma_np, *np = pdev->dev.of_node;
+	struct lpc18xx_dmamux_data *dmamux;
+	int ret;
+
+	dmamux = devm_kzalloc(&pdev->dev, sizeof(*dmamux), GFP_KERNEL);
+	if (!dmamux)
+		return -ENOMEM;
+
+	dmamux->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+	if (IS_ERR(dmamux->reg)) {
+		dev_err(&pdev->dev, "syscon lookup failed\n");
+		return PTR_ERR(dmamux->reg);
+	}
+
+	ret = of_property_read_u32(np, "dma-requests",
+				   &dmamux->dma_mux_requests);
+	if (ret) {
+		dev_err(&pdev->dev, "missing dma-requests property\n");
+		return ret;
+	}
+
+	dma_np = of_parse_phandle(np, "dma-masters", 0);
+	if (!dma_np) {
+		dev_err(&pdev->dev, "can't get dma master\n");
+		return -ENODEV;
+	}
+
+	ret = of_property_read_u32(dma_np, "dma-requests",
+				   &dmamux->dma_master_requests);
+	of_node_put(dma_np);
+	if (ret) {
+		dev_err(&pdev->dev, "missing master dma-requests property\n");
+		return ret;
+	}
+
+	dmamux->muxes = devm_kcalloc(&pdev->dev, dmamux->dma_master_requests,
+				     sizeof(struct lpc18xx_dmamux),
+				     GFP_KERNEL);
+	if (!dmamux->muxes)
+		return -ENOMEM;
+
+	spin_lock_init(&dmamux->lock);
+	platform_set_drvdata(pdev, dmamux);
+	dmamux->dmarouter.dev = &pdev->dev;
+	dmamux->dmarouter.route_free = lpc18xx_dmamux_free;
+
+	return of_dma_router_register(np, lpc18xx_dmamux_reserve,
+				      &dmamux->dmarouter);
+}
+
+static const struct of_device_id lpc18xx_dmamux_match[] = {
+	{ .compatible = "nxp,lpc1850-dmamux" },
+	{},
+};
+
+static struct platform_driver lpc18xx_dmamux_driver = {
+	.probe	= lpc18xx_dmamux_probe,
+	.driver = {
+		.name = "lpc18xx-dmamux",
+		.of_match_table = lpc18xx_dmamux_match,
+	},
+};
+
+static int __init lpc18xx_dmamux_init(void)
+{
+	return platform_driver_register(&lpc18xx_dmamux_driver);
+}
+arch_initcall(lpc18xx_dmamux_init);
diff --git a/drivers/dma/mic_x100_dma.h b/drivers/dma/mic_x100_dma.h
index f663b0b..d899820 100644
--- a/drivers/dma/mic_x100_dma.h
+++ b/drivers/dma/mic_x100_dma.h
@@ -39,7 +39,7 @@
  */
 #define MIC_DMA_MAX_NUM_CHAN	8
 #define MIC_DMA_NUM_CHAN	4
-#define MIC_DMA_ALIGN_SHIFT	6
+#define MIC_DMA_ALIGN_SHIFT	DMAENGINE_ALIGN_64_BYTES
 #define MIC_DMA_ALIGN_BYTES	(1 << MIC_DMA_ALIGN_SHIFT)
 #define MIC_DMA_DESC_RX_SIZE	(128 * 1024 - 4)
 
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 462a022..e39457f 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -72,7 +72,6 @@
 #define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
 #define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */
 
-#define PDMA_ALIGNMENT		3
 #define PDMA_MAX_DESC_BYTES	DCMD_LENGTH
 
 struct mmp_pdma_desc_hw {
@@ -1071,7 +1070,7 @@
 	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
 	pdev->device.device_config = mmp_pdma_config;
 	pdev->device.device_terminate_all = mmp_pdma_terminate_all;
-	pdev->device.copy_align = PDMA_ALIGNMENT;
+	pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
 	pdev->device.src_addr_widths = widths;
 	pdev->device.dst_addr_widths = widths;
 	pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index e683761..3df0422 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -100,7 +100,6 @@
 	PXA910_SQU,
 };
 
-#define TDMA_ALIGNMENT		3
 #define TDMA_MAX_XFER_BYTES    SZ_64K
 
 struct mmp_tdma_chan {
@@ -695,7 +694,7 @@
 	tdev->device.device_pause = mmp_tdma_pause_chan;
 	tdev->device.device_resume = mmp_tdma_resume_chan;
 	tdev->device.device_terminate_all = mmp_tdma_terminate_all;
-	tdev->device.copy_align = TDMA_ALIGNMENT;
+	tdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
 
 	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 	platform_set_drvdata(pdev, tdev);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index f1325f6..1c2de9a 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -13,7 +13,6 @@
  */
 
 #include <linux/init.h>
-#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
@@ -26,6 +25,7 @@
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/irqdomain.h>
+#include <linux/cpumask.h>
 #include <linux/platform_data/dma-mv_xor.h>
 
 #include "dmaengine.h"
@@ -1126,7 +1126,8 @@
 	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC },
 	{},
 };
-MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
+
+static unsigned int mv_xor_engine_count;
 
 static int mv_xor_probe(struct platform_device *pdev)
 {
@@ -1134,6 +1135,7 @@
 	struct mv_xor_device *xordev;
 	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct resource *res;
+	unsigned int max_engines, max_channels;
 	int i, ret;
 	int op_in_desc;
 
@@ -1177,6 +1179,21 @@
 	if (!IS_ERR(xordev->clk))
 		clk_prepare_enable(xordev->clk);
 
+	/*
+	 * async_tx performs best with no more than one channel per
+	 * CPU, so cap the number of engines and channels accordingly.
+	 * Note that we also want to use channels from separate
+	 * engines when possible.
+	 */
+	max_engines = num_present_cpus();
+	max_channels = min_t(unsigned int,
+			     MV_XOR_MAX_CHANNELS,
+			     DIV_ROUND_UP(num_present_cpus(), 2));
+
+	if (mv_xor_engine_count >= max_engines)
+		return 0;
+
 	if (pdev->dev.of_node) {
 		struct device_node *np;
 		int i = 0;
@@ -1190,13 +1207,13 @@
 			int irq;
 			op_in_desc = (int)of_id->data;
 
+			if (i >= max_channels)
+				continue;
+
 			dma_cap_zero(cap_mask);
-			if (of_property_read_bool(np, "dmacap,memcpy"))
-				dma_cap_set(DMA_MEMCPY, cap_mask);
-			if (of_property_read_bool(np, "dmacap,xor"))
-				dma_cap_set(DMA_XOR, cap_mask);
-			if (of_property_read_bool(np, "dmacap,interrupt"))
-				dma_cap_set(DMA_INTERRUPT, cap_mask);
+			dma_cap_set(DMA_MEMCPY, cap_mask);
+			dma_cap_set(DMA_XOR, cap_mask);
+			dma_cap_set(DMA_INTERRUPT, cap_mask);
 
 			irq = irq_of_parse_and_map(np, 0);
 			if (!irq) {
@@ -1216,7 +1233,7 @@
 			i++;
 		}
 	} else if (pdata && pdata->channels) {
-		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+		for (i = 0; i < max_channels; i++) {
 			struct mv_xor_channel_data *cd;
 			struct mv_xor_chan *chan;
 			int irq;
@@ -1263,27 +1280,8 @@
 	return ret;
 }
 
-static int mv_xor_remove(struct platform_device *pdev)
-{
-	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
-	int i;
-
-	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
-		if (xordev->channels[i])
-			mv_xor_channel_remove(xordev->channels[i]);
-	}
-
-	if (!IS_ERR(xordev->clk)) {
-		clk_disable_unprepare(xordev->clk);
-		clk_put(xordev->clk);
-	}
-
-	return 0;
-}
-
 static struct platform_driver mv_xor_driver = {
 	.probe		= mv_xor_probe,
-	.remove		= mv_xor_remove,
 	.driver		= {
 		.name	        = MV_XOR_NAME,
 		.of_match_table = of_match_ptr(mv_xor_dt_ids),
@@ -1295,19 +1293,10 @@
 {
 	return platform_driver_register(&mv_xor_driver);
 }
-module_init(mv_xor_init);
+device_initcall(mv_xor_init);
 
-/* it's currently unsafe to unload this module */
-#if 0
-static void __exit mv_xor_exit(void)
-{
-	platform_driver_unregister(&mv_xor_driver);
-	return;
-}
-
-module_exit(mv_xor_exit);
-#endif
-
+/*
 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
 MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
 MODULE_LICENSE("GPL");
+*/
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index b859792d..113605f 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -11,10 +11,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/dmaengine.h>
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index ecab4ea0..17ee758 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1198,6 +1198,9 @@
 	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
 	struct _arg_LPEND lpend;
 
+	if (*bursts == 1)
+		return _bursts(dry_run, buf, pxs, 1);
+
 	/* Max iterations possible in DMALP is 256 */
 	if (*bursts >= 256*256) {
 		lcnt1 = 256;
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index ddcbbf5..5cb61ce 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -184,19 +184,18 @@
 
 static int dbg_show_requester_chan(struct seq_file *s, void *p)
 {
-	int pos = 0;
 	struct pxad_phy *phy = s->private;
 	int i;
 	u32 drcmr;
 
-	pos += seq_printf(s, "DMA channel %d requester :\n", phy->idx);
+	seq_printf(s, "DMA channel %d requester :\n", phy->idx);
 	for (i = 0; i < 70; i++) {
 		drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
 		if ((drcmr & DRCMR_CHLNUM) == phy->idx)
-			pos += seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
-					  !!(drcmr & DRCMR_MAPVLD));
+			seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
+				   !!(drcmr & DRCMR_MAPVLD));
 	}
-	return pos;
+	return 0;
 }
 
 static inline int dbg_burst_from_dcmd(u32 dcmd)
@@ -906,21 +905,21 @@
 	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
 
 	*dcmd = 0;
-	if (chan->cfg.direction == DMA_DEV_TO_MEM) {
+	if (dir == DMA_DEV_TO_MEM) {
 		maxburst = chan->cfg.src_maxburst;
 		width = chan->cfg.src_addr_width;
 		dev_addr = chan->cfg.src_addr;
 		*dev_src = dev_addr;
 		*dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC;
 	}
-	if (chan->cfg.direction == DMA_MEM_TO_DEV) {
+	if (dir == DMA_MEM_TO_DEV) {
 		maxburst = chan->cfg.dst_maxburst;
 		width = chan->cfg.dst_addr_width;
 		dev_addr = chan->cfg.dst_addr;
 		*dev_dst = dev_addr;
 		*dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG;
 	}
-	if (chan->cfg.direction == DMA_MEM_TO_MEM)
+	if (dir == DMA_MEM_TO_MEM)
 		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
 			PXA_DCMD_INCSRCADDR;
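
seq_printf() stopped returning a byte count this cycle, so accumulating
its result into pos no longer builds; a seq_file show callback simply
emits its output and returns 0, and the seq_file core detects overflow
and retries with a larger buffer on its own. The reduced shape, sketched:

	static int dbg_show_example(struct seq_file *s, void *p)
	{
		seq_printf(s, "...\n");	/* returns void now */
		return 0;		/* the core handles buffer sizing */
	}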
 
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 0f37152..9fda65a 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -39,18 +39,6 @@
 
 endif
 
-config SUDMAC
-	tristate "Renesas SUDMAC support"
-	depends on SH_DMAE_BASE
-	help
-	  Enable support for the Renesas SUDMAC controllers.
-
-config RCAR_HPB_DMAE
-	tristate "Renesas R-Car HPB DMAC support"
-	depends on SH_DMAE_BASE
-	help
-	  Enable support for the Renesas R-Car series DMA controllers.
-
 config RCAR_DMAC
 	tristate "Renesas R-Car Gen2 DMA Controller"
 	depends on ARCH_SHMOBILE || COMPILE_TEST
@@ -59,6 +47,12 @@
 	  This driver supports the general purpose DMA controller found in the
 	  Renesas R-Car second generation SoCs.
 
+config RCAR_HPB_DMAE
+	tristate "Renesas R-Car HPB DMAC support"
+	depends on SH_DMAE_BASE
+	help
+	  Enable support for the Renesas R-Car series DMA controllers.
+
 config RENESAS_USB_DMAC
 	tristate "Renesas USB-DMA Controller"
 	depends on ARCH_SHMOBILE || COMPILE_TEST
@@ -67,3 +61,9 @@
 	help
 	  This driver supports the USB-DMA controller found in the Renesas
 	  SoCs.
+
+config SUDMAC
+	tristate "Renesas SUDMAC support"
+	depends on SH_DMAE_BASE
+	help
+	  Enable support for the Renesas SUDMAC controllers.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index b8a59806..0133e46 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -13,7 +13,7 @@
 shdma-objs := $(shdma-y)
 obj-$(CONFIG_SH_DMAE) += shdma.o
 
-obj-$(CONFIG_SUDMAC) += sudmac.o
-obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
 obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
+obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
 obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
+obj-$(CONFIG_SUDMAC) += sudmac.o
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 8c5186c..7d5598d 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -455,6 +455,7 @@
 	switch (sdma->type) {
 	case SIRFSOC_DMA_VER_A7V1:
 		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
+		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_INT);
 		writel_relaxed((1 << cid) | 1 << (cid + 16),
 			       sdma->base +
 			       SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
@@ -462,6 +463,8 @@
 		break;
 	case SIRFSOC_DMA_VER_A7V2:
 		writel_relaxed(0, sdma->base + SIRFSOC_DMA_INT_EN_ATLAS7);
+		writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7,
+			       sdma->base + SIRFSOC_DMA_INT_ATLAS7);
 		writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
 		writel_relaxed(0, sdma->base + SIRFSOC_DMA_VALID_ATLAS7);
 		break;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 3c10f03..750d1b3 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2853,7 +2853,7 @@
 		 * This controller can only access address at even
 		 * 32bit boundaries, i.e. 2^2
 		 */
-		dev->copy_align = 2;
+		dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
 	}
 
 	if (dma_has_cap(DMA_SG, dev->cap_mask))
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
new file mode 100644
index 0000000..a1a500d
--- /dev/null
+++ b/drivers/dma/sun4i-dma.c
@@ -0,0 +1,1288 @@
+/*
+ * Copyright (C) 2014 Emilio López
+ * Emilio López <emilio@elopez.com.ar>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+
+/** Common macros for normal and dedicated DMA registers **/
+
+#define SUN4I_DMA_CFG_LOADING			BIT(31)
+#define SUN4I_DMA_CFG_DST_DATA_WIDTH(width)	((width) << 25)
+#define SUN4I_DMA_CFG_DST_BURST_LENGTH(len)	((len) << 23)
+#define SUN4I_DMA_CFG_DST_ADDR_MODE(mode)	((mode) << 21)
+#define SUN4I_DMA_CFG_DST_DRQ_TYPE(type)	((type) << 16)
+#define SUN4I_DMA_CFG_SRC_DATA_WIDTH(width)	((width) << 9)
+#define SUN4I_DMA_CFG_SRC_BURST_LENGTH(len)	((len) << 7)
+#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode)	((mode) << 5)
+#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type)	(type)
+
+/** Normal DMA register values **/
+
+/* Normal DMA source/destination data request type values */
+#define SUN4I_NDMA_DRQ_TYPE_SDRAM		0x16
+#define SUN4I_NDMA_DRQ_TYPE_LIMIT		(0x1F + 1)
+
+/** Normal DMA register layout **/
+
+/* Normal DMA source/destination address mode values */
+#define SUN4I_NDMA_ADDR_MODE_LINEAR		0
+#define SUN4I_NDMA_ADDR_MODE_IO			1
+
+/* Normal DMA configuration register layout */
+#define SUN4I_NDMA_CFG_CONT_MODE		BIT(30)
+#define SUN4I_NDMA_CFG_WAIT_STATE(n)		((n) << 27)
+#define SUN4I_NDMA_CFG_DST_NON_SECURE		BIT(22)
+#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
+#define SUN4I_NDMA_CFG_SRC_NON_SECURE		BIT(6)
+
+/** Dedicated DMA register values **/
+
+/* Dedicated DMA source/destination address mode values */
+#define SUN4I_DDMA_ADDR_MODE_LINEAR		0
+#define SUN4I_DDMA_ADDR_MODE_IO			1
+#define SUN4I_DDMA_ADDR_MODE_HORIZONTAL_PAGE	2
+#define SUN4I_DDMA_ADDR_MODE_VERTICAL_PAGE	3
+
+/* Dedicated DMA source/destination data request type values */
+#define SUN4I_DDMA_DRQ_TYPE_SDRAM		0x1
+#define SUN4I_DDMA_DRQ_TYPE_LIMIT		(0x1F + 1)
+
+/** Dedicated DMA register layout **/
+
+/* Dedicated DMA configuration register layout */
+#define SUN4I_DDMA_CFG_BUSY			BIT(30)
+#define SUN4I_DDMA_CFG_CONT_MODE		BIT(29)
+#define SUN4I_DDMA_CFG_DST_NON_SECURE		BIT(28)
+#define SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
+#define SUN4I_DDMA_CFG_SRC_NON_SECURE		BIT(12)
+
+/* Dedicated DMA parameter register layout */
+#define SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(n)	(((n) - 1) << 24)
+#define SUN4I_DDMA_PARA_DST_WAIT_CYCLES(n)	(((n) - 1) << 16)
+#define SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(n)	(((n) - 1) << 8)
+#define SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(n)	(((n) - 1) << 0)
+
+/** DMA register offsets **/
+
+/* General register offsets */
+#define SUN4I_DMA_IRQ_ENABLE_REG		0x0
+#define SUN4I_DMA_IRQ_PENDING_STATUS_REG	0x4
+
+/* Normal DMA register offsets */
+#define SUN4I_NDMA_CHANNEL_REG_BASE(n)		(0x100 + (n) * 0x20)
+#define SUN4I_NDMA_CFG_REG			0x0
+#define SUN4I_NDMA_SRC_ADDR_REG			0x4
+#define SUN4I_NDMA_DST_ADDR_REG		0x8
+#define SUN4I_NDMA_BYTE_COUNT_REG		0xC
+
+/* Dedicated DMA register offsets */
+#define SUN4I_DDMA_CHANNEL_REG_BASE(n)		(0x300 + (n) * 0x20)
+#define SUN4I_DDMA_CFG_REG			0x0
+#define SUN4I_DDMA_SRC_ADDR_REG			0x4
+#define SUN4I_DDMA_DST_ADDR_REG		0x8
+#define SUN4I_DDMA_BYTE_COUNT_REG		0xC
+#define SUN4I_DDMA_PARA_REG			0x18
+
+/** DMA Driver **/
+
+/*
+ * Normal DMA has 8 channels, and Dedicated DMA has another 8, so
+ * that's 16 channels. As for endpoints, there are 29 and 21
+ * respectively. Given that the Normal DMA endpoints (other than
+ * SDRAM) can be used as tx/rx, we need 78 vchans in total
+ */
+#define SUN4I_NDMA_NR_MAX_CHANNELS	8
+#define SUN4I_DDMA_NR_MAX_CHANNELS	8
+#define SUN4I_DMA_NR_MAX_CHANNELS					\
+	(SUN4I_NDMA_NR_MAX_CHANNELS + SUN4I_DDMA_NR_MAX_CHANNELS)
+#define SUN4I_NDMA_NR_MAX_VCHANS	(29 * 2 - 1)
+#define SUN4I_DDMA_NR_MAX_VCHANS	21
+#define SUN4I_DMA_NR_MAX_VCHANS						\
+	(SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)
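+
+/*
+ * The arithmetic above: of the 29 normal endpoints, the 28 non-SDRAM
+ * ones count twice (once for rx, once for tx) and SDRAM once, so
+ * 28 * 2 + 1 = 57 = 29 * 2 - 1; adding the 21 dedicated endpoints
+ * gives the 78 total.
+ */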
+
+/* This set of SUN4I_DDMA timing parameters was found experimentally while
+ * working with the SPI driver and seems to make it behave correctly */
+#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS \
+	(SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(1) |			\
+	 SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(1) |				\
+	 SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) |				\
+	 SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))
+
+struct sun4i_dma_pchan {
+	/* Register base of channel */
+	void __iomem			*base;
+	/* vchan currently being serviced */
+	struct sun4i_dma_vchan		*vchan;
+	/* Is this a dedicated pchan? */
+	int				is_dedicated;
+};
+
+struct sun4i_dma_vchan {
+	struct virt_dma_chan		vc;
+	struct dma_slave_config		cfg;
+	struct sun4i_dma_pchan		*pchan;
+	struct sun4i_dma_promise	*processing;
+	struct sun4i_dma_contract	*contract;
+	u8				endpoint;
+	int				is_dedicated;
+};
+
+struct sun4i_dma_promise {
+	u32				cfg;
+	u32				para;
+	dma_addr_t			src;
+	dma_addr_t			dst;
+	size_t				len;
+	struct list_head		list;
+};
+
+/* A contract is a set of promises */
+struct sun4i_dma_contract {
+	struct virt_dma_desc		vd;
+	struct list_head		demands;
+	struct list_head		completed_demands;
+	int				is_cyclic;
+};
+
+struct sun4i_dma_dev {
+	DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
+	struct dma_device		slave;
+	struct sun4i_dma_pchan		*pchans;
+	struct sun4i_dma_vchan		*vchans;
+	void __iomem			*base;
+	struct clk			*clk;
+	int				irq;
+	spinlock_t			lock;
+};
+
+static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
+{
+	return container_of(dev, struct sun4i_dma_dev, slave);
+}
+
+static struct sun4i_dma_vchan *to_sun4i_dma_vchan(struct dma_chan *chan)
+{
+	return container_of(chan, struct sun4i_dma_vchan, vc.chan);
+}
+
+static struct sun4i_dma_contract *to_sun4i_dma_contract(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct sun4i_dma_contract, vd);
+}
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static int convert_burst(u32 maxburst)
+{
+	if (maxburst > 8)
+		return -EINVAL;
+
+	/* 1 -> 0, 4 -> 1, 8 -> 2 */
+	return (maxburst >> 2);
+}
+
+static int convert_buswidth(enum dma_slave_buswidth addr_width)
+{
+	if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
+		return -EINVAL;
+
+	/* 8 (1 byte) -> 0, 16 (2 bytes) -> 1, 32 (4 bytes) -> 2 */
+	return (addr_width >> 1);
+}
+
+static void sun4i_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+
+	vchan_free_chan_resources(&vchan->vc);
+}
+
+static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
+						  struct sun4i_dma_vchan *vchan)
+{
+	struct sun4i_dma_pchan *pchan = NULL, *pchans = priv->pchans;
+	unsigned long flags;
+	int i, max;
+
+	/*
+	 * pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and
+	 * SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated ones
+	 */
+	if (vchan->is_dedicated) {
+		i = SUN4I_NDMA_NR_MAX_CHANNELS;
+		max = SUN4I_DMA_NR_MAX_CHANNELS;
+	} else {
+		i = 0;
+		max = SUN4I_NDMA_NR_MAX_CHANNELS;
+	}
+
+	spin_lock_irqsave(&priv->lock, flags);
+	for_each_clear_bit_from(i, &priv->pchans_used, max) {
+		pchan = &pchans[i];
+		pchan->vchan = vchan;
+		set_bit(i, priv->pchans_used);
+		break;
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return pchan;
+}
+
+static void release_pchan(struct sun4i_dma_dev *priv,
+			  struct sun4i_dma_pchan *pchan)
+{
+	unsigned long flags;
+	int nr = pchan - priv->pchans;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	pchan->vchan = NULL;
+	clear_bit(nr, priv->pchans_used);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void configure_pchan(struct sun4i_dma_pchan *pchan,
+			    struct sun4i_dma_promise *d)
+{
+	/*
+	 * Configure addresses and misc parameters depending on type
+	 * SUN4I_DDMA has an extra field with timing parameters
+	 */
+	if (pchan->is_dedicated) {
+		writel_relaxed(d->src, pchan->base + SUN4I_DDMA_SRC_ADDR_REG);
+		writel_relaxed(d->dst, pchan->base + SUN4I_DDMA_DST_ADDR_REG);
+		writel_relaxed(d->len, pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
+		writel_relaxed(d->para, pchan->base + SUN4I_DDMA_PARA_REG);
+		writel_relaxed(d->cfg, pchan->base + SUN4I_DDMA_CFG_REG);
+	} else {
+		writel_relaxed(d->src, pchan->base + SUN4I_NDMA_SRC_ADDR_REG);
+		writel_relaxed(d->dst, pchan->base + SUN4I_NDMA_DST_ADDR_REG);
+		writel_relaxed(d->len, pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
+		writel_relaxed(d->cfg, pchan->base + SUN4I_NDMA_CFG_REG);
+	}
+}
+
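+/*
+ * The IRQ enable register allocates two bits per pchan: the even bit
+ * enables the half-done interrupt, the odd bit the end-of-transfer
+ * interrupt.
+ */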
+static void set_pchan_interrupt(struct sun4i_dma_dev *priv,
+				struct sun4i_dma_pchan *pchan,
+				int half, int end)
+{
+	u32 reg;
+	int pchan_number = pchan - priv->pchans;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	reg = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+
+	if (half)
+		reg |= BIT(pchan_number * 2);
+	else
+		reg &= ~BIT(pchan_number * 2);
+
+	if (end)
+		reg |= BIT(pchan_number * 2 + 1);
+	else
+		reg &= ~BIT(pchan_number * 2 + 1);
+
+	writel_relaxed(reg, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/**
+ * Execute pending operations on a vchan
+ *
+ * When given a vchan, this function will try to acquire a suitable
+ * pchan and, if successful, will configure it to fulfill a promise
+ * from the next pending contract.
+ *
+ * This function must be called with &vchan->vc.lock held.
+ */
+static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
+				   struct sun4i_dma_vchan *vchan)
+{
+	struct sun4i_dma_promise *promise = NULL;
+	struct sun4i_dma_contract *contract = NULL;
+	struct sun4i_dma_pchan *pchan;
+	struct virt_dma_desc *vd;
+	int ret;
+
+	lockdep_assert_held(&vchan->vc.lock);
+
+	/* We need a pchan to do anything, so secure one if available */
+	pchan = find_and_use_pchan(priv, vchan);
+	if (!pchan)
+		return -EBUSY;
+
+	/*
+	 * Channel endpoints must not be repeated, so if this vchan
+	 * has already submitted some work, we can't do anything else
+	 */
+	if (vchan->processing) {
+		dev_dbg(chan2dev(&vchan->vc.chan),
+			"processing something to this endpoint already\n");
+		ret = -EBUSY;
+		goto release_pchan;
+	}
+
+	do {
+		/* Figure out which contract we're working with today */
+		vd = vchan_next_desc(&vchan->vc);
+		if (!vd) {
+			dev_dbg(chan2dev(&vchan->vc.chan),
+				"No pending contract found");
+			ret = 0;
+			goto release_pchan;
+		}
+
+		contract = to_sun4i_dma_contract(vd);
+		if (list_empty(&contract->demands)) {
+			/* The contract has been completed so mark it as such */
+			list_del(&contract->vd.node);
+			vchan_cookie_complete(&contract->vd);
+			dev_dbg(chan2dev(&vchan->vc.chan),
+				"Empty contract found and marked complete");
+		}
+	} while (list_empty(&contract->demands));
+
+	/* Now find out what we need to do */
+	promise = list_first_entry(&contract->demands,
+				   struct sun4i_dma_promise, list);
+	vchan->processing = promise;
+
+	/* ... and make it reality */
+	if (promise) {
+		vchan->contract = contract;
+		vchan->pchan = pchan;
+		set_pchan_interrupt(priv, pchan, contract->is_cyclic, 1);
+		configure_pchan(pchan, promise);
+	}
+
+	return 0;
+
+release_pchan:
+	release_pchan(priv, pchan);
+	return ret;
+}
+
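+/*
+ * If the client only provided the parameters for one side of the
+ * transfer, mirror them onto the other side so both the source and
+ * destination ends are fully specified.
+ */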
+static int sanitize_config(struct dma_slave_config *sconfig,
+			   enum dma_transfer_direction direction)
+{
+	switch (direction) {
+	case DMA_MEM_TO_DEV:
+		if ((sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
+		    !sconfig->dst_maxburst)
+			return -EINVAL;
+
+		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+			sconfig->src_addr_width = sconfig->dst_addr_width;
+
+		if (!sconfig->src_maxburst)
+			sconfig->src_maxburst = sconfig->dst_maxburst;
+
+		break;
+
+	case DMA_DEV_TO_MEM:
+		if ((sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
+		    !sconfig->src_maxburst)
+			return -EINVAL;
+
+		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+			sconfig->dst_addr_width = sconfig->src_addr_width;
+
+		if (!sconfig->dst_maxburst)
+			sconfig->dst_maxburst = sconfig->src_maxburst;
+
+		break;
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+/**
+ * Generate a promise, to be used in a normal DMA contract.
+ *
+ * A NDMA promise contains all the information required to program the
+ * normal part of the DMA Engine and get data copied. A non-executed
+ * promise will live in the demands list on a contract. Once it has been
+ * completed, it will be moved to the completed demands list for later freeing.
+ * All linked promises will be freed when the corresponding contract is freed
+ */
+static struct sun4i_dma_promise *
+generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
+		      size_t len, struct dma_slave_config *sconfig,
+		      enum dma_transfer_direction direction)
+{
+	struct sun4i_dma_promise *promise;
+	int ret;
+
+	ret = sanitize_config(sconfig, direction);
+	if (ret)
+		return NULL;
+
+	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
+	if (!promise)
+		return NULL;
+
+	promise->src = src;
+	promise->dst = dest;
+	promise->len = len;
+	promise->cfg = SUN4I_DMA_CFG_LOADING |
+		SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN;
+
+	dev_dbg(chan2dev(chan),
+		"src burst %d, dst burst %d, src buswidth %d, dst buswidth %d",
+		sconfig->src_maxburst, sconfig->dst_maxburst,
+		sconfig->src_addr_width, sconfig->dst_addr_width);
+
+	/* Source burst */
+	ret = convert_burst(sconfig->src_maxburst);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
+
+	/* Destination burst */
+	ret = convert_burst(sconfig->dst_maxburst);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
+
+	/* Source bus width */
+	ret = convert_buswidth(sconfig->src_addr_width);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+
+	/* Destination bus width */
+	ret = convert_buswidth(sconfig->dst_addr_width);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+
+	return promise;
+
+fail:
+	kfree(promise);
+	return NULL;
+}
+
+/**
+ * Generate a promise, to be used in a dedicated DMA contract.
+ *
+ * A DDMA promise contains all the information required to program the
+ * Dedicated part of the DMA Engine and get data copied. A non-executed
+ * promise will live in the demands list on a contract. Once it has been
+ * completed, it will be moved to the completed demands list for later freeing.
+ * All linked promises will be freed when the corresponding contract is freed
+ */
+static struct sun4i_dma_promise *
+generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
+		      size_t len, struct dma_slave_config *sconfig)
+{
+	struct sun4i_dma_promise *promise;
+	int ret;
+
+	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
+	if (!promise)
+		return NULL;
+
+	promise->src = src;
+	promise->dst = dest;
+	promise->len = len;
+	promise->cfg = SUN4I_DMA_CFG_LOADING |
+		SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;
+
+	/* Source burst */
+	ret = convert_burst(sconfig->src_maxburst);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
+
+	/* Destination burst */
+	ret = convert_burst(sconfig->dst_maxburst);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
+
+	/* Source bus width */
+	ret = convert_buswidth(sconfig->src_addr_width);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+
+	/* Destination bus width */
+	ret = convert_buswidth(sconfig->dst_addr_width);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+
+	return promise;
+
+fail:
+	kfree(promise);
+	return NULL;
+}
+
+/**
+ * Generate a contract
+ *
+ * Contracts function as DMA descriptors. As our hardware does not support
+ * linked lists, we need to implement SG via software. We use a contract
+ * to hold all the pieces of the request and process them serially one
+ * after another. Each piece is represented as a promise.
+ */
+static struct sun4i_dma_contract *generate_dma_contract(void)
+{
+	struct sun4i_dma_contract *contract;
+
+	contract = kzalloc(sizeof(*contract), GFP_NOWAIT);
+	if (!contract)
+		return NULL;
+
+	INIT_LIST_HEAD(&contract->demands);
+	INIT_LIST_HEAD(&contract->completed_demands);
+
+	return contract;
+}
+
+/**
+ * Get next promise on a cyclic transfer
+ *
+ * Cyclic contracts contain a series of promises which are executed on a
+ * loop. This function returns the next promise from a cyclic contract,
+ * so it can be programmed into the hardware.
+ */
+static struct sun4i_dma_promise *
+get_next_cyclic_promise(struct sun4i_dma_contract *contract)
+{
+	struct sun4i_dma_promise *promise;
+
+	promise = list_first_entry_or_null(&contract->demands,
+					   struct sun4i_dma_promise, list);
+	if (!promise) {
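+		/* Every promise has run; recycle the completed ones and restart */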
+		list_splice_init(&contract->completed_demands,
+				 &contract->demands);
+		promise = list_first_entry(&contract->demands,
+					   struct sun4i_dma_promise, list);
+	}
+
+	return promise;
+}
+
+/**
+ * Free a contract and all its associated promises
+ */
+static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
+{
+	struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
+	struct sun4i_dma_promise *promise, *tmp;
+
+	/*
+	 * Free all the demands and completed demands. Use the _safe
+	 * iterators, as we free the entries while walking the lists.
+	 */
+	list_for_each_entry_safe(promise, tmp, &contract->demands, list)
+		kfree(promise);
+
+	list_for_each_entry_safe(promise, tmp,
+				 &contract->completed_demands, list)
+		kfree(promise);
+
+	kfree(contract);
+}
+
+static struct dma_async_tx_descriptor *
+sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
+			  dma_addr_t src, size_t len, unsigned long flags)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	struct dma_slave_config *sconfig = &vchan->cfg;
+	struct sun4i_dma_promise *promise;
+	struct sun4i_dma_contract *contract;
+
+	contract = generate_dma_contract();
+	if (!contract)
+		return NULL;
+
+	/*
+	 * We can only do the copy to bus-aligned addresses, so use the
+	 * widest available bus width to get decent performance. We also
+	 * maximize the burst size for the same reason.
+	 */
+	sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	sconfig->src_maxburst = 8;
+	sconfig->dst_maxburst = 8;
+
+	if (vchan->is_dedicated)
+		promise = generate_ddma_promise(chan, src, dest, len, sconfig);
+	else
+		promise = generate_ndma_promise(chan, src, dest, len, sconfig,
+						DMA_MEM_TO_MEM);
+
+	if (!promise) {
+		kfree(contract);
+		return NULL;
+	}
+
+	/* Configure memcpy mode */
+	if (vchan->is_dedicated) {
+		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
+				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
+	} else {
+		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
+				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+	}
+
+	/* Fill the contract with our only promise */
+	list_add_tail(&promise->list, &contract->demands);
+
+	/* And add it to the vchan */
+	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
+			  size_t period_len, enum dma_transfer_direction dir,
+			  unsigned long flags)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	struct dma_slave_config *sconfig = &vchan->cfg;
+	struct sun4i_dma_promise *promise;
+	struct sun4i_dma_contract *contract;
+	dma_addr_t src, dest;
+	u32 endpoints;
+	int nr_periods, offset, plength, i;
+
+	if (!is_slave_direction(dir)) {
+		dev_err(chan2dev(chan), "Invalid DMA direction\n");
+		return NULL;
+	}
+
+	if (vchan->is_dedicated) {
+		/*
+		 * As we are only using this for audio data, we stick to
+		 * normal DMA. There is nothing stopping us from supporting
+		 * dedicated DMA here as well, so if a client ever needs
+		 * it, it will be simple to implement.
+		 */
+		dev_err(chan2dev(chan),
+			"Cyclic transfers are only supported on Normal DMA\n");
+		return NULL;
+	}
+
+	contract = generate_dma_contract();
+	if (!contract)
+		return NULL;
+
+	contract->is_cyclic = 1;
+
+	/* Figure out the endpoints and the address we need */
+	if (dir == DMA_MEM_TO_DEV) {
+		src = buf;
+		dest = sconfig->dst_addr;
+		endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
+			    SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
+			    SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO);
+	} else {
+		src = sconfig->src_addr;
+		dest = buf;
+		endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
+			    SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) |
+			    SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+	}
+
+	/*
+	 * We will be using half done interrupts to make two periods
+	 * out of a promise, so we need to program the DMA engine less
+	 * often
+	 */
+
+	/*
+	 * The engine can interrupt on half-transfer, so we can use
+	 * this feature to program the engine half as often as if we
+	 * didn't use it (keep in mind the hardware doesn't support
+	 * linked lists).
+	 *
+	 * Say you have a set of periods (| marks the start/end, I for
+	 * interrupt, P for programming the engine to do a new
+	 * transfer), the easy but slow way would be to do
+	 *
+	 *  |---|---|---|---| (periods / promises)
+	 *  P  I,P I,P I,P  I
+	 *
+	 * Using half transfer interrupts you can do
+	 *
+	 *  |-------|-------| (promises as configured on hw)
+	 *  |---|---|---|---| (periods)
+	 *  P   I  I,P  I   I
+	 *
+	 * Which requires half the engine programming for the same
+	 * functionality.
+	 */
+	nr_periods = DIV_ROUND_UP(len / period_len, 2);
+	for (i = 0; i < nr_periods; i++) {
+		/* Calculate the offset in the buffer and the length needed */
+		offset = i * period_len * 2;
+		plength = min((len - offset), (period_len * 2));
+		if (dir == DMA_MEM_TO_DEV)
+			src = buf + offset;
+		else
+			dest = buf + offset;
+
+		/* Make the promise */
+		promise = generate_ndma_promise(chan, src, dest,
+						plength, sconfig, dir);
+		if (!promise) {
+			/* Free the contract and the promises made so far */
+			sun4i_dma_free_contract(&contract->vd);
+			return NULL;
+		}
+		promise->cfg |= endpoints;
+
+		/* Then add it to the contract */
+		list_add_tail(&promise->list, &contract->demands);
+	}
+
+	/* And add it to the vchan */
+	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+			unsigned int sg_len, enum dma_transfer_direction dir,
+			unsigned long flags, void *context)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	struct dma_slave_config *sconfig = &vchan->cfg;
+	struct sun4i_dma_promise *promise;
+	struct sun4i_dma_contract *contract;
+	u8 ram_type, io_mode, linear_mode;
+	struct scatterlist *sg;
+	dma_addr_t srcaddr, dstaddr;
+	u32 endpoints, para;
+	int i;
+
+	if (!sgl)
+		return NULL;
+
+	if (!is_slave_direction(dir)) {
+		dev_err(chan2dev(chan), "Invalid DMA direction\n");
+		return NULL;
+	}
+
+	contract = generate_dma_contract();
+	if (!contract)
+		return NULL;
+
+	if (vchan->is_dedicated) {
+		io_mode = SUN4I_DDMA_ADDR_MODE_IO;
+		linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
+		ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+	} else {
+		io_mode = SUN4I_NDMA_ADDR_MODE_IO;
+		linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
+		ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+	}
+
+	if (dir == DMA_MEM_TO_DEV)
+		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
+			    SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
+			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
+			    SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
+	else
+		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
+			    SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
+			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
+			    SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		/* Figure out addresses */
+		if (dir == DMA_MEM_TO_DEV) {
+			srcaddr = sg_dma_address(sg);
+			dstaddr = sconfig->dst_addr;
+		} else {
+			srcaddr = sconfig->src_addr;
+			dstaddr = sg_dma_address(sg);
+		}
+
+		/*
+		 * These are the magic DMA engine timings that keep SPI going.
+		 * I haven't seen any interface on DMAEngine to configure
+		 * timings, and so far they seem to work for everything we
+		 * support, so I've kept them here. I don't know if other
+		 * devices need different timings because, as usual, we only
+		 * have the "para" bitfield meanings, but no comment on what
+		 * the values should be when doing a certain operation :|
+		 */
+		para = SUN4I_DDMA_MAGIC_SPI_PARAMETERS;
+
+		/* And make a suitable promise */
+		if (vchan->is_dedicated)
+			promise = generate_ddma_promise(chan, srcaddr, dstaddr,
+							sg_dma_len(sg),
+							sconfig);
+		else
+			promise = generate_ndma_promise(chan, srcaddr, dstaddr,
+							sg_dma_len(sg),
+							sconfig, dir);
+
+		if (!promise) {
+			/* Free the contract and the promises made so far */
+			sun4i_dma_free_contract(&contract->vd);
+			return NULL;
+		}
+
+		promise->cfg |= endpoints;
+		promise->para = para;
+
+		/* Then add it to the contract */
+		list_add_tail(&promise->list, &contract->demands);
+	}
+
+	/*
+	 * Once we've got all the promises ready, add the contract
+	 * to the pending list on the vchan
+	 */
+	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
+}
+
+static int sun4i_dma_terminate_all(struct dma_chan *chan)
+{
+	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	struct sun4i_dma_pchan *pchan = vchan->pchan;
+	LIST_HEAD(head);
+	unsigned long flags;
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+	vchan_get_all_descriptors(&vchan->vc, &head);
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+	/*
+	 * Clearing the configuration register will halt the pchan. Interrupts
+	 * may still trigger, so don't forget to disable them.
+	 */
+	if (pchan) {
+		if (pchan->is_dedicated)
+			writel(0, pchan->base + SUN4I_DDMA_CFG_REG);
+		else
+			writel(0, pchan->base + SUN4I_NDMA_CFG_REG);
+		set_pchan_interrupt(priv, pchan, 0, 0);
+		release_pchan(priv, pchan);
+	}
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+	vchan_dma_desc_free_list(&vchan->vc, &head);
+	/* Clear these so the vchan is usable again */
+	vchan->processing = NULL;
+	vchan->pchan = NULL;
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+	return 0;
+}
+
+static int sun4i_dma_config(struct dma_chan *chan,
+			    struct dma_slave_config *config)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+
+	memcpy(&vchan->cfg, config, sizeof(*config));
+
+	return 0;
+}
+
+static struct dma_chan *sun4i_dma_of_xlate(struct of_phandle_args *dma_spec,
+					   struct of_dma *ofdma)
+{
+	struct sun4i_dma_dev *priv = ofdma->of_dma_data;
+	struct sun4i_dma_vchan *vchan;
+	struct dma_chan *chan;
+	u8 is_dedicated = dma_spec->args[0];
+	u8 endpoint = dma_spec->args[1];
+
+	/* Check if type is Normal or Dedicated */
+	if (is_dedicated != 0 && is_dedicated != 1)
+		return NULL;
+
+	/* Make sure the endpoint looks sane */
+	if ((is_dedicated && endpoint >= SUN4I_DDMA_DRQ_TYPE_LIMIT) ||
+	    (!is_dedicated && endpoint >= SUN4I_NDMA_DRQ_TYPE_LIMIT))
+		return NULL;
+
+	chan = dma_get_any_slave_channel(&priv->slave);
+	if (!chan)
+		return NULL;
+
+	/* Assign the endpoint to the vchan */
+	vchan = to_sun4i_dma_vchan(chan);
+	vchan->is_dedicated = is_dedicated;
+	vchan->endpoint = endpoint;
+
+	return chan;
+}
+
+static enum dma_status sun4i_dma_tx_status(struct dma_chan *chan,
+					   dma_cookie_t cookie,
+					   struct dma_tx_state *state)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	struct sun4i_dma_pchan *pchan = vchan->pchan;
+	struct sun4i_dma_contract *contract;
+	struct sun4i_dma_promise *promise;
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+	enum dma_status ret;
+	size_t bytes = 0;
+
+	ret = dma_cookie_status(chan, cookie, state);
+	if (!state || (ret == DMA_COMPLETE))
+		return ret;
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+	vd = vchan_find_desc(&vchan->vc, cookie);
+	if (!vd)
+		goto exit;
+	contract = to_sun4i_dma_contract(vd);
+
+	list_for_each_entry(promise, &contract->demands, list)
+		bytes += promise->len;
+
+	/*
+	 * The hardware is configured to return the remaining byte
+	 * quantity. If possible, replace the first listed element's
+	 * full size with the actual remaining amount
+	 */
+	promise = list_first_entry_or_null(&contract->demands,
+					   struct sun4i_dma_promise, list);
+	if (promise && pchan) {
+		bytes -= promise->len;
+		if (pchan->is_dedicated)
+			bytes += readl(pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
+		else
+			bytes += readl(pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
+	}
+
+exit:
+	dma_set_residue(state, bytes);
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+	return ret;
+}
+
+static void sun4i_dma_issue_pending(struct dma_chan *chan)
+{
+	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+
+	/*
+	 * If there are pending transactions for this vchan, push one of
+	 * them into the engine to get the ball rolling.
+	 */
+	if (vchan_issue_pending(&vchan->vc))
+		__execute_vchan_pending(priv, vchan);
+
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+}
+
+static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id)
+{
+	struct sun4i_dma_dev *priv = dev_id;
+	struct sun4i_dma_pchan *pchans = priv->pchans, *pchan;
+	struct sun4i_dma_vchan *vchan;
+	struct sun4i_dma_contract *contract;
+	struct sun4i_dma_promise *promise;
+	unsigned long pendirq, irqs, disableirqs;
+	int bit, i, free_room, allow_mitigation = 1;
+
+	pendirq = readl_relaxed(priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);
+
+handle_pending:
+
+	disableirqs = 0;
+	free_room = 0;
+
+	for_each_set_bit(bit, &pendirq, 32) {
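+		/* Two IRQ bits per pchan: even is half-done, odd is end */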
+		pchan = &pchans[bit >> 1];
+		vchan = pchan->vchan;
+		if (!vchan) /* a terminated channel may still interrupt */
+			continue;
+		contract = vchan->contract;
+
+		/*
+		 * Disable the IRQ and free the pchan if it's an end
+		 * interrupt (odd bit)
+		 */
+		if (bit & 1) {
+			spin_lock(&vchan->vc.lock);
+
+			/*
+			 * Move the promise into the completed list now that
+			 * we're done with it
+			 */
+			list_del(&vchan->processing->list);
+			list_add_tail(&vchan->processing->list,
+				      &contract->completed_demands);
+
+			/*
+			 * Cyclic DMA transfers are special:
+			 * - There's always something we can dispatch
+			 * - We need to run the callback
+			 * - Latency is very important, as this is used by audio
+			 * We therefore just cycle through the list and dispatch
+			 * whatever we have here, reusing the pchan. There's
+			 * no need to run the thread after this.
+			 *
+			 * For non-cyclic transfers we need to look around,
+			 * so we can program some more work, or notify the
+			 * client that their transfers have been completed.
+			 */
+			if (contract->is_cyclic) {
+				promise = get_next_cyclic_promise(contract);
+				vchan->processing = promise;
+				configure_pchan(pchan, promise);
+				vchan_cyclic_callback(&contract->vd);
+			} else {
+				vchan->processing = NULL;
+				vchan->pchan = NULL;
+
+				free_room = 1;
+				disableirqs |= BIT(bit);
+				release_pchan(priv, pchan);
+			}
+
+			spin_unlock(&vchan->vc.lock);
+		} else {
+			/* Half done interrupt */
+			if (contract->is_cyclic)
+				vchan_cyclic_callback(&contract->vd);
+			else
+				disableirqs |= BIT(bit);
+		}
+	}
+
+	/* Disable the IRQs for events we handled */
+	spin_lock(&priv->lock);
+	irqs = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+	writel_relaxed(irqs & ~disableirqs,
+		       priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+	spin_unlock(&priv->lock);
+
+	/* Writing 1 to the pending field will clear the pending interrupt */
+	writel_relaxed(pendirq, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);
+
+	/*
+	 * If a pchan was freed, we may be able to schedule something else,
+	 * so have a look around
+	 */
+	if (free_room) {
+		for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
+			vchan = &priv->vchans[i];
+			spin_lock(&vchan->vc.lock);
+			__execute_vchan_pending(priv, vchan);
+			spin_unlock(&vchan->vc.lock);
+		}
+	}
+
+	/*
+	 * Handle newer interrupts if some showed up, but only do it once
+	 * to avoid looping for too long
+	 */
+	if (allow_mitigation) {
+		pendirq = readl_relaxed(priv->base +
+					SUN4I_DMA_IRQ_PENDING_STATUS_REG);
+		if (pendirq) {
+			allow_mitigation = 0;
+			goto handle_pending;
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int sun4i_dma_probe(struct platform_device *pdev)
+{
+	struct sun4i_dma_dev *priv;
+	struct resource *res;
+	int i, j, ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	priv->irq = platform_get_irq(pdev, 0);
+	if (priv->irq < 0) {
+		dev_err(&pdev->dev, "Cannot claim IRQ\n");
+		return priv->irq;
+	}
+
+	priv->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		dev_err(&pdev->dev, "No clock specified\n");
+		return PTR_ERR(priv->clk);
+	}
+
+	platform_set_drvdata(pdev, priv);
+	spin_lock_init(&priv->lock);
+
+	dma_cap_zero(priv->slave.cap_mask);
+	dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
+	dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
+	dma_cap_set(DMA_CYCLIC, priv->slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, priv->slave.cap_mask);
+
+	INIT_LIST_HEAD(&priv->slave.channels);
+	priv->slave.device_free_chan_resources	= sun4i_dma_free_chan_resources;
+	priv->slave.device_tx_status		= sun4i_dma_tx_status;
+	priv->slave.device_issue_pending	= sun4i_dma_issue_pending;
+	priv->slave.device_prep_slave_sg	= sun4i_dma_prep_slave_sg;
+	priv->slave.device_prep_dma_memcpy	= sun4i_dma_prep_dma_memcpy;
+	priv->slave.device_prep_dma_cyclic	= sun4i_dma_prep_dma_cyclic;
+	priv->slave.device_config		= sun4i_dma_config;
+	priv->slave.device_terminate_all	= sun4i_dma_terminate_all;
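+	/* copy_align is a power of two, so 2 requests 4 byte alignment */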
+	priv->slave.copy_align			= 2;
+	priv->slave.src_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+						  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+						  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	priv->slave.dst_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+						  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+						  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	priv->slave.directions			= BIT(DMA_DEV_TO_MEM) |
+						  BIT(DMA_MEM_TO_DEV);
+	priv->slave.residue_granularity		= DMA_RESIDUE_GRANULARITY_BURST;
+
+	priv->slave.dev = &pdev->dev;
+
+	priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
+				    sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
+	priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
+				    sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
+	if (!priv->vchans || !priv->pchans)
+		return -ENOMEM;
+
+	/*
+	 * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
+	 * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
+	 * dedicated ones
+	 */
+	for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
+		priv->pchans[i].base = priv->base +
+			SUN4I_NDMA_CHANNEL_REG_BASE(i);
+
+	for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
+		priv->pchans[i].base = priv->base +
+			SUN4I_DDMA_CHANNEL_REG_BASE(j);
+		priv->pchans[i].is_dedicated = 1;
+	}
+
+	for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
+		struct sun4i_dma_vchan *vchan = &priv->vchans[i];
+
+		spin_lock_init(&vchan->vc.lock);
+		vchan->vc.desc_free = sun4i_dma_free_contract;
+		vchan_init(&vchan->vc, &priv->slave);
+	}
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "Couldn't enable the clock\n");
+		return ret;
+	}
+
+	/*
+	 * Make sure the IRQs are all disabled and accounted for. The bootloader
+	 * likes to leave these dirty
+	 */
+	writel(0, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+	writel(0xFFFFFFFF, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);
+
+	ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
+			       0, dev_name(&pdev->dev), priv);
+	if (ret) {
+		dev_err(&pdev->dev, "Cannot request IRQ\n");
+		goto err_clk_disable;
+	}
+
+	ret = dma_async_device_register(&priv->slave);
+	if (ret) {
+		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
+		goto err_clk_disable;
+	}
+
+	ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
+					 priv);
+	if (ret) {
+		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
+		goto err_dma_unregister;
+	}
+
+	dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");
+
+	return 0;
+
+err_dma_unregister:
+	dma_async_device_unregister(&priv->slave);
+err_clk_disable:
+	clk_disable_unprepare(priv->clk);
+	return ret;
+}
+
+static int sun4i_dma_remove(struct platform_device *pdev)
+{
+	struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);
+
+	/* Disable IRQ so no more work is scheduled */
+	disable_irq(priv->irq);
+
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&priv->slave);
+
+	clk_disable_unprepare(priv->clk);
+
+	return 0;
+}
+
+static const struct of_device_id sun4i_dma_match[] = {
+	{ .compatible = "allwinner,sun4i-a10-dma" },
+	{ /* sentinel */ },
+};
+
+static struct platform_driver sun4i_dma_driver = {
+	.probe	= sun4i_dma_probe,
+	.remove	= sun4i_dma_remove,
+	.driver	= {
+		.name		= "sun4i-dma",
+		.of_match_table	= sun4i_dma_match,
+	},
+};
+
+module_platform_driver(sun4i_dma_driver);
+
+MODULE_DESCRIPTION("Allwinner A10 Dedicated DMA Controller Driver");
+MODULE_AUTHOR("Emilio López <emilio@elopez.com.ar>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 842ff97..73e0be6 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -969,7 +969,7 @@
 	sdc->slave.device_issue_pending		= sun6i_dma_issue_pending;
 	sdc->slave.device_prep_slave_sg		= sun6i_dma_prep_slave_sg;
 	sdc->slave.device_prep_dma_memcpy	= sun6i_dma_prep_dma_memcpy;
-	sdc->slave.copy_align			= 4;
+	sdc->slave.copy_align			= DMAENGINE_ALIGN_4_BYTES;
 	sdc->slave.device_config		= sun6i_dma_config;
 	sdc->slave.device_pause			= sun6i_dma_pause;
 	sdc->slave.device_resume		= sun6i_dma_resume;
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index eaf585e..c8f79dc 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -155,7 +155,6 @@
 	int				req_len;
 	bool				configured;
 	bool				last_sg;
-	bool				half_done;
 	struct list_head		node;
 	struct tegra_dma_desc		*dma_desc;
 };
@@ -188,7 +187,7 @@
 	bool			config_init;
 	int			id;
 	int			irq;
-	unsigned long		chan_base_offset;
+	void __iomem		*chan_addr;
 	spinlock_t		lock;
 	bool			busy;
 	struct tegra_dma	*tdma;
@@ -203,8 +202,6 @@
 	/* ISR handler and tasklet for bottom half of isr handling */
 	dma_isr_handler		isr_handler;
 	struct tasklet_struct	tasklet;
-	dma_async_tx_callback	callback;
-	void			*callback_param;
 
 	/* Channel-slave specific configuration */
 	unsigned int slave_id;
@@ -222,6 +219,13 @@
 	void __iomem			*base_addr;
 	const struct tegra_dma_chip_data *chip_data;
 
+	/*
+	 * Counter for managing global pausing of the DMA controller.
+	 * Only applicable for devices that don't support individual
+	 * channel pausing.
+	 */
+	u32				global_pause_count;
+
 	/* Some register need to be cache before suspend */
 	u32				reg_gen;
 
@@ -242,12 +246,12 @@
 static inline void tdc_write(struct tegra_dma_channel *tdc,
 		u32 reg, u32 val)
 {
-	writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
+	writel(val, tdc->chan_addr + reg);
 }
 
 static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
 {
-	return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
+	return readl(tdc->chan_addr + reg);
 }
 
 static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
@@ -361,16 +365,32 @@
 	struct tegra_dma *tdma = tdc->tdma;
 
 	spin_lock(&tdma->global_lock);
-	tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
-	if (wait_for_burst_complete)
-		udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+
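+	/* Only the first pauser actually stops the DMA controller */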
+	if (tdc->tdma->global_pause_count == 0) {
+		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
+		if (wait_for_burst_complete)
+			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+	}
+
+	tdc->tdma->global_pause_count++;
+
+	spin_unlock(&tdma->global_lock);
 }
 
 static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
 {
 	struct tegra_dma *tdma = tdc->tdma;
 
-	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
+	spin_lock(&tdma->global_lock);
+
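+	/* Only the last resume re-enables the DMA controller */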
+	if (WARN_ON(tdc->tdma->global_pause_count == 0))
+		goto out;
+
+	if (--tdc->tdma->global_pause_count == 0)
+		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
+			   TEGRA_APBDMA_GENERAL_ENABLE);
+
+out:
 	spin_unlock(&tdma->global_lock);
 }
 
@@ -601,7 +621,6 @@
 		return;
 
 	tdc_start_head_req(tdc);
-	return;
 }
 
 static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
@@ -628,7 +647,6 @@
 		if (!st)
 			dma_desc->dma_status = DMA_ERROR;
 	}
-	return;
 }
 
 static void tegra_dma_tasklet(unsigned long data)
@@ -720,7 +738,6 @@
 	}
 end:
 	spin_unlock_irqrestore(&tdc->lock, flags);
-	return;
 }
 
 static int tegra_dma_terminate_all(struct dma_chan *dc)
@@ -932,7 +949,6 @@
 	struct tegra_dma_sg_req  *sg_req = NULL;
 	u32 burst_size;
 	enum dma_slave_buswidth slave_bw;
-	int ret;
 
 	if (!tdc->config_init) {
 		dev_err(tdc2dev(tdc), "dma channel is not configured\n");
@@ -943,9 +959,8 @@
 		return NULL;
 	}
 
-	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
-				&burst_size, &slave_bw);
-	if (ret < 0)
+	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
+				&burst_size, &slave_bw) < 0)
 		return NULL;
 
 	INIT_LIST_HEAD(&req_list);
@@ -1048,7 +1063,6 @@
 	dma_addr_t mem = buf_addr;
 	u32 burst_size;
 	enum dma_slave_buswidth slave_bw;
-	int ret;
 
 	if (!buf_len || !period_len) {
 		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
@@ -1087,12 +1101,10 @@
 		return NULL;
 	}
 
-	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
-				&burst_size, &slave_bw);
-	if (ret < 0)
+	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
+				&burst_size, &slave_bw) < 0)
 		return NULL;
 
-
 	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
 	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
 					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
@@ -1136,7 +1148,6 @@
 		sg_req->ch_regs.apb_seq = apb_seq;
 		sg_req->ch_regs.ahb_seq = ahb_seq;
 		sg_req->configured = false;
-		sg_req->half_done = false;
 		sg_req->last_sg = false;
 		sg_req->dma_desc = dma_desc;
 		sg_req->req_len = len;
@@ -1377,8 +1388,9 @@
 	for (i = 0; i < cdata->nr_channels; i++) {
 		struct tegra_dma_channel *tdc = &tdma->channels[i];
 
-		tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
-					i * cdata->channel_reg_size;
+		tdc->chan_addr = tdma->base_addr +
+				 TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
+				 (i * cdata->channel_reg_size);
 
 		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
 		if (!res) {
@@ -1418,6 +1430,7 @@
 	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
 	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
 
+	tdma->global_pause_count = 0;
 	tdma->dma_dev.dev = &pdev->dev;
 	tdma->dma_dev.device_alloc_chan_resources =
 					tegra_dma_alloc_chan_resources;
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 24f5ca2..5cce8c9 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -20,16 +20,19 @@
 #define TI_XBAR_OUTPUTS	127
 #define TI_XBAR_INPUTS	256
 
-static DEFINE_IDR(map_idr);
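+/* Offset applied to the xbar output: sDMA requests are 1-based, eDMA 0-based */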
+#define TI_XBAR_EDMA_OFFSET	0
+#define TI_XBAR_SDMA_OFFSET	1
 
 struct ti_dma_xbar_data {
 	void __iomem *iomem;
 
 	struct dma_router dmarouter;
+	struct idr map_idr;
 
 	u16 safe_val; /* Value to reset the crossbar lines */
 	u32 xbar_requests; /* number of DMA requests connected to XBAR */
 	u32 dma_requests; /* number of DMA requests forwarded to DMA */
+	u32 dma_offset;
 };
 
 struct ti_dma_xbar_map {
@@ -51,7 +54,7 @@
 		map->xbar_in, map->xbar_out);
 
 	ti_dma_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
-	idr_remove(&map_idr, map->xbar_out);
+	idr_remove(&xbar->map_idr, map->xbar_out);
 	kfree(map);
 }
 
@@ -81,12 +84,11 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	map->xbar_out = idr_alloc(&map_idr, NULL, 0, xbar->dma_requests,
+	map->xbar_out = idr_alloc(&xbar->map_idr, NULL, 0, xbar->dma_requests,
 				  GFP_KERNEL);
 	map->xbar_in = (u16)dma_spec->args[0];
 
-	/* The DMA request is 1 based in sDMA */
-	dma_spec->args[0] = map->xbar_out + 1;
+	dma_spec->args[0] = map->xbar_out + xbar->dma_offset;
 
 	dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
 		map->xbar_in, map->xbar_out);
@@ -96,9 +98,22 @@
 	return map;
 }
 
+static const struct of_device_id ti_dma_master_match[] = {
+	{
+		.compatible = "ti,omap4430-sdma",
+		.data = (void *)TI_XBAR_SDMA_OFFSET,
+	},
+	{
+		.compatible = "ti,edma3",
+		.data = (void *)TI_XBAR_EDMA_OFFSET,
+	},
+	{},
+};
+
 static int ti_dma_xbar_probe(struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node;
+	const struct of_device_id *match;
 	struct device_node *dma_node;
 	struct ti_dma_xbar_data *xbar;
 	struct resource *res;
@@ -113,12 +128,20 @@
 	if (!xbar)
 		return -ENOMEM;
 
+	idr_init(&xbar->map_idr);
+
 	dma_node = of_parse_phandle(node, "dma-masters", 0);
 	if (!dma_node) {
 		dev_err(&pdev->dev, "Can't get DMA master node\n");
 		return -ENODEV;
 	}
 
+	match = of_match_node(ti_dma_master_match, dma_node);
+	if (!match) {
+		dev_err(&pdev->dev, "DMA master is not supported\n");
+		return -EINVAL;
+	}
+
 	if (of_property_read_u32(dma_node, "dma-requests",
 				 &xbar->dma_requests)) {
 		dev_info(&pdev->dev,
@@ -139,17 +162,15 @@
 		xbar->safe_val = (u16)safe_val;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
 	iomem = devm_ioremap_resource(&pdev->dev, res);
-	if (!iomem)
-		return -ENOMEM;
+	if (IS_ERR(iomem))
+		return PTR_ERR(iomem);
 
 	xbar->iomem = iomem;
 
 	xbar->dmarouter.dev = &pdev->dev;
 	xbar->dmarouter.route_free = ti_dma_xbar_free;
+	xbar->dma_offset = (u32)match->data;
 
 	platform_set_drvdata(pdev, xbar);
 
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index c4c3d93..559cd40 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -10,10 +10,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 /* Supports:
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index dff22ab..b23e8d5 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -21,6 +21,7 @@
  * NOTE: PM support is currently not available.
  */
 
+#include <linux/acpi.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
@@ -151,7 +152,6 @@
 #define XGENE_DMA_PQ_CHANNEL		1
 #define XGENE_DMA_MAX_BYTE_CNT		0x4000	/* 16 KB */
 #define XGENE_DMA_MAX_64B_DESC_BYTE_CNT	0x14000	/* 80 KB */
-#define XGENE_DMA_XOR_ALIGNMENT		6	/* 64 Bytes */
 #define XGENE_DMA_MAX_XOR_SRC		5
 #define XGENE_DMA_16K_BUFFER_LEN_CODE	0x0
 #define XGENE_DMA_INVALID_LEN_CODE	0x7800000000000000ULL
@@ -764,12 +764,17 @@
 	struct xgene_dma_ring *ring = &chan->rx_ring;
 	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
 	struct xgene_dma_desc_hw *desc_hw;
+	struct list_head ld_completed;
 	u8 status;
 
+	INIT_LIST_HEAD(&ld_completed);
+
+	spin_lock_bh(&chan->lock);
+
 	/* Clean already completed and acked descriptors */
 	xgene_dma_clean_completed_descriptor(chan);
 
-	/* Run the callback for each descriptor, in order */
+	/* Move all completed descriptors to ld completed queue, in order */
 	list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
 		/* Get subsequent hw descriptor from DMA rx ring */
 		desc_hw = &ring->desc_hw[ring->head];
@@ -812,15 +817,17 @@
 		/* Mark this hw descriptor as processed */
 		desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
 
-		xgene_dma_run_tx_complete_actions(chan, desc_sw);
-
-		xgene_dma_clean_running_descriptor(chan, desc_sw);
-
 		/*
 		 * Decrement the pending transaction count
 		 * as we have processed one
 		 */
 		chan->pending--;
+
+		/*
+		 * Delete this node from ld running queue and append it to
+		 * ld completed queue for further processing
+		 */
+		list_move_tail(&desc_sw->node, &ld_completed);
 	}
 
 	/*
@@ -829,6 +836,14 @@
 	 * ahead and free the descriptors below.
 	 */
 	xgene_chan_xfer_ld_pending(chan);
+
+	spin_unlock_bh(&chan->lock);
+
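+	/*
+	 * The lock is dropped here so the callbacks may submit new
+	 * descriptors without deadlocking on chan->lock.
+	 */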
+	/* Run the callback for each descriptor, in order */
+	list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
+		xgene_dma_run_tx_complete_actions(chan, desc_sw);
+		xgene_dma_clean_running_descriptor(chan, desc_sw);
+	}
 }
 
 static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
@@ -877,11 +892,11 @@
 	if (!chan->desc_pool)
 		return;
 
-	spin_lock_bh(&chan->lock);
-
 	/* Process all running descriptor */
 	xgene_dma_cleanup_descriptors(chan);
 
+	spin_lock_bh(&chan->lock);
+
 	/* Clean all link descriptor queues */
 	xgene_dma_free_desc_list(chan, &chan->ld_pending);
 	xgene_dma_free_desc_list(chan, &chan->ld_running);
@@ -1201,15 +1216,11 @@
 {
 	struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;
 
-	spin_lock_bh(&chan->lock);
-
 	/* Run all cleanup for descriptors which have been completed */
 	xgene_dma_cleanup_descriptors(chan);
 
 	/* Re-enable DMA channel IRQ */
 	enable_irq(chan->rx_irq);
-
-	spin_unlock_bh(&chan->lock);
 }
 
 static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
@@ -1741,13 +1752,13 @@
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
 		dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC;
-		dma_dev->xor_align = XGENE_DMA_XOR_ALIGNMENT;
+		dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES;
 	}
 
 	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
 		dma_dev->device_prep_dma_pq = xgene_dma_prep_pq;
 		dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC;
-		dma_dev->pq_align = XGENE_DMA_XOR_ALIGNMENT;
+		dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES;
 	}
 }
 
@@ -1944,16 +1955,18 @@
 		return ret;
 
 	pdma->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(pdma->clk)) {
+	if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) {
 		dev_err(&pdev->dev, "Failed to get clk\n");
 		return PTR_ERR(pdma->clk);
 	}
 
 	/* Enable clk before accessing registers */
-	ret = clk_prepare_enable(pdma->clk);
-	if (ret) {
-		dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
-		return ret;
+	if (!IS_ERR(pdma->clk)) {
+		ret = clk_prepare_enable(pdma->clk);
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
+			return ret;
+		}
 	}
 
 	/* Remove DMA RAM out of shutdown */
@@ -1998,7 +2011,8 @@
 
 err_dma_mask:
 err_clk_enable:
-	clk_disable_unprepare(pdma->clk);
+	if (!IS_ERR(pdma->clk))
+		clk_disable_unprepare(pdma->clk);
 
 	return ret;
 }
@@ -2022,11 +2036,20 @@
 		xgene_dma_delete_chan_rings(chan);
 	}
 
-	clk_disable_unprepare(pdma->clk);
+	if (!IS_ERR(pdma->clk))
+		clk_disable_unprepare(pdma->clk);
 
 	return 0;
 }
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_dma_acpi_match_ptr[] = {
+	{"APMC0D43", 0},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, xgene_dma_acpi_match_ptr);
+#endif
+
 static const struct of_device_id xgene_dma_of_match_ptr[] = {
 	{.compatible = "apm,xgene-storm-dma",},
 	{},
@@ -2039,6 +2062,7 @@
 	.driver = {
 		.name = "X-Gene-DMA",
 		.of_match_table = xgene_dma_of_match_ptr,
+		.acpi_match_table = ACPI_PTR(xgene_dma_acpi_match_ptr),
 	},
 };
 
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
new file mode 100644
index 0000000..39915a6
--- /dev/null
+++ b/drivers/dma/zx296702_dma.c
@@ -0,0 +1,951 @@
+/*
+ * Copyright 2015 Linaro.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define DRIVER_NAME		"zx-dma"
+#define DMA_ALIGN		4
+#define DMA_MAX_SIZE		(0x10000 - PAGE_SIZE)
+#define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)
+
+#define REG_ZX_SRC_ADDR			0x00
+#define REG_ZX_DST_ADDR			0x04
+#define REG_ZX_TX_X_COUNT		0x08
+#define REG_ZX_TX_ZY_COUNT		0x0c
+#define REG_ZX_SRC_ZY_STEP		0x10
+#define REG_ZX_DST_ZY_STEP		0x14
+#define REG_ZX_LLI_ADDR			0x1c
+#define REG_ZX_CTRL			0x20
+#define REG_ZX_TC_IRQ			0x800
+#define REG_ZX_SRC_ERR_IRQ		0x804
+#define REG_ZX_DST_ERR_IRQ		0x808
+#define REG_ZX_CFG_ERR_IRQ		0x80c
+#define REG_ZX_TC_IRQ_RAW		0x810
+#define REG_ZX_SRC_ERR_IRQ_RAW		0x814
+#define REG_ZX_DST_ERR_IRQ_RAW		0x818
+#define REG_ZX_CFG_ERR_IRQ_RAW		0x81c
+#define REG_ZX_STATUS			0x820
+#define REG_ZX_DMA_GRP_PRIO		0x824
+#define REG_ZX_DMA_ARB			0x828
+
+#define ZX_FORCE_CLOSE			BIT(31)
+#define ZX_DST_BURST_WIDTH(x)		(((x) & 0x7) << 13)
+#define ZX_MAX_BURST_LEN		16
+#define ZX_SRC_BURST_LEN(x)		(((x) & 0xf) << 9)
+#define ZX_SRC_BURST_WIDTH(x)		(((x) & 0x7) << 6)
+#define ZX_IRQ_ENABLE_ALL		(3 << 4)
+#define ZX_DST_FIFO_MODE		BIT(3)
+#define ZX_SRC_FIFO_MODE		BIT(2)
+#define ZX_SOFT_REQ			BIT(1)
+#define ZX_CH_ENABLE			BIT(0)
+
+#define ZX_DMA_BUSWIDTHS \
+	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+enum zx_dma_burst_width {
+	ZX_DMA_WIDTH_8BIT	= 0,
+	ZX_DMA_WIDTH_16BIT	= 1,
+	ZX_DMA_WIDTH_32BIT	= 2,
+	ZX_DMA_WIDTH_64BIT	= 3,
+};
+
+struct zx_desc_hw {
+	u32 saddr;
+	u32 daddr;
+	u32 src_x;
+	u32 src_zy;
+	u32 src_zy_step;
+	u32 dst_zy_step;
+	u32 reserved1;
+	u32 lli;
+	u32 ctr;
+	u32 reserved[7]; /* pack as hardware registers region size */
+} __aligned(32);
+
+struct zx_dma_desc_sw {
+	struct virt_dma_desc	vd;
+	dma_addr_t		desc_hw_lli;
+	size_t			desc_num;
+	size_t			size;
+	struct zx_desc_hw	*desc_hw;
+};
+
+struct zx_dma_phy;
+
+struct zx_dma_chan {
+	struct dma_slave_config slave_cfg;
+	int			id; /* Request phy chan id */
+	u32			ccfg;
+	u32			cyclic;
+	struct virt_dma_chan	vc;
+	struct zx_dma_phy	*phy;
+	struct list_head	node;
+	dma_addr_t		dev_addr;
+	enum dma_status		status;
+};
+
+struct zx_dma_phy {
+	u32			idx;
+	void __iomem		*base;
+	struct zx_dma_chan	*vchan;
+	struct zx_dma_desc_sw	*ds_run;
+	struct zx_dma_desc_sw	*ds_done;
+};
+
+struct zx_dma_dev {
+	struct dma_device	slave;
+	void __iomem		*base;
+	spinlock_t		lock; /* lock for ch and phy */
+	struct list_head	chan_pending;
+	struct zx_dma_phy	*phy;
+	struct zx_dma_chan	*chans;
+	struct clk		*clk;
+	struct dma_pool		*pool;
+	u32			dma_channels;
+	u32			dma_requests;
+	int			irq;
+};
+
+#define to_zx_dma(dmadev) container_of(dmadev, struct zx_dma_dev, slave)
+
+static struct zx_dma_chan *to_zx_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct zx_dma_chan, vc.chan);
+}
+
+static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d)
+{
+	u32 val = 0;
+
+	val = readl_relaxed(phy->base + REG_ZX_CTRL);
+	val &= ~ZX_CH_ENABLE;
+	val |= ZX_FORCE_CLOSE;
+	writel_relaxed(val, phy->base + REG_ZX_CTRL);
+
+	val = 0x1 << phy->idx;
+	writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW);
+	writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
+	writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW);
+	writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
+}
+
+static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw)
+{
+	writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR);
+	writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR);
+	writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT);
+	writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT);
+	writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP);
+	writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP);
+	writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR);
+	writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL);
+}
+
+static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy)
+{
+	return readl_relaxed(phy->base + REG_ZX_LLI_ADDR);
+}
+
+static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d)
+{
+	return readl_relaxed(d->base + REG_ZX_STATUS);
+}
+
+static void zx_dma_init_state(struct zx_dma_dev *d)
+{
+	/* set same priority */
+	writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB);
+	/* clear all irq */
+	writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW);
+	writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
+	writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW);
+	writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
+}
+
+static int zx_dma_start_txd(struct zx_dma_chan *c)
+{
+	struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device);
+	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+
+	if (!c->phy)
+		return -EAGAIN;
+
+	if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d))
+		return -EAGAIN;
+
+	if (vd) {
+		struct zx_dma_desc_sw *ds =
+			container_of(vd, struct zx_dma_desc_sw, vd);
+		/*
+		 * fetch and remove request from vc->desc_issued
+		 * so vc->desc_issued only contains desc pending
+		 */
+		list_del(&ds->vd.node);
+		c->phy->ds_run = ds;
+		c->phy->ds_done = NULL;
+		/* start dma */
+		zx_dma_set_desc(c->phy, ds->desc_hw);
+		return 0;
+	}
+	c->phy->ds_done = NULL;
+	c->phy->ds_run = NULL;
+	return -EAGAIN;
+}
+
+static void zx_dma_task(struct zx_dma_dev *d)
+{
+	struct zx_dma_phy *p;
+	struct zx_dma_chan *c, *cn;
+	unsigned pch, pch_alloc = 0;
+	unsigned long flags;
+
+	/* check new dma request of running channel in vc->desc_issued */
+	list_for_each_entry_safe(c, cn, &d->slave.channels,
+				 vc.chan.device_node) {
+		spin_lock_irqsave(&c->vc.lock, flags);
+		p = c->phy;
+		if (p && p->ds_done && zx_dma_start_txd(c)) {
+			/* No current txd associated with this channel */
+			dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
+			/* Mark this channel free */
+			c->phy = NULL;
+			p->vchan = NULL;
+		}
+		spin_unlock_irqrestore(&c->vc.lock, flags);
+	}
+
+	/* check new channel request in d->chan_pending */
+	spin_lock_irqsave(&d->lock, flags);
+	while (!list_empty(&d->chan_pending)) {
+		c = list_first_entry(&d->chan_pending,
+				     struct zx_dma_chan, node);
+		p = &d->phy[c->id];
+		if (!p->vchan) {
+			/* remove from d->chan_pending */
+			list_del_init(&c->node);
+			pch_alloc |= 1 << c->id;
+			/* Mark this channel allocated */
+			p->vchan = c;
+			c->phy = p;
+		} else {
+			dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id);
+		}
+	}
+	spin_unlock_irqrestore(&d->lock, flags);
+
+	for (pch = 0; pch < d->dma_channels; pch++) {
+		if (pch_alloc & (1 << pch)) {
+			p = &d->phy[pch];
+			c = p->vchan;
+			if (c) {
+				spin_lock_irqsave(&c->vc.lock, flags);
+				zx_dma_start_txd(c);
+				spin_unlock_irqrestore(&c->vc.lock, flags);
+			}
+		}
+	}
+}
+
+static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
+{
+	struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id;
+	struct zx_dma_phy *p;
+	struct zx_dma_chan *c;
+	u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ);
+	u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
+	u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
+	u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);
+	u32 i, irq_chan = 0, task = 0;
+
+	while (tc) {
+		i = __ffs(tc);
+		tc &= ~BIT(i);
+		p = &d->phy[i];
+		c = p->vchan;
+		if (c) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&c->vc.lock, flags);
+			if (c->cyclic) {
+				vchan_cyclic_callback(&p->ds_run->vd);
+			} else {
+				vchan_cookie_complete(&p->ds_run->vd);
+				p->ds_done = p->ds_run;
+				task = 1;
+			}
+			spin_unlock_irqrestore(&c->vc.lock, flags);
+			irq_chan |= BIT(i);
+		}
+	}
+
+	if (serr || derr || cfg)
+		dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n",
+			 serr, derr, cfg);
+
+	writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW);
+	writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
+	writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
+	writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
+
+	if (task)
+		zx_dma_task(d);
+	return IRQ_HANDLED;
+}
+
+static void zx_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_dev *d = to_zx_dma(chan->device);
+	unsigned long flags;
+
+	spin_lock_irqsave(&d->lock, flags);
+	list_del_init(&c->node);
+	spin_unlock_irqrestore(&d->lock, flags);
+
+	vchan_free_chan_resources(&c->vc);
+	c->ccfg = 0;
+}
+
+static enum dma_status zx_dma_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *state)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_phy *p;
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+	enum dma_status ret;
+	size_t bytes = 0;
+
+	ret = dma_cookie_status(&c->vc.chan, cookie, state);
+	if (ret == DMA_COMPLETE || !state)
+		return ret;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	p = c->phy;
+	ret = c->status;
+
+	/*
+	 * If the cookie is on our issue queue, then the residue is
+	 * its total size.
+	 */
+	vd = vchan_find_desc(&c->vc, cookie);
+	if (vd) {
+		bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size;
+	} else if ((!p) || (!p->ds_run)) {
+		bytes = 0;
+	} else {
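+		/* Walk the LLIs still pending from the hardware's current position */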
+		struct zx_dma_desc_sw *ds = p->ds_run;
+		u32 clli = 0, index = 0;
+
+		bytes = 0;
+		clli = zx_dma_get_curr_lli(p);
+		index = (clli - ds->desc_hw_lli) / sizeof(struct zx_desc_hw);
+		for (; index < ds->desc_num; index++) {
+			bytes += ds->desc_hw[index].src_x;
+			/* end of lli */
+			if (!ds->desc_hw[index].lli)
+				break;
+		}
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+	dma_set_residue(state, bytes);
+	return ret;
+}
+
+static void zx_dma_issue_pending(struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_dev *d = to_zx_dma(chan->device);
+	unsigned long flags;
+	int issue = 0;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	/* add request to vc->desc_issued */
+	if (vchan_issue_pending(&c->vc)) {
+		spin_lock(&d->lock);
+		if (!c->phy && list_empty(&c->node)) {
+			/* if new channel, add chan_pending */
+			list_add_tail(&c->node, &d->chan_pending);
+			issue = 1;
+			dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
+		}
+		spin_unlock(&d->lock);
+	} else {
+		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
+	if (issue)
+		zx_dma_task(d);
+}
+
+static void zx_dma_fill_desc(struct zx_dma_desc_sw *ds, dma_addr_t dst,
+			     dma_addr_t src, size_t len, u32 num, u32 ccfg)
+{
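+	/* Chain to the next hardware LLI unless this is the last descriptor */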
+	if ((num + 1) < ds->desc_num)
+		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
+			sizeof(struct zx_desc_hw);
+	ds->desc_hw[num].saddr = src;
+	ds->desc_hw[num].daddr = dst;
+	ds->desc_hw[num].src_x = len;
+	ds->desc_hw[num].ctr = ccfg;
+}
+
+static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
+						     struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_desc_sw *ds;
+	struct zx_dma_dev *d = to_zx_dma(chan->device);
+	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct zx_desc_hw);
+
+	if (num > lli_limit) {
+		dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
+			&c->vc, num, lli_limit);
+		return NULL;
+	}
+
+	ds = kzalloc(sizeof(*ds), GFP_ATOMIC);
+	if (!ds)
+		return NULL;
+
+	ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+	if (!ds->desc_hw) {
+		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
+		kfree(ds);
+		return NULL;
+	}
+	memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num);
+	ds->desc_num = num;
+	return ds;
+}
+
+static enum zx_dma_burst_width zx_dma_burst_width(enum dma_slave_buswidth width)
+{
+	switch (width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
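+		/* ffs() maps the 1/2/4/8 byte widths to the 0/1/2/3 encoding */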
+		return ffs(width) - 1;
+	default:
+		return ZX_DMA_WIDTH_32BIT;
+	}
+}
+
+static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir)
+{
+	struct dma_slave_config *cfg = &c->slave_cfg;
+	enum zx_dma_burst_width src_width;
+	enum zx_dma_burst_width dst_width;
+	u32 maxburst = 0;
+
+	switch (dir) {
+	case DMA_MEM_TO_MEM:
+		c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ
+			| ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1)
+			| ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_32BIT)
+			| ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_32BIT);
+		break;
+	case DMA_MEM_TO_DEV:
+		c->dev_addr = cfg->dst_addr;
+	/*
+	 * The dst len is calculated from the src width, len and dst width.
+	 * We need to make sure the dst len does not exceed MAX LEN.
+	 * A trailing single transaction that does not fill a full
+	 * burst also requires identical src/dst data widths.
+	 */
+		dst_width = zx_dma_burst_width(cfg->dst_addr_width);
+		maxburst = cfg->dst_maxburst;
+		maxburst = maxburst < ZX_MAX_BURST_LEN ?
+				maxburst : ZX_MAX_BURST_LEN;
+		c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE
+			| ZX_SRC_BURST_LEN(maxburst - 1)
+			| ZX_SRC_BURST_WIDTH(dst_width)
+			| ZX_DST_BURST_WIDTH(dst_width);
+		break;
+	case DMA_DEV_TO_MEM:
+		c->dev_addr = cfg->src_addr;
+		src_width = zx_dma_burst_width(cfg->src_addr_width);
+		maxburst = cfg->src_maxburst;
+		maxburst = maxburst < ZX_MAX_BURST_LEN ?
+				maxburst : ZX_MAX_BURST_LEN;
+		c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE
+			| ZX_SRC_BURST_LEN(maxburst - 1)
+			| ZX_SRC_BURST_WIDTH(src_width)
+			| ZX_DST_BURST_WIDTH(src_width);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
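+
+/*
+ * For the device-bound directions above, the device-side bus width is used
+ * for both source and destination, per the width constraint described in
+ * the MEM_TO_DEV comment; maxburst is clamped to ZX_MAX_BURST_LEN and
+ * programmed as (burst length - 1).
+ */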
+
+static struct dma_async_tx_descriptor *zx_dma_prep_memcpy(
+	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+	size_t len, unsigned long flags)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_desc_sw *ds;
+	size_t copy = 0;
+	int num = 0;
+
+	if (!len)
+		return NULL;
+
+	if (zx_pre_config(c, DMA_MEM_TO_MEM))
+		return NULL;
+
+	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
+
+	ds = zx_alloc_desc_resource(num, chan);
+	if (!ds)
+		return NULL;
+
+	ds->size = len;
+	num = 0;
+
+	do {
+		copy = min_t(size_t, len, DMA_MAX_SIZE);
+		zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
+
+		src += copy;
+		dst += copy;
+		len -= copy;
+	} while (len);
+
+	c->cyclic = 0;
+	ds->desc_hw[num - 1].lli = 0;	/* end of link */
+	ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
+	return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
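+
+/*
+ * Illustration (not from the driver): a memcpy of len bytes becomes
+ * DIV_ROUND_UP(len, DMA_MAX_SIZE) LLIs.  Assuming a DMA_MAX_SIZE of
+ * 0x10000, a 100000-byte copy needs two descriptors, with the interrupt
+ * enabled only on the last one via ZX_IRQ_ENABLE_ALL.
+ */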
+
+static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
+	enum dma_transfer_direction dir, unsigned long flags, void *context)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_desc_sw *ds;
+	size_t len, avail, total = 0;
+	struct scatterlist *sg;
+	dma_addr_t addr, src = 0, dst = 0;
+	int num = sglen, i;
+
+	if (!sgl)
+		return NULL;
+
+	if (zx_pre_config(c, dir))
+		return NULL;
+
+	for_each_sg(sgl, sg, sglen, i) {
+		avail = sg_dma_len(sg);
+		if (avail > DMA_MAX_SIZE)
+			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
+	}
+
+	ds = zx_alloc_desc_resource(num, chan);
+	if (!ds)
+		return NULL;
+
+	c->cyclic = 0;
+	num = 0;
+	for_each_sg(sgl, sg, sglen, i) {
+		addr = sg_dma_address(sg);
+		avail = sg_dma_len(sg);
+		total += avail;
+
+		do {
+			len = min_t(size_t, avail, DMA_MAX_SIZE);
+
+			if (dir == DMA_MEM_TO_DEV) {
+				src = addr;
+				dst = c->dev_addr;
+			} else if (dir == DMA_DEV_TO_MEM) {
+				src = c->dev_addr;
+				dst = addr;
+			}
+
+			zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
+
+			addr += len;
+			avail -= len;
+		} while (avail);
+	}
+
+	ds->desc_hw[num - 1].lli = 0;	/* end of link */
+	ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
+	ds->size = total;
+	return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+		size_t period_len, enum dma_transfer_direction dir,
+		unsigned long flags)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_desc_sw *ds;
+	dma_addr_t src = 0, dst = 0;
+	int num_periods = buf_len / period_len;
+	int buf = 0, num = 0;
+
+	if (period_len > DMA_MAX_SIZE) {
+		dev_err(chan->device->dev, "maximum period size exceeded\n");
+		return NULL;
+	}
+
+	if (zx_pre_config(c, dir))
+		return NULL;
+
+	ds = zx_alloc_desc_resource(num_periods, chan);
+	if (!ds)
+		return NULL;
+	c->cyclic = 1;
+
+	while (buf < buf_len) {
+		if (dir == DMA_MEM_TO_DEV) {
+			src = dma_addr;
+			dst = c->dev_addr;
+		} else if (dir == DMA_DEV_TO_MEM) {
+			src = c->dev_addr;
+			dst = dma_addr;
+		}
+		zx_dma_fill_desc(ds, dst, src, period_len, num++,
+				 c->ccfg | ZX_IRQ_ENABLE_ALL);
+		dma_addr += period_len;
+		buf += period_len;
+	}
+
+	ds->desc_hw[num - 1].lli = ds->desc_hw_lli;
+	ds->size = buf_len;
+	return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static int zx_dma_config(struct dma_chan *chan,
+			 struct dma_slave_config *cfg)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+
+	if (!cfg)
+		return -EINVAL;
+
+	memcpy(&c->slave_cfg, cfg, sizeof(*cfg));
+
+	return 0;
+}
+
+static int zx_dma_terminate_all(struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_dev *d = to_zx_dma(chan->device);
+	struct zx_dma_phy *p = c->phy;
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
+
+	/* Prevent this channel being scheduled */
+	spin_lock(&d->lock);
+	list_del_init(&c->node);
+	spin_unlock(&d->lock);
+
+	/* Clear the tx descriptor lists */
+	spin_lock_irqsave(&c->vc.lock, flags);
+	vchan_get_all_descriptors(&c->vc, &head);
+	if (p) {
+		/* vchan is assigned to a pchan - stop the channel */
+		zx_dma_terminate_chan(p, d);
+		c->phy = NULL;
+		p->vchan = NULL;
+		p->ds_run = NULL;
+		p->ds_done = NULL;
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_dma_desc_free_list(&c->vc, &head);
+
+	return 0;
+}
+
+static int zx_dma_transfer_pause(struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	u32 val = 0;
+
+	val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
+	val &= ~ZX_CH_ENABLE;
+	writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
+
+	return 0;
+}
+
+static int zx_dma_transfer_resume(struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	u32 val = 0;
+
+	val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
+	val |= ZX_CH_ENABLE;
+	writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
+
+	return 0;
+}
+
+static void zx_dma_free_desc(struct virt_dma_desc *vd)
+{
+	struct zx_dma_desc_sw *ds =
+		container_of(vd, struct zx_dma_desc_sw, vd);
+	struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device);
+
+	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
+	kfree(ds);
+}
+
+static const struct of_device_id zx6702_dma_dt_ids[] = {
+	{ .compatible = "zte,zx296702-dma", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, zx6702_dma_dt_ids);
+
+static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+					       struct of_dma *ofdma)
+{
+	struct zx_dma_dev *d = ofdma->of_dma_data;
+	unsigned int request = dma_spec->args[0];
+	struct dma_chan *chan;
+	struct zx_dma_chan *c;
+
+	if (request > d->dma_requests)
+		return NULL;
+
+	chan = dma_get_any_slave_channel(&d->slave);
+	if (!chan) {
+		dev_err(d->slave.dev, "get channel fail in %s.\n", __func__);
+		return NULL;
+	}
+	c = to_zx_chan(chan);
+	c->id = request;
+	dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n",
+		 c->id, &c->vc);
+	return chan;
+}
+
+static int zx_dma_probe(struct platform_device *op)
+{
+	struct zx_dma_dev *d;
+	struct resource *iores;
+	int i, ret = 0;
+
+	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
+	if (!iores)
+		return -EINVAL;
+
+	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	d->base = devm_ioremap_resource(&op->dev, iores);
+	if (IS_ERR(d->base))
+		return PTR_ERR(d->base);
+
+	of_property_read_u32((&op->dev)->of_node,
+			     "dma-channels", &d->dma_channels);
+	of_property_read_u32((&op->dev)->of_node,
+			     "dma-requests", &d->dma_requests);
+	if (!d->dma_requests || !d->dma_channels)
+		return -EINVAL;
+
+	d->clk = devm_clk_get(&op->dev, NULL);
+	if (IS_ERR(d->clk)) {
+		dev_err(&op->dev, "no dma clk\n");
+		return PTR_ERR(d->clk);
+	}
+
+	d->irq = platform_get_irq(op, 0);
+	ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler,
+			       0, DRIVER_NAME, d);
+	if (ret)
+		return ret;
+
+	/* A DMA memory pool for LLIs, align on 32-byte boundary */
+	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
+			LLI_BLOCK_SIZE, 32, 0);
+	if (!d->pool)
+		return -ENOMEM;
+
+	/* init phy channel */
+	d->phy = devm_kzalloc(&op->dev,
+		d->dma_channels * sizeof(struct zx_dma_phy), GFP_KERNEL);
+	if (!d->phy)
+		return -ENOMEM;
+
+	for (i = 0; i < d->dma_channels; i++) {
+		struct zx_dma_phy *p = &d->phy[i];
+
+		p->idx = i;
+		p->base = d->base + i * 0x40;
+	}
+
+	INIT_LIST_HEAD(&d->slave.channels);
+	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
+	dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
+	d->slave.dev = &op->dev;
+	d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
+	d->slave.device_tx_status = zx_dma_tx_status;
+	d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
+	d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
+	d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic;
+	d->slave.device_issue_pending = zx_dma_issue_pending;
+	d->slave.device_config = zx_dma_config;
+	d->slave.device_terminate_all = zx_dma_terminate_all;
+	d->slave.device_pause = zx_dma_transfer_pause;
+	d->slave.device_resume = zx_dma_transfer_resume;
+	d->slave.copy_align = DMA_ALIGN;
+	d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
+	d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
+	d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV)
+			| BIT(DMA_DEV_TO_MEM);
+	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
+	/* init virtual channel */
+	d->chans = devm_kzalloc(&op->dev,
+		d->dma_requests * sizeof(struct zx_dma_chan), GFP_KERNEL);
+	if (!d->chans)
+		return -ENOMEM;
+
+	for (i = 0; i < d->dma_requests; i++) {
+		struct zx_dma_chan *c = &d->chans[i];
+
+		c->status = DMA_IN_PROGRESS;
+		INIT_LIST_HEAD(&c->node);
+		c->vc.desc_free = zx_dma_free_desc;
+		vchan_init(&c->vc, &d->slave);
+	}
+
+	/* Enable clock before accessing registers */
+	ret = clk_prepare_enable(d->clk);
+	if (ret < 0) {
+		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
+		goto zx_dma_out;
+	}
+
+	zx_dma_init_state(d);
+
+	spin_lock_init(&d->lock);
+	INIT_LIST_HEAD(&d->chan_pending);
+	platform_set_drvdata(op, d);
+
+	ret = dma_async_device_register(&d->slave);
+	if (ret)
+		goto clk_dis;
+
+	ret = of_dma_controller_register(op->dev.of_node,
+					 zx_of_dma_simple_xlate, d);
+	if (ret)
+		goto of_dma_register_fail;
+
+	dev_info(&op->dev, "initialized\n");
+	return 0;
+
+of_dma_register_fail:
+	dma_async_device_unregister(&d->slave);
+clk_dis:
+	clk_disable_unprepare(d->clk);
+zx_dma_out:
+	return ret;
+}
+
+static int zx_dma_remove(struct platform_device *op)
+{
+	struct zx_dma_chan *c, *cn;
+	struct zx_dma_dev *d = platform_get_drvdata(op);
+
+	/* explicitly free the irq */
+	devm_free_irq(&op->dev, d->irq, d);
+
+	dma_async_device_unregister(&d->slave);
+	of_dma_controller_free(op->dev.of_node);
+
+	list_for_each_entry_safe(c, cn, &d->slave.channels,
+				 vc.chan.device_node) {
+		list_del(&c->vc.chan.device_node);
+	}
+	clk_disable_unprepare(d->clk);
+	dmam_pool_destroy(d->pool);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int zx_dma_suspend_dev(struct device *dev)
+{
+	struct zx_dma_dev *d = dev_get_drvdata(dev);
+	u32 stat = 0;
+
+	stat = zx_dma_get_chan_stat(d);
+	if (stat) {
+		dev_warn(d->slave.dev,
+			 "chan %d is running fail to suspend\n", stat);
+		return -1;
+	}
+	clk_disable_unprepare(d->clk);
+	return 0;
+}
+
+static int zx_dma_resume_dev(struct device *dev)
+{
+	struct zx_dma_dev *d = dev_get_drvdata(dev);
+	int ret = 0;
+
+	ret = clk_prepare_enable(d->clk);
+	if (ret < 0) {
+		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
+		return ret;
+	}
+	zx_dma_init_state(d);
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(zx_dma_pmops, zx_dma_suspend_dev, zx_dma_resume_dev);
+
+static struct platform_driver zx_pdma_driver = {
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.pm	= &zx_dma_pmops,
+		.of_match_table = zx6702_dma_dt_ids,
+	},
+	.probe		= zx_dma_probe,
+	.remove		= zx_dma_remove,
+};
+
+module_platform_driver(zx_pdma_driver);
+
+MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver");
+MODULE_AUTHOR("Jun Nie jun.nie@linaro.org");
+MODULE_LICENSE("GPL v2");
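+
+/*
+ * A minimal, illustrative dmaengine client sketch (not part of this driver;
+ * "tx", fifo_phys, buf_dma and len are hypothetical, and "tx" must match a
+ * dma-names entry in the client's device tree node):
+ *
+ *	struct dma_chan *chan = dma_request_slave_channel(dev, "tx");
+ *	struct dma_slave_config cfg = {
+ *		.direction = DMA_MEM_TO_DEV,
+ *		.dst_addr = fifo_phys,
+ *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ *		.dst_maxburst = 8,
+ *	};
+ *	struct dma_async_tx_descriptor *desc;
+ *
+ *	dmaengine_slave_config(chan, &cfg);	(reaches zx_dma_config)
+ *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
+ *			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ *	dmaengine_submit(desc);
+ *	dma_async_issue_pending(chan);		(reaches zx_dma_issue_pending)
+ */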
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 99c69a3..d8de6a8 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -5,6 +5,9 @@
 
 menu "Firmware Drivers"
 
+config ARM_PSCI_FW
+	bool
+
 config EDD
 	tristate "BIOS Enhanced Disk Drive calls determine boot disk"
 	depends on X86
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 4a4b897..000830f 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -1,6 +1,7 @@
 #
 # Makefile for the linux kernel.
 #
+obj-$(CONFIG_ARM_PSCI_FW)	+= psci.o
 obj-$(CONFIG_DMI)		+= dmi_scan.o
 obj-$(CONFIG_DMI_SYSFS)		+= dmi-sysfs.o
 obj-$(CONFIG_EDD)		+= edd.o
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
new file mode 100644
index 0000000..42700f0
--- /dev/null
+++ b/drivers/firmware/psci.c
@@ -0,0 +1,382 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#define pr_fmt(fmt) "psci: " fmt
+
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/printk.h>
+#include <linux/psci.h>
+#include <linux/reboot.h>
+
+#include <uapi/linux/psci.h>
+
+#include <asm/cputype.h>
+#include <asm/system_misc.h>
+#include <asm/smp_plat.h>
+
+/*
+ * While a 64-bit OS can make calls with SMC32 calling conventions, for some
+ * calls it is necessary to use SMC64 to pass or return 64-bit values. For such
+ * calls PSCI_0_2_FN_NATIVE(x) will choose the appropriate (native-width)
+ * function ID.
+ */
+#ifdef CONFIG_64BIT
+#define PSCI_0_2_FN_NATIVE(name)	PSCI_0_2_FN64_##name
+#else
+#define PSCI_0_2_FN_NATIVE(name)	PSCI_0_2_FN_##name
+#endif
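+
+/*
+ * For example, PSCI_0_2_FN_NATIVE(CPU_ON) expands to PSCI_0_2_FN64_CPU_ON
+ * on a 64-bit kernel and to PSCI_0_2_FN_CPU_ON on a 32-bit one.
+ */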
+
+/*
+ * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
+ * calls to its resident CPU, so we must avoid issuing those. We never migrate
+ * a Trusted OS even if it claims to be capable of migration -- doing so will
+ * require cooperation with a Trusted OS driver.
+ */
+static int resident_cpu = -1;
+
+bool psci_tos_resident_on(int cpu)
+{
+	return cpu == resident_cpu;
+}
+
+struct psci_operations psci_ops;
+
+typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+				unsigned long, unsigned long);
+asmlinkage psci_fn __invoke_psci_fn_hvc;
+asmlinkage psci_fn __invoke_psci_fn_smc;
+static psci_fn *invoke_psci_fn;
+
+enum psci_function {
+	PSCI_FN_CPU_SUSPEND,
+	PSCI_FN_CPU_ON,
+	PSCI_FN_CPU_OFF,
+	PSCI_FN_MIGRATE,
+	PSCI_FN_MAX,
+};
+
+static u32 psci_function_id[PSCI_FN_MAX];
+
+static int psci_to_linux_errno(int errno)
+{
+	switch (errno) {
+	case PSCI_RET_SUCCESS:
+		return 0;
+	case PSCI_RET_NOT_SUPPORTED:
+		return -EOPNOTSUPP;
+	case PSCI_RET_INVALID_PARAMS:
+		return -EINVAL;
+	case PSCI_RET_DENIED:
+		return -EPERM;
+	};
+
+	return -EINVAL;
+}
+
+static u32 psci_get_version(void)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
+}
+
+static int psci_cpu_suspend(u32 state, unsigned long entry_point)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
+	err = invoke_psci_fn(fn, state, entry_point, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_off(u32 state)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_CPU_OFF];
+	err = invoke_psci_fn(fn, state, 0, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_CPU_ON];
+	err = invoke_psci_fn(fn, cpuid, entry_point, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_migrate(unsigned long cpuid)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_MIGRATE];
+	err = invoke_psci_fn(fn, cpuid, 0, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_affinity_info(unsigned long target_affinity,
+		unsigned long lowest_affinity_level)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_NATIVE(AFFINITY_INFO),
+			      target_affinity, lowest_affinity_level, 0);
+}
+
+static int psci_migrate_info_type(void)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
+}
+
+static unsigned long psci_migrate_info_up_cpu(void)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_NATIVE(MIGRATE_INFO_UP_CPU),
+			      0, 0, 0);
+}
+
+static int get_set_conduit_method(struct device_node *np)
+{
+	const char *method;
+
+	pr_info("probing for conduit method from DT.\n");
+
+	if (of_property_read_string(np, "method", &method)) {
+		pr_warn("missing \"method\" property\n");
+		return -ENXIO;
+	}
+
+	if (!strcmp("hvc", method)) {
+		invoke_psci_fn = __invoke_psci_fn_hvc;
+	} else if (!strcmp("smc", method)) {
+		invoke_psci_fn = __invoke_psci_fn_smc;
+	} else {
+		pr_warn("invalid \"method\" property: %s\n", method);
+		return -EINVAL;
+	}
+	return 0;
+}
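+
+/*
+ * Example (illustrative) firmware node parsed above; the conduit comes
+ * from the "method" property:
+ *
+ *	psci {
+ *		compatible = "arm,psci-0.2";
+ *		method = "smc";
+ *	};
+ */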
+
+static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
+{
+	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+}
+
+static void psci_sys_poweroff(void)
+{
+	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
+}
+
+/*
+ * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
+ * return DENIED (which would be fatal).
+ */
+static void __init psci_init_migrate(void)
+{
+	unsigned long cpuid;
+	int type, cpu = -1;
+
+	type = psci_ops.migrate_info_type();
+
+	if (type == PSCI_0_2_TOS_MP) {
+		pr_info("Trusted OS migration not required\n");
+		return;
+	}
+
+	if (type == PSCI_RET_NOT_SUPPORTED) {
+		pr_info("MIGRATE_INFO_TYPE not supported.\n");
+		return;
+	}
+
+	if (type != PSCI_0_2_TOS_UP_MIGRATE &&
+	    type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
+		pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
+		return;
+	}
+
+	cpuid = psci_migrate_info_up_cpu();
+	if (cpuid & ~MPIDR_HWID_BITMASK) {
+		pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
+			cpuid);
+		return;
+	}
+
+	cpu = get_logical_index(cpuid);
+	resident_cpu = cpu >= 0 ? cpu : -1;
+
+	pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
+}
+
+static void __init psci_0_2_set_functions(void)
+{
+	pr_info("Using standard PSCI v0.2 function IDs\n");
+	psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_NATIVE(CPU_SUSPEND);
+	psci_ops.cpu_suspend = psci_cpu_suspend;
+
+	psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
+	psci_ops.cpu_off = psci_cpu_off;
+
+	psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_NATIVE(CPU_ON);
+	psci_ops.cpu_on = psci_cpu_on;
+
+	psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_NATIVE(MIGRATE);
+	psci_ops.migrate = psci_migrate;
+
+	psci_ops.affinity_info = psci_affinity_info;
+
+	psci_ops.migrate_info_type = psci_migrate_info_type;
+
+	arm_pm_restart = psci_sys_reset;
+
+	pm_power_off = psci_sys_poweroff;
+}
+
+/*
+ * Probe function for PSCI firmware versions >= 0.2
+ */
+static int __init psci_probe(void)
+{
+	u32 ver = psci_get_version();
+
+	pr_info("PSCIv%d.%d detected in firmware.\n",
+			PSCI_VERSION_MAJOR(ver),
+			PSCI_VERSION_MINOR(ver));
+
+	if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
+		pr_err("Conflicting PSCI version detected.\n");
+		return -EINVAL;
+	}
+
+	psci_0_2_set_functions();
+
+	psci_init_migrate();
+
+	return 0;
+}
+
+typedef int (*psci_initcall_t)(const struct device_node *);
+
+/*
+ * PSCI init function for PSCI versions >= 0.2
+ *
+ * Probe based on the PSCI_VERSION function
+ */
+static int __init psci_0_2_init(struct device_node *np)
+{
+	int err;
+
+	err = get_set_conduit_method(np);
+
+	if (err)
+		goto out_put_node;
+	/*
+	 * Starting with v0.2, the PSCI specification introduced a call
+	 * (PSCI_VERSION) that allows probing the firmware version, so
+	 * that PSCI function IDs and version-specific initialization
+	 * can be carried out according to the specific version reported
+	 * by the firmware.
+	 */
+	err = psci_probe();
+
+out_put_node:
+	of_node_put(np);
+	return err;
+}
+
+/*
+ * PSCI < v0.2 get PSCI Function IDs via DT.
+ */
+static int __init psci_0_1_init(struct device_node *np)
+{
+	u32 id;
+	int err;
+
+	err = get_set_conduit_method(np);
+
+	if (err)
+		goto out_put_node;
+
+	pr_info("Using PSCI v0.1 Function IDs from DT\n");
+
+	if (!of_property_read_u32(np, "cpu_suspend", &id)) {
+		psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
+		psci_ops.cpu_suspend = psci_cpu_suspend;
+	}
+
+	if (!of_property_read_u32(np, "cpu_off", &id)) {
+		psci_function_id[PSCI_FN_CPU_OFF] = id;
+		psci_ops.cpu_off = psci_cpu_off;
+	}
+
+	if (!of_property_read_u32(np, "cpu_on", &id)) {
+		psci_function_id[PSCI_FN_CPU_ON] = id;
+		psci_ops.cpu_on = psci_cpu_on;
+	}
+
+	if (!of_property_read_u32(np, "migrate", &id)) {
+		psci_function_id[PSCI_FN_MIGRATE] = id;
+		psci_ops.migrate = psci_migrate;
+	}
+
+out_put_node:
+	of_node_put(np);
+	return err;
+}
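+
+/*
+ * Example (illustrative) pre-0.2 node; the function ID values are
+ * platform-specific placeholders, not anything mandated by the spec:
+ *
+ *	psci {
+ *		compatible = "arm,psci";
+ *		method = "hvc";
+ *		cpu_on = <0x95c10002>;
+ *		cpu_off = <0x95c10001>;
+ *	};
+ */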
+
+static const struct of_device_id psci_of_match[] __initconst = {
+	{ .compatible = "arm,psci",	.data = psci_0_1_init},
+	{ .compatible = "arm,psci-0.2",	.data = psci_0_2_init},
+	{},
+};
+
+int __init psci_dt_init(void)
+{
+	struct device_node *np;
+	const struct of_device_id *matched_np;
+	psci_initcall_t init_fn;
+
+	np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
+
+	if (!np)
+		return -ENODEV;
+
+	init_fn = (psci_initcall_t)matched_np->data;
+	return init_fn(np);
+}
+
+#ifdef CONFIG_ACPI
+/*
+ * We use PSCI 0.2+ when ACPI is deployed on ARM64, as explicitly
+ * clarified in the SBBR.
+ */
+int __init psci_acpi_init(void)
+{
+	if (!acpi_psci_present()) {
+		pr_info("is not implemented in ACPI.\n");
+		return -EOPNOTSUPP;
+	}
+
+	pr_info("probing for conduit method from ACPI.\n");
+
+	if (acpi_psci_use_hvc())
+		invoke_psci_fn = __invoke_psci_fn_hvc;
+	else
+		invoke_psci_fn = __invoke_psci_fn_smc;
+
+	return psci_probe();
+}
+#endif
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 1bd6f9c..29e6850 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -24,7 +24,6 @@
 #include <linux/err.h>
 #include <linux/qcom_scm.h>
 
-#include <asm/outercache.h>
 #include <asm/cacheflush.h>
 
 #include "qcom_scm.h"
@@ -219,8 +218,7 @@
 	 * Flush the command buffer so that the secure world sees
 	 * the correct data.
 	 */
-	__cpuc_flush_dcache_area((void *)cmd, cmd->len);
-	outer_flush_range(cmd_addr, cmd_addr + cmd->len);
+	secure_flush_area(cmd, cmd->len);
 
 	ret = smc(cmd_addr);
 	if (ret < 0)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8f1fe73..b4fc9e4 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -113,7 +113,6 @@
 config GPIO_ALTERA
 	tristate "Altera GPIO"
 	depends on OF_GPIO
-	select GPIO_GENERIC
 	select GPIOLIB_IRQCHIP
 	help
 	  Say Y or M here to build support for the Altera PIO device.
@@ -131,6 +130,7 @@
 	default y if ARCH_BRCMSTB
 	depends on OF_GPIO && (ARCH_BRCMSTB || COMPILE_TEST)
 	select GPIO_GENERIC
+	select GPIOLIB_IRQCHIP
 	help
 	  Say yes here to enable GPIO support for Broadcom STB (BCM7XXX) SoCs.
 
@@ -172,6 +172,7 @@
 	depends on CRIS || COMPILE_TEST
 	depends on OF
 	select GPIO_GENERIC
+	select GPIOLIB_IRQCHIP
 	help
 	  Say yes here to support the GPIO controller on Axis ETRAX FS SoCs.
 
@@ -308,7 +309,6 @@
 	def_bool y
 	depends on PLAT_ORION
 	depends on OF
-	select GPIO_GENERIC
 	select GENERIC_IRQ_CHIP
 
 config GPIO_MXC
@@ -1005,6 +1005,12 @@
 	  SPI driver for Freescale MC33880 high-side/low-side switch.
 	  This provides GPIO interface supporting inputs and outputs.
 
+config GPIO_ZX
+	bool "ZTE ZX GPIO support"
+	select GPIOLIB_IRQCHIP
+	help
+	  Say yes here to support the GPIO device on ZTE ZX SoCs.
+
 endmenu
 
 menu "USB GPIO expanders"
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index f82cd67..f79a7c4 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -20,6 +20,7 @@
 obj-$(CONFIG_GPIO_ALTERA)  	+= gpio-altera.o
 obj-$(CONFIG_GPIO_AMD8111)	+= gpio-amd8111.o
 obj-$(CONFIG_GPIO_ARIZONA)	+= gpio-arizona.o
+obj-$(CONFIG_ATH79)		+= gpio-ath79.o
 obj-$(CONFIG_GPIO_BCM_KONA)	+= gpio-bcm-kona.o
 obj-$(CONFIG_GPIO_BRCMSTB)	+= gpio-brcmstb.o
 obj-$(CONFIG_GPIO_BT8XX)	+= gpio-bt8xx.o
@@ -116,3 +117,4 @@
 obj-$(CONFIG_GPIO_XTENSA)	+= gpio-xtensa.o
 obj-$(CONFIG_GPIO_ZEVIO)	+= gpio-zevio.o
 obj-$(CONFIG_GPIO_ZYNQ)		+= gpio-zynq.o
+obj-$(CONFIG_GPIO_ZX)		+= gpio-zx.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 07ba823..903fcf4 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -59,13 +59,13 @@
  * automatically disposed on driver detach. See gpiod_get() for detailed
  * information about behavior and return values.
  */
-struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
 					      const char *con_id,
 					      enum gpiod_flags flags)
 {
 	return devm_gpiod_get_index(dev, con_id, 0, flags);
 }
-EXPORT_SYMBOL(__devm_gpiod_get);
+EXPORT_SYMBOL(devm_gpiod_get);
 
 /**
  * devm_gpiod_get_optional - Resource-managed gpiod_get_optional()
@@ -77,13 +77,13 @@
  * are automatically disposed on driver detach. See gpiod_get_optional() for
  * detailed information about behavior and return values.
  */
-struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
 						       const char *con_id,
 						       enum gpiod_flags flags)
 {
 	return devm_gpiod_get_index_optional(dev, con_id, 0, flags);
 }
-EXPORT_SYMBOL(__devm_gpiod_get_optional);
+EXPORT_SYMBOL(devm_gpiod_get_optional);
 
 /**
  * devm_gpiod_get_index - Resource-managed gpiod_get_index()
@@ -96,7 +96,7 @@
  * automatically disposed on driver detach. See gpiod_get_index() for detailed
  * information about behavior and return values.
  */
-struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
 						    const char *con_id,
 						    unsigned int idx,
 						    enum gpiod_flags flags)
@@ -120,7 +120,7 @@
 
 	return desc;
 }
-EXPORT_SYMBOL(__devm_gpiod_get_index);
+EXPORT_SYMBOL(devm_gpiod_get_index);
 
 /**
  * devm_get_gpiod_from_child - get a GPIO descriptor from a device's child node
@@ -182,10 +182,10 @@
  * gpiod_get_index_optional() for detailed information about behavior and
  * return values.
  */
-struct gpio_desc *__must_check __devm_gpiod_get_index_optional(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev,
 							     const char *con_id,
 							     unsigned int index,
-							 enum gpiod_flags flags)
+							     enum gpiod_flags flags)
 {
 	struct gpio_desc *desc;
 
@@ -197,7 +197,7 @@
 
 	return desc;
 }
-EXPORT_SYMBOL(__devm_gpiod_get_index_optional);
+EXPORT_SYMBOL(devm_gpiod_get_index_optional);
 
 /**
  * devm_gpiod_get_array - Resource-managed gpiod_get_array()
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 0763655..6ed7c0f 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -129,7 +129,7 @@
 	if (IS_ERR(dat))
 		return PTR_ERR(dat);
 
-	priv->flags = (unsigned)of_id->data;
+	priv->flags = (uintptr_t) of_id->data;
 
 	err = bgpio_init(&priv->bgc, &pdev->dev,
 			 DIV_ROUND_UP(MMIO_74XX_BIT_CNT(priv->flags), 8),
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index d3fe6a6..984186e 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -305,15 +305,7 @@
 		irq_set_chip_and_handler(irq, &adp5588_irq_chip,
 					 handle_level_irq);
 		irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
-		/*
-		 * ARM needs us to explicitly flag the IRQ as VALID,
-		 * once we do so, it will also set the noprobe.
-		 */
-		set_irq_flags(irq, IRQF_VALID);
-#else
-		irq_set_noprobe(irq);
-#endif
+		irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE);
 	}
 
 	ret = request_threaded_irq(client->irq,
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 0f3d336..9b7e0b3d 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -338,9 +338,9 @@
 {
 	struct altera_gpio_chip *altera_gc = platform_get_drvdata(pdev);
 
-	gpiochip_remove(&altera_gc->mmchip.gc);
+	of_mm_gpiochip_remove(&altera_gc->mmchip);
 
-	return -EIO;
+	return 0;
 }
 
 static const struct of_device_id altera_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
new file mode 100644
index 0000000..03b9953
--- /dev/null
+++ b/drivers/gpio/gpio-ath79.c
@@ -0,0 +1,204 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X GPIO API support
+ *
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/gpio.h>
+#include <linux/platform_data/gpio-ath79.h>
+#include <linux/of_device.h>
+
+#include <asm/mach-ath79/ar71xx_regs.h>
+
+static void __iomem *ath79_gpio_base;
+static u32 ath79_gpio_count;
+static DEFINE_SPINLOCK(ath79_gpio_lock);
+
+static void __ath79_gpio_set_value(unsigned gpio, int value)
+{
+	void __iomem *base = ath79_gpio_base;
+
+	if (value)
+		__raw_writel(1 << gpio, base + AR71XX_GPIO_REG_SET);
+	else
+		__raw_writel(1 << gpio, base + AR71XX_GPIO_REG_CLEAR);
+}
+
+static int __ath79_gpio_get_value(unsigned gpio)
+{
+	return (__raw_readl(ath79_gpio_base + AR71XX_GPIO_REG_IN) >> gpio) & 1;
+}
+
+static int ath79_gpio_get_value(struct gpio_chip *chip, unsigned offset)
+{
+	return __ath79_gpio_get_value(offset);
+}
+
+static void ath79_gpio_set_value(struct gpio_chip *chip,
+				  unsigned offset, int value)
+{
+	__ath79_gpio_set_value(offset, value);
+}
+
+static int ath79_gpio_direction_input(struct gpio_chip *chip,
+				       unsigned offset)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
+		     base + AR71XX_GPIO_REG_OE);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+	return 0;
+}
+
+static int ath79_gpio_direction_output(struct gpio_chip *chip,
+					unsigned offset, int value)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	if (value)
+		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
+	else
+		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
+		     base + AR71XX_GPIO_REG_OE);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+	return 0;
+}
+
+static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
+		     base + AR71XX_GPIO_REG_OE);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+	return 0;
+}
+
+static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+					int value)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	if (value)
+		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
+	else
+		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
+		     base + AR71XX_GPIO_REG_OE);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+	return 0;
+}
+
+static struct gpio_chip ath79_gpio_chip = {
+	.label			= "ath79",
+	.get			= ath79_gpio_get_value,
+	.set			= ath79_gpio_set_value,
+	.direction_input	= ath79_gpio_direction_input,
+	.direction_output	= ath79_gpio_direction_output,
+	.base			= 0,
+};
+
+static const struct of_device_id ath79_gpio_of_match[] = {
+	{ .compatible = "qca,ar7100-gpio" },
+	{ .compatible = "qca,ar9340-gpio" },
+	{},
+};
+
+static int ath79_gpio_probe(struct platform_device *pdev)
+{
+	struct ath79_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct device_node *np = pdev->dev.of_node;
+	struct resource *res;
+	bool oe_inverted;
+	int err;
+
+	if (np) {
+		err = of_property_read_u32(np, "ngpios", &ath79_gpio_count);
+		if (err) {
+			dev_err(&pdev->dev, "ngpios property is not valid\n");
+			return err;
+		}
+		if (ath79_gpio_count >= 32) {
+			dev_err(&pdev->dev, "ngpios must be less than 32\n");
+			return -EINVAL;
+		}
+		oe_inverted = of_device_is_compatible(np, "qca,ar9340-gpio");
+	} else if (pdata) {
+		ath79_gpio_count = pdata->ngpios;
+		oe_inverted = pdata->oe_inverted;
+	} else {
+		dev_err(&pdev->dev, "No DT node or platform data found\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
+
+	ath79_gpio_base = devm_ioremap_nocache(
+		&pdev->dev, res->start, resource_size(res));
+	if (!ath79_gpio_base)
+		return -ENOMEM;
+
+	ath79_gpio_chip.dev = &pdev->dev;
+	ath79_gpio_chip.ngpio = ath79_gpio_count;
+	if (oe_inverted) {
+		ath79_gpio_chip.direction_input = ar934x_gpio_direction_input;
+		ath79_gpio_chip.direction_output = ar934x_gpio_direction_output;
+	}
+
+	err = gpiochip_add(&ath79_gpio_chip);
+	if (err) {
+		dev_err(&pdev->dev,
+			"cannot add AR71xx GPIO chip, error=%d", err);
+		return err;
+	}
+
+	return 0;
+}
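+
+/*
+ * Example (illustrative) device tree node matched above; the unit address
+ * and register size are placeholders, and ngpios must be below 32:
+ *
+ *	gpio@18040000 {
+ *		compatible = "qca,ar9340-gpio";
+ *		reg = <0x18040000 0x28>;
+ *		ngpios = <23>;
+ *	};
+ */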
+
+static struct platform_driver ath79_gpio_driver = {
+	.driver = {
+		.name = "ath79-gpio",
+		.of_match_table	= ath79_gpio_of_match,
+	},
+	.probe = ath79_gpio_probe,
+};
+
+module_platform_driver(ath79_gpio_driver);
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 40343fa..31b90ac 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -438,7 +438,7 @@
 	void __iomem *reg_base;
 	int bit, bank_id;
 	unsigned long sta;
-	struct bcm_kona_gpio_bank *bank = irq_get_handler_data(irq);
+	struct bcm_kona_gpio_bank *bank = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 
 	chained_irq_enter(chip, desc);
@@ -525,11 +525,7 @@
 		return ret;
 	irq_set_lockdep_class(irq, &gpio_lock_class);
 	irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
 
 	return 0;
 }
@@ -644,17 +640,6 @@
 		dev_err(dev, "Couldn't add GPIO chip -- %d\n", ret);
 		goto err_irq_domain;
 	}
-	for (i = 0; i < chip->ngpio; i++) {
-		int irq = bcm_kona_gpio_to_irq(chip, i);
-		irq_set_lockdep_class(irq, &gpio_lock_class);
-		irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip,
-					 handle_simple_irq);
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, IRQF_VALID);
-#else
-		irq_set_noprobe(irq);
-#endif
-	}
 	for (i = 0; i < kona_gpio->num_bank; i++) {
 		bank = &kona_gpio->banks[i];
 		irq_set_chained_handler_and_data(bank->irq,
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 4630a81..9ea86d2 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -17,6 +17,10 @@
 #include <linux/of_irq.h>
 #include <linux/module.h>
 #include <linux/basic_mmio_gpio.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/interrupt.h>
+#include <linux/reboot.h>
 
 #define GIO_BANK_SIZE           0x20
 #define GIO_ODEN(bank)          (((bank) * GIO_BANK_SIZE) + 0x00)
@@ -34,14 +38,18 @@
 	struct bgpio_chip bgc;
 	struct brcmstb_gpio_priv *parent_priv;
 	u32 width;
+	struct irq_chip irq_chip;
 };
 
 struct brcmstb_gpio_priv {
 	struct list_head bank_list;
 	void __iomem *reg_base;
-	int num_banks;
 	struct platform_device *pdev;
+	int parent_irq;
 	int gpio_base;
+	bool can_wake;
+	int parent_wake_irq;
+	struct notifier_block reboot_notifier;
 };
 
 #define MAX_GPIO_PER_BANK           32
@@ -63,6 +71,203 @@
 	return bank->parent_priv;
 }
 
+static void brcmstb_gpio_set_imask(struct brcmstb_gpio_bank *bank,
+		unsigned int offset, bool enable)
+{
+	struct bgpio_chip *bgc = &bank->bgc;
+	struct brcmstb_gpio_priv *priv = bank->parent_priv;
+	u32 mask = bgc->pin2mask(bgc, offset);
+	u32 imask;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bgc->lock, flags);
+	imask = bgc->read_reg(priv->reg_base + GIO_MASK(bank->id));
+	if (enable)
+		imask |= mask;
+	else
+		imask &= ~mask;
+	bgc->write_reg(priv->reg_base + GIO_MASK(bank->id), imask);
+	spin_unlock_irqrestore(&bgc->lock, flags);
+}
+
+/* -------------------- IRQ chip functions -------------------- */
+
+static void brcmstb_gpio_irq_mask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc);
+
+	brcmstb_gpio_set_imask(bank, d->hwirq, false);
+}
+
+static void brcmstb_gpio_irq_unmask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc);
+
+	brcmstb_gpio_set_imask(bank, d->hwirq, true);
+}
+
+static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc);
+	struct brcmstb_gpio_priv *priv = bank->parent_priv;
+	u32 mask = BIT(d->hwirq);
+	u32 edge_insensitive, iedge_insensitive;
+	u32 edge_config, iedge_config;
+	u32 level, ilevel;
+	unsigned long flags;
+
+	switch (type) {
+	case IRQ_TYPE_LEVEL_LOW:
+		level = 0;
+		edge_config = 0;
+		edge_insensitive = 0;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		level = mask;
+		edge_config = 0;
+		edge_insensitive = 0;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		level = 0;
+		edge_config = 0;
+		edge_insensitive = 0;
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		level = 0;
+		edge_config = mask;
+		edge_insensitive = 0;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		level = 0;
+		edge_config = 0;  /* don't care, but want known value */
+		edge_insensitive = mask;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&bank->bgc.lock, flags);
+
+	iedge_config = bank->bgc.read_reg(priv->reg_base +
+			GIO_EC(bank->id)) & ~mask;
+	iedge_insensitive = bank->bgc.read_reg(priv->reg_base +
+			GIO_EI(bank->id)) & ~mask;
+	ilevel = bank->bgc.read_reg(priv->reg_base +
+			GIO_LEVEL(bank->id)) & ~mask;
+
+	bank->bgc.write_reg(priv->reg_base + GIO_EC(bank->id),
+			iedge_config | edge_config);
+	bank->bgc.write_reg(priv->reg_base + GIO_EI(bank->id),
+			iedge_insensitive | edge_insensitive);
+	bank->bgc.write_reg(priv->reg_base + GIO_LEVEL(bank->id),
+			ilevel | level);
+
+	spin_unlock_irqrestore(&bank->bgc.lock, flags);
+	return 0;
+}
+
+static int brcmstb_gpio_priv_set_wake(struct brcmstb_gpio_priv *priv,
+		unsigned int enable)
+{
+	int ret = 0;
+
+	/*
+	 * Only enable wake IRQ once for however many hwirqs can wake
+	 * since they all use the same wake IRQ.  Mask will be set
+	 * up appropriately thanks to IRQCHIP_MASK_ON_SUSPEND flag.
+	 */
+	if (enable)
+		ret = enable_irq_wake(priv->parent_wake_irq);
+	else
+		ret = disable_irq_wake(priv->parent_wake_irq);
+	if (ret)
+		dev_err(&priv->pdev->dev, "failed to %s wake-up interrupt\n",
+				enable ? "enable" : "disable");
+	return ret;
+}
+
+static int brcmstb_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
+
+	return brcmstb_gpio_priv_set_wake(priv, enable);
+}
+
+static irqreturn_t brcmstb_gpio_wake_irq_handler(int irq, void *data)
+{
+	struct brcmstb_gpio_priv *priv = data;
+
+	if (!priv || irq != priv->parent_wake_irq)
+		return IRQ_NONE;
+	pm_wakeup_event(&priv->pdev->dev, 0);
+	return IRQ_HANDLED;
+}
+
+static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank)
+{
+	struct brcmstb_gpio_priv *priv = bank->parent_priv;
+	struct irq_domain *irq_domain = bank->bgc.gc.irqdomain;
+	void __iomem *reg_base = priv->reg_base;
+	unsigned long status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bank->bgc.lock, flags);
+	while ((status = bank->bgc.read_reg(reg_base + GIO_STAT(bank->id)) &
+			 bank->bgc.read_reg(reg_base + GIO_MASK(bank->id)))) {
+		int bit;
+
+		for_each_set_bit(bit, &status, 32) {
+			u32 stat = bank->bgc.read_reg(reg_base +
+						      GIO_STAT(bank->id));
+			if (bit >= bank->width)
+				dev_warn(&priv->pdev->dev,
+					 "IRQ for invalid GPIO (bank=%d, offset=%d)\n",
+					 bank->id, bit);
+			bank->bgc.write_reg(reg_base + GIO_STAT(bank->id),
+					    stat | BIT(bit));
+			generic_handle_irq(irq_find_mapping(irq_domain, bit));
+		}
+	}
+	spin_unlock_irqrestore(&bank->bgc.lock, flags);
+}
+
+/* Each UPG GIO block has one IRQ for all banks */
+static void brcmstb_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+	struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct list_head *pos;
+
+	/* Interrupts weren't properly cleared during probe */
+	BUG_ON(!priv || !chip);
+
+	chained_irq_enter(chip, desc);
+	list_for_each(pos, &priv->bank_list) {
+		struct brcmstb_gpio_bank *bank =
+			list_entry(pos, struct brcmstb_gpio_bank, node);
+		brcmstb_gpio_irq_bank_handler(bank);
+	}
+	chained_irq_exit(chip, desc);
+}
+
+static int brcmstb_gpio_reboot(struct notifier_block *nb,
+		unsigned long action, void *data)
+{
+	struct brcmstb_gpio_priv *priv =
+		container_of(nb, struct brcmstb_gpio_priv, reboot_notifier);
+
+	/* Enable GPIO for S5 cold boot */
+	if (action == SYS_POWER_OFF)
+		brcmstb_gpio_priv_set_wake(priv, 1);
+
+	return NOTIFY_DONE;
+}
+
 /* Make sure that the number of banks matches up between properties */
 static int brcmstb_gpio_sanity_check_banks(struct device *dev,
 		struct device_node *np, struct resource *res)
@@ -100,7 +305,13 @@
 		bank = list_entry(pos, struct brcmstb_gpio_bank, node);
 		ret = bgpio_remove(&bank->bgc);
 		if (ret)
-			dev_err(&pdev->dev, "gpiochip_remove fail in cleanup");
+			dev_err(&pdev->dev, "gpiochip_remove fail in cleanup\n");
+	}
+	if (priv->reboot_notifier.notifier_call) {
+		ret = unregister_reboot_notifier(&priv->reboot_notifier);
+		if (ret)
+			dev_err(&pdev->dev,
+				"failed to unregister reboot notifier\n");
 	}
 	return ret;
 }
@@ -121,7 +332,7 @@
 		return -EINVAL;
 
 	offset = gpiospec->args[0] - (gc->base - priv->gpio_base);
-	if (offset >= gc->ngpio)
+	if (offset >= gc->ngpio || offset < 0)
 		return -EINVAL;
 
 	if (unlikely(offset >= bank->width)) {
@@ -136,6 +347,65 @@
 	return offset;
 }
 
+/* Before calling, must have bank->parent_irq set and gpiochip registered */
+static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
+		struct brcmstb_gpio_bank *bank)
+{
+	struct brcmstb_gpio_priv *priv = bank->parent_priv;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+
+	bank->irq_chip.name = dev_name(dev);
+	bank->irq_chip.irq_mask = brcmstb_gpio_irq_mask;
+	bank->irq_chip.irq_unmask = brcmstb_gpio_irq_unmask;
+	bank->irq_chip.irq_set_type = brcmstb_gpio_irq_set_type;
+
+	/* Ensures that all non-wakeup IRQs are disabled at suspend */
+	bank->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
+	if (IS_ENABLED(CONFIG_PM_SLEEP) && !priv->can_wake &&
+			of_property_read_bool(np, "wakeup-source")) {
+		priv->parent_wake_irq = platform_get_irq(pdev, 1);
+		if (priv->parent_wake_irq < 0) {
+			dev_warn(dev,
+				"Couldn't get wake IRQ - GPIOs will not be able to wake from sleep");
+		} else {
+			int err;
+
+			/*
+			 * Set wakeup capability before requesting wakeup
+			 * interrupt, so we can process boot-time "wakeups"
+			 * (e.g., from S5 cold boot)
+			 */
+			device_set_wakeup_capable(dev, true);
+			device_wakeup_enable(dev);
+			err = devm_request_irq(dev, priv->parent_wake_irq,
+					brcmstb_gpio_wake_irq_handler, 0,
+					"brcmstb-gpio-wake", priv);
+
+			if (err < 0) {
+				dev_err(dev, "Couldn't request wake IRQ");
+				return err;
+			}
+
+			priv->reboot_notifier.notifier_call =
+				brcmstb_gpio_reboot;
+			register_reboot_notifier(&priv->reboot_notifier);
+			priv->can_wake = true;
+		}
+	}
+
+	if (priv->can_wake)
+		bank->irq_chip.irq_set_wake = brcmstb_gpio_irq_set_wake;
+
+	gpiochip_irqchip_add(&bank->bgc.gc, &bank->irq_chip, 0,
+			handle_simple_irq, IRQ_TYPE_NONE);
+	gpiochip_set_chained_irqchip(&bank->bgc.gc, &bank->irq_chip,
+			priv->parent_irq, brcmstb_gpio_irq_handler);
+
+	return 0;
+}
+
 static int brcmstb_gpio_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -146,6 +416,7 @@
 	struct property *prop;
 	const __be32 *p;
 	u32 bank_width;
+	int num_banks = 0;
 	int err;
 	static int gpio_base;
 
@@ -164,6 +435,16 @@
 	priv->reg_base = reg_base;
 	priv->pdev = pdev;
 
+	if (of_property_read_bool(np, "interrupt-controller")) {
+		priv->parent_irq = platform_get_irq(pdev, 0);
+		if (priv->parent_irq <= 0) {
+			dev_err(dev, "Couldn't get IRQ");
+			return -ENOENT;
+		}
+	} else {
+		priv->parent_irq = -ENOENT;
+	}
+
 	if (brcmstb_gpio_sanity_check_banks(dev, np, res))
 		return -EINVAL;
 
@@ -180,7 +461,7 @@
 		}
 
 		bank->parent_priv = priv;
-		bank->id = priv->num_banks;
+		bank->id = num_banks;
 		if (bank_width <= 0 || bank_width > MAX_GPIO_PER_BANK) {
 			dev_err(dev, "Invalid bank width %d\n", bank_width);
 			goto fail;
@@ -212,6 +493,12 @@
 		/* not all ngpio lines are valid, will use bank width later */
 		gc->ngpio = MAX_GPIO_PER_BANK;
 
+		/*
+		 * Mask all interrupts by default, since wakeup interrupts may
+		 * be retained from S5 cold boot
+		 */
+		bank->bgc.write_reg(reg_base + GIO_MASK(bank->id), 0);
+
 		err = gpiochip_add(gc);
 		if (err) {
 			dev_err(dev, "Could not add gpiochip for bank %d\n",
@@ -219,17 +506,24 @@
 			goto fail;
 		}
 		gpio_base += gc->ngpio;
+
+		if (priv->parent_irq > 0) {
+			err = brcmstb_gpio_irq_setup(pdev, bank);
+			if (err)
+				goto fail;
+		}
+
 		dev_dbg(dev, "bank=%d, base=%d, ngpio=%d, width=%d\n", bank->id,
 			gc->base, gc->ngpio, bank->width);
 
 		/* Everything looks good, so add bank to list */
 		list_add(&bank->node, &priv->bank_list);
 
-		priv->num_banks++;
+		num_banks++;
 	}
 
 	dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n",
-			priv->num_banks, priv->gpio_base, gpio_base - 1);
+			num_banks, priv->gpio_base, gpio_base - 1);
 
 	return 0;
 
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index c246ac3..94b0ab7 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -65,11 +65,11 @@
 	return ptr;
 }
 
-static inline struct davinci_gpio_regs __iomem *irq2regs(int irq)
+static inline struct davinci_gpio_regs __iomem *irq2regs(struct irq_data *d)
 {
 	struct davinci_gpio_regs __iomem *g;
 
-	g = (__force struct davinci_gpio_regs __iomem *)irq_get_chip_data(irq);
+	g = (__force struct davinci_gpio_regs __iomem *)irq_data_get_irq_chip_data(d);
 
 	return g;
 }
@@ -287,7 +287,7 @@
 
 static void gpio_irq_disable(struct irq_data *d)
 {
-	struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+	struct davinci_gpio_regs __iomem *g = irq2regs(d);
 	u32 mask = (u32) irq_data_get_irq_handler_data(d);
 
 	writel_relaxed(mask, &g->clr_falling);
@@ -296,7 +296,7 @@
 
 static void gpio_irq_enable(struct irq_data *d)
 {
-	struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+	struct davinci_gpio_regs __iomem *g = irq2regs(d);
 	u32 mask = (u32) irq_data_get_irq_handler_data(d);
 	unsigned status = irqd_get_trigger_type(d);
 
@@ -327,8 +327,9 @@
 };
 
 static void
-gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+gpio_irq_handler(unsigned __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct davinci_gpio_regs __iomem *g;
 	u32 mask = 0xffff;
 	struct davinci_gpio_controller *d;
@@ -396,7 +397,7 @@
 	struct davinci_gpio_regs __iomem *g;
 	u32 mask;
 
-	d = (struct davinci_gpio_controller *)data->handler_data;
+	d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data);
 	g = (struct davinci_gpio_regs __iomem *)d->regs;
 	mask = __gpio_mask(data->irq - d->gpio_irq);
 
@@ -422,7 +423,6 @@
 	irq_set_irq_type(irq, IRQ_TYPE_NONE);
 	irq_set_chip_data(irq, (__force void *)g);
 	irq_set_handler_data(irq, (void *)__gpio_mask(hw));
-	set_irq_flags(irq, IRQF_VALID);
 
 	return 0;
 }
@@ -545,7 +545,7 @@
 		chips[0].chip.to_irq = gpio_to_irq_unbanked;
 		chips[0].gpio_irq = bank_irq;
 		chips[0].gpio_unbanked = pdata->gpio_unbanked;
-		binten = BIT(0);
+		binten = GENMASK(pdata->gpio_unbanked / 16, 0);
 
 		/* AINTC handles mask/unmask; GPIO handles triggering */
 		irq = bank_irq;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 55fa985..c5be4b9 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -149,7 +149,7 @@
 
 static void dwapb_irq_handler(u32 irq, struct irq_desc *desc)
 {
-	struct dwapb_gpio *gpio = irq_get_handler_data(irq);
+	struct dwapb_gpio *gpio = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 
 	dwapb_do_irq(gpio);
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index fbf2873..6bca1e1 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -31,7 +31,6 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/pinctrl/consumer.h>
-#include <linux/platform_data/gpio-em.h>
 
 struct em_gio_priv {
 	void __iomem *base0;
@@ -262,7 +261,6 @@
 
 	irq_set_chip_data(irq, h->host_data);
 	irq_set_chip_and_handler(irq, &p->irq_chip, handle_level_irq);
-	set_irq_flags(irq, IRQF_VALID); /* kill me now */
 	return 0;
 }
 
@@ -273,13 +271,12 @@
 
 static int em_gio_probe(struct platform_device *pdev)
 {
-	struct gpio_em_config pdata_dt;
-	struct gpio_em_config *pdata = dev_get_platdata(&pdev->dev);
 	struct em_gio_priv *p;
 	struct resource *io[2], *irq[2];
 	struct gpio_chip *gpio_chip;
 	struct irq_chip *irq_chip;
 	const char *name = dev_name(&pdev->dev);
+	unsigned int ngpios;
 	int ret;
 
 	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
@@ -319,18 +316,10 @@
 		goto err0;
 	}
 
-	if (!pdata) {
-		memset(&pdata_dt, 0, sizeof(pdata_dt));
-		pdata = &pdata_dt;
-
-		if (of_property_read_u32(pdev->dev.of_node, "ngpios",
-					 &pdata->number_of_pins)) {
-			dev_err(&pdev->dev, "Missing ngpios OF property\n");
-			ret = -EINVAL;
-			goto err0;
-		}
-
-		pdata->gpio_base = -1;
+	if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) {
+		dev_err(&pdev->dev, "Missing ngpios OF property\n");
+		ret = -EINVAL;
+		goto err0;
 	}
 
 	gpio_chip = &p->gpio_chip;
@@ -345,8 +334,8 @@
 	gpio_chip->label = name;
 	gpio_chip->dev = &pdev->dev;
 	gpio_chip->owner = THIS_MODULE;
-	gpio_chip->base = pdata->gpio_base;
-	gpio_chip->ngpio = pdata->number_of_pins;
+	gpio_chip->base = -1;
+	gpio_chip->ngpio = ngpios;
 
 	irq_chip = &p->irq_chip;
 	irq_chip->name = name;
@@ -357,9 +346,7 @@
 	irq_chip->irq_release_resources = em_gio_irq_relres;
 	irq_chip->flags	= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
 
-	p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
-					      pdata->number_of_pins,
-					      pdata->irq_base,
+	p->irq_domain = irq_domain_add_simple(pdev->dev.of_node, ngpios, 0,
 					      &em_gio_irq_domain_ops, p);
 	if (!p->irq_domain) {
 		ret = -ENXIO;
@@ -387,12 +374,6 @@
 		goto err1;
 	}
 
-	if (pdata->pctl_name) {
-		ret = gpiochip_add_pin_range(gpio_chip, pdata->pctl_name, 0,
-					     gpio_chip->base, gpio_chip->ngpio);
-		if (ret < 0)
-			dev_warn(&pdev->dev, "failed to add pin range\n");
-	}
 	return 0;
 
 err1:
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 45684f3..9d90366 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -100,13 +100,15 @@
 	}
 }
 
-static void ep93xx_gpio_f_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ep93xx_gpio_f_irq_handler(unsigned int __irq,
+				      struct irq_desc *desc)
 {
 	/*
 	 * map discontiguous hw irq range to continuous sw irq range:
 	 *
 	 *  IRQ_EP93XX_GPIO{0..7}MUX -> gpio_to_irq(EP93XX_GPIO_LINE_F({0..7})
 	 */
+	unsigned int irq = irq_desc_get_irq(desc);
 	int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */
 	int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_F(0)) + port_f_idx;
 
@@ -208,7 +210,7 @@
 		return -EINVAL;
 	}
 
-	__irq_set_handler_locked(d->irq, handler);
+	irq_set_handler_locked(d, handler);
 
 	gpio_int_enabled[port] |= port_mask;
 
@@ -234,7 +236,7 @@
 	     gpio_irq <= gpio_to_irq(EP93XX_GPIO_LINE_MAX_IRQ); ++gpio_irq) {
 		irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip,
 					 handle_level_irq);
-		set_irq_flags(gpio_irq, IRQF_VALID);
+		irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST);
 	}
 
 	irq_set_chained_handler(IRQ_EP93XX_GPIO_AB,
diff --git a/drivers/gpio/gpio-etraxfs.c b/drivers/gpio/gpio-etraxfs.c
index 28071f4..2ffcd9f 100644
--- a/drivers/gpio/gpio-etraxfs.c
+++ b/drivers/gpio/gpio-etraxfs.c
@@ -1,8 +1,10 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/gpio.h>
+#include <linux/gpio/driver.h>
 #include <linux/of_gpio.h>
 #include <linux/io.h>
+#include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/basic_mmio_gpio.h>
 
@@ -13,6 +15,7 @@
 #define ETRAX_FS_rw_intr_mask	16
 #define ETRAX_FS_rw_ack_intr	20
 #define ETRAX_FS_r_intr		24
+#define ETRAX_FS_r_masked_intr	28
 #define ETRAX_FS_rw_pb_dout	32
 #define ETRAX_FS_r_pb_din	36
 #define ETRAX_FS_rw_pb_oe	40
@@ -26,6 +29,48 @@
 #define ETRAX_FS_r_pe_din	84
 #define ETRAX_FS_rw_pe_oe	88
 
+#define ARTPEC3_r_pa_din	0
+#define ARTPEC3_rw_pa_dout	4
+#define ARTPEC3_rw_pa_oe	8
+#define ARTPEC3_r_pb_din	44
+#define ARTPEC3_rw_pb_dout	48
+#define ARTPEC3_rw_pb_oe	52
+#define ARTPEC3_r_pc_din	88
+#define ARTPEC3_rw_pc_dout	92
+#define ARTPEC3_rw_pc_oe	96
+#define ARTPEC3_r_pd_din	116
+#define ARTPEC3_rw_intr_cfg	120
+#define ARTPEC3_rw_intr_pins	124
+#define ARTPEC3_rw_intr_mask	128
+#define ARTPEC3_rw_ack_intr	132
+#define ARTPEC3_r_masked_intr	140
+
+#define GIO_CFG_OFF		0
+#define GIO_CFG_HI		1
+#define GIO_CFG_LO		2
+#define GIO_CFG_SET		3
+#define GIO_CFG_POSEDGE		5
+#define GIO_CFG_NEGEDGE		6
+#define GIO_CFG_ANYEDGE		7
+
+struct etraxfs_gpio_info;
+
+struct etraxfs_gpio_block {
+	spinlock_t lock;
+	u32 mask;
+	u32 cfg;
+	u32 pins;
+	unsigned int group[8];
+
+	void __iomem *regs;
+	const struct etraxfs_gpio_info *info;
+};
+
+struct etraxfs_gpio_chip {
+	struct bgpio_chip bgc;
+	struct etraxfs_gpio_block *block;
+};
+
 struct etraxfs_gpio_port {
 	const char *label;
 	unsigned int oe;
@@ -37,6 +82,12 @@
 struct etraxfs_gpio_info {
 	unsigned int num_ports;
 	const struct etraxfs_gpio_port *ports;
+
+	unsigned int rw_ack_intr;
+	unsigned int rw_intr_mask;
+	unsigned int rw_intr_cfg;
+	unsigned int rw_intr_pins;
+	unsigned int r_masked_intr;
 };
 
 static const struct etraxfs_gpio_port etraxfs_gpio_etraxfs_ports[] = {
@@ -80,8 +131,56 @@
 static const struct etraxfs_gpio_info etraxfs_gpio_etraxfs = {
 	.num_ports = ARRAY_SIZE(etraxfs_gpio_etraxfs_ports),
 	.ports = etraxfs_gpio_etraxfs_ports,
+	.rw_ack_intr	= ETRAX_FS_rw_ack_intr,
+	.rw_intr_mask	= ETRAX_FS_rw_intr_mask,
+	.rw_intr_cfg	= ETRAX_FS_rw_intr_cfg,
+	.r_masked_intr	= ETRAX_FS_r_masked_intr,
 };
 
+static const struct etraxfs_gpio_port etraxfs_gpio_artpec3_ports[] = {
+	{
+		.label	= "A",
+		.ngpio	= 32,
+		.oe	= ARTPEC3_rw_pa_oe,
+		.dout	= ARTPEC3_rw_pa_dout,
+		.din	= ARTPEC3_r_pa_din,
+	},
+	{
+		.label	= "B",
+		.ngpio	= 32,
+		.oe	= ARTPEC3_rw_pb_oe,
+		.dout	= ARTPEC3_rw_pb_dout,
+		.din	= ARTPEC3_r_pb_din,
+	},
+	{
+		.label	= "C",
+		.ngpio	= 16,
+		.oe	= ARTPEC3_rw_pc_oe,
+		.dout	= ARTPEC3_rw_pc_dout,
+		.din	= ARTPEC3_r_pc_din,
+	},
+	{
+		.label	= "D",
+		.ngpio	= 32,
+		.din	= ARTPEC3_r_pd_din,
+	},
+};
+
+static const struct etraxfs_gpio_info etraxfs_gpio_artpec3 = {
+	.num_ports = ARRAY_SIZE(etraxfs_gpio_artpec3_ports),
+	.ports = etraxfs_gpio_artpec3_ports,
+	.rw_ack_intr	= ARTPEC3_rw_ack_intr,
+	.rw_intr_mask	= ARTPEC3_rw_intr_mask,
+	.rw_intr_cfg	= ARTPEC3_rw_intr_cfg,
+	.r_masked_intr	= ARTPEC3_r_masked_intr,
+	.rw_intr_pins	= ARTPEC3_rw_intr_pins,
+};
+
+static unsigned int etraxfs_gpio_chip_to_port(struct gpio_chip *gc)
+{
+	return gc->label[0] - 'A';
+}
+
 static int etraxfs_gpio_of_xlate(struct gpio_chip *gc,
 			       const struct of_phandle_args *gpiospec,
 			       u32 *flags)
@@ -90,7 +189,7 @@
 	 * Port numbers are A to E, and the properties are integers, so we
 	 * specify them as 0xA - 0xE.
 	 */
-	if (gc->label[0] - 'A' + 0xA != gpiospec->args[2])
+	if (etraxfs_gpio_chip_to_port(gc) + 0xA != gpiospec->args[2])
 		return -EINVAL;
 
 	return of_gpio_simple_xlate(gc, gpiospec, flags);
@@ -101,24 +200,174 @@
 		.compatible = "axis,etraxfs-gio",
 		.data = &etraxfs_gpio_etraxfs,
 	},
+	{
+		.compatible = "axis,artpec3-gio",
+		.data = &etraxfs_gpio_artpec3,
+	},
 	{},
 };
 
+static unsigned int etraxfs_gpio_to_group_irq(unsigned int gpio)
+{
+	return gpio % 8;
+}
+
+static unsigned int etraxfs_gpio_to_group_pin(struct etraxfs_gpio_chip *chip,
+					      unsigned int gpio)
+{
+	return 4 * etraxfs_gpio_chip_to_port(&chip->bgc.gc) + gpio / 8;
+}
+
+static void etraxfs_gpio_irq_ack(struct irq_data *d)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+
+	writel(BIT(grpirq), block->regs + block->info->rw_ack_intr);
+}
+
+static void etraxfs_gpio_irq_mask(struct irq_data *d)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+
+	spin_lock(&block->lock);
+	block->mask &= ~BIT(grpirq);
+	writel(block->mask, block->regs + block->info->rw_intr_mask);
+	spin_unlock(&block->lock);
+}
+
+static void etraxfs_gpio_irq_unmask(struct irq_data *d)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+
+	spin_lock(&block->lock);
+	block->mask |= BIT(grpirq);
+	writel(block->mask, block->regs + block->info->rw_intr_mask);
+	spin_unlock(&block->lock);
+}
+
+static int etraxfs_gpio_irq_set_type(struct irq_data *d, u32 type)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+	u32 cfg;
+
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+		cfg = GIO_CFG_POSEDGE;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		cfg = GIO_CFG_NEGEDGE;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		cfg = GIO_CFG_ANYEDGE;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		cfg = GIO_CFG_LO;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		cfg = GIO_CFG_HI;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock(&block->lock);
+	block->cfg &= ~(0x7 << (grpirq * 3));
+	block->cfg |= (cfg << (grpirq * 3));
+	writel(block->cfg, block->regs + block->info->rw_intr_cfg);
+	spin_unlock(&block->lock);
+
+	return 0;
+}
+
+static int etraxfs_gpio_irq_request_resources(struct irq_data *d)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+	int ret = -EBUSY;
+
+	spin_lock(&block->lock);
+	if (block->group[grpirq])
+		goto out;
+
+	ret = gpiochip_lock_as_irq(&chip->bgc.gc, d->hwirq);
+	if (ret)
+		goto out;
+
+	block->group[grpirq] = d->irq;
+	if (block->info->rw_intr_pins) {
+		unsigned int pin = etraxfs_gpio_to_group_pin(chip, d->hwirq);
+
+		block->pins &= ~(0xf << (grpirq * 4));
+		block->pins |= (pin << (grpirq * 4));
+
+		writel(block->pins, block->regs + block->info->rw_intr_pins);
+	}
+
+out:
+	spin_unlock(&block->lock);
+	return ret;
+}
+
+static void etraxfs_gpio_irq_release_resources(struct irq_data *d)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+
+	spin_lock(&block->lock);
+	block->group[grpirq] = 0;
+	gpiochip_unlock_as_irq(&chip->bgc.gc, d->hwirq);
+	spin_unlock(&block->lock);
+}
+
+static struct irq_chip etraxfs_gpio_irq_chip = {
+	.name		= "gpio-etraxfs",
+	.irq_ack	= etraxfs_gpio_irq_ack,
+	.irq_mask	= etraxfs_gpio_irq_mask,
+	.irq_unmask	= etraxfs_gpio_irq_unmask,
+	.irq_set_type	= etraxfs_gpio_irq_set_type,
+	.irq_request_resources = etraxfs_gpio_irq_request_resources,
+	.irq_release_resources = etraxfs_gpio_irq_release_resources,
+};
+
+static irqreturn_t etraxfs_gpio_interrupt(int irq, void *dev_id)
+{
+	struct etraxfs_gpio_block *block = dev_id;
+	unsigned long intr = readl(block->regs + block->info->r_masked_intr);
+	int bit;
+
+	for_each_set_bit(bit, &intr, 8)
+		generic_handle_irq(block->group[bit]);
+
+	return IRQ_RETVAL(intr & 0xff);
+}
+
 static int etraxfs_gpio_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	const struct etraxfs_gpio_info *info;
 	const struct of_device_id *match;
-	struct bgpio_chip *chips;
-	struct resource *res;
+	struct etraxfs_gpio_block *block;
+	struct etraxfs_gpio_chip *chips;
+	struct resource *res, *irq;
+	bool allportsirq = false;
 	void __iomem *regs;
 	int ret;
 	int i;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	regs = devm_ioremap_resource(dev, res);
-	if (!regs)
-		return -ENOMEM;
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
 
 	match = of_match_node(etraxfs_gpio_of_table, dev->of_node);
 	if (!match)
@@ -130,19 +379,57 @@
 	if (!chips)
 		return -ENOMEM;
 
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq)
+		return -EINVAL;
+
+	block = devm_kzalloc(dev, sizeof(*block), GFP_KERNEL);
+	if (!block)
+		return -ENOMEM;
+
+	spin_lock_init(&block->lock);
+
+	block->regs = regs;
+	block->info = info;
+
+	writel(0, block->regs + info->rw_intr_mask);
+	writel(0, block->regs + info->rw_intr_cfg);
+	if (info->rw_intr_pins) {
+		allportsirq = true;
+		writel(0, block->regs + info->rw_intr_pins);
+	}
+
+	ret = devm_request_irq(dev, irq->start, etraxfs_gpio_interrupt,
+			       IRQF_SHARED, dev_name(dev), block);
+	if (ret) {
+		dev_err(dev, "Unable to request irq %d\n", ret);
+		return ret;
+	}
+
 	for (i = 0; i < info->num_ports; i++) {
-		struct bgpio_chip *bgc = &chips[i];
+		struct etraxfs_gpio_chip *chip = &chips[i];
+		struct bgpio_chip *bgc = &chip->bgc;
 		const struct etraxfs_gpio_port *port = &info->ports[i];
+		unsigned long flags = BGPIOF_READ_OUTPUT_REG_SET;
+		void __iomem *dat = regs + port->din;
+		void __iomem *set = regs + port->dout;
+		void __iomem *dirout = regs + port->oe;
+
+		chip->block = block;
+
+		if (dirout == set) {
+			dirout = set = NULL;
+			flags = BGPIOF_NO_OUTPUT;
+		}
 
 		ret = bgpio_init(bgc, dev, 4,
-				 regs + port->din,	/* dat */
-				 regs + port->dout,	/* set */
-				 NULL,			/* clr */
-				 regs + port->oe,	/* dirout */
-				 NULL,			/* dirin */
-				 BGPIOF_UNREADABLE_REG_SET);
-		if (ret)
-			return ret;
+				 dat, set, NULL, dirout, NULL,
+				 flags);
+		if (ret) {
+			dev_err(dev, "Unable to init port %s\n",
+				port->label);
+			continue;
+		}
 
 		bgc->gc.ngpio = port->ngpio;
 		bgc->gc.label = port->label;
@@ -152,9 +439,21 @@
 		bgc->gc.of_xlate = etraxfs_gpio_of_xlate;
 
 		ret = gpiochip_add(&bgc->gc);
-		if (ret)
+		if (ret) {
 			dev_err(dev, "Unable to register port %s\n",
 				bgc->gc.label);
+			continue;
+		}
+
+		if (i > 0 && !allportsirq)
+			continue;
+
+		ret = gpiochip_irqchip_add(&bgc->gc, &etraxfs_gpio_irq_chip, 0,
+					   handle_level_irq, IRQ_TYPE_NONE);
+		if (ret) {
+			dev_err(dev, "Unable to add irqchip to port %s\n",
+				bgc->gc.label);
+		}
 	}
 
 	return 0;
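
The new etraxfs interrupt support multiplexes up to eight "group" interrupts over one shared line: etraxfs_gpio_interrupt() reads the masked-status register and calls generic_handle_irq() on the virtual IRQ recorded in block->group[] for each pending bit. The IRQ_RETVAL(intr & 0xff) return is what makes the IRQF_SHARED registration work: it reports IRQ_HANDLED only when this block actually had something pending, so the core can try other handlers on the line and still detect spurious interrupts. A reduced sketch of the same dispatch shape (all my_* names and the register offset are invented):

	#include <linux/bitops.h>
	#include <linux/interrupt.h>
	#include <linux/io.h>
	#include <linux/irq.h>

	#define MY_MASKED_INTR	0x8c		/* invented register offset */

	struct my_block {
		void __iomem *regs;
		unsigned int group[8];		/* Linux IRQ per group */
	};

	static irqreturn_t my_block_interrupt(int irq, void *dev_id)
	{
		struct my_block *block = dev_id;
		unsigned long pending = readl(block->regs + MY_MASKED_INTR);
		int bit;

		/* hand each pending group to its virtual interrupt */
		for_each_set_bit(bit, &pending, 8)
			generic_handle_irq(block->group[bit]);

		/* IRQ_HANDLED only if something was ours, so a shared
		 * line can fall through to the next handler */
		return IRQ_RETVAL(pending & 0xff);
	}
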
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 9bda372..a3f0753 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -153,6 +153,10 @@
 	return !!(bgc->read_reg(bgc->reg_dat) & bgc->pin2mask(bgc, gpio));
 }
 
+static void bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+}
+
 static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
 {
 	struct bgpio_chip *bgc = to_bgpio_chip(gc);
@@ -279,6 +283,12 @@
 	return 0;
 }
 
+static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio,
+				int val)
+{
+	return -EINVAL;
+}
+
 static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
 				int val)
 {
@@ -302,6 +312,14 @@
 	return 0;
 }
 
+static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
+{
+	struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+	return (bgc->read_reg(bgc->reg_dir) & bgc->pin2mask(bgc, gpio)) ?
+	       GPIOF_DIR_OUT : GPIOF_DIR_IN;
+}
+
 static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 {
 	struct bgpio_chip *bgc = to_bgpio_chip(gc);
@@ -351,6 +369,14 @@
 	return 0;
 }
 
+static int bgpio_get_dir_inv(struct gpio_chip *gc, unsigned int gpio)
+{
+	struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+	return (bgc->read_reg(bgc->reg_dir) & bgc->pin2mask(bgc, gpio)) ?
+	       GPIOF_DIR_IN : GPIOF_DIR_OUT;
+}
+
 static int bgpio_setup_accessors(struct device *dev,
 				 struct bgpio_chip *bgc,
 				 bool bit_be,
@@ -444,6 +470,9 @@
 		bgc->reg_set = set;
 		bgc->gc.set = bgpio_set_set;
 		bgc->gc.set_multiple = bgpio_set_multiple_set;
+	} else if (flags & BGPIOF_NO_OUTPUT) {
+		bgc->gc.set = bgpio_set_none;
+		bgc->gc.set_multiple = NULL;
 	} else {
 		bgc->gc.set = bgpio_set;
 		bgc->gc.set_multiple = bgpio_set_multiple;
@@ -460,7 +489,8 @@
 
 static int bgpio_setup_direction(struct bgpio_chip *bgc,
 				 void __iomem *dirout,
-				 void __iomem *dirin)
+				 void __iomem *dirin,
+				 unsigned long flags)
 {
 	if (dirout && dirin) {
 		return -EINVAL;
@@ -468,12 +498,17 @@
 		bgc->reg_dir = dirout;
 		bgc->gc.direction_output = bgpio_dir_out;
 		bgc->gc.direction_input = bgpio_dir_in;
+		bgc->gc.get_direction = bgpio_get_dir;
 	} else if (dirin) {
 		bgc->reg_dir = dirin;
 		bgc->gc.direction_output = bgpio_dir_out_inv;
 		bgc->gc.direction_input = bgpio_dir_in_inv;
+		bgc->gc.get_direction = bgpio_get_dir_inv;
 	} else {
-		bgc->gc.direction_output = bgpio_simple_dir_out;
+		if (flags & BGPIOF_NO_OUTPUT)
+			bgc->gc.direction_output = bgpio_dir_out_err;
+		else
+			bgc->gc.direction_output = bgpio_simple_dir_out;
 		bgc->gc.direction_input = bgpio_simple_dir_in;
 	}
 
@@ -525,7 +560,7 @@
 	if (ret)
 		return ret;
 
-	ret = bgpio_setup_direction(bgc, dirout, dirin);
+	ret = bgpio_setup_direction(bgc, dirout, dirin, flags);
 	if (ret)
 		return ret;
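
The two new gpio-generic accessors, bgpio_get_dir() and bgpio_get_dir_inv(), differ only in the polarity of the direction register: with a "dirout" register a set bit means output, with a "dirin" register a set bit means input. Together with the new BGPIOF_NO_OUTPUT flag (used above for the input-only etraxfs port D) this lets gpio-generic model controllers with either register flavour, or none at all. A userspace model of the two mappings, compilable with any C compiler (names and register values invented for illustration):

	#include <stdint.h>
	#include <stdio.h>

	enum dir { DIR_IN, DIR_OUT };	/* stands in for GPIOF_DIR_* */

	/* "dirout" flavour: set bit == output (cf. bgpio_get_dir) */
	static enum dir get_dir(uint32_t reg, unsigned int pin)
	{
		return (reg & (1u << pin)) ? DIR_OUT : DIR_IN;
	}

	/* "dirin" flavour: set bit == input (cf. bgpio_get_dir_inv) */
	static enum dir get_dir_inv(uint32_t reg, unsigned int pin)
	{
		return (reg & (1u << pin)) ? DIR_IN : DIR_OUT;
	}

	int main(void)
	{
		uint32_t reg = 0x5;	/* bits 0 and 2 set */
		unsigned int pin;

		for (pin = 0; pin < 3; pin++)
			printf("pin %u: dirout says %s, dirin says %s\n", pin,
			       get_dir(reg, pin) == DIR_OUT ? "out" : "in",
			       get_dir_inv(reg, pin) == DIR_OUT ? "out" : "in");
		return 0;
	}
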
 
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 0a8f761..801423f 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -104,17 +104,12 @@
 {
 	struct bgpio_chip *bgc = &priv->bgc;
 	unsigned long mask = bgc->pin2mask(bgc, offset);
-	unsigned long flags;
-
-	spin_lock_irqsave(&bgc->lock, flags);
 
 	if (val)
 		priv->imask |= mask;
 	else
 		priv->imask &= ~mask;
 	bgc->write_reg(priv->regs + GRGPIO_IMASK, priv->imask);
-
-	spin_unlock_irqrestore(&bgc->lock, flags);
 }
 
 static int grgpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -180,16 +175,26 @@
 {
 	struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
 	int offset = d->hwirq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->bgc.lock, flags);
 
 	grgpio_set_imask(priv, offset, 0);
+
+	spin_unlock_irqrestore(&priv->bgc.lock, flags);
 }
 
 static void grgpio_irq_unmask(struct irq_data *d)
 {
 	struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
 	int offset = d->hwirq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->bgc.lock, flags);
 
 	grgpio_set_imask(priv, offset, 1);
+
+	spin_unlock_irqrestore(&priv->bgc.lock, flags);
 }
 
 static struct irq_chip grgpio_irq_chip = {
@@ -281,12 +286,7 @@
 	irq_set_chip_data(irq, priv);
 	irq_set_chip_and_handler(irq, &grgpio_irq_chip,
 				 handle_simple_irq);
-	irq_clear_status_flags(irq, IRQ_NOREQUEST);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
 
 	return ret;
 }
@@ -301,9 +301,6 @@
 	int ngpio = priv->bgc.gc.ngpio;
 	int i;
 
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, 0);
-#endif
 	irq_set_chip_and_handler(irq, NULL, NULL);
 	irq_set_chip_data(irq, NULL);
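
In the grgpio hunks above, the irqsave locking moves out of grgpio_set_imask() and into the irq_mask/irq_unmask callbacks. A plausible reading (the changelog is not quoted here) is the usual motivation for this refactoring: a lock-free helper can be reused by callers that already hold bgc->lock, and each callback then takes the lock exactly once. The convention in isolation, with invented my_* names:

	#include <linux/bitops.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct my_ctrl {
		spinlock_t lock;
		u32 imask;
	};

	/* no locking here: the caller must hold ctrl->lock */
	static void my_set_imask_locked(struct my_ctrl *ctrl,
					unsigned int offset, int on)
	{
		if (on)
			ctrl->imask |= BIT(offset);
		else
			ctrl->imask &= ~BIT(offset);
		/* write ctrl->imask to the hardware mask register here */
	}

	static void my_irq_mask(struct my_ctrl *ctrl, unsigned int offset)
	{
		unsigned long flags;

		spin_lock_irqsave(&ctrl->lock, flags);
		my_set_imask_locked(ctrl, offset, 0);
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}
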
 
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 7d3c90e..8c5252c 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -685,9 +685,14 @@
 
 	mutex_init(&chip->lock);
 
-	max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
-	if (nr_port > 8)
-		max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+	ret = max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
+	if (ret)
+		goto out_failed;
+	if (nr_port > 8) {
+		ret = max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+		if (ret)
+			goto out_failed;
+	}
 
 	ret = gpiochip_add(&chip->gpio_chip);
 	if (ret)
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 2fc7ff8..73db7ec 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -507,11 +507,7 @@
 		irq_set_chip_data(irq, mcp);
 		irq_set_chip(irq, &mcp23s08_irq_chip);
 		irq_set_nested_thread(irq, true);
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, IRQF_VALID);
-#else
 		irq_set_noprobe(irq);
-#endif
 	}
 	return 0;
 }
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 20aa66f..8ef7a12 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -32,7 +32,7 @@
 
 struct mpc8xxx_gpio_chip {
 	struct of_mm_gpio_chip mm_gc;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	/*
 	 * shadowed data register to be able to clear/set output pins in
@@ -95,7 +95,7 @@
 	struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
 	unsigned long flags;
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	if (val)
 		mpc8xxx_gc->data |= mpc8xxx_gpio2mask(gpio);
@@ -104,7 +104,7 @@
 
 	out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data);
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 }
 
 static void mpc8xxx_gpio_set_multiple(struct gpio_chip *gc,
@@ -115,7 +115,7 @@
 	unsigned long flags;
 	int i;
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	for (i = 0; i < gc->ngpio; i++) {
 		if (*mask == 0)
@@ -130,7 +130,7 @@
 
 	out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data);
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 }
 
 static int mpc8xxx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
@@ -139,11 +139,11 @@
 	struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
 	unsigned long flags;
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	clrbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio));
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 
 	return 0;
 }
@@ -156,11 +156,11 @@
 
 	mpc8xxx_gpio_set(gc, gpio, val);
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	setbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio));
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 
 	return 0;
 }
@@ -174,6 +174,15 @@
 	return mpc8xxx_gpio_dir_out(gc, gpio, val);
 }
 
+static int mpc5125_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+	/* GPIO 0..3 are input only on MPC5125 */
+	if (gpio <= 3)
+		return -EINVAL;
+
+	return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
 static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
 {
 	struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -206,11 +215,11 @@
 	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
 	unsigned long flags;
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 }
 
 static void mpc8xxx_irq_mask(struct irq_data *d)
@@ -219,11 +228,11 @@
 	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
 	unsigned long flags;
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 }
 
 static void mpc8xxx_irq_ack(struct irq_data *d)
@@ -242,17 +251,17 @@
 
 	switch (flow_type) {
 	case IRQ_TYPE_EDGE_FALLING:
-		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 		setbits32(mm->regs + GPIO_ICR,
 			  mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
-		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 		break;
 
 	case IRQ_TYPE_EDGE_BOTH:
-		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 		clrbits32(mm->regs + GPIO_ICR,
 			  mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
-		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 		break;
 
 	default:
@@ -282,22 +291,22 @@
 	switch (flow_type) {
 	case IRQ_TYPE_EDGE_FALLING:
 	case IRQ_TYPE_LEVEL_LOW:
-		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 		clrsetbits_be32(reg, 3 << shift, 2 << shift);
-		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 		break;
 
 	case IRQ_TYPE_EDGE_RISING:
 	case IRQ_TYPE_LEVEL_HIGH:
-		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 		clrsetbits_be32(reg, 3 << shift, 1 << shift);
-		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 		break;
 
 	case IRQ_TYPE_EDGE_BOTH:
-		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 		clrbits32(reg, 3 << shift);
-		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 		break;
 
 	default:
@@ -312,17 +321,13 @@
 	.irq_unmask	= mpc8xxx_irq_unmask,
 	.irq_mask	= mpc8xxx_irq_mask,
 	.irq_ack	= mpc8xxx_irq_ack,
+	/* this might get overwritten in mpc8xxx_probe() */
 	.irq_set_type	= mpc8xxx_irq_set_type,
 };
 
 static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
 				irq_hw_number_t hwirq)
 {
-	struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data;
-
-	if (mpc8xxx_gc->of_dev_id_data)
-		mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data;
-
 	irq_set_chip_data(irq, h->host_data);
 	irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq);
 
@@ -334,11 +339,38 @@
 	.xlate	= irq_domain_xlate_twocell,
 };
 
-static struct of_device_id mpc8xxx_gpio_ids[] = {
+struct mpc8xxx_gpio_devtype {
+	int (*gpio_dir_out)(struct gpio_chip *, unsigned int, int);
+	int (*gpio_get)(struct gpio_chip *, unsigned int);
+	int (*irq_set_type)(struct irq_data *, unsigned int);
+};
+
+static const struct mpc8xxx_gpio_devtype mpc512x_gpio_devtype = {
+	.gpio_dir_out = mpc5121_gpio_dir_out,
+	.irq_set_type = mpc512x_irq_set_type,
+};
+
+static const struct mpc8xxx_gpio_devtype mpc5125_gpio_devtype = {
+	.gpio_dir_out = mpc5125_gpio_dir_out,
+	.irq_set_type = mpc512x_irq_set_type,
+};
+
+static const struct mpc8xxx_gpio_devtype mpc8572_gpio_devtype = {
+	.gpio_get = mpc8572_gpio_get,
+};
+
+static const struct mpc8xxx_gpio_devtype mpc8xxx_gpio_devtype_default = {
+	.gpio_dir_out = mpc8xxx_gpio_dir_out,
+	.gpio_get = mpc8xxx_gpio_get,
+	.irq_set_type = mpc8xxx_irq_set_type,
+};
+
+static const struct of_device_id mpc8xxx_gpio_ids[] = {
 	{ .compatible = "fsl,mpc8349-gpio", },
-	{ .compatible = "fsl,mpc8572-gpio", },
+	{ .compatible = "fsl,mpc8572-gpio", .data = &mpc8572_gpio_devtype, },
 	{ .compatible = "fsl,mpc8610-gpio", },
-	{ .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, },
+	{ .compatible = "fsl,mpc5121-gpio", .data = &mpc512x_gpio_devtype, },
+	{ .compatible = "fsl,mpc5125-gpio", .data = &mpc5125_gpio_devtype, },
 	{ .compatible = "fsl,pq3-gpio",     },
 	{ .compatible = "fsl,qoriq-gpio",   },
 	{}
@@ -351,6 +383,8 @@
 	struct of_mm_gpio_chip *mm_gc;
 	struct gpio_chip *gc;
 	const struct of_device_id *id;
+	const struct mpc8xxx_gpio_devtype *devtype =
+		of_device_get_match_data(&pdev->dev);
 	int ret;
 
 	mpc8xxx_gc = devm_kzalloc(&pdev->dev, sizeof(*mpc8xxx_gc), GFP_KERNEL);
@@ -359,7 +393,7 @@
 
 	platform_set_drvdata(pdev, mpc8xxx_gc);
 
-	spin_lock_init(&mpc8xxx_gc->lock);
+	raw_spin_lock_init(&mpc8xxx_gc->lock);
 
 	mm_gc = &mpc8xxx_gc->mm_gc;
 	gc = &mm_gc->gc;
@@ -367,10 +401,18 @@
 	mm_gc->save_regs = mpc8xxx_gpio_save_regs;
 	gc->ngpio = MPC8XXX_GPIO_PINS;
 	gc->direction_input = mpc8xxx_gpio_dir_in;
-	gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
-		mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
-	gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
-		mpc8572_gpio_get : mpc8xxx_gpio_get;
+
+	if (!devtype)
+		devtype = &mpc8xxx_gpio_devtype_default;
+
+	/*
+	 * It's assumed that only a single type of gpio controller is available
+	 * on the current machine, so overwriting global data is fine.
+	 */
+	mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
+
+	gc->direction_output = devtype->gpio_dir_out ?: mpc8xxx_gpio_dir_out;
+	gc->get = devtype->gpio_get ?: mpc8xxx_gpio_get;
 	gc->set = mpc8xxx_gpio_set;
 	gc->set_multiple = mpc8xxx_gpio_set_multiple;
 	gc->to_irq = mpc8xxx_gpio_to_irq;
@@ -396,8 +438,8 @@
 	out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
 	out_be32(mm_gc->regs + GPIO_IMR, 0);
 
-	irq_set_handler_data(mpc8xxx_gc->irqn, mpc8xxx_gc);
-	irq_set_chained_handler(mpc8xxx_gc->irqn, mpc8xxx_gpio_irq_cascade);
+	irq_set_chained_handler_and_data(mpc8xxx_gc->irqn,
+					 mpc8xxx_gpio_irq_cascade, mpc8xxx_gc);
 
 	return 0;
 }
@@ -407,8 +449,7 @@
 	struct mpc8xxx_gpio_chip *mpc8xxx_gc = platform_get_drvdata(pdev);
 
 	if (mpc8xxx_gc->irq) {
-		irq_set_handler_data(mpc8xxx_gc->irqn, NULL);
-		irq_set_chained_handler(mpc8xxx_gc->irqn, NULL);
+		irq_set_chained_handler_and_data(mpc8xxx_gc->irqn, NULL, NULL);
 		irq_domain_remove(mpc8xxx_gc->irq);
 	}
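
The mpc8xxx rework replaces scattered of_device_is_compatible() tests with a devtype structure hung off of_device_id.data, looked up once via of_device_get_match_data(); hooks that a variant leaves NULL fall back through the `?:` operator to the default implementation. (The spinlock_t to raw_spinlock_t switch in the same file keeps the irq-chip callbacks usable on PREEMPT_RT, where ordinary spinlocks become sleeping locks; gpio-omap gets the same conversion below.) The dispatch in isolation as plain, runnable C — the structs and names are invented, and `?:` without a middle operand is the GNU extension the kernel uses:

	#include <stdio.h>

	struct devtype {
		void (*dir_out)(void);
		void (*get)(void);
	};

	static void default_dir_out(void) { puts("default dir_out"); }
	static void default_get(void)     { puts("default get"); }
	static void v2_dir_out(void)      { puts("v2 dir_out"); }

	/* a variant overrides only what differs, like mpc8572/mpc512x */
	static const struct devtype v2_devtype = { .dir_out = v2_dir_out };

	int main(void)
	{
		/* as if fetched from an of_device_id .data pointer */
		const struct devtype *dt = &v2_devtype;

		(dt->dir_out ?: default_dir_out)();	/* v2 dir_out */
		(dt->get ?: default_get)();		/* default get */
		return 0;
	}
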
 
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index 52ff182..d2012cf 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -187,14 +187,6 @@
 	return irq_create_mapping(domain, offset);
 }
 
-static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
-{
-	struct irq_data *irq_data = irq_get_irq_data(irq);
-
-	return irq_data->hwirq;
-}
-
-
 /* For dual-edge interrupts in software, since the hardware has no
  * such support:
  *
@@ -238,7 +230,7 @@
 
 static void msm_gpio_irq_ack(struct irq_data *d)
 {
-	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+	int gpio = d->hwirq;
 
 	writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio));
 	if (test_bit(gpio, msm_gpio.dual_edge_irqs))
@@ -247,8 +239,8 @@
 
 static void msm_gpio_irq_mask(struct irq_data *d)
 {
-	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
 	unsigned long irq_flags;
+	int gpio = d->hwirq;
 
 	spin_lock_irqsave(&tlmm_lock, irq_flags);
 	writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
@@ -259,8 +251,8 @@
 
 static void msm_gpio_irq_unmask(struct irq_data *d)
 {
-	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
 	unsigned long irq_flags;
+	int gpio = d->hwirq;
 
 	spin_lock_irqsave(&tlmm_lock, irq_flags);
 	__set_bit(gpio, msm_gpio.enabled_irqs);
@@ -271,8 +263,8 @@
 
 static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
-	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
 	unsigned long irq_flags;
+	int gpio = d->hwirq;
 	uint32_t bits;
 
 	spin_lock_irqsave(&tlmm_lock, irq_flags);
@@ -281,14 +273,14 @@
 
 	if (flow_type & IRQ_TYPE_EDGE_BOTH) {
 		bits |= BIT(INTR_DECT_CTL);
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 		if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
 			__set_bit(gpio, msm_gpio.dual_edge_irqs);
 		else
 			__clear_bit(gpio, msm_gpio.dual_edge_irqs);
 	} else {
 		bits &= ~BIT(INTR_DECT_CTL);
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		__clear_bit(gpio, msm_gpio.dual_edge_irqs);
 	}
 
@@ -331,7 +323,7 @@
 
 static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
 {
-	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+	int gpio = d->hwirq;
 
 	if (on) {
 		if (bitmap_empty(msm_gpio.wake_irqs, MAX_NR_GPIO))
@@ -363,7 +355,6 @@
 	irq_set_lockdep_class(irq, &msm_gpio_lock_class);
 	irq_set_chip_and_handler(irq, &msm_gpio_irq_chip,
 			handle_level_irq);
-	set_irq_flags(irq, IRQF_VALID);
 
 	return 0;
 }
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 1a54205..b396bf3 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -458,9 +458,9 @@
 	return 0;
 }
 
-static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void mvebu_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
-	struct mvebu_gpio_chip *mvchip = irq_get_handler_data(irq);
+	struct mvebu_gpio_chip *mvchip = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	u32 cause, type;
 	int i;
@@ -787,8 +787,8 @@
 
 		if (irq < 0)
 			continue;
-		irq_set_handler_data(irq, mvchip);
-		irq_set_chained_handler(irq, mvebu_gpio_irq_handler);
+		irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
+						 mvchip);
 	}
 
 	mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
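
Several hunks in this pull (mvebu here, mpc8xxx above, timberdale and tz1090 below) collapse the irq_set_handler_data()/irq_set_chained_handler() pair into irq_set_chained_handler_and_data(), which installs the flow handler and its data together under the descriptor lock and so avoids ordering mistakes between the two writes. A condensed sketch of the full pattern, with invented my_* names (the flow-handler signature shown is the 4.3-era one; the unsigned int argument is on its way out, which is why sa1100 and mvebu rename it to __irq and take everything from the descriptor):

	#include <linux/irq.h>
	#include <linux/irqchip/chained_irq.h>
	#include <linux/irqdesc.h>

	struct my_chip { void __iomem *regs; };

	static void my_demux(unsigned int __irq, struct irq_desc *desc)
	{
		struct my_chip *chip = irq_desc_get_handler_data(desc);
		struct irq_chip *parent = irq_desc_get_chip(desc);

		chained_irq_enter(parent, desc);
		/* read chip->regs and generic_handle_irq() the children */
		chained_irq_exit(parent, desc);
	}

	static void my_setup(unsigned int parent_irq, struct my_chip *chip)
	{
		/* replaces the old two-call sequence:
		 *   irq_set_handler_data(parent_irq, chip);
		 *   irq_set_chained_handler(parent_irq, my_demux);
		 */
		irq_set_chained_handler_and_data(parent_irq, my_demux, chip);
	}

	static void my_teardown(unsigned int parent_irq)
	{
		/* tear down symmetrically, as gpio-mpc8xxx's remove() does */
		irq_set_chained_handler_and_data(parent_irq, NULL, NULL);
	}
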
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index ec1eb1b..b752b56 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -275,8 +275,8 @@
 static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc)
 {
 	u32 irq_stat;
-	struct mxc_gpio_port *port = irq_get_handler_data(irq);
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct mxc_gpio_port *port = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 
 	chained_irq_enter(chip, desc);
 
@@ -292,7 +292,7 @@
 {
 	u32 irq_msk, irq_stat;
 	struct mxc_gpio_port *port;
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 
 	chained_irq_enter(chip, desc);
 
@@ -339,7 +339,7 @@
 	return 0;
 }
 
-static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
+static void mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
 {
 	struct irq_chip_generic *gc;
 	struct irq_chip_type *ct;
@@ -354,6 +354,7 @@
 	ct->chip.irq_unmask = irq_gc_mask_set_bit;
 	ct->chip.irq_set_type = gpio_set_irq_type;
 	ct->chip.irq_set_wake = gpio_set_wake_irq;
+	ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND;
 	ct->regs.ack = GPIO_ISR;
 	ct->regs.mask = GPIO_IMR;
 
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 551d15d..b7f383e 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -157,7 +157,7 @@
 static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
 {
 	u32 irq_stat;
-	struct mxs_gpio_port *port = irq_get_handler_data(irq);
+	struct mxs_gpio_port *port = irq_desc_get_handler_data(desc);
 
 	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 61a731f..2ae0d47 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -29,6 +29,7 @@
 #include <linux/platform_data/gpio-omap.h>
 
 #define OFF_MODE	1
+#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
 
 static LIST_HEAD(omap_gpio_list);
 
@@ -57,7 +58,7 @@
 	u32 saved_datain;
 	u32 level_mask;
 	u32 toggle_mask;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct gpio_chip chip;
 	struct clk *dbck;
 	u32 mod_usage;
@@ -175,7 +176,7 @@
 static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
 {
 	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
-		clk_prepare_enable(bank->dbck);
+		clk_enable(bank->dbck);
 		bank->dbck_enabled = true;
 
 		writel_relaxed(bank->dbck_enable_mask,
@@ -193,7 +194,7 @@
 		 */
 		writel_relaxed(0, bank->base + bank->regs->debounce_en);
 
-		clk_disable_unprepare(bank->dbck);
+		clk_disable(bank->dbck);
 		bank->dbck_enabled = false;
 	}
 }
@@ -204,8 +205,9 @@
  * @offset: the gpio number on this @bank
  * @debounce: debounce time to use
  *
- * OMAP's debounce time is in 31us steps so we need
- * to convert and round up to the closest unit.
+ * OMAP's debounce time is in 31us steps
+ *   <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
+ * so we need to convert and round up to the closest unit.
  */
 static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
 				    unsigned debounce)
@@ -213,34 +215,33 @@
 	void __iomem		*reg;
 	u32			val;
 	u32			l;
+	bool			enable = !!debounce;
 
 	if (!bank->dbck_flag)
 		return;
 
-	if (debounce < 32)
-		debounce = 0x01;
-	else if (debounce > 7936)
-		debounce = 0xff;
-	else
-		debounce = (debounce / 0x1f) - 1;
+	if (enable) {
+		debounce = DIV_ROUND_UP(debounce, 31) - 1;
+		debounce &= OMAP4_GPIO_DEBOUNCINGTIME_MASK;
+	}
 
 	l = BIT(offset);
 
-	clk_prepare_enable(bank->dbck);
+	clk_enable(bank->dbck);
 	reg = bank->base + bank->regs->debounce;
 	writel_relaxed(debounce, reg);
 
 	reg = bank->base + bank->regs->debounce_en;
 	val = readl_relaxed(reg);
 
-	if (debounce)
+	if (enable)
 		val |= l;
 	else
 		val &= ~l;
 	bank->dbck_enable_mask = val;
 
 	writel_relaxed(val, reg);
-	clk_disable_unprepare(bank->dbck);
+	clk_disable(bank->dbck);
 	/*
 	 * Enable debounce clock per module.
 	 * This call is mandatory because in omap_gpio_request() when
@@ -285,7 +286,7 @@
 		bank->context.debounce = 0;
 		writel_relaxed(bank->context.debounce, bank->base +
 			     bank->regs->debounce);
-		clk_disable_unprepare(bank->dbck);
+		clk_disable(bank->dbck);
 		bank->dbck_enabled = false;
 	}
 }
@@ -498,24 +499,24 @@
 	if (!BANK_USED(bank))
 		pm_runtime_get_sync(bank->dev);
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	retval = omap_set_gpio_triggering(bank, offset, type);
 	if (retval) {
-		spin_unlock_irqrestore(&bank->lock, flags);
+		raw_spin_unlock_irqrestore(&bank->lock, flags);
 		goto error;
 	}
 	omap_gpio_init_irq(bank, offset);
 	if (!omap_gpio_is_input(bank, offset)) {
-		spin_unlock_irqrestore(&bank->lock, flags);
+		raw_spin_unlock_irqrestore(&bank->lock, flags);
 		retval = -EINVAL;
 		goto error;
 	}
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 	return 0;
 
@@ -636,14 +637,14 @@
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	if (enable)
 		bank->context.wake_en |= gpio_bit;
 	else
 		bank->context.wake_en &= ~gpio_bit;
 
 	writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -669,10 +670,10 @@
 	if (!BANK_USED(bank))
 		pm_runtime_get_sync(bank->dev);
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	omap_enable_gpio_module(bank, offset);
 	bank->mod_usage |= BIT(offset);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -682,14 +683,14 @@
 	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
 	unsigned long flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	bank->mod_usage &= ~(BIT(offset));
 	if (!LINE_USED(bank->irq_usage, offset)) {
 		omap_set_gpio_direction(bank, offset, 1);
 		omap_clear_gpio_debounce(bank, offset);
 	}
 	omap_disable_gpio_module(bank, offset);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	/*
 	 * If this is the last gpio to be freed in the bank,
@@ -716,7 +717,8 @@
 	struct gpio_bank *bank;
 	int unmasked = 0;
 	struct irq_chip *irqchip = irq_desc_get_chip(desc);
-	struct gpio_chip *chip = irq_get_handler_data(irq);
+	struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+	unsigned long lock_flags;
 
 	chained_irq_enter(irqchip, desc);
 
@@ -731,6 +733,8 @@
 		u32 isr_saved, level_mask = 0;
 		u32 enabled;
 
+		raw_spin_lock_irqsave(&bank->lock, lock_flags);
+
 		enabled = omap_get_gpio_irqbank_mask(bank);
 		isr_saved = isr = readl_relaxed(isr_reg) & enabled;
 
@@ -744,6 +748,8 @@
 		omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
 		omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
 
+		raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
+
 		/* if only edge-sensitive GPIO pin interrupts are configured,
 		we can unmask the GPIO bank interrupt immediately */
 		if (!level_mask && !unmasked) {
@@ -758,6 +764,7 @@
 			bit = __ffs(isr);
 			isr &= ~(BIT(bit));
 
+			raw_spin_lock_irqsave(&bank->lock, lock_flags);
 			/*
 			 * Some chips can't respond to both rising and falling
 			 * at the same time.  If this irq was requested with
@@ -768,6 +775,8 @@
 			if (bank->toggle_mask & (BIT(bit)))
 				omap_toggle_gpio_edge_triggering(bank, bit);
 
+			raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
+
 			generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
 							    bit));
 		}
@@ -791,7 +800,7 @@
 	if (!BANK_USED(bank))
 		pm_runtime_get_sync(bank->dev);
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 
 	if (!LINE_USED(bank->mod_usage, offset))
 		omap_set_gpio_direction(bank, offset, 1);
@@ -800,12 +809,12 @@
 	omap_enable_gpio_module(bank, offset);
 	bank->irq_usage |= BIT(offset);
 
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	omap_gpio_unmask_irq(d);
 
 	return 0;
 err:
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	if (!BANK_USED(bank))
 		pm_runtime_put(bank->dev);
 	return -EINVAL;
@@ -817,7 +826,7 @@
 	unsigned long flags;
 	unsigned offset = d->hwirq;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	bank->irq_usage &= ~(BIT(offset));
 	omap_set_gpio_irqenable(bank, offset, 0);
 	omap_clear_gpio_irqstatus(bank, offset);
@@ -825,7 +834,7 @@
 	if (!LINE_USED(bank->mod_usage, offset))
 		omap_clear_gpio_debounce(bank, offset);
 	omap_disable_gpio_module(bank, offset);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	/*
 	 * If this is the last IRQ to be freed in the bank,
@@ -849,10 +858,10 @@
 	unsigned offset = d->hwirq;
 	unsigned long flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	omap_set_gpio_irqenable(bank, offset, 0);
 	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 }
 
 static void omap_gpio_unmask_irq(struct irq_data *d)
@@ -862,7 +871,7 @@
 	u32 trigger = irqd_get_trigger_type(d);
 	unsigned long flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	if (trigger)
 		omap_set_gpio_triggering(bank, offset, trigger);
 
@@ -874,7 +883,7 @@
 	}
 
 	omap_set_gpio_irqenable(bank, offset, 1);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 }
 
 /*---------------------------------------------------------------------*/
@@ -887,9 +896,9 @@
 					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
 	unsigned long		flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -902,9 +911,9 @@
 					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
 	unsigned long		flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	writel_relaxed(bank->context.wake_en, mask_reg);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -950,9 +959,9 @@
 
 	bank = container_of(chip, struct gpio_bank, chip);
 	reg = bank->base + bank->regs->direction;
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	dir = !!(readl_relaxed(reg) & BIT(offset));
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	return dir;
 }
 
@@ -962,9 +971,9 @@
 	unsigned long flags;
 
 	bank = container_of(chip, struct gpio_bank, chip);
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	omap_set_gpio_direction(bank, offset, 1);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	return 0;
 }
 
@@ -986,10 +995,10 @@
 	unsigned long flags;
 
 	bank = container_of(chip, struct gpio_bank, chip);
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	bank->set_dataout(bank, offset, value);
 	omap_set_gpio_direction(bank, offset, 0);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	return 0;
 }
 
@@ -1001,9 +1010,9 @@
 
 	bank = container_of(chip, struct gpio_bank, chip);
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	omap2_set_gpio_debounce(bank, offset, debounce);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -1014,9 +1023,9 @@
 	unsigned long flags;
 
 	bank = container_of(chip, struct gpio_bank, chip);
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	bank->set_dataout(bank, offset, value);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 }
 
 /*---------------------------------------------------------------------*/
@@ -1061,10 +1070,6 @@
 	 /* Initialize interface clk ungated, module enabled */
 	if (bank->regs->ctrl)
 		writel_relaxed(0, base + bank->regs->ctrl);
-
-	bank->dbck = clk_get(bank->dev, "dbclk");
-	if (IS_ERR(bank->dbck))
-		dev_err(bank->dev, "Could not get gpio dbck\n");
 }
 
 static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
@@ -1178,13 +1183,16 @@
 	irqc->irq_set_wake = omap_gpio_wake_enable,
 	irqc->name = dev_name(&pdev->dev);
 
-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (unlikely(!res)) {
-		dev_err(dev, "Invalid IRQ resource\n");
-		return -ENODEV;
+	bank->irq = platform_get_irq(pdev, 0);
+	if (bank->irq <= 0) {
+		if (!bank->irq)
+			bank->irq = -ENXIO;
+		if (bank->irq != -EPROBE_DEFER)
+			dev_err(dev,
+				"can't get irq resource ret=%d\n", bank->irq);
+		return bank->irq;
 	}
 
-	bank->irq = res->start;
 	bank->dev = dev;
 	bank->chip.dev = dev;
 	bank->chip.owner = THIS_MODULE;
@@ -1213,16 +1221,26 @@
 	else
 		bank->set_dataout = omap_set_gpio_dataout_mask;
 
-	spin_lock_init(&bank->lock);
+	raw_spin_lock_init(&bank->lock);
 
 	/* Static mapping, never released */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	bank->base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(bank->base)) {
-		irq_domain_remove(bank->chip.irqdomain);
 		return PTR_ERR(bank->base);
 	}
 
+	if (bank->dbck_flag) {
+		bank->dbck = devm_clk_get(bank->dev, "dbclk");
+		if (IS_ERR(bank->dbck)) {
+			dev_err(bank->dev,
+				"Could not get gpio dbck. Disable debounce\n");
+			bank->dbck_flag = false;
+		} else {
+			clk_prepare(bank->dbck);
+		}
+	}
+
 	platform_set_drvdata(pdev, bank);
 
 	pm_runtime_enable(bank->dev);
@@ -1254,6 +1272,8 @@
 	list_del(&bank->node);
 	gpiochip_remove(&bank->chip);
 	pm_runtime_disable(bank->dev);
+	if (bank->dbck_flag)
+		clk_unprepare(bank->dbck);
 
 	return 0;
 }
@@ -1271,7 +1291,7 @@
 	unsigned long flags;
 	u32 wake_low, wake_hi;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 
 	/*
 	 * Only edges can generate a wakeup event to the PRCM.
@@ -1324,7 +1344,7 @@
 				bank->get_context_loss_count(bank->dev);
 
 	omap_gpio_dbck_disable(bank);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -1339,7 +1359,7 @@
 	unsigned long flags;
 	int c;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 
 	/*
 	 * On the first resume during the probe, the context has not
@@ -1375,14 +1395,14 @@
 			if (c != bank->context_loss_count) {
 				omap_gpio_restore_context(bank);
 			} else {
-				spin_unlock_irqrestore(&bank->lock, flags);
+				raw_spin_unlock_irqrestore(&bank->lock, flags);
 				return 0;
 			}
 		}
 	}
 
 	if (!bank->workaround_enabled) {
-		spin_unlock_irqrestore(&bank->lock, flags);
+		raw_spin_unlock_irqrestore(&bank->lock, flags);
 		return 0;
 	}
 
@@ -1437,7 +1457,7 @@
 	}
 
 	bank->workaround_enabled = false;
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
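
The rewritten omap2_set_gpio_debounce() encodes the formula from the updated comment: the effective debounce time is (DEBOUNCETIME + 1) x 31 us, so the register value is DIV_ROUND_UP(debounce, 31) - 1, masked to the 8-bit field. A standalone check of the conversion in plain C; note that the mask truncates out-of-range requests rather than clamping them to 0xff as the old code did, which the last row makes visible:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define DEBOUNCE_MASK		0xff	/* OMAP4_GPIO_DEBOUNCINGTIME_MASK */

	int main(void)
	{
		unsigned int req[] = { 31, 32, 100, 7936, 10000 };
		unsigned int i;

		for (i = 0; i < sizeof(req) / sizeof(req[0]); i++) {
			unsigned int reg =
				(DIV_ROUND_UP(req[i], 31) - 1) & DEBOUNCE_MASK;
			unsigned int actual = (reg + 1) * 31;

			printf("requested %5u us -> reg 0x%02x -> actual %5u us\n",
			       req[i], reg, actual);
		}
		return 0;
	}

For example, a request of 100 us rounds up to reg 0x03, i.e. (3 + 1) x 31 = 124 us, the closest setting that is not shorter than what was asked for.
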
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 404f3c6..1d4d9bc 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -88,7 +88,6 @@
 	struct gpio_chip	chip;
 	struct i2c_client	*client;
 	struct mutex		lock;		/* protect 'out' */
-	spinlock_t		slock;		/* protect irq demux */
 	unsigned		out;		/* software latch */
 	unsigned		status;		/* current status */
 	unsigned int		irq_parent;
@@ -185,23 +184,21 @@
 static irqreturn_t pcf857x_irq(int irq, void *data)
 {
 	struct pcf857x  *gpio = data;
-	unsigned long change, i, status, flags;
+	unsigned long change, i, status;
 
 	status = gpio->read(gpio->client);
 
-	spin_lock_irqsave(&gpio->slock, flags);
-
 	/*
 	 * call the interrupt handler only if the gpio is used as an
 	 * interrupt source, just to avoid bad irqs
 	 */
-
+	mutex_lock(&gpio->lock);
 	change = (gpio->status ^ status) & gpio->irq_enabled;
+	gpio->status = status;
+	mutex_unlock(&gpio->lock);
+
 	for_each_set_bit(i, &change, gpio->chip.ngpio)
 		handle_nested_irq(irq_find_mapping(gpio->chip.irqdomain, i));
-	gpio->status = status;
-
-	spin_unlock_irqrestore(&gpio->slock, flags);
 
 	return IRQ_HANDLED;
 }
@@ -293,7 +290,6 @@
 		return -ENOMEM;
 
 	mutex_init(&gpio->lock);
-	spin_lock_init(&gpio->slock);
 
 	gpio->chip.base			= pdata ? pdata->gpio_base : -1;
 	gpio->chip.can_sleep		= true;
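
Dropping the pcf857x spinlock works because this handler can sleep anyway: gpio->read() is an I2C transfer (hence chip.can_sleep above), so the existing mutex that already protects 'out' can also guard the status/irq_enabled snapshot, and handle_nested_irq() expects exactly this kind of thread context. Note how the dispatch loop now runs outside the lock, since the nested handlers may themselves sleep. A trimmed sketch of the edge-detect-and-dispatch shape (my_* names invented; my_read stands in for the sleeping bus read):

	#include <linux/bitops.h>
	#include <linux/gpio/driver.h>
	#include <linux/interrupt.h>
	#include <linux/irq.h>
	#include <linux/irqdomain.h>
	#include <linux/mutex.h>

	struct my_expander {
		struct mutex lock;	/* protects status/irq_enabled */
		unsigned int status;	/* last sampled pin state */
		unsigned int irq_enabled;
		struct gpio_chip chip;
	};

	/* placeholder for the sleeping I2C read of the pin states */
	static unsigned int my_read(struct my_expander *gpio);

	static irqreturn_t my_expander_irq(int irq, void *data)
	{
		struct my_expander *gpio = data;
		unsigned long change, i, status = my_read(gpio);

		/* snapshot under the mutex; only armed pins count */
		mutex_lock(&gpio->lock);
		change = (gpio->status ^ status) & gpio->irq_enabled;
		gpio->status = status;
		mutex_unlock(&gpio->lock);

		/* dispatch outside the lock: nested handlers may sleep */
		for_each_set_bit(i, &change, gpio->chip.ngpio)
			handle_nested_irq(irq_find_mapping(gpio->chip.irqdomain, i));

		return IRQ_HANDLED;
	}
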
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 2d9a950..34ed176 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -281,9 +281,9 @@
 
 	/* And the handler */
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 unlock:
 	spin_unlock_irqrestore(&chip->spinlock, flags);
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index cdbbcf0..55a11de 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -524,7 +524,7 @@
 {
 	irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
 				 handle_edge_irq);
-	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	irq_set_noprobe(irq);
 	return 0;
 }
 
@@ -643,20 +643,20 @@
 			irq = gpio_to_irq(0);
 			irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
 						 handle_edge_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+			irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 		if (irq1 > 0) {
 			irq = gpio_to_irq(1);
 			irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
 						 handle_edge_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+			irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 
 		for (irq  = gpio_to_irq(gpio_offset);
 			irq <= gpio_to_irq(pxa_last_gpio); irq++) {
 			irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
 						 handle_edge_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+			irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 	}
 
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 1e14a6c..2a81224 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -251,17 +251,32 @@
 
 static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
 {
-	return pinctrl_request_gpio(chip->base + offset);
+	struct gpio_rcar_priv *p = gpio_to_priv(chip);
+	int error;
+
+	error = pm_runtime_get_sync(&p->pdev->dev);
+	if (error < 0)
+		return error;
+
+	error = pinctrl_request_gpio(chip->base + offset);
+	if (error)
+		pm_runtime_put(&p->pdev->dev);
+
+	return error;
 }
 
 static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
 {
+	struct gpio_rcar_priv *p = gpio_to_priv(chip);
+
 	pinctrl_free_gpio(chip->base + offset);
 
 	/* Set the GPIO as an input to ensure that the next GPIO request won't
 	 * drive the GPIO pin as an output.
 	 */
 	gpio_rcar_config_general_input_output_mode(chip, offset, false);
+
+	pm_runtime_put(&p->pdev->dev);
 }
 
 static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -327,6 +342,10 @@
 		.compatible = "renesas,gpio-r8a7794",
 		.data = &gpio_rcar_info_gen2,
 	}, {
+		.compatible = "renesas,gpio-r8a7795",
+		/* Gen3 GPIO is identical to Gen2. */
+		.data = &gpio_rcar_info_gen2,
+	}, {
 		.compatible = "renesas,gpio-rcar",
 		.data = &gpio_rcar_info_gen1,
 	}, {
@@ -405,7 +424,6 @@
 	}
 
 	pm_runtime_enable(dev);
-	pm_runtime_get_sync(dev);
 
 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -487,7 +505,6 @@
 err1:
 	gpiochip_remove(gpio_chip);
 err0:
-	pm_runtime_put(dev);
 	pm_runtime_disable(dev);
 	return ret;
 }
@@ -498,7 +515,6 @@
 
 	gpiochip_remove(&p->gpio_chip);
 
-	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	return 0;
 }
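
The gpio-rcar change swaps a probe-time pm_runtime_get_sync() held for the controller's lifetime for per-line references: each requested GPIO takes a runtime-PM reference and drops it on free, so an unused controller can actually be powered down. The unwind ordering is the part worth copying: on pinctrl failure the reference is released before returning. A restatement of the pairing with the container boilerplate spelled out (my_* names invented):

	#include <linux/gpio/driver.h>
	#include <linux/kernel.h>
	#include <linux/pinctrl/consumer.h>
	#include <linux/pm_runtime.h>

	struct my_gpio {
		struct gpio_chip chip;
		struct device *dev;
	};

	#define to_my(gc)	container_of(gc, struct my_gpio, chip)

	static int my_gpio_request(struct gpio_chip *chip, unsigned offset)
	{
		struct my_gpio *p = to_my(chip);
		int error;

		/* power the controller up while the line is held */
		error = pm_runtime_get_sync(p->dev);
		if (error < 0)
			return error;

		error = pinctrl_request_gpio(chip->base + offset);
		if (error)
			pm_runtime_put(p->dev);	/* unwind on failure */

		return error;
	}

	static void my_gpio_free(struct gpio_chip *chip, unsigned offset)
	{
		struct my_gpio *p = to_my(chip);

		pinctrl_free_gpio(chip->base + offset);
		pm_runtime_put(p->dev);		/* balances request() */
	}
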
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 3fa22da..67bd2f5 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -155,7 +155,7 @@
 {
 	irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip,
 				 handle_edge_irq);
-	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	irq_set_noprobe(irq);
 
 	return 0;
 }
@@ -173,9 +173,9 @@
  * and call the handler.
  */
 static void
-sa1100_gpio_handler(unsigned int irq, struct irq_desc *desc)
+sa1100_gpio_handler(unsigned int __irq, struct irq_desc *desc)
 {
-	unsigned int mask;
+	unsigned int irq, mask;
 
 	mask = GEDR;
 	do {
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 18579ac..55e47828 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -346,7 +346,7 @@
 			i = chip->irq_base + j;
 			irq_set_chip_and_handler(i, &ct->chip, ct->handler);
 			irq_set_chip_data(i, gc);
-			irq_modify_status(i, IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+			irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 		gc->irq_cnt = i - gc->irq_base;
 	}
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 31b244c..d1d585d 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -102,7 +102,7 @@
 static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
 	int offset = d->hwirq;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
@@ -130,7 +130,7 @@
 static void tc3589x_gpio_irq_lock(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
 
 	mutex_lock(&tc3589x_gpio->irq_lock);
 }
@@ -138,7 +138,7 @@
 static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
 	struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
 	static const u8 regmap[] = {
 		[REG_IBE]	= TC3589x_GPIOIBE0,
@@ -167,7 +167,7 @@
 static void tc3589x_gpio_irq_mask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
 	int offset = d->hwirq;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
@@ -178,7 +178,7 @@
 static void tc3589x_gpio_irq_unmask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
 	int offset = d->hwirq;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
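
The tc3589x hunks fold the repeated open-coded container_of() into a to_tc3589x_gpio() accessor (its definition sits outside this hunk), and the new gpio-zx driver below starts life with the same to_zx() helper. The pattern in isolation, as plain C you can compile and run; the struct names are invented, and this is the minimal form of container_of, without the typeof-based type check the kernel macro adds:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct gpio_chip { const char *label; };

	struct my_gpio {
		int irq_base;
		struct gpio_chip chip;	/* embedded member, not a pointer */
	};

	/* recover the wrapper from a pointer to the embedded gpio_chip */
	static struct my_gpio *to_my_gpio(struct gpio_chip *gc)
	{
		return container_of(gc, struct my_gpio, chip);
	}

	int main(void)
	{
		struct my_gpio g = { .irq_base = 42, .chip = { "demo" } };
		struct gpio_chip *gc = &g.chip;	/* what callbacks receive */

		printf("irq_base via container_of: %d\n",
		       to_my_gpio(gc)->irq_base);
		return 0;
	}
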
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 9b25c90..9b14aaf 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -252,9 +252,9 @@
 	tegra_gpio_enable(gpio);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 	return 0;
 }
@@ -268,16 +268,14 @@
 
 static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
-	struct tegra_gpio_bank *bank;
 	int port;
 	int pin;
 	int unmasked = 0;
 	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct tegra_gpio_bank *bank = irq_desc_get_handler_data(desc);
 
 	chained_irq_enter(chip, desc);
 
-	bank = irq_get_handler_data(irq);
-
 	for (port = 0; port < 4; port++) {
 		int gpio = tegra_gpio_compose(bank->bank, port, 0);
 		unsigned long sta = tegra_gpio_readl(GPIO_INT_STA(gpio)) &
@@ -509,7 +507,6 @@
 		irq_set_chip_data(irq, bank);
 		irq_set_chip_and_handler(irq, &tegra_gpio_irq_chip,
 					 handle_simple_irq);
-		set_irq_flags(irq, IRQF_VALID);
 	}
 
 	for (i = 0; i < tegra_gpio_bank_count; i++) {
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index e8f97e0..5a49205 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -194,11 +194,12 @@
 
 static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct timbgpio *tgpio = irq_get_handler_data(irq);
+	struct timbgpio *tgpio = irq_desc_get_handler_data(desc);
+	struct irq_data *data = irq_desc_get_irq_data(desc);
 	unsigned long ipr;
 	int offset;
 
-	desc->irq_data.chip->irq_ack(irq_get_irq_data(irq));
+	data->chip->irq_ack(data);
 	ipr = ioread32(tgpio->membase + TGPIO_IPR);
 	iowrite32(ipr, tgpio->membase + TGPIO_ICR);
 
@@ -294,13 +295,10 @@
 		irq_set_chip_and_handler(tgpio->irq_base + i,
 			&timbgpio_irqchip, handle_simple_irq);
 		irq_set_chip_data(tgpio->irq_base + i, tgpio);
-#ifdef CONFIG_ARM
-		set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE);
-#endif
+		irq_clear_status_flags(tgpio->irq_base + i, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
-	irq_set_handler_data(irq, tgpio);
-	irq_set_chained_handler(irq, timbgpio_irq);
+	irq_set_chained_handler_and_data(irq, timbgpio_irq, tgpio);
 
 	return 0;
 }
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c
index 445660a..bbac92a 100644
--- a/drivers/gpio/gpio-tz1090.c
+++ b/drivers/gpio/gpio-tz1090.c
@@ -510,8 +510,8 @@
 	gc->chip_types[1].chip.flags		= IRQCHIP_MASK_ON_SUSPEND;
 
 	/* Setup chained handler for this GPIO bank */
-	irq_set_handler_data(bank->irq, bank);
-	irq_set_chained_handler(bank->irq, tz1090_gpio_irq_handler);
+	irq_set_chained_handler_and_data(bank->irq, tz1090_gpio_irq_handler,
+					 bank);
 
 	return 0;
 }
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 7bd9f20..3d5714d 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -60,6 +60,8 @@
 #define PORT_INT_EITHER_EDGE	0xb
 #define PORT_INT_LOGIC_ONE	0xc
 
+static struct irq_chip vf610_gpio_irq_chip;
+
 static const struct of_device_id vf610_gpio_dt_ids[] = {
 	{ .compatible = "fsl,vf610-gpio" },
 	{ /* sentinel */ }
@@ -120,7 +122,7 @@
 
 static void vf610_gpio_irq_handler(u32 irq, struct irq_desc *desc)
 {
-	struct vf610_gpio_port *port = irq_get_handler_data(irq);
+	struct vf610_gpio_port *port = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int pin;
 	unsigned long irq_isfr;
@@ -173,6 +175,11 @@
 
 	port->irqc[d->hwirq] = irqc;
 
+	if (type & IRQ_TYPE_LEVEL_MASK)
+		__irq_set_handler_locked(d->irq, handle_level_irq);
+	else
+		__irq_set_handler_locked(d->irq, handle_edge_irq);
+
 	return 0;
 }
 
@@ -263,7 +270,7 @@
 	vf610_gpio_writel(~0, port->base + PORT_ISFR);
 
 	ret = gpiochip_irqchip_add(gc, &vf610_gpio_irq_chip, 0,
-				   handle_simple_irq, IRQ_TYPE_NONE);
+				   handle_edge_irq, IRQ_TYPE_NONE);
 	if (ret) {
 		dev_err(dev, "failed to add irqchip\n");
 		gpiochip_remove(gc);
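
The vf610 change switches the flow handler per trigger type instead of using one handler for everything: level interrupts need the mask-while-handling discipline of handle_level_irq, while edge interrupts want the ack-up-front flow of handle_edge_irq so new edges are not lost, and the gpiochip_irqchip_add() default accordingly becomes handle_edge_irq. (This hunk still spells it with the older __irq_set_handler_locked(d->irq, ...); the equivalent current form used elsewhere in this pull is irq_set_handler_locked(d, ...).) A condensed sketch of the set_type half, with invented my_* names:

	#include <linux/irq.h>

	static int my_gpio_irq_set_type(struct irq_data *d, unsigned int type)
	{
		/* program the edge/level detect hardware for d->hwirq here */

		/* pick the flow handler to match the trigger */
		if (type & IRQ_TYPE_LEVEL_MASK)
			irq_set_handler_locked(d, handle_level_irq);
		else
			irq_set_handler_locked(d, handle_edge_irq);

		return 0;
	}
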
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 9bdab72..e02499a 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -387,7 +387,7 @@
 	irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0);
 	if (irq_base < 0) {
 		dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
-		return err;
+		return -ENODEV;
 	}
 
 	err = gpiochip_add(gc);
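
The one-line xlp fix replaces `return err` with an explicit -ENODEV: at that point err has not been assigned by irq_alloc_descs() — it is only written by the gpiochip_add() just below — so it held a stale value from an earlier call and the probe failure could be misreported. The bug pattern in miniature, runnable as plain C (the scenario is invented; only the stale-variable shape is taken from the patch):

	#include <errno.h>
	#include <stdio.h>

	static int earlier_step(void) { return 0; }	/* succeeded */
	static int alloc_descs(void)  { return -1; }	/* fails */

	static int probe(void)
	{
		int err = earlier_step();

		if (alloc_descs() < 0) {
			/*
			 * the pattern fixed above: 'err' is stale here,
			 * so "return err;" would not report this failure
			 */
			return -ENODEV;
		}
		return err;
	}

	int main(void)
	{
		printf("probe() = %d\n", probe());
		return 0;
	}
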
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
new file mode 100644
index 0000000..12ee196
--- /dev/null
+++ b/drivers/gpio/gpio-zx.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define ZX_GPIO_DIR	0x00
+#define ZX_GPIO_IVE	0x04
+#define ZX_GPIO_IV	0x08
+#define ZX_GPIO_IEP	0x0C
+#define ZX_GPIO_IEN	0x10
+#define ZX_GPIO_DI	0x14
+#define ZX_GPIO_DO1	0x18
+#define ZX_GPIO_DO0	0x1C
+#define ZX_GPIO_DO	0x20
+
+#define ZX_GPIO_IM	0x28
+#define ZX_GPIO_IE	0x2C
+
+#define ZX_GPIO_MIS	0x30
+#define ZX_GPIO_IC	0x34
+
+#define ZX_GPIO_NR	16
+
+struct zx_gpio {
+	spinlock_t		lock;
+
+	void __iomem		*base;
+	struct gpio_chip	gc;
+	bool			uses_pinctrl;
+};
+
+static inline struct zx_gpio *to_zx(struct gpio_chip *gc)
+{
+	return container_of(gc, struct zx_gpio, gc);
+}
+
+static int zx_gpio_request(struct gpio_chip *gc, unsigned offset)
+{
+	struct zx_gpio *chip = to_zx(gc);
+	int gpio = gc->base + offset;
+
+	if (chip->uses_pinctrl)
+		return pinctrl_request_gpio(gpio);
+	return 0;
+}
+
+static void zx_gpio_free(struct gpio_chip *gc, unsigned offset)
+{
+	struct zx_gpio *chip = to_zx(gc);
+	int gpio = gc->base + offset;
+
+	if (chip->uses_pinctrl)
+		pinctrl_free_gpio(gpio);
+}
+
+static int zx_direction_input(struct gpio_chip *gc, unsigned offset)
+{
+	struct zx_gpio *chip = to_zx(gc);
+	unsigned long flags;
+	u16 gpiodir;
+
+	if (offset >= gc->ngpio)
+		return -EINVAL;
+
+	spin_lock_irqsave(&chip->lock, flags);
+	gpiodir = readw_relaxed(chip->base + ZX_GPIO_DIR);
+	gpiodir &= ~BIT(offset);
+	writew_relaxed(gpiodir, chip->base + ZX_GPIO_DIR);
+	spin_unlock_irqrestore(&chip->lock, flags);
+
+	return 0;
+}
+
+static int zx_direction_output(struct gpio_chip *gc, unsigned offset,
+		int value)
+{
+	struct zx_gpio *chip = to_zx(gc);
+	unsigned long flags;
+	u16 gpiodir;
+
+	if (offset >= gc->ngpio)
+		return -EINVAL;
+
+	spin_lock_irqsave(&chip->lock, flags);
+	gpiodir = readw_relaxed(chip->base + ZX_GPIO_DIR);
+	gpiodir |= BIT(offset);
+	writew_relaxed(gpiodir, chip->base + ZX_GPIO_DIR);
+
+	if (value)
+		writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO1);
+	else
+		writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO0);
+	spin_unlock_irqrestore(&chip->lock, flags);
+
+	return 0;
+}
+
+static int zx_get_value(struct gpio_chip *gc, unsigned offset)
+{
+	struct zx_gpio *chip = to_zx(gc);
+
+	return !!(readw_relaxed(chip->base + ZX_GPIO_DI) & BIT(offset));
+}
+
+static void zx_set_value(struct gpio_chip *gc, unsigned offset, int value)
+{
+	struct zx_gpio *chip = to_zx(gc);
+
+	if (value)
+		writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO1);
+	else
+		writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO0);
+}
+
+static int zx_irq_type(struct irq_data *d, unsigned trigger)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct zx_gpio *chip = to_zx(gc);
+	int offset = irqd_to_hwirq(d);
+	unsigned long flags;
+	u16 gpiois, gpioi_epos, gpioi_eneg, gpioiev;
+	u16 bit = BIT(offset);
+
+	if (offset < 0 || offset >= ZX_GPIO_NR)
+		return -EINVAL;
+
+	spin_lock_irqsave(&chip->lock, flags);
+
+	gpioiev = readw_relaxed(chip->base + ZX_GPIO_IV);
+	gpiois = readw_relaxed(chip->base + ZX_GPIO_IVE);
+	gpioi_epos = readw_relaxed(chip->base + ZX_GPIO_IEP);
+	gpioi_eneg = readw_relaxed(chip->base + ZX_GPIO_IEN);
+
+	if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
+		gpiois |= bit;
+		if (trigger & IRQ_TYPE_LEVEL_HIGH)
+			gpioiev |= bit;
+		else
+			gpioiev &= ~bit;
+	} else
+		gpiois &= ~bit;
+
+	if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
+		gpioi_epos |= bit;
+		gpioi_eneg |= bit;
+	} else {
+		if (trigger & IRQ_TYPE_EDGE_RISING) {
+			gpioi_epos |= bit;
+			gpioi_eneg &= ~bit;
+		} else if (trigger & IRQ_TYPE_EDGE_FALLING) {
+			gpioi_eneg |= bit;
+			gpioi_epos &= ~bit;
+		}
+	}
+
+	writew_relaxed(gpiois, chip->base + ZX_GPIO_IVE);
+	writew_relaxed(gpioi_epos, chip->base + ZX_GPIO_IEP);
+	writew_relaxed(gpioi_eneg, chip->base + ZX_GPIO_IEN);
+	writew_relaxed(gpioiev, chip->base + ZX_GPIO_IV);
+	spin_unlock_irqrestore(&chip->lock, flags);
+
+	return 0;
+}
+
+static void zx_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+	unsigned long pending;
+	int offset;
+	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+	struct zx_gpio *chip = to_zx(gc);
+	struct irq_chip *irqchip = irq_desc_get_chip(desc);
+
+	chained_irq_enter(irqchip, desc);
+
+	pending = readw_relaxed(chip->base + ZX_GPIO_MIS);
+	writew_relaxed(pending, chip->base + ZX_GPIO_IC);
+	if (pending) {
+		for_each_set_bit(offset, &pending, ZX_GPIO_NR)
+			generic_handle_irq(irq_find_mapping(gc->irqdomain,
+							    offset));
+	}
+
+	chained_irq_exit(irqchip, desc);
+}
+
+static void zx_irq_mask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct zx_gpio *chip = to_zx(gc);
+	u16 mask = BIT(irqd_to_hwirq(d) % ZX_GPIO_NR);
+	u16 gpioie;
+
+	spin_lock(&chip->lock);
+	gpioie = readw_relaxed(chip->base + ZX_GPIO_IM) | mask;
+	writew_relaxed(gpioie, chip->base + ZX_GPIO_IM);
+	gpioie = readw_relaxed(chip->base + ZX_GPIO_IE) & ~mask;
+	writew_relaxed(gpioie, chip->base + ZX_GPIO_IE);
+	spin_unlock(&chip->lock);
+}
+
+static void zx_irq_unmask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct zx_gpio *chip = to_zx(gc);
+	u16 mask = BIT(irqd_to_hwirq(d) % ZX_GPIO_NR);
+	u16 gpioie;
+
+	spin_lock(&chip->lock);
+	gpioie = readw_relaxed(chip->base + ZX_GPIO_IM) & ~mask;
+	writew_relaxed(gpioie, chip->base + ZX_GPIO_IM);
+	gpioie = readw_relaxed(chip->base + ZX_GPIO_IE) | mask;
+	writew_relaxed(gpioie, chip->base + ZX_GPIO_IE);
+	spin_unlock(&chip->lock);
+}
+
+static struct irq_chip zx_irqchip = {
+	.name		= "zx-gpio",
+	.irq_mask	= zx_irq_mask,
+	.irq_unmask	= zx_irq_unmask,
+	.irq_set_type	= zx_irq_type,
+};
+
+static int zx_gpio_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct zx_gpio *chip;
+	struct resource *res;
+	int irq, id, ret;
+
+	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	chip->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(chip->base))
+		return PTR_ERR(chip->base);
+
+	spin_lock_init(&chip->lock);
+	if (of_property_read_bool(dev->of_node, "gpio-ranges"))
+		chip->uses_pinctrl = true;
+
+	id = of_alias_get_id(dev->of_node, "gpio");
+	chip->gc.request = zx_gpio_request;
+	chip->gc.free = zx_gpio_free;
+	chip->gc.direction_input = zx_direction_input;
+	chip->gc.direction_output = zx_direction_output;
+	chip->gc.get = zx_get_value;
+	chip->gc.set = zx_set_value;
+	chip->gc.base = ZX_GPIO_NR * id;
+	chip->gc.ngpio = ZX_GPIO_NR;
+	chip->gc.label = dev_name(dev);
+	chip->gc.dev = dev;
+	chip->gc.owner = THIS_MODULE;
+
+	ret = gpiochip_add(&chip->gc);
+	if (ret)
+		return ret;
+
+	/*
+	 * irq_chip support
+	 */
+	writew_relaxed(0xffff, chip->base + ZX_GPIO_IM);
+	writew_relaxed(0, chip->base + ZX_GPIO_IE);
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "invalid IRQ\n");
+		gpiochip_remove(&chip->gc);
+		return -ENODEV;
+	}
+
+	ret = gpiochip_irqchip_add(&chip->gc, &zx_irqchip,
+				   0, handle_simple_irq,
+				   IRQ_TYPE_NONE);
+	if (ret) {
+		dev_err(dev, "could not add irqchip\n");
+		gpiochip_remove(&chip->gc);
+		return ret;
+	}
+	gpiochip_set_chained_irqchip(&chip->gc, &zx_irqchip,
+				     irq, zx_irq_handler);
+
+	platform_set_drvdata(pdev, chip);
+	dev_info(dev, "ZX GPIO chip registered\n");
+
+	return 0;
+}
+
+static const struct of_device_id zx_gpio_match[] = {
+	{
+		.compatible = "zte,zx296702-gpio",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, zx_gpio_match);
+
+static struct platform_driver zx_gpio_driver = {
+	.probe		= zx_gpio_probe,
+	.driver = {
+		.name	= "zx_gpio",
+		.of_match_table = of_match_ptr(zx_gpio_match),
+	},
+};
+
+module_platform_driver(zx_gpio_driver);
+
+MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
+MODULE_DESCRIPTION("ZTE ZX296702 GPIO driver");
+MODULE_LICENSE("GPL");
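
[Editor's note] A consumer bound to one of this controller's lines would go through the GPIO descriptor API rather than touching the driver directly. A minimal sketch, assuming a hypothetical platform device whose DT node carries a reset-gpios property routed to this controller:

    #include <linux/delay.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
    	struct gpio_desc *reset;

    	/* "reset" matches a reset-gpios DT property (hypothetical) */
    	reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
    	if (IS_ERR(reset))
    		return PTR_ERR(reset);

    	gpiod_set_value(reset, 1);	/* assert reset */
    	udelay(10);
    	gpiod_set_value(reset, 0);	/* release reset */

    	return 0;
    }
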
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index a788823..27348e7 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -441,10 +441,10 @@
 		       gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num));
 
 	if (type & IRQ_TYPE_LEVEL_MASK) {
-		__irq_set_chip_handler_name_locked(irq_data->irq,
+		irq_set_chip_handler_name_locked(irq_data,
 			&zynq_gpio_level_irqchip, handle_fasteoi_irq, NULL);
 	} else {
-		__irq_set_chip_handler_name_locked(irq_data->irq,
+		irq_set_chip_handler_name_locked(irq_data,
 			&zynq_gpio_edge_irqchip, handle_level_irq, NULL);
 	}
 
@@ -518,7 +518,7 @@
 {
 	u32 int_sts, int_enb;
 	unsigned int bank_num;
-	struct zynq_gpio *gpio = irq_get_handler_data(irq);
+	struct zynq_gpio *gpio = irq_desc_get_handler_data(desc);
 	struct irq_chip *irqchip = irq_desc_get_chip(desc);
 
 	chained_irq_enter(irqchip, desc);
@@ -782,6 +782,12 @@
 }
 postcore_initcall(zynq_gpio_init);
 
+static void __exit zynq_gpio_exit(void)
+{
+	platform_driver_unregister(&zynq_gpio_driver);
+}
+module_exit(zynq_gpio_exit);
+
 MODULE_AUTHOR("Xilinx Inc.");
 MODULE_DESCRIPTION("Zynq GPIO driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 9a0ec48..fa6e3c8 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -136,7 +136,6 @@
 {
 	struct device_node *chip_np;
 	enum of_gpio_flags xlate_flags;
-	struct gpio_desc *desc;
 	struct gg_data gg_data = {
 		.flags = &xlate_flags,
 	};
@@ -193,9 +192,7 @@
 	if (name && of_property_read_string(np, "line-name", name))
 		*name = np->name;
 
-	desc = gg_data.out_gpio;
-
-	return desc;
+	return gg_data.out_gpio;
 }
 
 /**
@@ -338,7 +335,7 @@
 EXPORT_SYMBOL(of_mm_gpiochip_remove);
 
 #ifdef CONFIG_PINCTRL
-static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
+static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
 {
 	struct device_node *np = chip->of_node;
 	struct of_phandle_args pinspec;
@@ -349,7 +346,7 @@
 	struct property *group_names;
 
 	if (!np)
-		return;
+		return 0;
 
 	group_names = of_find_property(np, group_names_propname, NULL);
 
@@ -361,11 +358,11 @@
 
 		pctldev = of_pinctrl_get(pinspec.np);
 		if (!pctldev)
-			break;
+			return -EPROBE_DEFER;
 
 		if (pinspec.args[2]) {
 			if (group_names) {
-				ret = of_property_read_string_index(np,
+				of_property_read_string_index(np,
 						group_names_propname,
 						index, &name);
 				if (strlen(name)) {
@@ -381,7 +378,7 @@
 					pinspec.args[1],
 					pinspec.args[2]);
 			if (ret)
-				break;
+				return ret;
 		} else {
 			/* npins == 0: special range */
 			if (pinspec.args[1]) {
@@ -411,32 +408,41 @@
 			ret = gpiochip_add_pingroup_range(chip, pctldev,
 						pinspec.args[0], name);
 			if (ret)
-				break;
+				return ret;
 		}
 	}
+
+	return 0;
 }
 
 #else
-static void of_gpiochip_add_pin_range(struct gpio_chip *chip) {}
+static int of_gpiochip_add_pin_range(struct gpio_chip *chip) { return 0; }
 #endif
 
-void of_gpiochip_add(struct gpio_chip *chip)
+int of_gpiochip_add(struct gpio_chip *chip)
 {
+	int status;
+
 	if ((!chip->of_node) && (chip->dev))
 		chip->of_node = chip->dev->of_node;
 
 	if (!chip->of_node)
-		return;
+		return 0;
 
 	if (!chip->of_xlate) {
 		chip->of_gpio_n_cells = 2;
 		chip->of_xlate = of_gpio_simple_xlate;
 	}
 
-	of_gpiochip_add_pin_range(chip);
+	status = of_gpiochip_add_pin_range(chip);
+	if (status)
+		return status;
+
 	of_node_get(chip->of_node);
 
 	of_gpiochip_scan_hogs(chip);
+
+	return 0;
 }
 
 void of_gpiochip_remove(struct gpio_chip *chip)
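
[Editor's note] With this change, of_gpiochip_add() — and therefore gpiochip_add() — can fail with -EPROBE_DEFER when the pin controller referenced by gpio-ranges has not probed yet. A controller driver should simply propagate the error so the driver core retries the probe later; a sketch with a hypothetical foo_gpio_probe():

    static int foo_gpio_probe(struct platform_device *pdev)
    {
    	struct foo_gpio *chip;
    	int ret;

    	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
    	if (!chip)
    		return -ENOMEM;

    	/* ... fill in chip->gc callbacks, ngpio, of_node ... */

    	ret = gpiochip_add(&chip->gc);
    	if (ret)	/* may be -EPROBE_DEFER from of_gpiochip_add() */
    		return ret;

    	return 0;
    }
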
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index bf4bd1d..980c1f8 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -190,7 +190,7 @@
  */
 static int gpiochip_add_to_list(struct gpio_chip *chip)
 {
-	struct list_head *pos = &gpio_chips;
+	struct list_head *pos;
 	struct gpio_chip *_chip;
 	int err = 0;
 
@@ -287,7 +287,13 @@
 	INIT_LIST_HEAD(&chip->pin_ranges);
 #endif
 
-	of_gpiochip_add(chip);
+	if (!chip->owner && chip->dev && chip->dev->driver)
+		chip->owner = chip->dev->driver->owner;
+
+	status = of_gpiochip_add(chip);
+	if (status)
+		goto err_remove_chip;
+
 	acpi_gpiochip_add(chip);
 
 	status = gpiochip_sysfs_register(chip);
@@ -443,8 +449,8 @@
 		 * The parent irqchip is already using the chip_data for this
 		 * irqchip, so our callbacks simply use the handler_data.
 		 */
-		irq_set_handler_data(parent_irq, gpiochip);
-		irq_set_chained_handler(parent_irq, parent_handler);
+		irq_set_chained_handler_and_data(parent_irq, parent_handler,
+						 gpiochip);
 
 		gpiochip->irq_parent = parent_irq;
 	}
@@ -456,12 +462,6 @@
 }
 EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
 
-/*
- * This lock class tells lockdep that GPIO irqs are in a different
- * category than their parents, so it won't report false recursion.
- */
-static struct lock_class_key gpiochip_irq_lock_class;
-
 /**
  * gpiochip_irq_map() - maps an IRQ into a GPIO irqchip
  * @d: the irqdomain used by this irqchip
@@ -478,16 +478,17 @@
 	struct gpio_chip *chip = d->host_data;
 
 	irq_set_chip_data(irq, chip);
-	irq_set_lockdep_class(irq, &gpiochip_irq_lock_class);
+	/*
+	 * This lock class tells lockdep that GPIO irqs are in a different
+	 * category than their parents, so it won't report false recursion.
+	 */
+	irq_set_lockdep_class(irq, chip->lock_key);
 	irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
 	/* Chips that can sleep need nested thread handlers */
 	if (chip->can_sleep && !chip->irq_not_threaded)
 		irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
+
 	/*
 	 * No set-up of the hardware will happen if IRQ_TYPE_NONE
 	 * is passed as default type.
@@ -502,9 +503,6 @@
 {
 	struct gpio_chip *chip = d->host_data;
 
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, 0);
-#endif
 	if (chip->can_sleep)
 		irq_set_nested_thread(irq, 0);
 	irq_set_chip_and_handler(irq, NULL, NULL);
@@ -522,10 +520,14 @@
 {
 	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
 
+	if (!try_module_get(chip->owner))
+		return -ENODEV;
+
 	if (gpiochip_lock_as_irq(chip, d->hwirq)) {
 		chip_err(chip,
 			"unable to lock HW IRQ %lu for IRQ\n",
 			d->hwirq);
+		module_put(chip->owner);
 		return -EINVAL;
 	}
 	return 0;
@@ -536,6 +538,7 @@
 	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
 
 	gpiochip_unlock_as_irq(chip, d->hwirq);
+	module_put(chip->owner);
 }
 
 static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -584,6 +587,7 @@
  * @handler: the irq handler to use (often a predefined irq core function)
  * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
  * to have the core avoid setting up any default type in the hardware.
+ * @lock_key: lockdep class key used for the IRQs of this gpiochip
  *
  * This function closely associates a certain irqchip with a certain
  * gpiochip, providing an irq domain to translate the local IRQs to
@@ -599,11 +603,12 @@
  * the pins on the gpiochip can generate a unique IRQ. Everything else
  * need to be open coded.
  */
-int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
-			 struct irq_chip *irqchip,
-			 unsigned int first_irq,
-			 irq_flow_handler_t handler,
-			 unsigned int type)
+int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+			  struct irq_chip *irqchip,
+			  unsigned int first_irq,
+			  irq_flow_handler_t handler,
+			  unsigned int type,
+			  struct lock_class_key *lock_key)
 {
 	struct device_node *of_node;
 	unsigned int offset;
@@ -629,6 +634,7 @@
 	gpiochip->irq_handler = handler;
 	gpiochip->irq_default_type = type;
 	gpiochip->to_irq = gpiochip_to_irq;
+	gpiochip->lock_key = lock_key;
 	gpiochip->irqdomain = irq_domain_add_simple(of_node,
 					gpiochip->ngpio, first_irq,
 					&gpiochip_domain_ops, gpiochip);
@@ -636,8 +642,16 @@
 		gpiochip->irqchip = NULL;
 		return -EINVAL;
 	}
-	irqchip->irq_request_resources = gpiochip_irq_reqres;
-	irqchip->irq_release_resources = gpiochip_irq_relres;
+
+	/*
+	 * It is possible for a driver to override this, but only if the
+	 * alternative functions are both implemented.
+	 */
+	if (!irqchip->irq_request_resources &&
+	    !irqchip->irq_release_resources) {
+		irqchip->irq_request_resources = gpiochip_irq_reqres;
+		irqchip->irq_release_resources = gpiochip_irq_relres;
+	}
 
 	/*
 	 * Prepare the mapping since the irqchip shall be orthogonal to
@@ -658,7 +672,7 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(gpiochip_irqchip_add);
+EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add);
 
 #else /* CONFIG_GPIOLIB_IRQCHIP */
 
@@ -671,7 +685,7 @@
 /**
  * gpiochip_add_pingroup_range() - add a range for GPIO <-> pin mapping
  * @chip: the gpiochip to add the range for
- * @pinctrl: the dev_name() of the pin controller to map to
+ * @pctldev: the pin controller to map to
  * @gpio_offset: the start offset in the current gpio_chip number space
  * @pin_group: name of the pin group inside the pin controller
  */
@@ -1672,6 +1686,19 @@
 	mutex_unlock(&gpio_lookup_lock);
 }
 
+/**
+ * gpiod_remove_lookup_table() - unregister GPIO device consumers
+ * @table: table of consumers to unregister
+ */
+void gpiod_remove_lookup_table(struct gpiod_lookup_table *table)
+{
+	mutex_lock(&gpio_lookup_lock);
+
+	list_del(&table->list);
+
+	mutex_unlock(&gpio_lookup_lock);
+}
+
 static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
 				      unsigned int idx,
 				      enum gpio_lookup_flags *flags)
@@ -1894,12 +1921,12 @@
  * dev, -ENOENT if no GPIO has been assigned to the requested function, or
  * another IS_ERR() code if an error occurred while trying to acquire the GPIO.
  */
-struct gpio_desc *__must_check __gpiod_get(struct device *dev, const char *con_id,
+struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id,
 					 enum gpiod_flags flags)
 {
 	return gpiod_get_index(dev, con_id, 0, flags);
 }
-EXPORT_SYMBOL_GPL(__gpiod_get);
+EXPORT_SYMBOL_GPL(gpiod_get);
 
 /**
  * gpiod_get_optional - obtain an optional GPIO for a given GPIO function
@@ -1911,13 +1938,13 @@
  * the requested function it will return NULL. This is convenient for drivers
  * that need to handle optional GPIOs.
  */
-struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
 						  const char *con_id,
 						  enum gpiod_flags flags)
 {
 	return gpiod_get_index_optional(dev, con_id, 0, flags);
 }
-EXPORT_SYMBOL_GPL(__gpiod_get_optional);
+EXPORT_SYMBOL_GPL(gpiod_get_optional);
 
 
 /**
@@ -1974,7 +2001,7 @@
  * requested function and/or index, or another IS_ERR() code if an error
  * occurred while trying to acquire the GPIO.
  */
-struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
 					       const char *con_id,
 					       unsigned int idx,
 					       enum gpiod_flags flags)
@@ -2023,7 +2050,7 @@
 
 	return desc;
 }
-EXPORT_SYMBOL_GPL(__gpiod_get_index);
+EXPORT_SYMBOL_GPL(gpiod_get_index);
 
 /**
  * fwnode_get_named_gpiod - obtain a GPIO from firmware node
@@ -2092,7 +2119,7 @@
  * specified index was assigned to the requested function it will return NULL.
  * This is convenient for drivers that need to handle optional GPIOs.
  */
-struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
 							const char *con_id,
 							unsigned int index,
 							enum gpiod_flags flags)
@@ -2107,7 +2134,7 @@
 
 	return desc;
 }
-EXPORT_SYMBOL_GPL(__gpiod_get_index_optional);
+EXPORT_SYMBOL_GPL(gpiod_get_index_optional);
 
 /**
  * gpiod_hog - Hog the specified GPIO desc given the provided flags
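
[Editor's note] The new gpiod_remove_lookup_table() gives board files a teardown counterpart to gpiod_add_lookup_table(), so a machine driver that can be unloaded no longer leaves a stale entry on the lookup list. A sketch with a hypothetical chip label and consumer name:

    #include <linux/gpio/machine.h>

    static struct gpiod_lookup_table demo_gpios = {
    	.dev_id = "demo-device",	/* hypothetical consumer */
    	.table = {
    		/* chip label, hw offset, con_id, flags */
    		GPIO_LOOKUP("gpiochip0", 3, "reset", GPIO_ACTIVE_LOW),
    		{ },
    	},
    };

    static int __init demo_board_init(void)
    {
    	gpiod_add_lookup_table(&demo_gpios);
    	return 0;
    }

    static void __exit demo_board_exit(void)
    {
    	gpiod_remove_lookup_table(&demo_gpios);
    }
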
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c46ca31..1a0a8df 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -37,9 +37,29 @@
 	select FB
 	select FRAMEBUFFER_CONSOLE if !EXPERT
 	select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
+	select FB_SYS_FOPS
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
 	help
 	  FBDEV helpers for KMS drivers.
 
+config DRM_FBDEV_EMULATION
+	bool "Enable legacy fbdev support for your modesetting driver"
+	depends on DRM
+	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
+	default y
+	help
+	  Choose this option if you need legacy fbdev support. Note that
+	  this support also provides the Linux console on top of your
+	  modesetting driver.
+
+	  If in doubt, say "Y".
+
 config DRM_LOAD_EDID_FIRMWARE
 	bool "Allow to specify an EDID data set instead of probing for it"
 	depends on DRM_KMS_HELPER
@@ -79,8 +99,6 @@
 
 source "drivers/gpu/drm/i2c/Kconfig"
 
-source "drivers/gpu/drm/bridge/Kconfig"
-
 config DRM_TDFX
 	tristate "3dfx Banshee/Voodoo3+"
 	depends on DRM && PCI
@@ -110,6 +128,7 @@
 	select POWER_SUPPLY
 	select HWMON
 	select BACKLIGHT_CLASS_DEVICE
+	select BACKLIGHT_LCD_SUPPORT
 	select INTERVAL_TREE
 	help
 	  Choose this option if you have an ATI Radeon graphics card.  There
@@ -133,6 +152,7 @@
 	select POWER_SUPPLY
 	select HWMON
 	select BACKLIGHT_CLASS_DEVICE
+	select BACKLIGHT_LCD_SUPPORT
 	select INTERVAL_TREE
 	help
 	  Choose this option if you have a recent AMD Radeon graphics card.
@@ -231,10 +251,14 @@
 
 source "drivers/gpu/drm/msm/Kconfig"
 
+source "drivers/gpu/drm/fsl-dcu/Kconfig"
+
 source "drivers/gpu/drm/tegra/Kconfig"
 
 source "drivers/gpu/drm/panel/Kconfig"
 
+source "drivers/gpu/drm/bridge/Kconfig"
+
 source "drivers/gpu/drm/sti/Kconfig"
 
 source "drivers/gpu/drm/amd/amdkfd/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 5713d053..45e7719 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -23,7 +23,7 @@
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
 		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
-drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
+drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -70,3 +70,4 @@
 obj-y			+= i2c/
 obj-y			+= panel/
 obj-y			+= bridge/
+obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 616dfd4..04c2707 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -3,7 +3,9 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg \
-	-Idrivers/gpu/drm/amd/include
+	-Idrivers/gpu/drm/amd/include \
+	-Idrivers/gpu/drm/amd/amdgpu \
+	-Idrivers/gpu/drm/amd/scheduler
 
 amdgpu-y := amdgpu_drv.o
 
@@ -21,7 +23,8 @@
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
-	ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
+	ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
+	amdgpu_amdkfd_gfx_v7.o
 
 amdgpu-y += \
 	vi.o
@@ -43,6 +46,7 @@
 	amdgpu_dpm.o \
 	cz_smc.o cz_dpm.o \
 	tonga_smc.o tonga_dpm.o \
+	fiji_smc.o fiji_dpm.o \
 	iceland_smc.o iceland_dpm.o
 
 # add DCE block
@@ -71,6 +75,20 @@
 	amdgpu_vce.o \
 	vce_v3_0.o
 
+# add amdkfd interfaces
+amdgpu-y += \
+	 amdgpu_amdkfd.o \
+	 amdgpu_amdkfd_gfx_v8.o
+
+# add cgs
+amdgpu-y += amdgpu_cgs.o
+
+# GPU scheduler
+amdgpu-y += \
+	../scheduler/gpu_scheduler.o \
+	../scheduler/sched_fence.o \
+	amdgpu_sched.o
+
 amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
 amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
 amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index f7b49d5c..668939a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -42,17 +42,19 @@
 #include <ttm/ttm_module.h>
 #include <ttm/ttm_execbuf_util.h>
 
+#include <drm/drmP.h>
 #include <drm/drm_gem.h>
 #include <drm/amdgpu_drm.h>
 
 #include "amd_shared.h"
-#include "amdgpu_family.h"
 #include "amdgpu_mode.h"
 #include "amdgpu_ih.h"
 #include "amdgpu_irq.h"
 #include "amdgpu_ucode.h"
 #include "amdgpu_gds.h"
 
+#include "gpu_scheduler.h"
+
 /*
  * Modules parameters.
  */
@@ -77,7 +79,11 @@
 extern int amdgpu_deep_color;
 extern int amdgpu_vm_size;
 extern int amdgpu_vm_block_size;
+extern int amdgpu_enable_scheduler;
+extern int amdgpu_sched_jobs;
+extern int amdgpu_sched_hw_submission;
 
+#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
 #define AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
 /* AMDGPU_IB_POOL_SIZE must be a power of 2 */
@@ -92,6 +98,9 @@
 #define AMDGPU_MAX_COMPUTE_RINGS		8
 #define AMDGPU_MAX_VCE_RINGS			2
 
+/* max number of IP instances */
+#define AMDGPU_MAX_SDMA_INSTANCES		2
+
 /* number of hw syncs before falling back on blocking */
 #define AMDGPU_NUM_SYNCS			4
 
@@ -177,7 +186,9 @@
 struct amdgpu_ring;
 struct amdgpu_semaphore;
 struct amdgpu_cs_parser;
+struct amdgpu_job;
 struct amdgpu_irq_src;
+struct amdgpu_fpriv;
 
 enum amdgpu_cp_irq {
 	AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -239,7 +250,7 @@
 	unsigned	copy_num_dw;
 
 	/* used for buffer migration */
-	void (*emit_copy_buffer)(struct amdgpu_ring *ring,
+	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
 				 /* src addr in bytes */
 				 uint64_t src_offset,
 				 /* dst addr in bytes */
@@ -254,7 +265,7 @@
 	unsigned	fill_num_dw;
 
 	/* used for buffer clearing */
-	void (*emit_fill_buffer)(struct amdgpu_ring *ring,
+	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
 				 /* value to write to memory */
 				 uint32_t src_data,
 				 /* dst addr in bytes */
@@ -332,6 +343,8 @@
 	int (*test_ring)(struct amdgpu_ring *ring);
 	int (*test_ib)(struct amdgpu_ring *ring);
 	bool (*is_lockup)(struct amdgpu_ring *ring);
+	/* insert NOP packets */
+	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
 };
 
 /*
@@ -381,10 +394,10 @@
 	uint64_t			sync_seq[AMDGPU_MAX_RINGS];
 	atomic64_t			last_seq;
 	bool				initialized;
-	bool				delayed_irq;
 	struct amdgpu_irq_src		*irq_src;
 	unsigned			irq_type;
 	struct delayed_work             lockup_work;
+	wait_queue_head_t		fence_queue;
 };
 
 /* some special values for the owner field */
@@ -423,20 +436,20 @@
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   struct amdgpu_irq_src *irq_src,
 				   unsigned irq_type);
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 		      struct amdgpu_fence **fence);
-int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
-			  uint64_t seq, struct amdgpu_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
-bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
-int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
-			  struct amdgpu_fence **fences,
-			  bool intr);
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+				  struct fence **array,
+				  uint32_t count,
+				  bool intr,
+				  signed long t);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
 
@@ -481,7 +494,7 @@
 	return a->seq < b->seq;
 }
 
-int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user, 
+int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
 			   void *owner, struct amdgpu_fence **fence);
 
 /*
@@ -509,7 +522,7 @@
 		       uint64_t dst_offset,
 		       uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct amdgpu_fence **fence);
+		       struct fence **fence);
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 
 struct amdgpu_bo_list_entry {
@@ -532,14 +545,16 @@
 struct amdgpu_bo_va {
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
-	uint64_t			addr;
-	struct amdgpu_fence		*last_pt_update;
+	struct fence		        *last_pt_update;
 	unsigned			ref_count;
 
-	/* protected by vm mutex */
-	struct list_head		mappings;
+	/* protected by vm mutex and spinlock */
 	struct list_head		vm_status;
 
+	/* mappings for this bo_va */
+	struct list_head		invalids;
+	struct list_head		valids;
+
 	/* constant after initialization */
 	struct amdgpu_vm		*vm;
 	struct amdgpu_bo		*bo;
@@ -643,7 +658,7 @@
 	struct amdgpu_sa_manager	*manager;
 	unsigned			soffset;
 	unsigned			eoffset;
-	struct amdgpu_fence		*fence;
+	struct fence		        *fence;
 };
 
 /*
@@ -685,7 +700,7 @@
 				struct amdgpu_semaphore *semaphore);
 void amdgpu_semaphore_free(struct amdgpu_device *adev,
 			   struct amdgpu_semaphore **semaphore,
-			   struct amdgpu_fence *fence);
+			   struct fence *fence);
 
 /*
  * Synchronization
@@ -693,20 +708,23 @@
 struct amdgpu_sync {
 	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
 	struct amdgpu_fence	*sync_to[AMDGPU_MAX_RINGS];
-	struct amdgpu_fence	*last_vm_update;
+	DECLARE_HASHTABLE(fences, 4);
+	struct fence	        *last_vm_update;
 };
 
 void amdgpu_sync_create(struct amdgpu_sync *sync);
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
-		       struct amdgpu_fence *fence);
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		      struct fence *f);
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
 		     struct reservation_object *resv,
 		     void *owner);
 int amdgpu_sync_rings(struct amdgpu_sync *sync,
 		      struct amdgpu_ring *ring);
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct amdgpu_fence *fence);
+		      struct fence *fence);
 
 /*
  * GART structures, functions & helpers
@@ -821,7 +839,9 @@
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
 	struct amdgpu_bo		*old_rbo;
-	struct fence			*fence;
+	struct fence			*excl;
+	unsigned			shared_count;
+	struct fence			**shared;
 };
 
 
@@ -844,6 +864,8 @@
 	uint32_t			gws_base, gws_size;
 	uint32_t			oa_base, oa_size;
 	uint32_t			flags;
+	/* resulting sequence number */
+	uint64_t			sequence;
 };
 
 enum amdgpu_ring_type {
@@ -854,11 +876,23 @@
 	AMDGPU_RING_TYPE_VCE
 };
 
+extern struct amd_sched_backend_ops amdgpu_sched_ops;
+
+int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
+					 struct amdgpu_ring *ring,
+					 struct amdgpu_ib *ibs,
+					 unsigned num_ibs,
+					 int (*free_job)(struct amdgpu_job *),
+					 void *owner,
+					 struct fence **fence);
+
 struct amdgpu_ring {
 	struct amdgpu_device		*adev;
 	const struct amdgpu_ring_funcs	*funcs;
 	struct amdgpu_fence_driver	fence_drv;
+	struct amd_gpu_scheduler 	*scheduler;
 
+	spinlock_t              fence_lock;
 	struct mutex		*ring_lock;
 	struct amdgpu_bo	*ring_obj;
 	volatile uint32_t	*ring;
@@ -892,6 +926,7 @@
 	struct amdgpu_ctx	*current_ctx;
 	enum amdgpu_ring_type	type;
 	char			name[16];
+	bool                    is_pte_ring;
 };
 
 /*
@@ -933,7 +968,7 @@
 	unsigned		id;
 	uint64_t		pd_gpu_addr;
 	/* last flushed PD/PT update */
-	struct amdgpu_fence	*flushed_updates;
+	struct fence	        *flushed_updates;
 	/* last use of vmid */
 	struct amdgpu_fence	*last_id_use;
 };
@@ -943,18 +978,22 @@
 
 	struct rb_root		va;
 
-	/* protecting invalidated and freed */
+	/* protecting invalidated */
 	spinlock_t		status_lock;
 
 	/* BOs moved, but not yet updated in the PT */
 	struct list_head	invalidated;
 
-	/* BOs freed, but not yet updated in the PT */
+	/* BOs cleared in the PT because of a move */
+	struct list_head	cleared;
+
+	/* BO mappings freed, but not yet updated in the PT */
 	struct list_head	freed;
 
 	/* contains the page directory */
 	struct amdgpu_bo	*page_directory;
 	unsigned		max_pde_used;
+	struct fence		*page_directory_fence;
 
 	/* array of page tables, one for each page directory entry */
 	struct amdgpu_vm_pt	*page_tables;
@@ -983,27 +1022,47 @@
  * context related structures
  */
 
-struct amdgpu_ctx_state {
-	uint64_t flags;
-	uint32_t hangs;
+#define AMDGPU_CTX_MAX_CS_PENDING	16
+
+struct amdgpu_ctx_ring {
+	uint64_t		sequence;
+	struct fence		*fences[AMDGPU_CTX_MAX_CS_PENDING];
+	struct amd_sched_entity	entity;
 };
 
 struct amdgpu_ctx {
-	/* call kref_get()before CS start and kref_put() after CS fence signaled */
-	struct kref refcount;
-	struct amdgpu_fpriv *fpriv;
-	struct amdgpu_ctx_state state;
-	uint32_t id;
-	unsigned reset_counter;
+	struct kref		refcount;
+	struct amdgpu_device    *adev;
+	unsigned		reset_counter;
+	spinlock_t		ring_lock;
+	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
 };
 
 struct amdgpu_ctx_mgr {
-	struct amdgpu_device *adev;
-	struct idr ctx_handles;
-	/* lock for IDR system */
-	struct mutex lock;
+	struct amdgpu_device	*adev;
+	struct mutex		lock;
+	/* protected by lock */
+	struct idr		ctx_handles;
 };
 
+int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+		    struct amdgpu_ctx *ctx);
+void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
+
+struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
+int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+			      struct fence *fence);
+struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+				   struct amdgpu_ring *ring, uint64_t seq);
+
+int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *filp);
+
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
+
 /*
  * file private structure
  */
@@ -1012,7 +1071,7 @@
 	struct amdgpu_vm	vm;
 	struct mutex		bo_list_lock;
 	struct idr		bo_list_handles;
-	struct amdgpu_ctx_mgr ctx_mgr;
+	struct amdgpu_ctx_mgr	ctx_mgr;
 };
 
 /*
@@ -1160,6 +1219,7 @@
 void amdgpu_ring_free_size(struct amdgpu_ring *ring);
 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
 int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
@@ -1207,6 +1267,16 @@
 	struct amdgpu_user_fence uf;
 };
 
+struct amdgpu_job {
+	struct amd_sched_job    base;
+	struct amdgpu_device	*adev;
+	struct amdgpu_ib	*ibs;
+	uint32_t		num_ibs;
+	struct mutex            job_lock;
+	struct amdgpu_user_fence uf;
+	int (*free_job)(struct amdgpu_job *sched_job);
+};
+
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
 {
 	return p->ibs[ib_idx].ptr[idx];
@@ -1601,7 +1671,6 @@
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
-	void			*saved_bo;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;
@@ -1645,6 +1714,7 @@
 	uint32_t		feature_version;
 
 	struct amdgpu_ring	ring;
+	bool			burst_nop;
 };
 
 /*
@@ -1849,17 +1919,12 @@
 	struct amdgpu_atcs_functions functions;
 };
 
-int amdgpu_ctx_alloc(struct amdgpu_device *adev,struct amdgpu_fpriv *fpriv,
-							uint32_t *id,uint32_t flags);
-int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
-						  uint32_t id);
+/*
+ * CGS
+ */
+void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
+void amdgpu_cgs_destroy_device(void *cgs_device);
 
-void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv);
-struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
-int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
-
-extern int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
-						 struct drm_file *filp);
 
 /*
  * Core structure, functions and helpers.
@@ -1883,7 +1948,7 @@
 	struct rw_semaphore		exclusive_lock;
 
 	/* ASIC */
-	enum amdgpu_asic_type           asic_type;
+	enum amd_asic_type		asic_type;
 	uint32_t			family;
 	uint32_t			rev_id;
 	uint32_t			external_rev_id;
@@ -1976,7 +2041,6 @@
 	struct amdgpu_irq_src		hpd_irq;
 
 	/* rings */
-	wait_queue_head_t		fence_queue;
 	unsigned			fence_context;
 	struct mutex			ring_lock;
 	unsigned			num_rings;
@@ -1999,7 +2063,7 @@
 	struct amdgpu_gfx		gfx;
 
 	/* sdma */
-	struct amdgpu_sdma		sdma[2];
+	struct amdgpu_sdma		sdma[AMDGPU_MAX_SDMA_INSTANCES];
 	struct amdgpu_irq_src		sdma_trap_irq;
 	struct amdgpu_irq_src		sdma_illegal_inst_irq;
 
@@ -2025,6 +2089,12 @@
 	/* tracking pinned memory */
 	u64 vram_pin_size;
 	u64 gart_pin_size;
+
+	/* amdkfd interface */
+	struct kfd_dev          *kfd;
+
+	/* kernel context for IB submission */
+	struct amdgpu_ctx	kernel_ctx;
 };
 
 bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2132,6 +2202,21 @@
 	ring->ring_free_dw--;
 }
 
+static inline struct amdgpu_sdma *amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	int i;
+
+	for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
+		if (&adev->sdma[i].ring == ring)
+			break;
+
+	if (i < AMDGPU_MAX_SDMA_INSTANCES)
+		return &adev->sdma[i];
+	else
+		return NULL;
+}
+
 /*
  * ASICs macro.
  */
@@ -2183,8 +2268,8 @@
 #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
 #define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
 #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
-#define amdgpu_emit_copy_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((r), (s), (d), (b))
-#define amdgpu_emit_fill_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((r), (s), (d), (b))
+#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib),  (s), (d), (b))
+#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
 #define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
 #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
 #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
@@ -2212,6 +2297,12 @@
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
+struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
+						 struct drm_file *filp,
+						 struct amdgpu_ctx *ctx,
+						 struct amdgpu_ib *ibs,
+						 uint32_t num_ibs);
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		       u32 ip_instance, u32 ring,
@@ -2275,11 +2366,11 @@
 struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
 					  struct amdgpu_vm *vm,
 					  struct list_head *head);
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
-				       struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		     struct amdgpu_vm *vm,
-		     struct amdgpu_fence *updates);
+		     struct fence *updates);
 void amdgpu_vm_fence(struct amdgpu_device *adev,
 		     struct amdgpu_vm *vm,
 		     struct amdgpu_fence *fence);
@@ -2309,7 +2400,7 @@
 		       uint64_t addr);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		      struct amdgpu_bo_va *bo_va);
-
+int amdgpu_vm_free_job(struct amdgpu_job *job);
 /*
  * functions used by amdgpu_encoder.c
  */
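
[Editor's note] The new per-ring context state keeps the most recent AMDGPU_CTX_MAX_CS_PENDING fences in a fixed array indexed by submission sequence number. Since the constant is a power of two, a masked index suffices to map a 64-bit sequence onto a slot; the following sketch shows that indexing as an assumption about the scheme, not the in-tree implementation:

    /* Assumed slot mapping: sequence numbers advance monotonically and
     * the window holds the last AMDGPU_CTX_MAX_CS_PENDING fences. */
    static struct fence *ctx_ring_slot(struct amdgpu_ctx_ring *cring,
    				   uint64_t seq)
    {
    	return cring->fences[seq & (AMDGPU_CTX_MAX_CS_PENDING - 1)];
    }
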
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
new file mode 100644
index 0000000..496ed21
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu_amdkfd.h"
+#include "amd_shared.h"
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include <linux/module.h>
+
+const struct kfd2kgd_calls *kfd2kgd;
+const struct kgd2kfd_calls *kgd2kfd;
+bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+
+bool amdgpu_amdkfd_init(void)
+{
+#if defined(CONFIG_HSA_AMD_MODULE)
+	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+
+	kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+
+	if (kgd2kfd_init_p == NULL)
+		return false;
+#endif
+	return true;
+}
+
+bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
+{
+#if defined(CONFIG_HSA_AMD_MODULE)
+	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+#endif
+
+	switch (rdev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_CIK
+	case CHIP_KAVERI:
+		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
+		break;
+#endif
+	case CHIP_CARRIZO:
+		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
+		break;
+	default:
+		return false;
+	}
+
+#if defined(CONFIG_HSA_AMD_MODULE)
+	kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+
+	if (kgd2kfd_init_p == NULL) {
+		kfd2kgd = NULL;
+		return false;
+	}
+
+	if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+		symbol_put(kgd2kfd_init);
+		kfd2kgd = NULL;
+		kgd2kfd = NULL;
+
+		return false;
+	}
+
+	return true;
+#elif defined(CONFIG_HSA_AMD)
+	if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
+		kfd2kgd = NULL;
+		kgd2kfd = NULL;
+		return false;
+	}
+
+	return true;
+#else
+	kfd2kgd = NULL;
+	return false;
+#endif
+}
+
+void amdgpu_amdkfd_fini(void)
+{
+	if (kgd2kfd) {
+		kgd2kfd->exit();
+		symbol_put(kgd2kfd_init);
+	}
+}
+
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev)
+{
+	if (kgd2kfd)
+		rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
+					rdev->pdev, kfd2kgd);
+}
+
+void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev)
+{
+	if (rdev->kfd) {
+		struct kgd2kfd_shared_resources gpu_resources = {
+			.compute_vmid_bitmap = 0xFF00,
+
+			.first_compute_pipe = 1,
+			.compute_pipe_count = 4 - 1,
+		};
+
+		amdgpu_doorbell_get_kfd_info(rdev,
+				&gpu_resources.doorbell_physical_address,
+				&gpu_resources.doorbell_aperture_size,
+				&gpu_resources.doorbell_start_offset);
+
+		kgd2kfd->device_init(rdev->kfd, &gpu_resources);
+	}
+}
+
+void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev)
+{
+	if (rdev->kfd) {
+		kgd2kfd->device_exit(rdev->kfd);
+		rdev->kfd = NULL;
+	}
+}
+
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+		const void *ih_ring_entry)
+{
+	if (rdev->kfd)
+		kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
+}
+
+void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev)
+{
+	if (rdev->kfd)
+		kgd2kfd->suspend(rdev->kfd);
+}
+
+int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
+{
+	int r = 0;
+
+	if (rdev->kfd)
+		r = kgd2kfd->resume(rdev->kfd);
+
+	return r;
+}
+
+u32 pool_to_domain(enum kgd_memory_pool p)
+{
+	switch (p) {
+	case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
+	default: return AMDGPU_GEM_DOMAIN_GTT;
+	}
+}
+
+int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+			void **mem_obj, uint64_t *gpu_addr,
+			void **cpu_ptr)
+{
+	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+	struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
+	int r;
+
+	BUG_ON(kgd == NULL);
+	BUG_ON(gpu_addr == NULL);
+	BUG_ON(cpu_ptr == NULL);
+
+	*mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+	if ((*mem) == NULL)
+		return -ENOMEM;
+
+	r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
+			AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo);
+	if (r) {
+		dev_err(rdev->dev,
+			"failed to allocate BO for amdkfd (%d)\n", r);
+		kfree(*mem);
+		return r;
+	}
+
+	/* map the buffer */
+	r = amdgpu_bo_reserve((*mem)->bo, true);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
+		goto allocate_mem_reserve_bo_failed;
+	}
+
+	r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
+				&(*mem)->gpu_addr);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
+		goto allocate_mem_pin_bo_failed;
+	}
+	*gpu_addr = (*mem)->gpu_addr;
+
+	r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
+	if (r) {
+		dev_err(rdev->dev,
+			"(%d) failed to map bo to kernel for amdkfd\n", r);
+		goto allocate_mem_kmap_bo_failed;
+	}
+	*cpu_ptr = (*mem)->cpu_ptr;
+
+	amdgpu_bo_unreserve((*mem)->bo);
+
+	return 0;
+
+allocate_mem_kmap_bo_failed:
+	amdgpu_bo_unpin((*mem)->bo);
+allocate_mem_pin_bo_failed:
+	amdgpu_bo_unreserve((*mem)->bo);
+allocate_mem_reserve_bo_failed:
+	amdgpu_bo_unref(&(*mem)->bo);
+
+	return r;
+}
+
+void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+{
+	struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
+
+	BUG_ON(mem == NULL);
+
+	amdgpu_bo_reserve(mem->bo, true);
+	amdgpu_bo_kunmap(mem->bo);
+	amdgpu_bo_unpin(mem->bo);
+	amdgpu_bo_unreserve(mem->bo);
+	amdgpu_bo_unref(&(mem->bo));
+	kfree(mem);
+}
+
+uint64_t get_vmem_size(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *rdev =
+		(struct amdgpu_device *)kgd;
+
+	BUG_ON(kgd == NULL);
+
+	return rdev->mc.real_vram_size;
+}
+
+uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+
+	if (rdev->asic_funcs->get_gpu_clock_counter)
+		return rdev->asic_funcs->get_gpu_clock_counter(rdev);
+	return 0;
+}
+
+uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+
+	/* The sclk is in quanta of 10 kHz */
+	return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
+}
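
[Editor's note] On the amdkfd side, the shared alloc_gtt_mem()/free_gtt_mem() pair is reached through the kfd2kgd function table rather than called directly. A usage sketch from a hypothetical amdkfd caller holding a struct kfd_dev (error handling trimmed):

    	void *mem_obj;
    	uint64_t gpu_addr;
    	void *cpu_ptr;
    	int r;

    	/* kfd->kgd and the kfd2kgd ops are wired up by kgd2kfd->probe() */
    	r = kfd2kgd->init_gtt_mem_allocation(kfd->kgd, PAGE_SIZE,
    					     &mem_obj, &gpu_addr, &cpu_ptr);
    	if (r)
    		return r;

    	/* ... use cpu_ptr / gpu_addr, e.g. for a queue descriptor ... */

    	kfd2kgd->free_gtt_mem(kfd->kgd, mem_obj);
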
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
new file mode 100644
index 0000000..a8be765
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */
+
+#ifndef AMDGPU_AMDKFD_H_INCLUDED
+#define AMDGPU_AMDKFD_H_INCLUDED
+
+#include <linux/types.h>
+#include <kgd_kfd_interface.h>
+
+struct amdgpu_device;
+
+struct kgd_mem {
+	struct amdgpu_bo *bo;
+	uint64_t gpu_addr;
+	void *cpu_ptr;
+};
+
+bool amdgpu_amdkfd_init(void);
+void amdgpu_amdkfd_fini(void);
+
+bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev);
+
+void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev);
+int amdgpu_amdkfd_resume(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+			const void *ih_ring_entry);
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev);
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
+
+/* Shared API */
+int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+			void **mem_obj, uint64_t *gpu_addr,
+			void **cpu_ptr);
+void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+uint64_t get_vmem_size(struct kgd_dev *kgd);
+uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
+
+uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+
+#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
new file mode 100644
index 0000000..dd2037b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -0,0 +1,670 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "cikd.h"
+#include "cik_sdma.h"
+#include "amdgpu_ucode.h"
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_enum.h"
+#include "gca/gfx_7_2_sh_mask.h"
+#include "oss/oss_2_0_d.h"
+#include "oss/oss_2_0_sh_mask.h"
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+#include "cik_structs.h"
+
+#define CIK_PIPE_PER_MEC	(4)
+
+enum {
+	MAX_TRAPID = 8,		/* 3 bits in the bitfield. */
+	MAX_WATCH_ADDRESSES = 4
+};
+
+enum {
+	ADDRESS_WATCH_REG_ADDR_HI = 0,
+	ADDRESS_WATCH_REG_ADDR_LO,
+	ADDRESS_WATCH_REG_CNTL,
+	ADDRESS_WATCH_REG_MAX
+};
+
+/*  not defined in the CI/KV reg file  */
+enum {
+	ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
+	ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
+	ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
+	/* extend the mask to 26 bits to match the low address field */
+	ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
+	ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
+};
+
+static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
+	mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
+	mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
+	mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
+	mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
+};
+
+union TCP_WATCH_CNTL_BITS {
+	struct {
+		uint32_t mask:24;
+		uint32_t vmid:4;
+		uint32_t atc:1;
+		uint32_t mode:2;
+		uint32_t valid:1;
+	} bitfields, bits;
+	uint32_t u32All;
+	signed int i32All;
+	float f32All;
+};
+
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+		uint32_t sh_mem_config,	uint32_t sh_mem_ape1_base,
+		uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+					unsigned int vmid);
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t hpd_size, uint64_t hpd_gpu_addr);
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+			uint32_t queue_id, uint32_t __user *wptr);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+				uint32_t pipe_id, uint32_t queue_id);
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+				unsigned int timeout, uint32_t pipe_id,
+				uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int timeout);
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+							uint8_t vmid);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+
+static const struct kfd2kgd_calls kfd2kgd = {
+	.init_gtt_mem_allocation = alloc_gtt_mem,
+	.free_gtt_mem = free_gtt_mem,
+	.get_vmem_size = get_vmem_size,
+	.get_gpu_clock_counter = get_gpu_clock_counter,
+	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+	.program_sh_mem_settings = kgd_program_sh_mem_settings,
+	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+	.init_pipeline = kgd_init_pipeline,
+	.init_interrupts = kgd_init_interrupts,
+	.hqd_load = kgd_hqd_load,
+	.hqd_sdma_load = kgd_hqd_sdma_load,
+	.hqd_is_occupied = kgd_hqd_is_occupied,
+	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+	.hqd_destroy = kgd_hqd_destroy,
+	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+	.address_watch_disable = kgd_address_watch_disable,
+	.address_watch_execute = kgd_address_watch_execute,
+	.wave_control_execute = kgd_wave_control_execute,
+	.address_watch_get_offset = kgd_address_watch_get_offset,
+	.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
+	.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
+	.write_vmid_invalidate_request = write_vmid_invalidate_request,
+	.get_fw_version = get_fw_version
+};
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
+{
+	return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
+static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+{
+	return (struct amdgpu_device *)kgd;
+}
+
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+			uint32_t queue, uint32_t vmid)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
+
+	mutex_lock(&adev->srbm_mutex);
+	WREG32(mmSRBM_GFX_CNTL, value);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	WREG32(mmSRBM_GFX_CNTL, 0);
+	mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t queue_id)
+{
+	uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+	uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+	unlock_srbm(kgd);
+}
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+					uint32_t sh_mem_config,
+					uint32_t sh_mem_ape1_base,
+					uint32_t sh_mem_ape1_limit,
+					uint32_t sh_mem_bases)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	lock_srbm(kgd, 0, 0, 0, vmid);
+
+	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
+	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
+	WREG32(mmSH_MEM_BASES, sh_mem_bases);
+
+	unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+					unsigned int vmid)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	/*
+	 * We have to assume that there is no outstanding mapping.
+	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+	 * a mapping is in progress or because a mapping finished and the
+	 * SW cleared it. So the protocol is to always wait & clear.
+	 */
+	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+			ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
+
+	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
+		cpu_relax();
+	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+
+	/* Mapping vmid to pasid also for IH block */
+	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
+
+	return 0;
+}
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t hpd_size, uint64_t hpd_gpu_addr)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+	uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, 0, 0);
+	WREG32(mmCP_HPD_EOP_BASE_ADDR, lower_32_bits(hpd_gpu_addr >> 8));
+	WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(hpd_gpu_addr >> 8));
+	WREG32(mmCP_HPD_EOP_VMID, 0);
+	WREG32(mmCP_HPD_EOP_CONTROL, hpd_size);
+	unlock_srbm(kgd);
+
+	return 0;
+}
+
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t mec;
+	uint32_t pipe;
+
+	mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
+	pipe = (pipe_id % CIK_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, 0, 0);
+
+	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
+			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
+
+	unlock_srbm(kgd);
+
+	return 0;
+}
+
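+/*
+ * Each SDMA engine, and each RLC queue within an engine, has its own
+ * copy of the RLC register bank; the MQD carries the engine/queue ids,
+ * so the bank offset is a strided sum of the two.
+ */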
+static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+{
+	uint32_t retval;
+
+	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
+			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
+
+	pr_debug("kfd: sdma base address: 0x%x\n", retval);
+
+	return retval;
+}
+
+static inline struct cik_mqd *get_mqd(void *mqd)
+{
+	return (struct cik_mqd *)mqd;
+}
+
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+	return (struct cik_sdma_rlc_registers *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+			uint32_t queue_id, uint32_t __user *wptr)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t wptr_shadow, is_wptr_shadow_valid;
+	struct cik_mqd *m;
+
+	m = get_mqd(mqd);
+
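+	/* get_user() returns 0 on success, so the wptr shadow is valid
+	 * only if the user-space read actually succeeded.
+	 */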
+	is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
+
+	acquire_queue(kgd, pipe_id, queue_id);
+	WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
+	WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+	WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
+
+	WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
+	WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
+	WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+
+	WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+	WREG32(mmCP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
+	WREG32(mmCP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);
+
+	WREG32(mmCP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);
+
+	WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
+	WREG32(mmCP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
+	WREG32(mmCP_HQD_MSG_TYPE, m->cp_hqd_msg_type);
+
+	WREG32(mmCP_HQD_ATOMIC0_PREOP_LO, m->cp_hqd_atomic0_preop_lo);
+	WREG32(mmCP_HQD_ATOMIC0_PREOP_HI, m->cp_hqd_atomic0_preop_hi);
+	WREG32(mmCP_HQD_ATOMIC1_PREOP_LO, m->cp_hqd_atomic1_preop_lo);
+	WREG32(mmCP_HQD_ATOMIC1_PREOP_HI, m->cp_hqd_atomic1_preop_hi);
+
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+			m->cp_hqd_pq_rptr_report_addr_hi);
+
+	WREG32(mmCP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);
+
+	WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, m->cp_hqd_pq_wptr_poll_addr_lo);
+	WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, m->cp_hqd_pq_wptr_poll_addr_hi);
+
+	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
+
+	WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
+
+	WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
+
+	WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
+	WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+
+	WREG32(mmCP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);
+
+	if (is_wptr_shadow_valid)
+		WREG32(mmCP_HQD_PQ_WPTR, wptr_shadow);
+
+	WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
+	release_queue(kgd);
+
+	return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+			m->sdma_rlc_virtual_addr);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
+			m->sdma_rlc_rb_base);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+			m->sdma_rlc_rb_base_hi);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+			m->sdma_rlc_rb_rptr_addr_lo);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+			m->sdma_rlc_rb_rptr_addr_hi);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+			m->sdma_rlc_doorbell);
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+			m->sdma_rlc_rb_cntl);
+
+	return 0;
+}
+
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+				uint32_t pipe_id, uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t act;
+	bool retval = false;
+	uint32_t low, high;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+	act = RREG32(mmCP_HQD_ACTIVE);
+	if (act) {
+		low = lower_32_bits(queue_address >> 8);
+		high = upper_32_bits(queue_address >> 8);
+
+		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
+				high == RREG32(mmCP_HQD_PQ_BASE_HI))
+			retval = true;
+	}
+	release_queue(kgd);
+	return retval;
+}
+
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+	uint32_t sdma_rlc_rb_cntl;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+
+	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+		return true;
+
+	return false;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+				unsigned int timeout, uint32_t pipe_id,
+				uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t temp;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
+
+	WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+
+	while (true) {
+		temp = RREG32(mmCP_HQD_ACTIVE);
+		if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
+			break;
+		if (timeout == 0) {
+			pr_err("kfd: cp queue preemption timed out, CP_HQD_ACTIVE=0x%x\n",
+				temp);
+			release_queue(kgd);
+			return -ETIME;
+		}
+		msleep(20);
+		timeout -= 20;
+	}
+
+	release_queue(kgd);
+	return 0;
+}
+
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int timeout)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+	uint32_t temp;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+
+	while (true) {
+		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+		if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE_MASK)
+			break;
+		if (timeout == 0)
+			return -ETIME;
+		msleep(20);
+		timeout -= 20;
+	}
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+
+	return 0;
+}
+
+static int kgd_address_watch_disable(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	union TCP_WATCH_CNTL_BITS cntl;
+	unsigned int i;
+
+	cntl.u32All = 0;
+
+	cntl.bitfields.valid = 0;
+	cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
+	cntl.bitfields.atc = 1;
+
+	/* Turning off this address until we set all the registers */
+	for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
+		WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
+			ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+	return 0;
+}
+
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	union TCP_WATCH_CNTL_BITS cntl;
+
+	cntl.u32All = cntl_val;
+
+	/* Turning off this watch point until we set all the registers */
+	cntl.bitfields.valid = 0;
+	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+		ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+		ADDRESS_WATCH_REG_ADDR_HI], addr_hi);
+
+	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+		ADDRESS_WATCH_REG_ADDR_LO], addr_lo);
+
+	/* Enable the watch point */
+	cntl.bitfields.valid = 1;
+
+	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
+		ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+
+	return 0;
+}
+
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t data;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+
+	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
+	WREG32(mmSQ_CMD, sq_cmd);
+
+	/*  Restore the GRBM_GFX_INDEX register  */
+
+	data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
+		GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
+		GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
+
+	WREG32(mmGRBM_GFX_INDEX, data);
+
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return 0;
+}
+
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset)
+{
+	return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
+}
+
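+/*
+ * ATC_VMID<n>_PASID_MAPPING packs the PASID into its low bits with a
+ * VALID flag in the high bit; the two helpers below pick those fields
+ * apart.
+ */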
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+							uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+								uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+}
+
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+	const union amdgpu_firmware_header *hdr;
+
+	BUG_ON(kgd == NULL);
+
+	switch (type) {
+	case KGD_ENGINE_PFP:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.pfp_fw->data;
+		break;
+
+	case KGD_ENGINE_ME:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.me_fw->data;
+		break;
+
+	case KGD_ENGINE_CE:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.ce_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC1:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.mec_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC2:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.mec2_fw->data;
+		break;
+
+	case KGD_ENGINE_RLC:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.rlc_fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA1:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->sdma[0].fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA2:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->sdma[1].fw->data;
+		break;
+
+	default:
+		return 0;
+	}
+
+	if (hdr == NULL)
+		return 0;
+
+	/* Only 12 bits in use */
+	return hdr->common.ucode_version;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
new file mode 100644
index 0000000..dfd1d50
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/fdtable.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_ucode.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_enum.h"
+#include "oss/oss_3_0_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "vi_structs.h"
+#include "vid.h"
+
+#define VI_PIPE_PER_MEC	(4)
+
+struct cik_sdma_rlc_registers;
+
+/*
+ * Register access functions
+ */
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+		uint32_t sh_mem_config,
+		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
+		uint32_t sh_mem_bases);
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+		unsigned int vmid);
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+		uint32_t hpd_size, uint64_t hpd_gpu_addr);
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+		uint32_t queue_id, uint32_t __user *wptr);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+		uint32_t pipe_id, uint32_t queue_id);
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+				unsigned int timeout, uint32_t pipe_id,
+				uint32_t queue_id);
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int timeout);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+static int kgd_address_watch_disable(struct kgd_dev *kgd);
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo);
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd);
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset);
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+		uint8_t vmid);
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+		uint8_t vmid);
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+
+static const struct kfd2kgd_calls kfd2kgd = {
+	.init_gtt_mem_allocation = alloc_gtt_mem,
+	.free_gtt_mem = free_gtt_mem,
+	.get_vmem_size = get_vmem_size,
+	.get_gpu_clock_counter = get_gpu_clock_counter,
+	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+	.program_sh_mem_settings = kgd_program_sh_mem_settings,
+	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+	.init_pipeline = kgd_init_pipeline,
+	.init_interrupts = kgd_init_interrupts,
+	.hqd_load = kgd_hqd_load,
+	.hqd_sdma_load = kgd_hqd_sdma_load,
+	.hqd_is_occupied = kgd_hqd_is_occupied,
+	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+	.hqd_destroy = kgd_hqd_destroy,
+	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+	.address_watch_disable = kgd_address_watch_disable,
+	.address_watch_execute = kgd_address_watch_execute,
+	.wave_control_execute = kgd_wave_control_execute,
+	.address_watch_get_offset = kgd_address_watch_get_offset,
+	.get_atc_vmid_pasid_mapping_pasid =
+			get_atc_vmid_pasid_mapping_pasid,
+	.get_atc_vmid_pasid_mapping_valid =
+			get_atc_vmid_pasid_mapping_valid,
+	.write_vmid_invalidate_request = write_vmid_invalidate_request,
+	.get_fw_version = get_fw_version
+};
+
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
+{
+	return (struct kfd2kgd_calls *)&kfd2kgd;
+}
+
+static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
+{
+	return (struct amdgpu_device *)kgd;
+}
+
+static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+			uint32_t queue, uint32_t vmid)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
+
+	mutex_lock(&adev->srbm_mutex);
+	WREG32(mmSRBM_GFX_CNTL, value);
+}
+
+static void unlock_srbm(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	WREG32(mmSRBM_GFX_CNTL, 0);
+	mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t queue_id)
+{
+	uint32_t mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
+	uint32_t pipe = (pipe_id % VI_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct kgd_dev *kgd)
+{
+	unlock_srbm(kgd);
+}
+
+static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+					uint32_t sh_mem_config,
+					uint32_t sh_mem_ape1_base,
+					uint32_t sh_mem_ape1_limit,
+					uint32_t sh_mem_bases)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	lock_srbm(kgd, 0, 0, 0, vmid);
+
+	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
+	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
+	WREG32(mmSH_MEM_BASES, sh_mem_bases);
+
+	unlock_srbm(kgd);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
+					unsigned int vmid)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	/*
+	 * We have to assume that there is no outstanding mapping.
+	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+	 * a mapping is in progress or because a mapping finished
+	 * and the SW cleared it.
+	 * So the protocol is to always wait & clear.
+	 */
+	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+			ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
+
+	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
+		cpu_relax();
+	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+
+	/* Mapping vmid to pasid also for IH block */
+	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
+
+	return 0;
+}
+
+static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
+				uint32_t hpd_size, uint64_t hpd_gpu_addr)
+{
+	return 0;
+}
+
+static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t mec;
+	uint32_t pipe;
+
+	mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
+	pipe = (pipe_id % VI_PIPE_PER_MEC);
+
+	lock_srbm(kgd, mec, pipe, 0, 0);
+
+	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);
+
+	unlock_srbm(kgd);
+
+	return 0;
+}
+
+static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+{
+	return 0;
+}
+
+static inline struct vi_mqd *get_mqd(void *mqd)
+{
+	return (struct vi_mqd *)mqd;
+}
+
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+	return (struct cik_sdma_rlc_registers *)mqd;
+}
+
+static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
+			uint32_t queue_id, uint32_t __user *wptr)
+{
+	struct vi_mqd *m;
+	uint32_t shadow_wptr, valid_wptr;
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+	m = get_mqd(mqd);
+
+	valid_wptr = copy_from_user(&shadow_wptr, wptr, sizeof(shadow_wptr));
+	acquire_queue(kgd, pipe_id, queue_id);
+
+	WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
+	WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
+	WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+
+	WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
+	WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
+	WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
+	WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+	WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
+	WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
+	WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
+	WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+			m->cp_hqd_pq_rptr_report_addr_hi);
+
+	/* copy_from_user() returns the number of bytes it could NOT copy,
+	 * so a return of 0 means the wptr shadow was read successfully.
+	 */
+	if (valid_wptr == 0)
+		WREG32(mmCP_HQD_PQ_WPTR, shadow_wptr);
+
+	WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
+
+	WREG32(mmCP_HQD_EOP_BASE_ADDR, m->cp_hqd_eop_base_addr_lo);
+	WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, m->cp_hqd_eop_base_addr_hi);
+	WREG32(mmCP_HQD_EOP_CONTROL, m->cp_hqd_eop_control);
+	WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
+	WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
+	WREG32(mmCP_HQD_EOP_EVENTS, m->cp_hqd_eop_done_events);
+
+	WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO, m->cp_hqd_ctx_save_base_addr_lo);
+	WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI, m->cp_hqd_ctx_save_base_addr_hi);
+	WREG32(mmCP_HQD_CTX_SAVE_CONTROL, m->cp_hqd_ctx_save_control);
+	WREG32(mmCP_HQD_CNTL_STACK_OFFSET, m->cp_hqd_cntl_stack_offset);
+	WREG32(mmCP_HQD_CNTL_STACK_SIZE, m->cp_hqd_cntl_stack_size);
+	WREG32(mmCP_HQD_WG_STATE_OFFSET, m->cp_hqd_wg_state_offset);
+	WREG32(mmCP_HQD_CTX_SAVE_SIZE, m->cp_hqd_ctx_save_size);
+
+	WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+
+	WREG32(mmCP_HQD_DEQUEUE_REQUEST, m->cp_hqd_dequeue_request);
+	WREG32(mmCP_HQD_ERROR, m->cp_hqd_error);
+	WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
+	WREG32(mmCP_HQD_EOP_DONES, m->cp_hqd_eop_dones);
+
+	WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
+
+	release_queue(kgd);
+
+	return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+{
+	return 0;
+}
+
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+				uint32_t pipe_id, uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t act;
+	bool retval = false;
+	uint32_t low, high;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+	act = RREG32(mmCP_HQD_ACTIVE);
+	if (act) {
+		low = lower_32_bits(queue_address >> 8);
+		high = upper_32_bits(queue_address >> 8);
+
+		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
+				high == RREG32(mmCP_HQD_PQ_BASE_HI))
+			retval = true;
+	}
+	release_queue(kgd);
+	return retval;
+}
+
+static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+	uint32_t sdma_rlc_rb_cntl;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+
+	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+		return true;
+
+	return false;
+}
+
+static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+				unsigned int timeout, uint32_t pipe_id,
+				uint32_t queue_id)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t temp;
+
+	acquire_queue(kgd, pipe_id, queue_id);
+
+	WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+
+	while (true) {
+		temp = RREG32(mmCP_HQD_ACTIVE);
+		if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
+			break;
+		if (timeout == 0) {
+			pr_err("kfd: cp queue preemption timed out, CP_HQD_ACTIVE=0x%x\n",
+				temp);
+			release_queue(kgd);
+			return -ETIME;
+		}
+		msleep(20);
+		timeout -= 20;
+	}
+
+	release_queue(kgd);
+	return 0;
+}
+
+static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+				unsigned int timeout)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	struct cik_sdma_rlc_registers *m;
+	uint32_t sdma_base_addr;
+	uint32_t temp;
+
+	m = get_sdma_mqd(mqd);
+	sdma_base_addr = get_sdma_base_addr(m);
+
+	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+
+	while (true) {
+		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+		if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE_MASK)
+			break;
+		if (timeout == 0)
+			return -ETIME;
+		msleep(20);
+		timeout -= 20;
+	}
+
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+
+	return 0;
+}
+
+static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
+							uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+}
+
+static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
+								uint8_t vmid)
+{
+	uint32_t reg;
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+}
+
+static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+}
+
+static int kgd_address_watch_disable(struct kgd_dev *kgd)
+{
+	return 0;
+}
+
+static int kgd_address_watch_execute(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					uint32_t cntl_val,
+					uint32_t addr_hi,
+					uint32_t addr_lo)
+{
+	return 0;
+}
+
+static int kgd_wave_control_execute(struct kgd_dev *kgd,
+					uint32_t gfx_index_val,
+					uint32_t sq_cmd)
+{
+	struct amdgpu_device *adev = get_amdgpu_device(kgd);
+	uint32_t data = 0;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+
+	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
+	WREG32(mmSQ_CMD, sq_cmd);
+
+	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+		INSTANCE_BROADCAST_WRITES, 1);
+	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+		SH_BROADCAST_WRITES, 1);
+	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+		SE_BROADCAST_WRITES, 1);
+
+	WREG32(mmGRBM_GFX_INDEX, data);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return 0;
+}
+
+static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+					unsigned int watch_point_id,
+					unsigned int reg_offset)
+{
+	return 0;
+}
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+	const union amdgpu_firmware_header *hdr;
+
+	BUG_ON(kgd == NULL);
+
+	switch (type) {
+	case KGD_ENGINE_PFP:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.pfp_fw->data;
+		break;
+
+	case KGD_ENGINE_ME:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.me_fw->data;
+		break;
+
+	case KGD_ENGINE_CE:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.ce_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC1:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.mec_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC2:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.mec2_fw->data;
+		break;
+
+	case KGD_ENGINE_RLC:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->gfx.rlc_fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA1:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->sdma[0].fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA2:
+		hdr = (const union amdgpu_firmware_header *)
+							adev->sdma[1].fw->data;
+		break;
+
+	default:
+		return 0;
+	}
+
+	if (hdr == NULL)
+		return 0;
+
+	/* Only 12 bits in use */
+	return hdr->common.ucode_version;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 6a58837..77f1d7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -897,7 +897,7 @@
 					if ((id == ASIC_INTERNAL_ENGINE_SS) ||
 					    (id == ASIC_INTERNAL_MEMORY_SS))
 						ss->rate /= 100;
-					if (adev->flags & AMDGPU_IS_APU)
+					if (adev->flags & AMD_IS_APU)
 						amdgpu_atombios_get_igp_ss_overrides(adev, ss, id);
 					return true;
 				}
@@ -1058,7 +1058,7 @@
 	SET_MEMORY_CLOCK_PS_ALLOCATION args;
 	int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
 
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return;
 
 	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 khz */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 2742b9a..98d59ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@
 {
 	unsigned long start_jiffies;
 	unsigned long end_jiffies;
-	struct amdgpu_fence *fence = NULL;
+	struct fence *fence = NULL;
 	int i, r;
 
 	start_jiffies = jiffies;
@@ -42,17 +42,17 @@
 		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
 		if (r)
 			goto exit_do_move;
-		r = amdgpu_fence_wait(fence, false);
+		r = fence_wait(fence, false);
 		if (r)
 			goto exit_do_move;
-		amdgpu_fence_unref(&fence);
+		fence_put(fence);
 	}
 	end_jiffies = jiffies;
 	r = jiffies_to_msecs(end_jiffies - start_jiffies);
 
 exit_do_move:
 	if (fence)
-		amdgpu_fence_unref(&fence);
+		fence_put(fence);
 	return r;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index ceb444f..02add0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -48,7 +48,7 @@
 	resource_size_t vram_base;
 	resource_size_t size = 256 * 1024; /* ??? */
 
-	if (!(adev->flags & AMDGPU_IS_APU))
+	if (!(adev->flags & AMD_IS_APU))
 		if (!amdgpu_card_posted(adev))
 			return false;
 
@@ -184,7 +184,7 @@
 	bool found = false;
 
 	/* ATRM is for the discrete card only */
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return false;
 
 	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
@@ -246,7 +246,7 @@
 
 static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
 {
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return igp_read_bios_from_vram(adev);
 	else
 		return amdgpu_asic_read_disabled_bios(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
new file mode 100644
index 0000000..6b1243f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -0,0 +1,838 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <drm/drmP.h>
+#include <linux/firmware.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+#include "cgs_linux.h"
+#include "atom.h"
+#include "amdgpu_ucode.h"
+
+
+struct amdgpu_cgs_device {
+	struct cgs_device base;
+	struct amdgpu_device *adev;
+};
+
+#define CGS_FUNC_ADEV							\
+	struct amdgpu_device *adev =					\
+		((struct amdgpu_cgs_device *)cgs_device)->adev
+
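+/*
+ * Every CGS entry point gets an opaque cgs_device; CGS_FUNC_ADEV
+ * recovers the wrapped amdgpu_device so the helpers below can use the
+ * usual RREG32/WREG32 style accessors.
+ */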
+static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
+				   uint64_t *mc_start, uint64_t *mc_size,
+				   uint64_t *mem_size)
+{
+	CGS_FUNC_ADEV;
+	switch (type) {
+	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
+	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
+		*mc_start = 0;
+		*mc_size = adev->mc.visible_vram_size;
+		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
+		break;
+	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
+	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
+		*mc_start = adev->mc.visible_vram_size;
+		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
+		*mem_size = *mc_size;
+		break;
+	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
+	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
+		*mc_start = adev->mc.gtt_start;
+		*mc_size = adev->mc.gtt_size;
+		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
+				uint64_t size,
+				uint64_t min_offset, uint64_t max_offset,
+				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
+{
+	CGS_FUNC_ADEV;
+	int ret;
+	struct amdgpu_bo *bo;
+	struct page *kmem_page = vmalloc_to_page(kmem);
+	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
+
+	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
+	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
+			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
+	if (ret)
+		return ret;
+	ret = amdgpu_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	/* pin buffer into GTT */
+	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
+				       min_offset, max_offset, mcaddr);
+	amdgpu_bo_unreserve(bo);
+
+	*kmem_handle = (cgs_handle_t)bo;
+	return ret;
+}
+
+static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
+{
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;
+
+	if (obj) {
+		int r = amdgpu_bo_reserve(obj, false);
+		if (likely(r == 0)) {
+			amdgpu_bo_unpin(obj);
+			amdgpu_bo_unreserve(obj);
+		}
+		amdgpu_bo_unref(&obj);
+
+	}
+	return 0;
+}
+
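+/*
+ * Translate a CGS memory type into an amdgpu domain plus a restricted
+ * TTM placement window ([min_offset, max_offset], in pages); the actual
+ * allocation is then delegated to amdgpu_bo_create_restricted().
+ */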
+static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
+				    enum cgs_gpu_mem_type type,
+				    uint64_t size, uint64_t align,
+				    uint64_t min_offset, uint64_t max_offset,
+				    cgs_handle_t *handle)
+{
+	CGS_FUNC_ADEV;
+	uint16_t flags = 0;
+	int ret = 0;
+	uint32_t domain = 0;
+	struct amdgpu_bo *obj;
+	struct ttm_placement placement;
+	struct ttm_place place;
+
+	if (min_offset > max_offset) {
+		BUG_ON(1);
+		return -EINVAL;
+	}
+
+	/* fail if the alignment is not a power of 2 */
+	if (((align != 1) && (align & (align - 1)))
+	    || size == 0 || align == 0)
+		return -EINVAL;
+
+
+	switch (type) {
+	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
+	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
+		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+		domain = AMDGPU_GEM_DOMAIN_VRAM;
+		if (max_offset > adev->mc.real_vram_size)
+			return -EINVAL;
+		place.fpfn = min_offset >> PAGE_SHIFT;
+		place.lpfn = max_offset >> PAGE_SHIFT;
+		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+			TTM_PL_FLAG_VRAM;
+		break;
+	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
+	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
+		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+		domain = AMDGPU_GEM_DOMAIN_VRAM;
+		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
+			place.fpfn =
+				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
+			place.lpfn =
+				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
+			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_VRAM;
+		}
+
+		break;
+	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
+		domain = AMDGPU_GEM_DOMAIN_GTT;
+		place.fpfn = min_offset >> PAGE_SHIFT;
+		place.lpfn = max_offset >> PAGE_SHIFT;
+		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+		break;
+	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
+		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+		domain = AMDGPU_GEM_DOMAIN_GTT;
+		place.fpfn = min_offset >> PAGE_SHIFT;
+		place.lpfn = max_offset >> PAGE_SHIFT;
+		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
+			TTM_PL_FLAG_UNCACHED;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+
+	*handle = 0;
+
+	placement.placement = &place;
+	placement.num_placement = 1;
+	placement.busy_placement = &place;
+	placement.num_busy_placement = 1;
+
+	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
+					  true, domain, flags,
+					  NULL, &placement, &obj);
+	if (ret) {
+		DRM_ERROR("(%d) bo create failed\n", ret);
+		return ret;
+	}
+	*handle = (cgs_handle_t)obj;
+
+	return ret;
+}
+
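+/*
+ * Import a dma-buf fd as a CGS handle: resolve the fd against the
+ * drm_file of the calling process, then translate the resulting GEM
+ * handle into the underlying amdgpu_bo.
+ */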
+static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
+				     cgs_handle_t *handle)
+{
+	CGS_FUNC_ADEV;
+	int r;
+	uint32_t dma_handle;
+	struct drm_gem_object *obj;
+	struct amdgpu_bo *bo;
+	struct drm_device *dev = adev->ddev;
+	struct drm_file *file_priv = NULL, *priv;
+
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry(priv, &dev->filelist, lhead) {
+		rcu_read_lock();
+		if (priv->pid == get_pid(task_pid(current)))
+			file_priv = priv;
+		rcu_read_unlock();
+		if (file_priv)
+			break;
+	}
+	mutex_unlock(&dev->struct_mutex);
+	r = dev->driver->prime_fd_to_handle(dev,
+					    file_priv, dmabuf_fd,
+					    &dma_handle);
+	spin_lock(&file_priv->table_lock);
+
+	/* Check if we currently have a reference on the object */
+	obj = idr_find(&file_priv->object_idr, dma_handle);
+	if (obj == NULL) {
+		spin_unlock(&file_priv->table_lock);
+		return -EINVAL;
+	}
+	spin_unlock(&file_priv->table_lock);
+	bo = gem_to_amdgpu_bo(obj);
+	*handle = (cgs_handle_t)bo;
+	return 0;
+}
+
+static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
+{
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+
+	if (obj) {
+		int r = amdgpu_bo_reserve(obj, false);
+		if (likely(r == 0)) {
+			amdgpu_bo_kunmap(obj);
+			amdgpu_bo_unpin(obj);
+			amdgpu_bo_unreserve(obj);
+		}
+		amdgpu_bo_unref(&obj);
+
+	}
+	return 0;
+}
+
+static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
+				   uint64_t *mcaddr)
+{
+	int r;
+	u64 min_offset, max_offset;
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+
+	WARN_ON_ONCE(obj->placement.num_placement > 1);
+
+	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
+	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
+
+	r = amdgpu_bo_reserve(obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
+				     min_offset, max_offset, mcaddr);
+	amdgpu_bo_unreserve(obj);
+	return r;
+}
+
+static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
+{
+	int r;
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+	r = amdgpu_bo_reserve(obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = amdgpu_bo_unpin(obj);
+	amdgpu_bo_unreserve(obj);
+	return r;
+}
+
+static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
+				   void **map)
+{
+	int r;
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+	r = amdgpu_bo_reserve(obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = amdgpu_bo_kmap(obj, map);
+	amdgpu_bo_unreserve(obj);
+	return r;
+}
+
+static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
+{
+	int r;
+	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
+	r = amdgpu_bo_reserve(obj, false);
+	if (unlikely(r != 0))
+		return r;
+	amdgpu_bo_kunmap(obj);
+	amdgpu_bo_unreserve(obj);
+	return r;
+}
+
+static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
+{
+	CGS_FUNC_ADEV;
+	return RREG32(offset);
+}
+
+static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
+				      uint32_t value)
+{
+	CGS_FUNC_ADEV;
+	WREG32(offset, value);
+}
+
+static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
+					     enum cgs_ind_reg space,
+					     unsigned index)
+{
+	CGS_FUNC_ADEV;
+	switch (space) {
+	case CGS_IND_REG__MMIO:
+		return RREG32_IDX(index);
+	case CGS_IND_REG__PCIE:
+		return RREG32_PCIE(index);
+	case CGS_IND_REG__SMC:
+		return RREG32_SMC(index);
+	case CGS_IND_REG__UVD_CTX:
+		return RREG32_UVD_CTX(index);
+	case CGS_IND_REG__DIDT:
+		return RREG32_DIDT(index);
+	case CGS_IND_REG__AUDIO_ENDPT:
+		DRM_ERROR("audio endpt register access not implemented.\n");
+		return 0;
+	}
+	WARN(1, "Invalid indirect register space");
+	return 0;
+}
+
+static void amdgpu_cgs_write_ind_register(void *cgs_device,
+					  enum cgs_ind_reg space,
+					  unsigned index, uint32_t value)
+{
+	CGS_FUNC_ADEV;
+	switch (space) {
+	case CGS_IND_REG__MMIO:
+		return WREG32_IDX(index, value);
+	case CGS_IND_REG__PCIE:
+		return WREG32_PCIE(index, value);
+	case CGS_IND_REG__SMC:
+		return WREG32_SMC(index, value);
+	case CGS_IND_REG__UVD_CTX:
+		return WREG32_UVD_CTX(index, value);
+	case CGS_IND_REG__DIDT:
+		return WREG32_DIDT(index, value);
+	case CGS_IND_REG__AUDIO_ENDPT:
+		DRM_ERROR("audio endpt register access not implemented.\n");
+		return;
+	}
+	WARN(1, "Invalid indirect register space");
+}
+
+static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
+{
+	CGS_FUNC_ADEV;
+	uint8_t val;
+	int ret = pci_read_config_byte(adev->pdev, addr, &val);
+	if (WARN(ret, "pci_read_config_byte error"))
+		return 0;
+	return val;
+}
+
+static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
+{
+	CGS_FUNC_ADEV;
+	uint16_t val;
+	int ret = pci_read_config_word(adev->pdev, addr, &val);
+	if (WARN(ret, "pci_read_config_word error"))
+		return 0;
+	return val;
+}
+
+static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
+						 unsigned addr)
+{
+	CGS_FUNC_ADEV;
+	uint32_t val;
+	int ret = pci_read_config_dword(adev->pdev, addr, &val);
+	if (WARN(ret, "pci_read_config_dword error"))
+		return 0;
+	return val;
+}
+
+static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
+					     uint8_t value)
+{
+	CGS_FUNC_ADEV;
+	int ret = pci_write_config_byte(adev->pdev, addr, value);
+	WARN(ret, "pci_write_config_byte error");
+}
+
+static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
+					     uint16_t value)
+{
+	CGS_FUNC_ADEV;
+	int ret = pci_write_config_word(adev->pdev, addr, value);
+	WARN(ret, "pci_write_config_word error");
+}
+
+static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
+					      uint32_t value)
+{
+	CGS_FUNC_ADEV;
+	int ret = pci_write_config_dword(adev->pdev, addr, value);
+	WARN(ret, "pci_write_config_dword error");
+}
+
+static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
+						  unsigned table, uint16_t *size,
+						  uint8_t *frev, uint8_t *crev)
+{
+	CGS_FUNC_ADEV;
+	uint16_t data_start;
+
+	if (amdgpu_atom_parse_data_header(
+		    adev->mode_info.atom_context, table, size,
+		    frev, crev, &data_start))
+		return (uint8_t*)adev->mode_info.atom_context->bios +
+			data_start;
+
+	return NULL;
+}
+
+static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
+					      uint8_t *frev, uint8_t *crev)
+{
+	CGS_FUNC_ADEV;
+
+	if (amdgpu_atom_parse_cmd_header(
+		    adev->mode_info.atom_context, table,
+		    frev, crev))
+		return 0;
+
+	return -EINVAL;
+}
+
+static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
+					  void *args)
+{
+	CGS_FUNC_ADEV;
+
+	return amdgpu_atom_execute_table(
+		adev->mode_info.atom_context, table, args);
+}
+
+static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
+{
+	/* TODO */
+	return 0;
+}
+
+static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
+{
+	/* TODO */
+	return 0;
+}
+
+static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
+				     int active)
+{
+	/* TODO */
+	return 0;
+}
+
+static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
+				       enum cgs_clock clock, unsigned freq)
+{
+	/* TODO */
+	return 0;
+}
+
+static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
+					enum cgs_engine engine, int powered)
+{
+	/* TODO */
+	return 0;
+}
+
+
+
+static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
+					    enum cgs_clock clock,
+					    struct cgs_clock_limits *limits)
+{
+	/* TODO */
+	return 0;
+}
+
+static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
+					  const uint32_t *voltages)
+{
+	DRM_ERROR("not implemented");
+	return -EPERM;
+}
+
+struct cgs_irq_params {
+	unsigned src_id;
+	cgs_irq_source_set_func_t set;
+	cgs_irq_handler_func_t handler;
+	void *private_data;
+};
+
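+/*
+ * Thin trampolines: amdgpu's interrupt framework invokes these, which
+ * forward to the client callbacks captured in cgs_irq_params when the
+ * source was registered through amdgpu_cgs_add_irq_source().
+ */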
+static int cgs_set_irq_state(struct amdgpu_device *adev,
+			     struct amdgpu_irq_src *src,
+			     unsigned type,
+			     enum amdgpu_interrupt_state state)
+{
+	struct cgs_irq_params *irq_params =
+		(struct cgs_irq_params *)src->data;
+	if (!irq_params)
+		return -EINVAL;
+	if (!irq_params->set)
+		return -EINVAL;
+	return irq_params->set(irq_params->private_data,
+			       irq_params->src_id,
+			       type,
+			       (int)state);
+}
+
+static int cgs_process_irq(struct amdgpu_device *adev,
+			   struct amdgpu_irq_src *source,
+			   struct amdgpu_iv_entry *entry)
+{
+	struct cgs_irq_params *irq_params =
+		(struct cgs_irq_params *)source->data;
+	if (!irq_params)
+		return -EINVAL;
+	if (!irq_params->handler)
+		return -EINVAL;
+	return irq_params->handler(irq_params->private_data,
+				   irq_params->src_id,
+				   entry->iv_entry);
+}
+
+static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
+	.set = cgs_set_irq_state,
+	.process = cgs_process_irq,
+};
+
+static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
+				     unsigned num_types,
+				     cgs_irq_source_set_func_t set,
+				     cgs_irq_handler_func_t handler,
+				     void *private_data)
+{
+	CGS_FUNC_ADEV;
+	int ret = 0;
+	struct cgs_irq_params *irq_params;
+	struct amdgpu_irq_src *source =
+		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
+	if (!source)
+		return -ENOMEM;
+	irq_params =
+		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
+	if (!irq_params) {
+		kfree(source);
+		return -ENOMEM;
+	}
+	source->num_types = num_types;
+	source->funcs = &cgs_irq_funcs;
+	irq_params->src_id = src_id;
+	irq_params->set = set;
+	irq_params->handler = handler;
+	irq_params->private_data = private_data;
+	source->data = (void *)irq_params;
+	ret = amdgpu_irq_add_id(adev, src_id, source);
+	if (ret) {
+		kfree(irq_params);
+		kfree(source);
+	}
+
+	return ret;
+}
+
+static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
+{
+	CGS_FUNC_ADEV;
+	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
+}
+
+static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
+{
+	CGS_FUNC_ADEV;
+	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
+}
+
+int amdgpu_cgs_set_clockgating_state(void *cgs_device,
+				  enum amd_ip_block_type block_type,
+				  enum amd_clockgating_state state)
+{
+	CGS_FUNC_ADEV;
+	int i, r = -1;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+
+		if (adev->ip_blocks[i].type == block_type) {
+			r = adev->ip_blocks[i].funcs->set_clockgating_state(
+								(void *)adev,
+									state);
+			break;
+		}
+	}
+	return r;
+}
+
+int amdgpu_cgs_set_powergating_state(void *cgs_device,
+				  enum amd_ip_block_type block_type,
+				  enum amd_powergating_state state)
+{
+	CGS_FUNC_ADEV;
+	int i, r = -1;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+
+		if (adev->ip_blocks[i].type == block_type) {
+			r = adev->ip_blocks[i].funcs->set_powergating_state(
+								(void *)adev,
+									state);
+			break;
+		}
+	}
+	return r;
+}
+
+
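+/*
+ * Map CGS firmware ids onto amdgpu ucode ids; the CP_MEC jump-table
+ * variants share the MEC images, and MEC_JT2 depends on the ASIC.
+ */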
+static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
+{
+	CGS_FUNC_ADEV;
+	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
+
+	switch (fw_type) {
+	case CGS_UCODE_ID_SDMA0:
+		result = AMDGPU_UCODE_ID_SDMA0;
+		break;
+	case CGS_UCODE_ID_SDMA1:
+		result = AMDGPU_UCODE_ID_SDMA1;
+		break;
+	case CGS_UCODE_ID_CP_CE:
+		result = AMDGPU_UCODE_ID_CP_CE;
+		break;
+	case CGS_UCODE_ID_CP_PFP:
+		result = AMDGPU_UCODE_ID_CP_PFP;
+		break;
+	case CGS_UCODE_ID_CP_ME:
+		result = AMDGPU_UCODE_ID_CP_ME;
+		break;
+	case CGS_UCODE_ID_CP_MEC:
+	case CGS_UCODE_ID_CP_MEC_JT1:
+		result = AMDGPU_UCODE_ID_CP_MEC1;
+		break;
+	case CGS_UCODE_ID_CP_MEC_JT2:
+		if (adev->asic_type == CHIP_TONGA)
+			result = AMDGPU_UCODE_ID_CP_MEC2;
+		else if (adev->asic_type == CHIP_CARRIZO)
+			result = AMDGPU_UCODE_ID_CP_MEC1;
+		break;
+	case CGS_UCODE_ID_RLC_G:
+		result = AMDGPU_UCODE_ID_RLC_G;
+		break;
+	default:
+		DRM_ERROR("Firmware type not supported\n");
+	}
+	return result;
+}
+
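+/*
+ * For CP/SDMA ucode the answer comes from the table cached in
+ * adev->firmware; the SMU image is special-cased and fetched on
+ * demand with request_firmware().
+ */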
+static int amdgpu_cgs_get_firmware_info(void *cgs_device,
+					enum cgs_ucode_id type,
+					struct cgs_firmware_info *info)
+{
+	CGS_FUNC_ADEV;
+
+	if (CGS_UCODE_ID_SMU != type) {
+		uint64_t gpu_addr;
+		uint32_t data_size;
+		const struct gfx_firmware_header_v1_0 *header;
+		enum AMDGPU_UCODE_ID id;
+		struct amdgpu_firmware_info *ucode;
+
+		id = fw_type_convert(cgs_device, type);
+		ucode = &adev->firmware.ucode[id];
+		if (ucode->fw == NULL)
+			return -EINVAL;
+
+		gpu_addr  = ucode->mc_addr;
+		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+		data_size = le32_to_cpu(header->header.ucode_size_bytes);
+
+		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
+		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
+			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+			data_size = le32_to_cpu(header->jt_size) << 2;
+		}
+		info->mc_addr = gpu_addr;
+		info->image_size = data_size;
+		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
+	} else {
+		char fw_name[30] = {0};
+		int err = 0;
+		uint32_t ucode_size;
+		uint32_t ucode_start_address;
+		const uint8_t *src;
+		const struct smc_firmware_header_v1_0 *hdr;
+
+		switch (adev->asic_type) {
+		case CHIP_TONGA:
+			strcpy(fw_name, "amdgpu/tonga_smc.bin");
+			break;
+		default:
+			DRM_ERROR("SMC firmware not supported\n");
+			return -EINVAL;
+		}
+
+		err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+		if (err) {
+			DRM_ERROR("Failed to request firmware\n");
+			return err;
+		}
+
+		err = amdgpu_ucode_validate(adev->pm.fw);
+		if (err) {
+			DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
+			release_firmware(adev->pm.fw);
+			adev->pm.fw = NULL;
+			return err;
+		}
+
+		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+		src = (const uint8_t *)(adev->pm.fw->data +
+		       le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+		info->version = adev->pm.fw_version;
+		info->image_size = ucode_size;
+		info->kptr = (void *)src;
+	}
+	return 0;
+}
+
+static const struct cgs_ops amdgpu_cgs_ops = {
+	amdgpu_cgs_gpu_mem_info,
+	amdgpu_cgs_gmap_kmem,
+	amdgpu_cgs_gunmap_kmem,
+	amdgpu_cgs_alloc_gpu_mem,
+	amdgpu_cgs_free_gpu_mem,
+	amdgpu_cgs_gmap_gpu_mem,
+	amdgpu_cgs_gunmap_gpu_mem,
+	amdgpu_cgs_kmap_gpu_mem,
+	amdgpu_cgs_kunmap_gpu_mem,
+	amdgpu_cgs_read_register,
+	amdgpu_cgs_write_register,
+	amdgpu_cgs_read_ind_register,
+	amdgpu_cgs_write_ind_register,
+	amdgpu_cgs_read_pci_config_byte,
+	amdgpu_cgs_read_pci_config_word,
+	amdgpu_cgs_read_pci_config_dword,
+	amdgpu_cgs_write_pci_config_byte,
+	amdgpu_cgs_write_pci_config_word,
+	amdgpu_cgs_write_pci_config_dword,
+	amdgpu_cgs_atom_get_data_table,
+	amdgpu_cgs_atom_get_cmd_table_revs,
+	amdgpu_cgs_atom_exec_cmd_table,
+	amdgpu_cgs_create_pm_request,
+	amdgpu_cgs_destroy_pm_request,
+	amdgpu_cgs_set_pm_request,
+	amdgpu_cgs_pm_request_clock,
+	amdgpu_cgs_pm_request_engine,
+	amdgpu_cgs_pm_query_clock_limits,
+	amdgpu_cgs_set_camera_voltages,
+	amdgpu_cgs_get_firmware_info,
+	amdgpu_cgs_set_powergating_state,
+	amdgpu_cgs_set_clockgating_state
+};
+
+static const struct cgs_os_ops amdgpu_cgs_os_ops = {
+	amdgpu_cgs_import_gpu_mem,
+	amdgpu_cgs_add_irq_source,
+	amdgpu_cgs_irq_get,
+	amdgpu_cgs_irq_put
+};
+
+void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
+{
+	struct amdgpu_cgs_device *cgs_device =
+		kmalloc(sizeof(*cgs_device), GFP_KERNEL);
+
+	if (!cgs_device) {
+		DRM_ERROR("Couldn't allocate CGS device structure\n");
+		return NULL;
+	}
+
+	cgs_device->base.ops = &amdgpu_cgs_ops;
+	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
+	cgs_device->adev = adev;
+
+	return cgs_device;
+}
+
+void amdgpu_cgs_destroy_device(void *cgs_device)
+{
+	kfree(cgs_device);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 27df17a..89c3dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -75,6 +75,11 @@
 			if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
 				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 			} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+				/* Don't try to start link training before we
+				 * have the dpcd */
+				if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+					return;
+
 				/* set it to OFF so that drm_helper_connector_dpms()
 				 * won't return immediately since the current state
 				 * is ON at this point.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 1f040d8..3b355ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -126,6 +126,30 @@
 	return 0;
 }
 
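+/*
+ * Allocate and minimally initialise a parser object; the IBs are handed
+ * in by the caller and each one is tagged with the submitting context.
+ */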
+struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
+                                               struct drm_file *filp,
+                                               struct amdgpu_ctx *ctx,
+                                               struct amdgpu_ib *ibs,
+                                               uint32_t num_ibs)
+{
+	struct amdgpu_cs_parser *parser;
+	int i;
+
+	parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
+	if (!parser)
+		return NULL;
+
+	parser->adev = adev;
+	parser->filp = filp;
+	parser->ctx = ctx;
+	parser->ibs = ibs;
+	parser->num_ibs = num_ibs;
+	for (i = 0; i < num_ibs; i++)
+		ibs[i].ctx = ctx;
+
+	return parser;
+}
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
@@ -147,13 +171,13 @@
 
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
-	chunk_array = kcalloc(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
 	if (chunk_array == NULL) {
 		r = -ENOMEM;
 		goto out;
 	}
 
-	chunk_array_user = (uint64_t *)(unsigned long)(cs->in.chunks);
+	chunk_array_user = (uint64_t __user *)(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
 			   sizeof(uint64_t)*cs->in.num_chunks)) {
 		r = -EFAULT;
@@ -161,7 +185,7 @@
 	}
 
 	p->nchunks = cs->in.num_chunks;
-	p->chunks = kcalloc(p->nchunks, sizeof(struct amdgpu_cs_chunk),
+	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 			    GFP_KERNEL);
 	if (p->chunks == NULL) {
 		r = -ENOMEM;
@@ -173,7 +197,7 @@
 		struct drm_amdgpu_cs_chunk user_chunk;
 		uint32_t __user *cdata;
 
-		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
+		chunk_ptr = (void __user *)chunk_array[i];
 		if (copy_from_user(&user_chunk, chunk_ptr,
 				       sizeof(struct drm_amdgpu_cs_chunk))) {
 			r = -EFAULT;
@@ -183,7 +207,7 @@
 		p->chunks[i].length_dw = user_chunk.length_dw;
 
 		size = p->chunks[i].length_dw;
-		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
+		cdata = (void __user *)user_chunk.chunk_data;
 		p->chunks[i].user_ptr = cdata;
 
 		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
@@ -235,11 +259,10 @@
 		}
 	}
 
+
 	p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!p->ibs) {
+	if (!p->ibs)
 		r = -ENOMEM;
-		goto out;
-	}
 
 out:
 	kfree(chunk_array);
@@ -331,7 +354,7 @@
 			 * into account. We don't want to disallow buffer moves
 			 * completely.
 			 */
-			if (current_domain != AMDGPU_GEM_DOMAIN_CPU &&
+			if ((lobj->allowed_domains & current_domain) != 0 &&
 			    (domain & current_domain) == 0 && /* will be moved */
 			    bytes_moved > bytes_moved_threshold) {
 				/* don't move it */
@@ -415,18 +438,8 @@
 	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
 }
 
-/**
- * cs_parser_fini() - clean parser states
- * @parser:	parser structure holding parsing context.
- * @error:	error number
- *
- * If error is set than unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
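+/*
+ * The teardown is split in two: _fini_early backs off the reservations
+ * and fences (safe to run right after submission), while _fini_late
+ * frees memory and, with the GPU scheduler enabled, is presumably meant
+ * to run once the scheduler has finished with the job.
+ */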
+static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
 {
-	unsigned i;
-
 	if (!error) {
 		/* Sort the buffer list from the smallest to largest buffer,
 		 * which affects the order of buffers in the LRU list.
@@ -447,21 +460,45 @@
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 	}
+}
 
+static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
+{
+	unsigned i;
+
 	if (parser->ctx)
 		amdgpu_ctx_put(parser->ctx);
 	if (parser->bo_list)
 		amdgpu_bo_list_put(parser->bo_list);
+
 	drm_free_large(parser->vm_bos);
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
-	if (parser->ibs)
-		for (i = 0; i < parser->num_ibs; i++)
-			amdgpu_ib_free(parser->adev, &parser->ibs[i]);
-	kfree(parser->ibs);
-	if (parser->uf.bo)
-		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+	if (!amdgpu_enable_scheduler) {
+		if (parser->ibs)
+			for (i = 0; i < parser->num_ibs; i++)
+				amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+		kfree(parser->ibs);
+		if (parser->uf.bo)
+			drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+	}
+
+	kfree(parser);
+}
+
+/**
+ * cs_parser_fini() - clean parser states
+ * @parser:	parser structure holding parsing context.
+ * @error:	error number
+ *
+ * If error is set, then unvalidate the buffers; otherwise just free the
+ * memory used by the parsing context.
+ **/
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+{
+	amdgpu_cs_parser_fini_early(parser, error, backoff);
+	amdgpu_cs_parser_fini_late(parser);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -476,12 +513,18 @@
 	if (r)
 		return r;
 
+	r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence);
+	if (r)
+		return r;
+
 	r = amdgpu_vm_clear_freed(adev, vm);
 	if (r)
 		return r;
 
 	if (p->bo_list) {
 		for (i = 0; i < p->bo_list->num_entries; i++) {
+			struct fence *f;
+
 			/* ignore duplicates */
 			bo = p->bo_list->array[i].robj;
 			if (!bo)
@@ -495,7 +538,10 @@
 			if (r)
 				return r;
 
-			amdgpu_sync_fence(&p->ibs[0].sync, bo_va->last_pt_update);
+			f = bo_va->last_pt_update;
+			r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
+			if (r)
+				return r;
 		}
 	}
 
@@ -529,9 +575,9 @@
 		goto out;
 	}
 	amdgpu_cs_sync_rings(parser);
-
-	r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
-			       parser->filp);
+	if (!amdgpu_enable_scheduler)
+		r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
+				       parser->filp);
 
 out:
 	mutex_unlock(&vm->mutex);
@@ -650,7 +696,6 @@
 			ib->oa_size = amdgpu_bo_size(oa);
 		}
 	}
-
 	/* wrap the last IB with user fence */
 	if (parser->uf.bo) {
 		struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];
@@ -693,9 +738,9 @@
 			sizeof(struct drm_amdgpu_cs_chunk_dep);
 
 		for (j = 0; j < num_deps; ++j) {
-			struct amdgpu_fence *fence;
 			struct amdgpu_ring *ring;
 			struct amdgpu_ctx *ctx;
+			struct fence *fence;
 
 			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
 					       deps[j].ip_instance,
@@ -707,85 +752,137 @@
 			if (ctx == NULL)
 				return -EINVAL;
 
-			r = amdgpu_fence_recreate(ring, p->filp,
-						  deps[j].handle,
-						  &fence);
-			if (r) {
+			fence = amdgpu_ctx_get_fence(ctx, ring,
+						     deps[j].handle);
+			if (IS_ERR(fence)) {
+				r = PTR_ERR(fence);
 				amdgpu_ctx_put(ctx);
 				return r;
-			}
 
-			amdgpu_sync_fence(&ib->sync, fence);
-			amdgpu_fence_unref(&fence);
-			amdgpu_ctx_put(ctx);
+			} else if (fence) {
+				r = amdgpu_sync_fence(adev, &ib->sync, fence);
+				fence_put(fence);
+				amdgpu_ctx_put(ctx);
+				if (r)
+					return r;
+			}
 		}
 	}
 
 	return 0;
 }
 
+static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
+{
+	int i;
+
+	if (sched_job->ibs)
+		for (i = 0; i < sched_job->num_ibs; i++)
+			amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
+	kfree(sched_job->ibs);
+	if (sched_job->uf.bo)
+		drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
+	return 0;
+}
+
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct amdgpu_device *adev = dev->dev_private;
 	union drm_amdgpu_cs *cs = data;
-	struct amdgpu_cs_parser parser;
-	int r, i;
+	struct amdgpu_cs_parser *parser;
 	bool reserved_buffers = false;
+	int i, r;
 
 	down_read(&adev->exclusive_lock);
 	if (!adev->accel_working) {
 		up_read(&adev->exclusive_lock);
 		return -EBUSY;
 	}
-	/* initialize parser */
-	memset(&parser, 0, sizeof(struct amdgpu_cs_parser));
-	parser.filp = filp;
-	parser.adev = adev;
-	r = amdgpu_cs_parser_init(&parser, data);
+
+	parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
+	if (!parser)
+		return -ENOMEM;
+	r = amdgpu_cs_parser_init(parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
-		amdgpu_cs_parser_fini(&parser, r, false);
+		amdgpu_cs_parser_fini(parser, r, false);
 		up_read(&adev->exclusive_lock);
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
 	}
 
-	r = amdgpu_cs_parser_relocs(&parser);
-	if (r) {
-		if (r != -ERESTARTSYS) {
-			if (r == -ENOMEM)
-				DRM_ERROR("Not enough memory for command submission!\n");
-			else
-				DRM_ERROR("Failed to process the buffer list %d!\n", r);
-		}
+	r = amdgpu_cs_parser_relocs(parser);
+	if (r == -ENOMEM)
+		DRM_ERROR("Not enough memory for command submission!\n");
+	else if (r && r != -ERESTARTSYS)
+		DRM_ERROR("Failed to process the buffer list %d!\n", r);
+	else if (!r) {
+		reserved_buffers = true;
+		r = amdgpu_cs_ib_fill(adev, parser);
 	}
 
 	if (!r) {
-		reserved_buffers = true;
-		r = amdgpu_cs_ib_fill(adev, &parser);
+		r = amdgpu_cs_dependencies(adev, parser);
+		if (r)
+			DRM_ERROR("Failed to handle dependencies (%d)!\n", r);
 	}
 
-	if (!r)
-		r = amdgpu_cs_dependencies(adev, &parser);
-
-	if (r) {
-		amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
-		up_read(&adev->exclusive_lock);
-		r = amdgpu_cs_handle_lockup(adev, r);
-		return r;
-	}
-
-	for (i = 0; i < parser.num_ibs; i++)
-		trace_amdgpu_cs(&parser, i);
-
-	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
-	if (r) {
+	if (r)
 		goto out;
+
+	for (i = 0; i < parser->num_ibs; i++)
+		trace_amdgpu_cs(parser, i);
+
+	r = amdgpu_cs_ib_vm_chunk(adev, parser);
+	if (r)
+		goto out;
+
+	if (amdgpu_enable_scheduler && parser->num_ibs) {
+		struct amdgpu_job *job;
+		struct amdgpu_ring *ring = parser->ibs->ring;
+		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+		if (!job) {
+			r = -ENOMEM;
+			goto out;
+		}
+		job->base.sched = ring->scheduler;
+		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
+		job->adev = parser->adev;
+		job->ibs = parser->ibs;
+		job->num_ibs = parser->num_ibs;
+		job->base.owner = parser->filp;
+		mutex_init(&job->job_lock);
+		if (job->ibs[job->num_ibs - 1].user) {
+			memcpy(&job->uf, &parser->uf,
+			       sizeof(struct amdgpu_user_fence));
+			job->ibs[job->num_ibs - 1].user = &job->uf;
+		}
+
+		job->free_job = amdgpu_cs_free_job;
+		mutex_lock(&job->job_lock);
+		r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+		if (r) {
+			mutex_unlock(&job->job_lock);
+			amdgpu_cs_free_job(job);
+			kfree(job);
+			goto out;
+		}
+		cs->out.handle =
+			amdgpu_ctx_add_fence(parser->ctx, ring,
+					     &job->base.s_fence->base);
+		parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;
+
+		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
+		ttm_eu_fence_buffer_objects(&parser->ticket,
+				&parser->validated,
+				&job->base.s_fence->base);
+
+		mutex_unlock(&job->job_lock);
+		amdgpu_cs_parser_fini_late(parser);
+		up_read(&adev->exclusive_lock);
+		return 0;
 	}
 
-	cs->out.handle = parser.ibs[parser.num_ibs - 1].fence->seq;
+	cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
 out:
-	amdgpu_cs_parser_fini(&parser, r, true);
+	amdgpu_cs_parser_fini(parser, r, reserved_buffers);
 	up_read(&adev->exclusive_lock);
 	r = amdgpu_cs_handle_lockup(adev, r);
 	return r;
@@ -806,30 +903,29 @@
 	union drm_amdgpu_wait_cs *wait = data;
 	struct amdgpu_device *adev = dev->dev_private;
 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
-	struct amdgpu_fence *fence = NULL;
 	struct amdgpu_ring *ring = NULL;
 	struct amdgpu_ctx *ctx;
+	struct fence *fence;
 	long r;
 
+	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
+			       wait->in.ring, &ring);
+	if (r)
+		return r;
+
 	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
 	if (ctx == NULL)
 		return -EINVAL;
 
-	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
-			       wait->in.ring, &ring);
-	if (r) {
-		amdgpu_ctx_put(ctx);
-		return r;
-	}
+	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+	if (IS_ERR(fence))
+		r = PTR_ERR(fence);
+	else if (fence) {
+		r = fence_wait_timeout(fence, true, timeout);
+		fence_put(fence);
+	} else
+		r = 1;
 
-	r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
-	if (r) {
-		amdgpu_ctx_put(ctx);
-		return r;
-	}
-
-	r = fence_wait_timeout(&fence->base, true, timeout);
-	amdgpu_fence_unref(&fence);
 	amdgpu_ctx_put(ctx);
 	if (r < 0)
 		return r;
@@ -864,7 +960,16 @@
 		if (!reloc->bo_va)
 			continue;
 
-		list_for_each_entry(mapping, &reloc->bo_va->mappings, list) {
+		list_for_each_entry(mapping, &reloc->bo_va->valids, list) {
+			if (mapping->it.start > addr ||
+			    addr > mapping->it.last)
+				continue;
+
+			*bo = reloc->bo_va->bo;
+			return mapping;
+		}
+
+		list_for_each_entry(mapping, &reloc->bo_va->invalids, list) {
 			if (mapping->it.start > addr ||
 			    addr > mapping->it.last)
 				continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6c66ac8..20cbc4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -25,54 +25,107 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static void amdgpu_ctx_do_release(struct kref *ref)
+int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+		    struct amdgpu_ctx *ctx)
 {
-	struct amdgpu_ctx *ctx;
-	struct amdgpu_ctx_mgr *mgr;
+	unsigned i, j;
+	int r;
 
-	ctx = container_of(ref, struct amdgpu_ctx, refcount);
-	mgr = &ctx->fpriv->ctx_mgr;
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->adev = adev;
+	kref_init(&ctx->refcount);
+	spin_lock_init(&ctx->ring_lock);
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		ctx->rings[i].sequence = 1;
 
-	idr_remove(&mgr->ctx_handles, ctx->id);
-	kfree(ctx);
+	if (amdgpu_enable_scheduler) {
+		/* create context entity for each ring */
+		for (i = 0; i < adev->num_rings; i++) {
+			struct amd_sched_rq *rq;
+			if (kernel)
+				rq = &adev->rings[i]->scheduler->kernel_rq;
+			else
+				rq = &adev->rings[i]->scheduler->sched_rq;
+			r = amd_sched_entity_init(adev->rings[i]->scheduler,
+						  &ctx->rings[i].entity,
+						  rq, amdgpu_sched_jobs);
+			if (r)
+				break;
+		}
+
+		if (i < adev->num_rings) {
+			for (j = 0; j < i; j++)
+				amd_sched_entity_fini(adev->rings[j]->scheduler,
+						      &ctx->rings[j].entity);
+			/* ctx may be embedded (e.g. adev->kernel_ctx), so the
+			 * caller owns the memory; don't kfree() it here.
+			 */
+			return r;
+		}
+	}
+	return 0;
 }
 
-int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t *id, uint32_t flags)
+void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 {
-	int r;
-	struct amdgpu_ctx *ctx;
+	struct amdgpu_device *adev = ctx->adev;
+	unsigned i, j;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
+			fence_put(ctx->rings[i].fences[j]);
+
+	if (amdgpu_enable_scheduler) {
+		for (i = 0; i < adev->num_rings; i++)
+			amd_sched_entity_fini(adev->rings[i]->scheduler,
+					      &ctx->rings[i].entity);
+	}
+}
+
+static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
+			    struct amdgpu_fpriv *fpriv,
+			    uint32_t *id)
+{
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx *ctx;
+	int r;
 
 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;
 
 	mutex_lock(&mgr->lock);
-	r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL);
+	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
 	if (r < 0) {
 		mutex_unlock(&mgr->lock);
 		kfree(ctx);
 		return r;
 	}
 	*id = (uint32_t)r;
-
-	memset(ctx, 0, sizeof(*ctx));
-	ctx->id = *id;
-	ctx->fpriv = fpriv;
-	kref_init(&ctx->refcount);
+	r = amdgpu_ctx_init(adev, false, ctx);
+	if (r) {
+		idr_remove(&mgr->ctx_handles, *id);
+		*id = 0;
+		kfree(ctx);
+	}
 	mutex_unlock(&mgr->lock);
 
-	return 0;
+	return r;
 }
 
-int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id)
+static void amdgpu_ctx_do_release(struct kref *ref)
 {
 	struct amdgpu_ctx *ctx;
+
+	ctx = container_of(ref, struct amdgpu_ctx, refcount);
+
+	amdgpu_ctx_fini(ctx);
+
+	kfree(ctx);
+}
+
+static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
+{
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx *ctx;
 
 	mutex_lock(&mgr->lock);
 	ctx = idr_find(&mgr->ctx_handles, id);
 	if (ctx) {
+		idr_remove(&mgr->ctx_handles, id);
 		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
 		mutex_unlock(&mgr->lock);
 		return 0;
@@ -86,9 +139,13 @@
 			    union drm_amdgpu_ctx_out *out)
 {
 	struct amdgpu_ctx *ctx;
-	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx_mgr *mgr;
 	unsigned reset_counter;
 
+	if (!fpriv)
+		return -EINVAL;
+
+	mgr = &fpriv->ctx_mgr;
 	mutex_lock(&mgr->lock);
 	ctx = idr_find(&mgr->ctx_handles, id);
 	if (!ctx) {
@@ -97,8 +154,8 @@
 	}
 
 	/* TODO: these two are always zero */
-	out->state.flags = ctx->state.flags;
-	out->state.hangs = ctx->state.hangs;
+	out->state.flags = 0x0;
+	out->state.hangs = 0x0;
 
 	/* determine if a GPU reset has occured since the last call */
 	reset_counter = atomic_read(&adev->gpu_reset_counter);
@@ -113,28 +170,11 @@
 	return 0;
 }
 
-void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv)
-{
-	struct idr *idp;
-	struct amdgpu_ctx *ctx;
-	uint32_t id;
-	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
-	idp = &mgr->ctx_handles;
-
-	idr_for_each_entry(idp,ctx,id) {
-		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
-			DRM_ERROR("ctx (id=%ul) is still alive\n",ctx->id);
-	}
-
-	mutex_destroy(&mgr->lock);
-}
-
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *filp)
 {
 	int r;
 	uint32_t id;
-	uint32_t flags;
 
 	union drm_amdgpu_ctx *args = data;
 	struct amdgpu_device *adev = dev->dev_private;
@@ -142,15 +182,14 @@
 
 	r = 0;
 	id = args->in.ctx_id;
-	flags = args->in.flags;
 
 	switch (args->in.op) {
 		case AMDGPU_CTX_OP_ALLOC_CTX:
-			r = amdgpu_ctx_alloc(adev, fpriv, &id, flags);
+			r = amdgpu_ctx_alloc(adev, fpriv, &id);
 			args->out.alloc.ctx_id = id;
 			break;
 		case AMDGPU_CTX_OP_FREE_CTX:
-			r = amdgpu_ctx_free(adev, fpriv, id);
+			r = amdgpu_ctx_free(fpriv, id);
 			break;
 		case AMDGPU_CTX_OP_QUERY_STATE:
 			r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
@@ -165,7 +204,12 @@
 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
 {
 	struct amdgpu_ctx *ctx;
-	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx_mgr *mgr;
+
+	if (!fpriv)
+		return NULL;
+
+	mgr = &fpriv->ctx_mgr;
 
 	mutex_lock(&mgr->lock);
 	ctx = idr_find(&mgr->ctx_handles, id);
@@ -177,17 +221,86 @@
 
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 {
-	struct amdgpu_fpriv *fpriv;
-	struct amdgpu_ctx_mgr *mgr;
-
 	if (ctx == NULL)
 		return -EINVAL;
 
-	fpriv = ctx->fpriv;
-	mgr = &fpriv->ctx_mgr;
-	mutex_lock(&mgr->lock);
 	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
-	mutex_unlock(&mgr->lock);
-
 	return 0;
 }
+
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+			      struct fence *fence)
+{
+	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
+	uint64_t seq = cring->sequence;
+	unsigned idx = 0;
+	struct fence *other = NULL;
+
+	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+	other = cring->fences[idx];
+	if (other) {
+		signed long r;
+		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+		if (r < 0)
+			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+	}
+
+	fence_get(fence);
+
+	spin_lock(&ctx->ring_lock);
+	cring->fences[idx] = fence;
+	cring->sequence++;
+	spin_unlock(&ctx->ring_lock);
+
+	fence_put(other);
+
+	return seq;
+}
+
+struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+				   struct amdgpu_ring *ring, uint64_t seq)
+{
+	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
+	struct fence *fence;
+
+	spin_lock(&ctx->ring_lock);
+
+	if (seq >= cring->sequence) {
+		spin_unlock(&ctx->ring_lock);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
+		spin_unlock(&ctx->ring_lock);
+		return NULL;
+	}
+
+	fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
+	spin_unlock(&ctx->ring_lock);
+
+	return fence;
+}
+
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
+{
+	mutex_init(&mgr->lock);
+	idr_init(&mgr->ctx_handles);
+}
+
+void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
+{
+	struct amdgpu_ctx *ctx;
+	struct idr *idp;
+	uint32_t id;
+
+	idp = &mgr->ctx_handles;
+
+	idr_for_each_entry(idp, ctx, id) {
+		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
+			DRM_ERROR("ctx %p is still alive\n", ctx);
+	}
+
+	idr_destroy(&mgr->ctx_handles);
+	mutex_destroy(&mgr->lock);
+}
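
The rewritten context code keeps, per ring, only the last AMDGPU_CTX_MAX_CS_PENDING fences in a ring buffer indexed by seq % AMDGPU_CTX_MAX_CS_PENDING, which gives amdgpu_ctx_get_fence() its three outcomes: -EINVAL for a sequence number that has not been submitted, NULL for one that aged out of the window (and therefore must have signaled), and the stored fence otherwise. A runnable model of that window (size and values arbitrary):

#include <stdint.h>
#include <stdio.h>

#define MAX_PENDING 4			/* stand-in for AMDGPU_CTX_MAX_CS_PENDING */

static uint64_t sequence = 1;		/* next sequence number to hand out */
static int fences[MAX_PENDING];		/* stand-ins for struct fence * */

static uint64_t add_fence(int f)	/* models amdgpu_ctx_add_fence() */
{
	uint64_t seq = sequence++;

	fences[seq % MAX_PENDING] = f;
	return seq;
}

static int get_fence(uint64_t seq)	/* models amdgpu_ctx_get_fence() */
{
	if (seq >= sequence)
		return -1;		/* not submitted yet: -EINVAL */
	if (seq + MAX_PENDING < sequence)
		return 0;		/* slot overwritten: NULL */
	return fences[seq % MAX_PENDING];
}

int main(void)
{
	int i;

	for (i = 100; i < 106; i++)	/* hands out seq 1..6 */
		add_fence(i);
	/* too old, inside the window, in the future: prints "0 104 -1" */
	printf("%d %d %d\n", get_fence(1), get_fence(5), get_fence(9));
	return 0;
}
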
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 99f158e..6ff6ae9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -55,6 +55,7 @@
 	"MULLINS",
 	"TOPAZ",
 	"TONGA",
+	"FIJI",
 	"CARRIZO",
 	"LAST",
 };
@@ -63,7 +64,7 @@
 {
 	struct amdgpu_device *adev = dev->dev_private;
 
-	if (adev->flags & AMDGPU_IS_PX)
+	if (adev->flags & AMD_IS_PX)
 		return true;
 	return false;
 }
@@ -243,7 +244,8 @@
 
 	if (adev->vram_scratch.robj == NULL) {
 		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
-				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
+				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
 				     NULL, &adev->vram_scratch.robj);
 		if (r) {
 			return r;
@@ -1160,6 +1162,7 @@
 	switch (adev->asic_type) {
 	case CHIP_TOPAZ:
 	case CHIP_TONGA:
+	case CHIP_FIJI:
 	case CHIP_CARRIZO:
 		if (adev->asic_type == CHIP_CARRIZO)
 			adev->family = AMDGPU_FAMILY_CZ;
@@ -1377,7 +1380,7 @@
 	adev->ddev = ddev;
 	adev->pdev = pdev;
 	adev->flags = flags;
-	adev->asic_type = flags & AMDGPU_ASIC_MASK;
+	adev->asic_type = flags & AMD_ASIC_MASK;
 	adev->is_atom_bios = false;
 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
 	adev->mc.gtt_size = 512 * 1024 * 1024;
@@ -1523,6 +1526,11 @@
 		return r;
 	}
 
+	r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx);
+	if (r) {
+		dev_err(adev->dev, "failed to create kernel context (%d).\n", r);
+		return r;
+	}
 	r = amdgpu_ib_ring_tests(adev);
 	if (r)
 		DRM_ERROR("ib ring test failed (%d).\n", r);
@@ -1584,6 +1592,7 @@
 	adev->shutdown = true;
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
+	amdgpu_ctx_fini(&adev->kernel_ctx);
 	amdgpu_ib_pool_fini(adev);
 	amdgpu_fence_driver_fini(adev);
 	amdgpu_fbdev_fini(adev);
@@ -1627,8 +1636,7 @@
 	struct amdgpu_device *adev;
 	struct drm_crtc *crtc;
 	struct drm_connector *connector;
-	int i, r;
-	bool force_completion = false;
+	int r;
 
 	if (dev == NULL || dev->dev_private == NULL) {
 		return -ENODEV;
@@ -1667,21 +1675,7 @@
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
 
-	/* wait for gpu to finish processing current batch */
-	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		if (!ring)
-			continue;
-
-		r = amdgpu_fence_wait_empty(ring);
-		if (r) {
-			/* delay GPU reset to resume */
-			force_completion = true;
-		}
-	}
-	if (force_completion) {
-		amdgpu_fence_driver_force_completion(adev);
-	}
+	amdgpu_fence_driver_suspend(adev);
 
 	r = amdgpu_suspend(adev);
 
@@ -1739,6 +1733,8 @@
 
 	r = amdgpu_resume(adev);
 
+	amdgpu_fence_driver_resume(adev);
+
 	r = amdgpu_ib_ring_tests(adev);
 	if (r)
 		DRM_ERROR("ib ring test failed (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b16b925..e3d7077 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -35,6 +35,36 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
+static void amdgpu_flip_wait_fence(struct amdgpu_device *adev,
+				   struct fence **f)
+{
+	struct amdgpu_fence *fence;
+	long r;
+
+	if (*f == NULL)
+		return;
+
+	fence = to_amdgpu_fence(*f);
+	if (fence) {
+		r = fence_wait(&fence->base, false);
+		if (r == -EDEADLK) {
+			up_read(&adev->exclusive_lock);
+			r = amdgpu_gpu_reset(adev);
+			down_read(&adev->exclusive_lock);
+		}
+	} else
+		r = fence_wait(*f, false);
+
+	if (r)
+		DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r);
+
+	/* We continue with the page flip even if we failed to wait on
+	 * the fence; otherwise the DRM core and userspace will be
+	 * confused about which BO the CRTC is scanning out.
+	 */
+	fence_put(*f);
+	*f = NULL;
+}
 
 static void amdgpu_flip_work_func(struct work_struct *__work)
 {
@@ -44,34 +74,13 @@
 	struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
 
 	struct drm_crtc *crtc = &amdgpuCrtc->base;
-	struct amdgpu_fence *fence;
 	unsigned long flags;
-	int r;
+	unsigned i;
 
 	down_read(&adev->exclusive_lock);
-	if (work->fence) {
-		fence = to_amdgpu_fence(work->fence);
-		if (fence) {
-			r = amdgpu_fence_wait(fence, false);
-			if (r == -EDEADLK) {
-				up_read(&adev->exclusive_lock);
-				r = amdgpu_gpu_reset(adev);
-				down_read(&adev->exclusive_lock);
-			}
-		} else
-			r = fence_wait(work->fence, false);
-
-		if (r)
-			DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
-
-		/* We continue with the page flip even if we failed to wait on
-		 * the fence, otherwise the DRM core and userspace will be
-		 * confused about which BO the CRTC is scanning out
-		 */
-
-		fence_put(work->fence);
-		work->fence = NULL;
-	}
+	amdgpu_flip_wait_fence(adev, &work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		amdgpu_flip_wait_fence(adev, &work->shared[i]);
 
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -108,6 +117,7 @@
 		DRM_ERROR("failed to reserve buffer after flip\n");
 
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+	kfree(work->shared);
 	kfree(work);
 }
 
@@ -127,7 +137,7 @@
 	unsigned long flags;
 	u64 tiling_flags;
 	u64 base;
-	int r;
+	int i, r;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (work == NULL)
@@ -167,7 +177,19 @@
 		goto cleanup;
 	}
 
-	work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+	r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
+					      &work->shared_count,
+					      &work->shared);
+	if (unlikely(r != 0)) {
+		amdgpu_bo_unreserve(new_rbo);
+		DRM_ERROR("failed to get fences for buffer\n");
+		goto cleanup;
+	}
+
+	fence_get(work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		fence_get(work->shared[i]);
+
 	amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
 	amdgpu_bo_unreserve(new_rbo);
 
@@ -212,7 +234,10 @@
 
 cleanup:
 	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-	fence_put(work->fence);
+	fence_put(work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		fence_put(work->shared[i]);
+	kfree(work->shared);
 	kfree(work);
 
 	return r;
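
Instead of a single exclusive fence, the flip work item now carries the exclusive fence plus every shared fence from the BO's reservation object and waits on each before programming the flip. A standalone model of that structure (types are stand-ins, not the driver's):

#include <stdio.h>

struct flip_work {			/* models the reworked flip work item */
	int excl;			/* stand-in for struct fence *excl */
	unsigned int shared_count;
	int shared[8];			/* stand-in for struct fence **shared */
};

static void wait_fence(int *f)		/* models amdgpu_flip_wait_fence() */
{
	if (*f) {
		printf("waiting on fence %d\n", *f);
		*f = 0;			/* fence_put() + NULL-ing */
	}
}

int main(void)
{
	struct flip_work work = {
		.excl = 1,
		.shared_count = 2,
		.shared = { 2, 3 },
	};
	unsigned int i;

	wait_fence(&work.excl);
	for (i = 0; i < work.shared_count; i++)
		wait_fence(&work.shared[i]);
	puts("flip programmed");
	return 0;
}
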
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 56da962..0fcc0bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -44,12 +44,15 @@
 #include "amdgpu.h"
 #include "amdgpu_irq.h"
 
+#include "amdgpu_amdkfd.h"
+
 /*
  * KMS wrapper.
  * - 3.0.0 - initial driver
+ * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	0
+#define KMS_DRIVER_MINOR	1
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
@@ -61,7 +64,7 @@
 int amdgpu_hw_i2c = 0;
 int amdgpu_pcie_gen2 = -1;
 int amdgpu_msi = -1;
-int amdgpu_lockup_timeout = 10000;
+int amdgpu_lockup_timeout = 0;
 int amdgpu_dpm = -1;
 int amdgpu_smc_load_fw = 1;
 int amdgpu_aspm = -1;
@@ -73,6 +76,9 @@
 int amdgpu_vm_size = 8;
 int amdgpu_vm_block_size = -1;
 int amdgpu_exp_hw_support = 0;
+int amdgpu_enable_scheduler = 0;
+int amdgpu_sched_jobs = 16;
+int amdgpu_sched_hw_submission = 2;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -101,7 +107,7 @@
 MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(msi, amdgpu_msi, int, 0444);
 
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (defaul 10000 = 10 seconds, 0 = disable)");
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = disable)");
 module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
 
 MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
@@ -137,36 +143,45 @@
 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
 module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 
+MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable, 0 = disable (default))");
+module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
+
+MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)");
+module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
+
+MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
+module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	/* Kaveri */
-	{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
-	{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMDGPU_IS_APU},
+	{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
+	{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
 	/* Bonaire */
-	{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
-	{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
-	{0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
-	{0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMDGPU_IS_MOBILITY},
+	{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
+	{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
+	{0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
+	{0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
 	{0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
 	{0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
 	{0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
@@ -188,39 +203,39 @@
 	{0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
 	{0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
 	/* Kabini */
-	{0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
-	{0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMDGPU_IS_APU},
+	{0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
+	{0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
 	/* mullins */
-	{0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
-	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMDGPU_IS_MOBILITY|AMDGPU_IS_APU},
+	{0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 #endif
 	/* topaz */
 	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
@@ -238,12 +253,14 @@
 	{0x1002, 0x6930, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
 	{0x1002, 0x6938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
 	{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
+	/* fiji */
+	{0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
 	/* carrizo */
-	{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
-	{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
-	{0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
-	{0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
-	{0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMDGPU_IS_APU},
+	{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+	{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+	{0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+	{0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+	{0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
 
 	{0, 0, 0}
 };
@@ -279,7 +296,7 @@
 	unsigned long flags = ent->driver_data;
 	int ret;
 
-	if ((flags & AMDGPU_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
+	if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
 		DRM_INFO("This hardware requires experimental hardware support.\n"
 			 "See modparam exp_hw_support\n");
 		return -ENODEV;
@@ -527,12 +544,15 @@
 	driver->num_ioctls = amdgpu_max_kms_ioctl;
 	amdgpu_register_atpx_handler();
 
+	amdgpu_amdkfd_init();
+
 	/* let modprobe override vga console setting */
 	return drm_pci_init(driver, pdriver);
 }
 
 static void __exit amdgpu_exit(void)
 {
+	amdgpu_amdkfd_fini();
 	drm_pci_exit(driver, pdriver);
 	amdgpu_unregister_atpx_handler();
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
index cceeb33..e3a4f70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
@@ -31,7 +31,7 @@
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
 
-#include "amdgpu_family.h"
+#include "amd_shared.h"
 
 /* General customization:
  */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_family.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_family.h
deleted file mode 100644
index 0698764..0000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_family.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2008 Advanced Micro Devices, Inc.
- * Copyright 2008 Red Hat Inc.
- * Copyright 2009 Jerome Glisse.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie
- *          Alex Deucher
- *          Jerome Glisse
- */
-
-/* this file defines the CHIP_  and family flags used in the pciids,
- * its is common between kms and non-kms because duplicating it and
- * changing one place is fail.
- */
-#ifndef AMDGPU_FAMILY_H
-#define AMDGPU_FAMILY_H
-/*
- * Supported ASIC types
- */
-enum amdgpu_asic_type {
-	CHIP_BONAIRE = 0,
-	CHIP_KAVERI,
-	CHIP_KABINI,
-	CHIP_HAWAII,
-	CHIP_MULLINS,
-	CHIP_TOPAZ,
-	CHIP_TONGA,
-	CHIP_CARRIZO,
-	CHIP_LAST,
-};
-
-/*
- * Chip flags
- */
-enum amdgpu_chip_flags {
-	AMDGPU_ASIC_MASK = 0x0000ffffUL,
-	AMDGPU_FLAGS_MASK  = 0xffff0000UL,
-	AMDGPU_IS_MOBILITY = 0x00010000UL,
-	AMDGPU_IS_APU      = 0x00020000UL,
-	AMDGPU_IS_PX       = 0x00040000UL,
-	AMDGPU_EXP_HW_SUPPORT = 0x00080000UL,
-};
-
-#endif
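
The enum deleted here moves to amd_shared.h under AMD_-prefixed names (as the amdgpu_drv.h hunk above switches the include), and the pciidlist conversion suggests the layout is unchanged: the low 16 bits of driver_data carry the ASIC type, the high bits the feature flags. A quick standalone demo of that packing, assuming the AMD_* values keep the masks shown above (CHIP_KAVERI's value is illustrative):

#include <stdio.h>

#define AMD_ASIC_MASK	0x0000ffffUL
#define AMD_IS_MOBILITY	0x00010000UL
#define AMD_IS_APU	0x00020000UL

#define CHIP_KAVERI	1	/* illustrative; the real enum defines it */

int main(void)
{
	unsigned long flags = CHIP_KAVERI | AMD_IS_MOBILITY | AMD_IS_APU;

	printf("asic_type = %lu\n", flags & AMD_ASIC_MASK);	/* 1 */
	printf("mobility  = %d\n", !!(flags & AMD_IS_MOBILITY));/* 1 */
	printf("apu       = %d\n", !!(flags & AMD_IS_APU));	/* 1 */
	return 0;
}
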
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index c1645d2..8a122b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -53,9 +53,9 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -126,8 +126,8 @@
 	aligned_size = ALIGN(size, PAGE_SIZE);
 	ret = amdgpu_gem_object_create(adev, aligned_size, 0,
 				       AMDGPU_GEM_DOMAIN_VRAM,
-				       0, true,
-				       &gobj);
+				       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				       true, &gobj);
 	if (ret) {
 		printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
 		       aligned_size);
@@ -179,7 +179,6 @@
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct drm_gem_object *gobj = NULL;
 	struct amdgpu_bo *rbo = NULL;
-	struct device *device = &adev->pdev->dev;
 	int ret;
 	unsigned long tmp;
 
@@ -201,9 +200,9 @@
 	rbo = gem_to_amdgpu_bo(gobj);
 
 	/* okay we have an object now allocate the framebuffer */
-	info = framebuffer_alloc(0, device);
-	if (info == NULL) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_unref;
 	}
 
@@ -212,14 +211,13 @@
 	ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
 	if (ret) {
 		DRM_ERROR("failed to initialize framebuffer %d\n", ret);
-		goto out_unref;
+		goto out_destroy_fbi;
 	}
 
 	fb = &rfbdev->rfb.base;
 
 	/* setup helper */
 	rfbdev->helper.fb = fb;
-	rfbdev->helper.fbdev = info;
 
 	memset_io(rbo->kptr, 0x0, amdgpu_bo_size(rbo));
 
@@ -239,11 +237,6 @@
 	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
 
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out_unref;
-	}
 	info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
 	info->apertures->ranges[0].size = adev->mc.aper_size;
 
@@ -251,13 +244,7 @@
 
 	if (info->screen_base == NULL) {
 		ret = -ENOSPC;
-		goto out_unref;
-	}
-
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unref;
+		goto out_destroy_fbi;
 	}
 
 	DRM_INFO("fb mappable at 0x%lX\n",  info->fix.smem_start);
@@ -269,6 +256,8 @@
 	vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
 	return 0;
 
+out_destroy_fbi:
+	drm_fb_helper_release_fbi(helper);
 out_unref:
 	if (rbo) {
 
@@ -290,17 +279,10 @@
 
 static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
 {
-	struct fb_info *info;
 	struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
 
-	if (rfbdev->helper.fbdev) {
-		info = rfbdev->helper.fbdev;
-
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&rfbdev->helper);
+	drm_fb_helper_release_fbi(&rfbdev->helper);
 
 	if (rfb->obj) {
 		amdgpufb_destroy_pinned_object(rfb->obj);
@@ -395,7 +377,8 @@
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
 {
 	if (adev->mode_info.rfbdev)
-		fb_set_suspend(adev->mode_info.rfbdev->helper.fbdev, state);
+		drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
+			state);
 }
 
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index a7189a1..1be2bd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -126,7 +126,8 @@
 	(*fence)->ring = ring;
 	(*fence)->owner = owner;
 	fence_init(&(*fence)->base, &amdgpu_fence_ops,
-		&adev->fence_queue.lock, adev->fence_context + ring->idx,
+		&ring->fence_drv.fence_queue.lock,
+		adev->fence_context + ring->idx,
 		(*fence)->seq);
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 			       (*fence)->seq,
@@ -136,38 +137,6 @@
 }
 
 /**
- * amdgpu_fence_recreate - recreate a fence from an user fence
- *
- * @ring: ring the fence is associated with
- * @owner: creator of the fence
- * @seq: user fence sequence number
- * @fence: resulting amdgpu fence object
- *
- * Recreates a fence command from the user fence sequence number (all asics).
- * Returns 0 on success, -ENOMEM on failure.
- */
-int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
-			  uint64_t seq, struct amdgpu_fence **fence)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	if (seq > ring->fence_drv.sync_seq[ring->idx])
-		return -EINVAL;
-
-	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
-	if ((*fence) == NULL)
-		return -ENOMEM;
-
-	(*fence)->seq = seq;
-	(*fence)->ring = ring;
-	(*fence)->owner = owner;
-	fence_init(&(*fence)->base, &amdgpu_fence_ops,
-		&adev->fence_queue.lock, adev->fence_context + ring->idx,
-		(*fence)->seq);
-	return 0;
-}
-
-/**
  * amdgpu_fence_check_signaled - callback from fence_queue
  *
  * this function is called with fence_queue lock held, which is also used
@@ -196,9 +165,7 @@
 		else
 			FENCE_TRACE(&fence->base, "was already signaled\n");
 
-		amdgpu_irq_put(adev, fence->ring->fence_drv.irq_src,
-				fence->ring->fence_drv.irq_type);
-		__remove_wait_queue(&adev->fence_queue, &fence->fence_wake);
+		__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
 		fence_put(&fence->base);
 	} else
 		FENCE_TRACE(&fence->base, "pending\n");
@@ -299,14 +266,9 @@
 		return;
 	}
 
-	if (fence_drv->delayed_irq && ring->adev->ddev->irq_enabled) {
-		fence_drv->delayed_irq = false;
-		amdgpu_irq_update(ring->adev, fence_drv->irq_src,
-				fence_drv->irq_type);
+	if (amdgpu_fence_activity(ring)) {
+		wake_up_all(&ring->fence_drv.fence_queue);
 	}
-
-	if (amdgpu_fence_activity(ring))
-		wake_up_all(&ring->adev->fence_queue);
 	else if (amdgpu_ring_is_lockup(ring)) {
 		/* good news we believe it's a lockup */
 		dev_warn(ring->adev->dev, "GPU lockup (current fence id "
@@ -316,7 +278,7 @@
 
 		/* remember that we need an reset */
 		ring->adev->needs_reset = true;
-		wake_up_all(&ring->adev->fence_queue);
+		wake_up_all(&ring->fence_drv.fence_queue);
 	}
 	up_read(&ring->adev->exclusive_lock);
 }
@@ -332,62 +294,8 @@
  */
 void amdgpu_fence_process(struct amdgpu_ring *ring)
 {
-	uint64_t seq, last_seq, last_emitted;
-	unsigned count_loop = 0;
-	bool wake = false;
-
-	/* Note there is a scenario here for an infinite loop but it's
-	 * very unlikely to happen. For it to happen, the current polling
-	 * process need to be interrupted by another process and another
-	 * process needs to update the last_seq btw the atomic read and
-	 * xchg of the current process.
-	 *
-	 * More over for this to go in infinite loop there need to be
-	 * continuously new fence signaled ie amdgpu_fence_read needs
-	 * to return a different value each time for both the currently
-	 * polling process and the other process that xchg the last_seq
-	 * btw atomic read and xchg of the current process. And the
-	 * value the other process set as last seq must be higher than
-	 * the seq value we just read. Which means that current process
-	 * need to be interrupted after amdgpu_fence_read and before
-	 * atomic xchg.
-	 *
-	 * To be even more safe we count the number of time we loop and
-	 * we bail after 10 loop just accepting the fact that we might
-	 * have temporarly set the last_seq not to the true real last
-	 * seq but to an older one.
-	 */
-	last_seq = atomic64_read(&ring->fence_drv.last_seq);
-	do {
-		last_emitted = ring->fence_drv.sync_seq[ring->idx];
-		seq = amdgpu_fence_read(ring);
-		seq |= last_seq & 0xffffffff00000000LL;
-		if (seq < last_seq) {
-			seq &= 0xffffffff;
-			seq |= last_emitted & 0xffffffff00000000LL;
-		}
-
-		if (seq <= last_seq || seq > last_emitted) {
-			break;
-		}
-		/* If we loop over we don't want to return without
-		 * checking if a fence is signaled as it means that the
-		 * seq we just read is different from the previous on.
-		 */
-		wake = true;
-		last_seq = seq;
-		if ((count_loop++) > 10) {
-			/* We looped over too many time leave with the
-			 * fact that we might have set an older fence
-			 * seq then the current real last seq as signaled
-			 * by the hw.
-			 */
-			break;
-		}
-	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
-
-	if (wake)
-		wake_up_all(&ring->adev->fence_queue);
+	if (amdgpu_fence_activity(ring))
+		wake_up_all(&ring->fence_drv.fence_queue);
 }
 
 /**
@@ -447,284 +355,49 @@
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_ring *ring = fence->ring;
-	struct amdgpu_device *adev = ring->adev;
 
 	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
 		return false;
 
-	if (down_read_trylock(&adev->exclusive_lock)) {
-		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
-			ring->fence_drv.irq_type);
-		if (amdgpu_fence_activity(ring))
-			wake_up_all_locked(&adev->fence_queue);
-
-		/* did fence get signaled after we enabled the sw irq? */
-		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq) {
-			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
-				ring->fence_drv.irq_type);
-			up_read(&adev->exclusive_lock);
-			return false;
-		}
-
-		up_read(&adev->exclusive_lock);
-	} else {
-		/* we're probably in a lockup, lets not fiddle too much */
-		if (amdgpu_irq_get_delayed(adev, ring->fence_drv.irq_src,
-			ring->fence_drv.irq_type))
-			ring->fence_drv.delayed_irq = true;
-		amdgpu_fence_schedule_check(ring);
-	}
-
 	fence->fence_wake.flags = 0;
 	fence->fence_wake.private = NULL;
 	fence->fence_wake.func = amdgpu_fence_check_signaled;
-	__add_wait_queue(&adev->fence_queue, &fence->fence_wake);
+	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
 	fence_get(f);
 	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
 	return true;
 }
 
-/**
- * amdgpu_fence_signaled - check if a fence has signaled
+/*
+ * amdgpu_fence_ring_wait_seq - wait for the seq of a specific ring to signal
+ * @ring: ring to wait on for the seq number
+ * @seq: seq number to wait for
  *
- * @fence: amdgpu fence object
- *
- * Check if the requested fence has signaled (all asics).
- * Returns true if the fence has signaled or false if it has not.
+ * return value:
+ * 0: seq signaled, and the GPU is not hung
+ * -EDEADLK: GPU hang detected
+ * -EINVAL: some parameter is not valid
  */
-bool amdgpu_fence_signaled(struct amdgpu_fence *fence)
+static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
 {
-	if (!fence)
-		return true;
+	struct amdgpu_device *adev = ring->adev;
+	bool signaled = false;
 
-	if (amdgpu_fence_seq_signaled(fence->ring, fence->seq)) {
-		if (!fence_signal(&fence->base))
-			FENCE_TRACE(&fence->base, "signaled from amdgpu_fence_signaled\n");
-		return true;
-	}
+	BUG_ON(!ring);
+	if (seq > ring->fence_drv.sync_seq[ring->idx])
+		return -EINVAL;
 
-	return false;
-}
+	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
+		return 0;
 
-/**
- * amdgpu_fence_any_seq_signaled - check if any sequence number is signaled
- *
- * @adev: amdgpu device pointer
- * @seq: sequence numbers
- *
- * Check if the last signaled fence sequnce number is >= the requested
- * sequence number (all asics).
- * Returns true if any has signaled (current value is >= requested value)
- * or false if it has not. Helper function for amdgpu_fence_wait_seq.
- */
-static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
-{
-	unsigned i;
+	wait_event(ring->fence_drv.fence_queue, (
+		   (signaled = amdgpu_fence_seq_signaled(ring, seq))
+		   || adev->needs_reset));
 
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		if (!adev->rings[i] || !seq[i])
-			continue;
-
-		if (amdgpu_fence_seq_signaled(adev->rings[i], seq[i]))
-			return true;
-	}
-
-	return false;
-}
-
-/**
- * amdgpu_fence_wait_seq_timeout - wait for a specific sequence numbers
- *
- * @adev: amdgpu device pointer
- * @target_seq: sequence number(s) we want to wait for
- * @intr: use interruptable sleep
- * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
- *
- * Wait for the requested sequence number(s) to be written by any ring
- * (all asics).  Sequnce number array is indexed by ring id.
- * @intr selects whether to use interruptable (true) or non-interruptable
- * (false) sleep when waiting for the sequence number.  Helper function
- * for amdgpu_fence_wait_*().
- * Returns remaining time if the sequence number has passed, 0 when
- * the wait timeout, or an error for all other cases.
- * -EDEADLK is returned when a GPU lockup has been detected.
- */
-static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
-					  u64 *target_seq, bool intr,
-					  long timeout)
-{
-	uint64_t last_seq[AMDGPU_MAX_RINGS];
-	bool signaled;
-	int i;
-	long r;
-
-	if (timeout == 0) {
-		return amdgpu_fence_any_seq_signaled(adev, target_seq);
-	}
-
-	while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) {
-
-		/* Save current sequence values, used to check for GPU lockups */
-		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			struct amdgpu_ring *ring = adev->rings[i];
-
-			if (!ring || !target_seq[i])
-				continue;
-
-			last_seq[i] = atomic64_read(&ring->fence_drv.last_seq);
-			trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]);
-			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
-				       ring->fence_drv.irq_type);
-		}
-
-		if (intr) {
-			r = wait_event_interruptible_timeout(adev->fence_queue, (
-				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
-				 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
-		} else {
-			r = wait_event_timeout(adev->fence_queue, (
-				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
-				 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
-		}
-
-		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			struct amdgpu_ring *ring = adev->rings[i];
-
-			if (!ring || !target_seq[i])
-				continue;
-
-			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
-				       ring->fence_drv.irq_type);
-			trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]);
-		}
-
-		if (unlikely(r < 0))
-			return r;
-
-		if (unlikely(!signaled)) {
-
-			if (adev->needs_reset)
-				return -EDEADLK;
-
-			/* we were interrupted for some reason and fence
-			 * isn't signaled yet, resume waiting */
-			if (r)
-				continue;
-
-			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-				struct amdgpu_ring *ring = adev->rings[i];
-
-				if (!ring || !target_seq[i])
-					continue;
-
-				if (last_seq[i] != atomic64_read(&ring->fence_drv.last_seq))
-					break;
-			}
-
-			if (i != AMDGPU_MAX_RINGS)
-				continue;
-
-			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-				if (!adev->rings[i] || !target_seq[i])
-					continue;
-
-				if (amdgpu_ring_is_lockup(adev->rings[i]))
-					break;
-			}
-
-			if (i < AMDGPU_MAX_RINGS) {
-				/* good news we believe it's a lockup */
-				dev_warn(adev->dev, "GPU lockup (waiting for "
-					 "0x%016llx last fence id 0x%016llx on"
-					 " ring %d)\n",
-					 target_seq[i], last_seq[i], i);
-
-				/* remember that we need an reset */
-				adev->needs_reset = true;
-				wake_up_all(&adev->fence_queue);
-				return -EDEADLK;
-			}
-
-			if (timeout < MAX_SCHEDULE_TIMEOUT) {
-				timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
-				if (timeout <= 0) {
-					return 0;
-				}
-			}
-		}
-	}
-	return timeout;
-}
-
-/**
- * amdgpu_fence_wait - wait for a fence to signal
- *
- * @fence: amdgpu fence object
- * @intr: use interruptable sleep
- *
- * Wait for the requested fence to signal (all asics).
- * @intr selects whether to use interruptable (true) or non-interruptable
- * (false) sleep when waiting for the fence.
- * Returns 0 if the fence has passed, error for all other cases.
- */
-int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
-{
-	uint64_t seq[AMDGPU_MAX_RINGS] = {};
-	long r;
-
-	seq[fence->ring->idx] = fence->seq;
-	r = amdgpu_fence_wait_seq_timeout(fence->ring->adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
-	if (r < 0) {
-		return r;
-	}
-
-	r = fence_signal(&fence->base);
-	if (!r)
-		FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
-	return 0;
-}
-
-/**
- * amdgpu_fence_wait_any - wait for a fence to signal on any ring
- *
- * @adev: amdgpu device pointer
- * @fences: amdgpu fence object(s)
- * @intr: use interruptable sleep
- *
- * Wait for any requested fence to signal (all asics).  Fence
- * array is indexed by ring id.  @intr selects whether to use
- * interruptable (true) or non-interruptable (false) sleep when
- * waiting for the fences. Used by the suballocator.
- * Returns 0 if any fence has passed, error for all other cases.
- */
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
-			  struct amdgpu_fence **fences,
-			  bool intr)
-{
-	uint64_t seq[AMDGPU_MAX_RINGS];
-	unsigned i, num_rings = 0;
-	long r;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		seq[i] = 0;
-
-		if (!fences[i]) {
-			continue;
-		}
-
-		seq[i] = fences[i]->seq;
-		++num_rings;
-	}
-
-	/* nothing to wait for ? */
-	if (num_rings == 0)
-		return -ENOENT;
-
-	r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
-	if (r < 0) {
-		return r;
-	}
-	return 0;
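+	/* woken without the fence signaling: a GPU reset is pending */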
+	if (signaled)
+		return 0;
+	else
+		return -EDEADLK;
 }
 
 /**
@@ -739,19 +412,12 @@
  */
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
 {
-	uint64_t seq[AMDGPU_MAX_RINGS] = {};
-	long r;
+	uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
 
-	seq[ring->idx] = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
-	if (seq[ring->idx] >= ring->fence_drv.sync_seq[ring->idx]) {
-		/* nothing to wait for, last_seq is
-		   already the last emited fence */
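+	/* nothing to wait for: last_seq is already the last emitted fence */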
+	if (seq >= ring->fence_drv.sync_seq[ring->idx])
 		return -ENOENT;
-	}
-	r = amdgpu_fence_wait_seq_timeout(ring->adev, seq, false, MAX_SCHEDULE_TIMEOUT);
-	if (r < 0)
-		return r;
-	return 0;
+
+	return amdgpu_fence_ring_wait_seq(ring, seq);
 }
 
 /**
@@ -766,23 +432,12 @@
  */
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 {
-	struct amdgpu_device *adev = ring->adev;
-	uint64_t seq[AMDGPU_MAX_RINGS] = {};
-	long r;
+	uint64_t seq = ring->fence_drv.sync_seq[ring->idx];
 
-	seq[ring->idx] = ring->fence_drv.sync_seq[ring->idx];
-	if (!seq[ring->idx])
+	if (!seq)
 		return 0;
 
-	r = amdgpu_fence_wait_seq_timeout(adev, seq, false, MAX_SCHEDULE_TIMEOUT);
-	if (r < 0) {
-		if (r == -EDEADLK)
-			return -EDEADLK;
-
-		dev_err(adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
-			ring->idx, r);
-	}
-	return 0;
+	return amdgpu_fence_ring_wait_seq(ring, seq);
 }
 
 /**
@@ -933,9 +588,12 @@
 		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
 	}
 	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
-	ring->fence_drv.initialized = true;
+	amdgpu_irq_get(adev, irq_src, irq_type);
+
 	ring->fence_drv.irq_src = irq_src;
 	ring->fence_drv.irq_type = irq_type;
+	ring->fence_drv.initialized = true;
+
 	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
 		 "cpu addr 0x%p\n", ring->idx,
 		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
@@ -966,6 +624,16 @@
 	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
 			amdgpu_fence_check_lockup);
 	ring->fence_drv.ring = ring;
+
+	if (amdgpu_enable_scheduler) {
+		ring->scheduler = amd_sched_create(&amdgpu_sched_ops,
+						   ring->idx,
+						   amdgpu_sched_hw_submission,
+						   (void *)ring->adev);
+		if (!ring->scheduler)
+			DRM_ERROR("Failed to create scheduler on ring %d.\n",
+				  ring->idx);
+	}
 }
 
 /**
@@ -982,7 +650,6 @@
  */
 int amdgpu_fence_driver_init(struct amdgpu_device *adev)
 {
-	init_waitqueue_head(&adev->fence_queue);
 	if (amdgpu_debugfs_fence_init(adev))
 		dev_err(adev->dev, "fence debugfs file creation failed\n");
 
@@ -1011,13 +678,78 @@
 			/* no need to trigger GPU reset as we are unloading */
 			amdgpu_fence_driver_force_completion(adev);
 		}
-		wake_up_all(&adev->fence_queue);
+		wake_up_all(&ring->fence_drv.fence_queue);
+		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+			       ring->fence_drv.irq_type);
+		if (ring->scheduler)
+			amd_sched_destroy(ring->scheduler);
 		ring->fence_drv.initialized = false;
 	}
 	mutex_unlock(&adev->ring_lock);
 }
 
 /**
+ * amdgpu_fence_driver_suspend - suspend the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Suspend the fence driver for all possible rings (all asics).
+ */
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	mutex_lock(&adev->ring_lock);
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (!ring || !ring->fence_drv.initialized)
+			continue;
+
+		/* wait for the GPU to finish processing the current batch */
+		r = amdgpu_fence_wait_empty(ring);
+		if (r) {
+			/* delay the GPU reset until resume */
+			amdgpu_fence_driver_force_completion(adev);
+		}
+
+		/* disable the interrupt */
+		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+			       ring->fence_drv.irq_type);
+	}
+	mutex_unlock(&adev->ring_lock);
+}
+
+/**
+ * amdgpu_fence_driver_resume - resume the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Resume the fence driver for all possible rings (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has using
+ * amdgpu_fence_driver_start_ring().
+ */
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
+{
+	int i;
+
+	mutex_lock(&adev->ring_lock);
+	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (!ring || !ring->fence_drv.initialized)
+			continue;
+
+		/* enable the interrupt */
+		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+			       ring->fence_drv.irq_type);
+	}
+	mutex_unlock(&adev->ring_lock);
+}
+
+/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
  *
  * @adev: amdgpu device pointer
@@ -1104,6 +836,21 @@
 	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
 }
 
+static bool amdgpu_test_signaled_any(struct fence **fences, uint32_t count)
+{
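+	/* returns true as soon as any fence in the array has signaled */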
+	int idx;
+	struct fence *fence;
+
+	for (idx = 0; idx < count; ++idx) {
+		fence = fences[idx];
+		if (fence) {
+			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+				return true;
+		}
+	}
+	return false;
+}
+
 struct amdgpu_wait_cb {
 	struct fence_cb base;
 	struct task_struct *task;
@@ -1121,12 +868,48 @@
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_device *adev = fence->ring->adev;
-	struct amdgpu_wait_cb cb;
 
-	cb.task = current;
+	return amdgpu_fence_wait_any(adev, &f, 1, intr, t);
+}
 
-	if (fence_add_callback(f, &cb.base, amdgpu_fence_wait_cb))
-		return t;
+/**
+ * Wait for any fence in the array to signal, with timeout
+ *
+ * @adev:     amdgpu device
+ * @array:    array of fence pointers (NULL entries are skipped)
+ * @count:    number of entries in the array
+ * @intr:     use interruptible sleep while waiting
+ * @t:        timeout, in jiffies
+ *
+ * Returns the remaining timeout when any fence signals, 0 on timeout,
+ * or a negative error code.
+ */
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+				  struct fence **array, uint32_t count,
+				  bool intr, signed long t)
+{
+	struct amdgpu_wait_cb *cb;
+	struct fence *fence;
+	unsigned idx;
+
+	BUG_ON(!array);
+
+	cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL);
+	if (cb == NULL) {
+		t = -ENOMEM;
+		goto err_free_cb;
+	}
+
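+	/* install a wake-up callback on every fence in the array */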
+	for (idx = 0; idx < count; ++idx) {
+		fence = array[idx];
+		if (fence) {
+			cb[idx].task = current;
+			if (fence_add_callback(fence,
+					&cb[idx].base, amdgpu_fence_wait_cb)) {
+				/* The fence is already signaled */
+				goto fence_rm_cb;
+			}
+		}
+	}
 
 	while (t > 0) {
 		if (intr)
@@ -1135,10 +918,10 @@
 			set_current_state(TASK_UNINTERRUPTIBLE);
 
 		/*
-		 * amdgpu_test_signaled must be called after
+		 * amdgpu_test_signaled_any must be called after
 		 * set_current_state to prevent a race with wake_up_process
 		 */
-		if (amdgpu_test_signaled(fence))
+		if (amdgpu_test_signaled_any(array, count))
 			break;
 
 		if (adev->needs_reset) {
@@ -1153,7 +936,16 @@
 	}
 
 	__set_current_state(TASK_RUNNING);
-	fence_remove_callback(f, &cb.base);
+
+fence_rm_cb:
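+	/* only fences whose callback was actually installed have base.func set */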
+	for (idx = 0; idx < count; ++idx) {
+		fence = array[idx];
+		if (fence && cb[idx].base.func)
+			fence_remove_callback(fence, &cb[idx].base);
+	}
+
+err_free_cb:
+	kfree(cb);
 
 	return t;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index e02db0b..cbd3a48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -125,7 +125,8 @@
 
 	if (adev->gart.robj == NULL) {
 		r = amdgpu_bo_create(adev, adev->gart.table_size,
-				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
+				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
 				     NULL, &adev->gart.robj);
 		if (r) {
 			return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 4afc507..5839fab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -615,6 +615,7 @@
 		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
 		info.domains = robj->initial_domain;
 		info.domain_flags = robj->flags;
+		amdgpu_bo_unreserve(robj);
 		if (copy_to_user(out, &info, sizeof(info)))
 			r = -EFAULT;
 		break;
@@ -622,17 +623,19 @@
 	case AMDGPU_GEM_OP_SET_PLACEMENT:
 		if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
 			r = -EPERM;
+			amdgpu_bo_unreserve(robj);
 			break;
 		}
 		robj->initial_domain = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
 						      AMDGPU_GEM_DOMAIN_GTT |
 						      AMDGPU_GEM_DOMAIN_CPU);
+		amdgpu_bo_unreserve(robj);
 		break;
 	default:
+		amdgpu_bo_unreserve(robj);
 		r = -EINVAL;
 	}
 
-	amdgpu_bo_unreserve(robj);
 out:
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
@@ -653,7 +656,8 @@
 
 	r = amdgpu_gem_object_create(adev, args->size, 0,
 				     AMDGPU_GEM_DOMAIN_VRAM,
-				     0, ttm_bo_type_device,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				     ttm_bo_type_device,
 				     &gobj);
 	if (r)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index bc0fac6..c439735 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -73,28 +73,12 @@
 
 		if (!vm)
 			ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
-		else
-			ib->gpu_addr = 0;
-
-	} else {
-		ib->sa_bo = NULL;
-		ib->ptr = NULL;
-		ib->gpu_addr = 0;
 	}
 
 	amdgpu_sync_create(&ib->sync);
 
 	ib->ring = ring;
-	ib->fence = NULL;
-	ib->user = NULL;
 	ib->vm = vm;
-	ib->gds_base = 0;
-	ib->gds_size = 0;
-	ib->gws_base = 0;
-	ib->gws_size = 0;
-	ib->oa_base = 0;
-	ib->oa_size = 0;
-	ib->flags = 0;
 
 	return 0;
 }
@@ -109,8 +93,8 @@
  */
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
 {
-	amdgpu_sync_free(adev, &ib->sync, ib->fence);
-	amdgpu_sa_bo_free(adev, &ib->sa_bo, ib->fence);
+	amdgpu_sync_free(adev, &ib->sync, &ib->fence->base);
+	amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base);
 	amdgpu_fence_unref(&ib->fence);
 }
 
@@ -156,7 +140,11 @@
 		dev_err(adev->dev, "couldn't schedule ib\n");
 		return -EINVAL;
 	}
-
+	r = amdgpu_sync_wait(&ibs->sync);
+	if (r) {
+		dev_err(adev->dev, "IB sync failed (%d).\n", r);
+		return r;
+	}
 	r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
 	if (r) {
 		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
@@ -165,9 +153,11 @@
 
 	if (vm) {
 		/* grab a vm id if necessary */
-		struct amdgpu_fence *vm_id_fence = NULL;
-		vm_id_fence = amdgpu_vm_grab_id(ibs->ring, ibs->vm);
-		amdgpu_sync_fence(&ibs->sync, vm_id_fence);
+		r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
+		if (r) {
+			amdgpu_ring_unlock_undo(ring);
+			return r;
+		}
 	}
 
 	r = amdgpu_sync_rings(&ibs->sync, ring);
@@ -212,11 +202,15 @@
 		return r;
 	}
 
+	if (!amdgpu_enable_scheduler && ib->ctx)
+		ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
+						    &ib->fence->base);
+
 	/* wrap the last IB with fence */
 	if (ib->user) {
 		uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
 		addr += ib->user->offset;
-		amdgpu_ring_emit_fence(ring, addr, ib->fence->seq,
+		amdgpu_ring_emit_fence(ring, addr, ib->sequence,
 				       AMDGPU_FENCE_FLAG_64BIT);
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index db5422e..5c8a803 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -24,6 +24,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_ih.h"
+#include "amdgpu_amdkfd.h"
 
 /**
  * amdgpu_ih_ring_alloc - allocate memory for the IH ring
@@ -97,18 +98,12 @@
 			/* add 8 bytes for the rptr/wptr shadows and
 			 * add them to the end of the ring allocation.
 			 */
-			adev->irq.ih.ring = kzalloc(adev->irq.ih.ring_size + 8, GFP_KERNEL);
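+			/* a coherent DMA buffer avoids the explicit map/unmap dance */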
+			adev->irq.ih.ring = pci_alloc_consistent(adev->pdev,
+								 adev->irq.ih.ring_size + 8,
+								 &adev->irq.ih.rb_dma_addr);
 			if (adev->irq.ih.ring == NULL)
 				return -ENOMEM;
-			adev->irq.ih.rb_dma_addr = pci_map_single(adev->pdev,
-								  (void *)adev->irq.ih.ring,
-								  adev->irq.ih.ring_size,
-								  PCI_DMA_BIDIRECTIONAL);
-			if (pci_dma_mapping_error(adev->pdev, adev->irq.ih.rb_dma_addr)) {
-				dev_err(&adev->pdev->dev, "Failed to DMA MAP the IH RB page\n");
-				kfree((void *)adev->irq.ih.ring);
-				return -ENOMEM;
-			}
+			memset((void *)adev->irq.ih.ring, 0, adev->irq.ih.ring_size + 8);
 			adev->irq.ih.wptr_offs = (adev->irq.ih.ring_size / 4) + 0;
 			adev->irq.ih.rptr_offs = (adev->irq.ih.ring_size / 4) + 1;
 		}
@@ -148,9 +143,9 @@
 			/* add 8 bytes for the rptr/wptr shadows and
 			 * add them to the end of the ring allocation.
 			 */
-			pci_unmap_single(adev->pdev, adev->irq.ih.rb_dma_addr,
-					 adev->irq.ih.ring_size + 8, PCI_DMA_BIDIRECTIONAL);
-			kfree((void *)adev->irq.ih.ring);
+			pci_free_consistent(adev->pdev, adev->irq.ih.ring_size + 8,
+					    (void *)adev->irq.ih.ring,
+					    adev->irq.ih.rb_dma_addr);
 			adev->irq.ih.ring = NULL;
 		}
 	} else {
@@ -199,6 +194,14 @@
 	rmb();
 
 	while (adev->irq.ih.rptr != wptr) {
+		u32 ring_index = adev->irq.ih.rptr >> 2;
+
+		/* Before dispatching irq to IP blocks, send it to amdkfd */
+		amdgpu_amdkfd_interrupt(adev,
+				(const void *) &adev->irq.ih.ring[ring_index]);
+
+		entry.iv_entry = (const uint32_t *)
+			&adev->irq.ih.ring[ring_index];
 		amdgpu_ih_decode_iv(adev, &entry);
 		adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index c62b09e..ba38ae6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -52,6 +52,7 @@
 	unsigned ring_id;
 	unsigned vm_id;
 	unsigned pas_id;
+	const uint32_t *iv_entry;
 };
 
 int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index b4d36f0..0aba8e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -272,6 +272,11 @@
 
 		kfree(src->enabled_types);
 		src->enabled_types = NULL;
+		if (src->data) {
+			kfree(src->data);
+			kfree(src);
+			adev->irq.sources[i] = NULL;
+		}
 	}
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 8299795..17b01aef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -40,6 +40,7 @@
 	unsigned				num_types;
 	atomic_t				*enabled_types;
 	const struct amdgpu_irq_src_funcs	*funcs;
+	void *data;
 };
 
 /* provided by interrupt generating IP blocks */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 3bfe67d..2236793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -34,6 +34,7 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+#include "amdgpu_amdkfd.h"
 
 #if defined(CONFIG_VGA_SWITCHEROO)
 bool amdgpu_has_atpx(void);
@@ -61,6 +62,8 @@
 
 	pm_runtime_get_sync(dev->dev);
 
+	amdgpu_amdkfd_device_fini(adev);
+
 	amdgpu_acpi_fini(adev);
 
 	amdgpu_device_fini(adev);
@@ -93,8 +96,8 @@
 
 	if ((amdgpu_runtime_pm != 0) &&
 	    amdgpu_has_atpx() &&
-	    ((flags & AMDGPU_IS_APU) == 0))
-		flags |= AMDGPU_IS_PX;
+	    ((flags & AMD_IS_APU) == 0))
+		flags |= AMD_IS_PX;
 
 	/* amdgpu_device_init should report only fatal error
 	 * like memory allocation failure or iomapping failure,
@@ -118,6 +121,10 @@
 				"Error during ACPI methods call\n");
 	}
 
+	amdgpu_amdkfd_load_interface(adev);
+	amdgpu_amdkfd_device_probe(adev);
+	amdgpu_amdkfd_device_init(adev);
+
 	if (amdgpu_device_is_px(dev)) {
 		pm_runtime_use_autosuspend(dev->dev);
 		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
@@ -444,11 +451,11 @@
 		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
 		dev_info._pad = 0;
 		dev_info.ids_flags = 0;
-		if (adev->flags & AMDGPU_IS_APU)
+		if (adev->flags & AMD_IS_APU)
 			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
 		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
 		dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
-		dev_info.virtual_address_alignment = max(PAGE_SIZE, 0x10000UL);
+		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
 		dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) *
 					     AMDGPU_GPU_PAGE_SIZE;
 		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
@@ -520,10 +527,7 @@
 	mutex_init(&fpriv->bo_list_lock);
 	idr_init(&fpriv->bo_list_handles);
 
-	/* init context manager */
-	mutex_init(&fpriv->ctx_mgr.lock);
-	idr_init(&fpriv->ctx_mgr.ctx_handles);
-	fpriv->ctx_mgr.adev = adev;
+	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
 
 	file_priv->driver_priv = fpriv;
 
@@ -556,6 +560,8 @@
 	if (!fpriv)
 		return;
 
+	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+
 	amdgpu_vm_fini(adev, &fpriv->vm);
 
 	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
@@ -564,9 +570,6 @@
 	idr_destroy(&fpriv->bo_list_handles);
 	mutex_destroy(&fpriv->bo_list_lock);
 
-	/* release context */
-	amdgpu_ctx_fini(fpriv);
-
 	kfree(fpriv);
 	file_priv->driver_priv = NULL;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8da6424..08b09d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -127,7 +127,7 @@
 			placements[c].fpfn =
 				adev->mc.visible_vram_size >> PAGE_SHIFT;
 			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-				TTM_PL_FLAG_VRAM;
+				TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
 		}
 		placements[c].fpfn = 0;
 		placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
@@ -223,18 +223,6 @@
 	size_t acc_size;
 	int r;
 
-	/* VI has a hw bug where VM PTEs have to be allocated in groups of 8.
-	 * do this as a temporary workaround
-	 */
-	if (!(domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
-		if (adev->asic_type >= CHIP_TOPAZ) {
-			if (byte_align & 0x7fff)
-				byte_align = ALIGN(byte_align, 0x8000);
-			if (size & 0x7fff)
-				size = ALIGN(size, 0x8000);
-		}
-	}
-
 	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
 	size = ALIGN(size, PAGE_SIZE);
 
@@ -462,7 +450,7 @@
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
 {
 	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
-	if (0 && (adev->flags & AMDGPU_IS_APU)) {
+	if (0 && (adev->flags & AMD_IS_APU)) {
 		/* Useless to evict on IGP chips */
 		return 0;
 	}
@@ -478,7 +466,6 @@
 	}
 	dev_err(adev->dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
-		mutex_lock(&adev->ddev->struct_mutex);
 		dev_err(adev->dev, "%p %p %lu %lu force free\n",
 			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
 			*((unsigned long *)&bo->gem_base.refcount));
@@ -486,8 +473,7 @@
 		list_del_init(&bo->list);
 		mutex_unlock(&bo->adev->gem.mutex);
 		/* this should unref the ttm bo */
-		drm_gem_object_unreference(&bo->gem_base);
-		mutex_unlock(&adev->ddev->struct_mutex);
+		drm_gem_object_unreference_unlocked(&bo->gem_base);
 	}
 }
 
@@ -658,13 +644,13 @@
  * @shared: true if fence should be added shared
  *
  */
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
 		     bool shared)
 {
 	struct reservation_object *resv = bo->tbo.resv;
 
 	if (shared)
-		reservation_object_add_shared_fence(resv, &fence->base);
+		reservation_object_add_shared_fence(resv, fence);
 	else
-		reservation_object_add_excl_fence(resv, &fence->base);
+		reservation_object_add_excl_fence(resv, fence);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 675bdc3..6ea18dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -161,7 +161,7 @@
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *new_mem);
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
 		     bool shared);
 
 /*
@@ -193,7 +193,7 @@
 			    unsigned size, unsigned align);
 void amdgpu_sa_bo_free(struct amdgpu_device *adev,
 			      struct amdgpu_sa_bo **sa_bo,
-			      struct amdgpu_fence *fence);
+			      struct fence *fence);
 #if defined(CONFIG_DEBUG_FS)
 void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 					 struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index ed13baa..efed115 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -82,7 +82,7 @@
 	mutex_unlock(&adev->pm.mutex);
 
 	/* Can't set dpm state when the card is off */
-	if (!(adev->flags & AMDGPU_IS_PX) ||
+	if (!(adev->flags & AMD_IS_PX) ||
 	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
 		amdgpu_pm_compute_clocks(adev);
 fail:
@@ -538,7 +538,7 @@
 		/* vce just modifies an existing state so force a change */
 		if (ps->vce_active != adev->pm.dpm.vce_active)
 			goto force;
-		if (adev->flags & AMDGPU_IS_APU) {
+		if (adev->flags & AMD_IS_APU) {
 			/* for APUs if the num crtcs changed but state is the same,
 			 * all we need to do is update the display configuration.
 			 */
@@ -580,7 +580,6 @@
 		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
 	}
 
-	mutex_lock(&adev->ddev->struct_mutex);
 	mutex_lock(&adev->ring_lock);
 
 	/* update whether vce is active */
@@ -628,7 +627,6 @@
 
 done:
 	mutex_unlock(&adev->ring_lock);
-	mutex_unlock(&adev->ddev->struct_mutex);
 }
 
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 855e219..9bec914 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -131,6 +131,21 @@
 	return 0;
 }
 
+/**
+ * amdgpu_ring_insert_nop - insert NOP packets
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @count: the number of NOP packets to insert
+ *
+ * This is the generic insert_nop function for rings except SDMA
+ */
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	int i;
+
+	for (i = 0; i < count; i++)
+		amdgpu_ring_write(ring, ring->nop);
+}
+
 /**
  * amdgpu_ring_commit - tell the GPU to execute the new
  * commands on the ring buffer
@@ -143,10 +158,13 @@
  */
 void amdgpu_ring_commit(struct amdgpu_ring *ring)
 {
+	uint32_t count;
+
 	/* We pad to match fetch size */
-	while (ring->wptr & ring->align_mask) {
-		amdgpu_ring_write(ring, ring->nop);
-	}
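+	/* NOPs needed to reach the next aligned wptr, zero if already aligned */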
+	count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
+	count %= ring->align_mask + 1;
+	ring->funcs->insert_nop(ring, count);
+
 	mb();
 	amdgpu_ring_set_wptr(ring);
 }
@@ -342,6 +360,8 @@
 		amdgpu_fence_driver_init_ring(ring);
 	}
 
+	init_waitqueue_head(&ring->fence_drv.fence_queue);
+
 	r = amdgpu_wb_get(adev, &ring->rptr_offs);
 	if (r) {
 		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
@@ -367,7 +387,7 @@
 	}
 	ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
 	ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
-
+	spin_lock_init(&ring->fence_lock);
 	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
 	if (r) {
 		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index eb20987..74dad27 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -139,6 +139,20 @@
 	return r;
 }
 
+static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
+{
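+	/* scheduler fences know their scheduler, HW fences know their ring */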
+	struct amdgpu_fence *a_fence;
+	struct amd_sched_fence *s_fence;
+
+	s_fence = to_amd_sched_fence(f);
+	if (s_fence)
+		return s_fence->scheduler->ring_id;
+	a_fence = to_amdgpu_fence(f);
+	if (a_fence)
+		return a_fence->ring->idx;
+	return 0;
+}
+
 static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
 {
 	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
@@ -147,7 +161,7 @@
 	}
 	list_del_init(&sa_bo->olist);
 	list_del_init(&sa_bo->flist);
-	amdgpu_fence_unref(&sa_bo->fence);
+	fence_put(sa_bo->fence);
 	kfree(sa_bo);
 }
 
@@ -160,7 +174,8 @@
 
 	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
 	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
-		if (sa_bo->fence == NULL || !amdgpu_fence_signaled(sa_bo->fence)) {
+		if (sa_bo->fence == NULL ||
+		    !fence_is_signaled(sa_bo->fence)) {
 			return;
 		}
 		amdgpu_sa_bo_remove_locked(sa_bo);
@@ -245,7 +260,7 @@
 }
 
 static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
-				   struct amdgpu_fence **fences,
+				   struct fence **fences,
 				   unsigned *tries)
 {
 	struct amdgpu_sa_bo *best_bo = NULL;
@@ -274,7 +289,7 @@
 		sa_bo = list_first_entry(&sa_manager->flist[i],
 					 struct amdgpu_sa_bo, flist);
 
-		if (!amdgpu_fence_signaled(sa_bo->fence)) {
+		if (!fence_is_signaled(sa_bo->fence)) {
 			fences[i] = sa_bo->fence;
 			continue;
 		}
@@ -298,7 +313,8 @@
 	}
 
 	if (best_bo) {
-		++tries[best_bo->fence->ring->idx];
+		uint32_t idx = amdgpu_sa_get_ring_from_fence(best_bo->fence);
+		++tries[idx];
 		sa_manager->hole = best_bo->olist.prev;
 
 		/* we knew that this one is signaled,
@@ -314,9 +330,10 @@
 		     struct amdgpu_sa_bo **sa_bo,
 		     unsigned size, unsigned align)
 {
-	struct amdgpu_fence *fences[AMDGPU_MAX_RINGS];
+	struct fence *fences[AMDGPU_MAX_RINGS];
 	unsigned tries[AMDGPU_MAX_RINGS];
 	int i, r;
+	signed long t;
 
 	BUG_ON(align > sa_manager->align);
 	BUG_ON(size > sa_manager->size);
@@ -350,7 +367,9 @@
 		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
 
 		spin_unlock(&sa_manager->wq.lock);
-		r = amdgpu_fence_wait_any(adev, fences, false);
+		t = amdgpu_fence_wait_any(adev, fences, AMDGPU_MAX_RINGS,
+					  false, MAX_SCHEDULE_TIMEOUT);
+		r = (t > 0) ? 0 : t;
 		spin_lock(&sa_manager->wq.lock);
 		/* if we have nothing to wait for block */
 		if (r == -ENOENT) {
@@ -369,7 +388,7 @@
 }
 
 void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
-		       struct amdgpu_fence *fence)
+		       struct fence *fence)
 {
 	struct amdgpu_sa_manager *sa_manager;
 
@@ -379,10 +398,11 @@
 
 	sa_manager = (*sa_bo)->manager;
 	spin_lock(&sa_manager->wq.lock);
-	if (fence && !amdgpu_fence_signaled(fence)) {
-		(*sa_bo)->fence = amdgpu_fence_ref(fence);
-		list_add_tail(&(*sa_bo)->flist,
-			      &sa_manager->flist[fence->ring->idx]);
+	if (fence && !fence_is_signaled(fence)) {
+		uint32_t idx;
+		(*sa_bo)->fence = fence_get(fence);
+		idx = amdgpu_sa_get_ring_from_fence(fence);
+		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
 	} else {
 		amdgpu_sa_bo_remove_locked(*sa_bo);
 	}
@@ -409,8 +429,16 @@
 		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
 			   soffset, eoffset, eoffset - soffset);
 		if (i->fence) {
-			seq_printf(m, " protected by 0x%016llx on ring %d",
-				   i->fence->seq, i->fence->ring->idx);
+			struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence);
+			struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
+			if (a_fence)
+				seq_printf(m, " protected by 0x%016llx on ring %d",
+					   a_fence->seq, a_fence->ring->idx);
+			if (s_fence)
+				seq_printf(m, " protected by 0x%016x on ring %d",
+					   s_fence->base.seqno,
+					   s_fence->scheduler->ring_id);
+
 		}
 		seq_printf(m, "\n");
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
new file mode 100644
index 0000000..de98fbd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
+{
+	struct amdgpu_job *sched_job = (struct amdgpu_job *)job;
+	return amdgpu_sync_get_fence(&sched_job->ibs->sync);
+}
+
+static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
+{
+	struct amdgpu_job *sched_job;
+	struct amdgpu_fence *fence;
+	int r;
+
+	if (!job) {
+		DRM_ERROR("job is null\n");
+		return NULL;
+	}
+	sched_job = (struct amdgpu_job *)job;
+	mutex_lock(&sched_job->job_lock);
+	r = amdgpu_ib_schedule(sched_job->adev,
+			       sched_job->num_ibs,
+			       sched_job->ibs,
+			       sched_job->base.owner);
+	if (r)
+		goto err;
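+	/* take a reference: free_job below may release the IBs owning the fence */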
+	fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
+
+	if (sched_job->free_job)
+		sched_job->free_job(sched_job);
+
+	mutex_unlock(&sched_job->job_lock);
+	return &fence->base;
+
+err:
+	DRM_ERROR("Run job error\n");
+	mutex_unlock(&sched_job->job_lock);
+	job->sched->ops->process_job(job);
+	return NULL;
+}
+
+static void amdgpu_sched_process_job(struct amd_sched_job *job)
+{
+	struct amdgpu_job *sched_job;
+
+	if (!job) {
+		DRM_ERROR("job is null\n");
+		return;
+	}
+	sched_job = (struct amdgpu_job *)job;
+	/* after processing job, free memory */
+	fence_put(&sched_job->base.s_fence->base);
+	kfree(sched_job);
+}
+
+struct amd_sched_backend_ops amdgpu_sched_ops = {
+	.dependency = amdgpu_sched_dependency,
+	.run_job = amdgpu_sched_run_job,
+	.process_job = amdgpu_sched_process_job
+};
+
+int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
+					 struct amdgpu_ring *ring,
+					 struct amdgpu_ib *ibs,
+					 unsigned num_ibs,
+					 int (*free_job)(struct amdgpu_job *),
+					 void *owner,
+					 struct fence **f)
+{
+	int r = 0;
+	if (amdgpu_enable_scheduler) {
+		struct amdgpu_job *job =
+			kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+		if (!job)
+			return -ENOMEM;
+		job->base.sched = ring->scheduler;
+		job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+		job->adev = adev;
+		job->ibs = ibs;
+		job->num_ibs = num_ibs;
+		job->base.owner = owner;
+		mutex_init(&job->job_lock);
+		job->free_job = free_job;
+		mutex_lock(&job->job_lock);
+		r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+		if (r) {
+			mutex_unlock(&job->job_lock);
+			kfree(job);
+			return r;
+		}
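+		/* give the caller its own reference before dropping the job lock */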
+		*f = fence_get(&job->base.s_fence->base);
+		mutex_unlock(&job->job_lock);
+	} else {
+		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
+		if (r)
+			return r;
+		*f = fence_get(&ibs[num_ibs - 1].fence->base);
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
index d6d41a4..ff3ca52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
@@ -87,7 +87,7 @@
 
 void amdgpu_semaphore_free(struct amdgpu_device *adev,
 			   struct amdgpu_semaphore **semaphore,
-			   struct amdgpu_fence *fence)
+			   struct fence *fence)
 {
 	if (semaphore == NULL || *semaphore == NULL) {
 		return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 21accbd..068aeaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -32,6 +32,11 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
+struct amdgpu_sync_entry {
+	struct hlist_node	node;
+	struct fence		*fence;
+};
+
 /**
  * amdgpu_sync_create - zero init sync object
  *
@@ -49,36 +54,104 @@
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		sync->sync_to[i] = NULL;
 
+	hash_init(sync->fences);
 	sync->last_vm_update = NULL;
 }
 
+static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
+{
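+	/* the fence is either a HW fence or a scheduler fence, check both */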
+	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+	if (a_fence)
+		return a_fence->ring->adev == adev;
+	if (s_fence)
+		return (struct amdgpu_device *)s_fence->scheduler->priv == adev;
+	return false;
+}
+
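+/* check whether a fence belongs to the given owner */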
+static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
+{
+	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+	if (s_fence)
+		return s_fence->owner == owner;
+	if (a_fence)
+		return a_fence->owner == owner;
+	return false;
+}
+
 /**
- * amdgpu_sync_fence - use the semaphore to sync to a fence
+ * amdgpu_sync_fence - remember to sync to this fence
  *
  * @sync: sync object to add fence to
  * @fence: fence to sync to
  *
- * Sync to the fence using the semaphore objects
  */
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
-		       struct amdgpu_fence *fence)
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		      struct fence *f)
 {
+	struct amdgpu_sync_entry *e;
+	struct amdgpu_fence *fence;
 	struct amdgpu_fence *other;
+	struct fence *tmp, *later;
 
-	if (!fence)
-		return;
+	if (!f)
+		return 0;
+
+	if (amdgpu_sync_same_dev(adev, f) &&
+	    amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM)) {
+		if (sync->last_vm_update) {
+			tmp = sync->last_vm_update;
+			BUG_ON(f->context != tmp->context);
+			later = (f->seqno - tmp->seqno <= INT_MAX) ? f : tmp;
+			sync->last_vm_update = fence_get(later);
+			fence_put(tmp);
+		} else
+			sync->last_vm_update = fence_get(f);
+	}
+
+	fence = to_amdgpu_fence(f);
+	if (!fence || fence->ring->adev != adev) {
+		hash_for_each_possible(sync->fences, e, node, f->context) {
+			struct fence *new;
+			if (unlikely(e->fence->context != f->context))
+				continue;
+			new = fence_get(fence_later(e->fence, f));
+			if (new) {
+				fence_put(e->fence);
+				e->fence = new;
+			}
+			return 0;
+		}
+
+		e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
+		if (!e)
+			return -ENOMEM;
+
+		hash_add(sync->fences, &e->node, f->context);
+		e->fence = fence_get(f);
+		return 0;
+	}
 
 	other = sync->sync_to[fence->ring->idx];
 	sync->sync_to[fence->ring->idx] = amdgpu_fence_ref(
 		amdgpu_fence_later(fence, other));
 	amdgpu_fence_unref(&other);
 
-	if (fence->owner == AMDGPU_FENCE_OWNER_VM) {
-		other = sync->last_vm_update;
-		sync->last_vm_update = amdgpu_fence_ref(
-			amdgpu_fence_later(fence, other));
-		amdgpu_fence_unref(&other);
-	}
+	return 0;
+}
+
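+/* return the owner recorded on a fence, or UNDEFINED when unknown */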
+static void *amdgpu_sync_get_owner(struct fence *f)
+{
+	struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
+	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+	if (s_fence)
+		return s_fence->owner;
+	else if (a_fence)
+		return a_fence->owner;
+	return AMDGPU_FENCE_OWNER_UNDEFINED;
 }
 
 /**
@@ -97,7 +170,7 @@
 {
 	struct reservation_object_list *flist;
 	struct fence *f;
-	struct amdgpu_fence *fence;
+	void *fence_owner;
 	unsigned i;
 	int r = 0;
 
@@ -106,11 +179,7 @@
 
 	/* always sync to the exclusive fence */
 	f = reservation_object_get_excl(resv);
-	fence = f ? to_amdgpu_fence(f) : NULL;
-	if (fence && fence->ring->adev == adev)
-		amdgpu_sync_fence(sync, fence);
-	else if (f)
-		r = fence_wait(f, true);
+	r = amdgpu_sync_fence(adev, sync, f);
 
 	flist = reservation_object_get_list(resv);
 	if (!flist || r)
@@ -119,20 +188,72 @@
 	for (i = 0; i < flist->shared_count; ++i) {
 		f = rcu_dereference_protected(flist->shared[i],
 					      reservation_object_held(resv));
-		fence = f ? to_amdgpu_fence(f) : NULL;
-		if (fence && fence->ring->adev == adev) {
-			if (fence->owner != owner ||
-			    fence->owner == AMDGPU_FENCE_OWNER_UNDEFINED)
-				amdgpu_sync_fence(sync, fence);
-		} else if (f) {
-			r = fence_wait(f, true);
-			if (r)
-				break;
+		if (amdgpu_sync_same_dev(adev, f)) {
+			/* VM updates are only interesting
+			 * for other VM updates and moves.
+			 */
+			fence_owner = amdgpu_sync_get_owner(f);
+			if ((owner != AMDGPU_FENCE_OWNER_MOVE) &&
+			    (fence_owner != AMDGPU_FENCE_OWNER_MOVE) &&
+			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
+			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
+				continue;
+
+			/* Ignore fences from the same owner as
+			 * long as the owner isn't undefined.
+			 */
+			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
+			    fence_owner == owner)
+				continue;
 		}
+
+		r = amdgpu_sync_fence(adev, sync, f);
+		if (r)
+			break;
 	}
 	return r;
 }
 
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
+{
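+	/* drop signaled fences until an unsignaled one is found */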
+	struct amdgpu_sync_entry *e;
+	struct hlist_node *tmp;
+	struct fence *f;
+	int i;
+
+	hash_for_each_safe(sync->fences, i, tmp, e, node) {
+
+		f = e->fence;
+
+		hash_del(&e->node);
+		kfree(e);
+
+		if (!fence_is_signaled(f))
+			return f;
+
+		fence_put(f);
+	}
+	return NULL;
+}
+
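+/* block until every collected fence has signaled */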
+int amdgpu_sync_wait(struct amdgpu_sync *sync)
+{
+	struct amdgpu_sync_entry *e;
+	struct hlist_node *tmp;
+	int i, r;
+
+	hash_for_each_safe(sync->fences, i, tmp, e, node) {
+		r = fence_wait(e->fence, false);
+		if (r)
+			return r;
+
+		hash_del(&e->node);
+		fence_put(e->fence);
+		kfree(e);
+	}
+	return 0;
+}
+
 /**
  * amdgpu_sync_rings - sync ring to all registered fences
  *
@@ -164,9 +285,9 @@
 			return -EINVAL;
 		}
 
-		if (count >= AMDGPU_NUM_SYNCS) {
+		if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
 			/* not enough room, wait manually */
-			r = amdgpu_fence_wait(fence, false);
+			r = fence_wait(&fence->base, false);
 			if (r)
 				return r;
 			continue;
@@ -186,7 +307,7 @@
 		if (!amdgpu_semaphore_emit_signal(other, semaphore)) {
 			/* signaling wasn't successful wait manually */
 			amdgpu_ring_undo(other);
-			r = amdgpu_fence_wait(fence, false);
+			r = fence_wait(&fence->base, false);
 			if (r)
 				return r;
 			continue;
@@ -196,7 +317,7 @@
 		if (!amdgpu_semaphore_emit_wait(ring, semaphore)) {
 			/* waiting wasn't successful wait manually */
 			amdgpu_ring_undo(other);
-			r = amdgpu_fence_wait(fence, false);
+			r = fence_wait(&fence->base, false);
 			if (r)
 				return r;
 			continue;
@@ -220,15 +341,23 @@
  */
 void amdgpu_sync_free(struct amdgpu_device *adev,
 		      struct amdgpu_sync *sync,
-		      struct amdgpu_fence *fence)
+		      struct fence *fence)
 {
+	struct amdgpu_sync_entry *e;
+	struct hlist_node *tmp;
 	unsigned i;
 
+	hash_for_each_safe(sync->fences, i, tmp, e, node) {
+		hash_del(&e->node);
+		fence_put(e->fence);
+		kfree(e);
+	}
+
 	for (i = 0; i < AMDGPU_NUM_SYNCS; ++i)
 		amdgpu_semaphore_free(adev, &sync->semaphores[i], fence);
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		amdgpu_fence_unref(&sync->sync_to[i]);
 
-	amdgpu_fence_unref(&sync->last_vm_update);
+	fence_put(sync->last_vm_update);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index df20299..f80b1a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -77,7 +77,7 @@
 		void *gtt_map, *vram_map;
 		void **gtt_start, **gtt_end;
 		void **vram_start, **vram_end;
-		struct amdgpu_fence *fence = NULL;
+		struct fence *fence = NULL;
 
 		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
@@ -116,13 +116,13 @@
 			goto out_lclean_unpin;
 		}
 
-		r = amdgpu_fence_wait(fence, false);
+		r = fence_wait(fence, false);
 		if (r) {
 			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
 			goto out_lclean_unpin;
 		}
 
-		amdgpu_fence_unref(&fence);
+		fence_put(fence);
 
 		r = amdgpu_bo_kmap(vram_obj, &vram_map);
 		if (r) {
@@ -161,13 +161,13 @@
 			goto out_lclean_unpin;
 		}
 
-		r = amdgpu_fence_wait(fence, false);
+		r = fence_wait(fence, false);
 		if (r) {
 			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
 			goto out_lclean_unpin;
 		}
 
-		amdgpu_fence_unref(&fence);
+		fence_put(fence);
 
 		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
 		if (r) {
@@ -214,7 +214,7 @@
 			amdgpu_bo_unref(&gtt_obj[i]);
 		}
 		if (fence)
-			amdgpu_fence_unref(&fence);
+			fence_put(fence);
 		break;
 	}
 
@@ -238,7 +238,7 @@
 
 static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
 					     struct amdgpu_ring *ring,
-					     struct amdgpu_fence **fence)
+					     struct fence **fence)
 {
 	uint32_t handle = ring->idx ^ 0xdeafbeef;
 	int r;
@@ -269,15 +269,16 @@
 			DRM_ERROR("Failed to get dummy destroy msg\n");
 			return r;
 		}
-
 	} else {
+		struct amdgpu_fence *a_fence = NULL;
 		r = amdgpu_ring_lock(ring, 64);
 		if (r) {
 			DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
 			return r;
 		}
-		amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+		amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, &a_fence);
 		amdgpu_ring_unlock_commit(ring);
+		*fence = &a_fence->base;
 	}
 	return 0;
 }
@@ -286,7 +287,7 @@
 			   struct amdgpu_ring *ringA,
 			   struct amdgpu_ring *ringB)
 {
-	struct amdgpu_fence *fence1 = NULL, *fence2 = NULL;
+	struct fence *fence1 = NULL, *fence2 = NULL;
 	struct amdgpu_semaphore *semaphore = NULL;
 	int r;
 
@@ -322,7 +323,7 @@
 
 	mdelay(1000);
 
-	if (amdgpu_fence_signaled(fence1)) {
+	if (fence_is_signaled(fence1)) {
 		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
 		goto out_cleanup;
 	}
@@ -335,7 +336,7 @@
 	amdgpu_semaphore_emit_signal(ringB, semaphore);
 	amdgpu_ring_unlock_commit(ringB);
 
-	r = amdgpu_fence_wait(fence1, false);
+	r = fence_wait(fence1, false);
 	if (r) {
 		DRM_ERROR("Failed to wait for sync fence 1\n");
 		goto out_cleanup;
@@ -343,7 +344,7 @@
 
 	mdelay(1000);
 
-	if (amdgpu_fence_signaled(fence2)) {
+	if (fence_is_signaled(fence2)) {
 		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
 		goto out_cleanup;
 	}
@@ -356,7 +357,7 @@
 	amdgpu_semaphore_emit_signal(ringB, semaphore);
 	amdgpu_ring_unlock_commit(ringB);
 
-	r = amdgpu_fence_wait(fence2, false);
+	r = fence_wait(fence2, false);
 	if (r) {
 		DRM_ERROR("Failed to wait for sync fence 1\n");
 		goto out_cleanup;
@@ -366,10 +367,10 @@
 	amdgpu_semaphore_free(adev, &semaphore, NULL);
 
 	if (fence1)
-		amdgpu_fence_unref(&fence1);
+		fence_put(fence1);
 
 	if (fence2)
-		amdgpu_fence_unref(&fence2);
+		fence_put(fence2);
 
 	if (r)
 		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
@@ -380,7 +381,7 @@
 			    struct amdgpu_ring *ringB,
 			    struct amdgpu_ring *ringC)
 {
-	struct amdgpu_fence *fenceA = NULL, *fenceB = NULL;
+	struct fence *fenceA = NULL, *fenceB = NULL;
 	struct amdgpu_semaphore *semaphore = NULL;
 	bool sigA, sigB;
 	int i, r;
@@ -416,11 +417,11 @@
 
 	mdelay(1000);
 
-	if (amdgpu_fence_signaled(fenceA)) {
+	if (fence_is_signaled(fenceA)) {
 		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
 		goto out_cleanup;
 	}
-	if (amdgpu_fence_signaled(fenceB)) {
+	if (fence_is_signaled(fenceB)) {
 		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
 		goto out_cleanup;
 	}
@@ -435,8 +436,8 @@
 
 	for (i = 0; i < 30; ++i) {
 		mdelay(100);
-		sigA = amdgpu_fence_signaled(fenceA);
-		sigB = amdgpu_fence_signaled(fenceB);
+		sigA = fence_is_signaled(fenceA);
+		sigB = fence_is_signaled(fenceB);
 		if (sigA || sigB)
 			break;
 	}
@@ -461,12 +462,12 @@
 
 	mdelay(1000);
 
-	r = amdgpu_fence_wait(fenceA, false);
+	r = fence_wait(fenceA, false);
 	if (r) {
 		DRM_ERROR("Failed to wait for sync fence A\n");
 		goto out_cleanup;
 	}
-	r = amdgpu_fence_wait(fenceB, false);
+	r = fence_wait(fenceB, false);
 	if (r) {
 		DRM_ERROR("Failed to wait for sync fence B\n");
 		goto out_cleanup;
@@ -476,10 +477,10 @@
 	amdgpu_semaphore_free(adev, &semaphore, NULL);
 
 	if (fenceA)
-		amdgpu_fence_unref(&fenceA);
+		fence_put(fenceA);
 
 	if (fenceB)
-		amdgpu_fence_unref(&fenceB);
+		fence_put(fenceB);
 
 	if (r)
 		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dd3415d..b5abd5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -228,7 +228,7 @@
 	struct amdgpu_device *adev;
 	struct amdgpu_ring *ring;
 	uint64_t old_start, new_start;
-	struct amdgpu_fence *fence;
+	struct fence *fence;
 	int r;
 
 	adev = amdgpu_get_adev(bo->bdev);
@@ -269,9 +269,9 @@
 			       new_mem->num_pages * PAGE_SIZE, /* bytes */
 			       bo->resv, &fence);
 	/* FIXME: handle copy error */
-	r = ttm_bo_move_accel_cleanup(bo, &fence->base,
+	r = ttm_bo_move_accel_cleanup(bo, fence,
 				      evict, no_wait_gpu, new_mem);
-	amdgpu_fence_unref(&fence);
+	fence_put(fence);
 	return r;
 }
 
@@ -859,7 +859,8 @@
 	amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
 	r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0,
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
 			     NULL, &adev->stollen_vga_memory);
 	if (r) {
 		return r;
@@ -987,46 +988,48 @@
 		       uint64_t dst_offset,
 		       uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct amdgpu_fence **fence)
+		       struct fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_sync sync;
 	uint32_t max_bytes;
 	unsigned num_loops, num_dw;
+	struct amdgpu_ib *ib;
 	unsigned i;
 	int r;
 
-	/* sync other rings */
-	amdgpu_sync_create(&sync);
-	if (resv) {
-		r = amdgpu_sync_resv(adev, &sync, resv, false);
-		if (r) {
-			DRM_ERROR("sync failed (%d).\n", r);
-			amdgpu_sync_free(adev, &sync, NULL);
-			return r;
-		}
-	}
-
 	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
 	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
 	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
 
-	/* for fence and sync */
-	num_dw += 64 + AMDGPU_NUM_SYNCS * 8;
+	/* for IB padding */
+	while (num_dw & 0x7)
+		num_dw++;
 
-	r = amdgpu_ring_lock(ring, num_dw);
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
+
+	r = amdgpu_ib_get(ring, NULL, num_dw * 4, ib);
 	if (r) {
-		DRM_ERROR("ring lock failed (%d).\n", r);
-		amdgpu_sync_free(adev, &sync, NULL);
+		kfree(ib);
 		return r;
 	}
 
-	amdgpu_sync_rings(&sync, ring);
+	ib->length_dw = 0;
+
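+	/* serialize against all fences currently attached to the BO */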
+	if (resv) {
+		r = amdgpu_sync_resv(adev, &ib->sync, resv,
+				     AMDGPU_FENCE_OWNER_UNDEFINED);
+		if (r) {
+			DRM_ERROR("sync failed (%d).\n", r);
+			goto error_free;
+		}
+	}
 
 	for (i = 0; i < num_loops; i++) {
 		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
 
-		amdgpu_emit_copy_buffer(adev, ring, src_offset, dst_offset,
+		amdgpu_emit_copy_buffer(adev, ib, src_offset, dst_offset,
 					cur_size_in_bytes);
 
 		src_offset += cur_size_in_bytes;
@@ -1034,17 +1037,24 @@
 		byte_count -= cur_size_in_bytes;
 	}
 
-	r = amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_MOVE, fence);
-	if (r) {
-		amdgpu_ring_unlock_undo(ring);
-		amdgpu_sync_free(adev, &sync, NULL);
-		return r;
+	amdgpu_vm_pad_ib(adev, ib);
+	WARN_ON(ib->length_dw > num_dw);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vm_free_job,
+						 AMDGPU_FENCE_OWNER_MOVE,
+						 fence);
+	if (r)
+		goto error_free;
+
+	if (!amdgpu_enable_scheduler) {
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
 	}
-
-	amdgpu_ring_unlock_commit(ring);
-	amdgpu_sync_free(adev, &sync, *fence);
-
 	return 0;
+error_free:
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
+	return r;
 }
 
 #if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index f5c2255..2cf6c6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -52,6 +52,7 @@
 #endif
 #define FIRMWARE_TONGA		"amdgpu/tonga_uvd.bin"
 #define FIRMWARE_CARRIZO	"amdgpu/carrizo_uvd.bin"
+#define FIRMWARE_FIJI		"amdgpu/fiji_uvd.bin"
 
 /**
  * amdgpu_uvd_cs_ctx - Command submission parser context
@@ -81,6 +82,7 @@
 #endif
 MODULE_FIRMWARE(FIRMWARE_TONGA);
 MODULE_FIRMWARE(FIRMWARE_CARRIZO);
+MODULE_FIRMWARE(FIRMWARE_FIJI);
 
 static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
@@ -116,6 +118,9 @@
 	case CHIP_TONGA:
 		fw_name = FIRMWARE_TONGA;
 		break;
+	case CHIP_FIJI:
+		fw_name = FIRMWARE_FIJI;
+		break;
 	case CHIP_CARRIZO:
 		fw_name = FIRMWARE_CARRIZO;
 		break;
@@ -149,7 +154,9 @@
 	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
 		 +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
 	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->uvd.vcpu_bo);
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     NULL, &adev->uvd.vcpu_bo);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
 		return r;
@@ -216,31 +223,32 @@
 
 int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
-	unsigned size;
-	void *ptr;
-	const struct common_firmware_header *hdr;
-	int i;
+	struct amdgpu_ring *ring = &adev->uvd.ring;
+	int i, r;
 
 	if (adev->uvd.vcpu_bo == NULL)
 		return 0;
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
-		if (atomic_read(&adev->uvd.handles[i]))
-			break;
+	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
+		if (handle != 0) {
+			struct fence *fence;
 
-	if (i == AMDGPU_MAX_UVD_HANDLES)
-		return 0;
+			amdgpu_uvd_note_usage(adev);
 
-	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
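+			/* tear down the open session and wait for the destroy msg to finish */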
+			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
+			if (r) {
+				DRM_ERROR("Error destroying UVD (%d)!\n", r);
+				continue;
+			}
 
-	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
-	size -= le32_to_cpu(hdr->ucode_size_bytes);
+			fence_wait(fence, false);
+			fence_put(fence);
 
-	ptr = adev->uvd.cpu_addr;
-	ptr += le32_to_cpu(hdr->ucode_size_bytes);
-
-	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
-	memcpy(adev->uvd.saved_bo, ptr, size);
+			adev->uvd.filp[i] = NULL;
+			atomic_set(&adev->uvd.handles[i], 0);
+		}
+	}
 
 	return 0;
 }
@@ -265,12 +273,7 @@
 	ptr = adev->uvd.cpu_addr;
 	ptr += le32_to_cpu(hdr->ucode_size_bytes);
 
-	if (adev->uvd.saved_bo != NULL) {
-		memcpy(ptr, adev->uvd.saved_bo, size);
-		kfree(adev->uvd.saved_bo);
-		adev->uvd.saved_bo = NULL;
-	} else
-		memset(ptr, 0, size);
+	memset(ptr, 0, size);
 
 	return 0;
 }
@@ -283,7 +286,7 @@
 	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
 		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
 		if (handle != 0 && adev->uvd.filp[i] == filp) {
-			struct amdgpu_fence *fence;
+			struct fence *fence;
 
 			amdgpu_uvd_note_usage(adev);
 
@@ -293,8 +296,8 @@
 				continue;
 			}
 
-			amdgpu_fence_wait(fence, false);
-			amdgpu_fence_unref(&fence);
+			fence_wait(fence, false);
+			fence_put(fence);
 
 			adev->uvd.filp[i] = NULL;
 			atomic_set(&adev->uvd.handles[i], 0);
@@ -374,7 +377,8 @@
 	unsigned height_in_mb = ALIGN(height / 16, 2);
 	unsigned fs_in_mb = width_in_mb * height_in_mb;
 
-	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer, min_ctx_size;
+	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
+	unsigned min_ctx_size = 0;
 
 	image_size = width * height;
 	image_size += image_size / 2;
@@ -507,28 +511,25 @@
 {
 	struct amdgpu_device *adev = ctx->parser->adev;
 	int32_t *msg, msg_type, handle;
-	struct fence *f;
 	void *ptr;
-
-	int i, r;
+	long r;
+	int i;
 
 	if (offset & 0x3F) {
 		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
 		return -EINVAL;
 	}
 
-	f = reservation_object_get_excl(bo->tbo.resv);
-	if (f) {
-		r = amdgpu_fence_wait((struct amdgpu_fence *)f, false);
-		if (r) {
-			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
-			return r;
-		}
+	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
+						MAX_SCHEDULE_TIMEOUT);
+	if (r < 0) {
+		DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
+		return r;
 	}
 
 	r = amdgpu_bo_kmap(bo, &ptr);
 	if (r) {
-		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
+		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
 		return r;
 	}
 
@@ -803,14 +804,24 @@
 	return 0;
 }
 
+static int amdgpu_uvd_free_job(struct amdgpu_job *sched_job)
+{
+	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
+	kfree(sched_job->ibs);
+	return 0;
+}
+
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
 			       struct amdgpu_bo *bo,
-			       struct amdgpu_fence **fence)
+			       struct fence **fence)
 {
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct list_head head;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib = NULL;
+	struct fence *f = NULL;
+	struct amdgpu_device *adev = ring->adev;
 	uint64_t addr;
 	int i, r;
 
@@ -832,34 +843,49 @@
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 	if (r)
 		goto err;
-
-	r = amdgpu_ib_get(ring, NULL, 64, &ib);
-	if (r)
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib) {
+		r = -ENOMEM;
 		goto err;
+	}
+	r = amdgpu_ib_get(ring, NULL, 64, ib);
+	if (r)
+		goto err1;
 
 	addr = amdgpu_bo_gpu_offset(bo);
-	ib.ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
-	ib.ptr[1] = addr;
-	ib.ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
-	ib.ptr[3] = addr >> 32;
-	ib.ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
-	ib.ptr[5] = 0;
+	ib->ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
+	ib->ptr[1] = addr;
+	ib->ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
+	ib->ptr[3] = addr >> 32;
+	ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
+	ib->ptr[5] = 0;
 	for (i = 6; i < 16; ++i)
-		ib.ptr[i] = PACKET2(0);
-	ib.length_dw = 16;
+		ib->ptr[i] = PACKET2(0);
+	ib->length_dw = 16;
 
-	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_uvd_free_job,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
 	if (r)
-		goto err;
-	ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
+		goto err2;
+
+	ttm_eu_fence_buffer_objects(&ticket, &head, f);
 
 	if (fence)
-		*fence = amdgpu_fence_ref(ib.fence);
-
-	amdgpu_ib_free(ring->adev, &ib);
+		*fence = fence_get(f);
 	amdgpu_bo_unref(&bo);
-	return 0;
+	fence_put(f);
+	if (amdgpu_enable_scheduler)
+		return 0;
 
+	amdgpu_ib_free(ring->adev, ib);
+	kfree(ib);
+	return 0;
+err2:
+	amdgpu_ib_free(ring->adev, ib);
+err1:
+	kfree(ib);
 err:
 	ttm_eu_backoff_reservation(&ticket, &head);
 	return r;
@@ -869,7 +895,7 @@
   crash the vcpu so just try to emit a dummy create/destroy msg to
    avoid this */
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct amdgpu_fence **fence)
+			      struct fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo;
@@ -877,7 +903,9 @@
 	int r, i;
 
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     NULL, &bo);
 	if (r)
 		return r;
 
@@ -916,7 +944,7 @@
 }
 
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       struct amdgpu_fence **fence)
+			       struct fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo;
@@ -924,7 +952,9 @@
 	int r, i;
 
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     NULL, &bo);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 2255aa7..1724c2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -29,9 +29,9 @@
 int amdgpu_uvd_suspend(struct amdgpu_device *adev);
 int amdgpu_uvd_resume(struct amdgpu_device *adev);
 int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct amdgpu_fence **fence);
+			      struct fence **fence);
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       struct amdgpu_fence **fence);
+			       struct fence **fence);
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
 			     struct drm_file *filp);
 int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index d3ca730..3cab96c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -48,6 +48,7 @@
 #endif
 #define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
 #define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
+#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
 
 #ifdef CONFIG_DRM_AMDGPU_CIK
 MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -58,6 +59,7 @@
 #endif
 MODULE_FIRMWARE(FIRMWARE_TONGA);
 MODULE_FIRMWARE(FIRMWARE_CARRIZO);
+MODULE_FIRMWARE(FIRMWARE_FIJI);
 
 static void amdgpu_vce_idle_work_handler(struct work_struct *work);
 
@@ -101,6 +103,9 @@
 	case CHIP_CARRIZO:
 		fw_name = FIRMWARE_CARRIZO;
 		break;
+	case CHIP_FIJI:
+		fw_name = FIRMWARE_FIJI;
+		break;
 
 	default:
 		return -EINVAL;
@@ -136,7 +141,9 @@
 	/* allocate firmware, stack and heap BO */
 
 	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->vce.vcpu_bo);
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			     NULL, &adev->vce.vcpu_bo);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
 		return r;
@@ -334,6 +341,14 @@
 	}
 }
 
+static int amdgpu_vce_free_job(
+	struct amdgpu_job *sched_job)
+{
+	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
+	kfree(sched_job->ibs);
+	return 0;
+}
+
 /**
  * amdgpu_vce_get_create_msg - generate a VCE create msg
  *
@@ -345,59 +360,69 @@
  * Open up a stream for HW test
  */
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct amdgpu_fence **fence)
+			      struct fence **fence)
 {
 	const unsigned ib_size_dw = 1024;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib = NULL;
+	struct fence *f = NULL;
+	struct amdgpu_device *adev = ring->adev;
 	uint64_t dummy;
 	int i, r;
 
-	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
+	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+		kfree(ib);
 		return r;
 	}
 
-	dummy = ib.gpu_addr + 1024;
+	dummy = ib->gpu_addr + 1024;
 
 	/* stitch together a VCE create msg */
-	ib.length_dw = 0;
-	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
-	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
-	ib.ptr[ib.length_dw++] = handle;
+	ib->length_dw = 0;
+	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
+	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
+	ib->ptr[ib->length_dw++] = handle;
 
-	ib.ptr[ib.length_dw++] = 0x00000030; /* len */
-	ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
-	ib.ptr[ib.length_dw++] = 0x00000000;
-	ib.ptr[ib.length_dw++] = 0x00000042;
-	ib.ptr[ib.length_dw++] = 0x0000000a;
-	ib.ptr[ib.length_dw++] = 0x00000001;
-	ib.ptr[ib.length_dw++] = 0x00000080;
-	ib.ptr[ib.length_dw++] = 0x00000060;
-	ib.ptr[ib.length_dw++] = 0x00000100;
-	ib.ptr[ib.length_dw++] = 0x00000100;
-	ib.ptr[ib.length_dw++] = 0x0000000c;
-	ib.ptr[ib.length_dw++] = 0x00000000;
+	ib->ptr[ib->length_dw++] = 0x00000030; /* len */
+	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
+	ib->ptr[ib->length_dw++] = 0x00000000;
+	ib->ptr[ib->length_dw++] = 0x00000042;
+	ib->ptr[ib->length_dw++] = 0x0000000a;
+	ib->ptr[ib->length_dw++] = 0x00000001;
+	ib->ptr[ib->length_dw++] = 0x00000080;
+	ib->ptr[ib->length_dw++] = 0x00000060;
+	ib->ptr[ib->length_dw++] = 0x00000100;
+	ib->ptr[ib->length_dw++] = 0x00000100;
+	ib->ptr[ib->length_dw++] = 0x0000000c;
+	ib->ptr[ib->length_dw++] = 0x00000000;
 
-	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
-	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
-	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
-	ib.ptr[ib.length_dw++] = dummy;
-	ib.ptr[ib.length_dw++] = 0x00000001;
+	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
+	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
+	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+	ib->ptr[ib->length_dw++] = dummy;
+	ib->ptr[ib->length_dw++] = 0x00000001;
 
-	for (i = ib.length_dw; i < ib_size_dw; ++i)
-		ib.ptr[i] = 0x0;
+	for (i = ib->length_dw; i < ib_size_dw; ++i)
+		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-	}
-
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vce_free_job,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err;
 	if (fence)
-		*fence = amdgpu_fence_ref(ib.fence);
-
-	amdgpu_ib_free(ring->adev, &ib);
-
+		*fence = fence_get(f);
+	fence_put(f);
+	if (amdgpu_enable_scheduler)
+		return 0;
+err:
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
 	return r;
 }
 
@@ -412,49 +437,59 @@
  * Close up a stream for HW test or if userspace failed to do so
  */
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       struct amdgpu_fence **fence)
+			       struct fence **fence)
 {
 	const unsigned ib_size_dw = 1024;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib = NULL;
+	struct fence *f = NULL;
+	struct amdgpu_device *adev = ring->adev;
 	uint64_t dummy;
 	int i, r;
 
-	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
+
+	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
 	if (r) {
+		kfree(ib);
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
 		return r;
 	}
 
-	dummy = ib.gpu_addr + 1024;
+	dummy = ib->gpu_addr + 1024;
 
 	/* stitch together a VCE destroy msg */
-	ib.length_dw = 0;
-	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
-	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
-	ib.ptr[ib.length_dw++] = handle;
+	ib->length_dw = 0;
+	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
+	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
+	ib->ptr[ib->length_dw++] = handle;
 
-	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
-	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
-	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
-	ib.ptr[ib.length_dw++] = dummy;
-	ib.ptr[ib.length_dw++] = 0x00000001;
+	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
+	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
+	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+	ib->ptr[ib->length_dw++] = dummy;
+	ib->ptr[ib->length_dw++] = 0x00000001;
 
-	ib.ptr[ib.length_dw++] = 0x00000008; /* len */
-	ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */
+	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
+	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */
 
-	for (i = ib.length_dw; i < ib_size_dw; ++i)
-		ib.ptr[i] = 0x0;
-
-	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
-	if (r) {
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-	}
-
+	for (i = ib->length_dw; i < ib_size_dw; ++i)
+		ib->ptr[i] = 0x0;
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vce_free_job,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err;
 	if (fence)
-		*fence = amdgpu_fence_ref(ib.fence);
-
-	amdgpu_ib_free(ring->adev, &ib);
-
+		*fence = fence_get(f);
+	fence_put(f);
+	if (amdgpu_enable_scheduler)
+		return 0;
+err:
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
 	return r;
 }
 
@@ -800,9 +835,13 @@
  */
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
 {
-	struct amdgpu_fence *fence = NULL;
+	struct fence *fence = NULL;
 	int r;
 
+	/* skip vce ring1 ib test for now, since it's not reliable */
+	if (ring == &ring->adev->vce.ring[1])
+		return 0;
+
 	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
@@ -815,13 +854,13 @@
 		goto error;
 	}
 
-	r = amdgpu_fence_wait(fence, false);
+	r = fence_wait(fence, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 	} else {
 		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
 	}
 error:
-	amdgpu_fence_unref(&fence);
+	fence_put(fence);
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 7ccdb59..ba2da8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -29,9 +29,9 @@
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-			      struct amdgpu_fence **fence);
+			      struct fence **fence);
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       struct amdgpu_fence **fence);
+			       struct fence **fence);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9a4e3b6..f68b7cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -127,16 +127,16 @@
 /**
  * amdgpu_vm_grab_id - allocate the next free VMID
  *
- * @ring: ring we want to submit job to
  * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
  *
- * Allocate an id for the vm (cayman+).
- * Returns the fence we need to sync to (if any).
+ * Allocate an id for the vm, adding fences to the sync obj as necessary.
  *
- * Global and local mutex must be locked!
+ * Global mutex must be locked!
  */
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
-				       struct amdgpu_vm *vm)
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync)
 {
 	struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
@@ -148,7 +148,7 @@
 	/* check if the id is still valid */
 	if (vm_id->id && vm_id->last_id_use &&
 	    vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
-		return NULL;
+		return 0;
 
 	/* we definitely need to flush */
 	vm_id->pd_gpu_addr = ~0ll;
@@ -161,7 +161,7 @@
 			/* found a free one */
 			vm_id->id = i;
 			trace_amdgpu_vm_grab_id(i, ring->idx);
-			return NULL;
+			return 0;
 		}
 
 		if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
@@ -172,15 +172,19 @@
 
 	for (i = 0; i < 2; ++i) {
 		if (choices[i]) {
+			struct amdgpu_fence *fence;
+
+			fence  = adev->vm_manager.active[choices[i]];
 			vm_id->id = choices[i];
+
 			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
-			return adev->vm_manager.active[choices[i]];
+			return amdgpu_sync_fence(ring->adev, sync, &fence->base);
 		}
 	}
 
 	/* should never happen */
 	BUG();
-	return NULL;
+	return -EINVAL;
 }
 
 /**
@@ -196,17 +200,29 @@
  */
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		     struct amdgpu_vm *vm,
-		     struct amdgpu_fence *updates)
+		     struct fence *updates)
 {
 	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
+	struct fence *flushed_updates = vm_id->flushed_updates;
+	bool is_earlier = false;
 
-	if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
-	    amdgpu_fence_is_earlier(vm_id->flushed_updates, updates)) {
+	if (flushed_updates && updates) {
+		BUG_ON(flushed_updates->context != updates->context);
+		is_earlier = (updates->seqno - flushed_updates->seqno <=
+			      INT_MAX) ? true : false;
+	}
+
+	if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
+	    is_earlier) {
 
 		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
-		amdgpu_fence_unref(&vm_id->flushed_updates);
-		vm_id->flushed_updates = amdgpu_fence_ref(updates);
+		if (is_earlier) {
+			vm_id->flushed_updates = fence_get(updates);
+			fence_put(flushed_updates);
+		}
+		if (!flushed_updates)
+			vm_id->flushed_updates = fence_get(updates);
 		vm_id->pd_gpu_addr = pd_addr;
 		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
 	}
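
The is_earlier test above is serial-number arithmetic: for two fences from the same context, an unsigned difference of at most INT_MAX means the first seqno was issued no later than the second, and the test stays correct across 32-bit seqno wraparound. A self-contained check of that comparison (the helper name is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative helper: true if seqno 'a' was issued no later than 'b',
 * assuming both fences share one context. Unsigned subtraction keeps
 * the test correct across 32-bit wraparound. */
static bool seqno_is_earlier(uint32_t a, uint32_t b)
{
	return (uint32_t)(b - a) <= INT32_MAX;
}

int main(void)
{
	printf("%d\n", seqno_is_earlier(10, 20));          /* 1: plain order */
	printf("%d\n", seqno_is_earlier(0xFFFFFFFEu, 2));  /* 1: wraps around */
	printf("%d\n", seqno_is_earlier(20, 10));          /* 0: reversed */
	return 0;
}
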
@@ -300,6 +316,15 @@
 	}
 }
 
+int amdgpu_vm_free_job(struct amdgpu_job *sched_job)
+{
+	int i;
+	for (i = 0; i < sched_job->num_ibs; i++)
+		amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
+	kfree(sched_job->ibs);
+	return 0;
+}
+
 /**
  * amdgpu_vm_clear_bo - initially clear the page dir/table
  *
@@ -310,7 +335,8 @@
 			      struct amdgpu_bo *bo)
 {
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
-	struct amdgpu_ib ib;
+	struct fence *fence = NULL;
+	struct amdgpu_ib *ib;
 	unsigned entries;
 	uint64_t addr;
 	int r;
@@ -330,24 +356,33 @@
 	addr = amdgpu_bo_gpu_offset(bo);
 	entries = amdgpu_bo_size(bo) / 8;
 
-	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, &ib);
-	if (r)
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
 		goto error_unreserve;
 
-	ib.length_dw = 0;
-
-	amdgpu_vm_update_pages(adev, &ib, addr, 0, entries, 0, 0, 0);
-	amdgpu_vm_pad_ib(adev, &ib);
-	WARN_ON(ib.length_dw > 64);
-
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
+	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
 	if (r)
 		goto error_free;
 
-	amdgpu_bo_fence(bo, ib.fence, true);
+	ib->length_dw = 0;
 
+	amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
+	amdgpu_vm_pad_ib(adev, ib);
+	WARN_ON(ib->length_dw > 64);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vm_free_job,
+						 AMDGPU_FENCE_OWNER_VM,
+						 &fence);
+	if (!r)
+		amdgpu_bo_fence(bo, fence, true);
+	fence_put(fence);
+	if (amdgpu_enable_scheduler) {
+		amdgpu_bo_unreserve(bo);
+		return 0;
+	}
 error_free:
-	amdgpu_ib_free(adev, &ib);
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
 
 error_unreserve:
 	amdgpu_bo_unreserve(bo);
@@ -400,7 +435,9 @@
 	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
 	uint64_t last_pde = ~0, last_pt = ~0;
 	unsigned count = 0, pt_idx, ndw;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib;
+	struct fence *fence = NULL;
+
 	int r;
 
 	/* padding, etc. */
@@ -413,10 +450,14 @@
 	if (ndw > 0xfffff)
 		return -ENOMEM;
 
-	r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
+
+	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
 	if (r)
 		return r;
-	ib.length_dw = 0;
+	ib->length_dw = 0;
 
 	/* walk over the address space and update the page directory */
 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -436,7 +477,7 @@
 		    ((last_pt + incr * count) != pt)) {
 
 			if (count) {
-				amdgpu_vm_update_pages(adev, &ib, last_pde,
+				amdgpu_vm_update_pages(adev, ib, last_pde,
 						       last_pt, count, incr,
 						       AMDGPU_PTE_VALID, 0);
 			}
@@ -450,23 +491,37 @@
 	}
 
 	if (count)
-		amdgpu_vm_update_pages(adev, &ib, last_pde, last_pt, count,
+		amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
 				       incr, AMDGPU_PTE_VALID, 0);
 
-	if (ib.length_dw != 0) {
-		amdgpu_vm_pad_ib(adev, &ib);
-		amdgpu_sync_resv(adev, &ib.sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
-		WARN_ON(ib.length_dw > ndw);
-		r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
-		if (r) {
-			amdgpu_ib_free(adev, &ib);
-			return r;
-		}
-		amdgpu_bo_fence(pd, ib.fence, true);
+	if (ib->length_dw != 0) {
+		amdgpu_vm_pad_ib(adev, ib);
+		amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+		WARN_ON(ib->length_dw > ndw);
+		r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+							 &amdgpu_vm_free_job,
+							 AMDGPU_FENCE_OWNER_VM,
+							 &fence);
+		if (r)
+			goto error_free;
+
+		amdgpu_bo_fence(pd, fence, true);
+		fence_put(vm->page_directory_fence);
+		vm->page_directory_fence = fence_get(fence);
+		fence_put(fence);
 	}
-	amdgpu_ib_free(adev, &ib);
+
+	if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
+	}
 
 	return 0;
+
+error_free:
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
+	return r;
 }
 
 /**
@@ -572,9 +627,14 @@
 {
 	uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
 	uint64_t last_pte = ~0, last_dst = ~0;
+	void *owner = AMDGPU_FENCE_OWNER_VM;
 	unsigned count = 0;
 	uint64_t addr;
 
+	/* sync to everything on unmapping */
+	if (!(flags & AMDGPU_PTE_VALID))
+		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+
 	/* walk over the address space and update the page tables */
 	for (addr = start; addr < end; ) {
 		uint64_t pt_idx = addr >> amdgpu_vm_block_size;
@@ -583,8 +643,7 @@
 		uint64_t pte;
 		int r;
 
-		amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv,
-				 AMDGPU_FENCE_OWNER_VM);
+		amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
 		r = reservation_object_reserve_shared(pt->tbo.resv);
 		if (r)
 			return r;
@@ -640,7 +699,7 @@
  */
 static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
 				uint64_t start, uint64_t end,
-				struct amdgpu_fence *fence)
+				struct fence *fence)
 {
 	unsigned i;
 
@@ -670,12 +729,13 @@
 				       struct amdgpu_vm *vm,
 				       struct amdgpu_bo_va_mapping *mapping,
 				       uint64_t addr, uint32_t gtt_flags,
-				       struct amdgpu_fence **fence)
+				       struct fence **fence)
 {
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
 	unsigned nptes, ncmds, ndw;
 	uint32_t flags = gtt_flags;
-	struct amdgpu_ib ib;
+	struct amdgpu_ib *ib;
+	struct fence *f = NULL;
 	int r;
 
 	/* normally, bo_va->flags only contains the READABLE and WRITEABLE bits; those go here
@@ -722,46 +782,54 @@
 	if (ndw > 0xfffff)
 		return -ENOMEM;
 
-	r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
-	if (r)
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
+
+	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
+	if (r) {
+		kfree(ib);
 		return r;
-	ib.length_dw = 0;
-
-	if (!(flags & AMDGPU_PTE_VALID)) {
-		unsigned i;
-
-		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			struct amdgpu_fence *f = vm->ids[i].last_id_use;
-			amdgpu_sync_fence(&ib.sync, f);
-		}
 	}
 
-	r = amdgpu_vm_update_ptes(adev, vm, &ib, mapping->it.start,
+	ib->length_dw = 0;
+
+	r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
 				  mapping->it.last + 1, addr + mapping->offset,
 				  flags, gtt_flags);
 
 	if (r) {
-		amdgpu_ib_free(adev, &ib);
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
 		return r;
 	}
 
-	amdgpu_vm_pad_ib(adev, &ib);
-	WARN_ON(ib.length_dw > ndw);
+	amdgpu_vm_pad_ib(adev, ib);
+	WARN_ON(ib->length_dw > ndw);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vm_free_job,
+						 AMDGPU_FENCE_OWNER_VM,
+						 &f);
+	if (r)
+		goto error_free;
 
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
-	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		return r;
-	}
 	amdgpu_vm_fence_pts(vm, mapping->it.start,
-			    mapping->it.last + 1, ib.fence);
+			    mapping->it.last + 1, f);
 	if (fence) {
-		amdgpu_fence_unref(fence);
-		*fence = amdgpu_fence_ref(ib.fence);
+		fence_put(*fence);
+		*fence = fence_get(f);
 	}
-	amdgpu_ib_free(adev, &ib);
-
+	fence_put(f);
+	if (!amdgpu_enable_scheduler) {
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
+	}
 	return 0;
+
+error_free:
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
+	return r;
 }
 
 /**
@@ -794,21 +862,25 @@
 		addr = 0;
 	}
 
-	if (addr == bo_va->addr)
-		return 0;
-
 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
 
-	list_for_each_entry(mapping, &bo_va->mappings, list) {
+	spin_lock(&vm->status_lock);
+	if (!list_empty(&bo_va->vm_status))
+		list_splice_init(&bo_va->valids, &bo_va->invalids);
+	spin_unlock(&vm->status_lock);
+
+	list_for_each_entry(mapping, &bo_va->invalids, list) {
 		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
 						flags, &bo_va->last_pt_update);
 		if (r)
 			return r;
 	}
 
-	bo_va->addr = addr;
 	spin_lock(&vm->status_lock);
+	list_splice_init(&bo_va->invalids, &bo_va->valids);
 	list_del_init(&bo_va->vm_status);
+	if (!mem)
+		list_add(&bo_va->vm_status, &vm->cleared);
 	spin_unlock(&vm->status_lock);
 
 	return 0;
@@ -861,7 +933,7 @@
 			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
 {
 	struct amdgpu_bo_va *bo_va = NULL;
-	int r;
+	int r = 0;
 
 	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->invalidated)) {
@@ -878,8 +950,9 @@
 	spin_unlock(&vm->status_lock);
 
 	if (bo_va)
-		amdgpu_sync_fence(sync, bo_va->last_pt_update);
-	return 0;
+		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
+
+	return r;
 }
 
 /**
@@ -907,10 +980,10 @@
 	}
 	bo_va->vm = vm;
 	bo_va->bo = bo;
-	bo_va->addr = 0;
 	bo_va->ref_count = 1;
 	INIT_LIST_HEAD(&bo_va->bo_list);
-	INIT_LIST_HEAD(&bo_va->mappings);
+	INIT_LIST_HEAD(&bo_va->valids);
+	INIT_LIST_HEAD(&bo_va->invalids);
 	INIT_LIST_HEAD(&bo_va->vm_status);
 
 	mutex_lock(&vm->mutex);
@@ -999,12 +1072,10 @@
 	mapping->offset = offset;
 	mapping->flags = flags;
 
-	list_add(&mapping->list, &bo_va->mappings);
+	list_add(&mapping->list, &bo_va->invalids);
 	interval_tree_insert(&mapping->it, &vm->va);
 	trace_amdgpu_vm_bo_map(bo_va, mapping);
 
-	bo_va->addr = 0;
-
 	/* Make sure the page tables are allocated */
 	saddr >>= amdgpu_vm_block_size;
 	eaddr >>= amdgpu_vm_block_size;
@@ -1028,7 +1099,9 @@
 
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 				     AMDGPU_GPU_PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &pt);
+				     AMDGPU_GEM_DOMAIN_VRAM,
+				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+				     NULL, &pt);
 		if (r)
 			goto error_free;
 
@@ -1085,17 +1158,27 @@
 {
 	struct amdgpu_bo_va_mapping *mapping;
 	struct amdgpu_vm *vm = bo_va->vm;
+	bool valid = true;
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
 
-	list_for_each_entry(mapping, &bo_va->mappings, list) {
+	list_for_each_entry(mapping, &bo_va->valids, list) {
 		if (mapping->it.start == saddr)
 			break;
 	}
 
-	if (&mapping->list == &bo_va->mappings) {
-		amdgpu_bo_unreserve(bo_va->bo);
-		return -ENOENT;
+	if (&mapping->list == &bo_va->valids) {
+		valid = false;
+
+		list_for_each_entry(mapping, &bo_va->invalids, list) {
+			if (mapping->it.start == saddr)
+				break;
+		}
+
+		if (&mapping->list == &bo_va->invalids) {
+			amdgpu_bo_unreserve(bo_va->bo);
+			return -ENOENT;
+		}
 	}
 
 	mutex_lock(&vm->mutex);
@@ -1103,12 +1186,10 @@
 	interval_tree_remove(&mapping->it, &vm->va);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
-	if (bo_va->addr) {
-		/* clear the old address */
+	if (valid)
 		list_add(&mapping->list, &vm->freed);
-	} else {
+	else
 		kfree(mapping);
-	}
 	mutex_unlock(&vm->mutex);
 	amdgpu_bo_unreserve(bo_va->bo);
 
@@ -1139,16 +1220,19 @@
 	list_del(&bo_va->vm_status);
 	spin_unlock(&vm->status_lock);
 
-	list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) {
+	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
 		list_del(&mapping->list);
 		interval_tree_remove(&mapping->it, &vm->va);
 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
-		if (bo_va->addr)
-			list_add(&mapping->list, &vm->freed);
-		else
-			kfree(mapping);
+		list_add(&mapping->list, &vm->freed);
 	}
-	amdgpu_fence_unref(&bo_va->last_pt_update);
+	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
+		list_del(&mapping->list);
+		interval_tree_remove(&mapping->it, &vm->va);
+		kfree(mapping);
+	}
+
+	fence_put(bo_va->last_pt_update);
 	kfree(bo_va);
 
 	mutex_unlock(&vm->mutex);
@@ -1169,12 +1253,10 @@
 	struct amdgpu_bo_va *bo_va;
 
 	list_for_each_entry(bo_va, &bo->va, bo_list) {
-		if (bo_va->addr) {
-			spin_lock(&bo_va->vm->status_lock);
-			list_del(&bo_va->vm_status);
+		spin_lock(&bo_va->vm->status_lock);
+		if (list_empty(&bo_va->vm_status))
 			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
-			spin_unlock(&bo_va->vm->status_lock);
-		}
+		spin_unlock(&bo_va->vm->status_lock);
 	}
 }
 
@@ -1202,6 +1284,7 @@
 	vm->va = RB_ROOT;
 	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->invalidated);
+	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
 
 	pd_size = amdgpu_vm_directory_size(adev);
@@ -1215,8 +1298,11 @@
 		return -ENOMEM;
 	}
 
+	vm->page_directory_fence = NULL;
+
 	r = amdgpu_bo_create(adev, pd_size, align, true,
-			     AMDGPU_GEM_DOMAIN_VRAM, 0,
+			     AMDGPU_GEM_DOMAIN_VRAM,
+			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
 			     NULL, &vm->page_directory);
 	if (r)
 		return r;
@@ -1263,9 +1349,10 @@
 	kfree(vm->page_tables);
 
 	amdgpu_bo_unref(&vm->page_directory);
+	fence_put(vm->page_directory_fence);
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		amdgpu_fence_unref(&vm->ids[i].flushed_updates);
+		fence_put(vm->ids[i].flushed_updates);
 		amdgpu_fence_unref(&vm->ids[i].last_id_use);
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 9ba0a7d..92b6aca 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -139,7 +139,8 @@
 
 	tx_buf[0] = msg->address & 0xff;
 	tx_buf[1] = msg->address >> 8;
-	tx_buf[2] = msg->request << 4;
+	tx_buf[2] = (msg->request << 4) |
+		((msg->address >> 16) & 0xf);
 	tx_buf[3] = msg->size ? (msg->size - 1) : 0;
 
 	switch (msg->request & ~DP_AUX_I2C_MOT) {
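
The fix above carries the top 4 bits of the 20-bit AUX address in the low nibble of byte 2, next to the 4-bit request code; previously those address bits were dropped. A standalone sketch of the 4-byte header layout (the sample address and request value are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t address = 0x54321;  /* 20-bit AUX address */
	uint8_t request = 0x9;       /* e.g. a native AUX read */
	uint8_t size = 16;
	uint8_t tx_buf[4];

	tx_buf[0] = address & 0xff;               /* addr bits 7:0 */
	tx_buf[1] = (address >> 8) & 0xff;        /* addr bits 15:8 */
	tx_buf[2] = (request << 4) |
		    ((address >> 16) & 0xf);      /* request | addr bits 19:16 */
	tx_buf[3] = size ? (size - 1) : 0;        /* transfer length - 1 */

	printf("%02x %02x %02x %02x\n",           /* prints: 21 43 95 0f */
	       tx_buf[0], tx_buf[1], tx_buf[2], tx_buf[3]);
	return 0;
}
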
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index ae8caca..cd6edc4 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -812,7 +812,7 @@
 			else
 				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
 
-			if ((adev->flags & AMDGPU_IS_APU) &&
+			if ((adev->flags & AMD_IS_APU) &&
 			    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
 				if (is_dp ||
 				    !amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 341c566..4b6ce74 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -64,6 +64,8 @@
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
 
+#include "amdgpu_amdkfd.h"
+
 /*
  * Indirect registers accessor
  */
@@ -836,7 +838,7 @@
 {
 	u32 reference_clock = adev->clock.spll.reference_freq;
 
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		if (RREG32_SMC(ixGENERAL_PWRMGT) & GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK)
 			return reference_clock / 2;
 	} else {
@@ -1233,7 +1235,7 @@
 	if (reset_mask & AMDGPU_RESET_VMC)
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_VMC_MASK;
 
-	if (!(adev->flags & AMDGPU_IS_APU)) {
+	if (!(adev->flags & AMD_IS_APU)) {
 		if (reset_mask & AMDGPU_RESET_MC)
 			srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_MC_MASK;
 	}
@@ -1409,7 +1411,7 @@
 		dev_warn(adev->dev, "Wait for MC idle timed out !\n");
 	}
 
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		kv_save_regs_for_reset(adev, &kv_save);
 
 	/* disable BM */
@@ -1427,7 +1429,7 @@
 	}
 
 	/* does asic init need to be run first??? */
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		kv_restore_regs_for_reset(adev, &kv_save);
 }
 
@@ -1568,7 +1570,7 @@
 	if (amdgpu_pcie_gen2 == 0)
 		return;
 
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return;
 
 	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
@@ -1728,7 +1730,7 @@
 		return;
 
 	/* XXX double check APUs */
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return;
 
 	orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
@@ -2448,14 +2450,21 @@
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_amdkfd_suspend(adev);
+
 	return cik_common_hw_fini(adev);
 }
 
 static int cik_common_resume(void *handle)
 {
+	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	return cik_common_hw_init(adev);
+	r = cik_common_hw_init(adev);
+	if (r)
+		return r;
+
+	return amdgpu_amdkfd_resume(adev);
 }
 
 static bool cik_common_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 15df46c..9ea9de4 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -188,6 +188,19 @@
 	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
 }
 
+static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+	int i;
+
+	for (i = 0; i < count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			amdgpu_ring_write(ring, ring->nop |
+					  SDMA_NOP_COUNT(count - 1));
+		else
+			amdgpu_ring_write(ring, ring->nop);
+}
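
On SDMA firmware new enough to support it (feature_version >= 20, per the microcode hunk below), a run of padding collapses into one burst NOP whose count field tells the engine how many padding DWs follow. A userspace sketch of that encoding, mirroring SDMA_NOP_COUNT from the cikd.h hunk later in this patch (ring_write() is an illustrative stand-in for amdgpu_ring_write()):

#include <stdint.h>
#include <stdio.h>

/* Mirrored from the cikd.h hunk in this patch. */
#define SDMA_OPCODE_NOP     0
#define SDMA_NOP_COUNT(x)   (((x) & 0x3FFF) << 16)

/* Illustrative stand-in: append one DW to the ring buffer. */
static void ring_write(uint32_t *buf, unsigned *wptr, uint32_t dw)
{
	buf[(*wptr)++] = dw;
}

/* Emit 'count' DWs of padding. With burst NOPs the first packet
 * announces how many padding DWs follow, so the engine consumes
 * the whole run as a single packet. */
static void insert_nop(uint32_t *buf, unsigned *wptr,
		       uint32_t count, int burst_nop)
{
	unsigned i;

	for (i = 0; i < count; i++)
		if (burst_nop && i == 0)
			ring_write(buf, wptr,
				   SDMA_OPCODE_NOP | SDMA_NOP_COUNT(count - 1));
		else
			ring_write(buf, wptr, SDMA_OPCODE_NOP);
}

int main(void)
{
	uint32_t buf[16];
	unsigned wptr = 0, i;

	insert_nop(buf, &wptr, 5, 1);
	for (i = 0; i < wptr; i++)
		printf("dw[%u] = 0x%08x\n", i, buf[i]);
	return 0;
}
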
+
 /**
  * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
  *
@@ -213,8 +226,8 @@
 	amdgpu_ring_write(ring, next_rptr);
 
 	/* IB packet must end on a 8 DW boundary */
-	while ((ring->wptr & 7) != 4)
-		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+	cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);
+
 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
 	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
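
The replacement pad count (12 - (wptr & 7)) % 8 drives the write pointer to wptr % 8 == 4 before the INDIRECT_BUFFER packet, whose 4 DWs then end exactly on the 8-DW boundary the comment requires. A quick arithmetic check over every phase:

#include <stdio.h>

int main(void)
{
	unsigned wptr;

	/* For every starting phase, the pad leaves wptr % 8 == 4, so the
	 * 4-DW INDIRECT_BUFFER packet ends on an 8-DW boundary. */
	for (wptr = 0; wptr < 8; wptr++) {
		unsigned pad = (12 - (wptr & 7)) % 8;
		printf("wptr%%8=%u -> pad=%u -> (wptr+pad)%%8=%u\n",
		       wptr, pad, (wptr + pad) % 8);
	}
	return 0;
}
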
@@ -501,6 +514,8 @@
 		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
 		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+		if (adev->sdma[i].feature_version >= 20)
+			adev->sdma[i].burst_nop = true;
 		fw_data = (const __le32 *)
 			(adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
@@ -614,6 +629,7 @@
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
+	struct fence *f = NULL;
 	unsigned i;
 	unsigned index;
 	int r;
@@ -629,12 +645,11 @@
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
-
+	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(ring, NULL, 256, &ib);
 	if (r) {
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
-		return r;
+		goto err0;
 	}
 
 	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
@@ -643,20 +658,16 @@
 	ib.ptr[3] = 1;
 	ib.ptr[4] = 0xDEADBEEF;
 	ib.length_dw = 5;
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err1;
 
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+	r = fence_wait(f, false);
 	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-		return r;
-	}
-	r = amdgpu_fence_wait(ib.fence, false);
-	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-		return r;
+		goto err1;
 	}
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -666,12 +677,17 @@
 	}
 	if (i < adev->usec_timeout) {
 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ib.fence->ring->idx, i);
+			 ring->idx, i);
+		goto err1;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
 		r = -EINVAL;
 	}
+
+err1:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
+err0:
 	amdgpu_wb_free(adev, index);
 	return r;
 }
@@ -814,8 +830,19 @@
  */
 static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
 {
-	while (ib->length_dw & 0x7)
-		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+	u32 pad_count;
+	int i;
+
+	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	for (i = 0; i < pad_count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			ib->ptr[ib->length_dw++] =
+					SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) |
+					SDMA_NOP_COUNT(pad_count - 1);
+		else
+			ib->ptr[ib->length_dw++] =
+					SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
 }
 
 /**
@@ -1302,6 +1329,7 @@
 	.test_ring = cik_sdma_ring_test_ring,
 	.test_ib = cik_sdma_ring_test_ib,
 	.is_lockup = cik_sdma_ring_is_lockup,
+	.insert_nop = cik_sdma_ring_insert_nop,
 };
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -1338,18 +1366,18 @@
  * Used by the amdgpu ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-static void cik_sdma_emit_copy_buffer(struct amdgpu_ring *ring,
+static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
 				      uint64_t src_offset,
 				      uint64_t dst_offset,
 				      uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
-	amdgpu_ring_write(ring, byte_count);
-	amdgpu_ring_write(ring, 0); /* src/dst endian swap */
-	amdgpu_ring_write(ring, lower_32_bits(src_offset));
-	amdgpu_ring_write(ring, upper_32_bits(src_offset));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
+	ib->ptr[ib->length_dw++] = byte_count;
+	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
 }
 
 /**
@@ -1362,16 +1390,16 @@
  *
  * Fill GPU buffers using the DMA engine (CIK).
  */
-static void cik_sdma_emit_fill_buffer(struct amdgpu_ring *ring,
+static void cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib,
 				      uint32_t src_data,
 				      uint64_t dst_offset,
 				      uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
-	amdgpu_ring_write(ring, src_data);
-	amdgpu_ring_write(ring, byte_count);
+	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = src_data;
+	ib->ptr[ib->length_dw++] = byte_count;
 }
 
 static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
@@ -1404,5 +1432,6 @@
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
 		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index d19085a..7f6d457 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -487,6 +487,7 @@
 					 (((op) & 0xFF) << 0))
 /* sDMA opcodes */
 #define	SDMA_OPCODE_NOP					  0
+#	define SDMA_NOP_COUNT(x)			  (((x) & 0x3FFF) << 16)
 #define	SDMA_OPCODE_COPY				  1
 #       define SDMA_COPY_SUB_OPCODE_LINEAR                0
 #       define SDMA_COPY_SUB_OPCODE_TILED                 1
@@ -552,6 +553,12 @@
 #define VCE_CMD_IB_AUTO		0x00000005
 #define VCE_CMD_SEMAPHORE	0x00000006
 
+/* if PTR32, these are the bases for scratch and lds */
+#define	PRIVATE_BASE(x)	((x) << 0) /* scratch */
+#define	SHARED_BASE(x)	((x) << 16) /* LDS */
+
+#define KFD_CIK_SDMA_QUEUE_OFFSET	0x200
+
 /* valid for both DEFAULT_MTYPE and APE1_MTYPE */
 enum {
 	MTYPE_CACHED = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index ace870a..44fa96a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1596,9 +1596,9 @@
 
 	if (pi->sys_info.nb_dpm_enable) {
 		if (ps->force_high)
-			cz_dpm_nbdpm_lm_pstate_enable(adev, true);
-		else
 			cz_dpm_nbdpm_lm_pstate_enable(adev, false);
+		else
+			cz_dpm_nbdpm_lm_pstate_enable(adev, true);
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index e70a26f..e4d101b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -126,9 +126,31 @@
 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
 };
 
+static const u32 golden_settings_fiji_a10[] =
+{
+	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
+	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
+	mmFBC_MISC, 0x1f311fff, 0x12300000,
+	mmHDMI_CONTROL, 0x31000111, 0x00000011,
+};
+
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
+	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
+};
+
 static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
+	case CHIP_FIJI:
+		amdgpu_program_register_sequence(adev,
+						 fiji_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_fiji_a10,
+						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
@@ -803,11 +825,11 @@
 			buffer_alloc = 2;
 		} else if (mode->crtc_hdisplay < 4096) {
 			mem_cfg = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		} else {
 			DRM_DEBUG_KMS("Mode too big for LB!\n");
 			mem_cfg = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		}
 	} else {
 		mem_cfg = 1;
@@ -1331,7 +1353,7 @@
 	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
-	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
+	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 	/* restore original selection */
@@ -2888,6 +2910,7 @@
 	dce_v10_0_set_irq_funcs(adev);
 
 	switch (adev->asic_type) {
+	case CHIP_FIJI:
 	case CHIP_TONGA:
 		adev->mode_info.num_crtc = 6; /* XXX 7??? */
 		adev->mode_info.num_hpd = 6;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index dcb402e..6411e82 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -801,11 +801,11 @@
 			buffer_alloc = 2;
 		} else if (mode->crtc_hdisplay < 4096) {
 			mem_cfg = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		} else {
 			DRM_DEBUG_KMS("Mode too big for LB!\n");
 			mem_cfg = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		}
 	} else {
 		mem_cfg = 1;
@@ -1329,7 +1329,7 @@
 	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
-	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
+	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 	/* restore original selection */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index cc050a3..c86911c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -770,11 +770,11 @@
 			buffer_alloc = 2;
 		} else if (mode->crtc_hdisplay < 4096) {
 			tmp = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		} else {
 			DRM_DEBUG_KMS("Mode too big for LB!\n");
 			tmp = 0;
-			buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4;
+			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
 		}
 	} else {
 		tmp = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
new file mode 100644
index 0000000..8f9845d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "fiji_smumgr.h"
+
+MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
+
+static void fiji_dpm_set_funcs(struct amdgpu_device *adev);
+
+static int fiji_dpm_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	fiji_dpm_set_funcs(adev);
+
+	return 0;
+}
+
+static int fiji_dpm_init_microcode(struct amdgpu_device *adev)
+{
+	char fw_name[30] = "amdgpu/fiji_smc.bin";
+	int err;
+
+	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+	if (err)
+		goto out;
+	err = amdgpu_ucode_validate(adev->pm.fw);
+
+out:
+	if (err) {
+		DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
+		release_firmware(adev->pm.fw);
+		adev->pm.fw = NULL;
+	}
+	return err;
+}
+
+static int fiji_dpm_sw_init(void *handle)
+{
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	ret = fiji_dpm_init_microcode(adev);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int fiji_dpm_sw_fini(void *handle)
+{
+	return 0;
+}
+
+static int fiji_dpm_hw_init(void *handle)
+{
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	mutex_lock(&adev->pm.mutex);
+
+	ret = fiji_smu_init(adev);
+	if (ret) {
+		DRM_ERROR("SMU initialization failed\n");
+		goto fail;
+	}
+
+	ret = fiji_smu_start(adev);
+	if (ret) {
+		DRM_ERROR("SMU start failed\n");
+		goto fail;
+	}
+
+	mutex_unlock(&adev->pm.mutex);
+	return 0;
+
+fail:
+	adev->firmware.smu_load = false;
+	mutex_unlock(&adev->pm.mutex);
+	return -EINVAL;
+}
+
+static int fiji_dpm_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	mutex_lock(&adev->pm.mutex);
+	fiji_smu_fini(adev);
+	mutex_unlock(&adev->pm.mutex);
+	return 0;
+}
+
+static int fiji_dpm_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	fiji_dpm_hw_fini(adev);
+
+	return 0;
+}
+
+static int fiji_dpm_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	fiji_dpm_hw_init(adev);
+
+	return 0;
+}
+
+static int fiji_dpm_set_clockgating_state(void *handle,
+			enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int fiji_dpm_set_powergating_state(void *handle,
+			enum amd_powergating_state state)
+{
+	return 0;
+}
+
+const struct amd_ip_funcs fiji_dpm_ip_funcs = {
+	.early_init = fiji_dpm_early_init,
+	.late_init = NULL,
+	.sw_init = fiji_dpm_sw_init,
+	.sw_fini = fiji_dpm_sw_fini,
+	.hw_init = fiji_dpm_hw_init,
+	.hw_fini = fiji_dpm_hw_fini,
+	.suspend = fiji_dpm_suspend,
+	.resume = fiji_dpm_resume,
+	.is_idle = NULL,
+	.wait_for_idle = NULL,
+	.soft_reset = NULL,
+	.print_status = NULL,
+	.set_clockgating_state = fiji_dpm_set_clockgating_state,
+	.set_powergating_state = fiji_dpm_set_powergating_state,
+};
+
+static const struct amdgpu_dpm_funcs fiji_dpm_funcs = {
+	.get_temperature = NULL,
+	.pre_set_power_state = NULL,
+	.set_power_state = NULL,
+	.post_set_power_state = NULL,
+	.display_configuration_changed = NULL,
+	.get_sclk = NULL,
+	.get_mclk = NULL,
+	.print_power_state = NULL,
+	.debugfs_print_current_performance_level = NULL,
+	.force_performance_level = NULL,
+	.vblank_too_short = NULL,
+	.powergate_uvd = NULL,
+};
+
+static void fiji_dpm_set_funcs(struct amdgpu_device *adev)
+{
+	if (NULL == adev->pm.funcs)
+		adev->pm.funcs = &fiji_dpm_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h b/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h
new file mode 100644
index 0000000..3c48240
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_ppsmc.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef FIJI_PP_SMC_H
+#define FIJI_PP_SMC_H
+
+#pragma pack(push, 1)
+
+#define PPSMC_SWSTATE_FLAG_DC                           0x01
+#define PPSMC_SWSTATE_FLAG_UVD                          0x02
+#define PPSMC_SWSTATE_FLAG_VCE                          0x04
+
+#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL             0x00
+#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL             0x01
+#define PPSMC_THERMAL_PROTECT_TYPE_NONE                 0xff
+
+#define PPSMC_SYSTEMFLAG_GPIO_DC                        0x01
+#define PPSMC_SYSTEMFLAG_STEPVDDC                       0x02
+#define PPSMC_SYSTEMFLAG_GDDR5                          0x04
+
+#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP               0x08
+
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT                  0x10
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG           0x20
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK              0x07
+#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK     0x08
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE   0x00
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE  0x01
+
+#define PPSMC_DPM2FLAGS_TDPCLMP                         0x01
+#define PPSMC_DPM2FLAGS_PWRSHFT                         0x02
+#define PPSMC_DPM2FLAGS_OCP                             0x04
+
+#define PPSMC_DISPLAY_WATERMARK_LOW                     0
+#define PPSMC_DISPLAY_WATERMARK_HIGH                    1
+
+#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP    		0x01
+#define PPSMC_STATEFLAG_POWERBOOST         		0x02
+#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 		0x04
+#define PPSMC_STATEFLAG_POWERSHIFT         		0x08
+#define PPSMC_STATEFLAG_SLOW_READ_MARGIN   		0x10
+#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 		0x20
+#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS   		0x40
+
+#define FDO_MODE_HARDWARE 0
+#define FDO_MODE_PIECE_WISE_LINEAR 1
+
+enum FAN_CONTROL {
+	FAN_CONTROL_FUZZY,
+	FAN_CONTROL_TABLE
+};
+
+//Gemini Modes
+#define PPSMC_GeminiModeNone   0  //Single GPU board
+#define PPSMC_GeminiModeMaster 1  //Master GPU on a Gemini board
+#define PPSMC_GeminiModeSlave  2  //Slave GPU on a Gemini board
+
+#define PPSMC_Result_OK             			((uint16_t)0x01)
+#define PPSMC_Result_NoMore         			((uint16_t)0x02)
+#define PPSMC_Result_NotNow         			((uint16_t)0x03)
+#define PPSMC_Result_Failed         			((uint16_t)0xFF)
+#define PPSMC_Result_UnknownCmd     			((uint16_t)0xFE)
+#define PPSMC_Result_UnknownVT      			((uint16_t)0xFD)
+
+typedef uint16_t PPSMC_Result;
+
+#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
+
+#define PPSMC_MSG_Halt                      		((uint16_t)0x10)
+#define PPSMC_MSG_Resume                    		((uint16_t)0x11)
+#define PPSMC_MSG_EnableDPMLevel            		((uint16_t)0x12)
+#define PPSMC_MSG_ZeroLevelsDisabled        		((uint16_t)0x13)
+#define PPSMC_MSG_OneLevelsDisabled         		((uint16_t)0x14)
+#define PPSMC_MSG_TwoLevelsDisabled         		((uint16_t)0x15)
+#define PPSMC_MSG_EnableThermalInterrupt    		((uint16_t)0x16)
+#define PPSMC_MSG_RunningOnAC               		((uint16_t)0x17)
+#define PPSMC_MSG_LevelUp                   		((uint16_t)0x18)
+#define PPSMC_MSG_LevelDown                 		((uint16_t)0x19)
+#define PPSMC_MSG_ResetDPMCounters          		((uint16_t)0x1a)
+#define PPSMC_MSG_SwitchToSwState           		((uint16_t)0x20)
+#define PPSMC_MSG_SwitchToSwStateLast       		((uint16_t)0x3f)
+#define PPSMC_MSG_SwitchToInitialState      		((uint16_t)0x40)
+#define PPSMC_MSG_NoForcedLevel             		((uint16_t)0x41)
+#define PPSMC_MSG_ForceHigh                 		((uint16_t)0x42)
+#define PPSMC_MSG_ForceMediumOrHigh         		((uint16_t)0x43)
+#define PPSMC_MSG_SwitchToMinimumPower      		((uint16_t)0x51)
+#define PPSMC_MSG_ResumeFromMinimumPower    		((uint16_t)0x52)
+#define PPSMC_MSG_EnableCac                 		((uint16_t)0x53)
+#define PPSMC_MSG_DisableCac                		((uint16_t)0x54)
+#define PPSMC_DPMStateHistoryStart          		((uint16_t)0x55)
+#define PPSMC_DPMStateHistoryStop           		((uint16_t)0x56)
+#define PPSMC_CACHistoryStart               		((uint16_t)0x57)
+#define PPSMC_CACHistoryStop                		((uint16_t)0x58)
+#define PPSMC_TDPClampingActive             		((uint16_t)0x59)
+#define PPSMC_TDPClampingInactive           		((uint16_t)0x5A)
+#define PPSMC_StartFanControl               		((uint16_t)0x5B)
+#define PPSMC_StopFanControl                		((uint16_t)0x5C)
+#define PPSMC_NoDisplay                     		((uint16_t)0x5D)
+#define PPSMC_HasDisplay                    		((uint16_t)0x5E)
+#define PPSMC_MSG_UVDPowerOFF               		((uint16_t)0x60)
+#define PPSMC_MSG_UVDPowerON                		((uint16_t)0x61)
+#define PPSMC_MSG_EnableULV                 		((uint16_t)0x62)
+#define PPSMC_MSG_DisableULV                		((uint16_t)0x63)
+#define PPSMC_MSG_EnterULV                  		((uint16_t)0x64)
+#define PPSMC_MSG_ExitULV                   		((uint16_t)0x65)
+#define PPSMC_PowerShiftActive              		((uint16_t)0x6A)
+#define PPSMC_PowerShiftInactive            		((uint16_t)0x6B)
+#define PPSMC_OCPActive                     		((uint16_t)0x6C)
+#define PPSMC_OCPInactive                   		((uint16_t)0x6D)
+#define PPSMC_CACLongTermAvgEnable          		((uint16_t)0x6E)
+#define PPSMC_CACLongTermAvgDisable         		((uint16_t)0x6F)
+#define PPSMC_MSG_InferredStateSweep_Start  		((uint16_t)0x70)
+#define PPSMC_MSG_InferredStateSweep_Stop   		((uint16_t)0x71)
+#define PPSMC_MSG_SwitchToLowestInfState    		((uint16_t)0x72)
+#define PPSMC_MSG_SwitchToNonInfState       		((uint16_t)0x73)
+#define PPSMC_MSG_AllStateSweep_Start       		((uint16_t)0x74)
+#define PPSMC_MSG_AllStateSweep_Stop        		((uint16_t)0x75)
+#define PPSMC_MSG_SwitchNextLowerInfState   		((uint16_t)0x76)
+#define PPSMC_MSG_SwitchNextHigherInfState  		((uint16_t)0x77)
+#define PPSMC_MSG_MclkRetrainingTest        		((uint16_t)0x78)
+#define PPSMC_MSG_ForceTDPClamping          		((uint16_t)0x79)
+#define PPSMC_MSG_CollectCAC_PowerCorreln   		((uint16_t)0x7A)
+#define PPSMC_MSG_CollectCAC_WeightCalib    		((uint16_t)0x7B)
+#define PPSMC_MSG_CollectCAC_SQonly         		((uint16_t)0x7C)
+#define PPSMC_MSG_CollectCAC_TemperaturePwr 		((uint16_t)0x7D)
+#define PPSMC_MSG_ExtremitiesTest_Start     		((uint16_t)0x7E)
+#define PPSMC_MSG_ExtremitiesTest_Stop      		((uint16_t)0x7F)
+#define PPSMC_FlushDataCache                		((uint16_t)0x80)
+#define PPSMC_FlushInstrCache               		((uint16_t)0x81)
+#define PPSMC_MSG_SetEnabledLevels          		((uint16_t)0x82)
+#define PPSMC_MSG_SetForcedLevels           		((uint16_t)0x83)
+#define PPSMC_MSG_ResetToDefaults           		((uint16_t)0x84)
+#define PPSMC_MSG_SetForcedLevelsAndJump    		((uint16_t)0x85)
+#define PPSMC_MSG_SetCACHistoryMode         		((uint16_t)0x86)
+#define PPSMC_MSG_EnableDTE                 		((uint16_t)0x87)
+#define PPSMC_MSG_DisableDTE                		((uint16_t)0x88)
+#define PPSMC_MSG_SmcSpaceSetAddress        		((uint16_t)0x89)
+#define PPSMC_MSG_SmcSpaceWriteDWordInc     		((uint16_t)0x8A)
+#define PPSMC_MSG_SmcSpaceWriteWordInc      		((uint16_t)0x8B)
+#define PPSMC_MSG_SmcSpaceWriteByteInc      		((uint16_t)0x8C)
+
+#define PPSMC_MSG_BREAK                     		((uint16_t)0xF8)
+
+#define PPSMC_MSG_Test                      		((uint16_t)0x100)
+#define PPSMC_MSG_DRV_DRAM_ADDR_HI            		((uint16_t)0x250)
+#define PPSMC_MSG_DRV_DRAM_ADDR_LO            		((uint16_t)0x251)
+#define PPSMC_MSG_SMU_DRAM_ADDR_HI            		((uint16_t)0x252)
+#define PPSMC_MSG_SMU_DRAM_ADDR_LO            		((uint16_t)0x253)
+#define PPSMC_MSG_LoadUcodes                  		((uint16_t)0x254)
+
+typedef uint16_t PPSMC_Msg;
+
+#define PPSMC_EVENT_STATUS_THERMAL          		0x00000001
+#define PPSMC_EVENT_STATUS_REGULATORHOT     		0x00000002
+#define PPSMC_EVENT_STATUS_DC               		0x00000004
+#define PPSMC_EVENT_STATUS_GPIO17           		0x00000008
+
+#pragma pack(pop)
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
new file mode 100644
index 0000000..322edea
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -0,0 +1,857 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "amdgpu.h"
+#include "fiji_ppsmc.h"
+#include "fiji_smumgr.h"
+#include "smu_ucode_xfer_vi.h"
+#include "amdgpu_ucode.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+#define FIJI_SMC_SIZE 0x20000
+
+static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
+{
+	uint32_t val;
+
+	if (smc_address & 3)
+		return -EINVAL;
+
+	if ((smc_address + 3) > limit)
+		return -EINVAL;
+
+	WREG32(mmSMC_IND_INDEX_0, smc_address);
+
+	val = RREG32(mmSMC_IND_ACCESS_CNTL);
+	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+	WREG32(mmSMC_IND_ACCESS_CNTL, val);
+
+	return 0;
+}
+
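+/*
+ * Copy a byte stream into SMC SRAM.  Whole words are packed MSB first;
+ * a non-word-aligned tail is merged with the existing SRAM contents via
+ * a read-modify-write cycle.  Illustrative example (not in the original
+ * source): writing the two leftover bytes {0xAA, 0xBB} over a word that
+ * currently holds 0x11223344 yields 0xAABB3344 -- the new bytes land in
+ * the most significant positions and the low bytes are preserved.
+ */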
+static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
+{
+	uint32_t addr;
+	uint32_t data, orig_data;
+	int result = 0;
+	uint32_t extra_shift;
+	unsigned long flags;
+
+	if (smc_start_address & 3)
+		return -EINVAL;
+
+	if ((smc_start_address + byte_count) > limit)
+		return -EINVAL;
+
+	addr = smc_start_address;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	while (byte_count >= 4) {
+		/* Bytes are written into the SMC address space with the MSB first */
+		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
+
+		result = fiji_set_smc_sram_address(adev, addr, limit);
+
+		if (result)
+			goto out;
+
+		WREG32(mmSMC_IND_DATA_0, data);
+
+		src += 4;
+		byte_count -= 4;
+		addr += 4;
+	}
+
+	if (0 != byte_count) {
+		/* Now write the remaining odd bytes using a read-modify-write cycle */
+		data = 0;
+
+		result = fiji_set_smc_sram_address(adev, addr, limit);
+		if (result)
+			goto out;
+
+		orig_data = RREG32(mmSMC_IND_DATA_0);
+		extra_shift = 8 * (4 - byte_count);
+
+		while (byte_count > 0) {
+			data = (data << 8) + *src++;
+			byte_count--;
+		}
+
+		data <<= extra_shift;
+		data |= (orig_data & ~((~0UL) << extra_shift));
+
+		result = fiji_set_smc_sram_address(adev, addr, limit);
+		if (result)
+			goto out;
+
+		WREG32(mmSMC_IND_DATA_0, data);
+	}
+
+out:
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+	return result;
+}
+
+static int fiji_program_jump_on_start(struct amdgpu_device *adev)
+{
+	static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
+
+	fiji_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data) + 1);
+
+	return 0;
+}
+
+static bool fiji_is_smc_ram_running(struct amdgpu_device *adev)
+{
+	uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+	val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
+
+	return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
+}
+
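+/*
+ * Poll SMC_RESP_0 until the SMC acknowledges the previously posted
+ * message with a non-zero SMC_RESP value, giving up after
+ * adev->usec_timeout one-microsecond polls.
+ */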
+static int wait_smu_response(struct amdgpu_device *adev)
+{
+	int i;
+	uint32_t val;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32(mmSMC_RESP_0);
+		if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev)
+{
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MSG_ARG_0, 0x20000);
+	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send message\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
+{
+	if (!fiji_is_smc_ram_running(adev))
+		return -EINVAL;
+
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MESSAGE_0, msg);
+
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send message\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
+						PPSMC_Msg msg)
+{
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MESSAGE_0, msg);
+
+	return 0;
+}
+
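+/*
+ * Message arguments travel through SMC_MSG_ARG_0: the 32-bit parameter
+ * is written first, then the message itself is posted.  Illustrative
+ * sketch (mirroring the TOC address handshake in
+ * fiji_smu_request_load_fw() below, where mc_addr stands for the pinned
+ * GPU address of the buffer being handed to the SMU):
+ *
+ *	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI,
+ *					    upper_32_bits(mc_addr));
+ *	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO,
+ *					    lower_32_bits(mc_addr));
+ */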
+static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
+						PPSMC_Msg msg,
+						uint32_t parameter)
+{
+	if (!fiji_is_smc_ram_running(adev))
+		return -EINVAL;
+
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MSG_ARG_0, parameter);
+
+	return fiji_send_msg_to_smc(adev, msg);
+}
+
+static int fiji_send_msg_to_smc_with_parameter_without_waiting(
+					struct amdgpu_device *adev,
+					PPSMC_Msg msg, uint32_t parameter)
+{
+	if (wait_smu_response(adev)) {
+		DRM_ERROR("Failed to send previous message\n");
+		return -EINVAL;
+	}
+
+	WREG32(mmSMC_MSG_ARG_0, parameter);
+
+	return fiji_send_msg_to_smc_without_waiting(adev, msg);
+}
+
+#if 0 /* not used yet */
+static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev)
+{
+	int i;
+	uint32_t val;
+
+	if (!fiji_is_smc_ram_running(adev))
+		return -EINVAL;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+		if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout)
+		return -EINVAL;
+
+	return 0;
+}
+#endif
+
+static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev)
+{
+	const struct smc_firmware_header_v1_0 *hdr;
+	uint32_t ucode_size;
+	uint32_t ucode_start_address;
+	const uint8_t *src;
+	uint32_t val;
+	uint32_t byte_count;
+	uint32_t *data;
+	unsigned long flags;
+
+	if (!adev->pm.fw)
+		return -EINVAL;
+
+	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
+	amdgpu_ucode_print_smc_hdr(&hdr->header);
+
+	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+	ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+	src = (const uint8_t *)
+		(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+	if (ucode_size & 3) {
+		DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
+		return -EINVAL;
+	}
+
+	if (ucode_size > FIJI_SMC_SIZE) {
+		DRM_ERROR("SMC address is beyond the SMC RAM area\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
+
+	val = RREG32(mmSMC_IND_ACCESS_CNTL);
+	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+	WREG32(mmSMC_IND_ACCESS_CNTL, val);
+
+	byte_count = ucode_size;
+	data = (uint32_t *)src;
+	for (; byte_count >= 4; data++, byte_count -= 4)
+		WREG32(mmSMC_IND_DATA_0, data[0]);
+
+	val = RREG32(mmSMC_IND_ACCESS_CNTL);
+	val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+	WREG32(mmSMC_IND_ACCESS_CNTL, val);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+
+	return 0;
+}
+
+#if 0 /* not used yet */
+static int fiji_read_smc_sram_dword(struct amdgpu_device *adev,
+				uint32_t smc_address,
+				uint32_t *value,
+				uint32_t limit)
+{
+	int result;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	result = fiji_set_smc_sram_address(adev, smc_address, limit);
+	if (result == 0)
+		*value = RREG32(mmSMC_IND_DATA_0);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+	return result;
+}
+
+static int fiji_write_smc_sram_dword(struct amdgpu_device *adev,
+				uint32_t smc_address,
+				uint32_t value,
+				uint32_t limit)
+{
+	int result;
+	unsigned long flags;
+
+	spin_lock_irqsave(&adev->smc_idx_lock, flags);
+	result = fiji_set_smc_sram_address(adev, smc_address, limit);
+	if (result == 0)
+		WREG32(mmSMC_IND_DATA_0, value);
+	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+	return result;
+}
+
+static int fiji_smu_stop_smc(struct amdgpu_device *adev)
+{
+	uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
+	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
+
+	return 0;
+}
+#endif
+
+static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type)
+{
+	switch (fw_type) {
+	case UCODE_ID_SDMA0:
+		return AMDGPU_UCODE_ID_SDMA0;
+	case UCODE_ID_SDMA1:
+		return AMDGPU_UCODE_ID_SDMA1;
+	case UCODE_ID_CP_CE:
+		return AMDGPU_UCODE_ID_CP_CE;
+	case UCODE_ID_CP_PFP:
+		return AMDGPU_UCODE_ID_CP_PFP;
+	case UCODE_ID_CP_ME:
+		return AMDGPU_UCODE_ID_CP_ME;
+	case UCODE_ID_CP_MEC:
+	case UCODE_ID_CP_MEC_JT1:
+	case UCODE_ID_CP_MEC_JT2:
+		return AMDGPU_UCODE_ID_CP_MEC1;
+	case UCODE_ID_RLC_G:
+		return AMDGPU_UCODE_ID_RLC_G;
+	default:
+		DRM_ERROR("ucode type is out of range!\n");
+		return AMDGPU_UCODE_ID_MAXIMUM;
+	}
+}
+
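+/*
+ * Fill one SMU_Entry of the driver TOC from an already-loaded firmware
+ * image.  The MEC jump tables (JT1/JT2) are not separate images: their
+ * entries point into the MEC ucode at jt_offset and cover jt_size
+ * dwords, which is why gpu_addr and data_size are adjusted for them.
+ */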
+static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
+						uint32_t fw_type,
+						struct SMU_Entry *entry)
+{
+	enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type);
+	struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
+	const struct gfx_firmware_header_v1_0 *header = NULL;
+	uint64_t gpu_addr;
+	uint32_t data_size;
+
+	if (ucode->fw == NULL)
+		return -EINVAL;
+	gpu_addr  = ucode->mc_addr;
+	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+	data_size = le32_to_cpu(header->header.ucode_size_bytes);
+
+	if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
+		(fw_type == UCODE_ID_CP_MEC_JT2)) {
+		gpu_addr += le32_to_cpu(header->jt_offset) << 2;
+		data_size = le32_to_cpu(header->jt_size) << 2;
+	}
+
+	entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+	entry->id = (uint16_t)fw_type;
+	entry->image_addr_high = upper_32_bits(gpu_addr);
+	entry->image_addr_low = lower_32_bits(gpu_addr);
+	entry->meta_data_addr_high = 0;
+	entry->meta_data_addr_low = 0;
+	entry->data_size_byte = data_size;
+	entry->num_register_entries = 0;
+
+	if (fw_type == UCODE_ID_RLC_G)
+		entry->flags = 1;
+	else
+		entry->flags = 0;
+
+	return 0;
+}
+
+static int fiji_smu_request_load_fw(struct amdgpu_device *adev)
+{
+	struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv;
+	struct SMU_DRAMData_TOC *toc;
+	uint32_t fw_to_load;
+
+	WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
+
+	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
+	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
+
+	toc = (struct SMU_DRAMData_TOC *)private->header;
+	toc->num_entries = 0;
+	toc->structure_version = 1;
+
+	if (!adev->firmware.smu_load)
+		return 0;
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for RLC\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for CE\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for PFP\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for ME\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for MEC\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for SDMA0\n");
+		return -EINVAL;
+	}
+
+	if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
+			&toc->entry[toc->num_entries++])) {
+		DRM_ERROR("Failed to get firmware entry for SDMA1\n");
+		return -EINVAL;
+	}
+
+	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
+	fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
+
+	fw_to_load = UCODE_ID_RLC_G_MASK |
+			UCODE_ID_SDMA0_MASK |
+			UCODE_ID_SDMA1_MASK |
+			UCODE_ID_CP_CE_MASK |
+			UCODE_ID_CP_ME_MASK |
+			UCODE_ID_CP_PFP_MASK |
+			UCODE_ID_CP_MEC_MASK;
+
+	if (fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
+		DRM_ERROR("Fail to request SMU load ucode\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type)
+{
+	switch (fw_type) {
+	case AMDGPU_UCODE_ID_SDMA0:
+		return UCODE_ID_SDMA0_MASK;
+	case AMDGPU_UCODE_ID_SDMA1:
+		return UCODE_ID_SDMA1_MASK;
+	case AMDGPU_UCODE_ID_CP_CE:
+		return UCODE_ID_CP_CE_MASK;
+	case AMDGPU_UCODE_ID_CP_PFP:
+		return UCODE_ID_CP_PFP_MASK;
+	case AMDGPU_UCODE_ID_CP_ME:
+		return UCODE_ID_CP_ME_MASK;
+	case AMDGPU_UCODE_ID_CP_MEC1:
+	case AMDGPU_UCODE_ID_CP_MEC2:
+		return UCODE_ID_CP_MEC_MASK;
+	case AMDGPU_UCODE_ID_RLC_G:
+		return UCODE_ID_RLC_G_MASK;
+	default:
+		DRM_ERROR("ucode type is out of range!\n");
+		return 0;
+	}
+}
+
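+/*
+ * The SMU reports per-ucode load completion by setting the matching
+ * UCODE_ID_*_MASK bit in SOFT_REGISTERS_TABLE_28 (cleared to zero in
+ * fiji_smu_request_load_fw() before the load request is issued), so
+ * completion is polled rather than interrupt driven.
+ */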
+static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev,
+					uint32_t fw_type)
+{
+	uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type);
+	int i;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("check firmware loading failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev)
+{
+	int result;
+	uint32_t val;
+	int i;
+
+	/* Assert reset */
+	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+	result = fiji_smu_upload_firmware_image(adev);
+	if (result)
+		return result;
+
+	/* Clear status */
+	WREG32_SMC(ixSMU_STATUS, 0);
+
+	/* Enable clock */
+	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
+
+	/* De-assert reset */
+	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+	/* Set SMU Auto Start */
+	val = RREG32_SMC(ixSMU_INPUT_DATA);
+	val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
+	WREG32_SMC(ixSMU_INPUT_DATA, val);
+
+	/* Clear firmware interrupt enable flag */
+	WREG32_SMC(ixFIRMWARE_FLAGS, 0);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixRCU_UC_EVENTS);
+		if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("Interrupt is not enabled by firmware\n");
+		return -EINVAL;
+	}
+
+	/* Call Test SMU message with 0x20000 offset
+	 * to trigger SMU start
+	 */
+	fiji_send_msg_to_smc_offset(adev);
+	DRM_INFO("[FM]try triger smu start\n");
+	/* Wait for done bit to be set */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixSMU_STATUS);
+		if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("Timeout for SMU start\n");
+		return -EINVAL;
+	}
+
+	/* Check pass/failed indicator */
+	val = RREG32_SMC(ixSMU_STATUS);
+	if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
+		DRM_ERROR("SMU Firmware start failed\n");
+		return -EINVAL;
+	}
+	DRM_INFO("[FM]smu started\n");
+	/* Wait for firmware to initialize */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixFIRMWARE_FLAGS);
+		if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("SMU firmware initialization failed\n");
+		return -EINVAL;
+	}
+	DRM_INFO("[FM]smu initialized\n");
+
+	return 0;
+}
+
+static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
+{
+	int i, result;
+	uint32_t val;
+
+	/* wait for smc boot up */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixRCU_UC_EVENTS);
+		val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
+		if (val)
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("SMC boot sequence is not completed\n");
+		return -EINVAL;
+	}
+
+	/* Clear firmware interrupt enable flag */
+	WREG32_SMC(ixFIRMWARE_FLAGS, 0);
+
+	/* Assert reset */
+	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+	result = fiji_smu_upload_firmware_image(adev);
+	if (result)
+		return result;
+
+	/* Set the SMC instruction start point at 0x0 */
+	fiji_program_jump_on_start(adev);
+
+	/* Enable clock */
+	val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+	val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+	WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
+
+	/* De-assert reset */
+	val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
+	val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+	WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
+
+	/* Wait for firmware to initialize */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		val = RREG32_SMC(ixFIRMWARE_FLAGS);
+		if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
+			break;
+		udelay(1);
+	}
+
+	if (i == adev->usec_timeout) {
+		DRM_ERROR("Timeout for SMC firmware initialization\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
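+/*
+ * If the SMC is not already running, the boot path is chosen from the
+ * SMU_MODE field of SMU_FIRMWARE: zero selects the non-protection path
+ * (plain firmware upload plus a programmed jump), non-zero the
+ * protection-mode path that relies on the SMU auto-start handshake.
+ * The firmware load request is issued in either case.
+ */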
+int fiji_smu_start(struct amdgpu_device *adev)
+{
+	int result;
+	uint32_t val;
+
+	if (!fiji_is_smc_ram_running(adev)) {
+		val = RREG32_SMC(ixSMU_FIRMWARE);
+		if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
+			DRM_INFO("[FM]start smu in nonprotection mode\n");
+			result = fiji_smu_start_in_non_protection_mode(adev);
+			if (result)
+				return result;
+		} else {
+			DRM_INFO("[FM]start smu in protection mode\n");
+			result = fiji_smu_start_in_protection_mode(adev);
+			if (result)
+				return result;
+		}
+	}
+
+	return fiji_smu_request_load_fw(adev);
+}
+
+static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = {
+	.check_fw_load_finish = fiji_smu_check_fw_load_finish,
+	.request_smu_load_fw = NULL,
+	.request_smu_specific_fw = NULL,
+};
+
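+/*
+ * Both the driver TOC and the SMU internal buffer follow the same VRAM
+ * buffer-object pattern: create, reserve, pin (to obtain the GPU address
+ * later handed to the SMU via the *_DRAM_ADDR_* messages), kmap, and
+ * finally unreserve.
+ */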
+int fiji_smu_init(struct amdgpu_device *adev)
+{
+	struct fiji_smu_private_data *private;
+	uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
+	uint32_t smu_internal_buffer_size = 200*4096;
+	struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
+	struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
+	uint64_t mc_addr;
+	void *toc_buf_ptr;
+	void *smu_buf_ptr;
+	int ret;
+
+	private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL);
+	if (!private)
+		return -ENOMEM;
+
+	/* allocate firmware buffers */
+	if (adev->firmware.smu_load)
+		amdgpu_ucode_init_bo(adev);
+
+	adev->smu.priv = private;
+	adev->smu.fw_flags = 0;
+
+	/* Allocate FW image data structure and header buffer */
+	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
+			       true, AMDGPU_GEM_DOMAIN_VRAM,
+			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			       NULL, toc_buf);
+	if (ret) {
+		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate buffer for SMU internal buffer */
+	ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
+			       true, AMDGPU_GEM_DOMAIN_VRAM,
+			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			       NULL, smu_buf);
+	if (ret) {
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
+		return -ENOMEM;
+	}
+
+	/* Retrieve GPU address for header buffer and internal buffer */
+	ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
+	if (ret) {
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to reserve the TOC buffer\n");
+		return -EINVAL;
+	}
+
+	ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
+	if (ret) {
+		amdgpu_bo_unreserve(adev->smu.toc_buf);
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to pin the TOC buffer\n");
+		return -EINVAL;
+	}
+
+	ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
+	if (ret) {
+		amdgpu_bo_unreserve(adev->smu.toc_buf);
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to map the TOC buffer\n");
+		return -EINVAL;
+	}
+
+	amdgpu_bo_unreserve(adev->smu.toc_buf);
+	private->header_addr_low = lower_32_bits(mc_addr);
+	private->header_addr_high = upper_32_bits(mc_addr);
+	private->header = toc_buf_ptr;
+
+	ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
+	if (ret) {
+		amdgpu_bo_unref(&adev->smu.smu_buf);
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to reserve the SMU internal buffer\n");
+		return -EINVAL;
+	}
+
+	ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
+	if (ret) {
+		amdgpu_bo_unreserve(adev->smu.smu_buf);
+		amdgpu_bo_unref(&adev->smu.smu_buf);
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to pin the SMU internal buffer\n");
+		return -EINVAL;
+	}
+
+	ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
+	if (ret) {
+		amdgpu_bo_unreserve(adev->smu.smu_buf);
+		amdgpu_bo_unref(&adev->smu.smu_buf);
+		amdgpu_bo_unref(&adev->smu.toc_buf);
+		DRM_ERROR("Failed to map the SMU internal buffer\n");
+		return -EINVAL;
+	}
+
+	amdgpu_bo_unreserve(adev->smu.smu_buf);
+	private->smu_buffer_addr_low = lower_32_bits(mc_addr);
+	private->smu_buffer_addr_high = upper_32_bits(mc_addr);
+
+	adev->smu.smumgr_funcs = &fiji_smumgr_funcs;
+
+	return 0;
+}
+
+int fiji_smu_fini(struct amdgpu_device *adev)
+{
+	amdgpu_bo_unref(&adev->smu.toc_buf);
+	amdgpu_bo_unref(&adev->smu.smu_buf);
+	kfree(adev->smu.priv);
+	adev->smu.priv = NULL;
+	if (adev->firmware.fw_buf)
+		amdgpu_ucode_fini_bo(adev);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h b/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h
new file mode 100644
index 0000000..1cef03d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smumgr.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef FIJI_SMUMGR_H
+#define FIJI_SMUMGR_H
+
+#include "fiji_ppsmc.h"
+
+int fiji_smu_init(struct amdgpu_device *adev);
+int fiji_smu_fini(struct amdgpu_device *adev);
+int fiji_smu_start(struct amdgpu_device *adev);
+
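+/*
+ * CPU pointer to the driver TOC header plus the split (high/low) GPU
+ * addresses of the TOC and the SMU internal buffer, as programmed via
+ * the PPSMC_MSG_{DRV,SMU}_DRAM_ADDR_{HI,LO} messages.
+ */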
+struct fiji_smu_private_data {
+	uint8_t *header;
+	uint32_t smu_buffer_addr_high;
+	uint32_t smu_buffer_addr_low;
+	uint32_t header_addr_high;
+	uint32_t header_addr_low;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 0d8bf2c..4bd1e5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2173,7 +2173,7 @@
 
 	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
 	adev->gfx.config.mem_max_burst_length_bytes = 256;
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		/* Get memory bank mapping mode. */
 		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
 		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
@@ -2648,6 +2648,7 @@
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
+	struct fence *f = NULL;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	unsigned i;
@@ -2659,29 +2660,27 @@
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
+	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(ring, NULL, 256, &ib);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		return r;
+		goto err1;
 	}
 	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
 	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
-	if (r) {
-		amdgpu_gfx_scratch_free(adev, scratch);
-		amdgpu_ib_free(adev, &ib);
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-		return r;
-	}
-	r = amdgpu_fence_wait(ib.fence, false);
+
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err2;
+
+	r = fence_wait(f, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		amdgpu_ib_free(adev, &ib);
-		return r;
+		goto err2;
 	}
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
@@ -2691,14 +2690,19 @@
 	}
 	if (i < adev->usec_timeout) {
 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ib.fence->ring->idx, i);
+			 ring->idx, i);
+		goto err2;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
 			  scratch, tmp);
 		r = -EINVAL;
 	}
-	amdgpu_gfx_scratch_free(adev, scratch);
+
+err2:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
+err1:
+	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
 }
 
@@ -3758,7 +3762,7 @@
 	int r;
 
 	/* allocate rlc buffers */
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		if (adev->asic_type == CHIP_KAVERI) {
 			adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
 			adev->gfx.rlc.reg_list_size =
@@ -3782,7 +3786,9 @@
 		/* save restore block */
 		if (adev->gfx.rlc.save_restore_obj == NULL) {
 			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.save_restore_obj);
+					     AMDGPU_GEM_DOMAIN_VRAM,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     NULL, &adev->gfx.rlc.save_restore_obj);
 			if (r) {
 				dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
 				return r;
@@ -3823,7 +3829,9 @@
 
 		if (adev->gfx.rlc.clear_state_obj == NULL) {
 			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.clear_state_obj);
+					     AMDGPU_GEM_DOMAIN_VRAM,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     NULL, &adev->gfx.rlc.clear_state_obj);
 			if (r) {
 				dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
 				gfx_v7_0_rlc_fini(adev);
@@ -3860,7 +3868,9 @@
 	if (adev->gfx.rlc.cp_table_size) {
 		if (adev->gfx.rlc.cp_table_obj == NULL) {
 			r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->gfx.rlc.cp_table_obj);
+					     AMDGPU_GEM_DOMAIN_VRAM,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     NULL, &adev->gfx.rlc.cp_table_obj);
 			if (r) {
 				dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
 				gfx_v7_0_rlc_fini(adev);
@@ -5594,6 +5604,7 @@
 	.test_ring = gfx_v7_0_ring_test_ring,
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.is_lockup = gfx_v7_0_ring_is_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5610,6 +5621,7 @@
 	.test_ring = gfx_v7_0_ring_test_ring,
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.is_lockup = gfx_v7_0_ring_is_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 20e2cfd..53f0743 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -87,6 +87,13 @@
 MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
 MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
 
+MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
+MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
+MODULE_FIRMWARE("amdgpu/fiji_me.bin");
+MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
+MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
+MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
+
 static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
 {
 	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
@@ -217,6 +224,71 @@
 	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 };
 
+static const u32 fiji_golden_common_all[] =
+{
+	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
+	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
+	mmGB_ADDR_CONFIG, 0xffffffff, 0x12011003,
+	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
+	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
+	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
+	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
+};
+
+static const u32 golden_settings_fiji_a10[] =
+{
+	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
+	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
+	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x00000100,
+	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
+	mmTCC_CTRL, 0x00100000, 0xf30fff7f,
+	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
+	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x7d6cf5e4,
+	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x3928b1a0,
+};
+
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffc0,
+	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
+	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
+	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
+	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
+	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
+	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
+	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
+	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
+	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
+	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
+	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
+	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
+	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
+	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
+	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
+	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
+	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
+	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
+};
+
 static const u32 golden_settings_iceland_a11[] =
 {
 	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
@@ -439,6 +511,18 @@
 						 iceland_golden_common_all,
 						 (const u32)ARRAY_SIZE(iceland_golden_common_all));
 		break;
+	case CHIP_FIJI:
+		amdgpu_program_register_sequence(adev,
+						 fiji_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_fiji_a10,
+						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+		amdgpu_program_register_sequence(adev,
+						 fiji_golden_common_all,
+						 (const u32)ARRAY_SIZE(fiji_golden_common_all));
+		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
@@ -526,6 +610,7 @@
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
+	struct fence *f = NULL;
 	uint32_t scratch;
 	uint32_t tmp = 0;
 	unsigned i;
@@ -537,29 +622,27 @@
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
+	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(ring, NULL, 256, &ib);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		return r;
+		goto err1;
 	}
 	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
 	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
-	if (r) {
-		amdgpu_gfx_scratch_free(adev, scratch);
-		amdgpu_ib_free(adev, &ib);
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-		return r;
-	}
-	r = amdgpu_fence_wait(ib.fence, false);
+
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err2;
+
+	r = fence_wait(f, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-		amdgpu_gfx_scratch_free(adev, scratch);
-		amdgpu_ib_free(adev, &ib);
-		return r;
+		goto err2;
 	}
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
@@ -569,14 +652,18 @@
 	}
 	if (i < adev->usec_timeout) {
 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ib.fence->ring->idx, i);
+			 ring->idx, i);
+		goto err2;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
 			  scratch, tmp);
 		r = -EINVAL;
 	}
-	amdgpu_gfx_scratch_free(adev, scratch);
+err2:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
+err1:
+	amdgpu_gfx_scratch_free(adev, scratch);
 	return r;
 }
 
@@ -601,6 +688,9 @@
 	case CHIP_CARRIZO:
 		chip_name = "carrizo";
 		break;
+	case CHIP_FIJI:
+		chip_name = "fiji";
+		break;
 	default:
 		BUG();
 	}
@@ -1236,6 +1326,7 @@
 			adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
 			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
 		}
+	case CHIP_FIJI:
 	case CHIP_TONGA:
 		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
 			switch (reg_offset) {
@@ -1914,7 +2005,7 @@
 }
 
 /**
- * gmc_v8_0_init_compute_vmid - gart enable
+ * gfx_v8_0_init_compute_vmid - gart enable
  *
  * @rdev: amdgpu_device pointer
  *
@@ -1924,7 +2015,7 @@
 #define DEFAULT_SH_MEM_BASES	(0x6000)
 #define FIRST_COMPUTE_VMID	(8)
 #define LAST_COMPUTE_VMID	(16)
-static void gmc_v8_0_init_compute_vmid(struct amdgpu_device *adev)
+static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
 {
 	int i;
 	uint32_t sh_mem_config;
@@ -1984,6 +2075,23 @@
 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
 		gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
 		break;
+	case CHIP_FIJI:
+		adev->gfx.config.max_shader_engines = 4;
+		adev->gfx.config.max_tile_pipes = 16;
+		adev->gfx.config.max_cu_per_sh = 16;
+		adev->gfx.config.max_sh_per_se = 1;
+		adev->gfx.config.max_backends_per_se = 4;
+		adev->gfx.config.max_texture_channel_caches = 8;
+		adev->gfx.config.max_gprs = 256;
+		adev->gfx.config.max_gs_threads = 32;
+		adev->gfx.config.max_hw_contexts = 8;
+
+		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
+		break;
 	case CHIP_TONGA:
 		adev->gfx.config.max_shader_engines = 4;
 		adev->gfx.config.max_tile_pipes = 8;
@@ -2078,7 +2186,7 @@
 
 	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
 	adev->gfx.config.mem_max_burst_length_bytes = 256;
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		/* Get memory bank mapping mode. */
 		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
 		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
@@ -2174,7 +2282,7 @@
 	vi_srbm_select(adev, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
 
-	gmc_v8_0_init_compute_vmid(adev);
+	gfx_v8_0_init_compute_vmid(adev);
 
 	mutex_lock(&adev->grbm_idx_mutex);
 	/*
@@ -2490,6 +2598,7 @@
 	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
 	switch (adev->asic_type) {
 	case CHIP_TONGA:
+	case CHIP_FIJI:
 		amdgpu_ring_write(ring, 0x16000012);
 		amdgpu_ring_write(ring, 0x0000002A);
 		break;
@@ -3131,7 +3240,8 @@
 
 		/* enable the doorbell if requested */
 		if (use_doorbell) {
-			if (adev->asic_type == CHIP_CARRIZO) {
+			if ((adev->asic_type == CHIP_CARRIZO) ||
+			    (adev->asic_type == CHIP_FIJI)) {
 				WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
 				       AMDGPU_DOORBELL_KIQ << 2);
 				WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
@@ -3875,7 +3985,8 @@
 	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
 
 	if (ring->adev->asic_type == CHIP_TOPAZ ||
-	    ring->adev->asic_type == CHIP_TONGA)
+	    ring->adev->asic_type == CHIP_TONGA ||
+	    ring->adev->asic_type == CHIP_FIJI)
 		/* we got a hw semaphore bug in VI TONGA, return false to switch back to sw fence wait */
 		return false;
 	else {
@@ -4268,6 +4379,7 @@
 	.test_ring = gfx_v8_0_ring_test_ring,
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.is_lockup = gfx_v8_0_ring_is_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -4284,6 +4396,7 @@
 	.test_ring = gfx_v8_0_ring_test_ring,
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.is_lockup = gfx_v8_0_ring_is_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index ae37fce..774528a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -523,17 +523,11 @@
 	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
 			    amdgpu_vm_block_size - 9);
@@ -636,7 +630,7 @@
 	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
 
 	/* base offset of vram pages */
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
 		tmp <<= 22;
 		adev->vm_manager.vram_base_offset = tmp;
@@ -841,7 +835,7 @@
 	gmc_v7_0_set_gart_funcs(adev);
 	gmc_v7_0_set_irq_funcs(adev);
 
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 	} else {
 		u32 tmp = RREG32(mmMC_SEQ_MISC0);
@@ -852,6 +846,13 @@
 	return 0;
 }
 
+static int gmc_v7_0_late_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
 static int gmc_v7_0_sw_init(void *handle)
 {
 	int r;
@@ -957,7 +958,7 @@
 
 	gmc_v7_0_mc_program(adev);
 
-	if (!(adev->flags & AMDGPU_IS_APU)) {
+	if (!(adev->flags & AMD_IS_APU)) {
 		r = gmc_v7_0_mc_load_microcode(adev);
 		if (r) {
 			DRM_ERROR("Failed to load MC firmware!\n");
@@ -976,6 +977,7 @@
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
 	gmc_v7_0_gart_disable(adev);
 
 	return 0;
@@ -1172,7 +1174,7 @@
 
 	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
-		if (!(adev->flags & AMDGPU_IS_APU))
+		if (!(adev->flags & AMD_IS_APU))
 			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
 							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
 	}
@@ -1282,7 +1284,7 @@
 	if (state == AMD_CG_STATE_GATE)
 		gate = true;
 
-	if (!(adev->flags & AMDGPU_IS_APU)) {
+	if (!(adev->flags & AMD_IS_APU)) {
 		gmc_v7_0_enable_mc_mgcg(adev, gate);
 		gmc_v7_0_enable_mc_ls(adev, gate);
 	}
@@ -1301,7 +1303,7 @@
 
 const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
 	.early_init = gmc_v7_0_early_init,
-	.late_init = NULL,
+	.late_init = gmc_v7_0_late_init,
 	.sw_init = gmc_v7_0_sw_init,
 	.sw_fini = gmc_v7_0_sw_fini,
 	.hw_init = gmc_v7_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 8135963..9a07742 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -44,6 +44,7 @@
 
 MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
 MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
+MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
 
 static const u32 golden_settings_tonga_a11[] =
 {
@@ -61,6 +62,19 @@
 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
+static const u32 golden_settings_fiji_a10[] =
+{
+	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+};
+
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+};
+
 static const u32 golden_settings_iceland_a11[] =
 {
 	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
@@ -90,6 +104,14 @@
 						 golden_settings_iceland_a11,
 						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
 		break;
+	case CHIP_FIJI:
+		amdgpu_program_register_sequence(adev,
+						 fiji_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_fiji_a10,
+						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
@@ -202,6 +224,9 @@
 	case CHIP_TONGA:
 		chip_name = "tonga";
 		break;
+	case CHIP_FIJI:
+		chip_name = "fiji";
+		break;
 	case CHIP_CARRIZO:
 		return 0;
 	default: BUG();
@@ -628,19 +653,12 @@
 	tmp = RREG32(mmVM_CONTEXT1_CNTL);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
 	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
 			    amdgpu_vm_block_size - 9);
@@ -737,7 +755,7 @@
 	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;
 
 	/* base offset of vram pages */
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
 		tmp <<= 22;
 		adev->vm_manager.vram_base_offset = tmp;
@@ -816,7 +834,7 @@
 	gmc_v8_0_set_gart_funcs(adev);
 	gmc_v8_0_set_irq_funcs(adev);
 
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 	} else {
 		u32 tmp = RREG32(mmMC_SEQ_MISC0);
@@ -827,6 +845,13 @@
 	return 0;
 }
 
+static int gmc_v8_0_late_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+}
+
 static int gmc_v8_0_sw_init(void *handle)
 {
 	int r;
@@ -934,7 +959,7 @@
 
 	gmc_v8_0_mc_program(adev);
 
-	if (!(adev->flags & AMDGPU_IS_APU)) {
+	if (!(adev->flags & AMD_IS_APU)) {
 		r = gmc_v8_0_mc_load_microcode(adev);
 		if (r) {
 			DRM_ERROR("Failed to load MC firmware!\n");
@@ -953,6 +978,7 @@
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
 	gmc_v8_0_gart_disable(adev);
 
 	return 0;
@@ -1147,7 +1173,7 @@
 
 	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
-		if (!(adev->flags & AMDGPU_IS_APU))
+		if (!(adev->flags & AMD_IS_APU))
 			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
 							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
 	}
@@ -1263,7 +1289,7 @@
 
 const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
 	.early_init = gmc_v8_0_early_init,
-	.late_init = NULL,
+	.late_init = gmc_v8_0_late_init,
 	.sw_init = gmc_v8_0_sw_init,
 	.sw_fini = gmc_v8_0_sw_fini,
 	.hw_init = gmc_v8_0_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h
index c723602..ee6a041 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h
@@ -2163,5 +2163,10 @@
 #define SDMA_PKT_NOP_HEADER_sub_op_shift  8
 #define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift)
 
+/*define for count field*/
+#define SDMA_PKT_NOP_HEADER_count_offset 0
+#define SDMA_PKT_NOP_HEADER_count_mask   0x00003FFF
+#define SDMA_PKT_NOP_HEADER_count_shift  16
+#define SDMA_PKT_NOP_HEADER_COUNT(x) (((x) & SDMA_PKT_NOP_HEADER_count_mask) << SDMA_PKT_NOP_HEADER_count_shift)
 
 #endif /* __ICELAND_SDMA_PKT_OPEN_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index c6f1e2f..c900aa9 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -623,7 +623,9 @@
 
 	/* Allocate FW image data structure and header buffer */
 	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-			       true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
+			       true, AMDGPU_GEM_DOMAIN_VRAM,
+			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			       NULL, toc_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/pptable.h b/drivers/gpu/drm/amd/amdgpu/pptable.h
deleted file mode 100644
index 0030f72..0000000
--- a/drivers/gpu/drm/amd/amdgpu/pptable.h
+++ /dev/null
@@ -1,698 +0,0 @@
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _PPTABLE_H
-#define _PPTABLE_H
-
-#pragma pack(1)
-
-typedef struct _ATOM_PPLIB_THERMALCONTROLLER
-
-{
-    UCHAR ucType;           // one of ATOM_PP_THERMALCONTROLLER_*
-    UCHAR ucI2cLine;        // as interpreted by DAL I2C
-    UCHAR ucI2cAddress;
-    UCHAR ucFanParameters;  // Fan Control Parameters.
-    UCHAR ucFanMinRPM;      // Fan Minimum RPM (hundreds) -- for display purposes only.
-    UCHAR ucFanMaxRPM;      // Fan Maximum RPM (hundreds) -- for display purposes only.
-    UCHAR ucReserved;       // ----
-    UCHAR ucFlags;          // to be defined
-} ATOM_PPLIB_THERMALCONTROLLER;
-
-#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
-#define ATOM_PP_FANPARAMETERS_NOFAN                                 0x80    // No fan is connected to this controller.
-
-#define ATOM_PP_THERMALCONTROLLER_NONE      0
-#define ATOM_PP_THERMALCONTROLLER_LM63      1  // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_ADM1032   2  // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_ADM1030   3  // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_MUA6649   4  // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_LM64      5
-#define ATOM_PP_THERMALCONTROLLER_F75375    6  // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_RV6xx     7
-#define ATOM_PP_THERMALCONTROLLER_RV770     8
-#define ATOM_PP_THERMALCONTROLLER_ADT7473   9
-#define ATOM_PP_THERMALCONTROLLER_KONG      10
-#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO     11
-#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
-#define ATOM_PP_THERMALCONTROLLER_EMC2103   13  /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
-#define ATOM_PP_THERMALCONTROLLER_SUMO      14  /* 0x0E */ // Sumo type, used internally
-#define ATOM_PP_THERMALCONTROLLER_NISLANDS  15
-#define ATOM_PP_THERMALCONTROLLER_SISLANDS  16
-#define ATOM_PP_THERMALCONTROLLER_LM96163   17
-#define ATOM_PP_THERMALCONTROLLER_CISLANDS  18
-#define ATOM_PP_THERMALCONTROLLER_KAVERI    19
-
-
-// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
-// We probably should reserve the bit 0x80 for this use.
-// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
-// The driver can pick the correct internal controller based on the ASIC.
-
-#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL   0x89    // ADT7473 Fan Control + Internal Thermal Controller
-#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL   0x8D    // EMC2103 Fan Control + Internal Thermal Controller
-
-typedef struct _ATOM_PPLIB_STATE
-{
-    UCHAR ucNonClockStateIndex;
-    UCHAR ucClockStateIndices[1]; // variable-sized
-} ATOM_PPLIB_STATE;
-
-
-typedef struct _ATOM_PPLIB_FANTABLE
-{
-    UCHAR   ucFanTableFormat;                // Change this if the table format changes or version changes so that the other fields are not the same.
-    UCHAR   ucTHyst;                         // Temperature hysteresis. Integer.
-    USHORT  usTMin;                          // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
-    USHORT  usTMed;                          // The middle temperature where we change slopes.
-    USHORT  usTHigh;                         // The high point above TMed for adjusting the second slope.
-    USHORT  usPWMMin;                        // The minimum PWM value in percent (0.01% increments).
-    USHORT  usPWMMed;                        // The PWM value (in percent) at TMed.
-    USHORT  usPWMHigh;                       // The PWM value at THigh.
-} ATOM_PPLIB_FANTABLE;
-
-typedef struct _ATOM_PPLIB_FANTABLE2
-{
-    ATOM_PPLIB_FANTABLE basicTable;
-    USHORT  usTMax;                          // The max temperature
-} ATOM_PPLIB_FANTABLE2;
-
-typedef struct _ATOM_PPLIB_FANTABLE3
-{
-	ATOM_PPLIB_FANTABLE2 basicTable2;
-	UCHAR ucFanControlMode;
-	USHORT usFanPWMMax;
-	USHORT usFanOutputSensitivity;
-} ATOM_PPLIB_FANTABLE3;
-
-typedef struct _ATOM_PPLIB_EXTENDEDHEADER
-{
-    USHORT  usSize;
-    ULONG   ulMaxEngineClock;   // For Overdrive.
-    ULONG   ulMaxMemoryClock;   // For Overdrive.
-    // Add extra system parameters here, always adjust size to include all fields.
-    USHORT  usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
-    USHORT  usUVDTableOffset;   //points to ATOM_PPLIB_UVD_Table
-    USHORT  usSAMUTableOffset;  //points to ATOM_PPLIB_SAMU_Table
-    USHORT  usPPMTableOffset;   //points to ATOM_PPLIB_PPM_Table
-    USHORT  usACPTableOffset;  //points to ATOM_PPLIB_ACP_Table   
-    /* points to ATOM_PPLIB_POWERTUNE_Table */
-    USHORT  usPowerTuneTableOffset;
-    /* points to ATOM_PPLIB_CLOCK_Voltage_Dependency_Table for sclkVddgfxTable */
-    USHORT  usSclkVddgfxTableOffset;
-} ATOM_PPLIB_EXTENDEDHEADER;
-
-//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
-#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
-#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
-#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
-#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
-#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
-#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
-#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
-#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
-#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
-#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
-#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
-#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
-#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
-#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000              // Go to boot state on alerts, e.g. on an AC->DC transition.
-#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000   // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
-#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000                   // Does the driver control VDDCI independently from VDDC.
-#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000               // Enable the 'regulator hot' feature.
-#define ATOM_PP_PLATFORM_CAP_BACO          0x00020000               // Does the driver supports BACO state.
-#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE   0x00040000           // Does the driver supports new CAC voltage table.
-#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY   0x00080000     // Does the driver supports revert GPIO5 polarity.
-#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17   0x00100000     // Does the driver supports thermal2GPIO17.
-#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE   0x00200000   // Does the driver supports VR HOT GPIO Configurable.
-#define ATOM_PP_PLATFORM_CAP_TEMP_INVERSION   0x00400000            // Does the driver supports Temp Inversion feature.
-#define ATOM_PP_PLATFORM_CAP_EVV    0x00800000
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE
-{
-      ATOM_COMMON_TABLE_HEADER sHeader;
-
-      UCHAR ucDataRevision;
-
-      UCHAR ucNumStates;
-      UCHAR ucStateEntrySize;
-      UCHAR ucClockInfoSize;
-      UCHAR ucNonClockSize;
-
-      // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
-      USHORT usStateArrayOffset;
-
-      // offset from start of this table to array of ASIC-specific structures,
-      // currently ATOM_PPLIB_CLOCK_INFO.
-      USHORT usClockInfoArrayOffset;
-
-      // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
-      USHORT usNonClockInfoArrayOffset;
-
-      USHORT usBackbiasTime;    // in microseconds
-      USHORT usVoltageTime;     // in microseconds
-      USHORT usTableSize;       //the size of this structure, or the extended structure
-
-      ULONG ulPlatformCaps;            // See ATOM_PP_PLATFORM_CAP_*
-
-      ATOM_PPLIB_THERMALCONTROLLER    sThermalController;
-
-      USHORT usBootClockInfoOffset;
-      USHORT usBootNonClockInfoOffset;
-
-} ATOM_PPLIB_POWERPLAYTABLE;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
-{
-    ATOM_PPLIB_POWERPLAYTABLE basicTable;
-    UCHAR   ucNumCustomThermalPolicy;
-    USHORT  usCustomThermalPolicyArrayOffset;
-}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
-{
-    ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
-    USHORT                     usFormatID;                      // To be used ONLY by PPGen.
-    USHORT                     usFanTableOffset;
-    USHORT                     usExtendendedHeaderOffset;
-} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
-{
-    ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
-    ULONG                      ulGoldenPPID;                    // PPGen use only     
-    ULONG                      ulGoldenRevision;                // PPGen use only
-    USHORT                     usVddcDependencyOnSCLKOffset;
-    USHORT                     usVddciDependencyOnMCLKOffset;
-    USHORT                     usVddcDependencyOnMCLKOffset;
-    USHORT                     usMaxClockVoltageOnDCOffset;
-    USHORT                     usVddcPhaseShedLimitsTableOffset;    // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
-    USHORT                     usMvddDependencyOnMCLKOffset;  
-} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
-{
-    ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
-    ULONG                      ulTDPLimit;
-    ULONG                      ulNearTDPLimit;
-    ULONG                      ulSQRampingThreshold;
-    USHORT                     usCACLeakageTableOffset;         // Points to ATOM_PPLIB_CAC_Leakage_Table
-    ULONG                      ulCACLeakage;                    // The iLeakage for driver calculated CAC leakage table
-    USHORT                     usTDPODLimit;
-    USHORT                     usLoadLineSlope;                 // in milliOhms * 100
-} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
-
-//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
-#define ATOM_PPLIB_CLASSIFICATION_UI_MASK          0x0007
-#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT         0
-#define ATOM_PPLIB_CLASSIFICATION_UI_NONE          0
-#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY       1
-#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED      3
-#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE   5
-// 2, 4, 6, 7 are reserved
-
-#define ATOM_PPLIB_CLASSIFICATION_BOOT                   0x0008
-#define ATOM_PPLIB_CLASSIFICATION_THERMAL                0x0010
-#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE     0x0020
-#define ATOM_PPLIB_CLASSIFICATION_REST                   0x0040
-#define ATOM_PPLIB_CLASSIFICATION_FORCED                 0x0080
-#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE          0x0100
-#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE      0x0200
-#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE               0x0400
-#define ATOM_PPLIB_CLASSIFICATION_3DLOW                  0x0800
-#define ATOM_PPLIB_CLASSIFICATION_ACPI                   0x1000
-#define ATOM_PPLIB_CLASSIFICATION_HD2STATE               0x2000
-#define ATOM_PPLIB_CLASSIFICATION_HDSTATE                0x4000
-#define ATOM_PPLIB_CLASSIFICATION_SDSTATE                0x8000
-
-//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
-#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2     0x0001
-#define ATOM_PPLIB_CLASSIFICATION2_ULV                      0x0002
-#define ATOM_PPLIB_CLASSIFICATION2_MVC                      0x0004   //Multi-View Codec (BD-3D)
-
-//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
-#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY           0x00000001
-#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK         0x00000002
-
-// 0 is 2.5Gb/s, 1 is 5Gb/s
-#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK            0x00000004
-#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT           2
-
-// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
-#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK            0x000000F8
-#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT           3
-
-// lookup into reduced refresh-rate table
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK  0x00000F00
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
-
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED    0
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ         1
-// 2-15 TBD as needed.
-
-#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING        0x00001000
-#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS  0x00002000
-
-#define ATOM_PPLIB_DISALLOW_ON_DC                       0x00004000
-
-#define ATOM_PPLIB_ENABLE_VARIBRIGHT                     0x00008000
-
-//memory related flags
-#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF               0x00010000
-
-//M3 Arb    //2bits, current 3 sets of parameters in total
-#define ATOM_PPLIB_M3ARB_MASK                       0x00060000
-#define ATOM_PPLIB_M3ARB_SHIFT                      17
-
-#define ATOM_PPLIB_ENABLE_DRR                       0x00080000
-
-// remaining 16 bits are reserved
-typedef struct _ATOM_PPLIB_THERMAL_STATE
-{
-    UCHAR   ucMinTemperature;
-    UCHAR   ucMaxTemperature;
-    UCHAR   ucThermalAction;
-}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
-
-// Contained in an array starting at the offset
-// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
-// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
-#define ATOM_PPLIB_NONCLOCKINFO_VER1      12
-#define ATOM_PPLIB_NONCLOCKINFO_VER2      24
-typedef struct _ATOM_PPLIB_NONCLOCK_INFO
-{
-      USHORT usClassification;
-      UCHAR  ucMinTemperature;
-      UCHAR  ucMaxTemperature;
-      ULONG  ulCapsAndSettings;
-      UCHAR  ucRequiredPower;
-      USHORT usClassification2;
-      ULONG  ulVCLK;
-      ULONG  ulDCLK;
-      UCHAR  ucUnused[5];
-} ATOM_PPLIB_NONCLOCK_INFO;
-
-// Contained in an array starting at the offset
-// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
-// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
-typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
-{
-      USHORT usEngineClockLow;
-      UCHAR ucEngineClockHigh;
-
-      USHORT usMemoryClockLow;
-      UCHAR ucMemoryClockHigh;
-
-      USHORT usVDDC;
-      USHORT usUnused1;
-      USHORT usUnused2;
-
-      ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
-
-} ATOM_PPLIB_R600_CLOCK_INFO;
-
-// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
-#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2          1
-#define ATOM_PPLIB_R600_FLAGS_UVDSAFE           2
-#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE    4
-#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF    8
-#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF   16
-#define ATOM_PPLIB_R600_FLAGS_LOWPOWER         32   // On the RV770 use 'low power' setting (sequencer S0).
-
-typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
-
-{
-      USHORT usLowEngineClockLow;         // Low Engine clock in MHz (the same way as on the R600).
-      UCHAR  ucLowEngineClockHigh;
-      USHORT usHighEngineClockLow;        // High Engine clock in MHz.
-      UCHAR  ucHighEngineClockHigh;
-      USHORT usMemoryClockLow;            // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
-      UCHAR  ucMemoryClockHigh;           // Currently unused.
-      UCHAR  ucPadding;                   // For proper alignment and size.
-      USHORT usVDDC;                      // For the 780, use: None, Low, High, Variable
-      UCHAR  ucMaxHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}
-      UCHAR  ucMinHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum downstream width could 
-      USHORT usHTLinkFreq;                // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
-      ULONG  ulFlags; 
-} ATOM_PPLIB_RS780_CLOCK_INFO;
-
-#define ATOM_PPLIB_RS780_VOLTAGE_NONE       0 
-#define ATOM_PPLIB_RS780_VOLTAGE_LOW        1 
-#define ATOM_PPLIB_RS780_VOLTAGE_HIGH       2 
-#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE   3 
-
-#define ATOM_PPLIB_RS780_SPMCLK_NONE        0   // We cannot change the side port memory clock, leave it as it is.
-#define ATOM_PPLIB_RS780_SPMCLK_LOW         1
-#define ATOM_PPLIB_RS780_SPMCLK_HIGH        2
-
-#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE       0 
-#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW        1 
-#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH       2 
-
-typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
-{
-      USHORT usEngineClockLow;
-      UCHAR  ucEngineClockHigh;
-
-      USHORT usMemoryClockLow;
-      UCHAR  ucMemoryClockHigh;
-
-      USHORT usVDDC;
-      USHORT usVDDCI;
-      USHORT usUnused;
-
-      ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
-
-} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
-{
-      USHORT usEngineClockLow;
-      UCHAR  ucEngineClockHigh;
-
-      USHORT usMemoryClockLow;
-      UCHAR  ucMemoryClockHigh;
-
-      USHORT usVDDC;
-      USHORT usVDDCI;
-      UCHAR  ucPCIEGen;
-      UCHAR  ucUnused1;
-
-      ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
-
-} ATOM_PPLIB_SI_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
-{
-      USHORT usEngineClockLow;
-      UCHAR  ucEngineClockHigh;
-
-      USHORT usMemoryClockLow;
-      UCHAR  ucMemoryClockHigh;
-      
-      UCHAR  ucPCIEGen;
-      USHORT usPCIELane;
-} ATOM_PPLIB_CI_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
-      USHORT usEngineClockLow;  //clock frequency & 0xFFFF; the unit is 10 kHz
-      UCHAR  ucEngineClockHigh; //clock frequency >> 16.
-      UCHAR  vddcIndex;         //2-bit VDDC index
-      USHORT tdpLimit;
-      //please initialize to 0
-      USHORT rsv1;
-      //please initialize to 0s
-      ULONG rsv2[2];
-}ATOM_PPLIB_SUMO_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_CZ_CLOCK_INFO {
-      UCHAR index;
-      UCHAR rsv[3];
-} ATOM_PPLIB_CZ_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_STATE_V2
-{
-      //number of valid dpm levels in this state; the driver uses it to calculate the whole
-      //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
-      UCHAR ucNumDPMLevels;
-      
-      //an index into the array of nonClockInfos
-      UCHAR nonClockInfoIndex;
-      /**
-      * The driver will read the first ucNumDPMLevels entries in this array.
-      */
-      UCHAR clockInfoIndex[1];
-} ATOM_PPLIB_STATE_V2;
-
-typedef struct _StateArray{
-    //how many states we have 
-    UCHAR ucNumEntries;
-    
-    ATOM_PPLIB_STATE_V2 states[1];
-}StateArray;
-
-
-typedef struct _ClockInfoArray{
-    //how many clock levels we have
-    UCHAR ucNumEntries;
-    
-    //sizeof(ATOM_PPLIB_CLOCK_INFO)
-    UCHAR ucEntrySize;
-    
-    UCHAR clockInfo[1];
-}ClockInfoArray;
-
-typedef struct _NonClockInfoArray{
-
-    //how many non-clock levels we have; normally the same as the number of states
-    UCHAR ucNumEntries;
-    //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
-    UCHAR ucEntrySize;
-    
-    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
-}NonClockInfoArray;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
-{
-    USHORT usClockLow;
-    UCHAR  ucClockHigh;
-    USHORT usVoltage;
-}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
-{
-    UCHAR ucNumEntries;                                                // Number of entries.
-    ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1];             // Dynamically allocate entries.
-}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
-{
-    USHORT usSclkLow;
-    UCHAR  ucSclkHigh;
-    USHORT usMclkLow;
-    UCHAR  ucMclkHigh;
-    USHORT usVddc;
-    USHORT usVddci;
-}ATOM_PPLIB_Clock_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
-{
-    UCHAR ucNumEntries;                                                // Number of entries.
-    ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1];                  // Dynamically allocate entries.
-}ATOM_PPLIB_Clock_Voltage_Limit_Table;
-
-union _ATOM_PPLIB_CAC_Leakage_Record
-{
-    struct
-    {
-        USHORT usVddc;          // The "fake" standardized VDDC used for power calculations; for CI and newer this is the real VDDC value. In CI it is read as StdVoltageHiSidd.
-        ULONG  ulLeakageValue;  // For CI and newer this is the "fake" standard VDDC value. In CI it is read as StdVoltageLoSidd.
-
-    };
-    struct
-     {
-        USHORT usVddc1;
-        USHORT usVddc2;
-        USHORT usVddc3;
-     };
-};
-
-typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record;
-
-typedef struct _ATOM_PPLIB_CAC_Leakage_Table
-{
-    UCHAR ucNumEntries;                                                 // Number of entries.
-    ATOM_PPLIB_CAC_Leakage_Record entries[1];                           // Dynamically allocate entries.
-}ATOM_PPLIB_CAC_Leakage_Table;
-
-typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
-{
-    USHORT usVoltage;
-    USHORT usSclkLow;
-    UCHAR  ucSclkHigh;
-    USHORT usMclkLow;
-    UCHAR  ucMclkHigh;
-}ATOM_PPLIB_PhaseSheddingLimits_Record;
-
-typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
-{
-    UCHAR ucNumEntries;                                                 // Number of entries.
-    ATOM_PPLIB_PhaseSheddingLimits_Record entries[1];                   // Dynamically allocate entries.
-}ATOM_PPLIB_PhaseSheddingLimits_Table;
-
-typedef struct _VCEClockInfo{
-    USHORT usEVClkLow;
-    UCHAR  ucEVClkHigh;
-    USHORT usECClkLow;
-    UCHAR  ucECClkHigh;
-}VCEClockInfo;
-
-typedef struct _VCEClockInfoArray{
-    UCHAR ucNumEntries;
-    VCEClockInfo entries[1];
-}VCEClockInfoArray;
-
-typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
-{
-    USHORT usVoltage;
-    UCHAR  ucVCEClockInfoIndex;
-}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
-{
-    UCHAR numEntries;
-    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
-}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_VCE_State_Record
-{
-    UCHAR  ucVCEClockInfoIndex;
-    UCHAR  ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
-}ATOM_PPLIB_VCE_State_Record;
-
-typedef struct _ATOM_PPLIB_VCE_State_Table
-{
-    UCHAR numEntries;
-    ATOM_PPLIB_VCE_State_Record entries[1];
-}ATOM_PPLIB_VCE_State_Table;
-
-
-typedef struct _ATOM_PPLIB_VCE_Table
-{
-      UCHAR revid;
-//    VCEClockInfoArray array;
-//    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
-//    ATOM_PPLIB_VCE_State_Table states;
-}ATOM_PPLIB_VCE_Table;
-
-
-typedef struct _UVDClockInfo{
-    USHORT usVClkLow;
-    UCHAR  ucVClkHigh;
-    USHORT usDClkLow;
-    UCHAR  ucDClkHigh;
-}UVDClockInfo;
-
-typedef struct _UVDClockInfoArray{
-    UCHAR ucNumEntries;
-    UVDClockInfo entries[1];
-}UVDClockInfoArray;
-
-typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
-{
-    USHORT usVoltage;
-    UCHAR  ucUVDClockInfoIndex;
-}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
-{
-    UCHAR numEntries;
-    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
-}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_UVD_Table
-{
-      UCHAR revid;
-//    UVDClockInfoArray array;
-//    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
-}ATOM_PPLIB_UVD_Table;
-
-typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
-{
-      USHORT usVoltage;
-      USHORT usSAMClockLow;
-      UCHAR  ucSAMClockHigh;
-}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
-    UCHAR numEntries;
-    ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
-}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_SAMU_Table
-{
-      UCHAR revid;
-      ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
-}ATOM_PPLIB_SAMU_Table;
-
-typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record
-{
-      USHORT usVoltage;
-      USHORT usACPClockLow;
-      UCHAR  ucACPClockHigh;
-}ATOM_PPLIB_ACPClk_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{
-    UCHAR numEntries;
-    ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1];
-}ATOM_PPLIB_ACPClk_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_ACP_Table
-{
-      UCHAR revid;
-      ATOM_PPLIB_ACPClk_Voltage_Limit_Table limits;
-}ATOM_PPLIB_ACP_Table;
-
-typedef struct _ATOM_PowerTune_Table{
-    USHORT usTDP;
-    USHORT usConfigurableTDP;
-    USHORT usTDC;
-    USHORT usBatteryPowerLimit;
-    USHORT usSmallPowerLimit;
-    USHORT usLowCACLeakage;
-    USHORT usHighCACLeakage;
-}ATOM_PowerTune_Table;
-
-typedef struct _ATOM_PPLIB_POWERTUNE_Table
-{
-      UCHAR revid;
-      ATOM_PowerTune_Table power_tune_table;
-}ATOM_PPLIB_POWERTUNE_Table;
-
-typedef struct _ATOM_PPLIB_POWERTUNE_Table_V1
-{
-      UCHAR revid;
-      ATOM_PowerTune_Table power_tune_table;
-      USHORT usMaximumPowerDeliveryLimit;
-      USHORT usReserve[7];
-} ATOM_PPLIB_POWERTUNE_Table_V1;
-
-#define ATOM_PPM_A_A    1
-#define ATOM_PPM_A_I    2
-typedef struct _ATOM_PPLIB_PPM_Table
-{
-      UCHAR  ucRevId;
-      UCHAR  ucPpmDesign;          //A+I or A+A
-      USHORT usCpuCoreNumber;
-      ULONG  ulPlatformTDP;
-      ULONG  ulSmallACPlatformTDP;
-      ULONG  ulPlatformTDC;
-      ULONG  ulSmallACPlatformTDC;
-      ULONG  ulApuTDP;
-      ULONG  ulDGpuTDP;  
-      ULONG  ulDGpuUlvPower;
-      ULONG  ulTjmax;
-} ATOM_PPLIB_PPM_Table;
-
-#pragma pack()
-
-#endif
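Editor's note: the pptable structures deleted above all end in one-element arrays (`entries[1]`) paired with an explicit `ucNumEntries`, and the clock-info table additionally carries its own `ucEntrySize`. That is the pre-C99 flexible-array idiom: a parser must step by the stride recorded in the table, not by `sizeof()` of any one clock-info struct, since newer ASICs append fields. A minimal standalone sketch (types and names here are illustrative, not the driver's):

```c
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch of the pre-C99 flexible-array idiom the removed pptable
 * structures rely on; UCHAR is modelled as uint8_t. The stride comes
 * from ucEntrySize carried in the table itself. */
struct clock_info_array {
	uint8_t ucNumEntries;
	uint8_t ucEntrySize;
	uint8_t clockInfo[1];	/* really ucNumEntries * ucEntrySize bytes */
};

static const uint8_t *clock_info_entry(const struct clock_info_array *a,
				       uint8_t idx)
{
	if (idx >= a->ucNumEntries)
		return NULL;
	return &a->clockInfo[(size_t)idx * a->ucEntrySize];
}

int main(void)
{
	/* Two 3-byte entries packed after the two-byte header. */
	uint8_t blob[] = { 2, 3,  0xAA, 0xBB, 0xCC,  0x11, 0x22, 0x33 };
	const struct clock_info_array *a = (const void *)blob;

	printf("entry 1 starts with 0x%02X\n", *clock_info_entry(a, 1));
	return 0;
}
```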
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index a988dfb..14e8723 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -146,6 +146,8 @@
 		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
 		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
 		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+		if (adev->sdma[i].feature_version >= 20)
+			adev->sdma[i].burst_nop = true;
 
 		if (adev->firmware.smu_load) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -218,6 +220,19 @@
 	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
 }
 
+static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+	int i;
+
+	for (i = 0; i < count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			amdgpu_ring_write(ring, ring->nop |
+				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
+		else
+			amdgpu_ring_write(ring, ring->nop);
+}
+
 /**
  * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
  *
@@ -245,8 +260,8 @@
 	amdgpu_ring_write(ring, next_rptr);
 
 	/* IB packet must end on a 8 DW boundary */
-	while ((ring->wptr & 7) != 2)
-		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
+	sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
+
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
 	/* base must be 32 byte aligned */
@@ -673,6 +688,7 @@
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
+	struct fence *f = NULL;
 	unsigned i;
 	unsigned index;
 	int r;
@@ -688,12 +704,11 @@
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
-
+	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(ring, NULL, 256, &ib);
 	if (r) {
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
-		return r;
+		goto err0;
 	}
 
 	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -707,19 +722,16 @@
 	ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 	ib.length_dw = 8;
 
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err1;
+
+	r = fence_wait(f, false);
 	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-		return r;
-	}
-	r = amdgpu_fence_wait(ib.fence, false);
-	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-		return r;
+		goto err1;
 	}
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -729,12 +741,17 @@
 	}
 	if (i < adev->usec_timeout) {
 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ib.fence->ring->idx, i);
+			 ring->idx, i);
+		goto err1;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
 		r = -EINVAL;
 	}
+
+err1:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
+err0:
 	amdgpu_wb_free(adev, index);
 	return r;
 }
@@ -877,8 +894,19 @@
  */
 static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib)
 {
-	while (ib->length_dw & 0x7)
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+	u32 pad_count;
+	int i;
+
+	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	for (i = 0; i < pad_count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			ib->ptr[ib->length_dw++] =
+				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
+				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
+		else
+			ib->ptr[ib->length_dw++] =
+				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 }
 
 /**
@@ -1312,6 +1340,7 @@
 	.test_ring = sdma_v2_4_ring_test_ring,
 	.test_ib = sdma_v2_4_ring_test_ib,
 	.is_lockup = sdma_v2_4_ring_is_lockup,
+	.insert_nop = sdma_v2_4_ring_insert_nop,
 };
 
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1348,19 +1377,19 @@
  * Used by the amdgpu ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ring *ring,
+static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
 				       uint64_t src_offset,
 				       uint64_t dst_offset,
 				       uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
-			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR));
-	amdgpu_ring_write(ring, byte_count);
-	amdgpu_ring_write(ring, 0); /* src/dst endian swap */
-	amdgpu_ring_write(ring, lower_32_bits(src_offset));
-	amdgpu_ring_write(ring, upper_32_bits(src_offset));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+	ib->ptr[ib->length_dw++] = byte_count;
+	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
 }
 
 /**
@@ -1373,16 +1402,16 @@
  *
  * Fill GPU buffers using the DMA engine (VI).
  */
-static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ring *ring,
+static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib,
 				       uint32_t src_data,
 				       uint64_t dst_offset,
 				       uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
-	amdgpu_ring_write(ring, src_data);
-	amdgpu_ring_write(ring, byte_count);
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = src_data;
+	ib->ptr[ib->length_dw++] = byte_count;
 }
 
 static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
@@ -1415,5 +1444,6 @@
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
 		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
 	}
 }
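Editor's note: the padding rewrite above replaces a spin loop with a closed-form count. The formula `(10 - (ring->wptr & 7)) % 8` leaves the write pointer congruent to 2 mod 8, which is exactly the postcondition the removed `while ((ring->wptr & 7) != 2)` loop established, so the 6-DW INDIRECT packet emitted next still ends on an 8-DW boundary. A quick standalone check (not driver code):

```c
#include <assert.h>
#include <stdint.h>

/* Verify the new pad count matches the old busy-wait loop for every
 * possible write-pointer alignment. */
int main(void)
{
	uint32_t wptr;

	for (wptr = 0; wptr < 64; wptr++) {
		uint32_t pad = (10 - (wptr & 7)) % 8;

		assert(((wptr + pad) & 7) == 2);
		assert(pad < 8);	/* never a full extra turn */
	}
	return 0;
}
```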
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 2b86569..9bfe92d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -53,6 +53,8 @@
 MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin");
 MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
 MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
+MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
 
 static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
 {
@@ -80,6 +82,24 @@
 	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
 };
 
+static const u32 golden_settings_fiji_a10[] =
+{
+	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
+	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
+	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+};
+
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
+	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
+};
+
 static const u32 cz_golden_settings_a11[] =
 {
 	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
@@ -122,6 +142,14 @@
 static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
+	case CHIP_FIJI:
+		amdgpu_program_register_sequence(adev,
+						 fiji_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+		amdgpu_program_register_sequence(adev,
+						 golden_settings_fiji_a10,
+						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
@@ -167,6 +195,9 @@
 	case CHIP_TONGA:
 		chip_name = "tonga";
 		break;
+	case CHIP_FIJI:
+		chip_name = "fiji";
+		break;
 	case CHIP_CARRIZO:
 		chip_name = "carrizo";
 		break;
@@ -187,6 +218,8 @@
 		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
 		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
 		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+		if (adev->sdma[i].feature_version >= 20)
+			adev->sdma[i].burst_nop = true;
 
 		if (adev->firmware.smu_load) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -273,6 +306,19 @@
 	}
 }
 
+static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+	int i;
+
+	for (i = 0; i < count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			amdgpu_ring_write(ring, ring->nop |
+				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
+		else
+			amdgpu_ring_write(ring, ring->nop);
+}
+
 /**
  * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
  *
@@ -299,8 +345,7 @@
 	amdgpu_ring_write(ring, next_rptr);
 
 	/* IB packet must end on a 8 DW boundary */
-	while ((ring->wptr & 7) != 2)
-		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP));
+	sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
@@ -763,6 +808,7 @@
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib ib;
+	struct fence *f = NULL;
 	unsigned i;
 	unsigned index;
 	int r;
@@ -778,12 +824,11 @@
 	gpu_addr = adev->wb.gpu_addr + (index * 4);
 	tmp = 0xCAFEDEAD;
 	adev->wb.wb[index] = cpu_to_le32(tmp);
-
+	memset(&ib, 0, sizeof(ib));
 	r = amdgpu_ib_get(ring, NULL, 256, &ib);
 	if (r) {
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
-		return r;
+		goto err0;
 	}
 
 	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
@@ -797,19 +842,16 @@
 	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
 	ib.length_dw = 8;
 
-	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
+						 AMDGPU_FENCE_OWNER_UNDEFINED,
+						 &f);
+	if (r)
+		goto err1;
+
+	r = fence_wait(f, false);
 	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
-		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);
-		return r;
-	}
-	r = amdgpu_fence_wait(ib.fence, false);
-	if (r) {
-		amdgpu_ib_free(adev, &ib);
-		amdgpu_wb_free(adev, index);
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
-		return r;
+		goto err1;
 	}
 	for (i = 0; i < adev->usec_timeout; i++) {
 		tmp = le32_to_cpu(adev->wb.wb[index]);
@@ -819,12 +861,16 @@
 	}
 	if (i < adev->usec_timeout) {
 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-			 ib.fence->ring->idx, i);
+			 ring->idx, i);
+		goto err1;
 	} else {
 		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
 		r = -EINVAL;
 	}
+err1:
+	fence_put(f);
 	amdgpu_ib_free(adev, &ib);
+err0:
 	amdgpu_wb_free(adev, index);
 	return r;
 }
@@ -967,8 +1013,19 @@
  */
 static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
 {
-	while (ib->length_dw & 0x7)
-		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+	u32 pad_count;
+	int i;
+
+	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+	for (i = 0; i < pad_count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			ib->ptr[ib->length_dw++] =
+				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
+				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
+		else
+			ib->ptr[ib->length_dw++] =
+				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 }
 
 /**
@@ -1406,6 +1463,7 @@
 	.test_ring = sdma_v3_0_ring_test_ring,
 	.test_ib = sdma_v3_0_ring_test_ib,
 	.is_lockup = sdma_v3_0_ring_is_lockup,
+	.insert_nop = sdma_v3_0_ring_insert_nop,
 };
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1442,19 +1500,19 @@
  * Used by the amdgpu ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ring *ring,
+static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
 				       uint64_t src_offset,
 				       uint64_t dst_offset,
 				       uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
-			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR));
-	amdgpu_ring_write(ring, byte_count);
-	amdgpu_ring_write(ring, 0); /* src/dst endian swap */
-	amdgpu_ring_write(ring, lower_32_bits(src_offset));
-	amdgpu_ring_write(ring, upper_32_bits(src_offset));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+	ib->ptr[ib->length_dw++] = byte_count;
+	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
 }
 
 /**
@@ -1467,16 +1525,16 @@
  *
  * Fill GPU buffers using the DMA engine (VI).
  */
-static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ring *ring,
+static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
 				       uint32_t src_data,
 				       uint64_t dst_offset,
 				       uint32_t byte_count)
 {
-	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL));
-	amdgpu_ring_write(ring, lower_32_bits(dst_offset));
-	amdgpu_ring_write(ring, upper_32_bits(dst_offset));
-	amdgpu_ring_write(ring, src_data);
-	amdgpu_ring_write(ring, byte_count);
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = src_data;
+	ib->ptr[ib->length_dw++] = byte_count;
 }
 
 static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
@@ -1509,5 +1567,6 @@
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
 		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
 	}
 }
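Editor's note: both SDMA IB tests are reshaped above into the kernel's usual goto-unwind ladder, replacing the repeated free-and-return blocks in every error branch. A self-contained sketch of the shape, with hypothetical stub names standing in for the driver calls:

```c
#include <stdio.h>

/* Hypothetical stand-ins for the resources the IB tests juggle; these
 * are not driver APIs, just stubs shaped like them. */
static int  get_wb_slot(void)       { puts("get writeback slot");     return 0; }
static int  get_ib(void)            { puts("get IB");                 return 0; }
static int  submit_and_wait(void)   { puts("submit, wait on fence");  return 0; }
static void put_fence_free_ib(void) { puts("fence_put + ib_free"); }
static void free_wb_slot(void)      { puts("free writeback slot"); }

/* Acquire in order, jump to the matching label on failure, release in
 * reverse order. The success path deliberately falls into err1 too, so
 * every resource is released on exactly one path. */
static int run_test(void)
{
	int r;

	r = get_wb_slot();
	if (r)
		return r;
	r = get_ib();
	if (r)
		goto err0;
	r = submit_and_wait();
	if (r)
		goto err1;
	puts("ib test succeeded");
err1:
	put_fence_free_ib();
err0:
	free_wb_slot();
	return r;
}

int main(void)
{
	return run_test();
}
```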
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h
index 099b7b5..e5ebd08 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h
@@ -2236,5 +2236,10 @@
 #define SDMA_PKT_NOP_HEADER_sub_op_shift  8
 #define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift)
 
+/*define for count field*/
+#define SDMA_PKT_NOP_HEADER_count_offset 0
+#define SDMA_PKT_NOP_HEADER_count_mask   0x00003FFF
+#define SDMA_PKT_NOP_HEADER_count_shift  16
+#define SDMA_PKT_NOP_HEADER_COUNT(x) (((x) & SDMA_PKT_NOP_HEADER_count_mask) << SDMA_PKT_NOP_HEADER_count_shift)
 
 #endif /* __TONGA_SDMA_PKT_OPEN_H_ */
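Editor's note: the count field defined above is what enables the burst NOPs inserted earlier in this series. The callers pass `count - 1`, which suggests the field encodes the number of padding DWs that follow the header, so a 7-DW pad becomes one packet instead of seven. A standalone illustration using the macros just added (the 0 stands in for the ring's NOP opcode):

```c
#include <stdio.h>
#include <stdint.h>

#define SDMA_PKT_NOP_HEADER_count_mask   0x00003FFF
#define SDMA_PKT_NOP_HEADER_count_shift  16
#define SDMA_PKT_NOP_HEADER_COUNT(x) (((x) & SDMA_PKT_NOP_HEADER_count_mask) \
				      << SDMA_PKT_NOP_HEADER_count_shift)

int main(void)
{
	uint32_t pad = 7;
	uint32_t pkt = 0 | SDMA_PKT_NOP_HEADER_COUNT(pad - 1);

	printf("burst NOP header: 0x%08x\n", pkt);	/* count=6 in bits 16.. */
	return 0;
}
```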
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 5fc53a4..1f5ac94 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -761,7 +761,9 @@
 
 	/* Allocate FW image data structure and header buffer */
 	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
-				true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf);
+			       true, AMDGPU_GEM_DOMAIN_VRAM,
+			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			       NULL, toc_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
 		return -ENOMEM;
@@ -769,7 +771,9 @@
 
 	/* Allocate buffer for SMU internal buffer */
 	ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
-				true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, smu_buf);
+			       true, AMDGPU_GEM_DOMAIN_VRAM,
+			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+			       NULL, smu_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 4efd671..5fac5da 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -534,7 +534,7 @@
 static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_fence *fence = NULL;
+	struct fence *fence = NULL;
 	int r;
 
 	r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
@@ -555,14 +555,14 @@
 		goto error;
 	}
 
-	r = amdgpu_fence_wait(fence, false);
+	r = fence_wait(fence, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 		goto error;
 	}
 	DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
 error:
-	amdgpu_fence_unref(&fence);
+	fence_put(fence);
 	amdgpu_asic_set_uvd_clocks(adev, 0, 0);
 	return r;
 }
@@ -886,6 +886,7 @@
 	.test_ring = uvd_v4_2_ring_test_ring,
 	.test_ib = uvd_v4_2_ring_test_ib,
 	.is_lockup = amdgpu_ring_test_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
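Editor's note: this hunk, and the identical ones in uvd_v5_0.c and uvd_v6_0.c below, move the IB tests from the driver-private `amdgpu_fence` wrappers to the core fence API: wait, then drop the reference, with `fence_put()` being NULL-safe so the error path needs no guard. A toy refcounted version of that discipline (names are illustrative only):

```c
#include <stdio.h>
#include <stdlib.h>

struct toy_fence { int refs; };

static struct toy_fence *toy_fence_create(void)
{
	struct toy_fence *f = malloc(sizeof(*f));

	if (f)
		f->refs = 1;
	return f;
}

static int toy_fence_wait(struct toy_fence *f)
{
	return f ? 0 : -1;	/* pretend the GPU signalled */
}

static void toy_fence_put(struct toy_fence *f)	/* NULL-safe, like fence_put */
{
	if (f && --f->refs == 0)
		free(f);
}

int main(void)
{
	struct toy_fence *fence = toy_fence_create();
	int r = toy_fence_wait(fence);

	if (r)
		fprintf(stderr, "wait failed (%d)\n", r);
	toy_fence_put(fence);	/* single exit drops the reference */
	return r;
}
```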
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index b756bd9..2d5c59c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -580,7 +580,7 @@
 static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_fence *fence = NULL;
+	struct fence *fence = NULL;
 	int r;
 
 	r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
@@ -601,14 +601,14 @@
 		goto error;
 	}
 
-	r = amdgpu_fence_wait(fence, false);
+	r = fence_wait(fence, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 		goto error;
 	}
 	DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
 error:
-	amdgpu_fence_unref(&fence);
+	fence_put(fence);
 	amdgpu_asic_set_uvd_clocks(adev, 0, 0);
 	return r;
 }
@@ -825,6 +825,7 @@
 	.test_ring = uvd_v5_0_ring_test_ring,
 	.test_ib = uvd_v5_0_ring_test_ib,
 	.is_lockup = amdgpu_ring_test_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 49aa931..d9f553f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -575,7 +575,7 @@
  */
 static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
 {
-	struct amdgpu_fence *fence = NULL;
+	struct fence *fence = NULL;
 	int r;
 
 	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
@@ -590,14 +590,14 @@
 		goto error;
 	}
 
-	r = amdgpu_fence_wait(fence, false);
+	r = fence_wait(fence, false);
 	if (r) {
 		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 		goto error;
 	}
 	DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
 error:
-	amdgpu_fence_unref(&fence);
+	fence_put(fence);
 	return r;
 }
 
@@ -805,6 +805,7 @@
 	.test_ring = uvd_v6_0_ring_test_ring,
 	.test_ib = uvd_v6_0_ring_test_ib,
 	.is_lockup = amdgpu_ring_test_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 303d961d..cd16df5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -643,6 +643,7 @@
 	.test_ring = amdgpu_vce_ring_test_ring,
 	.test_ib = amdgpu_vce_ring_test_ib,
 	.is_lockup = amdgpu_ring_test_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index d1064ca..f0656df 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -32,8 +32,8 @@
 #include "vid.h"
 #include "vce/vce_3_0_d.h"
 #include "vce/vce_3_0_sh_mask.h"
-#include "oss/oss_2_0_d.h"
-#include "oss/oss_2_0_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
 #include "gca/gfx_8_0_d.h"
 #include "smu/smu_7_1_2_d.h"
 #include "smu/smu_7_1_2_sh_mask.h"
@@ -205,7 +205,14 @@
 	u32 tmp;
 	unsigned ret;
 
-	if (adev->flags & AMDGPU_IS_APU)
+	/* Fiji is single pipe */
+	if (adev->asic_type == CHIP_FIJI) {
+		ret = AMDGPU_VCE_HARVEST_VCE1;
+		return ret;
+	}
+
+	/* Tonga and CZ are dual or single pipe */
+	if (adev->flags & AMD_IS_APU)
 		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
 		       VCE_HARVEST_FUSE_MACRO__MASK) >>
 			VCE_HARVEST_FUSE_MACRO__SHIFT;
@@ -419,17 +426,41 @@
 static bool vce_v3_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 mask = 0;
+	int idx;
 
-	return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
+	for (idx = 0; idx < 2; ++idx) {
+		if (adev->vce.harvest_config & (1 << idx))
+			continue;
+
+		if (idx == 0)
+			mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
+		else
+			mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
+	}
+
+	return !(RREG32(mmSRBM_STATUS2) & mask);
 }
 
 static int vce_v3_0_wait_for_idle(void *handle)
 {
 	unsigned i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 mask = 0;
+	int idx;
+
+	for (idx = 0; idx < 2; ++idx) {
+		if (adev->vce.harvest_config & (1 << idx))
+			continue;
+
+		if (idx == 0)
+			mask |= SRBM_STATUS2__VCE0_BUSY_MASK;
+		else
+			mask |= SRBM_STATUS2__VCE1_BUSY_MASK;
+	}
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
+		if (!(RREG32(mmSRBM_STATUS2) & mask))
 			return 0;
 	}
 	return -ETIMEDOUT;
@@ -438,9 +469,21 @@
 static int vce_v3_0_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 mask = 0;
+	int idx;
 
-	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
-			~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
+	for (idx = 0; idx < 2; ++idx) {
+		if (adev->vce.harvest_config & (1 << idx))
+			continue;
+
+		if (idx == 0)
+			mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
+		else
+			mask |= SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;
+	}
+	WREG32_P(mmSRBM_SOFT_RESET, mask,
+		 ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
+		   SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
 	mdelay(5);
 
 	return vce_v3_0_start(adev);
@@ -601,6 +644,7 @@
 	.test_ring = amdgpu_vce_ring_test_ring,
 	.test_ib = amdgpu_vce_ring_test_ib,
 	.is_lockup = amdgpu_ring_test_lockup,
+	.insert_nop = amdgpu_ring_insert_nop,
 };
 
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
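Editor's note: the VCE 3.0 idle, wait, and soft-reset paths above stop hard-coding a single busy bit and instead build a mask per surviving instance, skipping anything flagged in `harvest_config` (Fiji reports VCE1 harvested). A standalone mirror of that loop; the bit values here are illustrative, not the real SRBM_STATUS2 layout:

```c
#include <stdint.h>
#include <stdio.h>

#define VCE0_BUSY_MASK 0x00000080	/* illustrative placeholder values */
#define VCE1_BUSY_MASK 0x00000100

static uint32_t vce_busy_mask(uint32_t harvest_config)
{
	uint32_t mask = 0;
	int idx;

	for (idx = 0; idx < 2; ++idx) {
		if (harvest_config & (1 << idx))
			continue;	/* harvested pipe: never reads busy */
		mask |= idx ? VCE1_BUSY_MASK : VCE0_BUSY_MASK;
	}
	return mask;
}

int main(void)
{
	/* Fiji-like config: VCE1 harvested, so only VCE0's bit is polled. */
	printf("0x%08x\n", vce_busy_mask(1 << 1));
	return 0;
}
```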
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 68552da..552d9e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -203,6 +203,17 @@
 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
 };
 
+static const u32 fiji_mgcg_cgcg_init[] =
+{
+	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
+	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
+	mmPCIE_DATA, 0x000f0000, 0x00000000,
+	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
+	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
+	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
+	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
+};
+
 static const u32 iceland_mgcg_cgcg_init[] =
 {
 	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
@@ -232,6 +243,11 @@
 						 iceland_mgcg_cgcg_init,
 						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
 		break;
+	case CHIP_FIJI:
+		amdgpu_program_register_sequence(adev,
+						 fiji_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
+		break;
 	case CHIP_TONGA:
 		amdgpu_program_register_sequence(adev,
 						 tonga_mgcg_cgcg_init,
@@ -261,7 +277,7 @@
 	u32 reference_clock = adev->clock.spll.reference_freq;
 	u32 tmp;
 
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return reference_clock;
 
 	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
@@ -362,6 +378,26 @@
 
 static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
 	{mmGRBM_STATUS, false},
+	{mmGRBM_STATUS2, false},
+	{mmGRBM_STATUS_SE0, false},
+	{mmGRBM_STATUS_SE1, false},
+	{mmGRBM_STATUS_SE2, false},
+	{mmGRBM_STATUS_SE3, false},
+	{mmSRBM_STATUS, false},
+	{mmSRBM_STATUS2, false},
+	{mmSRBM_STATUS3, false},
+	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
+	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
+	{mmCP_STAT, false},
+	{mmCP_STALLED_STAT1, false},
+	{mmCP_STALLED_STAT2, false},
+	{mmCP_STALLED_STAT3, false},
+	{mmCP_CPF_BUSY_STAT, false},
+	{mmCP_CPF_STALLED_STAT1, false},
+	{mmCP_CPF_STATUS, false},
+	{mmCP_CPC_BUSY_STAT, false},
+	{mmCP_CPC_STALLED_STAT1, false},
+	{mmCP_CPC_STATUS, false},
 	{mmGB_ADDR_CONFIG, false},
 	{mmMC_ARB_RAMCFG, false},
 	{mmGB_TILE_MODE0, false},
@@ -449,6 +485,7 @@
 		asic_register_table = tonga_allowed_read_registers;
 		size = ARRAY_SIZE(tonga_allowed_read_registers);
 		break;
+	case CHIP_FIJI:
 	case CHIP_TONGA:
 	case CHIP_CARRIZO:
 		asic_register_table = cz_allowed_read_registers;
@@ -751,7 +788,7 @@
 		srbm_soft_reset =
 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
 
-	if (!(adev->flags & AMDGPU_IS_APU)) {
+	if (!(adev->flags & AMD_IS_APU)) {
 		if (reset_mask & AMDGPU_RESET_MC)
 		srbm_soft_reset =
 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
@@ -971,7 +1008,7 @@
 	if (amdgpu_pcie_gen2 == 0)
 		return;
 
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return;
 
 	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
@@ -999,7 +1036,7 @@
 	u32 tmp;
 
 	/* not necessary on CZ */
-	if (adev->flags & AMDGPU_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return;
 
 	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
@@ -1127,6 +1164,74 @@
 	},
 };
 
+static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
+{
+	/* ORDER MATTERS! */
+	{
+		.type = AMD_IP_BLOCK_TYPE_COMMON,
+		.major = 2,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vi_common_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GMC,
+		.major = 8,
+		.minor = 5,
+		.rev = 0,
+		.funcs = &gmc_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_IH,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &tonga_ih_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SMC,
+		.major = 7,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &fiji_dpm_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_DCE,
+		.major = 10,
+		.minor = 1,
+		.rev = 0,
+		.funcs = &dce_v10_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_GFX,
+		.major = 8,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &gfx_v8_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_SDMA,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &sdma_v3_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_UVD,
+		.major = 6,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &uvd_v6_0_ip_funcs,
+	},
+	{
+		.type = AMD_IP_BLOCK_TYPE_VCE,
+		.major = 3,
+		.minor = 0,
+		.rev = 0,
+		.funcs = &vce_v3_0_ip_funcs,
+	},
+};
+
 static const struct amdgpu_ip_block_version cz_ip_blocks[] =
 {
 	/* ORDER MATTERS! */
@@ -1202,6 +1307,10 @@
 		adev->ip_blocks = topaz_ip_blocks;
 		adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
 		break;
+	case CHIP_FIJI:
+		adev->ip_blocks = fiji_ip_blocks;
+		adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
+		break;
 	case CHIP_TONGA:
 		adev->ip_blocks = tonga_ip_blocks;
 		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
@@ -1248,7 +1357,7 @@
 	bool smc_enabled = false;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->flags & AMDGPU_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		adev->smc_rreg = &cz_smc_rreg;
 		adev->smc_wreg = &cz_smc_wreg;
 	} else {
@@ -1279,6 +1388,7 @@
 		if (amdgpu_smc_load_fw && smc_enabled)
 			adev->firmware.smu_load = true;
 		break;
+	case CHIP_FIJI:
 	case CHIP_TONGA:
 		adev->has_uvd = true;
 		adev->cg_flags = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
index 3b45332..fc120ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h
@@ -30,7 +30,7 @@
 int cz_smu_fini(struct amdgpu_device *adev);
 
 extern const struct amd_ip_funcs tonga_dpm_ip_funcs;
-
+extern const struct amd_ip_funcs fiji_dpm_ip_funcs;
 extern const struct amd_ip_funcs iceland_dpm_ip_funcs;
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 31bb894..d98aa9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -66,6 +66,11 @@
 
 #define AMDGPU_NUM_OF_VMIDS			8
 
+#define		PIPEID(x)					((x) << 0)
+#define		MEID(x)						((x) << 2)
+#define		VMID(x)						((x) << 4)
+#define		QUEUEID(x)					((x) << 8)
+
 #define RB_BITMAP_WIDTH_PER_SH     2
 
 #define MC_SEQ_MISC0__MT__MASK	0xf0000000
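Editor's note: the PIPEID/MEID/VMID/QUEUEID helpers added to vid.h pack disjoint bit ranges of a queue selector (pipe in bits 0-1, ME in 2-3, VMID in 4-7, queue from bit 8, judging by the shifts), so a full selector is just the OR of the four. A standalone sketch:

```c
#include <stdint.h>
#include <stdio.h>

#define PIPEID(x)	((x) << 0)
#define MEID(x)		((x) << 2)
#define VMID(x)		((x) << 4)
#define QUEUEID(x)	((x) << 8)

int main(void)
{
	/* Pipe 1 of ME 1, VMID 8, queue 3 -> 0x00000385. */
	uint32_t sel = PIPEID(1) | MEID(1) | VMID(8) | QUEUEID(3);

	printf("queue selector: 0x%08x\n", sel);
	return 0;
}
```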
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index 8dfac37..e13c67c 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -4,6 +4,6 @@
 
 config HSA_AMD
 	tristate "HSA kernel driver for AMD GPU devices"
-	depends on DRM_RADEON && AMD_IOMMU_V2 && X86_64
+	depends on (DRM_RADEON || DRM_AMDGPU) && AMD_IOMMU_V2 && X86_64
 	help
 	  Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 2855115..7fc9b0f 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -2,7 +2,8 @@
 # Makefile for Heterogeneous System Architecture support for AMD GPU devices
 #
 
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/  \
+		-Idrivers/gpu/drm/amd/include/asic_reg
 
 amdkfd-y	:= kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
 		kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
index 183be5b..48769d1 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_regs.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
@@ -65,17 +65,6 @@
 
 #define	AQL_ENABLE					1
 
-#define	SDMA_RB_VMID(x)					(x << 24)
-#define	SDMA_RB_ENABLE					(1 << 0)
-#define	SDMA_RB_SIZE(x)					((x) << 1) /* log2 */
-#define	SDMA_RPTR_WRITEBACK_ENABLE			(1 << 12)
-#define	SDMA_RPTR_WRITEBACK_TIMER(x)			((x) << 16) /* log2 */
-#define	SDMA_OFFSET(x)					(x << 0)
-#define	SDMA_DB_ENABLE					(1 << 28)
-#define	SDMA_ATC					(1 << 0)
-#define	SDMA_VA_PTR32					(1 << 4)
-#define	SDMA_VA_SHARED_BASE(x)				(x << 8)
-
 #define GRBM_GFX_INDEX					0x30800
 
 #define	ATC_VMID_PASID_MAPPING_VALID			(1U << 31)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index c991973..c6a1b4c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -31,7 +31,7 @@
 #include <uapi/linux/kfd_ioctl.h>
 #include <linux/time.h>
 #include <linux/mm.h>
-#include <uapi/asm-generic/mman-common.h>
+#include <linux/mman.h>
 #include <asm/processor.h>
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 75312c8..3f95f7c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -80,7 +80,12 @@
 	{ 0x1318, &kaveri_device_info },	/* Kaveri */
 	{ 0x131B, &kaveri_device_info },	/* Kaveri */
 	{ 0x131C, &kaveri_device_info },	/* Kaveri */
-	{ 0x131D, &kaveri_device_info }		/* Kaveri */
+	{ 0x131D, &kaveri_device_info },	/* Kaveri */
+	{ 0x9870, &carrizo_device_info },	/* Carrizo */
+	{ 0x9874, &carrizo_device_info },	/* Carrizo */
+	{ 0x9875, &carrizo_device_info },	/* Carrizo */
+	{ 0x9876, &carrizo_device_info },	/* Carrizo */
+	{ 0x9877, &carrizo_device_info }	/* Carrizo */
 };
 
 static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 9ce8a20..c6f435a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -23,6 +23,7 @@
 
 #include "kfd_device_queue_manager.h"
 #include "cik_regs.h"
+#include "oss/oss_2_4_sh_mask.h"
 
 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
 				   struct qcm_process_device *qpd,
@@ -135,13 +136,16 @@
 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
 				struct qcm_process_device *qpd)
 {
-	uint32_t value = SDMA_ATC;
+	uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);
 
 	if (q->process->is_32bit_user_mode)
-		value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd));
+		value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
+				get_sh_mem_bases_32(qpd_to_pdd(qpd));
 	else
-		value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64(
-							qpd_to_pdd(qpd)));
+		value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
+				SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
+				SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
+
 	q->properties.sdma_vm_addr = value;
 }
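Editor's note: `init_sdma_vm()` now composes the SDMA virtual-address register from the generated `SDMA0_RLC0_VIRTUAL_ADDR` shift/mask macros instead of the private `SDMA_*` defines removed from cik_regs.h. A standalone sketch of the composition; the shift and mask values are transcribed here for illustration and should be taken from oss_2_4_sh_mask.h:

```c
#include <stdint.h>
#include <stdio.h>

#define ATC__SHIFT		0x0	/* illustrative transcriptions */
#define PTR32__SHIFT		0x4
#define SHARED_BASE__SHIFT	0x8
#define SHARED_BASE_MASK	0x0000ff00

static uint32_t sdma_vm_addr(int is_32bit, uint32_t bases32,
			     uint32_t nybble64)
{
	uint32_t value = 1 << ATC__SHIFT;	/* ATC always on */

	if (is_32bit)
		value |= (1 << PTR32__SHIFT) | bases32;
	else
		value |= (nybble64 << SHARED_BASE__SHIFT) & SHARED_BASE_MASK;
	return value;
}

int main(void)
{
	printf("64-bit process, nybble 0x8: 0x%08x\n",
	       sdma_vm_addr(0, 0, 0x8));
	return 0;
}
```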
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index 4c15212..7e9cae9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -22,6 +22,10 @@
  */
 
 #include "kfd_device_queue_manager.h"
+#include "gca/gfx_8_0_enum.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_enum.h"
+#include "oss/oss_3_0_sh_mask.h"
 
 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
 				   struct qcm_process_device *qpd,
@@ -37,14 +41,40 @@
 
 void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops)
 {
-	pr_warn("amdkfd: VI DQM is not currently supported\n");
-
 	ops->set_cache_memory_policy = set_cache_memory_policy_vi;
 	ops->register_process = register_process_vi;
 	ops->initialize = initialize_cpsch_vi;
 	ops->init_sdma_vm = init_sdma_vm;
 }
 
+static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+{
+	/* In 64-bit mode, we can only control the top 3 bits of the LDS,
+	 * scratch and GPUVM apertures.
+	 * The hardware fills in the remaining 59 bits according to the
+	 * following pattern:
+	 * LDS:		X0000000'00000000 - X0000001'00000000 (4GB)
+	 * Scratch:	X0000001'00000000 - X0000002'00000000 (4GB)
+	 * GPUVM:	Y0010000'00000000 - Y0020000'00000000 (1TB)
+	 *
+	 * (where X/Y is the configurable nybble with the low-bit 0)
+	 *
+	 * LDS and scratch will have the same top nybble programmed in the
+	 * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
+	 * GPUVM can have a different top nybble programmed in the
+	 * top 3 bits of SH_MEM_BASES.SHARED_BASE.
+	 * We don't bother to support different top nybbles
+	 * for LDS/Scratch and GPUVM.
+	 */
+
+	BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+		top_address_nybble == 0);
+
+	return top_address_nybble << 12 |
+			(top_address_nybble << 12) <<
+			SH_MEM_BASES__SHARED_BASE__SHIFT;
+}
+
 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
 				   struct qcm_process_device *qpd,
 				   enum cache_policy default_policy,
@@ -52,18 +82,83 @@
 				   void __user *alternate_aperture_base,
 				   uint64_t alternate_aperture_size)
 {
-	return false;
+	uint32_t default_mtype;
+	uint32_t ape1_mtype;
+
+	default_mtype = (default_policy == cache_policy_coherent) ?
+			MTYPE_CC :
+			MTYPE_NC;
+
+	ape1_mtype = (alternate_policy == cache_policy_coherent) ?
+			MTYPE_CC :
+			MTYPE_NC;
+
+	qpd->sh_mem_config = (qpd->sh_mem_config &
+			SH_MEM_CONFIG__ADDRESS_MODE_MASK) |
+		SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+				SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+		default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+		ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
+		SH_MEM_CONFIG__PRIVATE_ATC_MASK;
+
+	return true;
 }
 
 static int register_process_vi(struct device_queue_manager *dqm,
 					struct qcm_process_device *qpd)
 {
-	return -1;
+	struct kfd_process_device *pdd;
+	unsigned int temp;
+
+	BUG_ON(!dqm || !qpd);
+
+	pdd = qpd_to_pdd(qpd);
+
+	/* check if sh_mem_config register already configured */
+	if (qpd->sh_mem_config == 0) {
+		qpd->sh_mem_config =
+			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+				SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+			MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+			MTYPE_CC << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
+			SH_MEM_CONFIG__PRIVATE_ATC_MASK;
+
+		qpd->sh_mem_ape1_limit = 0;
+		qpd->sh_mem_ape1_base = 0;
+	}
+
+	if (qpd->pqm->process->is_32bit_user_mode) {
+		temp = get_sh_mem_bases_32(pdd);
+		qpd->sh_mem_bases = temp << SH_MEM_BASES__SHARED_BASE__SHIFT;
+		qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA32 <<
+					SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
+	} else {
+		temp = get_sh_mem_bases_nybble_64(pdd);
+		qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
+		qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 <<
+			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
+	}
+
+	pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+		qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
+
+	return 0;
 }
 
 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
 				struct qcm_process_device *qpd)
 {
+	uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);
+
+	if (q->process->is_32bit_user_mode)
+		value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
+				get_sh_mem_bases_32(qpd_to_pdd(qpd));
+	else
+		value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
+				SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
+				SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
+
+	q->properties.sdma_vm_addr = value;
 }
 
 static int initialize_cpsch_vi(struct device_queue_manager *dqm)
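Editor's note: the comment block in `compute_sh_mem_bases_64bit()` above explains the layout; the arithmetic itself places the configurable top nybble in the high 3 bits of both 16-bit halves of SH_MEM_BASES. A standalone copy of the computation (assuming the SHARED_BASE shift of 16 from gfx_8_0_sh_mask.h):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SH_MEM_BASES__SHARED_BASE__SHIFT 0x10	/* per gfx_8_0_sh_mask.h */

/* Top nybble must be even, nonzero, and at most 0xE; it lands in the
 * high 3 bits of both PRIVATE_BASE (low half) and SHARED_BASE (high
 * half) of SH_MEM_BASES. */
static uint32_t sh_mem_bases_64bit(uint32_t nybble)
{
	assert(!(nybble & 1) && nybble && nybble <= 0xE);
	return nybble << 12 |
	       (nybble << 12) << SH_MEM_BASES__SHARED_BASE__SHIFT;
}

int main(void)
{
	/* Nybble 0x8 -> 0x80008000: apertures start at 0x80000000'00000000. */
	printf("SH_MEM_BASES = 0x%08x\n", sh_mem_bases_64bit(0x8));
	return 0;
}
```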
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 35b9875..2b65510 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -33,7 +33,7 @@
 #include <linux/time.h>
 #include "kfd_priv.h"
 #include <linux/mm.h>
-#include <uapi/asm-generic/mman-common.h>
+#include <linux/mman.h>
 #include <asm/processor.h>
 
 /*
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 4349794..d83de98 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -27,6 +27,7 @@
 #include "kfd_mqd_manager.h"
 #include "cik_regs.h"
 #include "cik_structs.h"
+#include "oss/oss_2_4_sh_mask.h"
 
 static inline struct cik_mqd *get_mqd(void *mqd)
 {
@@ -214,17 +215,20 @@
 	BUG_ON(!mm || !mqd || !q);
 
 	m = get_sdma_mqd(mqd);
-	m->sdma_rlc_rb_cntl =
-		SDMA_RB_SIZE((ffs(q->queue_size / sizeof(unsigned int)))) |
-		SDMA_RB_VMID(q->vmid) |
-		SDMA_RPTR_WRITEBACK_ENABLE |
-		SDMA_RPTR_WRITEBACK_TIMER(6);
+	m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
+			SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+			q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
+			1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
+			6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
 
 	m->sdma_rlc_rb_base = lower_32_bits(q->queue_address >> 8);
 	m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8);
 	m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
 	m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
-	m->sdma_rlc_doorbell = SDMA_OFFSET(q->doorbell_off) | SDMA_DB_ENABLE;
+	m->sdma_rlc_doorbell = q->doorbell_off <<
+			SDMA0_RLC0_DOORBELL__OFFSET__SHIFT |
+			1 << SDMA0_RLC0_DOORBELL__ENABLE__SHIFT;
+
 	m->sdma_rlc_virtual_addr = q->sdma_vm_addr;
 
 	m->sdma_engine_id = q->sdma_engine_id;
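The RB_SIZE value programmed above is derived with ffs(): for a power-of-two ring, ffs(x) == log2(x) + 1. A standalone sketch of that arithmetic, assuming a 4 KiB SDMA ring (the field semantics are inferred from the shift macros used above):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int queue_size = 4096;	/* ring size in bytes (power of two) */
	unsigned int dwords = queue_size / sizeof(unsigned int);

	/* 1024 dwords -> ffs(1024) == 11 goes into the RB_SIZE field */
	printf("RB_SIZE encoding: %d\n", ffs((int)dwords));
	return 0;
}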
@@ -234,7 +238,9 @@
 	if (q->queue_size > 0 &&
 			q->queue_address != 0 &&
 			q->queue_percent > 0) {
-		m->sdma_rlc_rb_cntl |= SDMA_RB_ENABLE;
+		m->sdma_rlc_rb_cntl |=
+				1 << SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT;
+
 		q->is_active = true;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index b3a7e3b..fa32c32 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -22,12 +22,255 @@
  */
 
 #include <linux/printk.h>
+#include <linux/slab.h>
 #include "kfd_priv.h"
 #include "kfd_mqd_manager.h"
+#include "vi_structs.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "gca/gfx_8_0_enum.h"
+
+#define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
+
+static inline struct vi_mqd *get_mqd(void *mqd)
+{
+	return (struct vi_mqd *)mqd;
+}
+
+static int init_mqd(struct mqd_manager *mm, void **mqd,
+			struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+			struct queue_properties *q)
+{
+	int retval;
+	uint64_t addr;
+	struct vi_mqd *m;
+
+	retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct vi_mqd),
+			mqd_mem_obj);
+	if (retval != 0)
+		return -ENOMEM;
+
+	m = (struct vi_mqd *) (*mqd_mem_obj)->cpu_ptr;
+	addr = (*mqd_mem_obj)->gpu_addr;
+
+	memset(m, 0, sizeof(struct vi_mqd));
+
+	m->header = 0xC0310800;
+	m->compute_pipelinestat_enable = 1;
+	m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+	m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+	m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+	m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+
+	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
+			0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+
+	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT |
+			MTYPE_UC << CP_MQD_CONTROL__MTYPE__SHIFT;
+
+	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
+	m->cp_mqd_base_addr_hi        = upper_32_bits(addr);
+
+	m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
+			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
+			10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
+
+	m->cp_hqd_pipe_priority = 1;
+	m->cp_hqd_queue_priority = 15;
+
+	m->cp_hqd_eop_rptr = 1 << CP_HQD_EOP_RPTR__INIT_FETCHER__SHIFT;
+
+	if (q->format == KFD_QUEUE_FORMAT_AQL)
+		m->cp_hqd_iq_rptr = 1;
+
+	*mqd = m;
+	if (gart_addr != NULL)
+		*gart_addr = addr;
+	retval = mm->update_mqd(mm, m, q);
+
+	return retval;
+}
+
+static int load_mqd(struct mqd_manager *mm, void *mqd,
+			uint32_t pipe_id, uint32_t queue_id,
+			uint32_t __user *wptr)
+{
+	return mm->dev->kfd2kgd->hqd_load
+		(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+}
+
+static int __update_mqd(struct mqd_manager *mm, void *mqd,
+			struct queue_properties *q, unsigned int mtype,
+			unsigned int atc_bit)
+{
+	struct vi_mqd *m;
+
+	BUG_ON(!mm || !q || !mqd);
+
+	pr_debug("kfd: In func %s\n", __func__);
+
+	m = get_mqd(mqd);
+
+	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT |
+			atc_bit << CP_HQD_PQ_CONTROL__PQ_ATC__SHIFT |
+			mtype << CP_HQD_PQ_CONTROL__MTYPE__SHIFT;
+	m->cp_hqd_pq_control |=
+			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+	pr_debug("kfd: cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+
+	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+
+	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+
+	m->cp_hqd_pq_doorbell_control =
+		1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT |
+		q->doorbell_off <<
+			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+	pr_debug("kfd: cp_hqd_pq_doorbell_control 0x%x\n",
+			m->cp_hqd_pq_doorbell_control);
+
+	m->cp_hqd_eop_control = atc_bit << CP_HQD_EOP_CONTROL__EOP_ATC__SHIFT |
+			mtype << CP_HQD_EOP_CONTROL__MTYPE__SHIFT;
+
+	m->cp_hqd_ib_control = atc_bit << CP_HQD_IB_CONTROL__IB_ATC__SHIFT |
+			3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
+			mtype << CP_HQD_IB_CONTROL__MTYPE__SHIFT;
+
+	m->cp_hqd_eop_control |=
+		ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1;
+	m->cp_hqd_eop_base_addr_lo =
+			lower_32_bits(q->eop_ring_buffer_address >> 8);
+	m->cp_hqd_eop_base_addr_hi =
+			upper_32_bits(q->eop_ring_buffer_address >> 8);
+
+	m->cp_hqd_iq_timer = atc_bit << CP_HQD_IQ_TIMER__IQ_ATC__SHIFT |
+			mtype << CP_HQD_IQ_TIMER__MTYPE__SHIFT;
+
+	m->cp_hqd_vmid = q->vmid;
+
+	if (q->format == KFD_QUEUE_FORMAT_AQL) {
+		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
+				2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT;
+	}
+
+	m->cp_hqd_active = 0;
+	q->is_active = false;
+	if (q->queue_size > 0 &&
+			q->queue_address != 0 &&
+			q->queue_percent > 0) {
+		m->cp_hqd_active = 1;
+		q->is_active = true;
+	}
+
+	return 0;
+}
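The repeated "- 1 - 1" above is a two-step conversion: ffs(dwords) - 1 is log2(dwords) for a power-of-two queue, and the second - 1 matches the register's "log2 minus one" size encoding. A minimal standalone sketch of the idiom (the encoding rule itself is inferred from the code above):

#include <stdio.h>
#include <strings.h>	/* ffs() */

static int queue_size_order(unsigned int queue_size_bytes)
{
	unsigned int dwords = queue_size_bytes / sizeof(unsigned int);

	/* ffs(dwords) - 1 == log2(dwords); the field stores log2 - 1 */
	return ffs((int)dwords) - 1 - 1;
}

int main(void)
{
	/* 4 KiB queue = 1024 dwords -> log2 = 10 -> field value 9 */
	printf("order = %d\n", queue_size_order(4096));
	return 0;
}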
+
+
+static int update_mqd(struct mqd_manager *mm, void *mqd,
+			struct queue_properties *q)
+{
+	return __update_mqd(mm, mqd, q, MTYPE_CC, 1);
+}
+
+static int destroy_mqd(struct mqd_manager *mm, void *mqd,
+			enum kfd_preempt_type type,
+			unsigned int timeout, uint32_t pipe_id,
+			uint32_t queue_id)
+{
+	return mm->dev->kfd2kgd->hqd_destroy
+		(mm->dev->kgd, type, timeout,
+		pipe_id, queue_id);
+}
+
+static void uninit_mqd(struct mqd_manager *mm, void *mqd,
+			struct kfd_mem_obj *mqd_mem_obj)
+{
+	BUG_ON(!mm || !mqd);
+	kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+}
+
+static bool is_occupied(struct mqd_manager *mm, void *mqd,
+			uint64_t queue_address,	uint32_t pipe_id,
+			uint32_t queue_id)
+{
+	return mm->dev->kfd2kgd->hqd_is_occupied(
+		mm->dev->kgd, queue_address,
+		pipe_id, queue_id);
+}
+
+static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
+			struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+			struct queue_properties *q)
+{
+	struct vi_mqd *m;
+	int retval = init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
+
+	if (retval != 0)
+		return retval;
+
+	m = get_mqd(*mqd);
+
+	m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
+			1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
+
+	return retval;
+}
+
+static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
+			struct queue_properties *q)
+{
+	struct vi_mqd *m;
+	int retval = __update_mqd(mm, mqd, q, MTYPE_UC, 0);
+
+	if (retval != 0)
+		return retval;
+
+	m = get_mqd(mqd);
+	m->cp_hqd_vmid = q->vmid;
+	return retval;
+}
 
 struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
-					struct kfd_dev *dev)
+		struct kfd_dev *dev)
 {
-	pr_warn("amdkfd: VI MQD is not currently supported\n");
-	return NULL;
+	struct mqd_manager *mqd;
+
+	BUG_ON(!dev);
+	BUG_ON(type >= KFD_MQD_TYPE_MAX);
+
+	pr_debug("kfd: In func %s\n", __func__);
+
+	mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
+	if (!mqd)
+		return NULL;
+
+	mqd->dev = dev;
+
+	switch (type) {
+	case KFD_MQD_TYPE_CP:
+	case KFD_MQD_TYPE_COMPUTE:
+		mqd->init_mqd = init_mqd;
+		mqd->uninit_mqd = uninit_mqd;
+		mqd->load_mqd = load_mqd;
+		mqd->update_mqd = update_mqd;
+		mqd->destroy_mqd = destroy_mqd;
+		mqd->is_occupied = is_occupied;
+		break;
+	case KFD_MQD_TYPE_HIQ:
+		mqd->init_mqd = init_mqd_hiq;
+		mqd->uninit_mqd = uninit_mqd;
+		mqd->load_mqd = load_mqd;
+		mqd->update_mqd = update_mqd_hiq;
+		mqd->destroy_mqd = destroy_mqd;
+		mqd->is_occupied = is_occupied;
+		break;
+	case KFD_MQD_TYPE_SDMA:
+		break;
+	default:
+		kfree(mqd);
+		return NULL;
+	}
+
+	return mqd;
 }
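mqd_manager_init_vi() hands back a per-ASIC table of function pointers that the rest of the driver calls through. The same dispatch pattern, reduced to a self-contained sketch (the names below are illustrative stand-ins, not the driver's types):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct mqd_manager's ops table. */
struct ops {
	int (*update)(int arg);
};

static int update_normal(int arg) { return arg; }
static int update_hiq(int arg) { return arg | 0x100; }	/* privileged path */

static struct ops *ops_init(int is_hiq)
{
	struct ops *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;
	/* select the handler once, like the switch on queue type above */
	o->update = is_hiq ? update_hiq : update_normal;
	return o;
}

int main(void)
{
	struct ops *o = ops_init(1);

	printf("0x%x\n", o ? o->update(1) : -1);
	free(o);
	return 0;
}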
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 99b6d28..90f3914 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -27,6 +27,7 @@
 #include "kfd_kernel_queue.h"
 #include "kfd_priv.h"
 #include "kfd_pm4_headers.h"
+#include "kfd_pm4_headers_vi.h"
 #include "kfd_pm4_opcodes.h"
 
 static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
@@ -55,6 +56,7 @@
 				bool *over_subscription)
 {
 	unsigned int process_count, queue_count;
+	unsigned int map_queue_size;
 
 	BUG_ON(!pm || !rlib_size || !over_subscription);
 
@@ -69,9 +71,13 @@
 		pr_debug("kfd: over subscribed runlist\n");
 	}
 
+	map_queue_size =
+		(pm->dqm->dev->device_info->asic_family == CHIP_CARRIZO) ?
+		sizeof(struct pm4_mes_map_queues) :
+		sizeof(struct pm4_map_queues);
 	/* calculate run list ib allocation size */
 	*rlib_size = process_count * sizeof(struct pm4_map_process) +
-		     queue_count * sizeof(struct pm4_map_queues);
+		     queue_count * map_queue_size;
 
 	/*
 	 * Increase the allocation size in case we need a chained run list
@@ -176,6 +182,71 @@
 	return 0;
 }
 
+static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
+		struct queue *q, bool is_static)
+{
+	struct pm4_mes_map_queues *packet;
+	bool use_static = is_static;
+
+	BUG_ON(!pm || !buffer || !q);
+
+	pr_debug("kfd: In func %s\n", __func__);
+
+	packet = (struct pm4_mes_map_queues *)buffer;
+	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
+
+	packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
+					sizeof(struct pm4_mes_map_queues));
+	packet->bitfields2.alloc_format =
+		alloc_format__mes_map_queues__one_per_pipe_vi;
+	packet->bitfields2.num_queues = 1;
+	packet->bitfields2.queue_sel =
+		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
+
+	packet->bitfields2.engine_sel =
+		engine_sel__mes_map_queues__compute_vi;
+	packet->bitfields2.queue_type =
+		queue_type__mes_map_queues__normal_compute_vi;
+
+	switch (q->properties.type) {
+	case KFD_QUEUE_TYPE_COMPUTE:
+		if (use_static)
+			packet->bitfields2.queue_type =
+		queue_type__mes_map_queues__normal_latency_static_queue_vi;
+		break;
+	case KFD_QUEUE_TYPE_DIQ:
+		packet->bitfields2.queue_type =
+			queue_type__mes_map_queues__debug_interface_queue_vi;
+		break;
+	case KFD_QUEUE_TYPE_SDMA:
+		packet->bitfields2.engine_sel =
+				engine_sel__mes_map_queues__sdma0_vi;
+		use_static = false; /* no static queues under SDMA */
+		break;
+	default:
+		pr_err("kfd: in %s: invalid queue type %d\n", __func__,
+				q->properties.type);
+		BUG();
+		break;
+	}
+	packet->bitfields3.doorbell_offset =
+			q->properties.doorbell_off;
+
+	packet->mqd_addr_lo =
+			lower_32_bits(q->gart_mqd_addr);
+
+	packet->mqd_addr_hi =
+			upper_32_bits(q->gart_mqd_addr);
+
+	packet->wptr_addr_lo =
+			lower_32_bits((uint64_t)q->properties.write_ptr);
+
+	packet->wptr_addr_hi =
+			upper_32_bits((uint64_t)q->properties.write_ptr);
+
+	return 0;
+}
+
 static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
 				struct queue *q, bool is_static)
 {
@@ -292,8 +363,17 @@
 			pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n",
 				kq->queue->queue, qpd->is_debug);
 
-			retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
-						kq->queue, qpd->is_debug);
+			if (pm->dqm->dev->device_info->asic_family ==
+					CHIP_CARRIZO)
+				retval = pm_create_map_queue_vi(pm,
+						&rl_buffer[rl_wptr],
+						kq->queue,
+						qpd->is_debug);
+			else
+				retval = pm_create_map_queue(pm,
+						&rl_buffer[rl_wptr],
+						kq->queue,
+						qpd->is_debug);
 			if (retval != 0)
 				return retval;
 
@@ -309,8 +389,17 @@
 			pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n",
 				q->queue, qpd->is_debug);
 
-			retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
-						q,  qpd->is_debug);
+			if (pm->dqm->dev->device_info->asic_family ==
+					CHIP_CARRIZO)
+				retval = pm_create_map_queue_vi(pm,
+						&rl_buffer[rl_wptr],
+						q,
+						qpd->is_debug);
+			else
+				retval = pm_create_map_queue(pm,
+						&rl_buffer[rl_wptr],
+						q,
+						qpd->is_debug);
 
 			if (retval != 0)
 				return retval;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
new file mode 100644
index 0000000..08c7219
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef F32_MES_PM4_PACKETS_H
+#define F32_MES_PM4_PACKETS_H
+
+#ifndef PM4_MES_HEADER_DEFINED
+#define PM4_MES_HEADER_DEFINED
+union PM4_MES_TYPE_3_HEADER {
+	struct {
+		uint32_t reserved1 : 8; /* < reserved */
+		uint32_t opcode    : 8; /* < IT opcode */
+		uint32_t count     : 14;/* < number of DWORDs - 1 in the
+					     information body */
+		uint32_t type      : 2; /* < packet identifier;
+					     must be 3 for type-3 packets */
+	};
+	uint32_t u32All;
+};
+#endif /* PM4_MES_HEADER_DEFINED */
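The count field holds "number of DWORDs - 1 in the information body", i.e. total packet DWORDs minus two once the header DWORD is excluded. A self-contained sketch of the packing; the helper is modelled on the driver's build_pm4_header(), and the opcode value here is an assumption:

#include <stdint.h>
#include <stdio.h>

union pm4_type_3_header {
	struct {
		uint32_t reserved1 : 8;
		uint32_t opcode    : 8;
		uint32_t count     : 14;	/* body DWORDs - 1 */
		uint32_t type      : 2;		/* 3 for type-3 packets */
	};
	uint32_t u32all;
};

static uint32_t pm4_header(unsigned int opcode, size_t packet_size)
{
	union pm4_type_3_header h = { .u32all = 0 };

	h.opcode = opcode;
	/* body = packet_size/4 - 1 DWORDs, so count = packet_size/4 - 2 */
	h.count = packet_size / sizeof(uint32_t) - 2;
	h.type = 3;
	return h.u32all;
}

int main(void)
{
	/* a 7-DWORD MAP_QUEUES packet -> count = 5 */
	printf("0x%08X\n", (unsigned)pm4_header(0xA2 /* assumed opcode */, 7 * 4));
	return 0;
}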
+
+/*--------------------MES_SET_RESOURCES--------------------*/
+
+#ifndef PM4_MES_SET_RESOURCES_DEFINED
+#define PM4_MES_SET_RESOURCES_DEFINED
+enum mes_set_resources_queue_type_enum {
+	queue_type__mes_set_resources__kernel_interface_queue_kiq = 0,
+	queue_type__mes_set_resources__hsa_interface_queue_hiq = 1,
+	queue_type__mes_set_resources__hsa_debug_interface_queue = 4
+};
+
+
+struct pm4_mes_set_resources {
+	union {
+		union PM4_MES_TYPE_3_HEADER	header;		/* header */
+		uint32_t			ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t vmid_mask:16;
+			uint32_t unmap_latency:8;
+			uint32_t reserved1:5;
+			enum mes_set_resources_queue_type_enum queue_type:3;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	uint32_t queue_mask_lo;
+	uint32_t queue_mask_hi;
+	uint32_t gws_mask_lo;
+	uint32_t gws_mask_hi;
+
+	union {
+		struct {
+			uint32_t oac_mask:16;
+			uint32_t reserved2:16;
+		} bitfields7;
+		uint32_t ordinal7;
+	};
+
+	union {
+		struct {
+			uint32_t gds_heap_base:6;
+			uint32_t reserved3:5;
+			uint32_t gds_heap_size:6;
+			uint32_t reserved4:15;
+		} bitfields8;
+		uint32_t ordinal8;
+	};
+
+};
+#endif
+
+/*--------------------MES_RUN_LIST--------------------*/
+
+#ifndef PM4_MES_RUN_LIST_DEFINED
+#define PM4_MES_RUN_LIST_DEFINED
+
+struct pm4_mes_runlist {
+	union {
+		union PM4_MES_TYPE_3_HEADER	header;		/* header */
+		uint32_t			ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t reserved1:2;
+			uint32_t ib_base_lo:30;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t ib_base_hi:16;
+			uint32_t reserved2:16;
+		} bitfields3;
+		uint32_t ordinal3;
+	};
+
+	union {
+		struct {
+			uint32_t ib_size:20;
+			uint32_t chain:1;
+			uint32_t offload_polling:1;
+			uint32_t reserved3:1;
+			uint32_t valid:1;
+			uint32_t reserved4:8;
+		} bitfields4;
+		uint32_t ordinal4;
+	};
+
+};
+#endif
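Because ib_base_lo carries only bits 31:2, the runlist IB address must be at least 4-byte aligned and is split across ordinals 2 and 3. A hedged sketch of that split (the field widths come from the struct above; the exact packing convention is an assumption):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ib = 0x0000123456789ABCULL & ~3ULL;	/* 4-byte-aligned IB */
	uint32_t ib_base_lo = (uint32_t)ib >> 2;	/* bits 31:2 -> 30-bit field */
	uint32_t ib_base_hi = (uint32_t)(ib >> 32) & 0xFFFF;	/* 16-bit field */

	printf("ib_base_lo=0x%08X ib_base_hi=0x%04X\n",
	       (unsigned)ib_base_lo, (unsigned)ib_base_hi);
	return 0;
}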
+
+/*--------------------MES_MAP_PROCESS--------------------*/
+
+#ifndef PM4_MES_MAP_PROCESS_DEFINED
+#define PM4_MES_MAP_PROCESS_DEFINED
+
+struct pm4_mes_map_process {
+	union {
+		union PM4_MES_TYPE_3_HEADER   header;            /* header */
+		uint32_t            ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t pasid:16;
+			uint32_t reserved1:8;
+			uint32_t diq_enable:1;
+			uint32_t process_quantum:7;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t page_table_base:28;
+			uint32_t reserved2:4;
+		} bitfields3;
+		uint32_t ordinal3;
+	};
+
+	uint32_t sh_mem_bases;
+	uint32_t sh_mem_ape1_base;
+	uint32_t sh_mem_ape1_limit;
+	uint32_t sh_mem_config;
+	uint32_t gds_addr_lo;
+	uint32_t gds_addr_hi;
+
+	union {
+		struct {
+			uint32_t num_gws:6;
+			uint32_t reserved3:2;
+			uint32_t num_oac:4;
+			uint32_t reserved4:4;
+			uint32_t gds_size:6;
+			uint32_t num_queues:10;
+		} bitfields10;
+		uint32_t ordinal10;
+	};
+
+};
+#endif
+
+/*--------------------MES_MAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_MAP_QUEUES_VI_DEFINED
+#define PM4_MES_MAP_QUEUES_VI_DEFINED
+enum mes_map_queues_queue_sel_vi_enum {
+	queue_sel__mes_map_queues__map_to_specified_queue_slots_vi = 0,
+	queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi = 1
+};
+
+enum mes_map_queues_queue_type_vi_enum {
+	queue_type__mes_map_queues__normal_compute_vi = 0,
+	queue_type__mes_map_queues__debug_interface_queue_vi = 1,
+	queue_type__mes_map_queues__normal_latency_static_queue_vi = 2,
+	queue_type__mes_map_queues__low_latency_static_queue_vi = 3
+};
+
+enum mes_map_queues_alloc_format_vi_enum {
+	alloc_format__mes_map_queues__one_per_pipe_vi = 0,
+	alloc_format__mes_map_queues__all_on_one_pipe_vi = 1
+};
+
+enum mes_map_queues_engine_sel_vi_enum {
+	engine_sel__mes_map_queues__compute_vi = 0,
+	engine_sel__mes_map_queues__sdma0_vi = 2,
+	engine_sel__mes_map_queues__sdma1_vi = 3
+};
+
+
+struct pm4_mes_map_queues {
+	union {
+		union PM4_MES_TYPE_3_HEADER   header;            /* header */
+		uint32_t            ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t reserved1:4;
+			enum mes_map_queues_queue_sel_vi_enum queue_sel:2;
+			uint32_t reserved2:15;
+			enum mes_map_queues_queue_type_vi_enum queue_type:3;
+			enum mes_map_queues_alloc_format_vi_enum alloc_format:2;
+			enum mes_map_queues_engine_sel_vi_enum engine_sel:3;
+			uint32_t num_queues:3;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t reserved3:1;
+			uint32_t check_disable:1;
+			uint32_t doorbell_offset:21;
+			uint32_t reserved4:3;
+			uint32_t queue:6;
+		} bitfields3;
+		uint32_t ordinal3;
+	};
+
+	uint32_t mqd_addr_lo;
+	uint32_t mqd_addr_hi;
+	uint32_t wptr_addr_lo;
+	uint32_t wptr_addr_hi;
+};
+#endif
+
+/*--------------------MES_QUERY_STATUS--------------------*/
+
+#ifndef PM4_MES_QUERY_STATUS_DEFINED
+#define PM4_MES_QUERY_STATUS_DEFINED
+enum mes_query_status_interrupt_sel_enum {
+	interrupt_sel__mes_query_status__completion_status = 0,
+	interrupt_sel__mes_query_status__process_status = 1,
+	interrupt_sel__mes_query_status__queue_status = 2
+};
+
+enum mes_query_status_command_enum {
+	command__mes_query_status__interrupt_only = 0,
+	command__mes_query_status__fence_only_immediate = 1,
+	command__mes_query_status__fence_only_after_write_ack = 2,
+	command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3
+};
+
+enum mes_query_status_engine_sel_enum {
+	engine_sel__mes_query_status__compute = 0,
+	engine_sel__mes_query_status__sdma0_queue = 2,
+	engine_sel__mes_query_status__sdma1_queue = 3
+};
+
+struct pm4_mes_query_status {
+	union {
+		union PM4_MES_TYPE_3_HEADER   header;            /* header */
+		uint32_t            ordinal1;
+	};
+
+	union {
+		struct {
+			uint32_t context_id:28;
+			enum mes_query_status_interrupt_sel_enum
+				interrupt_sel:2;
+			enum mes_query_status_command_enum command:2;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t pasid:16;
+			uint32_t reserved1:16;
+		} bitfields3a;
+		struct {
+			uint32_t reserved2:2;
+			uint32_t doorbell_offset:21;
+			uint32_t reserved3:2;
+			enum mes_query_status_engine_sel_enum engine_sel:3;
+			uint32_t reserved4:4;
+		} bitfields3b;
+		uint32_t ordinal3;
+	};
+
+	uint32_t addr_lo;
+	uint32_t addr_hi;
+	uint32_t data_lo;
+	uint32_t data_hi;
+};
+#endif
+
+/*--------------------MES_UNMAP_QUEUES--------------------*/
+
+#ifndef PM4_MES_UNMAP_QUEUES_DEFINED
+#define PM4_MES_UNMAP_QUEUES_DEFINED
+enum mes_unmap_queues_action_enum {
+	action__mes_unmap_queues__preempt_queues = 0,
+	action__mes_unmap_queues__reset_queues = 1,
+	action__mes_unmap_queues__disable_process_queues = 2,
+	action__mes_unmap_queues__reserved = 3
+};
+
+enum mes_unmap_queues_queue_sel_enum {
+	queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0,
+	queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1,
+	queue_sel__mes_unmap_queues__unmap_all_queues = 2,
+	queue_sel__mes_unmap_queues__unmap_all_non_static_queues = 3
+};
+
+enum mes_unmap_queues_engine_sel_enum {
+	engine_sel__mes_unmap_queues__compute = 0,
+	engine_sel__mes_unmap_queues__sdma0 = 2,
+	engine_sel__mes_unmap_queues__sdma1 = 3
+};
+
+struct pm4_mes_unmap_queues {
+	union {
+		union PM4_MES_TYPE_3_HEADER   header;            /* header */
+		uint32_t            ordinal1;
+	};
+
+	union {
+		struct {
+			enum mes_unmap_queues_action_enum action:2;
+			uint32_t reserved1:2;
+			enum mes_unmap_queues_queue_sel_enum queue_sel:2;
+			uint32_t reserved2:20;
+			enum mes_unmap_queues_engine_sel_enum engine_sel:3;
+			uint32_t num_queues:3;
+		} bitfields2;
+		uint32_t ordinal2;
+	};
+
+	union {
+		struct {
+			uint32_t pasid:16;
+			uint32_t reserved3:16;
+		} bitfields3a;
+		struct {
+			uint32_t reserved4:2;
+			uint32_t doorbell_offset0:21;
+			uint32_t reserved5:9;
+		} bitfields3b;
+		uint32_t ordinal3;
+	};
+
+	union {
+		struct {
+			uint32_t reserved6:2;
+			uint32_t doorbell_offset1:21;
+			uint32_t reserved7:9;
+		} bitfields4;
+		uint32_t ordinal4;
+	};
+
+	union {
+		struct {
+			uint32_t reserved8:2;
+			uint32_t doorbell_offset2:21;
+			uint32_t reserved9:9;
+		} bitfields5;
+		uint32_t ordinal5;
+	};
+
+	union {
+		struct {
+			uint32_t reserved10:2;
+			uint32_t doorbell_offset3:21;
+			uint32_t reserved11:9;
+		} bitfields6;
+		uint32_t ordinal6;
+	};
+};
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index c25728b..74909e7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1186,6 +1186,11 @@
 	 * TODO: Retrieve max engine clock values from KGD
 	 */
 
+	if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
+		dev->node_props.capability |= HSA_CAP_DOORBELL_PACKET_TYPE;
+		pr_info("amdkfd: adding doorbell packet type capability\n");
+	}
+
 	res = 0;
 
 err:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 989624b..c3ddb9b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -40,6 +40,7 @@
 #define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK	0x00000f00
 #define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT	8
 #define HSA_CAP_RESERVED			0xfffff000
+#define HSA_CAP_DOORBELL_PACKET_TYPE		0x00001000
 
 struct kfd_node_properties {
 	uint32_t cpu_cores_count;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 5bdf1b4..68a8eaa 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -23,6 +23,45 @@
 #ifndef __AMD_SHARED_H__
 #define __AMD_SHARED_H__
 
+#define AMD_MAX_USEC_TIMEOUT		100000  /* 100 ms */
+
+/*
+ * Supported GPU families (aligned with amdgpu_drm.h)
+ */
+#define AMD_FAMILY_UNKNOWN              0
+#define AMD_FAMILY_CI                   120 /* Bonaire, Hawaii */
+#define AMD_FAMILY_KV                   125 /* Kaveri, Kabini, Mullins */
+#define AMD_FAMILY_VI                   130 /* Iceland, Tonga */
+#define AMD_FAMILY_CZ                   135 /* Carrizo */
+
+/*
+ * Supported ASIC types
+ */
+enum amd_asic_type {
+	CHIP_BONAIRE = 0,
+	CHIP_KAVERI,
+	CHIP_KABINI,
+	CHIP_HAWAII,
+	CHIP_MULLINS,
+	CHIP_TOPAZ,
+	CHIP_TONGA,
+	CHIP_FIJI,
+	CHIP_CARRIZO,
+	CHIP_LAST,
+};
+
+/*
+ * Chip flags
+ */
+enum amd_chip_flags {
+	AMD_ASIC_MASK = 0x0000ffffUL,
+	AMD_FLAGS_MASK  = 0xffff0000UL,
+	AMD_IS_MOBILITY = 0x00010000UL,
+	AMD_IS_APU      = 0x00020000UL,
+	AMD_IS_PX       = 0x00040000UL,
+	AMD_EXP_HW_SUPPORT = 0x00080000UL,
+};
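The masks above split one flags word into an ASIC id (low 16 bits) and feature bits (high 16 bits). A minimal sketch of the intended decoding (the sample flags value is made up for illustration):

#include <stdio.h>

#define AMD_ASIC_MASK   0x0000ffffUL
#define AMD_FLAGS_MASK  0xffff0000UL
#define AMD_IS_APU      0x00020000UL

int main(void)
{
	/* hypothetical device: ASIC id 8 (CHIP_CARRIZO) on an APU */
	unsigned long flags = 8 | AMD_IS_APU;

	printf("asic=%lu apu=%d\n", flags & AMD_ASIC_MASK,
	       (flags & AMD_IS_APU) ? 1 : 0);
	return 0;
}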
+
 enum amd_ip_block_type {
 	AMD_IP_BLOCK_TYPE_COMMON,
 	AMD_IP_BLOCK_TYPE_GMC,
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
new file mode 100644
index 0000000..44b1855
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -0,0 +1,1246 @@
+/*
+ * SMU_7_1_3 Register documentation
+ *
+ * Copyright (C) 2014  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SMU_7_1_3_D_H
+#define SMU_7_1_3_D_H
+
+#define mmGCK_SMC_IND_INDEX                                                     0x80
+#define mmGCK0_GCK_SMC_IND_INDEX                                                0x80
+#define mmGCK1_GCK_SMC_IND_INDEX                                                0x82
+#define mmGCK2_GCK_SMC_IND_INDEX                                                0x84
+#define mmGCK3_GCK_SMC_IND_INDEX                                                0x86
+#define mmGCK_SMC_IND_DATA                                                      0x81
+#define mmGCK0_GCK_SMC_IND_DATA                                                 0x81
+#define mmGCK1_GCK_SMC_IND_DATA                                                 0x83
+#define mmGCK2_GCK_SMC_IND_DATA                                                 0x85
+#define mmGCK3_GCK_SMC_IND_DATA                                                 0x87
+#define ixGCK_MCLK_FUSES                                                        0xc0500008
+#define ixCG_DCLK_CNTL                                                          0xc050009c
+#define ixCG_DCLK_STATUS                                                        0xc05000a0
+#define ixCG_VCLK_CNTL                                                          0xc05000a4
+#define ixCG_VCLK_STATUS                                                        0xc05000a8
+#define ixCG_ECLK_CNTL                                                          0xc05000ac
+#define ixCG_ECLK_STATUS                                                        0xc05000b0
+#define ixCG_ACLK_CNTL                                                          0xc05000dc
+#define ixCG_MCLK_CNTL                                                          0xc0500120
+#define ixCG_MCLK_STATUS                                                        0xc0500124
+#define ixGCK_DFS_BYPASS_CNTL                                                   0xc0500118
+#define ixCG_SPLL_FUNC_CNTL                                                     0xc0500140
+#define ixCG_SPLL_FUNC_CNTL_2                                                   0xc0500144
+#define ixCG_SPLL_FUNC_CNTL_3                                                   0xc0500148
+#define ixCG_SPLL_FUNC_CNTL_4                                                   0xc050014c
+#define ixCG_SPLL_FUNC_CNTL_5                                                   0xc0500150
+#define ixCG_SPLL_FUNC_CNTL_6                                                   0xc0500154
+#define ixCG_SPLL_FUNC_CNTL_7                                                   0xc0500158
+#define ixSPLL_CNTL_MODE                                                        0xc0500160
+#define ixCG_SPLL_SPREAD_SPECTRUM                                               0xc0500164
+#define ixCG_SPLL_SPREAD_SPECTRUM_2                                             0xc0500168
+#define ixMPLL_BYPASSCLK_SEL                                                    0xc050019c
+#define ixCG_CLKPIN_CNTL                                                        0xc05001a0
+#define ixCG_CLKPIN_CNTL_2                                                      0xc05001a4
+#define ixCG_CLKPIN_CNTL_DC                                                     0xc0500204
+#define ixTHM_CLK_CNTL                                                          0xc05001a8
+#define ixMISC_CLK_CTRL                                                         0xc05001ac
+#define ixGCK_PLL_TEST_CNTL                                                     0xc05001c0
+#define ixGCK_PLL_TEST_CNTL_2                                                   0xc05001c4
+#define ixGCK_ADFS_CLK_BYPASS_CNTL1                                             0xc05001c8
+#define mmSMC_IND_INDEX                                                         0x80
+#define mmSMC0_SMC_IND_INDEX                                                    0x80
+#define mmSMC1_SMC_IND_INDEX                                                    0x82
+#define mmSMC2_SMC_IND_INDEX                                                    0x84
+#define mmSMC3_SMC_IND_INDEX                                                    0x86
+#define mmSMC_IND_DATA                                                          0x81
+#define mmSMC0_SMC_IND_DATA                                                     0x81
+#define mmSMC1_SMC_IND_DATA                                                     0x83
+#define mmSMC2_SMC_IND_DATA                                                     0x85
+#define mmSMC3_SMC_IND_DATA                                                     0x87
+#define mmSMC_IND_INDEX_0                                                       0x80
+#define mmSMC_IND_DATA_0                                                        0x81
+#define mmSMC_IND_INDEX_1                                                       0x82
+#define mmSMC_IND_DATA_1                                                        0x83
+#define mmSMC_IND_INDEX_2                                                       0x84
+#define mmSMC_IND_DATA_2                                                        0x85
+#define mmSMC_IND_INDEX_3                                                       0x86
+#define mmSMC_IND_DATA_3                                                        0x87
+#define mmSMC_IND_INDEX_4                                                       0x88
+#define mmSMC_IND_DATA_4                                                        0x89
+#define mmSMC_IND_INDEX_5                                                       0x8a
+#define mmSMC_IND_DATA_5                                                        0x8b
+#define mmSMC_IND_INDEX_6                                                       0x8c
+#define mmSMC_IND_DATA_6                                                        0x8d
+#define mmSMC_IND_INDEX_7                                                       0x8e
+#define mmSMC_IND_DATA_7                                                        0x8f
+#define mmSMC_IND_ACCESS_CNTL                                                   0x92
+#define mmSMC_MESSAGE_0                                                         0x94
+#define mmSMC_RESP_0                                                            0x95
+#define mmSMC_MESSAGE_1                                                         0x96
+#define mmSMC_RESP_1                                                            0x97
+#define mmSMC_MESSAGE_2                                                         0x98
+#define mmSMC_RESP_2                                                            0x99
+#define mmSMC_MESSAGE_3                                                         0x9a
+#define mmSMC_RESP_3                                                            0x9b
+#define mmSMC_MESSAGE_4                                                         0x9c
+#define mmSMC_RESP_4                                                            0x9d
+#define mmSMC_MESSAGE_5                                                         0x9e
+#define mmSMC_RESP_5                                                            0x9f
+#define mmSMC_MESSAGE_6                                                         0xa0
+#define mmSMC_RESP_6                                                            0xa1
+#define mmSMC_MESSAGE_7                                                         0xa2
+#define mmSMC_RESP_7                                                            0xa3
+#define mmSMC_MSG_ARG_0                                                         0xa4
+#define mmSMC_MSG_ARG_1                                                         0xa5
+#define mmSMC_MSG_ARG_2                                                         0xa6
+#define mmSMC_MSG_ARG_3                                                         0xa7
+#define mmSMC_MSG_ARG_4                                                         0xa8
+#define mmSMC_MSG_ARG_5                                                         0xa9
+#define mmSMC_MSG_ARG_6                                                         0xaa
+#define mmSMC_MSG_ARG_7                                                         0xab
+#define mmSMC_MESSAGE_8                                                         0xb5
+#define mmSMC_RESP_8                                                            0xb6
+#define mmSMC_MESSAGE_9                                                         0xb7
+#define mmSMC_RESP_9                                                            0xb8
+#define mmSMC_MESSAGE_10                                                        0xb9
+#define mmSMC_RESP_10                                                           0xba
+#define mmSMC_MESSAGE_11                                                        0xbb
+#define mmSMC_RESP_11                                                           0xbc
+#define mmSMC_MSG_ARG_8                                                         0xbd
+#define mmSMC_MSG_ARG_9                                                         0xbe
+#define mmSMC_MSG_ARG_10                                                        0xbf
+#define mmSMC_MSG_ARG_11                                                        0x93
+#define ixSMC_SYSCON_RESET_CNTL                                         0x80000000
+#define ixSMC_SYSCON_CLOCK_CNTL_0                                       0x80000004
+#define ixSMC_SYSCON_CLOCK_CNTL_1                                       0x80000008
+#define ixSMC_SYSCON_CLOCK_CNTL_2                                       0x8000000c
+#define ixSMC_SYSCON_MISC_CNTL                                                  0x80000010
+#define ixSMC_SYSCON_MSG_ARG_0                                                  0x80000068
+#define ixSMC_PC_C                                                              0x80000370
+#define ixSMC_SCRATCH9                                                          0x80000424
+#define mmGPIOPAD_SW_INT_STAT                                                   0x180
+#define mmGPIOPAD_STRENGTH                                                      0x181
+#define mmGPIOPAD_MASK                                                          0x182
+#define mmGPIOPAD_A                                                             0x183
+#define mmGPIOPAD_EN                                                            0x184
+#define mmGPIOPAD_Y                                                             0x185
+#define mmGPIOPAD_PINSTRAPS                                                     0x186
+#define mmGPIOPAD_INT_STAT_EN                                                   0x187
+#define mmGPIOPAD_INT_STAT                                                      0x188
+#define mmGPIOPAD_INT_STAT_AK                                                   0x189
+#define mmGPIOPAD_INT_EN                                                        0x18a
+#define mmGPIOPAD_INT_TYPE                                                      0x18b
+#define mmGPIOPAD_INT_POLARITY                                                  0x18c
+#define mmGPIOPAD_EXTERN_TRIG_CNTL                                              0x18d
+#define mmGPIOPAD_RCVR_SEL                                                      0x191
+#define mmGPIOPAD_PU_EN                                                         0x192
+#define mmGPIOPAD_PD_EN                                                         0x193
+#define mmCG_FPS_CNT                                                            0x1b6
+#define mmSMU_IND_INDEX_0                                                       0x1a6
+#define mmSMU_IND_DATA_0                                                        0x1a7
+#define mmSMU_IND_INDEX_1                                                       0x1a8
+#define mmSMU_IND_DATA_1                                                        0x1a9
+#define mmSMU_IND_INDEX_2                                                       0x1aa
+#define mmSMU_IND_DATA_2                                                        0x1ab
+#define mmSMU_IND_INDEX_3                                                       0x1ac
+#define mmSMU_IND_DATA_3                                                        0x1ad
+#define mmSMU_IND_INDEX_4                                                       0x1ae
+#define mmSMU_IND_DATA_4                                                        0x1af
+#define mmSMU_IND_INDEX_5                                                       0x1b0
+#define mmSMU_IND_DATA_5                                                        0x1b1
+#define mmSMU_IND_INDEX_6                                                       0x1b2
+#define mmSMU_IND_DATA_6                                                        0x1b3
+#define mmSMU_IND_INDEX_7                                                       0x1b4
+#define mmSMU_IND_DATA_7                                                        0x1b5
+#define mmSMU_SMC_IND_INDEX                                                     0x80
+#define mmSMU0_SMU_SMC_IND_INDEX                                                0x80
+#define mmSMU1_SMU_SMC_IND_INDEX                                                0x82
+#define mmSMU2_SMU_SMC_IND_INDEX                                                0x84
+#define mmSMU3_SMU_SMC_IND_INDEX                                                0x86
+#define mmSMU_SMC_IND_DATA                                                      0x81
+#define mmSMU0_SMU_SMC_IND_DATA                                                 0x81
+#define mmSMU1_SMU_SMC_IND_DATA                                                 0x83
+#define mmSMU2_SMU_SMC_IND_DATA                                                 0x85
+#define mmSMU3_SMU_SMC_IND_DATA                                                 0x87
+#define ixRCU_UC_EVENTS                                                         0xc0000004
+#define ixRCU_MISC_CTRL                                                         0xc0000010
+#define ixRCU_VIRT_RESET_REQ                                                    0xc0000024
+#define ixCC_RCU_FUSES                                                          0xc00c0000
+#define ixCC_SMU_MISC_FUSES                                                     0xc00c0004
+#define ixCC_SCLK_VID_FUSES                                                     0xc00c0008
+#define ixCC_GIO_IOCCFG_FUSES                                                   0xc00c000c
+#define ixCC_GIO_IOC_FUSES                                                      0xc00c0010
+#define ixCC_SMU_TST_EFUSE1_MISC                                                0xc00c001c
+#define ixCC_TST_ID_STRAPS                                                      0xc00c0020
+#define ixCC_FCTRL_FUSES                                                        0xc00c0024
+#define ixCC_HARVEST_FUSES                                                      0xc00c0028
+#define ixSMU_MAIN_PLL_OP_FREQ                                                  0xe0003020
+#define ixSMU_STATUS                                                    0xe0003088
+#define ixSMU_FIRMWARE                                                  0xe00030a4
+#define ixSMU_INPUT_DATA                                                0xe00030b8
+#define ixSMU_EFUSE_0                                                           0xc0100000
+#define ixFIRMWARE_FLAGS                                                        0x3f000
+#define ixTDC_STATUS                                                            0x3f004
+#define ixTDC_MV_AVERAGE                                                        0x3f008
+#define ixTDC_VRM_LIMIT                                                         0x3f00c
+#define ixFEATURE_STATUS                                                        0x3f010
+#define ixENTITY_TEMPERATURES_1                                                 0x3f014
+#define ixMCARB_DRAM_TIMING_TABLE_1                                             0x3f018
+#define ixMCARB_DRAM_TIMING_TABLE_2                                             0x3f01c
+#define ixMCARB_DRAM_TIMING_TABLE_3                                             0x3f020
+#define ixMCARB_DRAM_TIMING_TABLE_4                                             0x3f024
+#define ixMCARB_DRAM_TIMING_TABLE_5                                             0x3f028
+#define ixMCARB_DRAM_TIMING_TABLE_6                                             0x3f02c
+#define ixMCARB_DRAM_TIMING_TABLE_7                                             0x3f030
+#define ixMCARB_DRAM_TIMING_TABLE_8                                             0x3f034
+#define ixMCARB_DRAM_TIMING_TABLE_9                                             0x3f038
+#define ixMCARB_DRAM_TIMING_TABLE_10                                            0x3f03c
+#define ixMCARB_DRAM_TIMING_TABLE_11                                            0x3f040
+#define ixMCARB_DRAM_TIMING_TABLE_12                                            0x3f044
+#define ixMCARB_DRAM_TIMING_TABLE_13                                            0x3f048
+#define ixMCARB_DRAM_TIMING_TABLE_14                                            0x3f04c
+#define ixMCARB_DRAM_TIMING_TABLE_15                                            0x3f050
+#define ixMCARB_DRAM_TIMING_TABLE_16                                            0x3f054
+#define ixMCARB_DRAM_TIMING_TABLE_17                                            0x3f058
+#define ixMCARB_DRAM_TIMING_TABLE_18                                            0x3f05c
+#define ixMCARB_DRAM_TIMING_TABLE_19                                            0x3f060
+#define ixMCARB_DRAM_TIMING_TABLE_20                                            0x3f064
+#define ixMCARB_DRAM_TIMING_TABLE_21                                            0x3f068
+#define ixMCARB_DRAM_TIMING_TABLE_22                                            0x3f06c
+#define ixMCARB_DRAM_TIMING_TABLE_23                                            0x3f070
+#define ixMCARB_DRAM_TIMING_TABLE_24                                            0x3f074
+#define ixMCARB_DRAM_TIMING_TABLE_25                                            0x3f078
+#define ixMCARB_DRAM_TIMING_TABLE_26                                            0x3f07c
+#define ixMCARB_DRAM_TIMING_TABLE_27                                            0x3f080
+#define ixMCARB_DRAM_TIMING_TABLE_28                                            0x3f084
+#define ixMCARB_DRAM_TIMING_TABLE_29                                            0x3f088
+#define ixMCARB_DRAM_TIMING_TABLE_30                                            0x3f08c
+#define ixMCARB_DRAM_TIMING_TABLE_31                                            0x3f090
+#define ixMCARB_DRAM_TIMING_TABLE_32                                            0x3f094
+#define ixMCARB_DRAM_TIMING_TABLE_33                                            0x3f098
+#define ixMCARB_DRAM_TIMING_TABLE_34                                            0x3f09c
+#define ixMCARB_DRAM_TIMING_TABLE_35                                            0x3f0a0
+#define ixMCARB_DRAM_TIMING_TABLE_36                                            0x3f0a4
+#define ixMCARB_DRAM_TIMING_TABLE_37                                            0x3f0a8
+#define ixMCARB_DRAM_TIMING_TABLE_38                                            0x3f0ac
+#define ixMCARB_DRAM_TIMING_TABLE_39                                            0x3f0b0
+#define ixMCARB_DRAM_TIMING_TABLE_40                                            0x3f0b4
+#define ixMCARB_DRAM_TIMING_TABLE_41                                            0x3f0b8
+#define ixMCARB_DRAM_TIMING_TABLE_42                                            0x3f0bc
+#define ixMCARB_DRAM_TIMING_TABLE_43                                            0x3f0c0
+#define ixMCARB_DRAM_TIMING_TABLE_44                                            0x3f0c4
+#define ixMCARB_DRAM_TIMING_TABLE_45                                            0x3f0c8
+#define ixMCARB_DRAM_TIMING_TABLE_46                                            0x3f0cc
+#define ixMCARB_DRAM_TIMING_TABLE_47                                            0x3f0d0
+#define ixMCARB_DRAM_TIMING_TABLE_48                                            0x3f0d4
+#define ixMCARB_DRAM_TIMING_TABLE_49                                            0x3f0d8
+#define ixMCARB_DRAM_TIMING_TABLE_50                                            0x3f0dc
+#define ixMCARB_DRAM_TIMING_TABLE_51                                            0x3f0e0
+#define ixMCARB_DRAM_TIMING_TABLE_52                                            0x3f0e4
+#define ixMCARB_DRAM_TIMING_TABLE_53                                            0x3f0e8
+#define ixMCARB_DRAM_TIMING_TABLE_54                                            0x3f0ec
+#define ixMCARB_DRAM_TIMING_TABLE_55                                            0x3f0f0
+#define ixMCARB_DRAM_TIMING_TABLE_56                                            0x3f0f4
+#define ixMCARB_DRAM_TIMING_TABLE_57                                            0x3f0f8
+#define ixMCARB_DRAM_TIMING_TABLE_58                                            0x3f0fc
+#define ixMCARB_DRAM_TIMING_TABLE_59                                            0x3f100
+#define ixMCARB_DRAM_TIMING_TABLE_60                                            0x3f104
+#define ixMCARB_DRAM_TIMING_TABLE_61                                            0x3f108
+#define ixMCARB_DRAM_TIMING_TABLE_62                                            0x3f10c
+#define ixMCARB_DRAM_TIMING_TABLE_63                                            0x3f110
+#define ixMCARB_DRAM_TIMING_TABLE_64                                            0x3f114
+#define ixMCARB_DRAM_TIMING_TABLE_65                                            0x3f118
+#define ixMCARB_DRAM_TIMING_TABLE_66                                            0x3f11c
+#define ixMCARB_DRAM_TIMING_TABLE_67                                            0x3f120
+#define ixMCARB_DRAM_TIMING_TABLE_68                                            0x3f124
+#define ixMCARB_DRAM_TIMING_TABLE_69                                            0x3f128
+#define ixMCARB_DRAM_TIMING_TABLE_70                                            0x3f12c
+#define ixMCARB_DRAM_TIMING_TABLE_71                                            0x3f130
+#define ixMCARB_DRAM_TIMING_TABLE_72                                            0x3f134
+#define ixMCARB_DRAM_TIMING_TABLE_73                                            0x3f138
+#define ixMCARB_DRAM_TIMING_TABLE_74                                            0x3f13c
+#define ixMCARB_DRAM_TIMING_TABLE_75                                            0x3f140
+#define ixMCARB_DRAM_TIMING_TABLE_76                                            0x3f144
+#define ixMCARB_DRAM_TIMING_TABLE_77                                            0x3f148
+#define ixMCARB_DRAM_TIMING_TABLE_78                                            0x3f14c
+#define ixMCARB_DRAM_TIMING_TABLE_79                                            0x3f150
+#define ixMCARB_DRAM_TIMING_TABLE_80                                            0x3f154
+#define ixMCARB_DRAM_TIMING_TABLE_81                                            0x3f158
+#define ixMCARB_DRAM_TIMING_TABLE_82                                            0x3f15c
+#define ixMCARB_DRAM_TIMING_TABLE_83                                            0x3f160
+#define ixMCARB_DRAM_TIMING_TABLE_84                                            0x3f164
+#define ixMCARB_DRAM_TIMING_TABLE_85                                            0x3f168
+#define ixMCARB_DRAM_TIMING_TABLE_86                                            0x3f16c
+#define ixMCARB_DRAM_TIMING_TABLE_87                                            0x3f170
+#define ixMCARB_DRAM_TIMING_TABLE_88                                            0x3f174
+#define ixMCARB_DRAM_TIMING_TABLE_89                                            0x3f178
+#define ixMCARB_DRAM_TIMING_TABLE_90                                            0x3f17c
+#define ixMCARB_DRAM_TIMING_TABLE_91                                            0x3f180
+#define ixMCARB_DRAM_TIMING_TABLE_92                                            0x3f184
+#define ixMCARB_DRAM_TIMING_TABLE_93                                            0x3f188
+#define ixMCARB_DRAM_TIMING_TABLE_94                                            0x3f18c
+#define ixMCARB_DRAM_TIMING_TABLE_95                                            0x3f190
+#define ixMCARB_DRAM_TIMING_TABLE_96                                            0x3f194
+#define ixDPM_TABLE_1                                                           0x3f198
+#define ixDPM_TABLE_2                                                           0x3f19c
+#define ixDPM_TABLE_3                                                           0x3f1a0
+#define ixDPM_TABLE_4                                                           0x3f1a4
+#define ixDPM_TABLE_5                                                           0x3f1a8
+#define ixDPM_TABLE_6                                                           0x3f1ac
+#define ixDPM_TABLE_7                                                           0x3f1b0
+#define ixDPM_TABLE_8                                                           0x3f1b4
+#define ixDPM_TABLE_9                                                           0x3f1b8
+#define ixDPM_TABLE_10                                                          0x3f1bc
+#define ixDPM_TABLE_11                                                          0x3f1c0
+#define ixDPM_TABLE_12                                                          0x3f1c4
+#define ixDPM_TABLE_13                                                          0x3f1c8
+#define ixDPM_TABLE_14                                                          0x3f1cc
+#define ixDPM_TABLE_15                                                          0x3f1d0
+#define ixDPM_TABLE_16                                                          0x3f1d4
+#define ixDPM_TABLE_17                                                          0x3f1d8
+#define ixDPM_TABLE_18                                                          0x3f1dc
+#define ixDPM_TABLE_19                                                          0x3f1e0
+#define ixDPM_TABLE_20                                                          0x3f1e4
+#define ixDPM_TABLE_21                                                          0x3f1e8
+#define ixDPM_TABLE_22                                                          0x3f1ec
+#define ixDPM_TABLE_23                                                          0x3f1f0
+#define ixDPM_TABLE_24                                                          0x3f1f4
+#define ixDPM_TABLE_25                                                          0x3f1f8
+#define ixDPM_TABLE_26                                                          0x3f1fc
+#define ixDPM_TABLE_27                                                          0x3f200
+#define ixDPM_TABLE_28                                                          0x3f204
+#define ixDPM_TABLE_29                                                          0x3f208
+#define ixDPM_TABLE_30                                                          0x3f20c
+#define ixDPM_TABLE_31                                                          0x3f210
+#define ixDPM_TABLE_32                                                          0x3f214
+#define ixDPM_TABLE_33                                                          0x3f218
+#define ixDPM_TABLE_34                                                          0x3f21c
+#define ixDPM_TABLE_35                                                          0x3f220
+#define ixDPM_TABLE_36                                                          0x3f224
+#define ixDPM_TABLE_37                                                          0x3f228
+#define ixDPM_TABLE_38                                                          0x3f22c
+#define ixDPM_TABLE_39                                                          0x3f230
+#define ixDPM_TABLE_40                                                          0x3f234
+#define ixDPM_TABLE_41                                                          0x3f238
+#define ixDPM_TABLE_42                                                          0x3f23c
+#define ixDPM_TABLE_43                                                          0x3f240
+#define ixDPM_TABLE_44                                                          0x3f244
+#define ixDPM_TABLE_45                                                          0x3f248
+#define ixDPM_TABLE_46                                                          0x3f24c
+#define ixDPM_TABLE_47                                                          0x3f250
+#define ixDPM_TABLE_48                                                          0x3f254
+#define ixDPM_TABLE_49                                                          0x3f258
+#define ixDPM_TABLE_50                                                          0x3f25c
+#define ixDPM_TABLE_51                                                          0x3f260
+#define ixDPM_TABLE_52                                                          0x3f264
+#define ixDPM_TABLE_53                                                          0x3f268
+#define ixDPM_TABLE_54                                                          0x3f26c
+#define ixDPM_TABLE_55                                                          0x3f270
+#define ixDPM_TABLE_56                                                          0x3f274
+#define ixDPM_TABLE_57                                                          0x3f278
+#define ixDPM_TABLE_58                                                          0x3f27c
+#define ixDPM_TABLE_59                                                          0x3f280
+#define ixDPM_TABLE_60                                                          0x3f284
+#define ixDPM_TABLE_61                                                          0x3f288
+#define ixDPM_TABLE_62                                                          0x3f28c
+#define ixDPM_TABLE_63                                                          0x3f290
+#define ixDPM_TABLE_64                                                          0x3f294
+#define ixDPM_TABLE_65                                                          0x3f298
+#define ixDPM_TABLE_66                                                          0x3f29c
+#define ixDPM_TABLE_67                                                          0x3f2a0
+#define ixDPM_TABLE_68                                                          0x3f2a4
+#define ixDPM_TABLE_69                                                          0x3f2a8
+#define ixDPM_TABLE_70                                                          0x3f2ac
+#define ixDPM_TABLE_71                                                          0x3f2b0
+#define ixDPM_TABLE_72                                                          0x3f2b4
+#define ixDPM_TABLE_73                                                          0x3f2b8
+#define ixDPM_TABLE_74                                                          0x3f2bc
+#define ixDPM_TABLE_75                                                          0x3f2c0
+#define ixDPM_TABLE_76                                                          0x3f2c4
+#define ixDPM_TABLE_77                                                          0x3f2c8
+#define ixDPM_TABLE_78                                                          0x3f2cc
+#define ixDPM_TABLE_79                                                          0x3f2d0
+#define ixDPM_TABLE_80                                                          0x3f2d4
+#define ixDPM_TABLE_81                                                          0x3f2d8
+#define ixDPM_TABLE_82                                                          0x3f2dc
+#define ixDPM_TABLE_83                                                          0x3f2e0
+#define ixDPM_TABLE_84                                                          0x3f2e4
+#define ixDPM_TABLE_85                                                          0x3f2e8
+#define ixDPM_TABLE_86                                                          0x3f2ec
+#define ixDPM_TABLE_87                                                          0x3f2f0
+#define ixDPM_TABLE_88                                                          0x3f2f4
+#define ixDPM_TABLE_89                                                          0x3f2f8
+#define ixDPM_TABLE_90                                                          0x3f2fc
+#define ixDPM_TABLE_91                                                          0x3f300
+#define ixDPM_TABLE_92                                                          0x3f304
+#define ixDPM_TABLE_93                                                          0x3f308
+#define ixDPM_TABLE_94                                                          0x3f30c
+#define ixDPM_TABLE_95                                                          0x3f310
+#define ixDPM_TABLE_96                                                          0x3f314
+#define ixDPM_TABLE_97                                                          0x3f318
+#define ixDPM_TABLE_98                                                          0x3f31c
+#define ixDPM_TABLE_99                                                          0x3f320
+#define ixDPM_TABLE_100                                                         0x3f324
+#define ixDPM_TABLE_101                                                         0x3f328
+#define ixDPM_TABLE_102                                                         0x3f32c
+#define ixDPM_TABLE_103                                                         0x3f330
+#define ixDPM_TABLE_104                                                         0x3f334
+#define ixDPM_TABLE_105                                                         0x3f338
+#define ixDPM_TABLE_106                                                         0x3f33c
+#define ixDPM_TABLE_107                                                         0x3f340
+#define ixDPM_TABLE_108                                                         0x3f344
+#define ixDPM_TABLE_109                                                         0x3f348
+#define ixDPM_TABLE_110                                                         0x3f34c
+#define ixDPM_TABLE_111                                                         0x3f350
+#define ixDPM_TABLE_112                                                         0x3f354
+#define ixDPM_TABLE_113                                                         0x3f358
+#define ixDPM_TABLE_114                                                         0x3f35c
+#define ixDPM_TABLE_115                                                         0x3f360
+#define ixDPM_TABLE_116                                                         0x3f364
+#define ixDPM_TABLE_117                                                         0x3f368
+#define ixDPM_TABLE_118                                                         0x3f36c
+#define ixDPM_TABLE_119                                                         0x3f370
+#define ixDPM_TABLE_120                                                         0x3f374
+#define ixDPM_TABLE_121                                                         0x3f378
+#define ixDPM_TABLE_122                                                         0x3f37c
+#define ixDPM_TABLE_123                                                         0x3f380
+#define ixDPM_TABLE_124                                                         0x3f384
+#define ixDPM_TABLE_125                                                         0x3f388
+#define ixDPM_TABLE_126                                                         0x3f38c
+#define ixDPM_TABLE_127                                                         0x3f390
+#define ixDPM_TABLE_128                                                         0x3f394
+#define ixDPM_TABLE_129                                                         0x3f398
+#define ixDPM_TABLE_130                                                         0x3f39c
+#define ixDPM_TABLE_131                                                         0x3f3a0
+#define ixDPM_TABLE_132                                                         0x3f3a4
+#define ixDPM_TABLE_133                                                         0x3f3a8
+#define ixDPM_TABLE_134                                                         0x3f3ac
+#define ixDPM_TABLE_135                                                         0x3f3b0
+#define ixDPM_TABLE_136                                                         0x3f3b4
+#define ixDPM_TABLE_137                                                         0x3f3b8
+#define ixDPM_TABLE_138                                                         0x3f3bc
+#define ixDPM_TABLE_139                                                         0x3f3c0
+#define ixDPM_TABLE_140                                                         0x3f3c4
+#define ixDPM_TABLE_141                                                         0x3f3c8
+#define ixDPM_TABLE_142                                                         0x3f3cc
+#define ixDPM_TABLE_143                                                         0x3f3d0
+#define ixDPM_TABLE_144                                                         0x3f3d4
+#define ixDPM_TABLE_145                                                         0x3f3d8
+#define ixDPM_TABLE_146                                                         0x3f3dc
+#define ixDPM_TABLE_147                                                         0x3f3e0
+#define ixDPM_TABLE_148                                                         0x3f3e4
+#define ixDPM_TABLE_149                                                         0x3f3e8
+#define ixDPM_TABLE_150                                                         0x3f3ec
+#define ixDPM_TABLE_151                                                         0x3f3f0
+#define ixDPM_TABLE_152                                                         0x3f3f4
+#define ixDPM_TABLE_153                                                         0x3f3f8
+#define ixDPM_TABLE_154                                                         0x3f3fc
+#define ixDPM_TABLE_155                                                         0x3f400
+#define ixDPM_TABLE_156                                                         0x3f404
+#define ixDPM_TABLE_157                                                         0x3f408
+#define ixDPM_TABLE_158                                                         0x3f40c
+#define ixDPM_TABLE_159                                                         0x3f410
+#define ixDPM_TABLE_160                                                         0x3f414
+#define ixDPM_TABLE_161                                                         0x3f418
+#define ixDPM_TABLE_162                                                         0x3f41c
+#define ixDPM_TABLE_163                                                         0x3f420
+#define ixDPM_TABLE_164                                                         0x3f424
+#define ixDPM_TABLE_165                                                         0x3f428
+#define ixDPM_TABLE_166                                                         0x3f42c
+#define ixDPM_TABLE_167                                                         0x3f430
+#define ixDPM_TABLE_168                                                         0x3f434
+#define ixDPM_TABLE_169                                                         0x3f438
+#define ixDPM_TABLE_170                                                         0x3f43c
+#define ixDPM_TABLE_171                                                         0x3f440
+#define ixDPM_TABLE_172                                                         0x3f444
+#define ixDPM_TABLE_173                                                         0x3f448
+#define ixDPM_TABLE_174                                                         0x3f44c
+#define ixDPM_TABLE_175                                                         0x3f450
+#define ixDPM_TABLE_176                                                         0x3f454
+#define ixDPM_TABLE_177                                                         0x3f458
+#define ixDPM_TABLE_178                                                         0x3f45c
+#define ixDPM_TABLE_179                                                         0x3f460
+#define ixDPM_TABLE_180                                                         0x3f464
+#define ixDPM_TABLE_181                                                         0x3f468
+#define ixDPM_TABLE_182                                                         0x3f46c
+#define ixDPM_TABLE_183                                                         0x3f470
+#define ixDPM_TABLE_184                                                         0x3f474
+#define ixDPM_TABLE_185                                                         0x3f478
+#define ixDPM_TABLE_186                                                         0x3f47c
+#define ixDPM_TABLE_187                                                         0x3f480
+#define ixDPM_TABLE_188                                                         0x3f484
+#define ixDPM_TABLE_189                                                         0x3f488
+#define ixDPM_TABLE_190                                                         0x3f48c
+#define ixDPM_TABLE_191                                                         0x3f490
+#define ixDPM_TABLE_192                                                         0x3f494
+#define ixDPM_TABLE_193                                                         0x3f498
+#define ixDPM_TABLE_194                                                         0x3f49c
+#define ixDPM_TABLE_195                                                         0x3f4a0
+#define ixDPM_TABLE_196                                                         0x3f4a4
+#define ixDPM_TABLE_197                                                         0x3f4a8
+#define ixDPM_TABLE_198                                                         0x3f4ac
+#define ixDPM_TABLE_199                                                         0x3f4b0
+#define ixDPM_TABLE_200                                                         0x3f4b4
+#define ixDPM_TABLE_201                                                         0x3f4b8
+#define ixDPM_TABLE_202                                                         0x3f4bc
+#define ixDPM_TABLE_203                                                         0x3f4c0
+#define ixDPM_TABLE_204                                                         0x3f4c4
+#define ixDPM_TABLE_205                                                         0x3f4c8
+#define ixDPM_TABLE_206                                                         0x3f4cc
+#define ixDPM_TABLE_207                                                         0x3f4d0
+#define ixDPM_TABLE_208                                                         0x3f4d4
+#define ixDPM_TABLE_209                                                         0x3f4d8
+#define ixDPM_TABLE_210                                                         0x3f4dc
+#define ixDPM_TABLE_211                                                         0x3f4e0
+#define ixDPM_TABLE_212                                                         0x3f4e4
+#define ixDPM_TABLE_213                                                         0x3f4e8
+#define ixDPM_TABLE_214                                                         0x3f4ec
+#define ixDPM_TABLE_215                                                         0x3f4f0
+#define ixDPM_TABLE_216                                                         0x3f4f4
+#define ixDPM_TABLE_217                                                         0x3f4f8
+#define ixDPM_TABLE_218                                                         0x3f4fc
+#define ixDPM_TABLE_219                                                         0x3f500
+#define ixDPM_TABLE_220                                                         0x3f504
+#define ixDPM_TABLE_221                                                         0x3f508
+#define ixDPM_TABLE_222                                                         0x3f50c
+#define ixDPM_TABLE_223                                                         0x3f510
+#define ixDPM_TABLE_224                                                         0x3f514
+#define ixDPM_TABLE_225                                                         0x3f518
+#define ixDPM_TABLE_226                                                         0x3f51c
+#define ixDPM_TABLE_227                                                         0x3f520
+#define ixDPM_TABLE_228                                                         0x3f524
+#define ixDPM_TABLE_229                                                         0x3f528
+#define ixDPM_TABLE_230                                                         0x3f52c
+#define ixDPM_TABLE_231                                                         0x3f530
+#define ixDPM_TABLE_232                                                         0x3f534
+#define ixDPM_TABLE_233                                                         0x3f538
+#define ixDPM_TABLE_234                                                         0x3f53c
+#define ixDPM_TABLE_235                                                         0x3f540
+#define ixDPM_TABLE_236                                                         0x3f544
+#define ixDPM_TABLE_237                                                         0x3f548
+#define ixDPM_TABLE_238                                                         0x3f54c
+#define ixDPM_TABLE_239                                                         0x3f550
+#define ixDPM_TABLE_240                                                         0x3f554
+#define ixDPM_TABLE_241                                                         0x3f558
+#define ixDPM_TABLE_242                                                         0x3f55c
+#define ixDPM_TABLE_243                                                         0x3f560
+#define ixDPM_TABLE_244                                                         0x3f564
+#define ixDPM_TABLE_245                                                         0x3f568
+#define ixDPM_TABLE_246                                                         0x3f56c
+#define ixDPM_TABLE_247                                                         0x3f570
+#define ixDPM_TABLE_248                                                         0x3f574
+#define ixDPM_TABLE_249                                                         0x3f578
+#define ixDPM_TABLE_250                                                         0x3f57c
+#define ixDPM_TABLE_251                                                         0x3f580
+#define ixDPM_TABLE_252                                                         0x3f584
+#define ixDPM_TABLE_253                                                         0x3f588
+#define ixDPM_TABLE_254                                                         0x3f58c
+#define ixDPM_TABLE_255                                                         0x3f590
+#define ixDPM_TABLE_256                                                         0x3f594
+#define ixDPM_TABLE_257                                                         0x3f598
+#define ixDPM_TABLE_258                                                         0x3f59c
+#define ixDPM_TABLE_259                                                         0x3f5a0
+#define ixDPM_TABLE_260                                                         0x3f5a4
+#define ixDPM_TABLE_261                                                         0x3f5a8
+#define ixDPM_TABLE_262                                                         0x3f5ac
+#define ixDPM_TABLE_263                                                         0x3f5b0
+#define ixDPM_TABLE_264                                                         0x3f5b4
+#define ixDPM_TABLE_265                                                         0x3f5b8
+#define ixDPM_TABLE_266                                                         0x3f5bc
+#define ixDPM_TABLE_267                                                         0x3f5c0
+#define ixDPM_TABLE_268                                                         0x3f5c4
+#define ixDPM_TABLE_269                                                         0x3f5c8
+#define ixDPM_TABLE_270                                                         0x3f5cc
+#define ixDPM_TABLE_271                                                         0x3f5d0
+#define ixDPM_TABLE_272                                                         0x3f5d4
+#define ixDPM_TABLE_273                                                         0x3f5d8
+#define ixDPM_TABLE_274                                                         0x3f5dc
+#define ixDPM_TABLE_275                                                         0x3f5e0
+#define ixDPM_TABLE_276                                                         0x3f5e4
+#define ixDPM_TABLE_277                                                         0x3f5e8
+#define ixDPM_TABLE_278                                                         0x3f5ec
+#define ixDPM_TABLE_279                                                         0x3f5f0
+#define ixDPM_TABLE_280                                                         0x3f5f4
+#define ixDPM_TABLE_281                                                         0x3f5f8
+#define ixDPM_TABLE_282                                                         0x3f5fc
+#define ixDPM_TABLE_283                                                         0x3f600
+#define ixDPM_TABLE_284                                                         0x3f604
+#define ixDPM_TABLE_285                                                         0x3f608
+#define ixDPM_TABLE_286                                                         0x3f60c
+#define ixDPM_TABLE_287                                                         0x3f610
+#define ixDPM_TABLE_288                                                         0x3f614
+#define ixDPM_TABLE_289                                                         0x3f618
+#define ixDPM_TABLE_290                                                         0x3f61c
+#define ixDPM_TABLE_291                                                         0x3f620
+#define ixDPM_TABLE_292                                                         0x3f624
+#define ixDPM_TABLE_293                                                         0x3f628
+#define ixDPM_TABLE_294                                                         0x3f62c
+#define ixDPM_TABLE_295                                                         0x3f630
+#define ixDPM_TABLE_296                                                         0x3f634
+#define ixDPM_TABLE_297                                                         0x3f638
+#define ixDPM_TABLE_298                                                         0x3f63c
+#define ixDPM_TABLE_299                                                         0x3f640
+#define ixDPM_TABLE_300                                                         0x3f644
+#define ixDPM_TABLE_301                                                         0x3f648
+#define ixDPM_TABLE_302                                                         0x3f64c
+#define ixDPM_TABLE_303                                                         0x3f650
+#define ixDPM_TABLE_304                                                         0x3f654
+#define ixDPM_TABLE_305                                                         0x3f658
+#define ixDPM_TABLE_306                                                         0x3f65c
+#define ixDPM_TABLE_307                                                         0x3f660
+#define ixDPM_TABLE_308                                                         0x3f664
+#define ixDPM_TABLE_309                                                         0x3f668
+#define ixDPM_TABLE_310                                                         0x3f66c
+#define ixDPM_TABLE_311                                                         0x3f670
+#define ixDPM_TABLE_312                                                         0x3f674
+#define ixDPM_TABLE_313                                                         0x3f678
+#define ixDPM_TABLE_314                                                         0x3f67c
+#define ixDPM_TABLE_315                                                         0x3f680
+#define ixDPM_TABLE_316                                                         0x3f684
+#define ixDPM_TABLE_317                                                         0x3f688
+#define ixDPM_TABLE_318                                                         0x3f68c
+#define ixDPM_TABLE_319                                                         0x3f690
+#define ixDPM_TABLE_320                                                         0x3f694
+#define ixDPM_TABLE_321                                                         0x3f698
+#define ixDPM_TABLE_322                                                         0x3f69c
+#define ixDPM_TABLE_323                                                         0x3f6a0
+#define ixDPM_TABLE_324                                                         0x3f6a4
+#define ixDPM_TABLE_325                                                         0x3f6a8
+#define ixDPM_TABLE_326                                                         0x3f6ac
+#define ixDPM_TABLE_327                                                         0x3f6b0
+#define ixDPM_TABLE_328                                                         0x3f6b4
+#define ixDPM_TABLE_329                                                         0x3f6b8
+#define ixDPM_TABLE_330                                                         0x3f6bc
+#define ixDPM_TABLE_331                                                         0x3f6c0
+#define ixDPM_TABLE_332                                                         0x3f6c4
+#define ixDPM_TABLE_333                                                         0x3f6c8
+#define ixDPM_TABLE_334                                                         0x3f6cc
+#define ixDPM_TABLE_335                                                         0x3f6d0
+#define ixDPM_TABLE_336                                                         0x3f6d4
+#define ixDPM_TABLE_337                                                         0x3f6d8
+#define ixDPM_TABLE_338                                                         0x3f6dc
+#define ixDPM_TABLE_339                                                         0x3f6e0
+#define ixDPM_TABLE_340                                                         0x3f6e4
+#define ixDPM_TABLE_341                                                         0x3f6e8
+#define ixDPM_TABLE_342                                                         0x3f6ec
+#define ixDPM_TABLE_343                                                         0x3f6f0
+#define ixDPM_TABLE_344                                                         0x3f6f4
+#define ixDPM_TABLE_345                                                         0x3f6f8
+#define ixDPM_TABLE_346                                                         0x3f6fc
+#define ixDPM_TABLE_347                                                         0x3f700
+#define ixDPM_TABLE_348                                                         0x3f704
+#define ixDPM_TABLE_349                                                         0x3f708
+#define ixDPM_TABLE_350                                                         0x3f70c
+#define ixDPM_TABLE_351                                                         0x3f710
+#define ixDPM_TABLE_352                                                         0x3f714
+#define ixDPM_TABLE_353                                                         0x3f718
+#define ixDPM_TABLE_354                                                         0x3f71c
+#define ixDPM_TABLE_355                                                         0x3f720
+#define ixDPM_TABLE_356                                                         0x3f724
+#define ixDPM_TABLE_357                                                         0x3f728
+#define ixDPM_TABLE_358                                                         0x3f72c
+#define ixDPM_TABLE_359                                                         0x3f730
+#define ixDPM_TABLE_360                                                         0x3f734
+#define ixDPM_TABLE_361                                                         0x3f738
+#define ixDPM_TABLE_362                                                         0x3f73c
+#define ixDPM_TABLE_363                                                         0x3f740
+#define ixDPM_TABLE_364                                                         0x3f744
+#define ixDPM_TABLE_365                                                         0x3f748
+#define ixDPM_TABLE_366                                                         0x3f74c
+#define ixDPM_TABLE_367                                                         0x3f750
+#define ixDPM_TABLE_368                                                         0x3f754
+#define ixDPM_TABLE_369                                                         0x3f758
+#define ixDPM_TABLE_370                                                         0x3f75c
+#define ixDPM_TABLE_371                                                         0x3f760
+#define ixDPM_TABLE_372                                                         0x3f764
+#define ixDPM_TABLE_373                                                         0x3f768
+#define ixDPM_TABLE_374                                                         0x3f76c
+#define ixDPM_TABLE_375                                                         0x3f770
+#define ixDPM_TABLE_376                                                         0x3f774
+#define ixDPM_TABLE_377                                                         0x3f778
+#define ixDPM_TABLE_378                                                         0x3f77c
+#define ixDPM_TABLE_379                                                         0x3f780
+#define ixDPM_TABLE_380                                                         0x3f784
+#define ixDPM_TABLE_381                                                         0x3f788
+#define ixDPM_TABLE_382                                                         0x3f78c
+#define ixDPM_TABLE_383                                                         0x3f790
+#define ixDPM_TABLE_384                                                         0x3f794
+#define ixDPM_TABLE_385                                                         0x3f798
+#define ixDPM_TABLE_386                                                         0x3f79c
+#define ixDPM_TABLE_387                                                         0x3f7a0
+#define ixDPM_TABLE_388                                                         0x3f7a4
+#define ixDPM_TABLE_389                                                         0x3f7a8
+#define ixDPM_TABLE_390                                                         0x3f7ac
+#define ixDPM_TABLE_391                                                         0x3f7b0
+#define ixDPM_TABLE_392                                                         0x3f7b4
+#define ixDPM_TABLE_393                                                         0x3f7b8
+#define ixDPM_TABLE_394                                                         0x3f7bc
+#define ixDPM_TABLE_395                                                         0x3f7c0
+#define ixDPM_TABLE_396                                                         0x3f7c4
+#define ixDPM_TABLE_397                                                         0x3f7c8
+#define ixDPM_TABLE_398                                                         0x3f7cc
+#define ixDPM_TABLE_399                                                         0x3f7d0
+#define ixDPM_TABLE_400                                                         0x3f7d4
+#define ixDPM_TABLE_401                                                         0x3f7d8
+#define ixDPM_TABLE_402                                                         0x3f7dc
+#define ixDPM_TABLE_403                                                         0x3f7e0
+#define ixDPM_TABLE_404                                                         0x3f7e4
+#define ixDPM_TABLE_405                                                         0x3f7e8
+#define ixDPM_TABLE_406                                                         0x3f7ec
+#define ixDPM_TABLE_407                                                         0x3f7f0
+#define ixDPM_TABLE_408                                                         0x3f7f4
+#define ixDPM_TABLE_409                                                         0x3f7f8
+#define ixDPM_TABLE_410                                                         0x3f7fc
+#define ixDPM_TABLE_411                                                         0x3f800
+#define ixDPM_TABLE_412                                                         0x3f804
+#define ixDPM_TABLE_413                                                         0x3f808
+#define ixDPM_TABLE_414                                                         0x3f80c
+#define ixDPM_TABLE_415                                                         0x3f810
+#define ixDPM_TABLE_416                                                         0x3f814
+#define ixDPM_TABLE_417                                                         0x3f818
+#define ixDPM_TABLE_418                                                         0x3f81c
+#define ixDPM_TABLE_419                                                         0x3f820
+#define ixDPM_TABLE_420                                                         0x3f824
+#define ixDPM_TABLE_421                                                         0x3f828
+#define ixDPM_TABLE_422                                                         0x3f82c
+#define ixDPM_TABLE_423                                                         0x3f830
+#define ixDPM_TABLE_424                                                         0x3f834
+#define ixDPM_TABLE_425                                                         0x3f838
+#define ixDPM_TABLE_426                                                         0x3f83c
+#define ixDPM_TABLE_427                                                         0x3f840
+#define ixDPM_TABLE_428                                                         0x3f844
+#define ixDPM_TABLE_429                                                         0x3f848
+#define ixDPM_TABLE_430                                                         0x3f84c
+#define ixDPM_TABLE_431                                                         0x3f850
+#define ixDPM_TABLE_432                                                         0x3f854
+#define ixDPM_TABLE_433                                                         0x3f858
+#define ixDPM_TABLE_434                                                         0x3f85c
+#define ixDPM_TABLE_435                                                         0x3f860
+#define ixDPM_TABLE_436                                                         0x3f864
+#define ixDPM_TABLE_437                                                         0x3f868
+#define ixDPM_TABLE_438                                                         0x3f86c
+#define ixDPM_TABLE_439                                                         0x3f870
+#define ixDPM_TABLE_440                                                         0x3f874
+#define ixSOFT_REGISTERS_TABLE_1                                                0x3f89c
+#define ixSOFT_REGISTERS_TABLE_2                                                0x3f8a0
+#define ixSOFT_REGISTERS_TABLE_3                                                0x3f8a4
+#define ixSOFT_REGISTERS_TABLE_4                                                0x3f8a8
+#define ixSOFT_REGISTERS_TABLE_5                                                0x3f8ac
+#define ixSOFT_REGISTERS_TABLE_6                                                0x3f8b0
+#define ixSOFT_REGISTERS_TABLE_7                                                0x3f8b4
+#define ixSOFT_REGISTERS_TABLE_8                                                0x3f8b8
+#define ixSOFT_REGISTERS_TABLE_9                                                0x3f8bc
+#define ixSOFT_REGISTERS_TABLE_10                                               0x3f8c0
+#define ixSOFT_REGISTERS_TABLE_11                                               0x3f8c4
+#define ixSOFT_REGISTERS_TABLE_12                                               0x3f8c8
+#define ixSOFT_REGISTERS_TABLE_13                                               0x3f8cc
+#define ixSOFT_REGISTERS_TABLE_14                                               0x3f8d0
+#define ixSOFT_REGISTERS_TABLE_15                                               0x3f8d4
+#define ixSOFT_REGISTERS_TABLE_16                                               0x3f8d8
+#define ixSOFT_REGISTERS_TABLE_17                                               0x3f8dc
+#define ixSOFT_REGISTERS_TABLE_18                                               0x3f8e0
+#define ixSOFT_REGISTERS_TABLE_19                                               0x3f8e4
+#define ixSOFT_REGISTERS_TABLE_20                                               0x3f8e8
+#define ixSOFT_REGISTERS_TABLE_21                                               0x3f8ec
+#define ixSOFT_REGISTERS_TABLE_22                                               0x3f8f0
+#define ixSOFT_REGISTERS_TABLE_23                                               0x3f8f4
+#define ixSOFT_REGISTERS_TABLE_24                                               0x3f8f8
+#define ixSOFT_REGISTERS_TABLE_25                                               0x3f8fc
+#define ixSOFT_REGISTERS_TABLE_26                                               0x3f900
+#define ixSOFT_REGISTERS_TABLE_27                                               0x3f904
+#define ixSOFT_REGISTERS_TABLE_28                                               0x3f888
+#define ixSOFT_REGISTERS_TABLE_29                                               0x3f90c
+#define ixSOFT_REGISTERS_TABLE_30                                               0x3f910
+#define ixPM_FUSES_1                                                            0x3f914
+#define ixPM_FUSES_2                                                            0x3f918
+#define ixPM_FUSES_3                                                            0x3f91c
+#define ixPM_FUSES_4                                                            0x3f920
+#define ixPM_FUSES_5                                                            0x3f924
+#define ixPM_FUSES_6                                                            0x3f928
+#define ixPM_FUSES_7                                                            0x3f92c
+#define ixPM_FUSES_8                                                            0x3f930
+#define ixPM_FUSES_9                                                            0x3f934
+#define ixPM_FUSES_10                                                           0x3f938
+#define ixPM_FUSES_11                                                           0x3f93c
+#define ixPM_FUSES_12                                                           0x3f940
+#define ixPM_FUSES_13                                                           0x3f944
+#define ixPM_FUSES_14                                                           0x3f948
+#define ixPM_FUSES_15                                                           0x3f94c
+#define ixSMU_PM_STATUS_0                                                       0x3fe00
+#define ixSMU_PM_STATUS_1                                                       0x3fe04
+#define ixSMU_PM_STATUS_2                                                       0x3fe08
+#define ixSMU_PM_STATUS_3                                                       0x3fe0c
+#define ixSMU_PM_STATUS_4                                                       0x3fe10
+#define ixSMU_PM_STATUS_5                                                       0x3fe14
+#define ixSMU_PM_STATUS_6                                                       0x3fe18
+#define ixSMU_PM_STATUS_7                                                       0x3fe1c
+#define ixSMU_PM_STATUS_8                                                       0x3fe20
+#define ixSMU_PM_STATUS_9                                                       0x3fe24
+#define ixSMU_PM_STATUS_10                                                      0x3fe28
+#define ixSMU_PM_STATUS_11                                                      0x3fe2c
+#define ixSMU_PM_STATUS_12                                                      0x3fe30
+#define ixSMU_PM_STATUS_13                                                      0x3fe34
+#define ixSMU_PM_STATUS_14                                                      0x3fe38
+#define ixSMU_PM_STATUS_15                                                      0x3fe3c
+#define ixSMU_PM_STATUS_16                                                      0x3fe40
+#define ixSMU_PM_STATUS_17                                                      0x3fe44
+#define ixSMU_PM_STATUS_18                                                      0x3fe48
+#define ixSMU_PM_STATUS_19                                                      0x3fe4c
+#define ixSMU_PM_STATUS_20                                                      0x3fe50
+#define ixSMU_PM_STATUS_21                                                      0x3fe54
+#define ixSMU_PM_STATUS_22                                                      0x3fe58
+#define ixSMU_PM_STATUS_23                                                      0x3fe5c
+#define ixSMU_PM_STATUS_24                                                      0x3fe60
+#define ixSMU_PM_STATUS_25                                                      0x3fe64
+#define ixSMU_PM_STATUS_26                                                      0x3fe68
+#define ixSMU_PM_STATUS_27                                                      0x3fe6c
+#define ixSMU_PM_STATUS_28                                                      0x3fe70
+#define ixSMU_PM_STATUS_29                                                      0x3fe74
+#define ixSMU_PM_STATUS_30                                                      0x3fe78
+#define ixSMU_PM_STATUS_31                                                      0x3fe7c
+#define ixSMU_PM_STATUS_32                                                      0x3fe80
+#define ixSMU_PM_STATUS_33                                                      0x3fe84
+#define ixSMU_PM_STATUS_34                                                      0x3fe88
+#define ixSMU_PM_STATUS_35                                                      0x3fe8c
+#define ixSMU_PM_STATUS_36                                                      0x3fe90
+#define ixSMU_PM_STATUS_37                                                      0x3fe94
+#define ixSMU_PM_STATUS_38                                                      0x3fe98
+#define ixSMU_PM_STATUS_39                                                      0x3fe9c
+#define ixSMU_PM_STATUS_40                                                      0x3fea0
+#define ixSMU_PM_STATUS_41                                                      0x3fea4
+#define ixSMU_PM_STATUS_42                                                      0x3fea8
+#define ixSMU_PM_STATUS_43                                                      0x3feac
+#define ixSMU_PM_STATUS_44                                                      0x3feb0
+#define ixSMU_PM_STATUS_45                                                      0x3feb4
+#define ixSMU_PM_STATUS_46                                                      0x3feb8
+#define ixSMU_PM_STATUS_47                                                      0x3febc
+#define ixSMU_PM_STATUS_48                                                      0x3fec0
+#define ixSMU_PM_STATUS_49                                                      0x3fec4
+#define ixSMU_PM_STATUS_50                                                      0x3fec8
+#define ixSMU_PM_STATUS_51                                                      0x3fecc
+#define ixSMU_PM_STATUS_52                                                      0x3fed0
+#define ixSMU_PM_STATUS_53                                                      0x3fed4
+#define ixSMU_PM_STATUS_54                                                      0x3fed8
+#define ixSMU_PM_STATUS_55                                                      0x3fedc
+#define ixSMU_PM_STATUS_56                                                      0x3fee0
+#define ixSMU_PM_STATUS_57                                                      0x3fee4
+#define ixSMU_PM_STATUS_58                                                      0x3fee8
+#define ixSMU_PM_STATUS_59                                                      0x3feec
+#define ixSMU_PM_STATUS_60                                                      0x3fef0
+#define ixSMU_PM_STATUS_61                                                      0x3fef4
+#define ixSMU_PM_STATUS_62                                                      0x3fef8
+#define ixSMU_PM_STATUS_63                                                      0x3fefc
+#define ixSMU_PM_STATUS_64                                                      0x3ff00
+#define ixSMU_PM_STATUS_65                                                      0x3ff04
+#define ixSMU_PM_STATUS_66                                                      0x3ff08
+#define ixSMU_PM_STATUS_67                                                      0x3ff0c
+#define ixSMU_PM_STATUS_68                                                      0x3ff10
+#define ixSMU_PM_STATUS_69                                                      0x3ff14
+#define ixSMU_PM_STATUS_70                                                      0x3ff18
+#define ixSMU_PM_STATUS_71                                                      0x3ff1c
+#define ixSMU_PM_STATUS_72                                                      0x3ff20
+#define ixSMU_PM_STATUS_73                                                      0x3ff24
+#define ixSMU_PM_STATUS_74                                                      0x3ff28
+#define ixSMU_PM_STATUS_75                                                      0x3ff2c
+#define ixSMU_PM_STATUS_76                                                      0x3ff30
+#define ixSMU_PM_STATUS_77                                                      0x3ff34
+#define ixSMU_PM_STATUS_78                                                      0x3ff38
+#define ixSMU_PM_STATUS_79                                                      0x3ff3c
+#define ixSMU_PM_STATUS_80                                                      0x3ff40
+#define ixSMU_PM_STATUS_81                                                      0x3ff44
+#define ixSMU_PM_STATUS_82                                                      0x3ff48
+#define ixSMU_PM_STATUS_83                                                      0x3ff4c
+#define ixSMU_PM_STATUS_84                                                      0x3ff50
+#define ixSMU_PM_STATUS_85                                                      0x3ff54
+#define ixSMU_PM_STATUS_86                                                      0x3ff58
+#define ixSMU_PM_STATUS_87                                                      0x3ff5c
+#define ixSMU_PM_STATUS_88                                                      0x3ff60
+#define ixSMU_PM_STATUS_89                                                      0x3ff64
+#define ixSMU_PM_STATUS_90                                                      0x3ff68
+#define ixSMU_PM_STATUS_91                                                      0x3ff6c
+#define ixSMU_PM_STATUS_92                                                      0x3ff70
+#define ixSMU_PM_STATUS_93                                                      0x3ff74
+#define ixSMU_PM_STATUS_94                                                      0x3ff78
+#define ixSMU_PM_STATUS_95                                                      0x3ff7c
+#define ixSMU_PM_STATUS_96                                                      0x3ff80
+#define ixSMU_PM_STATUS_97                                                      0x3ff84
+#define ixSMU_PM_STATUS_98                                                      0x3ff88
+#define ixSMU_PM_STATUS_99                                                      0x3ff8c
+#define ixSMU_PM_STATUS_100                                                     0x3ff90
+#define ixSMU_PM_STATUS_101                                                     0x3ff94
+#define ixSMU_PM_STATUS_102                                                     0x3ff98
+#define ixSMU_PM_STATUS_103                                                     0x3ff9c
+#define ixSMU_PM_STATUS_104                                                     0x3ffa0
+#define ixSMU_PM_STATUS_105                                                     0x3ffa4
+#define ixSMU_PM_STATUS_106                                                     0x3ffa8
+#define ixSMU_PM_STATUS_107                                                     0x3ffac
+#define ixSMU_PM_STATUS_108                                                     0x3ffb0
+#define ixSMU_PM_STATUS_109                                                     0x3ffb4
+#define ixSMU_PM_STATUS_110                                                     0x3ffb8
+#define ixSMU_PM_STATUS_111                                                     0x3ffbc
+#define ixSMU_PM_STATUS_112                                                     0x3ffc0
+#define ixSMU_PM_STATUS_113                                                     0x3ffc4
+#define ixSMU_PM_STATUS_114                                                     0x3ffc8
+#define ixSMU_PM_STATUS_115                                                     0x3ffcc
+#define ixSMU_PM_STATUS_116                                                     0x3ffd0
+#define ixSMU_PM_STATUS_117                                                     0x3ffd4
+#define ixSMU_PM_STATUS_118                                                     0x3ffd8
+#define ixSMU_PM_STATUS_119                                                     0x3ffdc
+#define ixSMU_PM_STATUS_120                                                     0x3ffe0
+#define ixSMU_PM_STATUS_121                                                     0x3ffe4
+#define ixSMU_PM_STATUS_122                                                     0x3ffe8
+#define ixSMU_PM_STATUS_123                                                     0x3ffec
+#define ixSMU_PM_STATUS_124                                                     0x3fff0
+#define ixSMU_PM_STATUS_125                                                     0x3fff4
+#define ixSMU_PM_STATUS_126                                                     0x3fff8
+#define ixSMU_PM_STATUS_127                                                     0x3fffc
+#define ixCG_THERMAL_INT_ENA                                                    0xc2100024
+#define ixCG_THERMAL_INT_CTRL                                                   0xc2100028
+#define ixCG_THERMAL_INT_STATUS                                                 0xc210002c
+#define ixCG_THERMAL_CTRL                                                       0xc0300004
+#define ixCG_THERMAL_STATUS                                                     0xc0300008
+#define ixCG_THERMAL_INT                                                        0xc030000c
+#define ixCG_MULT_THERMAL_CTRL                                                  0xc0300010
+#define ixCG_MULT_THERMAL_STATUS                                                0xc0300014
+#define ixTHM_TMON2_CTRL                                                        0xc0300034
+#define ixTHM_TMON2_CTRL2                                                       0xc0300038
+#define ixTHM_TMON2_CSR_WR                                                      0xc0300054
+#define ixTHM_TMON2_CSR_RD                                                      0xc0300058
+#define ixCG_FDO_CTRL0                                                          0xc0300064
+#define ixCG_FDO_CTRL1                                                          0xc0300068
+#define ixCG_FDO_CTRL2                                                          0xc030006c
+#define ixCG_TACH_CTRL                                                          0xc0300070
+#define ixCG_TACH_STATUS                                                        0xc0300074
+#define ixCC_THM_STRAPS0                                                        0xc0300080
+#define ixTHM_TMON0_RDIL0_DATA                                                  0xc0300100
+#define ixTHM_TMON0_RDIL1_DATA                                                  0xc0300104
+#define ixTHM_TMON0_RDIL2_DATA                                                  0xc0300108
+#define ixTHM_TMON0_RDIL3_DATA                                                  0xc030010c
+#define ixTHM_TMON0_RDIL4_DATA                                                  0xc0300110
+#define ixTHM_TMON0_RDIL5_DATA                                                  0xc0300114
+#define ixTHM_TMON0_RDIL6_DATA                                                  0xc0300118
+#define ixTHM_TMON0_RDIL7_DATA                                                  0xc030011c
+#define ixTHM_TMON0_RDIL8_DATA                                                  0xc0300120
+#define ixTHM_TMON0_RDIL9_DATA                                                  0xc0300124
+#define ixTHM_TMON0_RDIL10_DATA                                                 0xc0300128
+#define ixTHM_TMON0_RDIL11_DATA                                                 0xc030012c
+#define ixTHM_TMON0_RDIL12_DATA                                                 0xc0300130
+#define ixTHM_TMON0_RDIL13_DATA                                                 0xc0300134
+#define ixTHM_TMON0_RDIL14_DATA                                                 0xc0300138
+#define ixTHM_TMON0_RDIL15_DATA                                                 0xc030013c
+#define ixTHM_TMON0_RDIR0_DATA                                                  0xc0300140
+#define ixTHM_TMON0_RDIR1_DATA                                                  0xc0300144
+#define ixTHM_TMON0_RDIR2_DATA                                                  0xc0300148
+#define ixTHM_TMON0_RDIR3_DATA                                                  0xc030014c
+#define ixTHM_TMON0_RDIR4_DATA                                                  0xc0300150
+#define ixTHM_TMON0_RDIR5_DATA                                                  0xc0300154
+#define ixTHM_TMON0_RDIR6_DATA                                                  0xc0300158
+#define ixTHM_TMON0_RDIR7_DATA                                                  0xc030015c
+#define ixTHM_TMON0_RDIR8_DATA                                                  0xc0300160
+#define ixTHM_TMON0_RDIR9_DATA                                                  0xc0300164
+#define ixTHM_TMON0_RDIR10_DATA                                                 0xc0300168
+#define ixTHM_TMON0_RDIR11_DATA                                                 0xc030016c
+#define ixTHM_TMON0_RDIR12_DATA                                                 0xc0300170
+#define ixTHM_TMON0_RDIR13_DATA                                                 0xc0300174
+#define ixTHM_TMON0_RDIR14_DATA                                                 0xc0300178
+#define ixTHM_TMON0_RDIR15_DATA                                                 0xc030017c
+#define ixTHM_TMON1_RDIL0_DATA                                                  0xc0300180
+#define ixTHM_TMON1_RDIL1_DATA                                                  0xc0300184
+#define ixTHM_TMON1_RDIL2_DATA                                                  0xc0300188
+#define ixTHM_TMON1_RDIL3_DATA                                                  0xc030018c
+#define ixTHM_TMON1_RDIL4_DATA                                                  0xc0300190
+#define ixTHM_TMON1_RDIL5_DATA                                                  0xc0300194
+#define ixTHM_TMON1_RDIL6_DATA                                                  0xc0300198
+#define ixTHM_TMON1_RDIL7_DATA                                                  0xc030019c
+#define ixTHM_TMON1_RDIL8_DATA                                                  0xc03001a0
+#define ixTHM_TMON1_RDIL9_DATA                                                  0xc03001a4
+#define ixTHM_TMON1_RDIL10_DATA                                                 0xc03001a8
+#define ixTHM_TMON1_RDIL11_DATA                                                 0xc03001ac
+#define ixTHM_TMON1_RDIL12_DATA                                                 0xc03001b0
+#define ixTHM_TMON1_RDIL13_DATA                                                 0xc03001b4
+#define ixTHM_TMON1_RDIL14_DATA                                                 0xc03001b8
+#define ixTHM_TMON1_RDIL15_DATA                                                 0xc03001bc
+#define ixTHM_TMON1_RDIR0_DATA                                                  0xc03001c0
+#define ixTHM_TMON1_RDIR1_DATA                                                  0xc03001c4
+#define ixTHM_TMON1_RDIR2_DATA                                                  0xc03001c8
+#define ixTHM_TMON1_RDIR3_DATA                                                  0xc03001cc
+#define ixTHM_TMON1_RDIR4_DATA                                                  0xc03001d0
+#define ixTHM_TMON1_RDIR5_DATA                                                  0xc03001d4
+#define ixTHM_TMON1_RDIR6_DATA                                                  0xc03001d8
+#define ixTHM_TMON1_RDIR7_DATA                                                  0xc03001dc
+#define ixTHM_TMON1_RDIR8_DATA                                                  0xc03001e0
+#define ixTHM_TMON1_RDIR9_DATA                                                  0xc03001e4
+#define ixTHM_TMON1_RDIR10_DATA                                                 0xc03001e8
+#define ixTHM_TMON1_RDIR11_DATA                                                 0xc03001ec
+#define ixTHM_TMON1_RDIR12_DATA                                                 0xc03001f0
+#define ixTHM_TMON1_RDIR13_DATA                                                 0xc03001f4
+#define ixTHM_TMON1_RDIR14_DATA                                                 0xc03001f8
+#define ixTHM_TMON1_RDIR15_DATA                                                 0xc03001fc
+#define ixTHM_TMON2_RDIL0_DATA                                                  0xc0300200
+#define ixTHM_TMON2_RDIL1_DATA                                                  0xc0300204
+#define ixTHM_TMON2_RDIL2_DATA                                                  0xc0300208
+#define ixTHM_TMON2_RDIL3_DATA                                                  0xc030020c
+#define ixTHM_TMON2_RDIL4_DATA                                                  0xc0300210
+#define ixTHM_TMON2_RDIL5_DATA                                                  0xc0300214
+#define ixTHM_TMON2_RDIL6_DATA                                                  0xc0300218
+#define ixTHM_TMON2_RDIL7_DATA                                                  0xc030021c
+#define ixTHM_TMON2_RDIL8_DATA                                                  0xc0300220
+#define ixTHM_TMON2_RDIL9_DATA                                                  0xc0300224
+#define ixTHM_TMON2_RDIL10_DATA                                                 0xc0300228
+#define ixTHM_TMON2_RDIL11_DATA                                                 0xc030022c
+#define ixTHM_TMON2_RDIL12_DATA                                                 0xc0300230
+#define ixTHM_TMON2_RDIL13_DATA                                                 0xc0300234
+#define ixTHM_TMON2_RDIL14_DATA                                                 0xc0300238
+#define ixTHM_TMON2_RDIL15_DATA                                                 0xc030023c
+#define ixTHM_TMON2_RDIR0_DATA                                                  0xc0300240
+#define ixTHM_TMON2_RDIR1_DATA                                                  0xc0300244
+#define ixTHM_TMON2_RDIR2_DATA                                                  0xc0300248
+#define ixTHM_TMON2_RDIR3_DATA                                                  0xc030024c
+#define ixTHM_TMON2_RDIR4_DATA                                                  0xc0300250
+#define ixTHM_TMON2_RDIR5_DATA                                                  0xc0300254
+#define ixTHM_TMON2_RDIR6_DATA                                                  0xc0300258
+#define ixTHM_TMON2_RDIR7_DATA                                                  0xc030025c
+#define ixTHM_TMON2_RDIR8_DATA                                                  0xc0300260
+#define ixTHM_TMON2_RDIR9_DATA                                                  0xc0300264
+#define ixTHM_TMON2_RDIR10_DATA                                                 0xc0300268
+#define ixTHM_TMON2_RDIR11_DATA                                                 0xc030026c
+#define ixTHM_TMON2_RDIR12_DATA                                                 0xc0300270
+#define ixTHM_TMON2_RDIR13_DATA                                                 0xc0300274
+#define ixTHM_TMON2_RDIR14_DATA                                                 0xc0300278
+#define ixTHM_TMON2_RDIR15_DATA                                                 0xc030027c
+#define ixTHM_TMON0_INT_DATA                                                    0xc0300300
+#define ixTHM_TMON1_INT_DATA                                                    0xc0300304
+#define ixTHM_TMON2_INT_DATA                                                    0xc0300308
+#define ixTHM_TMON0_DEBUG                                                       0xc0300310
+#define ixTHM_TMON1_DEBUG                                                       0xc0300314
+#define ixTHM_TMON2_DEBUG                                                       0xc0300318
+#define ixTHM_TMON0_STATUS                                                      0xc0300320
+#define ixTHM_TMON1_STATUS                                                      0xc0300324
+#define ixTHM_TMON2_STATUS                                                      0xc0300328
+#define ixGENERAL_PWRMGT                                                        0xc0200000
+#define ixCNB_PWRMGT_CNTL                                                       0xc0200004
+#define ixSCLK_PWRMGT_CNTL                                                      0xc0200008
+#define ixTARGET_AND_CURRENT_PROFILE_INDEX                                      0xc0200014
+#define ixPWR_PCC_CONTROL                                                       0xc0200018
+#define ixPWR_PCC_GPIO_SELECT                                                   0xc020001c
+#define ixCG_FREQ_TRAN_VOTING_0                                                 0xc02001a8
+#define ixCG_FREQ_TRAN_VOTING_1                                                 0xc02001ac
+#define ixCG_FREQ_TRAN_VOTING_2                                                 0xc02001b0
+#define ixCG_FREQ_TRAN_VOTING_3                                                 0xc02001b4
+#define ixCG_FREQ_TRAN_VOTING_4                                                 0xc02001b8
+#define ixCG_FREQ_TRAN_VOTING_5                                                 0xc02001bc
+#define ixCG_FREQ_TRAN_VOTING_6                                                 0xc02001c0
+#define ixCG_FREQ_TRAN_VOTING_7                                                 0xc02001c4
+#define ixPLL_TEST_CNTL                                                         0xc020003c
+#define ixCG_STATIC_SCREEN_PARAMETER                                            0xc0200044
+#define ixCG_DISPLAY_GAP_CNTL                                                   0xc0200060
+#define ixCG_DISPLAY_GAP_CNTL2                                                  0xc0200230
+#define ixCG_ACPI_CNTL                                                          0xc0200064
+#define ixSCLK_DEEP_SLEEP_CNTL                                                  0xc0200080
+#define ixSCLK_DEEP_SLEEP_CNTL2                                                 0xc0200084
+#define ixSCLK_DEEP_SLEEP_CNTL3                                                 0xc020009c
+#define ixSCLK_DEEP_SLEEP_MISC_CNTL                                             0xc0200088
+#define ixLCLK_DEEP_SLEEP_CNTL                                                  0xc020008c
+#define ixLCLK_DEEP_SLEEP_CNTL2                                                 0xc0200310
+#define ixTARGET_AND_CURRENT_PROFILE_INDEX_1                                    0xc02000f0
+#define ixCG_ULV_PARAMETER                                                      0xc020015c
+#define ixSCLK_MIN_DIV                                                          0xc02003ac
+#define ixPWR_AVFS_SEL                                                          0xc0200384
+#define ixPWR_AVFS_CNTL                                                         0xc0200388
+#define ixPWR_AVFS0_CNTL_STATUS                                                 0xc0200400
+#define ixPWR_AVFS1_CNTL_STATUS                                                 0xc0200404
+#define ixPWR_AVFS2_CNTL_STATUS                                                 0xc0200408
+#define ixPWR_AVFS3_CNTL_STATUS                                                 0xc020040c
+#define ixPWR_AVFS4_CNTL_STATUS                                                 0xc0200410
+#define ixPWR_AVFS5_CNTL_STATUS                                                 0xc0200414
+#define ixPWR_AVFS6_CNTL_STATUS                                                 0xc0200418
+#define ixPWR_AVFS7_CNTL_STATUS                                                 0xc020041c
+#define ixPWR_AVFS8_CNTL_STATUS                                                 0xc0200420
+#define ixPWR_AVFS9_CNTL_STATUS                                                 0xc0200424
+#define ixPWR_AVFS10_CNTL_STATUS                                                0xc0200428
+#define ixPWR_AVFS11_CNTL_STATUS                                                0xc020042c
+#define ixPWR_AVFS12_CNTL_STATUS                                                0xc0200430
+#define ixPWR_AVFS13_CNTL_STATUS                                                0xc0200434
+#define ixPWR_AVFS14_CNTL_STATUS                                                0xc0200438
+#define ixPWR_AVFS15_CNTL_STATUS                                                0xc020043c
+#define ixPWR_AVFS16_CNTL_STATUS                                                0xc0200440
+#define ixPWR_AVFS17_CNTL_STATUS                                                0xc0200444
+#define ixPWR_AVFS18_CNTL_STATUS                                                0xc0200448
+#define ixPWR_AVFS19_CNTL_STATUS                                                0xc020044c
+#define ixPWR_AVFS20_CNTL_STATUS                                                0xc0200450
+#define ixPWR_AVFS21_CNTL_STATUS                                                0xc0200454
+#define ixPWR_AVFS22_CNTL_STATUS                                                0xc0200458
+#define ixPWR_AVFS23_CNTL_STATUS                                                0xc020045c
+#define ixPWR_AVFS24_CNTL_STATUS                                                0xc0200460
+#define ixPWR_AVFS25_CNTL_STATUS                                                0xc0200464
+#define ixPWR_AVFS26_CNTL_STATUS                                                0xc0200468
+#define ixPWR_AVFS27_CNTL_STATUS                                                0xc020046c
+#define ixPWR_CKS_ENABLE                                                        0xc020034c
+#define ixPWR_CKS_CNTL                                                          0xc0200350
+#define ixPWR_DISP_TIMER_CONTROL                                                0xc02003c0
+#define ixPWR_DISP_TIMER_DEBUG                                                  0xc02003c4
+#define ixPWR_DISP_TIMER2_CONTROL                                               0xc02003c8
+#define ixPWR_DISP_TIMER2_DEBUG                                                 0xc02003cc
+#define ixPWR_DISP_TIMER_CONTROL2                                               0xc0200378
+#define ixVDDGFX_IDLE_PARAMETER                                                 0xc020036c
+#define ixVDDGFX_IDLE_CONTROL                                                   0xc0200370
+#define ixVDDGFX_IDLE_EXIT                                                      0xc0200374
+#define ixLCAC_MC0_CNTL                                                         0xc0400130
+#define ixLCAC_MC0_OVR_SEL                                                      0xc0400134
+#define ixLCAC_MC0_OVR_VAL                                                      0xc0400138
+#define ixLCAC_MC1_CNTL                                                         0xc040013c
+#define ixLCAC_MC1_OVR_SEL                                                      0xc0400140
+#define ixLCAC_MC1_OVR_VAL                                                      0xc0400144
+#define ixLCAC_MC2_CNTL                                                         0xc0400148
+#define ixLCAC_MC2_OVR_SEL                                                      0xc040014c
+#define ixLCAC_MC2_OVR_VAL                                                      0xc0400150
+#define ixLCAC_MC3_CNTL                                                         0xc0400154
+#define ixLCAC_MC3_OVR_SEL                                                      0xc0400158
+#define ixLCAC_MC3_OVR_VAL                                                      0xc040015c
+#define ixLCAC_MC4_CNTL                                                         0xc0400d60
+#define ixLCAC_MC4_OVR_SEL                                                      0xc0400d64
+#define ixLCAC_MC4_OVR_VAL                                                      0xc0400d68
+#define ixLCAC_MC5_CNTL                                                         0xc0400d6c
+#define ixLCAC_MC5_OVR_SEL                                                      0xc0400d70
+#define ixLCAC_MC5_OVR_VAL                                                      0xc0400d74
+#define ixLCAC_MC6_CNTL                                                         0xc0400d78
+#define ixLCAC_MC6_OVR_SEL                                                      0xc0400d7c
+#define ixLCAC_MC6_OVR_VAL                                                      0xc0400d80
+#define ixLCAC_MC7_CNTL                                                         0xc0400d84
+#define ixLCAC_MC7_OVR_SEL                                                      0xc0400d88
+#define ixLCAC_MC7_OVR_VAL                                                      0xc0400d8c
+#define ixLCAC_CPL_CNTL                                                         0xc0400160
+#define ixLCAC_CPL_OVR_SEL                                                      0xc0400164
+#define ixLCAC_CPL_OVR_VAL                                                      0xc0400168
+#define mmROM_SMC_IND_INDEX                                                     0x80
+#define mmROM0_ROM_SMC_IND_INDEX                                                0x80
+#define mmROM1_ROM_SMC_IND_INDEX                                                0x82
+#define mmROM2_ROM_SMC_IND_INDEX                                                0x84
+#define mmROM3_ROM_SMC_IND_INDEX                                                0x86
+#define mmROM_SMC_IND_DATA                                                      0x81
+#define mmROM0_ROM_SMC_IND_DATA                                                 0x81
+#define mmROM1_ROM_SMC_IND_DATA                                                 0x83
+#define mmROM2_ROM_SMC_IND_DATA                                                 0x85
+#define mmROM3_ROM_SMC_IND_DATA                                                 0x87
+#define ixROM_CNTL                                                              0xc0600000
+#define ixPAGE_MIRROR_CNTL                                                      0xc0600004
+#define ixROM_STATUS                                                            0xc0600008
+#define ixCGTT_ROM_CLK_CTRL0                                                    0xc060000c
+#define ixROM_INDEX                                                             0xc0600010
+#define ixROM_DATA                                                              0xc0600014
+#define ixROM_START                                                             0xc0600018
+#define ixROM_SW_CNTL                                                           0xc060001c
+#define ixROM_SW_STATUS                                                         0xc0600020
+#define ixROM_SW_COMMAND                                                        0xc0600024
+#define ixROM_SW_DATA_1                                                         0xc0600028
+#define ixROM_SW_DATA_2                                                         0xc060002c
+#define ixROM_SW_DATA_3                                                         0xc0600030
+#define ixROM_SW_DATA_4                                                         0xc0600034
+#define ixROM_SW_DATA_5                                                         0xc0600038
+#define ixROM_SW_DATA_6                                                         0xc060003c
+#define ixROM_SW_DATA_7                                                         0xc0600040
+#define ixROM_SW_DATA_8                                                         0xc0600044
+#define ixROM_SW_DATA_9                                                         0xc0600048
+#define ixROM_SW_DATA_10                                                        0xc060004c
+#define ixROM_SW_DATA_11                                                        0xc0600050
+#define ixROM_SW_DATA_12                                                        0xc0600054
+#define ixROM_SW_DATA_13                                                        0xc0600058
+#define ixROM_SW_DATA_14                                                        0xc060005c
+#define ixROM_SW_DATA_15                                                        0xc0600060
+#define ixROM_SW_DATA_16                                                        0xc0600064
+#define ixROM_SW_DATA_17                                                        0xc0600068
+#define ixROM_SW_DATA_18                                                        0xc060006c
+#define ixROM_SW_DATA_19                                                        0xc0600070
+#define ixROM_SW_DATA_20                                                        0xc0600074
+#define ixROM_SW_DATA_21                                                        0xc0600078
+#define ixROM_SW_DATA_22                                                        0xc060007c
+#define ixROM_SW_DATA_23                                                        0xc0600080
+#define ixROM_SW_DATA_24                                                        0xc0600084
+#define ixROM_SW_DATA_25                                                        0xc0600088
+#define ixROM_SW_DATA_26                                                        0xc060008c
+#define ixROM_SW_DATA_27                                                        0xc0600090
+#define ixROM_SW_DATA_28                                                        0xc0600094
+#define ixROM_SW_DATA_29                                                        0xc0600098
+#define ixROM_SW_DATA_30                                                        0xc060009c
+#define ixROM_SW_DATA_31                                                        0xc06000a0
+#define ixROM_SW_DATA_32                                                        0xc06000a4
+#define ixROM_SW_DATA_33                                                        0xc06000a8
+#define ixROM_SW_DATA_34                                                        0xc06000ac
+#define ixROM_SW_DATA_35                                                        0xc06000b0
+#define ixROM_SW_DATA_36                                                        0xc06000b4
+#define ixROM_SW_DATA_37                                                        0xc06000b8
+#define ixROM_SW_DATA_38                                                        0xc06000bc
+#define ixROM_SW_DATA_39                                                        0xc06000c0
+#define ixROM_SW_DATA_40                                                        0xc06000c4
+#define ixROM_SW_DATA_41                                                        0xc06000c8
+#define ixROM_SW_DATA_42                                                        0xc06000cc
+#define ixROM_SW_DATA_43                                                        0xc06000d0
+#define ixROM_SW_DATA_44                                                        0xc06000d4
+#define ixROM_SW_DATA_45                                                        0xc06000d8
+#define ixROM_SW_DATA_46                                                        0xc06000dc
+#define ixROM_SW_DATA_47                                                        0xc06000e0
+#define ixROM_SW_DATA_48                                                        0xc06000e4
+#define ixROM_SW_DATA_49                                                        0xc06000e8
+#define ixROM_SW_DATA_50                                                        0xc06000ec
+#define ixROM_SW_DATA_51                                                        0xc06000f0
+#define ixROM_SW_DATA_52                                                        0xc06000f4
+#define ixROM_SW_DATA_53                                                        0xc06000f8
+#define ixROM_SW_DATA_54                                                        0xc06000fc
+#define ixROM_SW_DATA_55                                                        0xc0600100
+#define ixROM_SW_DATA_56                                                        0xc0600104
+#define ixROM_SW_DATA_57                                                        0xc0600108
+#define ixROM_SW_DATA_58                                                        0xc060010c
+#define ixROM_SW_DATA_59                                                        0xc0600110
+#define ixROM_SW_DATA_60                                                        0xc0600114
+#define ixROM_SW_DATA_61                                                        0xc0600118
+#define ixROM_SW_DATA_62                                                        0xc060011c
+#define ixROM_SW_DATA_63                                                        0xc0600120
+#define ixROM_SW_DATA_64                                                        0xc0600124
+#define mmGC_CAC_CGTT_CLK_CTRL                                                  0x3292
+#define mmSE_CAC_CGTT_CLK_CTRL                                                  0x3293
+#define mmGC_CAC_LKG_AGGR_LOWER                                                 0x3296
+#define mmGC_CAC_LKG_AGGR_UPPER                                                 0x3297
+#define ixGC_CAC_WEIGHT_CU_0                                                    0x32
+#define ixGC_CAC_WEIGHT_CU_1                                                    0x33
+#define ixGC_CAC_WEIGHT_CU_2                                                    0x34
+#define ixGC_CAC_WEIGHT_CU_3                                                    0x35
+#define ixGC_CAC_WEIGHT_CU_4                                                    0x36
+#define ixGC_CAC_WEIGHT_CU_5                                                    0x37
+#define ixGC_CAC_WEIGHT_CU_6                                                    0x38
+#define ixGC_CAC_WEIGHT_CU_7                                                    0x39
+#define ixGC_CAC_ACC_CU0                                                        0xba
+#define ixGC_CAC_ACC_CU1                                                        0xbb
+#define ixGC_CAC_ACC_CU2                                                        0xbc
+#define ixGC_CAC_ACC_CU3                                                        0xbd
+#define ixGC_CAC_ACC_CU4                                                        0xbe
+#define ixGC_CAC_ACC_CU5                                                        0xbf
+#define ixGC_CAC_ACC_CU6                                                        0xc0
+#define ixGC_CAC_ACC_CU7                                                        0xc1
+#define ixGC_CAC_ACC_CU8                                                        0xc2
+#define ixGC_CAC_ACC_CU9                                                        0xc3
+#define ixGC_CAC_ACC_CU10                                                       0xc4
+#define ixGC_CAC_ACC_CU11                                                       0xc5
+#define ixGC_CAC_ACC_CU12                                                       0xc6
+#define ixGC_CAC_ACC_CU13                                                       0xc7
+#define ixGC_CAC_ACC_CU14                                                       0xc8
+#define ixGC_CAC_ACC_CU15                                                       0xc9
+#define ixGC_CAC_OVRD_CU                                                        0xe7
+
+#endif /* SMU_7_1_3_D_H */
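
[Editor's illustration, not part of the patch above.] The header just closed defines a classic index/data pair, mmROM_SMC_IND_INDEX (0x80) and mmROM_SMC_IND_DATA (0x81): software selects an indirect SMC address by writing it to the INDEX register, then transfers the value through the DATA register. A minimal sketch of that access pattern, assuming hypothetical mmio_read32()/mmio_write32() MMIO helpers (these helpers are placeholders, not APIs introduced by this patch):

#include <stdint.h>

/* Offsets as defined in the header added above. */
#define mmROM_SMC_IND_INDEX	0x80
#define mmROM_SMC_IND_DATA	0x81

/* Assumed MMIO accessors; a real driver supplies its own. */
extern uint32_t mmio_read32(uint32_t reg);
extern void mmio_write32(uint32_t reg, uint32_t val);

/* Read an SMC register indirectly: write the target address to the
 * INDEX register, then the value becomes readable at DATA. */
static uint32_t smc_ind_read32(uint32_t smc_addr)
{
	mmio_write32(mmROM_SMC_IND_INDEX, smc_addr);
	return mmio_read32(mmROM_SMC_IND_DATA);
}

A write follows the same sequence with the final access inverted: write the address to INDEX, then write the new value to DATA.
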
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h
new file mode 100644
index 0000000..f19c420
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_enum.h
@@ -0,0 +1,1282 @@
+/*
+ * SMU_7_1_3 Register documentation
+ *
+ * Copyright (C) 2014  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SMU_7_1_3_ENUM_H
+#define SMU_7_1_3_ENUM_H
+
+#define CG_SRBM_START_ADDR                        0x600
+#define CG_SRBM_END_ADDR                          0x8ff
+#define RCU_CCF_DWORDS0                           0xa0
+#define RCU_CCF_BITS0                             0x1400
+#define RCU_SAM_BYTES                             0x2c
+#define RCU_SAM_RTL_BYTES                         0x2c
+#define RCU_SMU_BYTES                             0x14
+#define RCU_SMU_RTL_BYTES                         0x14
+#define SFP_CHAIN_ADDR                            0x1
+#define SFP_SADR                                  0x0
+#define SFP_EADR                                  0x37f
+#define SAMU_KEY_CHAIN_ADR                        0x0
+#define SAMU_KEY_SADR                             0x280
+#define SAMU_KEY_EADR                             0x2ab
+#define SMU_KEY_CHAIN_ADR                         0x0
+#define SMU_KEY_SADR                              0x2ac
+#define SMU_KEY_EADR                              0x2bf
+#define SMC_MSG_TEST                              0x1
+#define SMC_MSG_PHY_LN_OFF                        0x2
+#define SMC_MSG_PHY_LN_ON                         0x3
+#define SMC_MSG_DDI_PHY_OFF                       0x4
+#define SMC_MSG_DDI_PHY_ON                        0x5
+#define SMC_MSG_CASCADE_PLL_OFF                   0x6
+#define SMC_MSG_CASCADE_PLL_ON                    0x7
+#define SMC_MSG_PWR_OFF_x16                       0x8
+#define SMC_MSG_CONFIG_LCLK_DPM                   0x9
+#define SMC_MSG_FLUSH_DATA_CACHE                  0xa
+#define SMC_MSG_FLUSH_INSTRUCTION_CACHE           0xb
+#define SMC_MSG_CONFIG_VPC_ACCUMULATOR            0xc
+#define SMC_MSG_CONFIG_BAPM                       0xd
+#define SMC_MSG_CONFIG_TDC_LIMIT                  0xe
+#define SMC_MSG_CONFIG_LPMx                       0xf
+#define SMC_MSG_CONFIG_HTC_LIMIT                  0x10
+#define SMC_MSG_CONFIG_THERMAL_CNTL               0x11
+#define SMC_MSG_CONFIG_VOLTAGE_CNTL               0x12
+#define SMC_MSG_CONFIG_TDP_CNTL                   0x13
+#define SMC_MSG_EN_PM_CNTL                        0x14
+#define SMC_MSG_DIS_PM_CNTL                       0x15
+#define SMC_MSG_CONFIG_NBDPM                      0x16
+#define SMC_MSG_CONFIG_LOADLINE                   0x17
+#define SMC_MSG_ADJUST_LOADLINE                   0x18
+#define SMC_MSG_RESET                             0x20
+#define SMC_MSG_VOLTAGE                           0x25
+#define SMC_VERSION_MAJOR                         0x7
+#define SMC_VERSION_MINOR                         0x0
+#define SMC_HEADER_SIZE                           0x40
+#define ROM_SIGNATURE                             0xaa55
+typedef enum SurfaceEndian {
+	ENDIAN_NONE                                      = 0x0,
+	ENDIAN_8IN16                                     = 0x1,
+	ENDIAN_8IN32                                     = 0x2,
+	ENDIAN_8IN64                                     = 0x3,
+} SurfaceEndian;
+typedef enum ArrayMode {
+	ARRAY_LINEAR_GENERAL                             = 0x0,
+	ARRAY_LINEAR_ALIGNED                             = 0x1,
+	ARRAY_1D_TILED_THIN1                             = 0x2,
+	ARRAY_1D_TILED_THICK                             = 0x3,
+	ARRAY_2D_TILED_THIN1                             = 0x4,
+	ARRAY_PRT_TILED_THIN1                            = 0x5,
+	ARRAY_PRT_2D_TILED_THIN1                         = 0x6,
+	ARRAY_2D_TILED_THICK                             = 0x7,
+	ARRAY_2D_TILED_XTHICK                            = 0x8,
+	ARRAY_PRT_TILED_THICK                            = 0x9,
+	ARRAY_PRT_2D_TILED_THICK                         = 0xa,
+	ARRAY_PRT_3D_TILED_THIN1                         = 0xb,
+	ARRAY_3D_TILED_THIN1                             = 0xc,
+	ARRAY_3D_TILED_THICK                             = 0xd,
+	ARRAY_3D_TILED_XTHICK                            = 0xe,
+	ARRAY_PRT_3D_TILED_THICK                         = 0xf,
+} ArrayMode;
+typedef enum PipeTiling {
+	CONFIG_1_PIPE                                    = 0x0,
+	CONFIG_2_PIPE                                    = 0x1,
+	CONFIG_4_PIPE                                    = 0x2,
+	CONFIG_8_PIPE                                    = 0x3,
+} PipeTiling;
+typedef enum BankTiling {
+	CONFIG_4_BANK                                    = 0x0,
+	CONFIG_8_BANK                                    = 0x1,
+} BankTiling;
+typedef enum GroupInterleave {
+	CONFIG_256B_GROUP                                = 0x0,
+	CONFIG_512B_GROUP                                = 0x1,
+} GroupInterleave;
+typedef enum RowTiling {
+	CONFIG_1KB_ROW                                   = 0x0,
+	CONFIG_2KB_ROW                                   = 0x1,
+	CONFIG_4KB_ROW                                   = 0x2,
+	CONFIG_8KB_ROW                                   = 0x3,
+	CONFIG_1KB_ROW_OPT                               = 0x4,
+	CONFIG_2KB_ROW_OPT                               = 0x5,
+	CONFIG_4KB_ROW_OPT                               = 0x6,
+	CONFIG_8KB_ROW_OPT                               = 0x7,
+} RowTiling;
+typedef enum BankSwapBytes {
+	CONFIG_128B_SWAPS                                = 0x0,
+	CONFIG_256B_SWAPS                                = 0x1,
+	CONFIG_512B_SWAPS                                = 0x2,
+	CONFIG_1KB_SWAPS                                 = 0x3,
+} BankSwapBytes;
+typedef enum SampleSplitBytes {
+	CONFIG_1KB_SPLIT                                 = 0x0,
+	CONFIG_2KB_SPLIT                                 = 0x1,
+	CONFIG_4KB_SPLIT                                 = 0x2,
+	CONFIG_8KB_SPLIT                                 = 0x3,
+} SampleSplitBytes;
+typedef enum NumPipes {
+	ADDR_CONFIG_1_PIPE                               = 0x0,
+	ADDR_CONFIG_2_PIPE                               = 0x1,
+	ADDR_CONFIG_4_PIPE                               = 0x2,
+	ADDR_CONFIG_8_PIPE                               = 0x3,
+} NumPipes;
+typedef enum PipeInterleaveSize {
+	ADDR_CONFIG_PIPE_INTERLEAVE_256B                 = 0x0,
+	ADDR_CONFIG_PIPE_INTERLEAVE_512B                 = 0x1,
+} PipeInterleaveSize;
+typedef enum BankInterleaveSize {
+	ADDR_CONFIG_BANK_INTERLEAVE_1                    = 0x0,
+	ADDR_CONFIG_BANK_INTERLEAVE_2                    = 0x1,
+	ADDR_CONFIG_BANK_INTERLEAVE_4                    = 0x2,
+	ADDR_CONFIG_BANK_INTERLEAVE_8                    = 0x3,
+} BankInterleaveSize;
+typedef enum NumShaderEngines {
+	ADDR_CONFIG_1_SHADER_ENGINE                      = 0x0,
+	ADDR_CONFIG_2_SHADER_ENGINE                      = 0x1,
+} NumShaderEngines;
+typedef enum ShaderEngineTileSize {
+	ADDR_CONFIG_SE_TILE_16                           = 0x0,
+	ADDR_CONFIG_SE_TILE_32                           = 0x1,
+} ShaderEngineTileSize;
+typedef enum NumGPUs {
+	ADDR_CONFIG_1_GPU                                = 0x0,
+	ADDR_CONFIG_2_GPU                                = 0x1,
+	ADDR_CONFIG_4_GPU                                = 0x2,
+} NumGPUs;
+typedef enum MultiGPUTileSize {
+	ADDR_CONFIG_GPU_TILE_16                          = 0x0,
+	ADDR_CONFIG_GPU_TILE_32                          = 0x1,
+	ADDR_CONFIG_GPU_TILE_64                          = 0x2,
+	ADDR_CONFIG_GPU_TILE_128                         = 0x3,
+} MultiGPUTileSize;
+typedef enum RowSize {
+	ADDR_CONFIG_1KB_ROW                              = 0x0,
+	ADDR_CONFIG_2KB_ROW                              = 0x1,
+	ADDR_CONFIG_4KB_ROW                              = 0x2,
+} RowSize;
+typedef enum NumLowerPipes {
+	ADDR_CONFIG_1_LOWER_PIPES                        = 0x0,
+	ADDR_CONFIG_2_LOWER_PIPES                        = 0x1,
+} NumLowerPipes;
+typedef enum DebugBlockId {
+	DBG_CLIENT_BLKID_RESERVED                        = 0x0,
+	DBG_CLIENT_BLKID_dbg                             = 0x1,
+	DBG_CLIENT_BLKID_scf2                            = 0x2,
+	DBG_CLIENT_BLKID_mcd5_0                          = 0x3,
+	DBG_CLIENT_BLKID_mcd5_1                          = 0x4,
+	DBG_CLIENT_BLKID_mcd6_0                          = 0x5,
+	DBG_CLIENT_BLKID_mcd6_1                          = 0x6,
+	DBG_CLIENT_BLKID_mcd7_0                          = 0x7,
+	DBG_CLIENT_BLKID_mcd7_1                          = 0x8,
+	DBG_CLIENT_BLKID_vmc                             = 0x9,
+	DBG_CLIENT_BLKID_sx30                            = 0xa,
+	DBG_CLIENT_BLKID_mcd2_0                          = 0xb,
+	DBG_CLIENT_BLKID_mcd2_1                          = 0xc,
+	DBG_CLIENT_BLKID_bci1                            = 0xd,
+	DBG_CLIENT_BLKID_xdma_dbg_client_wrapper         = 0xe,
+	DBG_CLIENT_BLKID_mcc0                            = 0xf,
+	DBG_CLIENT_BLKID_uvdf_0                          = 0x10,
+	DBG_CLIENT_BLKID_uvdf_1                          = 0x11,
+	DBG_CLIENT_BLKID_uvdf_2                          = 0x12,
+	DBG_CLIENT_BLKID_bci0                            = 0x13,
+	DBG_CLIENT_BLKID_vcec0_0                         = 0x14,
+	DBG_CLIENT_BLKID_cb100                           = 0x15,
+	DBG_CLIENT_BLKID_cb001                           = 0x16,
+	DBG_CLIENT_BLKID_cb002                           = 0x17,
+	DBG_CLIENT_BLKID_cb003                           = 0x18,
+	DBG_CLIENT_BLKID_mcd4_0                          = 0x19,
+	DBG_CLIENT_BLKID_mcd4_1                          = 0x1a,
+	DBG_CLIENT_BLKID_tmonw00                         = 0x1b,
+	DBG_CLIENT_BLKID_cb101                           = 0x1c,
+	DBG_CLIENT_BLKID_cb102                           = 0x1d,
+	DBG_CLIENT_BLKID_cb103                           = 0x1e,
+	DBG_CLIENT_BLKID_sx10                            = 0x1f,
+	DBG_CLIENT_BLKID_cb301                           = 0x20,
+	DBG_CLIENT_BLKID_cb302                           = 0x21,
+	DBG_CLIENT_BLKID_cb303                           = 0x22,
+	DBG_CLIENT_BLKID_tmonw01                         = 0x23,
+	DBG_CLIENT_BLKID_tmonw02                         = 0x24,
+	DBG_CLIENT_BLKID_vcea0_0                         = 0x25,
+	DBG_CLIENT_BLKID_vcea0_1                         = 0x26,
+	DBG_CLIENT_BLKID_vcea0_2                         = 0x27,
+	DBG_CLIENT_BLKID_vcea0_3                         = 0x28,
+	DBG_CLIENT_BLKID_scf1                            = 0x29,
+	DBG_CLIENT_BLKID_sx20                            = 0x2a,
+	DBG_CLIENT_BLKID_spim1                           = 0x2b,
+	DBG_CLIENT_BLKID_scb1                            = 0x2c,
+	DBG_CLIENT_BLKID_pa10                            = 0x2d,
+	DBG_CLIENT_BLKID_pa00                            = 0x2e,
+	DBG_CLIENT_BLKID_gmcon                           = 0x2f,
+	DBG_CLIENT_BLKID_mcb                             = 0x30,
+	DBG_CLIENT_BLKID_vgt0                            = 0x31,
+	DBG_CLIENT_BLKID_pc0                             = 0x32,
+	DBG_CLIENT_BLKID_bci2                            = 0x33,
+	DBG_CLIENT_BLKID_uvdb_0                          = 0x34,
+	DBG_CLIENT_BLKID_spim3                           = 0x35,
+	DBG_CLIENT_BLKID_scb3                            = 0x36,
+	DBG_CLIENT_BLKID_cpc_0                           = 0x37,
+	DBG_CLIENT_BLKID_cpc_1                           = 0x38,
+	DBG_CLIENT_BLKID_uvdm_0                          = 0x39,
+	DBG_CLIENT_BLKID_uvdm_1                          = 0x3a,
+	DBG_CLIENT_BLKID_uvdm_2                          = 0x3b,
+	DBG_CLIENT_BLKID_uvdm_3                          = 0x3c,
+	DBG_CLIENT_BLKID_cb000                           = 0x3d,
+	DBG_CLIENT_BLKID_spim0                           = 0x3e,
+	DBG_CLIENT_BLKID_scb0                            = 0x3f,
+	DBG_CLIENT_BLKID_mcc2                            = 0x40,
+	DBG_CLIENT_BLKID_ds0                             = 0x41,
+	DBG_CLIENT_BLKID_srbm                            = 0x42,
+	DBG_CLIENT_BLKID_ih                              = 0x43,
+	DBG_CLIENT_BLKID_sem                             = 0x44,
+	DBG_CLIENT_BLKID_sdma_0                          = 0x45,
+	DBG_CLIENT_BLKID_sdma_1                          = 0x46,
+	DBG_CLIENT_BLKID_hdp                             = 0x47,
+	DBG_CLIENT_BLKID_acp_0                           = 0x48,
+	DBG_CLIENT_BLKID_acp_1                           = 0x49,
+	DBG_CLIENT_BLKID_cb200                           = 0x4a,
+	DBG_CLIENT_BLKID_scf3                            = 0x4b,
+	DBG_CLIENT_BLKID_bci3                            = 0x4c,
+	DBG_CLIENT_BLKID_mcd0_0                          = 0x4d,
+	DBG_CLIENT_BLKID_mcd0_1                          = 0x4e,
+	DBG_CLIENT_BLKID_pa11                            = 0x4f,
+	DBG_CLIENT_BLKID_pa01                            = 0x50,
+	DBG_CLIENT_BLKID_cb201                           = 0x51,
+	DBG_CLIENT_BLKID_cb202                           = 0x52,
+	DBG_CLIENT_BLKID_cb203                           = 0x53,
+	DBG_CLIENT_BLKID_spim2                           = 0x54,
+	DBG_CLIENT_BLKID_scb2                            = 0x55,
+	DBG_CLIENT_BLKID_vgt2                            = 0x56,
+	DBG_CLIENT_BLKID_pc2                             = 0x57,
+	DBG_CLIENT_BLKID_smu_0                           = 0x58,
+	DBG_CLIENT_BLKID_smu_1                           = 0x59,
+	DBG_CLIENT_BLKID_smu_2                           = 0x5a,
+	DBG_CLIENT_BLKID_cb1                             = 0x5b,
+	DBG_CLIENT_BLKID_ia0                             = 0x5c,
+	DBG_CLIENT_BLKID_wd                              = 0x5d,
+	DBG_CLIENT_BLKID_ia1                             = 0x5e,
+	DBG_CLIENT_BLKID_scf0                            = 0x5f,
+	DBG_CLIENT_BLKID_vgt1                            = 0x60,
+	DBG_CLIENT_BLKID_pc1                             = 0x61,
+	DBG_CLIENT_BLKID_cb0                             = 0x62,
+	DBG_CLIENT_BLKID_gdc_one_0                       = 0x63,
+	DBG_CLIENT_BLKID_gdc_one_1                       = 0x64,
+	DBG_CLIENT_BLKID_gdc_one_2                       = 0x65,
+	DBG_CLIENT_BLKID_gdc_one_3                       = 0x66,
+	DBG_CLIENT_BLKID_gdc_one_4                       = 0x67,
+	DBG_CLIENT_BLKID_gdc_one_5                       = 0x68,
+	DBG_CLIENT_BLKID_gdc_one_6                       = 0x69,
+	DBG_CLIENT_BLKID_gdc_one_7                       = 0x6a,
+	DBG_CLIENT_BLKID_gdc_one_8                       = 0x6b,
+	DBG_CLIENT_BLKID_gdc_one_9                       = 0x6c,
+	DBG_CLIENT_BLKID_gdc_one_10                      = 0x6d,
+	DBG_CLIENT_BLKID_gdc_one_11                      = 0x6e,
+	DBG_CLIENT_BLKID_gdc_one_12                      = 0x6f,
+	DBG_CLIENT_BLKID_gdc_one_13                      = 0x70,
+	DBG_CLIENT_BLKID_gdc_one_14                      = 0x71,
+	DBG_CLIENT_BLKID_gdc_one_15                      = 0x72,
+	DBG_CLIENT_BLKID_gdc_one_16                      = 0x73,
+	DBG_CLIENT_BLKID_gdc_one_17                      = 0x74,
+	DBG_CLIENT_BLKID_gdc_one_18                      = 0x75,
+	DBG_CLIENT_BLKID_gdc_one_19                      = 0x76,
+	DBG_CLIENT_BLKID_gdc_one_20                      = 0x77,
+	DBG_CLIENT_BLKID_gdc_one_21                      = 0x78,
+	DBG_CLIENT_BLKID_gdc_one_22                      = 0x79,
+	DBG_CLIENT_BLKID_gdc_one_23                      = 0x7a,
+	DBG_CLIENT_BLKID_gdc_one_24                      = 0x7b,
+	DBG_CLIENT_BLKID_gdc_one_25                      = 0x7c,
+	DBG_CLIENT_BLKID_gdc_one_26                      = 0x7d,
+	DBG_CLIENT_BLKID_gdc_one_27                      = 0x7e,
+	DBG_CLIENT_BLKID_gdc_one_28                      = 0x7f,
+	DBG_CLIENT_BLKID_gdc_one_29                      = 0x80,
+	DBG_CLIENT_BLKID_gdc_one_30                      = 0x81,
+	DBG_CLIENT_BLKID_gdc_one_31                      = 0x82,
+	DBG_CLIENT_BLKID_gdc_one_32                      = 0x83,
+	DBG_CLIENT_BLKID_gdc_one_33                      = 0x84,
+	DBG_CLIENT_BLKID_gdc_one_34                      = 0x85,
+	DBG_CLIENT_BLKID_gdc_one_35                      = 0x86,
+	DBG_CLIENT_BLKID_vceb0_0                         = 0x87,
+	DBG_CLIENT_BLKID_vgt3                            = 0x88,
+	DBG_CLIENT_BLKID_pc3                             = 0x89,
+	DBG_CLIENT_BLKID_mcd3_0                          = 0x8a,
+	DBG_CLIENT_BLKID_mcd3_1                          = 0x8b,
+	DBG_CLIENT_BLKID_uvdu_0                          = 0x8c,
+	DBG_CLIENT_BLKID_uvdu_1                          = 0x8d,
+	DBG_CLIENT_BLKID_uvdu_2                          = 0x8e,
+	DBG_CLIENT_BLKID_uvdu_3                          = 0x8f,
+	DBG_CLIENT_BLKID_uvdu_4                          = 0x90,
+	DBG_CLIENT_BLKID_uvdu_5                          = 0x91,
+	DBG_CLIENT_BLKID_uvdu_6                          = 0x92,
+	DBG_CLIENT_BLKID_cb300                           = 0x93,
+	DBG_CLIENT_BLKID_mcd1_0                          = 0x94,
+	DBG_CLIENT_BLKID_mcd1_1                          = 0x95,
+	DBG_CLIENT_BLKID_sx00                            = 0x96,
+	DBG_CLIENT_BLKID_uvdc_0                          = 0x97,
+	DBG_CLIENT_BLKID_uvdc_1                          = 0x98,
+	DBG_CLIENT_BLKID_mcc3                            = 0x99,
+	DBG_CLIENT_BLKID_mcc4                            = 0x9a,
+	DBG_CLIENT_BLKID_mcc5                            = 0x9b,
+	DBG_CLIENT_BLKID_mcc6                            = 0x9c,
+	DBG_CLIENT_BLKID_mcc7                            = 0x9d,
+	DBG_CLIENT_BLKID_cpg_0                           = 0x9e,
+	DBG_CLIENT_BLKID_cpg_1                           = 0x9f,
+	DBG_CLIENT_BLKID_gck                             = 0xa0,
+	DBG_CLIENT_BLKID_mcc1                            = 0xa1,
+	DBG_CLIENT_BLKID_cpf_0                           = 0xa2,
+	DBG_CLIENT_BLKID_cpf_1                           = 0xa3,
+	DBG_CLIENT_BLKID_rlc                             = 0xa4,
+	DBG_CLIENT_BLKID_grbm                            = 0xa5,
+	DBG_CLIENT_BLKID_sammsp                          = 0xa6,
+	DBG_CLIENT_BLKID_dci_pg                          = 0xa7,
+	DBG_CLIENT_BLKID_dci_0                           = 0xa8,
+	DBG_CLIENT_BLKID_dccg0_0                         = 0xa9,
+	DBG_CLIENT_BLKID_dccg0_1                         = 0xaa,
+	DBG_CLIENT_BLKID_dcfe01_0                        = 0xab,
+	DBG_CLIENT_BLKID_dcfe02_0                        = 0xac,
+	DBG_CLIENT_BLKID_dcfe03_0                        = 0xad,
+	DBG_CLIENT_BLKID_dcfe04_0                        = 0xae,
+	DBG_CLIENT_BLKID_dcfe05_0                        = 0xaf,
+	DBG_CLIENT_BLKID_dcfe06_0                        = 0xb0,
+	DBG_CLIENT_BLKID_mcq0_0                          = 0xb1,
+	DBG_CLIENT_BLKID_mcq0_1                          = 0xb2,
+	DBG_CLIENT_BLKID_mcq1_0                          = 0xb3,
+	DBG_CLIENT_BLKID_mcq1_1                          = 0xb4,
+	DBG_CLIENT_BLKID_mcq2_0                          = 0xb5,
+	DBG_CLIENT_BLKID_mcq2_1                          = 0xb6,
+	DBG_CLIENT_BLKID_mcq3_0                          = 0xb7,
+	DBG_CLIENT_BLKID_mcq3_1                          = 0xb8,
+	DBG_CLIENT_BLKID_mcq4_0                          = 0xb9,
+	DBG_CLIENT_BLKID_mcq4_1                          = 0xba,
+	DBG_CLIENT_BLKID_mcq5_0                          = 0xbb,
+	DBG_CLIENT_BLKID_mcq5_1                          = 0xbc,
+	DBG_CLIENT_BLKID_mcq6_0                          = 0xbd,
+	DBG_CLIENT_BLKID_mcq6_1                          = 0xbe,
+	DBG_CLIENT_BLKID_mcq7_0                          = 0xbf,
+	DBG_CLIENT_BLKID_mcq7_1                          = 0xc0,
+	DBG_CLIENT_BLKID_uvdi_0                          = 0xc1,
+	DBG_CLIENT_BLKID_RESERVED_LAST                   = 0xc2,
+} DebugBlockId;
+typedef enum DebugBlockId_OLD {
+	DBG_BLOCK_ID_RESERVED                            = 0x0,
+	DBG_BLOCK_ID_DBG                                 = 0x1,
+	DBG_BLOCK_ID_VMC                                 = 0x2,
+	DBG_BLOCK_ID_PDMA                                = 0x3,
+	DBG_BLOCK_ID_CG                                  = 0x4,
+	DBG_BLOCK_ID_SRBM                                = 0x5,
+	DBG_BLOCK_ID_GRBM                                = 0x6,
+	DBG_BLOCK_ID_RLC                                 = 0x7,
+	DBG_BLOCK_ID_CSC                                 = 0x8,
+	DBG_BLOCK_ID_SEM                                 = 0x9,
+	DBG_BLOCK_ID_IH                                  = 0xa,
+	DBG_BLOCK_ID_SC                                  = 0xb,
+	DBG_BLOCK_ID_SQ                                  = 0xc,
+	DBG_BLOCK_ID_AVP                                 = 0xd,
+	DBG_BLOCK_ID_GMCON                               = 0xe,
+	DBG_BLOCK_ID_SMU                                 = 0xf,
+	DBG_BLOCK_ID_DMA0                                = 0x10,
+	DBG_BLOCK_ID_DMA1                                = 0x11,
+	DBG_BLOCK_ID_SPIM                                = 0x12,
+	DBG_BLOCK_ID_GDS                                 = 0x13,
+	DBG_BLOCK_ID_SPIS                                = 0x14,
+	DBG_BLOCK_ID_UNUSED0                             = 0x15,
+	DBG_BLOCK_ID_PA0                                 = 0x16,
+	DBG_BLOCK_ID_PA1                                 = 0x17,
+	DBG_BLOCK_ID_CP0                                 = 0x18,
+	DBG_BLOCK_ID_CP1                                 = 0x19,
+	DBG_BLOCK_ID_CP2                                 = 0x1a,
+	DBG_BLOCK_ID_UNUSED1                             = 0x1b,
+	DBG_BLOCK_ID_UVDU                                = 0x1c,
+	DBG_BLOCK_ID_UVDM                                = 0x1d,
+	DBG_BLOCK_ID_VCE                                 = 0x1e,
+	DBG_BLOCK_ID_UNUSED2                             = 0x1f,
+	DBG_BLOCK_ID_VGT0                                = 0x20,
+	DBG_BLOCK_ID_VGT1                                = 0x21,
+	DBG_BLOCK_ID_IA                                  = 0x22,
+	DBG_BLOCK_ID_UNUSED3                             = 0x23,
+	DBG_BLOCK_ID_SCT0                                = 0x24,
+	DBG_BLOCK_ID_SCT1                                = 0x25,
+	DBG_BLOCK_ID_SPM0                                = 0x26,
+	DBG_BLOCK_ID_SPM1                                = 0x27,
+	DBG_BLOCK_ID_TCAA                                = 0x28,
+	DBG_BLOCK_ID_TCAB                                = 0x29,
+	DBG_BLOCK_ID_TCCA                                = 0x2a,
+	DBG_BLOCK_ID_TCCB                                = 0x2b,
+	DBG_BLOCK_ID_MCC0                                = 0x2c,
+	DBG_BLOCK_ID_MCC1                                = 0x2d,
+	DBG_BLOCK_ID_MCC2                                = 0x2e,
+	DBG_BLOCK_ID_MCC3                                = 0x2f,
+	DBG_BLOCK_ID_SX0                                 = 0x30,
+	DBG_BLOCK_ID_SX1                                 = 0x31,
+	DBG_BLOCK_ID_SX2                                 = 0x32,
+	DBG_BLOCK_ID_SX3                                 = 0x33,
+	DBG_BLOCK_ID_UNUSED4                             = 0x34,
+	DBG_BLOCK_ID_UNUSED5                             = 0x35,
+	DBG_BLOCK_ID_UNUSED6                             = 0x36,
+	DBG_BLOCK_ID_UNUSED7                             = 0x37,
+	DBG_BLOCK_ID_PC0                                 = 0x38,
+	DBG_BLOCK_ID_PC1                                 = 0x39,
+	DBG_BLOCK_ID_UNUSED8                             = 0x3a,
+	DBG_BLOCK_ID_UNUSED9                             = 0x3b,
+	DBG_BLOCK_ID_UNUSED10                            = 0x3c,
+	DBG_BLOCK_ID_UNUSED11                            = 0x3d,
+	DBG_BLOCK_ID_MCB                                 = 0x3e,
+	DBG_BLOCK_ID_UNUSED12                            = 0x3f,
+	DBG_BLOCK_ID_SCB0                                = 0x40,
+	DBG_BLOCK_ID_SCB1                                = 0x41,
+	DBG_BLOCK_ID_UNUSED13                            = 0x42,
+	DBG_BLOCK_ID_UNUSED14                            = 0x43,
+	DBG_BLOCK_ID_SCF0                                = 0x44,
+	DBG_BLOCK_ID_SCF1                                = 0x45,
+	DBG_BLOCK_ID_UNUSED15                            = 0x46,
+	DBG_BLOCK_ID_UNUSED16                            = 0x47,
+	DBG_BLOCK_ID_BCI0                                = 0x48,
+	DBG_BLOCK_ID_BCI1                                = 0x49,
+	DBG_BLOCK_ID_BCI2                                = 0x4a,
+	DBG_BLOCK_ID_BCI3                                = 0x4b,
+	DBG_BLOCK_ID_UNUSED17                            = 0x4c,
+	DBG_BLOCK_ID_UNUSED18                            = 0x4d,
+	DBG_BLOCK_ID_UNUSED19                            = 0x4e,
+	DBG_BLOCK_ID_UNUSED20                            = 0x4f,
+	DBG_BLOCK_ID_CB00                                = 0x50,
+	DBG_BLOCK_ID_CB01                                = 0x51,
+	DBG_BLOCK_ID_CB02                                = 0x52,
+	DBG_BLOCK_ID_CB03                                = 0x53,
+	DBG_BLOCK_ID_CB04                                = 0x54,
+	DBG_BLOCK_ID_UNUSED21                            = 0x55,
+	DBG_BLOCK_ID_UNUSED22                            = 0x56,
+	DBG_BLOCK_ID_UNUSED23                            = 0x57,
+	DBG_BLOCK_ID_CB10                                = 0x58,
+	DBG_BLOCK_ID_CB11                                = 0x59,
+	DBG_BLOCK_ID_CB12                                = 0x5a,
+	DBG_BLOCK_ID_CB13                                = 0x5b,
+	DBG_BLOCK_ID_CB14                                = 0x5c,
+	DBG_BLOCK_ID_UNUSED24                            = 0x5d,
+	DBG_BLOCK_ID_UNUSED25                            = 0x5e,
+	DBG_BLOCK_ID_UNUSED26                            = 0x5f,
+	DBG_BLOCK_ID_TCP0                                = 0x60,
+	DBG_BLOCK_ID_TCP1                                = 0x61,
+	DBG_BLOCK_ID_TCP2                                = 0x62,
+	DBG_BLOCK_ID_TCP3                                = 0x63,
+	DBG_BLOCK_ID_TCP4                                = 0x64,
+	DBG_BLOCK_ID_TCP5                                = 0x65,
+	DBG_BLOCK_ID_TCP6                                = 0x66,
+	DBG_BLOCK_ID_TCP7                                = 0x67,
+	DBG_BLOCK_ID_TCP8                                = 0x68,
+	DBG_BLOCK_ID_TCP9                                = 0x69,
+	DBG_BLOCK_ID_TCP10                               = 0x6a,
+	DBG_BLOCK_ID_TCP11                               = 0x6b,
+	DBG_BLOCK_ID_TCP12                               = 0x6c,
+	DBG_BLOCK_ID_TCP13                               = 0x6d,
+	DBG_BLOCK_ID_TCP14                               = 0x6e,
+	DBG_BLOCK_ID_TCP15                               = 0x6f,
+	DBG_BLOCK_ID_TCP16                               = 0x70,
+	DBG_BLOCK_ID_TCP17                               = 0x71,
+	DBG_BLOCK_ID_TCP18                               = 0x72,
+	DBG_BLOCK_ID_TCP19                               = 0x73,
+	DBG_BLOCK_ID_TCP20                               = 0x74,
+	DBG_BLOCK_ID_TCP21                               = 0x75,
+	DBG_BLOCK_ID_TCP22                               = 0x76,
+	DBG_BLOCK_ID_TCP23                               = 0x77,
+	DBG_BLOCK_ID_TCP_RESERVED0                       = 0x78,
+	DBG_BLOCK_ID_TCP_RESERVED1                       = 0x79,
+	DBG_BLOCK_ID_TCP_RESERVED2                       = 0x7a,
+	DBG_BLOCK_ID_TCP_RESERVED3                       = 0x7b,
+	DBG_BLOCK_ID_TCP_RESERVED4                       = 0x7c,
+	DBG_BLOCK_ID_TCP_RESERVED5                       = 0x7d,
+	DBG_BLOCK_ID_TCP_RESERVED6                       = 0x7e,
+	DBG_BLOCK_ID_TCP_RESERVED7                       = 0x7f,
+	DBG_BLOCK_ID_DB00                                = 0x80,
+	DBG_BLOCK_ID_DB01                                = 0x81,
+	DBG_BLOCK_ID_DB02                                = 0x82,
+	DBG_BLOCK_ID_DB03                                = 0x83,
+	DBG_BLOCK_ID_DB04                                = 0x84,
+	DBG_BLOCK_ID_UNUSED27                            = 0x85,
+	DBG_BLOCK_ID_UNUSED28                            = 0x86,
+	DBG_BLOCK_ID_UNUSED29                            = 0x87,
+	DBG_BLOCK_ID_DB10                                = 0x88,
+	DBG_BLOCK_ID_DB11                                = 0x89,
+	DBG_BLOCK_ID_DB12                                = 0x8a,
+	DBG_BLOCK_ID_DB13                                = 0x8b,
+	DBG_BLOCK_ID_DB14                                = 0x8c,
+	DBG_BLOCK_ID_UNUSED30                            = 0x8d,
+	DBG_BLOCK_ID_UNUSED31                            = 0x8e,
+	DBG_BLOCK_ID_UNUSED32                            = 0x8f,
+	DBG_BLOCK_ID_TCC0                                = 0x90,
+	DBG_BLOCK_ID_TCC1                                = 0x91,
+	DBG_BLOCK_ID_TCC2                                = 0x92,
+	DBG_BLOCK_ID_TCC3                                = 0x93,
+	DBG_BLOCK_ID_TCC4                                = 0x94,
+	DBG_BLOCK_ID_TCC5                                = 0x95,
+	DBG_BLOCK_ID_TCC6                                = 0x96,
+	DBG_BLOCK_ID_TCC7                                = 0x97,
+	DBG_BLOCK_ID_SPS00                               = 0x98,
+	DBG_BLOCK_ID_SPS01                               = 0x99,
+	DBG_BLOCK_ID_SPS02                               = 0x9a,
+	DBG_BLOCK_ID_SPS10                               = 0x9b,
+	DBG_BLOCK_ID_SPS11                               = 0x9c,
+	DBG_BLOCK_ID_SPS12                               = 0x9d,
+	DBG_BLOCK_ID_UNUSED33                            = 0x9e,
+	DBG_BLOCK_ID_UNUSED34                            = 0x9f,
+	DBG_BLOCK_ID_TA00                                = 0xa0,
+	DBG_BLOCK_ID_TA01                                = 0xa1,
+	DBG_BLOCK_ID_TA02                                = 0xa2,
+	DBG_BLOCK_ID_TA03                                = 0xa3,
+	DBG_BLOCK_ID_TA04                                = 0xa4,
+	DBG_BLOCK_ID_TA05                                = 0xa5,
+	DBG_BLOCK_ID_TA06                                = 0xa6,
+	DBG_BLOCK_ID_TA07                                = 0xa7,
+	DBG_BLOCK_ID_TA08                                = 0xa8,
+	DBG_BLOCK_ID_TA09                                = 0xa9,
+	DBG_BLOCK_ID_TA0A                                = 0xaa,
+	DBG_BLOCK_ID_TA0B                                = 0xab,
+	DBG_BLOCK_ID_UNUSED35                            = 0xac,
+	DBG_BLOCK_ID_UNUSED36                            = 0xad,
+	DBG_BLOCK_ID_UNUSED37                            = 0xae,
+	DBG_BLOCK_ID_UNUSED38                            = 0xaf,
+	DBG_BLOCK_ID_TA10                                = 0xb0,
+	DBG_BLOCK_ID_TA11                                = 0xb1,
+	DBG_BLOCK_ID_TA12                                = 0xb2,
+	DBG_BLOCK_ID_TA13                                = 0xb3,
+	DBG_BLOCK_ID_TA14                                = 0xb4,
+	DBG_BLOCK_ID_TA15                                = 0xb5,
+	DBG_BLOCK_ID_TA16                                = 0xb6,
+	DBG_BLOCK_ID_TA17                                = 0xb7,
+	DBG_BLOCK_ID_TA18                                = 0xb8,
+	DBG_BLOCK_ID_TA19                                = 0xb9,
+	DBG_BLOCK_ID_TA1A                                = 0xba,
+	DBG_BLOCK_ID_TA1B                                = 0xbb,
+	DBG_BLOCK_ID_UNUSED39                            = 0xbc,
+	DBG_BLOCK_ID_UNUSED40                            = 0xbd,
+	DBG_BLOCK_ID_UNUSED41                            = 0xbe,
+	DBG_BLOCK_ID_UNUSED42                            = 0xbf,
+	DBG_BLOCK_ID_TD00                                = 0xc0,
+	DBG_BLOCK_ID_TD01                                = 0xc1,
+	DBG_BLOCK_ID_TD02                                = 0xc2,
+	DBG_BLOCK_ID_TD03                                = 0xc3,
+	DBG_BLOCK_ID_TD04                                = 0xc4,
+	DBG_BLOCK_ID_TD05                                = 0xc5,
+	DBG_BLOCK_ID_TD06                                = 0xc6,
+	DBG_BLOCK_ID_TD07                                = 0xc7,
+	DBG_BLOCK_ID_TD08                                = 0xc8,
+	DBG_BLOCK_ID_TD09                                = 0xc9,
+	DBG_BLOCK_ID_TD0A                                = 0xca,
+	DBG_BLOCK_ID_TD0B                                = 0xcb,
+	DBG_BLOCK_ID_UNUSED43                            = 0xcc,
+	DBG_BLOCK_ID_UNUSED44                            = 0xcd,
+	DBG_BLOCK_ID_UNUSED45                            = 0xce,
+	DBG_BLOCK_ID_UNUSED46                            = 0xcf,
+	DBG_BLOCK_ID_TD10                                = 0xd0,
+	DBG_BLOCK_ID_TD11                                = 0xd1,
+	DBG_BLOCK_ID_TD12                                = 0xd2,
+	DBG_BLOCK_ID_TD13                                = 0xd3,
+	DBG_BLOCK_ID_TD14                                = 0xd4,
+	DBG_BLOCK_ID_TD15                                = 0xd5,
+	DBG_BLOCK_ID_TD16                                = 0xd6,
+	DBG_BLOCK_ID_TD17                                = 0xd7,
+	DBG_BLOCK_ID_TD18                                = 0xd8,
+	DBG_BLOCK_ID_TD19                                = 0xd9,
+	DBG_BLOCK_ID_TD1A                                = 0xda,
+	DBG_BLOCK_ID_TD1B                                = 0xdb,
+	DBG_BLOCK_ID_UNUSED47                            = 0xdc,
+	DBG_BLOCK_ID_UNUSED48                            = 0xdd,
+	DBG_BLOCK_ID_UNUSED49                            = 0xde,
+	DBG_BLOCK_ID_UNUSED50                            = 0xdf,
+	DBG_BLOCK_ID_MCD0                                = 0xe0,
+	DBG_BLOCK_ID_MCD1                                = 0xe1,
+	DBG_BLOCK_ID_MCD2                                = 0xe2,
+	DBG_BLOCK_ID_MCD3                                = 0xe3,
+	DBG_BLOCK_ID_MCD4                                = 0xe4,
+	DBG_BLOCK_ID_MCD5                                = 0xe5,
+	DBG_BLOCK_ID_UNUSED51                            = 0xe6,
+	DBG_BLOCK_ID_UNUSED52                            = 0xe7,
+} DebugBlockId_OLD;
+typedef enum DebugBlockId_BY2 {
+	DBG_BLOCK_ID_RESERVED_BY2                        = 0x0,
+	DBG_BLOCK_ID_VMC_BY2                             = 0x1,
+	DBG_BLOCK_ID_CG_BY2                              = 0x2,
+	DBG_BLOCK_ID_GRBM_BY2                            = 0x3,
+	DBG_BLOCK_ID_CSC_BY2                             = 0x4,
+	DBG_BLOCK_ID_IH_BY2                              = 0x5,
+	DBG_BLOCK_ID_SQ_BY2                              = 0x6,
+	DBG_BLOCK_ID_GMCON_BY2                           = 0x7,
+	DBG_BLOCK_ID_DMA0_BY2                            = 0x8,
+	DBG_BLOCK_ID_SPIM_BY2                            = 0x9,
+	DBG_BLOCK_ID_SPIS_BY2                            = 0xa,
+	DBG_BLOCK_ID_PA0_BY2                             = 0xb,
+	DBG_BLOCK_ID_CP0_BY2                             = 0xc,
+	DBG_BLOCK_ID_CP2_BY2                             = 0xd,
+	DBG_BLOCK_ID_UVDU_BY2                            = 0xe,
+	DBG_BLOCK_ID_VCE_BY2                             = 0xf,
+	DBG_BLOCK_ID_VGT0_BY2                            = 0x10,
+	DBG_BLOCK_ID_IA_BY2                              = 0x11,
+	DBG_BLOCK_ID_SCT0_BY2                            = 0x12,
+	DBG_BLOCK_ID_SPM0_BY2                            = 0x13,
+	DBG_BLOCK_ID_TCAA_BY2                            = 0x14,
+	DBG_BLOCK_ID_TCCA_BY2                            = 0x15,
+	DBG_BLOCK_ID_MCC0_BY2                            = 0x16,
+	DBG_BLOCK_ID_MCC2_BY2                            = 0x17,
+	DBG_BLOCK_ID_SX0_BY2                             = 0x18,
+	DBG_BLOCK_ID_SX2_BY2                             = 0x19,
+	DBG_BLOCK_ID_UNUSED4_BY2                         = 0x1a,
+	DBG_BLOCK_ID_UNUSED6_BY2                         = 0x1b,
+	DBG_BLOCK_ID_PC0_BY2                             = 0x1c,
+	DBG_BLOCK_ID_UNUSED8_BY2                         = 0x1d,
+	DBG_BLOCK_ID_UNUSED10_BY2                        = 0x1e,
+	DBG_BLOCK_ID_MCB_BY2                             = 0x1f,
+	DBG_BLOCK_ID_SCB0_BY2                            = 0x20,
+	DBG_BLOCK_ID_UNUSED13_BY2                        = 0x21,
+	DBG_BLOCK_ID_SCF0_BY2                            = 0x22,
+	DBG_BLOCK_ID_UNUSED15_BY2                        = 0x23,
+	DBG_BLOCK_ID_BCI0_BY2                            = 0x24,
+	DBG_BLOCK_ID_BCI2_BY2                            = 0x25,
+	DBG_BLOCK_ID_UNUSED17_BY2                        = 0x26,
+	DBG_BLOCK_ID_UNUSED19_BY2                        = 0x27,
+	DBG_BLOCK_ID_CB00_BY2                            = 0x28,
+	DBG_BLOCK_ID_CB02_BY2                            = 0x29,
+	DBG_BLOCK_ID_CB04_BY2                            = 0x2a,
+	DBG_BLOCK_ID_UNUSED22_BY2                        = 0x2b,
+	DBG_BLOCK_ID_CB10_BY2                            = 0x2c,
+	DBG_BLOCK_ID_CB12_BY2                            = 0x2d,
+	DBG_BLOCK_ID_CB14_BY2                            = 0x2e,
+	DBG_BLOCK_ID_UNUSED25_BY2                        = 0x2f,
+	DBG_BLOCK_ID_TCP0_BY2                            = 0x30,
+	DBG_BLOCK_ID_TCP2_BY2                            = 0x31,
+	DBG_BLOCK_ID_TCP4_BY2                            = 0x32,
+	DBG_BLOCK_ID_TCP6_BY2                            = 0x33,
+	DBG_BLOCK_ID_TCP8_BY2                            = 0x34,
+	DBG_BLOCK_ID_TCP10_BY2                           = 0x35,
+	DBG_BLOCK_ID_TCP12_BY2                           = 0x36,
+	DBG_BLOCK_ID_TCP14_BY2                           = 0x37,
+	DBG_BLOCK_ID_TCP16_BY2                           = 0x38,
+	DBG_BLOCK_ID_TCP18_BY2                           = 0x39,
+	DBG_BLOCK_ID_TCP20_BY2                           = 0x3a,
+	DBG_BLOCK_ID_TCP22_BY2                           = 0x3b,
+	DBG_BLOCK_ID_TCP_RESERVED0_BY2                   = 0x3c,
+	DBG_BLOCK_ID_TCP_RESERVED2_BY2                   = 0x3d,
+	DBG_BLOCK_ID_TCP_RESERVED4_BY2                   = 0x3e,
+	DBG_BLOCK_ID_TCP_RESERVED6_BY2                   = 0x3f,
+	DBG_BLOCK_ID_DB00_BY2                            = 0x40,
+	DBG_BLOCK_ID_DB02_BY2                            = 0x41,
+	DBG_BLOCK_ID_DB04_BY2                            = 0x42,
+	DBG_BLOCK_ID_UNUSED28_BY2                        = 0x43,
+	DBG_BLOCK_ID_DB10_BY2                            = 0x44,
+	DBG_BLOCK_ID_DB12_BY2                            = 0x45,
+	DBG_BLOCK_ID_DB14_BY2                            = 0x46,
+	DBG_BLOCK_ID_UNUSED31_BY2                        = 0x47,
+	DBG_BLOCK_ID_TCC0_BY2                            = 0x48,
+	DBG_BLOCK_ID_TCC2_BY2                            = 0x49,
+	DBG_BLOCK_ID_TCC4_BY2                            = 0x4a,
+	DBG_BLOCK_ID_TCC6_BY2                            = 0x4b,
+	DBG_BLOCK_ID_SPS00_BY2                           = 0x4c,
+	DBG_BLOCK_ID_SPS02_BY2                           = 0x4d,
+	DBG_BLOCK_ID_SPS11_BY2                           = 0x4e,
+	DBG_BLOCK_ID_UNUSED33_BY2                        = 0x4f,
+	DBG_BLOCK_ID_TA00_BY2                            = 0x50,
+	DBG_BLOCK_ID_TA02_BY2                            = 0x51,
+	DBG_BLOCK_ID_TA04_BY2                            = 0x52,
+	DBG_BLOCK_ID_TA06_BY2                            = 0x53,
+	DBG_BLOCK_ID_TA08_BY2                            = 0x54,
+	DBG_BLOCK_ID_TA0A_BY2                            = 0x55,
+	DBG_BLOCK_ID_UNUSED35_BY2                        = 0x56,
+	DBG_BLOCK_ID_UNUSED37_BY2                        = 0x57,
+	DBG_BLOCK_ID_TA10_BY2                            = 0x58,
+	DBG_BLOCK_ID_TA12_BY2                            = 0x59,
+	DBG_BLOCK_ID_TA14_BY2                            = 0x5a,
+	DBG_BLOCK_ID_TA16_BY2                            = 0x5b,
+	DBG_BLOCK_ID_TA18_BY2                            = 0x5c,
+	DBG_BLOCK_ID_TA1A_BY2                            = 0x5d,
+	DBG_BLOCK_ID_UNUSED39_BY2                        = 0x5e,
+	DBG_BLOCK_ID_UNUSED41_BY2                        = 0x5f,
+	DBG_BLOCK_ID_TD00_BY2                            = 0x60,
+	DBG_BLOCK_ID_TD02_BY2                            = 0x61,
+	DBG_BLOCK_ID_TD04_BY2                            = 0x62,
+	DBG_BLOCK_ID_TD06_BY2                            = 0x63,
+	DBG_BLOCK_ID_TD08_BY2                            = 0x64,
+	DBG_BLOCK_ID_TD0A_BY2                            = 0x65,
+	DBG_BLOCK_ID_UNUSED43_BY2                        = 0x66,
+	DBG_BLOCK_ID_UNUSED45_BY2                        = 0x67,
+	DBG_BLOCK_ID_TD10_BY2                            = 0x68,
+	DBG_BLOCK_ID_TD12_BY2                            = 0x69,
+	DBG_BLOCK_ID_TD14_BY2                            = 0x6a,
+	DBG_BLOCK_ID_TD16_BY2                            = 0x6b,
+	DBG_BLOCK_ID_TD18_BY2                            = 0x6c,
+	DBG_BLOCK_ID_TD1A_BY2                            = 0x6d,
+	DBG_BLOCK_ID_UNUSED47_BY2                        = 0x6e,
+	DBG_BLOCK_ID_UNUSED49_BY2                        = 0x6f,
+	DBG_BLOCK_ID_MCD0_BY2                            = 0x70,
+	DBG_BLOCK_ID_MCD2_BY2                            = 0x71,
+	DBG_BLOCK_ID_MCD4_BY2                            = 0x72,
+	DBG_BLOCK_ID_UNUSED51_BY2                        = 0x73,
+} DebugBlockId_BY2;
+typedef enum DebugBlockId_BY4 {
+	DBG_BLOCK_ID_RESERVED_BY4                        = 0x0,
+	DBG_BLOCK_ID_CG_BY4                              = 0x1,
+	DBG_BLOCK_ID_CSC_BY4                             = 0x2,
+	DBG_BLOCK_ID_SQ_BY4                              = 0x3,
+	DBG_BLOCK_ID_DMA0_BY4                            = 0x4,
+	DBG_BLOCK_ID_SPIS_BY4                            = 0x5,
+	DBG_BLOCK_ID_CP0_BY4                             = 0x6,
+	DBG_BLOCK_ID_UVDU_BY4                            = 0x7,
+	DBG_BLOCK_ID_VGT0_BY4                            = 0x8,
+	DBG_BLOCK_ID_SCT0_BY4                            = 0x9,
+	DBG_BLOCK_ID_TCAA_BY4                            = 0xa,
+	DBG_BLOCK_ID_MCC0_BY4                            = 0xb,
+	DBG_BLOCK_ID_SX0_BY4                             = 0xc,
+	DBG_BLOCK_ID_UNUSED4_BY4                         = 0xd,
+	DBG_BLOCK_ID_PC0_BY4                             = 0xe,
+	DBG_BLOCK_ID_UNUSED10_BY4                        = 0xf,
+	DBG_BLOCK_ID_SCB0_BY4                            = 0x10,
+	DBG_BLOCK_ID_SCF0_BY4                            = 0x11,
+	DBG_BLOCK_ID_BCI0_BY4                            = 0x12,
+	DBG_BLOCK_ID_UNUSED17_BY4                        = 0x13,
+	DBG_BLOCK_ID_CB00_BY4                            = 0x14,
+	DBG_BLOCK_ID_CB04_BY4                            = 0x15,
+	DBG_BLOCK_ID_CB10_BY4                            = 0x16,
+	DBG_BLOCK_ID_CB14_BY4                            = 0x17,
+	DBG_BLOCK_ID_TCP0_BY4                            = 0x18,
+	DBG_BLOCK_ID_TCP4_BY4                            = 0x19,
+	DBG_BLOCK_ID_TCP8_BY4                            = 0x1a,
+	DBG_BLOCK_ID_TCP12_BY4                           = 0x1b,
+	DBG_BLOCK_ID_TCP16_BY4                           = 0x1c,
+	DBG_BLOCK_ID_TCP20_BY4                           = 0x1d,
+	DBG_BLOCK_ID_TCP_RESERVED0_BY4                   = 0x1e,
+	DBG_BLOCK_ID_TCP_RESERVED4_BY4                   = 0x1f,
+	DBG_BLOCK_ID_DB_BY4                              = 0x20,
+	DBG_BLOCK_ID_DB04_BY4                            = 0x21,
+	DBG_BLOCK_ID_DB10_BY4                            = 0x22,
+	DBG_BLOCK_ID_DB14_BY4                            = 0x23,
+	DBG_BLOCK_ID_TCC0_BY4                            = 0x24,
+	DBG_BLOCK_ID_TCC4_BY4                            = 0x25,
+	DBG_BLOCK_ID_SPS00_BY4                           = 0x26,
+	DBG_BLOCK_ID_SPS11_BY4                           = 0x27,
+	DBG_BLOCK_ID_TA00_BY4                            = 0x28,
+	DBG_BLOCK_ID_TA04_BY4                            = 0x29,
+	DBG_BLOCK_ID_TA08_BY4                            = 0x2a,
+	DBG_BLOCK_ID_UNUSED35_BY4                        = 0x2b,
+	DBG_BLOCK_ID_TA10_BY4                            = 0x2c,
+	DBG_BLOCK_ID_TA14_BY4                            = 0x2d,
+	DBG_BLOCK_ID_TA18_BY4                            = 0x2e,
+	DBG_BLOCK_ID_UNUSED39_BY4                        = 0x2f,
+	DBG_BLOCK_ID_TD00_BY4                            = 0x30,
+	DBG_BLOCK_ID_TD04_BY4                            = 0x31,
+	DBG_BLOCK_ID_TD08_BY4                            = 0x32,
+	DBG_BLOCK_ID_UNUSED43_BY4                        = 0x33,
+	DBG_BLOCK_ID_TD10_BY4                            = 0x34,
+	DBG_BLOCK_ID_TD14_BY4                            = 0x35,
+	DBG_BLOCK_ID_TD18_BY4                            = 0x36,
+	DBG_BLOCK_ID_UNUSED47_BY4                        = 0x37,
+	DBG_BLOCK_ID_MCD0_BY4                            = 0x38,
+	DBG_BLOCK_ID_MCD4_BY4                            = 0x39,
+} DebugBlockId_BY4;
+typedef enum DebugBlockId_BY8 {
+	DBG_BLOCK_ID_RESERVED_BY8                        = 0x0,
+	DBG_BLOCK_ID_CSC_BY8                             = 0x1,
+	DBG_BLOCK_ID_DMA0_BY8                            = 0x2,
+	DBG_BLOCK_ID_CP0_BY8                             = 0x3,
+	DBG_BLOCK_ID_VGT0_BY8                            = 0x4,
+	DBG_BLOCK_ID_TCAA_BY8                            = 0x5,
+	DBG_BLOCK_ID_SX0_BY8                             = 0x6,
+	DBG_BLOCK_ID_PC0_BY8                             = 0x7,
+	DBG_BLOCK_ID_SCB0_BY8                            = 0x8,
+	DBG_BLOCK_ID_BCI0_BY8                            = 0x9,
+	DBG_BLOCK_ID_CB00_BY8                            = 0xa,
+	DBG_BLOCK_ID_CB10_BY8                            = 0xb,
+	DBG_BLOCK_ID_TCP0_BY8                            = 0xc,
+	DBG_BLOCK_ID_TCP8_BY8                            = 0xd,
+	DBG_BLOCK_ID_TCP16_BY8                           = 0xe,
+	DBG_BLOCK_ID_TCP_RESERVED0_BY8                   = 0xf,
+	DBG_BLOCK_ID_DB00_BY8                            = 0x10,
+	DBG_BLOCK_ID_DB10_BY8                            = 0x11,
+	DBG_BLOCK_ID_TCC0_BY8                            = 0x12,
+	DBG_BLOCK_ID_SPS00_BY8                           = 0x13,
+	DBG_BLOCK_ID_TA00_BY8                            = 0x14,
+	DBG_BLOCK_ID_TA08_BY8                            = 0x15,
+	DBG_BLOCK_ID_TA10_BY8                            = 0x16,
+	DBG_BLOCK_ID_TA18_BY8                            = 0x17,
+	DBG_BLOCK_ID_TD00_BY8                            = 0x18,
+	DBG_BLOCK_ID_TD08_BY8                            = 0x19,
+	DBG_BLOCK_ID_TD10_BY8                            = 0x1a,
+	DBG_BLOCK_ID_TD18_BY8                            = 0x1b,
+	DBG_BLOCK_ID_MCD0_BY8                            = 0x1c,
+} DebugBlockId_BY8;
+typedef enum DebugBlockId_BY16 {
+	DBG_BLOCK_ID_RESERVED_BY16                       = 0x0,
+	DBG_BLOCK_ID_DMA0_BY16                           = 0x1,
+	DBG_BLOCK_ID_VGT0_BY16                           = 0x2,
+	DBG_BLOCK_ID_SX0_BY16                            = 0x3,
+	DBG_BLOCK_ID_SCB0_BY16                           = 0x4,
+	DBG_BLOCK_ID_CB00_BY16                           = 0x5,
+	DBG_BLOCK_ID_TCP0_BY16                           = 0x6,
+	DBG_BLOCK_ID_TCP16_BY16                          = 0x7,
+	DBG_BLOCK_ID_DB00_BY16                           = 0x8,
+	DBG_BLOCK_ID_TCC0_BY16                           = 0x9,
+	DBG_BLOCK_ID_TA00_BY16                           = 0xa,
+	DBG_BLOCK_ID_TA10_BY16                           = 0xb,
+	DBG_BLOCK_ID_TD00_BY16                           = 0xc,
+	DBG_BLOCK_ID_TD10_BY16                           = 0xd,
+	DBG_BLOCK_ID_MCD0_BY16                           = 0xe,
+} DebugBlockId_BY16;
+typedef enum ColorTransform {
+	DCC_CT_AUTO                                      = 0x0,
+	DCC_CT_NONE                                      = 0x1,
+	ABGR_TO_A_BG_G_RB                                = 0x2,
+	BGRA_TO_BG_G_RB_A                                = 0x3,
+} ColorTransform;
+typedef enum CompareRef {
+	REF_NEVER                                        = 0x0,
+	REF_LESS                                         = 0x1,
+	REF_EQUAL                                        = 0x2,
+	REF_LEQUAL                                       = 0x3,
+	REF_GREATER                                      = 0x4,
+	REF_NOTEQUAL                                     = 0x5,
+	REF_GEQUAL                                       = 0x6,
+	REF_ALWAYS                                       = 0x7,
+} CompareRef;
+typedef enum ReadSize {
+	READ_256_BITS                                    = 0x0,
+	READ_512_BITS                                    = 0x1,
+} ReadSize;
+typedef enum DepthFormat {
+	DEPTH_INVALID                                    = 0x0,
+	DEPTH_16                                         = 0x1,
+	DEPTH_X8_24                                      = 0x2,
+	DEPTH_8_24                                       = 0x3,
+	DEPTH_X8_24_FLOAT                                = 0x4,
+	DEPTH_8_24_FLOAT                                 = 0x5,
+	DEPTH_32_FLOAT                                   = 0x6,
+	DEPTH_X24_8_32_FLOAT                             = 0x7,
+} DepthFormat;
+typedef enum ZFormat {
+	Z_INVALID                                        = 0x0,
+	Z_16                                             = 0x1,
+	Z_24                                             = 0x2,
+	Z_32_FLOAT                                       = 0x3,
+} ZFormat;
+typedef enum StencilFormat {
+	STENCIL_INVALID                                  = 0x0,
+	STENCIL_8                                        = 0x1,
+} StencilFormat;
+typedef enum CmaskMode {
+	CMASK_CLEAR_NONE                                 = 0x0,
+	CMASK_CLEAR_ONE                                  = 0x1,
+	CMASK_CLEAR_ALL                                  = 0x2,
+	CMASK_ANY_EXPANDED                               = 0x3,
+	CMASK_ALPHA0_FRAG1                               = 0x4,
+	CMASK_ALPHA0_FRAG2                               = 0x5,
+	CMASK_ALPHA0_FRAG4                               = 0x6,
+	CMASK_ALPHA0_FRAGS                               = 0x7,
+	CMASK_ALPHA1_FRAG1                               = 0x8,
+	CMASK_ALPHA1_FRAG2                               = 0x9,
+	CMASK_ALPHA1_FRAG4                               = 0xa,
+	CMASK_ALPHA1_FRAGS                               = 0xb,
+	CMASK_ALPHAX_FRAG1                               = 0xc,
+	CMASK_ALPHAX_FRAG2                               = 0xd,
+	CMASK_ALPHAX_FRAG4                               = 0xe,
+	CMASK_ALPHAX_FRAGS                               = 0xf,
+} CmaskMode;
+typedef enum QuadExportFormat {
+	EXPORT_UNUSED                                    = 0x0,
+	EXPORT_32_R                                      = 0x1,
+	EXPORT_32_GR                                     = 0x2,
+	EXPORT_32_AR                                     = 0x3,
+	EXPORT_FP16_ABGR                                 = 0x4,
+	EXPORT_UNSIGNED16_ABGR                           = 0x5,
+	EXPORT_SIGNED16_ABGR                             = 0x6,
+	EXPORT_32_ABGR                                   = 0x7,
+} QuadExportFormat;
+typedef enum QuadExportFormatOld {
+	EXPORT_4P_32BPC_ABGR                             = 0x0,
+	EXPORT_4P_16BPC_ABGR                             = 0x1,
+	EXPORT_4P_32BPC_GR                               = 0x2,
+	EXPORT_4P_32BPC_AR                               = 0x3,
+	EXPORT_2P_32BPC_ABGR                             = 0x4,
+	EXPORT_8P_32BPC_R                                = 0x5,
+} QuadExportFormatOld;
+typedef enum ColorFormat {
+	COLOR_INVALID                                    = 0x0,
+	COLOR_8                                          = 0x1,
+	COLOR_16                                         = 0x2,
+	COLOR_8_8                                        = 0x3,
+	COLOR_32                                         = 0x4,
+	COLOR_16_16                                      = 0x5,
+	COLOR_10_11_11                                   = 0x6,
+	COLOR_11_11_10                                   = 0x7,
+	COLOR_10_10_10_2                                 = 0x8,
+	COLOR_2_10_10_10                                 = 0x9,
+	COLOR_8_8_8_8                                    = 0xa,
+	COLOR_32_32                                      = 0xb,
+	COLOR_16_16_16_16                                = 0xc,
+	COLOR_RESERVED_13                                = 0xd,
+	COLOR_32_32_32_32                                = 0xe,
+	COLOR_RESERVED_15                                = 0xf,
+	COLOR_5_6_5                                      = 0x10,
+	COLOR_1_5_5_5                                    = 0x11,
+	COLOR_5_5_5_1                                    = 0x12,
+	COLOR_4_4_4_4                                    = 0x13,
+	COLOR_8_24                                       = 0x14,
+	COLOR_24_8                                       = 0x15,
+	COLOR_X24_8_32_FLOAT                             = 0x16,
+	COLOR_RESERVED_23                                = 0x17,
+} ColorFormat;
+typedef enum SurfaceFormat {
+	FMT_INVALID                                      = 0x0,
+	FMT_8                                            = 0x1,
+	FMT_16                                           = 0x2,
+	FMT_8_8                                          = 0x3,
+	FMT_32                                           = 0x4,
+	FMT_16_16                                        = 0x5,
+	FMT_10_11_11                                     = 0x6,
+	FMT_11_11_10                                     = 0x7,
+	FMT_10_10_10_2                                   = 0x8,
+	FMT_2_10_10_10                                   = 0x9,
+	FMT_8_8_8_8                                      = 0xa,
+	FMT_32_32                                        = 0xb,
+	FMT_16_16_16_16                                  = 0xc,
+	FMT_32_32_32                                     = 0xd,
+	FMT_32_32_32_32                                  = 0xe,
+	FMT_RESERVED_4                                   = 0xf,
+	FMT_5_6_5                                        = 0x10,
+	FMT_1_5_5_5                                      = 0x11,
+	FMT_5_5_5_1                                      = 0x12,
+	FMT_4_4_4_4                                      = 0x13,
+	FMT_8_24                                         = 0x14,
+	FMT_24_8                                         = 0x15,
+	FMT_X24_8_32_FLOAT                               = 0x16,
+	FMT_RESERVED_33                                  = 0x17,
+	FMT_11_11_10_FLOAT                               = 0x18,
+	FMT_16_FLOAT                                     = 0x19,
+	FMT_32_FLOAT                                     = 0x1a,
+	FMT_16_16_FLOAT                                  = 0x1b,
+	FMT_8_24_FLOAT                                   = 0x1c,
+	FMT_24_8_FLOAT                                   = 0x1d,
+	FMT_32_32_FLOAT                                  = 0x1e,
+	FMT_10_11_11_FLOAT                               = 0x1f,
+	FMT_16_16_16_16_FLOAT                            = 0x20,
+	FMT_3_3_2                                        = 0x21,
+	FMT_6_5_5                                        = 0x22,
+	FMT_32_32_32_32_FLOAT                            = 0x23,
+	FMT_RESERVED_36                                  = 0x24,
+	FMT_1                                            = 0x25,
+	FMT_1_REVERSED                                   = 0x26,
+	FMT_GB_GR                                        = 0x27,
+	FMT_BG_RG                                        = 0x28,
+	FMT_32_AS_8                                      = 0x29,
+	FMT_32_AS_8_8                                    = 0x2a,
+	FMT_5_9_9_9_SHAREDEXP                            = 0x2b,
+	FMT_8_8_8                                        = 0x2c,
+	FMT_16_16_16                                     = 0x2d,
+	FMT_16_16_16_FLOAT                               = 0x2e,
+	FMT_4_4                                          = 0x2f,
+	FMT_32_32_32_FLOAT                               = 0x30,
+	FMT_BC1                                          = 0x31,
+	FMT_BC2                                          = 0x32,
+	FMT_BC3                                          = 0x33,
+	FMT_BC4                                          = 0x34,
+	FMT_BC5                                          = 0x35,
+	FMT_BC6                                          = 0x36,
+	FMT_BC7                                          = 0x37,
+	FMT_32_AS_32_32_32_32                            = 0x38,
+	FMT_APC3                                         = 0x39,
+	FMT_APC4                                         = 0x3a,
+	FMT_APC5                                         = 0x3b,
+	FMT_APC6                                         = 0x3c,
+	FMT_APC7                                         = 0x3d,
+	FMT_CTX1                                         = 0x3e,
+	FMT_RESERVED_63                                  = 0x3f,
+} SurfaceFormat;
+typedef enum BUF_DATA_FORMAT {
+	BUF_DATA_FORMAT_INVALID                          = 0x0,
+	BUF_DATA_FORMAT_8                                = 0x1,
+	BUF_DATA_FORMAT_16                               = 0x2,
+	BUF_DATA_FORMAT_8_8                              = 0x3,
+	BUF_DATA_FORMAT_32                               = 0x4,
+	BUF_DATA_FORMAT_16_16                            = 0x5,
+	BUF_DATA_FORMAT_10_11_11                         = 0x6,
+	BUF_DATA_FORMAT_11_11_10                         = 0x7,
+	BUF_DATA_FORMAT_10_10_10_2                       = 0x8,
+	BUF_DATA_FORMAT_2_10_10_10                       = 0x9,
+	BUF_DATA_FORMAT_8_8_8_8                          = 0xa,
+	BUF_DATA_FORMAT_32_32                            = 0xb,
+	BUF_DATA_FORMAT_16_16_16_16                      = 0xc,
+	BUF_DATA_FORMAT_32_32_32                         = 0xd,
+	BUF_DATA_FORMAT_32_32_32_32                      = 0xe,
+	BUF_DATA_FORMAT_RESERVED_15                      = 0xf,
+} BUF_DATA_FORMAT;
+typedef enum IMG_DATA_FORMAT {
+	IMG_DATA_FORMAT_INVALID                          = 0x0,
+	IMG_DATA_FORMAT_8                                = 0x1,
+	IMG_DATA_FORMAT_16                               = 0x2,
+	IMG_DATA_FORMAT_8_8                              = 0x3,
+	IMG_DATA_FORMAT_32                               = 0x4,
+	IMG_DATA_FORMAT_16_16                            = 0x5,
+	IMG_DATA_FORMAT_10_11_11                         = 0x6,
+	IMG_DATA_FORMAT_11_11_10                         = 0x7,
+	IMG_DATA_FORMAT_10_10_10_2                       = 0x8,
+	IMG_DATA_FORMAT_2_10_10_10                       = 0x9,
+	IMG_DATA_FORMAT_8_8_8_8                          = 0xa,
+	IMG_DATA_FORMAT_32_32                            = 0xb,
+	IMG_DATA_FORMAT_16_16_16_16                      = 0xc,
+	IMG_DATA_FORMAT_32_32_32                         = 0xd,
+	IMG_DATA_FORMAT_32_32_32_32                      = 0xe,
+	IMG_DATA_FORMAT_RESERVED_15                      = 0xf,
+	IMG_DATA_FORMAT_5_6_5                            = 0x10,
+	IMG_DATA_FORMAT_1_5_5_5                          = 0x11,
+	IMG_DATA_FORMAT_5_5_5_1                          = 0x12,
+	IMG_DATA_FORMAT_4_4_4_4                          = 0x13,
+	IMG_DATA_FORMAT_8_24                             = 0x14,
+	IMG_DATA_FORMAT_24_8                             = 0x15,
+	IMG_DATA_FORMAT_X24_8_32                         = 0x16,
+	IMG_DATA_FORMAT_RESERVED_23                      = 0x17,
+	IMG_DATA_FORMAT_RESERVED_24                      = 0x18,
+	IMG_DATA_FORMAT_RESERVED_25                      = 0x19,
+	IMG_DATA_FORMAT_RESERVED_26                      = 0x1a,
+	IMG_DATA_FORMAT_RESERVED_27                      = 0x1b,
+	IMG_DATA_FORMAT_RESERVED_28                      = 0x1c,
+	IMG_DATA_FORMAT_RESERVED_29                      = 0x1d,
+	IMG_DATA_FORMAT_RESERVED_30                      = 0x1e,
+	IMG_DATA_FORMAT_RESERVED_31                      = 0x1f,
+	IMG_DATA_FORMAT_GB_GR                            = 0x20,
+	IMG_DATA_FORMAT_BG_RG                            = 0x21,
+	IMG_DATA_FORMAT_5_9_9_9                          = 0x22,
+	IMG_DATA_FORMAT_BC1                              = 0x23,
+	IMG_DATA_FORMAT_BC2                              = 0x24,
+	IMG_DATA_FORMAT_BC3                              = 0x25,
+	IMG_DATA_FORMAT_BC4                              = 0x26,
+	IMG_DATA_FORMAT_BC5                              = 0x27,
+	IMG_DATA_FORMAT_BC6                              = 0x28,
+	IMG_DATA_FORMAT_BC7                              = 0x29,
+	IMG_DATA_FORMAT_RESERVED_42                      = 0x2a,
+	IMG_DATA_FORMAT_RESERVED_43                      = 0x2b,
+	IMG_DATA_FORMAT_FMASK8_S2_F1                     = 0x2c,
+	IMG_DATA_FORMAT_FMASK8_S4_F1                     = 0x2d,
+	IMG_DATA_FORMAT_FMASK8_S8_F1                     = 0x2e,
+	IMG_DATA_FORMAT_FMASK8_S2_F2                     = 0x2f,
+	IMG_DATA_FORMAT_FMASK8_S4_F2                     = 0x30,
+	IMG_DATA_FORMAT_FMASK8_S4_F4                     = 0x31,
+	IMG_DATA_FORMAT_FMASK16_S16_F1                   = 0x32,
+	IMG_DATA_FORMAT_FMASK16_S8_F2                    = 0x33,
+	IMG_DATA_FORMAT_FMASK32_S16_F2                   = 0x34,
+	IMG_DATA_FORMAT_FMASK32_S8_F4                    = 0x35,
+	IMG_DATA_FORMAT_FMASK32_S8_F8                    = 0x36,
+	IMG_DATA_FORMAT_FMASK64_S16_F4                   = 0x37,
+	IMG_DATA_FORMAT_FMASK64_S16_F8                   = 0x38,
+	IMG_DATA_FORMAT_4_4                              = 0x39,
+	IMG_DATA_FORMAT_6_5_5                            = 0x3a,
+	IMG_DATA_FORMAT_1                                = 0x3b,
+	IMG_DATA_FORMAT_1_REVERSED                       = 0x3c,
+	IMG_DATA_FORMAT_32_AS_8                          = 0x3d,
+	IMG_DATA_FORMAT_32_AS_8_8                        = 0x3e,
+	IMG_DATA_FORMAT_32_AS_32_32_32_32                = 0x3f,
+} IMG_DATA_FORMAT;
+typedef enum BUF_NUM_FORMAT {
+	BUF_NUM_FORMAT_UNORM                             = 0x0,
+	BUF_NUM_FORMAT_SNORM                             = 0x1,
+	BUF_NUM_FORMAT_USCALED                           = 0x2,
+	BUF_NUM_FORMAT_SSCALED                           = 0x3,
+	BUF_NUM_FORMAT_UINT                              = 0x4,
+	BUF_NUM_FORMAT_SINT                              = 0x5,
+	BUF_NUM_FORMAT_RESERVED_6                        = 0x6,
+	BUF_NUM_FORMAT_FLOAT                             = 0x7,
+} BUF_NUM_FORMAT;
+typedef enum IMG_NUM_FORMAT {
+	IMG_NUM_FORMAT_UNORM                             = 0x0,
+	IMG_NUM_FORMAT_SNORM                             = 0x1,
+	IMG_NUM_FORMAT_USCALED                           = 0x2,
+	IMG_NUM_FORMAT_SSCALED                           = 0x3,
+	IMG_NUM_FORMAT_UINT                              = 0x4,
+	IMG_NUM_FORMAT_SINT                              = 0x5,
+	IMG_NUM_FORMAT_RESERVED_6                        = 0x6,
+	IMG_NUM_FORMAT_FLOAT                             = 0x7,
+	IMG_NUM_FORMAT_RESERVED_8                        = 0x8,
+	IMG_NUM_FORMAT_SRGB                              = 0x9,
+	IMG_NUM_FORMAT_RESERVED_10                       = 0xa,
+	IMG_NUM_FORMAT_RESERVED_11                       = 0xb,
+	IMG_NUM_FORMAT_RESERVED_12                       = 0xc,
+	IMG_NUM_FORMAT_RESERVED_13                       = 0xd,
+	IMG_NUM_FORMAT_RESERVED_14                       = 0xe,
+	IMG_NUM_FORMAT_RESERVED_15                       = 0xf,
+} IMG_NUM_FORMAT;
+typedef enum TileType {
+	ARRAY_COLOR_TILE                                 = 0x0,
+	ARRAY_DEPTH_TILE                                 = 0x1,
+} TileType;
+typedef enum NonDispTilingOrder {
+	ADDR_SURF_MICRO_TILING_DISPLAY                   = 0x0,
+	ADDR_SURF_MICRO_TILING_NON_DISPLAY               = 0x1,
+} NonDispTilingOrder;
+typedef enum MicroTileMode {
+	ADDR_SURF_DISPLAY_MICRO_TILING                   = 0x0,
+	ADDR_SURF_THIN_MICRO_TILING                      = 0x1,
+	ADDR_SURF_DEPTH_MICRO_TILING                     = 0x2,
+	ADDR_SURF_ROTATED_MICRO_TILING                   = 0x3,
+	ADDR_SURF_THICK_MICRO_TILING                     = 0x4,
+} MicroTileMode;
+typedef enum TileSplit {
+	ADDR_SURF_TILE_SPLIT_64B                         = 0x0,
+	ADDR_SURF_TILE_SPLIT_128B                        = 0x1,
+	ADDR_SURF_TILE_SPLIT_256B                        = 0x2,
+	ADDR_SURF_TILE_SPLIT_512B                        = 0x3,
+	ADDR_SURF_TILE_SPLIT_1KB                         = 0x4,
+	ADDR_SURF_TILE_SPLIT_2KB                         = 0x5,
+	ADDR_SURF_TILE_SPLIT_4KB                         = 0x6,
+} TileSplit;
+typedef enum SampleSplit {
+	ADDR_SURF_SAMPLE_SPLIT_1                         = 0x0,
+	ADDR_SURF_SAMPLE_SPLIT_2                         = 0x1,
+	ADDR_SURF_SAMPLE_SPLIT_4                         = 0x2,
+	ADDR_SURF_SAMPLE_SPLIT_8                         = 0x3,
+} SampleSplit;
+typedef enum PipeConfig {
+	ADDR_SURF_P2                                     = 0x0,
+	ADDR_SURF_P2_RESERVED0                           = 0x1,
+	ADDR_SURF_P2_RESERVED1                           = 0x2,
+	ADDR_SURF_P2_RESERVED2                           = 0x3,
+	ADDR_SURF_P4_8x16                                = 0x4,
+	ADDR_SURF_P4_16x16                               = 0x5,
+	ADDR_SURF_P4_16x32                               = 0x6,
+	ADDR_SURF_P4_32x32                               = 0x7,
+	ADDR_SURF_P8_16x16_8x16                          = 0x8,
+	ADDR_SURF_P8_16x32_8x16                          = 0x9,
+	ADDR_SURF_P8_32x32_8x16                          = 0xa,
+	ADDR_SURF_P8_16x32_16x16                         = 0xb,
+	ADDR_SURF_P8_32x32_16x16                         = 0xc,
+	ADDR_SURF_P8_32x32_16x32                         = 0xd,
+	ADDR_SURF_P8_32x64_32x32                         = 0xe,
+	ADDR_SURF_P8_RESERVED0                           = 0xf,
+	ADDR_SURF_P16_32x32_8x16                         = 0x10,
+	ADDR_SURF_P16_32x32_16x16                        = 0x11,
+} PipeConfig;
+typedef enum NumBanks {
+	ADDR_SURF_2_BANK                                 = 0x0,
+	ADDR_SURF_4_BANK                                 = 0x1,
+	ADDR_SURF_8_BANK                                 = 0x2,
+	ADDR_SURF_16_BANK                                = 0x3,
+} NumBanks;
+typedef enum BankWidth {
+	ADDR_SURF_BANK_WIDTH_1                           = 0x0,
+	ADDR_SURF_BANK_WIDTH_2                           = 0x1,
+	ADDR_SURF_BANK_WIDTH_4                           = 0x2,
+	ADDR_SURF_BANK_WIDTH_8                           = 0x3,
+} BankWidth;
+typedef enum BankHeight {
+	ADDR_SURF_BANK_HEIGHT_1                          = 0x0,
+	ADDR_SURF_BANK_HEIGHT_2                          = 0x1,
+	ADDR_SURF_BANK_HEIGHT_4                          = 0x2,
+	ADDR_SURF_BANK_HEIGHT_8                          = 0x3,
+} BankHeight;
+typedef enum BankWidthHeight {
+	ADDR_SURF_BANK_WH_1                              = 0x0,
+	ADDR_SURF_BANK_WH_2                              = 0x1,
+	ADDR_SURF_BANK_WH_4                              = 0x2,
+	ADDR_SURF_BANK_WH_8                              = 0x3,
+} BankWidthHeight;
+typedef enum MacroTileAspect {
+	ADDR_SURF_MACRO_ASPECT_1                         = 0x0,
+	ADDR_SURF_MACRO_ASPECT_2                         = 0x1,
+	ADDR_SURF_MACRO_ASPECT_4                         = 0x2,
+	ADDR_SURF_MACRO_ASPECT_8                         = 0x3,
+} MacroTileAspect;
+typedef enum GATCL1RequestType {
+	GATCL1_TYPE_NORMAL                               = 0x0,
+	GATCL1_TYPE_SHOOTDOWN                            = 0x1,
+	GATCL1_TYPE_BYPASS                               = 0x2,
+} GATCL1RequestType;
+typedef enum TCC_CACHE_POLICIES {
+	TCC_CACHE_POLICY_LRU                             = 0x0,
+	TCC_CACHE_POLICY_STREAM                          = 0x1,
+} TCC_CACHE_POLICIES;
+typedef enum MTYPE {
+	MTYPE_NC_NV                                      = 0x0,
+	MTYPE_NC                                         = 0x1,
+	MTYPE_CC                                         = 0x2,
+	MTYPE_UC                                         = 0x3,
+} MTYPE;
+typedef enum PERFMON_COUNTER_MODE {
+	PERFMON_COUNTER_MODE_ACCUM                       = 0x0,
+	PERFMON_COUNTER_MODE_ACTIVE_CYCLES               = 0x1,
+	PERFMON_COUNTER_MODE_MAX                         = 0x2,
+	PERFMON_COUNTER_MODE_DIRTY                       = 0x3,
+	PERFMON_COUNTER_MODE_SAMPLE                      = 0x4,
+	PERFMON_COUNTER_MODE_CYCLES_SINCE_FIRST_EVENT    = 0x5,
+	PERFMON_COUNTER_MODE_CYCLES_SINCE_LAST_EVENT     = 0x6,
+	PERFMON_COUNTER_MODE_CYCLES_GE_HI                = 0x7,
+	PERFMON_COUNTER_MODE_CYCLES_EQ_HI                = 0x8,
+	PERFMON_COUNTER_MODE_INACTIVE_CYCLES             = 0x9,
+	PERFMON_COUNTER_MODE_RESERVED                    = 0xf,
+} PERFMON_COUNTER_MODE;
+typedef enum PERFMON_SPM_MODE {
+	PERFMON_SPM_MODE_OFF                             = 0x0,
+	PERFMON_SPM_MODE_16BIT_CLAMP                     = 0x1,
+	PERFMON_SPM_MODE_16BIT_NO_CLAMP                  = 0x2,
+	PERFMON_SPM_MODE_32BIT_CLAMP                     = 0x3,
+	PERFMON_SPM_MODE_32BIT_NO_CLAMP                  = 0x4,
+	PERFMON_SPM_MODE_RESERVED_5                      = 0x5,
+	PERFMON_SPM_MODE_RESERVED_6                      = 0x6,
+	PERFMON_SPM_MODE_RESERVED_7                      = 0x7,
+	PERFMON_SPM_MODE_TEST_MODE_0                     = 0x8,
+	PERFMON_SPM_MODE_TEST_MODE_1                     = 0x9,
+	PERFMON_SPM_MODE_TEST_MODE_2                     = 0xa,
+} PERFMON_SPM_MODE;
+typedef enum SurfaceTiling {
+	ARRAY_LINEAR                                     = 0x0,
+	ARRAY_TILED                                      = 0x1,
+} SurfaceTiling;
+typedef enum SurfaceArray {
+	ARRAY_1D                                         = 0x0,
+	ARRAY_2D                                         = 0x1,
+	ARRAY_3D                                         = 0x2,
+	ARRAY_3D_SLICE                                   = 0x3,
+} SurfaceArray;
+typedef enum ColorArray {
+	ARRAY_2D_ALT_COLOR                               = 0x0,
+	ARRAY_2D_COLOR                                   = 0x1,
+	ARRAY_3D_SLICE_COLOR                             = 0x3,
+} ColorArray;
+typedef enum DepthArray {
+	ARRAY_2D_ALT_DEPTH                               = 0x0,
+	ARRAY_2D_DEPTH                                   = 0x1,
+} DepthArray;
+typedef enum ENUM_NUM_SIMD_PER_CU {
+	NUM_SIMD_PER_CU                                  = 0x4,
+} ENUM_NUM_SIMD_PER_CU;
+typedef enum MEM_PWR_FORCE_CTRL {
+	NO_FORCE_REQUEST                                 = 0x0,
+	FORCE_LIGHT_SLEEP_REQUEST                        = 0x1,
+	FORCE_DEEP_SLEEP_REQUEST                         = 0x2,
+	FORCE_SHUT_DOWN_REQUEST                          = 0x3,
+} MEM_PWR_FORCE_CTRL;
+typedef enum MEM_PWR_FORCE_CTRL2 {
+	NO_FORCE_REQ                                     = 0x0,
+	FORCE_LIGHT_SLEEP_REQ                            = 0x1,
+} MEM_PWR_FORCE_CTRL2;
+typedef enum MEM_PWR_DIS_CTRL {
+	ENABLE_MEM_PWR_CTRL                              = 0x0,
+	DISABLE_MEM_PWR_CTRL                             = 0x1,
+} MEM_PWR_DIS_CTRL;
+typedef enum MEM_PWR_SEL_CTRL {
+	DYNAMIC_SHUT_DOWN_ENABLE                         = 0x0,
+	DYNAMIC_DEEP_SLEEP_ENABLE                        = 0x1,
+	DYNAMIC_LIGHT_SLEEP_ENABLE                       = 0x2,
+} MEM_PWR_SEL_CTRL;
+typedef enum MEM_PWR_SEL_CTRL2 {
+	DYNAMIC_DEEP_SLEEP_EN                            = 0x0,
+	DYNAMIC_LIGHT_SLEEP_EN                           = 0x1,
+} MEM_PWR_SEL_CTRL2;
+
+#endif /* SMU_7_1_3_ENUM_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
new file mode 100644
index 0000000..1ede9e2
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
@@ -0,0 +1,6080 @@
+/*
+ * SMU_7_1_3 Register documentation
+ *
+ * Copyright (C) 2014  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SMU_7_1_3_SH_MASK_H
+#define SMU_7_1_3_SH_MASK_H
+
+#define GCK_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define GCK_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define GCK_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define GCK_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define GCK_MCLK_FUSES__StartupMClkDid_MASK 0x7f
+#define GCK_MCLK_FUSES__StartupMClkDid__SHIFT 0x0
+#define GCK_MCLK_FUSES__MClkADCA_MASK 0x780
+#define GCK_MCLK_FUSES__MClkADCA__SHIFT 0x7
+#define GCK_MCLK_FUSES__MClkDDCA_MASK 0x1800
+#define GCK_MCLK_FUSES__MClkDDCA__SHIFT 0xb
+#define GCK_MCLK_FUSES__MClkDiDtWait_MASK 0xe000
+#define GCK_MCLK_FUSES__MClkDiDtWait__SHIFT 0xd
+#define GCK_MCLK_FUSES__MClkDiDtFloor_MASK 0x30000
+#define GCK_MCLK_FUSES__MClkDiDtFloor__SHIFT 0x10
+#define CG_DCLK_CNTL__DCLK_DIVIDER_MASK 0x7f
+#define CG_DCLK_CNTL__DCLK_DIVIDER__SHIFT 0x0
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK 0x100
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_DCLK_CNTL__DCLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_DCLK_STATUS__DCLK_STATUS_MASK 0x1
+#define CG_DCLK_STATUS__DCLK_STATUS__SHIFT 0x0
+#define CG_DCLK_STATUS__DCLK_DIR_CNTL_DONETOG_MASK 0x2
+#define CG_DCLK_STATUS__DCLK_DIR_CNTL_DONETOG__SHIFT 0x1
+#define CG_VCLK_CNTL__VCLK_DIVIDER_MASK 0x7f
+#define CG_VCLK_CNTL__VCLK_DIVIDER__SHIFT 0x0
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_EN_MASK 0x100
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_VCLK_CNTL__VCLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_VCLK_STATUS__VCLK_STATUS_MASK 0x1
+#define CG_VCLK_STATUS__VCLK_STATUS__SHIFT 0x0
+#define CG_VCLK_STATUS__VCLK_DIR_CNTL_DONETOG_MASK 0x2
+#define CG_VCLK_STATUS__VCLK_DIR_CNTL_DONETOG__SHIFT 0x1
+#define CG_ECLK_CNTL__ECLK_DIVIDER_MASK 0x7f
+#define CG_ECLK_CNTL__ECLK_DIVIDER__SHIFT 0x0
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK 0x100
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_ECLK_CNTL__ECLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_ECLK_STATUS__ECLK_STATUS_MASK 0x1
+#define CG_ECLK_STATUS__ECLK_STATUS__SHIFT 0x0
+#define CG_ECLK_STATUS__ECLK_DIR_CNTL_DONETOG_MASK 0x2
+#define CG_ECLK_STATUS__ECLK_DIR_CNTL_DONETOG__SHIFT 0x1
+#define CG_ACLK_CNTL__ACLK_DIVIDER_MASK 0x7f
+#define CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT 0x0
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_EN_MASK 0x100
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_ACLK_CNTL__ACLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_MCLK_CNTL__MCLK_DIVIDER_MASK 0x7f
+#define CG_MCLK_CNTL__MCLK_DIVIDER__SHIFT 0x0
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_EN_MASK 0x100
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_EN__SHIFT 0x8
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_TOG_MASK 0x200
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_TOG__SHIFT 0x9
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_DIVIDER_MASK 0x1fc00
+#define CG_MCLK_CNTL__MCLK_DIR_CNTL_DIVIDER__SHIFT 0xa
+#define CG_MCLK_STATUS__MCLK_STATUS_MASK 0x1
+#define CG_MCLK_STATUS__MCLK_STATUS__SHIFT 0x0
+#define CG_MCLK_STATUS__MCLK_DIR_CNTL_DONETOG_MASK 0x2
+#define CG_MCLK_STATUS__MCLK_DIR_CNTL_DONETOG__SHIFT 0x1
+#define GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK 0x1
+#define GCK_DFS_BYPASS_CNTL__BYPASSECLK__SHIFT 0x0
+#define GCK_DFS_BYPASS_CNTL__BYPASSLCLK_MASK 0x2
+#define GCK_DFS_BYPASS_CNTL__BYPASSLCLK__SHIFT 0x1
+#define GCK_DFS_BYPASS_CNTL__BYPASSEVCLK_MASK 0x4
+#define GCK_DFS_BYPASS_CNTL__BYPASSEVCLK__SHIFT 0x2
+#define GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK 0x8
+#define GCK_DFS_BYPASS_CNTL__BYPASSDCLK__SHIFT 0x3
+#define GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK 0x10
+#define GCK_DFS_BYPASS_CNTL__BYPASSVCLK__SHIFT 0x4
+#define GCK_DFS_BYPASS_CNTL__BYPASSDISPCLK_MASK 0x20
+#define GCK_DFS_BYPASS_CNTL__BYPASSDISPCLK__SHIFT 0x5
+#define GCK_DFS_BYPASS_CNTL__BYPASSDPREFCLK_MASK 0x40
+#define GCK_DFS_BYPASS_CNTL__BYPASSDPREFCLK__SHIFT 0x6
+#define GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK 0x80
+#define GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT 0x7
+#define GCK_DFS_BYPASS_CNTL__BYPASSADIVCLK_MASK 0x100
+#define GCK_DFS_BYPASS_CNTL__BYPASSADIVCLK__SHIFT 0x8
+#define GCK_DFS_BYPASS_CNTL__BYPASSPSPCLK_MASK 0x200
+#define GCK_DFS_BYPASS_CNTL__BYPASSPSPCLK__SHIFT 0x9
+#define GCK_DFS_BYPASS_CNTL__BYPASSSAMCLK_MASK 0x400
+#define GCK_DFS_BYPASS_CNTL__BYPASSSAMCLK__SHIFT 0xa
+#define GCK_DFS_BYPASS_CNTL__BYPASSSCLK_MASK 0x800
+#define GCK_DFS_BYPASS_CNTL__BYPASSSCLK__SHIFT 0xb
+#define GCK_DFS_BYPASS_CNTL__USE_SPLL_BYPASS_EN_MASK 0x1000
+#define GCK_DFS_BYPASS_CNTL__USE_SPLL_BYPASS_EN__SHIFT 0xc
+#define GCK_DFS_BYPASS_CNTL__BYPASSMCLK_MASK 0x2000
+#define GCK_DFS_BYPASS_CNTL__BYPASSMCLK__SHIFT 0xd
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK 0x1
+#define CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK 0x2
+#define CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT 0x1
+#define CG_SPLL_FUNC_CNTL__SPLL_DIVEN_MASK 0x4
+#define CG_SPLL_FUNC_CNTL__SPLL_DIVEN__SHIFT 0x2
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK 0x8
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT 0x3
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_THRU_DFS_MASK 0x10
+#define CG_SPLL_FUNC_CNTL__SPLL_BYPASS_THRU_DFS__SHIFT 0x4
+#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK 0x7e0
+#define CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT 0x5
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_UPDATE_MASK 0x800
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_UPDATE__SHIFT 0xb
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_EN_MASK 0x1000
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_EN__SHIFT 0xc
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK 0x7f00000
+#define CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT 0x14
+#define CG_SPLL_FUNC_CNTL__SPLL_DIVA_ACK_MASK 0x8000000
+#define CG_SPLL_FUNC_CNTL__SPLL_DIVA_ACK__SHIFT 0x1b
+#define CG_SPLL_FUNC_CNTL__SPLL_OTEST_LOCK_EN_MASK 0x10000000
+#define CG_SPLL_FUNC_CNTL__SPLL_OTEST_LOCK_EN__SHIFT 0x1c
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK 0x1ff
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_MASK 0x800
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ__SHIFT 0xb
+#define CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK 0x400000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT 0x16
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK 0x800000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT 0x17
+#define CG_SPLL_FUNC_CNTL_2__SPLL_RESET_CHG_MASK 0x1000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_RESET_CHG__SHIFT 0x18
+#define CG_SPLL_FUNC_CNTL_2__SPLL_BABY_STEP_CHG_MASK 0x2000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_BABY_STEP_CHG__SHIFT 0x19
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE_MASK 0x4000000
+#define CG_SPLL_FUNC_CNTL_2__SCLK_MUX_UPDATE__SHIFT 0x1a
+#define CG_SPLL_FUNC_CNTL_2__SPLL_UNLOCK_CLEAR_MASK 0x8000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_UNLOCK_CLEAR__SHIFT 0x1b
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CLKF_UPDATE_MASK 0x10000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_CLKF_UPDATE__SHIFT 0x1c
+#define CG_SPLL_FUNC_CNTL_2__SPLL_TEST_UNLOCK_CLR_MASK 0x40000000
+#define CG_SPLL_FUNC_CNTL_2__SPLL_TEST_UNLOCK_CLR__SHIFT 0x1e
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK 0x3ffffff
+#define CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK 0x10000000
+#define CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN__SHIFT 0x1c
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_TEST_SEL_MASK 0xf
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_TEST_SEL__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_SEL_MASK 0x60
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_SEL__SHIFT 0x5
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EN_MASK 0x180
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EN__SHIFT 0x7
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK 0xe00
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE__SHIFT 0x9
+#define CG_SPLL_FUNC_CNTL_4__PCC_INC_DIV_MASK 0x7f000
+#define CG_SPLL_FUNC_CNTL_4__PCC_INC_DIV__SHIFT 0xc
+#define CG_SPLL_FUNC_CNTL_4__TEST_FRAC_BYPASS_MASK 0x200000
+#define CG_SPLL_FUNC_CNTL_4__TEST_FRAC_BYPASS__SHIFT 0x15
+#define CG_SPLL_FUNC_CNTL_4__SPLL_ILOCK_MASK 0x800000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_ILOCK__SHIFT 0x17
+#define CG_SPLL_FUNC_CNTL_4__SPLL_FBCLK_SEL_MASK 0x1000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_FBCLK_SEL__SHIFT 0x18
+#define CG_SPLL_FUNC_CNTL_4__SPLL_VCTRLADC_EN_MASK 0x2000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_VCTRLADC_EN__SHIFT 0x19
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT_MASK 0xc000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SCLK_EXT__SHIFT 0x1a
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_EXT_MASK 0x70000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_EXT__SHIFT 0x1c
+#define CG_SPLL_FUNC_CNTL_4__SPLL_VTOI_BIAS_CNTL_MASK 0x80000000
+#define CG_SPLL_FUNC_CNTL_4__SPLL_VTOI_BIAS_CNTL__SHIFT 0x1f
+#define CG_SPLL_FUNC_CNTL_5__FBDIV_SSC_BYPASS_MASK 0x1
+#define CG_SPLL_FUNC_CNTL_5__FBDIV_SSC_BYPASS__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_5__RISEFBVCO_EN_MASK 0x2
+#define CG_SPLL_FUNC_CNTL_5__RISEFBVCO_EN__SHIFT 0x1
+#define CG_SPLL_FUNC_CNTL_5__PFD_RESET_CNTRL_MASK 0xc
+#define CG_SPLL_FUNC_CNTL_5__PFD_RESET_CNTRL__SHIFT 0x2
+#define CG_SPLL_FUNC_CNTL_5__RESET_TIMER_MASK 0x30
+#define CG_SPLL_FUNC_CNTL_5__RESET_TIMER__SHIFT 0x4
+#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_CNTRL_MASK 0xc0
+#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_CNTRL__SHIFT 0x6
+#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_EN_MASK 0x100
+#define CG_SPLL_FUNC_CNTL_5__FAST_LOCK_EN__SHIFT 0x8
+#define CG_SPLL_FUNC_CNTL_5__RESET_ANTI_MUX_MASK 0x200
+#define CG_SPLL_FUNC_CNTL_5__RESET_ANTI_MUX__SHIFT 0x9
+#define CG_SPLL_FUNC_CNTL_6__SCLKMUX0_CLKOFF_CNT_MASK 0xff
+#define CG_SPLL_FUNC_CNTL_6__SCLKMUX0_CLKOFF_CNT__SHIFT 0x0
+#define CG_SPLL_FUNC_CNTL_6__SCLKMUX1_CLKOFF_CNT_MASK 0xff00
+#define CG_SPLL_FUNC_CNTL_6__SCLKMUX1_CLKOFF_CNT__SHIFT 0x8
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_EN_MASK 0x10000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_EN__SHIFT 0x10
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_IN_MASK 0x1e0000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_IN__SHIFT 0x11
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_OUT_MASK 0x1e00000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_VCTL_CNTRL_OUT__SHIFT 0x15
+#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR_MASK 0xfe000000
+#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
+#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
+#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
+#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
+#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
+#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV__SHIFT 0x1
+#define SPLL_CNTL_MODE__SPLL_TEST_MASK 0x4
+#define SPLL_CNTL_MODE__SPLL_TEST__SHIFT 0x2
+#define SPLL_CNTL_MODE__SPLL_FASTEN_MASK 0x8
+#define SPLL_CNTL_MODE__SPLL_FASTEN__SHIFT 0x3
+#define SPLL_CNTL_MODE__SPLL_ENSAT_MASK 0x10
+#define SPLL_CNTL_MODE__SPLL_ENSAT__SHIFT 0x4
+#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV_MASK 0xc00
+#define SPLL_CNTL_MODE__SPLL_TEST_CLK_EXT_DIV__SHIFT 0xa
+#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT_MASK 0xff000
+#define SPLL_CNTL_MODE__SPLL_CTLREQ_DLY_CNT__SHIFT 0xc
+#define SPLL_CNTL_MODE__SPLL_RESET_EN_MASK 0x10000000
+#define SPLL_CNTL_MODE__SPLL_RESET_EN__SHIFT 0x1c
+#define SPLL_CNTL_MODE__SPLL_VCO_MODE_MASK 0x60000000
+#define SPLL_CNTL_MODE__SPLL_VCO_MODE__SHIFT 0x1d
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK 0x1
+#define CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT 0x0
+#define CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK 0xfff0
+#define CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT 0x4
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK 0x3ffffff
+#define CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT 0x0
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK 0xff00
+#define MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT 0x8
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK 0x2
+#define CG_CLKPIN_CNTL__XTALIN_DIVIDE__SHIFT 0x1
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK 0x4
+#define CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT 0x2
+#define CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK 0x1
+#define CG_CLKPIN_CNTL_2__ENABLE_XCLK__SHIFT 0x0
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK 0x8
+#define CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT 0x3
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK_MASK 0x100
+#define CG_CLKPIN_CNTL_2__MUX_TCLK_TO_XCLK__SHIFT 0x8
+#define CG_CLKPIN_CNTL_2__XO_IN_OSCIN_EN_MASK 0x4000
+#define CG_CLKPIN_CNTL_2__XO_IN_OSCIN_EN__SHIFT 0xe
+#define CG_CLKPIN_CNTL_2__XO_IN_ICORE_CLK_OE_MASK 0x8000
+#define CG_CLKPIN_CNTL_2__XO_IN_ICORE_CLK_OE__SHIFT 0xf
+#define CG_CLKPIN_CNTL_2__XO_IN_CML_RXEN_MASK 0x10000
+#define CG_CLKPIN_CNTL_2__XO_IN_CML_RXEN__SHIFT 0x10
+#define CG_CLKPIN_CNTL_2__XO_IN_BIDIR_CML_OE_MASK 0x20000
+#define CG_CLKPIN_CNTL_2__XO_IN_BIDIR_CML_OE__SHIFT 0x11
+#define CG_CLKPIN_CNTL_2__XO_IN2_OSCIN_EN_MASK 0x40000
+#define CG_CLKPIN_CNTL_2__XO_IN2_OSCIN_EN__SHIFT 0x12
+#define CG_CLKPIN_CNTL_2__XO_IN2_ICORE_CLK_OE_MASK 0x80000
+#define CG_CLKPIN_CNTL_2__XO_IN2_ICORE_CLK_OE__SHIFT 0x13
+#define CG_CLKPIN_CNTL_2__XO_IN2_CML_RXEN_MASK 0x100000
+#define CG_CLKPIN_CNTL_2__XO_IN2_CML_RXEN__SHIFT 0x14
+#define CG_CLKPIN_CNTL_2__XO_IN2_BIDIR_CML_OE_MASK 0x200000
+#define CG_CLKPIN_CNTL_2__XO_IN2_BIDIR_CML_OE__SHIFT 0x15
+#define CG_CLKPIN_CNTL_2__CML_CTRL_MASK 0xc00000
+#define CG_CLKPIN_CNTL_2__CML_CTRL__SHIFT 0x16
+#define CG_CLKPIN_CNTL_2__CLK_SPARE_MASK 0xff000000
+#define CG_CLKPIN_CNTL_2__CLK_SPARE__SHIFT 0x18
+#define CG_CLKPIN_CNTL_DC__OSC_EN_MASK 0x1
+#define CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT 0x0
+#define CG_CLKPIN_CNTL_DC__XTL_LOW_GAIN_MASK 0x6
+#define CG_CLKPIN_CNTL_DC__XTL_LOW_GAIN__SHIFT 0x1
+#define CG_CLKPIN_CNTL_DC__XTL_XOCLK_DRV_R_EN_MASK 0x200
+#define CG_CLKPIN_CNTL_DC__XTL_XOCLK_DRV_R_EN__SHIFT 0x9
+#define CG_CLKPIN_CNTL_DC__XTALIN_SEL_MASK 0x1c00
+#define CG_CLKPIN_CNTL_DC__XTALIN_SEL__SHIFT 0xa
+#define THM_CLK_CNTL__CMON_CLK_SEL_MASK 0xff
+#define THM_CLK_CNTL__CMON_CLK_SEL__SHIFT 0x0
+#define THM_CLK_CNTL__TMON_CLK_SEL_MASK 0xff00
+#define THM_CLK_CNTL__TMON_CLK_SEL__SHIFT 0x8
+#define THM_CLK_CNTL__CTF_CLK_SHUTOFF_EN_MASK 0x10000
+#define THM_CLK_CNTL__CTF_CLK_SHUTOFF_EN__SHIFT 0x10
+#define MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK 0xff
+#define MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT 0x0
+#define MISC_CLK_CTRL__ZCLK_SEL_MASK 0xff00
+#define MISC_CLK_CTRL__ZCLK_SEL__SHIFT 0x8
+#define MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK 0xff0000
+#define MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT 0x10
+#define GCK_PLL_TEST_CNTL__TST_SRC_SEL_MASK 0x1f
+#define GCK_PLL_TEST_CNTL__TST_SRC_SEL__SHIFT 0x0
+#define GCK_PLL_TEST_CNTL__TST_REF_SEL_MASK 0x3e0
+#define GCK_PLL_TEST_CNTL__TST_REF_SEL__SHIFT 0x5
+#define GCK_PLL_TEST_CNTL__REF_TEST_COUNT_MASK 0x1fc00
+#define GCK_PLL_TEST_CNTL__REF_TEST_COUNT__SHIFT 0xa
+#define GCK_PLL_TEST_CNTL__TST_RESET_MASK 0x20000
+#define GCK_PLL_TEST_CNTL__TST_RESET__SHIFT 0x11
+#define GCK_PLL_TEST_CNTL__TST_CLK_SEL_MODE_MASK 0x40000
+#define GCK_PLL_TEST_CNTL__TST_CLK_SEL_MODE__SHIFT 0x12
+#define GCK_PLL_TEST_CNTL_2__TEST_COUNT_MASK 0xfffe0000
+#define GCK_PLL_TEST_CNTL_2__TEST_COUNT__SHIFT 0x11
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ECLK_BYPASS_CNTL_MASK 0x7
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ECLK_BYPASS_CNTL__SHIFT 0x0
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SCLK_BYPASS_CNTL_MASK 0x38
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SCLK_BYPASS_CNTL__SHIFT 0x3
+#define GCK_ADFS_CLK_BYPASS_CNTL1__LCLK_BYPASS_CNTL_MASK 0x1c0
+#define GCK_ADFS_CLK_BYPASS_CNTL1__LCLK_BYPASS_CNTL__SHIFT 0x6
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DCLK_BYPASS_CNTL_MASK 0xe00
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DCLK_BYPASS_CNTL__SHIFT 0x9
+#define GCK_ADFS_CLK_BYPASS_CNTL1__VCLK_BYPASS_CNTL_MASK 0x7000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__VCLK_BYPASS_CNTL__SHIFT 0xc
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DISPCLK_BYPASS_CNTL_MASK 0x38000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DISPCLK_BYPASS_CNTL__SHIFT 0xf
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DRREFCLK_BYPASS_CNTL_MASK 0x1c0000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__DRREFCLK_BYPASS_CNTL__SHIFT 0x12
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_BYPASS_CNTL_MASK 0xe00000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_BYPASS_CNTL__SHIFT 0x15
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SAMCLK_BYPASS_CNTL_MASK 0x7000000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__SAMCLK_BYPASS_CNTL__SHIFT 0x18
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_DIV_BYPASS_CNTL_MASK 0x38000000
+#define GCK_ADFS_CLK_BYPASS_CNTL1__ACLK_DIV_BYPASS_CNTL__SHIFT 0x1b
+#define SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_0__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_1__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_2__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_3__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_4__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_4__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_4__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_4__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_5__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_5__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_5__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_5__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_6__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_6__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_6__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_6__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_INDEX_7__SMC_IND_ADDR_MASK 0xffffffff
+#define SMC_IND_INDEX_7__SMC_IND_ADDR__SHIFT 0x0
+#define SMC_IND_DATA_7__SMC_IND_DATA_MASK 0xffffffff
+#define SMC_IND_DATA_7__SMC_IND_DATA__SHIFT 0x0
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK 0x1
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0__SHIFT 0x0
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1_MASK 0x2
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_1__SHIFT 0x1
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2_MASK 0x4
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_2__SHIFT 0x2
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3_MASK 0x8
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_3__SHIFT 0x3
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_4_MASK 0x10
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_4__SHIFT 0x4
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_5_MASK 0x20
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_5__SHIFT 0x5
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_6_MASK 0x40
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_6__SHIFT 0x6
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_7_MASK 0x80
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_7__SHIFT 0x7
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_8_MASK 0x100
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_8__SHIFT 0x8
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_9_MASK 0x200
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_9__SHIFT 0x9
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_10_MASK 0x400
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_10__SHIFT 0xa
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_11_MASK 0x800
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_11__SHIFT 0xb
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_12_MASK 0x1000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_12__SHIFT 0xc
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_13_MASK 0x2000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_13__SHIFT 0xd
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_14_MASK 0x4000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_14__SHIFT 0xe
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_15_MASK 0x8000
+#define SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_15__SHIFT 0xf
+#define SMC_MESSAGE_0__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_0__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_0__SMC_RESP_MASK 0xffff
+#define SMC_RESP_0__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_1__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_1__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_1__SMC_RESP_MASK 0xffff
+#define SMC_RESP_1__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_2__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_2__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_2__SMC_RESP_MASK 0xffff
+#define SMC_RESP_2__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_3__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_3__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_3__SMC_RESP_MASK 0xffff
+#define SMC_RESP_3__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_4__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_4__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_4__SMC_RESP_MASK 0xffff
+#define SMC_RESP_4__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_5__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_5__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_5__SMC_RESP_MASK 0xffff
+#define SMC_RESP_5__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_6__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_6__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_6__SMC_RESP_MASK 0xffff
+#define SMC_RESP_6__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_7__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_7__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_7__SMC_RESP_MASK 0xffff
+#define SMC_RESP_7__SMC_RESP__SHIFT 0x0
+#define SMC_MSG_ARG_0__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_0__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_1__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_1__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_2__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_2__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_3__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_3__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_4__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_4__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_5__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_5__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_6__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_6__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_7__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_7__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MESSAGE_8__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_8__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_8__SMC_RESP_MASK 0xffff
+#define SMC_RESP_8__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_9__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_9__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_9__SMC_RESP_MASK 0xffff
+#define SMC_RESP_9__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_10__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_10__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_10__SMC_RESP_MASK 0xffff
+#define SMC_RESP_10__SMC_RESP__SHIFT 0x0
+#define SMC_MESSAGE_11__SMC_MSG_MASK 0xffff
+#define SMC_MESSAGE_11__SMC_MSG__SHIFT 0x0
+#define SMC_RESP_11__SMC_RESP_MASK 0xffff
+#define SMC_RESP_11__SMC_RESP__SHIFT 0x0
+#define SMC_MSG_ARG_8__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_8__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_9__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_9__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_10__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_10__SMC_MSG_ARG__SHIFT 0x0
+#define SMC_MSG_ARG_11__SMC_MSG_ARG_MASK 0xffffffff
+#define SMC_MSG_ARG_11__SMC_MSG_ARG__SHIFT 0x0
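+/*
+ * SMC_MESSAGE_n/SMC_RESP_n/SMC_MSG_ARG_n implement the host-to-SMU
+ * mailbox.  The assumed handshake, following the pattern used by the
+ * SMU management code: write any argument to SMC_MSG_ARG_n, write the
+ * message id to SMC_MESSAGE_n, then poll SMC_RESP_n until it becomes
+ * non-zero (the response value encodes success or failure).
+ */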
+#define SMC_SYSCON_RESET_CNTL__rst_reg_MASK 0x1
+#define SMC_SYSCON_RESET_CNTL__rst_reg__SHIFT 0x0
+#define SMC_SYSCON_RESET_CNTL__srbm_soft_rst_override_MASK 0x2
+#define SMC_SYSCON_RESET_CNTL__srbm_soft_rst_override__SHIFT 0x1
+#define SMC_SYSCON_RESET_CNTL__RegReset_MASK 0x40000000
+#define SMC_SYSCON_RESET_CNTL__RegReset__SHIFT 0x1e
+#define SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK 0x1
+#define SMC_SYSCON_CLOCK_CNTL_0__ck_disable__SHIFT 0x0
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_en_MASK 0x2
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_en__SHIFT 0x1
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_timeout_MASK 0xffff00
+#define SMC_SYSCON_CLOCK_CNTL_0__auto_cg_timeout__SHIFT 0x8
+#define SMC_SYSCON_CLOCK_CNTL_0__cken_MASK 0x1000000
+#define SMC_SYSCON_CLOCK_CNTL_0__cken__SHIFT 0x18
+#define SMC_SYSCON_CLOCK_CNTL_1__auto_ck_disable_MASK 0x1
+#define SMC_SYSCON_CLOCK_CNTL_1__auto_ck_disable__SHIFT 0x0
+#define SMC_SYSCON_CLOCK_CNTL_2__wake_on_irq_MASK 0xffffffff
+#define SMC_SYSCON_CLOCK_CNTL_2__wake_on_irq__SHIFT 0x0
+#define SMC_SYSCON_MISC_CNTL__dma_no_outstanding_MASK 0x2
+#define SMC_SYSCON_MISC_CNTL__dma_no_outstanding__SHIFT 0x1
+#define SMC_SYSCON_MSG_ARG_0__smc_msg_arg_MASK 0xffffffff
+#define SMC_SYSCON_MSG_ARG_0__smc_msg_arg__SHIFT 0x0
+#define SMC_PC_C__smc_pc_c_MASK 0xffffffff
+#define SMC_PC_C__smc_pc_c__SHIFT 0x0
+#define SMC_SCRATCH9__SCRATCH_VALUE_MASK 0xffffffff
+#define SMC_SCRATCH9__SCRATCH_VALUE__SHIFT 0x0
+#define GPIOPAD_SW_INT_STAT__SW_INT_STAT_MASK 0x1
+#define GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN_MASK 0xf
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SN__SHIFT 0x0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP_MASK 0xf0
+#define GPIOPAD_STRENGTH__GPIO_STRENGTH_SP__SHIFT 0x4
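+/*
+ * GPIOPAD_A/EN/Y appear to follow the usual radeon/amdgpu pad-control
+ * layout (an assumption based on earlier ASIC headers): A sets the driven
+ * output level, EN enables the output driver, Y reads back the pad state,
+ * and GPIOPAD_MASK selects which pads software may control.
+ */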
+#define GPIOPAD_MASK__GPIO_MASK_MASK 0x7fffffff
+#define GPIOPAD_MASK__GPIO_MASK__SHIFT 0x0
+#define GPIOPAD_A__GPIO_A_MASK 0x7fffffff
+#define GPIOPAD_A__GPIO_A__SHIFT 0x0
+#define GPIOPAD_EN__GPIO_EN_MASK 0x7fffffff
+#define GPIOPAD_EN__GPIO_EN__SHIFT 0x0
+#define GPIOPAD_Y__GPIO_Y_MASK 0x7fffffff
+#define GPIOPAD_Y__GPIO_Y__SHIFT 0x0
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0_MASK 0x1
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0__SHIFT 0x0
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1_MASK 0x2
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1__SHIFT 0x1
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2_MASK 0x4
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2__SHIFT 0x2
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3_MASK 0x8
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3__SHIFT 0x3
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4_MASK 0x10
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4__SHIFT 0x4
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5_MASK 0x20
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5__SHIFT 0x5
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6_MASK 0x40
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6__SHIFT 0x6
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7_MASK 0x80
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7__SHIFT 0x7
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8_MASK 0x100
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8__SHIFT 0x8
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9_MASK 0x200
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9__SHIFT 0x9
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10_MASK 0x400
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10__SHIFT 0xa
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11_MASK 0x800
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11__SHIFT 0xb
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12_MASK 0x1000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12__SHIFT 0xc
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13_MASK 0x2000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13__SHIFT 0xd
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14_MASK 0x4000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14__SHIFT 0xe
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15_MASK 0x8000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15__SHIFT 0xf
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16_MASK 0x10000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16__SHIFT 0x10
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17_MASK 0x20000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17__SHIFT 0x11
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18_MASK 0x40000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18__SHIFT 0x12
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19_MASK 0x80000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19__SHIFT 0x13
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20_MASK 0x100000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20__SHIFT 0x14
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21_MASK 0x200000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21__SHIFT 0x15
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22_MASK 0x400000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22__SHIFT 0x16
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23_MASK 0x800000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23__SHIFT 0x17
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24_MASK 0x1000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24__SHIFT 0x18
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25_MASK 0x2000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25__SHIFT 0x19
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26_MASK 0x4000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26__SHIFT 0x1a
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27_MASK 0x8000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27__SHIFT 0x1b
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28_MASK 0x10000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28__SHIFT 0x1c
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29_MASK 0x20000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29__SHIFT 0x1d
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30_MASK 0x40000000
+#define GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30__SHIFT 0x1e
+#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN_MASK 0x1fffffff
+#define GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN__SHIFT 0x0
+#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN_MASK 0x80000000
+#define GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN__SHIFT 0x1f
+#define GPIOPAD_INT_STAT__GPIO_INT_STAT_MASK 0x1fffffff
+#define GPIOPAD_INT_STAT__GPIO_INT_STAT__SHIFT 0x0
+#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT_MASK 0x80000000
+#define GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT__SHIFT 0x1f
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0_MASK 0x1
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0__SHIFT 0x0
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1_MASK 0x2
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1__SHIFT 0x1
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2_MASK 0x4
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2__SHIFT 0x2
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3_MASK 0x8
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3__SHIFT 0x3
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4_MASK 0x10
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4__SHIFT 0x4
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5_MASK 0x20
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5__SHIFT 0x5
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6_MASK 0x40
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6__SHIFT 0x6
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7_MASK 0x80
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7__SHIFT 0x7
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8_MASK 0x100
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8__SHIFT 0x8
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9_MASK 0x200
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9__SHIFT 0x9
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10_MASK 0x400
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10__SHIFT 0xa
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11_MASK 0x800
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11__SHIFT 0xb
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12_MASK 0x1000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12__SHIFT 0xc
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13_MASK 0x2000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13__SHIFT 0xd
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14_MASK 0x4000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14__SHIFT 0xe
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15_MASK 0x8000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15__SHIFT 0xf
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16_MASK 0x10000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16__SHIFT 0x10
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17_MASK 0x20000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17__SHIFT 0x11
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18_MASK 0x40000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18__SHIFT 0x12
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19_MASK 0x80000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19__SHIFT 0x13
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20_MASK 0x100000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20__SHIFT 0x14
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21_MASK 0x200000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21__SHIFT 0x15
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22_MASK 0x400000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22__SHIFT 0x16
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23_MASK 0x800000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23__SHIFT 0x17
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24_MASK 0x1000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24__SHIFT 0x18
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25_MASK 0x2000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25__SHIFT 0x19
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26_MASK 0x4000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26__SHIFT 0x1a
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27_MASK 0x8000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27__SHIFT 0x1b
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28_MASK 0x10000000
+#define GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28__SHIFT 0x1c
+#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK_MASK 0x80000000
+#define GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK__SHIFT 0x1f
+#define GPIOPAD_INT_EN__GPIO_INT_EN_MASK 0x1fffffff
+#define GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT 0x0
+#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK 0x80000000
+#define GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT 0x1f
+#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE_MASK 0x1fffffff
+#define GPIOPAD_INT_TYPE__GPIO_INT_TYPE__SHIFT 0x0
+#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE_MASK 0x80000000
+#define GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE__SHIFT 0x1f
+#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY_MASK 0x1fffffff
+#define GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY__SHIFT 0x0
+#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY_MASK 0x80000000
+#define GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY__SHIFT 0x1f
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL_MASK 0x1f
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_SEL__SHIFT 0x0
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR_MASK 0x20
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_CLR__SHIFT 0x5
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ_MASK 0x40
+#define GPIOPAD_EXTERN_TRIG_CNTL__EXTERN_TRIG_READ__SHIFT 0x6
+#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL_MASK 0x7fffffff
+#define GPIOPAD_RCVR_SEL__GPIO_RCVR_SEL__SHIFT 0x0
+#define GPIOPAD_PU_EN__GPIO_PU_EN_MASK 0x7fffffff
+#define GPIOPAD_PU_EN__GPIO_PU_EN__SHIFT 0x0
+#define GPIOPAD_PD_EN__GPIO_PD_EN_MASK 0x7fffffff
+#define GPIOPAD_PD_EN__GPIO_PD_EN__SHIFT 0x0
+#define CG_FPS_CNT__FPS_CNT_MASK 0xffffffff
+#define CG_FPS_CNT__FPS_CNT__SHIFT 0x0
+#define SMU_IND_INDEX_0__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_0__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_0__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_0__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_1__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_1__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_1__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_1__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_2__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_2__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_2__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_2__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_3__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_3__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_3__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_3__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_4__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_4__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_4__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_4__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_5__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_5__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_5__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_5__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_6__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_6__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_6__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_6__SMC_IND_DATA__SHIFT 0x0
+#define SMU_IND_INDEX_7__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_IND_INDEX_7__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_IND_DATA_7__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_IND_DATA_7__SMC_IND_DATA__SHIFT 0x0
+#define SMU_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define SMU_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define SMU_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define SMU_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define RCU_UC_EVENTS__RCU_TST_jpc_rep_req_MASK 0x1
+#define RCU_UC_EVENTS__RCU_TST_jpc_rep_req__SHIFT 0x0
+#define RCU_UC_EVENTS__TST_RCU_jpc_rep_done_MASK 0x2
+#define RCU_UC_EVENTS__TST_RCU_jpc_rep_done__SHIFT 0x1
+#define RCU_UC_EVENTS__drv_rst_mode_MASK 0x4
+#define RCU_UC_EVENTS__drv_rst_mode__SHIFT 0x2
+#define RCU_UC_EVENTS__SMU_DC_efuse_status_invalid_MASK 0x8
+#define RCU_UC_EVENTS__SMU_DC_efuse_status_invalid__SHIFT 0x3
+#define RCU_UC_EVENTS__TP_Tester_MASK 0x40
+#define RCU_UC_EVENTS__TP_Tester__SHIFT 0x6
+#define RCU_UC_EVENTS__boot_seq_done_MASK 0x80
+#define RCU_UC_EVENTS__boot_seq_done__SHIFT 0x7
+#define RCU_UC_EVENTS__sclk_deep_sleep_exit_MASK 0x100
+#define RCU_UC_EVENTS__sclk_deep_sleep_exit__SHIFT 0x8
+#define RCU_UC_EVENTS__BREAK_PT1_ACTIVE_MASK 0x200
+#define RCU_UC_EVENTS__BREAK_PT1_ACTIVE__SHIFT 0x9
+#define RCU_UC_EVENTS__BREAK_PT2_ACTIVE_MASK 0x400
+#define RCU_UC_EVENTS__BREAK_PT2_ACTIVE__SHIFT 0xa
+#define RCU_UC_EVENTS__FCH_HALT_MASK 0x800
+#define RCU_UC_EVENTS__FCH_HALT__SHIFT 0xb
+#define RCU_UC_EVENTS__RCU_GIO_fch_lockdown_MASK 0x2000
+#define RCU_UC_EVENTS__RCU_GIO_fch_lockdown__SHIFT 0xd
+#define RCU_UC_EVENTS__INTERRUPTS_ENABLED_MASK 0x10000
+#define RCU_UC_EVENTS__INTERRUPTS_ENABLED__SHIFT 0x10
+#define RCU_UC_EVENTS__RCU_DtmCnt0_Done_MASK 0x20000
+#define RCU_UC_EVENTS__RCU_DtmCnt0_Done__SHIFT 0x11
+#define RCU_UC_EVENTS__RCU_DtmCnt1_Done_MASK 0x40000
+#define RCU_UC_EVENTS__RCU_DtmCnt1_Done__SHIFT 0x12
+#define RCU_UC_EVENTS__RCU_DtmCnt2_Done_MASK 0x80000
+#define RCU_UC_EVENTS__RCU_DtmCnt2_Done__SHIFT 0x13
+#define RCU_UC_EVENTS__irq31_sel_MASK 0x3000000
+#define RCU_UC_EVENTS__irq31_sel__SHIFT 0x18
+#define RCU_MISC_CTRL__REG_DRV_RST_MODE_MASK 0x2
+#define RCU_MISC_CTRL__REG_DRV_RST_MODE__SHIFT 0x1
+#define RCU_MISC_CTRL__REG_RCU_MEMREP_DIS_MASK 0x8
+#define RCU_MISC_CTRL__REG_RCU_MEMREP_DIS__SHIFT 0x3
+#define RCU_MISC_CTRL__REG_CC_FUSE_DISABLE_MASK 0x10
+#define RCU_MISC_CTRL__REG_CC_FUSE_DISABLE__SHIFT 0x4
+#define RCU_MISC_CTRL__REG_SAMU_FUSE_DISABLE_MASK 0x20
+#define RCU_MISC_CTRL__REG_SAMU_FUSE_DISABLE__SHIFT 0x5
+#define RCU_MISC_CTRL__REG_CC_SRBM_RD_DISABLE_MASK 0x100
+#define RCU_MISC_CTRL__REG_CC_SRBM_RD_DISABLE__SHIFT 0x8
+#define RCU_MISC_CTRL__BREAK_PT1_DONE_MASK 0x10000
+#define RCU_MISC_CTRL__BREAK_PT1_DONE__SHIFT 0x10
+#define RCU_MISC_CTRL__BREAK_PT2_DONE_MASK 0x20000
+#define RCU_MISC_CTRL__BREAK_PT2_DONE__SHIFT 0x11
+#define RCU_MISC_CTRL__SAMU_START_MASK 0x400000
+#define RCU_MISC_CTRL__SAMU_START__SHIFT 0x16
+#define RCU_MISC_CTRL__RST_PULSE_WIDTH_MASK 0xff800000
+#define RCU_MISC_CTRL__RST_PULSE_WIDTH__SHIFT 0x17
+#define RCU_VIRT_RESET_REQ__VF_MASK 0xffff
+#define RCU_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define RCU_VIRT_RESET_REQ__PF_MASK 0x80000000
+#define RCU_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define CC_RCU_FUSES__GPU_DIS_MASK 0x2
+#define CC_RCU_FUSES__GPU_DIS__SHIFT 0x1
+#define CC_RCU_FUSES__DEBUG_DISABLE_MASK 0x4
+#define CC_RCU_FUSES__DEBUG_DISABLE__SHIFT 0x2
+#define CC_RCU_FUSES__EFUSE_RD_DISABLE_MASK 0x10
+#define CC_RCU_FUSES__EFUSE_RD_DISABLE__SHIFT 0x4
+#define CC_RCU_FUSES__CG_RST_GLB_REQ_DIS_MASK 0x20
+#define CC_RCU_FUSES__CG_RST_GLB_REQ_DIS__SHIFT 0x5
+#define CC_RCU_FUSES__DRV_RST_MODE_MASK 0x40
+#define CC_RCU_FUSES__DRV_RST_MODE__SHIFT 0x6
+#define CC_RCU_FUSES__ROM_DIS_MASK 0x80
+#define CC_RCU_FUSES__ROM_DIS__SHIFT 0x7
+#define CC_RCU_FUSES__JPC_REP_DISABLE_MASK 0x100
+#define CC_RCU_FUSES__JPC_REP_DISABLE__SHIFT 0x8
+#define CC_RCU_FUSES__RCU_BREAK_POINT1_MASK 0x200
+#define CC_RCU_FUSES__RCU_BREAK_POINT1__SHIFT 0x9
+#define CC_RCU_FUSES__RCU_BREAK_POINT2_MASK 0x400
+#define CC_RCU_FUSES__RCU_BREAK_POINT2__SHIFT 0xa
+#define CC_RCU_FUSES__SMU_IOC_MST_DISABLE_MASK 0x4000
+#define CC_RCU_FUSES__SMU_IOC_MST_DISABLE__SHIFT 0xe
+#define CC_RCU_FUSES__FCH_LOCKOUT_ENABLE_MASK 0x8000
+#define CC_RCU_FUSES__FCH_LOCKOUT_ENABLE__SHIFT 0xf
+#define CC_RCU_FUSES__FCH_XFIRE_FILTER_ENABLE_MASK 0x10000
+#define CC_RCU_FUSES__FCH_XFIRE_FILTER_ENABLE__SHIFT 0x10
+#define CC_RCU_FUSES__XFIRE_DISABLE_MASK 0x20000
+#define CC_RCU_FUSES__XFIRE_DISABLE__SHIFT 0x11
+#define CC_RCU_FUSES__SAMU_FUSE_DISABLE_MASK 0x40000
+#define CC_RCU_FUSES__SAMU_FUSE_DISABLE__SHIFT 0x12
+#define CC_RCU_FUSES__BIF_RST_POLLING_DISABLE_MASK 0x80000
+#define CC_RCU_FUSES__BIF_RST_POLLING_DISABLE__SHIFT 0x13
+#define CC_RCU_FUSES__MEM_HARDREP_EN_MASK 0x200000
+#define CC_RCU_FUSES__MEM_HARDREP_EN__SHIFT 0x15
+#define CC_RCU_FUSES__PCIE_INIT_DISABLE_MASK 0x400000
+#define CC_RCU_FUSES__PCIE_INIT_DISABLE__SHIFT 0x16
+#define CC_RCU_FUSES__DSMU_DISABLE_MASK 0x800000
+#define CC_RCU_FUSES__DSMU_DISABLE__SHIFT 0x17
+#define CC_RCU_FUSES__WRP_FUSE_VALID_MASK 0x1000000
+#define CC_RCU_FUSES__WRP_FUSE_VALID__SHIFT 0x18
+#define CC_RCU_FUSES__PHY_FUSE_VALID_MASK 0x2000000
+#define CC_RCU_FUSES__PHY_FUSE_VALID__SHIFT 0x19
+#define CC_RCU_FUSES__RCU_SPARE_MASK 0xfc000000
+#define CC_RCU_FUSES__RCU_SPARE__SHIFT 0x1a
+#define CC_SMU_MISC_FUSES__IOMMU_V2_DISABLE_MASK 0x2
+#define CC_SMU_MISC_FUSES__IOMMU_V2_DISABLE__SHIFT 0x1
+#define CC_SMU_MISC_FUSES__MinSClkDid_MASK 0x1fc
+#define CC_SMU_MISC_FUSES__MinSClkDid__SHIFT 0x2
+#define CC_SMU_MISC_FUSES__MISC_SPARE_MASK 0x600
+#define CC_SMU_MISC_FUSES__MISC_SPARE__SHIFT 0x9
+#define CC_SMU_MISC_FUSES__PostResetGnbClkDid_MASK 0x3f800
+#define CC_SMU_MISC_FUSES__PostResetGnbClkDid__SHIFT 0xb
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_dtc_half_MASK 0x40000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_dtc_half__SHIFT 0x12
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_half_MASK 0x80000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_half__SHIFT 0x13
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_half_MASK 0x100000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_half__SHIFT 0x14
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_pdc_half_MASK 0x200000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_pdc_half__SHIFT 0x15
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_dis_MASK 0x400000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_ptc_dis__SHIFT 0x16
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_dis_MASK 0x800000
+#define CC_SMU_MISC_FUSES__L2IMU_tn2_itc_dis__SHIFT 0x17
+#define CC_SMU_MISC_FUSES__VCE_DISABLE_MASK 0x8000000
+#define CC_SMU_MISC_FUSES__VCE_DISABLE__SHIFT 0x1b
+#define CC_SMU_MISC_FUSES__IOC_IOMMU_DISABLE_MASK 0x10000000
+#define CC_SMU_MISC_FUSES__IOC_IOMMU_DISABLE__SHIFT 0x1c
+#define CC_SMU_MISC_FUSES__GNB_SPARE_MASK 0x60000000
+#define CC_SMU_MISC_FUSES__GNB_SPARE__SHIFT 0x1d
+#define CC_SCLK_VID_FUSES__SClkVid0_MASK 0xff
+#define CC_SCLK_VID_FUSES__SClkVid0__SHIFT 0x0
+#define CC_SCLK_VID_FUSES__SClkVid1_MASK 0xff00
+#define CC_SCLK_VID_FUSES__SClkVid1__SHIFT 0x8
+#define CC_SCLK_VID_FUSES__SClkVid2_MASK 0xff0000
+#define CC_SCLK_VID_FUSES__SClkVid2__SHIFT 0x10
+#define CC_SCLK_VID_FUSES__SClkVid3_MASK 0xff000000
+#define CC_SCLK_VID_FUSES__SClkVid3__SHIFT 0x18
+#define CC_GIO_IOCCFG_FUSES__NB_REV_ID_MASK 0x7fe
+#define CC_GIO_IOCCFG_FUSES__NB_REV_ID__SHIFT 0x1
+#define CC_GIO_IOC_FUSES__IOC_FUSES_MASK 0x3e
+#define CC_GIO_IOC_FUSES__IOC_FUSES__SHIFT 0x1
+#define CC_SMU_TST_EFUSE1_MISC__RF_RM_6_2_MASK 0x3e
+#define CC_SMU_TST_EFUSE1_MISC__RF_RM_6_2__SHIFT 0x1
+#define CC_SMU_TST_EFUSE1_MISC__RME_MASK 0x40
+#define CC_SMU_TST_EFUSE1_MISC__RME__SHIFT 0x6
+#define CC_SMU_TST_EFUSE1_MISC__MBIST_DISABLE_MASK 0x80
+#define CC_SMU_TST_EFUSE1_MISC__MBIST_DISABLE__SHIFT 0x7
+#define CC_SMU_TST_EFUSE1_MISC__HARD_REPAIR_DISABLE_MASK 0x100
+#define CC_SMU_TST_EFUSE1_MISC__HARD_REPAIR_DISABLE__SHIFT 0x8
+#define CC_SMU_TST_EFUSE1_MISC__SOFT_REPAIR_DISABLE_MASK 0x200
+#define CC_SMU_TST_EFUSE1_MISC__SOFT_REPAIR_DISABLE__SHIFT 0x9
+#define CC_SMU_TST_EFUSE1_MISC__GPU_DIS_MASK 0x400
+#define CC_SMU_TST_EFUSE1_MISC__GPU_DIS__SHIFT 0xa
+#define CC_SMU_TST_EFUSE1_MISC__SMS_PWRDWN_DISABLE_MASK 0x800
+#define CC_SMU_TST_EFUSE1_MISC__SMS_PWRDWN_DISABLE__SHIFT 0xb
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISA_MASK 0x1000
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISA__SHIFT 0xc
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISB_MASK 0x2000
+#define CC_SMU_TST_EFUSE1_MISC__CRBBMP1500_DISB__SHIFT 0xd
+#define CC_SMU_TST_EFUSE1_MISC__RM_RF8_MASK 0x4000
+#define CC_SMU_TST_EFUSE1_MISC__RM_RF8__SHIFT 0xe
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE1_MASK 0x400000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE1__SHIFT 0x16
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE2_MASK 0x800000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE2__SHIFT 0x17
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE3_MASK 0x1000000
+#define CC_SMU_TST_EFUSE1_MISC__DFT_SPARE3__SHIFT 0x18
+#define CC_SMU_TST_EFUSE1_MISC__VCE_DISABLE_MASK 0x2000000
+#define CC_SMU_TST_EFUSE1_MISC__VCE_DISABLE__SHIFT 0x19
+#define CC_SMU_TST_EFUSE1_MISC__DCE_SCAN_DISABLE_MASK 0x4000000
+#define CC_SMU_TST_EFUSE1_MISC__DCE_SCAN_DISABLE__SHIFT 0x1a
+#define CC_TST_ID_STRAPS__DEVICE_ID_MASK 0xffff0
+#define CC_TST_ID_STRAPS__DEVICE_ID__SHIFT 0x4
+#define CC_TST_ID_STRAPS__MAJOR_REV_ID_MASK 0xf00000
+#define CC_TST_ID_STRAPS__MAJOR_REV_ID__SHIFT 0x14
+#define CC_TST_ID_STRAPS__MINOR_REV_ID_MASK 0xf000000
+#define CC_TST_ID_STRAPS__MINOR_REV_ID__SHIFT 0x18
+#define CC_TST_ID_STRAPS__ATI_REV_ID_MASK 0xf0000000
+#define CC_TST_ID_STRAPS__ATI_REV_ID__SHIFT 0x1c
+#define CC_FCTRL_FUSES__EXT_EFUSE_MACRO_PRESENT_MASK 0x2
+#define CC_FCTRL_FUSES__EXT_EFUSE_MACRO_PRESENT__SHIFT 0x1
+#define CC_HARVEST_FUSES__VCE_DISABLE_MASK 0x6
+#define CC_HARVEST_FUSES__VCE_DISABLE__SHIFT 0x1
+#define CC_HARVEST_FUSES__UVD_DISABLE_MASK 0x10
+#define CC_HARVEST_FUSES__UVD_DISABLE__SHIFT 0x4
+#define CC_HARVEST_FUSES__ACP_DISABLE_MASK 0x40
+#define CC_HARVEST_FUSES__ACP_DISABLE__SHIFT 0x6
+#define CC_HARVEST_FUSES__DC_DISABLE_MASK 0x3f00
+#define CC_HARVEST_FUSES__DC_DISABLE__SHIFT 0x8
+#define SMU_MAIN_PLL_OP_FREQ__PLL_OP_FREQ_MASK 0xffffffff
+#define SMU_MAIN_PLL_OP_FREQ__PLL_OP_FREQ__SHIFT 0x0
+#define SMU_STATUS__SMU_DONE_MASK 0x1
+#define SMU_STATUS__SMU_DONE__SHIFT 0x0
+#define SMU_STATUS__SMU_PASS_MASK 0x2
+#define SMU_STATUS__SMU_PASS__SHIFT 0x1
+#define SMU_FIRMWARE__SMU_IN_PROG_MASK 0x1
+#define SMU_FIRMWARE__SMU_IN_PROG__SHIFT 0x0
+#define SMU_FIRMWARE__SMU_RD_DONE_MASK 0x6
+#define SMU_FIRMWARE__SMU_RD_DONE__SHIFT 0x1
+#define SMU_FIRMWARE__SMU_SRAM_RD_BLOCK_EN_MASK 0x8
+#define SMU_FIRMWARE__SMU_SRAM_RD_BLOCK_EN__SHIFT 0x3
+#define SMU_FIRMWARE__SMU_SRAM_WR_BLOCK_EN_MASK 0x10
+#define SMU_FIRMWARE__SMU_SRAM_WR_BLOCK_EN__SHIFT 0x4
+#define SMU_FIRMWARE__SMU_counter_MASK 0xf00
+#define SMU_FIRMWARE__SMU_counter__SHIFT 0x8
+#define SMU_FIRMWARE__SMU_MODE_MASK 0x10000
+#define SMU_FIRMWARE__SMU_MODE__SHIFT 0x10
+#define SMU_FIRMWARE__SMU_SEL_MASK 0x20000
+#define SMU_FIRMWARE__SMU_SEL__SHIFT 0x11
+#define SMU_INPUT_DATA__START_ADDR_MASK 0x7fffffff
+#define SMU_INPUT_DATA__START_ADDR__SHIFT 0x0
+#define SMU_INPUT_DATA__AUTO_START_MASK 0x80000000
+#define SMU_INPUT_DATA__AUTO_START__SHIFT 0x1f
+#define SMU_EFUSE_0__EFUSE_DATA_MASK 0xffffffff
+#define SMU_EFUSE_0__EFUSE_DATA__SHIFT 0x0
+#define FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x1
+#define FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
+#define FIRMWARE_FLAGS__RESERVED_MASK 0xfffffe
+#define FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
+#define FIRMWARE_FLAGS__TEST_COUNT_MASK 0xff000000
+#define FIRMWARE_FLAGS__TEST_COUNT__SHIFT 0x18
+#define TDC_STATUS__VDD_Boost_MASK 0xff
+#define TDC_STATUS__VDD_Boost__SHIFT 0x0
+#define TDC_STATUS__VDD_Throttle_MASK 0xff00
+#define TDC_STATUS__VDD_Throttle__SHIFT 0x8
+#define TDC_STATUS__VDDC_Boost_MASK 0xff0000
+#define TDC_STATUS__VDDC_Boost__SHIFT 0x10
+#define TDC_STATUS__VDDC_Throttle_MASK 0xff000000
+#define TDC_STATUS__VDDC_Throttle__SHIFT 0x18
+#define TDC_MV_AVERAGE__IDD_MASK 0xffff
+#define TDC_MV_AVERAGE__IDD__SHIFT 0x0
+#define TDC_MV_AVERAGE__IDDC_MASK 0xffff0000
+#define TDC_MV_AVERAGE__IDDC__SHIFT 0x10
+#define TDC_VRM_LIMIT__IDD_MASK 0xffff
+#define TDC_VRM_LIMIT__IDD__SHIFT 0x0
+#define TDC_VRM_LIMIT__IDDC_MASK 0xffff0000
+#define TDC_VRM_LIMIT__IDDC__SHIFT 0x10
+#define FEATURE_STATUS__SCLK_DPM_ON_MASK 0x1
+#define FEATURE_STATUS__SCLK_DPM_ON__SHIFT 0x0
+#define FEATURE_STATUS__MCLK_DPM_ON_MASK 0x2
+#define FEATURE_STATUS__MCLK_DPM_ON__SHIFT 0x1
+#define FEATURE_STATUS__LCLK_DPM_ON_MASK 0x4
+#define FEATURE_STATUS__LCLK_DPM_ON__SHIFT 0x2
+#define FEATURE_STATUS__UVD_DPM_ON_MASK 0x8
+#define FEATURE_STATUS__UVD_DPM_ON__SHIFT 0x3
+#define FEATURE_STATUS__VCE_DPM_ON_MASK 0x10
+#define FEATURE_STATUS__VCE_DPM_ON__SHIFT 0x4
+#define FEATURE_STATUS__SAMU_DPM_ON_MASK 0x20
+#define FEATURE_STATUS__SAMU_DPM_ON__SHIFT 0x5
+#define FEATURE_STATUS__ACP_DPM_ON_MASK 0x40
+#define FEATURE_STATUS__ACP_DPM_ON__SHIFT 0x6
+#define FEATURE_STATUS__PCIE_DPM_ON_MASK 0x80
+#define FEATURE_STATUS__PCIE_DPM_ON__SHIFT 0x7
+#define FEATURE_STATUS__BAPM_ON_MASK 0x100
+#define FEATURE_STATUS__BAPM_ON__SHIFT 0x8
+#define FEATURE_STATUS__LPMX_ON_MASK 0x200
+#define FEATURE_STATUS__LPMX_ON__SHIFT 0x9
+#define FEATURE_STATUS__NBDPM_ON_MASK 0x400
+#define FEATURE_STATUS__NBDPM_ON__SHIFT 0xa
+#define FEATURE_STATUS__LHTC_ON_MASK 0x800
+#define FEATURE_STATUS__LHTC_ON__SHIFT 0xb
+#define FEATURE_STATUS__VPC_ON_MASK 0x1000
+#define FEATURE_STATUS__VPC_ON__SHIFT 0xc
+#define FEATURE_STATUS__VOLTAGE_CONTROLLER_ON_MASK 0x2000
+#define FEATURE_STATUS__VOLTAGE_CONTROLLER_ON__SHIFT 0xd
+#define FEATURE_STATUS__TDC_LIMIT_ON_MASK 0x4000
+#define FEATURE_STATUS__TDC_LIMIT_ON__SHIFT 0xe
+#define FEATURE_STATUS__GPU_CAC_ON_MASK 0x8000
+#define FEATURE_STATUS__GPU_CAC_ON__SHIFT 0xf
+#define FEATURE_STATUS__AVS_ON_MASK 0x10000
+#define FEATURE_STATUS__AVS_ON__SHIFT 0x10
+#define FEATURE_STATUS__SPMI_ON_MASK 0x20000
+#define FEATURE_STATUS__SPMI_ON__SHIFT 0x11
+#define FEATURE_STATUS__SCLK_DPM_FORCED_MASK 0x40000
+#define FEATURE_STATUS__SCLK_DPM_FORCED__SHIFT 0x12
+#define FEATURE_STATUS__MCLK_DPM_FORCED_MASK 0x80000
+#define FEATURE_STATUS__MCLK_DPM_FORCED__SHIFT 0x13
+#define FEATURE_STATUS__LCLK_DPM_FORCED_MASK 0x100000
+#define FEATURE_STATUS__LCLK_DPM_FORCED__SHIFT 0x14
+#define FEATURE_STATUS__PCIE_DPM_FORCED_MASK 0x200000
+#define FEATURE_STATUS__PCIE_DPM_FORCED__SHIFT 0x15
+#define FEATURE_STATUS__RESERVED_MASK 0xffc00000
+#define FEATURE_STATUS__RESERVED__SHIFT 0x16
+#define ENTITY_TEMPERATURES_1__GPU_MASK 0xffffffff
+#define ENTITY_TEMPERATURES_1__GPU__SHIFT 0x0
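+/*
+ * MCARB_DRAM_TIMING_TABLE_1..96 flatten an entries[8][4] array: each
+ * entries_<i>_<j> element occupies three consecutive registers
+ * (McArbDramTiming, McArbDramTiming2, then the padding bytes packed with
+ * McArbBurstTime), so register number 3*(4*i + j) + 1 starts entry (i,j).
+ */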
+#define MCARB_DRAM_TIMING_TABLE_1__entries_0_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_1__entries_0_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_2__entries_0_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_2__entries_0_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_3__entries_0_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_4__entries_0_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_4__entries_0_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_5__entries_0_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_5__entries_0_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_6__entries_0_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_7__entries_0_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_7__entries_0_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_8__entries_0_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_8__entries_0_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_9__entries_0_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_10__entries_0_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_10__entries_0_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_11__entries_0_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_11__entries_0_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_12__entries_0_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_13__entries_1_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_13__entries_1_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_14__entries_1_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_14__entries_1_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_15__entries_1_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_16__entries_1_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_16__entries_1_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_17__entries_1_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_17__entries_1_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_18__entries_1_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_19__entries_1_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_19__entries_1_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_20__entries_1_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_20__entries_1_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_21__entries_1_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_22__entries_1_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_22__entries_1_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_23__entries_1_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_23__entries_1_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_24__entries_1_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_25__entries_2_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_25__entries_2_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_26__entries_2_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_26__entries_2_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_27__entries_2_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_28__entries_2_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_28__entries_2_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_29__entries_2_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_29__entries_2_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_30__entries_2_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_31__entries_2_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_31__entries_2_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_32__entries_2_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_32__entries_2_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_33__entries_2_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_34__entries_2_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_34__entries_2_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_35__entries_2_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_35__entries_2_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_36__entries_2_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_37__entries_3_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_37__entries_3_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_38__entries_3_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_38__entries_3_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_39__entries_3_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_40__entries_3_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_40__entries_3_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_41__entries_3_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_41__entries_3_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_42__entries_3_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_43__entries_3_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_43__entries_3_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_44__entries_3_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_44__entries_3_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_45__entries_3_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_46__entries_3_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_46__entries_3_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_47__entries_3_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_47__entries_3_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_48__entries_3_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_49__entries_4_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_49__entries_4_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_50__entries_4_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_50__entries_4_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_51__entries_4_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_52__entries_4_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_52__entries_4_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_53__entries_4_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_53__entries_4_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_54__entries_4_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_55__entries_4_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_55__entries_4_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_56__entries_4_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_56__entries_4_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_57__entries_4_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_58__entries_4_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_58__entries_4_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_59__entries_4_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_59__entries_4_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_60__entries_4_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_61__entries_5_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_61__entries_5_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_62__entries_5_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_62__entries_5_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_63__entries_5_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_64__entries_5_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_64__entries_5_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_65__entries_5_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_65__entries_5_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_66__entries_5_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_67__entries_5_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_67__entries_5_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_68__entries_5_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_68__entries_5_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_69__entries_5_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_70__entries_5_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_70__entries_5_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_71__entries_5_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_71__entries_5_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_72__entries_5_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_73__entries_6_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_73__entries_6_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_74__entries_6_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_74__entries_6_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_75__entries_6_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_76__entries_6_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_76__entries_6_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_77__entries_6_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_77__entries_6_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_78__entries_6_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_79__entries_6_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_79__entries_6_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_80__entries_6_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_80__entries_6_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_81__entries_6_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_82__entries_6_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_82__entries_6_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_83__entries_6_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_83__entries_6_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_84__entries_6_3_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_85__entries_7_0_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_85__entries_7_0_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_86__entries_7_0_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_86__entries_7_0_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_87__entries_7_0_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_88__entries_7_1_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_88__entries_7_1_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_89__entries_7_1_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_89__entries_7_1_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_90__entries_7_1_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_91__entries_7_2_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_91__entries_7_2_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_92__entries_7_2_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_92__entries_7_2_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_93__entries_7_2_McArbBurstTime__SHIFT 0x18
+#define MCARB_DRAM_TIMING_TABLE_94__entries_7_3_McArbDramTiming_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_94__entries_7_3_McArbDramTiming__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_95__entries_7_3_McArbDramTiming2_MASK 0xffffffff
+#define MCARB_DRAM_TIMING_TABLE_95__entries_7_3_McArbDramTiming2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_2_MASK 0xff
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_2__SHIFT 0x0
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_1_MASK 0xff00
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_1__SHIFT 0x8
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_0_MASK 0xff0000
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_padding_0__SHIFT 0x10
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_McArbBurstTime_MASK 0xff000000
+#define MCARB_DRAM_TIMING_TABLE_96__entries_7_3_McArbBurstTime__SHIFT 0x18
+#define DPM_TABLE_1__GraphicsPIDController_Ki_MASK 0xffffffff
+#define DPM_TABLE_1__GraphicsPIDController_Ki__SHIFT 0x0
+#define DPM_TABLE_2__GraphicsPIDController_LFWindupUpperLim_MASK 0xffffffff
+#define DPM_TABLE_2__GraphicsPIDController_LFWindupUpperLim__SHIFT 0x0
+#define DPM_TABLE_3__GraphicsPIDController_LFWindupLowerLim_MASK 0xffffffff
+#define DPM_TABLE_3__GraphicsPIDController_LFWindupLowerLim__SHIFT 0x0
+#define DPM_TABLE_4__GraphicsPIDController_StatePrecision_MASK 0xffffffff
+#define DPM_TABLE_4__GraphicsPIDController_StatePrecision__SHIFT 0x0
+#define DPM_TABLE_5__GraphicsPIDController_LfPrecision_MASK 0xffffffff
+#define DPM_TABLE_5__GraphicsPIDController_LfPrecision__SHIFT 0x0
+#define DPM_TABLE_6__GraphicsPIDController_LfOffset_MASK 0xffffffff
+#define DPM_TABLE_6__GraphicsPIDController_LfOffset__SHIFT 0x0
+#define DPM_TABLE_7__GraphicsPIDController_MaxState_MASK 0xffffffff
+#define DPM_TABLE_7__GraphicsPIDController_MaxState__SHIFT 0x0
+#define DPM_TABLE_8__GraphicsPIDController_MaxLfFraction_MASK 0xffffffff
+#define DPM_TABLE_8__GraphicsPIDController_MaxLfFraction__SHIFT 0x0
+#define DPM_TABLE_9__GraphicsPIDController_StateShift_MASK 0xffffffff
+#define DPM_TABLE_9__GraphicsPIDController_StateShift__SHIFT 0x0
+#define DPM_TABLE_10__MemoryPIDController_Ki_MASK 0xffffffff
+#define DPM_TABLE_10__MemoryPIDController_Ki__SHIFT 0x0
+#define DPM_TABLE_11__MemoryPIDController_LFWindupUpperLim_MASK 0xffffffff
+#define DPM_TABLE_11__MemoryPIDController_LFWindupUpperLim__SHIFT 0x0
+#define DPM_TABLE_12__MemoryPIDController_LFWindupLowerLim_MASK 0xffffffff
+#define DPM_TABLE_12__MemoryPIDController_LFWindupLowerLim__SHIFT 0x0
+#define DPM_TABLE_13__MemoryPIDController_StatePrecision_MASK 0xffffffff
+#define DPM_TABLE_13__MemoryPIDController_StatePrecision__SHIFT 0x0
+#define DPM_TABLE_14__MemoryPIDController_LfPrecision_MASK 0xffffffff
+#define DPM_TABLE_14__MemoryPIDController_LfPrecision__SHIFT 0x0
+#define DPM_TABLE_15__MemoryPIDController_LfOffset_MASK 0xffffffff
+#define DPM_TABLE_15__MemoryPIDController_LfOffset__SHIFT 0x0
+#define DPM_TABLE_16__MemoryPIDController_MaxState_MASK 0xffffffff
+#define DPM_TABLE_16__MemoryPIDController_MaxState__SHIFT 0x0
+#define DPM_TABLE_17__MemoryPIDController_MaxLfFraction_MASK 0xffffffff
+#define DPM_TABLE_17__MemoryPIDController_MaxLfFraction__SHIFT 0x0
+#define DPM_TABLE_18__MemoryPIDController_StateShift_MASK 0xffffffff
+#define DPM_TABLE_18__MemoryPIDController_StateShift__SHIFT 0x0
+#define DPM_TABLE_19__LinkPIDController_Ki_MASK 0xffffffff
+#define DPM_TABLE_19__LinkPIDController_Ki__SHIFT 0x0
+#define DPM_TABLE_20__LinkPIDController_LFWindupUpperLim_MASK 0xffffffff
+#define DPM_TABLE_20__LinkPIDController_LFWindupUpperLim__SHIFT 0x0
+#define DPM_TABLE_21__LinkPIDController_LFWindupLowerLim_MASK 0xffffffff
+#define DPM_TABLE_21__LinkPIDController_LFWindupLowerLim__SHIFT 0x0
+#define DPM_TABLE_22__LinkPIDController_StatePrecision_MASK 0xffffffff
+#define DPM_TABLE_22__LinkPIDController_StatePrecision__SHIFT 0x0
+#define DPM_TABLE_23__LinkPIDController_LfPrecision_MASK 0xffffffff
+#define DPM_TABLE_23__LinkPIDController_LfPrecision__SHIFT 0x0
+#define DPM_TABLE_24__LinkPIDController_LfOffset_MASK 0xffffffff
+#define DPM_TABLE_24__LinkPIDController_LfOffset__SHIFT 0x0
+#define DPM_TABLE_25__LinkPIDController_MaxState_MASK 0xffffffff
+#define DPM_TABLE_25__LinkPIDController_MaxState__SHIFT 0x0
+#define DPM_TABLE_26__LinkPIDController_MaxLfFraction_MASK 0xffffffff
+#define DPM_TABLE_26__LinkPIDController_MaxLfFraction__SHIFT 0x0
+#define DPM_TABLE_27__LinkPIDController_StateShift_MASK 0xffffffff
+#define DPM_TABLE_27__LinkPIDController_StateShift__SHIFT 0x0
+#define DPM_TABLE_28__SystemFlags_MASK 0xffffffff
+#define DPM_TABLE_28__SystemFlags__SHIFT 0x0
+#define DPM_TABLE_29__VRConfig_MASK 0xffffffff
+#define DPM_TABLE_29__VRConfig__SHIFT 0x0
+#define DPM_TABLE_30__SmioMask1_MASK 0xffffffff
+#define DPM_TABLE_30__SmioMask1__SHIFT 0x0
+#define DPM_TABLE_31__SmioMask2_MASK 0xffffffff
+#define DPM_TABLE_31__SmioMask2__SHIFT 0x0
+#define DPM_TABLE_32__SmioTable1_Pattern_0_padding_MASK 0xff
+#define DPM_TABLE_32__SmioTable1_Pattern_0_padding__SHIFT 0x0
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Smio_MASK 0xff00
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Smio__SHIFT 0x8
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Voltage_MASK 0xffff0000
+#define DPM_TABLE_32__SmioTable1_Pattern_0_Voltage__SHIFT 0x10
+#define DPM_TABLE_33__SmioTable1_Pattern_1_padding_MASK 0xff
+#define DPM_TABLE_33__SmioTable1_Pattern_1_padding__SHIFT 0x0
+#define DPM_TABLE_33__SmioTable1_Pattern_1_Smio_MASK 0xff00
+#define DPM_TABLE_33__SmioTable1_Pattern_1_Smio__SHIFT 0x8
+#define DPM_TABLE_33__SmioTable1_Pattern_1_Voltage_MASK 0xffff0000
+#define DPM_TABLE_33__SmioTable1_Pattern_1_Voltage__SHIFT 0x10
+#define DPM_TABLE_34__SmioTable1_Pattern_2_padding_MASK 0xff
+#define DPM_TABLE_34__SmioTable1_Pattern_2_padding__SHIFT 0x0
+#define DPM_TABLE_34__SmioTable1_Pattern_2_Smio_MASK 0xff00
+#define DPM_TABLE_34__SmioTable1_Pattern_2_Smio__SHIFT 0x8
+#define DPM_TABLE_34__SmioTable1_Pattern_2_Voltage_MASK 0xffff0000
+#define DPM_TABLE_34__SmioTable1_Pattern_2_Voltage__SHIFT 0x10
+#define DPM_TABLE_35__SmioTable1_Pattern_3_padding_MASK 0xff
+#define DPM_TABLE_35__SmioTable1_Pattern_3_padding__SHIFT 0x0
+#define DPM_TABLE_35__SmioTable1_Pattern_3_Smio_MASK 0xff00
+#define DPM_TABLE_35__SmioTable1_Pattern_3_Smio__SHIFT 0x8
+#define DPM_TABLE_35__SmioTable1_Pattern_3_Voltage_MASK 0xffff0000
+#define DPM_TABLE_35__SmioTable1_Pattern_3_Voltage__SHIFT 0x10
+#define DPM_TABLE_36__SmioTable2_Pattern_0_padding_MASK 0xff
+#define DPM_TABLE_36__SmioTable2_Pattern_0_padding__SHIFT 0x0
+#define DPM_TABLE_36__SmioTable2_Pattern_0_Smio_MASK 0xff00
+#define DPM_TABLE_36__SmioTable2_Pattern_0_Smio__SHIFT 0x8
+#define DPM_TABLE_36__SmioTable2_Pattern_0_Voltage_MASK 0xffff0000
+#define DPM_TABLE_36__SmioTable2_Pattern_0_Voltage__SHIFT 0x10
+#define DPM_TABLE_37__SmioTable2_Pattern_1_padding_MASK 0xff
+#define DPM_TABLE_37__SmioTable2_Pattern_1_padding__SHIFT 0x0
+#define DPM_TABLE_37__SmioTable2_Pattern_1_Smio_MASK 0xff00
+#define DPM_TABLE_37__SmioTable2_Pattern_1_Smio__SHIFT 0x8
+#define DPM_TABLE_37__SmioTable2_Pattern_1_Voltage_MASK 0xffff0000
+#define DPM_TABLE_37__SmioTable2_Pattern_1_Voltage__SHIFT 0x10
+#define DPM_TABLE_38__SmioTable2_Pattern_2_padding_MASK 0xff
+#define DPM_TABLE_38__SmioTable2_Pattern_2_padding__SHIFT 0x0
+#define DPM_TABLE_38__SmioTable2_Pattern_2_Smio_MASK 0xff00
+#define DPM_TABLE_38__SmioTable2_Pattern_2_Smio__SHIFT 0x8
+#define DPM_TABLE_38__SmioTable2_Pattern_2_Voltage_MASK 0xffff0000
+#define DPM_TABLE_38__SmioTable2_Pattern_2_Voltage__SHIFT 0x10
+#define DPM_TABLE_39__SmioTable2_Pattern_3_padding_MASK 0xff
+#define DPM_TABLE_39__SmioTable2_Pattern_3_padding__SHIFT 0x0
+#define DPM_TABLE_39__SmioTable2_Pattern_3_Smio_MASK 0xff00
+#define DPM_TABLE_39__SmioTable2_Pattern_3_Smio__SHIFT 0x8
+#define DPM_TABLE_39__SmioTable2_Pattern_3_Voltage_MASK 0xffff0000
+#define DPM_TABLE_39__SmioTable2_Pattern_3_Voltage__SHIFT 0x10
+#define DPM_TABLE_40__VddcLevelCount_MASK 0xffffffff
+#define DPM_TABLE_40__VddcLevelCount__SHIFT 0x0
+#define DPM_TABLE_41__VddciLevelCount_MASK 0xffffffff
+#define DPM_TABLE_41__VddciLevelCount__SHIFT 0x0
+#define DPM_TABLE_42__VddGfxLevelCount_MASK 0xffffffff
+#define DPM_TABLE_42__VddGfxLevelCount__SHIFT 0x0
+#define DPM_TABLE_43__MvddLevelCount_MASK 0xffffffff
+#define DPM_TABLE_43__MvddLevelCount__SHIFT 0x0
+#define DPM_TABLE_44__VddcTable_1_MASK 0xffff
+#define DPM_TABLE_44__VddcTable_1__SHIFT 0x0
+#define DPM_TABLE_44__VddcTable_0_MASK 0xffff0000
+#define DPM_TABLE_44__VddcTable_0__SHIFT 0x10
+#define DPM_TABLE_45__VddcTable_3_MASK 0xffff
+#define DPM_TABLE_45__VddcTable_3__SHIFT 0x0
+#define DPM_TABLE_45__VddcTable_2_MASK 0xffff0000
+#define DPM_TABLE_45__VddcTable_2__SHIFT 0x10
+#define DPM_TABLE_46__VddcTable_5_MASK 0xffff
+#define DPM_TABLE_46__VddcTable_5__SHIFT 0x0
+#define DPM_TABLE_46__VddcTable_4_MASK 0xffff0000
+#define DPM_TABLE_46__VddcTable_4__SHIFT 0x10
+#define DPM_TABLE_47__VddcTable_7_MASK 0xffff
+#define DPM_TABLE_47__VddcTable_7__SHIFT 0x0
+#define DPM_TABLE_47__VddcTable_6_MASK 0xffff0000
+#define DPM_TABLE_47__VddcTable_6__SHIFT 0x10
+#define DPM_TABLE_48__VddcTable_9_MASK 0xffff
+#define DPM_TABLE_48__VddcTable_9__SHIFT 0x0
+#define DPM_TABLE_48__VddcTable_8_MASK 0xffff0000
+#define DPM_TABLE_48__VddcTable_8__SHIFT 0x10
+#define DPM_TABLE_49__VddcTable_11_MASK 0xffff
+#define DPM_TABLE_49__VddcTable_11__SHIFT 0x0
+#define DPM_TABLE_49__VddcTable_10_MASK 0xffff0000
+#define DPM_TABLE_49__VddcTable_10__SHIFT 0x10
+#define DPM_TABLE_50__VddcTable_13_MASK 0xffff
+#define DPM_TABLE_50__VddcTable_13__SHIFT 0x0
+#define DPM_TABLE_50__VddcTable_12_MASK 0xffff0000
+#define DPM_TABLE_50__VddcTable_12__SHIFT 0x10
+#define DPM_TABLE_51__VddcTable_15_MASK 0xffff
+#define DPM_TABLE_51__VddcTable_15__SHIFT 0x0
+#define DPM_TABLE_51__VddcTable_14_MASK 0xffff0000
+#define DPM_TABLE_51__VddcTable_14__SHIFT 0x10
+#define DPM_TABLE_52__VddGfxTable_1_MASK 0xffff
+#define DPM_TABLE_52__VddGfxTable_1__SHIFT 0x0
+#define DPM_TABLE_52__VddGfxTable_0_MASK 0xffff0000
+#define DPM_TABLE_52__VddGfxTable_0__SHIFT 0x10
+#define DPM_TABLE_53__VddGfxTable_3_MASK 0xffff
+#define DPM_TABLE_53__VddGfxTable_3__SHIFT 0x0
+#define DPM_TABLE_53__VddGfxTable_2_MASK 0xffff0000
+#define DPM_TABLE_53__VddGfxTable_2__SHIFT 0x10
+#define DPM_TABLE_54__VddGfxTable_5_MASK 0xffff
+#define DPM_TABLE_54__VddGfxTable_5__SHIFT 0x0
+#define DPM_TABLE_54__VddGfxTable_4_MASK 0xffff0000
+#define DPM_TABLE_54__VddGfxTable_4__SHIFT 0x10
+#define DPM_TABLE_55__VddGfxTable_7_MASK 0xffff
+#define DPM_TABLE_55__VddGfxTable_7__SHIFT 0x0
+#define DPM_TABLE_55__VddGfxTable_6_MASK 0xffff0000
+#define DPM_TABLE_55__VddGfxTable_6__SHIFT 0x10
+#define DPM_TABLE_56__VddGfxTable_9_MASK 0xffff
+#define DPM_TABLE_56__VddGfxTable_9__SHIFT 0x0
+#define DPM_TABLE_56__VddGfxTable_8_MASK 0xffff0000
+#define DPM_TABLE_56__VddGfxTable_8__SHIFT 0x10
+#define DPM_TABLE_57__VddGfxTable_11_MASK 0xffff
+#define DPM_TABLE_57__VddGfxTable_11__SHIFT 0x0
+#define DPM_TABLE_57__VddGfxTable_10_MASK 0xffff0000
+#define DPM_TABLE_57__VddGfxTable_10__SHIFT 0x10
+#define DPM_TABLE_58__VddGfxTable_13_MASK 0xffff
+#define DPM_TABLE_58__VddGfxTable_13__SHIFT 0x0
+#define DPM_TABLE_58__VddGfxTable_12_MASK 0xffff0000
+#define DPM_TABLE_58__VddGfxTable_12__SHIFT 0x10
+#define DPM_TABLE_59__VddGfxTable_15_MASK 0xffff
+#define DPM_TABLE_59__VddGfxTable_15__SHIFT 0x0
+#define DPM_TABLE_59__VddGfxTable_14_MASK 0xffff0000
+#define DPM_TABLE_59__VddGfxTable_14__SHIFT 0x10
+#define DPM_TABLE_60__VddciTable_1_MASK 0xffff
+#define DPM_TABLE_60__VddciTable_1__SHIFT 0x0
+#define DPM_TABLE_60__VddciTable_0_MASK 0xffff0000
+#define DPM_TABLE_60__VddciTable_0__SHIFT 0x10
+#define DPM_TABLE_61__VddciTable_3_MASK 0xffff
+#define DPM_TABLE_61__VddciTable_3__SHIFT 0x0
+#define DPM_TABLE_61__VddciTable_2_MASK 0xffff0000
+#define DPM_TABLE_61__VddciTable_2__SHIFT 0x10
+#define DPM_TABLE_62__VddciTable_5_MASK 0xffff
+#define DPM_TABLE_62__VddciTable_5__SHIFT 0x0
+#define DPM_TABLE_62__VddciTable_4_MASK 0xffff0000
+#define DPM_TABLE_62__VddciTable_4__SHIFT 0x10
+#define DPM_TABLE_63__VddciTable_7_MASK 0xffff
+#define DPM_TABLE_63__VddciTable_7__SHIFT 0x0
+#define DPM_TABLE_63__VddciTable_6_MASK 0xffff0000
+#define DPM_TABLE_63__VddciTable_6__SHIFT 0x10
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_3_MASK 0xff
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_3__SHIFT 0x0
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_2_MASK 0xff00
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_2__SHIFT 0x8
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_1_MASK 0xff0000
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_1__SHIFT 0x10
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_0_MASK 0xff000000
+#define DPM_TABLE_64__BapmVddGfxVidHiSidd_0__SHIFT 0x18
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_7_MASK 0xff
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_7__SHIFT 0x0
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_6_MASK 0xff00
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_6__SHIFT 0x8
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_5_MASK 0xff0000
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_5__SHIFT 0x10
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_4_MASK 0xff000000
+#define DPM_TABLE_65__BapmVddGfxVidHiSidd_4__SHIFT 0x18
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_11_MASK 0xff
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_11__SHIFT 0x0
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_10_MASK 0xff00
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_10__SHIFT 0x8
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_9_MASK 0xff0000
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_9__SHIFT 0x10
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_8_MASK 0xff000000
+#define DPM_TABLE_66__BapmVddGfxVidHiSidd_8__SHIFT 0x18
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_15_MASK 0xff
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_15__SHIFT 0x0
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_14_MASK 0xff00
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_14__SHIFT 0x8
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_13_MASK 0xff0000
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_13__SHIFT 0x10
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_12_MASK 0xff000000
+#define DPM_TABLE_67__BapmVddGfxVidHiSidd_12__SHIFT 0x18
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_3_MASK 0xff
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_3__SHIFT 0x0
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_2_MASK 0xff00
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_2__SHIFT 0x8
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_1_MASK 0xff0000
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_1__SHIFT 0x10
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_0_MASK 0xff000000
+#define DPM_TABLE_68__BapmVddGfxVidLoSidd_0__SHIFT 0x18
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_7_MASK 0xff
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_7__SHIFT 0x0
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_6_MASK 0xff00
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_6__SHIFT 0x8
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_5_MASK 0xff0000
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_5__SHIFT 0x10
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_4_MASK 0xff000000
+#define DPM_TABLE_69__BapmVddGfxVidLoSidd_4__SHIFT 0x18
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_11_MASK 0xff
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_11__SHIFT 0x0
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_10_MASK 0xff00
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_10__SHIFT 0x8
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_9_MASK 0xff0000
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_9__SHIFT 0x10
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_8_MASK 0xff000000
+#define DPM_TABLE_70__BapmVddGfxVidLoSidd_8__SHIFT 0x18
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_15_MASK 0xff
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_15__SHIFT 0x0
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_14_MASK 0xff00
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_14__SHIFT 0x8
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_13_MASK 0xff0000
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_13__SHIFT 0x10
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_12_MASK 0xff000000
+#define DPM_TABLE_71__BapmVddGfxVidLoSidd_12__SHIFT 0x18
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_3_MASK 0xff
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_3__SHIFT 0x0
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_2_MASK 0xff00
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_2__SHIFT 0x8
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_1_MASK 0xff0000
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_1__SHIFT 0x10
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_0_MASK 0xff000000
+#define DPM_TABLE_72__BapmVddGfxVidHiSidd2_0__SHIFT 0x18
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_7_MASK 0xff
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_7__SHIFT 0x0
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_6_MASK 0xff00
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_6__SHIFT 0x8
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_5_MASK 0xff0000
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_5__SHIFT 0x10
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_4_MASK 0xff000000
+#define DPM_TABLE_73__BapmVddGfxVidHiSidd2_4__SHIFT 0x18
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_11_MASK 0xff
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_11__SHIFT 0x0
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_10_MASK 0xff00
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_10__SHIFT 0x8
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_9_MASK 0xff0000
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_9__SHIFT 0x10
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_8_MASK 0xff000000
+#define DPM_TABLE_74__BapmVddGfxVidHiSidd2_8__SHIFT 0x18
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_15_MASK 0xff
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_15__SHIFT 0x0
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_14_MASK 0xff00
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_14__SHIFT 0x8
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_13_MASK 0xff0000
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_13__SHIFT 0x10
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_12_MASK 0xff000000
+#define DPM_TABLE_75__BapmVddGfxVidHiSidd2_12__SHIFT 0x18
+#define DPM_TABLE_76__BapmVddcVidHiSidd_3_MASK 0xff
+#define DPM_TABLE_76__BapmVddcVidHiSidd_3__SHIFT 0x0
+#define DPM_TABLE_76__BapmVddcVidHiSidd_2_MASK 0xff00
+#define DPM_TABLE_76__BapmVddcVidHiSidd_2__SHIFT 0x8
+#define DPM_TABLE_76__BapmVddcVidHiSidd_1_MASK 0xff0000
+#define DPM_TABLE_76__BapmVddcVidHiSidd_1__SHIFT 0x10
+#define DPM_TABLE_76__BapmVddcVidHiSidd_0_MASK 0xff000000
+#define DPM_TABLE_76__BapmVddcVidHiSidd_0__SHIFT 0x18
+#define DPM_TABLE_77__BapmVddcVidHiSidd_7_MASK 0xff
+#define DPM_TABLE_77__BapmVddcVidHiSidd_7__SHIFT 0x0
+#define DPM_TABLE_77__BapmVddcVidHiSidd_6_MASK 0xff00
+#define DPM_TABLE_77__BapmVddcVidHiSidd_6__SHIFT 0x8
+#define DPM_TABLE_77__BapmVddcVidHiSidd_5_MASK 0xff0000
+#define DPM_TABLE_77__BapmVddcVidHiSidd_5__SHIFT 0x10
+#define DPM_TABLE_77__BapmVddcVidHiSidd_4_MASK 0xff000000
+#define DPM_TABLE_77__BapmVddcVidHiSidd_4__SHIFT 0x18
+#define DPM_TABLE_78__BapmVddcVidHiSidd_11_MASK 0xff
+#define DPM_TABLE_78__BapmVddcVidHiSidd_11__SHIFT 0x0
+#define DPM_TABLE_78__BapmVddcVidHiSidd_10_MASK 0xff00
+#define DPM_TABLE_78__BapmVddcVidHiSidd_10__SHIFT 0x8
+#define DPM_TABLE_78__BapmVddcVidHiSidd_9_MASK 0xff0000
+#define DPM_TABLE_78__BapmVddcVidHiSidd_9__SHIFT 0x10
+#define DPM_TABLE_78__BapmVddcVidHiSidd_8_MASK 0xff000000
+#define DPM_TABLE_78__BapmVddcVidHiSidd_8__SHIFT 0x18
+#define DPM_TABLE_79__BapmVddcVidHiSidd_15_MASK 0xff
+#define DPM_TABLE_79__BapmVddcVidHiSidd_15__SHIFT 0x0
+#define DPM_TABLE_79__BapmVddcVidHiSidd_14_MASK 0xff00
+#define DPM_TABLE_79__BapmVddcVidHiSidd_14__SHIFT 0x8
+#define DPM_TABLE_79__BapmVddcVidHiSidd_13_MASK 0xff0000
+#define DPM_TABLE_79__BapmVddcVidHiSidd_13__SHIFT 0x10
+#define DPM_TABLE_79__BapmVddcVidHiSidd_12_MASK 0xff000000
+#define DPM_TABLE_79__BapmVddcVidHiSidd_12__SHIFT 0x18
+#define DPM_TABLE_80__BapmVddcVidLoSidd_3_MASK 0xff
+#define DPM_TABLE_80__BapmVddcVidLoSidd_3__SHIFT 0x0
+#define DPM_TABLE_80__BapmVddcVidLoSidd_2_MASK 0xff00
+#define DPM_TABLE_80__BapmVddcVidLoSidd_2__SHIFT 0x8
+#define DPM_TABLE_80__BapmVddcVidLoSidd_1_MASK 0xff0000
+#define DPM_TABLE_80__BapmVddcVidLoSidd_1__SHIFT 0x10
+#define DPM_TABLE_80__BapmVddcVidLoSidd_0_MASK 0xff000000
+#define DPM_TABLE_80__BapmVddcVidLoSidd_0__SHIFT 0x18
+#define DPM_TABLE_81__BapmVddcVidLoSidd_7_MASK 0xff
+#define DPM_TABLE_81__BapmVddcVidLoSidd_7__SHIFT 0x0
+#define DPM_TABLE_81__BapmVddcVidLoSidd_6_MASK 0xff00
+#define DPM_TABLE_81__BapmVddcVidLoSidd_6__SHIFT 0x8
+#define DPM_TABLE_81__BapmVddcVidLoSidd_5_MASK 0xff0000
+#define DPM_TABLE_81__BapmVddcVidLoSidd_5__SHIFT 0x10
+#define DPM_TABLE_81__BapmVddcVidLoSidd_4_MASK 0xff000000
+#define DPM_TABLE_81__BapmVddcVidLoSidd_4__SHIFT 0x18
+#define DPM_TABLE_82__BapmVddcVidLoSidd_11_MASK 0xff
+#define DPM_TABLE_82__BapmVddcVidLoSidd_11__SHIFT 0x0
+#define DPM_TABLE_82__BapmVddcVidLoSidd_10_MASK 0xff00
+#define DPM_TABLE_82__BapmVddcVidLoSidd_10__SHIFT 0x8
+#define DPM_TABLE_82__BapmVddcVidLoSidd_9_MASK 0xff0000
+#define DPM_TABLE_82__BapmVddcVidLoSidd_9__SHIFT 0x10
+#define DPM_TABLE_82__BapmVddcVidLoSidd_8_MASK 0xff000000
+#define DPM_TABLE_82__BapmVddcVidLoSidd_8__SHIFT 0x18
+#define DPM_TABLE_83__BapmVddcVidLoSidd_15_MASK 0xff
+#define DPM_TABLE_83__BapmVddcVidLoSidd_15__SHIFT 0x0
+#define DPM_TABLE_83__BapmVddcVidLoSidd_14_MASK 0xff00
+#define DPM_TABLE_83__BapmVddcVidLoSidd_14__SHIFT 0x8
+#define DPM_TABLE_83__BapmVddcVidLoSidd_13_MASK 0xff0000
+#define DPM_TABLE_83__BapmVddcVidLoSidd_13__SHIFT 0x10
+#define DPM_TABLE_83__BapmVddcVidLoSidd_12_MASK 0xff000000
+#define DPM_TABLE_83__BapmVddcVidLoSidd_12__SHIFT 0x18
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_3_MASK 0xff
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_3__SHIFT 0x0
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_2_MASK 0xff00
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_2__SHIFT 0x8
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_1_MASK 0xff0000
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_1__SHIFT 0x10
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_0_MASK 0xff000000
+#define DPM_TABLE_84__BapmVddcVidHiSidd2_0__SHIFT 0x18
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_7_MASK 0xff
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_7__SHIFT 0x0
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_6_MASK 0xff00
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_6__SHIFT 0x8
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_5_MASK 0xff0000
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_5__SHIFT 0x10
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_4_MASK 0xff000000
+#define DPM_TABLE_85__BapmVddcVidHiSidd2_4__SHIFT 0x18
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_11_MASK 0xff
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_11__SHIFT 0x0
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_10_MASK 0xff00
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_10__SHIFT 0x8
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_9_MASK 0xff0000
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_9__SHIFT 0x10
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_8_MASK 0xff000000
+#define DPM_TABLE_86__BapmVddcVidHiSidd2_8__SHIFT 0x18
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_15_MASK 0xff
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_15__SHIFT 0x0
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_14_MASK 0xff00
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_14__SHIFT 0x8
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_13_MASK 0xff0000
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_13__SHIFT 0x10
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_12_MASK 0xff000000
+#define DPM_TABLE_87__BapmVddcVidHiSidd2_12__SHIFT 0x18
+#define DPM_TABLE_88__MasterDeepSleepControl_MASK 0xff
+#define DPM_TABLE_88__MasterDeepSleepControl__SHIFT 0x0
+#define DPM_TABLE_88__LinkLevelCount_MASK 0xff00
+#define DPM_TABLE_88__LinkLevelCount__SHIFT 0x8
+#define DPM_TABLE_88__MemoryDpmLevelCount_MASK 0xff0000
+#define DPM_TABLE_88__MemoryDpmLevelCount__SHIFT 0x10
+#define DPM_TABLE_88__GraphicsDpmLevelCount_MASK 0xff000000
+#define DPM_TABLE_88__GraphicsDpmLevelCount__SHIFT 0x18
+#define DPM_TABLE_89__SamuLevelCount_MASK 0xff
+#define DPM_TABLE_89__SamuLevelCount__SHIFT 0x0
+#define DPM_TABLE_89__AcpLevelCount_MASK 0xff00
+#define DPM_TABLE_89__AcpLevelCount__SHIFT 0x8
+#define DPM_TABLE_89__VceLevelCount_MASK 0xff0000
+#define DPM_TABLE_89__VceLevelCount__SHIFT 0x10
+#define DPM_TABLE_89__UvdLevelCount_MASK 0xff000000
+#define DPM_TABLE_89__UvdLevelCount__SHIFT 0x18
+#define DPM_TABLE_90__Reserved_0_MASK 0xff
+#define DPM_TABLE_90__Reserved_0__SHIFT 0x0
+#define DPM_TABLE_90__ThermOutMode_MASK 0xff00
+#define DPM_TABLE_90__ThermOutMode__SHIFT 0x8
+#define DPM_TABLE_90__ThermOutPolarity_MASK 0xff0000
+#define DPM_TABLE_90__ThermOutPolarity__SHIFT 0x10
+#define DPM_TABLE_90__ThermOutGpio_MASK 0xff000000
+#define DPM_TABLE_90__ThermOutGpio__SHIFT 0x18
+#define DPM_TABLE_91__Reserved_0_MASK 0xffffffff
+#define DPM_TABLE_91__Reserved_0__SHIFT 0x0
+#define DPM_TABLE_92__Reserved_1_MASK 0xffffffff
+#define DPM_TABLE_92__Reserved_1__SHIFT 0x0
+#define DPM_TABLE_93__Reserved_2_MASK 0xffffffff
+#define DPM_TABLE_93__Reserved_2__SHIFT 0x0
+#define DPM_TABLE_94__Reserved_3_MASK 0xffffffff
+#define DPM_TABLE_94__Reserved_3__SHIFT 0x0
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_95__GraphicsLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_96__GraphicsLevel_0_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_96__GraphicsLevel_0_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_97__GraphicsLevel_0_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_97__GraphicsLevel_0_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_97__GraphicsLevel_0_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_97__GraphicsLevel_0_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_97__GraphicsLevel_0_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_97__GraphicsLevel_0_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_98__GraphicsLevel_0_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_98__GraphicsLevel_0_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_99__GraphicsLevel_0_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_99__GraphicsLevel_0_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_100__GraphicsLevel_0_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_100__GraphicsLevel_0_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_101__GraphicsLevel_0_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_101__GraphicsLevel_0_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_102__GraphicsLevel_0_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_102__GraphicsLevel_0_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_103__GraphicsLevel_0_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_103__GraphicsLevel_0_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_104__GraphicsLevel_0_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_104__GraphicsLevel_0_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_104__GraphicsLevel_0_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_104__GraphicsLevel_0_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_104__GraphicsLevel_0_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_104__GraphicsLevel_0_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_104__GraphicsLevel_0_SclkDid_MASK 0xff000000
+#define DPM_TABLE_104__GraphicsLevel_0_SclkDid__SHIFT 0x18
+#define DPM_TABLE_105__GraphicsLevel_0_PowerThrottle_MASK 0xff
+#define DPM_TABLE_105__GraphicsLevel_0_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_105__GraphicsLevel_0_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_105__GraphicsLevel_0_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_105__GraphicsLevel_0_DownHyst_MASK 0xff0000
+#define DPM_TABLE_105__GraphicsLevel_0_DownHyst__SHIFT 0x10
+#define DPM_TABLE_105__GraphicsLevel_0_UpHyst_MASK 0xff000000
+#define DPM_TABLE_105__GraphicsLevel_0_UpHyst__SHIFT 0x18
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_106__GraphicsLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_107__GraphicsLevel_1_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_107__GraphicsLevel_1_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_108__GraphicsLevel_1_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_108__GraphicsLevel_1_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_108__GraphicsLevel_1_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_108__GraphicsLevel_1_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_108__GraphicsLevel_1_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_108__GraphicsLevel_1_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_109__GraphicsLevel_1_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_109__GraphicsLevel_1_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_110__GraphicsLevel_1_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_110__GraphicsLevel_1_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_111__GraphicsLevel_1_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_111__GraphicsLevel_1_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_112__GraphicsLevel_1_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_112__GraphicsLevel_1_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_113__GraphicsLevel_1_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_113__GraphicsLevel_1_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_114__GraphicsLevel_1_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_114__GraphicsLevel_1_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_115__GraphicsLevel_1_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_115__GraphicsLevel_1_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_115__GraphicsLevel_1_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_115__GraphicsLevel_1_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_115__GraphicsLevel_1_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_115__GraphicsLevel_1_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_115__GraphicsLevel_1_SclkDid_MASK 0xff000000
+#define DPM_TABLE_115__GraphicsLevel_1_SclkDid__SHIFT 0x18
+#define DPM_TABLE_116__GraphicsLevel_1_PowerThrottle_MASK 0xff
+#define DPM_TABLE_116__GraphicsLevel_1_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_116__GraphicsLevel_1_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_116__GraphicsLevel_1_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_116__GraphicsLevel_1_DownHyst_MASK 0xff0000
+#define DPM_TABLE_116__GraphicsLevel_1_DownHyst__SHIFT 0x10
+#define DPM_TABLE_116__GraphicsLevel_1_UpHyst_MASK 0xff000000
+#define DPM_TABLE_116__GraphicsLevel_1_UpHyst__SHIFT 0x18
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_117__GraphicsLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_118__GraphicsLevel_2_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_118__GraphicsLevel_2_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_119__GraphicsLevel_2_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_119__GraphicsLevel_2_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_119__GraphicsLevel_2_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_119__GraphicsLevel_2_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_119__GraphicsLevel_2_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_119__GraphicsLevel_2_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_120__GraphicsLevel_2_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_120__GraphicsLevel_2_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_121__GraphicsLevel_2_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_121__GraphicsLevel_2_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_122__GraphicsLevel_2_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_122__GraphicsLevel_2_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_123__GraphicsLevel_2_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_123__GraphicsLevel_2_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_124__GraphicsLevel_2_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_124__GraphicsLevel_2_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_125__GraphicsLevel_2_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_125__GraphicsLevel_2_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_126__GraphicsLevel_2_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_126__GraphicsLevel_2_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_126__GraphicsLevel_2_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_126__GraphicsLevel_2_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_126__GraphicsLevel_2_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_126__GraphicsLevel_2_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_126__GraphicsLevel_2_SclkDid_MASK 0xff000000
+#define DPM_TABLE_126__GraphicsLevel_2_SclkDid__SHIFT 0x18
+#define DPM_TABLE_127__GraphicsLevel_2_PowerThrottle_MASK 0xff
+#define DPM_TABLE_127__GraphicsLevel_2_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_127__GraphicsLevel_2_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_127__GraphicsLevel_2_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_127__GraphicsLevel_2_DownHyst_MASK 0xff0000
+#define DPM_TABLE_127__GraphicsLevel_2_DownHyst__SHIFT 0x10
+#define DPM_TABLE_127__GraphicsLevel_2_UpHyst_MASK 0xff000000
+#define DPM_TABLE_127__GraphicsLevel_2_UpHyst__SHIFT 0x18
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_128__GraphicsLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_129__GraphicsLevel_3_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_129__GraphicsLevel_3_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_130__GraphicsLevel_3_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_130__GraphicsLevel_3_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_130__GraphicsLevel_3_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_130__GraphicsLevel_3_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_130__GraphicsLevel_3_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_130__GraphicsLevel_3_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_131__GraphicsLevel_3_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_131__GraphicsLevel_3_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_132__GraphicsLevel_3_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_132__GraphicsLevel_3_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_133__GraphicsLevel_3_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_133__GraphicsLevel_3_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_134__GraphicsLevel_3_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_134__GraphicsLevel_3_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_135__GraphicsLevel_3_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_135__GraphicsLevel_3_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_136__GraphicsLevel_3_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_136__GraphicsLevel_3_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_137__GraphicsLevel_3_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_137__GraphicsLevel_3_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_137__GraphicsLevel_3_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_137__GraphicsLevel_3_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_137__GraphicsLevel_3_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_137__GraphicsLevel_3_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_137__GraphicsLevel_3_SclkDid_MASK 0xff000000
+#define DPM_TABLE_137__GraphicsLevel_3_SclkDid__SHIFT 0x18
+#define DPM_TABLE_138__GraphicsLevel_3_PowerThrottle_MASK 0xff
+#define DPM_TABLE_138__GraphicsLevel_3_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_138__GraphicsLevel_3_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_138__GraphicsLevel_3_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_138__GraphicsLevel_3_DownHyst_MASK 0xff0000
+#define DPM_TABLE_138__GraphicsLevel_3_DownHyst__SHIFT 0x10
+#define DPM_TABLE_138__GraphicsLevel_3_UpHyst_MASK 0xff000000
+#define DPM_TABLE_138__GraphicsLevel_3_UpHyst__SHIFT 0x18
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_139__GraphicsLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_140__GraphicsLevel_4_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_140__GraphicsLevel_4_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_141__GraphicsLevel_4_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_141__GraphicsLevel_4_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_141__GraphicsLevel_4_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_141__GraphicsLevel_4_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_141__GraphicsLevel_4_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_141__GraphicsLevel_4_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_142__GraphicsLevel_4_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_142__GraphicsLevel_4_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_143__GraphicsLevel_4_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_143__GraphicsLevel_4_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_144__GraphicsLevel_4_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_144__GraphicsLevel_4_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_145__GraphicsLevel_4_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_145__GraphicsLevel_4_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_146__GraphicsLevel_4_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_146__GraphicsLevel_4_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_147__GraphicsLevel_4_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_147__GraphicsLevel_4_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_148__GraphicsLevel_4_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_148__GraphicsLevel_4_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_148__GraphicsLevel_4_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_148__GraphicsLevel_4_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_148__GraphicsLevel_4_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_148__GraphicsLevel_4_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_148__GraphicsLevel_4_SclkDid_MASK 0xff000000
+#define DPM_TABLE_148__GraphicsLevel_4_SclkDid__SHIFT 0x18
+#define DPM_TABLE_149__GraphicsLevel_4_PowerThrottle_MASK 0xff
+#define DPM_TABLE_149__GraphicsLevel_4_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_149__GraphicsLevel_4_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_149__GraphicsLevel_4_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_149__GraphicsLevel_4_DownHyst_MASK 0xff0000
+#define DPM_TABLE_149__GraphicsLevel_4_DownHyst__SHIFT 0x10
+#define DPM_TABLE_149__GraphicsLevel_4_UpHyst_MASK 0xff000000
+#define DPM_TABLE_149__GraphicsLevel_4_UpHyst__SHIFT 0x18
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_150__GraphicsLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_151__GraphicsLevel_5_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_151__GraphicsLevel_5_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_152__GraphicsLevel_5_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_152__GraphicsLevel_5_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_152__GraphicsLevel_5_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_152__GraphicsLevel_5_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_152__GraphicsLevel_5_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_152__GraphicsLevel_5_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_153__GraphicsLevel_5_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_153__GraphicsLevel_5_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_154__GraphicsLevel_5_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_154__GraphicsLevel_5_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_155__GraphicsLevel_5_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_155__GraphicsLevel_5_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_156__GraphicsLevel_5_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_156__GraphicsLevel_5_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_157__GraphicsLevel_5_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_157__GraphicsLevel_5_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_158__GraphicsLevel_5_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_158__GraphicsLevel_5_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_159__GraphicsLevel_5_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_159__GraphicsLevel_5_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_159__GraphicsLevel_5_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_159__GraphicsLevel_5_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_159__GraphicsLevel_5_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_159__GraphicsLevel_5_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_159__GraphicsLevel_5_SclkDid_MASK 0xff000000
+#define DPM_TABLE_159__GraphicsLevel_5_SclkDid__SHIFT 0x18
+#define DPM_TABLE_160__GraphicsLevel_5_PowerThrottle_MASK 0xff
+#define DPM_TABLE_160__GraphicsLevel_5_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_160__GraphicsLevel_5_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_160__GraphicsLevel_5_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_160__GraphicsLevel_5_DownHyst_MASK 0xff0000
+#define DPM_TABLE_160__GraphicsLevel_5_DownHyst__SHIFT 0x10
+#define DPM_TABLE_160__GraphicsLevel_5_UpHyst_MASK 0xff000000
+#define DPM_TABLE_160__GraphicsLevel_5_UpHyst__SHIFT 0x18
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_161__GraphicsLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_162__GraphicsLevel_6_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_162__GraphicsLevel_6_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_163__GraphicsLevel_6_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_163__GraphicsLevel_6_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_163__GraphicsLevel_6_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_163__GraphicsLevel_6_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_163__GraphicsLevel_6_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_163__GraphicsLevel_6_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_164__GraphicsLevel_6_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_164__GraphicsLevel_6_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_165__GraphicsLevel_6_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_165__GraphicsLevel_6_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_166__GraphicsLevel_6_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_166__GraphicsLevel_6_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_167__GraphicsLevel_6_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_167__GraphicsLevel_6_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_168__GraphicsLevel_6_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_168__GraphicsLevel_6_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_169__GraphicsLevel_6_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_169__GraphicsLevel_6_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_170__GraphicsLevel_6_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_170__GraphicsLevel_6_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_170__GraphicsLevel_6_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_170__GraphicsLevel_6_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_170__GraphicsLevel_6_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_170__GraphicsLevel_6_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_170__GraphicsLevel_6_SclkDid_MASK 0xff000000
+#define DPM_TABLE_170__GraphicsLevel_6_SclkDid__SHIFT 0x18
+#define DPM_TABLE_171__GraphicsLevel_6_PowerThrottle_MASK 0xff
+#define DPM_TABLE_171__GraphicsLevel_6_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_171__GraphicsLevel_6_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_171__GraphicsLevel_6_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_171__GraphicsLevel_6_DownHyst_MASK 0xff0000
+#define DPM_TABLE_171__GraphicsLevel_6_DownHyst__SHIFT 0x10
+#define DPM_TABLE_171__GraphicsLevel_6_UpHyst_MASK 0xff000000
+#define DPM_TABLE_171__GraphicsLevel_6_UpHyst__SHIFT 0x18
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_172__GraphicsLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_173__GraphicsLevel_7_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_173__GraphicsLevel_7_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_174__GraphicsLevel_7_ActivityLevel_MASK 0xffff
+#define DPM_TABLE_174__GraphicsLevel_7_ActivityLevel__SHIFT 0x0
+#define DPM_TABLE_174__GraphicsLevel_7_DeepSleepDivId_MASK 0xff0000
+#define DPM_TABLE_174__GraphicsLevel_7_DeepSleepDivId__SHIFT 0x10
+#define DPM_TABLE_174__GraphicsLevel_7_pcieDpmLevel_MASK 0xff000000
+#define DPM_TABLE_174__GraphicsLevel_7_pcieDpmLevel__SHIFT 0x18
+#define DPM_TABLE_175__GraphicsLevel_7_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_175__GraphicsLevel_7_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_176__GraphicsLevel_7_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_176__GraphicsLevel_7_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_177__GraphicsLevel_7_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_177__GraphicsLevel_7_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_178__GraphicsLevel_7_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_178__GraphicsLevel_7_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_179__GraphicsLevel_7_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_179__GraphicsLevel_7_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_180__GraphicsLevel_7_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_180__GraphicsLevel_7_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_181__GraphicsLevel_7_EnabledForThrottle_MASK 0xff
+#define DPM_TABLE_181__GraphicsLevel_7_EnabledForThrottle__SHIFT 0x0
+#define DPM_TABLE_181__GraphicsLevel_7_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_181__GraphicsLevel_7_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_181__GraphicsLevel_7_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_181__GraphicsLevel_7_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_181__GraphicsLevel_7_SclkDid_MASK 0xff000000
+#define DPM_TABLE_181__GraphicsLevel_7_SclkDid__SHIFT 0x18
+#define DPM_TABLE_182__GraphicsLevel_7_PowerThrottle_MASK 0xff
+#define DPM_TABLE_182__GraphicsLevel_7_PowerThrottle__SHIFT 0x0
+#define DPM_TABLE_182__GraphicsLevel_7_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_182__GraphicsLevel_7_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_182__GraphicsLevel_7_DownHyst_MASK 0xff0000
+#define DPM_TABLE_182__GraphicsLevel_7_DownHyst__SHIFT 0x10
+#define DPM_TABLE_182__GraphicsLevel_7_UpHyst_MASK 0xff000000
+#define DPM_TABLE_182__GraphicsLevel_7_UpHyst__SHIFT 0x18
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_183__MemoryACPILevel_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_184__MemoryACPILevel_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_184__MemoryACPILevel_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_185__MemoryACPILevel_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_185__MemoryACPILevel_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_186__MemoryACPILevel_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_186__MemoryACPILevel_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_186__MemoryACPILevel_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_186__MemoryACPILevel_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_186__MemoryACPILevel_FreqRange_MASK 0xff0000
+#define DPM_TABLE_186__MemoryACPILevel_FreqRange__SHIFT 0x10
+#define DPM_TABLE_186__MemoryACPILevel_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_186__MemoryACPILevel_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_187__MemoryACPILevel_padding_MASK 0xff
+#define DPM_TABLE_187__MemoryACPILevel_padding__SHIFT 0x0
+#define DPM_TABLE_187__MemoryACPILevel_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_187__MemoryACPILevel_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_187__MemoryACPILevel_DownHyst_MASK 0xff0000
+#define DPM_TABLE_187__MemoryACPILevel_DownHyst__SHIFT 0x10
+#define DPM_TABLE_187__MemoryACPILevel_UpHyst_MASK 0xff000000
+#define DPM_TABLE_187__MemoryACPILevel_UpHyst__SHIFT 0x18
+#define DPM_TABLE_188__MemoryACPILevel_MclkDivider_MASK 0xff
+#define DPM_TABLE_188__MemoryACPILevel_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_188__MemoryACPILevel_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_188__MemoryACPILevel_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_188__MemoryACPILevel_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_188__MemoryACPILevel_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_189__MemoryLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_190__MemoryLevel_0_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_190__MemoryLevel_0_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_191__MemoryLevel_0_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_191__MemoryLevel_0_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_192__MemoryLevel_0_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_192__MemoryLevel_0_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_192__MemoryLevel_0_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_192__MemoryLevel_0_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_192__MemoryLevel_0_FreqRange_MASK 0xff0000
+#define DPM_TABLE_192__MemoryLevel_0_FreqRange__SHIFT 0x10
+#define DPM_TABLE_192__MemoryLevel_0_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_192__MemoryLevel_0_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_193__MemoryLevel_0_padding_MASK 0xff
+#define DPM_TABLE_193__MemoryLevel_0_padding__SHIFT 0x0
+#define DPM_TABLE_193__MemoryLevel_0_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_193__MemoryLevel_0_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_193__MemoryLevel_0_DownHyst_MASK 0xff0000
+#define DPM_TABLE_193__MemoryLevel_0_DownHyst__SHIFT 0x10
+#define DPM_TABLE_193__MemoryLevel_0_UpHyst_MASK 0xff000000
+#define DPM_TABLE_193__MemoryLevel_0_UpHyst__SHIFT 0x18
+#define DPM_TABLE_194__MemoryLevel_0_MclkDivider_MASK 0xff
+#define DPM_TABLE_194__MemoryLevel_0_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_194__MemoryLevel_0_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_194__MemoryLevel_0_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_194__MemoryLevel_0_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_194__MemoryLevel_0_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_195__MemoryLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_196__MemoryLevel_1_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_196__MemoryLevel_1_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_197__MemoryLevel_1_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_197__MemoryLevel_1_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_198__MemoryLevel_1_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_198__MemoryLevel_1_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_198__MemoryLevel_1_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_198__MemoryLevel_1_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_198__MemoryLevel_1_FreqRange_MASK 0xff0000
+#define DPM_TABLE_198__MemoryLevel_1_FreqRange__SHIFT 0x10
+#define DPM_TABLE_198__MemoryLevel_1_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_198__MemoryLevel_1_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_199__MemoryLevel_1_padding_MASK 0xff
+#define DPM_TABLE_199__MemoryLevel_1_padding__SHIFT 0x0
+#define DPM_TABLE_199__MemoryLevel_1_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_199__MemoryLevel_1_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_199__MemoryLevel_1_DownHyst_MASK 0xff0000
+#define DPM_TABLE_199__MemoryLevel_1_DownHyst__SHIFT 0x10
+#define DPM_TABLE_199__MemoryLevel_1_UpHyst_MASK 0xff000000
+#define DPM_TABLE_199__MemoryLevel_1_UpHyst__SHIFT 0x18
+#define DPM_TABLE_200__MemoryLevel_1_MclkDivider_MASK 0xff
+#define DPM_TABLE_200__MemoryLevel_1_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_200__MemoryLevel_1_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_200__MemoryLevel_1_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_200__MemoryLevel_1_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_200__MemoryLevel_1_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_201__MemoryLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_202__MemoryLevel_2_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_202__MemoryLevel_2_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_203__MemoryLevel_2_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_203__MemoryLevel_2_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_204__MemoryLevel_2_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_204__MemoryLevel_2_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_204__MemoryLevel_2_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_204__MemoryLevel_2_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_204__MemoryLevel_2_FreqRange_MASK 0xff0000
+#define DPM_TABLE_204__MemoryLevel_2_FreqRange__SHIFT 0x10
+#define DPM_TABLE_204__MemoryLevel_2_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_204__MemoryLevel_2_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_205__MemoryLevel_2_padding_MASK 0xff
+#define DPM_TABLE_205__MemoryLevel_2_padding__SHIFT 0x0
+#define DPM_TABLE_205__MemoryLevel_2_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_205__MemoryLevel_2_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_205__MemoryLevel_2_DownHyst_MASK 0xff0000
+#define DPM_TABLE_205__MemoryLevel_2_DownHyst__SHIFT 0x10
+#define DPM_TABLE_205__MemoryLevel_2_UpHyst_MASK 0xff000000
+#define DPM_TABLE_205__MemoryLevel_2_UpHyst__SHIFT 0x18
+#define DPM_TABLE_206__MemoryLevel_2_MclkDivider_MASK 0xff
+#define DPM_TABLE_206__MemoryLevel_2_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_206__MemoryLevel_2_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_206__MemoryLevel_2_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_206__MemoryLevel_2_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_206__MemoryLevel_2_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_207__MemoryLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_208__MemoryLevel_3_MinMvdd_MASK 0xffffffff
+#define DPM_TABLE_208__MemoryLevel_3_MinMvdd__SHIFT 0x0
+#define DPM_TABLE_209__MemoryLevel_3_MclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_209__MemoryLevel_3_MclkFrequency__SHIFT 0x0
+#define DPM_TABLE_210__MemoryLevel_3_EnabledForActivity_MASK 0xff
+#define DPM_TABLE_210__MemoryLevel_3_EnabledForActivity__SHIFT 0x0
+#define DPM_TABLE_210__MemoryLevel_3_EnabledForThrottle_MASK 0xff00
+#define DPM_TABLE_210__MemoryLevel_3_EnabledForThrottle__SHIFT 0x8
+#define DPM_TABLE_210__MemoryLevel_3_FreqRange_MASK 0xff0000
+#define DPM_TABLE_210__MemoryLevel_3_FreqRange__SHIFT 0x10
+#define DPM_TABLE_210__MemoryLevel_3_StutterEnable_MASK 0xff000000
+#define DPM_TABLE_210__MemoryLevel_3_StutterEnable__SHIFT 0x18
+#define DPM_TABLE_211__MemoryLevel_3_padding_MASK 0xff
+#define DPM_TABLE_211__MemoryLevel_3_padding__SHIFT 0x0
+#define DPM_TABLE_211__MemoryLevel_3_VoltageDownHyst_MASK 0xff00
+#define DPM_TABLE_211__MemoryLevel_3_VoltageDownHyst__SHIFT 0x8
+#define DPM_TABLE_211__MemoryLevel_3_DownHyst_MASK 0xff0000
+#define DPM_TABLE_211__MemoryLevel_3_DownHyst__SHIFT 0x10
+#define DPM_TABLE_211__MemoryLevel_3_UpHyst_MASK 0xff000000
+#define DPM_TABLE_211__MemoryLevel_3_UpHyst__SHIFT 0x18
+#define DPM_TABLE_212__MemoryLevel_3_MclkDivider_MASK 0xff
+#define DPM_TABLE_212__MemoryLevel_3_MclkDivider__SHIFT 0x0
+#define DPM_TABLE_212__MemoryLevel_3_DisplayWatermark_MASK 0xff00
+#define DPM_TABLE_212__MemoryLevel_3_DisplayWatermark__SHIFT 0x8
+#define DPM_TABLE_212__MemoryLevel_3_ActivityLevel_MASK 0xffff0000
+#define DPM_TABLE_212__MemoryLevel_3_ActivityLevel__SHIFT 0x10
+#define DPM_TABLE_213__LinkLevel_0_SPC_MASK 0xff
+#define DPM_TABLE_213__LinkLevel_0_SPC__SHIFT 0x0
+#define DPM_TABLE_213__LinkLevel_0_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_213__LinkLevel_0_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_213__LinkLevel_0_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_213__LinkLevel_0_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_213__LinkLevel_0_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_213__LinkLevel_0_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_214__LinkLevel_0_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_214__LinkLevel_0_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_215__LinkLevel_0_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_215__LinkLevel_0_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_216__LinkLevel_0_Reserved_MASK 0xffffffff
+#define DPM_TABLE_216__LinkLevel_0_Reserved__SHIFT 0x0
+#define DPM_TABLE_217__LinkLevel_1_SPC_MASK 0xff
+#define DPM_TABLE_217__LinkLevel_1_SPC__SHIFT 0x0
+#define DPM_TABLE_217__LinkLevel_1_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_217__LinkLevel_1_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_217__LinkLevel_1_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_217__LinkLevel_1_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_217__LinkLevel_1_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_217__LinkLevel_1_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_218__LinkLevel_1_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_218__LinkLevel_1_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_219__LinkLevel_1_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_219__LinkLevel_1_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_220__LinkLevel_1_Reserved_MASK 0xffffffff
+#define DPM_TABLE_220__LinkLevel_1_Reserved__SHIFT 0x0
+#define DPM_TABLE_221__LinkLevel_2_SPC_MASK 0xff
+#define DPM_TABLE_221__LinkLevel_2_SPC__SHIFT 0x0
+#define DPM_TABLE_221__LinkLevel_2_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_221__LinkLevel_2_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_221__LinkLevel_2_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_221__LinkLevel_2_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_221__LinkLevel_2_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_221__LinkLevel_2_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_222__LinkLevel_2_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_222__LinkLevel_2_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_223__LinkLevel_2_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_223__LinkLevel_2_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_224__LinkLevel_2_Reserved_MASK 0xffffffff
+#define DPM_TABLE_224__LinkLevel_2_Reserved__SHIFT 0x0
+#define DPM_TABLE_225__LinkLevel_3_SPC_MASK 0xff
+#define DPM_TABLE_225__LinkLevel_3_SPC__SHIFT 0x0
+#define DPM_TABLE_225__LinkLevel_3_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_225__LinkLevel_3_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_225__LinkLevel_3_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_225__LinkLevel_3_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_225__LinkLevel_3_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_225__LinkLevel_3_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_226__LinkLevel_3_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_226__LinkLevel_3_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_227__LinkLevel_3_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_227__LinkLevel_3_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_228__LinkLevel_3_Reserved_MASK 0xffffffff
+#define DPM_TABLE_228__LinkLevel_3_Reserved__SHIFT 0x0
+#define DPM_TABLE_229__LinkLevel_4_SPC_MASK 0xff
+#define DPM_TABLE_229__LinkLevel_4_SPC__SHIFT 0x0
+#define DPM_TABLE_229__LinkLevel_4_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_229__LinkLevel_4_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_229__LinkLevel_4_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_229__LinkLevel_4_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_229__LinkLevel_4_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_229__LinkLevel_4_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_230__LinkLevel_4_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_230__LinkLevel_4_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_231__LinkLevel_4_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_231__LinkLevel_4_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_232__LinkLevel_4_Reserved_MASK 0xffffffff
+#define DPM_TABLE_232__LinkLevel_4_Reserved__SHIFT 0x0
+#define DPM_TABLE_233__LinkLevel_5_SPC_MASK 0xff
+#define DPM_TABLE_233__LinkLevel_5_SPC__SHIFT 0x0
+#define DPM_TABLE_233__LinkLevel_5_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_233__LinkLevel_5_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_233__LinkLevel_5_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_233__LinkLevel_5_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_233__LinkLevel_5_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_233__LinkLevel_5_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_234__LinkLevel_5_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_234__LinkLevel_5_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_235__LinkLevel_5_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_235__LinkLevel_5_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_236__LinkLevel_5_Reserved_MASK 0xffffffff
+#define DPM_TABLE_236__LinkLevel_5_Reserved__SHIFT 0x0
+#define DPM_TABLE_237__LinkLevel_6_SPC_MASK 0xff
+#define DPM_TABLE_237__LinkLevel_6_SPC__SHIFT 0x0
+#define DPM_TABLE_237__LinkLevel_6_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_237__LinkLevel_6_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_237__LinkLevel_6_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_237__LinkLevel_6_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_237__LinkLevel_6_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_237__LinkLevel_6_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_238__LinkLevel_6_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_238__LinkLevel_6_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_239__LinkLevel_6_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_239__LinkLevel_6_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_240__LinkLevel_6_Reserved_MASK 0xffffffff
+#define DPM_TABLE_240__LinkLevel_6_Reserved__SHIFT 0x0
+#define DPM_TABLE_241__LinkLevel_7_SPC_MASK 0xff
+#define DPM_TABLE_241__LinkLevel_7_SPC__SHIFT 0x0
+#define DPM_TABLE_241__LinkLevel_7_EnabledForActivity_MASK 0xff00
+#define DPM_TABLE_241__LinkLevel_7_EnabledForActivity__SHIFT 0x8
+#define DPM_TABLE_241__LinkLevel_7_PcieLaneCount_MASK 0xff0000
+#define DPM_TABLE_241__LinkLevel_7_PcieLaneCount__SHIFT 0x10
+#define DPM_TABLE_241__LinkLevel_7_PcieGenSpeed_MASK 0xff000000
+#define DPM_TABLE_241__LinkLevel_7_PcieGenSpeed__SHIFT 0x18
+#define DPM_TABLE_242__LinkLevel_7_DownThreshold_MASK 0xffffffff
+#define DPM_TABLE_242__LinkLevel_7_DownThreshold__SHIFT 0x0
+#define DPM_TABLE_243__LinkLevel_7_UpThreshold_MASK 0xffffffff
+#define DPM_TABLE_243__LinkLevel_7_UpThreshold__SHIFT 0x0
+#define DPM_TABLE_244__LinkLevel_7_Reserved_MASK 0xffffffff
+#define DPM_TABLE_244__LinkLevel_7_Reserved__SHIFT 0x0
+#define DPM_TABLE_245__ACPILevel_Flags_MASK 0xffffffff
+#define DPM_TABLE_245__ACPILevel_Flags__SHIFT 0x0
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_246__ACPILevel_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_246__ACPILevel_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_246__ACPILevel_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_247__ACPILevel_SclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_247__ACPILevel_SclkFrequency__SHIFT 0x0
+#define DPM_TABLE_248__ACPILevel_padding_MASK 0xff
+#define DPM_TABLE_248__ACPILevel_padding__SHIFT 0x0
+#define DPM_TABLE_248__ACPILevel_DeepSleepDivId_MASK 0xff00
+#define DPM_TABLE_248__ACPILevel_DeepSleepDivId__SHIFT 0x8
+#define DPM_TABLE_248__ACPILevel_DisplayWatermark_MASK 0xff0000
+#define DPM_TABLE_248__ACPILevel_DisplayWatermark__SHIFT 0x10
+#define DPM_TABLE_248__ACPILevel_SclkDid_MASK 0xff000000
+#define DPM_TABLE_248__ACPILevel_SclkDid__SHIFT 0x18
+#define DPM_TABLE_249__ACPILevel_CgSpllFuncCntl_MASK 0xffffffff
+#define DPM_TABLE_249__ACPILevel_CgSpllFuncCntl__SHIFT 0x0
+#define DPM_TABLE_250__ACPILevel_CgSpllFuncCntl2_MASK 0xffffffff
+#define DPM_TABLE_250__ACPILevel_CgSpllFuncCntl2__SHIFT 0x0
+#define DPM_TABLE_251__ACPILevel_CgSpllFuncCntl3_MASK 0xffffffff
+#define DPM_TABLE_251__ACPILevel_CgSpllFuncCntl3__SHIFT 0x0
+#define DPM_TABLE_252__ACPILevel_CgSpllFuncCntl4_MASK 0xffffffff
+#define DPM_TABLE_252__ACPILevel_CgSpllFuncCntl4__SHIFT 0x0
+#define DPM_TABLE_253__ACPILevel_SpllSpreadSpectrum_MASK 0xffffffff
+#define DPM_TABLE_253__ACPILevel_SpllSpreadSpectrum__SHIFT 0x0
+#define DPM_TABLE_254__ACPILevel_SpllSpreadSpectrum2_MASK 0xffffffff
+#define DPM_TABLE_254__ACPILevel_SpllSpreadSpectrum2__SHIFT 0x0
+#define DPM_TABLE_255__ACPILevel_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_255__ACPILevel_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_256__ACPILevel_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_256__ACPILevel_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_257__UvdLevel_0_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_257__UvdLevel_0_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_258__UvdLevel_0_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_258__UvdLevel_0_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_259__UvdLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_260__UvdLevel_0_padding_1_MASK 0xff
+#define DPM_TABLE_260__UvdLevel_0_padding_1__SHIFT 0x0
+#define DPM_TABLE_260__UvdLevel_0_padding_0_MASK 0xff00
+#define DPM_TABLE_260__UvdLevel_0_padding_0__SHIFT 0x8
+#define DPM_TABLE_260__UvdLevel_0_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_260__UvdLevel_0_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_260__UvdLevel_0_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_260__UvdLevel_0_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_261__UvdLevel_1_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_261__UvdLevel_1_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_262__UvdLevel_1_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_262__UvdLevel_1_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_263__UvdLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_264__UvdLevel_1_padding_1_MASK 0xff
+#define DPM_TABLE_264__UvdLevel_1_padding_1__SHIFT 0x0
+#define DPM_TABLE_264__UvdLevel_1_padding_0_MASK 0xff00
+#define DPM_TABLE_264__UvdLevel_1_padding_0__SHIFT 0x8
+#define DPM_TABLE_264__UvdLevel_1_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_264__UvdLevel_1_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_264__UvdLevel_1_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_264__UvdLevel_1_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_265__UvdLevel_2_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_265__UvdLevel_2_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_266__UvdLevel_2_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_266__UvdLevel_2_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_267__UvdLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_268__UvdLevel_2_padding_1_MASK 0xff
+#define DPM_TABLE_268__UvdLevel_2_padding_1__SHIFT 0x0
+#define DPM_TABLE_268__UvdLevel_2_padding_0_MASK 0xff00
+#define DPM_TABLE_268__UvdLevel_2_padding_0__SHIFT 0x8
+#define DPM_TABLE_268__UvdLevel_2_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_268__UvdLevel_2_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_268__UvdLevel_2_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_268__UvdLevel_2_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_269__UvdLevel_3_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_269__UvdLevel_3_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_270__UvdLevel_3_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_270__UvdLevel_3_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_271__UvdLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_272__UvdLevel_3_padding_1_MASK 0xff
+#define DPM_TABLE_272__UvdLevel_3_padding_1__SHIFT 0x0
+#define DPM_TABLE_272__UvdLevel_3_padding_0_MASK 0xff00
+#define DPM_TABLE_272__UvdLevel_3_padding_0__SHIFT 0x8
+#define DPM_TABLE_272__UvdLevel_3_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_272__UvdLevel_3_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_272__UvdLevel_3_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_272__UvdLevel_3_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_273__UvdLevel_4_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_273__UvdLevel_4_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_274__UvdLevel_4_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_274__UvdLevel_4_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_275__UvdLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_276__UvdLevel_4_padding_1_MASK 0xff
+#define DPM_TABLE_276__UvdLevel_4_padding_1__SHIFT 0x0
+#define DPM_TABLE_276__UvdLevel_4_padding_0_MASK 0xff00
+#define DPM_TABLE_276__UvdLevel_4_padding_0__SHIFT 0x8
+#define DPM_TABLE_276__UvdLevel_4_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_276__UvdLevel_4_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_276__UvdLevel_4_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_276__UvdLevel_4_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_277__UvdLevel_5_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_277__UvdLevel_5_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_278__UvdLevel_5_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_278__UvdLevel_5_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_279__UvdLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_280__UvdLevel_5_padding_1_MASK 0xff
+#define DPM_TABLE_280__UvdLevel_5_padding_1__SHIFT 0x0
+#define DPM_TABLE_280__UvdLevel_5_padding_0_MASK 0xff00
+#define DPM_TABLE_280__UvdLevel_5_padding_0__SHIFT 0x8
+#define DPM_TABLE_280__UvdLevel_5_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_280__UvdLevel_5_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_280__UvdLevel_5_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_280__UvdLevel_5_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_281__UvdLevel_6_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_281__UvdLevel_6_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_282__UvdLevel_6_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_282__UvdLevel_6_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_283__UvdLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_284__UvdLevel_6_padding_1_MASK 0xff
+#define DPM_TABLE_284__UvdLevel_6_padding_1__SHIFT 0x0
+#define DPM_TABLE_284__UvdLevel_6_padding_0_MASK 0xff00
+#define DPM_TABLE_284__UvdLevel_6_padding_0__SHIFT 0x8
+#define DPM_TABLE_284__UvdLevel_6_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_284__UvdLevel_6_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_284__UvdLevel_6_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_284__UvdLevel_6_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_285__UvdLevel_7_VclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_285__UvdLevel_7_VclkFrequency__SHIFT 0x0
+#define DPM_TABLE_286__UvdLevel_7_DclkFrequency_MASK 0xffffffff
+#define DPM_TABLE_286__UvdLevel_7_DclkFrequency__SHIFT 0x0
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_287__UvdLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_288__UvdLevel_7_padding_1_MASK 0xff
+#define DPM_TABLE_288__UvdLevel_7_padding_1__SHIFT 0x0
+#define DPM_TABLE_288__UvdLevel_7_padding_0_MASK 0xff00
+#define DPM_TABLE_288__UvdLevel_7_padding_0__SHIFT 0x8
+#define DPM_TABLE_288__UvdLevel_7_DclkDivider_MASK 0xff0000
+#define DPM_TABLE_288__UvdLevel_7_DclkDivider__SHIFT 0x10
+#define DPM_TABLE_288__UvdLevel_7_VclkDivider_MASK 0xff000000
+#define DPM_TABLE_288__UvdLevel_7_VclkDivider__SHIFT 0x18
+#define DPM_TABLE_289__VceLevel_0_Frequency_MASK 0xffffffff
+#define DPM_TABLE_289__VceLevel_0_Frequency__SHIFT 0x0
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_290__VceLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_291__VceLevel_0_padding_2_MASK 0xff
+#define DPM_TABLE_291__VceLevel_0_padding_2__SHIFT 0x0
+#define DPM_TABLE_291__VceLevel_0_padding_1_MASK 0xff00
+#define DPM_TABLE_291__VceLevel_0_padding_1__SHIFT 0x8
+#define DPM_TABLE_291__VceLevel_0_padding_0_MASK 0xff0000
+#define DPM_TABLE_291__VceLevel_0_padding_0__SHIFT 0x10
+#define DPM_TABLE_291__VceLevel_0_Divider_MASK 0xff000000
+#define DPM_TABLE_291__VceLevel_0_Divider__SHIFT 0x18
+#define DPM_TABLE_292__VceLevel_1_Frequency_MASK 0xffffffff
+#define DPM_TABLE_292__VceLevel_1_Frequency__SHIFT 0x0
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_293__VceLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_294__VceLevel_1_padding_2_MASK 0xff
+#define DPM_TABLE_294__VceLevel_1_padding_2__SHIFT 0x0
+#define DPM_TABLE_294__VceLevel_1_padding_1_MASK 0xff00
+#define DPM_TABLE_294__VceLevel_1_padding_1__SHIFT 0x8
+#define DPM_TABLE_294__VceLevel_1_padding_0_MASK 0xff0000
+#define DPM_TABLE_294__VceLevel_1_padding_0__SHIFT 0x10
+#define DPM_TABLE_294__VceLevel_1_Divider_MASK 0xff000000
+#define DPM_TABLE_294__VceLevel_1_Divider__SHIFT 0x18
+#define DPM_TABLE_295__VceLevel_2_Frequency_MASK 0xffffffff
+#define DPM_TABLE_295__VceLevel_2_Frequency__SHIFT 0x0
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_296__VceLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_297__VceLevel_2_padding_2_MASK 0xff
+#define DPM_TABLE_297__VceLevel_2_padding_2__SHIFT 0x0
+#define DPM_TABLE_297__VceLevel_2_padding_1_MASK 0xff00
+#define DPM_TABLE_297__VceLevel_2_padding_1__SHIFT 0x8
+#define DPM_TABLE_297__VceLevel_2_padding_0_MASK 0xff0000
+#define DPM_TABLE_297__VceLevel_2_padding_0__SHIFT 0x10
+#define DPM_TABLE_297__VceLevel_2_Divider_MASK 0xff000000
+#define DPM_TABLE_297__VceLevel_2_Divider__SHIFT 0x18
+#define DPM_TABLE_298__VceLevel_3_Frequency_MASK 0xffffffff
+#define DPM_TABLE_298__VceLevel_3_Frequency__SHIFT 0x0
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_299__VceLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_300__VceLevel_3_padding_2_MASK 0xff
+#define DPM_TABLE_300__VceLevel_3_padding_2__SHIFT 0x0
+#define DPM_TABLE_300__VceLevel_3_padding_1_MASK 0xff00
+#define DPM_TABLE_300__VceLevel_3_padding_1__SHIFT 0x8
+#define DPM_TABLE_300__VceLevel_3_padding_0_MASK 0xff0000
+#define DPM_TABLE_300__VceLevel_3_padding_0__SHIFT 0x10
+#define DPM_TABLE_300__VceLevel_3_Divider_MASK 0xff000000
+#define DPM_TABLE_300__VceLevel_3_Divider__SHIFT 0x18
+#define DPM_TABLE_301__VceLevel_4_Frequency_MASK 0xffffffff
+#define DPM_TABLE_301__VceLevel_4_Frequency__SHIFT 0x0
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_302__VceLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_303__VceLevel_4_padding_2_MASK 0xff
+#define DPM_TABLE_303__VceLevel_4_padding_2__SHIFT 0x0
+#define DPM_TABLE_303__VceLevel_4_padding_1_MASK 0xff00
+#define DPM_TABLE_303__VceLevel_4_padding_1__SHIFT 0x8
+#define DPM_TABLE_303__VceLevel_4_padding_0_MASK 0xff0000
+#define DPM_TABLE_303__VceLevel_4_padding_0__SHIFT 0x10
+#define DPM_TABLE_303__VceLevel_4_Divider_MASK 0xff000000
+#define DPM_TABLE_303__VceLevel_4_Divider__SHIFT 0x18
+#define DPM_TABLE_304__VceLevel_5_Frequency_MASK 0xffffffff
+#define DPM_TABLE_304__VceLevel_5_Frequency__SHIFT 0x0
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_305__VceLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_306__VceLevel_5_padding_2_MASK 0xff
+#define DPM_TABLE_306__VceLevel_5_padding_2__SHIFT 0x0
+#define DPM_TABLE_306__VceLevel_5_padding_1_MASK 0xff00
+#define DPM_TABLE_306__VceLevel_5_padding_1__SHIFT 0x8
+#define DPM_TABLE_306__VceLevel_5_padding_0_MASK 0xff0000
+#define DPM_TABLE_306__VceLevel_5_padding_0__SHIFT 0x10
+#define DPM_TABLE_306__VceLevel_5_Divider_MASK 0xff000000
+#define DPM_TABLE_306__VceLevel_5_Divider__SHIFT 0x18
+#define DPM_TABLE_307__VceLevel_6_Frequency_MASK 0xffffffff
+#define DPM_TABLE_307__VceLevel_6_Frequency__SHIFT 0x0
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_308__VceLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_309__VceLevel_6_padding_2_MASK 0xff
+#define DPM_TABLE_309__VceLevel_6_padding_2__SHIFT 0x0
+#define DPM_TABLE_309__VceLevel_6_padding_1_MASK 0xff00
+#define DPM_TABLE_309__VceLevel_6_padding_1__SHIFT 0x8
+#define DPM_TABLE_309__VceLevel_6_padding_0_MASK 0xff0000
+#define DPM_TABLE_309__VceLevel_6_padding_0__SHIFT 0x10
+#define DPM_TABLE_309__VceLevel_6_Divider_MASK 0xff000000
+#define DPM_TABLE_309__VceLevel_6_Divider__SHIFT 0x18
+#define DPM_TABLE_310__VceLevel_7_Frequency_MASK 0xffffffff
+#define DPM_TABLE_310__VceLevel_7_Frequency__SHIFT 0x0
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_311__VceLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_312__VceLevel_7_padding_2_MASK 0xff
+#define DPM_TABLE_312__VceLevel_7_padding_2__SHIFT 0x0
+#define DPM_TABLE_312__VceLevel_7_padding_1_MASK 0xff00
+#define DPM_TABLE_312__VceLevel_7_padding_1__SHIFT 0x8
+#define DPM_TABLE_312__VceLevel_7_padding_0_MASK 0xff0000
+#define DPM_TABLE_312__VceLevel_7_padding_0__SHIFT 0x10
+#define DPM_TABLE_312__VceLevel_7_Divider_MASK 0xff000000
+#define DPM_TABLE_312__VceLevel_7_Divider__SHIFT 0x18
+#define DPM_TABLE_313__AcpLevel_0_Frequency_MASK 0xffffffff
+#define DPM_TABLE_313__AcpLevel_0_Frequency__SHIFT 0x0
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_314__AcpLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_315__AcpLevel_0_padding_2_MASK 0xff
+#define DPM_TABLE_315__AcpLevel_0_padding_2__SHIFT 0x0
+#define DPM_TABLE_315__AcpLevel_0_padding_1_MASK 0xff00
+#define DPM_TABLE_315__AcpLevel_0_padding_1__SHIFT 0x8
+#define DPM_TABLE_315__AcpLevel_0_padding_0_MASK 0xff0000
+#define DPM_TABLE_315__AcpLevel_0_padding_0__SHIFT 0x10
+#define DPM_TABLE_315__AcpLevel_0_Divider_MASK 0xff000000
+#define DPM_TABLE_315__AcpLevel_0_Divider__SHIFT 0x18
+#define DPM_TABLE_316__AcpLevel_1_Frequency_MASK 0xffffffff
+#define DPM_TABLE_316__AcpLevel_1_Frequency__SHIFT 0x0
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_317__AcpLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_318__AcpLevel_1_padding_2_MASK 0xff
+#define DPM_TABLE_318__AcpLevel_1_padding_2__SHIFT 0x0
+#define DPM_TABLE_318__AcpLevel_1_padding_1_MASK 0xff00
+#define DPM_TABLE_318__AcpLevel_1_padding_1__SHIFT 0x8
+#define DPM_TABLE_318__AcpLevel_1_padding_0_MASK 0xff0000
+#define DPM_TABLE_318__AcpLevel_1_padding_0__SHIFT 0x10
+#define DPM_TABLE_318__AcpLevel_1_Divider_MASK 0xff000000
+#define DPM_TABLE_318__AcpLevel_1_Divider__SHIFT 0x18
+#define DPM_TABLE_319__AcpLevel_2_Frequency_MASK 0xffffffff
+#define DPM_TABLE_319__AcpLevel_2_Frequency__SHIFT 0x0
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_320__AcpLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_321__AcpLevel_2_padding_2_MASK 0xff
+#define DPM_TABLE_321__AcpLevel_2_padding_2__SHIFT 0x0
+#define DPM_TABLE_321__AcpLevel_2_padding_1_MASK 0xff00
+#define DPM_TABLE_321__AcpLevel_2_padding_1__SHIFT 0x8
+#define DPM_TABLE_321__AcpLevel_2_padding_0_MASK 0xff0000
+#define DPM_TABLE_321__AcpLevel_2_padding_0__SHIFT 0x10
+#define DPM_TABLE_321__AcpLevel_2_Divider_MASK 0xff000000
+#define DPM_TABLE_321__AcpLevel_2_Divider__SHIFT 0x18
+#define DPM_TABLE_322__AcpLevel_3_Frequency_MASK 0xffffffff
+#define DPM_TABLE_322__AcpLevel_3_Frequency__SHIFT 0x0
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_323__AcpLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_324__AcpLevel_3_padding_2_MASK 0xff
+#define DPM_TABLE_324__AcpLevel_3_padding_2__SHIFT 0x0
+#define DPM_TABLE_324__AcpLevel_3_padding_1_MASK 0xff00
+#define DPM_TABLE_324__AcpLevel_3_padding_1__SHIFT 0x8
+#define DPM_TABLE_324__AcpLevel_3_padding_0_MASK 0xff0000
+#define DPM_TABLE_324__AcpLevel_3_padding_0__SHIFT 0x10
+#define DPM_TABLE_324__AcpLevel_3_Divider_MASK 0xff000000
+#define DPM_TABLE_324__AcpLevel_3_Divider__SHIFT 0x18
+#define DPM_TABLE_325__AcpLevel_4_Frequency_MASK 0xffffffff
+#define DPM_TABLE_325__AcpLevel_4_Frequency__SHIFT 0x0
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_326__AcpLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_327__AcpLevel_4_padding_2_MASK 0xff
+#define DPM_TABLE_327__AcpLevel_4_padding_2__SHIFT 0x0
+#define DPM_TABLE_327__AcpLevel_4_padding_1_MASK 0xff00
+#define DPM_TABLE_327__AcpLevel_4_padding_1__SHIFT 0x8
+#define DPM_TABLE_327__AcpLevel_4_padding_0_MASK 0xff0000
+#define DPM_TABLE_327__AcpLevel_4_padding_0__SHIFT 0x10
+#define DPM_TABLE_327__AcpLevel_4_Divider_MASK 0xff000000
+#define DPM_TABLE_327__AcpLevel_4_Divider__SHIFT 0x18
+#define DPM_TABLE_328__AcpLevel_5_Frequency_MASK 0xffffffff
+#define DPM_TABLE_328__AcpLevel_5_Frequency__SHIFT 0x0
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_329__AcpLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_330__AcpLevel_5_padding_2_MASK 0xff
+#define DPM_TABLE_330__AcpLevel_5_padding_2__SHIFT 0x0
+#define DPM_TABLE_330__AcpLevel_5_padding_1_MASK 0xff00
+#define DPM_TABLE_330__AcpLevel_5_padding_1__SHIFT 0x8
+#define DPM_TABLE_330__AcpLevel_5_padding_0_MASK 0xff0000
+#define DPM_TABLE_330__AcpLevel_5_padding_0__SHIFT 0x10
+#define DPM_TABLE_330__AcpLevel_5_Divider_MASK 0xff000000
+#define DPM_TABLE_330__AcpLevel_5_Divider__SHIFT 0x18
+#define DPM_TABLE_331__AcpLevel_6_Frequency_MASK 0xffffffff
+#define DPM_TABLE_331__AcpLevel_6_Frequency__SHIFT 0x0
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_332__AcpLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_333__AcpLevel_6_padding_2_MASK 0xff
+#define DPM_TABLE_333__AcpLevel_6_padding_2__SHIFT 0x0
+#define DPM_TABLE_333__AcpLevel_6_padding_1_MASK 0xff00
+#define DPM_TABLE_333__AcpLevel_6_padding_1__SHIFT 0x8
+#define DPM_TABLE_333__AcpLevel_6_padding_0_MASK 0xff0000
+#define DPM_TABLE_333__AcpLevel_6_padding_0__SHIFT 0x10
+#define DPM_TABLE_333__AcpLevel_6_Divider_MASK 0xff000000
+#define DPM_TABLE_333__AcpLevel_6_Divider__SHIFT 0x18
+#define DPM_TABLE_334__AcpLevel_7_Frequency_MASK 0xffffffff
+#define DPM_TABLE_334__AcpLevel_7_Frequency__SHIFT 0x0
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_335__AcpLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_336__AcpLevel_7_padding_2_MASK 0xff
+#define DPM_TABLE_336__AcpLevel_7_padding_2__SHIFT 0x0
+#define DPM_TABLE_336__AcpLevel_7_padding_1_MASK 0xff00
+#define DPM_TABLE_336__AcpLevel_7_padding_1__SHIFT 0x8
+#define DPM_TABLE_336__AcpLevel_7_padding_0_MASK 0xff0000
+#define DPM_TABLE_336__AcpLevel_7_padding_0__SHIFT 0x10
+#define DPM_TABLE_336__AcpLevel_7_Divider_MASK 0xff000000
+#define DPM_TABLE_336__AcpLevel_7_Divider__SHIFT 0x18
+#define DPM_TABLE_337__SamuLevel_0_Frequency_MASK 0xffffffff
+#define DPM_TABLE_337__SamuLevel_0_Frequency__SHIFT 0x0
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_338__SamuLevel_0_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_339__SamuLevel_0_padding_2_MASK 0xff
+#define DPM_TABLE_339__SamuLevel_0_padding_2__SHIFT 0x0
+#define DPM_TABLE_339__SamuLevel_0_padding_1_MASK 0xff00
+#define DPM_TABLE_339__SamuLevel_0_padding_1__SHIFT 0x8
+#define DPM_TABLE_339__SamuLevel_0_padding_0_MASK 0xff0000
+#define DPM_TABLE_339__SamuLevel_0_padding_0__SHIFT 0x10
+#define DPM_TABLE_339__SamuLevel_0_Divider_MASK 0xff000000
+#define DPM_TABLE_339__SamuLevel_0_Divider__SHIFT 0x18
+#define DPM_TABLE_340__SamuLevel_1_Frequency_MASK 0xffffffff
+#define DPM_TABLE_340__SamuLevel_1_Frequency__SHIFT 0x0
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_341__SamuLevel_1_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_342__SamuLevel_1_padding_2_MASK 0xff
+#define DPM_TABLE_342__SamuLevel_1_padding_2__SHIFT 0x0
+#define DPM_TABLE_342__SamuLevel_1_padding_1_MASK 0xff00
+#define DPM_TABLE_342__SamuLevel_1_padding_1__SHIFT 0x8
+#define DPM_TABLE_342__SamuLevel_1_padding_0_MASK 0xff0000
+#define DPM_TABLE_342__SamuLevel_1_padding_0__SHIFT 0x10
+#define DPM_TABLE_342__SamuLevel_1_Divider_MASK 0xff000000
+#define DPM_TABLE_342__SamuLevel_1_Divider__SHIFT 0x18
+#define DPM_TABLE_343__SamuLevel_2_Frequency_MASK 0xffffffff
+#define DPM_TABLE_343__SamuLevel_2_Frequency__SHIFT 0x0
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_344__SamuLevel_2_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_345__SamuLevel_2_padding_2_MASK 0xff
+#define DPM_TABLE_345__SamuLevel_2_padding_2__SHIFT 0x0
+#define DPM_TABLE_345__SamuLevel_2_padding_1_MASK 0xff00
+#define DPM_TABLE_345__SamuLevel_2_padding_1__SHIFT 0x8
+#define DPM_TABLE_345__SamuLevel_2_padding_0_MASK 0xff0000
+#define DPM_TABLE_345__SamuLevel_2_padding_0__SHIFT 0x10
+#define DPM_TABLE_345__SamuLevel_2_Divider_MASK 0xff000000
+#define DPM_TABLE_345__SamuLevel_2_Divider__SHIFT 0x18
+#define DPM_TABLE_346__SamuLevel_3_Frequency_MASK 0xffffffff
+#define DPM_TABLE_346__SamuLevel_3_Frequency__SHIFT 0x0
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_347__SamuLevel_3_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_348__SamuLevel_3_padding_2_MASK 0xff
+#define DPM_TABLE_348__SamuLevel_3_padding_2__SHIFT 0x0
+#define DPM_TABLE_348__SamuLevel_3_padding_1_MASK 0xff00
+#define DPM_TABLE_348__SamuLevel_3_padding_1__SHIFT 0x8
+#define DPM_TABLE_348__SamuLevel_3_padding_0_MASK 0xff0000
+#define DPM_TABLE_348__SamuLevel_3_padding_0__SHIFT 0x10
+#define DPM_TABLE_348__SamuLevel_3_Divider_MASK 0xff000000
+#define DPM_TABLE_348__SamuLevel_3_Divider__SHIFT 0x18
+#define DPM_TABLE_349__SamuLevel_4_Frequency_MASK 0xffffffff
+#define DPM_TABLE_349__SamuLevel_4_Frequency__SHIFT 0x0
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_350__SamuLevel_4_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_351__SamuLevel_4_padding_2_MASK 0xff
+#define DPM_TABLE_351__SamuLevel_4_padding_2__SHIFT 0x0
+#define DPM_TABLE_351__SamuLevel_4_padding_1_MASK 0xff00
+#define DPM_TABLE_351__SamuLevel_4_padding_1__SHIFT 0x8
+#define DPM_TABLE_351__SamuLevel_4_padding_0_MASK 0xff0000
+#define DPM_TABLE_351__SamuLevel_4_padding_0__SHIFT 0x10
+#define DPM_TABLE_351__SamuLevel_4_Divider_MASK 0xff000000
+#define DPM_TABLE_351__SamuLevel_4_Divider__SHIFT 0x18
+#define DPM_TABLE_352__SamuLevel_5_Frequency_MASK 0xffffffff
+#define DPM_TABLE_352__SamuLevel_5_Frequency__SHIFT 0x0
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_353__SamuLevel_5_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_354__SamuLevel_5_padding_2_MASK 0xff
+#define DPM_TABLE_354__SamuLevel_5_padding_2__SHIFT 0x0
+#define DPM_TABLE_354__SamuLevel_5_padding_1_MASK 0xff00
+#define DPM_TABLE_354__SamuLevel_5_padding_1__SHIFT 0x8
+#define DPM_TABLE_354__SamuLevel_5_padding_0_MASK 0xff0000
+#define DPM_TABLE_354__SamuLevel_5_padding_0__SHIFT 0x10
+#define DPM_TABLE_354__SamuLevel_5_Divider_MASK 0xff000000
+#define DPM_TABLE_354__SamuLevel_5_Divider__SHIFT 0x18
+#define DPM_TABLE_355__SamuLevel_6_Frequency_MASK 0xffffffff
+#define DPM_TABLE_355__SamuLevel_6_Frequency__SHIFT 0x0
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_356__SamuLevel_6_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_357__SamuLevel_6_padding_2_MASK 0xff
+#define DPM_TABLE_357__SamuLevel_6_padding_2__SHIFT 0x0
+#define DPM_TABLE_357__SamuLevel_6_padding_1_MASK 0xff00
+#define DPM_TABLE_357__SamuLevel_6_padding_1__SHIFT 0x8
+#define DPM_TABLE_357__SamuLevel_6_padding_0_MASK 0xff0000
+#define DPM_TABLE_357__SamuLevel_6_padding_0__SHIFT 0x10
+#define DPM_TABLE_357__SamuLevel_6_Divider_MASK 0xff000000
+#define DPM_TABLE_357__SamuLevel_6_Divider__SHIFT 0x18
+#define DPM_TABLE_358__SamuLevel_7_Frequency_MASK 0xffffffff
+#define DPM_TABLE_358__SamuLevel_7_Frequency__SHIFT 0x0
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Phases_MASK 0xff
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_359__SamuLevel_7_MinVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_360__SamuLevel_7_padding_2_MASK 0xff
+#define DPM_TABLE_360__SamuLevel_7_padding_2__SHIFT 0x0
+#define DPM_TABLE_360__SamuLevel_7_padding_1_MASK 0xff00
+#define DPM_TABLE_360__SamuLevel_7_padding_1__SHIFT 0x8
+#define DPM_TABLE_360__SamuLevel_7_padding_0_MASK 0xff0000
+#define DPM_TABLE_360__SamuLevel_7_padding_0__SHIFT 0x10
+#define DPM_TABLE_360__SamuLevel_7_Divider_MASK 0xff000000
+#define DPM_TABLE_360__SamuLevel_7_Divider__SHIFT 0x18
+#define DPM_TABLE_361__Ulv_CcPwrDynRm_MASK 0xffffffff
+#define DPM_TABLE_361__Ulv_CcPwrDynRm__SHIFT 0x0
+#define DPM_TABLE_362__Ulv_CcPwrDynRm1_MASK 0xffffffff
+#define DPM_TABLE_362__Ulv_CcPwrDynRm1__SHIFT 0x0
+#define DPM_TABLE_363__Ulv_VddcPhase_MASK 0xff
+#define DPM_TABLE_363__Ulv_VddcPhase__SHIFT 0x0
+#define DPM_TABLE_363__Ulv_VddcOffsetVid_MASK 0xff00
+#define DPM_TABLE_363__Ulv_VddcOffsetVid__SHIFT 0x8
+#define DPM_TABLE_363__Ulv_VddcOffset_MASK 0xffff0000
+#define DPM_TABLE_363__Ulv_VddcOffset__SHIFT 0x10
+#define DPM_TABLE_364__Ulv_Reserved_MASK 0xffffffff
+#define DPM_TABLE_364__Ulv_Reserved__SHIFT 0x0
+#define DPM_TABLE_365__SclkStepSize_MASK 0xffffffff
+#define DPM_TABLE_365__SclkStepSize__SHIFT 0x0
+#define DPM_TABLE_366__Smio_0_MASK 0xffffffff
+#define DPM_TABLE_366__Smio_0__SHIFT 0x0
+#define DPM_TABLE_367__Smio_1_MASK 0xffffffff
+#define DPM_TABLE_367__Smio_1__SHIFT 0x0
+#define DPM_TABLE_368__Smio_2_MASK 0xffffffff
+#define DPM_TABLE_368__Smio_2__SHIFT 0x0
+#define DPM_TABLE_369__Smio_3_MASK 0xffffffff
+#define DPM_TABLE_369__Smio_3__SHIFT 0x0
+#define DPM_TABLE_370__Smio_4_MASK 0xffffffff
+#define DPM_TABLE_370__Smio_4__SHIFT 0x0
+#define DPM_TABLE_371__Smio_5_MASK 0xffffffff
+#define DPM_TABLE_371__Smio_5__SHIFT 0x0
+#define DPM_TABLE_372__Smio_6_MASK 0xffffffff
+#define DPM_TABLE_372__Smio_6__SHIFT 0x0
+#define DPM_TABLE_373__Smio_7_MASK 0xffffffff
+#define DPM_TABLE_373__Smio_7__SHIFT 0x0
+#define DPM_TABLE_374__Smio_8_MASK 0xffffffff
+#define DPM_TABLE_374__Smio_8__SHIFT 0x0
+#define DPM_TABLE_375__Smio_9_MASK 0xffffffff
+#define DPM_TABLE_375__Smio_9__SHIFT 0x0
+#define DPM_TABLE_376__Smio_10_MASK 0xffffffff
+#define DPM_TABLE_376__Smio_10__SHIFT 0x0
+#define DPM_TABLE_377__Smio_11_MASK 0xffffffff
+#define DPM_TABLE_377__Smio_11__SHIFT 0x0
+#define DPM_TABLE_378__Smio_12_MASK 0xffffffff
+#define DPM_TABLE_378__Smio_12__SHIFT 0x0
+#define DPM_TABLE_379__Smio_13_MASK 0xffffffff
+#define DPM_TABLE_379__Smio_13__SHIFT 0x0
+#define DPM_TABLE_380__Smio_14_MASK 0xffffffff
+#define DPM_TABLE_380__Smio_14__SHIFT 0x0
+#define DPM_TABLE_381__Smio_15_MASK 0xffffffff
+#define DPM_TABLE_381__Smio_15__SHIFT 0x0
+#define DPM_TABLE_382__Smio_16_MASK 0xffffffff
+#define DPM_TABLE_382__Smio_16__SHIFT 0x0
+#define DPM_TABLE_383__Smio_17_MASK 0xffffffff
+#define DPM_TABLE_383__Smio_17__SHIFT 0x0
+#define DPM_TABLE_384__Smio_18_MASK 0xffffffff
+#define DPM_TABLE_384__Smio_18__SHIFT 0x0
+#define DPM_TABLE_385__Smio_19_MASK 0xffffffff
+#define DPM_TABLE_385__Smio_19__SHIFT 0x0
+#define DPM_TABLE_386__Smio_20_MASK 0xffffffff
+#define DPM_TABLE_386__Smio_20__SHIFT 0x0
+#define DPM_TABLE_387__Smio_21_MASK 0xffffffff
+#define DPM_TABLE_387__Smio_21__SHIFT 0x0
+#define DPM_TABLE_388__Smio_22_MASK 0xffffffff
+#define DPM_TABLE_388__Smio_22__SHIFT 0x0
+#define DPM_TABLE_389__Smio_23_MASK 0xffffffff
+#define DPM_TABLE_389__Smio_23__SHIFT 0x0
+#define DPM_TABLE_390__Smio_24_MASK 0xffffffff
+#define DPM_TABLE_390__Smio_24__SHIFT 0x0
+#define DPM_TABLE_391__Smio_25_MASK 0xffffffff
+#define DPM_TABLE_391__Smio_25__SHIFT 0x0
+#define DPM_TABLE_392__Smio_26_MASK 0xffffffff
+#define DPM_TABLE_392__Smio_26__SHIFT 0x0
+#define DPM_TABLE_393__Smio_27_MASK 0xffffffff
+#define DPM_TABLE_393__Smio_27__SHIFT 0x0
+#define DPM_TABLE_394__Smio_28_MASK 0xffffffff
+#define DPM_TABLE_394__Smio_28__SHIFT 0x0
+#define DPM_TABLE_395__Smio_29_MASK 0xffffffff
+#define DPM_TABLE_395__Smio_29__SHIFT 0x0
+#define DPM_TABLE_396__Smio_30_MASK 0xffffffff
+#define DPM_TABLE_396__Smio_30__SHIFT 0x0
+#define DPM_TABLE_397__Smio_31_MASK 0xffffffff
+#define DPM_TABLE_397__Smio_31__SHIFT 0x0
+#define DPM_TABLE_398__SamuBootLevel_MASK 0xff
+#define DPM_TABLE_398__SamuBootLevel__SHIFT 0x0
+#define DPM_TABLE_398__AcpBootLevel_MASK 0xff00
+#define DPM_TABLE_398__AcpBootLevel__SHIFT 0x8
+#define DPM_TABLE_398__VceBootLevel_MASK 0xff0000
+#define DPM_TABLE_398__VceBootLevel__SHIFT 0x10
+#define DPM_TABLE_398__UvdBootLevel_MASK 0xff000000
+#define DPM_TABLE_398__UvdBootLevel__SHIFT 0x18
+#define DPM_TABLE_399__GraphicsInterval_MASK 0xff
+#define DPM_TABLE_399__GraphicsInterval__SHIFT 0x0
+#define DPM_TABLE_399__GraphicsThermThrottleEnable_MASK 0xff00
+#define DPM_TABLE_399__GraphicsThermThrottleEnable__SHIFT 0x8
+#define DPM_TABLE_399__GraphicsVoltageChangeEnable_MASK 0xff0000
+#define DPM_TABLE_399__GraphicsVoltageChangeEnable__SHIFT 0x10
+#define DPM_TABLE_399__GraphicsBootLevel_MASK 0xff000000
+#define DPM_TABLE_399__GraphicsBootLevel__SHIFT 0x18
+#define DPM_TABLE_400__TemperatureLimitHigh_MASK 0xffff
+#define DPM_TABLE_400__TemperatureLimitHigh__SHIFT 0x0
+#define DPM_TABLE_400__ThermalInterval_MASK 0xff0000
+#define DPM_TABLE_400__ThermalInterval__SHIFT 0x10
+#define DPM_TABLE_400__VoltageInterval_MASK 0xff000000
+#define DPM_TABLE_400__VoltageInterval__SHIFT 0x18
+#define DPM_TABLE_401__MemoryVoltageChangeEnable_MASK 0xff
+#define DPM_TABLE_401__MemoryVoltageChangeEnable__SHIFT 0x0
+#define DPM_TABLE_401__MemoryBootLevel_MASK 0xff00
+#define DPM_TABLE_401__MemoryBootLevel__SHIFT 0x8
+#define DPM_TABLE_401__TemperatureLimitLow_MASK 0xffff0000
+#define DPM_TABLE_401__TemperatureLimitLow__SHIFT 0x10
+#define DPM_TABLE_402__MemoryThermThrottleEnable_MASK 0xff
+#define DPM_TABLE_402__MemoryThermThrottleEnable__SHIFT 0x0
+#define DPM_TABLE_402__MemoryInterval_MASK 0xff00
+#define DPM_TABLE_402__MemoryInterval__SHIFT 0x8
+#define DPM_TABLE_402__BootMVdd_MASK 0xffff0000
+#define DPM_TABLE_402__BootMVdd__SHIFT 0x10
+#define DPM_TABLE_403__PhaseResponseTime_MASK 0xffff
+#define DPM_TABLE_403__PhaseResponseTime__SHIFT 0x0
+#define DPM_TABLE_403__VoltageResponseTime_MASK 0xffff0000
+#define DPM_TABLE_403__VoltageResponseTime__SHIFT 0x10
+#define DPM_TABLE_404__DTEMode_MASK 0xff
+#define DPM_TABLE_404__DTEMode__SHIFT 0x0
+#define DPM_TABLE_404__DTEInterval_MASK 0xff00
+#define DPM_TABLE_404__DTEInterval__SHIFT 0x8
+#define DPM_TABLE_404__PCIeGenInterval_MASK 0xff0000
+#define DPM_TABLE_404__PCIeGenInterval__SHIFT 0x10
+#define DPM_TABLE_404__PCIeBootLinkLevel_MASK 0xff000000
+#define DPM_TABLE_404__PCIeBootLinkLevel__SHIFT 0x18
+#define DPM_TABLE_405__ThermGpio_MASK 0xff
+#define DPM_TABLE_405__ThermGpio__SHIFT 0x0
+#define DPM_TABLE_405__AcDcGpio_MASK 0xff00
+#define DPM_TABLE_405__AcDcGpio__SHIFT 0x8
+#define DPM_TABLE_405__VRHotGpio_MASK 0xff0000
+#define DPM_TABLE_405__VRHotGpio__SHIFT 0x10
+#define DPM_TABLE_405__SVI2Enable_MASK 0xff000000
+#define DPM_TABLE_405__SVI2Enable__SHIFT 0x18
+#define DPM_TABLE_406__PPM_TemperatureLimit_MASK 0xffff
+#define DPM_TABLE_406__PPM_TemperatureLimit__SHIFT 0x0
+#define DPM_TABLE_406__PPM_PkgPwrLimit_MASK 0xffff0000
+#define DPM_TABLE_406__PPM_PkgPwrLimit__SHIFT 0x10
+#define DPM_TABLE_407__TargetTdp_MASK 0xffff
+#define DPM_TABLE_407__TargetTdp__SHIFT 0x0
+#define DPM_TABLE_407__DefaultTdp_MASK 0xffff0000
+#define DPM_TABLE_407__DefaultTdp__SHIFT 0x10
+#define DPM_TABLE_408__FpsLowThreshold_MASK 0xffff
+#define DPM_TABLE_408__FpsLowThreshold__SHIFT 0x0
+#define DPM_TABLE_408__FpsHighThreshold_MASK 0xffff0000
+#define DPM_TABLE_408__FpsHighThreshold__SHIFT 0x10
+#define DPM_TABLE_409__BAPMTI_R_0_1_0_MASK 0xffff
+#define DPM_TABLE_409__BAPMTI_R_0_1_0__SHIFT 0x0
+#define DPM_TABLE_409__BAPMTI_R_0_0_0_MASK 0xffff0000
+#define DPM_TABLE_409__BAPMTI_R_0_0_0__SHIFT 0x10
+#define DPM_TABLE_410__BAPMTI_R_1_0_0_MASK 0xffff
+#define DPM_TABLE_410__BAPMTI_R_1_0_0__SHIFT 0x0
+#define DPM_TABLE_410__BAPMTI_R_0_2_0_MASK 0xffff0000
+#define DPM_TABLE_410__BAPMTI_R_0_2_0__SHIFT 0x10
+#define DPM_TABLE_411__BAPMTI_R_1_2_0_MASK 0xffff
+#define DPM_TABLE_411__BAPMTI_R_1_2_0__SHIFT 0x0
+#define DPM_TABLE_411__BAPMTI_R_1_1_0_MASK 0xffff0000
+#define DPM_TABLE_411__BAPMTI_R_1_1_0__SHIFT 0x10
+#define DPM_TABLE_412__BAPMTI_R_2_1_0_MASK 0xffff
+#define DPM_TABLE_412__BAPMTI_R_2_1_0__SHIFT 0x0
+#define DPM_TABLE_412__BAPMTI_R_2_0_0_MASK 0xffff0000
+#define DPM_TABLE_412__BAPMTI_R_2_0_0__SHIFT 0x10
+#define DPM_TABLE_413__BAPMTI_R_3_0_0_MASK 0xffff
+#define DPM_TABLE_413__BAPMTI_R_3_0_0__SHIFT 0x0
+#define DPM_TABLE_413__BAPMTI_R_2_2_0_MASK 0xffff0000
+#define DPM_TABLE_413__BAPMTI_R_2_2_0__SHIFT 0x10
+#define DPM_TABLE_414__BAPMTI_R_3_2_0_MASK 0xffff
+#define DPM_TABLE_414__BAPMTI_R_3_2_0__SHIFT 0x0
+#define DPM_TABLE_414__BAPMTI_R_3_1_0_MASK 0xffff0000
+#define DPM_TABLE_414__BAPMTI_R_3_1_0__SHIFT 0x10
+#define DPM_TABLE_415__BAPMTI_R_4_1_0_MASK 0xffff
+#define DPM_TABLE_415__BAPMTI_R_4_1_0__SHIFT 0x0
+#define DPM_TABLE_415__BAPMTI_R_4_0_0_MASK 0xffff0000
+#define DPM_TABLE_415__BAPMTI_R_4_0_0__SHIFT 0x10
+#define DPM_TABLE_416__BAPMTI_RC_0_0_0_MASK 0xffff
+#define DPM_TABLE_416__BAPMTI_RC_0_0_0__SHIFT 0x0
+#define DPM_TABLE_416__BAPMTI_R_4_2_0_MASK 0xffff0000
+#define DPM_TABLE_416__BAPMTI_R_4_2_0__SHIFT 0x10
+#define DPM_TABLE_417__BAPMTI_RC_0_2_0_MASK 0xffff
+#define DPM_TABLE_417__BAPMTI_RC_0_2_0__SHIFT 0x0
+#define DPM_TABLE_417__BAPMTI_RC_0_1_0_MASK 0xffff0000
+#define DPM_TABLE_417__BAPMTI_RC_0_1_0__SHIFT 0x10
+#define DPM_TABLE_418__BAPMTI_RC_1_1_0_MASK 0xffff
+#define DPM_TABLE_418__BAPMTI_RC_1_1_0__SHIFT 0x0
+#define DPM_TABLE_418__BAPMTI_RC_1_0_0_MASK 0xffff0000
+#define DPM_TABLE_418__BAPMTI_RC_1_0_0__SHIFT 0x10
+#define DPM_TABLE_419__BAPMTI_RC_2_0_0_MASK 0xffff
+#define DPM_TABLE_419__BAPMTI_RC_2_0_0__SHIFT 0x0
+#define DPM_TABLE_419__BAPMTI_RC_1_2_0_MASK 0xffff0000
+#define DPM_TABLE_419__BAPMTI_RC_1_2_0__SHIFT 0x10
+#define DPM_TABLE_420__BAPMTI_RC_2_2_0_MASK 0xffff
+#define DPM_TABLE_420__BAPMTI_RC_2_2_0__SHIFT 0x0
+#define DPM_TABLE_420__BAPMTI_RC_2_1_0_MASK 0xffff0000
+#define DPM_TABLE_420__BAPMTI_RC_2_1_0__SHIFT 0x10
+#define DPM_TABLE_421__BAPMTI_RC_3_1_0_MASK 0xffff
+#define DPM_TABLE_421__BAPMTI_RC_3_1_0__SHIFT 0x0
+#define DPM_TABLE_421__BAPMTI_RC_3_0_0_MASK 0xffff0000
+#define DPM_TABLE_421__BAPMTI_RC_3_0_0__SHIFT 0x10
+#define DPM_TABLE_422__BAPMTI_RC_4_0_0_MASK 0xffff
+#define DPM_TABLE_422__BAPMTI_RC_4_0_0__SHIFT 0x0
+#define DPM_TABLE_422__BAPMTI_RC_3_2_0_MASK 0xffff0000
+#define DPM_TABLE_422__BAPMTI_RC_3_2_0__SHIFT 0x10
+#define DPM_TABLE_423__BAPMTI_RC_4_2_0_MASK 0xffff
+#define DPM_TABLE_423__BAPMTI_RC_4_2_0__SHIFT 0x0
+#define DPM_TABLE_423__BAPMTI_RC_4_1_0_MASK 0xffff0000
+#define DPM_TABLE_423__BAPMTI_RC_4_1_0__SHIFT 0x10
+#define DPM_TABLE_424__GpuTjHyst_MASK 0xff
+#define DPM_TABLE_424__GpuTjHyst__SHIFT 0x0
+#define DPM_TABLE_424__GpuTjMax_MASK 0xff00
+#define DPM_TABLE_424__GpuTjMax__SHIFT 0x8
+#define DPM_TABLE_424__DTETjOffset_MASK 0xff0000
+#define DPM_TABLE_424__DTETjOffset__SHIFT 0x10
+#define DPM_TABLE_424__DTEAmbientTempBase_MASK 0xff000000
+#define DPM_TABLE_424__DTEAmbientTempBase__SHIFT 0x18
+#define DPM_TABLE_425__BootVoltage_Phases_MASK 0xff
+#define DPM_TABLE_425__BootVoltage_Phases__SHIFT 0x0
+#define DPM_TABLE_425__BootVoltage_VddGfx_MASK 0xff00
+#define DPM_TABLE_425__BootVoltage_VddGfx__SHIFT 0x8
+#define DPM_TABLE_425__BootVoltage_Vddci_MASK 0xff0000
+#define DPM_TABLE_425__BootVoltage_Vddci__SHIFT 0x10
+#define DPM_TABLE_425__BootVoltage_Vddc_MASK 0xff000000
+#define DPM_TABLE_425__BootVoltage_Vddc__SHIFT 0x18
+#define DPM_TABLE_426__BAPM_TEMP_GRADIENT_MASK 0xffffffff
+#define DPM_TABLE_426__BAPM_TEMP_GRADIENT__SHIFT 0x0
+#define DPM_TABLE_427__LowSclkInterruptThreshold_MASK 0xffffffff
+#define DPM_TABLE_427__LowSclkInterruptThreshold__SHIFT 0x0
+#define DPM_TABLE_428__VddGfxReChkWait_MASK 0xffffffff
+#define DPM_TABLE_428__VddGfxReChkWait__SHIFT 0x0
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_1_MASK 0xff
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_1__SHIFT 0x0
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_0_MASK 0xff00
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_padding_0__SHIFT 0x8
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_maxVID_MASK 0xff0000
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_maxVID__SHIFT 0x10
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_minVID_MASK 0xff000000
+#define DPM_TABLE_429__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_minVID__SHIFT 0x18
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_3_MASK 0xff
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_3__SHIFT 0x0
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_2_MASK 0xff00
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_2__SHIFT 0x8
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_1_MASK 0xff0000
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_1__SHIFT 0x10
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_0_MASK 0xff000000
+#define DPM_TABLE_430__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_0__SHIFT 0x18
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_7_MASK 0xff
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_7__SHIFT 0x0
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_6_MASK 0xff00
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_6__SHIFT 0x8
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_5_MASK 0xff0000
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_5__SHIFT 0x10
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_4_MASK 0xff000000
+#define DPM_TABLE_431__ClockStretcherDataTable_ClockStretcherDataTableEntry_0_setting_4__SHIFT 0x18
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_1_MASK 0xff
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_1__SHIFT 0x0
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_0_MASK 0xff00
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_padding_0__SHIFT 0x8
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_maxVID_MASK 0xff0000
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_maxVID__SHIFT 0x10
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_minVID_MASK 0xff000000
+#define DPM_TABLE_432__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_minVID__SHIFT 0x18
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_3_MASK 0xff
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_3__SHIFT 0x0
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_2_MASK 0xff00
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_2__SHIFT 0x8
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_1_MASK 0xff0000
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_1__SHIFT 0x10
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_0_MASK 0xff000000
+#define DPM_TABLE_433__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_0__SHIFT 0x18
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_7_MASK 0xff
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_7__SHIFT 0x0
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_6_MASK 0xff00
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_6__SHIFT 0x8
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_5_MASK 0xff0000
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_5__SHIFT 0x10
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_4_MASK 0xff000000
+#define DPM_TABLE_434__ClockStretcherDataTable_ClockStretcherDataTableEntry_1_setting_4__SHIFT 0x18
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_1_MASK 0xff
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_1__SHIFT 0x0
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_0_MASK 0xff00
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_padding_0__SHIFT 0x8
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_maxVID_MASK 0xff0000
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_maxVID__SHIFT 0x10
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_minVID_MASK 0xff000000
+#define DPM_TABLE_435__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_minVID__SHIFT 0x18
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_3_MASK 0xff
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_3__SHIFT 0x0
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_2_MASK 0xff00
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_2__SHIFT 0x8
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_1_MASK 0xff0000
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_1__SHIFT 0x10
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_0_MASK 0xff000000
+#define DPM_TABLE_436__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_0__SHIFT 0x18
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_7_MASK 0xff
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_7__SHIFT 0x0
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_6_MASK 0xff00
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_6__SHIFT 0x8
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_5_MASK 0xff0000
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_5__SHIFT 0x10
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_4_MASK 0xff000000
+#define DPM_TABLE_437__ClockStretcherDataTable_ClockStretcherDataTableEntry_2_setting_4__SHIFT 0x18
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_1_MASK 0xff
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_1__SHIFT 0x0
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_0_MASK 0xff00
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_padding_0__SHIFT 0x8
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_maxVID_MASK 0xff0000
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_maxVID__SHIFT 0x10
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_minVID_MASK 0xff000000
+#define DPM_TABLE_438__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_minVID__SHIFT 0x18
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_3_MASK 0xff
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_3__SHIFT 0x0
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_2_MASK 0xff00
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_2__SHIFT 0x8
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_1_MASK 0xff0000
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_1__SHIFT 0x10
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_0_MASK 0xff000000
+#define DPM_TABLE_439__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_0__SHIFT 0x18
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_7_MASK 0xff
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_7__SHIFT 0x0
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_6_MASK 0xff00
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_6__SHIFT 0x8
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_5_MASK 0xff0000
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_5__SHIFT 0x10
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_4_MASK 0xff000000
+#define DPM_TABLE_440__ClockStretcherDataTable_ClockStretcherDataTableEntry_3_setting_4__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_1__RefClockFrequency_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_1__RefClockFrequency__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_2__PmTimerPeriod_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_2__PmTimerPeriod__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_3__FeatureEnables_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_3__FeatureEnables__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_4__PreVBlankGap_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_4__PreVBlankGap__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_5__VBlankTimeout_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_5__VBlankTimeout__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_6__TrainTimeGap_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_6__TrainTimeGap__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_7__MvddSwitchTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_7__MvddSwitchTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_8__LongestAcpiTrainTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_8__LongestAcpiTrainTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_9__AcpiDelay_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_9__AcpiDelay__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_10__G5TrainTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_10__G5TrainTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_11__DelayMpllPwron_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_11__DelayMpllPwron__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_12__VoltageChangeTimeout_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_12__VoltageChangeTimeout__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_13__HandshakeDisables_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_13__HandshakeDisables__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy4Config_MASK 0xff
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy4Config__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy3Config_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy3Config__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy2Config_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy2Config__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy1Config_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_14__DisplayPhy1Config__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy8Config_MASK 0xff
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy8Config__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy7Config_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy7Config__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy6Config_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy6Config__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy5Config_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_15__DisplayPhy5Config__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_16__AverageGraphicsActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_16__AverageGraphicsActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_17__AverageMemoryActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_17__AverageMemoryActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_18__AverageGioActivity_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_18__AverageGioActivity__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_19__PCIeDpmEnabledLevels_MASK 0xff
+#define SOFT_REGISTERS_TABLE_19__PCIeDpmEnabledLevels__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_19__LClkDpmEnabledLevels_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_19__LClkDpmEnabledLevels__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_19__MClkDpmEnabledLevels_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_19__MClkDpmEnabledLevels__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_19__SClkDpmEnabledLevels_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_19__SClkDpmEnabledLevels__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_20__VCEDpmEnabledLevels_MASK 0xff
+#define SOFT_REGISTERS_TABLE_20__VCEDpmEnabledLevels__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_20__ACPDpmEnabledLevels_MASK 0xff00
+#define SOFT_REGISTERS_TABLE_20__ACPDpmEnabledLevels__SHIFT 0x8
+#define SOFT_REGISTERS_TABLE_20__SAMUDpmEnabledLevels_MASK 0xff0000
+#define SOFT_REGISTERS_TABLE_20__SAMUDpmEnabledLevels__SHIFT 0x10
+#define SOFT_REGISTERS_TABLE_20__UVDDpmEnabledLevels_MASK 0xff000000
+#define SOFT_REGISTERS_TABLE_20__UVDDpmEnabledLevels__SHIFT 0x18
+#define SOFT_REGISTERS_TABLE_21__DRAM_LOG_ADDR_H_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_21__DRAM_LOG_ADDR_H__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_22__DRAM_LOG_ADDR_L_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_22__DRAM_LOG_ADDR_L__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_23__DRAM_LOG_PHY_ADDR_H_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_23__DRAM_LOG_PHY_ADDR_H__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_24__DRAM_LOG_PHY_ADDR_L_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_24__DRAM_LOG_PHY_ADDR_L__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_25__DRAM_LOG_BUFF_SIZE_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_25__DRAM_LOG_BUFF_SIZE__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_26__UlvEnterCount_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_26__UlvEnterCount__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_27__UlvTime_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_27__UlvTime__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_28__UcodeLoadStatus_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_28__UcodeLoadStatus__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_29__Reserved_0_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_29__Reserved_0__SHIFT 0x0
+#define SOFT_REGISTERS_TABLE_30__Reserved_1_MASK 0xffffffff
+#define SOFT_REGISTERS_TABLE_30__Reserved_1__SHIFT 0x0
+#define PM_FUSES_1__SviLoadLineOffsetVddC_MASK 0xff
+#define PM_FUSES_1__SviLoadLineOffsetVddC__SHIFT 0x0
+#define PM_FUSES_1__SviLoadLineTrimVddC_MASK 0xff00
+#define PM_FUSES_1__SviLoadLineTrimVddC__SHIFT 0x8
+#define PM_FUSES_1__SviLoadLineVddC_MASK 0xff0000
+#define PM_FUSES_1__SviLoadLineVddC__SHIFT 0x10
+#define PM_FUSES_1__SviLoadLineEn_MASK 0xff000000
+#define PM_FUSES_1__SviLoadLineEn__SHIFT 0x18
+#define PM_FUSES_2__TDC_MAWt_MASK 0xff
+#define PM_FUSES_2__TDC_MAWt__SHIFT 0x0
+#define PM_FUSES_2__TDC_VDDC_ThrottleReleaseLimitPerc_MASK 0xff00
+#define PM_FUSES_2__TDC_VDDC_ThrottleReleaseLimitPerc__SHIFT 0x8
+#define PM_FUSES_2__TDC_VDDC_PkgLimit_MASK 0xffff0000
+#define PM_FUSES_2__TDC_VDDC_PkgLimit__SHIFT 0x10
+#define PM_FUSES_3__Reserved_MASK 0xff
+#define PM_FUSES_3__Reserved__SHIFT 0x0
+#define PM_FUSES_3__LPMLTemperatureMax_MASK 0xff00
+#define PM_FUSES_3__LPMLTemperatureMax__SHIFT 0x8
+#define PM_FUSES_3__LPMLTemperatureMin_MASK 0xff0000
+#define PM_FUSES_3__LPMLTemperatureMin__SHIFT 0x10
+#define PM_FUSES_3__TdcWaterfallCtl_MASK 0xff000000
+#define PM_FUSES_3__TdcWaterfallCtl__SHIFT 0x18
+#define PM_FUSES_4__LPMLTemperatureScaler_3_MASK 0xff
+#define PM_FUSES_4__LPMLTemperatureScaler_3__SHIFT 0x0
+#define PM_FUSES_4__LPMLTemperatureScaler_2_MASK 0xff00
+#define PM_FUSES_4__LPMLTemperatureScaler_2__SHIFT 0x8
+#define PM_FUSES_4__LPMLTemperatureScaler_1_MASK 0xff0000
+#define PM_FUSES_4__LPMLTemperatureScaler_1__SHIFT 0x10
+#define PM_FUSES_4__LPMLTemperatureScaler_0_MASK 0xff000000
+#define PM_FUSES_4__LPMLTemperatureScaler_0__SHIFT 0x18
+#define PM_FUSES_5__LPMLTemperatureScaler_7_MASK 0xff
+#define PM_FUSES_5__LPMLTemperatureScaler_7__SHIFT 0x0
+#define PM_FUSES_5__LPMLTemperatureScaler_6_MASK 0xff00
+#define PM_FUSES_5__LPMLTemperatureScaler_6__SHIFT 0x8
+#define PM_FUSES_5__LPMLTemperatureScaler_5_MASK 0xff0000
+#define PM_FUSES_5__LPMLTemperatureScaler_5__SHIFT 0x10
+#define PM_FUSES_5__LPMLTemperatureScaler_4_MASK 0xff000000
+#define PM_FUSES_5__LPMLTemperatureScaler_4__SHIFT 0x18
+#define PM_FUSES_6__LPMLTemperatureScaler_11_MASK 0xff
+#define PM_FUSES_6__LPMLTemperatureScaler_11__SHIFT 0x0
+#define PM_FUSES_6__LPMLTemperatureScaler_10_MASK 0xff00
+#define PM_FUSES_6__LPMLTemperatureScaler_10__SHIFT 0x8
+#define PM_FUSES_6__LPMLTemperatureScaler_9_MASK 0xff0000
+#define PM_FUSES_6__LPMLTemperatureScaler_9__SHIFT 0x10
+#define PM_FUSES_6__LPMLTemperatureScaler_8_MASK 0xff000000
+#define PM_FUSES_6__LPMLTemperatureScaler_8__SHIFT 0x18
+#define PM_FUSES_7__LPMLTemperatureScaler_15_MASK 0xff
+#define PM_FUSES_7__LPMLTemperatureScaler_15__SHIFT 0x0
+#define PM_FUSES_7__LPMLTemperatureScaler_14_MASK 0xff00
+#define PM_FUSES_7__LPMLTemperatureScaler_14__SHIFT 0x8
+#define PM_FUSES_7__LPMLTemperatureScaler_13_MASK 0xff0000
+#define PM_FUSES_7__LPMLTemperatureScaler_13__SHIFT 0x10
+#define PM_FUSES_7__LPMLTemperatureScaler_12_MASK 0xff000000
+#define PM_FUSES_7__LPMLTemperatureScaler_12__SHIFT 0x18
+#define PM_FUSES_8__FuzzyFan_ErrorRateSetDelta_MASK 0xffff
+#define PM_FUSES_8__FuzzyFan_ErrorRateSetDelta__SHIFT 0x0
+#define PM_FUSES_8__FuzzyFan_ErrorSetDelta_MASK 0xffff0000
+#define PM_FUSES_8__FuzzyFan_ErrorSetDelta__SHIFT 0x10
+#define PM_FUSES_9__Reserved6_MASK 0xffff
+#define PM_FUSES_9__Reserved6__SHIFT 0x0
+#define PM_FUSES_9__FuzzyFan_PwmSetDelta_MASK 0xffff0000
+#define PM_FUSES_9__FuzzyFan_PwmSetDelta__SHIFT 0x10
+#define PM_FUSES_10__GnbLPML_3_MASK 0xff
+#define PM_FUSES_10__GnbLPML_3__SHIFT 0x0
+#define PM_FUSES_10__GnbLPML_2_MASK 0xff00
+#define PM_FUSES_10__GnbLPML_2__SHIFT 0x8
+#define PM_FUSES_10__GnbLPML_1_MASK 0xff0000
+#define PM_FUSES_10__GnbLPML_1__SHIFT 0x10
+#define PM_FUSES_10__GnbLPML_0_MASK 0xff000000
+#define PM_FUSES_10__GnbLPML_0__SHIFT 0x18
+#define PM_FUSES_11__GnbLPML_7_MASK 0xff
+#define PM_FUSES_11__GnbLPML_7__SHIFT 0x0
+#define PM_FUSES_11__GnbLPML_6_MASK 0xff00
+#define PM_FUSES_11__GnbLPML_6__SHIFT 0x8
+#define PM_FUSES_11__GnbLPML_5_MASK 0xff0000
+#define PM_FUSES_11__GnbLPML_5__SHIFT 0x10
+#define PM_FUSES_11__GnbLPML_4_MASK 0xff000000
+#define PM_FUSES_11__GnbLPML_4__SHIFT 0x18
+#define PM_FUSES_12__GnbLPML_11_MASK 0xff
+#define PM_FUSES_12__GnbLPML_11__SHIFT 0x0
+#define PM_FUSES_12__GnbLPML_10_MASK 0xff00
+#define PM_FUSES_12__GnbLPML_10__SHIFT 0x8
+#define PM_FUSES_12__GnbLPML_9_MASK 0xff0000
+#define PM_FUSES_12__GnbLPML_9__SHIFT 0x10
+#define PM_FUSES_12__GnbLPML_8_MASK 0xff000000
+#define PM_FUSES_12__GnbLPML_8__SHIFT 0x18
+#define PM_FUSES_13__GnbLPML_15_MASK 0xff
+#define PM_FUSES_13__GnbLPML_15__SHIFT 0x0
+#define PM_FUSES_13__GnbLPML_14_MASK 0xff00
+#define PM_FUSES_13__GnbLPML_14__SHIFT 0x8
+#define PM_FUSES_13__GnbLPML_13_MASK 0xff0000
+#define PM_FUSES_13__GnbLPML_13__SHIFT 0x10
+#define PM_FUSES_13__GnbLPML_12_MASK 0xff000000
+#define PM_FUSES_13__GnbLPML_12__SHIFT 0x18
+#define PM_FUSES_14__Reserved1_1_MASK 0xff
+#define PM_FUSES_14__Reserved1_1__SHIFT 0x0
+#define PM_FUSES_14__Reserved1_0_MASK 0xff00
+#define PM_FUSES_14__Reserved1_0__SHIFT 0x8
+#define PM_FUSES_14__GnbLPMLMinVid_MASK 0xff0000
+#define PM_FUSES_14__GnbLPMLMinVid__SHIFT 0x10
+#define PM_FUSES_14__GnbLPMLMaxVid_MASK 0xff000000
+#define PM_FUSES_14__GnbLPMLMaxVid__SHIFT 0x18
+#define PM_FUSES_15__BapmVddCBaseLeakageLoSidd_MASK 0xffff
+#define PM_FUSES_15__BapmVddCBaseLeakageLoSidd__SHIFT 0x0
+#define PM_FUSES_15__BapmVddCBaseLeakageHiSidd_MASK 0xffff0000
+#define PM_FUSES_15__BapmVddCBaseLeakageHiSidd__SHIFT 0x10
+#define SMU_PM_STATUS_0__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_0__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_1__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_1__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_2__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_2__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_3__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_3__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_4__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_4__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_5__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_5__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_6__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_6__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_7__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_7__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_8__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_8__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_9__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_9__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_10__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_10__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_11__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_11__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_12__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_12__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_13__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_13__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_14__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_14__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_15__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_15__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_16__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_16__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_17__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_17__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_18__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_18__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_19__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_19__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_20__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_20__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_21__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_21__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_22__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_22__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_23__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_23__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_24__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_24__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_25__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_25__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_26__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_26__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_27__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_27__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_28__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_28__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_29__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_29__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_30__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_30__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_31__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_31__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_32__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_32__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_33__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_33__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_34__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_34__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_35__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_35__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_36__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_36__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_37__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_37__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_38__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_38__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_39__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_39__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_40__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_40__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_41__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_41__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_42__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_42__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_43__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_43__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_44__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_44__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_45__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_45__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_46__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_46__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_47__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_47__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_48__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_48__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_49__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_49__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_50__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_50__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_51__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_51__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_52__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_52__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_53__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_53__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_54__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_54__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_55__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_55__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_56__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_56__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_57__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_57__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_58__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_58__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_59__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_59__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_60__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_60__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_61__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_61__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_62__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_62__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_63__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_63__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_64__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_64__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_65__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_65__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_66__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_66__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_67__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_67__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_68__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_68__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_69__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_69__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_70__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_70__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_71__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_71__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_72__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_72__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_73__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_73__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_74__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_74__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_75__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_75__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_76__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_76__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_77__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_77__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_78__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_78__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_79__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_79__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_80__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_80__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_81__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_81__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_82__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_82__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_83__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_83__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_84__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_84__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_85__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_85__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_86__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_86__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_87__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_87__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_88__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_88__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_89__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_89__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_90__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_90__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_91__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_91__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_92__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_92__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_93__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_93__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_94__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_94__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_95__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_95__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_96__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_96__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_97__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_97__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_98__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_98__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_99__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_99__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_100__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_100__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_101__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_101__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_102__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_102__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_103__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_103__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_104__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_104__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_105__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_105__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_106__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_106__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_107__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_107__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_108__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_108__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_109__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_109__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_110__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_110__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_111__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_111__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_112__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_112__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_113__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_113__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_114__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_114__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_115__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_115__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_116__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_116__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_117__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_117__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_118__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_118__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_119__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_119__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_120__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_120__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_121__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_121__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_122__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_122__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_123__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_123__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_124__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_124__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_125__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_125__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_126__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_126__DATA__SHIFT 0x0
+#define SMU_PM_STATUS_127__DATA_MASK 0xffffffff
+#define SMU_PM_STATUS_127__DATA__SHIFT 0x0
+#define CG_THERMAL_INT_ENA__THERM_INTH_SET_MASK 0x1
+#define CG_THERMAL_INT_ENA__THERM_INTH_SET__SHIFT 0x0
+#define CG_THERMAL_INT_ENA__THERM_INTL_SET_MASK 0x2
+#define CG_THERMAL_INT_ENA__THERM_INTL_SET__SHIFT 0x1
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_SET_MASK 0x4
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_SET__SHIFT 0x2
+#define CG_THERMAL_INT_ENA__THERM_INTH_CLR_MASK 0x8
+#define CG_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT 0x3
+#define CG_THERMAL_INT_ENA__THERM_INTL_CLR_MASK 0x10
+#define CG_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT 0x4
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_CLR_MASK 0x20
+#define CG_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT 0x5
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK 0xff
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT 0x0
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK 0xff00
+#define CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT 0x8
+#define CG_THERMAL_INT_CTRL__GNB_TEMP_THRESHOLD_MASK 0xff0000
+#define CG_THERMAL_INT_CTRL__GNB_TEMP_THRESHOLD__SHIFT 0x10
+#define CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK 0x1000000
+#define CG_THERMAL_INT_CTRL__THERM_INTH_MASK__SHIFT 0x18
+#define CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK 0x2000000
+#define CG_THERMAL_INT_CTRL__THERM_INTL_MASK__SHIFT 0x19
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK 0x4000000
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_MASK__SHIFT 0x1a
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_CNB_MASK_MASK 0x8000000
+#define CG_THERMAL_INT_CTRL__THERM_TRIGGER_CNB_MASK__SHIFT 0x1b
+#define CG_THERMAL_INT_CTRL__THERM_GNB_HW_ENA_MASK 0x10000000
+#define CG_THERMAL_INT_CTRL__THERM_GNB_HW_ENA__SHIFT 0x1c
+#define CG_THERMAL_INT_STATUS__THERM_INTH_DETECT_MASK 0x1
+#define CG_THERMAL_INT_STATUS__THERM_INTH_DETECT__SHIFT 0x0
+#define CG_THERMAL_INT_STATUS__THERM_INTL_DETECT_MASK 0x2
+#define CG_THERMAL_INT_STATUS__THERM_INTL_DETECT__SHIFT 0x1
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_DETECT_MASK 0x4
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_DETECT__SHIFT 0x2
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_CNB_DETECT_MASK 0x8
+#define CG_THERMAL_INT_STATUS__THERM_TRIGGER_CNB_DETECT__SHIFT 0x3
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK 0x7
+#define CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT 0x0
+#define CG_THERMAL_CTRL__THERM_INC_CLK_MASK 0x8
+#define CG_THERMAL_CTRL__THERM_INC_CLK__SHIFT 0x3
+#define CG_THERMAL_CTRL__SPARE_MASK 0x3ff0
+#define CG_THERMAL_CTRL__SPARE__SHIFT 0x4
+#define CG_THERMAL_CTRL__DIG_THERM_DPM_MASK 0x3fc000
+#define CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT 0xe
+#define CG_THERMAL_CTRL__RESERVED_MASK 0x1c00000
+#define CG_THERMAL_CTRL__RESERVED__SHIFT 0x16
+#define CG_THERMAL_CTRL__CTF_PAD_POLARITY_MASK 0x2000000
+#define CG_THERMAL_CTRL__CTF_PAD_POLARITY__SHIFT 0x19
+#define CG_THERMAL_CTRL__CTF_PAD_EN_MASK 0x4000000
+#define CG_THERMAL_CTRL__CTF_PAD_EN__SHIFT 0x1a
+#define CG_THERMAL_STATUS__SPARE_MASK 0x1ff
+#define CG_THERMAL_STATUS__SPARE__SHIFT 0x0
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK 0x1fe00
+#define CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT 0x9
+#define CG_THERMAL_STATUS__THERM_ALERT_MASK 0x20000
+#define CG_THERMAL_STATUS__THERM_ALERT__SHIFT 0x11
+#define CG_THERMAL_STATUS__GEN_STATUS_MASK 0x3c0000
+#define CG_THERMAL_STATUS__GEN_STATUS__SHIFT 0x12
+#define CG_THERMAL_INT__DIG_THERM_CTF_MASK 0xff
+#define CG_THERMAL_INT__DIG_THERM_CTF__SHIFT 0x0
+#define CG_THERMAL_INT__DIG_THERM_INTH_MASK 0xff00
+#define CG_THERMAL_INT__DIG_THERM_INTH__SHIFT 0x8
+#define CG_THERMAL_INT__DIG_THERM_INTL_MASK 0xff0000
+#define CG_THERMAL_INT__DIG_THERM_INTL__SHIFT 0x10
+#define CG_THERMAL_INT__THERM_INT_MASK_MASK 0xf000000
+#define CG_THERMAL_INT__THERM_INT_MASK__SHIFT 0x18
+#define CG_MULT_THERMAL_CTRL__TS_FILTER_MASK 0xf
+#define CG_MULT_THERMAL_CTRL__TS_FILTER__SHIFT 0x0
+#define CG_MULT_THERMAL_CTRL__UNUSED_MASK 0x1f0
+#define CG_MULT_THERMAL_CTRL__UNUSED__SHIFT 0x4
+#define CG_MULT_THERMAL_CTRL__THERMAL_RANGE_RST_MASK 0x200
+#define CG_MULT_THERMAL_CTRL__THERMAL_RANGE_RST__SHIFT 0x9
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK 0xff00000
+#define CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT 0x14
+#define CG_MULT_THERMAL_CTRL__THM_READY_CLEAR_MASK 0x10000000
+#define CG_MULT_THERMAL_CTRL__THM_READY_CLEAR__SHIFT 0x1c
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x1ff
+#define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT 0x0
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x3fe00
+#define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x9
+#define THM_TMON2_CTRL__POWER_DOWN_MASK 0x1
+#define THM_TMON2_CTRL__POWER_DOWN__SHIFT 0x0
+#define THM_TMON2_CTRL__BGADJ_MASK 0x1fe
+#define THM_TMON2_CTRL__BGADJ__SHIFT 0x1
+#define THM_TMON2_CTRL__BGADJ_MODE_MASK 0x200
+#define THM_TMON2_CTRL__BGADJ_MODE__SHIFT 0x9
+#define THM_TMON2_CTRL__TMON_PAUSE_MASK 0x400
+#define THM_TMON2_CTRL__TMON_PAUSE__SHIFT 0xa
+#define THM_TMON2_CTRL__INT_MEAS_EN_MASK 0x800
+#define THM_TMON2_CTRL__INT_MEAS_EN__SHIFT 0xb
+#define THM_TMON2_CTRL__DEBUG_MODE_MASK 0x1000
+#define THM_TMON2_CTRL__DEBUG_MODE__SHIFT 0xc
+#define THM_TMON2_CTRL__EN_CFG_SERDES_MASK 0x2000
+#define THM_TMON2_CTRL__EN_CFG_SERDES__SHIFT 0xd
+#define THM_TMON2_CTRL2__RDIL_PRESENT_MASK 0xffff
+#define THM_TMON2_CTRL2__RDIL_PRESENT__SHIFT 0x0
+#define THM_TMON2_CTRL2__RDIR_PRESENT_MASK 0xffff0000
+#define THM_TMON2_CTRL2__RDIR_PRESENT__SHIFT 0x10
+#define THM_TMON2_CSR_WR__CSR_WRITE_MASK 0x1
+#define THM_TMON2_CSR_WR__CSR_WRITE__SHIFT 0x0
+#define THM_TMON2_CSR_WR__CSR_READ_MASK 0x2
+#define THM_TMON2_CSR_WR__CSR_READ__SHIFT 0x1
+#define THM_TMON2_CSR_WR__CSR_ADDR_MASK 0xffc
+#define THM_TMON2_CSR_WR__CSR_ADDR__SHIFT 0x2
+#define THM_TMON2_CSR_WR__WRITE_DATA_MASK 0xfff000
+#define THM_TMON2_CSR_WR__WRITE_DATA__SHIFT 0xc
+#define THM_TMON2_CSR_WR__SPARE_MASK 0x1000000
+#define THM_TMON2_CSR_WR__SPARE__SHIFT 0x18
+#define THM_TMON2_CSR_RD__READ_DATA_MASK 0xfff
+#define THM_TMON2_CSR_RD__READ_DATA__SHIFT 0x0
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK 0xff
+#define CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT 0x0
+#define CG_FDO_CTRL0__FAN_SPINUP_DUTY_MASK 0xff00
+#define CG_FDO_CTRL0__FAN_SPINUP_DUTY__SHIFT 0x8
+#define CG_FDO_CTRL0__FDO_PWM_MANUAL_MASK 0x10000
+#define CG_FDO_CTRL0__FDO_PWM_MANUAL__SHIFT 0x10
+#define CG_FDO_CTRL0__FDO_PWM_HYSTER_MASK 0x7e0000
+#define CG_FDO_CTRL0__FDO_PWM_HYSTER__SHIFT 0x11
+#define CG_FDO_CTRL0__FDO_PWM_RAMP_EN_MASK 0x800000
+#define CG_FDO_CTRL0__FDO_PWM_RAMP_EN__SHIFT 0x17
+#define CG_FDO_CTRL0__FDO_PWM_RAMP_MASK 0xff000000
+#define CG_FDO_CTRL0__FDO_PWM_RAMP__SHIFT 0x18
+#define CG_FDO_CTRL1__FMAX_DUTY100_MASK 0xff
+#define CG_FDO_CTRL1__FMAX_DUTY100__SHIFT 0x0
+#define CG_FDO_CTRL1__FMIN_DUTY_MASK 0xff00
+#define CG_FDO_CTRL1__FMIN_DUTY__SHIFT 0x8
+#define CG_FDO_CTRL1__M_MASK 0xff0000
+#define CG_FDO_CTRL1__M__SHIFT 0x10
+#define CG_FDO_CTRL1__RESERVED_MASK 0x3f000000
+#define CG_FDO_CTRL1__RESERVED__SHIFT 0x18
+#define CG_FDO_CTRL1__FDO_PWRDNB_MASK 0x40000000
+#define CG_FDO_CTRL1__FDO_PWRDNB__SHIFT 0x1e
+#define CG_FDO_CTRL2__TMIN_MASK 0xff
+#define CG_FDO_CTRL2__TMIN__SHIFT 0x0
+#define CG_FDO_CTRL2__FAN_SPINUP_TIME_MASK 0x700
+#define CG_FDO_CTRL2__FAN_SPINUP_TIME__SHIFT 0x8
+#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK 0x3800
+#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT 0xb
+#define CG_FDO_CTRL2__TMIN_HYSTER_MASK 0x1c000
+#define CG_FDO_CTRL2__TMIN_HYSTER__SHIFT 0xe
+#define CG_FDO_CTRL2__TMAX_MASK 0x1fe0000
+#define CG_FDO_CTRL2__TMAX__SHIFT 0x11
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK 0xfe000000
+#define CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT 0x19
+#define CG_TACH_CTRL__EDGE_PER_REV_MASK 0x7
+#define CG_TACH_CTRL__EDGE_PER_REV__SHIFT 0x0
+#define CG_TACH_CTRL__TARGET_PERIOD_MASK 0xfffffff8
+#define CG_TACH_CTRL__TARGET_PERIOD__SHIFT 0x3
+#define CG_TACH_STATUS__TACH_PERIOD_MASK 0xffffffff
+#define CG_TACH_STATUS__TACH_PERIOD__SHIFT 0x0
+#define CC_THM_STRAPS0__TMON0_BGADJ_MASK 0x1fe
+#define CC_THM_STRAPS0__TMON0_BGADJ__SHIFT 0x1
+#define CC_THM_STRAPS0__TMON1_BGADJ_MASK 0x1fe00
+#define CC_THM_STRAPS0__TMON1_BGADJ__SHIFT 0x9
+#define CC_THM_STRAPS0__TMON_CMON_FUSE_SEL_MASK 0x20000
+#define CC_THM_STRAPS0__TMON_CMON_FUSE_SEL__SHIFT 0x11
+#define CC_THM_STRAPS0__NUM_ACQ_MASK 0x1c0000
+#define CC_THM_STRAPS0__NUM_ACQ__SHIFT 0x12
+#define CC_THM_STRAPS0__TMON_CLK_SEL_MASK 0xe00000
+#define CC_THM_STRAPS0__TMON_CLK_SEL__SHIFT 0x15
+#define CC_THM_STRAPS0__TMON_CONFIG_SOURCE_MASK 0x1000000
+#define CC_THM_STRAPS0__TMON_CONFIG_SOURCE__SHIFT 0x18
+#define CC_THM_STRAPS0__CTF_DISABLE_MASK 0x2000000
+#define CC_THM_STRAPS0__CTF_DISABLE__SHIFT 0x19
+#define CC_THM_STRAPS0__TMON0_DISABLE_MASK 0x4000000
+#define CC_THM_STRAPS0__TMON0_DISABLE__SHIFT 0x1a
+#define CC_THM_STRAPS0__TMON1_DISABLE_MASK 0x8000000
+#define CC_THM_STRAPS0__TMON1_DISABLE__SHIFT 0x1b
+#define CC_THM_STRAPS0__TMON2_DISABLE_MASK 0x10000000
+#define CC_THM_STRAPS0__TMON2_DISABLE__SHIFT 0x1c
+#define CC_THM_STRAPS0__TMON3_DISABLE_MASK 0x20000000
+#define CC_THM_STRAPS0__TMON3_DISABLE__SHIFT 0x1d
+#define CC_THM_STRAPS0__UNUSED_MASK 0x80000000
+#define CC_THM_STRAPS0__UNUSED__SHIFT 0x1f
+#define THM_TMON0_RDIL0_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL0_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL0_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL0_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL1_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL1_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL1_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL1_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL2_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL2_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL2_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL2_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL3_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL3_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL3_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL3_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL4_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL4_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL4_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL4_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL5_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL5_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL5_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL5_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL6_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL6_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL6_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL6_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL7_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL7_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL7_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL7_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL8_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL8_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL8_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL8_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL9_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL9_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL9_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL9_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL10_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL10_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL10_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL10_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL11_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL11_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL11_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL11_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL12_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL12_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL12_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL12_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL13_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL13_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL13_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL13_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL14_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL14_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL14_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL14_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIL15_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIL15_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIL15_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIL15_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIL15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIL15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR0_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR0_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR0_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR0_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR1_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR1_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR1_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR1_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR2_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR2_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR2_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR2_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR3_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR3_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR3_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR3_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR4_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR4_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR4_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR4_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR5_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR5_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR5_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR5_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR6_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR6_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR6_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR6_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR7_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR7_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR7_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR7_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR8_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR8_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR8_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR8_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR9_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR9_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR9_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR9_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR10_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR10_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR10_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR10_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR11_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR11_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR11_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR11_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR12_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR12_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR12_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR12_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR13_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR13_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR13_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR13_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR14_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR14_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR14_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR14_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_RDIR15_DATA__Z_MASK 0x7ff
+#define THM_TMON0_RDIR15_DATA__Z__SHIFT 0x0
+#define THM_TMON0_RDIR15_DATA__VALID_MASK 0x800
+#define THM_TMON0_RDIR15_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_RDIR15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_RDIR15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL0_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL0_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL0_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL0_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL1_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL1_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL1_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL1_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL2_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL2_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL2_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL2_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL3_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL3_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL3_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL3_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL4_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL4_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL4_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL4_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL5_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL5_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL5_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL5_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL6_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL6_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL6_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL6_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL7_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL7_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL7_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL7_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL8_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL8_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL8_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL8_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL9_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL9_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL9_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL9_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL10_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL10_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL10_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL10_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL11_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL11_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL11_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL11_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL12_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL12_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL12_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL12_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL13_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL13_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL13_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL13_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL14_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL14_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL14_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL14_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIL15_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIL15_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIL15_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIL15_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIL15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIL15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR0_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR0_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR0_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR0_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR1_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR1_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR1_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR1_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR2_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR2_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR2_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR2_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR3_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR3_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR3_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR3_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR4_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR4_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR4_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR4_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR5_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR5_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR5_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR5_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR6_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR6_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR6_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR6_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR7_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR7_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR7_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR7_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR8_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR8_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR8_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR8_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR9_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR9_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR9_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR9_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR10_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR10_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR10_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR10_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR11_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR11_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR11_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR11_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR12_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR12_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR12_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR12_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR13_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR13_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR13_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR13_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR14_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR14_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR14_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR14_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_RDIR15_DATA__Z_MASK 0x7ff
+#define THM_TMON1_RDIR15_DATA__Z__SHIFT 0x0
+#define THM_TMON1_RDIR15_DATA__VALID_MASK 0x800
+#define THM_TMON1_RDIR15_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_RDIR15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_RDIR15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL0_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL0_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL0_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL0_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL1_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL1_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL1_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL1_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL2_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL2_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL2_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL2_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL3_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL3_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL3_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL3_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL4_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL4_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL4_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL4_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL5_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL5_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL5_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL5_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL6_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL6_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL6_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL6_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL7_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL7_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL7_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL7_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL8_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL8_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL8_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL8_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL9_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL9_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL9_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL9_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL10_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL10_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL10_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL10_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL11_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL11_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL11_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL11_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL12_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL12_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL12_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL12_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL13_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL13_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL13_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL13_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL14_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL14_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL14_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL14_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIL15_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIL15_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIL15_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIL15_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIL15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIL15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR0_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR0_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR0_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR0_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR0_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR0_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR1_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR1_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR1_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR1_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR1_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR1_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR2_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR2_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR2_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR2_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR2_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR2_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR3_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR3_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR3_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR3_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR3_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR3_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR4_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR4_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR4_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR4_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR4_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR4_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR5_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR5_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR5_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR5_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR5_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR5_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR6_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR6_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR6_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR6_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR6_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR6_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR7_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR7_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR7_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR7_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR7_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR7_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR8_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR8_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR8_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR8_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR8_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR8_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR9_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR9_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR9_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR9_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR9_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR9_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR10_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR10_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR10_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR10_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR10_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR10_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR11_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR11_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR11_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR11_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR11_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR11_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR12_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR12_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR12_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR12_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR12_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR12_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR13_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR13_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR13_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR13_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR13_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR13_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR14_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR14_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR14_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR14_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR14_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR14_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_RDIR15_DATA__Z_MASK 0x7ff
+#define THM_TMON2_RDIR15_DATA__Z__SHIFT 0x0
+#define THM_TMON2_RDIR15_DATA__VALID_MASK 0x800
+#define THM_TMON2_RDIR15_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_RDIR15_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_RDIR15_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_INT_DATA__Z_MASK 0x7ff
+#define THM_TMON0_INT_DATA__Z__SHIFT 0x0
+#define THM_TMON0_INT_DATA__VALID_MASK 0x800
+#define THM_TMON0_INT_DATA__VALID__SHIFT 0xb
+#define THM_TMON0_INT_DATA__TEMP_MASK 0xfff000
+#define THM_TMON0_INT_DATA__TEMP__SHIFT 0xc
+#define THM_TMON1_INT_DATA__Z_MASK 0x7ff
+#define THM_TMON1_INT_DATA__Z__SHIFT 0x0
+#define THM_TMON1_INT_DATA__VALID_MASK 0x800
+#define THM_TMON1_INT_DATA__VALID__SHIFT 0xb
+#define THM_TMON1_INT_DATA__TEMP_MASK 0xfff000
+#define THM_TMON1_INT_DATA__TEMP__SHIFT 0xc
+#define THM_TMON2_INT_DATA__Z_MASK 0x7ff
+#define THM_TMON2_INT_DATA__Z__SHIFT 0x0
+#define THM_TMON2_INT_DATA__VALID_MASK 0x800
+#define THM_TMON2_INT_DATA__VALID__SHIFT 0xb
+#define THM_TMON2_INT_DATA__TEMP_MASK 0xfff000
+#define THM_TMON2_INT_DATA__TEMP__SHIFT 0xc
+#define THM_TMON0_DEBUG__DEBUG_RDI_MASK 0x1f
+#define THM_TMON0_DEBUG__DEBUG_RDI__SHIFT 0x0
+#define THM_TMON0_DEBUG__DEBUG_Z_MASK 0xffe0
+#define THM_TMON0_DEBUG__DEBUG_Z__SHIFT 0x5
+#define THM_TMON1_DEBUG__DEBUG_RDI_MASK 0x1f
+#define THM_TMON1_DEBUG__DEBUG_RDI__SHIFT 0x0
+#define THM_TMON1_DEBUG__DEBUG_Z_MASK 0xffe0
+#define THM_TMON1_DEBUG__DEBUG_Z__SHIFT 0x5
+#define THM_TMON2_DEBUG__DEBUG_RDI_MASK 0x1f
+#define THM_TMON2_DEBUG__DEBUG_RDI__SHIFT 0x0
+#define THM_TMON2_DEBUG__DEBUG_Z_MASK 0xffe0
+#define THM_TMON2_DEBUG__DEBUG_Z__SHIFT 0x5
+#define THM_TMON0_STATUS__CURRENT_RDI_MASK 0x1f
+#define THM_TMON0_STATUS__CURRENT_RDI__SHIFT 0x0
+#define THM_TMON0_STATUS__MEAS_DONE_MASK 0x20
+#define THM_TMON0_STATUS__MEAS_DONE__SHIFT 0x5
+#define THM_TMON1_STATUS__CURRENT_RDI_MASK 0x1f
+#define THM_TMON1_STATUS__CURRENT_RDI__SHIFT 0x0
+#define THM_TMON1_STATUS__MEAS_DONE_MASK 0x20
+#define THM_TMON1_STATUS__MEAS_DONE__SHIFT 0x5
+#define THM_TMON2_STATUS__CURRENT_RDI_MASK 0x1f
+#define THM_TMON2_STATUS__CURRENT_RDI__SHIFT 0x0
+#define THM_TMON2_STATUS__MEAS_DONE_MASK 0x20
+#define THM_TMON2_STATUS__MEAS_DONE__SHIFT 0x5
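+
+/*
+ * Every TMON RDI/INT data word above shares one layout: Z in bits 10:0,
+ * VALID in bit 11, TEMP in bits 23:12.  A minimal sketch of how such
+ * MASK/__SHIFT pairs are typically combined; the helper below is an
+ * illustration under that assumption, not a definition from the
+ * hardware spec:
+ */
+static inline unsigned int thm_tmon_temp(unsigned int data)
+{
+	/* Return the 12-bit TEMP field, or 0 when VALID is clear. */
+	if (!(data & THM_TMON0_INT_DATA__VALID_MASK))
+		return 0;
+	return (data & THM_TMON0_INT_DATA__TEMP_MASK) >>
+	       THM_TMON0_INT_DATA__TEMP__SHIFT;
+}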
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK 0x1
+#define GENERAL_PWRMGT__GLOBAL_PWRMGT_EN__SHIFT 0x0
+#define GENERAL_PWRMGT__STATIC_PM_EN_MASK 0x2
+#define GENERAL_PWRMGT__STATIC_PM_EN__SHIFT 0x1
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK 0x4
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_DIS__SHIFT 0x2
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE_MASK 0x8
+#define GENERAL_PWRMGT__THERMAL_PROTECTION_TYPE__SHIFT 0x3
+#define GENERAL_PWRMGT__SW_SMIO_INDEX_MASK 0x40
+#define GENERAL_PWRMGT__SW_SMIO_INDEX__SHIFT 0x6
+#define GENERAL_PWRMGT__LOW_VOLT_D2_ACPI_MASK 0x100
+#define GENERAL_PWRMGT__LOW_VOLT_D2_ACPI__SHIFT 0x8
+#define GENERAL_PWRMGT__LOW_VOLT_D3_ACPI_MASK 0x200
+#define GENERAL_PWRMGT__LOW_VOLT_D3_ACPI__SHIFT 0x9
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK 0x400
+#define GENERAL_PWRMGT__VOLT_PWRMGT_EN__SHIFT 0xa
+#define GENERAL_PWRMGT__SPARE11_MASK 0x800
+#define GENERAL_PWRMGT__SPARE11__SHIFT 0xb
+#define GENERAL_PWRMGT__GPU_COUNTER_ACPI_MASK 0x4000
+#define GENERAL_PWRMGT__GPU_COUNTER_ACPI__SHIFT 0xe
+#define GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK 0x8000
+#define GENERAL_PWRMGT__GPU_COUNTER_CLK__SHIFT 0xf
+#define GENERAL_PWRMGT__GPU_COUNTER_OFF_MASK 0x10000
+#define GENERAL_PWRMGT__GPU_COUNTER_OFF__SHIFT 0x10
+#define GENERAL_PWRMGT__GPU_COUNTER_INTF_OFF_MASK 0x20000
+#define GENERAL_PWRMGT__GPU_COUNTER_INTF_OFF__SHIFT 0x11
+#define GENERAL_PWRMGT__SPARE18_MASK 0x40000
+#define GENERAL_PWRMGT__SPARE18__SHIFT 0x12
+#define GENERAL_PWRMGT__ACPI_D3_VID_MASK 0x180000
+#define GENERAL_PWRMGT__ACPI_D3_VID__SHIFT 0x13
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK 0x800000
+#define GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN__SHIFT 0x17
+#define GENERAL_PWRMGT__SPARE27_MASK 0x8000000
+#define GENERAL_PWRMGT__SPARE27__SHIFT 0x1b
+#define GENERAL_PWRMGT__SPARE_MASK 0xf0000000
+#define GENERAL_PWRMGT__SPARE__SHIFT 0x1c
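+
+/*
+ * A hedged sketch of the read-modify-write pattern the GENERAL_PWRMGT
+ * pairs above support; the helper name is illustrative only, and the
+ * caller is assumed to own the actual register read and write-back:
+ */
+static inline unsigned int general_pwrmgt_set_static_pm(unsigned int v,
+							int on)
+{
+	/* Clear, then optionally set, STATIC_PM_EN in a read value. */
+	v &= ~GENERAL_PWRMGT__STATIC_PM_EN_MASK;
+	if (on)
+		v |= 1u << GENERAL_PWRMGT__STATIC_PM_EN__SHIFT;
+	return v;
+}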
+#define CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK 0x3
+#define CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT 0x0
+#define CNB_PWRMGT_CNTL__GNB_SLOW_MASK 0x4
+#define CNB_PWRMGT_CNTL__GNB_SLOW__SHIFT 0x2
+#define CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK 0x8
+#define CNB_PWRMGT_CNTL__FORCE_NB_PS1__SHIFT 0x3
+#define CNB_PWRMGT_CNTL__DPM_ENABLED_MASK 0x10
+#define CNB_PWRMGT_CNTL__DPM_ENABLED__SHIFT 0x4
+#define CNB_PWRMGT_CNTL__SPARE_MASK 0xffffffe0
+#define CNB_PWRMGT_CNTL__SPARE__SHIFT 0x5
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK 0x1
+#define SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF__SHIFT 0x0
+#define SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK 0x10
+#define SCLK_PWRMGT_CNTL__RESET_BUSY_CNT__SHIFT 0x4
+#define SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK 0x20
+#define SCLK_PWRMGT_CNTL__RESET_SCLK_CNT__SHIFT 0x5
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN_MASK 0x4000
+#define SCLK_PWRMGT_CNTL__DYN_LIGHT_SLEEP_EN__SHIFT 0xe
+#define SCLK_PWRMGT_CNTL__AUTO_SCLK_PULSE_SKIP_MASK 0x8000
+#define SCLK_PWRMGT_CNTL__AUTO_SCLK_PULSE_SKIP__SHIFT 0xf
+#define SCLK_PWRMGT_CNTL__LIGHT_SLEEP_COUNTER_MASK 0x1f0000
+#define SCLK_PWRMGT_CNTL__LIGHT_SLEEP_COUNTER__SHIFT 0x10
+#define SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK 0x200000
+#define SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN__SHIFT 0x15
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARGET_STATE_MASK 0xf
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARGET_STATE__SHIFT 0x0
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_MASK 0xf0
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE__SHIFT 0x4
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK 0xf00
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT 0x8
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_MCLK_INDEX_MASK 0xf000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_MCLK_INDEX__SHIFT 0xc
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK 0x1f0000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT 0x10
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_SCLK_INDEX_MASK 0x3e00000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_SCLK_INDEX__SHIFT 0x15
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_LCLK_INDEX_MASK 0x1c000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__CURR_LCLK_INDEX__SHIFT 0x1a
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_LCLK_INDEX_MASK 0xe0000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX__TARG_LCLK_INDEX__SHIFT 0x1d
+#define PWR_PCC_CONTROL__PCC_POLARITY_MASK 0x1
+#define PWR_PCC_CONTROL__PCC_POLARITY__SHIFT 0x0
+#define PWR_PCC_GPIO_SELECT__GPIO_MASK 0xffffffff
+#define PWR_PCC_GPIO_SELECT__GPIO__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_0__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_0__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_0__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_0__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_0__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_0__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_0__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_0__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_0__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_0__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_0__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_0__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_0__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_0__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_0__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_0__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_0__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_0__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_0__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_0__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_0__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_0__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_0__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_0__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_0__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_0__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_0__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_0__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_0__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_0__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_0__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_0__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_0__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_0__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_0__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_0__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_0__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_0__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_0__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_0__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_0__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_0__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_0__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_0__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_0__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_0__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_0__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_1__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_1__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_1__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_1__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_1__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_1__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_1__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_1__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_1__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_1__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_1__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_1__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_1__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_1__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_1__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_1__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_1__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_1__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_1__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_1__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_1__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_1__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_1__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_1__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_1__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_1__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_1__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_1__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_1__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_1__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_1__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_1__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_1__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_1__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_1__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_1__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_1__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_1__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_1__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_1__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_1__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_1__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_1__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_1__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_1__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_1__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_1__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_2__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_2__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_2__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_2__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_2__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_2__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_2__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_2__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_2__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_2__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_2__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_2__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_2__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_2__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_2__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_2__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_2__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_2__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_2__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_2__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_2__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_2__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_2__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_2__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_2__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_2__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_2__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_2__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_2__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_2__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_2__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_2__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_2__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_2__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_2__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_2__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_2__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_2__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_2__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_2__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_2__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_2__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_2__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_2__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_2__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_2__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_2__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_3__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_3__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_3__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_3__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_3__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_3__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_3__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_3__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_3__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_3__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_3__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_3__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_3__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_3__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_3__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_3__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_3__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_3__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_3__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_3__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_3__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_3__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_3__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_3__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_3__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_3__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_3__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_3__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_3__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_3__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_3__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_3__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_3__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_3__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_3__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_3__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_3__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_3__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_3__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_3__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_3__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_3__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_3__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_3__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_3__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_3__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_3__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_4__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_4__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_4__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_4__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_4__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_4__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_4__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_4__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_4__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_4__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_4__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_4__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_4__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_4__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_4__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_4__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_4__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_4__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_4__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_4__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_4__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_4__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_4__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_4__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_4__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_4__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_4__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_4__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_4__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_4__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_4__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_4__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_4__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_4__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_4__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_4__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_4__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_4__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_4__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_4__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_4__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_4__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_4__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_4__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_4__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_4__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_4__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_5__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_5__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_5__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_5__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_5__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_5__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_5__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_5__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_5__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_5__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_5__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_5__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_5__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_5__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_5__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_5__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_5__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_5__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_5__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_5__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_5__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_5__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_5__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_5__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_5__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_5__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_5__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_5__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_5__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_5__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_5__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_5__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_5__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_5__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_5__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_5__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_5__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_5__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_5__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_5__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_5__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_5__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_5__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_5__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_5__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_5__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_5__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_6__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_6__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_6__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_6__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_6__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_6__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_6__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_6__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_6__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_6__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_6__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_6__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_6__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_6__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_6__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_6__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_6__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_6__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_6__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_6__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_6__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_6__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_6__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_6__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_6__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_6__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_6__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_6__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_6__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_6__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_6__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_6__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_6__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_6__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_6__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_6__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_6__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_6__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_6__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_6__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_6__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_6__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_6__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_6__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_6__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_6__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_6__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define CG_FREQ_TRAN_VOTING_7__BIF_FREQ_THROTTLING_VOTE_EN_MASK 0x1
+#define CG_FREQ_TRAN_VOTING_7__BIF_FREQ_THROTTLING_VOTE_EN__SHIFT 0x0
+#define CG_FREQ_TRAN_VOTING_7__HDP_FREQ_THROTTLING_VOTE_EN_MASK 0x2
+#define CG_FREQ_TRAN_VOTING_7__HDP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1
+#define CG_FREQ_TRAN_VOTING_7__ROM_FREQ_THROTTLING_VOTE_EN_MASK 0x4
+#define CG_FREQ_TRAN_VOTING_7__ROM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x2
+#define CG_FREQ_TRAN_VOTING_7__IH_SEM_FREQ_THROTTLING_VOTE_EN_MASK 0x8
+#define CG_FREQ_TRAN_VOTING_7__IH_SEM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x3
+#define CG_FREQ_TRAN_VOTING_7__PDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x10
+#define CG_FREQ_TRAN_VOTING_7__PDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x4
+#define CG_FREQ_TRAN_VOTING_7__DRM_FREQ_THROTTLING_VOTE_EN_MASK 0x20
+#define CG_FREQ_TRAN_VOTING_7__DRM_FREQ_THROTTLING_VOTE_EN__SHIFT 0x5
+#define CG_FREQ_TRAN_VOTING_7__IDCT_FREQ_THROTTLING_VOTE_EN_MASK 0x40
+#define CG_FREQ_TRAN_VOTING_7__IDCT_FREQ_THROTTLING_VOTE_EN__SHIFT 0x6
+#define CG_FREQ_TRAN_VOTING_7__ACP_FREQ_THROTTLING_VOTE_EN_MASK 0x80
+#define CG_FREQ_TRAN_VOTING_7__ACP_FREQ_THROTTLING_VOTE_EN__SHIFT 0x7
+#define CG_FREQ_TRAN_VOTING_7__SDMA_FREQ_THROTTLING_VOTE_EN_MASK 0x100
+#define CG_FREQ_TRAN_VOTING_7__SDMA_FREQ_THROTTLING_VOTE_EN__SHIFT 0x8
+#define CG_FREQ_TRAN_VOTING_7__UVD_FREQ_THROTTLING_VOTE_EN_MASK 0x200
+#define CG_FREQ_TRAN_VOTING_7__UVD_FREQ_THROTTLING_VOTE_EN__SHIFT 0x9
+#define CG_FREQ_TRAN_VOTING_7__VCE_FREQ_THROTTLING_VOTE_EN_MASK 0x400
+#define CG_FREQ_TRAN_VOTING_7__VCE_FREQ_THROTTLING_VOTE_EN__SHIFT 0xa
+#define CG_FREQ_TRAN_VOTING_7__DC_AZ_FREQ_THROTTLING_VOTE_EN_MASK 0x800
+#define CG_FREQ_TRAN_VOTING_7__DC_AZ_FREQ_THROTTLING_VOTE_EN__SHIFT 0xb
+#define CG_FREQ_TRAN_VOTING_7__SAM_FREQ_THROTTLING_VOTE_EN_MASK 0x1000
+#define CG_FREQ_TRAN_VOTING_7__SAM_FREQ_THROTTLING_VOTE_EN__SHIFT 0xc
+#define CG_FREQ_TRAN_VOTING_7__AVP_FREQ_THROTTLING_VOTE_EN_MASK 0x2000
+#define CG_FREQ_TRAN_VOTING_7__AVP_FREQ_THROTTLING_VOTE_EN__SHIFT 0xd
+#define CG_FREQ_TRAN_VOTING_7__GRBM_0_FREQ_THROTTLING_VOTE_EN_MASK 0x4000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_0_FREQ_THROTTLING_VOTE_EN__SHIFT 0xe
+#define CG_FREQ_TRAN_VOTING_7__GRBM_1_FREQ_THROTTLING_VOTE_EN_MASK 0x8000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_1_FREQ_THROTTLING_VOTE_EN__SHIFT 0xf
+#define CG_FREQ_TRAN_VOTING_7__GRBM_2_FREQ_THROTTLING_VOTE_EN_MASK 0x10000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_2_FREQ_THROTTLING_VOTE_EN__SHIFT 0x10
+#define CG_FREQ_TRAN_VOTING_7__GRBM_3_FREQ_THROTTLING_VOTE_EN_MASK 0x20000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_3_FREQ_THROTTLING_VOTE_EN__SHIFT 0x11
+#define CG_FREQ_TRAN_VOTING_7__GRBM_4_FREQ_THROTTLING_VOTE_EN_MASK 0x40000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_4_FREQ_THROTTLING_VOTE_EN__SHIFT 0x12
+#define CG_FREQ_TRAN_VOTING_7__GRBM_5_FREQ_THROTTLING_VOTE_EN_MASK 0x80000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_5_FREQ_THROTTLING_VOTE_EN__SHIFT 0x13
+#define CG_FREQ_TRAN_VOTING_7__GRBM_6_FREQ_THROTTLING_VOTE_EN_MASK 0x100000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_6_FREQ_THROTTLING_VOTE_EN__SHIFT 0x14
+#define CG_FREQ_TRAN_VOTING_7__GRBM_7_FREQ_THROTTLING_VOTE_EN_MASK 0x200000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_7_FREQ_THROTTLING_VOTE_EN__SHIFT 0x15
+#define CG_FREQ_TRAN_VOTING_7__GRBM_8_FREQ_THROTTLING_VOTE_EN_MASK 0x400000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_8_FREQ_THROTTLING_VOTE_EN__SHIFT 0x16
+#define CG_FREQ_TRAN_VOTING_7__GRBM_9_FREQ_THROTTLING_VOTE_EN_MASK 0x800000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_9_FREQ_THROTTLING_VOTE_EN__SHIFT 0x17
+#define CG_FREQ_TRAN_VOTING_7__GRBM_10_FREQ_THROTTLING_VOTE_EN_MASK 0x1000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_10_FREQ_THROTTLING_VOTE_EN__SHIFT 0x18
+#define CG_FREQ_TRAN_VOTING_7__GRBM_11_FREQ_THROTTLING_VOTE_EN_MASK 0x2000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_11_FREQ_THROTTLING_VOTE_EN__SHIFT 0x19
+#define CG_FREQ_TRAN_VOTING_7__GRBM_12_FREQ_THROTTLING_VOTE_EN_MASK 0x4000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_12_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1a
+#define CG_FREQ_TRAN_VOTING_7__GRBM_13_FREQ_THROTTLING_VOTE_EN_MASK 0x8000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_13_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1b
+#define CG_FREQ_TRAN_VOTING_7__GRBM_14_FREQ_THROTTLING_VOTE_EN_MASK 0x10000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_14_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1c
+#define CG_FREQ_TRAN_VOTING_7__GRBM_15_FREQ_THROTTLING_VOTE_EN_MASK 0x20000000
+#define CG_FREQ_TRAN_VOTING_7__GRBM_15_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1d
+#define CG_FREQ_TRAN_VOTING_7__RLC_FREQ_THROTTLING_VOTE_EN_MASK 0x40000000
+#define CG_FREQ_TRAN_VOTING_7__RLC_FREQ_THROTTLING_VOTE_EN__SHIFT 0x1e
+#define PLL_TEST_CNTL__TST_SRC_SEL_MASK 0xf
+#define PLL_TEST_CNTL__TST_SRC_SEL__SHIFT 0x0
+#define PLL_TEST_CNTL__TST_REF_SEL_MASK 0xf0
+#define PLL_TEST_CNTL__TST_REF_SEL__SHIFT 0x4
+#define PLL_TEST_CNTL__REF_TEST_COUNT_MASK 0x7f00
+#define PLL_TEST_CNTL__REF_TEST_COUNT__SHIFT 0x8
+#define PLL_TEST_CNTL__TST_RESET_MASK 0x8000
+#define PLL_TEST_CNTL__TST_RESET__SHIFT 0xf
+#define PLL_TEST_CNTL__TEST_COUNT_MASK 0xfffe0000
+#define PLL_TEST_CNTL__TEST_COUNT__SHIFT 0x11
+#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_MASK 0xffff
+#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT 0x0
+#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT_MASK 0xf0000
+#define CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT 0x10
+#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK 0x3
+#define CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT 0x0
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT_MASK 0x3fff0
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_COUNT__SHIFT 0x4
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT_MASK 0x700000
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_UNIT__SHIFT 0x14
+#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK 0x3000000
+#define CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT 0x18
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_DISABLE_MASK 0x10000000
+#define CG_DISPLAY_GAP_CNTL__VBI_TIMER_DISABLE__SHIFT 0x1c
+#define CG_DISPLAY_GAP_CNTL2__VBI_PREDICTION_MASK 0xffffffff
+#define CG_DISPLAY_GAP_CNTL2__VBI_PREDICTION__SHIFT 0x0
+#define CG_ACPI_CNTL__SCLK_ACPI_DIV_MASK 0x7f
+#define CG_ACPI_CNTL__SCLK_ACPI_DIV__SHIFT 0x0
+#define CG_ACPI_CNTL__SCLK_CHANGE_SKIP_MASK 0x80
+#define CG_ACPI_CNTL__SCLK_CHANGE_SKIP__SHIFT 0x7
+#define SCLK_DEEP_SLEEP_CNTL__DIV_ID_MASK 0x7
+#define SCLK_DEEP_SLEEP_CNTL__DIV_ID__SHIFT 0x0
+#define SCLK_DEEP_SLEEP_CNTL__RAMP_DIS_MASK 0x8
+#define SCLK_DEEP_SLEEP_CNTL__RAMP_DIS__SHIFT 0x3
+#define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK 0xfff0
+#define SCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT 0x4
+#define SCLK_DEEP_SLEEP_CNTL__SCLK_RUNNING_MASK_MASK 0x10000
+#define SCLK_DEEP_SLEEP_CNTL__SCLK_RUNNING_MASK__SHIFT 0x10
+#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_MASK_MASK 0x20000
+#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_MASK__SHIFT 0x11
+#define SCLK_DEEP_SLEEP_CNTL__ALLOW_NBPSTATE_MASK_MASK 0x40000
+#define SCLK_DEEP_SLEEP_CNTL__ALLOW_NBPSTATE_MASK__SHIFT 0x12
+#define SCLK_DEEP_SLEEP_CNTL__BIF_BUSY_MASK_MASK 0x80000
+#define SCLK_DEEP_SLEEP_CNTL__BIF_BUSY_MASK__SHIFT 0x13
+#define SCLK_DEEP_SLEEP_CNTL__UVD_BUSY_MASK_MASK 0x100000
+#define SCLK_DEEP_SLEEP_CNTL__UVD_BUSY_MASK__SHIFT 0x14
+#define SCLK_DEEP_SLEEP_CNTL__MC0SRBM_BUSY_MASK_MASK 0x200000
+#define SCLK_DEEP_SLEEP_CNTL__MC0SRBM_BUSY_MASK__SHIFT 0x15
+#define SCLK_DEEP_SLEEP_CNTL__MC1SRBM_BUSY_MASK_MASK 0x400000
+#define SCLK_DEEP_SLEEP_CNTL__MC1SRBM_BUSY_MASK__SHIFT 0x16
+#define SCLK_DEEP_SLEEP_CNTL__MC_ALLOW_MASK_MASK 0x800000
+#define SCLK_DEEP_SLEEP_CNTL__MC_ALLOW_MASK__SHIFT 0x17
+#define SCLK_DEEP_SLEEP_CNTL__SMU_BUSY_MASK_MASK 0x1000000
+#define SCLK_DEEP_SLEEP_CNTL__SMU_BUSY_MASK__SHIFT 0x18
+#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_NLC_MASK_MASK 0x2000000
+#define SCLK_DEEP_SLEEP_CNTL__SELF_REFRESH_NLC_MASK__SHIFT 0x19
+#define SCLK_DEEP_SLEEP_CNTL__FAST_EXIT_REQ_NBPSTATE_MASK 0x4000000
+#define SCLK_DEEP_SLEEP_CNTL__FAST_EXIT_REQ_NBPSTATE__SHIFT 0x1a
+#define SCLK_DEEP_SLEEP_CNTL__DEEP_SLEEP_ENTRY_MODE_MASK 0x8000000
+#define SCLK_DEEP_SLEEP_CNTL__DEEP_SLEEP_ENTRY_MODE__SHIFT 0x1b
+#define SCLK_DEEP_SLEEP_CNTL__MBUS2_ACTIVE_MASK_MASK 0x10000000
+#define SCLK_DEEP_SLEEP_CNTL__MBUS2_ACTIVE_MASK__SHIFT 0x1c
+#define SCLK_DEEP_SLEEP_CNTL__VCE_BUSY_MASK_MASK 0x20000000
+#define SCLK_DEEP_SLEEP_CNTL__VCE_BUSY_MASK__SHIFT 0x1d
+#define SCLK_DEEP_SLEEP_CNTL__AZ_BUSY_MASK_MASK 0x40000000
+#define SCLK_DEEP_SLEEP_CNTL__AZ_BUSY_MASK__SHIFT 0x1e
+#define SCLK_DEEP_SLEEP_CNTL__ENABLE_DS_MASK 0x80000000
+#define SCLK_DEEP_SLEEP_CNTL__ENABLE_DS__SHIFT 0x1f
+#define SCLK_DEEP_SLEEP_CNTL2__RLC_BUSY_MASK_MASK 0x1
+#define SCLK_DEEP_SLEEP_CNTL2__RLC_BUSY_MASK__SHIFT 0x0
+#define SCLK_DEEP_SLEEP_CNTL2__HDP_BUSY_MASK_MASK 0x2
+#define SCLK_DEEP_SLEEP_CNTL2__HDP_BUSY_MASK__SHIFT 0x1
+#define SCLK_DEEP_SLEEP_CNTL2__ROM_BUSY_MASK_MASK 0x4
+#define SCLK_DEEP_SLEEP_CNTL2__ROM_BUSY_MASK__SHIFT 0x2
+#define SCLK_DEEP_SLEEP_CNTL2__IH_SEM_BUSY_MASK_MASK 0x8
+#define SCLK_DEEP_SLEEP_CNTL2__IH_SEM_BUSY_MASK__SHIFT 0x3
+#define SCLK_DEEP_SLEEP_CNTL2__PDMA_BUSY_MASK_MASK 0x10
+#define SCLK_DEEP_SLEEP_CNTL2__PDMA_BUSY_MASK__SHIFT 0x4
+#define SCLK_DEEP_SLEEP_CNTL2__IDCT_BUSY_MASK_MASK 0x40
+#define SCLK_DEEP_SLEEP_CNTL2__IDCT_BUSY_MASK__SHIFT 0x6
+#define SCLK_DEEP_SLEEP_CNTL2__SDMA_BUSY_MASK_MASK 0x80
+#define SCLK_DEEP_SLEEP_CNTL2__SDMA_BUSY_MASK__SHIFT 0x7
+#define SCLK_DEEP_SLEEP_CNTL2__DC_AZ_BUSY_MASK_MASK 0x100
+#define SCLK_DEEP_SLEEP_CNTL2__DC_AZ_BUSY_MASK__SHIFT 0x8
+#define SCLK_DEEP_SLEEP_CNTL2__ACP_SMU_ALLOW_DSLEEP_STUTTER_MASK_MASK 0x200
+#define SCLK_DEEP_SLEEP_CNTL2__ACP_SMU_ALLOW_DSLEEP_STUTTER_MASK__SHIFT 0x9
+#define SCLK_DEEP_SLEEP_CNTL2__UVD_CG_MC_STAT_BUSY_MASK_MASK 0x400
+#define SCLK_DEEP_SLEEP_CNTL2__UVD_CG_MC_STAT_BUSY_MASK__SHIFT 0xa
+#define SCLK_DEEP_SLEEP_CNTL2__VCE_CG_MC_STAT_BUSY_MASK_MASK 0x800
+#define SCLK_DEEP_SLEEP_CNTL2__VCE_CG_MC_STAT_BUSY_MASK__SHIFT 0xb
+#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_MC_STAT_BUSY_MASK_MASK 0x1000
+#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_MC_STAT_BUSY_MASK__SHIFT 0xc
+#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_STATUS_BUSY_MASK_MASK 0x2000
+#define SCLK_DEEP_SLEEP_CNTL2__SAM_CG_STATUS_BUSY_MASK__SHIFT 0xd
+#define SCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK_MASK 0x4000
+#define SCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK__SHIFT 0xe
+#define SCLK_DEEP_SLEEP_CNTL2__SHALLOW_DIV_ID_MASK 0xe00000
+#define SCLK_DEEP_SLEEP_CNTL2__SHALLOW_DIV_ID__SHIFT 0x15
+#define SCLK_DEEP_SLEEP_CNTL2__INOUT_CUSHION_MASK 0xff000000
+#define SCLK_DEEP_SLEEP_CNTL2__INOUT_CUSHION__SHIFT 0x18
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_0_SMU_BUSY_MASK_MASK 0x1
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_0_SMU_BUSY_MASK__SHIFT 0x0
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_1_SMU_BUSY_MASK_MASK 0x2
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_1_SMU_BUSY_MASK__SHIFT 0x1
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_2_SMU_BUSY_MASK_MASK 0x4
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_2_SMU_BUSY_MASK__SHIFT 0x2
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_3_SMU_BUSY_MASK_MASK 0x8
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_3_SMU_BUSY_MASK__SHIFT 0x3
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_4_SMU_BUSY_MASK_MASK 0x10
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_4_SMU_BUSY_MASK__SHIFT 0x4
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_5_SMU_BUSY_MASK_MASK 0x20
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_5_SMU_BUSY_MASK__SHIFT 0x5
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_6_SMU_BUSY_MASK_MASK 0x40
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_6_SMU_BUSY_MASK__SHIFT 0x6
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_7_SMU_BUSY_MASK_MASK 0x80
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_7_SMU_BUSY_MASK__SHIFT 0x7
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_8_SMU_BUSY_MASK_MASK 0x100
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_8_SMU_BUSY_MASK__SHIFT 0x8
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_9_SMU_BUSY_MASK_MASK 0x200
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_9_SMU_BUSY_MASK__SHIFT 0x9
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_10_SMU_BUSY_MASK_MASK 0x400
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_10_SMU_BUSY_MASK__SHIFT 0xa
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_11_SMU_BUSY_MASK_MASK 0x800
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_11_SMU_BUSY_MASK__SHIFT 0xb
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_12_SMU_BUSY_MASK_MASK 0x1000
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_12_SMU_BUSY_MASK__SHIFT 0xc
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_13_SMU_BUSY_MASK_MASK 0x2000
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_13_SMU_BUSY_MASK__SHIFT 0xd
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_14_SMU_BUSY_MASK_MASK 0x4000
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_14_SMU_BUSY_MASK__SHIFT 0xe
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_15_SMU_BUSY_MASK_MASK 0x8000
+#define SCLK_DEEP_SLEEP_CNTL3__GRBM_15_SMU_BUSY_MASK__SHIFT 0xf
+#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_DS_DIV_ID_MASK 0x7
+#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_DS_DIV_ID__SHIFT 0x0
+#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_SS_DIV_ID_MASK 0x38
+#define SCLK_DEEP_SLEEP_MISC_CNTL__DPM_SS_DIV_ID__SHIFT 0x3
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_ENABLE_MASK 0x10000
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_ENABLE__SHIFT 0x10
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_DS_DIV_ID_MASK 0xe0000
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_DS_DIV_ID__SHIFT 0x11
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_SS_DIV_ID_MASK 0x700000
+#define SCLK_DEEP_SLEEP_MISC_CNTL__OCP_SS_DIV_ID__SHIFT 0x14
+#define LCLK_DEEP_SLEEP_CNTL__DIV_ID_MASK 0x7
+#define LCLK_DEEP_SLEEP_CNTL__DIV_ID__SHIFT 0x0
+#define LCLK_DEEP_SLEEP_CNTL__RAMP_DIS_MASK 0x8
+#define LCLK_DEEP_SLEEP_CNTL__RAMP_DIS__SHIFT 0x3
+#define LCLK_DEEP_SLEEP_CNTL__HYSTERESIS_MASK 0xfff0
+#define LCLK_DEEP_SLEEP_CNTL__HYSTERESIS__SHIFT 0x4
+#define LCLK_DEEP_SLEEP_CNTL__RESERVED_MASK 0x7fff0000
+#define LCLK_DEEP_SLEEP_CNTL__RESERVED__SHIFT 0x10
+#define LCLK_DEEP_SLEEP_CNTL__ENABLE_DS_MASK 0x80000000
+#define LCLK_DEEP_SLEEP_CNTL__ENABLE_DS__SHIFT 0x1f
+#define LCLK_DEEP_SLEEP_CNTL2__RFE_BUSY_MASK_MASK 0x1
+#define LCLK_DEEP_SLEEP_CNTL2__RFE_BUSY_MASK__SHIFT 0x0
+#define LCLK_DEEP_SLEEP_CNTL2__BIF_CG_LCLK_BUSY_MASK_MASK 0x2
+#define LCLK_DEEP_SLEEP_CNTL2__BIF_CG_LCLK_BUSY_MASK__SHIFT 0x1
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMU_SMU_IDLE_MASK_MASK 0x4
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMU_SMU_IDLE_MASK__SHIFT 0x2
+#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_BIT3_MASK 0x8
+#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_BIT3__SHIFT 0x3
+#define LCLK_DEEP_SLEEP_CNTL2__SCLK_RUNNING_MASK_MASK 0x10
+#define LCLK_DEEP_SLEEP_CNTL2__SCLK_RUNNING_MASK__SHIFT 0x4
+#define LCLK_DEEP_SLEEP_CNTL2__SMU_BUSY_MASK_MASK 0x20
+#define LCLK_DEEP_SLEEP_CNTL2__SMU_BUSY_MASK__SHIFT 0x5
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE1_MASK_MASK 0x40
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE1_MASK__SHIFT 0x6
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE2_MASK_MASK 0x80
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE2_MASK__SHIFT 0x7
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE3_MASK_MASK 0x100
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE3_MASK__SHIFT 0x8
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE4_MASK_MASK 0x200
+#define LCLK_DEEP_SLEEP_CNTL2__PCIE_LCLK_IDLE4_MASK__SHIFT 0x9
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPP_IDLE_MASK_MASK 0x400
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPP_IDLE_MASK__SHIFT 0xa
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPPSB_IDLE_MASK_MASK 0x800
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUGPPSB_IDLE_MASK__SHIFT 0xb
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUBIF_IDLE_MASK_MASK 0x1000
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUBIF_IDLE_MASK__SHIFT 0xc
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUINTGEN_IDLE_MASK_MASK 0x2000
+#define LCLK_DEEP_SLEEP_CNTL2__L1IMUINTGEN_IDLE_MASK__SHIFT 0xd
+#define LCLK_DEEP_SLEEP_CNTL2__L2IMU_IDLE_MASK_MASK 0x4000
+#define LCLK_DEEP_SLEEP_CNTL2__L2IMU_IDLE_MASK__SHIFT 0xe
+#define LCLK_DEEP_SLEEP_CNTL2__ORB_IDLE_MASK_MASK 0x8000
+#define LCLK_DEEP_SLEEP_CNTL2__ORB_IDLE_MASK__SHIFT 0xf
+#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_MASK_MASK 0x10000
+#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_MASK__SHIFT 0x10
+#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_ACK_MASK_MASK 0x20000
+#define LCLK_DEEP_SLEEP_CNTL2__ON_INB_WAKE_ACK_MASK__SHIFT 0x11
+#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_MASK_MASK 0x40000
+#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_MASK__SHIFT 0x12
+#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_ACK_MASK_MASK 0x80000
+#define LCLK_DEEP_SLEEP_CNTL2__ON_OUTB_WAKE_ACK_MASK__SHIFT 0x13
+#define LCLK_DEEP_SLEEP_CNTL2__DMAACTIVE_MASK_MASK 0x100000
+#define LCLK_DEEP_SLEEP_CNTL2__DMAACTIVE_MASK__SHIFT 0x14
+#define LCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK_MASK 0x200000
+#define LCLK_DEEP_SLEEP_CNTL2__RLC_SMU_GFXCLK_OFF_MASK__SHIFT 0x15
+#define LCLK_DEEP_SLEEP_CNTL2__RESERVED_MASK 0xffc00000
+#define LCLK_DEEP_SLEEP_CNTL2__RESERVED__SHIFT 0x16
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDCI_INDEX_MASK 0xf
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDCI_INDEX__SHIFT 0x0
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDCI_INDEX_MASK 0xf0
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDCI_INDEX__SHIFT 0x4
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_MVDD_INDEX_MASK 0xf00
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_MVDD_INDEX__SHIFT 0x8
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_MVDD_INDEX_MASK 0xf000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_MVDD_INDEX__SHIFT 0xc
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDC_INDEX_MASK 0xf0000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_VDDC_INDEX__SHIFT 0x10
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDC_INDEX_MASK 0xf00000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_VDDC_INDEX__SHIFT 0x14
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK 0xf000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT 0x18
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX_MASK 0xf0000000
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1__TARG_PCIE_INDEX__SHIFT 0x1c
+#define CG_ULV_PARAMETER__ULV_THRESHOLD_MASK 0xffff
+#define CG_ULV_PARAMETER__ULV_THRESHOLD__SHIFT 0x0
+#define CG_ULV_PARAMETER__ULV_THRESHOLD_UNIT_MASK 0xf0000
+#define CG_ULV_PARAMETER__ULV_THRESHOLD_UNIT__SHIFT 0x10
+#define SCLK_MIN_DIV__FRACV_MASK 0xfff
+#define SCLK_MIN_DIV__FRACV__SHIFT 0x0
+#define SCLK_MIN_DIV__INTV_MASK 0x7f000
+#define SCLK_MIN_DIV__INTV__SHIFT 0xc
+#define PWR_AVFS_SEL__AvfsSel_MASK 0xfffffff
+#define PWR_AVFS_SEL__AvfsSel__SHIFT 0x0
+#define PWR_AVFS_CNTL__MmBusIn_MASK 0xff
+#define PWR_AVFS_CNTL__MmBusIn__SHIFT 0x0
+#define PWR_AVFS_CNTL__MmLclRdEn_MASK 0x100
+#define PWR_AVFS_CNTL__MmLclRdEn__SHIFT 0x8
+#define PWR_AVFS_CNTL__MmLclWrEn_MASK 0x200
+#define PWR_AVFS_CNTL__MmLclWrEn__SHIFT 0x9
+#define PWR_AVFS_CNTL__MmLclSz_MASK 0xc00
+#define PWR_AVFS_CNTL__MmLclSz__SHIFT 0xa
+#define PWR_AVFS_CNTL__MmState_MASK 0x3f000
+#define PWR_AVFS_CNTL__MmState__SHIFT 0xc
+#define PWR_AVFS_CNTL__PsmScanMode_MASK 0x40000
+#define PWR_AVFS_CNTL__PsmScanMode__SHIFT 0x12
+#define PWR_AVFS_CNTL__PsmGater_MASK 0x80000
+#define PWR_AVFS_CNTL__PsmGater__SHIFT 0x13
+#define PWR_AVFS_CNTL__PsmTrst_MASK 0x100000
+#define PWR_AVFS_CNTL__PsmTrst__SHIFT 0x14
+#define PWR_AVFS_CNTL__PsmEn_MASK 0x200000
+#define PWR_AVFS_CNTL__PsmEn__SHIFT 0x15
+#define PWR_AVFS_CNTL__SkipPhaseEn_MASK 0x400000
+#define PWR_AVFS_CNTL__SkipPhaseEn__SHIFT 0x16
+#define PWR_AVFS_CNTL__Isolate_MASK 0x800000
+#define PWR_AVFS_CNTL__Isolate__SHIFT 0x17
+#define PWR_AVFS_CNTL__AvfsRst_MASK 0x1000000
+#define PWR_AVFS_CNTL__AvfsRst__SHIFT 0x18
+#define PWR_AVFS_CNTL__PccIsolateEn_MASK 0x2000000
+#define PWR_AVFS_CNTL__PccIsolateEn__SHIFT 0x19
+#define PWR_AVFS_CNTL__DeepSleepIsolateEn_MASK 0x4000000
+#define PWR_AVFS_CNTL__DeepSleepIsolateEn__SHIFT 0x1a
+#define PWR_AVFS0_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS0_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS0_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS0_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS0_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS0_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS1_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS1_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS1_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS1_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS1_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS1_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS2_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS2_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS2_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS2_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS2_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS2_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS3_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS3_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS3_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS3_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS3_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS3_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS4_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS4_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS4_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS4_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS4_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS4_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS5_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS5_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS5_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS5_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS5_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS5_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS6_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS6_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS6_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS6_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS6_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS6_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS7_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS7_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS7_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS7_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS7_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS7_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS8_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS8_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS8_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS8_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS8_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS8_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS9_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS9_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS9_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS9_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS9_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS9_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS10_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS10_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS10_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS10_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS10_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS10_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS11_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS11_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS11_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS11_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS11_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS11_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS12_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS12_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS12_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS12_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS12_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS12_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS13_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS13_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS13_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS13_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS13_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS13_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS14_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS14_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS14_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS14_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS14_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS14_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS15_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS15_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS15_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS15_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS15_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS15_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS16_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS16_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS16_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS16_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS16_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS16_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS17_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS17_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS17_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS17_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS17_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS17_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS18_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS18_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS18_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS18_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS18_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS18_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS19_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS19_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS19_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS19_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS19_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS19_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS20_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS20_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS20_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS20_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS20_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS20_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS21_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS21_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS21_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS21_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS21_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS21_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS22_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS22_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS22_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS22_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS22_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS22_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS23_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS23_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS23_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS23_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS23_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS23_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS24_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS24_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS24_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS24_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS24_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS24_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS25_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS25_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS25_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS25_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS25_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS25_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS26_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS26_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS26_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS26_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS26_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS26_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_AVFS27_CNTL_STATUS__MmDatOut_MASK 0xff
+#define PWR_AVFS27_CNTL_STATUS__MmDatOut__SHIFT 0x0
+#define PWR_AVFS27_CNTL_STATUS__PsmTdo_MASK 0x100
+#define PWR_AVFS27_CNTL_STATUS__PsmTdo__SHIFT 0x8
+#define PWR_AVFS27_CNTL_STATUS__AlarmFlag_MASK 0x200
+#define PWR_AVFS27_CNTL_STATUS__AlarmFlag__SHIFT 0x9
+#define PWR_CKS_ENABLE__STRETCH_ENABLE_MASK 0x1
+#define PWR_CKS_ENABLE__STRETCH_ENABLE__SHIFT 0x0
+#define PWR_CKS_ENABLE__masterReset_MASK 0x2
+#define PWR_CKS_ENABLE__masterReset__SHIFT 0x1
+#define PWR_CKS_ENABLE__staticEnable_MASK 0x4
+#define PWR_CKS_ENABLE__staticEnable__SHIFT 0x2
+#define PWR_CKS_ENABLE__IGNORE_DROOP_DETECT_MASK 0x8
+#define PWR_CKS_ENABLE__IGNORE_DROOP_DETECT__SHIFT 0x3
+#define PWR_CKS_ENABLE__PCC_HAND_SHAKE_EN_MASK 0x10
+#define PWR_CKS_ENABLE__PCC_HAND_SHAKE_EN__SHIFT 0x4
+#define PWR_CKS_ENABLE__MET_CTRL_SEL_MASK 0x60
+#define PWR_CKS_ENABLE__MET_CTRL_SEL__SHIFT 0x5
+#define PWR_CKS_ENABLE__DS_HAND_SHAKE_EN_MASK 0x80
+#define PWR_CKS_ENABLE__DS_HAND_SHAKE_EN__SHIFT 0x7
+#define PWR_CKS_CNTL__CKS_BYPASS_MASK 0x1
+#define PWR_CKS_CNTL__CKS_BYPASS__SHIFT 0x0
+#define PWR_CKS_CNTL__CKS_PCCEnable_MASK 0x2
+#define PWR_CKS_CNTL__CKS_PCCEnable__SHIFT 0x1
+#define PWR_CKS_CNTL__CKS_TEMP_COMP_MASK 0x4
+#define PWR_CKS_CNTL__CKS_TEMP_COMP__SHIFT 0x2
+#define PWR_CKS_CNTL__CKS_STRETCH_AMOUNT_MASK 0x78
+#define PWR_CKS_CNTL__CKS_STRETCH_AMOUNT__SHIFT 0x3
+#define PWR_CKS_CNTL__CKS_SKIP_PHASE_BYPASS_MASK 0x80
+#define PWR_CKS_CNTL__CKS_SKIP_PHASE_BYPASS__SHIFT 0x7
+#define PWR_CKS_CNTL__CKS_SAMPLE_SIZE_MASK 0xf00
+#define PWR_CKS_CNTL__CKS_SAMPLE_SIZE__SHIFT 0x8
+#define PWR_CKS_CNTL__CKS_FSM_WAIT_CYCLES_MASK 0xf000
+#define PWR_CKS_CNTL__CKS_FSM_WAIT_CYCLES__SHIFT 0xc
+#define PWR_CKS_CNTL__CKS_USE_FOR_LOW_FREQ_MASK 0x10000
+#define PWR_CKS_CNTL__CKS_USE_FOR_LOW_FREQ__SHIFT 0x10
+#define PWR_CKS_CNTL__CKS_NO_EXTRA_COARSE_STEP_MASK 0x20000
+#define PWR_CKS_CNTL__CKS_NO_EXTRA_COARSE_STEP__SHIFT 0x11
+#define PWR_CKS_CNTL__CKS_LDO_REFSEL_MASK 0x3c0000
+#define PWR_CKS_CNTL__CKS_LDO_REFSEL__SHIFT 0x12
+#define PWR_CKS_CNTL__DDT_DEBUS_SEL_MASK 0x400000
+#define PWR_CKS_CNTL__DDT_DEBUS_SEL__SHIFT 0x16
+#define PWR_CKS_CNTL__CKS_LDO_READY_COUNT_VAL_MASK 0x7f800000
+#define PWR_CKS_CNTL__CKS_LDO_READY_COUNT_VAL__SHIFT 0x17
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x1ffffff
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x2000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x4000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x8000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000
+#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x1
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT_MASK 0x2
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_MASK 0x4
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT__SHIFT 0x2
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xffffff80
+#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x1ffffff
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x2000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x4000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x8000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x1
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT_MASK 0x2
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_MASK 0x4
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT__SHIFT 0x2
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xffffff80
+#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7
+#define PWR_DISP_TIMER_CONTROL2__DISP_TIMER_PULSE_WIDTH_MASK 0x3ff
+#define PWR_DISP_TIMER_CONTROL2__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0
+#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_MASK 0xffff
+#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD__SHIFT 0x0
+#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_UNIT_MASK 0xf0000
+#define VDDGFX_IDLE_PARAMETER__VDDGFX_IDLE_THRESHOLD_UNIT__SHIFT 0x10
+#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_EN_MASK 0x1
+#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_EN__SHIFT 0x0
+#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_DETECT_MASK 0x2
+#define VDDGFX_IDLE_CONTROL__VDDGFX_IDLE_DETECT__SHIFT 0x1
+#define VDDGFX_IDLE_CONTROL__FORCE_VDDGFX_IDLE_EXIT_MASK 0x4
+#define VDDGFX_IDLE_CONTROL__FORCE_VDDGFX_IDLE_EXIT__SHIFT 0x2
+#define VDDGFX_IDLE_CONTROL__SMC_VDDGFX_IDLE_STATE_MASK 0x8
+#define VDDGFX_IDLE_CONTROL__SMC_VDDGFX_IDLE_STATE__SHIFT 0x3
+#define VDDGFX_IDLE_EXIT__BIF_EXIT_REQ_MASK 0x1
+#define VDDGFX_IDLE_EXIT__BIF_EXIT_REQ__SHIFT 0x0
+#define LCAC_MC0_CNTL__MC0_ENABLE_MASK 0x1
+#define LCAC_MC0_CNTL__MC0_ENABLE__SHIFT 0x0
+#define LCAC_MC0_CNTL__MC0_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT 0x1
+#define LCAC_MC0_CNTL__MC0_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC0_CNTL__MC0_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC0_CNTL__MC0_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC0_OVR_SEL__MC0_OVR_SEL__SHIFT 0x0
+#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC0_OVR_VAL__MC0_OVR_VAL__SHIFT 0x0
+#define LCAC_MC1_CNTL__MC1_ENABLE_MASK 0x1
+#define LCAC_MC1_CNTL__MC1_ENABLE__SHIFT 0x0
+#define LCAC_MC1_CNTL__MC1_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC1_CNTL__MC1_THRESHOLD__SHIFT 0x1
+#define LCAC_MC1_CNTL__MC1_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC1_CNTL__MC1_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC1_CNTL__MC1_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC1_CNTL__MC1_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC1_OVR_SEL__MC1_OVR_SEL__SHIFT 0x0
+#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC1_OVR_VAL__MC1_OVR_VAL__SHIFT 0x0
+#define LCAC_MC2_CNTL__MC2_ENABLE_MASK 0x1
+#define LCAC_MC2_CNTL__MC2_ENABLE__SHIFT 0x0
+#define LCAC_MC2_CNTL__MC2_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC2_CNTL__MC2_THRESHOLD__SHIFT 0x1
+#define LCAC_MC2_CNTL__MC2_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC2_CNTL__MC2_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC2_CNTL__MC2_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC2_CNTL__MC2_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC2_OVR_SEL__MC2_OVR_SEL__SHIFT 0x0
+#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC2_OVR_VAL__MC2_OVR_VAL__SHIFT 0x0
+#define LCAC_MC3_CNTL__MC3_ENABLE_MASK 0x1
+#define LCAC_MC3_CNTL__MC3_ENABLE__SHIFT 0x0
+#define LCAC_MC3_CNTL__MC3_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC3_CNTL__MC3_THRESHOLD__SHIFT 0x1
+#define LCAC_MC3_CNTL__MC3_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC3_CNTL__MC3_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC3_CNTL__MC3_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC3_CNTL__MC3_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC3_OVR_SEL__MC3_OVR_SEL__SHIFT 0x0
+#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC3_OVR_VAL__MC3_OVR_VAL__SHIFT 0x0
+#define LCAC_MC4_CNTL__MC4_ENABLE_MASK 0x1
+#define LCAC_MC4_CNTL__MC4_ENABLE__SHIFT 0x0
+#define LCAC_MC4_CNTL__MC4_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC4_CNTL__MC4_THRESHOLD__SHIFT 0x1
+#define LCAC_MC4_CNTL__MC4_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC4_CNTL__MC4_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC4_CNTL__MC4_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC4_CNTL__MC4_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC4_OVR_SEL__MC4_OVR_SEL__SHIFT 0x0
+#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC4_OVR_VAL__MC4_OVR_VAL__SHIFT 0x0
+#define LCAC_MC5_CNTL__MC5_ENABLE_MASK 0x1
+#define LCAC_MC5_CNTL__MC5_ENABLE__SHIFT 0x0
+#define LCAC_MC5_CNTL__MC5_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC5_CNTL__MC5_THRESHOLD__SHIFT 0x1
+#define LCAC_MC5_CNTL__MC5_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC5_CNTL__MC5_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC5_CNTL__MC5_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC5_CNTL__MC5_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC5_OVR_SEL__MC5_OVR_SEL__SHIFT 0x0
+#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC5_OVR_VAL__MC5_OVR_VAL__SHIFT 0x0
+#define LCAC_MC6_CNTL__MC6_ENABLE_MASK 0x1
+#define LCAC_MC6_CNTL__MC6_ENABLE__SHIFT 0x0
+#define LCAC_MC6_CNTL__MC6_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC6_CNTL__MC6_THRESHOLD__SHIFT 0x1
+#define LCAC_MC6_CNTL__MC6_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC6_CNTL__MC6_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC6_CNTL__MC6_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC6_CNTL__MC6_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC6_OVR_SEL__MC6_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC6_OVR_SEL__MC6_OVR_SEL__SHIFT 0x0
+#define LCAC_MC6_OVR_VAL__MC6_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC6_OVR_VAL__MC6_OVR_VAL__SHIFT 0x0
+#define LCAC_MC7_CNTL__MC7_ENABLE_MASK 0x1
+#define LCAC_MC7_CNTL__MC7_ENABLE__SHIFT 0x0
+#define LCAC_MC7_CNTL__MC7_THRESHOLD_MASK 0x1fffe
+#define LCAC_MC7_CNTL__MC7_THRESHOLD__SHIFT 0x1
+#define LCAC_MC7_CNTL__MC7_BLOCK_ID_MASK 0x3e0000
+#define LCAC_MC7_CNTL__MC7_BLOCK_ID__SHIFT 0x11
+#define LCAC_MC7_CNTL__MC7_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_MC7_CNTL__MC7_SIGNAL_ID__SHIFT 0x16
+#define LCAC_MC7_OVR_SEL__MC7_OVR_SEL_MASK 0xffffffff
+#define LCAC_MC7_OVR_SEL__MC7_OVR_SEL__SHIFT 0x0
+#define LCAC_MC7_OVR_VAL__MC7_OVR_VAL_MASK 0xffffffff
+#define LCAC_MC7_OVR_VAL__MC7_OVR_VAL__SHIFT 0x0
+#define LCAC_CPL_CNTL__CPL_ENABLE_MASK 0x1
+#define LCAC_CPL_CNTL__CPL_ENABLE__SHIFT 0x0
+#define LCAC_CPL_CNTL__CPL_THRESHOLD_MASK 0x1fffe
+#define LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT 0x1
+#define LCAC_CPL_CNTL__CPL_BLOCK_ID_MASK 0x3e0000
+#define LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT 0x11
+#define LCAC_CPL_CNTL__CPL_SIGNAL_ID_MASK 0x3fc00000
+#define LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT 0x16
+#define LCAC_CPL_OVR_SEL__CPL_OVR_SEL_MASK 0xffffffff
+#define LCAC_CPL_OVR_SEL__CPL_OVR_SEL__SHIFT 0x0
+#define LCAC_CPL_OVR_VAL__CPL_OVR_VAL_MASK 0xffffffff
+#define LCAC_CPL_OVR_VAL__CPL_OVR_VAL__SHIFT 0x0
+#define ROM_SMC_IND_INDEX__SMC_IND_ADDR_MASK 0xffffffff
+#define ROM_SMC_IND_INDEX__SMC_IND_ADDR__SHIFT 0x0
+#define ROM_SMC_IND_DATA__SMC_IND_DATA_MASK 0xffffffff
+#define ROM_SMC_IND_DATA__SMC_IND_DATA__SHIFT 0x0
+#define ROM_CNTL__SCK_OVERWRITE_MASK 0x2
+#define ROM_CNTL__SCK_OVERWRITE__SHIFT 0x1
+#define ROM_CNTL__CLOCK_GATING_EN_MASK 0x4
+#define ROM_CNTL__CLOCK_GATING_EN__SHIFT 0x2
+#define ROM_CNTL__CSB_ACTIVE_TO_SCK_SETUP_TIME_MASK 0xff00
+#define ROM_CNTL__CSB_ACTIVE_TO_SCK_SETUP_TIME__SHIFT 0x8
+#define ROM_CNTL__CSB_ACTIVE_TO_SCK_HOLD_TIME_MASK 0xff0000
+#define ROM_CNTL__CSB_ACTIVE_TO_SCK_HOLD_TIME__SHIFT 0x10
+#define ROM_CNTL__SCK_PRESCALE_REFCLK_MASK 0xf000000
+#define ROM_CNTL__SCK_PRESCALE_REFCLK__SHIFT 0x18
+#define ROM_CNTL__SCK_PRESCALE_CRYSTAL_CLK_MASK 0xf0000000
+#define ROM_CNTL__SCK_PRESCALE_CRYSTAL_CLK__SHIFT 0x1c
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR_MASK 0xffffff
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR__SHIFT 0x0
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE_MASK 0x1000000
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE__SHIFT 0x18
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE_MASK 0x2000000
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE__SHIFT 0x19
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE_MASK 0xc000000
+#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE__SHIFT 0x1a
+#define ROM_STATUS__ROM_BUSY_MASK 0x1
+#define ROM_STATUS__ROM_BUSY__SHIFT 0x0
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY_MASK 0xf
+#define CGTT_ROM_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS_MASK 0xff0
+#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000
+#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define ROM_INDEX__ROM_INDEX_MASK 0xffffff
+#define ROM_INDEX__ROM_INDEX__SHIFT 0x0
+#define ROM_DATA__ROM_DATA_MASK 0xffffffff
+#define ROM_DATA__ROM_DATA__SHIFT 0x0
+#define ROM_START__ROM_START_MASK 0xffffff
+#define ROM_START__ROM_START__SHIFT 0x0
+#define ROM_SW_CNTL__DATA_SIZE_MASK 0xffff
+#define ROM_SW_CNTL__DATA_SIZE__SHIFT 0x0
+#define ROM_SW_CNTL__COMMAND_SIZE_MASK 0x30000
+#define ROM_SW_CNTL__COMMAND_SIZE__SHIFT 0x10
+#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE_MASK 0x40000
+#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE__SHIFT 0x12
+#define ROM_SW_STATUS__ROM_SW_DONE_MASK 0x1
+#define ROM_SW_STATUS__ROM_SW_DONE__SHIFT 0x0
+#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION_MASK 0xff
+#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION__SHIFT 0x0
+#define ROM_SW_COMMAND__ROM_SW_ADDRESS_MASK 0xffffff00
+#define ROM_SW_COMMAND__ROM_SW_ADDRESS__SHIFT 0x8
+#define ROM_SW_DATA_1__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_1__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_2__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_2__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_3__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_3__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_4__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_4__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_5__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_5__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_6__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_6__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_7__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_7__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_8__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_8__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_9__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_9__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_10__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_10__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_11__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_11__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_12__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_12__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_13__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_13__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_14__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_14__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_15__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_15__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_16__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_16__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_17__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_17__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_18__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_18__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_19__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_19__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_20__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_20__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_21__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_21__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_22__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_22__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_23__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_23__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_24__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_24__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_25__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_25__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_26__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_26__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_27__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_27__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_28__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_28__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_29__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_29__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_30__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_30__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_31__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_31__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_32__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_32__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_33__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_33__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_34__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_34__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_35__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_35__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_36__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_36__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_37__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_37__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_38__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_38__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_39__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_39__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_40__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_40__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_41__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_41__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_42__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_42__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_43__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_43__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_44__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_44__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_45__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_45__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_46__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_46__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_47__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_47__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_48__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_48__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_49__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_49__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_50__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_50__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_51__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_51__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_52__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_52__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_53__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_53__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_54__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_54__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_55__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_55__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_56__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_56__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_57__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_57__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_58__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_58__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_59__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_59__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_60__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_60__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_61__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_61__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_62__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_62__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_63__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0
+#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff
+#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
+#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0xf
+#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
+#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0xf
+#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0xff0
+#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define GC_CAC_LKG_AGGR_LOWER__LKG_AGGR_31_0_MASK 0xffffffff
+#define GC_CAC_LKG_AGGR_LOWER__LKG_AGGR_31_0__SHIFT 0x0
+#define GC_CAC_LKG_AGGR_UPPER__LKG_AGGR_63_32_MASK 0xffffffff
+#define GC_CAC_LKG_AGGR_UPPER__LKG_AGGR_63_32__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_1__WEIGHT_CU_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_2__WEIGHT_CU_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_3__WEIGHT_CU_SIG7__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG8__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_4__WEIGHT_CU_SIG9__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG10__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_5__WEIGHT_CU_SIG11__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG12_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG12__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG13_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_6__WEIGHT_CU_SIG13__SHIFT 0x10
+#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG14_MASK 0xffff
+#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG14__SHIFT 0x0
+#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG15_MASK 0xffff0000
+#define GC_CAC_WEIGHT_CU_7__WEIGHT_CU_SIG15__SHIFT 0x10
+#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU6__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU7__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU8__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU9__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU10__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU11__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU11__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU12__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU12__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU13__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU13__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU14__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU14__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CU15__ACCUMULATOR_31_0_MASK 0xffffffff
+#define GC_CAC_ACC_CU15__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_OVRD_CU__OVRRD_SELECT_MASK 0xffff
+#define GC_CAC_OVRD_CU__OVRRD_SELECT__SHIFT 0x0
+#define GC_CAC_OVRD_CU__OVRRD_VALUE_MASK 0xffff0000
+#define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x10
+
+#endif /* SMU_7_1_3_SH_MASK_H */
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-bits.h b/drivers/gpu/drm/amd/include/atom-bits.h
similarity index 100%
rename from drivers/gpu/drm/amd/amdgpu/atom-bits.h
rename to drivers/gpu/drm/amd/include/atom-bits.h
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-names.h b/drivers/gpu/drm/amd/include/atom-names.h
similarity index 100%
rename from drivers/gpu/drm/amd/amdgpu/atom-names.h
rename to drivers/gpu/drm/amd/include/atom-names.h
diff --git a/drivers/gpu/drm/amd/amdgpu/atom-types.h b/drivers/gpu/drm/amd/include/atom-types.h
similarity index 100%
rename from drivers/gpu/drm/amd/amdgpu/atom-types.h
rename to drivers/gpu/drm/amd/include/atom-types.h
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
similarity index 100%
rename from drivers/gpu/drm/amd/amdgpu/atombios.h
rename to drivers/gpu/drm/amd/include/atombios.h
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
new file mode 100644
index 0000000..992dcd8
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -0,0 +1,624 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _CGS_COMMON_H
+#define _CGS_COMMON_H
+
+#include "amd_shared.h"
+
+/**
+ * enum cgs_gpu_mem_type - GPU memory types
+ */
+enum cgs_gpu_mem_type {
+	CGS_GPU_MEM_TYPE__VISIBLE_FB,
+	CGS_GPU_MEM_TYPE__INVISIBLE_FB,
+	CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+	CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB,
+	CGS_GPU_MEM_TYPE__GART_CACHEABLE,
+	CGS_GPU_MEM_TYPE__GART_WRITECOMBINE
+};
+
+/**
+ * enum cgs_ind_reg - Indirect register spaces
+ */
+enum cgs_ind_reg {
+	CGS_IND_REG__MMIO,
+	CGS_IND_REG__PCIE,
+	CGS_IND_REG__SMC,
+	CGS_IND_REG__UVD_CTX,
+	CGS_IND_REG__DIDT,
+	CGS_IND_REG__AUDIO_ENDPT
+};
+
+/**
+ * enum cgs_clock - Clocks controlled by the SMU
+ */
+enum cgs_clock {
+	CGS_CLOCK__SCLK,
+	CGS_CLOCK__MCLK,
+	CGS_CLOCK__VCLK,
+	CGS_CLOCK__DCLK,
+	CGS_CLOCK__ECLK,
+	CGS_CLOCK__ACLK,
+	CGS_CLOCK__ICLK,
+	/* ... */
+};
+
+/**
+ * enum cgs_engine - Engines that can be statically power-gated
+ */
+enum cgs_engine {
+	CGS_ENGINE__UVD,
+	CGS_ENGINE__VCE,
+	CGS_ENGINE__VP8,
+	CGS_ENGINE__ACP_DMA,
+	CGS_ENGINE__ACP_DSP0,
+	CGS_ENGINE__ACP_DSP1,
+	CGS_ENGINE__ISP,
+	/* ... */
+};
+
+/**
+ * enum cgs_voltage_planes - Voltage planes for external camera HW
+ */
+enum cgs_voltage_planes {
+	CGS_VOLTAGE_PLANE__SENSOR0,
+	CGS_VOLTAGE_PLANE__SENSOR1,
+	/* ... */
+};
+
+/**
+ * enum cgs_ucode_id - Firmware types for different IPs
+ */
+enum cgs_ucode_id {
+	CGS_UCODE_ID_SMU = 0,
+	CGS_UCODE_ID_SDMA0,
+	CGS_UCODE_ID_SDMA1,
+	CGS_UCODE_ID_CP_CE,
+	CGS_UCODE_ID_CP_PFP,
+	CGS_UCODE_ID_CP_ME,
+	CGS_UCODE_ID_CP_MEC,
+	CGS_UCODE_ID_CP_MEC_JT1,
+	CGS_UCODE_ID_CP_MEC_JT2,
+	CGS_UCODE_ID_GMCON_RENG,
+	CGS_UCODE_ID_RLC_G,
+	CGS_UCODE_ID_MAXIMUM,
+};
+
+/**
+ * struct cgs_clock_limits - Clock limits
+ *
+ * Clocks are specified in 10KHz units.
+ */
+struct cgs_clock_limits {
+	unsigned min;		/**< Minimum supported frequency */
+	unsigned max;		/**< Maximum supported frequency */
+	unsigned sustainable;	/**< Thermally sustainable frequency */
+};
+
+/**
+ * struct cgs_firmware_info - Firmware information
+ */
+struct cgs_firmware_info {
+	uint16_t		version;
+	uint16_t		feature_version;
+	uint32_t		image_size;
+	uint64_t		mc_addr;
+	void			*kptr;
+};
+
+typedef unsigned long cgs_handle_t;
+
+/**
+ * cgs_gpu_mem_info() - Return information about memory heaps
+ * @cgs_device: opaque device handle
+ * @type:	memory type
+ * @mc_start:	Start MC address of the heap (output)
+ * @mc_size:	MC address space size (output)
+ * @mem_size:	maximum amount of memory available for allocation (output)
+ *
+ * This function returns information about memory heaps. The type
+ * parameter is used to select the memory heap. The mc_start and
+ * mc_size for GART heaps may be bigger than the memory available for
+ * allocation.
+ *
+ * mc_start and mc_size are undefined for non-contiguous FB memory
+ * types, since buffers allocated with these types may or may not be
+ * GART mapped.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_gpu_mem_info_t)(void *cgs_device, enum cgs_gpu_mem_type type,
+				  uint64_t *mc_start, uint64_t *mc_size,
+				  uint64_t *mem_size);
+
+/**
+ * cgs_gmap_kmem() - map kernel memory to GART aperture
+ * @cgs_device:	opaque device handle
+ * @kmem:	pointer to kernel memory
+ * @size:	size to map
+ * @min_offset: minimum offset from start of GART aperture
+ * @max_offset: maximum offset from start of GART aperture
+ * @kmem_handle: kernel memory handle (output)
+ * @mcaddr:	MC address (output)
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_gmap_kmem_t)(void *cgs_device, void *kmem, uint64_t size,
+			       uint64_t min_offset, uint64_t max_offset,
+			       cgs_handle_t *kmem_handle, uint64_t *mcaddr);
+
+/**
+ * cgs_gunmap_kmem() - unmap kernel memory
+ * @cgs_device:	opaque device handle
+ * @kmem_handle: kernel memory handle returned by gmap_kmem
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_gunmap_kmem_t)(void *cgs_device, cgs_handle_t kmem_handle);
+
+/**
+ * cgs_alloc_gpu_mem() - Allocate GPU memory
+ * @cgs_device:	opaque device handle
+ * @type:	memory type
+ * @size:	size in bytes
+ * @align:	alignment in bytes
+ * @min_offset: minimum offset from start of heap
+ * @max_offset: maximum offset from start of heap
+ * @handle:	memory handle (output)
+ *
+ * The memory types CGS_GPU_MEM_TYPE_*_CONTIG_FB force contiguous
+ * memory allocation. This guarantees that the MC address returned by
+ * cgs_gmap_gpu_mem is not mapped through the GART. The non-contiguous
+ * FB memory types may be GART mapped depending on memory
+ * fragmentation and memory allocator policies.
+ *
+ * If min/max_offset are non-0, the allocation will be forced to
+ * reside between these offsets in its respective memory heap. The
+ * base address that the offset relates to, depends on the memory
+ * type.
+ *
+ * - CGS_GPU_MEM_TYPE__*_CONTIG_FB: FB MC base address
+ * - CGS_GPU_MEM_TYPE__GART_*:	    GART aperture base address
+ * - others:			    undefined, don't use with max_offset
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_alloc_gpu_mem_t)(void *cgs_device, enum cgs_gpu_mem_type type,
+				   uint64_t size, uint64_t align,
+				   uint64_t min_offset, uint64_t max_offset,
+				   cgs_handle_t *handle);
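+
+/*
+ * Illustrative sketch (hypothetical usage, not part of this interface):
+ * allocating, GPU-mapping and kernel-mapping a buffer through the
+ * convenience macros defined at the end of this header. The device
+ * handle and error handling are placeholders.
+ */
+#if 0	/* illustration only */
+static int example_alloc_and_map(void *cgs_device)
+{
+	cgs_handle_t handle;
+	uint64_t mcaddr;
+	void *cpu_ptr;
+	int ret;
+
+	/* 4 KiB, page-aligned, anywhere in the visible FB heap */
+	ret = cgs_alloc_gpu_mem(cgs_device, CGS_GPU_MEM_TYPE__VISIBLE_FB,
+				4096, 4096, 0, 0, &handle);
+	if (ret)
+		return ret;
+
+	ret = cgs_gmap_gpu_mem(cgs_device, handle, &mcaddr);
+	if (!ret) {
+		ret = cgs_kmap_gpu_mem(cgs_device, handle, &cpu_ptr);
+		if (!ret)
+			cgs_kunmap_gpu_mem(cgs_device, handle);
+		cgs_gunmap_gpu_mem(cgs_device, handle);
+	}
+	cgs_free_gpu_mem(cgs_device, handle);
+	return ret;
+}
+#endif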
+
+/**
+ * cgs_free_gpu_mem() - Free GPU memory
+ * @cgs_device:	opaque device handle
+ * @handle:	memory handle returned by alloc or import
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_free_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_gmap_gpu_mem() - GPU-map GPU memory
+ * @cgs_device:	opaque device handle
+ * @handle:	memory handle returned by alloc or import
+ * @mcaddr:	MC address (output)
+ *
+ * Ensures that a buffer is GPU accessible and returns its MC address.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_gmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
+				  uint64_t *mcaddr);
+
+/**
+ * cgs_gunmap_gpu_mem() - GPU-unmap GPU memory
+ * @cgs_device:	opaque device handle
+ * @handle:	memory handle returned by alloc or import
+ *
+ * Allows the buffer to be migrated while it's not used by the GPU.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_gunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_kmap_gpu_mem() - Kernel-map GPU memory
+ *
+ * @cgs_device:	opaque device handle
+ * @handle:	memory handle returned by alloc or import
+ * @map:	Kernel virtual address the memory was mapped to (output)
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_kmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
+				  void **map);
+
+/**
+ * cgs_kunmap_gpu_mem() - Kernel-unmap GPU memory
+ * @cgs_device:	opaque device handle
+ * @handle:	memory handle returned by alloc or import
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_kunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+
+/**
+ * cgs_read_register() - Read an MMIO register
+ * @cgs_device:	opaque device handle
+ * @offset:	register offset
+ *
+ * Return:  register value
+ */
+typedef uint32_t (*cgs_read_register_t)(void *cgs_device, unsigned offset);
+
+/**
+ * cgs_write_register() - Write an MMIO register
+ * @cgs_device:	opaque device handle
+ * @offset:	register offset
+ * @value:	register value
+ */
+typedef void (*cgs_write_register_t)(void *cgs_device, unsigned offset,
+				     uint32_t value);
+
+/**
+ * cgs_read_ind_register() - Read an indirect register
+ * @cgs_device:	opaque device handle
+ * @space:	indirect register space
+ * @index:	register index
+ *
+ * Return:  register value
+ */
+typedef uint32_t (*cgs_read_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
+					    unsigned index);
+
+/**
+ * cgs_write_ind_register() - Write an indirect register
+ * @cgs_device:	opaque device handle
+ * @space:	indirect register space
+ * @index:	register index
+ * @value:	register value
+ */
+typedef void (*cgs_write_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
+					 unsigned index, uint32_t value);
+
+/**
+ * cgs_read_pci_config_byte() - Read byte from PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address
+ *
+ * Return:  Value read
+ */
+typedef uint8_t (*cgs_read_pci_config_byte_t)(void *cgs_device, unsigned addr);
+
+/**
+ * cgs_read_pci_config_word() - Read word from PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address, must be word-aligned
+ *
+ * Return:  Value read
+ */
+typedef uint16_t (*cgs_read_pci_config_word_t)(void *cgs_device, unsigned addr);
+
+/**
+ * cgs_read_pci_config_dword() - Read dword from PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address, must be dword-aligned
+ *
+ * Return:  Value read
+ */
+typedef uint32_t (*cgs_read_pci_config_dword_t)(void *cgs_device,
+						unsigned addr);
+
+/**
+ * cgs_write_pci_config_byte() - Write byte to PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address
+ * @value:	value to write
+ */
+typedef void (*cgs_write_pci_config_byte_t)(void *cgs_device, unsigned addr,
+					    uint8_t value);
+
+/**
+ * cgs_write_pci_config_word() - Write word to PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address, must be word-aligned
+ * @value:	value to write
+ */
+typedef void (*cgs_write_pci_config_word_t)(void *cgs_device, unsigned addr,
+					    uint16_t value);
+
+/**
+ * cgs_write_pci_config_dword() - Write dword to PCI configuration space
+ * @cgs_device:	opaque device handle
+ * @addr:	address, must be dword-aligned
+ * @value:	value to write
+ */
+typedef void (*cgs_write_pci_config_dword_t)(void *cgs_device, unsigned addr,
+					     uint32_t value);
+
+/**
+ * cgs_atom_get_data_table() - Get a pointer to an ATOM BIOS data table
+ * @cgs_device:	opaque device handle
+ * @table:	data table index
+ * @size:	size of the table (output, may be NULL)
+ * @frev:	table format revision (output, may be NULL)
+ * @crev:	table content revision (output, may be NULL)
+ *
+ * Return: Pointer to start of the table, or NULL on failure
+ */
+typedef const void *(*cgs_atom_get_data_table_t)(
+	void *cgs_device, unsigned table,
+	uint16_t *size, uint8_t *frev, uint8_t *crev);
+
+/**
+ * cgs_atom_get_cmd_table_revs() - Get ATOM BIOS command table revisions
+ * @cgs_device:	opaque device handle
+ * @table:	command table index
+ * @frev:	table format revision (output, may be NULL)
+ * @crev:	table content revision (output, may be NULL)
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_atom_get_cmd_table_revs_t)(void *cgs_device, unsigned table,
+					     uint8_t *frev, uint8_t *crev);
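+
+/*
+ * Illustrative sketch (hypothetical usage): fetching an ATOM data table
+ * and rejecting format revisions the caller does not understand. The
+ * table index and supported revision are placeholders.
+ */
+#if 0	/* illustration only */
+static const void *example_get_table(void *cgs_device, unsigned table_index)
+{
+	uint16_t size;
+	uint8_t frev, crev;
+	const void *table = cgs_atom_get_data_table(cgs_device, table_index,
+						    &size, &frev, &crev);
+
+	if (!table || frev > 2)		/* absent or unsupported layout */
+		return NULL;
+	return table;
+}
+#endif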
+
+/**
+ * cgs_atom_exec_cmd_table() - Execute an ATOM BIOS command table
+ * @cgs_device: opaque device handle
+ * @table:	command table index
+ * @args:	arguments
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_atom_exec_cmd_table_t)(void *cgs_device,
+					 unsigned table, void *args);
+
+/**
+ * cgs_create_pm_request() - Create a power management request
+ * @cgs_device:	opaque device handle
+ * @request:	handle of created PM request (output)
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_create_pm_request_t)(void *cgs_device, cgs_handle_t *request);
+
+/**
+ * cgs_destroy_pm_request() - Destroy a power management request
+ * @cgs_device:	opaque device handle
+ * @request:	handle of created PM request
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_destroy_pm_request_t)(void *cgs_device, cgs_handle_t request);
+
+/**
+ * cgs_set_pm_request() - Activate or deactivate a PM request
+ * @cgs_device:	opaque device handle
+ * @request:	PM request handle
+ * @active:	0 = deactivate, non-0 = activate
+ *
+ * While a PM request is active, its minimum clock requests are taken
+ * into account as the requested engines are powered up. When the
+ * request is inactive, the engines may be powered down and clocks may
+ * be lower, depending on other PM requests by other driver
+ * components.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_set_pm_request_t)(void *cgs_device, cgs_handle_t request,
+				    int active);
+
+/**
+ * cgs_pm_request_clock() - Request a minimum frequency for a specific clock
+ * @cgs_device:	opaque device handle
+ * @request:	PM request handle
+ * @clock:	which clock?
+ * @freq:	requested min. frequency in 10KHz units (0 to clear request)
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_request_clock_t)(void *cgs_device, cgs_handle_t request,
+				      enum cgs_clock clock, unsigned freq);
+
+/**
+ * cgs_pm_request_engine() - Request an engine to be powered up
+ * @cgs_device:	opaque device handle
+ * @request:	PM request handle
+ * @engine:	which engine?
+ * @powered:	0 = powered down, non-0 = powered up
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_request_engine_t)(void *cgs_device, cgs_handle_t request,
+				       enum cgs_engine engine, int powered);
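+
+/*
+ * Illustrative sketch (hypothetical usage) of the PM request lifecycle
+ * implied above: create a request, attach clock/engine demands, then
+ * toggle it active. The clock value and engine choice are placeholders.
+ */
+#if 0	/* illustration only */
+static int example_pm_request(void *cgs_device)
+{
+	cgs_handle_t req;
+	int ret = cgs_create_pm_request(cgs_device, &req);
+
+	if (ret)
+		return ret;
+	/* at least 600 MHz SCLK (units of 10KHz) and UVD powered up */
+	cgs_pm_request_clock(cgs_device, req, CGS_CLOCK__SCLK, 60000);
+	cgs_pm_request_engine(cgs_device, req, CGS_ENGINE__UVD, 1);
+	cgs_set_pm_request(cgs_device, req, 1);		/* activate */
+
+	/* ... later, when the demand goes away ... */
+	cgs_set_pm_request(cgs_device, req, 0);		/* deactivate */
+	return cgs_destroy_pm_request(cgs_device, req);
+}
+#endif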
+
+/**
+ * cgs_pm_query_clock_limits() - Query clock frequency limits
+ * @cgs_device:	opaque device handle
+ * @clock:	which clock?
+ * @limits:	clock limits
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_pm_query_clock_limits_t)(void *cgs_device,
+					   enum cgs_clock clock,
+					   struct cgs_clock_limits *limits);
+
+/**
+ * cgs_set_camera_voltages() - Apply specific voltages to PMIC voltage planes
+ * @cgs_device:	opaque device handle
+ * @mask:	bitmask of voltages to change (1<<CGS_VOLTAGE_PLANE__xyz|...)
+ * @voltages:	pointer to array of voltage values in 1mV units
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_set_camera_voltages_t)(void *cgs_device, uint32_t mask,
+					 const uint32_t *voltages);
+
+/**
+ * cgs_get_firmware_info() - Get firmware information from the core driver
+ * @cgs_device: opaque device handle
+ * @type: the firmware type
+ * @info: returned firmware information
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+typedef int (*cgs_get_firmware_info_t)(void *cgs_device,
+				       enum cgs_ucode_id type,
+				       struct cgs_firmware_info *info);
+
+typedef int (*cgs_set_powergating_state_t)(void *cgs_device,
+					    enum amd_ip_block_type block_type,
+					    enum amd_powergating_state state);
+
+typedef int (*cgs_set_clockgating_state_t)(void *cgs_device,
+					    enum amd_ip_block_type block_type,
+					    enum amd_clockgating_state state);
+
+struct cgs_ops {
+	/* memory management calls (similar to KFD interface) */
+	cgs_gpu_mem_info_t gpu_mem_info;
+	cgs_gmap_kmem_t gmap_kmem;
+	cgs_gunmap_kmem_t gunmap_kmem;
+	cgs_alloc_gpu_mem_t alloc_gpu_mem;
+	cgs_free_gpu_mem_t free_gpu_mem;
+	cgs_gmap_gpu_mem_t gmap_gpu_mem;
+	cgs_gunmap_gpu_mem_t gunmap_gpu_mem;
+	cgs_kmap_gpu_mem_t kmap_gpu_mem;
+	cgs_kunmap_gpu_mem_t kunmap_gpu_mem;
+	/* MMIO access */
+	cgs_read_register_t read_register;
+	cgs_write_register_t write_register;
+	cgs_read_ind_register_t read_ind_register;
+	cgs_write_ind_register_t write_ind_register;
+	/* PCI configuration space access */
+	cgs_read_pci_config_byte_t read_pci_config_byte;
+	cgs_read_pci_config_word_t read_pci_config_word;
+	cgs_read_pci_config_dword_t read_pci_config_dword;
+	cgs_write_pci_config_byte_t write_pci_config_byte;
+	cgs_write_pci_config_word_t write_pci_config_word;
+	cgs_write_pci_config_dword_t write_pci_config_dword;
+	/* ATOM BIOS */
+	cgs_atom_get_data_table_t atom_get_data_table;
+	cgs_atom_get_cmd_table_revs_t atom_get_cmd_table_revs;
+	cgs_atom_exec_cmd_table_t atom_exec_cmd_table;
+	/* Power management */
+	cgs_create_pm_request_t create_pm_request;
+	cgs_destroy_pm_request_t destroy_pm_request;
+	cgs_set_pm_request_t set_pm_request;
+	cgs_pm_request_clock_t pm_request_clock;
+	cgs_pm_request_engine_t pm_request_engine;
+	cgs_pm_query_clock_limits_t pm_query_clock_limits;
+	cgs_set_camera_voltages_t set_camera_voltages;
+	/* Firmware Info */
+	cgs_get_firmware_info_t get_firmware_info;
+	/* clock- and power-gating interface */
+	cgs_set_powergating_state_t set_powergating_state;
+	cgs_set_clockgating_state_t set_clockgating_state;
+	/* ACPI (TODO) */
+};
+
+struct cgs_os_ops; /* To be defined in the OS-specific CGS header */
+
+struct cgs_device {
+	const struct cgs_ops *ops;
+	const struct cgs_os_ops *os_ops;
+	/* to be embedded at the start of driver private structure */
+};
+
+/* Convenience macros that make CGS indirect function calls look like
+ * normal function calls */
+#define CGS_CALL(func,dev,...) \
+	(((struct cgs_device *)dev)->ops->func(dev, ##__VA_ARGS__))
+#define CGS_OS_CALL(func,dev,...) \
+	(((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
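+
+/*
+ * For instance, cgs_read_register(dev, offset) below expands to
+ *
+ *	((struct cgs_device *)dev)->ops->read_register(dev, offset)
+ *
+ * i.e. the first macro argument names the cgs_ops member and the device
+ * handle is passed back to the implementation as its first argument.
+ */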
+
+#define cgs_gpu_mem_info(dev,type,mc_start,mc_size,mem_size)		\
+	CGS_CALL(gpu_mem_info,dev,type,mc_start,mc_size,mem_size)
+#define cgs_gmap_kmem(dev,kmem,size,min_off,max_off,kmem_handle,mcaddr)	\
+	CGS_CALL(gmap_kmem,dev,kmem,size,min_off,max_off,kmem_handle,mcaddr)
+#define cgs_gunmap_kmem(dev,kmem_handle)	\
+	CGS_CALL(gunmap_kmem,dev,kmem_handle)
+#define cgs_alloc_gpu_mem(dev,type,size,align,min_off,max_off,handle)	\
+	CGS_CALL(alloc_gpu_mem,dev,type,size,align,min_off,max_off,handle)
+#define cgs_free_gpu_mem(dev,handle)		\
+	CGS_CALL(free_gpu_mem,dev,handle)
+#define cgs_gmap_gpu_mem(dev,handle,mcaddr)	\
+	CGS_CALL(gmap_gpu_mem,dev,handle,mcaddr)
+#define cgs_gunmap_gpu_mem(dev,handle)		\
+	CGS_CALL(gunmap_gpu_mem,dev,handle)
+#define cgs_kmap_gpu_mem(dev,handle,map)	\
+	CGS_CALL(kmap_gpu_mem,dev,handle,map)
+#define cgs_kunmap_gpu_mem(dev,handle)		\
+	CGS_CALL(kunmap_gpu_mem,dev,handle)
+
+#define cgs_read_register(dev,offset)		\
+	CGS_CALL(read_register,dev,offset)
+#define cgs_write_register(dev,offset,value)		\
+	CGS_CALL(write_register,dev,offset,value)
+#define cgs_read_ind_register(dev,space,index)		\
+	CGS_CALL(read_ind_register,dev,space,index)
+#define cgs_write_ind_register(dev,space,index,value)		\
+	CGS_CALL(write_ind_register,dev,space,index,value)
+
+#define cgs_read_pci_config_byte(dev,addr)	\
+	CGS_CALL(read_pci_config_byte,dev,addr)
+#define cgs_read_pci_config_word(dev,addr)	\
+	CGS_CALL(read_pci_config_word,dev,addr)
+#define cgs_read_pci_config_dword(dev,addr)		\
+	CGS_CALL(read_pci_config_dword,dev,addr)
+#define cgs_write_pci_config_byte(dev,addr,value)	\
+	CGS_CALL(write_pci_config_byte,dev,addr,value)
+#define cgs_write_pci_config_word(dev,addr,value)	\
+	CGS_CALL(write_pci_config_word,dev,addr,value)
+#define cgs_write_pci_config_dword(dev,addr,value)	\
+	CGS_CALL(write_pci_config_dword,dev,addr,value)
+
+#define cgs_atom_get_data_table(dev,table,size,frev,crev)	\
+	CGS_CALL(atom_get_data_table,dev,table,size,frev,crev)
+#define cgs_atom_get_cmd_table_revs(dev,table,frev,crev)	\
+	CGS_CALL(atom_get_cmd_table_revs,dev,table,frev,crev)
+#define cgs_atom_exec_cmd_table(dev,table,args)		\
+	CGS_CALL(atom_exec_cmd_table,dev,table,args)
+
+#define cgs_create_pm_request(dev,request)	\
+	CGS_CALL(create_pm_request,dev,request)
+#define cgs_destroy_pm_request(dev,request)		\
+	CGS_CALL(destroy_pm_request,dev,request)
+#define cgs_set_pm_request(dev,request,active)		\
+	CGS_CALL(set_pm_request,dev,request,active)
+#define cgs_pm_request_clock(dev,request,clock,freq)		\
+	CGS_CALL(pm_request_clock,dev,request,clock,freq)
+#define cgs_pm_request_engine(dev,request,engine,powered)	\
+	CGS_CALL(pm_request_engine,dev,request,engine,powered)
+#define cgs_pm_query_clock_limits(dev,clock,limits)		\
+	CGS_CALL(pm_query_clock_limits,dev,clock,limits)
+#define cgs_set_camera_voltages(dev,mask,voltages)	\
+	CGS_CALL(set_camera_voltages,dev,mask,voltages)
+#define cgs_get_firmware_info(dev, type, info)	\
+	CGS_CALL(get_firmware_info, dev, type, info)
+#define cgs_set_powergating_state(dev, block_type, state)	\
+	CGS_CALL(set_powergating_state, dev, block_type, state)
+#define cgs_set_clockgating_state(dev, block_type, state)	\
+	CGS_CALL(set_clockgating_state, dev, block_type, state)
+
+#endif /* _CGS_COMMON_H */
diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h
new file mode 100644
index 0000000..488642f
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/cgs_linux.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _CGS_LINUX_H
+#define _CGS_LINUX_H
+
+#include "cgs_common.h"
+
+/**
+ * cgs_import_gpu_mem() - Import dmabuf handle
+ * @cgs_device:  opaque device handle
+ * @dmabuf_fd:   DMABuf file descriptor
+ * @handle:      memory handle (output)
+ *
+ * Must be called in the process context that dmabuf_fd belongs to.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd,
+				    cgs_handle_t *handle);
+
+/**
+ * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources
+ * @private_data:  private data provided to cgs_add_irq_source
+ * @src_id:        interrupt source ID
+ * @type:          interrupt type
+ * @enabled:       0 = disable source, non-0 = enable source
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_source_set_func_t)(void *private_data,
+					 unsigned src_id, unsigned type,
+					 int enabled);
+
+/**
+ * cgs_irq_handler_func() - Interrupt handler callback
+ * @private_data:  private data provided to cgs_add_irq_source
+ * @src_id:        interrupt source ID
+ * @iv_entry:      pointer to raw ih ring entry
+ *
+ * This callback runs in interrupt context.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_handler_func_t)(void *private_data,
+				      unsigned src_id, const uint32_t *iv_entry);
+
+/**
+ * cgs_add_irq_source() - Add an IRQ source
+ * @cgs_device:    opaque device handle
+ * @src_id:        interrupt source ID
+ * @num_types:     number of interrupt types that can be independently enabled
+ * @set:           callback function to enable/disable an interrupt type
+ * @handler:       interrupt handler callback
+ * @private_data:  private data to pass to callback functions
+ *
+ * The same IRQ source can be added only once. Adding an IRQ source
+ * indicates ownership of that IRQ source and all its IRQ types.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_add_irq_source_t)(void *cgs_device, unsigned src_id,
+				    unsigned num_types,
+				    cgs_irq_source_set_func_t set,
+				    cgs_irq_handler_func_t handler,
+				    void *private_data);
+
+/**
+ * cgs_irq_get() - Request enabling an IRQ source and type
+ * @cgs_device:  opaque device handle
+ * @src_id:      interrupt source ID
+ * @type:        interrupt type
+ *
+ * cgs_irq_get and cgs_irq_put calls must be balanced. They count
+ * "references" to IRQ sources.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
+
+/**
+ * cgs_irq_put() - Indicate IRQ source is no longer needed
+ * @cgs_device:  opaque device handle
+ * @src_id:      interrupt source ID
+ * @type:        interrupt type
+ *
+ * cgs_irq_get and cgs_irq_put calls must be balanced. They count
+ * "references" to IRQ sources. Even after cgs_irq_put is called, the
+ * IRQ handler may still be called if there are more references to
+ * the IRQ source.
+ *
+ * Return:  0 on success, -errno otherwise
+ */
+typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
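+
+/*
+ * Illustrative sketch (hypothetical usage) of the ownership model
+ * described above: register a source once, then bracket the period the
+ * interrupt is needed with balanced get/put calls. The source ID and
+ * the my_irq_* callbacks are placeholders.
+ */
+#if 0	/* illustration only */
+static int example_irq(void *cgs_device)
+{
+	int ret = cgs_add_irq_source(cgs_device, 42, 1,
+				     my_irq_set, my_irq_handler, my_data);
+
+	if (ret)
+		return ret;
+	cgs_irq_get(cgs_device, 42, 0);	/* enable while needed */
+	/* ... my_irq_handler may run in interrupt context ... */
+	return cgs_irq_put(cgs_device, 42, 0);	/* balanced release */
+}
+#endif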
+
+struct cgs_os_ops {
+	cgs_import_gpu_mem_t import_gpu_mem;
+
+	/* IRQ handling */
+	cgs_add_irq_source_t add_irq_source;
+	cgs_irq_get_t irq_get;
+	cgs_irq_put_t irq_put;
+};
+
+#define cgs_import_gpu_mem(dev,dmabuf_fd,handle)		\
+	CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle)
+#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
+	CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler,	\
+		    private_data)
+#define cgs_irq_get(dev,src_id,type)		\
+	CGS_OS_CALL(irq_get,dev,src_id,type)
+#define cgs_irq_put(dev,src_id,type)		\
+	CGS_OS_CALL(irq_put,dev,src_id,type)
+
+#endif /* _CGS_LINUX_H */
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 9080daa..888250b 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -52,7 +52,8 @@
 	KGD_ENGINE_MEC1,
 	KGD_ENGINE_MEC2,
 	KGD_ENGINE_RLC,
-	KGD_ENGINE_SDMA,
+	KGD_ENGINE_SDMA1,
+	KGD_ENGINE_SDMA2,
 	KGD_ENGINE_MAX
 };
 
diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h
new file mode 100644
index 0000000..ee6978b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/pptable.h
@@ -0,0 +1,702 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _PPTABLE_H
+#define _PPTABLE_H
+
+#pragma pack(1)
+
+typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+{
+    UCHAR ucType;           // one of ATOM_PP_THERMALCONTROLLER_*
+    UCHAR ucI2cLine;        // as interpreted by DAL I2C
+    UCHAR ucI2cAddress;
+    UCHAR ucFanParameters;  // Fan Control Parameters.
+    UCHAR ucFanMinRPM;      // Fan Minimum RPM (hundreds) -- for display purposes only.
+    UCHAR ucFanMaxRPM;      // Fan Maximum RPM (hundreds) -- for display purposes only.
+    UCHAR ucReserved;       // ----
+    UCHAR ucFlags;          // to be defined
+} ATOM_PPLIB_THERMALCONTROLLER;
+
+#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
+#define ATOM_PP_FANPARAMETERS_NOFAN                                 0x80    // No fan is connected to this controller.
+
+#define ATOM_PP_THERMALCONTROLLER_NONE      0
+#define ATOM_PP_THERMALCONTROLLER_LM63      1  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1032   2  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1030   3  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_MUA6649   4  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_LM64      5
+#define ATOM_PP_THERMALCONTROLLER_F75375    6  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_RV6xx     7
+#define ATOM_PP_THERMALCONTROLLER_RV770     8
+#define ATOM_PP_THERMALCONTROLLER_ADT7473   9
+#define ATOM_PP_THERMALCONTROLLER_KONG      10
+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO     11
+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_EMC2103   13  /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
+#define ATOM_PP_THERMALCONTROLLER_SUMO      14  /* 0x0E */ // Sumo type, used internally
+#define ATOM_PP_THERMALCONTROLLER_NISLANDS  15
+#define ATOM_PP_THERMALCONTROLLER_SISLANDS  16
+#define ATOM_PP_THERMALCONTROLLER_LM96163   17
+#define ATOM_PP_THERMALCONTROLLER_CISLANDS  18
+#define ATOM_PP_THERMALCONTROLLER_KAVERI    19
+
+
+// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+// We probably should reserve the bit 0x80 for this use.
+// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+// The driver can pick the correct internal controller based on the ASIC.
+
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL   0x89    // ADT7473 Fan Control + Internal Thermal Controller
+#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL   0x8D    // EMC2103 Fan Control + Internal Thermal Controller
+
+typedef struct _ATOM_PPLIB_STATE
+{
+    UCHAR ucNonClockStateIndex;
+    UCHAR ucClockStateIndices[1]; // variable-sized
+} ATOM_PPLIB_STATE;
+
+
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+    UCHAR   ucFanTableFormat;                // Change this if the table format changes or version changes so that the other fields are not the same.
+    UCHAR   ucTHyst;                         // Temperature hysteresis. Integer.
+    USHORT  usTMin;                          // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
+    USHORT  usTMed;                          // The middle temperature where we change slopes.
+    USHORT  usTHigh;                         // The high point above TMed for adjusting the second slope.
+    USHORT  usPWMMin;                        // The minimum PWM value in percent (0.01% increments).
+    USHORT  usPWMMed;                        // The PWM value (in percent) at TMed.
+    USHORT  usPWMHigh;                       // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
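+
+/*
+ * One plausible reading of the two-slope curve described by the fields
+ * above (illustration only, not code from this table definition): below
+ * usTMin the fan runs at usPWMMin; between TMin/TMed and TMed/THigh the
+ * PWM is interpolated linearly. Temperatures are in 0.01 centigrades,
+ * PWM in 0.01%; assumes a well-formed table (TMin < TMed < THigh).
+ */
+#if 0 /* illustration only */
+static USHORT example_fan_pwm(const ATOM_PPLIB_FANTABLE *t, USHORT temp)
+{
+    if (temp <= t->usTMin)
+        return t->usPWMMin;
+    if (temp <= t->usTMed)
+        return t->usPWMMin + (USHORT)((ULONG)(t->usPWMMed - t->usPWMMin) *
+            (temp - t->usTMin) / (t->usTMed - t->usTMin));
+    if (temp <= t->usTHigh)
+        return t->usPWMMed + (USHORT)((ULONG)(t->usPWMHigh - t->usPWMMed) *
+            (temp - t->usTMed) / (t->usTHigh - t->usTMed));
+    return t->usPWMHigh;
+}
+#endif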
+
+typedef struct _ATOM_PPLIB_FANTABLE2
+{
+    ATOM_PPLIB_FANTABLE basicTable;
+    USHORT  usTMax;                          // The max temperature
+} ATOM_PPLIB_FANTABLE2;
+
+typedef struct _ATOM_PPLIB_FANTABLE3
+{
+	ATOM_PPLIB_FANTABLE2 basicTable2;
+	UCHAR ucFanControlMode;
+	USHORT usFanPWMMax;
+	USHORT usFanOutputSensitivity;
+} ATOM_PPLIB_FANTABLE3;
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+    USHORT  usSize;
+    ULONG   ulMaxEngineClock;   // For Overdrive.
+    ULONG   ulMaxMemoryClock;   // For Overdrive.
+    // Add extra system parameters here, always adjust size to include all fields.
+    USHORT  usVCETableOffset;   //points to ATOM_PPLIB_VCE_Table
+    USHORT  usUVDTableOffset;   //points to ATOM_PPLIB_UVD_Table
+    USHORT  usSAMUTableOffset;  //points to ATOM_PPLIB_SAMU_Table
+    USHORT  usPPMTableOffset;   //points to ATOM_PPLIB_PPM_Table
+    USHORT  usACPTableOffset;   //points to ATOM_PPLIB_ACP_Table
+    /* points to ATOM_PPLIB_POWERTUNE_Table */
+    USHORT  usPowerTuneTableOffset;
+    /* points to ATOM_PPLIB_CLOCK_Voltage_Dependency_Table for sclkVddgfxTable */
+    USHORT  usSclkVddgfxTableOffset;
+} ATOM_PPLIB_EXTENDEDHEADER;
+
+//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
+#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
+#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
+#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
+#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
+#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
+#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
+#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
+#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
+#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
+#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
+#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
+#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000              // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000   // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000                   // Whether the driver controls VDDCI independently from VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000               // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO          0x00020000               // Whether the driver supports the BACO state.
+#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE   0x00040000           // Whether the driver supports the new CAC voltage table.
+#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY   0x00080000     // Whether the driver supports reverted GPIO5 polarity.
+#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17   0x00100000     // Whether the driver supports thermal2GPIO17.
+#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE   0x00200000   // Whether the VR HOT GPIO is configurable.
+#define ATOM_PP_PLATFORM_CAP_TEMP_INVERSION   0x00400000            // Whether the driver supports the temp inversion feature.
+#define ATOM_PP_PLATFORM_CAP_EVV    0x00800000
+#define ATOM_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL    0x01000000
+#define ATOM_PP_PLATFORM_LOAD_POST_PRODUCTION_FIRMWARE      0x02000000
+#define ATOM_PP_PLATFORM_CAP_DISABLE_USING_ACTUAL_TEMPERATURE_FOR_POWER_CALC 0x04000000
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE
+{
+      ATOM_COMMON_TABLE_HEADER sHeader;
+
+      UCHAR ucDataRevision;
+
+      UCHAR ucNumStates;
+      UCHAR ucStateEntrySize;
+      UCHAR ucClockInfoSize;
+      UCHAR ucNonClockSize;
+
+      // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
+      USHORT usStateArrayOffset;
+
+      // offset from start of this table to array of ASIC-specific structures,
+      // currently ATOM_PPLIB_CLOCK_INFO.
+      USHORT usClockInfoArrayOffset;
+
+      // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
+      USHORT usNonClockInfoArrayOffset;
+
+      USHORT usBackbiasTime;    // in microseconds
+      USHORT usVoltageTime;     // in microseconds
+      USHORT usTableSize;       //the size of this structure, or the extended structure
+
+      ULONG ulPlatformCaps;            // See ATOM_PP_PLATFORM_CAP_*
+
+      ATOM_PPLIB_THERMALCONTROLLER    sThermalController;
+
+      USHORT usBootClockInfoOffset;
+      USHORT usBootNonClockInfoOffset;
+
+} ATOM_PPLIB_POWERPLAYTABLE;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+    ATOM_PPLIB_POWERPLAYTABLE basicTable;
+    UCHAR   ucNumCustomThermalPolicy;
+    USHORT  usCustomThermalPolicyArrayOffset;
+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+    ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+    USHORT                     usFormatID;                      // To be used ONLY by PPGen.
+    USHORT                     usFanTableOffset;
+    USHORT                     usExtendendedHeaderOffset;
+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
+{
+    ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
+    ULONG                      ulGoldenPPID;                    // PPGen use only     
+    ULONG                      ulGoldenRevision;                // PPGen use only
+    USHORT                     usVddcDependencyOnSCLKOffset;
+    USHORT                     usVddciDependencyOnMCLKOffset;
+    USHORT                     usVddcDependencyOnMCLKOffset;
+    USHORT                     usMaxClockVoltageOnDCOffset;
+    USHORT                     usVddcPhaseShedLimitsTableOffset;    // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
+    USHORT                     usMvddDependencyOnMCLKOffset;  
+} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+{
+    ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
+    ULONG                      ulTDPLimit;
+    ULONG                      ulNearTDPLimit;
+    ULONG                      ulSQRampingThreshold;
+    USHORT                     usCACLeakageTableOffset;         // Points to ATOM_PPLIB_CAC_Leakage_Table
+    ULONG                      ulCACLeakage;                    // The iLeakage for driver calculated CAC leakage table
+    USHORT                     usTDPODLimit;
+    USHORT                     usLoadLineSlope;                 // in milliOhms * 100
+} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
+#define ATOM_PPLIB_CLASSIFICATION_UI_MASK          0x0007
+#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT         0
+#define ATOM_PPLIB_CLASSIFICATION_UI_NONE          0
+#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY       1
+#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED      3
+#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE   5
+// 2, 4, 6, 7 are reserved
+
+#define ATOM_PPLIB_CLASSIFICATION_BOOT                   0x0008
+#define ATOM_PPLIB_CLASSIFICATION_THERMAL                0x0010
+#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE     0x0020
+#define ATOM_PPLIB_CLASSIFICATION_REST                   0x0040
+#define ATOM_PPLIB_CLASSIFICATION_FORCED                 0x0080
+#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE          0x0100
+#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE      0x0200
+#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE               0x0400
+#define ATOM_PPLIB_CLASSIFICATION_3DLOW                  0x0800
+#define ATOM_PPLIB_CLASSIFICATION_ACPI                   0x1000
+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE               0x2000
+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE                0x4000
+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE                0x8000
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
+#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2     0x0001
+#define ATOM_PPLIB_CLASSIFICATION2_ULV                      0x0002
+#define ATOM_PPLIB_CLASSIFICATION2_MVC                      0x0004   //Multi-View Codec (BD-3D)
+
+//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
+#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY           0x00000001
+#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK         0x00000002
+
+// 0 is 2.5Gb/s, 1 is 5Gb/s
+#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK            0x00000004
+#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT           2
+
+// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK            0x000000F8
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT           3
+
+// lookup into reduced refresh-rate table
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK  0x00000F00
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
+
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED    0
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ         1
+// 2-15 TBD as needed.
+
+#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING        0x00001000
+#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS  0x00002000
+
+#define ATOM_PPLIB_DISALLOW_ON_DC                       0x00004000
+
+#define ATOM_PPLIB_ENABLE_VARIBRIGHT                     0x00008000
+
+//memory related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF               0x00010000
+
+//M3 Arb    //2bits, current 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK                       0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT                      17
+
+#define ATOM_PPLIB_ENABLE_DRR                       0x00080000
+
+// remaining 16 bits are reserved
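+
+/*
+ * Illustrative decode (not part of this table definition) of the PCIe
+ * fields packed into ulCapsAndSettings; per the comments above, the
+ * width field stores lanes minus one.
+ */
+#if 0 /* illustration only */
+static int example_pcie_lanes(ULONG caps)
+{
+    return (int)((caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+                 ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+}
+#endif
+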
+typedef struct _ATOM_PPLIB_THERMAL_STATE
+{
+    UCHAR   ucMinTemperature;
+    UCHAR   ucMaxTemperature;
+    UCHAR   ucThermalAction;
+}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
+#define ATOM_PPLIB_NONCLOCKINFO_VER1      12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2      24
+typedef struct _ATOM_PPLIB_NONCLOCK_INFO
+{
+      USHORT usClassification;
+      UCHAR  ucMinTemperature;
+      UCHAR  ucMaxTemperature;
+      ULONG  ulCapsAndSettings;
+      UCHAR  ucRequiredPower;
+      USHORT usClassification2;
+      ULONG  ulVCLK;
+      ULONG  ulDCLK;
+      UCHAR  ucUnused[5];
+} ATOM_PPLIB_NONCLOCK_INFO;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR ucMemoryClockHigh;
+
+      USHORT usVDDC;
+      USHORT usUnused1;
+      USHORT usUnused2;
+
+      ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_R600_CLOCK_INFO;
+
+// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
+#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2          1
+#define ATOM_PPLIB_R600_FLAGS_UVDSAFE           2
+#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE    4
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF    8
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF   16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER         32   // On the RV770 use 'low power' setting (sequencer S0).
+
+typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
+{
+      USHORT usLowEngineClockLow;         // Low Engine clock in MHz (the same way as on the R600).
+      UCHAR  ucLowEngineClockHigh;
+      USHORT usHighEngineClockLow;        // High Engine clock in MHz.
+      UCHAR  ucHighEngineClockHigh;
+      USHORT usMemoryClockLow;            // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
+      UCHAR  ucMemoryClockHigh;           // Currently unused.
+      UCHAR  ucPadding;                   // For proper alignment and size.
+      USHORT usVDDC;                      // For the 780, use: None, Low, High, Variable
+      UCHAR  ucMaxHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}
+      UCHAR  ucMinHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could 
+      USHORT usHTLinkFreq;                // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
+      ULONG  ulFlags; 
+} ATOM_PPLIB_RS780_CLOCK_INFO;
+
+#define ATOM_PPLIB_RS780_VOLTAGE_NONE       0 
+#define ATOM_PPLIB_RS780_VOLTAGE_LOW        1 
+#define ATOM_PPLIB_RS780_VOLTAGE_HIGH       2 
+#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE   3 
+
+#define ATOM_PPLIB_RS780_SPMCLK_NONE        0   // We cannot change the side port memory clock, leave it as it is.
+#define ATOM_PPLIB_RS780_SPMCLK_LOW         1
+#define ATOM_PPLIB_RS780_SPMCLK_HIGH        2
+
+#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE       0 
+#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW        1 
+#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH       2 
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR  ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR  ucMemoryClockHigh;
+
+      USHORT usVDDC;
+      USHORT usVDDCI;
+      USHORT usUnused;
+
+      ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR  ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR  ucMemoryClockHigh;
+
+      USHORT usVDDC;
+      USHORT usVDDCI;
+      UCHAR  ucPCIEGen;
+      UCHAR  ucUnused1;
+
+      ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
+
+} ATOM_PPLIB_SI_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR  ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR  ucMemoryClockHigh;
+      
+      UCHAR  ucPCIEGen;
+      USHORT usPCIELane;
+} ATOM_PPLIB_CI_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+      USHORT usEngineClockLow;  //clock frequency & 0xFFFF. The unit is 10khz.
+      UCHAR  ucEngineClockHigh; //clock frequency >> 16.
+      UCHAR  vddcIndex;         //2-bit vddc index
+      USHORT tdpLimit;
+      //please initialize to 0
+      USHORT rsv1;
+      //please initialize to 0s
+      ULONG rsv2[2];
+}ATOM_PPLIB_SUMO_CLOCK_INFO;
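+
+/*
+ * As the comments above spell out, clocks are split into a 16-bit low
+ * part and an 8-bit high part; an illustrative reassembly (units of
+ * 10khz):
+ */
+#if 0 /* illustration only */
+static ULONG example_sclk_10khz(const ATOM_PPLIB_SUMO_CLOCK_INFO *ci)
+{
+    return ((ULONG)ci->ucEngineClockHigh << 16) | ci->usEngineClockLow;
+}
+#endif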
+
+typedef struct _ATOM_PPLIB_CZ_CLOCK_INFO {
+      UCHAR index;
+      UCHAR rsv[3];
+} ATOM_PPLIB_CZ_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_STATE_V2
+{
+      //number of valid dpm levels in this state; the driver uses it to calculate
+      //the whole size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+      UCHAR ucNumDPMLevels;
+
+      //an index into the array of nonClockInfos
+      UCHAR nonClockInfoIndex;
+      //the driver will read the first ucNumDPMLevels entries in this array
+      UCHAR clockInfoIndex[1];
+} ATOM_PPLIB_STATE_V2;
+
+typedef struct _StateArray{
+    //how many states we have 
+    UCHAR ucNumEntries;
+    
+    ATOM_PPLIB_STATE_V2 states[1];
+}StateArray;
+
+
+typedef struct _ClockInfoArray{
+    //how many clock levels we have
+    UCHAR ucNumEntries;
+    
+    //sizeof(ATOM_PPLIB_CLOCK_INFO)
+    UCHAR ucEntrySize;
+    
+    UCHAR clockInfo[1];
+}ClockInfoArray;
+
+typedef struct _NonClockInfoArray{
+
+    //how many non-clock levels we have; normally the same as the number of states
+    UCHAR ucNumEntries;
+    //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+    UCHAR ucEntrySize;
+    
+    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+}NonClockInfoArray;
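+
+/*
+ * Illustrative sketch (not part of this table definition) of walking
+ * these flexible arrays. ClockInfoArray carries ucEntrySize because the
+ * per-ASIC clock-info structs differ in size, so entries are addressed
+ * by byte stride rather than by indexing a typed array.
+ */
+#if 0 /* illustration only */
+static void example_walk_clock_info(const ClockInfoArray *ca)
+{
+    UCHAR i;
+
+    for (i = 0; i < ca->ucNumEntries; i++) {
+        const UCHAR *ci = &ca->clockInfo[i * ca->ucEntrySize];
+        /* cast ci to the ASIC-specific *_CLOCK_INFO type */
+    }
+}
+#endif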
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+{
+    USHORT usClockLow;
+    UCHAR  ucClockHigh;
+    USHORT usVoltage;
+}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
+{
+    UCHAR ucNumEntries;                                                // Number of entries.
+    ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1];             // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+{
+    USHORT usSclkLow;
+    UCHAR  ucSclkHigh;
+    USHORT usMclkLow;
+    UCHAR  ucMclkHigh;
+    USHORT usVddc;
+    USHORT usVddci;
+}ATOM_PPLIB_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+{
+    UCHAR ucNumEntries;                                                // Number of entries.
+    ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1];                  // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Limit_Table;
+
+union _ATOM_PPLIB_CAC_Leakage_Record
+{
+    struct
+    {
+        USHORT usVddc;          // The "fake" standardized VDDC used for power calculations; for CI and newer this is the real VDDC value, read as StdVoltageHiSidd.
+        ULONG  ulLeakageValue;  // For CI and newer this is the "fake" standard VDDC value; on CI it is read as StdVoltageLoSidd.
+    };
+    struct
+    {
+        USHORT usVddc1;
+        USHORT usVddc2;
+        USHORT usVddc3;
+    };
+};
+
+typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record;
+
+typedef struct _ATOM_PPLIB_CAC_Leakage_Table
+{
+    UCHAR ucNumEntries;                                                 // Number of entries.
+    ATOM_PPLIB_CAC_Leakage_Record entries[1];                           // Dynamically allocate entries.
+}ATOM_PPLIB_CAC_Leakage_Table;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
+{
+    USHORT usVoltage;
+    USHORT usSclkLow;
+    UCHAR  ucSclkHigh;
+    USHORT usMclkLow;
+    UCHAR  ucMclkHigh;
+}ATOM_PPLIB_PhaseSheddingLimits_Record;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
+{
+    UCHAR ucNumEntries;                                                 // Number of entries.
+    ATOM_PPLIB_PhaseSheddingLimits_Record entries[1];                   // Dynamically allocate entries.
+}ATOM_PPLIB_PhaseSheddingLimits_Table;
+
+typedef struct _VCEClockInfo{
+    USHORT usEVClkLow;
+    UCHAR  ucEVClkHigh;
+    USHORT usECClkLow;
+    UCHAR  ucECClkHigh;
+}VCEClockInfo;
+
+typedef struct _VCEClockInfoArray{
+    UCHAR ucNumEntries;
+    VCEClockInfo entries[1];
+}VCEClockInfoArray;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
+{
+    USHORT usVoltage;
+    UCHAR  ucVCEClockInfoIndex;
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_VCE_State_Record
+{
+    UCHAR  ucVCEClockInfoIndex;
+    UCHAR  ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
+}ATOM_PPLIB_VCE_State_Record;
+
+typedef struct _ATOM_PPLIB_VCE_State_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_VCE_State_Record entries[1];
+}ATOM_PPLIB_VCE_State_Table;
+
+
+typedef struct _ATOM_PPLIB_VCE_Table
+{
+      UCHAR revid;
+//    VCEClockInfoArray array;
+//    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
+//    ATOM_PPLIB_VCE_State_Table states;
+}ATOM_PPLIB_VCE_Table;
+
+
+typedef struct _UVDClockInfo{
+    USHORT usVClkLow;
+    UCHAR  ucVClkHigh;
+    USHORT usDClkLow;
+    UCHAR  ucDClkHigh;
+}UVDClockInfo;
+
+typedef struct _UVDClockInfoArray{
+    UCHAR ucNumEntries;
+    UVDClockInfo entries[1];
+}UVDClockInfoArray;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
+{
+    USHORT usVoltage;
+    UCHAR  ucUVDClockInfoIndex;
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_UVD_Table
+{
+      UCHAR revid;
+//    UVDClockInfoArray array;
+//    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
+}ATOM_PPLIB_UVD_Table;
+
+typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
+{
+      USHORT usVoltage;
+      USHORT usSAMClockLow;
+      UCHAR  ucSAMClockHigh;
+}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
+    UCHAR numEntries;
+    ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_SAMU_Table
+{
+      UCHAR revid;
+      ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
+}ATOM_PPLIB_SAMU_Table;
+
+typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record
+{
+      USHORT usVoltage;
+      USHORT usACPClockLow;
+      UCHAR  ucACPClockHigh;
+}ATOM_PPLIB_ACPClk_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{
+    UCHAR numEntries;
+    ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_ACPClk_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_ACP_Table
+{
+      UCHAR revid;
+      ATOM_PPLIB_ACPClk_Voltage_Limit_Table limits;
+}ATOM_PPLIB_ACP_Table;
+
+typedef struct _ATOM_PowerTune_Table{
+    USHORT usTDP;
+    USHORT usConfigurableTDP;
+    USHORT usTDC;
+    USHORT usBatteryPowerLimit;
+    USHORT usSmallPowerLimit;
+    USHORT usLowCACLeakage;
+    USHORT usHighCACLeakage;
+}ATOM_PowerTune_Table;
+
+typedef struct _ATOM_PPLIB_POWERTUNE_Table
+{
+      UCHAR revid;
+      ATOM_PowerTune_Table power_tune_table;
+}ATOM_PPLIB_POWERTUNE_Table;
+
+typedef struct _ATOM_PPLIB_POWERTUNE_Table_V1
+{
+      UCHAR revid;
+      ATOM_PowerTune_Table power_tune_table;
+      USHORT usMaximumPowerDeliveryLimit;
+      USHORT usTjMax;
+      USHORT usReserve[6];
+} ATOM_PPLIB_POWERTUNE_Table_V1;
+
+#define ATOM_PPM_A_A    1
+#define ATOM_PPM_A_I    2
+typedef struct _ATOM_PPLIB_PPM_Table
+{
+      UCHAR  ucRevId;
+      UCHAR  ucPpmDesign;          //A+I or A+A
+      USHORT usCpuCoreNumber;
+      ULONG  ulPlatformTDP;
+      ULONG  ulSmallACPlatformTDP;
+      ULONG  ulPlatformTDC;
+      ULONG  ulSmallACPlatformTDC;
+      ULONG  ulApuTDP;
+      ULONG  ulDGpuTDP;  
+      ULONG  ulDGpuUlvPower;
+      ULONG  ulTjmax;
+} ATOM_PPLIB_PPM_Table;
+
+#pragma pack()
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h
new file mode 100644
index 0000000..65cfacd
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/vi_structs.h
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef VI_STRUCTS_H_
+#define VI_STRUCTS_H_
+
+struct vi_sdma_mqd {
+	uint32_t sdmax_rlcx_rb_cntl;
+	uint32_t sdmax_rlcx_rb_base;
+	uint32_t sdmax_rlcx_rb_base_hi;
+	uint32_t sdmax_rlcx_rb_rptr;
+	uint32_t sdmax_rlcx_rb_wptr;
+	uint32_t sdmax_rlcx_rb_wptr_poll_cntl;
+	uint32_t sdmax_rlcx_rb_wptr_poll_addr_hi;
+	uint32_t sdmax_rlcx_rb_wptr_poll_addr_lo;
+	uint32_t sdmax_rlcx_rb_rptr_addr_hi;
+	uint32_t sdmax_rlcx_rb_rptr_addr_lo;
+	uint32_t sdmax_rlcx_ib_cntl;
+	uint32_t sdmax_rlcx_ib_rptr;
+	uint32_t sdmax_rlcx_ib_offset;
+	uint32_t sdmax_rlcx_ib_base_lo;
+	uint32_t sdmax_rlcx_ib_base_hi;
+	uint32_t sdmax_rlcx_ib_size;
+	uint32_t sdmax_rlcx_skip_cntl;
+	uint32_t sdmax_rlcx_context_status;
+	uint32_t sdmax_rlcx_doorbell;
+	uint32_t sdmax_rlcx_virtual_addr;
+	uint32_t sdmax_rlcx_ape1_cntl;
+	uint32_t sdmax_rlcx_doorbell_log;
+	uint32_t reserved_22;
+	uint32_t reserved_23;
+	uint32_t reserved_24;
+	uint32_t reserved_25;
+	uint32_t reserved_26;
+	uint32_t reserved_27;
+	uint32_t reserved_28;
+	uint32_t reserved_29;
+	uint32_t reserved_30;
+	uint32_t reserved_31;
+	uint32_t reserved_32;
+	uint32_t reserved_33;
+	uint32_t reserved_34;
+	uint32_t reserved_35;
+	uint32_t reserved_36;
+	uint32_t reserved_37;
+	uint32_t reserved_38;
+	uint32_t reserved_39;
+	uint32_t reserved_40;
+	uint32_t reserved_41;
+	uint32_t reserved_42;
+	uint32_t reserved_43;
+	uint32_t reserved_44;
+	uint32_t reserved_45;
+	uint32_t reserved_46;
+	uint32_t reserved_47;
+	uint32_t reserved_48;
+	uint32_t reserved_49;
+	uint32_t reserved_50;
+	uint32_t reserved_51;
+	uint32_t reserved_52;
+	uint32_t reserved_53;
+	uint32_t reserved_54;
+	uint32_t reserved_55;
+	uint32_t reserved_56;
+	uint32_t reserved_57;
+	uint32_t reserved_58;
+	uint32_t reserved_59;
+	uint32_t reserved_60;
+	uint32_t reserved_61;
+	uint32_t reserved_62;
+	uint32_t reserved_63;
+	uint32_t reserved_64;
+	uint32_t reserved_65;
+	uint32_t reserved_66;
+	uint32_t reserved_67;
+	uint32_t reserved_68;
+	uint32_t reserved_69;
+	uint32_t reserved_70;
+	uint32_t reserved_71;
+	uint32_t reserved_72;
+	uint32_t reserved_73;
+	uint32_t reserved_74;
+	uint32_t reserved_75;
+	uint32_t reserved_76;
+	uint32_t reserved_77;
+	uint32_t reserved_78;
+	uint32_t reserved_79;
+	uint32_t reserved_80;
+	uint32_t reserved_81;
+	uint32_t reserved_82;
+	uint32_t reserved_83;
+	uint32_t reserved_84;
+	uint32_t reserved_85;
+	uint32_t reserved_86;
+	uint32_t reserved_87;
+	uint32_t reserved_88;
+	uint32_t reserved_89;
+	uint32_t reserved_90;
+	uint32_t reserved_91;
+	uint32_t reserved_92;
+	uint32_t reserved_93;
+	uint32_t reserved_94;
+	uint32_t reserved_95;
+	uint32_t reserved_96;
+	uint32_t reserved_97;
+	uint32_t reserved_98;
+	uint32_t reserved_99;
+	uint32_t reserved_100;
+	uint32_t reserved_101;
+	uint32_t reserved_102;
+	uint32_t reserved_103;
+	uint32_t reserved_104;
+	uint32_t reserved_105;
+	uint32_t reserved_106;
+	uint32_t reserved_107;
+	uint32_t reserved_108;
+	uint32_t reserved_109;
+	uint32_t reserved_110;
+	uint32_t reserved_111;
+	uint32_t reserved_112;
+	uint32_t reserved_113;
+	uint32_t reserved_114;
+	uint32_t reserved_115;
+	uint32_t reserved_116;
+	uint32_t reserved_117;
+	uint32_t reserved_118;
+	uint32_t reserved_119;
+	uint32_t reserved_120;
+	uint32_t reserved_121;
+	uint32_t reserved_122;
+	uint32_t reserved_123;
+	uint32_t reserved_124;
+	uint32_t reserved_125;
+	uint32_t reserved_126;
+	uint32_t reserved_127;
+};
+
+struct vi_mqd {
+	uint32_t header;
+	uint32_t compute_dispatch_initiator;
+	uint32_t compute_dim_x;
+	uint32_t compute_dim_y;
+	uint32_t compute_dim_z;
+	uint32_t compute_start_x;
+	uint32_t compute_start_y;
+	uint32_t compute_start_z;
+	uint32_t compute_num_thread_x;
+	uint32_t compute_num_thread_y;
+	uint32_t compute_num_thread_z;
+	uint32_t compute_pipelinestat_enable;
+	uint32_t compute_perfcount_enable;
+	uint32_t compute_pgm_lo;
+	uint32_t compute_pgm_hi;
+	uint32_t compute_tba_lo;
+	uint32_t compute_tba_hi;
+	uint32_t compute_tma_lo;
+	uint32_t compute_tma_hi;
+	uint32_t compute_pgm_rsrc1;
+	uint32_t compute_pgm_rsrc2;
+	uint32_t compute_vmid;
+	uint32_t compute_resource_limits;
+	uint32_t compute_static_thread_mgmt_se0;
+	uint32_t compute_static_thread_mgmt_se1;
+	uint32_t compute_tmpring_size;
+	uint32_t compute_static_thread_mgmt_se2;
+	uint32_t compute_static_thread_mgmt_se3;
+	uint32_t compute_restart_x;
+	uint32_t compute_restart_y;
+	uint32_t compute_restart_z;
+	uint32_t compute_thread_trace_enable;
+	uint32_t compute_misc_reserved;
+	uint32_t compute_dispatch_id;
+	uint32_t compute_threadgroup_id;
+	uint32_t compute_relaunch;
+	uint32_t compute_wave_restore_addr_lo;
+	uint32_t compute_wave_restore_addr_hi;
+	uint32_t compute_wave_restore_control;
+	uint32_t reserved_39;
+	uint32_t reserved_40;
+	uint32_t reserved_41;
+	uint32_t reserved_42;
+	uint32_t reserved_43;
+	uint32_t reserved_44;
+	uint32_t reserved_45;
+	uint32_t reserved_46;
+	uint32_t reserved_47;
+	uint32_t reserved_48;
+	uint32_t reserved_49;
+	uint32_t reserved_50;
+	uint32_t reserved_51;
+	uint32_t reserved_52;
+	uint32_t reserved_53;
+	uint32_t reserved_54;
+	uint32_t reserved_55;
+	uint32_t reserved_56;
+	uint32_t reserved_57;
+	uint32_t reserved_58;
+	uint32_t reserved_59;
+	uint32_t reserved_60;
+	uint32_t reserved_61;
+	uint32_t reserved_62;
+	uint32_t reserved_63;
+	uint32_t reserved_64;
+	uint32_t compute_user_data_0;
+	uint32_t compute_user_data_1;
+	uint32_t compute_user_data_2;
+	uint32_t compute_user_data_3;
+	uint32_t compute_user_data_4;
+	uint32_t compute_user_data_5;
+	uint32_t compute_user_data_6;
+	uint32_t compute_user_data_7;
+	uint32_t compute_user_data_8;
+	uint32_t compute_user_data_9;
+	uint32_t compute_user_data_10;
+	uint32_t compute_user_data_11;
+	uint32_t compute_user_data_12;
+	uint32_t compute_user_data_13;
+	uint32_t compute_user_data_14;
+	uint32_t compute_user_data_15;
+	uint32_t cp_compute_csinvoc_count_lo;
+	uint32_t cp_compute_csinvoc_count_hi;
+	uint32_t reserved_83;
+	uint32_t reserved_84;
+	uint32_t reserved_85;
+	uint32_t cp_mqd_query_time_lo;
+	uint32_t cp_mqd_query_time_hi;
+	uint32_t cp_mqd_connect_start_time_lo;
+	uint32_t cp_mqd_connect_start_time_hi;
+	uint32_t cp_mqd_connect_end_time_lo;
+	uint32_t cp_mqd_connect_end_time_hi;
+	uint32_t cp_mqd_connect_end_wf_count;
+	uint32_t cp_mqd_connect_end_pq_rptr;
+	uint32_t cp_mqd_connect_end_pq_wptr;
+	uint32_t cp_mqd_connect_end_ib_rptr;
+	uint32_t reserved_96;
+	uint32_t reserved_97;
+	uint32_t cp_mqd_save_start_time_lo;
+	uint32_t cp_mqd_save_start_time_hi;
+	uint32_t cp_mqd_save_end_time_lo;
+	uint32_t cp_mqd_save_end_time_hi;
+	uint32_t cp_mqd_restore_start_time_lo;
+	uint32_t cp_mqd_restore_start_time_hi;
+	uint32_t cp_mqd_restore_end_time_lo;
+	uint32_t cp_mqd_restore_end_time_hi;
+	uint32_t reserved_106;
+	uint32_t reserved_107;
+	uint32_t gds_cs_ctxsw_cnt0;
+	uint32_t gds_cs_ctxsw_cnt1;
+	uint32_t gds_cs_ctxsw_cnt2;
+	uint32_t gds_cs_ctxsw_cnt3;
+	uint32_t reserved_112;
+	uint32_t reserved_113;
+	uint32_t cp_pq_exe_status_lo;
+	uint32_t cp_pq_exe_status_hi;
+	uint32_t cp_packet_id_lo;
+	uint32_t cp_packet_id_hi;
+	uint32_t cp_packet_exe_status_lo;
+	uint32_t cp_packet_exe_status_hi;
+	uint32_t gds_save_base_addr_lo;
+	uint32_t gds_save_base_addr_hi;
+	uint32_t gds_save_mask_lo;
+	uint32_t gds_save_mask_hi;
+	uint32_t ctx_save_base_addr_lo;
+	uint32_t ctx_save_base_addr_hi;
+	uint32_t reserved_126;
+	uint32_t reserved_127;
+	uint32_t cp_mqd_base_addr_lo;
+	uint32_t cp_mqd_base_addr_hi;
+	uint32_t cp_hqd_active;
+	uint32_t cp_hqd_vmid;
+	uint32_t cp_hqd_persistent_state;
+	uint32_t cp_hqd_pipe_priority;
+	uint32_t cp_hqd_queue_priority;
+	uint32_t cp_hqd_quantum;
+	uint32_t cp_hqd_pq_base_lo;
+	uint32_t cp_hqd_pq_base_hi;
+	uint32_t cp_hqd_pq_rptr;
+	uint32_t cp_hqd_pq_rptr_report_addr_lo;
+	uint32_t cp_hqd_pq_rptr_report_addr_hi;
+	uint32_t cp_hqd_pq_wptr_poll_addr_lo;
+	uint32_t cp_hqd_pq_wptr_poll_addr_hi;
+	uint32_t cp_hqd_pq_doorbell_control;
+	uint32_t cp_hqd_pq_wptr;
+	uint32_t cp_hqd_pq_control;
+	uint32_t cp_hqd_ib_base_addr_lo;
+	uint32_t cp_hqd_ib_base_addr_hi;
+	uint32_t cp_hqd_ib_rptr;
+	uint32_t cp_hqd_ib_control;
+	uint32_t cp_hqd_iq_timer;
+	uint32_t cp_hqd_iq_rptr;
+	uint32_t cp_hqd_dequeue_request;
+	uint32_t cp_hqd_dma_offload;
+	uint32_t cp_hqd_sema_cmd;
+	uint32_t cp_hqd_msg_type;
+	uint32_t cp_hqd_atomic0_preop_lo;
+	uint32_t cp_hqd_atomic0_preop_hi;
+	uint32_t cp_hqd_atomic1_preop_lo;
+	uint32_t cp_hqd_atomic1_preop_hi;
+	uint32_t cp_hqd_hq_status0;
+	uint32_t cp_hqd_hq_control0;
+	uint32_t cp_mqd_control;
+	uint32_t cp_hqd_hq_status1;
+	uint32_t cp_hqd_hq_control1;
+	uint32_t cp_hqd_eop_base_addr_lo;
+	uint32_t cp_hqd_eop_base_addr_hi;
+	uint32_t cp_hqd_eop_control;
+	uint32_t cp_hqd_eop_rptr;
+	uint32_t cp_hqd_eop_wptr;
+	uint32_t cp_hqd_eop_done_events;
+	uint32_t cp_hqd_ctx_save_base_addr_lo;
+	uint32_t cp_hqd_ctx_save_base_addr_hi;
+	uint32_t cp_hqd_ctx_save_control;
+	uint32_t cp_hqd_cntl_stack_offset;
+	uint32_t cp_hqd_cntl_stack_size;
+	uint32_t cp_hqd_wg_state_offset;
+	uint32_t cp_hqd_ctx_save_size;
+	uint32_t cp_hqd_gds_resource_state;
+	uint32_t cp_hqd_error;
+	uint32_t cp_hqd_eop_wptr_mem;
+	uint32_t cp_hqd_eop_dones;
+	uint32_t reserved_182;
+	uint32_t reserved_183;
+	uint32_t reserved_184;
+	uint32_t reserved_185;
+	uint32_t reserved_186;
+	uint32_t reserved_187;
+	uint32_t reserved_188;
+	uint32_t reserved_189;
+	uint32_t reserved_190;
+	uint32_t reserved_191;
+	uint32_t iqtimer_pkt_header;
+	uint32_t iqtimer_pkt_dw0;
+	uint32_t iqtimer_pkt_dw1;
+	uint32_t iqtimer_pkt_dw2;
+	uint32_t iqtimer_pkt_dw3;
+	uint32_t iqtimer_pkt_dw4;
+	uint32_t iqtimer_pkt_dw5;
+	uint32_t iqtimer_pkt_dw6;
+	uint32_t iqtimer_pkt_dw7;
+	uint32_t iqtimer_pkt_dw8;
+	uint32_t iqtimer_pkt_dw9;
+	uint32_t iqtimer_pkt_dw10;
+	uint32_t iqtimer_pkt_dw11;
+	uint32_t iqtimer_pkt_dw12;
+	uint32_t iqtimer_pkt_dw13;
+	uint32_t iqtimer_pkt_dw14;
+	uint32_t iqtimer_pkt_dw15;
+	uint32_t iqtimer_pkt_dw16;
+	uint32_t iqtimer_pkt_dw17;
+	uint32_t iqtimer_pkt_dw18;
+	uint32_t iqtimer_pkt_dw19;
+	uint32_t iqtimer_pkt_dw20;
+	uint32_t iqtimer_pkt_dw21;
+	uint32_t iqtimer_pkt_dw22;
+	uint32_t iqtimer_pkt_dw23;
+	uint32_t iqtimer_pkt_dw24;
+	uint32_t iqtimer_pkt_dw25;
+	uint32_t iqtimer_pkt_dw26;
+	uint32_t iqtimer_pkt_dw27;
+	uint32_t iqtimer_pkt_dw28;
+	uint32_t iqtimer_pkt_dw29;
+	uint32_t iqtimer_pkt_dw30;
+	uint32_t iqtimer_pkt_dw31;
+	uint32_t reserved_225;
+	uint32_t reserved_226;
+	uint32_t reserved_227;
+	uint32_t set_resources_header;
+	uint32_t set_resources_dw1;
+	uint32_t set_resources_dw2;
+	uint32_t set_resources_dw3;
+	uint32_t set_resources_dw4;
+	uint32_t set_resources_dw5;
+	uint32_t set_resources_dw6;
+	uint32_t set_resources_dw7;
+	uint32_t reserved_236;
+	uint32_t reserved_237;
+	uint32_t reserved_238;
+	uint32_t reserved_239;
+	uint32_t queue_doorbell_id0;
+	uint32_t queue_doorbell_id1;
+	uint32_t queue_doorbell_id2;
+	uint32_t queue_doorbell_id3;
+	uint32_t queue_doorbell_id4;
+	uint32_t queue_doorbell_id5;
+	uint32_t queue_doorbell_id6;
+	uint32_t queue_doorbell_id7;
+	uint32_t queue_doorbell_id8;
+	uint32_t queue_doorbell_id9;
+	uint32_t queue_doorbell_id10;
+	uint32_t queue_doorbell_id11;
+	uint32_t queue_doorbell_id12;
+	uint32_t queue_doorbell_id13;
+	uint32_t queue_doorbell_id14;
+	uint32_t queue_doorbell_id15;
+};
+
+#endif /* VI_STRUCTS_H_ */
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
new file mode 100644
index 0000000..9259f1b
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -0,0 +1,424 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "gpu_scheduler.h"
+
+static struct amd_sched_job *
+amd_sched_entity_pop_job(struct amd_sched_entity *entity);
+static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+
+/* Initialize a given run queue struct */
+static void amd_sched_rq_init(struct amd_sched_rq *rq)
+{
+	spin_lock_init(&rq->lock);
+	INIT_LIST_HEAD(&rq->entities);
+	rq->current_entity = NULL;
+}
+
+static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
+				    struct amd_sched_entity *entity)
+{
+	spin_lock(&rq->lock);
+	list_add_tail(&entity->list, &rq->entities);
+	spin_unlock(&rq->lock);
+}
+
+static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
+				       struct amd_sched_entity *entity)
+{
+	spin_lock(&rq->lock);
+	list_del_init(&entity->list);
+	if (rq->current_entity == entity)
+		rq->current_entity = NULL;
+	spin_unlock(&rq->lock);
+}
+
+/**
+ * Select the next job from a specified run queue with round-robin policy.
+ * Return NULL if nothing is available.
+ */
+static struct amd_sched_job *
+amd_sched_rq_select_job(struct amd_sched_rq *rq)
+{
+	struct amd_sched_entity *entity;
+	struct amd_sched_job *job;
+
+	spin_lock(&rq->lock);
+
+	entity = rq->current_entity;
+	if (entity) {
+		list_for_each_entry_continue(entity, &rq->entities, list) {
+			job = amd_sched_entity_pop_job(entity);
+			if (job) {
+				rq->current_entity = entity;
+				spin_unlock(&rq->lock);
+				return job;
+			}
+		}
+	}
+
+	list_for_each_entry(entity, &rq->entities, list) {
+
+		job = amd_sched_entity_pop_job(entity);
+		if (job) {
+			rq->current_entity = entity;
+			spin_unlock(&rq->lock);
+			return job;
+		}
+
+		if (entity == rq->current_entity)
+			break;
+	}
+
+	spin_unlock(&rq->lock);
+
+	return NULL;
+}
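
To make the two-pass walk above concrete (entity names are hypothetical): with entities A, B and C on the list and rq->current_entity pointing at B, the first loop continues after B and tries C; if C has nothing queued, the second loop starts over from the head, trying A and finally B itself before giving up. Each entity therefore gets exactly one pop attempt per call, and the starting point advances fairly across calls.
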
+
+/**
+ * Init a context entity used by the scheduler when submitting to a HW ring.
+ *
+ * @sched	The pointer to the scheduler
+ * @entity	The pointer to a valid amd_sched_entity
+ * @rq		The run queue this entity belongs to
+ * @jobs	The max number of jobs in the job queue
+ *
+ * Return 0 on success, negative error code on failure.
+ */
+int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+			  struct amd_sched_entity *entity,
+			  struct amd_sched_rq *rq,
+			  uint32_t jobs)
+{
+	if (!(sched && entity && rq))
+		return -EINVAL;
+
+	memset(entity, 0, sizeof(struct amd_sched_entity));
+	entity->belongto_rq = rq;
+	entity->scheduler = sched;
+	entity->fence_context = fence_context_alloc(1);
+	if (kfifo_alloc(&entity->job_queue,
+		       jobs * sizeof(void *),
+		       GFP_KERNEL))
+		return -EINVAL;
+
+	spin_lock_init(&entity->queue_lock);
+	atomic_set(&entity->fence_seq, 0);
+
+	/* Add the entity to the run queue */
+	amd_sched_rq_add_entity(rq, entity);
+	return 0;
+}
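
A minimal driver-side sketch of entity setup against this API; the wrapper name, the choice of the normal run queue and the queue depth of 16 are illustrative assumptions, not part of this patch:

	/* Hypothetical caller: bind an entity to the normal run queue.
	 * amd_sched_entity_init() allocates jobs * sizeof(void *) bytes
	 * of kfifo space, here room for 16 queued job pointers.
	 */
	static int example_ctx_init(struct amd_gpu_scheduler *sched,
				    struct amd_sched_entity *entity)
	{
		return amd_sched_entity_init(sched, entity,
					     &sched->sched_rq, 16);
	}
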
+
+/**
+ * Query if entity is initialized
+ *
+ * @sched       Pointer to scheduler instance
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Return true if the entity is initialized, false otherwise.
+ */
+static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
+					    struct amd_sched_entity *entity)
+{
+	return entity->scheduler == sched &&
+		entity->belongto_rq != NULL;
+}
+
+/**
+ * Check if entity is idle
+ *
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Return true if the entity has no unscheduled jobs.
+ */
+static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
+{
+	rmb(); /* ensure the job queue state is up to date before checking */
+	if (kfifo_is_empty(&entity->job_queue))
+		return true;
+
+	return false;
+}
+
+/**
+ * Destroy a context entity
+ *
+ * @sched       Pointer to scheduler instance
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Cleanup and free the allocated resources.
+ */
+void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+			   struct amd_sched_entity *entity)
+{
+	struct amd_sched_rq *rq = entity->belongto_rq;
+
+	if (!amd_sched_entity_is_initialized(sched, entity))
+		return;
+
+	/*
+	 * The client will not queue more IBs during this fini; consume the
+	 * existing queued IBs.
+	 */
+	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
+
+	amd_sched_rq_remove_entity(rq, entity);
+	kfifo_free(&entity->job_queue);
+}
+
+static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_entity *entity =
+		container_of(cb, struct amd_sched_entity, cb);
+	entity->dependency = NULL;
+	fence_put(f);
+	amd_sched_wakeup(entity->scheduler);
+}
+
+static struct amd_sched_job *
+amd_sched_entity_pop_job(struct amd_sched_entity *entity)
+{
+	struct amd_gpu_scheduler *sched = entity->scheduler;
+	struct amd_sched_job *job;
+
+	if (ACCESS_ONCE(entity->dependency))
+		return NULL;
+
+	if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job)))
+		return NULL;
+
+	while ((entity->dependency = sched->ops->dependency(job))) {
+
+		if (fence_add_callback(entity->dependency, &entity->cb,
+				       amd_sched_entity_wakeup))
+			fence_put(entity->dependency);
+		else
+			return NULL;
+	}
+
+	return job;
+}
+
+/**
+ * Helper to submit a job to the job queue
+ *
+ * @job		The pointer to the job to submit
+ *
+ * Returns true if we could submit the job.
+ */
+static bool amd_sched_entity_in(struct amd_sched_job *job)
+{
+	struct amd_sched_entity *entity = job->s_entity;
+	bool added, first = false;
+
+	spin_lock(&entity->queue_lock);
+	added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);
+
+	if (added && kfifo_len(&entity->job_queue) == sizeof(job))
+		first = true;
+
+	spin_unlock(&entity->queue_lock);
+
+	/* first job wakes up scheduler */
+	if (first)
+		amd_sched_wakeup(job->sched);
+
+	return added;
+}
+
+/**
+ * Submit a job to the job queue
+ *
+ * @sched_job	The pointer to the job to submit
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+{
+	struct amd_sched_entity *entity = sched_job->s_entity;
+	struct amd_sched_fence *fence = amd_sched_fence_create(
+		entity, sched_job->owner);
+
+	if (!fence)
+		return -ENOMEM;
+
+	fence_get(&fence->base);
+	sched_job->s_fence = fence;
+
+	wait_event(entity->scheduler->job_scheduled,
+		   amd_sched_entity_in(sched_job));
+
+	return 0;
+}
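
A matching sketch of the submission side (names hypothetical): the caller fills in the job's scheduler, entity and owner, then pushes; on success the job carries a scheduler fence in s_fence that can be waited on for completion:

	static int example_submit(struct amd_gpu_scheduler *sched,
				  struct amd_sched_entity *entity,
				  struct amd_sched_job *job, void *owner)
	{
		int r;

		job->sched = sched;
		job->s_entity = entity;
		job->owner = owner;

		/* May block until the entity's kfifo has room. */
		r = amd_sched_entity_push_job(job);
		if (r)
			return r;

		/* Optionally wait for hardware completion via the
		 * scheduler fence, then drop the reference taken in
		 * amd_sched_entity_push_job().
		 */
		fence_wait(&job->s_fence->base, false);
		fence_put(&job->s_fence->base);
		return 0;
	}
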
+
+/**
+ * Return true if we can push more jobs to the hw.
+ */
+static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
+{
+	return atomic_read(&sched->hw_rq_count) <
+		sched->hw_submission_limit;
+}
+
+/**
+ * Wake up the scheduler when it is ready
+ */
+static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
+{
+	if (amd_sched_ready(sched))
+		wake_up_interruptible(&sched->wake_up_worker);
+}
+
+/**
+ * Select the next job to run.
+ */
+static struct amd_sched_job *
+amd_sched_select_job(struct amd_gpu_scheduler *sched)
+{
+	struct amd_sched_job *job;
+
+	if (!amd_sched_ready(sched))
+		return NULL;
+
+	/* Kernel run queue has higher priority than normal run queue */
+	job = amd_sched_rq_select_job(&sched->kernel_rq);
+	if (job == NULL)
+		job = amd_sched_rq_select_job(&sched->sched_rq);
+
+	return job;
+}
+
+static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_job *sched_job =
+		container_of(cb, struct amd_sched_job, cb);
+	struct amd_gpu_scheduler *sched;
+
+	sched = sched_job->sched;
+	amd_sched_fence_signal(sched_job->s_fence);
+	atomic_dec(&sched->hw_rq_count);
+	fence_put(&sched_job->s_fence->base);
+	sched->ops->process_job(sched_job);
+	wake_up_interruptible(&sched->wake_up_worker);
+}
+
+static int amd_sched_main(void *param)
+{
+	struct sched_param sparam = {.sched_priority = 1};
+	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
+	int r, count;
+
+	sched_setscheduler(current, SCHED_FIFO, &sparam);
+
+	while (!kthread_should_stop()) {
+		struct amd_sched_entity *entity;
+		struct amd_sched_job *job;
+		struct fence *fence;
+
+		wait_event_interruptible(sched->wake_up_worker,
+			kthread_should_stop() ||
+			(job = amd_sched_select_job(sched)));
+
+		if (!job)
+			continue;
+
+		entity = job->s_entity;
+		atomic_inc(&sched->hw_rq_count);
+		fence = sched->ops->run_job(job);
+		if (fence) {
+			r = fence_add_callback(fence, &job->cb,
+					       amd_sched_process_job);
+			if (r == -ENOENT)
+				amd_sched_process_job(fence, &job->cb);
+			else if (r)
+				DRM_ERROR("fence add callback failed (%d)\n", r);
+			fence_put(fence);
+		}
+
+		count = kfifo_out(&entity->job_queue, &job, sizeof(job));
+		WARN_ON(count != sizeof(job));
+		wake_up(&sched->job_scheduled);
+	}
+	return 0;
+}
+
+/**
+ * Create a gpu scheduler
+ *
+ * @ops			The backend operations for this scheduler.
+ * @ring		The ring id for the scheduler.
+ * @hw_submission	Max number of in-flight hw submissions.
+ * @priv		Driver private data stored in the scheduler.
+ *
+ * Return the pointer to the scheduler on success, otherwise NULL.
+ */
+struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
+					   unsigned ring, unsigned hw_submission,
+					   void *priv)
+{
+	struct amd_gpu_scheduler *sched;
+
+	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
+	if (!sched)
+		return NULL;
+
+	sched->ops = ops;
+	sched->ring_id = ring;
+	sched->hw_submission_limit = hw_submission;
+	sched->priv = priv;
+	snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
+	amd_sched_rq_init(&sched->sched_rq);
+	amd_sched_rq_init(&sched->kernel_rq);
+
+	init_waitqueue_head(&sched->wake_up_worker);
+	init_waitqueue_head(&sched->job_scheduled);
+	atomic_set(&sched->hw_rq_count, 0);
+	/* Each scheduler will run on a separate kernel thread */
+	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
+	if (IS_ERR(sched->thread)) {
+		DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
+		kfree(sched);
+		return NULL;
+	}
+
+	return sched;
+}
+
+/**
+ * Destroy a gpu scheduler
+ *
+ * @sched	The pointer to the scheduler
+ *
+ * Return 0 on success.
+ */
+int amd_sched_destroy(struct amd_gpu_scheduler *sched)
+{
+	kthread_stop(sched->thread);
+	kfree(sched);
+	return 0;
+}
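
Putting the pieces together, a hypothetical per-ring lifecycle (example_ops is the driver-side backend sketched after the ops definition in gpu_scheduler.h below; ring id 0 and an in-flight limit of 2 are illustrative):

	static struct amd_gpu_scheduler *example_ring_init(void *drv_priv)
	{
		/* One scheduler, and hence one kernel thread, per ring;
		 * at most two jobs are kept in flight on the hardware.
		 */
		return amd_sched_create(&example_ops, 0, 2, drv_priv);
	}

	static void example_ring_fini(struct amd_gpu_scheduler *sched)
	{
		amd_sched_destroy(sched);	/* stops the worker thread */
	}
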
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
new file mode 100644
index 0000000..2af0e4d
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _GPU_SCHEDULER_H_
+#define _GPU_SCHEDULER_H_
+
+#include <linux/kfifo.h>
+#include <linux/fence.h>
+
+struct amd_gpu_scheduler;
+struct amd_sched_rq;
+
+/**
+ * A scheduler entity is a wrapper around a job queue or a group
+ * of other entities. Entities take turns emitting jobs from their
+ * job queues to the corresponding hardware ring, based on the
+ * scheduling policy.
+ */
+struct amd_sched_entity {
+	struct list_head		list;
+	struct amd_sched_rq		*belongto_rq;
+	atomic_t			fence_seq;
+	/* the job_queue maintains the jobs submitted by clients */
+	struct kfifo                    job_queue;
+	spinlock_t			queue_lock;
+	struct amd_gpu_scheduler	*scheduler;
+	uint64_t                        fence_context;
+	struct fence			*dependency;
+	struct fence_cb			cb;
+};
+
+/**
+ * Run queue is a set of entities scheduling command submissions for
+ * one specific ring. It implements the scheduling policy that selects
+ * the next entity to emit commands from.
+ */
+struct amd_sched_rq {
+	spinlock_t		lock;
+	struct list_head	entities;
+	struct amd_sched_entity	*current_entity;
+};
+
+struct amd_sched_fence {
+	struct fence                    base;
+	struct amd_gpu_scheduler	*scheduler;
+	spinlock_t			lock;
+	void                            *owner;
+};
+
+struct amd_sched_job {
+	struct fence_cb                 cb;
+	struct amd_gpu_scheduler        *sched;
+	struct amd_sched_entity         *s_entity;
+	struct amd_sched_fence          *s_fence;
+	void		                *owner;
+};
+
+extern const struct fence_ops amd_sched_fence_ops;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+{
+	struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+
+	if (__f->base.ops == &amd_sched_fence_ops)
+		return __f;
+
+	return NULL;
+}
+
+/**
+ * Define the backend operations called by the scheduler;
+ * these functions should be implemented on the driver side.
+ */
+struct amd_sched_backend_ops {
+	struct fence *(*dependency)(struct amd_sched_job *job);
+	struct fence *(*run_job)(struct amd_sched_job *job);
+	void (*process_job)(struct amd_sched_job *job);
+};
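
A sketch of the driver side of these hooks (all names hypothetical): dependency() is polled before a job is picked and returns a fence the job must still wait for, or NULL once it is runnable; run_job() starts the job on the ring and returns its hardware fence; process_job() runs once that fence has signalled:

	static struct fence *example_dependency(struct amd_sched_job *job)
	{
		return NULL;	/* no cross-job dependencies in this sketch */
	}

	static struct fence *example_run_job(struct amd_sched_job *job)
	{
		/* Kick the ring here and return its hardware fence;
		 * returning NULL means no completion callback is armed.
		 */
		return NULL;
	}

	static void example_process_job(struct amd_sched_job *job)
	{
		kfree(job);	/* the job completed, reclaim it */
	}

	static struct amd_sched_backend_ops example_ops = {
		.dependency	= example_dependency,
		.run_job	= example_run_job,
		.process_job	= example_process_job,
	};
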
+
+/**
+ * One scheduler is implemented for each hardware ring.
+ */
+struct amd_gpu_scheduler {
+	struct task_struct		*thread;
+	struct amd_sched_rq		sched_rq;
+	struct amd_sched_rq		kernel_rq;
+	atomic_t			hw_rq_count;
+	struct amd_sched_backend_ops	*ops;
+	uint32_t			ring_id;
+	wait_queue_head_t		wake_up_worker;
+	wait_queue_head_t		job_scheduled;
+	uint32_t                        hw_submission_limit;
+	char                            name[20];
+	void                            *priv;
+};
+
+struct amd_gpu_scheduler *
+amd_sched_create(struct amd_sched_backend_ops *ops,
+		 uint32_t ring, uint32_t hw_submission, void *priv);
+int amd_sched_destroy(struct amd_gpu_scheduler *sched);
+
+int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+			  struct amd_sched_entity *entity,
+			  struct amd_sched_rq *rq,
+			  uint32_t jobs);
+void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+			   struct amd_sched_entity *entity);
+int amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+
+struct amd_sched_fence *amd_sched_fence_create(
+	struct amd_sched_entity *s_entity, void *owner);
+void amd_sched_fence_signal(struct amd_sched_fence *fence);
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
new file mode 100644
index 0000000..e62c379
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "gpu_scheduler.h"
+
+struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner)
+{
+	struct amd_sched_fence *fence = NULL;
+	unsigned seq;
+
+	fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
+	if (fence == NULL)
+		return NULL;
+	fence->owner = owner;
+	fence->scheduler = s_entity->scheduler;
+	spin_lock_init(&fence->lock);
+
+	seq = atomic_inc_return(&s_entity->fence_seq);
+	fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
+		   s_entity->fence_context, seq);
+
+	return fence;
+}
+
+void amd_sched_fence_signal(struct amd_sched_fence *fence)
+{
+	int ret = fence_signal(&fence->base);
+	if (!ret)
+		FENCE_TRACE(&fence->base, "signaled from irq context\n");
+	else
+		FENCE_TRACE(&fence->base, "was already signaled\n");
+}
+
+static const char *amd_sched_fence_get_driver_name(struct fence *fence)
+{
+	return "amd_sched";
+}
+
+static const char *amd_sched_fence_get_timeline_name(struct fence *f)
+{
+	struct amd_sched_fence *fence = to_amd_sched_fence(f);
+	return (const char *)fence->scheduler->name;
+}
+
+static bool amd_sched_fence_enable_signaling(struct fence *f)
+{
+	return true;
+}
+
+const struct fence_ops amd_sched_fence_ops = {
+	.get_driver_name = amd_sched_fence_get_driver_name,
+	.get_timeline_name = amd_sched_fence_get_timeline_name,
+	.enable_signaling = amd_sched_fence_enable_signaling,
+	.signaled = NULL,
+	.wait = fence_default_wait,
+	.release = NULL,
+};
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 7838e73..7d03c51 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -22,9 +22,9 @@
 	.owner		= THIS_MODULE,
 	.fb_check_var	= drm_fb_helper_check_var,
 	.fb_set_par	= drm_fb_helper_set_par,
-	.fb_fillrect	= cfb_fillrect,
-	.fb_copyarea	= cfb_copyarea,
-	.fb_imageblit	= cfb_imageblit,
+	.fb_fillrect	= drm_fb_helper_cfb_fillrect,
+	.fb_copyarea	= drm_fb_helper_cfb_copyarea,
+	.fb_imageblit	= drm_fb_helper_cfb_imageblit,
 	.fb_pan_display	= drm_fb_helper_pan_display,
 	.fb_blank	= drm_fb_helper_blank,
 	.fb_setcmap	= drm_fb_helper_setcmap,
@@ -80,18 +80,12 @@
 	if (IS_ERR(dfb))
 		return PTR_ERR(dfb);
 
-	info = framebuffer_alloc(0, dev->dev);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(fbh);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto err_fballoc;
 	}
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto err_fbcmap;
-	}
-
 	strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
 	info->par = fbh;
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
@@ -101,7 +95,7 @@
 	info->screen_size = obj->obj.size;
 	info->screen_base = ptr;
 	fbh->fb = &dfb->fb;
-	fbh->fbdev = info;
+
 	drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
 	drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
 
@@ -111,8 +105,6 @@
 
 	return 0;
 
- err_fbcmap:
-	framebuffer_release(info);
  err_fballoc:
 	dfb->fb.funcs->destroy(&dfb->fb);
 	return ret;
@@ -171,6 +163,7 @@
 
 	return 0;
  err_fb_setup:
+	drm_fb_helper_release_fbi(fbh);
 	drm_fb_helper_fini(fbh);
  err_fb_helper:
 	priv->fbdev = NULL;
@@ -191,14 +184,8 @@
 	struct drm_fb_helper *fbh = priv->fbdev;
 
 	if (fbh) {
-		struct fb_info *info = fbh->fbdev;
-
-		if (info) {
-			unregister_framebuffer(info);
-			if (info->cmap.len)
-				fb_dealloc_cmap(&info->cmap);
-			framebuffer_release(info);
-		}
+		drm_fb_helper_unregister_fbi(fbh);
+		drm_fb_helper_release_fbi(fbh);
 
 		drm_fb_helper_fini(fbh);
 
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index ff68eef..f31db28 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -125,7 +125,7 @@
 			 const struct fb_fillrect *rect)
 {
 	struct ast_fbdev *afbdev = info->par;
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 	ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
 			 rect->height);
 }
@@ -134,7 +134,7 @@
 			 const struct fb_copyarea *area)
 {
 	struct ast_fbdev *afbdev = info->par;
-	sys_copyarea(info, area);
+	drm_fb_helper_sys_copyarea(info, area);
 	ast_dirty_update(afbdev, area->dx, area->dy, area->width,
 			 area->height);
 }
@@ -143,7 +143,7 @@
 			  const struct fb_image *image)
 {
 	struct ast_fbdev *afbdev = info->par;
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 	ast_dirty_update(afbdev, image->dx, image->dy, image->width,
 			 image->height);
 }
@@ -193,7 +193,6 @@
 	struct drm_framebuffer *fb;
 	struct fb_info *info;
 	int size, ret;
-	struct device *device = &dev->pdev->dev;
 	void *sysram;
 	struct drm_gem_object *gobj = NULL;
 	struct ast_bo *bo = NULL;
@@ -217,40 +216,28 @@
 	if (!sysram)
 		return -ENOMEM;
 
-	info = framebuffer_alloc(0, device);
-	if (!info) {
-		ret = -ENOMEM;
-		goto out;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
+		goto err_free_vram;
 	}
 	info->par = afbdev;
 
 	ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
 	if (ret)
-		goto out;
+		goto err_release_fbi;
 
 	afbdev->sysram = sysram;
 	afbdev->size = size;
 
 	fb = &afbdev->afb.base;
 	afbdev->helper.fb = fb;
-	afbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "astdrmfb");
 
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &astfb_ops;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out;
-	}
 	info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
 	info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
 
@@ -266,7 +253,11 @@
 		      fb->width, fb->height);
 
 	return 0;
-out:
+
+err_release_fbi:
+	drm_fb_helper_release_fbi(helper);
+err_free_vram:
+	vfree(afbdev->sysram);
 	return ret;
 }
 
@@ -297,15 +288,10 @@
 static void ast_fbdev_destroy(struct drm_device *dev,
 			      struct ast_fbdev *afbdev)
 {
-	struct fb_info *info;
 	struct ast_framebuffer *afb = &afbdev->afb;
-	if (afbdev->helper.fbdev) {
-		info = afbdev->helper.fbdev;
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+
+	drm_fb_helper_unregister_fbi(&afbdev->helper);
+	drm_fb_helper_release_fbi(&afbdev->helper);
 
 	if (afb->obj) {
 		drm_gem_object_unreference_unlocked(afb->obj);
@@ -377,5 +363,5 @@
 	if (!ast->fbdev)
 		return;
 
-	fb_set_suspend(ast->fbdev->helper.fbdev, state);
+	drm_fb_helper_set_suspend(&ast->fbdev->helper, state);
 }
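
The armada and ast hunks above, and the bochs ones further down, all apply the same conversion: rather than open-coding framebuffer_alloc()/fb_alloc_cmap() and the matching teardown, drivers use the drm_fb_helper wrappers, which own the fb_info and colormap. Condensed, the new skeleton looks like this (example_hw_setup is a hypothetical stand-in for the driver-specific part):

	static int example_fbdev_probe(struct drm_fb_helper *helper)
	{
		struct fb_info *info;
		int ret;

		/* replaces framebuffer_alloc() + fb_alloc_cmap() */
		info = drm_fb_helper_alloc_fbi(helper);
		if (IS_ERR(info))
			return PTR_ERR(info);

		ret = example_hw_setup(helper);
		if (ret)
			drm_fb_helper_release_fbi(helper);
		return ret;
	}

	static void example_fbdev_destroy(struct drm_fb_helper *helper)
	{
		/* replaces unregister_framebuffer() */
		drm_fb_helper_unregister_fbi(helper);
		/* replaces fb_dealloc_cmap() + framebuffer_release() */
		drm_fb_helper_release_fbi(helper);
	}
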
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 035dacc..838217f 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -571,24 +571,18 @@
 		     uint64_t *offset)
 {
 	struct drm_gem_object *obj;
-	int ret;
 	struct ast_bo *bo;
 
-	mutex_lock(&dev->struct_mutex);
 	obj = drm_gem_object_lookup(dev, file, handle);
-	if (obj == NULL) {
-		ret = -ENOENT;
-		goto out_unlock;
-	}
+	if (obj == NULL)
+		return -ENOENT;
 
 	bo = gem_to_ast_bo(obj);
 	*offset = ast_bo_mmap_offset(bo);
 
-	drm_gem_object_unreference(obj);
-	ret = 0;
-out_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	drm_gem_object_unreference_unlocked(obj);
+
+	return 0;
 
 }
 
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 5ae5c69..9f6e234 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -239,7 +239,8 @@
 	return atmel_hlcdc_plane_prepare_disc_area(s);
 }
 
-static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c)
+static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
+					  struct drm_crtc_state *old_s)
 {
 	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
 
@@ -253,7 +254,8 @@
 	}
 }
 
-static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
+					  struct drm_crtc_state *old_s)
 {
 	/* TODO: write common plane control register if available */
 }
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index ef6182b..8bc62ec 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -29,6 +29,115 @@
 
 #define ATMEL_HLCDC_LAYER_IRQS_OFFSET		8
 
+static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9n12_layers[] = {
+	{
+		.name = "base",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x40,
+		.id = 0,
+		.type = ATMEL_HLCDC_BASE_LAYER,
+		.nconfigs = 5,
+		.layout = {
+			.xstride = { 2 },
+			.default_color = 3,
+			.general_config = 4,
+		},
+	},
+};
+
+static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9n12 = {
+	.min_width = 0,
+	.min_height = 0,
+	.max_width = 1280,
+	.max_height = 860,
+	.nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9n12_layers),
+	.layers = atmel_hlcdc_at91sam9n12_layers,
+};
+
+static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = {
+	{
+		.name = "base",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x40,
+		.id = 0,
+		.type = ATMEL_HLCDC_BASE_LAYER,
+		.nconfigs = 5,
+		.layout = {
+			.xstride = { 2 },
+			.default_color = 3,
+			.general_config = 4,
+			.disc_pos = 5,
+			.disc_size = 6,
+		},
+	},
+	{
+		.name = "overlay1",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x100,
+		.id = 1,
+		.type = ATMEL_HLCDC_OVERLAY_LAYER,
+		.nconfigs = 10,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.xstride = { 4 },
+			.pstride = { 5 },
+			.default_color = 6,
+			.chroma_key = 7,
+			.chroma_key_mask = 8,
+			.general_config = 9,
+		},
+	},
+	{
+		.name = "high-end-overlay",
+		.formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
+		.regs_offset = 0x280,
+		.id = 2,
+		.type = ATMEL_HLCDC_OVERLAY_LAYER,
+		.nconfigs = 17,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.memsize = 4,
+			.xstride = { 5, 7 },
+			.pstride = { 6, 8 },
+			.default_color = 9,
+			.chroma_key = 10,
+			.chroma_key_mask = 11,
+			.general_config = 12,
+			.csc = 14,
+		},
+	},
+	{
+		.name = "cursor",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x340,
+		.id = 3,
+		.type = ATMEL_HLCDC_CURSOR_LAYER,
+		.nconfigs = 10,
+		.max_width = 128,
+		.max_height = 128,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.xstride = { 4 },
+			.default_color = 6,
+			.chroma_key = 7,
+			.chroma_key_mask = 8,
+			.general_config = 9,
+		},
+	},
+};
+
+static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9x5 = {
+	.min_width = 0,
+	.min_height = 0,
+	.max_width = 800,
+	.max_height = 600,
+	.nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9x5_layers),
+	.layers = atmel_hlcdc_at91sam9x5_layers,
+};
+
 static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
 	{
 		.name = "base",
@@ -132,11 +241,105 @@
 	.layers = atmel_hlcdc_sama5d3_layers,
 };
 
+static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = {
+	{
+		.name = "base",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x40,
+		.id = 0,
+		.type = ATMEL_HLCDC_BASE_LAYER,
+		.nconfigs = 7,
+		.layout = {
+			.xstride = { 2 },
+			.default_color = 3,
+			.general_config = 4,
+			.disc_pos = 5,
+			.disc_size = 6,
+		},
+	},
+	{
+		.name = "overlay1",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x140,
+		.id = 1,
+		.type = ATMEL_HLCDC_OVERLAY_LAYER,
+		.nconfigs = 10,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.xstride = { 4 },
+			.pstride = { 5 },
+			.default_color = 6,
+			.chroma_key = 7,
+			.chroma_key_mask = 8,
+			.general_config = 9,
+		},
+	},
+	{
+		.name = "overlay2",
+		.formats = &atmel_hlcdc_plane_rgb_formats,
+		.regs_offset = 0x240,
+		.id = 2,
+		.type = ATMEL_HLCDC_OVERLAY_LAYER,
+		.nconfigs = 10,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.xstride = { 4 },
+			.pstride = { 5 },
+			.default_color = 6,
+			.chroma_key = 7,
+			.chroma_key_mask = 8,
+			.general_config = 9,
+		},
+	},
+	{
+		.name = "high-end-overlay",
+		.formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
+		.regs_offset = 0x340,
+		.id = 3,
+		.type = ATMEL_HLCDC_OVERLAY_LAYER,
+		.nconfigs = 42,
+		.layout = {
+			.pos = 2,
+			.size = 3,
+			.memsize = 4,
+			.xstride = { 5, 7 },
+			.pstride = { 6, 8 },
+			.default_color = 9,
+			.chroma_key = 10,
+			.chroma_key_mask = 11,
+			.general_config = 12,
+			.csc = 14,
+		},
+	},
+};
+
+static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d4 = {
+	.min_width = 0,
+	.min_height = 0,
+	.max_width = 2048,
+	.max_height = 2048,
+	.nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d4_layers),
+	.layers = atmel_hlcdc_sama5d4_layers,
+};
 static const struct of_device_id atmel_hlcdc_of_match[] = {
 	{
+		.compatible = "atmel,at91sam9n12-hlcdc",
+		.data = &atmel_hlcdc_dc_at91sam9n12,
+	},
+	{
+		.compatible = "atmel,at91sam9x5-hlcdc",
+		.data = &atmel_hlcdc_dc_at91sam9x5,
+	},
+	{
 		.compatible = "atmel,sama5d3-hlcdc",
 		.data = &atmel_hlcdc_dc_sama5d3,
 	},
+	{
+		.compatible = "atmel,sama5d4-hlcdc",
+		.data = &atmel_hlcdc_dc_sama5d4,
+	},
 	{ /* sentinel */ },
 };
 
@@ -485,7 +688,9 @@
 };
 
 static struct drm_driver atmel_hlcdc_dc_driver = {
-	.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+	.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
+			   DRIVER_MODESET | DRIVER_PRIME |
+			   DRIVER_ATOMIC,
 	.preclose = atmel_hlcdc_dc_preclose,
 	.lastclose = atmel_hlcdc_dc_lastclose,
 	.irq_handler = atmel_hlcdc_dc_irq_handler,
@@ -497,6 +702,15 @@
 	.disable_vblank = atmel_hlcdc_dc_disable_vblank,
 	.gem_free_object = drm_gem_cma_free_object,
 	.gem_vm_ops = &drm_gem_cma_vm_ops,
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+	.gem_prime_vmap = drm_gem_cma_prime_vmap,
+	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+	.gem_prime_mmap = drm_gem_cma_prime_mmap,
 	.dumb_create = drm_gem_cma_dumb_create,
 	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
 	.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 9c45130..067e4c1 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -126,12 +126,16 @@
 
 	if (info->num_bus_formats) {
 		switch (info->bus_formats[0]) {
+		case MEDIA_BUS_FMT_RGB565_1X16:
+			cfg |= ATMEL_HLCDC_CONNECTOR_RGB565 << 8;
+			break;
 		case MEDIA_BUS_FMT_RGB666_1X18:
 			cfg |= ATMEL_HLCDC_CONNECTOR_RGB666 << 8;
 			break;
 		case MEDIA_BUS_FMT_RGB888_1X24:
 			cfg |= ATMEL_HLCDC_CONNECTOR_RGB888 << 8;
 			break;
+		case MEDIA_BUS_FMT_RGB444_1X12:
 		default:
 			break;
 		}
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 98837bd..7f1a360 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -109,7 +109,7 @@
 
 	if (bochs->fb.initialized) {
 		console_lock();
-		fb_set_suspend(bochs->fb.helper.fbdev, 1);
+		drm_fb_helper_set_suspend(&bochs->fb.helper, 1);
 		console_unlock();
 	}
 
@@ -126,7 +126,7 @@
 
 	if (bochs->fb.initialized) {
 		console_lock();
-		fb_set_suspend(bochs->fb.helper.fbdev, 0);
+		drm_fb_helper_set_suspend(&bochs->fb.helper, 0);
 		console_unlock();
 	}
 
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 976d979..09a0637 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -24,9 +24,9 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = sys_fillrect,
-	.fb_copyarea = sys_copyarea,
-	.fb_imageblit = sys_imageblit,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -56,11 +56,9 @@
 {
 	struct bochs_device *bochs =
 		container_of(helper, struct bochs_device, fb.helper);
-	struct drm_device *dev = bochs->dev;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd;
-	struct device *device = &dev->pdev->dev;
 	struct drm_gem_object *gobj = NULL;
 	struct bochs_bo *bo = NULL;
 	int size, ret;
@@ -106,22 +104,23 @@
 	ttm_bo_unreserve(&bo->bo);
 
 	/* init fb device */
-	info = framebuffer_alloc(0, device);
-	if (info == NULL)
-		return -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info))
+		return PTR_ERR(info);
 
 	info->par = &bochs->fb.helper;
 
 	ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj);
-	if (ret)
+	if (ret) {
+		drm_fb_helper_release_fbi(helper);
 		return ret;
+	}
 
 	bochs->fb.size = size;
 
 	/* setup helper */
 	fb = &bochs->fb.gfb.base;
 	bochs->fb.helper.fb = fb;
-	bochs->fb.helper.fbdev = info;
 
 	strcpy(info->fix.id, "bochsdrmfb");
 
@@ -139,30 +138,17 @@
 	info->fix.smem_start = 0;
 	info->fix.smem_len = size;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
-		return -ENOMEM;
-	}
-
 	return 0;
 }
 
 static int bochs_fbdev_destroy(struct bochs_device *bochs)
 {
 	struct bochs_framebuffer *gfb = &bochs->fb.gfb;
-	struct fb_info *info;
 
 	DRM_DEBUG_DRIVER("\n");
 
-	if (bochs->fb.helper.fbdev) {
-		info = bochs->fb.helper.fbdev;
-
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&bochs->fb.helper);
+	drm_fb_helper_release_fbi(&bochs->fb.helper);
 
 	if (gfb->obj) {
 		drm_gem_object_unreference_unlocked(gfb->obj);
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 66286ff..f69e6bf 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -454,25 +454,17 @@
 			   uint32_t handle, uint64_t *offset)
 {
 	struct drm_gem_object *obj;
-	int ret;
 	struct bochs_bo *bo;
 
-	mutex_lock(&dev->struct_mutex);
 	obj = drm_gem_object_lookup(dev, file, handle);
-	if (obj == NULL) {
-		ret = -ENOENT;
-		goto out_unlock;
-	}
+	if (obj == NULL)
+		return -ENOENT;
 
 	bo = gem_to_bochs_bo(obj);
 	*offset = bochs_bo_mmap_offset(bo);
 
-	drm_gem_object_unreference(obj);
-	ret = 0;
-out_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-
+	drm_gem_object_unreference_unlocked(obj);
+	return 0;
 }
 
 /* ---------------------------------------------------------------------- */
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index acef322..2de52a5 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -1,24 +1,32 @@
+config DRM_BRIDGE
+	def_bool y
+	depends on DRM
+	help
+	  Bridge registration and lookup framework.
+
+menu "Display Interface Bridges"
+	depends on DRM && DRM_BRIDGE
+
 config DRM_DW_HDMI
 	tristate
-	depends on DRM
 	select DRM_KMS_HELPER
 
-config DRM_PTN3460
-	tristate "PTN3460 DP/LVDS bridge"
-	depends on DRM
+config DRM_NXP_PTN3460
+	tristate "NXP PTN3460 DP/LVDS bridge"
 	depends on OF
 	select DRM_KMS_HELPER
 	select DRM_PANEL
 	---help---
-	  ptn3460 eDP-LVDS bridge chip driver.
+	  NXP PTN3460 eDP-LVDS bridge chip driver.
 
-config DRM_PS8622
+config DRM_PARADE_PS8622
 	tristate "Parade eDP/LVDS bridge"
-	depends on DRM
 	depends on OF
 	select DRM_PANEL
 	select DRM_KMS_HELPER
 	select BACKLIGHT_LCD_SUPPORT
 	select BACKLIGHT_CLASS_DEVICE
 	---help---
-	  parade eDP-LVDS bridge chip driver.
+	  Parade eDP-LVDS bridge chip driver.
+
+endmenu
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 8dfebd9..e2eef1c 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,5 +1,5 @@
 ccflags-y := -Iinclude/drm
 
-obj-$(CONFIG_DRM_PS8622) += ps8622.o
-obj-$(CONFIG_DRM_PTN3460) += ptn3460.o
 obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o
+obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
+obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.c b/drivers/gpu/drm/bridge/dw_hdmi.c
index 816d104..0083d4e 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.c
+++ b/drivers/gpu/drm/bridge/dw_hdmi.c
@@ -18,6 +18,7 @@
 #include <linux/hdmi.h>
 #include <linux/mutex.h>
 #include <linux/of_device.h>
+#include <linux/spinlock.h>
 
 #include <drm/drm_of.h>
 #include <drm/drmP.h>
@@ -81,10 +82,6 @@
 };
 
 struct hdmi_vmode {
-	bool mdvi;
-	bool mhsyncpolarity;
-	bool mvsyncpolarity;
-	bool minterlaced;
 	bool mdataenablepolarity;
 
 	unsigned int mpixelclock;
@@ -123,12 +120,20 @@
 	bool phy_enabled;
 	struct drm_display_mode previous_mode;
 
-	struct regmap *regmap;
 	struct i2c_adapter *ddc;
 	void __iomem *regs;
+	bool sink_is_hdmi;
+	bool sink_has_audio;
 
+	struct mutex mutex;		/* for state below and previous_mode */
+	bool disabled;			/* DRM has disabled our bridge */
+
+	spinlock_t audio_lock;
 	struct mutex audio_mutex;
 	unsigned int sample_rate;
+	unsigned int audio_cts;
+	unsigned int audio_n;
+	bool audio_enable;
 	int ratio;
 
 	void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
@@ -335,42 +340,76 @@
 }
 
 static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
-				     unsigned long pixel_clk)
+	unsigned long pixel_clk, unsigned int sample_rate, unsigned int ratio)
 {
-	unsigned int clk_n, clk_cts;
+	unsigned int n, cts;
 
-	clk_n = hdmi_compute_n(hdmi->sample_rate, pixel_clk,
-			       hdmi->ratio);
-	clk_cts = hdmi_compute_cts(hdmi->sample_rate, pixel_clk,
-				   hdmi->ratio);
-
-	if (!clk_cts) {
-		dev_dbg(hdmi->dev, "%s: pixel clock not supported: %lu\n",
-			__func__, pixel_clk);
-		return;
+	n = hdmi_compute_n(sample_rate, pixel_clk, ratio);
+	cts = hdmi_compute_cts(sample_rate, pixel_clk, ratio);
+	if (!cts) {
+		dev_err(hdmi->dev,
+			"%s: pixel clock/sample rate not supported: %lu Hz / %u Hz\n",
+			__func__, pixel_clk, sample_rate);
 	}
 
-	dev_dbg(hdmi->dev, "%s: samplerate=%d  ratio=%d  pixelclk=%lu  N=%d cts=%d\n",
-		__func__, hdmi->sample_rate, hdmi->ratio,
-		pixel_clk, clk_n, clk_cts);
+	dev_dbg(hdmi->dev, "%s: samplerate=%uHz ratio=%d pixelclk=%luHz N=%d cts=%d\n",
+		__func__, sample_rate, ratio, pixel_clk, n, cts);
 
-	hdmi_set_cts_n(hdmi, clk_cts, clk_n);
+	spin_lock_irq(&hdmi->audio_lock);
+	hdmi->audio_n = n;
+	hdmi->audio_cts = cts;
+	hdmi_set_cts_n(hdmi, cts, hdmi->audio_enable ? n : 0);
+	spin_unlock_irq(&hdmi->audio_lock);
 }
 
 static void hdmi_init_clk_regenerator(struct dw_hdmi *hdmi)
 {
 	mutex_lock(&hdmi->audio_mutex);
-	hdmi_set_clk_regenerator(hdmi, 74250000);
+	hdmi_set_clk_regenerator(hdmi, 74250000, hdmi->sample_rate,
+				 hdmi->ratio);
 	mutex_unlock(&hdmi->audio_mutex);
 }
 
 static void hdmi_clk_regenerator_update_pixel_clock(struct dw_hdmi *hdmi)
 {
 	mutex_lock(&hdmi->audio_mutex);
-	hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock);
+	hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock,
+				 hdmi->sample_rate, hdmi->ratio);
 	mutex_unlock(&hdmi->audio_mutex);
 }
 
+void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate)
+{
+	mutex_lock(&hdmi->audio_mutex);
+	hdmi->sample_rate = rate;
+	hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock,
+				 hdmi->sample_rate, hdmi->ratio);
+	mutex_unlock(&hdmi->audio_mutex);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_rate);
+
+void dw_hdmi_audio_enable(struct dw_hdmi *hdmi)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi->audio_lock, flags);
+	hdmi->audio_enable = true;
+	hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n);
+	spin_unlock_irqrestore(&hdmi->audio_lock, flags);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_audio_enable);
+
+void dw_hdmi_audio_disable(struct dw_hdmi *hdmi)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi->audio_lock, flags);
+	hdmi->audio_enable = false;
+	hdmi_set_cts_n(hdmi, hdmi->audio_cts, 0);
+	spin_unlock_irqrestore(&hdmi->audio_lock, flags);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_audio_disable);
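
A hypothetical codec-side user of the three new audio entry points: setting the sample rate recomputes N/CTS under audio_mutex, and enable/disable then writes the real or zero N value under audio_lock:

	static void example_audio_start(struct dw_hdmi *hdmi)
	{
		dw_hdmi_set_sample_rate(hdmi, 48000);	/* rate in Hz, illustrative */
		dw_hdmi_audio_enable(hdmi);
	}

	static void example_audio_stop(struct dw_hdmi *hdmi)
	{
		dw_hdmi_audio_disable(hdmi);	/* forces N = 0, muting regeneration */
	}
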
+
 /*
  * this submodule is responsible for the video data synchronization.
  * for example, for RGB 4:4:4 input, the data map is defined as
@@ -701,9 +740,9 @@
 	return 0;
 }
 
-static void dw_hdmi_phy_enable_power(struct dw_hdmi *hdmi, u8 enable)
+static void dw_hdmi_phy_enable_powerdown(struct dw_hdmi *hdmi, bool enable)
 {
-	hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0,
+	hdmi_mask_writeb(hdmi, !enable, HDMI_PHY_CONF0,
 			 HDMI_PHY_CONF0_PDZ_OFFSET,
 			 HDMI_PHY_CONF0_PDZ_MASK);
 }
@@ -753,12 +792,12 @@
 static int hdmi_phy_configure(struct dw_hdmi *hdmi, unsigned char prep,
 			      unsigned char res, int cscon)
 {
-	unsigned res_idx, i;
+	unsigned res_idx;
 	u8 val, msec;
-	const struct dw_hdmi_plat_data *plat_data = hdmi->plat_data;
-	const struct dw_hdmi_mpll_config *mpll_config = plat_data->mpll_cfg;
-	const struct dw_hdmi_curr_ctrl *curr_ctrl = plat_data->cur_ctr;
-	const struct dw_hdmi_phy_config *phy_config = plat_data->phy_config;
+	const struct dw_hdmi_plat_data *pdata = hdmi->plat_data;
+	const struct dw_hdmi_mpll_config *mpll_config = pdata->mpll_cfg;
+	const struct dw_hdmi_curr_ctrl *curr_ctrl = pdata->cur_ctr;
+	const struct dw_hdmi_phy_config *phy_config = pdata->phy_config;
 
 	if (prep)
 		return -EINVAL;
@@ -778,6 +817,30 @@
 		return -EINVAL;
 	}
 
+	/* PLL/MPLL Cfg - always match on final entry */
+	for (; mpll_config->mpixelclock != ~0UL; mpll_config++)
+		if (hdmi->hdmi_data.video_mode.mpixelclock <=
+		    mpll_config->mpixelclock)
+			break;
+
+	for (; curr_ctrl->mpixelclock != ~0UL; curr_ctrl++)
+		if (hdmi->hdmi_data.video_mode.mpixelclock <=
+		    curr_ctrl->mpixelclock)
+			break;
+
+	for (; phy_config->mpixelclock != ~0UL; phy_config++)
+		if (hdmi->hdmi_data.video_mode.mpixelclock <=
+		    phy_config->mpixelclock)
+			break;
+
+	if (mpll_config->mpixelclock == ~0UL ||
+	    curr_ctrl->mpixelclock == ~0UL ||
+	    phy_config->mpixelclock == ~0UL) {
+		dev_err(hdmi->dev, "Pixel clock %d - unsupported by HDMI\n",
+			hdmi->hdmi_data.video_mode.mpixelclock);
+		return -EINVAL;
+	}
+
 	/* Enable csc path */
 	if (cscon)
 		val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH;
@@ -803,48 +866,23 @@
 		    HDMI_PHY_I2CM_SLAVE_ADDR);
 	hdmi_phy_test_clear(hdmi, 0);
 
-	/* PLL/MPLL Cfg - always match on final entry */
-	for (i = 0; mpll_config[i].mpixelclock != (~0UL); i++)
-		if (hdmi->hdmi_data.video_mode.mpixelclock <=
-		    mpll_config[i].mpixelclock)
-			break;
-
-	hdmi_phy_i2c_write(hdmi, mpll_config[i].res[res_idx].cpce, 0x06);
-	hdmi_phy_i2c_write(hdmi, mpll_config[i].res[res_idx].gmp, 0x15);
-
-	for (i = 0; curr_ctrl[i].mpixelclock != (~0UL); i++)
-		if (hdmi->hdmi_data.video_mode.mpixelclock <=
-		    curr_ctrl[i].mpixelclock)
-			break;
-
-	if (curr_ctrl[i].mpixelclock == (~0UL)) {
-		dev_err(hdmi->dev, "Pixel clock %d - unsupported by HDMI\n",
-			hdmi->hdmi_data.video_mode.mpixelclock);
-		return -EINVAL;
-	}
+	hdmi_phy_i2c_write(hdmi, mpll_config->res[res_idx].cpce, 0x06);
+	hdmi_phy_i2c_write(hdmi, mpll_config->res[res_idx].gmp, 0x15);
 
 	/* CURRCTRL */
-	hdmi_phy_i2c_write(hdmi, curr_ctrl[i].curr[res_idx], 0x10);
+	hdmi_phy_i2c_write(hdmi, curr_ctrl->curr[res_idx], 0x10);
 
 	hdmi_phy_i2c_write(hdmi, 0x0000, 0x13);  /* PLLPHBYCTRL */
 	hdmi_phy_i2c_write(hdmi, 0x0006, 0x17);
 
-	for (i = 0; phy_config[i].mpixelclock != (~0UL); i++)
-		if (hdmi->hdmi_data.video_mode.mpixelclock <=
-		    phy_config[i].mpixelclock)
-			break;
-
-	/* RESISTANCE TERM 133Ohm Cfg */
-	hdmi_phy_i2c_write(hdmi, phy_config[i].term, 0x19);  /* TXTERM */
-	/* PREEMP Cgf 0.00 */
-	hdmi_phy_i2c_write(hdmi, phy_config[i].sym_ctr, 0x09); /* CKSYMTXCTRL */
-	/* TX/CK LVL 10 */
-	hdmi_phy_i2c_write(hdmi, phy_config[i].vlev_ctr, 0x0E); /* VLEVCTRL */
+	hdmi_phy_i2c_write(hdmi, phy_config->term, 0x19);  /* TXTERM */
+	hdmi_phy_i2c_write(hdmi, phy_config->sym_ctr, 0x09); /* CKSYMTXCTRL */
+	hdmi_phy_i2c_write(hdmi, phy_config->vlev_ctr, 0x0E); /* VLEVCTRL */
 
 	/* REMOVE CLK TERM */
 	hdmi_phy_i2c_write(hdmi, 0x8000, 0x05);  /* CKCALCTRL */
 
-	dw_hdmi_phy_enable_power(hdmi, 1);
+	dw_hdmi_phy_enable_powerdown(hdmi, false);
 
 	/* toggle TMDS enable */
 	dw_hdmi_phy_enable_tmds(hdmi, 0);
@@ -879,18 +917,17 @@
 static int dw_hdmi_phy_init(struct dw_hdmi *hdmi)
 {
 	int i, ret;
-	bool cscon = false;
+	bool cscon;
 
 	/*check csc whether needed activated in HDMI mode */
-	cscon = (is_color_space_conversion(hdmi) &&
-			!hdmi->hdmi_data.video_mode.mdvi);
+	cscon = hdmi->sink_is_hdmi && is_color_space_conversion(hdmi);
 
 	/* HDMI Phy spec says to do the phy initialization sequence twice */
 	for (i = 0; i < 2; i++) {
 		dw_hdmi_phy_sel_data_en_pol(hdmi, 1);
 		dw_hdmi_phy_sel_interface_control(hdmi, 0);
 		dw_hdmi_phy_enable_tmds(hdmi, 0);
-		dw_hdmi_phy_enable_power(hdmi, 0);
+		dw_hdmi_phy_enable_powerdown(hdmi, true);
 
 		/* Enable CSC */
 		ret = hdmi_phy_configure(hdmi, 0, 8, cscon);
@@ -921,74 +958,76 @@
 		  HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK, HDMI_A_HDCPCFG1);
 }
 
-static void hdmi_config_AVI(struct dw_hdmi *hdmi)
+static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
 {
-	u8 val, pix_fmt, under_scan;
-	u8 act_ratio, coded_ratio, colorimetry, ext_colorimetry;
-	bool aspect_16_9;
+	struct hdmi_avi_infoframe frame;
+	u8 val;
 
-	aspect_16_9 = false; /* FIXME */
+	/* Initialise info frame from DRM mode */
+	drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
 
-	/* AVI Data Byte 1 */
 	if (hdmi->hdmi_data.enc_out_format == YCBCR444)
-		pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR444;
+		frame.colorspace = HDMI_COLORSPACE_YUV444;
 	else if (hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS)
-		pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR422;
+		frame.colorspace = HDMI_COLORSPACE_YUV422;
 	else
-		pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_RGB;
-
-		under_scan =  HDMI_FC_AVICONF0_SCAN_INFO_NODATA;
-
-	/*
-	 * Active format identification data is present in the AVI InfoFrame.
-	 * Under scan info, no bar data
-	 */
-	val = pix_fmt | under_scan |
-		HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT |
-		HDMI_FC_AVICONF0_BAR_DATA_NO_DATA;
-
-	hdmi_writeb(hdmi, val, HDMI_FC_AVICONF0);
-
-	/* AVI Data Byte 2 -Set the Aspect Ratio */
-	if (aspect_16_9) {
-		act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9;
-		coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9;
-	} else {
-		act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3;
-		coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3;
-	}
+		frame.colorspace = HDMI_COLORSPACE_RGB;
 
 	/* Set up colorimetry */
 	if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
-		colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO;
+		frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
 		if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
-			ext_colorimetry =
-				HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+			frame.extended_colorimetry =
+				HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
 		else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
-			ext_colorimetry =
-				HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709;
+			frame.extended_colorimetry =
+				HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
 	} else if (hdmi->hdmi_data.enc_out_format != RGB) {
-		if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
-			colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE;
-		else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
-			colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR;
-		ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+		frame.colorimetry = hdmi->hdmi_data.colorimetry;
+		frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
 	} else { /* Carries no data */
-		colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA;
-		ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
+		frame.colorimetry = HDMI_COLORIMETRY_NONE;
+		frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
 	}
 
-	val = colorimetry | coded_ratio | act_ratio;
+	frame.scan_mode = HDMI_SCAN_MODE_NONE;
+
+	/*
+	 * The Designware IP uses a different byte format from standard
+	 * AVI info frames, though generally the bits are in the correct
+	 * bytes.
+	 */
+
+	/*
+	 * AVI data byte 1 differences: Colorspace in bits 4,5 rather than 5,6,
+	 * active aspect present in bit 6 rather than 4.
+	 */
+	val = (frame.colorspace & 3) << 4 | (frame.scan_mode & 0x3);
+	if (frame.active_aspect & 15)
+		val |= HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT;
+	if (frame.top_bar || frame.bottom_bar)
+		val |= HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR;
+	if (frame.left_bar || frame.right_bar)
+		val |= HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR;
+	hdmi_writeb(hdmi, val, HDMI_FC_AVICONF0);
+
+	/* AVI data byte 2 differences: none */
+	val = ((frame.colorimetry & 0x3) << 6) |
+	      ((frame.picture_aspect & 0x3) << 4) |
+	      (frame.active_aspect & 0xf);
 	hdmi_writeb(hdmi, val, HDMI_FC_AVICONF1);
 
-	/* AVI Data Byte 3 */
-	val = HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA | ext_colorimetry |
-		HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT |
-		HDMI_FC_AVICONF2_SCALING_NONE;
+	/* AVI data byte 3 differences: none */
+	val = ((frame.extended_colorimetry & 0x7) << 4) |
+	      ((frame.quantization_range & 0x3) << 2) |
+	      (frame.nups & 0x3);
+	if (frame.itc)
+		val |= HDMI_FC_AVICONF2_IT_CONTENT_VALID;
 	hdmi_writeb(hdmi, val, HDMI_FC_AVICONF2);
 
-	/* AVI Data Byte 4 */
-	hdmi_writeb(hdmi, hdmi->vic, HDMI_FC_AVIVID);
+	/* AVI data byte 4 differences: none */
+	val = frame.video_code & 0x7f;
+	hdmi_writeb(hdmi, val, HDMI_FC_AVIVID);
 
 	/* AVI Data Byte 5- set up input and output pixel repetition */
 	val = (((hdmi->hdmi_data.video_mode.mpixelrepetitioninput + 1) <<
@@ -999,20 +1038,23 @@
 		HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK);
 	hdmi_writeb(hdmi, val, HDMI_FC_PRCONF);
 
-	/* IT Content and quantization range = don't care */
-	val = HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS |
-		HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED;
+	/*
+	 * AVI data byte 5 differences: content type in bits 0,1 rather than 4,5,
+	 * ycc range in bits 2,3 rather than 6,7
+	 */
+	val = ((frame.ycc_quantization_range & 0x3) << 2) |
+	      (frame.content_type & 0x3);
 	hdmi_writeb(hdmi, val, HDMI_FC_AVICONF3);
 
 	/* AVI Data Bytes 6-13 */
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVIETB0);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVIETB1);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVISBB0);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVISBB1);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVIELB0);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVIELB1);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVISRB0);
-	hdmi_writeb(hdmi, 0, HDMI_FC_AVISRB1);
+	hdmi_writeb(hdmi, frame.top_bar & 0xff, HDMI_FC_AVIETB0);
+	hdmi_writeb(hdmi, (frame.top_bar >> 8) & 0xff, HDMI_FC_AVIETB1);
+	hdmi_writeb(hdmi, frame.bottom_bar & 0xff, HDMI_FC_AVISBB0);
+	hdmi_writeb(hdmi, (frame.bottom_bar >> 8) & 0xff, HDMI_FC_AVISBB1);
+	hdmi_writeb(hdmi, frame.left_bar & 0xff, HDMI_FC_AVIELB0);
+	hdmi_writeb(hdmi, (frame.left_bar >> 8) & 0xff, HDMI_FC_AVIELB1);
+	hdmi_writeb(hdmi, frame.right_bar & 0xff, HDMI_FC_AVISRB0);
+	hdmi_writeb(hdmi, (frame.right_bar >> 8) & 0xff, HDMI_FC_AVISRB1);
 }
 
 static void hdmi_av_composer(struct dw_hdmi *hdmi,
@@ -1022,9 +1064,6 @@
 	struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
 	int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
 
-	vmode->mhsyncpolarity = !!(mode->flags & DRM_MODE_FLAG_PHSYNC);
-	vmode->mvsyncpolarity = !!(mode->flags & DRM_MODE_FLAG_PVSYNC);
-	vmode->minterlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
 	vmode->mpixelclock = mode->clock * 1000;
 
 	dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock);
@@ -1034,13 +1073,13 @@
 		HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE :
 		HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE);
 
-	inv_val |= (vmode->mvsyncpolarity ?
+	inv_val |= mode->flags & DRM_MODE_FLAG_PVSYNC ?
 		HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH :
-		HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW);
+		HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW;
 
-	inv_val |= (vmode->mhsyncpolarity ?
+	inv_val |= mode->flags & DRM_MODE_FLAG_PHSYNC ?
 		HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH :
-		HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW);
+		HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW;
 
 	inv_val |= (vmode->mdataenablepolarity ?
 		HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH :
@@ -1049,17 +1088,17 @@
 	if (hdmi->vic == 39)
 		inv_val |= HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH;
 	else
-		inv_val |= (vmode->minterlaced ?
+		inv_val |= mode->flags & DRM_MODE_FLAG_INTERLACE ?
 			HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH :
-			HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW);
+			HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW;
 
-	inv_val |= (vmode->minterlaced ?
+	inv_val |= mode->flags & DRM_MODE_FLAG_INTERLACE ?
 		HDMI_FC_INVIDCONF_IN_I_P_INTERLACED :
-		HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE);
+		HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE;
 
-	inv_val |= (vmode->mdvi ?
-		HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE :
-		HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE);
+	inv_val |= hdmi->sink_is_hdmi ?
+		HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE :
+		HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE;
 
 	hdmi_writeb(hdmi, inv_val, HDMI_FC_INVIDCONF);
 
@@ -1105,7 +1144,7 @@
 		return;
 
 	dw_hdmi_phy_enable_tmds(hdmi, 0);
-	dw_hdmi_phy_enable_power(hdmi, 0);
+	dw_hdmi_phy_enable_powerdown(hdmi, true);
 
 	hdmi->phy_enabled = false;
 }
@@ -1186,10 +1225,8 @@
 
 	if (!hdmi->vic) {
 		dev_dbg(hdmi->dev, "Non-CEA mode used in HDMI\n");
-		hdmi->hdmi_data.video_mode.mdvi = true;
 	} else {
 		dev_dbg(hdmi->dev, "CEA mode used vic=%d\n", hdmi->vic);
-		hdmi->hdmi_data.video_mode.mdvi = false;
 	}
 
 	if ((hdmi->vic == 6) || (hdmi->vic == 7) ||
@@ -1200,18 +1237,7 @@
 	else
 		hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
 
-	if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
-	    (hdmi->vic == 12) || (hdmi->vic == 13) ||
-	    (hdmi->vic == 14) || (hdmi->vic == 15) ||
-	    (hdmi->vic == 25) || (hdmi->vic == 26) ||
-	    (hdmi->vic == 27) || (hdmi->vic == 28) ||
-	    (hdmi->vic == 29) || (hdmi->vic == 30) ||
-	    (hdmi->vic == 35) || (hdmi->vic == 36) ||
-	    (hdmi->vic == 37) || (hdmi->vic == 38))
-		hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 1;
-	else
-		hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
-
+	hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
 	hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0;
 
 	/* TODO: Get input format from IPU (via FB driver interface) */
@@ -1235,18 +1261,22 @@
 	/* HDMI Initialization Step B.3 */
 	dw_hdmi_enable_video_path(hdmi);
 
-	/* not for DVI mode */
-	if (hdmi->hdmi_data.video_mode.mdvi) {
-		dev_dbg(hdmi->dev, "%s DVI mode\n", __func__);
-	} else {
-		dev_dbg(hdmi->dev, "%s CEA mode\n", __func__);
+	if (hdmi->sink_has_audio) {
+		dev_dbg(hdmi->dev, "sink has audio support\n");
 
 		/* HDMI Initialization Step E - Configure audio */
 		hdmi_clk_regenerator_update_pixel_clock(hdmi);
 		hdmi_enable_audio_clk(hdmi);
+	}
+
+	/* not for DVI mode */
+	if (hdmi->sink_is_hdmi) {
+		dev_dbg(hdmi->dev, "%s HDMI mode\n", __func__);
 
 		/* HDMI Initialization Step F - Configure AVI InfoFrame */
-		hdmi_config_AVI(hdmi);
+		hdmi_config_AVI(hdmi, mode);
+	} else {
+		dev_dbg(hdmi->dev, "%s DVI mode\n", __func__);
 	}
 
 	hdmi_video_packetize(hdmi);
@@ -1255,7 +1285,7 @@
 	hdmi_tx_hdcp_config(hdmi);
 
 	dw_hdmi_clear_overflow(hdmi);
-	if (hdmi->cable_plugin && !hdmi->hdmi_data.video_mode.mdvi)
+	if (hdmi->cable_plugin && hdmi->sink_is_hdmi)
 		hdmi_enable_overflow_interrupts(hdmi);
 
 	return 0;
@@ -1348,10 +1378,12 @@
 {
 	struct dw_hdmi *hdmi = bridge->driver_private;
 
-	dw_hdmi_setup(hdmi, mode);
+	mutex_lock(&hdmi->mutex);
 
 	/* Store the display mode for plugin/DPMS poweron events */
 	memcpy(&hdmi->previous_mode, mode, sizeof(hdmi->previous_mode));
+
+	mutex_unlock(&hdmi->mutex);
 }
 
 static bool dw_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
@@ -1365,14 +1397,20 @@
 {
 	struct dw_hdmi *hdmi = bridge->driver_private;
 
+	mutex_lock(&hdmi->mutex);
+	hdmi->disabled = true;
 	dw_hdmi_poweroff(hdmi);
+	mutex_unlock(&hdmi->mutex);
 }
 
 static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
 {
 	struct dw_hdmi *hdmi = bridge->driver_private;
 
+	mutex_lock(&hdmi->mutex);
 	dw_hdmi_poweron(hdmi);
+	hdmi->disabled = false;
+	mutex_unlock(&hdmi->mutex);
 }
 
 static void dw_hdmi_bridge_nop(struct drm_bridge *bridge)
@@ -1405,6 +1443,8 @@
 		dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n",
 			edid->width_cm, edid->height_cm);
 
+		hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
+		hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
 		drm_mode_connector_update_edid_property(connector, edid);
 		ret = drm_add_edid_modes(connector, edid);
 		kfree(edid);
@@ -1423,6 +1463,10 @@
 					   struct dw_hdmi, connector);
 	enum drm_mode_status mode_status = MODE_OK;
 
+	/* We don't support double-clocked modes */
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		return MODE_BAD;
+
 	if (hdmi->plat_data->mode_valid)
 		mode_status = hdmi->plat_data->mode_valid(connector, mode);
 
@@ -1489,21 +1533,21 @@
 	phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0);
 
 	if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
+		hdmi_modb(hdmi, ~phy_int_pol, HDMI_PHY_HPD, HDMI_PHY_POL0);
+		mutex_lock(&hdmi->mutex);
 		if (phy_int_pol & HDMI_PHY_HPD) {
 			dev_dbg(hdmi->dev, "EVENT=plugin\n");
 
-			hdmi_modb(hdmi, 0, HDMI_PHY_HPD, HDMI_PHY_POL0);
-
-			dw_hdmi_poweron(hdmi);
+			if (!hdmi->disabled)
+				dw_hdmi_poweron(hdmi);
 		} else {
 			dev_dbg(hdmi->dev, "EVENT=plugout\n");
 
-			hdmi_modb(hdmi, HDMI_PHY_HPD, HDMI_PHY_HPD,
-				  HDMI_PHY_POL0);
-
-			dw_hdmi_poweroff(hdmi);
+			if (!hdmi->disabled)
+				dw_hdmi_poweroff(hdmi);
 		}
-		drm_helper_hpd_irq_event(hdmi->connector.dev);
+		mutex_unlock(&hdmi->mutex);
+		drm_helper_hpd_irq_event(hdmi->bridge->dev);
 	}
 
 	hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0);
@@ -1570,8 +1614,11 @@
 	hdmi->sample_rate = 48000;
 	hdmi->ratio = 100;
 	hdmi->encoder = encoder;
+	hdmi->disabled = true;
 
+	mutex_init(&hdmi->mutex);
 	mutex_init(&hdmi->audio_mutex);
+	spin_lock_init(&hdmi->audio_lock);
 
 	of_property_read_u32(np, "reg-io-width", &val);
 
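The register writes above repack a standard struct hdmi_avi_infoframe into the
Designware layout. For reference, the byte-1 packing can be exercised in
isolation; a minimal user-space sketch (the struct below is a pared-down
stand-in for struct hdmi_avi_infoframe, and the 0x40 mask reflects the bit-6
placement described in the hunk's comment):

#include <stdint.h>
#include <stdio.h>

/* Bit 6 on the Designware IP, not bit 4 as in a standard AVI infoframe. */
#define HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT 0x40

struct avi_frame {
	uint8_t colorspace;	/* 2 bits, placed at bits 4,5 on this IP */
	uint8_t scan_mode;	/* 2 bits, placed at bits 0,1 */
	uint8_t active_aspect;	/* nonzero => active format info present */
};

static uint8_t pack_aviconf0(const struct avi_frame *f)
{
	uint8_t val = ((f->colorspace & 3) << 4) | (f->scan_mode & 3);

	if (f->active_aspect & 15)
		val |= HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT;
	return val;
}

int main(void)
{
	struct avi_frame f = { .colorspace = 0, .scan_mode = 0, .active_aspect = 8 };

	printf("AVICONF0 = 0x%02x\n", pack_aviconf0(&f));	/* prints 0x40 */
	return 0;
}
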
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.h b/drivers/gpu/drm/bridge/dw_hdmi.h
index 175dbc8..ee7f7ed 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.h
+++ b/drivers/gpu/drm/bridge/dw_hdmi.h
@@ -7,8 +7,8 @@
  * (at your option) any later version.
  */
 
-#ifndef __IMX_HDMI_H__
-#define __IMX_HDMI_H__
+#ifndef __DW_HDMI_H__
+#define __DW_HDMI_H__
 
 /* Identification Registers */
 #define HDMI_DESIGN_ID                          0x0000
@@ -525,7 +525,7 @@
 
 /* I2C Master Registers (E-DDC) */
 #define HDMI_I2CM_SLAVE                         0x7E00
-#define HDMI_I2CMESS                            0x7E01
+#define HDMI_I2CM_ADDRESS                       0x7E01
 #define HDMI_I2CM_DATAO                         0x7E02
 #define HDMI_I2CM_DATAI                         0x7E03
 #define HDMI_I2CM_OPERATION                     0x7E04
@@ -1031,4 +1031,4 @@
 	HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_LOW = 0x0,
 };
 
-#endif /* __IMX_HDMI_H__ */
+#endif /* __DW_HDMI_H__ */
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
similarity index 100%
rename from drivers/gpu/drm/bridge/ptn3460.c
rename to drivers/gpu/drm/bridge/nxp-ptn3460.c
diff --git a/drivers/gpu/drm/bridge/ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
similarity index 100%
rename from drivers/gpu/drm/bridge/ps8622.c
rename to drivers/gpu/drm/bridge/parade-ps8622.c
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index b914003..b1619e2 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -92,7 +92,7 @@
 
 	if (cdev->mode_info.gfbdev) {
 		console_lock();
-		fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 1);
+		drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 1);
 		console_unlock();
 	}
 
@@ -109,7 +109,7 @@
 
 	if (cdev->mode_info.gfbdev) {
 		console_lock();
-		fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 0);
+		drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 0);
 		console_unlock();
 	}
 
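The fb_set_suspend() to drm_fb_helper_set_suspend() conversion is mechanical
for any driver embedding a drm_fb_helper; a hedged sketch of the resulting
resume path (the mydrv_* names are illustrative, not a real driver):

/* Sketch only: assumes a driver-private struct that embeds a
 * drm_fb_helper, as cirrus_fbdev does. */
static int mydrv_resume(struct drm_device *dev)
{
	struct mydrv_device *mdev = dev->dev_private;

	if (mdev->fbdev) {
		console_lock();
		/* 0 = resume; the helper finds its own fb_info */
		drm_fb_helper_set_suspend(&mdev->fbdev->helper, 0);
		console_unlock();
	}
	return 0;
}
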
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 13ddf1c..589103b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -98,7 +98,7 @@
 			 const struct fb_fillrect *rect)
 {
 	struct cirrus_fbdev *afbdev = info->par;
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 	cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
 			 rect->height);
 }
@@ -107,7 +107,7 @@
 			 const struct fb_copyarea *area)
 {
 	struct cirrus_fbdev *afbdev = info->par;
-	sys_copyarea(info, area);
+	drm_fb_helper_sys_copyarea(info, area);
 	cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
 			 area->height);
 }
@@ -116,7 +116,7 @@
 			  const struct fb_image *image)
 {
 	struct cirrus_fbdev *afbdev = info->par;
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 	cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
 			 image->height);
 }
@@ -165,12 +165,10 @@
 {
 	struct cirrus_fbdev *gfbdev =
 		container_of(helper, struct cirrus_fbdev, helper);
-	struct drm_device *dev = gfbdev->helper.dev;
 	struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd;
-	struct device *device = &dev->pdev->dev;
 	void *sysram;
 	struct drm_gem_object *gobj = NULL;
 	struct cirrus_bo *bo = NULL;
@@ -195,9 +193,9 @@
 	if (!sysram)
 		return -ENOMEM;
 
-	info = framebuffer_alloc(0, device);
-	if (info == NULL)
-		return -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info))
+		return PTR_ERR(info);
 
 	info->par = gfbdev;
 
@@ -216,11 +214,9 @@
 
 	/* setup helper */
 	gfbdev->helper.fb = fb;
-	gfbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "cirrusdrmfb");
 
-
 	info->flags = FBINFO_DEFAULT;
 	info->fbops = &cirrusfb_ops;
 
@@ -229,11 +225,6 @@
 			       sizes->fb_height);
 
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out_iounmap;
-	}
 	info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
 	info->apertures->ranges[0].size = cdev->mc.vram_size;
 
@@ -246,13 +237,6 @@
 	info->fix.mmio_start = 0;
 	info->fix.mmio_len = 0;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
-		ret = -ENOMEM;
-		goto out_iounmap;
-	}
-
 	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
 	DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
 	DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
@@ -260,24 +244,15 @@
 	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
 
 	return 0;
-out_iounmap:
-	return ret;
 }
 
 static int cirrus_fbdev_destroy(struct drm_device *dev,
 				struct cirrus_fbdev *gfbdev)
 {
-	struct fb_info *info;
 	struct cirrus_framebuffer *gfb = &gfbdev->gfb;
 
-	if (gfbdev->helper.fbdev) {
-		info = gfbdev->helper.fbdev;
-
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&gfbdev->helper);
+	drm_fb_helper_release_fbi(&gfbdev->helper);
 
 	if (gfb->obj) {
 		drm_gem_object_unreference_unlocked(gfb->obj);
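
The new setup/teardown pairing is the generic pattern the fb helpers now
expect; in outline (a sketch with driver-specific setup elided — the removed
cirrus boilerplate above shows that drm_fb_helper_alloc_fbi() also takes over
the cmap and aperture allocation):

/* Setup: allocates the fb_info, color map and apertures, and stores
 * the result in helper->fbdev. */
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info))
	return PTR_ERR(info);

/* ... driver-specific fb_info setup ... */

/* Teardown mirrors setup: */
drm_fb_helper_unregister_fbi(&gfbdev->helper);
drm_fb_helper_release_fbi(&gfbdev->helper);
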
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index e4b9766..055fd86 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -293,25 +293,18 @@
 		     uint64_t *offset)
 {
 	struct drm_gem_object *obj;
-	int ret;
 	struct cirrus_bo *bo;
 
-	mutex_lock(&dev->struct_mutex);
 	obj = drm_gem_object_lookup(dev, file, handle);
-	if (obj == NULL) {
-		ret = -ENOENT;
-		goto out_unlock;
-	}
+	if (obj == NULL)
+		return -ENOENT;
 
 	bo = gem_to_cirrus_bo(obj);
 	*offset = cirrus_bo_mmap_offset(bo);
 
-	drm_gem_object_unreference(obj);
-	ret = 0;
-out_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	drm_gem_object_unreference_unlocked(obj);
 
+	return 0;
 }
 
 bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
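
Dropping struct_mutex here is safe because drm_gem_object_lookup() takes its
own reference under its own locking; the only requirement is pairing it with
the _unlocked unreference. The generalized pattern, as a sketch (the my_*
names are placeholders):

obj = drm_gem_object_lookup(dev, file, handle);
if (obj == NULL)
	return -ENOENT;

*offset = my_bo_mmap_offset(gem_to_my_bo(obj));	/* placeholders */

/* _unlocked variant: struct_mutex is not held at this point */
drm_gem_object_unreference_unlocked(obj);
return 0;
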
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index f6f2fb5..4349154 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -153,9 +153,15 @@
 		if (!connector)
 			continue;
 
-		WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
-
-		connector->funcs->atomic_destroy_state(connector,
+		/*
+		 * FIXME: Async commits can race with connector unplugging and
+		 * there's currently nothing that prevents cleaning up state for
+		 * deleted connectors. As long as the callback doesn't look at
+		 * the connector we'll be fine though, so make sure that's the
+		 * case by setting all connector pointers to NULL.
+		 */
+		state->connector_states[i]->connector = NULL;
+		connector->funcs->atomic_destroy_state(NULL,
 						       state->connector_states[i]);
 		state->connectors[i] = NULL;
 		state->connector_states[i] = NULL;
@@ -1063,7 +1069,7 @@
 	 * Changed connectors are already in @state, so only need to look at the
 	 * current configuration.
 	 */
-	list_for_each_entry(connector, &config->connector_list, head) {
+	drm_for_each_connector(connector, state->dev) {
 		if (connector->state->crtc != crtc)
 			continue;
 
@@ -1463,24 +1469,18 @@
 
 		if (get_user(obj_id, objs_ptr + copied_objs)) {
 			ret = -EFAULT;
-			goto fail;
+			goto out;
 		}
 
 		obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
 		if (!obj || !obj->properties) {
 			ret = -ENOENT;
-			goto fail;
-		}
-
-		if (obj->type == DRM_MODE_OBJECT_PLANE) {
-			plane = obj_to_plane(obj);
-			plane_mask |= (1 << drm_plane_index(plane));
-			plane->old_fb = plane->fb;
+			goto out;
 		}
 
 		if (get_user(count_props, count_props_ptr + copied_objs)) {
 			ret = -EFAULT;
-			goto fail;
+			goto out;
 		}
 
 		copied_objs++;
@@ -1492,28 +1492,34 @@
 
 			if (get_user(prop_id, props_ptr + copied_props)) {
 				ret = -EFAULT;
-				goto fail;
+				goto out;
 			}
 
 			prop = drm_property_find(dev, prop_id);
 			if (!prop) {
 				ret = -ENOENT;
-				goto fail;
+				goto out;
 			}
 
 			if (copy_from_user(&prop_value,
 					   prop_values_ptr + copied_props,
 					   sizeof(prop_value))) {
 				ret = -EFAULT;
-				goto fail;
+				goto out;
 			}
 
 			ret = atomic_set_prop(state, obj, prop, prop_value);
 			if (ret)
-				goto fail;
+				goto out;
 
 			copied_props++;
 		}
+
+		if (obj->type == DRM_MODE_OBJECT_PLANE && count_props) {
+			plane = obj_to_plane(obj);
+			plane_mask |= (1 << drm_plane_index(plane));
+			plane->old_fb = plane->fb;
+		}
 	}
 
 	if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -1523,7 +1529,7 @@
 			e = create_vblank_event(dev, file_priv, arg->user_data);
 			if (!e) {
 				ret = -ENOMEM;
-				goto fail;
+				goto out;
 			}
 
 			crtc_state->event = e;
@@ -1533,13 +1539,15 @@
 	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
 		ret = drm_atomic_check_only(state);
 		/* _check_only() does not free state, unlike _commit() */
-		drm_atomic_state_free(state);
+		if (!ret)
+			drm_atomic_state_free(state);
 	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
 		ret = drm_atomic_async_commit(state);
 	} else {
 		ret = drm_atomic_commit(state);
 	}
 
+out:
 	/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
 	 * locks (ie. while it is still safe to deref plane->state).  We
 	 * need to do this here because the driver entry points cannot
@@ -1552,41 +1560,35 @@
 				drm_framebuffer_reference(new_fb);
 			plane->fb = new_fb;
 			plane->crtc = plane->state->crtc;
-		} else {
-			plane->old_fb = NULL;
+
+			if (plane->old_fb)
+				drm_framebuffer_unreference(plane->old_fb);
 		}
-		if (plane->old_fb) {
-			drm_framebuffer_unreference(plane->old_fb);
-			plane->old_fb = NULL;
+		plane->old_fb = NULL;
+	}
+
+	if (ret == -EDEADLK) {
+		drm_atomic_state_clear(state);
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}
+
+	if (ret) {
+		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+			for_each_crtc_in_state(state, crtc, crtc_state, i) {
+				if (!crtc_state->event)
+					continue;
+
+				destroy_vblank_event(dev, file_priv,
+						     crtc_state->event);
+			}
 		}
+
+		drm_atomic_state_free(state);
 	}
 
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
 
 	return ret;
-
-fail:
-	if (ret == -EDEADLK)
-		goto backoff;
-
-	if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			destroy_vblank_event(dev, file_priv, crtc_state->event);
-			crtc_state->event = NULL;
-		}
-	}
-
-	drm_atomic_state_free(state);
-
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
-
-	return ret;
-
-backoff:
-	drm_atomic_state_clear(state);
-	drm_modeset_backoff(&ctx);
-
-	goto retry;
 }
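
The restructured exit path is the canonical atomic retry idiom: a single
label for all failures, with the ww-mutex deadlock case handled by backing
off and restarting. In outline (a sketch; the real ioctl interleaves the
property parsing between retry and out):

retry:
	ret = 0;
	/* ... build the atomic state and commit it ... */
out:
	if (ret == -EDEADLK) {
		/* contended ww-mutex: drop locks, keep the (cleared)
		 * state object, and start over */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	if (ret)
		drm_atomic_state_free(state);	/* commit never took ownership */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
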
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 3e65daa3..aecb5d6 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -89,7 +89,7 @@
 
 	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
-	list_for_each_entry(connector, &config->connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		if (connector->state->best_encoder != encoder)
 			continue;
 
@@ -124,7 +124,7 @@
 	if (IS_ERR(crtc_state))
 		return PTR_ERR(crtc_state);
 
-	crtc_state->mode_changed = true;
+	crtc_state->connectors_changed = true;
 
 	list_for_each_entry(connector, &config->connector_list, head) {
 		if (connector->state->best_encoder != encoder)
@@ -174,14 +174,14 @@
 			idx = drm_crtc_index(connector->state->crtc);
 
 			crtc_state = state->crtc_states[idx];
-			crtc_state->mode_changed = true;
+			crtc_state->connectors_changed = true;
 		}
 
 		if (connector_state->crtc) {
 			idx = drm_crtc_index(connector_state->crtc);
 
 			crtc_state = state->crtc_states[idx];
-			crtc_state->mode_changed = true;
+			crtc_state->connectors_changed = true;
 		}
 	}
 
@@ -241,7 +241,7 @@
 	idx = drm_crtc_index(connector_state->crtc);
 
 	crtc_state = state->crtc_states[idx];
-	crtc_state->mode_changed = true;
+	crtc_state->connectors_changed = true;
 
 	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
 			 connector->base.id,
@@ -264,7 +264,8 @@
 	bool ret;
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		if (!crtc_state->mode_changed)
+		if (!crtc_state->mode_changed &&
+		    !crtc_state->connectors_changed)
 			continue;
 
 		drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode);
@@ -306,7 +307,7 @@
 						 encoder->base.id, encoder->name);
 				return ret;
 			}
-		} else {
+		} else if (funcs->mode_fixup) {
 			ret = funcs->mode_fixup(encoder, &crtc_state->mode,
 						&crtc_state->adjusted_mode);
 			if (!ret) {
@@ -320,7 +321,8 @@
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
 		const struct drm_crtc_helper_funcs *funcs;
 
-		if (!crtc_state->mode_changed)
+		if (!crtc_state->mode_changed &&
+		    !crtc_state->connectors_changed)
 			continue;
 
 		funcs = crtc->helper_private;
@@ -346,9 +348,14 @@
  *
  * Check the state object to see if the requested state is physically possible.
  * This does all the crtc and connector related computations for an atomic
- * update. It computes and updates crtc_state->mode_changed, adds any additional
- * connectors needed for full modesets and calls down into ->mode_fixup
- * functions of the driver backend.
+ * update, adds any additional connectors needed for full modesets, and calls
+ * down into the ->mode_fixup functions of the driver backend.
+ *
+ * crtc_state->mode_changed is set when the input mode is changed.
+ * crtc_state->connectors_changed is set when a connector is added or
+ * removed from the crtc.
+ * crtc_state->active_changed is set when crtc_state->active changes,
+ * which is used for dpms.
  *
  * IMPORTANT:
  *
@@ -381,7 +388,17 @@
 		if (crtc->state->enable != crtc_state->enable) {
 			DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n",
 					 crtc->base.id);
+
+			/*
+			 * For clarity this assignment is done here, but
+			 * enable == 0 is only true when there are no
+			 * connectors and a NULL mode.
+			 *
+			 * The converse also holds: enable != 0 iff
+			 * connectors are attached and a mode is set.
+			 */
 			crtc_state->mode_changed = true;
+			crtc_state->connectors_changed = true;
 		}
 	}
 
@@ -456,6 +473,9 @@
 * This does all the plane update related checks by calling into the
  * ->atomic_check hooks provided by the driver.
  *
+ * It also sets crtc_state->planes_changed to indicate that a crtc has
+ * updated planes.
+ *
  * RETURNS
  * Zero for success or -errno
  */
@@ -648,15 +668,29 @@
 	struct drm_crtc_state *old_crtc_state;
 	int i;
 
-	/* clear out existing links */
+	/* clear out existing links and update dpms */
 	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
-		if (!connector->encoder)
-			continue;
+		if (connector->encoder) {
+			WARN_ON(!connector->encoder->crtc);
 
-		WARN_ON(!connector->encoder->crtc);
+			connector->encoder->crtc = NULL;
+			connector->encoder = NULL;
+		}
 
-		connector->encoder->crtc = NULL;
-		connector->encoder = NULL;
+		crtc = connector->state->crtc;
+		if ((!crtc && old_conn_state->crtc) ||
+		    (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
+			struct drm_property *dpms_prop =
+				dev->mode_config.dpms_property;
+			int mode = DRM_MODE_DPMS_OFF;
+
+			if (crtc && crtc->state->active)
+				mode = DRM_MODE_DPMS_ON;
+
+			connector->dpms = mode;
+			drm_object_property_set_value(&connector->base,
+						      dpms_prop, mode);
+		}
 	}
 
 	/* set new links */
@@ -673,10 +707,16 @@
 
 	/* set legacy state in the crtc structure */
 	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		struct drm_plane *primary = crtc->primary;
+
 		crtc->mode = crtc->state->mode;
 		crtc->enabled = crtc->state->enable;
-		crtc->x = crtc->primary->state->src_x >> 16;
-		crtc->y = crtc->primary->state->src_y >> 16;
+
+		if (drm_atomic_get_existing_plane_state(old_state, primary) &&
+		    primary->state->crtc == crtc) {
+			crtc->x = primary->state->src_x >> 16;
+			crtc->y = primary->state->src_y >> 16;
+		}
 
 		if (crtc->state->enable)
 			drm_calc_timestamping_constants(crtc,
@@ -926,7 +966,7 @@
 			continue;
 
 		old_crtc_state->enable = true;
-		old_crtc_state->last_vblank_count = drm_vblank_count(dev, i);
+		old_crtc_state->last_vblank_count = drm_crtc_vblank_count(crtc);
 	}
 
 	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
@@ -935,7 +975,7 @@
 
 		ret = wait_event_timeout(dev->vblank[i].queue,
 				old_crtc_state->last_vblank_count !=
-					drm_vblank_count(dev, i),
+					drm_crtc_vblank_count(crtc),
 				msecs_to_jiffies(50));
 
 		drm_crtc_vblank_put(crtc);
@@ -1146,7 +1186,7 @@
 		if (!funcs || !funcs->atomic_begin)
 			continue;
 
-		funcs->atomic_begin(crtc);
+		funcs->atomic_begin(crtc, old_crtc_state);
 	}
 
 	for_each_plane_in_state(old_state, plane, old_plane_state, i) {
@@ -1176,7 +1216,7 @@
 		if (!funcs || !funcs->atomic_flush)
 			continue;
 
-		funcs->atomic_flush(crtc);
+		funcs->atomic_flush(crtc, old_crtc_state);
 	}
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
@@ -1212,7 +1252,7 @@
 
 	crtc_funcs = crtc->helper_private;
 	if (crtc_funcs && crtc_funcs->atomic_begin)
-		crtc_funcs->atomic_begin(crtc);
+		crtc_funcs->atomic_begin(crtc, old_crtc_state);
 
 	drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
 		struct drm_plane_state *old_plane_state =
@@ -1235,7 +1275,7 @@
 	}
 
 	if (crtc_funcs && crtc_funcs->atomic_flush)
-		crtc_funcs->atomic_flush(crtc);
+		crtc_funcs->atomic_flush(crtc, old_crtc_state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
 
@@ -1923,10 +1963,6 @@
 	if (ret != 0)
 		goto fail;
 
-	/* TODO: ->page_flip is the only driver callback where the core
-	 * doesn't update plane->fb. For now patch it up here. */
-	plane->fb = plane->state->fb;
-
 	/* Driver takes ownership of state on successful async commit. */
 	return 0;
 fail:
@@ -1960,9 +1996,12 @@
  * implementing the legacy DPMS connector interface. It computes the new desired
  * ->active state for the corresponding CRTC (if the connector is enabled) and
  *  updates it.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code on failure.
  */
-void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
-				      int mode)
+int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
+				     int mode)
 {
 	struct drm_mode_config *config = &connector->dev->mode_config;
 	struct drm_atomic_state *state;
@@ -1971,6 +2010,7 @@
 	struct drm_connector *tmp_connector;
 	int ret;
 	bool active = false;
+	int old_mode = connector->dpms;
 
 	if (mode != DRM_MODE_DPMS_ON)
 		mode = DRM_MODE_DPMS_OFF;
@@ -1979,22 +2019,23 @@
 	crtc = connector->state->crtc;
 
 	if (!crtc)
-		return;
+		return 0;
 
-	/* FIXME: ->dpms has no return value so can't forward the -ENOMEM. */
 	state = drm_atomic_state_alloc(connector->dev);
 	if (!state)
-		return;
+		return -ENOMEM;
 
 	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
 retry:
 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
-	if (IS_ERR(crtc_state))
-		return;
+	if (IS_ERR(crtc_state)) {
+		ret = PTR_ERR(crtc_state);
+		goto fail;
+	}
 
 	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
-	list_for_each_entry(tmp_connector, &config->connector_list, head) {
+	drm_for_each_connector(tmp_connector, connector->dev) {
 		if (tmp_connector->state->crtc != crtc)
 			continue;
 
@@ -2009,17 +2050,16 @@
 	if (ret != 0)
 		goto fail;
 
-	/* Driver takes ownership of state on successful async commit. */
-	return;
+	/* Driver takes ownership of state on successful commit. */
+	return 0;
 fail:
 	if (ret == -EDEADLK)
 		goto backoff;
 
+	connector->dpms = old_mode;
 	drm_atomic_state_free(state);
 
-	WARN(1, "Driver bug: Changing ->active failed with ret=%i\n", ret);
-
-	return;
+	return ret;
 backoff:
 	drm_atomic_state_clear(state);
 	drm_atomic_legacy_backoff(state);
@@ -2080,6 +2120,7 @@
 	state->mode_changed = false;
 	state->active_changed = false;
 	state->planes_changed = false;
+	state->connectors_changed = false;
 	state->event = NULL;
 }
 EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
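
For helper-based drivers the practical consequence is that the fixup pass now
runs when either flag is set; a minimal sketch of the predicate applied twice
in the hunks above:

/* Sketch: CRTCs whose connector set changed get the same
 * ->mode_fixup treatment as CRTCs whose mode changed. */
static bool crtc_needs_fixup(const struct drm_crtc_state *state)
{
	return state->mode_changed || state->connectors_changed;
}
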
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 9b23525..192a5f9 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -53,6 +53,10 @@
  */
 void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
 {
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	mutex_lock(&dev->struct_mutex);
 	idr_remove(&dev->ctx_idr, ctx_handle);
 	mutex_unlock(&dev->struct_mutex);
@@ -85,10 +89,13 @@
  *
  * Initialise the drm_device::ctx_idr
  */
-int drm_legacy_ctxbitmap_init(struct drm_device * dev)
+void drm_legacy_ctxbitmap_init(struct drm_device * dev)
 {
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	idr_init(&dev->ctx_idr);
-	return 0;
 }
 
 /**
@@ -101,6 +108,10 @@
  */
 void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
 {
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	mutex_lock(&dev->struct_mutex);
 	idr_destroy(&dev->ctx_idr);
 	mutex_unlock(&dev->struct_mutex);
@@ -119,6 +130,10 @@
 {
 	struct drm_ctx_list *pos, *tmp;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	mutex_lock(&dev->ctxlist_mutex);
 
 	list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
@@ -161,6 +176,10 @@
 	struct drm_local_map *map;
 	struct drm_map_list *_entry;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	mutex_lock(&dev->struct_mutex);
 
 	map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -205,6 +224,10 @@
 	struct drm_local_map *map = NULL;
 	struct drm_map_list *r_list = NULL;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(r_list, &dev->maplist, head) {
 		if (r_list->map
@@ -305,6 +328,10 @@
 	struct drm_ctx ctx;
 	int i;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (res->count >= DRM_RESERVED_CONTEXTS) {
 		memset(&ctx, 0, sizeof(ctx));
 		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -335,6 +362,10 @@
 	struct drm_ctx_list *ctx_entry;
 	struct drm_ctx *ctx = data;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	ctx->handle = drm_legacy_ctxbitmap_next(dev);
 	if (ctx->handle == DRM_KERNEL_CONTEXT) {
 		/* Skip kernel's context and get a new one. */
@@ -378,6 +409,10 @@
 {
 	struct drm_ctx *ctx = data;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	/* This is 0, because we don't handle any context flags */
 	ctx->flags = 0;
 
@@ -400,6 +435,10 @@
 {
 	struct drm_ctx *ctx = data;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	DRM_DEBUG("%d\n", ctx->handle);
 	return drm_context_switch(dev, dev->last_context, ctx->handle);
 }
@@ -420,6 +459,10 @@
 {
 	struct drm_ctx *ctx = data;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	DRM_DEBUG("%d\n", ctx->handle);
 	drm_context_switch_complete(dev, file_priv, ctx->handle);
 
@@ -442,6 +485,10 @@
 {
 	struct drm_ctx *ctx = data;
 
+	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+	    drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	DRM_DEBUG("%d\n", ctx->handle);
 	if (ctx->handle != DRM_KERNEL_CONTEXT) {
 		if (dev->driver->context_dtor)
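
The same two-feature guard is now repeated in every legacy-context entry
point; if one wanted to factor it out, it would reduce to something like this
(purely illustrative, not part of this patch):

/* Illustrative only: true if the device is KMS-only and must
 * refuse the legacy context ioctls. */
static inline bool drm_legacy_ctx_refused(struct drm_device *dev)
{
	return !drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	       drm_core_check_feature(dev, DRIVER_MODESET);
}
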
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fed7483..33d877c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -615,7 +615,7 @@
 	if (atomic_read(&fb->refcount.refcount) > 1) {
 		drm_modeset_lock_all(dev);
 		/* remove from any CRTC */
-		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		drm_for_each_crtc(crtc, dev) {
 			if (crtc->primary->fb == fb) {
 				/* should turn off the crtc */
 				memset(&set, 0, sizeof(struct drm_mode_set));
@@ -627,7 +627,7 @@
 			}
 		}
 
-		list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		drm_for_each_plane(plane, dev) {
 			if (plane->fb == fb)
 				drm_plane_force_disable(plane);
 		}
@@ -736,7 +736,7 @@
 	unsigned int index = 0;
 	struct drm_crtc *tmp;
 
-	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(tmp, crtc->dev) {
 		if (tmp == crtc)
 			return index;
 
@@ -988,7 +988,7 @@
 
 	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
-	list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) {
+	drm_for_each_connector(tmp, connector->dev) {
 		if (tmp == connector)
 			return index;
 
@@ -1054,7 +1054,7 @@
 {
 	struct drm_connector *connector;
 
-	/* taking the mode config mutex ends up in a clash with sysfs */
+	/* FIXME: taking the mode config mutex ends up in a clash with sysfs */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
 		drm_connector_unregister(connector);
 
@@ -1151,7 +1151,7 @@
 int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
 			     unsigned long possible_crtcs,
 			     const struct drm_plane_funcs *funcs,
-			     const uint32_t *formats, uint32_t format_count,
+			     const uint32_t *formats, unsigned int format_count,
 			     enum drm_plane_type type)
 {
 	struct drm_mode_config *config = &dev->mode_config;
@@ -1225,7 +1225,7 @@
 int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
 		   unsigned long possible_crtcs,
 		   const struct drm_plane_funcs *funcs,
-		   const uint32_t *formats, uint32_t format_count,
+		   const uint32_t *formats, unsigned int format_count,
 		   bool is_primary)
 {
 	enum drm_plane_type type;
@@ -1280,7 +1280,7 @@
 	unsigned int index = 0;
 	struct drm_plane *tmp;
 
-	list_for_each_entry(tmp, &plane->dev->mode_config.plane_list, head) {
+	drm_for_each_plane(tmp, plane->dev) {
 		if (tmp == plane)
 			return index;
 
@@ -1305,7 +1305,7 @@
 	struct drm_plane *plane;
 	unsigned int i = 0;
 
-	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+	drm_for_each_plane(plane, dev) {
 		if (i == idx)
 			return plane;
 		i++;
@@ -1679,70 +1679,6 @@
 }
 EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
 
-static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
-{
-	uint32_t total_objects = 0;
-
-	total_objects += dev->mode_config.num_crtc;
-	total_objects += dev->mode_config.num_connector;
-	total_objects += dev->mode_config.num_encoder;
-
-	group->id_list = kcalloc(total_objects, sizeof(uint32_t), GFP_KERNEL);
-	if (!group->id_list)
-		return -ENOMEM;
-
-	group->num_crtcs = 0;
-	group->num_connectors = 0;
-	group->num_encoders = 0;
-	return 0;
-}
-
-void drm_mode_group_destroy(struct drm_mode_group *group)
-{
-	kfree(group->id_list);
-	group->id_list = NULL;
-}
-
-/*
- * NOTE: Driver's shouldn't ever call drm_mode_group_init_legacy_group - it is
- * the drm core's responsibility to set up mode control groups.
- */
-int drm_mode_group_init_legacy_group(struct drm_device *dev,
-				     struct drm_mode_group *group)
-{
-	struct drm_crtc *crtc;
-	struct drm_encoder *encoder;
-	struct drm_connector *connector;
-	int ret;
-
-	ret = drm_mode_group_init(dev, group);
-	if (ret)
-		return ret;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-		group->id_list[group->num_crtcs++] = crtc->base.id;
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-		group->id_list[group->num_crtcs + group->num_encoders++] =
-		encoder->base.id;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-		group->id_list[group->num_crtcs + group->num_encoders +
-			       group->num_connectors++] = connector->base.id;
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
-
-void drm_reinit_primary_mode_group(struct drm_device *dev)
-{
-	drm_modeset_lock_all(dev);
-	drm_mode_group_destroy(&dev->primary->mode_group);
-	drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
-	drm_modeset_unlock_all(dev);
-}
-EXPORT_SYMBOL(drm_reinit_primary_mode_group);
-
 /**
  * drm_mode_getresources - get graphics configuration
  * @dev: drm device for the ioctl
@@ -1771,12 +1707,11 @@
 	int crtc_count = 0;
 	int fb_count = 0;
 	int encoder_count = 0;
-	int copied = 0, i;
+	int copied = 0;
 	uint32_t __user *fb_id;
 	uint32_t __user *crtc_id;
 	uint32_t __user *connector_id;
 	uint32_t __user *encoder_id;
-	struct drm_mode_group *mode_group;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
@@ -1809,24 +1744,14 @@
 	/* mode_config.mutex protects the connector list against e.g. DP MST
 	 * connector hot-adding. CRTC/Plane lists are invariant. */
 	mutex_lock(&dev->mode_config.mutex);
-	if (!drm_is_primary_client(file_priv)) {
+	drm_for_each_crtc(crtc, dev)
+		crtc_count++;
 
-		mode_group = NULL;
-		list_for_each(lh, &dev->mode_config.crtc_list)
-			crtc_count++;
+	drm_for_each_connector(connector, dev)
+		connector_count++;
 
-		list_for_each(lh, &dev->mode_config.connector_list)
-			connector_count++;
-
-		list_for_each(lh, &dev->mode_config.encoder_list)
-			encoder_count++;
-	} else {
-
-		mode_group = &file_priv->master->minor->mode_group;
-		crtc_count = mode_group->num_crtcs;
-		connector_count = mode_group->num_connectors;
-		encoder_count = mode_group->num_encoders;
-	}
+	drm_for_each_encoder(encoder, dev)
+		encoder_count++;
 
 	card_res->max_height = dev->mode_config.max_height;
 	card_res->min_height = dev->mode_config.min_height;
@@ -1837,25 +1762,13 @@
 	if (card_res->count_crtcs >= crtc_count) {
 		copied = 0;
 		crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
-		if (!mode_group) {
-			list_for_each_entry(crtc, &dev->mode_config.crtc_list,
-					    head) {
-				DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
-				if (put_user(crtc->base.id, crtc_id + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
+		drm_for_each_crtc(crtc, dev) {
+			DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+			if (put_user(crtc->base.id, crtc_id + copied)) {
+				ret = -EFAULT;
+				goto out;
 			}
-		} else {
-			for (i = 0; i < mode_group->num_crtcs; i++) {
-				if (put_user(mode_group->id_list[i],
-					     crtc_id + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
-			}
+			copied++;
 		}
 	}
 	card_res->count_crtcs = crtc_count;
@@ -1864,29 +1777,15 @@
 	if (card_res->count_encoders >= encoder_count) {
 		copied = 0;
 		encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
-		if (!mode_group) {
-			list_for_each_entry(encoder,
-					    &dev->mode_config.encoder_list,
-					    head) {
-				DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
-						encoder->name);
-				if (put_user(encoder->base.id, encoder_id +
-					     copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
+		drm_for_each_encoder(encoder, dev) {
+			DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
+					encoder->name);
+			if (put_user(encoder->base.id, encoder_id +
+				     copied)) {
+				ret = -EFAULT;
+				goto out;
 			}
-		} else {
-			for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
-				if (put_user(mode_group->id_list[i],
-					     encoder_id + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
-			}
-
+			copied++;
 		}
 	}
 	card_res->count_encoders = encoder_count;
@@ -1895,31 +1794,16 @@
 	if (card_res->count_connectors >= connector_count) {
 		copied = 0;
 		connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
-		if (!mode_group) {
-			list_for_each_entry(connector,
-					    &dev->mode_config.connector_list,
-					    head) {
-				DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-					connector->base.id,
-					connector->name);
-				if (put_user(connector->base.id,
-					     connector_id + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
+		drm_for_each_connector(connector, dev) {
+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+				connector->base.id,
+				connector->name);
+			if (put_user(connector->base.id,
+				     connector_id + copied)) {
+				ret = -EFAULT;
+				goto out;
 			}
-		} else {
-			int start = mode_group->num_crtcs +
-				mode_group->num_encoders;
-			for (i = start; i < start + mode_group->num_connectors; i++) {
-				if (put_user(mode_group->id_list[i],
-					     connector_id + copied)) {
-					ret = -EFAULT;
-					goto out;
-				}
-				copied++;
-			}
+			copied++;
 		}
 	}
 	card_res->count_connectors = connector_count;
@@ -2187,7 +2071,7 @@
 
 	/* For atomic drivers only state objects are synchronously updated and
 	 * protected by modeset locks, so check those first. */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		if (!connector->state)
 			continue;
 
@@ -2291,7 +2175,7 @@
 		plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
 
 		/* Plane lists are invariant, no locking needed. */
-		list_for_each_entry(plane, &config->plane_list, head) {
+		drm_for_each_plane(plane, dev) {
 			/*
 			 * Unless userspace set the 'universal planes'
 			 * capability bit, only advertise overlays.
@@ -2596,7 +2480,7 @@
 	 * connectors from it), hence we need to refcount the fbs across all
 	 * crtcs. Atomic modeset will have saner semantics ...
 	 */
-	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
+	drm_for_each_crtc(tmp, crtc->dev)
 		tmp->primary->old_fb = tmp->primary->fb;
 
 	fb = set->fb;
@@ -2607,7 +2491,7 @@
 		crtc->primary->fb = fb;
 	}
 
-	list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(tmp, crtc->dev) {
 		if (tmp->primary->fb)
 			drm_framebuffer_reference(tmp->primary->fb);
 		if (tmp->primary->old_fb)
@@ -4301,7 +4185,6 @@
 		mutex_unlock(&dev->mode_config.blob_lock);
 	else
 		might_lock(&dev->mode_config.blob_lock);
-
 }
 EXPORT_SYMBOL(drm_property_unreference_blob);
 
@@ -4472,9 +4355,7 @@
 			goto err_created;
 	}
 
-	if (old_blob)
-		drm_property_unreference_blob(old_blob);
-
+	drm_property_unreference_blob(old_blob);
 	*replace = new_blob;
 
 	return 0;
@@ -4872,9 +4753,9 @@
 
 	/* Do DPMS ourselves */
 	if (property == connector->dev->mode_config.dpms_property) {
-		if (connector->funcs->dpms)
-			(*connector->funcs->dpms)(connector, (int)value);
 		ret = 0;
+		if (connector->funcs->dpms)
+			ret = (*connector->funcs->dpms)(connector, (int)value);
 	} else if (connector->funcs->set_property)
 		ret = connector->funcs->set_property(connector, property, value);
 
@@ -5349,13 +5230,7 @@
 		/* Keep the old fb, don't unref it. */
 		crtc->primary->old_fb = NULL;
 	} else {
-		/*
-		 * Warn if the driver hasn't properly updated the crtc->fb
-		 * field to reflect that the new framebuffer is now used.
-		 * Failing to do so will screw with the reference counting
-		 * on framebuffers.
-		 */
-		WARN_ON(crtc->primary->fb != fb);
+		crtc->primary->fb = fb;
 		/* Unref only the old framebuffer. */
 		fb = NULL;
 	}
@@ -5386,21 +5261,23 @@
 	struct drm_encoder *encoder;
 	struct drm_connector *connector;
 
-	list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+	drm_for_each_plane(plane, dev)
 		if (plane->funcs->reset)
 			plane->funcs->reset(plane);
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+	drm_for_each_crtc(crtc, dev)
 		if (crtc->funcs->reset)
 			crtc->funcs->reset(crtc);
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+	drm_for_each_encoder(encoder, dev)
 		if (encoder->funcs->reset)
 			encoder->funcs->reset(encoder);
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	mutex_lock(&dev->mode_config.mutex);
+	drm_for_each_connector(connector, dev)
 		if (connector->funcs->reset)
 			connector->funcs->reset(connector);
+	mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_mode_config_reset);
 
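The drm_for_each_*() iterators used throughout this conversion are thin
wrappers over list_for_each_entry(); approximately (simplified — the
connector variant additionally asserts that the connector list is safe to
walk):

/* Approximate expansions; the real macros live in include/drm/drm_crtc.h. */
#define drm_for_each_crtc(crtc, dev) \
	list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)

#define drm_for_each_plane(plane, dev) \
	list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
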
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 393114d..ef53475 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -121,7 +121,7 @@
 		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 	}
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	drm_for_each_connector(connector, dev)
 		if (connector->encoder == encoder)
 			return true;
 	return false;
@@ -151,7 +151,7 @@
 	if (!oops_in_progress)
 		WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+	drm_for_each_encoder(encoder, dev)
 		if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
 			return true;
 	return false;
@@ -180,7 +180,7 @@
 
 	drm_warn_on_modeset_not_all_locked(dev);
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 		if (!drm_helper_encoder_in_use(encoder)) {
 			drm_encoder_disable(encoder);
 			/* disconnect encoder from any connector */
@@ -188,7 +188,7 @@
 		}
 	}
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(crtc, dev) {
 		const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 		crtc->enabled = drm_helper_crtc_in_use(crtc);
 		if (!crtc->enabled) {
@@ -230,7 +230,7 @@
 	const struct drm_encoder_helper_funcs *encoder_funcs;
 	struct drm_encoder *encoder;
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 		encoder_funcs = encoder->helper_private;
 		/* Disable unused encoders */
 		if (encoder->crtc == NULL)
@@ -305,7 +305,7 @@
 	 * adjust it according to limitations or connector properties, and also
 	 * a chance to reject the mode entirely.
 	 */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 
 		if (encoder->crtc != crtc)
 			continue;
@@ -334,7 +334,7 @@
 	crtc->hwmode = *adjusted_mode;
 
 	/* Prepare the encoders and CRTCs before setting the mode. */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 
 		if (encoder->crtc != crtc)
 			continue;
@@ -359,7 +359,7 @@
 	if (!ret)
 	    goto done;
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 
 		if (encoder->crtc != crtc)
 			continue;
@@ -376,7 +376,7 @@
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
 	crtc_funcs->commit(crtc);
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 
 		if (encoder->crtc != crtc)
 			continue;
@@ -418,11 +418,11 @@
 	struct drm_encoder *encoder;
 
 	/* Decouple all encoders and their attached connectors from this crtc */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 		if (encoder->crtc != crtc)
 			continue;
 
-		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		drm_for_each_connector(connector, dev) {
 			if (connector->encoder != encoder)
 				continue;
 
@@ -519,12 +519,12 @@
 	 * restored, not the drivers personal bookkeeping.
 	 */
 	count = 0;
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 		save_encoders[count++] = *encoder;
 	}
 
 	count = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		save_connectors[count++] = *connector;
 	}
 
@@ -562,7 +562,7 @@
 
 	/* a) traverse passed in connector list and get encoders for them */
 	count = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		const struct drm_connector_helper_funcs *connector_funcs =
 			connector->helper_private;
 		new_encoder = connector->encoder;
@@ -602,7 +602,7 @@
 	}
 
 	count = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		if (!connector->encoder)
 			continue;
 
@@ -685,12 +685,12 @@
 fail:
 	/* Restore all previous data. */
 	count = 0;
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+	drm_for_each_encoder(encoder, dev) {
 		*encoder = save_encoders[count++];
 	}
 
 	count = 0;
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 		*connector = save_connectors[count++];
 	}
 
@@ -712,7 +712,7 @@
 	struct drm_connector *connector;
 	struct drm_device *dev = encoder->dev;
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	drm_for_each_connector(connector, dev)
 		if (connector->encoder == encoder)
 			if (connector->dpms < dpms)
 				dpms = connector->dpms;
@@ -746,7 +746,7 @@
 	struct drm_connector *connector;
 	struct drm_device *dev = crtc->dev;
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	drm_for_each_connector(connector, dev)
 		if (connector->encoder && connector->encoder->crtc == crtc)
 			if (connector->dpms < dpms)
 				dpms = connector->dpms;
@@ -762,15 +762,18 @@
  * implementing the DPMS connector attribute. It computes the new desired DPMS
  * state for all encoders and crtcs in the output mesh and calls the ->dpms()
  * callback provided by the driver appropriately.
+ *
+ * Returns:
+ * Always returns 0.
  */
-void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
+int drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 {
 	struct drm_encoder *encoder = connector->encoder;
 	struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
 	int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
 
 	if (mode == connector->dpms)
-		return;
+		return 0;
 
 	old_dpms = connector->dpms;
 	connector->dpms = mode;
@@ -802,7 +805,7 @@
 		}
 	}
 
-	return;
+	return 0;
 }
 EXPORT_SYMBOL(drm_helper_connector_dpms);
 
@@ -862,7 +865,7 @@
 	bool ret;
 
 	drm_modeset_lock_all(dev);
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(crtc, dev) {
 
 		if (!crtc->enabled)
 			continue;
@@ -876,7 +879,7 @@
 
 		/* Turn off outputs that were already powered off */
 		if (drm_helper_choose_crtc_dpms(crtc)) {
-			list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+			drm_for_each_encoder(encoder, dev) {
 
 				if (encoder->crtc != crtc)
 					continue;
@@ -928,15 +931,15 @@
 	if (crtc->funcs->atomic_duplicate_state)
 		crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
 	else {
-		crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
-		if (!crtc_state)
-			return -ENOMEM;
-		if (crtc->state)
-			__drm_atomic_helper_crtc_duplicate_state(crtc, crtc_state);
-		else
-			crtc_state->crtc = crtc;
+		if (!crtc->state)
+			drm_atomic_helper_crtc_reset(crtc);
+
+		crtc_state = drm_atomic_helper_crtc_duplicate_state(crtc);
 	}
 
+	if (!crtc_state)
+		return -ENOMEM;
+
 	crtc_state->planes_changed = true;
 	crtc_state->mode_changed = true;
 	ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
@@ -957,11 +960,11 @@
 	ret = drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
 
 out:
-	if (crtc->funcs->atomic_destroy_state)
-		crtc->funcs->atomic_destroy_state(crtc, crtc_state);
-	else {
-		__drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
-		kfree(crtc_state);
+	if (crtc_state) {
+		if (crtc->funcs->atomic_destroy_state)
+			crtc->funcs->atomic_destroy_state(crtc, crtc_state);
+		else
+			drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
 	}
 
 	return ret;
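
With the connector ->dpms() hook now returning int, a helper-based driver
simply forwards the helper's return value; a hedged sketch (mydrv_* is
illustrative):

/* Sketch: a legacy-helper driver's .dpms hook after this change;
 * drm_helper_connector_dpms() always returns 0, per the kerneldoc. */
static int mydrv_connector_dpms(struct drm_connector *connector, int mode)
{
	return drm_helper_connector_dpms(connector, mode);
}
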
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index eb603f1de..e23df5f 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2632,6 +2632,16 @@
 			seq_printf(m, "%02x ", buf[i]);
 		seq_printf(m, "\n");
 
+		/* dump the standard OUI branch header */
+		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
+		seq_printf(m, "branch oui: ");
+		for (i = 0; i < 0x3; i++)
+			seq_printf(m, "%02x", buf[i]);
+		seq_printf(m, " devid: ");
+		for (i = 0x3; i < 0x8; i++)
+			seq_printf(m, "%c", buf[i]);
+		seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
+		seq_printf(m, "\n");
 		bret = dump_dp_payload_table(mgr, buf);
 		if (bret == true) {
 			seq_printf(m, "payload table: ");
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b7bf4ce..53d09a1 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -285,7 +285,6 @@
 	if (!minor)
 		return;
 
-	drm_mode_group_destroy(&minor->mode_group);
 	put_device(minor->kdev);
 
 	spin_lock_irqsave(&drm_minor_lock, flags);
@@ -582,11 +581,7 @@
 	if (drm_ht_create(&dev->map_hash, 12))
 		goto err_minors;
 
-	ret = drm_legacy_ctxbitmap_init(dev);
-	if (ret) {
-		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
-		goto err_ht;
-	}
+	drm_legacy_ctxbitmap_init(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_GEM)) {
 		ret = drm_gem_init(dev);
@@ -600,7 +595,6 @@
 
 err_ctxbitmap:
 	drm_legacy_ctxbitmap_cleanup(dev);
-err_ht:
 	drm_ht_remove(&dev->map_hash);
 err_minors:
 	drm_minor_free(dev, DRM_MINOR_LEGACY);
@@ -705,20 +699,9 @@
 			goto err_minors;
 	}
 
-	/* setup grouping for legacy outputs */
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = drm_mode_group_init_legacy_group(dev,
-				&dev->primary->mode_group);
-		if (ret)
-			goto err_unload;
-	}
-
 	ret = 0;
 	goto out_unlock;
 
-err_unload:
-	if (dev->driver->unload)
-		dev->driver->unload(dev);
 err_minors:
 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7087da3..05bb731 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3413,7 +3413,7 @@
 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	drm_for_each_connector(connector, dev)
 		if (connector->encoder == encoder && connector->eld[0])
 			return connector;
 
@@ -3802,7 +3802,7 @@
 	struct drm_display_mode *mode;
 	struct drm_device *dev = connector->dev;
 
-	count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+	count = ARRAY_SIZE(drm_dmt_modes);
 	if (hdisplay < 0)
 		hdisplay = 0;
 	if (vdisplay < 0)
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 5c1aca4..c19a625 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -209,23 +209,11 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_framebuffer *fb;
-	int ret;
 
-	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
-	if (ret)
-		return ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret) {
-		mutex_unlock(&dev->mode_config.mutex);
-		return ret;
-	}
-
-	list_for_each_entry(fb, &dev->mode_config.fb_list, head)
+	mutex_lock(&dev->mode_config.fb_lock);
+	drm_for_each_fb(fb, dev)
 		drm_fb_cma_describe(fb, m);
-
-	mutex_unlock(&dev->struct_mutex);
-	mutex_unlock(&dev->mode_config.mutex);
+	mutex_unlock(&dev->mode_config.fb_lock);
 
 	return 0;
 }
@@ -234,9 +222,9 @@
 
 static struct fb_ops drm_fbdev_cma_ops = {
 	.owner		= THIS_MODULE,
-	.fb_fillrect	= sys_fillrect,
-	.fb_copyarea	= sys_copyarea,
-	.fb_imageblit	= sys_imageblit,
+	.fb_fillrect	= drm_fb_helper_sys_fillrect,
+	.fb_copyarea	= drm_fb_helper_sys_copyarea,
+	.fb_imageblit	= drm_fb_helper_sys_imageblit,
 	.fb_check_var	= drm_fb_helper_check_var,
 	.fb_set_par	= drm_fb_helper_set_par,
 	.fb_blank	= drm_fb_helper_blank,
@@ -275,10 +263,9 @@
 	if (IS_ERR(obj))
 		return -ENOMEM;
 
-	fbi = framebuffer_alloc(0, dev->dev);
-	if (!fbi) {
-		dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
-		ret = -ENOMEM;
+	fbi = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(fbi)) {
+		ret = PTR_ERR(fbi);
 		goto err_drm_gem_cma_free_object;
 	}
 
@@ -286,23 +273,16 @@
 	if (IS_ERR(fbdev_cma->fb)) {
 		dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
 		ret = PTR_ERR(fbdev_cma->fb);
-		goto err_framebuffer_release;
+		goto err_fb_info_destroy;
 	}
 
 	fb = &fbdev_cma->fb->fb;
 	helper->fb = fb;
-	helper->fbdev = fbi;
 
 	fbi->par = helper;
 	fbi->flags = FBINFO_FLAG_DEFAULT;
 	fbi->fbops = &drm_fbdev_cma_ops;
 
-	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
-	if (ret) {
-		dev_err(dev->dev, "Failed to allocate color map.\n");
-		goto err_drm_fb_cma_destroy;
-	}
-
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
 
@@ -317,11 +297,8 @@
 
 	return 0;
 
-err_drm_fb_cma_destroy:
-	drm_framebuffer_unregister_private(fb);
-	drm_fb_cma_destroy(fb);
-err_framebuffer_release:
-	framebuffer_release(fbi);
+err_fb_info_destroy:
+	drm_fb_helper_release_fbi(helper);
 err_drm_gem_cma_free_object:
 	drm_gem_cma_free_object(&obj->base);
 	return ret;
@@ -397,20 +374,8 @@
  */
 void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
 {
-	if (fbdev_cma->fb_helper.fbdev) {
-		struct fb_info *info;
-		int ret;
-
-		info = fbdev_cma->fb_helper.fbdev;
-		ret = unregister_framebuffer(info);
-		if (ret < 0)
-			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
-
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
+	drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
 
 	if (fbdev_cma->fb) {
 		drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index cac4229..418d299 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -56,8 +56,8 @@
  * Teardown is done with drm_fb_helper_fini().
  *
  * At runtime drivers should restore the fbdev console by calling
- * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
- * should also notify the fb helper code from updates to the output
+ * drm_fb_helper_restore_fbdev_mode_unlocked() from their ->lastclose callback.
+ * They should also notify the fb helper code of updates to the output
  * configuration by calling drm_fb_helper_hotplug_event(). For easier
  * integration with the output polling code in drm_crtc_helper.c the modeset
  * code provides a ->output_poll_changed callback.
@@ -89,8 +89,9 @@
  * connectors to the fbdev, e.g. if some are reserved for special purposes or
  * not adequate to be used for the fbcon.
  *
- * Since this is part of the initial setup before the fbdev is published, no
- * locking is required.
+ * This function is protected against concurrent connector hotadds/removals
+ * using drm_fb_helper_add_one_connector() and
+ * drm_fb_helper_remove_one_connector().
  */
 int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
 {
@@ -98,7 +99,8 @@
 	struct drm_connector *connector;
 	int i;
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	mutex_lock(&dev->mode_config.mutex);
+	drm_for_each_connector(connector, dev) {
 		struct drm_fb_helper_connector *fb_helper_connector;
 
 		fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
@@ -108,6 +110,7 @@
 		fb_helper_connector->connector = connector;
 		fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
 	}
+	mutex_unlock(&dev->mode_config.mutex);
 	return 0;
 fail:
 	for (i = 0; i < fb_helper->connector_count; i++) {
@@ -115,6 +118,8 @@
 		fb_helper->connector_info[i] = NULL;
 	}
 	fb_helper->connector_count = 0;
+	mutex_unlock(&dev->mode_config.mutex);
+
 	return -ENOMEM;
 }
 EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
@@ -163,11 +168,14 @@
 	}
 	set->num_connectors--;
 
-	/* because i915 is pissy about this..
+	/*
 	 * TODO maybe need to make sure we set it back to !=NULL somewhere?
 	 */
-	if (set->num_connectors == 0)
+	if (set->num_connectors == 0) {
 		set->fb = NULL;
+		drm_mode_destroy(connector->dev, set->mode);
+		set->mode = NULL;
+	}
 }
 
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
@@ -269,7 +277,7 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_crtc *c;
 
-	list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(c, dev) {
 		if (crtc->base.id == c->base.id)
 			return c->primary->fb;
 	}
@@ -321,7 +329,7 @@
 
 	drm_warn_on_modeset_not_all_locked(dev);
 
-	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+	drm_for_each_plane(plane, dev) {
 		if (plane->type != DRM_PLANE_TYPE_PRIMARY)
 			drm_plane_force_disable(plane);
 
@@ -349,21 +357,6 @@
 	}
 	return error;
 }
-/**
- * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
- * @fb_helper: fbcon to restore
- *
- * This should be called from driver's drm ->lastclose callback
- * when implementing an fbcon on top of kms using this helper. This ensures that
- * the user isn't greeted with a black screen when e.g. X dies.
- *
- * Use this variant if you need to bypass locking (panic), or already
- * hold all modeset locks.  Otherwise use drm_fb_helper_restore_fbdev_mode_unlocked()
- */
-static bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
-{
-	return restore_fbdev_mode(fb_helper);
-}
 
 /**
  * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
@@ -393,6 +386,31 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
 
+static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
+{
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_crtc *crtc;
+	int bound = 0, crtcs_bound = 0;
+
+	/* Sometimes user space wants everything disabled, so don't steal the
+	 * display if there's a master. */
+	if (dev->primary->master)
+		return false;
+
+	drm_for_each_crtc(crtc, dev) {
+		if (crtc->primary->fb)
+			crtcs_bound++;
+		if (crtc->primary->fb == fb_helper->fb)
+			bound++;
+	}
+
+	if (bound < crtcs_bound)
+		return false;
+
+	return true;
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
 /*
  * restore fbcon display for all kms driver's using this helper, used for sysrq
  * and panic handling.
@@ -411,67 +429,15 @@
 		if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 			continue;
 
-		/*
-		 * NOTE: Use trylock mode to avoid deadlocks and sleeping in
-		 * panic context.
-		 */
-		if (__drm_modeset_lock_all(dev, true) != 0) {
-			error = true;
-			continue;
-		}
-
-		ret = drm_fb_helper_restore_fbdev_mode(helper);
+		drm_modeset_lock_all(dev);
+		ret = restore_fbdev_mode(helper);
 		if (ret)
 			error = true;
-
 		drm_modeset_unlock_all(dev);
 	}
 	return error;
 }
 
-static int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
-			void *panic_str)
-{
-	/*
-	 * It's a waste of time and effort to switch back to text console
-	 * if the kernel should reboot before panic messages can be seen.
-	 */
-	if (panic_timeout < 0)
-		return 0;
-
-	pr_err("panic occurred, switching back to text console\n");
-	return drm_fb_helper_force_kernel_mode();
-}
-
-static struct notifier_block paniced = {
-	.notifier_call = drm_fb_helper_panic,
-};
-
-static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
-{
-	struct drm_device *dev = fb_helper->dev;
-	struct drm_crtc *crtc;
-	int bound = 0, crtcs_bound = 0;
-
-	/* Sometimes user space wants everything disabled, so don't steal the
-	 * display if there's a master. */
-	if (dev->primary->master)
-		return false;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		if (crtc->primary->fb)
-			crtcs_bound++;
-		if (crtc->primary->fb == fb_helper->fb)
-			bound++;
-	}
-
-	if (bound < crtcs_bound)
-		return false;
-
-	return true;
-}
-
-#ifdef CONFIG_MAGIC_SYSRQ
 static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
 {
 	bool ret;
@@ -504,14 +470,6 @@
 	int i, j;
 
 	/*
-	 * fbdev->blank can be called from irq context in case of a panic.
-	 * Since we already have our own special panic handler which will
-	 * restore the fbdev console mode completely, just bail out early.
-	 */
-	if (oops_in_progress)
-		return;
-
-	/*
 	 * For each CRTC in this fb, turn the connectors on/off.
 	 */
 	drm_modeset_lock_all(dev);
@@ -544,6 +502,9 @@
  */
 int drm_fb_helper_blank(int blank, struct fb_info *info)
 {
+	if (oops_in_progress)
+		return -EBUSY;
+
 	switch (blank) {
 	/* Display: On; HSync: On, VSync: On */
 	case FB_BLANK_UNBLANK:
@@ -655,7 +616,7 @@
 	}
 
 	i = 0;
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(crtc, dev) {
 		fb_helper->crtc_info[i].mode_set.crtc = crtc;
 		i++;
 	}
@@ -667,14 +628,91 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_init);
 
+/**
+ * drm_fb_helper_alloc_fbi - allocate fb_info and some of its members
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A helper to alloc fb_info and the members cmap and apertures. Called
+ * by the driver within the fb_probe fb_helper callback function.
+ *
+ * RETURNS:
+ * Pointer to the allocated fb_info on success, or an ERR_PTR-encoded
+ * error code on failure.
+ */
+struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
+{
+	struct device *dev = fb_helper->dev->dev;
+	struct fb_info *info;
+	int ret;
+
+	info = framebuffer_alloc(0, dev);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret)
+		goto err_release;
+
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto err_free_cmap;
+	}
+
+	fb_helper->fbdev = info;
+
+	return info;
+
+err_free_cmap:
+	fb_dealloc_cmap(&info->cmap);
+err_release:
+	framebuffer_release(info);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_fb_helper_alloc_fbi);
+
+/**
+ * drm_fb_helper_unregister_fbi - unregister fb_info framebuffer device
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A wrapper around unregister_framebuffer, to release the fb_info
+ * framebuffer device
+ */
+void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
+{
+	if (fb_helper && fb_helper->fbdev)
+		unregister_framebuffer(fb_helper->fbdev);
+}
+EXPORT_SYMBOL(drm_fb_helper_unregister_fbi);
+
+/**
+ * drm_fb_helper_release_fbi - dealloc fb_info and its members
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A helper to free memory taken by fb_info and the members cmap and
+ * apertures
+ */
+void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper)
+{
+	if (fb_helper) {
+		struct fb_info *info = fb_helper->fbdev;
+
+		if (info) {
+			if (info->cmap.len)
+				fb_dealloc_cmap(&info->cmap);
+			framebuffer_release(info);
+		}
+
+		fb_helper->fbdev = NULL;
+	}
+}
+EXPORT_SYMBOL(drm_fb_helper_release_fbi);
+
 void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
 {
 	if (!list_empty(&fb_helper->kernel_fb_list)) {
 		list_del(&fb_helper->kernel_fb_list);
 		if (list_empty(&kernel_fb_helper_list)) {
-			pr_info("drm: unregistered panic notifier\n");
-			atomic_notifier_chain_unregister(&panic_notifier_list,
-							 &paniced);
 			unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
 		}
 	}
@@ -684,6 +722,149 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_fini);
 
+/**
+ * drm_fb_helper_unlink_fbi - wrapper around unlink_framebuffer
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * A wrapper around unlink_framebuffer implemented by fbdev core
+ */
+void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
+{
+	if (fb_helper && fb_helper->fbdev)
+		unlink_framebuffer(fb_helper->fbdev);
+}
+EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
+
+/**
+ * drm_fb_helper_sys_read - wrapper around fb_sys_read
+ * @info: fb_info struct pointer
+ * @buf: userspace buffer to read from framebuffer memory
+ * @count: number of bytes to read from framebuffer memory
+ * @ppos: read offset within framebuffer memory
+ *
+ * A wrapper around fb_sys_read implemented by fbdev core
+ */
+ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	return fb_sys_read(info, buf, count, ppos);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_read);
+
+/**
+ * drm_fb_helper_sys_write - wrapper around fb_sys_write
+ * @info: fb_info struct pointer
+ * @buf: userspace buffer to write to framebuffer memory
+ * @count: number of bytes to write to framebuffer memory
+ * @ppos: write offset within framebuffer memory
+ *
+ * A wrapper around fb_sys_write implemented by fbdev core
+ */
+ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	return fb_sys_write(info, buf, count, ppos);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_write);
+
+/**
+ * drm_fb_helper_sys_fillrect - wrapper around sys_fillrect
+ * @info: fbdev registered by the helper
+ * @rect: info about rectangle to fill
+ *
+ * A wrapper around sys_fillrect implemented by fbdev core
+ */
+void drm_fb_helper_sys_fillrect(struct fb_info *info,
+				const struct fb_fillrect *rect)
+{
+	sys_fillrect(info, rect);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_fillrect);
+
+/**
+ * drm_fb_helper_sys_copyarea - wrapper around sys_copyarea
+ * @info: fbdev registered by the helper
+ * @area: info about area to copy
+ *
+ * A wrapper around sys_copyarea implemented by fbdev core
+ */
+void drm_fb_helper_sys_copyarea(struct fb_info *info,
+				const struct fb_copyarea *area)
+{
+	sys_copyarea(info, area);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_copyarea);
+
+/**
+ * drm_fb_helper_sys_imageblit - wrapper around sys_imageblit
+ * @info: fbdev registered by the helper
+ * @image: info about image to blit
+ *
+ * A wrapper around sys_imageblit implemented by fbdev core
+ */
+void drm_fb_helper_sys_imageblit(struct fb_info *info,
+				 const struct fb_image *image)
+{
+	sys_imageblit(info, image);
+}
+EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
+
+/**
+ * drm_fb_helper_cfb_fillrect - wrapper around cfb_fillrect
+ * @info: fbdev registered by the helper
+ * @rect: info about rectangle to fill
+ *
+ * A wrapper around cfb_fillrect implemented by fbdev core
+ */
+void drm_fb_helper_cfb_fillrect(struct fb_info *info,
+				const struct fb_fillrect *rect)
+{
+	cfb_fillrect(info, rect);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect);
+
+/**
+ * drm_fb_helper_cfb_copyarea - wrapper around cfb_copyarea
+ * @info: fbdev registered by the helper
+ * @area: info about area to copy
+ *
+ * A wrapper around cfb_copyarea implemented by fbdev core
+ */
+void drm_fb_helper_cfb_copyarea(struct fb_info *info,
+				const struct fb_copyarea *area)
+{
+	cfb_copyarea(info, area);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea);
+
+/**
+ * drm_fb_helper_cfb_imageblit - wrapper around cfb_imageblit
+ * @info: fbdev registered by the helper
+ * @image: info about image to blit
+ *
+ * A wrapper around cfb_imageblit implemented by fbdev core
+ */
+void drm_fb_helper_cfb_imageblit(struct fb_info *info,
+				 const struct fb_image *image)
+{
+	cfb_imageblit(info, image);
+}
+EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
+
+/**
+ * drm_fb_helper_set_suspend - wrapper around fb_set_suspend
+ * @fb_helper: driver-allocated fbdev helper
+ * @state: desired state, zero to resume, non-zero to suspend
+ *
+ * A wrapper around fb_set_suspend implemented by fbdev core
+ */
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state)
+{
+	if (fb_helper && fb_helper->fbdev)
+		fb_set_suspend(fb_helper->fbdev, state);
+}
+EXPORT_SYMBOL(drm_fb_helper_set_suspend);
+
 static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
 		     u16 blue, u16 regno, struct fb_info *info)
 {
@@ -771,9 +952,10 @@
 	int i, j, rc = 0;
 	int start;
 
-	if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+	if (oops_in_progress)
 		return -EBUSY;
-	}
+
+	drm_modeset_lock_all(dev);
 	if (!drm_fb_helper_is_bound(fb_helper)) {
 		drm_modeset_unlock_all(dev);
 		return -EBUSY;
@@ -922,6 +1104,9 @@
 	struct drm_fb_helper *fb_helper = info->par;
 	struct fb_var_screeninfo *var = &info->var;
 
+	if (oops_in_progress)
+		return -EBUSY;
+
 	if (var->pixclock != 0) {
 		DRM_ERROR("PIXEL CLOCK SET\n");
 		return -EINVAL;
@@ -947,9 +1132,10 @@
 	int ret = 0;
 	int i;
 
-	if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+	if (oops_in_progress)
 		return -EBUSY;
-	}
+
+	drm_modeset_lock_all(dev);
 	if (!drm_fb_helper_is_bound(fb_helper)) {
 		drm_modeset_unlock_all(dev);
 		return -EBUSY;
@@ -1109,12 +1295,7 @@
 	dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
 			info->node, info->fix.id);
 
-	/* Switch back to kernel console on panic */
-	/* multi card linked list maybe */
 	if (list_empty(&kernel_fb_helper_list)) {
-		dev_info(fb_helper->dev->dev, "registered panic notifier\n");
-		atomic_notifier_chain_register(&panic_notifier_list,
-					       &paniced);
 		register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
 	}
 
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 16a1647..3c2d4ab 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -766,7 +766,7 @@
 	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
 	struct drm_device *dev = obj->dev;
 
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	if (dev->driver->gem_free_object != NULL)
 		dev->driver->gem_free_object(obj);
@@ -778,22 +778,14 @@
 	struct drm_gem_object *obj = vma->vm_private_data;
 
 	drm_gem_object_reference(obj);
-
-	mutex_lock(&obj->dev->struct_mutex);
-	drm_vm_open_locked(obj->dev, vma);
-	mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_open);
 
 void drm_gem_vm_close(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
 
-	mutex_lock(&dev->struct_mutex);
-	drm_vm_close_locked(obj->dev, vma);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
@@ -850,7 +842,6 @@
 	 */
 	drm_gem_object_reference(obj);
 
-	drm_vm_open_locked(dev, vma);
 	return 0;
 }
 EXPORT_SYMBOL(drm_gem_mmap_obj);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index bd75f30..86cc793 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -289,20 +289,15 @@
 {
 	struct drm_gem_object *gem_obj;
 
-	mutex_lock(&drm->struct_mutex);
-
 	gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
 	if (!gem_obj) {
 		dev_err(drm->dev, "failed to lookup GEM object\n");
-		mutex_unlock(&drm->struct_mutex);
 		return -EINVAL;
 	}
 
 	*offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
 
-	drm_gem_object_unreference(gem_obj);
-
-	mutex_unlock(&drm->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem_obj);
 
 	return 0;
 }
@@ -381,11 +376,8 @@
 			  struct seq_file *m)
 {
 	struct drm_gem_object *obj = &cma_obj->base;
-	struct drm_device *dev = obj->dev;
 	uint64_t off;
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
 	off = drm_vma_node_start(&obj->vma_node);
 
 	seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 9cfcd0a..ddfa601 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -95,7 +95,7 @@
 		return -EFAULT;
 
 	version = compat_alloc_user_space(sizeof(*version));
-	if (!access_ok(VERIFY_WRITE, version, sizeof(*version)))
+	if (!version)
 		return -EFAULT;
 	if (__put_user(v32.name_len, &version->name_len)
 	    || __put_user((void __user *)(unsigned long)v32.name,
@@ -142,7 +142,7 @@
 		return -EFAULT;
 
 	u = compat_alloc_user_space(sizeof(*u));
-	if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+	if (!u)
 		return -EFAULT;
 	if (__put_user(uq32.unique_len, &u->unique_len)
 	    || __put_user((void __user *)(unsigned long)uq32.unique,
@@ -170,7 +170,7 @@
 		return -EFAULT;
 
 	u = compat_alloc_user_space(sizeof(*u));
-	if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+	if (!u)
 		return -EFAULT;
 	if (__put_user(uq32.unique_len, &u->unique_len)
 	    || __put_user((void __user *)(unsigned long)uq32.unique,
@@ -202,7 +202,7 @@
 		return -EFAULT;
 
 	map = compat_alloc_user_space(sizeof(*map));
-	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+	if (!map)
 		return -EFAULT;
 	if (__put_user(idx, &map->offset))
 		return -EFAULT;
@@ -239,7 +239,7 @@
 		return -EFAULT;
 
 	map = compat_alloc_user_space(sizeof(*map));
-	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+	if (!map)
 		return -EFAULT;
 	if (__put_user(m32.offset, &map->offset)
 	    || __put_user(m32.size, &map->size)
@@ -279,7 +279,7 @@
 		return -EFAULT;
 
 	map = compat_alloc_user_space(sizeof(*map));
-	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+	if (!map)
 		return -EFAULT;
 	if (__put_user((void *)(unsigned long)handle, &map->handle))
 		return -EFAULT;
@@ -308,7 +308,7 @@
 		return -EFAULT;
 
 	client = compat_alloc_user_space(sizeof(*client));
-	if (!access_ok(VERIFY_WRITE, client, sizeof(*client)))
+	if (!client)
 		return -EFAULT;
 	if (__put_user(idx, &client->idx))
 		return -EFAULT;
@@ -347,7 +347,7 @@
 	int i, err;
 
 	stats = compat_alloc_user_space(sizeof(*stats));
-	if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
+	if (!stats)
 		return -EFAULT;
 
 	err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
@@ -384,8 +384,7 @@
 	unsigned long agp_start;
 
 	buf = compat_alloc_user_space(sizeof(*buf));
-	if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))
-	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
+	if (!buf || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
 		return -EFAULT;
 
 	if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
@@ -416,7 +415,7 @@
 		return -EFAULT;
 
 	buf = compat_alloc_user_space(sizeof(*buf));
-	if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
+	if (!buf)
 		return -EFAULT;
 
 	if (__put_user(b32.size, &buf->size)
@@ -457,7 +456,7 @@
 
 	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
 	request = compat_alloc_user_space(nbytes);
-	if (!access_ok(VERIFY_WRITE, request, nbytes))
+	if (!request)
 		return -EFAULT;
 	list = (struct drm_buf_desc *) (request + 1);
 
@@ -518,7 +517,7 @@
 		return -EINVAL;
 	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
 	request = compat_alloc_user_space(nbytes);
-	if (!access_ok(VERIFY_WRITE, request, nbytes))
+	if (!request)
 		return -EFAULT;
 	list = (struct drm_buf_pub *) (request + 1);
 
@@ -565,7 +564,7 @@
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+	if (!request)
 		return -EFAULT;
 	if (__put_user(req32.count, &request->count)
 	    || __put_user((int __user *)(unsigned long)req32.list,
@@ -591,7 +590,7 @@
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+	if (!request)
 		return -EFAULT;
 	if (__put_user(req32.ctx_id, &request->ctx_id)
 	    || __put_user((void *)(unsigned long)req32.handle,
@@ -615,7 +614,7 @@
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+	if (!request)
 		return -EFAULT;
 	if (__put_user(ctx_id, &request->ctx_id))
 		return -EFAULT;
@@ -648,7 +647,7 @@
 		return -EFAULT;
 
 	res = compat_alloc_user_space(sizeof(*res));
-	if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
+	if (!res)
 		return -EFAULT;
 	if (__put_user(res32.count, &res->count)
 	    || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
@@ -691,7 +690,7 @@
 		return -EFAULT;
 
 	d = compat_alloc_user_space(sizeof(*d));
-	if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
+	if (!d)
 		return -EFAULT;
 
 	if (__put_user(d32.context, &d->context)
@@ -766,7 +765,7 @@
 	int err;
 
 	info = compat_alloc_user_space(sizeof(*info));
-	if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
+	if (!info)
 		return -EFAULT;
 
 	err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
@@ -809,7 +808,7 @@
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	if (!request
 	    || __put_user(req32.size, &request->size)
 	    || __put_user(req32.type, &request->type))
 		return -EFAULT;
@@ -836,7 +835,7 @@
 	u32 handle;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	if (!request
 	    || get_user(handle, &argp->handle)
 	    || __put_user(handle, &request->handle))
 		return -EFAULT;
@@ -860,7 +859,7 @@
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	if (!request
 	    || __put_user(req32.handle, &request->handle)
 	    || __put_user(req32.offset, &request->offset))
 		return -EFAULT;
@@ -876,7 +875,7 @@
 	u32 handle;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	if (!request
 	    || get_user(handle, &argp->handle)
 	    || __put_user(handle, &request->handle))
 		return -EFAULT;
@@ -899,8 +898,7 @@
 	unsigned long x;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
-	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+	if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
 	    || __get_user(x, &argp->size)
 	    || __put_user(x, &request->size))
 		return -EFAULT;
@@ -925,8 +923,7 @@
 	unsigned long x;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
-	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+	if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
 	    || __get_user(x, &argp->handle)
 	    || __put_user(x << PAGE_SHIFT, &request->handle))
 		return -EFAULT;
@@ -954,7 +951,7 @@
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
+	if (!request ||
 	    __put_user(update32.handle, &request->handle) ||
 	    __put_user(update32.type, &request->type) ||
 	    __put_user(update32.num, &request->num) ||
@@ -996,7 +993,7 @@
 		return -EFAULT;
 
 	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	if (!request
 	    || __put_user(req32.request.type, &request->request.type)
 	    || __put_user(req32.request.sequence, &request->request.sequence)
 	    || __put_user(req32.request.signal, &request->request.signal))
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index b1d303f..9a860ca 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -480,7 +480,7 @@
  * indicated permissions. If so, returns zero. Otherwise returns an
  * error code suitable for ioctl return.
  */
-static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
+int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
 {
 	/* ROOT_ONLY is only for CAP_SYS_ADMIN */
 	if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
@@ -508,6 +508,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_ioctl_permit);
 
 #define DRM_IOCTL_DEF(ioctl, _func, _flags)	\
 	[DRM_IOCTL_NR(ioctl)] = {		\
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index b50fa0a..22d207e 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -43,8 +43,8 @@
 #include <linux/export.h>
 
 /* Access macro for slots in vblank timestamp ringbuffer. */
-#define vblanktimestamp(dev, crtc, count) \
-	((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
+#define vblanktimestamp(dev, pipe, count) \
+	((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE])
 
 /* Retry timestamp calculation up to 3 times to satisfy
  * drm_timestamp_precision before giving up.
@@ -57,7 +57,7 @@
 #define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
 
 static bool
-drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
 			  struct timeval *tvblank, unsigned flags);
 
 static unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
@@ -107,7 +107,7 @@
 /**
  * drm_update_vblank_count - update the master vblank counter
  * @dev: DRM device
- * @crtc: counter to update
+ * @pipe: counter to update
  *
  * Call back into the driver to update the appropriate vblank counter
 * (specified by @pipe).  Deal with wraparound, if it occurred, and
@@ -120,9 +120,9 @@
  * Note: caller must hold dev->vbl_lock since this reads & writes
  * device vblank fields.
  */
-static void drm_update_vblank_count(struct drm_device *dev, int crtc)
+static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	u32 cur_vblank, diff;
 	bool rc;
 	struct timeval t_vblank;
@@ -140,21 +140,21 @@
 	 * corresponding vblank timestamp.
 	 */
 	do {
-		cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
-		rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
-	} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+		cur_vblank = dev->driver->get_vblank_counter(dev, pipe);
+		rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, 0);
+	} while (cur_vblank != dev->driver->get_vblank_counter(dev, pipe));
 
 	/* Deal with counter wrap */
 	diff = cur_vblank - vblank->last;
 	if (cur_vblank < vblank->last) {
 		diff += dev->max_vblank_count + 1;
 
-		DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
-			  crtc, vblank->last, cur_vblank, diff);
+		DRM_DEBUG("last_vblank[%u]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+			  pipe, vblank->last, cur_vblank, diff);
 	}
 
-	DRM_DEBUG("updating vblank count on crtc %d, missed %d\n",
-		  crtc, diff);
+	DRM_DEBUG("updating vblank count on crtc %u, missed %d\n",
+		  pipe, diff);
 
 	if (diff == 0)
 		return;
@@ -167,7 +167,7 @@
 	if (!rc)
 		t_vblank = (struct timeval) {0, 0};
 
-	store_vblank(dev, crtc, diff, &t_vblank);
+	store_vblank(dev, pipe, diff, &t_vblank);
 }
 
 /*
@@ -176,9 +176,9 @@
  * are preserved, even if there are any spurious vblank irq's after
  * disable.
  */
-static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	unsigned long irqflags;
 	u32 vblcount;
 	s64 diff_ns;
@@ -206,8 +206,8 @@
 	 * vblank interrupt is disabled.
 	 */
 	if (!vblank->enabled &&
-	    drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0)) {
-		drm_update_vblank_count(dev, crtc);
+	    drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0)) {
+		drm_update_vblank_count(dev, pipe);
 		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 		return;
 	}
@@ -218,7 +218,7 @@
 	 * hardware potentially runtime suspended.
 	 */
 	if (vblank->enabled) {
-		dev->driver->disable_vblank(dev, crtc);
+		dev->driver->disable_vblank(dev, pipe);
 		vblank->enabled = false;
 	}
 
@@ -235,9 +235,9 @@
 	 * delayed gpu counter increment.
 	 */
 	do {
-		vblank->last = dev->driver->get_vblank_counter(dev, crtc);
-		vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
-	} while (vblank->last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+		vblank->last = dev->driver->get_vblank_counter(dev, pipe);
+		vblrc = drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0);
+	} while (vblank->last != dev->driver->get_vblank_counter(dev, pipe) && (--count) && vblrc);
 
 	if (!count)
 		vblrc = 0;
@@ -247,7 +247,7 @@
 	 */
 	vblcount = vblank->count;
 	diff_ns = timeval_to_ns(&tvblank) -
-		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+		  timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount));
 
 	/* If there is at least 1 msec difference between the last stored
 	 * timestamp and tvblank, then we are currently executing our
@@ -262,7 +262,7 @@
 	 * hope for the best.
 	 */
 	if (vblrc && (abs64(diff_ns) > 1000000))
-		store_vblank(dev, crtc, 1, &tvblank);
+		store_vblank(dev, pipe, 1, &tvblank);
 
 	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 }
@@ -271,16 +271,16 @@
 {
 	struct drm_vblank_crtc *vblank = (void *)arg;
 	struct drm_device *dev = vblank->dev;
+	unsigned int pipe = vblank->pipe;
 	unsigned long irqflags;
-	int crtc = vblank->crtc;
 
 	if (!dev->vblank_disable_allowed)
 		return;
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
-		DRM_DEBUG("disabling vblank on crtc %d\n", crtc);
-		vblank_disable_and_save(dev, crtc);
+		DRM_DEBUG("disabling vblank on crtc %u\n", pipe);
+		vblank_disable_and_save(dev, pipe);
 	}
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
@@ -293,14 +293,14 @@
  */
 void drm_vblank_cleanup(struct drm_device *dev)
 {
-	int crtc;
+	unsigned int pipe;
 
 	/* Bail if the driver didn't call drm_vblank_init() */
 	if (dev->num_crtcs == 0)
 		return;
 
-	for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
-		struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	for (pipe = 0; pipe < dev->num_crtcs; pipe++) {
+		struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
 		WARN_ON(vblank->enabled &&
 			drm_core_check_feature(dev, DRIVER_MODESET));
@@ -316,17 +316,18 @@
 
 /**
  * drm_vblank_init - initialize vblank support
- * @dev: drm_device
- * @num_crtcs: number of crtcs supported by @dev
+ * @dev: DRM device
+ * @num_crtcs: number of CRTCs supported by @dev
  *
  * This function initializes vblank support for @num_crtcs display pipelines.
  *
  * Returns:
  * Zero on success or a negative error code on failure.
  */
-int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
 {
-	int i, ret = -ENOMEM;
+	int ret = -ENOMEM;
+	unsigned int i;
 
 	spin_lock_init(&dev->vbl_lock);
 	spin_lock_init(&dev->vblank_time_lock);
@@ -341,7 +342,7 @@
 		struct drm_vblank_crtc *vblank = &dev->vblank[i];
 
 		vblank->dev = dev;
-		vblank->crtc = i;
+		vblank->pipe = i;
 		init_waitqueue_head(&vblank->queue);
 		setup_timer(&vblank->disable_timer, vblank_disable_fn,
 			    (unsigned long)vblank);
@@ -624,17 +625,17 @@
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 			framedur_ns /= 2;
 	} else
-		DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+		DRM_ERROR("crtc %u: Can't calculate constants, dotclock = 0!\n",
 			  crtc->base.id);
 
 	crtc->pixeldur_ns = pixeldur_ns;
 	crtc->linedur_ns  = linedur_ns;
 	crtc->framedur_ns = framedur_ns;
 
-	DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+	DRM_DEBUG("crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
 		  crtc->base.id, mode->crtc_htotal,
 		  mode->crtc_vtotal, mode->crtc_vdisplay);
-	DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+	DRM_DEBUG("crtc %u: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
 		  crtc->base.id, dotclock, framedur_ns,
 		  linedur_ns, pixeldur_ns);
 }
@@ -643,7 +644,7 @@
 /**
  * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
  * @dev: DRM device
- * @crtc: Which CRTC's vblank timestamp to retrieve
+ * @pipe: index of CRTC whose vblank timestamp to retrieve
  * @max_error: Desired maximum allowable error in timestamps (nanosecs)
  *             On return contains true maximum error of timestamp
  * @vblank_time: Pointer to struct timeval which should receive the timestamp
@@ -686,7 +687,8 @@
  * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
  *
  */
-int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+					  unsigned int pipe,
 					  int *max_error,
 					  struct timeval *vblank_time,
 					  unsigned flags,
@@ -700,8 +702,8 @@
 	int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
 	bool invbl;
 
-	if (crtc < 0 || crtc >= dev->num_crtcs) {
-		DRM_ERROR("Invalid crtc %d\n", crtc);
+	if (pipe >= dev->num_crtcs) {
+		DRM_ERROR("Invalid crtc %u\n", pipe);
 		return -EINVAL;
 	}
 
@@ -720,7 +722,7 @@
 	 * Happens during initial modesetting of a crtc.
 	 */
 	if (framedur_ns == 0) {
-		DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+		DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe);
 		return -EAGAIN;
 	}
 
@@ -736,13 +738,13 @@
 		 * Get vertical and horizontal scanout position vpos, hpos,
 		 * and bounding timestamps stime, etime, pre/post query.
 		 */
-		vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos,
+		vbl_status = dev->driver->get_scanout_position(dev, pipe, flags, &vpos,
 							       &hpos, &stime, &etime);
 
 		/* Return as no-op if scanout query unsupported or failed. */
 		if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
-			DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
-				  crtc, vbl_status);
+			DRM_DEBUG("crtc %u : scanoutpos query failed [%d].\n",
+				  pipe, vbl_status);
 			return -EIO;
 		}
 
@@ -756,8 +758,8 @@
 
 	/* Noisy system timing? */
 	if (i == DRM_TIMESTAMP_MAXRETRIES) {
-		DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
-			  crtc, duration_ns/1000, *max_error/1000, i);
+		DRM_DEBUG("crtc %u: Noisy timestamp %d us > %d us [%d reps].\n",
+			  pipe, duration_ns/1000, *max_error/1000, i);
 	}
 
 	/* Return upper bound of timestamp precision error. */
@@ -790,8 +792,8 @@
 		etime = ktime_sub_ns(etime, delta_ns);
 	*vblank_time = ktime_to_timeval(etime);
 
-	DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
-		  crtc, (int)vbl_status, hpos, vpos,
+	DRM_DEBUG("crtc %u : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
+		  pipe, (int)vbl_status, hpos, vpos,
 		  (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
 		  (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
 		  duration_ns/1000, i);
@@ -816,7 +818,7 @@
  * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
  *                             vblank interval
  * @dev: DRM device
- * @crtc: which CRTC's vblank timestamp to retrieve
+ * @pipe: index of CRTC whose vblank timestamp to retrieve
  * @tvblank: Pointer to target struct timeval which should receive the timestamp
  * @flags: Flags to pass to driver:
  *         0 = Default,
@@ -833,7 +835,7 @@
  * True if timestamp is considered to be very precise, false otherwise.
  */
 static bool
-drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
 			  struct timeval *tvblank, unsigned flags)
 {
 	int ret;
@@ -843,7 +845,7 @@
 
 	/* Query driver if possible and precision timestamping enabled. */
 	if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
-		ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+		ret = dev->driver->get_vblank_timestamp(dev, pipe, &max_error,
 							tvblank, flags);
 		if (ret > 0)
 			return true;
@@ -860,7 +862,7 @@
 /**
  * drm_vblank_count - retrieve "cooked" vblank counter value
  * @dev: DRM device
- * @crtc: which counter to retrieve
+ * @pipe: index of CRTC for which to retrieve the counter
  *
  * Fetches the "cooked" vblank count value that represents the number of
  * vblank events since the system was booted, including lost events due to
@@ -871,12 +873,13 @@
  * Returns:
  * The software vblank counter.
  */
-u32 drm_vblank_count(struct drm_device *dev, int crtc)
+u32 drm_vblank_count(struct drm_device *dev, int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return 0;
+
 	return vblank->count;
 }
 EXPORT_SYMBOL(drm_vblank_count);
@@ -901,11 +904,10 @@
 EXPORT_SYMBOL(drm_crtc_vblank_count);
 
 /**
- * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
- * and the system timestamp corresponding to that vblank counter value.
- *
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the
+ *     system timestamp corresponding to that vblank counter value.
  * @dev: DRM device
- * @crtc: which counter to retrieve
+ * @pipe: index of CRTC whose counter to retrieve
  * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
  *
  * Fetches the "cooked" vblank count value that represents the number of
@@ -913,13 +915,13 @@
  * modesetting activity. Returns corresponding system timestamp of the time
  * of the vblank interval that corresponds to the current vblank counter value.
  */
-u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
 			      struct timeval *vblanktime)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	u32 cur_vblank;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return 0;
 
 	/*
@@ -930,7 +932,7 @@
 	do {
 		cur_vblank = vblank->count;
 		smp_rmb();
-		*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+		*vblanktime = vblanktimestamp(dev, pipe, cur_vblank);
 		smp_rmb();
 	} while (cur_vblank != vblank->count);
 
@@ -957,7 +959,7 @@
 /**
  * drm_send_vblank_event - helper to send vblank event after pageflip
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
  * @e: the event to send
  *
  * Updates sequence # and timestamp on event, and sends it to userspace.
@@ -965,20 +967,20 @@
  *
  * This is the legacy version of drm_crtc_send_vblank_event().
  */
-void drm_send_vblank_event(struct drm_device *dev, int crtc,
-		struct drm_pending_vblank_event *e)
+void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
+			   struct drm_pending_vblank_event *e)
 {
 	struct timeval now;
 	unsigned int seq;
 
-	if (crtc >= 0) {
-		seq = drm_vblank_count_and_time(dev, crtc, &now);
+	if (dev->num_crtcs > 0) {
+		seq = drm_vblank_count_and_time(dev, pipe, &now);
 	} else {
 		seq = 0;
 
 		now = get_drm_timestamp();
 	}
-	e->pipe = crtc;
+	e->pipe = pipe;
 	send_vblank_event(dev, e, seq, &now);
 }
 EXPORT_SYMBOL(drm_send_vblank_event);
@@ -1003,11 +1005,14 @@
 /**
  * drm_vblank_enable - enable the vblank interrupt on a CRTC
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
  */
-static int drm_vblank_enable(struct drm_device *dev, int crtc)
+static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	int ret = 0;
 
 	assert_spin_locked(&dev->vbl_lock);
@@ -1022,13 +1027,13 @@
 		 * timestamps. Filtercode in drm_handle_vblank() will
 		 * prevent double-accounting of same vblank interval.
 		 */
-		ret = dev->driver->enable_vblank(dev, crtc);
-		DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+		ret = dev->driver->enable_vblank(dev, pipe);
+		DRM_DEBUG("enabling vblank on crtc %u, ret: %d\n", pipe, ret);
 		if (ret)
 			atomic_dec(&vblank->refcount);
 		else {
 			vblank->enabled = true;
-			drm_update_vblank_count(dev, crtc);
+			drm_update_vblank_count(dev, pipe);
 		}
 	}
 
@@ -1040,7 +1045,7 @@
 /**
  * drm_vblank_get - get a reference count on vblank events
  * @dev: DRM device
- * @crtc: which CRTC to own
+ * @pipe: index of CRTC to own
  *
  * Acquire a reference count on vblank events to avoid having them disabled
  * while in use.
@@ -1048,24 +1053,24 @@
  * This is the legacy version of drm_crtc_vblank_get().
  *
  * Returns:
- * Zero on success, nonzero on failure.
+ * Zero on success or a negative error code on failure.
  */
-int drm_vblank_get(struct drm_device *dev, int crtc)
+int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	unsigned long irqflags;
 	int ret = 0;
 
 	if (!dev->num_crtcs)
 		return -EINVAL;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return -EINVAL;
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	/* Going from 0->1 means we have to enable interrupts again */
 	if (atomic_add_return(1, &vblank->refcount) == 1) {
-		ret = drm_vblank_enable(dev, crtc);
+		ret = drm_vblank_enable(dev, pipe);
 	} else {
 		if (!vblank->enabled) {
 			atomic_dec(&vblank->refcount);
@@ -1088,7 +1093,7 @@
  * This is the native kms version of drm_vblank_get().
  *
  * Returns:
- * Zero on success, nonzero on failure.
+ * Zero on success or a negative error code on failure.
  */
 int drm_crtc_vblank_get(struct drm_crtc *crtc)
 {
@@ -1097,23 +1102,23 @@
 EXPORT_SYMBOL(drm_crtc_vblank_get);
 
 /**
- * drm_vblank_put - give up ownership of vblank events
+ * drm_vblank_put - release ownership of vblank events
  * @dev: DRM device
- * @crtc: which counter to give up
+ * @pipe: index of CRTC to release
  *
  * Release ownership of a given vblank counter, turning off interrupts
  * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
  *
  * This is the legacy version of drm_crtc_vblank_put().
  */
-void drm_vblank_put(struct drm_device *dev, int crtc)
+void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
-	if (WARN_ON(atomic_read(&vblank->refcount) == 0))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(atomic_read(&vblank->refcount) == 0))
 		return;
 
 	/* Last user schedules interrupt disable */
@@ -1147,30 +1152,34 @@
 /**
  * drm_wait_one_vblank - wait for one vblank
  * @dev: DRM device
- * @crtc: crtc index
+ * @pipe: CRTC index
  *
 * This waits for one vblank to pass on @pipe, using the irq driver interfaces.
 * It is a failure to call this when the vblank irq for @pipe is disabled, e.g.
  * due to lack of driver support or because the crtc is off.
  */
-void drm_wait_one_vblank(struct drm_device *dev, int crtc)
+void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
 {
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	int ret;
 	u32 last;
 
-	ret = drm_vblank_get(dev, crtc);
-	if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", crtc, ret))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return;
 
-	last = drm_vblank_count(dev, crtc);
+	ret = drm_vblank_get(dev, pipe);
+	if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", pipe, ret))
+		return;
 
-	ret = wait_event_timeout(dev->vblank[crtc].queue,
-				 last != drm_vblank_count(dev, crtc),
+	last = drm_vblank_count(dev, pipe);
+
+	ret = wait_event_timeout(vblank->queue,
+				 last != drm_vblank_count(dev, pipe),
 				 msecs_to_jiffies(100));
 
-	WARN(ret == 0, "vblank wait timed out on crtc %i\n", crtc);
+	WARN(ret == 0, "vblank wait timed out on crtc %i\n", pipe);
 
-	drm_vblank_put(dev, crtc);
+	drm_vblank_put(dev, pipe);
 }
 EXPORT_SYMBOL(drm_wait_one_vblank);
 
@@ -1191,7 +1200,7 @@
 /**
  * drm_vblank_off - disable vblank events on a CRTC
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
  *
  * Drivers can use this function to shut down the vblank interrupt handling when
  * disabling a crtc. This function ensures that the latest vblank frame count is
@@ -1202,21 +1211,21 @@
  *
  * This is the legacy version of drm_crtc_vblank_off().
  */
-void drm_vblank_off(struct drm_device *dev, int crtc)
+void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
 	unsigned long irqflags;
 	unsigned int seq;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return;
 
 	spin_lock_irqsave(&dev->event_lock, irqflags);
 
 	spin_lock(&dev->vbl_lock);
-	vblank_disable_and_save(dev, crtc);
+	vblank_disable_and_save(dev, pipe);
 	wake_up(&vblank->queue);
 
 	/*
@@ -1230,16 +1239,16 @@
 	spin_unlock(&dev->vbl_lock);
 
 	/* Send any queued vblank events, lest the natives grow disquiet */
-	seq = drm_vblank_count_and_time(dev, crtc, &now);
+	seq = drm_vblank_count_and_time(dev, pipe, &now);
 
 	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
-		if (e->pipe != crtc)
+		if (e->pipe != pipe)
 			continue;
 		DRM_DEBUG("Sending premature vblank event on disable: \
 			  wanted %d, current %d\n",
 			  e->event.sequence, seq);
 		list_del(&e->base.link);
-		drm_vblank_put(dev, e->pipe);
+		drm_vblank_put(dev, pipe);
 		send_vblank_event(dev, e, seq, &now);
 	}
 	spin_unlock_irqrestore(&dev->event_lock, irqflags);
@@ -1267,7 +1276,7 @@
 
 /**
  * drm_crtc_vblank_reset - reset vblank state to off on a CRTC
- * @crtc: CRTC in question
+ * @drm_crtc: CRTC in question
  *
  * Drivers can use this function to reset the vblank state to off at load time.
  * Drivers should use this together with the drm_crtc_vblank_off() and
@@ -1300,7 +1309,7 @@
 /**
  * drm_vblank_on - enable vblank events on a CRTC
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
  *
 * This function restores the vblank interrupt state captured with
  * drm_vblank_off() again. Note that calls to drm_vblank_on() and
@@ -1309,12 +1318,12 @@
  *
  * This is the legacy version of drm_crtc_vblank_on().
  */
-void drm_vblank_on(struct drm_device *dev, int crtc)
+void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	unsigned long irqflags;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return;
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -1332,7 +1341,7 @@
 	 * vblank counter value before and after a modeset
 	 */
 	vblank->last =
-		(dev->driver->get_vblank_counter(dev, crtc) - 1) &
+		(dev->driver->get_vblank_counter(dev, pipe) - 1) &
 		dev->max_vblank_count;
 	/*
 	 * re-enable interrupts if there are users left, or the
@@ -1340,7 +1349,7 @@
 	 */
 	if (atomic_read(&vblank->refcount) != 0 ||
 	    (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
-		WARN_ON(drm_vblank_enable(dev, crtc));
+		WARN_ON(drm_vblank_enable(dev, pipe));
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
 EXPORT_SYMBOL(drm_vblank_on);
@@ -1365,7 +1374,7 @@
 /**
  * drm_vblank_pre_modeset - account for vblanks across mode sets
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
  *
  * Account for vblank events across mode setting events, which will likely
  * reset the hardware frame counter.
@@ -1385,15 +1394,15 @@
  * Drivers must call drm_vblank_post_modeset() when re-enabling the same crtc
  * again.
  */
-void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
 	/* vblank is not initialized (IRQ not installed ?), or has been freed */
 	if (!dev->num_crtcs)
 		return;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return;
 
 	/*
@@ -1405,7 +1414,7 @@
 	 */
 	if (!vblank->inmodeset) {
 		vblank->inmodeset = 0x1;
-		if (drm_vblank_get(dev, crtc) == 0)
+		if (drm_vblank_get(dev, pipe) == 0)
 			vblank->inmodeset |= 0x2;
 	}
 }
@@ -1414,27 +1423,30 @@
 /**
  * drm_vblank_post_modeset - undo drm_vblank_pre_modeset changes
  * @dev: DRM device
- * @crtc: CRTC in question
+ * @pipe: CRTC index
  *
  * This function again drops the temporary vblank reference acquired in
  * drm_vblank_pre_modeset.
  */
-void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	unsigned long irqflags;
 
 	/* vblank is not initialized (IRQ not installed ?), or has been freed */
 	if (!dev->num_crtcs)
 		return;
 
+	if (WARN_ON(pipe >= dev->num_crtcs))
+		return;
+
 	if (vblank->inmodeset) {
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
 		dev->vblank_disable_allowed = true;
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
 		if (vblank->inmodeset & 0x2)
-			drm_vblank_put(dev, crtc);
+			drm_vblank_put(dev, pipe);
 
 		vblank->inmodeset = 0;
 	}
@@ -1456,7 +1468,7 @@
 		    struct drm_file *file_priv)
 {
 	struct drm_modeset_ctl *modeset = data;
-	unsigned int crtc;
+	unsigned int pipe;
 
 	/* If drm_vblank_init() hasn't been called yet, just no-op */
 	if (!dev->num_crtcs)
@@ -1466,16 +1478,16 @@
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return 0;
 
-	crtc = modeset->crtc;
-	if (crtc >= dev->num_crtcs)
+	pipe = modeset->crtc;
+	if (pipe >= dev->num_crtcs)
 		return -EINVAL;
 
 	switch (modeset->cmd) {
 	case _DRM_PRE_MODESET:
-		drm_vblank_pre_modeset(dev, crtc);
+		drm_vblank_pre_modeset(dev, pipe);
 		break;
 	case _DRM_POST_MODESET:
-		drm_vblank_post_modeset(dev, crtc);
+		drm_vblank_post_modeset(dev, pipe);
 		break;
 	default:
 		return -EINVAL;
@@ -1484,7 +1496,7 @@
 	return 0;
 }
 
-static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
 				  union drm_wait_vblank *vblwait,
 				  struct drm_file *file_priv)
 {
@@ -1538,7 +1550,7 @@
 		vblwait->reply.sequence = vblwait->request.sequence;
 	}
 
-	DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+	DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n",
 		  vblwait->request.sequence, seq, pipe);
 
 	trace_drm_vblank_event_queued(current->pid, pipe,
@@ -1587,7 +1599,7 @@
 	struct drm_vblank_crtc *vblank;
 	union drm_wait_vblank *vblwait = data;
 	int ret;
-	unsigned int flags, seq, crtc, high_crtc;
+	unsigned int flags, seq, pipe, high_pipe;
 
 	if (!dev->irq_enabled)
 		return -EINVAL;
@@ -1606,22 +1618,22 @@
 	}
 
 	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
-	high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
-	if (high_crtc)
-		crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
+	high_pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
+	if (high_pipe)
+		pipe = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
 	else
-		crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
-	if (crtc >= dev->num_crtcs)
+		pipe = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+	if (pipe >= dev->num_crtcs)
 		return -EINVAL;
 
-	vblank = &dev->vblank[crtc];
+	vblank = &dev->vblank[pipe];
 
-	ret = drm_vblank_get(dev, crtc);
+	ret = drm_vblank_get(dev, pipe);
 	if (ret) {
 		DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
 		return ret;
 	}
-	seq = drm_vblank_count(dev, crtc);
+	seq = drm_vblank_count(dev, pipe);
 
 	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
 	case _DRM_VBLANK_RELATIVE:
@@ -1638,7 +1650,7 @@
 		/* must hold on to the vblank ref until the event fires
 		 * drm_vblank_put will be called asynchronously
 		 */
-		return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+		return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
 	}
 
 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
@@ -1646,11 +1658,11 @@
 		vblwait->request.sequence = seq + 1;
 	}
 
-	DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
-		  vblwait->request.sequence, crtc);
+	DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
+		  vblwait->request.sequence, pipe);
 	vblank->last_wait = vblwait->request.sequence;
 	DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
-		    (((drm_vblank_count(dev, crtc) -
+		    (((drm_vblank_count(dev, pipe) -
 		       vblwait->request.sequence) <= (1 << 23)) ||
 		     !vblank->enabled ||
 		     !dev->irq_enabled));
@@ -1658,7 +1670,7 @@
 	if (ret != -EINTR) {
 		struct timeval now;
 
-		vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
+		vblwait->reply.sequence = drm_vblank_count_and_time(dev, pipe, &now);
 		vblwait->reply.tval_sec = now.tv_sec;
 		vblwait->reply.tval_usec = now.tv_usec;
 
@@ -1669,11 +1681,11 @@
 	}
 
 done:
-	drm_vblank_put(dev, crtc);
+	drm_vblank_put(dev, pipe);
 	return ret;
 }
 
-static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
 {
 	struct drm_pending_vblank_event *e, *t;
 	struct timeval now;
@@ -1681,10 +1693,10 @@
 
 	assert_spin_locked(&dev->event_lock);
 
-	seq = drm_vblank_count_and_time(dev, crtc, &now);
+	seq = drm_vblank_count_and_time(dev, pipe, &now);
 
 	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
-		if (e->pipe != crtc)
+		if (e->pipe != pipe)
 			continue;
 		if ((seq - e->event.sequence) > (1<<23))
 			continue;
@@ -1693,26 +1705,26 @@
 			  e->event.sequence, seq);
 
 		list_del(&e->base.link);
-		drm_vblank_put(dev, e->pipe);
+		drm_vblank_put(dev, pipe);
 		send_vblank_event(dev, e, seq, &now);
 	}
 
-	trace_drm_vblank_event(crtc, seq);
+	trace_drm_vblank_event(pipe, seq);
 }
 
 /**
  * drm_handle_vblank - handle a vblank event
  * @dev: DRM device
- * @crtc: where this event occurred
+ * @pipe: index of CRTC where this event occurred
  *
  * Drivers should call this routine in their vblank interrupt handlers to
  * update the vblank counter and send any signals that may be pending.
  *
  * This is the legacy version of drm_crtc_handle_vblank().
  */
-bool drm_handle_vblank(struct drm_device *dev, int crtc)
+bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
 {
-	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	u32 vblcount;
 	s64 diff_ns;
 	struct timeval tvblank;
@@ -1721,7 +1733,7 @@
 	if (WARN_ON_ONCE(!dev->num_crtcs))
 		return false;
 
-	if (WARN_ON(crtc >= dev->num_crtcs))
+	if (WARN_ON(pipe >= dev->num_crtcs))
 		return false;
 
 	spin_lock_irqsave(&dev->event_lock, irqflags);
@@ -1745,11 +1757,11 @@
 
 	/* Get current timestamp and count. */
 	vblcount = vblank->count;
-	drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+	drm_get_last_vbltimestamp(dev, pipe, &tvblank, DRM_CALLED_FROM_VBLIRQ);
 
 	/* Compute time difference to timestamp of last vblank */
 	diff_ns = timeval_to_ns(&tvblank) -
-		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+		  timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount));
 
 	/* Update vblank timestamp and count if at least
 	 * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
@@ -1761,15 +1773,15 @@
 	 * ignore those for accounting.
 	 */
 	if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS)
-		store_vblank(dev, crtc, 1, &tvblank);
+		store_vblank(dev, pipe, 1, &tvblank);
 	else
-		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
-			  crtc, (int) diff_ns);
+		DRM_DEBUG("crtc %u: Redundant vblirq ignored. diff_ns = %d\n",
+			  pipe, (int) diff_ns);
 
 	spin_unlock(&dev->vblank_time_lock);
 
 	wake_up(&vblank->queue);
-	drm_handle_vblank_events(dev, crtc);
+	drm_handle_vblank_events(dev, pipe);
 
 	spin_unlock_irqrestore(&dev->event_lock, irqflags);
 
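The hunks above complete the drm_irq.c side of the crtc-to-pipe rename: every vblank entry point now takes an unsigned CRTC index, so a negative index can no longer slip past the range checks. A driver feeds this API from its interrupt handler roughly as below; this is a sketch, and foo_irq_handler/foo_vblank_pending are illustrative names, not functions from this series.

#include <drm/drmP.h>
#include <linux/interrupt.h>

/* Stand-in for decoding the device's interrupt status register. */
static bool foo_vblank_pending(struct drm_device *dev, unsigned int pipe)
{
	return false;	/* real hardware would check its IRQ status here */
}

static irqreturn_t foo_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	unsigned int pipe;

	/* Report a vblank on every pipe that signalled one. */
	for (pipe = 0; pipe < dev->num_crtcs; pipe++)
		if (foo_vblank_pending(dev, pipe))
			drm_handle_vblank(dev, pipe);

	return IRQ_HANDLED;
}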
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index c1dc614..9b73178 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -42,7 +42,7 @@
 #define DRM_KERNEL_CONTEXT		0
 #define DRM_RESERVED_CONTEXTS		1
 
-int drm_legacy_ctxbitmap_init(struct drm_device *dev);
+void drm_legacy_ctxbitmap_init(struct drm_device *dev);
 void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
 void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
 void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index f861361..4924d381 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -61,6 +61,9 @@
 	struct drm_master *master = file_priv->master;
 	int ret = 0;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	++file_priv->lock_count;
 
 	if (lock->context == DRM_KERNEL_CONTEXT) {
@@ -153,6 +156,9 @@
 	struct drm_lock *lock = data;
 	struct drm_master *master = file_priv->master;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
 	if (lock->context == DRM_KERNEL_CONTEXT) {
 		DRM_ERROR("Process %d using kernel context %d\n",
 			  task_pid_nr(current), lock->context);
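
Both legacy lock ioctls now refuse to run for modesetting drivers before touching any state. This early-return guard is the stock pattern for fencing off user-mode-setting paths; a minimal sketch, with foo_legacy_ioctl as an illustrative name:

#include <drm/drmP.h>

static int foo_legacy_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	/* KMS drivers never use the legacy hardware lock. */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	/* ... UMS-only handling ... */
	return 0;
}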
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index c0a5cd8..fba321c 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -55,41 +55,27 @@
  *     drm_modeset_acquire_fini(&ctx);
  */
 
-
 /**
- * __drm_modeset_lock_all - internal helper to grab all modeset locks
- * @dev: DRM device
- * @trylock: trylock mode for atomic contexts
+ * drm_modeset_lock_all - take all modeset locks
+ * @dev: drm device
  *
- * This is a special version of drm_modeset_lock_all() which can also be used in
- * atomic contexts. Then @trylock must be set to true.
- *
- * Returns:
- * 0 on success or negative error code on failure.
+ * This function takes all modeset locks, suitable where a more fine-grained
+ * scheme isn't (yet) implemented. Locks must be dropped with
+ * drm_modeset_unlock_all.
  */
-int __drm_modeset_lock_all(struct drm_device *dev,
-			   bool trylock)
+void drm_modeset_lock_all(struct drm_device *dev)
 {
 	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_modeset_acquire_ctx *ctx;
 	int ret;
 
-	ctx = kzalloc(sizeof(*ctx),
-		      trylock ? GFP_ATOMIC : GFP_KERNEL);
-	if (!ctx)
-		return -ENOMEM;
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (WARN_ON(!ctx))
+		return;
 
-	if (trylock) {
-		if (!mutex_trylock(&config->mutex)) {
-			ret = -EBUSY;
-			goto out;
-		}
-	} else {
-		mutex_lock(&config->mutex);
-	}
+	mutex_lock(&config->mutex);
 
 	drm_modeset_acquire_init(ctx, 0);
-	ctx->trylock_only = trylock;
 
 retry:
 	ret = drm_modeset_lock(&config->connection_mutex, ctx);
@@ -108,7 +94,7 @@
 
 	drm_warn_on_modeset_not_all_locked(dev);
 
-	return 0;
+	return;
 
 fail:
 	if (ret == -EDEADLK) {
@@ -116,23 +102,7 @@
 		goto retry;
 	}
 
-out:
 	kfree(ctx);
-	return ret;
-}
-EXPORT_SYMBOL(__drm_modeset_lock_all);
-
-/**
- * drm_modeset_lock_all - take all modeset locks
- * @dev: drm device
- *
- * This function takes all modeset locks, suitable where a more fine-grained
- * scheme isn't (yet) implemented. Locks must be dropped with
- * drm_modeset_unlock_all.
- */
-void drm_modeset_lock_all(struct drm_device *dev)
-{
-	WARN_ON(__drm_modeset_lock_all(dev, false) != 0);
 }
 EXPORT_SYMBOL(drm_modeset_lock_all);
 
@@ -276,7 +246,7 @@
 	if (oops_in_progress)
 		return;
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+	drm_for_each_crtc(crtc, dev)
 		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
@@ -464,18 +434,17 @@
 int drm_modeset_lock_all_crtcs(struct drm_device *dev,
 		struct drm_modeset_acquire_ctx *ctx)
 {
-	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_crtc *crtc;
 	struct drm_plane *plane;
 	int ret = 0;
 
-	list_for_each_entry(crtc, &config->crtc_list, head) {
+	drm_for_each_crtc(crtc, dev) {
 		ret = drm_modeset_lock(&crtc->mutex, ctx);
 		if (ret)
 			return ret;
 	}
 
-	list_for_each_entry(plane, &config->plane_list, head) {
+	drm_for_each_plane(plane, dev) {
 		ret = drm_modeset_lock(&plane->mutex, ctx);
 		if (ret)
 			return ret;
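
drm_modeset_lock_all() loses its atomic-context trylock mode and now always allocates with GFP_KERNEL and sleeps on config->mutex; the retry label above handles ww-mutex deadlock backoff, and the CRTC/plane walks switch to the new drm_for_each_* iterators. The acquire-context protocol those loops depend on looks like this in isolation; a sketch, with foo_lock_crtcs as an illustrative name:

#include <drm/drmP.h>
#include <drm/drm_modeset_lock.h>

static int foo_lock_crtcs(struct drm_device *dev,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc *crtc;
	int ret;

retry:
	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret == -EDEADLK) {
			/* Drop every lock taken so far, then retry in
			 * the ww-mutex ordering. */
			drm_modeset_backoff(ctx);
			goto retry;
		}
		if (ret)
			return ret;
	}

	return 0;
}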
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index aaa1307..be38840 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -19,7 +19,7 @@
 	unsigned int index = 0;
 	struct drm_crtc *tmp;
 
-	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+	drm_for_each_crtc(tmp, dev) {
 		if (tmp->port == port)
 			return 1 << index;
 
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 2f0ed11..5e5a07a 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -91,13 +91,14 @@
 	 */
 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+	drm_for_each_connector(connector, dev) {
 		if (connector->encoder && connector->encoder->crtc == crtc) {
 			if (connector_list != NULL && count < num_connectors)
 				*(connector_list++) = connector;
 
 			count++;
 		}
+	}
 
 	return count;
 }
@@ -436,7 +437,7 @@
 
 	for (i = 0; i < 2; i++) {
 		if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin)
-			crtc_funcs[i]->atomic_begin(crtc[i]);
+			crtc_funcs[i]->atomic_begin(crtc[i], crtc[i]->state);
 	}
 
 	/*
@@ -451,7 +452,7 @@
 
 	for (i = 0; i < 2; i++) {
 		if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
-			crtc_funcs[i]->atomic_flush(crtc[i]);
+			crtc_funcs[i]->atomic_flush(crtc[i], crtc[i]->state);
 	}
 
 	/*
@@ -525,10 +526,12 @@
 
 	if (plane->funcs->atomic_duplicate_state)
 		plane_state = plane->funcs->atomic_duplicate_state(plane);
-	else if (plane->state)
+	else {
+		if (!plane->state)
+			drm_atomic_helper_plane_reset(plane);
+
 		plane_state = drm_atomic_helper_plane_duplicate_state(plane);
-	else
-		plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+	}
 	if (!plane_state)
 		return -ENOMEM;
 	plane_state->plane = plane;
@@ -572,10 +575,12 @@
 
 	if (plane->funcs->atomic_duplicate_state)
 		plane_state = plane->funcs->atomic_duplicate_state(plane);
-	else if (plane->state)
+	else {
+		if (!plane->state)
+			drm_atomic_helper_plane_reset(plane);
+
 		plane_state = drm_atomic_helper_plane_duplicate_state(plane);
-	else
-		plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+	}
 	if (!plane_state)
 		return -ENOMEM;
 	plane_state->plane = plane;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 04203c0..d734780 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -93,6 +93,27 @@
 	return 1;
 }
 
+#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
+static void __drm_kms_helper_poll_enable(struct drm_device *dev)
+{
+	bool poll = false;
+	struct drm_connector *connector;
+
+	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+		return;
+
+	drm_for_each_connector(connector, dev) {
+		if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+					 DRM_CONNECTOR_POLL_DISCONNECT))
+			poll = true;
+	}
+
+	if (poll)
+		schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+}
+
 static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
 							      uint32_t maxX, uint32_t maxY, bool merge_type_bits)
 {
@@ -153,7 +174,7 @@
 
 	/* Re-enable polling in case the global poll config changed. */
 	if (drm_kms_helper_poll != dev->mode_config.poll_running)
-		drm_kms_helper_poll_enable(dev);
+		__drm_kms_helper_poll_enable(dev);
 
 	dev->mode_config.poll_running = drm_kms_helper_poll;
 
@@ -295,7 +316,6 @@
 }
 EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
 
-#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
 static void output_poll_execute(struct work_struct *work)
 {
 	struct delayed_work *delayed_work = to_delayed_work(work);
@@ -312,7 +332,7 @@
 		goto out;
 
 	mutex_lock(&dev->mode_config.mutex);
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 
 		/* Ignore forced connectors. */
 		if (connector->force)
@@ -407,20 +427,9 @@
  */
 void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
-	bool poll = false;
-	struct drm_connector *connector;
-
-	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
-		return;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
-					 DRM_CONNECTOR_POLL_DISCONNECT))
-			poll = true;
-	}
-
-	if (poll)
-		schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+	mutex_lock(&dev->mode_config.mutex);
+	__drm_kms_helper_poll_enable(dev);
+	mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
@@ -495,7 +504,7 @@
 		return false;
 
 	mutex_lock(&dev->mode_config.mutex);
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_for_each_connector(connector, dev) {
 
 		/* Only handle HPD capable connectors. */
 		if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
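
The probe-helper change is the classic locked/unlocked split: __drm_kms_helper_poll_enable() asserts that mode_config.mutex is held, while the exported drm_kms_helper_poll_enable() now takes the mutex itself. That lets output_poll_execute(), which already holds the mutex, re-enable polling without self-deadlocking. The idiom reduced to essentials, with all names illustrative:

#include <linux/bug.h>
#include <linux/mutex.h>

struct foo_device {
	struct mutex lock;
	/* ... */
};

/* Core variant: the caller must already hold the lock. */
static void __foo_poll_enable(struct foo_device *fdev)
{
	WARN_ON(!mutex_is_locked(&fdev->lock));
	/* ... work that relies on the locked state ... */
}

/* Public variant: acquires the lock around the core. */
void foo_poll_enable(struct foo_device *fdev)
{
	mutex_lock(&fdev->lock);
	__foo_poll_enable(fdev);
	mutex_unlock(&fdev->lock);
}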
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 43003c4..df0b61a 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -56,7 +56,7 @@
 
 config DRM_EXYNOS_DP
 	bool "EXYNOS DRM DP driver support"
-	depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
+	depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON)
 	default DRM_EXYNOS
 	select DRM_PANEL
 	help
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 7de0b10..02aecfe 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -3,10 +3,9 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
-exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o \
-		exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
-		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
-		exynos_drm_plane.o exynos_drm_dmabuf.o
+exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
+		exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
+		exynos_drm_plane.o
 
 exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 8b1225f..b3c7307 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -54,6 +54,13 @@
 	"sclk_decon_eclk",
 };
 
+static const uint32_t decon_formats[] = {
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+};
+
 static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
 {
 	struct decon_context *ctx = crtc->ctx;
@@ -152,15 +159,15 @@
 #define OFFSIZE(x)		(((x) & 0x3fff) << 14)
 #define PAGEWIDTH(x)		((x) & 0x3fff)
 
-static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
+static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
+				 struct drm_framebuffer *fb)
 {
-	struct exynos_drm_plane *plane = &ctx->planes[win];
 	unsigned long val;
 
 	val = readl(ctx->addr + DECON_WINCONx(win));
 	val &= ~WINCONx_BPPMODE_MASK;
 
-	switch (plane->pixel_format) {
+	switch (fb->pixel_format) {
 	case DRM_FORMAT_XRGB1555:
 		val |= WINCONx_BPPMODE_16BPP_I1555;
 		val |= WINCONx_HAWSWP_F;
@@ -186,7 +193,7 @@
 		return;
 	}
 
-	DRM_DEBUG_KMS("bpp = %u\n", plane->bpp);
+	DRM_DEBUG_KMS("bpp = %u\n", fb->bits_per_pixel);
 
 	/*
 	 * In case of exynos, setting dma-burst to 16Word causes permanent
@@ -196,7 +203,7 @@
 	 * movement causes unstable DMA which results into iommu crash/tear.
 	 */
 
-	if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+	if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
 		val &= ~WINCONx_BURSTLEN_MASK;
 		val |= WINCONx_BURSTLEN_8WORD;
 	}
@@ -219,27 +226,35 @@
 	writel(val, ctx->addr + DECON_SHADOWCON);
 }
 
-static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_atomic_begin(struct exynos_drm_crtc *crtc,
+					struct exynos_drm_plane *plane)
 {
 	struct decon_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
-	u32 val;
-
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
-
-	plane = &ctx->planes[win];
 
 	if (ctx->suspended)
 		return;
 
-	decon_shadow_protect_win(ctx, win, true);
+	decon_shadow_protect_win(ctx, plane->zpos, true);
+}
+
+static void decon_update_plane(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
+{
+	struct decon_context *ctx = crtc->ctx;
+	struct drm_plane_state *state = plane->base.state;
+	unsigned int win = plane->zpos;
+	unsigned int bpp = state->fb->bits_per_pixel >> 3;
+	unsigned int pitch = state->fb->pitches[0];
+	u32 val;
+
+	if (ctx->suspended)
+		return;
 
 	val = COORDINATE_X(plane->crtc_x) | COORDINATE_Y(plane->crtc_y);
 	writel(val, ctx->addr + DECON_VIDOSDxA(win));
 
-	val = COORDINATE_X(plane->crtc_x + plane->crtc_width - 1) |
-		COORDINATE_Y(plane->crtc_y + plane->crtc_height - 1);
+	val = COORDINATE_X(plane->crtc_x + plane->crtc_w - 1) |
+		COORDINATE_Y(plane->crtc_y + plane->crtc_h - 1);
 	writel(val, ctx->addr + DECON_VIDOSDxB(win));
 
 	val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
@@ -252,42 +267,33 @@
 
 	writel(plane->dma_addr[0], ctx->addr + DECON_VIDW0xADD0B0(win));
 
-	val = plane->dma_addr[0] + plane->pitch * plane->crtc_height;
+	val = plane->dma_addr[0] + pitch * plane->crtc_h;
 	writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
 
-	val = OFFSIZE(plane->pitch - plane->crtc_width * (plane->bpp >> 3))
-		| PAGEWIDTH(plane->crtc_width * (plane->bpp >> 3));
+	val = OFFSIZE(pitch - plane->crtc_w * bpp)
+		| PAGEWIDTH(plane->crtc_w * bpp);
 	writel(val, ctx->addr + DECON_VIDW0xADD2(win));
 
-	decon_win_set_pixfmt(ctx, win);
+	decon_win_set_pixfmt(ctx, win, state->fb);
 
 	/* window enable */
 	val = readl(ctx->addr + DECON_WINCONx(win));
 	val |= WINCONx_ENWIN_F;
 	writel(val, ctx->addr + DECON_WINCONx(win));
 
-	decon_shadow_protect_win(ctx, win, false);
-
 	/* standalone update */
 	val = readl(ctx->addr + DECON_UPDATE);
 	val |= STANDALONE_UPDATE_F;
 	writel(val, ctx->addr + DECON_UPDATE);
-
-	if (ctx->i80_if)
-		atomic_set(&ctx->win_updated, 1);
 }
 
-static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_disable_plane(struct exynos_drm_crtc *crtc,
+				struct exynos_drm_plane *plane)
 {
 	struct decon_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
+	unsigned int win = plane->zpos;
 	u32 val;
 
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
-
-	plane = &ctx->planes[win];
-
 	if (ctx->suspended)
 		return;
 
@@ -306,6 +312,20 @@
 	writel(val, ctx->addr + DECON_UPDATE);
 }
 
+static void decon_atomic_flush(struct exynos_drm_crtc *crtc,
+				struct exynos_drm_plane *plane)
+{
+	struct decon_context *ctx = crtc->ctx;
+
+	if (ctx->suspended)
+		return;
+
+	decon_shadow_protect_win(ctx, plane->zpos, false);
+
+	if (ctx->i80_if)
+		atomic_set(&ctx->win_updated, 1);
+}
+
 static void decon_swreset(struct decon_context *ctx)
 {
 	unsigned int tries;
@@ -378,7 +398,7 @@
 	 * a destroyed buffer later.
 	 */
 	for (i = 0; i < WINDOWS_NR; i++)
-		decon_win_disable(crtc, i);
+		decon_disable_plane(crtc, &ctx->planes[i]);
 
 	decon_swreset(ctx);
 
@@ -407,7 +427,7 @@
 		writel(val, ctx->addr + DECON_TRIGCON);
 	}
 
-	drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+	drm_crtc_handle_vblank(&ctx->crtc->base);
 }
 
 static void decon_clear_channels(struct exynos_drm_crtc *crtc)
@@ -460,10 +480,11 @@
 	.enable_vblank		= decon_enable_vblank,
 	.disable_vblank		= decon_disable_vblank,
 	.commit			= decon_commit,
-	.win_commit		= decon_win_commit,
-	.win_disable		= decon_win_disable,
+	.atomic_begin		= decon_atomic_begin,
+	.update_plane		= decon_update_plane,
+	.disable_plane		= decon_disable_plane,
+	.atomic_flush		= decon_atomic_flush,
 	.te_handler		= decon_te_irq_handler,
-	.clear_channels		= decon_clear_channels,
 };
 
 static int decon_bind(struct device *dev, struct device *master, void *data)
@@ -483,7 +504,8 @@
 		type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
 							DRM_PLANE_TYPE_OVERLAY;
 		ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-				1 << ctx->pipe, type, zpos);
+				1 << ctx->pipe, type, decon_formats,
+				ARRAY_SIZE(decon_formats), zpos);
 		if (ret)
 			return ret;
 	}
@@ -497,7 +519,9 @@
 		goto err;
 	}
 
-	ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev);
+	decon_clear_channels(ctx->crtc);
+
+	ret = drm_iommu_attach_device(drm_dev, dev);
 	if (ret)
 		goto err;
 
@@ -514,8 +538,7 @@
 	decon_disable(ctx->crtc);
 
 	/* detach this sub driver from iommu mapping if supported. */
-	if (is_drm_iommu_supported(ctx->drm_dev))
-		drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+	drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
 }
 
 static const struct component_ops decon_component_ops = {
@@ -533,7 +556,7 @@
 
 	val = readl(ctx->addr + DECON_VIDINTCON1);
 	if (val & VIDINTCON1_INTFRMPEND) {
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+		drm_crtc_handle_vblank(&ctx->crtc->base);
 
 		/* clear */
 		writel(VIDINTCON1_INTFRMPEND, ctx->addr + DECON_VIDINTCON1);
@@ -547,13 +570,21 @@
 {
 	struct decon_context *ctx = dev_id;
 	u32 val;
+	int win;
 
 	if (!test_bit(BIT_CLKS_ENABLED, &ctx->enabled))
 		goto out;
 
 	val = readl(ctx->addr + DECON_VIDINTCON1);
 	if (val & VIDINTCON1_INTFRMDONEPEND) {
-		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+		for (win = 0 ; win < WINDOWS_NR ; win++) {
+			struct exynos_drm_plane *plane = &ctx->planes[win];
+
+			if (!plane->pending_fb)
+				continue;
+
+			exynos_drm_crtc_finish_update(ctx->crtc, plane);
+		}
 
 		/* clear */
 		writel(VIDINTCON1_INTFRMDONEPEND,
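
The monolithic decon_win_commit() is split so that shadow-register protection brackets the whole plane update: atomic_begin() raises protection, update_plane() programs addresses, geometry and pixel format, and atomic_flush() drops protection so the new settings latch (and, on i80 interfaces, marks the window updated). Per plane, the exynos CRTC helpers drive the hooks roughly in this order; a sketch with error and suspend handling omitted, and foo_commit_plane an illustrative name:

#include "exynos_drm_drv.h"

static void foo_commit_plane(struct exynos_drm_crtc *exynos_crtc,
			     struct exynos_drm_plane *exynos_plane)
{
	const struct exynos_drm_crtc_ops *ops = exynos_crtc->ops;

	if (ops->atomic_begin)
		ops->atomic_begin(exynos_crtc, exynos_plane);	/* protect */
	if (ops->update_plane)
		ops->update_plane(exynos_crtc, exynos_plane);	/* program */
	if (ops->atomic_flush)
		ops->atomic_flush(exynos_crtc, exynos_plane);	/* latch */
}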
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 362532a..cbdb78e 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -61,7 +61,7 @@
 	atomic_t			wait_vsync_event;
 
 	struct exynos_drm_panel_info panel;
-	struct exynos_drm_display *display;
+	struct drm_encoder *encoder;
 };
 
 static const struct of_device_id decon_driver_dt_match[] = {
@@ -70,6 +70,18 @@
 };
 MODULE_DEVICE_TABLE(of, decon_driver_dt_match);
 
+static const uint32_t decon_formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_RGBX8888,
+	DRM_FORMAT_BGRX8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_RGBA8888,
+	DRM_FORMAT_BGRA8888,
+};
+
 static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
 {
 	struct decon_context *ctx = crtc->ctx;
@@ -126,7 +138,9 @@
 	ctx->drm_dev = drm_dev;
 	ctx->pipe = priv->pipe++;
 
-	ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, ctx->dev);
+	decon_clear_channels(ctx->crtc);
+
+	ret = drm_iommu_attach_device(drm_dev, ctx->dev);
 	if (ret)
 		priv->pipe--;
 
@@ -136,8 +150,7 @@
 static void decon_ctx_remove(struct decon_context *ctx)
 {
 	/* detach this sub driver from iommu mapping if supported. */
-	if (is_drm_iommu_supported(ctx->drm_dev))
-		drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+	drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
 }
 
 static u32 decon_calc_clkdiv(struct decon_context *ctx,
@@ -271,16 +284,16 @@
 	}
 }
 
-static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
+static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
+				 struct drm_framebuffer *fb)
 {
-	struct exynos_drm_plane *plane = &ctx->planes[win];
 	unsigned long val;
 	int padding;
 
 	val = readl(ctx->regs + WINCON(win));
 	val &= ~WINCONx_BPPMODE_MASK;
 
-	switch (plane->pixel_format) {
+	switch (fb->pixel_format) {
 	case DRM_FORMAT_RGB565:
 		val |= WINCONx_BPPMODE_16BPP_565;
 		val |= WINCONx_BURSTLEN_16WORD;
@@ -329,7 +342,7 @@
 		break;
 	}
 
-	DRM_DEBUG_KMS("bpp = %d\n", plane->bpp);
+	DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel);
 
 	/*
 	 * In case of exynos, setting dma-burst to 16Word causes permanent
@@ -339,8 +352,8 @@
 	 * movement causes unstable DMA which results into iommu crash/tear.
 	 */
 
-	padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
-	if (plane->fb_width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+	padding = (fb->pitches[0] / (fb->bits_per_pixel >> 3)) - fb->width;
+	if (fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
 		val &= ~WINCONx_BURSTLEN_MASK;
 		val |= WINCONx_BURSTLEN_8WORD;
 	}
@@ -382,23 +395,30 @@
 	writel(val, ctx->regs + SHADOWCON);
 }
 
-static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_atomic_begin(struct exynos_drm_crtc *crtc,
+					struct exynos_drm_plane *plane)
 {
 	struct decon_context *ctx = crtc->ctx;
-	struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
-	struct exynos_drm_plane *plane;
-	int padding;
-	unsigned long val, alpha;
-	unsigned int last_x;
-	unsigned int last_y;
 
 	if (ctx->suspended)
 		return;
 
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
+	decon_shadow_protect_win(ctx, plane->zpos, true);
+}
 
-	plane = &ctx->planes[win];
+static void decon_update_plane(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
+{
+	struct decon_context *ctx = crtc->ctx;
+	struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
+	struct drm_plane_state *state = plane->base.state;
+	int padding;
+	unsigned long val, alpha;
+	unsigned int last_x;
+	unsigned int last_y;
+	unsigned int win = plane->zpos;
+	unsigned int bpp = state->fb->bits_per_pixel >> 3;
+	unsigned int pitch = state->fb->pitches[0];
 
 	if (ctx->suspended)
 		return;
@@ -413,18 +433,15 @@
 	 * is set.
 	 */
 
-	/* protect windows */
-	decon_shadow_protect_win(ctx, win, true);
-
 	/* buffer start address */
 	val = (unsigned long)plane->dma_addr[0];
 	writel(val, ctx->regs + VIDW_BUF_START(win));
 
-	padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
+	padding = (pitch / bpp) - state->fb->width;
 
 	/* buffer size */
-	writel(plane->fb_width + padding, ctx->regs + VIDW_WHOLE_X(win));
-	writel(plane->fb_height, ctx->regs + VIDW_WHOLE_Y(win));
+	writel(state->fb->width + padding, ctx->regs + VIDW_WHOLE_X(win));
+	writel(state->fb->height, ctx->regs + VIDW_WHOLE_Y(win));
 
 	/* offset from the start of the buffer to read */
 	writel(plane->src_x, ctx->regs + VIDW_OFFSET_X(win));
@@ -433,25 +450,25 @@
 	DRM_DEBUG_KMS("start addr = 0x%lx\n",
 			(unsigned long)val);
 	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
-			plane->crtc_width, plane->crtc_height);
+			plane->crtc_w, plane->crtc_h);
 
 	/*
 	 * OSD position.
 	 * In case the window layout goes of LCD layout, DECON fails.
 	 */
-	if ((plane->crtc_x + plane->crtc_width) > mode->hdisplay)
-		plane->crtc_x = mode->hdisplay - plane->crtc_width;
-	if ((plane->crtc_y + plane->crtc_height) > mode->vdisplay)
-		plane->crtc_y = mode->vdisplay - plane->crtc_height;
+	if ((plane->crtc_x + plane->crtc_w) > mode->hdisplay)
+		plane->crtc_x = mode->hdisplay - plane->crtc_w;
+	if ((plane->crtc_y + plane->crtc_h) > mode->vdisplay)
+		plane->crtc_y = mode->vdisplay - plane->crtc_h;
 
 	val = VIDOSDxA_TOPLEFT_X(plane->crtc_x) |
 		VIDOSDxA_TOPLEFT_Y(plane->crtc_y);
 	writel(val, ctx->regs + VIDOSD_A(win));
 
-	last_x = plane->crtc_x + plane->crtc_width;
+	last_x = plane->crtc_x + plane->crtc_w;
 	if (last_x)
 		last_x--;
-	last_y = plane->crtc_y + plane->crtc_height;
+	last_y = plane->crtc_y + plane->crtc_h;
 	if (last_y)
 		last_y--;
 
@@ -475,7 +492,7 @@
 
 	writel(alpha, ctx->regs + VIDOSD_D(win));
 
-	decon_win_set_pixfmt(ctx, win);
+	decon_win_set_pixfmt(ctx, win, state->fb);
 
 	/* hardware window 0 doesn't support color key. */
 	if (win != 0)
@@ -495,17 +512,13 @@
 	writel(val, ctx->regs + DECON_UPDATE);
 }
 
-static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void decon_disable_plane(struct exynos_drm_crtc *crtc,
+				struct exynos_drm_plane *plane)
 {
 	struct decon_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
+	unsigned int win = plane->zpos;
 	u32 val;
 
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
-
-	plane = &ctx->planes[win];
-
 	if (ctx->suspended)
 		return;
 
@@ -517,14 +530,22 @@
 	val &= ~WINCONx_ENWIN;
 	writel(val, ctx->regs + WINCON(win));
 
-	/* unprotect windows */
-	decon_shadow_protect_win(ctx, win, false);
-
 	val = readl(ctx->regs + DECON_UPDATE);
 	val |= DECON_UPDATE_STANDALONE_F;
 	writel(val, ctx->regs + DECON_UPDATE);
 }
 
+static void decon_atomic_flush(struct exynos_drm_crtc *crtc,
+					struct exynos_drm_plane *plane)
+{
+	struct decon_context *ctx = crtc->ctx;
+
+	if (ctx->suspended)
+		return;
+
+	decon_shadow_protect_win(ctx, plane->zpos, false);
+}
+
 static void decon_init(struct decon_context *ctx)
 {
 	u32 val;
@@ -601,7 +622,7 @@
 	 * a destroyed buffer later.
 	 */
 	for (i = 0; i < WINDOWS_NR; i++)
-		decon_win_disable(crtc, i);
+		decon_disable_plane(crtc, &ctx->planes[i]);
 
 	clk_disable_unprepare(ctx->vclk);
 	clk_disable_unprepare(ctx->eclk);
@@ -621,9 +642,10 @@
 	.enable_vblank = decon_enable_vblank,
 	.disable_vblank = decon_disable_vblank,
 	.wait_for_vblank = decon_wait_for_vblank,
-	.win_commit = decon_win_commit,
-	.win_disable = decon_win_disable,
-	.clear_channels = decon_clear_channels,
+	.atomic_begin = decon_atomic_begin,
+	.update_plane = decon_update_plane,
+	.disable_plane = decon_disable_plane,
+	.atomic_flush = decon_atomic_flush,
 };
 
 
@@ -631,6 +653,7 @@
 {
 	struct decon_context *ctx = (struct decon_context *)dev_id;
 	u32 val, clear_bit;
+	int win;
 
 	val = readl(ctx->regs + VIDINTCON1);
 
@@ -643,8 +666,15 @@
 		goto out;
 
 	if (!ctx->i80_if) {
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
-		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+		drm_crtc_handle_vblank(&ctx->crtc->base);
+		for (win = 0 ; win < WINDOWS_NR ; win++) {
+			struct exynos_drm_plane *plane = &ctx->planes[win];
+
+			if (!plane->pending_fb)
+				continue;
+
+			exynos_drm_crtc_finish_update(ctx->crtc, plane);
+		}
 
 		/* set wait vsync event to zero and wake up queue. */
 		if (atomic_read(&ctx->wait_vsync_event)) {
@@ -675,7 +705,8 @@
 		type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
 						DRM_PLANE_TYPE_OVERLAY;
 		ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-					1 << ctx->pipe, type, zpos);
+					1 << ctx->pipe, type, decon_formats,
+					ARRAY_SIZE(decon_formats), zpos);
 		if (ret)
 			return ret;
 	}
@@ -689,8 +720,8 @@
 		return PTR_ERR(ctx->crtc);
 	}
 
-	if (ctx->display)
-		exynos_drm_create_enc_conn(drm_dev, ctx->display);
+	if (ctx->encoder)
+		exynos_dpi_bind(drm_dev, ctx->encoder);
 
 	return 0;
 
@@ -703,8 +734,8 @@
 
 	decon_disable(ctx->crtc);
 
-	if (ctx->display)
-		exynos_dpi_remove(ctx->display);
+	if (ctx->encoder)
+		exynos_dpi_remove(ctx->encoder);
 
 	decon_ctx_remove(ctx);
 }
@@ -789,9 +820,9 @@
 
 	platform_set_drvdata(pdev, ctx);
 
-	ctx->display = exynos_dpi_probe(dev);
-	if (IS_ERR(ctx->display)) {
-		ret = PTR_ERR(ctx->display);
+	ctx->encoder = exynos_dpi_probe(dev);
+	if (IS_ERR(ctx->encoder)) {
+		ret = PTR_ERR(ctx->encoder);
 		goto err_iounmap;
 	}
 
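As in the 5433 variant, the exynos7 decon stops caching framebuffer parameters in exynos_drm_plane and reads them from the framebuffer attached to the plane state at commit time. The derivations used by decon_update_plane(), pulled out for clarity; foo_fb_params is an illustrative helper, while the field names match this kernel's struct drm_framebuffer:

#include <drm/drmP.h>

static void foo_fb_params(struct drm_framebuffer *fb)
{
	unsigned int bpp = fb->bits_per_pixel >> 3;	/* bytes per pixel */
	unsigned int pitch = fb->pitches[0];		/* bytes per scanline */
	int padding = pitch / bpp - fb->width;		/* trailing pixels */

	DRM_DEBUG_KMS("bpp=%u pitch=%u padding=%d\n", bpp, pitch, padding);
}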
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 172b800..d66ade0 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -32,19 +32,20 @@
 #include <drm/drm_panel.h>
 
 #include "exynos_dp_core.h"
+#include "exynos_drm_crtc.h"
 
 #define ctx_from_connector(c)	container_of(c, struct exynos_dp_device, \
 					connector)
 
 static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp)
 {
-	return to_exynos_crtc(dp->encoder->crtc);
+	return to_exynos_crtc(dp->encoder.crtc);
 }
 
-static inline struct exynos_dp_device *
-display_to_dp(struct exynos_drm_display *d)
+static inline struct exynos_dp_device *encoder_to_dp(
+						struct drm_encoder *e)
 {
-	return container_of(d, struct exynos_dp_device, display);
+	return container_of(e, struct exynos_dp_device, encoder);
 }
 
 struct bridge_init {
@@ -795,9 +796,6 @@
 	/* Configure video slave mode */
 	exynos_dp_enable_video_master(dp, 0);
 
-	/* Enable video */
-	exynos_dp_start_video(dp);
-
 	timeout_loop = 0;
 
 	for (;;) {
@@ -891,9 +889,9 @@
 		drm_helper_hpd_irq_event(dp->drm_dev);
 }
 
-static void exynos_dp_commit(struct exynos_drm_display *display)
+static void exynos_dp_commit(struct drm_encoder *encoder)
 {
-	struct exynos_dp_device *dp = display_to_dp(display);
+	struct exynos_dp_device *dp = encoder_to_dp(encoder);
 	int ret;
 
 	/* Keep the panel disabled while we configure video */
@@ -938,6 +936,9 @@
 		if (drm_panel_enable(dp->panel))
 			DRM_ERROR("failed to enable the panel\n");
 	}
+
+	/* Enable video */
+	exynos_dp_start_video(dp);
 }
 
 static enum drm_connector_status exynos_dp_detect(
@@ -994,7 +995,7 @@
 {
 	struct exynos_dp_device *dp = ctx_from_connector(connector);
 
-	return dp->encoder;
+	return &dp->encoder;
 }
 
 static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
@@ -1019,15 +1020,12 @@
 	return 0;
 }
 
-static int exynos_dp_create_connector(struct exynos_drm_display *display,
-				struct drm_encoder *encoder)
+static int exynos_dp_create_connector(struct drm_encoder *encoder)
 {
-	struct exynos_dp_device *dp = display_to_dp(display);
+	struct exynos_dp_device *dp = encoder_to_dp(encoder);
 	struct drm_connector *connector = &dp->connector;
 	int ret;
 
-	dp->encoder = encoder;
-
 	/* Pre-empt DP connector creation if there's a bridge */
 	if (dp->bridge) {
 		ret = exynos_drm_attach_lcd_bridge(dp, encoder);
@@ -1054,20 +1052,22 @@
 	return ret;
 }
 
-static void exynos_dp_phy_init(struct exynos_dp_device *dp)
+static bool exynos_dp_mode_fixup(struct drm_encoder *encoder,
+				 const struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode)
 {
-	if (dp->phy)
-		phy_power_on(dp->phy);
+	return true;
 }
 
-static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
+static void exynos_dp_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
 {
-	if (dp->phy)
-		phy_power_off(dp->phy);
 }
 
-static void exynos_dp_poweron(struct exynos_dp_device *dp)
+static void exynos_dp_enable(struct drm_encoder *encoder)
 {
+	struct exynos_dp_device *dp = encoder_to_dp(encoder);
 	struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
 
 	if (dp->dpms_mode == DRM_MODE_DPMS_ON)
@@ -1084,14 +1084,17 @@
 		crtc->ops->clock_enable(dp_to_crtc(dp), true);
 
 	clk_prepare_enable(dp->clock);
-	exynos_dp_phy_init(dp);
+	phy_power_on(dp->phy);
 	exynos_dp_init_dp(dp);
 	enable_irq(dp->irq);
-	exynos_dp_commit(&dp->display);
+	exynos_dp_commit(&dp->encoder);
+
+	dp->dpms_mode = DRM_MODE_DPMS_ON;
 }
 
-static void exynos_dp_poweroff(struct exynos_dp_device *dp)
+static void exynos_dp_disable(struct drm_encoder *encoder)
 {
+	struct exynos_dp_device *dp = encoder_to_dp(encoder);
 	struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
 
 	if (dp->dpms_mode != DRM_MODE_DPMS_ON)
@@ -1106,7 +1109,7 @@
 
 	disable_irq(dp->irq);
 	flush_work(&dp->hotplug_work);
-	exynos_dp_phy_exit(dp);
+	phy_power_off(dp->phy);
 	clk_disable_unprepare(dp->clock);
 
 	if (crtc->ops->clock_enable)
@@ -1116,31 +1119,19 @@
 		if (drm_panel_unprepare(dp->panel))
 			DRM_ERROR("failed to turnoff the panel\n");
 	}
+
+	dp->dpms_mode = DRM_MODE_DPMS_OFF;
 }
 
-static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
-{
-	struct exynos_dp_device *dp = display_to_dp(display);
+static struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
+	.mode_fixup = exynos_dp_mode_fixup,
+	.mode_set = exynos_dp_mode_set,
+	.enable = exynos_dp_enable,
+	.disable = exynos_dp_disable,
+};
 
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		exynos_dp_poweron(dp);
-		break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		exynos_dp_poweroff(dp);
-		break;
-	default:
-		break;
-	}
-	dp->dpms_mode = mode;
-}
-
-static struct exynos_drm_display_ops exynos_dp_display_ops = {
-	.create_connector = exynos_dp_create_connector,
-	.dpms = exynos_dp_dpms,
-	.commit = exynos_dp_commit,
+static struct drm_encoder_funcs exynos_dp_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
 };
 
 static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
@@ -1219,9 +1210,10 @@
 	struct exynos_dp_device *dp = dev_get_drvdata(dev);
 	struct platform_device *pdev = to_platform_device(dev);
 	struct drm_device *drm_dev = data;
+	struct drm_encoder *encoder = &dp->encoder;
 	struct resource *res;
 	unsigned int irq_flags;
-	int ret = 0;
+	int pipe, ret = 0;
 
 	dp->dev = &pdev->dev;
 	dp->dpms_mode = DRM_MODE_DPMS_OFF;
@@ -1297,7 +1289,7 @@
 
 	INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug);
 
-	exynos_dp_phy_init(dp);
+	phy_power_on(dp->phy);
 
 	exynos_dp_init_dp(dp);
 
@@ -1311,7 +1303,28 @@
 
 	dp->drm_dev = drm_dev;
 
-	return exynos_drm_create_enc_conn(drm_dev, &dp->display);
+	pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+						  EXYNOS_DISPLAY_TYPE_LCD);
+	if (pipe < 0)
+		return pipe;
+
+	encoder->possible_crtcs = 1 << pipe;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
+
+	ret = exynos_dp_create_connector(encoder);
+	if (ret) {
+		DRM_ERROR("failed to create connector ret = %d\n", ret);
+		drm_encoder_cleanup(encoder);
+		return ret;
+	}
+
+	return 0;
 }
 
 static void exynos_dp_unbind(struct device *dev, struct device *master,
@@ -1319,7 +1332,7 @@
 {
 	struct exynos_dp_device *dp = dev_get_drvdata(dev);
 
-	exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
+	exynos_dp_disable(&dp->encoder);
 }
 
 static const struct component_ops exynos_dp_ops = {
@@ -1338,8 +1351,6 @@
 	if (!dp)
 		return -ENOMEM;
 
-	dp->display.type = EXYNOS_DISPLAY_TYPE_LCD;
-	dp->display.ops = &exynos_dp_display_ops;
 	platform_set_drvdata(pdev, dp);
 
 	panel_node = of_parse_phandle(dev->of_node, "panel", 0);
@@ -1377,7 +1388,7 @@
 {
 	struct exynos_dp_device *dp = dev_get_drvdata(dev);
 
-	exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
+	exynos_dp_disable(&dp->encoder);
 	return 0;
 }
 
@@ -1385,7 +1396,7 @@
 {
 	struct exynos_dp_device *dp = dev_get_drvdata(dev);
 
-	exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_ON);
+	exynos_dp_enable(&dp->encoder);
 	return 0;
 }
 #endif
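
The DP driver drops the exynos_drm_display_ops indirection entirely: the encoder is embedded in exynos_dp_device, and the old dpms() switch collapses into standard drm_encoder_helper_funcs whose .enable/.disable hooks the core calls directly (the suspend/resume paths above now call them too). The skeleton of that replacement pattern, with the foo_* names illustrative:

#include <drm/drm_crtc_helper.h>

static void foo_encoder_enable(struct drm_encoder *encoder)
{
	/* power up PHY and clocks, then start the video stream */
}

static void foo_encoder_disable(struct drm_encoder *encoder)
{
	/* stop the stream, then power everything down */
}

/* The helper library invokes these directly; no DPMS switch needed. */
static const struct drm_encoder_helper_funcs foo_encoder_helper_funcs = {
	.enable	 = foo_encoder_enable,
	.disable = foo_encoder_disable,
};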
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index a4e7996..e413b6f 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -147,11 +147,10 @@
 };
 
 struct exynos_dp_device {
-	struct exynos_drm_display display;
+	struct drm_encoder	encoder;
 	struct device		*dev;
 	struct drm_device	*drm_dev;
 	struct drm_connector	connector;
-	struct drm_encoder	*encoder;
 	struct drm_panel	*panel;
 	struct drm_bridge	*bridge;
 	struct clk		*clock;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
deleted file mode 100644
index 24994ba..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/* exynos_drm_buf.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
-#include "exynos_drm_iommu.h"
-
-static int lowlevel_buffer_allocate(struct drm_device *dev,
-		unsigned int flags, struct exynos_drm_gem_buf *buf)
-{
-	int ret = 0;
-	enum dma_attr attr;
-	unsigned int nr_pages;
-
-	if (buf->dma_addr) {
-		DRM_DEBUG_KMS("already allocated.\n");
-		return 0;
-	}
-
-	init_dma_attrs(&buf->dma_attrs);
-
-	/*
-	 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
-	 * region will be allocated else physically contiguous
-	 * as possible.
-	 */
-	if (!(flags & EXYNOS_BO_NONCONTIG))
-		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
-
-	/*
-	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
-	 * else cachable mapping.
-	 */
-	if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
-		attr = DMA_ATTR_WRITE_COMBINE;
-	else
-		attr = DMA_ATTR_NON_CONSISTENT;
-
-	dma_set_attr(attr, &buf->dma_attrs);
-	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
-
-	nr_pages = buf->size >> PAGE_SHIFT;
-
-	if (!is_drm_iommu_supported(dev)) {
-		dma_addr_t start_addr;
-		unsigned int i = 0;
-
-		buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
-		if (!buf->pages) {
-			DRM_ERROR("failed to allocate pages.\n");
-			return -ENOMEM;
-		}
-
-		buf->cookie = dma_alloc_attrs(dev->dev,
-					buf->size,
-					&buf->dma_addr, GFP_KERNEL,
-					&buf->dma_attrs);
-		if (!buf->cookie) {
-			DRM_ERROR("failed to allocate buffer.\n");
-			ret = -ENOMEM;
-			goto err_free;
-		}
-
-		start_addr = buf->dma_addr;
-		while (i < nr_pages) {
-			buf->pages[i] = phys_to_page(start_addr);
-			start_addr += PAGE_SIZE;
-			i++;
-		}
-	} else {
-
-		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
-					&buf->dma_addr, GFP_KERNEL,
-					&buf->dma_attrs);
-		if (!buf->pages) {
-			DRM_ERROR("failed to allocate buffer.\n");
-			return -ENOMEM;
-		}
-	}
-
-	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
-	if (IS_ERR(buf->sgt)) {
-		DRM_ERROR("failed to get sg table.\n");
-		ret = PTR_ERR(buf->sgt);
-		goto err_free_attrs;
-	}
-
-	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
-			(unsigned long)buf->dma_addr,
-			buf->size);
-
-	return ret;
-
-err_free_attrs:
-	dma_free_attrs(dev->dev, buf->size, buf->pages,
-			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
-	buf->dma_addr = (dma_addr_t)NULL;
-err_free:
-	if (!is_drm_iommu_supported(dev))
-		drm_free_large(buf->pages);
-
-	return ret;
-}
-
-static void lowlevel_buffer_deallocate(struct drm_device *dev,
-		unsigned int flags, struct exynos_drm_gem_buf *buf)
-{
-	if (!buf->dma_addr) {
-		DRM_DEBUG_KMS("dma_addr is invalid.\n");
-		return;
-	}
-
-	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
-			(unsigned long)buf->dma_addr,
-			buf->size);
-
-	sg_free_table(buf->sgt);
-
-	kfree(buf->sgt);
-	buf->sgt = NULL;
-
-	if (!is_drm_iommu_supported(dev)) {
-		dma_free_attrs(dev->dev, buf->size, buf->cookie,
-				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
-		drm_free_large(buf->pages);
-	} else
-		dma_free_attrs(dev->dev, buf->size, buf->pages,
-				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
-
-	buf->dma_addr = (dma_addr_t)NULL;
-}
-
-struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
-						unsigned int size)
-{
-	struct exynos_drm_gem_buf *buffer;
-
-	DRM_DEBUG_KMS("desired size = 0x%x\n", size);
-
-	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
-	if (!buffer)
-		return NULL;
-
-	buffer->size = size;
-	return buffer;
-}
-
-void exynos_drm_fini_buf(struct drm_device *dev,
-				struct exynos_drm_gem_buf *buffer)
-{
-	kfree(buffer);
-	buffer = NULL;
-}
-
-int exynos_drm_alloc_buf(struct drm_device *dev,
-		struct exynos_drm_gem_buf *buf, unsigned int flags)
-{
-
-	/*
-	 * allocate memory region and set the memory information
-	 * to vaddr and dma_addr of a buffer object.
-	 */
-	if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
-		return -ENOMEM;
-
-	return 0;
-}
-
-void exynos_drm_free_buf(struct drm_device *dev,
-		unsigned int flags, struct exynos_drm_gem_buf *buffer)
-{
-
-	lowlevel_buffer_deallocate(dev, flags, buffer);
-}
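
exynos_drm_buf.c is deleted outright; its allocation logic moves into the GEM code elsewhere in this series rather than being lost. The piece worth keeping in view is how the removed helper translated EXYNOS_BO_* flags into DMA attributes. In isolation, against this kernel's dma_attrs API, and with foo_setup_attrs an illustrative name:

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

static void foo_setup_attrs(struct dma_attrs *attrs, bool contig,
			    bool cachable)
{
	init_dma_attrs(attrs);

	/* EXYNOS_BO_CONTIG: demand one physically contiguous region. */
	if (contig)
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs);

	/* Writecombine mapping unless a cachable buffer was requested. */
	dma_set_attr(cachable ? DMA_ATTR_NON_CONSISTENT
			      : DMA_ATTR_WRITE_COMBINE, attrs);

	/* No kernel mapping at allocation time; map on demand. */
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
}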
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
deleted file mode 100644
index a6412f1..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* exynos_drm_buf.h
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_BUF_H_
-#define _EXYNOS_DRM_BUF_H_
-
-/* create and initialize buffer object. */
-struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
-						unsigned int size);
-
-/* destroy buffer object. */
-void exynos_drm_fini_buf(struct drm_device *dev,
-				struct exynos_drm_gem_buf *buffer);
-
-/* allocate physical memory region and setup sgt. */
-int exynos_drm_alloc_buf(struct drm_device *dev,
-				struct exynos_drm_gem_buf *buf,
-				unsigned int flags);
-
-/* release physical memory region, and sgt. */
-void exynos_drm_free_buf(struct drm_device *dev,
-				unsigned int flags,
-				struct exynos_drm_gem_buf *buffer);
-
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 4c9f972..c68a6a2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -15,46 +15,10 @@
 #include <drm/drmP.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
-#include "exynos_drm_encoder.h"
 #include "exynos_drm_fbdev.h"
 
 static LIST_HEAD(exynos_drm_subdrv_list);
 
-int exynos_drm_create_enc_conn(struct drm_device *dev,
-					struct exynos_drm_display *display)
-{
-	struct drm_encoder *encoder;
-	int ret;
-	unsigned long possible_crtcs = 0;
-
-	ret = exynos_drm_crtc_get_pipe_from_type(dev, display->type);
-	if (ret < 0)
-		return ret;
-
-	possible_crtcs |= 1 << ret;
-
-	/* create and initialize a encoder for this sub driver. */
-	encoder = exynos_drm_encoder_create(dev, display, possible_crtcs);
-	if (!encoder) {
-		DRM_ERROR("failed to create encoder\n");
-		return -EFAULT;
-	}
-
-	display->encoder = encoder;
-
-	ret = display->ops->create_connector(display, encoder);
-	if (ret) {
-		DRM_ERROR("failed to create connector ret = %d\n", ret);
-		goto err_destroy_encoder;
-	}
-
-	return 0;
-
-err_destroy_encoder:
-	encoder->funcs->destroy(encoder);
-	return ret;
-}
-
 int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
 {
 	if (!subdrv)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 644b4b7..0872aa2f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -19,21 +19,15 @@
 
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_drv.h"
-#include "exynos_drm_encoder.h"
 #include "exynos_drm_plane.h"
 
 static void exynos_drm_crtc_enable(struct drm_crtc *crtc)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
-	if (exynos_crtc->enabled)
-		return;
-
 	if (exynos_crtc->ops->enable)
 		exynos_crtc->ops->enable(exynos_crtc);
 
-	exynos_crtc->enabled = true;
-
 	drm_crtc_vblank_on(crtc);
 }
 
@@ -41,20 +35,10 @@
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
-	if (!exynos_crtc->enabled)
-		return;
-
-	/* wait for the completion of page flip. */
-	if (!wait_event_timeout(exynos_crtc->pending_flip_queue,
-				(exynos_crtc->event == NULL), HZ/20))
-		exynos_crtc->event = NULL;
-
 	drm_crtc_vblank_off(crtc);
 
 	if (exynos_crtc->ops->disable)
 		exynos_crtc->ops->disable(exynos_crtc);
-
-	exynos_crtc->enabled = false;
 }
 
 static bool
@@ -80,18 +64,36 @@
 		exynos_crtc->ops->commit(exynos_crtc);
 }
 
-static void exynos_crtc_atomic_begin(struct drm_crtc *crtc)
+static void exynos_crtc_atomic_begin(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_crtc_state)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct drm_plane *plane;
 
-	if (crtc->state->event) {
-		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-		exynos_crtc->event = crtc->state->event;
+	exynos_crtc->event = crtc->state->event;
+
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
+
+		if (exynos_crtc->ops->atomic_begin)
+			exynos_crtc->ops->atomic_begin(exynos_crtc,
+							exynos_plane);
 	}
 }
 
-static void exynos_crtc_atomic_flush(struct drm_crtc *crtc)
+static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_crtc_state)
 {
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct drm_plane *plane;
+
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
+
+		if (exynos_crtc->ops->atomic_flush)
+			exynos_crtc->ops->atomic_flush(exynos_crtc,
+							exynos_plane);
+	}
 }
 
 static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
@@ -139,13 +141,13 @@
 	if (!exynos_crtc)
 		return ERR_PTR(-ENOMEM);
 
-	init_waitqueue_head(&exynos_crtc->pending_flip_queue);
-
 	exynos_crtc->pipe = pipe;
 	exynos_crtc->type = type;
 	exynos_crtc->ops = ops;
 	exynos_crtc->ctx = ctx;
 
+	init_waitqueue_head(&exynos_crtc->wait_update);
+
 	crtc = &exynos_crtc->base;
 
 	private->crtc[pipe] = crtc;
@@ -171,11 +173,8 @@
 	struct exynos_drm_crtc *exynos_crtc =
 		to_exynos_crtc(private->crtc[pipe]);
 
-	if (!exynos_crtc->enabled)
-		return -EPERM;
-
 	if (exynos_crtc->ops->enable_vblank)
-		exynos_crtc->ops->enable_vblank(exynos_crtc);
+		return exynos_crtc->ops->enable_vblank(exynos_crtc);
 
 	return 0;
 }
@@ -186,31 +185,34 @@
 	struct exynos_drm_crtc *exynos_crtc =
 		to_exynos_crtc(private->crtc[pipe]);
 
-	if (!exynos_crtc->enabled)
-		return;
-
 	if (exynos_crtc->ops->disable_vblank)
 		exynos_crtc->ops->disable_vblank(exynos_crtc);
 }
 
-void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe)
+void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc)
 {
-	struct exynos_drm_private *dev_priv = dev->dev_private;
-	struct drm_crtc *drm_crtc = dev_priv->crtc[pipe];
-	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
+	wait_event_timeout(exynos_crtc->wait_update,
+			   (atomic_read(&exynos_crtc->pending_update) == 0),
+			   msecs_to_jiffies(50));
+}
+
+void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
+				struct exynos_drm_plane *exynos_plane)
+{
+	struct drm_crtc *crtc = &exynos_crtc->base;
 	unsigned long flags;
 
-	spin_lock_irqsave(&dev->event_lock, flags);
-	if (exynos_crtc->event) {
+	exynos_plane->pending_fb = NULL;
 
-		drm_send_vblank_event(dev, -1, exynos_crtc->event);
-		drm_vblank_put(dev, pipe);
-		wake_up(&exynos_crtc->pending_flip_queue);
+	if (atomic_dec_and_test(&exynos_crtc->pending_update))
+		wake_up(&exynos_crtc->wait_update);
 
-	}
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	if (exynos_crtc->event)
+		drm_crtc_send_vblank_event(crtc, exynos_crtc->event);
 
 	exynos_crtc->event = NULL;
-	spin_unlock_irqrestore(&dev->event_lock, flags);
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 }
 
 void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
@@ -237,7 +239,7 @@
 }
 
 int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
-					unsigned int out_type)
+				       enum exynos_drm_output_type out_type)
 {
 	struct drm_crtc *crtc;
 
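Pageflip bookkeeping changes shape here: the single event pointer plus pending_flip_queue gives way to a per-CRTC atomic pending_update counter and a wait_update waitqueue. Each finished plane reports in through exynos_drm_crtc_finish_update(), and the last completion wakes whoever sits in exynos_drm_crtc_wait_pending_update(). The handshake in isolation; a sketch, where the increment side lives in commit code not shown in these hunks and the foo_* names are illustrative:

#include <linux/atomic.h>
#include <linux/wait.h>

#include "exynos_drm_drv.h"

/* Producer: account one queued plane update. */
static void foo_queue_update(struct exynos_drm_crtc *exynos_crtc)
{
	atomic_inc(&exynos_crtc->pending_update);
}

/* Consumer: per finished plane; the final decrement wakes the waiter. */
static void foo_complete_update(struct exynos_drm_crtc *exynos_crtc)
{
	if (atomic_dec_and_test(&exynos_crtc->pending_update))
		wake_up(&exynos_crtc->wait_update);
}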
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 0f3aa70..f87d4ab 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -25,12 +25,14 @@
 					void *context);
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
-void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
+void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc);
+void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
+				   struct exynos_drm_plane *exynos_plane);
 void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb);
 
 /* This function gets pipe value to crtc device matched with out_type. */
 int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
-					unsigned int out_type);
+				       enum exynos_drm_output_type out_type);
 
 /*
  * This function calls the crtc device(manager)'s te_handler() callback
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
deleted file mode 100644
index cd485c0..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/* exynos_drm_dmabuf.c
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-#include "exynos_drm_dmabuf.h"
-#include "exynos_drm_drv.h"
-#include "exynos_drm_gem.h"
-
-#include <linux/dma-buf.h>
-
-struct exynos_drm_dmabuf_attachment {
-	struct sg_table sgt;
-	enum dma_data_direction dir;
-	bool is_mapped;
-};
-
-static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
-{
-	return to_exynos_gem_obj(buf->priv);
-}
-
-static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
-					struct device *dev,
-					struct dma_buf_attachment *attach)
-{
-	struct exynos_drm_dmabuf_attachment *exynos_attach;
-
-	exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
-	if (!exynos_attach)
-		return -ENOMEM;
-
-	exynos_attach->dir = DMA_NONE;
-	attach->priv = exynos_attach;
-
-	return 0;
-}
-
-static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
-					struct dma_buf_attachment *attach)
-{
-	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
-	struct sg_table *sgt;
-
-	if (!exynos_attach)
-		return;
-
-	sgt = &exynos_attach->sgt;
-
-	if (exynos_attach->dir != DMA_NONE)
-		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
-				exynos_attach->dir);
-
-	sg_free_table(sgt);
-	kfree(exynos_attach);
-	attach->priv = NULL;
-}
-
-static struct sg_table *
-		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
-					enum dma_data_direction dir)
-{
-	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
-	struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
-	struct drm_device *dev = gem_obj->base.dev;
-	struct exynos_drm_gem_buf *buf;
-	struct scatterlist *rd, *wr;
-	struct sg_table *sgt = NULL;
-	unsigned int i;
-	int nents, ret;
-
-	/* just return current sgt if already requested. */
-	if (exynos_attach->dir == dir && exynos_attach->is_mapped)
-		return &exynos_attach->sgt;
-
-	buf = gem_obj->buffer;
-	if (!buf) {
-		DRM_ERROR("buffer is null.\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	sgt = &exynos_attach->sgt;
-
-	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
-	if (ret) {
-		DRM_ERROR("failed to alloc sgt.\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	mutex_lock(&dev->struct_mutex);
-
-	rd = buf->sgt->sgl;
-	wr = sgt->sgl;
-	for (i = 0; i < sgt->orig_nents; ++i) {
-		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
-		rd = sg_next(rd);
-		wr = sg_next(wr);
-	}
-
-	if (dir != DMA_NONE) {
-		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
-		if (!nents) {
-			DRM_ERROR("failed to map sgl with iommu.\n");
-			sg_free_table(sgt);
-			sgt = ERR_PTR(-EIO);
-			goto err_unlock;
-		}
-	}
-
-	exynos_attach->is_mapped = true;
-	exynos_attach->dir = dir;
-	attach->priv = exynos_attach;
-
-	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
-
-err_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return sgt;
-}
-
-static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
-						struct sg_table *sgt,
-						enum dma_data_direction dir)
-{
-	/* Nothing to do. */
-}
-
-static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
-						unsigned long page_num)
-{
-	/* TODO */
-
-	return NULL;
-}
-
-static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
-						unsigned long page_num,
-						void *addr)
-{
-	/* TODO */
-}
-
-static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
-					unsigned long page_num)
-{
-	/* TODO */
-
-	return NULL;
-}
-
-static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
-					unsigned long page_num, void *addr)
-{
-	/* TODO */
-}
-
-static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
-	struct vm_area_struct *vma)
-{
-	return -ENOTTY;
-}
-
-static struct dma_buf_ops exynos_dmabuf_ops = {
-	.attach			= exynos_gem_attach_dma_buf,
-	.detach			= exynos_gem_detach_dma_buf,
-	.map_dma_buf		= exynos_gem_map_dma_buf,
-	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
-	.kmap			= exynos_gem_dmabuf_kmap,
-	.kmap_atomic		= exynos_gem_dmabuf_kmap_atomic,
-	.kunmap			= exynos_gem_dmabuf_kunmap,
-	.kunmap_atomic		= exynos_gem_dmabuf_kunmap_atomic,
-	.mmap			= exynos_gem_dmabuf_mmap,
-	.release		= drm_gem_dmabuf_release,
-};
-
-struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
-				struct drm_gem_object *obj, int flags)
-{
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
-	exp_info.ops = &exynos_dmabuf_ops;
-	exp_info.size = exynos_gem_obj->base.size;
-	exp_info.flags = flags;
-	exp_info.priv = obj;
-
-	return dma_buf_export(&exp_info);
-}
-
-struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
-				struct dma_buf *dma_buf)
-{
-	struct dma_buf_attachment *attach;
-	struct sg_table *sgt;
-	struct scatterlist *sgl;
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_gem_buf *buffer;
-	int ret;
-
-	/* is this one of own objects? */
-	if (dma_buf->ops == &exynos_dmabuf_ops) {
-		struct drm_gem_object *obj;
-
-		obj = dma_buf->priv;
-
-		/* is it from our device? */
-		if (obj->dev == drm_dev) {
-			/*
-			 * Importing dmabuf exported from out own gem increases
-			 * refcount on gem itself instead of f_count of dmabuf.
-			 */
-			drm_gem_object_reference(obj);
-			return obj;
-		}
-	}
-
-	attach = dma_buf_attach(dma_buf, drm_dev->dev);
-	if (IS_ERR(attach))
-		return ERR_PTR(-EINVAL);
-
-	get_dma_buf(dma_buf);
-
-	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-	if (IS_ERR(sgt)) {
-		ret = PTR_ERR(sgt);
-		goto err_buf_detach;
-	}
-
-	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
-	if (!buffer) {
-		ret = -ENOMEM;
-		goto err_unmap_attach;
-	}
-
-	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
-	if (!exynos_gem_obj) {
-		ret = -ENOMEM;
-		goto err_free_buffer;
-	}
-
-	sgl = sgt->sgl;
-
-	buffer->size = dma_buf->size;
-	buffer->dma_addr = sg_dma_address(sgl);
-
-	if (sgt->nents == 1) {
-		/* always physically continuous memory if sgt->nents is 1. */
-		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
-	} else {
-		/*
-		 * this case could be CONTIG or NONCONTIG type but for now
-		 * sets NONCONTIG.
-		 * TODO. we have to find a way that exporter can notify
-		 * the type of its own buffer to importer.
-		 */
-		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
-	}
-
-	exynos_gem_obj->buffer = buffer;
-	buffer->sgt = sgt;
-	exynos_gem_obj->base.import_attach = attach;
-
-	DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
-								buffer->size);
-
-	return &exynos_gem_obj->base;
-
-err_free_buffer:
-	kfree(buffer);
-	buffer = NULL;
-err_unmap_attach:
-	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
-err_buf_detach:
-	dma_buf_detach(dma_buf, attach);
-	dma_buf_put(dma_buf);
-
-	return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
deleted file mode 100644
index 886de9f..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* exynos_drm_dmabuf.h
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_DMABUF_H_
-#define _EXYNOS_DRM_DMABUF_H_
-
-struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
-				struct drm_gem_object *obj, int flags);
-
-struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
-						struct dma_buf *dma_buf);
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 7cb6595..c748b87 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -20,26 +20,24 @@
 #include <video/of_videomode.h>
 #include <video/videomode.h>
 
-#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
 
 struct exynos_dpi {
-	struct exynos_drm_display display;
+	struct drm_encoder encoder;
 	struct device *dev;
 	struct device_node *panel_node;
 
 	struct drm_panel *panel;
 	struct drm_connector connector;
-	struct drm_encoder *encoder;
 
 	struct videomode *vm;
-	int dpms_mode;
 };
 
 #define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector)
 
-static inline struct exynos_dpi *display_to_dpi(struct exynos_drm_display *d)
+static inline struct exynos_dpi *encoder_to_dpi(struct drm_encoder *e)
 {
-	return container_of(d, struct exynos_dpi, display);
+	return container_of(e, struct exynos_dpi, encoder);
 }
 
 static enum drm_connector_status
@@ -99,7 +97,7 @@
 {
 	struct exynos_dpi *ctx = connector_to_dpi(connector);
 
-	return ctx->encoder;
+	return &ctx->encoder;
 }
 
 static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
@@ -107,15 +105,12 @@
 	.best_encoder = exynos_dpi_best_encoder,
 };
 
-static int exynos_dpi_create_connector(struct exynos_drm_display *display,
-				       struct drm_encoder *encoder)
+static int exynos_dpi_create_connector(struct drm_encoder *encoder)
 {
-	struct exynos_dpi *ctx = display_to_dpi(display);
+	struct exynos_dpi *ctx = encoder_to_dpi(encoder);
 	struct drm_connector *connector = &ctx->connector;
 	int ret;
 
-	ctx->encoder = encoder;
-
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 
 	ret = drm_connector_init(encoder->dev, connector,
@@ -133,46 +128,48 @@
 	return 0;
 }
 
-static void exynos_dpi_poweron(struct exynos_dpi *ctx)
+static bool exynos_dpi_mode_fixup(struct drm_encoder *encoder,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
 {
+	return true;
+}
+
+static void exynos_dpi_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void exynos_dpi_enable(struct drm_encoder *encoder)
+{
+	struct exynos_dpi *ctx = encoder_to_dpi(encoder);
+
 	if (ctx->panel) {
 		drm_panel_prepare(ctx->panel);
 		drm_panel_enable(ctx->panel);
 	}
 }
 
-static void exynos_dpi_poweroff(struct exynos_dpi *ctx)
+static void exynos_dpi_disable(struct drm_encoder *encoder)
 {
+	struct exynos_dpi *ctx = encoder_to_dpi(encoder);
+
 	if (ctx->panel) {
 		drm_panel_disable(ctx->panel);
 		drm_panel_unprepare(ctx->panel);
 	}
 }
 
-static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
-{
-	struct exynos_dpi *ctx = display_to_dpi(display);
+static struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
+	.mode_fixup = exynos_dpi_mode_fixup,
+	.mode_set = exynos_dpi_mode_set,
+	.enable = exynos_dpi_enable,
+	.disable = exynos_dpi_disable,
+};
 
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		if (ctx->dpms_mode != DRM_MODE_DPMS_ON)
-				exynos_dpi_poweron(ctx);
-			break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		if (ctx->dpms_mode == DRM_MODE_DPMS_ON)
-			exynos_dpi_poweroff(ctx);
-		break;
-	default:
-		break;
-	}
-	ctx->dpms_mode = mode;
-}
-
-static struct exynos_drm_display_ops exynos_dpi_display_ops = {
-	.create_connector = exynos_dpi_create_connector,
-	.dpms = exynos_dpi_dpms
+static struct drm_encoder_funcs exynos_dpi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
 };
 
 /* of_* functions will be removed after merge of of_graph patches */
@@ -299,7 +296,34 @@
 	return 0;
 }
 
-struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
+int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
+{
+	int ret;
+
+	ret = exynos_drm_crtc_get_pipe_from_type(dev, EXYNOS_DISPLAY_TYPE_LCD);
+	if (ret < 0)
+		return ret;
+
+	encoder->possible_crtcs = 1 << ret;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
+
+	ret = exynos_dpi_create_connector(encoder);
+	if (ret) {
+		DRM_ERROR("failed to create connector ret = %d\n", ret);
+		drm_encoder_cleanup(encoder);
+		return ret;
+	}
+
+	return 0;
+}
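
The pipe returned by exynos_drm_crtc_get_pipe_from_type() is the CRTC index,
and possible_crtcs is a bitmask over those indices, so the shift above simply
says "only that one CRTC may drive this encoder". A self-contained
illustration of the mapping (pipe 1 is just an assumed example value):

#include <stdio.h>

int main(void)
{
	int pipe = 1;	/* e.g. what the LCD pipe lookup above might return */
	unsigned int possible_crtcs = 1u << pipe;
	int crtc_index;

	for (crtc_index = 0; crtc_index < 4; crtc_index++)
		printf("crtc %d allowed: %s\n", crtc_index,
		       (possible_crtcs & (1u << crtc_index)) ? "yes" : "no");
	return 0;
}
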
+
+struct drm_encoder *exynos_dpi_probe(struct device *dev)
 {
 	struct exynos_dpi *ctx;
 	int ret;
@@ -308,10 +332,7 @@
 	if (!ctx)
 		return ERR_PTR(-ENOMEM);
 
-	ctx->display.type = EXYNOS_DISPLAY_TYPE_LCD;
-	ctx->display.ops = &exynos_dpi_display_ops;
 	ctx->dev = dev;
-	ctx->dpms_mode = DRM_MODE_DPMS_OFF;
 
 	ret = exynos_dpi_parse_dt(ctx);
 	if (ret < 0) {
@@ -325,14 +346,14 @@
 			return ERR_PTR(-EPROBE_DEFER);
 	}
 
-	return &ctx->display;
+	return &ctx->encoder;
 }
 
-int exynos_dpi_remove(struct exynos_drm_display *display)
+int exynos_dpi_remove(struct drm_encoder *encoder)
 {
-	struct exynos_dpi *ctx = display_to_dpi(display);
+	struct exynos_dpi *ctx = encoder_to_dpi(encoder);
 
-	exynos_dpi_dpms(&ctx->display, DRM_MODE_DPMS_OFF);
+	exynos_dpi_disable(&ctx->encoder);
 
 	if (ctx->panel)
 		drm_panel_detach(ctx->panel);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 63a68c6..831d2e4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -13,6 +13,8 @@
 
 #include <linux/pm_runtime.h>
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 
 #include <linux/component.h>
@@ -21,13 +23,11 @@
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
-#include "exynos_drm_encoder.h"
 #include "exynos_drm_fbdev.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
 #include "exynos_drm_plane.h"
 #include "exynos_drm_vidi.h"
-#include "exynos_drm_dmabuf.h"
 #include "exynos_drm_g2d.h"
 #include "exynos_drm_ipp.h"
 #include "exynos_drm_iommu.h"
@@ -38,15 +38,112 @@
 #define DRIVER_MAJOR	1
 #define DRIVER_MINOR	0
 
+struct exynos_atomic_commit {
+	struct work_struct	work;
+	struct drm_device	*dev;
+	struct drm_atomic_state *state;
+	u32			crtcs;
+};
+
+static void exynos_atomic_wait_for_commit(struct drm_atomic_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+	struct drm_crtc *crtc;
+	int i, ret;
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+		if (!crtc->state->enable)
+			continue;
+
+		ret = drm_crtc_vblank_get(crtc);
+		if (ret)
+			continue;
+
+		exynos_drm_crtc_wait_pending_update(exynos_crtc);
+		drm_crtc_vblank_put(crtc);
+	}
+}
+
+static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
+{
+	struct drm_device *dev = commit->dev;
+	struct exynos_drm_private *priv = dev->dev_private;
+	struct drm_atomic_state *state = commit->state;
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	struct drm_plane_state *plane_state;
+	struct drm_crtc_state *crtc_state;
+	int i;
+
+	drm_atomic_helper_commit_modeset_disables(dev, state);
+
+	drm_atomic_helper_commit_modeset_enables(dev, state);
+
+	/*
+	 * Exynos can't update planes while CRTCs and encoders are disabled:
+	 * its update routines, especially for FIMD, require the clocks
+	 * to be enabled. So the modeset operations must be handled
+	 * *before* the commit_planes() step; that way the update always
+	 * runs with the relevant clocks enabled.
+	 */
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+		atomic_set(&exynos_crtc->pending_update, 0);
+	}
+
+	for_each_plane_in_state(state, plane, plane_state, i) {
+		struct exynos_drm_crtc *exynos_crtc =
+						to_exynos_crtc(plane->crtc);
+
+		if (!plane->crtc)
+			continue;
+
+		atomic_inc(&exynos_crtc->pending_update);
+	}
+
+	drm_atomic_helper_commit_planes(dev, state);
+
+	exynos_atomic_wait_for_commit(state);
+
+	drm_atomic_helper_cleanup_planes(dev, state);
+
+	drm_atomic_state_free(state);
+
+	spin_lock(&priv->lock);
+	priv->pending &= ~commit->crtcs;
+	spin_unlock(&priv->lock);
+
+	wake_up_all(&priv->wait);
+
+	kfree(commit);
+}
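
The pending_update counter is zeroed per CRTC and incremented once per plane
before commit_planes(); the decrement-and-wake side (behind
exynos_drm_crtc_wait_pending_update(), presumably driven from the vblank
path) is outside this hunk. A minimal sketch of that count-and-wait pattern,
with hypothetical names and assuming the waitqueue is initialized elsewhere:

#include <linux/atomic.h>
#include <linux/wait.h>

struct crtc_updates_sketch {
	atomic_t pending_update;	/* updates not yet latched by hw */
	wait_queue_head_t wait_update;	/* woken when the count hits 0 */
};

static void sketch_begin(struct crtc_updates_sketch *c, int nplanes)
{
	atomic_set(&c->pending_update, nplanes);
}

/* Call once per plane when the hardware has taken the update. */
static void sketch_finish_one(struct crtc_updates_sketch *c)
{
	if (atomic_dec_and_test(&c->pending_update))
		wake_up(&c->wait_update);
}

static void sketch_wait_all(struct crtc_updates_sketch *c)
{
	wait_event(c->wait_update, !atomic_read(&c->pending_update));
}
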
+
+static void exynos_drm_atomic_work(struct work_struct *work)
+{
+	struct exynos_atomic_commit *commit = container_of(work,
+				struct exynos_atomic_commit, work);
+
+	exynos_atomic_commit_complete(commit);
+}
+
 static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 {
 	struct exynos_drm_private *private;
-	int ret;
+	struct drm_encoder *encoder;
+	unsigned int clone_mask;
+	int cnt, ret;
 
 	private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
 	if (!private)
 		return -ENOMEM;
 
+	init_waitqueue_head(&private->wait);
+	spin_lock_init(&private->lock);
+
 	dev_set_drvdata(dev->dev, dev);
 	dev->dev_private = (void *)private;
 
@@ -67,7 +164,13 @@
 	exynos_drm_mode_config_init(dev);
 
 	/* setup possible_clones. */
-	exynos_drm_encoder_setup(dev);
+	cnt = 0;
+	clone_mask = 0;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		clone_mask |= (1 << (cnt++));
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		encoder->possible_clones = clone_mask;
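
This open-codes the clone setup that exynos_drm_encoder_setup() used to do,
minus the per-type switch: every encoder is marked cloneable with every
other, so each one gets a mask with one bit per registered encoder. The loop
is equivalent to the dense mask (1u << n) - 1, as this small check shows:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int clone_mask = 0;
	int cnt, n = 3;		/* pretend three encoders are registered */

	for (cnt = 0; cnt < n; cnt++)
		clone_mask |= 1u << cnt;

	assert(clone_mask == (1u << n) - 1);
	printf("clone_mask = 0x%x\n", clone_mask);	/* 0x7 */
	return 0;
}
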
 
 	platform_set_drvdata(dev->platformdev, dev);
 
@@ -143,6 +246,64 @@
 	return 0;
 }
 
+static int commit_is_pending(struct exynos_drm_private *priv, u32 crtcs)
+{
+	bool pending;
+
+	spin_lock(&priv->lock);
+	pending = priv->pending & crtcs;
+	spin_unlock(&priv->lock);
+
+	return pending;
+}
+
+int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
+			 bool async)
+{
+	struct exynos_drm_private *priv = dev->dev_private;
+	struct exynos_atomic_commit *commit;
+	int i, ret;
+
+	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
+	if (!commit)
+		return -ENOMEM;
+
+	ret = drm_atomic_helper_prepare_planes(dev, state);
+	if (ret) {
+		kfree(commit);
+		return ret;
+	}
+
+	/* This is the point of no return */
+
+	INIT_WORK(&commit->work, exynos_drm_atomic_work);
+	commit->dev = dev;
+	commit->state = state;
+
+	/* Wait until all affected CRTCs have completed previous commits and
+	 * mark them as pending.
+	 */
+	for (i = 0; i < dev->mode_config.num_crtc; ++i) {
+		if (state->crtcs[i])
+			commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
+	}
+
+	wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));
+
+	spin_lock(&priv->lock);
+	priv->pending |= commit->crtcs;
+	spin_unlock(&priv->lock);
+
+	drm_atomic_helper_swap_state(dev, state);
+
+	if (async)
+		schedule_work(&commit->work);
+	else
+		exynos_atomic_commit_complete(commit);
+
+	return 0;
+}
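
Together with exynos_atomic_commit_complete() above, this forms a per-CRTC
commit lock: wait until none of the affected CRTCs are marked pending, mark
them, and let the completion path clear the bits and wake waiters. A reduced
sketch of the handshake, with hypothetical names mirroring
priv->pending/lock/wait:

#include <linux/spinlock.h>
#include <linux/wait.h>

static u32 busy_crtcs;		/* mirrors priv->pending */
static DEFINE_SPINLOCK(busy_lock);
static DECLARE_WAIT_QUEUE_HEAD(busy_wait);

static bool crtcs_busy(u32 crtcs)
{
	bool busy;

	spin_lock(&busy_lock);
	busy = busy_crtcs & crtcs;
	spin_unlock(&busy_lock);
	return busy;
}

static void claim_crtcs(u32 crtcs)	/* commit entry */
{
	wait_event(busy_wait, !crtcs_busy(crtcs));
	spin_lock(&busy_lock);
	busy_crtcs |= crtcs;
	spin_unlock(&busy_lock);
}

static void release_crtcs(u32 crtcs)	/* commit completion */
{
	spin_lock(&busy_lock);
	busy_crtcs &= ~crtcs;
	spin_unlock(&busy_lock);
	wake_up_all(&busy_wait);
}
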
+
 static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
 {
 	struct drm_connector *connector;
@@ -242,25 +403,25 @@
 
 static const struct drm_ioctl_desc exynos_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
+			DRM_UNLOCKED | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
 			DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
-			exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
-			vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER,
-			exynos_g2d_get_ver_ioctl, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST,
-			exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
-			exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
-			exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
-			exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
-			exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
-			exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl,
+			DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
 };
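
The flag changes are what make the DRIVER_RENDER bit added below useful:
DRM_RENDER_ALLOW marks an ioctl as callable on the unprivileged render node,
not only on the primary node, so a client that just needs GEM/G2D never has
to negotiate DRM master. A minimal userspace check, assuming the usual
render-node path:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* renderD128 is conventionally the first render node. */
	int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);

	if (fd < 0) {
		perror("open render node");
		return 1;
	}
	/* EXYNOS_GEM_CREATE, G2D and IPP ioctls are permitted on this fd. */
	close(fd);
	return 0;
}
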
 
 static const struct file_operations exynos_drm_driver_fops = {
@@ -277,11 +438,10 @@
 };
 
 static struct drm_driver exynos_drm_driver = {
-	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
+				  | DRIVER_ATOMIC | DRIVER_RENDER,
 	.load			= exynos_drm_load,
 	.unload			= exynos_drm_unload,
-	.suspend		= exynos_drm_suspend,
-	.resume			= exynos_drm_resume,
 	.open			= exynos_drm_open,
 	.preclose		= exynos_drm_preclose,
 	.lastclose		= exynos_drm_lastclose,
@@ -297,8 +457,12 @@
 	.dumb_destroy		= drm_gem_dumb_destroy,
 	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
-	.gem_prime_export	= exynos_dmabuf_prime_export,
-	.gem_prime_import	= exynos_dmabuf_prime_import,
+	.gem_prime_export	= drm_gem_prime_export,
+	.gem_prime_import	= drm_gem_prime_import,
+	.gem_prime_get_sg_table	= exynos_drm_gem_prime_get_sg_table,
+	.gem_prime_import_sg_table	= exynos_drm_gem_prime_import_sg_table,
+	.gem_prime_vmap		= exynos_drm_gem_prime_vmap,
+	.gem_prime_vunmap	= exynos_drm_gem_prime_vunmap,
 	.ioctls			= exynos_ioctls,
 	.num_ioctls		= ARRAY_SIZE(exynos_ioctls),
 	.fops			= &exynos_drm_driver_fops,
@@ -345,9 +509,6 @@
  * because connector requires pipe number of its crtc during initialization.
  */
 static struct platform_driver *const exynos_drm_kms_drivers[] = {
-#ifdef CONFIG_DRM_EXYNOS_VIDI
-	&vidi_driver,
-#endif
 #ifdef CONFIG_DRM_EXYNOS_FIMD
 	&fimd_driver,
 #endif
@@ -370,6 +531,9 @@
 	&mixer_driver,
 	&hdmi_driver,
 #endif
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+	&vidi_driver,
+#endif
 };
 
 static struct platform_driver *const exynos_drm_non_kms_drivers[] = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index dd00f16..b7ba21d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -44,23 +44,14 @@
  *	- the unit is screen coordinates.
  * @src_y: offset y on a framebuffer to be displayed.
  *	- the unit is screen coordinates.
- * @src_width: width of a partial image to be displayed from framebuffer.
- * @src_height: height of a partial image to be displayed from framebuffer.
- * @fb_width: width of a framebuffer.
- * @fb_height: height of a framebuffer.
+ * @src_w: width of a partial image to be displayed from framebuffer.
+ * @src_h: height of a partial image to be displayed from framebuffer.
  * @crtc_x: offset x on hardware screen.
  * @crtc_y: offset y on hardware screen.
- * @crtc_width: window width to be displayed (hardware screen).
- * @crtc_height: window height to be displayed (hardware screen).
- * @mode_width: width of screen mode.
- * @mode_height: height of screen mode.
+ * @crtc_w: window width to be displayed (hardware screen).
+ * @crtc_h: window height to be displayed (hardware screen).
  * @h_ratio: horizontal scaling ratio, 16.16 fixed point
  * @v_ratio: vertical scaling ratio, 16.16 fixed point
- * @refresh: refresh rate.
- * @scan_flag: interlace or progressive way.
- *	(it could be DRM_MODE_FLAG_*)
- * @bpp: pixel size.(in bit)
- * @pixel_format: fourcc pixel format of this overlay
  * @dma_addr: array of bus (accessed by dma) addresses to the memory region
  *	      allocated for an overlay.
  * @zpos: order of overlay layer(z position).
@@ -73,73 +64,17 @@
 	struct drm_plane base;
 	unsigned int src_x;
 	unsigned int src_y;
-	unsigned int src_width;
-	unsigned int src_height;
-	unsigned int fb_width;
-	unsigned int fb_height;
+	unsigned int src_w;
+	unsigned int src_h;
 	unsigned int crtc_x;
 	unsigned int crtc_y;
-	unsigned int crtc_width;
-	unsigned int crtc_height;
-	unsigned int mode_width;
-	unsigned int mode_height;
+	unsigned int crtc_w;
+	unsigned int crtc_h;
 	unsigned int h_ratio;
 	unsigned int v_ratio;
-	unsigned int refresh;
-	unsigned int scan_flag;
-	unsigned int bpp;
-	unsigned int pitch;
-	uint32_t pixel_format;
 	dma_addr_t dma_addr[MAX_FB_BUFFER];
 	unsigned int zpos;
-};
-
-/*
- * Exynos DRM Display Structure.
- *	- this structure is common to analog tv, digital tv and lcd panel.
- *
- * @create_connector: initialize and register a new connector
- * @remove: cleans up the display for removal
- * @mode_fixup: fix mode data comparing to hw specific display mode.
- * @mode_set: convert drm_display_mode to hw specific display mode and
- *	      would be called by encoder->mode_set().
- * @check_mode: check if mode is valid or not.
- * @dpms: display device on or off.
- * @commit: apply changes to hw
- */
-struct exynos_drm_display;
-struct exynos_drm_display_ops {
-	int (*create_connector)(struct exynos_drm_display *display,
-				struct drm_encoder *encoder);
-	void (*remove)(struct exynos_drm_display *display);
-	void (*mode_fixup)(struct exynos_drm_display *display,
-				struct drm_connector *connector,
-				const struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode);
-	void (*mode_set)(struct exynos_drm_display *display,
-				struct drm_display_mode *mode);
-	int (*check_mode)(struct exynos_drm_display *display,
-				struct drm_display_mode *mode);
-	void (*dpms)(struct exynos_drm_display *display, int mode);
-	void (*commit)(struct exynos_drm_display *display);
-};
-
-/*
- * Exynos drm display structure, maps 1:1 with an encoder/connector
- *
- * @list: the list entry for this manager
- * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
- * @encoder: encoder object this display maps to
- * @connector: connector object this display maps to
- * @ops: pointer to callbacks for exynos drm specific functionality
- * @ctx: A pointer to the display's implementation specific context
- */
-struct exynos_drm_display {
-	struct list_head list;
-	enum exynos_drm_output_type type;
-	struct drm_encoder *encoder;
-	struct drm_connector *connector;
-	struct exynos_drm_display_ops *ops;
+	struct drm_framebuffer *pending_fb;
 };
 
 /*
@@ -153,8 +88,10 @@
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
  * @wait_for_vblank: wait for vblank interrupt to make sure that
  *	hardware overlay is updated.
- * @win_commit: apply hardware specific overlay data to registers.
- * @win_disable: disable hardware specific overlay.
+ * @atomic_begin: prepare a window to receive an update
+ * @atomic_flush: mark the end of a window update
+ * @update_plane: apply hardware specific overlay data to registers.
+ * @disable_plane: disable hardware specific overlay.
  * @te_handler: trigger to transfer video image at the tearing effect
  *	synchronization signal if there is a page flip request.
  * @clock_enable: optional function enabling/disabling display domain clock,
@@ -173,11 +110,16 @@
 	int (*enable_vblank)(struct exynos_drm_crtc *crtc);
 	void (*disable_vblank)(struct exynos_drm_crtc *crtc);
 	void (*wait_for_vblank)(struct exynos_drm_crtc *crtc);
-	void (*win_commit)(struct exynos_drm_crtc *crtc, unsigned int zpos);
-	void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos);
+	void (*atomic_begin)(struct exynos_drm_crtc *crtc,
+			      struct exynos_drm_plane *plane);
+	void (*update_plane)(struct exynos_drm_crtc *crtc,
+			     struct exynos_drm_plane *plane);
+	void (*disable_plane)(struct exynos_drm_crtc *crtc,
+			      struct exynos_drm_plane *plane);
+	void (*atomic_flush)(struct exynos_drm_crtc *crtc,
+			      struct exynos_drm_plane *plane);
 	void (*te_handler)(struct exynos_drm_crtc *crtc);
 	void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
-	void (*clear_channels)(struct exynos_drm_crtc *crtc);
 };
 
 /*
@@ -194,6 +136,8 @@
  *	this pipe value.
  * @enabled: if the crtc is enabled or not
  * @event: vblank event that is currently queued for flip
+ * @wait_update: wait for all pending plane updates to finish
+ * @pending_update: number of pending plane updates in this crtc
  * @ops: pointer to callbacks for exynos drm specific functionality
  * @ctx: A pointer to the crtc's implementation specific context
  */
@@ -201,9 +145,9 @@
 	struct drm_crtc			base;
 	enum exynos_drm_output_type	type;
 	unsigned int			pipe;
-	bool				enabled;
-	wait_queue_head_t		pending_flip_queue;
 	struct drm_pending_vblank_event	*event;
+	wait_queue_head_t		wait_update;
+	atomic_t			pending_update;
 	const struct exynos_drm_crtc_ops	*ops;
 	void				*ctx;
 };
@@ -229,6 +173,9 @@
  * @da_space_size: size of device address space.
  *	if 0 then default value is used for it.
  * @pipe: the pipe number for this crtc/manager.
+ * @pending: the crtcs that have pending updates to finish
+ * @lock: protect access to @pending
+ * @wait: wait for an atomic commit to finish
  */
 struct exynos_drm_private {
 	struct drm_fb_helper *fb_helper;
@@ -244,6 +191,11 @@
 	unsigned long da_space_size;
 
 	unsigned int pipe;
+
+	/* for atomic commit */
+	u32			pending;
+	spinlock_t		lock;
+	wait_queue_head_t	wait;
 };
 
 /*
@@ -285,20 +237,26 @@
 void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
 
 #ifdef CONFIG_DRM_EXYNOS_DPI
-struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
-int exynos_dpi_remove(struct exynos_drm_display *display);
+struct drm_encoder *exynos_dpi_probe(struct device *dev);
+int exynos_dpi_remove(struct drm_encoder *encoder);
+int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder);
 #else
-static inline struct exynos_drm_display *
+static inline struct drm_encoder *
 exynos_dpi_probe(struct device *dev) { return NULL; }
-static inline int exynos_dpi_remove(struct exynos_drm_display *display)
+static inline int exynos_dpi_remove(struct drm_encoder *encoder)
+{
+	return 0;
+}
+static inline int exynos_dpi_bind(struct drm_device *dev,
+				  struct drm_encoder *encoder)
 {
 	return 0;
 }
 #endif
 
-/* This function creates a encoder and a connector, and initializes them. */
-int exynos_drm_create_enc_conn(struct drm_device *dev,
-				struct exynos_drm_display *display);
+int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
+			 bool async);
+
 
 extern struct platform_driver fimd_driver;
 extern struct platform_driver exynos5433_decon_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 0e58b36..12b03b3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -259,7 +259,7 @@
 };
 
 struct exynos_dsi {
-	struct exynos_drm_display display;
+	struct drm_encoder encoder;
 	struct mipi_dsi_host dsi_host;
 	struct drm_connector connector;
 	struct device_node *panel_node;
@@ -295,9 +295,9 @@
 #define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
 #define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
 
-static inline struct exynos_dsi *display_to_dsi(struct exynos_drm_display *d)
+static inline struct exynos_dsi *encoder_to_dsi(struct drm_encoder *e)
 {
-	return container_of(d, struct exynos_dsi, display);
+	return container_of(e, struct exynos_dsi, encoder);
 }
 
 enum reg_idx {
@@ -1272,7 +1272,7 @@
 static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
 {
 	struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
-	struct drm_encoder *encoder = dsi->display.encoder;
+	struct drm_encoder *encoder = &dsi->encoder;
 
 	if (dsi->state & DSIM_STATE_VIDOUT_AVAILABLE)
 		exynos_drm_crtc_te_handler(encoder->crtc);
@@ -1518,16 +1518,17 @@
 		dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
 }
 
-static int exynos_dsi_enable(struct exynos_dsi *dsi)
+static void exynos_dsi_enable(struct drm_encoder *encoder)
 {
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
 	int ret;
 
 	if (dsi->state & DSIM_STATE_ENABLED)
-		return 0;
+		return;
 
 	ret = exynos_dsi_poweron(dsi);
 	if (ret < 0)
-		return ret;
+		return;
 
 	dsi->state |= DSIM_STATE_ENABLED;
 
@@ -1535,7 +1536,7 @@
 	if (ret < 0) {
 		dsi->state &= ~DSIM_STATE_ENABLED;
 		exynos_dsi_poweroff(dsi);
-		return ret;
+		return;
 	}
 
 	exynos_dsi_set_display_mode(dsi);
@@ -1547,16 +1548,16 @@
 		exynos_dsi_set_display_enable(dsi, false);
 		drm_panel_unprepare(dsi->panel);
 		exynos_dsi_poweroff(dsi);
-		return ret;
+		return;
 	}
 
 	dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
-
-	return 0;
 }
 
-static void exynos_dsi_disable(struct exynos_dsi *dsi)
+static void exynos_dsi_disable(struct drm_encoder *encoder)
 {
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+
 	if (!(dsi->state & DSIM_STATE_ENABLED))
 		return;
 
@@ -1571,26 +1572,6 @@
 	exynos_dsi_poweroff(dsi);
 }
 
-static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode)
-{
-	struct exynos_dsi *dsi = display_to_dsi(display);
-
-	if (dsi->panel) {
-		switch (mode) {
-		case DRM_MODE_DPMS_ON:
-			exynos_dsi_enable(dsi);
-			break;
-		case DRM_MODE_DPMS_STANDBY:
-		case DRM_MODE_DPMS_SUSPEND:
-		case DRM_MODE_DPMS_OFF:
-			exynos_dsi_disable(dsi);
-			break;
-		default:
-			break;
-		}
-	}
-}
-
 static enum drm_connector_status
 exynos_dsi_detect(struct drm_connector *connector, bool force)
 {
@@ -1601,10 +1582,10 @@
 		if (dsi->panel)
 			drm_panel_attach(dsi->panel, &dsi->connector);
 	} else if (!dsi->panel_node) {
-		struct exynos_drm_display *display;
+		struct drm_encoder *encoder;
 
-		display = platform_get_drvdata(to_platform_device(dsi->dev));
-		exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF);
+		encoder = platform_get_drvdata(to_platform_device(dsi->dev));
+		exynos_dsi_disable(encoder);
 		drm_panel_detach(dsi->panel);
 		dsi->panel = NULL;
 	}
@@ -1647,7 +1628,7 @@
 {
 	struct exynos_dsi *dsi = connector_to_dsi(connector);
 
-	return dsi->display.encoder;
+	return &dsi->encoder;
 }
 
 static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
@@ -1655,10 +1636,9 @@
 	.best_encoder = exynos_dsi_best_encoder,
 };
 
-static int exynos_dsi_create_connector(struct exynos_drm_display *display,
-				       struct drm_encoder *encoder)
+static int exynos_dsi_create_connector(struct drm_encoder *encoder)
 {
-	struct exynos_dsi *dsi = display_to_dsi(display);
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
 	struct drm_connector *connector = &dsi->connector;
 	int ret;
 
@@ -1679,26 +1659,40 @@
 	return 0;
 }
 
-static void exynos_dsi_mode_set(struct exynos_drm_display *display,
-			 struct drm_display_mode *mode)
+static bool exynos_dsi_mode_fixup(struct drm_encoder *encoder,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
 {
-	struct exynos_dsi *dsi = display_to_dsi(display);
-	struct videomode *vm = &dsi->vm;
-
-	vm->hactive = mode->hdisplay;
-	vm->vactive = mode->vdisplay;
-	vm->vfront_porch = mode->vsync_start - mode->vdisplay;
-	vm->vback_porch = mode->vtotal - mode->vsync_end;
-	vm->vsync_len = mode->vsync_end - mode->vsync_start;
-	vm->hfront_porch = mode->hsync_start - mode->hdisplay;
-	vm->hback_porch = mode->htotal - mode->hsync_end;
-	vm->hsync_len = mode->hsync_end - mode->hsync_start;
+	return true;
 }
 
-static struct exynos_drm_display_ops exynos_dsi_display_ops = {
-	.create_connector = exynos_dsi_create_connector,
+static void exynos_dsi_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
+	struct videomode *vm = &dsi->vm;
+	struct drm_display_mode *m = adjusted_mode;
+
+	vm->hactive = m->hdisplay;
+	vm->vactive = m->vdisplay;
+	vm->vfront_porch = m->vsync_start - m->vdisplay;
+	vm->vback_porch = m->vtotal - m->vsync_end;
+	vm->vsync_len = m->vsync_end - m->vsync_start;
+	vm->hfront_porch = m->hsync_start - m->hdisplay;
+	vm->hback_porch = m->htotal - m->hsync_end;
+	vm->hsync_len = m->hsync_end - m->hsync_start;
+}
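
The mode-to-videomode conversion is pure arithmetic on the sync positions of
the adjusted mode. As a worked example, the standard CEA 1920x1080@60
horizontal timings (hdisplay 1920, hsync_start 2008, hsync_end 2052,
htotal 2200) decompose as follows:

#include <stdio.h>

int main(void)
{
	/* CEA-861 1920x1080@60 horizontal timings */
	int hdisplay = 1920, hsync_start = 2008;
	int hsync_end = 2052, htotal = 2200;

	printf("hfront_porch = %d\n", hsync_start - hdisplay);	/* 88  */
	printf("hsync_len    = %d\n", hsync_end - hsync_start);	/* 44  */
	printf("hback_porch  = %d\n", htotal - hsync_end);	/* 148 */
	return 0;
}
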
+
+static struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
+	.mode_fixup = exynos_dsi_mode_fixup,
 	.mode_set = exynos_dsi_mode_set,
-	.dpms = exynos_dsi_dpms
+	.enable = exynos_dsi_enable,
+	.disable = exynos_dsi_disable,
+};
+
+static struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
 };
 
 MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
@@ -1821,22 +1815,35 @@
 static int exynos_dsi_bind(struct device *dev, struct device *master,
 				void *data)
 {
-	struct exynos_drm_display *display = dev_get_drvdata(dev);
-	struct exynos_dsi *dsi = display_to_dsi(display);
+	struct drm_encoder *encoder = dev_get_drvdata(dev);
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
 	struct drm_device *drm_dev = data;
 	struct drm_bridge *bridge;
 	int ret;
 
-	ret = exynos_drm_create_enc_conn(drm_dev, display);
+	ret = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+						  EXYNOS_DISPLAY_TYPE_LCD);
+	if (ret < 0)
+		return ret;
+
+	encoder->possible_crtcs = 1 << ret;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
+
+	ret = exynos_dsi_create_connector(encoder);
 	if (ret) {
-		DRM_ERROR("Encoder create [%d] failed with %d\n",
-			  display->type, ret);
+		DRM_ERROR("failed to create connector ret = %d\n", ret);
+		drm_encoder_cleanup(encoder);
 		return ret;
 	}
 
 	bridge = of_drm_find_bridge(dsi->bridge_node);
 	if (bridge) {
-		display->encoder->bridge = bridge;
 		drm_bridge_attach(drm_dev, bridge);
 	}
 
@@ -1846,10 +1853,10 @@
 static void exynos_dsi_unbind(struct device *dev, struct device *master,
 				void *data)
 {
-	struct exynos_drm_display *display = dev_get_drvdata(dev);
-	struct exynos_dsi *dsi = display_to_dsi(display);
+	struct drm_encoder *encoder = dev_get_drvdata(dev);
+	struct exynos_dsi *dsi = encoder_to_dsi(encoder);
 
-	exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF);
+	exynos_dsi_disable(encoder);
 
 	mipi_dsi_host_unregister(&dsi->dsi_host);
 }
@@ -1870,9 +1877,6 @@
 	if (!dsi)
 		return -ENOMEM;
 
-	dsi->display.type = EXYNOS_DISPLAY_TYPE_LCD;
-	dsi->display.ops = &exynos_dsi_display_ops;
-
 	/* To be checked as invalid one */
 	dsi->te_gpio = -ENOENT;
 
@@ -1948,7 +1952,7 @@
 		return ret;
 	}
 
-	platform_set_drvdata(pdev, &dsi->display);
+	platform_set_drvdata(pdev, &dsi->encoder);
 
 	return component_add(dev, &exynos_dsi_component_ops);
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
deleted file mode 100644
index 7b89fd5..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/* exynos_drm_encoder.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- *	Inki Dae <inki.dae@samsung.com>
- *	Joonyoung Shim <jy0922.shim@samsung.com>
- *	Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_encoder.h"
-
-#define to_exynos_encoder(x)	container_of(x, struct exynos_drm_encoder,\
-				drm_encoder)
-
-/*
- * exynos specific encoder structure.
- *
- * @drm_encoder: encoder object.
- * @display: the display structure that maps to this encoder
- */
-struct exynos_drm_encoder {
-	struct drm_encoder		drm_encoder;
-	struct exynos_drm_display	*display;
-};
-
-static bool
-exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
-			       const struct drm_display_mode *mode,
-			       struct drm_display_mode *adjusted_mode)
-{
-	struct drm_device *dev = encoder->dev;
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-	struct exynos_drm_display *display = exynos_encoder->display;
-	struct drm_connector *connector;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (connector->encoder != encoder)
-			continue;
-
-		if (display->ops->mode_fixup)
-			display->ops->mode_fixup(display, connector, mode,
-					adjusted_mode);
-	}
-
-	return true;
-}
-
-static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
-					 struct drm_display_mode *mode,
-					 struct drm_display_mode *adjusted_mode)
-{
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-	struct exynos_drm_display *display = exynos_encoder->display;
-
-	if (display->ops->mode_set)
-		display->ops->mode_set(display, adjusted_mode);
-}
-
-static void exynos_drm_encoder_enable(struct drm_encoder *encoder)
-{
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-	struct exynos_drm_display *display = exynos_encoder->display;
-
-	if (display->ops->dpms)
-		display->ops->dpms(display, DRM_MODE_DPMS_ON);
-
-	if (display->ops->commit)
-		display->ops->commit(display);
-}
-
-static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
-{
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-	struct exynos_drm_display *display = exynos_encoder->display;
-
-	if (display->ops->dpms)
-		display->ops->dpms(display, DRM_MODE_DPMS_OFF);
-}
-
-static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
-	.mode_fixup	= exynos_drm_encoder_mode_fixup,
-	.mode_set	= exynos_drm_encoder_mode_set,
-	.enable		= exynos_drm_encoder_enable,
-	.disable	= exynos_drm_encoder_disable,
-};
-
-static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
-{
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-
-	drm_encoder_cleanup(encoder);
-	kfree(exynos_encoder);
-}
-
-static struct drm_encoder_funcs exynos_encoder_funcs = {
-	.destroy = exynos_drm_encoder_destroy,
-};
-
-static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder)
-{
-	struct drm_encoder *clone;
-	struct drm_device *dev = encoder->dev;
-	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
-	struct exynos_drm_display *display = exynos_encoder->display;
-	unsigned int clone_mask = 0;
-	int cnt = 0;
-
-	list_for_each_entry(clone, &dev->mode_config.encoder_list, head) {
-		switch (display->type) {
-		case EXYNOS_DISPLAY_TYPE_LCD:
-		case EXYNOS_DISPLAY_TYPE_HDMI:
-		case EXYNOS_DISPLAY_TYPE_VIDI:
-			clone_mask |= (1 << (cnt++));
-			break;
-		default:
-			continue;
-		}
-	}
-
-	return clone_mask;
-}
-
-void exynos_drm_encoder_setup(struct drm_device *dev)
-{
-	struct drm_encoder *encoder;
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-		encoder->possible_clones = exynos_drm_encoder_clones(encoder);
-}
-
-struct drm_encoder *
-exynos_drm_encoder_create(struct drm_device *dev,
-			   struct exynos_drm_display *display,
-			   unsigned long possible_crtcs)
-{
-	struct drm_encoder *encoder;
-	struct exynos_drm_encoder *exynos_encoder;
-
-	if (!possible_crtcs)
-		return NULL;
-
-	exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
-	if (!exynos_encoder)
-		return NULL;
-
-	exynos_encoder->display = display;
-	encoder = &exynos_encoder->drm_encoder;
-	encoder->possible_crtcs = possible_crtcs;
-
-	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
-	drm_encoder_init(dev, encoder, &exynos_encoder_funcs,
-			DRM_MODE_ENCODER_TMDS);
-
-	drm_encoder_helper_add(encoder, &exynos_encoder_helper_funcs);
-
-	DRM_DEBUG_KMS("encoder has been created\n");
-
-	return encoder;
-}
-
-struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder)
-{
-	return to_exynos_encoder(encoder)->display;
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
deleted file mode 100644
index 26305d8..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Authors:
- *	Inki Dae <inki.dae@samsung.com>
- *	Joonyoung Shim <jy0922.shim@samsung.com>
- *	Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_ENCODER_H_
-#define _EXYNOS_DRM_ENCODER_H_
-
-void exynos_drm_encoder_setup(struct drm_device *dev);
-struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
-			struct exynos_drm_display *mgr,
-			unsigned long possible_crtcs);
-struct exynos_drm_display *exynos_drm_get_display(struct drm_encoder *encoder);
-
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 2b6320e..0842808 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -23,7 +23,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_fbdev.h"
-#include "exynos_drm_gem.h"
 #include "exynos_drm_iommu.h"
 #include "exynos_drm_crtc.h"
 
@@ -33,12 +32,10 @@
  * exynos specific framebuffer structure.
  *
  * @fb: drm framebuffer object.
- * @buf_cnt: a buffer count to drm framebuffer.
  * @exynos_gem_obj: array of exynos specific gem objects backing this framebuffer.
  */
 struct exynos_drm_fb {
 	struct drm_framebuffer		fb;
-	unsigned int			buf_cnt;
 	struct exynos_drm_gem_obj	*exynos_gem_obj[MAX_FB_BUFFER];
 };
 
@@ -98,10 +95,6 @@
 {
 	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
 
-	/* This fb should have only one gem object. */
-	if (WARN_ON(exynos_fb->buf_cnt != 1))
-		return -EINVAL;
-
 	return drm_gem_handle_create(file_priv,
 			&exynos_fb->exynos_gem_obj[0]->base, handle);
 }
@@ -122,138 +115,96 @@
 	.dirty		= exynos_drm_fb_dirty,
 };
 
-void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
-						unsigned int cnt)
-{
-	struct exynos_drm_fb *exynos_fb;
-
-	exynos_fb = to_exynos_fb(fb);
-
-	exynos_fb->buf_cnt = cnt;
-}
-
-unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb)
-{
-	struct exynos_drm_fb *exynos_fb;
-
-	exynos_fb = to_exynos_fb(fb);
-
-	return exynos_fb->buf_cnt;
-}
-
 struct drm_framebuffer *
 exynos_drm_framebuffer_init(struct drm_device *dev,
 			    struct drm_mode_fb_cmd2 *mode_cmd,
-			    struct drm_gem_object *obj)
+			    struct exynos_drm_gem_obj **gem_obj,
+			    int count)
 {
 	struct exynos_drm_fb *exynos_fb;
-	struct exynos_drm_gem_obj *exynos_gem_obj;
+	int i;
 	int ret;
 
-	exynos_gem_obj = to_exynos_gem_obj(obj);
-
-	ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
-	if (ret < 0)
-		return ERR_PTR(ret);
-
 	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
 	if (!exynos_fb)
 		return ERR_PTR(-ENOMEM);
 
+	for (i = 0; i < count; i++) {
+		ret = check_fb_gem_memory_type(dev, gem_obj[i]);
+		if (ret < 0)
+			goto err;
+
+		exynos_fb->exynos_gem_obj[i] = gem_obj[i];
+	}
+
 	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-	exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
 
 	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
-	if (ret) {
-		kfree(exynos_fb);
+	if (ret < 0) {
 		DRM_ERROR("failed to initialize framebuffer\n");
-		return ERR_PTR(ret);
+		goto err;
 	}
 
 	return &exynos_fb->fb;
+
+err:
+	kfree(exynos_fb);
+	return ERR_PTR(ret);
 }
 
 static struct drm_framebuffer *
 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		      struct drm_mode_fb_cmd2 *mode_cmd)
 {
+	struct exynos_drm_gem_obj *gem_objs[MAX_FB_BUFFER];
 	struct drm_gem_object *obj;
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_fb *exynos_fb;
-	int i, ret;
+	struct drm_framebuffer *fb;
+	int i;
+	int ret;
 
-	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
-	if (!exynos_fb)
-		return ERR_PTR(-ENOMEM);
-
-	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
-	if (!obj) {
-		DRM_ERROR("failed to lookup gem object\n");
-		ret = -ENOENT;
-		goto err_free;
-	}
-
-	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-	exynos_fb->buf_cnt = drm_format_num_planes(mode_cmd->pixel_format);
-
-	DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
-
-	for (i = 1; i < exynos_fb->buf_cnt; i++) {
+	for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
 		obj = drm_gem_object_lookup(dev, file_priv,
-				mode_cmd->handles[i]);
+					    mode_cmd->handles[i]);
 		if (!obj) {
 			DRM_ERROR("failed to lookup gem object\n");
 			ret = -ENOENT;
-			exynos_fb->buf_cnt = i;
-			goto err_unreference;
+			goto err;
 		}
 
-		exynos_gem_obj = to_exynos_gem_obj(obj);
-		exynos_fb->exynos_gem_obj[i] = exynos_gem_obj;
-
-		ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
-		if (ret < 0)
-			goto err_unreference;
+		gem_objs[i] = to_exynos_gem_obj(obj);
 	}
 
-	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
-	if (ret) {
-		DRM_ERROR("failed to init framebuffer.\n");
-		goto err_unreference;
+	fb = exynos_drm_framebuffer_init(dev, mode_cmd, gem_objs, i);
+	if (IS_ERR(fb)) {
+		ret = PTR_ERR(fb);
+		goto err;
 	}
 
-	return &exynos_fb->fb;
+	return fb;
 
-err_unreference:
-	for (i = 0; i < exynos_fb->buf_cnt; i++) {
-		struct drm_gem_object *obj;
+err:
+	while (i--)
+		drm_gem_object_unreference_unlocked(&gem_objs[i]->base);
 
-		obj = &exynos_fb->exynos_gem_obj[i]->base;
-		if (obj)
-			drm_gem_object_unreference_unlocked(obj);
-	}
-err_free:
-	kfree(exynos_fb);
 	return ERR_PTR(ret);
 }
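
The rewritten lookup loop indexes gem_objs[] by plane, so the count handed to
exynos_drm_framebuffer_init() is whatever drm_format_num_planes() reports for
the fourcc (1 for XRGB8888, 2 for NV12, 3 for YUV420). On failure the
while (i--) unwind drops exactly the references taken so far. The idiom in
miniature:

#include <stdio.h>

static int acquire(int i) { return i != 2; }	/* pretend slot 2 fails */
static void release(int i) { printf("release %d\n", i); }

int main(void)
{
	int i, n = 4;

	for (i = 0; i < n; i++)
		if (!acquire(i))
			break;

	if (i < n)		/* failure: undo slots 0..i-1 only */
		while (i--)
			release(i);	/* prints "release 1", "release 0" */
	return 0;
}
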
 
-struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
-						int index)
+struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
+						 int index)
 {
 	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
-	struct exynos_drm_gem_buf *buffer;
+	struct exynos_drm_gem_obj *obj;
 
 	if (index >= MAX_FB_BUFFER)
 		return NULL;
 
-	buffer = exynos_fb->exynos_gem_obj[index]->buffer;
-	if (!buffer)
+	obj = exynos_fb->exynos_gem_obj[index];
+	if (!obj)
 		return NULL;
 
-	DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
+	DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)obj->dma_addr);
 
-	return buffer;
+	return obj;
 }
 
 static void exynos_drm_output_poll_changed(struct drm_device *dev)
@@ -267,41 +218,6 @@
 		exynos_drm_fbdev_init(dev);
 }
 
-static int exynos_atomic_commit(struct drm_device *dev,
-				struct drm_atomic_state *state,
-				bool async)
-{
-	int ret;
-
-	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (ret)
-		return ret;
-
-	/* This is the point of no return */
-
-	drm_atomic_helper_swap_state(dev, state);
-
-	drm_atomic_helper_commit_modeset_disables(dev, state);
-
-	drm_atomic_helper_commit_modeset_enables(dev, state);
-
-	/*
-	 * Exynos can't update planes with CRTCs and encoders disabled,
-	 * its updates routines, specially for FIMD, requires the clocks
-	 * to be enabled. So it is necessary to handle the modeset operations
-	 * *before* the commit_planes() step, this way it will always
-	 * have the relevant clocks enabled to perform the update.
-	 */
-
-	drm_atomic_helper_commit_planes(dev, state);
-
-	drm_atomic_helper_cleanup_planes(dev, state);
-
-	drm_atomic_state_free(state);
-
-	return 0;
-}
-
 static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
 	.fb_create = exynos_user_fb_create,
 	.output_poll_changed = exynos_drm_output_poll_changed,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 517471b..85e4445 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -14,22 +14,18 @@
 #ifndef _EXYNOS_DRM_FB_H_
 #define _EXYNOS_DRM_FB_H
 
+#include "exynos_drm_gem.h"
+
 struct drm_framebuffer *
 exynos_drm_framebuffer_init(struct drm_device *dev,
 			    struct drm_mode_fb_cmd2 *mode_cmd,
-			    struct drm_gem_object *obj);
+			    struct exynos_drm_gem_obj **gem_obj,
+			    int count);
 
-/* get memory information of a drm framebuffer */
-struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+/* get gem object of a drm framebuffer */
+struct exynos_drm_gem_obj *exynos_drm_fb_gem_obj(struct drm_framebuffer *fb,
 						 int index);
 
 void exynos_drm_mode_config_init(struct drm_device *dev);
 
-/* set a buffer count to drm framebuffer. */
-void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
-						unsigned int cnt);
-
-/* get a buffer count to drm framebuffer. */
-unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb);
-
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e0b085b..a221f75 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -21,7 +21,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_fbdev.h"
-#include "exynos_drm_gem.h"
 #include "exynos_drm_iommu.h"
 
 #define MAX_CONNECTOR		4
@@ -32,7 +31,7 @@
 
 struct exynos_drm_fbdev {
 	struct drm_fb_helper		drm_fb_helper;
-	struct exynos_drm_gem_obj	*exynos_gem_obj;
+	struct exynos_drm_gem_obj	*obj;
 };
 
 static int exynos_drm_fb_mmap(struct fb_info *info,
@@ -40,8 +39,7 @@
 {
 	struct drm_fb_helper *helper = info->par;
 	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
-	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
-	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+	struct exynos_drm_gem_obj *obj = exynos_fbd->obj;
 	unsigned long vm_size;
 	int ret;
 
@@ -49,11 +47,11 @@
 
 	vm_size = vma->vm_end - vma->vm_start;
 
-	if (vm_size > buffer->size)
+	if (vm_size > obj->size)
 		return -EINVAL;
 
-	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
-		buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+	ret = dma_mmap_attrs(helper->dev->dev, vma, obj->pages, obj->dma_addr,
+			     obj->size, &obj->dma_attrs);
 	if (ret < 0) {
 		DRM_ERROR("failed to mmap.\n");
 		return ret;
@@ -65,9 +63,9 @@
 static struct fb_ops exynos_drm_fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_mmap        = exynos_drm_fb_mmap,
-	.fb_fillrect	= cfb_fillrect,
-	.fb_copyarea	= cfb_copyarea,
-	.fb_imageblit	= cfb_imageblit,
+	.fb_fillrect	= drm_fb_helper_cfb_fillrect,
+	.fb_copyarea	= drm_fb_helper_cfb_copyarea,
+	.fb_imageblit	= drm_fb_helper_cfb_imageblit,
 	.fb_check_var	= drm_fb_helper_check_var,
 	.fb_set_par	= drm_fb_helper_set_par,
 	.fb_blank	= drm_fb_helper_blank,
@@ -76,42 +74,42 @@
 };
 
 static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
-				     struct drm_fb_helper_surface_size *sizes,
-				     struct drm_framebuffer *fb)
+				   struct drm_fb_helper_surface_size *sizes,
+				   struct exynos_drm_gem_obj *obj)
 {
-	struct fb_info *fbi = helper->fbdev;
-	struct exynos_drm_gem_buf *buffer;
+	struct fb_info *fbi;
+	struct drm_framebuffer *fb = helper->fb;
 	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
 	unsigned int nr_pages;
 	unsigned long offset;
 
+	fbi = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(fbi)) {
+		DRM_ERROR("failed to allocate fb info.\n");
+		return PTR_ERR(fbi);
+	}
+
+	fbi->par = helper;
+	fbi->flags = FBINFO_FLAG_DEFAULT;
+	fbi->fbops = &exynos_drm_fb_ops;
+
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
 
-	/* RGB formats use only one buffer */
-	buffer = exynos_drm_fb_buffer(fb, 0);
-	if (!buffer) {
-		DRM_DEBUG_KMS("buffer is null.\n");
-		return -EFAULT;
-	}
+	nr_pages = obj->size >> PAGE_SHIFT;
 
-	nr_pages = buffer->size >> PAGE_SHIFT;
-
-	buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
-			nr_pages, VM_MAP,
+	obj->kvaddr = (void __iomem *) vmap(obj->pages, nr_pages, VM_MAP,
 			pgprot_writecombine(PAGE_KERNEL));
-	if (!buffer->kvaddr) {
+	if (!obj->kvaddr) {
 		DRM_ERROR("failed to map pages to kernel space.\n");
+		drm_fb_helper_release_fbi(helper);
 		return -EIO;
 	}
 
-	/* buffer count to framebuffer always is 1 at booting time. */
-	exynos_drm_fb_set_buf_cnt(fb, 1);
-
 	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
 	offset += fbi->var.yoffset * fb->pitches[0];
 
-	fbi->screen_base = buffer->kvaddr + offset;
+	fbi->screen_base = obj->kvaddr + offset;
 	fbi->screen_size = size;
 	fbi->fix.smem_len = size;
 
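The screen_base offset is the standard linear-framebuffer formula:
byte offset = xoffset * bytes_per_pixel + yoffset * pitch. For example,
panning to (10, 2) on a 32bpp framebuffer with a 7680-byte pitch lands
15400 bytes into the buffer:

#include <stdio.h>

int main(void)
{
	unsigned int xoffset = 10, yoffset = 2;
	unsigned int bytes_pp = 4;	/* 32 bpp >> 3 */
	unsigned int pitch = 7680;	/* e.g. 1920 * 4 */
	unsigned long offset = xoffset * bytes_pp +
			       (unsigned long)yoffset * pitch;

	printf("offset = %lu\n", offset);	/* 15400 */
	return 0;
}
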
@@ -122,9 +120,8 @@
 				    struct drm_fb_helper_surface_size *sizes)
 {
 	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
-	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct exynos_drm_gem_obj *obj;
 	struct drm_device *dev = helper->dev;
-	struct fb_info *fbi;
 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 	struct platform_device *pdev = dev->platformdev;
 	unsigned long size;
@@ -142,69 +139,44 @@
 
 	mutex_lock(&dev->struct_mutex);
 
-	fbi = framebuffer_alloc(0, &pdev->dev);
-	if (!fbi) {
-		DRM_ERROR("failed to allocate fb info.\n");
-		ret = -ENOMEM;
-		goto out;
-	}
-
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 
-	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
+	obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
 	/*
 	 * If physically contiguous memory allocation fails and if IOMMU is
 	 * supported then try to get buffer from non physically contiguous
 	 * memory area.
 	 */
-	if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
+	if (IS_ERR(obj) && is_drm_iommu_supported(dev)) {
 		dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
-		exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
-							size);
+		obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG, size);
 	}
 
-	if (IS_ERR(exynos_gem_obj)) {
-		ret = PTR_ERR(exynos_gem_obj);
-		goto err_release_framebuffer;
+	if (IS_ERR(obj)) {
+		ret = PTR_ERR(obj);
+		goto out;
 	}
 
-	exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+	exynos_fbdev->obj = obj;
 
-	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
-			&exynos_gem_obj->base);
+	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd, &obj, 1);
 	if (IS_ERR(helper->fb)) {
 		DRM_ERROR("failed to create drm framebuffer.\n");
 		ret = PTR_ERR(helper->fb);
 		goto err_destroy_gem;
 	}
 
-	helper->fbdev = fbi;
-
-	fbi->par = helper;
-	fbi->flags = FBINFO_FLAG_DEFAULT;
-	fbi->fbops = &exynos_drm_fb_ops;
-
-	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
-	if (ret) {
-		DRM_ERROR("failed to allocate cmap.\n");
-		goto err_destroy_framebuffer;
-	}
-
-	ret = exynos_drm_fbdev_update(helper, sizes, helper->fb);
+	ret = exynos_drm_fbdev_update(helper, sizes, obj);
 	if (ret < 0)
-		goto err_dealloc_cmap;
+		goto err_destroy_framebuffer;
 
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 
-err_dealloc_cmap:
-	fb_dealloc_cmap(&fbi->cmap);
 err_destroy_framebuffer:
 	drm_framebuffer_cleanup(helper->fb);
 err_destroy_gem:
-	exynos_drm_gem_destroy(exynos_gem_obj);
-err_release_framebuffer:
-	framebuffer_release(fbi);
+	exynos_drm_gem_destroy(obj);
 
 /*
  * if failed, all resources allocated above would be released by
@@ -297,11 +269,11 @@
 				      struct drm_fb_helper *fb_helper)
 {
 	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
-	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+	struct exynos_drm_gem_obj *obj = exynos_fbd->obj;
 	struct drm_framebuffer *fb;
 
-	if (exynos_gem_obj->buffer->kvaddr)
-		vunmap(exynos_gem_obj->buffer->kvaddr);
+	if (obj->kvaddr)
+		vunmap(obj->kvaddr);
 
 	/* release drm framebuffer and real buffer */
 	if (fb_helper->fb && fb_helper->fb->funcs) {
@@ -312,21 +284,8 @@
 		}
 	}
 
-	/* release linux framebuffer */
-	if (fb_helper->fbdev) {
-		struct fb_info *info;
-		int ret;
-
-		info = fb_helper->fbdev;
-		ret = unregister_framebuffer(info);
-		if (ret < 0)
-			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
-
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(fb_helper);
+	drm_fb_helper_release_fbi(fb_helper);
 
 	drm_fb_helper_fini(fb_helper);
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 794e56c..750a9e6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -59,6 +59,7 @@
 #define VIDWnALPHA1(win)	(VIDW_ALPHA + 0x04 + (win) * 8)
 
 #define VIDWx_BUF_START(win, buf)	(VIDW_BUF_START(buf) + (win) * 8)
+#define VIDWx_BUF_START_S(win, buf)	(VIDW_BUF_START_S(buf) + (win) * 8)
 #define VIDWx_BUF_END(win, buf)		(VIDW_BUF_END(buf) + (win) * 8)
 #define VIDWx_BUF_SIZE(win, buf)	(VIDW_BUF_SIZE(buf) + (win) * 4)
 
@@ -169,7 +170,7 @@
 
 	struct exynos_drm_panel_info panel;
 	struct fimd_driver_data *driver_data;
-	struct exynos_drm_display *display;
+	struct drm_encoder *encoder;
 };
 
 static const struct of_device_id fimd_driver_dt_match[] = {
@@ -187,6 +188,14 @@
 };
 MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
 
+static const uint32_t fimd_formats[] = {
+	DRM_FORMAT_C8,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+};
+
 static inline struct fimd_driver_data *drm_fimd_get_driver_data(
 	struct platform_device *pdev)
 {
@@ -348,13 +357,6 @@
 	pm_runtime_put(ctx->dev);
 }
 
-static void fimd_iommu_detach_devices(struct fimd_context *ctx)
-{
-	/* detach this sub driver from iommu mapping if supported. */
-	if (is_drm_iommu_supported(ctx->drm_dev))
-		drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
-}
-
 static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
 		const struct drm_display_mode *mode)
 {
@@ -486,9 +488,9 @@
 }
 
 
-static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
+static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
+				struct drm_framebuffer *fb)
 {
-	struct exynos_drm_plane *plane = &ctx->planes[win];
 	unsigned long val;
 
 	val = WINCONx_ENWIN;
@@ -498,11 +500,11 @@
 	 * So if the requested format is ARGB8888, change it to XRGB8888.
 	 */
 	if (ctx->driver_data->has_limited_fmt && !win) {
-		if (plane->pixel_format == DRM_FORMAT_ARGB8888)
-			plane->pixel_format = DRM_FORMAT_XRGB8888;
+		if (fb->pixel_format == DRM_FORMAT_ARGB8888)
+			fb->pixel_format = DRM_FORMAT_XRGB8888;
 	}
 
-	switch (plane->pixel_format) {
+	switch (fb->pixel_format) {
 	case DRM_FORMAT_C8:
 		val |= WINCON0_BPPMODE_8BPP_PALETTE;
 		val |= WINCONx_BURSTLEN_8WORD;
@@ -538,7 +540,7 @@
 		break;
 	}
 
-	DRM_DEBUG_KMS("bpp = %d\n", plane->bpp);
+	DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel);
 
 	/*
 	 * In case of exynos, setting dma-burst to 16Word causes permanent
@@ -548,7 +550,7 @@
 	 * movement causes unstable DMA which results into iommu crash/tear.
 	 */
 
-	if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
+	if (fb->width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
 		val &= ~WINCONx_BURSTLEN_MASK;
 		val |= WINCONx_BURSTLEN_4WORD;
 	}
@@ -598,6 +600,16 @@
 {
 	u32 reg, bits, val;
 
+	/*
+	 * The SHADOWCON/PRTCON register controls update timing.
+	 *
+	 * For example, if only the width value of a register has been
+	 * set while DMA is already running, the fimd hardware could
+	 * malfunction. With window protection enabled, the register
+	 * fields with the '_F' prefix are not updated at vsync; they
+	 * are only applied once the window is unprotected again.
+	 */
+
 	if (ctx->driver_data->has_shadowcon) {
 		reg = SHADOWCON;
 		bits = SHADOWCON_WINx_PROTECT(win);
@@ -614,41 +626,45 @@
 	writel(val, ctx->regs + reg);
 }
 
-static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void fimd_atomic_begin(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
 {
 	struct fimd_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
+
+	if (ctx->suspended)
+		return;
+
+	fimd_shadow_protect_win(ctx, plane->zpos, true);
+}
+
+static void fimd_atomic_flush(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
+{
+	struct fimd_context *ctx = crtc->ctx;
+
+	if (ctx->suspended)
+		return;
+
+	fimd_shadow_protect_win(ctx, plane->zpos, false);
+}
+
+static void fimd_update_plane(struct exynos_drm_crtc *crtc,
+			      struct exynos_drm_plane *plane)
+{
+	struct fimd_context *ctx = crtc->ctx;
+	struct drm_plane_state *state = plane->base.state;
 	dma_addr_t dma_addr;
 	unsigned long val, size, offset;
 	unsigned int last_x, last_y, buf_offsize, line_size;
+	unsigned int win = plane->zpos;
+	unsigned int bpp = state->fb->bits_per_pixel >> 3;
+	unsigned int pitch = state->fb->pitches[0];
 
 	if (ctx->suspended)
 		return;
 
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
-
-	plane = &ctx->planes[win];
-
-	if (ctx->suspended)
-		return;
-
-	/*
-	 * SHADOWCON/PRTCON register is used for enabling timing.
-	 *
-	 * for example, once only width value of a register is set,
-	 * if the dma is started then fimd hardware could malfunction so
-	 * with protect window setting, the register fields with prefix '_F'
-	 * wouldn't be updated at vsync also but updated once unprotect window
-	 * is set.
-	 */
-
-	/* protect windows */
-	fimd_shadow_protect_win(ctx, win, true);
-
-
-	offset = plane->src_x * (plane->bpp >> 3);
-	offset += plane->src_y * plane->pitch;
+	offset = plane->src_x * bpp;
+	offset += plane->src_y * pitch;
 
 	/* buffer start address */
 	dma_addr = plane->dma_addr[0] + offset;
@@ -656,18 +672,18 @@
 	writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
 
 	/* buffer end address */
-	size = plane->pitch * plane->crtc_height;
+	size = pitch * plane->crtc_h;
 	val = (unsigned long)(dma_addr + size);
 	writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
 
 	DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
 			(unsigned long)dma_addr, val, size);
 	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
-			plane->crtc_width, plane->crtc_height);
+			plane->crtc_w, plane->crtc_h);
 
 	/* buffer size */
-	buf_offsize = plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
-	line_size = plane->crtc_width * (plane->bpp >> 3);
+	buf_offsize = pitch - (plane->crtc_w * bpp);
+	line_size = plane->crtc_w * bpp;
 	val = VIDW_BUF_SIZE_OFFSET(buf_offsize) |
 		VIDW_BUF_SIZE_PAGEWIDTH(line_size) |
 		VIDW_BUF_SIZE_OFFSET_E(buf_offsize) |
@@ -681,10 +697,10 @@
 		VIDOSDxA_TOPLEFT_Y_E(plane->crtc_y);
 	writel(val, ctx->regs + VIDOSD_A(win));
 
-	last_x = plane->crtc_x + plane->crtc_width;
+	last_x = plane->crtc_x + plane->crtc_w;
 	if (last_x)
 		last_x--;
-	last_y = plane->crtc_y + plane->crtc_height;
+	last_y = plane->crtc_y + plane->crtc_h;
 	if (last_y)
 		last_y--;
 
@@ -701,13 +717,13 @@
 		u32 offset = VIDOSD_D(win);
 		if (win == 0)
 			offset = VIDOSD_C(win);
-		val = plane->crtc_width * plane->crtc_height;
+		val = plane->crtc_w * plane->crtc_h;
 		writel(val, ctx->regs + offset);
 
 		DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
 	}
 
-	fimd_win_set_pixfmt(ctx, win);
+	fimd_win_set_pixfmt(ctx, win, state->fb);
 
 	/* hardware window 0 doesn't support color key. */
 	if (win != 0)
@@ -718,36 +734,23 @@
 	if (ctx->driver_data->has_shadowcon)
 		fimd_enable_shadow_channel_path(ctx, win, true);
 
-	/* Enable DMA channel and unprotect windows */
-	fimd_shadow_protect_win(ctx, win, false);
-
 	if (ctx->i80_if)
 		atomic_set(&ctx->win_updated, 1);
 }
 
-static void fimd_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void fimd_disable_plane(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
 {
 	struct fimd_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
-
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
-
-	plane = &ctx->planes[win];
+	unsigned int win = plane->zpos;
 
 	if (ctx->suspended)
 		return;
 
-	/* protect windows */
-	fimd_shadow_protect_win(ctx, win, true);
-
 	fimd_enable_video_output(ctx, win, false);
 
 	if (ctx->driver_data->has_shadowcon)
 		fimd_enable_shadow_channel_path(ctx, win, false);
-
-	/* unprotect windows */
-	fimd_shadow_protect_win(ctx, win, false);
 }
 
 static void fimd_enable(struct exynos_drm_crtc *crtc)
@@ -795,7 +798,7 @@
 	 * a destroyed buffer later.
 	 */
 	for (i = 0; i < WINDOWS_NR; i++)
-		fimd_win_disable(crtc, i);
+		fimd_disable_plane(crtc, &ctx->planes[i]);
 
 	fimd_enable_vblank(crtc);
 	fimd_wait_for_vblank(crtc);
@@ -862,7 +865,7 @@
 	}
 
 	if (test_bit(0, &ctx->irq_flags))
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+		drm_crtc_handle_vblank(&ctx->crtc->base);
 }
 
 static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
@@ -890,17 +893,19 @@
 	.enable_vblank = fimd_enable_vblank,
 	.disable_vblank = fimd_disable_vblank,
 	.wait_for_vblank = fimd_wait_for_vblank,
-	.win_commit = fimd_win_commit,
-	.win_disable = fimd_win_disable,
+	.atomic_begin = fimd_atomic_begin,
+	.update_plane = fimd_update_plane,
+	.disable_plane = fimd_disable_plane,
+	.atomic_flush = fimd_atomic_flush,
 	.te_handler = fimd_te_handler,
 	.clock_enable = fimd_dp_clock_enable,
-	.clear_channels = fimd_clear_channels,
 };
 
 static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
 {
 	struct fimd_context *ctx = (struct fimd_context *)dev_id;
-	u32 val, clear_bit;
+	u32 val, clear_bit, start, start_s;
+	int win;
 
 	val = readl(ctx->regs + VIDINTCON1);
 
@@ -912,15 +917,25 @@
 	if (ctx->pipe < 0 || !ctx->drm_dev)
 		goto out;
 
-	if (ctx->i80_if) {
-		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+	if (!ctx->i80_if)
+		drm_crtc_handle_vblank(&ctx->crtc->base);
 
+	for (win = 0; win < WINDOWS_NR; win++) {
+		struct exynos_drm_plane *plane = &ctx->planes[win];
+
+		if (!plane->pending_fb)
+			continue;
+
+		start = readl(ctx->regs + VIDWx_BUF_START(win, 0));
+		start_s = readl(ctx->regs + VIDWx_BUF_START_S(win, 0));
+		if (start == start_s)
+			exynos_drm_crtc_finish_update(ctx->crtc, plane);
+	}
+
+	if (ctx->i80_if) {
 		/* Exits triggering mode */
 		atomic_set(&ctx->triggering, 0);
 	} else {
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
-		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
-
 		/* set wait vsync event to zero and wake up queue. */
 		if (atomic_read(&ctx->wait_vsync_event)) {
 			atomic_set(&ctx->wait_vsync_event, 0);
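The per-window loop added above only completes a plane update once the hardware has taken it over: VIDWx_BUF_START holds the address software programmed, and VIDWx_BUF_START_S appears to be the shadow copy the IP latches at frame start (the shadow semantics are inferred from this patch, not documented in it). A minimal annotated sketch of the check:

    start   = readl(ctx->regs + VIDWx_BUF_START(win, 0));   /* programmed */
    start_s = readl(ctx->regs + VIDWx_BUF_START_S(win, 0)); /* latched    */
    if (start == start_s)  /* shadow caught up: the new scanout is live */
            exynos_drm_crtc_finish_update(ctx->crtc, plane);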
@@ -949,7 +964,8 @@
 		type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
 						DRM_PLANE_TYPE_OVERLAY;
 		ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-					1 << ctx->pipe, type, zpos);
+					1 << ctx->pipe, type, fimd_formats,
+					ARRAY_SIZE(fimd_formats), zpos);
 		if (ret)
 			return ret;
 	}
@@ -961,10 +977,13 @@
 	if (IS_ERR(ctx->crtc))
 		return PTR_ERR(ctx->crtc);
 
-	if (ctx->display)
-		exynos_drm_create_enc_conn(drm_dev, ctx->display);
+	if (ctx->encoder)
+		exynos_dpi_bind(drm_dev, ctx->encoder);
 
-	ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev);
+	if (is_drm_iommu_supported(drm_dev))
+		fimd_clear_channels(ctx->crtc);
+
+	ret = drm_iommu_attach_device(drm_dev, dev);
 	if (ret)
 		priv->pipe--;
 
@@ -978,10 +997,10 @@
 
 	fimd_disable(ctx->crtc);
 
-	fimd_iommu_detach_devices(ctx);
+	drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
 
-	if (ctx->display)
-		exynos_dpi_remove(ctx->display);
+	if (ctx->encoder)
+		exynos_dpi_remove(ctx->encoder);
 }
 
 static const struct component_ops fimd_component_ops = {
@@ -1088,10 +1107,9 @@
 
 	platform_set_drvdata(pdev, ctx);
 
-	ctx->display = exynos_dpi_probe(dev);
-	if (IS_ERR(ctx->display)) {
-		return PTR_ERR(ctx->display);
-	}
+	ctx->encoder = exynos_dpi_probe(dev);
+	if (IS_ERR(ctx->encoder))
+		return PTR_ERR(ctx->encoder);
 
 	pm_runtime_enable(dev);
 
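Taken together, the fimd changes move window (un)protection out of the per-window commit path and into the new atomic hooks, so a plane update is bracketed roughly like this (a sketch of the intended ordering; in practice the DRM atomic helpers invoke these callbacks):

    fimd_atomic_begin(crtc, plane);  /* fimd_shadow_protect_win(ctx, zpos, true)    */
    fimd_update_plane(crtc, plane);  /* program VIDWx_BUF_*, VIDOSD_*, pixel format */
    fimd_atomic_flush(crtc, plane);  /* fimd_shadow_protect_win(ctx, zpos, false)   */

With protection held across the whole update, all '_F' register fields latch together at the next vsync rather than per win_commit() call.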
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 81a2508..535b4ad 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -48,11 +48,13 @@
 
 /* registers for base address */
 #define G2D_SRC_BASE_ADDR		0x0304
+#define G2D_SRC_STRIDE_REG		0x0308
 #define G2D_SRC_COLOR_MODE		0x030C
 #define G2D_SRC_LEFT_TOP		0x0310
 #define G2D_SRC_RIGHT_BOTTOM		0x0314
 #define G2D_SRC_PLANE2_BASE_ADDR	0x0318
 #define G2D_DST_BASE_ADDR		0x0404
+#define G2D_DST_STRIDE_REG		0x0408
 #define G2D_DST_COLOR_MODE		0x040C
 #define G2D_DST_LEFT_TOP		0x0410
 #define G2D_DST_RIGHT_BOTTOM		0x0414
@@ -148,6 +150,7 @@
  * A structure of buffer description
  *
  * @format: color format
+ * @stride: buffer stride/pitch in bytes
  * @left_x: the x coordinates of left top corner
  * @top_y: the y coordinates of left top corner
  * @right_x: the x coordinates of right bottom corner
@@ -156,6 +159,7 @@
  */
 struct g2d_buf_desc {
 	unsigned int	format;
+	unsigned int	stride;
 	unsigned int	left_x;
 	unsigned int	top_y;
 	unsigned int	right_x;
@@ -589,6 +593,7 @@
 
 	switch (reg_offset) {
 	case G2D_SRC_BASE_ADDR:
+	case G2D_SRC_STRIDE_REG:
 	case G2D_SRC_COLOR_MODE:
 	case G2D_SRC_LEFT_TOP:
 	case G2D_SRC_RIGHT_BOTTOM:
@@ -598,6 +603,7 @@
 		reg_type = REG_TYPE_SRC_PLANE2;
 		break;
 	case G2D_DST_BASE_ADDR:
+	case G2D_DST_STRIDE_REG:
 	case G2D_DST_COLOR_MODE:
 	case G2D_DST_LEFT_TOP:
 	case G2D_DST_RIGHT_BOTTOM:
@@ -652,8 +658,8 @@
 						enum g2d_reg_type reg_type,
 						unsigned long size)
 {
-	unsigned int width, height;
-	unsigned long area;
+	int width, height;
+	unsigned long bpp, last_pos;
 
 	/*
 	 * check source and destination buffers only.
@@ -662,22 +668,37 @@
 	if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
 		return true;
 
-	width = buf_desc->right_x - buf_desc->left_x;
+	/* This check also makes sure that right_x > left_x. */
+	width = (int)buf_desc->right_x - (int)buf_desc->left_x;
 	if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
-		DRM_ERROR("width[%u] is out of range!\n", width);
+		DRM_ERROR("width[%d] is out of range!\n", width);
 		return false;
 	}
 
-	height = buf_desc->bottom_y - buf_desc->top_y;
+	/* This check also makes sure that bottom_y > top_y. */
+	height = (int)buf_desc->bottom_y - (int)buf_desc->top_y;
 	if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
-		DRM_ERROR("height[%u] is out of range!\n", height);
+		DRM_ERROR("height[%d] is out of range!\n", height);
 		return false;
 	}
 
-	area = (unsigned long)width * (unsigned long)height *
-					g2d_get_buf_bpp(buf_desc->format);
-	if (area > size) {
-		DRM_ERROR("area[%lu] is out of range[%lu]!\n", area, size);
+	bpp = g2d_get_buf_bpp(buf_desc->format);
+
+	/* Compute the position of the last byte that the engine accesses. */
+	last_pos = ((unsigned long)buf_desc->bottom_y - 1) *
+		(unsigned long)buf_desc->stride +
+		(unsigned long)buf_desc->right_x * bpp - 1;
+
+	/*
+	 * Since right_x > left_x and bottom_y > top_y, we already know
+	 * that first_pos < last_pos (first_pos being the position of
+	 * the first byte the engine accesses); it just remains to
+	 * check that last_pos is smaller than the buffer size.
+	 */
+
+	if (last_pos >= size) {
+		DRM_ERROR("last engine access position [%lu] "
+			"is out of range [%lu]!\n", last_pos, size);
 		return false;
 	}
 
@@ -973,8 +994,6 @@
 				goto err;
 
 			reg_type = g2d_get_reg_type(reg_offset);
-			if (reg_type == REG_TYPE_NONE)
-				goto err;
 
 			/* check userptr buffer type. */
 			if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
@@ -983,14 +1002,22 @@
 			} else
 				buf_info->types[reg_type] = BUF_TYPE_GEM;
 			break;
+		case G2D_SRC_STRIDE_REG:
+		case G2D_DST_STRIDE_REG:
+			if (for_addr)
+				goto err;
+
+			reg_type = g2d_get_reg_type(reg_offset);
+
+			buf_desc = &buf_info->descs[reg_type];
+			buf_desc->stride = cmdlist->data[index + 1];
+			break;
 		case G2D_SRC_COLOR_MODE:
 		case G2D_DST_COLOR_MODE:
 			if (for_addr)
 				goto err;
 
 			reg_type = g2d_get_reg_type(reg_offset);
-			if (reg_type == REG_TYPE_NONE)
-				goto err;
 
 			buf_desc = &buf_info->descs[reg_type];
 			value = cmdlist->data[index + 1];
@@ -1003,8 +1030,6 @@
 				goto err;
 
 			reg_type = g2d_get_reg_type(reg_offset);
-			if (reg_type == REG_TYPE_NONE)
-				goto err;
 
 			buf_desc = &buf_info->descs[reg_type];
 			value = cmdlist->data[index + 1];
@@ -1018,8 +1043,6 @@
 				goto err;
 
 			reg_type = g2d_get_reg_type(reg_offset);
-			if (reg_type == REG_TYPE_NONE)
-				goto err;
 
 			buf_desc = &buf_info->descs[reg_type];
 			value = cmdlist->data[index + 1];
@@ -1319,9 +1342,6 @@
 		return ret;
 	}
 
-	if (!is_drm_iommu_supported(drm_dev))
-		return 0;
-
 	ret = drm_iommu_attach_device(drm_dev, dev);
 	if (ret < 0) {
 		dev_err(dev, "failed to enable iommu.\n");
@@ -1334,9 +1354,6 @@
 
 static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
 {
-	if (!is_drm_iommu_supported(drm_dev))
-		return;
-
 	drm_iommu_detach_device(drm_dev, dev);
 }
 
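The reworked G2D check bounds the last byte the engine will actually touch rather than comparing width * height * bpp against the buffer size, which a large user-supplied stride could otherwise defeat. A self-contained sketch of the same arithmetic (function name and the minimal width/height limits are illustrative; the driver uses G2D_LEN_MIN/G2D_LEN_MAX):

    #include <stdbool.h>

    static bool g2d_access_in_bounds(unsigned int left_x, unsigned int top_y,
                                     unsigned int right_x, unsigned int bottom_y,
                                     unsigned long stride, unsigned long bpp,
                                     unsigned long size)
    {
            /* signed subtraction also rejects right_x <= left_x etc. */
            int width  = (int)right_x - (int)left_x;
            int height = (int)bottom_y - (int)top_y;
            unsigned long last_pos;

            if (width < 1 || height < 1)
                    return false;

            /* last byte accessed: end of the bottom-most row */
            last_pos = (unsigned long)(bottom_y - 1) * stride
                     + (unsigned long)right_x * bpp - 1;

            return last_pos < size;  /* engine never reaches past the buffer */
    }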
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 0d5b969..62b9ea1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -13,98 +13,112 @@
 #include <drm/drm_vma_manager.h>
 
 #include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
 #include <drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
 #include "exynos_drm_iommu.h"
 
-static unsigned int convert_to_vm_err_msg(int msg)
+static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
 {
-	unsigned int out_msg;
+	struct drm_device *dev = obj->base.dev;
+	enum dma_attr attr;
+	unsigned int nr_pages;
 
-	switch (msg) {
-	case 0:
-	case -ERESTARTSYS:
-	case -EINTR:
-		out_msg = VM_FAULT_NOPAGE;
-		break;
-
-	case -ENOMEM:
-		out_msg = VM_FAULT_OOM;
-		break;
-
-	default:
-		out_msg = VM_FAULT_SIGBUS;
-		break;
+	if (obj->dma_addr) {
+		DRM_DEBUG_KMS("already allocated.\n");
+		return 0;
 	}
 
-	return out_msg;
-}
+	init_dma_attrs(&obj->dma_attrs);
 
-static int check_gem_flags(unsigned int flags)
-{
-	if (flags & ~(EXYNOS_BO_MASK)) {
-		DRM_ERROR("invalid flags.\n");
-		return -EINVAL;
+	/*
+	 * If EXYNOS_BO_CONTIG, a fully physically contiguous memory
+	 * region will be allocated; otherwise the allocation is made
+	 * as physically contiguous as possible.
+	 */
+	if (!(obj->flags & EXYNOS_BO_NONCONTIG))
+		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs);
+
+	/*
+	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combined
+	 * mapping; otherwise use a cacheable mapping.
+	 */
+	if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE))
+		attr = DMA_ATTR_WRITE_COMBINE;
+	else
+		attr = DMA_ATTR_NON_CONSISTENT;
+
+	dma_set_attr(attr, &obj->dma_attrs);
+	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs);
+
+	nr_pages = obj->size >> PAGE_SHIFT;
+
+	if (!is_drm_iommu_supported(dev)) {
+		dma_addr_t start_addr;
+		unsigned int i = 0;
+
+		obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
+		if (!obj->pages) {
+			DRM_ERROR("failed to allocate pages.\n");
+			return -ENOMEM;
+		}
+
+		obj->cookie = dma_alloc_attrs(dev->dev,
+					obj->size,
+					&obj->dma_addr, GFP_KERNEL,
+					&obj->dma_attrs);
+		if (!obj->cookie) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			drm_free_large(obj->pages);
+			return -ENOMEM;
+		}
+
+		start_addr = obj->dma_addr;
+		while (i < nr_pages) {
+			obj->pages[i] = phys_to_page(start_addr);
+			start_addr += PAGE_SIZE;
+			i++;
+		}
+	} else {
+		obj->pages = dma_alloc_attrs(dev->dev, obj->size,
+					&obj->dma_addr, GFP_KERNEL,
+					&obj->dma_attrs);
+		if (!obj->pages) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			return -ENOMEM;
+		}
 	}
 
+	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+			(unsigned long)obj->dma_addr,
+			obj->size);
+
 	return 0;
 }
 
-static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
-					struct vm_area_struct *vma)
+static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
 {
-	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+	struct drm_device *dev = obj->base.dev;
 
-	/* non-cachable as default. */
-	if (obj->flags & EXYNOS_BO_CACHABLE)
-		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-	else if (obj->flags & EXYNOS_BO_WC)
-		vma->vm_page_prot =
-			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-	else
-		vma->vm_page_prot =
-			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
-}
-
-static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
-{
-	/* TODO */
-
-	return roundup(size, PAGE_SIZE);
-}
-
-static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
-					struct vm_area_struct *vma,
-					unsigned long f_vaddr,
-					pgoff_t page_offset)
-{
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-	struct scatterlist *sgl;
-	unsigned long pfn;
-	int i;
-
-	if (!buf->sgt)
-		return -EINTR;
-
-	if (page_offset >= (buf->size >> PAGE_SHIFT)) {
-		DRM_ERROR("invalid page offset\n");
-		return -EINVAL;
+	if (!obj->dma_addr) {
+		DRM_DEBUG_KMS("dma_addr is invalid.\n");
+		return;
 	}
 
-	sgl = buf->sgt->sgl;
-	for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
-		if (page_offset < (sgl->length >> PAGE_SHIFT))
-			break;
-		page_offset -=	(sgl->length >> PAGE_SHIFT);
-	}
+	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+			(unsigned long)obj->dma_addr, obj->size);
 
-	pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
+	if (!is_drm_iommu_supported(dev)) {
+		dma_free_attrs(dev->dev, obj->size, obj->cookie,
+				(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
+		drm_free_large(obj->pages);
+	} else {
+		dma_free_attrs(dev->dev, obj->size, obj->pages,
+				(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
+	}
 
-	return vm_insert_mixed(vma, f_vaddr, pfn);
+	obj->dma_addr = (dma_addr_t)NULL;
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -131,11 +145,7 @@
 
 void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 {
-	struct drm_gem_object *obj;
-	struct exynos_drm_gem_buf *buf;
-
-	obj = &exynos_gem_obj->base;
-	buf = exynos_gem_obj->buffer;
+	struct drm_gem_object *obj = &exynos_gem_obj->base;
 
 	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
 
@@ -148,12 +158,9 @@
 	if (obj->import_attach)
 		goto out;
 
-	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+	exynos_drm_free_buf(exynos_gem_obj);
 
 out:
-	exynos_drm_fini_buf(obj->dev, buf);
-	exynos_gem_obj->buffer = NULL;
-
 	drm_gem_free_mmap_offset(obj);
 
 	/* release file pointer to gem object. */
@@ -180,7 +187,7 @@
 
 	drm_gem_object_unreference_unlocked(obj);
 
-	return exynos_gem_obj->buffer->size;
+	return exynos_gem_obj->size;
 }
 
 
@@ -193,7 +200,7 @@
 
 	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
 	if (!exynos_gem_obj)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	exynos_gem_obj->size = size;
 	obj = &exynos_gem_obj->base;
@@ -202,7 +209,7 @@
 	if (ret < 0) {
 		DRM_ERROR("failed to initialize gem object\n");
 		kfree(exynos_gem_obj);
-		return NULL;
+		return ERR_PTR(ret);
 	}
 
 	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -215,47 +222,35 @@
 						unsigned long size)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_gem_buf *buf;
 	int ret;
 
+	if (flags & ~(EXYNOS_BO_MASK)) {
+		DRM_ERROR("invalid flags.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	if (!size) {
 		DRM_ERROR("invalid size.\n");
 		return ERR_PTR(-EINVAL);
 	}
 
-	size = roundup_gem_size(size, flags);
-
-	ret = check_gem_flags(flags);
-	if (ret)
-		return ERR_PTR(ret);
-
-	buf = exynos_drm_init_buf(dev, size);
-	if (!buf)
-		return ERR_PTR(-ENOMEM);
+	size = roundup(size, PAGE_SIZE);
 
 	exynos_gem_obj = exynos_drm_gem_init(dev, size);
-	if (!exynos_gem_obj) {
-		ret = -ENOMEM;
-		goto err_fini_buf;
-	}
-
-	exynos_gem_obj->buffer = buf;
+	if (IS_ERR(exynos_gem_obj))
+		return exynos_gem_obj;
 
 	/* set memory type and cache attribute from user side. */
 	exynos_gem_obj->flags = flags;
 
-	ret = exynos_drm_alloc_buf(dev, buf, flags);
-	if (ret < 0)
-		goto err_gem_fini;
+	ret = exynos_drm_alloc_buf(exynos_gem_obj);
+	if (ret < 0) {
+		drm_gem_object_release(&exynos_gem_obj->base);
+		kfree(exynos_gem_obj);
+		return ERR_PTR(ret);
+	}
 
 	return exynos_gem_obj;
-
-err_gem_fini:
-	drm_gem_object_release(&exynos_gem_obj->base);
-	kfree(exynos_gem_obj);
-err_fini_buf:
-	exynos_drm_fini_buf(dev, buf);
-	return ERR_PTR(ret);
 }
 
 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -294,7 +289,7 @@
 
 	exynos_gem_obj = to_exynos_gem_obj(obj);
 
-	return &exynos_gem_obj->buffer->dma_addr;
+	return &exynos_gem_obj->dma_addr;
 }
 
 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
@@ -322,7 +317,6 @@
 				      struct vm_area_struct *vma)
 {
 	struct drm_device *drm_dev = exynos_gem_obj->base.dev;
-	struct exynos_drm_gem_buf *buffer;
 	unsigned long vm_size;
 	int ret;
 
@@ -331,19 +325,13 @@
 
 	vm_size = vma->vm_end - vma->vm_start;
 
-	/*
-	 * a buffer contains information to physically continuous memory
-	 * allocated by user request or at framebuffer creation.
-	 */
-	buffer = exynos_gem_obj->buffer;
-
 	/* check if user-requested size is valid. */
-	if (vm_size > buffer->size)
+	if (vm_size > exynos_gem_obj->size)
 		return -EINVAL;
 
-	ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
-				buffer->dma_addr, buffer->size,
-				&buffer->dma_attrs);
+	ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages,
+				exynos_gem_obj->dma_addr, exynos_gem_obj->size,
+				&exynos_gem_obj->dma_attrs);
 	if (ret < 0) {
 		DRM_ERROR("failed to mmap.\n");
 		return ret;
@@ -503,15 +491,6 @@
 
 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
 {
-	struct exynos_drm_gem_obj *exynos_gem_obj;
-	struct exynos_drm_gem_buf *buf;
-
-	exynos_gem_obj = to_exynos_gem_obj(obj);
-	buf = exynos_gem_obj->buffer;
-
-	if (obj->import_attach)
-		drm_prime_gem_destroy(obj, buf->sgt);
-
 	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
 }
 
@@ -595,24 +574,34 @@
 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
-	unsigned long f_vaddr;
+	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	unsigned long pfn;
 	pgoff_t page_offset;
 	int ret;
 
 	page_offset = ((unsigned long)vmf->virtual_address -
 			vma->vm_start) >> PAGE_SHIFT;
-	f_vaddr = (unsigned long)vmf->virtual_address;
 
-	mutex_lock(&dev->struct_mutex);
+	if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) {
+		DRM_ERROR("invalid page offset\n");
+		ret = -EINVAL;
+		goto out;
+	}
 
-	ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
-	if (ret < 0)
-		DRM_ERROR("failed to map a buffer with user.\n");
+	pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]);
+	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
 
-	mutex_unlock(&dev->struct_mutex);
-
-	return convert_to_vm_err_msg(ret);
+out:
+	switch (ret) {
+	case 0:
+	case -ERESTARTSYS:
+	case -EINTR:
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	default:
+		return VM_FAULT_SIGBUS;
+	}
 }
 
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
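The rewritten fault handler no longer walks an sg table: with obj->pages always populated at allocation time it indexes the backing page directly, and the dev->struct_mutex lock is gone (presumably safe because the pages array is fixed for the object's lifetime). A condensed sketch of the new path, with the error mapping from the switch above:

    page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start)
                    >> PAGE_SHIFT;
    if (page_offset >= exynos_gem_obj->size >> PAGE_SHIFT)
            return VM_FAULT_SIGBUS;            /* fault past end of object */

    pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]);
    ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);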
@@ -631,11 +620,17 @@
 	obj = vma->vm_private_data;
 	exynos_gem_obj = to_exynos_gem_obj(obj);
 
-	ret = check_gem_flags(exynos_gem_obj->flags);
-	if (ret)
-		goto err_close_vm;
+	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem_obj->flags);
 
-	update_vm_cache_attr(exynos_gem_obj, vma);
+	/* non-cacheable by default. */
+	if (exynos_gem_obj->flags & EXYNOS_BO_CACHABLE)
+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	else if (exynos_gem_obj->flags & EXYNOS_BO_WC)
+		vma->vm_page_prot =
+			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	else
+		vma->vm_page_prot =
+			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
 
 	ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
 	if (ret)
@@ -649,3 +644,76 @@
 
 	return ret;
 }
+
+/* low-level interface prime helpers */
+struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	int npages;
+
+	npages = exynos_gem_obj->size >> PAGE_SHIFT;
+
+	return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages);
+}
+
+struct drm_gem_object *
+exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
+				     struct dma_buf_attachment *attach,
+				     struct sg_table *sgt)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	int npages;
+	int ret;
+
+	exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size);
+	if (IS_ERR(exynos_gem_obj)) {
+		ret = PTR_ERR(exynos_gem_obj);
+		return ERR_PTR(ret);
+	}
+
+	exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl);
+
+	npages = exynos_gem_obj->size >> PAGE_SHIFT;
+	exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!exynos_gem_obj->pages) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL,
+			npages);
+	if (ret < 0)
+		goto err_free_large;
+
+	if (sgt->nents == 1) {
+		/* always physically contiguous memory if sgt->nents is 1. */
+		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
+	} else {
+		/*
+		 * This case could be either CONTIG or NONCONTIG, but for
+		 * now we assume NONCONTIG.
+		 * TODO: find a way for the exporter to notify the importer
+		 * of the type of its own buffer.
+		 */
+		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
+	}
+
+	return &exynos_gem_obj->base;
+
+err_free_large:
+	drm_free_large(exynos_gem_obj->pages);
+err:
+	drm_gem_object_release(&exynos_gem_obj->base);
+	kfree(exynos_gem_obj);
+	return ERR_PTR(ret);
+}
+
+void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	return NULL;
+}
+
+void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	/* Nothing to do */
+}
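For reference, the flag-to-DMA-attribute mapping that exynos_drm_alloc_buf() now applies, condensed from the hunk above (the same dma_attrs word later drives dma_mmap_attrs() in the mmap path):

    init_dma_attrs(&obj->dma_attrs);

    if (!(obj->flags & EXYNOS_BO_NONCONTIG))          /* CONTIG requested */
            dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs);

    if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE))
            dma_set_attr(DMA_ATTR_WRITE_COMBINE, &obj->dma_attrs);
    else
            dma_set_attr(DMA_ATTR_NON_CONSISTENT, &obj->dma_attrs);

    /* no kernel mapping up front; a vmap is only created on demand */
    dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs);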
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 6f42e22..cd62f84 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -20,35 +20,6 @@
 #define IS_NONCONTIG_BUFFER(f)		(f & EXYNOS_BO_NONCONTIG)
 
 /*
- * exynos drm gem buffer structure.
- *
- * @cookie: cookie returned by dma_alloc_attrs
- * @kvaddr: kernel virtual address to allocated memory region.
- * *userptr: user space address.
- * @dma_addr: bus address(accessed by dma) to allocated memory region.
- *	- this address could be physical address without IOMMU and
- *	device address with IOMMU.
- * @write: whether pages will be written to by the caller.
- * @pages: Array of backing pages.
- * @sgt: sg table to transfer page data.
- * @size: size of allocated memory region.
- * @pfnmap: indicate whether memory region from userptr is mmaped with
- *	VM_PFNMAP or not.
- */
-struct exynos_drm_gem_buf {
-	void 			*cookie;
-	void __iomem		*kvaddr;
-	unsigned long		userptr;
-	dma_addr_t		dma_addr;
-	struct dma_attrs	dma_attrs;
-	unsigned int		write;
-	struct page		**pages;
-	struct sg_table		*sgt;
-	unsigned long		size;
-	bool			pfnmap;
-};
-
-/*
  * exynos drm buffer structure.
  *
  * @base: a gem object.
@@ -59,18 +30,28 @@
  *	by user request or at framebuffer creation.
  *	continuous memory region allocated by user request
  *	or at framebuffer creation.
+ * @flags: indicate memory type of the allocated buffer and cache attribute.
  * @size: size requested from user, in bytes and this size is aligned
  *	in page unit.
- * @flags: indicate memory type to allocated buffer and cache attruibute.
+ * @cookie: cookie returned by dma_alloc_attrs
+ * @kvaddr: kernel virtual address of the allocated memory region.
+ * @dma_addr: bus address (accessed by DMA) of the allocated memory region.
+ *	- this could be a physical address without an IOMMU, or a
+ *	device address with an IOMMU.
+ * @pages: Array of backing pages.
  *
  * P.S. this object would be transferred to user as kms_bo.handle so
  *	user can access the buffer through kms_bo.handle.
  */
 struct exynos_drm_gem_obj {
-	struct drm_gem_object		base;
-	struct exynos_drm_gem_buf	*buffer;
-	unsigned long			size;
-	unsigned int			flags;
+	struct drm_gem_object	base;
+	unsigned int		flags;
+	unsigned long		size;
+	void			*cookie;
+	void __iomem		*kvaddr;
+	dma_addr_t		dma_addr;
+	struct dma_attrs	dma_attrs;
+	struct page		**pages;
 };
 
 struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
@@ -177,4 +158,13 @@
 				struct sg_table *sgt,
 				enum dma_data_direction dir);
 
+/* low-level interface prime helpers */
+struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *
+exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
+				     struct dma_buf_attachment *attach,
+				     struct sg_table *sgt);
+void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj);
+void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index f1c6b76..808a0a0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -582,9 +582,17 @@
 		break;
 	case EXYNOS_DRM_DEGREE_180:
 		cfg |= GSC_IN_ROT_180;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~GSC_IN_ROT_XFLIP;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~GSC_IN_ROT_YFLIP;
 		break;
 	case EXYNOS_DRM_DEGREE_270:
 		cfg |= GSC_IN_ROT_270;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~GSC_IN_ROT_XFLIP;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~GSC_IN_ROT_YFLIP;
 		break;
 	default:
 		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
@@ -845,9 +853,17 @@
 		break;
 	case EXYNOS_DRM_DEGREE_180:
 		cfg |= GSC_IN_ROT_180;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~GSC_IN_ROT_XFLIP;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~GSC_IN_ROT_YFLIP;
 		break;
 	case EXYNOS_DRM_DEGREE_270:
 		cfg |= GSC_IN_ROT_270;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~GSC_IN_ROT_XFLIP;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~GSC_IN_ROT_YFLIP;
 		break;
 	default:
 		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index d4ec746..055e8ec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -87,10 +87,8 @@
 	struct device *dev = drm_dev->dev;
 	int ret;
 
-	if (!dev->archdata.mapping) {
-		DRM_ERROR("iommu_mapping is null.\n");
-		return -EFAULT;
-	}
+	if (!dev->archdata.mapping)
+		return 0;
 
 	subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
 					sizeof(*subdrv_dev->dma_parms),
@@ -144,17 +142,3 @@
 	iommu_detach_device(mapping->domain, subdrv_dev);
 	drm_release_iommu_mapping(drm_dev);
 }
-
-int drm_iommu_attach_device_if_possible(struct exynos_drm_crtc *exynos_crtc,
-			struct drm_device *drm_dev, struct device *subdrv_dev)
-{
-	int ret = 0;
-
-	if (is_drm_iommu_supported(drm_dev)) {
-		if (exynos_crtc->ops->clear_channels)
-			exynos_crtc->ops->clear_channels(exynos_crtc);
-		return drm_iommu_attach_device(drm_dev, subdrv_dev);
-	}
-
-	return ret;
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 8341c7a..dc1b544 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -29,19 +29,11 @@
 
 static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
 {
-#ifdef CONFIG_ARM_DMA_USE_IOMMU
 	struct device *dev = drm_dev->dev;
 
 	return dev->archdata.mapping ? true : false;
-#else
-	return false;
-#endif
 }
 
-int drm_iommu_attach_device_if_possible(
-		struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev,
-		struct device *subdrv_dev);
-
 #else
 
 static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
@@ -69,12 +61,5 @@
 	return false;
 }
 
-static inline int drm_iommu_attach_device_if_possible(
-		struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev,
-		struct device *subdrv_dev)
-{
-	return 0;
-}
-
 #endif
 #endif
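Since drm_iommu_attach_device() now returns 0 when no IOMMU mapping exists, callers may attach unconditionally and drop their is_drm_iommu_supported() guards, as the ipp.c hunks below show:

    /* succeeds as a no-op on configurations without an IOMMU */
    ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
    if (ret)
            DRM_ERROR("failed to activate iommu\n");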
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 67e5451..67d2423 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -1622,12 +1622,10 @@
 		INIT_LIST_HEAD(&ippdrv->cmd_list);
 		mutex_init(&ippdrv->cmd_lock);
 
-		if (is_drm_iommu_supported(drm_dev)) {
-			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
-			if (ret) {
-				DRM_ERROR("failed to activate iommu\n");
-				goto err;
-			}
+		ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
+		if (ret) {
+			DRM_ERROR("failed to activate iommu\n");
+			goto err;
 		}
 	}
 
@@ -1637,8 +1635,7 @@
 	/* get ipp driver entry */
 	list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
 						drv_list) {
-		if (is_drm_iommu_supported(drm_dev))
-			drm_iommu_detach_device(drm_dev, ippdrv->dev);
+		drm_iommu_detach_device(drm_dev, ippdrv->dev);
 
 		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
 				ippdrv->prop_list.ipp_id);
@@ -1654,8 +1651,7 @@
 
 	/* get ipp driver entry */
 	list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
-		if (is_drm_iommu_supported(drm_dev))
-			drm_iommu_detach_device(drm_dev, ippdrv->dev);
+		drm_iommu_detach_device(drm_dev, ippdrv->dev);
 
 		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
 				ippdrv->prop_list.ipp_id);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index a729980..7148224 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -20,12 +20,6 @@
 #include "exynos_drm_gem.h"
 #include "exynos_drm_plane.h"
 
-static const uint32_t formats[] = {
-	DRM_FORMAT_XRGB8888,
-	DRM_FORMAT_ARGB8888,
-	DRM_FORMAT_NV12,
-};
-
 /*
  * This function is to get X or Y size shown via screen. This needs length and
  * start position of CRTC.
@@ -97,29 +91,18 @@
 	/* set drm framebuffer data. */
 	exynos_plane->src_x = src_x;
 	exynos_plane->src_y = src_y;
-	exynos_plane->src_width = (actual_w * exynos_plane->h_ratio) >> 16;
-	exynos_plane->src_height = (actual_h * exynos_plane->v_ratio) >> 16;
-	exynos_plane->fb_width = fb->width;
-	exynos_plane->fb_height = fb->height;
-	exynos_plane->bpp = fb->bits_per_pixel;
-	exynos_plane->pitch = fb->pitches[0];
-	exynos_plane->pixel_format = fb->pixel_format;
+	exynos_plane->src_w = (actual_w * exynos_plane->h_ratio) >> 16;
+	exynos_plane->src_h = (actual_h * exynos_plane->v_ratio) >> 16;
 
 	/* set plane range to be displayed. */
 	exynos_plane->crtc_x = crtc_x;
 	exynos_plane->crtc_y = crtc_y;
-	exynos_plane->crtc_width = actual_w;
-	exynos_plane->crtc_height = actual_h;
-
-	/* set drm mode data. */
-	exynos_plane->mode_width = mode->hdisplay;
-	exynos_plane->mode_height = mode->vdisplay;
-	exynos_plane->refresh = mode->vrefresh;
-	exynos_plane->scan_flag = mode->flags;
+	exynos_plane->crtc_w = actual_w;
+	exynos_plane->crtc_h = actual_h;
 
 	DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)",
 			exynos_plane->crtc_x, exynos_plane->crtc_y,
-			exynos_plane->crtc_width, exynos_plane->crtc_height);
+			exynos_plane->crtc_w, exynos_plane->crtc_h);
 
 	plane->crtc = crtc;
 }
@@ -143,17 +126,17 @@
 	if (!state->fb)
 		return 0;
 
-	nr = exynos_drm_fb_get_buf_cnt(state->fb);
+	nr = drm_format_num_planes(state->fb->pixel_format);
 	for (i = 0; i < nr; i++) {
-		struct exynos_drm_gem_buf *buffer =
-					exynos_drm_fb_buffer(state->fb, i);
+		struct exynos_drm_gem_obj *obj =
+					exynos_drm_fb_gem_obj(state->fb, i);
 
-		if (!buffer) {
-			DRM_DEBUG_KMS("buffer is null\n");
+		if (!obj) {
+			DRM_DEBUG_KMS("gem object is null\n");
 			return -EFAULT;
 		}
 
-		exynos_plane->dma_addr[i] = buffer->dma_addr +
+		exynos_plane->dma_addr[i] = obj->dma_addr +
 					    state->fb->offsets[i];
 
 		DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
@@ -179,8 +162,10 @@
 			      state->src_x >> 16, state->src_y >> 16,
 			      state->src_w >> 16, state->src_h >> 16);
 
-	if (exynos_crtc->ops->win_commit)
-		exynos_crtc->ops->win_commit(exynos_crtc, exynos_plane->zpos);
+	exynos_plane->pending_fb = state->fb;
+
+	if (exynos_crtc->ops->update_plane)
+		exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
 }
 
 static void exynos_plane_atomic_disable(struct drm_plane *plane,
@@ -192,9 +177,9 @@
 	if (!old_state->crtc)
 		return;
 
-	if (exynos_crtc->ops->win_disable)
-		exynos_crtc->ops->win_disable(exynos_crtc,
-					      exynos_plane->zpos);
+	if (exynos_crtc->ops->disable_plane)
+		exynos_crtc->ops->disable_plane(exynos_crtc,
+						exynos_plane);
 }
 
 static const struct drm_plane_helper_funcs plane_helper_funcs = {
@@ -226,13 +211,14 @@
 int exynos_plane_init(struct drm_device *dev,
 		      struct exynos_drm_plane *exynos_plane,
 		      unsigned long possible_crtcs, enum drm_plane_type type,
+		      const uint32_t *formats, unsigned int fcount,
 		      unsigned int zpos)
 {
 	int err;
 
 	err = drm_universal_plane_init(dev, &exynos_plane->base, possible_crtcs,
-				       &exynos_plane_funcs, formats,
-				       ARRAY_SIZE(formats), type);
+				       &exynos_plane_funcs, formats, fcount,
+				       type);
 	if (err) {
 		DRM_ERROR("failed to initialize plane\n");
 		return err;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 8c88ae9..476c934 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -12,4 +12,5 @@
 int exynos_plane_init(struct drm_device *dev,
 		      struct exynos_drm_plane *exynos_plane,
 		      unsigned long possible_crtcs, enum drm_plane_type type,
+		      const uint32_t *formats, unsigned int fcount,
 		      unsigned int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 3413393..75718e1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -25,7 +25,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_plane.h"
-#include "exynos_drm_encoder.h"
 #include "exynos_drm_vidi.h"
 
 /* vidi has a total of three virtual windows. */
@@ -35,11 +34,10 @@
 					connector)
 
 struct vidi_context {
-	struct exynos_drm_display	display;
+	struct drm_encoder		encoder;
 	struct platform_device		*pdev;
 	struct drm_device		*drm_dev;
 	struct exynos_drm_crtc		*crtc;
-	struct drm_encoder		*encoder;
 	struct drm_connector		connector;
 	struct exynos_drm_plane		planes[WINDOWS_NR];
 	struct edid			*raw_edid;
@@ -55,9 +53,9 @@
 	int				pipe;
 };
 
-static inline struct vidi_context *display_to_vidi(struct exynos_drm_display *d)
+static inline struct vidi_context *encoder_to_vidi(struct drm_encoder *e)
 {
-	return container_of(d, struct vidi_context, display);
+	return container_of(e, struct vidi_context, encoder);
 }
 
 static const char fake_edid_info[] = {
@@ -85,6 +83,12 @@
 	0x00, 0x00, 0x00, 0x06
 };
 
+static const uint32_t formats[] = {
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_NV12,
+};
+
 static int vidi_enable_vblank(struct exynos_drm_crtc *crtc)
 {
 	struct vidi_context *ctx = crtc->ctx;
@@ -100,7 +104,7 @@
 	/*
 	 * in case of page flip request, vidi_finish_pageflip function
 	 * will not be called because direct_vblank is true and then
-	 * that function will be called by crtc_ops->win_commit callback
+	 * that function will be called by crtc_ops->update_plane callback
 	 */
 	schedule_work(&ctx->work);
 
@@ -118,19 +122,14 @@
 		ctx->vblank_on = false;
 }
 
-static void vidi_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void vidi_update_plane(struct exynos_drm_crtc *crtc,
+			      struct exynos_drm_plane *plane)
 {
 	struct vidi_context *ctx = crtc->ctx;
-	struct exynos_drm_plane *plane;
 
 	if (ctx->suspended)
 		return;
 
-	if (win < 0 || win >= WINDOWS_NR)
-		return;
-
-	plane = &ctx->planes[win];
-
 	DRM_DEBUG_KMS("dma_addr = %pad\n", plane->dma_addr);
 
 	if (ctx->vblank_on)
@@ -179,13 +178,14 @@
 	.disable = vidi_disable,
 	.enable_vblank = vidi_enable_vblank,
 	.disable_vblank = vidi_disable_vblank,
-	.win_commit = vidi_win_commit,
+	.update_plane = vidi_update_plane,
 };
 
 static void vidi_fake_vblank_handler(struct work_struct *work)
 {
 	struct vidi_context *ctx = container_of(work, struct vidi_context,
 					work);
+	int win;
 
 	if (ctx->pipe < 0)
 		return;
@@ -196,7 +196,7 @@
 	mutex_lock(&ctx->lock);
 
 	if (ctx->direct_vblank) {
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+		drm_crtc_handle_vblank(&ctx->crtc->base);
 		ctx->direct_vblank = false;
 		mutex_unlock(&ctx->lock);
 		return;
@@ -204,7 +204,14 @@
 
 	mutex_unlock(&ctx->lock);
 
-	exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+	for (win = 0; win < WINDOWS_NR; win++) {
+		struct exynos_drm_plane *plane = &ctx->planes[win];
+
+		if (!plane->pending_fb)
+			continue;
+
+		exynos_drm_crtc_finish_update(ctx->crtc, plane);
+	}
 }
 
 static int vidi_show_connection(struct device *dev,
@@ -259,9 +266,7 @@
 int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
 				struct drm_file *file_priv)
 {
-	struct vidi_context *ctx = NULL;
-	struct drm_encoder *encoder;
-	struct exynos_drm_display *display;
+	struct vidi_context *ctx = dev_get_drvdata(drm_dev->dev);
 	struct drm_exynos_vidi_connection *vidi = data;
 
 	if (!vidi) {
@@ -274,21 +279,6 @@
 		return -EINVAL;
 	}
 
-	list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list,
-								head) {
-		display = exynos_drm_get_display(encoder);
-
-		if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) {
-			ctx = display_to_vidi(display);
-			break;
-		}
-	}
-
-	if (!ctx) {
-		DRM_DEBUG_KMS("not found virtual device type encoder.\n");
-		return -EINVAL;
-	}
-
 	if (ctx->connected == vidi->connection) {
 		DRM_DEBUG_KMS("same connection request.\n");
 		return -EINVAL;
@@ -381,7 +371,7 @@
 {
 	struct vidi_context *ctx = ctx_from_connector(connector);
 
-	return ctx->encoder;
+	return &ctx->encoder;
 }
 
 static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
@@ -389,14 +379,12 @@
 	.best_encoder = vidi_best_encoder,
 };
 
-static int vidi_create_connector(struct exynos_drm_display *display,
-				struct drm_encoder *encoder)
+static int vidi_create_connector(struct drm_encoder *encoder)
 {
-	struct vidi_context *ctx = display_to_vidi(display);
+	struct vidi_context *ctx = encoder_to_vidi(encoder);
 	struct drm_connector *connector = &ctx->connector;
 	int ret;
 
-	ctx->encoder = encoder;
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 
 	ret = drm_connector_init(ctx->drm_dev, connector,
@@ -413,19 +401,47 @@
 	return 0;
 }
 
+static bool exynos_vidi_mode_fixup(struct drm_encoder *encoder,
+				 const struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
 
-static struct exynos_drm_display_ops vidi_display_ops = {
-	.create_connector = vidi_create_connector,
+static void exynos_vidi_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void exynos_vidi_enable(struct drm_encoder *encoder)
+{
+}
+
+static void exynos_vidi_disable(struct drm_encoder *encoder)
+{
+}
+
+static struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs = {
+	.mode_fixup = exynos_vidi_mode_fixup,
+	.mode_set = exynos_vidi_mode_set,
+	.enable = exynos_vidi_enable,
+	.disable = exynos_vidi_disable,
+};
+
+static struct drm_encoder_funcs exynos_vidi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
 };
 
 static int vidi_bind(struct device *dev, struct device *master, void *data)
 {
 	struct vidi_context *ctx = dev_get_drvdata(dev);
 	struct drm_device *drm_dev = data;
+	struct drm_encoder *encoder = &ctx->encoder;
 	struct exynos_drm_plane *exynos_plane;
 	enum drm_plane_type type;
 	unsigned int zpos;
-	int ret;
+	int pipe, ret;
 
 	vidi_ctx_initialize(ctx, drm_dev);
 
@@ -433,7 +449,8 @@
 		type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
 						DRM_PLANE_TYPE_OVERLAY;
 		ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-					1 << ctx->pipe, type, zpos);
+					1 << ctx->pipe, type, formats,
+					ARRAY_SIZE(formats), zpos);
 		if (ret)
 			return ret;
 	}
@@ -447,9 +464,24 @@
 		return PTR_ERR(ctx->crtc);
 	}
 
-	ret = exynos_drm_create_enc_conn(drm_dev, &ctx->display);
+	pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+						  EXYNOS_DISPLAY_TYPE_VIDI);
+	if (pipe < 0)
+		return pipe;
+
+	encoder->possible_crtcs = 1 << pipe;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
+
+	ret = vidi_create_connector(encoder);
 	if (ret) {
-		ctx->crtc->base.funcs->destroy(&ctx->crtc->base);
+		DRM_ERROR("failed to create connector ret = %d\n", ret);
+		drm_encoder_cleanup(encoder);
 		return ret;
 	}
 
@@ -475,8 +507,6 @@
 	if (!ctx)
 		return -ENOMEM;
 
-	ctx->display.type = EXYNOS_DISPLAY_TYPE_VIDI;
-	ctx->display.ops = &vidi_display_ops;
 	ctx->default_win = 0;
 	ctx->pdev = pdev;
 
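The vidi conversion illustrates the pattern used throughout this series: the driver context embeds a struct drm_encoder in place of exynos_drm_display, and bind() registers the encoder before creating the connector. Condensed sketch of the registration sequence from vidi_bind() above (error handling as in the hunk):

    encoder->possible_crtcs = 1 << pipe;    /* restrict to the VIDI pipe */

    drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
                     DRM_MODE_ENCODER_TMDS);
    drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);

    ret = vidi_create_connector(encoder);   /* connector -> embedded encoder */
    if (ret)
            drm_encoder_cleanup(encoder);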
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 4a00990..932f7fa 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -22,7 +22,6 @@
 #include "regs-hdmi.h"
 
 #include <linux/kernel.h>
-#include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
@@ -33,8 +32,8 @@
 #include <linux/clk.h>
 #include <linux/regulator/consumer.h>
 #include <linux/io.h>
-#include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/hdmi.h>
 #include <linux/component.h>
@@ -48,7 +47,6 @@
 #include "exynos_mixer.h"
 
 #include <linux/gpio.h>
-#include <media/s5p_hdmi.h>
 
 #define ctx_from_connector(c)	container_of(c, struct hdmi_context, connector)
 
@@ -88,109 +86,14 @@
 	int				regul_count;
 };
 
-struct hdmi_tg_regs {
-	u8 cmd[1];
-	u8 h_fsz[2];
-	u8 hact_st[2];
-	u8 hact_sz[2];
-	u8 v_fsz[2];
-	u8 vsync[2];
-	u8 vsync2[2];
-	u8 vact_st[2];
-	u8 vact_sz[2];
-	u8 field_chg[2];
-	u8 vact_st2[2];
-	u8 vact_st3[2];
-	u8 vact_st4[2];
-	u8 vsync_top_hdmi[2];
-	u8 vsync_bot_hdmi[2];
-	u8 field_top_hdmi[2];
-	u8 field_bot_hdmi[2];
-	u8 tg_3d[1];
-};
-
-struct hdmi_v13_core_regs {
-	u8 h_blank[2];
-	u8 v_blank[3];
-	u8 h_v_line[3];
-	u8 vsync_pol[1];
-	u8 int_pro_mode[1];
-	u8 v_blank_f[3];
-	u8 h_sync_gen[3];
-	u8 v_sync_gen1[3];
-	u8 v_sync_gen2[3];
-	u8 v_sync_gen3[3];
-};
-
-struct hdmi_v14_core_regs {
-	u8 h_blank[2];
-	u8 v2_blank[2];
-	u8 v1_blank[2];
-	u8 v_line[2];
-	u8 h_line[2];
-	u8 hsync_pol[1];
-	u8 vsync_pol[1];
-	u8 int_pro_mode[1];
-	u8 v_blank_f0[2];
-	u8 v_blank_f1[2];
-	u8 h_sync_start[2];
-	u8 h_sync_end[2];
-	u8 v_sync_line_bef_2[2];
-	u8 v_sync_line_bef_1[2];
-	u8 v_sync_line_aft_2[2];
-	u8 v_sync_line_aft_1[2];
-	u8 v_sync_line_aft_pxl_2[2];
-	u8 v_sync_line_aft_pxl_1[2];
-	u8 v_blank_f2[2]; /* for 3D mode */
-	u8 v_blank_f3[2]; /* for 3D mode */
-	u8 v_blank_f4[2]; /* for 3D mode */
-	u8 v_blank_f5[2]; /* for 3D mode */
-	u8 v_sync_line_aft_3[2];
-	u8 v_sync_line_aft_4[2];
-	u8 v_sync_line_aft_5[2];
-	u8 v_sync_line_aft_6[2];
-	u8 v_sync_line_aft_pxl_3[2];
-	u8 v_sync_line_aft_pxl_4[2];
-	u8 v_sync_line_aft_pxl_5[2];
-	u8 v_sync_line_aft_pxl_6[2];
-	u8 vact_space_1[2];
-	u8 vact_space_2[2];
-	u8 vact_space_3[2];
-	u8 vact_space_4[2];
-	u8 vact_space_5[2];
-	u8 vact_space_6[2];
-};
-
-struct hdmi_v13_conf {
-	struct hdmi_v13_core_regs core;
-	struct hdmi_tg_regs tg;
-};
-
-struct hdmi_v14_conf {
-	struct hdmi_v14_core_regs core;
-	struct hdmi_tg_regs tg;
-};
-
-struct hdmi_conf_regs {
-	int pixel_clock;
-	int cea_video_id;
-	enum hdmi_picture_aspect aspect_ratio;
-	union {
-		struct hdmi_v13_conf v13_conf;
-		struct hdmi_v14_conf v14_conf;
-	} conf;
-};
-
 struct hdmi_context {
-	struct exynos_drm_display	display;
+	struct drm_encoder		encoder;
 	struct device			*dev;
 	struct drm_device		*drm_dev;
 	struct drm_connector		connector;
-	struct drm_encoder		*encoder;
 	bool				hpd;
 	bool				powered;
 	bool				dvi_mode;
-	struct mutex			hdmi_mutex;
 
 	void __iomem			*regs;
 	int				irq;
@@ -201,22 +104,20 @@
 
 	/* current hdmiphy conf regs */
 	struct drm_display_mode		current_mode;
-	struct hdmi_conf_regs		mode_conf;
+	u8				cea_video_id;
 
 	struct hdmi_resources		res;
+	const struct hdmi_driver_data	*drv_data;
 
 	int				hpd_gpio;
 	void __iomem			*regs_hdmiphy;
-	const struct hdmiphy_config		*phy_confs;
-	unsigned int			phy_conf_count;
 
 	struct regmap			*pmureg;
-	enum hdmi_type			type;
 };
 
-static inline struct hdmi_context *display_to_hdmi(struct exynos_drm_display *d)
+static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
 {
-	return container_of(d, struct hdmi_context, display);
+	return container_of(e, struct hdmi_context, encoder);
 }
 
 struct hdmiphy_config {
@@ -624,6 +525,16 @@
 	writeb(value, hdata->regs + reg_id);
 }
 
+static inline void hdmi_reg_writev(struct hdmi_context *hdata, u32 reg_id,
+				   int bytes, u32 val)
+{
+	while (--bytes >= 0) {
+		writeb(val & 0xff, hdata->regs + reg_id);
+		val >>= 8;
+		reg_id += 4;
+	}
+}
+
 static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
 				 u32 reg_id, u32 value, u32 mask)
 {
@@ -930,7 +841,7 @@
 
 static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
 {
-	if (hdata->type == HDMI_TYPE13)
+	if (hdata->drv_data->type == HDMI_TYPE13)
 		hdmi_v13_regs_dump(hdata, prefix);
 	else
 		hdmi_v14_regs_dump(hdata, prefix);
@@ -957,7 +868,7 @@
 	u32 hdr_sum;
 	u8 chksum;
 	u32 mod;
-	u32 vic;
+	u8 ar;
 
 	mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
 	if (hdata->dvi_mode) {
@@ -988,27 +899,22 @@
 		 * Set the aspect ratio as per the mode, mentioned in
 		 * Table 9 AVI InfoFrame Data Byte 2 of CEA-861-D Standard
 		 */
-		switch (hdata->mode_conf.aspect_ratio) {
+		ar = hdata->current_mode.picture_aspect_ratio;
+		switch (ar) {
 		case HDMI_PICTURE_ASPECT_4_3:
-			hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
-					hdata->mode_conf.aspect_ratio |
-					AVI_4_3_CENTER_RATIO);
+			ar |= AVI_4_3_CENTER_RATIO;
 			break;
 		case HDMI_PICTURE_ASPECT_16_9:
-			hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
-					hdata->mode_conf.aspect_ratio |
-					AVI_16_9_CENTER_RATIO);
+			ar |= AVI_16_9_CENTER_RATIO;
 			break;
 		case HDMI_PICTURE_ASPECT_NONE:
 		default:
-			hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2),
-					hdata->mode_conf.aspect_ratio |
-					AVI_SAME_AS_PIC_ASPECT_RATIO);
+			ar |= AVI_SAME_AS_PIC_ASPECT_RATIO;
 			break;
 		}
+		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), ar);
 
-		vic = hdata->mode_conf.cea_video_id;
-		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
+		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), hdata->cea_video_id);
 
 		chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
 					infoframe->any.length, hdr_sum);
@@ -1038,10 +944,10 @@
 {
 	struct hdmi_context *hdata = ctx_from_connector(connector);
 
-	hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+	if (gpio_get_value(hdata->hpd_gpio))
+		return connector_status_connected;
 
-	return hdata->hpd ? connector_status_connected :
-			connector_status_disconnected;
+	return connector_status_disconnected;
 }
 
 static void hdmi_connector_destroy(struct drm_connector *connector)
@@ -1091,8 +997,8 @@
 {
 	int i;
 
-	for (i = 0; i < hdata->phy_conf_count; i++)
-		if (hdata->phy_confs[i].pixel_clock == pixel_clock)
+	for (i = 0; i < hdata->drv_data->phy_conf_count; i++)
+		if (hdata->drv_data->phy_confs[i].pixel_clock == pixel_clock)
 			return i;
 
 	DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
@@ -1125,7 +1031,7 @@
 {
 	struct hdmi_context *hdata = ctx_from_connector(connector);
 
-	return hdata->encoder;
+	return &hdata->encoder;
 }
 
 static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
@@ -1134,14 +1040,12 @@
 	.best_encoder = hdmi_best_encoder,
 };
 
-static int hdmi_create_connector(struct exynos_drm_display *display,
-			struct drm_encoder *encoder)
+static int hdmi_create_connector(struct drm_encoder *encoder)
 {
-	struct hdmi_context *hdata = display_to_hdmi(display);
+	struct hdmi_context *hdata = encoder_to_hdmi(encoder);
 	struct drm_connector *connector = &hdata->connector;
 	int ret;
 
-	hdata->encoder = encoder;
 	connector->interlace_allowed = true;
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 
@@ -1159,23 +1063,30 @@
 	return 0;
 }
 
-static void hdmi_mode_fixup(struct exynos_drm_display *display,
-				struct drm_connector *connector,
-				const struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
+static bool hdmi_mode_fixup(struct drm_encoder *encoder,
+			    const struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode)
 {
+	struct drm_device *dev = encoder->dev;
+	struct drm_connector *connector;
 	struct drm_display_mode *m;
 	int mode_ok;
 
-	DRM_DEBUG_KMS("%s\n", __FILE__);
-
 	drm_mode_set_crtcinfo(adjusted_mode, 0);
 
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder)
+			break;
+	}
+
+	if (connector->encoder != encoder)
+		return true;
+
 	mode_ok = hdmi_mode_valid(connector, adjusted_mode);
 
 	/* just return if user desired mode exists. */
 	if (mode_ok == MODE_OK)
-		return;
+		return true;
 
 	/*
 	 * otherwise, find the most suitable mode among modes and change it
@@ -1195,6 +1106,8 @@
 			break;
 		}
 	}
+
+	return true;
 }
 
 static void hdmi_set_acr(u32 freq, u8 *acr)
@@ -1257,7 +1170,7 @@
 	hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]);
 	hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]);
 
-	if (hdata->type == HDMI_TYPE13)
+	if (hdata->drv_data->type == HDMI_TYPE13)
 		hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4);
 	else
 		hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4);
@@ -1391,7 +1304,7 @@
 				HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS);
 	}
 
-	if (hdata->type == HDMI_TYPE13) {
+	if (hdata->drv_data->type == HDMI_TYPE13) {
 		/* choose bluescreen (fecal) color */
 		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
 		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34);
@@ -1424,66 +1337,94 @@
 
 static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
 {
-	const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
-	const struct hdmi_v13_core_regs *core =
-		&hdata->mode_conf.conf.v13_conf.core;
+	struct drm_display_mode *m = &hdata->current_mode;
+	unsigned int val;
 	int tries;
 
-	/* setting core registers */
-	hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
-	hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_0, core->v_blank[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_1, core->v_blank[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_2, core->v_blank[2]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_0, core->h_v_line[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_1, core->h_v_line[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_2, core->h_v_line[2]);
-	hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
-	hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_0, core->v_blank_f[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_1, core->v_blank_f[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_2, core->v_blank_f[2]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_0, core->h_sync_gen[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_1, core->h_sync_gen[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_2, core->h_sync_gen[2]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
-	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+	hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_V13_H_V_LINE_0, 3,
+			(m->htotal << 12) | m->vtotal);
+
+	val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+	hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1, val);
+
+	val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
+	hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1, val);
+
+	val = (m->hsync_start - m->hdisplay - 2);
+	val |= ((m->hsync_end - m->hdisplay - 2) << 10);
+	val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0) << 20;
+	hdmi_reg_writev(hdata, HDMI_V13_H_SYNC_GEN_0, 3, val);
+
+	/*
+	 * Quirk of the exynos HDMI IP design: hsync_start and hsync_end
+	 * must be programmed 2 pixels less than the calculated values.
+	 */
+
+	/* The following values and calculations differ between mode types. */
+	if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+		/* Interlaced Mode */
+		val = ((m->vsync_end - m->vdisplay) / 2);
+		val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val);
+
+		val = m->vtotal / 2;
+		val |= ((m->vtotal - m->vdisplay) / 2) << 11;
+		hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val);
+
+		val = (m->vtotal +
+			((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
+		val |= m->vtotal << 11;
+		hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_F_0, 3, val);
+
+		val = ((m->vtotal / 2) + 7);
+		val |= ((m->vtotal / 2) + 2) << 12;
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_2_0, 3, val);
+
+		val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
+		val |= ((m->htotal / 2) +
+			(m->hsync_start - m->hdisplay)) << 12;
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_3_0, 3, val);
+
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+				(m->vtotal - m->vdisplay) / 2);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay / 2);
+
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x249);
+	} else {
+		/* Progressive Mode */
+
+		val = m->vtotal;
+		val |= (m->vtotal - m->vdisplay) << 11;
+		hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val);
+
+		hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_F_0, 3, 0);
+
+		val = (m->vsync_end - m->vdisplay);
+		val |= ((m->vsync_start - m->vdisplay) << 12);
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val);
+
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_2_0, 3, 0x1001);
+		hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_3_0, 3, 0x1001);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+				m->vtotal - m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
+	}
+
 	/* Timing generator registers */
-	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
+	hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
+	hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
+	hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
+	hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
 
 	/* waiting for HDMIPHY's PLL to get to steady state */
 	for (tries = 100; tries; --tries) {
@@ -1508,144 +1449,119 @@
 
 static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
 {
-	const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
-	const struct hdmi_v14_core_regs *core =
-		&hdata->mode_conf.conf.v14_conf.core;
+	struct drm_display_mode *m = &hdata->current_mode;
 	int tries;
 
-	/* setting core registers */
-	hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
-	hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
-	hdmi_reg_writeb(hdata, HDMI_V2_BLANK_0, core->v2_blank[0]);
-	hdmi_reg_writeb(hdata, HDMI_V2_BLANK_1, core->v2_blank[1]);
-	hdmi_reg_writeb(hdata, HDMI_V1_BLANK_0, core->v1_blank[0]);
-	hdmi_reg_writeb(hdata, HDMI_V1_BLANK_1, core->v1_blank[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_LINE_0, core->v_line[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_LINE_1, core->v_line[1]);
-	hdmi_reg_writeb(hdata, HDMI_H_LINE_0, core->h_line[0]);
-	hdmi_reg_writeb(hdata, HDMI_H_LINE_1, core->h_line[1]);
-	hdmi_reg_writeb(hdata, HDMI_HSYNC_POL, core->hsync_pol[0]);
-	hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
-	hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_0, core->v_blank_f0[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_1, core->v_blank_f0[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_0, core->v_blank_f1[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_1, core->v_blank_f1[1]);
-	hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_0, core->h_sync_start[0]);
-	hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_1, core->h_sync_start[1]);
-	hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_0, core->h_sync_end[0]);
-	hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_1, core->h_sync_end[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_0,
-			core->v_sync_line_bef_2[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_1,
-			core->v_sync_line_bef_2[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_0,
-			core->v_sync_line_bef_1[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_1,
-			core->v_sync_line_bef_1[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_0,
-			core->v_sync_line_aft_2[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_1,
-			core->v_sync_line_aft_2[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_0,
-			core->v_sync_line_aft_1[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_1,
-			core->v_sync_line_aft_1[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0,
-			core->v_sync_line_aft_pxl_2[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_1,
-			core->v_sync_line_aft_pxl_2[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0,
-			core->v_sync_line_aft_pxl_1[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_1,
-			core->v_sync_line_aft_pxl_1[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_0, core->v_blank_f2[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_1, core->v_blank_f2[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_0, core->v_blank_f3[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_1, core->v_blank_f3[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_0, core->v_blank_f4[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_1, core->v_blank_f4[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_0, core->v_blank_f5[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_1, core->v_blank_f5[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_0,
-			core->v_sync_line_aft_3[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_1,
-			core->v_sync_line_aft_3[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_0,
-			core->v_sync_line_aft_4[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_1,
-			core->v_sync_line_aft_4[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_0,
-			core->v_sync_line_aft_5[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_1,
-			core->v_sync_line_aft_5[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_0,
-			core->v_sync_line_aft_6[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_1,
-			core->v_sync_line_aft_6[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0,
-			core->v_sync_line_aft_pxl_3[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_1,
-			core->v_sync_line_aft_pxl_3[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0,
-			core->v_sync_line_aft_pxl_4[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_1,
-			core->v_sync_line_aft_pxl_4[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0,
-			core->v_sync_line_aft_pxl_5[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_1,
-			core->v_sync_line_aft_pxl_5[1]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0,
-			core->v_sync_line_aft_pxl_6[0]);
-	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_1,
-			core->v_sync_line_aft_pxl_6[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_0, core->vact_space_1[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_1, core->vact_space_1[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_0, core->vact_space_2[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_1, core->vact_space_2[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_0, core->vact_space_3[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_1, core->vact_space_3[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_0, core->vact_space_4[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_1, core->vact_space_4[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_0, core->vact_space_5[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_1, core->vact_space_5[1]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_0, core->vact_space_6[0]);
-	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]);
+	hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_V_LINE_0, 2, m->vtotal);
+	hdmi_reg_writev(hdata, HDMI_H_LINE_0, 2, m->htotal);
+	hdmi_reg_writev(hdata, HDMI_HSYNC_POL, 1,
+			(m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
+	hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1,
+			(m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
+	hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1,
+			(m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
+
+	/*
+	 * Quirk of the exynos 5 HDMI IP design: hsync_start and hsync_end
+	 * must be programmed 2 pixels less than the calculated values.
+	 */
+
+	/* The following values and calculations differ between mode types. */
+	if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+		/* Interlaced Mode */
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
+			(m->vsync_end - m->vdisplay) / 2);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
+			(m->vsync_start - m->vdisplay) / 2);
+		hdmi_reg_writev(hdata, HDMI_V2_BLANK_0, 2, m->vtotal / 2);
+		hdmi_reg_writev(hdata, HDMI_V1_BLANK_0, 2,
+				(m->vtotal - m->vdisplay) / 2);
+		hdmi_reg_writev(hdata, HDMI_V_BLANK_F0_0, 2,
+				m->vtotal - m->vdisplay / 2);
+		hdmi_reg_writev(hdata, HDMI_V_BLANK_F1_0, 2, m->vtotal);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_2_0, 2,
+				(m->vtotal / 2) + 7);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_1_0, 2,
+				(m->vtotal / 2) + 2);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, 2,
+			(m->htotal / 2) + (m->hsync_start - m->hdisplay));
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, 2,
+			(m->htotal / 2) + (m->hsync_start - m->hdisplay));
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+				(m->vtotal - m->vdisplay) / 2);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay / 2);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2,
+				m->vtotal - m->vdisplay / 2);
+		hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2,
+				(m->vtotal / 2) + 1);
+		hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2,
+				(m->vtotal / 2) + 1);
+		hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2,
+				(m->vtotal / 2) + 1);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x0);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x0);
+	} else {
+		/* Progressive Mode */
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
+			m->vsync_end - m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
+			m->vsync_start - m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_V2_BLANK_0, 2, m->vtotal);
+		hdmi_reg_writev(hdata, HDMI_V1_BLANK_0, 2,
+				m->vtotal - m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_V_BLANK_F0_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_V_BLANK_F1_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_2_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_1_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, 2, 0xffff);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
+				m->vtotal - m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x47b);
+		hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x6ae);
+		hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
+		hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
+		hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
+	}
+
+	/* The following values and calculations are the same for all mode types. */
+	hdmi_reg_writev(hdata, HDMI_H_SYNC_START_0, 2,
+			m->hsync_start - m->hdisplay - 2);
+	hdmi_reg_writev(hdata, HDMI_H_SYNC_END_0, 2,
+			m->hsync_end - m->hdisplay - 2);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_1_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_2_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_3_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_4_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_5_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_VACT_SPACE_6_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_BLANK_F2_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_BLANK_F3_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_BLANK_F4_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_BLANK_F5_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_3_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_4_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_5_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_6_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, 2, 0xffff);
+	hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, 2, 0xffff);
 
 	/* Timing generator registers */
-	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
-	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
-	hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d[0]);
+	hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
+	hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
+	hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
+	hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
+	hdmi_reg_writev(hdata, HDMI_TG_3D, 1, 0x0);
 
 	/* waiting for HDMIPHY's PLL to get to steady state */
 	for (tries = 100; tries; --tries) {
@@ -1670,7 +1586,7 @@
 
 static void hdmi_mode_apply(struct hdmi_context *hdata)
 {
-	if (hdata->type == HDMI_TYPE13)
+	if (hdata->drv_data->type == HDMI_TYPE13)
 		hdmi_v13_mode_apply(hdata);
 	else
 		hdmi_v14_mode_apply(hdata);
@@ -1688,7 +1604,7 @@
 	hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
 				HDMI_PHY_ENABLE_MODE_SET);
 
-	if (hdata->type == HDMI_TYPE13)
+	if (hdata->drv_data->type == HDMI_TYPE13)
 		reg = HDMI_V13_PHY_RSTOUT;
 	else
 		reg = HDMI_PHY_RSTOUT;
@@ -1702,7 +1618,7 @@
 
 static void hdmiphy_poweron(struct hdmi_context *hdata)
 {
-	if (hdata->type != HDMI_TYPE14)
+	if (hdata->drv_data->type != HDMI_TYPE14)
 		return;
 
 	DRM_DEBUG_KMS("\n");
@@ -1722,7 +1638,7 @@
 
 static void hdmiphy_poweroff(struct hdmi_context *hdata)
 {
-	if (hdata->type != HDMI_TYPE14)
+	if (hdata->drv_data->type != HDMI_TYPE14)
 		return;
 
 	DRM_DEBUG_KMS("\n");
@@ -1748,13 +1664,14 @@
 	int i;
 
 	/* pixel clock */
-	i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock);
+	i = hdmi_find_phy_conf(hdata, hdata->current_mode.clock * 1000);
 	if (i < 0) {
 		DRM_ERROR("failed to find hdmiphy conf\n");
 		return;
 	}
 
-	ret = hdmiphy_reg_write_buf(hdata, 0, hdata->phy_confs[i].conf, 32);
+	ret = hdmiphy_reg_write_buf(hdata, 0,
+			hdata->drv_data->phy_confs[i].conf, 32);
 	if (ret) {
 		DRM_ERROR("failed to configure hdmiphy\n");
 		return;
@@ -1776,10 +1693,8 @@
 	hdmiphy_conf_reset(hdata);
 	hdmiphy_conf_apply(hdata);
 
-	mutex_lock(&hdata->hdmi_mutex);
 	hdmi_start(hdata, false);
 	hdmi_conf_init(hdata);
-	mutex_unlock(&hdata->hdmi_mutex);
 
 	hdmi_audio_init(hdata);
 
@@ -1790,271 +1705,32 @@
 	hdmi_regs_dump(hdata, "start");
 }
 
-static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value)
+static void hdmi_mode_set(struct drm_encoder *encoder,
+			  struct drm_display_mode *mode,
+			  struct drm_display_mode *adjusted_mode)
 {
-	int i;
-	BUG_ON(num_bytes > 4);
-	for (i = 0; i < num_bytes; i++)
-		reg_pair[i] = (value >> (8 * i)) & 0xff;
-}
-
-static void hdmi_v13_mode_set(struct hdmi_context *hdata,
-			struct drm_display_mode *m)
-{
-	struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core;
-	struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
-	unsigned int val;
-
-	hdata->mode_conf.cea_video_id =
-		drm_match_cea_mode((struct drm_display_mode *)m);
-	hdata->mode_conf.pixel_clock = m->clock * 1000;
-	hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
-
-	hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
-	hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal);
-
-	val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
-	hdmi_set_reg(core->vsync_pol, 1, val);
-
-	val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
-	hdmi_set_reg(core->int_pro_mode, 1, val);
-
-	val = (m->hsync_start - m->hdisplay - 2);
-	val |= ((m->hsync_end - m->hdisplay - 2) << 10);
-	val |= ((m->flags & DRM_MODE_FLAG_NHSYNC)  ? 1 : 0)<<20;
-	hdmi_set_reg(core->h_sync_gen, 3, val);
-
-	/*
-	 * Quirk requirement for exynos HDMI IP design,
-	 * 2 pixels less than the actual calculation for hsync_start
-	 * and end.
-	 */
-
-	/* Following values & calculations differ for different type of modes */
-	if (m->flags & DRM_MODE_FLAG_INTERLACE) {
-		/* Interlaced Mode */
-		val = ((m->vsync_end - m->vdisplay) / 2);
-		val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
-		hdmi_set_reg(core->v_sync_gen1, 3, val);
-
-		val = m->vtotal / 2;
-		val |= ((m->vtotal - m->vdisplay) / 2) << 11;
-		hdmi_set_reg(core->v_blank, 3, val);
-
-		val = (m->vtotal +
-			((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
-		val |= m->vtotal << 11;
-		hdmi_set_reg(core->v_blank_f, 3, val);
-
-		val = ((m->vtotal / 2) + 7);
-		val |= ((m->vtotal / 2) + 2) << 12;
-		hdmi_set_reg(core->v_sync_gen2, 3, val);
-
-		val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
-		val |= ((m->htotal / 2) +
-			(m->hsync_start - m->hdisplay)) << 12;
-		hdmi_set_reg(core->v_sync_gen3, 3, val);
-
-		hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
-		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
-
-		hdmi_set_reg(tg->vact_st2, 2, 0x249);/* Reset value + 1*/
-	} else {
-		/* Progressive Mode */
-
-		val = m->vtotal;
-		val |= (m->vtotal - m->vdisplay) << 11;
-		hdmi_set_reg(core->v_blank, 3, val);
-
-		hdmi_set_reg(core->v_blank_f, 3, 0);
-
-		val = (m->vsync_end - m->vdisplay);
-		val |= ((m->vsync_start - m->vdisplay) << 12);
-		hdmi_set_reg(core->v_sync_gen1, 3, val);
-
-		hdmi_set_reg(core->v_sync_gen2, 3, 0x1001);/* Reset value  */
-		hdmi_set_reg(core->v_sync_gen3, 3, 0x1001);/* Reset value  */
-		hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
-		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
-		hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
-	}
-
-	/* Timing generator registers */
-	hdmi_set_reg(tg->cmd, 1, 0x0);
-	hdmi_set_reg(tg->h_fsz, 2, m->htotal);
-	hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
-	hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
-	hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
-	hdmi_set_reg(tg->vsync, 2, 0x1);
-	hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
-	hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
-	hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
-	hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
-	hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
-	hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
-	hdmi_set_reg(tg->tg_3d, 1, 0x0); /* Not used */
-}
-
-static void hdmi_v14_mode_set(struct hdmi_context *hdata,
-			struct drm_display_mode *m)
-{
-	struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
-	struct hdmi_v14_core_regs *core =
-		&hdata->mode_conf.conf.v14_conf.core;
-
-	hdata->mode_conf.cea_video_id =
-		drm_match_cea_mode((struct drm_display_mode *)m);
-	hdata->mode_conf.pixel_clock = m->clock * 1000;
-	hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio;
-
-	hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
-	hdmi_set_reg(core->v_line, 2, m->vtotal);
-	hdmi_set_reg(core->h_line, 2, m->htotal);
-	hdmi_set_reg(core->hsync_pol, 1,
-			(m->flags & DRM_MODE_FLAG_NHSYNC)  ? 1 : 0);
-	hdmi_set_reg(core->vsync_pol, 1,
-			(m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
-	hdmi_set_reg(core->int_pro_mode, 1,
-			(m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
-
-	/*
-	 * Quirk requirement for exynos 5 HDMI IP design,
-	 * 2 pixels less than the actual calculation for hsync_start
-	 * and end.
-	 */
-
-	/* Following values & calculations differ for different type of modes */
-	if (m->flags & DRM_MODE_FLAG_INTERLACE) {
-		/* Interlaced Mode */
-		hdmi_set_reg(core->v_sync_line_bef_2, 2,
-			(m->vsync_end - m->vdisplay) / 2);
-		hdmi_set_reg(core->v_sync_line_bef_1, 2,
-			(m->vsync_start - m->vdisplay) / 2);
-		hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2);
-		hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2);
-		hdmi_set_reg(core->v_blank_f0, 2, m->vtotal - m->vdisplay / 2);
-		hdmi_set_reg(core->v_blank_f1, 2, m->vtotal);
-		hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7);
-		hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2);
-		hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2,
-			(m->htotal / 2) + (m->hsync_start - m->hdisplay));
-		hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2,
-			(m->htotal / 2) + (m->hsync_start - m->hdisplay));
-		hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
-		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
-		hdmi_set_reg(tg->vact_st2, 2, m->vtotal - m->vdisplay / 2);
-		hdmi_set_reg(tg->vsync2, 2, (m->vtotal / 2) + 1);
-		hdmi_set_reg(tg->vsync_bot_hdmi, 2, (m->vtotal / 2) + 1);
-		hdmi_set_reg(tg->field_bot_hdmi, 2, (m->vtotal / 2) + 1);
-		hdmi_set_reg(tg->vact_st3, 2, 0x0);
-		hdmi_set_reg(tg->vact_st4, 2, 0x0);
-	} else {
-		/* Progressive Mode */
-		hdmi_set_reg(core->v_sync_line_bef_2, 2,
-			m->vsync_end - m->vdisplay);
-		hdmi_set_reg(core->v_sync_line_bef_1, 2,
-			m->vsync_start - m->vdisplay);
-		hdmi_set_reg(core->v2_blank, 2, m->vtotal);
-		hdmi_set_reg(core->v1_blank, 2, m->vtotal - m->vdisplay);
-		hdmi_set_reg(core->v_blank_f0, 2, 0xffff);
-		hdmi_set_reg(core->v_blank_f1, 2, 0xffff);
-		hdmi_set_reg(core->v_sync_line_aft_2, 2, 0xffff);
-		hdmi_set_reg(core->v_sync_line_aft_1, 2, 0xffff);
-		hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, 0xffff);
-		hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, 0xffff);
-		hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
-		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
-		hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
-		hdmi_set_reg(tg->vact_st3, 2, 0x47b); /* Reset value */
-		hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */
-		hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
-		hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
-		hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
-	}
-
-	/* Following values & calculations are same irrespective of mode type */
-	hdmi_set_reg(core->h_sync_start, 2, m->hsync_start - m->hdisplay - 2);
-	hdmi_set_reg(core->h_sync_end, 2, m->hsync_end - m->hdisplay - 2);
-	hdmi_set_reg(core->vact_space_1, 2, 0xffff);
-	hdmi_set_reg(core->vact_space_2, 2, 0xffff);
-	hdmi_set_reg(core->vact_space_3, 2, 0xffff);
-	hdmi_set_reg(core->vact_space_4, 2, 0xffff);
-	hdmi_set_reg(core->vact_space_5, 2, 0xffff);
-	hdmi_set_reg(core->vact_space_6, 2, 0xffff);
-	hdmi_set_reg(core->v_blank_f2, 2, 0xffff);
-	hdmi_set_reg(core->v_blank_f3, 2, 0xffff);
-	hdmi_set_reg(core->v_blank_f4, 2, 0xffff);
-	hdmi_set_reg(core->v_blank_f5, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_3, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_4, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_5, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_6, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_pxl_3, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_pxl_4, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_pxl_5, 2, 0xffff);
-	hdmi_set_reg(core->v_sync_line_aft_pxl_6, 2, 0xffff);
-
-	/* Timing generator registers */
-	hdmi_set_reg(tg->cmd, 1, 0x0);
-	hdmi_set_reg(tg->h_fsz, 2, m->htotal);
-	hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
-	hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
-	hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
-	hdmi_set_reg(tg->vsync, 2, 0x1);
-	hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
-	hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
-	hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
-	hdmi_set_reg(tg->tg_3d, 1, 0x0);
-}
-
-static void hdmi_mode_set(struct exynos_drm_display *display,
-			struct drm_display_mode *mode)
-{
-	struct hdmi_context *hdata = display_to_hdmi(display);
-	struct drm_display_mode *m = mode;
+	struct hdmi_context *hdata = encoder_to_hdmi(encoder);
+	struct drm_display_mode *m = adjusted_mode;
 
 	DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
 		m->hdisplay, m->vdisplay,
 		m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
 		"INTERLACED" : "PROGRESSIVE");
 
-	/* preserve mode information for later use. */
-	drm_mode_copy(&hdata->current_mode, mode);
-
-	if (hdata->type == HDMI_TYPE13)
-		hdmi_v13_mode_set(hdata, mode);
-	else
-		hdmi_v14_mode_set(hdata, mode);
+	drm_mode_copy(&hdata->current_mode, m);
+	hdata->cea_video_id = drm_match_cea_mode(mode);
 }
 
-static void hdmi_commit(struct exynos_drm_display *display)
+static void hdmi_enable(struct drm_encoder *encoder)
 {
-	struct hdmi_context *hdata = display_to_hdmi(display);
-
-	mutex_lock(&hdata->hdmi_mutex);
-	if (!hdata->powered) {
-		mutex_unlock(&hdata->hdmi_mutex);
-		return;
-	}
-	mutex_unlock(&hdata->hdmi_mutex);
-
-	hdmi_conf_apply(hdata);
-}
-
-static void hdmi_poweron(struct hdmi_context *hdata)
-{
+	struct hdmi_context *hdata = encoder_to_hdmi(encoder);
 	struct hdmi_resources *res = &hdata->res;
 
-	mutex_lock(&hdata->hdmi_mutex);
-	if (hdata->powered) {
-		mutex_unlock(&hdata->hdmi_mutex);
+	if (hdata->powered)
 		return;
-	}
 
 	hdata->powered = true;
 
-	mutex_unlock(&hdata->hdmi_mutex);
-
 	pm_runtime_get_sync(hdata->dev);
 
 	if (regulator_bulk_enable(res->regul_count, res->regul_bulk))
@@ -2068,17 +1744,32 @@
 	clk_prepare_enable(res->sclk_hdmi);
 
 	hdmiphy_poweron(hdata);
-	hdmi_commit(&hdata->display);
+	hdmi_conf_apply(hdata);
 }
 
-static void hdmi_poweroff(struct hdmi_context *hdata)
+static void hdmi_disable(struct drm_encoder *encoder)
 {
+	struct hdmi_context *hdata = encoder_to_hdmi(encoder);
 	struct hdmi_resources *res = &hdata->res;
+	struct drm_crtc *crtc = encoder->crtc;
+	const struct drm_crtc_helper_funcs *funcs = NULL;
 
-	mutex_lock(&hdata->hdmi_mutex);
 	if (!hdata->powered)
-		goto out;
-	mutex_unlock(&hdata->hdmi_mutex);
+		return;
+
+	/*
+	 * The SFRs of VP and Mixer are updated by the vertical sync of
+	 * the timing generator, which is a part of HDMI, so the sequence
+	 * to disable the TV subsystem should be as follows:
+	 *	VP -> Mixer -> HDMI
+	 *
+	 * The code below tries to disable the Mixer and VP (if used)
+	 * prior to disabling HDMI.
+	 */
+	if (crtc)
+		funcs = crtc->helper_private;
+	if (funcs && funcs->disable)
+		(*funcs->disable)(crtc);
 
 	/* HDMI System Disable */
 	hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN);
@@ -2098,57 +1789,18 @@
 
 	pm_runtime_put_sync(hdata->dev);
 
-	mutex_lock(&hdata->hdmi_mutex);
 	hdata->powered = false;
-
-out:
-	mutex_unlock(&hdata->hdmi_mutex);
 }
 
-static void hdmi_dpms(struct exynos_drm_display *display, int mode)
-{
-	struct hdmi_context *hdata = display_to_hdmi(display);
-	struct drm_encoder *encoder = hdata->encoder;
-	struct drm_crtc *crtc = encoder->crtc;
-	const struct drm_crtc_helper_funcs *funcs = NULL;
-
-	DRM_DEBUG_KMS("mode %d\n", mode);
-
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		hdmi_poweron(hdata);
-		break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		/*
-		 * The SFRs of VP and Mixer are updated by Vertical Sync of
-		 * Timing generator which is a part of HDMI so the sequence
-		 * to disable TV Subsystem should be as following,
-		 *	VP -> Mixer -> HDMI
-		 *
-		 * Below codes will try to disable Mixer and VP(if used)
-		 * prior to disabling HDMI.
-		 */
-		if (crtc)
-			funcs = crtc->helper_private;
-		if (funcs && funcs->disable)
-			(*funcs->disable)(crtc);
-
-		hdmi_poweroff(hdata);
-		break;
-	default:
-		DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
-		break;
-	}
-}
-
-static struct exynos_drm_display_ops hdmi_display_ops = {
-	.create_connector = hdmi_create_connector,
+static struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
 	.mode_fixup	= hdmi_mode_fixup,
 	.mode_set	= hdmi_mode_set,
-	.dpms		= hdmi_dpms,
-	.commit		= hdmi_commit,
+	.enable		= hdmi_enable,
+	.disable	= hdmi_disable,
+};
+
+static struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
 };
 
 static void hdmi_hotplug_work_func(struct work_struct *work)
@@ -2157,10 +1809,6 @@
 
 	hdata = container_of(work, struct hdmi_context, hotplug_work.work);
 
-	mutex_lock(&hdata->hdmi_mutex);
-	hdata->hpd = gpio_get_value(hdata->hpd_gpio);
-	mutex_unlock(&hdata->hdmi_mutex);
-
 	if (hdata->drm_dev)
 		drm_helper_hpd_irq_event(hdata->drm_dev);
 }
@@ -2259,30 +1907,6 @@
 	return ret;
 }
 
-static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
-					(struct device *dev)
-{
-	struct device_node *np = dev->of_node;
-	struct s5p_hdmi_platform_data *pd;
-	u32 value;
-
-	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
-	if (!pd)
-		goto err_data;
-
-	if (!of_find_property(np, "hpd-gpio", &value)) {
-		DRM_ERROR("no hpd gpio property found\n");
-		goto err_data;
-	}
-
-	pd->hpd_gpio = of_get_named_gpio(np, "hpd-gpio", 0);
-
-	return pd;
-
-err_data:
-	return NULL;
-}
-
 static struct of_device_id hdmi_match_types[] = {
 	{
 		.compatible = "samsung,exynos5-hdmi",
@@ -2306,10 +1930,33 @@
 {
 	struct drm_device *drm_dev = data;
 	struct hdmi_context *hdata = dev_get_drvdata(dev);
+	struct drm_encoder *encoder = &hdata->encoder;
+	int ret, pipe;
 
 	hdata->drm_dev = drm_dev;
 
-	return exynos_drm_create_enc_conn(drm_dev, &hdata->display);
+	pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+						  EXYNOS_DISPLAY_TYPE_HDMI);
+	if (pipe < 0)
+		return pipe;
+
+	encoder->possible_crtcs = 1 << pipe;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
+
+	ret = hdmi_create_connector(encoder);
+	if (ret) {
+		DRM_ERROR("failed to create connector ret = %d\n", ret);
+		drm_encoder_cleanup(encoder);
+		return ret;
+	}
+
+	return 0;
 }
 
 static void hdmi_unbind(struct device *dev, struct device *master, void *data)
@@ -2343,43 +1990,30 @@
 static int hdmi_probe(struct platform_device *pdev)
 {
 	struct device_node *ddc_node, *phy_node;
-	struct s5p_hdmi_platform_data *pdata;
-	struct hdmi_driver_data *drv_data;
 	const struct of_device_id *match;
 	struct device *dev = &pdev->dev;
 	struct hdmi_context *hdata;
 	struct resource *res;
 	int ret;
 
-	if (!dev->of_node)
-		return -ENODEV;
-
-	pdata = drm_hdmi_dt_parse_pdata(dev);
-	if (!pdata)
-		return -EINVAL;
-
 	hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
 	if (!hdata)
 		return -ENOMEM;
 
-	hdata->display.type = EXYNOS_DISPLAY_TYPE_HDMI;
-	hdata->display.ops = &hdmi_display_ops;
-
-	mutex_init(&hdata->hdmi_mutex);
-
-	platform_set_drvdata(pdev, hdata);
-
-	match = of_match_node(hdmi_match_types, dev->of_node);
+	match = of_match_device(hdmi_match_types, dev);
 	if (!match)
 		return -ENODEV;
 
-	drv_data = (struct hdmi_driver_data *)match->data;
-	hdata->type = drv_data->type;
-	hdata->phy_confs = drv_data->phy_confs;
-	hdata->phy_conf_count = drv_data->phy_conf_count;
+	hdata->drv_data = match->data;
 
-	hdata->hpd_gpio = pdata->hpd_gpio;
+	platform_set_drvdata(pdev, hdata);
+
 	hdata->dev = dev;
+	hdata->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpio", 0);
+	if (hdata->hpd_gpio < 0) {
+		DRM_ERROR("cannot get hpd gpio property\n");
+		return hdata->hpd_gpio;
+	}
 
 	ret = hdmi_resources_init(hdata);
 	if (ret) {
@@ -2431,7 +2065,7 @@
 	}
 
 out_get_phy_port:
-	if (drv_data->is_apb_phy) {
+	if (hdata->drv_data->is_apb_phy) {
 		hdata->regs_hdmiphy = of_iomap(phy_node, 0);
 		if (!hdata->regs_hdmiphy) {
 			DRM_ERROR("failed to ioremap hdmi phy\n");
@@ -2454,8 +2088,6 @@
 		goto err_hdmiphy;
 	}
 
-	hdata->hpd = gpio_get_value(hdata->hpd_gpio);
-
 	INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func);
 
 	ret = devm_request_threaded_irq(dev, hdata->irq, NULL,
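
Note: the hunks above replace long runs of per-byte hdmi_reg_writeb() pairs
with single hdmi_reg_writev() calls computed directly from the current mode.
The helper's body falls outside this context window; below is a minimal
sketch of what it is assumed to do, mirroring the little-endian packing of
the removed hdmi_set_reg() and assuming the byte-wide _0/_1 registers sit
4 bytes apart on the bus:

static inline void hdmi_reg_writev(struct hdmi_context *hdata, u32 reg_id,
				   int bytes, u32 val)
{
	while (bytes--) {
		/* write the lowest byte, then step to the next register */
		hdmi_reg_writeb(hdata, reg_id, val & 0xff);
		val >>= 8;
		reg_id += 4;
	}
}
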
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 4706b56..7f81cce 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -43,6 +43,7 @@
 
 #define MIXER_WIN_NR		3
 #define MIXER_DEFAULT_WIN	0
+#define VP_DEFAULT_WIN		2
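+/* windows 0 and 1 are graphics windows; window 2 is the VP video window */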
 
 /* The pixelformats that are natively supported by the mixer. */
 #define MXR_FORMAT_RGB565	4
@@ -69,6 +70,24 @@
 	MXR_VER_128_0_0_184,
 };
 
+enum mixer_flag_bits {
+	MXR_BIT_POWERED,
+	MXR_BIT_VSYNC,
+};
+
+static const uint32_t mixer_formats[] = {
+	DRM_FORMAT_XRGB4444,
+	DRM_FORMAT_XRGB1555,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+};
+
+static const uint32_t vp_formats[] = {
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_NV21,
+};
+
 struct mixer_context {
 	struct platform_device *pdev;
 	struct device		*dev;
@@ -76,13 +95,11 @@
 	struct exynos_drm_crtc	*crtc;
 	struct exynos_drm_plane	planes[MIXER_WIN_NR];
 	int			pipe;
+	unsigned long		flags;
 	bool			interlace;
-	bool			powered;
 	bool			vp_enabled;
 	bool			has_sclk;
-	u32			int_en;
 
-	struct mutex		mixer_mutex;
 	struct mixer_resources	mixer_res;
 	enum mixer_version_id	mxr_ver;
 	wait_queue_head_t	wait_vsync_queue;
@@ -380,19 +397,20 @@
 		usleep_range(10000, 12000);
 }
 
-static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
+static void vp_video_buffer(struct mixer_context *ctx,
+			    struct exynos_drm_plane *plane)
 {
 	struct mixer_resources *res = &ctx->mixer_res;
+	struct drm_plane_state *state = plane->base.state;
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_display_mode *mode = &state->crtc->mode;
 	unsigned long flags;
-	struct exynos_drm_plane *plane;
 	dma_addr_t luma_addr[2], chroma_addr[2];
 	bool tiled_mode = false;
 	bool crcb_mode = false;
 	u32 val;
 
-	plane = &ctx->planes[win];
-
-	switch (plane->pixel_format) {
+	switch (fb->pixel_format) {
 	case DRM_FORMAT_NV12:
 		crcb_mode = false;
 		break;
@@ -401,21 +419,21 @@
 		break;
 	default:
 		DRM_ERROR("pixel format for vp is wrong [%d].\n",
-				plane->pixel_format);
+				fb->pixel_format);
 		return;
 	}
 
 	luma_addr[0] = plane->dma_addr[0];
 	chroma_addr[0] = plane->dma_addr[1];
 
-	if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) {
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
 		ctx->interlace = true;
 		if (tiled_mode) {
 			luma_addr[1] = luma_addr[0] + 0x40;
 			chroma_addr[1] = chroma_addr[0] + 0x40;
 		} else {
-			luma_addr[1] = luma_addr[0] + plane->pitch;
-			chroma_addr[1] = chroma_addr[0] + plane->pitch;
+			luma_addr[1] = luma_addr[0] + fb->pitches[0];
+			chroma_addr[1] = chroma_addr[0] + fb->pitches[0];
 		}
 	} else {
 		ctx->interlace = false;
@@ -436,25 +454,25 @@
 	vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
 
 	/* setting size of input image */
-	vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(plane->pitch) |
-		VP_IMG_VSIZE(plane->fb_height));
+	vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
+		VP_IMG_VSIZE(fb->height));
 	/* chroma height has to be reduced by 2 to avoid chroma distortions */
-	vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(plane->pitch) |
-		VP_IMG_VSIZE(plane->fb_height / 2));
+	vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
+		VP_IMG_VSIZE(fb->height / 2));
 
-	vp_reg_write(res, VP_SRC_WIDTH, plane->src_width);
-	vp_reg_write(res, VP_SRC_HEIGHT, plane->src_height);
+	vp_reg_write(res, VP_SRC_WIDTH, plane->src_w);
+	vp_reg_write(res, VP_SRC_HEIGHT, plane->src_h);
 	vp_reg_write(res, VP_SRC_H_POSITION,
 			VP_SRC_H_POSITION_VAL(plane->src_x));
 	vp_reg_write(res, VP_SRC_V_POSITION, plane->src_y);
 
-	vp_reg_write(res, VP_DST_WIDTH, plane->crtc_width);
+	vp_reg_write(res, VP_DST_WIDTH, plane->crtc_w);
 	vp_reg_write(res, VP_DST_H_POSITION, plane->crtc_x);
 	if (ctx->interlace) {
-		vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height / 2);
+		vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h / 2);
 		vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y / 2);
 	} else {
-		vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_height);
+		vp_reg_write(res, VP_DST_HEIGHT, plane->crtc_h);
 		vp_reg_write(res, VP_DST_V_POSITION, plane->crtc_y);
 	}
 
@@ -469,9 +487,9 @@
 	vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
 	vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
 
-	mixer_cfg_scan(ctx, plane->mode_height);
-	mixer_cfg_rgb_fmt(ctx, plane->mode_height);
-	mixer_cfg_layer(ctx, win, true);
+	mixer_cfg_scan(ctx, mode->vdisplay);
+	mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
+	mixer_cfg_layer(ctx, plane->zpos, true);
 	mixer_run(ctx);
 
 	mixer_vsync_set_update(ctx, true);
@@ -491,15 +509,15 @@
 static int mixer_setup_scale(const struct exynos_drm_plane *plane,
 		unsigned int *x_ratio, unsigned int *y_ratio)
 {
-	if (plane->crtc_width != plane->src_width) {
-		if (plane->crtc_width == 2 * plane->src_width)
+	if (plane->crtc_w != plane->src_w) {
+		if (plane->crtc_w == 2 * plane->src_w)
 			*x_ratio = 1;
 		else
 			goto fail;
 	}
 
-	if (plane->crtc_height != plane->src_height) {
-		if (plane->crtc_height == 2 * plane->src_height)
+	if (plane->crtc_h != plane->src_h) {
+		if (plane->crtc_h == 2 * plane->src_h)
 			*y_ratio = 1;
 		else
 			goto fail;
@@ -512,20 +530,22 @@
 	return -ENOTSUPP;
 }
 
-static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
+static void mixer_graph_buffer(struct mixer_context *ctx,
+			       struct exynos_drm_plane *plane)
 {
 	struct mixer_resources *res = &ctx->mixer_res;
+	struct drm_plane_state *state = plane->base.state;
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_display_mode *mode = &state->crtc->mode;
 	unsigned long flags;
-	struct exynos_drm_plane *plane;
+	unsigned int win = plane->zpos;
 	unsigned int x_ratio = 0, y_ratio = 0;
 	unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
 	dma_addr_t dma_addr;
 	unsigned int fmt;
 	u32 val;
 
-	plane = &ctx->planes[win];
-
-	switch (plane->pixel_format) {
+	switch (fb->pixel_format) {
 	case DRM_FORMAT_XRGB4444:
 		fmt = MXR_FORMAT_ARGB4444;
 		break;
@@ -557,12 +577,12 @@
 
 	/* converting dma address base and source offset */
 	dma_addr = plane->dma_addr[0]
-		+ (plane->src_x * plane->bpp >> 3)
-		+ (plane->src_y * plane->pitch);
+		+ (plane->src_x * fb->bits_per_pixel >> 3)
+		+ (plane->src_y * fb->pitches[0]);
 	src_x_offset = 0;
 	src_y_offset = 0;
 
-	if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE)
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
 		ctx->interlace = true;
 	else
 		ctx->interlace = false;
@@ -576,18 +596,18 @@
 
 	/* setup geometry */
 	mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
-			plane->pitch / (plane->bpp >> 3));
+			fb->pitches[0] / (fb->bits_per_pixel >> 3));
 
 	/* setup display size */
 	if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
 		win == MIXER_DEFAULT_WIN) {
-		val  = MXR_MXR_RES_HEIGHT(plane->mode_height);
-		val |= MXR_MXR_RES_WIDTH(plane->mode_width);
+		val  = MXR_MXR_RES_HEIGHT(mode->vdisplay);
+		val |= MXR_MXR_RES_WIDTH(mode->hdisplay);
 		mixer_reg_write(res, MXR_RESOLUTION, val);
 	}
 
-	val  = MXR_GRP_WH_WIDTH(plane->src_width);
-	val |= MXR_GRP_WH_HEIGHT(plane->src_height);
+	val  = MXR_GRP_WH_WIDTH(plane->src_w);
+	val |= MXR_GRP_WH_HEIGHT(plane->src_h);
 	val |= MXR_GRP_WH_H_SCALE(x_ratio);
 	val |= MXR_GRP_WH_V_SCALE(y_ratio);
 	mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
@@ -605,8 +625,8 @@
 	/* set buffer address to mixer */
 	mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
 
-	mixer_cfg_scan(ctx, plane->mode_height);
-	mixer_cfg_rgb_fmt(ctx, plane->mode_height);
+	mixer_cfg_scan(ctx, mode->vdisplay);
+	mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
 	mixer_cfg_layer(ctx, win, true);
 
 	/* layer update mandatory for mixer 16.0.33.0 */
@@ -710,6 +730,7 @@
 	struct mixer_context *ctx = arg;
 	struct mixer_resources *res = &ctx->mixer_res;
 	u32 val, base, shadow;
+	int win;
 
 	spin_lock(&res->reg_slock);
 
@@ -735,8 +756,15 @@
 				goto out;
 		}
 
-		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
-		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+		drm_crtc_handle_vblank(&ctx->crtc->base);
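+		/* signal completion for each plane with an update pending */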
+		for (win = 0; win < MIXER_WIN_NR; win++) {
+			struct exynos_drm_plane *plane = &ctx->planes[win];
+
+			if (!plane->pending_fb)
+				continue;
+
+			exynos_drm_crtc_finish_update(ctx->crtc, plane);
+		}
 
 		/* set wait vsync event to zero and wake up queue. */
 		if (atomic_read(&ctx->wait_vsync_event)) {
@@ -881,8 +909,7 @@
 		}
 	}
 
-	ret = drm_iommu_attach_device_if_possible(mixer_ctx->crtc, drm_dev,
-								mixer_ctx->dev);
+	ret = drm_iommu_attach_device(drm_dev, mixer_ctx->dev);
 	if (ret)
 		priv->pipe--;
 
@@ -891,8 +918,7 @@
 
 static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
 {
-	if (is_drm_iommu_supported(mixer_ctx->drm_dev))
-		drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+	drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
 }
 
 static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
@@ -900,10 +926,9 @@
 	struct mixer_context *mixer_ctx = crtc->ctx;
 	struct mixer_resources *res = &mixer_ctx->mixer_res;
 
-	if (!mixer_ctx->powered) {
-		mixer_ctx->int_en |= MXR_INT_EN_VSYNC;
+	__set_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
+	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
 		return 0;
-	}
 
 	/* enable vsync interrupt */
 	mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
@@ -917,54 +942,48 @@
 	struct mixer_context *mixer_ctx = crtc->ctx;
 	struct mixer_resources *res = &mixer_ctx->mixer_res;
 
-	if (!mixer_ctx->powered) {
-		mixer_ctx->int_en &= MXR_INT_EN_VSYNC;
+	__clear_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
+
+	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
 		return;
-	}
 
 	/* disable vsync interrupt */
 	mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
 	mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
 }
 
-static void mixer_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
+static void mixer_update_plane(struct exynos_drm_crtc *crtc,
+			       struct exynos_drm_plane *plane)
 {
 	struct mixer_context *mixer_ctx = crtc->ctx;
 
-	DRM_DEBUG_KMS("win: %d\n", win);
+	DRM_DEBUG_KMS("win: %d\n", plane->zpos);
 
-	mutex_lock(&mixer_ctx->mixer_mutex);
-	if (!mixer_ctx->powered) {
-		mutex_unlock(&mixer_ctx->mixer_mutex);
+	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
 		return;
-	}
-	mutex_unlock(&mixer_ctx->mixer_mutex);
 
-	if (win > 1 && mixer_ctx->vp_enabled)
-		vp_video_buffer(mixer_ctx, win);
+	if (plane->zpos > 1 && mixer_ctx->vp_enabled)
+		vp_video_buffer(mixer_ctx, plane);
 	else
-		mixer_graph_buffer(mixer_ctx, win);
+		mixer_graph_buffer(mixer_ctx, plane);
 }
 
-static void mixer_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
+static void mixer_disable_plane(struct exynos_drm_crtc *crtc,
+				struct exynos_drm_plane *plane)
 {
 	struct mixer_context *mixer_ctx = crtc->ctx;
 	struct mixer_resources *res = &mixer_ctx->mixer_res;
 	unsigned long flags;
 
-	DRM_DEBUG_KMS("win: %d\n", win);
+	DRM_DEBUG_KMS("win: %d\n", plane->zpos);
 
-	mutex_lock(&mixer_ctx->mixer_mutex);
-	if (!mixer_ctx->powered) {
-		mutex_unlock(&mixer_ctx->mixer_mutex);
+	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
 		return;
-	}
-	mutex_unlock(&mixer_ctx->mixer_mutex);
 
 	spin_lock_irqsave(&res->reg_slock, flags);
 	mixer_vsync_set_update(mixer_ctx, false);
 
-	mixer_cfg_layer(mixer_ctx, win, false);
+	mixer_cfg_layer(mixer_ctx, plane->zpos, false);
 
 	mixer_vsync_set_update(mixer_ctx, true);
 	spin_unlock_irqrestore(&res->reg_slock, flags);
@@ -975,12 +994,8 @@
 	struct mixer_context *mixer_ctx = crtc->ctx;
 	int err;
 
-	mutex_lock(&mixer_ctx->mixer_mutex);
-	if (!mixer_ctx->powered) {
-		mutex_unlock(&mixer_ctx->mixer_mutex);
+	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
 		return;
-	}
-	mutex_unlock(&mixer_ctx->mixer_mutex);
 
 	err = drm_vblank_get(mixer_ctx->drm_dev, mixer_ctx->pipe);
 	if (err < 0) {
@@ -1008,13 +1023,8 @@
 	struct mixer_resources *res = &ctx->mixer_res;
 	int ret;
 
-	mutex_lock(&ctx->mixer_mutex);
-	if (ctx->powered) {
-		mutex_unlock(&ctx->mixer_mutex);
+	if (test_bit(MXR_BIT_POWERED, &ctx->flags))
 		return;
-	}
-
-	mutex_unlock(&ctx->mixer_mutex);
 
 	pm_runtime_get_sync(ctx->dev);
 
@@ -1046,15 +1056,14 @@
 		}
 	}
 
-	mutex_lock(&ctx->mixer_mutex);
-	ctx->powered = true;
-	mutex_unlock(&ctx->mixer_mutex);
+	set_bit(MXR_BIT_POWERED, &ctx->flags);
 
 	mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
 
-	if (ctx->int_en & MXR_INT_EN_VSYNC)
+	if (test_bit(MXR_BIT_VSYNC, &ctx->flags)) {
 		mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
-	mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+		mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
+	}
 	mixer_win_reset(ctx);
 }
 
@@ -1064,24 +1073,16 @@
 	struct mixer_resources *res = &ctx->mixer_res;
 	int i;
 
-	mutex_lock(&ctx->mixer_mutex);
-	if (!ctx->powered) {
-		mutex_unlock(&ctx->mixer_mutex);
+	if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
 		return;
-	}
-	mutex_unlock(&ctx->mixer_mutex);
 
 	mixer_stop(ctx);
 	mixer_regs_dump(ctx);
 
 	for (i = 0; i < MIXER_WIN_NR; i++)
-		mixer_win_disable(crtc, i);
+		mixer_disable_plane(crtc, &ctx->planes[i]);
 
-	ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
-
-	mutex_lock(&ctx->mixer_mutex);
-	ctx->powered = false;
-	mutex_unlock(&ctx->mixer_mutex);
+	clear_bit(MXR_BIT_POWERED, &ctx->flags);
 
 	clk_disable_unprepare(res->hdmi);
 	clk_disable_unprepare(res->mixer);
@@ -1120,8 +1121,8 @@
 	.enable_vblank		= mixer_enable_vblank,
 	.disable_vblank		= mixer_disable_vblank,
 	.wait_for_vblank	= mixer_wait_for_vblank,
-	.win_commit		= mixer_win_commit,
-	.win_disable		= mixer_win_disable,
+	.update_plane		= mixer_update_plane,
+	.disable_plane		= mixer_disable_plane,
 };
 
 static struct mixer_drv_data exynos5420_mxr_drv_data = {
@@ -1184,7 +1185,6 @@
 	struct mixer_context *ctx = dev_get_drvdata(dev);
 	struct drm_device *drm_dev = data;
 	struct exynos_drm_plane *exynos_plane;
-	enum drm_plane_type type;
 	unsigned int zpos;
 	int ret;
 
@@ -1193,10 +1193,23 @@
 		return ret;
 
 	for (zpos = 0; zpos < MIXER_WIN_NR; zpos++) {
+		enum drm_plane_type type;
+		const uint32_t *formats;
+		unsigned int fcount;
+
 		type = (zpos == MIXER_DEFAULT_WIN) ? DRM_PLANE_TYPE_PRIMARY :
 						DRM_PLANE_TYPE_OVERLAY;
+		if (zpos < VP_DEFAULT_WIN) {
+			formats = mixer_formats;
+			fcount = ARRAY_SIZE(mixer_formats);
+		} else {
+			formats = vp_formats;
+			fcount = ARRAY_SIZE(vp_formats);
+		}
+
 		ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
-					1 << ctx->pipe, type, zpos);
+					1 << ctx->pipe, type, formats, fcount,
+					zpos);
 		if (ret)
 			return ret;
 	}
@@ -1243,8 +1256,6 @@
 		return -ENOMEM;
 	}
 
-	mutex_init(&ctx->mixer_mutex);
-
 	if (dev->of_node) {
 		const struct of_device_id *match;
 
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
new file mode 100644
index 0000000..c78cf3f
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -0,0 +1,18 @@
+config DRM_FSL_DCU
+	tristate "DRM Support for Freescale DCU"
+	depends on DRM && OF && ARM
+	select BACKLIGHT_CLASS_DEVICE
+	select BACKLIGHT_LCD_SUPPORT
+	select DRM_KMS_HELPER
+	select DRM_KMS_CMA_HELPER
+	select DRM_KMS_FB_HELPER
+	select DRM_PANEL
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_SYS_FOPS
+	select REGMAP_MMIO
+	select VIDEOMODE_HELPERS
+	help
+	  Choose this option if you have a Freescale DCU chipset.
+	  If M is selected the module will be called fsl-dcu-drm.
diff --git a/drivers/gpu/drm/fsl-dcu/Makefile b/drivers/gpu/drm/fsl-dcu/Makefile
new file mode 100644
index 0000000..6ea1523
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/Makefile
@@ -0,0 +1,7 @@
+fsl-dcu-drm-y := fsl_dcu_drm_drv.o \
+		 fsl_dcu_drm_kms.o \
+		 fsl_dcu_drm_rgb.o \
+		 fsl_dcu_drm_plane.o \
+		 fsl_dcu_drm_crtc.o \
+		 fsl_dcu_drm_fbdev.o
+obj-$(CONFIG_DRM_FSL_DCU)	+= fsl-dcu-drm.o
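
Enabling the new driver follows from the Kconfig entry above; for example,
building it as a module amounts to:

CONFIG_DRM_FSL_DCU=m

which, per the Makefile, produces a module named fsl-dcu-drm.
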
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
new file mode 100644
index 0000000..82a3d31
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "fsl_dcu_drm_crtc.h"
+#include "fsl_dcu_drm_drv.h"
+#include "fsl_dcu_drm_plane.h"
+
+static void fsl_dcu_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+					  struct drm_crtc_state *old_crtc_state)
+{
+}
+
+static int fsl_dcu_drm_crtc_atomic_check(struct drm_crtc *crtc,
+					 struct drm_crtc_state *state)
+{
+	return 0;
+}
+
+static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
+					  struct drm_crtc_state *old_crtc_state)
+{
+}
+
+static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	int ret;
+
+	ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+				 DCU_MODE_DCU_MODE_MASK,
+				 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
+	if (ret)
+		dev_err(fsl_dev->dev, "Disable CRTC failed\n");
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		dev_err(fsl_dev->dev, "Enable CRTC failed\n");
+}
+
+static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	int ret;
+
+	ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+				 DCU_MODE_DCU_MODE_MASK,
+				 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
+	if (ret)
+		dev_err(fsl_dev->dev, "Enable CRTC failed\n");
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		dev_err(fsl_dev->dev, "Enable CRTC failed\n");
+}
+
+static bool fsl_dcu_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+					const struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	struct drm_display_mode *mode = &crtc->state->mode;
+	unsigned int hbp, hfp, hsw, vbp, vfp, vsw, div, index;
+	unsigned long dcuclk;
+	int ret;
+
+	index = drm_crtc_index(crtc);
+	dcuclk = clk_get_rate(fsl_dev->clk);
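+	/* mode->clock is in kHz; derive the integer pixel clock divider */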
+	div = dcuclk / mode->clock / 1000;
+
+	/* Configure timings: */
+	hbp = mode->htotal - mode->hsync_end;
+	hfp = mode->hsync_start - mode->hdisplay;
+	hsw = mode->hsync_end - mode->hsync_start;
+	vbp = mode->vtotal - mode->vsync_end;
+	vfp = mode->vsync_start - mode->vdisplay;
+	vsw = mode->vsync_end - mode->vsync_start;
+
+	ret = regmap_write(fsl_dev->regmap, DCU_HSYN_PARA,
+			   DCU_HSYN_PARA_BP(hbp) |
+			   DCU_HSYN_PARA_PW(hsw) |
+			   DCU_HSYN_PARA_FP(hfp));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_VSYN_PARA,
+			   DCU_VSYN_PARA_BP(vbp) |
+			   DCU_VSYN_PARA_PW(vsw) |
+			   DCU_VSYN_PARA_FP(vfp));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_DISP_SIZE,
+			   DCU_DISP_SIZE_DELTA_Y(mode->vdisplay) |
+			   DCU_DISP_SIZE_DELTA_X(mode->hdisplay));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_DIV_RATIO, div);
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_SYN_POL,
+			   DCU_SYN_POL_INV_VS_LOW | DCU_SYN_POL_INV_HS_LOW);
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_BGND, DCU_BGND_R(0) |
+			   DCU_BGND_G(0) | DCU_BGND_B(0));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_DCU_MODE,
+			   DCU_MODE_BLEND_ITER(1) | DCU_MODE_RASTER_EN);
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_THRESHOLD,
+			   DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
+			   DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
+			   DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		goto set_failed;
+	return;
+set_failed:
+	dev_err(dev->dev, "set DCU register failed\n");
+}
+
+static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
+	.atomic_begin = fsl_dcu_drm_crtc_atomic_begin,
+	.atomic_check = fsl_dcu_drm_crtc_atomic_check,
+	.atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
+	.disable = fsl_dcu_drm_disable_crtc,
+	.enable = fsl_dcu_drm_crtc_enable,
+	.mode_fixup = fsl_dcu_drm_crtc_mode_fixup,
+	.mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb,
+};
+
+static const struct drm_crtc_funcs fsl_dcu_drm_crtc_funcs = {
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.destroy = drm_crtc_cleanup,
+	.page_flip = drm_atomic_helper_page_flip,
+	.reset = drm_atomic_helper_crtc_reset,
+	.set_config = drm_atomic_helper_set_config,
+};
+
+int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
+{
+	struct drm_plane *primary;
+	struct drm_crtc *crtc = &fsl_dev->crtc;
+	unsigned int i, j, reg_num;
+	int ret;
+
+	primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
+	if (!primary)
+		return -ENOMEM;
+	ret = drm_crtc_init_with_planes(fsl_dev->drm, crtc, primary, NULL,
+					&fsl_dcu_drm_crtc_funcs);
+	if (ret < 0)
+		return ret;
+
+	drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs);
+
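+	/* The LS1021A variant has one extra control register per layer */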
+	if (!strcmp(fsl_dev->soc->name, "ls1021a"))
+		reg_num = LS1021A_LAYER_REG_NUM;
+	else
+		reg_num = VF610_LAYER_REG_NUM;
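+	/* Zero all layer descriptors so no stale configuration is scanned out */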
+	for (i = 0; i < fsl_dev->soc->total_layer; i++) {
+		for (j = 0; j < reg_num; j++) {
+			ret = regmap_write(fsl_dev->regmap,
+					   DCU_CTRLDESCLN(i, j), 0);
+			if (ret)
+				goto init_failed;
+		}
+	}
+	ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+				 DCU_MODE_DCU_MODE_MASK,
+				 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
+	if (ret)
+		goto init_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		goto init_failed;
+
+	return 0;
+init_failed:
+	dev_err(fsl_dev->dev, "init DCU register failed\n");
+	return ret;
+}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h
new file mode 100644
index 0000000..43d4da2
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_DCU_DRM_CRTC_H__
+#define __FSL_DCU_DRM_CRTC_H__
+
+struct fsl_dcu_drm_device;
+
+int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev);
+
+#endif /* __FSL_DCU_DRM_CRTC_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
new file mode 100644
index 0000000..9a8e2da
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -0,0 +1,404 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "fsl_dcu_drm_crtc.h"
+#include "fsl_dcu_drm_drv.h"
+
+static const struct regmap_config fsl_dcu_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.cache_type = REGCACHE_RBTREE,
+};
+
+static int fsl_dcu_drm_irq_init(struct drm_device *dev)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	unsigned int value;
+	int ret;
+
+	ret = drm_irq_install(dev, fsl_dev->irq);
+	if (ret < 0) {
+		dev_err(dev->dev, "failed to install IRQ handler\n");
+		return ret;
+	}
+
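+	/* Clear any pending status and drop all mask bits except VBLANK */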
+	ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
+	ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
+	if (ret)
+		dev_err(dev->dev, "read DCU_INT_MASK failed\n");
+	value &= DCU_INT_MASK_VBLANK;
+	ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_MASK failed\n");
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n");
+
+	return ret;
+}
+
+static int fsl_dcu_load(struct drm_device *drm, unsigned long flags)
+{
+	struct device *dev = drm->dev;
+	struct fsl_dcu_drm_device *fsl_dev = drm->dev_private;
+	int ret;
+
+	ret = fsl_dcu_drm_modeset_init(fsl_dev);
+	if (ret < 0) {
+		dev_err(dev, "failed to initialize mode setting\n");
+		return ret;
+	}
+
+	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+	if (ret < 0) {
+		dev_err(dev, "failed to initialize vblank\n");
+		goto done;
+	}
+	drm->vblank_disable_allowed = true;
+
+	ret = fsl_dcu_drm_irq_init(drm);
+	if (ret < 0)
+		goto done;
+	drm->irq_enabled = true;
+
+	fsl_dcu_fbdev_init(drm);
+
+	return 0;
+done:
+	if (ret) {
+		drm_mode_config_cleanup(drm);
+		drm_vblank_cleanup(drm);
+		drm_irq_uninstall(drm);
+		drm->dev_private = NULL;
+	}
+
+	return ret;
+}
+
+static int fsl_dcu_unload(struct drm_device *dev)
+{
+	drm_mode_config_cleanup(dev);
+	drm_vblank_cleanup(dev);
+	drm_irq_uninstall(dev);
+
+	dev->dev_private = NULL;
+
+	return 0;
+}
+
+static void fsl_dcu_drm_preclose(struct drm_device *dev, struct drm_file *file)
+{
+}
+
+static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	unsigned int int_status;
+	int ret;
+
+	ret = regmap_read(fsl_dev->regmap, DCU_INT_STATUS, &int_status);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
+	if (int_status & DCU_INT_STATUS_VBLANK)
+		drm_handle_vblank(dev, 0);
+
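+	/* Clear all status bits, then trigger a DCU register update */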
+	ret = regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0xffffffff);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_STATUS failed\n");
+	ret = regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+			   DCU_UPDATE_MODE_READREG);
+	if (ret)
+		dev_err(dev->dev, "set DCU_UPDATE_MODE failed\n");
+
+	return IRQ_HANDLED;
+}
+
+static int fsl_dcu_drm_enable_vblank(struct drm_device *dev, int crtc)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	unsigned int value;
+	int ret;
+
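+	/* Clearing the mask bit unmasks the VBLANK interrupt */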
+	ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
+	if (ret)
+		dev_err(dev->dev, "read DCU_INT_MASK failed\n");
+	value &= ~DCU_INT_MASK_VBLANK;
+	ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_MASK failed\n");
+	return 0;
+}
+
+static void fsl_dcu_drm_disable_vblank(struct drm_device *dev, int crtc)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+	unsigned int value;
+	int ret;
+
+	ret = regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
+	if (ret)
+		dev_err(dev->dev, "read DCU_INT_MASK failed\n");
+	value |= DCU_INT_MASK_VBLANK;
+	ret = regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
+	if (ret)
+		dev_err(dev->dev, "set DCU_INT_MASK failed\n");
+}
+
+static const struct file_operations fsl_dcu_drm_fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.release	= drm_release,
+	.unlocked_ioctl	= drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= drm_compat_ioctl,
+#endif
+	.poll		= drm_poll,
+	.read		= drm_read,
+	.llseek		= no_llseek,
+	.mmap		= drm_gem_cma_mmap,
+};
+
+static struct drm_driver fsl_dcu_drm_driver = {
+	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
+				| DRIVER_PRIME | DRIVER_ATOMIC,
+	.load			= fsl_dcu_load,
+	.unload			= fsl_dcu_unload,
+	.preclose		= fsl_dcu_drm_preclose,
+	.irq_handler		= fsl_dcu_drm_irq,
+	.get_vblank_counter	= drm_vblank_count,
+	.enable_vblank		= fsl_dcu_drm_enable_vblank,
+	.disable_vblank		= fsl_dcu_drm_disable_vblank,
+	.gem_free_object	= drm_gem_cma_free_object,
+	.gem_vm_ops		= &drm_gem_cma_vm_ops,
+	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
+	.gem_prime_import	= drm_gem_prime_import,
+	.gem_prime_export	= drm_gem_prime_export,
+	.gem_prime_get_sg_table	= drm_gem_cma_prime_get_sg_table,
+	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+	.gem_prime_vmap		= drm_gem_cma_prime_vmap,
+	.gem_prime_vunmap	= drm_gem_cma_prime_vunmap,
+	.gem_prime_mmap		= drm_gem_cma_prime_mmap,
+	.dumb_create		= drm_gem_cma_dumb_create,
+	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
+	.dumb_destroy		= drm_gem_dumb_destroy,
+	.fops			= &fsl_dcu_drm_fops,
+	.name			= "fsl-dcu-drm",
+	.desc			= "Freescale DCU DRM",
+	.date			= "20150213",
+	.major			= 1,
+	.minor			= 0,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int fsl_dcu_drm_pm_suspend(struct device *dev)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev);
+
+	if (!fsl_dev)
+		return 0;
+
+	drm_kms_helper_poll_disable(fsl_dev->drm);
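+	/* Serve register I/O from the cache; regcache_sync() replays it on resume */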
+	regcache_cache_only(fsl_dev->regmap, true);
+	regcache_mark_dirty(fsl_dev->regmap);
+	clk_disable(fsl_dev->clk);
+	clk_unprepare(fsl_dev->clk);
+
+	return 0;
+}
+
+static int fsl_dcu_drm_pm_resume(struct device *dev)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev);
+	int ret;
+
+	if (!fsl_dev)
+		return 0;
+
+	ret = clk_prepare(fsl_dev->clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to prepare dcu clk\n");
+		return ret;
+	}
+	ret = clk_enable(fsl_dev->clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable dcu clk\n");
+		clk_unprepare(fsl_dev->clk);
+		return ret;
+	}
+
+	drm_kms_helper_poll_enable(fsl_dev->drm);
+	regcache_cache_only(fsl_dev->regmap, false);
+	regcache_sync(fsl_dev->regmap);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops fsl_dcu_drm_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(fsl_dcu_drm_pm_suspend, fsl_dcu_drm_pm_resume)
+};
+
+static const struct fsl_dcu_soc_data fsl_dcu_ls1021a_data = {
+	.name = "ls1021a",
+	.total_layer = 16,
+	.max_layer = 4,
+};
+
+static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = {
+	.name = "vf610",
+	.total_layer = 64,
+	.max_layer = 6,
+};
+
+static const struct of_device_id fsl_dcu_of_match[] = {
+	{
+		.compatible = "fsl,ls1021a-dcu",
+		.data = &fsl_dcu_ls1021a_data,
+	}, {
+		.compatible = "fsl,vf610-dcu",
+		.data = &fsl_dcu_vf610_data,
+	}, {
+	},
+};
+MODULE_DEVICE_TABLE(of, fsl_dcu_of_match);
+
+static int fsl_dcu_drm_probe(struct platform_device *pdev)
+{
+	struct fsl_dcu_drm_device *fsl_dev;
+	struct drm_device *drm;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	void __iomem *base;
+	struct drm_driver *driver = &fsl_dcu_drm_driver;
+	const struct of_device_id *id;
+	int ret;
+
+	fsl_dev = devm_kzalloc(dev, sizeof(*fsl_dev), GFP_KERNEL);
+	if (!fsl_dev)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "could not get memory IO resource\n");
+		return -ENODEV;
+	}
+
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base)) {
+		ret = PTR_ERR(base);
+		return ret;
+	}
+
+	fsl_dev->irq = platform_get_irq(pdev, 0);
+	if (fsl_dev->irq < 0) {
+		dev_err(dev, "failed to get irq\n");
+		return -ENXIO;
+	}
+
+	fsl_dev->clk = devm_clk_get(dev, "dcu");
+	if (IS_ERR(fsl_dev->clk)) {
+		ret = PTR_ERR(fsl_dev->clk);
+		dev_err(dev, "failed to get dcu clock\n");
+		return ret;
+	}
+	ret = clk_prepare(fsl_dev->clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to prepare dcu clk\n");
+		return ret;
+	}
+	ret = clk_enable(fsl_dev->clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable dcu clk\n");
+		clk_unprepare(fsl_dev->clk);
+		return ret;
+	}
+
+	fsl_dev->regmap = devm_regmap_init_mmio(dev, base,
+			&fsl_dcu_regmap_config);
+	if (IS_ERR(fsl_dev->regmap)) {
+		dev_err(dev, "regmap init failed\n");
+		ret = PTR_ERR(fsl_dev->regmap);
+		goto err_clk;
+	}
+
+	id = of_match_node(fsl_dcu_of_match, pdev->dev.of_node);
+	if (!id) {
+		ret = -ENODEV;
+		goto err_clk;
+	}
+	fsl_dev->soc = id->data;
+
+	drm = drm_dev_alloc(driver, dev);
+	if (!drm) {
+		ret = -ENOMEM;
+		goto err_clk;
+	}
+
+	fsl_dev->dev = dev;
+	fsl_dev->drm = drm;
+	fsl_dev->np = dev->of_node;
+	drm->dev_private = fsl_dev;
+	dev_set_drvdata(dev, fsl_dev);
+	drm_dev_set_unique(drm, dev_name(dev));
+
+	ret = drm_dev_register(drm, 0);
+	if (ret < 0)
+		goto unref;
+
+	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
+		 driver->major, driver->minor, driver->patchlevel,
+		 driver->date, drm->primary->index);
+
+	return 0;
+
+unref:
+	drm_dev_unref(drm);
+err_clk:
+	clk_disable_unprepare(fsl_dev->clk);
+	return ret;
+}
+
+static int fsl_dcu_drm_remove(struct platform_device *pdev)
+{
+	struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
+
+	drm_put_dev(fsl_dev->drm);
+
+	return 0;
+}
+
+static struct platform_driver fsl_dcu_drm_platform_driver = {
+	.probe		= fsl_dcu_drm_probe,
+	.remove		= fsl_dcu_drm_remove,
+	.driver		= {
+		.name	= "fsl-dcu",
+		.pm	= &fsl_dcu_drm_pm_ops,
+		.of_match_table = fsl_dcu_of_match,
+	},
+};
+
+module_platform_driver(fsl_dcu_drm_platform_driver);
+
+MODULE_DESCRIPTION("Freescale DCU DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
new file mode 100644
index 0000000..579b9e4
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_DCU_DRM_DRV_H__
+#define __FSL_DCU_DRM_DRV_H__
+
+#include "fsl_dcu_drm_crtc.h"
+#include "fsl_dcu_drm_output.h"
+#include "fsl_dcu_drm_plane.h"
+
+#define DCU_DCU_MODE			0x0010
+#define DCU_MODE_BLEND_ITER(x)		((x) << 20)
+#define DCU_MODE_RASTER_EN		BIT(14)
+#define DCU_MODE_DCU_MODE(x)		(x)
+#define DCU_MODE_DCU_MODE_MASK		0x03
+#define DCU_MODE_OFF			0
+#define DCU_MODE_NORMAL			1
+#define DCU_MODE_TEST			2
+#define DCU_MODE_COLORBAR		3
+
+#define DCU_BGND			0x0014
+#define DCU_BGND_R(x)			((x) << 16)
+#define DCU_BGND_G(x)			((x) << 8)
+#define DCU_BGND_B(x)			(x)
+
+#define DCU_DISP_SIZE			0x0018
+#define DCU_DISP_SIZE_DELTA_Y(x)	((x) << 16)
+/* Register value is 1/16 of the horizontal resolution */
+#define DCU_DISP_SIZE_DELTA_X(x)	((x) >> 4)
+
+#define DCU_HSYN_PARA			0x001c
+#define DCU_HSYN_PARA_BP(x)		((x) << 22)
+#define DCU_HSYN_PARA_PW(x)		((x) << 11)
+#define DCU_HSYN_PARA_FP(x)		(x)
+
+#define DCU_VSYN_PARA			0x0020
+#define DCU_VSYN_PARA_BP(x)		((x) << 22)
+#define DCU_VSYN_PARA_PW(x)		((x) << 11)
+#define DCU_VSYN_PARA_FP(x)		(x)
+
+#define DCU_SYN_POL			0x0024
+#define DCU_SYN_POL_INV_PXCK_FALL	(0 << 6)
+#define DCU_SYN_POL_NEG_REMAIN		(0 << 5)
+#define DCU_SYN_POL_INV_VS_LOW		BIT(1)
+#define DCU_SYN_POL_INV_HS_LOW		BIT(0)
+
+#define DCU_THRESHOLD			0x0028
+#define DCU_THRESHOLD_LS_BF_VS(x)	((x) << 16)
+#define DCU_THRESHOLD_OUT_BUF_HIGH(x)	((x) << 8)
+#define DCU_THRESHOLD_OUT_BUF_LOW(x)	(x)
+#define BF_VS_VAL			0x03
+#define BUF_MAX_VAL			0x78
+#define BUF_MIN_VAL			0x0a
+
+#define DCU_INT_STATUS			0x002C
+#define DCU_INT_STATUS_VSYNC		BIT(0)
+#define DCU_INT_STATUS_UNDRUN		BIT(1)
+#define DCU_INT_STATUS_LSBFVS		BIT(2)
+#define DCU_INT_STATUS_VBLANK		BIT(3)
+#define DCU_INT_STATUS_CRCREADY		BIT(4)
+#define DCU_INT_STATUS_CRCOVERFLOW	BIT(5)
+#define DCU_INT_STATUS_P1FIFOLO		BIT(6)
+#define DCU_INT_STATUS_P1FIFOHI		BIT(7)
+#define DCU_INT_STATUS_P2FIFOLO		BIT(8)
+#define DCU_INT_STATUS_P2FIFOHI		BIT(9)
+#define DCU_INT_STATUS_PROGEND		BIT(10)
+#define DCU_INT_STATUS_IPMERROR		BIT(11)
+#define DCU_INT_STATUS_LYRTRANS		BIT(12)
+#define DCU_INT_STATUS_DMATRANS		BIT(14)
+#define DCU_INT_STATUS_P3FIFOLO		BIT(16)
+#define DCU_INT_STATUS_P3FIFOHI		BIT(17)
+#define DCU_INT_STATUS_P4FIFOLO		BIT(18)
+#define DCU_INT_STATUS_P4FIFOHI		BIT(19)
+#define DCU_INT_STATUS_P1EMPTY		BIT(26)
+#define DCU_INT_STATUS_P2EMPTY		BIT(27)
+#define DCU_INT_STATUS_P3EMPTY		BIT(28)
+#define DCU_INT_STATUS_P4EMPTY		BIT(29)
+
+#define DCU_INT_MASK			0x0030
+#define DCU_INT_MASK_VSYNC		BIT(0)
+#define DCU_INT_MASK_UNDRUN		BIT(1)
+#define DCU_INT_MASK_LSBFVS		BIT(2)
+#define DCU_INT_MASK_VBLANK		BIT(3)
+#define DCU_INT_MASK_CRCREADY		BIT(4)
+#define DCU_INT_MASK_CRCOVERFLOW	BIT(5)
+#define DCU_INT_MASK_P1FIFOLO		BIT(6)
+#define DCU_INT_MASK_P1FIFOHI		BIT(7)
+#define DCU_INT_MASK_P2FIFOLO		BIT(8)
+#define DCU_INT_MASK_P2FIFOHI		BIT(9)
+#define DCU_INT_MASK_PROGEND		BIT(10)
+#define DCU_INT_MASK_IPMERROR		BIT(11)
+#define DCU_INT_MASK_LYRTRANS		BIT(12)
+#define DCU_INT_MASK_DMATRANS		BIT(14)
+#define DCU_INT_MASK_P3FIFOLO		BIT(16)
+#define DCU_INT_MASK_P3FIFOHI		BIT(17)
+#define DCU_INT_MASK_P4FIFOLO		BIT(18)
+#define DCU_INT_MASK_P4FIFOHI		BIT(19)
+#define DCU_INT_MASK_P1EMPTY		BIT(26)
+#define DCU_INT_MASK_P2EMPTY		BIT(27)
+#define DCU_INT_MASK_P3EMPTY		BIT(28)
+#define DCU_INT_MASK_P4EMPTY		BIT(29)
+
+#define DCU_DIV_RATIO			0x0054
+
+#define DCU_UPDATE_MODE			0x00cc
+#define DCU_UPDATE_MODE_MODE		BIT(31)
+#define DCU_UPDATE_MODE_READREG		BIT(30)
+
+#define DCU_DCFB_MAX			0x300
+
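+/* Per-layer control descriptors: 0x40 apart from 0x200; registers numbered from 1 */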
+#define DCU_CTRLDESCLN(layer, reg)	(0x200 + (reg - 1) * 4 + (layer) * 0x40)
+
+#define DCU_LAYER_HEIGHT(x)		((x) << 16)
+#define DCU_LAYER_WIDTH(x)		(x)
+
+#define DCU_LAYER_POSY(x)		((x) << 16)
+#define DCU_LAYER_POSX(x)		(x)
+
+#define DCU_LAYER_EN			BIT(31)
+#define DCU_LAYER_TILE_EN		BIT(30)
+#define DCU_LAYER_DATA_SEL_CLUT		BIT(29)
+#define DCU_LAYER_SAFETY_EN		BIT(28)
+#define DCU_LAYER_TRANS(x)		((x) << 20)
+#define DCU_LAYER_BPP(x)		((x) << 16)
+#define DCU_LAYER_RLE_EN		BIT(15)
+#define DCU_LAYER_LUOFFS(x)		((x) << 4)
+#define DCU_LAYER_BB_ON			BIT(2)
+#define DCU_LAYER_AB(x)			(x)
+
+#define DCU_LAYER_CKMAX_R(x)		((x) << 16)
+#define DCU_LAYER_CKMAX_G(x)		((x) << 8)
+#define DCU_LAYER_CKMAX_B(x)		(x)
+
+#define DCU_LAYER_CKMIN_R(x)		((x) << 16)
+#define DCU_LAYER_CKMIN_G(x)		((x) << 8)
+#define DCU_LAYER_CKMIN_B(x)		(x)
+
+#define DCU_LAYER_TILE_VER(x)		((x) << 16)
+#define DCU_LAYER_TILE_HOR(x)		(x)
+
+#define DCU_LAYER_FG_FCOLOR(x)		(x)
+
+#define DCU_LAYER_BG_BCOLOR(x)		(x)
+
+#define DCU_LAYER_POST_SKIP(x)		((x) << 16)
+#define DCU_LAYER_PRE_SKIP(x)		(x)
+
+#define FSL_DCU_RGB565			4
+#define FSL_DCU_RGB888			5
+#define FSL_DCU_ARGB8888		6
+#define FSL_DCU_ARGB1555		11
+#define FSL_DCU_ARGB4444		12
+#define FSL_DCU_YUV422			14
+
+#define VF610_LAYER_REG_NUM		9
+#define LS1021A_LAYER_REG_NUM		10
+
+struct clk;
+struct device;
+struct drm_device;
+
+struct fsl_dcu_soc_data {
+	const char *name;
+	/* total number of hardware layers */
+	unsigned int total_layer;
+	/* maximum number of layers the DCU supports */
+	unsigned int max_layer;
+};
+
+struct fsl_dcu_drm_device {
+	struct device *dev;
+	struct device_node *np;
+	struct regmap *regmap;
+	int irq;
+	struct clk *clk;
+	/* protects hardware registers */
+	spinlock_t irq_lock;
+	struct drm_device *drm;
+	struct drm_fbdev_cma *fbdev;
+	struct drm_crtc crtc;
+	struct drm_encoder encoder;
+	struct fsl_dcu_drm_connector connector;
+	const struct fsl_dcu_soc_data *soc;
+};
+
+void fsl_dcu_fbdev_init(struct drm_device *dev);
+int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev);
+
+#endif /* __FSL_DCU_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c
new file mode 100644
index 0000000..8b8b819
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_fbdev.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "fsl_dcu_drm_drv.h"
+
+/* initialize fbdev helper */
+void fsl_dcu_fbdev_init(struct drm_device *dev)
+{
+	struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev->dev);
+
+	fsl_dev->fbdev = drm_fbdev_cma_init(dev, 24, 1, 1);
+}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
new file mode 100644
index 0000000..0ef5959
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "fsl_dcu_drm_crtc.h"
+#include "fsl_dcu_drm_drv.h"
+
+static const struct drm_mode_config_funcs fsl_dcu_drm_mode_config_funcs = {
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
+	.fb_create = drm_fb_cma_create,
+};
+
+int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
+{
+	drm_mode_config_init(fsl_dev->drm);
+
+	fsl_dev->drm->mode_config.min_width = 0;
+	fsl_dev->drm->mode_config.min_height = 0;
+	fsl_dev->drm->mode_config.max_width = 2031;
+	fsl_dev->drm->mode_config.max_height = 2047;
+	fsl_dev->drm->mode_config.funcs = &fsl_dcu_drm_mode_config_funcs;
+
+	drm_kms_helper_poll_init(fsl_dev->drm);
+	fsl_dcu_drm_crtc_create(fsl_dev);
+	fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc);
+	fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder);
+	drm_mode_config_reset(fsl_dev->drm);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
new file mode 100644
index 0000000..7093109
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_DCU_DRM_CONNECTOR_H__
+#define __FSL_DCU_DRM_CONNECTOR_H__
+
+struct fsl_dcu_drm_connector {
+	struct drm_connector base;
+	struct drm_encoder *encoder;
+	struct drm_panel *panel;
+};
+
+static inline struct fsl_dcu_drm_connector *
+to_fsl_dcu_connector(struct drm_connector *con)
+{
+	return con ? container_of(con, struct fsl_dcu_drm_connector, base)
+		     : NULL;
+}
+
+int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
+				 struct drm_encoder *encoder);
+int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
+			       struct drm_crtc *crtc);
+
+#endif /* __FSL_DCU_DRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
new file mode 100644
index 0000000..82be6b8
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/regmap.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "fsl_dcu_drm_drv.h"
+#include "fsl_dcu_drm_plane.h"
+
+static int fsl_dcu_drm_plane_index(struct drm_plane *plane)
+{
+	struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
+	unsigned int total_layer = fsl_dev->soc->total_layer;
+	unsigned int index;
+
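+	/* DRM plane N maps to hardware layer (total_layer - N - 1) */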
+	index = drm_plane_index(plane);
+	if (index < total_layer)
+		return total_layer - index - 1;
+
+	dev_err(fsl_dev->dev, "No more layer left\n");
+	return -EINVAL;
+}
+
+static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane,
+					  struct drm_plane_state *state)
+{
+	struct drm_framebuffer *fb = state->fb;
+
+	/* A plane being disabled has no framebuffer to validate */
+	if (!fb)
+		return 0;
+
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_ARGB4444:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_YUV422:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane,
+					     struct drm_plane_state *old_state)
+{
+	struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
+	unsigned int value;
+	int index, ret;
+
+	index = fsl_dcu_drm_plane_index(plane);
+	if (index < 0)
+		return;
+
+	ret = regmap_read(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), &value);
+	if (ret)
+		dev_err(fsl_dev->dev, "read DCU_INT_MASK failed\n");
+	value &= ~DCU_LAYER_EN;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), value);
+	if (ret)
+		dev_err(fsl_dev->dev, "set DCU register failed\n");
+}
+
+static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
+					    struct drm_plane_state *old_state)
+
+{
+	struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
+	struct drm_plane_state *state = plane->state;
+	struct drm_framebuffer *fb = plane->state->fb;
+	struct drm_gem_cma_object *gem;
+	unsigned int alpha, bpp;
+	int index, ret;
+
+	if (!fb)
+		return;
+
+	index = fsl_dcu_drm_plane_index(plane);
+	if (index < 0)
+		return;
+
+	gem = drm_fb_cma_get_gem_obj(fb, 0);
+
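+	/* Translate the DRM fourcc into the DCU BPP field encoding */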
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_RGB565:
+		bpp = FSL_DCU_RGB565;
+		alpha = 0xff;
+		break;
+	case DRM_FORMAT_RGB888:
+		bpp = FSL_DCU_RGB888;
+		alpha = 0xff;
+		break;
+	case DRM_FORMAT_ARGB8888:
+		bpp = FSL_DCU_ARGB8888;
+		alpha = 0xff;
+		break;
+	case DRM_FORMAT_ARGB4444:
+		bpp = FSL_DCU_ARGB4444;
+		alpha = 0xff;
+		break;
+	case DRM_FORMAT_ARGB1555:
+		bpp = FSL_DCU_ARGB1555;
+		alpha = 0xff;
+		break;
+	case DRM_FORMAT_YUV422:
+		bpp = FSL_DCU_YUV422;
+		alpha = 0xff;
+		break;
+	default:
+		return;
+	}
+
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 1),
+			   DCU_LAYER_HEIGHT(state->crtc_h) |
+			   DCU_LAYER_WIDTH(state->crtc_w));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 2),
+			   DCU_LAYER_POSY(state->crtc_y) |
+			   DCU_LAYER_POSX(state->crtc_x));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap,
+			   DCU_CTRLDESCLN(index, 3), gem->paddr);
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4),
+			   DCU_LAYER_EN |
+			   DCU_LAYER_TRANS(alpha) |
+			   DCU_LAYER_BPP(bpp) |
+			   DCU_LAYER_AB(0));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 5),
+			   DCU_LAYER_CKMAX_R(0xFF) |
+			   DCU_LAYER_CKMAX_G(0xFF) |
+			   DCU_LAYER_CKMAX_B(0xFF));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 6),
+			   DCU_LAYER_CKMIN_R(0) |
+			   DCU_LAYER_CKMIN_G(0) |
+			   DCU_LAYER_CKMIN_B(0));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 7), 0);
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 8),
+			   DCU_LAYER_FG_FCOLOR(0));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 9),
+			   DCU_LAYER_BG_BCOLOR(0));
+	if (ret)
+		goto set_failed;
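+	/* Only the LS1021A has the tenth per-layer register (pre/post skip) */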
+	if (!strcmp(fsl_dev->soc->name, "ls1021a")) {
+		ret = regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 10),
+				   DCU_LAYER_POST_SKIP(0) |
+				   DCU_LAYER_PRE_SKIP(0));
+		if (ret)
+			goto set_failed;
+	}
+	ret = regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+				 DCU_MODE_DCU_MODE_MASK,
+				 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
+	if (ret)
+		goto set_failed;
+	ret = regmap_write(fsl_dev->regmap,
+			   DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
+	if (ret)
+		goto set_failed;
+	return;
+
+set_failed:
+	dev_err(fsl_dev->dev, "set DCU register failed\n");
+}
+
+static void
+fsl_dcu_drm_plane_cleanup_fb(struct drm_plane *plane,
+			     struct drm_framebuffer *fb,
+			     const struct drm_plane_state *new_state)
+{
+}
+
+static int
+fsl_dcu_drm_plane_prepare_fb(struct drm_plane *plane,
+			     struct drm_framebuffer *fb,
+			     const struct drm_plane_state *new_state)
+{
+	return 0;
+}
+
+static const struct drm_plane_helper_funcs fsl_dcu_drm_plane_helper_funcs = {
+	.atomic_check = fsl_dcu_drm_plane_atomic_check,
+	.atomic_disable = fsl_dcu_drm_plane_atomic_disable,
+	.atomic_update = fsl_dcu_drm_plane_atomic_update,
+	.cleanup_fb = fsl_dcu_drm_plane_cleanup_fb,
+	.prepare_fb = fsl_dcu_drm_plane_prepare_fb,
+};
+
+static void fsl_dcu_drm_plane_destroy(struct drm_plane *plane)
+{
+	drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs fsl_dcu_drm_plane_funcs = {
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+	.destroy = fsl_dcu_drm_plane_destroy,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.reset = drm_atomic_helper_plane_reset,
+	.update_plane = drm_atomic_helper_update_plane,
+};
+
+static const u32 fsl_dcu_drm_plane_formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_RGB888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_ARGB4444,
+	DRM_FORMAT_ARGB1555,
+	DRM_FORMAT_YUV422,
+};
+
+struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
+{
+	struct drm_plane *primary;
+	int ret;
+
+	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+	if (!primary) {
+		DRM_DEBUG_KMS("Failed to allocate primary plane\n");
+		return NULL;
+	}
+
+	/* possible_crtc's will be filled in later by crtc_init */
+	ret = drm_universal_plane_init(dev, primary, 0,
+				       &fsl_dcu_drm_plane_funcs,
+				       fsl_dcu_drm_plane_formats,
+				       ARRAY_SIZE(fsl_dcu_drm_plane_formats),
+				       DRM_PLANE_TYPE_PRIMARY);
+	if (ret) {
+		kfree(primary);
+		return NULL;
+	}
+
+	drm_plane_helper_add(primary, &fsl_dcu_drm_plane_helper_funcs);
+
+	return primary;
+}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
new file mode 100644
index 0000000..d657f08
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_DCU_DRM_PLANE_H__
+#define __FSL_DCU_DRM_PLANE_H__
+
+struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev);
+
+#endif /* __FSL_DCU_DRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
new file mode 100644
index 0000000..fe8ab5d
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * Freescale DCU drm device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/backlight.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+
+#include "fsl_dcu_drm_drv.h"
+
+static int
+fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
+				 struct drm_crtc_state *crtc_state,
+				 struct drm_connector_state *conn_state)
+{
+	return 0;
+}
+
+static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder)
+{
+}
+
+static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder)
+{
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+	.atomic_check = fsl_dcu_drm_encoder_atomic_check,
+	.disable = fsl_dcu_drm_encoder_disable,
+	.enable = fsl_dcu_drm_encoder_enable,
+};
+
+static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs encoder_funcs = {
+	.destroy = fsl_dcu_drm_encoder_destroy,
+};
+
+int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
+			       struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder = &fsl_dev->encoder;
+	int ret;
+
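+	/* The DCU drives a single CRTC, so the possible_crtcs mask is fixed */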
+	encoder->possible_crtcs = 1;
+	ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
+			       DRM_MODE_ENCODER_LVDS);
+	if (ret < 0)
+		return ret;
+
+	drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+
+	return 0;
+}
+
+static void fsl_dcu_drm_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+}
+
+static enum drm_connector_status
+fsl_dcu_drm_connector_detect(struct drm_connector *connector, bool force)
+{
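+	/* The LVDS panel is hard-wired, so report the connector as connected */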
+	return connector_status_connected;
+}
+
+static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.destroy = fsl_dcu_drm_connector_destroy,
+	.detect = fsl_dcu_drm_connector_detect,
+	.dpms = drm_atomic_helper_connector_dpms,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.reset = drm_atomic_helper_connector_reset,
+};
+
+static struct drm_encoder *
+fsl_dcu_drm_connector_best_encoder(struct drm_connector *connector)
+{
+	struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector);
+
+	return fsl_con->encoder;
+}
+
+static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
+{
+	struct fsl_dcu_drm_connector *fsl_connector;
+	int (*get_modes)(struct drm_panel *panel);
+	int num_modes = 0;
+
+	fsl_connector = to_fsl_dcu_connector(connector);
+	if (fsl_connector->panel && fsl_connector->panel->funcs &&
+	    fsl_connector->panel->funcs->get_modes) {
+		get_modes = fsl_connector->panel->funcs->get_modes;
+		num_modes = get_modes(fsl_connector->panel);
+	}
+
+	return num_modes;
+}
+
+static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
+					    struct drm_display_mode *mode)
+{
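+	/* DCU_DISP_SIZE_DELTA_X stores the width in 16-pixel units */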
+	if (mode->hdisplay & 0xf)
+		return MODE_ERROR;
+
+	return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+	.best_encoder = fsl_dcu_drm_connector_best_encoder,
+	.get_modes = fsl_dcu_drm_connector_get_modes,
+	.mode_valid = fsl_dcu_drm_connector_mode_valid,
+};
+
+int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
+				 struct drm_encoder *encoder)
+{
+	struct drm_connector *connector = &fsl_dev->connector.base;
+	struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
+	struct device_node *panel_node;
+	int ret;
+
+	fsl_dev->connector.encoder = encoder;
+
+	ret = drm_connector_init(fsl_dev->drm, connector,
+				 &fsl_dcu_drm_connector_funcs,
+				 DRM_MODE_CONNECTOR_LVDS);
+	if (ret < 0)
+		return ret;
+
+	drm_connector_helper_add(connector, &connector_helper_funcs);
+	ret = drm_connector_register(connector);
+	if (ret < 0)
+		goto err_cleanup;
+
+	ret = drm_mode_connector_attach_encoder(connector, encoder);
+	if (ret < 0)
+		goto err_sysfs;
+
+	drm_object_property_set_value(&connector->base,
+				      mode_config->dpms_property,
+				      DRM_MODE_DPMS_OFF);
+
+	panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0);
+	if (panel_node) {
+		fsl_dev->connector.panel = of_drm_find_panel(panel_node);
+		of_node_put(panel_node);
+		if (!fsl_dev->connector.panel) {
+			ret = -EPROBE_DEFER;
+			goto err_sysfs;
+		}
+
+		ret = drm_panel_attach(fsl_dev->connector.panel, connector);
+		if (ret) {
+			dev_err(fsl_dev->dev, "failed to attach panel\n");
+			goto err_sysfs;
+		}
+	}
+
+	return 0;
+
+err_sysfs:
+	drm_connector_unregister(connector);
+err_cleanup:
+	drm_connector_cleanup(connector);
+	return ret;
+}
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index de6f62a..db9f7d0 100644
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -276,12 +276,12 @@
 		break;
 	default:
 		/* software fallback */
-		cfb_copyarea(info, a);
+		drm_fb_helper_cfb_copyarea(info, a);
 		return;
 	}
 
 	if (!gma_power_begin(dev, false)) {
-		cfb_copyarea(info, a);
+		drm_fb_helper_cfb_copyarea(info, a);
 		return;
 	}
 	psb_accel_2d_copy(dev_priv,
@@ -308,7 +308,7 @@
 	/* Avoid the 8 pixel erratum */
 	if (region->width == 8 || region->height == 8 ||
 		(info->flags & FBINFO_HWACCEL_DISABLED))
-		return cfb_copyarea(info, region);
+		return drm_fb_helper_cfb_copyarea(info, region);
 
 	psbfb_copyarea_accel(info, region);
 }
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 2d42ce6..2eaf1b3 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -194,9 +194,9 @@
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcolreg = psbfb_setcolreg,
-	.fb_fillrect = cfb_fillrect,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
 	.fb_copyarea = psbfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_mmap = psbfb_mmap,
 	.fb_sync = psbfb_sync,
 	.fb_ioctl = psbfb_ioctl,
@@ -208,9 +208,9 @@
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcolreg = psbfb_setcolreg,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_pan_display = psbfb_pan,
 	.fb_mmap = psbfb_mmap,
 	.fb_ioctl = psbfb_ioctl,
@@ -222,9 +222,9 @@
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcolreg = psbfb_setcolreg,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_mmap = psbfb_mmap,
 	.fb_ioctl = psbfb_ioctl,
 };
@@ -343,7 +343,6 @@
 	struct drm_framebuffer *fb;
 	struct psb_framebuffer *psbfb = &fbdev->pfb;
 	struct drm_mode_fb_cmd2 mode_cmd;
-	struct device *device = &dev->pdev->dev;
 	int size;
 	int ret;
 	struct gtt_range *backing;
@@ -409,9 +408,9 @@
 
 	mutex_lock(&dev->struct_mutex);
 
-	info = framebuffer_alloc(0, device);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_err1;
 	}
 	info->par = fbdev;
@@ -426,7 +425,6 @@
 	psbfb->fbdev = info;
 
 	fbdev->psb_fb_helper.fb = fb;
-	fbdev->psb_fb_helper.fbdev = info;
 
 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 	strcpy(info->fix.id, "psbdrmfb");
@@ -440,12 +438,6 @@
 	} else	/* Software */
 		info->fbops = &psbfb_unaccel_ops;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unref;
-	}
-
 	info->fix.smem_start = dev->mode_config.fb_base;
 	info->fix.smem_len = size;
 	info->fix.ywrapstep = gtt_roll;
@@ -456,11 +448,6 @@
 	info->screen_size = size;
 
 	if (dev_priv->gtt.stolen_size) {
-		info->apertures = alloc_apertures(1);
-		if (!info->apertures) {
-			ret = -ENOMEM;
-			goto out_unref;
-		}
 		info->apertures->ranges[0].base = dev->mode_config.fb_base;
 		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
 	}
@@ -483,6 +470,8 @@
 		psb_gtt_free_range(dev, backing);
 	else
 		drm_gem_object_unreference(&backing->gem);
+
+	drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
 out_err1:
 	mutex_unlock(&dev->struct_mutex);
 	psb_gtt_free_range(dev, backing);
@@ -570,16 +559,11 @@
 
 static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
 {
-	struct fb_info *info;
 	struct psb_framebuffer *psbfb = &fbdev->pfb;
 
-	if (fbdev->psb_fb_helper.fbdev) {
-		info = fbdev->psb_fb_helper.fbdev;
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&fbdev->psb_fb_helper);
+	drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
+
 	drm_fb_helper_fini(&fbdev->psb_fb_helper);
 	drm_framebuffer_unregister_private(&psbfb->base);
 	drm_framebuffer_cleanup(&psbfb->base);
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 2aaa3c8..00416f2 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -54,7 +54,7 @@
 }
 
 /* ADI recommended values for proper operation. */
-static const struct reg_default adv7511_fixed_registers[] = {
+static const struct reg_sequence adv7511_fixed_registers[] = {
 	{ 0x98, 0x03 },
 	{ 0x9a, 0xe0 },
 	{ 0x9c, 0x30 },
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 74acca9..051eab3 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -36,30 +36,6 @@
 	  i810 driver instead, and the Atom z5xx series has an entirely
 	  different implementation.
 
-config DRM_I915_KMS
-	bool "Enable modesetting on intel by default"
-	depends on DRM_I915
-	default y
-	help
-	  Choose this option if you want kernel modesetting enabled by default.
-
-	  If in doubt, say "Y".
-
-config DRM_I915_FBDEV
-	bool "Enable legacy fbdev support for the modesetting intel driver"
-	depends on DRM_I915
-	select DRM_KMS_FB_HELPER
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
-	default y
-	help
-	  Choose this option if you have a need for the legacy fbdev
-	  support. Note that this support also provide the linux console
-	  support on top of the intel modesetting driver.
-
-	  If in doubt, say "Y".
-
 config DRM_I915_PRELIMINARY_HW_SUPPORT
 	bool "Enable preliminary support for prerelease Intel hardware by default"
 	depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b7ddf48..998b464 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -6,12 +6,13 @@
 
 # core driver code
 i915-y := i915_drv.o \
+	  i915_irq.o \
 	  i915_params.o \
           i915_suspend.o \
 	  i915_sysfs.o \
+	  intel_csr.o \
 	  intel_pm.o \
-	  intel_runtime_pm.o \
-	  intel_csr.o
+	  intel_runtime_pm.o
 
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
@@ -20,21 +21,22 @@
 i915-y += i915_cmd_parser.o \
 	  i915_gem_batch_pool.o \
 	  i915_gem_context.o \
-	  i915_gem_render_state.o \
 	  i915_gem_debug.o \
 	  i915_gem_dmabuf.o \
 	  i915_gem_evict.o \
 	  i915_gem_execbuffer.o \
+	  i915_gem_fence.o \
 	  i915_gem_gtt.o \
 	  i915_gem.o \
+	  i915_gem_render_state.o \
 	  i915_gem_shrinker.o \
 	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
 	  i915_gem_userptr.o \
 	  i915_gpu_error.o \
-	  i915_irq.o \
 	  i915_trace_points.o \
 	  intel_lrc.o \
+	  intel_mocs.o \
 	  intel_ringbuffer.o \
 	  intel_uncore.o
 
@@ -46,18 +48,21 @@
 
 # modesetting core code
 i915-y += intel_audio.o \
+	  intel_atomic.o \
+	  intel_atomic_plane.o \
 	  intel_bios.o \
 	  intel_display.o \
 	  intel_fbc.o \
 	  intel_fifo_underrun.o \
 	  intel_frontbuffer.o \
+	  intel_hotplug.o \
 	  intel_modes.o \
 	  intel_overlay.o \
 	  intel_psr.o \
 	  intel_sideband.o \
 	  intel_sprite.o
 i915-$(CONFIG_ACPI)		+= intel_acpi.o intel_opregion.o
-i915-$(CONFIG_DRM_I915_FBDEV)	+= intel_fbdev.o
+i915-$(CONFIG_DRM_FBDEV_EMULATION)	+= intel_fbdev.o
 
 # modesetting output/encoder code
 i915-y += dvo_ch7017.o \
@@ -66,15 +71,13 @@
 	  dvo_ns2501.o \
 	  dvo_sil164.o \
 	  dvo_tfp410.o \
-	  intel_atomic.o \
-	  intel_atomic_plane.o \
 	  intel_crt.o \
 	  intel_ddi.o \
-	  intel_dp.o \
 	  intel_dp_mst.o \
+	  intel_dp.o \
 	  intel_dsi.o \
-	  intel_dsi_pll.o \
 	  intel_dsi_panel_vbt.o \
+	  intel_dsi_pll.o \
 	  intel_dvo.o \
 	  intel_hdmi.o \
 	  intel_i2c.o \
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 89b08a8..732ce87 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -22,6 +22,7 @@
  *
  * Authors:
  *    Eric Anholt <eric@anholt.net>
+ *    Thomas Richter <thor@math.tu-berlin.de>
  *
  * Minor modifications (Dithering enable):
  *    Thomas Richter <thor@math.tu-berlin.de>
@@ -90,7 +91,7 @@
 /*
  * LCD Vertical Display Size
  */
-#define VR21	0x20
+#define VR21	0x21
 
 /*
  * Panel power down status
@@ -155,16 +156,33 @@
 # define VR8F_POWER_MASK		(0x3c)
 # define VR8F_POWER_POS			(2)
 
+/* Some BIOS implementations do not restore the DVO state upon
+ * resume from standby. Thus, this driver has to handle it
+ * instead. The following list contains all registers that
+ * require saving.
+ */
+static const uint16_t backup_addresses[] = {
+	0x11, 0x12,
+	0x18, 0x19, 0x1a, 0x1f,
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+	0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+	0x8e, 0x8f,
+	0x10		/* this must come last */
+};
+
 
 struct ivch_priv {
 	bool quiet;
 
 	uint16_t width, height;
+
+	/* Register backup */
+
+	uint16_t reg_backup[ARRAY_SIZE(backup_addresses)];
 };
 
 
 static void ivch_dump_regs(struct intel_dvo_device *dvo);
-
 /**
  * Reads a register on the ivch.
  *
@@ -246,6 +264,7 @@
 {
 	struct ivch_priv *priv;
 	uint16_t temp;
+	int i;
 
 	priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
 	if (priv == NULL)
@@ -273,6 +292,14 @@
 	ivch_read(dvo, VR20, &priv->width);
 	ivch_read(dvo, VR21, &priv->height);
 
+	/* Make a backup of the registers to be able to restore them
+	 * upon suspend.
+	 */
+	for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
+		ivch_read(dvo, backup_addresses[i], priv->reg_backup + i);
+
+	ivch_dump_regs(dvo);
+
 	return true;
 
 out:
@@ -294,12 +321,31 @@
 	return MODE_OK;
 }
 
+/* Restore the DVO registers after a resume
+ * from RAM. Registers have been saved during
+ * the initialization.
+ */
+static void ivch_reset(struct intel_dvo_device *dvo)
+{
+	struct ivch_priv *priv = dvo->dev_priv;
+	int i;
+
+	DRM_DEBUG_KMS("Resetting the IVCH registers\n");
+
+	ivch_write(dvo, VR10, 0x0000);
+
+	for (i = 0; i < ARRAY_SIZE(backup_addresses); i++)
+		ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]);
+}
+
 /** Sets the power state of the panel connected to the ivch */
 static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
 {
 	int i;
 	uint16_t vr01, vr30, backlight;
 
+	ivch_reset(dvo);
+
 	/* Set the new power state of the panel. */
 	if (!ivch_read(dvo, VR01, &vr01))
 		return;
@@ -308,6 +354,7 @@
 		backlight = 1;
 	else
 		backlight = 0;
+
 	ivch_write(dvo, VR80, backlight);
 
 	if (enable)
@@ -334,6 +381,8 @@
 {
 	uint16_t vr01;
 
+	ivch_reset(dvo);
+
 	/* Set the new power state of the panel. */
 	if (!ivch_read(dvo, VR01, &vr01))
 		return false;
@@ -348,11 +397,15 @@
 			  struct drm_display_mode *mode,
 			  struct drm_display_mode *adjusted_mode)
 {
+	struct ivch_priv *priv = dvo->dev_priv;
 	uint16_t vr40 = 0;
 	uint16_t vr01 = 0;
 	uint16_t vr10;
 
-	ivch_read(dvo, VR10, &vr10);
+	ivch_reset(dvo);
+
+	vr10 = priv->reg_backup[ARRAY_SIZE(backup_addresses) - 1];
+
 	/* Enable dithering for 18 bpp pipelines */
 	vr10 &= VR10_INTERFACE_DEPTH_MASK;
 	if (vr10 == VR10_INTERFACE_2X18 || vr10 == VR10_INTERFACE_1X18)
@@ -366,7 +419,7 @@
 		uint16_t x_ratio, y_ratio;
 
 		vr01 |= VR01_PANEL_FIT_ENABLE;
-		vr40 |= VR40_CLOCK_GATING_ENABLE | VR40_ENHANCED_PANEL_FITTING;
+		vr40 |= VR40_CLOCK_GATING_ENABLE;
 		x_ratio = (((mode->hdisplay - 1) << 16) /
 			   (adjusted_mode->hdisplay - 1)) >> 2;
 		y_ratio = (((mode->vdisplay - 1) << 16) /
@@ -381,8 +434,6 @@
 
 	ivch_write(dvo, VR01, vr01);
 	ivch_write(dvo, VR40, vr40);
-
-	ivch_dump_regs(dvo);
 }
 
 static void ivch_dump_regs(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 306d9e4..237ff68 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -131,7 +131,7 @@
 			.mask = MI_GLOBAL_GTT,
 			.expected = 0,
 	      }},						       ),
-	CMD(  MI_LOAD_REGISTER_MEM,             SMI,   !F,  0xFF,   W | B,
+	CMD(  MI_LOAD_REGISTER_MEM(1),             SMI,   !F,  0xFF,   W | B,
 	      .reg = { .offset = 1, .mask = 0x007FFFFC },
 	      .bits = {{
 			.offset = 0,
@@ -151,8 +151,8 @@
 	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
 	CMD(  MI_PREDICATE,                     SMI,    F,  1,      S  ),
 	CMD(  MI_TOPOLOGY_FILTER,               SMI,    F,  1,      S  ),
-	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
 	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
+	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
 	CMD(  MI_SET_CONTEXT,                   SMI,   !F,  0xFF,   R  ),
 	CMD(  MI_URB_CLEAR,                     SMI,   !F,  0xFF,   S  ),
 	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3F,   B,
@@ -564,7 +564,7 @@
 
 		for (j = 0; j < table->count; j++) {
 			const struct drm_i915_cmd_descriptor *desc =
-				&table->table[i];
+				&table->table[j];
 			u32 curr = desc->cmd.value & desc->cmd.mask;
 
 			if (curr < previous) {
@@ -1021,7 +1021,7 @@
 			 * only MI_LOAD_REGISTER_IMM commands.
 			 */
 			if (reg_addr == OACONTROL) {
-				if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+				if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
 					DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
 					return false;
 				}
@@ -1035,7 +1035,7 @@
 			 * allowed mask/value pair given in the whitelist entry.
 			 */
 			if (reg->mask) {
-				if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+				if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
 					DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
 							 reg_addr);
 					return false;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 82bbe3f..e3ec904 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -117,6 +117,20 @@
 	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
 }
 
+static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
+{
+	u64 size = 0;
+	struct i915_vma *vma;
+
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		if (i915_is_ggtt(vma->vm) &&
+		    drm_mm_node_allocated(&vma->node))
+			size += vma->node.size;
+	}
+
+	return size;
+}
+
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
@@ -156,13 +170,13 @@
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
 	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (!i915_is_ggtt(vma->vm))
-			seq_puts(m, " (pp");
+		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
+			   i915_is_ggtt(vma->vm) ? "g" : "pp",
+			   vma->node.start, vma->node.size);
+		if (i915_is_ggtt(vma->vm))
+			seq_printf(m, ", type: %u)", vma->ggtt_view.type);
 		else
-			seq_puts(m, " (g");
-		seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
-			   vma->node.start, vma->node.size,
-			   vma->ggtt_view.type);
+			seq_puts(m, ")");
 	}
 	if (obj->stolen)
 		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
@@ -198,7 +212,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct i915_vma *vma;
-	size_t total_obj_size, total_gtt_size;
+	u64 total_obj_size, total_gtt_size;
 	int count, ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -231,7 +245,7 @@
 	}
 	mutex_unlock(&dev->struct_mutex);
 
-	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 		   count, total_obj_size, total_gtt_size);
 	return 0;
 }
@@ -253,7 +267,7 @@
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	size_t total_obj_size, total_gtt_size;
+	u64 total_obj_size, total_gtt_size;
 	LIST_HEAD(stolen);
 	int count, ret;
 
@@ -269,7 +283,7 @@
 		list_add(&obj->obj_exec_link, &stolen);
 
 		total_obj_size += obj->base.size;
-		total_gtt_size += i915_gem_obj_ggtt_size(obj);
+		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 		count++;
 	}
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
@@ -292,14 +306,14 @@
 	}
 	mutex_unlock(&dev->struct_mutex);
 
-	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 		   count, total_obj_size, total_gtt_size);
 	return 0;
 }
 
 #define count_objects(list, member) do { \
 	list_for_each_entry(obj, list, member) { \
-		size += i915_gem_obj_ggtt_size(obj); \
+		size += i915_gem_obj_total_ggtt_size(obj); \
 		++count; \
 		if (obj->map_and_fenceable) { \
 			mappable_size += i915_gem_obj_ggtt_size(obj); \
@@ -310,10 +324,10 @@
 
 struct file_stats {
 	struct drm_i915_file_private *file_priv;
-	int count;
-	size_t total, unbound;
-	size_t global, shared;
-	size_t active, inactive;
+	unsigned long count;
+	u64 total, unbound;
+	u64 global, shared;
+	u64 active, inactive;
 };
 
 static int per_file_stats(int id, void *ptr, void *data)
@@ -370,7 +384,7 @@
 
 #define print_file_stats(m, name, stats) do { \
 	if (stats.count) \
-		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
+		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
 			   name, \
 			   stats.count, \
 			   stats.total, \
@@ -405,7 +419,7 @@
 
 #define count_vmas(list, member) do { \
 	list_for_each_entry(vma, list, member) { \
-		size += i915_gem_obj_ggtt_size(vma->obj); \
+		size += i915_gem_obj_total_ggtt_size(vma->obj); \
 		++count; \
 		if (vma->obj->map_and_fenceable) { \
 			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
@@ -420,7 +434,7 @@
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 count, mappable_count, purgeable_count;
-	size_t size, mappable_size, purgeable_size;
+	u64 size, mappable_size, purgeable_size;
 	struct drm_i915_gem_object *obj;
 	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct drm_file *file;
@@ -437,17 +451,17 @@
 
 	size = count = mappable_size = mappable_count = 0;
 	count_objects(&dev_priv->mm.bound_list, global_list);
-	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+	seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
 	count_vmas(&vm->active_list, mm_list);
-	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
+	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
 	count_vmas(&vm->inactive_list, mm_list);
-	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
+	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = purgeable_size = purgeable_count = 0;
@@ -456,7 +470,7 @@
 		if (obj->madv == I915_MADV_DONTNEED)
 			purgeable_size += obj->base.size, ++purgeable_count;
 	}
-	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
+	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
 
 	size = count = mappable_size = mappable_count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -473,16 +487,16 @@
 			++purgeable_count;
 		}
 	}
-	seq_printf(m, "%u purgeable objects, %zu bytes\n",
+	seq_printf(m, "%u purgeable objects, %llu bytes\n",
 		   purgeable_count, purgeable_size);
-	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+	seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
 		   mappable_count, mappable_size);
-	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+	seq_printf(m, "%u fault mappable objects, %llu bytes\n",
 		   count, size);
 
-	seq_printf(m, "%zu [%lu] gtt total\n",
+	seq_printf(m, "%llu [%llu] gtt total\n",
 		   dev_priv->gtt.base.total,
-		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
+		   (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
 
 	seq_putc(m, '\n');
 	print_batch_pool_stats(m, dev_priv);
@@ -519,7 +533,7 @@
 	uintptr_t list = (uintptr_t) node->info_ent->data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	size_t total_obj_size, total_gtt_size;
+	u64 total_obj_size, total_gtt_size;
 	int count, ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -535,13 +549,13 @@
 		describe_obj(m, obj);
 		seq_putc(m, '\n');
 		total_obj_size += obj->base.size;
-		total_gtt_size += i915_gem_obj_ggtt_size(obj);
+		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 		count++;
 	}
 
 	mutex_unlock(&dev->struct_mutex);
 
-	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 		   count, total_obj_size, total_gtt_size);
 
 	return 0;
@@ -1132,9 +1146,9 @@
 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
 	} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
 		   IS_BROADWELL(dev) || IS_GEN9(dev)) {
-		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
-		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
-		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		u32 rp_state_limits;
+		u32 gt_perf_status;
+		u32 rp_state_cap;
 		u32 rpmodectl, rpinclimit, rpdeclimit;
 		u32 rpstat, cagf, reqf;
 		u32 rpupei, rpcurup, rpprevup;
@@ -1142,6 +1156,15 @@
 		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
 		int max_freq;
 
+		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+		if (IS_BROXTON(dev)) {
+			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
+		} else {
+			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+		}
+
 		/* RPSTAT1 is in the GT power well */
 		ret = mutex_lock_interruptible(&dev->struct_mutex);
 		if (ret)
@@ -1229,7 +1252,8 @@
 		seq_printf(m, "Down threshold: %d%%\n",
 			   dev_priv->rps.down_threshold);
 
-		max_freq = (rp_state_cap & 0xff0000) >> 16;
+		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
+			    rp_state_cap >> 16) & 0xff;
 		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
@@ -1239,7 +1263,8 @@
 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 
-		max_freq = rp_state_cap & 0xff;
+		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
+			    rp_state_cap >> 0) & 0xff;
 		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
@@ -1581,6 +1606,21 @@
 		return ironlake_drpc_info(m);
 }
 
+static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
+		   dev_priv->fb_tracking.busy_bits);
+
+	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
+		   dev_priv->fb_tracking.flip_bits);
+
+	return 0;
+}
+
 static int i915_fbc_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
@@ -1593,51 +1633,20 @@
 	}
 
 	intel_runtime_pm_get(dev_priv);
+	mutex_lock(&dev_priv->fbc.lock);
 
-	if (intel_fbc_enabled(dev)) {
+	if (intel_fbc_enabled(dev_priv))
 		seq_puts(m, "FBC enabled\n");
-	} else {
-		seq_puts(m, "FBC disabled: ");
-		switch (dev_priv->fbc.no_fbc_reason) {
-		case FBC_OK:
-			seq_puts(m, "FBC actived, but currently disabled in hardware");
-			break;
-		case FBC_UNSUPPORTED:
-			seq_puts(m, "unsupported by this chipset");
-			break;
-		case FBC_NO_OUTPUT:
-			seq_puts(m, "no outputs");
-			break;
-		case FBC_STOLEN_TOO_SMALL:
-			seq_puts(m, "not enough stolen memory");
-			break;
-		case FBC_UNSUPPORTED_MODE:
-			seq_puts(m, "mode not supported");
-			break;
-		case FBC_MODE_TOO_LARGE:
-			seq_puts(m, "mode too large");
-			break;
-		case FBC_BAD_PLANE:
-			seq_puts(m, "FBC unsupported on plane");
-			break;
-		case FBC_NOT_TILED:
-			seq_puts(m, "scanout buffer not tiled");
-			break;
-		case FBC_MULTIPLE_PIPES:
-			seq_puts(m, "multiple pipes are enabled");
-			break;
-		case FBC_MODULE_PARAM:
-			seq_puts(m, "disabled per module param (default off)");
-			break;
-		case FBC_CHIP_DEFAULT:
-			seq_puts(m, "disabled per chip default");
-			break;
-		default:
-			seq_puts(m, "unknown reason");
-		}
-		seq_putc(m, '\n');
-	}
+	else
+		seq_printf(m, "FBC disabled: %s\n",
+			  intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason));
 
+	if (INTEL_INFO(dev_priv)->gen >= 7)
+		seq_printf(m, "Compressing: %s\n",
+			   yesno(I915_READ(FBC_STATUS2) &
+				 FBC_COMPRESSION_MASK));
+
+	mutex_unlock(&dev_priv->fbc.lock);
 	intel_runtime_pm_put(dev_priv);
 
 	return 0;
@@ -1651,9 +1660,7 @@
 	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
 		return -ENODEV;
 
-	drm_modeset_lock_all(dev);
 	*val = dev_priv->fbc.false_color;
-	drm_modeset_unlock_all(dev);
 
 	return 0;
 }
@@ -1667,7 +1674,7 @@
 	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
 		return -ENODEV;
 
-	drm_modeset_lock_all(dev);
+	mutex_lock(&dev_priv->fbc.lock);
 
 	reg = I915_READ(ILK_DPFC_CONTROL);
 	dev_priv->fbc.false_color = val;
@@ -1676,7 +1683,7 @@
 		   (reg | FBC_CTL_FALSE_COLOR) :
 		   (reg & ~FBC_CTL_FALSE_COLOR));
 
-	drm_modeset_unlock_all(dev);
+	mutex_unlock(&dev_priv->fbc.lock);
 	return 0;
 }
 
@@ -1778,8 +1785,9 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
 	int gpu_freq, ia_freq;
+	unsigned int max_gpu_freq, min_gpu_freq;
 
-	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+	if (!HAS_CORE_RING_FREQ(dev)) {
 		seq_puts(m, "unsupported on this chipset\n");
 		return 0;
 	}
@@ -1792,17 +1800,27 @@
 	if (ret)
 		goto out;
 
+	if (IS_SKYLAKE(dev)) {
+		/* Convert GT frequency to 50 MHz units */
+		min_gpu_freq =
+			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
+		max_gpu_freq =
+			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
+	} else {
+		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
+		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
+	}
+
 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
-	for (gpu_freq = dev_priv->rps.min_freq_softlimit;
-	     gpu_freq <= dev_priv->rps.max_freq_softlimit;
-	     gpu_freq++) {
+	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
 		ia_freq = gpu_freq;
 		sandybridge_pcode_read(dev_priv,
 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
 				       &ia_freq);
 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
-			   intel_gpu_freq(dev_priv, gpu_freq),
+			   intel_gpu_freq(dev_priv, (gpu_freq *
+				(IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))),
 			   ((ia_freq >> 0) & 0xff) * 100,
 			   ((ia_freq >> 8) & 0xff) * 100);
 	}
@@ -1848,8 +1866,9 @@
 	struct drm_device *dev = node->minor->dev;
 	struct intel_fbdev *ifbdev = NULL;
 	struct intel_framebuffer *fb;
+	struct drm_framebuffer *drm_fb;
 
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	ifbdev = dev_priv->fbdev;
@@ -1867,7 +1886,8 @@
 #endif
 
 	mutex_lock(&dev->mode_config.fb_lock);
-	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+	drm_for_each_fb(drm_fb, dev) {
+		fb = to_intel_framebuffer(drm_fb);
 		if (ifbdev && &fb->base == ifbdev->helper.fb)
 			continue;
 
@@ -2248,7 +2268,7 @@
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
 		seq_puts(m, "aliasing PPGTT:\n");
-		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
+		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
 
 		ppgtt->debug_dump(ppgtt, m);
 	}
@@ -2479,13 +2499,13 @@
 	return 0;
 }
 
-static int i915_pc8_status(struct seq_file *m, void *unused)
+static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
+	if (!HAS_RUNTIME_PM(dev)) {
 		seq_puts(m, "not supported\n");
 		return 0;
 	}
@@ -2493,6 +2513,12 @@
 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
 	seq_printf(m, "IRQs disabled: %s\n",
 		   yesno(!intel_irqs_enabled(dev_priv)));
+#ifdef CONFIG_PM
+	seq_printf(m, "Usage count: %d\n",
+		   atomic_read(&dev->dev->power.usage_count));
+#else
+	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
+#endif
 
 	return 0;
 }
@@ -2536,6 +2562,8 @@
 		return "PORT_DDI_D_2_LANES";
 	case POWER_DOMAIN_PORT_DDI_D_4_LANES:
 		return "PORT_DDI_D_4_LANES";
+	case POWER_DOMAIN_PORT_DDI_E_2_LANES:
+		return "PORT_DDI_E_2_LANES";
 	case POWER_DOMAIN_PORT_DSI:
 		return "PORT_DSI";
 	case POWER_DOMAIN_PORT_CRT:
@@ -2780,13 +2808,16 @@
 	seq_printf(m, "---------\n");
 	for_each_intel_crtc(dev, crtc) {
 		bool active;
+		struct intel_crtc_state *pipe_config;
 		int x, y;
 
+		pipe_config = to_intel_crtc_state(crtc->base.state);
+
 		seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
 			   crtc->base.base.id, pipe_name(crtc->pipe),
-			   yesno(crtc->active), crtc->config->pipe_src_w,
-			   crtc->config->pipe_src_h);
-		if (crtc->active) {
+			   yesno(pipe_config->base.active),
+			   pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+		if (pipe_config->base.active) {
 			intel_crtc_info(m, crtc);
 
 			active = cursor_position(dev, crtc->pipe, &x, &y);
@@ -3027,7 +3058,7 @@
 
 	seq_puts(m, "\n\n");
 
-	if (intel_crtc->config->has_drrs) {
+	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
 		struct intel_panel *panel;
 
 		mutex_lock(&drrs->mutex);
@@ -3079,7 +3110,7 @@
 	for_each_intel_crtc(dev, intel_crtc) {
 		drm_modeset_lock(&intel_crtc->base.mutex, NULL);
 
-		if (intel_crtc->active) {
+		if (intel_crtc->base.state->active) {
 			active_crtc_cnt++;
 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
 
@@ -3616,53 +3647,40 @@
 	return 0;
 }
 
-static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *crtc =
 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+	struct intel_crtc_state *pipe_config;
+	struct drm_atomic_state *state;
+	int ret = 0;
 
 	drm_modeset_lock_all(dev);
-	/*
-	 * If we use the eDP transcoder we need to make sure that we don't
-	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
-	 * relevant on hsw with pipe A when using the always-on power well
-	 * routing.
-	 */
-	if (crtc->config->cpu_transcoder == TRANSCODER_EDP &&
-	    !crtc->config->pch_pfit.enabled) {
-		crtc->config->pch_pfit.force_thru = true;
-
-		intel_display_power_get(dev_priv,
-					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
-
-		intel_crtc_reset(crtc);
+	state = drm_atomic_state_alloc(dev);
+	if (!state) {
+		ret = -ENOMEM;
+		goto out;
 	}
-	drm_modeset_unlock_all(dev);
-}
 
-static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *crtc =
-		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
-
-	drm_modeset_lock_all(dev);
-	/*
-	 * If we use the eDP transcoder we need to make sure that we don't
-	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
-	 * relevant on hsw with pipe A when using the always-on power well
-	 * routing.
-	 */
-	if (crtc->config->pch_pfit.force_thru) {
-		crtc->config->pch_pfit.force_thru = false;
-
-		intel_crtc_reset(crtc);
-
-		intel_display_power_put(dev_priv,
-					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
+	pipe_config = intel_atomic_get_crtc_state(state, crtc);
+	if (IS_ERR(pipe_config)) {
+		ret = PTR_ERR(pipe_config);
+		goto out;
 	}
+
+	pipe_config->pch_pfit.force_thru = enable;
+	if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
+	    pipe_config->pch_pfit.enabled != enable)
+		pipe_config->base.connectors_changed = true;
+
+	ret = drm_atomic_commit(state);
+out:
 	drm_modeset_unlock_all(dev);
+	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
+	if (ret)
+		drm_atomic_state_free(state);
 }
 
 static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
@@ -3682,7 +3700,7 @@
 		break;
 	case INTEL_PIPE_CRC_SOURCE_PF:
 		if (IS_HASWELL(dev) && pipe == PIPE_A)
-			hsw_trans_edp_pipe_A_crc_wa(dev);
+			hsw_trans_edp_pipe_A_crc_wa(dev, true);
 
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
 		break;
@@ -3776,7 +3794,7 @@
 				 pipe_name(pipe));
 
 		drm_modeset_lock(&crtc->base.mutex, NULL);
-		if (crtc->active)
+		if (crtc->base.state->active)
 			intel_wait_for_vblank(dev, pipe);
 		drm_modeset_unlock(&crtc->base.mutex);
 
@@ -3794,7 +3812,7 @@
 		else if (IS_VALLEYVIEW(dev))
 			vlv_undo_pipe_scramble_reset(dev, pipe);
 		else if (IS_HASWELL(dev) && pipe == PIPE_A)
-			hsw_undo_trans_edp_pipe_A_crc_wa(dev);
+			hsw_trans_edp_pipe_A_crc_wa(dev, false);
 
 		hsw_enable_ips(crtc);
 	}
@@ -3980,24 +3998,14 @@
 {
 	char *input_buffer;
 	int status = 0;
-	struct seq_file *m;
 	struct drm_device *dev;
 	struct drm_connector *connector;
 	struct list_head *connector_list;
 	struct intel_dp *intel_dp;
 	int val = 0;
 
-	m = file->private_data;
-	if (!m) {
-		status = -ENODEV;
-		return status;
-	}
-	dev = m->private;
+	dev = ((struct seq_file *)file->private_data)->private;
 
-	if (!dev) {
-		status = -ENODEV;
-		return status;
-	}
 	connector_list = &dev->mode_config.connector_list;
 
 	if (len == 0)
@@ -4021,9 +4029,7 @@
 		    DRM_MODE_CONNECTOR_DisplayPort)
 			continue;
 
-		if (connector->connector_type ==
-		    DRM_MODE_CONNECTOR_DisplayPort &&
-		    connector->status == connector_status_connected &&
+		if (connector->status == connector_status_connected &&
 		    connector->encoder != NULL) {
 			intel_dp = enc_to_intel_dp(connector->encoder);
 			status = kstrtoint(input_buffer, 10, &val);
@@ -4055,9 +4061,6 @@
 	struct list_head *connector_list = &dev->mode_config.connector_list;
 	struct intel_dp *intel_dp;
 
-	if (!dev)
-		return -ENODEV;
-
 	list_for_each_entry(connector, connector_list, head) {
 
 		if (connector->connector_type !=
@@ -4102,9 +4105,6 @@
 	struct list_head *connector_list = &dev->mode_config.connector_list;
 	struct intel_dp *intel_dp;
 
-	if (!dev)
-		return -ENODEV;
-
 	list_for_each_entry(connector, connector_list, head) {
 
 		if (connector->connector_type !=
@@ -4144,9 +4144,6 @@
 	struct list_head *connector_list = &dev->mode_config.connector_list;
 	struct intel_dp *intel_dp;
 
-	if (!dev)
-		return -ENODEV;
-
 	list_for_each_entry(connector, connector_list, head) {
 
 		if (connector->connector_type !=
@@ -4183,8 +4180,15 @@
 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 {
 	struct drm_device *dev = m->private;
-	int num_levels = ilk_wm_max_level(dev) + 1;
 	int level;
+	int num_levels;
+
+	if (IS_CHERRYVIEW(dev))
+		num_levels = 3;
+	else if (IS_VALLEYVIEW(dev))
+		num_levels = 1;
+	else
+		num_levels = ilk_wm_max_level(dev) + 1;
 
 	drm_modeset_lock_all(dev);
 
@@ -4193,9 +4197,9 @@
 
 		/*
 		 * - WM1+ latency values in 0.5us units
-		 * - latencies are in us on gen9
+		 * - latencies are in us on gen9/vlv/chv
 		 */
-		if (INTEL_INFO(dev)->gen >= 9)
+		if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev))
 			latency *= 10;
 		else if (level > 0)
 			latency *= 5;
@@ -4259,7 +4263,7 @@
 {
 	struct drm_device *dev = inode->i_private;
 
-	if (HAS_GMCH_DISPLAY(dev))
+	if (INTEL_INFO(dev)->gen < 5)
 		return -ENODEV;
 
 	return single_open(file, pri_wm_latency_show, dev);
@@ -4291,11 +4295,18 @@
 	struct seq_file *m = file->private_data;
 	struct drm_device *dev = m->private;
 	uint16_t new[8] = { 0 };
-	int num_levels = ilk_wm_max_level(dev) + 1;
+	int num_levels;
 	int level;
 	int ret;
 	char tmp[32];
 
+	if (IS_CHERRYVIEW(dev))
+		num_levels = 3;
+	else if (IS_VALLEYVIEW(dev))
+		num_levels = 1;
+	else
+		num_levels = ilk_wm_max_level(dev) + 1;
+
 	if (len >= sizeof(tmp))
 		return -EINVAL;
 
@@ -5027,6 +5038,7 @@
 	{"i915_drpc_info", i915_drpc_info, 0},
 	{"i915_emon_status", i915_emon_status, 0},
 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
+	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
 	{"i915_fbc_status", i915_fbc_status, 0},
 	{"i915_ips_status", i915_ips_status, 0},
 	{"i915_sr_status", i915_sr_status, 0},
@@ -5042,7 +5054,7 @@
 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
 	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
 	{"i915_energy_uJ", i915_energy_uJ, 0},
-	{"i915_pc8_status", i915_pc8_status, 0},
+	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
 	{"i915_power_domain_info", i915_power_domain_info, 0},
 	{"i915_display_info", i915_display_info, 0},
 	{"i915_semaphore_status", i915_semaphore_status, 0},
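Aside on the size_t -> u64 churn above: it is not cosmetic. On 32-bit
kernels size_t is 32 bits wide, so GGTT totals past 4 GiB would wrap
before ever reaching seq_printf(). A minimal userspace sketch of the
same format-string hazard (plain C, illustrative only, not part of the
patch):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* A GTT total just past 4 GiB, too big for a 32-bit size_t. */
        uint64_t total_gtt_size = (1ULL << 32) + 4096;

        /* "%zu" with a 32-bit size_t would have truncated this to 4096;
         * widening to u64 and printing with "%llu" keeps the full value. */
        printf("Total %llu GTT size\n",
               (unsigned long long)total_gtt_size);
        return 0;
    }
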
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d2df321..ab37d11 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -163,6 +163,13 @@
 		if (!value)
 			return -ENODEV;
 		break;
+	case I915_PARAM_HAS_GPU_RESET:
+		value = i915.enable_hangcheck &&
+			intel_has_gpu_reset(dev);
+		break;
+	case I915_PARAM_HAS_RESOURCE_STREAMER:
+		value = HAS_RESOURCE_STREAMER(dev);
+		break;
 	default:
 		DRM_DEBUG("Unknown parameter %d\n", param->param);
 		return -EINVAL;
@@ -719,11 +726,19 @@
 
 	info = (struct intel_device_info *)&dev_priv->info;
 
+	/*
+	 * Skylake and Broxton currently don't expose the topmost plane as its
+	 * use is exclusive with the legacy cursor and we only want to expose
+	 * one of those, not both. Until we can safely expose the topmost plane
+	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+	 * we don't expose the topmost plane at all to prevent ABI breakage
+	 * down the line.
+	 */
 	if (IS_BROXTON(dev)) {
-		info->num_sprites[PIPE_A] = 3;
-		info->num_sprites[PIPE_B] = 3;
-		info->num_sprites[PIPE_C] = 2;
-	} else if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
+		info->num_sprites[PIPE_A] = 2;
+		info->num_sprites[PIPE_B] = 2;
+		info->num_sprites[PIPE_C] = 1;
+	} else if (IS_VALLEYVIEW(dev))
 		for_each_pipe(dev_priv, pipe)
 			info->num_sprites[pipe] = 2;
 	else
@@ -933,8 +948,8 @@
 		goto out_mtrrfree;
 	}
 
-	dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
-	if (dev_priv->dp_wq == NULL) {
+	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+	if (dev_priv->hotplug.dp_wq == NULL) {
 		DRM_ERROR("Failed to create our dp workqueue.\n");
 		ret = -ENOMEM;
 		goto out_freewq;
@@ -1029,7 +1044,7 @@
 	pm_qos_remove_request(&dev_priv->pm_qos);
 	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
 out_freedpwq:
-	destroy_workqueue(dev_priv->dp_wq);
+	destroy_workqueue(dev_priv->hotplug.dp_wq);
 out_freewq:
 	destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
@@ -1116,6 +1131,7 @@
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
+	intel_fbc_cleanup_cfb(dev_priv);
 	i915_gem_cleanup_stolen(dev);
 
 	intel_csr_ucode_fini(dev);
@@ -1123,7 +1139,7 @@
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
 
-	destroy_workqueue(dev_priv->dp_wq);
+	destroy_workqueue(dev_priv->hotplug.dp_wq);
 	destroy_workqueue(dev_priv->wq);
 	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
 	pm_qos_remove_request(&dev_priv->pm_qos);
@@ -1258,13 +1274,3 @@
 };
 
 int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
-
-/*
- * This is really ugly: Because old userspace abused the linux agp interface to
- * manage the gtt, we need to claim that all intel devices are agp.  For
- * otherwise the drm core refuses to initialize the agp support code.
- */
-int i915_driver_device_is_agp(struct drm_device *dev)
-{
-	return 1;
-}
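Aside: the two new getparam cases above are reached from userspace
through the existing DRM_IOCTL_I915_GETPARAM ioctl. A rough probe could
look like the sketch below (assumes libdrm's uapi header i915_drm.h on
the include path; I915_PARAM_HAS_GPU_RESET is the value this series adds
to that header, illustrative only):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <i915_drm.h>

    int main(void)
    {
        int fd = open("/dev/dri/card0", O_RDWR);
        int value = 0;
        struct drm_i915_getparam gp = {
            .param = I915_PARAM_HAS_GPU_RESET,
            .value = &value,
        };

        if (fd < 0 || ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0) {
            perror("I915_GETPARAM");
            return 1;
        }
        /* Mirrors the kernel side: hangcheck enabled && reset supported. */
        printf("GPU reset available: %d\n", value);
        return 0;
    }
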
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 884b4f9..ab64d68 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -356,7 +356,6 @@
 };
 
 static const struct intel_device_info intel_skylake_info = {
-	.is_preliminary = 1,
 	.is_skylake = 1,
 	.gen = 9, .num_pipes = 3,
 	.need_gfx_hws = 1, .has_hotplug = 1,
@@ -369,7 +368,6 @@
 };
 
 static const struct intel_device_info intel_skylake_gt3_info = {
-	.is_preliminary = 1,
 	.is_skylake = 1,
 	.gen = 9, .num_pipes = 3,
 	.need_gfx_hws = 1, .has_hotplug = 1,
@@ -440,9 +438,7 @@
 	{0, 0, 0}
 };
 
-#if defined(CONFIG_DRM_I915_KMS)
 MODULE_DEVICE_TABLE(pci, pciidlist);
-#endif
 
 void intel_detect_pch(struct drm_device *dev)
 {
@@ -541,21 +537,6 @@
 	return true;
 }
 
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
-{
-	spin_lock_irq(&dev_priv->irq_lock);
-
-	dev_priv->long_hpd_port_mask = 0;
-	dev_priv->short_hpd_port_mask = 0;
-	dev_priv->hpd_event_bits = 0;
-
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	cancel_work_sync(&dev_priv->dig_port_work);
-	cancel_work_sync(&dev_priv->hotplug_work);
-	cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
-}
-
 void i915_firmware_load_error_print(const char *fw_path, int err)
 {
 	DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
@@ -601,7 +582,6 @@
 static int i915_drm_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
 	pci_power_t opregion_target_state;
 	int error;
 
@@ -632,8 +612,7 @@
 	 * for _thaw. Also, power gate the CRTC power wells.
 	 */
 	drm_modeset_lock_all(dev);
-	for_each_crtc(dev, crtc)
-		intel_crtc_control(crtc, false);
+	intel_display_suspend(dev);
 	drm_modeset_unlock_all(dev);
 
 	intel_dp_mst_suspend(dev);
@@ -683,15 +662,18 @@
 
 	pci_disable_device(drm_dev->pdev);
 	/*
-	 * During hibernation on some GEN4 platforms the BIOS may try to access
+	 * During hibernation on some platforms the BIOS may try to access
 	 * the device even though it's already in D3 and hang the machine. So
 	 * leave the device in D0 on those platforms and hope the BIOS will
-	 * power down the device properly. Platforms where this was seen:
-	 * Lenovo Thinkpad X301, X61s
+	 * power down the device properly. The issue was seen on multiple old
+	 * GENs with different BIOS vendors, so having an explicit blacklist
+	 * is impractical; apply the workaround on everything pre GEN6. The
+	 * platforms where the issue was seen:
+	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
+	 * Fujitsu FSC S7110
+	 * Acer Aspire 1830T
 	 */
-	if (!(hibernation &&
-	      drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
-	      INTEL_INFO(dev_priv)->gen == 4))
+	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
 		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
 
 	return 0;
@@ -748,7 +730,7 @@
 	mutex_lock(&dev->struct_mutex);
 	if (i915_gem_init_hw(dev)) {
 		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
 	}
 	mutex_unlock(&dev->struct_mutex);
 
@@ -760,7 +742,7 @@
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	drm_modeset_lock_all(dev);
-	intel_modeset_setup_hw_state(dev, true);
+	intel_display_resume(dev);
 	drm_modeset_unlock_all(dev);
 
 	intel_dp_mst_resume(dev);
@@ -865,9 +847,6 @@
 	bool simulated;
 	int ret;
 
-	if (!i915.reset)
-		return 0;
-
 	intel_reset_gt_powersave(dev);
 
 	mutex_lock(&dev->struct_mutex);
@@ -959,8 +938,6 @@
 	if (PCI_FUNC(pdev->devfn))
 		return -ENODEV;
 
-	driver.driver_features &= ~(DRIVER_USE_AGP);
-
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
 
@@ -1515,7 +1492,15 @@
 	 * FIXME: We really should find a document that references the arguments
 	 * used below!
 	 */
-	if (IS_HASWELL(dev)) {
+	if (IS_BROADWELL(dev)) {
+		/*
+		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
+		 * being detected, and the call we do at intel_runtime_resume()
+		 * won't be able to restore them. Since PCI_D3hot matches the
+		 * actual specification and appears to be working, use it.
+		 */
+		intel_opregion_notify_adapter(dev, PCI_D3hot);
+	} else {
 		/*
 		 * current versions of firmware which depend on this opregion
 		 * notification have repurposed the D1 definition to mean
@@ -1524,16 +1509,6 @@
 		 * the suspend path.
 		 */
 		intel_opregion_notify_adapter(dev, PCI_D1);
-	} else {
-		/*
-		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
-		 * being detected, and the call we do at intel_runtime_resume()
-		 * won't be able to restore them. Since PCI_D3hot matches the
-		 * actual specification and appears to be working, use it. Let's
-		 * assume the other non-Haswell platforms will stay the same as
-		 * Broadwell.
-		 */
-		intel_opregion_notify_adapter(dev, PCI_D3hot);
 	}
 
 	assert_forcewakes_inactive(dev_priv);
@@ -1673,7 +1648,6 @@
 	 * deal with them for Intel hardware.
 	 */
 	.driver_features =
-	    DRIVER_USE_AGP |
 	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
 	    DRIVER_RENDER,
 	.load = i915_driver_load,
@@ -1688,7 +1662,6 @@
 	.suspend = i915_suspend_legacy,
 	.resume = i915_resume_legacy,
 
-	.device_is_agp = i915_driver_device_is_agp,
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = i915_debugfs_init,
 	.debugfs_cleanup = i915_debugfs_cleanup,
@@ -1727,20 +1700,14 @@
 	driver.num_ioctls = i915_max_ioctl;
 
 	/*
-	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
-	 * explicitly disabled with the module pararmeter.
-	 *
-	 * Otherwise, just follow the parameter (defaulting to off).
-	 *
-	 * Allow optional vga_text_mode_force boot option to override
-	 * the default behavior.
+	 * Enable KMS by default, unless explicitly overridden by
+	 * either the i915.modeset parameter or the
+	 * vga_text_mode_force boot option.
 	 */
-#if defined(CONFIG_DRM_I915_KMS)
-	if (i915.modeset != 0)
-		driver.driver_features |= DRIVER_MODESET;
-#endif
-	if (i915.modeset == 1)
-		driver.driver_features |= DRIVER_MODESET;
+	driver.driver_features |= DRIVER_MODESET;
+
+	if (i915.modeset == 0)
+		driver.driver_features &= ~DRIVER_MODESET;
 
 #ifdef CONFIG_VGA_CONSOLE
 	if (vgacon_text_force() && i915.modeset == -1)
@@ -1759,7 +1726,7 @@
 	 * to the atomic ioctl and the atomic properties.  Only plane operations on
 	 * a single CRTC will actually work.
 	 */
-	if (i915.nuclear_pageflip)
+	if (driver.driver_features & DRIVER_MODESET)
 		driver.driver_features |= DRIVER_ATOMIC;
 
 	return drm_pci_init(&driver, &i915_pci_driver);
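Aside: the net effect of the rewritten modeset handling above is a
tri-state parameter with KMS on by default. A condensed standalone
sketch of the decision (illustrative C; vgacon_text_force() is stubbed
out here, in the kernel it reports the vga_text_mode_force boot option):

    #include <stdbool.h>
    #include <stdio.h>

    static int modeset = -1;  /* -1 = default, 0 = force off, 1 = force on */

    /* Stand-in for the kernel's vgacon_text_force() console check. */
    static bool vgacon_text_force(void) { return false; }

    static bool driver_wants_kms(void)
    {
        bool kms = true;                 /* KMS is now the default */

        if (modeset == 0)
            kms = false;                 /* i915.modeset=0 turns it off */
        if (vgacon_text_force() && modeset == -1)
            kms = false;                 /* boot option only beats the default */
        return kms;
    }

    int main(void)
    {
        printf("KMS enabled: %s\n", driver_wants_kms() ? "yes" : "no");
        return 0;
    }
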
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fd1de45..81adf89 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -56,7 +56,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20150522"
+#define DRIVER_DATE		"20150731"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -182,6 +182,7 @@
 	POWER_DOMAIN_PORT_DDI_C_4_LANES,
 	POWER_DOMAIN_PORT_DDI_D_2_LANES,
 	POWER_DOMAIN_PORT_DDI_D_4_LANES,
+	POWER_DOMAIN_PORT_DDI_E_2_LANES,
 	POWER_DOMAIN_PORT_DSI,
 	POWER_DOMAIN_PORT_CRT,
 	POWER_DOMAIN_PORT_OTHER,
@@ -206,17 +207,51 @@
 
 enum hpd_pin {
 	HPD_NONE = 0,
-	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
 	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
 	HPD_CRT,
 	HPD_SDVO_B,
 	HPD_SDVO_C,
+	HPD_PORT_A,
 	HPD_PORT_B,
 	HPD_PORT_C,
 	HPD_PORT_D,
+	HPD_PORT_E,
 	HPD_NUM_PINS
 };
 
+#define for_each_hpd_pin(__pin) \
+	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
+
+struct i915_hotplug {
+	struct work_struct hotplug_work;
+
+	struct {
+		unsigned long last_jiffies;
+		int count;
+		enum {
+			HPD_ENABLED = 0,
+			HPD_DISABLED = 1,
+			HPD_MARK_DISABLED = 2
+		} state;
+	} stats[HPD_NUM_PINS];
+	u32 event_bits;
+	struct delayed_work reenable_work;
+
+	struct intel_digital_port *irq_port[I915_MAX_PORTS];
+	u32 long_port_mask;
+	u32 short_port_mask;
+	struct work_struct dig_port_work;
+
+	/*
+	 * If we get an HPD irq from DP and an HPD irq from non-DP,
+	 * the non-DP HPD work could block the workqueue while taking
+	 * a mode config mutex that userspace may already hold. However,
+	 * userspace is waiting on the DP workqueue to run, which is
+	 * blocked behind the non-DP one.
+	 */
+	struct workqueue_struct *dp_wq;
+};
+
 #define I915_GEM_GPU_DOMAINS \
 	(I915_GEM_DOMAIN_RENDER | \
 	 I915_GEM_DOMAIN_SAMPLER | \
@@ -243,6 +278,12 @@
 			    &dev->mode_config.plane_list,	\
 			    base.head)
 
+#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)	\
+	list_for_each_entry(intel_plane,				\
+			    &(dev)->mode_config.plane_list,		\
+			    base.head)					\
+		if ((intel_plane)->pipe == (intel_crtc)->pipe)
+
 #define for_each_intel_crtc(dev, intel_crtc) \
 	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
 
@@ -333,7 +374,8 @@
 	uint32_t cfgcr1, cfgcr2;
 
 	/* bxt */
-	uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pll10, pcsdw12;
+	uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
+		 pcsdw12;
 };
 
 struct intel_shared_dpll_config {
@@ -343,7 +385,6 @@
 
 struct intel_shared_dpll {
 	struct intel_shared_dpll_config config;
-	struct intel_shared_dpll_config *new_config;
 
 	int active; /* count of number of active CRTCs (i.e. DPMS on) */
 	bool on; /* is the PLL actually active? Disabled during modeset */
@@ -445,6 +486,7 @@
 	struct timeval time;
 
 	char error_msg[128];
+	int iommu;
 	u32 reset_count;
 	u32 suspend_count;
 
@@ -559,9 +601,6 @@
 struct dpll;
 
 struct drm_i915_display_funcs {
-	bool (*fbc_enabled)(struct drm_device *dev);
-	void (*enable_fbc)(struct drm_crtc *crtc);
-	void (*disable_fbc)(struct drm_device *dev);
 	int (*get_display_clock_speed)(struct drm_device *dev);
 	int (*get_fifo_size)(struct drm_device *dev, int plane);
 	/**
@@ -587,7 +626,8 @@
 				 struct drm_crtc *crtc,
 				 uint32_t sprite_width, uint32_t sprite_height,
 				 int pixel_size, bool enable, bool scaled);
-	void (*modeset_global_resources)(struct drm_atomic_state *state);
+	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
+	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
 	/* Returns the active state of the crtc, and if the crtc is active,
 	 * fills out the pipe-config with the hw state. */
 	bool (*get_pipe_config)(struct intel_crtc *,
@@ -598,7 +638,6 @@
 				  struct intel_crtc_state *crtc_state);
 	void (*crtc_enable)(struct drm_crtc *crtc);
 	void (*crtc_disable)(struct drm_crtc *crtc);
-	void (*off)(struct drm_crtc *crtc);
 	void (*audio_codec_enable)(struct drm_connector *connector,
 				   struct intel_encoder *encoder,
 				   struct drm_display_mode *mode);
@@ -608,7 +647,7 @@
 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
 			  struct drm_framebuffer *fb,
 			  struct drm_i915_gem_object *obj,
-			  struct intel_engine_cs *ring,
+			  struct drm_i915_gem_request *req,
 			  uint32_t flags);
 	void (*update_primary_plane)(struct drm_crtc *crtc,
 				     struct drm_framebuffer *fb,
@@ -706,7 +745,7 @@
 
 struct intel_csr {
 	const char *fw_path;
-	__be32 *dmc_payload;
+	uint32_t *dmc_payload;
 	uint32_t dmc_fw_size;
 	uint32_t mmio_count;
 	uint32_t mmioaddr[8];
@@ -805,11 +844,15 @@
 
 /* This must match up with the value previously used for execbuf2.rsvd1. */
 #define DEFAULT_CONTEXT_HANDLE 0
+
+#define CONTEXT_NO_ZEROMAP (1<<0)
 /**
  * struct intel_context - as the name implies, represents a context.
  * @ref: reference count.
  * @user_handle: userspace tracking identity for this context.
  * @remap_slice: l3 row remapping information.
+ * @flags: context specific flags:
+ *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
  * @file_priv: filp associated with this context (NULL for global default
  *	       context).
  * @hang_stats: information about the role of this context in possible GPU
@@ -827,6 +870,7 @@
 	int user_handle;
 	uint8_t remap_slice;
 	struct drm_i915_private *i915;
+	int flags;
 	struct drm_i915_file_private *file_priv;
 	struct i915_ctx_hang_stats hang_stats;
 	struct i915_hw_ppgtt *ppgtt;
@@ -853,9 +897,13 @@
 	ORIGIN_CPU,
 	ORIGIN_CS,
 	ORIGIN_FLIP,
+	ORIGIN_DIRTYFB,
 };
 
 struct i915_fbc {
+	/* This is always the inner lock when overlapping with struct_mutex and
+	 * it's the outer lock when overlapping with stolen_lock. */
+	struct mutex lock;
 	unsigned long uncompressed_size;
 	unsigned threshold;
 	unsigned int fb_id;
@@ -875,7 +923,7 @@
 
 	struct intel_fbc_work {
 		struct delayed_work work;
-		struct drm_crtc *crtc;
+		struct intel_crtc *crtc;
 		struct drm_framebuffer *fb;
 	} *fbc_work;
 
@@ -891,7 +939,13 @@
 		FBC_MULTIPLE_PIPES, /* more than one pipe active */
 		FBC_MODULE_PARAM,
 		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
+		FBC_ROTATION, /* rotation is not supported */
+		FBC_IN_DBG_MASTER, /* kernel debugger is active */
 	} no_fbc_reason;
+
+	bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
+	void (*enable_fbc)(struct intel_crtc *crtc);
+	void (*disable_fbc)(struct drm_i915_private *dev_priv);
 };
 
 /**
@@ -1201,6 +1255,10 @@
 struct i915_gem_mm {
 	/** Memory allocator for GTT stolen memory */
 	struct drm_mm stolen;
+	/** Protects the usage of the GTT stolen memory allocator. This is
+	 * always the inner lock when overlapping with struct_mutex. */
+	struct mutex stolen_lock;
+
 	/** List of all objects in gtt_space. Used to restore gtt
 	 * mappings on resume */
 	struct list_head bound_list;
@@ -1354,6 +1412,15 @@
 	MODESET_SUSPENDED,
 };
 
+#define DP_AUX_A 0x40
+#define DP_AUX_B 0x10
+#define DP_AUX_C 0x20
+#define DP_AUX_D 0x30
+
+#define DDC_PIN_B  0x05
+#define DDC_PIN_C  0x04
+#define DDC_PIN_D  0x06
+
 struct ddi_vbt_port_info {
 	/*
 	 * This is an index in the HDMI/DVI DDI buffer translation table.
@@ -1366,6 +1433,12 @@
 	uint8_t supports_dvi:1;
 	uint8_t supports_hdmi:1;
 	uint8_t supports_dp:1;
+
+	uint8_t alternate_aux_channel;
+	uint8_t alternate_ddc_pin;
+
+	uint8_t dp_boost_level;
+	uint8_t hdmi_boost_level;
 };
 
 enum psr_lines_to_wait {
@@ -1461,23 +1534,27 @@
 	enum intel_ddb_partitioning partitioning;
 };
 
+struct vlv_pipe_wm {
+	uint16_t primary;
+	uint16_t sprite[2];
+	uint8_t cursor;
+};
+
+struct vlv_sr_wm {
+	uint16_t plane;
+	uint8_t cursor;
+};
+
 struct vlv_wm_values {
-	struct {
-		uint16_t primary;
-		uint16_t sprite[2];
-		uint8_t cursor;
-	} pipe[3];
-
-	struct {
-		uint16_t plane;
-		uint8_t cursor;
-	} sr;
-
+	struct vlv_pipe_wm pipe[3];
+	struct vlv_sr_wm sr;
 	struct {
 		uint8_t cursor;
 		uint8_t sprite[2];
 		uint8_t primary;
 	} ddl[3];
+	uint8_t level;
+	bool cxsr;
 };
 
 struct skl_ddb_entry {
@@ -1611,6 +1688,18 @@
 	bool active;
 };
 
+struct i915_execbuffer_params {
+	struct drm_device               *dev;
+	struct drm_file                 *file;
+	uint32_t                        dispatch_flags;
+	uint32_t                        args_batch_start_offset;
+	uint32_t                        batch_obj_vm_offset;
+	struct intel_engine_cs          *ring;
+	struct drm_i915_gem_object      *batch_obj;
+	struct intel_context            *ctx;
+	struct drm_i915_gem_request     *request;
+};
+
 struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *objects;
@@ -1680,19 +1769,7 @@
 	u32 pm_rps_events;
 	u32 pipestat_irq_mask[I915_MAX_PIPES];
 
-	struct work_struct hotplug_work;
-	struct {
-		unsigned long hpd_last_jiffies;
-		int hpd_cnt;
-		enum {
-			HPD_ENABLED = 0,
-			HPD_DISABLED = 1,
-			HPD_MARK_DISABLED = 2
-		} hpd_mark;
-	} hpd_stats[HPD_NUM_PINS];
-	u32 hpd_event_bits;
-	struct delayed_work hotplug_reenable_work;
-
+	struct i915_hotplug hotplug;
 	struct i915_fbc fbc;
 	struct i915_drrs drrs;
 	struct intel_opregion opregion;
@@ -1718,7 +1795,7 @@
 
 	unsigned int fsb_freq, mem_freq, is_ddr3;
 	unsigned int skl_boot_cdclk;
-	unsigned int cdclk_freq;
+	unsigned int cdclk_freq, max_cdclk_freq;
 	unsigned int hpll_freq;
 
 	/**
@@ -1769,9 +1846,6 @@
 
 	/* Reclocking support */
 	bool render_reclock_avail;
-	bool lvds_downclock_avail;
-	/* indicates the reduced downclock for LVDS*/
-	int lvds_downclock;
 
 	struct i915_frontbuffer_tracking fb_tracking;
 
@@ -1799,7 +1873,7 @@
 
 	struct drm_i915_gem_object *vlv_pctx;
 
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
 	struct work_struct fbdev_suspend_work;
@@ -1809,6 +1883,7 @@
 	struct drm_property *force_audio_property;
 
 	/* hda/i915 audio component */
+	struct i915_audio_component *audio_component;
 	bool audio_component_registered;
 
 	uint32_t hw_context_size;
@@ -1858,29 +1933,11 @@
 
 	struct i915_runtime_pm pm;
 
-	struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
-	u32 long_hpd_port_mask;
-	u32 short_hpd_port_mask;
-	struct work_struct dig_port_work;
-
-	/*
-	 * if we get a HPD irq from DP and a HPD irq from non-DP
-	 * the non-DP HPD could block the workqueue on a mode config
-	 * mutex getting, that userspace may have taken. However
-	 * userspace is waiting on the DP workqueue to run which is
-	 * blocked behind the non-DP one.
-	 */
-	struct workqueue_struct *dp_wq;
-
 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
 	struct {
-		int (*execbuf_submit)(struct drm_device *dev, struct drm_file *file,
-				      struct intel_engine_cs *ring,
-				      struct intel_context *ctx,
+		int (*execbuf_submit)(struct i915_execbuffer_params *params,
 				      struct drm_i915_gem_execbuffer2 *args,
-				      struct list_head *vmas,
-				      struct drm_i915_gem_object *batch_obj,
-				      u64 exec_start, u32 flags);
+				      struct list_head *vmas);
 		int (*init_rings)(struct drm_device *dev);
 		void (*cleanup_ring)(struct intel_engine_cs *ring);
 		void (*stop_ring)(struct intel_engine_cs *ring);
@@ -2148,7 +2205,8 @@
 	struct intel_context *ctx;
 	struct intel_ringbuffer *ringbuf;
 
-	/** Batch buffer related to this request if any */
+	/** Batch buffer related to this request if any (used for
+	    error state dump only) */
 	struct drm_i915_gem_object *batch_obj;
 
 	/** Time at which this request was emitted, in jiffies. */
@@ -2186,8 +2244,12 @@
 };
 
 int i915_gem_request_alloc(struct intel_engine_cs *ring,
-			   struct intel_context *ctx);
+			   struct intel_context *ctx,
+			   struct drm_i915_gem_request **req_out);
+void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+				   struct drm_file *file);
 
 static inline uint32_t
 i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
@@ -2391,6 +2453,9 @@
 				 ((INTEL_DEVID(dev) & 0xf) == 0x6 ||	\
 				 (INTEL_DEVID(dev) & 0xf) == 0xb ||	\
 				 (INTEL_DEVID(dev) & 0xf) == 0xe))
+/* ULX machines are also considered ULT. */
+#define IS_BDW_ULX(dev)		(IS_BROADWELL(dev) && \
+				 (INTEL_DEVID(dev) & 0xf) == 0xe)
 #define IS_BDW_GT3(dev)		(IS_BROADWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
 #define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
@@ -2400,6 +2465,14 @@
 /* ULX machines are also considered ULT. */
 #define IS_HSW_ULX(dev)		(INTEL_DEVID(dev) == 0x0A0E || \
 				 INTEL_DEVID(dev) == 0x0A1E)
+#define IS_SKL_ULT(dev)		(INTEL_DEVID(dev) == 0x1906 || \
+				 INTEL_DEVID(dev) == 0x1913 || \
+				 INTEL_DEVID(dev) == 0x1916 || \
+				 INTEL_DEVID(dev) == 0x1921 || \
+				 INTEL_DEVID(dev) == 0x1926)
+#define IS_SKL_ULX(dev)		(INTEL_DEVID(dev) == 0x190E || \
+				 INTEL_DEVID(dev) == 0x1915 || \
+				 INTEL_DEVID(dev) == 0x191E)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
 #define SKL_REVID_A0		(0x0)
@@ -2466,9 +2539,6 @@
  */
 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
 						      IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
 #define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
 
@@ -2494,6 +2564,12 @@
 
 #define HAS_CSR(dev)	(IS_SKYLAKE(dev))
 
+#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
+				    INTEL_INFO(dev)->gen >= 8)
+
+#define HAS_CORE_RING_FREQ(dev)	(INTEL_INFO(dev)->gen >= 6 && \
+				 !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
+
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
@@ -2533,7 +2609,6 @@
 	int modeset;
 	int panel_ignore_lid;
 	int semaphores;
-	unsigned int lvds_downclock;
 	int lvds_channel_mode;
 	int panel_use_ssc;
 	int vbt_sdvo_panel_type;
@@ -2555,10 +2630,11 @@
 	bool reset;
 	bool disable_display;
 	bool disable_vtd_wa;
+	bool enable_guc_submission;
+	int guc_log_level;
 	int use_mmio_flip;
 	int mmio_debug;
 	bool verbose_state_checks;
-	bool nuclear_pageflip;
 	int edp_vswing;
 };
 extern struct i915_params i915 __read_mostly;
@@ -2572,21 +2648,27 @@
 				 struct drm_file *file);
 extern void i915_driver_postclose(struct drm_device *dev,
 				  struct drm_file *file);
-extern int i915_driver_device_is_agp(struct drm_device * dev);
 #ifdef CONFIG_COMPAT
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg);
 #endif
 extern int intel_gpu_reset(struct drm_device *dev);
+extern bool intel_has_gpu_reset(struct drm_device *dev);
 extern int i915_reset(struct drm_device *dev);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 void i915_firmware_load_error_print(const char *fw_path, int err);
 
+/* intel_hotplug.c */
+void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
+void intel_hpd_init(struct drm_i915_private *dev_priv);
+void intel_hpd_init_work(struct drm_i915_private *dev_priv);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
+
 /* i915_irq.c */
 void i915_queue_hangcheck(struct drm_device *dev);
 __printf(3, 4)
@@ -2594,7 +2676,6 @@
 		       const char *fmt, ...);
 
 extern void intel_irq_init(struct drm_i915_private *dev_priv);
-extern void intel_hpd_init(struct drm_i915_private *dev_priv);
 int intel_irq_install(struct drm_i915_private *dev_priv);
 void intel_irq_uninstall(struct drm_i915_private *dev_priv);
 
@@ -2661,19 +2742,11 @@
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
 void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-					struct intel_engine_cs *ring);
-void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
-					 struct drm_file *file,
-					 struct intel_engine_cs *ring,
-					 struct drm_i915_gem_object *obj);
-int i915_gem_ringbuffer_submission(struct drm_device *dev,
-				   struct drm_file *file,
-				   struct intel_engine_cs *ring,
-				   struct intel_context *ctx,
+					struct drm_i915_gem_request *req);
+void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
+int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 				   struct drm_i915_gem_execbuffer2 *args,
-				   struct list_head *vmas,
-				   struct drm_i915_gem_object *batch_obj,
-				   u64 exec_start, u32 flags);
+				   struct list_head *vmas);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
@@ -2706,6 +2779,8 @@
 			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
+struct drm_i915_gem_object *i915_gem_object_create_from_data(
+		struct drm_device *dev, const void *data, size_t size);
 void i915_init_vm(struct drm_i915_private *dev_priv,
 		  struct i915_address_space *vm);
 void i915_gem_free_object(struct drm_gem_object *obj);
@@ -2780,9 +2855,10 @@
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
-			 struct intel_engine_cs *to);
+			 struct intel_engine_cs *to,
+			 struct drm_i915_gem_request **to_req);
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct intel_engine_cs *ring);
+			     struct drm_i915_gem_request *req);
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
@@ -2811,11 +2887,6 @@
 
 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-
-bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
-void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring);
@@ -2824,7 +2895,6 @@
 void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
-int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
 
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
@@ -2859,16 +2929,18 @@
 int __must_check i915_gem_init(struct drm_device *dev);
 int i915_gem_init_rings(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
+int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
-int __i915_add_request(struct intel_engine_cs *ring,
-		       struct drm_file *file,
-		       struct drm_i915_gem_object *batch_obj);
-#define i915_add_request(ring) \
-	__i915_add_request(ring, NULL, NULL)
+void __i915_add_request(struct drm_i915_gem_request *req,
+			struct drm_i915_gem_object *batch_obj,
+			bool flush_caches);
+#define i915_add_request(req) \
+	__i915_add_request(req, NULL, true)
+#define i915_add_request_no_flush(req) \
+	__i915_add_request(req, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
 			unsigned reset_counter,
 			bool interruptible,
@@ -2888,6 +2960,7 @@
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     struct intel_engine_cs *pipelined,
+				     struct drm_i915_gem_request **pipelined_request,
 				     const struct i915_ggtt_view *view);
 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
 					      const struct i915_ggtt_view *view);
@@ -2911,8 +2984,6 @@
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				struct drm_gem_object *gem_obj, int flags);
 
-void i915_gem_restore_fences(struct drm_device *dev);
-
 unsigned long
 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
 			      const struct i915_ggtt_view *view);
@@ -3007,15 +3078,27 @@
 	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
 }
 
+/* i915_gem_fence.c */
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
+bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
+void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
+
+void i915_gem_restore_fences(struct drm_device *dev);
+
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+
 /* i915_gem_context.c */
 int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
 void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-int i915_gem_context_enable(struct drm_i915_private *dev_priv);
+int i915_gem_context_enable(struct drm_i915_gem_request *req);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
-int i915_switch_context(struct intel_engine_cs *ring,
-			struct intel_context *to);
+int i915_switch_context(struct drm_i915_gem_request *req);
 struct intel_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
 void i915_gem_context_free(struct kref *ctx_ref);
@@ -3065,9 +3148,12 @@
 }
 
 /* i915_gem_stolen.c */
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+				struct drm_mm_node *node, u64 size,
+				unsigned alignment);
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+				 struct drm_mm_node *node);
 int i915_gem_init_stolen(struct drm_device *dev);
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
-void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
 void i915_gem_cleanup_stolen(struct drm_device *dev);
 struct drm_i915_gem_object *
 i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
@@ -3097,10 +3183,6 @@
 		obj->tiling_mode != I915_TILING_NONE;
 }
 
-void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
-
 /* i915_gem_debug.c */
 #if WATCH_LISTS
 int i915_verify_lists(struct drm_device *dev);
@@ -3222,8 +3304,7 @@
 extern void intel_modeset_cleanup(struct drm_device *dev);
 extern void intel_connector_unregister(struct intel_connector *);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void intel_modeset_setup_hw_state(struct drm_device *dev,
-					 bool force_restore);
+extern void intel_display_resume(struct drm_device *dev);
 extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
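Aside: the hotplug consolidation above relies on HPD_NONE and HPD_TV
sharing the value 0, so that for_each_hpd_pin() starts at the first real
pin. A self-contained sketch of the iteration pattern, with the enum
values copied from the hunk above (illustrative only):

    #include <stdio.h>

    enum hpd_pin {
        HPD_NONE = 0,
        HPD_TV = HPD_NONE,  /* TV is known to be unreliable */
        HPD_CRT,
        HPD_SDVO_B,
        HPD_SDVO_C,
        HPD_PORT_A,
        HPD_PORT_B,
        HPD_PORT_C,
        HPD_PORT_D,
        HPD_PORT_E,
        HPD_NUM_PINS
    };

    #define for_each_hpd_pin(__pin) \
        for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)

    int main(void)
    {
        enum hpd_pin pin;

        /* Visits HPD_CRT through HPD_PORT_E once each, skipping the
         * shared value 0 used by HPD_NONE/HPD_TV. */
        for_each_hpd_pin(pin)
            printf("pin %d\n", (int)pin);
        return 0;
    }
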
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 52b446b..4d631a9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,11 +46,6 @@
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
 static void
 i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj);
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
-					 struct drm_i915_fence_reg *fence,
-					 bool enable);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
 				  enum i915_cache_level level)
@@ -66,18 +61,6 @@
 	return obj->pin_display;
 }
 
-static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
-{
-	if (obj->tiling_mode)
-		i915_gem_release_mmap(obj);
-
-	/* As we do not have an associated fence register, we will force
-	 * a tiling change if we ever need to acquire one.
-	 */
-	obj->fence_dirty = false;
-	obj->fence_reg = I915_FENCE_REG_NONE;
-}
-
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
 				  size_t size)
@@ -149,14 +132,18 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_get_aperture *args = data;
-	struct drm_i915_gem_object *obj;
+	struct i915_gtt *ggtt = &dev_priv->gtt;
+	struct i915_vma *vma;
 	size_t pinned;
 
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-		if (i915_gem_obj_is_pinned(obj))
-			pinned += i915_gem_obj_ggtt_size(obj);
+	list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
+		if (vma->pin_count)
+			pinned += vma->node.size;
+	list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
+		if (vma->pin_count)
+			pinned += vma->node.size;
 	mutex_unlock(&dev->struct_mutex);
 
 	args->aper_size = dev_priv->gtt.base.total;
@@ -347,7 +334,7 @@
 	if (ret)
 		return ret;
 
-	intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
 		unsigned long unwritten;
 
@@ -368,7 +355,7 @@
 	i915_gem_chipset_flush(dev);
 
 out:
-	intel_fb_obj_flush(obj, false);
+	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 	return ret;
 }
 
@@ -801,7 +788,7 @@
 
 	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
 
-	intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
+	intel_fb_obj_invalidate(obj, ORIGIN_GTT);
 
 	while (remain > 0) {
 		/* Operation in this page
@@ -832,7 +819,7 @@
 	}
 
 out_flush:
-	intel_fb_obj_flush(obj, false);
+	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 out_unpin:
 	i915_gem_object_ggtt_unpin(obj);
 out:
@@ -945,7 +932,7 @@
 	if (ret)
 		return ret;
 
-	intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 
 	i915_gem_object_pin_pages(obj);
 
@@ -1025,7 +1012,7 @@
 	if (needs_clflush_after)
 		i915_gem_chipset_flush(dev);
 
-	intel_fb_obj_flush(obj, false);
+	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 	return ret;
 }
 
@@ -1146,23 +1133,6 @@
 	return 0;
 }
 
-/*
- * Compare arbitrary request against outstanding lazy request. Emit on match.
- */
-int
-i915_gem_check_olr(struct drm_i915_gem_request *req)
-{
-	int ret;
-
-	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
-
-	ret = 0;
-	if (req == req->ring->outstanding_lazy_request)
-		ret = i915_add_request(req->ring);
-
-	return ret;
-}
-
 static void fake_irq(unsigned long data)
 {
 	wake_up_process((struct task_struct *)data);
@@ -1334,6 +1304,33 @@
 	return ret;
 }
 
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+				   struct drm_file *file)
+{
+	struct drm_i915_private *dev_private;
+	struct drm_i915_file_private *file_priv;
+
+	WARN_ON(!req || !file || req->file_priv);
+
+	if (!req || !file)
+		return -EINVAL;
+
+	if (req->file_priv)
+		return -EINVAL;
+
+	file_priv = file->driver_priv;
+
+	spin_lock(&file_priv->mm.lock);
+	req->file_priv = file_priv;
+	list_add_tail(&req->client_list, &file_priv->mm.request_list);
+	spin_unlock(&file_priv->mm.lock);
+
+	req->pid = get_pid(task_pid(current));
+
+	return 0;
+}
+
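A note on i915_gem_request_add_to_client() above: attaching a request to a
file's client list is now an explicit step taken by the submitter rather than
a side effect of __i915_add_request(). A minimal caller-side sketch, assuming
the request alloc/cancel helpers introduced elsewhere in this series
(illustrative, not part of the patch):

	struct drm_i915_gem_request *req;
	int ret;

	ret = i915_gem_request_alloc(ring, ctx, &req);
	if (ret)
		return ret;

	ret = i915_gem_request_add_to_client(req, file);
	if (ret) {
		/* Never submitted, so drop it (and its reserved ring space) */
		i915_gem_request_cancel(req);
		return ret;
	}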
 static inline void
 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 {
@@ -1346,6 +1343,9 @@
 	list_del(&request->client_list);
 	request->file_priv = NULL;
 	spin_unlock(&file_priv->mm.lock);
+
+	put_pid(request->pid);
+	request->pid = NULL;
 }
 
 static void i915_gem_request_retire(struct drm_i915_gem_request *request)
@@ -1365,8 +1365,6 @@
 	list_del_init(&request->list);
 	i915_gem_request_remove_from_client(request);
 
-	put_pid(request->pid);
-
 	i915_gem_request_unreference(request);
 }
 
@@ -1415,10 +1413,6 @@
 	if (ret)
 		return ret;
 
-	ret = i915_gem_check_olr(req);
-	if (ret)
-		return ret;
-
 	ret = __i915_wait_request(req,
 				  atomic_read(&dev_priv->gpu_error.reset_counter),
 				  interruptible, NULL, NULL);
@@ -1518,10 +1512,6 @@
 		if (req == NULL)
 			return 0;
 
-		ret = i915_gem_check_olr(req);
-		if (ret)
-			goto err;
-
 		requests[n++] = i915_gem_request_reference(req);
 	} else {
 		for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -1531,10 +1521,6 @@
 			if (req == NULL)
 				continue;
 
-			ret = i915_gem_check_olr(req);
-			if (ret)
-				goto err;
-
 			requests[n++] = i915_gem_request_reference(req);
 		}
 	}
@@ -1545,7 +1531,6 @@
 					  NULL, rps);
 	mutex_lock(&dev->struct_mutex);
 
-err:
 	for (i = 0; i < n; i++) {
 		if (ret == 0)
 			i915_gem_object_retire_request(obj, requests[i]);
@@ -1613,6 +1598,11 @@
 	else
 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 
+	if (write_domain != 0)
+		intel_fb_obj_invalidate(obj,
+					write_domain == I915_GEM_DOMAIN_GTT ?
+					ORIGIN_GTT : ORIGIN_CPU);
+
 unref:
 	drm_gem_object_unreference(&obj->base);
 unlock:
@@ -2349,9 +2339,12 @@
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct intel_engine_cs *ring)
+			     struct drm_i915_gem_request *req)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
+	struct intel_engine_cs *ring;
+
+	ring = i915_gem_request_get_ring(req);
 
 	/* Add a reference if we're newly entering the active list. */
 	if (obj->active == 0)
@@ -2359,8 +2352,7 @@
 	obj->active |= intel_ring_flag(ring);
 
 	list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
-	i915_gem_request_assign(&obj->last_read_req[ring->id],
-				intel_ring_get_request(ring));
+	i915_gem_request_assign(&obj->last_read_req[ring->id], req);
 
 	list_move_tail(&vma->mm_list, &vma->vm->active_list);
 }
@@ -2372,7 +2364,7 @@
 	RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
 
 	i915_gem_request_assign(&obj->last_write_req, NULL);
-	intel_fb_obj_flush(obj, true);
+	intel_fb_obj_flush(obj, true, ORIGIN_CS);
 }
 
 static void
@@ -2393,6 +2385,13 @@
 	if (obj->active)
 		return;
 
+	/* Bump our place on the bound list to keep it roughly in LRU order
+	 * so that we don't steal from recently used but inactive objects
+	 * (unless we are forced to, of course!)
+	 */
+	list_move_tail(&obj->global_list,
+		       &to_i915(obj->base.dev)->mm.bound_list);
+
 	list_for_each_entry(vma, &obj->vma_list, vma_link) {
 		if (!list_empty(&vma->mm_list))
 			list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
@@ -2472,24 +2471,34 @@
 	return 0;
 }
 
-int __i915_add_request(struct intel_engine_cs *ring,
-		       struct drm_file *file,
-		       struct drm_i915_gem_object *obj)
+/*
+ * NB: This function is not allowed to fail. Doing so would mean the
+ * request is not being tracked for completion but the work itself is
+ * going to happen on the hardware. This would be a Bad Thing(tm).
+ */
+void __i915_add_request(struct drm_i915_gem_request *request,
+			struct drm_i915_gem_object *obj,
+			bool flush_caches)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	struct drm_i915_gem_request *request;
+	struct intel_engine_cs *ring;
+	struct drm_i915_private *dev_priv;
 	struct intel_ringbuffer *ringbuf;
 	u32 request_start;
 	int ret;
 
-	request = ring->outstanding_lazy_request;
 	if (WARN_ON(request == NULL))
-		return -ENOMEM;
+		return;
 
-	if (i915.enable_execlists) {
-		ringbuf = request->ctx->engine[ring->id].ringbuf;
-	} else
-		ringbuf = ring->buffer;
+	ring = request->ring;
+	dev_priv = ring->dev->dev_private;
+	ringbuf = request->ringbuf;
+
+	/*
+	 * To ensure that this call will not fail, space for its emissions
+	 * should already have been reserved in the ring buffer. Let the ring
+	 * know that it is time to use that space up.
+	 */
+	intel_ring_reserved_space_use(ringbuf);
 
 	request_start = intel_ring_get_tail(ringbuf);
 	/*
@@ -2499,14 +2508,13 @@
 	 * is that the flush _must_ happen before the next request, no matter
 	 * what.
 	 */
-	if (i915.enable_execlists) {
-		ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
-		if (ret)
-			return ret;
-	} else {
-		ret = intel_ring_flush_all_caches(ring);
-		if (ret)
-			return ret;
+	if (flush_caches) {
+		if (i915.enable_execlists)
+			ret = logical_ring_flush_all_caches(request);
+		else
+			ret = intel_ring_flush_all_caches(request);
+		/* Not allowed to fail! */
+		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
 	}
 
 	/* Record the position of the start of the request so that
@@ -2516,17 +2524,15 @@
 	 */
 	request->postfix = intel_ring_get_tail(ringbuf);
 
-	if (i915.enable_execlists) {
-		ret = ring->emit_request(ringbuf, request);
-		if (ret)
-			return ret;
-	} else {
-		ret = ring->add_request(ring);
-		if (ret)
-			return ret;
+	if (i915.enable_execlists)
+		ret = ring->emit_request(request);
+	else {
+		ret = ring->add_request(request);
 
 		request->tail = intel_ring_get_tail(ringbuf);
 	}
+	/* Not allowed to fail! */
+	WARN(ret, "emit|add_request failed: %d!\n", ret);
 
 	request->head = request_start;
 
@@ -2538,34 +2544,11 @@
 	 */
 	request->batch_obj = obj;
 
-	if (!i915.enable_execlists) {
-		/* Hold a reference to the current context so that we can inspect
-		 * it later in case a hangcheck error event fires.
-		 */
-		request->ctx = ring->last_context;
-		if (request->ctx)
-			i915_gem_context_reference(request->ctx);
-	}
-
 	request->emitted_jiffies = jiffies;
 	ring->last_submitted_seqno = request->seqno;
 	list_add_tail(&request->list, &ring->request_list);
-	request->file_priv = NULL;
-
-	if (file) {
-		struct drm_i915_file_private *file_priv = file->driver_priv;
-
-		spin_lock(&file_priv->mm.lock);
-		request->file_priv = file_priv;
-		list_add_tail(&request->client_list,
-			      &file_priv->mm.request_list);
-		spin_unlock(&file_priv->mm.lock);
-
-		request->pid = get_pid(task_pid(current));
-	}
 
 	trace_i915_gem_request_add(request);
-	ring->outstanding_lazy_request = NULL;
 
 	i915_queue_hangcheck(ring->dev);
 
@@ -2574,7 +2557,8 @@
 			   round_jiffies_up_relative(HZ));
 	intel_mark_busy(dev_priv->dev);
 
-	return 0;
+	/* Sanity check that the reserved size was large enough. */
+	intel_ring_reserved_space_end(ringbuf);
 }
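The "not allowed to fail" rule above rests on the ring-space reservation made
when the request is allocated. The intended pairing of the reservation
helpers, sketched with the names this series introduces (the flow is
illustrative, not verbatim kernel code):

	/*
	 * i915_gem_request_alloc()
	 *   -> intel_ring_reserve_space(req)		reserve room for the
	 *						request's own emissions
	 * ... caller emits its commands ...
	 * __i915_add_request(req, obj, flush)
	 *   -> intel_ring_reserved_space_use(ringbuf)	start consuming it
	 *   -> emit flush + breadcrumb			(must not fail)
	 *   -> intel_ring_reserved_space_end(ringbuf)	sanity-check the size
	 *
	 * A request that is never submitted instead goes through
	 * i915_gem_request_cancel()
	 *   -> intel_ring_reserved_space_cancel(req->ringbuf)
	 */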
 
 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
@@ -2628,12 +2612,13 @@
 						 typeof(*req), ref);
 	struct intel_context *ctx = req->ctx;
 
+	if (req->file_priv)
+		i915_gem_request_remove_from_client(req);
+
 	if (ctx) {
 		if (i915.enable_execlists) {
-			struct intel_engine_cs *ring = req->ring;
-
-			if (ctx != ring->default_context)
-				intel_lr_context_unpin(ring, ctx);
+			if (ctx != req->ring->default_context)
+				intel_lr_context_unpin(req);
 		}
 
 		i915_gem_context_unreference(ctx);
@@ -2643,36 +2628,63 @@
 }
 
 int i915_gem_request_alloc(struct intel_engine_cs *ring,
-			   struct intel_context *ctx)
+			   struct intel_context *ctx,
+			   struct drm_i915_gem_request **req_out)
 {
 	struct drm_i915_private *dev_priv = to_i915(ring->dev);
 	struct drm_i915_gem_request *req;
 	int ret;
 
-	if (ring->outstanding_lazy_request)
-		return 0;
+	if (!req_out)
+		return -EINVAL;
+
+	*req_out = NULL;
 
 	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
 	if (req == NULL)
 		return -ENOMEM;
 
-	kref_init(&req->ref);
-	req->i915 = dev_priv;
-
 	ret = i915_gem_get_seqno(ring->dev, &req->seqno);
 	if (ret)
 		goto err;
 
+	kref_init(&req->ref);
+	req->i915 = dev_priv;
 	req->ring = ring;
+	req->ctx  = ctx;
+	i915_gem_context_reference(req->ctx);
 
 	if (i915.enable_execlists)
-		ret = intel_logical_ring_alloc_request_extras(req, ctx);
+		ret = intel_logical_ring_alloc_request_extras(req);
 	else
 		ret = intel_ring_alloc_request_extras(req);
-	if (ret)
+	if (ret) {
+		i915_gem_context_unreference(req->ctx);
 		goto err;
+	}
 
-	ring->outstanding_lazy_request = req;
+	/*
+	 * Reserve space in the ring buffer for all the commands required to
+	 * eventually emit this request. This is to guarantee that the
+	 * i915_add_request() call can't fail. Note that the reserve may need
+	 * to be redone if the request is not actually submitted straight
+	 * away, e.g. because a GPU scheduler has deferred it.
+	 */
+	if (i915.enable_execlists)
+		ret = intel_logical_ring_reserve_space(req);
+	else
+		ret = intel_ring_reserve_space(req);
+	if (ret) {
+		/*
+		 * At this point, the request is fully allocated even if not
+		 * fully prepared. Thus it can be cleaned up using the proper
+		 * free code.
+		 */
+		i915_gem_request_cancel(req);
+		return ret;
+	}
+
+	*req_out = req;
 	return 0;
 
 err:
@@ -2680,6 +2692,13 @@
 	return ret;
 }
 
+void i915_gem_request_cancel(struct drm_i915_gem_request *req)
+{
+	intel_ring_reserved_space_cancel(req->ringbuf);
+
+	i915_gem_request_unreference(req);
+}
+
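i915_gem_request_cancel() is the error-path counterpart to submission: every
allocated request must end in exactly one of __i915_add_request() or this
cancel. A hedged sketch of the pattern the later hunks follow
(do_some_emission() is a hypothetical stand-in for real work):

	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
	if (ret)
		return ret;

	ret = do_some_emission(req);		/* hypothetical */
	if (ret) {
		i915_gem_request_cancel(req);	/* never submitted: free it */
		return ret;
	}

	i915_add_request_no_flush(req);		/* submitted: ring owns it */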
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring)
 {
@@ -2741,7 +2760,7 @@
 		list_del(&submit_req->execlist_link);
 
 		if (submit_req->ctx != ring->default_context)
-			intel_lr_context_unpin(ring, submit_req->ctx);
+			intel_lr_context_unpin(submit_req);
 
 		i915_gem_request_unreference(submit_req);
 	}
@@ -2762,30 +2781,6 @@
 
 		i915_gem_request_retire(request);
 	}
-
-	/* This may not have been flushed before the reset, so clean it now */
-	i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
-}
-
-void i915_gem_restore_fences(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
-
-	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-
-		/*
-		 * Commit delayed tiling changes if we have an object still
-		 * attached to the fence, otherwise just clear the fence.
-		 */
-		if (reg->obj) {
-			i915_gem_object_update_fence(reg->obj, reg,
-						     reg->obj->tiling_mode);
-		} else {
-			i915_gem_write_fence(dev, i, NULL);
-		}
-	}
 }
 
 void i915_gem_reset(struct drm_device *dev)
@@ -2947,7 +2942,7 @@
 static int
 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 {
-	int ret, i;
+	int i;
 
 	if (!obj->active)
 		return 0;
@@ -2962,10 +2957,6 @@
 		if (list_empty(&req->list))
 			goto retire;
 
-		ret = i915_gem_check_olr(req);
-		if (ret)
-			return ret;
-
 		if (i915_gem_request_completed(req, true)) {
 			__i915_gem_request_retire__upto(req);
 retire:
@@ -3068,25 +3059,22 @@
 static int
 __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		       struct intel_engine_cs *to,
-		       struct drm_i915_gem_request *req)
+		       struct drm_i915_gem_request *from_req,
+		       struct drm_i915_gem_request **to_req)
 {
 	struct intel_engine_cs *from;
 	int ret;
 
-	from = i915_gem_request_get_ring(req);
+	from = i915_gem_request_get_ring(from_req);
 	if (to == from)
 		return 0;
 
-	if (i915_gem_request_completed(req, true))
+	if (i915_gem_request_completed(from_req, true))
 		return 0;
 
-	ret = i915_gem_check_olr(req);
-	if (ret)
-		return ret;
-
 	if (!i915_semaphore_is_enabled(obj->base.dev)) {
 		struct drm_i915_private *i915 = to_i915(obj->base.dev);
-		ret = __i915_wait_request(req,
+		ret = __i915_wait_request(from_req,
 					  atomic_read(&i915->gpu_error.reset_counter),
 					  i915->mm.interruptible,
 					  NULL,
@@ -3094,16 +3082,24 @@
 		if (ret)
 			return ret;
 
-		i915_gem_object_retire_request(obj, req);
+		i915_gem_object_retire_request(obj, from_req);
 	} else {
 		int idx = intel_ring_sync_index(from, to);
-		u32 seqno = i915_gem_request_get_seqno(req);
+		u32 seqno = i915_gem_request_get_seqno(from_req);
+
+		WARN_ON(!to_req);
 
 		if (seqno <= from->semaphore.sync_seqno[idx])
 			return 0;
 
-		trace_i915_gem_ring_sync_to(from, to, req);
-		ret = to->semaphore.sync_to(to, from, seqno);
+		if (*to_req == NULL) {
+			ret = i915_gem_request_alloc(to, to->default_context, to_req);
+			if (ret)
+				return ret;
+		}
+
+		trace_i915_gem_ring_sync_to(*to_req, from, from_req);
+		ret = to->semaphore.sync_to(*to_req, from, seqno);
 		if (ret)
 			return ret;
 
@@ -3123,11 +3119,14 @@
  *
  * @obj: object which may be in use on another ring.
  * @to: ring we wish to use the object on. May be NULL.
+ * @to_req: request we wish to use the object for. See below.
+ *          This will be allocated and returned if a request is
+ *          required but not passed in.
  *
  * This code is meant to abstract object synchronization with the GPU.
  * Calling with NULL implies synchronizing the object with the CPU
  * rather than a particular GPU ring. Conceptually we serialise writes
- * between engines inside the GPU. We only allow on engine to write
+ * between engines inside the GPU. We only allow one engine to write
  * into a buffer at any time, but multiple readers. To ensure each has
  * a coherent view of memory, we must:
  *
@@ -3138,11 +3137,22 @@
  * - If we are a write request (pending_write_domain is set), the new
  *   request must wait for outstanding read requests to complete.
  *
+ * For CPU synchronisation (NULL to) no request is required. For syncing with
+ * rings to_req must be non-NULL. However, a request does not have to be
+ * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
+ * request will be allocated automatically and returned through *to_req. Note
+ * that it is not guaranteed that commands will be emitted (because the system
+ * might already be idle). Hence there is no need to create a request that
+ * might never have any work submitted. Note further that if a request is
+ * returned in *to_req, it is the responsibility of the caller to submit
+ * that request (after potentially adding more work to it).
+ *
  * Returns 0 if successful, else propagates up the lower layer error.
  */
 int
 i915_gem_object_sync(struct drm_i915_gem_object *obj,
-		     struct intel_engine_cs *to)
+		     struct intel_engine_cs *to,
+		     struct drm_i915_gem_request **to_req)
 {
 	const bool readonly = obj->base.pending_write_domain == 0;
 	struct drm_i915_gem_request *req[I915_NUM_RINGS];
@@ -3164,7 +3174,7 @@
 				req[n++] = obj->last_read_req[i];
 	}
 	for (i = 0; i < n; i++) {
-		ret = __i915_gem_object_sync(obj, to, req[i]);
+		ret = __i915_gem_object_sync(obj, to, req[i], to_req);
 		if (ret)
 			return ret;
 	}
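To make the to_req contract above concrete, a caller-side sketch (an assumed
flow; i915_add_request_no_flush() is the submission form used elsewhere in
this series):

	struct drm_i915_gem_request *to_req = NULL;
	int ret;

	ret = i915_gem_object_sync(obj, to, &to_req);
	if (ret)
		return ret;

	/* sync may have allocated a request on 'to' to carry semaphore
	 * commands; if so, it is ours to submit. */
	if (to_req)
		i915_add_request_no_flush(to_req);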
@@ -3275,9 +3285,19 @@
 	/* Flush everything onto the inactive list. */
 	for_each_ring(ring, dev_priv, i) {
 		if (!i915.enable_execlists) {
-			ret = i915_switch_context(ring, ring->default_context);
+			struct drm_i915_gem_request *req;
+
+			ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 			if (ret)
 				return ret;
+
+			ret = i915_switch_context(req);
+			if (ret) {
+				i915_gem_request_cancel(req);
+				return ret;
+			}
+
+			i915_add_request_no_flush(req);
 		}
 
 		ret = intel_ring_idle(ring);
@@ -3289,343 +3309,6 @@
 	return 0;
 }
 
-static void i965_write_fence_reg(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int fence_reg;
-	int fence_pitch_shift;
-
-	if (INTEL_INFO(dev)->gen >= 6) {
-		fence_reg = FENCE_REG_SANDYBRIDGE_0;
-		fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
-	} else {
-		fence_reg = FENCE_REG_965_0;
-		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
-	}
-
-	fence_reg += reg * 8;
-
-	/* To w/a incoherency with non-atomic 64-bit register updates,
-	 * we split the 64-bit update into two 32-bit writes. In order
-	 * for a partial fence not to be evaluated between writes, we
-	 * precede the update with write to turn off the fence register,
-	 * and only enable the fence as the last step.
-	 *
-	 * For extra levels of paranoia, we make sure each step lands
-	 * before applying the next step.
-	 */
-	I915_WRITE(fence_reg, 0);
-	POSTING_READ(fence_reg);
-
-	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
-		uint64_t val;
-
-		/* Adjust fence size to match tiled area */
-		if (obj->tiling_mode != I915_TILING_NONE) {
-			uint32_t row_size = obj->stride *
-				(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
-			size = (size / row_size) * row_size;
-		}
-
-		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
-				 0xfffff000) << 32;
-		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
-		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
-		if (obj->tiling_mode == I915_TILING_Y)
-			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
-		val |= I965_FENCE_REG_VALID;
-
-		I915_WRITE(fence_reg + 4, val >> 32);
-		POSTING_READ(fence_reg + 4);
-
-		I915_WRITE(fence_reg + 0, val);
-		POSTING_READ(fence_reg);
-	} else {
-		I915_WRITE(fence_reg + 4, 0);
-		POSTING_READ(fence_reg + 4);
-	}
-}
-
-static void i915_write_fence_reg(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 val;
-
-	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
-		int pitch_val;
-		int tile_width;
-
-		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
-		     (size & -size) != size ||
-		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
-		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
-
-		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
-			tile_width = 128;
-		else
-			tile_width = 512;
-
-		/* Note: pitch better be a power of two tile widths */
-		pitch_val = obj->stride / tile_width;
-		pitch_val = ffs(pitch_val) - 1;
-
-		val = i915_gem_obj_ggtt_offset(obj);
-		if (obj->tiling_mode == I915_TILING_Y)
-			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-		val |= I915_FENCE_SIZE_BITS(size);
-		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
-		val |= I830_FENCE_REG_VALID;
-	} else
-		val = 0;
-
-	if (reg < 8)
-		reg = FENCE_REG_830_0 + reg * 4;
-	else
-		reg = FENCE_REG_945_8 + (reg - 8) * 4;
-
-	I915_WRITE(reg, val);
-	POSTING_READ(reg);
-}
-
-static void i830_write_fence_reg(struct drm_device *dev, int reg,
-				struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t val;
-
-	if (obj) {
-		u32 size = i915_gem_obj_ggtt_size(obj);
-		uint32_t pitch_val;
-
-		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
-		     (size & -size) != size ||
-		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-		     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
-		     i915_gem_obj_ggtt_offset(obj), size);
-
-		pitch_val = obj->stride / 128;
-		pitch_val = ffs(pitch_val) - 1;
-
-		val = i915_gem_obj_ggtt_offset(obj);
-		if (obj->tiling_mode == I915_TILING_Y)
-			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-		val |= I830_FENCE_SIZE_BITS(size);
-		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
-		val |= I830_FENCE_REG_VALID;
-	} else
-		val = 0;
-
-	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
-	POSTING_READ(FENCE_REG_830_0 + reg * 4);
-}
-
-inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
-{
-	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
-}
-
-static void i915_gem_write_fence(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/* Ensure that all CPU reads are completed before installing a fence
-	 * and all writes before removing the fence.
-	 */
-	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
-		mb();
-
-	WARN(obj && (!obj->stride || !obj->tiling_mode),
-	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
-	     obj->stride, obj->tiling_mode);
-
-	if (IS_GEN2(dev))
-		i830_write_fence_reg(dev, reg, obj);
-	else if (IS_GEN3(dev))
-		i915_write_fence_reg(dev, reg, obj);
-	else if (INTEL_INFO(dev)->gen >= 4)
-		i965_write_fence_reg(dev, reg, obj);
-
-	/* And similarly be paranoid that no direct access to this region
-	 * is reordered to before the fence is installed.
-	 */
-	if (i915_gem_object_needs_mb(obj))
-		mb();
-}
-
-static inline int fence_number(struct drm_i915_private *dev_priv,
-			       struct drm_i915_fence_reg *fence)
-{
-	return fence - dev_priv->fence_regs;
-}
-
-static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
-					 struct drm_i915_fence_reg *fence,
-					 bool enable)
-{
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	int reg = fence_number(dev_priv, fence);
-
-	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
-
-	if (enable) {
-		obj->fence_reg = reg;
-		fence->obj = obj;
-		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
-	} else {
-		obj->fence_reg = I915_FENCE_REG_NONE;
-		fence->obj = NULL;
-		list_del_init(&fence->lru_list);
-	}
-	obj->fence_dirty = false;
-}
-
-static int
-i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
-{
-	if (obj->last_fenced_req) {
-		int ret = i915_wait_request(obj->last_fenced_req);
-		if (ret)
-			return ret;
-
-		i915_gem_request_assign(&obj->last_fenced_req, NULL);
-	}
-
-	return 0;
-}
-
-int
-i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	struct drm_i915_fence_reg *fence;
-	int ret;
-
-	ret = i915_gem_object_wait_fence(obj);
-	if (ret)
-		return ret;
-
-	if (obj->fence_reg == I915_FENCE_REG_NONE)
-		return 0;
-
-	fence = &dev_priv->fence_regs[obj->fence_reg];
-
-	if (WARN_ON(fence->pin_count))
-		return -EBUSY;
-
-	i915_gem_object_fence_lost(obj);
-	i915_gem_object_update_fence(obj, fence, false);
-
-	return 0;
-}
-
-static struct drm_i915_fence_reg *
-i915_find_fence_reg(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_fence_reg *reg, *avail;
-	int i;
-
-	/* First try to find a free reg */
-	avail = NULL;
-	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
-		reg = &dev_priv->fence_regs[i];
-		if (!reg->obj)
-			return reg;
-
-		if (!reg->pin_count)
-			avail = reg;
-	}
-
-	if (avail == NULL)
-		goto deadlock;
-
-	/* None available, try to steal one or wait for a user to finish */
-	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
-		if (reg->pin_count)
-			continue;
-
-		return reg;
-	}
-
-deadlock:
-	/* Wait for completion of pending flips which consume fences */
-	if (intel_has_pending_fb_unpin(dev))
-		return ERR_PTR(-EAGAIN);
-
-	return ERR_PTR(-EDEADLK);
-}
-
-/**
- * i915_gem_object_get_fence - set up fencing for an object
- * @obj: object to map through a fence reg
- *
- * When mapping objects through the GTT, userspace wants to be able to write
- * to them without having to worry about swizzling if the object is tiled.
- * This function walks the fence regs looking for a free one for @obj,
- * stealing one if it can't find any.
- *
- * It then sets up the reg based on the object's properties: address, pitch
- * and tiling format.
- *
- * For an untiled surface, this removes any existing fence.
- */
-int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	bool enable = obj->tiling_mode != I915_TILING_NONE;
-	struct drm_i915_fence_reg *reg;
-	int ret;
-
-	/* Have we updated the tiling parameters upon the object and so
-	 * will need to serialise the write to the associated fence register?
-	 */
-	if (obj->fence_dirty) {
-		ret = i915_gem_object_wait_fence(obj);
-		if (ret)
-			return ret;
-	}
-
-	/* Just update our place in the LRU if our fence is getting reused. */
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		reg = &dev_priv->fence_regs[obj->fence_reg];
-		if (!obj->fence_dirty) {
-			list_move_tail(&reg->lru_list,
-				       &dev_priv->mm.fence_list);
-			return 0;
-		}
-	} else if (enable) {
-		if (WARN_ON(!obj->map_and_fenceable))
-			return -EINVAL;
-
-		reg = i915_find_fence_reg(dev);
-		if (IS_ERR(reg))
-			return PTR_ERR(reg);
-
-		if (reg->obj) {
-			struct drm_i915_gem_object *old = reg->obj;
-
-			ret = i915_gem_object_wait_fence(old);
-			if (ret)
-				return ret;
-
-			i915_gem_object_fence_lost(old);
-		}
-	} else
-		return 0;
-
-	i915_gem_object_update_fence(obj, reg, enable);
-
-	return 0;
-}
-
 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 				     unsigned long cache_level)
 {
@@ -3673,9 +3356,9 @@
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
-	unsigned long start =
+	u64 start =
 		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
-	unsigned long end =
+	u64 end =
 		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
 	struct i915_vma *vma;
 	int ret;
@@ -3731,7 +3414,7 @@
 	 * attempt to find space.
 	 */
 	if (size > end) {
-		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n",
+		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%llu\n",
 			  ggtt_view ? ggtt_view->type : 0,
 			  size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
@@ -3853,7 +3536,7 @@
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
-	intel_fb_obj_flush(obj, false);
+	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 
 	trace_i915_gem_object_change_domain(obj,
 					    obj->base.read_domains,
@@ -3875,7 +3558,7 @@
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
-	intel_fb_obj_flush(obj, false);
+	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
 
 	trace_i915_gem_object_change_domain(obj,
 					    obj->base.read_domains,
@@ -3937,9 +3620,6 @@
 		obj->dirty = 1;
 	}
 
-	if (write)
-		intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
-
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
 					    old_write_domain);
@@ -4094,12 +3774,13 @@
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     struct intel_engine_cs *pipelined,
+				     struct drm_i915_gem_request **pipelined_request,
 				     const struct i915_ggtt_view *view)
 {
 	u32 old_read_domains, old_write_domain;
 	int ret;
 
-	ret = i915_gem_object_sync(obj, pipelined);
+	ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
 	if (ret)
 		return ret;
 
@@ -4210,9 +3891,6 @@
 		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
-	if (write)
-		intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
-
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
 					    old_write_domain);
@@ -4253,6 +3931,13 @@
 		if (time_after_eq(request->emitted_jiffies, recent_enough))
 			break;
 
+		/*
+		 * Note that the request might not have been submitted yet,
+		 * in which case emitted_jiffies will be zero.
+		 */
+		if (!request->emitted_jiffies)
+			continue;
+
 		target = request;
 	}
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
@@ -4423,32 +4108,6 @@
 	--vma->pin_count;
 }
 
-bool
-i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
-{
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
-
-		WARN_ON(!ggtt_vma ||
-			dev_priv->fence_regs[obj->fence_reg].pin_count >
-			ggtt_vma->pin_count);
-		dev_priv->fence_regs[obj->fence_reg].pin_count++;
-		return true;
-	} else
-		return false;
-}
-
-void
-i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
-{
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
-		dev_priv->fence_regs[obj->fence_reg].pin_count--;
-	}
-}
-
 int
 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		    struct drm_file *file)
@@ -4810,8 +4469,9 @@
 	return ret;
 }
 
-int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
+int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
@@ -4821,7 +4481,7 @@
 	if (!HAS_L3_DPF(dev) || !remap_info)
 		return 0;
 
-	ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
 	if (ret)
 		return ret;
 
@@ -4967,7 +4627,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	int ret, i;
+	int ret, i, j;
 
 	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
 		return -EIO;
@@ -5004,27 +4664,55 @@
 	 */
 	init_unused_rings(dev);
 
+	BUG_ON(!dev_priv->ring[RCS].default_context);
+
+	ret = i915_ppgtt_init_hw(dev);
+	if (ret) {
+		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
+		goto out;
+	}
+
+	/* Need to do basic initialisation of all rings first: */
 	for_each_ring(ring, dev_priv, i) {
 		ret = ring->init_hw(ring);
 		if (ret)
 			goto out;
 	}
 
-	for (i = 0; i < NUM_L3_SLICES(dev); i++)
-		i915_gem_l3_remap(&dev_priv->ring[RCS], i);
+	/* Now it is safe to go back round and do everything else: */
+	for_each_ring(ring, dev_priv, i) {
+		struct drm_i915_gem_request *req;
 
-	ret = i915_ppgtt_init_hw(dev);
-	if (ret && ret != -EIO) {
-		DRM_ERROR("PPGTT enable failed %d\n", ret);
-		i915_gem_cleanup_ringbuffer(dev);
-	}
+		WARN_ON(!ring->default_context);
 
-	ret = i915_gem_context_enable(dev_priv);
-	if (ret && ret != -EIO) {
-		DRM_ERROR("Context enable failed %d\n", ret);
-		i915_gem_cleanup_ringbuffer(dev);
+		ret = i915_gem_request_alloc(ring, ring->default_context, &req);
+		if (ret) {
+			i915_gem_cleanup_ringbuffer(dev);
+			goto out;
+		}
 
-		goto out;
+		if (ring->id == RCS) {
+			for (j = 0; j < NUM_L3_SLICES(dev); j++)
+				i915_gem_l3_remap(req, j);
+		}
+
+		ret = i915_ppgtt_init_ring(req);
+		if (ret && ret != -EIO) {
+			DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
+			i915_gem_request_cancel(req);
+			i915_gem_cleanup_ringbuffer(dev);
+			goto out;
+		}
+
+		ret = i915_gem_context_enable(req);
+		if (ret && ret != -EIO) {
+			DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
+			i915_gem_request_cancel(req);
+			i915_gem_cleanup_ringbuffer(dev);
+			goto out;
+		}
+
+		i915_add_request_no_flush(req);
 	}
 
 out:
@@ -5091,7 +4779,7 @@
 		 * for all other failure, such as an allocation failure, bail.
 		 */
 		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
-		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
 		ret = 0;
 	}
 
@@ -5111,6 +4799,14 @@
 
 	for_each_ring(ring, dev_priv, i)
 		dev_priv->gt.cleanup_ring(ring);
+
+	if (i915.enable_execlists)
+		/*
+		 * Neither the BIOS, ourselves nor any other kernel
+		 * expects the system to be in execlists mode on startup,
+		 * so we need to reset the GPU back to legacy mode.
+		 */
+		intel_gpu_reset(dev);
 }
 
 static void
@@ -5388,3 +5084,42 @@
 	return false;
 }
 
+/* Allocate a new GEM object and fill it with the supplied data */
+struct drm_i915_gem_object *
+i915_gem_object_create_from_data(struct drm_device *dev,
+			         const void *data, size_t size)
+{
+	struct drm_i915_gem_object *obj;
+	struct sg_table *sg;
+	size_t bytes;
+	int ret;
+
+	obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
+	if (IS_ERR_OR_NULL(obj))
+		return obj;
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
+	if (ret)
+		goto fail;
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		goto fail;
+
+	i915_gem_object_pin_pages(obj);
+	sg = obj->pages;
+	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
+	i915_gem_object_unpin_pages(obj);
+
+	if (WARN_ON(bytes != size)) {
+		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	return obj;
+
+fail:
+	drm_gem_object_unreference(&obj->base);
+	return ERR_PTR(ret);
+}
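A hypothetical use of the new helper, e.g. wrapping firmware bytes in a GEM
object (the firmware source below is an assumption for illustration, not
something this patch adds):

	const struct firmware *fw;	/* from request_firmware() */
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
	if (IS_ERR_OR_NULL(obj))
		return obj ? PTR_ERR(obj) : -ENOMEM;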
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 48afa77..8e893b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -287,6 +287,7 @@
 	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
 		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
 err_destroy:
+	idr_remove(&file_priv->context_idr, ctx->user_handle);
 	i915_gem_context_unreference(ctx);
 	return ERR_PTR(ret);
 }
@@ -407,32 +408,23 @@
 	i915_gem_context_unreference(dctx);
 }
 
-int i915_gem_context_enable(struct drm_i915_private *dev_priv)
+int i915_gem_context_enable(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring;
-	int ret, i;
-
-	BUG_ON(!dev_priv->ring[RCS].default_context);
+	struct intel_engine_cs *ring = req->ring;
+	int ret;
 
 	if (i915.enable_execlists) {
-		for_each_ring(ring, dev_priv, i) {
-			if (ring->init_context) {
-				ret = ring->init_context(ring,
-						ring->default_context);
-				if (ret) {
-					DRM_ERROR("ring init context: %d\n",
-							ret);
-					return ret;
-				}
-			}
-		}
+		if (ring->init_context == NULL)
+			return 0;
 
+		ret = ring->init_context(req);
 	} else
-		for_each_ring(ring, dev_priv, i) {
-			ret = i915_switch_context(ring, ring->default_context);
-			if (ret)
-				return ret;
-		}
+		ret = i915_switch_context(req);
+
+	if (ret) {
+		DRM_ERROR("ring init context: %d\n", ret);
+		return ret;
+	}
 
 	return 0;
 }
@@ -485,10 +477,9 @@
 }
 
 static inline int
-mi_set_context(struct intel_engine_cs *ring,
-	       struct intel_context *new_context,
-	       u32 hw_flags)
+mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
@@ -503,13 +494,15 @@
 	 * itlb_before_ctx_switch.
 	 */
 	if (IS_GEN6(ring->dev)) {
-		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
+		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
 
 	/* These flags are for resource streamer on HSW+ */
-	if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
+	if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
+	else if (INTEL_INFO(ring->dev)->gen < 8)
 		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
@@ -517,7 +510,7 @@
 	if (INTEL_INFO(ring->dev)->gen >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 2 : 0);
 
-	ret = intel_ring_begin(ring, len);
+	ret = intel_ring_begin(req, len);
 	if (ret)
 		return ret;
 
@@ -540,7 +533,7 @@
 
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
 			flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
@@ -621,9 +614,10 @@
 	return false;
 }
 
-static int do_switch(struct intel_engine_cs *ring,
-		     struct intel_context *to)
+static int do_switch(struct drm_i915_gem_request *req)
 {
+	struct intel_context *to = req->ctx;
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct intel_context *from = ring->last_context;
 	u32 hw_flags = 0;
@@ -659,7 +653,7 @@
 		 * Register Immediate commands in Ring Buffer before submitting
 		 * a context."*/
 		trace_switch_mm(ring, to);
-		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+		ret = to->ppgtt->switch_mm(to->ppgtt, req);
 		if (ret)
 			goto unpin_out;
 
@@ -701,7 +695,7 @@
 	WARN_ON(needs_pd_load_pre(ring, to) &&
 		needs_pd_load_post(ring, to, hw_flags));
 
-	ret = mi_set_context(ring, to, hw_flags);
+	ret = mi_set_context(req, hw_flags);
 	if (ret)
 		goto unpin_out;
 
@@ -710,7 +704,7 @@
 	 */
 	if (needs_pd_load_post(ring, to, hw_flags)) {
 		trace_switch_mm(ring, to);
-		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+		ret = to->ppgtt->switch_mm(to->ppgtt, req);
 		/* The hardware context switch is emitted, but we haven't
 		 * actually changed the state - so it's probably safe to bail
 		 * here. Still, let the user know something dangerous has
@@ -726,7 +720,7 @@
 		if (!(to->remap_slice & (1<<i)))
 			continue;
 
-		ret = i915_gem_l3_remap(ring, i);
+		ret = i915_gem_l3_remap(req, i);
 		/* If it failed, try again next round */
 		if (ret)
 			DRM_DEBUG_DRIVER("L3 remapping failed\n");
@@ -742,7 +736,7 @@
 	 */
 	if (from != NULL) {
 		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -766,7 +760,7 @@
 
 	if (uninitialized) {
 		if (ring->init_context) {
-			ret = ring->init_context(ring, to);
+			ret = ring->init_context(req);
 			if (ret)
 				DRM_ERROR("ring init context: %d\n", ret);
 		}
@@ -782,8 +776,7 @@
 
 /**
  * i915_switch_context() - perform a GPU context switch.
- * @ring: ring for which we'll execute the context switch
- * @to: the context to switch to
+ * @req: request for which we'll execute the context switch
  *
  * The context life cycle is simple. The context refcount is incremented and
  * decremented by 1 and create and destroy. If the context is in use by the GPU,
@@ -794,25 +787,25 @@
  * switched by writing to the ELSP and requests keep a reference to their
  * context.
  */
-int i915_switch_context(struct intel_engine_cs *ring,
-			struct intel_context *to)
+int i915_switch_context(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
 	WARN_ON(i915.enable_execlists);
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
-		if (to != ring->last_context) {
-			i915_gem_context_reference(to);
+	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
+		if (req->ctx != ring->last_context) {
+			i915_gem_context_reference(req->ctx);
 			if (ring->last_context)
 				i915_gem_context_unreference(ring->last_context);
-			ring->last_context = to;
+			ring->last_context = req->ctx;
 		}
 		return 0;
 	}
 
-	return do_switch(ring, to);
+	return do_switch(req);
 }
 
 static bool contexts_enabled(struct drm_device *dev)
@@ -898,6 +891,9 @@
 	case I915_CONTEXT_PARAM_BAN_PERIOD:
 		args->value = ctx->hang_stats.ban_period_seconds;
 		break;
+	case I915_CONTEXT_PARAM_NO_ZEROMAP:
+		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
+		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -935,6 +931,14 @@
 		else
 			ctx->hang_stats.ban_period_seconds = args->value;
 		break;
+	case I915_CONTEXT_PARAM_NO_ZEROMAP:
+		if (args->size) {
+			ret = -EINVAL;
+		} else {
+			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
+			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
+		}
+		break;
 	default:
 		ret = -EINVAL;
 		break;
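For reference, the new NO_ZEROMAP parameter is driven from userspace through
the existing context-param ioctl. A minimal sketch, assuming libdrm's
drmIoctl() and the uapi definitions from i915_drm.h:

	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.size = 0,	/* must be zero or SETPARAM returns -EINVAL */
		.param = I915_CONTEXT_PARAM_NO_ZEROMAP,
		.value = 1,	/* keep mappings away from GGTT offset zero */
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
		return -errno;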
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a7fa145..923a3c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -677,6 +677,7 @@
 static int
 i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 			    struct list_head *vmas,
+			    struct intel_context *ctx,
 			    bool *need_relocs)
 {
 	struct drm_i915_gem_object *obj;
@@ -699,6 +700,9 @@
 		obj = vma->obj;
 		entry = vma->exec_entry;
 
+		if (ctx->flags & CONTEXT_NO_ZEROMAP)
+			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
 		if (!has_fenced_gpu_access)
 			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
 		need_fence =
@@ -776,7 +780,8 @@
 				  struct drm_file *file,
 				  struct intel_engine_cs *ring,
 				  struct eb_vmas *eb,
-				  struct drm_i915_gem_exec_object2 *exec)
+				  struct drm_i915_gem_exec_object2 *exec,
+				  struct intel_context *ctx)
 {
 	struct drm_i915_gem_relocation_entry *reloc;
 	struct i915_address_space *vm;
@@ -862,7 +867,7 @@
 		goto err;
 
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
 	if (ret)
 		goto err;
 
@@ -887,10 +892,10 @@
 }
 
 static int
-i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
+i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 				struct list_head *vmas)
 {
-	const unsigned other_rings = ~intel_ring_flag(ring);
+	const unsigned other_rings = ~intel_ring_flag(req->ring);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -900,7 +905,7 @@
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, ring);
+			ret = i915_gem_object_sync(obj, req->ring, &req);
 			if (ret)
 				return ret;
 		}
@@ -912,7 +917,7 @@
 	}
 
 	if (flush_chipset)
-		i915_gem_chipset_flush(ring->dev);
+		i915_gem_chipset_flush(req->ring->dev);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -920,7 +925,7 @@
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return intel_ring_invalidate_all_caches(ring);
+	return intel_ring_invalidate_all_caches(req);
 }
 
 static bool
@@ -953,6 +958,9 @@
 		if (exec[i].flags & invalid_flags)
 			return -EINVAL;
 
+		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
+			return -EINVAL;
+
 		/* First check for malicious input causing overflow in
 		 * the worst case where we need to allocate the entire
 		 * relocation tree as a single array.
@@ -1013,9 +1021,9 @@
 
 void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-				   struct intel_engine_cs *ring)
+				   struct drm_i915_gem_request *req)
 {
-	struct drm_i915_gem_request *req = intel_ring_get_request(ring);
+	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
@@ -1029,12 +1037,12 @@
 			obj->base.pending_read_domains |= obj->base.read_domains;
 		obj->base.read_domains = obj->base.pending_read_domains;
 
-		i915_vma_move_to_active(vma, ring);
+		i915_vma_move_to_active(vma, req);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			i915_gem_request_assign(&obj->last_write_req, req);
 
-			intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
+			intel_fb_obj_invalidate(obj, ORIGIN_CS);
 
 			/* update for the implicit flush after a batch */
 			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1053,22 +1061,20 @@
 }
 
 void
-i915_gem_execbuffer_retire_commands(struct drm_device *dev,
-				    struct drm_file *file,
-				    struct intel_engine_cs *ring,
-				    struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 {
 	/* Unconditionally force add_request to emit a full flush. */
-	ring->gpu_caches_dirty = true;
+	params->ring->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
-	(void)__i915_add_request(ring, file, obj);
+	__i915_add_request(params->request, params->batch_obj, true);
 }
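retire_commands() now closes out the batch through the request itself,
forcing a full flush via __i915_add_request(). The no-flush form used for
non-batch submissions in this series is presumably a thin wrapper along these
lines (an assumption inferred from the call sites, not quoted from the
header):

	/* Assumed convenience wrappers around __i915_add_request(): */
	#define i915_add_request(req) \
		__i915_add_request(req, NULL, true)
	#define i915_add_request_no_flush(req) \
		__i915_add_request(req, NULL, false)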
 
 static int
 i915_reset_gen7_sol_offsets(struct drm_device *dev,
-			    struct intel_engine_cs *ring)
+			    struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret, i;
 
@@ -1077,7 +1083,7 @@
 		return -EINVAL;
 	}
 
-	ret = intel_ring_begin(ring, 4 * 3);
+	ret = intel_ring_begin(req, 4 * 3);
 	if (ret)
 		return ret;
 
@@ -1093,10 +1099,11 @@
 }
 
 static int
-i915_emit_box(struct intel_engine_cs *ring,
+i915_emit_box(struct drm_i915_gem_request *req,
 	      struct drm_clip_rect *box,
 	      int DR1, int DR4)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
@@ -1107,7 +1114,7 @@
 	}
 
 	if (INTEL_INFO(ring->dev)->gen >= 4) {
-		ret = intel_ring_begin(ring, 4);
+		ret = intel_ring_begin(req, 4);
 		if (ret)
 			return ret;
 
@@ -1116,7 +1123,7 @@
 		intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
 		intel_ring_emit(ring, DR4);
 	} else {
-		ret = intel_ring_begin(ring, 6);
+		ret = intel_ring_begin(req, 6);
 		if (ret)
 			return ret;
 
@@ -1186,17 +1193,15 @@
 }
 
 int
-i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
-			       struct intel_engine_cs *ring,
-			       struct intel_context *ctx,
+i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas,
-			       struct drm_i915_gem_object *batch_obj,
-			       u64 exec_start, u32 dispatch_flags)
+			       struct list_head *vmas)
 {
 	struct drm_clip_rect *cliprects = NULL;
+	struct drm_device *dev = params->dev;
+	struct intel_engine_cs *ring = params->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u64 exec_len;
+	u64 exec_start, exec_len;
 	int instp_mode;
 	u32 instp_mask;
 	int i, ret = 0;
@@ -1244,15 +1249,15 @@
 		}
 	}
 
-	ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
 	if (ret)
 		goto error;
 
-	ret = i915_switch_context(ring, ctx);
+	ret = i915_switch_context(params->request);
 	if (ret)
 		goto error;
 
-	WARN(ctx->ppgtt && ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
 	     "%s didn't clear reload\n", ring->name);
 
 	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
@@ -1294,7 +1299,7 @@
 
 	if (ring == &dev_priv->ring[RCS] &&
 			instp_mode != dev_priv->relative_constants_mode) {
-		ret = intel_ring_begin(ring, 4);
+		ret = intel_ring_begin(params->request, 4);
 		if (ret)
 			goto error;
 
@@ -1308,37 +1313,40 @@
 	}
 
 	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		ret = i915_reset_gen7_sol_offsets(dev, ring);
+		ret = i915_reset_gen7_sol_offsets(dev, params->request);
 		if (ret)
 			goto error;
 	}
 
-	exec_len = args->batch_len;
+	exec_len   = args->batch_len;
+	exec_start = params->batch_obj_vm_offset +
+		     params->args_batch_start_offset;
+
 	if (cliprects) {
 		for (i = 0; i < args->num_cliprects; i++) {
-			ret = i915_emit_box(ring, &cliprects[i],
+			ret = i915_emit_box(params->request, &cliprects[i],
 					    args->DR1, args->DR4);
 			if (ret)
 				goto error;
 
-			ret = ring->dispatch_execbuffer(ring,
+			ret = ring->dispatch_execbuffer(params->request,
 							exec_start, exec_len,
-							dispatch_flags);
+							params->dispatch_flags);
 			if (ret)
 				goto error;
 		}
 	} else {
-		ret = ring->dispatch_execbuffer(ring,
+		ret = ring->dispatch_execbuffer(params->request,
 						exec_start, exec_len,
-						dispatch_flags);
+						params->dispatch_flags);
 		if (ret)
 			return ret;
 	}
 
-	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
-	i915_gem_execbuffer_move_to_active(vmas, ring);
-	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+	i915_gem_execbuffer_move_to_active(vmas, params->request);
+	i915_gem_execbuffer_retire_commands(params);
 
 error:
 	kfree(cliprects);
@@ -1408,8 +1416,9 @@
 	struct intel_engine_cs *ring;
 	struct intel_context *ctx;
 	struct i915_address_space *vm;
+	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
+	struct i915_execbuffer_params *params = &params_master;
 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
-	u64 exec_start = args->batch_start_offset;
 	u32 dispatch_flags;
 	int ret;
 	bool need_relocs;
@@ -1482,6 +1491,20 @@
 		return -EINVAL;
 	}
 
+	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
+		if (!HAS_RESOURCE_STREAMER(dev)) {
+			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
+			return -EINVAL;
+		}
+		if (ring->id != RCS) {
+			DRM_DEBUG("RS is not available on %s\n",
+				 ring->name);
+			return -EINVAL;
+		}
+
+		dispatch_flags |= I915_DISPATCH_RS;
+	}
+
 	intel_runtime_pm_get(dev_priv);
 
 	ret = i915_mutex_lock_interruptible(dev);
@@ -1502,6 +1525,8 @@
 	else
 		vm = &dev_priv->gtt.base;
 
+	memset(&params_master, 0x00, sizeof(params_master));
+
 	eb = eb_create(args);
 	if (eb == NULL) {
 		i915_gem_context_unreference(ctx);
@@ -1520,7 +1545,7 @@
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
 	if (ret)
 		goto err;
 
@@ -1530,7 +1555,7 @@
 	if (ret) {
 		if (ret == -EFAULT) {
 			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
-								eb, exec);
+								eb, exec, ctx);
 			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 		}
 		if (ret)
@@ -1544,6 +1569,7 @@
 		goto err;
 	}
 
+	params->args_batch_start_offset = args->batch_start_offset;
 	if (i915_needs_cmd_parser(ring) && args->batch_len) {
 		struct drm_i915_gem_object *parsed_batch_obj;
 
@@ -1575,7 +1601,7 @@
 			 * command parser has accepted.
 			 */
 			dispatch_flags |= I915_DISPATCH_SECURE;
-			exec_start = 0;
+			params->args_batch_start_offset = 0;
 			batch_obj = parsed_batch_obj;
 		}
 	}
@@ -1600,14 +1626,35 @@
 		if (ret)
 			goto err;
 
-		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
+		params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
 	} else
-		exec_start += i915_gem_obj_offset(batch_obj, vm);
+		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
-	ret = dev_priv->gt.execbuf_submit(dev, file, ring, ctx, args,
-					  &eb->vmas, batch_obj, exec_start,
-					  dispatch_flags);
+	/* Allocate a request for this batch buffer nice and early. */
+	ret = i915_gem_request_alloc(ring, ctx, &params->request);
+	if (ret)
+		goto err_batch_unpin;
 
+	ret = i915_gem_request_add_to_client(params->request, file);
+	if (ret)
+		goto err_batch_unpin;
+
+	/*
+	 * Save assorted stuff away to pass through to *_submission().
+	 * NB: This data should be 'persistent' and not local as it will be
+	 * kept around beyond the duration of the IOCTL once the GPU
+	 * scheduler arrives.
+	 */
+	params->dev                     = dev;
+	params->file                    = file;
+	params->ring                    = ring;
+	params->dispatch_flags          = dispatch_flags;
+	params->batch_obj               = batch_obj;
+	params->ctx                     = ctx;
+
+	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+
+err_batch_unpin:
 	/*
 	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
 	 * batch vma for correctness. For less ugly and less fragility this
@@ -1616,11 +1663,20 @@
 	 */
 	if (dispatch_flags & I915_DISPATCH_SECURE)
 		i915_gem_object_ggtt_unpin(batch_obj);
+
 err:
 	/* the request owns the ref now */
 	i915_gem_context_unreference(ctx);
 	eb_destroy(eb);
 
+	/*
+	 * If the request was created but not successfully submitted then it
+	 * must be freed again. If it was submitted then it is being tracked
+	 * on the active request list and no clean up is required here.
+	 */
+	if (ret && params->request)
+		i915_gem_request_cancel(params->request);
+
 	mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
new file mode 100644
index 0000000..af1f8c4
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -0,0 +1,787 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+/**
+ * DOC: fence register handling
+ *
+ * Important to avoid confusion: "fences" in the i915 driver are not execution
+ * fences used to track command completion but hardware detiler objects which
+ * wrap a given range of the global GTT. Each platform has only a fairly limited
+ * set of these objects.
+ *
+ * Fences are used to detile GTT memory mappings. They're also connected to the
+ * hardware frontbuffer render tracking and hence interact with frontbuffer
+ * compression. Furthermore on older platforms fences are required for tiled
+ * objects used by the display engine. They can also be used by the render
+ * engine - they're required for blitter commands and are optional for render
+ * commands. But on gen4+ both display (with the exception of fbc) and rendering
+ * have their own tiling state bits and don't need fences.
+ *
+ * Also note that fences only support X and Y tiling and hence can't be used for
+ * the fancier new tiling formats like W, Ys and Yf.
+ *
+ * Finally note that because fences are such a restricted resource they're
+ * dynamically associated with objects. Furthermore fence state is committed to
+ * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
+ * explicitly call i915_gem_object_get_fence() to synchronize fencing status
+ * for CPU access. Also note that some code wants an unfenced view; for those
+ * cases the fence can be removed forcefully with i915_gem_object_put_fence().
+ *
+ * Internally these functions will synchronize with userspace access by removing
+ * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
+ */
+
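A short sketch of the lazy-fencing contract described above, using the entry
points this file now hosts (illustrative only):

	/* Before fenced CPU access through a tiled GTT mmap: */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		return ret;

	if (i915_gem_object_pin_fence(obj)) {
		/* ... access obj via the detiling fence ... */
		i915_gem_object_unpin_fence(obj);
	}

	/* And when an unfenced view is required instead: */
	ret = i915_gem_object_put_fence(obj);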
+static void i965_write_fence_reg(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int fence_reg;
+	int fence_pitch_shift;
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		fence_reg = FENCE_REG_SANDYBRIDGE_0;
+		fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
+	} else {
+		fence_reg = FENCE_REG_965_0;
+		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+	}
+
+	fence_reg += reg * 8;
+
+	/* To w/a incoherency with non-atomic 64-bit register updates,
+	 * we split the 64-bit update into two 32-bit writes. In order
+	 * for a partial fence not to be evaluated between writes, we
+	 * precede the update with write to turn off the fence register,
+	 * and only enable the fence as the last step.
+	 *
+	 * For extra levels of paranoia, we make sure each step lands
+	 * before applying the next step.
+	 */
+	I915_WRITE(fence_reg, 0);
+	POSTING_READ(fence_reg);
+
+	if (obj) {
+		u32 size = i915_gem_obj_ggtt_size(obj);
+		uint64_t val;
+
+		/* Adjust fence size to match tiled area */
+		if (obj->tiling_mode != I915_TILING_NONE) {
+			uint32_t row_size = obj->stride *
+				(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+			size = (size / row_size) * row_size;
+		}
+
+		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
+				 0xfffff000) << 32;
+		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
+		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+		val |= I965_FENCE_REG_VALID;
+
+		I915_WRITE(fence_reg + 4, val >> 32);
+		POSTING_READ(fence_reg + 4);
+
+		I915_WRITE(fence_reg + 0, val);
+		POSTING_READ(fence_reg);
+	} else {
+		I915_WRITE(fence_reg + 4, 0);
+		POSTING_READ(fence_reg + 4);
+	}
+}
+
+static void i915_write_fence_reg(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	if (obj) {
+		u32 size = i915_gem_obj_ggtt_size(obj);
+		int pitch_val;
+		int tile_width;
+
+		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
+		     (size & -size) != size ||
+		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
+
+		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+			tile_width = 128;
+		else
+			tile_width = 512;
+
+		/* Note: pitch better be a power of two tile widths */
+		pitch_val = obj->stride / tile_width;
+		pitch_val = ffs(pitch_val) - 1;
+
+		val = i915_gem_obj_ggtt_offset(obj);
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+		val |= I915_FENCE_SIZE_BITS(size);
+		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+		val |= I830_FENCE_REG_VALID;
+	} else
+		val = 0;
+
+	if (reg < 8)
+		reg = FENCE_REG_830_0 + reg * 4;
+	else
+		reg = FENCE_REG_945_8 + (reg - 8) * 4;
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+}
+
+static void i830_write_fence_reg(struct drm_device *dev, int reg,
+				struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t val;
+
+	if (obj) {
+		u32 size = i915_gem_obj_ggtt_size(obj);
+		uint32_t pitch_val;
+
+		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
+		     (size & -size) != size ||
+		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+		     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+		     i915_gem_obj_ggtt_offset(obj), size);
+
+		pitch_val = obj->stride / 128;
+		pitch_val = ffs(pitch_val) - 1;
+
+		val = i915_gem_obj_ggtt_offset(obj);
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+		val |= I830_FENCE_SIZE_BITS(size);
+		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+		val |= I830_FENCE_REG_VALID;
+	} else
+		val = 0;
+
+	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
+	POSTING_READ(FENCE_REG_830_0 + reg * 4);
+}
+
+static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
+{
+	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
+}
+
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Ensure that all CPU reads are completed before installing a fence
+	 * and all writes before removing the fence.
+	 */
+	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
+		mb();
+
+	WARN(obj && (!obj->stride || !obj->tiling_mode),
+	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+	     obj->stride, obj->tiling_mode);
+
+	if (IS_GEN2(dev))
+		i830_write_fence_reg(dev, reg, obj);
+	else if (IS_GEN3(dev))
+		i915_write_fence_reg(dev, reg, obj);
+	else if (INTEL_INFO(dev)->gen >= 4)
+		i965_write_fence_reg(dev, reg, obj);
+
+	/* And similarly be paranoid that no direct access to this region
+	 * is reordered to before the fence is installed.
+	 */
+	if (i915_gem_object_needs_mb(obj))
+		mb();
+}
+
+static inline int fence_number(struct drm_i915_private *dev_priv,
+			       struct drm_i915_fence_reg *fence)
+{
+	return fence - dev_priv->fence_regs;
+}
+
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+					 struct drm_i915_fence_reg *fence,
+					 bool enable)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	int reg = fence_number(dev_priv, fence);
+
+	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+
+	if (enable) {
+		obj->fence_reg = reg;
+		fence->obj = obj;
+		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+	} else {
+		obj->fence_reg = I915_FENCE_REG_NONE;
+		fence->obj = NULL;
+		list_del_init(&fence->lru_list);
+	}
+	obj->fence_dirty = false;
+}
+
+static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+	if (obj->tiling_mode)
+		i915_gem_release_mmap(obj);
+
+	/* As we do not have an associated fence register, we will force
+	 * a tiling change if we ever need to acquire one.
+	 */
+	obj->fence_dirty = false;
+	obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
+static int
+i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->last_fenced_req) {
+		int ret = i915_wait_request(obj->last_fenced_req);
+		if (ret)
+			return ret;
+
+		i915_gem_request_assign(&obj->last_fenced_req, NULL);
+	}
+
+	return 0;
+}
+
+/**
+ * i915_gem_object_put_fence - force-remove fence for an object
+ * @obj: object to map through a fence reg
+ *
+ * This function force-removes any fence from the given object, which is useful
+ * if the kernel wants to do untiled GTT access.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_fence_reg *fence;
+	int ret;
+
+	ret = i915_gem_object_wait_fence(obj);
+	if (ret)
+		return ret;
+
+	if (obj->fence_reg == I915_FENCE_REG_NONE)
+		return 0;
+
+	fence = &dev_priv->fence_regs[obj->fence_reg];
+
+	if (WARN_ON(fence->pin_count))
+		return -EBUSY;
+
+	i915_gem_object_fence_lost(obj);
+	i915_gem_object_update_fence(obj, fence, false);
+
+	return 0;
+}
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_fence_reg *reg, *avail;
+	int i;
+
+	/* First try to find a free reg */
+	avail = NULL;
+	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+		reg = &dev_priv->fence_regs[i];
+		if (!reg->obj)
+			return reg;
+
+		if (!reg->pin_count)
+			avail = reg;
+	}
+
+	if (avail == NULL)
+		goto deadlock;
+
+	/* None available, try to steal one or wait for a user to finish */
+	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+		if (reg->pin_count)
+			continue;
+
+		return reg;
+	}
+
+deadlock:
+	/* Wait for completion of pending flips which consume fences */
+	if (intel_has_pending_fb_unpin(dev))
+		return ERR_PTR(-EAGAIN);
+
+	return ERR_PTR(-EDEADLK);
+}
+
+/**
+ * i915_gem_object_get_fence - set up fencing for an object
+ * @obj: object to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+int
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool enable = obj->tiling_mode != I915_TILING_NONE;
+	struct drm_i915_fence_reg *reg;
+	int ret;
+
+	/* Have we updated the tiling parameters on the object, such that we
+	 * will need to serialise the write to the associated fence register?
+	 */
+	if (obj->fence_dirty) {
+		ret = i915_gem_object_wait_fence(obj);
+		if (ret)
+			return ret;
+	}
+
+	/* Just update our place in the LRU if our fence is getting reused. */
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		reg = &dev_priv->fence_regs[obj->fence_reg];
+		if (!obj->fence_dirty) {
+			list_move_tail(&reg->lru_list,
+				       &dev_priv->mm.fence_list);
+			return 0;
+		}
+	} else if (enable) {
+		if (WARN_ON(!obj->map_and_fenceable))
+			return -EINVAL;
+
+		reg = i915_find_fence_reg(dev);
+		if (IS_ERR(reg))
+			return PTR_ERR(reg);
+
+		if (reg->obj) {
+			struct drm_i915_gem_object *old = reg->obj;
+
+			ret = i915_gem_object_wait_fence(old);
+			if (ret)
+				return ret;
+
+			i915_gem_object_fence_lost(old);
+		}
+	} else
+		return 0;
+
+	i915_gem_object_update_fence(obj, reg, enable);
+
+	return 0;
+}
+
+/**
+ * i915_gem_object_pin_fence - pin fencing state
+ * @obj: object to pin fencing for
+ *
+ * This pins the fencing state (whether tiled or untiled) to make sure the
+ * object is ready to be used as a scanout target. Fencing status must be
+ * synchronized first by calling i915_gem_object_get_fence().
+ *
+ * The resulting fence pin reference must be released again with
+ * i915_gem_object_unpin_fence().
+ *
+ * Returns:
+ *
+ * True if the object has a fence, false otherwise.
+ */
+bool
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
+
+		WARN_ON(!ggtt_vma ||
+			dev_priv->fence_regs[obj->fence_reg].pin_count >
+			ggtt_vma->pin_count);
+		dev_priv->fence_regs[obj->fence_reg].pin_count++;
+		return true;
+	} else
+		return false;
+}
+
+/**
+ * i915_gem_object_unpin_fence - unpin fencing state
+ * @obj: object to unpin fencing for
+ *
+ * This releases the fence pin reference acquired through
+ * i915_gem_object_pin_fence(). It will handle both objects with and without an
+ * attached fence correctly; callers do not need to distinguish the two cases.
+ */
+void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
+		dev_priv->fence_regs[obj->fence_reg].pin_count--;
+	}
+}
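+
+/*
+ * Editor's sketch (not part of this patch): pin/unpin bracket scanout
+ * setup, roughly
+ *
+ *	if (i915_gem_object_pin_fence(obj))
+ *		... set up the display plane for a fenced view ...
+ *	...
+ *	i915_gem_object_unpin_fence(obj);
+ *
+ * and unpin is safe to call whether or not a fence was attached.
+ */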
+
+/**
+ * i915_gem_restore_fences - restore fence state
+ * @dev: DRM device
+ *
+ * Restore the hw fence state to match the software tracking again, to be
+ * called after a GPU reset and on resume.
+ */
+void i915_gem_restore_fences(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < dev_priv->num_fence_regs; i++) {
+		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+		/*
+		 * Commit delayed tiling changes if we have an object still
+		 * attached to the fence, otherwise just clear the fence.
+		 */
+		if (reg->obj) {
+			i915_gem_object_update_fence(reg->obj, reg,
+						     reg->obj->tiling_mode);
+		} else {
+			i915_gem_write_fence(dev, i, NULL);
+		}
+	}
+}
+
+/**
+ * DOC: tiling swizzling details
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * Performance improvements from doing this on the back/depth buffer are on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMs) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled.  However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y.  So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip -- bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU also sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics.  This
+ * is called "Channel XOR Randomization" in the MCH documentation.  The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what address
+ * swizzling it needs to do, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
+
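+/*
+ * Editor's sketch (not part of this patch): how a bit 9/10 swizzle is
+ * applied to a linear byte offset, matching I915_BIT_6_SWIZZLE_9_10,
+ * where bit 6 of the effective address is XORed with bits 9 and 10:
+ *
+ *	static u32 swizzle_offset_9_10(u32 offset)
+ *	{
+ *		u32 bit6 = ((offset >> 9) ^ (offset >> 10)) & 1;
+ *
+ *		return offset ^ (bit6 << 6);
+ *	}
+ *
+ * swizzle_offset_9_10() is a hypothetical helper, shown only to make the
+ * bit manipulation described above concrete.
+ */
+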
+/**
+ * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
+ * @dev: DRM device
+ *
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+void
+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+
+	if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
+		/*
+		 * On BDW+, swizzling is not used. We leave the CPU memory
+		 * controller in charge of optimizing memory accesses without
+		 * the extra address manipulation GPU side.
+		 *
+		 * VLV and CHV don't have GPU swizzling.
+		 */
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		if (dev_priv->preserve_bios_swizzle) {
+			if (I915_READ(DISP_ARB_CTL) &
+			    DISP_TILE_SURFACE_SWIZZLING) {
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else {
+				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			}
+		} else {
+			uint32_t dimm_c0, dimm_c1;
+			dimm_c0 = I915_READ(MAD_DIMM_C0);
+			dimm_c1 = I915_READ(MAD_DIMM_C1);
+			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+			/* Enable swizzling when the channels are populated
+			 * with identically sized DIMMs. We don't need to check
+			 * the 3rd channel because no CPU with a GPU attached
+			 * ships in that configuration. Also, swizzling only
+			 * makes sense for 2 channels anyway. */
+			if (dimm_c0 == dimm_c1) {
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else {
+				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			}
+		}
+	} else if (IS_GEN5(dev)) {
+		/* On Ironlake, whatever the DRAM config, the GPU always
+		 * does the same swizzling setup.
+		 */
+		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+		swizzle_y = I915_BIT_6_SWIZZLE_9;
+	} else if (IS_GEN2(dev)) {
+		/* As far as we know, the 865 doesn't have these bit 6
+		 * swizzling issues.
+		 */
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+		uint32_t dcc;
+
+		/* On 9xx chipsets, channel interleave by the CPU is
+		 * determined by DCC.  For single-channel, neither the CPU
+		 * nor the GPU do swizzling.  For dual channel interleaved,
+		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+		 * 9 for Y tiled.  The CPU's interleave is independent, and
+		 * can be based on either bit 11 (haven't seen this yet) or
+		 * bit 17 (common).
+		 */
+		dcc = I915_READ(DCC);
+		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			break;
+		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
+				/* This is the base swizzling by the GPU for
+				 * tiled buffers.
+				 */
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+				/* Bit 11 swizzling by the CPU in addition. */
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+			} else {
+				/* Bit 17 swizzling by the CPU in addition. */
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
+			}
+			break;
+		}
+
+		/* check for L-shaped memory aka modified enhanced addressing */
+		if (IS_GEN4(dev)) {
+			uint32_t ddc2 = I915_READ(DCC2);
+
+			if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
+				dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+		}
+
+		if (dcc == 0xffffffff) {
+			DRM_ERROR("Couldn't read from MCHBAR.  "
+				  "Disabling tiling.\n");
+			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+		}
+	} else {
+		/* The 965, G33, and newer have a very flexible memory
+		 * configuration.  It will enable dual-channel mode
+		 * (interleaving) on as much memory as it can, and the GPU
+		 * will additionally sometimes enable different bit 6
+		 * swizzling for tiled objects from the CPU.
+		 *
+		 * Here's what I found on the G965:
+		 *    slot fill         memory size  swizzling
+		 * 0A   0B   1A   1B    1-ch   2-ch
+		 * 512  0    0    0     512    0     O
+		 * 512  0    512  0     16     1008  X
+		 * 512  0    0    512   16     1008  X
+		 * 0    512  0    512   16     1008  X
+		 * 1024 1024 1024 0     2048   1024  O
+		 *
+		 * We could probably detect this based on either the DRB
+		 * matching, which was the case for the swizzling required in
+		 * the table above, or from the 1-ch value being less than
+		 * the minimum size of a rank.
+		 */
+		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
+			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+		} else {
+			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+			swizzle_y = I915_BIT_6_SWIZZLE_9;
+		}
+	}
+
+	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+}
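+
+/*
+ * Editor's note (not part of this patch): the swizzle values detected
+ * above are exposed to userspace through the get-tiling ioctl, so that
+ * e.g. Mesa can apply the matching CPU-side swizzle when accessing
+ * tiled buffers through a linear mapping.
+ */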
+
+/*
+ * Swap every 64 bytes of this page around, to account for it having a new
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+static void
+i915_gem_swizzle_page(struct page *page)
+{
+	char temp[64];
+	char *vaddr;
+	int i;
+
+	vaddr = kmap(page);
+
+	for (i = 0; i < PAGE_SIZE; i += 128) {
+		memcpy(temp, &vaddr[i], 64);
+		memcpy(&vaddr[i], &vaddr[i + 64], 64);
+		memcpy(&vaddr[i + 64], temp, 64);
+	}
+
+	kunmap(page);
+}
+
+/**
+ * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
+ * @obj: i915 GEM buffer object
+ *
+ * This function fixes up the swizzling in case any page frame number for this
+ * object has changed in bit 17 since that state has been saved with
+ * i915_gem_object_save_bit_17_swizzle().
+ *
+ * This is called when pinning backing storage again, since the kernel is free
+ * to move unpinned backing storage around (either by directly moving pages or
+ * by swapping them out and back in again).
+ */
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+	struct sg_page_iter sg_iter;
+	int i;
+
+	if (obj->bit_17 == NULL)
+		return;
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_page_iter_page(&sg_iter);
+		char new_bit_17 = page_to_phys(page) >> 17;
+		if ((new_bit_17 & 0x1) !=
+		    (test_bit(i, obj->bit_17) != 0)) {
+			i915_gem_swizzle_page(page);
+			set_page_dirty(page);
+		}
+		i++;
+	}
+}
+
+/**
+ * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
+ * @obj: i915 GEM buffer object
+ *
+ * This function saves the bit 17 of each page frame number so that swizzling
+ * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
+ * be called before the backing storage can be unpinned.
+ */
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+	struct sg_page_iter sg_iter;
+	int page_count = obj->base.size >> PAGE_SHIFT;
+	int i;
+
+	if (obj->bit_17 == NULL) {
+		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
+				      sizeof(long), GFP_KERNEL);
+		if (obj->bit_17 == NULL) {
+			DRM_ERROR("Failed to allocate memory for bit 17 "
+				  "record\n");
+			return;
+		}
+	}
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
+			__set_bit(i, obj->bit_17);
+		else
+			__clear_bit(i, obj->bit_17);
+		i++;
+	}
+}
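+
+/*
+ * Editor's sketch (not part of this patch): the two helpers above are
+ * used as a pair around backing storage unpin/pin, roughly
+ *
+ *	i915_gem_object_save_bit_17_swizzle(obj);
+ *	... pages may be swapped out and back in, changing bit 17 ...
+ *	i915_gem_object_do_bit_17_swizzle(obj);
+ */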
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 31e8269..96054a5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -192,9 +192,8 @@
 	return pte;
 }
 
-static gen8_pde_t gen8_pde_encode(struct drm_device *dev,
-				  dma_addr_t addr,
-				  enum i915_cache_level level)
+static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
+				  const enum i915_cache_level level)
 {
 	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
 	pde |= addr;
@@ -301,75 +300,120 @@
 	return pte;
 }
 
-#define i915_dma_unmap_single(px, dev) \
-	__i915_dma_unmap_single((px)->daddr, dev)
-
-static void __i915_dma_unmap_single(dma_addr_t daddr,
-				    struct drm_device *dev)
+static int __setup_page_dma(struct drm_device *dev,
+			    struct i915_page_dma *p, gfp_t flags)
 {
 	struct device *device = &dev->pdev->dev;
 
-	dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
-}
-
-/**
- * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc.
- * @px:	Page table/dir/etc to get a DMA map for
- * @dev:	drm device
- *
- * Page table allocations are unified across all gens. They always require a
- * single 4k allocation, as well as a DMA mapping. If we keep the structs
- * symmetric here, the simple macro covers us for every page table type.
- *
- * Return: 0 if success.
- */
-#define i915_dma_map_single(px, dev) \
-	i915_dma_map_page_single((px)->page, (dev), &(px)->daddr)
-
-static int i915_dma_map_page_single(struct page *page,
-				    struct drm_device *dev,
-				    dma_addr_t *daddr)
-{
-	struct device *device = &dev->pdev->dev;
-
-	*daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(device, *daddr))
+	p->page = alloc_page(flags);
+	if (!p->page)
 		return -ENOMEM;
 
+	p->daddr = dma_map_page(device,
+				p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(device, p->daddr)) {
+		__free_page(p->page);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
-static void unmap_and_free_pt(struct i915_page_table *pt,
-			       struct drm_device *dev)
+static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
 {
-	if (WARN_ON(!pt->page))
+	return __setup_page_dma(dev, p, GFP_KERNEL);
+}
+
+static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
+{
+	if (WARN_ON(!p->page))
 		return;
 
-	i915_dma_unmap_single(pt, dev);
-	__free_page(pt->page);
-	kfree(pt->used_ptes);
-	kfree(pt);
+	dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+	__free_page(p->page);
+	memset(p, 0, sizeof(*p));
 }
 
-static void gen8_initialize_pt(struct i915_address_space *vm,
-			       struct i915_page_table *pt)
+static void *kmap_page_dma(struct i915_page_dma *p)
 {
-	gen8_pte_t *pt_vaddr, scratch_pte;
-	int i;
-
-	pt_vaddr = kmap_atomic(pt->page);
-	scratch_pte = gen8_pte_encode(vm->scratch.addr,
-				      I915_CACHE_LLC, true);
-
-	for (i = 0; i < GEN8_PTES; i++)
-		pt_vaddr[i] = scratch_pte;
-
-	if (!HAS_LLC(vm->dev))
-		drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-	kunmap_atomic(pt_vaddr);
+	return kmap_atomic(p->page);
 }
 
-static struct i915_page_table *alloc_pt_single(struct drm_device *dev)
+/* We use the flushing unmap only with ppgtt structures:
+ * page directories, page tables and scratch pages.
+ */
+static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
+{
+	/* There are only a few exceptions for gen >= 6: CHV and BXT.
+	 * And we are not sure about the latter, so play it safe for now.
+	 */
+	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+		drm_clflush_virt_range(vaddr, PAGE_SIZE);
+
+	kunmap_atomic(vaddr);
+}
+
+#define kmap_px(px) kmap_page_dma(px_base(px))
+#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
+
+#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
+#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
+#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
+#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
+
+static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
+			  const uint64_t val)
+{
+	int i;
+	uint64_t * const vaddr = kmap_page_dma(p);
+
+	for (i = 0; i < 512; i++)
+		vaddr[i] = val;
+
+	kunmap_page_dma(dev, vaddr);
+}
+
+static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
+			     const uint32_t val32)
+{
+	uint64_t v = val32;
+
+	v = v << 32 | val32;
+
+	fill_page_dma(dev, p, v);
+}
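+
+/* Editor's sketch (not part of this patch): callers clear a page table
+ * to scratch with these helpers, e.g.
+ *
+ *	fill_px(vm->dev, pt, scratch_pte);	(64-bit gen8 PTEs)
+ *	fill32_px(vm->dev, pt, scratch_pte);	(32-bit gen6/7 PTEs)
+ *
+ * exactly as the gen8/gen6 initialize_pt() functions below do.
+ */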
+
+static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
+{
+	struct i915_page_scratch *sp;
+	int ret;
+
+	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+	if (sp == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
+	if (ret) {
+		kfree(sp);
+		return ERR_PTR(ret);
+	}
+
+	set_pages_uc(px_page(sp), 1);
+
+	return sp;
+}
+
+static void free_scratch_page(struct drm_device *dev,
+			      struct i915_page_scratch *sp)
+{
+	set_pages_wb(px_page(sp), 1);
+
+	cleanup_px(dev, sp);
+	kfree(sp);
+}
+
+static struct i915_page_table *alloc_pt(struct drm_device *dev)
 {
 	struct i915_page_table *pt;
 	const size_t count = INTEL_INFO(dev)->gen >= 8 ?
@@ -386,19 +430,13 @@
 	if (!pt->used_ptes)
 		goto fail_bitmap;
 
-	pt->page = alloc_page(GFP_KERNEL);
-	if (!pt->page)
-		goto fail_page;
-
-	ret = i915_dma_map_single(pt, dev);
+	ret = setup_px(dev, pt);
 	if (ret)
-		goto fail_dma;
+		goto fail_page_m;
 
 	return pt;
 
-fail_dma:
-	__free_page(pt->page);
-fail_page:
+fail_page_m:
 	kfree(pt->used_ptes);
 fail_bitmap:
 	kfree(pt);
@@ -406,18 +444,38 @@
 	return ERR_PTR(ret);
 }
 
-static void unmap_and_free_pd(struct i915_page_directory *pd,
-			      struct drm_device *dev)
+static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
 {
-	if (pd->page) {
-		i915_dma_unmap_single(pd, dev);
-		__free_page(pd->page);
-		kfree(pd->used_pdes);
-		kfree(pd);
-	}
+	cleanup_px(dev, pt);
+	kfree(pt->used_ptes);
+	kfree(pt);
 }
 
-static struct i915_page_directory *alloc_pd_single(struct drm_device *dev)
+static void gen8_initialize_pt(struct i915_address_space *vm,
+			       struct i915_page_table *pt)
+{
+	gen8_pte_t scratch_pte;
+
+	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+				      I915_CACHE_LLC, true);
+
+	fill_px(vm->dev, pt, scratch_pte);
+}
+
+static void gen6_initialize_pt(struct i915_address_space *vm,
+			       struct i915_page_table *pt)
+{
+	gen6_pte_t scratch_pte;
+
+	WARN_ON(px_dma(vm->scratch_page) == 0);
+
+	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+				     I915_CACHE_LLC, true, 0);
+
+	fill32_px(vm->dev, pt, scratch_pte);
+}
+
+static struct i915_page_directory *alloc_pd(struct drm_device *dev)
 {
 	struct i915_page_directory *pd;
 	int ret = -ENOMEM;
@@ -429,38 +487,52 @@
 	pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
 				sizeof(*pd->used_pdes), GFP_KERNEL);
 	if (!pd->used_pdes)
-		goto free_pd;
+		goto fail_bitmap;
 
-	pd->page = alloc_page(GFP_KERNEL);
-	if (!pd->page)
-		goto free_bitmap;
-
-	ret = i915_dma_map_single(pd, dev);
+	ret = setup_px(dev, pd);
 	if (ret)
-		goto free_page;
+		goto fail_page_m;
 
 	return pd;
 
-free_page:
-	__free_page(pd->page);
-free_bitmap:
+fail_page_m:
 	kfree(pd->used_pdes);
-free_pd:
+fail_bitmap:
 	kfree(pd);
 
 	return ERR_PTR(ret);
 }
 
+static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
+{
+	if (px_page(pd)) {
+		cleanup_px(dev, pd);
+		kfree(pd->used_pdes);
+		kfree(pd);
+	}
+}
+
+static void gen8_initialize_pd(struct i915_address_space *vm,
+			       struct i915_page_directory *pd)
+{
+	gen8_pde_t scratch_pde;
+
+	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
+
+	fill_px(vm->dev, pd, scratch_pde);
+}
+
 /* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct intel_engine_cs *ring,
+static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	BUG_ON(entry >= 4);
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -476,16 +548,14 @@
 }
 
 static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring)
+			  struct drm_i915_gem_request *req)
 {
 	int i, ret;
 
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
-		struct i915_page_directory *pd = ppgtt->pdp.page_directory[i];
-		dma_addr_t pd_daddr = pd ? pd->daddr : ppgtt->scratch_pd->daddr;
-		/* The page directory might be NULL, but we need to clear out
-		 * whatever the previous context might have used. */
-		ret = gen8_write_pdp(ring, i, pd_daddr);
+		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+		ret = gen8_write_pdp(req, i, pd_daddr);
 		if (ret)
 			return ret;
 	}
@@ -507,13 +577,12 @@
 	unsigned num_entries = length >> PAGE_SHIFT;
 	unsigned last_pte, i;
 
-	scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
+	scratch_pte = gen8_pte_encode(px_dma(ppgtt->base.scratch_page),
 				      I915_CACHE_LLC, use_scratch);
 
 	while (num_entries) {
 		struct i915_page_directory *pd;
 		struct i915_page_table *pt;
-		struct page *page_table;
 
 		if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
 			break;
@@ -525,25 +594,21 @@
 
 		pt = pd->page_table[pde];
 
-		if (WARN_ON(!pt->page))
+		if (WARN_ON(!px_page(pt)))
 			break;
 
-		page_table = pt->page;
-
 		last_pte = pte + num_entries;
 		if (last_pte > GEN8_PTES)
 			last_pte = GEN8_PTES;
 
-		pt_vaddr = kmap_atomic(page_table);
+		pt_vaddr = kmap_px(pt);
 
 		for (i = pte; i < last_pte; i++) {
 			pt_vaddr[i] = scratch_pte;
 			num_entries--;
 		}
 
-		if (!HAS_LLC(ppgtt->base.dev))
-			drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-		kunmap_atomic(pt_vaddr);
+		kunmap_px(ppgtt, pt);
 
 		pte = 0;
 		if (++pde == I915_PDES) {
@@ -575,18 +640,14 @@
 		if (pt_vaddr == NULL) {
 			struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe];
 			struct i915_page_table *pt = pd->page_table[pde];
-			struct page *page_table = pt->page;
-
-			pt_vaddr = kmap_atomic(page_table);
+			pt_vaddr = kmap_px(pt);
 		}
 
 		pt_vaddr[pte] =
 			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
 					cache_level, true);
 		if (++pte == GEN8_PTES) {
-			if (!HAS_LLC(ppgtt->base.dev))
-				drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-			kunmap_atomic(pt_vaddr);
+			kunmap_px(ppgtt, pt_vaddr);
 			pt_vaddr = NULL;
 			if (++pde == I915_PDES) {
 				pdpe++;
@@ -595,58 +656,64 @@
 			pte = 0;
 		}
 	}
-	if (pt_vaddr) {
-		if (!HAS_LLC(ppgtt->base.dev))
-			drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
-		kunmap_atomic(pt_vaddr);
-	}
+
+	if (pt_vaddr)
+		kunmap_px(ppgtt, pt_vaddr);
 }
 
-static void __gen8_do_map_pt(gen8_pde_t * const pde,
-			     struct i915_page_table *pt,
-			     struct drm_device *dev)
-{
-	gen8_pde_t entry =
-		gen8_pde_encode(dev, pt->daddr, I915_CACHE_LLC);
-	*pde = entry;
-}
-
-static void gen8_initialize_pd(struct i915_address_space *vm,
-			       struct i915_page_directory *pd)
-{
-	struct i915_hw_ppgtt *ppgtt =
-			container_of(vm, struct i915_hw_ppgtt, base);
-	gen8_pde_t *page_directory;
-	struct i915_page_table *pt;
-	int i;
-
-	page_directory = kmap_atomic(pd->page);
-	pt = ppgtt->scratch_pt;
-	for (i = 0; i < I915_PDES; i++)
-		/* Map the PDE to the page table */
-		__gen8_do_map_pt(page_directory + i, pt, vm->dev);
-
-	if (!HAS_LLC(vm->dev))
-		drm_clflush_virt_range(page_directory, PAGE_SIZE);
-	kunmap_atomic(page_directory);
-}
-
-static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
+static void gen8_free_page_tables(struct drm_device *dev,
+				  struct i915_page_directory *pd)
 {
 	int i;
 
-	if (!pd->page)
+	if (!px_page(pd))
 		return;
 
 	for_each_set_bit(i, pd->used_pdes, I915_PDES) {
 		if (WARN_ON(!pd->page_table[i]))
 			continue;
 
-		unmap_and_free_pt(pd->page_table[i], dev);
+		free_pt(dev, pd->page_table[i]);
 		pd->page_table[i] = NULL;
 	}
 }
 
+static int gen8_init_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	vm->scratch_page = alloc_scratch_page(dev);
+	if (IS_ERR(vm->scratch_page))
+		return PTR_ERR(vm->scratch_page);
+
+	vm->scratch_pt = alloc_pt(dev);
+	if (IS_ERR(vm->scratch_pt)) {
+		free_scratch_page(dev, vm->scratch_page);
+		return PTR_ERR(vm->scratch_pt);
+	}
+
+	vm->scratch_pd = alloc_pd(dev);
+	if (IS_ERR(vm->scratch_pd)) {
+		free_pt(dev, vm->scratch_pt);
+		free_scratch_page(dev, vm->scratch_page);
+		return PTR_ERR(vm->scratch_pd);
+	}
+
+	gen8_initialize_pt(vm, vm->scratch_pt);
+	gen8_initialize_pd(vm, vm->scratch_pd);
+
+	return 0;
+}
+
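+/* Editor's note (not part of this patch): gen8_free_scratch() releases
+ * these objects in the reverse order of allocation, matching the unwind
+ * order of the error paths in gen8_init_scratch() above.
+ */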
+static void gen8_free_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	free_pd(dev, vm->scratch_pd);
+	free_pt(dev, vm->scratch_pt);
+	free_scratch_page(dev, vm->scratch_page);
+}
+
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 {
 	struct i915_hw_ppgtt *ppgtt =
@@ -657,12 +724,12 @@
 		if (WARN_ON(!ppgtt->pdp.page_directory[i]))
 			continue;
 
-		gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
-		unmap_and_free_pd(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
+		gen8_free_page_tables(ppgtt->base.dev,
+				      ppgtt->pdp.page_directory[i]);
+		free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]);
 	}
 
-	unmap_and_free_pd(ppgtt->scratch_pd, ppgtt->base.dev);
-	unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+	gen8_free_scratch(vm);
 }
 
 /**
@@ -698,24 +765,24 @@
 		/* Don't reallocate page tables */
 		if (pt) {
 			/* Scratch is never allocated this way */
-			WARN_ON(pt == ppgtt->scratch_pt);
+			WARN_ON(pt == ppgtt->base.scratch_pt);
 			continue;
 		}
 
-		pt = alloc_pt_single(dev);
+		pt = alloc_pt(dev);
 		if (IS_ERR(pt))
 			goto unwind_out;
 
 		gen8_initialize_pt(&ppgtt->base, pt);
 		pd->page_table[pde] = pt;
-		set_bit(pde, new_pts);
+		__set_bit(pde, new_pts);
 	}
 
 	return 0;
 
 unwind_out:
 	for_each_set_bit(pde, new_pts, I915_PDES)
-		unmap_and_free_pt(pd->page_table[pde], dev);
+		free_pt(dev, pd->page_table[pde]);
 
 	return -ENOMEM;
 }
@@ -756,27 +823,24 @@
 
 	WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES));
 
-	/* FIXME: upper bound must not overflow 32 bits  */
-	WARN_ON((start + length) > (1ULL << 32));
-
 	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
 		if (pd)
 			continue;
 
-		pd = alloc_pd_single(dev);
+		pd = alloc_pd(dev);
 		if (IS_ERR(pd))
 			goto unwind_out;
 
 		gen8_initialize_pd(&ppgtt->base, pd);
 		pdp->page_directory[pdpe] = pd;
-		set_bit(pdpe, new_pds);
+		__set_bit(pdpe, new_pds);
 	}
 
 	return 0;
 
 unwind_out:
 	for_each_set_bit(pdpe, new_pds, GEN8_LEGACY_PDPES)
-		unmap_and_free_pd(pdp->page_directory[pdpe], dev);
+		free_pd(dev, pdp->page_directory[pdpe]);
 
 	return -ENOMEM;
 }
@@ -830,6 +894,16 @@
 	return -ENOMEM;
 }
 
+/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
+ * the page table structures, we mark them dirty so that
+ * context switching/execlist queuing code takes extra steps
+ * to ensure that TLBs are flushed.
+ */
+static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+{
+	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
+}
+
 static int gen8_alloc_va_range(struct i915_address_space *vm,
 			       uint64_t start,
 			       uint64_t length)
@@ -848,7 +922,10 @@
 	 * actually use the other side of the canonical address space.
 	 */
 	if (WARN_ON(start + length < start))
-		return -ERANGE;
+		return -ENODEV;
+
+	if (WARN_ON(start + length > ppgtt->base.total))
+		return -ENODEV;
 
 	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables);
 	if (ret)
@@ -876,7 +953,7 @@
 	/* Allocations have completed successfully, so set the bitmaps, and do
 	 * the mappings. */
 	gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
-		gen8_pde_t *const page_directory = kmap_atomic(pd->page);
+		gen8_pde_t *const page_directory = kmap_px(pd);
 		struct i915_page_table *pt;
 		uint64_t pd_len = gen8_clamp_pd(start, length);
 		uint64_t pd_start = start;
@@ -897,36 +974,36 @@
 				   gen8_pte_count(pd_start, pd_len));
 
 			/* Our pde is now pointing to the pagetable, pt */
-			set_bit(pde, pd->used_pdes);
+			__set_bit(pde, pd->used_pdes);
 
 			/* Map the PDE to the page table */
-			__gen8_do_map_pt(page_directory + pde, pt, vm->dev);
+			page_directory[pde] = gen8_pde_encode(px_dma(pt),
+							      I915_CACHE_LLC);
 
 			/* NB: We haven't yet mapped ptes to pages. At this
 			 * point we're still relying on insert_entries() */
 		}
 
-		if (!HAS_LLC(vm->dev))
-			drm_clflush_virt_range(page_directory, PAGE_SIZE);
+		kunmap_px(ppgtt, page_directory);
 
-		kunmap_atomic(page_directory);
-
-		set_bit(pdpe, ppgtt->pdp.used_pdpes);
+		__set_bit(pdpe, ppgtt->pdp.used_pdpes);
 	}
 
 	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+	mark_tlbs_dirty(ppgtt);
 	return 0;
 
 err_out:
 	while (pdpe--) {
 		for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES)
-			unmap_and_free_pt(ppgtt->pdp.page_directory[pdpe]->page_table[temp], vm->dev);
+			free_pt(vm->dev, ppgtt->pdp.page_directory[pdpe]->page_table[temp]);
 	}
 
 	for_each_set_bit(pdpe, new_page_dirs, GEN8_LEGACY_PDPES)
-		unmap_and_free_pd(ppgtt->pdp.page_directory[pdpe], vm->dev);
+		free_pd(vm->dev, ppgtt->pdp.page_directory[pdpe]);
 
 	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+	mark_tlbs_dirty(ppgtt);
 	return ret;
 }
 
@@ -939,16 +1016,11 @@
  */
 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
-	ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
-	if (IS_ERR(ppgtt->scratch_pt))
-		return PTR_ERR(ppgtt->scratch_pt);
+	int ret;
 
-	ppgtt->scratch_pd = alloc_pd_single(ppgtt->base.dev);
-	if (IS_ERR(ppgtt->scratch_pd))
-		return PTR_ERR(ppgtt->scratch_pd);
-
-	gen8_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
-	gen8_initialize_pd(&ppgtt->base, ppgtt->scratch_pd);
+	ret = gen8_init_scratch(&ppgtt->base);
+	if (ret)
+		return ret;
 
 	ppgtt->base.start = 0;
 	ppgtt->base.total = 1ULL << 32;
@@ -980,12 +1052,13 @@
 	uint32_t  pte, pde, temp;
 	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
+	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+				     I915_CACHE_LLC, true, 0);
 
 	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
 		u32 expected;
 		gen6_pte_t *pt_vaddr;
-		dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
+		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
 		pd_entry = readl(ppgtt->pd_addr + pde);
 		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
 
@@ -996,7 +1069,8 @@
 				   expected);
 		seq_printf(m, "\tPDE: %x\n", pd_entry);
 
-		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page);
+		pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
+
 		for (pte = 0; pte < GEN6_PTES; pte+=4) {
 			unsigned long va =
 				(pde * PAGE_SIZE * GEN6_PTES) +
@@ -1018,7 +1092,7 @@
 			}
 			seq_puts(m, "\n");
 		}
-		kunmap_atomic(pt_vaddr);
+		kunmap_px(ppgtt, pt_vaddr);
 	}
 }
 
@@ -1031,7 +1105,7 @@
 		container_of(pd, struct i915_hw_ppgtt, pd);
 	u32 pd_entry;
 
-	pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr);
+	pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
 	pd_entry |= GEN6_PDE_VALID;
 
 	writel(pd_entry, ppgtt->pd_addr + pde);
@@ -1056,22 +1130,23 @@
 
 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 {
-	BUG_ON(ppgtt->pd.pd_offset & 0x3f);
+	BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
 
-	return (ppgtt->pd.pd_offset / 64) << 16;
+	return (ppgtt->pd.base.ggtt_offset / 64) << 16;
 }
 
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			 struct intel_engine_cs *ring)
+			 struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -1087,8 +1162,9 @@
 }
 
 static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring)
+			  struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
 
 	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
@@ -1097,16 +1173,17 @@
 }
 
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring)
+			  struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -1120,7 +1197,7 @@
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (ring->id != RCS) {
-		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
@@ -1129,8 +1206,9 @@
 }
 
 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct intel_engine_cs *ring)
+			  struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -1214,19 +1292,20 @@
 	unsigned first_pte = first_entry % GEN6_PTES;
 	unsigned last_pte, i;
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
+	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+				     I915_CACHE_LLC, true, 0);
 
 	while (num_entries) {
 		last_pte = first_pte + num_entries;
 		if (last_pte > GEN6_PTES)
 			last_pte = GEN6_PTES;
 
-		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
+		pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
 
 		for (i = first_pte; i < last_pte; i++)
 			pt_vaddr[i] = scratch_pte;
 
-		kunmap_atomic(pt_vaddr);
+		kunmap_px(ppgtt, pt_vaddr);
 
 		num_entries -= last_pte - first_pte;
 		first_pte = 0;
@@ -1250,54 +1329,25 @@
 	pt_vaddr = NULL;
 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
 		if (pt_vaddr == NULL)
-			pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
+			pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
 
 		pt_vaddr[act_pte] =
 			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
 				       cache_level, true, flags);
 
 		if (++act_pte == GEN6_PTES) {
-			kunmap_atomic(pt_vaddr);
+			kunmap_px(ppgtt, pt_vaddr);
 			pt_vaddr = NULL;
 			act_pt++;
 			act_pte = 0;
 		}
 	}
 	if (pt_vaddr)
-		kunmap_atomic(pt_vaddr);
-}
-
-/* PDE TLBs are a pain invalidate pre GEN8. It requires a context reload. If we
- * are switching between contexts with the same LRCA, we also must do a force
- * restore.
- */
-static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
-{
-	/* If current vm != vm, */
-	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
-}
-
-static void gen6_initialize_pt(struct i915_address_space *vm,
-		struct i915_page_table *pt)
-{
-	gen6_pte_t *pt_vaddr, scratch_pte;
-	int i;
-
-	WARN_ON(vm->scratch.addr == 0);
-
-	scratch_pte = vm->pte_encode(vm->scratch.addr,
-			I915_CACHE_LLC, true, 0);
-
-	pt_vaddr = kmap_atomic(pt->page);
-
-	for (i = 0; i < GEN6_PTES; i++)
-		pt_vaddr[i] = scratch_pte;
-
-	kunmap_atomic(pt_vaddr);
+		kunmap_px(ppgtt, pt_vaddr);
 }
 
 static int gen6_alloc_va_range(struct i915_address_space *vm,
-			       uint64_t start, uint64_t length)
+			       uint64_t start_in, uint64_t length_in)
 {
 	DECLARE_BITMAP(new_page_tables, I915_PDES);
 	struct drm_device *dev = vm->dev;
@@ -1305,11 +1355,15 @@
 	struct i915_hw_ppgtt *ppgtt =
 				container_of(vm, struct i915_hw_ppgtt, base);
 	struct i915_page_table *pt;
-	const uint32_t start_save = start, length_save = length;
+	uint32_t start, length, start_save, length_save;
 	uint32_t pde, temp;
 	int ret;
 
-	WARN_ON(upper_32_bits(start));
+	if (WARN_ON(start_in + length_in > ppgtt->base.total))
+		return -ENODEV;
+
+	start = start_save = start_in;
+	length = length_save = length_in;
 
 	bitmap_zero(new_page_tables, I915_PDES);
 
@@ -1319,7 +1373,7 @@
 	 * tables.
 	 */
 	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
-		if (pt != ppgtt->scratch_pt) {
+		if (pt != vm->scratch_pt) {
 			WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
 			continue;
 		}
@@ -1327,7 +1381,7 @@
 		/* We've already allocated a page table */
 		WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
 
-		pt = alloc_pt_single(dev);
+		pt = alloc_pt(dev);
 		if (IS_ERR(pt)) {
 			ret = PTR_ERR(pt);
 			goto unwind_out;
@@ -1336,7 +1390,7 @@
 		gen6_initialize_pt(vm, pt);
 
 		ppgtt->pd.page_table[pde] = pt;
-		set_bit(pde, new_page_tables);
+		__set_bit(pde, new_page_tables);
 		trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
 	}
 
@@ -1350,7 +1404,7 @@
 		bitmap_set(tmp_bitmap, gen6_pte_index(start),
 			   gen6_pte_count(start, length));
 
-		if (test_and_clear_bit(pde, new_page_tables))
+		if (__test_and_clear_bit(pde, new_page_tables))
 			gen6_write_pde(&ppgtt->pd, pde, pt);
 
 		trace_i915_page_table_entry_map(vm, pde, pt,
@@ -1374,14 +1428,41 @@
 	for_each_set_bit(pde, new_page_tables, I915_PDES) {
 		struct i915_page_table *pt = ppgtt->pd.page_table[pde];
 
-		ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
-		unmap_and_free_pt(pt, vm->dev);
+		ppgtt->pd.page_table[pde] = vm->scratch_pt;
+		free_pt(vm->dev, pt);
 	}
 
 	mark_tlbs_dirty(ppgtt);
 	return ret;
 }
 
+static int gen6_init_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	vm->scratch_page = alloc_scratch_page(dev);
+	if (IS_ERR(vm->scratch_page))
+		return PTR_ERR(vm->scratch_page);
+
+	vm->scratch_pt = alloc_pt(dev);
+	if (IS_ERR(vm->scratch_pt)) {
+		free_scratch_page(dev, vm->scratch_page);
+		return PTR_ERR(vm->scratch_pt);
+	}
+
+	gen6_initialize_pt(vm, vm->scratch_pt);
+
+	return 0;
+}
+
+static void gen6_free_scratch(struct i915_address_space *vm)
+{
+	struct drm_device *dev = vm->dev;
+
+	free_pt(dev, vm->scratch_pt);
+	free_scratch_page(dev, vm->scratch_page);
+}
+
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 {
 	struct i915_hw_ppgtt *ppgtt =
@@ -1389,20 +1470,19 @@
 	struct i915_page_table *pt;
 	uint32_t pde;
 
-
 	drm_mm_remove_node(&ppgtt->node);
 
 	gen6_for_all_pdes(pt, ppgtt, pde) {
-		if (pt != ppgtt->scratch_pt)
-			unmap_and_free_pt(pt, ppgtt->base.dev);
+		if (pt != vm->scratch_pt)
+			free_pt(ppgtt->base.dev, pt);
 	}
 
-	unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
-	unmap_and_free_pd(&ppgtt->pd, ppgtt->base.dev);
+	gen6_free_scratch(vm);
 }
 
 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 {
+	struct i915_address_space *vm = &ppgtt->base;
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool retried = false;
@@ -1413,11 +1493,10 @@
 	 * size. We allocate at the top of the GTT to avoid fragmentation.
 	 */
 	BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
-	ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
-	if (IS_ERR(ppgtt->scratch_pt))
-		return PTR_ERR(ppgtt->scratch_pt);
 
-	gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
+	ret = gen6_init_scratch(vm);
+	if (ret)
+		return ret;
 
 alloc:
 	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
@@ -1448,7 +1527,7 @@
 	return 0;
 
 err_out:
-	unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+	gen6_free_scratch(vm);
 	return ret;
 }
 
@@ -1464,7 +1543,7 @@
 	uint32_t pde, temp;
 
 	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
-		ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
+		ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
 }
 
 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
@@ -1500,11 +1579,11 @@
 	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
 	ppgtt->debug_dump = gen6_dump_ppgtt;
 
-	ppgtt->pd.pd_offset =
+	ppgtt->pd.base.ggtt_offset =
 		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
 
 	ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
-		ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
+		ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
 
 	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
 
@@ -1515,23 +1594,21 @@
 			 ppgtt->node.start / PAGE_SIZE);
 
 	DRM_DEBUG("Adding PPGTT at offset %x\n",
-		  ppgtt->pd.pd_offset << 10);
+		  ppgtt->pd.base.ggtt_offset << 10);
 
 	return 0;
 }
 
 static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	ppgtt->base.dev = dev;
-	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
 
 	if (INTEL_INFO(dev)->gen < 8)
 		return gen6_ppgtt_init(ppgtt);
 	else
 		return gen8_ppgtt_init(ppgtt);
 }
+
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1550,11 +1627,6 @@
 
 int i915_ppgtt_init_hw(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-	int i, ret = 0;
-
 	/* In the case of execlists, PPGTT is enabled by the context descriptor
 	 * and the PDPs are contained within the context itself.  We don't
 	 * need to do anything here. */
@@ -1573,16 +1645,23 @@
 	else
 		MISSING_CASE(INTEL_INFO(dev)->gen);
 
-	if (ppgtt) {
-		for_each_ring(ring, dev_priv, i) {
-			ret = ppgtt->switch_mm(ppgtt, ring);
-			if (ret != 0)
-				return ret;
-		}
-	}
-
-	return ret;
+	return 0;
 }
+
+int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
+{
+	struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+	if (i915.enable_execlists)
+		return 0;
+
+	if (!ppgtt)
+		return 0;
+
+	return ppgtt->switch_mm(ppgtt, req);
+}
+
 struct i915_hw_ppgtt *
 i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
 {
@@ -1843,7 +1922,7 @@
 		 first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = gen8_pte_encode(vm->scratch.addr,
+	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
 				      I915_CACHE_LLC,
 				      use_scratch);
 	for (i = 0; i < num_entries; i++)
@@ -1869,7 +1948,8 @@
 		 first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
+	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
+				     I915_CACHE_LLC, use_scratch, 0);
 
 	for (i = 0; i < num_entries; i++)
 		iowrite32(scratch_pte, &gtt_base[i]);
@@ -2105,7 +2185,7 @@
 void i915_gem_init_global_gtt(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long gtt_size, mappable_size;
+	u64 gtt_size, mappable_size;
 
 	gtt_size = dev_priv->gtt.base.total;
 	mappable_size = dev_priv->gtt.mappable_end;
@@ -2135,42 +2215,6 @@
 	vm->cleanup(vm);
 }
 
-static int setup_scratch_page(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct page *page;
-	dma_addr_t dma_addr;
-
-	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
-	if (page == NULL)
-		return -ENOMEM;
-	set_pages_uc(page, 1);
-
-#ifdef CONFIG_INTEL_IOMMU
-	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
-				PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(dev->pdev, dma_addr))
-		return -EINVAL;
-#else
-	dma_addr = page_to_phys(page);
-#endif
-	dev_priv->gtt.base.scratch.page = page;
-	dev_priv->gtt.base.scratch.addr = dma_addr;
-
-	return 0;
-}
-
-static void teardown_scratch_page(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct page *page = dev_priv->gtt.base.scratch.page;
-
-	set_pages_wb(page, 1);
-	pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
-		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	__free_page(page);
-}
-
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
 {
 	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
@@ -2253,8 +2297,8 @@
 			     size_t gtt_size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_page_scratch *scratch_page;
 	phys_addr_t gtt_phys_addr;
-	int ret;
 
 	/* For Modern GENs the PTEs and register space are split in the BAR */
 	gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
@@ -2276,14 +2320,17 @@
 		return -ENOMEM;
 	}
 
-	ret = setup_scratch_page(dev);
-	if (ret) {
+	scratch_page = alloc_scratch_page(dev);
+	if (IS_ERR(scratch_page)) {
 		DRM_ERROR("Scratch setup failed\n");
 		/* iounmap will also get called at remove, but meh */
 		iounmap(dev_priv->gtt.gsm);
+		return PTR_ERR(scratch_page);
 	}
 
-	return ret;
+	dev_priv->gtt.base.scratch_page = scratch_page;
+
+	return 0;
 }
 
 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
@@ -2360,13 +2407,13 @@
 }
 
 static int gen8_gmch_probe(struct drm_device *dev,
-			   size_t *gtt_total,
+			   u64 *gtt_total,
 			   size_t *stolen,
 			   phys_addr_t *mappable_base,
-			   unsigned long *mappable_end)
+			   u64 *mappable_end)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned int gtt_size;
+	u64 gtt_size;
 	u16 snb_gmch_ctl;
 	int ret;
 
@@ -2408,10 +2455,10 @@
 }
 
 static int gen6_gmch_probe(struct drm_device *dev,
-			   size_t *gtt_total,
+			   u64 *gtt_total,
 			   size_t *stolen,
 			   phys_addr_t *mappable_base,
-			   unsigned long *mappable_end)
+			   u64 *mappable_end)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned int gtt_size;
@@ -2425,7 +2472,7 @@
 	 * a coarse sanity check.
 	 */
 	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
-		DRM_ERROR("Unknown GMADR size (%lx)\n",
+		DRM_ERROR("Unknown GMADR size (%llx)\n",
 			  dev_priv->gtt.mappable_end);
 		return -ENXIO;
 	}
@@ -2455,14 +2502,14 @@
 	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
 
 	iounmap(gtt->gsm);
-	teardown_scratch_page(vm->dev);
+	free_scratch_page(vm->dev, vm->scratch_page);
 }
 
 static int i915_gmch_probe(struct drm_device *dev,
-			   size_t *gtt_total,
+			   u64 *gtt_total,
 			   size_t *stolen,
 			   phys_addr_t *mappable_base,
-			   unsigned long *mappable_end)
+			   u64 *mappable_end)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
@@ -2519,17 +2566,17 @@
 		dev_priv->gtt.base.cleanup = gen6_gmch_remove;
 	}
 
+	gtt->base.dev = dev;
+
 	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
 			     &gtt->mappable_base, &gtt->mappable_end);
 	if (ret)
 		return ret;
 
-	gtt->base.dev = dev;
-
 	/* GMADR is the PCI mmio aperture into the global GTT. */
-	DRM_INFO("Memory usable by graphics device = %zdM\n",
+	DRM_INFO("Memory usable by graphics device = %lluM\n",
 		 gtt->base.total >> 20);
-	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
+	DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
 	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
 #ifdef CONFIG_INTEL_IOMMU
 	if (intel_iommu_gfx_mapped)
@@ -2706,30 +2753,17 @@
 intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
 			  struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
 	struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
-	unsigned long size, pages, rot_pages;
+	unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
 	struct sg_page_iter sg_iter;
 	unsigned long i;
 	dma_addr_t *page_addr_list;
 	struct sg_table *st;
-	unsigned int tile_pitch, tile_height;
-	unsigned int width_pages, height_pages;
 	int ret = -ENOMEM;
 
-	pages = obj->base.size / PAGE_SIZE;
-
-	/* Calculate tiling geometry. */
-	tile_height = intel_tile_height(dev, rot_info->pixel_format,
-					rot_info->fb_modifier);
-	tile_pitch = PAGE_SIZE / tile_height;
-	width_pages = DIV_ROUND_UP(rot_info->pitch, tile_pitch);
-	height_pages = DIV_ROUND_UP(rot_info->height, tile_height);
-	rot_pages = width_pages * height_pages;
-	size = rot_pages * PAGE_SIZE;
-
 	/* Allocate a temporary list of source pages for random access. */
-	page_addr_list = drm_malloc_ab(pages, sizeof(dma_addr_t));
+	page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
+				       sizeof(dma_addr_t));
 	if (!page_addr_list)
 		return ERR_PTR(ret);
 
@@ -2738,7 +2772,7 @@
 	if (!st)
 		goto err_st_alloc;
 
-	ret = sg_alloc_table(st, rot_pages, GFP_KERNEL);
+	ret = sg_alloc_table(st, size_pages, GFP_KERNEL);
 	if (ret)
 		goto err_sg_alloc;
 
@@ -2750,13 +2784,15 @@
 	}
 
 	/* Rotate the pages. */
-	rotate_pages(page_addr_list, width_pages, height_pages, st);
+	rotate_pages(page_addr_list,
+		     rot_info->width_pages, rot_info->height_pages,
+		     st);
 
 	DRM_DEBUG_KMS(
-		      "Created rotated page mapping for object size %lu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages).\n",
-		      size, rot_info->pitch, rot_info->height,
-		      rot_info->pixel_format, width_pages, height_pages,
-		      rot_pages);
+		      "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages).\n",
+		      obj->base.size, rot_info->pitch, rot_info->height,
+		      rot_info->pixel_format, rot_info->width_pages,
+		      rot_info->height_pages, size_pages);
 
 	drm_free_large(page_addr_list);
 
@@ -2768,10 +2804,10 @@
 	drm_free_large(page_addr_list);
 
 	DRM_DEBUG_KMS(
-		      "Failed to create rotated mapping for object size %lu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages)\n",
-		      size, ret, rot_info->pitch, rot_info->height,
-		      rot_info->pixel_format, width_pages, height_pages,
-		      rot_pages);
+		      "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages)\n",
+		      obj->base.size, ret, rot_info->pitch, rot_info->height,
+		      rot_info->pixel_format, rot_info->width_pages,
+		      rot_info->height_pages, size_pages);
 	return ERR_PTR(ret);
 }
 
@@ -2889,9 +2925,12 @@
 				    vma->node.size,
 				    VM_TO_TRACE_NAME(vma->vm));
 
+		/* XXX: i915_vma_pin() will fix this +- hack */
+		vma->pin_count++;
 		ret = vma->vm->allocate_va_range(vma->vm,
 						 vma->node.start,
 						 vma->node.size);
+		vma->pin_count--;
 		if (ret)
 			return ret;
 	}
@@ -2916,9 +2955,10 @@
 i915_ggtt_view_size(struct drm_i915_gem_object *obj,
 		    const struct i915_ggtt_view *view)
 {
-	if (view->type == I915_GGTT_VIEW_NORMAL ||
-	    view->type == I915_GGTT_VIEW_ROTATED) {
+	if (view->type == I915_GGTT_VIEW_NORMAL) {
 		return obj->base.size;
+	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
+		return view->rotation_info.size;
 	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
 		return view->params.partial.size << PAGE_SHIFT;
 	} else {
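
The scratch-page rework in the hunks above swaps an int-returning setup_scratch_page() for alloc_scratch_page(), which returns either the new object or an encoded error, letting the probe path capture both in a single value. Below is a minimal standalone sketch of that ERR_PTR idiom; the err.h-style macros are simplified stand-ins and the allocator is hypothetical.

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's <linux/err.h> helpers. */
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-4095)

struct scratch { void *mem; };

/* Hypothetical allocator following the alloc_scratch_page() shape:
 * return the object on success, an encoded errno on failure. */
static struct scratch *alloc_scratch(void)
{
	struct scratch *sp = malloc(sizeof(*sp));

	if (!sp)
		return ERR_PTR(-ENOMEM);
	sp->mem = calloc(1, 4096);
	if (!sp->mem) {
		free(sp);
		return ERR_PTR(-ENOMEM);
	}
	return sp;
}

/* Caller pattern, as in the probe hunk above:
 *
 *	sp = alloc_scratch();
 *	if (IS_ERR(sp))
 *		return PTR_ERR(sp);
 */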
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 0d46dd2..e1cfa29 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -126,6 +126,8 @@
 	unsigned int pitch;
 	uint32_t pixel_format;
 	uint64_t fb_modifier;
+	unsigned int width_pages, height_pages;
+	uint64_t size;
 };
 
 struct i915_ggtt_view {
@@ -205,19 +207,34 @@
 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 };
 
-struct i915_page_table {
+struct i915_page_dma {
 	struct page *page;
-	dma_addr_t daddr;
+	union {
+		dma_addr_t daddr;
+
+		/* For gen6/gen7 only. This is the offset in the GGTT
+		 * where the page directory entries for PPGTT begin
+		 */
+		uint32_t ggtt_offset;
+	};
+};
+
+#define px_base(px) (&(px)->base)
+#define px_page(px) (px_base(px)->page)
+#define px_dma(px) (px_base(px)->daddr)
+
+struct i915_page_scratch {
+	struct i915_page_dma base;
+};
+
+struct i915_page_table {
+	struct i915_page_dma base;
 
 	unsigned long *used_ptes;
 };
 
 struct i915_page_directory {
-	struct page *page; /* NULL for GEN6-GEN7 */
-	union {
-		uint32_t pd_offset;
-		dma_addr_t daddr;
-	};
+	struct i915_page_dma base;
 
 	unsigned long *used_pdes;
 	struct i915_page_table *page_table[I915_PDES]; /* PDEs */
@@ -233,13 +250,12 @@
 	struct drm_mm mm;
 	struct drm_device *dev;
 	struct list_head global_link;
-	unsigned long start;		/* Start offset always 0 for dri2 */
-	size_t total;		/* size addr space maps (ex. 2GB for ggtt) */
+	u64 start;		/* Start offset always 0 for dri2 */
+	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
 
-	struct {
-		dma_addr_t addr;
-		struct page *page;
-	} scratch;
+	struct i915_page_scratch *scratch_page;
+	struct i915_page_table *scratch_pt;
+	struct i915_page_directory *scratch_pd;
 
 	/**
 	 * List of objects currently involved in rendering.
@@ -300,9 +316,9 @@
  */
 struct i915_gtt {
 	struct i915_address_space base;
-	size_t stolen_size;		/* Total size of stolen memory */
 
-	unsigned long mappable_end;	/* End offset that we can CPU map */
+	size_t stolen_size;		/* Total size of stolen memory */
+	u64 mappable_end;		/* End offset that we can CPU map */
 	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
 	phys_addr_t mappable_base;	/* PA of our GMADR */
 
@@ -314,9 +330,9 @@
 	int mtrr;
 
 	/* global gtt ops */
-	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+	int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
 			  size_t *stolen, phys_addr_t *mappable_base,
-			  unsigned long *mappable_end);
+			  u64 *mappable_end);
 };
 
 struct i915_hw_ppgtt {
@@ -329,16 +345,13 @@
 		struct i915_page_directory pd;
 	};
 
-	struct i915_page_table *scratch_pt;
-	struct i915_page_directory *scratch_pd;
-
 	struct drm_i915_file_private *file_priv;
 
 	gen6_pte_t __iomem *pd_addr;
 
 	int (*enable)(struct i915_hw_ppgtt *ppgtt);
 	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
-			 struct intel_engine_cs *ring);
+			 struct drm_i915_gem_request *req);
 	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
 
@@ -468,6 +481,14 @@
 	return i915_pte_count(address, length, GEN8_PDE_SHIFT);
 }
 
+static inline dma_addr_t
+i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
+{
+	return test_bit(n, ppgtt->pdp.used_pdpes) ?
+		px_dma(ppgtt->pdp.page_directory[n]) :
+		px_dma(ppgtt->base.scratch_pd);
+}
+
 int i915_gem_gtt_init(struct drm_device *dev);
 void i915_gem_init_global_gtt(struct drm_device *dev);
 void i915_global_gtt_cleanup(struct drm_device *dev);
@@ -475,6 +496,7 @@
 
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
 int i915_ppgtt_init_hw(struct drm_device *dev);
+int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
 void i915_ppgtt_release(struct kref *kref);
 struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
 					struct drm_i915_file_private *fpriv);
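
The header changes above fold the per-level page/daddr pairs into a common struct i915_page_dma embedded as 'base', so the px_base/px_page/px_dma accessors work on any level of the page-table hierarchy. A compilable sketch of the idea, with the kernel types replaced by stand-ins:

#include <stdint.h>

typedef uint64_t dma_addr_t;	/* stand-in for the kernel type */
struct page;			/* opaque here */

/* Common descriptor, mirroring the diff. */
struct i915_page_dma {
	struct page *page;
	dma_addr_t daddr;
};

struct i915_page_table { struct i915_page_dma base; /* used_ptes... */ };
struct i915_page_directory { struct i915_page_dma base; /* used_pdes... */ };

#define px_base(px)	(&(px)->base)
#define px_page(px)	(px_base(px)->page)
#define px_dma(px)	(px_base(px)->daddr)

/* One accessor serves every level because each embeds 'base' first: */
static inline dma_addr_t pt_dma(struct i915_page_table *pt) { return px_dma(pt); }
static inline dma_addr_t pd_dma(struct i915_page_directory *pd) { return px_dma(pd); }

This is the same trick the new i915_page_dir_dma_addr() relies on when it falls back to the scratch page directory.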
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 521548a..5026a62 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -73,6 +73,24 @@
 	return ret;
 }
 
+/*
+ * Macro to add commands to auxiliary batch.
+ * This macro only checks for page overflow before inserting the commands;
+ * this is sufficient as the null state generator makes the final batch
+ * with two passes to build command and state separately. At this point
+ * the size of both is known and it compacts them by relocating the state
+ * right after the commands, taking care of alignment, so we should have
+ * sufficient space below them for adding new commands.
+ */
+#define OUT_BATCH(batch, i, val)				\
+	do {							\
+		if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) {	\
+			ret = -ENOSPC;				\
+			goto err_out;				\
+		}						\
+		(batch)[(i)++] = (val);				\
+	} while (0)
+
 static int render_state_setup(struct render_state *so)
 {
 	const struct intel_renderstate_rodata *rodata = so->rodata;
@@ -96,8 +114,10 @@
 			s = lower_32_bits(r);
 			if (so->gen >= 8) {
 				if (i + 1 >= rodata->batch_items ||
-				    rodata->batch[i + 1] != 0)
-					return -EINVAL;
+				    rodata->batch[i + 1] != 0) {
+					ret = -EINVAL;
+					goto err_out;
+				}
 
 				d[i++] = s;
 				s = upper_32_bits(r);
@@ -108,6 +128,21 @@
 
 		d[i++] = s;
 	}
+
+	while (i % CACHELINE_DWORDS)
+		OUT_BATCH(d, i, MI_NOOP);
+
+	so->aux_batch_offset = i * sizeof(u32);
+
+	OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
+	so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
+
+	/*
+	 * Since we are sending a length, we need to strictly conform to
+	 * all requirements. For Gen2 this must be a multiple of 8.
+	 */
+	so->aux_batch_size = ALIGN(so->aux_batch_size, 8);
+
 	kunmap(page);
 
 	ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
@@ -120,8 +155,14 @@
 	}
 
 	return 0;
+
+err_out:
+	kunmap(page);
+	return ret;
 }
 
+#undef OUT_BATCH
+
 void i915_gem_render_state_fini(struct render_state *so)
 {
 	i915_gem_object_ggtt_unpin(so->obj);
@@ -152,29 +193,36 @@
 	return 0;
 }
 
-int i915_gem_render_state_init(struct intel_engine_cs *ring)
+int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 {
 	struct render_state so;
 	int ret;
 
-	ret = i915_gem_render_state_prepare(ring, &so);
+	ret = i915_gem_render_state_prepare(req->ring, &so);
 	if (ret)
 		return ret;
 
 	if (so.rodata == NULL)
 		return 0;
 
-	ret = ring->dispatch_execbuffer(ring,
-					so.ggtt_offset,
-					so.rodata->batch_items * 4,
-					I915_DISPATCH_SECURE);
+	ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
+					     so.rodata->batch_items * 4,
+					     I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+	if (so.aux_batch_size > 8) {
+		ret = req->ring->dispatch_execbuffer(req,
+						     (so.ggtt_offset +
+						      so.aux_batch_offset),
+						     so.aux_batch_size,
+						     I915_DISPATCH_SECURE);
+		if (ret)
+			goto out;
+	}
 
-	ret = __i915_add_request(ring, NULL, so.obj);
-	/* __i915_add_request moves object to inactive if it fails */
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
+
 out:
 	i915_gem_render_state_fini(&so);
 	return ret;
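
The OUT_BATCH() macro above grows the null-state batch one dword at a time while refusing to run off the page, and the MI_NOOP loop then pads to a cacheline boundary before the auxiliary batch begins. A function-style sketch of the same pattern; opcode values and sizes are illustrative:

#include <errno.h>
#include <stdint.h>

#define BATCH_DWORDS	(4096 / sizeof(uint32_t))	/* one page of dwords */
#define CACHELINE_DWORDS 16				/* 64-byte cachelines */
#define MI_NOOP		0u				/* illustrative opcode */

/* Bounds-checked emission, the function twin of OUT_BATCH(). */
static int emit(uint32_t *batch, unsigned int *i, uint32_t val)
{
	if (*i >= BATCH_DWORDS)
		return -ENOSPC;		/* would overflow the page */
	batch[(*i)++] = val;
	return 0;
}

/* Pad to a cacheline before placing the auxiliary batch. */
static int pad_to_cacheline(uint32_t *batch, unsigned int *i)
{
	while (*i % CACHELINE_DWORDS)
		if (emit(batch, i, MI_NOOP))
			return -ENOSPC;
	return 0;
}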
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index c44961e..e641bb0 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -37,9 +37,11 @@
 	struct drm_i915_gem_object *obj;
 	u64 ggtt_offset;
 	int gen;
+	u32 aux_batch_size;
+	u32 aux_batch_offset;
 };
 
-int i915_gem_render_state_init(struct intel_engine_cs *ring);
+int i915_gem_render_state_init(struct drm_i915_gem_request *req);
 void i915_gem_render_state_fini(struct render_state *so);
 int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
 				  struct render_state *so);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 8b5b784..f361c4a 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -42,6 +42,31 @@
  * for is a boon.
  */
 
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+				struct drm_mm_node *node, u64 size,
+				unsigned alignment)
+{
+	int ret;
+
+	if (!drm_mm_initialized(&dev_priv->mm.stolen))
+		return -ENODEV;
+
+	mutex_lock(&dev_priv->mm.stolen_lock);
+	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, alignment,
+				 DRM_MM_SEARCH_DEFAULT);
+	mutex_unlock(&dev_priv->mm.stolen_lock);
+
+	return ret;
+}
+
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+				 struct drm_mm_node *node)
+{
+	mutex_lock(&dev_priv->mm.stolen_lock);
+	drm_mm_remove_node(node);
+	mutex_unlock(&dev_priv->mm.stolen_lock);
+}
+
 static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -151,134 +176,6 @@
 	return base;
 }
 
-static int find_compression_threshold(struct drm_device *dev,
-				      struct drm_mm_node *node,
-				      int size,
-				      int fb_cpp)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int compression_threshold = 1;
-	int ret;
-
-	/* HACK: This code depends on what we will do in *_enable_fbc. If that
-	 * code changes, this code needs to change as well.
-	 *
-	 * The enable_fbc code will attempt to use one of our 2 compression
-	 * thresholds, therefore, in that case, we only have 1 resort.
-	 */
-
-	/* Try to over-allocate to reduce reallocations and fragmentation. */
-	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
-				 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
-	if (ret == 0)
-		return compression_threshold;
-
-again:
-	/* HW's ability to limit the CFB is 1:4 */
-	if (compression_threshold > 4 ||
-	    (fb_cpp == 2 && compression_threshold == 2))
-		return 0;
-
-	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
-				 size >>= 1, 4096,
-				 DRM_MM_SEARCH_DEFAULT);
-	if (ret && INTEL_INFO(dev)->gen <= 4) {
-		return 0;
-	} else if (ret) {
-		compression_threshold <<= 1;
-		goto again;
-	} else {
-		return compression_threshold;
-	}
-}
-
-static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_mm_node *uninitialized_var(compressed_llb);
-	int ret;
-
-	ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
-					 size, fb_cpp);
-	if (!ret)
-		goto err_llb;
-	else if (ret > 1) {
-		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
-
-	}
-
-	dev_priv->fbc.threshold = ret;
-
-	if (INTEL_INFO(dev_priv)->gen >= 5)
-		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
-	else if (IS_GM45(dev)) {
-		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
-	} else {
-		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
-		if (!compressed_llb)
-			goto err_fb;
-
-		ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
-					 4096, 4096, DRM_MM_SEARCH_DEFAULT);
-		if (ret)
-			goto err_fb;
-
-		dev_priv->fbc.compressed_llb = compressed_llb;
-
-		I915_WRITE(FBC_CFB_BASE,
-			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
-		I915_WRITE(FBC_LL_BASE,
-			   dev_priv->mm.stolen_base + compressed_llb->start);
-	}
-
-	dev_priv->fbc.uncompressed_size = size;
-
-	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
-		      size);
-
-	return 0;
-
-err_fb:
-	kfree(compressed_llb);
-	drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
-err_llb:
-	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
-	return -ENOSPC;
-}
-
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (!drm_mm_initialized(&dev_priv->mm.stolen))
-		return -ENODEV;
-
-	if (size <= dev_priv->fbc.uncompressed_size)
-		return 0;
-
-	/* Release any current block */
-	i915_gem_stolen_cleanup_compression(dev);
-
-	return i915_setup_compression(dev, size, fb_cpp);
-}
-
-void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->fbc.uncompressed_size == 0)
-		return;
-
-	drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
-
-	if (dev_priv->fbc.compressed_llb) {
-		drm_mm_remove_node(dev_priv->fbc.compressed_llb);
-		kfree(dev_priv->fbc.compressed_llb);
-	}
-
-	dev_priv->fbc.uncompressed_size = 0;
-}
-
 void i915_gem_cleanup_stolen(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -286,15 +183,108 @@
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
 		return;
 
-	i915_gem_stolen_cleanup_compression(dev);
 	drm_mm_takedown(&dev_priv->mm.stolen);
 }
 
+static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				     unsigned long *base, unsigned long *size)
+{
+	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
+	case GEN6_STOLEN_RESERVED_1M:
+		*size = 1024 * 1024;
+		break;
+	case GEN6_STOLEN_RESERVED_512K:
+		*size = 512 * 1024;
+		break;
+	case GEN6_STOLEN_RESERVED_256K:
+		*size = 256 * 1024;
+		break;
+	case GEN6_STOLEN_RESERVED_128K:
+		*size = 128 * 1024;
+		break;
+	default:
+		*size = 1024 * 1024;
+		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
+	}
+}
+
+static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				     unsigned long *base, unsigned long *size)
+{
+	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;
+
+	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
+	case GEN7_STOLEN_RESERVED_1M:
+		*size = 1024 * 1024;
+		break;
+	case GEN7_STOLEN_RESERVED_256K:
+		*size = 256 * 1024;
+		break;
+	default:
+		*size = 1024 * 1024;
+		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
+	}
+}
+
+static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				     unsigned long *base, unsigned long *size)
+{
+	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+
+	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
+	case GEN8_STOLEN_RESERVED_1M:
+		*size = 1024 * 1024;
+		break;
+	case GEN8_STOLEN_RESERVED_2M:
+		*size = 2 * 1024 * 1024;
+		break;
+	case GEN8_STOLEN_RESERVED_4M:
+		*size = 4 * 1024 * 1024;
+		break;
+	case GEN8_STOLEN_RESERVED_8M:
+		*size = 8 * 1024 * 1024;
+		break;
+	default:
+		*size = 8 * 1024 * 1024;
+		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
+	}
+}
+
+static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				    unsigned long *base, unsigned long *size)
+{
+	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+	unsigned long stolen_top;
+
+	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+
+	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
+
+	/* On these platforms, the register doesn't have a size field, so the
+	 * size is the distance between the base and the top of the stolen
+	 * memory. We also have the genuine case where base is zero and there's
+	 * nothing reserved. */
+	if (*base == 0)
+		*size = 0;
+	else
+		*size = stolen_top - *base;
+}
+
 int i915_gem_init_stolen(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 tmp;
-	int bios_reserved = 0;
+	unsigned long reserved_total, reserved_base, reserved_size;
+	unsigned long stolen_top;
+
+	mutex_init(&dev_priv->mm.stolen_lock);
 
 #ifdef CONFIG_INTEL_IOMMU
 	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
@@ -310,26 +300,61 @@
 	if (dev_priv->mm.stolen_base == 0)
 		return 0;
 
-	DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
-		      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
+	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
 
-	if (INTEL_INFO(dev)->gen >= 8) {
-		tmp = I915_READ(GEN7_BIOS_RESERVED);
-		tmp >>= GEN8_BIOS_RESERVED_SHIFT;
-		tmp &= GEN8_BIOS_RESERVED_MASK;
-		bios_reserved = (1024*1024) << tmp;
-	} else if (IS_GEN7(dev)) {
-		tmp = I915_READ(GEN7_BIOS_RESERVED);
-		bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
-			256*1024 : 1024*1024;
+	switch (INTEL_INFO(dev_priv)->gen) {
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+		/* Assume the gen6 maximum for the older platforms. */
+		reserved_size = 1024 * 1024;
+		reserved_base = stolen_top - reserved_size;
+		break;
+	case 6:
+		gen6_get_stolen_reserved(dev_priv, &reserved_base,
+					 &reserved_size);
+		break;
+	case 7:
+		gen7_get_stolen_reserved(dev_priv, &reserved_base,
+					 &reserved_size);
+		break;
+	default:
+		if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+			bdw_get_stolen_reserved(dev_priv, &reserved_base,
+						&reserved_size);
+		else
+			gen8_get_stolen_reserved(dev_priv, &reserved_base,
+						 &reserved_size);
+		break;
 	}
 
-	if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
+	/* It is possible for the reserved base to be zero, but the register
+	 * field for size doesn't have a zero option. */
+	if (reserved_base == 0) {
+		reserved_size = 0;
+		reserved_base = stolen_top;
+	}
+
+	if (reserved_base < dev_priv->mm.stolen_base ||
+	    reserved_base + reserved_size > stolen_top) {
+		DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
+			      reserved_base, reserved_base + reserved_size,
+			      dev_priv->mm.stolen_base, stolen_top);
 		return 0;
+	}
+
+	/* It is possible for the reserved area to end before the end of stolen
+	 * memory, so just consider the start. */
+	reserved_total = stolen_top - reserved_base;
+
+	DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
+		      dev_priv->gtt.stolen_size >> 10,
+		      (dev_priv->gtt.stolen_size - reserved_total) >> 10);
 
 	/* Basic memrange allocator for stolen space */
 	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
-		    bios_reserved);
+		    reserved_total);
 
 	return 0;
 }
@@ -386,8 +411,10 @@
 static void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
 	if (obj->stolen) {
-		drm_mm_remove_node(obj->stolen);
+		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
 		kfree(obj->stolen);
 		obj->stolen = NULL;
 	}
@@ -448,8 +475,7 @@
 	if (!stolen)
 		return NULL;
 
-	ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
-				 4096, DRM_MM_SEARCH_DEFAULT);
+	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
 	if (ret) {
 		kfree(stolen);
 		return NULL;
@@ -459,7 +485,7 @@
 	if (obj)
 		return obj;
 
-	drm_mm_remove_node(stolen);
+	i915_gem_stolen_remove_node(dev_priv, stolen);
 	kfree(stolen);
 	return NULL;
 }
@@ -494,7 +520,9 @@
 
 	stolen->start = stolen_offset;
 	stolen->size = size;
+	mutex_lock(&dev_priv->mm.stolen_lock);
 	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+	mutex_unlock(&dev_priv->mm.stolen_lock);
 	if (ret) {
 		DRM_DEBUG_KMS("failed to allocate stolen space\n");
 		kfree(stolen);
@@ -504,7 +532,7 @@
 	obj = _i915_gem_object_create_stolen(dev, stolen);
 	if (obj == NULL) {
 		DRM_DEBUG_KMS("failed to allocate stolen object\n");
-		drm_mm_remove_node(stolen);
+		i915_gem_stolen_remove_node(dev_priv, stolen);
 		kfree(stolen);
 		return NULL;
 	}
@@ -545,7 +573,7 @@
 err_vma:
 	i915_gem_vma_destroy(vma);
 err_out:
-	drm_mm_remove_node(stolen);
+	i915_gem_stolen_remove_node(dev_priv, stolen);
 	kfree(stolen);
 	drm_gem_object_unreference(&obj->base);
 	return NULL;
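
Two things happen in this file: every drm_mm access for stolen memory now funnels through insert/remove helpers that hold the new stolen_lock, and the reserved region is derived from GEN6_STOLEN_RESERVED instead of the old BIOS field. The arithmetic in i915_gem_init_stolen() reduces to a few lines; here is a standalone calculation with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	/* Hypothetical values; the real ones come from hardware registers. */
	unsigned long stolen_base = 0x7b000000UL;
	unsigned long stolen_size = 64UL << 20;		/* 64 MiB of stolen */
	unsigned long reserved_base = 0x7ee00000UL;

	unsigned long stolen_top = stolen_base + stolen_size;
	/* The reserved area runs from its base to the top of stolen. */
	unsigned long reserved_total = stolen_top - reserved_base;

	printf("Memory reserved for graphics device: %luK, usable: %luK\n",
	       stolen_size >> 10, (stolen_size - reserved_total) >> 10);
	/* prints: ... 65536K, usable: 63488K */
	return 0;
}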
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index d19c9db..8a6717c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -31,201 +31,32 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-/** @file i915_gem_tiling.c
- *
- * Support for managing tiling state of buffer objects.
- *
- * The idea behind tiling is to increase cache hit rates by rearranging
- * pixel data so that a group of pixel accesses are in the same cacheline.
- * Performance improvement from doing this on the back/depth buffer are on
- * the order of 30%.
- *
- * Intel architectures make this somewhat more complicated, though, by
- * adjustments made to addressing of data when the memory is in interleaved
- * mode (matched pairs of DIMMS) to improve memory bandwidth.
- * For interleaved memory, the CPU sends every sequential 64 bytes
- * to an alternate memory channel so it can get the bandwidth from both.
- *
- * The GPU also rearranges its accesses for increased bandwidth to interleaved
- * memory, and it matches what the CPU does for non-tiled.  However, when tiled
- * it does it a little differently, since one walks addresses not just in the
- * X direction but also Y.  So, along with alternating channels when bit
- * 6 of the address flips, it also alternates when other bits flip --  Bits 9
- * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
- * are common to both the 915 and 965-class hardware.
- *
- * The CPU also sometimes XORs in higher bits as well, to improve
- * bandwidth doing strided access like we do so frequently in graphics.  This
- * is called "Channel XOR Randomization" in the MCH documentation.  The result
- * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
- * decode.
- *
- * All of this bit 6 XORing has an effect on our memory management,
- * as we need to make sure that the 3d driver can correctly address object
- * contents.
- *
- * If we don't have interleaved memory, all tiling is safe and no swizzling is
- * required.
- *
- * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
- * 17 is not just a page offset, so as we page an objet out and back in,
- * individual pages in it will have different bit 17 addresses, resulting in
- * each 64 bytes being swapped with its neighbor!
- *
- * Otherwise, if interleaved, we have to tell the 3d driver what the address
- * swizzling it needs to do is, since it's writing with the CPU to the pages
- * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
- * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
- * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
- * to match what the GPU expects.
- */
-
 /**
- * Detects bit 6 swizzling of address lookup between IGD access and CPU
- * access through main memory.
+ * DOC: buffer object tiling
+ *
+ * i915_gem_set_tiling() and i915_gem_get_tiling() are the userspace interface
+ * to declare fence register requirements.
+ *
+ * In principle GEM doesn't care at all about the internal data layout of an
+ * object, and hence it also doesn't care about tiling or swizzling. There are
+ * two exceptions:
+ *
+ * - For X and Y tiling the hardware provides detilers for CPU access, so-called
+ *   fences. Since there is only a limited number of them the kernel must manage
+ *   these, and therefore userspace must tell the kernel the object tiling if it
+ *   wants to use fences for detiling.
+ * - Gen3 and gen4 platforms have a swizzling pattern for tiled objects which
+ *   depends upon the physical page frame number. When swapping such objects the
+ *   page frame number might change and the kernel must be able to fix this up,
+ *   and hence must know the tiling. Note that on a subset of platforms with
+ *   asymmetric memory channel population the swizzling pattern changes in an
+ *   unknown way, and for those the kernel simply forbids swapping completely.
+ *
+ * Since neither of these applies to new tiling layouts on modern platforms like
+ * W, Ys and Yf tiling, GEM only allows object tiling to be set to X or Y tiled.
+ * Anything else can be handled in userspace entirely without the kernel's
+ * involvement.
  */
-void
-i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-
-	if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
-		/*
-		 * On BDW+, swizzling is not used. We leave the CPU memory
-		 * controller in charge of optimizing memory accesses without
-		 * the extra address manipulation GPU side.
-		 *
-		 * VLV and CHV don't have GPU swizzling.
-		 */
-		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-	} else if (INTEL_INFO(dev)->gen >= 6) {
-		if (dev_priv->preserve_bios_swizzle) {
-			if (I915_READ(DISP_ARB_CTL) &
-			    DISP_TILE_SURFACE_SWIZZLING) {
-				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-				swizzle_y = I915_BIT_6_SWIZZLE_9;
-			} else {
-				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-			}
-		} else {
-			uint32_t dimm_c0, dimm_c1;
-			dimm_c0 = I915_READ(MAD_DIMM_C0);
-			dimm_c1 = I915_READ(MAD_DIMM_C1);
-			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
-			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
-			/* Enable swizzling when the channels are populated
-			 * with identically sized dimms. We don't need to check
-			 * the 3rd channel because no cpu with gpu attached
-			 * ships in that configuration. Also, swizzling only
-			 * makes sense for 2 channels anyway. */
-			if (dimm_c0 == dimm_c1) {
-				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-				swizzle_y = I915_BIT_6_SWIZZLE_9;
-			} else {
-				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-			}
-		}
-	} else if (IS_GEN5(dev)) {
-		/* On Ironlake whatever DRAM config, GPU always do
-		 * same swizzling setup.
-		 */
-		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-		swizzle_y = I915_BIT_6_SWIZZLE_9;
-	} else if (IS_GEN2(dev)) {
-		/* As far as we know, the 865 doesn't have these bit 6
-		 * swizzling issues.
-		 */
-		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-	} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
-		uint32_t dcc;
-
-		/* On 9xx chipsets, channel interleave by the CPU is
-		 * determined by DCC.  For single-channel, neither the CPU
-		 * nor the GPU do swizzling.  For dual channel interleaved,
-		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
-		 * 9 for Y tiled.  The CPU's interleave is independent, and
-		 * can be based on either bit 11 (haven't seen this yet) or
-		 * bit 17 (common).
-		 */
-		dcc = I915_READ(DCC);
-		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
-		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
-		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
-			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-			break;
-		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
-			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
-				/* This is the base swizzling by the GPU for
-				 * tiled buffers.
-				 */
-				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-				swizzle_y = I915_BIT_6_SWIZZLE_9;
-			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
-				/* Bit 11 swizzling by the CPU in addition. */
-				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
-				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
-			} else {
-				/* Bit 17 swizzling by the CPU in addition. */
-				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
-				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
-			}
-			break;
-		}
-
-		/* check for L-shaped memory aka modified enhanced addressing */
-		if (IS_GEN4(dev)) {
-			uint32_t ddc2 = I915_READ(DCC2);
-
-			if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
-				dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
-		}
-
-		if (dcc == 0xffffffff) {
-			DRM_ERROR("Couldn't read from MCHBAR.  "
-				  "Disabling tiling.\n");
-			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-		}
-	} else {
-		/* The 965, G33, and newer, have a very flexible memory
-		 * configuration.  It will enable dual-channel mode
-		 * (interleaving) on as much memory as it can, and the GPU
-		 * will additionally sometimes enable different bit 6
-		 * swizzling for tiled objects from the CPU.
-		 *
-		 * Here's what I found on the G965:
-		 *    slot fill         memory size  swizzling
-		 * 0A   0B   1A   1B    1-ch   2-ch
-		 * 512  0    0    0     512    0     O
-		 * 512  0    512  0     16     1008  X
-		 * 512  0    0    512   16     1008  X
-		 * 0    512  0    512   16     1008  X
-		 * 1024 1024 1024 0     2048   1024  O
-		 *
-		 * We could probably detect this based on either the DRB
-		 * matching, which was the case for the swizzling required in
-		 * the table above, or from the 1-ch value being less than
-		 * the minimum size of a rank.
-		 */
-		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
-			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-		} else {
-			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
-			swizzle_y = I915_BIT_6_SWIZZLE_9;
-		}
-	}
-
-	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
-	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
-}
 
 /* Check pitch constraints for all chips & tiling formats */
 static bool
@@ -313,8 +144,18 @@
 }
 
 /**
+ * i915_gem_set_tiling - IOCTL handler to set tiling mode
+ * @dev: DRM device
+ * @data: data pointer for the ioctl
+ * @file: DRM file for the ioctl call
+ *
  * Sets the tiling mode of an object, returning the required swizzling of
  * bit 6 of addresses in the object.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
  */
 int
 i915_gem_set_tiling(struct drm_device *dev, void *data,
@@ -432,7 +273,17 @@
 }
 
 /**
+ * i915_gem_get_tiling - IOCTL handler to get tiling mode
+ * @dev: DRM device
+ * @data: data pointer for the ioctl
+ * @file: DRM file for the ioctl call
+ *
  * Returns the current tiling mode and required bit 6 swizzling for the object.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
  */
 int
 i915_gem_get_tiling(struct drm_device *dev, void *data,
@@ -478,75 +329,3 @@
 
 	return 0;
 }
-
-/**
- * Swap every 64 bytes of this page around, to account for it having a new
- * bit 17 of its physical address and therefore being interpreted differently
- * by the GPU.
- */
-static void
-i915_gem_swizzle_page(struct page *page)
-{
-	char temp[64];
-	char *vaddr;
-	int i;
-
-	vaddr = kmap(page);
-
-	for (i = 0; i < PAGE_SIZE; i += 128) {
-		memcpy(temp, &vaddr[i], 64);
-		memcpy(&vaddr[i], &vaddr[i + 64], 64);
-		memcpy(&vaddr[i + 64], temp, 64);
-	}
-
-	kunmap(page);
-}
-
-void
-i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
-{
-	struct sg_page_iter sg_iter;
-	int i;
-
-	if (obj->bit_17 == NULL)
-		return;
-
-	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		struct page *page = sg_page_iter_page(&sg_iter);
-		char new_bit_17 = page_to_phys(page) >> 17;
-		if ((new_bit_17 & 0x1) !=
-		    (test_bit(i, obj->bit_17) != 0)) {
-			i915_gem_swizzle_page(page);
-			set_page_dirty(page);
-		}
-		i++;
-	}
-}
-
-void
-i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
-{
-	struct sg_page_iter sg_iter;
-	int page_count = obj->base.size >> PAGE_SHIFT;
-	int i;
-
-	if (obj->bit_17 == NULL) {
-		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
-				      sizeof(long), GFP_KERNEL);
-		if (obj->bit_17 == NULL) {
-			DRM_ERROR("Failed to allocate memory for bit 17 "
-				  "record\n");
-			return;
-		}
-	}
-
-	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
-			__set_bit(i, obj->bit_17);
-		else
-			__clear_bit(i, obj->bit_17);
-		i++;
-	}
-}
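
The swizzle helpers deleted from this file implement the 64-byte neighbour swap described in the removed comment block: when bit 17 of a page's physical address differs from the value recorded at swap-out, every 64-byte half of each 128-byte chunk must trade places. For reference, a standalone sketch of that transform:

#include <string.h>

#define PAGE_SIZE 4096

/* Swap each 64-byte block with its neighbour, as the deleted
 * i915_gem_swizzle_page() did; vaddr is the mapped page contents. */
static void swizzle_page(char *vaddr)
{
	char temp[64];
	int i;

	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}
}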
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 6f42569..41d0739 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -369,6 +369,7 @@
 	err_printf(m, "Reset count: %u\n", error->reset_count);
 	err_printf(m, "Suspend count: %u\n", error->suspend_count);
 	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
+	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
 	err_printf(m, "EIR: 0x%08x\n", error->eir);
 	err_printf(m, "IER: 0x%08x\n", error->ier);
 	if (INTEL_INFO(dev)->gen >= 8) {
@@ -1266,6 +1267,10 @@
 static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
 				   struct drm_i915_error_state *error)
 {
+	error->iommu = -1;
+#ifdef CONFIG_INTEL_IOMMU
+	error->iommu = intel_iommu_gfx_mapped;
+#endif
 	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
 	error->suspend_count = dev_priv->suspend_count;
 }
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
new file mode 100644
index 0000000..ccdc6c8
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#ifndef _I915_GUC_REG_H_
+#define _I915_GUC_REG_H_
+
+/* Definitions of GuC H/W registers, bits, etc */
+
+#define GUC_STATUS			0xc000
+#define   GS_BOOTROM_SHIFT		1
+#define   GS_BOOTROM_MASK		  (0x7F << GS_BOOTROM_SHIFT)
+#define   GS_BOOTROM_RSA_FAILED		  (0x50 << GS_BOOTROM_SHIFT)
+#define   GS_UKERNEL_SHIFT		8
+#define   GS_UKERNEL_MASK		  (0xFF << GS_UKERNEL_SHIFT)
+#define   GS_UKERNEL_LAPIC_DONE		  (0x30 << GS_UKERNEL_SHIFT)
+#define   GS_UKERNEL_DPC_ERROR		  (0x60 << GS_UKERNEL_SHIFT)
+#define   GS_UKERNEL_READY		  (0xF0 << GS_UKERNEL_SHIFT)
+#define   GS_MIA_SHIFT			16
+#define   GS_MIA_MASK			  (0x07 << GS_MIA_SHIFT)
+
+#define GUC_WOPCM_SIZE			0xc050
+#define   GUC_WOPCM_SIZE_VALUE  	  (0x80 << 12)	/* 512KB */
+#define GUC_WOPCM_OFFSET		0x80000		/* 512KB */
+
+#define SOFT_SCRATCH(n)			(0xc180 + ((n) * 4))
+
+#define UOS_RSA_SCRATCH_0		0xc200
+#define DMA_ADDR_0_LOW			0xc300
+#define DMA_ADDR_0_HIGH			0xc304
+#define DMA_ADDR_1_LOW			0xc308
+#define DMA_ADDR_1_HIGH			0xc30c
+#define   DMA_ADDRESS_SPACE_WOPCM	  (7 << 16)
+#define   DMA_ADDRESS_SPACE_GTT		  (8 << 16)
+#define DMA_COPY_SIZE			0xc310
+#define DMA_CTRL			0xc314
+#define   UOS_MOVE			  (1<<4)
+#define   START_DMA			  (1<<0)
+#define DMA_GUC_WOPCM_OFFSET		0xc340
+
+#define GEN8_GT_PM_CONFIG		0x138140
+#define GEN9_GT_PM_CONFIG		0x13816c
+#define   GEN8_GT_DOORBELL_ENABLE	  (1<<0)
+
+#define GEN8_GTCR			0x4274
+#define   GEN8_GTCR_INVALIDATE		  (1<<0)
+
+#define GUC_ARAT_C6DIS			0xA178
+
+#define GUC_SHIM_CONTROL		0xc064
+#define   GUC_DISABLE_SRAM_INIT_TO_ZEROES	(1<<0)
+#define   GUC_ENABLE_READ_CACHE_LOGIC		(1<<1)
+#define   GUC_ENABLE_MIA_CACHING		(1<<2)
+#define   GUC_GEN10_MSGCH_ENABLE		(1<<4)
+#define   GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA	(1<<9)
+#define   GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA	(1<<10)
+#define   GUC_ENABLE_MIA_CLOCK_GATING		(1<<15)
+#define   GUC_GEN10_SHIM_WC_ENABLE		(1<<21)
+
+#define GUC_SHIM_CONTROL_VALUE	(GUC_DISABLE_SRAM_INIT_TO_ZEROES	| \
+				 GUC_ENABLE_READ_CACHE_LOGIC		| \
+				 GUC_ENABLE_MIA_CACHING			| \
+				 GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA	| \
+				 GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA)
+
+#define HOST2GUC_INTERRUPT		0xc4c8
+#define   HOST2GUC_TRIGGER		  (1<<0)
+
+#define DRBMISC1			0x1984
+#define   DOORBELL_ENABLE		  (1<<0)
+
+#define GEN8_DRBREGL(x)			(0x1000 + (x) * 8)
+#define   GEN8_DRB_VALID		  (1<<0)
+#define GEN8_DRBREGU(x)			(GEN8_DRBREGL(x) + 4)
+
+#define DE_GUCRMR			0x44054
+
+#define GUC_BCS_RCS_IER			0xC550
+#define GUC_VCS2_VCS1_IER		0xC554
+#define GUC_WD_VECS_IER			0xC558
+#define GUC_PM_P24C_IER			0xC55C
+
+#endif
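
The GUC_STATUS fields in the new header are read back to track firmware boot progress. A small sketch of how the masks decode a raw register value; the read itself would be an MMIO access, so the status value here is illustrative:

#include <stdbool.h>
#include <stdint.h>

#define GS_BOOTROM_SHIFT	1
#define GS_BOOTROM_MASK		(0x7F << GS_BOOTROM_SHIFT)
#define GS_UKERNEL_SHIFT	8
#define GS_UKERNEL_MASK		(0xFF << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_READY	(0xF0 << GS_UKERNEL_SHIFT)

static bool guc_ukernel_ready(uint32_t status)
{
	return (status & GS_UKERNEL_MASK) == GS_UKERNEL_READY;
}

static uint32_t guc_bootrom_phase(uint32_t status)
{
	return (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT;
}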
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 23aa04c..97f3a56 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -35,107 +35,20 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-typedef struct _drm_i915_batchbuffer32 {
-	int start;		/* agp offset */
-	int used;		/* nr bytes in use */
-	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
-	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
-	int num_cliprects;	/* mulitpass with multiple cliprects? */
-	u32 cliprects;		/* pointer to userspace cliprects */
-} drm_i915_batchbuffer32_t;
-
-static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
-				   unsigned long arg)
-{
-	drm_i915_batchbuffer32_t batchbuffer32;
-	drm_i915_batchbuffer_t __user *batchbuffer;
-
-	if (copy_from_user
-	    (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
-		return -EFAULT;
-
-	batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
-	if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
-	    || __put_user(batchbuffer32.start, &batchbuffer->start)
-	    || __put_user(batchbuffer32.used, &batchbuffer->used)
-	    || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
-	    || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
-	    || __put_user(batchbuffer32.num_cliprects,
-			  &batchbuffer->num_cliprects)
-	    || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
-			  &batchbuffer->cliprects))
-		return -EFAULT;
-
-	return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
-			 (unsigned long)batchbuffer);
-}
-
-typedef struct _drm_i915_cmdbuffer32 {
-	u32 buf;		/* pointer to userspace command buffer */
-	int sz;			/* nr bytes in buf */
-	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
-	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
-	int num_cliprects;	/* mulitpass with multiple cliprects? */
-	u32 cliprects;		/* pointer to userspace cliprects */
-} drm_i915_cmdbuffer32_t;
-
-static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
-				 unsigned long arg)
-{
-	drm_i915_cmdbuffer32_t cmdbuffer32;
-	drm_i915_cmdbuffer_t __user *cmdbuffer;
-
-	if (copy_from_user
-	    (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
-		return -EFAULT;
-
-	cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
-	if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
-	    || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
-			  &cmdbuffer->buf)
-	    || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
-	    || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
-	    || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
-	    || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
-	    || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
-			  &cmdbuffer->cliprects))
-		return -EFAULT;
-
-	return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
-			 (unsigned long)cmdbuffer);
-}
-
-typedef struct drm_i915_irq_emit32 {
-	u32 irq_seq;
-} drm_i915_irq_emit32_t;
-
-static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
-				unsigned long arg)
-{
-	drm_i915_irq_emit32_t req32;
-	drm_i915_irq_emit_t __user *request;
-
-	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
-		return -EFAULT;
-
-	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
-	    || __put_user((int __user *)(unsigned long)req32.irq_seq,
-			  &request->irq_seq))
-		return -EFAULT;
-
-	return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
-			 (unsigned long)request);
-}
-typedef struct drm_i915_getparam32 {
-	int param;
+struct drm_i915_getparam32 {
+	s32 param;
+	/*
+	 * We screwed up the generic ioctl struct here and used a variable-sized
+	 * pointer. Use u32 in the compat struct to match the 32-bit pointer
+	 * userspace expects.
+	 */
 	u32 value;
-} drm_i915_getparam32_t;
+};
 
 static int compat_i915_getparam(struct file *file, unsigned int cmd,
 				unsigned long arg)
 {
-	drm_i915_getparam32_t req32;
+	struct drm_i915_getparam32 req32;
 	drm_i915_getparam_t __user *request;
 
 	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
@@ -152,41 +65,8 @@
 			 (unsigned long)request);
 }
 
-typedef struct drm_i915_mem_alloc32 {
-	int region;
-	int alignment;
-	int size;
-	u32 region_offset;	/* offset from start of fb or agp */
-} drm_i915_mem_alloc32_t;
-
-static int compat_i915_alloc(struct file *file, unsigned int cmd,
-			     unsigned long arg)
-{
-	drm_i915_mem_alloc32_t req32;
-	drm_i915_mem_alloc_t __user *request;
-
-	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
-		return -EFAULT;
-
-	request = compat_alloc_user_space(sizeof(*request));
-	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
-	    || __put_user(req32.region, &request->region)
-	    || __put_user(req32.alignment, &request->alignment)
-	    || __put_user(req32.size, &request->size)
-	    || __put_user((void __user *)(unsigned long)req32.region_offset,
-			  &request->region_offset))
-		return -EFAULT;
-
-	return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
-			 (unsigned long)request);
-}
-
 static drm_ioctl_compat_t *i915_compat_ioctls[] = {
-	[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
-	[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
 	[DRM_I915_GETPARAM] = compat_i915_getparam,
-	[DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
-	[DRM_I915_ALLOC] = compat_i915_alloc
 };
 
 /**
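
The compat shim shrinks to a single entry because the legacy DRI1 ioctls no longer exist; what remains is getparam, whose 32-bit layout genuinely differs from the native one. A sketch showing why the compat struct holds a u32 where the native struct holds a pointer; sizes assume an LP64 kernel:

#include <stdint.h>
#include <stdio.h>

/* Native layout: the pointer makes the struct 16 bytes on LP64. */
struct getparam_native {
	int32_t param;
	int32_t *value;
};

/* Compat layout matching what 32-bit userspace actually passes:
 * a fixed 32-bit slot standing in for the 32-bit pointer. */
struct getparam_compat {
	int32_t param;
	uint32_t value;
};

int main(void)
{
	printf("native: %zu bytes, compat: %zu bytes\n",
	       sizeof(struct getparam_native),
	       sizeof(struct getparam_compat));	/* 16 vs 8 on LP64 */
	return 0;
}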
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 984e2fe..b5fb143 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -61,6 +61,13 @@
 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
 };
 
+static const u32 hpd_spt[HPD_NUM_PINS] = {
+	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
+	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
+	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
+	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
+};
+
 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
@@ -564,8 +571,7 @@
 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
 	struct intel_crtc *intel_crtc =
 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-	const struct drm_display_mode *mode =
-		&intel_crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
 
 	htotal = mode->crtc_htotal;
 	hsync_start = mode->crtc_hsync_start;
@@ -620,7 +626,7 @@
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *mode = &crtc->base.hwmode;
 	enum pipe pipe = crtc->pipe;
 	int position, vtotal;
 
@@ -647,14 +653,14 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
 	int position;
 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
 	bool in_vbl = true;
 	int ret = 0;
 	unsigned long irqflags;
 
-	if (!intel_crtc->active) {
+	if (WARN_ON(!mode->crtc_clock)) {
 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
 				 "pipe %c\n", pipe_name(pipe));
 		return 0;
@@ -796,7 +802,7 @@
 		return -EINVAL;
 	}
 
-	if (!crtc->state->enable) {
+	if (!crtc->hwmode.crtc_clock) {
 		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
 		return -EBUSY;
 	}
@@ -805,151 +811,7 @@
 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
 						     vblank_time, flags,
 						     crtc,
-						     &to_intel_crtc(crtc)->config->base.adjusted_mode);
-}
-
-static bool intel_hpd_irq_event(struct drm_device *dev,
-				struct drm_connector *connector)
-{
-	enum drm_connector_status old_status;
-
-	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
-	old_status = connector->status;
-
-	connector->status = connector->funcs->detect(connector, false);
-	if (old_status == connector->status)
-		return false;
-
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
-		      connector->base.id,
-		      connector->name,
-		      drm_get_connector_status_name(old_status),
-		      drm_get_connector_status_name(connector->status));
-
-	return true;
-}
-
-static void i915_digport_work_func(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, struct drm_i915_private, dig_port_work);
-	u32 long_port_mask, short_port_mask;
-	struct intel_digital_port *intel_dig_port;
-	int i;
-	u32 old_bits = 0;
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	long_port_mask = dev_priv->long_hpd_port_mask;
-	dev_priv->long_hpd_port_mask = 0;
-	short_port_mask = dev_priv->short_hpd_port_mask;
-	dev_priv->short_hpd_port_mask = 0;
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	for (i = 0; i < I915_MAX_PORTS; i++) {
-		bool valid = false;
-		bool long_hpd = false;
-		intel_dig_port = dev_priv->hpd_irq_port[i];
-		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
-			continue;
-
-		if (long_port_mask & (1 << i))  {
-			valid = true;
-			long_hpd = true;
-		} else if (short_port_mask & (1 << i))
-			valid = true;
-
-		if (valid) {
-			enum irqreturn ret;
-
-			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
-			if (ret == IRQ_NONE) {
-				/* fall back to old school hpd */
-				old_bits |= (1 << intel_dig_port->base.hpd_pin);
-			}
-		}
-	}
-
-	if (old_bits) {
-		spin_lock_irq(&dev_priv->irq_lock);
-		dev_priv->hpd_event_bits |= old_bits;
-		spin_unlock_irq(&dev_priv->irq_lock);
-		schedule_work(&dev_priv->hotplug_work);
-	}
-}
-
-/*
- * Handle hotplug events outside the interrupt handler proper.
- */
-#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
-
-static void i915_hotplug_work_func(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, struct drm_i915_private, hotplug_work);
-	struct drm_device *dev = dev_priv->dev;
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct intel_connector *intel_connector;
-	struct intel_encoder *intel_encoder;
-	struct drm_connector *connector;
-	bool hpd_disabled = false;
-	bool changed = false;
-	u32 hpd_event_bits;
-
-	mutex_lock(&mode_config->mutex);
-	DRM_DEBUG_KMS("running encoder hotplug functions\n");
-
-	spin_lock_irq(&dev_priv->irq_lock);
-
-	hpd_event_bits = dev_priv->hpd_event_bits;
-	dev_priv->hpd_event_bits = 0;
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		intel_connector = to_intel_connector(connector);
-		if (!intel_connector->encoder)
-			continue;
-		intel_encoder = intel_connector->encoder;
-		if (intel_encoder->hpd_pin > HPD_NONE &&
-		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
-		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
-			DRM_INFO("HPD interrupt storm detected on connector %s: "
-				 "switching from hotplug detection to polling\n",
-				connector->name);
-			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
-			connector->polled = DRM_CONNECTOR_POLL_CONNECT
-				| DRM_CONNECTOR_POLL_DISCONNECT;
-			hpd_disabled = true;
-		}
-		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
-			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
-				      connector->name, intel_encoder->hpd_pin);
-		}
-	}
-	 /* if there were no outputs to poll, poll was disabled,
-	  * therefore make sure it's enabled when disabling HPD on
-	  * some connectors */
-	if (hpd_disabled) {
-		drm_kms_helper_poll_enable(dev);
-		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
-				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
-	}
-
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		intel_connector = to_intel_connector(connector);
-		if (!intel_connector->encoder)
-			continue;
-		intel_encoder = intel_connector->encoder;
-		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
-			if (intel_encoder->hot_plug)
-				intel_encoder->hot_plug(intel_encoder);
-			if (intel_hpd_irq_event(dev, connector))
-				changed = true;
-		}
-	}
-	mutex_unlock(&mode_config->mutex);
-
-	if (changed)
-		drm_kms_helper_hotplug_event(dev);
+						     &crtc->hwmode);
 }
 
 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
@@ -1372,165 +1234,80 @@
 	return ret;
 }
 
-#define HPD_STORM_DETECT_PERIOD 1000
-#define HPD_STORM_THRESHOLD 5
-
-static int pch_port_to_hotplug_shift(enum port port)
+static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
 {
 	switch (port) {
 	case PORT_A:
-	case PORT_E:
-	default:
-		return -1;
+		return val & BXT_PORTA_HOTPLUG_LONG_DETECT;
 	case PORT_B:
-		return 0;
+		return val & PORTB_HOTPLUG_LONG_DETECT;
 	case PORT_C:
-		return 8;
+		return val & PORTC_HOTPLUG_LONG_DETECT;
 	case PORT_D:
-		return 16;
+		return val & PORTD_HOTPLUG_LONG_DETECT;
+	default:
+		return false;
 	}
 }
 
-static int i915_port_to_hotplug_shift(enum port port)
+static bool pch_port_hotplug_long_detect(enum port port, u32 val)
 {
 	switch (port) {
-	case PORT_A:
-	case PORT_E:
-	default:
-		return -1;
 	case PORT_B:
-		return 17;
+		return val & PORTB_HOTPLUG_LONG_DETECT;
 	case PORT_C:
-		return 19;
+		return val & PORTC_HOTPLUG_LONG_DETECT;
 	case PORT_D:
-		return 21;
-	}
-}
-
-static enum port get_port_from_pin(enum hpd_pin pin)
-{
-	switch (pin) {
-	case HPD_PORT_B:
-		return PORT_B;
-	case HPD_PORT_C:
-		return PORT_C;
-	case HPD_PORT_D:
-		return PORT_D;
+		return val & PORTD_HOTPLUG_LONG_DETECT;
+	case PORT_E:
+		return val & PORTE_HOTPLUG_LONG_DETECT;
 	default:
-		return PORT_A; /* no hpd */
+		return false;
 	}
 }
 
-static void intel_hpd_irq_handler(struct drm_device *dev,
-				  u32 hotplug_trigger,
-				  u32 dig_hotplug_reg,
-				  const u32 hpd[HPD_NUM_PINS])
+static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
+	switch (port) {
+	case PORT_B:
+		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
+	case PORT_C:
+		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
+	case PORT_D:
+		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
+	default:
+		return false;
+	}
+}
+
+/* Get a bit mask of pins that have triggered, and which ones may be long. */
+static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
+			     u32 hotplug_trigger, u32 dig_hotplug_reg,
+			     const u32 hpd[HPD_NUM_PINS],
+			     bool long_pulse_detect(enum port port, u32 val))
+{
 	enum port port;
-	bool storm_detected = false;
-	bool queue_dig = false, queue_hp = false;
-	u32 dig_shift;
-	u32 dig_port_mask = 0;
+	int i;
 
-	if (!hotplug_trigger)
-		return;
+	*pin_mask = 0;
+	*long_mask = 0;
 
-	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
-			 hotplug_trigger, dig_hotplug_reg);
-
-	spin_lock(&dev_priv->irq_lock);
-	for (i = 1; i < HPD_NUM_PINS; i++) {
-		if (!(hpd[i] & hotplug_trigger))
+	for_each_hpd_pin(i) {
+		if ((hpd[i] & hotplug_trigger) == 0)
 			continue;
 
-		port = get_port_from_pin(i);
-		if (port && dev_priv->hpd_irq_port[port]) {
-			bool long_hpd;
+		*pin_mask |= BIT(i);
 
-			if (!HAS_GMCH_DISPLAY(dev_priv)) {
-				dig_shift = pch_port_to_hotplug_shift(port);
-				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
-			} else {
-				dig_shift = i915_port_to_hotplug_shift(port);
-				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
-			}
+		if (!intel_hpd_pin_to_port(i, &port))
+			continue;
 
-			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
-					 port_name(port),
-					 long_hpd ? "long" : "short");
-			/* for long HPD pulses we want to have the digital queue happen,
-			   but we still want HPD storm detection to function. */
-			if (long_hpd) {
-				dev_priv->long_hpd_port_mask |= (1 << port);
-				dig_port_mask |= hpd[i];
-			} else {
-				/* for short HPD just trigger the digital queue */
-				dev_priv->short_hpd_port_mask |= (1 << port);
-				hotplug_trigger &= ~hpd[i];
-			}
-			queue_dig = true;
-		}
+		if (long_pulse_detect(port, dig_hotplug_reg))
+			*long_mask |= BIT(i);
 	}
 
-	for (i = 1; i < HPD_NUM_PINS; i++) {
-		if (hpd[i] & hotplug_trigger &&
-		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
-			/*
-			 * On GMCH platforms the interrupt mask bits only
-			 * prevent irq generation, not the setting of the
-			 * hotplug bits itself. So only WARN about unexpected
-			 * interrupts on saner platforms.
-			 */
-			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
-				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
-				  hotplug_trigger, i, hpd[i]);
+	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
+			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
 
-			continue;
-		}
-
-		if (!(hpd[i] & hotplug_trigger) ||
-		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
-			continue;
-
-		if (!(dig_port_mask & hpd[i])) {
-			dev_priv->hpd_event_bits |= (1 << i);
-			queue_hp = true;
-		}
-
-		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
-				   dev_priv->hpd_stats[i].hpd_last_jiffies
-				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
-			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
-			dev_priv->hpd_stats[i].hpd_cnt = 0;
-			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
-		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
-			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
-			dev_priv->hpd_event_bits &= ~(1 << i);
-			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
-			storm_detected = true;
-		} else {
-			dev_priv->hpd_stats[i].hpd_cnt++;
-			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
-				      dev_priv->hpd_stats[i].hpd_cnt);
-		}
-	}
-
-	if (storm_detected)
-		dev_priv->display.hpd_irq_setup(dev);
-	spin_unlock(&dev_priv->irq_lock);
-
-	/*
-	 * Our hotplug handler can grab modeset locks (by calling down into the
-	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
-	 * queue for otherwise the flush_work in the pageflip code will
-	 * deadlock.
-	 */
-	if (queue_dig)
-		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
-	if (queue_hp)
-		schedule_work(&dev_priv->hotplug_work);
 }
 
 static void gmbus_irq_handler(struct drm_device *dev)
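
The refactor above splits hotplug handling into a pure decode step: intel_get_hpd_pins() turns the hardware trigger word into a pin bitmask plus a "long pulse" bitmask, while the shared intel_hpd_irq_handler() keeps storm detection and work scheduling. A minimal user-space sketch of the decode, with made-up stand-ins for the driver's hpd[] trigger table, pin-to-port mapping and per-platform long-pulse test:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

enum hpd_pin { HPD_NONE, HPD_PORT_B, HPD_PORT_C, HPD_PORT_D, HPD_NUM_PINS };
enum port { PORT_B, PORT_C, PORT_D };

static const uint32_t hpd[HPD_NUM_PINS] = {	/* made-up trigger bits */
	[HPD_PORT_B] = 1u << 4,
	[HPD_PORT_C] = 1u << 5,
	[HPD_PORT_D] = 1u << 6,
};

static bool pin_to_port(int pin, enum port *port)
{
	switch (pin) {
	case HPD_PORT_B: *port = PORT_B; return true;
	case HPD_PORT_C: *port = PORT_C; return true;
	case HPD_PORT_D: *port = PORT_D; return true;
	default: return false;
	}
}

/* pretend bit (16 + port) in the digital register flags a long pulse */
static bool long_pulse_detect(enum port port, uint32_t val)
{
	return val & (1u << (16 + port));
}

static void get_hpd_pins(uint32_t *pin_mask, uint32_t *long_mask,
			 uint32_t trigger, uint32_t dig_reg)
{
	enum port port;
	int i;

	*pin_mask = 0;
	*long_mask = 0;
	for (i = HPD_NONE + 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & trigger))
			continue;
		*pin_mask |= 1u << i;
		if (pin_to_port(i, &port) && long_pulse_detect(port, dig_reg))
			*long_mask |= 1u << i;
	}
}

int main(void)
{
	uint32_t pins, longs;

	get_hpd_pins(&pins, &longs, hpd[HPD_PORT_B] | hpd[HPD_PORT_D],
		     1u << (16 + PORT_D));
	printf("pins 0x%x, long 0x%x\n", pins, longs);	/* pins 0xa, long 0x8 */
	return 0;
}

Because the decode has no side effects, every platform reuses the same loop and only supplies a different long_pulse_detect() callback.
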
@@ -1755,28 +1532,35 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+	u32 pin_mask, long_mask;
 
-	if (hotplug_status) {
-		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
-		/*
-		 * Make sure hotplug status is cleared before we clear IIR, or else we
-		 * may miss hotplug events.
-		 */
-		POSTING_READ(PORT_HOTPLUG_STAT);
+	if (!hotplug_status)
+		return;
 
-		if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
-			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+	/*
+	 * Make sure hotplug status is cleared before we clear IIR, or else we
+	 * may miss hotplug events.
+	 */
+	POSTING_READ(PORT_HOTPLUG_STAT);
 
-			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
-		} else {
-			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
 
-			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
-		}
+		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+				   hotplug_trigger, hpd_status_g4x,
+				   i9xx_port_hotplug_long_detect);
+		intel_hpd_irq_handler(dev, pin_mask, long_mask);
 
-		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
-		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
 			dp_aux_irq_handler(dev);
+	} else {
+		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+
+		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+				   hotplug_trigger, hpd_status_i915,
+				   i9xx_port_hotplug_long_detect);
+		intel_hpd_irq_handler(dev, pin_mask, long_mask);
 	}
 }
 
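The early return also keeps the documented ordering constraint easy to see: PORT_HOTPLUG_STAT must be cleared, and the write flushed with a posting read, before the caller clears IIR. A condensed sketch of the pattern; the register offset is the one from i915_reg.h with display_mmio_offset taken as zero, and readl()/writel() stand in for I915_READ()/I915_WRITE():

#include <linux/io.h>

#define PORT_HOTPLUG_STAT	0x61114

static void clear_hotplug_status(void __iomem *regs)
{
	u32 status = readl(regs + PORT_HOTPLUG_STAT);

	if (!status)
		return;
	writel(status, regs + PORT_HOTPLUG_STAT);
	/* posting read: the clear must land before IIR is cleared */
	(void)readl(regs + PORT_HOTPLUG_STAT);
}
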
@@ -1875,12 +1659,18 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
-	u32 dig_hotplug_reg;
 
-	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
-	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+	if (hotplug_trigger) {
+		u32 dig_hotplug_reg, pin_mask, long_mask;
 
-	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
+		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+				   dig_hotplug_reg, hpd_ibx,
+				   pch_port_hotplug_long_detect);
+		intel_hpd_irq_handler(dev, pin_mask, long_mask);
+	}
 
 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1971,13 +1761,38 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
-	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
-	u32 dig_hotplug_reg;
+	u32 hotplug_trigger;
 
-	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
-	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+	if (HAS_PCH_SPT(dev))
+		hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT;
+	else
+		hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
 
-	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
+	if (hotplug_trigger) {
+		u32 dig_hotplug_reg, pin_mask, long_mask;
+
+		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+		if (HAS_PCH_SPT(dev)) {
+			intel_get_hpd_pins(&pin_mask, &long_mask,
+					   hotplug_trigger,
+					   dig_hotplug_reg, hpd_spt,
+					   pch_port_hotplug_long_detect);
+
+			/* detect PORTE HP event */
+			dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
+			if (pch_port_hotplug_long_detect(PORT_E,
+							 dig_hotplug_reg))
+				long_mask |= 1 << HPD_PORT_E;
+		} else {
+			intel_get_hpd_pins(&pin_mask, &long_mask,
+					   hotplug_trigger,
+					   dig_hotplug_reg, hpd_cpt,
+					   pch_port_hotplug_long_detect);
+		}
+
+		intel_hpd_irq_handler(dev, pin_mask, long_mask);
+	}
 
 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2176,8 +1991,8 @@
 static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t hp_control;
-	uint32_t hp_trigger;
+	u32 hp_control, hp_trigger;
+	u32 pin_mask, long_mask;
 
 	/* Get the status */
 	hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
@@ -2189,20 +2004,12 @@
 		return;
 	}
 
-	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
-		hp_control & BXT_HOTPLUG_CTL_MASK);
-
-	/* Check for HPD storm and schedule bottom half */
-	intel_hpd_irq_handler(dev, hp_trigger, hp_control, hpd_bxt);
-
-	/*
-	 * FIXME: Save the hot plug status for bottom half before
-	 * clearing the sticky status bits, else the status will be
-	 * lost.
-	 */
-
 	/* Clear sticky bits in hpd status */
 	I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
+
+	intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
+			   hpd_bxt, bxt_port_hotplug_long_detect);
+	intel_hpd_irq_handler(dev, pin_mask, long_mask);
 }
 
 static irqreturn_t gen8_irq_handler(int irq, void *arg)
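
Clearing BXT_HOTPLUG_CTL before decoding is safe now because the decode runs on the saved hp_control copy, which is why the old FIXME about losing status for the bottom half could go. The shape of the fix as a sketch; the register offset here is hypothetical and decode() stands in for the intel_get_hpd_pins() plus intel_hpd_irq_handler() pair:

#include <linux/io.h>

#define BXT_HOTPLUG_CTL_SKETCH	0x1100c	/* hypothetical offset */

static void decode(u32 trigger, u32 control)
{
	/* stands in for intel_get_hpd_pins() + intel_hpd_irq_handler() */
}

static void bxt_hpd_sketch(void __iomem *regs, u32 hp_trigger)
{
	u32 hp_control = readl(regs + BXT_HOTPLUG_CTL_SKETCH);

	/* clear the sticky bits immediately... */
	writel(hp_control, regs + BXT_HOTPLUG_CTL_SKETCH);
	/* ...then decode from the saved copy, which can no longer be lost */
	decode(hp_trigger, hp_control);
}
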
@@ -2446,7 +2253,7 @@
 			kobject_uevent_env(&dev->primary->kdev->kobj,
 					   KOBJ_CHANGE, reset_done_event);
 		} else {
-			atomic_set_mask(I915_WEDGED, &error->reset_counter);
+			atomic_or(I915_WEDGED, &error->reset_counter);
 		}
 
 		/*
@@ -2574,7 +2381,7 @@
 	i915_report_and_clear_eir(dev);
 
 	if (wedged) {
-		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
-				&dev_priv->gpu_error.reset_counter);
+		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
+			  &dev_priv->gpu_error.reset_counter);
 
 		/*
@@ -3203,12 +3010,17 @@
 	if (HAS_PCH_IBX(dev)) {
 		hotplug_irqs = SDE_HOTPLUG_MASK;
 		for_each_intel_encoder(dev, intel_encoder)
-			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
 				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
+	} else if (HAS_PCH_SPT(dev)) {
+		hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
+		for_each_intel_encoder(dev, intel_encoder)
+			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
+				enabled_irqs |= hpd_spt[intel_encoder->hpd_pin];
 	} else {
 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
 		for_each_intel_encoder(dev, intel_encoder)
-			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
 				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
 	}
 
@@ -3226,6 +3038,13 @@
 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+
+	/* enable SPT PORTE hot plug */
+	if (HAS_PCH_SPT(dev)) {
+		hotplug = I915_READ(PCH_PORT_HOTPLUG2);
+		hotplug |= PORTE_HOTPLUG_ENABLE;
+		I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
+	}
 }
 
 static void bxt_hpd_irq_setup(struct drm_device *dev)
@@ -3237,7 +3056,7 @@
 
 	/* Now, enable HPD */
 	for_each_intel_encoder(dev, intel_encoder) {
-		if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark
+		if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state
 				== HPD_ENABLED)
 			hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
 	}
@@ -4130,7 +3949,7 @@
 	/* Note HDMI and DP share hotplug bits */
 	/* enable bits are the same for all generations */
 	for_each_intel_encoder(dev, intel_encoder)
-		if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+		if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
 			hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
 	/* Programming the CRT detection parameters tends
 	   to generate a spurious hotplug event about three
@@ -4270,46 +4089,6 @@
 	I915_WRITE(IIR, I915_READ(IIR));
 }
 
-static void intel_hpd_irq_reenable_work(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv),
-			     hotplug_reenable_work.work);
-	struct drm_device *dev = dev_priv->dev;
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	int i;
-
-	intel_runtime_pm_get(dev_priv);
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
-		struct drm_connector *connector;
-
-		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
-			continue;
-
-		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
-
-		list_for_each_entry(connector, &mode_config->connector_list, head) {
-			struct intel_connector *intel_connector = to_intel_connector(connector);
-
-			if (intel_connector->encoder->hpd_pin == i) {
-				if (connector->polled != intel_connector->polled)
-					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
-							 connector->name);
-				connector->polled = intel_connector->polled;
-				if (!connector->polled)
-					connector->polled = DRM_CONNECTOR_POLL_HPD;
-			}
-		}
-	}
-	if (dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev);
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	intel_runtime_pm_put(dev_priv);
-}
-
 /**
  * intel_irq_init - initializes irq support
  * @dev_priv: i915 device instance
@@ -4321,8 +4100,8 @@
 {
 	struct drm_device *dev = dev_priv->dev;
 
-	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
-	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
+	intel_hpd_init_work(dev_priv);
+
 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
@@ -4335,8 +4114,6 @@
 
 	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
 			  i915_hangcheck_elapsed);
-	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
-			  intel_hpd_irq_reenable_work);
 
 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
@@ -4422,46 +4199,6 @@
 }
 
 /**
- * intel_hpd_init - initializes and enables hpd support
- * @dev_priv: i915 device instance
- *
- * This function enables the hotplug support. It requires that interrupts have
- * already been enabled with intel_irq_init_hw(). From this point on hotplug and
- * poll request can run concurrently to other code, so locking rules must be
- * obeyed.
- *
- * This is a separate step from interrupt enabling to simplify the locking rules
- * in the driver load and resume code.
- */
-void intel_hpd_init(struct drm_i915_private *dev_priv)
-{
-	struct drm_device *dev = dev_priv->dev;
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct drm_connector *connector;
-	int i;
-
-	for (i = 1; i < HPD_NUM_PINS; i++) {
-		dev_priv->hpd_stats[i].hpd_cnt = 0;
-		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
-	}
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		struct intel_connector *intel_connector = to_intel_connector(connector);
-		connector->polled = intel_connector->polled;
-		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
-			connector->polled = DRM_CONNECTOR_POLL_HPD;
-		if (intel_connector->mst_port)
-			connector->polled = DRM_CONNECTOR_POLL_HPD;
-	}
-
-	/* Interrupt setup is already guaranteed to be single-threaded, this is
-	 * just to make the assert_spin_locked checks happy. */
-	spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->display.hpd_irq_setup)
-		dev_priv->display.hpd_irq_setup(dev);
-	spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-/**
  * intel_irq_install - enables the hardware interrupt
  * @dev_priv: i915 device instance
  *
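
The atomic_set_mask() to atomic_or() switches above follow the tree-wide move to the generic atomic logic ops: same semantics (OR a value into an atomic_t), but atomic_or() is provided on every architecture. A minimal sketch:

#include <linux/atomic.h>

/* OR a flag into an atomic counter; portable replacement for the
 * old x86-flavoured atomic_set_mask() */
static void set_error_flag(atomic_t *reset_counter, int flag)
{
	atomic_or(flag, reset_counter);
}
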
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 8ac5a1b..5ae4b0a 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -28,7 +28,6 @@
 	.modeset = -1,
 	.panel_ignore_lid = 1,
 	.semaphores = -1,
-	.lvds_downclock = 0,
 	.lvds_channel_mode = 0,
 	.panel_use_ssc = -1,
 	.vbt_sdvo_panel_type = -1,
@@ -52,13 +51,14 @@
 	.use_mmio_flip = 0,
 	.mmio_debug = 0,
 	.verbose_state_checks = 1,
-	.nuclear_pageflip = 0,
 	.edp_vswing = 0,
+	.enable_guc_submission = false,
+	.guc_log_level = -1,
 };
 
 module_param_named(modeset, i915.modeset, int, 0400);
 MODULE_PARM_DESC(modeset,
-	"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
+	"Use kernel modesetting [KMS] (0=disable, "
 	"1=on, -1=force vga console preference [default])");
 
 module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
@@ -84,11 +84,6 @@
 	"Enable frame buffer compression for power savings "
 	"(default: -1 (use per-chip default))");
 
-module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400);
-MODULE_PARM_DESC(lvds_downclock,
-	"Use panel (LVDS/eDP) downclocking for power savings "
-	"(default: false)");
-
 module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
 MODULE_PARM_DESC(lvds_channel_mode,
 	 "Specify LVDS channel mode "
@@ -104,7 +99,7 @@
 	"Override/Ignore selection of SDVO panel mode in the VBT "
 	"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
 
-module_param_named(reset, i915.reset, bool, 0600);
+module_param_named_unsafe(reset, i915.reset, bool, 0600);
 MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
 
 module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
@@ -182,13 +177,16 @@
 MODULE_PARM_DESC(verbose_state_checks,
 	"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
 
-module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
-MODULE_PARM_DESC(nuclear_pageflip,
-		 "Force atomic modeset functionality; only planes work for now (default: false).");
-
 /* WA to get away with the default setting in VBT for early platforms. Will be removed */
 module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
 MODULE_PARM_DESC(edp_vswing,
 		 "Ignore/Override vswing pre-emph table selection from VBT "
 		 "(0=use value from vbt [default], 1=low power swing(200mV),"
 		 "2=default swing(400mV))");
+
+module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400);
+MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default: false)");
+
+module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
+MODULE_PARM_DESC(guc_log_level,
+	"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
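
Promoting reset to module_param_named_unsafe() is about supportability: the _unsafe variants warn and add TAINT_USER when the parameter is set, so any later oops records that the machine ran with an unsupported knob. A sketch of the pattern as used here:

#include <linux/module.h>

static bool reset = true;

/* setting this taints the kernel (TAINT_USER) and logs a warning */
module_param_named_unsafe(reset, reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
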
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2030f60..83a0888 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -50,12 +50,17 @@
 
 /* PCI config space */
 
-#define HPLLCC	0xc0 /* 855 only */
-#define   GC_CLOCK_CONTROL_MASK		(0xf << 0)
+#define HPLLCC	0xc0 /* 85x only */
+#define   GC_CLOCK_CONTROL_MASK		(0x7 << 0)
 #define   GC_CLOCK_133_200		(0 << 0)
 #define   GC_CLOCK_100_200		(1 << 0)
 #define   GC_CLOCK_100_133		(2 << 0)
-#define   GC_CLOCK_166_250		(3 << 0)
+#define   GC_CLOCK_133_266		(3 << 0)
+#define   GC_CLOCK_133_200_2		(4 << 0)
+#define   GC_CLOCK_133_266_2		(5 << 0)
+#define   GC_CLOCK_166_266		(6 << 0)
+#define   GC_CLOCK_166_250		(7 << 0)
+
 #define GCFGC2	0xda
 #define GCFGC	0xf0 /* 915+ only */
 #define   GC_LOW_FREQUENCY_ENABLE	(1 << 7)
@@ -155,6 +160,7 @@
 #define GAM_ECOCHK			0x4090
 #define   BDW_DISABLE_HDC_INVALIDATION	(1<<25)
 #define   ECOCHK_SNB_BIT		(1<<10)
+#define   ECOCHK_DIS_TLB		(1<<8)
 #define   HSW_ECOCHK_ARB_PRIO_SOL	(1<<6)
 #define   ECOCHK_PPGTT_CACHE64B		(0x3<<3)
 #define   ECOCHK_PPGTT_CACHE4B		(0x0<<3)
@@ -172,13 +178,22 @@
 #define GAB_CTL				0x24000
 #define   GAB_CTL_CONT_AFTER_PAGEFAULT	(1<<8)
 
-#define GEN7_BIOS_RESERVED		0x1082C0
-#define GEN7_BIOS_RESERVED_1M		(0 << 5)
-#define GEN7_BIOS_RESERVED_256K		(1 << 5)
-#define GEN8_BIOS_RESERVED_SHIFT       7
-#define GEN7_BIOS_RESERVED_MASK        0x1
-#define GEN8_BIOS_RESERVED_MASK        0x3
-
+#define GEN6_STOLEN_RESERVED		0x1082C0
+#define GEN6_STOLEN_RESERVED_ADDR_MASK	(0xFFF << 20)
+#define GEN7_STOLEN_RESERVED_ADDR_MASK	(0x3FFF << 18)
+#define GEN6_STOLEN_RESERVED_SIZE_MASK	(3 << 4)
+#define GEN6_STOLEN_RESERVED_1M		(0 << 4)
+#define GEN6_STOLEN_RESERVED_512K	(1 << 4)
+#define GEN6_STOLEN_RESERVED_256K	(2 << 4)
+#define GEN6_STOLEN_RESERVED_128K	(3 << 4)
+#define GEN7_STOLEN_RESERVED_SIZE_MASK	(1 << 5)
+#define GEN7_STOLEN_RESERVED_1M		(0 << 5)
+#define GEN7_STOLEN_RESERVED_256K	(1 << 5)
+#define GEN8_STOLEN_RESERVED_SIZE_MASK	(3 << 7)
+#define GEN8_STOLEN_RESERVED_1M		(0 << 7)
+#define GEN8_STOLEN_RESERVED_2M		(1 << 7)
+#define GEN8_STOLEN_RESERVED_4M		(2 << 7)
+#define GEN8_STOLEN_RESERVED_8M		(3 << 7)
 
 /* VGA stuff */
 
@@ -316,6 +331,8 @@
 #define   MI_RESTORE_EXT_STATE_EN	(1<<2)
 #define   MI_FORCE_RESTORE		(1<<1)
 #define   MI_RESTORE_INHIBIT		(1<<0)
+#define   HSW_MI_RS_SAVE_STATE_EN       (1<<3)
+#define   HSW_MI_RS_RESTORE_STATE_EN    (1<<2)
 #define MI_SEMAPHORE_SIGNAL	MI_INSTR(0x1b, 0) /* GEN8+ */
 #define   MI_SEMAPHORE_TARGET(engine)	((engine)<<15)
 #define MI_SEMAPHORE_WAIT	MI_INSTR(0x1c, 2) /* GEN8+ */
@@ -347,6 +364,8 @@
 #define   MI_INVALIDATE_BSD		(1<<7)
 #define   MI_FLUSH_DW_USE_GTT		(1<<2)
 #define   MI_FLUSH_DW_USE_PPGTT		(0<<2)
+#define MI_LOAD_REGISTER_MEM(x) MI_INSTR(0x29, 2*(x)-1)
+#define MI_LOAD_REGISTER_MEM_GEN8(x) MI_INSTR(0x29, 3*(x)-1)
 #define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE		(1)
 /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
@@ -356,6 +375,7 @@
 #define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
 #define   MI_BATCH_GTT		    (2<<6) /* aliased with (1<<7) on gen4 */
 #define MI_BATCH_BUFFER_START_GEN8	MI_INSTR(0x31, 1)
+#define   MI_BATCH_RESOURCE_STREAMER (1<<10)
 
 #define MI_PREDICATE_SRC0	(0x2400)
 #define MI_PREDICATE_SRC1	(0x2408)
@@ -410,6 +430,7 @@
 #define   DISPLAY_PLANE_A           (0<<20)
 #define   DISPLAY_PLANE_B           (1<<20)
 #define GFX_OP_PIPE_CONTROL(len)	((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define   PIPE_CONTROL_FLUSH_L3				(1<<27)
 #define   PIPE_CONTROL_GLOBAL_GTT_IVB			(1<<24) /* gen7+ */
 #define   PIPE_CONTROL_MMIO_WRITE			(1<<23)
 #define   PIPE_CONTROL_STORE_DATA_INDEX			(1<<21)
@@ -426,6 +447,7 @@
 #define   PIPE_CONTROL_INDIRECT_STATE_DISABLE		(1<<9)
 #define   PIPE_CONTROL_NOTIFY				(1<<8)
 #define   PIPE_CONTROL_FLUSH_ENABLE			(1<<7) /* gen7+ */
+#define   PIPE_CONTROL_DC_FLUSH_ENABLE			(1<<5)
 #define   PIPE_CONTROL_VF_CACHE_INVALIDATE		(1<<4)
 #define   PIPE_CONTROL_CONST_CACHE_INVALIDATE		(1<<3)
 #define   PIPE_CONTROL_STATE_CACHE_INVALIDATE		(1<<2)
@@ -449,7 +471,6 @@
 #define MI_CLFLUSH              MI_INSTR(0x27, 0)
 #define MI_REPORT_PERF_COUNT    MI_INSTR(0x28, 0)
 #define   MI_REPORT_PERF_COUNT_GGTT (1<<0)
-#define MI_LOAD_REGISTER_MEM    MI_INSTR(0x29, 0)
 #define MI_LOAD_REGISTER_REG    MI_INSTR(0x2A, 0)
 #define MI_RS_STORE_DATA_IMM    MI_INSTR(0x2B, 0)
 #define MI_LOAD_URB_MEM         MI_INSTR(0x2C, 0)
@@ -1163,10 +1184,12 @@
 #define _PORT_PLL_EBB_0_A		0x162034
 #define _PORT_PLL_EBB_0_B		0x6C034
 #define _PORT_PLL_EBB_0_C		0x6C340
-#define   PORT_PLL_P1_MASK		(0x07 << 13)
-#define   PORT_PLL_P1(x)		((x)  << 13)
-#define   PORT_PLL_P2_MASK		(0x1f << 8)
-#define   PORT_PLL_P2(x)		((x)  << 8)
+#define   PORT_PLL_P1_SHIFT		13
+#define   PORT_PLL_P1_MASK		(0x07 << PORT_PLL_P1_SHIFT)
+#define   PORT_PLL_P1(x)		((x)  << PORT_PLL_P1_SHIFT)
+#define   PORT_PLL_P2_SHIFT		8
+#define   PORT_PLL_P2_MASK		(0x1f << PORT_PLL_P2_SHIFT)
+#define   PORT_PLL_P2(x)		((x)  << PORT_PLL_P2_SHIFT)
 #define BXT_PORT_PLL_EBB_0(port)	_PORT3(port, _PORT_PLL_EBB_0_A, \
 						_PORT_PLL_EBB_0_B,	\
 						_PORT_PLL_EBB_0_C)
@@ -1186,8 +1209,9 @@
 /* PORT_PLL_0_A */
 #define   PORT_PLL_M2_MASK		0xFF
 /* PORT_PLL_1_A */
-#define   PORT_PLL_N_MASK		(0x0F << 8)
-#define   PORT_PLL_N(x)			((x) << 8)
+#define   PORT_PLL_N_SHIFT		8
+#define   PORT_PLL_N_MASK		(0x0F << PORT_PLL_N_SHIFT)
+#define   PORT_PLL_N(x)			((x) << PORT_PLL_N_SHIFT)
 /* PORT_PLL_2_A */
 #define   PORT_PLL_M2_FRAC_MASK		0x3FFFFF
 /* PORT_PLL_3_A */
@@ -1201,9 +1225,11 @@
 /* PORT_PLL_8_A */
 #define   PORT_PLL_TARGET_CNT_MASK	0x3FF
 /* PORT_PLL_9_A */
-#define  PORT_PLL_LOCK_THRESHOLD_MASK	0xe
+#define  PORT_PLL_LOCK_THRESHOLD_SHIFT	1
+#define  PORT_PLL_LOCK_THRESHOLD_MASK	(0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT)
 /* PORT_PLL_10_A */
 #define  PORT_PLL_DCO_AMP_OVR_EN_H	(1<<27)
+#define  PORT_PLL_DCO_AMP_DEFAULT	15
 #define  PORT_PLL_DCO_AMP_MASK		0x3c00
 #define  PORT_PLL_DCO_AMP(x)		(x<<10)
 #define _PORT_PLL_BASE(port)		_PORT3(port, _PORT_PLL_0_A,	\
@@ -1377,6 +1403,18 @@
 							_PORT_TX_DW14_LN0_C) + \
 					 _BXT_LANE_OFFSET(lane))
 
+/* UAIMI scratch pad register 1 */
+#define UAIMI_SPR1			0x4F074
+/* SKL VccIO mask */
+#define SKL_VCCIO_MASK			0x1
+/* SKL balance leg register */
+#define DISPIO_CR_TX_BMU_CR0		0x6C00C
+/* I_boost values */
+#define BALANCE_LEG_SHIFT(port)		(8+3*(port))
+#define BALANCE_LEG_MASK(port)		(7<<(8+3*(port)))
+/* Balance leg disable bits */
+#define BALANCE_LEG_DISABLE_SHIFT	23
+
 /*
  * Fence registers
  */
@@ -1456,6 +1494,9 @@
 #define RING_MAX_IDLE(base)	((base)+0x54)
 #define RING_HWS_PGA(base)	((base)+0x80)
 #define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
+#define RING_RESET_CTL(base)	((base)+0xd0)
+#define   RESET_CTL_REQUEST_RESET  (1 << 0)
+#define   RESET_CTL_READY_TO_RESET (1 << 1)
 
 #define HSW_GTT_CACHE_EN	0x4024
 #define   GTT_CACHE_EN_ALL	0xF0007FFF
@@ -1946,6 +1987,9 @@
 #define FBC_FENCE_OFF		0x03218 /* BSpec typo has 321Bh */
 #define FBC_TAG			0x03300
 
+#define FBC_STATUS2		0x43214
+#define  FBC_COMPRESSION_MASK	0x7ff
+
 #define FBC_LL_SIZE		(1536)
 
 /* Framebuffer compression for GM45+ */
@@ -2116,7 +2160,7 @@
 #define   DPLL_DVO_2X_MODE		(1 << 30)
 #define   DPLL_EXT_BUFFER_ENABLE_VLV	(1 << 30)
 #define   DPLL_SYNCLOCK_ENABLE		(1 << 29)
-#define   DPLL_REFA_CLK_ENABLE_VLV	(1 << 29)
+#define   DPLL_REF_CLK_ENABLE_VLV	(1 << 29)
 #define   DPLL_VGA_MODE_DIS		(1 << 28)
 #define   DPLLB_MODE_DAC_SERIAL		(1 << 26) /* i915 */
 #define   DPLLB_MODE_LVDS		(2 << 26) /* i915 */
@@ -2130,8 +2174,8 @@
 #define   DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW	0x00ff8000 /* Pineview */
 #define   DPLL_LOCK_VLV			(1<<15)
 #define   DPLL_INTEGRATED_CRI_CLK_VLV	(1<<14)
-#define   DPLL_INTEGRATED_CLOCK_VLV	(1<<13)
-#define   DPLL_SSC_REF_CLOCK_CHV	(1<<13)
+#define   DPLL_INTEGRATED_REF_CLK_VLV	(1<<13)
+#define   DPLL_SSC_REF_CLK_CHV		(1<<13)
 #define   DPLL_PORTC_READY_MASK		(0xf << 4)
 #define   DPLL_PORTB_READY_MASK		(0xf)
 
@@ -2488,6 +2532,9 @@
 #define CLKCFG_MEM_800					(3 << 4)
 #define CLKCFG_MEM_MASK					(7 << 4)
 
+#define HPLLVCO                 (MCHBAR_MIRROR_BASE + 0xc38)
+#define HPLLVCO_MOBILE          (MCHBAR_MIRROR_BASE + 0xc0f)
+
 #define TSC1			0x11001
 #define   TSE			(1<<0)
 #define TR1			0x11006
@@ -2718,8 +2765,10 @@
 #define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
 
 #define GEN6_GT_PERF_STATUS	(MCHBAR_MIRROR_BASE_SNB + 0x5948)
+#define BXT_GT_PERF_STATUS      (MCHBAR_MIRROR_BASE_SNB + 0x7070)
 #define GEN6_RP_STATE_LIMITS	(MCHBAR_MIRROR_BASE_SNB + 0x5994)
 #define GEN6_RP_STATE_CAP	(MCHBAR_MIRROR_BASE_SNB + 0x5998)
+#define BXT_RP_STATE_CAP        0x138170
 
 #define INTERVAL_1_28_US(us)	(((us) * 100) >> 7)
 #define INTERVAL_1_33_US(us)	(((us) * 3)   >> 2)
@@ -2767,7 +2816,8 @@
  * valid. Now, docs explain in dwords what is in the context object. The full
  * size is 70720 bytes, however, the power context and execlist context will
  * never be saved (power context is stored elsewhere, and execlists don't work
- * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages.
+ * on HSW) - so the final size, including the extra state required for the
+ * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
  */
 #define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
 /* Same as Haswell, but 72064 bytes now. */
@@ -4398,9 +4448,32 @@
 #define   DSPARB_BSTART_SHIFT	0
 #define   DSPARB_BEND_SHIFT	9 /* on 855 */
 #define   DSPARB_AEND_SHIFT	0
-
+#define   DSPARB_SPRITEA_SHIFT_VLV	0
+#define   DSPARB_SPRITEA_MASK_VLV	(0xff << 0)
+#define   DSPARB_SPRITEB_SHIFT_VLV	8
+#define   DSPARB_SPRITEB_MASK_VLV	(0xff << 8)
+#define   DSPARB_SPRITEC_SHIFT_VLV	16
+#define   DSPARB_SPRITEC_MASK_VLV	(0xff << 16)
+#define   DSPARB_SPRITED_SHIFT_VLV	24
+#define   DSPARB_SPRITED_MASK_VLV	(0xff << 24)
 #define DSPARB2			(VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */
+#define   DSPARB_SPRITEA_HI_SHIFT_VLV	0
+#define   DSPARB_SPRITEA_HI_MASK_VLV	(0x1 << 0)
+#define   DSPARB_SPRITEB_HI_SHIFT_VLV	4
+#define   DSPARB_SPRITEB_HI_MASK_VLV	(0x1 << 4)
+#define   DSPARB_SPRITEC_HI_SHIFT_VLV	8
+#define   DSPARB_SPRITEC_HI_MASK_VLV	(0x1 << 8)
+#define   DSPARB_SPRITED_HI_SHIFT_VLV	12
+#define   DSPARB_SPRITED_HI_MASK_VLV	(0x1 << 12)
+#define   DSPARB_SPRITEE_HI_SHIFT_VLV	16
+#define   DSPARB_SPRITEE_HI_MASK_VLV	(0x1 << 16)
+#define   DSPARB_SPRITEF_HI_SHIFT_VLV	20
+#define   DSPARB_SPRITEF_HI_MASK_VLV	(0x1 << 20)
 #define DSPARB3			(VLV_DISPLAY_BASE + 0x7006c) /* chv */
+#define   DSPARB_SPRITEE_SHIFT_VLV	0
+#define   DSPARB_SPRITEE_MASK_VLV	(0xff << 0)
+#define   DSPARB_SPRITEF_SHIFT_VLV	8
+#define   DSPARB_SPRITEF_MASK_VLV	(0xff << 8)
 
 /* pnv/gen4/g4x/vlv/chv */
 #define DSPFW1			(dev_priv->info.display_mmio_offset + 0x70034)
@@ -5754,6 +5827,13 @@
 #define HSW_NDE_RSTWRN_OPT	0x46408
 #define  RESET_PCH_HANDSHAKE_ENABLE	(1<<4)
 
+#define SKL_DFSM			0x51000
+#define SKL_DFSM_CDCLK_LIMIT_MASK	(3 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_675	(0 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_540	(1 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_450	(2 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_337_5	(3 << 23)
+
 #define FF_SLICE_CS_CHICKEN2			0x20e4
 #define  GEN9_TSG_BARRIER_ACK_DISABLE		(1<<8)
 
@@ -5791,6 +5871,7 @@
 
 #define GEN8_L3SQCREG4				0xb118
 #define  GEN8_LQSC_RO_PERF_DIS			(1<<27)
+#define  GEN8_LQSC_FLUSH_COHERENT_LINES		(1<<21)
 
 /* GEN8 chicken */
 #define HDC_CHICKEN0				0x7300
@@ -5868,6 +5949,7 @@
 #define SDE_AUXC_CPT		(1 << 26)
 #define SDE_AUXB_CPT		(1 << 25)
 #define SDE_AUX_MASK_CPT	(7 << 25)
+#define SDE_PORTE_HOTPLUG_SPT	(1 << 25)
 #define SDE_PORTD_HOTPLUG_CPT	(1 << 23)
 #define SDE_PORTC_HOTPLUG_CPT	(1 << 22)
 #define SDE_PORTB_HOTPLUG_CPT	(1 << 21)
@@ -5878,6 +5960,10 @@
 				 SDE_PORTD_HOTPLUG_CPT |	\
 				 SDE_PORTC_HOTPLUG_CPT |	\
 				 SDE_PORTB_HOTPLUG_CPT)
+#define SDE_HOTPLUG_MASK_SPT	(SDE_PORTE_HOTPLUG_SPT |	\
+				 SDE_PORTD_HOTPLUG_CPT |	\
+				 SDE_PORTC_HOTPLUG_CPT |	\
+				 SDE_PORTB_HOTPLUG_CPT)
 #define SDE_GMBUS_CPT		(1 << 17)
 #define SDE_ERROR_CPT		(1 << 16)
 #define SDE_AUDIO_CP_REQ_C_CPT	(1 << 10)
@@ -5913,6 +5999,11 @@
 
 /* digital port hotplug */
 #define PCH_PORT_HOTPLUG        0xc4030		/* SHOTPLUG_CTL */
+#define BXT_PORTA_HOTPLUG_ENABLE	(1 << 28)
+#define BXT_PORTA_HOTPLUG_STATUS_MASK	(0x3 << 24)
+#define  BXT_PORTA_HOTPLUG_NO_DETECT	(0 << 24)
+#define  BXT_PORTA_HOTPLUG_SHORT_DETECT	(1 << 24)
+#define  BXT_PORTA_HOTPLUG_LONG_DETECT	(2 << 24)
 #define PORTD_HOTPLUG_ENABLE            (1 << 20)
 #define PORTD_PULSE_DURATION_2ms        (0)
 #define PORTD_PULSE_DURATION_4_5ms      (1 << 18)
@@ -5944,6 +6035,13 @@
 #define  PORTB_HOTPLUG_SHORT_DETECT	(1 << 0)
 #define  PORTB_HOTPLUG_LONG_DETECT	(2 << 0)
 
+#define PCH_PORT_HOTPLUG2        0xc403C		/* SHOTPLUG_CTL2 */
+#define PORTE_HOTPLUG_ENABLE            (1 << 4)
+#define PORTE_HOTPLUG_STATUS_MASK	(0x3 << 0)
+#define  PORTE_HOTPLUG_NO_DETECT	(0 << 0)
+#define  PORTE_HOTPLUG_SHORT_DETECT	(1 << 0)
+#define  PORTE_HOTPLUG_LONG_DETECT	(2 << 0)
+
 #define PCH_GPIOA               0xc5010
 #define PCH_GPIOB               0xc5014
 #define PCH_GPIOC               0xc5018
@@ -6047,6 +6145,9 @@
 #define _VIDEO_DIP_CTL_A         0xe0200
 #define _VIDEO_DIP_DATA_A        0xe0208
 #define _VIDEO_DIP_GCP_A         0xe0210
+#define  GCP_COLOR_INDICATION		(1 << 2)
+#define  GCP_DEFAULT_PHASE_ENABLE	(1 << 1)
+#define  GCP_AV_MUTE			(1 << 0)
 
 #define _VIDEO_DIP_CTL_B         0xe1200
 #define _VIDEO_DIP_DATA_B        0xe1208
@@ -6186,6 +6287,7 @@
 #define _TRANSA_CHICKEN1	 0xf0060
 #define _TRANSB_CHICKEN1	 0xf1060
 #define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define  TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE	(1<<10)
 #define  TRANS_CHICKEN1_DP0UNIT_GC_DISABLE	(1<<4)
 #define _TRANSA_CHICKEN2	 0xf0064
 #define _TRANSB_CHICKEN2	 0xf1064
@@ -6370,6 +6472,8 @@
 #define PCH_PP_CONTROL		0xc7204
 #define  PANEL_UNLOCK_REGS	(0xabcd << 16)
 #define  PANEL_UNLOCK_MASK	(0xffff << 16)
+#define  BXT_POWER_CYCLE_DELAY_MASK	(0x1f0)
+#define  BXT_POWER_CYCLE_DELAY_SHIFT	4
 #define  EDP_FORCE_VDD		(1 << 3)
 #define  EDP_BLC_ENABLE		(1 << 2)
 #define  PANEL_POWER_RESET	(1 << 1)
@@ -6398,6 +6502,17 @@
 #define  PANEL_POWER_CYCLE_DELAY_MASK	(0x1f)
 #define  PANEL_POWER_CYCLE_DELAY_SHIFT	0
 
+/* BXT PPS changes - 2nd set of PPS registers */
+#define _BXT_PP_STATUS2		0xc7300
+#define _BXT_PP_CONTROL2	0xc7304
+#define _BXT_PP_ON_DELAYS2	0xc7308
+#define _BXT_PP_OFF_DELAYS2	0xc730c
+
+#define BXT_PP_STATUS(n)	((!(n)) ? PCH_PP_STATUS : _BXT_PP_STATUS2)
+#define BXT_PP_CONTROL(n)	((!(n)) ? PCH_PP_CONTROL : _BXT_PP_CONTROL2)
+#define BXT_PP_ON_DELAYS(n)	((!(n)) ? PCH_PP_ON_DELAYS : _BXT_PP_ON_DELAYS2)
+#define BXT_PP_OFF_DELAYS(n)	((!(n)) ? PCH_PP_OFF_DELAYS : _BXT_PP_OFF_DELAYS2)
+
 #define PCH_DP_B		0xe4100
 #define PCH_DPB_AUX_CH_CTL	0xe4110
 #define PCH_DPB_AUX_CH_DATA1	0xe4114
@@ -6698,6 +6813,7 @@
 #define	  GEN6_PCODE_READ_RC6VIDS		0x5
 #define     GEN6_ENCODE_RC6_VID(mv)		(((mv) - 245) / 5)
 #define     GEN6_DECODE_RC6_VID(vids)		(((vids) * 5) + 245)
+#define   BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ	0x18
 #define   GEN9_PCODE_READ_MEM_LATENCY		0x6
 #define     GEN9_MEM_LATENCY_LEVEL_MASK		0xFF
 #define     GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT	8
@@ -6756,6 +6872,9 @@
 #define GEN7_MISCCPCTL			(0x9424)
 #define   GEN7_DOP_CLOCK_GATE_ENABLE	(1<<0)
 
+#define GEN8_GARBCNTL                   0xB004
+#define   GEN9_GAPS_TSV_CREDIT_DISABLE  (1<<7)
+
 /* IVYBRIDGE DPF */
 #define GEN7_L3CDERRST1			0xB008 /* L3CD Error Status 1 */
 #define HSW_L3CDERRST11			0xB208 /* L3CD Error Status register 1 slice 1 */
@@ -7163,6 +7282,7 @@
 #define  LCPLL_CLK_FREQ_337_5_BDW	(2<<26)
 #define  LCPLL_CLK_FREQ_675_BDW		(3<<26)
 #define  LCPLL_CD_CLOCK_DISABLE		(1<<25)
+#define  LCPLL_ROOT_CD_CLOCK_DISABLE	(1<<24)
 #define  LCPLL_CD2X_CLOCK_DISABLE	(1<<23)
 #define  LCPLL_POWER_DOWN_ALLOW		(1<<22)
 #define  LCPLL_CD_SOURCE_FCLK		(1<<21)
@@ -7265,12 +7385,6 @@
 #define DC_STATE_EN			0x45504
 #define  DC_STATE_EN_UPTO_DC5		(1<<0)
 #define  DC_STATE_EN_DC9		(1<<3)
-
-/*
-* SKL DC
-*/
-#define  DC_STATE_EN			0x45504
-#define  DC_STATE_EN_UPTO_DC5		(1<<0)
 #define  DC_STATE_EN_UPTO_DC6		(2<<0)
 #define  DC_STATE_EN_UPTO_DC5_DC6_MASK   0x3
 
@@ -7822,4 +7936,13 @@
 #define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
 #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
 
+/* MOCS (Memory Object Control State) registers */
+#define GEN9_LNCFCMOCS0		0xb020	/* L3 Cache Control base */
+
+#define GEN9_GFX_MOCS_0		0xc800	/* Graphics MOCS base register*/
+#define GEN9_MFX0_MOCS_0	0xc900	/* Media 0 MOCS base register*/
+#define GEN9_MFX1_MOCS_0	0xca00	/* Media 1 MOCS base register*/
+#define GEN9_VEBOX_MOCS_0	0xcb00	/* Video MOCS base register*/
+#define GEN9_BLT_MOCS_0		0xcc00	/* Blitter MOCS base register*/
+
 #endif /* _I915_REG_H_ */
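
The new *_SHIFT companions to the existing masks make the read-side decode symmetric with the write-side encode macros. A self-contained check using the PORT_PLL_P1 values defined above:

#include <stdio.h>
#include <stdint.h>

#define PORT_PLL_P1_SHIFT	13
#define PORT_PLL_P1_MASK	(0x07 << PORT_PLL_P1_SHIFT)
#define PORT_PLL_P1(x)		((x) << PORT_PLL_P1_SHIFT)

int main(void)
{
	uint32_t reg = PORT_PLL_P1(3);	/* encode */
	uint32_t p1 = (reg & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT; /* decode */

	printf("p1 = %u\n", (unsigned)p1);	/* p1 = 3 */
	return 0;
}
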
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index cf67f82..1ccac61 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -92,7 +92,7 @@
 	}
 
 	/* only restore FBC info on the platform that supports FBC*/
-	intel_fbc_disable(dev);
+	intel_fbc_disable(dev_priv);
 
 	/* restore FBC interval */
 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 2476268..55bd04c 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -64,24 +64,16 @@
 			goto out;
 		}
 
-		units = 0;
-		div = 1000000ULL;
-
-		if (IS_CHERRYVIEW(dev)) {
+		if (IS_CHERRYVIEW(dev) && czcount_30ns == 1) {
 			/* Special case for 320Mhz */
-			if (czcount_30ns == 1) {
-				div = 10000000ULL;
-				units = 3125ULL;
-			} else {
-				/* chv counts are one less */
-				czcount_30ns += 1;
-			}
+			div = 10000000ULL;
+			units = 3125ULL;
+		} else {
+			czcount_30ns += 1;
+			div = 1000000ULL;
+			units = DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns);
 		}
 
-		if (units == 0)
-			units = DIV_ROUND_UP_ULL(30ULL * bias,
-						 (u64)czcount_30ns);
-
 		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
 			units <<= 8;
 
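The restructure folds the old units == 0 fallback into a single if/else. Mirrored in isolation (a sketch: bias and czcount_30ns come from hardware counters, and DIV_ROUND_UP_ULL is expanded by hand):

#include <stdint.h>

static void pick_units(int is_chv, uint64_t czcount_30ns, uint64_t bias,
		       uint64_t *units, uint64_t *div)
{
	if (is_chv && czcount_30ns == 1) {
		/* Cherryview 320 MHz special case */
		*div = 10000000ULL;
		*units = 3125ULL;
	} else {
		/* counts read back one less than the real value */
		czcount_30ns += 1;
		*div = 1000000ULL;
		/* DIV_ROUND_UP_ULL(30 * bias, czcount_30ns) */
		*units = (30ULL * bias + czcount_30ns - 1) / czcount_30ns;
	}
}
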
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 849a259..2f34c47 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -424,10 +424,10 @@
 );
 
 TRACE_EVENT(i915_gem_ring_sync_to,
-	    TP_PROTO(struct intel_engine_cs *from,
-		     struct intel_engine_cs *to,
+	    TP_PROTO(struct drm_i915_gem_request *to_req,
+		     struct intel_engine_cs *from,
 		     struct drm_i915_gem_request *req),
-	    TP_ARGS(from, to, req),
+	    TP_ARGS(to_req, from, req),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
@@ -439,7 +439,7 @@
 	    TP_fast_assign(
 			   __entry->dev = from->dev->primary->index;
 			   __entry->sync_from = from->id;
-			   __entry->sync_to = to->id;
+			   __entry->sync_to = to_req->ring->id;
 			   __entry->seqno = i915_gem_request_get_seqno(req);
 			   ),
 
@@ -475,8 +475,8 @@
 );
 
 TRACE_EVENT(i915_gem_ring_flush,
-	    TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
-	    TP_ARGS(ring, invalidate, flush),
+	    TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
+	    TP_ARGS(req, invalidate, flush),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
@@ -486,8 +486,8 @@
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = ring->dev->primary->index;
-			   __entry->ring = ring->id;
+			   __entry->dev = req->ring->dev->primary->index;
+			   __entry->ring = req->ring->id;
 			   __entry->invalidate = invalidate;
 			   __entry->flush = flush;
 			   ),
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 8e35e0d..e2531cf 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -35,133 +35,6 @@
 #include <drm/drm_plane_helper.h>
 #include "intel_drv.h"
 
-
-/**
- * intel_atomic_check - validate state object
- * @dev: drm device
- * @state: state to validate
- */
-int intel_atomic_check(struct drm_device *dev,
-		       struct drm_atomic_state *state)
-{
-	int nplanes = dev->mode_config.num_total_plane;
-	int ncrtcs = dev->mode_config.num_crtc;
-	int nconnectors = dev->mode_config.num_connector;
-	enum pipe nuclear_pipe = INVALID_PIPE;
-	struct intel_crtc *nuclear_crtc = NULL;
-	struct intel_crtc_state *crtc_state = NULL;
-	int ret;
-	int i;
-	bool not_nuclear = false;
-
-	/*
-	 * FIXME:  At the moment, we only support "nuclear pageflip" on a
-	 * single CRTC.  Cross-crtc updates will be added later.
-	 */
-	for (i = 0; i < nplanes; i++) {
-		struct intel_plane *plane = to_intel_plane(state->planes[i]);
-		if (!plane)
-			continue;
-
-		if (nuclear_pipe == INVALID_PIPE) {
-			nuclear_pipe = plane->pipe;
-		} else if (nuclear_pipe != plane->pipe) {
-			DRM_DEBUG_KMS("i915 only support atomic plane operations on a single CRTC at the moment\n");
-			return -EINVAL;
-		}
-	}
-
-	/*
-	 * FIXME:  We only handle planes for now; make sure there are no CRTC's
-	 * or connectors involved.
-	 */
-	state->allow_modeset = false;
-	for (i = 0; i < ncrtcs; i++) {
-		struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]);
-		if (crtc)
-			memset(&crtc->atomic, 0, sizeof(crtc->atomic));
-		if (crtc && crtc->pipe != nuclear_pipe)
-			not_nuclear = true;
-		if (crtc && crtc->pipe == nuclear_pipe) {
-			nuclear_crtc = crtc;
-			crtc_state = to_intel_crtc_state(state->crtc_states[i]);
-		}
-	}
-	for (i = 0; i < nconnectors; i++)
-		if (state->connectors[i] != NULL)
-			not_nuclear = true;
-
-	if (not_nuclear) {
-		DRM_DEBUG_KMS("i915 only supports atomic plane operations at the moment\n");
-		return -EINVAL;
-	}
-
-	ret = drm_atomic_helper_check_planes(dev, state);
-	if (ret)
-		return ret;
-
-	/* FIXME: move to crtc atomic check function once it is ready */
-	ret = intel_atomic_setup_scalers(dev, nuclear_crtc, crtc_state);
-	if (ret)
-		return ret;
-
-	return ret;
-}
-
-
-/**
- * intel_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the top-level driver state object
- * @async: asynchronous commit
- *
- * This function commits a top-level state object that has been validated
- * with drm_atomic_helper_check().
- *
- * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
- * we can only handle plane-related operations and do not yet support
- * asynchronous commit.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-int intel_atomic_commit(struct drm_device *dev,
-			struct drm_atomic_state *state,
-			bool async)
-{
-	struct drm_crtc_state *crtc_state;
-	struct drm_crtc *crtc;
-	int ret, i;
-
-	if (async) {
-		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
-		return -EINVAL;
-	}
-
-	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (ret)
-		return ret;
-
-	/* Point of no return */
-	drm_atomic_helper_swap_state(dev, state);
-
-	/* swap crtc_scaler_state */
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
-
-		if (INTEL_INFO(dev)->gen >= 9)
-			skl_detach_scalers(to_intel_crtc(crtc));
-
-		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
-	}
-
-	drm_atomic_helper_wait_for_vblanks(dev, state);
-	drm_atomic_helper_cleanup_planes(dev, state);
-	drm_atomic_state_free(state);
-
-	return 0;
-}
-
 /**
  * intel_connector_atomic_get_property - fetch connector property value
  * @connector: connector to fetch property for
@@ -269,17 +142,12 @@
 	struct drm_plane *plane = NULL;
 	struct intel_plane *intel_plane;
 	struct intel_plane_state *plane_state = NULL;
-	struct intel_crtc_scaler_state *scaler_state;
-	struct drm_atomic_state *drm_state;
+	struct intel_crtc_scaler_state *scaler_state =
+		&crtc_state->scaler_state;
+	struct drm_atomic_state *drm_state = crtc_state->base.state;
 	int num_scalers_need;
 	int i, j;
 
-	if (INTEL_INFO(dev)->gen < 9 || !intel_crtc || !crtc_state)
-		return 0;
-
-	scaler_state = &crtc_state->scaler_state;
-	drm_state = crtc_state->base.state;
-
 	num_scalers_need = hweight32(scaler_state->scaler_users);
 	DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n",
 		crtc_state, num_scalers_need, intel_crtc->num_scalers,
@@ -307,17 +175,21 @@
 	/* walkthrough scaler_users bits and start assigning scalers */
 	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
 		int *scaler_id;
+		const char *name;
+		int idx;
 
 		/* skip if scaler not required */
 		if (!(scaler_state->scaler_users & (1 << i)))
 			continue;
 
 		if (i == SKL_CRTC_INDEX) {
+			name = "CRTC";
+			idx = intel_crtc->base.base.id;
+
 			/* panel fitter case: assign as a crtc scaler */
 			scaler_id = &scaler_state->scaler_id;
 		} else {
-			if (!drm_state)
-				continue;
+			name = "PLANE";
 
 			/* plane scaler case: assign as a plane scaler */
 			/* find the plane that set the bit as scaler_user */
@@ -336,9 +208,19 @@
 						plane->base.id);
 					return PTR_ERR(state);
 				}
+
+				/*
+			 * The plane was added after the plane checks ran,
+			 * but since this plane is unchanged, do only the
+			 * minimum required validation.
+				 */
+				if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+					intel_crtc->atomic.wait_for_flips = true;
+				crtc_state->base.planes_changed = true;
 			}
 
 			intel_plane = to_intel_plane(plane);
+			idx = plane->base.id;
 
 			/* plane on different crtc cannot be a scaler user of this crtc */
 			if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
@@ -354,23 +236,16 @@
 			for (j = 0; j < intel_crtc->num_scalers; j++) {
 				if (!scaler_state->scalers[j].in_use) {
 					scaler_state->scalers[j].in_use = 1;
-					*scaler_id = scaler_state->scalers[j].id;
+					*scaler_id = j;
 					DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
-						intel_crtc->pipe,
-						i == SKL_CRTC_INDEX ? scaler_state->scaler_id :
-							plane_state->scaler_id,
-						i == SKL_CRTC_INDEX ? "CRTC" : "PLANE",
-						i == SKL_CRTC_INDEX ?  intel_crtc->base.base.id :
-						plane->base.id);
+						intel_crtc->pipe, *scaler_id, name, idx);
 					break;
 				}
 			}
 		}
 
 		if (WARN_ON(*scaler_id < 0)) {
-			DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n",
-				i == SKL_CRTC_INDEX ? "CRTC" : "PLANE",
-				i == SKL_CRTC_INDEX ? intel_crtc->base.base.id:plane->base.id);
+			DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
 			continue;
 		}
 
@@ -392,3 +267,54 @@
 
 	return 0;
 }
+
+static void
+intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
+				  struct intel_shared_dpll_config *shared_dpll)
+{
+	enum intel_dpll_id i;
+
+	/* Copy shared dpll state */
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+		shared_dpll[i] = pll->config;
+	}
+}
+
+struct intel_shared_dpll_config *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
+{
+	struct intel_atomic_state *state = to_intel_atomic_state(s);
+
+	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
+
+	if (!state->dpll_set) {
+		state->dpll_set = true;
+
+		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
+						  state->shared_dpll);
+	}
+
+	return state->shared_dpll;
+}
+
+struct drm_atomic_state *
+intel_atomic_state_alloc(struct drm_device *dev)
+{
+	struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
+		kfree(state);
+		return NULL;
+	}
+
+	return &state->base;
+}
+
+void intel_atomic_state_clear(struct drm_atomic_state *s)
+{
+	struct intel_atomic_state *state = to_intel_atomic_state(s);
+	drm_atomic_state_default_clear(&state->base);
+	state->dpll_set = false;
+}
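
intel_atomic_state_alloc()/intel_atomic_state_clear() are the standard DRM hooks for subclassing drm_atomic_state, exposed through drm_mode_config_funcs. A sketch of the wiring; the check/commit/fb_create entries are illustrative placeholders, only the two state hooks come from this patch:

#include <drm/drm_crtc.h>

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,	/* illustrative */
	.atomic_check = intel_atomic_check,		/* illustrative */
	.atomic_commit = intel_atomic_commit,		/* illustrative */
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};

Resetting dpll_set in the clear hook matters for the -EDEADLK path: after drm_modeset_backoff() the retried check phase re-copies the shared DPLL state instead of reusing a stale snapshot.
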
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 86ba4b2..f1ab8e4 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -56,6 +56,7 @@
 
 	state->base.plane = plane;
 	state->base.rotation = BIT(DRM_ROTATE_0);
+	state->ckey.flags = I915_SET_COLORKEY_NONE;
 
 	return state;
 }
@@ -114,8 +115,10 @@
 	struct intel_crtc_state *crtc_state;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct intel_plane_state *intel_state = to_intel_plane_state(state);
+	struct drm_crtc_state *drm_crtc_state;
+	int ret;
 
-	crtc = crtc ? crtc : plane->crtc;
+	crtc = crtc ? crtc : plane->state->crtc;
 	intel_crtc = to_intel_crtc(crtc);
 
 	/*
@@ -127,16 +130,11 @@
 	if (!crtc)
 		return 0;
 
-	/* FIXME: temporary hack necessary while we still use the plane update
-	 * helper. */
-	if (state->state) {
-		crtc_state =
-			intel_atomic_get_crtc_state(state->state, intel_crtc);
-		if (IS_ERR(crtc_state))
-			return PTR_ERR(crtc_state);
-	} else {
-		crtc_state = intel_crtc->config;
-	}
+	drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+	if (WARN_ON(!drm_crtc_state))
+		return -EINVAL;
+
+	crtc_state = to_intel_crtc_state(drm_crtc_state);
 
 	/*
 	 * The original src/dest coordinates are stored in state->base, but
@@ -160,20 +158,6 @@
 	intel_state->clip.y2 =
 		crtc_state->base.active ? crtc_state->pipe_src_h : 0;
 
-	/*
-	 * Disabling a plane is always okay; we just need to update
-	 * fb tracking in a special way since cleanup_fb() won't
-	 * get called by the plane helpers.
-	 */
-	if (state->fb == NULL && plane->state->fb != NULL) {
-		/*
-		 * 'prepare' is never called when plane is being disabled, so
-		 * we need to handle frontbuffer tracking as a special case
-		 */
-		intel_crtc->atomic.disabled_planes |=
-			(1 << drm_plane_index(plane));
-	}
-
 	if (state->fb && intel_rotation_90_or_270(state->rotation)) {
 		if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
 			state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
@@ -198,7 +182,12 @@
 		}
 	}
 
-	return intel_plane->check_plane(plane, intel_state);
+	intel_state->visible = false;
+	ret = intel_plane->check_plane(plane, crtc_state, intel_state);
+	if (ret)
+		return ret;
+
+	return intel_plane_atomic_calc_changes(&crtc_state->base, state);
 }
 
 static void intel_plane_atomic_update(struct drm_plane *plane,
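
The switch to drm_atomic_get_existing_crtc_state() removes the last "plane update outside an atomic state" special case: the helper only looks up state already attached to the transaction, so unlike drm_atomic_get_crtc_state() it takes no locks and cannot fail with -EDEADLK; it simply returns NULL when the CRTC is not part of the update. Simplified from its definition in drm_atomic.h:

static inline struct drm_crtc_state *
get_existing_crtc_state(struct drm_atomic_state *state,
			struct drm_crtc *crtc)
{
	/* NULL if the CRTC is not part of this atomic transaction */
	return state->crtc_states[drm_crtc_index(crtc)];
}
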
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 3da9b84..89c1a8ce 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -41,7 +41,8 @@
  *
  * The disable sequences must be performed before disabling the transcoder or
  * port. The enable sequences may only be performed after enabling the
- * transcoder and port, and after completed link training.
+ * transcoder and port, and after completed link training. Therefore the audio
+ * enable/disable sequences are part of the modeset sequence.
  *
  * The codec and controller sequences could be done either parallel or serial,
  * but generally the ELDV/PD change in the codec sequence indicates to the audio
@@ -399,6 +400,9 @@
 	struct drm_connector *connector;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_audio_component *acomp = dev_priv->audio_component;
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	enum port port = intel_dig_port->port;
 
 	connector = drm_select_eld(encoder, mode);
 	if (!connector)
@@ -419,6 +423,9 @@
 
 	if (dev_priv->display.audio_codec_enable)
 		dev_priv->display.audio_codec_enable(connector, intel_encoder, mode);
+
+	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
+		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
 }
 
 /**
@@ -428,13 +435,20 @@
  * The disable sequences must be performed before disabling the transcoder or
  * port.
  */
-void intel_audio_codec_disable(struct intel_encoder *encoder)
+void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
 {
-	struct drm_device *dev = encoder->base.dev;
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_audio_component *acomp = dev_priv->audio_component;
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	enum port port = intel_dig_port->port;
 
 	if (dev_priv->display.audio_codec_disable)
-		dev_priv->display.audio_codec_disable(encoder);
+		dev_priv->display.audio_codec_disable(intel_encoder);
+
+	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
+		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
 }
 
 /**
@@ -525,12 +539,16 @@
 				     struct device *hda_dev, void *data)
 {
 	struct i915_audio_component *acomp = data;
+	struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
 
 	if (WARN_ON(acomp->ops || acomp->dev))
 		return -EEXIST;
 
+	drm_modeset_lock_all(dev_priv->dev);
 	acomp->ops = &i915_audio_component_ops;
 	acomp->dev = i915_dev;
+	dev_priv->audio_component = acomp;
+	drm_modeset_unlock_all(dev_priv->dev);
 
 	return 0;
 }
@@ -539,9 +557,13 @@
 					struct device *hda_dev, void *data)
 {
 	struct i915_audio_component *acomp = data;
+	struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
 
+	drm_modeset_lock_all(dev_priv->dev);
 	acomp->ops = NULL;
 	acomp->dev = NULL;
+	dev_priv->audio_component = NULL;
+	drm_modeset_unlock_all(dev_priv->dev);
 }
 
 static const struct component_ops i915_audio_component_bind_ops = {
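
On the audio side this gives the component glue a push channel: after codec enable/disable, pin_eld_notify() fires with the port, so the HDA driver can re-read the ELD instead of polling; the callback and audio_ptr live in the audio_ops table from i915_component.h. A consumer-side sketch, all names hypothetical:

#include <linux/workqueue.h>

struct my_hda {				/* hypothetical HDA context */
	struct work_struct eld_work;
};

static void my_pin_eld_notify(void *audio_ptr, int port)
{
	struct my_hda *hda = audio_ptr;

	/* defer the ELD re-read out of the notifier context */
	schedule_work(&hda->eld_work);
}
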
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 198fc3c..b3e437b 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -122,42 +122,6 @@
 	drm_mode_set_name(panel_fixed_mode);
 }
 
-static bool
-lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
-			   const struct lvds_dvo_timing *b)
-{
-	if (a->hactive_hi != b->hactive_hi ||
-	    a->hactive_lo != b->hactive_lo)
-		return false;
-
-	if (a->hsync_off_hi != b->hsync_off_hi ||
-	    a->hsync_off_lo != b->hsync_off_lo)
-		return false;
-
-	if (a->hsync_pulse_width != b->hsync_pulse_width)
-		return false;
-
-	if (a->hblank_hi != b->hblank_hi ||
-	    a->hblank_lo != b->hblank_lo)
-		return false;
-
-	if (a->vactive_hi != b->vactive_hi ||
-	    a->vactive_lo != b->vactive_lo)
-		return false;
-
-	if (a->vsync_off != b->vsync_off)
-		return false;
-
-	if (a->vsync_pulse_width != b->vsync_pulse_width)
-		return false;
-
-	if (a->vblank_hi != b->vblank_hi ||
-	    a->vblank_lo != b->vblank_lo)
-		return false;
-
-	return true;
-}
-
 static const struct lvds_dvo_timing *
 get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
 		    const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
@@ -213,7 +177,7 @@
 	const struct lvds_dvo_timing *panel_dvo_timing;
 	const struct lvds_fp_timing *fp_timing;
 	struct drm_display_mode *panel_fixed_mode;
-	int i, downclock, drrs_mode;
+	int drrs_mode;
 
 	lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
 	if (!lvds_options)
@@ -272,30 +236,6 @@
 	DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
 	drm_mode_debug_printmodeline(panel_fixed_mode);
 
-	/*
-	 * Iterate over the LVDS panel timing info to find the lowest clock
-	 * for the native resolution.
-	 */
-	downclock = panel_dvo_timing->clock;
-	for (i = 0; i < 16; i++) {
-		const struct lvds_dvo_timing *dvo_timing;
-
-		dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
-						 lvds_lfp_data_ptrs,
-						 i);
-		if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
-		    dvo_timing->clock < downclock)
-			downclock = dvo_timing->clock;
-	}
-
-	if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) {
-		dev_priv->lvds_downclock_avail = 1;
-		dev_priv->lvds_downclock = downclock * 10;
-		DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
-			      "Normal Clock %dKHz, downclock %dKHz\n",
-			      panel_fixed_mode->clock, 10*downclock);
-	}
-
 	fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
 				       lvds_lfp_data_ptrs,
 				       lvds_options->panel_type);
@@ -461,7 +401,7 @@
 {
 	struct sdvo_device_mapping *p_mapping;
 	const struct bdb_general_definitions *p_defs;
-	const union child_device_config *p_child;
+	const struct old_child_dev_config *child; /* legacy */
 	int i, child_device_num, count;
 	u16	block_size;
 
@@ -470,14 +410,14 @@
 		DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
 		return;
 	}
-	/* judge whether the size of child device meets the requirements.
-	 * If the child device size obtained from general definition block
-	 * is different with sizeof(struct child_device_config), skip the
-	 * parsing of sdvo device info
+
+	/*
+	 * Only parse SDVO mappings when the general definitions block child
+	 * device size matches that of the *legacy* child device config
+	 * struct. Thus, SDVO mapping will be skipped for newer VBT.
 	 */
-	if (p_defs->child_dev_size != sizeof(*p_child)) {
-		/* different child dev size . Ignore it */
-		DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+	if (p_defs->child_dev_size != sizeof(*child)) {
+		DRM_DEBUG_KMS("Unsupported child device size for SDVO mapping.\n");
 		return;
 	}
 	/* get the block size of general definitions */
@@ -487,37 +427,37 @@
 		p_defs->child_dev_size;
 	count = 0;
 	for (i = 0; i < child_device_num; i++) {
-		p_child = child_device_ptr(p_defs, i);
-		if (!p_child->old.device_type) {
+		child = &child_device_ptr(p_defs, i)->old;
+		if (!child->device_type) {
 			/* skip the device block if device type is invalid */
 			continue;
 		}
-		if (p_child->old.slave_addr != SLAVE_ADDR1 &&
-			p_child->old.slave_addr != SLAVE_ADDR2) {
+		if (child->slave_addr != SLAVE_ADDR1 &&
+		    child->slave_addr != SLAVE_ADDR2) {
 			/*
 			 * If the slave address is neither 0x70 nor 0x72,
 			 * it is not a SDVO device. Skip it.
 			 */
 			continue;
 		}
-		if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
-			p_child->old.dvo_port != DEVICE_PORT_DVOC) {
+		if (child->dvo_port != DEVICE_PORT_DVOB &&
+		    child->dvo_port != DEVICE_PORT_DVOC) {
 			/* skip the incorrect SDVO port */
 			DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
 			continue;
 		}
 		DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
-				" %s port\n",
-				p_child->old.slave_addr,
-				(p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
-					"SDVOB" : "SDVOC");
-		p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
+			      " %s port\n",
+			      child->slave_addr,
+			      (child->dvo_port == DEVICE_PORT_DVOB) ?
+			      "SDVOB" : "SDVOC");
+		p_mapping = &(dev_priv->sdvo_mappings[child->dvo_port - 1]);
 		if (!p_mapping->initialized) {
-			p_mapping->dvo_port = p_child->old.dvo_port;
-			p_mapping->slave_addr = p_child->old.slave_addr;
-			p_mapping->dvo_wiring = p_child->old.dvo_wiring;
-			p_mapping->ddc_pin = p_child->old.ddc_pin;
-			p_mapping->i2c_pin = p_child->old.i2c_pin;
+			p_mapping->dvo_port = child->dvo_port;
+			p_mapping->slave_addr = child->slave_addr;
+			p_mapping->dvo_wiring = child->dvo_wiring;
+			p_mapping->ddc_pin = child->ddc_pin;
+			p_mapping->i2c_pin = child->i2c_pin;
 			p_mapping->initialized = 1;
 			DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
 				      p_mapping->dvo_port,
@@ -529,7 +469,7 @@
 			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
 					 "two SDVO device.\n");
 		}
-		if (p_child->old.slave2_addr) {
+		if (child->slave2_addr) {
 			/* Maybe this is a SDVO device with multiple inputs */
 			/* And the mapping info is not added */
 			DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
@@ -946,6 +886,17 @@
 	memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
 }
 
+static u8 translate_iboost(u8 val)
+{
+	static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
+
+	if (val >= ARRAY_SIZE(mapping)) {
+		DRM_DEBUG_KMS("Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
+		return 0;
+	}
+	return mapping[val];
+}
+
 static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 			   const struct bdb_header *bdb)
 {
@@ -954,23 +905,23 @@
 	uint8_t hdmi_level_shift;
 	int i, j;
 	bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
-	uint8_t aux_channel;
+	uint8_t aux_channel, ddc_pin;
 	/* Each DDI port can have more than one value on the "DVO Port" field,
 	 * so look for all the possible values for each port and abort if more
 	 * than one is found. */
-	int dvo_ports[][2] = {
-		{DVO_PORT_HDMIA, DVO_PORT_DPA},
-		{DVO_PORT_HDMIB, DVO_PORT_DPB},
-		{DVO_PORT_HDMIC, DVO_PORT_DPC},
-		{DVO_PORT_HDMID, DVO_PORT_DPD},
-		{DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
+	int dvo_ports[][3] = {
+		{DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
+		{DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
+		{DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
+		{DVO_PORT_HDMID, DVO_PORT_DPD, -1},
+		{DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
 	};
 
 	/* Find the child device to use, abort if more than one found. */
 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
 		it = dev_priv->vbt.child_dev + i;
 
-		for (j = 0; j < 2; j++) {
+		for (j = 0; j < 3; j++) {
 			if (dvo_ports[port][j] == -1)
 				break;
 
@@ -988,6 +939,7 @@
 		return;
 
 	aux_channel = child->raw[25];
+	ddc_pin = child->common.ddc_pin;
 
 	is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
 	is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
@@ -1019,22 +971,53 @@
 		DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
 
 	if (is_dvi) {
-		if (child->common.ddc_pin == 0x05 && port != PORT_B)
+		if (port == PORT_E) {
+			info->alternate_ddc_pin = ddc_pin;
+			/* if DDIE shares the ddc pin with another port, then
+			 * dvi/hdmi couldn't exist on the shared port.
+			 * Otherwise they'd share the same ddc pin and the
+			 * system couldn't communicate with them separately. */
+			if (ddc_pin == DDC_PIN_B) {
+				dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
+				dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
+			} else if (ddc_pin == DDC_PIN_C) {
+				dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
+				dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
+			} else if (ddc_pin == DDC_PIN_D) {
+				dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
+				dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
+			}
+		} else if (ddc_pin == DDC_PIN_B && port != PORT_B)
 			DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
-		if (child->common.ddc_pin == 0x04 && port != PORT_C)
+		else if (ddc_pin == DDC_PIN_C && port != PORT_C)
 			DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
-		if (child->common.ddc_pin == 0x06 && port != PORT_D)
+		else if (ddc_pin == DDC_PIN_D && port != PORT_D)
 			DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
 	}
 
 	if (is_dp) {
-		if (aux_channel == 0x40 && port != PORT_A)
+		if (port == PORT_E) {
+			info->alternate_aux_channel = aux_channel;
+			/* if DDIE shares the aux channel with another port,
+			 * then DP couldn't exist on the shared port.
+			 * Otherwise they'd share the same aux channel and the
+			 * system couldn't communicate with them separately. */
+			if (aux_channel == DP_AUX_A)
+				dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
+			else if (aux_channel == DP_AUX_B)
+				dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
+			else if (aux_channel == DP_AUX_C)
+				dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
+			else if (aux_channel == DP_AUX_D)
+				dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
+		} else if (aux_channel == DP_AUX_A && port != PORT_A)
 			DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
-		if (aux_channel == 0x10 && port != PORT_B)
+		else if (aux_channel == DP_AUX_B && port != PORT_B)
 			DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
-		if (aux_channel == 0x20 && port != PORT_C)
+		else if (aux_channel == DP_AUX_C && port != PORT_C)
 			DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
-		if (aux_channel == 0x30 && port != PORT_D)
+		else if (aux_channel == DP_AUX_D && port != PORT_D)
 			DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
 	}
 
@@ -1046,6 +1029,16 @@
 			      hdmi_level_shift);
 		info->hdmi_level_shift = hdmi_level_shift;
 	}
+
+	/* Parse the I_boost config for SKL and above */
+	if (bdb->version >= 196 && (child->common.flags_1 & IBOOST_ENABLE)) {
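+		/*
+		 * The low nibble of iboost_level holds the (e)DP value and
+		 * the high nibble the HDMI value; e.g. 0x21 yields DP boost
+		 * level 3 and HDMI boost level 7 after translation.
+		 */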
+		info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF);
+		DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
+			      port_name(port), info->dp_boost_level);
+		info->hdmi_boost_level = translate_iboost(child->common.iboost_level >> 4);
+		DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
+			      port_name(port), info->hdmi_boost_level);
+	}
 }
 
 static void parse_ddi_ports(struct drm_i915_private *dev_priv,
@@ -1075,17 +1068,39 @@
 	const union child_device_config *p_child;
 	union child_device_config *child_dev_ptr;
 	int i, child_device_num, count;
-	u16	block_size;
+	u8 expected_size;
+	u16 block_size;
 
 	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
 	if (!p_defs) {
 		DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
 		return;
 	}
-	if (p_defs->child_dev_size < sizeof(*p_child)) {
-		DRM_ERROR("General definiton block child device size is too small.\n");
+	if (bdb->version < 195) {
+		expected_size = sizeof(struct old_child_dev_config);
+	} else if (bdb->version == 195) {
+		expected_size = 37;
+	} else if (bdb->version <= 197) {
+		expected_size = 38;
+	} else {
+		expected_size = 38;
+		BUILD_BUG_ON(sizeof(*p_child) < 38);
+		DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
+				 bdb->version, expected_size);
+	}
+
+	/* The legacy sized child device config is the minimum we need. */
+	if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
+		DRM_ERROR("Child device config size %u is too small.\n",
+			  p_defs->child_dev_size);
 		return;
 	}
+
+	/* Flag an error for unexpected size, but continue anyway. */
+	if (p_defs->child_dev_size != expected_size)
+		DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
+			  p_defs->child_dev_size, expected_size, bdb->version);
+
 	/* get the block size of general definitions */
 	block_size = get_blocksize(p_defs);
 	/* get the number of child device */
@@ -1130,7 +1145,14 @@
 
 		child_dev_ptr = dev_priv->vbt.child_dev + count;
 		count++;
-		memcpy(child_dev_ptr, p_child, sizeof(*p_child));
+
+		/*
+		 * Copy as much of the child device config as we know about
+		 * (sizeof) and as is available (child_dev_size). Accessing
+		 * the data must depend on the VBT version.
+		 */
+		memcpy(child_dev_ptr, p_child,
+		       min_t(size_t, p_defs->child_dev_size, sizeof(*p_child)));
 	}
 	return;
 }
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index af0b476..46cd5c7 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -203,9 +203,11 @@
 #define DEVICE_PORT_DVOB	0x01
 #define DEVICE_PORT_DVOC	0x02
 
-/* We used to keep this struct but without any version control. We should avoid
+/*
+ * We used to keep this struct but without any version control. We should avoid
  * using it in the future, but it should be safe to keep using it in the old
- * code. */
+ * code. Do not change; we rely on its size.
+ */
 struct old_child_dev_config {
 	u16 handle;
 	u16 device_type;
@@ -231,6 +233,10 @@
 /* This one contains field offsets that are known to be common for all BDB
  * versions. Notice that the meaning of the contents may still change,
  * but at least the offsets are consistent. */
+
+/* Definitions for flags_1 */
+#define IBOOST_ENABLE (1<<3)
+
 struct common_child_dev_config {
 	u16 handle;
 	u16 device_type;
@@ -239,8 +245,13 @@
 	u8 not_common2[2];
 	u8 ddc_pin;
 	u16 edid_ptr;
+	u8 obsolete;
+	u8 flags_1;
+	u8 not_common3[13];
+	u8 iboost_level;
 } __packed;
 
 /* This field changes depending on the BDB version, so the most reliable way to
  * read it is by checking the BDB version and reading the raw pointer. */
 union child_device_config {
@@ -747,11 +758,6 @@
 #define		DVO_C		2
 #define		DVO_D		3
 
-/* define the PORT for DP output type */
-#define		PORT_IDPB	7
-#define		PORT_IDPC	8
-#define		PORT_IDPD	9
-
 /* Possible values for the "DVO Port" field for versions >= 155: */
 #define DVO_PORT_HDMIA	0
 #define DVO_PORT_HDMIB	1
@@ -764,6 +770,8 @@
 #define DVO_PORT_DPC	8
 #define DVO_PORT_DPD	9
 #define DVO_PORT_DPA	10
+#define DVO_PORT_DPE	11
+#define DVO_PORT_HDMIE	12
 #define DVO_PORT_MIPIA	21
 #define DVO_PORT_MIPIB	22
 #define DVO_PORT_MIPIC	23
@@ -778,6 +786,13 @@
 #define MIPI_DSI_UNDEFINED_PANEL_ID	0
 #define MIPI_DSI_GENERIC_PANEL_ID	1
 
+/*
+ * PMIC vs SoC Backlight support specified in pwm_blc
+ * field in mipi_config block below.
+ */
+#define PPS_BLC_PMIC   0
+#define PPS_BLC_SOC    1
+
 struct mipi_config {
 	u16 panel_id;
 
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 521af2c..af5e43b 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -236,53 +236,6 @@
 	intel_crt_set_dpms(encoder, crt->connector->base.dpms);
 }
 
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_crt_dpms(struct drm_connector *connector, int mode)
-{
-	struct drm_device *dev = connector->dev;
-	struct intel_encoder *encoder = intel_attached_encoder(connector);
-	struct drm_crtc *crtc;
-	int old_dpms;
-
-	/* PCH platforms and VLV only support on/off. */
-	if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
-
-	if (mode == connector->dpms)
-		return;
-
-	old_dpms = connector->dpms;
-	connector->dpms = mode;
-
-	/* Only need to change hw state when actually enabled */
-	crtc = encoder->base.crtc;
-	if (!crtc) {
-		encoder->connectors_active = false;
-		return;
-	}
-
-	/* We need the pipe to run for anything but OFF. */
-	if (mode == DRM_MODE_DPMS_OFF)
-		encoder->connectors_active = false;
-	else
-		encoder->connectors_active = true;
-
-	/* We call connector dpms manually below in case pipe dpms doesn't
-	 * change due to cloning. */
-	if (mode < old_dpms) {
-		/* From off to on, enable the pipe first. */
-		intel_crtc_update_dpms(crtc);
-
-		intel_crt_set_dpms(encoder, mode);
-	} else {
-		intel_crt_set_dpms(encoder, mode);
-
-		intel_crtc_update_dpms(crtc);
-	}
-
-	intel_modeset_check_state(connector->dev);
-}
-
 static enum drm_mode_status
 intel_crt_mode_valid(struct drm_connector *connector,
 		     struct drm_display_mode *mode)
@@ -798,7 +751,7 @@
 
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
 	.reset = intel_crt_reset,
-	.dpms = intel_crt_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_crt_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = intel_crt_destroy,
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index bcb41e6..ba1ae03 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -244,7 +244,7 @@
 void intel_csr_load_program(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	__be32 *payload = dev_priv->csr.dmc_payload;
+	u32 *payload = dev_priv->csr.dmc_payload;
 	uint32_t i, fw_size;
 
 	if (!IS_GEN9(dev)) {
@@ -256,7 +256,7 @@
 	fw_size = dev_priv->csr.dmc_fw_size;
 	for (i = 0; i < fw_size; i++)
 		I915_WRITE(CSR_PROGRAM_BASE + i * 4,
-			(u32 __force)payload[i]);
+			payload[i]);
 
 	for (i = 0; i < dev_priv->csr.mmio_count; i++) {
 		I915_WRITE(dev_priv->csr.mmioaddr[i],
@@ -279,7 +279,7 @@
 	char substepping = intel_get_substepping(dev);
 	uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
 	uint32_t i;
-	__be32 *dmc_payload;
+	uint32_t *dmc_payload;
 	bool fw_loaded = false;
 
 	if (!fw) {
@@ -375,20 +375,13 @@
 	}
 
 	dmc_payload = csr->dmc_payload;
-	for (i = 0; i < dmc_header->fw_size; i++) {
-		uint32_t *tmp = (u32 *)&fw->data[readcount + i * 4];
-		/*
-		 * The firmware payload is an array of 32 bit words stored in
-		 * little-endian format in the firmware image and programmed
-		 * as 32 bit big-endian format to memory.
-		 */
-		dmc_payload[i] = cpu_to_be32(*tmp);
-	}
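+	/*
+	 * The firmware payload is an array of 32 bit words; program it
+	 * verbatim, no byte swapping is needed.
+	 */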
+	memcpy(dmc_payload, &fw->data[readcount], nbytes);
 
 	/* load csr program during system boot, as needed for DC states */
 	intel_csr_load_program(dev);
 	fw_loaded = true;
 
+	DRM_DEBUG_KMS("Finished loading %s\n", dev_priv->csr.fw_path);
 out:
 	if (fw_loaded)
 		intel_runtime_pm_put(dev_priv);
@@ -422,6 +415,8 @@
 		return;
 	}
 
+	DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
+
 	/*
 	 * Obtain a runtime pm reference, until CSR is loaded,
 	 * to avoid entering runtime-suspend.
@@ -459,7 +454,8 @@
 
 void assert_csr_loaded(struct drm_i915_private *dev_priv)
 {
-	WARN((intel_csr_load_status_get(dev_priv) != FW_LOADED), "CSR is not loaded.\n");
+	WARN(intel_csr_load_status_get(dev_priv) != FW_LOADED,
+	     "CSR is not loaded.\n");
 	WARN(!I915_READ(CSR_PROGRAM_BASE),
 				"CSR program storage start is NULL\n");
 	WARN(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index cacb07b..61575f6 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -31,6 +31,7 @@
 struct ddi_buf_trans {
 	u32 trans1;	/* balance leg enable, de-emph level */
 	u32 trans2;	/* vref sel, vswing */
+	u8 i_boost;	/* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
 };
 
 /* HDMI/DVI modes ignore everything but the last 2 items. So we share
@@ -38,134 +39,213 @@
  * automatically adapt to HDMI connections as well
  */
 static const struct ddi_buf_trans hsw_ddi_translations_dp[] = {
-	{ 0x00FFFFFF, 0x0006000E },
-	{ 0x00D75FFF, 0x0005000A },
-	{ 0x00C30FFF, 0x00040006 },
-	{ 0x80AAAFFF, 0x000B0000 },
-	{ 0x00FFFFFF, 0x0005000A },
-	{ 0x00D75FFF, 0x000C0004 },
-	{ 0x80C30FFF, 0x000B0000 },
-	{ 0x00FFFFFF, 0x00040006 },
-	{ 0x80D75FFF, 0x000B0000 },
+	{ 0x00FFFFFF, 0x0006000E, 0x0 },
+	{ 0x00D75FFF, 0x0005000A, 0x0 },
+	{ 0x00C30FFF, 0x00040006, 0x0 },
+	{ 0x80AAAFFF, 0x000B0000, 0x0 },
+	{ 0x00FFFFFF, 0x0005000A, 0x0 },
+	{ 0x00D75FFF, 0x000C0004, 0x0 },
+	{ 0x80C30FFF, 0x000B0000, 0x0 },
+	{ 0x00FFFFFF, 0x00040006, 0x0 },
+	{ 0x80D75FFF, 0x000B0000, 0x0 },
 };
 
 static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = {
-	{ 0x00FFFFFF, 0x0007000E },
-	{ 0x00D75FFF, 0x000F000A },
-	{ 0x00C30FFF, 0x00060006 },
-	{ 0x00AAAFFF, 0x001E0000 },
-	{ 0x00FFFFFF, 0x000F000A },
-	{ 0x00D75FFF, 0x00160004 },
-	{ 0x00C30FFF, 0x001E0000 },
-	{ 0x00FFFFFF, 0x00060006 },
-	{ 0x00D75FFF, 0x001E0000 },
+	{ 0x00FFFFFF, 0x0007000E, 0x0 },
+	{ 0x00D75FFF, 0x000F000A, 0x0 },
+	{ 0x00C30FFF, 0x00060006, 0x0 },
+	{ 0x00AAAFFF, 0x001E0000, 0x0 },
+	{ 0x00FFFFFF, 0x000F000A, 0x0 },
+	{ 0x00D75FFF, 0x00160004, 0x0 },
+	{ 0x00C30FFF, 0x001E0000, 0x0 },
+	{ 0x00FFFFFF, 0x00060006, 0x0 },
+	{ 0x00D75FFF, 0x001E0000, 0x0 },
 };
 
 static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = {
 					/* Idx	NT mV d	T mV d	db	*/
-	{ 0x00FFFFFF, 0x0006000E },	/* 0:	400	400	0	*/
-	{ 0x00E79FFF, 0x000E000C },	/* 1:	400	500	2	*/
-	{ 0x00D75FFF, 0x0005000A },	/* 2:	400	600	3.5	*/
-	{ 0x00FFFFFF, 0x0005000A },	/* 3:	600	600	0	*/
-	{ 0x00E79FFF, 0x001D0007 },	/* 4:	600	750	2	*/
-	{ 0x00D75FFF, 0x000C0004 },	/* 5:	600	900	3.5	*/
-	{ 0x00FFFFFF, 0x00040006 },	/* 6:	800	800	0	*/
-	{ 0x80E79FFF, 0x00030002 },	/* 7:	800	1000	2	*/
-	{ 0x00FFFFFF, 0x00140005 },	/* 8:	850	850	0	*/
-	{ 0x00FFFFFF, 0x000C0004 },	/* 9:	900	900	0	*/
-	{ 0x00FFFFFF, 0x001C0003 },	/* 10:	950	950	0	*/
-	{ 0x80FFFFFF, 0x00030002 },	/* 11:	1000	1000	0	*/
+	{ 0x00FFFFFF, 0x0006000E, 0x0 },/* 0:	400	400	0	*/
+	{ 0x00E79FFF, 0x000E000C, 0x0 },/* 1:	400	500	2	*/
+	{ 0x00D75FFF, 0x0005000A, 0x0 },/* 2:	400	600	3.5	*/
+	{ 0x00FFFFFF, 0x0005000A, 0x0 },/* 3:	600	600	0	*/
+	{ 0x00E79FFF, 0x001D0007, 0x0 },/* 4:	600	750	2	*/
+	{ 0x00D75FFF, 0x000C0004, 0x0 },/* 5:	600	900	3.5	*/
+	{ 0x00FFFFFF, 0x00040006, 0x0 },/* 6:	800	800	0	*/
+	{ 0x80E79FFF, 0x00030002, 0x0 },/* 7:	800	1000	2	*/
+	{ 0x00FFFFFF, 0x00140005, 0x0 },/* 8:	850	850	0	*/
+	{ 0x00FFFFFF, 0x000C0004, 0x0 },/* 9:	900	900	0	*/
+	{ 0x00FFFFFF, 0x001C0003, 0x0 },/* 10:	950	950	0	*/
+	{ 0x80FFFFFF, 0x00030002, 0x0 },/* 11:	1000	1000	0	*/
 };
 
 static const struct ddi_buf_trans bdw_ddi_translations_edp[] = {
-	{ 0x00FFFFFF, 0x00000012 },
-	{ 0x00EBAFFF, 0x00020011 },
-	{ 0x00C71FFF, 0x0006000F },
-	{ 0x00AAAFFF, 0x000E000A },
-	{ 0x00FFFFFF, 0x00020011 },
-	{ 0x00DB6FFF, 0x0005000F },
-	{ 0x00BEEFFF, 0x000A000C },
-	{ 0x00FFFFFF, 0x0005000F },
-	{ 0x00DB6FFF, 0x000A000C },
+	{ 0x00FFFFFF, 0x00000012, 0x0 },
+	{ 0x00EBAFFF, 0x00020011, 0x0 },
+	{ 0x00C71FFF, 0x0006000F, 0x0 },
+	{ 0x00AAAFFF, 0x000E000A, 0x0 },
+	{ 0x00FFFFFF, 0x00020011, 0x0 },
+	{ 0x00DB6FFF, 0x0005000F, 0x0 },
+	{ 0x00BEEFFF, 0x000A000C, 0x0 },
+	{ 0x00FFFFFF, 0x0005000F, 0x0 },
+	{ 0x00DB6FFF, 0x000A000C, 0x0 },
 };
 
 static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
-	{ 0x00FFFFFF, 0x0007000E },
-	{ 0x00D75FFF, 0x000E000A },
-	{ 0x00BEFFFF, 0x00140006 },
-	{ 0x80B2CFFF, 0x001B0002 },
-	{ 0x00FFFFFF, 0x000E000A },
-	{ 0x00DB6FFF, 0x00160005 },
-	{ 0x80C71FFF, 0x001A0002 },
-	{ 0x00F7DFFF, 0x00180004 },
-	{ 0x80D75FFF, 0x001B0002 },
+	{ 0x00FFFFFF, 0x0007000E, 0x0 },
+	{ 0x00D75FFF, 0x000E000A, 0x0 },
+	{ 0x00BEFFFF, 0x00140006, 0x0 },
+	{ 0x80B2CFFF, 0x001B0002, 0x0 },
+	{ 0x00FFFFFF, 0x000E000A, 0x0 },
+	{ 0x00DB6FFF, 0x00160005, 0x0 },
+	{ 0x80C71FFF, 0x001A0002, 0x0 },
+	{ 0x00F7DFFF, 0x00180004, 0x0 },
+	{ 0x80D75FFF, 0x001B0002, 0x0 },
 };
 
 static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = {
-	{ 0x00FFFFFF, 0x0001000E },
-	{ 0x00D75FFF, 0x0004000A },
-	{ 0x00C30FFF, 0x00070006 },
-	{ 0x00AAAFFF, 0x000C0000 },
-	{ 0x00FFFFFF, 0x0004000A },
-	{ 0x00D75FFF, 0x00090004 },
-	{ 0x00C30FFF, 0x000C0000 },
-	{ 0x00FFFFFF, 0x00070006 },
-	{ 0x00D75FFF, 0x000C0000 },
+	{ 0x00FFFFFF, 0x0001000E, 0x0 },
+	{ 0x00D75FFF, 0x0004000A, 0x0 },
+	{ 0x00C30FFF, 0x00070006, 0x0 },
+	{ 0x00AAAFFF, 0x000C0000, 0x0 },
+	{ 0x00FFFFFF, 0x0004000A, 0x0 },
+	{ 0x00D75FFF, 0x00090004, 0x0 },
+	{ 0x00C30FFF, 0x000C0000, 0x0 },
+	{ 0x00FFFFFF, 0x00070006, 0x0 },
+	{ 0x00D75FFF, 0x000C0000, 0x0 },
 };
 
 static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
 					/* Idx	NT mV d	T mV df	db	*/
-	{ 0x00FFFFFF, 0x0007000E },	/* 0:	400	400	0	*/
-	{ 0x00D75FFF, 0x000E000A },	/* 1:	400	600	3.5	*/
-	{ 0x00BEFFFF, 0x00140006 },	/* 2:	400	800	6	*/
-	{ 0x00FFFFFF, 0x0009000D },	/* 3:	450	450	0	*/
-	{ 0x00FFFFFF, 0x000E000A },	/* 4:	600	600	0	*/
-	{ 0x00D7FFFF, 0x00140006 },	/* 5:	600	800	2.5	*/
-	{ 0x80CB2FFF, 0x001B0002 },	/* 6:	600	1000	4.5	*/
-	{ 0x00FFFFFF, 0x00140006 },	/* 7:	800	800	0	*/
-	{ 0x80E79FFF, 0x001B0002 },	/* 8:	800	1000	2	*/
-	{ 0x80FFFFFF, 0x001B0002 },	/* 9:	1000	1000	0	*/
+	{ 0x00FFFFFF, 0x0007000E, 0x0 },/* 0:	400	400	0	*/
+	{ 0x00D75FFF, 0x000E000A, 0x0 },/* 1:	400	600	3.5	*/
+	{ 0x00BEFFFF, 0x00140006, 0x0 },/* 2:	400	800	6	*/
+	{ 0x00FFFFFF, 0x0009000D, 0x0 },/* 3:	450	450	0	*/
+	{ 0x00FFFFFF, 0x000E000A, 0x0 },/* 4:	600	600	0	*/
+	{ 0x00D7FFFF, 0x00140006, 0x0 },/* 5:	600	800	2.5	*/
+	{ 0x80CB2FFF, 0x001B0002, 0x0 },/* 6:	600	1000	4.5	*/
+	{ 0x00FFFFFF, 0x00140006, 0x0 },/* 7:	800	800	0	*/
+	{ 0x80E79FFF, 0x001B0002, 0x0 },/* 8:	800	1000	2	*/
+	{ 0x80FFFFFF, 0x001B0002, 0x0 },/* 9:	1000	1000	0	*/
 };
 
+/* Skylake H and S */
 static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
-	{ 0x00000018, 0x000000a2 },
-	{ 0x00004014, 0x0000009B },
-	{ 0x00006012, 0x00000088 },
-	{ 0x00008010, 0x00000087 },
-	{ 0x00000018, 0x0000009B },
-	{ 0x00004014, 0x00000088 },
-	{ 0x00006012, 0x00000087 },
-	{ 0x00000018, 0x00000088 },
-	{ 0x00004014, 0x00000087 },
+	{ 0x00002016, 0x000000A0, 0x0 },
+	{ 0x00005012, 0x0000009B, 0x0 },
+	{ 0x00007011, 0x00000088, 0x0 },
+	{ 0x00009010, 0x000000C7, 0x0 },
+	{ 0x00002016, 0x0000009B, 0x0 },
+	{ 0x00005012, 0x00000088, 0x0 },
+	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x00002016, 0x000000DF, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
 };
 
-/* eDP 1.4 low vswing translation parameters */
+/* Skylake U */
+static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
+	{ 0x0000201B, 0x000000A2, 0x0 },
+	{ 0x00005012, 0x00000088, 0x0 },
+	{ 0x00007011, 0x00000087, 0x0 },
+	{ 0x80009010, 0x000000C7, 0x1 },	/* Uses I_boost level 0x1 */
+	{ 0x0000201B, 0x0000009D, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
+	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x00002016, 0x00000088, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
+};
+
+/* Skylake Y */
+static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
+	{ 0x00000018, 0x000000A2, 0x0 },
+	{ 0x00005012, 0x00000088, 0x0 },
+	{ 0x00007011, 0x00000087, 0x0 },
+	{ 0x80009010, 0x000000C7, 0x3 },	/* Uses I_boost level 0x3 */
+	{ 0x00000018, 0x0000009D, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
+	{ 0x00007011, 0x000000C7, 0x0 },
+	{ 0x00000018, 0x00000088, 0x0 },
+	{ 0x00005012, 0x000000C7, 0x0 },
+};
+
+/*
+ * Skylake H and S
+ * eDP 1.4 low vswing translation parameters
+ */
 static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
-	{ 0x00000018, 0x000000a8 },
-	{ 0x00002016, 0x000000ab },
-	{ 0x00006012, 0x000000a2 },
-	{ 0x00008010, 0x00000088 },
-	{ 0x00000018, 0x000000ab },
-	{ 0x00004014, 0x000000a2 },
-	{ 0x00006012, 0x000000a6 },
-	{ 0x00000018, 0x000000a2 },
-	{ 0x00005013, 0x0000009c },
-	{ 0x00000018, 0x00000088 },
+	{ 0x00000018, 0x000000A8, 0x0 },
+	{ 0x00004013, 0x000000A9, 0x0 },
+	{ 0x00007011, 0x000000A2, 0x0 },
+	{ 0x00009010, 0x0000009C, 0x0 },
+	{ 0x00000018, 0x000000A9, 0x0 },
+	{ 0x00006013, 0x000000A2, 0x0 },
+	{ 0x00007011, 0x000000A6, 0x0 },
+	{ 0x00000018, 0x000000AB, 0x0 },
+	{ 0x00007013, 0x0000009F, 0x0 },
+	{ 0x00000018, 0x000000DF, 0x0 },
 };
 
+/*
+ * Skylake U
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = {
+	{ 0x00000018, 0x000000A8, 0x0 },
+	{ 0x00004013, 0x000000A9, 0x0 },
+	{ 0x00007011, 0x000000A2, 0x0 },
+	{ 0x00009010, 0x0000009C, 0x0 },
+	{ 0x00000018, 0x000000A9, 0x0 },
+	{ 0x00006013, 0x000000A2, 0x0 },
+	{ 0x00007011, 0x000000A6, 0x0 },
+	{ 0x00002016, 0x000000AB, 0x0 },
+	{ 0x00005013, 0x0000009F, 0x0 },
+	{ 0x00000018, 0x000000DF, 0x0 },
+};
 
+/*
+ * Skylake Y
+ * eDP 1.4 low vswing translation parameters
+ */
+static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
+	{ 0x00000018, 0x000000A8, 0x0 },
+	{ 0x00004013, 0x000000AB, 0x0 },
+	{ 0x00007011, 0x000000A4, 0x0 },
+	{ 0x00009010, 0x000000DF, 0x0 },
+	{ 0x00000018, 0x000000AA, 0x0 },
+	{ 0x00006013, 0x000000A4, 0x0 },
+	{ 0x00007011, 0x0000009D, 0x0 },
+	{ 0x00000018, 0x000000A0, 0x0 },
+	{ 0x00006012, 0x000000DF, 0x0 },
+	{ 0x00000018, 0x0000008A, 0x0 },
+};
+
+/* Skylake U, H and S */
 static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
-	{ 0x00000018, 0x000000ac },
-	{ 0x00005012, 0x0000009d },
-	{ 0x00007011, 0x00000088 },
-	{ 0x00000018, 0x000000a1 },
-	{ 0x00000018, 0x00000098 },
-	{ 0x00004013, 0x00000088 },
-	{ 0x00006012, 0x00000087 },
-	{ 0x00000018, 0x000000df },
-	{ 0x00003015, 0x00000087 },
-	{ 0x00003015, 0x000000c7 },
-	{ 0x00000018, 0x000000c7 },
+	{ 0x00000018, 0x000000AC, 0x0 },
+	{ 0x00005012, 0x0000009D, 0x0 },
+	{ 0x00007011, 0x00000088, 0x0 },
+	{ 0x00000018, 0x000000A1, 0x0 },
+	{ 0x00000018, 0x00000098, 0x0 },
+	{ 0x00004013, 0x00000088, 0x0 },
+	{ 0x00006012, 0x00000087, 0x0 },
+	{ 0x00000018, 0x000000DF, 0x0 },
+	{ 0x00003015, 0x00000087, 0x0 },	/* Default */
+	{ 0x00003015, 0x000000C7, 0x0 },
+	{ 0x00000018, 0x000000C7, 0x0 },
+};
+
+/* Skylake Y */
+static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
+	{ 0x00000018, 0x000000A1, 0x0 },
+	{ 0x00005012, 0x000000DF, 0x0 },
+	{ 0x00007011, 0x00000084, 0x0 },
+	{ 0x00000018, 0x000000A4, 0x0 },
+	{ 0x00000018, 0x0000009D, 0x0 },
+	{ 0x00004013, 0x00000080, 0x0 },
+	{ 0x00006013, 0x000000C7, 0x0 },
+	{ 0x00000018, 0x0000008A, 0x0 },
+	{ 0x00003015, 0x000000C7, 0x0 },	/* Default */
+	{ 0x80003015, 0x000000C7, 0x7 },	/* Uses I_boost level 0x7 */
+	{ 0x00000018, 0x000000C7, 0x0 },
 };
 
 struct bxt_ddi_buf_trans {
@@ -181,16 +261,16 @@
  */
 static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
 					/* Idx	NT mV diff	db  */
-	{ 52,  0,    0, 128, true  },	/* 0:	400		0   */
-	{ 78,  0,    0, 85,  false },	/* 1:	400		3.5 */
-	{ 104, 0,    0, 64,  false },	/* 2:	400		6   */
-	{ 154, 0,    0, 43,  false },	/* 3:	400		9.5 */
-	{ 77,  0,    0, 128, false },	/* 4:	600		0   */
-	{ 116, 0,    0, 85,  false },	/* 5:	600		3.5 */
-	{ 154, 0,    0, 64,  false },	/* 6:	600		6   */
-	{ 102, 0,    0, 128, false },	/* 7:	800		0   */
-	{ 154, 0,    0, 85,  false },	/* 8:	800		3.5 */
-	{ 154, 0x9A, 1, 128, false },  /* 9:	1200		0   */
+	{ 52,  0x9A, 0, 128, true  },	/* 0:	400		0   */
+	{ 78,  0x9A, 0, 85,  false },	/* 1:	400		3.5 */
+	{ 104, 0x9A, 0, 64,  false },	/* 2:	400		6   */
+	{ 154, 0x9A, 0, 43,  false },	/* 3:	400		9.5 */
+	{ 77,  0x9A, 0, 128, false },	/* 4:	600		0   */
+	{ 116, 0x9A, 0, 85,  false },	/* 5:	600		3.5 */
+	{ 154, 0x9A, 0, 64,  false },	/* 6:	600		6   */
+	{ 102, 0x9A, 0, 128, false },	/* 7:	800		0   */
+	{ 154, 0x9A, 0, 85,  false },	/* 8:	800		3.5 */
+	{ 154, 0x9A, 1, 128, false },	/* 9:	1200		0   */
 };
 
 /* BSpec has 2 recommended values - entries 0 and 8.
@@ -198,18 +278,21 @@
  */
 static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
 					/* Idx	NT mV diff	db  */
-	{ 52,  0,    0, 128, false },	/* 0:	400		0   */
-	{ 52,  0,    0, 85,  false },	/* 1:	400		3.5 */
-	{ 52,  0,    0, 64,  false },	/* 2:	400		6   */
-	{ 42,  0,    0, 43,  false },	/* 3:	400		9.5 */
-	{ 77,  0,    0, 128, false },	/* 4:	600		0   */
-	{ 77,  0,    0, 85,  false },	/* 5:	600		3.5 */
-	{ 77,  0,    0, 64,  false },	/* 6:	600		6   */
-	{ 102, 0,    0, 128, false },	/* 7:	800		0   */
-	{ 102, 0,    0, 85,  false },	/* 8:	800		3.5 */
+	{ 52,  0x9A, 0, 128, false },	/* 0:	400		0   */
+	{ 52,  0x9A, 0, 85,  false },	/* 1:	400		3.5 */
+	{ 52,  0x9A, 0, 64,  false },	/* 2:	400		6   */
+	{ 42,  0x9A, 0, 43,  false },	/* 3:	400		9.5 */
+	{ 77,  0x9A, 0, 128, false },	/* 4:	600		0   */
+	{ 77,  0x9A, 0, 85,  false },	/* 5:	600		3.5 */
+	{ 77,  0x9A, 0, 64,  false },	/* 6:	600		6   */
+	{ 102, 0x9A, 0, 128, false },	/* 7:	800		0   */
+	{ 102, 0x9A, 0, 85,  false },	/* 8:	800		3.5 */
 	{ 154, 0x9A, 1, 128, true },	/* 9:	1200		0   */
 };
 
+static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
+				    enum port port, int type);
+
 static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
 				 struct intel_digital_port **dig_port,
 				 enum port *port)
@@ -249,6 +332,77 @@
 	return intel_dig_port->hdmi.hdmi_reg;
 }
 
+static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
+							int *n_entries)
+{
+	const struct ddi_buf_trans *ddi_translations;
+
+	if (IS_SKL_ULX(dev)) {
+		ddi_translations = skl_y_ddi_translations_dp;
+		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
+	} else if (IS_SKL_ULT(dev)) {
+		ddi_translations = skl_u_ddi_translations_dp;
+		*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+	} else {
+		ddi_translations = skl_ddi_translations_dp;
+		*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+	}
+
+	return ddi_translations;
+}
+
+static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
+							 int *n_entries)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct ddi_buf_trans *ddi_translations;
+
+	if (IS_SKL_ULX(dev)) {
+		if (dev_priv->edp_low_vswing) {
+			ddi_translations = skl_y_ddi_translations_edp;
+			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
+		} else {
+			ddi_translations = skl_y_ddi_translations_dp;
+			*n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
+		}
+	} else if (IS_SKL_ULT(dev)) {
+		if (dev_priv->edp_low_vswing) {
+			ddi_translations = skl_u_ddi_translations_edp;
+			*n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
+		} else {
+			ddi_translations = skl_u_ddi_translations_dp;
+			*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+		}
+	} else {
+		if (dev_priv->edp_low_vswing) {
+			ddi_translations = skl_ddi_translations_edp;
+			*n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
+		} else {
+			ddi_translations = skl_ddi_translations_dp;
+			*n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+		}
+	}
+
+	return ddi_translations;
+}
+
+static const struct ddi_buf_trans *
+skl_get_buf_trans_hdmi(struct drm_device *dev,
+		       int *n_entries)
+{
+	const struct ddi_buf_trans *ddi_translations;
+
+	if (IS_SKL_ULX(dev)) {
+		ddi_translations = skl_y_ddi_translations_hdmi;
+		*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
+	} else {
+		ddi_translations = skl_ddi_translations_hdmi;
+		*n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+	}
+
+	return ddi_translations;
+}
+
 /*
  * Starting with Haswell, DDI port buffers must be programmed with correct
  * values in advance. The buffer values are different for FDI and DP modes,
@@ -261,6 +415,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg;
+	u32 iboost_bit = 0;
 	int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
 	    size;
 	int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
@@ -280,19 +435,17 @@
 		return;
 	} else if (IS_SKYLAKE(dev)) {
 		ddi_translations_fdi = NULL;
-		ddi_translations_dp = skl_ddi_translations_dp;
-		n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
-		if (dev_priv->edp_low_vswing) {
-			ddi_translations_edp = skl_ddi_translations_edp;
-			n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp);
-		} else {
-			ddi_translations_edp = skl_ddi_translations_dp;
-			n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
-		}
-
-		ddi_translations_hdmi = skl_ddi_translations_hdmi;
-		n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
-		hdmi_default_entry = 7;
+		ddi_translations_dp =
+				skl_get_buf_trans_dp(dev, &n_dp_entries);
+		ddi_translations_edp =
+				skl_get_buf_trans_edp(dev, &n_edp_entries);
+		ddi_translations_hdmi =
+				skl_get_buf_trans_hdmi(dev, &n_hdmi_entries);
+		hdmi_default_entry = 8;
+		/* If we're boosting the current, set bit 31 of trans1 */
+		if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
+		    dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+			iboost_bit = 1<<31;
 	} else if (IS_BROADWELL(dev)) {
 		ddi_translations_fdi = bdw_ddi_translations_fdi;
 		ddi_translations_dp = bdw_ddi_translations_dp;
@@ -353,7 +506,7 @@
 	}
 
 	for (i = 0, reg = DDI_BUF_TRANS(port); i < size; i++) {
-		I915_WRITE(reg, ddi_translations[i].trans1);
+		I915_WRITE(reg, ddi_translations[i].trans1 | iboost_bit);
 		reg += 4;
 		I915_WRITE(reg, ddi_translations[i].trans2);
 		reg += 4;
@@ -368,7 +521,7 @@
 		hdmi_level = hdmi_default_entry;
 
 	/* Entry 9 is for HDMI: */
-	I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1);
+	I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
 	reg += 4;
 	I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans2);
 	reg += 4;
@@ -625,11 +778,11 @@
 	(void) (&__a == &__b);			\
 	__a > __b ? (__a - __b) : (__b - __a); })
 
-struct wrpll_rnp {
+struct hsw_wrpll_rnp {
 	unsigned p, n2, r2;
 };
 
-static unsigned wrpll_get_budget_for_freq(int clock)
+static unsigned hsw_wrpll_get_budget_for_freq(int clock)
 {
 	unsigned budget;
 
@@ -703,9 +856,9 @@
 	return budget;
 }
 
-static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
-			     unsigned r2, unsigned n2, unsigned p,
-			     struct wrpll_rnp *best)
+static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
+				 unsigned r2, unsigned n2, unsigned p,
+				 struct hsw_wrpll_rnp *best)
 {
 	uint64_t a, b, c, d, diff, diff_best;
 
@@ -762,8 +915,7 @@
 	/* Otherwise a < c && b >= d, do nothing */
 }
 
-static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
-				     int reg)
+static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg)
 {
 	int refclk = LC_FREQ;
 	int n, p, r;
@@ -856,6 +1008,26 @@
 	return dco_freq / (p0 * p1 * p2 * 5);
 }
 
+static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
+{
+	int dotclock;
+
+	if (pipe_config->has_pch_encoder)
+		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+						    &pipe_config->fdi_m_n);
+	else if (pipe_config->has_dp_encoder)
+		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+						    &pipe_config->dp_m_n);
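+	/*
+	 * 12bpc HDMI (pipe_bpp == 36) needs a TMDS clock 1.5x the pixel
+	 * rate, so the pixel clock is the port clock times 2/3.
+	 */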
+	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
+		dotclock = pipe_config->port_clock * 2 / 3;
+	else
+		dotclock = pipe_config->port_clock;
+
+	if (pipe_config->pixel_multiplier)
+		dotclock /= pipe_config->pixel_multiplier;
+
+	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+}
 
 static void skl_ddi_clock_get(struct intel_encoder *encoder,
 				struct intel_crtc_state *pipe_config)
@@ -902,12 +1074,7 @@
 
 	pipe_config->port_clock = link_clock;
 
-	if (pipe_config->has_dp_encoder)
-		pipe_config->base.adjusted_mode.crtc_clock =
-			intel_dotclock_calculate(pipe_config->port_clock,
-						 &pipe_config->dp_m_n);
-	else
-		pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+	ddi_dotclock_get(pipe_config);
 }
 
 static void hsw_ddi_clock_get(struct intel_encoder *encoder,
@@ -929,10 +1096,10 @@
 		link_clock = 270000;
 		break;
 	case PORT_CLK_SEL_WRPLL1:
-		link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
+		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1);
 		break;
 	case PORT_CLK_SEL_WRPLL2:
-		link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
+		link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2);
 		break;
 	case PORT_CLK_SEL_SPLL:
 		pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
@@ -954,23 +1121,32 @@
 
 	pipe_config->port_clock = link_clock * 2;
 
-	if (pipe_config->has_pch_encoder)
-		pipe_config->base.adjusted_mode.crtc_clock =
-			intel_dotclock_calculate(pipe_config->port_clock,
-						 &pipe_config->fdi_m_n);
-	else if (pipe_config->has_dp_encoder)
-		pipe_config->base.adjusted_mode.crtc_clock =
-			intel_dotclock_calculate(pipe_config->port_clock,
-						 &pipe_config->dp_m_n);
-	else
-		pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+	ddi_dotclock_get(pipe_config);
 }
 
 static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
 				enum intel_dpll_id dpll)
 {
-	/* FIXME formula not available in bspec */
-	return 0;
+	struct intel_shared_dpll *pll;
+	struct intel_dpll_hw_state *state;
+	intel_clock_t clock;
+
+	/* For DDI ports we always use a shared PLL. */
+	if (WARN_ON(dpll == DPLL_ID_PRIVATE))
+		return 0;
+
+	pll = &dev_priv->shared_dplls[dpll];
+	state = &pll->config.hw_state;
+
+	clock.m1 = 2;
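+	/*
+	 * m2 is a fixed-point value: the integer part is shifted up by 22
+	 * bits and the fractional part, when enabled, comes from pll2.
+	 */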
+	clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
+	if (state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+		clock.m2 |= state->pll2 & PORT_PLL_M2_FRAC_MASK;
+	clock.n = (state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+	clock.p1 = (state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+	clock.p2 = (state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+
+	return chv_calc_dpll_params(100000, &clock);
 }
 
 static void bxt_ddi_clock_get(struct intel_encoder *encoder,
@@ -980,16 +1156,9 @@
 	enum port port = intel_ddi_get_encoder_port(encoder);
 	uint32_t dpll = port;
 
-	pipe_config->port_clock =
-		bxt_calc_pll_link(dev_priv, dpll);
+	pipe_config->port_clock = bxt_calc_pll_link(dev_priv, dpll);
 
-	if (pipe_config->has_dp_encoder)
-		pipe_config->base.adjusted_mode.crtc_clock =
-			intel_dotclock_calculate(pipe_config->port_clock,
-							&pipe_config->dp_m_n);
-	else
-		pipe_config->base.adjusted_mode.crtc_clock =
-							pipe_config->port_clock;
+	ddi_dotclock_get(pipe_config);
 }
 
 void intel_ddi_clock_get(struct intel_encoder *encoder,
@@ -1011,12 +1180,12 @@
 {
 	uint64_t freq2k;
 	unsigned p, n2, r2;
-	struct wrpll_rnp best = { 0, 0, 0 };
+	struct hsw_wrpll_rnp best = { 0, 0, 0 };
 	unsigned budget;
 
 	freq2k = clock / 100;
 
-	budget = wrpll_get_budget_for_freq(clock);
+	budget = hsw_wrpll_get_budget_for_freq(clock);
 
 	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
 	 * and directly pass the LC PLL to it. */
@@ -1060,8 +1229,8 @@
 		     n2++) {
 
 			for (p = P_MIN; p <= P_MAX; p += P_INC)
-				wrpll_update_rnp(freq2k, budget,
-						 r2, n2, p, &best);
+				hsw_wrpll_update_rnp(freq2k, budget,
+						     r2, n2, p, &best);
 		}
 	}
 
@@ -1105,6 +1274,102 @@
 	return true;
 }
 
+struct skl_wrpll_context {
+	uint64_t min_deviation;		/* current minimal deviation */
+	uint64_t central_freq;		/* chosen central freq */
+	uint64_t dco_freq;		/* chosen dco freq */
+	unsigned int p;			/* chosen divider */
+};
+
+static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
+{
+	memset(ctx, 0, sizeof(*ctx));
+
+	ctx->min_deviation = U64_MAX;
+}
+
+/* DCO freq must be within +1%/-6%  of the DCO central freq */
+#define SKL_DCO_MAX_PDEVIATION	100
+#define SKL_DCO_MAX_NDEVIATION	600
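+/*
+ * skl_wrpll_try_divider() computes the deviation as
+ * 10000 * |dco_freq - central_freq| / central_freq, i.e. in units of
+ * 0.01%, so 100 corresponds to +1% and 600 to -6%.
+ */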
+
+static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
+				  uint64_t central_freq,
+				  uint64_t dco_freq,
+				  unsigned int divider)
+{
+	uint64_t deviation;
+
+	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
+			      central_freq);
+
+	/* positive deviation */
+	if (dco_freq >= central_freq) {
+		if (deviation < SKL_DCO_MAX_PDEVIATION &&
+		    deviation < ctx->min_deviation) {
+			ctx->min_deviation = deviation;
+			ctx->central_freq = central_freq;
+			ctx->dco_freq = dco_freq;
+			ctx->p = divider;
+		}
+	/* negative deviation */
+	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
+		   deviation < ctx->min_deviation) {
+		ctx->min_deviation = deviation;
+		ctx->central_freq = central_freq;
+		ctx->dco_freq = dco_freq;
+		ctx->p = divider;
+	}
+}
+
+static void skl_wrpll_get_multipliers(unsigned int p,
+				      unsigned int *p0 /* out */,
+				      unsigned int *p1 /* out */,
+				      unsigned int *p2 /* out */)
+{
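+	/*
+	 * Example: p = 20 -> half = 10, which is even, so the half % 2 == 0
+	 * branch yields p0 = 2, p1 = 5, p2 = 2, and indeed 2 * 5 * 2 = 20.
+	 */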
+	/* even dividers */
+	if (p % 2 == 0) {
+		unsigned int half = p / 2;
+
+		if (half == 1 || half == 2 || half == 3 || half == 5) {
+			*p0 = 2;
+			*p1 = 1;
+			*p2 = half;
+		} else if (half % 2 == 0) {
+			*p0 = 2;
+			*p1 = half / 2;
+			*p2 = 2;
+		} else if (half % 3 == 0) {
+			*p0 = 3;
+			*p1 = half / 3;
+			*p2 = 2;
+		} else if (half % 7 == 0) {
+			*p0 = 7;
+			*p1 = half / 7;
+			*p2 = 2;
+		}
+	} else if (p == 3 || p == 9) {  /* 3, 5, 7, 9, 15, 21, 35 */
+		*p0 = 3;
+		*p1 = 1;
+		*p2 = p / 3;
+	} else if (p == 5 || p == 7) {
+		*p0 = p;
+		*p1 = 1;
+		*p2 = 1;
+	} else if (p == 15) {
+		*p0 = 3;
+		*p1 = 1;
+		*p2 = 5;
+	} else if (p == 21) {
+		*p0 = 7;
+		*p1 = 1;
+		*p2 = 3;
+	} else if (p == 35) {
+		*p0 = 7;
+		*p1 = 1;
+		*p2 = 5;
+	}
+}
+
 struct skl_wrpll_params {
 	uint32_t        dco_fraction;
 	uint32_t        dco_integer;
@@ -1115,7 +1380,74 @@
 	uint32_t        central_freq;
 };
 
-static void
+static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
+				      uint64_t afe_clock,
+				      uint64_t central_freq,
+				      uint32_t p0, uint32_t p1, uint32_t p2)
+{
+	uint64_t dco_freq;
+
+	switch (central_freq) {
+	case 9600000000ULL:
+		params->central_freq = 0;
+		break;
+	case 9000000000ULL:
+		params->central_freq = 1;
+		break;
+	case 8400000000ULL:
+		params->central_freq = 3;
+	}
+
+	switch (p0) {
+	case 1:
+		params->pdiv = 0;
+		break;
+	case 2:
+		params->pdiv = 1;
+		break;
+	case 3:
+		params->pdiv = 2;
+		break;
+	case 7:
+		params->pdiv = 4;
+		break;
+	default:
+		WARN(1, "Incorrect PDiv\n");
+	}
+
+	switch (p2) {
+	case 5:
+		params->kdiv = 0;
+		break;
+	case 2:
+		params->kdiv = 1;
+		break;
+	case 3:
+		params->kdiv = 2;
+		break;
+	case 1:
+		params->kdiv = 3;
+		break;
+	default:
+		WARN(1, "Incorrect KDiv\n");
+	}
+
+	params->qdiv_ratio = p1;
+	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
+
+	dco_freq = p0 * p1 * p2 * afe_clock;
+
+	/*
+	 * Intermediate values are in Hz.
+	 * Divide by MHz to match bspec
+	 */
+	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
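+	/* dco_fraction is frac(dco_freq / 24 MHz) scaled by 2^15 (0x8000) */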
+	params->dco_fraction =
+		div_u64((div_u64(dco_freq, 24) -
+			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
+}
+
+static bool
 skl_ddi_calculate_wrpll(int clock /* in Hz */,
 			struct skl_wrpll_params *wrpll_params)
 {
@@ -1123,142 +1455,70 @@
 	uint64_t dco_central_freq[3] = {8400000000ULL,
 					9000000000ULL,
 					9600000000ULL};
-	uint32_t min_dco_deviation = 400;
-	uint32_t min_dco_index = 3;
-	uint32_t P0[4] = {1, 2, 3, 7};
-	uint32_t P2[4] = {1, 2, 3, 5};
-	bool found = false;
-	uint32_t candidate_p = 0;
-	uint32_t candidate_p0[3] = {0}, candidate_p1[3] = {0};
-	uint32_t candidate_p2[3] = {0};
-	uint32_t dco_central_freq_deviation[3];
-	uint32_t i, P1, k, dco_count;
-	bool retry_with_odd = false;
-	uint64_t dco_freq;
+	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
+					     24, 28, 30, 32, 36, 40, 42, 44,
+					     48, 52, 54, 56, 60, 64, 66, 68,
+					     70, 72, 76, 78, 80, 84, 88, 90,
+					     92, 96, 98 };
+	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
+	static const struct {
+		const int *list;
+		int n_dividers;
+	} dividers[] = {
+		{ even_dividers, ARRAY_SIZE(even_dividers) },
+		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
+	};
+	struct skl_wrpll_context ctx;
+	unsigned int dco, d, i;
+	unsigned int p0, p1, p2;
 
-	/* Determine P0, P1 or P2 */
-	for (dco_count = 0; dco_count < 3; dco_count++) {
-		found = false;
-		candidate_p =
-			div64_u64(dco_central_freq[dco_count], afe_clock);
-		if (retry_with_odd == false)
-			candidate_p = (candidate_p % 2 == 0 ?
-				candidate_p : candidate_p + 1);
+	skl_wrpll_context_init(&ctx);
 
-		for (P1 = 1; P1 < candidate_p; P1++) {
-			for (i = 0; i < 4; i++) {
-				if (!(P0[i] != 1 || P1 == 1))
-					continue;
+	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
+			for (i = 0; i < dividers[d].n_dividers; i++) {
+				unsigned int p = dividers[d].list[i];
+				uint64_t dco_freq = p * afe_clock;
 
-				for (k = 0; k < 4; k++) {
-					if (P1 != 1 && P2[k] != 2)
-						continue;
-
-					if (candidate_p == P0[i] * P1 * P2[k]) {
-						/* Found possible P0, P1, P2 */
-						found = true;
-						candidate_p0[dco_count] = P0[i];
-						candidate_p1[dco_count] = P1;
-						candidate_p2[dco_count] = P2[k];
-						goto found;
-					}
-
-				}
+				skl_wrpll_try_divider(&ctx,
+						      dco_central_freq[dco],
+						      dco_freq,
+						      p);
+				/*
+				 * Skip the remaining dividers if we're sure to
+				 * have found the definitive divider; we can't
+				 * improve on a deviation of 0.
+				 */
+				if (ctx.min_deviation == 0)
+					goto skip_remaining_dividers;
 			}
 		}
 
-found:
-		if (found) {
-			dco_central_freq_deviation[dco_count] =
-				div64_u64(10000 *
-					  abs_diff((candidate_p * afe_clock),
-						   dco_central_freq[dco_count]),
-					  dco_central_freq[dco_count]);
-
-			if (dco_central_freq_deviation[dco_count] <
-				min_dco_deviation) {
-				min_dco_deviation =
-					dco_central_freq_deviation[dco_count];
-				min_dco_index = dco_count;
-			}
-		}
-
-		if (min_dco_index > 2 && dco_count == 2) {
-			retry_with_odd = true;
-			dco_count = 0;
-		}
-	}
-
-	if (min_dco_index > 2) {
-		WARN(1, "No valid values found for the given pixel clock\n");
-	} else {
-		wrpll_params->central_freq = dco_central_freq[min_dco_index];
-
-		switch (dco_central_freq[min_dco_index]) {
-		case 9600000000ULL:
-			wrpll_params->central_freq = 0;
-			break;
-		case 9000000000ULL:
-			wrpll_params->central_freq = 1;
-			break;
-		case 8400000000ULL:
-			wrpll_params->central_freq = 3;
-		}
-
-		switch (candidate_p0[min_dco_index]) {
-		case 1:
-			wrpll_params->pdiv = 0;
-			break;
-		case 2:
-			wrpll_params->pdiv = 1;
-			break;
-		case 3:
-			wrpll_params->pdiv = 2;
-			break;
-		case 7:
-			wrpll_params->pdiv = 4;
-			break;
-		default:
-			WARN(1, "Incorrect PDiv\n");
-		}
-
-		switch (candidate_p2[min_dco_index]) {
-		case 5:
-			wrpll_params->kdiv = 0;
-			break;
-		case 2:
-			wrpll_params->kdiv = 1;
-			break;
-		case 3:
-			wrpll_params->kdiv = 2;
-			break;
-		case 1:
-			wrpll_params->kdiv = 3;
-			break;
-		default:
-			WARN(1, "Incorrect KDiv\n");
-		}
-
-		wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
-		wrpll_params->qdiv_mode =
-			(wrpll_params->qdiv_ratio == 1) ? 0 : 1;
-
-		dco_freq = candidate_p0[min_dco_index] *
-			candidate_p1[min_dco_index] *
-			candidate_p2[min_dco_index] * afe_clock;
-
+skip_remaining_dividers:
 		/*
-		 * Intermediate values are in Hz.
-		 * Divide by MHz to match bsepc
+		 * If a solution is found with an even divider, prefer
+		 * this one.
 		 */
-		wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
-		wrpll_params->dco_fraction =
-			div_u64(((div_u64(dco_freq, 24) -
-				  wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
-
+		if (d == 0 && ctx.p)
+			break;
 	}
-}
 
+	if (!ctx.p) {
+		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
+		return false;
+	}
+
+	/*
+	 * gcc incorrectly warns that these may be used without being
+	 * initialized. To be fair, it's hard to guess.
+	 */
+	p0 = p1 = p2 = 0;
+	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
+	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
+				  p0, p1, p2);
+
+	return true;
+}
 
 static bool
 skl_ddi_pll_select(struct intel_crtc *intel_crtc,
@@ -1281,7 +1541,8 @@
 
 		ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
 
-		skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params);
+		if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+			return false;
 
 		cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
 			 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
@@ -1293,17 +1554,14 @@
 			 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
 			 wrpll_params.central_freq;
 	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
-		struct drm_encoder *encoder = &intel_encoder->base;
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-		switch (intel_dp->link_bw) {
-		case DP_LINK_BW_1_62:
+		switch (crtc_state->port_clock / 2) {
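+		/*
+		 * port_clock is the DP link rate in kHz; 162000, 270000 and
+		 * 540000 kHz select the 810, 1350 and 2700 MHz DPLL link
+		 * rates respectively (the PLL runs at half the bit rate).
+		 */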
+		case 81000:
 			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
 			break;
-		case DP_LINK_BW_2_7:
+		case 135000:
 			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
 			break;
-		case DP_LINK_BW_5_4:
+		case 270000:
 			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
 			break;
 		}
@@ -1334,6 +1592,7 @@
 
 /* bxt clock parameters */
 struct bxt_clk_div {
+	int clock;
 	uint32_t p1;
 	uint32_t p2;
 	uint32_t m2_int;
@@ -1343,14 +1602,14 @@
 };
 
 /* pre-calculated values for DP linkrates */
-static struct bxt_clk_div bxt_dp_clk_val[7] = {
-	/* 162 */ {4, 2, 32, 1677722, 1, 1},
-	/* 270 */ {4, 1, 27,       0, 0, 1},
-	/* 540 */ {2, 1, 27,       0, 0, 1},
-	/* 216 */ {3, 2, 32, 1677722, 1, 1},
-	/* 243 */ {4, 1, 24, 1258291, 1, 1},
-	/* 324 */ {4, 1, 32, 1677722, 1, 1},
-	/* 432 */ {3, 1, 32, 1677722, 1, 1}
+static const struct bxt_clk_div bxt_dp_clk_val[] = {
+	{162000, 4, 2, 32, 1677722, 1, 1},
+	{270000, 4, 1, 27,       0, 0, 1},
+	{540000, 2, 1, 27,       0, 0, 1},
+	{216000, 3, 2, 32, 1677722, 1, 1},
+	{243000, 4, 1, 24, 1258291, 1, 1},
+	{324000, 4, 1, 32, 1677722, 1, 1},
+	{432000, 3, 1, 32, 1677722, 1, 1}
 };
 
 static bool
@@ -1363,7 +1622,7 @@
 	struct bxt_clk_div clk_div = {0};
 	int vco = 0;
 	uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
-	uint32_t dcoampovr_en_h, dco_amp, lanestagger;
+	uint32_t lanestagger;
 
 	if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
 		intel_clock_t best_clock;
@@ -1390,29 +1649,19 @@
 		vco = best_clock.vco;
 	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
 			intel_encoder->type == INTEL_OUTPUT_EDP) {
-		struct drm_encoder *encoder = &intel_encoder->base;
-		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+		int i;
 
-		switch (intel_dp->link_bw) {
-		case DP_LINK_BW_1_62:
-			clk_div = bxt_dp_clk_val[0];
-			break;
-		case DP_LINK_BW_2_7:
-			clk_div = bxt_dp_clk_val[1];
-			break;
-		case DP_LINK_BW_5_4:
-			clk_div = bxt_dp_clk_val[2];
-			break;
-		default:
-			clk_div = bxt_dp_clk_val[0];
-			DRM_ERROR("Unknown link rate\n");
+		clk_div = bxt_dp_clk_val[0];
+		for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
+			if (bxt_dp_clk_val[i].clock == clock) {
+				clk_div = bxt_dp_clk_val[i];
+				break;
+			}
 		}
 		vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
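+		/* e.g. a 270000 kHz link: 270000 * 10 / 2 * 4 * 1 = 5400000 kHz */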
 	}
 
-	dco_amp = 15;
-	dcoampovr_en_h = 0;
-	if (vco >= 6200000 && vco <= 6480000) {
+	if (vco >= 6200000 && vco <= 6700000) {
 		prop_coef = 4;
 		int_coef = 9;
 		gain_ctl = 3;
@@ -1423,8 +1672,6 @@
 		int_coef = 11;
 		gain_ctl = 3;
 		targ_cnt = 9;
-		if (vco >= 4800000 && vco < 5400000)
-			dcoampovr_en_h = 1;
 	} else if (vco == 5400000) {
 		prop_coef = 3;
 		int_coef = 8;
@@ -1466,10 +1713,13 @@
 
 	crtc_state->dpll_hw_state.pll8 = targ_cnt;
 
-	if (dcoampovr_en_h)
-		crtc_state->dpll_hw_state.pll10 = PORT_PLL_DCO_AMP_OVR_EN_H;
+	crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
 
-	crtc_state->dpll_hw_state.pll10 |= PORT_PLL_DCO_AMP(dco_amp);
+	crtc_state->dpll_hw_state.pll10 =
+		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
+		| PORT_PLL_DCO_AMP_OVR_EN_H;
+
+	crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
 
 	crtc_state->dpll_hw_state.pcsdw12 =
 		LANESTAGGER_STRAP_OVRD | lanestagger;
@@ -1799,8 +2049,65 @@
 			   TRANS_CLK_SEL_DISABLED);
 }
 
-void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
-			     enum port port, int type)
+static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
+			       enum port port, int type)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct ddi_buf_trans *ddi_translations;
+	uint8_t iboost;
+	uint8_t dp_iboost, hdmi_iboost;
+	int n_entries;
+	u32 reg;
+
+	/* VBT may override standard boost values */
+	dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
+	hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
+
+	if (type == INTEL_OUTPUT_DISPLAYPORT) {
+		if (dp_iboost) {
+			iboost = dp_iboost;
+		} else {
+			ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
+			iboost = ddi_translations[level].i_boost;
+		}
+	} else if (type == INTEL_OUTPUT_EDP) {
+		if (dp_iboost) {
+			iboost = dp_iboost;
+		} else {
+			ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
+			iboost = ddi_translations[level].i_boost;
+		}
+	} else if (type == INTEL_OUTPUT_HDMI) {
+		if (hdmi_iboost) {
+			iboost = hdmi_iboost;
+		} else {
+			ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
+			iboost = ddi_translations[level].i_boost;
+		}
+	} else {
+		return;
+	}
+
+	/* Make sure that the requested I_boost is valid */
+	if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
+		DRM_ERROR("Invalid I_boost value %u\n", iboost);
+		return;
+	}
+
+	reg = I915_READ(DISPIO_CR_TX_BMU_CR0);
+	reg &= ~BALANCE_LEG_MASK(port);
+	reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
+
+	if (iboost)
+		reg |= iboost << BALANCE_LEG_SHIFT(port);
+	else
+		reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port);
+
+	I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
+}
+
+static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
+				    enum port port, int type)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const struct bxt_ddi_buf_trans *ddi_translations;
@@ -1860,6 +2167,73 @@
 	I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
 }
 
+static uint32_t translate_signal_level(int signal_levels)
+{
+	uint32_t level;
+
+	switch (signal_levels) {
+	default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
+			      signal_levels);
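+		/* fall through: unsupported combinations are treated as level 0 */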
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		level = 0;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+		level = 1;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+		level = 2;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
+		level = 3;
+		break;
+
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		level = 4;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+		level = 5;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+		level = 6;
+		break;
+
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		level = 7;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+		level = 8;
+		break;
+
+	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+		level = 9;
+		break;
+	}
+
+	return level;
+}
+
+uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dport->base.base.dev;
+	struct intel_encoder *encoder = &dport->base;
+	uint8_t train_set = intel_dp->train_set[0];
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	enum port port = dport->port;
+	uint32_t level;
+
+	level = translate_signal_level(signal_levels);
+
+	if (IS_SKYLAKE(dev))
+		skl_ddi_set_iboost(dev, level, port, encoder->type);
+	else if (IS_BROXTON(dev))
+		bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
+
+	return DDI_BUF_TRANS_SELECT(level);
+}
+
 static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
@@ -2404,7 +2778,7 @@
 
 	temp = I915_READ(BXT_PORT_PLL(port, 9));
 	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
-	temp |= (5 << 1);
+	temp |= pll->config.hw_state.pll9;
 	I915_WRITE(BXT_PORT_PLL(port, 9), temp);
 
 	temp = I915_READ(BXT_PORT_PLL(port, 10));
@@ -2417,8 +2791,8 @@
 	temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
 	temp |= PORT_PLL_RECALIBRATE;
 	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
-	/* Enable 10 bit clock */
-	temp |= PORT_PLL_10BIT_CLK_ENABLE;
+	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+	temp |= pll->config.hw_state.ebb4;
 	I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
 
 	/* Enable PLL */
@@ -2469,13 +2843,38 @@
 		return false;
 
 	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
+	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
+
+	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
+	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
+
 	hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
+	hw_state->pll0 &= PORT_PLL_M2_MASK;
+
 	hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
+	hw_state->pll1 &= PORT_PLL_N_MASK;
+
 	hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
+	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
+
 	hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
+	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
+
 	hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
+	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
+			  PORT_PLL_INT_COEFF_MASK |
+			  PORT_PLL_GAIN_CTL_MASK;
+
 	hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
+	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
+
+	hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
+	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
+
 	hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
+	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
+			   PORT_PLL_DCO_AMP_MASK;
+
 	/*
 	 * While we write to the group register to program all lanes at once we
 	 * can read only lane registers. We configure all lanes the same way, so
@@ -2486,6 +2885,7 @@
 		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
 				 hw_state->pcsdw12,
 				 I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
+	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
 
 	return true;
 }
@@ -2510,7 +2910,6 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t val = I915_READ(LCPLL_CTL);
-	int cdclk_freq;
 
 	if (IS_SKYLAKE(dev))
 		skl_shared_dplls_init(dev_priv);
@@ -2519,10 +2918,10 @@
 	else
 		hsw_shared_dplls_init(dev_priv);
 
-	cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
-	DRM_DEBUG_KMS("CDCLK running at %dKHz\n", cdclk_freq);
-
 	if (IS_SKYLAKE(dev)) {
+		int cdclk_freq;
+
+		cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
 		dev_priv->skl_boot_cdclk = cdclk_freq;
 		if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
 			DRM_ERROR("LCPLL1 is disabled\n");
@@ -2618,20 +3017,6 @@
 	I915_WRITE(_FDI_RXA_CTL, val);
 }
 
-static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
-{
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base);
-	int type = intel_dig_port->base.type;
-
-	if (type != INTEL_OUTPUT_DISPLAYPORT &&
-	    type != INTEL_OUTPUT_EDP &&
-	    type != INTEL_OUTPUT_UNKNOWN) {
-		return;
-	}
-
-	intel_dp_hot_plug(intel_encoder);
-}
-
 void intel_ddi_get_config(struct intel_encoder *encoder,
 			  struct intel_crtc_state *pipe_config)
 {
@@ -2793,10 +3178,9 @@
 		     dev_priv->vbt.ddi_port_info[port].supports_hdmi);
 	init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
 	if (!init_dp && !init_hdmi) {
-		DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, assuming it is\n",
+		DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
 			      port_name(port));
-		init_hdmi = true;
-		init_dp = true;
+		return;
 	}
 
 	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
@@ -2825,14 +3209,13 @@
 	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	intel_encoder->cloneable = 0;
-	intel_encoder->hot_plug = intel_ddi_hot_plug;
 
 	if (init_dp) {
 		if (!intel_ddi_init_dp_connector(intel_dig_port))
 			goto err;
 
 		intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
-		dev_priv->hpd_irq_port[port] = intel_dig_port;
+		dev_priv->hotplug.irq_port[port] = intel_dig_port;
 	}
 
 	/* In theory we don't need the encoder->type check, but leave it just in
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 87476ff..ca9278b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -86,9 +86,6 @@
 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
 				   struct intel_crtc_state *pipe_config);
 
-static int intel_set_mode(struct drm_crtc *crtc,
-			  struct drm_atomic_state *state,
-			  bool force_restore);
 static int intel_framebuffer_init(struct drm_device *dev,
 				  struct intel_framebuffer *ifb,
 				  struct drm_mode_fb_cmd2 *mode_cmd,
@@ -105,22 +102,13 @@
 			    const struct intel_crtc_state *pipe_config);
 static void chv_prepare_pll(struct intel_crtc *crtc,
 			    const struct intel_crtc_state *pipe_config);
-static void intel_begin_crtc_commit(struct drm_crtc *crtc);
-static void intel_finish_crtc_commit(struct drm_crtc *crtc);
+static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
+static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
 	struct intel_crtc_state *crtc_state);
 static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
 			   int num_connectors);
-static void intel_crtc_enable_planes(struct drm_crtc *crtc);
-static void intel_crtc_disable_planes(struct drm_crtc *crtc);
-
-static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
-{
-	if (!connector->mst_port)
-		return connector->encoder;
-	else
-		return &connector->mst_port->mst_encoders[pipe]->base;
-}
+static void intel_modeset_setup_hw_state(struct drm_device *dev);
 
 typedef struct {
 	int	min, max;
@@ -413,7 +401,7 @@
 static const intel_limit_t intel_limits_bxt = {
 	/* FIXME: find real dot limits */
 	.dot = { .min = 0, .max = INT_MAX },
-	.vco = { .min = 4800000, .max = 6480000 },
+	.vco = { .min = 4800000, .max = 6700000 },
 	.n = { .min = 1, .max = 1 },
 	.m1 = { .min = 2, .max = 2 },
 	/* FIXME: find real m2 limits */
@@ -422,14 +410,10 @@
 	.p2 = { .p2_slow = 1, .p2_fast = 20 },
 };
 
-static void vlv_clock(int refclk, intel_clock_t *clock)
+static bool
+needs_modeset(struct drm_crtc_state *state)
 {
-	clock->m = clock->m1 * clock->m2;
-	clock->p = clock->p1 * clock->p2;
-	if (WARN_ON(clock->n == 0 || clock->p == 0))
-		return;
-	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
-	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+	return drm_atomic_crtc_needs_modeset(state);
 }
 
 /**
@@ -561,15 +545,25 @@
 	return limit;
 }
 
+/*
+ * Platform specific helpers to calculate the port PLL loopback (clock.m) and
+ * post-divider (clock.p) values, as well as the pre-divided (clock.vco) and
+ * post-divided fast (clock.dot) clock rates. The fast dot clock is fed to the
+ * port's IO logic. The helpers' return value is the rate of the clock that is
+ * fed to the display engine's pipe, which can be the fast dot clock rate
+ * itself or a divided-down version of it.
+ */
 /* m1 is reserved as 0 in Pineview, n is a ring counter */
-static void pineview_clock(int refclk, intel_clock_t *clock)
+static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
 {
 	clock->m = clock->m2 + 2;
 	clock->p = clock->p1 * clock->p2;
 	if (WARN_ON(clock->n == 0 || clock->p == 0))
-		return;
+		return 0;
 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot;
 }
 
 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
@@ -577,25 +571,41 @@
 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
 }
 
-static void i9xx_clock(int refclk, intel_clock_t *clock)
+static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
 {
 	clock->m = i9xx_dpll_compute_m(clock);
 	clock->p = clock->p1 * clock->p2;
 	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
-		return;
+		return 0;
 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot;
 }
 
-static void chv_clock(int refclk, intel_clock_t *clock)
+static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
 {
 	clock->m = clock->m1 * clock->m2;
 	clock->p = clock->p1 * clock->p2;
 	if (WARN_ON(clock->n == 0 || clock->p == 0))
-		return;
+		return 0;
+	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot / 5;
+}
+
+int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
+{
+	clock->m = clock->m1 * clock->m2;
+	clock->p = clock->p1 * clock->p2;
+	if (WARN_ON(clock->n == 0 || clock->p == 0))
+		return 0;
 	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
 			clock->n << 22);
 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+
+	return clock->dot / 5;
 }
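
For reference, the divider arithmetic these helpers share reduces to the
standalone sketch below; the refclk and m/n/p values are invented for the
example and do not come from any platform's intel_limit_t tables:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's DIV_ROUND_CLOSEST() for positive operands. */
static int div_round_closest(int64_t num, int64_t den)
{
	return (int)((num + den / 2) / den);
}

int main(void)
{
	/* Hypothetical dividers; real limits live in intel_limit_t tables. */
	int refclk = 27000;           /* 27 MHz reference, in kHz */
	int m1 = 2, m2 = 100, n = 2;  /* loopback dividers */
	int p1 = 2, p2 = 5;           /* post dividers */

	int m = m1 * m2;
	int p = p1 * p2;
	int vco = div_round_closest((int64_t)refclk * m, n); /* 2700000 kHz */
	int dot = div_round_closest(vco, p);                 /* 270000 kHz */

	/* On VLV/CHV the pipe is fed a fifth of the fast dot clock. */
	printf("vco=%d kHz dot=%d kHz pipe=%d kHz\n", vco, dot, dot / 5);
	return 0;
}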
 
 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
@@ -639,16 +649,12 @@
 	return true;
 }
 
-static bool
-i9xx_find_best_dpll(const intel_limit_t *limit,
-		    struct intel_crtc_state *crtc_state,
-		    int target, int refclk, intel_clock_t *match_clock,
-		    intel_clock_t *best_clock)
+static int
+i9xx_select_p2_div(const intel_limit_t *limit,
+		   const struct intel_crtc_state *crtc_state,
+		   int target)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_device *dev = crtc->base.dev;
-	intel_clock_t clock;
-	int err = target;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 
 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		/*
@@ -657,18 +663,31 @@
 		 * single/dual channel state, if we even can.
 		 */
 		if (intel_is_dual_link_lvds(dev))
-			clock.p2 = limit->p2.p2_fast;
+			return limit->p2.p2_fast;
 		else
-			clock.p2 = limit->p2.p2_slow;
+			return limit->p2.p2_slow;
 	} else {
 		if (target < limit->p2.dot_limit)
-			clock.p2 = limit->p2.p2_slow;
+			return limit->p2.p2_slow;
 		else
-			clock.p2 = limit->p2.p2_fast;
+			return limit->p2.p2_fast;
 	}
+}
+
+static bool
+i9xx_find_best_dpll(const intel_limit_t *limit,
+		    struct intel_crtc_state *crtc_state,
+		    int target, int refclk, intel_clock_t *match_clock,
+		    intel_clock_t *best_clock)
+{
+	struct drm_device *dev = crtc_state->base.crtc->dev;
+	intel_clock_t clock;
+	int err = target;
 
 	memset(best_clock, 0, sizeof(*best_clock));
 
+	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
 	     clock.m1++) {
 		for (clock.m2 = limit->m2.min;
@@ -681,7 +700,7 @@
 					clock.p1 <= limit->p1.max; clock.p1++) {
 					int this_err;
 
-					i9xx_clock(refclk, &clock);
+					i9xx_calc_dpll_params(refclk, &clock);
 					if (!intel_PLL_is_valid(dev, limit,
 								&clock))
 						continue;
@@ -708,30 +727,14 @@
 		   int target, int refclk, intel_clock_t *match_clock,
 		   intel_clock_t *best_clock)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_device *dev = crtc->base.dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	intel_clock_t clock;
 	int err = target;
 
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		/*
-		 * For LVDS just rely on its current settings for dual-channel.
-		 * We haven't figured out how to reliably set up different
-		 * single/dual channel state, if we even can.
-		 */
-		if (intel_is_dual_link_lvds(dev))
-			clock.p2 = limit->p2.p2_fast;
-		else
-			clock.p2 = limit->p2.p2_slow;
-	} else {
-		if (target < limit->p2.dot_limit)
-			clock.p2 = limit->p2.p2_slow;
-		else
-			clock.p2 = limit->p2.p2_fast;
-	}
-
 	memset(best_clock, 0, sizeof(*best_clock));
 
+	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
 	     clock.m1++) {
 		for (clock.m2 = limit->m2.min;
@@ -742,7 +745,7 @@
 					clock.p1 <= limit->p1.max; clock.p1++) {
 					int this_err;
 
-					pineview_clock(refclk, &clock);
+					pnv_calc_dpll_params(refclk, &clock);
 					if (!intel_PLL_is_valid(dev, limit,
 								&clock))
 						continue;
@@ -769,28 +772,17 @@
 		   int target, int refclk, intel_clock_t *match_clock,
 		   intel_clock_t *best_clock)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_device *dev = crtc->base.dev;
+	struct drm_device *dev = crtc_state->base.crtc->dev;
 	intel_clock_t clock;
 	int max_n;
-	bool found;
+	bool found = false;
 	/* approximately equals target * 0.00585 */
 	int err_most = (target >> 8) + (target >> 9);
-	found = false;
-
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
-		if (intel_is_dual_link_lvds(dev))
-			clock.p2 = limit->p2.p2_fast;
-		else
-			clock.p2 = limit->p2.p2_slow;
-	} else {
-		if (target < limit->p2.dot_limit)
-			clock.p2 = limit->p2.p2_slow;
-		else
-			clock.p2 = limit->p2.p2_fast;
-	}
 
 	memset(best_clock, 0, sizeof(*best_clock));
+
+	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
+
 	max_n = limit->n.max;
 	/* based on hardware requirement, prefer smaller n to precision */
 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
@@ -803,7 +795,7 @@
 				     clock.p1 >= limit->p1.min; clock.p1--) {
 					int this_err;
 
-					i9xx_clock(refclk, &clock);
+					i9xx_calc_dpll_params(refclk, &clock);
 					if (!intel_PLL_is_valid(dev, limit,
 								&clock))
 						continue;
@@ -893,7 +885,7 @@
 					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
 								     refclk * clock.m1);
 
-					vlv_clock(refclk, &clock);
+					vlv_calc_dpll_params(refclk, &clock);
 
 					if (!intel_PLL_is_valid(dev, limit,
 								&clock))
@@ -956,7 +948,7 @@
 
 			clock.m2 = m2;
 
-			chv_clock(refclk, &clock);
+			chv_calc_dpll_params(refclk, &clock);
 
 			if (!intel_PLL_is_valid(dev, limit, &clock))
 				continue;
@@ -1026,7 +1018,7 @@
 		line_mask = DSL_LINEMASK_GEN3;
 
 	line1 = I915_READ(reg) & line_mask;
-	mdelay(5);
+	msleep(5);
 	line2 = I915_READ(reg) & line_mask;
 
 	return line1 == line2;
@@ -1106,6 +1098,9 @@
 		case PORT_D:
 			bit = SDE_PORTD_HOTPLUG_CPT;
 			break;
+		case PORT_E:
+			bit = SDE_PORTE_HOTPLUG_SPT;
+			break;
 		default:
 			return true;
 		}
@@ -1694,7 +1689,7 @@
 	int count = 0;
 
 	for_each_intel_crtc(dev, crtc)
-		count += crtc->active &&
+		count += crtc->base.state->active &&
 			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
 
 	return count;
@@ -1775,7 +1770,7 @@
 	/* Disable DVO 2x clock on both PLLs if necessary */
 	if (IS_I830(dev) &&
 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
-	    intel_num_dvo_pipes(dev) == 1) {
+	    !intel_num_dvo_pipes(dev)) {
 		I915_WRITE(DPLL(PIPE_B),
 			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
 		I915_WRITE(DPLL(PIPE_A),
@@ -1790,13 +1785,13 @@
 	/* Make sure the pipe isn't still relying on us */
 	assert_pipe_disabled(dev_priv, pipe);
 
-	I915_WRITE(DPLL(pipe), 0);
+	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
 	POSTING_READ(DPLL(pipe));
 }
 
 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
-	u32 val = 0;
+	u32 val;
 
 	/* Make sure the pipe isn't still relying on us */
 	assert_pipe_disabled(dev_priv, pipe);
@@ -1805,8 +1800,9 @@
 	 * Leave integrated clock source and reference clock enabled for pipe B.
 	 * The latter is needed for VGA hotplug / manual detection.
 	 */
+	val = DPLL_VGA_MODE_DIS;
 	if (pipe == PIPE_B)
-		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
+		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
 	I915_WRITE(DPLL(pipe), val);
 	POSTING_READ(DPLL(pipe));
 
@@ -1821,7 +1817,8 @@
 	assert_pipe_disabled(dev_priv, pipe);
 
 	/* Set PLL en = 0 */
-	val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
+	val = DPLL_SSC_REF_CLK_CHV |
+		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
 	if (pipe != PIPE_A)
 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
 	I915_WRITE(DPLL(pipe), val);
@@ -1942,11 +1939,13 @@
 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
 
 	/* PCH only available on ILK+ */
-	BUG_ON(INTEL_INFO(dev)->gen < 5);
-	if (WARN_ON(pll == NULL))
-	       return;
+	if (INTEL_INFO(dev)->gen < 5)
+		return;
 
-	if (WARN_ON(pll->config.crtc_mask == 0))
+	if (pll == NULL)
+		return;
+
+	if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
 		return;
 
 	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
@@ -2004,11 +2003,15 @@
 
 	if (HAS_PCH_IBX(dev_priv->dev)) {
 		/*
-		 * make the BPC in transcoder be consistent with
-		 * that in pipeconf reg.
+		 * Make the BPC in transcoder be consistent with
+		 * that in pipeconf reg. For HDMI we must use 8bpc
+		 * here for both 8bpc and 12bpc.
 		 */
 		val &= ~PIPECONF_BPC_MASK;
-		val |= pipeconf_val & PIPECONF_BPC_MASK;
+		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
+			val |= PIPECONF_8BPC;
+		else
+			val |= pipeconf_val & PIPECONF_BPC_MASK;
 	}
 
 	val &= ~TRANS_INTERLACE_MASK;
@@ -2122,6 +2125,8 @@
 	int reg;
 	u32 val;
 
+	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
+
 	assert_planes_disabled(dev_priv, pipe);
 	assert_cursor_disabled(dev_priv, pipe);
 	assert_sprites_disabled(dev_priv, pipe);
@@ -2181,6 +2186,8 @@
 	int reg;
 	u32 val;
 
+	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
+
 	/*
 	 * Make sure planes won't keep trying to pump pixels to us,
 	 * or we might hang the display.
@@ -2211,28 +2218,6 @@
 		intel_wait_for_pipe_off(crtc);
 }
 
-/**
- * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
- * @plane:  plane to be enabled
- * @crtc: crtc for the plane
- *
- * Enable @plane on @crtc, making sure that the pipe is running first.
- */
-static void intel_enable_primary_hw_plane(struct drm_plane *plane,
-					  struct drm_crtc *crtc)
-{
-	struct drm_device *dev = plane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	/* If the pipe isn't enabled, we can't pump pixels and may hang */
-	assert_pipe_enabled(dev_priv, intel_crtc->pipe);
-	to_intel_plane_state(plane->state)->visible = true;
-
-	dev_priv->display.update_primary_plane(crtc, plane->fb,
-					       crtc->x, crtc->y);
-}
-
 static bool need_vtd_wa(struct drm_device *dev)
 {
 #ifdef CONFIG_INTEL_IOMMU
@@ -2302,6 +2287,7 @@
 			const struct drm_plane_state *plane_state)
 {
 	struct intel_rotation_info *info = &view->rotation_info;
+	unsigned int tile_height, tile_pitch;
 
 	*view = i915_ggtt_view_normal;
 
@@ -2318,14 +2304,35 @@
 	info->pitch = fb->pitches[0];
 	info->fb_modifier = fb->modifier[0];
 
+	tile_height = intel_tile_height(fb->dev, fb->pixel_format,
+					fb->modifier[0]);
+	tile_pitch = PAGE_SIZE / tile_height;
+	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
+	info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
+	info->size = info->width_pages * info->height_pages * PAGE_SIZE;
+
 	return 0;
 }
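
To put numbers on the page-count arithmetic above, here is the same math for
a hypothetical 1920x1080 XRGB framebuffer with X-tiling; the 512-byte by
8-row tile geometry is assumed for the example rather than queried the way
intel_tile_height() does:

#include <stdio.h>

#define PAGE_SZ 4096
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int pitch = 1920 * 4;    /* 7680 bytes per row */
	unsigned int height = 1080;
	unsigned int tile_height = 8;     /* X-tile: 512B wide, 8 rows tall */
	unsigned int tile_pitch = PAGE_SZ / tile_height;     /* 512 */

	unsigned int width_pages = DIV_ROUND_UP(pitch, tile_pitch);    /* 15 */
	unsigned int height_pages = DIV_ROUND_UP(height, tile_height); /* 135 */

	/* 15 * 135 * 4096 = 8294400 bytes for the rotated GGTT view. */
	printf("%u x %u pages, %u bytes\n", width_pages, height_pages,
	       width_pages * height_pages * PAGE_SZ);
	return 0;
}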
 
+static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
+{
+	if (INTEL_INFO(dev_priv)->gen >= 9)
+		return 256 * 1024;
+	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
+		 IS_VALLEYVIEW(dev_priv))
+		return 128 * 1024;
+	else if (INTEL_INFO(dev_priv)->gen >= 4)
+		return 4 * 1024;
+	else
+		return 0;
+}
+
 int
 intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 			   struct drm_framebuffer *fb,
 			   const struct drm_plane_state *plane_state,
-			   struct intel_engine_cs *pipelined)
+			   struct intel_engine_cs *pipelined,
+			   struct drm_i915_gem_request **pipelined_request)
 {
 	struct drm_device *dev = fb->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2338,14 +2345,7 @@
 
 	switch (fb->modifier[0]) {
 	case DRM_FORMAT_MOD_NONE:
-		if (INTEL_INFO(dev)->gen >= 9)
-			alignment = 256 * 1024;
-		else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
-			alignment = 128 * 1024;
-		else if (INTEL_INFO(dev)->gen >= 4)
-			alignment = 4 * 1024;
-		else
-			alignment = 64 * 1024;
+		alignment = intel_linear_alignment(dev_priv);
 		break;
 	case I915_FORMAT_MOD_X_TILED:
 		if (INTEL_INFO(dev)->gen >= 9)
@@ -2390,7 +2390,7 @@
 
 	dev_priv->mm.interruptible = false;
 	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
-						   &view);
+						   pipelined_request, &view);
 	if (ret)
 		goto err_interruptible;
 
@@ -2400,7 +2400,18 @@
 	 * a fence as the cost is not that onerous.
 	 */
 	ret = i915_gem_object_get_fence(obj);
-	if (ret)
+	if (ret == -EDEADLK) {
+		/*
+		 * -EDEADLK means there are no free fences
+		 * and no pending flips.
+		 *
+		 * This is propagated to atomic, but it uses
+		 * -EDEADLK to force a locking recovery, so
+		 * change the returned error to -EBUSY.
+		 */
+		ret = -EBUSY;
+		goto err_unpin;
+	} else if (ret)
 		goto err_unpin;
 
 	i915_gem_object_pin_fence(obj);
@@ -2435,7 +2446,8 @@
 
 /* Computes the linear offset to the base tile and adjusts x, y. Bytes per pixel
  * is assumed to be a power-of-two. */
-unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
+					     int *x, int *y,
 					     unsigned int tiling_mode,
 					     unsigned int cpp,
 					     unsigned int pitch)
@@ -2451,12 +2463,13 @@
 
 		return tile_rows * pitch * 8 + tiles * 4096;
 	} else {
+		unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
 		unsigned int offset;
 
 		offset = *y * pitch + *x * cpp;
-		*y = 0;
-		*x = (offset & 4095) / cpp;
-		return offset & -4096;
+		*y = (offset & alignment) / pitch;
+		*x = ((offset & alignment) - *y * pitch) / cpp;
+		return offset & ~alignment;
 	}
 }
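
The new linear branch can be checked by hand: the returned base is the pixel
offset rounded down to the (here hypothetically 4 KiB) alignment, and x/y are
re-derived so that base + y*pitch + x*cpp addresses the original pixel again:

#include <stdio.h>

int main(void)
{
	/* Hypothetical linear fb: 4 bytes/pixel, 7680-byte pitch. */
	unsigned int pitch = 7680, cpp = 4, alignment = 4096 - 1;
	unsigned int x = 100, y = 50;

	unsigned int offset = y * pitch + x * cpp;   /* 384400 */
	unsigned int base = offset & ~alignment;     /* 380928 */
	unsigned int rem = offset & alignment;       /* 3472 */

	y = rem / pitch;               /* 0 */
	x = (rem - y * pitch) / cpp;   /* 868 */

	/* check == 384400, the original offset. */
	printf("base=%u y=%u x=%u check=%u\n",
	       base, y, x, base + y * pitch + x * cpp);
	return 0;
}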
 
@@ -2583,6 +2596,7 @@
 	struct intel_crtc *i;
 	struct drm_i915_gem_object *obj;
 	struct drm_plane *primary = intel_crtc->base.primary;
+	struct drm_plane_state *plane_state = primary->state;
 	struct drm_framebuffer *fb;
 
 	if (!plane_config->fb)
@@ -2622,15 +2636,23 @@
 	return;
 
 valid_fb:
+	plane_state->src_x = plane_state->src_y = 0;
+	plane_state->src_w = fb->width << 16;
+	plane_state->src_h = fb->height << 16;
+
+	plane_state->crtc_x = plane_state->crtc_y = 0;
+	plane_state->crtc_w = fb->width;
+	plane_state->crtc_h = fb->height;
+
 	obj = intel_fb_obj(fb);
 	if (obj->tiling_mode != I915_TILING_NONE)
 		dev_priv->preserve_bios_swizzle = true;
 
-	primary->fb = fb;
-	primary->state->crtc = &intel_crtc->base;
-	primary->crtc = &intel_crtc->base;
-	update_state_fb(primary);
-	obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
+	drm_framebuffer_reference(fb);
+	primary->fb = primary->state->fb = fb;
+	primary->crtc = primary->state->crtc = &intel_crtc->base;
+	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
+	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
 }
 
 static void i9xx_update_primary_plane(struct drm_crtc *crtc,
@@ -2725,7 +2747,8 @@
 
 	if (INTEL_INFO(dev)->gen >= 4) {
 		intel_crtc->dspaddr_offset =
-			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+			intel_gen4_compute_page_offset(dev_priv,
+						       &x, &y, obj->tiling_mode,
 						       pixel_size,
 						       fb->pitches[0]);
 		linear_offset -= intel_crtc->dspaddr_offset;
@@ -2826,7 +2849,8 @@
 
 	linear_offset = y * fb->pitches[0] + x * pixel_size;
 	intel_crtc->dspaddr_offset =
-		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+		intel_gen4_compute_page_offset(dev_priv,
+					       &x, &y, obj->tiling_mode,
 					       pixel_size,
 					       fb->pitches[0]);
 	linear_offset -= intel_crtc->dspaddr_offset;
@@ -2904,32 +2928,32 @@
 	return i915_gem_obj_ggtt_offset_view(obj, view);
 }
 
+static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
+	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
+	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+	DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
+		intel_crtc->base.base.id, intel_crtc->pipe, id);
+}
+
 /*
  * This function detaches (aka. unbinds) unused scalers in hardware
  */
-void skl_detach_scalers(struct intel_crtc *intel_crtc)
+static void skl_detach_scalers(struct intel_crtc *intel_crtc)
 {
-	struct drm_device *dev;
-	struct drm_i915_private *dev_priv;
 	struct intel_crtc_scaler_state *scaler_state;
 	int i;
 
-	if (!intel_crtc || !intel_crtc->config)
-		return;
-
-	dev = intel_crtc->base.dev;
-	dev_priv = dev->dev_private;
 	scaler_state = &intel_crtc->config->scaler_state;
 
 	/* loop through and disable scalers that aren't in use */
 	for (i = 0; i < intel_crtc->num_scalers; i++) {
-		if (!scaler_state->scalers[i].in_use) {
-			I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, i), 0);
-			I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, i), 0);
-			I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, i), 0);
-			DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
-				intel_crtc->base.base.id, intel_crtc->pipe, i);
-		}
+		if (!scaler_state->scalers[i].in_use)
+			skl_detach_scaler(intel_crtc, i);
 	}
 }
 
@@ -3132,8 +3156,8 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->display.disable_fbc)
-		dev_priv->display.disable_fbc(dev);
+	if (dev_priv->fbc.disable_fbc)
+		dev_priv->fbc.disable_fbc(dev_priv);
 
 	dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
@@ -3176,24 +3200,8 @@
 	}
 }
 
-void intel_crtc_reset(struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-	if (!crtc->active)
-		return;
-
-	intel_crtc_disable_planes(&crtc->base);
-	dev_priv->display.crtc_disable(&crtc->base);
-	dev_priv->display.crtc_enable(&crtc->base);
-	intel_crtc_enable_planes(&crtc->base);
-}
-
 void intel_prepare_reset(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc;
-
 	/* no reset support for gen2 */
 	if (IS_GEN2(dev))
 		return;
@@ -3203,18 +3211,11 @@
 		return;
 
 	drm_modeset_lock_all(dev);
-
 	/*
 	 * Disabling the crtcs gracefully seems nicer. Also the
 	 * g33 docs say we should at least disable all the planes.
 	 */
-	for_each_intel_crtc(dev, crtc) {
-		if (!crtc->active)
-			continue;
-
-		intel_crtc_disable_planes(&crtc->base);
-		dev_priv->display.crtc_disable(&crtc->base);
-	}
+	intel_display_suspend(dev);
 }
 
 void intel_finish_reset(struct drm_device *dev)
@@ -3258,7 +3259,7 @@
 		dev_priv->display.hpd_irq_setup(dev);
 	spin_unlock_irq(&dev_priv->irq_lock);
 
-	intel_modeset_setup_hw_state(dev, true);
+	intel_display_resume(dev);
 
 	intel_hpd_init(dev_priv);
 
@@ -4200,34 +4201,16 @@
 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
 }
 
-void intel_put_shared_dpll(struct intel_crtc *crtc)
-{
-	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
-
-	if (pll == NULL)
-		return;
-
-	if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
-		WARN(1, "bad %s crtc mask\n", pll->name);
-		return;
-	}
-
-	pll->config.crtc_mask &= ~(1 << crtc->pipe);
-	if (pll->config.crtc_mask == 0) {
-		WARN_ON(pll->on);
-		WARN_ON(pll->active);
-	}
-
-	crtc->config->shared_dpll = DPLL_ID_PRIVATE;
-}
-
 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 						struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct intel_shared_dpll *pll;
+	struct intel_shared_dpll_config *shared_dpll;
 	enum intel_dpll_id i;
 
+	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+
 	if (HAS_PCH_IBX(dev_priv->dev)) {
 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
 		i = (enum intel_dpll_id) crtc->pipe;
@@ -4236,7 +4219,7 @@
 		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
 			      crtc->base.base.id, pll->name);
 
-		WARN_ON(pll->new_config->crtc_mask);
+		WARN_ON(shared_dpll[i].crtc_mask);
 
 		goto found;
 	}
@@ -4256,7 +4239,7 @@
 		pll = &dev_priv->shared_dplls[i];
 		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
 			crtc->base.base.id, pll->name);
-		WARN_ON(pll->new_config->crtc_mask);
+		WARN_ON(shared_dpll[i].crtc_mask);
 
 		goto found;
 	}
@@ -4265,15 +4248,15 @@
 		pll = &dev_priv->shared_dplls[i];
 
 		/* Only want to check enabled timings first */
-		if (pll->new_config->crtc_mask == 0)
+		if (shared_dpll[i].crtc_mask == 0)
 			continue;
 
 		if (memcmp(&crtc_state->dpll_hw_state,
-			   &pll->new_config->hw_state,
-			   sizeof(pll->new_config->hw_state)) == 0) {
+			   &shared_dpll[i].hw_state,
+			   sizeof(crtc_state->dpll_hw_state)) == 0) {
 			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %d)\n",
 				      crtc->base.base.id, pll->name,
-				      pll->new_config->crtc_mask,
+				      shared_dpll[i].crtc_mask,
 				      pll->active);
 			goto found;
 		}
@@ -4282,7 +4265,7 @@
 	/* Ok no matching timings, maybe there's a free one? */
 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
 		pll = &dev_priv->shared_dplls[i];
-		if (pll->new_config->crtc_mask == 0) {
+		if (shared_dpll[i].crtc_mask == 0) {
 			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
 				      crtc->base.base.id, pll->name);
 			goto found;
@@ -4292,83 +4275,33 @@
 	return NULL;
 
 found:
-	if (pll->new_config->crtc_mask == 0)
-		pll->new_config->hw_state = crtc_state->dpll_hw_state;
+	if (shared_dpll[i].crtc_mask == 0)
+		shared_dpll[i].hw_state =
+			crtc_state->dpll_hw_state;
 
 	crtc_state->shared_dpll = i;
 	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
 			 pipe_name(crtc->pipe));
 
-	pll->new_config->crtc_mask |= 1 << crtc->pipe;
+	shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
 
 	return pll;
 }
 
-/**
- * intel_shared_dpll_start_config - start a new PLL staged config
- * @dev_priv: DRM device
- * @clear_pipes: mask of pipes that will have their PLLs freed
- *
- * Starts a new PLL staged config, copying the current config but
- * releasing the references of pipes specified in clear_pipes.
- */
-static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
-					  unsigned clear_pipes)
+static void intel_shared_dpll_commit(struct drm_atomic_state *state)
 {
+	struct drm_i915_private *dev_priv = to_i915(state->dev);
+	struct intel_shared_dpll_config *shared_dpll;
 	struct intel_shared_dpll *pll;
 	enum intel_dpll_id i;
 
+	if (!to_intel_atomic_state(state)->dpll_set)
+		return;
+
+	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
 		pll = &dev_priv->shared_dplls[i];
-
-		pll->new_config = kmemdup(&pll->config, sizeof pll->config,
-					  GFP_KERNEL);
-		if (!pll->new_config)
-			goto cleanup;
-
-		pll->new_config->crtc_mask &= ~clear_pipes;
-	}
-
-	return 0;
-
-cleanup:
-	while (--i >= 0) {
-		pll = &dev_priv->shared_dplls[i];
-		kfree(pll->new_config);
-		pll->new_config = NULL;
-	}
-
-	return -ENOMEM;
-}
-
-static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
-{
-	struct intel_shared_dpll *pll;
-	enum intel_dpll_id i;
-
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		pll = &dev_priv->shared_dplls[i];
-
-		WARN_ON(pll->new_config == &pll->config);
-
-		pll->config = *pll->new_config;
-		kfree(pll->new_config);
-		pll->new_config = NULL;
-	}
-}
-
-static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
-{
-	struct intel_shared_dpll *pll;
-	enum intel_dpll_id i;
-
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-		pll = &dev_priv->shared_dplls[i];
-
-		WARN_ON(pll->new_config == &pll->config);
-
-		kfree(pll->new_config);
-		pll->new_config = NULL;
+		pll->config = shared_dpll[i];
 	}
 }
 
@@ -4386,62 +4319,16 @@
 	}
 }
 
-/**
- * skl_update_scaler_users - Stages update to crtc's scaler state
- * @intel_crtc: crtc
- * @crtc_state: crtc_state
- * @plane: plane (NULL indicates crtc is requesting update)
- * @plane_state: plane's state
- * @force_detach: request unconditional detachment of scaler
- *
- * This function updates scaler state for requested plane or crtc.
- * To request scaler usage update for a plane, caller shall pass plane pointer.
- * To request scaler usage update for crtc, caller shall pass plane pointer
- * as NULL.
- *
- * Return
- *     0 - scaler_usage updated successfully
- *    error - requested scaling cannot be supported or other error condition
- */
-int
-skl_update_scaler_users(
-	struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state,
-	struct intel_plane *intel_plane, struct intel_plane_state *plane_state,
-	int force_detach)
+static int
+skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
+		  int src_w, int src_h, int dst_w, int dst_h)
 {
+	struct intel_crtc_scaler_state *scaler_state =
+		&crtc_state->scaler_state;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(crtc_state->base.crtc);
 	int need_scaling;
-	int idx;
-	int src_w, src_h, dst_w, dst_h;
-	int *scaler_id;
-	struct drm_framebuffer *fb;
-	struct intel_crtc_scaler_state *scaler_state;
-	unsigned int rotation;
-
-	if (!intel_crtc || !crtc_state)
-		return 0;
-
-	scaler_state = &crtc_state->scaler_state;
-
-	idx = intel_plane ? drm_plane_index(&intel_plane->base) : SKL_CRTC_INDEX;
-	fb = intel_plane ? plane_state->base.fb : NULL;
-
-	if (intel_plane) {
-		src_w = drm_rect_width(&plane_state->src) >> 16;
-		src_h = drm_rect_height(&plane_state->src) >> 16;
-		dst_w = drm_rect_width(&plane_state->dst);
-		dst_h = drm_rect_height(&plane_state->dst);
-		scaler_id = &plane_state->scaler_id;
-		rotation = plane_state->base.rotation;
-	} else {
-		struct drm_display_mode *adjusted_mode =
-			&crtc_state->base.adjusted_mode;
-		src_w = crtc_state->pipe_src_w;
-		src_h = crtc_state->pipe_src_h;
-		dst_w = adjusted_mode->hdisplay;
-		dst_h = adjusted_mode->vdisplay;
-		scaler_id = &scaler_state->scaler_id;
-		rotation = DRM_ROTATE_0;
-	}
 
 	need_scaling = intel_rotation_90_or_270(rotation) ?
 		(src_h != dst_w || src_w != dst_h):
@@ -4457,17 +4344,14 @@
 	 * update to free the scaler is done in plane/panel-fit programming.
 	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
 	 */
-	if (force_detach || !need_scaling || (intel_plane &&
-		(!fb || !plane_state->visible))) {
+	if (force_detach || !need_scaling) {
 		if (*scaler_id >= 0) {
-			scaler_state->scaler_users &= ~(1 << idx);
+			scaler_state->scaler_users &= ~(1 << scaler_user);
 			scaler_state->scalers[*scaler_id].in_use = 0;
 
-			DRM_DEBUG_KMS("Staged freeing scaler id %d.%d from %s:%d "
-				"crtc_state = %p scaler_users = 0x%x\n",
-				intel_crtc->pipe, *scaler_id, intel_plane ? "PLANE" : "CRTC",
-				intel_plane ? intel_plane->base.base.id :
-				intel_crtc->base.base.id, crtc_state,
+			DRM_DEBUG_KMS("scaler_user index %u.%u: "
+				"Staged freeing scaler id %d scaler_users = 0x%x\n",
+				intel_crtc->pipe, scaler_user, *scaler_id,
 				scaler_state->scaler_users);
 			*scaler_id = -1;
 		}
@@ -4480,55 +4364,123 @@
 
 		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
 		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
-		DRM_DEBUG_KMS("%s:%d scaler_user index %u.%u: src %ux%u dst %ux%u "
+		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
 			"size is out of scaler range\n",
-			intel_plane ? "PLANE" : "CRTC",
-			intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id,
-			intel_crtc->pipe, idx, src_w, src_h, dst_w, dst_h);
+			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
 		return -EINVAL;
 	}
 
+	/* mark this plane as a scaler user in crtc_state */
+	scaler_state->scaler_users |= (1 << scaler_user);
+	DRM_DEBUG_KMS("scaler_user index %u.%u: "
+		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
+		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
+		scaler_state->scaler_users);
+
+	return 0;
+}
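
The scaling test at the top of skl_update_scaler() is worth spelling out:
with a 90/270 degree rotation the source axes swap before scaling, so width
is compared against the destination height and vice versa. A self-contained
rendering of that check (rotation is a plain degree value here, not the
kernel's DRM_ROTATE_* bitmask):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for intel_rotation_90_or_270(). */
static bool rotated_90_or_270(unsigned int deg)
{
	return deg == 90 || deg == 270;
}

static bool needs_scaling(unsigned int deg, int src_w, int src_h,
			  int dst_w, int dst_h)
{
	return rotated_90_or_270(deg) ?
		(src_h != dst_w || src_w != dst_h) :
		(src_w != dst_w || src_h != dst_h);
}

int main(void)
{
	/* A 1080x1920 source rotated 90 degrees fills 1920x1080 exactly. */
	printf("%d\n", needs_scaling(90, 1080, 1920, 1920, 1080)); /* 0 */
	printf("%d\n", needs_scaling(0, 1080, 1920, 1920, 1080));  /* 1 */
	return 0;
}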
+
+/**
+ * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
+ *
+ * @state: crtc state being updated
+ *
+ * Return
+ *     0 - scaler_usage updated successfully
+ *    error - requested scaling cannot be supported or other error condition
+ */
+int skl_update_scaler_crtc(struct intel_crtc_state *state)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
+	struct drm_display_mode *adjusted_mode =
+		&state->base.adjusted_mode;
+
+	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
+		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
+
+	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
+		&state->scaler_state.scaler_id, DRM_ROTATE_0,
+		state->pipe_src_w, state->pipe_src_h,
+		adjusted_mode->hdisplay, adjusted_mode->vdisplay);
+}
+
+/**
+ * skl_update_scaler_plane - Stages update to scaler state for a given plane.
+ *
+ * @crtc_state: crtc state the plane belongs to
+ * @plane_state: atomic plane state to update
+ *
+ * Return
+ *     0 - scaler_usage updated successfully
+ *    error - requested scaling cannot be supported or other error condition
+ */
+static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
+				   struct intel_plane_state *plane_state)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct intel_plane *intel_plane =
+		to_intel_plane(plane_state->base.plane);
+	struct drm_framebuffer *fb = plane_state->base.fb;
+	int ret;
+
+	bool force_detach = !fb || !plane_state->visible;
+
+	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
+		      intel_plane->base.base.id, intel_crtc->pipe,
+		      drm_plane_index(&intel_plane->base));
+
+	ret = skl_update_scaler(crtc_state, force_detach,
+				drm_plane_index(&intel_plane->base),
+				&plane_state->scaler_id,
+				plane_state->base.rotation,
+				drm_rect_width(&plane_state->src) >> 16,
+				drm_rect_height(&plane_state->src) >> 16,
+				drm_rect_width(&plane_state->dst),
+				drm_rect_height(&plane_state->dst));
+
+	if (ret || plane_state->scaler_id < 0)
+		return ret;
+
 	/* check colorkey */
-	if (WARN_ON(intel_plane &&
-		intel_plane->ckey.flags != I915_SET_COLORKEY_NONE)) {
-		DRM_DEBUG_KMS("PLANE:%d scaling %ux%u->%ux%u not allowed with colorkey",
-			intel_plane->base.base.id, src_w, src_h, dst_w, dst_h);
+	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
+		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
+			      intel_plane->base.base.id);
 		return -EINVAL;
 	}
 
 	/* Check src format */
-	if (intel_plane) {
-		switch (fb->pixel_format) {
-		case DRM_FORMAT_RGB565:
-		case DRM_FORMAT_XBGR8888:
-		case DRM_FORMAT_XRGB8888:
-		case DRM_FORMAT_ABGR8888:
-		case DRM_FORMAT_ARGB8888:
-		case DRM_FORMAT_XRGB2101010:
-		case DRM_FORMAT_XBGR2101010:
-		case DRM_FORMAT_YUYV:
-		case DRM_FORMAT_YVYU:
-		case DRM_FORMAT_UYVY:
-		case DRM_FORMAT_VYUY:
-			break;
-		default:
-			DRM_DEBUG_KMS("PLANE:%d FB:%d unsupported scaling format 0x%x\n",
-				intel_plane->base.base.id, fb->base.id, fb->pixel_format);
-			return -EINVAL;
-		}
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+		break;
+	default:
+		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
+			intel_plane->base.base.id, fb->base.id, fb->pixel_format);
+		return -EINVAL;
 	}
 
-	/* mark this plane as a scaler user in crtc_state */
-	scaler_state->scaler_users |= (1 << idx);
-	DRM_DEBUG_KMS("%s:%d staged scaling request for %ux%u->%ux%u "
-		"crtc_state = %p scaler_users = 0x%x\n",
-		intel_plane ? "PLANE" : "CRTC",
-		intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id,
-		src_w, src_h, dst_w, dst_h, crtc_state, scaler_state->scaler_users);
 	return 0;
 }
 
-static void skylake_pfit_update(struct intel_crtc *crtc, int enable)
+static void skylake_scaler_disable(struct intel_crtc *crtc)
+{
+	int i;
+
+	for (i = 0; i < crtc->num_scalers; i++)
+		skl_detach_scaler(crtc, i);
+}
+
+static void skylake_pfit_enable(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4538,13 +4490,6 @@
 
 	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
 
-	/* To update pfit, first update scaler state */
-	skl_update_scaler_users(crtc, crtc->config, NULL, NULL, !enable);
-	intel_atomic_setup_scalers(crtc->base.dev, crtc, crtc->config);
-	skl_detach_scalers(crtc);
-	if (!enable)
-		return;
-
 	if (crtc->config->pch_pfit.enabled) {
 		int id;
 
@@ -4584,20 +4529,6 @@
 	}
 }
 
-static void intel_enable_sprite_planes(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	enum pipe pipe = to_intel_crtc(crtc)->pipe;
-	struct drm_plane *plane;
-	struct intel_plane *intel_plane;
-
-	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
-		intel_plane = to_intel_plane(plane);
-		if (intel_plane->pipe == pipe)
-			intel_plane_restore(&intel_plane->base);
-	}
-}
-
 void hsw_enable_ips(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
@@ -4668,7 +4599,7 @@
 	bool reenable_ips = false;
 
 	/* The clocks have to be on to load the palette. */
-	if (!crtc->state->enable || !intel_crtc->active)
+	if (!crtc->state->active)
 		return;
 
 	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
@@ -4755,10 +4686,6 @@
 	 */
 	hsw_enable_ips(intel_crtc);
 
-	mutex_lock(&dev->struct_mutex);
-	intel_fbc_update(dev);
-	mutex_unlock(&dev->struct_mutex);
-
 	/*
 	 * Gen2 reports pipe underruns whenever all planes are disabled.
 	 * So don't enable underrun reporting before at least some planes
@@ -4810,13 +4737,11 @@
 	 * event which is after the vblank start event, so we need to have a
 	 * wait-for-vblank between disabling the plane and the pipe.
 	 */
-	if (HAS_GMCH_DISPLAY(dev))
+	if (HAS_GMCH_DISPLAY(dev)) {
 		intel_set_memory_cxsr(dev_priv, false);
-
-	mutex_lock(&dev->struct_mutex);
-	if (dev_priv->fbc.crtc == intel_crtc)
-		intel_fbc_disable(dev);
-	mutex_unlock(&dev->struct_mutex);
+		dev_priv->wm.vlv.cxsr = false;
+		intel_wait_for_vblank(dev, pipe);
+	}
 
 	/*
 	 * FIXME IPS should be fine as long as one plane is
@@ -4827,49 +4752,83 @@
 	hsw_disable_ips(intel_crtc);
 }
 
-static void intel_crtc_enable_planes(struct drm_crtc *crtc)
+static void intel_post_plane_update(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
+	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_plane *plane;
 
-	intel_enable_primary_hw_plane(crtc->primary, crtc);
-	intel_enable_sprite_planes(crtc);
-	intel_crtc_update_cursor(crtc, true);
+	if (atomic->wait_vblank)
+		intel_wait_for_vblank(dev, crtc->pipe);
 
-	intel_post_enable_primary(crtc);
+	intel_frontbuffer_flip(dev, atomic->fb_bits);
 
-	/*
-	 * FIXME: Once we grow proper nuclear flip support out of this we need
-	 * to compute the mask of flip planes precisely. For the time being
-	 * consider this a flip to a NULL plane.
-	 */
-	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+	if (atomic->disable_cxsr)
+		crtc->wm.cxsr_allowed = true;
+
+	if (crtc->atomic.update_wm_post)
+		intel_update_watermarks(&crtc->base);
+
+	if (atomic->update_fbc)
+		intel_fbc_update(dev_priv);
+
+	if (atomic->post_enable_primary)
+		intel_post_enable_primary(&crtc->base);
+
+	drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
+		intel_update_sprite_watermarks(plane, &crtc->base,
+					       0, 0, 0, false, false);
+
+	memset(atomic, 0, sizeof(*atomic));
 }
 
-static void intel_crtc_disable_planes(struct drm_crtc *crtc)
+static void intel_pre_plane_update(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
+	struct drm_plane *p;
+
+	/* Track fb's for any planes being disabled */
+	drm_for_each_plane_mask(p, dev, atomic->disabled_planes) {
+		struct intel_plane *plane = to_intel_plane(p);
+
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL,
+				  plane->frontbuffer_bit);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	if (atomic->wait_for_flips)
+		intel_crtc_wait_for_pending_flips(&crtc->base);
+
+	if (atomic->disable_fbc)
+		intel_fbc_disable_crtc(crtc);
+
+	if (crtc->atomic.disable_ips)
+		hsw_disable_ips(crtc);
+
+	if (atomic->pre_disable_primary)
+		intel_pre_disable_primary(&crtc->base);
+
+	if (atomic->disable_cxsr) {
+		crtc->wm.cxsr_allowed = false;
+		intel_set_memory_cxsr(dev_priv, false);
+	}
+}
+
+static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
 {
 	struct drm_device *dev = crtc->dev;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_plane *intel_plane;
+	struct drm_plane *p;
 	int pipe = intel_crtc->pipe;
 
-	if (!intel_crtc->active)
-		return;
-
-	intel_crtc_wait_for_pending_flips(crtc);
-
-	intel_pre_disable_primary(crtc);
-
 	intel_crtc_dpms_overlay_disable(intel_crtc);
-	for_each_intel_plane(dev, intel_plane) {
-		if (intel_plane->pipe == pipe) {
-			struct drm_crtc *from = intel_plane->base.crtc;
 
-			intel_plane->disable_plane(&intel_plane->base,
-						   from ?: crtc, true);
-		}
-	}
+	drm_for_each_plane_mask(p, dev, plane_mask)
+		to_intel_plane(p)->disable_plane(p, crtc);
 
 	/*
 	 * FIXME: Once we grow proper nuclear flip support out of this we need
@@ -4887,9 +4846,7 @@
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
-	WARN_ON(!crtc->state->enable);
-
-	if (intel_crtc->active)
+	if (WARN_ON(intel_crtc->active))
 		return;
 
 	if (intel_crtc->config->has_pch_encoder)
@@ -4956,46 +4913,17 @@
 	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
 }
 
-/*
- * This implements the workaround described in the "notes" section of the mode
- * set sequence documentation. When going from no pipes or single pipe to
- * multiple pipes, and planes are enabled after the pipe, we need to wait at
- * least 2 vblanks on the first pipe before enabling planes on the second pipe.
- */
-static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct intel_crtc *crtc_it, *other_active_crtc = NULL;
-
-	/* We want to get the other_active_crtc only if there's only 1 other
-	 * active crtc. */
-	for_each_intel_crtc(dev, crtc_it) {
-		if (!crtc_it->active || crtc_it == crtc)
-			continue;
-
-		if (other_active_crtc)
-			return;
-
-		other_active_crtc = crtc_it;
-	}
-	if (!other_active_crtc)
-		return;
-
-	intel_wait_for_vblank(dev, other_active_crtc->pipe);
-	intel_wait_for_vblank(dev, other_active_crtc->pipe);
-}
-
 static void haswell_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
-	int pipe = intel_crtc->pipe;
+	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
+	struct intel_crtc_state *pipe_config =
+		to_intel_crtc_state(crtc->state);
 
-	WARN_ON(!crtc->state->enable);
-
-	if (intel_crtc->active)
+	if (WARN_ON(intel_crtc->active))
 		return;
 
 	if (intel_crtc_to_shared_dpll(intel_crtc))
@@ -5036,7 +4964,7 @@
 	intel_ddi_enable_pipe_clock(intel_crtc);
 
 	if (INTEL_INFO(dev)->gen == 9)
-		skylake_pfit_update(intel_crtc, 1);
+		skylake_pfit_enable(intel_crtc);
 	else if (INTEL_INFO(dev)->gen < 9)
 		ironlake_pfit_enable(intel_crtc);
 	else
@@ -5070,7 +4998,11 @@
 
 	/* If we change the relative order between pipe/planes enabling, we need
 	 * to change the workaround. */
-	haswell_mode_set_planes_workaround(intel_crtc);
+	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
+	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
+		intel_wait_for_vblank(dev, hsw_workaround_pipe);
+		intel_wait_for_vblank(dev, hsw_workaround_pipe);
+	}
 }
 
 static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -5097,9 +5029,6 @@
 	int pipe = intel_crtc->pipe;
 	u32 reg, temp;
 
-	if (!intel_crtc->active)
-		return;
-
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		encoder->disable(encoder);
 
@@ -5138,18 +5067,11 @@
 			I915_WRITE(PCH_DPLL_SEL, temp);
 		}
 
-		/* disable PCH DPLL */
-		intel_disable_shared_dpll(intel_crtc);
-
 		ironlake_fdi_pll_disable(intel_crtc);
 	}
 
 	intel_crtc->active = false;
 	intel_update_watermarks(crtc);
-
-	mutex_lock(&dev->struct_mutex);
-	intel_fbc_update(dev);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static void haswell_crtc_disable(struct drm_crtc *crtc)
@@ -5160,9 +5082,6 @@
 	struct intel_encoder *encoder;
 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 
-	if (!intel_crtc->active)
-		return;
-
 	for_each_encoder_on_crtc(dev, crtc, encoder) {
 		intel_opregion_notify_encoder(encoder, false);
 		encoder->disable(encoder);
@@ -5182,7 +5101,7 @@
 	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
 	if (INTEL_INFO(dev)->gen == 9)
-		skylake_pfit_update(intel_crtc, 0);
+		skylake_scaler_disable(intel_crtc);
 	else if (INTEL_INFO(dev)->gen < 9)
 		ironlake_pfit_disable(intel_crtc);
 	else
@@ -5201,22 +5120,8 @@
 
 	intel_crtc->active = false;
 	intel_update_watermarks(crtc);
-
-	mutex_lock(&dev->struct_mutex);
-	intel_fbc_update(dev);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (intel_crtc_to_shared_dpll(intel_crtc))
-		intel_disable_shared_dpll(intel_crtc);
 }
 
-static void ironlake_crtc_off(struct drm_crtc *crtc)
-{
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	intel_put_shared_dpll(intel_crtc);
-}
-
-
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
@@ -5252,6 +5157,8 @@
 		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
 	case PORT_D:
 		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
+	case PORT_E:
+		return POWER_DOMAIN_PORT_DDI_E_2_LANES;
 	default:
 		WARN_ON_ONCE(1);
 		return POWER_DOMAIN_PORT_OTHER;
@@ -5298,6 +5205,9 @@
 	unsigned long mask;
 	enum transcoder transcoder;
 
+	if (!crtc->state->active)
+		return 0;
+
 	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
 
 	mask = BIT(POWER_DOMAIN_PIPE(pipe));
@@ -5312,45 +5222,131 @@
 	return mask;
 }
 
+static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum intel_display_power_domain domain;
+	unsigned long domains, new_domains, old_domains;
+
+	old_domains = intel_crtc->enabled_power_domains;
+	intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
+
+	domains = new_domains & ~old_domains;
+
+	for_each_power_domain(domain, domains)
+		intel_display_power_get(dev_priv, domain);
+
+	return old_domains & ~new_domains;
+}
+
+static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
+				      unsigned long domains)
+{
+	enum intel_display_power_domain domain;
+
+	for_each_power_domain(domain, domains)
+		intel_display_power_put(dev_priv, domain);
+}
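
The get-before-put ordering here matters: references on newly needed domains
are taken before the modeset and the stale ones are only dropped afterwards,
so a power well shared by the old and new state never sees its refcount reach
zero in between. The bitmask arithmetic reduces to this (the domain bits are
illustrative):

#include <stdio.h>

int main(void)
{
	/* Hypothetical masks: bit0=PIPE_A, bit1=TRANSCODER_A, bit2=DDI_B. */
	unsigned long old_domains = 0x3;  /* PIPE_A | TRANSCODER_A */
	unsigned long new_domains = 0x6;  /* TRANSCODER_A | DDI_B */

	unsigned long get_now = new_domains & ~old_domains;   /* DDI_B */
	unsigned long put_later = old_domains & ~new_domains; /* PIPE_A */

	/* TRANSCODER_A stays referenced throughout, so it never glitches. */
	printf("get=0x%lx put=0x%lx\n", get_now, put_later);
	return 0;
}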
+
 static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
-	struct intel_crtc *crtc;
+	unsigned long put_domains[I915_MAX_PIPES] = {};
+	struct drm_crtc_state *crtc_state;
+	struct drm_crtc *crtc;
+	int i;
 
-	/*
-	 * First get all needed power domains, then put all unneeded, to avoid
-	 * any unnecessary toggling of the power wells.
-	 */
-	for_each_intel_crtc(dev, crtc) {
-		enum intel_display_power_domain domain;
-
-		if (!crtc->base.state->enable)
-			continue;
-
-		pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
-
-		for_each_power_domain(domain, pipe_domains[crtc->pipe])
-			intel_display_power_get(dev_priv, domain);
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		if (needs_modeset(crtc->state))
+			put_domains[to_intel_crtc(crtc)->pipe] =
+				modeset_get_crtc_power_domains(crtc);
 	}
 
-	if (dev_priv->display.modeset_global_resources)
-		dev_priv->display.modeset_global_resources(state);
+	if (dev_priv->display.modeset_commit_cdclk) {
+		unsigned int cdclk = to_intel_atomic_state(state)->cdclk;
 
-	for_each_intel_crtc(dev, crtc) {
-		enum intel_display_power_domain domain;
-
-		for_each_power_domain(domain, crtc->enabled_power_domains)
-			intel_display_power_put(dev_priv, domain);
-
-		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
+		if (cdclk != dev_priv->cdclk_freq &&
+		    !WARN_ON(!state->allow_modeset))
+			dev_priv->display.modeset_commit_cdclk(state);
 	}
 
-	intel_display_set_init_power(dev_priv, false);
+	for (i = 0; i < I915_MAX_PIPES; i++)
+		if (put_domains[i])
+			modeset_put_power_domains(dev_priv, put_domains[i]);
 }
 
-void broxton_set_cdclk(struct drm_device *dev, int frequency)
+static void intel_update_max_cdclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_SKYLAKE(dev)) {
+		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
+
+		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
+			dev_priv->max_cdclk_freq = 675000;
+		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
+			dev_priv->max_cdclk_freq = 540000;
+		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
+			dev_priv->max_cdclk_freq = 450000;
+		else
+			dev_priv->max_cdclk_freq = 337500;
+	} else if (IS_BROADWELL(dev))  {
+		/*
+		 * FIXME with extra cooling we can allow
+		 * 540 MHz for ULX and 675 MHz for ULT.
+		 * How can we know if extra cooling is
+		 * available? PCI ID, VTB, something else?
+		 */
+		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+			dev_priv->max_cdclk_freq = 450000;
+		else if (IS_BDW_ULX(dev))
+			dev_priv->max_cdclk_freq = 450000;
+		else if (IS_BDW_ULT(dev))
+			dev_priv->max_cdclk_freq = 540000;
+		else
+			dev_priv->max_cdclk_freq = 675000;
+	} else if (IS_CHERRYVIEW(dev)) {
+		dev_priv->max_cdclk_freq = 320000;
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->max_cdclk_freq = 400000;
+	} else {
+		/* otherwise assume cdclk is fixed */
+		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
+	}
+
+	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
+			 dev_priv->max_cdclk_freq);
+}
+
+static void intel_update_cdclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
+			 dev_priv->cdclk_freq);
+
+	/*
+	 * Program the gmbus_freq based on the cdclk frequency.
+	 * BSpec erroneously claims we should aim for 4MHz, but
+	 * in fact 1MHz is the correct frequency.
+	 */
+	if (IS_VALLEYVIEW(dev)) {
+		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
+	}
+
+	if (dev_priv->max_cdclk_freq == 0)
+		intel_update_max_cdclk(dev);
+}
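
The GMBUS programming above is plain round-up division, taking cdclk down to
roughly 1 MHz. With an example 400 MHz cdclk (the value is made up and the
register layout is not modeled):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int cdclk_khz = 400000;                      /* example: 400 MHz */
	int divider = DIV_ROUND_UP(cdclk_khz, 1000); /* 400 */

	/* 400000 kHz / 400 = 1000 kHz, i.e. the intended ~1 MHz GMBUS clock. */
	printf("divider=%d -> gmbus=%d kHz\n", divider, cdclk_khz / divider);
	return 0;
}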
+
+static void broxton_set_cdclk(struct drm_device *dev, int frequency)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t divider;
@@ -5466,7 +5462,7 @@
 		return;
 	}
 
-	dev_priv->cdclk_freq = frequency;
+	intel_update_cdclk(dev);
 }
 
 void broxton_init_cdclk(struct drm_device *dev)
@@ -5641,6 +5637,7 @@
 
 static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
 {
+	struct drm_device *dev = dev_priv->dev;
 	u32 freq_select, pcu_ack;
 
 	DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
@@ -5681,6 +5678,8 @@
 	mutex_lock(&dev_priv->rps.hw_lock);
 	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
 	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	intel_update_cdclk(dev);
 }
 
 void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
@@ -5714,16 +5713,13 @@
 	/* enable PG1 and Misc I/O */
 	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
 
-	/* DPLL0 already enabed !? */
-	if (I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE) {
-		DRM_DEBUG_DRIVER("DPLL0 already running\n");
-		return;
+	/* DPLL0 not enabled (happens on early BIOS versions) */
+	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
+		/* enable DPLL0 */
+		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
+		skl_dpll0_enable(dev_priv, required_vco);
 	}
 
-	/* enable DPLL0 */
-	required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
-	skl_dpll0_enable(dev_priv, required_vco);
-
 	/* set CDCLK to the frequency the BIOS chose */
 	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
 
@@ -5751,22 +5747,6 @@
 	return vco_freq[hpll_freq] * 1000;
 }
 
-static void vlv_update_cdclk(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
-	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
-			 dev_priv->cdclk_freq);
-
-	/*
-	 * Program the gmbus_freq based on the cdclk frequency.
-	 * BSpec erroneously claims we should aim for 4MHz, but
-	 * in fact 1MHz is the correct frequency.
-	 */
-	I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
-}
-
 /* Adjust CDclk dividers to allow high res or save power if possible */
 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 {
@@ -5830,7 +5810,7 @@
 
 	mutex_unlock(&dev_priv->sb_lock);
 
-	vlv_update_cdclk(dev);
+	intel_update_cdclk(dev);
 }
 
 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
@@ -5871,7 +5851,7 @@
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
-	vlv_update_cdclk(dev);
+	intel_update_cdclk(dev);
 }
 
 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
@@ -5934,11 +5914,7 @@
 	int max_pixclk = 0;
 
 	for_each_intel_crtc(dev, intel_crtc) {
-		if (state)
-			crtc_state =
-				intel_atomic_get_crtc_state(state, intel_crtc);
-		else
-			crtc_state = intel_crtc->config;
+		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
 		if (IS_ERR(crtc_state))
 			return PTR_ERR(crtc_state);
 
@@ -5952,39 +5928,32 @@
 	return max_pixclk;
 }
 
-static int valleyview_modeset_global_pipes(struct drm_atomic_state *state)
+static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
 {
-	struct drm_i915_private *dev_priv = to_i915(state->dev);
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-	int max_pixclk = intel_mode_max_pixclk(state->dev, state);
-	int cdclk, i;
+	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int max_pixclk = intel_mode_max_pixclk(dev, state);
 
 	if (max_pixclk < 0)
 		return max_pixclk;
 
-	if (IS_VALLEYVIEW(dev_priv))
-		cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
-	else
-		cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);
+	to_intel_atomic_state(state)->cdclk =
+		valleyview_calc_cdclk(dev_priv, max_pixclk);
 
-	if (cdclk == dev_priv->cdclk_freq)
-		return 0;
+	return 0;
+}
 
-	/* add all active pipes to the state */
-	for_each_crtc(state->dev, crtc) {
-		if (!crtc->state->enable)
-			continue;
+static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int max_pixclk = intel_mode_max_pixclk(dev, state);
 
-		crtc_state = drm_atomic_get_crtc_state(state, crtc);
-		if (IS_ERR(crtc_state))
-			return PTR_ERR(crtc_state);
-	}
+	if (max_pixclk < 0)
+		return max_pixclk;
 
-	/* disable/enable all currently active pipes while we change cdclk */
-	for_each_crtc_in_state(state, crtc, crtc_state, i)
-		if (crtc_state->enable)
-			crtc_state->mode_changed = true;
+	to_intel_atomic_state(state)->cdclk =
+		broxton_calc_cdclk(dev_priv, max_pixclk);
 
 	return 0;
 }
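
Both calc_cdclk helpers reduce to the same shape: pick the lowest supported
CDCLK that can still feed the highest pixel clock in the new state. A generic
sketch of that selection; the frequency table and any platform guardband rule
are assumptions here, not the real VLV/BXT tables:

#include <stdio.h>

/* Hypothetical sorted table of supported CDCLK frequencies, in kHz. */
static const int cdclk_table[] = { 144000, 288000, 384000, 576000, 624000 };
#define N_CDCLK (sizeof(cdclk_table) / sizeof(cdclk_table[0]))

static int calc_cdclk(int max_pixclk)
{
	unsigned int i;

	for (i = 0; i < N_CDCLK; i++)
		if (cdclk_table[i] >= max_pixclk)
			return cdclk_table[i];

	/* Out of range: clamp to the fastest entry we have. */
	return cdclk_table[N_CDCLK - 1];
}

int main(void)
{
	printf("%d\n", calc_cdclk(250000)); /* -> 288000 */
	return 0;
}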
@@ -6001,7 +5970,7 @@
 	if (DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 1000) >= dev_priv->rps.cz_freq) {
 		/* CHV suggested value is 31 or 63 */
 		if (IS_CHERRYVIEW(dev_priv))
-			credits = PFI_CREDIT_31;
+			credits = PFI_CREDIT_63;
 		else
 			credits = PFI_CREDIT(15);
 	} else {
@@ -6025,41 +5994,31 @@
 	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
 }
 
-static void valleyview_modeset_global_resources(struct drm_atomic_state *old_state)
+static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
 	struct drm_device *dev = old_state->dev;
+	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int max_pixclk = intel_mode_max_pixclk(dev, NULL);
-	int req_cdclk;
 
-	/* The path in intel_mode_max_pixclk() with a NULL atomic state should
-	 * never fail. */
-	if (WARN_ON(max_pixclk < 0))
-		return;
+	/*
+	 * FIXME: We can end up here with all power domains off, yet
+	 * with a CDCLK frequency other than the minimum. To account
+	 * for this take the PIPE-A power domain, which covers the HW
+	 * blocks needed for the following programming. This can be
+	 * removed once it's guaranteed that we get here either with
+	 * the minimum CDCLK set, or the required power domains
+	 * enabled.
+	 */
+	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 
-	req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
+	if (IS_CHERRYVIEW(dev))
+		cherryview_set_cdclk(dev, req_cdclk);
+	else
+		valleyview_set_cdclk(dev, req_cdclk);
 
-	if (req_cdclk != dev_priv->cdclk_freq) {
-		/*
-		 * FIXME: We can end up here with all power domains off, yet
-		 * with a CDCLK frequency other than the minimum. To account
-		 * for this take the PIPE-A power domain, which covers the HW
-		 * blocks needed for the following programming. This can be
-		 * removed once it's guaranteed that we get here either with
-		 * the minimum CDCLK set, or the required power domains
-		 * enabled.
-		 */
-		intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+	vlv_program_pfi_credits(dev_priv);
 
-		if (IS_CHERRYVIEW(dev))
-			cherryview_set_cdclk(dev, req_cdclk);
-		else
-			valleyview_set_cdclk(dev, req_cdclk);
-
-		vlv_program_pfi_credits(dev_priv);
-
-		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
-	}
+	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 }
 
 static void valleyview_crtc_enable(struct drm_crtc *crtc)
@@ -6071,9 +6030,7 @@
 	int pipe = intel_crtc->pipe;
 	bool is_dsi;
 
-	WARN_ON(!crtc->state->enable);
-
-	if (intel_crtc->active)
+	if (WARN_ON(intel_crtc->active))
 		return;
 
 	is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
@@ -6122,7 +6079,6 @@
 
 	intel_crtc_load_lut(crtc);
 
-	intel_update_watermarks(crtc);
 	intel_enable_pipe(intel_crtc);
 
 	assert_vblank_disabled(crtc);
@@ -6149,9 +6105,7 @@
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
-	WARN_ON(!crtc->state->enable);
-
-	if (intel_crtc->active)
+	if (WARN_ON(intel_crtc->active))
 		return;
 
 	i9xx_set_pll_dividers(intel_crtc);
@@ -6211,9 +6165,6 @@
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 
-	if (!intel_crtc->active)
-		return;
-
 	/*
 	 * On gen2 planes are double buffered but the pipe isn't, so we must
 	 * wait for planes to fully turn off before disabling the pipe.
@@ -6250,88 +6201,89 @@
 
 	intel_crtc->active = false;
 	intel_update_watermarks(crtc);
-
-	mutex_lock(&dev->struct_mutex);
-	intel_fbc_update(dev);
-	mutex_unlock(&dev->struct_mutex);
 }
 
-static void i9xx_crtc_off(struct drm_crtc *crtc)
+static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 {
-}
-
-/* Master function to enable/disable CRTC and corresponding power wells */
-void intel_crtc_control(struct drm_crtc *crtc, bool enable)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	enum intel_display_power_domain domain;
 	unsigned long domains;
 
-	if (enable) {
-		if (!intel_crtc->active) {
-			domains = get_crtc_power_domains(crtc);
-			for_each_power_domain(domain, domains)
-				intel_display_power_get(dev_priv, domain);
-			intel_crtc->enabled_power_domains = domains;
+	if (!intel_crtc->active)
+		return;
 
-			dev_priv->display.crtc_enable(crtc);
-			intel_crtc_enable_planes(crtc);
-		}
-	} else {
-		if (intel_crtc->active) {
-			intel_crtc_disable_planes(crtc);
-			dev_priv->display.crtc_disable(crtc);
-
-			domains = intel_crtc->enabled_power_domains;
-			for_each_power_domain(domain, domains)
-				intel_display_power_put(dev_priv, domain);
-			intel_crtc->enabled_power_domains = 0;
-		}
+	if (to_intel_plane_state(crtc->primary->state)->visible) {
+		intel_crtc_wait_for_pending_flips(crtc);
+		intel_pre_disable_primary(crtc);
 	}
-}
 
-/**
- * Sets the power management mode of the pipe and plane.
- */
-void intel_crtc_update_dpms(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct intel_encoder *intel_encoder;
-	bool enable = false;
-
-	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
-		enable |= intel_encoder->connectors_active;
-
-	intel_crtc_control(crtc, enable);
-
-	crtc->state->active = enable;
-}
-
-static void intel_crtc_disable(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_connector *connector;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	intel_crtc_disable_planes(crtc);
+	intel_crtc_disable_planes(crtc, crtc->state->plane_mask);
 	dev_priv->display.crtc_disable(crtc);
-	dev_priv->display.off(crtc);
+	intel_disable_shared_dpll(intel_crtc);
 
-	drm_plane_helper_disable(crtc->primary);
+	domains = intel_crtc->enabled_power_domains;
+	for_each_power_domain(domain, domains)
+		intel_display_power_put(dev_priv, domain);
+	intel_crtc->enabled_power_domains = 0;
+}
 
-	/* Update computed state. */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (!connector->encoder || !connector->encoder->crtc)
+/*
+ * Turn all CRTCs off, but do not adjust state. This has to be
+ * paired with a call to intel_modeset_setup_hw_state.
+ */
+int intel_display_suspend(struct drm_device *dev)
+{
+	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
+	struct drm_atomic_state *state;
+	struct drm_crtc *crtc;
+	unsigned crtc_mask = 0;
+	int ret = 0;
+
+	if (WARN_ON(!ctx))
+		return 0;
+
+	lockdep_assert_held(&ctx->ww_ctx);
+	state = drm_atomic_state_alloc(dev);
+	if (WARN_ON(!state))
+		return -ENOMEM;
+
+	state->acquire_ctx = ctx;
+	state->allow_modeset = true;
+
+	for_each_crtc(dev, crtc) {
+		struct drm_crtc_state *crtc_state =
+			drm_atomic_get_crtc_state(state, crtc);
+
+		ret = PTR_ERR_OR_ZERO(crtc_state);
+		if (ret)
+			goto free;
+
+		if (!crtc_state->active)
 			continue;
 
-		if (connector->encoder->crtc != crtc)
-			continue;
-
-		connector->dpms = DRM_MODE_DPMS_OFF;
-		to_intel_encoder(connector->encoder)->connectors_active = false;
+		crtc_state->active = false;
+		crtc_mask |= 1 << drm_crtc_index(crtc);
 	}
+
+	if (crtc_mask) {
+		ret = drm_atomic_commit(state);
+
+		if (!ret) {
+			for_each_crtc(dev, crtc)
+				if (crtc_mask & (1 << drm_crtc_index(crtc)))
+					crtc->state->active = true;
+
+			return ret;
+		}
+	}
+
+free:
+	if (ret)
+		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+	drm_atomic_state_free(state);
+	return ret;
 }
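
intel_display_suspend() disables the pipes through a real atomic commit, but remembers which ones were active in a bitmask and then flips their state->active flags back to true, so the paired intel_modeset_setup_hw_state() on resume knows what to bring back up. The bit bookkeeping in isolation, with example values standing in for the per-CRTC state:

#include <stdio.h>

int main(void)
{
	int active[4] = { 1, 0, 1, 0 };		/* example per-CRTC flags */
	unsigned int crtc_mask = 0;

	for (int i = 0; i < 4; i++) {
		if (!active[i])
			continue;
		active[i] = 0;			/* disabled by the commit */
		crtc_mask |= 1u << i;		/* drm_crtc_index() stand-in */
	}

	for (int i = 0; i < 4; i++)		/* restore for later resume */
		if (crtc_mask & (1u << i))
			active[i] = 1;

	printf("mask = 0x%x\n", crtc_mask);	/* prints 0x5 */
	return 0;
}
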
 
 void intel_encoder_destroy(struct drm_encoder *encoder)
@@ -6342,62 +6294,42 @@
 	kfree(intel_encoder);
 }
 
-/* Simple dpms helper for encoders with just one connector, no cloning and only
- * one kind of off state. It clamps all !ON modes to fully OFF and changes the
- * state of the entire output pipe. */
-static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
-{
-	if (mode == DRM_MODE_DPMS_ON) {
-		encoder->connectors_active = true;
-
-		intel_crtc_update_dpms(encoder->base.crtc);
-	} else {
-		encoder->connectors_active = false;
-
-		intel_crtc_update_dpms(encoder->base.crtc);
-	}
-}
-
 /* Cross check the actual hw state with our own modeset state tracking (and it's
  * internal consistency). */
 static void intel_connector_check_state(struct intel_connector *connector)
 {
+	struct drm_crtc *crtc = connector->base.state->crtc;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+		      connector->base.base.id,
+		      connector->base.name);
+
 	if (connector->get_hw_state(connector)) {
-		struct intel_encoder *encoder = connector->encoder;
-		struct drm_crtc *crtc;
-		bool encoder_enabled;
-		enum pipe pipe;
+		struct drm_encoder *encoder = &connector->encoder->base;
+		struct drm_connector_state *conn_state = connector->base.state;
 
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-			      connector->base.base.id,
-			      connector->base.name);
+		I915_STATE_WARN(!crtc,
+			 "connector enabled without attached crtc\n");
 
-		/* there is no real hw state for MST connectors */
-		if (connector->mst_port)
+		if (!crtc)
 			return;
 
-		I915_STATE_WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
-		     "wrong connector dpms state\n");
-		I915_STATE_WARN(connector->base.encoder != &encoder->base,
-		     "active connector not linked to encoder\n");
+		I915_STATE_WARN(!crtc->state->active,
+		      "connector is active, but attached crtc isn't\n");
 
-		if (encoder) {
-			I915_STATE_WARN(!encoder->connectors_active,
-			     "encoder->connectors_active not set\n");
+		if (!encoder)
+			return;
 
-			encoder_enabled = encoder->get_hw_state(encoder, &pipe);
-			I915_STATE_WARN(!encoder_enabled, "encoder not enabled\n");
-			if (I915_STATE_WARN_ON(!encoder->base.crtc))
-				return;
+		I915_STATE_WARN(conn_state->best_encoder != encoder,
+			"atomic encoder doesn't match attached encoder\n");
 
-			crtc = encoder->base.crtc;
-
-			I915_STATE_WARN(!crtc->state->enable,
-					"crtc not enabled\n");
-			I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
-			I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
-			     "encoder active on the wrong pipe\n");
-		}
+		I915_STATE_WARN(conn_state->crtc != encoder->crtc,
+			"attached encoder crtc differs from connector crtc\n");
+	} else {
+		I915_STATE_WARN(crtc && crtc->state->active,
+			"attached crtc is active, but connector isn't\n");
+		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
+			"best encoder set without crtc!\n");
 	}
 }
 
@@ -6429,26 +6361,6 @@
 	return connector;
 }
 
-/* Even simpler default implementation, if there's really no special case to
- * consider. */
-void intel_connector_dpms(struct drm_connector *connector, int mode)
-{
-	/* All the simple cases only support two dpms states. */
-	if (mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
-
-	if (mode == connector->dpms)
-		return;
-
-	connector->dpms = mode;
-
-	/* Only need to change hw state when actually enabled */
-	if (connector->encoder)
-		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
-
-	intel_modeset_check_state(connector->dev);
-}
-
 /* Simple connector->get_hw_state implementation for encoders that support only
  * one connector and no cloning and hence the encoder state determines the state
  * of the connector. */
@@ -6586,12 +6498,36 @@
 	return ret;
 }
 
+static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
+				     struct intel_crtc_state *pipe_config)
+{
+	if (pipe_config->pipe_bpp > 24)
+		return false;
+
+	/* HSW can handle pixel rate up to cdclk? */
+	if (IS_HASWELL(dev_priv->dev))
+		return true;
+
+	/*
+	 * We compare against max which means we must take
+	 * the increased cdclk requirement into account when
+	 * calculating the new cdclk.
+	 *
+	 * Should measure whether using a lower cdclk w/o IPS is worthwhile.
+	 */
+	return ilk_pipe_pixel_rate(pipe_config) <=
+		dev_priv->max_cdclk_freq * 95 / 100;
+}
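
The non-Haswell branch leaves 5% of headroom: IPS is allowed only while the pipe pixel rate stays at or below 95% of the maximum cdclk. With an assumed 450000 kHz max cdclk, the cutoff works out as follows:

#include <stdio.h>

/* Mirrors the 95% headroom rule above; 450000 kHz is an example value. */
int main(void)
{
	int max_cdclk_freq = 450000;		/* kHz, assumed */
	int limit = max_cdclk_freq * 95 / 100;	/* integer math, as in the check */

	printf("IPS allowed up to %d kHz pixel rate\n", limit);	/* 427500 */
	return 0;
}
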
+
 static void hsw_compute_ips_config(struct intel_crtc *crtc,
 				   struct intel_crtc_state *pipe_config)
 {
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	pipe_config->ips_enabled = i915.enable_ips &&
-				   hsw_crtc_supports_ips(crtc) &&
-				   pipe_config->pipe_bpp <= 24;
+		hsw_crtc_supports_ips(crtc) &&
+		pipe_config_supports_ips(dev_priv, pipe_config);
 }
 
 static int intel_crtc_compute_config(struct intel_crtc *crtc,
@@ -6600,12 +6536,10 @@
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	int ret;
 
 	/* FIXME should check pixel clock limits on all platforms */
 	if (INTEL_INFO(dev)->gen < 4) {
-		int clock_limit =
-			dev_priv->display.get_display_clock_speed(dev);
+		int clock_limit = dev_priv->max_cdclk_freq;
 
 		/*
 		 * Enable pixel doubling when the dot clock
@@ -6647,14 +6581,7 @@
 	if (pipe_config->has_pch_encoder)
 		return ironlake_fdi_compute_config(crtc, pipe_config);
 
-	/* FIXME: remove below call once atomic mode set is place and all crtc
-	 * related checks called from atomic_crtc_check function */
-	ret = 0;
-	DRM_DEBUG_KMS("intel_crtc = %p drm_state (pipe_config->base.state) = %p\n",
-		crtc, pipe_config->base.state);
-	ret = intel_atomic_setup_scalers(dev, crtc, pipe_config);
-
-	return ret;
+	return 0;
 }
 
 static int skylake_get_display_clock_speed(struct drm_device *dev)
@@ -6664,10 +6591,8 @@
 	uint32_t cdctl = I915_READ(CDCLK_CTL);
 	uint32_t linkrate;
 
-	if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
-		WARN(1, "LCPLL1 not enabled\n");
+	if (!(lcpll1 & LCPLL_PLL_ENABLE))
 		return 24000; /* 24MHz is the cd freq with NSSC ref */
-	}
 
 	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
 		return 540000;
@@ -6706,6 +6631,34 @@
 	return 24000;
 }
 
+static int broxton_get_display_clock_speed(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	uint32_t cdctl = I915_READ(CDCLK_CTL);
+	uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
+	uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
+	int cdclk;
+
+	if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
+		return 19200;
+
+	cdclk = 19200 * pll_ratio / 2;
+
+	switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
+	case BXT_CDCLK_CD2X_DIV_SEL_1:
+		return cdclk;  /* 576MHz or 624MHz */
+	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
+		return cdclk * 2 / 3; /* 384MHz */
+	case BXT_CDCLK_CD2X_DIV_SEL_2:
+		return cdclk / 2; /* 288MHz */
+	case BXT_CDCLK_CD2X_DIV_SEL_4:
+		return cdclk / 4; /* 144MHz */
+	}
+
+	/* error case, do as if DE PLL isn't enabled */
+	return 19200;
+}
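
To make the decode above concrete: with an assumed DE PLL ratio of 60, the 2x clock is 19200 * 60 / 2 = 576000 kHz, and the CD2X divider then selects the final cdclk, matching the per-case comments:

#include <stdio.h>

int main(void)
{
	int pll_ratio = 60;			/* example ratio from BXT_DE_PLL_CTL */
	int cdclk_2x = 19200 * pll_ratio / 2;	/* 576000 kHz */

	printf("div 1:   %d kHz\n", cdclk_2x);		/* 576000 */
	printf("div 1.5: %d kHz\n", cdclk_2x * 2 / 3);	/* 384000 */
	printf("div 2:   %d kHz\n", cdclk_2x / 2);	/* 288000 */
	printf("div 4:   %d kHz\n", cdclk_2x / 4);	/* 144000 */
	return 0;
}
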
+
 static int broadwell_get_display_clock_speed(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6834,20 +6787,37 @@
 	return 266667;
 }
 
-static int i855_get_display_clock_speed(struct drm_device *dev)
+static int i85x_get_display_clock_speed(struct drm_device *dev)
 {
 	u16 hpllcc = 0;
+
+	/*
+	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
+	 * encoding is different :(
+	 * FIXME is this the right way to detect 852GM/852GMV?
+	 */
+	if (dev->pdev->revision == 0x1)
+		return 133333;
+
+	pci_bus_read_config_word(dev->pdev->bus,
+				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
+
 	/* Assume that the hardware is in the high speed state.  This
 	 * should be the default.
 	 */
 	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
 	case GC_CLOCK_133_200:
+	case GC_CLOCK_133_200_2:
 	case GC_CLOCK_100_200:
 		return 200000;
 	case GC_CLOCK_166_250:
 		return 250000;
 	case GC_CLOCK_100_133:
 		return 133333;
+	case GC_CLOCK_133_266:
+	case GC_CLOCK_133_266_2:
+	case GC_CLOCK_166_266:
+		return 266667;
 	}
 
 	/* Shouldn't happen */
@@ -6859,6 +6829,175 @@
 	return 133333;
 }
 
+static unsigned int intel_hpll_vco(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	static const unsigned int blb_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 4800000,
+		[4] = 6400000,
+	};
+	static const unsigned int pnv_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 4800000,
+		[4] = 2666667,
+	};
+	static const unsigned int cl_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 6400000,
+		[4] = 3333333,
+		[5] = 3566667,
+		[6] = 4266667,
+	};
+	static const unsigned int elk_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 4800000,
+	};
+	static const unsigned int ctg_vco[8] = {
+		[0] = 3200000,
+		[1] = 4000000,
+		[2] = 5333333,
+		[3] = 6400000,
+		[4] = 2666667,
+		[5] = 4266667,
+	};
+	const unsigned int *vco_table;
+	unsigned int vco;
+	uint8_t tmp = 0;
+
+	/* FIXME other chipsets? */
+	if (IS_GM45(dev))
+		vco_table = ctg_vco;
+	else if (IS_G4X(dev))
+		vco_table = elk_vco;
+	else if (IS_CRESTLINE(dev))
+		vco_table = cl_vco;
+	else if (IS_PINEVIEW(dev))
+		vco_table = pnv_vco;
+	else if (IS_G33(dev))
+		vco_table = blb_vco;
+	else
+		return 0;
+
+	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
+
+	vco = vco_table[tmp & 0x7];
+	if (vco == 0)
+		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
+	else
+		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
+
+	return vco;
+}
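
The helper reduces to a masked table lookup: the low three bits of the HPLLVCO register index a per-chipset array of VCO frequencies in kHz. A worked example with an assumed readout of 0x02 against the GM45 (ctg) table:

#include <stdio.h>

static const unsigned int ctg_vco[8] = {
	3200000, 4000000, 5333333, 6400000, 2666667, 4266667,
};

int main(void)
{
	unsigned char tmp = 0x02;		/* example HPLLVCO readout */
	unsigned int vco = ctg_vco[tmp & 0x7];

	printf("HPLL VCO %u kHz\n", vco);	/* 5333333 */
	return 0;
}
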
+
+static int gm45_get_display_clock_speed(struct drm_device *dev)
+{
+	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+	uint16_t tmp = 0;
+
+	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+	cdclk_sel = (tmp >> 12) & 0x1;
+
+	switch (vco) {
+	case 2666667:
+	case 4000000:
+	case 5333333:
+		return cdclk_sel ? 333333 : 222222;
+	case 3200000:
+		return cdclk_sel ? 320000 : 228571;
+	default:
+		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
+		return 222222;
+	}
+}
+
+static int i965gm_get_display_clock_speed(struct drm_device *dev)
+{
+	static const uint8_t div_3200[] = { 16, 10,  8 };
+	static const uint8_t div_4000[] = { 20, 12, 10 };
+	static const uint8_t div_5333[] = { 24, 16, 14 };
+	const uint8_t *div_table;
+	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+	uint16_t tmp = 0;
+
+	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
+
+	if (cdclk_sel >= ARRAY_SIZE(div_3200))
+		goto fail;
+
+	switch (vco) {
+	case 3200000:
+		div_table = div_3200;
+		break;
+	case 4000000:
+		div_table = div_4000;
+		break;
+	case 5333333:
+		div_table = div_5333;
+		break;
+	default:
+		goto fail;
+	}
+
+	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
+
+fail:
+	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
+	return 200000;
+}
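
So the i965GM cdclk is just DIV_ROUND_CLOSEST(vco, div), with the divider picked out of a per-VCO table by GCFGC bits 12:8 (minus one). A worked example with assumed register values:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	static const unsigned char div_3200[] = { 16, 10, 8 };
	unsigned int vco = 3200000;	/* kHz, from intel_hpll_vco() */
	unsigned int cdclk_sel = 0;	/* ((GCFGC >> 8) & 0x1f) - 1, assumed */

	printf("cdclk = %u kHz\n",
	       DIV_ROUND_CLOSEST(vco, div_3200[cdclk_sel]));	/* 200000 */
	return 0;
}
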
+
+static int g33_get_display_clock_speed(struct drm_device *dev)
+{
+	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
+	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
+	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
+	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
+	const uint8_t *div_table;
+	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
+	uint16_t tmp = 0;
+
+	pci_read_config_word(dev->pdev, GCFGC, &tmp);
+
+	cdclk_sel = (tmp >> 4) & 0x7;
+
+	if (cdclk_sel >= ARRAY_SIZE(div_3200))
+		goto fail;
+
+	switch (vco) {
+	case 3200000:
+		div_table = div_3200;
+		break;
+	case 4000000:
+		div_table = div_4000;
+		break;
+	case 4800000:
+		div_table = div_4800;
+		break;
+	case 5333333:
+		div_table = div_5333;
+		break;
+	default:
+		goto fail;
+	}
+
+	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
+
+fail:
+	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
+	return 190476;
+}
+
 static void
 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
 {
@@ -7064,8 +7203,8 @@
 		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
 }
 
-static void vlv_update_pll(struct intel_crtc *crtc,
-			   struct intel_crtc_state *pipe_config)
+static void vlv_compute_dpll(struct intel_crtc *crtc,
+			     struct intel_crtc_state *pipe_config)
 {
 	u32 dpll, dpll_md;
 
@@ -7074,8 +7213,8 @@
 	 * clock for pipe B, since VGA hotplug / manual detection depends
 	 * on it.
 	 */
-	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
-		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
+	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
+		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
 	/* We should never disable this, set it here for state tracking */
 	if (crtc->pipe == PIPE_B)
 		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
@@ -7178,11 +7317,11 @@
 	mutex_unlock(&dev_priv->sb_lock);
 }
 
-static void chv_update_pll(struct intel_crtc *crtc,
-			   struct intel_crtc_state *pipe_config)
+static void chv_compute_dpll(struct intel_crtc *crtc,
+			     struct intel_crtc_state *pipe_config)
 {
-	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
-		DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
+	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
 		DPLL_VCO_ENABLE;
 	if (crtc->pipe != PIPE_A)
 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
@@ -7318,11 +7457,11 @@
 	};
 
 	if (IS_CHERRYVIEW(dev)) {
-		chv_update_pll(crtc, &pipe_config);
+		chv_compute_dpll(crtc, &pipe_config);
 		chv_prepare_pll(crtc, &pipe_config);
 		chv_enable_pll(crtc, &pipe_config);
 	} else {
-		vlv_update_pll(crtc, &pipe_config);
+		vlv_compute_dpll(crtc, &pipe_config);
 		vlv_prepare_pll(crtc, &pipe_config);
 		vlv_enable_pll(crtc, &pipe_config);
 	}
@@ -7344,10 +7483,10 @@
 		vlv_disable_pll(to_i915(dev), pipe);
 }
 
-static void i9xx_update_pll(struct intel_crtc *crtc,
-			    struct intel_crtc_state *crtc_state,
-			    intel_clock_t *reduced_clock,
-			    int num_connectors)
+static void i9xx_compute_dpll(struct intel_crtc *crtc,
+			      struct intel_crtc_state *crtc_state,
+			      intel_clock_t *reduced_clock,
+			      int num_connectors)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7421,10 +7560,10 @@
 	}
 }
 
-static void i8xx_update_pll(struct intel_crtc *crtc,
-			    struct intel_crtc_state *crtc_state,
-			    intel_clock_t *reduced_clock,
-			    int num_connectors)
+static void i8xx_compute_dpll(struct intel_crtc *crtc,
+			      struct intel_crtc_state *crtc_state,
+			      intel_clock_t *reduced_clock,
+			      int num_connectors)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7584,9 +7723,14 @@
 	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
 
 	mode->flags = pipe_config->base.adjusted_mode.flags;
+	mode->type = DRM_MODE_TYPE_DRIVER;
 
 	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
 	mode->flags |= pipe_config->base.adjusted_mode.flags;
+
+	mode->hsync = drm_mode_hsync(mode);
+	mode->vrefresh = drm_mode_vrefresh(mode);
+	drm_mode_set_name(mode);
 }
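
Filling in type, hsync, vrefresh and the name makes the readout usable as a normal probed mode. For a plain progressive mode, drm_mode_vrefresh() works out to roughly clock * 1000 / (htotal * vtotal), ignoring rounding and the interlace/doublescan corrections; with standard 1080p60 CEA timings as example values:

#include <stdio.h>

int main(void)
{
	int clock = 148500;			/* kHz, 1080p60 pixel clock */
	int htotal = 2200, vtotal = 1125;
	int vrefresh = clock * 1000 / (htotal * vtotal);

	printf("vrefresh = %d Hz\n", vrefresh);	/* 60 */
	return 0;
}
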
 
 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
@@ -7658,9 +7802,9 @@
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int refclk, num_connectors = 0;
-	intel_clock_t clock, reduced_clock;
-	bool ok, has_reduced_clock = false;
-	bool is_lvds = false, is_dsi = false;
+	intel_clock_t clock;
+	bool ok;
+	bool is_dsi = false;
 	struct intel_encoder *encoder;
 	const intel_limit_t *limit;
 	struct drm_atomic_state *state = crtc_state->base.state;
@@ -7678,9 +7822,6 @@
 		encoder = to_intel_encoder(connector_state->best_encoder);
 
 		switch (encoder->type) {
-		case INTEL_OUTPUT_LVDS:
-			is_lvds = true;
-			break;
 		case INTEL_OUTPUT_DSI:
 			is_dsi = true;
 			break;
@@ -7712,19 +7853,6 @@
 			return -EINVAL;
 		}
 
-		if (is_lvds && dev_priv->lvds_downclock_avail) {
-			/*
-			 * Ensure we match the reduced clock's P to the target
-			 * clock.  If the clocks don't match, we can't switch
-			 * the display clock by using the FP0/FP1. In such case
-			 * we will disable the LVDS downclock feature.
-			 */
-			has_reduced_clock =
-				dev_priv->display.find_dpll(limit, crtc_state,
-							    dev_priv->lvds_downclock,
-							    refclk, &clock,
-							    &reduced_clock);
-		}
 		/* Compat-code for transition, will disappear. */
 		crtc_state->dpll.n = clock.n;
 		crtc_state->dpll.m1 = clock.m1;
@@ -7734,17 +7862,15 @@
 	}
 
 	if (IS_GEN2(dev)) {
-		i8xx_update_pll(crtc, crtc_state,
-				has_reduced_clock ? &reduced_clock : NULL,
-				num_connectors);
+		i8xx_compute_dpll(crtc, crtc_state, NULL,
+				  num_connectors);
 	} else if (IS_CHERRYVIEW(dev)) {
-		chv_update_pll(crtc, crtc_state);
+		chv_compute_dpll(crtc, crtc_state);
 	} else if (IS_VALLEYVIEW(dev)) {
-		vlv_update_pll(crtc, crtc_state);
+		vlv_compute_dpll(crtc, crtc_state);
 	} else {
-		i9xx_update_pll(crtc, crtc_state,
-				has_reduced_clock ? &reduced_clock : NULL,
-				num_connectors);
+		i9xx_compute_dpll(crtc, crtc_state, NULL,
+				  num_connectors);
 	}
 
 	return 0;
@@ -7804,10 +7930,7 @@
 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
 
-	vlv_clock(refclk, &clock);
-
-	/* clock.dot is the fast clock */
-	pipe_config->port_clock = clock.dot / 5;
+	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
 }
 
 static void
@@ -7906,10 +8029,7 @@
 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
 
-	chv_clock(refclk, &clock);
-
-	/* clock.dot is the fast clock */
-	pipe_config->port_clock = clock.dot / 5;
+	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
 }
 
 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
@@ -8558,9 +8678,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int refclk;
 	const intel_limit_t *limit;
-	bool ret, is_lvds = false;
-
-	is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
+	bool ret;
 
 	refclk = ironlake_get_refclk(crtc_state);
 
@@ -8576,20 +8694,6 @@
 	if (!ret)
 		return false;
 
-	if (is_lvds && dev_priv->lvds_downclock_avail) {
-		/*
-		 * Ensure we match the reduced clock's P to the target clock.
-		 * If the clocks don't match, we can't switch the display clock
-		 * by using the FP0/FP1. In such case we will disable the LVDS
-		 * downclock feature.
-		*/
-		*has_reduced_clock =
-			dev_priv->display.find_dpll(limit, crtc_state,
-						    dev_priv->lvds_downclock,
-						    refclk, clock,
-						    reduced_clock);
-	}
-
 	return true;
 }
 
@@ -9297,6 +9401,7 @@
 	}
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_update_cdclk(dev_priv->dev);
 }
 
 /*
@@ -9358,21 +9463,160 @@
 	intel_prepare_ddi(dev);
 }
 
-static void broxton_modeset_global_resources(struct drm_atomic_state *old_state)
+static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
 	struct drm_device *dev = old_state->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int max_pixclk = intel_mode_max_pixclk(dev, NULL);
-	int req_cdclk;
+	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
 
-	/* see the comment in valleyview_modeset_global_resources */
-	if (WARN_ON(max_pixclk < 0))
+	broxton_set_cdclk(dev, req_cdclk);
+}
+
+/* compute the max rate for new configuration */
+static int ilk_max_pixel_rate(struct drm_atomic_state *state)
+{
+	struct intel_crtc *intel_crtc;
+	struct intel_crtc_state *crtc_state;
+	int max_pixel_rate = 0;
+
+	for_each_intel_crtc(state->dev, intel_crtc) {
+		int pixel_rate;
+
+		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
+
+		if (!crtc_state->base.enable)
+			continue;
+
+		pixel_rate = ilk_pipe_pixel_rate(crtc_state);
+
+		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+		if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
+			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+
+		max_pixel_rate = max(max_pixel_rate, pixel_rate);
+	}
+
+	return max_pixel_rate;
+}
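
The BDW IPS case inverts the 95% rule from pipe_config_supports_ips(): the pipe's rate is scaled up by 100/95, rounding up, so the cdclk chosen later still leaves the required margin. With an assumed pixel rate sitting exactly at the IPS limit:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int pixel_rate = 427500;	/* kHz, example value at the IPS limit */
	int adjusted = DIV_ROUND_UP(pixel_rate * 100, 95);

	printf("%d kHz -> %d kHz after IPS adjustment\n", pixel_rate, adjusted);
	return 0;	/* prints 427500 -> 450000 */
}
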
+
+static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t val, data;
+	int ret;
+
+	if (WARN((I915_READ(LCPLL_CTL) &
+		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
+		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
+		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
+		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
+		 "trying to change cdclk frequency with cdclk not enabled\n"))
 		return;
 
-	req_cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);
+	mutex_lock(&dev_priv->rps.hw_lock);
+	ret = sandybridge_pcode_write(dev_priv,
+				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+	if (ret) {
+		DRM_ERROR("failed to inform pcode about cdclk change\n");
+		return;
+	}
 
-	if (req_cdclk != dev_priv->cdclk_freq)
-		broxton_set_cdclk(dev, req_cdclk);
+	val = I915_READ(LCPLL_CTL);
+	val |= LCPLL_CD_SOURCE_FCLK;
+	I915_WRITE(LCPLL_CTL, val);
+
+	if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
+			       LCPLL_CD_SOURCE_FCLK_DONE, 1))
+		DRM_ERROR("Switching to FCLK failed\n");
+
+	val = I915_READ(LCPLL_CTL);
+	val &= ~LCPLL_CLK_FREQ_MASK;
+
+	switch (cdclk) {
+	case 450000:
+		val |= LCPLL_CLK_FREQ_450;
+		data = 0;
+		break;
+	case 540000:
+		val |= LCPLL_CLK_FREQ_54O_BDW;
+		data = 1;
+		break;
+	case 337500:
+		val |= LCPLL_CLK_FREQ_337_5_BDW;
+		data = 2;
+		break;
+	case 675000:
+		val |= LCPLL_CLK_FREQ_675_BDW;
+		data = 3;
+		break;
+	default:
+		WARN(1, "invalid cdclk frequency\n");
+		return;
+	}
+
+	I915_WRITE(LCPLL_CTL, val);
+
+	val = I915_READ(LCPLL_CTL);
+	val &= ~LCPLL_CD_SOURCE_FCLK;
+	I915_WRITE(LCPLL_CTL, val);
+
+	if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
+				LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+		DRM_ERROR("Switching back to LCPLL failed\n");
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	intel_update_cdclk(dev);
+
+	WARN(cdclk != dev_priv->cdclk_freq,
+	     "cdclk requested %d kHz but got %d kHz\n",
+	     cdclk, dev_priv->cdclk_freq);
+}
+
+static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->dev);
+	int max_pixclk = ilk_max_pixel_rate(state);
+	int cdclk;
+
+	/*
+	 * FIXME should also account for plane ratio
+	 * once 64bpp pixel formats are supported.
+	 */
+	if (max_pixclk > 540000)
+		cdclk = 675000;
+	else if (max_pixclk > 450000)
+		cdclk = 540000;
+	else if (max_pixclk > 337500)
+		cdclk = 450000;
+	else
+		cdclk = 337500;
+
+	/*
+	 * FIXME move the cdclk calculation to
+	 * compute_config() so we can fail gracefully.
+	 */
+	if (cdclk > dev_priv->max_cdclk_freq) {
+		DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+			  cdclk, dev_priv->max_cdclk_freq);
+		cdclk = dev_priv->max_cdclk_freq;
+	}
+
+	to_intel_atomic_state(state)->cdclk = cdclk;
+
+	return 0;
+}
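
The ladder above picks the smallest supported Broadwell cdclk that still covers the max pixel rate, then clamps to max_cdclk_freq. Spot-checking the thresholds with a standalone copy of the selection:

#include <stdio.h>

static int bdw_pick_cdclk(int max_pixclk)
{
	if (max_pixclk > 540000)
		return 675000;
	else if (max_pixclk > 450000)
		return 540000;
	else if (max_pixclk > 337500)
		return 450000;
	else
		return 337500;
}

int main(void)
{
	printf("%d\n", bdw_pick_cdclk(300000));	/* 337500 */
	printf("%d\n", bdw_pick_cdclk(475000));	/* 540000 */
	printf("%d\n", bdw_pick_cdclk(600000));	/* 675000 */
	return 0;
}
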
+
+static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+{
+	struct drm_device *dev = old_state->dev;
+	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+
+	broadwell_set_cdclk(dev, req_cdclk);
 }
 
 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
@@ -9889,7 +10133,7 @@
 mode_fits_in_fbdev(struct drm_device *dev,
 		   struct drm_display_mode *mode)
 {
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct drm_framebuffer *fb;
@@ -9978,7 +10222,7 @@
 retry:
 	ret = drm_modeset_lock(&config->connection_mutex, ctx);
 	if (ret)
-		goto fail_unlock;
+		goto fail;
 
 	/*
 	 * Algorithm gets a little messy:
@@ -9996,10 +10240,10 @@
 
 		ret = drm_modeset_lock(&crtc->mutex, ctx);
 		if (ret)
-			goto fail_unlock;
+			goto fail;
 		ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
 		if (ret)
-			goto fail_unlock;
+			goto fail;
 
 		old->dpms_mode = connector->dpms;
 		old->load_detect_temp = false;
@@ -10018,9 +10262,6 @@
 			continue;
 		if (possible_crtc->state->enable)
 			continue;
-		/* This can occur when applying the pipe A quirk on resume. */
-		if (to_intel_crtc(possible_crtc)->new_enabled)
-			continue;
 
 		crtc = possible_crtc;
 		break;
@@ -10031,20 +10272,17 @@
 	 */
 	if (!crtc) {
 		DRM_DEBUG_KMS("no pipe available for load-detect\n");
-		goto fail_unlock;
+		goto fail;
 	}
 
 	ret = drm_modeset_lock(&crtc->mutex, ctx);
 	if (ret)
-		goto fail_unlock;
+		goto fail;
 	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
 	if (ret)
-		goto fail_unlock;
-	intel_encoder->new_crtc = to_intel_crtc(crtc);
-	to_intel_connector(connector)->new_encoder = intel_encoder;
+		goto fail;
 
 	intel_crtc = to_intel_crtc(crtc);
-	intel_crtc->new_enabled = true;
 	old->dpms_mode = connector->dpms;
 	old->load_detect_temp = true;
 	old->release_fb = NULL;
@@ -10100,7 +10338,7 @@
 
 	drm_mode_copy(&crtc_state->base.mode, mode);
 
-	if (intel_set_mode(crtc, state, true)) {
+	if (drm_atomic_commit(state)) {
 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
 		if (old->release_fb)
 			old->release_fb->funcs->destroy(old->release_fb);
@@ -10112,9 +10350,7 @@
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 	return true;
 
- fail:
-	intel_crtc->new_enabled = crtc->state->enable;
-fail_unlock:
+fail:
 	drm_atomic_state_free(state);
 	state = NULL;
 
@@ -10160,10 +10396,6 @@
 		if (IS_ERR(crtc_state))
 			goto fail;
 
-		to_intel_connector(connector)->new_encoder = NULL;
-		intel_encoder->new_crtc = NULL;
-		intel_crtc->new_enabled = false;
-
 		connector_state->best_encoder = NULL;
 		connector_state->crtc = NULL;
 
@@ -10174,7 +10406,7 @@
 		if (ret)
 			goto fail;
 
-		ret = intel_set_mode(crtc, state, true);
+		ret = drm_atomic_commit(state);
 		if (ret)
 			goto fail;
 
@@ -10222,6 +10454,7 @@
 	u32 dpll = pipe_config->dpll_hw_state.dpll;
 	u32 fp;
 	intel_clock_t clock;
+	int port_clock;
 	int refclk = i9xx_pll_refclk(dev, pipe_config);
 
 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
@@ -10262,9 +10495,9 @@
 		}
 
 		if (IS_PINEVIEW(dev))
-			pineview_clock(refclk, &clock);
+			port_clock = pnv_calc_dpll_params(refclk, &clock);
 		else
-			i9xx_clock(refclk, &clock);
+			port_clock = i9xx_calc_dpll_params(refclk, &clock);
 	} else {
 		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
 		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
@@ -10290,7 +10523,7 @@
 				clock.p2 = 2;
 		}
 
-		i9xx_clock(refclk, &clock);
+		port_clock = i9xx_calc_dpll_params(refclk, &clock);
 	}
 
 	/*
@@ -10298,7 +10531,7 @@
 	 * port_clock to compute adjusted_mode.crtc_clock in the
 	 * encoder's get_config() function.
 	 */
-	pipe_config->port_clock = clock.dot;
+	pipe_config->port_clock = port_clock;
 }
 
 int intel_dotclock_calculate(int link_freq,
@@ -10387,42 +10620,6 @@
 	return mode;
 }
 
-static void intel_decrease_pllclock(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	if (!HAS_GMCH_DISPLAY(dev))
-		return;
-
-	if (!dev_priv->lvds_downclock_avail)
-		return;
-
-	/*
-	 * Since this is called by a timer, we should never get here in
-	 * the manual case.
-	 */
-	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
-		int pipe = intel_crtc->pipe;
-		int dpll_reg = DPLL(pipe);
-		int dpll;
-
-		DRM_DEBUG_DRIVER("downclocking LVDS\n");
-
-		assert_panel_unlocked(dev_priv, pipe);
-
-		dpll = I915_READ(dpll_reg);
-		dpll |= DISPLAY_RATE_SELECT_FPA1;
-		I915_WRITE(dpll_reg, dpll);
-		intel_wait_for_vblank(dev, pipe);
-		dpll = I915_READ(dpll_reg);
-		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
-			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
-	}
-
-}
-
 void intel_mark_busy(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10440,20 +10637,12 @@
 void intel_mark_idle(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc;
 
 	if (!dev_priv->mm.busy)
 		return;
 
 	dev_priv->mm.busy = false;
 
-	for_each_crtc(dev, crtc) {
-		if (!crtc->primary->fb)
-			continue;
-
-		intel_decrease_pllclock(crtc);
-	}
-
 	if (INTEL_INFO(dev)->gen >= 6)
 		gen6_rps_idle(dev->dev_private);
 
@@ -10485,24 +10674,23 @@
 {
 	struct intel_unpin_work *work =
 		container_of(__work, struct intel_unpin_work, work);
-	struct drm_device *dev = work->crtc->dev;
-	enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
+	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_plane *primary = crtc->base.primary;
 
 	mutex_lock(&dev->struct_mutex);
-	intel_unpin_fb_obj(work->old_fb, work->crtc->primary->state);
+	intel_unpin_fb_obj(work->old_fb, primary->state);
 	drm_gem_object_unreference(&work->pending_flip_obj->base);
 
-	intel_fbc_update(dev);
-
 	if (work->flip_queued_req)
 		i915_gem_request_assign(&work->flip_queued_req, NULL);
 	mutex_unlock(&dev->struct_mutex);
 
-	intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
 	drm_framebuffer_unreference(work->old_fb);
 
-	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
-	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
+	atomic_dec(&crtc->unpin_work_count);
 
 	kfree(work);
 }
@@ -10635,14 +10823,15 @@
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -10662,7 +10851,6 @@
 	intel_ring_emit(ring, 0); /* aux display base address, unused */
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
 	return 0;
 }
 
@@ -10670,14 +10858,15 @@
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -10694,7 +10883,6 @@
 	intel_ring_emit(ring, MI_NOOP);
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
 	return 0;
 }
 
@@ -10702,15 +10890,16 @@
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -10733,7 +10922,6 @@
 	intel_ring_emit(ring, pf | pipesrc);
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
 	return 0;
 }
 
@@ -10741,15 +10929,16 @@
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -10769,7 +10958,6 @@
 	intel_ring_emit(ring, pf | pipesrc);
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
 	return 0;
 }
 
@@ -10777,9 +10965,10 @@
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
 				 struct drm_i915_gem_object *obj,
-				 struct intel_engine_cs *ring,
+				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t plane_bit = 0;
 	int len, ret;
@@ -10821,11 +11010,11 @@
 	 * then do the cacheline alignment, and finally emit the
 	 * MI_DISPLAY_FLIP.
 	 */
-	ret = intel_ring_cacheline_align(ring);
+	ret = intel_ring_cacheline_align(req);
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(ring, len);
+	ret = intel_ring_begin(req, len);
 	if (ret)
 		return ret;
 
@@ -10864,7 +11053,6 @@
 	intel_ring_emit(ring, (MI_NOOP));
 
 	intel_mark_page_flip_active(intel_crtc);
-	__intel_ring_advance(ring);
 	return 0;
 }
 
@@ -10973,12 +11161,11 @@
 static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
-	bool atomic_update;
 	u32 start_vbl_count;
 
 	intel_mark_page_flip_active(intel_crtc);
 
-	atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
+	intel_pipe_update_start(intel_crtc, &start_vbl_count);
 
 	if (INTEL_INFO(dev)->gen >= 9)
 		skl_do_mmio_flip(intel_crtc);
@@ -10986,8 +11173,7 @@
 		/* use_mmio_flip() restricts MMIO flips to ilk+ */
 		ilk_do_mmio_flip(intel_crtc);
 
-	if (atomic_update)
-		intel_pipe_update_end(intel_crtc, start_vbl_count);
+	intel_pipe_update_end(intel_crtc, start_vbl_count);
 }
 
 static void intel_mmio_flip_work_func(struct work_struct *work)
@@ -11034,7 +11220,7 @@
 				    struct drm_crtc *crtc,
 				    struct drm_framebuffer *fb,
 				    struct drm_i915_gem_object *obj,
-				    struct intel_engine_cs *ring,
+				    struct drm_i915_gem_request *req,
 				    uint32_t flags)
 {
 	return -ENODEV;
@@ -11120,6 +11306,7 @@
 	struct intel_unpin_work *work;
 	struct intel_engine_cs *ring;
 	bool mmio_flip;
+	struct drm_i915_gem_request *request = NULL;
 	int ret;
 
 	/*
@@ -11226,7 +11413,7 @@
 	 */
 	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
 					 crtc->primary->state,
-					 mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring);
+					 mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request);
 	if (ret)
 		goto cleanup_pending;
 
@@ -11242,31 +11429,34 @@
 		i915_gem_request_assign(&work->flip_queued_req,
 					obj->last_write_req);
 	} else {
-		if (obj->last_write_req) {
-			ret = i915_gem_check_olr(obj->last_write_req);
+		if (!request) {
+			ret = i915_gem_request_alloc(ring, ring->default_context, &request);
 			if (ret)
 				goto cleanup_unpin;
 		}
 
-		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
+		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
 						   page_flip_flags);
 		if (ret)
 			goto cleanup_unpin;
 
-		i915_gem_request_assign(&work->flip_queued_req,
-					intel_ring_get_request(ring));
+		i915_gem_request_assign(&work->flip_queued_req, request);
 	}
 
+	if (request)
+		i915_add_request_no_flush(request);
+
 	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
 	work->enable_stall_check = true;
 
 	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
-			  INTEL_FRONTBUFFER_PRIMARY(pipe));
-
-	intel_fbc_disable(dev);
-	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+			  to_intel_plane(primary)->frontbuffer_bit);
 	mutex_unlock(&dev->struct_mutex);
 
+	intel_fbc_disable_crtc(intel_crtc);
+	intel_frontbuffer_flip_prepare(dev,
+				       to_intel_plane(primary)->frontbuffer_bit);
+
 	trace_i915_flip_request(intel_crtc->plane, obj);
 
 	return 0;
@@ -11274,6 +11464,8 @@
 cleanup_unpin:
 	intel_unpin_fb_obj(fb, crtc->primary->state);
 cleanup_pending:
+	if (request)
+		i915_gem_request_cancel(request);
 	atomic_dec(&intel_crtc->unpin_work_count);
 	mutex_unlock(&dev->struct_mutex);
 cleanup:
@@ -11292,8 +11484,35 @@
 	kfree(work);
 
 	if (ret == -EIO) {
+		struct drm_atomic_state *state;
+		struct drm_plane_state *plane_state;
+
 out_hang:
-		ret = intel_plane_restore(primary);
+		state = drm_atomic_state_alloc(dev);
+		if (!state)
+			return -ENOMEM;
+		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
+
+retry:
+		plane_state = drm_atomic_get_plane_state(state, primary);
+		ret = PTR_ERR_OR_ZERO(plane_state);
+		if (!ret) {
+			drm_atomic_set_fb_for_plane(plane_state, fb);
+
+			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+			if (!ret)
+				ret = drm_atomic_commit(state);
+		}
+
+		if (ret == -EDEADLK) {
+			drm_modeset_backoff(state->acquire_ctx);
+			drm_atomic_state_clear(state);
+			goto retry;
+		}
+
+		if (ret)
+			drm_atomic_state_free(state);
+
 		if (ret == 0 && event) {
 			spin_lock_irq(&dev->event_lock);
 			drm_send_vblank_event(dev, pipe, event);
@@ -11303,44 +11522,274 @@
 	return ret;
 }
 
+
+/**
+ * intel_wm_need_update - Check whether watermarks need updating
+ * @plane: drm plane
+ * @state: new plane state
+ *
+ * Check current plane state versus the new one to determine whether
+ * watermarks need to be recalculated.
+ *
+ * Returns true or false.
+ */
+static bool intel_wm_need_update(struct drm_plane *plane,
+				 struct drm_plane_state *state)
+{
+	/* Update watermarks on tiling or rotation changes. */
+	if (!plane->state->fb || !state->fb ||
+	    plane->state->fb->modifier[0] != state->fb->modifier[0] ||
+	    plane->state->rotation != state->rotation)
+		return true;
+
+	if (plane->state->crtc_w != state->crtc_w)
+		return true;
+
+	return false;
+}
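
intel_wm_need_update() treats a missing fb on either side, a tiling (modifier) change, a rotation change, or a plane width change as grounds for recomputing watermarks. A self-contained restatement with stand-in state types:

#include <stdbool.h>
#include <stdio.h>

struct ps { bool has_fb; unsigned modifier; unsigned rotation; int crtc_w; };

static bool wm_need_update(const struct ps *o, const struct ps *n)
{
	if (!o->has_fb || !n->has_fb ||
	    o->modifier != n->modifier ||
	    o->rotation != n->rotation)
		return true;

	return o->crtc_w != n->crtc_w;
}

int main(void)
{
	struct ps a = { true, 0, 0, 1920 };
	struct ps b = a;

	b.crtc_w = 1280;	/* width change alone forces a WM recompute */
	printf("%d\n", wm_need_update(&a, &b));	/* 1 */
	return 0;
}
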
+
+int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+				    struct drm_plane_state *plane_state)
+{
+	struct drm_crtc *crtc = crtc_state->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_plane *plane = plane_state->plane;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane_state *old_plane_state =
+		to_intel_plane_state(plane->state);
+	int idx = intel_crtc->base.base.id, ret;
+	int i = drm_plane_index(plane);
+	bool mode_changed = needs_modeset(crtc_state);
+	bool was_crtc_enabled = crtc->state->active;
+	bool is_crtc_enabled = crtc_state->active;
+
+	bool turn_off, turn_on, visible, was_visible;
+	struct drm_framebuffer *fb = plane_state->fb;
+
+	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
+	    plane->type != DRM_PLANE_TYPE_CURSOR) {
+		ret = skl_update_scaler_plane(
+			to_intel_crtc_state(crtc_state),
+			to_intel_plane_state(plane_state));
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Disabling a plane is always okay; we just need to update
+	 * fb tracking in a special way since cleanup_fb() won't
+	 * get called by the plane helpers.
+	 */
+	if (old_plane_state->base.fb && !fb)
+		intel_crtc->atomic.disabled_planes |= 1 << i;
+
+	was_visible = old_plane_state->visible;
+	visible = to_intel_plane_state(plane_state)->visible;
+
+	if (!was_crtc_enabled && WARN_ON(was_visible))
+		was_visible = false;
+
+	if (!is_crtc_enabled && WARN_ON(visible))
+		visible = false;
+
+	if (!was_visible && !visible)
+		return 0;
+
+	turn_off = was_visible && (!visible || mode_changed);
+	turn_on = visible && (!was_visible || mode_changed);
+
+	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
+			 plane->base.id, fb ? fb->base.id : -1);
+
+	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
+			 plane->base.id, was_visible, visible,
+			 turn_off, turn_on, mode_changed);
+
+	if (turn_on) {
+		intel_crtc->atomic.update_wm_pre = true;
+		/* must disable cxsr around plane enable/disable */
+		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+			intel_crtc->atomic.disable_cxsr = true;
+			/* to potentially re-enable cxsr */
+			intel_crtc->atomic.wait_vblank = true;
+			intel_crtc->atomic.update_wm_post = true;
+		}
+	} else if (turn_off) {
+		intel_crtc->atomic.update_wm_post = true;
+		/* must disable cxsr around plane enable/disable */
+		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+			if (is_crtc_enabled)
+				intel_crtc->atomic.wait_vblank = true;
+			intel_crtc->atomic.disable_cxsr = true;
+		}
+	} else if (intel_wm_need_update(plane, plane_state)) {
+		intel_crtc->atomic.update_wm_pre = true;
+	}
+
+	if (visible)
+		intel_crtc->atomic.fb_bits |=
+			to_intel_plane(plane)->frontbuffer_bit;
+
+	switch (plane->type) {
+	case DRM_PLANE_TYPE_PRIMARY:
+		intel_crtc->atomic.wait_for_flips = true;
+		intel_crtc->atomic.pre_disable_primary = turn_off;
+		intel_crtc->atomic.post_enable_primary = turn_on;
+
+		if (turn_off) {
+			/*
+			 * FIXME: Actually, if we still have any other plane
+			 * enabled on the pipe we could leave IPS enabled, but
+			 * for now consider that when we make the primary
+			 * plane invisible by setting DSPCNTR to 0 in the
+			 * update_primary_plane function, IPS needs to be
+			 * disabled.
+			 */
+			intel_crtc->atomic.disable_ips = true;
+
+			intel_crtc->atomic.disable_fbc = true;
+		}
+
+		/*
+		 * FBC does not work on some platforms for rotated
+		 * planes, so disable it when rotation is not 0 and
+		 * update it when rotation is set back to 0.
+		 *
+		 * FIXME: This is redundant with the fbc update done in
+		 * the primary plane enable function except that that
+		 * one is done too late. We eventually need to unify
+		 * this.
+		 */
+
+		if (visible &&
+		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+		    dev_priv->fbc.crtc == intel_crtc &&
+		    plane_state->rotation != BIT(DRM_ROTATE_0))
+			intel_crtc->atomic.disable_fbc = true;
+
+		/*
+		 * BDW signals flip done immediately if the plane
+		 * is disabled, even if the plane enable is already
+		 * armed to occur at the next vblank :(
+		 */
+		if (turn_on && IS_BROADWELL(dev))
+			intel_crtc->atomic.wait_vblank = true;
+
+		intel_crtc->atomic.update_fbc |= visible || mode_changed;
+		break;
+	case DRM_PLANE_TYPE_CURSOR:
+		break;
+	case DRM_PLANE_TYPE_OVERLAY:
+		if (turn_off && !mode_changed) {
+			intel_crtc->atomic.wait_vblank = true;
+			intel_crtc->atomic.update_sprite_watermarks |=
+				1 << i;
+		}
+	}
+	return 0;
+}
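
The turn_off/turn_on predicates deserve a second look: on a full modeset a still-visible plane sets both, since it is torn down and brought back across the modeset. Evaluating the two expressions for a few assumed combinations:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool cases[][3] = {	/* was_visible, visible, needs_modeset */
		{ true,  false, false },	/* plane disabled */
		{ false, true,  false },	/* plane enabled  */
		{ true,  true,  true  },	/* full modeset   */
	};

	for (int i = 0; i < 3; i++) {
		bool was = cases[i][0], vis = cases[i][1], ms = cases[i][2];
		bool turn_off = was && (!vis || ms);
		bool turn_on  = vis && (!was || ms);

		printf("was=%d vis=%d ms=%d -> off=%d on=%d\n",
		       was, vis, ms, turn_off, turn_on);
	}
	return 0;
}
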
+
+static bool encoders_cloneable(const struct intel_encoder *a,
+			       const struct intel_encoder *b)
+{
+	/* masks could be asymmetric, so check both ways */
+	return a == b || (a->cloneable & (1 << b->type) &&
+			  b->cloneable & (1 << a->type));
+}
+
+static bool check_single_encoder_cloning(struct drm_atomic_state *state,
+					 struct intel_crtc *crtc,
+					 struct intel_encoder *encoder)
+{
+	struct intel_encoder *source_encoder;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	int i;
+
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != &crtc->base)
+			continue;
+
+		source_encoder =
+			to_intel_encoder(connector_state->best_encoder);
+		if (!encoders_cloneable(encoder, source_encoder))
+			return false;
+	}
+
+	return true;
+}
+
+static bool check_encoder_cloning(struct drm_atomic_state *state,
+				  struct intel_crtc *crtc)
+{
+	struct intel_encoder *encoder;
+	struct drm_connector *connector;
+	struct drm_connector_state *connector_state;
+	int i;
+
+	for_each_connector_in_state(state, connector, connector_state, i) {
+		if (connector_state->crtc != &crtc->base)
+			continue;
+
+		encoder = to_intel_encoder(connector_state->best_encoder);
+		if (!check_single_encoder_cloning(state, crtc, encoder))
+			return false;
+	}
+
+	return true;
+}
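
The cloneable test is a symmetric bitmask check: each encoder publishes a mask of the encoder types it tolerates sharing a CRTC with, and both directions must agree. A sketch with made-up encoder types and masks:

#include <stdbool.h>
#include <stdio.h>

struct enc { int type; unsigned cloneable; };

static bool cloneable(const struct enc *a, const struct enc *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || ((a->cloneable & (1u << b->type)) &&
			  (b->cloneable & (1u << a->type)));
}

int main(void)
{
	struct enc dvo = { 0, 1u << 1 };	/* clones only type 1 */
	struct enc tv  = { 1, 1u << 0 };	/* clones only type 0 */
	struct enc edp = { 2, 0 };		/* clones nothing     */

	printf("%d %d\n", cloneable(&dvo, &tv), cloneable(&dvo, &edp)); /* 1 0 */
	return 0;
}
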
+
+static int intel_crtc_atomic_check(struct drm_crtc *crtc,
+				   struct drm_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *pipe_config =
+		to_intel_crtc_state(crtc_state);
+	struct drm_atomic_state *state = crtc_state->state;
+	int ret;
+	bool mode_changed = needs_modeset(crtc_state);
+
+	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
+		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+		return -EINVAL;
+	}
+
+	if (mode_changed && !crtc_state->active)
+		intel_crtc->atomic.update_wm_post = true;
+
+	if (mode_changed && crtc_state->enable &&
+	    dev_priv->display.crtc_compute_clock &&
+	    !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
+		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
+							   pipe_config);
+		if (ret)
+			return ret;
+	}
+
+	ret = 0;
+	if (INTEL_INFO(dev)->gen >= 9) {
+		if (mode_changed)
+			ret = skl_update_scaler_crtc(pipe_config);
+
+		if (!ret)
+			ret = intel_atomic_setup_scalers(dev, intel_crtc,
+							 pipe_config);
+	}
+
+	return ret;
+}
+
 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
 	.mode_set_base_atomic = intel_pipe_set_base_atomic,
 	.load_lut = intel_crtc_load_lut,
 	.atomic_begin = intel_begin_crtc_commit,
 	.atomic_flush = intel_finish_crtc_commit,
+	.atomic_check = intel_crtc_atomic_check,
 };
 
-/**
- * intel_modeset_update_staged_output_state
- *
- * Updates the staged output configuration state, e.g. after we've read out the
- * current hw state.
- */
-static void intel_modeset_update_staged_output_state(struct drm_device *dev)
-{
-	struct intel_crtc *crtc;
-	struct intel_encoder *encoder;
-	struct intel_connector *connector;
-
-	for_each_intel_connector(dev, connector) {
-		connector->new_encoder =
-			to_intel_encoder(connector->base.encoder);
-	}
-
-	for_each_intel_encoder(dev, encoder) {
-		encoder->new_crtc =
-			to_intel_crtc(encoder->base.crtc);
-	}
-
-	for_each_intel_crtc(dev, crtc) {
-		crtc->new_enabled = crtc->base.state->enable;
-	}
-}
-
-/* Transitional helper to copy current connector/encoder state to
- * connector->state. This is needed so that code that is partially
- * converted to atomic does the right thing.
- */
 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
 {
 	struct intel_connector *connector;
@@ -11358,39 +11807,6 @@
 	}
 }
 
-/* Fixup legacy state after an atomic state swap.
- */
-static void intel_modeset_fixup_state(struct drm_atomic_state *state)
-{
-	struct intel_crtc *crtc;
-	struct intel_encoder *encoder;
-	struct intel_connector *connector;
-
-	for_each_intel_connector(state->dev, connector) {
-		connector->base.encoder = connector->base.state->best_encoder;
-		if (connector->base.encoder)
-			connector->base.encoder->crtc =
-				connector->base.state->crtc;
-	}
-
-	/* Update crtc of disabled encoders */
-	for_each_intel_encoder(state->dev, encoder) {
-		int num_connectors = 0;
-
-		for_each_intel_connector(state->dev, connector)
-			if (connector->base.encoder == &encoder->base)
-				num_connectors++;
-
-		if (num_connectors == 0)
-			encoder->base.crtc = NULL;
-	}
-
-	for_each_intel_crtc(state->dev, crtc) {
-		crtc->base.enabled = crtc->base.state->enable;
-		crtc->config = to_intel_crtc_state(crtc->base.state);
-	}
-}
-
 static void
 connected_sink_compute_bpp(struct intel_connector *connector,
 			   struct intel_crtc_state *pipe_config)
@@ -11526,17 +11942,20 @@
 	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
 
 	if (IS_BROXTON(dev)) {
-		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, "
+		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
 			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
-			      "pll6: 0x%x, pll8: 0x%x, pcsdw12: 0x%x\n",
+			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
 			      pipe_config->ddi_pll_sel,
 			      pipe_config->dpll_hw_state.ebb0,
+			      pipe_config->dpll_hw_state.ebb4,
 			      pipe_config->dpll_hw_state.pll0,
 			      pipe_config->dpll_hw_state.pll1,
 			      pipe_config->dpll_hw_state.pll2,
 			      pipe_config->dpll_hw_state.pll3,
 			      pipe_config->dpll_hw_state.pll6,
 			      pipe_config->dpll_hw_state.pll8,
+			      pipe_config->dpll_hw_state.pll9,
+			      pipe_config->dpll_hw_state.pll10,
 			      pipe_config->dpll_hw_state.pcsdw12);
 	} else if (IS_SKYLAKE(dev)) {
 		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
@@ -11593,56 +12012,6 @@
 	}
 }
 
-static bool encoders_cloneable(const struct intel_encoder *a,
-			       const struct intel_encoder *b)
-{
-	/* masks could be asymmetric, so check both ways */
-	return a == b || (a->cloneable & (1 << b->type) &&
-			  b->cloneable & (1 << a->type));
-}
-
-static bool check_single_encoder_cloning(struct drm_atomic_state *state,
-					 struct intel_crtc *crtc,
-					 struct intel_encoder *encoder)
-{
-	struct intel_encoder *source_encoder;
-	struct drm_connector *connector;
-	struct drm_connector_state *connector_state;
-	int i;
-
-	for_each_connector_in_state(state, connector, connector_state, i) {
-		if (connector_state->crtc != &crtc->base)
-			continue;
-
-		source_encoder =
-			to_intel_encoder(connector_state->best_encoder);
-		if (!encoders_cloneable(encoder, source_encoder))
-			return false;
-	}
-
-	return true;
-}
-
-static bool check_encoder_cloning(struct drm_atomic_state *state,
-				  struct intel_crtc *crtc)
-{
-	struct intel_encoder *encoder;
-	struct drm_connector *connector;
-	struct drm_connector_state *connector_state;
-	int i;
-
-	for_each_connector_in_state(state, connector, connector_state, i) {
-		if (connector_state->crtc != &crtc->base)
-			continue;
-
-		encoder = to_intel_encoder(connector_state->best_encoder);
-		if (!check_single_encoder_cloning(state, crtc, encoder))
-			return false;
-	}
-
-	return true;
-}
-
 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
@@ -11696,6 +12065,7 @@
 	struct intel_dpll_hw_state dpll_hw_state;
 	enum intel_dpll_id shared_dpll;
 	uint32_t ddi_pll_sel;
+	bool force_thru;
 
 	/* FIXME: before the switch to atomic started, a new pipe_config was
 	 * kzalloc'd. Code that depends on any field being zero should be
@@ -11707,6 +12077,7 @@
 	shared_dpll = crtc_state->shared_dpll;
 	dpll_hw_state = crtc_state->dpll_hw_state;
 	ddi_pll_sel = crtc_state->ddi_pll_sel;
+	force_thru = crtc_state->pch_pfit.force_thru;
 
 	memset(crtc_state, 0, sizeof *crtc_state);
 
@@ -11715,13 +12086,14 @@
 	crtc_state->shared_dpll = shared_dpll;
 	crtc_state->dpll_hw_state = dpll_hw_state;
 	crtc_state->ddi_pll_sel = ddi_pll_sel;
+	crtc_state->pch_pfit.force_thru = force_thru;
 }
 
 static int
 intel_modeset_pipe_config(struct drm_crtc *crtc,
-			  struct drm_atomic_state *state,
 			  struct intel_crtc_state *pipe_config)
 {
+	struct drm_atomic_state *state = pipe_config->base.state;
 	struct intel_encoder *encoder;
 	struct drm_connector *connector;
 	struct drm_connector_state *connector_state;
@@ -11729,16 +12101,6 @@
 	int i;
 	bool retry = true;
 
-	if (!check_encoder_cloning(state, to_intel_crtc(crtc))) {
-		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
-		return -EINVAL;
-	}
-
-	if (!check_digital_port_conflicts(state)) {
-		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
-		return -EINVAL;
-	}
-
 	clear_intel_crtc_state(pipe_config);
 
 	pipe_config->cpu_transcoder =
@@ -11832,90 +12194,27 @@
 	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
 		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
 
-	return 0;
 fail:
 	return ret;
 }
 
-static bool intel_crtc_in_use(struct drm_crtc *crtc)
-{
-	struct drm_encoder *encoder;
-	struct drm_device *dev = crtc->dev;
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-		if (encoder->crtc == crtc)
-			return true;
-
-	return false;
-}
-
-static bool
-needs_modeset(struct drm_crtc_state *state)
-{
-	return state->mode_changed || state->active_changed;
-}
-
 static void
-intel_modeset_update_state(struct drm_atomic_state *state)
+intel_modeset_update_crtc_state(struct drm_atomic_state *state)
 {
-	struct drm_device *dev = state->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_encoder *intel_encoder;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
-	struct drm_connector *connector;
 	int i;
 
-	intel_shared_dpll_commit(dev_priv);
-
-	for_each_intel_encoder(dev, intel_encoder) {
-		if (!intel_encoder->base.crtc)
-			continue;
-
-		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			if (crtc != intel_encoder->base.crtc)
-				continue;
-
-			if (crtc_state->enable && needs_modeset(crtc_state))
-				intel_encoder->connectors_active = false;
-
-			break;
-		}
-	}
-
-	drm_atomic_helper_swap_state(state->dev, state);
-	intel_modeset_fixup_state(state);
-
 	/* Double check state. */
-	for_each_crtc(dev, crtc) {
-		WARN_ON(crtc->state->enable != intel_crtc_in_use(crtc));
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
+
+		/* Update hwmode for vblank functions */
+		if (crtc->state->active)
+			crtc->hwmode = crtc->state->adjusted_mode;
+		else
+			crtc->hwmode.crtc_clock = 0;
 	}
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (!connector->encoder || !connector->encoder->crtc)
-			continue;
-
-		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			if (crtc != connector->encoder->crtc)
-				continue;
-
-			if (crtc->state->enable && needs_modeset(crtc->state)) {
-				struct drm_property *dpms_property =
-					dev->mode_config.dpms_property;
-
-				connector->dpms = DRM_MODE_DPMS_ON;
-				drm_object_property_set_value(&connector->base,
-								 dpms_property,
-								 DRM_MODE_DPMS_ON);
-
-				intel_encoder = to_intel_encoder(connector->encoder);
-				intel_encoder->connectors_active = true;
-			}
-
-			break;
-		}
-	}
-
 }
 
 static bool intel_fuzzy_clock_check(int clock1, int clock2)
@@ -11942,27 +12241,133 @@
 			    base.head) \
 		if (mask & (1 <<(intel_crtc)->pipe))
 
+
+static bool
+intel_compare_m_n(unsigned int m, unsigned int n,
+		  unsigned int m2, unsigned int n2,
+		  bool exact)
+{
+	if (m == m2 && n == n2)
+		return true;
+
+	if (exact || !m || !n || !m2 || !n2)
+		return false;
+
+	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
+
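+	/*
+	 * Scale the smaller ratio up until the numerators meet; e.g.
+	 * (3/4 vs 6/8) becomes (6/8 vs 6/8) and matches, while with
+	 * exact == true the same inputs would not.  Illustrative values
+	 * only, not taken from any spec.
+	 */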
+	if (m > m2) {
+		while (m > m2) {
+			m2 <<= 1;
+			n2 <<= 1;
+		}
+	} else if (m < m2) {
+		while (m < m2) {
+			m <<= 1;
+			n <<= 1;
+		}
+	}
+
+	return m == m2 && n == n2;
+}
+
+static bool
+intel_compare_link_m_n(const struct intel_link_m_n *m_n,
+		       struct intel_link_m_n *m2_n2,
+		       bool adjust)
+{
+	if (m_n->tu == m2_n2->tu &&
+	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
+			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
+	    intel_compare_m_n(m_n->link_m, m_n->link_n,
+			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
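+		/*
+		 * On a fuzzy match, overwrite the found values with the
+		 * expected ones, presumably so that a subsequent exact
+		 * comparison of the two states also succeeds.
+		 */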
+		if (adjust)
+			*m2_n2 = *m_n;
+
+		return true;
+	}
+
+	return false;
+}
+
 static bool
 intel_pipe_config_compare(struct drm_device *dev,
 			  struct intel_crtc_state *current_config,
-			  struct intel_crtc_state *pipe_config)
+			  struct intel_crtc_state *pipe_config,
+			  bool adjust)
 {
+	bool ret = true;
+
+#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
+	do { \
+		if (!adjust) \
+			DRM_ERROR(fmt, ##__VA_ARGS__); \
+		else \
+			DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
+	} while (0)
+
 #define PIPE_CONF_CHECK_X(name)	\
 	if (current_config->name != pipe_config->name) { \
-		DRM_ERROR("mismatch in " #name " " \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
 			  "(expected 0x%08x, found 0x%08x)\n", \
 			  current_config->name, \
 			  pipe_config->name); \
-		return false; \
+		ret = false; \
 	}
 
 #define PIPE_CONF_CHECK_I(name)	\
 	if (current_config->name != pipe_config->name) { \
-		DRM_ERROR("mismatch in " #name " " \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
 			  "(expected %i, found %i)\n", \
 			  current_config->name, \
 			  pipe_config->name); \
-		return false; \
+		ret = false; \
+	}
+
+#define PIPE_CONF_CHECK_M_N(name) \
+	if (!intel_compare_link_m_n(&current_config->name, \
+				    &pipe_config->name,\
+				    adjust)) { \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+			  "(expected tu %i gmch %i/%i link %i/%i, " \
+			  "found tu %i gmch %i/%i link %i/%i)\n", \
+			  current_config->name.tu, \
+			  current_config->name.gmch_m, \
+			  current_config->name.gmch_n, \
+			  current_config->name.link_m, \
+			  current_config->name.link_n, \
+			  pipe_config->name.tu, \
+			  pipe_config->name.gmch_m, \
+			  pipe_config->name.gmch_n, \
+			  pipe_config->name.link_m, \
+			  pipe_config->name.link_n); \
+		ret = false; \
+	}
+
+#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
+	if (!intel_compare_link_m_n(&current_config->name, \
+				    &pipe_config->name, adjust) && \
+	    !intel_compare_link_m_n(&current_config->alt_name, \
+				    &pipe_config->name, adjust)) { \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+			  "(expected tu %i gmch %i/%i link %i/%i, " \
+			  "or tu %i gmch %i/%i link %i/%i, " \
+			  "found tu %i gmch %i/%i link %i/%i)\n", \
+			  current_config->name.tu, \
+			  current_config->name.gmch_m, \
+			  current_config->name.gmch_n, \
+			  current_config->name.link_m, \
+			  current_config->name.link_n, \
+			  current_config->alt_name.tu, \
+			  current_config->alt_name.gmch_m, \
+			  current_config->alt_name.gmch_n, \
+			  current_config->alt_name.link_m, \
+			  current_config->alt_name.link_n, \
+			  pipe_config->name.tu, \
+			  pipe_config->name.gmch_m, \
+			  pipe_config->name.gmch_n, \
+			  pipe_config->name.link_m, \
+			  pipe_config->name.link_n); \
+		ret = false; \
 	}
 
 /* This is required for BDW+ where there is only one set of registers for
@@ -11973,30 +12378,30 @@
 #define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
 	if ((current_config->name != pipe_config->name) && \
 		(current_config->alt_name != pipe_config->name)) { \
-			DRM_ERROR("mismatch in " #name " " \
+			INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
 				  "(expected %i or %i, found %i)\n", \
 				  current_config->name, \
 				  current_config->alt_name, \
 				  pipe_config->name); \
-			return false; \
+			ret = false; \
 	}
 
 #define PIPE_CONF_CHECK_FLAGS(name, mask)	\
 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
-		DRM_ERROR("mismatch in " #name "(" #mask ") "	   \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
 			  "(expected %i, found %i)\n", \
 			  current_config->name & (mask), \
 			  pipe_config->name & (mask)); \
-		return false; \
+		ret = false; \
 	}
 
 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
-		DRM_ERROR("mismatch in " #name " " \
+		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
 			  "(expected %i, found %i)\n", \
 			  current_config->name, \
 			  pipe_config->name); \
-		return false; \
+		ret = false; \
 	}
 
 #define PIPE_CONF_QUIRK(quirk)	\
@@ -12006,35 +12411,18 @@
 
 	PIPE_CONF_CHECK_I(has_pch_encoder);
 	PIPE_CONF_CHECK_I(fdi_lanes);
-	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
-	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
-	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
-	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
-	PIPE_CONF_CHECK_I(fdi_m_n.tu);
+	PIPE_CONF_CHECK_M_N(fdi_m_n);
 
 	PIPE_CONF_CHECK_I(has_dp_encoder);
 
 	if (INTEL_INFO(dev)->gen < 8) {
-		PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
-		PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
-		PIPE_CONF_CHECK_I(dp_m_n.link_m);
-		PIPE_CONF_CHECK_I(dp_m_n.link_n);
-		PIPE_CONF_CHECK_I(dp_m_n.tu);
+		PIPE_CONF_CHECK_M_N(dp_m_n);
 
-		if (current_config->has_drrs) {
-			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
-			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
-			PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
-			PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
-			PIPE_CONF_CHECK_I(dp_m2_n2.tu);
-		}
-	} else {
-		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
-		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
-		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
-		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
-		PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
-	}
+		PIPE_CONF_CHECK_I(has_drrs);
+		if (current_config->has_drrs)
+			PIPE_CONF_CHECK_M_N(dp_m2_n2);
+	} else
+		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
 
 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
@@ -12076,21 +12464,11 @@
 	PIPE_CONF_CHECK_I(pipe_src_w);
 	PIPE_CONF_CHECK_I(pipe_src_h);
 
-	/*
-	 * FIXME: BIOS likes to set up a cloned config with lvds+external
-	 * screen. Since we don't yet re-compute the pipe config when moving
-	 * just the lvds port away to another pipe the sw tracking won't match.
-	 *
-	 * Proper atomic modesets with recomputed global state will fix this.
-	 * Until then just don't check gmch state for inherited modes.
-	 */
-	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
-		PIPE_CONF_CHECK_I(gmch_pfit.control);
-		/* pfit ratios are autocomputed by the hw on gen4+ */
-		if (INTEL_INFO(dev)->gen < 4)
-			PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
-		PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
-	}
+	PIPE_CONF_CHECK_I(gmch_pfit.control);
+	/* pfit ratios are autocomputed by the hw on gen4+ */
+	if (INTEL_INFO(dev)->gen < 4)
+		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+	PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
 
 	PIPE_CONF_CHECK_I(pch_pfit.enabled);
 	if (current_config->pch_pfit.enabled) {
@@ -12130,8 +12508,9 @@
 #undef PIPE_CONF_CHECK_FLAGS
 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
 #undef PIPE_CONF_QUIRK
+#undef INTEL_ERR_OR_DBG_KMS
 
-	return true;
+	return ret;
 }
 
 static void check_wm_state(struct drm_device *dev)
@@ -12185,17 +12564,23 @@
 }
 
 static void
-check_connector_state(struct drm_device *dev)
+check_connector_state(struct drm_device *dev,
+		      struct drm_atomic_state *old_state)
 {
-	struct intel_connector *connector;
+	struct drm_connector_state *old_conn_state;
+	struct drm_connector *connector;
+	int i;
 
-	for_each_intel_connector(dev, connector) {
+	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+		struct drm_encoder *encoder = connector->encoder;
+		struct drm_connector_state *state = connector->state;
+
 		/* This also checks the encoder/connector hw state with the
 		 * ->get_hw_state callbacks. */
-		intel_connector_check_state(connector);
+		intel_connector_check_state(to_intel_connector(connector));
 
-		I915_STATE_WARN(&connector->new_encoder->base != connector->base.encoder,
-		     "connector's staged encoder doesn't match current encoder\n");
+		I915_STATE_WARN(state->best_encoder != encoder,
+		     "connector's atomic encoder doesn't match legacy encoder\n");
 	}
 }
 
@@ -12207,124 +12592,106 @@
 
 	for_each_intel_encoder(dev, encoder) {
 		bool enabled = false;
-		bool active = false;
-		enum pipe pipe, tracked_pipe;
+		enum pipe pipe;
 
 		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
 			      encoder->base.base.id,
 			      encoder->base.name);
 
-		I915_STATE_WARN(&encoder->new_crtc->base != encoder->base.crtc,
-		     "encoder's stage crtc doesn't match current crtc\n");
-		I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
-		     "encoder's active_connectors set, but no crtc\n");
-
 		for_each_intel_connector(dev, connector) {
-			if (connector->base.encoder != &encoder->base)
+			if (connector->base.state->best_encoder != &encoder->base)
 				continue;
 			enabled = true;
-			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
-				active = true;
+
+			I915_STATE_WARN(connector->base.state->crtc !=
+					encoder->base.crtc,
+			     "connector's crtc doesn't match encoder crtc\n");
 		}
-		/*
-		 * for MST connectors if we unplug the connector is gone
-		 * away but the encoder is still connected to a crtc
-		 * until a modeset happens in response to the hotplug.
-		 */
-		if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
-			continue;
 
 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
 		     "encoder's enabled state mismatch "
 		     "(expected %i, found %i)\n",
 		     !!encoder->base.crtc, enabled);
-		I915_STATE_WARN(active && !encoder->base.crtc,
-		     "active encoder with no crtc\n");
 
-		I915_STATE_WARN(encoder->connectors_active != active,
-		     "encoder's computed active state doesn't match tracked active state "
-		     "(expected %i, found %i)\n", active, encoder->connectors_active);
+		if (!encoder->base.crtc) {
+			bool active;
 
-		active = encoder->get_hw_state(encoder, &pipe);
-		I915_STATE_WARN(active != encoder->connectors_active,
-		     "encoder's hw state doesn't match sw tracking "
-		     "(expected %i, found %i)\n",
-		     encoder->connectors_active, active);
-
-		if (!encoder->base.crtc)
-			continue;
-
-		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
-		I915_STATE_WARN(active && pipe != tracked_pipe,
-		     "active encoder's pipe doesn't match"
-		     "(expected %i, found %i)\n",
-		     tracked_pipe, pipe);
-
+			active = encoder->get_hw_state(encoder, &pipe);
+			I915_STATE_WARN(active,
+			     "encoder detached but still enabled on pipe %c.\n",
+			     pipe_name(pipe));
+		}
 	}
 }
 
 static void
-check_crtc_state(struct drm_device *dev)
+check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *crtc;
 	struct intel_encoder *encoder;
-	struct intel_crtc_state pipe_config;
+	struct drm_crtc_state *old_crtc_state;
+	struct drm_crtc *crtc;
+	int i;
 
-	for_each_intel_crtc(dev, crtc) {
-		bool enabled = false;
-		bool active = false;
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		struct intel_crtc_state *pipe_config, *sw_config;
+		bool active;
 
-		memset(&pipe_config, 0, sizeof(pipe_config));
+		if (!needs_modeset(crtc->state))
+			continue;
+
+		__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
+		pipe_config = to_intel_crtc_state(old_crtc_state);
+		memset(pipe_config, 0, sizeof(*pipe_config));
+		pipe_config->base.crtc = crtc;
+		pipe_config->base.state = old_state;
 
 		DRM_DEBUG_KMS("[CRTC:%d]\n",
-			      crtc->base.base.id);
+			      crtc->base.id);
 
-		I915_STATE_WARN(crtc->active && !crtc->base.state->enable,
-		     "active crtc, but not enabled in sw tracking\n");
-
-		for_each_intel_encoder(dev, encoder) {
-			if (encoder->base.crtc != &crtc->base)
-				continue;
-			enabled = true;
-			if (encoder->connectors_active)
-				active = true;
-		}
-
-		I915_STATE_WARN(active != crtc->active,
-		     "crtc's computed active state doesn't match tracked active state "
-		     "(expected %i, found %i)\n", active, crtc->active);
-		I915_STATE_WARN(enabled != crtc->base.state->enable,
-		     "crtc's computed enabled state doesn't match tracked enabled state "
-		     "(expected %i, found %i)\n", enabled,
-				crtc->base.state->enable);
-
-		active = dev_priv->display.get_pipe_config(crtc,
-							   &pipe_config);
+		active = dev_priv->display.get_pipe_config(intel_crtc,
+							   pipe_config);
 
 		/* hw state is inconsistent with the pipe quirk */
-		if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
-		    (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
-			active = crtc->active;
+		if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
+		    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
+			active = crtc->state->active;
 
-		for_each_intel_encoder(dev, encoder) {
+		I915_STATE_WARN(crtc->state->active != active,
+	     "crtc active state doesn't match hw state "
+		     "(expected %i, found %i)\n", crtc->state->active, active);
+
+		I915_STATE_WARN(intel_crtc->active != crtc->state->active,
+		     "transitional active state does not match atomic hw state "
+		     "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);
+
+		for_each_encoder_on_crtc(dev, crtc, encoder) {
 			enum pipe pipe;
-			if (encoder->base.crtc != &crtc->base)
-				continue;
-			if (encoder->get_hw_state(encoder, &pipe))
-				encoder->get_config(encoder, &pipe_config);
+
+			active = encoder->get_hw_state(encoder, &pipe);
+			I915_STATE_WARN(active != crtc->state->active,
+				"[ENCODER:%i] active %i with crtc active %i\n",
+				encoder->base.base.id, active, crtc->state->active);
+
+			I915_STATE_WARN(active && intel_crtc->pipe != pipe,
+					"Encoder connected to wrong pipe %c\n",
+					pipe_name(pipe));
+
+			if (active)
+				encoder->get_config(encoder, pipe_config);
 		}
 
-		I915_STATE_WARN(crtc->active != active,
-		     "crtc active state doesn't match with hw state "
-		     "(expected %i, found %i)\n", crtc->active, active);
+		if (!crtc->state->active)
+			continue;
 
-		if (active &&
-		    !intel_pipe_config_compare(dev, crtc->config, &pipe_config)) {
+		sw_config = to_intel_crtc_state(crtc->state);
+		if (!intel_pipe_config_compare(dev, sw_config,
+					       pipe_config, false)) {
 			I915_STATE_WARN(1, "pipe state doesn't match!\n");
-			intel_dump_pipe_config(crtc, &pipe_config,
+			intel_dump_pipe_config(intel_crtc, pipe_config,
 					       "[hw state]");
-			intel_dump_pipe_config(crtc, crtc->config,
+			intel_dump_pipe_config(intel_crtc, sw_config,
 					       "[sw state]");
 		}
 	}
@@ -12379,13 +12746,14 @@
 	}
 }
 
-void
-intel_modeset_check_state(struct drm_device *dev)
+static void
+intel_modeset_check_state(struct drm_device *dev,
+			  struct drm_atomic_state *old_state)
 {
 	check_wm_state(dev);
-	check_connector_state(dev);
+	check_connector_state(dev, old_state);
 	check_encoder_state(dev);
-	check_crtc_state(dev);
+	check_crtc_state(dev, old_state);
 	check_shared_dpll_state(dev);
 }
 
@@ -12439,113 +12807,145 @@
 		crtc->scanline_offset = 1;
 }
 
-static struct intel_crtc_state *
-intel_modeset_compute_config(struct drm_crtc *crtc,
-			     struct drm_atomic_state *state)
-{
-	struct intel_crtc_state *pipe_config;
-	int ret = 0;
-
-	ret = drm_atomic_add_affected_connectors(state, crtc);
-	if (ret)
-		return ERR_PTR(ret);
-
-	ret = drm_atomic_helper_check_modeset(state->dev, state);
-	if (ret)
-		return ERR_PTR(ret);
-
-	/*
-	 * Note this needs changes when we start tracking multiple modes
-	 * and crtcs.  At that point we'll need to compute the whole config
-	 * (i.e. one pipe_config for each crtc) rather than just the one
-	 * for this crtc.
-	 */
-	pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
-	if (IS_ERR(pipe_config))
-		return pipe_config;
-
-	if (!pipe_config->base.enable)
-		return pipe_config;
-
-	ret = intel_modeset_pipe_config(crtc, state, pipe_config);
-	if (ret)
-		return ERR_PTR(ret);
-
-	/* Check things that can only be changed through modeset */
-	if (pipe_config->has_audio !=
-	    to_intel_crtc(crtc)->config->has_audio)
-		pipe_config->base.mode_changed = true;
-
-	/*
-	 * Note we have an issue here with infoframes: current code
-	 * only updates them on the full mode set path per hw
-	 * requirements.  So here we should be checking for any
-	 * required changes and forcing a mode set.
-	 */
-
-	intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,"[modeset]");
-
-	ret = drm_atomic_helper_check_planes(state->dev, state);
-	if (ret)
-		return ERR_PTR(ret);
-
-	return pipe_config;
-}
-
-static int __intel_set_mode_setup_plls(struct drm_atomic_state *state)
+static void intel_modeset_clear_plls(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	unsigned clear_pipes = 0;
+	struct intel_shared_dpll_config *shared_dpll = NULL;
 	struct intel_crtc *intel_crtc;
 	struct intel_crtc_state *intel_crtc_state;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
-	int ret = 0;
 	int i;
 
 	if (!dev_priv->display.crtc_compute_clock)
-		return 0;
+		return;
 
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		int dpll;
+
 		intel_crtc = to_intel_crtc(crtc);
 		intel_crtc_state = to_intel_crtc_state(crtc_state);
+		dpll = intel_crtc_state->shared_dpll;
 
-		if (needs_modeset(crtc_state)) {
-			clear_pipes |= 1 << intel_crtc->pipe;
-			intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
-		}
-	}
-
-	ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
-	if (ret)
-		goto done;
-
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		if (!needs_modeset(crtc_state) || !crtc_state->enable)
+		if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
 			continue;
 
-		intel_crtc = to_intel_crtc(crtc);
-		intel_crtc_state = to_intel_crtc_state(crtc_state);
+		intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
 
-		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
-							   intel_crtc_state);
-		if (ret) {
-			intel_shared_dpll_abort_config(dev_priv);
-			goto done;
+		if (!shared_dpll)
+			shared_dpll = intel_atomic_get_shared_dpll_state(state);
+
+		shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
+	}
+}
+
+/*
+ * This implements the workaround described in the "notes" section of the mode
+ * set sequence documentation. When going from no pipes or single pipe to
+ * multiple pipes, and planes are enabled after the pipe, we need to wait at
+ * least 2 vblanks on the first pipe before enabling planes on the second pipe.
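+ *
+ * Illustrative cases (not spelled out in the documentation): if pipe A is
+ * already enabled and the modeset enables pipe B, pipe B's state gets
+ * hsw_workaround_pipe = A; if two pipes are enabled starting from none,
+ * the second pipe waits on the first.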
+ */
+static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+	struct intel_crtc *intel_crtc;
+	struct drm_crtc *crtc;
+	struct intel_crtc_state *first_crtc_state = NULL;
+	struct intel_crtc_state *other_crtc_state = NULL;
+	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
+	int i;
+
+	/* look at all crtc's that are going to be enabled during the modeset */
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		intel_crtc = to_intel_crtc(crtc);
+
+		if (!crtc_state->active || !needs_modeset(crtc_state))
+			continue;
+
+		if (first_crtc_state) {
+			other_crtc_state = to_intel_crtc_state(crtc_state);
+			break;
+		} else {
+			first_crtc_state = to_intel_crtc_state(crtc_state);
+			first_pipe = intel_crtc->pipe;
 		}
 	}
 
-done:
+	/* No workaround needed? */
+	if (!first_crtc_state)
+		return 0;
+
+	/* w/a possibly needed, check how many crtc's are already enabled. */
+	for_each_intel_crtc(state->dev, intel_crtc) {
+		struct intel_crtc_state *pipe_config;
+
+		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
+		if (IS_ERR(pipe_config))
+			return PTR_ERR(pipe_config);
+
+		pipe_config->hsw_workaround_pipe = INVALID_PIPE;
+
+		if (!pipe_config->base.active ||
+		    needs_modeset(&pipe_config->base))
+			continue;
+
+		/* 2 or more enabled crtcs means no need for w/a */
+		if (enabled_pipe != INVALID_PIPE)
+			return 0;
+
+		enabled_pipe = intel_crtc->pipe;
+	}
+
+	if (enabled_pipe != INVALID_PIPE)
+		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
+	else if (other_crtc_state)
+		other_crtc_state->hsw_workaround_pipe = first_pipe;
+
+	return 0;
+}
+
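+/*
+ * Force a full modeset on every currently active pipe.  Used below when
+ * the computed cdclk differs from the current one, so each active pipe
+ * goes through a complete disable/enable cycle even if its own
+ * configuration is unchanged.
+ */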
+static int intel_modeset_all_pipes(struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int ret = 0;
+
+	/* add all active pipes to the state */
+	for_each_crtc(state->dev, crtc) {
+		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
+
+		if (!crtc_state->active || needs_modeset(crtc_state))
+			continue;
+
+		crtc_state->mode_changed = true;
+
+		ret = drm_atomic_add_affected_connectors(state, crtc);
+		if (ret)
+			break;
+
+		ret = drm_atomic_add_affected_planes(state, crtc);
+		if (ret)
+			break;
+	}
+
 	return ret;
 }
 
-/* Code that should eventually be part of atomic_check() */
-static int __intel_set_mode_checks(struct drm_atomic_state *state)
+
+static int intel_modeset_checks(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	if (!check_digital_port_conflicts(state)) {
+		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+		return -EINVAL;
+	}
+
 	/*
 	 * See if the config requires any additional preparation, e.g.
 	 * to adjust global state with pipes off.  We need to do this
@@ -12553,405 +12953,244 @@
 	 * mode set on this crtc.  For other crtcs we need to use the
 	 * adjusted_mode bits in the crtc directly.
 	 */
-	if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
-		ret = valleyview_modeset_global_pipes(state);
-		if (ret)
-			return ret;
-	}
+	if (dev_priv->display.modeset_calc_cdclk) {
+		unsigned int cdclk;
 
-	ret = __intel_set_mode_setup_plls(state);
-	if (ret)
-		return ret;
+		ret = dev_priv->display.modeset_calc_cdclk(state);
+
+		cdclk = to_intel_atomic_state(state)->cdclk;
+		if (!ret && cdclk != dev_priv->cdclk_freq)
+			ret = intel_modeset_all_pipes(state);
+
+		if (ret < 0)
+			return ret;
+	} else
+		to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
+
+	intel_modeset_clear_plls(state);
+
+	if (IS_HASWELL(dev))
+		return haswell_mode_set_planes_workaround(state);
 
 	return 0;
 }
 
-static int __intel_set_mode(struct drm_crtc *modeset_crtc,
-			    struct intel_crtc_state *pipe_config)
+/**
+ * intel_atomic_check - validate state object
+ * @dev: drm device
+ * @state: state to validate
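+ *
+ * Beyond the core helper checks, this recomputes the pipe config for
+ * every crtc that needs a modeset and, when i915.fastboot is set,
+ * downgrades the modeset to a fastset if the newly computed config
+ * matches the current sw state closely enough.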
+ */
+static int intel_atomic_check(struct drm_device *dev,
+			      struct drm_atomic_state *state)
 {
-	struct drm_device *dev = modeset_crtc->dev;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int ret, i;
+	bool any_ms = false;
+
+	ret = drm_atomic_helper_check_modeset(dev, state);
+	if (ret)
+		return ret;
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct intel_crtc_state *pipe_config =
+			to_intel_crtc_state(crtc_state);
+
+		/* Catch I915_MODE_FLAG_INHERITED */
+		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
+			crtc_state->mode_changed = true;
+
+		if (!crtc_state->enable) {
+			if (needs_modeset(crtc_state))
+				any_ms = true;
+			continue;
+		}
+
+		if (!needs_modeset(crtc_state))
+			continue;
+
+		/* FIXME: For only active_changed we shouldn't need to do any
+		 * state recomputation at all. */
+
+		ret = drm_atomic_add_affected_connectors(state, crtc);
+		if (ret)
+			return ret;
+
+		ret = intel_modeset_pipe_config(crtc, pipe_config);
+		if (ret)
+			return ret;
+
+		if (i915.fastboot &&
+		    intel_pipe_config_compare(state->dev,
+					to_intel_crtc_state(crtc->state),
+					pipe_config, true)) {
+			crtc_state->mode_changed = false;
+		}
+
+		if (needs_modeset(crtc_state)) {
+			any_ms = true;
+
+			ret = drm_atomic_add_affected_planes(state, crtc);
+			if (ret)
+				return ret;
+		}
+
+		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
+				       needs_modeset(crtc_state) ?
+				       "[modeset]" : "[fastset]");
+	}
+
+	if (any_ms) {
+		ret = intel_modeset_checks(state);
+
+		if (ret)
+			return ret;
+	} else
+		to_intel_atomic_state(state)->cdclk =
+			to_i915(state->dev)->cdclk_freq;
+
+	return drm_atomic_helper_check_planes(state->dev, state);
+}
+
+/**
+ * intel_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the top-level driver state object
+ * @async: asynchronous commit
+ *
+ * This function commits a top-level state object that has been validated
+ * with drm_atomic_helper_check().
+ *
+ * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
+ * we can only handle plane-related operations and do not yet support
+ * asynchronous commit.
+ *
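+ * In outline, the sequence below is: prepare planes, swap in the new
+ * state, disable pipes that need a full modeset, update derived state
+ * (shared DPLLs, legacy modeset state, power domains), re-enable those
+ * pipes, commit plane updates, then wait for vblanks and clean up.
+ *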
+ * RETURNS
+ * Zero for success or -errno.
+ */
+static int intel_atomic_commit(struct drm_device *dev,
+			       struct drm_atomic_state *state,
+			       bool async)
+{
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_atomic_state *state = pipe_config->base.state;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
 	int ret = 0;
 	int i;
+	bool any_ms = false;
 
-	ret = __intel_set_mode_checks(state);
-	if (ret < 0)
-		return ret;
+	if (async) {
+		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
+		return -EINVAL;
+	}
 
 	ret = drm_atomic_helper_prepare_planes(dev, state);
 	if (ret)
 		return ret;
 
+	drm_atomic_helper_swap_state(dev, state);
+
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		if (!needs_modeset(crtc_state))
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+		if (!needs_modeset(crtc->state))
 			continue;
 
-		if (!crtc_state->enable) {
-			if (crtc->state->enable)
-				intel_crtc_disable(crtc);
-		} else if (crtc->state->enable) {
-			intel_crtc_disable_planes(crtc);
+		any_ms = true;
+		intel_pre_plane_update(intel_crtc);
+
+		if (crtc_state->active) {
+			intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
 			dev_priv->display.crtc_disable(crtc);
+			intel_crtc->active = false;
+			intel_disable_shared_dpll(intel_crtc);
 		}
 	}
 
-	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
-	 * to set it here already despite that we pass it down the callchain.
-	 *
-	 * Note we'll need to fix this up when we start tracking multiple
-	 * pipes; here we assume a single modeset_pipe and only track the
-	 * single crtc and mode.
-	 */
-	if (pipe_config->base.enable && needs_modeset(&pipe_config->base)) {
-		modeset_crtc->mode = pipe_config->base.mode;
-
-		/*
-		 * Calculate and store various constants which
-		 * are later needed by vblank and swap-completion
-		 * timestamping. They are derived from true hwmode.
-		 */
-		drm_calc_timestamping_constants(modeset_crtc,
-						&pipe_config->base.adjusted_mode);
-	}
-
 	/* Only after disabling all output pipelines that will be changed can we
 	 * update the output configuration. */
-	intel_modeset_update_state(state);
+	intel_modeset_update_crtc_state(state);
 
-	/* The state has been swaped above, so state actually contains the
-	 * old state now. */
+	if (any_ms) {
+		intel_shared_dpll_commit(state);
 
-	modeset_update_crtc_power_domains(state);
+		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
+		modeset_update_crtc_power_domains(state);
+	}
 
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		if (!needs_modeset(crtc->state) || !crtc->state->enable) {
-			drm_atomic_helper_commit_planes_on_crtc(crtc_state);
-			continue;
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		bool modeset = needs_modeset(crtc->state);
+
+		if (modeset && crtc->state->active) {
+			update_scanline_offset(to_intel_crtc(crtc));
+			dev_priv->display.crtc_enable(crtc);
 		}
 
-		update_scanline_offset(to_intel_crtc(crtc));
+		if (!modeset)
+			intel_pre_plane_update(intel_crtc);
 
-		dev_priv->display.crtc_enable(crtc);
 		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
+		intel_post_plane_update(intel_crtc);
 	}
 
 	/* FIXME: add subpixel order */
 
+	drm_atomic_helper_wait_for_vblanks(dev, state);
 	drm_atomic_helper_cleanup_planes(dev, state);
 
+	if (any_ms)
+		intel_modeset_check_state(dev, state);
+
 	drm_atomic_state_free(state);
 
 	return 0;
 }
 
-static int intel_set_mode_with_config(struct drm_crtc *crtc,
-				      struct intel_crtc_state *pipe_config,
-				      bool force_restore)
-{
-	int ret;
-
-	ret = __intel_set_mode(crtc, pipe_config);
-
-	if (ret == 0 && force_restore) {
-		intel_modeset_update_staged_output_state(crtc->dev);
-		intel_modeset_check_state(crtc->dev);
-	}
-
-	return ret;
-}
-
-static int intel_set_mode(struct drm_crtc *crtc,
-			  struct drm_atomic_state *state,
-			  bool force_restore)
-{
-	struct intel_crtc_state *pipe_config;
-	int ret = 0;
-
-	pipe_config = intel_modeset_compute_config(crtc, state);
-	if (IS_ERR(pipe_config)) {
-		ret = PTR_ERR(pipe_config);
-		goto out;
-	}
-
-	ret = intel_set_mode_with_config(crtc, pipe_config, force_restore);
-	if (ret)
-		goto out;
-
-out:
-	return ret;
-}
-
 void intel_crtc_restore_mode(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_atomic_state *state;
-	struct intel_encoder *encoder;
-	struct intel_connector *connector;
-	struct drm_connector_state *connector_state;
-	struct intel_crtc_state *crtc_state;
+	struct drm_crtc_state *crtc_state;
 	int ret;
 
 	state = drm_atomic_state_alloc(dev);
 	if (!state) {
-		DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory",
+		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory\n",
 			      crtc->base.id);
 		return;
 	}
 
-	state->acquire_ctx = dev->mode_config.acquire_ctx;
+	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
 
-	/* The force restore path in the HW readout code relies on the staged
-	 * config still keeping the user requested config while the actual
-	 * state has been overwritten by the configuration read from HW. We
-	 * need to copy the staged config to the atomic state, otherwise the
-	 * mode set will just reapply the state the HW is already in. */
-	for_each_intel_encoder(dev, encoder) {
-		if (&encoder->new_crtc->base != crtc)
-			continue;
+retry:
+	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+	ret = PTR_ERR_OR_ZERO(crtc_state);
+	if (!ret) {
+		if (!crtc_state->active)
+			goto out;
 
-		for_each_intel_connector(dev, connector) {
-			if (connector->new_encoder != encoder)
-				continue;
-
-			connector_state = drm_atomic_get_connector_state(state, &connector->base);
-			if (IS_ERR(connector_state)) {
-				DRM_DEBUG_KMS("Failed to add [CONNECTOR:%d:%s] to state: %ld\n",
-					      connector->base.base.id,
-					      connector->base.name,
-					      PTR_ERR(connector_state));
-				continue;
-			}
-
-			connector_state->crtc = crtc;
-			connector_state->best_encoder = &encoder->base;
-		}
+		crtc_state->mode_changed = true;
+		ret = drm_atomic_commit(state);
 	}
 
-	crtc_state = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
-	if (IS_ERR(crtc_state)) {
-		DRM_DEBUG_KMS("Failed to add [CRTC:%d] to state: %ld\n",
-			      crtc->base.id, PTR_ERR(crtc_state));
-		drm_atomic_state_free(state);
-		return;
+	if (ret == -EDEADLK) {
+		drm_atomic_state_clear(state);
+		drm_modeset_backoff(state->acquire_ctx);
+		goto retry;
 	}
 
-	crtc_state->base.active = crtc_state->base.enable =
-		to_intel_crtc(crtc)->new_enabled;
-
-	drm_mode_copy(&crtc_state->base.mode, &crtc->mode);
-
-	intel_modeset_setup_plane_state(state, crtc, &crtc->mode,
-					crtc->primary->fb, crtc->x, crtc->y);
-
-	ret = intel_set_mode(crtc, state, false);
 	if (ret)
+out:
 		drm_atomic_state_free(state);
 }
 
 #undef for_each_intel_crtc_masked
 
-static bool intel_connector_in_mode_set(struct intel_connector *connector,
-					struct drm_mode_set *set)
-{
-	int ro;
-
-	for (ro = 0; ro < set->num_connectors; ro++)
-		if (set->connectors[ro] == &connector->base)
-			return true;
-
-	return false;
-}
-
-static int
-intel_modeset_stage_output_state(struct drm_device *dev,
-				 struct drm_mode_set *set,
-				 struct drm_atomic_state *state)
-{
-	struct intel_connector *connector;
-	struct drm_connector *drm_connector;
-	struct drm_connector_state *connector_state;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-	int i, ret;
-
-	/* The upper layers ensure that we either disable a crtc or have a list
-	 * of connectors. For paranoia, double-check this. */
-	WARN_ON(!set->fb && (set->num_connectors != 0));
-	WARN_ON(set->fb && (set->num_connectors == 0));
-
-	for_each_intel_connector(dev, connector) {
-		bool in_mode_set = intel_connector_in_mode_set(connector, set);
-
-		if (!in_mode_set && connector->base.state->crtc != set->crtc)
-			continue;
-
-		connector_state =
-			drm_atomic_get_connector_state(state, &connector->base);
-		if (IS_ERR(connector_state))
-			return PTR_ERR(connector_state);
-
-		if (in_mode_set) {
-			int pipe = to_intel_crtc(set->crtc)->pipe;
-			connector_state->best_encoder =
-				&intel_find_encoder(connector, pipe)->base;
-		}
-
-		if (connector->base.state->crtc != set->crtc)
-			continue;
-
-		/* If we disable the crtc, disable all its connectors. Also, if
-		 * the connector is on the changing crtc but not on the new
-		 * connector list, disable it. */
-		if (!set->fb || !in_mode_set) {
-			connector_state->best_encoder = NULL;
-
-			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
-				connector->base.base.id,
-				connector->base.name);
-		}
-	}
-	/* connector->new_encoder is now updated for all connectors. */
-
-	for_each_connector_in_state(state, drm_connector, connector_state, i) {
-		connector = to_intel_connector(drm_connector);
-
-		if (!connector_state->best_encoder) {
-			ret = drm_atomic_set_crtc_for_connector(connector_state,
-								NULL);
-			if (ret)
-				return ret;
-
-			continue;
-		}
-
-		if (intel_connector_in_mode_set(connector, set)) {
-			struct drm_crtc *crtc = connector->base.state->crtc;
-
-			/* If this connector was in a previous crtc, add it
-			 * to the state. We might need to disable it. */
-			if (crtc) {
-				crtc_state =
-					drm_atomic_get_crtc_state(state, crtc);
-				if (IS_ERR(crtc_state))
-					return PTR_ERR(crtc_state);
-			}
-
-			ret = drm_atomic_set_crtc_for_connector(connector_state,
-								set->crtc);
-			if (ret)
-				return ret;
-		}
-
-		/* Make sure the new CRTC will work with the encoder */
-		if (!drm_encoder_crtc_ok(connector_state->best_encoder,
-					 connector_state->crtc)) {
-			return -EINVAL;
-		}
-
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
-			connector->base.base.id,
-			connector->base.name,
-			connector_state->crtc->base.id);
-
-		if (connector_state->best_encoder != &connector->encoder->base)
-			connector->encoder =
-				to_intel_encoder(connector_state->best_encoder);
-	}
-
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
-		bool has_connectors;
-
-		ret = drm_atomic_add_affected_connectors(state, crtc);
-		if (ret)
-			return ret;
-
-		has_connectors = !!drm_atomic_connectors_for_crtc(state, crtc);
-		if (has_connectors != crtc_state->enable)
-			crtc_state->enable =
-			crtc_state->active = has_connectors;
-	}
-
-	ret = intel_modeset_setup_plane_state(state, set->crtc, set->mode,
-					      set->fb, set->x, set->y);
-	if (ret)
-		return ret;
-
-	crtc_state = drm_atomic_get_crtc_state(state, set->crtc);
-	if (IS_ERR(crtc_state))
-		return PTR_ERR(crtc_state);
-
-	if (set->mode)
-		drm_mode_copy(&crtc_state->mode, set->mode);
-
-	if (set->num_connectors)
-		crtc_state->active = true;
-
-	return 0;
-}
-
-static int intel_crtc_set_config(struct drm_mode_set *set)
-{
-	struct drm_device *dev;
-	struct drm_atomic_state *state = NULL;
-	struct intel_crtc_state *pipe_config;
-	int ret;
-
-	BUG_ON(!set);
-	BUG_ON(!set->crtc);
-	BUG_ON(!set->crtc->helper_private);
-
-	/* Enforce sane interface api - has been abused by the fb helper. */
-	BUG_ON(!set->mode && set->fb);
-	BUG_ON(set->fb && set->num_connectors == 0);
-
-	if (set->fb) {
-		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
-				set->crtc->base.id, set->fb->base.id,
-				(int)set->num_connectors, set->x, set->y);
-	} else {
-		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
-	}
-
-	dev = set->crtc->dev;
-
-	state = drm_atomic_state_alloc(dev);
-	if (!state)
-		return -ENOMEM;
-
-	state->acquire_ctx = dev->mode_config.acquire_ctx;
-
-	ret = intel_modeset_stage_output_state(dev, set, state);
-	if (ret)
-		goto out;
-
-	pipe_config = intel_modeset_compute_config(set->crtc, state);
-	if (IS_ERR(pipe_config)) {
-		ret = PTR_ERR(pipe_config);
-		goto out;
-	}
-
-	intel_update_pipe_size(to_intel_crtc(set->crtc));
-
-	ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
-
-	if (ret) {
-		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
-			      set->crtc->base.id, ret);
-	}
-
-out:
-	if (ret)
-		drm_atomic_state_free(state);
-	return ret;
-}
-
 static const struct drm_crtc_funcs intel_crtc_funcs = {
 	.gamma_set = intel_crtc_gamma_set,
-	.set_config = intel_crtc_set_config,
+	.set_config = drm_atomic_helper_set_config,
 	.destroy = intel_crtc_destroy,
 	.page_flip = intel_crtc_page_flip,
 	.atomic_duplicate_state = intel_crtc_duplicate_state,
@@ -13048,6 +13287,8 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	intel_update_cdclk(dev);
+
 	if (HAS_DDI(dev))
 		intel_ddi_pll_init(dev);
 	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
@@ -13059,28 +13300,6 @@
 }
 
 /**
- * intel_wm_need_update - Check whether watermarks need updating
- * @plane: drm plane
- * @state: new plane state
- *
- * Check current plane state versus the new one to determine whether
- * watermarks need to be recalculated.
- *
- * Returns true or false.
- */
-bool intel_wm_need_update(struct drm_plane *plane,
-			  struct drm_plane_state *state)
-{
-	/* Update watermarks on tiling changes. */
-	if (!plane->state->fb || !state->fb ||
-	    plane->state->fb->modifier[0] != state->fb->modifier[0] ||
-	    plane->state->rotation != state->rotation)
-		return true;
-
-	return false;
-}
-
-/**
  * intel_prepare_plane_fb - Prepare fb for usage on plane
  * @plane: drm plane to prepare for
  * @fb: framebuffer to prepare for presentation
@@ -13099,27 +13318,13 @@
 {
 	struct drm_device *dev = plane->dev;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
-	enum pipe pipe = intel_plane->pipe;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
-	unsigned frontbuffer_bits = 0;
 	int ret = 0;
 
 	if (!obj)
 		return 0;
 
-	switch (plane->type) {
-	case DRM_PLANE_TYPE_PRIMARY:
-		frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
-		break;
-	case DRM_PLANE_TYPE_CURSOR:
-		frontbuffer_bits = INTEL_FRONTBUFFER_CURSOR(pipe);
-		break;
-	case DRM_PLANE_TYPE_OVERLAY:
-		frontbuffer_bits = INTEL_FRONTBUFFER_SPRITE(pipe);
-		break;
-	}
-
 	mutex_lock(&dev->struct_mutex);
 
 	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
@@ -13129,11 +13334,11 @@
 		if (ret)
 			DRM_DEBUG_KMS("failed to attach phys object\n");
 	} else {
-		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL);
+		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL);
 	}
 
 	if (ret == 0)
-		i915_gem_track_fb(old_obj, obj, frontbuffer_bits);
+		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -13180,7 +13385,7 @@
 	dev = intel_crtc->base.dev;
 	dev_priv = dev->dev_private;
 	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
-	cdclk = dev_priv->display.get_display_clock_speed(dev);
+	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
 
 	if (!crtc_clock || !cdclk)
 		return DRM_PLANE_HELPER_NO_SCALING;
@@ -13198,112 +13403,28 @@
 
 static int
 intel_check_primary_plane(struct drm_plane *plane,
+			  struct intel_crtc_state *crtc_state,
 			  struct intel_plane_state *state)
 {
-	struct drm_device *dev = plane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = state->base.crtc;
-	struct intel_crtc *intel_crtc;
-	struct intel_crtc_state *crtc_state;
 	struct drm_framebuffer *fb = state->base.fb;
-	struct drm_rect *dest = &state->dst;
-	struct drm_rect *src = &state->src;
-	const struct drm_rect *clip = &state->clip;
-	bool can_position = false;
-	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
 	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
-	int ret;
+	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+	bool can_position = false;
 
-	crtc = crtc ? crtc : plane->crtc;
-	intel_crtc = to_intel_crtc(crtc);
-	crtc_state = state->base.state ?
-		intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;
-
-	if (INTEL_INFO(dev)->gen >= 9) {
-		/* use scaler when colorkey is not required */
-		if (to_intel_plane(plane)->ckey.flags == I915_SET_COLORKEY_NONE) {
-			min_scale = 1;
-			max_scale = skl_max_scale(intel_crtc, crtc_state);
-		}
+	/* use scaler when colorkey is not required */
+	if (INTEL_INFO(plane->dev)->gen >= 9 &&
+	    state->ckey.flags == I915_SET_COLORKEY_NONE) {
+		min_scale = 1;
+		max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
 		can_position = true;
 	}
 
-	ret = drm_plane_helper_check_update(plane, crtc, fb,
-					    src, dest, clip,
-					    min_scale,
-					    max_scale,
-					    can_position, true,
-					    &state->visible);
-	if (ret)
-		return ret;
-
-	if (crtc_state ? crtc_state->base.active : intel_crtc->active) {
-		struct intel_plane_state *old_state =
-			to_intel_plane_state(plane->state);
-
-		intel_crtc->atomic.wait_for_flips = true;
-
-		/*
-		 * FBC does not work on some platforms for rotated
-		 * planes, so disable it when rotation is not 0 and
-		 * update it when rotation is set back to 0.
-		 *
-		 * FIXME: This is redundant with the fbc update done in
-		 * the primary plane enable function except that that
-		 * one is done too late. We eventually need to unify
-		 * this.
-		 */
-		if (state->visible &&
-		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
-		    dev_priv->fbc.crtc == intel_crtc &&
-		    state->base.rotation != BIT(DRM_ROTATE_0)) {
-			intel_crtc->atomic.disable_fbc = true;
-		}
-
-		if (state->visible && !old_state->visible) {
-			/*
-			 * BDW signals flip done immediately if the plane
-			 * is disabled, even if the plane enable is already
-			 * armed to occur at the next vblank :(
-			 */
-			if (IS_BROADWELL(dev))
-				intel_crtc->atomic.wait_vblank = true;
-
-			if (crtc_state)
-				intel_crtc->atomic.post_enable_primary = true;
-		}
-
-		/*
-		 * FIXME: Actually if we will still have any other plane enabled
-		 * on the pipe we could let IPS enabled still, but for
-		 * now lets consider that when we make primary invisible
-		 * by setting DSPCNTR to 0 on update_primary_plane function
-		 * IPS needs to be disable.
-		 */
-		if (!state->visible || !fb)
-			intel_crtc->atomic.disable_ips = true;
-
-		if (!state->visible && old_state->visible &&
-		    crtc_state && !needs_modeset(&crtc_state->base))
-			intel_crtc->atomic.pre_disable_primary = true;
-
-		intel_crtc->atomic.fb_bits |=
-			INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
-
-		intel_crtc->atomic.update_fbc = true;
-
-		if (intel_wm_need_update(plane, &state->base))
-			intel_crtc->atomic.update_wm = true;
-	}
-
-	if (INTEL_INFO(dev)->gen >= 9) {
-		ret = skl_update_scaler_users(intel_crtc, crtc_state,
-			to_intel_plane(plane), state, 0);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
+	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
+					     &state->dst, &state->clip,
+					     min_scale, max_scale,
+					     can_position, true,
+					     &state->visible);
 }
 
 static void
@@ -13324,20 +13445,19 @@
 	crtc->x = src->x1 >> 16;
 	crtc->y = src->y1 >> 16;
 
-	if (intel_crtc->active) {
-		if (state->visible)
-			/* FIXME: kill this fastboot hack */
-			intel_update_pipe_size(intel_crtc);
+	if (!crtc->state->active)
+		return;
 
-		dev_priv->display.update_primary_plane(crtc, plane->fb,
-						       crtc->x, crtc->y);
-	}
+	if (state->visible)
+		/* FIXME: kill this fastboot hack */
+		intel_update_pipe_size(intel_crtc);
+
+	dev_priv->display.update_primary_plane(crtc, fb, crtc->x, crtc->y);
 }
 
 static void
 intel_disable_primary_plane(struct drm_plane *plane,
-			    struct drm_crtc *crtc,
-			    bool force)
+			    struct drm_crtc *crtc)
 {
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -13345,96 +13465,30 @@
 	dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
 }
 
-static void intel_begin_crtc_commit(struct drm_crtc *crtc)
+static void intel_begin_crtc_commit(struct drm_crtc *crtc,
+				    struct drm_crtc_state *old_crtc_state)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_plane *intel_plane;
-	struct drm_plane *p;
-	unsigned fb_bits = 0;
 
-	/* Track fb's for any planes being disabled */
-	list_for_each_entry(p, &dev->mode_config.plane_list, head) {
-		intel_plane = to_intel_plane(p);
-
-		if (intel_crtc->atomic.disabled_planes &
-		    (1 << drm_plane_index(p))) {
-			switch (p->type) {
-			case DRM_PLANE_TYPE_PRIMARY:
-				fb_bits = INTEL_FRONTBUFFER_PRIMARY(intel_plane->pipe);
-				break;
-			case DRM_PLANE_TYPE_CURSOR:
-				fb_bits = INTEL_FRONTBUFFER_CURSOR(intel_plane->pipe);
-				break;
-			case DRM_PLANE_TYPE_OVERLAY:
-				fb_bits = INTEL_FRONTBUFFER_SPRITE(intel_plane->pipe);
-				break;
-			}
-
-			mutex_lock(&dev->struct_mutex);
-			i915_gem_track_fb(intel_fb_obj(p->fb), NULL, fb_bits);
-			mutex_unlock(&dev->struct_mutex);
-		}
-	}
-
-	if (intel_crtc->atomic.wait_for_flips)
-		intel_crtc_wait_for_pending_flips(crtc);
-
-	if (intel_crtc->atomic.disable_fbc)
-		intel_fbc_disable(dev);
-
-	if (intel_crtc->atomic.disable_ips)
-		hsw_disable_ips(intel_crtc);
-
-	if (intel_crtc->atomic.pre_disable_primary)
-		intel_pre_disable_primary(crtc);
-
-	if (intel_crtc->atomic.update_wm)
+	if (intel_crtc->atomic.update_wm_pre)
 		intel_update_watermarks(crtc);
 
-	intel_runtime_pm_get(dev_priv);
-
 	/* Perform vblank evasion around commit operation */
-	if (intel_crtc->active)
-		intel_crtc->atomic.evade =
-			intel_pipe_update_start(intel_crtc,
-						&intel_crtc->atomic.start_vbl_count);
+	if (crtc->state->active)
+		intel_pipe_update_start(intel_crtc, &intel_crtc->start_vbl_count);
+
+	if (!needs_modeset(crtc->state) && INTEL_INFO(dev)->gen >= 9)
+		skl_detach_scalers(intel_crtc);
 }
 
-static void intel_finish_crtc_commit(struct drm_crtc *crtc)
+static void intel_finish_crtc_commit(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_crtc_state)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_plane *p;
 
-	if (intel_crtc->atomic.evade)
-		intel_pipe_update_end(intel_crtc,
-				      intel_crtc->atomic.start_vbl_count);
-
-	intel_runtime_pm_put(dev_priv);
-
-	if (intel_crtc->atomic.wait_vblank)
-		intel_wait_for_vblank(dev, intel_crtc->pipe);
-
-	intel_frontbuffer_flip(dev, intel_crtc->atomic.fb_bits);
-
-	if (intel_crtc->atomic.update_fbc) {
-		mutex_lock(&dev->struct_mutex);
-		intel_fbc_update(dev);
-		mutex_unlock(&dev->struct_mutex);
-	}
-
-	if (intel_crtc->atomic.post_enable_primary)
-		intel_post_enable_primary(crtc);
-
-	drm_for_each_legacy_plane(p, &dev->mode_config.plane_list)
-		if (intel_crtc->atomic.update_sprite_watermarks & drm_plane_index(p))
-			intel_update_sprite_watermarks(p, crtc, 0, 0, 0,
-						       false, false);
-
-	memset(&intel_crtc->atomic, 0, sizeof(intel_crtc->atomic));
+	if (crtc->state->active)
+		intel_pipe_update_end(intel_crtc, intel_crtc->start_vbl_count);
 }
 
 /**
@@ -13469,7 +13523,7 @@
 	struct intel_plane *primary;
 	struct intel_plane_state *state;
 	const uint32_t *intel_primary_formats;
-	int num_formats;
+	unsigned int num_formats;
 
 	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
 	if (primary == NULL)
@@ -13490,10 +13544,10 @@
 	}
 	primary->pipe = pipe;
 	primary->plane = pipe;
+	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
 	primary->check_plane = intel_check_primary_plane;
 	primary->commit_plane = intel_commit_primary_plane;
 	primary->disable_plane = intel_disable_primary_plane;
-	primary->ckey.flags = I915_SET_COLORKEY_NONE;
 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
 		primary->plane = !pipe;
 
@@ -13541,37 +13595,29 @@
 
 static int
 intel_check_cursor_plane(struct drm_plane *plane,
+			 struct intel_crtc_state *crtc_state,
 			 struct intel_plane_state *state)
 {
-	struct drm_crtc *crtc = state->base.crtc;
-	struct drm_device *dev = plane->dev;
+	struct drm_crtc *crtc = crtc_state->base.crtc;
 	struct drm_framebuffer *fb = state->base.fb;
-	struct drm_rect *dest = &state->dst;
-	struct drm_rect *src = &state->src;
-	const struct drm_rect *clip = &state->clip;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct intel_crtc *intel_crtc;
 	unsigned stride;
 	int ret;
 
-	crtc = crtc ? crtc : plane->crtc;
-	intel_crtc = to_intel_crtc(crtc);
-
-	ret = drm_plane_helper_check_update(plane, crtc, fb,
-					    src, dest, clip,
+	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
+					    &state->dst, &state->clip,
 					    DRM_PLANE_HELPER_NO_SCALING,
 					    DRM_PLANE_HELPER_NO_SCALING,
 					    true, true, &state->visible);
 	if (ret)
 		return ret;
 
-
 	/* if we want to turn off the cursor ignore width and height */
 	if (!obj)
-		goto finish;
+		return 0;
 
 	/* Check for which cursor types we support */
-	if (!cursor_size_ok(dev, state->base.crtc_w, state->base.crtc_h)) {
+	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
 		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
 			  state->base.crtc_w, state->base.crtc_h);
 		return -EINVAL;
@@ -13585,34 +13631,16 @@
 
 	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
 		DRM_DEBUG_KMS("cursor cannot be tiled\n");
-		ret = -EINVAL;
+		return -EINVAL;
 	}
 
-finish:
-	if (intel_crtc->active) {
-		if (plane->state->crtc_w != state->base.crtc_w)
-			intel_crtc->atomic.update_wm = true;
-
-		intel_crtc->atomic.fb_bits |=
-			INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe);
-	}
-
-	return ret;
+	return 0;
 }
 
 static void
 intel_disable_cursor_plane(struct drm_plane *plane,
-			   struct drm_crtc *crtc,
-			   bool force)
+			   struct drm_crtc *crtc)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-	if (!force) {
-		plane->fb = NULL;
-		intel_crtc->cursor_bo = NULL;
-		intel_crtc->cursor_addr = 0;
-	}
-
 	intel_crtc_update_cursor(crtc, false);
 }
 
@@ -13645,9 +13673,9 @@
 
 	intel_crtc->cursor_addr = addr;
 	intel_crtc->cursor_bo = obj;
-update:
 
-	if (intel_crtc->active)
+update:
+	if (crtc->state->active)
 		intel_crtc_update_cursor(crtc, state->visible);
 }
 
@@ -13672,6 +13700,7 @@
 	cursor->max_downscale = 1;
 	cursor->pipe = pipe;
 	cursor->plane = pipe;
+	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
 	cursor->check_plane = intel_check_cursor_plane;
 	cursor->commit_plane = intel_commit_cursor_plane;
 	cursor->disable_plane = intel_disable_cursor_plane;
@@ -13712,8 +13741,6 @@
 	for (i = 0; i < intel_crtc->num_scalers; i++) {
 		intel_scaler = &scaler_state->scalers[i];
 		intel_scaler->in_use = 0;
-		intel_scaler->id = i;
-
 		intel_scaler->mode = PS_SCALER_MODE_DYN;
 	}
 
@@ -13785,6 +13812,8 @@
 	intel_crtc->cursor_cntl = ~0;
 	intel_crtc->cursor_size = ~0;
 
+	intel_crtc->wm.cxsr_allowed = true;
+
 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
 	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
@@ -13919,8 +13948,7 @@
 		 */
 		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
 		/* WaIgnoreDDIAStrap: skl */
-		if (found ||
-		    (IS_SKYLAKE(dev) && INTEL_REVID(dev) < SKL_REVID_D0))
+		if (found || IS_SKYLAKE(dev))
 			intel_ddi_init(dev, PORT_A);
 
 		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -13933,6 +13961,15 @@
 			intel_ddi_init(dev, PORT_C);
 		if (found & SFUSE_STRAP_DDID_DETECTED)
 			intel_ddi_init(dev, PORT_D);
+		/*
+		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
+		 */
+		if (IS_SKYLAKE(dev) &&
+		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
+		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
+		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
+			intel_ddi_init(dev, PORT_E);
+
 	} else if (HAS_PCH_SPLIT(dev)) {
 		int found;
 		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
@@ -13996,18 +14033,18 @@
 		}
 
 		intel_dsi_init(dev);
-	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
+	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
 		bool found = false;
 
 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
 			DRM_DEBUG_KMS("probing SDVOB\n");
 			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
-			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+			if (!found && IS_G4X(dev)) {
 				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
 				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
 			}
 
-			if (!found && SUPPORTS_INTEGRATED_DP(dev))
+			if (!found && IS_G4X(dev))
 				intel_dp_init(dev, DP_B, PORT_B);
 		}
 
@@ -14020,15 +14057,15 @@
 
 		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
 
-			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+			if (IS_G4X(dev)) {
 				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
 				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
 			}
-			if (SUPPORTS_INTEGRATED_DP(dev))
+			if (IS_G4X(dev))
 				intel_dp_init(dev, DP_C, PORT_C);
 		}
 
-		if (SUPPORTS_INTEGRATED_DP(dev) &&
+		if (IS_G4X(dev) &&
 		    (I915_READ(DP_D) & DP_DETECTED))
 			intel_dp_init(dev, DP_D, PORT_D);
 	} else if (IS_GEN2(dev))
@@ -14073,9 +14110,27 @@
 	return drm_gem_handle_create(file, &obj->base, handle);
 }
 
+static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
+					struct drm_file *file,
+					unsigned flags, unsigned color,
+					struct drm_clip_rect *clips,
+					unsigned num_clips)
+{
+	struct drm_device *dev = fb->dev;
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+
+	mutex_lock(&dev->struct_mutex);
+	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static const struct drm_framebuffer_funcs intel_fb_funcs = {
 	.destroy = intel_user_framebuffer_destroy,
 	.create_handle = intel_user_framebuffer_create_handle,
+	.dirty = intel_user_framebuffer_dirty,
 };
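
Illustrative sketch, not part of the patch: the new .dirty hook above is
reached from userspace via the DIRTYFB ioctl. A minimal caller, assuming an
already-open DRM fd and an existing framebuffer id (flush_fb_rect is an
invented name):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	static int flush_fb_rect(int fd, uint32_t fb_id)
	{
		struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
		struct drm_mode_fb_dirty_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));
		cmd.fb_id = fb_id;
		cmd.num_clips = 1;
		cmd.clips_ptr = (uint64_t)(uintptr_t)&clip;

		/* the kernel routes this to intel_user_framebuffer_dirty() */
		return ioctl(fd, DRM_IOCTL_MODE_DIRTYFB, &cmd);
	}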
 
 static
@@ -14270,7 +14325,7 @@
 	return intel_framebuffer_create(dev, mode_cmd, obj);
 }
 
-#ifndef CONFIG_DRM_I915_FBDEV
+#ifndef CONFIG_DRM_FBDEV_EMULATION
 static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
 }
@@ -14281,6 +14336,8 @@
 	.output_poll_changed = intel_fbdev_output_poll_changed,
 	.atomic_check = intel_atomic_check,
 	.atomic_commit = intel_atomic_commit,
+	.atomic_state_alloc = intel_atomic_state_alloc,
+	.atomic_state_clear = intel_atomic_state_clear,
 };
 
 /* Set up chip specific display functions */
@@ -14307,7 +14364,6 @@
 			haswell_crtc_compute_clock;
 		dev_priv->display.crtc_enable = haswell_crtc_enable;
 		dev_priv->display.crtc_disable = haswell_crtc_disable;
-		dev_priv->display.off = ironlake_crtc_off;
 		dev_priv->display.update_primary_plane =
 			skylake_update_primary_plane;
 	} else if (HAS_DDI(dev)) {
@@ -14318,7 +14374,6 @@
 			haswell_crtc_compute_clock;
 		dev_priv->display.crtc_enable = haswell_crtc_enable;
 		dev_priv->display.crtc_disable = haswell_crtc_disable;
-		dev_priv->display.off = ironlake_crtc_off;
 		dev_priv->display.update_primary_plane =
 			ironlake_update_primary_plane;
 	} else if (HAS_PCH_SPLIT(dev)) {
@@ -14329,7 +14384,6 @@
 			ironlake_crtc_compute_clock;
 		dev_priv->display.crtc_enable = ironlake_crtc_enable;
 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
-		dev_priv->display.off = ironlake_crtc_off;
 		dev_priv->display.update_primary_plane =
 			ironlake_update_primary_plane;
 	} else if (IS_VALLEYVIEW(dev)) {
@@ -14339,7 +14393,6 @@
 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
-		dev_priv->display.off = i9xx_crtc_off;
 		dev_priv->display.update_primary_plane =
 			i9xx_update_primary_plane;
 	} else {
@@ -14349,7 +14402,6 @@
 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
-		dev_priv->display.off = i9xx_crtc_off;
 		dev_priv->display.update_primary_plane =
 			i9xx_update_primary_plane;
 	}
@@ -14358,6 +14410,9 @@
 	if (IS_SKYLAKE(dev))
 		dev_priv->display.get_display_clock_speed =
 			skylake_get_display_clock_speed;
+	else if (IS_BROXTON(dev))
+		dev_priv->display.get_display_clock_speed =
+			broxton_get_display_clock_speed;
 	else if (IS_BROADWELL(dev))
 		dev_priv->display.get_display_clock_speed =
 			broadwell_get_display_clock_speed;
@@ -14371,9 +14426,21 @@
 		dev_priv->display.get_display_clock_speed =
 			ilk_get_display_clock_speed;
 	else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
-		 IS_GEN6(dev) || IS_IVYBRIDGE(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+		 IS_GEN6(dev) || IS_IVYBRIDGE(dev))
 		dev_priv->display.get_display_clock_speed =
 			i945_get_display_clock_speed;
+	else if (IS_GM45(dev))
+		dev_priv->display.get_display_clock_speed =
+			gm45_get_display_clock_speed;
+	else if (IS_CRESTLINE(dev))
+		dev_priv->display.get_display_clock_speed =
+			i965gm_get_display_clock_speed;
+	else if (IS_PINEVIEW(dev))
+		dev_priv->display.get_display_clock_speed =
+			pnv_get_display_clock_speed;
+	else if (IS_G33(dev) || IS_G4X(dev))
+		dev_priv->display.get_display_clock_speed =
+			g33_get_display_clock_speed;
 	else if (IS_I915G(dev))
 		dev_priv->display.get_display_clock_speed =
 			i915_get_display_clock_speed;
@@ -14391,10 +14458,12 @@
 			i865_get_display_clock_speed;
 	else if (IS_I85X(dev))
 		dev_priv->display.get_display_clock_speed =
-			i855_get_display_clock_speed;
-	else /* 852, 830 */
+			i85x_get_display_clock_speed;
+	else { /* 830 */
+		WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
 		dev_priv->display.get_display_clock_speed =
 			i830_get_display_clock_speed;
+	}
 
 	if (IS_GEN5(dev)) {
 		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
@@ -14405,12 +14474,22 @@
 		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
+		if (IS_BROADWELL(dev)) {
+			dev_priv->display.modeset_commit_cdclk =
+				broadwell_modeset_commit_cdclk;
+			dev_priv->display.modeset_calc_cdclk =
+				broadwell_modeset_calc_cdclk;
+		}
 	} else if (IS_VALLEYVIEW(dev)) {
-		dev_priv->display.modeset_global_resources =
-			valleyview_modeset_global_resources;
+		dev_priv->display.modeset_commit_cdclk =
+			valleyview_modeset_commit_cdclk;
+		dev_priv->display.modeset_calc_cdclk =
+			valleyview_modeset_calc_cdclk;
 	} else if (IS_BROXTON(dev)) {
-		dev_priv->display.modeset_global_resources =
-			broxton_modeset_global_resources;
+		dev_priv->display.modeset_commit_cdclk =
+			broxton_modeset_commit_cdclk;
+		dev_priv->display.modeset_calc_cdclk =
+			broxton_modeset_calc_cdclk;
 	}
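
Illustrative sketch, not part of the patch: the old modeset_global_resources
hook is replaced by a calc/commit pair. Assuming the drm_atomic_state-based
signatures used by the broadwell/valleyview/broxton implementations above,
the hooks have this shape (the example_ names are invented):

	/* runs during atomic check: validate and compute the required cdclk */
	static int example_modeset_calc_cdclk(struct drm_atomic_state *state)
	{
		/* e.g. derive the minimum cdclk from the planned pipe clocks */
		return 0;
	}

	/* runs during atomic commit: program the frequency computed above */
	static void example_modeset_commit_cdclk(struct drm_atomic_state *state)
	{
		/* e.g. reprogram the cdclk PLL and update the cached frequency */
	}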
 
 	switch (INTEL_INFO(dev)->gen) {
@@ -14629,13 +14708,9 @@
 
 void intel_modeset_init_hw(struct drm_device *dev)
 {
+	intel_update_cdclk(dev);
 	intel_prepare_ddi(dev);
-
-	if (IS_VALLEYVIEW(dev))
-		vlv_update_cdclk(dev);
-
 	intel_init_clock_gating(dev);
-
 	intel_enable_gt_powersave(dev);
 }
 
@@ -14665,6 +14740,24 @@
 	if (INTEL_INFO(dev)->num_pipes == 0)
 		return;
 
+	/*
+	 * There may be no VBT; and if the BIOS enabled SSC we can
+	 * just keep using it to avoid unnecessary flicker.  Whereas if the
+	 * BIOS isn't using it, don't assume it will work even if the VBT
+	 * indicates as much.
+	 */
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
+					    DREF_SSC1_ENABLE);
+
+		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
+				     bios_lvds_use_ssc ? "en" : "dis",
+				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
+			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
+		}
+	}
+
 	intel_init_display(dev);
 	intel_init_audio(dev);
 
@@ -14715,13 +14808,15 @@
 	intel_setup_outputs(dev);
 
 	/* Just in case the BIOS is doing something questionable. */
-	intel_fbc_disable(dev);
+	intel_fbc_disable(dev_priv);
 
 	drm_modeset_lock_all(dev);
-	intel_modeset_setup_hw_state(dev, false);
+	intel_modeset_setup_hw_state(dev);
 	drm_modeset_unlock_all(dev);
 
 	for_each_intel_crtc(dev, crtc) {
+		struct intel_initial_plane_config plane_config = {};
+
 		if (!crtc->active)
 			continue;
 
@@ -14732,15 +14827,14 @@
 		 * can even allow for smooth boot transitions if the BIOS
 		 * fb is large enough for the active pipe configuration.
 		 */
-		if (dev_priv->display.get_initial_plane_config) {
-			dev_priv->display.get_initial_plane_config(crtc,
-							   &crtc->plane_config);
-			/*
-			 * If the fb is shared between multiple heads, we'll
-			 * just get the first one.
-			 */
-			intel_find_initial_plane_obj(crtc, &crtc->plane_config);
-		}
+		dev_priv->display.get_initial_plane_config(crtc,
+							   &plane_config);
+
+		/*
+		 * If the fb is shared between multiple heads, we'll
+		 * just get the first one.
+		 */
+		intel_find_initial_plane_obj(crtc, &plane_config);
 	}
 }
 
@@ -14792,7 +14886,9 @@
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *encoder;
 	u32 reg;
+	bool enable;
 
 	/* Clear any frame start delays used for debugging left by the BIOS */
 	reg = PIPECONF(crtc->config->cpu_transcoder);
@@ -14801,6 +14897,7 @@
 	/* restore vblank interrupts to correct state */
 	drm_crtc_vblank_reset(&crtc->base);
 	if (crtc->active) {
+		drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
 		update_scanline_offset(crtc);
 		drm_crtc_vblank_on(&crtc->base);
 	}
@@ -14809,7 +14906,6 @@
 	 * disable the crtc (and hence change the state) if it is wrong. Note
 	 * that gen4+ has a fixed plane -> pipe mapping.  */
 	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
-		struct intel_connector *connector;
 		bool plane;
 
 		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
@@ -14821,30 +14917,8 @@
 		plane = crtc->plane;
 		to_intel_plane_state(crtc->base.primary->state)->visible = true;
 		crtc->plane = !plane;
-		intel_crtc_disable_planes(&crtc->base);
-		dev_priv->display.crtc_disable(&crtc->base);
+		intel_crtc_disable_noatomic(&crtc->base);
 		crtc->plane = plane;
-
-		/* ... and break all links. */
-		for_each_intel_connector(dev, connector) {
-			if (connector->encoder->base.crtc != &crtc->base)
-				continue;
-
-			connector->base.dpms = DRM_MODE_DPMS_OFF;
-			connector->base.encoder = NULL;
-		}
-		/* multiple connectors may have the same encoder:
-		 *  handle them and break crtc link separately */
-		for_each_intel_connector(dev, connector)
-			if (connector->encoder->base.crtc == &crtc->base) {
-				connector->encoder->base.crtc = NULL;
-				connector->encoder->connectors_active = false;
-			}
-
-		WARN_ON(crtc->active);
-		crtc->base.state->enable = false;
-		crtc->base.state->active = false;
-		crtc->base.enabled = false;
 	}
 
 	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
@@ -14858,20 +14932,27 @@
 
 	/* Adjust the state of the output pipe according to whether we
 	 * have active connectors/encoders. */
-	intel_crtc_update_dpms(&crtc->base);
+	enable = false;
+	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+		enable = true;
+		break;
+	}
 
-	if (crtc->active != crtc->base.state->enable) {
-		struct intel_encoder *encoder;
+	if (!enable)
+		intel_crtc_disable_noatomic(&crtc->base);
+
+	if (crtc->active != crtc->base.state->active) {
 
 		/* This can happen either due to bugs in the get_hw_state
-		 * functions or because the pipe is force-enabled due to the
+		 * functions or because of calls to intel_crtc_disable_noatomic,
+		 * or because the pipe is force-enabled due to the
 		 * pipe A quirk. */
 		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
 			      crtc->base.base.id,
 			      crtc->base.state->enable ? "enabled" : "disabled",
 			      crtc->active ? "enabled" : "disabled");
 
-		crtc->base.state->enable = crtc->active;
+		WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
 		crtc->base.state->active = crtc->active;
 		crtc->base.enabled = crtc->active;
 
@@ -14882,10 +14963,8 @@
 		 *  actually up, hence no need to break them. */
 		WARN_ON(crtc->active);
 
-		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
-			WARN_ON(encoder->connectors_active);
+		for_each_encoder_on_crtc(dev, &crtc->base, encoder)
 			encoder->base.crtc = NULL;
-		}
 	}
 
 	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
@@ -14911,6 +14990,7 @@
 {
 	struct intel_connector *connector;
 	struct drm_device *dev = encoder->base.dev;
+	bool active = false;
 
 	/* We need to check both for a crtc link (meaning that the
 	 * encoder is active and trying to read from a pipe) and the
@@ -14918,7 +14998,15 @@
 	bool has_active_crtc = encoder->base.crtc &&
 		to_intel_crtc(encoder->base.crtc)->active;
 
-	if (encoder->connectors_active && !has_active_crtc) {
+	for_each_intel_connector(dev, connector) {
+		if (connector->base.encoder != &encoder->base)
+			continue;
+
+		active = true;
+		break;
+	}
+
+	if (active && !has_active_crtc) {
 		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
 			      encoder->base.base.id,
 			      encoder->base.name);
@@ -14935,7 +15023,6 @@
 				encoder->post_disable(encoder);
 		}
 		encoder->base.crtc = NULL;
-		encoder->connectors_active = false;
 
 		/* Inconsistent output/port/pipe state happens presumably due to
 		 * a bug in one of the get_hw_state functions. Or someplace else
@@ -14984,10 +15071,31 @@
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
-	if (!crtc->active)
-		return false;
+	return !!(I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE);
+}
 
-	return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
+static void readout_plane_state(struct intel_crtc *crtc,
+				struct intel_crtc_state *crtc_state)
+{
+	struct intel_plane *p;
+	struct intel_plane_state *plane_state;
+	bool active = crtc_state->base.active;
+
+	for_each_intel_plane(crtc->base.dev, p) {
+		if (crtc->pipe != p->pipe)
+			continue;
+
+		plane_state = to_intel_plane_state(p->base.state);
+
+		if (p->base.type == DRM_PLANE_TYPE_PRIMARY)
+			plane_state->visible = primary_get_hw_state(crtc);
+		else {
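+			/* non-primary hw state can't be read out; just force the plane off */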
+			if (active)
+				p->disable_plane(&p->base, &crtc->base);
+
+			plane_state->visible = false;
+		}
+	}
 }
 
 static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15000,23 +15108,44 @@
 	int i;
 
 	for_each_intel_crtc(dev, crtc) {
-		struct drm_plane *primary = crtc->base.primary;
-		struct intel_plane_state *plane_state;
-
+		__drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
 		memset(crtc->config, 0, sizeof(*crtc->config));
 		crtc->config->base.crtc = &crtc->base;
 
-		crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
-
 		crtc->active = dev_priv->display.get_pipe_config(crtc,
 								 crtc->config);
 
-		crtc->base.state->enable = crtc->active;
 		crtc->base.state->active = crtc->active;
 		crtc->base.enabled = crtc->active;
 
-		plane_state = to_intel_plane_state(primary->state);
-		plane_state->visible = primary_get_hw_state(crtc);
+		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
+		if (crtc->base.state->active) {
+			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
+			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
+			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
+
+			/*
+			 * The initial mode needs to be set in order to keep
+			 * the atomic core happy. It wants a valid mode if the
+			 * crtc's enabled, so we do the above call.
+			 *
+			 * At this point some state updated by the connectors
+			 * in their ->detect() callback has not run yet, so
+			 * no recalculation can be done yet.
+			 *
+			 * Even if we could do a recalculation and modeset
+			 * right now it would cause a double modeset if
+			 * fbdev or userspace chooses a different initial mode.
+			 *
+			 * If that happens, someone indicated they wanted a
+			 * mode change, which means it's safe to do a full
+			 * recalculation.
+			 */
+			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
+		}
+
+		crtc->base.hwmode = crtc->config->base.adjusted_mode;
+		readout_plane_state(crtc, to_intel_crtc_state(crtc->base.state));
 
 		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
 			      crtc->base.base.id,
@@ -15055,7 +15184,6 @@
 			encoder->base.crtc = NULL;
 		}
 
-		encoder->connectors_active = false;
 		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
 			      encoder->base.base.id,
 			      encoder->base.name,
@@ -15066,7 +15194,6 @@
 	for_each_intel_connector(dev, connector) {
 		if (connector->get_hw_state(connector)) {
 			connector->base.dpms = DRM_MODE_DPMS_ON;
-			connector->encoder->connectors_active = true;
 			connector->base.encoder = &connector->encoder->base;
 		} else {
 			connector->base.dpms = DRM_MODE_DPMS_OFF;
@@ -15079,10 +15206,11 @@
 	}
 }
 
-/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
- * and i915 state tracking structures. */
-void intel_modeset_setup_hw_state(struct drm_device *dev,
-				  bool force_restore)
+/* Scan out the current hw modeset state and sanitize it to the
+ * current state.
+ */
+static void
+intel_modeset_setup_hw_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum pipe pipe;
@@ -15092,21 +15220,6 @@
 
 	intel_modeset_readout_hw_state(dev);
 
-	/*
-	 * Now that we have the config, copy it to each CRTC struct
-	 * Note that this could go away if we move to using crtc_config
-	 * checking everywhere.
-	 */
-	for_each_intel_crtc(dev, crtc) {
-		if (crtc->active && i915.fastboot) {
-			intel_mode_from_pipe_config(&crtc->base.mode,
-						    crtc->config);
-			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
-				      crtc->base.base.id);
-			drm_mode_debug_printmodeline(&crtc->base.mode);
-		}
-	}
-
 	/* HW state is read out, now we need to sanitize this mess. */
 	for_each_intel_encoder(dev, encoder) {
 		intel_sanitize_encoder(encoder);
@@ -15133,34 +15246,77 @@
 		pll->on = false;
 	}
 
-	if (IS_GEN9(dev))
+	if (IS_VALLEYVIEW(dev))
+		vlv_wm_get_hw_state(dev);
+	else if (IS_GEN9(dev))
 		skl_wm_get_hw_state(dev);
 	else if (HAS_PCH_SPLIT(dev))
 		ilk_wm_get_hw_state(dev);
 
-	if (force_restore) {
-		i915_redisable_vga(dev);
+	for_each_intel_crtc(dev, crtc) {
+		unsigned long put_domains;
 
-		/*
-		 * We need to use raw interfaces for restoring state to avoid
-		 * checking (bogus) intermediate states.
-		 */
-		for_each_pipe(dev_priv, pipe) {
-			struct drm_crtc *crtc =
-				dev_priv->pipe_to_crtc_mapping[pipe];
+		put_domains = modeset_get_crtc_power_domains(&crtc->base);
+		if (WARN_ON(put_domains))
+			modeset_put_power_domains(dev_priv, put_domains);
+	}
+	intel_display_set_init_power(dev_priv, false);
+}
 
-			intel_crtc_restore_mode(crtc);
-		}
-	} else {
-		intel_modeset_update_staged_output_state(dev);
+void intel_display_resume(struct drm_device *dev)
+{
+	struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
+	struct intel_connector *conn;
+	struct intel_plane *plane;
+	struct drm_crtc *crtc;
+	int ret;
+
+	if (!state)
+		return;
+
+	state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+	/* preserve complete old state, including dpll */
+	intel_atomic_get_shared_dpll_state(state);
+
+	for_each_crtc(dev, crtc) {
+		struct drm_crtc_state *crtc_state =
+			drm_atomic_get_crtc_state(state, crtc);
+
+		ret = PTR_ERR_OR_ZERO(crtc_state);
+		if (ret)
+			goto err;
+
+		/* force a restore */
+		crtc_state->mode_changed = true;
 	}
 
-	intel_modeset_check_state(dev);
+	for_each_intel_plane(dev, plane) {
+		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
+		if (ret)
+			goto err;
+	}
+
+	for_each_intel_connector(dev, conn) {
+		ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
+		if (ret)
+			goto err;
+	}
+
+	intel_modeset_setup_hw_state(dev);
+
+	i915_redisable_vga(dev);
+	ret = drm_atomic_commit(state);
+	if (!ret)
+		return;
+
+err:
+	DRM_ERROR("Restoring old state failed with %i\n", ret);
+	drm_atomic_state_free(state);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *c;
 	struct drm_i915_gem_object *obj;
 	int ret;
@@ -15169,16 +15325,6 @@
 	intel_init_gt_powersave(dev);
 	mutex_unlock(&dev->struct_mutex);
 
-	/*
-	 * There may be no VBT; and if the BIOS enabled SSC we can
-	 * just keep using it to avoid unnecessary flicker.  Whereas if the
-	 * BIOS isn't using it, don't assume it will work even if the VBT
-	 * indicates as much.
-	 */
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-		dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
-						DREF_SSC1_ENABLE);
-
 	intel_modeset_init_hw(dev);
 
 	intel_setup_overlay(dev);
@@ -15197,14 +15343,16 @@
 		ret = intel_pin_and_fence_fb_obj(c->primary,
 						 c->primary->fb,
 						 c->primary->state,
-						 NULL);
+						 NULL, NULL);
 		mutex_unlock(&dev->struct_mutex);
 		if (ret) {
 			DRM_ERROR("failed to pin boot fb on pipe %d\n",
 				  to_intel_crtc(c)->pipe);
 			drm_framebuffer_unreference(c->primary->fb);
 			c->primary->fb = NULL;
+			c->primary->crtc = c->primary->state->crtc = NULL;
 			update_state_fb(c->primary);
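+			/* also drop the primary from the crtc state's plane mask */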
+			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
 		}
 	}
 
@@ -15241,13 +15389,9 @@
 	 */
 	drm_kms_helper_poll_fini(dev);
 
-	mutex_lock(&dev->struct_mutex);
-
 	intel_unregister_dsm_handler();
 
-	intel_fbc_disable(dev);
-
-	mutex_unlock(&dev->struct_mutex);
+	intel_fbc_disable(dev_priv);
 
 	/* flush any delayed tasks or pending work */
 	flush_scheduled_work();
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1df0e1f..0a2e33f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -48,28 +48,28 @@
 #define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
 
 struct dp_link_dpll {
-	int link_bw;
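+	/* link clock in kHz, e.g. 162000 (162 MHz) for the DP 1.62 Gbps rate */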
+	int clock;
 	struct dpll dpll;
 };
 
 static const struct dp_link_dpll gen4_dpll[] = {
-	{ DP_LINK_BW_1_62,
+	{ 162000,
 		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
-	{ DP_LINK_BW_2_7,
+	{ 270000,
 		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
 };
 
 static const struct dp_link_dpll pch_dpll[] = {
-	{ DP_LINK_BW_1_62,
+	{ 162000,
 		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
-	{ DP_LINK_BW_2_7,
+	{ 270000,
 		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
 };
 
 static const struct dp_link_dpll vlv_dpll[] = {
-	{ DP_LINK_BW_1_62,
+	{ 162000,
 		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
-	{ DP_LINK_BW_2_7,
+	{ 270000,
 		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
 };
 
@@ -83,14 +83,16 @@
 	 * m2 is stored in fixed point format using formula below
 	 * (m2_int << 22) | m2_fraction
 	 */
-	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
+	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
 		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
-	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
+	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
 		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
-	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
+	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
 		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
 };
 
+static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
+				  324000, 432000, 540000 };
 static const int skl_rates[] = { 162000, 216000, 270000,
 				  324000, 432000, 540000 };
 static const int default_rates[] = { 162000, 270000, 540000 };
@@ -562,7 +564,9 @@
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (IS_BROXTON(dev))
+		return BXT_PP_CONTROL(0);
+	else if (HAS_PCH_SPLIT(dev))
 		return PCH_PP_CONTROL;
 	else
 		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
@@ -572,7 +576,9 @@
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (IS_BROXTON(dev))
+		return BXT_PP_STATUS(0);
+	else if (HAS_PCH_SPLIT(dev))
 		return PCH_PP_STATUS;
 	else
 		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
@@ -705,7 +711,8 @@
 		return 0;
 
 	if (intel_dig_port->port == PORT_A) {
-		return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
+		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
+
 	} else {
 		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
 	}
@@ -720,7 +727,7 @@
 	if (intel_dig_port->port == PORT_A) {
 		if (index)
 			return 0;
-		return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
+		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
 	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
 		/* Workaround for non-ULT HSW */
 		switch (index) {
@@ -839,8 +846,15 @@
 	}
 
 	if (try == 3) {
-		WARN(1, "dp_aux_ch not started status 0x%08x\n",
-		     I915_READ(ch_ctl));
+		static u32 last_status = -1;
+		const u32 status = I915_READ(ch_ctl);
+
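+		/* WARN only when the status changes, to avoid flooding the log */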
+		if (status != last_status) {
+			WARN(1, "dp_aux_ch not started status 0x%08x\n",
+			     status);
+			last_status = status;
+		}
+
 		ret = -EBUSY;
 		goto out;
 	}
@@ -1016,11 +1030,34 @@
 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	enum port port = intel_dig_port->port;
+	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
 	const char *name = NULL;
+	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
 	int ret;
 
+	/* On SKL we don't have an AUX channel for port E, so we rely on
+	 * the VBT to select a proper alternate AUX channel.
+	 */
+	if (IS_SKYLAKE(dev) && port == PORT_E) {
+		switch (info->alternate_aux_channel) {
+		case DP_AUX_B:
+			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
+			break;
+		case DP_AUX_C:
+			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
+			break;
+		case DP_AUX_D:
+			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
+			break;
+		case DP_AUX_A:
+		default:
+			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
+		}
+	}
+
 	switch (port) {
 	case PORT_A:
 		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
@@ -1038,6 +1075,10 @@
 		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
 		name = "DPDDC-D";
 		break;
+	case PORT_E:
+		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
+		name = "DPDDC-E";
+		break;
 	default:
 		BUG();
 	}
@@ -1051,7 +1092,7 @@
 	 *
 	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
 	 */
-	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
+	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
 		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
 
 	intel_dp->aux.name = name;
@@ -1089,7 +1130,7 @@
 }
 
 static void
-skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
+skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
 {
 	u32 ctrl1;
 
@@ -1101,7 +1142,7 @@
 	pipe_config->dpll_hw_state.cfgcr2 = 0;
 
 	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
-	switch (link_clock / 2) {
+	switch (pipe_config->port_clock / 2) {
 	case 81000:
 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
 					      SKL_DPLL0);
@@ -1134,20 +1175,20 @@
 	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
 }
 
-static void
-hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
+void
+hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
 {
 	memset(&pipe_config->dpll_hw_state, 0,
 	       sizeof(pipe_config->dpll_hw_state));
 
-	switch (link_bw) {
-	case DP_LINK_BW_1_62:
+	switch (pipe_config->port_clock / 2) {
+	case 81000:
 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
 		break;
-	case DP_LINK_BW_2_7:
+	case 135000:
 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
 		break;
-	case DP_LINK_BW_5_4:
+	case 270000:
 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
 		break;
 	}
@@ -1182,23 +1223,29 @@
 static int
 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
 {
-	if (IS_SKYLAKE(dev)) {
+	int size;
+
+	if (IS_BROXTON(dev)) {
+		*source_rates = bxt_rates;
+		size = ARRAY_SIZE(bxt_rates);
+	} else if (IS_SKYLAKE(dev)) {
 		*source_rates = skl_rates;
-		return ARRAY_SIZE(skl_rates);
+		size = ARRAY_SIZE(skl_rates);
+	} else {
+		*source_rates = default_rates;
+		size = ARRAY_SIZE(default_rates);
 	}
 
-	*source_rates = default_rates;
-
 	/* This depends on the fact that 5.4 is the last value in the array */
-	if (intel_dp_source_supports_hbr2(dev))
-		return (DP_LINK_BW_5_4 >> 3) + 1;
-	else
-		return (DP_LINK_BW_2_7 >> 3) + 1;
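+	/* 540000 (HBR2) is the last entry in each rate table, so drop just it */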
+	if (!intel_dp_source_supports_hbr2(dev))
+		size--;
+
+	return size;
 }
 
 static void
 intel_dp_set_clock(struct intel_encoder *encoder,
-		   struct intel_crtc_state *pipe_config, int link_bw)
+		   struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = encoder->base.dev;
 	const struct dp_link_dpll *divisor = NULL;
@@ -1220,7 +1267,7 @@
 
 	if (divisor && count) {
 		for (i = 0; i < count; i++) {
-			if (link_bw == divisor[i].link_bw) {
+			if (pipe_config->port_clock == divisor[i].clock) {
 				pipe_config->dpll = divisor[i].dpll;
 				pipe_config->clock_set = true;
 				break;
@@ -1378,7 +1425,7 @@
 
 		if (INTEL_INFO(dev)->gen >= 9) {
 			int ret;
-			ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
+			ret = skl_update_scaler_crtc(pipe_config);
 			if (ret)
 				return ret;
 		}
@@ -1403,7 +1450,10 @@
 	 * bpc in between. */
 	bpp = pipe_config->pipe_bpp;
 	if (is_edp(intel_dp)) {
-		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
+
+		/* Get bpp from VBT only for panels that don't have bpp in EDID */
+		if (intel_connector->base.display_info.bpc == 0 &&
+			(dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
 			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
 				      dev_priv->vbt.edp_bpp);
 			bpp = dev_priv->vbt.edp_bpp;
@@ -1494,13 +1544,13 @@
 	}
 
 	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
-		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
+		skl_edp_set_pll_config(pipe_config);
 	else if (IS_BROXTON(dev))
 		/* handled in ddi */;
 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
+		hsw_dp_set_ddi_pll_sel(pipe_config);
 	else
-		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
+		intel_dp_set_clock(encoder, pipe_config);
 
 	return true;
 }
@@ -1703,8 +1753,10 @@
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
 	control = I915_READ(_pp_ctrl_reg(intel_dp));
-	control &= ~PANEL_UNLOCK_MASK;
-	control |= PANEL_UNLOCK_REGS;
+	if (!IS_BROXTON(dev)) {
+		control &= ~PANEL_UNLOCK_MASK;
+		control |= PANEL_UNLOCK_REGS;
+	}
 	return control;
 }
 
@@ -2616,7 +2668,7 @@
 		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
 			      pipe_name(pipe), port_name(port));
 
-		WARN(encoder->connectors_active,
+		WARN(encoder->base.crtc,
 		     "stealing pipe %c power sequencer from active eDP port %c\n",
 		     pipe_name(pipe), port_name(port));
 
@@ -3418,92 +3470,6 @@
 	}
 }
 
-/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
-static uint32_t
-hsw_signal_levels(uint8_t train_set)
-{
-	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
-					 DP_TRAIN_PRE_EMPHASIS_MASK);
-	switch (signal_levels) {
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		return DDI_BUF_TRANS_SELECT(0);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		return DDI_BUF_TRANS_SELECT(1);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
-		return DDI_BUF_TRANS_SELECT(2);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
-		return DDI_BUF_TRANS_SELECT(3);
-
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		return DDI_BUF_TRANS_SELECT(4);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		return DDI_BUF_TRANS_SELECT(5);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
-		return DDI_BUF_TRANS_SELECT(6);
-
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		return DDI_BUF_TRANS_SELECT(7);
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		return DDI_BUF_TRANS_SELECT(8);
-
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		return DDI_BUF_TRANS_SELECT(9);
-	default:
-		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
-			      "0x%x\n", signal_levels);
-		return DDI_BUF_TRANS_SELECT(0);
-	}
-}
-
-static void bxt_signal_levels(struct intel_dp *intel_dp)
-{
-	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-	enum port port = dport->port;
-	struct drm_device *dev = dport->base.base.dev;
-	struct intel_encoder *encoder = &dport->base;
-	uint8_t train_set = intel_dp->train_set[0];
-	uint32_t level = 0;
-
-	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
-					 DP_TRAIN_PRE_EMPHASIS_MASK);
-	switch (signal_levels) {
-	default:
-		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		level = 0;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		level = 1;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
-		level = 2;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
-		level = 3;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		level = 4;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		level = 5;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
-		level = 6;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		level = 7;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
-		level = 8;
-		break;
-	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
-		level = 9;
-		break;
-	}
-
-	bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
-}
-
 /* Properly updates "DP" with the correct signal levels. */
 static void
 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -3511,22 +3477,20 @@
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	enum port port = intel_dig_port->port;
 	struct drm_device *dev = intel_dig_port->base.base.dev;
-	uint32_t signal_levels, mask;
+	uint32_t signal_levels, mask = 0;
 	uint8_t train_set = intel_dp->train_set[0];
 
-	if (IS_BROXTON(dev)) {
-		signal_levels = 0;
-		bxt_signal_levels(intel_dp);
-		mask = 0;
-	} else if (HAS_DDI(dev)) {
-		signal_levels = hsw_signal_levels(train_set);
-		mask = DDI_BUF_EMP_MASK;
+	if (HAS_DDI(dev)) {
+		signal_levels = ddi_signal_levels(intel_dp);
+
+		if (IS_BROXTON(dev))
+			signal_levels = 0;
+		else
+			mask = DDI_BUF_EMP_MASK;
 	} else if (IS_CHERRYVIEW(dev)) {
 		signal_levels = chv_signal_levels(intel_dp);
-		mask = 0;
 	} else if (IS_VALLEYVIEW(dev)) {
 		signal_levels = vlv_signal_levels(intel_dp);
-		mask = 0;
 	} else if (IS_GEN7(dev) && port == PORT_A) {
 		signal_levels = gen7_edp_signal_levels(train_set);
 		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
@@ -4043,43 +4007,67 @@
 	return intel_dp->is_mst;
 }
 
-int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
 {
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct intel_crtc *intel_crtc =
-		to_intel_crtc(intel_dig_port->base.base.crtc);
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
 	u8 buf;
-	int test_crc_count;
-	int attempts = 6;
-	int ret = 0;
-
-	hsw_disable_ips(intel_crtc);
-
-	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
-		ret = -EIO;
-		goto out;
-	}
-
-	if (!(buf & DP_TEST_CRC_SUPPORTED)) {
-		ret = -ENOTTY;
-		goto out;
-	}
 
 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
-		ret = -EIO;
-		goto out;
+		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
+		return;
 	}
 
 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
-				buf | DP_TEST_SINK_START) < 0) {
-		ret = -EIO;
-		goto out;
+			       buf & ~DP_TEST_SINK_START) < 0)
+		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
+
+	hsw_enable_ips(intel_crtc);
+}
+
+static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+	u8 buf;
+
+	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
+		return -EIO;
+
+	if (!(buf & DP_TEST_CRC_SUPPORTED))
+		return -ENOTTY;
+
+	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
+		return -EIO;
+
+	hsw_disable_ips(intel_crtc);
+
+	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
+			       buf | DP_TEST_SINK_START) < 0) {
+		hsw_enable_ips(intel_crtc);
+		return -EIO;
 	}
 
+	return 0;
+}
+
+int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+{
+	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = dig_port->base.base.dev;
+	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
+	u8 buf;
+	int test_crc_count;
+	int attempts = 6;
+	int ret;
+
+	ret = intel_dp_sink_crc_start(intel_dp);
+	if (ret)
+		return ret;
+
 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
 		ret = -EIO;
-		goto out;
+		goto stop;
 	}
 
 	test_crc_count = buf & DP_TEST_COUNT_MASK;
@@ -4088,7 +4076,7 @@
 		if (drm_dp_dpcd_readb(&intel_dp->aux,
 				      DP_TEST_SINK_MISC, &buf) < 0) {
 			ret = -EIO;
-			goto out;
+			goto stop;
 		}
 		intel_wait_for_vblank(dev, intel_crtc->pipe);
 	} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
@@ -4096,25 +4084,13 @@
 	if (attempts == 0) {
 		DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
 		ret = -ETIMEDOUT;
-		goto out;
+		goto stop;
 	}
 
-	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
+	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
 		ret = -EIO;
-		goto out;
-	}
-
-	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
-		ret = -EIO;
-		goto out;
-	}
-	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
-			       buf & ~DP_TEST_SINK_START) < 0) {
-		ret = -EIO;
-		goto out;
-	}
-out:
-	hsw_enable_ips(intel_crtc);
+stop:
+	intel_dp_sink_crc_stop(intel_dp);
 	return ret;
 }
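
Illustrative sketch, not part of the patch: a caller of the refactored helper
(the debugfs CRC support is one such user), assuming intel_dp points at a
connected panel; example_read_sink_crc is an invented name:

	static void example_read_sink_crc(struct intel_dp *intel_dp)
	{
		u8 crc[6];

		/* start/stop handling is now internal to intel_dp_sink_crc() */
		if (intel_dp_sink_crc(intel_dp, crc) == 0)
			DRM_DEBUG_KMS("sink CRC: %02x%02x%02x%02x%02x%02x\n",
				      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
	}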
 
@@ -4175,9 +4151,16 @@
 				      intel_dp->aux.i2c_defer_count);
 		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
 	} else {
+		struct edid *block = intel_connector->detect_edid;
+
+		/* We have to write the checksum of the last block read */
+		block += intel_connector->detect_edid->extensions;
+
 		if (!drm_dp_dpcd_write(&intel_dp->aux,
 					DP_TEST_EDID_CHECKSUM,
-					&intel_connector->detect_edid->checksum,
+					&block->checksum,
 					1))
 			DRM_DEBUG_KMS("Failed to write EDID checksum\n");
 
@@ -4325,10 +4308,7 @@
 
 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
-	if (!intel_encoder->connectors_active)
-		return;
-
-	if (WARN_ON(!intel_encoder->base.crtc))
+	if (!intel_encoder->base.crtc)
 		return;
 
 	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
@@ -4909,7 +4889,7 @@
 }
 
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_dp_detect,
 	.force = intel_dp_force,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -4931,12 +4911,6 @@
 	.destroy = intel_dp_encoder_destroy,
 };
 
-void
-intel_dp_hot_plug(struct intel_encoder *intel_encoder)
-{
-	return;
-}
-
 enum irqreturn
 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 {
@@ -4987,9 +4961,12 @@
 
 		intel_dp_probe_oui(intel_dp);
 
-		if (!intel_dp_probe_mst(intel_dp))
+		if (!intel_dp_probe_mst(intel_dp)) {
+			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+			intel_dp_check_link_status(intel_dp);
+			drm_modeset_unlock(&dev->mode_config.connection_mutex);
 			goto mst_fail;
-
+		}
 	} else {
 		if (intel_dp->is_mst) {
 			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
@@ -4997,10 +4974,6 @@
 		}
 
 		if (!intel_dp->is_mst) {
-			/*
-			 * we'll check the link status via the normal hot plug path later -
-			 * but for short hpds we should check it now
-			 */
 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 			intel_dp_check_link_status(intel_dp);
 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -5042,16 +5015,17 @@
 	return -1;
 }
 
-/* check the VBT to see whether the eDP is on DP-D port */
+/* check the VBT to see whether the eDP is on another port */
 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	union child_device_config *p_child;
 	int i;
 	static const short port_mapping[] = {
-		[PORT_B] = PORT_IDPB,
-		[PORT_C] = PORT_IDPC,
-		[PORT_D] = PORT_IDPD,
+		[PORT_B] = DVO_PORT_DPB,
+		[PORT_C] = DVO_PORT_DPC,
+		[PORT_D] = DVO_PORT_DPD,
+		[PORT_E] = DVO_PORT_DPE,
 	};
 
 	if (port == PORT_A)
@@ -5104,8 +5078,8 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct edp_power_seq cur, vbt, spec,
 		*final = &intel_dp->pps_delays;
-	u32 pp_on, pp_off, pp_div, pp;
-	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
+	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -5113,7 +5087,16 @@
 	if (final->t11_t12 != 0)
 		return;
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (IS_BROXTON(dev)) {
+		/*
+		 * TODO: BXT has 2 sets of PPS registers.
+		 * The correct register set for Broxton needs to be
+		 * identified using VBT; hardcoded to set 0 for now.
+		 */
+		pp_ctrl_reg = BXT_PP_CONTROL(0);
+		pp_on_reg = BXT_PP_ON_DELAYS(0);
+		pp_off_reg = BXT_PP_OFF_DELAYS(0);
+	} else if (HAS_PCH_SPLIT(dev)) {
 		pp_ctrl_reg = PCH_PP_CONTROL;
 		pp_on_reg = PCH_PP_ON_DELAYS;
 		pp_off_reg = PCH_PP_OFF_DELAYS;
@@ -5129,12 +5112,14 @@
 
 	/* Workaround: Need to write PP_CONTROL with the unlock key as
 	 * the very first thing. */
-	pp = ironlake_get_pp_control(intel_dp);
-	I915_WRITE(pp_ctrl_reg, pp);
+	pp_ctl = ironlake_get_pp_control(intel_dp);
 
 	pp_on = I915_READ(pp_on_reg);
 	pp_off = I915_READ(pp_off_reg);
-	pp_div = I915_READ(pp_div_reg);
+	if (!IS_BROXTON(dev)) {
+		I915_WRITE(pp_ctrl_reg, pp_ctl);
+		pp_div = I915_READ(pp_div_reg);
+	}
 
 	/* Pull timing values out of registers */
 	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
@@ -5149,8 +5134,17 @@
 	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
 		PANEL_POWER_DOWN_DELAY_SHIFT;
 
-	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+	if (IS_BROXTON(dev)) {
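+		/* BXT encodes the power cycle delay as (delay / 100 ms) + 1 */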
+		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
+			BXT_POWER_CYCLE_DELAY_SHIFT;
+		if (tmp > 0)
+			cur.t11_t12 = (tmp - 1) * 1000;
+		else
+			cur.t11_t12 = 0;
+	} else {
+		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
 		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+	}
 
 	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
 		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
@@ -5207,13 +5201,23 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp_on, pp_off, pp_div, port_sel = 0;
 	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
-	int pp_on_reg, pp_off_reg, pp_div_reg;
+	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
 	enum port port = dp_to_dig_port(intel_dp)->port;
 	const struct edp_power_seq *seq = &intel_dp->pps_delays;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (IS_BROXTON(dev)) {
+		/*
+		 * TODO: BXT has 2 sets of PPS registers.
+		 * The correct register set for Broxton needs to be
+		 * identified using VBT; hardcoded to set 0 for now.
+		 */
+		pp_ctrl_reg = BXT_PP_CONTROL(0);
+		pp_on_reg = BXT_PP_ON_DELAYS(0);
+		pp_off_reg = BXT_PP_OFF_DELAYS(0);
+
+	} else if (HAS_PCH_SPLIT(dev)) {
 		pp_on_reg = PCH_PP_ON_DELAYS;
 		pp_off_reg = PCH_PP_OFF_DELAYS;
 		pp_div_reg = PCH_PP_DIVISOR;
@@ -5239,9 +5243,16 @@
 		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
 	/* Compute the divisor for the pp clock, simply match the Bspec
 	 * formula. */
-	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
-	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
-			<< PANEL_POWER_CYCLE_DELAY_SHIFT);
+	if (IS_BROXTON(dev)) {
+		pp_div = I915_READ(pp_ctrl_reg);
+		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
+		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
+				<< BXT_POWER_CYCLE_DELAY_SHIFT);
+	} else {
+		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
+		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
+				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
+	}
 
 	/* Haswell doesn't have any port selection bits for the panel
 	 * power sequencer any more. */
@@ -5258,11 +5269,16 @@
 
 	I915_WRITE(pp_on_reg, pp_on);
 	I915_WRITE(pp_off_reg, pp_off);
-	I915_WRITE(pp_div_reg, pp_div);
+	if (IS_BROXTON(dev))
+		I915_WRITE(pp_ctrl_reg, pp_div);
+	else
+		I915_WRITE(pp_div_reg, pp_div);
 
 	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
 		      I915_READ(pp_on_reg),
 		      I915_READ(pp_off_reg),
+		      IS_BROXTON(dev) ?
+		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
 		      I915_READ(pp_div_reg));
 }
 
@@ -5467,13 +5483,12 @@
 }
 
 /**
- * intel_edp_drrs_invalidate - Invalidate DRRS
+ * intel_edp_drrs_invalidate - Disable Idleness DRRS
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
- * When there is a disturbance on screen (due to cursor movement/time
- * update etc), DRRS needs to be invalidated, i.e. need to switch to
- * high RR.
+ * This function gets called every time rendering on the given planes starts.
+ * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
  *
  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
  */
@@ -5498,26 +5513,27 @@
 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
 
-	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
+
+	/* invalidate means a busy screen, hence upclock */
+	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
 		intel_dp_set_drrs_state(dev_priv->dev,
 				dev_priv->drrs.dp->attached_connector->panel.
 				fixed_mode->vrefresh);
-	}
 
-	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
-
-	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
 	mutex_unlock(&dev_priv->drrs.mutex);
 }
 
 /**
- * intel_edp_drrs_flush - Flush DRRS
+ * intel_edp_drrs_flush - Restart Idleness DRRS
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
- * When there is no movement on screen, DRRS work can be scheduled.
- * This DRRS work is responsible for setting relevant registers after a
- * timeout of 1 second.
+ * This function gets called every time rendering on the given planes has
+ * completed or a flip on a crtc has completed. So DRRS should be upclocked
+ * (LOW_RR -> HIGH_RR), and idleness detection should be restarted if no
+ * other planes are dirty.
  *
  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
  */
@@ -5541,10 +5557,21 @@
 
 	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
+
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
 	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
-	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
-			!dev_priv->drrs.busy_frontbuffer_bits)
+	/* flush means a busy screen, hence upclock */
+	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
+		intel_dp_set_drrs_state(dev_priv->dev,
+				dev_priv->drrs.dp->attached_connector->panel.
+				fixed_mode->vrefresh);
+
+	/*
+	 * flush also means no more activity, hence schedule the downclock
+	 * if all other fbs are quiescent too
+	 */
+	if (!dev_priv->drrs.busy_frontbuffer_bits)
 		schedule_delayed_work(&dev_priv->drrs.work,
 				msecs_to_jiffies(1000));
 	mutex_unlock(&dev_priv->drrs.mutex);
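
Illustrative sketch, not part of the patch: how a rendering path would
bracket CPU frontbuffer writes with the invalidate/flush pair documented
above, using a pipe's primary-plane bit (example_cpu_draw is invented):

	static void example_cpu_draw(struct drm_device *dev, enum pipe pipe)
	{
		unsigned frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);

		/* screen is about to get busy: force the high refresh rate */
		intel_edp_drrs_invalidate(dev, frontbuffer_bits);

		/* ... CPU writes to the frontbuffer happen here ... */

		/* done rendering: upclock now, re-arm the idleness downclock */
		intel_edp_drrs_flush(dev, frontbuffer_bits);
	}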
@@ -5833,6 +5860,9 @@
 	case PORT_D:
 		intel_encoder->hpd_pin = HPD_PORT_D;
 		break;
+	case PORT_E:
+		intel_encoder->hpd_pin = HPD_PORT_E;
+		break;
 	default:
 		BUG();
 	}
@@ -5948,10 +5978,9 @@
 		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 	}
 	intel_encoder->cloneable = 0;
-	intel_encoder->hot_plug = intel_dp_hot_plug;
 
 	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
-	dev_priv->hpd_irq_port[port] = intel_dig_port;
+	dev_priv->hotplug.irq_port[port] = intel_dig_port;
 
 	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
 		drm_encoder_cleanup(encoder);
@@ -5967,7 +5996,7 @@
 
 	/* disable MST */
 	for (i = 0; i < I915_MAX_PORTS; i++) {
-		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
 		if (!intel_dig_port)
 			continue;
 
@@ -5986,7 +6015,7 @@
 	int i;
 
 	for (i = 0; i < I915_MAX_PORTS; i++) {
-		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
 		if (!intel_dig_port)
 			continue;
 		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 600afdb..983553c 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -33,6 +33,7 @@
 static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 					struct intel_crtc_state *pipe_config)
 {
+	struct drm_device *dev = encoder->base.dev;
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -97,6 +98,10 @@
 			       &pipe_config->dp_m_n);
 
 	pipe_config->dp_m_n.tu = slots;
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		hsw_dp_set_ddi_pll_sel(pipe_config);
+
 	return true;
 
 }
@@ -328,7 +333,7 @@
 }
 
 static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_dp_mst_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_dp_mst_set_property,
@@ -406,7 +411,7 @@
 
 static void intel_connector_add_to_fbdev(struct intel_connector *connector)
 {
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
 #endif
@@ -414,7 +419,7 @@
 
 static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
 {
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
 #endif
@@ -452,10 +457,9 @@
 	drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
 
 	drm_mode_connector_set_path_property(connector, pathprop);
-	drm_reinit_primary_mode_group(dev);
-	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_lock_all(dev);
 	intel_connector_add_to_fbdev(intel_connector);
-	mutex_unlock(&dev->mode_config.mutex);
+	drm_modeset_unlock_all(dev);
 	drm_connector_register(&intel_connector->base);
 	return connector;
 }
@@ -465,19 +469,28 @@
 {
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct drm_device *dev = connector->dev;
+
 	/* need to nuke the connector */
-	mutex_lock(&dev->mode_config.mutex);
-	intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-	mutex_unlock(&dev->mode_config.mutex);
+	drm_modeset_lock_all(dev);
+	if (connector->state->crtc) {
+		struct drm_mode_set set;
+		int ret;
+
+		memset(&set, 0, sizeof(set));
+		set.crtc = connector->state->crtc;
+
+		ret = drm_atomic_helper_set_config(&set);
+
+		WARN(ret, "Disabling mst crtc failed with %i\n", ret);
+	}
+	drm_modeset_unlock_all(dev);
 
 	intel_connector->unregister(intel_connector);
 
-	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_lock_all(dev);
 	intel_connector_remove_from_fbdev(intel_connector);
 	drm_connector_cleanup(connector);
-	mutex_unlock(&dev->mode_config.mutex);
-
-	drm_reinit_primary_mode_group(dev);
+	drm_modeset_unlock_all(dev);
 
 	kfree(intel_connector);
 	DRM_DEBUG_KMS("\n");
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1059283..2b9e6f9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -130,15 +130,9 @@
 
 struct intel_encoder {
 	struct drm_encoder base;
-	/*
-	 * The new crtc this encoder will be driven from. Only differs from
-	 * base->crtc while a modeset is in progress.
-	 */
-	struct intel_crtc *new_crtc;
 
 	enum intel_output_type type;
 	unsigned int cloneable;
-	bool connectors_active;
 	void (*hot_plug)(struct intel_encoder *);
 	bool (*compute_config)(struct intel_encoder *,
 			       struct intel_crtc_state *);
@@ -182,6 +176,10 @@
 		bool enabled;
 		bool combination_mode;	/* gen 2/4 only */
 		bool active_low_pwm;
+
+		/* PWM chip */
+		struct pwm_device *pwm;
+
 		struct backlight_device *device;
 	} backlight;
 
@@ -195,12 +193,6 @@
 	 */
 	struct intel_encoder *encoder;
 
-	/*
-	 * The new encoder this connector will be driven. Only differs from
-	 * encoder while a modeset is in progress.
-	 */
-	struct intel_encoder *new_encoder;
-
 	/* Reads out the current hw, returning true if the connector is enabled
 	 * and active (i.e. dpms ON state). */
 	bool (*get_hw_state)(struct intel_connector *);
@@ -241,6 +233,14 @@
 	int	p;
 } intel_clock_t;
 
+struct intel_atomic_state {
+	struct drm_atomic_state base;
+
+	unsigned int cdclk;
+	bool dpll_set;
+	struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
+};
+
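
Illustrative sketch, not part of the patch: i915 code reaches these
driver-private fields by downcasting the base state via the
to_intel_atomic_state() helper defined later in this header
(example_state_cdclk is an invented name):

	static inline unsigned int example_state_cdclk(struct drm_atomic_state *s)
	{
		return to_intel_atomic_state(s)->cdclk;
	}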
 struct intel_plane_state {
 	struct drm_plane_state base;
 	struct drm_rect src;
@@ -256,7 +256,7 @@
 	 * plane requiring a scaler:
 	 *   - During check_plane, its bit is set in
 	 *     crtc_state->scaler_state.scaler_users by calling helper function
-	 *     update_scaler_users.
+	 *     update_scaler_plane.
 	 *   - scaler_id indicates the scaler it got assigned.
 	 *
 	 * plane doesn't require a scaler:
@@ -264,9 +264,11 @@
 	 *     got disabled.
 	 *   - During check_plane, corresponding bit is reset in
 	 *     crtc_state->scaler_state.scaler_users by calling helper function
-	 *     update_scaler_users.
+	 *     update_scaler_plane.
 	 */
 	int scaler_id;
+
+	struct drm_intel_sprite_colorkey ckey;
 };
 
 struct intel_initial_plane_config {
@@ -286,7 +288,6 @@
 #define SKL_MAX_DST_H 4096
 
 struct intel_scaler {
-	int id;
 	int in_use;
 	uint32_t mode;
 };
@@ -319,6 +320,9 @@
 	int scaler_id;
 };
 
+/* drm_mode->private_flags */
+#define I915_MODE_FLAG_INHERITED 1
+
 struct intel_crtc_state {
 	struct drm_crtc_state base;
 
@@ -331,7 +335,6 @@
 	 * accordingly.
 	 */
 #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS	(1<<0) /* unreliable sync mode.flags */
-#define PIPE_CONFIG_QUIRK_INHERITED_MODE	(1<<1) /* mode inherited from firmware */
 	unsigned long quirks;
 
 	/* Pipe source size (ie. panel fitter input size)
@@ -447,6 +450,18 @@
 	int pbn;
 
 	struct intel_crtc_scaler_state scaler_state;
+
+	/* w/a for waiting 2 vblanks during crtc enable */
+	enum pipe hsw_workaround_pipe;
+};
+
+struct vlv_wm_state {
+	struct vlv_pipe_wm wm[3];
+	struct vlv_sr_wm sr[3];
+	uint8_t num_active_planes;
+	uint8_t num_levels;
+	uint8_t level;
+	bool cxsr;
 };
 
 struct intel_pipe_wm {
@@ -478,16 +493,13 @@
  * and thus can't be run with interrupts disabled.
  */
 struct intel_crtc_atomic_commit {
-	/* vblank evasion */
-	bool evade;
-	unsigned start_vbl_count;
-
 	/* Sleepable operations to perform before commit */
 	bool wait_for_flips;
 	bool disable_fbc;
 	bool disable_ips;
+	bool disable_cxsr;
 	bool pre_disable_primary;
-	bool update_wm;
+	bool update_wm_pre, update_wm_post;
 	unsigned disabled_planes;
 
 	/* Sleepable operations to perform after commit */
@@ -527,9 +539,7 @@
 	uint32_t cursor_size;
 	uint32_t cursor_base;
 
-	struct intel_initial_plane_config plane_config;
 	struct intel_crtc_state *config;
-	bool new_enabled;
 
 	/* reset counter value when the last flip was submitted */
 	unsigned int reset_counter;
@@ -544,14 +554,19 @@
 		struct intel_pipe_wm active;
 		/* SKL wm values currently in use */
 		struct skl_pipe_wm skl_active;
+		/* allow CxSR on this pipe */
+		bool cxsr_allowed;
 	} wm;
 
 	int scanline_offset;
 
+	unsigned start_vbl_count;
 	struct intel_crtc_atomic_commit atomic;
 
 	/* scalers available on this crtc */
 	int num_scalers;
+
+	struct vlv_wm_state wm_state;
 };
 
 struct intel_plane_wm_parameters {
@@ -570,6 +585,7 @@
 	bool scaled;
 	u64 tiling;
 	unsigned int rotation;
+	uint16_t fifo_size;
 };
 
 struct intel_plane {
@@ -578,9 +594,7 @@
 	enum pipe pipe;
 	bool can_scale;
 	int max_downscale;
-
-	/* FIXME convert to properties */
-	struct drm_intel_sprite_colorkey ckey;
+	uint32_t frontbuffer_bit;
 
 	/* Since we need to change the watermarks before/after
 	 * enabling/disabling the planes, we need to store the parameters here
@@ -603,8 +617,9 @@
 			     uint32_t x, uint32_t y,
 			     uint32_t src_w, uint32_t src_h);
 	void (*disable_plane)(struct drm_plane *plane,
-			      struct drm_crtc *crtc, bool force);
+			      struct drm_crtc *crtc);
 	int (*check_plane)(struct drm_plane *plane,
+			   struct intel_crtc_state *crtc_state,
 			   struct intel_plane_state *state);
 	void (*commit_plane)(struct drm_plane *plane,
 			     struct intel_plane_state *state);
@@ -629,6 +644,7 @@
 	unsigned long cursor_hpll_disable;
 };
 
+#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
 #define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
 #define to_intel_connector(x) container_of(x, struct intel_connector, base)
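All of these to_*() casts, including the new to_intel_atomic_state(), are instances of the same container_of() pattern: the DRM core hands the driver a pointer to the embedded base struct, and the driver recovers the enclosing wrapper from it. A stripped-down illustration with hypothetical types:

#include <linux/kernel.h>

struct base { int id; };

struct wrapper {
	struct base base;	/* embedded; need not be the first member */
	unsigned int extra;
};

#define to_wrapper(x) container_of(x, struct wrapper, base)

static unsigned int wrapper_extra(struct base *b)
{
	/* container_of() subtracts offsetof(struct wrapper, base) */
	return to_wrapper(b)->extra;
}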
@@ -940,43 +956,23 @@
 void intel_ddi_clock_get(struct intel_encoder *encoder,
 			 struct intel_crtc_state *pipe_config);
 void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
-void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
-				enum port port, int type);
+uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
 
 /* intel_frontbuffer.c */
 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     struct intel_engine_cs *ring,
 			     enum fb_op_origin origin);
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 				    unsigned frontbuffer_bits);
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
 				     unsigned frontbuffer_bits);
-void intel_frontbuffer_flush(struct drm_device *dev,
-			     unsigned frontbuffer_bits);
-/**
- * intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev: DRM device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after scheduling a flip on @obj. This is for
- * synchronous plane updates which will happen on the next vblank and which will
- * not get delayed by pending gpu rendering.
- *
- * Can be called without any locks held.
- */
-static inline
 void intel_frontbuffer_flip(struct drm_device *dev,
-			    unsigned frontbuffer_bits)
-{
-	intel_frontbuffer_flush(dev, frontbuffer_bits);
-}
-
+			    unsigned frontbuffer_bits);
 unsigned int intel_fb_align_height(struct drm_device *dev,
 				   unsigned int height,
 				   uint32_t pixel_format,
 				   uint64_t fb_format_modifier);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
-
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
+			enum fb_op_origin origin);
 u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
 			      uint32_t pixel_format);
 
@@ -994,15 +990,11 @@
 void intel_mark_busy(struct drm_device *dev);
 void intel_mark_idle(struct drm_device *dev);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
-void intel_crtc_control(struct drm_crtc *crtc, bool enable);
-void intel_crtc_reset(struct intel_crtc *crtc);
-void intel_crtc_update_dpms(struct drm_crtc *crtc);
+int intel_display_suspend(struct drm_device *dev);
 void intel_encoder_destroy(struct drm_encoder *encoder);
 int intel_connector_init(struct intel_connector *);
 struct intel_connector *intel_connector_alloc(void);
-void intel_connector_dpms(struct drm_connector *, int mode);
 bool intel_connector_get_hw_state(struct intel_connector *connector);
-void intel_modeset_check_state(struct drm_device *dev);
 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
 				struct intel_digital_port *port);
 void intel_connector_attach_encoder(struct intel_connector *connector,
@@ -1035,7 +1027,8 @@
 int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 			       struct drm_framebuffer *fb,
 			       const struct drm_plane_state *plane_state,
-			       struct intel_engine_cs *pipelined);
+			       struct intel_engine_cs *pipelined,
+			       struct drm_i915_gem_request **pipelined_request);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
 			   struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1058,6 +1051,8 @@
 				    struct drm_plane_state *state,
 				    struct drm_property *property,
 				    uint64_t val);
+int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+				    struct drm_plane_state *plane_state);
 
 unsigned int
 intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
@@ -1072,9 +1067,6 @@
 void intel_create_rotation_property(struct drm_device *dev,
 					struct intel_plane *plane);
 
-bool intel_wm_need_update(struct drm_plane *plane,
-			  struct drm_plane_state *state);
-
 /* shared dpll functions */
 struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
 void assert_shared_dpll(struct drm_i915_private *dev_priv,
@@ -1084,7 +1076,6 @@
 #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 						struct intel_crtc_state *state);
-void intel_put_shared_dpll(struct intel_crtc *crtc);
 
 void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
 		      const struct dpll *dpll);
@@ -1104,7 +1095,8 @@
 void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
 #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
+					     int *x, int *y,
 					     unsigned int tiling_mode,
 					     unsigned int bpp,
 					     unsigned int pitch);
@@ -1114,7 +1106,6 @@
 void hsw_disable_pc8(struct drm_i915_private *dev_priv);
 void broxton_init_cdclk(struct drm_device *dev);
 void broxton_uninit_cdclk(struct drm_device *dev);
-void broxton_set_cdclk(struct drm_device *dev, int frequency);
 void broxton_ddi_phy_init(struct drm_device *dev);
 void broxton_ddi_phy_uninit(struct drm_device *dev);
 void bxt_enable_dc9(struct drm_i915_private *dev_priv);
@@ -1130,6 +1121,8 @@
 				int dotclock);
 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
 			intel_clock_t *best_clock);
+int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
+
 bool intel_crtc_active(struct drm_crtc *crtc);
 void hsw_enable_ips(struct intel_crtc *crtc);
 void hsw_disable_ips(struct intel_crtc *crtc);
@@ -1139,10 +1132,8 @@
 				 struct intel_crtc_state *pipe_config);
 void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
 void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
-void skl_detach_scalers(struct intel_crtc *intel_crtc);
-int skl_update_scaler_users(struct intel_crtc *intel_crtc,
-	struct intel_crtc_state *crtc_state, struct intel_plane *intel_plane,
-	struct intel_plane_state *plane_state, int force_detach);
+
+int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
 
 unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
@@ -1194,6 +1185,7 @@
 void intel_edp_drrs_invalidate(struct drm_device *dev,
 		unsigned frontbuffer_bits);
 void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
+void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
 
 /* intel_dp_mst.c */
 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
@@ -1207,7 +1199,7 @@
 
 
 /* legacy fbdev emulation in intel_fbdev.c */
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 extern int intel_fbdev_init(struct drm_device *dev);
 extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
 extern void intel_fbdev_fini(struct drm_device *dev);
@@ -1238,15 +1230,18 @@
 #endif
 
 /* intel_fbc.c */
-bool intel_fbc_enabled(struct drm_device *dev);
-void intel_fbc_update(struct drm_device *dev);
+bool intel_fbc_enabled(struct drm_i915_private *dev_priv);
+void intel_fbc_update(struct drm_i915_private *dev_priv);
 void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_disable(struct drm_device *dev);
+void intel_fbc_disable(struct drm_i915_private *dev_priv);
+void intel_fbc_disable_crtc(struct intel_crtc *crtc);
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned int frontbuffer_bits,
 			  enum fb_op_origin origin);
 void intel_fbc_flush(struct drm_i915_private *dev_priv,
-		     unsigned int frontbuffer_bits);
+		     unsigned int frontbuffer_bits, enum fb_op_origin origin);
+const char *intel_no_fbc_reason_str(enum no_fbc_reason reason);
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
 
 /* intel_hdmi.c */
 void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
@@ -1314,11 +1309,13 @@
 void intel_psr_enable(struct intel_dp *intel_dp);
 void intel_psr_disable(struct intel_dp *intel_dp);
 void intel_psr_invalidate(struct drm_device *dev,
-			      unsigned frontbuffer_bits);
+			  unsigned frontbuffer_bits);
 void intel_psr_flush(struct drm_device *dev,
-			 unsigned frontbuffer_bits);
+		     unsigned frontbuffer_bits,
+		     enum fb_op_origin origin);
 void intel_psr_init(struct drm_device *dev);
-void intel_psr_single_frame_update(struct drm_device *dev);
+void intel_psr_single_frame_update(struct drm_device *dev,
+				   unsigned frontbuffer_bits);
 
 /* intel_runtime_pm.c */
 int intel_power_domains_init(struct drm_i915_private *);
@@ -1372,11 +1369,12 @@
 		    unsigned long submitted);
 void intel_queue_rps_boost_for_request(struct drm_device *dev,
 				       struct drm_i915_gem_request *req);
+void vlv_wm_get_hw_state(struct drm_device *dev);
 void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_allocation *ddb /* out */);
-
+uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 
 /* intel_sdvo.c */
 bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
@@ -1384,10 +1382,9 @@
 
 /* intel_sprite.c */
 int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
-int intel_plane_restore(struct drm_plane *plane);
 int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
-bool intel_pipe_update_start(struct intel_crtc *crtc,
+void intel_pipe_update_start(struct intel_crtc *crtc,
 			     uint32_t *start_vbl_count);
 void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
 
@@ -1395,11 +1392,6 @@
 void intel_tv_init(struct drm_device *dev);
 
 /* intel_atomic.c */
-int intel_atomic_check(struct drm_device *dev,
-		       struct drm_atomic_state *state);
-int intel_atomic_commit(struct drm_device *dev,
-			struct drm_atomic_state *state,
-			bool async);
 int intel_connector_atomic_get_property(struct drm_connector *connector,
 					const struct drm_connector_state *state,
 					struct drm_property *property,
@@ -1407,6 +1399,11 @@
 struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
 void intel_crtc_destroy_state(struct drm_crtc *crtc,
 			       struct drm_crtc_state *state);
+struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
+void intel_atomic_state_clear(struct drm_atomic_state *);
+struct intel_shared_dpll_config *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s);
+
 static inline struct intel_crtc_state *
 intel_atomic_get_crtc_state(struct drm_atomic_state *state,
 			    struct intel_crtc *crtc)
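The intel_atomic_state_alloc()/intel_atomic_state_clear() pair declared above is the standard recipe for subclassing drm_atomic_state so that per-commit driver data (here cdclk and shared-DPLL state) rides along with the core state. A hedged sketch of the alloc side, assuming drm_atomic_state_init() as the core initializer; the error handling shown is illustrative:

static struct drm_atomic_state *
example_atomic_state_alloc(struct drm_device *dev)
{
	struct intel_atomic_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	if (drm_atomic_state_init(dev, &state->base) < 0) {
		kfree(state);
		return NULL;
	}

	/* driver-private fields start zeroed; hand back the embedded base */
	return &state->base;
}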
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index b5a5558..4a601cf 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -31,6 +31,7 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_mipi_dsi.h>
 #include <linux/slab.h>
+#include <linux/gpio/consumer.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include "intel_dsi.h"
@@ -261,11 +262,6 @@
 	return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
 }
 
-static void intel_dsi_hot_plug(struct intel_encoder *encoder)
-{
-	DRM_DEBUG_KMS("\n");
-}
-
 static bool intel_dsi_compute_config(struct intel_encoder *encoder,
 				     struct intel_crtc_state *config)
 {
@@ -401,6 +397,8 @@
 
 		intel_dsi_port_enable(encoder);
 	}
+
+	intel_panel_enable_backlight(intel_dsi->attached_connector);
 }
 
 static void intel_dsi_pre_enable(struct intel_encoder *encoder)
@@ -415,15 +413,21 @@
 
 	DRM_DEBUG_KMS("\n");
 
+	/* Panel Enable over CRC PMIC */
+	if (intel_dsi->gpio_panel)
+		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
+
+	msleep(intel_dsi->panel_on_delay);
+
 	/* Disable DPOunit clock gating, can stall pipe
 	 * and we need DPLL REFA always enabled */
 	tmp = I915_READ(DPLL(pipe));
-	tmp |= DPLL_REFA_CLK_ENABLE_VLV;
+	tmp |= DPLL_REF_CLK_ENABLE_VLV;
 	I915_WRITE(DPLL(pipe), tmp);
 
 	/* update the hw state for DPLL */
-	intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
-		DPLL_REFA_CLK_ENABLE_VLV;
+	intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
 
 	tmp = I915_READ(DSPCLK_GATE_D);
 	tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
@@ -432,8 +436,6 @@
 	/* put device in ready state */
 	intel_dsi_device_ready(encoder);
 
-	msleep(intel_dsi->panel_on_delay);
-
 	drm_panel_prepare(intel_dsi->panel);
 
 	for_each_dsi_port(port, intel_dsi->ports)
@@ -461,6 +463,8 @@
 
 	DRM_DEBUG_KMS("\n");
 
+	intel_panel_disable_backlight(intel_dsi->attached_connector);
+
 	if (is_vid_mode(intel_dsi)) {
 		/* Send Shutdown command to the panel in LP mode */
 		for_each_dsi_port(port, intel_dsi->ports)
@@ -576,6 +580,10 @@
 
 	msleep(intel_dsi->panel_off_delay);
 	msleep(intel_dsi->panel_pwr_cycle_delay);
+
+	/* Panel Disable over CRC PMIC */
+	if (intel_dsi->gpio_panel)
+		gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
 }
 
 static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -955,6 +963,11 @@
 		/* XXX: Logically this call belongs in the panel driver. */
 		drm_panel_remove(intel_dsi->panel);
 	}
+
+	/* dispose of the gpios */
+	if (intel_dsi->gpio_panel)
+		gpiod_put(intel_dsi->gpio_panel);
+
 	intel_encoder_destroy(encoder);
 }
 
@@ -969,7 +982,7 @@
 };
 
 static const struct drm_connector_funcs intel_dsi_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_dsi_detect,
 	.destroy = intel_dsi_connector_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1022,7 +1035,6 @@
 	drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
 
 	/* XXX: very likely not all of these are needed */
-	intel_encoder->hot_plug = intel_dsi_hot_plug;
 	intel_encoder->compute_config = intel_dsi_compute_config;
 	intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
 	intel_encoder->pre_enable = intel_dsi_pre_enable;
@@ -1071,6 +1083,20 @@
 		goto err;
 	}
 
+	/*
+	 * In case of BYT with CRC PMIC, we need to use GPIO for
+	 * panel control.
+	 */
+	if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+		intel_dsi->gpio_panel =
+			gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH);
+
+		if (IS_ERR(intel_dsi->gpio_panel)) {
+			DRM_ERROR("Failed to own gpio for panel control\n");
+			intel_dsi->gpio_panel = NULL;
+		}
+	}
+
 	intel_encoder->type = INTEL_OUTPUT_DSI;
 	intel_encoder->cloneable = 0;
 	drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
@@ -1104,6 +1130,7 @@
 	}
 
 	intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+	intel_panel_setup_backlight(connector, INVALID_PIPE);
 
 	return;
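For reference, the panel GPIO acquired above follows the usual gpiod lifecycle: requested once at encoder init, toggled around the power sequence in pre_enable/post_disable, and released on the destroy path. A condensed sketch with illustrative helper names:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static struct gpio_desc *example_panel_gpio_get(struct device *dev)
{
	/* GPIOD_OUT_HIGH hands back the line already driven high */
	struct gpio_desc *gpio = gpiod_get(dev, "panel", GPIOD_OUT_HIGH);

	if (IS_ERR(gpio))
		return NULL;	/* fall back to non-GPIO panel control */
	return gpio;
}

static void example_panel_power(struct gpio_desc *gpio, bool on)
{
	if (gpio)
		gpiod_set_value_cansleep(gpio, on);
}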
 
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 2784ac4..42a6859 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -42,6 +42,9 @@
 	struct drm_panel *panel;
 	struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
 
+	/* GPIO Desc for CRC based Panel control */
+	struct gpio_desc *gpio_panel;
+
 	struct intel_connector *attached_connector;
 
 	/* bit mask of ports being driven */
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index d20cf37..c6a8975 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -38,6 +38,27 @@
 #define DSI_HFP_PACKET_EXTRA_SIZE	6
 #define DSI_EOTP_PACKET_SIZE		4
 
+static int dsi_pixel_format_bpp(int pixel_format)
+{
+	int bpp;
+
+	switch (pixel_format) {
+	default:
+	case VID_MODE_FORMAT_RGB888:
+	case VID_MODE_FORMAT_RGB666_LOOSE:
+		bpp = 24;
+		break;
+	case VID_MODE_FORMAT_RGB666:
+		bpp = 18;
+		break;
+	case VID_MODE_FORMAT_RGB565:
+		bpp = 16;
+		break;
+	}
+
+	return bpp;
+}
+
 struct dsi_mnp {
 	u32 dsi_pll_ctrl;
 	u32 dsi_pll_div;
@@ -46,8 +67,8 @@
 static const u32 lfsr_converts[] = {
 	426, 469, 234, 373, 442, 221, 110, 311, 411,		/* 62 - 70 */
 	461, 486, 243, 377, 188, 350, 175, 343, 427, 213,	/* 71 - 80 */
-	106, 53, 282, 397, 354, 227, 113, 56, 284, 142,		/* 81 - 90 */
-	71, 35							/* 91 - 92 */
+	106, 53, 282, 397, 454, 227, 113, 56, 284, 142,		/* 81 - 90 */
+	71, 35, 273, 136, 324, 418, 465, 488, 500, 506		/* 91 - 100 */
 };
 
 #ifdef DSI_CLK_FROM_RR
@@ -65,19 +86,7 @@
 	u32 dsi_bit_clock_hz;
 	u32 dsi_clk;
 
-	switch (pixel_format) {
-	default:
-	case VID_MODE_FORMAT_RGB888:
-	case VID_MODE_FORMAT_RGB666_LOOSE:
-		bpp = 24;
-		break;
-	case VID_MODE_FORMAT_RGB666:
-		bpp = 18;
-		break;
-	case VID_MODE_FORMAT_RGB565:
-		bpp = 16;
-		break;
-	}
+	bpp = dsi_pixel_format_bpp(pixel_format);
 
 	hactive = mode->hdisplay;
 	vactive = mode->vdisplay;
@@ -137,21 +146,7 @@
 static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
 {
 	u32 dsi_clk_khz;
-	u32 bpp;
-
-	switch (pixel_format) {
-	default:
-	case VID_MODE_FORMAT_RGB888:
-	case VID_MODE_FORMAT_RGB666_LOOSE:
-		bpp = 24;
-		break;
-	case VID_MODE_FORMAT_RGB666:
-		bpp = 18;
-		break;
-	case VID_MODE_FORMAT_RGB565:
-		bpp = 16;
-		break;
-	}
+	u32 bpp = dsi_pixel_format_bpp(pixel_format);
 
 	/* DSI data rate = pixel clock * bits per pixel / lane count
 	   pixel clock is converted from KHz to Hz */
@@ -162,11 +157,13 @@
 
 #endif
 
-static int dsi_calc_mnp(int target_dsi_clk, struct dsi_mnp *dsi_mnp)
+static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
+			struct dsi_mnp *dsi_mnp, int target_dsi_clk)
 {
 	unsigned int calc_m = 0, calc_p = 0;
-	unsigned int m, n = 1, p;
-	int ref_clk = 25000;
+	unsigned int m_min, m_max, p_min = 2, p_max = 6;
+	unsigned int m, n, p;
+	int ref_clk;
 	int delta = target_dsi_clk;
 	u32 m_seed;
 
@@ -176,8 +173,20 @@
 		return -ECHRNG;
 	}
 
-	for (m = 62; m <= 92 && delta; m++) {
-		for (p = 2; p <= 6 && delta; p++) {
+	if (IS_CHERRYVIEW(dev_priv)) {
+		ref_clk = 100000;
+		n = 4;
+		m_min = 70;
+		m_max = 96;
+	} else {
+		ref_clk = 25000;
+		n = 1;
+		m_min = 62;
+		m_max = 92;
+	}
+
+	for (m = m_min; m <= m_max && delta; m++) {
+		for (p = p_min; p <= p_max && delta; p++) {
 			/*
 			 * Find the optimal m and p divisors with minimal delta
 			 * +/- the required clock
@@ -217,7 +226,7 @@
 	dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
 				    intel_dsi->lane_count);
 
-	ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
+	ret = dsi_calc_mnp(dev_priv, &dsi_mnp, dsi_clk);
 	if (ret) {
 		DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
 		return;
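The reworked dsi_calc_mnp() can be read as a brute-force search: with N fixed per platform, pick the integer M and P that minimize the error between ref_clk * M / (N * P) and the requested DSI clock, then translate M through the lfsr_converts[] table into the PLL's seed encoding. A standalone model of the search, using the platform constants from the patch (the helper itself is illustrative and skips the LFSR translation):

#include <linux/kernel.h>	/* abs() */

static void example_calc_mp(bool is_chv, int target_khz,
			    unsigned int *best_m, unsigned int *best_p)
{
	unsigned int n = is_chv ? 4 : 1;
	unsigned int m_min = is_chv ? 70 : 62;
	unsigned int m_max = is_chv ? 96 : 92;
	int ref_clk = is_chv ? 100000 : 25000;	/* kHz */
	unsigned int m, p;
	int delta = target_khz;

	/* stop early on an exact hit (delta == 0), like the driver loop */
	for (m = m_min; m <= m_max && delta; m++) {
		for (p = 2; p <= 6 && delta; p++) {
			int calc = ref_clk * m / (n * p);

			if (abs(target_khz - calc) < delta) {
				delta = abs(target_khz - calc);
				*best_m = m;
				*best_p = p;
			}
		}
	}
}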
@@ -286,21 +295,7 @@
 
 static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
 {
-	int bpp;
-
-	switch (pixel_format) {
-	default:
-	case VID_MODE_FORMAT_RGB888:
-	case VID_MODE_FORMAT_RGB666_LOOSE:
-		bpp = 24;
-		break;
-	case VID_MODE_FORMAT_RGB666:
-		bpp = 18;
-		break;
-	case VID_MODE_FORMAT_RGB565:
-		bpp = 16;
-		break;
-	}
+	int bpp = dsi_pixel_format_bpp(pixel_format);
 
 	WARN(bpp != pipe_bpp,
 	     "bpp match assertion failure (expected %d, current %d)\n",
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ece5bd7..dc532bb 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -196,50 +196,6 @@
 	intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
 }
 
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_dvo_dpms(struct drm_connector *connector, int mode)
-{
-	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
-	struct drm_crtc *crtc;
-	struct intel_crtc_state *config;
-
-	/* dvo supports only 2 dpms states. */
-	if (mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
-
-	if (mode == connector->dpms)
-		return;
-
-	connector->dpms = mode;
-
-	/* Only need to change hw state when actually enabled */
-	crtc = intel_dvo->base.base.crtc;
-	if (!crtc) {
-		intel_dvo->base.connectors_active = false;
-		return;
-	}
-
-	/* We call connector dpms manually below in case pipe dpms doesn't
-	 * change due to cloning. */
-	if (mode == DRM_MODE_DPMS_ON) {
-		config = to_intel_crtc(crtc)->config;
-
-		intel_dvo->base.connectors_active = true;
-
-		intel_crtc_update_dpms(crtc);
-
-		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
-	} else {
-		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
-
-		intel_dvo->base.connectors_active = false;
-
-		intel_crtc_update_dpms(crtc);
-	}
-
-	intel_modeset_check_state(connector->dev);
-}
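With the hand-rolled DPMS above removed, legacy DPMS requests are funneled through the atomic helper, which converts them into a full atomic commit. The connector funcs table then reduces to library calls plus driver-specific detect/destroy hooks; a minimal hypothetical example:

static enum drm_connector_status
example_detect(struct drm_connector *connector, bool force)
{
	return connector_status_connected;	/* placeholder */
}

static void example_destroy(struct drm_connector *connector)
{
	drm_connector_cleanup(connector);
	kfree(connector);
}

static const struct drm_connector_funcs example_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = example_detect,
	.destroy = example_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};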
-
 static enum drm_mode_status
 intel_dvo_mode_valid(struct drm_connector *connector,
 		     struct drm_display_mode *mode)
@@ -387,7 +343,7 @@
 }
 
 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
-	.dpms = intel_dvo_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_dvo_detect,
 	.destroy = intel_dvo_destroy,
 	.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 6abb834..1f97fb5 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -41,9 +41,8 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
-static void i8xx_fbc_disable(struct drm_device *dev)
+static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 fbc_ctl;
 
 	dev_priv->fbc.enabled = false;
@@ -65,13 +64,11 @@
 	DRM_DEBUG_KMS("disabled FBC\n");
 }
 
-static void i8xx_fbc_enable(struct drm_crtc *crtc)
+static void i8xx_fbc_enable(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->primary->fb;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int cfb_pitch;
 	int i;
 	u32 fbc_ctl;
@@ -84,7 +81,7 @@
 		cfb_pitch = fb->pitches[0];
 
 	/* FBC_CTL wants 32B or 64B units */
-	if (IS_GEN2(dev))
+	if (IS_GEN2(dev_priv))
 		cfb_pitch = (cfb_pitch / 32) - 1;
 	else
 		cfb_pitch = (cfb_pitch / 64) - 1;
@@ -93,66 +90,61 @@
 	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
 		I915_WRITE(FBC_TAG + (i * 4), 0);
 
-	if (IS_GEN4(dev)) {
+	if (IS_GEN4(dev_priv)) {
 		u32 fbc_ctl2;
 
 		/* Set it up... */
 		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
-		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
+		fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
 		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
-		I915_WRITE(FBC_FENCE_OFF, crtc->y);
+		I915_WRITE(FBC_FENCE_OFF, crtc->base.y);
 	}
 
 	/* enable it... */
 	fbc_ctl = I915_READ(FBC_CONTROL);
 	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
 	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
-	if (IS_I945GM(dev))
+	if (IS_I945GM(dev_priv))
 		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
 	fbc_ctl |= obj->fence_reg;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 
 	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
-		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
+		      cfb_pitch, crtc->base.y, plane_name(crtc->plane));
 }
 
-static bool i8xx_fbc_enabled(struct drm_device *dev)
+static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
 }
 
-static void g4x_fbc_enable(struct drm_crtc *crtc)
+static void g4x_fbc_enable(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->primary->fb;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
 
 	dev_priv->fbc.enabled = true;
 
-	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
+	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
 		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
 	else
 		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
 	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
 
-	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+	I915_WRITE(DPFC_FENCE_YOFF, crtc->base.y);
 
 	/* enable it... */
 	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
-static void g4x_fbc_disable(struct drm_device *dev)
+static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpfc_ctl;
 
 	dev_priv->fbc.enabled = false;
@@ -167,10 +159,8 @@
 	}
 }
 
-static bool g4x_fbc_enabled(struct drm_device *dev)
+static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
@@ -180,22 +170,21 @@
 	POSTING_READ(MSG_FBC_REND_STATE);
 }
 
-static void ilk_fbc_enable(struct drm_crtc *crtc)
+static void ilk_fbc_enable(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->primary->fb;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
+	int threshold = dev_priv->fbc.threshold;
 
 	dev_priv->fbc.enabled = true;
 
-	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
+	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
-		dev_priv->fbc.threshold++;
+		threshold++;
 
-	switch (dev_priv->fbc.threshold) {
+	switch (threshold) {
 	case 4:
 	case 3:
 		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
@@ -208,28 +197,27 @@
 		break;
 	}
 	dpfc_ctl |= DPFC_CTL_FENCE_EN;
-	if (IS_GEN5(dev))
+	if (IS_GEN5(dev_priv))
 		dpfc_ctl |= obj->fence_reg;
 
-	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->base.y);
 	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
 	/* enable it... */
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
-	if (IS_GEN6(dev)) {
+	if (IS_GEN6(dev_priv)) {
 		I915_WRITE(SNB_DPFC_CTL_SA,
 			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
-		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);
 	}
 
 	intel_fbc_nuke(dev_priv);
 
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
-static void ilk_fbc_disable(struct drm_device *dev)
+static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpfc_ctl;
 
 	dev_priv->fbc.enabled = false;
@@ -244,29 +232,29 @@
 	}
 }
 
-static bool ilk_fbc_enabled(struct drm_device *dev)
+static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
-static void gen7_fbc_enable(struct drm_crtc *crtc)
+static void gen7_fbc_enable(struct intel_crtc *crtc)
 {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_framebuffer *fb = crtc->primary->fb;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+	struct drm_framebuffer *fb = crtc->base.primary->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
+	int threshold = dev_priv->fbc.threshold;
 
 	dev_priv->fbc.enabled = true;
 
-	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
-	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
-		dev_priv->fbc.threshold++;
+	dpfc_ctl = 0;
+	if (IS_IVYBRIDGE(dev_priv))
+		dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);
 
-	switch (dev_priv->fbc.threshold) {
+	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+		threshold++;
+
+	switch (threshold) {
 	case 4:
 	case 3:
 		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
@@ -286,39 +274,37 @@
 
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
-	if (IS_IVYBRIDGE(dev)) {
+	if (IS_IVYBRIDGE(dev_priv)) {
 		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
 		I915_WRITE(ILK_DISPLAY_CHICKEN1,
 			   I915_READ(ILK_DISPLAY_CHICKEN1) |
 			   ILK_FBCQ_DIS);
 	} else {
 		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
-		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
-			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
+		I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
+			   I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
 			   HSW_FBCQ_DIS);
 	}
 
 	I915_WRITE(SNB_DPFC_CTL_SA,
 		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
-	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);
 
 	intel_fbc_nuke(dev_priv);
 
-	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
+	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
 }
 
 /**
  * intel_fbc_enabled - Is FBC enabled?
- * @dev: the drm_device
+ * @dev_priv: i915 device instance
  *
  * This function is used to verify the current state of FBC.
  * FIXME: This should be tracked in the plane config eventually
  *        instead of queried at runtime for most callers.
  */
-bool intel_fbc_enabled(struct drm_device *dev)
+bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
 	return dev_priv->fbc.enabled;
 }
 
@@ -327,31 +313,33 @@
 	struct intel_fbc_work *work =
 		container_of(to_delayed_work(__work),
 			     struct intel_fbc_work, work);
-	struct drm_device *dev = work->crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
+	struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&dev_priv->fbc.lock);
 	if (work == dev_priv->fbc.fbc_work) {
 		/* Double check that we haven't switched fb without cancelling
 		 * the prior work.
 		 */
-		if (work->crtc->primary->fb == work->fb) {
-			dev_priv->display.enable_fbc(work->crtc);
+		if (crtc_fb == work->fb) {
+			dev_priv->fbc.enable_fbc(work->crtc);
 
-			dev_priv->fbc.crtc = to_intel_crtc(work->crtc);
-			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
-			dev_priv->fbc.y = work->crtc->y;
+			dev_priv->fbc.crtc = work->crtc;
+			dev_priv->fbc.fb_id = crtc_fb->base.id;
+			dev_priv->fbc.y = work->crtc->base.y;
 		}
 
 		dev_priv->fbc.fbc_work = NULL;
 	}
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->fbc.lock);
 
 	kfree(work);
 }
 
 static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
 {
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+
 	if (dev_priv->fbc.fbc_work == NULL)
 		return;
 
@@ -373,26 +361,24 @@
 	dev_priv->fbc.fbc_work = NULL;
 }
 
-static void intel_fbc_enable(struct drm_crtc *crtc)
+static void intel_fbc_enable(struct intel_crtc *crtc)
 {
 	struct intel_fbc_work *work;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 
-	if (!dev_priv->display.enable_fbc)
-		return;
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
 	intel_fbc_cancel_work(dev_priv);
 
 	work = kzalloc(sizeof(*work), GFP_KERNEL);
 	if (work == NULL) {
 		DRM_ERROR("Failed to allocate FBC work structure\n");
-		dev_priv->display.enable_fbc(crtc);
+		dev_priv->fbc.enable_fbc(crtc);
 		return;
 	}
 
 	work->crtc = crtc;
-	work->fb = crtc->primary->fb;
+	work->fb = crtc->base.primary->fb;
 	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
 	dev_priv->fbc.fbc_work = work;
@@ -413,75 +399,274 @@
 	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
 }
 
-/**
- * intel_fbc_disable - disable FBC
- * @dev: the drm_device
- *
- * This function disables FBC.
- */
-void intel_fbc_disable(struct drm_device *dev)
+static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
 	intel_fbc_cancel_work(dev_priv);
 
-	if (!dev_priv->display.disable_fbc)
-		return;
-
-	dev_priv->display.disable_fbc(dev);
+	dev_priv->fbc.disable_fbc(dev_priv);
 	dev_priv->fbc.crtc = NULL;
 }
 
-static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
+/**
+ * intel_fbc_disable - disable FBC
+ * @dev_priv: i915 device instance
+ *
+ * This function disables FBC.
+ */
+void intel_fbc_disable(struct drm_i915_private *dev_priv)
+{
+	if (!dev_priv->fbc.enable_fbc)
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	__intel_fbc_disable(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
+}
+
+/*
+ * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * @crtc: the CRTC
+ *
+ * This function disables FBC if it's associated with the provided CRTC.
+ */
+void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+	if (!dev_priv->fbc.enable_fbc)
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	if (dev_priv->fbc.crtc == crtc)
+		__intel_fbc_disable(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
+}
+
+const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
+{
+	switch (reason) {
+	case FBC_OK:
+		return "FBC enabled but currently disabled in hardware";
+	case FBC_UNSUPPORTED:
+		return "unsupported by this chipset";
+	case FBC_NO_OUTPUT:
+		return "no output";
+	case FBC_STOLEN_TOO_SMALL:
+		return "not enough stolen memory";
+	case FBC_UNSUPPORTED_MODE:
+		return "mode incompatible with compression";
+	case FBC_MODE_TOO_LARGE:
+		return "mode too large for compression";
+	case FBC_BAD_PLANE:
+		return "FBC unsupported on plane";
+	case FBC_NOT_TILED:
+		return "framebuffer not tiled or fenced";
+	case FBC_MULTIPLE_PIPES:
+		return "more than one pipe active";
+	case FBC_MODULE_PARAM:
+		return "disabled per module param";
+	case FBC_CHIP_DEFAULT:
+		return "disabled per chip default";
+	case FBC_ROTATION:
+		return "rotation unsupported";
+	case FBC_IN_DBG_MASTER:
+		return "Kernel debugger is active";
+	default:
+		MISSING_CASE(reason);
+		return "unknown reason";
+	}
+}
+
+static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
 			      enum no_fbc_reason reason)
 {
 	if (dev_priv->fbc.no_fbc_reason == reason)
-		return false;
+		return;
 
 	dev_priv->fbc.no_fbc_reason = reason;
-	return true;
+	DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason));
 }
 
 static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
 {
 	struct drm_crtc *crtc = NULL, *tmp_crtc;
 	enum pipe pipe;
-	bool pipe_a_only = false, one_pipe_only = false;
+	bool pipe_a_only = false;
 
 	if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
 		pipe_a_only = true;
-	else if (INTEL_INFO(dev_priv)->gen <= 4)
-		one_pipe_only = true;
 
 	for_each_pipe(dev_priv, pipe) {
 		tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 
 		if (intel_crtc_active(tmp_crtc) &&
-		    to_intel_plane_state(tmp_crtc->primary->state)->visible) {
-			if (one_pipe_only && crtc) {
-				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
-					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
-				return NULL;
-			}
+		    to_intel_plane_state(tmp_crtc->primary->state)->visible)
 			crtc = tmp_crtc;
-		}
 
 		if (pipe_a_only)
 			break;
 	}
 
-	if (!crtc || crtc->primary->fb == NULL) {
-		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
-			DRM_DEBUG_KMS("no output, disabling\n");
+	if (!crtc || crtc->primary->fb == NULL)
 		return NULL;
-	}
 
 	return crtc;
 }
 
+static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
+{
+	enum pipe pipe;
+	int n_pipes = 0;
+	struct drm_crtc *crtc;
+
+	if (INTEL_INFO(dev_priv)->gen > 4)
+		return true;
+
+	for_each_pipe(dev_priv, pipe) {
+		crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+		if (intel_crtc_active(crtc) &&
+		    to_intel_plane_state(crtc->primary->state)->visible)
+			n_pipes++;
+	}
+
+	return (n_pipes < 2);
+}
+
+static int find_compression_threshold(struct drm_i915_private *dev_priv,
+				      struct drm_mm_node *node,
+				      int size,
+				      int fb_cpp)
+{
+	int compression_threshold = 1;
+	int ret;
+
+	/* HACK: This code depends on what we will do in *_enable_fbc. If that
+	 * code changes, this code needs to change as well.
+	 *
+	 * The enable_fbc code will attempt to use one of our 2 compression
+	 * thresholds, therefore, in that case, we only have one fallback.
+	 */
+
+	/* Try to over-allocate to reduce reallocations and fragmentation. */
+	ret = i915_gem_stolen_insert_node(dev_priv, node, size <<= 1, 4096);
+	if (ret == 0)
+		return compression_threshold;
+
+again:
+	/* HW's ability to limit the CFB is 1:4 */
+	if (compression_threshold > 4 ||
+	    (fb_cpp == 2 && compression_threshold == 2))
+		return 0;
+
+	ret = i915_gem_stolen_insert_node(dev_priv, node, size >>= 1, 4096);
+	if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
+		return 0;
+	} else if (ret) {
+		compression_threshold <<= 1;
+		goto again;
+	} else {
+		return compression_threshold;
+	}
+}
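/*
 * The policy of the function above, in short: try a 2x over-allocation
 * first, then repeatedly halve the request while doubling the
 * compression threshold until the 1:4 hardware limit (or the 16bpp 1:2
 * limit) is hit. A standalone model with the allocator passed in as a
 * callback; this is illustrative, not the driver code, and it omits
 * the pre-gen5 early bail-out on the first failed halving.
 */
static int example_cfb_threshold(int (*alloc)(int size), int size, int fb_cpp)
{
	int threshold = 1;

	if (alloc(size <<= 1) == 0)	/* over-allocate to limit refragmentation */
		return threshold;

	for (;;) {
		if (threshold > 4 || (fb_cpp == 2 && threshold == 2))
			return 0;	/* nothing small enough fits */
		if (alloc(size >>= 1) == 0)
			return threshold;
		threshold <<= 1;
	}
}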
+
+static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
+			       int fb_cpp)
+{
+	struct drm_mm_node *uninitialized_var(compressed_llb);
+	int ret;
+
+	ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
+					 size, fb_cpp);
+	if (!ret)
+		goto err_llb;
+	else if (ret > 1) {
+		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
+
+	}
+
+	dev_priv->fbc.threshold = ret;
+
+	if (INTEL_INFO(dev_priv)->gen >= 5)
+		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+	else if (IS_GM45(dev_priv)) {
+		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+	} else {
+		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
+		if (!compressed_llb)
+			goto err_fb;
+
+		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
+						  4096, 4096);
+		if (ret)
+			goto err_fb;
+
+		dev_priv->fbc.compressed_llb = compressed_llb;
+
+		I915_WRITE(FBC_CFB_BASE,
+			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
+		I915_WRITE(FBC_LL_BASE,
+			   dev_priv->mm.stolen_base + compressed_llb->start);
+	}
+
+	dev_priv->fbc.uncompressed_size = size;
+
+	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
+		      size);
+
+	return 0;
+
+err_fb:
+	kfree(compressed_llb);
+	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+err_llb:
+	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+	return -ENOSPC;
+}
+
+static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->fbc.uncompressed_size == 0)
+		return;
+
+	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+
+	if (dev_priv->fbc.compressed_llb) {
+		i915_gem_stolen_remove_node(dev_priv,
+					    dev_priv->fbc.compressed_llb);
+		kfree(dev_priv->fbc.compressed_llb);
+	}
+
+	dev_priv->fbc.uncompressed_size = 0;
+}
+
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
+{
+	if (!dev_priv->fbc.enable_fbc)
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	__intel_fbc_cleanup_cfb(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
+}
+
+static int intel_fbc_setup_cfb(struct drm_i915_private *dev_priv, int size,
+			       int fb_cpp)
+{
+	if (size <= dev_priv->fbc.uncompressed_size)
+		return 0;
+
+	/* Release any current block */
+	__intel_fbc_cleanup_cfb(dev_priv);
+
+	return intel_fbc_alloc_cfb(dev_priv, size, fb_cpp);
+}
+
 /**
- * intel_fbc_update - enable/disable FBC as needed
- * @dev: the drm_device
+ * __intel_fbc_update - enable/disable FBC as needed, unlocked
+ * @dev_priv: i915 device instance
  *
  * Set up the framebuffer compression hardware at mode set time.  We
  * enable it if possible:
@@ -498,9 +683,8 @@
  *
  * We need to enable/disable FBC on a global basis.
  */
-void intel_fbc_update(struct drm_device *dev)
+static void __intel_fbc_update(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = NULL;
 	struct intel_crtc *intel_crtc;
 	struct drm_framebuffer *fb;
@@ -508,22 +692,19 @@
 	const struct drm_display_mode *adjusted_mode;
 	unsigned int max_width, max_height;
 
-	if (!HAS_FBC(dev))
-		return;
+	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
 
 	/* disable framebuffer compression in vGPU */
-	if (intel_vgpu_active(dev))
+	if (intel_vgpu_active(dev_priv->dev))
 		i915.enable_fbc = 0;
 
 	if (i915.enable_fbc < 0) {
-		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
-			DRM_DEBUG_KMS("disabled per chip default\n");
+		set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT);
 		goto out_disable;
 	}
 
 	if (!i915.enable_fbc) {
-		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
-			DRM_DEBUG_KMS("fbc disabled per module param\n");
+		set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM);
 		goto out_disable;
 	}
 
@@ -537,8 +718,15 @@
 	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
 	 */
 	crtc = intel_fbc_find_crtc(dev_priv);
-	if (!crtc)
+	if (!crtc) {
+		set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT);
 		goto out_disable;
+	}
+
+	if (!multiple_pipes_ok(dev_priv)) {
+		set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES);
+		goto out_disable;
+	}
 
 	intel_crtc = to_intel_crtc(crtc);
 	fb = crtc->primary->fb;
@@ -547,16 +735,14 @@
 
 	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
 	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
-		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
-			DRM_DEBUG_KMS("mode incompatible with compression, "
-				      "disabling\n");
+		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE);
 		goto out_disable;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+	if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
 		max_width = 4096;
 		max_height = 4096;
-	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
 		max_width = 4096;
 		max_height = 2048;
 	} else {
@@ -565,14 +751,12 @@
 	}
 	if (intel_crtc->config->pipe_src_w > max_width ||
 	    intel_crtc->config->pipe_src_h > max_height) {
-		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
-			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+		set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
 		goto out_disable;
 	}
-	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
+	if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
 	    intel_crtc->plane != PLANE_A) {
-		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
-			DRM_DEBUG_KMS("plane not A, disabling compression\n");
+		set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
 		goto out_disable;
 	}
 
@@ -581,25 +765,24 @@
 	 */
 	if (obj->tiling_mode != I915_TILING_X ||
 	    obj->fence_reg == I915_FENCE_REG_NONE) {
-		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
-			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+		set_no_fbc_reason(dev_priv, FBC_NOT_TILED);
 		goto out_disable;
 	}
-	if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
+	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
 	    crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
-		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
-			DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
+		set_no_fbc_reason(dev_priv, FBC_ROTATION);
 		goto out_disable;
 	}
 
 	/* If the kernel debugger is active, always disable compression */
-	if (in_dbg_master())
+	if (in_dbg_master()) {
+		set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
 		goto out_disable;
+	}
 
-	if (i915_gem_stolen_setup_compression(dev, obj->base.size,
-					      drm_format_plane_cpp(fb->pixel_format, 0))) {
-		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
-			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
+	if (intel_fbc_setup_cfb(dev_priv, obj->base.size,
+				drm_format_plane_cpp(fb->pixel_format, 0))) {
+		set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
 		goto out_disable;
 	}
 
@@ -613,7 +796,7 @@
 	    dev_priv->fbc.y == crtc->y)
 		return;
 
-	if (intel_fbc_enabled(dev)) {
+	if (intel_fbc_enabled(dev_priv)) {
 		/* We update FBC along two paths, after changing fb/crtc
 		 * configuration (modeswitching) and after page-flipping
 		 * finishes. For the latter, we know that not only did
@@ -638,58 +821,87 @@
 		 * some point. And we wait before enabling FBC anyway.
 		 */
 		DRM_DEBUG_KMS("disabling active FBC for update\n");
-		intel_fbc_disable(dev);
+		__intel_fbc_disable(dev_priv);
 	}
 
-	intel_fbc_enable(crtc);
+	intel_fbc_enable(intel_crtc);
 	dev_priv->fbc.no_fbc_reason = FBC_OK;
 	return;
 
 out_disable:
 	/* Multiple disables should be harmless */
-	if (intel_fbc_enabled(dev)) {
+	if (intel_fbc_enabled(dev_priv)) {
 		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
-		intel_fbc_disable(dev);
+		__intel_fbc_disable(dev_priv);
 	}
-	i915_gem_stolen_cleanup_compression(dev);
+	__intel_fbc_cleanup_cfb(dev_priv);
+}
+
+/*
+ * intel_fbc_update - enable/disable FBC as needed
+ * @dev_priv: i915 device instance
+ *
+ * This function reevaluates the overall state and enables or disables FBC.
+ */
+void intel_fbc_update(struct drm_i915_private *dev_priv)
+{
+	if (!dev_priv->fbc.enable_fbc)
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+	__intel_fbc_update(dev_priv);
+	mutex_unlock(&dev_priv->fbc.lock);
 }
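The file now follows a consistent locking convention: public entry points (intel_fbc_update(), intel_fbc_disable(), ...) take fbc.lock, while the double-underscore helpers assert that it is already held, so helpers can call each other without recursive locking. The skeleton, with hypothetical names:

static void __example_fbc_op(struct drm_i915_private *dev_priv)
{
	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

	/* ... mutate dev_priv->fbc state, call other __helpers ... */
}

void example_fbc_op(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->fbc.enable_fbc)	/* FBC unsupported: no-op */
		return;

	mutex_lock(&dev_priv->fbc.lock);
	__example_fbc_op(dev_priv);
	mutex_unlock(&dev_priv->fbc.lock);
}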
 
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned int frontbuffer_bits,
 			  enum fb_op_origin origin)
 {
-	struct drm_device *dev = dev_priv->dev;
 	unsigned int fbc_bits;
 
+	if (!dev_priv->fbc.enable_fbc)
+		return;
+
 	if (origin == ORIGIN_GTT)
 		return;
 
+	mutex_lock(&dev_priv->fbc.lock);
+
 	if (dev_priv->fbc.enabled)
 		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
 	else if (dev_priv->fbc.fbc_work)
 		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
-			to_intel_crtc(dev_priv->fbc.fbc_work->crtc)->pipe);
+					dev_priv->fbc.fbc_work->crtc->pipe);
 	else
 		fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
 
 	dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
 
 	if (dev_priv->fbc.busy_bits)
-		intel_fbc_disable(dev);
+		__intel_fbc_disable(dev_priv);
+
+	mutex_unlock(&dev_priv->fbc.lock);
 }
 
 void intel_fbc_flush(struct drm_i915_private *dev_priv,
-		     unsigned int frontbuffer_bits)
+		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
 {
-	struct drm_device *dev = dev_priv->dev;
-
-	if (!dev_priv->fbc.busy_bits)
+	if (!dev_priv->fbc.enable_fbc)
 		return;
 
+	if (origin == ORIGIN_GTT)
+		return;
+
+	mutex_lock(&dev_priv->fbc.lock);
+
 	dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
 
-	if (!dev_priv->fbc.busy_bits)
-		intel_fbc_update(dev);
+	if (!dev_priv->fbc.busy_bits) {
+		__intel_fbc_disable(dev_priv);
+		__intel_fbc_update(dev_priv);
+	}
+
+	mutex_unlock(&dev_priv->fbc.lock);
 }
 
 /**
@@ -702,6 +914,8 @@
 {
 	enum pipe pipe;
 
+	mutex_init(&dev_priv->fbc.lock);
+
 	if (!HAS_FBC(dev_priv)) {
 		dev_priv->fbc.enabled = false;
 		dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
@@ -717,25 +931,25 @@
 	}
 
 	if (INTEL_INFO(dev_priv)->gen >= 7) {
-		dev_priv->display.fbc_enabled = ilk_fbc_enabled;
-		dev_priv->display.enable_fbc = gen7_fbc_enable;
-		dev_priv->display.disable_fbc = ilk_fbc_disable;
+		dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
+		dev_priv->fbc.enable_fbc = gen7_fbc_enable;
+		dev_priv->fbc.disable_fbc = ilk_fbc_disable;
 	} else if (INTEL_INFO(dev_priv)->gen >= 5) {
-		dev_priv->display.fbc_enabled = ilk_fbc_enabled;
-		dev_priv->display.enable_fbc = ilk_fbc_enable;
-		dev_priv->display.disable_fbc = ilk_fbc_disable;
+		dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
+		dev_priv->fbc.enable_fbc = ilk_fbc_enable;
+		dev_priv->fbc.disable_fbc = ilk_fbc_disable;
 	} else if (IS_GM45(dev_priv)) {
-		dev_priv->display.fbc_enabled = g4x_fbc_enabled;
-		dev_priv->display.enable_fbc = g4x_fbc_enable;
-		dev_priv->display.disable_fbc = g4x_fbc_disable;
+		dev_priv->fbc.fbc_enabled = g4x_fbc_enabled;
+		dev_priv->fbc.enable_fbc = g4x_fbc_enable;
+		dev_priv->fbc.disable_fbc = g4x_fbc_disable;
 	} else {
-		dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
-		dev_priv->display.enable_fbc = i8xx_fbc_enable;
-		dev_priv->display.disable_fbc = i8xx_fbc_disable;
+		dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled;
+		dev_priv->fbc.enable_fbc = i8xx_fbc_enable;
+		dev_priv->fbc.disable_fbc = i8xx_fbc_disable;
 
 		/* This value was pulled out of someone's hat */
 		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
 	}
 
-	dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
+	dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv);
 }
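One intended consumer of the new intel_no_fbc_reason_str() is status reporting, for example from debugfs. A hypothetical call site:

#include <linux/seq_file.h>

static void example_fbc_status(struct seq_file *m,
			       struct drm_i915_private *dev_priv)
{
	if (intel_fbc_enabled(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n",
			   intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason));
}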
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6372cfc..8c6a6fa 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -55,16 +55,8 @@
 	ret = drm_fb_helper_set_par(info);
 
 	if (ret == 0) {
-		/*
-		 * FIXME: fbdev presumes that all callbacks also work from
-		 * atomic contexts and relies on that for emergency oops
-		 * printing. KMS totally doesn't do that and the locking here is
-		 * by far not the only place this goes wrong.  Ignore this for
-		 * now until we solve this for real.
-		 */
 		mutex_lock(&fb_helper->dev->struct_mutex);
-		ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
-							true);
+		intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
 		mutex_unlock(&fb_helper->dev->struct_mutex);
 	}
 
@@ -81,15 +73,8 @@
 	ret = drm_fb_helper_blank(blank, info);
 
 	if (ret == 0) {
-		/*
-		 * FIXME: fbdev presumes that all callbacks also work from
-		 * atomic contexts and relies on that for emergency oops
-		 * printing. KMS totally doesn't do that and the locking here is
-		 * by far not the only place this goes wrong.  Ignore this for
-		 * now until we solve this for real.
-		 */
 		mutex_lock(&fb_helper->dev->struct_mutex);
-		intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
+		intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
 		mutex_unlock(&fb_helper->dev->struct_mutex);
 	}
 
@@ -107,15 +92,8 @@
 	ret = drm_fb_helper_pan_display(var, info);
 
 	if (ret == 0) {
-		/*
-		 * FIXME: fbdev presumes that all callbacks also work from
-		 * atomic contexts and relies on that for emergency oops
-		 * printing. KMS totally doesn't do that and the locking here is
-		 * by far not the only place this goes wrong.  Ignore this for
-		 * now until we solve this for real.
-		 */
 		mutex_lock(&fb_helper->dev->struct_mutex);
-		intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
+		intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
 		mutex_unlock(&fb_helper->dev->struct_mutex);
 	}
 
@@ -126,9 +104,9 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = intel_fbdev_set_par,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_pan_display = intel_fbdev_pan_display,
 	.fb_blank = intel_fbdev_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -177,7 +155,7 @@
 	}
 
 	/* Flush everything out, we'll be doing GTT only from now on */
-	ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL);
+	ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL, NULL);
 	if (ret) {
 		DRM_ERROR("failed to pin obj: %d\n", ret);
 		goto out_fb;
@@ -237,9 +215,9 @@
 	obj = intel_fb->obj;
 	size = obj->base.size;
 
-	info = framebuffer_alloc(0, &dev->pdev->dev);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_unpin;
 	}
 
@@ -248,24 +226,13 @@
 	fb = &ifbdev->fb->base;
 
 	ifbdev->helper.fb = fb;
-	ifbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "inteldrmfb");
 
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &intelfb_ops;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unpin;
-	}
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out_unpin;
-	}
 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
 	info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
 
@@ -277,7 +244,7 @@
 			   size);
 	if (!info->screen_base) {
 		ret = -ENOSPC;
-		goto out_unpin;
+		goto out_destroy_fbi;
 	}
 	info->screen_size = size;
 
@@ -304,6 +271,8 @@
 	vga_switcheroo_client_fb_set(dev->pdev, info);
 	return 0;
 
+out_destroy_fbi:
+	drm_fb_helper_release_fbi(helper);
 out_unpin:
 	i915_gem_object_ggtt_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
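The conversion above replaces open-coded framebuffer_alloc()/fb_alloc_cmap()/alloc_apertures() with the drm_fb_helper_alloc_fbi() bundle, which takes over cmap and aperture setup, and pairs teardown as unregister then release. A condensed sketch of that lifecycle, with the driver-specific steps elided:

static int example_fbdev_create(struct drm_fb_helper *helper)
{
	struct fb_info *info = drm_fb_helper_alloc_fbi(helper);

	if (IS_ERR(info))
		return PTR_ERR(info);

	/* fill info->fbops, apertures, screen_base, then register */
	return 0;
}

static void example_fbdev_destroy(struct drm_fb_helper *helper)
{
	drm_fb_helper_unregister_fbi(helper);
	drm_fb_helper_release_fbi(helper);
	drm_fb_helper_fini(helper);
}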
@@ -484,18 +453,13 @@
 			 * IMPORTANT: We want to use the adjusted mode (i.e.
 			 * after the panel fitter upscaling) as the initial
 			 * config, not the input mode, which is what crtc->mode
-			 * usually contains. But since our current fastboot
+			 * usually contains. But since our current
 			 * code puts a mode derived from the post-pfit timings
-			 * into crtc->mode this works out correctly. We don't
-			 * use hwmode anywhere right now, so use it for this
-			 * since the fb helper layer wants a pointer to
-			 * something we own.
+			 * into crtc->mode this works out correctly.
 			 */
 			DRM_DEBUG_KMS("looking for current mode on connector %s\n",
 				      connector->name);
-			intel_mode_from_pipe_config(&encoder->crtc->hwmode,
-						    to_intel_crtc(encoder->crtc)->config);
-			modes[i] = &encoder->crtc->hwmode;
+			modes[i] = &encoder->crtc->mode;
 		}
 		crtcs[i] = new_crtc;
 
@@ -550,16 +514,9 @@
 static void intel_fbdev_destroy(struct drm_device *dev,
 				struct intel_fbdev *ifbdev)
 {
-	if (ifbdev->helper.fbdev) {
-		struct fb_info *info = ifbdev->helper.fbdev;
 
-		unregister_framebuffer(info);
-		iounmap(info->screen_base);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&ifbdev->helper);
+	drm_fb_helper_release_fbi(&ifbdev->helper);
 
 	drm_fb_helper_fini(&ifbdev->helper);
 
@@ -582,7 +539,6 @@
 	struct intel_framebuffer *fb = NULL;
 	struct drm_crtc *crtc;
 	struct intel_crtc *intel_crtc;
-	struct intel_initial_plane_config *plane_config = NULL;
 	unsigned int max_size = 0;
 
 	if (!i915.fastboot)
@@ -590,20 +546,21 @@
 
 	/* Find the largest fb */
 	for_each_crtc(dev, crtc) {
+		struct drm_i915_gem_object *obj =
+			intel_fb_obj(crtc->primary->state->fb);
 		intel_crtc = to_intel_crtc(crtc);
 
-		if (!intel_crtc->active || !crtc->primary->fb) {
+		if (!intel_crtc->active || !obj) {
 			DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
 				      pipe_name(intel_crtc->pipe));
 			continue;
 		}
 
-		if (intel_crtc->plane_config.size > max_size) {
+		if (obj->base.size > max_size) {
 			DRM_DEBUG_KMS("found possible fb from plane %c\n",
 				      pipe_name(intel_crtc->pipe));
-			plane_config = &intel_crtc->plane_config;
-			fb = to_intel_framebuffer(crtc->primary->fb);
-			max_size = plane_config->size;
+			fb = to_intel_framebuffer(crtc->primary->state->fb);
+			max_size = obj->base.size;
 		}
 	}
 
@@ -638,7 +595,6 @@
 			DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
 				      pipe_name(intel_crtc->pipe),
 				      cur_size, fb->base.pitches[0]);
-			plane_config = NULL;
 			fb = NULL;
 			break;
 		}
@@ -659,7 +615,6 @@
 			DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
 				      pipe_name(intel_crtc->pipe),
 				      cur_size, max_size);
-			plane_config = NULL;
 			fb = NULL;
 			break;
 		}
@@ -810,7 +765,7 @@
 	if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
 		memset_io(info->screen_base, 0, info->screen_size);
 
-	fb_set_suspend(info, state);
+	drm_fb_helper_set_suspend(&ifbdev->helper, state);
 	console_unlock();
 }
 
@@ -825,11 +780,20 @@
 {
 	int ret;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_fbdev *ifbdev = dev_priv->fbdev;
+	struct drm_fb_helper *fb_helper;
 
-	if (!dev_priv->fbdev)
+	if (!ifbdev)
 		return;
 
-	ret = drm_fb_helper_restore_fbdev_mode_unlocked(&dev_priv->fbdev->helper);
-	if (ret)
+	fb_helper = &ifbdev->helper;
+
+	ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+	if (ret) {
 		DRM_DEBUG("failed to restore crtc mode\n");
+	} else {
+		mutex_lock(&fb_helper->dev->struct_mutex);
+		intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
+		mutex_unlock(&fb_helper->dev->struct_mutex);
+	}
 }
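
A side note on the reworked error path above: drm_fb_helper_alloc_fbi() folds the fb_info, cmap, and aperture allocation into one helper, so any failure after it now unwinds through a dedicated out_destroy_fbi label before the unpin. A minimal stand-alone sketch of that goto-ladder pattern; the acquire/release stand-ins here are plain malloc/free, not i915 or DRM functions:

#include <errno.h>
#include <stdlib.h>

static int fbdev_setup_sketch(void)
{
	void *pinned, *fbi, *mapping;
	int ret;

	pinned = malloc(1);		/* stands in for pinning the fb object */
	if (!pinned)
		return -ENOMEM;

	fbi = malloc(1);		/* stands in for drm_fb_helper_alloc_fbi() */
	if (!fbi) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	mapping = malloc(1);		/* stands in for ioremapping the aperture */
	if (!mapping) {
		ret = -ENOSPC;
		goto out_destroy_fbi;	/* unwind in reverse acquisition order */
	}

	/* in the real driver these live on until teardown */
	free(mapping);
	free(fbi);
	free(pinned);
	return 0;

out_destroy_fbi:
	free(fbi);			/* cf. drm_fb_helper_release_fbi() */
out_unpin:
	free(pinned);			/* cf. i915_gem_object_ggtt_unpin() */
	return ret;
}

int main(void)
{
	return fbdev_setup_sketch();
}
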
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 57095f5..ac85357 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -65,84 +65,29 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
-static void intel_increase_pllclock(struct drm_device *dev,
-				    enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int dpll_reg = DPLL(pipe);
-	int dpll;
-
-	if (!HAS_GMCH_DISPLAY(dev))
-		return;
-
-	if (!dev_priv->lvds_downclock_avail)
-		return;
-
-	dpll = I915_READ(dpll_reg);
-	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
-		DRM_DEBUG_DRIVER("upclocking LVDS\n");
-
-		assert_panel_unlocked(dev_priv, pipe);
-
-		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
-		I915_WRITE(dpll_reg, dpll);
-		intel_wait_for_vblank(dev, pipe);
-
-		dpll = I915_READ(dpll_reg);
-		if (dpll & DISPLAY_RATE_SELECT_FPA1)
-			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
-	}
-}
-
-/**
- * intel_mark_fb_busy - mark given planes as busy
- * @dev: DRM device
- * @frontbuffer_bits: bits for the affected planes
- * @ring: optional ring for asynchronous commands
- *
- * This function gets called every time the screen contents change. It can be
- * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
- */
-static void intel_mark_fb_busy(struct drm_device *dev,
-			       unsigned frontbuffer_bits,
-			       struct intel_engine_cs *ring)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	enum pipe pipe;
-
-	for_each_pipe(dev_priv, pipe) {
-		if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
-			continue;
-
-		intel_increase_pllclock(dev, pipe);
-	}
-}
-
 /**
  * intel_fb_obj_invalidate - invalidate frontbuffer object
  * @obj: GEM object to invalidate
- * @ring: set for asynchronous rendering
  * @origin: which operation caused the invalidation
  *
  * This function gets called every time rendering on the given object starts and
  * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
+ * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
  * until the rendering completes or a flip on this frontbuffer plane is
  * scheduled.
  */
 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     struct intel_engine_cs *ring,
 			     enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	if (!obj->frontbuffer_bits)
 		return;
 
-	if (ring) {
+	if (origin == ORIGIN_CS) {
 		mutex_lock(&dev_priv->fb_tracking.lock);
 		dev_priv->fb_tracking.busy_bits
 			|= obj->frontbuffer_bits;
@@ -151,8 +96,6 @@
 		mutex_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
-
 	intel_psr_invalidate(dev, obj->frontbuffer_bits);
 	intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
 	intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
@@ -162,6 +105,7 @@
  * intel_frontbuffer_flush - flush frontbuffer
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
  *
  * This function gets called every time rendering on the given planes has
  * completed and frontbuffer caching can be started again. Flushes will get
@@ -169,37 +113,40 @@
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flush(struct drm_device *dev,
-			     unsigned frontbuffer_bits)
+static void intel_frontbuffer_flush(struct drm_device *dev,
+				    unsigned frontbuffer_bits,
+				    enum fb_op_origin origin)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* Delay flushing when rings are still busy. */
 	mutex_lock(&dev_priv->fb_tracking.lock);
 	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
 	mutex_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+	if (!frontbuffer_bits)
+		return;
 
 	intel_edp_drrs_flush(dev, frontbuffer_bits);
-	intel_psr_flush(dev, frontbuffer_bits);
-	intel_fbc_flush(dev_priv, frontbuffer_bits);
+	intel_psr_flush(dev, frontbuffer_bits, origin);
+	intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
 }
 
 /**
  * intel_fb_obj_flush - flush frontbuffer object
  * @obj: GEM object to flush
  * @retire: set when retiring asynchronous rendering
+ * @origin: which operation caused the flush
  *
  * This function gets called every time rendering on the given object has
  * completed and frontbuffer caching can be started again. If @retire is true
  * then any delayed flushes will be unblocked.
  */
 void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-			bool retire)
+			bool retire, enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	unsigned frontbuffer_bits;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -218,7 +165,7 @@
 		mutex_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits);
+	intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
 }
 
 /**
@@ -236,7 +183,7 @@
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 				    unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	mutex_lock(&dev_priv->fb_tracking.lock);
 	dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
@@ -244,7 +191,7 @@
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
 	mutex_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_psr_single_frame_update(dev);
+	intel_psr_single_frame_update(dev, frontbuffer_bits);
 }
 
 /**
@@ -260,7 +207,7 @@
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
 				     unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	mutex_lock(&dev_priv->fb_tracking.lock);
 	/* Mask any cancelled flips. */
@@ -268,5 +215,29 @@
 	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
 	mutex_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits);
+	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+}
+
+/**
+ * intel_frontbuffer_flip - synchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on @frontbuffer_bits. This is for
+ * synchronous plane updates which will happen on the next vblank and which will
+ * not get delayed by pending gpu rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip(struct drm_device *dev,
+			    unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	mutex_lock(&dev_priv->fb_tracking.lock);
+	/* Remove stale busy bits due to the old buffer. */
+	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+	mutex_unlock(&dev_priv->fb_tracking.lock);
+
+	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
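
The tracking contract above reduces to a small amount of bit bookkeeping: an ORIGIN_CS invalidation marks the plane bits busy in fb_tracking.busy_bits, and any flush for still-busy bits is suppressed until the rendering retires. A user-space model of just that bookkeeping, as a sketch rather than driver code:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t busy_bits;	/* models dev_priv->fb_tracking.busy_bits */

static void invalidate(uint32_t bits, int origin_is_cs)
{
	if (origin_is_cs)
		busy_bits |= bits;	/* delay flushes until retirement */
}

static uint32_t flush(uint32_t bits)
{
	bits &= ~busy_bits;	/* busy planes drop out of the flush */
	return bits;		/* bits actually flushed */
}

static void retire(uint32_t bits)
{
	busy_bits &= ~bits;	/* rendering completed, unblock flushes */
}

int main(void)
{
	invalidate(0x1, 1);		/* CS rendering starts on plane bit 0 */
	assert(flush(0x1) == 0);	/* flush suppressed while busy */
	retire(0x1);
	assert(flush(0x1) == 0x1);	/* flush now goes through */
	printf("frontbuffer busy-bit model ok\n");
	return 0;
}
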
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
new file mode 100644
index 0000000..18d7f20
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef _INTEL_GUC_FWIF_H
+#define _INTEL_GUC_FWIF_H
+
+/*
+ * This file is partially autogenerated, although currently with some manual
+ * fixups afterwards. In future, it should be entirely autogenerated, in order
+ * to ensure that the definitions herein remain in sync with those used by the
+ * GuC's own firmware.
+ *
+ * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST.
+ */
+
+#define GFXCORE_FAMILY_GEN8		11
+#define GFXCORE_FAMILY_GEN9		12
+#define GFXCORE_FAMILY_FORCE_ULONG	0x7fffffff
+
+#define GUC_CTX_PRIORITY_CRITICAL	0
+#define GUC_CTX_PRIORITY_HIGH		1
+#define GUC_CTX_PRIORITY_NORMAL		2
+#define GUC_CTX_PRIORITY_LOW		3
+
+#define GUC_MAX_GPU_CONTEXTS		1024
+#define	GUC_INVALID_CTX_ID		(GUC_MAX_GPU_CONTEXTS + 1)
+
+/* Work queue item header definitions */
+#define WQ_STATUS_ACTIVE		1
+#define WQ_STATUS_SUSPENDED		2
+#define WQ_STATUS_CMD_ERROR		3
+#define WQ_STATUS_ENGINE_ID_NOT_USED	4
+#define WQ_STATUS_SUSPENDED_FROM_RESET	5
+#define WQ_TYPE_SHIFT			0
+#define   WQ_TYPE_BATCH_BUF		(0x1 << WQ_TYPE_SHIFT)
+#define   WQ_TYPE_PSEUDO		(0x2 << WQ_TYPE_SHIFT)
+#define   WQ_TYPE_INORDER		(0x3 << WQ_TYPE_SHIFT)
+#define WQ_TARGET_SHIFT			10
+#define WQ_LEN_SHIFT			16
+#define WQ_NO_WCFLUSH_WAIT		(1 << 27)
+#define WQ_PRESENT_WORKLOAD		(1 << 28)
+#define WQ_WORKLOAD_SHIFT		29
+#define   WQ_WORKLOAD_GENERAL		(0 << WQ_WORKLOAD_SHIFT)
+#define   WQ_WORKLOAD_GPGPU		(1 << WQ_WORKLOAD_SHIFT)
+#define   WQ_WORKLOAD_TOUCH		(2 << WQ_WORKLOAD_SHIFT)
+
+#define WQ_RING_TAIL_SHIFT		20
+#define WQ_RING_TAIL_MASK		(0x7FF << WQ_RING_TAIL_SHIFT)
+
+#define GUC_DOORBELL_ENABLED		1
+#define GUC_DOORBELL_DISABLED		0
+
+#define GUC_CTX_DESC_ATTR_ACTIVE	(1 << 0)
+#define GUC_CTX_DESC_ATTR_PENDING_DB	(1 << 1)
+#define GUC_CTX_DESC_ATTR_KERNEL	(1 << 2)
+#define GUC_CTX_DESC_ATTR_PREEMPT	(1 << 3)
+#define GUC_CTX_DESC_ATTR_RESET		(1 << 4)
+#define GUC_CTX_DESC_ATTR_WQLOCKED	(1 << 5)
+#define GUC_CTX_DESC_ATTR_PCH		(1 << 6)
+
+/* The guc control data is 10 DWORDs */
+#define GUC_CTL_CTXINFO			0
+#define   GUC_CTL_CTXNUM_IN16_SHIFT	0
+#define   GUC_CTL_BASE_ADDR_SHIFT	12
+#define GUC_CTL_ARAT_HIGH		1
+#define GUC_CTL_ARAT_LOW		2
+#define GUC_CTL_DEVICE_INFO		3
+#define   GUC_CTL_GTTYPE_SHIFT		0
+#define   GUC_CTL_COREFAMILY_SHIFT	7
+#define GUC_CTL_LOG_PARAMS		4
+#define   GUC_LOG_VALID			(1 << 0)
+#define   GUC_LOG_NOTIFY_ON_HALF_FULL	(1 << 1)
+#define   GUC_LOG_ALLOC_IN_MEGABYTE	(1 << 3)
+#define   GUC_LOG_CRASH_PAGES		1
+#define   GUC_LOG_CRASH_SHIFT		4
+#define   GUC_LOG_DPC_PAGES		3
+#define   GUC_LOG_DPC_SHIFT		6
+#define   GUC_LOG_ISR_PAGES		3
+#define   GUC_LOG_ISR_SHIFT		9
+#define   GUC_LOG_BUF_ADDR_SHIFT	12
+#define GUC_CTL_PAGE_FAULT_CONTROL	5
+#define GUC_CTL_WA			6
+#define   GUC_CTL_WA_UK_BY_DRIVER	(1 << 3)
+#define GUC_CTL_FEATURE			7
+#define   GUC_CTL_VCS2_ENABLED		(1 << 0)
+#define   GUC_CTL_KERNEL_SUBMISSIONS	(1 << 1)
+#define   GUC_CTL_FEATURE2		(1 << 2)
+#define   GUC_CTL_POWER_GATING		(1 << 3)
+#define   GUC_CTL_DISABLE_SCHEDULER	(1 << 4)
+#define   GUC_CTL_PREEMPTION_LOG	(1 << 5)
+#define   GUC_CTL_ENABLE_SLPC		(1 << 7)
+#define GUC_CTL_DEBUG			8
+#define   GUC_LOG_VERBOSITY_SHIFT	0
+#define   GUC_LOG_VERBOSITY_LOW		(0 << GUC_LOG_VERBOSITY_SHIFT)
+#define   GUC_LOG_VERBOSITY_MED		(1 << GUC_LOG_VERBOSITY_SHIFT)
+#define   GUC_LOG_VERBOSITY_HIGH	(2 << GUC_LOG_VERBOSITY_SHIFT)
+#define   GUC_LOG_VERBOSITY_ULTRA	(3 << GUC_LOG_VERBOSITY_SHIFT)
+/* Verbosity range-check limits, without the shift */
+#define	  GUC_LOG_VERBOSITY_MIN		0
+#define	  GUC_LOG_VERBOSITY_MAX		3
+
+#define GUC_CTL_MAX_DWORDS		(GUC_CTL_DEBUG + 1)
+
+struct guc_doorbell_info {
+	u32 db_status;
+	u32 cookie;
+	u32 reserved[14];
+} __packed;
+
+union guc_doorbell_qw {
+	struct {
+		u32 db_status;
+		u32 cookie;
+	};
+	u64 value_qw;
+} __packed;
+
+#define GUC_MAX_DOORBELLS		256
+#define GUC_INVALID_DOORBELL_ID		(GUC_MAX_DOORBELLS)
+
+#define GUC_DB_SIZE			(PAGE_SIZE)
+#define GUC_WQ_SIZE			(PAGE_SIZE * 2)
+
+/* Work item for submitting workloads into work queue of GuC. */
+struct guc_wq_item {
+	u32 header;
+	u32 context_desc;
+	u32 ring_tail;
+	u32 fence_id;
+} __packed;
+
+struct guc_process_desc {
+	u32 context_id;
+	u64 db_base_addr;
+	u32 head;
+	u32 tail;
+	u32 error_offset;
+	u64 wq_base_addr;
+	u32 wq_size_bytes;
+	u32 wq_status;
+	u32 engine_presence;
+	u32 priority;
+	u32 reserved[30];
+} __packed;
+
+/* engine id and context id are packed into guc_execlist_context.context_id */
+#define GUC_ELC_CTXID_OFFSET		0
+#define GUC_ELC_ENGINE_OFFSET		29
+
+/* The execlist context including software and HW information */
+struct guc_execlist_context {
+	u32 context_desc;
+	u32 context_id;
+	u32 ring_status;
+	u32 ring_lcra;
+	u32 ring_begin;
+	u32 ring_end;
+	u32 ring_next_free_location;
+	u32 ring_current_tail_pointer_value;
+	u8 engine_state_submit_value;
+	u8 engine_state_wait_value;
+	u16 pagefault_count;
+	u16 engine_submit_queue_count;
+} __packed;
+
+/* Context descriptor for communicating between uKernel and Driver */
+struct guc_context_desc {
+	u32 sched_common_area;
+	u32 context_id;
+	u32 pas_id;
+	u8 engines_used;
+	u64 db_trigger_cpu;
+	u32 db_trigger_uk;
+	u64 db_trigger_phy;
+	u16 db_id;
+
+	struct guc_execlist_context lrc[I915_NUM_RINGS];
+
+	u8 attribute;
+
+	u32 priority;
+
+	u32 wq_sampled_tail_offset;
+	u32 wq_total_submit_enqueues;
+
+	u32 process_desc;
+	u32 wq_addr;
+	u32 wq_size;
+
+	u32 engine_presence;
+
+	u32 reserved0[1];
+	u64 reserved1[1];
+
+	u64 desc_private;
+} __packed;
+
+/* This Action will be programmed in C180 - SOFT_SCRATCH_0_REG */
+enum host2guc_action {
+	HOST2GUC_ACTION_DEFAULT = 0x0,
+	HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
+	HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
+	HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+	HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
+	HOST2GUC_ACTION_LIMIT
+};
+
+/*
+ * The GuC sends its response to a command by overwriting the
+ * command in SS0. The response is distinguishable from a command
+ * by the fact that all the MASK bits are set. The remaining bits
+ * give more detail.
+ */
+#define	GUC2HOST_RESPONSE_MASK		((u32)0xF0000000)
+#define	GUC2HOST_IS_RESPONSE(x) 	((u32)(x) >= GUC2HOST_RESPONSE_MASK)
+#define	GUC2HOST_STATUS(x)		(GUC2HOST_RESPONSE_MASK | (x))
+
+/* GuC will return status back to SOFT_SCRATCH_0_REG */
+enum guc2host_status {
+	GUC2HOST_STATUS_SUCCESS = GUC2HOST_STATUS(0x0),
+	GUC2HOST_STATUS_ALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x10),
+	GUC2HOST_STATUS_DEALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x20),
+	GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000)
+};
+
+#endif
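
As an illustration of how these definitions compose, a work-queue item header is a single dword built by or-ing the WQ_* fields together. A stand-alone sketch follows; the defines are repeated so it compiles on its own, and exactly which fields the firmware requires for a given item type is an assumption here:

#include <stdint.h>
#include <stdio.h>

/* Mirrors of the definitions above, repeated for a self-contained build */
#define WQ_TYPE_SHIFT		0
#define WQ_TYPE_INORDER		(0x3 << WQ_TYPE_SHIFT)
#define WQ_TARGET_SHIFT		10
#define WQ_LEN_SHIFT		16
#define WQ_NO_WCFLUSH_WAIT	(1 << 27)

int main(void)
{
	uint32_t engine = 0;	/* hypothetical target engine id */
	uint32_t len = 3;	/* length field; exact encoding is an assumption */
	uint32_t header;

	header = WQ_TYPE_INORDER |
		 (engine << WQ_TARGET_SHIFT) |
		 (len << WQ_LEN_SHIFT) |
		 WQ_NO_WCFLUSH_WAIT;

	printf("wq_item header = 0x%08x\n", header);	/* 0x08030003 */
	return 0;
}
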
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index e97731a..dcd336b 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -174,10 +174,14 @@
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	u32 val = I915_READ(VIDEO_DIP_CTL);
 
-	if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
-		return val & VIDEO_DIP_ENABLE;
+	if ((val & VIDEO_DIP_ENABLE) == 0)
+		return false;
 
-	return false;
+	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+		return false;
+
+	return val & (VIDEO_DIP_ENABLE_AVI |
+		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
 }
 
 static void ibx_write_infoframe(struct drm_encoder *encoder,
@@ -227,10 +231,15 @@
 	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 
-	if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
-		return val & VIDEO_DIP_ENABLE;
+	if ((val & VIDEO_DIP_ENABLE) == 0)
+		return false;
 
-	return false;
+	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+		return false;
+
+	return val & (VIDEO_DIP_ENABLE_AVI |
+		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
 static void cpt_write_infoframe(struct drm_encoder *encoder,
@@ -282,7 +291,12 @@
 	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 
-	return val & VIDEO_DIP_ENABLE;
+	if ((val & VIDEO_DIP_ENABLE) == 0)
+		return false;
+
+	return val & (VIDEO_DIP_ENABLE_AVI |
+		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
 static void vlv_write_infoframe(struct drm_encoder *encoder,
@@ -332,10 +346,15 @@
 	int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 
-	if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
-		return val & VIDEO_DIP_ENABLE;
+	if ((val & VIDEO_DIP_ENABLE) == 0)
+		return false;
 
-	return false;
+	if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
+		return false;
+
+	return val & (VIDEO_DIP_ENABLE_AVI |
+		      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		      VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 }
 
 static void hsw_write_infoframe(struct drm_encoder *encoder,
@@ -383,8 +402,9 @@
 	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
 	u32 val = I915_READ(ctl_reg);
 
-	return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
-		      VIDEO_DIP_ENABLE_VS_HSW);
+	return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+		      VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+		      VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
 }
 
 /*
@@ -514,7 +534,13 @@
 	if (!enable) {
 		if (!(val & VIDEO_DIP_ENABLE))
 			return;
-		val &= ~VIDEO_DIP_ENABLE;
+		if (port != (val & VIDEO_DIP_PORT_MASK)) {
+			DRM_DEBUG_KMS("video DIP still enabled on port %c\n",
+				      (val & VIDEO_DIP_PORT_MASK) >> 29);
+			return;
+		}
+		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
 		I915_WRITE(reg, val);
 		POSTING_READ(reg);
 		return;
@@ -522,16 +548,17 @@
 
 	if (port != (val & VIDEO_DIP_PORT_MASK)) {
 		if (val & VIDEO_DIP_ENABLE) {
-			val &= ~VIDEO_DIP_ENABLE;
-			I915_WRITE(reg, val);
-			POSTING_READ(reg);
+			DRM_DEBUG_KMS("video DIP already enabled on port %c\n",
+				      (val & VIDEO_DIP_PORT_MASK) >> 29);
+			return;
 		}
 		val &= ~VIDEO_DIP_PORT_MASK;
 		val |= port;
 	}
 
 	val |= VIDEO_DIP_ENABLE;
-	val &= ~VIDEO_DIP_ENABLE_VENDOR;
+	val &= ~(VIDEO_DIP_ENABLE_AVI |
+		 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
@@ -541,6 +568,97 @@
 	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
 }
 
+static bool hdmi_sink_is_deep_color(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_connector *connector;
+
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+	/*
+	 * HDMI cloning is only supported on g4x which doesn't
+	 * support deep color or GCP infoframes anyway so no
+	 * need to worry about multiple HDMI sinks here.
+	 */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->encoder == encoder)
+			return connector->display_info.bpc > 8;
+
+	return false;
+}
+
+/*
+ * Determine if default_phase=1 can be indicated in the GCP infoframe.
+ *
+ * From HDMI specification 1.4a:
+ * - The first pixel of each Video Data Period shall always have a pixel packing phase of 0
+ * - The first pixel following each Video Data Period shall have a pixel packing phase of 0
+ * - The PP bits shall be constant for all GCPs and will be equal to the last packing phase
+ * - The first pixel following every transition of HSYNC or VSYNC shall have a pixel packing
+ *   phase of 0
+ */
+static bool gcp_default_phase_possible(int pipe_bpp,
+				       const struct drm_display_mode *mode)
+{
+	unsigned int pixels_per_group;
+
+	switch (pipe_bpp) {
+	case 30:
+		/* 4 pixels in 5 clocks */
+		pixels_per_group = 4;
+		break;
+	case 36:
+		/* 2 pixels in 3 clocks */
+		pixels_per_group = 2;
+		break;
+	case 48:
+		/* 1 pixel in 2 clocks */
+		pixels_per_group = 1;
+		break;
+	default:
+		/* phase information not relevant for 8bpc */
+		return false;
+	}
+
+	return mode->crtc_hdisplay % pixels_per_group == 0 &&
+		mode->crtc_htotal % pixels_per_group == 0 &&
+		mode->crtc_hblank_start % pixels_per_group == 0 &&
+		mode->crtc_hblank_end % pixels_per_group == 0 &&
+		mode->crtc_hsync_start % pixels_per_group == 0 &&
+		mode->crtc_hsync_end % pixels_per_group == 0 &&
+		((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0 ||
+		 mode->crtc_htotal/2 % pixels_per_group == 0);
+}
+
+static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+	u32 reg, val = 0;
+
+	if (HAS_DDI(dev_priv))
+		reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
+	else if (IS_VALLEYVIEW(dev_priv))
+		reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
+	else if (HAS_PCH_SPLIT(dev_priv->dev))
+		reg = TVIDEO_DIP_GCP(crtc->pipe);
+	else
+		return false;
+
+	/* Indicate color depth whenever the sink supports deep color */
+	if (hdmi_sink_is_deep_color(encoder))
+		val |= GCP_COLOR_INDICATION;
+
+	/* Enable default_phase whenever the display mode is suitably aligned */
+	if (gcp_default_phase_possible(crtc->config->pipe_bpp,
+				       &crtc->config->base.adjusted_mode))
+		val |= GCP_DEFAULT_PHASE_ENABLE;
+
+	I915_WRITE(reg, val);
+
+	return val != 0;
+}
+
 static void ibx_set_infoframes(struct drm_encoder *encoder,
 			       bool enable,
 			       struct drm_display_mode *adjusted_mode)
@@ -561,25 +679,29 @@
 	if (!enable) {
 		if (!(val & VIDEO_DIP_ENABLE))
 			return;
-		val &= ~VIDEO_DIP_ENABLE;
+		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+			 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 		I915_WRITE(reg, val);
 		POSTING_READ(reg);
 		return;
 	}
 
 	if (port != (val & VIDEO_DIP_PORT_MASK)) {
-		if (val & VIDEO_DIP_ENABLE) {
-			val &= ~VIDEO_DIP_ENABLE;
-			I915_WRITE(reg, val);
-			POSTING_READ(reg);
-		}
+		WARN(val & VIDEO_DIP_ENABLE,
+		     "DIP already enabled on port %c\n",
+		     (val & VIDEO_DIP_PORT_MASK) >> 29);
 		val &= ~VIDEO_DIP_PORT_MASK;
 		val |= port;
 	}
 
 	val |= VIDEO_DIP_ENABLE;
-	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
-		 VIDEO_DIP_ENABLE_GCP);
+	val &= ~(VIDEO_DIP_ENABLE_AVI |
+		 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+	if (intel_hdmi_set_gcp_infoframe(encoder))
+		val |= VIDEO_DIP_ENABLE_GCP;
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
@@ -607,7 +729,9 @@
 	if (!enable) {
 		if (!(val & VIDEO_DIP_ENABLE))
 			return;
-		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
+		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+			 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 		I915_WRITE(reg, val);
 		POSTING_READ(reg);
 		return;
@@ -616,7 +740,10 @@
 	/* Set both together, unset both together: see the spec. */
 	val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
 	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
-		 VIDEO_DIP_ENABLE_GCP);
+		 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+	if (intel_hdmi_set_gcp_infoframe(encoder))
+		val |= VIDEO_DIP_ENABLE_GCP;
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
@@ -646,25 +773,29 @@
 	if (!enable) {
 		if (!(val & VIDEO_DIP_ENABLE))
 			return;
-		val &= ~VIDEO_DIP_ENABLE;
+		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
+			 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+			 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 		I915_WRITE(reg, val);
 		POSTING_READ(reg);
 		return;
 	}
 
 	if (port != (val & VIDEO_DIP_PORT_MASK)) {
-		if (val & VIDEO_DIP_ENABLE) {
-			val &= ~VIDEO_DIP_ENABLE;
-			I915_WRITE(reg, val);
-			POSTING_READ(reg);
-		}
+		WARN(val & VIDEO_DIP_ENABLE,
+		     "DIP already enabled on port %c\n",
+		     (val & VIDEO_DIP_PORT_MASK) >> 29);
 		val &= ~VIDEO_DIP_PORT_MASK;
 		val |= port;
 	}
 
 	val |= VIDEO_DIP_ENABLE;
-	val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR |
-		 VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_GCP);
+	val &= ~(VIDEO_DIP_ENABLE_AVI |
+		 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
+
+	if (intel_hdmi_set_gcp_infoframe(encoder))
+		val |= VIDEO_DIP_ENABLE_GCP;
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
@@ -686,14 +817,18 @@
 
 	assert_hdmi_port_disabled(intel_hdmi);
 
+	val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+		 VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+		 VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
+
 	if (!enable) {
-		I915_WRITE(reg, 0);
+		I915_WRITE(reg, val);
 		POSTING_READ(reg);
 		return;
 	}
 
-	val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
-		 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
+	if (intel_hdmi_set_gcp_infoframe(encoder))
+		val |= VIDEO_DIP_ENABLE_GCP_HSW;
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
@@ -808,58 +943,146 @@
 	else
 		dotclock = pipe_config->port_clock;
 
+	if (pipe_config->pixel_multiplier)
+		dotclock /= pipe_config->pixel_multiplier;
+
 	if (HAS_PCH_SPLIT(dev_priv->dev))
 		ironlake_check_encoder_dotclock(pipe_config, dotclock);
 
 	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
 }
 
-static void intel_enable_hdmi(struct intel_encoder *encoder)
+static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
+{
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+	WARN_ON(!crtc->config->has_hdmi_sink);
+	DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
+			 pipe_name(crtc->pipe));
+	intel_audio_codec_enable(encoder);
+}
+
+static void g4x_enable_hdmi(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	u32 temp;
-	u32 enable_bits = SDVO_ENABLE;
-
-	if (intel_crtc->config->has_audio)
-		enable_bits |= SDVO_AUDIO_ENABLE;
 
 	temp = I915_READ(intel_hdmi->hdmi_reg);
 
-	/* HW workaround for IBX, we need to move the port to transcoder A
-	 * before disabling it, so restore the transcoder select bit here. */
-	if (HAS_PCH_IBX(dev))
-		enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe);
-
-	/* HW workaround, need to toggle enable bit off and on for 12bpc, but
-	 * we do this anyway which shows more stable in testing.
-	 */
-	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
-		POSTING_READ(intel_hdmi->hdmi_reg);
-	}
-
-	temp |= enable_bits;
+	temp |= SDVO_ENABLE;
+	if (crtc->config->has_audio)
+		temp |= SDVO_AUDIO_ENABLE;
 
 	I915_WRITE(intel_hdmi->hdmi_reg, temp);
 	POSTING_READ(intel_hdmi->hdmi_reg);
 
-	/* HW workaround, need to write this twice for issue that may result
-	 * in first write getting masked.
+	if (crtc->config->has_audio)
+		intel_enable_hdmi_audio(encoder);
+}
+
+static void ibx_enable_hdmi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	u32 temp;
+
+	temp = I915_READ(intel_hdmi->hdmi_reg);
+
+	temp |= SDVO_ENABLE;
+	if (crtc->config->has_audio)
+		temp |= SDVO_AUDIO_ENABLE;
+
+	/*
+	 * HW workaround, need to write this twice for issue
+	 * that may result in first write getting masked.
 	 */
-	if (HAS_PCH_SPLIT(dev)) {
+	I915_WRITE(intel_hdmi->hdmi_reg, temp);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+	I915_WRITE(intel_hdmi->hdmi_reg, temp);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+
+	/*
+	 * HW workaround, need to toggle enable bit off and on
+	 * for 12bpc with pixel repeat.
+	 *
+	 * FIXME: BSpec says this should be done at the end of
+	 * the modeset sequence, so this may be too soon here.
+	 */
+	if (crtc->config->pipe_bpp > 24 &&
+	    crtc->config->pixel_multiplier > 1) {
+		I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
+		POSTING_READ(intel_hdmi->hdmi_reg);
+
+		/*
+		 * HW workaround, need to write this twice for issue
+		 * that may result in first write getting masked.
+		 */
+		I915_WRITE(intel_hdmi->hdmi_reg, temp);
+		POSTING_READ(intel_hdmi->hdmi_reg);
 		I915_WRITE(intel_hdmi->hdmi_reg, temp);
 		POSTING_READ(intel_hdmi->hdmi_reg);
 	}
 
-	if (intel_crtc->config->has_audio) {
-		WARN_ON(!intel_crtc->config->has_hdmi_sink);
-		DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
-				 pipe_name(intel_crtc->pipe));
-		intel_audio_codec_enable(encoder);
+	if (crtc->config->has_audio)
+		intel_enable_hdmi_audio(encoder);
+}
+
+static void cpt_enable_hdmi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	enum pipe pipe = crtc->pipe;
+	u32 temp;
+
+	temp = I915_READ(intel_hdmi->hdmi_reg);
+
+	temp |= SDVO_ENABLE;
+	if (crtc->config->has_audio)
+		temp |= SDVO_AUDIO_ENABLE;
+
+	/*
+	 * WaEnableHDMI8bpcBefore12bpc:snb,ivb
+	 *
+	 * The procedure for 12bpc is as follows:
+	 * 1. disable HDMI clock gating
+	 * 2. enable HDMI with 8bpc
+	 * 3. enable HDMI with 12bpc
+	 * 4. enable HDMI clock gating
+	 */
+
+	if (crtc->config->pipe_bpp > 24) {
+		I915_WRITE(TRANS_CHICKEN1(pipe),
+			   I915_READ(TRANS_CHICKEN1(pipe)) |
+			   TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+
+		temp &= ~SDVO_COLOR_FORMAT_MASK;
+		temp |= SDVO_COLOR_FORMAT_8bpc;
 	}
+
+	I915_WRITE(intel_hdmi->hdmi_reg, temp);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+
+	if (crtc->config->pipe_bpp > 24) {
+		temp &= ~SDVO_COLOR_FORMAT_MASK;
+		temp |= HDMI_COLOR_FORMAT_12bpc;
+
+		I915_WRITE(intel_hdmi->hdmi_reg, temp);
+		POSTING_READ(intel_hdmi->hdmi_reg);
+
+		I915_WRITE(TRANS_CHICKEN1(pipe),
+			   I915_READ(TRANS_CHICKEN1(pipe)) &
+			   ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+	}
+
+	if (crtc->config->has_audio)
+		intel_enable_hdmi_audio(encoder);
 }
 
 static void vlv_enable_hdmi(struct intel_encoder *encoder)
@@ -901,6 +1124,8 @@
 		I915_WRITE(intel_hdmi->hdmi_reg, temp);
 		POSTING_READ(intel_hdmi->hdmi_reg);
 	}
+
+	intel_hdmi->set_infoframes(&encoder->base, false, NULL);
 }
 
 static void g4x_disable_hdmi(struct intel_encoder *encoder)
@@ -926,7 +1151,7 @@
 	intel_disable_hdmi(encoder);
 }
 
-static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
+static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
 {
 	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
@@ -939,24 +1164,51 @@
 }
 
 static enum drm_mode_status
+hdmi_port_clock_valid(struct intel_hdmi *hdmi,
+		      int clock, bool respect_dvi_limit)
+{
+	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
+	if (clock < 25000)
+		return MODE_CLOCK_LOW;
+	if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit))
+		return MODE_CLOCK_HIGH;
+
+	/* BXT DPLL can't generate 223-240 MHz */
+	if (IS_BROXTON(dev) && clock > 223333 && clock < 240000)
+		return MODE_CLOCK_RANGE;
+
+	/* CHV DPLL can't generate 216-240 MHz */
+	if (IS_CHERRYVIEW(dev) && clock > 216000 && clock < 240000)
+		return MODE_CLOCK_RANGE;
+
+	return MODE_OK;
+}
+
+static enum drm_mode_status
 intel_hdmi_mode_valid(struct drm_connector *connector,
 		      struct drm_display_mode *mode)
 {
-	int clock = mode->clock;
-
-	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
-		clock *= 2;
-
-	if (clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
-					 true))
-		return MODE_CLOCK_HIGH;
-	if (clock < 20000)
-		return MODE_CLOCK_LOW;
+	struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+	enum drm_mode_status status;
+	int clock;
 
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return MODE_NO_DBLESCAN;
 
-	return MODE_OK;
+	clock = mode->clock;
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		clock *= 2;
+
+	/* check if we can do 8bpc */
+	status = hdmi_port_clock_valid(hdmi, clock, true);
+
+	/* if we can't do 8bpc we may still be able to do 12bpc */
+	if (!HAS_GMCH_DISPLAY(dev) && status != MODE_OK)
+		status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true);
+
+	return status;
 }
 
 static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
@@ -997,8 +1249,8 @@
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	int clock_12bpc = pipe_config->base.adjusted_mode.crtc_clock * 3 / 2;
-	int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
+	int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
+	int clock_12bpc = clock_8bpc * 3 / 2;
 	int desired_bpp;
 
 	pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
@@ -1017,6 +1269,8 @@
 
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
 		pipe_config->pixel_multiplier = 2;
+		clock_8bpc *= 2;
+		clock_12bpc *= 2;
 	}
 
 	if (intel_hdmi->color_range)
@@ -1035,9 +1289,8 @@
 	 * within limits.
 	 */
 	if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
-	    clock_12bpc <= portclock_limit &&
-	    hdmi_12bpc_possible(pipe_config) &&
-	    0 /* FIXME 12bpc support totally broken */) {
+	    hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK &&
+	    hdmi_12bpc_possible(pipe_config)) {
 		DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
 		desired_bpp = 12*3;
 
@@ -1046,6 +1299,8 @@
 	} else {
 		DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
 		desired_bpp = 8*3;
+
+		pipe_config->port_clock = clock_8bpc;
 	}
 
 	if (!pipe_config->bw_constrained) {
@@ -1053,8 +1308,9 @@
 		pipe_config->pipe_bpp = desired_bpp;
 	}
 
-	if (adjusted_mode->crtc_clock > portclock_limit) {
-		DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
+	if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
+				  false) != MODE_OK) {
+		DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
 		return false;
 	}
 
@@ -1323,7 +1579,7 @@
 				   intel_crtc->config->has_hdmi_sink,
 				   adjusted_mode);
 
-	intel_enable_hdmi(encoder);
+	g4x_enable_hdmi(encoder);
 
 	vlv_wait_port_ready(dev_priv, dport, 0x0);
 }
@@ -1640,7 +1896,7 @@
 				   intel_crtc->config->has_hdmi_sink,
 				   adjusted_mode);
 
-	intel_enable_hdmi(encoder);
+	g4x_enable_hdmi(encoder);
 
 	vlv_wait_port_ready(dev_priv, dport, 0x0);
 }
@@ -1653,7 +1909,7 @@
 }
 
 static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_hdmi_detect,
 	.force = intel_hdmi_force,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1702,6 +1958,7 @@
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum port port = intel_dig_port->port;
+	uint8_t alternate_ddc_pin;
 
 	drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
 			   DRM_MODE_CONNECTOR_HDMIA);
@@ -1735,6 +1992,26 @@
 			intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
 		intel_encoder->hpd_pin = HPD_PORT_D;
 		break;
+	case PORT_E:
+		/* On SKL, PORT E doesn't have a separate GMBUS pin;
+		 * we rely on VBT to set a proper alternate GMBUS pin. */
+		alternate_ddc_pin =
+			dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
+		switch (alternate_ddc_pin) {
+		case DDC_PIN_B:
+			intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
+			break;
+		case DDC_PIN_C:
+			intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
+			break;
+		case DDC_PIN_D:
+			intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
+			break;
+		default:
+			MISSING_CASE(alternate_ddc_pin);
+		}
+		intel_encoder->hpd_pin = HPD_PORT_E;
+		break;
 	case PORT_A:
 		intel_encoder->hpd_pin = HPD_PORT_A;
 		/* Internal port only for eDP. */
@@ -1827,7 +2104,12 @@
 		intel_encoder->post_disable = vlv_hdmi_post_disable;
 	} else {
 		intel_encoder->pre_enable = intel_hdmi_pre_enable;
-		intel_encoder->enable = intel_enable_hdmi;
+		if (HAS_PCH_CPT(dev))
+			intel_encoder->enable = cpt_enable_hdmi;
+		else if (HAS_PCH_IBX(dev))
+			intel_encoder->enable = ibx_enable_hdmi;
+		else
+			intel_encoder->enable = g4x_enable_hdmi;
 	}
 
 	intel_encoder->type = INTEL_OUTPUT_HDMI;
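
The default-phase test in gcp_default_phase_possible() above is pure divisibility arithmetic, so it can be sanity-checked outside the driver. A stand-alone restatement; the interlace clause is omitted for brevity, and the 1080p timings in main() are just sample numbers:

#include <stdbool.h>
#include <stdio.h>

/* Deep color packs pixel groups across clocks: 30bpp is 4 pixels in
 * 5 clocks, 36bpp is 2 in 3, 48bpp is 1 in 2. default_phase can only
 * be signalled when every horizontal timing value is group-aligned. */
static bool default_phase_possible(int pipe_bpp, int hdisplay, int htotal,
				   int hblank_start, int hblank_end,
				   int hsync_start, int hsync_end)
{
	int group;

	switch (pipe_bpp) {
	case 30: group = 4; break;
	case 36: group = 2; break;
	case 48: group = 1; break;
	default: return false;	/* 8bpc carries no packing phase */
	}

	return hdisplay % group == 0 && htotal % group == 0 &&
	       hblank_start % group == 0 && hblank_end % group == 0 &&
	       hsync_start % group == 0 && hsync_end % group == 0;
}

int main(void)
{
	/* CEA 1080p60: every horizontal timing value is a multiple of 4 */
	printf("possible = %d\n",
	       default_phase_possible(30, 1920, 2200, 1920, 2200,
				      2008, 2052));
	return 0;
}
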
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
new file mode 100644
index 0000000..53c0173
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -0,0 +1,508 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/**
+ * DOC: Hotplug
+ *
+ * Simply put, hotplug occurs when a display is connected to or disconnected
+ * from the system. However, there may be adapters and docking stations and
+ * Display Port short pulses and MST devices involved, complicating matters.
+ *
+ * Hotplug in i915 is handled at many different levels of abstraction.
+ *
+ * The platform dependent interrupt handling code in i915_irq.c enables,
+ * disables, and does preliminary handling of the interrupts. The interrupt
+ * handlers gather the hotplug detect (HPD) information from relevant registers
+ * into a platform independent mask of hotplug pins that have fired.
+ *
+ * The platform independent interrupt handler intel_hpd_irq_handler() in
+ * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
+ * further processing to appropriate bottom halves (Display Port specific and
+ * regular hotplug).
+ *
+ * The Display Port work function i915_digport_work_func() calls into
+ * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
+ * pulses, with failures and non-MST long pulses triggering regular hotplug
+ * processing on the connector.
+ *
+ * The regular hotplug work function i915_hotplug_work_func() calls connector
+ * detect hooks, and, if connector status changes, triggers sending of hotplug
+ * uevent to userspace via drm_kms_helper_hotplug_event().
+ *
+ * Finally, userspace is responsible for triggering a modeset upon receiving
+ * the hotplug uevent, disabling or enabling the crtc as needed.
+ *
+ * The hotplug interrupt storm detection and mitigation code keeps track of the
+ * number of interrupts per hotplug pin over a period of time, and if the number
+ * of interrupts exceeds a certain threshold, the interrupt is disabled for a
+ * while before being re-enabled. The intention is to mitigate issues arising
+ * from broken hardware triggering massive amounts of interrupts and grinding
+ * the system to a halt.
+ *
+ * The current implementation expects that a hotplug interrupt storm will not
+ * be seen when a display port sink is connected, so on platforms whose DP
+ * callback is handled by i915_digport_work_func, re-enabling of hpd is not
+ * performed (it was never expected to be disabled in the first place ;) ).
+ * This is specific to DP sinks handled by that routine; any other display,
+ * such as HDMI or DVI, enabled on the same port will use
+ * i915_hotplug_work_func, where this logic is handled.
+ */
+
+bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port)
+{
+	switch (pin) {
+	case HPD_PORT_A:
+		*port = PORT_A;
+		return true;
+	case HPD_PORT_B:
+		*port = PORT_B;
+		return true;
+	case HPD_PORT_C:
+		*port = PORT_C;
+		return true;
+	case HPD_PORT_D:
+		*port = PORT_D;
+		return true;
+	case HPD_PORT_E:
+		*port = PORT_E;
+		return true;
+	default:
+		return false;	/* no hpd */
+	}
+}
+
+#define HPD_STORM_DETECT_PERIOD		1000
+#define HPD_STORM_THRESHOLD		5
+#define HPD_STORM_REENABLE_DELAY	(2 * 60 * 1000)
+
+/**
+ * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin
+ * @dev_priv: private driver data pointer
+ * @pin: the pin to gather stats on
+ *
+ * Gather stats about HPD irqs from the specified @pin, and detect irq
+ * storms. Only the pin specific stats and state are changed, the caller is
+ * responsible for further action.
+ *
+ * @HPD_STORM_THRESHOLD irqs are allowed within @HPD_STORM_DETECT_PERIOD ms,
+ * otherwise it's considered an irq storm, and the irq state is set to
+ * @HPD_MARK_DISABLED.
+ *
+ * Return true if an irq storm was detected on @pin.
+ */
+static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
+				       enum hpd_pin pin)
+{
+	unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
+	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
+	bool storm = false;
+
+	if (!time_in_range(jiffies, start, end)) {
+		dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
+		dev_priv->hotplug.stats[pin].count = 0;
+		DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
+	} else if (dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD) {
+		dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
+		DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
+		storm = true;
+	} else {
+		dev_priv->hotplug.stats[pin].count++;
+		DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
+			      dev_priv->hotplug.stats[pin].count);
+	}
+
+	return storm;
+}
+
+static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_connector *intel_connector;
+	struct intel_encoder *intel_encoder;
+	struct drm_connector *connector;
+	enum hpd_pin pin;
+	bool hpd_disabled = false;
+
+	assert_spin_locked(&dev_priv->irq_lock);
+
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		if (connector->polled != DRM_CONNECTOR_POLL_HPD)
+			continue;
+
+		intel_connector = to_intel_connector(connector);
+		intel_encoder = intel_connector->encoder;
+		if (!intel_encoder)
+			continue;
+
+		pin = intel_encoder->hpd_pin;
+		if (pin == HPD_NONE ||
+		    dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
+			continue;
+
+		DRM_INFO("HPD interrupt storm detected on connector %s: "
+			 "switching from hotplug detection to polling\n",
+			 connector->name);
+
+		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT
+			| DRM_CONNECTOR_POLL_DISCONNECT;
+		hpd_disabled = true;
+	}
+
+	/* Enable polling and queue hotplug re-enabling. */
+	if (hpd_disabled) {
+		drm_kms_helper_poll_enable(dev);
+		mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
+				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
+	}
+}
+
+static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv),
+			     hotplug.reenable_work.work);
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	int i;
+
+	intel_runtime_pm_get(dev_priv);
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	for_each_hpd_pin(i) {
+		struct drm_connector *connector;
+
+		if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
+			continue;
+
+		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+
+		list_for_each_entry(connector, &mode_config->connector_list, head) {
+			struct intel_connector *intel_connector = to_intel_connector(connector);
+
+			if (intel_connector->encoder->hpd_pin == i) {
+				if (connector->polled != intel_connector->polled)
+					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
+							 connector->name);
+				connector->polled = intel_connector->polled;
+				if (!connector->polled)
+					connector->polled = DRM_CONNECTOR_POLL_HPD;
+			}
+		}
+	}
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	intel_runtime_pm_put(dev_priv);
+}
+
+static bool intel_hpd_irq_event(struct drm_device *dev,
+				struct drm_connector *connector)
+{
+	enum drm_connector_status old_status;
+
+	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+	old_status = connector->status;
+
+	connector->status = connector->funcs->detect(connector, false);
+	if (old_status == connector->status)
+		return false;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
+		      connector->base.id,
+		      connector->name,
+		      drm_get_connector_status_name(old_status),
+		      drm_get_connector_status_name(connector->status));
+
+	return true;
+}
+
+static void i915_digport_work_func(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private, hotplug.dig_port_work);
+	u32 long_port_mask, short_port_mask;
+	struct intel_digital_port *intel_dig_port;
+	int i;
+	u32 old_bits = 0;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	long_port_mask = dev_priv->hotplug.long_port_mask;
+	dev_priv->hotplug.long_port_mask = 0;
+	short_port_mask = dev_priv->hotplug.short_port_mask;
+	dev_priv->hotplug.short_port_mask = 0;
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	for (i = 0; i < I915_MAX_PORTS; i++) {
+		bool valid = false;
+		bool long_hpd = false;
+		intel_dig_port = dev_priv->hotplug.irq_port[i];
+		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
+			continue;
+
+		if (long_port_mask & (1 << i))  {
+			valid = true;
+			long_hpd = true;
+		} else if (short_port_mask & (1 << i))
+			valid = true;
+
+		if (valid) {
+			enum irqreturn ret;
+
+			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
+			if (ret == IRQ_NONE) {
+				/* fall back to old school hpd */
+				old_bits |= (1 << intel_dig_port->base.hpd_pin);
+			}
+		}
+	}
+
+	if (old_bits) {
+		spin_lock_irq(&dev_priv->irq_lock);
+		dev_priv->hotplug.event_bits |= old_bits;
+		spin_unlock_irq(&dev_priv->irq_lock);
+		schedule_work(&dev_priv->hotplug.hotplug_work);
+	}
+}
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void i915_hotplug_work_func(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private, hotplug.hotplug_work);
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_connector *intel_connector;
+	struct intel_encoder *intel_encoder;
+	struct drm_connector *connector;
+	bool changed = false;
+	u32 hpd_event_bits;
+
+	mutex_lock(&mode_config->mutex);
+	DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
+	spin_lock_irq(&dev_priv->irq_lock);
+
+	hpd_event_bits = dev_priv->hotplug.event_bits;
+	dev_priv->hotplug.event_bits = 0;
+
+	/* Disable hotplug on connectors that hit an irq storm. */
+	intel_hpd_irq_storm_disable(dev_priv);
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		intel_connector = to_intel_connector(connector);
+		if (!intel_connector->encoder)
+			continue;
+		intel_encoder = intel_connector->encoder;
+		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
+				      connector->name, intel_encoder->hpd_pin);
+			if (intel_encoder->hot_plug)
+				intel_encoder->hot_plug(intel_encoder);
+			if (intel_hpd_irq_event(dev, connector))
+				changed = true;
+		}
+	}
+	mutex_unlock(&mode_config->mutex);
+
+	if (changed)
+		drm_kms_helper_hotplug_event(dev);
+}
+
+
+/**
+ * intel_hpd_irq_handler - main hotplug irq handler
+ * @dev: drm device
+ * @pin_mask: a mask of hpd pins that have triggered the irq
+ * @long_mask: a mask of hpd pins that may be long hpd pulses
+ *
+ * This is the main hotplug irq handler for all platforms. The platform specific
+ * irq handlers call the platform specific hotplug irq handlers, which read and
+ * decode the appropriate registers into bitmasks about hpd pins that have
+ * triggered (@pin_mask), and which of those pins may be long pulses
+ * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
+ * is not a digital port.
+ *
+ * Here, we do hotplug irq storm detection and mitigation, and pass further
+ * processing to appropriate bottom halves.
+ */
+void intel_hpd_irq_handler(struct drm_device *dev,
+			   u32 pin_mask, u32 long_mask)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+	enum port port;
+	bool storm_detected = false;
+	bool queue_dig = false, queue_hp = false;
+	bool is_dig_port;
+
+	if (!pin_mask)
+		return;
+
+	spin_lock(&dev_priv->irq_lock);
+	for_each_hpd_pin(i) {
+		if (!(BIT(i) & pin_mask))
+			continue;
+
+		is_dig_port = intel_hpd_pin_to_port(i, &port) &&
+			      dev_priv->hotplug.irq_port[port];
+
+		if (is_dig_port) {
+			bool long_hpd = long_mask & BIT(i);
+
+			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
+					 long_hpd ? "long" : "short");
+			/*
+			 * For long HPD pulses we want to have the digital queue happen,
+			 * but we still want HPD storm detection to function.
+			 */
+			queue_dig = true;
+			if (long_hpd) {
+				dev_priv->hotplug.long_port_mask |= (1 << port);
+			} else {
+				/* for short HPD just trigger the digital queue */
+				dev_priv->hotplug.short_port_mask |= (1 << port);
+				continue;
+			}
+		}
+
+		if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) {
+			/*
+			 * On GMCH platforms the interrupt mask bits only
+			 * prevent irq generation, not the setting of the
+			 * hotplug bits itself. So only WARN about unexpected
+			 * interrupts on saner platforms.
+			 */
+			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
+				  "Received HPD interrupt on pin %d although disabled\n", i);
+			continue;
+		}
+
+		if (dev_priv->hotplug.stats[i].state != HPD_ENABLED)
+			continue;
+
+		if (!is_dig_port) {
+			dev_priv->hotplug.event_bits |= BIT(i);
+			queue_hp = true;
+		}
+
+		if (intel_hpd_irq_storm_detect(dev_priv, i)) {
+			dev_priv->hotplug.event_bits &= ~BIT(i);
+			storm_detected = true;
+		}
+	}
+
+	if (storm_detected)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock(&dev_priv->irq_lock);
+
+	/*
+	 * Our hotplug handler can grab modeset locks (by calling down into the
+	 * fb helpers). Hence it must not be run on our own dev_priv->wq work
+	 * queue, as otherwise the flush_work in the pageflip code will
+	 * deadlock.
+	 */
+	if (queue_dig)
+		queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
+	if (queue_hp)
+		schedule_work(&dev_priv->hotplug.hotplug_work);
+}
+
+/**
+ * intel_hpd_init - initializes and enables hpd support
+ * @dev_priv: i915 device instance
+ *
+ * This function enables the hotplug support. It requires that interrupts have
+ * already been enabled with intel_irq_init_hw(). From this point on hotplug and
+ * poll requests can run concurrently with other code, so locking rules must be
+ * obeyed.
+ *
+ * This is a separate step from interrupt enabling to simplify the locking rules
+ * in the driver load and resume code.
+ */
+void intel_hpd_init(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+	int i;
+
+	for_each_hpd_pin(i) {
+		dev_priv->hotplug.stats[i].count = 0;
+		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+	}
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		struct intel_connector *intel_connector = to_intel_connector(connector);
+
+		connector->polled = intel_connector->polled;
+		if (connector->encoder && !connector->polled &&
+		    I915_HAS_HOTPLUG(dev) &&
+		    intel_connector->encoder->hpd_pin > HPD_NONE)
+			connector->polled = DRM_CONNECTOR_POLL_HPD;
+		if (intel_connector->mst_port)
+			connector->polled = DRM_CONNECTOR_POLL_HPD;
+	}
+
+	/*
+	 * Interrupt setup is already guaranteed to be single-threaded, this is
+	 * just to make the assert_spin_locked checks happy.
+	 */
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void intel_hpd_init_work(struct drm_i915_private *dev_priv)
+{
+	INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
+	INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
+	INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
+			  intel_hpd_irq_storm_reenable_work);
+}
+
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->irq_lock);
+
+	dev_priv->hotplug.long_port_mask = 0;
+	dev_priv->hotplug.short_port_mask = 0;
+	dev_priv->hotplug.event_bits = 0;
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	cancel_work_sync(&dev_priv->hotplug.dig_port_work);
+	cancel_work_sync(&dev_priv->hotplug.hotplug_work);
+	cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
+}
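
The storm detection in intel_hpd_irq_storm_detect() above boils down to a counting window: HPD_STORM_DETECT_PERIOD ms opens at the first interrupt on a pin, and more than HPD_STORM_THRESHOLD interrupts inside it flag a storm. A user-space model of that logic, with plain millisecond timestamps standing in for jiffies (a sketch under those assumptions):

#include <stdbool.h>
#include <stdio.h>

#define STORM_DETECT_PERIOD_MS	1000
#define STORM_THRESHOLD		5

struct pin_stats {
	unsigned long window_start_ms;	/* models stats[pin].last_jiffies */
	int count;			/* models stats[pin].count */
};

static bool storm_detect(struct pin_stats *s, unsigned long now_ms)
{
	if (now_ms - s->window_start_ms > STORM_DETECT_PERIOD_MS) {
		s->window_start_ms = now_ms;	/* window expired, restart */
		s->count = 0;
		return false;
	}
	if (s->count > STORM_THRESHOLD)
		return true;			/* this pin is storming */
	s->count++;
	return false;
}

int main(void)
{
	struct pin_stats s = { 0, 0 };
	bool storm = false;

	/* 8 interrupts in 80 ms: the 7th exceeds the threshold of 5 */
	for (unsigned long t = 0; t < 80; t += 10)
		storm |= storm_detect(&s, t);
	printf("storm=%d\n", storm);
	return 0;
}
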
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7f2161a..72e0edd 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -135,6 +135,7 @@
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "intel_mocs.h"
 
 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
@@ -190,9 +191,7 @@
 #define GEN8_CTX_PRIVILEGE (1<<8)
 
 #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \
-	const u64 _addr = test_bit(n, ppgtt->pdp.used_pdpes) ? \
-		ppgtt->pdp.page_directory[n]->daddr : \
-		ppgtt->scratch_pd->daddr; \
+	const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n));	\
 	reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
 	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
 }
@@ -211,9 +210,9 @@
 	FAULT_AND_CONTINUE /* Unsupported */
 };
 #define GEN8_CTX_ID_SHIFT 32
+#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT  0x17
 
-static int intel_lr_context_pin(struct intel_engine_cs *ring,
-		struct intel_context *ctx);
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -263,10 +262,11 @@
 	return lrca >> 12;
 }
 
-static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
-					 struct drm_i915_gem_object *ctx_obj)
+static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
 {
+	struct intel_engine_cs *ring = rq->ring;
 	struct drm_device *dev = ring->dev;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
 	uint64_t desc;
 	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
 
@@ -294,55 +294,59 @@
 	return desc;
 }
 
-static void execlists_elsp_write(struct intel_engine_cs *ring,
-				 struct drm_i915_gem_object *ctx_obj0,
-				 struct drm_i915_gem_object *ctx_obj1)
+static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
+				 struct drm_i915_gem_request *rq1)
 {
+	struct intel_engine_cs *ring = rq0->ring;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint64_t temp = 0;
-	uint32_t desc[4];
+	uint64_t desc[2];
 
-	/* XXX: You must always write both descriptors in the order below. */
-	if (ctx_obj1)
-		temp = execlists_ctx_descriptor(ring, ctx_obj1);
-	else
-		temp = 0;
-	desc[1] = (u32)(temp >> 32);
-	desc[0] = (u32)temp;
+	if (rq1) {
+		desc[1] = execlists_ctx_descriptor(rq1);
+		rq1->elsp_submitted++;
+	} else {
+		desc[1] = 0;
+	}
 
-	temp = execlists_ctx_descriptor(ring, ctx_obj0);
-	desc[3] = (u32)(temp >> 32);
-	desc[2] = (u32)temp;
+	desc[0] = execlists_ctx_descriptor(rq0);
+	rq0->elsp_submitted++;
 
+	/* You must always write both descriptors in the order below. */
 	spin_lock(&dev_priv->uncore.lock);
 	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
-	I915_WRITE_FW(RING_ELSP(ring), desc[1]);
-	I915_WRITE_FW(RING_ELSP(ring), desc[0]);
-	I915_WRITE_FW(RING_ELSP(ring), desc[3]);
+	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
+	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
 
+	I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
 	/* The context is automatically loaded after the following */
-	I915_WRITE_FW(RING_ELSP(ring), desc[2]);
+	I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
 
-	/* ELSP is a wo register, so use another nearby reg for posting instead */
+	/* ELSP is a write-only register, so use another nearby reg for posting */
 	POSTING_READ_FW(RING_EXECLIST_STATUS(ring));
 	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 	spin_unlock(&dev_priv->uncore.lock);
 }
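
A minimal standalone sketch of the descriptor split performed above (the descriptor value is hypothetical): each 64-bit descriptor becomes two 32-bit ELSP writes, high dword first, with element 1 written before element 0 because the final write triggers the load.

#include <stdint.h>
#include <stdio.h>

/* userspace stand-ins for the kernel helpers used above */
#define upper_32_bits(n) ((uint32_t)((n) >> 32))
#define lower_32_bits(n) ((uint32_t)(n))

int main(void)
{
	uint64_t desc = 0x0000000212345000ULL;	/* hypothetical descriptor */

	/* one ELSP element: high dword first, then the low dword */
	printf("%08x %08x\n", upper_32_bits(desc), lower_32_bits(desc));
	return 0;
}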
 
-static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
-				    struct drm_i915_gem_object *ring_obj,
-				    struct i915_hw_ppgtt *ppgtt,
-				    u32 tail)
+static int execlists_update_context(struct drm_i915_gem_request *rq)
 {
+	struct intel_engine_cs *ring = rq->ring;
+	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
 	struct page *page;
 	uint32_t *reg_state;
 
+	BUG_ON(!ctx_obj);
+	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
+	WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
+
 	page = i915_gem_object_get_page(ctx_obj, 1);
 	reg_state = kmap_atomic(page);
 
-	reg_state[CTX_RING_TAIL+1] = tail;
-	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
+	reg_state[CTX_RING_TAIL+1] = rq->tail;
+	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
 
 	/* True PPGTT with dynamic page allocation: update PDP registers and
 	 * point the unallocated PDPs to the scratch page
@@ -359,32 +363,15 @@
 	return 0;
 }
 
-static void execlists_submit_contexts(struct intel_engine_cs *ring,
-				      struct intel_context *to0, u32 tail0,
-				      struct intel_context *to1, u32 tail1)
+static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
+				      struct drm_i915_gem_request *rq1)
 {
-	struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
-	struct drm_i915_gem_object *ctx_obj1 = NULL;
-	struct intel_ringbuffer *ringbuf1 = NULL;
+	execlists_update_context(rq0);
 
-	BUG_ON(!ctx_obj0);
-	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
-	WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
+	if (rq1)
+		execlists_update_context(rq1);
 
-	execlists_update_context(ctx_obj0, ringbuf0->obj, to0->ppgtt, tail0);
-
-	if (to1) {
-		ringbuf1 = to1->engine[ring->id].ringbuf;
-		ctx_obj1 = to1->engine[ring->id].state;
-		BUG_ON(!ctx_obj1);
-		WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
-		WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
-
-		execlists_update_context(ctx_obj1, ringbuf1->obj, to1->ppgtt, tail1);
-	}
-
-	execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
+	execlists_elsp_write(rq0, rq1);
 }
 
 static void execlists_context_unqueue(struct intel_engine_cs *ring)
@@ -444,13 +431,7 @@
 
 	WARN_ON(req1 && req1->elsp_submitted);
 
-	execlists_submit_contexts(ring, req0->ctx, req0->tail,
-				  req1 ? req1->ctx : NULL,
-				  req1 ? req1->tail : 0);
-
-	req0->elsp_submitted++;
-	if (req1)
-		req1->elsp_submitted++;
+	execlists_submit_requests(req0, req1);
 }
 
 static bool execlists_check_remove_request(struct intel_engine_cs *ring,
@@ -516,6 +497,9 @@
 		status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
 				(read_pointer % 6) * 8 + 4);
 
+		if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
+			continue;
+
 		if (status & GEN8_CTX_STATUS_PREEMPTED) {
 			if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
 				if (execlists_check_remove_request(ring, status_id))
@@ -540,37 +524,21 @@
 	ring->next_context_status_buffer = write_pointer % 6;
 
 	I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
-		   ((u32)ring->next_context_status_buffer & 0x07) << 8);
+		   _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8));
 }
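
The _MASKED_FIELD() write above relies on the masked-register convention where bits 31:16 act as a write-enable mask for bits 15:0. A standalone sketch, assuming that convention and restating the helper locally:

#include <assert.h>
#include <stdint.h>

#define _MASKED_FIELD(mask, value) (((mask) << 16) | (value))

int main(void)
{
	uint32_t ptr = 5;	/* e.g. next_context_status_buffer */
	uint32_t v = _MASKED_FIELD(0x07 << 8, (ptr & 0x07) << 8);

	/* only bits 10:8 are enabled for writing; others stay untouched */
	assert(v == ((0x0700u << 16) | (5u << 8)));
	return 0;
}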
 
-static int execlists_context_queue(struct intel_engine_cs *ring,
-				   struct intel_context *to,
-				   u32 tail,
-				   struct drm_i915_gem_request *request)
+static int execlists_context_queue(struct drm_i915_gem_request *request)
 {
+	struct intel_engine_cs *ring = request->ring;
 	struct drm_i915_gem_request *cursor;
 	int num_elements = 0;
 
-	if (to != ring->default_context)
-		intel_lr_context_pin(ring, to);
+	if (request->ctx != ring->default_context)
+		intel_lr_context_pin(request);
 
-	if (!request) {
-		/*
-		 * If there isn't a request associated with this submission,
-		 * create one as a temporary holder.
-		 */
-		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request == NULL)
-			return -ENOMEM;
-		request->ring = ring;
-		request->ctx = to;
-		kref_init(&request->ref);
-		i915_gem_context_reference(request->ctx);
-	} else {
-		i915_gem_request_reference(request);
-		WARN_ON(to != request->ctx);
-	}
-	request->tail = tail;
+	i915_gem_request_reference(request);
+
+	request->tail = request->ringbuf->tail;
 
 	spin_lock_irq(&ring->execlist_lock);
 
@@ -585,7 +553,7 @@
 					   struct drm_i915_gem_request,
 					   execlist_link);
 
-		if (to == tail_req->ctx) {
+		if (request->ctx == tail_req->ctx) {
 			WARN(tail_req->elsp_submitted != 0,
 				"More than 2 already-submitted reqs queued\n");
 			list_del(&tail_req->execlist_link);
@@ -603,10 +571,9 @@
 	return 0;
 }
 
-static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
-					      struct intel_context *ctx)
+static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_engine_cs *ring = req->ring;
 	uint32_t flush_domains;
 	int ret;
 
@@ -614,8 +581,7 @@
 	if (ring->gpu_caches_dirty)
 		flush_domains = I915_GEM_GPU_DOMAINS;
 
-	ret = ring->emit_flush(ringbuf, ctx,
-			       I915_GEM_GPU_DOMAINS, flush_domains);
+	ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 	if (ret)
 		return ret;
 
@@ -623,12 +589,10 @@
 	return 0;
 }
 
-static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
-				 struct intel_context *ctx,
+static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 				 struct list_head *vmas)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	const unsigned other_rings = ~intel_ring_flag(ring);
+	const unsigned other_rings = ~intel_ring_flag(req->ring);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -638,7 +602,7 @@
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, ring);
+			ret = i915_gem_object_sync(obj, req->ring, &req);
 			if (ret)
 				return ret;
 		}
@@ -655,59 +619,59 @@
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return logical_ring_invalidate_all_caches(ringbuf, ctx);
+	return logical_ring_invalidate_all_caches(req);
 }
 
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
-					    struct intel_context *ctx)
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
 	int ret;
 
-	if (ctx != request->ring->default_context) {
-		ret = intel_lr_context_pin(request->ring, ctx);
+	request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
+
+	if (request->ctx != request->ring->default_context) {
+		ret = intel_lr_context_pin(request);
 		if (ret)
 			return ret;
 	}
 
-	request->ringbuf = ctx->engine[request->ring->id].ringbuf;
-	request->ctx     = ctx;
-	i915_gem_context_reference(request->ctx);
-
 	return 0;
 }
 
-static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
-				       struct intel_context *ctx,
+static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
 				       int bytes)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	struct drm_i915_gem_request *request;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_i915_gem_request *target;
 	unsigned space;
 	int ret;
 
 	if (intel_ring_space(ringbuf) >= bytes)
 		return 0;
 
-	list_for_each_entry(request, &ring->request_list, list) {
+	/* The whole point of reserving space is to not wait! */
+	WARN_ON(ringbuf->reserved_in_use);
+
+	list_for_each_entry(target, &ring->request_list, list) {
 		/*
 		 * The request queue is per-engine, so can contain requests
 		 * from multiple ringbuffers. Here, we must ignore any that
 		 * aren't from the ringbuffer we're considering.
 		 */
-		if (request->ringbuf != ringbuf)
+		if (target->ringbuf != ringbuf)
 			continue;
 
 		/* Would completion of this request free enough space? */
-		space = __intel_ring_space(request->postfix, ringbuf->tail,
+		space = __intel_ring_space(target->postfix, ringbuf->tail,
 					   ringbuf->size);
 		if (space >= bytes)
 			break;
 	}
 
-	if (WARN_ON(&request->list == &ring->request_list))
+	if (WARN_ON(&target->list == &ring->request_list))
 		return -ENOSPC;
 
-	ret = i915_wait_request(request);
+	ret = i915_wait_request(target);
 	if (ret)
 		return ret;
 
@@ -717,7 +681,7 @@
 
 /*
  * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
- * @ringbuf: Logical Ringbuffer to advance.
+ * @request: Request to advance the logical ringbuffer of.
  *
  * The tail is updated in our logical ringbuffer struct, not in the actual context. What
  * really happens during submission is that the context and current tail will be placed
@@ -725,33 +689,23 @@
  * point, the tail *inside* the context is updated and the ELSP written to.
  */
 static void
-intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
-				      struct intel_context *ctx,
-				      struct drm_i915_gem_request *request)
+intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_engine_cs *ring = request->ring;
 
-	intel_logical_ring_advance(ringbuf);
+	intel_logical_ring_advance(request->ringbuf);
 
 	if (intel_ring_stopped(ring))
 		return;
 
-	execlists_context_queue(ring, ctx, ringbuf->tail, request);
+	execlists_context_queue(request);
 }
 
-static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
-				    struct intel_context *ctx)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
 {
 	uint32_t __iomem *virt;
 	int rem = ringbuf->size - ringbuf->tail;
 
-	if (ringbuf->space < rem) {
-		int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
-
-		if (ret)
-			return ret;
-	}
-
 	virt = ringbuf->virtual_start + ringbuf->tail;
 	rem /= 4;
 	while (rem--)
@@ -759,25 +713,50 @@
 
 	ringbuf->tail = 0;
 	intel_ring_update_space(ringbuf);
-
-	return 0;
 }
 
-static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
-				struct intel_context *ctx, int bytes)
+static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
 {
-	int ret;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	int remain_usable = ringbuf->effective_size - ringbuf->tail;
+	int remain_actual = ringbuf->size - ringbuf->tail;
+	int ret, total_bytes, wait_bytes = 0;
+	bool need_wrap = false;
 
-	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-		ret = logical_ring_wrap_buffer(ringbuf, ctx);
-		if (unlikely(ret))
-			return ret;
+	if (ringbuf->reserved_in_use)
+		total_bytes = bytes;
+	else
+		total_bytes = bytes + ringbuf->reserved_size;
+
+	if (unlikely(bytes > remain_usable)) {
+		/*
+		 * Not enough space for the basic request. So need to flush
+		 * out the remainder and then wait for base + reserved.
+		 */
+		wait_bytes = remain_actual + total_bytes;
+		need_wrap = true;
+	} else {
+		if (unlikely(total_bytes > remain_usable)) {
+			/*
+			 * The base request will fit but the reserved space
+			 * falls off the end. So we only need to wait for the
+			 * reserved size after flushing out the remainder.
+			 */
+			wait_bytes = remain_actual + ringbuf->reserved_size;
+			need_wrap = true;
+		} else if (total_bytes > ringbuf->space) {
+			/* No wrapping required, just waiting. */
+			wait_bytes = total_bytes;
+		}
 	}
 
-	if (unlikely(ringbuf->space < bytes)) {
-		ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
+	if (wait_bytes) {
+		ret = logical_ring_wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
 			return ret;
+
+		if (need_wrap)
+			__wrap_ring_buffer(ringbuf);
 	}
 
 	return 0;
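
The wait/wrap decision above is easiest to verify with concrete numbers. A standalone restatement of the logic with hypothetical ring dimensions (all names and values below are illustrative):

#include <assert.h>
#include <stdbool.h>

struct rb {
	int size, effective_size, tail, space, reserved_size;
	bool reserved_in_use;
};

static int wait_bytes(const struct rb *rb, int bytes, bool *need_wrap)
{
	int remain_usable = rb->effective_size - rb->tail;
	int remain_actual = rb->size - rb->tail;
	int total = rb->reserved_in_use ? bytes : bytes + rb->reserved_size;

	*need_wrap = false;
	if (bytes > remain_usable) {
		/* base request doesn't fit: flush tail, wait for base + reserved */
		*need_wrap = true;
		return remain_actual + total;
	}
	if (total > remain_usable) {
		/* base fits but the reservation falls off the end */
		*need_wrap = true;
		return remain_actual + rb->reserved_size;
	}
	return total > rb->space ? total : 0;	/* wait only, or nothing */
}

int main(void)
{
	/* 4 KiB ring, 64 unusable bytes at the end, 96 bytes still free */
	struct rb rb = { 4096, 4032, 4000, 96, 160, false };
	bool wrap;

	/* 64-byte request: 64 > 32 usable bytes, so wrap and wait for
	 * 96 (actual remainder) + 64 + 160 (reservation) = 320 bytes */
	assert(wait_bytes(&rb, 64, &wrap) == 320 && wrap);
	return 0;
}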
@@ -786,7 +765,8 @@
 /**
  * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
  *
- * @ringbuf: Logical ringbuffer.
+ * @request: The request to start some new work for
  * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
  *
  * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
@@ -796,32 +776,42 @@
  *
  * Return: non-zero if the ringbuffer is not ready to be written to.
  */
-static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
-				    struct intel_context *ctx, int num_dwords)
+int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv;
 	int ret;
 
+	WARN_ON(req == NULL);
+	dev_priv = req->ring->dev->dev_private;
+
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
 				   dev_priv->mm.interruptible);
 	if (ret)
 		return ret;
 
-	ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
+	ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
 	if (ret)
 		return ret;
 
-	/* Preallocate the olr before touching the ring */
-	ret = i915_gem_request_alloc(ring, ctx);
-	if (ret)
-		return ret;
-
-	ringbuf->space -= num_dwords * sizeof(uint32_t);
+	req->ringbuf->space -= num_dwords * sizeof(uint32_t);
 	return 0;
 }
 
+int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
+{
+	/*
+	 * The first call merely notes the reserve request and is common for
+	 * all back ends. The subsequent localised _begin() call actually
+	 * ensures that the reservation is available. Without the begin, if
+	 * the request creator immediately submitted the request without
+	 * adding any commands to it then there might not actually be
+	 * sufficient room for the submission commands.
+	 */
+	intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+
+	return intel_logical_ring_begin(request, 0);
+}
+
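With the reservation in place, a request can always be closed out even if its creator emits nothing else. A sketch of the intended call order (the caller below is hypothetical; the alloc/cancel/add calls mirror those used elsewhere in this series):

/* hypothetical caller, sketching the intended ordering */
static int submit_nops(struct intel_engine_cs *ring,
		       struct intel_context *ctx)
{
	struct drm_i915_gem_request *req;
	int ret, i;

	/* request allocation takes the reservation via the hook above */
	ret = i915_gem_request_alloc(ring, ctx, &req);
	if (ret)
		return ret;

	/* ordinary emission waits for space but leaves the reserve alone */
	ret = intel_logical_ring_begin(req, 4);
	if (ret) {
		i915_gem_request_cancel(req);
		return ret;
	}

	for (i = 0; i < 4; i++)
		intel_logical_ring_emit(req->ringbuf, MI_NOOP);
	intel_logical_ring_advance(req->ringbuf);

	/* add-request emits its closing commands out of the reserve */
	i915_add_request_no_flush(req);
	return 0;
}
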
 /**
  * execlists_submission() - submit a batchbuffer for execution, Execlists style
  * @dev: DRM device.
@@ -839,16 +829,15 @@
  *
  * Return: non-zero if the submission fails.
  */
-int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
-			       struct intel_engine_cs *ring,
-			       struct intel_context *ctx,
+int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas,
-			       struct drm_i915_gem_object *batch_obj,
-			       u64 exec_start, u32 dispatch_flags)
+			       struct list_head *vmas)
 {
+	struct drm_device       *dev = params->dev;
+	struct intel_engine_cs  *ring = params->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
+	u64 exec_start;
 	int instp_mode;
 	u32 instp_mask;
 	int ret;
@@ -899,13 +888,13 @@
 		return -EINVAL;
 	}
 
-	ret = execlists_move_to_gpu(ringbuf, ctx, vmas);
+	ret = execlists_move_to_gpu(params->request, vmas);
 	if (ret)
 		return ret;
 
 	if (ring == &dev_priv->ring[RCS] &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+		ret = intel_logical_ring_begin(params->request, 4);
 		if (ret)
 			return ret;
 
@@ -918,14 +907,17 @@
 		dev_priv->relative_constants_mode = instp_mode;
 	}
 
-	ret = ring->emit_bb_start(ringbuf, ctx, exec_start, dispatch_flags);
+	exec_start = params->batch_obj_vm_offset +
+		     args->batch_start_offset;
+
+	ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
 	if (ret)
 		return ret;
 
-	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
-	i915_gem_execbuffer_move_to_active(vmas, ring);
-	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+	i915_gem_execbuffer_move_to_active(vmas, params->request);
+	i915_gem_execbuffer_retire_commands(params);
 
 	return 0;
 }
@@ -950,7 +942,7 @@
 				ctx->engine[ring->id].state;
 
 		if (ctx_obj && (ctx != ring->default_context))
-			intel_lr_context_unpin(ring, ctx);
+			intel_lr_context_unpin(req);
 		list_del(&req->execlist_link);
 		i915_gem_request_unreference(req);
 	}
@@ -978,16 +970,15 @@
 	I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
-				  struct intel_context *ctx)
+int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	if (!ring->gpu_caches_dirty)
 		return 0;
 
-	ret = ring->emit_flush(ringbuf, ctx, 0, I915_GEM_GPU_DOMAINS);
+	ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -995,15 +986,15 @@
 	return 0;
 }
 
-static int intel_lr_context_pin(struct intel_engine_cs *ring,
-		struct intel_context *ctx)
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 {
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct intel_engine_cs *ring = rq->ring;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 	int ret = 0;
 
 	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-	if (ctx->engine[ring->id].pin_count++ == 0) {
+	if (rq->ctx->engine[ring->id].pin_count++ == 0) {
 		ret = i915_gem_obj_ggtt_pin(ctx_obj,
 				GEN8_LR_CONTEXT_ALIGN, 0);
 		if (ret)
@@ -1021,31 +1012,31 @@
 unpin_ctx_obj:
 	i915_gem_object_ggtt_unpin(ctx_obj);
 reset_pin_count:
-	ctx->engine[ring->id].pin_count = 0;
+	rq->ctx->engine[ring->id].pin_count = 0;
 
 	return ret;
 }
 
-void intel_lr_context_unpin(struct intel_engine_cs *ring,
-		struct intel_context *ctx)
+void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 {
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct intel_engine_cs *ring = rq->ring;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
 	if (ctx_obj) {
 		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-		if (--ctx->engine[ring->id].pin_count == 0) {
+		if (--rq->ctx->engine[ring->id].pin_count == 0) {
 			intel_unpin_ringbuffer_obj(ringbuf);
 			i915_gem_object_ggtt_unpin(ctx_obj);
 		}
 	}
 }
 
-static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
-					       struct intel_context *ctx)
+static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_workarounds *w = &dev_priv->workarounds;
@@ -1054,11 +1045,11 @@
 		return 0;
 
 	ring->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(ringbuf, ctx);
+	ret = logical_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
 
-	ret = intel_logical_ring_begin(ringbuf, ctx, w->count * 2 + 2);
+	ret = intel_logical_ring_begin(req, w->count * 2 + 2);
 	if (ret)
 		return ret;
 
@@ -1072,13 +1063,361 @@
 	intel_logical_ring_advance(ringbuf);
 
 	ring->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(ringbuf, ctx);
+	ret = logical_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
 
 	return 0;
 }
 
+#define wa_ctx_emit(batch, index, cmd)					\
+	do {								\
+		int __index = (index)++;				\
+		if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
+			return -ENOSPC;					\
+		}							\
+		batch[__index] = (cmd);					\
+	} while (0)
+
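Note the design choice here: on overflow the macro returns -ENOSPC from the *calling* function, which is why it may only be used inside the int-returning batch-init helpers below. A standalone restatement with a tiny four-dword buffer (buffer size and values are illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define wa_ctx_emit(batch, index, cmd)			\
	do {						\
		int __index = (index)++;		\
		if (__index >= 4)			\
			return -ENOSPC;			\
		(batch)[__index] = (cmd);		\
	} while (0)

static int fill(uint32_t *batch, uint32_t index)
{
	int i;

	for (i = 0; i < 8; i++)
		wa_ctx_emit(batch, index, 0);	/* bails out once i == 4 */
	return (int)index;
}

int main(void)
{
	uint32_t batch[4];

	printf("%d\n", fill(batch, 0));		/* prints -28 (-ENOSPC) */
	return 0;
}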
+
+/*
+ * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after the
+ * PIPE_CONTROL instruction. This is required for the flush to happen correctly,
+ * but there is a slight complication: this is applied in a WA batch where the
+ * values are only initialized once, so we cannot read the register value at the
+ * beginning and reuse it later; hence we save its value to memory, upload a
+ * constant value with bit 21 set and then restore it with the saved value.
+ * To simplify the WA, a constant value is formed by using the default value
+ * of this register. This shouldn't be a problem because we are only modifying
+ * it for a short period and this batch is non-preemptible. We could of course
+ * use additional instructions that read the actual value of the register
+ * at that time and set our bit of interest, but that makes the WA complicated.
+ *
+ * This WA is also required for Gen9 so extracting as a function avoids
+ * code duplication.
+ */
+static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
+						uint32_t *const batch,
+						uint32_t index)
+{
+	uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+	/*
+	 * WaDisableLSQCROPERFforOCL:skl
+	 * This WA is implemented in skl_init_clock_gating() but since
+	 * this batch updates GEN8_L3SQCREG4 with default value we need to
+	 * set this bit here to retain the WA during flush.
+	 */
+	if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
+		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
+
+	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8(1) |
+				   MI_SRM_LRM_GLOBAL_GTT));
+	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+	wa_ctx_emit(batch, index, 0);
+
+	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
+	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+	wa_ctx_emit(batch, index, l3sqc4_flush);
+
+	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+	wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
+				   PIPE_CONTROL_DC_FLUSH_ENABLE));
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+
+	wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8(1) |
+				   MI_SRM_LRM_GLOBAL_GTT));
+	wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
+	wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+	wa_ctx_emit(batch, index, 0);
+
+	return index;
+}
+
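The sequence emitted above is a save/modify/flush/restore pattern: SRM saves GEN8_L3SQCREG4 to the scratch page, LRI uploads the constant with the flush bit set, the PIPE_CONTROL flush runs, and LRM restores the saved value. A userspace simulation of the same steps on a plain variable (the default value and bit position are taken from the code above; the rest is illustrative):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t l3sqcreg4 = 0x40400000;	/* assumed default value */
	uint32_t scratch;

	scratch = l3sqcreg4;			/* MI_STORE_REGISTER_MEM */
	l3sqcreg4 = 0x40400000 | (1u << 21);	/* MI_LOAD_REGISTER_IMM */
	/* PIPE_CONTROL with CS stall + DC flush would run here */
	l3sqcreg4 = scratch;			/* MI_LOAD_REGISTER_MEM */

	assert(!(l3sqcreg4 & (1u << 21)));	/* original value restored */
	return 0;
}
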
+static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
+				    uint32_t offset,
+				    uint32_t start_alignment)
+{
+	return wa_ctx->offset = ALIGN(offset, start_alignment);
+}
+
+static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
+			     uint32_t offset,
+			     uint32_t size_alignment)
+{
+	wa_ctx->size = offset - wa_ctx->offset;
+
+	WARN(wa_ctx->size % size_alignment,
+	     "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
+	     wa_ctx->size, size_alignment);
+	return 0;
+}
+
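Worked numbers for the start/end bookkeeping above, assuming CACHELINE_DWORDS is 16 (a 64-byte cacheline of 4-byte dwords) and a hypothetical indirect ctx batch of 23 dwords:

#include <assert.h>
#include <stdint.h>

#define CACHELINE_DWORDS 16
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct bb { uint32_t offset, size; };

int main(void)
{
	struct bb indirect, perctx;
	uint32_t offset = 0;

	indirect.offset = ALIGN(offset, CACHELINE_DWORDS);	/* 0 */
	offset = indirect.offset + 23;		/* 23 WA dwords emitted */
	offset = ALIGN(offset, CACHELINE_DWORDS);	/* padded to 32 */
	indirect.size = offset - indirect.offset;	/* 32 dwords, 2 lines */

	/* the per-ctx batch starts right where the padded batch ended */
	perctx.offset = ALIGN(offset, CACHELINE_DWORDS);
	assert(indirect.size == 32 && perctx.offset == 32);
	return 0;
}
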
+/**
+ * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
+ *
+ * @ring: only applicable for RCS
+ * @wa_ctx: structure representing wa_ctx
+ *  offset: specifies the start of the batch, should be cache-aligned. This is
+ *    updated with the offset value received as input.
+ *  size: size of the batch in DWORDS but HW expects it in terms of cachelines.
+ * @batch: page in which the WA are loaded
+ * @offset: This field specifies the start of the batch; it should be
+ *  cache-aligned, otherwise it is adjusted accordingly.
+ *  Typically we only have one indirect_ctx and one per_ctx batch buffer which
+ *  are initialized at the beginning and shared across all contexts, but this
+ *  field helps us to have multiple batches at different offsets and select
+ *  them based on some criterion. At the moment this batch always starts at the
+ *  beginning of the page and we don't have multiple wa_ctx batch buffers.
+ *
+ *  The number of WA applied is not known at the beginning; we use this field
+ *  to return the number of DWORDS written.
+ *
+ *  Note that this batch does not contain MI_BATCH_BUFFER_END, so NOOPs are
+ *  added as padding to make it cacheline aligned. MI_BATCH_BUFFER_END will be
+ *  added to the per-ctx batch and the two together make a complete batch
+ *  buffer.
+ *
+ * Return: non-zero if we exceed the PAGE_SIZE limit.
+ */
+
+static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
+				    struct i915_wa_ctx_bb *wa_ctx,
+				    uint32_t *const batch,
+				    uint32_t *offset)
+{
+	uint32_t scratch_addr;
+	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+	/* WaDisableCtxRestoreArbitration:bdw,chv */
+	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+
+	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
+	if (IS_BROADWELL(ring->dev)) {
+		index = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+		if (index < 0)
+			return index;
+	}
+
+	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
+	/* Actual scratch location is at 128 bytes offset */
+	scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES;
+
+	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+	wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
+				   PIPE_CONTROL_GLOBAL_GTT_IVB |
+				   PIPE_CONTROL_CS_STALL |
+				   PIPE_CONTROL_QW_WRITE));
+	wa_ctx_emit(batch, index, scratch_addr);
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+	wa_ctx_emit(batch, index, 0);
+
+	/* Pad to end of cacheline */
+	while (index % CACHELINE_DWORDS)
+		wa_ctx_emit(batch, index, MI_NOOP);
+
+	/*
+	 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
+	 * execution depends on the length specified in terms of cache lines
+	 * in the register CTX_RCS_INDIRECT_CTX
+	 */
+
+	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
+}
+
+/**
+ * gen8_init_perctx_bb() - initialize per ctx batch with WA
+ *
+ * @ring: only applicable for RCS
+ * @wa_ctx: structure representing wa_ctx
+ *  offset: specifies the start of the batch, should be cache-aligned.
+ *  size: size of the batch in DWORDS but HW expects it in terms of cachelines.
+ * @batch: page in which the WA are loaded
+ * @offset: This field specifies the start of this batch.
+ *   This batch starts immediately after the indirect_ctx batch. Since we
+ *   ensure that indirect_ctx ends on a cacheline, this batch is aligned
+ *   automatically.
+ *
+ *   The number of DWORDS written is returned using this field.
+ *
+ *  This batch is terminated with MI_BATCH_BUFFER_END, so we need not add
+ *  padding to cacheline-align it; padding after MI_BATCH_BUFFER_END is
+ *  redundant.
+ */
+static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
+			       struct i915_wa_ctx_bb *wa_ctx,
+			       uint32_t *const batch,
+			       uint32_t *offset)
+{
+	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+	/* WaDisableCtxRestoreArbitration:bdw,chv */
+	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+
+	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
+
+	return wa_ctx_end(wa_ctx, *offset = index, 1);
+}
+
+static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
+				    struct i915_wa_ctx_bb *wa_ctx,
+				    uint32_t *const batch,
+				    uint32_t *offset)
+{
+	int ret;
+	struct drm_device *dev = ring->dev;
+	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+	/* WaDisableCtxRestoreArbitration:skl,bxt */
+	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
+	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+
+	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
+	ret = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+	if (ret < 0)
+		return ret;
+	index = ret;
+
+	/* Pad to end of cacheline */
+	while (index % CACHELINE_DWORDS)
+		wa_ctx_emit(batch, index, MI_NOOP);
+
+	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
+}
+
+static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
+			       struct i915_wa_ctx_bb *wa_ctx,
+			       uint32_t *const batch,
+			       uint32_t *offset)
+{
+	struct drm_device *dev = ring->dev;
+	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+
+	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) ||
+	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) {
+		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
+		wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
+		wa_ctx_emit(batch, index,
+			    _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
+		wa_ctx_emit(batch, index, MI_NOOP);
+	}
+
+	/* WaDisableCtxRestoreArbitration:skl,bxt */
+	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
+	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+
+	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
+
+	return wa_ctx_end(wa_ctx, *offset = index, 1);
+}
+
+static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size)
+{
+	int ret;
+
+	ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size));
+	if (!ring->wa_ctx.obj) {
+		DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
+		return -ENOMEM;
+	}
+
+	ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0);
+	if (ret) {
+		DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
+				 ret);
+		drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring)
+{
+	if (ring->wa_ctx.obj) {
+		i915_gem_object_ggtt_unpin(ring->wa_ctx.obj);
+		drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+		ring->wa_ctx.obj = NULL;
+	}
+}
+
+static int intel_init_workaround_bb(struct intel_engine_cs *ring)
+{
+	int ret;
+	uint32_t *batch;
+	uint32_t offset;
+	struct page *page;
+	struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+
+	WARN_ON(ring->id != RCS);
+
+	/* update this when WA for higher Gen are added */
+	if (INTEL_INFO(ring->dev)->gen > 9) {
+		DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
+			  INTEL_INFO(ring->dev)->gen);
+		return 0;
+	}
+
+	/* some WA perform writes to scratch page, ensure it is valid */
+	if (ring->scratch.obj == NULL) {
+		DRM_ERROR("scratch page not allocated for %s\n", ring->name);
+		return -EINVAL;
+	}
+
+	ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
+		return ret;
+	}
+
+	page = i915_gem_object_get_page(wa_ctx->obj, 0);
+	batch = kmap_atomic(page);
+	offset = 0;
+
+	if (INTEL_INFO(ring->dev)->gen == 8) {
+		ret = gen8_init_indirectctx_bb(ring,
+					       &wa_ctx->indirect_ctx,
+					       batch,
+					       &offset);
+		if (ret)
+			goto out;
+
+		ret = gen8_init_perctx_bb(ring,
+					  &wa_ctx->per_ctx,
+					  batch,
+					  &offset);
+		if (ret)
+			goto out;
+	} else if (INTEL_INFO(ring->dev)->gen == 9) {
+		ret = gen9_init_indirectctx_bb(ring,
+					       &wa_ctx->indirect_ctx,
+					       batch,
+					       &offset);
+		if (ret)
+			goto out;
+
+		ret = gen9_init_perctx_bb(ring,
+					  &wa_ctx->per_ctx,
+					  batch,
+					  &offset);
+		if (ret)
+			goto out;
+	}
+
+out:
+	kunmap_atomic(batch);
+	if (ret)
+		lrc_destroy_wa_ctx_obj(ring);
+
+	return ret;
+}
+
 static int gen8_init_common_ring(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -1139,19 +1478,64 @@
 	return init_workarounds_ring(ring);
 }
 
-static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
-			      struct intel_context *ctx,
+static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
+{
+	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
+	struct intel_engine_cs *ring = req->ring;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
+	int i, ret;
+
+	ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
+	if (ret)
+		return ret;
+
+	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
+		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+		intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+		intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
+		intel_logical_ring_emit(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+		intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
+	}
+
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_advance(ringbuf);
+
+	return 0;
+}
+
+static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 			      u64 offset, unsigned dispatch_flags)
 {
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
-	ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+	/* Don't rely on hw updating PDPs, especially in lite-restore.
+	 * Ideally, we should set Force PD Restore in the ctx descriptor,
+	 * but we can't. Force Restore would be a second option, but
+	 * it is unsafe in case of lite-restore (because the ctx is
+	 * not idle). */
+	if (req->ctx->ppgtt &&
+	    (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
+		ret = intel_logical_ring_emit_pdps(req);
+		if (ret)
+			return ret;
+
+		req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
+	}
+
+	ret = intel_logical_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
+				(ppgtt<<8) |
+				(dispatch_flags & I915_DISPATCH_RS ?
+				 MI_BATCH_RESOURCE_STREAMER : 0));
 	intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
 	intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
@@ -1193,18 +1577,18 @@
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
-static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
-			   struct intel_context *ctx,
+static int gen8_emit_flush(struct drm_i915_gem_request *request,
 			   u32 invalidate_domains,
 			   u32 unused)
 {
+	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct intel_engine_cs *ring = ringbuf->ring;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t cmd;
 	int ret;
 
-	ret = intel_logical_ring_begin(ringbuf, ctx, 4);
+	ret = intel_logical_ring_begin(request, 4);
 	if (ret)
 		return ret;
 
@@ -1234,11 +1618,11 @@
 	return 0;
 }
 
-static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
-				  struct intel_context *ctx,
+static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 invalidate_domains,
 				  u32 flush_domains)
 {
+	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct intel_engine_cs *ring = ringbuf->ring;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa;
@@ -1270,7 +1654,7 @@
 	vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
 		      flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
 
-	ret = intel_logical_ring_begin(ringbuf, ctx, vf_flush_wa ? 12 : 6);
+	ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
 	if (ret)
 		return ret;
 
@@ -1304,9 +1688,9 @@
 	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
 }
 
-static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
-			     struct drm_i915_gem_request *request)
+static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
+	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct intel_engine_cs *ring = ringbuf->ring;
 	u32 cmd;
 	int ret;
@@ -1316,7 +1700,7 @@
 	 * used as a workaround for not being allowed to do lite
 	 * restore with HEAD==TAIL (WaIdleLiteRestore).
 	 */
-	ret = intel_logical_ring_begin(ringbuf, request->ctx, 8);
+	ret = intel_logical_ring_begin(request, 8);
 	if (ret)
 		return ret;
 
@@ -1328,11 +1712,10 @@
 				(ring->status_page.gfx_addr +
 				(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
 	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf,
-		i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
 	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
+	intel_logical_ring_advance_and_submit(request);
 
 	/*
 	 * Here we add two extra NOOPs as padding to avoid
@@ -1345,49 +1728,53 @@
 	return 0;
 }
 
-static int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
-					      struct intel_context *ctx)
+static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
 	struct render_state so;
-	struct drm_i915_file_private *file_priv = ctx->file_priv;
-	struct drm_file *file = file_priv ? file_priv->file : NULL;
 	int ret;
 
-	ret = i915_gem_render_state_prepare(ring, &so);
+	ret = i915_gem_render_state_prepare(req->ring, &so);
 	if (ret)
 		return ret;
 
 	if (so.rodata == NULL)
 		return 0;
 
-	ret = ring->emit_bb_start(ringbuf,
-			ctx,
-			so.ggtt_offset,
-			I915_DISPATCH_SECURE);
+	ret = req->ring->emit_bb_start(req, so.ggtt_offset,
+				       I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
+	ret = req->ring->emit_bb_start(req,
+				       (so.ggtt_offset + so.aux_batch_offset),
+				       I915_DISPATCH_SECURE);
+	if (ret)
+		goto out;
 
-	ret = __i915_add_request(ring, file, so.obj);
-	/* intel_logical_ring_add_request moves object to inactive if it
-	 * fails */
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
+
 out:
 	i915_gem_render_state_fini(&so);
 	return ret;
 }
 
-static int gen8_init_rcs_context(struct intel_engine_cs *ring,
-		       struct intel_context *ctx)
+static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
 {
 	int ret;
 
-	ret = intel_logical_ring_workarounds_emit(ring, ctx);
+	ret = intel_logical_ring_workarounds_emit(req);
 	if (ret)
 		return ret;
 
-	return intel_lr_context_render_state_init(ring, ctx);
+	ret = intel_rcs_context_init_mocs(req);
+	/*
+	 * Failing to program the MOCS is non-fatal. The system will not
+	 * run at peak performance, so generate an error and carry on.
+	 */
+	if (ret)
+		DRM_ERROR("MOCS failed to program: expect performance issues.\n");
+
+	return intel_lr_context_render_state_init(req);
 }
 
 /**
@@ -1407,7 +1794,6 @@
 
 	intel_logical_ring_stop(ring);
 	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
-	i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
@@ -1419,6 +1805,8 @@
 		kunmap(sg_page(ring->status_page.obj->pages->sgl));
 		ring->status_page.obj = NULL;
 	}
+
+	lrc_destroy_wa_ctx_obj(ring);
 }
 
 static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
@@ -1478,11 +1866,28 @@
 	ring->emit_bb_start = gen8_emit_bb_start;
 
 	ring->dev = dev;
-	ret = logical_ring_init(dev, ring);
+
+	ret = intel_init_pipe_control(ring);
 	if (ret)
 		return ret;
 
-	return intel_init_pipe_control(ring);
+	ret = intel_init_workaround_bb(ring);
+	if (ret) {
+		/*
+		 * We continue even if we fail to initialize the WA batch,
+		 * because we only expect rare glitches and nothing critical
+		 * enough to prevent us from using the GPU.
+		 */
+		DRM_ERROR("WA batch buffer initialization failed: %d\n",
+			  ret);
+	}
+
+	ret = logical_ring_init(dev, ring);
+	if (ret)
+		lrc_destroy_wa_ctx_obj(ring);
+
+	return ret;
 }
 
 static int logical_bsd_ring_init(struct drm_device *dev)
@@ -1737,7 +2142,8 @@
 	reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
 	reg_state[CTX_CONTEXT_CONTROL+1] =
 		_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
-				CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+				   CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
+				   CTX_CTRL_RS_CTX_ENABLE);
 	reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
 	reg_state[CTX_RING_HEAD+1] = 0;
 	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
@@ -1762,15 +2168,27 @@
 	reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
 	reg_state[CTX_SECOND_BB_STATE+1] = 0;
 	if (ring->id == RCS) {
-		/* TODO: according to BSpec, the register state context
-		 * for CHV does not have these. OTOH, these registers do
-		 * exist in CHV. I'm waiting for a clarification */
 		reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
 		reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
 		reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
 		reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
 		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
 		reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
+		if (ring->wa_ctx.obj) {
+			struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+			uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
+
+			reg_state[CTX_RCS_INDIRECT_CTX+1] =
+				(ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
+				(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
+
+			reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
+				CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6;
+
+			reg_state[CTX_BB_PER_CTX_PTR+1] =
+				(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
+				0x01;
+		}
 	}
 	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
 	reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
@@ -1975,13 +2393,22 @@
 		lrc_setup_hardware_status_page(ring, ctx_obj);
 	else if (ring->id == RCS && !ctx->rcs_initialized) {
 		if (ring->init_context) {
-			ret = ring->init_context(ring, ctx);
+			struct drm_i915_gem_request *req;
+
+			ret = i915_gem_request_alloc(ring, ctx, &req);
+			if (ret)
+				return ret;
+
+			ret = ring->init_context(req);
 			if (ret) {
 				DRM_ERROR("ring init context: %d\n", ret);
+				i915_gem_request_cancel(req);
 				ctx->engine[ring->id].ringbuf = NULL;
 				ctx->engine[ring->id].state = NULL;
 				goto error;
 			}
+
+			i915_add_request_no_flush(req);
 		}
 
 		ctx->rcs_initialized = true;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 04d3a6d..64f89f99 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -32,18 +32,19 @@
 #define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)
 #define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	(1 << 3)
 #define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
+#define   CTX_CTRL_RS_CTX_ENABLE                (1 << 1)
 #define RING_CONTEXT_STATUS_BUF(ring)	((ring)->mmio_base+0x370)
 #define RING_CONTEXT_STATUS_PTR(ring)	((ring)->mmio_base+0x3a0)
 
 /* Logical Rings */
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
-					    struct intel_context *ctx);
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
+int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
 void intel_logical_ring_stop(struct intel_engine_cs *ring);
 void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
 int intel_logical_rings_init(struct drm_device *dev);
+int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
 
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
-				  struct intel_context *ctx);
+int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
 /**
  * intel_logical_ring_advance() - advance the ringbuffer tail
  * @ringbuf: Ringbuffer to advance.
@@ -70,20 +71,16 @@
 void intel_lr_context_free(struct intel_context *ctx);
 int intel_lr_context_deferred_create(struct intel_context *ctx,
 				     struct intel_engine_cs *ring);
-void intel_lr_context_unpin(struct intel_engine_cs *ring,
-		struct intel_context *ctx);
+void intel_lr_context_unpin(struct drm_i915_gem_request *req);
 void intel_lr_context_reset(struct drm_device *dev,
 			struct intel_context *ctx);
 
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
-int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
-			       struct intel_engine_cs *ring,
-			       struct intel_context *ctx,
+struct i915_execbuffer_params;
+int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas,
-			       struct drm_i915_gem_object *batch_obj,
-			       u64 exec_start, u32 dispatch_flags);
+			       struct list_head *vmas);
 u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
 
 void intel_lrc_irq_handler(struct intel_engine_cs *ring);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 161ab26..881b5d1 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -239,8 +239,6 @@
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-	struct intel_connector *intel_connector =
-		&lvds_encoder->attached_connector->base;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 ctl_reg, stat_reg;
 
@@ -252,8 +250,6 @@
 		stat_reg = PP_STATUS;
 	}
 
-	intel_panel_disable_backlight(intel_connector);
-
 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
 	if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
 		DRM_ERROR("timed out waiting for panel to power off\n");
@@ -262,6 +258,31 @@
 	POSTING_READ(lvds_encoder->reg);
 }
 
+static void gmch_disable_lvds(struct intel_encoder *encoder)
+{
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	struct intel_connector *intel_connector =
+		&lvds_encoder->attached_connector->base;
+
+	intel_panel_disable_backlight(intel_connector);
+
+	intel_disable_lvds(encoder);
+}
+
+static void pch_disable_lvds(struct intel_encoder *encoder)
+{
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	struct intel_connector *intel_connector =
+		&lvds_encoder->attached_connector->base;
+
+	intel_panel_disable_backlight(intel_connector);
+}
+
+static void pch_post_disable_lvds(struct intel_encoder *encoder)
+{
+	intel_disable_lvds(encoder);
+}
+
 static enum drm_mode_status
 intel_lvds_mode_valid(struct drm_connector *connector,
 		      struct drm_display_mode *mode)
@@ -452,7 +473,7 @@
 	 */
 	if (!HAS_PCH_SPLIT(dev)) {
 		drm_modeset_lock_all(dev);
-		intel_modeset_setup_hw_state(dev, true);
+		intel_display_resume(dev);
 		drm_modeset_unlock_all(dev);
 	}
 
@@ -528,7 +549,7 @@
 };
 
 static const struct drm_connector_funcs intel_lvds_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_lvds_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_lvds_set_property,
@@ -942,12 +963,6 @@
 	if (dmi_check_system(intel_no_lvds))
 		return;
 
-	pin = GMBUS_PIN_PANEL;
-	if (!lvds_is_present_in_vbt(dev, &pin)) {
-		DRM_DEBUG_KMS("LVDS is not present in VBT\n");
-		return;
-	}
-
 	if (HAS_PCH_SPLIT(dev)) {
 		if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
 			return;
@@ -957,6 +972,16 @@
 		}
 	}
 
+	pin = GMBUS_PIN_PANEL;
+	if (!lvds_is_present_in_vbt(dev, &pin)) {
+		u32 reg = HAS_PCH_SPLIT(dev) ? PCH_LVDS : LVDS;
+		if ((I915_READ(reg) & LVDS_PORT_EN) == 0) {
+			DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+			return;
+		}
+		DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
+	}
+
 	lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
 	if (!lvds_encoder)
 		return;
@@ -988,7 +1013,12 @@
 	intel_encoder->enable = intel_enable_lvds;
 	intel_encoder->pre_enable = intel_pre_enable_lvds;
 	intel_encoder->compute_config = intel_lvds_compute_config;
-	intel_encoder->disable = intel_disable_lvds;
+	if (HAS_PCH_SPLIT(dev_priv)) {
+		intel_encoder->disable = pch_disable_lvds;
+		intel_encoder->post_disable = pch_post_disable_lvds;
+	} else {
+		intel_encoder->disable = gmch_disable_lvds;
+	}
 	intel_encoder->get_hw_state = intel_lvds_get_hw_state;
 	intel_encoder->get_config = intel_lvds_get_config;
 	intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1068,24 +1098,8 @@
 			drm_mode_debug_printmodeline(scan);
 
 			fixed_mode = drm_mode_duplicate(dev, scan);
-			if (fixed_mode) {
-				downclock_mode =
-					intel_find_panel_downclock(dev,
-					fixed_mode, connector);
-				if (downclock_mode != NULL &&
-					i915.lvds_downclock) {
-					/* We found the downclock for LVDS. */
-					dev_priv->lvds_downclock_avail = true;
-					dev_priv->lvds_downclock =
-						downclock_mode->clock;
-					DRM_DEBUG_KMS("LVDS downclock is found"
-					" in EDID. Normal clock %dKhz, "
-					"downclock %dKhz\n",
-					fixed_mode->clock,
-					dev_priv->lvds_downclock);
-				}
+			if (fixed_mode)
 				goto out;
-			}
 		}
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
new file mode 100644
index 0000000..6d3c6c0
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "intel_mocs.h"
+#include "intel_lrc.h"
+#include "intel_ringbuffer.h"
+
+/* structures required */
+struct drm_i915_mocs_entry {
+	u32 control_value;
+	u16 l3cc_value;
+};
+
+struct drm_i915_mocs_table {
+	u32 size;
+	const struct drm_i915_mocs_entry *table;
+};
+
+/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
+#define LE_CACHEABILITY(value)	((value) << 0)
+#define LE_TGT_CACHE(value)	((value) << 2)
+#define LE_LRUM(value)		((value) << 4)
+#define LE_AOM(value)		((value) << 6)
+#define LE_RSC(value)		((value) << 7)
+#define LE_SCC(value)		((value) << 8)
+#define LE_PFM(value)		((value) << 11)
+#define LE_SCF(value)		((value) << 14)
+
+/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
+#define L3_ESC(value)		((value) << 0)
+#define L3_SCC(value)		((value) << 1)
+#define L3_CACHEABILITY(value)	((value) << 4)
+
+/* Helper defines */
+#define GEN9_NUM_MOCS_ENTRIES	62  /* 62 out of 64 - 63 & 64 are reserved. */
+
+/* (e)LLC caching options */
+#define LE_PAGETABLE		0
+#define LE_UC			1
+#define LE_WT			2
+#define LE_WB			3
+
+/* L3 caching options */
+#define L3_DIRECT		0
+#define L3_UC			1
+#define L3_RESERVED		2
+#define L3_WB			3
+
+/* Target cache */
+#define ELLC			0
+#define LLC			1
+#define LLC_ELLC		2
+
+/*
+ * MOCS tables
+ *
+ * These are the MOCS tables that are programmed across all the rings.
+ * The control value is programmed to all the rings that support the
+ * MOCS registers, while the l3cc_values are only programmed to the
+ * LNCFCMOCS0 - LNCFCMOCS32 registers.
+ *
+ * These tables are intended to be kept reasonably consistent across
+ * platforms. However some of the fields are not applicable to all of
+ * them.
+ *
+ * Entries not part of the following tables are undefined as far as
+ * userspace is concerned and shouldn't be relied upon.  For the time
+ * being they will be implicitly initialized to the strictest caching
+ * configuration (uncached) to guarantee forwards compatibility with
+ * userspace programs written against more recent kernels providing
+ * additional MOCS entries.
+ *
+ * NOTE: These tables MUST start with an uncached entry and the length
+ *       MUST be less than 63 as the last two registers are reserved
+ *       by the hardware.  These tables are part of the kernel ABI and
+ *       may only be updated incrementally by adding entries at the
+ *       end.
+ */
+static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
+	/* { 0x00000009, 0x0010 } */
+	{ (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
+	/* { 0x00000038, 0x0030 } */
+	{ (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
+	/* { 0x0000003b, 0x0030 } */
+	{ (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+};
+
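The hex annotations in the table can be checked against the field macros: with LE_UC = 1, LLC_ELLC = 2 and L3_UC = 1, the first entry decodes to { 0x00000009, 0x0010 }. A standalone check restating the relevant macros:

#include <assert.h>

#define LE_CACHEABILITY(v)	((v) << 0)
#define LE_TGT_CACHE(v)		((v) << 2)
#define L3_CACHEABILITY(v)	((v) << 4)

int main(void)
{
	/* first entry: uncached in LLC/eLLC and uncached in L3 */
	assert((LE_CACHEABILITY(1) | LE_TGT_CACHE(2)) == 0x00000009);
	assert(L3_CACHEABILITY(1) == 0x0010);
	return 0;
}
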
+/* NOTE: the LE_TGT_CACHE is not used on Broxton */
+static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
+	/* { 0x00000009, 0x0010 } */
+	{ (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
+	/* { 0x00000038, 0x0030 } */
+	{ (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
+	/* { 0x0000003b, 0x0030 } */
+	{ (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
+	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
+	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+};
+
+/**
+ * get_mocs_settings() - find the MOCS table for a platform
+ * @dev:        DRM device.
+ * @table:      Output table that will be made to point at appropriate
+ *              MOCS values for the device.
+ *
+ * This function looks up the MOCS table that needs to be programmed
+ * for the platform and reports whether the platform has one.
+ *
+ * Return: true if there are applicable MOCS settings for the device.
+ */
+static bool get_mocs_settings(struct drm_device *dev,
+			      struct drm_i915_mocs_table *table)
+{
+	bool result = false;
+
+	if (IS_SKYLAKE(dev)) {
+		table->size  = ARRAY_SIZE(skylake_mocs_table);
+		table->table = skylake_mocs_table;
+		result = true;
+	} else if (IS_BROXTON(dev)) {
+		table->size  = ARRAY_SIZE(broxton_mocs_table);
+		table->table = broxton_mocs_table;
+		result = true;
+	} else {
+		WARN_ONCE(INTEL_INFO(dev)->gen >= 9,
+			  "Platform that should have a MOCS table does not.\n");
+	}
+
+	return result;
+}
+
+/**
+ * emit_mocs_control_table() - emit the mocs control table
+ * @req:	Request to set up the MOCS table for.
+ * @table:	The values to program into the control regs.
+ * @reg_base:	The base for the engine that needs to be programmed.
+ *
+ * This function simply emits a MI_LOAD_REGISTER_IMM command for the
+ * given table starting at the given address.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+static int emit_mocs_control_table(struct drm_i915_gem_request *req,
+				   const struct drm_i915_mocs_table *table,
+				   u32 reg_base)
+{
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	unsigned int index;
+	int ret;
+
+	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+		return -ENODEV;
+
+	ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+	if (ret) {
+		DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+		return ret;
+	}
+
+	intel_logical_ring_emit(ringbuf,
+				MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+
+	for (index = 0; index < table->size; index++) {
+		intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+		intel_logical_ring_emit(ringbuf,
+					table->table[index].control_value);
+	}
+
+	/*
+	 * Now set the unused entries to uncached. These entries are
+	 * officially undefined and no contract for their contents or
+	 * settings is given.
+	 *
+	 * Entry 0 in the table is uncached - so we are just writing
+	 * that value to all the unused entries.
+	 */
+	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
+		intel_logical_ring_emit(ringbuf, reg_base + index * 4);
+		intel_logical_ring_emit(ringbuf, table->table[0].control_value);
+	}
+
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_advance(ringbuf);
+
+	return 0;
+}
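+
+/*
+ * The command stream emitted above for one engine looks roughly like
+ * this (offsets are illustrative):
+ *
+ *   MI_LOAD_REGISTER_IMM(62)
+ *   reg_base + 0x00, table[0].control_value
+ *   reg_base + 0x04, table[1].control_value
+ *   ...
+ *   reg_base + 0xf4, table[0].control_value  (unused entries reuse the
+ *   MI_NOOP                                   uncached entry 0)
+ *
+ * for a total of 1 + 2 * 62 + 1 = 126 dwords, matching the space
+ * reserved by intel_logical_ring_begin() above.
+ */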
+
+/**
+ * emit_mocs_l3cc_table() - emit the mocs l3cc table
+ * @req:	Request to set up the MOCS table for.
+ * @table:	The values to program into the control regs.
+ *
+ * This function simply emits a MI_LOAD_REGISTER_IMM command for the
+ * given table. This register set is programmed in pairs, with each
+ * 32-bit LNCFCMOCS register holding two 16-bit l3cc entries.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
+				const struct drm_i915_mocs_table *table)
+{
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	unsigned int count;
+	unsigned int i;
+	u32 value;
+	u32 filler = (table->table[0].l3cc_value & 0xffff) |
+			((table->table[0].l3cc_value & 0xffff) << 16);
+	int ret;
+
+	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
+		return -ENODEV;
+
+	ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+	if (ret) {
+		DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+		return ret;
+	}
+
+	intel_logical_ring_emit(ringbuf,
+			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+
+	for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
+		value = (table->table[count].l3cc_value & 0xffff) |
+			((table->table[count + 1].l3cc_value & 0xffff) << 16);
+
+		intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+		intel_logical_ring_emit(ringbuf, value);
+	}
+
+	if (table->size & 0x01) {
+		/* Odd table size - 1 left over */
+		value = (table->table[count].l3cc_value & 0xffff) |
+			((table->table[0].l3cc_value & 0xffff) << 16);
+	} else {
+		value = filler;
+	}
+
+	/*
+	 * Now set the rest of the table to uncached - use entry 0 as
+	 * this will be uncached. Leave the last pair uninitialised as
+	 * they are reserved by the hardware.
+	 */
+	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
+		intel_logical_ring_emit(ringbuf, GEN9_LNCFCMOCS0 + i * 4);
+		intel_logical_ring_emit(ringbuf, value);
+
+		value = filler;
+	}
+
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_logical_ring_advance(ringbuf);
+
+	return 0;
+}
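+
+/*
+ * For example, with the three-entry tables above the first emitted
+ * register write packs entries 0 and 1 into one dword:
+ *
+ *   value = (0x0010 & 0xffff) | ((0x0030 & 0xffff) << 16) = 0x00300010
+ *
+ * and 1 + 2 * 31 + 1 = 64 dwords are emitted in total: 31 register
+ * writes cover all 62 defined entries, again matching the space
+ * reserved above.
+ */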
+
+/**
+ * intel_rcs_context_init_mocs() - program the MOCS registers.
+ * @req:	Request to set up the MOCS tables for.
+ *
+ * This function will emit a batch buffer with the values required for
+ * programming the MOCS register values for all the currently supported
+ * rings.
+ *
+ * These registers are partially stored in the RCS context, so they are
+ * emitted together so that they are set up when a context is created.
+ * They have to be emitted at the start of the context, as setting the
+ * ELSP will re-initialize some of these registers back to the hw values.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
+{
+	struct drm_i915_mocs_table t;
+	int ret;
+
+	if (get_mocs_settings(req->ring->dev, &t)) {
+		/* Program the control registers */
+		ret = emit_mocs_control_table(req, &t, GEN9_GFX_MOCS_0);
+		if (ret)
+			return ret;
+
+		ret = emit_mocs_control_table(req, &t, GEN9_MFX0_MOCS_0);
+		if (ret)
+			return ret;
+
+		ret = emit_mocs_control_table(req, &t, GEN9_MFX1_MOCS_0);
+		if (ret)
+			return ret;
+
+		ret = emit_mocs_control_table(req, &t, GEN9_VEBOX_MOCS_0);
+		if (ret)
+			return ret;
+
+		ret = emit_mocs_control_table(req, &t, GEN9_BLT_MOCS_0);
+		if (ret)
+			return ret;
+
+		/* Now program the l3cc registers */
+		ret = emit_mocs_l3cc_table(req, &t);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
new file mode 100644
index 0000000..76e45b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef INTEL_MOCS_H
+#define INTEL_MOCS_H
+
+/**
+ * DOC: Memory Objects Control State (MOCS)
+ *
+ * Motivation:
+ * In previous Gens the MOCS settings were a value set by userland as
+ * part of the batch. In Gen9 this has changed to a single table (per ring)
+ * that all batches now reference by index instead of programming the MOCS
+ * directly.
+ *
+ * The one wrinkle in this is that only PART of the MOCS tables is included
+ * in context (the GFX_MOCS_0 - GFX_MOCS_64 and the LNCFCMOCS0 - LNCFCMOCS32
+ * registers); the settings for the other rings are not.
+ *
+ * This table needs to be set at system start-up because of the way the
+ * table interacts with the contexts and the GmmLib interface.
+ *
+ * Implementation:
+ *
+ * The tables (one per supported platform) are defined in intel_mocs.c
+ * and are programmed in the first batch after the context is loaded
+ * (with the hardware workarounds). This will then let the usual
+ * context handling keep the MOCS in step.
+ */
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+
+int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4813374..cb1c657 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -25,8 +25,6 @@
  *
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/acpi.h>
 #include <acpi/video.h>
 
@@ -53,6 +51,7 @@
 #define MBOX_ACPI      (1<<0)
 #define MBOX_SWSCI     (1<<1)
 #define MBOX_ASLE      (1<<2)
+#define MBOX_ASLE_EXT  (1<<4)
 
 struct opregion_header {
 	u8 signature[16];
@@ -62,7 +61,10 @@
 	u8 vbios_ver[16];
 	u8 driver_ver[16];
 	u32 mboxes;
-	u8 reserved[164];
+	u32 driver_model;
+	u32 pcon;
+	u8 dver[32];
+	u8 rsvd[124];
 } __packed;
 
 /* OpRegion mailbox #1: public ACPI methods */
@@ -84,7 +86,9 @@
 	u32 evts;       /* ASL supported events */
 	u32 cnot;       /* current OS notification */
 	u32 nrdy;       /* driver status */
-	u8 rsvd2[60];
+	u32 did2[7];	/* extended supported display devices ID list */
+	u32 cpd2[7];	/* extended attached display devices list */
+	u8 rsvd2[4];
 } __packed;
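+
+/*
+ * Note that the new fields exactly replace the old reserved space:
+ * 7 * 4 (did2) + 7 * 4 (cpd2) + 4 (rsvd2) = 60 bytes, so the mailbox
+ * stays 0x100 bytes long, as the BUILD_BUG_ON()s added further down
+ * verify.
+ */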
 
 /* OpRegion mailbox #2: SWSCI */
@@ -113,7 +117,10 @@
 	u32 pcft;       /* power conservation features */
 	u32 srot;       /* supported rotation angles */
 	u32 iuer;       /* IUER events */
-	u8 rsvd[86];
+	u64 fdss;
+	u32 fdsp;
+	u32 stat;
+	u8 rsvd[70];
 } __packed;
 
 /* Driver readiness indicator */
@@ -611,6 +618,38 @@
  * (version 3)
  */
 
+static u32 get_did(struct intel_opregion *opregion, int i)
+{
+	u32 did;
+
+	if (i < ARRAY_SIZE(opregion->acpi->didl)) {
+		did = ioread32(&opregion->acpi->didl[i]);
+	} else {
+		i -= ARRAY_SIZE(opregion->acpi->didl);
+
+		if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
+			return 0;
+
+		did = ioread32(&opregion->acpi->did2[i]);
+	}
+
+	return did;
+}
+
+static void set_did(struct intel_opregion *opregion, int i, u32 val)
+{
+	if (i < ARRAY_SIZE(opregion->acpi->didl)) {
+		iowrite32(val, &opregion->acpi->didl[i]);
+	} else {
+		i -= ARRAY_SIZE(opregion->acpi->didl);
+
+		if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2)))
+			return;
+
+		iowrite32(val, &opregion->acpi->did2[i]);
+	}
+}
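+
+/*
+ * Together these helpers give a single 0..14 index space spanning the
+ * original 8-entry didl array and the new 7-entry did2 extension: for
+ * example, index 10 reads or writes did2[2].
+ */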
+
 static void intel_didl_outputs(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -620,7 +659,7 @@
 	struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
 	unsigned long long device_id;
 	acpi_status status;
-	u32 temp;
+	u32 temp, max_outputs;
 	int i = 0;
 
 	handle = ACPI_HANDLE(&dev->pdev->dev);
@@ -639,41 +678,50 @@
 	}
 
 	if (!acpi_video_bus) {
-		pr_warn("No ACPI video bus found\n");
+		DRM_ERROR("No ACPI video bus found\n");
 		return;
 	}
 
+	/*
+	 * In theory, did2, the extended didl, gets added at opregion version
+	 * 3.0. In practice, however, we're supposed to set it for earlier
+	 * versions as well, since a BIOS that doesn't understand did2 should
+	 * not look at it anyway. Use a variable so we can tweak this if a need
+	 * arises later.
+	 */
+	max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
+		ARRAY_SIZE(opregion->acpi->did2);
+
 	list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
-		if (i >= 8) {
-			dev_dbg(&dev->pdev->dev,
-				"More than 8 outputs detected via ACPI\n");
+		if (i >= max_outputs) {
+			DRM_DEBUG_KMS("More than %u outputs detected via ACPI\n",
+				      max_outputs);
 			return;
 		}
-		status =
-			acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
-						NULL, &device_id);
+		status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+					       NULL, &device_id);
 		if (ACPI_SUCCESS(status)) {
 			if (!device_id)
 				goto blind_set;
-			iowrite32((u32)(device_id & 0x0f0f),
-				  &opregion->acpi->didl[i]);
-			i++;
+			set_did(opregion, i++, (u32)(device_id & 0x0f0f));
 		}
 	}
 
 end:
-	/* If fewer than 8 outputs, the list must be null terminated */
-	if (i < 8)
-		iowrite32(0, &opregion->acpi->didl[i]);
+	DRM_DEBUG_KMS("%d outputs detected\n", i);
+
+	/* If fewer than max outputs, the list must be null terminated */
+	if (i < max_outputs)
+		set_did(opregion, i, 0);
 	return;
 
 blind_set:
 	i = 0;
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		int output_type = ACPI_OTHER_OUTPUT;
-		if (i >= 8) {
-			dev_dbg(&dev->pdev->dev,
-				"More than 8 outputs in connector list\n");
+		if (i >= max_outputs) {
+			DRM_DEBUG_KMS("More than %u outputs in connector list\n",
+				      max_outputs);
 			return;
 		}
 		switch (connector->connector_type) {
@@ -698,9 +746,8 @@
 			output_type = ACPI_LVDS_OUTPUT;
 			break;
 		}
-		temp = ioread32(&opregion->acpi->didl[i]);
-		iowrite32(temp | (1<<31) | output_type | i,
-			  &opregion->acpi->didl[i]);
+		temp = get_did(opregion, i);
+		set_did(opregion, i, temp | (1 << 31) | output_type | i);
 		i++;
 	}
 	goto end;
@@ -720,7 +767,7 @@
 	 * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
 	 * there are less than eight devices. */
 	do {
-		disp_id = ioread32(&opregion->acpi->didl[i]);
+		disp_id = get_did(opregion, i);
 		iowrite32(disp_id, &opregion->acpi->cadl[i]);
 	} while (++i < 8 && disp_id != 0);
 }
@@ -852,6 +899,11 @@
 	char buf[sizeof(OPREGION_SIGNATURE)];
 	int err = 0;
 
+	BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
+	BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
+	BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100);
+	BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
+
 	pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
 	DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
 	if (asls == 0) {
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 25c8ec6..4445426 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -210,19 +210,14 @@
 }
 
 static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+					 struct drm_i915_gem_request *req,
 					 void (*tail)(struct intel_overlay *))
 {
-	struct drm_device *dev = overlay->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
 	int ret;
 
 	WARN_ON(overlay->last_flip_req);
-	i915_gem_request_assign(&overlay->last_flip_req,
-					     ring->outstanding_lazy_request);
-	ret = i915_add_request(ring);
-	if (ret)
-		return ret;
+	i915_gem_request_assign(&overlay->last_flip_req, req);
+	i915_add_request(req);
 
 	overlay->flip_tail = tail;
 	ret = i915_wait_request(overlay->last_flip_req);
@@ -239,15 +234,22 @@
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct drm_i915_gem_request *req;
 	int ret;
 
 	WARN_ON(overlay->active);
 	WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-	ret = intel_ring_begin(ring, 4);
+	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 	if (ret)
 		return ret;
 
+	ret = intel_ring_begin(req, 4);
+	if (ret) {
+		i915_gem_request_cancel(req);
+		return ret;
+	}
+
 	overlay->active = true;
 
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
@@ -256,7 +258,7 @@
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 
-	return intel_overlay_do_wait_request(overlay, NULL);
+	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
 
 /* overlay needs to be enabled in OCMD reg */
@@ -266,6 +268,7 @@
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct drm_i915_gem_request *req;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
 	int ret;
@@ -280,18 +283,25 @@
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	ret = intel_ring_begin(ring, 2);
+	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 	if (ret)
 		return ret;
 
+	ret = intel_ring_begin(req, 2);
+	if (ret) {
+		i915_gem_request_cancel(req);
+		return ret;
+	}
+
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	intel_ring_emit(ring, flip_addr);
 	intel_ring_advance(ring);
 
 	WARN_ON(overlay->last_flip_req);
-	i915_gem_request_assign(&overlay->last_flip_req,
-					     ring->outstanding_lazy_request);
-	return i915_add_request(ring);
+	i915_gem_request_assign(&overlay->last_flip_req, req);
+	i915_add_request(req);
+
+	return 0;
 }
 
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -327,6 +337,7 @@
 	struct drm_device *dev = overlay->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct drm_i915_gem_request *req;
 	u32 flip_addr = overlay->flip_addr;
 	int ret;
 
@@ -338,10 +349,16 @@
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 	if (ret)
 		return ret;
 
+	ret = intel_ring_begin(req, 6);
+	if (ret) {
+		i915_gem_request_cancel(req);
+		return ret;
+	}
+
 	/* wait for overlay to go idle */
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	intel_ring_emit(ring, flip_addr);
@@ -360,7 +377,7 @@
 	}
 	intel_ring_advance(ring);
 
-	return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
+	return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
 }
 
 /* recover from an interruption due to a signal
@@ -404,15 +421,23 @@
 
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
 		/* synchronous slowpath */
-		ret = intel_ring_begin(ring, 2);
+		struct drm_i915_gem_request *req;
+
+		ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 		if (ret)
 			return ret;
 
+		ret = intel_ring_begin(req, 2);
+		if (ret) {
+			i915_gem_request_cancel(req);
+			return ret;
+		}
+
 		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 		intel_ring_emit(ring, MI_NOOP);
 		intel_ring_advance(ring);
 
-		ret = intel_overlay_do_wait_request(overlay,
+		ret = intel_overlay_do_wait_request(overlay, req,
 						    intel_overlay_release_old_vid_tail);
 		if (ret)
 			return ret;
@@ -724,7 +749,7 @@
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL,
+	ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, NULL,
 						   &i915_ggtt_view_normal);
 	if (ret != 0)
 		return ret;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 55aad23..e2ab3f6 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -32,8 +32,11 @@
 
 #include <linux/kernel.h>
 #include <linux/moduleparam.h>
+#include <linux/pwm.h>
 #include "intel_drv.h"
 
+#define CRC_PMIC_PWM_PERIOD_NS	21333
+
 void
 intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
 		       struct drm_display_mode *adjusted_mode)
@@ -544,6 +547,15 @@
 	return I915_READ(BXT_BLC_PWM_DUTY1);
 }
 
+static u32 pwm_get_backlight(struct intel_connector *connector)
+{
+	struct intel_panel *panel = &connector->panel;
+	int duty_ns;
+
+	duty_ns = pwm_get_duty_cycle(panel->backlight.pwm);
+	return DIV_ROUND_UP(duty_ns * 100, CRC_PMIC_PWM_PERIOD_NS);
+}
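+
+/*
+ * The fixed 21333 ns period corresponds to a PWM frequency of roughly
+ * 46.9 kHz, and the 0-100 level maps linearly onto the duty cycle: for
+ * example, pwm_set_backlight(connector, 50) below requests
+ * DIV_ROUND_UP(50 * 21333, 100) = 10667 ns, i.e. a 50% duty cycle.
+ */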
+
 static u32 intel_panel_get_backlight(struct intel_connector *connector)
 {
 	struct drm_device *dev = connector->base.dev;
@@ -632,6 +644,14 @@
 	I915_WRITE(BXT_BLC_PWM_DUTY1, level);
 }
 
+static void pwm_set_backlight(struct intel_connector *connector, u32 level)
+{
+	struct intel_panel *panel = &connector->panel;
+	int duty_ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
+
+	pwm_config(panel->backlight.pwm, duty_ns, CRC_PMIC_PWM_PERIOD_NS);
+}
+
 static void
 intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
 {
@@ -769,6 +789,16 @@
 	I915_WRITE(BXT_BLC_PWM_CTL1, tmp & ~BXT_BLC_PWM_ENABLE);
 }
 
+static void pwm_disable_backlight(struct intel_connector *connector)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	/* Disable the backlight */
+	pwm_config(panel->backlight.pwm, 0, CRC_PMIC_PWM_PERIOD_NS);
+	usleep_range(2000, 3000);
+	pwm_disable(panel->backlight.pwm);
+}
+
 void intel_panel_disable_backlight(struct intel_connector *connector)
 {
 	struct drm_device *dev = connector->base.dev;
@@ -1010,6 +1040,14 @@
 	I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl | BXT_BLC_PWM_ENABLE);
 }
 
+static void pwm_enable_backlight(struct intel_connector *connector)
+{
+	struct intel_panel *panel = &connector->panel;
+
+	pwm_enable(panel->backlight.pwm);
+	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+}
+
 void intel_panel_enable_backlight(struct intel_connector *connector)
 {
 	struct drm_device *dev = connector->base.dev;
@@ -1386,6 +1424,40 @@
 	return 0;
 }
 
+static int pwm_setup_backlight(struct intel_connector *connector,
+			       enum pipe pipe)
+{
+	struct drm_device *dev = connector->base.dev;
+	struct intel_panel *panel = &connector->panel;
+	int retval;
+
+	/* Get the PWM chip for backlight control */
+	panel->backlight.pwm = pwm_get(dev->dev, "pwm_backlight");
+	if (IS_ERR(panel->backlight.pwm)) {
+		DRM_ERROR("Failed to own the pwm chip\n");
+		panel->backlight.pwm = NULL;
+		return -ENODEV;
+	}
+
+	retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
+			    CRC_PMIC_PWM_PERIOD_NS);
+	if (retval < 0) {
+		DRM_ERROR("Failed to configure the pwm chip\n");
+		pwm_put(panel->backlight.pwm);
+		panel->backlight.pwm = NULL;
+		return retval;
+	}
+
+	panel->backlight.min = 0; /* 0% */
+	panel->backlight.max = 100; /* 100% */
+	panel->backlight.level = DIV_ROUND_UP(
+				 pwm_get_duty_cycle(panel->backlight.pwm) * 100,
+				 CRC_PMIC_PWM_PERIOD_NS);
+	panel->backlight.enabled = panel->backlight.level != 0;
+
+	return 0;
+}
+
 int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
 {
 	struct drm_device *dev = connector->dev;
@@ -1429,6 +1501,10 @@
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct intel_panel *panel = &intel_connector->panel;
 
+	/* dispose of the pwm */
+	if (panel->backlight.pwm)
+		pwm_put(panel->backlight.pwm);
+
 	panel->backlight.present = false;
 }
 
@@ -1456,11 +1532,19 @@
 		dev_priv->display.set_backlight = pch_set_backlight;
 		dev_priv->display.get_backlight = pch_get_backlight;
 	} else if (IS_VALLEYVIEW(dev)) {
-		dev_priv->display.setup_backlight = vlv_setup_backlight;
-		dev_priv->display.enable_backlight = vlv_enable_backlight;
-		dev_priv->display.disable_backlight = vlv_disable_backlight;
-		dev_priv->display.set_backlight = vlv_set_backlight;
-		dev_priv->display.get_backlight = vlv_get_backlight;
+		if (dev_priv->vbt.has_mipi) {
+			dev_priv->display.setup_backlight = pwm_setup_backlight;
+			dev_priv->display.enable_backlight = pwm_enable_backlight;
+			dev_priv->display.disable_backlight = pwm_disable_backlight;
+			dev_priv->display.set_backlight = pwm_set_backlight;
+			dev_priv->display.get_backlight = pwm_get_backlight;
+		} else {
+			dev_priv->display.setup_backlight = vlv_setup_backlight;
+			dev_priv->display.enable_backlight = vlv_enable_backlight;
+			dev_priv->display.disable_backlight = vlv_disable_backlight;
+			dev_priv->display.set_backlight = vlv_set_backlight;
+			dev_priv->display.get_backlight = vlv_get_backlight;
+		}
 	} else if (IS_GEN4(dev)) {
 		dev_priv->display.setup_backlight = i965_setup_backlight;
 		dev_priv->display.enable_backlight = i965_enable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index eadc15c..fff0c22 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -59,6 +59,10 @@
 	/* WaEnableLbsSlaRetryTimerDecrement:skl */
 	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
 		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+	/* WaDisableKillLogic:bxt,skl */
+	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+		   ECOCHK_DIS_TLB);
 }
 
 static void skl_init_clock_gating(struct drm_device *dev)
@@ -91,10 +95,19 @@
 			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
 	}
 
+	/* GEN8_L3SQCREG4 has a dependency on the WA batch, so any new
+	 * changes involving this register should also be added to the
+	 * WA batch as required.
+	 */
 	if (INTEL_REVID(dev) <= SKL_REVID_E0)
 		/* WaDisableLSQCROPERFforOCL:skl */
 		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
 			   GEN8_LQSC_RO_PERF_DIS);
+
+	/* WaEnableGapsTsvCreditFix:skl */
+	if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
+		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+					   GEN9_GAPS_TSV_CREDIT_DISABLE));
+	}
 }
 
 static void bxt_init_clock_gating(struct drm_device *dev)
@@ -334,22 +347,26 @@
 
 	if (IS_VALLEYVIEW(dev)) {
 		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
-		if (IS_CHERRYVIEW(dev))
-			chv_set_memory_pm5(dev_priv, enable);
+		POSTING_READ(FW_BLC_SELF_VLV);
+		dev_priv->wm.vlv.cxsr = enable;
 	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
 		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+		POSTING_READ(FW_BLC_SELF);
 	} else if (IS_PINEVIEW(dev)) {
 		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
 		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
 		I915_WRITE(DSPFW3, val);
+		POSTING_READ(DSPFW3);
 	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
 		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
 			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
 		I915_WRITE(FW_BLC_SELF, val);
+		POSTING_READ(FW_BLC_SELF);
 	} else if (IS_I915GM(dev)) {
 		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
 			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
 		I915_WRITE(INSTPM, val);
+		POSTING_READ(INSTPM);
 	} else {
 		return;
 	}
@@ -923,223 +940,484 @@
 			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
 	}
 
-	POSTING_READ(DSPFW1);
+	/* zero (unused) WM1 watermarks */
+	I915_WRITE(DSPFW4, 0);
+	I915_WRITE(DSPFW5, 0);
+	I915_WRITE(DSPFW6, 0);
+	I915_WRITE(DSPHOWM1, 0);
 
-	dev_priv->wm.vlv = *wm;
+	POSTING_READ(DSPFW1);
 }
 
 #undef FW_WM_VLV
 
-static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc,
-					 struct drm_plane *plane)
+enum vlv_wm_level {
+	VLV_WM_LEVEL_PM2,
+	VLV_WM_LEVEL_PM5,
+	VLV_WM_LEVEL_DDR_DVFS,
+	CHV_WM_NUM_LEVELS,
+	VLV_WM_NUM_LEVELS = 1,
+};
+
+/* latency must be in 0.1us units. */
+static unsigned int vlv_wm_method2(unsigned int pixel_rate,
+				   unsigned int pipe_htotal,
+				   unsigned int horiz_pixels,
+				   unsigned int bytes_per_pixel,
+				   unsigned int latency)
 {
-	struct drm_device *dev = crtc->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int entries, prec_mult, drain_latency, pixel_size;
-	int clock = intel_crtc->config->base.adjusted_mode.crtc_clock;
-	const int high_precision = IS_CHERRYVIEW(dev) ? 16 : 64;
+	unsigned int ret;
 
-	/*
-	 * FIXME the plane might have an fb
-	 * but be invisible (eg. due to clipping)
-	 */
-	if (!intel_crtc->active || !plane->state->fb)
-		return 0;
+	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
+	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+	ret = DIV_ROUND_UP(ret, 64);
 
-	if (WARN(clock == 0, "Pixel clock is zero!\n"))
-		return 0;
-
-	pixel_size = drm_format_plane_cpp(plane->state->fb->pixel_format, 0);
-
-	if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
-		return 0;
-
-	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
-
-	prec_mult = high_precision;
-	drain_latency = 64 * prec_mult * 4 / entries;
-
-	if (drain_latency > DRAIN_LATENCY_MASK) {
-		prec_mult /= 2;
-		drain_latency = 64 * prec_mult * 4 / entries;
-	}
-
-	if (drain_latency > DRAIN_LATENCY_MASK)
-		drain_latency = DRAIN_LATENCY_MASK;
-
-	return drain_latency | (prec_mult == high_precision ?
-				DDL_PRECISION_HIGH : DDL_PRECISION_LOW);
+	return ret;
 }
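+
+/*
+ * As a worked example, a 1920-wide 4 bpp plane on a 1080p timing
+ * (pixel_rate = 148500, pipe_htotal = 2200) at the 33 us DDR DVFS
+ * latency level (latency = 330 in 0.1 us units) needs:
+ *
+ *   (330 * 148500) / (2200 * 10000) = 2 extra lines of FIFO, and
+ *   (2 + 1) * 1920 * 4 / 64         = 360 64-byte FIFO entries
+ */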
 
-static int vlv_compute_wm(struct intel_crtc *crtc,
-			  struct intel_plane *plane,
-			  int fifo_size)
+static void vlv_setup_wm_latency(struct drm_device *dev)
 {
-	int clock, entries, pixel_size;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/*
-	 * FIXME the plane might have an fb
-	 * but be invisible (eg. due to clipping)
-	 */
-	if (!crtc->active || !plane->base.state->fb)
+	/* all latencies in usec */
+	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+
+	if (IS_CHERRYVIEW(dev_priv)) {
+		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+	}
+}
+
+static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
+				     struct intel_crtc *crtc,
+				     const struct intel_plane_state *state,
+				     int level)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	int clock, htotal, pixel_size, width, wm;
+
+	if (dev_priv->wm.pri_latency[level] == 0)
+		return USHRT_MAX;
+
+	if (!state->visible)
 		return 0;
 
-	pixel_size = drm_format_plane_cpp(plane->base.state->fb->pixel_format, 0);
+	pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
 	clock = crtc->config->base.adjusted_mode.crtc_clock;
+	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
+	width = crtc->config->pipe_src_w;
+	if (WARN_ON(htotal == 0))
+		htotal = 1;
 
-	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
-
-	/*
-	 * Set up the watermark such that we don't start issuing memory
-	 * requests until we are within PND's max deadline value (256us).
-	 * Idea being to be idle as long as possible while still taking
-	 * advatange of PND's deadline scheduling. The limit of 8
-	 * cachelines (used when the FIFO will anyway drain in less time
-	 * than 256us) should match what we would be done if trickle
-	 * feed were enabled.
-	 */
-	return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8);
-}
-
-static bool vlv_compute_sr_wm(struct drm_device *dev,
-			      struct vlv_wm_values *wm)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_crtc *crtc;
-	enum pipe pipe = INVALID_PIPE;
-	int num_planes = 0;
-	int fifo_size = 0;
-	struct intel_plane *plane;
-
-	wm->sr.cursor = wm->sr.plane = 0;
-
-	crtc = single_enabled_crtc(dev);
-	/* maxfifo not supported on pipe C */
-	if (crtc && to_intel_crtc(crtc)->pipe != PIPE_C) {
-		pipe = to_intel_crtc(crtc)->pipe;
-		num_planes = !!wm->pipe[pipe].primary +
-			!!wm->pipe[pipe].sprite[0] +
-			!!wm->pipe[pipe].sprite[1];
-		fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
+	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+		/*
+		 * FIXME the formula gives values that are
+		 * too big for the cursor FIFO, and hence we
+		 * would never be able to use cursors. For
+		 * now just hardcode the watermark.
+		 */
+		wm = 63;
+	} else {
+		wm = vlv_wm_method2(clock, htotal, width, pixel_size,
+				    dev_priv->wm.pri_latency[level] * 10);
 	}
 
-	if (fifo_size == 0 || num_planes > 1)
-		return false;
+	return min_t(int, wm, USHRT_MAX);
+}
 
-	wm->sr.cursor = vlv_compute_wm(to_intel_crtc(crtc),
-				       to_intel_plane(crtc->cursor), 0x3f);
+static void vlv_compute_fifo(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct vlv_wm_state *wm_state = &crtc->wm_state;
+	struct intel_plane *plane;
+	unsigned int total_rate = 0;
+	const int fifo_size = 512 - 1;
+	int fifo_extra, fifo_left = fifo_size;
 
-	list_for_each_entry(plane, &dev->mode_config.plane_list, base.head) {
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		struct intel_plane_state *state =
+			to_intel_plane_state(plane->base.state);
+
 		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
 			continue;
 
-		if (plane->pipe != pipe)
+		if (state->visible) {
+			wm_state->num_active_planes++;
+			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+		}
+	}
+
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		struct intel_plane_state *state =
+			to_intel_plane_state(plane->base.state);
+		unsigned int rate;
+
+		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+			plane->wm.fifo_size = 63;
+			continue;
+		}
+
+		if (!state->visible) {
+			plane->wm.fifo_size = 0;
+			continue;
+		}
+
+		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+		plane->wm.fifo_size = fifo_size * rate / total_rate;
+		fifo_left -= plane->wm.fifo_size;
+	}
+
+	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);
+
+	/* spread the remainder evenly */
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		int plane_extra;
+
+		if (fifo_left == 0)
+			break;
+
+		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
 			continue;
 
-		wm->sr.plane = vlv_compute_wm(to_intel_crtc(crtc),
-					      plane, fifo_size);
-		if (wm->sr.plane != 0)
-			break;
+		/* give it all to the first plane if none are active */
+		if (plane->wm.fifo_size == 0 &&
+		    wm_state->num_active_planes)
+			continue;
+
+		plane_extra = min(fifo_extra, fifo_left);
+		plane->wm.fifo_size += plane_extra;
+		fifo_left -= plane_extra;
 	}
 
-	return true;
+	WARN_ON(fifo_left != 0);
 }
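+
+/*
+ * For example, with two visible 4 bpp planes the 511-entry FIFO is
+ * first split 255/255 proportionally, and the single leftover entry is
+ * then handed out by the loop above, giving a 256/255 split.
+ */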
 
-static void valleyview_update_wm(struct drm_crtc *crtc)
+static void vlv_invert_wms(struct intel_crtc *crtc)
+{
+	struct vlv_wm_state *wm_state = &crtc->wm_state;
+	int level;
+
+	for (level = 0; level < wm_state->num_levels; level++) {
+		struct drm_device *dev = crtc->base.dev;
+		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+		struct intel_plane *plane;
+
+		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
+		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;
+
+		for_each_intel_plane_on_crtc(dev, crtc, plane) {
+			switch (plane->base.type) {
+				int sprite;
+			case DRM_PLANE_TYPE_CURSOR:
+				wm_state->wm[level].cursor = plane->wm.fifo_size -
+					wm_state->wm[level].cursor;
+				break;
+			case DRM_PLANE_TYPE_PRIMARY:
+				wm_state->wm[level].primary = plane->wm.fifo_size -
+					wm_state->wm[level].primary;
+				break;
+			case DRM_PLANE_TYPE_OVERLAY:
+				sprite = plane->plane;
+				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
+					wm_state->wm[level].sprite[sprite];
+				break;
+			}
+		}
+	}
+}
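+
+/*
+ * The registers are programmed with the inverse of what
+ * vlv_compute_wm_level() produces, i.e. fifo_size minus the number of
+ * entries needed: a plane with a 256-entry FIFO share that needs 120
+ * entries ends up programmed with 256 - 120 = 136.
+ */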
+
+static void vlv_compute_wm(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct vlv_wm_state *wm_state = &crtc->wm_state;
+	struct intel_plane *plane;
+	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
+	int level;
+
+	memset(wm_state, 0, sizeof(*wm_state));
+
+	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
+	if (IS_CHERRYVIEW(dev))
+		wm_state->num_levels = CHV_WM_NUM_LEVELS;
+	else
+		wm_state->num_levels = VLV_WM_NUM_LEVELS;
+
+	wm_state->num_active_planes = 0;
+
+	vlv_compute_fifo(crtc);
+
+	if (wm_state->num_active_planes != 1)
+		wm_state->cxsr = false;
+
+	if (wm_state->cxsr) {
+		for (level = 0; level < wm_state->num_levels; level++) {
+			wm_state->sr[level].plane = sr_fifo_size;
+			wm_state->sr[level].cursor = 63;
+		}
+	}
+
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		struct intel_plane_state *state =
+			to_intel_plane_state(plane->base.state);
+
+		if (!state->visible)
+			continue;
+
+		/* normal watermarks */
+		for (level = 0; level < wm_state->num_levels; level++) {
+			int wm = vlv_compute_wm_level(plane, crtc, state, level);
+			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;
+
+			/* hack: clamp an out-of-range level 0 watermark */
+			if (WARN_ON(level == 0 && wm > max_wm))
+				wm = max_wm;
+
+			if (wm > plane->wm.fifo_size)
+				break;
+
+			switch (plane->base.type) {
+				int sprite;
+			case DRM_PLANE_TYPE_CURSOR:
+				wm_state->wm[level].cursor = wm;
+				break;
+			case DRM_PLANE_TYPE_PRIMARY:
+				wm_state->wm[level].primary = wm;
+				break;
+			case DRM_PLANE_TYPE_OVERLAY:
+				sprite = plane->plane;
+				wm_state->wm[level].sprite[sprite] = wm;
+				break;
+			}
+		}
+
+		wm_state->num_levels = level;
+
+		if (!wm_state->cxsr)
+			continue;
+
+		/* maxfifo watermarks */
+		switch (plane->base.type) {
+			int sprite, level;
+		case DRM_PLANE_TYPE_CURSOR:
+			for (level = 0; level < wm_state->num_levels; level++)
+				wm_state->sr[level].cursor =
+					min(wm_state->sr[level].cursor,
+					    wm_state->wm[level].cursor);
+			break;
+		case DRM_PLANE_TYPE_PRIMARY:
+			for (level = 0; level < wm_state->num_levels; level++)
+				wm_state->sr[level].plane =
+					min(wm_state->sr[level].plane,
+					    wm_state->wm[level].primary);
+			break;
+		case DRM_PLANE_TYPE_OVERLAY:
+			sprite = plane->plane;
+			for (level = 0; level < wm_state->num_levels; level++)
+				wm_state->sr[level].plane =
+					min(wm_state->sr[level].plane,
+					    wm_state->wm[level].sprite[sprite]);
+			break;
+		}
+	}
+
+	/* clear any (partially) filled invalid levels */
+	for (level = wm_state->num_levels; level < CHV_WM_NUM_LEVELS; level++) {
+		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
+		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
+	}
+
+	vlv_invert_wms(crtc);
+}
+
+#define VLV_FIFO(plane, value) \
+	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
+
+static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_plane *plane;
+	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;
+
+	for_each_intel_plane_on_crtc(dev, crtc, plane) {
+		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+			WARN_ON(plane->wm.fifo_size != 63);
+			continue;
+		}
+
+		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+			sprite0_start = plane->wm.fifo_size;
+		else if (plane->plane == 0)
+			sprite1_start = sprite0_start + plane->wm.fifo_size;
+		else
+			fifo_size = sprite1_start + plane->wm.fifo_size;
+	}
+
+	WARN_ON(fifo_size != 512 - 1);
+
+	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
+		      pipe_name(crtc->pipe), sprite0_start,
+		      sprite1_start, fifo_size);
+
+	switch (crtc->pipe) {
+		uint32_t dsparb, dsparb2, dsparb3;
+	case PIPE_A:
+		dsparb = I915_READ(DSPARB);
+		dsparb2 = I915_READ(DSPARB2);
+
+		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
+			    VLV_FIFO(SPRITEB, 0xff));
+		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
+			   VLV_FIFO(SPRITEB, sprite1_start));
+
+		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
+			     VLV_FIFO(SPRITEB_HI, 0x1));
+		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
+			   VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
+
+		I915_WRITE(DSPARB, dsparb);
+		I915_WRITE(DSPARB2, dsparb2);
+		break;
+	case PIPE_B:
+		dsparb = I915_READ(DSPARB);
+		dsparb2 = I915_READ(DSPARB2);
+
+		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
+			    VLV_FIFO(SPRITED, 0xff));
+		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
+			   VLV_FIFO(SPRITED, sprite1_start));
+
+		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
+			     VLV_FIFO(SPRITED_HI, 0xff));
+		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
+			   VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
+
+		I915_WRITE(DSPARB, dsparb);
+		I915_WRITE(DSPARB2, dsparb2);
+		break;
+	case PIPE_C:
+		dsparb3 = I915_READ(DSPARB3);
+		dsparb2 = I915_READ(DSPARB2);
+
+		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
+			     VLV_FIFO(SPRITEF, 0xff));
+		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
+			    VLV_FIFO(SPRITEF, sprite1_start));
+
+		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
+			     VLV_FIFO(SPRITEF_HI, 0xff));
+		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
+			   VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
+
+		I915_WRITE(DSPARB3, dsparb3);
+		I915_WRITE(DSPARB2, dsparb2);
+		break;
+	default:
+		break;
+	}
+}
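+
+/*
+ * The split points are 9-bit values spread across two registers: the
+ * low 8 bits live in DSPARB/DSPARB3 and bit 8 in DSPARB2. For example,
+ * sprite1_start = 511 (0x1ff) on pipe A packs 0xff into DSPARB via
+ * VLV_FIFO(SPRITEB, ...) and sets the high bit in DSPARB2 via
+ * VLV_FIFO(SPRITEB_HI, 511 >> 8).
+ */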
+
+#undef VLV_FIFO
+
+static void vlv_merge_wm(struct drm_device *dev,
+			 struct vlv_wm_values *wm)
+{
+	struct intel_crtc *crtc;
+	int num_active_crtcs = 0;
+
+	if (IS_CHERRYVIEW(dev))
+		wm->level = VLV_WM_LEVEL_DDR_DVFS;
+	else
+		wm->level = VLV_WM_LEVEL_PM2;
+	wm->cxsr = true;
+
+	for_each_intel_crtc(dev, crtc) {
+		const struct vlv_wm_state *wm_state = &crtc->wm_state;
+
+		if (!crtc->active)
+			continue;
+
+		if (!wm_state->cxsr)
+			wm->cxsr = false;
+
+		num_active_crtcs++;
+		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
+	}
+
+	if (num_active_crtcs != 1)
+		wm->cxsr = false;
+
+	if (num_active_crtcs > 1)
+		wm->level = VLV_WM_LEVEL_PM2;
+
+	for_each_intel_crtc(dev, crtc) {
+		struct vlv_wm_state *wm_state = &crtc->wm_state;
+		enum pipe pipe = crtc->pipe;
+
+		if (!crtc->active)
+			continue;
+
+		wm->pipe[pipe] = wm_state->wm[wm->level];
+		if (wm->cxsr)
+			wm->sr = wm_state->sr[wm->level];
+
+		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
+		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
+		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
+		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
+	}
+}
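+
+/*
+ * For instance, with two active pipes the level is clamped back to PM2
+ * and cxsr is disabled: only a single active pipe (with a single active
+ * plane, see vlv_compute_wm()) can use maxfifo and the deeper PM5/DDR
+ * DVFS levels.
+ */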
+
+static void vlv_update_wm(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
-	bool cxsr_enabled;
-	struct vlv_wm_values wm = dev_priv->wm.vlv;
+	struct vlv_wm_values wm = {};
 
-	wm.ddl[pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary);
-	wm.pipe[pipe].primary = vlv_compute_wm(intel_crtc,
-					       to_intel_plane(crtc->primary),
-					       vlv_get_fifo_size(dev, pipe, 0));
+	vlv_compute_wm(intel_crtc);
+	vlv_merge_wm(dev, &wm);
 
-	wm.ddl[pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor);
-	wm.pipe[pipe].cursor = vlv_compute_wm(intel_crtc,
-					      to_intel_plane(crtc->cursor),
-					      0x3f);
-
-	cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
-
-	if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
+	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
+		/* FIXME should be part of crtc atomic commit */
+		vlv_pipe_set_fifo_size(intel_crtc);
 		return;
+	}
 
-	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
-		      "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
-		      wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
-		      wm.sr.plane, wm.sr.cursor);
-
-	/*
-	 * FIXME DDR DVFS introduces massive memory latencies which
-	 * are not known to system agent so any deadline specified
-	 * by the display may not be respected. To support DDR DVFS
-	 * the watermark code needs to be rewritten to essentially
-	 * bypass deadline mechanism and rely solely on the
-	 * watermarks. For now disable DDR DVFS.
-	 */
-	if (IS_CHERRYVIEW(dev_priv))
+	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
+	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
 		chv_set_memory_dvfs(dev_priv, false);
 
-	if (!cxsr_enabled)
+	if (wm.level < VLV_WM_LEVEL_PM5 &&
+	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
+		chv_set_memory_pm5(dev_priv, false);
+
+	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
 		intel_set_memory_cxsr(dev_priv, false);
 
+	/* FIXME should be part of crtc atomic commit */
+	vlv_pipe_set_fifo_size(intel_crtc);
+
 	vlv_write_wm_values(intel_crtc, &wm);
 
-	if (cxsr_enabled)
+	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
+		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
+		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
+		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
+		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);
+
+	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
 		intel_set_memory_cxsr(dev_priv, true);
-}
 
-static void valleyview_update_sprite_wm(struct drm_plane *plane,
-					struct drm_crtc *crtc,
-					uint32_t sprite_width,
-					uint32_t sprite_height,
-					int pixel_size,
-					bool enabled, bool scaled)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
-	int sprite = to_intel_plane(plane)->plane;
-	bool cxsr_enabled;
-	struct vlv_wm_values wm = dev_priv->wm.vlv;
+	if (wm.level >= VLV_WM_LEVEL_PM5 &&
+	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
+		chv_set_memory_pm5(dev_priv, true);
 
-	if (enabled) {
-		wm.ddl[pipe].sprite[sprite] =
-			vlv_compute_drain_latency(crtc, plane);
+	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
+	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
+		chv_set_memory_dvfs(dev_priv, true);
 
-		wm.pipe[pipe].sprite[sprite] =
-			vlv_compute_wm(intel_crtc,
-				       to_intel_plane(plane),
-				       vlv_get_fifo_size(dev, pipe, sprite+1));
-	} else {
-		wm.ddl[pipe].sprite[sprite] = 0;
-		wm.pipe[pipe].sprite[sprite] = 0;
-	}
-
-	cxsr_enabled = vlv_compute_sr_wm(dev, &wm);
-
-	if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0)
-		return;
-
-	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: sprite %c=%d, "
-		      "SR: plane=%d, cursor=%d\n", pipe_name(pipe),
-		      sprite_name(pipe, sprite),
-		      wm.pipe[pipe].sprite[sprite],
-		      wm.sr.plane, wm.sr.cursor);
-
-	if (!cxsr_enabled)
-		intel_set_memory_cxsr(dev_priv, false);
-
-	vlv_write_wm_values(intel_crtc, &wm);
-
-	if (cxsr_enabled)
-		intel_set_memory_cxsr(dev_priv, true);
+	dev_priv->wm.vlv = wm;
 }
 
 #define single_plane_enabled(mask) is_power_of_2(mask)
@@ -1434,23 +1712,22 @@
 	I915_WRITE(FW_BLC, fwater_lo);
 }
 
-static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
-				    struct drm_crtc *crtc)
+uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pixel_rate;
 
-	pixel_rate = intel_crtc->config->base.adjusted_mode.crtc_clock;
+	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
 
 	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
 	 * adjust the pixel_rate here. */
 
-	if (intel_crtc->config->pch_pfit.enabled) {
+	if (pipe_config->pch_pfit.enabled) {
 		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
-		uint32_t pfit_size = intel_crtc->config->pch_pfit.size;
+		uint32_t pfit_size = pipe_config->pch_pfit.size;
 
-		pipe_w = intel_crtc->config->pipe_src_w;
-		pipe_h = intel_crtc->config->pipe_src_h;
+		pipe_w = pipe_config->pipe_src_w;
+		pipe_h = pipe_config->pipe_src_h;
+
 		pfit_w = (pfit_size >> 16) & 0xFFFF;
 		pfit_h = pfit_size & 0xFFFF;
 		if (pipe_w < pfit_w)
@@ -1815,7 +2092,7 @@
 	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
 				     mode->crtc_clock);
 	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
-					 dev_priv->display.get_display_clock_speed(dev_priv->dev));
+					 dev_priv->cdclk_freq);
 
 	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
 	       PIPE_WM_LINETIME_TIME(linetime);
@@ -2066,7 +2343,7 @@
 
 	p->active = true;
 	p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
-	p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
+	p->pixel_rate = ilk_pipe_pixel_rate(intel_crtc->config);
 
 	if (crtc->primary->state->fb)
 		p->pri.bytes_per_pixel =
@@ -2085,7 +2362,7 @@
 	p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
 	p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
 
-	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+	drm_for_each_legacy_plane(plane, dev) {
 		struct intel_plane *intel_plane = to_intel_plane(plane);
 
 		if (intel_plane->pipe == pipe) {
@@ -2215,6 +2492,7 @@
 			 const struct ilk_wm_maximums *max,
 			 struct intel_pipe_wm *merged)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int level, max_level = ilk_wm_max_level(dev);
 	int last_enabled_level = max_level;
 
@@ -2255,7 +2533,8 @@
 	 * What we should check here is whether FBC can be
 	 * enabled sometime later.
 	 */
-	if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
+	if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
+	    intel_fbc_enabled(dev_priv)) {
 		for (level = 2; level <= max_level; level++) {
 			struct intel_wm_level *wm = &merged->wm[level];
 
@@ -3043,8 +3322,10 @@
 	if (!to_intel_crtc(crtc)->active)
 		return 0;
 
-	return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
+	if (WARN_ON(p->pixel_rate == 0))
+		return 0;
 
+	return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
 }
 
 static void skl_compute_transition_wm(struct drm_crtc *crtc,
@@ -3685,6 +3966,139 @@
 	}
 }
 
+#define _FW_WM(value, plane) \
+	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
+#define _FW_WM_VLV(value, plane) \
+	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
+
+static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
+			       struct vlv_wm_values *wm)
+{
+	enum pipe pipe;
+	uint32_t tmp;
+
+	for_each_pipe(dev_priv, pipe) {
+		tmp = I915_READ(VLV_DDL(pipe));
+
+		wm->ddl[pipe].primary =
+			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+		wm->ddl[pipe].cursor =
+			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+		wm->ddl[pipe].sprite[0] =
+			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+		wm->ddl[pipe].sprite[1] =
+			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
+	}
+
+	tmp = I915_READ(DSPFW1);
+	wm->sr.plane = _FW_WM(tmp, SR);
+	wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
+	wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
+	wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
+
+	tmp = I915_READ(DSPFW2);
+	wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
+	wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
+	wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
+
+	tmp = I915_READ(DSPFW3);
+	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
+
+	if (IS_CHERRYVIEW(dev_priv)) {
+		tmp = I915_READ(DSPFW7_CHV);
+		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
+		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+
+		tmp = I915_READ(DSPFW8_CHV);
+		wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
+		wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
+
+		tmp = I915_READ(DSPFW9_CHV);
+		wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
+		wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
+
+		tmp = I915_READ(DSPHOWM);
+		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+		wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
+		wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
+		wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
+		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
+		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+	} else {
+		tmp = I915_READ(DSPFW7);
+		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
+		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+
+		tmp = I915_READ(DSPHOWM);
+		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
+		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
+		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+	}
+}
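+
+/*
+ * The watermark fields read back here are wider than one register: the
+ * DSPFW registers hold the low bits and DSPHOWM supplies the extra high
+ * bit, e.g. PLANEA = 0x34 with PLANEA_HI = 1 reads back as 0x134 (308).
+ */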
+
+#undef _FW_WM
+#undef _FW_WM_VLV
+
+void vlv_wm_get_hw_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
+	struct intel_plane *plane;
+	enum pipe pipe;
+	u32 val;
+
+	vlv_read_wm_values(dev_priv, wm);
+
+	for_each_intel_plane(dev, plane) {
+		switch (plane->base.type) {
+			int sprite;
+		case DRM_PLANE_TYPE_CURSOR:
+			plane->wm.fifo_size = 63;
+			break;
+		case DRM_PLANE_TYPE_PRIMARY:
+			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
+			break;
+		case DRM_PLANE_TYPE_OVERLAY:
+			sprite = plane->plane;
+			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
+			break;
+		}
+	}
+
+	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+	wm->level = VLV_WM_LEVEL_PM2;
+
+	if (IS_CHERRYVIEW(dev_priv)) {
+		mutex_lock(&dev_priv->rps.hw_lock);
+
+		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+		if (val & DSP_MAXFIFO_PM5_ENABLE)
+			wm->level = VLV_WM_LEVEL_PM5;
+
+		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+		if ((val & FORCE_DDR_HIGH_FREQ) == 0)
+			wm->level = VLV_WM_LEVEL_DDR_DVFS;
+
+		mutex_unlock(&dev_priv->rps.hw_lock);
+	}
+
+	for_each_pipe(dev_priv, pipe)
+		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
+			      pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
+			      wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
+
+	DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
+		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
+}
+
 void ilk_wm_get_hw_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4083,14 +4497,14 @@
 		      "Odd GPU freq value\n"))
 		val &= ~1;
 
+	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+
 	if (val != dev_priv->rps.cur_freq) {
 		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
 		if (!IS_CHERRYVIEW(dev_priv))
 			gen6_set_rps_thresholds(dev_priv, val);
 	}
 
-	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
 	dev_priv->rps.cur_freq = val;
 	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 }
@@ -4250,12 +4664,8 @@
 
 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
 {
-	/* No RC6 before Ironlake */
-	if (INTEL_INFO(dev)->gen < 5)
-		return 0;
-
-	/* RC6 is only on Ironlake mobile not on desktop */
-	if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
+	/* No RC6 before Ironlake and code is gone for ilk. */
+	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
 
 	/* Respect the kernel parameter if it is set */
@@ -4275,10 +4685,6 @@
 		return enable_rc6 & mask;
 	}
 
-	/* Disable RC6 on Ironlake */
-	if (INTEL_INFO(dev)->gen == 5)
-		return 0;
-
 	if (IS_IVYBRIDGE(dev))
 		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 
@@ -4297,25 +4703,26 @@
 	u32 ddcc_status = 0;
 	int ret;
 
-	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	/* All of these values are in units of 50MHz */
 	dev_priv->rps.cur_freq		= 0;
 	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
-	dev_priv->rps.rp0_freq		= (rp_state_cap >>  0) & 0xff;
-	dev_priv->rps.rp1_freq		= (rp_state_cap >>  8) & 0xff;
-	dev_priv->rps.min_freq		= (rp_state_cap >> 16) & 0xff;
-	if (IS_SKYLAKE(dev)) {
-		/* Store the frequency values in 16.66 MHZ units, which is
-		   the natural hardware unit for SKL */
-		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
-		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
-		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
+	if (IS_BROXTON(dev)) {
+		rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
+		dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
+		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
+		dev_priv->rps.min_freq = (rp_state_cap >>  0) & 0xff;
+	} else {
+		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
+		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
+		dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
 	}
+
 	/* hw_max = RP0 until we check for overclocking */
 	dev_priv->rps.max_freq		= dev_priv->rps.rp0_freq;
 
 	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
 		ret = sandybridge_pcode_read(dev_priv,
 					HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
 					&ddcc_status);
@@ -4327,6 +4734,16 @@
 					dev_priv->rps.max_freq);
 	}
 
+	if (IS_SKYLAKE(dev)) {
+		/* Store the frequency values in 16.66 MHz units, which is
+		   the natural hardware unit for SKL */
+		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
+		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
+		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
+		dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
+		dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
+	}
+
 	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
 
 	/* Preserve min/max settings in case of re-init */
@@ -4619,6 +5036,7 @@
 	int min_freq = 15;
 	unsigned int gpu_freq;
 	unsigned int max_ia_freq, min_ring_freq;
+	unsigned int max_gpu_freq, min_gpu_freq;
 	int scaling_factor = 180;
 	struct cpufreq_policy *policy;
 
@@ -4643,17 +5061,31 @@
 	/* convert DDR frequency from units of 266.6MHz to bandwidth */
 	min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
+	if (IS_SKYLAKE(dev)) {
+		/* Convert GT frequency to 50 MHz units */
+		min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
+		max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
+	} else {
+		min_gpu_freq = dev_priv->rps.min_freq;
+		max_gpu_freq = dev_priv->rps.max_freq;
+	}
+
 	/*
 	 * For each potential GPU frequency, load a ring frequency we'd like
 	 * to use for memory access.  We do this by specifying the IA frequency
 	 * the PCU should use as a reference to determine the ring frequency.
 	 */
-	for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
-	     gpu_freq--) {
-		int diff = dev_priv->rps.max_freq - gpu_freq;
+	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
+		int diff = max_gpu_freq - gpu_freq;
 		unsigned int ia_freq = 0, ring_freq = 0;
 
-		if (INTEL_INFO(dev)->gen >= 8) {
+		if (IS_SKYLAKE(dev)) {
+			/*
+			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
+			 * No floor required for ring frequency on SKL.
+			 */
+			ring_freq = gpu_freq;
+		} else if (INTEL_INFO(dev)->gen >= 8) {
 			/* max(2 * GT, DDR). NB: GT is 50MHz units */
 			ring_freq = max(min_ring_freq, gpu_freq);
 		} else if (IS_HASWELL(dev)) {
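
The table is now built in 50 MHz GT units on every platform, which is why the Skylake values are divided by GEN9_FREQ_SCALER before the loop; the platform branches only differ in how ring_freq is derived from gpu_freq. A compressed sketch of the per-entry decision, covering the branches visible in this hunk (the pre-gen8 rules continue past its end):

	/* Sketch: one ring-frequency table entry per GPU step, highest first. */
	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
		unsigned int ring_freq;

		if (IS_SKYLAKE(dev))
			ring_freq = gpu_freq;	/* 2 * GT, 100 MHz units, no floor */
		else if (INTEL_INFO(dev)->gen >= 8)
			ring_freq = max(min_ring_freq, gpu_freq); /* max(2*GT, DDR) */
		else
			ring_freq = min_ring_freq; /* placeholder for older rules */

		/* ... pcode write pairing (ia_freq, ring_freq) with gpu_freq ... */
	}
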
@@ -4687,7 +5119,7 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
+	if (!HAS_CORE_RING_FREQ(dev))
 		return;
 
 	mutex_lock(&dev_priv->rps.hw_lock);
@@ -5802,7 +6234,8 @@
 	} else if (INTEL_INFO(dev)->gen >= 9) {
 		gen9_enable_rc6(dev);
 		gen9_enable_rps(dev);
-		__gen6_update_ring_freq(dev);
+		if (IS_SKYLAKE(dev))
+			__gen6_update_ring_freq(dev);
 	} else if (IS_BROADWELL(dev)) {
 		gen8_enable_rps(dev);
 		__gen6_update_ring_freq(dev);
@@ -6686,13 +7119,15 @@
 		else if (INTEL_INFO(dev)->gen == 8)
 			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
 	} else if (IS_CHERRYVIEW(dev)) {
-		dev_priv->display.update_wm = valleyview_update_wm;
-		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
+		vlv_setup_wm_latency(dev);
+
+		dev_priv->display.update_wm = vlv_update_wm;
 		dev_priv->display.init_clock_gating =
 			cherryview_init_clock_gating;
 	} else if (IS_VALLEYVIEW(dev)) {
-		dev_priv->display.update_wm = valleyview_update_wm;
-		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
+		vlv_setup_wm_latency(dev);
+
+		dev_priv->display.update_wm = vlv_update_wm;
 		dev_priv->display.init_clock_gating =
 			valleyview_init_clock_gating;
 	} else if (IS_PINEVIEW(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 5ee0fa5..a04b4dc 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -254,10 +254,13 @@
 	uint32_t max_sleep_time = 0x1f;
 	/* Lately it was identified that depending on panel idle frame count
 	 * calculated at HW can be off by 1. So let's use what came
-	 * from VBT + 1 and at minimum 2 to be on the safe side.
+	 * from VBT + 1.
+	 * There are also cases where the panel demands at least 4
+	 * idle frames but the VBT is not set. To cover both cases,
+	 * let's use at least 5 when the VBT isn't set, to be safe.
 	 */
 	uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
-			       dev_priv->vbt.psr.idle_frames + 1 : 2;
+			       dev_priv->vbt.psr.idle_frames + 1 : 5;
 	uint32_t val = 0x0;
 	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
@@ -400,7 +403,7 @@
 
 		/* Avoid continuous PSR exit by masking memup and hpd */
 		I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
-			   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
+			   EDP_PSR_DEBUG_MASK_HPD);
 
 		/* Enable PSR on the panel */
 		hsw_psr_enable_sink(intel_dp);
@@ -596,13 +599,15 @@
 /**
  * intel_psr_single_frame_update - Single Frame Update
  * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * Some platforms support a single frame update feature that is used to
  * send and update only one frame on Remote Frame Buffer.
  * So far it is only implemented for Valleyview and Cherryview because
  * hardware requires this to be done before a page flip.
  */
-void intel_psr_single_frame_update(struct drm_device *dev)
+void intel_psr_single_frame_update(struct drm_device *dev,
+				   unsigned frontbuffer_bits)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
@@ -624,14 +629,16 @@
 
 	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
-	val = I915_READ(VLV_PSRCTL(pipe));
 
-	/*
-	 * We need to set this bit before writing registers for a flip.
-	 * This bit will be self-clear when it gets to the PSR active state.
-	 */
-	I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
+	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
+		val = I915_READ(VLV_PSRCTL(pipe));
 
+		/*
+		 * We need to set this bit before writing registers for a flip.
+		 * This bit will self-clear when it gets to the PSR active state.
+		 */
+		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
+	}
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
@@ -648,7 +655,7 @@
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
  */
 void intel_psr_invalidate(struct drm_device *dev,
-			      unsigned frontbuffer_bits)
+			  unsigned frontbuffer_bits)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
@@ -663,11 +670,12 @@
 	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
 
-	intel_psr_exit(dev);
-
 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
-
 	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+
+	if (frontbuffer_bits)
+		intel_psr_exit(dev);
+
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
@@ -675,6 +683,7 @@
  * intel_psr_flush - Flush PSR
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
  *
  * Since the hardware frontbuffer tracking has gaps we need to integrate
  * with the software frontbuffer tracking. This function gets called every
@@ -684,11 +693,12 @@
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
  */
 void intel_psr_flush(struct drm_device *dev,
-			 unsigned frontbuffer_bits)
+		     unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	enum pipe pipe;
+	int delay_ms = HAS_DDI(dev) ? 100 : 500;
 
 	mutex_lock(&dev_priv->psr.lock);
 	if (!dev_priv->psr.enabled) {
@@ -698,30 +708,33 @@
 
 	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
+
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
 	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
-	/*
-	 * On Haswell sprite plane updates don't result in a psr invalidating
-	 * signal in the hardware. Which means we need to manually fake this in
-	 * software for all flushes, not just when we've seen a preceding
-	 * invalidation through frontbuffer rendering.
-	 */
-	if (IS_HASWELL(dev) &&
-	    (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
-		intel_psr_exit(dev);
-
-	/*
-	 * On Valleyview and Cherryview we don't use hardware tracking so
-	 * any plane updates or cursor moves don't result in a PSR
-	 * invalidating. Which means we need to manually fake this in
-	 * software for all flushes, not just when we've seen a preceding
-	 * invalidation through frontbuffer rendering. */
-	if (!HAS_DDI(dev))
-		intel_psr_exit(dev);
+	if (HAS_DDI(dev)) {
+		/*
+		 * By definition every flush should mean invalidate + flush,
+		 * however on core platforms let's minimize the
+		 * disable/re-enable so we can avoid the invalidate when flip
+		 * originated the flush.
+		 */
+		if (frontbuffer_bits && origin != ORIGIN_FLIP)
+			intel_psr_exit(dev);
+	} else {
+		/*
+		 * On Valleyview and Cherryview we don't use hardware tracking
+		 * so any plane updates or cursor moves don't result in a PSR
+		 * invalidating. Which means we need to manually fake this in
+		 * software for all flushes.
+		 */
+		if (frontbuffer_bits)
+			intel_psr_exit(dev);
+	}
 
 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
 		schedule_delayed_work(&dev_priv->psr.work,
-				      msecs_to_jiffies(100));
+				      msecs_to_jiffies(delay_ms));
 	mutex_unlock(&dev_priv->psr.lock);
 }
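
The rewritten flush splits on hardware frontbuffer tracking: HAS_DDI platforms only force a PSR exit when the flush did not originate from a flip (flips are tracked in hardware), while VLV/CHV must always exit because nothing in hardware notices plane or cursor updates. Distilled, with delay_ms as defined at the top of the function:

	/* Sketch: does this flush force a PSR exit, and when do we re-arm? */
	if (frontbuffer_bits) {
		if (!HAS_DDI(dev) || origin != ORIGIN_FLIP)
			intel_psr_exit(dev);
	}

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->psr.work,
				      msecs_to_jiffies(delay_ms)); /* 100/500 ms */
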
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3817a6f..6e6b8db 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -81,7 +81,7 @@
 	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
 }
 
-void __intel_ring_advance(struct intel_engine_cs *ring)
+static void __intel_ring_advance(struct intel_engine_cs *ring)
 {
 	struct intel_ringbuffer *ringbuf = ring->buffer;
 	ringbuf->tail &= ringbuf->size - 1;
@@ -91,10 +91,11 @@
 }
 
 static int
-gen2_render_ring_flush(struct intel_engine_cs *ring,
+gen2_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -105,7 +106,7 @@
 	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
 		cmd |= MI_READ_FLUSH;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
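
This first conversion sets the pattern for the remainder of the file: each emission helper now takes the drm_i915_gem_request being constructed instead of a bare engine, recovers the engine via req->ring, and hands the request to intel_ring_begin() so ring-space accounting and seqno lookups key off the request rather than the engine's (now removed) outstanding_lazy_request. The transformation is mechanical; as a sketch, with some_emit_helper as a hypothetical stand-in:

	static int some_emit_helper(struct drm_i915_gem_request *req, u32 flags)
	{
		struct intel_engine_cs *ring = req->ring; /* engine via request */
		int ret;

		ret = intel_ring_begin(req, 2);		/* request, not ring */
		if (ret)
			return ret;

		intel_ring_emit(ring, flags);		/* emission still per-ring */
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
		return 0;
	}
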
 
@@ -117,10 +118,11 @@
 }
 
 static int
-gen4_render_ring_flush(struct intel_engine_cs *ring,
+gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ring->dev;
 	u32 cmd;
 	int ret;
@@ -163,7 +165,7 @@
 	    (IS_G4X(dev) || IS_GEN5(dev)))
 		cmd |= MI_INVALIDATE_ISP;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -212,13 +214,13 @@
  * really our business.  That leaves only stall at scoreboard.
  */
 static int
-intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
+intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
-
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -231,7 +233,7 @@
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -247,15 +249,16 @@
 }
 
 static int
-gen6_render_ring_flush(struct intel_engine_cs *ring,
-                         u32 invalidate_domains, u32 flush_domains)
+gen6_render_ring_flush(struct drm_i915_gem_request *req,
+		       u32 invalidate_domains, u32 flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 flags = 0;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
-	ret = intel_emit_post_sync_nonzero_flush(ring);
+	ret = intel_emit_post_sync_nonzero_flush(req);
 	if (ret)
 		return ret;
 
@@ -285,7 +288,7 @@
 		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
 	}
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -299,11 +302,12 @@
 }
 
 static int
-gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
+gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -318,9 +322,10 @@
 }
 
 static int
-gen7_render_ring_flush(struct intel_engine_cs *ring,
+gen7_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 flags = 0;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
@@ -362,10 +367,10 @@
 		/* Workaround: we must issue a pipe_control with CS-stall bit
 		 * set before a pipe_control command that has the state cache
 		 * invalidate bit set. */
-		gen7_render_ring_cs_stall_wa(ring);
+		gen7_render_ring_cs_stall_wa(req);
 	}
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -379,12 +384,13 @@
 }
 
 static int
-gen8_emit_pipe_control(struct intel_engine_cs *ring,
+gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -400,11 +406,11 @@
 }
 
 static int
-gen8_render_ring_flush(struct intel_engine_cs *ring,
+gen8_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
 	u32 flags = 0;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	flags |= PIPE_CONTROL_CS_STALL;
@@ -424,7 +430,7 @@
 		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 
 		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
-		ret = gen8_emit_pipe_control(ring,
+		ret = gen8_emit_pipe_control(req,
 					     PIPE_CONTROL_CS_STALL |
 					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
 					     0);
@@ -432,7 +438,7 @@
 			return ret;
 	}
 
-	return gen8_emit_pipe_control(ring, flags, scratch_addr);
+	return gen8_emit_pipe_control(req, flags, scratch_addr);
 }
 
 static void ring_write_tail(struct intel_engine_cs *ring,
@@ -703,10 +709,10 @@
 	return ret;
 }
 
-static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
-				       struct intel_context *ctx)
+static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_workarounds *w = &dev_priv->workarounds;
@@ -715,11 +721,11 @@
 		return 0;
 
 	ring->gpu_caches_dirty = true;
-	ret = intel_ring_flush_all_caches(ring);
+	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(ring, (w->count * 2 + 2));
+	ret = intel_ring_begin(req, (w->count * 2 + 2));
 	if (ret)
 		return ret;
 
@@ -733,7 +739,7 @@
 	intel_ring_advance(ring);
 
 	ring->gpu_caches_dirty = true;
-	ret = intel_ring_flush_all_caches(ring);
+	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
 
@@ -742,16 +748,15 @@
 	return 0;
 }
 
-static int intel_rcs_ctx_init(struct intel_engine_cs *ring,
-			      struct intel_context *ctx)
+static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
 {
 	int ret;
 
-	ret = intel_ring_workarounds_emit(ring, ctx);
+	ret = intel_ring_workarounds_emit(req);
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_render_state_init(ring);
+	ret = i915_gem_render_state_init(req);
 	if (ret)
 		DRM_ERROR("init render state: %d\n", ret);
 
@@ -775,11 +780,11 @@
 	return 0;
 }
 
-#define WA_REG(addr, mask, val) { \
+#define WA_REG(addr, mask, val) do { \
 		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
 		if (r) \
 			return r; \
-	}
+	} while (0)
 
 #define WA_SET_BIT_MASKED(addr, mask) \
 	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
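
Wrapping the WA_REG body in do { ... } while (0) is the standard idiom for statement-like macros: the expansion becomes exactly one statement, so the semicolon after a macro call parses correctly in an unbraced if/else. A hypothetical caller showing the failure mode the old bare-brace form allowed:

	/* With the old { ... } form this expanded to a block plus an empty
	 * statement, so the else no longer attached to the if: */
	if (IS_SKYLAKE(dev))
		WA_SET_BIT_MASKED(HDC_CHICKEN0, HDC_FORCE_NON_COHERENT);
	else
		WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
	/* With do { ... } while (0), the trailing semicolon completes the
	 * while and the if/else nests as written. */
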
@@ -800,6 +805,11 @@
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+	/* WaDisableAsyncFlipPerfMode:bdw */
+	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
 	/* WaDisablePartialInstShootdown:bdw */
 	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -861,6 +871,11 @@
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+	/* WaDisableAsyncFlipPerfMode:chv */
+	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
 	/* WaDisablePartialInstShootdown:chv */
 	/* WaDisableThreadStallDopClockGating:chv */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -931,8 +946,11 @@
 		/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
 		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
 				  GEN9_RHWO_OPTIMIZATION_DISABLE);
-		WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
-				  DISABLE_PIXEL_MASK_CAMMING);
+		/*
+		 * The WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be
+		 * set, but we do that in the per-ctx batchbuffer because this
+		 * register is not restored on context restore.
+		 */
 	}
 
 	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
@@ -1023,13 +1041,6 @@
 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-	if (INTEL_REVID(dev) == SKL_REVID_C0 ||
-	    INTEL_REVID(dev) == SKL_REVID_D0)
-		/* WaBarrierPerformanceFixDisable:skl */
-		WA_SET_BIT_MASKED(HDC_CHICKEN0,
-				  HDC_FENCE_DEST_SLM_DISABLE |
-				  HDC_BARRIER_PERFORMANCE_DISABLE);
-
 	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
 		/*
 		 *Use Force Non-Coherent whenever executing a 3D context. This
@@ -1041,6 +1052,20 @@
 				  HDC_FORCE_NON_COHERENT);
 	}
 
+	if (INTEL_REVID(dev) == SKL_REVID_C0 ||
+	    INTEL_REVID(dev) == SKL_REVID_D0)
+		/* WaBarrierPerformanceFixDisable:skl */
+		WA_SET_BIT_MASKED(HDC_CHICKEN0,
+				  HDC_FENCE_DEST_SLM_DISABLE |
+				  HDC_BARRIER_PERFORMANCE_DISABLE);
+
+	/* WaDisableSbeCacheDispatchPortSharing:skl */
+	if (INTEL_REVID(dev) <= SKL_REVID_F0) {
+		WA_SET_BIT_MASKED(
+			GEN7_HALF_SLICE_CHICKEN1,
+			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+	}
+
 	return skl_tune_iz_hashing(ring);
 }
 
@@ -1105,9 +1130,9 @@
 	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
 	 * programmed to '1' on all products.
 	 *
-	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
+	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
 	 */
-	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
+	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
 	/* Required for the hardware to program scanline values for waiting */
@@ -1132,7 +1157,7 @@
 			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6)
+	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
 	if (HAS_L3_DPF(dev))
@@ -1155,10 +1180,11 @@
 	intel_fini_pipe_control(ring);
 }
 
-static int gen8_rcs_signal(struct intel_engine_cs *signaller,
+static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
+	struct intel_engine_cs *signaller = signaller_req->ring;
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *waiter;
@@ -1168,7 +1194,7 @@
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
-	ret = intel_ring_begin(signaller, num_dwords);
+	ret = intel_ring_begin(signaller_req, num_dwords);
 	if (ret)
 		return ret;
 
@@ -1178,8 +1204,7 @@
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		seqno = i915_gem_request_get_seqno(
-					   signaller->outstanding_lazy_request);
+		seqno = i915_gem_request_get_seqno(signaller_req);
 		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
 		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
 					   PIPE_CONTROL_QW_WRITE |
@@ -1196,10 +1221,11 @@
 	return 0;
 }
 
-static int gen8_xcs_signal(struct intel_engine_cs *signaller,
+static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
+	struct intel_engine_cs *signaller = signaller_req->ring;
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *waiter;
@@ -1209,7 +1235,7 @@
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
-	ret = intel_ring_begin(signaller, num_dwords);
+	ret = intel_ring_begin(signaller_req, num_dwords);
 	if (ret)
 		return ret;
 
@@ -1219,8 +1245,7 @@
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		seqno = i915_gem_request_get_seqno(
-					   signaller->outstanding_lazy_request);
+		seqno = i915_gem_request_get_seqno(signaller_req);
 		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
 					   MI_FLUSH_DW_OP_STOREDW);
 		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
@@ -1235,9 +1260,10 @@
 	return 0;
 }
 
-static int gen6_signal(struct intel_engine_cs *signaller,
+static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
+	struct intel_engine_cs *signaller = signaller_req->ring;
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *useless;
@@ -1248,15 +1274,14 @@
 	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
 #undef MBOX_UPDATE_DWORDS
 
-	ret = intel_ring_begin(signaller, num_dwords);
+	ret = intel_ring_begin(signaller_req, num_dwords);
 	if (ret)
 		return ret;
 
 	for_each_ring(useless, dev_priv, i) {
 		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
 		if (mbox_reg != GEN6_NOSYNC) {
-			u32 seqno = i915_gem_request_get_seqno(
-					   signaller->outstanding_lazy_request);
+			u32 seqno = i915_gem_request_get_seqno(signaller_req);
 			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
 			intel_ring_emit(signaller, mbox_reg);
 			intel_ring_emit(signaller, seqno);
@@ -1272,30 +1297,29 @@
 
 /**
  * gen6_add_request - Update the semaphore mailbox registers
- * 
- * @ring - ring that is adding a request
- * @seqno - return seqno stuck into the ring
+ *
+ * @request - request to write to the ring
  *
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
 static int
-gen6_add_request(struct intel_engine_cs *ring)
+gen6_add_request(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	if (ring->semaphore.signal)
-		ret = ring->semaphore.signal(ring, 4);
+		ret = ring->semaphore.signal(req, 4);
 	else
-		ret = intel_ring_begin(ring, 4);
+		ret = intel_ring_begin(req, 4);
 
 	if (ret)
 		return ret;
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring,
-		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	__intel_ring_advance(ring);
 
@@ -1318,14 +1342,15 @@
  */
 
 static int
-gen8_ring_sync(struct intel_engine_cs *waiter,
+gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
+	struct intel_engine_cs *waiter = waiter_req->ring;
 	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
 	int ret;
 
-	ret = intel_ring_begin(waiter, 4);
+	ret = intel_ring_begin(waiter_req, 4);
 	if (ret)
 		return ret;
 
@@ -1343,10 +1368,11 @@
 }
 
 static int
-gen6_ring_sync(struct intel_engine_cs *waiter,
+gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
+	struct intel_engine_cs *waiter = waiter_req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
@@ -1361,7 +1387,7 @@
 
 	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-	ret = intel_ring_begin(waiter, 4);
+	ret = intel_ring_begin(waiter_req, 4);
 	if (ret)
 		return ret;
 
@@ -1392,8 +1418,9 @@
 } while (0)
 
 static int
-pc_render_add_request(struct intel_engine_cs *ring)
+pc_render_add_request(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
@@ -1405,7 +1432,7 @@
 	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
 	 * memory before requesting an interrupt.
 	 */
-	ret = intel_ring_begin(ring, 32);
+	ret = intel_ring_begin(req, 32);
 	if (ret)
 		return ret;
 
@@ -1413,8 +1440,7 @@
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring,
-		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
@@ -1433,8 +1459,7 @@
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring,
-		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, 0);
 	__intel_ring_advance(ring);
 
@@ -1585,13 +1610,14 @@
 }
 
 static int
-bsd_ring_flush(struct intel_engine_cs *ring,
+bsd_ring_flush(struct drm_i915_gem_request *req,
 	       u32     invalidate_domains,
 	       u32     flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -1602,18 +1628,18 @@
 }
 
 static int
-i9xx_add_request(struct intel_engine_cs *ring)
+i9xx_add_request(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring,
-		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	__intel_ring_advance(ring);
 
@@ -1745,13 +1771,14 @@
 }
 
 static int
-i965_dispatch_execbuffer(struct intel_engine_cs *ring,
+i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 length,
 			 unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -1771,14 +1798,15 @@
 #define I830_TLB_ENTRIES (2)
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
-i830_dispatch_execbuffer(struct intel_engine_cs *ring,
+i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 cs_offset = ring->scratch.gtt_offset;
 	int ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -1795,7 +1823,7 @@
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
 
-		ret = intel_ring_begin(ring, 6 + 2);
+		ret = intel_ring_begin(req, 6 + 2);
 		if (ret)
 			return ret;
 
@@ -1818,7 +1846,7 @@
 		offset = cs_offset;
 	}
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -1833,13 +1861,14 @@
 }
 
 static int
-i915_dispatch_execbuffer(struct intel_engine_cs *ring,
+i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -2082,7 +2111,6 @@
 
 	intel_unpin_ringbuffer_obj(ringbuf);
 	intel_destroy_ringbuffer_obj(ringbuf);
-	i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
@@ -2106,6 +2134,9 @@
 	if (intel_ring_space(ringbuf) >= n)
 		return 0;
 
+	/* The whole point of reserving space is to not wait! */
+	WARN_ON(ringbuf->reserved_in_use);
+
 	list_for_each_entry(request, &ring->request_list, list) {
 		space = __intel_ring_space(request->postfix, ringbuf->tail,
 					   ringbuf->size);
@@ -2124,18 +2155,11 @@
 	return 0;
 }
 
-static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
 {
 	uint32_t __iomem *virt;
-	struct intel_ringbuffer *ringbuf = ring->buffer;
 	int rem = ringbuf->size - ringbuf->tail;
 
-	if (ringbuf->space < rem) {
-		int ret = ring_wait_for_space(ring, rem);
-		if (ret)
-			return ret;
-	}
-
 	virt = ringbuf->virtual_start + ringbuf->tail;
 	rem /= 4;
 	while (rem--)
@@ -2143,21 +2167,11 @@
 
 	ringbuf->tail = 0;
 	intel_ring_update_space(ringbuf);
-
-	return 0;
 }
 
 int intel_ring_idle(struct intel_engine_cs *ring)
 {
 	struct drm_i915_gem_request *req;
-	int ret;
-
-	/* We need to add any requests required to flush the objects and ring */
-	if (ring->outstanding_lazy_request) {
-		ret = i915_add_request(ring);
-		if (ret)
-			return ret;
-	}
 
 	/* Wait upon the last request to be completed */
 	if (list_empty(&ring->request_list))
@@ -2180,33 +2194,126 @@
 	return 0;
 }
 
-static int __intel_ring_prepare(struct intel_engine_cs *ring,
-				int bytes)
+int intel_ring_reserve_space(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ringbuf = ring->buffer;
-	int ret;
+	/*
+	 * The first call merely notes the reserve request and is common for
+	 * all back ends. The subsequent localised _begin() call actually
+	 * ensures that the reservation is available. Without the begin, if
+	 * the request creator immediately submitted the request without
+	 * adding any commands to it then there might not actually be
+	 * sufficient room for the submission commands.
+	 */
+	intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
 
-	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-		ret = intel_wrap_ring_buffer(ring);
-		if (unlikely(ret))
-			return ret;
+	return intel_ring_begin(request, 0);
+}
+
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
+{
+	WARN_ON(ringbuf->reserved_size);
+	WARN_ON(ringbuf->reserved_in_use);
+
+	ringbuf->reserved_size = size;
+}
+
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
+{
+	WARN_ON(ringbuf->reserved_in_use);
+
+	ringbuf->reserved_size   = 0;
+	ringbuf->reserved_in_use = false;
+}
+
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
+{
+	WARN_ON(ringbuf->reserved_in_use);
+
+	ringbuf->reserved_in_use = true;
+	ringbuf->reserved_tail   = ringbuf->tail;
+}
+
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
+{
+	WARN_ON(!ringbuf->reserved_in_use);
+	if (ringbuf->tail > ringbuf->reserved_tail) {
+		WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
+		     "request reserved size too small: %d vs %d!\n",
+		     ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
+	} else {
+		/*
+		 * The ring was wrapped while the reserved space was in use.
+		 * That means that some unknown amount of the ring tail was
+		 * no-op filled and skipped. Thus simply adding the ring size
+		 * to the tail and doing the above space check will not work.
+		 * Rather than attempt to track how much tail was skipped,
+		 * it is simpler to accept that this sanity check is
+		 * occasionally skipped as well.
+		 */
 	}
 
-	if (unlikely(ringbuf->space < bytes)) {
-		ret = ring_wait_for_space(ring, bytes);
+	ringbuf->reserved_size   = 0;
+	ringbuf->reserved_in_use = false;
+}
+
+static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
+{
+	struct intel_ringbuffer *ringbuf = ring->buffer;
+	int remain_usable = ringbuf->effective_size - ringbuf->tail;
+	int remain_actual = ringbuf->size - ringbuf->tail;
+	int ret, total_bytes, wait_bytes = 0;
+	bool need_wrap = false;
+
+	if (ringbuf->reserved_in_use)
+		total_bytes = bytes;
+	else
+		total_bytes = bytes + ringbuf->reserved_size;
+
+	if (unlikely(bytes > remain_usable)) {
+		/*
+		 * Not enough space for the basic request. So need to flush
+		 * out the remainder and then wait for base + reserved.
+		 */
+		wait_bytes = remain_actual + total_bytes;
+		need_wrap = true;
+	} else {
+		if (unlikely(total_bytes > remain_usable)) {
+			/*
+			 * The base request will fit but the reserved space
+			 * falls off the end. So we only need to wait for the
+			 * reserved size after flushing out the remainder.
+			 */
+			wait_bytes = remain_actual + ringbuf->reserved_size;
+			need_wrap = true;
+		} else if (total_bytes > ringbuf->space) {
+			/* No wrapping required, just waiting. */
+			wait_bytes = total_bytes;
+		}
+	}
+
+	if (wait_bytes) {
+		ret = ring_wait_for_space(ring, wait_bytes);
 		if (unlikely(ret))
 			return ret;
+
+		if (need_wrap)
+			__wrap_ring_buffer(ringbuf);
 	}
 
 	return 0;
 }
 
-int intel_ring_begin(struct intel_engine_cs *ring,
+int intel_ring_begin(struct drm_i915_gem_request *req,
 		     int num_dwords)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_engine_cs *ring;
+	struct drm_i915_private *dev_priv;
 	int ret;
 
+	WARN_ON(req == NULL);
+	ring = req->ring;
+	dev_priv = ring->dev->dev_private;
+
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
 				   dev_priv->mm.interruptible);
 	if (ret)
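
Taken together, the new functions define a small reservation state machine per request: note the reserve at request creation, let intel_ring_begin() fold reserved_size into its space checks while ordinary commands are emitted, then flip to consuming the reserved bytes while the closing 'add request' commands are written. A rough sketch of the intended call sequence; the actual call sites live in the request-submission path outside this hunk:

	/* Sketch: lifecycle of the reserved ring space across one request. */
	intel_ring_reserved_space_reserve(ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
	ret = intel_ring_begin(req, 0);		/* wait until base + reserve fit */
	if (ret)
		intel_ring_reserved_space_cancel(ringbuf); /* request abandoned */

	/* ... emit the request's own commands via intel_ring_begin() ... */

	intel_ring_reserved_space_use(ringbuf);	/* add_request: spend reserve */
	/* ... emit seqno write, user interrupt, etc. ... */
	intel_ring_reserved_space_end(ringbuf);	/* check we stayed within it */
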
@@ -2216,18 +2323,14 @@
 	if (ret)
 		return ret;
 
-	/* Preallocate the olr before touching the ring */
-	ret = i915_gem_request_alloc(ring, ring->default_context);
-	if (ret)
-		return ret;
-
 	ring->buffer->space -= num_dwords * sizeof(uint32_t);
 	return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct intel_engine_cs *ring)
+int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
@@ -2235,7 +2338,7 @@
 		return 0;
 
 	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
-	ret = intel_ring_begin(ring, num_dwords);
+	ret = intel_ring_begin(req, num_dwords);
 	if (ret)
 		return ret;
 
@@ -2252,8 +2355,6 @@
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	BUG_ON(ring->outstanding_lazy_request);
-
 	if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
 		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
 		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
@@ -2298,13 +2399,14 @@
 		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 }
 
-static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
+static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 			       u32 invalidate, u32 flush)
 {
+	struct intel_engine_cs *ring = req->ring;
 	uint32_t cmd;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -2342,20 +2444,23 @@
 }
 
 static int
-gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	bool ppgtt = USES_PPGTT(ring->dev) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+			(dispatch_flags & I915_DISPATCH_RS ?
+			 MI_BATCH_RESOURCE_STREAMER : 0));
 	intel_ring_emit(ring, lower_32_bits(offset));
 	intel_ring_emit(ring, upper_32_bits(offset));
 	intel_ring_emit(ring, MI_NOOP);
@@ -2365,20 +2470,23 @@
 }
 
 static int
-hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			     u64 offset, u32 len,
 			     unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
 	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
+			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+			(dispatch_flags & I915_DISPATCH_RS ?
+			 MI_BATCH_RESOURCE_STREAMER : 0));
 	/* bit0-7 is the length on GEN6+ */
 	intel_ring_emit(ring, offset);
 	intel_ring_advance(ring);
@@ -2387,13 +2495,14 @@
 }
 
 static int
-gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
+gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -2410,14 +2519,15 @@
 
 /* Blitter support (SandyBridge+) */
 
-static int gen6_ring_flush(struct intel_engine_cs *ring,
+static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ring->dev;
 	uint32_t cmd;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -2818,26 +2928,28 @@
 }
 
 int
-intel_ring_flush_all_caches(struct intel_engine_cs *ring)
+intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	if (!ring->gpu_caches_dirty)
 		return 0;
 
-	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+	ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
-	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
 
 	ring->gpu_caches_dirty = false;
 	return 0;
 }
 
 int
-intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
+intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	uint32_t flush_domains;
 	int ret;
 
@@ -2845,11 +2957,11 @@
 	if (ring->gpu_caches_dirty)
 		flush_domains = I915_GEM_GPU_DOMAINS;
 
-	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 	if (ret)
 		return ret;
 
-	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 
 	ring->gpu_caches_dirty = false;
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 4be66f6..2e85fda 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -12,6 +12,7 @@
  * workarounds!
  */
 #define CACHELINE_BYTES 64
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
 
 /*
  * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
@@ -105,6 +106,9 @@
 	int space;
 	int size;
 	int effective_size;
+	int reserved_size;
+	int reserved_tail;
+	bool reserved_in_use;
 
 	/** We track the position of the requests in the ring buffer, and
 	 * when each is retired we increment last_retired_head as the GPU
@@ -120,6 +124,25 @@
 struct	intel_context;
 struct drm_i915_reg_descriptor;
 
+/*
+ * We use a single page to load ctx workarounds, so all of these
+ * values are expressed in dwords.
+ *
+ * struct i915_wa_ctx_bb:
+ *  offset: specifies the batch starting position; it also allows us
+ *    to place multiple batches at different offsets based on some
+ *    criteria. Not a requirement at the moment, but it provides an
+ *    option for future use.
+ *  size: size of the batch in dwords
+ */
+struct  i915_ctx_workarounds {
+	struct i915_wa_ctx_bb {
+		u32 offset;
+		u32 size;
+	} indirect_ctx, per_ctx;
+	struct drm_i915_gem_object *obj;
+};
+
 struct  intel_engine_cs {
 	const char	*name;
 	enum intel_ring_id {
@@ -143,6 +166,7 @@
 	struct i915_gem_batch_pool batch_pool;
 
 	struct intel_hw_status_page status_page;
+	struct i915_ctx_workarounds wa_ctx;
 
 	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
@@ -152,15 +176,14 @@
 
 	int		(*init_hw)(struct intel_engine_cs *ring);
 
-	int		(*init_context)(struct intel_engine_cs *ring,
-					struct intel_context *ctx);
+	int		(*init_context)(struct drm_i915_gem_request *req);
 
 	void		(*write_tail)(struct intel_engine_cs *ring,
 				      u32 value);
-	int __must_check (*flush)(struct intel_engine_cs *ring,
+	int __must_check (*flush)(struct drm_i915_gem_request *req,
 				  u32	invalidate_domains,
 				  u32	flush_domains);
-	int		(*add_request)(struct intel_engine_cs *ring);
+	int		(*add_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
 	 * However, the up-to-date seqno is not always required and the last
@@ -171,11 +194,12 @@
 				     bool lazy_coherency);
 	void		(*set_seqno)(struct intel_engine_cs *ring,
 				     u32 seqno);
-	int		(*dispatch_execbuffer)(struct intel_engine_cs *ring,
+	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
 					       u64 offset, u32 length,
 					       unsigned dispatch_flags);
 #define I915_DISPATCH_SECURE 0x1
 #define I915_DISPATCH_PINNED 0x2
+#define I915_DISPATCH_RS     0x4
 	void		(*cleanup)(struct intel_engine_cs *ring);
 
 	/* GEN8 signal/wait table - never trust comments!
@@ -229,10 +253,10 @@
 		};
 
 		/* AKA wait() */
-		int	(*sync_to)(struct intel_engine_cs *ring,
-				   struct intel_engine_cs *to,
+		int	(*sync_to)(struct drm_i915_gem_request *to_req,
+				   struct intel_engine_cs *from,
 				   u32 seqno);
-		int	(*signal)(struct intel_engine_cs *signaller,
+		int	(*signal)(struct drm_i915_gem_request *signaller_req,
 				  /* num_dwords needed by caller */
 				  unsigned int num_dwords);
 	} semaphore;
@@ -243,14 +267,11 @@
 	struct list_head execlist_retired_req_list;
 	u8 next_context_status_buffer;
 	u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
-	int		(*emit_request)(struct intel_ringbuffer *ringbuf,
-					struct drm_i915_gem_request *request);
-	int		(*emit_flush)(struct intel_ringbuffer *ringbuf,
-				      struct intel_context *ctx,
+	int		(*emit_request)(struct drm_i915_gem_request *request);
+	int		(*emit_flush)(struct drm_i915_gem_request *request,
 				      u32 invalidate_domains,
 				      u32 flush_domains);
-	int		(*emit_bb_start)(struct intel_ringbuffer *ringbuf,
-					 struct intel_context *ctx,
+	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
 					 u64 offset, unsigned dispatch_flags);
 
 	/**
@@ -272,10 +293,6 @@
 	struct list_head request_list;
 
 	/**
-	 * Do we have some not yet emitted requests outstanding?
-	 */
-	struct drm_i915_gem_request *outstanding_lazy_request;
-	/**
 	 * Seqno of request most recently submitted to request_list.
 	 * Used exclusively by hang checker to avoid grabbing lock while
 	 * inspecting request list.
@@ -408,8 +425,8 @@
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
-int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
-int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
+int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
+int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 static inline void intel_ring_emit(struct intel_engine_cs *ring,
 				   u32 data)
 {
@@ -426,12 +443,11 @@
 void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
 int intel_ring_space(struct intel_ringbuffer *ringbuf);
 bool intel_ring_stopped(struct intel_engine_cs *ring);
-void __intel_ring_advance(struct intel_engine_cs *ring);
 
 int __must_check intel_ring_idle(struct intel_engine_cs *ring);
 void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
-int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
-int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
+int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
+int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
 
 void intel_fini_pipe_control(struct intel_engine_cs *ring);
 int intel_init_pipe_control(struct intel_engine_cs *ring);
@@ -451,11 +467,29 @@
 	return ringbuf->tail;
 }
 
-static inline struct drm_i915_gem_request *
-intel_ring_get_request(struct intel_engine_cs *ring)
-{
-	BUG_ON(ring->outstanding_lazy_request == NULL);
-	return ring->outstanding_lazy_request;
-}
+/*
+ * Arbitrary size for largest possible 'add request' sequence. The code paths
+ * are complex and variable. Empirical measurement shows that the worst case
+ * is ILK at 136 words. Reserving too much is better than reserving too little
+ * as that allows for corner cases that might have been missed. So the figure
+ * has been rounded up to 160 words.
+ */
+#define MIN_SPACE_FOR_ADD_REQUEST	160
+
+/*
+ * Reserve space in the ring to guarantee that the i915_add_request() call
+ * will always have sufficient room to do its stuff. The request creation
+ * code calls this automatically.
+ */
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
+/* Cancel the reservation, e.g. because the request is being discarded. */
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
+/* Use the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
+/* Finish with the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
+
+/* Legacy ringbuffer specific portion of reservation code: */
+int intel_ring_reserve_space(struct drm_i915_gem_request *request);
 
 #endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 1a45385..af7fdb3 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -68,6 +68,22 @@
 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
 				    int power_well_id);
 
+static void intel_power_well_enable(struct drm_i915_private *dev_priv,
+				    struct i915_power_well *power_well)
+{
+	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
+	power_well->ops->enable(dev_priv, power_well);
+	power_well->hw_enabled = true;
+}
+
+static void intel_power_well_disable(struct drm_i915_private *dev_priv,
+				     struct i915_power_well *power_well)
+{
+	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+	power_well->hw_enabled = false;
+	power_well->ops->disable(dev_priv, power_well);
+}
+
 /*
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
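
The two helpers centralise the debug logging and the hw_enabled bookkeeping, with the flag deliberately set after a successful enable but cleared before the disable, so concurrent readers never see hw_enabled on a well that is going down. The refcounted get/put paths later in this file then reduce to the shape:

	/* Sketch: refcounted power-well get/put built on the new helpers. */
	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++)		/* 0 -> 1: power it up */
			intel_power_well_enable(dev_priv, power_well);
	}

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (!--power_well->count && i915.disable_power_well) /* 1 -> 0 */
			intel_power_well_disable(dev_priv, power_well);
	}
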
@@ -281,6 +297,7 @@
 	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
 	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
 	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
 	BIT(POWER_DOMAIN_AUX_B) |                       \
 	BIT(POWER_DOMAIN_AUX_C) |			\
 	BIT(POWER_DOMAIN_AUX_D) |			\
@@ -300,6 +317,7 @@
 #define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
 	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
 	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
+	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
 	BIT(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
 	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
@@ -835,12 +853,8 @@
 	return enabled;
 }
 
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
-					  struct i915_power_well *power_well)
+static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
 {
-	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
-
-	vlv_set_power_well(dev_priv, power_well, true);
 
 	spin_lock_irq(&dev_priv->irq_lock);
 	valleyview_enable_display_irqs(dev_priv);
@@ -858,18 +872,33 @@
 	i915_redisable_vga_power_on(dev_priv->dev);
 }
 
+static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->irq_lock);
+	valleyview_disable_display_irqs(dev_priv);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	vlv_power_sequencer_reset(dev_priv);
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+					  struct i915_power_well *power_well)
+{
+	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
+
+	vlv_set_power_well(dev_priv, power_well, true);
+
+	vlv_display_power_well_init(dev_priv);
+}
+
 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
 					   struct i915_power_well *power_well)
 {
 	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
 
-	spin_lock_irq(&dev_priv->irq_lock);
-	valleyview_disable_display_irqs(dev_priv);
-	spin_unlock_irq(&dev_priv->irq_lock);
+	vlv_display_power_well_deinit(dev_priv);
 
 	vlv_set_power_well(dev_priv, power_well, false);
-
-	vlv_power_sequencer_reset(dev_priv);
 }
 
 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
@@ -882,8 +911,8 @@
 	 * display and the reference clock for VGA
 	 * hotplug / manual detection.
 	 */
-	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+		   DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
 
 	vlv_set_power_well(dev_priv, power_well, true);
@@ -933,14 +962,14 @@
 	 */
 	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
 		phy = DPIO_PHY0;
-		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-			   DPLL_REFA_CLK_ENABLE_VLV);
-		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+			   DPLL_REF_CLK_ENABLE_VLV);
+		I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
+			   DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
 	} else {
 		phy = DPIO_PHY1;
-		I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
-			   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+		I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | DPLL_VGA_MODE_DIS |
+			   DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
 	}
 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
 	vlv_set_power_well(dev_priv, power_well, true);
@@ -1042,53 +1071,29 @@
 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
 					struct i915_power_well *power_well)
 {
+	WARN_ON_ONCE(power_well->data != PIPE_A);
+
 	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
 }
 
 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
 				       struct i915_power_well *power_well)
 {
-	WARN_ON_ONCE(power_well->data != PIPE_A &&
-		     power_well->data != PIPE_B &&
-		     power_well->data != PIPE_C);
+	WARN_ON_ONCE(power_well->data != PIPE_A);
 
 	chv_set_pipe_power_well(dev_priv, power_well, true);
 
-	if (power_well->data == PIPE_A) {
-		spin_lock_irq(&dev_priv->irq_lock);
-		valleyview_enable_display_irqs(dev_priv);
-		spin_unlock_irq(&dev_priv->irq_lock);
-
-		/*
-		 * During driver initialization/resume we can avoid restoring the
-		 * part of the HW/SW state that will be inited anyway explicitly.
-		 */
-		if (dev_priv->power_domains.initializing)
-			return;
-
-		intel_hpd_init(dev_priv);
-
-		i915_redisable_vga_power_on(dev_priv->dev);
-	}
+	vlv_display_power_well_init(dev_priv);
 }
 
 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
 					struct i915_power_well *power_well)
 {
-	WARN_ON_ONCE(power_well->data != PIPE_A &&
-		     power_well->data != PIPE_B &&
-		     power_well->data != PIPE_C);
+	WARN_ON_ONCE(power_well->data != PIPE_A);
 
-	if (power_well->data == PIPE_A) {
-		spin_lock_irq(&dev_priv->irq_lock);
-		valleyview_disable_display_irqs(dev_priv);
-		spin_unlock_irq(&dev_priv->irq_lock);
-	}
+	vlv_display_power_well_deinit(dev_priv);
 
 	chv_set_pipe_power_well(dev_priv, power_well, false);
-
-	if (power_well->data == PIPE_A)
-		vlv_power_sequencer_reset(dev_priv);
 }
 
 /**
@@ -1117,11 +1122,8 @@
 	mutex_lock(&power_domains->lock);
 
 	for_each_power_well(i, power_well, BIT(domain), power_domains) {
-		if (!power_well->count++) {
-			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
-			power_well->ops->enable(dev_priv, power_well);
-			power_well->hw_enabled = true;
-		}
+		if (!power_well->count++)
+			intel_power_well_enable(dev_priv, power_well);
 	}
 
 	power_domains->domain_use_count[domain]++;
@@ -1155,11 +1157,8 @@
 	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
 		WARN_ON(!power_well->count);
 
-		if (!--power_well->count && i915.disable_power_well) {
-			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
-			power_well->hw_enabled = false;
-			power_well->ops->disable(dev_priv, power_well);
-		}
+		if (!--power_well->count && i915.disable_power_well)
+			intel_power_well_disable(dev_priv, power_well);
 	}
 
 	mutex_unlock(&power_domains->lock);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index aa2fd75..c98098e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1508,51 +1508,6 @@
 	intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
 }
 
-/* Special dpms function to support cloning between dvo/sdvo/crt. */
-static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
-{
-	struct drm_crtc *crtc;
-	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
-
-	/* dvo supports only 2 dpms states. */
-	if (mode != DRM_MODE_DPMS_ON)
-		mode = DRM_MODE_DPMS_OFF;
-
-	if (mode == connector->dpms)
-		return;
-
-	connector->dpms = mode;
-
-	/* Only need to change hw state when actually enabled */
-	crtc = intel_sdvo->base.base.crtc;
-	if (!crtc) {
-		intel_sdvo->base.connectors_active = false;
-		return;
-	}
-
-	/* We set active outputs manually below in case pipe dpms doesn't change
-	 * due to cloning. */
-	if (mode != DRM_MODE_DPMS_ON) {
-		intel_sdvo_set_active_outputs(intel_sdvo, 0);
-		if (0)
-			intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
-
-		intel_sdvo->base.connectors_active = false;
-
-		intel_crtc_update_dpms(crtc);
-	} else {
-		intel_sdvo->base.connectors_active = true;
-
-		intel_crtc_update_dpms(crtc);
-
-		if (0)
-			intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
-		intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
-	}
-
-	intel_modeset_check_state(connector->dev);
-}
-
 static enum drm_mode_status
 intel_sdvo_mode_valid(struct drm_connector *connector,
 		      struct drm_display_mode *mode)
@@ -2190,7 +2145,7 @@
 }
 
 static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
-	.dpms = intel_sdvo_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_sdvo_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_sdvo_set_property,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 8193a35..9d8af2f 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -75,10 +75,8 @@
  * until a subsequent call to intel_pipe_update_end(). That is done to
  * avoid random delays. The value written to @start_vbl_count should be
  * supplied to intel_pipe_update_end() for error checking.
- *
- * Return: true if the call was successful
  */
-bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
+void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
 {
 	struct drm_device *dev = crtc->base.dev;
 	const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
@@ -96,13 +94,14 @@
 	min = vblank_start - usecs_to_scanlines(mode, 100);
 	max = vblank_start - 1;
 
+	local_irq_disable();
+	*start_vbl_count = 0;
+
 	if (min <= 0 || max <= 0)
-		return false;
+		return;
 
 	if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
-		return false;
-
-	local_irq_disable();
+		return;
 
 	trace_i915_pipe_update_start(crtc, min, max);
 
@@ -138,8 +137,6 @@
 	*start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
 
 	trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count);
-
-	return true;
 }
 
 /**
@@ -161,7 +158,7 @@
 
 	local_irq_enable();
 
-	if (start_vbl_count != end_vbl_count)
+	if (start_vbl_count && start_vbl_count != end_vbl_count)
 		DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n",
 			  pipe_name(pipe), start_vbl_count, end_vbl_count);
 }
@@ -182,7 +179,8 @@
 	const int plane = intel_plane->plane + 1;
 	u32 plane_ctl, stride_div, stride;
 	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-	const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+	const struct drm_intel_sprite_colorkey *key =
+		&to_intel_plane_state(drm_plane->state)->ckey;
 	unsigned long surf_addr;
 	u32 tile_height, plane_offset, plane_size;
 	unsigned int rotation;
@@ -272,7 +270,7 @@
 }
 
 static void
-skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
+skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 {
 	struct drm_device *dev = dplane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -344,7 +342,8 @@
 	u32 sprctl;
 	unsigned long sprsurf_offset, linear_offset;
 	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-	const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+	const struct drm_intel_sprite_colorkey *key =
+		&to_intel_plane_state(dplane->state)->ckey;
 
 	sprctl = SP_ENABLE;
 
@@ -400,10 +399,6 @@
 	if (obj->tiling_mode != I915_TILING_NONE)
 		sprctl |= SP_TILED;
 
-	intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
-				       pixel_size, true,
-				       src_w != crtc_w || src_h != crtc_h);
-
 	/* Sizes are 0 based */
 	src_w--;
 	src_h--;
@@ -411,7 +406,8 @@
 	crtc_h--;
 
 	linear_offset = y * fb->pitches[0] + x * pixel_size;
-	sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
+	sprsurf_offset = intel_gen4_compute_page_offset(dev_priv,
+							&x, &y,
 							obj->tiling_mode,
 							pixel_size,
 							fb->pitches[0]);
@@ -455,7 +451,7 @@
 }
 
 static void
-vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
+vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 {
 	struct drm_device *dev = dplane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -467,8 +463,6 @@
 
 	I915_WRITE(SPSURF(pipe, plane), 0);
 	POSTING_READ(SPSURF(pipe, plane));
-
-	intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
 }
 
 static void
@@ -487,7 +481,8 @@
 	u32 sprctl, sprscale = 0;
 	unsigned long sprsurf_offset, linear_offset;
 	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-	const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+	const struct drm_intel_sprite_colorkey *key =
+		&to_intel_plane_state(plane->state)->ckey;
 
 	sprctl = SPRITE_ENABLE;
 
@@ -546,7 +541,8 @@
 
 	linear_offset = y * fb->pitches[0] + x * pixel_size;
 	sprsurf_offset =
-		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+		intel_gen4_compute_page_offset(dev_priv,
+					       &x, &y, obj->tiling_mode,
 					       pixel_size, fb->pitches[0]);
 	linear_offset -= sprsurf_offset;
 
@@ -595,7 +591,7 @@
 }
 
 static void
-ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
+ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -627,7 +623,8 @@
 	unsigned long dvssurf_offset, linear_offset;
 	u32 dvscntr, dvsscale;
 	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-	const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
+	const struct drm_intel_sprite_colorkey *key =
+		&to_intel_plane_state(plane->state)->ckey;
 
 	dvscntr = DVS_ENABLE;
 
@@ -682,7 +679,8 @@
 
 	linear_offset = y * fb->pitches[0] + x * pixel_size;
 	dvssurf_offset =
-		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+		intel_gen4_compute_page_offset(dev_priv,
+					       &x, &y, obj->tiling_mode,
 					       pixel_size, fb->pitches[0]);
 	linear_offset -= dvssurf_offset;
 
@@ -722,7 +720,7 @@
 }
 
 static void
-ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
+ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
 	struct drm_device *dev = plane->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -739,11 +737,12 @@
 
 static int
 intel_check_sprite_plane(struct drm_plane *plane,
+			 struct intel_crtc_state *crtc_state,
 			 struct intel_plane_state *state)
 {
 	struct drm_device *dev = plane->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
-	struct intel_crtc_state *crtc_state;
+	struct drm_crtc *crtc = state->base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct drm_framebuffer *fb = state->base.fb;
 	int crtc_x, crtc_y;
@@ -756,15 +755,10 @@
 	int max_scale, min_scale;
 	bool can_scale;
 	int pixel_size;
-	int ret;
-
-	intel_crtc = intel_crtc ? intel_crtc : to_intel_crtc(plane->crtc);
-	crtc_state = state->base.state ?
-		intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;
 
 	if (!fb) {
 		state->visible = false;
-		goto finish;
+		return 0;
 	}
 
 	/* Don't modify another pipe's plane */
@@ -782,7 +776,7 @@
 	/* setup can_scale, min_scale, max_scale */
 	if (INTEL_INFO(dev)->gen >= 9) {
 		/* use scaler when colorkey is not required */
-		if (intel_plane->ckey.flags == I915_SET_COLORKEY_NONE) {
+		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
 			can_scale = 1;
 			min_scale = 1;
 			max_scale = skl_max_scale(intel_crtc, crtc_state);
@@ -802,7 +796,6 @@
 	 * coordinates and sizes. We probably need some way to decide whether
 	 * more strict checking should be done instead.
 	 */
-
 	drm_rect_rotate(src, fb->width << 16, fb->height << 16,
 			state->base.rotation);
 
@@ -812,7 +805,7 @@
 	vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
 	BUG_ON(vscale < 0);
 
-	state->visible =  drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
+	state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
 
 	crtc_x = dst->x1;
 	crtc_y = dst->y1;
@@ -917,36 +910,6 @@
 	dst->y1 = crtc_y;
 	dst->y2 = crtc_y + crtc_h;
 
-finish:
-	/*
-	 * If the sprite is completely covering the primary plane,
-	 * we can disable the primary and save power.
-	 */
-	if (intel_crtc->active) {
-		intel_crtc->atomic.fb_bits |=
-			INTEL_FRONTBUFFER_SPRITE(intel_crtc->pipe);
-
-		if (intel_wm_need_update(plane, &state->base))
-			intel_crtc->atomic.update_wm = true;
-
-		if (!state->visible) {
-			/*
-			 * Avoid underruns when disabling the sprite.
-			 * FIXME remove once watermark updates are done properly.
-			 */
-			intel_crtc->atomic.wait_vblank = true;
-			intel_crtc->atomic.update_sprite_watermarks |=
-				(1 << drm_plane_index(plane));
-		}
-	}
-
-	if (INTEL_INFO(dev)->gen >= 9) {
-		ret = skl_update_scaler_users(intel_crtc, crtc_state, intel_plane,
-			state, 0);
-		if (ret)
-			return ret;
-	}
-
 	return 0;
 }
 
@@ -955,34 +918,27 @@
 			  struct intel_plane_state *state)
 {
 	struct drm_crtc *crtc = state->base.crtc;
-	struct intel_crtc *intel_crtc;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct drm_framebuffer *fb = state->base.fb;
-	int crtc_x, crtc_y;
-	unsigned int crtc_w, crtc_h;
-	uint32_t src_x, src_y, src_w, src_h;
 
 	crtc = crtc ? crtc : plane->crtc;
-	intel_crtc = to_intel_crtc(crtc);
 
 	plane->fb = fb;
 
-	if (intel_crtc->active) {
-		if (state->visible) {
-			crtc_x = state->dst.x1;
-			crtc_y = state->dst.y1;
-			crtc_w = drm_rect_width(&state->dst);
-			crtc_h = drm_rect_height(&state->dst);
-			src_x = state->src.x1 >> 16;
-			src_y = state->src.y1 >> 16;
-			src_w = drm_rect_width(&state->src) >> 16;
-			src_h = drm_rect_height(&state->src) >> 16;
-			intel_plane->update_plane(plane, crtc, fb,
-						  crtc_x, crtc_y, crtc_w, crtc_h,
-						  src_x, src_y, src_w, src_h);
-		} else {
-			intel_plane->disable_plane(plane, crtc, false);
-		}
+	if (!crtc->state->active)
+		return;
+
+	if (state->visible) {
+		intel_plane->update_plane(plane, crtc, fb,
+					  state->dst.x1, state->dst.y1,
+					  drm_rect_width(&state->dst),
+					  drm_rect_height(&state->dst),
+					  state->src.x1 >> 16,
+					  state->src.y1 >> 16,
+					  drm_rect_width(&state->src) >> 16,
+					  drm_rect_height(&state->src) >> 16);
+	} else {
+		intel_plane->disable_plane(plane, crtc);
 	}
 }
 
@@ -991,7 +947,9 @@
 {
 	struct drm_intel_sprite_colorkey *set = data;
 	struct drm_plane *plane;
-	struct intel_plane *intel_plane;
+	struct drm_plane_state *plane_state;
+	struct drm_atomic_state *state;
+	struct drm_modeset_acquire_ctx ctx;
 	int ret = 0;
 
 	/* Make sure we don't try to enable both src & dest simultaneously */
@@ -1002,52 +960,43 @@
 	    set->flags & I915_SET_COLORKEY_DESTINATION)
 		return -EINVAL;
 
-	drm_modeset_lock_all(dev);
-
 	plane = drm_plane_find(dev, set->plane_id);
-	if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
-		ret = -ENOENT;
-		goto out_unlock;
+	if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
+		return -ENOENT;
+
+	drm_modeset_acquire_init(&ctx, 0);
+
+	state = drm_atomic_state_alloc(plane->dev);
+	if (!state) {
+		ret = -ENOMEM;
+		goto out;
 	}
+	state->acquire_ctx = &ctx;
 
-	intel_plane = to_intel_plane(plane);
-
-	if (INTEL_INFO(dev)->gen >= 9) {
-		/* plane scaling and colorkey are mutually exclusive */
-		if (to_intel_plane_state(plane->state)->scaler_id >= 0) {
-			DRM_ERROR("colorkey not allowed with scaler\n");
-			ret = -EINVAL;
-			goto out_unlock;
+	while (1) {
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		ret = PTR_ERR_OR_ZERO(plane_state);
+		if (!ret) {
+			to_intel_plane_state(plane_state)->ckey = *set;
+			ret = drm_atomic_commit(state);
 		}
+
+		if (ret != -EDEADLK)
+			break;
+
+		drm_atomic_state_clear(state);
+		drm_modeset_backoff(&ctx);
 	}
 
-	intel_plane->ckey = *set;
+	if (ret)
+		drm_atomic_state_free(state);
 
-	/*
-	 * The only way this could fail would be due to
-	 * the current plane state being unsupportable already,
-	 * and we dont't consider that an error for the
-	 * colorkey ioctl. So just ignore any error.
-	 */
-	intel_plane_restore(plane);
-
-out_unlock:
-	drm_modeset_unlock_all(dev);
+out:
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
 	return ret;
 }
 
-int intel_plane_restore(struct drm_plane *plane)
-{
-	if (!plane->crtc || !plane->state->fb)
-		return 0;
-
-	return drm_plane_helper_update(plane, plane->crtc, plane->state->fb,
-				       plane->state->crtc_x, plane->state->crtc_y,
-				       plane->state->crtc_w, plane->state->crtc_h,
-				       plane->state->src_x, plane->state->src_y,
-				       plane->state->src_w, plane->state->src_h);
-}
-
 static const uint32_t ilk_plane_formats[] = {
 	DRM_FORMAT_XRGB8888,
 	DRM_FORMAT_YUYV,
@@ -1172,9 +1121,9 @@
 
 	intel_plane->pipe = pipe;
 	intel_plane->plane = plane;
+	intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe);
 	intel_plane->check_plane = intel_check_sprite_plane;
 	intel_plane->commit_plane = intel_commit_sprite_plane;
-	intel_plane->ckey.flags = I915_SET_COLORKEY_NONE;
 	possible_crtcs = (1 << pipe);
 	ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
 				       &intel_plane_funcs,
@@ -1189,6 +1138,6 @@
 
 	drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
 
- out:
+out:
 	return ret;
 }
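
The colorkey ioctl above is this section's cleanest example of the atomic-modeset conversion: instead of drm_modeset_lock_all(), it allocates a drm_atomic_state, fetches the plane state under an acquire context, commits, and whenever lock contention surfaces as -EDEADLK it clears the state, backs off, and retries from scratch. Below is a minimal standalone C model of that backoff loop; try_commit() and backoff() are hypothetical stand-ins for drm_atomic_commit() and drm_atomic_state_clear()/drm_modeset_backoff(), not the real DRM API.

#include <errno.h>
#include <stdio.h>

static int attempts;

/* Fails with -EDEADLK twice to simulate lock contention, then succeeds. */
static int try_commit(void)
{
	return ++attempts < 3 ? -EDEADLK : 0;
}

/* In DRM this would drop the contended locks and wait for their holder. */
static void backoff(void)
{
	printf("attempt %d hit a lock conflict, backing off\n", attempts);
}

int main(void)
{
	int ret;

	while (1) {
		ret = try_commit();
		if (ret != -EDEADLK)
			break;		/* success, or a real error */
		backoff();		/* drop locks, retry from scratch */
	}
	printf("commit returned %d after %d attempts\n", ret, attempts);
	return 0;
}

The key property is that every retry starts from a cleared state, so a half-acquired lock set can never leak into the next attempt.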
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 8b9d325..0568ae6 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1509,7 +1509,7 @@
 }
 
 static const struct drm_connector_funcs intel_tv_connector_funcs = {
-	.dpms = intel_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_tv_detect,
 	.destroy = intel_tv_destroy,
 	.set_property = intel_tv_set_property,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 260389a..9d3c2e4 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1467,20 +1467,80 @@
 	return ret;
 }
 
+static int wait_for_register(struct drm_i915_private *dev_priv,
+			     const u32 reg,
+			     const u32 mask,
+			     const u32 value,
+			     const unsigned long timeout_ms)
+{
+	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
+}
+
+static int gen8_do_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *engine;
+	int i;
+
+	for_each_ring(engine, dev_priv, i) {
+		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
+			   _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+
+		if (wait_for_register(dev_priv,
+				      RING_RESET_CTL(engine->mmio_base),
+				      RESET_CTL_READY_TO_RESET,
+				      RESET_CTL_READY_TO_RESET,
+				      700)) {
+			DRM_ERROR("%s: reset request timeout\n", engine->name);
+			goto not_ready;
+		}
+	}
+
+	return gen6_do_reset(dev);
+
+not_ready:
+	for_each_ring(engine, dev_priv, i)
+		I915_WRITE(RING_RESET_CTL(engine->mmio_base),
+			   _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+
+	return -EIO;
+}
+
+static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
+{
+	if (!i915.reset)
+		return NULL;
+
+	if (INTEL_INFO(dev)->gen >= 8)
+		return gen8_do_reset;
+	else if (INTEL_INFO(dev)->gen >= 6)
+		return gen6_do_reset;
+	else if (IS_GEN5(dev))
+		return ironlake_do_reset;
+	else if (IS_G4X(dev))
+		return g4x_do_reset;
+	else if (IS_G33(dev))
+		return g33_do_reset;
+	else if (INTEL_INFO(dev)->gen >= 3)
+		return i915_do_reset;
+	else
+		return NULL;
+}
+
 int intel_gpu_reset(struct drm_device *dev)
 {
-	if (INTEL_INFO(dev)->gen >= 6)
-		return gen6_do_reset(dev);
-	else if (IS_GEN5(dev))
-		return ironlake_do_reset(dev);
-	else if (IS_G4X(dev))
-		return g4x_do_reset(dev);
-	else if (IS_G33(dev))
-		return g33_do_reset(dev);
-	else if (INTEL_INFO(dev)->gen >= 3)
-		return i915_do_reset(dev);
-	else
+	int (*reset)(struct drm_device *);
+
+	reset = intel_get_gpu_reset(dev);
+	if (reset == NULL)
 		return -ENODEV;
+
+	return reset(dev);
+}
+
+bool intel_has_gpu_reset(struct drm_device *dev)
+{
+	return intel_get_gpu_reset(dev) != NULL;
 }
 
 void intel_uncore_check_errors(struct drm_device *dev)
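
The intel_uncore.c rework above factors reset selection into intel_get_gpu_reset(), which honors the i915.reset module parameter and returns a function pointer chosen by hardware generation; intel_gpu_reset() calls it, and intel_has_gpu_reset() merely tests it for NULL, so the capability check and the action can never disagree. A sketch of the same dispatch pattern follows, using invented generation numbers and handlers rather than the real i915 routines.

#include <errno.h>
#include <stdio.h>

struct device { int gen; };

static int gen8_reset(struct device *d) { (void)d; printf("gen8-style reset\n"); return 0; }
static int gen6_reset(struct device *d) { (void)d; printf("gen6-style reset\n"); return 0; }

typedef int (*reset_fn)(struct device *);

/* NULL means "no reset support"; both callers branch on this one answer. */
static reset_fn get_gpu_reset(struct device *d)
{
	if (d->gen >= 8)
		return gen8_reset;
	if (d->gen >= 6)
		return gen6_reset;
	return NULL;
}

static int gpu_reset(struct device *d)
{
	reset_fn reset = get_gpu_reset(d);

	return reset ? reset(d) : -ENODEV;
}

static int has_gpu_reset(struct device *d)
{
	return get_gpu_reset(d) != NULL;
}

int main(void)
{
	struct device dev = { .gen = 8 };

	printf("reset supported: %d\n", has_gpu_reset(&dev));
	return gpu_reset(&dev);
}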
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index a3ecf10..644edf6 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -75,6 +75,11 @@
 	},
 };
 
+/*
+ * Resistance term: 133 Ohm cfg
+ * PREEMP config: 0.00
+ * TX/CK level: 10
+ */
 static const struct dw_hdmi_phy_config imx_phy_config[] = {
 	/*pixelclk   symbol   term   vlev */
 	{ 148500000, 0x800d, 0x0005, 0x01ad},
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 9f9780b..4f2068f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -70,18 +70,22 @@
 	BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev);
 	BUG_ON(pixels_current == pixels_prev);
 
+	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (!obj)
+		return -ENOENT;
+
 	ret = mgag200_bo_reserve(pixels_1, true);
 	if (ret) {
 		WREG8(MGA_CURPOSXL, 0);
 		WREG8(MGA_CURPOSXH, 0);
-		return ret;
+		goto out_unref;
 	}
 	ret = mgag200_bo_reserve(pixels_2, true);
 	if (ret) {
 		WREG8(MGA_CURPOSXL, 0);
 		WREG8(MGA_CURPOSXH, 0);
 		mgag200_bo_unreserve(pixels_1);
-		return ret;
+		goto out_unreserve1;
 	}
 
 	if (!handle) {
@@ -106,16 +110,6 @@
 		}
 	}
 
-	mutex_lock(&dev->struct_mutex);
-	obj = drm_gem_object_lookup(dev, file_priv, handle);
-	if (!obj) {
-		mutex_unlock(&dev->struct_mutex);
-		ret = -ENOENT;
-		goto out1;
-	}
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
-
 	bo = gem_to_mga_bo(obj);
 	ret = mgag200_bo_reserve(bo, true);
 	if (ret) {
@@ -252,7 +246,11 @@
 	if (ret)
 		mga_hide_cursor(mdev);
 	mgag200_bo_unreserve(pixels_1);
+out_unreserve1:
 	mgag200_bo_unreserve(pixels_2);
+out_unref:
+	drm_gem_object_unreference_unlocked(obj);
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 9774599..b0af774 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -35,6 +35,7 @@
 	{ PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
 	{ PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH },
 	{ PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
+	{ PCI_VENDOR_ID_MATROX, 0x536, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EW3 },
 	{0,}
 };
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index e9eea1d..912151c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -180,6 +180,7 @@
 	G200_EV,
 	G200_EH,
 	G200_ER,
+	G200_EW3,
 };
 
 #define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index c36b830..87de15e 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -101,7 +101,7 @@
 			 const struct fb_fillrect *rect)
 {
 	struct mga_fbdev *mfbdev = info->par;
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 	mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
 			 rect->height);
 }
@@ -110,7 +110,7 @@
 			 const struct fb_copyarea *area)
 {
 	struct mga_fbdev *mfbdev = info->par;
-	sys_copyarea(info, area);
+	drm_fb_helper_sys_copyarea(info, area);
 	mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
 			 area->height);
 }
@@ -119,7 +119,7 @@
 			  const struct fb_image *image)
 {
 	struct mga_fbdev *mfbdev = info->par;
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 	mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
 			 image->height);
 }
@@ -166,8 +166,6 @@
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_gem_object *gobj = NULL;
-	struct device *device = &dev->pdev->dev;
-	struct mgag200_bo *bo;
 	int ret;
 	void *sysram;
 	int size;
@@ -185,15 +183,14 @@
 		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
 		return ret;
 	}
-	bo = gem_to_mga_bo(gobj);
 
 	sysram = vmalloc(size);
 	if (!sysram)
 		return -ENOMEM;
 
-	info = framebuffer_alloc(0, device);
-	if (info == NULL)
-		return -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info))
+		return PTR_ERR(info);
 
 	info->par = mfbdev;
 
@@ -208,14 +205,6 @@
 
 	/* setup helper */
 	mfbdev->helper.fb = fb;
-	mfbdev->helper.fbdev = info;
-
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
-		ret = -ENOMEM;
-		goto out;
-	}
 
 	strcpy(info->fix.id, "mgadrmfb");
 
@@ -223,11 +212,6 @@
 	info->fbops = &mgag200fb_ops;
 
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out;
-	}
 	info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
 	info->apertures->ranges[0].size = mdev->mc.vram_size;
 
@@ -242,24 +226,15 @@
 	DRM_DEBUG_KMS("allocated %dx%d\n",
 		      fb->width, fb->height);
 	return 0;
-out:
-	return ret;
 }
 
 static int mga_fbdev_destroy(struct drm_device *dev,
 				struct mga_fbdev *mfbdev)
 {
-	struct fb_info *info;
 	struct mga_framebuffer *mfb = &mfbdev->mfb;
 
-	if (mfbdev->helper.fbdev) {
-		info = mfbdev->helper.fbdev;
-
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&mfbdev->helper);
+	drm_fb_helper_release_fbi(&mfbdev->helper);
 
 	if (mfb->obj) {
 		drm_gem_object_unreference_unlocked(mfb->obj);
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index d3dcf54..10535e3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -101,6 +101,7 @@
 	case G200_SE_B:
 	case G200_EV:
 	case G200_WB:
+	case G200_EW3:
 		data = 1;
 		clock = 2;
 		break;
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index f6b283b..de06388 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -82,12 +82,19 @@
 	int orig;
 	int test1, test2;
 	int orig1, orig2;
+	unsigned int vram_size;
 
 	/* Probe */
 	orig = ioread16(mem);
 	iowrite16(0, mem);
 
-	for (offset = 0x100000; offset < mdev->mc.vram_window; offset += 0x4000) {
+	vram_size = mdev->mc.vram_window;
+
+	if ((mdev->type == G200_EW3) && (vram_size >= 0x1000000)) {
+		vram_size = vram_size - 0x400000;
+	}
+
+	for (offset = 0x100000; offset < vram_size; offset += 0x4000) {
 		orig1 = ioread8(mem + offset);
 		orig2 = ioread8(mem + offset + 0x100);
 
@@ -345,23 +352,15 @@
 		     uint64_t *offset)
 {
 	struct drm_gem_object *obj;
-	int ret;
 	struct mgag200_bo *bo;
 
-	mutex_lock(&dev->struct_mutex);
 	obj = drm_gem_object_lookup(dev, file, handle);
-	if (obj == NULL) {
-		ret = -ENOENT;
-		goto out_unlock;
-	}
+	if (obj == NULL)
+		return -ENOENT;
 
 	bo = gem_to_mga_bo(obj);
 	*offset = mgag200_bo_mmap_offset(bo);
 
-	drm_gem_object_unreference(obj);
-	ret = 0;
-out_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-
+	drm_gem_object_unreference_unlocked(obj);
+	return 0;
 }
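
The mgag200_main.c hunk above trims the probed window on G200_EW3 because the top 4MB of its aperture is reserved. The probe itself is a classic write/read-back size detection: walk the aperture in fixed steps, write a test pattern, and treat the first offset that fails to read back as the end of real VRAM (the driver also saves and restores the original bytes, which is omitted here). A self-contained model under those assumptions, with an ordinary buffer standing in for the aperture:

#include <stdio.h>
#include <stdlib.h>

#define APERTURE	(16u << 20)	/* 16 MiB PCI window */
#define REAL_VRAM	(6u << 20)	/* fake device has 6 MiB */
#define STEP		0x4000u

static unsigned char *mem;

/* Writes past real VRAM are silently dropped, as on real hardware. */
static void write8(unsigned off, unsigned char v)
{
	if (off < REAL_VRAM)
		mem[off] = v;
}

static unsigned char read8(unsigned off)
{
	return off < REAL_VRAM ? mem[off] : 0xff;
}

int main(void)
{
	unsigned offset;

	mem = calloc(1, REAL_VRAM);
	if (!mem)
		return 1;

	for (offset = 0x100000; offset < APERTURE; offset += STEP) {
		write8(offset, 0xa5);
		if (read8(offset) != 0xa5)
			break;	/* first failing offset == VRAM size */
	}
	printf("detected %u MiB of VRAM\n", offset >> 20);
	free(mem);
	return 0;
}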
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index ad4b901..c99d3fe 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -104,6 +104,8 @@
 	return true;
 }
 
+#define P_ARRAY_SIZE 9
+
 static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
 {
 	unsigned int vcomax, vcomin, pllreffreq;
@@ -111,37 +113,97 @@
 	unsigned int testp, testm, testn;
 	unsigned int p, m, n;
 	unsigned int computed;
+	unsigned int pvalues_e4[P_ARRAY_SIZE] = {16, 14, 12, 10, 8, 6, 4, 2, 1};
+	unsigned int fvv;
+	unsigned int i;
 
-	m = n = p = 0;
-	vcomax = 320000;
-	vcomin = 160000;
-	pllreffreq = 25000;
+	if (mdev->unique_rev_id <= 0x03) {
 
-	delta = 0xffffffff;
-	permitteddelta = clock * 5 / 1000;
+		m = n = p = 0;
+		vcomax = 320000;
+		vcomin = 160000;
+		pllreffreq = 25000;
 
-	for (testp = 8; testp > 0; testp /= 2) {
-		if (clock * testp > vcomax)
-			continue;
-		if (clock * testp < vcomin)
-			continue;
+		delta = 0xffffffff;
+		permitteddelta = clock * 5 / 1000;
 
-		for (testn = 17; testn < 256; testn++) {
-			for (testm = 1; testm < 32; testm++) {
-				computed = (pllreffreq * testn) /
-					(testm * testp);
-				if (computed > clock)
-					tmpdelta = computed - clock;
-				else
-					tmpdelta = clock - computed;
-				if (tmpdelta < delta) {
-					delta = tmpdelta;
-					m = testm - 1;
-					n = testn - 1;
-					p = testp - 1;
+		for (testp = 8; testp > 0; testp /= 2) {
+			if (clock * testp > vcomax)
+				continue;
+			if (clock * testp < vcomin)
+				continue;
+
+			for (testn = 17; testn < 256; testn++) {
+				for (testm = 1; testm < 32; testm++) {
+					computed = (pllreffreq * testn) /
+						(testm * testp);
+					if (computed > clock)
+						tmpdelta = computed - clock;
+					else
+						tmpdelta = clock - computed;
+					if (tmpdelta < delta) {
+						delta = tmpdelta;
+						m = testm - 1;
+						n = testn - 1;
+						p = testp - 1;
+					}
 				}
 			}
 		}
+	} else {
+		/* unique_rev_id >= 0x04: wider VCO range, computed at 2x clock */
+
+		m = n = p = 0;
+		vcomax        = 1600000;
+		vcomin        = 800000;
+		pllreffreq    = 25000;
+
+		if (clock < 25000)
+			clock = 25000;
+
+		clock = clock * 2;
+
+		delta = 0xFFFFFFFF;
+		/* Permitted delta is 0.5%, per the VESA specification */
+		permitteddelta = clock * 5 / 1000;
+
+		for (i = 0; i < P_ARRAY_SIZE; i++) {
+			testp = pvalues_e4[i];
+
+			if ((clock * testp) > vcomax)
+				continue;
+			if ((clock * testp) < vcomin)
+				continue;
+
+			for (testn = 50; testn <= 256; testn++) {
+				for (testm = 1; testm <= 32; testm++) {
+					computed = (pllreffreq * testn) /
+						(testm * testp);
+					if (computed > clock)
+						tmpdelta = computed - clock;
+					else
+						tmpdelta = clock - computed;
+
+					if (tmpdelta < delta) {
+						delta = tmpdelta;
+						m = testm - 1;
+						n = testn - 1;
+						p = testp - 1;
+					}
+				}
+			}
+		}
+
+		fvv = pllreffreq * testn / testm;
+		fvv = (fvv - 800000) / 50000;
+
+		if (fvv > 15)
+			fvv = 15;
+
+		p |= (fvv << 4);
+		m |= 0x80;
+
+		clock = clock / 2;
 	}
 
 	if (delta > permitteddelta) {
@@ -158,8 +220,8 @@
 static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 {
 	unsigned int vcomax, vcomin, pllreffreq;
-	unsigned int delta, tmpdelta, permitteddelta;
-	unsigned int testp, testm, testn;
+	unsigned int delta, tmpdelta;
+	unsigned int testp, testm, testn, testp2;
 	unsigned int p, m, n;
 	unsigned int computed;
 	int i, j, tmpcount, vcount;
@@ -167,32 +229,71 @@
 	u8 tmp;
 
 	m = n = p = 0;
-	vcomax = 550000;
-	vcomin = 150000;
-	pllreffreq = 48000;
 
 	delta = 0xffffffff;
-	permitteddelta = clock * 5 / 1000;
 
-	for (testp = 1; testp < 9; testp++) {
-		if (clock * testp > vcomax)
-			continue;
-		if (clock * testp < vcomin)
-			continue;
+	if (mdev->type == G200_EW3) {
 
-		for (testm = 1; testm < 17; testm++) {
-			for (testn = 1; testn < 151; testn++) {
-				computed = (pllreffreq * testn) /
-					(testm * testp);
-				if (computed > clock)
-					tmpdelta = computed - clock;
-				else
-					tmpdelta = clock - computed;
-				if (tmpdelta < delta) {
-					delta = tmpdelta;
-					n = testn - 1;
-					m = (testm - 1) | ((n >> 1) & 0x80);
-					p = testp - 1;
+		vcomax = 800000;
+		vcomin = 400000;
+		pllreffreq = 25000;
+
+		for (testp = 1; testp < 8; testp++) {
+			for (testp2 = 1; testp2 < 8; testp2++) {
+				if (testp < testp2)
+					continue;
+				if ((clock * testp * testp2) > vcomax)
+					continue;
+				if ((clock * testp * testp2) < vcomin)
+					continue;
+				for (testm = 1; testm < 26; testm++) {
+					for (testn = 32; testn < 2048; testn++) {
+						computed = (pllreffreq * testn) /
+							(testm * testp * testp2);
+						if (computed > clock)
+							tmpdelta = computed - clock;
+						else
+							tmpdelta = clock - computed;
+						if (tmpdelta < delta) {
+							delta = tmpdelta;
+							m = ((testn & 0x100) >> 1) |
+								(testm);
+							n = (testn & 0xFF);
+							p = ((testn & 0x600) >> 3) |
+								(testp2 << 3) |
+								(testp);
+						}
+					}
+				}
+			}
+		}
+	} else {
+
+		vcomax = 550000;
+		vcomin = 150000;
+		pllreffreq = 48000;
+
+		for (testp = 1; testp < 9; testp++) {
+			if (clock * testp > vcomax)
+				continue;
+			if (clock * testp < vcomin)
+				continue;
+
+			for (testm = 1; testm < 17; testm++) {
+				for (testn = 1; testn < 151; testn++) {
+					computed = (pllreffreq * testn) /
+						(testm * testp);
+					if (computed > clock)
+						tmpdelta = computed - clock;
+					else
+						tmpdelta = clock - computed;
+					if (tmpdelta < delta) {
+						delta = tmpdelta;
+						n = testn - 1;
+						m = (testm - 1) |
+							((n >> 1) & 0x80);
+						p = testp - 1;
+					}
 				}
 			}
 		}
@@ -298,7 +399,7 @@
 static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 {
 	unsigned int vcomax, vcomin, pllreffreq;
-	unsigned int delta, tmpdelta, permitteddelta;
+	unsigned int delta, tmpdelta;
 	unsigned int testp, testm, testn;
 	unsigned int p, m, n;
 	unsigned int computed;
@@ -310,7 +411,6 @@
 	pllreffreq = 50000;
 
 	delta = 0xffffffff;
-	permitteddelta = clock * 5 / 1000;
 
 	for (testp = 16; testp > 0; testp--) {
 		if (clock * testp > vcomax)
@@ -392,7 +492,7 @@
 static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
 {
 	unsigned int vcomax, vcomin, pllreffreq;
-	unsigned int delta, tmpdelta, permitteddelta;
+	unsigned int delta, tmpdelta;
 	unsigned int testp, testm, testn;
 	unsigned int p, m, n;
 	unsigned int computed;
@@ -406,7 +506,6 @@
 	pllreffreq = 33333;
 
 	delta = 0xffffffff;
-	permitteddelta = clock * 5 / 1000;
 
 	for (testp = 16; testp > 0; testp >>= 1) {
 		if (clock * testp > vcomax)
@@ -572,6 +671,7 @@
 		return mga_g200se_set_plls(mdev, clock);
 		break;
 	case G200_WB:
+	case G200_EW3:
 		return mga_g200wb_set_plls(mdev, clock);
 		break;
 	case G200_EV:
@@ -823,6 +923,7 @@
 		option2 = 0x00008000;
 		break;
 	case G200_WB:
+	case G200_EW3:
 		dacvalue[MGA1064_VREF_CTL] = 0x07;
 		option = 0x41049120;
 		option2 = 0x0000b000;
@@ -878,7 +979,10 @@
 		if (IS_G200_SE(mdev) &&
 		    ((i == 0x2c) || (i == 0x2d) || (i == 0x2e)))
 			continue;
-		if ((mdev->type == G200_EV || mdev->type == G200_WB || mdev->type == G200_EH) &&
+		if ((mdev->type == G200_EV ||
+		    mdev->type == G200_WB ||
+		    mdev->type == G200_EH ||
+		    mdev->type == G200_EW3) &&
 		    (i >= 0x44) && (i <= 0x4e))
 			continue;
 
@@ -980,7 +1084,7 @@
 	else
 		ext_vga[3] = ((1 << bppshift) - 1) | 0x80;
 	ext_vga[4] = 0;
-	if (mdev->type == G200_WB)
+	if (mdev->type == G200_WB || mdev->type == G200_EW3)
 		ext_vga[1] |= 0x88;
 
 	/* Set pixel clocks */
@@ -996,6 +1100,9 @@
 	if (mdev->type == G200_ER)
 		WREG_ECRT(0x24, 0x5);
 
+	if (mdev->type == G200_EW3)
+		WREG_ECRT(0x34, 0x5);
+
 	if (mdev->type == G200_EV) {
 		WREG_ECRT(6, 0);
 	}
@@ -1208,7 +1315,7 @@
 		WREG_SEQ(1, tmp | 0x20);
 	}
 
-	if (mdev->type == G200_WB)
+	if (mdev->type == G200_WB || mdev->type == G200_EW3)
 		mga_g200wb_prepare(crtc);
 
 	WREG_CRT(17, 0);
@@ -1225,7 +1332,7 @@
 	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	u8 tmp;
 
-	if (mdev->type == G200_WB)
+	if (mdev->type == G200_WB || mdev->type == G200_EW3)
 		mga_g200wb_commit(crtc);
 
 	if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
@@ -1495,7 +1602,7 @@
 			if (mga_vga_calculate_mode_bandwidth(mode, bpp)
 				> (24400 * 1024))
 				return MODE_BANDWIDTH;
-		} else if (mdev->unique_rev_id >= 0x02) {
+		} else if (mdev->unique_rev_id == 0x02) {
 			if (mode->hdisplay > 1920)
 				return MODE_VIRTUAL_X;
 			if (mode->vdisplay > 1200)
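
All of the mga_g200*_set_plls() variants in the mgag200_mode.c diff above share one algorithm: brute-force every divider triple (N, M, P), reject any whose VCO frequency clock*P falls outside the part's legal window, and keep the triple whose output pllreffreq*N/(M*P) lands closest to the requested clock; the G200SE path additionally rejects a best result more than 0.5% off (permitteddelta). A runnable sketch of that search with illustrative ranges, not those of any specific G200 variant:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	const unsigned pllreffreq = 25000;		/* reference, kHz */
	const unsigned vcomin = 160000, vcomax = 320000;
	const unsigned clock = 65000;			/* requested clock */
	unsigned best_m = 0, best_n = 0, best_p = 0;
	unsigned delta = UINT_MAX;

	for (unsigned p = 8; p > 0; p /= 2) {
		/* the VCO runs at clock * p; skip out-of-range combos */
		if (clock * p > vcomax || clock * p < vcomin)
			continue;
		for (unsigned n = 17; n < 256; n++) {
			for (unsigned m = 1; m < 32; m++) {
				unsigned computed = pllreffreq * n / (m * p);
				unsigned d = computed > clock ?
					computed - clock : clock - computed;
				if (d < delta) {
					delta = d;
					best_m = m;
					best_n = n;
					best_p = p;
				}
			}
		}
	}
	printf("N=%u M=%u P=%u -> %u kHz (target %u, delta %u)\n",
	       best_n, best_m, best_p,
	       pllreffreq * best_n / (best_m * best_p), clock, delta);
	return 0;
}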
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index d16964e..05108b5 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -378,7 +378,7 @@
 
 int mgag200_bo_unpin(struct mgag200_bo *bo)
 {
-	int i, ret;
+	int i;
 	if (!bo->pin_count) {
 		DRM_ERROR("unpin bad %p\n", bo);
 		return 0;
@@ -389,11 +389,7 @@
 
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
-	if (ret)
-		return ret;
-
-	return 0;
+	return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
 }
 
 int mgag200_bo_push_sysram(struct mgag200_bo *bo)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 08ba8d0..8e6c7c6 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -9,6 +9,7 @@
 	select DRM_PANEL
 	select SHMEM
 	select TMPFS
+	select QCOM_SCM
 	default y
 	help
 	  DRM/KMS driver for MSM/snapdragon.
@@ -53,3 +54,17 @@
 	help
 	  Choose this option to enable DSI PLL driver which provides DSI
 	  source clocks under common clock framework.
+
+config DRM_MSM_DSI_28NM_PHY
+	bool "Enable DSI 28nm PHY driver in MSM DRM"
+	depends on DRM_MSM_DSI
+	default y
+	help
+	  Choose this option if the 28nm DSI PHY is used on the platform.
+
+config DRM_MSM_DSI_20NM_PHY
+	bool "Enable DSI 20nm PHY driver in MSM DRM"
+	depends on DRM_MSM_DSI
+	default y
+	help
+	  Choose this option if the 20nm DSI PHY is used on the platform.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 16a81b9..0a543eb 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,5 +1,5 @@
 ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
-ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
+ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
 
 msm-y := \
 	adreno/adreno_device.o \
@@ -10,6 +10,7 @@
 	hdmi/hdmi_audio.o \
 	hdmi/hdmi_bridge.o \
 	hdmi/hdmi_connector.o \
+	hdmi/hdmi_hdcp.o \
 	hdmi/hdmi_i2c.o \
 	hdmi/hdmi_phy_8960.o \
 	hdmi/hdmi_phy_8x60.o \
@@ -53,12 +54,18 @@
 msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
 
 msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+			dsi/dsi_cfg.o \
 			dsi/dsi_host.o \
 			dsi/dsi_manager.o \
-			dsi/dsi_phy.o \
+			dsi/phy/dsi_phy.o \
 			mdp/mdp5/mdp5_cmd_encoder.o
 
-msm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \
-				dsi/pll/dsi_pll_28nm.o
+msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
+msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
+
+ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
+msm-y += dsi/pll/dsi_pll.o
+msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
+endif
 
 obj-$(CONFIG_DRM_MSM)	+= msm.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 23176e4..0261f0d 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,15 +8,15 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63785 bytes, from 2015-08-14 18:27:06)
 
-Copyright (C) 2013-2014 by the following authors:
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 1c599e5..48d1337 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,13 +8,13 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63785 bytes, from 2015-08-14 18:27:06)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -326,6 +326,13 @@
 	A3XX_TEX_3D = 3,
 };
 
+enum a3xx_tex_msaa {
+	A3XX_TPL1_MSAA1X = 0,
+	A3XX_TPL1_MSAA2X = 1,
+	A3XX_TPL1_MSAA4X = 2,
+	A3XX_TPL1_MSAA8X = 3,
+};
+
 #define A3XX_INT0_RBBM_GPU_IDLE					0x00000001
 #define A3XX_INT0_RBBM_AHB_ERROR				0x00000002
 #define A3XX_INT0_RBBM_REG_TIMEOUT				0x00000004
@@ -2652,6 +2659,7 @@
 #define REG_A3XX_VGT_IMMED_DATA					0x000021fd
 
 #define REG_A3XX_TEX_SAMP_0					0x00000000
+#define A3XX_TEX_SAMP_0_CLAMPENABLE				0x00000001
 #define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR			0x00000002
 #define A3XX_TEX_SAMP_0_XY_MAG__MASK				0x0000000c
 #define A3XX_TEX_SAMP_0_XY_MAG__SHIFT				2
@@ -2695,6 +2703,7 @@
 {
 	return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK;
 }
+#define A3XX_TEX_SAMP_0_CUBEMAPSEAMLESSFILTOFF			0x01000000
 #define A3XX_TEX_SAMP_0_UNNORM_COORDS				0x80000000
 
 #define REG_A3XX_TEX_SAMP_1					0x00000001
@@ -2750,6 +2759,12 @@
 {
 	return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK;
 }
+#define A3XX_TEX_CONST_0_MSAATEX__MASK				0x00300000
+#define A3XX_TEX_CONST_0_MSAATEX__SHIFT				20
+static inline uint32_t A3XX_TEX_CONST_0_MSAATEX(enum a3xx_tex_msaa val)
+{
+	return ((val) << A3XX_TEX_CONST_0_MSAATEX__SHIFT) & A3XX_TEX_CONST_0_MSAATEX__MASK;
+}
 #define A3XX_TEX_CONST_0_FMT__MASK				0x1fc00000
 #define A3XX_TEX_CONST_0_FMT__SHIFT				22
 static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
@@ -2785,7 +2800,7 @@
 }
 
 #define REG_A3XX_TEX_CONST_2					0x00000002
-#define A3XX_TEX_CONST_2_INDX__MASK				0x000000ff
+#define A3XX_TEX_CONST_2_INDX__MASK				0x000001ff
 #define A3XX_TEX_CONST_2_INDX__SHIFT				0
 static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val)
 {
@@ -2805,7 +2820,7 @@
 }
 
 #define REG_A3XX_TEX_CONST_3					0x00000003
-#define A3XX_TEX_CONST_3_LAYERSZ1__MASK				0x00007fff
+#define A3XX_TEX_CONST_3_LAYERSZ1__MASK				0x0001ffff
 #define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT			0
 static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
 {
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 3f06ecf..ac55066 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,13 +8,13 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63785 bytes, from 2015-08-14 18:27:06)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -227,6 +227,7 @@
 	DEPTH4_NONE = 0,
 	DEPTH4_16 = 1,
 	DEPTH4_24_8 = 2,
+	DEPTH4_32 = 3,
 };
 
 enum a4xx_tess_spacing {
@@ -429,7 +430,7 @@
 	return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
 }
 #define A4XX_RB_MRT_BUF_INFO_COLOR_SRGB				0x00002000
-#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK		0x007fc000
+#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK		0xffffc000
 #define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT		14
 static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
 {
@@ -439,7 +440,7 @@
 static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; }
 
 static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; }
-#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK			0x0001fff8
+#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK			0x03fffff8
 #define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT			3
 static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val)
 {
@@ -570,6 +571,15 @@
 	return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK;
 }
 
+#define REG_A4XX_RB_SAMPLE_COUNT_CONTROL			0x000020fa
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_COPY			0x00000002
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK			0xfffffffc
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT		2
+static inline uint32_t A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR(uint32_t val)
+{
+	return ((val >> 2) << A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT) & A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK;
+}
+
 #define REG_A4XX_RB_RENDER_COMPONENTS				0x000020fb
 #define A4XX_RB_RENDER_COMPONENTS_RT0__MASK			0x0000000f
 #define A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT			0
@@ -811,6 +821,23 @@
 #define REG_A4XX_RB_STENCIL_CONTROL2				0x00002107
 #define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER			0x00000001
 
+#define REG_A4XX_RB_STENCIL_INFO				0x00002108
+#define A4XX_RB_STENCIL_INFO_SEPARATE_STENCIL			0x00000001
+#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK			0xfffff000
+#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT		12
+static inline uint32_t A4XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
+{
+	return ((val >> 12) << A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
+}
+
+#define REG_A4XX_RB_STENCIL_PITCH				0x00002109
+#define A4XX_RB_STENCIL_PITCH__MASK				0xffffffff
+#define A4XX_RB_STENCIL_PITCH__SHIFT				0
+static inline uint32_t A4XX_RB_STENCIL_PITCH(uint32_t val)
+{
+	return ((val >> 5) << A4XX_RB_STENCIL_PITCH__SHIFT) & A4XX_RB_STENCIL_PITCH__MASK;
+}
+
 #define REG_A4XX_RB_STENCILREFMASK				0x0000210b
 #define A4XX_RB_STENCILREFMASK_STENCILREF__MASK			0x000000ff
 #define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT		0
@@ -1433,6 +1460,7 @@
 {
 	return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK;
 }
+#define A4XX_SP_FS_MRT_REG_COLOR_SRGB				0x00040000
 
 #define REG_A4XX_SP_CS_CTRL_REG0				0x00002300
 
@@ -1470,6 +1498,76 @@
 
 #define REG_A4XX_SP_HS_LENGTH_REG				0x00002312
 
+#define REG_A4XX_SP_DS_PARAM_REG				0x0000231a
+#define A4XX_SP_DS_PARAM_REG_POSREGID__MASK			0x000000ff
+#define A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT			0
+static inline uint32_t A4XX_SP_DS_PARAM_REG_POSREGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_DS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK		0xfff00000
+#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT		20
+static inline uint32_t A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_DS_OUT(uint32_t i0) { return 0x0000231b + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000231b + 0x1*i0; }
+#define A4XX_SP_DS_OUT_REG_A_REGID__MASK			0x000001ff
+#define A4XX_SP_DS_OUT_REG_A_REGID__SHIFT			0
+static inline uint32_t A4XX_SP_DS_OUT_REG_A_REGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK			0x00001e00
+#define A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT			9
+static inline uint32_t A4XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_B_REGID__MASK			0x01ff0000
+#define A4XX_SP_DS_OUT_REG_B_REGID__SHIFT			16
+static inline uint32_t A4XX_SP_DS_OUT_REG_B_REGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK			0x1e000000
+#define A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT			25
+static inline uint32_t A4XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000232c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000232c + 0x1*i0; }
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK			0x000000ff
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT			0
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK			0x0000ff00
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT			8
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK			0x00ff0000
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT			16
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK			0xff000000
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT			24
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+	return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
 #define REG_A4XX_SP_DS_OBJ_OFFSET_REG				0x00002334
 #define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK	0x01ff0000
 #define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT	16
@@ -1492,6 +1590,82 @@
 
 #define REG_A4XX_SP_DS_LENGTH_REG				0x00002339
 
+#define REG_A4XX_SP_GS_PARAM_REG				0x00002341
+#define A4XX_SP_GS_PARAM_REG_POSREGID__MASK			0x000000ff
+#define A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT			0
+static inline uint32_t A4XX_SP_GS_PARAM_REG_POSREGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK			0x0000ff00
+#define A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT			8
+static inline uint32_t A4XX_SP_GS_PARAM_REG_PRIMREGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK;
+}
+#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK		0xfff00000
+#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT		20
+static inline uint32_t A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_GS_OUT(uint32_t i0) { return 0x00002342 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_GS_OUT_REG(uint32_t i0) { return 0x00002342 + 0x1*i0; }
+#define A4XX_SP_GS_OUT_REG_A_REGID__MASK			0x000001ff
+#define A4XX_SP_GS_OUT_REG_A_REGID__SHIFT			0
+static inline uint32_t A4XX_SP_GS_OUT_REG_A_REGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK			0x00001e00
+#define A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT			9
+static inline uint32_t A4XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_B_REGID__MASK			0x01ff0000
+#define A4XX_SP_GS_OUT_REG_B_REGID__SHIFT			16
+static inline uint32_t A4XX_SP_GS_OUT_REG_B_REGID(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK			0x1e000000
+#define A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT			25
+static inline uint32_t A4XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_GS_VPC_DST(uint32_t i0) { return 0x00002353 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x00002353 + 0x1*i0; }
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK			0x000000ff
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT			0
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK			0x0000ff00
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT			8
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK			0x00ff0000
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT			16
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK			0xff000000
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT			24
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+	return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
 #define REG_A4XX_SP_GS_OBJ_OFFSET_REG				0x0000235b
 #define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK	0x01ff0000
 #define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT	16
@@ -1693,6 +1867,18 @@
 {
 	return ((val) << A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT) & A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK;
 }
+#define A4XX_VFD_CONTROL_3_REGID_TESSX__MASK			0x00ff0000
+#define A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT			16
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+	return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A4XX_VFD_CONTROL_3_REGID_TESSY__MASK			0xff000000
+#define A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT			24
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+	return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
 
 #define REG_A4XX_VFD_CONTROL_4					0x00002204
 
@@ -2489,6 +2675,8 @@
 
 #define REG_A4XX_UNKNOWN_22D7					0x000022d7
 
+#define REG_A4XX_UNKNOWN_2352					0x00002352
+
 #define REG_A4XX_TEX_SAMP_0					0x00000000
 #define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR			0x00000001
 #define A4XX_TEX_SAMP_0_XY_MAG__MASK				0x00000006
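
The generated pack helpers in these headers encode aligned addresses by shifting before masking: A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR drops the two always-zero low bits of a dword-aligned address so the register carries bits [31:2], and A4XX_RB_STENCIL_INFO_STENCIL_BASE does the same for a 4KB-aligned base. A small round-trip demonstration of the idiom, with made-up field names:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ADDR__MASK	0xfffffffcu
#define ADDR__SHIFT	2

/* val must be dword-aligned; its two zero low bits are discarded */
static inline uint32_t pack_addr(uint32_t val)
{
	return ((val >> 2) << ADDR__SHIFT) & ADDR__MASK;
}

static inline uint32_t unpack_addr(uint32_t reg)
{
	return ((reg & ADDR__MASK) >> ADDR__SHIFT) << 2;
}

int main(void)
{
	uint32_t addr = 0x1000a0;	/* dword-aligned buffer address */

	assert(unpack_addr(pack_addr(addr)) == addr);
	printf("packed 0x%08x -> 0x%08x\n", addr, pack_addr(addr));
	return 0;
}

Because the shift equals the alignment, an aligned address survives the round trip exactly; only misaligned low bits are lost.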
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 9562a1f..399a9e5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,15 +8,15 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63785 bytes, from 2015-08-14 18:27:06)
 
-Copyright (C) 2013-2014 by the following authors:
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index bd5b23b..41904fe 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,13 +8,13 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2013-11-30 14:47:15)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14895 bytes, from 2015-04-19 15:23:28)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  66709 bytes, from 2015-04-12 18:16:35)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  60633 bytes, from 2015-05-20 14:48:19)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml               (    364 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml  (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml          (  32901 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml (  10551 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml    (  14968 bytes, from 2015-05-20 20:12:27)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml          (  67120 bytes, from 2015-08-14 23:22:03)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml          (  63785 bytes, from 2015-08-14 18:27:06)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -67,7 +67,7 @@
 
 enum pc_di_primtype {
 	DI_PT_NONE = 0,
-	DI_PT_POINTLIST_A2XX = 1,
+	DI_PT_POINTLIST_PSIZE = 1,
 	DI_PT_LINELIST = 2,
 	DI_PT_LINESTRIP = 3,
 	DI_PT_TRILIST = 4,
@@ -75,7 +75,7 @@
 	DI_PT_TRISTRIP = 6,
 	DI_PT_LINELOOP = 7,
 	DI_PT_RECTLIST = 8,
-	DI_PT_POINTLIST_A3XX = 9,
+	DI_PT_POINTLIST = 9,
 	DI_PT_LINE_ADJ = 10,
 	DI_PT_LINESTRIP_ADJ = 11,
 	DI_PT_TRI_ADJ = 12,
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 1f2561e..6edcd6f 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -15,10 +15,10 @@
 
 struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
 {
-	if (!msm_dsi || !msm_dsi->panel)
+	if (!msm_dsi || !msm_dsi_device_connected(msm_dsi))
 		return NULL;
 
-	return (msm_dsi->panel_flags & MIPI_DSI_MODE_VIDEO) ?
+	return (msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO) ?
 		msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] :
 		msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
 }
@@ -74,19 +74,15 @@
 
 static struct msm_dsi *dsi_init(struct platform_device *pdev)
 {
-	struct msm_dsi *msm_dsi = NULL;
+	struct msm_dsi *msm_dsi;
 	int ret;
 
-	if (!pdev) {
-		ret = -ENXIO;
-		goto fail;
-	}
+	if (!pdev)
+		return ERR_PTR(-ENXIO);
 
 	msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL);
-	if (!msm_dsi) {
-		ret = -ENOMEM;
-		goto fail;
-	}
+	if (!msm_dsi)
+		return ERR_PTR(-ENOMEM);
 	DBG("dsi probed=%p", msm_dsi);
 
 	msm_dsi->pdev = pdev;
@@ -95,24 +91,22 @@
 	/* Init dsi host */
 	ret = msm_dsi_host_init(msm_dsi);
 	if (ret)
-		goto fail;
+		goto destroy_dsi;
 
 	/* GET dsi PHY */
 	ret = dsi_get_phy(msm_dsi);
 	if (ret)
-		goto fail;
+		goto destroy_dsi;
 
 	/* Register to dsi manager */
 	ret = msm_dsi_manager_register(msm_dsi);
 	if (ret)
-		goto fail;
+		goto destroy_dsi;
 
 	return msm_dsi;
 
-fail:
-	if (msm_dsi)
-		dsi_destroy(msm_dsi);
-
+destroy_dsi:
+	dsi_destroy(msm_dsi);
 	return ERR_PTR(ret);
 }
 
@@ -196,6 +190,7 @@
 		struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM])
 {
 	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_bridge *ext_bridge;
 	int ret, i;
 
 	if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
@@ -223,10 +218,25 @@
 		msm_dsi->encoders[i] = encoders[i];
 	}
 
-	msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id);
+	/*
+	 * Check if the dsi encoder output is connected to a panel or an
+	 * external bridge. We create a connector only if we're connected to a
+	 * drm_panel device. When we're connected to an external bridge, we
+	 * assume that the drm_bridge driver will create the connector itself.
+	 */
+	ext_bridge = msm_dsi_host_get_bridge(msm_dsi->host);
+
+	if (ext_bridge)
+		msm_dsi->connector =
+			msm_dsi_manager_ext_bridge_init(msm_dsi->id);
+	else
+		msm_dsi->connector =
+			msm_dsi_manager_connector_init(msm_dsi->id);
+
 	if (IS_ERR(msm_dsi->connector)) {
 		ret = PTR_ERR(msm_dsi->connector);
-		dev_err(dev->dev, "failed to create dsi connector: %d\n", ret);
+		dev_err(dev->dev,
+			"failed to create dsi connector: %d\n", ret);
 		msm_dsi->connector = NULL;
 		goto fail;
 	}
@@ -242,10 +252,12 @@
 			msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
 			msm_dsi->bridge = NULL;
 		}
-		if (msm_dsi->connector) {
+
+		/* don't destroy connector if we didn't make it */
+		if (msm_dsi->connector && !msm_dsi->external_bridge)
 			msm_dsi->connector->funcs->destroy(msm_dsi->connector);
-			msm_dsi->connector = NULL;
-		}
+
+		msm_dsi->connector = NULL;
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 92d697d..5f5a373 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -27,21 +27,10 @@
 #define DSI_1	1
 #define DSI_MAX	2
 
-#define DSI_CLOCK_MASTER	DSI_0
-#define DSI_CLOCK_SLAVE		DSI_1
-
-#define DSI_LEFT		DSI_0
-#define DSI_RIGHT		DSI_1
-
-/* According to the current drm framework sequence, take the encoder of
- * DSI_1 as master encoder
- */
-#define DSI_ENCODER_MASTER	DSI_1
-#define DSI_ENCODER_SLAVE	DSI_0
-
 enum msm_dsi_phy_type {
 	MSM_DSI_PHY_28NM_HPM,
 	MSM_DSI_PHY_28NM_LP,
+	MSM_DSI_PHY_20NM,
 	MSM_DSI_PHY_MAX
 };
 
@@ -65,13 +54,21 @@
 	struct drm_device *dev;
 	struct platform_device *pdev;
 
+	/* connector managed by us when we're connected to a drm_panel */
 	struct drm_connector *connector;
+	/* internal dsi bridge attached to MDP interface */
 	struct drm_bridge *bridge;
 
 	struct mipi_dsi_host *host;
 	struct msm_dsi_phy *phy;
+
+	/*
+	 * panel/external_bridge connected to dsi bridge output, only one of the
+	 * two can be valid at a time
+	 */
 	struct drm_panel *panel;
-	unsigned long panel_flags;
+	struct drm_bridge *external_bridge;
+	unsigned long device_flags;
 
 	struct device *phy_dev;
 	bool phy_enabled;
@@ -86,6 +83,7 @@
 struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
 void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
 struct drm_connector *msm_dsi_manager_connector_init(u8 id);
+struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
 int msm_dsi_manager_phy_enable(int id,
 		const unsigned long bit_rate, const unsigned long esc_rate,
 		u32 *clk_pre, u32 *clk_post);
@@ -96,6 +94,11 @@
 void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
 
 /* msm dsi */
+static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
+{
+	return msm_dsi->panel || msm_dsi->external_bridge;
+}
+
 struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
 
 /* dsi pll */
@@ -106,6 +109,8 @@
 void msm_dsi_pll_destroy(struct msm_dsi_pll *pll);
 int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
 	struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
+void msm_dsi_pll_save_state(struct msm_dsi_pll *pll);
+int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll);
 #else
 static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
 			 enum msm_dsi_phy_type type, int id) {
@@ -119,6 +124,13 @@
 {
 	return -ENODEV;
 }
+static inline void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
+{
+}
+static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
+{
+	return 0;
+}
 #endif
 
 /* dsi host */
@@ -140,6 +152,7 @@
 					struct drm_display_mode *mode);
 struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
 					unsigned long *panel_flags);
+struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
 int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
 void msm_dsi_host_unregister(struct mipi_dsi_host *host);
 int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
@@ -153,9 +166,9 @@
 struct msm_dsi_phy;
 void msm_dsi_phy_driver_register(void);
 void msm_dsi_phy_driver_unregister(void);
-int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 	const unsigned long bit_rate, const unsigned long esc_rate);
-int msm_dsi_phy_disable(struct msm_dsi_phy *phy);
+void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
 void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
 					u32 *clk_pre, u32 *clk_post);
 struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 9791ea0..1d2e32f 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,17 +8,17 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -382,6 +382,11 @@
 #define REG_DSI_TRIG_DMA					0x0000008c
 
 #define REG_DSI_DLN0_PHY_ERR					0x000000b0
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_ESC				0x00000001
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC			0x00000010
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL			0x00000100
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0		0x00001000
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1		0x00010000
 
 #define REG_DSI_TIMEOUT_STATUS					0x000000bc
 
@@ -435,6 +440,9 @@
 #define REG_DSI_PHY_RESET					0x00000128
 #define DSI_PHY_RESET_RESET					0x00000001
 
+#define REG_DSI_T_CLK_PRE_EXTEND				0x0000017c
+#define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK			0x00000001
+
 #define REG_DSI_RDBK_DATA_CTRL					0x000001d0
 #define DSI_RDBK_DATA_CTRL_COUNT__MASK				0x00ff0000
 #define DSI_RDBK_DATA_CTRL_COUNT__SHIFT				16
@@ -830,6 +838,7 @@
 #define REG_DSI_28nm_PHY_BIST_CTRL_5				0x000001c8
 
 #define REG_DSI_28nm_PHY_GLBL_TEST_CTRL				0x000001d4
+#define DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL		0x00000001
 
 #define REG_DSI_28nm_PHY_LDO_CNTRL				0x000001dc
 
@@ -994,5 +1003,185 @@
 
 #define REG_DSI_28nm_PHY_PLL_CTRL_54				0x000000d4
 
+static inline uint32_t REG_DSI_20nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_0				0x00000100
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_1				0x00000104
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_2				0x00000108
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_3				0x0000010c
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_4				0x00000110
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_DATAPATH			0x00000114
+
+#define REG_DSI_20nm_PHY_LNCK_DEBUG_SEL				0x00000118
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_STR0				0x0000011c
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_STR1				0x00000120
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_0				0x00000140
+#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_1				0x00000144
+#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_2				0x00000148
+#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_3				0x0000014c
+#define DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8			0x00000001
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_4				0x00000150
+#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_5				0x00000154
+#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_6				0x00000158
+#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_7				0x0000015c
+#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_8				0x00000160
+#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_9				0x00000164
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK			0x00000007
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT			0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK		0x00000070
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT		4
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_10				0x00000168
+#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK		0x00000007
+#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_11				0x0000016c
+#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK		0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT		0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+	return ((val) << DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_20nm_PHY_CTRL_0					0x00000170
+
+#define REG_DSI_20nm_PHY_CTRL_1					0x00000174
+
+#define REG_DSI_20nm_PHY_CTRL_2					0x00000178
+
+#define REG_DSI_20nm_PHY_CTRL_3					0x0000017c
+
+#define REG_DSI_20nm_PHY_CTRL_4					0x00000180
+
+#define REG_DSI_20nm_PHY_STRENGTH_0				0x00000184
+
+#define REG_DSI_20nm_PHY_STRENGTH_1				0x00000188
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_0				0x000001b4
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_1				0x000001b8
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_2				0x000001bc
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_3				0x000001c0
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_4				0x000001c4
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_5				0x000001c8
+
+#define REG_DSI_20nm_PHY_GLBL_TEST_CTRL				0x000001d4
+#define DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL		0x00000001
+
+#define REG_DSI_20nm_PHY_LDO_CNTRL				0x000001dc
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_0			0x00000000
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_1			0x00000004
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_2			0x00000008
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_3			0x0000000c
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_4			0x00000010
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_5			0x00000014
+
+#define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG			0x00000018
+
 
 #endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
new file mode 100644
index 0000000..5872d5e
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_cfg.h"
+
+/* DSI v2 is not supported yet */
+static const struct msm_dsi_config dsi_v2_cfg = {
+	.io_offset = 0,
+};
+
+static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 4,
+		.regs = {
+			{"gdsc", -1, -1, -1, -1},
+			{"vdd", 3000000, 3000000, 150000, 100},
+			{"vdda", 1200000, 1200000, 100000, 100},
+			{"vddio", 1800000, 1800000, 100000, 100},
+		},
+	},
+};
+
+static const struct msm_dsi_config msm8916_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 4,
+		.regs = {
+			{"gdsc", -1, -1, -1, -1},
+			{"vdd", 2850000, 2850000, 100000, 100},
+			{"vdda", 1200000, 1200000, 100000, 100},
+			{"vddio", 1800000, 1800000, 100000, 100},
+		},
+	},
+};
+
+static const struct msm_dsi_config msm8994_dsi_cfg = {
+	.io_offset = DSI_6G_REG_SHIFT,
+	.reg_cfg = {
+		.num = 7,
+		.regs = {
+			{"gdsc", -1, -1, -1, -1},
+			{"vdda", 1250000, 1250000, 100000, 100},
+			{"vddio", 1800000, 1800000, 100000, 100},
+			{"vcca", 1000000, 1000000, 10000, 100},
+			{"vdd", 1800000, 1800000, 100000, 100},
+			{"lab_reg", -1, -1, -1, -1},
+			{"ibb_reg", -1, -1, -1, -1},
+		},
+	}
+};
+
+static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
+	{MSM_DSI_VER_MAJOR_V2, U32_MAX, &dsi_v2_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
+						&msm8974_apq8084_dsi_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
+						&msm8974_apq8084_dsi_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1,
+						&msm8974_apq8084_dsi_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2,
+						&msm8974_apq8084_dsi_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
+	{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
+};
+
+const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
+{
+	const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
+	int i;
+
+	for (i = ARRAY_SIZE(dsi_cfg_handlers) - 1; i >= 0; i--) {
+		if ((dsi_cfg_handlers[i].major == major) &&
+			(dsi_cfg_handlers[i].minor == minor)) {
+			cfg_hnd = &dsi_cfg_handlers[i];
+			break;
+		}
+	}
+
+	return cfg_hnd;
+}
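+
+/*
+ * Illustrative sketch (editorial, not part of the original patch):
+ * callers are expected to read the controller version registers first
+ * and then look up the matching config, as dsi_host.c does:
+ *
+ *	const struct msm_dsi_cfg_handler *cfg_hnd;
+ *
+ *	cfg_hnd = msm_dsi_cfg_get(major, minor);
+ *	if (!cfg_hnd)
+ *		return -EINVAL;
+ *	msm_host->ctrl_base += cfg_hnd->cfg->io_offset;
+ */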
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
new file mode 100644
index 0000000..4cf8872
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_DSI_CFG_H__
+#define __MSM_DSI_CFG_H__
+
+#include "dsi.h"
+
+#define MSM_DSI_VER_MAJOR_V2	0x02
+#define MSM_DSI_VER_MAJOR_6G	0x03
+#define MSM_DSI_6G_VER_MINOR_V1_0	0x10000000
+#define MSM_DSI_6G_VER_MINOR_V1_1	0x10010000
+#define MSM_DSI_6G_VER_MINOR_V1_1_1	0x10010001
+#define MSM_DSI_6G_VER_MINOR_V1_2	0x10020000
+#define MSM_DSI_6G_VER_MINOR_V1_3	0x10030000
+#define MSM_DSI_6G_VER_MINOR_V1_3_1	0x10030001
+
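+/*
+ * Editorial note (not from the original patch): the 6G minor values
+ * appear to encode the version in the byte fields, e.g. 0x10030001
+ * reads as 1/3/0/1, i.e. v1.3.1; dsi_cfg.c scans its handler table
+ * from the newest entry backwards for an exact major/minor match.
+ */
+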
+#define DSI_6G_REG_SHIFT	4
+
+struct msm_dsi_config {
+	u32 io_offset;
+	struct dsi_reg_config reg_cfg;
+};
+
+struct msm_dsi_cfg_handler {
+	u32 major;
+	u32 minor;
+	const struct msm_dsi_config *cfg;
+};
+
+const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor);
+
+#endif /* __MSM_DSI_CFG_H__ */
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index de04009..8d82973 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -20,103 +20,15 @@
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/of_graph.h>
 #include <linux/regulator/consumer.h>
 #include <linux/spinlock.h>
 #include <video/mipi_display.h>
 
 #include "dsi.h"
 #include "dsi.xml.h"
-
-#define MSM_DSI_VER_MAJOR_V2	0x02
-#define MSM_DSI_VER_MAJOR_6G	0x03
-#define MSM_DSI_6G_VER_MINOR_V1_0	0x10000000
-#define MSM_DSI_6G_VER_MINOR_V1_1	0x10010000
-#define MSM_DSI_6G_VER_MINOR_V1_1_1	0x10010001
-#define MSM_DSI_6G_VER_MINOR_V1_2	0x10020000
-#define MSM_DSI_6G_VER_MINOR_V1_3_1	0x10030001
-
-#define DSI_6G_REG_SHIFT	4
-
-struct dsi_config {
-	u32 major;
-	u32 minor;
-	u32 io_offset;
-	struct dsi_reg_config reg_cfg;
-};
-
-static const struct dsi_config dsi_cfgs[] = {
-	{MSM_DSI_VER_MAJOR_V2, 0, 0, {0,} },
-	{ /* 8974 v1 */
-		.major = MSM_DSI_VER_MAJOR_6G,
-		.minor = MSM_DSI_6G_VER_MINOR_V1_0,
-		.io_offset = DSI_6G_REG_SHIFT,
-		.reg_cfg = {
-			.num = 4,
-			.regs = {
-				{"gdsc", -1, -1, -1, -1},
-				{"vdd", 3000000, 3000000, 150000, 100},
-				{"vdda", 1200000, 1200000, 100000, 100},
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-	},
-	{ /* 8974 v2 */
-		.major = MSM_DSI_VER_MAJOR_6G,
-		.minor = MSM_DSI_6G_VER_MINOR_V1_1,
-		.io_offset = DSI_6G_REG_SHIFT,
-		.reg_cfg = {
-			.num = 4,
-			.regs = {
-				{"gdsc", -1, -1, -1, -1},
-				{"vdd", 3000000, 3000000, 150000, 100},
-				{"vdda", 1200000, 1200000, 100000, 100},
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-	},
-	{ /* 8974 v3 */
-		.major = MSM_DSI_VER_MAJOR_6G,
-		.minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
-		.io_offset = DSI_6G_REG_SHIFT,
-		.reg_cfg = {
-			.num = 4,
-			.regs = {
-				{"gdsc", -1, -1, -1, -1},
-				{"vdd", 3000000, 3000000, 150000, 100},
-				{"vdda", 1200000, 1200000, 100000, 100},
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-	},
-	{ /* 8084 */
-		.major = MSM_DSI_VER_MAJOR_6G,
-		.minor = MSM_DSI_6G_VER_MINOR_V1_2,
-		.io_offset = DSI_6G_REG_SHIFT,
-		.reg_cfg = {
-			.num = 4,
-			.regs = {
-				{"gdsc", -1, -1, -1, -1},
-				{"vdd", 3000000, 3000000, 150000, 100},
-				{"vdda", 1200000, 1200000, 100000, 100},
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-	},
-	{ /* 8916 */
-		.major = MSM_DSI_VER_MAJOR_6G,
-		.minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
-		.io_offset = DSI_6G_REG_SHIFT,
-		.reg_cfg = {
-			.num = 4,
-			.regs = {
-				{"gdsc", -1, -1, -1, -1},
-				{"vdd", 2850000, 2850000, 100000, 100},
-				{"vdda", 1200000, 1200000, 100000, 100},
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-	},
-};
+#include "dsi_cfg.h"
 
 static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
 {
@@ -194,7 +106,7 @@
 	struct gpio_desc *disp_en_gpio;
 	struct gpio_desc *te_gpio;
 
-	const struct dsi_config *cfg;
+	const struct msm_dsi_cfg_handler *cfg_hnd;
 
 	struct completion dma_comp;
 	struct completion video_comp;
@@ -212,8 +124,8 @@
 
 	struct drm_display_mode *mode;
 
-	/* Panel info */
-	struct device_node *panel_node;
+	/* connected device info */
+	struct device_node *device_node;
 	unsigned int channel;
 	unsigned int lanes;
 	enum mipi_dsi_pixel_format format;
@@ -239,61 +151,58 @@
 
 static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
 {
-	return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+	return msm_readl(msm_host->ctrl_base + reg);
 }
 static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
 {
-	msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
+	msm_writel(data, msm_host->ctrl_base + reg);
 }
 
 static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
 static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
 
-static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host)
+static const struct msm_dsi_cfg_handler *dsi_get_config(
+						struct msm_dsi_host *msm_host)
 {
-	const struct dsi_config *cfg;
+	const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
 	struct regulator *gdsc_reg;
-	int i, ret;
+	int ret;
 	u32 major = 0, minor = 0;
 
 	gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
 	if (IS_ERR(gdsc_reg)) {
 		pr_err("%s: cannot get gdsc\n", __func__);
-		goto fail;
+		goto exit;
 	}
 	ret = regulator_enable(gdsc_reg);
 	if (ret) {
 		pr_err("%s: unable to enable gdsc\n", __func__);
-		regulator_put(gdsc_reg);
-		goto fail;
+		goto put_gdsc;
 	}
 	ret = clk_prepare_enable(msm_host->ahb_clk);
 	if (ret) {
 		pr_err("%s: unable to enable ahb_clk\n", __func__);
-		regulator_disable(gdsc_reg);
-		regulator_put(gdsc_reg);
-		goto fail;
+		goto disable_gdsc;
 	}
 
 	ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
-
-	clk_disable_unprepare(msm_host->ahb_clk);
-	regulator_disable(gdsc_reg);
-	regulator_put(gdsc_reg);
 	if (ret) {
 		pr_err("%s: Invalid version\n", __func__);
-		goto fail;
+		goto disable_clks;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) {
-		cfg = dsi_cfgs + i;
-		if ((cfg->major == major) && (cfg->minor == minor))
-			return cfg;
-	}
-	pr_err("%s: Version %x:%x not support\n", __func__, major, minor);
+	cfg_hnd = msm_dsi_cfg_get(major, minor);
 
-fail:
-	return NULL;
+	DBG("%s: Version %x:%x\n", __func__, major, minor);
+
+disable_clks:
+	clk_disable_unprepare(msm_host->ahb_clk);
+disable_gdsc:
+	regulator_disable(gdsc_reg);
+put_gdsc:
+	regulator_put(gdsc_reg);
+exit:
+	return cfg_hnd;
 }
 
 static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
@@ -304,8 +213,8 @@
 static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
 {
 	struct regulator_bulk_data *s = msm_host->supplies;
-	const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
-	int num = msm_host->cfg->reg_cfg.num;
+	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 	int i;
 
 	DBG("");
@@ -320,8 +229,8 @@
 static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
 {
 	struct regulator_bulk_data *s = msm_host->supplies;
-	const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
-	int num = msm_host->cfg->reg_cfg.num;
+	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 	int ret, i;
 
 	DBG("");
@@ -354,8 +263,8 @@
 static int dsi_regulator_init(struct msm_dsi_host *msm_host)
 {
 	struct regulator_bulk_data *s = msm_host->supplies;
-	const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
-	int num = msm_host->cfg->reg_cfg.num;
+	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
+	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
 	int i, ret;
 
 	for (i = 0; i < num; i++)
@@ -697,6 +606,7 @@
 {
 	u32 flags = msm_host->mode_flags;
 	enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 	u32 data = 0;
 
 	if (!enable) {
@@ -750,8 +660,8 @@
 	data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
 	data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
 	data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
-	if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
-		(msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
+	if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+		(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
 		data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
 	dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
 
@@ -1257,7 +1167,11 @@
 
 	status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
 
-	if (status) {
+	if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
+			DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
+			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
+			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
+			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
 		dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
 		msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
 	}
@@ -1359,7 +1273,8 @@
 		return PTR_ERR(msm_host->disp_en_gpio);
 	}
 
-	msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te", GPIOD_IN);
+	msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
+								GPIOD_IN);
 	if (IS_ERR(msm_host->te_gpio)) {
 		DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
 		return PTR_ERR(msm_host->te_gpio);
@@ -1379,7 +1294,7 @@
 	msm_host->format = dsi->format;
 	msm_host->mode_flags = dsi->mode_flags;
 
-	msm_host->panel_node = dsi->dev.of_node;
+	WARN_ON(dsi->dev.of_node != msm_host->device_node);
 
 	/* Some gpios defined in panel DT need to be controlled by host */
 	ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
@@ -1398,7 +1313,7 @@
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 
-	msm_host->panel_node = NULL;
+	msm_host->device_node = NULL;
 
 	DBG("id=%d", msm_host->id);
 	if (msm_host->dev)
@@ -1429,6 +1344,48 @@
 	.transfer = dsi_host_transfer,
 };
 
+static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
+{
+	struct device *dev = &msm_host->pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct device_node *endpoint, *device_node;
+	int ret;
+
+	ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id);
+	if (ret) {
+		dev_err(dev, "%s: host index not specified, ret=%d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	/*
+	 * Get the first endpoint node. In our case, dsi has one output port
+	 * to which the panel is connected. Don't return an error if a port
+	 * isn't defined. It's possible that there is nothing connected to
+	 * the dsi output.
+	 */
+	endpoint = of_graph_get_next_endpoint(np, NULL);
+	if (!endpoint) {
+		dev_dbg(dev, "%s: no endpoint\n", __func__);
+		return 0;
+	}
+
+	/* Get the panel/bridge node from the output port's endpoint data */
+	device_node = of_graph_get_remote_port_parent(endpoint);
+	if (!device_node) {
+		dev_err(dev, "%s: no valid device\n", __func__);
+		of_node_put(endpoint);
+		return -ENODEV;
+	}
+
+	of_node_put(endpoint);
+	of_node_put(device_node);
+
+	msm_host->device_node = device_node;
+
+	return 0;
+}
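+
+/*
+ * Hypothetical DT fragment (illustration only; the node names and unit
+ * address are assumptions, not defined by this patch) matching what
+ * dsi_host_parse_dt() expects: a host index property plus an of_graph
+ * port whose remote endpoint is the connected panel or bridge:
+ *
+ *	dsi0: dsi@fd922800 {
+ *		qcom,dsi-host-index = <0>;
+ *		port {
+ *			dsi0_out: endpoint {
+ *				remote-endpoint = <&panel_in>;
+ *			};
+ *		};
+ *	};
+ */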
+
 int msm_dsi_host_init(struct msm_dsi *msm_dsi)
 {
 	struct msm_dsi_host *msm_host = NULL;
@@ -1443,15 +1400,13 @@
 		goto fail;
 	}
 
-	ret = of_property_read_u32(pdev->dev.of_node,
-				"qcom,dsi-host-index", &msm_host->id);
+	msm_host->pdev = pdev;
+
+	ret = dsi_host_parse_dt(msm_host);
 	if (ret) {
-		dev_err(&pdev->dev,
-			"%s: host index not specified, ret=%d\n",
-			__func__, ret);
+		pr_err("%s: failed to parse dt\n", __func__);
 		goto fail;
 	}
-	msm_host->pdev = pdev;
 
 	ret = dsi_clk_init(msm_host);
 	if (ret) {
@@ -1466,13 +1421,16 @@
 		goto fail;
 	}
 
-	msm_host->cfg = dsi_get_config(msm_host);
-	if (!msm_host->cfg) {
+	msm_host->cfg_hnd = dsi_get_config(msm_host);
+	if (!msm_host->cfg_hnd) {
 		ret = -EINVAL;
 		pr_err("%s: get config failed\n", __func__);
 		goto fail;
 	}
 
+	/* fixup base address by io offset */
+	msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
+
 	ret = dsi_regulator_init(msm_host);
 	if (ret) {
 		pr_err("%s: regulator init failed\n", __func__);
@@ -1559,7 +1517,6 @@
 int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
-	struct device_node *node;
 	int ret;
 
 	/* Register mipi dsi host */
@@ -1577,14 +1534,13 @@
 		 * It makes sure panel is connected when fbcon detects
 		 * connector status and gets the proper display mode to
 		 * create framebuffer.
+		 * Don't try to defer if there is nothing connected to the dsi
+		 * output.
 		 */
-		if (check_defer) {
-			node = of_get_child_by_name(msm_host->pdev->dev.of_node,
-							"panel");
-			if (node) {
-				if (!of_drm_find_panel(node))
+		if (check_defer && msm_host->device_node) {
+			if (!of_drm_find_panel(msm_host->device_node))
+				if (!of_drm_find_bridge(msm_host->device_node))
 					return -EPROBE_DEFER;
-			}
 		}
 	}
 
@@ -1663,6 +1619,7 @@
 				const struct mipi_dsi_msg *msg)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 	int data_byte, rx_byte, dlen, end;
 	int short_response, diff, pkt_size, ret = 0;
 	char cmd;
@@ -1704,8 +1661,8 @@
 			return -EINVAL;
 		}
 
-		if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) &&
-			(msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
+		if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+			(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
 			/* Clear the RDBK_DATA registers */
 			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
 					DSI_RDBK_DATA_CTRL_CLR);
@@ -1919,6 +1876,13 @@
 		goto fail_disable_reg;
 	}
 
+	ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
+	if (ret) {
+		pr_err("%s: failed to set pinctrl default state, %d\n",
+			__func__, ret);
+		goto fail_disable_clk;
+	}
+
 	dsi_timing_setup(msm_host);
 	dsi_sw_reset(msm_host);
 	dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
@@ -1931,6 +1895,8 @@
 
 	return 0;
 
+fail_disable_clk:
+	dsi_clk_ctrl(msm_host, 0);
 fail_disable_reg:
 	dsi_host_regulator_disable(msm_host);
 unlock_ret:
@@ -1953,6 +1919,8 @@
 	if (msm_host->disp_en_gpio)
 		gpiod_set_value(msm_host->disp_en_gpio, 0);
 
+	pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
+
 	msm_dsi_manager_phy_disable(msm_host->id);
 
 	dsi_clk_ctrl(msm_host, 0);
@@ -1993,10 +1961,16 @@
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 	struct drm_panel *panel;
 
-	panel = of_drm_find_panel(msm_host->panel_node);
+	panel = of_drm_find_panel(msm_host->device_node);
 	if (panel_flags)
 			*panel_flags = msm_host->mode_flags;
 
 	return panel;
 }
 
+struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+	return of_drm_find_bridge(msm_host->device_node);
+}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 87ac661..0455ff7 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -14,19 +14,31 @@
 #include "msm_kms.h"
 #include "dsi.h"
 
+#define DSI_CLOCK_MASTER	DSI_0
+#define DSI_CLOCK_SLAVE		DSI_1
+
+#define DSI_LEFT		DSI_0
+#define DSI_RIGHT		DSI_1
+
+/* According to the current drm framework sequence, take the encoder of
+ * DSI_1 as master encoder
+ */
+#define DSI_ENCODER_MASTER	DSI_1
+#define DSI_ENCODER_SLAVE	DSI_0
+
 struct msm_dsi_manager {
 	struct msm_dsi *dsi[DSI_MAX];
 
-	bool is_dual_panel;
+	bool is_dual_dsi;
 	bool is_sync_needed;
-	int master_panel_id;
+	int master_dsi_link_id;
 };
 
 static struct msm_dsi_manager msm_dsim_glb;
 
-#define IS_DUAL_PANEL()		(msm_dsim_glb.is_dual_panel)
+#define IS_DUAL_DSI()		(msm_dsim_glb.is_dual_dsi)
 #define IS_SYNC_NEEDED()	(msm_dsim_glb.is_sync_needed)
-#define IS_MASTER_PANEL(id)	(msm_dsim_glb.master_panel_id == id)
+#define IS_MASTER_DSI_LINK(id)	(msm_dsim_glb.master_dsi_link_id == id)
 
 static inline struct msm_dsi *dsi_mgr_get_dsi(int id)
 {
@@ -38,23 +50,23 @@
 	return msm_dsim_glb.dsi[(id + 1) % DSI_MAX];
 }
 
-static int dsi_mgr_parse_dual_panel(struct device_node *np, int id)
+static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id)
 {
 	struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
 
-	/* We assume 2 dsi nodes have the same information of dual-panel and
+	/* We assume the 2 dsi nodes carry the same information about dual-dsi and
 	 * sync-mode, and only one node specifies master in case of dual mode.
 	 */
-	if (!msm_dsim->is_dual_panel)
-		msm_dsim->is_dual_panel = of_property_read_bool(
-						np, "qcom,dual-panel-mode");
+	if (!msm_dsim->is_dual_dsi)
+		msm_dsim->is_dual_dsi = of_property_read_bool(
+						np, "qcom,dual-dsi-mode");
 
-	if (msm_dsim->is_dual_panel) {
-		if (of_property_read_bool(np, "qcom,master-panel"))
-			msm_dsim->master_panel_id = id;
+	if (msm_dsim->is_dual_dsi) {
+		if (of_property_read_bool(np, "qcom,master-dsi"))
+			msm_dsim->master_dsi_link_id = id;
 		if (!msm_dsim->is_sync_needed)
 			msm_dsim->is_sync_needed = of_property_read_bool(
-					np, "qcom,sync-dual-panel");
+					np, "qcom,sync-dual-dsi");
 	}
 
 	return 0;
@@ -68,7 +80,7 @@
 	struct msm_dsi_pll *src_pll;
 	int ret;
 
-	if (!IS_DUAL_PANEL()) {
+	if (!IS_DUAL_DSI()) {
 		ret = msm_dsi_host_register(msm_dsi->host, true);
 		if (ret)
 			return ret;
@@ -78,9 +90,9 @@
 	} else if (!other_dsi) {
 		ret = 0;
 	} else {
-		struct msm_dsi *mdsi = IS_MASTER_PANEL(id) ?
+		struct msm_dsi *mdsi = IS_MASTER_DSI_LINK(id) ?
 					msm_dsi : other_dsi;
-		struct msm_dsi *sdsi = IS_MASTER_PANEL(id) ?
+		struct msm_dsi *sdsi = IS_MASTER_DSI_LINK(id) ?
 					other_dsi : msm_dsi;
 		/* Register slave host first, so that slave DSI device
 		 * has a chance to probe, and do not block the master
@@ -144,28 +156,28 @@
 	DBG("id=%d", id);
 	if (!msm_dsi->panel) {
 		msm_dsi->panel = msm_dsi_host_get_panel(msm_dsi->host,
-						&msm_dsi->panel_flags);
+						&msm_dsi->device_flags);
 
 		/* There is only 1 panel in the global panel list
-		 * for dual panel mode. Therefore slave dsi should get
+		 * for dual DSI mode. Therefore slave dsi should get
 		 * the drm_panel instance from master dsi, and
 		 * keep using the panel flags got from the current DSI link.
 		 */
-		if (!msm_dsi->panel && IS_DUAL_PANEL() &&
-			!IS_MASTER_PANEL(id) && other_dsi)
+		if (!msm_dsi->panel && IS_DUAL_DSI() &&
+			!IS_MASTER_DSI_LINK(id) && other_dsi)
 			msm_dsi->panel = msm_dsi_host_get_panel(
 					other_dsi->host, NULL);
 
-		if (msm_dsi->panel && IS_DUAL_PANEL())
+		if (msm_dsi->panel && IS_DUAL_DSI())
 			drm_object_attach_property(&connector->base,
 				connector->dev->mode_config.tile_property, 0);
 
-		/* Set split display info to kms once dual panel is connected
-		 * to both hosts
+		/* Set split display info to kms once dual DSI panel is
+		 * connected to both hosts.
 		 */
-		if (msm_dsi->panel && IS_DUAL_PANEL() &&
+		if (msm_dsi->panel && IS_DUAL_DSI() &&
 			other_dsi && other_dsi->panel) {
-			bool cmd_mode = !(msm_dsi->panel_flags &
+			bool cmd_mode = !(msm_dsi->device_flags &
 						MIPI_DSI_MODE_VIDEO);
 			struct drm_encoder *encoder = msm_dsi_get_encoder(
 					dsi_mgr_get_dsi(DSI_ENCODER_MASTER));
@@ -176,7 +188,7 @@
 				kms->funcs->set_split_display(kms, encoder,
 							slave_enc, cmd_mode);
 			else
-				pr_err("mdp does not support dual panel\n");
+				pr_err("mdp does not support dual DSI\n");
 		}
 	}
 
@@ -273,7 +285,7 @@
 	if (!num)
 		return 0;
 
-	if (IS_DUAL_PANEL()) {
+	if (IS_DUAL_DSI()) {
 		/* report half resolution to user */
 		dsi_dual_connector_fix_modes(connector);
 		ret = dsi_dual_connector_tile_init(connector, id);
@@ -328,11 +340,12 @@
 	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
 	struct mipi_dsi_host *host = msm_dsi->host;
 	struct drm_panel *panel = msm_dsi->panel;
-	bool is_dual_panel = IS_DUAL_PANEL();
+	bool is_dual_dsi = IS_DUAL_DSI();
 	int ret;
 
 	DBG("id=%d", id);
-	if (!panel || (is_dual_panel && (DSI_1 == id)))
+	if (!msm_dsi_device_connected(msm_dsi) ||
+			(is_dual_dsi && (DSI_1 == id)))
 		return;
 
 	ret = msm_dsi_host_power_on(host);
@@ -341,7 +354,7 @@
 		goto host_on_fail;
 	}
 
-	if (is_dual_panel && msm_dsi1) {
+	if (is_dual_dsi && msm_dsi1) {
 		ret = msm_dsi_host_power_on(msm_dsi1->host);
 		if (ret) {
 			pr_err("%s: power on host1 failed, %d\n",
@@ -353,10 +366,13 @@
 	/* Always call panel functions once, because even for dual panels,
 	 * there is only one drm_panel instance.
 	 */
-	ret = drm_panel_prepare(panel);
-	if (ret) {
-		pr_err("%s: prepare panel %d failed, %d\n", __func__, id, ret);
-		goto panel_prep_fail;
+	if (panel) {
+		ret = drm_panel_prepare(panel);
+		if (ret) {
+			pr_err("%s: prepare panel %d failed, %d\n", __func__,
+								id, ret);
+			goto panel_prep_fail;
+		}
 	}
 
 	ret = msm_dsi_host_enable(host);
@@ -365,7 +381,7 @@
 		goto host_en_fail;
 	}
 
-	if (is_dual_panel && msm_dsi1) {
+	if (is_dual_dsi && msm_dsi1) {
 		ret = msm_dsi_host_enable(msm_dsi1->host);
 		if (ret) {
 			pr_err("%s: enable host1 failed, %d\n", __func__, ret);
@@ -373,23 +389,27 @@
 		}
 	}
 
-	ret = drm_panel_enable(panel);
-	if (ret) {
-		pr_err("%s: enable panel %d failed, %d\n", __func__, id, ret);
-		goto panel_en_fail;
+	if (panel) {
+		ret = drm_panel_enable(panel);
+		if (ret) {
+			pr_err("%s: enable panel %d failed, %d\n", __func__, id,
+									ret);
+			goto panel_en_fail;
+		}
 	}
 
 	return;
 
 panel_en_fail:
-	if (is_dual_panel && msm_dsi1)
+	if (is_dual_dsi && msm_dsi1)
 		msm_dsi_host_disable(msm_dsi1->host);
 host1_en_fail:
 	msm_dsi_host_disable(host);
 host_en_fail:
-	drm_panel_unprepare(panel);
+	if (panel)
+		drm_panel_unprepare(panel);
 panel_prep_fail:
-	if (is_dual_panel && msm_dsi1)
+	if (is_dual_dsi && msm_dsi1)
 		msm_dsi_host_power_off(msm_dsi1->host);
 host1_on_fail:
 	msm_dsi_host_power_off(host);
@@ -414,37 +434,44 @@
 	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
 	struct mipi_dsi_host *host = msm_dsi->host;
 	struct drm_panel *panel = msm_dsi->panel;
-	bool is_dual_panel = IS_DUAL_PANEL();
+	bool is_dual_dsi = IS_DUAL_DSI();
 	int ret;
 
 	DBG("id=%d", id);
 
-	if (!panel || (is_dual_panel && (DSI_1 == id)))
+	if (!msm_dsi_device_connected(msm_dsi) ||
+			(is_dual_dsi && (DSI_1 == id)))
 		return;
 
-	ret = drm_panel_disable(panel);
-	if (ret)
-		pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, ret);
+	if (panel) {
+		ret = drm_panel_disable(panel);
+		if (ret)
+			pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
+									ret);
+	}
 
 	ret = msm_dsi_host_disable(host);
 	if (ret)
 		pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
 
-	if (is_dual_panel && msm_dsi1) {
+	if (is_dual_dsi && msm_dsi1) {
 		ret = msm_dsi_host_disable(msm_dsi1->host);
 		if (ret)
 			pr_err("%s: host1 disable failed, %d\n", __func__, ret);
 	}
 
-	ret = drm_panel_unprepare(panel);
-	if (ret)
-		pr_err("%s: Panel %d unprepare failed,%d\n", __func__, id, ret);
+	if (panel) {
+		ret = drm_panel_unprepare(panel);
+		if (ret)
+			pr_err("%s: Panel %d unprepare failed,%d\n", __func__,
+								id, ret);
+	}
 
 	ret = msm_dsi_host_power_off(host);
 	if (ret)
 		pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
 
-	if (is_dual_panel && msm_dsi1) {
+	if (is_dual_dsi && msm_dsi1) {
 		ret = msm_dsi_host_power_off(msm_dsi1->host);
 		if (ret)
 			pr_err("%s: host1 power off failed, %d\n",
@@ -460,7 +487,7 @@
 	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
 	struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
 	struct mipi_dsi_host *host = msm_dsi->host;
-	bool is_dual_panel = IS_DUAL_PANEL();
+	bool is_dual_dsi = IS_DUAL_DSI();
 
 	DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
 			mode->base.id, mode->name,
@@ -471,11 +498,11 @@
 			mode->vsync_end, mode->vtotal,
 			mode->type, mode->flags);
 
-	if (is_dual_panel && (DSI_1 == id))
+	if (is_dual_dsi && (DSI_1 == id))
 		return;
 
 	msm_dsi_host_set_display_mode(host, adjusted_mode);
-	if (is_dual_panel && other_dsi)
+	if (is_dual_dsi && other_dsi)
 		msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
 }
 
@@ -503,7 +530,7 @@
 	.mode_set = dsi_mgr_bridge_mode_set,
 };
 
-/* initialize connector */
+/* initialize connector when we're connected to a drm_panel */
 struct drm_connector *msm_dsi_manager_connector_init(u8 id)
 {
 	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
@@ -588,6 +615,53 @@
 	return ERR_PTR(ret);
 }
 
+struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct drm_device *dev = msm_dsi->dev;
+	struct drm_encoder *encoder;
+	struct drm_bridge *int_bridge, *ext_bridge;
+	struct drm_connector *connector;
+	struct list_head *connector_list;
+
+	int_bridge = msm_dsi->bridge;
+	ext_bridge = msm_dsi->external_bridge =
+			msm_dsi_host_get_bridge(msm_dsi->host);
+
+	/*
+	 * HACK: we may not know the external DSI bridge device's mode
+	 * flags here. We'll get to know them only when the device
+	 * attaches to the dsi host. For now, assume the bridge supports
+	 * DSI video mode
+	 */
+	encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID];
+
+	/* link the internal dsi bridge to the external bridge */
+	int_bridge->next = ext_bridge;
+	/* set the external bridge's encoder as dsi's encoder */
+	ext_bridge->encoder = encoder;
+
+	drm_bridge_attach(dev, ext_bridge);
+
+	/*
+	 * We need the drm_connector created by the external bridge
+	 * driver (or someone else) so that we can add it to our driver's
+	 * priv->connector[] list, mainly for msm_fbdev_init().
+	 */
+	connector_list = &dev->mode_config.connector_list;
+
+	list_for_each_entry(connector, connector_list, head) {
+		int i;
+
+		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+			if (connector->encoder_ids[i] == encoder->base.id)
+				return connector;
+		}
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
 void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
 {
 }
@@ -598,12 +672,29 @@
 {
 	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
 	struct msm_dsi_phy *phy = msm_dsi->phy;
+	int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id;
+	struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
 	int ret;
 
-	ret = msm_dsi_phy_enable(phy, IS_DUAL_PANEL(), bit_rate, esc_rate);
+	ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate);
 	if (ret)
 		return ret;
 
+	/*
+	 * Resetting the DSI PHY silently changes its PLL registers to their
+	 * reset values, which confuses the clock driver and results in wrong
+	 * output rates on the link clocks. Restore the PLL state if this
+	 * PHY's PLL is being used as a clock source.
+	 */
+	if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) {
+		ret = msm_dsi_pll_restore_state(pll);
+		if (ret) {
+			pr_err("%s: failed to restore pll state\n", __func__);
+			msm_dsi_phy_disable(phy);
+			return ret;
+		}
+	}
+
 	msm_dsi->phy_enabled = true;
 	msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post);
 
@@ -616,13 +707,18 @@
 	struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
 	struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
 	struct msm_dsi_phy *phy = msm_dsi->phy;
+	struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+
+	/* Save PLL status if it is a clock source */
+	if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER))
+		msm_dsi_pll_save_state(pll);
 
 	/* disable DSI phy
 	 * In dual-dsi configuration, the phy should be disabled for the
 	 * first controller only when the second controller is disabled.
 	 */
 	msm_dsi->phy_enabled = false;
-	if (IS_DUAL_PANEL() && mdsi && sdsi) {
+	if (IS_DUAL_DSI() && mdsi && sdsi) {
 		if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
 			msm_dsi_phy_disable(sdsi->phy);
 			msm_dsi_phy_disable(mdsi->phy);
@@ -713,9 +809,9 @@
 
 	msm_dsim->dsi[id] = msm_dsi;
 
-	ret = dsi_mgr_parse_dual_panel(msm_dsi->pdev->dev.of_node, id);
+	ret = dsi_mgr_parse_dual_dsi(msm_dsi->pdev->dev.of_node, id);
 	if (ret) {
-		pr_err("%s: failed to parse dual panel info\n", __func__);
+		pr_err("%s: failed to parse dual DSI info\n", __func__);
 		goto fail;
 	}
 
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy.c b/drivers/gpu/drm/msm/dsi/dsi_phy.c
deleted file mode 100644
index 2d3b33c..0000000
--- a/drivers/gpu/drm/msm/dsi/dsi_phy.c
+++ /dev/null
@@ -1,607 +0,0 @@
-/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-
-#include "dsi.h"
-#include "dsi.xml.h"
-
-#define dsi_phy_read(offset) msm_readl((offset))
-#define dsi_phy_write(offset, data) msm_writel((data), (offset))
-
-struct dsi_phy_ops {
-	int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel,
-		const unsigned long bit_rate, const unsigned long esc_rate);
-	int (*disable)(struct msm_dsi_phy *phy);
-};
-
-struct dsi_phy_cfg {
-	enum msm_dsi_phy_type type;
-	struct dsi_reg_config reg_cfg;
-	struct dsi_phy_ops ops;
-};
-
-struct dsi_dphy_timing {
-	u32 clk_pre;
-	u32 clk_post;
-	u32 clk_zero;
-	u32 clk_trail;
-	u32 clk_prepare;
-	u32 hs_exit;
-	u32 hs_zero;
-	u32 hs_prepare;
-	u32 hs_trail;
-	u32 hs_rqst;
-	u32 ta_go;
-	u32 ta_sure;
-	u32 ta_get;
-};
-
-struct msm_dsi_phy {
-	struct platform_device *pdev;
-	void __iomem *base;
-	void __iomem *reg_base;
-	int id;
-
-	struct clk *ahb_clk;
-	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
-
-	struct dsi_dphy_timing timing;
-	const struct dsi_phy_cfg *cfg;
-
-	struct msm_dsi_pll *pll;
-};
-
-static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
-{
-	struct regulator_bulk_data *s = phy->supplies;
-	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
-	struct device *dev = &phy->pdev->dev;
-	int num = phy->cfg->reg_cfg.num;
-	int i, ret;
-
-	for (i = 0; i < num; i++)
-		s[i].supply = regs[i].name;
-
-	ret = devm_regulator_bulk_get(&phy->pdev->dev, num, s);
-	if (ret < 0) {
-		dev_err(dev, "%s: failed to init regulator, ret=%d\n",
-						__func__, ret);
-		return ret;
-	}
-
-	for (i = 0; i < num; i++) {
-		if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
-			ret = regulator_set_voltage(s[i].consumer,
-				regs[i].min_voltage, regs[i].max_voltage);
-			if (ret < 0) {
-				dev_err(dev,
-					"regulator %d set voltage failed, %d\n",
-					i, ret);
-				return ret;
-			}
-		}
-	}
-
-	return 0;
-}
-
-static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
-{
-	struct regulator_bulk_data *s = phy->supplies;
-	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
-	int num = phy->cfg->reg_cfg.num;
-	int i;
-
-	DBG("");
-	for (i = num - 1; i >= 0; i--)
-		if (regs[i].disable_load >= 0)
-			regulator_set_load(s[i].consumer,
-						regs[i].disable_load);
-
-	regulator_bulk_disable(num, s);
-}
-
-static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
-{
-	struct regulator_bulk_data *s = phy->supplies;
-	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
-	struct device *dev = &phy->pdev->dev;
-	int num = phy->cfg->reg_cfg.num;
-	int ret, i;
-
-	DBG("");
-	for (i = 0; i < num; i++) {
-		if (regs[i].enable_load >= 0) {
-			ret = regulator_set_load(s[i].consumer,
-							regs[i].enable_load);
-			if (ret < 0) {
-				dev_err(dev,
-					"regulator %d set op mode failed, %d\n",
-					i, ret);
-				goto fail;
-			}
-		}
-	}
-
-	ret = regulator_bulk_enable(num, s);
-	if (ret < 0) {
-		dev_err(dev, "regulator enable failed, %d\n", ret);
-		goto fail;
-	}
-
-	return 0;
-
-fail:
-	for (i--; i >= 0; i--)
-		regulator_set_load(s[i].consumer, regs[i].disable_load);
-	return ret;
-}
-
-#define S_DIV_ROUND_UP(n, d)	\
-	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
-
-static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
-				s32 min_result, bool even)
-{
-	s32 v;
-	v = (tmax - tmin) * percent;
-	v = S_DIV_ROUND_UP(v, 100) + tmin;
-	if (even && (v & 0x1))
-		return max_t(s32, min_result, v - 1);
-	else
-		return max_t(s32, min_result, v);
-}
-
-static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing,
-					s32 ui, s32 coeff, s32 pcnt)
-{
-	s32 tmax, tmin, clk_z;
-	s32 temp;
-
-	/* reset */
-	temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
-	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
-	if (tmin > 255) {
-		tmax = 511;
-		clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
-	} else {
-		tmax = 255;
-		clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
-	}
-
-	/* adjust */
-	temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
-	timing->clk_zero = clk_z + 8 - temp;
-}
-
-static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
-	const unsigned long bit_rate, const unsigned long esc_rate)
-{
-	s32 ui, lpx;
-	s32 tmax, tmin;
-	s32 pcnt0 = 10;
-	s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
-	s32 pcnt2 = 10;
-	s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
-	s32 coeff = 1000; /* Precision, should avoid overflow */
-	s32 temp;
-
-	if (!bit_rate || !esc_rate)
-		return -EINVAL;
-
-	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
-	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
-
-	tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
-	tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
-	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);
-
-	temp = lpx / ui;
-	if (temp & 0x1)
-		timing->hs_rqst = temp;
-	else
-		timing->hs_rqst = max_t(s32, 0, temp - 2);
-
-	/* Calculate clk_zero after clk_prepare and hs_rqst */
-	dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);
-
-	temp = 105 * coeff + 12 * ui - 20 * coeff;
-	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
-	tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
-	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
-
-	temp = 85 * coeff + 6 * ui;
-	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
-	temp = 40 * coeff + 4 * ui;
-	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
-	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);
-
-	tmax = 255;
-	temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
-	temp = 145 * coeff + 10 * ui - temp;
-	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
-	timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);
-
-	temp = 105 * coeff + 12 * ui - 20 * coeff;
-	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
-	temp = 60 * coeff + 4 * ui;
-	tmin = DIV_ROUND_UP(temp, ui) - 2;
-	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
-
-	tmax = 255;
-	tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
-	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);
-
-	tmax = 63;
-	temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
-	temp = 60 * coeff + 52 * ui - 24 * ui - temp;
-	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
-	timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);
-
-	tmax = 63;
-	temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
-	temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
-	temp += 8 * ui + lpx;
-	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
-	if (tmin > tmax) {
-		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false) >> 1;
-		timing->clk_pre = temp >> 1;
-		temp = (2 * tmax - tmin) * pcnt2;
-	} else {
-		timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
-	}
-
-	timing->ta_go = 3;
-	timing->ta_sure = 0;
-	timing->ta_get = 4;
-
-	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
-		timing->clk_pre, timing->clk_post, timing->clk_zero,
-		timing->clk_trail, timing->clk_prepare, timing->hs_exit,
-		timing->hs_zero, timing->hs_prepare, timing->hs_trail,
-		timing->hs_rqst);
-
-	return 0;
-}
-
-static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
-{
-	void __iomem *base = phy->reg_base;
-
-	if (!enable) {
-		dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
-		return;
-	}
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
-}
-
-static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
-		const unsigned long bit_rate, const unsigned long esc_rate)
-{
-	struct dsi_dphy_timing *timing = &phy->timing;
-	int i;
-	void __iomem *base = phy->base;
-
-	DBG("");
-
-	if (dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
-		pr_err("%s: D-PHY timing calculation failed\n", __func__);
-		return -EINVAL;
-	}
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
-
-	dsi_28nm_phy_regulator_ctrl(phy, true);
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
-		DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
-		DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
-		DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
-	if (timing->clk_zero & BIT(8))
-		dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
-			DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
-		DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
-		DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
-		DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
-		DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
-		DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
-		DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
-		DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
-		DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
-	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
-		DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
-
-	for (i = 0; i < 4; i++) {
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
-		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
-	}
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
-	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
-
-	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
-
-	if (is_dual_panel && (phy->id != DSI_CLOCK_MASTER))
-		dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x00);
-	else
-		dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x01);
-
-	return 0;
-}
-
-static int dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
-{
-	dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
-	dsi_28nm_phy_regulator_ctrl(phy, false);
-
-	/*
-	 * Wait for the register writes to complete in order to
-	 * ensure that the phy is completely disabled
-	 */
-	wmb();
-
-	return 0;
-}
-
-static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
-{
-	int ret;
-
-	pm_runtime_get_sync(&phy->pdev->dev);
-
-	ret = clk_prepare_enable(phy->ahb_clk);
-	if (ret) {
-		pr_err("%s: can't enable ahb clk, %d\n", __func__, ret);
-		pm_runtime_put_sync(&phy->pdev->dev);
-	}
-
-	return ret;
-}
-
-static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
-{
-	clk_disable_unprepare(phy->ahb_clk);
-	pm_runtime_put_sync(&phy->pdev->dev);
-}
-
-static const struct dsi_phy_cfg dsi_phy_cfgs[MSM_DSI_PHY_MAX] = {
-	[MSM_DSI_PHY_28NM_HPM] = {
-		.type = MSM_DSI_PHY_28NM_HPM,
-		.reg_cfg = {
-			.num = 1,
-			.regs = {
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-		.ops = {
-			.enable = dsi_28nm_phy_enable,
-			.disable = dsi_28nm_phy_disable,
-		}
-	},
-	[MSM_DSI_PHY_28NM_LP] = {
-		.type = MSM_DSI_PHY_28NM_LP,
-		.reg_cfg = {
-			.num = 1,
-			.regs = {
-				{"vddio", 1800000, 1800000, 100000, 100},
-			},
-		},
-		.ops = {
-			.enable = dsi_28nm_phy_enable,
-			.disable = dsi_28nm_phy_disable,
-		}
-	},
-};
-
-static const struct of_device_id dsi_phy_dt_match[] = {
-	{ .compatible = "qcom,dsi-phy-28nm-hpm",
-	  .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_HPM],},
-	{ .compatible = "qcom,dsi-phy-28nm-lp",
-	  .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_LP],},
-	{}
-};
-
-static int dsi_phy_driver_probe(struct platform_device *pdev)
-{
-	struct msm_dsi_phy *phy;
-	const struct of_device_id *match;
-	int ret;
-
-	phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
-	if (!phy)
-		return -ENOMEM;
-
-	match = of_match_node(dsi_phy_dt_match, pdev->dev.of_node);
-	if (!match)
-		return -ENODEV;
-
-	phy->cfg = match->data;
-	phy->pdev = pdev;
-
-	ret = of_property_read_u32(pdev->dev.of_node,
-				"qcom,dsi-phy-index", &phy->id);
-	if (ret) {
-		dev_err(&pdev->dev,
-			"%s: PHY index not specified, ret=%d\n",
-			__func__, ret);
-		goto fail;
-	}
-
-	phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
-	if (IS_ERR(phy->base)) {
-		dev_err(&pdev->dev, "%s: failed to map phy base\n", __func__);
-		ret = -ENOMEM;
-		goto fail;
-	}
-	phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG");
-	if (IS_ERR(phy->reg_base)) {
-		dev_err(&pdev->dev,
-			"%s: failed to map phy regulator base\n", __func__);
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	ret = dsi_phy_regulator_init(phy);
-	if (ret) {
-		dev_err(&pdev->dev, "%s: failed to init regulator\n", __func__);
-		goto fail;
-	}
-
-	phy->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
-	if (IS_ERR(phy->ahb_clk)) {
-		pr_err("%s: Unable to get ahb clk\n", __func__);
-		ret = PTR_ERR(phy->ahb_clk);
-		goto fail;
-	}
-
-	/* PLL init will call into clk_register which requires
-	 * register access, so we need to enable power and ahb clock.
-	 */
-	ret = dsi_phy_enable_resource(phy);
-	if (ret)
-		goto fail;
-
-	phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
-	if (!phy->pll)
-		dev_info(&pdev->dev,
-			"%s: pll init failed, need separate pll clk driver\n",
-			__func__);
-
-	dsi_phy_disable_resource(phy);
-
-	platform_set_drvdata(pdev, phy);
-
-	return 0;
-
-fail:
-	return ret;
-}
-
-static int dsi_phy_driver_remove(struct platform_device *pdev)
-{
-	struct msm_dsi_phy *phy = platform_get_drvdata(pdev);
-
-	if (phy && phy->pll) {
-		msm_dsi_pll_destroy(phy->pll);
-		phy->pll = NULL;
-	}
-
-	platform_set_drvdata(pdev, NULL);
-
-	return 0;
-}
-
-static struct platform_driver dsi_phy_platform_driver = {
-	.probe      = dsi_phy_driver_probe,
-	.remove     = dsi_phy_driver_remove,
-	.driver     = {
-		.name   = "msm_dsi_phy",
-		.of_match_table = dsi_phy_dt_match,
-	},
-};
-
-void __init msm_dsi_phy_driver_register(void)
-{
-	platform_driver_register(&dsi_phy_platform_driver);
-}
-
-void __exit msm_dsi_phy_driver_unregister(void)
-{
-	platform_driver_unregister(&dsi_phy_platform_driver);
-}
-
-int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
-	const unsigned long bit_rate, const unsigned long esc_rate)
-{
-	int ret;
-
-	if (!phy || !phy->cfg->ops.enable)
-		return -EINVAL;
-
-	ret = dsi_phy_regulator_enable(phy);
-	if (ret) {
-		dev_err(&phy->pdev->dev, "%s: regulator enable failed, %d\n",
-			__func__, ret);
-		return ret;
-	}
-
-	return phy->cfg->ops.enable(phy, is_dual_panel, bit_rate, esc_rate);
-}
-
-int msm_dsi_phy_disable(struct msm_dsi_phy *phy)
-{
-	if (!phy || !phy->cfg->ops.disable)
-		return -EINVAL;
-
-	phy->cfg->ops.disable(phy);
-	dsi_phy_regulator_disable(phy);
-
-	return 0;
-}
-
-void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
-	u32 *clk_pre, u32 *clk_post)
-{
-	if (!phy)
-		return;
-	if (clk_pre)
-		*clk_pre = phy->timing.clk_pre;
-	if (clk_post)
-		*clk_post = phy->timing.clk_post;
-}
-
-struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
-{
-	if (!phy)
-		return NULL;
-
-	return phy->pll;
-}
-
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 728152f..5de505e 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -8,19 +8,19 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
-Copyright (C) 2013-2014 by the following authors:
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
new file mode 100644
index 0000000..401ff58
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+
+#include "dsi_phy.h"
+
+#define S_DIV_ROUND_UP(n, d)	\
+	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
+
+static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
+				s32 min_result, bool even)
+{
+	s32 v;
+
+	v = (tmax - tmin) * percent;
+	v = S_DIV_ROUND_UP(v, 100) + tmin;
+	if (even && (v & 0x1))
+		return max_t(s32, min_result, v - 1);
+	else
+		return max_t(s32, min_result, v);
+}
+
+static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
+					s32 ui, s32 coeff, s32 pcnt)
+{
+	s32 tmax, tmin, clk_z;
+	s32 temp;
+
+	/* reset */
+	temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+	if (tmin > 255) {
+		tmax = 511;
+		clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
+	} else {
+		tmax = 255;
+		clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
+	}
+
+	/* adjust */
+	temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
+	timing->clk_zero = clk_z + 8 - temp;
+}
+
+int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
+	const unsigned long bit_rate, const unsigned long esc_rate)
+{
+	s32 ui, lpx;
+	s32 tmax, tmin;
+	s32 pcnt0 = 10;
+	s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
+	s32 pcnt2 = 10;
+	s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
+	s32 coeff = 1000; /* Precision, should avoid overflow */
+	s32 temp;
+
+	if (!bit_rate || !esc_rate)
+		return -EINVAL;
+
+	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
+
+	tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
+	tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
+	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);
+
+	temp = lpx / ui;
+	if (temp & 0x1)
+		timing->hs_rqst = temp;
+	else
+		timing->hs_rqst = max_t(s32, 0, temp - 2);
+
+	/* Calculate clk_zero after clk_prepare and hs_rqst */
+	dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);
+
+	temp = 105 * coeff + 12 * ui - 20 * coeff;
+	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+	tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
+	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+	temp = 85 * coeff + 6 * ui;
+	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+	temp = 40 * coeff + 4 * ui;
+	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);
+
+	tmax = 255;
+	temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
+	temp = 145 * coeff + 10 * ui - temp;
+	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+	timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);
+
+	temp = 105 * coeff + 12 * ui - 20 * coeff;
+	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+	temp = 60 * coeff + 4 * ui;
+	tmin = DIV_ROUND_UP(temp, ui) - 2;
+	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+	tmax = 255;
+	tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
+	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);
+
+	tmax = 63;
+	temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
+	temp = 60 * coeff + 52 * ui - 24 * ui - temp;
+	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+	timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+	tmax = 63;
+	temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+	temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
+	temp += 8 * ui + lpx;
+	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+	if (tmin > tmax) {
+		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
+		timing->clk_pre = temp >> 1;
+	} else {
+		timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
+	}
+
+	timing->ta_go = 3;
+	timing->ta_sure = 0;
+	timing->ta_get = 4;
+
+	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+		timing->clk_pre, timing->clk_post, timing->clk_zero,
+		timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+		timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+		timing->hs_rqst);
+
+	return 0;
+}
+
+void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
+				u32 bit_mask)
+{
+	int phy_id = phy->id;
+	u32 val;
+
+	if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX))
+		return;
+
+	val = dsi_phy_read(phy->base + reg);
+
+	if (phy->cfg->src_pll_truthtable[phy_id][pll_id])
+		dsi_phy_write(phy->base + reg, val | bit_mask);
+	else
+		dsi_phy_write(phy->base + reg, val & (~bit_mask));
+}
+
+static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
+{
+	struct regulator_bulk_data *s = phy->supplies;
+	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+	struct device *dev = &phy->pdev->dev;
+	int num = phy->cfg->reg_cfg.num;
+	int i, ret;
+
+	for (i = 0; i < num; i++)
+		s[i].supply = regs[i].name;
+
+	ret = devm_regulator_bulk_get(dev, num, s);
+	if (ret < 0) {
+		dev_err(dev, "%s: failed to init regulator, ret=%d\n",
+						__func__, ret);
+		return ret;
+	}
+
+	for (i = 0; i < num; i++) {
+		if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
+			ret = regulator_set_voltage(s[i].consumer,
+				regs[i].min_voltage, regs[i].max_voltage);
+			if (ret < 0) {
+				dev_err(dev,
+					"regulator %d set voltage failed, %d\n",
+					i, ret);
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
+{
+	struct regulator_bulk_data *s = phy->supplies;
+	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+	int num = phy->cfg->reg_cfg.num;
+	int i;
+
+	DBG("");
+	for (i = num - 1; i >= 0; i--)
+		if (regs[i].disable_load >= 0)
+			regulator_set_load(s[i].consumer, regs[i].disable_load);
+
+	regulator_bulk_disable(num, s);
+}
+
+static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
+{
+	struct regulator_bulk_data *s = phy->supplies;
+	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
+	struct device *dev = &phy->pdev->dev;
+	int num = phy->cfg->reg_cfg.num;
+	int ret, i;
+
+	DBG("");
+	for (i = 0; i < num; i++) {
+		if (regs[i].enable_load >= 0) {
+			ret = regulator_set_load(s[i].consumer,
+							regs[i].enable_load);
+			if (ret < 0) {
+				dev_err(dev,
+					"regulator %d set op mode failed, %d\n",
+					i, ret);
+				goto fail;
+			}
+		}
+	}
+
+	ret = regulator_bulk_enable(num, s);
+	if (ret < 0) {
+		dev_err(dev, "regulator enable failed, %d\n", ret);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	for (i--; i >= 0; i--)
+		regulator_set_load(s[i].consumer, regs[i].disable_load);
+	return ret;
+}
+
+static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
+{
+	struct device *dev = &phy->pdev->dev;
+	int ret;
+
+	pm_runtime_get_sync(dev);
+
+	ret = clk_prepare_enable(phy->ahb_clk);
+	if (ret) {
+		dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
+		pm_runtime_put_sync(dev);
+	}
+
+	return ret;
+}
+
+static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
+{
+	clk_disable_unprepare(phy->ahb_clk);
+	pm_runtime_put_sync(&phy->pdev->dev);
+}
+
+static const struct of_device_id dsi_phy_dt_match[] = {
+#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
+	{ .compatible = "qcom,dsi-phy-28nm-hpm",
+	  .data = &dsi_phy_28nm_hpm_cfgs },
+	{ .compatible = "qcom,dsi-phy-28nm-lp",
+	  .data = &dsi_phy_28nm_lp_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
+	{ .compatible = "qcom,dsi-phy-20nm",
+	  .data = &dsi_phy_20nm_cfgs },
+#endif
+	{}
+};
+
+static int dsi_phy_driver_probe(struct platform_device *pdev)
+{
+	struct msm_dsi_phy *phy;
+	struct device *dev = &pdev->dev;
+	const struct of_device_id *match;
+	int ret;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	match = of_match_node(dsi_phy_dt_match, dev->of_node);
+	if (!match)
+		return -ENODEV;
+
+	phy->cfg = match->data;
+	phy->pdev = pdev;
+
+	ret = of_property_read_u32(dev->of_node,
+				"qcom,dsi-phy-index", &phy->id);
+	if (ret) {
+		dev_err(dev, "%s: PHY index not specified, %d\n",
+			__func__, ret);
+		goto fail;
+	}
+
+	phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
+				"qcom,dsi-phy-regulator-ldo-mode");
+
+	phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+	if (IS_ERR(phy->base)) {
+		dev_err(dev, "%s: failed to map phy base\n", __func__);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
+				"DSI_PHY_REG");
+	if (IS_ERR(phy->reg_base)) {
+		dev_err(dev, "%s: failed to map phy regulator base\n",
+			__func__);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = dsi_phy_regulator_init(phy);
+	if (ret) {
+		dev_err(dev, "%s: failed to init regulator\n", __func__);
+		goto fail;
+	}
+
+	phy->ahb_clk = devm_clk_get(dev, "iface_clk");
+	if (IS_ERR(phy->ahb_clk)) {
+		dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
+		ret = PTR_ERR(phy->ahb_clk);
+		goto fail;
+	}
+
+	/* PLL init will call into clk_register which requires
+	 * register access, so we need to enable power and ahb clock.
+	 */
+	ret = dsi_phy_enable_resource(phy);
+	if (ret)
+		goto fail;
+
+	phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
+	if (!phy->pll)
+		dev_info(dev,
+			"%s: pll init failed, need separate pll clk driver\n",
+			__func__);
+
+	dsi_phy_disable_resource(phy);
+
+	platform_set_drvdata(pdev, phy);
+
+	return 0;
+
+fail:
+	return ret;
+}
+
+static int dsi_phy_driver_remove(struct platform_device *pdev)
+{
+	struct msm_dsi_phy *phy = platform_get_drvdata(pdev);
+
+	if (phy && phy->pll) {
+		msm_dsi_pll_destroy(phy->pll);
+		phy->pll = NULL;
+	}
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver dsi_phy_platform_driver = {
+	.probe      = dsi_phy_driver_probe,
+	.remove     = dsi_phy_driver_remove,
+	.driver     = {
+		.name   = "msm_dsi_phy",
+		.of_match_table = dsi_phy_dt_match,
+	},
+};
+
+void __init msm_dsi_phy_driver_register(void)
+{
+	platform_driver_register(&dsi_phy_platform_driver);
+}
+
+void __exit msm_dsi_phy_driver_unregister(void)
+{
+	platform_driver_unregister(&dsi_phy_platform_driver);
+}
+
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+	const unsigned long bit_rate, const unsigned long esc_rate)
+{
+	struct device *dev = &phy->pdev->dev;
+	int ret;
+
+	if (!phy || !phy->cfg->ops.enable)
+		return -EINVAL;
+
+	ret = dsi_phy_regulator_enable(phy);
+	if (ret) {
+		dev_err(dev, "%s: regulator enable failed, %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate);
+	if (ret) {
+		dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
+		dsi_phy_regulator_disable(phy);
+		return ret;
+	}
+
+	return 0;
+}
+
+void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
+{
+	if (!phy || !phy->cfg->ops.disable)
+		return;
+
+	phy->cfg->ops.disable(phy);
+
+	dsi_phy_regulator_disable(phy);
+}
+
+void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
+					u32 *clk_pre, u32 *clk_post)
+{
+	if (!phy)
+		return;
+
+	if (clk_pre)
+		*clk_pre = phy->timing.clk_pre;
+	if (clk_post)
+		*clk_post = phy->timing.clk_post;
+}
+
+struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
+{
+	if (!phy)
+		return NULL;
+
+	return phy->pll;
+}
+
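
The D-PHY timing calculation in the new dsi_phy.c leans on two small helpers
defined at the top of the file: S_DIV_ROUND_UP(), a sign-aware division that
rounds away from zero, and linear_inter(), which picks a value 'percent' of
the way from tmin toward tmax, optionally forced even and clamped to a floor.
A minimal standalone sketch of how they combine (plain userspace C, names
mirror the driver; a local comparison stands in for max_t()):

  #include <stdio.h>

  #define S_DIV_ROUND_UP(n, d) \
	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))

  static int linear_inter(int tmax, int tmin, int percent,
			int min_result, int even)
  {
	int v = (tmax - tmin) * percent;

	v = S_DIV_ROUND_UP(v, 100) + tmin;
	if (even && (v & 0x1))
		v--;
	return v > min_result ? v : min_result; /* stands in for max_t() */
  }

  int main(void)
  {
	/* 10% of the way from tmin=6 up to tmax=30, forced even: 8 */
	printf("%d\n", linear_inter(30, 6, 10, 0, 1));
	/* negative dividends round away from zero, not toward it */
	printf("%d\n", S_DIV_ROUND_UP(-5, 2)); /* prints -3 */
	return 0;
  }
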
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
new file mode 100644
index 0000000..0456b25
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DSI_PHY_H__
+#define __DSI_PHY_H__
+
+#include <linux/regulator/consumer.h>
+
+#include "dsi.h"
+
+#define dsi_phy_read(offset) msm_readl((offset))
+#define dsi_phy_write(offset, data) msm_writel((data), (offset))
+
+struct msm_dsi_phy_ops {
+	int (*enable)(struct msm_dsi_phy *phy, int src_pll_id,
+		const unsigned long bit_rate, const unsigned long esc_rate);
+	void (*disable)(struct msm_dsi_phy *phy);
+};
+
+struct msm_dsi_phy_cfg {
+	enum msm_dsi_phy_type type;
+	struct dsi_reg_config reg_cfg;
+	struct msm_dsi_phy_ops ops;
+
+	/*
+	 * Each cell {phy_id, pll_id} of the truth table indicates
+	 * whether the source PLL selection bit should be set for each PHY.
+	 * Fill in the default H/W values for illegal cells, e.g. cell {0, 1}.
+	 */
+	bool src_pll_truthtable[DSI_MAX][DSI_MAX];
+};
+
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
+
+struct msm_dsi_dphy_timing {
+	u32 clk_pre;
+	u32 clk_post;
+	u32 clk_zero;
+	u32 clk_trail;
+	u32 clk_prepare;
+	u32 hs_exit;
+	u32 hs_zero;
+	u32 hs_prepare;
+	u32 hs_trail;
+	u32 hs_rqst;
+	u32 ta_go;
+	u32 ta_sure;
+	u32 ta_get;
+};
+
+struct msm_dsi_phy {
+	struct platform_device *pdev;
+	void __iomem *base;
+	void __iomem *reg_base;
+	int id;
+
+	struct clk *ahb_clk;
+	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
+
+	struct msm_dsi_dphy_timing timing;
+	const struct msm_dsi_phy_cfg *cfg;
+
+	bool regulator_ldo_mode;
+
+	struct msm_dsi_pll *pll;
+};
+
+/*
+ * PHY internal functions
+ */
+int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
+	const unsigned long bit_rate, const unsigned long esc_rate);
+void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
+				u32 bit_mask);
+
+#endif /* __DSI_PHY_H__ */
+
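
The src_pll_truthtable added to msm_dsi_phy_cfg drives the
msm_dsi_phy_set_src_pll() helper from the dsi_phy.c hunk above: cell
[phy_id][pll_id] decides whether the PLL-select bit is set or cleared when
that PHY sources its clock from that PLL. A standalone sketch of the lookup
(userspace C; a plain variable stands in for the GLBL_TEST_CTRL register):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define DSI_MAX 2

  /* table copied from the 28nm HPM config in this patch */
  static const bool src_pll_truthtable[DSI_MAX][DSI_MAX] = {
	{ true, true }, { false, true },
  };

  static uint32_t glbl_test_ctrl; /* stand-in for the PHY register */

  static void set_src_pll(int phy_id, int pll_id, uint32_t bit_mask)
  {
	if (phy_id >= DSI_MAX || pll_id >= DSI_MAX)
		return;
	if (src_pll_truthtable[phy_id][pll_id])
		glbl_test_ctrl |= bit_mask;
	else
		glbl_test_ctrl &= ~bit_mask;
  }

  int main(void)
  {
	set_src_pll(1, 0, 1u << 2); /* PHY 1 sourcing from PLL 0 */
	printf("GLBL_TEST_CTRL = 0x%x\n", glbl_test_ctrl); /* 0x0 */
	return 0;
  }
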
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
new file mode 100644
index 0000000..2e9ba11
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static void dsi_20nm_dphy_set_timing(struct msm_dsi_phy *phy,
+		struct msm_dsi_dphy_timing *timing)
+{
+	void __iomem *base = phy->base;
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_0,
+		DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_1,
+		DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_2,
+		DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+	if (timing->clk_zero & BIT(8))
+		dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_3,
+			DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_4,
+		DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_5,
+		DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_6,
+		DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_7,
+		DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_8,
+		DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_9,
+		DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+		DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_10,
+		DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+	dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_11,
+		DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+	void __iomem *base = phy->reg_base;
+
+	if (!enable) {
+		dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+		return;
+	}
+
+	if (phy->regulator_ldo_mode) {
+		dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x1d);
+		return;
+	}
+
+	/* non LDO mode */
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_1, 0x03);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_2, 0x03);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_3, 0x00);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_4, 0x20);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0x01);
+	dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x00);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_0, 0x03);
+}
+
+static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+		const unsigned long bit_rate, const unsigned long esc_rate)
+{
+	struct msm_dsi_dphy_timing *timing = &phy->timing;
+	int i;
+	void __iomem *base = phy->base;
+	u32 cfg_4[4] = {0x20, 0x40, 0x20, 0x00};
+
+	DBG("");
+
+	if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+		dev_err(&phy->pdev->dev,
+			"%s: D-PHY timing calculation failed\n", __func__);
+		return -EINVAL;
+	}
+
+	dsi_20nm_phy_regulator_ctrl(phy, true);
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_0, 0xff);
+
+	msm_dsi_phy_set_src_pll(phy, src_pll_id,
+				REG_DSI_20nm_PHY_GLBL_TEST_CTRL,
+				DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
+
+	for (i = 0; i < 4; i++) {
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_3(i),
+							(i >> 1) * 0x40);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_0(i), 0x01);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_1(i), 0x46);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_0(i), 0x02);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_1(i), 0xa0);
+		dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_4(i), cfg_4[i]);
+	}
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_3, 0x80);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR0, 0x01);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR1, 0x46);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_0, 0x00);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_1, 0xa0);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_2, 0x00);
+	dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_4, 0x00);
+
+	dsi_20nm_dphy_set_timing(phy, timing);
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_1, 0x00);
+
+	dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_1, 0x06);
+
+	/* make sure everything is written before enable */
+	wmb();
+	dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_0, 0x7f);
+
+	return 0;
+}
+
+static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy)
+{
+	dsi_phy_write(phy->base + REG_DSI_20nm_PHY_CTRL_0, 0);
+	dsi_20nm_phy_regulator_ctrl(phy, false);
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
+	.type = MSM_DSI_PHY_20NM,
+	.src_pll_truthtable = { {false, true}, {false, true} },
+	.reg_cfg = {
+		.num = 2,
+		.regs = {
+			{"vddio", 1800000, 1800000, 100000, 100},
+			{"vcca", 1000000, 1000000, 10000, 100},
+		},
+	},
+	.ops = {
+		.enable = dsi_20nm_phy_enable,
+		.disable = dsi_20nm_phy_disable,
+	}
+};
+
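
With the PHY split into per-generation files, each variant exports its
msm_dsi_phy_cfg and the common probe picks one through the of_device_id
.data pointer in dsi_phy_dt_match. The shape of that lookup, reduced to a
standalone sketch (hypothetical names, no devicetree involved):

  #include <stdio.h>
  #include <string.h>

  struct phy_cfg { const char *name; int num_supplies; };

  static const struct phy_cfg cfg_28nm = { "28nm", 1 };
  static const struct phy_cfg cfg_20nm = { "20nm", 2 };

  struct dt_entry { const char *compatible; const void *data; };

  static const struct dt_entry dt_match[] = {
	{ "qcom,dsi-phy-28nm-hpm", &cfg_28nm },
	{ "qcom,dsi-phy-20nm",     &cfg_20nm },
	{ NULL, NULL },
  };

  static const struct phy_cfg *lookup(const char *compat)
  {
	const struct dt_entry *m;

	for (m = dt_match; m->compatible; m++)
		if (!strcmp(m->compatible, compat))
			return m->data;
	return NULL;
  }

  int main(void)
  {
	const struct phy_cfg *cfg = lookup("qcom,dsi-phy-20nm");

	if (cfg)
		printf("%s PHY: %d supplies\n", cfg->name, cfg->num_supplies);
	return 0;
  }
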
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
new file mode 100644
index 0000000..f1a7c7b
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
+		struct msm_dsi_dphy_timing *timing)
+{
+	void __iomem *base = phy->base;
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
+		DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
+		DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
+		DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+	if (timing->clk_zero & BIT(8))
+		dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
+			DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
+		DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
+		DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
+		DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
+		DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
+		DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
+		DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+		DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
+		DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
+		DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+	void __iomem *base = phy->reg_base;
+
+	if (!enable) {
+		dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+		return;
+	}
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+}
+
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+		const unsigned long bit_rate, const unsigned long esc_rate)
+{
+	struct msm_dsi_dphy_timing *timing = &phy->timing;
+	int i;
+	void __iomem *base = phy->base;
+
+	DBG("");
+
+	if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
+		dev_err(&phy->pdev->dev,
+			"%s: D-PHY timing calculation failed\n", __func__);
+		return -EINVAL;
+	}
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
+
+	dsi_28nm_phy_regulator_ctrl(phy, true);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
+
+	dsi_28nm_dphy_set_timing(phy, timing);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
+
+	for (i = 0; i < 4; i++) {
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
+		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
+	}
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
+	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
+
+	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+	msm_dsi_phy_set_src_pll(phy, src_pll_id,
+				REG_DSI_28nm_PHY_GLBL_TEST_CTRL,
+				DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
+
+	return 0;
+}
+
+static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+{
+	dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
+	dsi_28nm_phy_regulator_ctrl(phy, false);
+
+	/*
+	 * Wait for the register writes to complete in order to
+	 * ensure that the phy is completely disabled
+	 */
+	wmb();
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
+	.type = MSM_DSI_PHY_28NM_HPM,
+	.src_pll_truthtable = { {true, true}, {false, true} },
+	.reg_cfg = {
+		.num = 1,
+		.regs = {
+			{"vddio", 1800000, 1800000, 100000, 100},
+		},
+	},
+	.ops = {
+		.enable = dsi_28nm_phy_enable,
+		.disable = dsi_28nm_phy_disable,
+	},
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
+	.type = MSM_DSI_PHY_28NM_LP,
+	.src_pll_truthtable = { {true, true}, {true, true} },
+	.reg_cfg = {
+		.num = 1,
+		.regs = {
+			{"vddio", 1800000, 1800000, 100000, 100},
+		},
+	},
+	.ops = {
+		.enable = dsi_28nm_phy_enable,
+		.disable = dsi_28nm_phy_disable,
+	},
+};
+
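
dsi_phy_regulator_enable() in the new common file uses the usual
partial-failure unwind: on error it walks back over only the supplies that
were already configured, in reverse order, restoring their disable_load.
The idiom in isolation (standalone C sketch; set_load() is a stand-in that
fails on the third entry just to exercise the unwind path):

  #include <stdio.h>

  /* stand-in that fails on the third supply to exercise the unwind */
  static int set_load(int idx, int load)
  {
	if (idx == 2)
		return -1;
	printf("supply %d -> load %d uA\n", idx, load);
	return 0;
  }

  static int enable_all(int num, const int *enable, const int *disable)
  {
	int i, ret;

	for (i = 0; i < num; i++) {
		ret = set_load(i, enable[i]);
		if (ret < 0)
			goto fail;
	}
	return 0;

  fail:
	/* undo only what succeeded, newest first; skip the failed entry */
	for (i--; i >= 0; i--)
		set_load(i, disable[i]);
	return ret;
  }

  int main(void)
  {
	int en[] = { 100000, 100, 10000 };
	int dis[] = { 100, 100, 100 };

	return enable_all(3, en, dis) ? 1 : 0;
  }
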
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 509376f..5104fc9 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -72,31 +72,14 @@
 int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw)
 {
 	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-	int ret;
 
-	/*
-	 * Certain PLLs need to update the same VCO rate and registers
-	 * after resume in suspend/resume scenario.
-	 */
-	if (pll->restore_state) {
-		ret = pll->restore_state(pll);
-		if (ret)
-			goto error;
-	}
-
-	ret = dsi_pll_enable(pll);
-
-error:
-	return ret;
+	return dsi_pll_enable(pll);
 }
 
 void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw)
 {
 	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
 
-	if (pll->save_state)
-		pll->save_state(pll);
-
 	dsi_pll_disable(pll);
 }
 
@@ -134,6 +117,29 @@
 		pll->destroy(pll);
 }
 
+void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
+{
+	if (pll->save_state) {
+		pll->save_state(pll);
+		pll->state_saved = true;
+	}
+}
+
+int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
+{
+	int ret;
+
+	if (pll->restore_state && pll->state_saved) {
+		ret = pll->restore_state(pll);
+		if (ret)
+			return ret;
+
+		pll->state_saved = false;
+	}
+
+	return 0;
+}
+
 struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
 			enum msm_dsi_phy_type type, int id)
 {
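
The dsi_pll.c hunks above move PLL state handling out of the
clk_prepare/unprepare helpers into explicit msm_dsi_pll_save_state() and
msm_dsi_pll_restore_state() calls, gated by the state_saved flag added to
struct msm_dsi_pll in the dsi_pll.h hunk that follows, so a restore only
replays state that was actually saved. The guard in isolation (standalone
C sketch with a hypothetical pll struct):

  #include <stdbool.h>
  #include <stdio.h>

  struct pll {
	bool state_saved;
	unsigned long saved_rate;
	unsigned long rate;
  };

  static void pll_save_state(struct pll *p)
  {
	p->saved_rate = p->rate;
	p->state_saved = true;
  }

  static int pll_restore_state(struct pll *p)
  {
	if (!p->state_saved)
		return 0; /* nothing was saved, nothing to replay */
	p->rate = p->saved_rate;
	p->state_saved = false; /* never replay stale state twice */
	return 0;
  }

  int main(void)
  {
	struct pll p = { .rate = 74250000 };

	pll_save_state(&p);    /* e.g. before suspend */
	p.rate = 0;            /* power collapse loses the setting */
	pll_restore_state(&p); /* e.g. on resume */
	printf("rate = %lu\n", p.rate);
	return 0;
  }
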
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 5a3bb24..063caa2 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -27,6 +27,7 @@
 
 	struct clk_hw	clk_hw;
 	bool		pll_on;
+	bool		state_saved;
 
 	unsigned long	min_rate;
 	unsigned long	max_rate;
@@ -82,8 +83,16 @@
 /*
  * Initialization for Each PLL Type
  */
+#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
 struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
 					enum msm_dsi_phy_type type, int id);
+#else
+static inline struct msm_dsi_pll *msm_dsi_pll_28nm_init(
+	struct platform_device *pdev, enum msm_dsi_phy_type type, int id)
+{
+	return ERR_PTR(-ENODEV);
+}
+#endif
 
 #endif /* __DSI_PLL_H__ */
 
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 18b7727..598fdaf 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -465,26 +465,21 @@
 	void __iomem *base = pll_28nm->mmio;
 	int ret;
 
-	if ((cached_state->vco_rate != 0) &&
-		(cached_state->vco_rate == clk_hw_get_rate(&pll->clk_hw))) {
-		ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
-						cached_state->vco_rate, 0);
-		if (ret) {
-			dev_err(&pll_28nm->pdev->dev,
-				"restore vco rate failed. ret=%d\n", ret);
-			return ret;
-		}
-
-		pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
-				cached_state->postdiv3);
-		pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
-				cached_state->postdiv1);
-		pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
-				cached_state->byte_mux);
-
-		cached_state->vco_rate = 0;
+	ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
+					cached_state->vco_rate, 0);
+	if (ret) {
+		dev_err(&pll_28nm->pdev->dev,
+			"restore vco rate failed. ret=%d\n", ret);
+		return ret;
 	}
 
+	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+			cached_state->postdiv3);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+			cached_state->postdiv1);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
+			cached_state->byte_mux);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 26f268e..06cbddf 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -8,19 +8,19 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index f9c71dc..bef1d65 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -8,17 +8,17 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 7991069..81200e9 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -373,7 +373,7 @@
 	struct device *dev = &ctrl->pdev->dev;
 	int ret;
 
-	ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd");
+	ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd", GPIOD_IN);
 	if (IS_ERR(ctrl->panel_hpd_gpio)) {
 		ret = PTR_ERR(ctrl->panel_hpd_gpio);
 		ctrl->panel_hpd_gpio = NULL;
@@ -381,13 +381,7 @@
 		return ret;
 	}
 
-	ret = gpiod_direction_input(ctrl->panel_hpd_gpio);
-	if (ret) {
-		pr_err("%s: Set direction for hpd failed, %d\n", __func__, ret);
-		return ret;
-	}
-
-	ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en");
+	ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en", GPIOD_OUT_LOW);
 	if (IS_ERR(ctrl->panel_en_gpio)) {
 		ret = PTR_ERR(ctrl->panel_en_gpio);
 		ctrl->panel_en_gpio = NULL;
@@ -395,13 +389,6 @@
 		return ret;
 	}
 
-	ret = gpiod_direction_output(ctrl->panel_en_gpio, 0);
-	if (ret) {
-		pr_err("%s: Set direction for panel_en failed, %d\n",
-				__func__, ret);
-		return ret;
-	}
-
 	DBG("gpio on");
 
 	return 0;
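
The edp_ctrl change above switches to the three-argument devm_gpiod_get(),
which requests the descriptor and configures its direction (and, for
outputs, the initial level) in a single call, making the removed
gpiod_direction_input()/gpiod_direction_output() calls redundant. The shape
of the call, as a sketch ("example-hpd" is an illustrative con_id, not one
from this driver):

  #include <linux/err.h>
  #include <linux/gpio/consumer.h>

  static int example_get_hpd(struct device *dev)
  {
	struct gpio_desc *hpd;

	/* request + configure as input in one step; no follow-up
	 * gpiod_direction_input() call is needed */
	hpd = devm_gpiod_get(dev, "example-hpd", GPIOD_IN);
	if (IS_ERR(hpd))
		return PTR_ERR(hpd);

	return 0;
  }
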
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 8145362..101b324 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -22,7 +22,9 @@
 void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
 {
 	uint32_t ctrl = 0;
+	unsigned long flags;
 
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
 	if (power_on) {
 		ctrl |= HDMI_CTRL_ENABLE;
 		if (!hdmi->hdmi_mode) {
@@ -37,6 +39,7 @@
 	}
 
 	hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
 	DBG("HDMI Core: %s, HDMI_CTRL=0x%08x",
 			power_on ? "Enable" : "Disable", ctrl);
 }
@@ -51,6 +54,10 @@
 	/* Process DDC: */
 	hdmi_i2c_irq(hdmi->i2c);
 
+	/* Process HDCP: */
+	if (hdmi->hdcp_ctrl)
+		hdmi_hdcp_irq(hdmi->hdcp_ctrl);
+
 	/* TODO audio.. */
 
 	return IRQ_HANDLED;
@@ -60,6 +67,15 @@
 {
 	struct hdmi_phy *phy = hdmi->phy;
 
+	/*
+	 * At this point HPD has been disabled; once the workqueue is
+	 * flushed, it is safe to deinit HDCP.
+	 */
+	if (hdmi->workq) {
+		flush_workqueue(hdmi->workq);
+		destroy_workqueue(hdmi->workq);
+	}
+	hdmi_hdcp_destroy(hdmi);
 	if (phy)
 		phy->funcs->destroy(phy);
 
@@ -77,6 +93,7 @@
 {
 	struct hdmi_platform_config *config = pdev->dev.platform_data;
 	struct hdmi *hdmi = NULL;
+	struct resource *res;
 	int i, ret;
 
 	hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
@@ -87,18 +104,18 @@
 
 	hdmi->pdev = pdev;
 	hdmi->config = config;
+	spin_lock_init(&hdmi->reg_lock);
 
 	/* not sure which phy maps to which msm.. probably I'm missing some */
-	if (config->phy_init)
+	if (config->phy_init) {
 		hdmi->phy = config->phy_init(hdmi);
-	else
-		hdmi->phy = ERR_PTR(-ENXIO);
 
-	if (IS_ERR(hdmi->phy)) {
-		ret = PTR_ERR(hdmi->phy);
-		dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
-		hdmi->phy = NULL;
-		goto fail;
+		if (IS_ERR(hdmi->phy)) {
+			ret = PTR_ERR(hdmi->phy);
+			dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
+			hdmi->phy = NULL;
+			goto fail;
+		}
 	}
 
 	hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
@@ -107,6 +124,18 @@
 		goto fail;
 	}
 
+	/* HDCP needs physical address of hdmi register */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+		config->mmio_name);
+	hdmi->mmio_phy_addr = res->start;
+
+	hdmi->qfprom_mmio = msm_ioremap(pdev,
+		config->qfprom_mmio_name, "HDMI_QFPROM");
+	if (IS_ERR(hdmi->qfprom_mmio)) {
+		dev_info(&pdev->dev, "can't find qfprom resource\n");
+		hdmi->qfprom_mmio = NULL;
+	}
+
 	hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) *
 			config->hpd_reg_cnt, GFP_KERNEL);
 	if (!hdmi->hpd_regs) {
@@ -189,6 +218,8 @@
 		hdmi->pwr_clks[i] = clk;
 	}
 
+	hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
+
 	hdmi->i2c = hdmi_i2c_init(hdmi);
 	if (IS_ERR(hdmi->i2c)) {
 		ret = PTR_ERR(hdmi->i2c);
@@ -197,6 +228,12 @@
 		goto fail;
 	}
 
+	hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi);
+	if (IS_ERR(hdmi->hdcp_ctrl)) {
+		dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
+		hdmi->hdcp_ctrl = NULL;
+	}
+
 	return hdmi;
 
 fail:
@@ -310,7 +347,7 @@
 static const char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_clk"};
 static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
 
-static struct hdmi_platform_config hdmi_tx_8074_config = {
+static struct hdmi_platform_config hdmi_tx_8974_config = {
 		.phy_init = hdmi_phy_8x74_init,
 		HDMI_CFG(pwr_reg, 8x74),
 		HDMI_CFG(hpd_reg, 8x74),
@@ -330,9 +367,21 @@
 		.hpd_freq      = hpd_clk_freq_8x74,
 };
 
+static const char *hpd_reg_names_8x94[] = {};
+
+static struct hdmi_platform_config hdmi_tx_8994_config = {
+		.phy_init = NULL, /* nothing to do for this HDMI PHY 20nm */
+		HDMI_CFG(pwr_reg, 8x74),
+		HDMI_CFG(hpd_reg, 8x94),
+		HDMI_CFG(pwr_clk, 8x74),
+		HDMI_CFG(hpd_clk, 8x74),
+		.hpd_freq      = hpd_clk_freq_8x74,
+};
+
 static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
 	{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
-	{ .compatible = "qcom,hdmi-tx-8074", .data = &hdmi_tx_8074_config },
+	{ .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
 	{ .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
 	{ .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config },
 	{}
@@ -347,8 +396,7 @@
 		snprintf(name2, sizeof(name2), "%s-gpio", name);
 		gpio = of_get_named_gpio(of_node, name2, 0);
 		if (gpio < 0) {
-			dev_err(dev, "failed to get gpio: %s (%d)\n",
-					name, gpio);
+			DBG("failed to get gpio: %s (%d)", name, gpio);
 			gpio = -1;
 		}
 	}
@@ -376,6 +424,7 @@
 	}
 
 	hdmi_cfg->mmio_name     = "core_physical";
+	hdmi_cfg->qfprom_mmio_name = "qfprom_physical";
 	hdmi_cfg->ddc_clk_gpio  = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
 	hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
 	hdmi_cfg->hpd_gpio      = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
@@ -391,7 +440,6 @@
 	if (cpu_is_apq8064()) {
 		static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
 		config.phy_init      = hdmi_phy_8960_init;
-		config.mmio_name     = "hdmi_msm_hdmi_addr";
 		config.hpd_reg_names = hpd_reg_names;
 		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
 		config.hpd_clk_names = hpd_clk_names;
@@ -404,7 +452,6 @@
 	} else if (cpu_is_msm8960() || cpu_is_msm8960ab()) {
 		static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
 		config.phy_init      = hdmi_phy_8960_init;
-		config.mmio_name     = "hdmi_msm_hdmi_addr";
 		config.hpd_reg_names = hpd_reg_names;
 		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
 		config.hpd_clk_names = hpd_clk_names;
@@ -419,7 +466,6 @@
 				"8901_hdmi_mvs", "8901_mpp0"
 		};
 		config.phy_init      = hdmi_phy_8x60_init;
-		config.mmio_name     = "hdmi_msm_hdmi_addr";
 		config.hpd_reg_names = hpd_reg_names;
 		config.hpd_reg_cnt   = ARRAY_SIZE(hpd_reg_names);
 		config.hpd_clk_names = hpd_clk_names;
@@ -430,6 +476,9 @@
 		config.mux_en_gpio   = -1;
 		config.mux_sel_gpio  = -1;
 	}
+	config.mmio_name     = "hdmi_msm_hdmi_addr";
+	config.qfprom_mmio_name = "hdmi_msm_qfprom_addr";
+
 	hdmi_cfg = &config;
 #endif
 	dev->platform_data = hdmi_cfg;
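
hdmi_destroy() above flushes and destroys the new ordered workqueue before
hdmi_hdcp_destroy(), so no queued HDCP work can still run against state
that is about to be freed. A minimal sketch of that ordering (hypothetical
driver struct; the workqueue calls are the stock kernel API):

  #include <linux/slab.h>
  #include <linux/workqueue.h>

  /* hypothetical driver struct, reduced to the two relevant fields */
  struct example_hdmi {
	struct workqueue_struct *workq;
	void *hdcp_state;
  };

  static void example_hdmi_destroy(struct example_hdmi *ex)
  {
	if (ex->workq) {
		flush_workqueue(ex->workq);   /* let queued work finish */
		destroy_workqueue(ex->workq); /* nothing new can queue */
	}
	/* only now is state used by the work items safe to free */
	kfree(ex->hdcp_state);
	ex->hdcp_state = NULL;
  }
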
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 68fdfb3..d0e6631 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -37,6 +37,8 @@
 	int rate;
 };
 
+struct hdmi_hdcp_ctrl;
+
 struct hdmi {
 	struct drm_device *dev;
 	struct platform_device *pdev;
@@ -51,6 +53,8 @@
 	unsigned long int pixclock;
 
 	void __iomem *mmio;
+	void __iomem *qfprom_mmio;
+	phys_addr_t mmio_phy_addr;
 
 	struct regulator **hpd_regs;
 	struct regulator **pwr_regs;
@@ -68,12 +72,25 @@
 	bool hdmi_mode;               /* are we in hdmi mode? */
 
 	int irq;
+	struct workqueue_struct *workq;
+
+	struct hdmi_hdcp_ctrl *hdcp_ctrl;
+
+	/*
+	 * Spinlock to protect registers shared by different execution paths:
+	 * REG_HDMI_CTRL
+	 * REG_HDMI_DDC_ARBITRATION
+	 * REG_HDMI_HDCP_INT_CTRL
+	 * REG_HDMI_HPD_CTRL
+	 */
+	spinlock_t reg_lock;
 };
 
 /* platform config data (ie. from DT, or pdata) */
 struct hdmi_platform_config {
 	struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
 	const char *mmio_name;
+	const char *qfprom_mmio_name;
 
 	/* regulators that need to be on for hpd: */
 	const char **hpd_reg_names;
@@ -109,6 +126,11 @@
 	return msm_readl(hdmi->mmio + reg);
 }
 
+static inline u32 hdmi_qfprom_read(struct hdmi *hdmi, u32 reg)
+{
+	return msm_readl(hdmi->qfprom_mmio + reg);
+}
+
 /*
  * The phy appears to be different, for example between 8960 and 8x60,
  * so split the phy related functions out and load the correct one at
@@ -117,7 +139,6 @@
 
 struct hdmi_phy_funcs {
 	void (*destroy)(struct hdmi_phy *phy);
-	void (*reset)(struct hdmi_phy *phy);
 	void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock);
 	void (*powerdown)(struct hdmi_phy *phy);
 };
@@ -163,4 +184,13 @@
 void hdmi_i2c_destroy(struct i2c_adapter *i2c);
 struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
 
+/*
+ * hdcp
+ */
+struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi);
+void hdmi_hdcp_destroy(struct hdmi *hdmi);
+void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+
 #endif /* __HDMI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index e6f0348..0b1b558 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,17 +8,17 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -441,6 +441,12 @@
 
 #define REG_HDMI_HDCP_SW_LOWER_AKSV				0x00000288
 
+#define REG_HDMI_CEC_CTRL					0x0000028c
+
+#define REG_HDMI_CEC_WR_DATA					0x00000290
+
+#define REG_HDMI_CEC_CEC_RETRANSMIT				0x00000294
+
 #define REG_HDMI_CEC_STATUS					0x00000298
 
 #define REG_HDMI_CEC_INT					0x0000029c
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index 872485f..df232e2 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -203,7 +203,6 @@
 		audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4);
 		audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE;
 	} else {
-		hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
 		acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_CONT;
 		acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SEND;
 		vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index a7a1d82..92b69ae 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -100,8 +100,13 @@
 		hdmi_audio_update(hdmi);
 	}
 
-	phy->funcs->powerup(phy, hdmi->pixclock);
+	if (phy)
+		phy->funcs->powerup(phy, hdmi->pixclock);
+
 	hdmi_set_mode(hdmi, true);
+
+	if (hdmi->hdcp_ctrl)
+		hdmi_hdcp_on(hdmi->hdcp_ctrl);
 }
 
 static void hdmi_bridge_enable(struct drm_bridge *bridge)
@@ -118,9 +123,14 @@
 	struct hdmi *hdmi = hdmi_bridge->hdmi;
 	struct hdmi_phy *phy = hdmi->phy;
 
+	if (hdmi->hdcp_ctrl)
+		hdmi_hdcp_off(hdmi->hdcp_ctrl);
+
 	DBG("power down");
 	hdmi_set_mode(hdmi, false);
-	phy->funcs->powerdown(phy);
+
+	if (phy)
+		phy->funcs->powerdown(phy);
 
 	if (hdmi->power_on) {
 		power_off(bridge);
@@ -142,8 +152,6 @@
 
 	hdmi->pixclock = mode->clock * 1000;
 
-	hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1;
-
 	hstart = mode->htotal - mode->hsync_start;
 	hend   = mode->htotal - mode->hsync_start + mode->hdisplay;
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 54aa93f..a3b05ae 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -28,6 +28,55 @@
 };
 #define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
 
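+/*
+ * Toggle the PHY SW_RESET (and SW_RESET_PLL) bits, honoring their
+ * active-low variants. HDMI_PHY_CTRL lives in the core mmio, which is
+ * why this replaces the per-phy reset callbacks removed later in this
+ * patch.
+ */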
+static void hdmi_phy_reset(struct hdmi *hdmi)
+{
+	unsigned int val;
+
+	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+	} else {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+	}
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+	} else {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET_PLL);
+	}
+
+	msleep(100);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+	} else {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+	}
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+		/* pull high */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET_PLL);
+	} else {
+		/* pull low */
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+	}
+}
+
 static int gpio_config(struct hdmi *hdmi, bool on)
 {
 	struct device *dev = &hdmi->pdev->dev;
@@ -35,21 +84,25 @@
 	int ret;
 
 	if (on) {
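+		/* ddc gpios are optional; get_gpio() returns -1 when they are absent */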
-		ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
-		if (ret) {
-			dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
-				"HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
-			goto error1;
+		if (config->ddc_clk_gpio != -1) {
+			ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
+			if (ret) {
+				dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
+				goto error1;
+			}
+			gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
 		}
-		gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
 
-		ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
-		if (ret) {
-			dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
-				"HDMI_DDC_DATA", config->ddc_data_gpio, ret);
-			goto error2;
+		if (config->ddc_data_gpio != -1) {
+			ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
+			if (ret) {
+				dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_DDC_DATA", config->ddc_data_gpio, ret);
+				goto error2;
+			}
+			gpio_set_value_cansleep(config->ddc_data_gpio, 1);
 		}
-		gpio_set_value_cansleep(config->ddc_data_gpio, 1);
 
 		ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
 		if (ret) {
@@ -94,8 +147,12 @@
 		}
 		DBG("gpio on");
 	} else {
-		gpio_free(config->ddc_clk_gpio);
-		gpio_free(config->ddc_data_gpio);
+		if (config->ddc_clk_gpio != -1)
+			gpio_free(config->ddc_clk_gpio);
+
+		if (config->ddc_data_gpio != -1)
+			gpio_free(config->ddc_data_gpio);
+
 		gpio_free(config->hpd_gpio);
 
 		if (config->mux_en_gpio != -1) {
@@ -126,9 +183,11 @@
 error4:
 	gpio_free(config->hpd_gpio);
 error3:
-	gpio_free(config->ddc_data_gpio);
+	if (config->ddc_data_gpio != -1)
+		gpio_free(config->ddc_data_gpio);
 error2:
-	gpio_free(config->ddc_clk_gpio);
+	if (config->ddc_clk_gpio != -1)
+		gpio_free(config->ddc_clk_gpio);
 error1:
 	return ret;
 }
@@ -138,9 +197,9 @@
 	struct hdmi *hdmi = hdmi_connector->hdmi;
 	const struct hdmi_platform_config *config = hdmi->config;
 	struct device *dev = &hdmi->pdev->dev;
-	struct hdmi_phy *phy = hdmi->phy;
 	uint32_t hpd_ctrl;
 	int i, ret;
+	unsigned long flags;
 
 	for (i = 0; i < config->hpd_reg_cnt; i++) {
 		ret = regulator_enable(hdmi->hpd_regs[i]);
@@ -181,7 +240,7 @@
 	}
 
 	hdmi_set_mode(hdmi, false);
-	phy->funcs->reset(phy);
+	hdmi_phy_reset(hdmi);
 	hdmi_set_mode(hdmi, true);
 
 	hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
@@ -192,6 +251,7 @@
 			HDMI_HPD_INT_CTRL_INT_EN);
 
 	/* set timeout to 4.1ms (max) for hardware debounce */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
 	hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
 	hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
 
@@ -200,6 +260,7 @@
 			~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
 	hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
 			HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
 
 	return 0;
 
@@ -250,7 +311,6 @@
 void hdmi_connector_irq(struct drm_connector *connector)
 {
 	struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
-	struct msm_drm_private *priv = connector->dev->dev_private;
 	struct hdmi *hdmi = hdmi_connector->hdmi;
 	uint32_t hpd_int_status, hpd_int_ctrl;
 
@@ -274,7 +334,7 @@
 			hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
 		hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
 
-		queue_work(priv->wq, &hdmi_connector->hpd_work);
+		queue_work(hdmi->workq, &hdmi_connector->hpd_work);
 	}
 }
 
@@ -350,6 +410,7 @@
 
 	hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
 
+	hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
 	drm_mode_connector_update_edid_property(connector, edid);
 
 	if (edid) {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
new file mode 100644
index 0000000..1dc9c34
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -0,0 +1,1437 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "hdmi.h"
+#include <linux/qcom_scm.h>
+
+#define HDCP_REG_ENABLE 0x01
+#define HDCP_REG_DISABLE 0x00
+#define HDCP_PORT_ADDR 0x74
+
+#define HDCP_INT_STATUS_MASK ( \
+		HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT | \
+		HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT | \
+		HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_INT | \
+		HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_INT)
+
+#define AUTH_WORK_RETRIES_TIME 100
+#define AUTH_RETRIES_TIME 30
+
+/* QFPROM Registers for HDMI/HDCP */
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB  0x000000F8
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB  0x000000FC
+#define HDCP_KSV_LSB                     0x000060D8
+#define HDCP_KSV_MSB                     0x000060DC
+
+enum DS_TYPE {  /* type of downstream device */
+	DS_UNKNOWN,
+	DS_RECEIVER,
+	DS_REPEATER,
+};
+
+enum hdmi_hdcp_state {
+	HDCP_STATE_NO_AKSV,
+	HDCP_STATE_INACTIVE,
+	HDCP_STATE_AUTHENTICATING,
+	HDCP_STATE_AUTHENTICATED,
+	HDCP_STATE_AUTH_FAILED
+};
+
+struct hdmi_hdcp_reg_data {
+	u32 reg_id;
+	u32 off;
+	char *name;
+	u32 reg_val;
+};
+
+struct hdmi_hdcp_ctrl {
+	struct hdmi *hdmi;
+	u32 auth_retries;
+	bool tz_hdcp;
+	enum hdmi_hdcp_state hdcp_state;
+	struct work_struct hdcp_auth_work;
+	struct work_struct hdcp_reauth_work;
+
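+/* auth_event bits; setting one wakes any hdmi_hdcp_msleep() waiter */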
+#define AUTH_ABORT_EV 1
+#define AUTH_RESULT_RDY_EV 2
+	unsigned long auth_event;
+	wait_queue_head_t auth_event_queue;
+
+	u32 ksv_fifo_w_index;
+	/*
+	 * store aksv from qfprom
+	 */
+	u32 aksv_lsb;
+	u32 aksv_msb;
+	bool aksv_valid;
+	u32 ds_type;
+	u32 bksv_lsb;
+	u32 bksv_msb;
+	u8 dev_count;
+	u8 depth;
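+	/* KSV list: up to 127 downstream devices, 5 bytes per KSV */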
+	u8 ksv_list[5 * 127];
+	bool max_cascade_exceeded;
+	bool max_dev_exceeded;
+};
+
+static int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
+	u8 *data, u16 data_len)
+{
+	int rc;
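+	/*
+	 * DDC uses 8-bit addresses on the wire; the i2c core expects 7-bit
+	 * addresses, hence the addr >> 1.
+	 */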
+	int retry = 5;
+	struct i2c_msg msgs[] = {
+		{
+			.addr	= addr >> 1,
+			.flags	= 0,
+			.len	= 1,
+			.buf	= &offset,
+		}, {
+			.addr	= addr >> 1,
+			.flags	= I2C_M_RD,
+			.len	= data_len,
+			.buf	= data,
+		}
+	};
+
+	DBG("Start DDC read");
+retry:
+	rc = i2c_transfer(hdmi->i2c, msgs, 2);
+
+	retry--;
+	if (rc == 2)
+		rc = 0;
+	else if (retry > 0)
+		goto retry;
+	else
+		rc = -EIO;
+
+	DBG("End DDC read %d", rc);
+
+	return rc;
+}
+
+#define HDCP_DDC_WRITE_MAX_BYTE_NUM 32
+
+static int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
+	u8 *data, u16 data_len)
+{
+	int rc;
+	int retry = 10;
+	u8 buf[HDCP_DDC_WRITE_MAX_BYTE_NUM];
+	struct i2c_msg msgs[] = {
+		{
+			.addr	= addr >> 1,
+			.flags	= 0,
+			.len	= 1,
+		}
+	};
+
+	DBG("Start DDC write");
+	if (data_len > (HDCP_DDC_WRITE_MAX_BYTE_NUM - 1)) {
+		pr_err("%s: write size too big\n", __func__);
+		return -ERANGE;
+	}
+
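+	/* the first byte on the wire is the DDC offset, then the payload */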
+	buf[0] = offset;
+	memcpy(&buf[1], data, data_len);
+	msgs[0].buf = buf;
+	msgs[0].len = data_len + 1;
+retry:
+	rc = i2c_transfer(hdmi->i2c, msgs, 1);
+
+	retry--;
+	if (rc == 1)
+		rc = 0;
+	else if (retry > 0)
+		goto retry;
+	else
+		rc = -EIO;
+
+	DBG("End DDC write %d", rc);
+
+	return rc;
+}
+
+static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg,
+	u32 *pdata, u32 count)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	struct qcom_scm_hdcp_req scm_buf[QCOM_SCM_HDCP_MAX_REQ_CNT];
+	u32 resp, phy_addr, idx = 0;
+	int i, ret = 0;
+
+	WARN_ON(!pdata || !preg || (count == 0));
+
+	if (hdcp_ctrl->tz_hdcp) {
+		phy_addr = (u32)hdmi->mmio_phy_addr;
+
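+		/*
+		 * The HDCP registers are secured by TrustZone; writes must go
+		 * through an SCM call, batched in chunks of at most
+		 * QCOM_SCM_HDCP_MAX_REQ_CNT register/value pairs.
+		 */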
+		while (count) {
+			memset(scm_buf, 0, sizeof(scm_buf));
+			for (i = 0; i < count && i < QCOM_SCM_HDCP_MAX_REQ_CNT;
+				i++) {
+				scm_buf[i].addr = phy_addr + preg[idx];
+				scm_buf[i].val  = pdata[idx];
+				idx++;
+			}
+			ret = qcom_scm_hdcp_req(scm_buf, i, &resp);
+
+			if (ret || resp) {
+				pr_err("%s: error: scm_call ret=%d resp=%u\n",
+					__func__, ret, resp);
+				ret = -EINVAL;
+				break;
+			}
+
+			count -= i;
+		}
+	} else {
+		for (i = 0; i < count; i++)
+			hdmi_write(hdmi, preg[i], pdata[i]);
+	}
+
+	return ret;
+}
+
+void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val, hdcp_int_status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_INT_CTRL);
+	hdcp_int_status = reg_val & HDCP_INT_STATUS_MASK;
+	if (!hdcp_int_status) {
+		spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+		return;
+	}
+	/* Clear interrupts: each ACK bit sits one position above its status bit */
+	reg_val |= hdcp_int_status << 1;
+	/* Clear AUTH_FAIL_INFO as well */
+	if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT)
+		reg_val |= HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK;
+	hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	DBG("hdcp irq %x", hdcp_int_status);
+
+	if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT) {
+		pr_info("%s:AUTH_SUCCESS_INT received\n", __func__);
+		if (HDCP_STATE_AUTHENTICATING == hdcp_ctrl->hdcp_state) {
+			set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event);
+			wake_up_all(&hdcp_ctrl->auth_event_queue);
+		}
+	}
+
+	if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT) {
+		reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+		pr_info("%s: AUTH_FAIL_INT rcvd, LINK0_STATUS=0x%08x\n",
+			__func__, reg_val);
+		if (HDCP_STATE_AUTHENTICATED == hdcp_ctrl->hdcp_state)
+			queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
+		else if (HDCP_STATE_AUTHENTICATING ==
+				hdcp_ctrl->hdcp_state) {
+			set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event);
+			wake_up_all(&hdcp_ctrl->auth_event_queue);
+		}
+	}
+}
+
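+/*
+ * Interruptible sleep: wait up to @ms milliseconds, but return early
+ * (-ECANCELED) if event @ev is signalled; 0 means the full time elapsed.
+ */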
+static int hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev)
+{
+	int rc;
+
+	rc = wait_event_timeout(hdcp_ctrl->auth_event_queue,
+		!!test_bit(ev, &hdcp_ctrl->auth_event),
+		msecs_to_jiffies(ms));
+	if (rc) {
+		pr_info("%s: msleep is canceled by event %d\n",
+				__func__, ev);
+		clear_bit(ev, &hdcp_ctrl->auth_event);
+		return -ECANCELED;
+	}
+
+	return 0;
+}
+
+static int hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+
+	/* Fetch the AKSV from QFPROM; this info should be public. */
+	hdcp_ctrl->aksv_lsb = hdmi_qfprom_read(hdmi, HDCP_KSV_LSB);
+	hdcp_ctrl->aksv_msb = hdmi_qfprom_read(hdmi, HDCP_KSV_MSB);
+
+	/* a valid AKSV must contain exactly 20 ones (and 20 zeros) */
+	if ((hweight32(hdcp_ctrl->aksv_lsb) + hweight32(hdcp_ctrl->aksv_msb))
+			!= 20) {
+		pr_err("%s: AKSV QFPROM doesn't have 20 1's and 20 0's\n",
+			__func__);
+		pr_err("%s: QFPROM AKSV chk failed (AKSV=%02x%08x)\n",
+			__func__, hdcp_ctrl->aksv_msb,
+			hdcp_ctrl->aksv_lsb);
+		return -EINVAL;
+	}
+	DBG("AKSV=%02x%08x", hdcp_ctrl->aksv_msb, hdcp_ctrl->aksv_lsb);
+
+	return 0;
+}
+
+static int reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val, failure, nack0;
+	int rc = 0;
+
+	/* Check for any DDC transfer failures */
+	reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+	failure = reg_val & HDMI_HDCP_DDC_STATUS_FAILED;
+	nack0 = reg_val & HDMI_HDCP_DDC_STATUS_NACK0;
+	DBG("HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d",
+		reg_val, failure, nack0);
+
+	if (failure) {
+		/*
+		 * Indicates that the last HDCP HW DDC transfer failed.
+		 * This occurs when a transfer is attempted with HDCP DDC
+		 * disabled (HDCP_DDC_DISABLE=1) or the number of retries
+		 * matches HDCP_DDC_RETRY_CNT.
+		 * A failure occurred, so clear it.
+		 */
+		DBG("DDC failure detected");
+
+		/* First, Disable DDC */
+		hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0,
+			HDMI_HDCP_DDC_CTRL_0_DISABLE);
+
+		/* ACK the Failure to Clear it */
+		reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_CTRL_1);
+		reg_val |= HDMI_HDCP_DDC_CTRL_1_FAILED_ACK;
+		hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_1, reg_val);
+
+		/* Check if the FAILURE got Cleared */
+		reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+		if (reg_val & HDMI_HDCP_DDC_STATUS_FAILED)
+			pr_info("%s: Unable to clear HDCP DDC Failure\n",
+				__func__);
+
+		/* Re-Enable HDCP DDC */
+		hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0, 0);
+	}
+
+	if (nack0) {
+		DBG("Before: HDMI_DDC_SW_STATUS=0x%08x",
+			hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS));
+		/* Reset HDMI DDC software status */
+		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+		reg_val |= HDMI_DDC_CTRL_SW_STATUS_RESET;
+		hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+
+		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+		reg_val &= ~HDMI_DDC_CTRL_SW_STATUS_RESET;
+		hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+		/* Reset HDMI DDC Controller */
+		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+		reg_val |= HDMI_DDC_CTRL_SOFT_RESET;
+		hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+		/* If previous msleep is aborted, skip this msleep */
+		if (!rc)
+			rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+
+		reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+		reg_val &= ~HDMI_DDC_CTRL_SOFT_RESET;
+		hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+		DBG("After: HDMI_DDC_SW_STATUS=0x%08x",
+			hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS));
+	}
+
+	return rc;
+}
+
+static int hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	u32 hdcp_ddc_status, ddc_hw_status;
+	u32 xfer_done, xfer_req, hw_done;
+	bool hw_not_ready;
+	u32 timeout_count;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+
+	if (hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS) == 0)
+		return 0;
+
+	/* Wait to be clean on DDC HW engine */
+	timeout_count = 100;
+	do {
+		hdcp_ddc_status = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+		ddc_hw_status = hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS);
+
+		xfer_done = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_DONE;
+		xfer_req = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_REQ;
+		hw_done = ddc_hw_status & HDMI_DDC_HW_STATUS_DONE;
+		hw_not_ready = !xfer_done || xfer_req || !hw_done;
+
+		if (!hw_not_ready)
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_warn("%s: hw_ddc_clean failed\n", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	return 0;
+}
+
+static void hdmi_hdcp_reauth_work(struct work_struct *work)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
+		struct hdmi_hdcp_ctrl, hdcp_reauth_work);
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	unsigned long flags;
+	u32 reg_val;
+
+	DBG("HDCP REAUTH WORK");
+	/*
+	 * Disable HPD circuitry.
+	 * This is needed to reset the HDCP cipher engine so that when we
+	 * attempt a re-authentication, HW would clear the AN0_READY and
+	 * AN1_READY bits in HDMI_HDCP_LINK0_STATUS register
+	 */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+	reg_val &= ~HDMI_HPD_CTRL_ENABLE;
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+
+	/* Disable HDCP interrupts */
+	hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdmi_write(hdmi, REG_HDMI_HDCP_RESET,
+		HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);
+
+	/* Wait to be clean on DDC HW engine */
+	if (hdmi_hdcp_hw_ddc_clean(hdcp_ctrl)) {
+		pr_info("%s: reauth work aborted\n", __func__);
+		return;
+	}
+
+	/* Disable encryption and disable the HDCP block */
+	hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0);
+
+	/* Enable HPD circuitry */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+	reg_val |= HDMI_HPD_CTRL_ENABLE;
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	/*
+	 * Retry only a defined number of times, then abort the current
+	 * authentication process.
+	 */
+	if (++hdcp_ctrl->auth_retries == AUTH_RETRIES_TIME) {
+		hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+		hdcp_ctrl->auth_retries = 0;
+		pr_info("%s: abort reauthentication!\n", __func__);
+
+		return;
+	}
+
+	DBG("Queue AUTH WORK");
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
+	queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
+}
+
+static int hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_status;
+	u32 reg_val;
+	unsigned long flags;
+	int rc;
+
+	if (!hdcp_ctrl->aksv_valid) {
+		rc = hdmi_hdcp_read_validate_aksv(hdcp_ctrl);
+		if (rc) {
+			pr_err("%s: AKSV validation failed\n", __func__);
+			hdcp_ctrl->hdcp_state = HDCP_STATE_NO_AKSV;
+			return -ENOTSUPP;
+		}
+		hdcp_ctrl->aksv_valid = true;
+	}
+
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	/* disable HDMI Encrypt */
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val &= ~HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+
+	/* Enabling Software DDC */
+	reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION);
+	reg_val &= ~HDMI_DDC_ARBITRATION_HW_ARBITRATION;
+	hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	/*
+	 * Write AKSV read from QFPROM to the HDCP registers.
+	 * This step is needed for HDCP authentication and must be
+	 * written before enabling HDCP.
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_SW_LOWER_AKSV, hdcp_ctrl->aksv_lsb);
+	hdmi_write(hdmi, REG_HDMI_HDCP_SW_UPPER_AKSV, hdcp_ctrl->aksv_msb);
+
+	/*
+	 * HDCP setup prior to enabling HDCP_CTRL.
+	 * Setup seed values for random number An.
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL0, 0xB1FFB0FF);
+	hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL1, 0xF00DFACE);
+
+	/* Disable the RngCipher state */
+	reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL);
+	reg_val &= ~HDMI_HDCP_DEBUG_CTRL_RNG_CIPHER;
+	hdmi_write(hdmi, REG_HDMI_HDCP_DEBUG_CTRL, reg_val);
+	DBG("HDCP_DEBUG_CTRL=0x%08x",
+		hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL));
+
+	/*
+	 * Ensure that all register writes are completed before
+	 * enabling HDCP cipher
+	 */
+	wmb();
+
+	/*
+	 * Enable HDCP
+	 * This needs to be done as early as possible in order for the
+	 * hardware to make An available to read
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, HDMI_HDCP_CTRL_ENABLE);
+
+	/*
+	 * If we had stale values for the An ready bit, it should most
+	 * likely be cleared now after enabling HDCP cipher
+	 */
+	link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+	DBG("After enabling HDCP Link0_Status=0x%08x", link0_status);
+	if (!(link0_status &
+		(HDMI_HDCP_LINK0_STATUS_AN_0_READY |
+		HDMI_HDCP_LINK0_STATUS_AN_1_READY)))
+		DBG("An not ready after enabling HDCP");
+
+	/* Clear any DDC failures from previous tries before enabling HDCP */
+	rc = reset_hdcp_ddc_failures(hdcp_ctrl);
+
+	return rc;
+}
+
+static void hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val;
+	unsigned long flags;
+
+	DBG("hdcp auth failed, queue reauth work");
+	/* clear HDMI Encrypt */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val &= ~HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAILED;
+	queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
+}
+
+static void hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val;
+	unsigned long flags;
+
+	/*
+	 * Disable software DDC before going into part3 to make sure
+	 * there is no Arbitration between software and hardware for DDC
+	 */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION);
+	reg_val |= HDMI_DDC_ARBITRATION_HW_ARBITRATION;
+	hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	/* enable HDMI Encrypt */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val |= HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATED;
+	hdcp_ctrl->auth_retries = 0;
+}
+
+/*
+ * hdcp authenticating part 1
+ * Wait Key/An ready
+ * Read BCAPS from sink
+ * Write BCAPS and AKSV into HDCP engine
+ * Write An and AKSV to sink
+ * Read BKSV from sink and write into HDCP engine
+ */
+static int hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_status, keys_state;
+	u32 timeout_count;
+	bool an_ready;
+
+	/* Wait for HDCP keys to be checked and validated */
+	timeout_count = 100;
+	do {
+		link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+		keys_state = (link0_status >> 28) & 0x7;
+		if (keys_state == HDCP_KEYS_STATE_VALID)
+			break;
+
+		DBG("Keys not ready(%d). s=%d, l0=0x%08x",
+			timeout_count, keys_state, link0_status);
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Wait key state timed out", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	timeout_count = 100;
+	do {
+		link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+		an_ready = (link0_status & HDMI_HDCP_LINK0_STATUS_AN_0_READY)
+			&& (link0_status & HDMI_HDCP_LINK0_STATUS_AN_1_READY);
+		if (an_ready)
+			break;
+
+		DBG("An not ready(%d). l0_status=0x%08x",
+			timeout_count, link0_status);
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Wait An timed out", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	return 0;
+}
+
+static int hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc = 0;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_aksv_0, link0_aksv_1;
+	u32 link0_an[2];
+	u8 aksv[5];
+
+	/* Read An0 and An1 */
+	link0_an[0] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA5);
+	link0_an[1] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA6);
+
+	/* Read AKSV */
+	link0_aksv_0 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA3);
+	link0_aksv_1 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4);
+
+	DBG("Link AKSV=%08x%08x", link0_aksv_0, link0_aksv_1);
+	/* Copy An and AKSV to byte arrays for transmission */
+	aksv[0] =  link0_aksv_0        & 0xFF;
+	aksv[1] = (link0_aksv_0 >> 8)  & 0xFF;
+	aksv[2] = (link0_aksv_0 >> 16) & 0xFF;
+	aksv[3] = (link0_aksv_0 >> 24) & 0xFF;
+	aksv[4] =  link0_aksv_1        & 0xFF;
+
+	/* Write An to offset 0x18 */
+	rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x18, (u8 *)link0_an,
+		(u16)sizeof(link0_an));
+	if (rc) {
+		pr_err("%s:An write failed\n", __func__);
+		return rc;
+	}
+	DBG("Link0-An=%08x%08x", link0_an[0], link0_an[1]);
+
+	/* Write AKSV to offset 0x10 */
+	rc = hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x10, aksv, 5);
+	if (rc) {
+		pr_err("%s:AKSV write failed\n", __func__);
+		return rc;
+	}
+	DBG("Link0-AKSV=%02x%08x", link0_aksv_1 & 0xFF, link0_aksv_0);
+
+	return 0;
+}
+
+static int hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc = 0;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u8 bksv[5];
+	u32 reg[2], data[2];
+
+	/* Read BKSV at offset 0x00 */
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x00, bksv, 5);
+	if (rc) {
+		pr_err("%s:BKSV read failed\n", __func__);
+		return rc;
+	}
+
+	hdcp_ctrl->bksv_lsb = bksv[0] | (bksv[1] << 8) |
+		(bksv[2] << 16) | (bksv[3] << 24);
+	hdcp_ctrl->bksv_msb = bksv[4];
+	DBG(":BKSV=%02x%08x", hdcp_ctrl->bksv_msb, hdcp_ctrl->bksv_lsb);
+
+	/* check there are 20 ones in BKSV */
+	if ((hweight32(hdcp_ctrl->bksv_lsb) + hweight32(hdcp_ctrl->bksv_msb))
+			!= 20) {
+		pr_err(": BKSV doesn't have 20 1's and 20 0's\n");
+		pr_err(": BKSV chk fail. BKSV=%02x%02x%02x%02x%02x\n",
+			bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]);
+		return -EINVAL;
+	}
+
+	/* Write BKSV read from sink to HDCP registers */
+	reg[0] = REG_HDMI_HDCP_RCVPORT_DATA0;
+	data[0] = hdcp_ctrl->bksv_lsb;
+	reg[1] = REG_HDMI_HDCP_RCVPORT_DATA1;
+	data[1] = hdcp_ctrl->bksv_msb;
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+
+	return rc;
+}
+
+static int hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc = 0;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg, data;
+	u8 bcaps;
+
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+	if (rc) {
+		pr_err("%s:BCAPS read failed\n", __func__);
+		return rc;
+	}
+	DBG("BCAPS=%02x", bcaps);
+
+	/* receiver (0), repeater (1) */
+	hdcp_ctrl->ds_type = (bcaps & BIT(6)) ? DS_REPEATER : DS_RECEIVER;
+
+	/* Write BCAPS to the hardware */
+	reg = REG_HDMI_HDCP_RCVPORT_DATA12;
+	data = (u32)bcaps;
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+
+	return rc;
+}
+
+static int hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	unsigned long flags;
+	int rc;
+
+	/* Wait for AKSV key and An ready */
+	rc = hdmi_hdcp_wait_key_an_ready(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: wait key and an ready failed\n", __func__);
+		return rc;
+	}
+
+	/* Read BCAPS and send to HDCP engine */
+	rc = hdmi_hdcp_recv_bcaps(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: read bcaps error, abort\n", __func__);
+		return rc;
+	}
+
+	/*
+	 * 1.1_Features turned off by default.
+	 * No need to write AInfo since 1.1_Features is disabled.
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4, 0);
+
+	/* Send AKSV and An to sink */
+	rc = hdmi_hdcp_send_aksv_an(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s:An/Aksv write failed\n", __func__);
+		return rc;
+	}
+
+	/* Read BKSV and send to HDCP engine*/
+	rc = hdmi_hdcp_recv_bksv(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s:BKSV Process failed\n", __func__);
+		return rc;
+	}
+
+	/* Enable HDCP interrupts and ack/clear any stale interrupts */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL,
+		HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_ACK |
+		HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_MASK |
+		HDMI_HDCP_INT_CTRL_AUTH_FAIL_ACK |
+		HDMI_HDCP_INT_CTRL_AUTH_FAIL_MASK |
+		HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	return 0;
+}
+
+/* read R0' from sink and pass it to HDCP engine */
+static int hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	int rc = 0;
+	u8 buf[2];
+
+	/*
+	 * HDCP Compliance Test case 1A-01:
+	 * Wait here at least 100ms before reading R0'
+	 */
+	rc = hdmi_hdcp_msleep(hdcp_ctrl, 125, AUTH_ABORT_EV);
+	if (rc)
+		return rc;
+
+	/* Read R0' at offset 0x08 */
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x08, buf, 2);
+	if (rc) {
+		pr_err("%s:R0' read failed\n", __func__);
+		return rc;
+	}
+	DBG("R0'=%02x%02x", buf[1], buf[0]);
+
+	/* Write R0' to HDCP registers and check to see if it is a match */
+	hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA2_0,
+		(((u32)buf[1]) << 8) | buf[0]);
+
+	return 0;
+}
+
+/* Wait for authenticating result: R0/R0' are matched or not */
+static int hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_status;
+	int rc;
+
+	/*
+	 * wait for the hdcp irq; 10 sec should be long enough. Note that
+	 * hdmi_hdcp_msleep() returns nonzero when the event fires, so
+	 * rc == 0 here means the wait timed out.
+	 */
+	rc = hdmi_hdcp_msleep(hdcp_ctrl, 10000, AUTH_RESULT_RDY_EV);
+	if (!rc) {
+		pr_err("%s: Wait Auth IRQ timeout\n", __func__);
+		return -ETIMEDOUT;
+	}
+
+	link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+	if (!(link0_status & HDMI_HDCP_LINK0_STATUS_RI_MATCHES)) {
+		pr_err("%s: Authentication Part I failed\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Enable HDCP Encryption */
+	hdmi_write(hdmi, REG_HDMI_HDCP_CTRL,
+		HDMI_HDCP_CTRL_ENABLE |
+		HDMI_HDCP_CTRL_ENCRYPTION_ENABLE);
+
+	return 0;
+}
+
+static int hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl,
+	u16 *pbstatus)
+{
+	int rc;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	bool max_devs_exceeded = false, max_cascade_exceeded = false;
+	u32 repeater_cascade_depth = 0, down_stream_devices = 0;
+	u16 bstatus;
+	u8 buf[2];
+
+	/* Read BSTATUS at offset 0x41 */
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x41, buf, 2);
+	if (rc) {
+		pr_err("%s: BSTATUS read failed\n", __func__);
+		goto error;
+	}
+	*pbstatus = bstatus = (buf[1] << 8) | buf[0];
+
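+	/*
+	 * BSTATUS layout: bits 6:0 DEVICE_COUNT, bit 7 MAX_DEVS_EXCEEDED,
+	 * bits 10:8 DEPTH, bit 11 MAX_CASCADE_EXCEEDED.
+	 */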
+	down_stream_devices = bstatus & 0x7F;
+	repeater_cascade_depth = (bstatus >> 8) & 0x7;
+	max_devs_exceeded = (bstatus & BIT(7)) ? true : false;
+	max_cascade_exceeded = (bstatus & BIT(11)) ? true : false;
+
+	if (down_stream_devices == 0) {
+		/*
+		 * If no downstream devices are attached to the repeater
+		 * then part II fails.
+		 * todo: The other approach would be to continue PART II.
+		 */
+		pr_err("%s: No downstream devices\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * HDCP Compliance 1B-05:
+	 * Check if no. of devices connected to repeater
+	 * exceed max_devices_connected from bit 7 of Bstatus.
+	 */
+	if (max_devs_exceeded) {
+		pr_err("%s: no. of devs connected exceeds max allowed",
+			__func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * HDCP Compliance 1B-06:
+	 * Check if no. of cascade connected to repeater
+	 * exceed max_cascade_connected from bit 11 of Bstatus.
+	 */
+	if (max_cascade_exceeded) {
+		pr_err("%s: no. of cascade conn exceeds max allowed",
+			__func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+error:
+	hdcp_ctrl->dev_count = down_stream_devices;
+	hdcp_ctrl->max_cascade_exceeded = max_cascade_exceeded;
+	hdcp_ctrl->max_dev_exceeded = max_devs_exceeded;
+	hdcp_ctrl->depth = repeater_cascade_depth;
+	return rc;
+}
+
+static int hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(
+	struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg, data;
+	u32 timeout_count;
+	u16 bstatus;
+	u8 bcaps;
+
+	/*
+	 * Wait until READY bit is set in BCAPS, as per HDCP specifications
+	 * maximum permitted time to check for READY bit is five seconds.
+	 */
+	timeout_count = 100;
+	do {
+		/* Read BCAPS at offset 0x40 */
+		rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+		if (rc) {
+			pr_err("%s: BCAPS read failed\n", __func__);
+			return rc;
+		}
+
+		if (bcaps & BIT(5))
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Wait KSV fifo ready timed out", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	rc = hdmi_hdcp_recv_check_bstatus(hdcp_ctrl, &bstatus);
+	if (rc) {
+		pr_err("%s: bstatus error\n", __func__);
+		return rc;
+	}
+
+	/* Write BSTATUS and BCAPS to HDCP registers */
+	reg = REG_HDMI_HDCP_RCVPORT_DATA12;
+	data = bcaps | (bstatus << 8);
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+	if (rc) {
+		pr_err("%s: BSTATUS write failed\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * hdcp authenticating part 2 (second stage):
+ * read ksv fifo from sink
+ * transfer V' from sink to HDCP engine
+ * reset SHA engine
+ */
+static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	int rc = 0;
+	struct hdmi_hdcp_reg_data reg_data[]  = {
+		{REG_HDMI_HDCP_RCVPORT_DATA7,  0x20, "V' H0"},
+		{REG_HDMI_HDCP_RCVPORT_DATA8,  0x24, "V' H1"},
+		{REG_HDMI_HDCP_RCVPORT_DATA9,  0x28, "V' H2"},
+		{REG_HDMI_HDCP_RCVPORT_DATA10, 0x2C, "V' H3"},
+		{REG_HDMI_HDCP_RCVPORT_DATA11, 0x30, "V' H4"},
+	};
+	struct hdmi_hdcp_reg_data *rd;
+	u32 size = ARRAY_SIZE(reg_data);
+	u32 reg[ARRAY_SIZE(reg_data)];
+	u32 data[ARRAY_SIZE(reg_data)];
+	int i;
+
+	for (i = 0; i < size; i++) {
+		rd = &reg_data[i];
+		rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR,
+			rd->off, (u8 *)&data[i], (u16)sizeof(data[i]));
+		if (rc) {
+			pr_err("%s: Read %s failed\n", __func__, rd->name);
+			goto error;
+		}
+
+		DBG("%s =%x", rd->name, data[i]);
+		reg[i] = reg_data[i].reg_id;
+	}
+
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, size);
+
+error:
+	return rc;
+}
+
+static int hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 ksv_bytes;
+
+	ksv_bytes = 5 * hdcp_ctrl->dev_count;
+
+	rc = hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x43,
+		hdcp_ctrl->ksv_list, ksv_bytes);
+	if (rc)
+		pr_err("%s: KSV FIFO read failed\n", __func__);
+
+	return rc;
+}
+
+static int hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	u32 reg[2], data[2];
+	u32 rc  = 0;
+
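+	/* pulse SHA_CTRL (enable, then disable) to reset the SHA engine */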
+	reg[0] = REG_HDMI_HDCP_SHA_CTRL;
+	data[0] = HDCP_REG_ENABLE;
+	reg[1] = REG_HDMI_HDCP_SHA_CTRL;
+	data[1] = HDCP_REG_DISABLE;
+
+	rc = hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+
+	return rc;
+}
+
+static int hdmi_hdcp_auth_part2_recv_ksv_fifo(
+	struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	u32 timeout_count;
+
+	/*
+	 * Read KSV FIFO over DDC
+	 * Key Selection vector FIFO Used to pull downstream KSVs
+	 * from HDCP Repeaters.
+	 * All bytes (DEVICE_COUNT * 5) must be read in a single,
+	 * auto incrementing access.
+	 * All bytes read as 0x00 for HDCP Receivers that are not
+	 * HDCP Repeaters (REPEATER == 0).
+	 */
+	timeout_count = 100;
+	do {
+		rc = hdmi_hdcp_recv_ksv_fifo(hdcp_ctrl);
+		if (!rc)
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Recv ksv fifo timed out", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 25, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	rc = hdmi_hdcp_transfer_v_h(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: transfer V failed\n", __func__);
+		return rc;
+	}
+
+	/* reset SHA engine before write ksv fifo */
+	rc = hdmi_hdcp_reset_sha_engine(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: fail to reset sha engine\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * Write KSV FIFO to HDCP_SHA_DATA.
+ * This is done 1 byte at a time, starting with the LSB.
+ * Once 64 bytes have been written, we need to poll for
+ * HDCP_SHA_BLOCK_DONE before writing any further.
+ * Once the last byte has been written, we need to poll for
+ * HDCP_SHA_COMP_DONE to wait until the HW finishes.
+ */
+static int hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int i;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 ksv_bytes, last_byte = 0;
+	u8 *ksv_fifo = NULL;
+	u32 reg_val, data, reg;
+	u32 rc  = 0;
+
+	ksv_bytes  = 5 * hdcp_ctrl->dev_count;
+
+	/* Check if need to wait for HW completion */
+	if (hdcp_ctrl->ksv_fifo_w_index) {
+		reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_SHA_STATUS);
+		DBG("HDCP_SHA_STATUS=%08x", reg_val);
+		if (hdcp_ctrl->ksv_fifo_w_index == ksv_bytes) {
+			/* check COMP_DONE if last write */
+			if (reg_val & HDMI_HDCP_SHA_STATUS_COMP_DONE) {
+				DBG("COMP_DONE");
+				return 0;
+			} else {
+				return -EAGAIN;
+			}
+		} else {
+			/* check BLOCK_DONE if not last write */
+			if (!(reg_val & HDMI_HDCP_SHA_STATUS_BLOCK_DONE))
+				return -EAGAIN;
+
+			DBG("BLOCK_DONE");
+		}
+	}
+
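+	/* write the next chunk; the SHA engine takes at most 64 bytes per block */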
+	ksv_bytes  -= hdcp_ctrl->ksv_fifo_w_index;
+	if (ksv_bytes <= 64)
+		last_byte = 1;
+	else
+		ksv_bytes = 64;
+
+	ksv_fifo = hdcp_ctrl->ksv_list;
+	ksv_fifo += hdcp_ctrl->ksv_fifo_w_index;
+
+	for (i = 0; i < ksv_bytes; i++) {
+		/* Write KSV byte and set DONE bit[0] for last byte*/
+		reg_val = ksv_fifo[i] << 16;
+		if ((i == (ksv_bytes - 1)) && last_byte)
+			reg_val |= HDMI_HDCP_SHA_DATA_DONE;
+
+		reg = REG_HDMI_HDCP_SHA_DATA;
+		data = reg_val;
+		rc = hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+
+		if (rc)
+			return rc;
+	}
+
+	hdcp_ctrl->ksv_fifo_w_index += ksv_bytes;
+
+	/*
+	 * Return -EAGAIN to notify the caller to wait for COMP_DONE or BLOCK_DONE
+	 */
+	return -EAGAIN;
+}
+
+/* write ksv fifo into HDCP engine */
+static int hdmi_hdcp_auth_part2_write_ksv_fifo(
+	struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	u32 timeout_count;
+
+	hdcp_ctrl->ksv_fifo_w_index = 0;
+	timeout_count = 100;
+	do {
+		rc = hdmi_hdcp_write_ksv_fifo(hdcp_ctrl);
+		if (!rc)
+			break;
+
+		if (rc != -EAGAIN)
+			return rc;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: Write KSV fifo timed out", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	return 0;
+}
+
+static int hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc = 0;
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 link0_status;
+	u32 timeout_count = 100;
+
+	do {
+		link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+		if (link0_status & HDMI_HDCP_LINK0_STATUS_V_MATCHES)
+			break;
+
+		timeout_count--;
+		if (!timeout_count) {
+			pr_err("%s: HDCP V Match timed out", __func__);
+			return -ETIMEDOUT;
+		}
+
+		rc = hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+		if (rc)
+			return rc;
+	} while (1);
+
+	return 0;
+}
+
+static void hdmi_hdcp_auth_work(struct work_struct *work)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
+		struct hdmi_hdcp_ctrl, hdcp_auth_work);
+	int rc;
+
+	rc = hdmi_hdcp_auth_prepare(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: auth prepare failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	/* HDCP PartI */
+	rc = hdmi_hdcp_auth_part1_key_exchange(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: key exchange failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	rc = hdmi_hdcp_auth_part1_recv_r0(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: receive r0 failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	rc = hdmi_hdcp_auth_part1_verify_r0(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: verify r0 failed %d\n", __func__, rc);
+		goto end;
+	}
+	pr_info("%s: Authentication Part I successful\n", __func__);
+	if (hdcp_ctrl->ds_type == DS_RECEIVER)
+		goto end;
+
+	/* HDCP PartII */
+	rc = hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: wait ksv fifo ready failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	rc = hdmi_hdcp_auth_part2_recv_ksv_fifo(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: recv ksv fifo failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	rc = hdmi_hdcp_auth_part2_write_ksv_fifo(hdcp_ctrl);
+	if (rc) {
+		pr_err("%s: write ksv fifo failed %d\n", __func__, rc);
+		goto end;
+	}
+
+	rc = hdmi_hdcp_auth_part2_check_v_match(hdcp_ctrl);
+	if (rc)
+		pr_err("%s: check v match failed %d\n", __func__, rc);
+
+end:
+	if (rc == -ECANCELED) {
+		pr_info("%s: hdcp authentication canceled\n", __func__);
+	} else if (rc == -ENOTSUPP) {
+		pr_info("%s: hdcp is not supported\n", __func__);
+	} else if (rc) {
+		pr_err("%s: hdcp authentication failed\n", __func__);
+		hdmi_hdcp_auth_fail(hdcp_ctrl);
+	} else {
+		hdmi_hdcp_auth_done(hdcp_ctrl);
+	}
+}
+
+void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	u32 reg_val;
+	unsigned long flags;
+
+	if ((HDCP_STATE_INACTIVE != hdcp_ctrl->hdcp_state) ||
+		(HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) {
+		DBG("still active or activating or no aksv. returning");
+		return;
+	}
+
+	/* clear HDMI Encrypt */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val &= ~HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdcp_ctrl->auth_event = 0;
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
+	hdcp_ctrl->auth_retries = 0;
+	queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
+}
+
+void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	struct hdmi *hdmi = hdcp_ctrl->hdmi;
+	unsigned long flags;
+	u32 reg_val;
+
+	if ((HDCP_STATE_INACTIVE == hdcp_ctrl->hdcp_state) ||
+		(HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) {
+		DBG("hdcp inactive or no aksv. returning");
+		return;
+	}
+
+	/*
+	 * Disable HPD circuitry.
+	 * This is needed to reset the HDCP cipher engine so that when we
+	 * attempt a re-authentication, HW would clear the AN0_READY and
+	 * AN1_READY bits in HDMI_HDCP_LINK0_STATUS register
+	 */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+	reg_val &= ~HDMI_HPD_CTRL_ENABLE;
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+
+	/*
+	 * Disable HDCP interrupts.
+	 * Also, need to set the state to inactive here so that any ongoing
+	 * reauth works will know that the HDCP session has been turned off.
+	 */
+	hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	/*
+	 * Cancel any pending auth/reauth attempts.
+	 * If one is ongoing, this will wait for it to finish.
+	 * No more reauthentication attempts will be scheduled since we
+	 * set the current state to inactive.
+	 */
+	set_bit(AUTH_ABORT_EV, &hdcp_ctrl->auth_event);
+	wake_up_all(&hdcp_ctrl->auth_event_queue);
+	cancel_work_sync(&hdcp_ctrl->hdcp_auth_work);
+	cancel_work_sync(&hdcp_ctrl->hdcp_reauth_work);
+
+	hdmi_write(hdmi, REG_HDMI_HDCP_RESET,
+		HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);
+
+	/* Disable encryption and disable the HDCP block */
+	hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0);
+
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+	reg_val &= ~HDMI_CTRL_ENCRYPTED;
+	hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+
+	/* Enable HPD circuitry */
+	reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+	reg_val |= HDMI_HPD_CTRL_ENABLE;
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+
+	DBG("HDCP: Off");
+}
+
+struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL;
+
+	if (!hdmi->qfprom_mmio) {
+		pr_err("%s: HDCP is not supported without qfprom\n",
+			__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	hdcp_ctrl = kzalloc(sizeof(*hdcp_ctrl), GFP_KERNEL);
+	if (!hdcp_ctrl)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_WORK(&hdcp_ctrl->hdcp_auth_work, hdmi_hdcp_auth_work);
+	INIT_WORK(&hdcp_ctrl->hdcp_reauth_work, hdmi_hdcp_reauth_work);
+	init_waitqueue_head(&hdcp_ctrl->auth_event_queue);
+	hdcp_ctrl->hdmi = hdmi;
+	hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+	hdcp_ctrl->aksv_valid = false;
+
+	hdcp_ctrl->tz_hdcp = qcom_scm_hdcp_available();
+
+	return hdcp_ctrl;
+}
+
+void hdmi_hdcp_destroy(struct hdmi *hdmi)
+{
+	if (hdmi && hdmi->hdcp_ctrl) {
+		kfree(hdmi->hdcp_ctrl);
+		hdmi->hdcp_ctrl = NULL;
+	}
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index 6997ec6..3a01cb5 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -426,57 +426,6 @@
 	kfree(phy_8960);
 }
 
-static void hdmi_phy_8960_reset(struct hdmi_phy *phy)
-{
-	struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
-	struct hdmi *hdmi = phy_8960->hdmi;
-	unsigned int val;
-
-	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET_PLL);
-	}
-
-	msleep(100);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET_PLL);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
-	}
-}
-
 static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
 		unsigned long int pixclock)
 {
@@ -511,7 +460,6 @@
 
 static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = {
 		.destroy = hdmi_phy_8960_destroy,
-		.reset = hdmi_phy_8960_reset,
 		.powerup = hdmi_phy_8960_powerup,
 		.powerdown = hdmi_phy_8960_powerdown,
 };
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
index 391433c..cb01421 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -29,37 +29,6 @@
 	kfree(phy_8x60);
 }
 
-static void hdmi_phy_8x60_reset(struct hdmi_phy *phy)
-{
-	struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
-	struct hdmi *hdmi = phy_8x60->hdmi;
-	unsigned int val;
-
-	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	msleep(100);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	}
-}
-
 static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
 		unsigned long int pixclock)
 {
@@ -182,7 +151,6 @@
 
 static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = {
 		.destroy = hdmi_phy_8x60_destroy,
-		.reset = hdmi_phy_8x60_reset,
 		.powerup = hdmi_phy_8x60_powerup,
 		.powerdown = hdmi_phy_8x60_powerdown,
 };
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
index 59fa6cd..56ab891 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
@@ -19,7 +19,6 @@
 
 struct hdmi_phy_8x74 {
 	struct hdmi_phy base;
-	struct hdmi *hdmi;
 	void __iomem *mmio;
 };
 #define to_hdmi_phy_8x74(x) container_of(x, struct hdmi_phy_8x74, base)
@@ -41,59 +40,6 @@
 	kfree(phy_8x74);
 }
 
-static void hdmi_phy_8x74_reset(struct hdmi_phy *phy)
-{
-	struct hdmi_phy_8x74 *phy_8x74 = to_hdmi_phy_8x74(phy);
-	struct hdmi *hdmi = phy_8x74->hdmi;
-	unsigned int val;
-
-	/* NOTE that HDMI_PHY_CTL is in core mmio, not phy mmio: */
-
-	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
-	} else {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET_PLL);
-	}
-
-	msleep(100);
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET);
-	}
-
-	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
-		/* pull high */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val | HDMI_PHY_CTRL_SW_RESET_PLL);
-	} else {
-		/* pull low */
-		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
-				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
-	}
-}
-
 static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy,
 		unsigned long int pixclock)
 {
@@ -117,7 +63,6 @@
 
 static const struct hdmi_phy_funcs hdmi_phy_8x74_funcs = {
 		.destroy = hdmi_phy_8x74_destroy,
-		.reset = hdmi_phy_8x74_reset,
 		.powerup = hdmi_phy_8x74_powerup,
 		.powerdown = hdmi_phy_8x74_powerdown,
 };
@@ -138,8 +83,6 @@
 
 	phy->funcs = &hdmi_phy_8x74_funcs;
 
-	phy_8x74->hdmi = hdmi;
-
 	/* for 8x74, the phy mmio is mapped separately: */
 	phy_8x74->mmio = msm_ioremap(hdmi->pdev,
 			"phy_physical", "HDMI_8x74");
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index 978c3f7..2aa23b9 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -8,19 +8,19 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 153fc48..74b8673 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -8,17 +8,17 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index c4bb9d9..6ac9aa1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -334,13 +334,15 @@
 	return 0;
 }
 
-static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
+				   struct drm_crtc_state *old_crtc_state)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	DBG("%s: begin", mdp4_crtc->name);
 }
 
-static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
+				   struct drm_crtc_state *old_crtc_state)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -680,7 +682,5 @@
 	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
 	plane->crtc = crtc;
 
-	mdp4_plane_install_properties(plane, &crtc->base);
-
 	return crtc;
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index 7369ee7..5ed38cf 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -19,8 +19,11 @@
 #include "msm_drv.h"
 #include "mdp4_kms.h"
 
-void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+		uint32_t old_irqmask)
 {
+	mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_CLEAR,
+		irqmask ^ (irqmask & old_irqmask));
 	mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
 }
 
@@ -68,9 +71,10 @@
 	struct drm_device *dev = mdp4_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	unsigned int id;
-	uint32_t status;
+	uint32_t status, enable;
 
-	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
+	enable = mdp4_read(mdp4_kms, REG_MDP4_INTR_ENABLE);
+	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS) & enable;
 	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
 
 	VERB("status=%08x", status);
@@ -86,13 +90,22 @@
 
 int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
+	mdp4_enable(mdp4_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp4_crtc_vblank(crtc), true);
+	mdp4_disable(mdp4_kms);
+
 	return 0;
 }
 
 void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
+	mdp4_enable(mdp4_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp4_crtc_vblank(crtc), false);
+	mdp4_disable(mdp4_kms);
 }
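
The mdp4_set_irqmask() change above acks, via INTR_CLEAR, any latched
status for interrupts that are about to be newly enabled, so a stale bit
cannot fire the moment INTR_ENABLE is written. As a standalone sketch
(illustrative only, not part of the patch), the XOR expression is exactly
the set of newly enabled bits:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* irqmask ^ (irqmask & old) keeps the bits set in irqmask but
	 * not in old, i.e. irqmask & ~old: the interrupts being newly
	 * turned on. */
	for (uint32_t irqmask = 0; irqmask < 16; irqmask++)
		for (uint32_t old = 0; old < 16; old++)
			assert((irqmask ^ (irqmask & old)) ==
			       (irqmask & ~old));
	return 0;
}
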
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 531e4ac..077f752 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -241,22 +241,37 @@
 }
 
 #ifdef CONFIG_OF
-static struct drm_panel *detect_panel(struct drm_device *dev, const char *name)
+static struct drm_panel *detect_panel(struct drm_device *dev)
 {
-	struct device_node *n;
+	struct device_node *endpoint, *panel_node;
+	struct device_node *np = dev->dev->of_node;
 	struct drm_panel *panel = NULL;
 
-	n = of_parse_phandle(dev->dev->of_node, name, 0);
-	if (n) {
-		panel = of_drm_find_panel(n);
-		if (!panel)
-			panel = ERR_PTR(-EPROBE_DEFER);
+	endpoint = of_graph_get_next_endpoint(np, NULL);
+	if (!endpoint) {
+		dev_err(dev->dev, "no valid endpoint\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	panel_node = of_graph_get_remote_port_parent(endpoint);
+	if (!panel_node) {
+		dev_err(dev->dev, "no valid panel node\n");
+		of_node_put(endpoint);
+		return ERR_PTR(-ENODEV);
+	}
+
+	of_node_put(endpoint);
+
+	panel = of_drm_find_panel(panel_node);
+	if (!panel) {
+		of_node_put(panel_node);
+		return ERR_PTR(-EPROBE_DEFER);
 	}
 
 	return panel;
 }
 #else
-static struct drm_panel *detect_panel(struct drm_device *dev, const char *name)
+static struct drm_panel *detect_panel(struct drm_device *dev)
 {
 	// ??? maybe use a module param to specify which panel is attached?
 }
@@ -294,7 +309,7 @@
 	 * Setup the LCDC/LVDS path: RGB2 -> DMA_P -> LCDC -> LVDS:
 	 */
 
-	panel = detect_panel(dev, "qcom,lvds-panel");
+	panel = detect_panel(dev);
 	if (IS_ERR(panel)) {
 		ret = PTR_ERR(panel);
 		dev_err(dev->dev, "failed to detect LVDS panel: %d\n", ret);
@@ -527,6 +542,11 @@
 		goto fail;
 	}
 
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.max_width = 2048;
+	dev->mode_config.max_height = 2048;
+
 	return kms;
 
 fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index c1ecb9d..8a7f6e1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -167,7 +167,8 @@
 int mdp4_disable(struct mdp4_kms *mdp4_kms);
 int mdp4_enable(struct mdp4_kms *mdp4_kms);
 
-void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+		uint32_t old_irqmask);
 void mdp4_irq_preinstall(struct msm_kms *kms);
 int mdp4_irq_postinstall(struct msm_kms *kms);
 void mdp4_irq_uninstall(struct msm_kms *kms);
@@ -175,29 +176,24 @@
 int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 
-static inline bool pipe_supports_yuv(enum mdp4_pipe pipe)
+static inline uint32_t mdp4_pipe_caps(enum mdp4_pipe pipe)
 {
 	switch (pipe) {
 	case VG1:
 	case VG2:
 	case VG3:
 	case VG4:
-		return true;
+		return MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
+	case RGB1:
+	case RGB2:
+	case RGB3:
+		return MDP_PIPE_CAP_SCALE;
 	default:
-		return false;
+		return 0;
 	}
 }
 
-static inline
-uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
-		uint32_t max_formats)
-{
-	return mdp_get_formats(pixel_formats, max_formats,
-				!pipe_supports_yuv(pipe_id));
-}
-
-void mdp4_plane_install_properties(struct drm_plane *plane,
-		struct drm_mode_object *obj);
 enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp4_plane_init(struct drm_device *dev,
 		enum mdp4_pipe pipe_id, bool private_plane);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
index c048433..4cd6e72 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
@@ -346,8 +346,10 @@
 
 	mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
 
-	if (panel)
+	if (panel) {
 		drm_panel_disable(panel);
+		drm_panel_unprepare(panel);
+	}
 
 	/*
 	 * Wait for a vsync so we know the ENABLE=0 latched before
@@ -412,8 +414,10 @@
 	if (ret)
 		dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
 
-	if (panel)
+	if (panel) {
+		drm_panel_prepare(panel);
 		drm_panel_enable(panel);
+	}
 
 	setup_phy(encoder);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 247a424..e9dee36 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -26,6 +26,7 @@
 
 	enum mdp4_pipe pipe;
 
+	uint32_t caps;
 	uint32_t nformats;
 	uint32_t formats[32];
 
@@ -74,7 +75,7 @@
 }
 
 /* helper to install properties which are common to planes and crtcs */
-void mdp4_plane_install_properties(struct drm_plane *plane,
+static void mdp4_plane_install_properties(struct drm_plane *plane,
 		struct drm_mode_object *obj)
 {
 	// XXX
@@ -382,9 +383,11 @@
 
 	mdp4_plane->pipe = pipe_id;
 	mdp4_plane->name = pipe_names[pipe_id];
+	mdp4_plane->caps = mdp4_pipe_caps(pipe_id);
 
-	mdp4_plane->nformats = mdp4_get_formats(pipe_id, mdp4_plane->formats,
-			ARRAY_SIZE(mdp4_plane->formats));
+	mdp4_plane->nformats = mdp_get_formats(mdp4_plane->formats,
+			ARRAY_SIZE(mdp4_plane->formats),
+			!pipe_supports_yuv(mdp4_plane->caps));
 
 	type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
 	ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 50e1752..3469f50d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,17 +8,17 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -381,49 +381,49 @@
 static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
 #define MDP5_CTL_LAYER_REG_VIG0__MASK				0x00000007
 #define MDP5_CTL_LAYER_REG_VIG0__SHIFT				0
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK;
 }
 #define MDP5_CTL_LAYER_REG_VIG1__MASK				0x00000038
 #define MDP5_CTL_LAYER_REG_VIG1__SHIFT				3
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK;
 }
 #define MDP5_CTL_LAYER_REG_VIG2__MASK				0x000001c0
 #define MDP5_CTL_LAYER_REG_VIG2__SHIFT				6
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK;
 }
 #define MDP5_CTL_LAYER_REG_RGB0__MASK				0x00000e00
 #define MDP5_CTL_LAYER_REG_RGB0__SHIFT				9
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK;
 }
 #define MDP5_CTL_LAYER_REG_RGB1__MASK				0x00007000
 #define MDP5_CTL_LAYER_REG_RGB1__SHIFT				12
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK;
 }
 #define MDP5_CTL_LAYER_REG_RGB2__MASK				0x00038000
 #define MDP5_CTL_LAYER_REG_RGB2__SHIFT				15
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK;
 }
 #define MDP5_CTL_LAYER_REG_DMA0__MASK				0x001c0000
 #define MDP5_CTL_LAYER_REG_DMA0__SHIFT				18
-static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK;
 }
 #define MDP5_CTL_LAYER_REG_DMA1__MASK				0x00e00000
 #define MDP5_CTL_LAYER_REG_DMA1__SHIFT				21
-static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK;
 }
@@ -431,13 +431,13 @@
 #define MDP5_CTL_LAYER_REG_CURSOR_OUT				0x02000000
 #define MDP5_CTL_LAYER_REG_VIG3__MASK				0x1c000000
 #define MDP5_CTL_LAYER_REG_VIG3__SHIFT				26
-static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK;
 }
 #define MDP5_CTL_LAYER_REG_RGB3__MASK				0xe0000000
 #define MDP5_CTL_LAYER_REG_RGB3__SHIFT				29
-static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(enum mdp_mixer_stage_id val)
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(uint32_t val)
 {
 	return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK;
 }
@@ -499,6 +499,44 @@
 
 static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); }
 
+static inline uint32_t __offset_LAYER_EXT(uint32_t idx)
+{
+	switch (idx) {
+		case 0: return 0x00000040;
+		case 1: return 0x00000044;
+		case 2: return 0x00000048;
+		case 3: return 0x0000004c;
+		case 4: return 0x00000050;
+		case 5: return 0x00000054;
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+#define MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3			0x00000001
+#define MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3			0x00000004
+#define MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3			0x00000010
+#define MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3			0x00000040
+#define MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3			0x00000100
+#define MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3			0x00000400
+#define MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3			0x00001000
+#define MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3			0x00004000
+#define MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3			0x00010000
+#define MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3			0x00040000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK			0x00f00000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT			20
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR0(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK;
+}
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK			0x3c000000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT			26
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK;
+}
+
 static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
 {
 	switch (idx) {
@@ -803,11 +841,11 @@
 }
 #define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT			0x00020000
 #define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB			0x00040000
-#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK			0x00180000
-#define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT			19
-static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(enum mdp_fetch_type val)
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK			0x00180000
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT			19
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(enum mdp_fetch_type val)
 {
-	return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK;
+	return ((val) << MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT) & MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK;
 }
 #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK			0x01800000
 #define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT			23
@@ -897,41 +935,41 @@
 static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); }
 #define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN			0x00000001
 #define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN			0x00000002
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK		0x00000300
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT		8
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK	0x00000300
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT	8
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK		0x00000c00
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT		10
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK	0x00000c00
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT	10
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK		0x00003000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT		12
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK	0x00003000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT	12
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK		0x0000c000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT		14
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK	0x0000c000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT	14
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK		0x00030000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT		16
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK	0x00030000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT	16
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK;
 }
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK		0x000c0000
-#define MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT		18
-static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_filter val)
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK	0x000c0000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT	18
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(enum mdp5_scale_filter val)
 {
-	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK;
 }
 
 static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); }
@@ -984,9 +1022,22 @@
 
 static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); }
 
-static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t __offset_BLEND(uint32_t idx)
+{
+	switch (idx) {
+		case 0: return 0x00000020;
+		case 1: return 0x00000050;
+		case 2: return 0x00000080;
+		case 3: return 0x000000b0;
+		case 4: return 0x00000230;
+		case 5: return 0x00000260;
+		case 6: return 0x00000290;
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
 #define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK			0x00000003
 #define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT			0
 static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
@@ -1008,25 +1059,25 @@
 #define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA			0x00001000
 #define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN			0x00002000
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000002c + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000030 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000034 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000038 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000003c + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000001c + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000040 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000044 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + __offset_BLEND(i1); }
 
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000048 + __offset_LM(i0) + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + __offset_BLEND(i1); }
 
 static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); }
 #define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK			0x0000ffff
@@ -1260,6 +1311,13 @@
 static inline uint32_t __offset_WB(uint32_t idx)
 {
 	switch (idx) {
+#if 0  /* TEMPORARY until patch that adds wb.base[] is merged */
+		case 0: return (mdp5_cfg->wb.base[0]);
+		case 1: return (mdp5_cfg->wb.base[1]);
+		case 2: return (mdp5_cfg->wb.base[2]);
+		case 3: return (mdp5_cfg->wb.base[3]);
+		case 4: return (mdp5_cfg->wb.base[4]);
+#endif
 		default: return INVALID_IDX(idx);
 	}
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index 8b9a793..a1e26f2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -22,7 +22,76 @@
 /* mdp5_cfg must be exposed (used in mdp5.xml.h) */
 const struct mdp5_cfg_hw *mdp5_cfg = NULL;
 
-const struct mdp5_cfg_hw msm8x74_config = {
+const struct mdp5_cfg_hw msm8x74v1_config = {
+	.name = "msm8x74v1",
+	.mdp = {
+		.count = 1,
+		.base = { 0x00100 },
+	},
+	.smp = {
+		.mmb_count = 22,
+		.mmb_size = 4096,
+		.clients = {
+			[SSPP_VIG0] =  1, [SSPP_VIG1] =  4, [SSPP_VIG2] =  7,
+			[SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+			[SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18,
+		},
+	},
+	.ctl = {
+		.count = 5,
+		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+		.flush_hw_mask = 0x0003ffff,
+	},
+	.pipe_vig = {
+		.count = 3,
+		.base = { 0x01200, 0x01600, 0x01a00 },
+		.caps = MDP_PIPE_CAP_HFLIP |
+			MDP_PIPE_CAP_VFLIP |
+			MDP_PIPE_CAP_SCALE |
+			MDP_PIPE_CAP_CSC   |
+			0,
+	},
+	.pipe_rgb = {
+		.count = 3,
+		.base = { 0x01e00, 0x02200, 0x02600 },
+		.caps = MDP_PIPE_CAP_HFLIP |
+			MDP_PIPE_CAP_VFLIP |
+			MDP_PIPE_CAP_SCALE |
+			0,
+	},
+	.pipe_dma = {
+		.count = 2,
+		.base = { 0x02a00, 0x02e00 },
+		.caps = MDP_PIPE_CAP_HFLIP |
+			MDP_PIPE_CAP_VFLIP |
+			0,
+	},
+	.lm = {
+		.count = 5,
+		.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+		.nb_stages = 5,
+	},
+	.dspp = {
+		.count = 3,
+		.base = { 0x04600, 0x04a00, 0x04e00 },
+	},
+	.pp = {
+		.count = 3,
+		.base = { 0x21b00, 0x21c00, 0x21d00 },
+	},
+	.intf = {
+		.base = { 0x21100, 0x21300, 0x21500, 0x21700 },
+		.connect = {
+			[0] = INTF_eDP,
+			[1] = INTF_DSI,
+			[2] = INTF_DSI,
+			[3] = INTF_HDMI,
+		},
+	},
+	.max_clk = 200000000,
+};
+
+const struct mdp5_cfg_hw msm8x74v2_config = {
 	.name = "msm8x74",
 	.mdp = {
 		.count = 1,
@@ -45,19 +114,27 @@
 	.pipe_vig = {
 		.count = 3,
 		.base = { 0x01200, 0x01600, 0x01a00 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+				MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_rgb = {
 		.count = 3,
 		.base = { 0x01e00, 0x02200, 0x02600 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_dma = {
 		.count = 2,
 		.base = { 0x02a00, 0x02e00 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
 	},
 	.lm = {
 		.count = 5,
 		.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
 		.nb_stages = 5,
+		.max_width = 2048,
+		.max_height = 0xFFFF,
 	},
 	.dspp = {
 		.count = 3,
@@ -65,7 +142,7 @@
 	},
 	.ad = {
 		.count = 2,
-		.base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
+		.base = { 0x13100, 0x13300 },
 	},
 	.pp = {
 		.count = 3,
@@ -113,19 +190,27 @@
 	.pipe_vig = {
 		.count = 4,
 		.base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+				MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_rgb = {
 		.count = 4,
 		.base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_dma = {
 		.count = 2,
 		.base = { 0x03200, 0x03600 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
 	},
 	.lm = {
 		.count = 6,
 		.base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
 		.nb_stages = 5,
+		.max_width = 2048,
+		.max_height = 0xFFFF,
 	},
 	.dspp = {
 		.count = 4,
@@ -174,19 +259,27 @@
 	.pipe_vig = {
 		.count = 1,
 		.base = { 0x05000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+				MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_rgb = {
 		.count = 2,
 		.base = { 0x15000, 0x17000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_dma = {
 		.count = 1,
 		.base = { 0x25000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
 	},
 	.lm = {
 		.count = 2, /* LM0 and LM3 */
 		.base = { 0x45000, 0x48000 },
 		.nb_stages = 5,
+		.max_width = 2048,
+		.max_height = 0xFFFF,
 	},
 	.dspp = {
 		.count = 1,
@@ -203,13 +296,90 @@
 	.max_clk = 320000000,
 };
 
-static const struct mdp5_cfg_handler cfg_handlers[] = {
-	{ .revision = 0, .config = { .hw = &msm8x74_config } },
-	{ .revision = 2, .config = { .hw = &msm8x74_config } },
-	{ .revision = 3, .config = { .hw = &apq8084_config } },
-	{ .revision = 6, .config = { .hw = &msm8x16_config } },
+const struct mdp5_cfg_hw msm8x94_config = {
+	.name = "msm8x94",
+	.mdp = {
+		.count = 1,
+		.base = { 0x01000 },
+	},
+	.smp = {
+		.mmb_count = 44,
+		.mmb_size = 8192,
+		.clients = {
+			[SSPP_VIG0] =  1, [SSPP_VIG1] =  4,
+			[SSPP_VIG2] =  7, [SSPP_VIG3] = 19,
+			[SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+			[SSPP_RGB0] = 16, [SSPP_RGB1] = 17,
+			[SSPP_RGB2] = 18, [SSPP_RGB3] = 22,
+		},
+		.reserved_state[0] = GENMASK(23, 0),	/* first 24 MMBs */
+		.reserved = {
+			 [1] = 1,  [4] = 1,  [7] = 1, [19] = 1,
+			[16] = 5, [17] = 5, [18] = 5, [22] = 5,
+		},
+	},
+	.ctl = {
+		.count = 5,
+		.base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+		.flush_hw_mask = 0xf0ffffff,
+	},
+	.pipe_vig = {
+		.count = 4,
+		.base = { 0x05000, 0x07000, 0x09000, 0x0b000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+				MDP_PIPE_CAP_DECIMATION,
+	},
+	.pipe_rgb = {
+		.count = 4,
+		.base = { 0x15000, 0x17000, 0x19000, 0x1b000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+	},
+	.pipe_dma = {
+		.count = 2,
+		.base = { 0x25000, 0x27000 },
+		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+	},
+	.lm = {
+		.count = 6,
+		.base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 },
+		.nb_stages = 8,
+		.max_width = 2048,
+		.max_height = 0xFFFF,
+	},
+	.dspp = {
+		.count = 4,
+		.base = { 0x55000, 0x57000, 0x59000, 0x5b000 },
+	},
+	.ad = {
+		.count = 3,
+		.base = { 0x79000, 0x79800, 0x7a000 },
+	},
+	.pp = {
+		.count = 4,
+		.base = { 0x71000, 0x71800, 0x72000, 0x72800 },
+	},
+	.intf = {
+		.base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 },
+		.connect = {
+			[0] = INTF_DISABLED,
+			[1] = INTF_DSI,
+			[2] = INTF_DSI,
+			[3] = INTF_HDMI,
+		},
+	},
+	.max_clk = 320000000,
 };
 
+static const struct mdp5_cfg_handler cfg_handlers[] = {
+	{ .revision = 0, .config = { .hw = &msm8x74v1_config } },
+	{ .revision = 2, .config = { .hw = &msm8x74v2_config } },
+	{ .revision = 3, .config = { .hw = &apq8084_config } },
+	{ .revision = 6, .config = { .hw = &msm8x16_config } },
+	{ .revision = 9, .config = { .hw = &msm8x94_config } },
+};
 
 static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index 69349ab..efb918d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -42,6 +42,13 @@
 struct mdp5_lm_block {
 	MDP5_SUB_BLOCK_DEFINITION;
 	uint32_t nb_stages;		/* number of stages per blender */
+	uint32_t max_width;		/* Maximum output resolution */
+	uint32_t max_height;
+};
+
+struct mdp5_pipe_block {
+	MDP5_SUB_BLOCK_DEFINITION;
+	uint32_t caps;			/* pipe capabilities */
 };
 
 struct mdp5_ctl_block {
@@ -70,9 +77,9 @@
 	struct mdp5_sub_block mdp;
 	struct mdp5_smp_block smp;
 	struct mdp5_ctl_block ctl;
-	struct mdp5_sub_block pipe_vig;
-	struct mdp5_sub_block pipe_rgb;
-	struct mdp5_sub_block pipe_dma;
+	struct mdp5_pipe_block pipe_vig;
+	struct mdp5_pipe_block pipe_rgb;
+	struct mdp5_pipe_block pipe_dma;
 	struct mdp5_lm_block  lm;
 	struct mdp5_sub_block dspp;
 	struct mdp5_sub_block ad;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index ee31b16..8e6c9b5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -21,6 +21,8 @@
 	struct mdp5_interface intf;
 	bool enabled;
 	uint32_t bsc;
+
+	struct mdp5_ctl *ctl;
 };
 #define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base)
 
@@ -210,13 +212,14 @@
 			mode->vsync_end, mode->vtotal,
 			mode->type, mode->flags);
 	pingpong_tearcheck_setup(encoder, mode);
-	mdp5_crtc_set_intf(encoder->crtc, &mdp5_cmd_enc->intf);
+	mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_cmd_enc->intf,
+				mdp5_cmd_enc->ctl);
 }
 
 static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
 {
 	struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
-	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+	struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
 	struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
 
 	if (WARN_ON(!mdp5_cmd_enc->enabled))
@@ -235,7 +238,7 @@
 static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
 {
 	struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
-	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+	struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
 	struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
 
 	if (WARN_ON(mdp5_cmd_enc->enabled))
@@ -300,7 +303,7 @@
 
 /* initialize command mode encoder */
 struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
-				struct mdp5_interface *intf)
+			struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
 	struct drm_encoder *encoder = NULL;
 	struct mdp5_cmd_encoder *mdp5_cmd_enc;
@@ -320,6 +323,7 @@
 
 	memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf));
 	encoder = &mdp5_cmd_enc->base;
+	mdp5_cmd_enc->ctl = ctl;
 
 	drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs,
 			DRM_MODE_ENCODER_DSI);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index dea3d2e..7f9f4ac 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -160,8 +160,7 @@
 
 	if (mdp5_crtc->ctl && !crtc->state->enable) {
 		/* set STAGE_UNUSED for all layers */
-		mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
-		mdp5_ctl_release(mdp5_crtc->ctl);
+		mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
 		mdp5_crtc->ctl = NULL;
 	}
 }
@@ -196,13 +195,9 @@
 /*
  * blend_setup() - blend all the planes of a CRTC
  *
- * When border is enabled, the border color will ALWAYS be the base layer.
- * Therefore, the first plane (private RGB pipe) will start at STAGE0.
- * If disabled, the first plane starts at STAGE_BASE.
- *
- * Note:
- * Border is not enabled here because the private plane is exactly
- * the CRTC resolution.
+ * If no base layer is available, border color will be enabled as the
+ * base layer. Otherwise, all layers are blended based on the stage
+ * calculated for each of them in mdp5_crtc_atomic_check().
  */
 static void blend_setup(struct drm_crtc *crtc)
 {
@@ -210,9 +205,14 @@
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct drm_plane *plane;
 	const struct mdp5_cfg_hw *hw_cfg;
-	uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
+	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
+	const struct mdp_format *format;
+	uint32_t lm = mdp5_crtc->lm;
+	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
 	unsigned long flags;
-#define blender(stage)	((stage) - STAGE_BASE)
+	uint8_t stage[STAGE_MAX + 1];
+	int i, plane_cnt = 0;
+#define blender(stage)	((stage) - STAGE0)
 
 	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 
@@ -222,33 +222,73 @@
 	if (!mdp5_crtc->ctl)
 		goto out;
 
+	/* Collect all plane information */
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
-		enum mdp_mixer_stage_id stage =
-			to_mdp5_plane_state(plane->state)->stage;
-
-		/*
-		 * Note: This cannot happen with current implementation but
-		 * we need to check this condition once z property is added
-		 */
-		BUG_ON(stage > hw_cfg->lm.nb_stages);
-
-		/* LM */
-		mdp5_write(mdp5_kms,
-				REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
-				MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
-				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
-		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
-				blender(stage)), 0xff);
-		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
-				blender(stage)), 0x00);
-		/* CTL */
-		blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
-		DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
-				pipe2name(mdp5_plane_pipe(plane)), stage);
+		pstate = to_mdp5_plane_state(plane->state);
+		pstates[pstate->stage] = pstate;
+		stage[pstate->stage] = mdp5_plane_pipe(plane);
+		plane_cnt++;
 	}
 
-	DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
-	mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
+	/*
+	 * If there is no base layer, enable border color.
+	 * Although it's not possible in the current blend logic,
+	 * put it here as a reminder.
+	 */
+	if (!pstates[STAGE_BASE] && plane_cnt) {
+		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
+		DBG("Border Color is enabled");
+	}
+
+	/* The rest of the blend setup: program each used stage */
+	for (i = STAGE0; i <= STAGE_MAX; i++) {
+		if (!pstates[i])
+			continue;
+
+		format = to_mdp_format(
+			msm_framebuffer_format(pstates[i]->base.fb));
+		plane = pstates[i]->base.plane;
+		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
+		fg_alpha = pstates[i]->alpha;
+		bg_alpha = 0xFF - pstates[i]->alpha;
+		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);
+
+		if (format->alpha_enable && pstates[i]->premultiplied) {
+			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+			if (fg_alpha != 0xff) {
+				bg_alpha = fg_alpha;
+				blend_op |=
+					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
+					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
+			} else {
+				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+			}
+		} else if (format->alpha_enable) {
+			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
+				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+			if (fg_alpha != 0xff) {
+				bg_alpha = fg_alpha;
+				blend_op |=
+				       MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
+				       MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
+				       MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
+				       MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
+			} else {
+				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+			}
+		}
+
+		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
+				blender(i)), blend_op);
+		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
+				blender(i)), fg_alpha);
+		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
+				blender(i)), bg_alpha);
+	}
+
+	mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);
 
 out:
 	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
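
The three branches above select between premultiplied pixel alpha,
straight (coverage) pixel alpha, and constant alpha only. A condensed
restatement of that selection, as a standalone sketch with simplified
stand-in flags rather than the real MDP5_LM_BLEND_OP_MODE_* encoding:

#include <stdbool.h>
#include <stdint.h>

enum alpha_src { ALPHA_CONST, ALPHA_FG_PIXEL };

struct blend_op {
	enum alpha_src fg, bg;
	bool fg_mod, bg_mod;	/* modulate by the constant plane alpha */
};

static struct blend_op pick_blend_op(bool alpha_enable, bool premultiplied,
				     uint8_t fg_alpha)
{
	struct blend_op op = { ALPHA_CONST, ALPHA_CONST, false, false };

	if (alpha_enable && premultiplied) {
		/* fg pixels are already scaled; weight bg by fg's
		 * per-pixel alpha */
		op.bg = ALPHA_FG_PIXEL;
		op.bg_mod = (fg_alpha != 0xff);
	} else if (alpha_enable) {
		/* coverage alpha: both sides follow fg's pixel alpha */
		op.fg = op.bg = ALPHA_FG_PIXEL;
		op.fg_mod = op.bg_mod = (fg_alpha != 0xff);
	}
	return op;
}
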
@@ -339,25 +379,19 @@
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct drm_plane *plane;
 	struct drm_device *dev = crtc->dev;
-	struct plane_state pstates[STAGE3 + 1];
+	struct plane_state pstates[STAGE_MAX + 1];
+	const struct mdp5_cfg_hw *hw_cfg;
 	int cnt = 0, i;
 
 	DBG("%s: check", mdp5_crtc->name);
 
-	/* request a free CTL, if none is already allocated for this CRTC */
-	if (state->enable && !mdp5_crtc->ctl) {
-		mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
-		if (WARN_ON(!mdp5_crtc->ctl))
-			return -EINVAL;
-	}
-
 	/* verify that there are not too many planes attached to crtc
 	 * and that we don't have conflicting mixer stages:
 	 */
+	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 	drm_atomic_crtc_state_for_each_plane(plane, state) {
 		struct drm_plane_state *pstate;
-
-		if (cnt >= ARRAY_SIZE(pstates)) {
+		if (cnt >= (hw_cfg->lm.nb_stages)) {
 			dev_err(dev->dev, "too many planes!\n");
 			return -EINVAL;
 		}
@@ -369,13 +403,13 @@
 		 */
 		if (!pstate)
 			pstate = plane->state;
-
 		pstates[cnt].plane = plane;
 		pstates[cnt].state = to_mdp5_plane_state(pstate);
 
 		cnt++;
 	}
 
+	/* assign a stage based on sorted zpos property */
 	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
 
 	for (i = 0; i < cnt; i++) {
@@ -388,13 +422,15 @@
 	return 0;
 }
 
-static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
+				   struct drm_crtc_state *old_crtc_state)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	DBG("%s: begin", mdp5_crtc->name);
 }
 
-static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
+				   struct drm_crtc_state *old_crtc_state)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -691,8 +727,8 @@
 	complete_flip(crtc, file);
 }
 
-/* set interface for routing crtc->encoder: */
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
@@ -715,7 +751,8 @@
 
 	mdp_irq_update(&mdp5_kms->base);
 
-	mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
+	mdp5_crtc->ctl = ctl;
+	mdp5_ctl_set_pipeline(ctl, intf, lm);
 }
 
 int mdp5_crtc_get_lm(struct drm_crtc *crtc)
@@ -724,12 +761,6 @@
 	return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
 }
 
-struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
-{
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-	return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl;
-}
-
 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -774,7 +805,5 @@
 	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
 	plane->crtc = crtc;
 
-	mdp5_plane_install_properties(plane, &crtc->base);
-
 	return crtc;
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index f2530f2..4e81ca4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -17,7 +17,7 @@
 /*
  * CTL - MDP Control Pool Manager
  *
- * Controls are shared between all CRTCs.
+ * Controls are shared between all display interfaces.
  *
  * They are intended to be used for data path configuration.
  * The top level register programming describes the complete data path for
@@ -27,12 +27,11 @@
  *
  * In certain use cases (high-resolution dual pipe), one single CTL can be
  * shared across multiple CRTCs.
- *
- * Because the number of CTLs can be less than the number of CRTCs,
- * CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is
- * requested by the client (in mdp5_crtc_mode_set()).
  */
 
+#define CTL_STAT_BUSY		0x1
+#define CTL_STAT_BOOKED	0x2
+
 struct op_mode {
 	struct mdp5_interface intf;
 
@@ -46,8 +45,8 @@
 	u32 id;
 	int lm;
 
-	/* whether this CTL has been allocated or not: */
-	bool busy;
+	/* CTL status bitmask */
+	u32 status;
 
 	/* Operation Mode Configuration for the Pipeline */
 	struct op_mode pipeline;
@@ -61,7 +60,10 @@
 
 	bool cursor_on;
 
-	struct drm_crtc *crtc;
+	/* True if the current CTL has FLUSH bits pending for single FLUSH. */
+	bool flush_pending;
+
+	struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
 };
 
 struct mdp5_ctl_manager {
@@ -74,6 +76,10 @@
 	/* to filter out non-present bits in the current hardware config */
 	u32 flush_hw_mask;
 
+	/* status for single FLUSH */
+	bool single_flush_supported;
+	u32 single_flush_pending_mask;
+
 	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
 	spinlock_t pool_lock;
 	struct mdp5_ctl ctls[MAX_CTL];
@@ -168,11 +174,21 @@
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 }
 
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
+		struct mdp5_interface *intf, int lm)
 {
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
 
+	if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) {
+		dev_err(mdp5_kms->dev->dev,
+			"CTL %d is allocated by INTF %d, but used by INTF %d\n",
+			ctl->id, ctl->pipeline.intf.num, intf->num);
+		return -EINVAL;
+	}
+
+	ctl->lm = lm;
+
 	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
 
 	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
@@ -287,29 +303,85 @@
 		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
 
 	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+	ctl->cursor_on = enable;
 
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 
 	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
-	ctl->cursor_on = enable;
 
 	return 0;
 }
 
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
+static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
+		enum mdp_mixer_stage_id stage)
+{
+	switch (pipe) {
+	case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
+	case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
+	case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
+	case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
+	case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
+	case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
+	case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
+	case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
+	case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
+	case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
+	default:	return 0;
+	}
+}
+
+static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
+		enum mdp_mixer_stage_id stage)
+{
+	if (stage < STAGE6)
+		return 0;
+
+	switch (pipe) {
+	case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
+	case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
+	case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
+	case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
+	case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
+	case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
+	case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
+	case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
+	case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
+	case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
+	default:	return 0;
+	}
+}
+
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
+	u32 ctl_blend_op_flags)
 {
 	unsigned long flags;
+	u32 blend_cfg = 0, blend_ext_cfg = 0;
+	int i, start_stage;
 
-	if (ctl->cursor_on)
-		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
-	else
-		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
+	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
+		start_stage = STAGE0;
+		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
+	} else {
+		start_stage = STAGE_BASE;
+	}
+
+	for (i = start_stage; i < start_stage + stage_cnt; i++) {
+		blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
+		blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
+	}
 
 	spin_lock_irqsave(&ctl->hw_lock, flags);
-	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+	if (ctl->cursor_on)
+		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
+
+	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
+	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 
-	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);
+	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);
+
+	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
+		blend_cfg, blend_ext_cfg);
 
 	return 0;
 }
@@ -379,6 +451,31 @@
 	return sw_mask;
 }
 
+static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
+		u32 *flush_id)
+{
+	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+
+	if (ctl->pair) {
+		DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
+		ctl->flush_pending = true;
+		ctl_mgr->single_flush_pending_mask |= (*flush_mask);
+		*flush_mask = 0;
+
+		if (ctl->pair->flush_pending) {
+			*flush_id = min_t(u32, ctl->id, ctl->pair->id);
+			*flush_mask = ctl_mgr->single_flush_pending_mask;
+
+			ctl->flush_pending = false;
+			ctl->pair->flush_pending = false;
+			ctl_mgr->single_flush_pending_mask = 0;
+
+			DBG("Single FLUSH mask %x,ID %d", *flush_mask,
+				*flush_id);
+		}
+	}
+}
+
 /**
  * mdp5_ctl_commit() - Register Flush
  *
@@ -400,6 +497,8 @@
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 	struct op_mode *pipeline = &ctl->pipeline;
 	unsigned long flags;
+	u32 flush_id = ctl->id;
+	u32 curr_ctl_flush_mask;
 
 	pipeline->start_mask &= ~flush_mask;
 
@@ -415,9 +514,13 @@
 
 	flush_mask &= ctl_mgr->flush_hw_mask;
 
+	curr_ctl_flush_mask = flush_mask;
+
+	fix_for_single_flush(ctl, &flush_mask, &flush_id);
+
 	if (flush_mask) {
 		spin_lock_irqsave(&ctl->hw_lock, flags);
-		ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
+		ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
 		spin_unlock_irqrestore(&ctl->hw_lock, flags);
 	}
 
@@ -426,7 +529,7 @@
 		refill_start_mask(ctl);
 	}
 
-	return flush_mask;
+	return curr_ctl_flush_mask;
 }
 
 u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
@@ -434,59 +537,85 @@
 	return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
 }
 
-void mdp5_ctl_release(struct mdp5_ctl *ctl)
-{
-	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
-	unsigned long flags;
-
-	if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
-		dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
-				ctl->id, ctl->busy);
-		return;
-	}
-
-	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
-	ctl->busy = false;
-	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
-
-	DBG("CTL %d released", ctl->id);
-}
-
 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
 {
 	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
 }
 
 /*
- * mdp5_ctl_request() - CTL dynamic allocation
+ * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
+ */
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
+{
+	struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
+	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+
+	/* do nothing silently if the hw doesn't support it */
+	if (!ctl_mgr->single_flush_supported)
+		return 0;
+
+	if (!enable) {
+		ctlx->pair = NULL;
+		ctly->pair = NULL;
+		mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0);
+		return 0;
+	} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
+		dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
+		return -EINVAL;
+	} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
+		dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
+		return -EINVAL;
+	}
+
+	ctlx->pair = ctly;
+	ctly->pair = ctlx;
+
+	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
+		MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
+
+	return 0;
+}
+
+/*
+ * mdp5_ctl_request() - CTL allocation
  *
- * Note: Current implementation considers that we can only have one CRTC per CTL
+ * Try to return a booked CTL if @intf_num is 1 or 2, and an unbooked one
+ * for other INTFs. If no CTL is available in the preferred category,
+ * allocate from the other one.
  *
- * @return first free CTL
+ * @return NULL if no CTL is available.
  */
 struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
-		struct drm_crtc *crtc)
+		int intf_num)
 {
 	struct mdp5_ctl *ctl = NULL;
+	const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
+	u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
 	unsigned long flags;
 	int c;
 
 	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
 
+	/* search the preferred category first */
 	for (c = 0; c < ctl_mgr->nctl; c++)
-		if (!ctl_mgr->ctls[c].busy)
-			break;
+		if ((ctl_mgr->ctls[c].status & checkm) == match)
+			goto found;
 
-	if (unlikely(c >= ctl_mgr->nctl)) {
-		dev_err(ctl_mgr->dev->dev, "No more CTL available!");
-		goto unlock;
-	}
+	dev_warn(ctl_mgr->dev->dev,
+		"fall back to the other CTL category for INTF %d!\n", intf_num);
 
+	match ^= CTL_STAT_BOOKED;
+	for (c = 0; c < ctl_mgr->nctl; c++)
+		if ((ctl_mgr->ctls[c].status & checkm) == match)
+			goto found;
+
+	dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+	goto unlock;
+
+found:
 	ctl = &ctl_mgr->ctls[c];
-
-	ctl->lm = mdp5_crtc_get_lm(crtc);
-	ctl->crtc = crtc;
-	ctl->busy = true;
+	ctl->pipeline.intf.num = intf_num;
+	ctl->lm = -1;
+	ctl->status |= CTL_STAT_BUSY;
 	ctl->pending_ctl_trigger = 0;
 	DBG("CTL %d allocated", ctl->id);
 
@@ -515,9 +644,11 @@
 }
 
 struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
-		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
+		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
 {
 	struct mdp5_ctl_manager *ctl_mgr;
+	const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
+	int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
 	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
 	unsigned long flags;
 	int c, ret;
@@ -551,14 +682,28 @@
 		if (WARN_ON(!ctl_cfg->base[c])) {
 			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
 			ret = -EINVAL;
+			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 			goto fail;
 		}
 		ctl->ctlm = ctl_mgr;
 		ctl->id = c;
 		ctl->reg_offset = ctl_cfg->base[c];
-		ctl->busy = false;
+		ctl->status = 0;
 		spin_lock_init(&ctl->hw_lock);
 	}
+
+	/*
+	 * In the Dual DSI case, CTL0 and CTL1 are always assigned to the two
+	 * DSI interfaces to support the single FLUSH feature (a single write
+	 * to CTL0's FLUSH register flushes both CTL0 and CTL1), which keeps
+	 * the two DSI pipes in sync. Single FLUSH is supported from hw rev
+	 * v3.0.
+	 */
+	if (rev >= 3) {
+		ctl_mgr->single_flush_supported = true;
+		/* Reserve CTL0/1 for INTF1/2 */
+		ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
+		ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
+	}
 	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
 
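The fix_for_single_flush() helper added above defers the first paired CTL's
commit and lets the second commit push both CTLs' accumulated bits through the
lower CTL id's FLUSH register. A standalone sketch of that flow (plain C with
the driver types pared down and locking omitted):

    #include <stdint.h>
    #include <stdio.h>

    struct ctl { uint32_t id; int flush_pending; struct ctl *pair; };
    static uint32_t single_flush_pending_mask;

    static void fix_for_single_flush(struct ctl *ctl, uint32_t *mask, uint32_t *id)
    {
        if (!ctl->pair)
            return;
        ctl->flush_pending = 1;
        single_flush_pending_mask |= *mask;
        *mask = 0;                              /* defer: nothing written yet */

        if (ctl->pair->flush_pending) {
            *id = ctl->id < ctl->pair->id ? ctl->id : ctl->pair->id;
            *mask = single_flush_pending_mask;  /* both CTLs' bits */
            ctl->flush_pending = ctl->pair->flush_pending = 0;
            single_flush_pending_mask = 0;
        }
    }

    int main(void)
    {
        struct ctl c0 = { .id = 0 }, c1 = { .id = 1 };
        uint32_t mask, id;

        c0.pair = &c1;
        c1.pair = &c0;

        mask = 0x30; id = c0.id;
        fix_for_single_flush(&c0, &mask, &id);
        printf("CTL0 commit: mask=%#x (deferred)\n", mask);     /* 0 */

        mask = 0xc0; id = c1.id;
        fix_for_single_flush(&c1, &mask, &id);
        printf("CTL1 commit: mask=%#x via CTL%u\n", mask, id);  /* 0xf0 via CTL0 */
        return 0;
    }
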
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index 4678228..96148c6 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -23,7 +23,7 @@
  */
 struct mdp5_ctl_manager;
 struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
-		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
+		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd);
 void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
 void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
 
@@ -32,49 +32,32 @@
  * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler,
  * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
  */
-struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num);
+
 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
 
 struct mdp5_interface;
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf);
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
+				int lm);
 int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
 
 int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
-
-/*
- * blend_cfg (LM blender config):
- *
- * The function below allows the caller of mdp5_ctl_blend() to specify how pipes
- * are being blended according to their stage (z-order), through @blend_cfg arg.
- */
-static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
-		enum mdp_mixer_stage_id stage)
-{
-	switch (pipe) {
-	case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
-	case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
-	case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
-	case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
-	case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
-	case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
-	case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
-	case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
-	case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
-	case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
-	default:	return 0;
-	}
-}
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
 
 /*
  * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
  *
- * @blend_cfg: see LM blender config definition below
+ * @stage: array containing the pipe number for each blend stage
+ * @stage_cnt: number of valid stages in the @stage array
+ * @ctl_blend_op_flags: blender operation mode flags
  *
  * Note:
  * CTL registers need to be flushed after calling this function
  * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
  */
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
+#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT	BIT(0)
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt,
+	u32 ctl_blend_op_flags);
 
 /**
  * mdp_ctl_flush_mask...() - Register FLUSH masks
@@ -91,8 +74,6 @@
 u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
 u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
 
-void mdp5_ctl_release(struct mdp5_ctl *ctl);
-
 
 
 #endif /* __MDP5_CTL_H__ */
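
With the header changes above, mdp5_ctl_blend() takes the blend configuration
as an ordered stage array instead of a caller-built register mask. A hedged
usage sketch (the stage-to-pipe assignment is illustrative, and flush_mask is
assumed to come from the mdp_ctl_flush_mask_ctl() helper family):

    u8 stage[STAGE_MAX + 1] = { SSPP_RGB0, SSPP_VIG0 };  /* base, then z-order up */
    int ret;

    /* assumed: ctl was obtained from mdp5_ctlm_request(ctlm, intf_num) */
    ret = mdp5_ctl_blend(ctl, stage, 2, MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT);
    if (!ret)
        mdp5_ctl_commit(ctl, flush_mask);  /* registers only latch on flush */
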
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index de97c08..c9e32b0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -27,6 +27,8 @@
 	spinlock_t intf_lock;	/* protect REG_MDP5_INTF_* registers */
 	bool enabled;
 	uint32_t bsc;
+
+	struct mdp5_ctl *ctl;
 };
 #define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
 
@@ -222,14 +224,15 @@
 
 	spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
 
-	mdp5_crtc_set_intf(encoder->crtc, &mdp5_encoder->intf);
+	mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_encoder->intf,
+				mdp5_encoder->ctl);
 }
 
 static void mdp5_encoder_disable(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
-	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+	struct mdp5_ctl *ctl = mdp5_encoder->ctl;
 	int lm = mdp5_crtc_get_lm(encoder->crtc);
 	struct mdp5_interface *intf = &mdp5_encoder->intf;
 	int intfn = mdp5_encoder->intf.num;
@@ -264,7 +267,7 @@
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
-	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+	struct mdp5_ctl *ctl = mdp5_encoder->ctl;
 	struct mdp5_interface *intf = &mdp5_encoder->intf;
 	int intfn = mdp5_encoder->intf.num;
 	unsigned long flags;
@@ -294,6 +297,7 @@
 					struct drm_encoder *slave_encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+	struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
 	struct mdp5_kms *mdp5_kms;
 	int intf_num;
 	u32 data = 0;
@@ -316,12 +320,13 @@
 
 	/* Make sure clocks are on when connectors call this function. */
 	mdp5_enable(mdp5_kms);
-	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
-		MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
 	/* Dumb Panel, Sync mode */
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0);
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data);
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
+
+	mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
+
 	mdp5_disable(mdp5_kms);
 
 	return 0;
@@ -329,7 +334,7 @@
 
 /* initialize encoder */
 struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
-				struct mdp5_interface *intf)
+			struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
 	struct drm_encoder *encoder = NULL;
 	struct mdp5_encoder *mdp5_encoder;
@@ -345,6 +350,7 @@
 
 	memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf));
 	encoder = &mdp5_encoder->base;
+	mdp5_encoder->ctl = ctl;
 
 	spin_lock_init(&mdp5_encoder->intf_lock);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 33bd4c6..b1f73be 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -21,8 +21,11 @@
 #include "msm_drv.h"
 #include "mdp5_kms.h"
 
-void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask)
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+		uint32_t old_irqmask)
 {
+	mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_CLEAR(0),
+		irqmask ^ (irqmask & old_irqmask));
 	mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask);
 }
 
@@ -71,9 +74,10 @@
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	unsigned int id;
-	uint32_t status;
+	uint32_t status, enable;
 
-	status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0));
+	enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0));
+	status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable;
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status);
 
 	VERB("status=%08x", status);
@@ -112,15 +116,24 @@
 
 int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+	mdp5_enable(mdp5_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp5_crtc_vblank(crtc), true);
+	mdp5_disable(mdp5_kms);
+
 	return 0;
 }
 
 void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+	mdp5_enable(mdp5_kms);
 	mdp_update_vblank_mask(to_mdp_kms(kms),
 			mdp5_crtc_vblank(crtc), false);
+	mdp5_disable(mdp5_kms);
 }
 
 /*
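
The irqmask ^ (irqmask & old_irqmask) expression in mdp5_set_irqmask() selects
exactly the bits present in the new mask but not in the old one (equivalently
irqmask & ~old_irqmask), so stale status is cleared only for interrupts that
are about to be enabled. A standalone check of that identity:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t old = 0x0000000c, cur = 0x0000001c;
        uint32_t newly = cur ^ (cur & old);

        assert(newly == (cur & ~old));  /* same bit set */
        assert(newly == 0x00000010);    /* only the newly-enabled bit */
        return 0;
    }
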
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index e253db5..047cb04 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -177,7 +177,8 @@
 	clk_disable_unprepare(mdp5_kms->ahb_clk);
 	clk_disable_unprepare(mdp5_kms->axi_clk);
 	clk_disable_unprepare(mdp5_kms->core_clk);
-	clk_disable_unprepare(mdp5_kms->lut_clk);
+	if (mdp5_kms->lut_clk)
+		clk_disable_unprepare(mdp5_kms->lut_clk);
 
 	return 0;
 }
@@ -189,14 +190,15 @@
 	clk_prepare_enable(mdp5_kms->ahb_clk);
 	clk_prepare_enable(mdp5_kms->axi_clk);
 	clk_prepare_enable(mdp5_kms->core_clk);
-	clk_prepare_enable(mdp5_kms->lut_clk);
+	if (mdp5_kms->lut_clk)
+		clk_prepare_enable(mdp5_kms->lut_clk);
 
 	return 0;
 }
 
 static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
 		enum mdp5_intf_type intf_type, int intf_num,
-		enum mdp5_intf_mode intf_mode)
+		enum mdp5_intf_mode intf_mode, struct mdp5_ctl *ctl)
 {
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
@@ -209,9 +211,9 @@
 
 	if ((intf_type == INTF_DSI) &&
 		(intf_mode == MDP5_INTF_DSI_MODE_COMMAND))
-		encoder = mdp5_cmd_encoder_init(dev, &intf);
+		encoder = mdp5_cmd_encoder_init(dev, &intf, ctl);
 	else
-		encoder = mdp5_encoder_init(dev, &intf);
+		encoder = mdp5_encoder_init(dev, &intf, ctl);
 
 	if (IS_ERR(encoder)) {
 		dev_err(dev->dev, "failed to construct encoder\n");
@@ -249,6 +251,8 @@
 	const struct mdp5_cfg_hw *hw_cfg =
 					mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 	enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
+	struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
+	struct mdp5_ctl *ctl;
 	struct drm_encoder *encoder;
 	int ret = 0;
 
@@ -259,8 +263,14 @@
 		if (!priv->edp)
 			break;
 
+		ctl = mdp5_ctlm_request(ctlm, intf_num);
+		if (!ctl) {
+			ret = -EINVAL;
+			break;
+		}
+
 		encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num,
-					MDP5_INTF_MODE_NONE);
+					MDP5_INTF_MODE_NONE, ctl);
 		if (IS_ERR(encoder)) {
 			ret = PTR_ERR(encoder);
 			break;
@@ -272,8 +282,14 @@
 		if (!priv->hdmi)
 			break;
 
+		ctl = mdp5_ctlm_request(ctlm, intf_num);
+		if (!ctl) {
+			ret = -EINVAL;
+			break;
+		}
+
 		encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num,
-					MDP5_INTF_MODE_NONE);
+					MDP5_INTF_MODE_NONE, ctl);
 		if (IS_ERR(encoder)) {
 			ret = PTR_ERR(encoder);
 			break;
@@ -298,14 +314,20 @@
 		if (!priv->dsi[dsi_id])
 			break;
 
+		ctl = mdp5_ctlm_request(ctlm, intf_num);
+		if (!ctl) {
+			ret = -EINVAL;
+			break;
+		}
+
 		for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
 			mode = (i == MSM_DSI_CMD_ENCODER_ID) ?
 				MDP5_INTF_DSI_MODE_COMMAND :
 				MDP5_INTF_DSI_MODE_VIDEO;
 			dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI,
-							intf_num, mode);
-			if (IS_ERR(dsi_encs)) {
-				ret = PTR_ERR(dsi_encs);
+							intf_num, mode, ctl);
+			if (IS_ERR(dsi_encs[i])) {
+				ret = PTR_ERR(dsi_encs[i]);
 				break;
 			}
 		}
@@ -327,9 +349,12 @@
 	static const enum mdp5_pipe crtcs[] = {
 			SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
 	};
-	static const enum mdp5_pipe pub_planes[] = {
+	static const enum mdp5_pipe vig_planes[] = {
 			SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
 	};
+	static const enum mdp5_pipe dma_planes[] = {
+			SSPP_DMA0, SSPP_DMA1,
+	};
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	const struct mdp5_cfg_hw *hw_cfg;
@@ -350,7 +375,7 @@
 		struct drm_crtc *crtc;
 
 		plane = mdp5_plane_init(dev, crtcs[i], true,
-				hw_cfg->pipe_rgb.base[i]);
+			hw_cfg->pipe_rgb.base[i], hw_cfg->pipe_rgb.caps);
 		if (IS_ERR(plane)) {
 			ret = PTR_ERR(plane);
 			dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
@@ -368,16 +393,30 @@
 		priv->crtcs[priv->num_crtcs++] = crtc;
 	}
 
-	/* Construct public planes: */
+	/* Construct video planes: */
 	for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
 		struct drm_plane *plane;
 
-		plane = mdp5_plane_init(dev, pub_planes[i], false,
-				hw_cfg->pipe_vig.base[i]);
+		plane = mdp5_plane_init(dev, vig_planes[i], false,
+			hw_cfg->pipe_vig.base[i], hw_cfg->pipe_vig.caps);
 		if (IS_ERR(plane)) {
 			ret = PTR_ERR(plane);
 			dev_err(dev->dev, "failed to construct %s plane: %d\n",
-					pipe2name(pub_planes[i]), ret);
+					pipe2name(vig_planes[i]), ret);
+			goto fail;
+		}
+	}
+
+	/* DMA planes */
+	for (i = 0; i < hw_cfg->pipe_dma.count; i++) {
+		struct drm_plane *plane;
+
+		plane = mdp5_plane_init(dev, dma_planes[i], false,
+				hw_cfg->pipe_dma.base[i], hw_cfg->pipe_dma.caps);
+		if (IS_ERR(plane)) {
+			ret = PTR_ERR(plane);
+			dev_err(dev->dev, "failed to construct %s plane: %d\n",
+					pipe2name(dma_planes[i]), ret);
 			goto fail;
 		}
 	}
@@ -489,7 +528,7 @@
 		goto fail;
 	ret = get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk");
 	if (ret)
-		goto fail;
+		DBG("failed to get (optional) lut_clk clock");
 	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
 	if (ret)
 		goto fail;
@@ -521,7 +560,7 @@
 		goto fail;
 	}
 
-	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
+	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
 	if (IS_ERR(mdp5_kms->ctlm)) {
 		ret = PTR_ERR(mdp5_kms->ctlm);
 		mdp5_kms->ctlm = NULL;
@@ -577,6 +616,11 @@
 		goto fail;
 	}
 
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.max_width = config->hw->lm.max_width;
+	dev->mode_config.max_height = config->hw->lm.max_height;
+
 	return kms;
 
 fail:
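
Each interface now requests its own CTL before its encoder is constructed, and
the manager hands out an idle CTL whose BOOKED bit matches the requested
category (booked for INTF 1/2, unbooked otherwise), falling back to the other
category when the preferred one is exhausted. A standalone sketch of the
matching predicate (the CTL_STAT_* bit positions are assumptions; only their
distinctness matters):

    #include <stdint.h>
    #include <stdio.h>

    #define CTL_STAT_BUSY   (1u << 0)   /* assumed bit positions */
    #define CTL_STAT_BOOKED (1u << 1)

    static int ctl_matches(uint32_t status, int intf_num)
    {
        const uint32_t checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
        uint32_t match = (intf_num == 1 || intf_num == 2) ? CTL_STAT_BOOKED : 0;

        return (status & checkm) == match;
    }

    int main(void)
    {
        printf("%d\n", ctl_matches(CTL_STAT_BOOKED, 1));                 /* 1: idle, booked */
        printf("%d\n", ctl_matches(CTL_STAT_BOOKED | CTL_STAT_BUSY, 1)); /* 0: busy */
        printf("%d\n", ctl_matches(0, 3));                               /* 1: idle, unbooked */
        return 0;
    }
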
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index e79ac09..0bb6242 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -70,18 +70,12 @@
 struct mdp5_plane_state {
 	struct drm_plane_state base;
 
-	/* "virtual" zpos.. we calculate actual mixer-stage at runtime
-	 * by sorting the attached planes by zpos and then assigning
-	 * mixer stage lowest to highest.  Private planes get default
-	 * zpos of zero, and public planes a unique value that is
-	 * greater than zero.  This way, things work out if a naive
-	 * userspace assigns planes to a crtc without setting zpos.
-	 */
-	int zpos;
+	/* kept in line with the exposed plane properties */
+	uint8_t premultiplied;
+	uint8_t zpos;
+	uint8_t alpha;
 
-	/* the actual mixer stage, calculated in crtc->atomic_check()
-	 * NOTE: this should move to mdp5_crtc_state, when that exists
-	 */
+	/* assigned by crtc blender */
 	enum mdp_mixer_stage_id stage;
 
 	/* some additional transactional status to help us know in the
@@ -192,7 +186,8 @@
 int mdp5_disable(struct mdp5_kms *mdp5_kms);
 int mdp5_enable(struct mdp5_kms *mdp5_kms);
 
-void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+		uint32_t old_irqmask);
 void mdp5_irq_preinstall(struct msm_kms *kms);
 int mdp5_irq_postinstall(struct msm_kms *kms);
 void mdp5_irq_uninstall(struct msm_kms *kms);
@@ -202,60 +197,38 @@
 int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
 void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
 
-static inline bool pipe_supports_yuv(enum mdp5_pipe pipe)
-{
-	switch (pipe) {
-	case SSPP_VIG0:
-	case SSPP_VIG1:
-	case SSPP_VIG2:
-	case SSPP_VIG3:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static inline
-uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
-		uint32_t max_formats)
-{
-	return mdp_get_formats(pixel_formats, max_formats,
-				!pipe_supports_yuv(pipe));
-}
-
-void mdp5_plane_install_properties(struct drm_plane *plane,
-		struct drm_mode_object *obj);
 uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
 void mdp5_plane_complete_flip(struct drm_plane *plane);
 void mdp5_plane_complete_commit(struct drm_plane *plane,
 	struct drm_plane_state *state);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
-		enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
+		enum mdp5_pipe pipe, bool private_plane,
+		uint32_t reg_offset, uint32_t caps);
 
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
 
 int mdp5_crtc_get_lm(struct drm_crtc *crtc);
-struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
 void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf);
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 		struct drm_plane *plane, int id);
 
 struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
-		struct mdp5_interface *intf);
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
 int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
 					struct drm_encoder *slave_encoder);
 
 #ifdef CONFIG_DRM_MSM_DSI
 struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
-				struct mdp5_interface *intf);
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
 int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
 					struct drm_encoder *slave_encoder);
 #else
-static inline struct drm_encoder *mdp5_cmd_encoder_init(
-			struct drm_device *dev, struct mdp5_interface *intf)
+static inline struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
 	return ERR_PTR(-EINVAL);
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 2227556..07fb62f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -26,6 +26,7 @@
 
 	spinlock_t pipe_lock;	/* protect REG_MDP5_PIPE_* registers */
 	uint32_t reg_offset;
+	uint32_t caps;
 
 	uint32_t flush_mask;	/* used to commit pipe registers */
 
@@ -40,6 +41,7 @@
 		unsigned int crtc_w, unsigned int crtc_h,
 		uint32_t src_x, uint32_t src_y,
 		uint32_t src_w, uint32_t src_h);
+
 static void set_scanout_locked(struct drm_plane *plane,
 		struct drm_framebuffer *fb);
 
@@ -64,18 +66,122 @@
 	kfree(mdp5_plane);
 }
 
-/* helper to install properties which are common to planes and crtcs */
-void mdp5_plane_install_properties(struct drm_plane *plane,
-		struct drm_mode_object *obj)
+static void mdp5_plane_install_rotation_property(struct drm_device *dev,
+		struct drm_plane *plane)
 {
-	// XXX
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+	if (!(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP) &&
+		!(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP))
+		return;
+
+	if (!dev->mode_config.rotation_property)
+		dev->mode_config.rotation_property =
+			drm_mode_create_rotation_property(dev,
+			BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y));
+
+	if (dev->mode_config.rotation_property)
+		drm_object_attach_property(&plane->base,
+			dev->mode_config.rotation_property,
+			0);
 }
 
-int mdp5_plane_set_property(struct drm_plane *plane,
-		struct drm_property *property, uint64_t val)
+/* helper to install properties which are common to planes and crtcs */
+static void mdp5_plane_install_properties(struct drm_plane *plane,
+		struct drm_mode_object *obj)
 {
-	// XXX
-	return -EINVAL;
+	struct drm_device *dev = plane->dev;
+	struct msm_drm_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+
+#define INSTALL_PROPERTY(name, NAME, init_val, fnc, ...) do { \
+		prop = dev_priv->plane_property[PLANE_PROP_##NAME]; \
+		if (!prop) { \
+			prop = drm_property_##fnc(dev, 0, #name, \
+				##__VA_ARGS__); \
+			if (!prop) { \
+				dev_warn(dev->dev, \
+					"Create property %s failed\n", \
+					#name); \
+				return; \
+			} \
+			dev_priv->plane_property[PLANE_PROP_##NAME] = prop; \
+		} \
+		drm_object_attach_property(&plane->base, prop, init_val); \
+	} while (0)
+
+#define INSTALL_RANGE_PROPERTY(name, NAME, min, max, init_val) \
+		INSTALL_PROPERTY(name, NAME, init_val, \
+				create_range, min, max)
+
+#define INSTALL_ENUM_PROPERTY(name, NAME, init_val) \
+		INSTALL_PROPERTY(name, NAME, init_val, \
+				create_enum, name##_prop_enum_list, \
+				ARRAY_SIZE(name##_prop_enum_list))
+
+	INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1);
+
+	mdp5_plane_install_rotation_property(dev, plane);
+
+#undef INSTALL_RANGE_PROPERTY
+#undef INSTALL_ENUM_PROPERTY
+#undef INSTALL_PROPERTY
+}
+
+static int mdp5_plane_atomic_set_property(struct drm_plane *plane,
+		struct drm_plane_state *state, struct drm_property *property,
+		uint64_t val)
+{
+	struct drm_device *dev = plane->dev;
+	struct mdp5_plane_state *pstate;
+	struct msm_drm_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
+	pstate = to_mdp5_plane_state(state);
+
+#define SET_PROPERTY(name, NAME, type) do { \
+		if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
+			pstate->name = (type)val; \
+			DBG("Set property %s %d", #name, (type)val); \
+			goto done; \
+		} \
+	} while (0)
+
+	SET_PROPERTY(zpos, ZPOS, uint8_t);
+
+	dev_err(dev->dev, "Invalid property\n");
+	ret = -EINVAL;
+done:
+	return ret;
+#undef SET_PROPERTY
+}
+
+static int mdp5_plane_atomic_get_property(struct drm_plane *plane,
+		const struct drm_plane_state *state,
+		struct drm_property *property, uint64_t *val)
+{
+	struct drm_device *dev = plane->dev;
+	struct mdp5_plane_state *pstate;
+	struct msm_drm_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
+	pstate = to_mdp5_plane_state(state);
+
+#define GET_PROPERTY(name, NAME, type) do { \
+		if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
+			*val = pstate->name; \
+			DBG("Get property %s %lld", #name, *val); \
+			goto done; \
+		} \
+	} while (0)
+
+	GET_PROPERTY(zpos, ZPOS, uint8_t);
+
+	dev_err(dev->dev, "Invalid property\n");
+	ret = -EINVAL;
+done:
+	return ret;
+#undef GET_PROPERTY
 }
 
 static void mdp5_plane_reset(struct drm_plane *plane)
@@ -88,11 +194,15 @@
 	kfree(to_mdp5_plane_state(plane->state));
 	mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
 
-	if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
-		mdp5_state->zpos = 0;
-	} else {
-		mdp5_state->zpos = 1 + drm_plane_index(plane);
-	}
+	/* assign default blend parameters */
+	mdp5_state->alpha = 255;
+	mdp5_state->premultiplied = 0;
+
+	if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+		mdp5_state->zpos = STAGE_BASE;
+	else
+		mdp5_state->zpos = STAGE0 + drm_plane_index(plane);
+
 	mdp5_state->base.plane = plane;
 
 	plane->state = &mdp5_state->base;
@@ -131,7 +241,9 @@
 		.update_plane = drm_atomic_helper_update_plane,
 		.disable_plane = drm_atomic_helper_disable_plane,
 		.destroy = mdp5_plane_destroy,
-		.set_property = mdp5_plane_set_property,
+		.set_property = drm_atomic_helper_plane_set_property,
+		.atomic_set_property = mdp5_plane_atomic_set_property,
+		.atomic_get_property = mdp5_plane_atomic_get_property,
 		.reset = mdp5_plane_reset,
 		.atomic_duplicate_state = mdp5_plane_duplicate_state,
 		.atomic_destroy_state = mdp5_plane_destroy_state,
@@ -164,10 +276,44 @@
 {
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
 	struct drm_plane_state *old_state = plane->state;
+	const struct mdp_format *format;
+	bool vflip, hflip;
 
 	DBG("%s: check (%d -> %d)", mdp5_plane->name,
 			plane_enabled(old_state), plane_enabled(state));
 
+	if (plane_enabled(state)) {
+		format = to_mdp_format(msm_framebuffer_format(state->fb));
+		if (MDP_FORMAT_IS_YUV(format) &&
+			!pipe_supports_yuv(mdp5_plane->caps)) {
+			dev_err(plane->dev->dev,
+				"Pipe doesn't support YUV\n");
+
+			return -EINVAL;
+		}
+
+		if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) &&
+			(((state->src_w >> 16) != state->crtc_w) ||
+			((state->src_h >> 16) != state->crtc_h))) {
+			dev_err(plane->dev->dev,
+				"Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
+				state->src_w >> 16, state->src_h >> 16,
+				state->crtc_w, state->crtc_h);
+
+			return -EINVAL;
+		}
+
+		hflip = !!(state->rotation & BIT(DRM_REFLECT_X));
+		vflip = !!(state->rotation & BIT(DRM_REFLECT_Y));
+		if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
+			(hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
+			dev_err(plane->dev->dev,
+				"Pipe doesn't support flip\n");
+
+			return -EINVAL;
+		}
+	}
+
 	if (plane_enabled(state) && plane_enabled(old_state)) {
 		/* we cannot change SMP block configuration during scanout: */
 		bool full_modeset = false;
@@ -346,16 +492,21 @@
 	return 0;
 }
 
-static int calc_scalex_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
+static int calc_scalex_steps(struct drm_plane *plane,
+		uint32_t pixel_format, uint32_t src, uint32_t dest,
 		uint32_t phasex_steps[2])
 {
+	struct mdp5_kms *mdp5_kms = get_kms(plane);
+	struct device *dev = mdp5_kms->dev->dev;
 	uint32_t phasex_step;
 	unsigned int hsub;
 	int ret;
 
 	ret = calc_phase_step(src, dest, &phasex_step);
-	if (ret)
+	if (ret) {
+		dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
 		return ret;
+	}
 
 	hsub = drm_format_horz_chroma_subsampling(pixel_format);
 
@@ -365,16 +516,21 @@
 	return 0;
 }
 
-static int calc_scaley_steps(uint32_t pixel_format, uint32_t src, uint32_t dest,
+static int calc_scaley_steps(struct drm_plane *plane,
+		uint32_t pixel_format, uint32_t src, uint32_t dest,
 		uint32_t phasey_steps[2])
 {
+	struct mdp5_kms *mdp5_kms = get_kms(plane);
+	struct device *dev = mdp5_kms->dev->dev;
 	uint32_t phasey_step;
 	unsigned int vsub;
 	int ret;
 
 	ret = calc_phase_step(src, dest, &phasey_step);
-	if (ret)
+	if (ret) {
+		dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
 		return ret;
+	}
 
 	vsub = drm_format_vert_chroma_subsampling(pixel_format);
 
@@ -384,28 +540,38 @@
 	return 0;
 }
 
-static uint32_t get_scalex_config(uint32_t src, uint32_t dest)
+static uint32_t get_scale_config(enum mdp_chroma_samp_type chroma_sample,
+		uint32_t src, uint32_t dest, bool hor)
 {
-	uint32_t filter;
+	uint32_t y_filter =   (src <= dest) ? SCALE_FILTER_CA  : SCALE_FILTER_PCMN;
+	uint32_t y_a_filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+	uint32_t uv_filter = ((src / 2) <= dest) ? /* 2x upsample */
+			SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+	uint32_t value = 0;
 
-	filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+	if (chroma_sample == CHROMA_420 || chroma_sample == CHROMA_H2V1) {
+		if (hor)
+			value = MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
+				MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(y_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(y_a_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter);
+		else
+			value = MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
+				MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(y_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(y_a_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter);
+	} else if (src != dest) {
+		if (hor)
+			value = MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
+				MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(y_a_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(y_a_filter);
+		else
+			value = MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
+				MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(y_a_filter) |
+				MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(y_a_filter);
+	}
 
-	return  MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
-		MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER(filter) |
-		MDP5_PIPE_SCALE_CONFIG_SCALEX_CR_FILTER(filter)  |
-		MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(filter);
-}
-
-static uint32_t get_scaley_config(uint32_t src, uint32_t dest)
-{
-	uint32_t filter;
-
-	filter = (src <= dest) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
-
-	return  MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
-		MDP5_PIPE_SCALE_CONFIG_SCALEY_MIN_FILTER(filter) |
-		MDP5_PIPE_SCALE_CONFIG_SCALEY_CR_FILTER(filter)  |
-		MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(filter);
+	return value;
 }
 
 static int mdp5_plane_mode_set(struct drm_plane *plane,
@@ -416,8 +582,8 @@
 		uint32_t src_w, uint32_t src_h)
 {
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+	struct drm_plane_state *pstate = plane->state;
 	struct mdp5_kms *mdp5_kms = get_kms(plane);
-	struct device *dev = mdp5_kms->dev->dev;
 	enum mdp5_pipe pipe = mdp5_plane->pipe;
 	const struct mdp_format *format;
 	uint32_t nplanes, config = 0;
@@ -425,6 +591,7 @@
 	uint32_t phasex_step[2] = {0,}, phasey_step[2] = {0,};
 	uint32_t hdecm = 0, vdecm = 0;
 	uint32_t pix_format;
+	bool vflip, hflip;
 	unsigned long flags;
 	int ret;
 
@@ -449,7 +616,7 @@
 
 	/* Request some memory from the SMP: */
 	ret = mdp5_smp_request(mdp5_kms->smp,
-			mdp5_plane->pipe, fb->pixel_format, src_w);
+			mdp5_plane->pipe, format, src_w, false);
 	if (ret)
 		return ret;
 
@@ -461,29 +628,23 @@
 	 */
 	mdp5_smp_configure(mdp5_kms->smp, pipe);
 
+	ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step);
+	if (ret)
+		return ret;
+
+	ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, phasey_step);
+	if (ret)
+		return ret;
+
+	/* TODO calc hdecm, vdecm */
+
 	/* SCALE is used to both scale and up-sample chroma components */
+	config |= get_scale_config(format->chroma_sample, src_w, crtc_w, true);
+	config |= get_scale_config(format->chroma_sample, src_h, crtc_h, false);
+	DBG("scale config = %x", config);
 
-	if ((src_w != crtc_w) || MDP_FORMAT_IS_YUV(format)) {
-		/* TODO calc hdecm */
-		ret = calc_scalex_steps(pix_format, src_w, crtc_w, phasex_step);
-		if (ret) {
-			dev_err(dev, "X scaling (%d -> %d) failed: %d\n",
-					src_w, crtc_w, ret);
-			return ret;
-		}
-		config |= get_scalex_config(src_w, crtc_w);
-	}
-
-	if ((src_h != crtc_h) || MDP_FORMAT_IS_YUV(format)) {
-		/* TODO calc vdecm */
-		ret = calc_scaley_steps(pix_format, src_h, crtc_h, phasey_step);
-		if (ret) {
-			dev_err(dev, "Y scaling (%d -> %d) failed: %d\n",
-					src_h, crtc_h, ret);
-			return ret;
-		}
-		config |= get_scaley_config(src_h, crtc_h);
-	}
+	hflip = !!(pstate->rotation & BIT(DRM_REFLECT_X));
+	vflip = !!(pstate->rotation & BIT(DRM_REFLECT_Y));
 
 	spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
 
@@ -516,7 +677,7 @@
 			MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
 			MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
 			COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
-			MDP5_PIPE_SRC_FORMAT_NUM_PLANES(format->fetch_type) |
+			MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
 			MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
 
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
@@ -526,29 +687,35 @@
 			MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
 
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
+			(hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
+			(vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
 			MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
 
 	/* not using secure mode: */
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
 
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
-			phasex_step[0]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
-			phasey_step[0]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
-			phasex_step[1]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
-			phasey_step[1]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
-			MDP5_PIPE_DECIMATION_VERT(vdecm) |
-			MDP5_PIPE_DECIMATION_HORZ(hdecm));
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
+	if (mdp5_plane->caps & MDP_PIPE_CAP_SCALE) {
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
+				phasex_step[0]);
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
+				phasey_step[0]);
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
+				phasex_step[1]);
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
+				phasey_step[1]);
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
+				MDP5_PIPE_DECIMATION_VERT(vdecm) |
+				MDP5_PIPE_DECIMATION_HORZ(hdecm));
+		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
+	}
 
-	if (MDP_FORMAT_IS_YUV(format))
-		csc_enable(mdp5_kms, pipe,
-				mdp_get_default_csc_cfg(CSC_YUV2RGB));
-	else
-		csc_disable(mdp5_kms, pipe);
+	if (mdp5_plane->caps & MDP_PIPE_CAP_CSC) {
+		if (MDP_FORMAT_IS_YUV(format))
+			csc_enable(mdp5_kms, pipe,
+					mdp_get_default_csc_cfg(CSC_YUV2RGB));
+		else
+			csc_disable(mdp5_kms, pipe);
+	}
 
 	set_scanout_locked(plane, fb);
 
@@ -599,7 +766,8 @@
 
 /* initialize plane */
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
-		enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
+		enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset,
+		uint32_t caps)
 {
 	struct drm_plane *plane = NULL;
 	struct mdp5_plane *mdp5_plane;
@@ -616,9 +784,11 @@
 
 	mdp5_plane->pipe = pipe;
 	mdp5_plane->name = pipe2name(pipe);
+	mdp5_plane->caps = caps;
 
-	mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
-			ARRAY_SIZE(mdp5_plane->formats));
+	mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
+		ARRAY_SIZE(mdp5_plane->formats),
+		!pipe_supports_yuv(mdp5_plane->caps));
 
 	mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
 	mdp5_plane->reg_offset = reg_offset;
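
The INSTALL_PROPERTY machinery above caches each drm_property in
dev_priv->plane_property[] so that every plane attaches the same object.
Expanded by hand, INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1) comes out as:

    prop = dev_priv->plane_property[PLANE_PROP_ZPOS];
    if (!prop) {
        prop = drm_property_create_range(dev, 0, "zpos", 1, 255);
        if (!prop) {
            dev_warn(dev->dev, "Create property %s failed\n", "zpos");
            return;
        }
        dev_priv->plane_property[PLANE_PROP_ZPOS] = prop;
    }
    drm_object_attach_property(&plane->base, prop, 1);
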
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 64a27d8..563cca9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -90,6 +90,8 @@
 struct mdp5_smp {
 	struct drm_device *dev;
 
+	const struct mdp5_smp_block *cfg;
+
 	int blk_cnt;
 	int blk_size;
 
@@ -137,14 +139,12 @@
 		u32 cid, int nblks)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(smp);
-	const struct mdp5_cfg_hw *hw_cfg;
 	struct mdp5_client_smp_state *ps = &smp->client_state[cid];
 	int i, ret, avail, cur_nblks, cnt = smp->blk_cnt;
 	int reserved;
 	unsigned long flags;
 
-	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-	reserved = hw_cfg->smp.reserved[cid];
+	reserved = smp->cfg->reserved[cid];
 
 	spin_lock_irqsave(&smp->state_lock, flags);
 
@@ -209,12 +209,14 @@
  * decimated width.  I.e. SMP buffering sits downstream of decimation (which
  * presumably happens during the dma from scanout buffer).
  */
-int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
+		const struct mdp_format *format, u32 width, bool hdecim)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(smp);
 	struct drm_device *dev = mdp5_kms->dev;
 	int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
 	int i, hsub, nplanes, nlines, nblks, ret;
+	u32 fmt = format->base.pixel_format;
 
 	nplanes = drm_format_num_planes(fmt);
 	hsub = drm_format_horz_chroma_subsampling(fmt);
@@ -222,6 +224,21 @@
 	/* different if BWC (compressed framebuffer?) enabled: */
 	nlines = 2;
 
+	/* Newer MDPs have split/packing logic, which fetches the sub-sampled
+	 * U and V components (splitting them from Y if necessary), packs
+	 * them together and writes to SMP using a single client.
+	 */
+	if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
+		fmt = DRM_FORMAT_NV24;
+		nplanes = 2;
+
+		/* if decimation is enabled, the HW decimates less on the
+		 * sub-sampled chroma components
+		 */
+		if (hdecim && (hsub > 1))
+			hsub = 1;
+	}
+
 	for (i = 0, nblks = 0; i < nplanes; i++) {
 		int n, fetch_stride, cpp;
 
@@ -388,6 +405,7 @@
 	}
 
 	smp->dev = dev;
+	smp->cfg = cfg;
 	smp->blk_cnt = cfg->mmb_count;
 	smp->blk_size = cfg->mmb_size;
 
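With the split/packing path above, a three-plane CHROMA_420 format such as
YUV420 on rev > 0 hardware is accounted as two SMP clients (one for Y, one for
the packed U/V pair), and horizontal decimation cancels the chroma subsampling
factor. A standalone sketch of the parameters the request loop ends up with:

    #include <stdio.h>

    int main(void)
    {
        int rev = 1, hdecim = 1;
        int nplanes = 3, hsub = 2;      /* YUV420: Y + U + V, 2x horiz subsample */

        if (rev > 0) {                  /* and chroma_sample > CHROMA_FULL */
            nplanes = 2;                /* fmt treated as NV24: Y + packed UV */
            if (hdecim && hsub > 1)
                hsub = 1;               /* HW decimates chroma less */
        }
        printf("nplanes=%d hsub=%d\n", nplanes, hsub);
        return 0;
    }
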
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index 5b6c236..20b87e8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -39,7 +39,8 @@
 struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
 void  mdp5_smp_destroy(struct mdp5_smp *smp);
 
-int  mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width);
+int  mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
+		const struct mdp_format *format, u32 width, bool hdecim);
 void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
 void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
 void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index 641d036..4f792c4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -8,17 +8,17 @@
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2014-12-05 15:34:49)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-03-24 22:05:22)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2352 bytes, from 2015-04-12 15:02:42)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  35083 bytes, from 2015-04-12 15:04:03)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  22094 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2014-10-31 16:48:57)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29012 bytes, from 2015-05-12 12:45:23)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2576 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  36021 bytes, from 2015-07-09 22:10:24)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  26057 bytes, from 2015-08-14 21:47:57)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    344 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -46,7 +46,7 @@
 
 
 enum mdp_chroma_samp_type {
-	CHROMA_RGB = 0,
+	CHROMA_FULL = 0,
 	CHROMA_H2V1 = 1,
 	CHROMA_H1V2 = 2,
 	CHROMA_420 = 3,
@@ -65,6 +65,10 @@
 	STAGE1 = 3,
 	STAGE2 = 4,
 	STAGE3 = 5,
+	STAGE4 = 6,
+	STAGE5 = 7,
+	STAGE6 = 8,
+	STAGE_MAX = 8,
 };
 
 enum mdp_alpha_type {
diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c
index 7b0524d..1c2caff 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_format.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_format.c
@@ -71,7 +71,7 @@
 	},
 };
 
-#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs) { \
+#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \
 		.base = { .pixel_format = DRM_FORMAT_ ## name }, \
 		.bpc_a = BPC ## a ## A,                          \
 		.bpc_r = BPC ## r,                               \
@@ -83,7 +83,8 @@
 		.cpp = c,                                        \
 		.unpack_count = cnt,                             \
 		.fetch_type = fp,                                \
-		.chroma_sample = cs                              \
+		.chroma_sample = cs,                             \
+		.is_yuv = yuv,                                   \
 }
 
 #define BPC0A 0
@@ -95,30 +96,49 @@
 static const struct mdp_format formats[] = {
 	/*  name      a  r  g  b   e0 e1 e2 e3  alpha   tight  cpp cnt ... */
 	FMT(ARGB8888, 8, 8, 8, 8,  1, 0, 2, 3,  true,   true,  4,  4,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(ABGR8888, 8, 8, 8, 8,  2, 0, 1, 3,  true,   true,  4,  4,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(RGBA8888, 8, 8, 8, 8,  3, 1, 0, 2,  true,   true,  4,  4,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(BGRA8888, 8, 8, 8, 8,  3, 2, 0, 1,  true,   true,  4,  4,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(XRGB8888, 8, 8, 8, 8,  1, 0, 2, 3,  false,  true,  4,  4,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(RGB888,   0, 8, 8, 8,  1, 0, 2, 0,  false,  true,  3,  3,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(BGR888,   0, 8, 8, 8,  2, 0, 1, 0,  false,  true,  3,  3,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(RGB565,   0, 5, 6, 5,  1, 0, 2, 0,  false,  true,  2,  3,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 	FMT(BGR565,   0, 5, 6, 5,  2, 0, 1, 0,  false,  true,  2,  3,
-			MDP_PLANE_INTERLEAVED, CHROMA_RGB),
+			MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
 
 	/* --- RGB formats above / YUV formats below this line --- */
 
+	/* 2 plane YUV */
 	FMT(NV12,     0, 8, 8, 8,  1, 2, 0, 0,  false,  true,  2, 2,
-			MDP_PLANE_PSEUDO_PLANAR, CHROMA_420),
+			MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
 	FMT(NV21,     0, 8, 8, 8,  2, 1, 0, 0,  false,  true,  2, 2,
-			MDP_PLANE_PSEUDO_PLANAR, CHROMA_420),
+			MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
+	FMT(NV16,     0, 8, 8, 8,  1, 2, 0, 0,  false,  true,  2, 2,
+			MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
+	FMT(NV61,     0, 8, 8, 8,  2, 1, 0, 0,  false,  true,  2, 2,
+			MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
+	/* 1 plane YUV */
+	FMT(VYUY,     0, 8, 8, 8,  2, 0, 1, 0,  false,  true,  2, 4,
+			MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+	FMT(UYVY,     0, 8, 8, 8,  1, 0, 2, 0,  false,  true,  2, 4,
+			MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+	FMT(YUYV,     0, 8, 8, 8,  0, 1, 0, 2,  false,  true,  2, 4,
+			MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+	FMT(YVYU,     0, 8, 8, 8,  0, 2, 0, 1,  false,  true,  2, 4,
+			MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+	/* 3 plane YUV */
+	FMT(YUV420,   0, 8, 8, 8,  2, 1, 0, 0,  false,  true,  1, 1,
+			MDP_PLANE_PLANAR, CHROMA_420, true),
+	FMT(YVU420,   0, 8, 8, 8,  1, 2, 0, 0,  false,  true,  1, 1,
+			MDP_PLANE_PLANAR, CHROMA_420, true),
 };
 
 /*
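
Each FMT() row expands to a designated-initializer struct mdp_format entry;
for the NV12 row above, the fields visible in this hunk come out as (the bpc
and unpack initializers between them follow the macro in the same way):

    .base = { .pixel_format = DRM_FORMAT_NV12 },
    .bpc_a = BPC0A,
    /* ... remaining bpc/unpack/alpha fields per the macro ... */
    .cpp = 2,
    .unpack_count = 2,
    .fetch_type = MDP_PLANE_PSEUDO_PLANAR,
    .chroma_sample = CHROMA_420,
    .is_yuv = true,
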
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
index 1988c24..6428730 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -39,7 +39,8 @@
 	list_for_each_entry(irq, &mdp_kms->irq_list, node)
 		irqmask |= irq->irqmask;
 
-	mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
+	mdp_kms->funcs->set_irqmask(mdp_kms, irqmask, mdp_kms->cur_irq_mask);
+	mdp_kms->cur_irq_mask = irqmask;
 }
 
 /* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 2d3428c..46a94e7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -30,7 +30,8 @@
 
 struct mdp_kms_funcs {
 	struct msm_kms_funcs base;
-	void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask);
+	void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask,
+		uint32_t old_irqmask);
 };
 
 struct mdp_kms {
@@ -42,6 +43,7 @@
 	bool in_irq;
 	struct list_head irq_list;    /* list of mdp4_irq */
 	uint32_t vblank_mask;         /* irq bits set for userspace vblank */
+	uint32_t cur_irq_mask;        /* current irq mask */
 };
 #define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
 
@@ -90,13 +92,27 @@
 	uint8_t cpp, unpack_count;
 	enum mdp_fetch_type fetch_type;
 	enum mdp_chroma_samp_type chroma_sample;
+	bool is_yuv;
 };
 #define to_mdp_format(x) container_of(x, struct mdp_format, base)
-#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->chroma_sample > CHROMA_RGB)
+#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
 
 uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
 const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
 
+/* MDP pipe capabilities */
+#define MDP_PIPE_CAP_HFLIP			BIT(0)
+#define MDP_PIPE_CAP_VFLIP			BIT(1)
+#define MDP_PIPE_CAP_SCALE			BIT(2)
+#define MDP_PIPE_CAP_CSC			BIT(3)
+#define MDP_PIPE_CAP_DECIMATION			BIT(4)
+
+static inline bool pipe_supports_yuv(uint32_t pipe_caps)
+{
+	return (pipe_caps & MDP_PIPE_CAP_SCALE) &&
+		(pipe_caps & MDP_PIPE_CAP_CSC);
+}
+
 enum csc_type {
 	CSC_RGB2RGB = 0,
 	CSC_YUV2RGB,
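
pipe_supports_yuv() ties YUV support to the two capabilities a pipe needs to
actually display YUV content: SCALE (which also performs the chroma
up-sampling) and CSC (for the YUV-to-RGB conversion). A standalone check using
the cap bits defined above:

    #include <stdint.h>
    #include <stdio.h>

    #define MDP_PIPE_CAP_HFLIP (1u << 0)
    #define MDP_PIPE_CAP_VFLIP (1u << 1)
    #define MDP_PIPE_CAP_SCALE (1u << 2)
    #define MDP_PIPE_CAP_CSC   (1u << 3)

    static int pipe_supports_yuv(uint32_t caps)
    {
        return (caps & MDP_PIPE_CAP_SCALE) && (caps & MDP_PIPE_CAP_CSC);
    }

    int main(void)
    {
        uint32_t vig = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
                       MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
        uint32_t dma = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP;

        printf("VIG-style pipe: yuv=%d\n", pipe_supports_yuv(vig)); /* 1 */
        printf("DMA-style pipe: yuv=%d\n", pipe_supports_yuv(dma)); /* 0 */
        return 0;
    }
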
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d3467b1..0339c5d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -116,6 +116,65 @@
 	return val;
 }
 
+struct vblank_event {
+	struct list_head node;
+	int crtc_id;
+	bool enable;
+};
+
+static void vblank_ctrl_worker(struct work_struct *work)
+{
+	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
+						struct msm_vblank_ctrl, work);
+	struct msm_drm_private *priv = container_of(vbl_ctrl,
+					struct msm_drm_private, vblank_ctrl);
+	struct msm_kms *kms = priv->kms;
+	struct vblank_event *vbl_ev, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vbl_ctrl->lock, flags);
+	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
+		list_del(&vbl_ev->node);
+		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+
+		if (vbl_ev->enable)
+			kms->funcs->enable_vblank(kms,
+						priv->crtcs[vbl_ev->crtc_id]);
+		else
+			kms->funcs->disable_vblank(kms,
+						priv->crtcs[vbl_ev->crtc_id]);
+
+		kfree(vbl_ev);
+
+		spin_lock_irqsave(&vbl_ctrl->lock, flags);
+	}
+
+	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+}
+
+static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
+					int crtc_id, bool enable)
+{
+	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
+	struct vblank_event *vbl_ev;
+	unsigned long flags;
+
+	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
+	if (!vbl_ev)
+		return -ENOMEM;
+
+	vbl_ev->crtc_id = crtc_id;
+	vbl_ev->enable = enable;
+
+	spin_lock_irqsave(&vbl_ctrl->lock, flags);
+	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
+	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+
+	queue_work(priv->wq, &vbl_ctrl->work);
+
+	return 0;
+}
+
 /*
  * DRM operations:
  */
@@ -125,6 +184,18 @@
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	struct msm_gpu *gpu = priv->gpu;
+	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
+	struct vblank_event *vbl_ev, *tmp;
+
+	/* We must cancel and clean up any pending vblank enable/disable
+	 * work before drm_irq_uninstall() to avoid work re-enabling an
+	 * irq after uninstall has disabled it.
+	 */
+	cancel_work_sync(&vbl_ctrl->work);
+	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
+		list_del(&vbl_ev->node);
+		kfree(vbl_ev);
+	}
 
 	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
@@ -282,6 +353,9 @@
 
 	INIT_LIST_HEAD(&priv->inactive_list);
 	INIT_LIST_HEAD(&priv->fence_cbs);
+	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
+	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+	spin_lock_init(&priv->vblank_ctrl.lock);
 
 	drm_mode_config_init(dev);
 
@@ -331,10 +405,6 @@
 		}
 	}
 
-	dev->mode_config.min_width = 0;
-	dev->mode_config.min_height = 0;
-	dev->mode_config.max_width = 2048;
-	dev->mode_config.max_height = 2048;
 	dev->mode_config.funcs = &mode_config_funcs;
 
 	ret = drm_vblank_init(dev, priv->num_crtcs);
@@ -468,7 +538,7 @@
 	if (!kms)
 		return -ENXIO;
 	DBG("dev=%p, crtc=%d", dev, crtc_id);
-	return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
+	return vblank_ctrl_queue_work(priv, crtc_id, true);
 }
 
 static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
@@ -478,7 +548,7 @@
 	if (!kms)
 		return;
 	DBG("dev=%p, crtc=%d", dev, crtc_id);
-	kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
+	vblank_ctrl_queue_work(priv, crtc_id, false);
 }
 
 /*
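
The vblank_ctrl worker above drains its event list while dropping the spinlock
around each enable/disable call, since those callbacks may sleep (the mdp5
implementations now take clock references). A minimal user-space sketch of the
same drop-the-lock-around-the-callback drain pattern, with a mutex standing in
for the spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ev { struct ev *next; int enable; };
    static struct ev *head;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void drain(void (*cb)(int enable))
    {
        pthread_mutex_lock(&lock);
        while (head) {
            struct ev *e = head;
            head = e->next;
            pthread_mutex_unlock(&lock);  /* drop lock across the callback */
            cb(e->enable);
            free(e);
            pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
    }

    static void vblank_cb(int enable)
    {
        printf("vblank %s\n", enable ? "on" : "off");
    }

    int main(void)
    {
        struct ev *e = calloc(1, sizeof(*e));

        if (!e)
            return 1;
        e->enable = 1;
        head = e;
        drain(vblank_cb);
        return 0;
    }
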
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 4ff0ec9..3be7a56 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -30,6 +30,7 @@
 #include <linux/list.h>
 #include <linux/iommu.h>
 #include <linux/types.h>
+#include <linux/of_graph.h>
 #include <asm/sizes.h>
 
 #ifndef CONFIG_OF
@@ -64,6 +65,19 @@
 	int dummy;
 };
 
+enum msm_mdp_plane_property {
+	PLANE_PROP_ZPOS,
+	PLANE_PROP_ALPHA,
+	PLANE_PROP_PREMULTIPLIED,
+	PLANE_PROP_MAX_NUM
+};
+
+struct msm_vblank_ctrl {
+	struct work_struct work;
+	struct list_head event_list;
+	spinlock_t lock;
+};
+
 struct msm_drm_private {
 
 	struct msm_kms *kms;
@@ -128,6 +142,9 @@
 	unsigned int num_connectors;
 	struct drm_connector *connectors[8];
 
+	/* Properties */
+	struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
+
 	/* VRAM carveout, used when no IOMMU: */
 	struct {
 		unsigned long size;
@@ -137,6 +154,8 @@
 		 */
 		struct drm_mm mm;
 	} vram;
+
+	struct msm_vblank_ctrl vblank_ctrl;
 };
 
 struct msm_format {
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 95f6532..f97a196 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -43,11 +43,11 @@
 	/* Note: to properly handle manual update displays, we wrap the
 	 * basic fbdev ops which write to the framebuffer
 	 */
-	.fb_read = fb_sys_read,
-	.fb_write = fb_sys_write,
-	.fb_fillrect = sys_fillrect,
-	.fb_copyarea = sys_copyarea,
-	.fb_imageblit = sys_imageblit,
+	.fb_read = drm_fb_helper_sys_read,
+	.fb_write = drm_fb_helper_sys_write,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 	.fb_mmap = msm_fbdev_mmap,
 
 	.fb_check_var = drm_fb_helper_check_var,
@@ -144,10 +144,10 @@
 		goto fail_unlock;
 	}
 
-	fbi = framebuffer_alloc(0, dev->dev);
-	if (!fbi) {
+	fbi = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(fbi)) {
 		dev_err(dev->dev, "failed to allocate fb info\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(fbi);
 		goto fail_unlock;
 	}
 
@@ -155,7 +155,6 @@
 
 	fbdev->fb = fb;
 	helper->fb = fb;
-	helper->fbdev = fbi;
 
 	fbi->par = helper;
 	fbi->flags = FBINFO_DEFAULT;
@@ -163,12 +162,6 @@
 
 	strcpy(fbi->fix.id, "msm");
 
-	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto fail_unlock;
-	}
-
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
 
@@ -191,7 +184,6 @@
 fail:
 
 	if (ret) {
-		framebuffer_release(fbi);
 		if (fb) {
 			drm_framebuffer_unregister_private(fb);
 			drm_framebuffer_remove(fb);
@@ -266,17 +258,11 @@
 	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_fb_helper *helper = priv->fbdev;
 	struct msm_fbdev *fbdev;
-	struct fb_info *fbi;
 
 	DBG();
 
-	fbi = helper->fbdev;
-
-	/* only cleanup framebuffer if it is present */
-	if (fbi) {
-		unregister_framebuffer(fbi);
-		framebuffer_release(fbi);
-	}
+	drm_fb_helper_unregister_fbi(helper);
+	drm_fb_helper_release_fbi(helper);
 
 	drm_fb_helper_fini(helper);
 
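The fbdev conversion above swaps the open-coded framebuffer_alloc() /
fb_alloc_cmap() / framebuffer_release() handling for the drm_fb_helper
equivalents, which also take over registration (note the removed
helper->fbdev assignment). Setup and teardown then pair up as:

    fbi = drm_fb_helper_alloc_fbi(helper);  /* info + cmap, tied to the helper */
    if (IS_ERR(fbi))
        return PTR_ERR(fbi);

    /* ... fill fbi->par, fix/var, attach the framebuffer ... */

    /* teardown: */
    drm_fb_helper_unregister_fbi(helper);
    drm_fb_helper_release_fbi(helper);
    drm_fb_helper_fini(helper);
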
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 2b76566..a34b437 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -18,7 +18,6 @@
 ifdef CONFIG_X86
 nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
 endif
-nouveau-y += nouveau_agp.o
 nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
 nouveau-y += nouveau_drm.o
 nouveau-y += nouveau_hwmon.o
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index c636142..82bd465 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -198,7 +198,7 @@
 		int *burst, int *lwm)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nv_fifo_info fifo_data;
 	struct nv_sim_state sim_data;
 	int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index af7249c..78cb033 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -65,8 +65,8 @@
 
 static int sample_load_twice(struct drm_device *dev, bool sense[2])
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
-	struct nvkm_timer *ptimer = nvxx_timer(device);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvif_object *device = &drm->device.object;
 	int i;
 
 	for (i = 0; i < 2; i++) {
@@ -80,17 +80,22 @@
 		 * use a 10ms timeout (guards against crtc being inactive, in
 		 * which case blank state would never change)
 		 */
-		if (!nvkm_timer_wait_eq(ptimer, 10000000,
-					NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000000))
+		if (nvif_msec(&drm->device, 10,
+			if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
+				break;
+		) < 0)
 			return -EBUSY;
-		if (!nvkm_timer_wait_eq(ptimer, 10000000,
-					NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000001))
+
+		if (nvif_msec(&drm->device, 10,
+			if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
+				break;
+		) < 0)
 			return -EBUSY;
-		if (!nvkm_timer_wait_eq(ptimer, 10000000,
-					NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000000))
+
+		if (nvif_msec(&drm->device, 10,
+			if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
+				break;
+		) < 0)
 			return -EBUSY;
 
 		udelay(100);
@@ -128,7 +133,7 @@
 						 struct drm_connector *connector)
 {
 	struct drm_device *dev = encoder->dev;
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
 	uint8_t saved_palette0[3], saved_palette_mask;
@@ -231,8 +236,8 @@
 {
 	struct drm_device *dev = encoder->dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &nouveau_drm(dev)->device;
-	struct nvkm_gpio *gpio = nvxx_gpio(device);
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
+	struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
 	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
 	uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
 	uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -265,10 +270,10 @@
 	}
 
 	if (gpio) {
-		saved_gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
-		saved_gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
-		gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, dcb->type == DCB_OUTPUT_TV);
-		gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, dcb->type == DCB_OUTPUT_TV);
+		saved_gpio1 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
+		saved_gpio0 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
+		nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, dcb->type == DCB_OUTPUT_TV);
+		nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, dcb->type == DCB_OUTPUT_TV);
 	}
 
 	msleep(4);
@@ -320,8 +325,8 @@
 	nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
 
 	if (gpio) {
-		gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1);
-		gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, saved_gpio0);
+		nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1);
+		nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, saved_gpio0);
 	}
 
 	return sample;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 7cfb0cb..429ab5e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -281,7 +281,7 @@
 			      struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
@@ -485,7 +485,7 @@
 {
 #ifdef __powerpc__
 	struct drm_device *dev = encoder->dev;
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 
 	/* BIOS scripts usually take care of the backlight, thanks
 	 * Apple for your consistency.
@@ -493,11 +493,11 @@
 	if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
 	    dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
 		if (mode == DRM_MODE_DPMS_ON) {
-			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
-			nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
+			nvif_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31);
+			nvif_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
 		} else {
-			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
-			nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 0);
+			nvif_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
+			nvif_mask(device, NV_PCRTC_GPIO_EXT, 3, 0);
 		}
 	}
 #endif
@@ -624,8 +624,8 @@
 	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
-	struct nvkm_i2c_port *port = i2c->find(i2c, 2);
-	struct nvkm_i2c_board_info info[] = {
+	struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
+	struct nvkm_i2c_bus_probe info[] = {
 		{
 		    {
 		        .type = "sil164",
@@ -639,16 +639,15 @@
 	};
 	int type;
 
-	if (!nv_gf4_disp_arch(dev) || !port ||
-	    get_tmds_slave(encoder))
+	if (!nv_gf4_disp_arch(dev) || !bus || get_tmds_slave(encoder))
 		return;
 
-	type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL, NULL);
+	type = nvkm_i2c_bus_probe(bus, "TMDS transmitter", info, NULL, NULL);
 	if (type < 0)
 		return;
 
 	drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
-			     &port->adapter, &info[type].dev);
+			     &bus->i2c, &info[type].dev);
 }
 
 static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 4131be55..9e65008 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -47,7 +47,7 @@
 	if (!disp)
 		return -ENOMEM;
 
-	nvif_object_map(nvif_object(&drm->device));
+	nvif_object_map(&drm->device.object);
 
 	nouveau_display(dev)->priv = disp;
 	nouveau_display(dev)->dtor = nv04_display_destroy;
@@ -101,7 +101,9 @@
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-		nv_encoder->i2c = i2c->find(i2c, nv_encoder->dcb->i2c_index);
+		struct nvkm_i2c_bus *bus =
+			nvkm_i2c_bus_find(i2c, nv_encoder->dcb->i2c_index);
+		nv_encoder->i2c = bus ? &bus->i2c : NULL;
 	}
 
 	/* Save previous state */
@@ -151,7 +153,7 @@
 	nouveau_display(dev)->priv = NULL;
 	kfree(disp);
 
-	nvif_object_unmap(nvif_object(&drm->device));
+	nvif_object_unmap(&drm->device.object);
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index c910c5d..6c9a1e8 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -172,7 +172,7 @@
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_bios *bios = nvxx_bios(&drm->device);
 	struct nvbios_init init = {
-		.subdev = nv_subdev(bios),
+		.subdev = &bios->subdev,
 		.bios = bios,
 		.offset = table,
 		.outp = outp,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 42e07af..956a833 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -165,8 +165,8 @@
 		       struct nvkm_pll_vals *pllvals)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
-	struct nvkm_bios *bios = nvxx_bios(device);
+	struct nvif_object *device = &drm->device.object;
+	struct nvkm_bios *bios = nvxx_bios(&drm->device);
 	uint32_t reg1, pll1, pll2 = 0;
 	struct nvbios_pll pll_lim;
 	int ret;
@@ -660,8 +660,7 @@
 		  struct nv04_mode_state *state)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
-	struct nvkm_timer *ptimer = nvxx_timer(device);
+	struct nvif_object *device = &drm->device.object;
 	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
 	uint32_t reg900;
 	int i;
@@ -678,10 +677,10 @@
 		nvif_wr32(device, NV_PVIDEO_INTR_EN, 0);
 		nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
 		nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
-		nvif_wr32(device, NV_PVIDEO_LIMIT(0), device->info.ram_size - 1);
-		nvif_wr32(device, NV_PVIDEO_LIMIT(1), device->info.ram_size - 1);
-		nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), device->info.ram_size - 1);
-		nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), device->info.ram_size - 1);
+		nvif_wr32(device, NV_PVIDEO_LIMIT(0), drm->device.info.ram_size - 1);
+		nvif_wr32(device, NV_PVIDEO_LIMIT(1), drm->device.info.ram_size - 1);
+		nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), drm->device.info.ram_size - 1);
+		nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), drm->device.info.ram_size - 1);
 		nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0);
 
 		NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
@@ -741,8 +740,14 @@
 		if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) {
 			/* Not waiting for vertical retrace before modifying
 			   CRE_53/CRE_54 causes lockups. */
-			nvkm_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
-			nvkm_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+			nvif_msec(&drm->device, 650,
+				if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
+					break;
+			);
+			nvif_msec(&drm->device, 650,
+				if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
+					break;
+			);
 		}
 
 		wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
@@ -765,7 +770,7 @@
 nv_save_state_palette(struct drm_device *dev, int head,
 		      struct nv04_mode_state *state)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	int head_offset = head * NV_PRMDIO_SIZE, i;
 
 	nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
@@ -784,7 +789,7 @@
 nouveau_hw_load_state_palette(struct drm_device *dev, int head,
 			      struct nv04_mode_state *state)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	int head_offset = head * NV_PRMDIO_SIZE, i;
 
 	nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h
index 6c79617..3bded60 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h
@@ -60,7 +60,7 @@
 static inline uint32_t NVReadCRTC(struct drm_device *dev,
 					int head, uint32_t reg)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	uint32_t val;
 	if (head)
 		reg += NV_PCRTC0_SIZE;
@@ -71,7 +71,7 @@
 static inline void NVWriteCRTC(struct drm_device *dev,
 					int head, uint32_t reg, uint32_t val)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	if (head)
 		reg += NV_PCRTC0_SIZE;
 	nvif_wr32(device, reg, val);
@@ -80,7 +80,7 @@
 static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
 					int head, uint32_t reg)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	uint32_t val;
 	if (head)
 		reg += NV_PRAMDAC0_SIZE;
@@ -91,7 +91,7 @@
 static inline void NVWriteRAMDAC(struct drm_device *dev,
 					int head, uint32_t reg, uint32_t val)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	if (head)
 		reg += NV_PRAMDAC0_SIZE;
 	nvif_wr32(device, reg, val);
@@ -120,7 +120,7 @@
 static inline void NVWriteVgaCrtc(struct drm_device *dev,
 					int head, uint8_t index, uint8_t value)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
 	nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
 }
@@ -128,7 +128,7 @@
 static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
 					int head, uint8_t index)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	uint8_t val;
 	nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
 	val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
@@ -165,7 +165,7 @@
 static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
 					int head, uint32_t reg)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t val;
 
@@ -181,7 +181,7 @@
 static inline void NVWritePRMVIO(struct drm_device *dev,
 					int head, uint32_t reg, uint8_t value)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
@@ -194,14 +194,14 @@
 
 static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
 	nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
 }
 
 static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
 	return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
 }
@@ -209,7 +209,7 @@
 static inline void NVWriteVgaAttr(struct drm_device *dev,
 					int head, uint8_t index, uint8_t value)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	if (NVGetEnablePalette(dev, head))
 		index &= ~0x20;
 	else
@@ -223,7 +223,7 @@
 static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
 					int head, uint8_t index)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	uint8_t val;
 	if (NVGetEnablePalette(dev, head))
 		index &= ~0x20;
@@ -259,7 +259,7 @@
 static inline bool
 nv_heads_tied(struct drm_device *dev)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_object *device = &nouveau_drm(dev)->device.object;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	if (drm->device.info.chipset == 0x11)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 9f24985..aeebdd4 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -96,7 +96,8 @@
 		  uint32_t src_x, uint32_t src_y,
 		  uint32_t src_w, uint32_t src_h)
 {
-	struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
+	struct nouveau_drm *drm = nouveau_drm(plane->dev);
+	struct nvif_object *dev = &drm->device.object;
 	struct nouveau_plane *nv_plane =
 		container_of(plane, struct nouveau_plane, base);
 	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
@@ -118,7 +119,7 @@
 	if (format > 0xffff)
 		return -ERANGE;
 
-	if (dev->info.chipset >= 0x30) {
+	if (drm->device.info.chipset >= 0x30) {
 		if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
 			return -ERANGE;
 	} else {
@@ -173,7 +174,7 @@
 static int
 nv10_disable_plane(struct drm_plane *plane)
 {
-	struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
+	struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
 	struct nouveau_plane *nv_plane =
 		container_of(plane, struct nouveau_plane, base);
 
@@ -197,7 +198,7 @@
 static void
 nv10_set_params(struct nouveau_plane *plane)
 {
-	struct nvif_device *dev = &nouveau_drm(plane->base.dev)->device;
+	struct nvif_object *dev = &nouveau_drm(plane->base.dev)->device.object;
 	u32 luma = (plane->brightness - 512) << 16 | plane->contrast;
 	u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) |
 		(cos_mul(plane->hue, plane->saturation) & 0xffff);
@@ -261,7 +262,7 @@
 {
 	struct nouveau_drm *drm = nouveau_drm(device);
 	struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
-	int num_formats = ARRAY_SIZE(formats);
+	unsigned int num_formats = ARRAY_SIZE(formats);
 	int ret;
 
 	if (!plane)
@@ -346,7 +347,7 @@
 		  uint32_t src_x, uint32_t src_y,
 		  uint32_t src_w, uint32_t src_h)
 {
-	struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
+	struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
 	struct nouveau_plane *nv_plane =
 		container_of(plane, struct nouveau_plane, base);
 	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
@@ -426,7 +427,7 @@
 static int
 nv04_disable_plane(struct drm_plane *plane)
 {
-	struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
+	struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
 	struct nouveau_plane *nv_plane =
 		container_of(plane, struct nouveau_plane, base);
 
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 70e95cf..5345eb5 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -35,7 +35,7 @@
 
 #include <drm/i2c/ch7006.h>
 
-static struct nvkm_i2c_board_info nv04_tv_encoder_info[] = {
+static struct nvkm_i2c_bus_probe nv04_tv_encoder_info[] = {
 	{
 		{
 			I2C_BOARD_INFO("ch7006", 0x75),
@@ -55,9 +55,13 @@
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
-
-	return i2c->identify(i2c, i2c_index, "TV encoder",
-			     nv04_tv_encoder_info, NULL, NULL);
+	struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, i2c_index);
+	if (bus) {
+		return nvkm_i2c_bus_probe(bus, "TV encoder",
+					  nv04_tv_encoder_info,
+					  NULL, NULL);
+	}
+	return -ENODEV;
 }
 
 
@@ -205,7 +209,7 @@
 	struct drm_device *dev = connector->dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
-	struct nvkm_i2c_port *port = i2c->find(i2c, entry->i2c_index);
+	struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, entry->i2c_index);
 	int type, ret;
 
 	/* Ensure that we can talk to this encoder */
@@ -231,7 +235,7 @@
 
 	/* Run the slave-specific initialization */
 	ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
-				   &port->adapter,
+				   &bus->i2c,
 				   &nv04_tv_encoder_info[type].dev);
 	if (ret < 0)
 		goto fail_cleanup;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index d9720dd..b734195 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -62,8 +62,8 @@
 	head = (dacclk & 0x100) >> 8;
 
 	/* Save the previous state. */
-	gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
-	gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
+	gpio1 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
+	gpio0 = nvkm_gpio_get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
 	fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
 	fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
 	fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
@@ -74,8 +74,8 @@
 	ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
 
 	/* Prepare the DAC for load detection.  */
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, true);
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, true);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, true);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, true);
 
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
@@ -120,8 +120,8 @@
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, gpio1);
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, gpio0);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, gpio1);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, gpio0);
 
 	return sample;
 }
@@ -130,18 +130,10 @@
 get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
+	struct nvkm_device *device = nvxx_device(&drm->device);
 
-	/* Zotac FX5200 */
-	if (nv_device_match(nvxx_object(device), 0x0322, 0x19da, 0x1035) ||
-	    nv_device_match(nvxx_object(device), 0x0322, 0x19da, 0x2035)) {
-		*pin_mask = 0xc;
-		return false;
-	}
-
-	/* MSI nForce2 IGP */
-	if (nv_device_match(nvxx_object(device), 0x01f0, 0x1462, 0x5710)) {
-		*pin_mask = 0xc;
+	if (device->quirk && device->quirk->tv_pin_mask) {
+		*pin_mask = device->quirk->tv_pin_mask;
 		return false;
 	}
 
@@ -395,8 +387,8 @@
 
 	nv_load_ptv(dev, regs, 200);
 
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, mode == DRM_MODE_DPMS_ON);
-	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, mode == DRM_MODE_DPMS_ON);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, mode == DRM_MODE_DPMS_ON);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, mode == DRM_MODE_DPMS_ON);
 
 	nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
 }
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
index 225894c..459910b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
@@ -131,13 +131,13 @@
 				uint32_t val)
 {
 	struct nvif_device *device = &nouveau_drm(dev)->device;
-	nvif_wr32(device, reg, val);
+	nvif_wr32(&device->object, reg, val);
 }
 
 static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
 {
 	struct nvif_device *device = &nouveau_drm(dev)->device;
-	return nvif_rd32(device, reg);
+	return nvif_rd32(&device->object, reg);
 }
 
 static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg,
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 64f8b2f..95a64d8 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -45,6 +45,11 @@
 #define GM107_DISP                                                   0x00009470
 #define GM204_DISP                                                   0x00009570
 
+#define NV31_MPEG                                                    0x00003174
+#define G82_MPEG                                                     0x00008274
+
+#define NV74_VP2                                                     0x00007476
+
 #define NV50_DISP_CURSOR                                             0x0000507a
 #define G82_DISP_CURSOR                                              0x0000827a
 #define GT214_DISP_CURSOR                                            0x0000857a
@@ -94,15 +99,40 @@
 #define MAXWELL_A                                                    0x0000b097
 #define MAXWELL_B                                                    0x0000b197
 
+#define NV74_BSP                                                     0x000074b0
+
+#define GT212_MSVLD                                                  0x000085b1
+#define IGT21A_MSVLD                                                 0x000086b1
+#define G98_MSVLD                                                    0x000088b1
+#define GF100_MSVLD                                                  0x000090b1
+#define GK104_MSVLD                                                  0x000095b1
+
+#define GT212_MSPDEC                                                 0x000085b2
+#define G98_MSPDEC                                                   0x000088b2
+#define GF100_MSPDEC                                                 0x000090b2
+#define GK104_MSPDEC                                                 0x000095b2
+
+#define GT212_MSPPP                                                  0x000085b3
+#define G98_MSPPP                                                    0x000088b3
+#define GF100_MSPPP                                                  0x000090b3
+
+#define G98_SEC                                                      0x000088b4
+
+#define GT212_DMA                                                    0x000085b5
+#define FERMI_DMA                                                    0x000090b5
+#define KEPLER_DMA_COPY_A                                            0x0000a0b5
+#define MAXWELL_DMA_COPY_A                                           0x0000b0b5
+
+#define FERMI_DECOMPRESS                                             0x000090b8
+
 #define FERMI_COMPUTE_A                                              0x000090c0
 #define FERMI_COMPUTE_B                                              0x000091c0
-
 #define KEPLER_COMPUTE_A                                             0x0000a0c0
 #define KEPLER_COMPUTE_B                                             0x0000a1c0
-
 #define MAXWELL_COMPUTE_A                                            0x0000b0c0
 #define MAXWELL_COMPUTE_B                                            0x0000b1c0
 
+#define NV74_CIPHER                                                  0x000074c1
 
 /*******************************************************************************
  * client
@@ -126,32 +156,10 @@
 	__u8  version;
 	__u8  pad01[7];
 	__u64 device;	/* device identifier, ~0 for client default */
-#define NV_DEVICE_V0_DISABLE_IDENTIFY                     0x0000000000000001ULL
-#define NV_DEVICE_V0_DISABLE_MMIO                         0x0000000000000002ULL
-#define NV_DEVICE_V0_DISABLE_VBIOS                        0x0000000000000004ULL
-#define NV_DEVICE_V0_DISABLE_CORE                         0x0000000000000008ULL
-#define NV_DEVICE_V0_DISABLE_DISP                         0x0000000000010000ULL
-#define NV_DEVICE_V0_DISABLE_FIFO                         0x0000000000020000ULL
-#define NV_DEVICE_V0_DISABLE_GR                           0x0000000100000000ULL
-#define NV_DEVICE_V0_DISABLE_MPEG                         0x0000000200000000ULL
-#define NV_DEVICE_V0_DISABLE_ME                           0x0000000400000000ULL
-#define NV_DEVICE_V0_DISABLE_VP                           0x0000000800000000ULL
-#define NV_DEVICE_V0_DISABLE_CIPHER                       0x0000001000000000ULL
-#define NV_DEVICE_V0_DISABLE_BSP                          0x0000002000000000ULL
-#define NV_DEVICE_V0_DISABLE_MSPPP                        0x0000004000000000ULL
-#define NV_DEVICE_V0_DISABLE_CE0                          0x0000008000000000ULL
-#define NV_DEVICE_V0_DISABLE_CE1                          0x0000010000000000ULL
-#define NV_DEVICE_V0_DISABLE_VIC                          0x0000020000000000ULL
-#define NV_DEVICE_V0_DISABLE_MSENC                        0x0000040000000000ULL
-#define NV_DEVICE_V0_DISABLE_CE2                          0x0000080000000000ULL
-#define NV_DEVICE_V0_DISABLE_MSVLD                        0x0000100000000000ULL
-#define NV_DEVICE_V0_DISABLE_SEC                          0x0000200000000000ULL
-#define NV_DEVICE_V0_DISABLE_MSPDEC                       0x0000400000000000ULL
-	__u64 disable;	/* disable particular subsystems */
-	__u64 debug0;	/* as above, but *internal* ids, and *NOT* ABI */
 };
 
 #define NV_DEVICE_V0_INFO                                                  0x00
+#define NV_DEVICE_V0_TIME                                                  0x01
 
 struct nv_device_info_v0 {
 	__u8  version;
@@ -176,6 +184,14 @@
 	__u8  pad06[2];
 	__u64 ram_size;
 	__u64 ram_user;
+	char  chip[16];
+	char  name[64];
+};
+
+struct nv_device_time_v0 {
+	__u8  version;
+	__u8  pad01[7];
+	__u64 time;
 };
 
 
@@ -235,13 +251,13 @@
 	__u8  pad03[5];
 };
 
-struct gf110_dma_v0 {
+struct gf119_dma_v0 {
 	__u8  version;
-#define GF110_DMA_V0_PAGE_LP                                               0x00
-#define GF110_DMA_V0_PAGE_SP                                               0x01
+#define GF119_DMA_V0_PAGE_LP                                               0x00
+#define GF119_DMA_V0_PAGE_SP                                               0x01
 	__u8  page;
-#define GF110_DMA_V0_KIND_PITCH                                            0x00
-#define GF110_DMA_V0_KIND_VM                                               0xff
+#define GF119_DMA_V0_KIND_PITCH                                            0x00
+#define GF119_DMA_V0_KIND_VM                                               0xff
 	__u8  kind;
 	__u8  pad03[5];
 };
@@ -251,33 +267,74 @@
  * perfmon
  ******************************************************************************/
 
-struct nvif_perfctr_v0 {
-	__u8  version;
-	__u8  pad01[1];
-	__u16 logic_op;
-	__u8  pad04[4];
-	char  name[4][64];
-};
+#define NVIF_PERFMON_V0_QUERY_DOMAIN                                       0x00
+#define NVIF_PERFMON_V0_QUERY_SIGNAL                                       0x01
+#define NVIF_PERFMON_V0_QUERY_SOURCE                                       0x02
 
-#define NVIF_PERFCTR_V0_QUERY                                              0x00
-#define NVIF_PERFCTR_V0_SAMPLE                                             0x01
-#define NVIF_PERFCTR_V0_READ                                               0x02
-
-struct nvif_perfctr_query_v0 {
+struct nvif_perfmon_query_domain_v0 {
 	__u8  version;
-	__u8  pad01[3];
-	__u32 iter;
+	__u8  id;
+	__u8  counter_nr;
+	__u8  iter;
+	__u16 signal_nr;
+	__u8  pad05[2];
 	char  name[64];
 };
 
-struct nvif_perfctr_sample {
+struct nvif_perfmon_query_signal_v0 {
+	__u8  version;
+	__u8  domain;
+	__u16 iter;
+	__u8  signal;
+	__u8  source_nr;
+	__u8  pad05[2];
+	char  name[64];
 };
 
-struct nvif_perfctr_read_v0 {
+struct nvif_perfmon_query_source_v0 {
+	__u8  version;
+	__u8  domain;
+	__u8  signal;
+	__u8  iter;
+	__u8  pad04[4];
+	__u32 source;
+	__u32 mask;
+	char  name[64];
+};
+
+
+/*******************************************************************************
+ * perfdom
+ ******************************************************************************/
+
+struct nvif_perfdom_v0 {
+	__u8  version;
+	__u8  domain;
+	__u8  mode;
+	__u8  pad03[1];
+	struct {
+		__u8  signal[4];
+		__u64 source[4][8];
+		__u16 logic_op;
+	} ctr[4];
+};
+
+#define NVIF_PERFDOM_V0_INIT                                               0x00
+#define NVIF_PERFDOM_V0_SAMPLE                                             0x01
+#define NVIF_PERFDOM_V0_READ                                               0x02
+
+struct nvif_perfdom_init {
+};
+
+struct nvif_perfdom_sample {
+};
+
+struct nvif_perfdom_read_v0 {
 	__u8  version;
 	__u8  pad01[7];
-	__u32 ctr;
+	__u32 ctr[4];
 	__u32 clk;
+	__u8  pad04[4];
 };
 
 
@@ -337,7 +394,16 @@
 	__u8  version;
 	__u8  chid;
 	__u8  pad02[2];
-	__u32 pushbuf;
+	__u32 offset;
+	__u64 pushbuf;
+};
+
+struct nv50_channel_dma_v0 {
+	__u8  version;
+	__u8  chid;
+	__u8  pad02[6];
+	__u64 vm;
+	__u64 pushbuf;
 	__u64 offset;
 };
 
@@ -350,10 +416,20 @@
 struct nv50_channel_gpfifo_v0 {
 	__u8  version;
 	__u8  chid;
-	__u8  pad01[6];
-	__u32 pushbuf;
+	__u8  pad02[2];
 	__u32 ilength;
 	__u64 ioffset;
+	__u64 pushbuf;
+	__u64 vm;
+};
+
+struct fermi_channel_gpfifo_v0 {
+	__u8  version;
+	__u8  chid;
+	__u8  pad02[2];
+	__u32 ilength;
+	__u64 ioffset;
+	__u64 vm;
 };
 
 struct kepler_channel_gpfifo_a_v0 {
@@ -367,10 +443,9 @@
 #define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_ENC                              0x40
 	__u8  engine;
 	__u16 chid;
-	__u8  pad04[4];
-	__u32 pushbuf;
 	__u32 ilength;
 	__u64 ioffset;
+	__u64 vm;
 };
 
 /*******************************************************************************
@@ -491,8 +566,8 @@
 /* core */
 struct nv50_disp_core_channel_dma_v0 {
 	__u8  version;
-	__u8  pad01[3];
-	__u32 pushbuf;
+	__u8  pad01[7];
+	__u64 pushbuf;
 };
 
 #define NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT                          0x00
@@ -509,9 +584,9 @@
 /* base */
 struct nv50_disp_base_channel_dma_v0 {
 	__u8  version;
-	__u8  pad01[2];
 	__u8  head;
-	__u32 pushbuf;
+	__u8  pad02[6];
+	__u64 pushbuf;
 };
 
 #define NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT                          0x00
@@ -519,9 +594,9 @@
 /* overlay */
 struct nv50_disp_overlay_channel_dma_v0 {
 	__u8  version;
-	__u8  pad01[2];
 	__u8  head;
-	__u32 pushbuf;
+	__u8  pad02[6];
+	__u64 pushbuf;
 };
 
 #define NV50_DISP_OVERLAY_CHANNEL_DMA_V0_NTFY_UEVENT                       0x00
@@ -536,6 +611,20 @@
 #define NV50_DISP_OVERLAY_V0_NTFY_UEVENT                                   0x00
 
 /*******************************************************************************
+ * software
+ ******************************************************************************/
+
+#define NVSW_NTFY_UEVENT                                                   0x00
+
+#define NV04_NVSW_GET_REF                                                  0x00
+
+struct nv04_nvsw_get_ref_v0 {
+	__u8  version;
+	__u8  pad01[3];
+	__u32 ref;
+};
+
+/*******************************************************************************
  * fermi
  ******************************************************************************/
 
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index eca648e..4a7f6f7 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -4,36 +4,25 @@
 #include <nvif/object.h>
 
 struct nvif_client {
-	struct nvif_object base;
-	struct nvif_object *object; /*XXX: hack for nvif_object() */
+	struct nvif_object object;
 	const struct nvif_driver *driver;
+	u64 version;
+	u8 route;
 	bool super;
 };
 
-static inline struct nvif_client *
-nvif_client(struct nvif_object *object)
-{
-	while (object && object->parent != object)
-		object = object->parent;
-	return (void *)object;
-}
-
-int  nvif_client_init(void (*dtor)(struct nvif_client *), const char *,
-		      const char *, u64, const char *, const char *,
+int  nvif_client_init(const char *drv, const char *name, u64 device,
+		      const char *cfg, const char *dbg,
 		      struct nvif_client *);
 void nvif_client_fini(struct nvif_client *);
-int  nvif_client_new(const char *, const char *, u64, const char *,
-		     const char *, struct nvif_client **);
-void nvif_client_ref(struct nvif_client *, struct nvif_client **);
 int  nvif_client_ioctl(struct nvif_client *, void *, u32);
 int  nvif_client_suspend(struct nvif_client *);
 int  nvif_client_resume(struct nvif_client *);
 
 /*XXX*/
 #include <core/client.h>
-#define nvxx_client(a) ({ \
-	struct nvif_client *_client = nvif_client(nvif_object(a)); \
-	nvkm_client(_client->base.priv); \
+#define nvxx_client(a) ({                                                      \
+	struct nvif_client *_client = (a);                                     \
+	(struct nvkm_client *)_client->object.priv;                            \
 })
-
 #endif
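
nvif_client_init() now takes the destination structure directly instead of a destructor callback and a returned pointer, and nvxx_client() becomes a plain cast of the priv pointer rather than a walk up a parent chain. A hedged sketch based on the prototype above (argument values are illustrative; ~0 selecting the client's default device mirrors the nv_device_v0 convention):

    struct nvif_client client;
    int ret = nvif_client_init(NULL /* driver */, "client0", ~0ULL,
                               NULL /* cfg */, NULL /* dbg */, &client);
    if (ret == 0)
            nvif_client_fini(&client);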
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index 88553a7..700a9b2 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -5,26 +5,35 @@
 #include <nvif/class.h>
 
 struct nvif_device {
-	struct nvif_object base;
-	struct nvif_object *object; /*XXX: hack for nvif_object() */
+	struct nvif_object object;
 	struct nv_device_info_v0 info;
 };
 
-static inline struct nvif_device *
-nvif_device(struct nvif_object *object)
-{
-	while (object && object->oclass != 0x0080 /*XXX: NV_DEVICE_CLASS*/ )
-		object = object->parent;
-	return (void *)object;
-}
-
-int  nvif_device_init(struct nvif_object *, void (*dtor)(struct nvif_device *),
-		      u32 handle, u32 oclass, void *, u32,
+int  nvif_device_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
 		      struct nvif_device *);
 void nvif_device_fini(struct nvif_device *);
-int  nvif_device_new(struct nvif_object *, u32 handle, u32 oclass,
-		     void *, u32, struct nvif_device **);
-void nvif_device_ref(struct nvif_device *, struct nvif_device **);
+u64  nvif_device_time(struct nvif_device *);
+
+/* Delay based on GPU time (i.e. PTIMER).
+ *
+ * Will return -ETIMEDOUT unless the loop was terminated with 'break',
+ * in which case it will return the number of nanoseconds taken instead.
+ */
+#define nvif_nsec(d,n,cond...) ({                                              \
+	struct nvif_device *_device = (d);                                     \
+	u64 _nsecs = (n), _time0 = nvif_device_time(_device);                  \
+	s64 _taken = 0;                                                        \
+                                                                               \
+	do {                                                                   \
+		cond                                                           \
+	} while (_taken = nvif_device_time(_device) - _time0, _taken < _nsecs);\
+                                                                               \
+	if (_taken >= _nsecs)                                                  \
+		_taken = -ETIMEDOUT;                                           \
+	_taken;                                                                \
+})
+#define nvif_usec(d,u,cond...) nvif_nsec((d), (u) * 1000, ##cond)
+#define nvif_msec(d,m,cond...) nvif_usec((d), (m) * 1000, ##cond)
 
 /*XXX*/
 #include <subdev/bios.h>
@@ -36,26 +45,30 @@
 #include <subdev/i2c.h>
 #include <subdev/timer.h>
 #include <subdev/therm.h>
+#include <subdev/pci.h>
 
-#define nvxx_device(a) nv_device(nvxx_object((a)))
-#define nvxx_bios(a) nvkm_bios(nvxx_device(a))
-#define nvxx_fb(a) nvkm_fb(nvxx_device(a))
-#define nvxx_mmu(a) nvkm_mmu(nvxx_device(a))
-#define nvxx_bar(a) nvkm_bar(nvxx_device(a))
-#define nvxx_gpio(a) nvkm_gpio(nvxx_device(a))
-#define nvxx_clk(a) nvkm_clk(nvxx_device(a))
-#define nvxx_i2c(a) nvkm_i2c(nvxx_device(a))
-#define nvxx_timer(a) nvkm_timer(nvxx_device(a))
-#define nvxx_wait(a,b,c,d) nv_wait(nvxx_timer(a), (b), (c), (d))
-#define nvxx_wait_cb(a,b,c) nv_wait_cb(nvxx_timer(a), (b), (c))
-#define nvxx_therm(a) nvkm_therm(nvxx_device(a))
+#define nvxx_device(a) ({                                                      \
+	struct nvif_device *_device = (a);                                     \
+	struct {                                                               \
+		struct nvkm_object object;                                     \
+		struct nvkm_device *device;                                    \
+	} *_udevice = _device->object.priv;                                    \
+	_udevice->device;                                                      \
+})
+#define nvxx_bios(a) nvxx_device(a)->bios
+#define nvxx_fb(a) nvxx_device(a)->fb
+#define nvxx_mmu(a) nvxx_device(a)->mmu
+#define nvxx_bar(a) nvxx_device(a)->bar
+#define nvxx_gpio(a) nvxx_device(a)->gpio
+#define nvxx_clk(a) nvxx_device(a)->clk
+#define nvxx_i2c(a) nvxx_device(a)->i2c
+#define nvxx_therm(a) nvxx_device(a)->therm
 
 #include <core/device.h>
 #include <engine/fifo.h>
 #include <engine/gr.h>
 #include <engine/sw.h>
 
-#define nvxx_fifo(a) nvkm_fifo(nvxx_device(a))
-#define nvxx_fifo_chan(a) ((struct nvkm_fifo_chan *)nvxx_object(a))
-#define nvxx_gr(a) ((struct nvkm_gr *)nvkm_engine(nvxx_object(a), NVDEV_ENGINE_GR))
+#define nvxx_fifo(a) nvxx_device(a)->fifo
+#define nvxx_gr(a) nvxx_device(a)->gr
 #endif
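
The nvif_nsec/usec/msec macros replace the old nvkm_timer_wait_eq() interface: the condition body is polled until it executes 'break', and the expression evaluates to the elapsed nanoseconds on success or -ETIMEDOUT otherwise. A sketch of the idiom used throughout this series (the register offset is illustrative):

    /* poll until bit 0 of some register clears, giving up after 10ms;
     * 'device' is a struct nvif_device * and 'object' is &device->object */
    s64 taken = nvif_msec(device, 10,
            if (!(nvif_rd32(object, 0x00b000 /* illustrative */) & 1))
                    break;
    );
    if (taken < 0)
            return -EBUSY;  /* the macro evaluated to -ETIMEDOUT */
    /* otherwise 'taken' holds the time the wait actually took, in ns */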
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index 4cd8e32..b0ac021 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
@@ -1,11 +1,10 @@
 #ifndef __NVIF_IOCTL_H__
 #define __NVIF_IOCTL_H__
 
+#define NVIF_VERSION_LATEST                               0x0000000000000000ULL
+
 struct nvif_ioctl_v0 {
 	__u8  version;
-#define NVIF_IOCTL_V0_OWNER_NVIF                                           0x00
-#define NVIF_IOCTL_V0_OWNER_ANY                                            0xff
-	__u8  owner;
 #define NVIF_IOCTL_V0_NOP                                                  0x00
 #define NVIF_IOCTL_V0_SCLASS                                               0x01
 #define NVIF_IOCTL_V0_NEW                                                  0x02
@@ -20,17 +19,20 @@
 #define NVIF_IOCTL_V0_NTFY_GET                                             0x0b
 #define NVIF_IOCTL_V0_NTFY_PUT                                             0x0c
 	__u8  type;
-	__u8  path_nr;
+	__u8  pad02[4];
+#define NVIF_IOCTL_V0_OWNER_NVIF                                           0x00
+#define NVIF_IOCTL_V0_OWNER_ANY                                            0xff
+	__u8  owner;
 #define NVIF_IOCTL_V0_ROUTE_NVIF                                           0x00
 #define NVIF_IOCTL_V0_ROUTE_HIDDEN                                         0xff
-	__u8  pad04[3];
 	__u8  route;
 	__u64 token;
-	__u32 path[8];		/* in reverse */
+	__u64 object;
 	__u8  data[];		/* ioctl data (below) */
 };
 
-struct nvif_ioctl_nop {
+struct nvif_ioctl_nop_v0 {
+	__u64 version;
 };
 
 struct nvif_ioctl_sclass_v0 {
@@ -38,7 +40,11 @@
 	__u8  version;
 	__u8  count;
 	__u8  pad02[6];
-	__u32 oclass[];
+	struct nvif_ioctl_sclass_oclass_v0 {
+		__s32 oclass;
+		__s16 minver;
+		__s16 maxver;
+	} oclass[];
 };
 
 struct nvif_ioctl_new_v0 {
@@ -47,11 +53,17 @@
 	__u8  pad01[6];
 	__u8  route;
 	__u64 token;
+	__u64 object;
 	__u32 handle;
 /* these class numbers are made up by us, and not nvidia-assigned */
-#define NVIF_IOCTL_NEW_V0_PERFCTR                                    0x0000ffff
-#define NVIF_IOCTL_NEW_V0_CONTROL                                    0x0000fffe
-	__u32 oclass;
+#define NVIF_IOCTL_NEW_V0_CONTROL                                            -1
+#define NVIF_IOCTL_NEW_V0_PERFMON                                            -2
+#define NVIF_IOCTL_NEW_V0_PERFDOM                                            -3
+#define NVIF_IOCTL_NEW_V0_SW_NV04                                            -4
+#define NVIF_IOCTL_NEW_V0_SW_NV10                                            -5
+#define NVIF_IOCTL_NEW_V0_SW_NV50                                            -6
+#define NVIF_IOCTL_NEW_V0_SW_GF100                                           -7
+	__s32 oclass;
 	__u8  data[];		/* class data (class.h) */
 };
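
Objects are now addressed by a single 64-bit token rather than the old reversed path[8] array, and class identifiers become signed so the made-up software classes can live below zero. A hedged sketch of how a request header might be filled under the new layout, using the nvif_handle() helper from nvif/object.h below (an assumption drawn from the structure, not code from this patch):

    struct nvif_ioctl_v0 hdr = {
            .version = 0,
            .type    = NVIF_IOCTL_V0_MTHD,
            .object  = nvif_handle(object), /* 64-bit token, was path[8] */
            .owner   = NVIF_IOCTL_V0_OWNER_ANY,
    };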
 
diff --git a/drivers/gpu/drm/nouveau/include/nvif/notify.h b/drivers/gpu/drm/nouveau/include/nvif/notify.h
index 9ebfa3b..51e2eb5 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/notify.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/notify.h
@@ -23,17 +23,11 @@
 	struct work_struct work;
 };
 
-int  nvif_notify_init(struct nvif_object *, void (*dtor)(struct nvif_notify *),
-		      int (*func)(struct nvif_notify *), bool work, u8 type,
-		      void *data, u32 size, u32 reply, struct nvif_notify *);
+int  nvif_notify_init(struct nvif_object *, int (*func)(struct nvif_notify *),
+		      bool work, u8 type, void *data, u32 size, u32 reply,
+		      struct nvif_notify *);
 int  nvif_notify_fini(struct nvif_notify *);
 int  nvif_notify_get(struct nvif_notify *);
 int  nvif_notify_put(struct nvif_notify *);
 int  nvif_notify(const void *, u32, const void *, u32);
-
-int  nvif_notify_new(struct nvif_object *, int (*func)(struct nvif_notify *),
-		     bool work, u8 type, void *data, u32 size, u32 reply,
-		     struct nvif_notify **);
-void nvif_notify_ref(struct nvif_notify *, struct nvif_notify **);
-
 #endif
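
Notifier setup follows the same simplification: the dtor callback and the _new/_ref allocators are gone, and callers embed the struct nvif_notify themselves. A hedged sketch (the handler name, type and request/reply sizes are placeholders):

    static int my_notify_handler(struct nvif_notify *notify)
    {
            return NVIF_NOTIFY_KEEP;        /* re-arm; NVIF_NOTIFY_DROP stops */
    }

    struct nvif_notify notify;
    int ret = nvif_notify_init(&device->object, my_notify_handler,
                               false /* no workqueue */, 0 /* type */,
                               NULL, 0, 0, &notify);
    if (ret == 0)
            nvif_notify_get(&notify);       /* enable delivery */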
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 04c8747..8d81596 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -3,73 +3,73 @@
 
 #include <nvif/os.h>
 
+struct nvif_sclass {
+	s32 oclass;
+	int minver;
+	int maxver;
+};
+
 struct nvif_object {
-	struct nvif_object *parent;
-	struct nvif_object *object; /*XXX: hack for nvif_object() */
-	struct kref refcount;
+	struct nvif_client *client;
 	u32 handle;
-	u32 oclass;
-	void *data;
-	u32   size;
+	s32 oclass;
 	void *priv; /*XXX: hack */
-	void (*dtor)(struct nvif_object *);
 	struct {
 		void __iomem *ptr;
 		u32 size;
 	} map;
 };
 
-int  nvif_object_init(struct nvif_object *, void (*dtor)(struct nvif_object *),
-		      u32 handle, u32 oclass, void *, u32,
+int  nvif_object_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
 		      struct nvif_object *);
 void nvif_object_fini(struct nvif_object *);
-int  nvif_object_new(struct nvif_object *, u32 handle, u32 oclass,
-		     void *, u32, struct nvif_object **);
-void nvif_object_ref(struct nvif_object *, struct nvif_object **);
 int  nvif_object_ioctl(struct nvif_object *, void *, u32, void **);
-int  nvif_object_sclass(struct nvif_object *, u32 *, int);
+int  nvif_object_sclass_get(struct nvif_object *, struct nvif_sclass **);
+void nvif_object_sclass_put(struct nvif_sclass **);
 u32  nvif_object_rd(struct nvif_object *, int, u64);
 void nvif_object_wr(struct nvif_object *, int, u64, u32);
 int  nvif_object_mthd(struct nvif_object *, u32, void *, u32);
 int  nvif_object_map(struct nvif_object *);
 void nvif_object_unmap(struct nvif_object *);
 
+#define nvif_handle(a) (unsigned long)(void *)(a)
 #define nvif_object(a) (a)->object
 
-#define ioread8_native ioread8
-#define iowrite8_native iowrite8
-#define nvif_rd(a,b,c) ({                                                      \
-	struct nvif_object *_object = nvif_object(a);                          \
+#define nvif_rd(a,f,b,c) ({                                                    \
+	struct nvif_object *_object = (a);                                     \
 	u32 _data;                                                             \
 	if (likely(_object->map.ptr))                                          \
-		_data = ioread##b##_native((u8 __iomem *)_object->map.ptr + (c));      \
+		_data = f((u8 __iomem *)_object->map.ptr + (c));               \
 	else                                                                   \
-		_data = nvif_object_rd(_object, (b) / 8, (c));                 \
+		_data = nvif_object_rd(_object, (b), (c));                     \
 	_data;                                                                 \
 })
-#define nvif_wr(a,b,c,d) ({                                                    \
-	struct nvif_object *_object = nvif_object(a);                          \
+#define nvif_wr(a,f,b,c,d) ({                                                  \
+	struct nvif_object *_object = (a);                                     \
 	if (likely(_object->map.ptr))                                          \
-		iowrite##b##_native((d), (u8 __iomem *)_object->map.ptr + (c));        \
+		f((d), (u8 __iomem *)_object->map.ptr + (c));                  \
 	else                                                                   \
-		nvif_object_wr(_object, (b) / 8, (c), (d));                    \
+		nvif_object_wr(_object, (b), (c), (d));                        \
 })
-#define nvif_rd08(a,b) ({ u8  _v = nvif_rd((a), 8, (b)); _v; })
-#define nvif_rd16(a,b) ({ u16 _v = nvif_rd((a), 16, (b)); _v; })
-#define nvif_rd32(a,b) ({ u32 _v = nvif_rd((a), 32, (b)); _v; })
-#define nvif_wr08(a,b,c) nvif_wr((a), 8, (b), (u8)(c))
-#define nvif_wr16(a,b,c) nvif_wr((a), 16, (b), (u16)(c))
-#define nvif_wr32(a,b,c) nvif_wr((a), 32, (b), (u32)(c))
+#define nvif_rd08(a,b) ({ ((u8)nvif_rd((a), ioread8, 1, (b))); })
+#define nvif_rd16(a,b) ({ ((u16)nvif_rd((a), ioread16_native, 2, (b))); })
+#define nvif_rd32(a,b) ({ ((u32)nvif_rd((a), ioread32_native, 4, (b))); })
+#define nvif_wr08(a,b,c) nvif_wr((a), iowrite8, 1, (b), (u8)(c))
+#define nvif_wr16(a,b,c) nvif_wr((a), iowrite16_native, 2, (b), (u16)(c))
+#define nvif_wr32(a,b,c) nvif_wr((a), iowrite32_native, 4, (b), (u32)(c))
 #define nvif_mask(a,b,c,d) ({                                                  \
-	u32 _v = nvif_rd32(nvif_object(a), (b));                               \
-	nvif_wr32(nvif_object(a), (b), (_v & ~(c)) | (d));                     \
-	_v;                                                                    \
+	struct nvif_object *__object = (a);                                    \
+	u32 _addr = (b), _data = nvif_rd32(__object, _addr);                   \
+	nvif_wr32(__object, _addr, (_data & ~(c)) | (d));                      \
+	_data;                                                                 \
 })
 
-#define nvif_mthd(a,b,c,d) nvif_object_mthd(nvif_object(a), (b), (c), (d))
+#define nvif_mthd(a,b,c,d) nvif_object_mthd((a), (b), (c), (d))
 
 /*XXX*/
 #include <core/object.h>
-#define nvxx_object(a) ((struct nvkm_object *)nvif_object(a)->priv)
-
+#define nvxx_object(a) ({                                                      \
+	struct nvif_object *_object = (a);                                     \
+	(struct nvkm_object *)_object->priv;                                   \
+})
 #endif
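
The accessor macros now take the nvif_object directly (no more nvif_object() indirection), name the ioread/iowrite function explicitly, and fall back to the nvif_object_rd()/nvif_object_wr() ioctl path whenever the object has not been mapped. A sketch, with 'device' an initialised struct nvif_device as in the disp.c change above (register offsets are illustrative):

    struct nvif_object *object = &device->object;
    u32 boot0;

    nvif_object_map(object);                /* take the fast mmio path */
    boot0 = nvif_rd32(object, 0x000000);    /* e.g. NV_PMC_BOOT_0 */
    nvif_mask(object, 0x000200, 0x1, 0x0);  /* read-modify-write helper */
    nvif_object_unmap(object);              /* later accesses use ioctls */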
diff --git a/drivers/gpu/drm/nouveau/include/nvif/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h
index bdd05ee..3accc99 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/os.h
@@ -24,9 +24,15 @@
 #include <linux/power_supply.h>
 #include <linux/clk.h>
 #include <linux/regulator/consumer.h>
+#include <linux/agp_backend.h>
+#include <linux/reset.h>
+#include <linux/iommu.h>
 
 #include <asm/unaligned.h>
 
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/pmc.h>
+
 #ifndef ioread32_native
 #ifdef __BIG_ENDIAN
 #define ioread16_native ioread16be
@@ -40,5 +46,4 @@
 #define iowrite32_native iowrite32
 #endif /* def __BIG_ENDIAN else */
 #endif /* !ioread32_native */
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index a35b382..eaf5905 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -1,55 +1,52 @@
 #ifndef __NVKM_CLIENT_H__
 #define __NVKM_CLIENT_H__
-#include <core/namedb.h>
+#include <core/object.h>
 
 struct nvkm_client {
-	struct nvkm_namedb namedb;
-	struct nvkm_handle *root;
-	struct nvkm_object *device;
+	struct nvkm_object object;
 	char name[32];
+	u64 device;
 	u32 debug;
-	struct nvkm_vm *vm;
+
+	struct nvkm_client_notify *notify[16];
+	struct rb_root objroot;
+	struct rb_root dmaroot;
+
 	bool super;
 	void *data;
-
 	int (*ntfy)(const void *, u32, const void *, u32);
-	struct nvkm_client_notify *notify[16];
+
+	struct nvkm_vm *vm;
 };
 
-static inline struct nvkm_client *
-nv_client(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_CLIENT_CLASS)))
-		nv_assert("BAD CAST -> NvClient, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
+bool nvkm_client_insert(struct nvkm_client *, struct nvkm_object *);
+void nvkm_client_remove(struct nvkm_client *, struct nvkm_object *);
+struct nvkm_object *nvkm_client_search(struct nvkm_client *, u64 object);
 
-static inline struct nvkm_client *
-nvkm_client(void *obj)
-{
-	struct nvkm_object *client = nv_object(obj);
-	while (client && !(nv_iclass(client, NV_CLIENT_CLASS)))
-		client = client->parent;
-	return (void *)client;
-}
-
-#define nvkm_client_create(n,c,oc,od,d)                                     \
-	nvkm_client_create_((n), (c), (oc), (od), sizeof(**d), (void **)d)
-
-int  nvkm_client_create_(const char *name, u64 device, const char *cfg,
-			    const char *dbg, int, void **);
-#define nvkm_client_destroy(p)                                              \
-	nvkm_namedb_destroy(&(p)->base)
-
+int  nvkm_client_new(const char *name, u64 device, const char *cfg,
+		     const char *dbg, struct nvkm_client **);
+void nvkm_client_del(struct nvkm_client **);
 int  nvkm_client_init(struct nvkm_client *);
 int  nvkm_client_fini(struct nvkm_client *, bool suspend);
-const char *nvkm_client_name(void *obj);
 
 int nvkm_client_notify_new(struct nvkm_object *, struct nvkm_event *,
 			   void *data, u32 size);
 int nvkm_client_notify_del(struct nvkm_client *, int index);
 int nvkm_client_notify_get(struct nvkm_client *, int index);
 int nvkm_client_notify_put(struct nvkm_client *, int index);
+
+/* logging for client-facing objects */
+#define nvif_printk(o,l,p,f,a...) do {                                         \
+	struct nvkm_object *_object = (o);                                     \
+	struct nvkm_client *_client = _object->client;                         \
+	if (_client->debug >= NV_DBG_##l)                                      \
+		printk(KERN_##p "nouveau: %s:%08x:%08x: "f, _client->name,     \
+		       _object->handle, _object->oclass, ##a);                 \
+} while(0)
+#define nvif_fatal(o,f,a...) nvif_printk((o), FATAL, CRIT, f, ##a)
+#define nvif_error(o,f,a...) nvif_printk((o), ERROR,  ERR, f, ##a)
+#define nvif_debug(o,f,a...) nvif_printk((o), DEBUG, INFO, f, ##a)
+#define nvif_trace(o,f,a...) nvif_printk((o), TRACE, INFO, f, ##a)
+#define nvif_info(o,f,a...)  nvif_printk((o),  INFO, INFO, f, ##a)
+#define nvif_ioctl(o,f,a...) nvif_trace((o), "ioctl: "f, ##a)
 #endif
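
The nvif_* logging macros gate their output on the owning client's per-client debug level rather than a global one. Usage is ordinary printk style; a brief sketch ('object' is a struct nvkm_object *, and the message text and variables are placeholders):

    nvif_error(object, "unknown method 0x%08x\n", mthd);
    nvif_ioctl(object, "new object, size %d\n", size); /* trace level */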
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
index d07cb86..c59fd4e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
@@ -1,18 +1,11 @@
 #ifndef __NVKM_DEBUG_H__
 #define __NVKM_DEBUG_H__
-extern int nv_info_debug_level;
-
 #define NV_DBG_FATAL    0
 #define NV_DBG_ERROR    1
 #define NV_DBG_WARN     2
-#define NV_DBG_INFO     nv_info_debug_level
+#define NV_DBG_INFO     3
 #define NV_DBG_DEBUG    4
 #define NV_DBG_TRACE    5
 #define NV_DBG_PARANOIA 6
 #define NV_DBG_SPAM     7
-
-#define NV_DBG_INFO_NORMAL 3
-#define NV_DBG_INFO_SILENT NV_DBG_DEBUG
-
-#define nv_debug_level(a) nv_info_debug_level = NV_DBG_INFO_##a
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 333db33..8f76000 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -1,24 +1,84 @@
 #ifndef __NVKM_DEVICE_H__
 #define __NVKM_DEVICE_H__
-#include <core/engine.h>
 #include <core/event.h>
+#include <core/object.h>
+
+enum nvkm_devidx {
+	NVKM_SUBDEV_PCI,
+	NVKM_SUBDEV_VBIOS,
+	NVKM_SUBDEV_DEVINIT,
+	NVKM_SUBDEV_IBUS,
+	NVKM_SUBDEV_GPIO,
+	NVKM_SUBDEV_I2C,
+	NVKM_SUBDEV_FUSE,
+	NVKM_SUBDEV_MXM,
+	NVKM_SUBDEV_MC,
+	NVKM_SUBDEV_BUS,
+	NVKM_SUBDEV_TIMER,
+	NVKM_SUBDEV_FB,
+	NVKM_SUBDEV_LTC,
+	NVKM_SUBDEV_INSTMEM,
+	NVKM_SUBDEV_MMU,
+	NVKM_SUBDEV_BAR,
+	NVKM_SUBDEV_PMU,
+	NVKM_SUBDEV_VOLT,
+	NVKM_SUBDEV_THERM,
+	NVKM_SUBDEV_CLK,
+
+	NVKM_ENGINE_DMAOBJ,
+	NVKM_ENGINE_IFB,
+	NVKM_ENGINE_FIFO,
+	NVKM_ENGINE_SW,
+	NVKM_ENGINE_GR,
+	NVKM_ENGINE_MPEG,
+	NVKM_ENGINE_ME,
+	NVKM_ENGINE_VP,
+	NVKM_ENGINE_CIPHER,
+	NVKM_ENGINE_BSP,
+	NVKM_ENGINE_MSPPP,
+	NVKM_ENGINE_CE0,
+	NVKM_ENGINE_CE1,
+	NVKM_ENGINE_CE2,
+	NVKM_ENGINE_VIC,
+	NVKM_ENGINE_MSENC,
+	NVKM_ENGINE_DISP,
+	NVKM_ENGINE_PM,
+	NVKM_ENGINE_MSVLD,
+	NVKM_ENGINE_SEC,
+	NVKM_ENGINE_MSPDEC,
+
+	NVKM_SUBDEV_NR
+};
+
+enum nvkm_device_type {
+	NVKM_DEVICE_PCI,
+	NVKM_DEVICE_AGP,
+	NVKM_DEVICE_PCIE,
+	NVKM_DEVICE_TEGRA,
+};
 
 struct nvkm_device {
-	struct nvkm_engine engine;
-	struct list_head head;
-
-	struct pci_dev *pdev;
-	struct platform_device *platformdev;
+	const struct nvkm_device_func *func;
+	const struct nvkm_device_quirk *quirk;
+	struct device *dev;
+	enum nvkm_device_type type;
 	u64 handle;
+	const char *name;
+	const char *cfgopt;
+	const char *dbgopt;
+
+	struct list_head head;
+	struct mutex mutex;
+	int refcount;
+
+	void __iomem *pri;
 
 	struct nvkm_event event;
 
-	const char *cfgopt;
-	const char *dbgopt;
-	const char *name;
-	const char *cname;
 	u64 disable_mask;
+	u32 debug;
 
+	const struct nvkm_device_chip *chip;
 	enum {
 		NV_04    = 0x04,
 		NV_10    = 0x10,
@@ -35,67 +95,157 @@
 	u8  chiprev;
 	u32 crystal;
 
-	struct nvkm_oclass *oclass[NVDEV_SUBDEV_NR];
-	struct nvkm_object *subdev[NVDEV_SUBDEV_NR];
-
 	struct {
 		struct notifier_block nb;
 	} acpi;
+
+	struct nvkm_bar *bar;
+	struct nvkm_bios *bios;
+	struct nvkm_bus *bus;
+	struct nvkm_clk *clk;
+	struct nvkm_devinit *devinit;
+	struct nvkm_fb *fb;
+	struct nvkm_fuse *fuse;
+	struct nvkm_gpio *gpio;
+	struct nvkm_i2c *i2c;
+	struct nvkm_subdev *ibus;
+	struct nvkm_instmem *imem;
+	struct nvkm_ltc *ltc;
+	struct nvkm_mc *mc;
+	struct nvkm_mmu *mmu;
+	struct nvkm_subdev *mxm;
+	struct nvkm_pci *pci;
+	struct nvkm_pmu *pmu;
+	struct nvkm_therm *therm;
+	struct nvkm_timer *timer;
+	struct nvkm_volt *volt;
+
+	struct nvkm_engine *bsp;
+	struct nvkm_engine *ce[3];
+	struct nvkm_engine *cipher;
+	struct nvkm_disp *disp;
+	struct nvkm_dma *dma;
+	struct nvkm_fifo *fifo;
+	struct nvkm_gr *gr;
+	struct nvkm_engine *ifb;
+	struct nvkm_engine *me;
+	struct nvkm_engine *mpeg;
+	struct nvkm_engine *msenc;
+	struct nvkm_engine *mspdec;
+	struct nvkm_engine *msppp;
+	struct nvkm_engine *msvld;
+	struct nvkm_pm *pm;
+	struct nvkm_engine *sec;
+	struct nvkm_sw *sw;
+	struct nvkm_engine *vic;
+	struct nvkm_engine *vp;
+};
+
+struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int index);
+struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int index);
+
+struct nvkm_device_func {
+	struct nvkm_device_pci *(*pci)(struct nvkm_device *);
+	struct nvkm_device_tegra *(*tegra)(struct nvkm_device *);
+	void *(*dtor)(struct nvkm_device *);
+	int (*preinit)(struct nvkm_device *);
+	int (*init)(struct nvkm_device *);
+	void (*fini)(struct nvkm_device *, bool suspend);
+	resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
+	resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
+	bool cpu_coherent;
+};
+
+struct nvkm_device_quirk {
+	u8 tv_pin_mask;
+	u8 tv_gpio;
+	bool War00C800_0;
+};
+
+struct nvkm_device_chip {
+	const char *name;
+
+	int (*bar    )(struct nvkm_device *, int idx, struct nvkm_bar **);
+	int (*bios   )(struct nvkm_device *, int idx, struct nvkm_bios **);
+	int (*bus    )(struct nvkm_device *, int idx, struct nvkm_bus **);
+	int (*clk    )(struct nvkm_device *, int idx, struct nvkm_clk **);
+	int (*devinit)(struct nvkm_device *, int idx, struct nvkm_devinit **);
+	int (*fb     )(struct nvkm_device *, int idx, struct nvkm_fb **);
+	int (*fuse   )(struct nvkm_device *, int idx, struct nvkm_fuse **);
+	int (*gpio   )(struct nvkm_device *, int idx, struct nvkm_gpio **);
+	int (*i2c    )(struct nvkm_device *, int idx, struct nvkm_i2c **);
+	int (*ibus   )(struct nvkm_device *, int idx, struct nvkm_subdev **);
+	int (*imem   )(struct nvkm_device *, int idx, struct nvkm_instmem **);
+	int (*ltc    )(struct nvkm_device *, int idx, struct nvkm_ltc **);
+	int (*mc     )(struct nvkm_device *, int idx, struct nvkm_mc **);
+	int (*mmu    )(struct nvkm_device *, int idx, struct nvkm_mmu **);
+	int (*mxm    )(struct nvkm_device *, int idx, struct nvkm_subdev **);
+	int (*pci    )(struct nvkm_device *, int idx, struct nvkm_pci **);
+	int (*pmu    )(struct nvkm_device *, int idx, struct nvkm_pmu **);
+	int (*therm  )(struct nvkm_device *, int idx, struct nvkm_therm **);
+	int (*timer  )(struct nvkm_device *, int idx, struct nvkm_timer **);
+	int (*volt   )(struct nvkm_device *, int idx, struct nvkm_volt **);
+
+	int (*bsp    )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*ce[3]  )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*disp   )(struct nvkm_device *, int idx, struct nvkm_disp **);
+	int (*dma    )(struct nvkm_device *, int idx, struct nvkm_dma **);
+	int (*fifo   )(struct nvkm_device *, int idx, struct nvkm_fifo **);
+	int (*gr     )(struct nvkm_device *, int idx, struct nvkm_gr **);
+	int (*ifb    )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*me     )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*mpeg   )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*msenc  )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*msppp  )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*msvld  )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*pm     )(struct nvkm_device *, int idx, struct nvkm_pm **);
+	int (*sec    )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*sw     )(struct nvkm_device *, int idx, struct nvkm_sw **);
+	int (*vic    )(struct nvkm_device *, int idx, struct nvkm_engine **);
+	int (*vp     )(struct nvkm_device *, int idx, struct nvkm_engine **);
 };
 
 struct nvkm_device *nvkm_device_find(u64 name);
 int nvkm_device_list(u64 *name, int size);
 
-struct nvkm_device *nv_device(void *obj);
+/* privileged register interface accessor macros */
+#define nvkm_rd08(d,a) ioread8((d)->pri + (a))
+#define nvkm_rd16(d,a) ioread16_native((d)->pri + (a))
+#define nvkm_rd32(d,a) ioread32_native((d)->pri + (a))
+#define nvkm_wr08(d,a,v) iowrite8((v), (d)->pri + (a))
+#define nvkm_wr16(d,a,v) iowrite16_native((v), (d)->pri + (a))
+#define nvkm_wr32(d,a,v) iowrite32_native((v), (d)->pri + (a))
+#define nvkm_mask(d,a,m,v) ({                                                  \
+	struct nvkm_device *_device = (d);                                     \
+	u32 _addr = (a), _temp = nvkm_rd32(_device, _addr);                    \
+	nvkm_wr32(_device, _addr, (_temp & ~(m)) | (v));                       \
+	_temp;                                                                 \
+})
 
-static inline bool
-nv_device_match(struct nvkm_object *object, u16 dev, u16 ven, u16 sub)
-{
-	struct nvkm_device *device = nv_device(object);
-	return device->pdev->device == dev &&
-	       device->pdev->subsystem_vendor == ven &&
-	       device->pdev->subsystem_device == sub;
-}
+void nvkm_device_del(struct nvkm_device **);
 
-static inline bool
-nv_device_is_pci(struct nvkm_device *device)
-{
-	return device->pdev != NULL;
-}
-
-static inline bool
-nv_device_is_cpu_coherent(struct nvkm_device *device)
-{
-	return (!IS_ENABLED(CONFIG_ARM) && nv_device_is_pci(device));
-}
-
-static inline struct device *
-nv_device_base(struct nvkm_device *device)
-{
-	return nv_device_is_pci(device) ? &device->pdev->dev :
-					  &device->platformdev->dev;
-}
-
-resource_size_t
-nv_device_resource_start(struct nvkm_device *device, unsigned int bar);
-
-resource_size_t
-nv_device_resource_len(struct nvkm_device *device, unsigned int bar);
-
-int
-nv_device_get_irq(struct nvkm_device *device, bool stall);
-
-struct platform_device;
-
-enum nv_bus_type {
-	NVKM_BUS_PCI,
-	NVKM_BUS_PLATFORM,
+struct nvkm_device_oclass {
+	int (*ctor)(struct nvkm_device *, const struct nvkm_oclass *,
+		    void *data, u32 size, struct nvkm_object **);
+	struct nvkm_sclass base;
 };
 
-#define nvkm_device_create(p,t,n,s,c,d,u)                                   \
-	nvkm_device_create_((void *)(p), (t), (n), (s), (c), (d),           \
-			       sizeof(**u), (void **)u)
-int  nvkm_device_create_(void *, enum nv_bus_type type, u64 name,
-			    const char *sname, const char *cfg, const char *dbg,
-			    int, void **);
+extern const struct nvkm_sclass nvkm_udevice_sclass;
+
+/* device logging */
+#define nvdev_printk_(d,l,p,f,a...) do {                                       \
+	struct nvkm_device *_device = (d);                                     \
+	if (_device->debug >= (l))                                             \
+		dev_##p(_device->dev, f, ##a);                                 \
+} while(0)
+#define nvdev_printk(d,l,p,f,a...) nvdev_printk_((d), NV_DBG_##l, p, f, ##a)
+#define nvdev_fatal(d,f,a...) nvdev_printk((d), FATAL,   crit, f, ##a)
+#define nvdev_error(d,f,a...) nvdev_printk((d), ERROR,    err, f, ##a)
+#define nvdev_warn(d,f,a...)  nvdev_printk((d),  WARN, notice, f, ##a)
+#define nvdev_info(d,f,a...)  nvdev_printk((d),  INFO,   info, f, ##a)
+#define nvdev_debug(d,f,a...) nvdev_printk((d), DEBUG,   info, f, ##a)
+#define nvdev_trace(d,f,a...) nvdev_printk((d), TRACE,   info, f, ##a)
+#define nvdev_spam(d,f,a...)  nvdev_printk((d),  SPAM,    dbg, f, ##a)
 #endif
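
For reference, the accessors above replace the old subdev-relative nv_rd32()/nv_wr32() helpers (removed from core/subdev.h later in this patch): they take the struct nvkm_device directly and index its ->pri mapping. A minimal sketch of their use — illustrative only; the offsets and mask values are placeholders, not real register definitions:

	static void
	example_pri_access(struct nvkm_device *device)
	{
		u32 val;

		val = nvkm_rd32(device, 0x000000);        /* 32-bit read */
		nvkm_wr32(device, 0x000200, val);         /* 32-bit write */

		/* read-modify-write: clear the mask bits, OR in the new
		 * value, and return the pre-modification contents
		 */
		val = nvkm_mask(device, 0x000200, 0x00000001, 0x00000000);
	}
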
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h b/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h
deleted file mode 100644
index 60c5888..0000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/devidx.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#ifndef __NVKM_DEVIDX_H__
-#define __NVKM_DEVIDX_H__
-enum nvkm_devidx {
-	NVDEV_ENGINE_DEVICE,
-	NVDEV_SUBDEV_VBIOS,
-
-	/* All subdevs from DEVINIT to DEVINIT_LAST will be created before
-	 * *any* of them are initialised.  This subdev category is used
-	 * for any subdevs that the VBIOS init table parsing may call out
-	 * to during POST.
-	 */
-	NVDEV_SUBDEV_DEVINIT,
-	NVDEV_SUBDEV_IBUS,
-	NVDEV_SUBDEV_GPIO,
-	NVDEV_SUBDEV_I2C,
-	NVDEV_SUBDEV_DEVINIT_LAST = NVDEV_SUBDEV_I2C,
-
-	/* This grouping of subdevs are initialised right after they've
-	 * been created, and are allowed to assume any subdevs in the
-	 * list above them exist and have been initialised.
-	 */
-	NVDEV_SUBDEV_FUSE,
-	NVDEV_SUBDEV_MXM,
-	NVDEV_SUBDEV_MC,
-	NVDEV_SUBDEV_BUS,
-	NVDEV_SUBDEV_TIMER,
-	NVDEV_SUBDEV_FB,
-	NVDEV_SUBDEV_LTC,
-	NVDEV_SUBDEV_INSTMEM,
-	NVDEV_SUBDEV_MMU,
-	NVDEV_SUBDEV_BAR,
-	NVDEV_SUBDEV_PMU,
-	NVDEV_SUBDEV_VOLT,
-	NVDEV_SUBDEV_THERM,
-	NVDEV_SUBDEV_CLK,
-
-	NVDEV_ENGINE_FIRST,
-	NVDEV_ENGINE_DMAOBJ = NVDEV_ENGINE_FIRST,
-	NVDEV_ENGINE_IFB,
-	NVDEV_ENGINE_FIFO,
-	NVDEV_ENGINE_SW,
-	NVDEV_ENGINE_GR,
-	NVDEV_ENGINE_MPEG,
-	NVDEV_ENGINE_ME,
-	NVDEV_ENGINE_VP,
-	NVDEV_ENGINE_CIPHER,
-	NVDEV_ENGINE_BSP,
-	NVDEV_ENGINE_MSPPP,
-	NVDEV_ENGINE_CE0,
-	NVDEV_ENGINE_CE1,
-	NVDEV_ENGINE_CE2,
-	NVDEV_ENGINE_VIC,
-	NVDEV_ENGINE_MSENC,
-	NVDEV_ENGINE_DISP,
-	NVDEV_ENGINE_PM,
-	NVDEV_ENGINE_MSVLD,
-	NVDEV_ENGINE_SEC,
-	NVDEV_ENGINE_MSPDEC,
-
-	NVDEV_SUBDEV_NR,
-};
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
deleted file mode 100644
index 1bf2e8e..0000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engctx.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef __NVKM_ENGCTX_H__
-#define __NVKM_ENGCTX_H__
-#include <core/gpuobj.h>
-
-#include <subdev/mmu.h>
-
-#define NV_ENGCTX_(eng,var) (NV_ENGCTX_CLASS | ((var) << 8) | (eng))
-#define NV_ENGCTX(name,var)  NV_ENGCTX_(NVDEV_ENGINE_##name, (var))
-
-struct nvkm_engctx {
-	struct nvkm_gpuobj gpuobj;
-	struct nvkm_vma vma;
-	struct list_head head;
-	unsigned long save;
-	u64 addr;
-};
-
-static inline struct nvkm_engctx *
-nv_engctx(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_ENGCTX_CLASS)))
-		nv_assert("BAD CAST -> NvEngCtx, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
-
-#define nvkm_engctx_create(p,e,c,g,s,a,f,d)                                 \
-	nvkm_engctx_create_((p), (e), (c), (g), (s), (a), (f),              \
-			       sizeof(**d), (void **)d)
-
-int  nvkm_engctx_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, struct nvkm_object *,
-			    u32 size, u32 align, u32 flags,
-			    int length, void **data);
-void nvkm_engctx_destroy(struct nvkm_engctx *);
-int  nvkm_engctx_init(struct nvkm_engctx *);
-int  nvkm_engctx_fini(struct nvkm_engctx *, bool suspend);
-
-int  _nvkm_engctx_ctor(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, void *, u32,
-			  struct nvkm_object **);
-void _nvkm_engctx_dtor(struct nvkm_object *);
-int  _nvkm_engctx_init(struct nvkm_object *);
-int  _nvkm_engctx_fini(struct nvkm_object *, bool suspend);
-#define _nvkm_engctx_rd32 _nvkm_gpuobj_rd32
-#define _nvkm_engctx_wr32 _nvkm_gpuobj_wr32
-
-struct nvkm_object *nvkm_engctx_get(struct nvkm_engine *, u64 addr);
-void nvkm_engctx_put(struct nvkm_object *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index faf0fd2..48bf128 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -1,56 +1,49 @@
 #ifndef __NVKM_ENGINE_H__
 #define __NVKM_ENGINE_H__
+#define nvkm_engine(p) container_of((p), struct nvkm_engine, subdev)
 #include <core/subdev.h>
-
-#define NV_ENGINE_(eng,var) (NV_ENGINE_CLASS | ((var) << 8) | (eng))
-#define NV_ENGINE(name,var)  NV_ENGINE_(NVDEV_ENGINE_##name, (var))
+struct nvkm_fifo_chan;
+struct nvkm_fb_tile;
 
 struct nvkm_engine {
+	const struct nvkm_engine_func *func;
 	struct nvkm_subdev subdev;
-	struct nvkm_oclass *cclass;
-	struct nvkm_oclass *sclass;
-
-	struct list_head contexts;
 	spinlock_t lock;
 
-	void (*tile_prog)(struct nvkm_engine *, int region);
-	int  (*tlb_flush)(struct nvkm_engine *);
+	int usecount;
 };
 
-static inline struct nvkm_engine *
-nv_engine(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_ENGINE_CLASS)))
-		nv_assert("BAD CAST -> NvEngine, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
+struct nvkm_engine_func {
+	void *(*dtor)(struct nvkm_engine *);
+	int (*oneinit)(struct nvkm_engine *);
+	int (*init)(struct nvkm_engine *);
+	int (*fini)(struct nvkm_engine *, bool suspend);
+	void (*intr)(struct nvkm_engine *);
+	void (*tile)(struct nvkm_engine *, int region, struct nvkm_fb_tile *);
 
-static inline int
-nv_engidx(struct nvkm_engine *engine)
-{
-	return nv_subidx(&engine->subdev);
-}
+	struct {
+		int (*sclass)(struct nvkm_oclass *, int index,
+			      const struct nvkm_device_oclass **);
+	} base;
 
-struct nvkm_engine *nvkm_engine(void *obj, int idx);
+	struct {
+		int (*cclass)(struct nvkm_fifo_chan *,
+			      const struct nvkm_oclass *,
+			      struct nvkm_object **);
+		int (*sclass)(struct nvkm_oclass *, int index);
+	} fifo;
 
-#define nvkm_engine_create(p,e,c,d,i,f,r)                                   \
-	nvkm_engine_create_((p), (e), (c), (d), (i), (f),                   \
-			       sizeof(**r),(void **)r)
+	const struct nvkm_object_func *cclass;
+	struct nvkm_sclass sclass[];
+};
 
-#define nvkm_engine_destroy(p)                                              \
-	nvkm_subdev_destroy(&(p)->subdev)
-#define nvkm_engine_init(p)                                                 \
-	nvkm_subdev_init(&(p)->subdev)
-#define nvkm_engine_fini(p,s)                                               \
-	nvkm_subdev_fini(&(p)->subdev, (s))
-
-int nvkm_engine_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *, bool, const char *,
-			   const char *, int, void **);
-
-#define _nvkm_engine_dtor _nvkm_subdev_dtor
-#define _nvkm_engine_init _nvkm_subdev_init
-#define _nvkm_engine_fini _nvkm_subdev_fini
+int nvkm_engine_ctor(const struct nvkm_engine_func *, struct nvkm_device *,
+		     int index, u32 pmc_enable, bool enable,
+		     struct nvkm_engine *);
+int nvkm_engine_new_(const struct nvkm_engine_func *, struct nvkm_device *,
+		     int index, u32 pmc_enable, bool enable,
+		     struct nvkm_engine **);
+struct nvkm_engine *nvkm_engine_ref(struct nvkm_engine *);
+void nvkm_engine_unref(struct nvkm_engine **);
+void nvkm_engine_tile(struct nvkm_engine *, int region);
 #endif
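
Engine lifetime now goes through use-counting instead of the old object inc/dec. A sketch, assuming (from the usecount field and the pointer-returning signature, not spelled out in this header) that nvkm_engine_ref() initialises the engine on first use and returns an ERR_PTR on failure:

	static void
	example_engine_use(struct nvkm_engine *engine)
	{
		engine = nvkm_engine_ref(engine);        /* init on first use */
		if (!IS_ERR(engine))
			nvkm_engine_unref(&engine);      /* drop the reference */
	}
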
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
index e76f76f..40429a8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
@@ -10,12 +10,11 @@
 };
 
 const struct nvkm_enum *nvkm_enum_find(const struct nvkm_enum *, u32 value);
-const struct nvkm_enum *nvkm_enum_print(const struct nvkm_enum *, u32 value);
 
 struct nvkm_bitfield {
 	u32 mask;
 	const char *name;
 };
 
-void nvkm_bitfield_print(const struct nvkm_bitfield *, u32 value);
+void nvkm_snprintbf(char *, int, const struct nvkm_bitfield *, u32 value);
 #endif
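
nvkm_bitfield_print() wrote directly to the log; its replacement nvkm_snprintbf() formats the names of the set bits into a caller-supplied buffer, which can then be fed through the new logging macros. A sketch with placeholder bit names:

	static const struct nvkm_bitfield example_intr_bits[] = {
		{ 0x00000001, "FAULT" },
		{ 0x00000002, "BADMTHD" },
		{}
	};

	static void
	example_report(char *msg, int size, u32 intr)
	{
		/* msg ends up holding the names of the bits set in intr */
		nvkm_snprintbf(msg, size, example_intr_bits, intr);
	}
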
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
index e0187e7..d4f56ea 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
@@ -1,64 +1,40 @@
 #ifndef __NVKM_GPUOBJ_H__
 #define __NVKM_GPUOBJ_H__
 #include <core/object.h>
+#include <core/memory.h>
 #include <core/mm.h>
 struct nvkm_vma;
 struct nvkm_vm;
 
 #define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
-#define NVOBJ_FLAG_ZERO_FREE  0x00000002
 #define NVOBJ_FLAG_HEAP       0x00000004
 
 struct nvkm_gpuobj {
 	struct nvkm_object object;
-	struct nvkm_object *parent;
+	const struct nvkm_gpuobj_func *func;
+	struct nvkm_gpuobj *parent;
+	struct nvkm_memory *memory;
 	struct nvkm_mm_node *node;
-	struct nvkm_mm heap;
 
-	u32 flags;
 	u64 addr;
 	u32 size;
+	struct nvkm_mm heap;
+
+	void __iomem *map;
 };
 
-static inline struct nvkm_gpuobj *
-nv_gpuobj(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_GPUOBJ_CLASS)))
-		nv_assert("BAD CAST -> NvGpuObj, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
+struct nvkm_gpuobj_func {
+	void *(*acquire)(struct nvkm_gpuobj *);
+	void (*release)(struct nvkm_gpuobj *);
+	u32 (*rd32)(struct nvkm_gpuobj *, u32 offset);
+	void (*wr32)(struct nvkm_gpuobj *, u32 offset, u32 data);
+};
 
-#define nvkm_gpuobj_create(p,e,c,v,g,s,a,f,d)                               \
-	nvkm_gpuobj_create_((p), (e), (c), (v), (g), (s), (a), (f),         \
-			       sizeof(**d), (void **)d)
-#define nvkm_gpuobj_init(p) nvkm_object_init(&(p)->object)
-#define nvkm_gpuobj_fini(p,s) nvkm_object_fini(&(p)->object, (s))
-int  nvkm_gpuobj_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, u32 pclass,
-			    struct nvkm_object *, u32 size, u32 align,
-			    u32 flags, int length, void **);
-void nvkm_gpuobj_destroy(struct nvkm_gpuobj *);
-
-int  nvkm_gpuobj_new(struct nvkm_object *, struct nvkm_object *, u32 size,
-		     u32 align, u32 flags, struct nvkm_gpuobj **);
-int  nvkm_gpuobj_dup(struct nvkm_object *, struct nvkm_gpuobj *,
-		     struct nvkm_gpuobj **);
-int  nvkm_gpuobj_map(struct nvkm_gpuobj *, u32 acc, struct nvkm_vma *);
-int  nvkm_gpuobj_map_vm(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
-			struct nvkm_vma *);
+int nvkm_gpuobj_new(struct nvkm_device *, u32 size, int align, bool zero,
+		    struct nvkm_gpuobj *parent, struct nvkm_gpuobj **);
+void nvkm_gpuobj_del(struct nvkm_gpuobj **);
+int nvkm_gpuobj_wrap(struct nvkm_memory *, struct nvkm_gpuobj **);
+int nvkm_gpuobj_map(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
+		    struct nvkm_vma *);
 void nvkm_gpuobj_unmap(struct nvkm_vma *);
-
-static inline void
-nvkm_gpuobj_ref(struct nvkm_gpuobj *obj, struct nvkm_gpuobj **ref)
-{
-	nvkm_object_ref(&obj->object, (struct nvkm_object **)ref);
-}
-
-void _nvkm_gpuobj_dtor(struct nvkm_object *);
-int  _nvkm_gpuobj_init(struct nvkm_object *);
-int  _nvkm_gpuobj_fini(struct nvkm_object *, bool);
-u32  _nvkm_gpuobj_rd32(struct nvkm_object *, u64);
-void _nvkm_gpuobj_wr32(struct nvkm_object *, u64, u32);
 #endif
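
The old create/destroy macro chain collapses into a plain constructor/destructor pair. A hedged sketch — the NULL parent and the expectation that nvkm_gpuobj_del() clears the caller's pointer are inferred from the signatures, not stated here:

	static int
	example_gpuobj(struct nvkm_device *device)
	{
		struct nvkm_gpuobj *gpuobj = NULL;
		int ret;

		/* 0x1000 bytes, 0x100-byte alignment, zeroed, no parent */
		ret = nvkm_gpuobj_new(device, 0x1000, 0x100, true, NULL, &gpuobj);
		if (ret)
			return ret;

		nvkm_gpuobj_del(&gpuobj);
		return 0;
	}
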
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h b/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
deleted file mode 100644
index 67f384d..0000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/handle.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __NVKM_HANDLE_H__
-#define __NVKM_HANDLE_H__
-#include <core/os.h>
-struct nvkm_object;
-
-struct nvkm_handle {
-	struct nvkm_namedb *namedb;
-	struct list_head node;
-
-	struct list_head head;
-	struct list_head tree;
-	u32 name;
-	u32 priv;
-
-	u8  route;
-	u64 token;
-
-	struct nvkm_handle *parent;
-	struct nvkm_object *object;
-};
-
-int  nvkm_handle_create(struct nvkm_object *, u32 parent, u32 handle,
-			struct nvkm_object *, struct nvkm_handle **);
-void nvkm_handle_destroy(struct nvkm_handle *);
-int  nvkm_handle_init(struct nvkm_handle *);
-int  nvkm_handle_fini(struct nvkm_handle *, bool suspend);
-
-struct nvkm_object *nvkm_handle_ref(struct nvkm_object *, u32 name);
-
-struct nvkm_handle *nvkm_handle_get_class(struct nvkm_object *, u16);
-struct nvkm_handle *nvkm_handle_get_vinst(struct nvkm_object *, u64);
-struct nvkm_handle *nvkm_handle_get_cinst(struct nvkm_object *, u32);
-void nvkm_handle_put(struct nvkm_handle *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
new file mode 100644
index 0000000..9363b83
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -0,0 +1,53 @@
+#ifndef __NVKM_MEMORY_H__
+#define __NVKM_MEMORY_H__
+#include <core/os.h>
+struct nvkm_device;
+struct nvkm_vma;
+struct nvkm_vm;
+
+enum nvkm_memory_target {
+	NVKM_MEM_TARGET_INST,
+	NVKM_MEM_TARGET_VRAM,
+	NVKM_MEM_TARGET_HOST,
+};
+
+struct nvkm_memory {
+	const struct nvkm_memory_func *func;
+};
+
+struct nvkm_memory_func {
+	void *(*dtor)(struct nvkm_memory *);
+	enum nvkm_memory_target (*target)(struct nvkm_memory *);
+	u64 (*addr)(struct nvkm_memory *);
+	u64 (*size)(struct nvkm_memory *);
+	void (*boot)(struct nvkm_memory *, struct nvkm_vm *);
+	void __iomem *(*acquire)(struct nvkm_memory *);
+	void (*release)(struct nvkm_memory *);
+	u32 (*rd32)(struct nvkm_memory *, u64 offset);
+	void (*wr32)(struct nvkm_memory *, u64 offset, u32 data);
+	void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
+};
+
+void nvkm_memory_ctor(const struct nvkm_memory_func *, struct nvkm_memory *);
+int nvkm_memory_new(struct nvkm_device *, enum nvkm_memory_target,
+		    u64 size, u32 align, bool zero, struct nvkm_memory **);
+void nvkm_memory_del(struct nvkm_memory **);
+#define nvkm_memory_target(p) (p)->func->target(p)
+#define nvkm_memory_addr(p) (p)->func->addr(p)
+#define nvkm_memory_size(p) (p)->func->size(p)
+#define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
+#define nvkm_memory_map(p,v,o) (p)->func->map((p),(v),(o))
+
+/* accessor macros - kmap()/done() must bracket use of the other accessor
+ * macros to guarantee correct behaviour across all chipsets
+ */
+#define nvkm_kmap(o)     (o)->func->acquire(o)
+#define nvkm_ro32(o,a)   (o)->func->rd32((o), (a))
+#define nvkm_wo32(o,a,d) (o)->func->wr32((o), (a), (d))
+#define nvkm_mo32(o,a,m,d) ({                                                  \
+	u32 _addr = (a), _data = nvkm_ro32((o), _addr);                        \
+	nvkm_wo32((o), _addr, (_data & ~(m)) | (d));                           \
+	_data;                                                                 \
+})
+#define nvkm_done(o)     (o)->func->release(o)
+#endif
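
Per the comment above, nvkm_kmap()/nvkm_done() must bracket the word accessors. A minimal sketch, assuming "memory" came from a successful nvkm_memory_new() call and the offsets are placeholders:

	static void
	example_memory_access(struct nvkm_memory *memory)
	{
		u32 word;

		nvkm_kmap(memory);
		word = nvkm_ro32(memory, 0x00);       /* read dword at 0x00 */
		nvkm_wo32(memory, 0x04, word);        /* write dword at 0x04 */
		nvkm_mo32(memory, 0x08, 0xff, 0x01);  /* masked read-modify-write */
		nvkm_done(memory);
	}
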
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
index 096eb1a..d92fd41 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
@@ -27,7 +27,7 @@
 static inline bool
 nvkm_mm_initialised(struct nvkm_mm *mm)
 {
-	return mm->block_size != 0;
+	return mm->heap_nodes;
 }
 
 int  nvkm_mm_init(struct nvkm_mm *, u32 offset, u32 length, u32 block);
@@ -37,4 +37,5 @@
 int  nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
 		  u32 size_min, u32 align, struct nvkm_mm_node **);
 void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
+void nvkm_mm_dump(struct nvkm_mm *, const char *);
 #endif
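
For context, a hedged sketch of the allocator interface; the heap/type arguments are placeholders whose real meaning depends on the caller, and nvkm_mm_dump() is the debug helper added just above:

	static int
	example_mm(struct nvkm_mm *mm)
	{
		struct nvkm_mm_node *node = NULL;
		int ret;

		ret = nvkm_mm_init(mm, 0, 0x10000, 1);  /* offset, length, block */
		if (ret)
			return ret;

		/* allocate 0x100 units from the tail of the region */
		ret = nvkm_mm_tail(mm, 0, 1, 0x100, 0x100, 1, &node);
		if (ret == 0)
			nvkm_mm_free(mm, &node);

		nvkm_mm_dump(mm, "example");
		return ret;
	}
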
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h b/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
deleted file mode 100644
index 4cfe16f..0000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/namedb.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef __NVKM_NAMEDB_H__
-#define __NVKM_NAMEDB_H__
-#include <core/parent.h>
-struct nvkm_handle;
-
-struct nvkm_namedb {
-	struct nvkm_parent parent;
-	rwlock_t lock;
-	struct list_head list;
-};
-
-static inline struct nvkm_namedb *
-nv_namedb(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_NAMEDB_CLASS)))
-		nv_assert("BAD CAST -> NvNameDB, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
-
-#define nvkm_namedb_create(p,e,c,v,s,m,d)                                   \
-	nvkm_namedb_create_((p), (e), (c), (v), (s), (m),                   \
-			       sizeof(**d), (void **)d)
-#define nvkm_namedb_init(p)                                                 \
-	nvkm_parent_init(&(p)->parent)
-#define nvkm_namedb_fini(p,s)                                               \
-	nvkm_parent_fini(&(p)->parent, (s))
-#define nvkm_namedb_destroy(p)                                              \
-	nvkm_parent_destroy(&(p)->parent)
-
-int  nvkm_namedb_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, u32 pclass,
-			    struct nvkm_oclass *, u64 engcls,
-			    int size, void **);
-
-int  _nvkm_namedb_ctor(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, void *, u32,
-			  struct nvkm_object **);
-#define _nvkm_namedb_dtor _nvkm_parent_dtor
-#define _nvkm_namedb_init _nvkm_parent_init
-#define _nvkm_namedb_fini _nvkm_parent_fini
-
-int  nvkm_namedb_insert(struct nvkm_namedb *, u32 name, struct nvkm_object *,
-			struct nvkm_handle *);
-void nvkm_namedb_remove(struct nvkm_handle *);
-
-struct nvkm_handle *nvkm_namedb_get(struct nvkm_namedb *, u32);
-struct nvkm_handle *nvkm_namedb_get_class(struct nvkm_namedb *, u16);
-struct nvkm_handle *nvkm_namedb_get_vinst(struct nvkm_namedb *, u64);
-struct nvkm_handle *nvkm_namedb_get_cinst(struct nvkm_namedb *, u32);
-void nvkm_namedb_put(struct nvkm_handle *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index 6e3cd39..dcd048b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -1,203 +1,88 @@
 #ifndef __NVKM_OBJECT_H__
 #define __NVKM_OBJECT_H__
 #include <core/os.h>
-#include <core/printk.h>
-
-#define NV_PARENT_CLASS 0x80000000
-#define NV_NAMEDB_CLASS 0x40000000
-#define NV_CLIENT_CLASS 0x20000000
-#define NV_SUBDEV_CLASS 0x10000000
-#define NV_ENGINE_CLASS 0x08000000
-#define NV_MEMOBJ_CLASS 0x04000000
-#define NV_GPUOBJ_CLASS 0x02000000
-#define NV_ENGCTX_CLASS 0x01000000
-#define NV_OBJECT_CLASS 0x0000ffff
+#include <core/debug.h>
+struct nvkm_event;
+struct nvkm_gpuobj;
+struct nvkm_oclass;
 
 struct nvkm_object {
-	struct nvkm_oclass *oclass;
+	const struct nvkm_object_func *func;
+	struct nvkm_client *client;
+	struct nvkm_engine *engine;
+	s32 oclass;
+	u32 handle;
+
+	struct list_head head;
+	struct list_head tree;
+	u8  route;
+	u64 token;
+	u64 object;
+	struct rb_node node;
+};
+
+struct nvkm_object_func {
+	void *(*dtor)(struct nvkm_object *);
+	int (*init)(struct nvkm_object *);
+	int (*fini)(struct nvkm_object *, bool suspend);
+	int (*mthd)(struct nvkm_object *, u32 mthd, void *data, u32 size);
+	int (*ntfy)(struct nvkm_object *, u32 mthd, struct nvkm_event **);
+	int (*map)(struct nvkm_object *, u64 *addr, u32 *size);
+	int (*rd08)(struct nvkm_object *, u64 addr, u8 *data);
+	int (*rd16)(struct nvkm_object *, u64 addr, u16 *data);
+	int (*rd32)(struct nvkm_object *, u64 addr, u32 *data);
+	int (*wr08)(struct nvkm_object *, u64 addr, u8 data);
+	int (*wr16)(struct nvkm_object *, u64 addr, u16 data);
+	int (*wr32)(struct nvkm_object *, u64 addr, u32 data);
+	int (*bind)(struct nvkm_object *, struct nvkm_gpuobj *, int align,
+		    struct nvkm_gpuobj **);
+	int (*sclass)(struct nvkm_object *, int index, struct nvkm_oclass *);
+};
+
+void nvkm_object_ctor(const struct nvkm_object_func *,
+		      const struct nvkm_oclass *, struct nvkm_object *);
+int nvkm_object_new_(const struct nvkm_object_func *,
+		     const struct nvkm_oclass *, void *data, u32 size,
+		     struct nvkm_object **);
+int nvkm_object_new(const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+void nvkm_object_del(struct nvkm_object **);
+void *nvkm_object_dtor(struct nvkm_object *);
+int nvkm_object_init(struct nvkm_object *);
+int nvkm_object_fini(struct nvkm_object *, bool suspend);
+int nvkm_object_mthd(struct nvkm_object *, u32 mthd, void *data, u32 size);
+int nvkm_object_ntfy(struct nvkm_object *, u32 mthd, struct nvkm_event **);
+int nvkm_object_map(struct nvkm_object *, u64 *addr, u32 *size);
+int nvkm_object_rd08(struct nvkm_object *, u64 addr, u8  *data);
+int nvkm_object_rd16(struct nvkm_object *, u64 addr, u16 *data);
+int nvkm_object_rd32(struct nvkm_object *, u64 addr, u32 *data);
+int nvkm_object_wr08(struct nvkm_object *, u64 addr, u8   data);
+int nvkm_object_wr16(struct nvkm_object *, u64 addr, u16  data);
+int nvkm_object_wr32(struct nvkm_object *, u64 addr, u32  data);
+int nvkm_object_bind(struct nvkm_object *, struct nvkm_gpuobj *, int align,
+		     struct nvkm_gpuobj **);
+
+struct nvkm_sclass {
+	int minver;
+	int maxver;
+	s32 oclass;
+	const struct nvkm_object_func *func;
+	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+};
+
+struct nvkm_oclass {
+	int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+	struct nvkm_sclass base;
+	const void *priv;
+	const void *engn;
+	u32 handle;
+	u8  route;
+	u64 token;
+	u64 object;
+	struct nvkm_client *client;
 	struct nvkm_object *parent;
 	struct nvkm_engine *engine;
-	atomic_t refcount;
-	atomic_t usecount;
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-#define NVKM_OBJECT_MAGIC 0x75ef0bad
-	struct list_head list;
-	u32 _magic;
-#endif
 };
-
-static inline struct nvkm_object *
-nv_object(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (likely(obj)) {
-		struct nvkm_object *object = obj;
-		if (unlikely(object->_magic != NVKM_OBJECT_MAGIC))
-			nv_assert("BAD CAST -> NvObject, invalid magic");
-	}
-#endif
-	return obj;
-}
-
-#define nvkm_object_create(p,e,c,s,d)                                       \
-	nvkm_object_create_((p), (e), (c), (s), sizeof(**d), (void **)d)
-int  nvkm_object_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, u32, int size, void **);
-void nvkm_object_destroy(struct nvkm_object *);
-int  nvkm_object_init(struct nvkm_object *);
-int  nvkm_object_fini(struct nvkm_object *, bool suspend);
-
-int _nvkm_object_ctor(struct nvkm_object *, struct nvkm_object *,
-			 struct nvkm_oclass *, void *, u32,
-			 struct nvkm_object **);
-
-extern struct nvkm_ofuncs nvkm_object_ofuncs;
-
-/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
- * ".data". */
-struct nvkm_oclass {
-	u32 handle;
-	struct nvkm_ofuncs * const ofuncs;
-	struct nvkm_omthds * const omthds;
-	struct lock_class_key lock_class_key;
-};
-
-#define nv_oclass(o)    nv_object(o)->oclass
-#define nv_hclass(o)    nv_oclass(o)->handle
-#define nv_iclass(o,i) (nv_hclass(o) & (i))
-#define nv_mclass(o)    nv_iclass(o, NV_OBJECT_CLASS)
-
-static inline struct nvkm_object *
-nv_pclass(struct nvkm_object *parent, u32 oclass)
-{
-	while (parent && !nv_iclass(parent, oclass))
-		parent = parent->parent;
-	return parent;
-}
-
-struct nvkm_omthds {
-	u32 start;
-	u32 limit;
-	int (*call)(struct nvkm_object *, u32, void *, u32);
-};
-
-struct nvkm_event;
-struct nvkm_ofuncs {
-	int  (*ctor)(struct nvkm_object *, struct nvkm_object *,
-		     struct nvkm_oclass *, void *data, u32 size,
-		     struct nvkm_object **);
-	void (*dtor)(struct nvkm_object *);
-	int  (*init)(struct nvkm_object *);
-	int  (*fini)(struct nvkm_object *, bool suspend);
-	int  (*mthd)(struct nvkm_object *, u32, void *, u32);
-	int  (*ntfy)(struct nvkm_object *, u32, struct nvkm_event **);
-	int  (* map)(struct nvkm_object *, u64 *, u32 *);
-	u8   (*rd08)(struct nvkm_object *, u64 offset);
-	u16  (*rd16)(struct nvkm_object *, u64 offset);
-	u32  (*rd32)(struct nvkm_object *, u64 offset);
-	void (*wr08)(struct nvkm_object *, u64 offset, u8 data);
-	void (*wr16)(struct nvkm_object *, u64 offset, u16 data);
-	void (*wr32)(struct nvkm_object *, u64 offset, u32 data);
-};
-
-static inline struct nvkm_ofuncs *
-nv_ofuncs(void *obj)
-{
-	return nv_oclass(obj)->ofuncs;
-}
-
-int  nvkm_object_ctor(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, void *, u32,
-		      struct nvkm_object **);
-void nvkm_object_ref(struct nvkm_object *, struct nvkm_object **);
-int  nvkm_object_inc(struct nvkm_object *);
-int  nvkm_object_dec(struct nvkm_object *, bool suspend);
-void nvkm_object_debug(void);
-
-static inline int
-nv_exec(void *obj, u32 mthd, void *data, u32 size)
-{
-	struct nvkm_omthds *method = nv_oclass(obj)->omthds;
-
-	while (method && method->call) {
-		if (mthd >= method->start && mthd <= method->limit)
-			return method->call(obj, mthd, data, size);
-		method++;
-	}
-
-	return -EINVAL;
-}
-
-static inline int
-nv_call(void *obj, u32 mthd, u32 data)
-{
-	return nv_exec(obj, mthd, &data, sizeof(data));
-}
-
-static inline u8
-nv_ro08(void *obj, u64 addr)
-{
-	u8 data = nv_ofuncs(obj)->rd08(obj, addr);
-	nv_spam(obj, "nv_ro08 0x%08llx 0x%02x\n", addr, data);
-	return data;
-}
-
-static inline u16
-nv_ro16(void *obj, u64 addr)
-{
-	u16 data = nv_ofuncs(obj)->rd16(obj, addr);
-	nv_spam(obj, "nv_ro16 0x%08llx 0x%04x\n", addr, data);
-	return data;
-}
-
-static inline u32
-nv_ro32(void *obj, u64 addr)
-{
-	u32 data = nv_ofuncs(obj)->rd32(obj, addr);
-	nv_spam(obj, "nv_ro32 0x%08llx 0x%08x\n", addr, data);
-	return data;
-}
-
-static inline void
-nv_wo08(void *obj, u64 addr, u8 data)
-{
-	nv_spam(obj, "nv_wo08 0x%08llx 0x%02x\n", addr, data);
-	nv_ofuncs(obj)->wr08(obj, addr, data);
-}
-
-static inline void
-nv_wo16(void *obj, u64 addr, u16 data)
-{
-	nv_spam(obj, "nv_wo16 0x%08llx 0x%04x\n", addr, data);
-	nv_ofuncs(obj)->wr16(obj, addr, data);
-}
-
-static inline void
-nv_wo32(void *obj, u64 addr, u32 data)
-{
-	nv_spam(obj, "nv_wo32 0x%08llx 0x%08x\n", addr, data);
-	nv_ofuncs(obj)->wr32(obj, addr, data);
-}
-
-static inline u32
-nv_mo32(void *obj, u64 addr, u32 mask, u32 data)
-{
-	u32 temp = nv_ro32(obj, addr);
-	nv_wo32(obj, addr, (temp & ~mask) | data);
-	return temp;
-}
-
-static inline int
-nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
-{
-	unsigned char c1, c2;
-
-	while (len--) {
-		c1 = nv_ro08(obj, addr++);
-		c2 = *(str++);
-		if (c1 != c2)
-			return c1 - c2;
-	}
-	return 0;
-}
 #endif
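
The rework above reduces the class system to one pattern: fill in only the hooks you need in an nvkm_object_func, then construct through nvkm_object_ctor() (or nvkm_object_new_() when the core allocates). A minimal sketch with placeholder names:

	static void *
	example_dtor(struct nvkm_object *object)
	{
		/* the void * return is handed back for the core to free --
		 * an assumption drawn from the signature alone
		 */
		return object;
	}

	static const struct nvkm_object_func
	example_object_func = {
		.dtor = example_dtor,
	};
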
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
new file mode 100644
index 0000000..bd52236
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
@@ -0,0 +1,22 @@
+#ifndef __NVKM_OPROXY_H__
+#define __NVKM_OPROXY_H__
+#define nvkm_oproxy(p) container_of((p), struct nvkm_oproxy, base)
+#include <core/object.h>
+
+struct nvkm_oproxy {
+	const struct nvkm_oproxy_func *func;
+	struct nvkm_object base;
+	struct nvkm_object *object;
+};
+
+struct nvkm_oproxy_func {
+	void (*dtor[2])(struct nvkm_oproxy *);
+	int  (*init[2])(struct nvkm_oproxy *);
+	int  (*fini[2])(struct nvkm_oproxy *, bool suspend);
+};
+
+void nvkm_oproxy_ctor(const struct nvkm_oproxy_func *,
+		      const struct nvkm_oclass *, struct nvkm_oproxy *);
+int  nvkm_oproxy_new_(const struct nvkm_oproxy_func *,
+		      const struct nvkm_oclass *, struct nvkm_oproxy **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
index 532bfa8..80fdc14 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
@@ -4,6 +4,7 @@
 
 const char *nvkm_stropt(const char *optstr, const char *opt, int *len);
 bool nvkm_boolopt(const char *optstr, const char *opt, bool value);
+long nvkm_longopt(const char *optstr, const char *opt, long value);
 int  nvkm_dbgopt(const char *optstr, const char *sub);
 
 /* compares unterminated string 'str' with zero-terminated string 'cmp' */
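
nvkm_longopt() follows the nvkm_boolopt() pattern directly above it: look the option up in the string, fall back to the supplied value when it is absent. A sketch with a placeholder option name, using the device's config string:

	static long
	example_option(struct nvkm_device *device)
	{
		return nvkm_longopt(device->cfgopt, "NvSomething", 0 /*default*/);
	}
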
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h b/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h
deleted file mode 100644
index 837e4fe..0000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/parent.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef __NVKM_PARENT_H__
-#define __NVKM_PARENT_H__
-#include <core/object.h>
-
-struct nvkm_sclass {
-	struct nvkm_sclass *sclass;
-	struct nvkm_engine *engine;
-	struct nvkm_oclass *oclass;
-};
-
-struct nvkm_parent {
-	struct nvkm_object object;
-
-	struct nvkm_sclass *sclass;
-	u64 engine;
-
-	int  (*context_attach)(struct nvkm_object *, struct nvkm_object *);
-	int  (*context_detach)(struct nvkm_object *, bool suspend,
-			       struct nvkm_object *);
-
-	int  (*object_attach)(struct nvkm_object *parent,
-			      struct nvkm_object *object, u32 name);
-	void (*object_detach)(struct nvkm_object *parent, int cookie);
-};
-
-static inline struct nvkm_parent *
-nv_parent(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!(nv_iclass(obj, NV_PARENT_CLASS))))
-		nv_assert("BAD CAST -> NvParent, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
-
-#define nvkm_parent_create(p,e,c,v,s,m,d)                                   \
-	nvkm_parent_create_((p), (e), (c), (v), (s), (m),                   \
-			       sizeof(**d), (void **)d)
-#define nvkm_parent_init(p)                                                 \
-	nvkm_object_init(&(p)->object)
-#define nvkm_parent_fini(p,s)                                               \
-	nvkm_object_fini(&(p)->object, (s))
-
-int  nvkm_parent_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, u32 pclass,
-			    struct nvkm_oclass *, u64 engcls,
-			    int size, void **);
-void nvkm_parent_destroy(struct nvkm_parent *);
-
-void _nvkm_parent_dtor(struct nvkm_object *);
-#define _nvkm_parent_init nvkm_object_init
-#define _nvkm_parent_fini nvkm_object_fini
-
-int nvkm_parent_sclass(struct nvkm_object *, u16 handle,
-		       struct nvkm_object **pengine,
-		       struct nvkm_oclass **poclass);
-int nvkm_parent_lclass(struct nvkm_object *, u32 *, int);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
new file mode 100644
index 0000000..78d41be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
@@ -0,0 +1,14 @@
+#ifndef __NVKM_DEVICE_PCI_H__
+#define __NVKM_DEVICE_PCI_H__
+#include <core/device.h>
+
+struct nvkm_device_pci {
+	struct nvkm_device device;
+	struct pci_dev *pdev;
+	bool suspend;
+};
+
+int nvkm_device_pci_new(struct pci_dev *, const char *cfg, const char *dbg,
+			bool detect, bool mmio, u64 subdev_mask,
+			struct nvkm_device **);
+#endif
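
A hedged sketch of the intended probe-time use, from the signature alone (the flag values below are placeholders):

	static int
	example_pci_probe(struct pci_dev *pdev)
	{
		struct nvkm_device *device;

		return nvkm_device_pci_new(pdev, NULL /*cfg*/, NULL /*dbg*/,
					   true /*detect*/, true /*mmio*/,
					   ~0ULL /*subdev_mask*/, &device);
	}
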
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h b/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h
deleted file mode 100644
index 8364817..0000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/printk.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __NVKM_PRINTK_H__
-#define __NVKM_PRINTK_H__
-#include <core/os.h>
-#include <core/debug.h>
-struct nvkm_object;
-
-void __printf(3, 4)
-nv_printk_(struct nvkm_object *, int, const char *, ...);
-
-#define nv_printk(o,l,f,a...) do {                                             \
-	if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG)                                \
-		nv_printk_(nv_object(o), NV_DBG_##l, f, ##a);                  \
-} while(0)
-
-#define nv_fatal(o,f,a...) nv_printk((o), FATAL, f, ##a)
-#define nv_error(o,f,a...) nv_printk((o), ERROR, f, ##a)
-#define nv_warn(o,f,a...) nv_printk((o), WARN, f, ##a)
-#define nv_info(o,f,a...) nv_printk((o), INFO, f, ##a)
-#define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a)
-#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
-#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
-#define nv_ioctl(o,f,a...) nv_trace(nvkm_client(o), "ioctl: "f, ##a)
-
-#define nv_assert(f,a...) do {                                                 \
-	if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG)                              \
-		nv_printk_(NULL, NV_DBG_FATAL, f "\n", ##a);                   \
-	BUG_ON(1);                                                             \
-} while(0)
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
index cc132ea..5ee6298 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
@@ -2,19 +2,27 @@
 #define __NVKM_RAMHT_H__
 #include <core/gpuobj.h>
 
-struct nvkm_ramht {
-	struct nvkm_gpuobj gpuobj;
-	int bits;
+struct nvkm_ramht_data {
+	struct nvkm_gpuobj *inst;
+	int chid;
+	u32 handle;
 };
 
-int  nvkm_ramht_insert(struct nvkm_ramht *, int chid, u32 handle, u32 context);
-void nvkm_ramht_remove(struct nvkm_ramht *, int cookie);
-int  nvkm_ramht_new(struct nvkm_object *, struct nvkm_object *, u32 size,
-		    u32 align, struct nvkm_ramht **);
+struct nvkm_ramht {
+	struct nvkm_device *device;
+	struct nvkm_gpuobj *parent;
+	struct nvkm_gpuobj *gpuobj;
+	int size;
+	int bits;
+	struct nvkm_ramht_data data[];
+};
 
-static inline void
-nvkm_ramht_ref(struct nvkm_ramht *obj, struct nvkm_ramht **ref)
-{
-	nvkm_gpuobj_ref(&obj->gpuobj, (struct nvkm_gpuobj **)ref);
-}
+int  nvkm_ramht_new(struct nvkm_device *, u32 size, u32 align,
+		    struct nvkm_gpuobj *, struct nvkm_ramht **);
+void nvkm_ramht_del(struct nvkm_ramht **);
+int  nvkm_ramht_insert(struct nvkm_ramht *, struct nvkm_object *,
+		       int chid, int addr, u32 handle, u32 context);
+void nvkm_ramht_remove(struct nvkm_ramht *, int cookie);
+struct nvkm_gpuobj *
+nvkm_ramht_search(struct nvkm_ramht *, int chid, u32 handle);
 #endif
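
A hedged sketch of the reworked RAMHT interface; the chid/addr/handle/context values are placeholders, and the assumption that nvkm_ramht_insert() returns a non-negative cookie on success is inferred only from nvkm_ramht_remove() taking one:

	static int
	example_ramht(struct nvkm_device *device, struct nvkm_object *object)
	{
		struct nvkm_ramht *ramht = NULL;
		int cookie, ret;

		ret = nvkm_ramht_new(device, 0x8000, 0, NULL, &ramht);
		if (ret)
			return ret;

		cookie = nvkm_ramht_insert(ramht, object, 0 /*chid*/, 0 /*addr*/,
					   0x10000000 /*handle*/, 0 /*context*/);
		if (cookie >= 0)
			nvkm_ramht_remove(ramht, cookie);

		nvkm_ramht_del(&ramht);
		return cookie < 0 ? cookie : 0;
	}
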
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 6fdc391..3b5dc9c6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -1,119 +1,50 @@
 #ifndef __NVKM_SUBDEV_H__
 #define __NVKM_SUBDEV_H__
-#include <core/object.h>
-#include <core/devidx.h>
-
-#define NV_SUBDEV_(sub,var) (NV_SUBDEV_CLASS | ((var) << 8) | (sub))
-#define NV_SUBDEV(name,var)  NV_SUBDEV_(NVDEV_SUBDEV_##name, (var))
+#include <core/device.h>
 
 struct nvkm_subdev {
-	struct nvkm_object object;
+	const struct nvkm_subdev_func *func;
+	struct nvkm_device *device;
+	enum nvkm_devidx index;
+	u32 pmc_enable;
 	struct mutex mutex;
-	const char *name;
-	void __iomem *mmio;
 	u32 debug;
-	u32 unit;
 
+	bool oneinit;
+};
+
+struct nvkm_subdev_func {
+	void *(*dtor)(struct nvkm_subdev *);
+	int (*preinit)(struct nvkm_subdev *);
+	int (*oneinit)(struct nvkm_subdev *);
+	int (*init)(struct nvkm_subdev *);
+	int (*fini)(struct nvkm_subdev *, bool suspend);
 	void (*intr)(struct nvkm_subdev *);
 };
 
-static inline struct nvkm_subdev *
-nv_subdev(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_SUBDEV_CLASS)))
-		nv_assert("BAD CAST -> NvSubDev, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
-
-static inline int
-nv_subidx(struct nvkm_subdev *subdev)
-{
-	return nv_hclass(subdev) & 0xff;
-}
-
-struct nvkm_subdev *nvkm_subdev(void *obj, int idx);
-
-#define nvkm_subdev_create(p,e,o,v,s,f,d)                                   \
-	nvkm_subdev_create_((p), (e), (o), (v), (s), (f),                   \
-			       sizeof(**d),(void **)d)
-
-int  nvkm_subdev_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, u32 pclass,
-			    const char *sname, const char *fname,
-			    int size, void **);
-void nvkm_subdev_destroy(struct nvkm_subdev *);
+extern const char *nvkm_subdev_name[NVKM_SUBDEV_NR];
+void nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
+		      int index, u32 pmc_enable, struct nvkm_subdev *);
+void nvkm_subdev_del(struct nvkm_subdev **);
+int  nvkm_subdev_preinit(struct nvkm_subdev *);
 int  nvkm_subdev_init(struct nvkm_subdev *);
 int  nvkm_subdev_fini(struct nvkm_subdev *, bool suspend);
-void nvkm_subdev_reset(struct nvkm_object *);
+void nvkm_subdev_intr(struct nvkm_subdev *);
 
-void _nvkm_subdev_dtor(struct nvkm_object *);
-int  _nvkm_subdev_init(struct nvkm_object *);
-int  _nvkm_subdev_fini(struct nvkm_object *, bool suspend);
-
-#define s_printk(s,l,f,a...) do {                                              \
-	if ((s)->debug >= OS_DBG_##l) {                                        \
-		nv_printk((s)->base.parent, (s)->name, l, f, ##a);             \
+/* subdev logging */
+#define nvkm_printk_(s,l,p,f,a...) do {                                        \
+	struct nvkm_subdev *_subdev = (s);                                     \
+	if (_subdev->debug >= (l)) {                                           \
+		dev_##p(_subdev->device->dev, "%s: "f,                         \
+			nvkm_subdev_name[_subdev->index], ##a);                \
 	}                                                                      \
 } while(0)
-
-static inline u8
-nv_rd08(void *obj, u32 addr)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	u8 data = ioread8(subdev->mmio + addr);
-	nv_spam(subdev, "nv_rd08 0x%06x 0x%02x\n", addr, data);
-	return data;
-}
-
-static inline u16
-nv_rd16(void *obj, u32 addr)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	u16 data = ioread16_native(subdev->mmio + addr);
-	nv_spam(subdev, "nv_rd16 0x%06x 0x%04x\n", addr, data);
-	return data;
-}
-
-static inline u32
-nv_rd32(void *obj, u32 addr)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	u32 data = ioread32_native(subdev->mmio + addr);
-	nv_spam(subdev, "nv_rd32 0x%06x 0x%08x\n", addr, data);
-	return data;
-}
-
-static inline void
-nv_wr08(void *obj, u32 addr, u8 data)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	nv_spam(subdev, "nv_wr08 0x%06x 0x%02x\n", addr, data);
-	iowrite8(data, subdev->mmio + addr);
-}
-
-static inline void
-nv_wr16(void *obj, u32 addr, u16 data)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	nv_spam(subdev, "nv_wr16 0x%06x 0x%04x\n", addr, data);
-	iowrite16_native(data, subdev->mmio + addr);
-}
-
-static inline void
-nv_wr32(void *obj, u32 addr, u32 data)
-{
-	struct nvkm_subdev *subdev = nv_subdev(obj);
-	nv_spam(subdev, "nv_wr32 0x%06x 0x%08x\n", addr, data);
-	iowrite32_native(data, subdev->mmio + addr);
-}
-
-static inline u32
-nv_mask(void *obj, u32 addr, u32 mask, u32 data)
-{
-	u32 temp = nv_rd32(obj, addr);
-	nv_wr32(obj, addr, (temp & ~mask) | data);
-	return temp;
-}
+#define nvkm_printk(s,l,p,f,a...) nvkm_printk_((s), NV_DBG_##l, p, f, ##a)
+#define nvkm_fatal(s,f,a...) nvkm_printk((s), FATAL,   crit, f, ##a)
+#define nvkm_error(s,f,a...) nvkm_printk((s), ERROR,    err, f, ##a)
+#define nvkm_warn(s,f,a...)  nvkm_printk((s),  WARN, notice, f, ##a)
+#define nvkm_info(s,f,a...)  nvkm_printk((s),  INFO,   info, f, ##a)
+#define nvkm_debug(s,f,a...) nvkm_printk((s), DEBUG,   info, f, ##a)
+#define nvkm_trace(s,f,a...) nvkm_printk((s), TRACE,   info, f, ##a)
+#define nvkm_spam(s,f,a...)  nvkm_printk((s),  SPAM,    dbg, f, ##a)
 #endif
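
The per-subdev logging macros above gate on the subdev's own ->debug level and prefix messages with its name from nvkm_subdev_name[]; for example:

	static void
	example_log(struct nvkm_subdev *subdev)
	{
		nvkm_error(subdev, "unexpected intr %08x\n", 0xdeadbeef);
		nvkm_debug(subdev, "shown only at NV_DBG_DEBUG or above\n");
	}
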
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
new file mode 100644
index 0000000..5aa2480
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -0,0 +1,35 @@
+#ifndef __NVKM_DEVICE_TEGRA_H__
+#define __NVKM_DEVICE_TEGRA_H__
+#include <core/device.h>
+#include <core/mm.h>
+
+struct nvkm_device_tegra {
+	struct nvkm_device device;
+	struct platform_device *pdev;
+	int irq;
+
+	struct reset_control *rst;
+	struct clk *clk;
+	struct clk *clk_pwr;
+
+	struct regulator *vdd;
+
+	struct {
+		/*
+		 * Protects accesses to mm from subsystems
+		 */
+		struct mutex mutex;
+
+		struct nvkm_mm mm;
+		struct iommu_domain *domain;
+		unsigned long pgshift;
+	} iommu;
+
+	int gpu_speedo;
+};
+
+int nvkm_device_tegra_new(struct platform_device *,
+			  const char *cfg, const char *dbg,
+			  bool detect, bool mmio, u64 subdev_mask,
+			  struct nvkm_device **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
index e489bee..9048205 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
@@ -1,5 +1,5 @@
 #ifndef __NVKM_BSP_H__
 #define __NVKM_BSP_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g84_bsp_oclass;
+#include <engine/xtensa.h>
+int g84_bsp_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index e832f72..e2e22cd 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -1,16 +1,9 @@
 #ifndef __NVKM_CE_H__
 #define __NVKM_CE_H__
-#include <core/engine.h>
+#include <engine/falcon.h>
 
-void gt215_ce_intr(struct nvkm_subdev *);
-
-extern struct nvkm_oclass gt215_ce_oclass;
-extern struct nvkm_oclass gf100_ce0_oclass;
-extern struct nvkm_oclass gf100_ce1_oclass;
-extern struct nvkm_oclass gk104_ce0_oclass;
-extern struct nvkm_oclass gk104_ce1_oclass;
-extern struct nvkm_oclass gk104_ce2_oclass;
-extern struct nvkm_oclass gm204_ce0_oclass;
-extern struct nvkm_oclass gm204_ce1_oclass;
-extern struct nvkm_oclass gm204_ce2_oclass;
+int gt215_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gm204_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
index 57c29e9..03fa57a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
@@ -1,5 +1,5 @@
 #ifndef __NVKM_CIPHER_H__
 #define __NVKM_CIPHER_H__
 #include <core/engine.h>
-extern struct nvkm_oclass g84_cipher_oclass;
+int g84_cipher_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h
deleted file mode 100644
index 5d4805e..0000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/device.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef __NOUVEAU_SUBDEV_DEVICE_H__
-#define __NOUVEAU_SUBDEV_DEVICE_H__
-
-#include <core/device.h>
-
-struct platform_device;
-
-enum nv_bus_type {
-	NOUVEAU_BUS_PCI,
-	NOUVEAU_BUS_PLATFORM,
-};
-
-#define nouveau_device_create(p,t,n,s,c,d,u)                                   \
-	nouveau_device_create_((void *)(p), (t), (n), (s), (c), (d),           \
-			       sizeof(**u), (void **)u)
-
-int  nouveau_device_create_(void *, enum nv_bus_type type, u64 name,
-			    const char *sname, const char *cfg, const char *dbg,
-			    int, void **);
-
-int nv04_identify(struct nouveau_device *);
-int nv10_identify(struct nouveau_device *);
-int nv20_identify(struct nouveau_device *);
-int nv30_identify(struct nouveau_device *);
-int nv40_identify(struct nouveau_device *);
-int nv50_identify(struct nouveau_device *);
-int nvc0_identify(struct nouveau_device *);
-int nve0_identify(struct nouveau_device *);
-int gm100_identify(struct nouveau_device *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index a5e1ed8..efc74d0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -1,32 +1,35 @@
 #ifndef __NVKM_DISP_H__
 #define __NVKM_DISP_H__
+#define nvkm_disp(p) container_of((p), struct nvkm_disp, engine)
 #include <core/engine.h>
 #include <core/event.h>
 
 struct nvkm_disp {
-	struct nvkm_engine base;
+	const struct nvkm_disp_func *func;
+	struct nvkm_engine engine;
+
+	struct nvkm_oproxy *client;
 
 	struct list_head outp;
+	struct list_head conn;
 
 	struct nvkm_event hpd;
 	struct nvkm_event vblank;
+
+	struct {
+		int nr;
+	} head;
 };
 
-static inline struct nvkm_disp *
-nvkm_disp(void *obj)
-{
-	return (void *)nvkm_engine(obj, NVDEV_ENGINE_DISP);
-}
-
-extern struct nvkm_oclass *nv04_disp_oclass;
-extern struct nvkm_oclass *nv50_disp_oclass;
-extern struct nvkm_oclass *g84_disp_oclass;
-extern struct nvkm_oclass *gt200_disp_oclass;
-extern struct nvkm_oclass *g94_disp_oclass;
-extern struct nvkm_oclass *gt215_disp_oclass;
-extern struct nvkm_oclass *gf110_disp_oclass;
-extern struct nvkm_oclass *gk104_disp_oclass;
-extern struct nvkm_oclass *gk110_disp_oclass;
-extern struct nvkm_oclass *gm107_disp_oclass;
-extern struct nvkm_oclass *gm204_disp_oclass;
+int nv04_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int nv50_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int g84_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gt200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int g94_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gt215_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gf119_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gm204_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
new file mode 100644
index 0000000..114bfb7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -0,0 +1,32 @@
+#ifndef __NVKM_DMA_H__
+#define __NVKM_DMA_H__
+#include <core/engine.h>
+struct nvkm_client;
+
+struct nvkm_dmaobj {
+	const struct nvkm_dmaobj_func *func;
+	struct nvkm_dma *dma;
+
+	struct nvkm_object object;
+	u32 target;
+	u32 access;
+	u64 start;
+	u64 limit;
+
+	struct rb_node rb;
+	u64 handle; /*XXX HANDLE MERGE */
+};
+
+struct nvkm_dma {
+	const struct nvkm_dma_func *func;
+	struct nvkm_engine engine;
+};
+
+struct nvkm_dmaobj *
+nvkm_dma_search(struct nvkm_dma *, struct nvkm_client *, u64 object);
+
+int nv04_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+int gf100_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+int gf119_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h
deleted file mode 100644
index c4fce8a..0000000
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dmaobj.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __NVKM_DMAOBJ_H__
-#define __NVKM_DMAOBJ_H__
-#include <core/engine.h>
-struct nvkm_gpuobj;
-
-struct nvkm_dmaobj {
-	struct nvkm_object base;
-	u32 target;
-	u32 access;
-	u64 start;
-	u64 limit;
-};
-
-struct nvkm_dmaeng {
-	struct nvkm_engine base;
-
-	/* creates a "physical" dma object from a struct nvkm_dmaobj */
-	int (*bind)(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		    struct nvkm_gpuobj **);
-};
-
-extern struct nvkm_oclass *nv04_dmaeng_oclass;
-extern struct nvkm_oclass *nv50_dmaeng_oclass;
-extern struct nvkm_oclass *gf100_dmaeng_oclass;
-extern struct nvkm_oclass *gf110_dmaeng_oclass;
-#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index bd38cf9..81c0bc6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -1,41 +1,18 @@
 #ifndef __NVKM_FALCON_H__
 #define __NVKM_FALCON_H__
-#include <core/engctx.h>
-
-struct nvkm_falcon_chan {
-	struct nvkm_engctx base;
-};
-
-#define nvkm_falcon_context_create(p,e,c,g,s,a,f,d)                         \
-	nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nvkm_falcon_context_destroy(d)                                      \
-	nvkm_engctx_destroy(&(d)->base)
-#define nvkm_falcon_context_init(d)                                         \
-	nvkm_engctx_init(&(d)->base)
-#define nvkm_falcon_context_fini(d,s)                                       \
-	nvkm_engctx_fini(&(d)->base, (s))
-
-#define _nvkm_falcon_context_ctor _nvkm_engctx_ctor
-#define _nvkm_falcon_context_dtor _nvkm_engctx_dtor
-#define _nvkm_falcon_context_init _nvkm_engctx_init
-#define _nvkm_falcon_context_fini _nvkm_engctx_fini
-#define _nvkm_falcon_context_rd32 _nvkm_engctx_rd32
-#define _nvkm_falcon_context_wr32 _nvkm_engctx_wr32
-
-struct nvkm_falcon_data {
-	bool external;
-};
-
+#define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
 #include <core/engine.h>
+struct nvkm_fifo_chan;
 
 struct nvkm_falcon {
-	struct nvkm_engine base;
+	const struct nvkm_falcon_func *func;
+	struct nvkm_engine engine;
 
 	u32 addr;
 	u8  version;
 	u8  secret;
 
-	struct nvkm_gpuobj *core;
+	struct nvkm_memory *core;
 	bool external;
 
 	struct {
@@ -51,31 +28,21 @@
 	} data;
 };
 
-#define nv_falcon(priv) (&(priv)->base)
+int nvkm_falcon_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
+		     int index, bool enable, u32 addr, struct nvkm_engine **);
 
-#define nvkm_falcon_create(p,e,c,b,d,i,f,r)                                 \
-	nvkm_falcon_create_((p), (e), (c), (b), (d), (i), (f),              \
-			       sizeof(**r),(void **)r)
-#define nvkm_falcon_destroy(p)                                              \
-	nvkm_engine_destroy(&(p)->base)
-#define nvkm_falcon_init(p) ({                                              \
-	struct nvkm_falcon *falcon = (p);                                   \
-	_nvkm_falcon_init(nv_object(falcon));                               \
-})
-#define nvkm_falcon_fini(p,s) ({                                            \
-	struct nvkm_falcon *falcon = (p);                                   \
-	_nvkm_falcon_fini(nv_object(falcon), (s));                          \
-})
-
-int nvkm_falcon_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *, u32, bool, const char *,
-			   const char *, int, void **);
-
-void nvkm_falcon_intr(struct nvkm_subdev *subdev);
-
-#define _nvkm_falcon_dtor _nvkm_engine_dtor
-int  _nvkm_falcon_init(struct nvkm_object *);
-int  _nvkm_falcon_fini(struct nvkm_object *, bool);
-u32  _nvkm_falcon_rd32(struct nvkm_object *, u64);
-void _nvkm_falcon_wr32(struct nvkm_object *, u64, u32);
+struct nvkm_falcon_func {
+	struct {
+		u32 *data;
+		u32  size;
+	} code;
+	struct {
+		u32 *data;
+		u32  size;
+	} data;
+	u32 pmc_enable;
+	void (*init)(struct nvkm_falcon *);
+	void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *);
+	struct nvkm_sclass sclass[];
+};
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 97cdeab..9e664495 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -1,127 +1,67 @@
 #ifndef __NVKM_FIFO_H__
 #define __NVKM_FIFO_H__
-#include <core/namedb.h>
-
-struct nvkm_fifo_chan {
-	struct nvkm_namedb namedb;
-	struct nvkm_dmaobj *pushdma;
-	struct nvkm_gpuobj *pushgpu;
-	void __iomem *user;
-	u64 addr;
-	u32 size;
-	u16 chid;
-	atomic_t refcnt; /* NV04_NVSW_SET_REF */
-};
-
-static inline struct nvkm_fifo_chan *
-nvkm_fifo_chan(void *obj)
-{
-	return (void *)nv_namedb(obj);
-}
-
-#define nvkm_fifo_channel_create(p,e,c,b,a,s,n,m,d)                         \
-	nvkm_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n),        \
-				     (m), sizeof(**d), (void **)d)
-#define nvkm_fifo_channel_init(p)                                           \
-	nvkm_namedb_init(&(p)->namedb)
-#define nvkm_fifo_channel_fini(p,s)                                         \
-	nvkm_namedb_fini(&(p)->namedb, (s))
-
-int  nvkm_fifo_channel_create_(struct nvkm_object *,
-				  struct nvkm_object *,
-				  struct nvkm_oclass *,
-				  int bar, u32 addr, u32 size, u32 push,
-				  u64 engmask, int len, void **);
-void nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *);
-
-#define _nvkm_fifo_channel_init _nvkm_namedb_init
-#define _nvkm_fifo_channel_fini _nvkm_namedb_fini
-
-void _nvkm_fifo_channel_dtor(struct nvkm_object *);
-int  _nvkm_fifo_channel_map(struct nvkm_object *, u64 *, u32 *);
-u32  _nvkm_fifo_channel_rd32(struct nvkm_object *, u64);
-void _nvkm_fifo_channel_wr32(struct nvkm_object *, u64, u32);
-int  _nvkm_fifo_channel_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
-
-#include <core/gpuobj.h>
-
-struct nvkm_fifo_base {
-	struct nvkm_gpuobj gpuobj;
-};
-
-#define nvkm_fifo_context_create(p,e,c,g,s,a,f,d)                           \
-	nvkm_gpuobj_create((p), (e), (c), 0, (g), (s), (a), (f), (d))
-#define nvkm_fifo_context_destroy(p)                                        \
-	nvkm_gpuobj_destroy(&(p)->gpuobj)
-#define nvkm_fifo_context_init(p)                                           \
-	nvkm_gpuobj_init(&(p)->gpuobj)
-#define nvkm_fifo_context_fini(p,s)                                         \
-	nvkm_gpuobj_fini(&(p)->gpuobj, (s))
-
-#define _nvkm_fifo_context_dtor _nvkm_gpuobj_dtor
-#define _nvkm_fifo_context_init _nvkm_gpuobj_init
-#define _nvkm_fifo_context_fini _nvkm_gpuobj_fini
-#define _nvkm_fifo_context_rd32 _nvkm_gpuobj_rd32
-#define _nvkm_fifo_context_wr32 _nvkm_gpuobj_wr32
-
 #include <core/engine.h>
 #include <core/event.h>
 
-struct nvkm_fifo {
-	struct nvkm_engine base;
+#define NVKM_FIFO_CHID_NR 4096
 
-	struct nvkm_event cevent; /* channel creation event */
-	struct nvkm_event uevent; /* async user trigger */
-
-	struct nvkm_object **channel;
-	spinlock_t lock;
-	u16 min;
-	u16 max;
-
-	int  (*chid)(struct nvkm_fifo *, struct nvkm_object *);
-	void (*pause)(struct nvkm_fifo *, unsigned long *);
-	void (*start)(struct nvkm_fifo *, unsigned long *);
+struct nvkm_fifo_engn {
+	struct nvkm_object *object;
+	int refcount;
+	int usecount;
 };
 
-static inline struct nvkm_fifo *
-nvkm_fifo(void *obj)
-{
-	return (void *)nvkm_engine(obj, NVDEV_ENGINE_FIFO);
-}
+struct nvkm_fifo_chan {
+	const struct nvkm_fifo_chan_func *func;
+	struct nvkm_fifo *fifo;
+	u64 engines;
+	struct nvkm_object object;
 
-#define nvkm_fifo_create(o,e,c,fc,lc,d)                                     \
-	nvkm_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d)
-#define nvkm_fifo_init(p)                                                   \
-	nvkm_engine_init(&(p)->base)
-#define nvkm_fifo_fini(p,s)                                                 \
-	nvkm_engine_fini(&(p)->base, (s))
+	struct list_head head;
+	u16 chid;
+	struct nvkm_gpuobj *inst;
+	struct nvkm_gpuobj *push;
+	struct nvkm_vm *vm;
+	void __iomem *user;
+	u64 addr;
+	u32 size;
 
-int nvkm_fifo_create_(struct nvkm_object *, struct nvkm_object *,
-			 struct nvkm_oclass *, int min, int max,
-			 int size, void **);
-void nvkm_fifo_destroy(struct nvkm_fifo *);
-const char *
-nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid);
+	struct nvkm_fifo_engn engn[NVKM_SUBDEV_NR];
+};
 
-#define _nvkm_fifo_init _nvkm_engine_init
-#define _nvkm_fifo_fini _nvkm_engine_fini
+struct nvkm_fifo {
+	const struct nvkm_fifo_func *func;
+	struct nvkm_engine engine;
 
-extern struct nvkm_oclass *nv04_fifo_oclass;
-extern struct nvkm_oclass *nv10_fifo_oclass;
-extern struct nvkm_oclass *nv17_fifo_oclass;
-extern struct nvkm_oclass *nv40_fifo_oclass;
-extern struct nvkm_oclass *nv50_fifo_oclass;
-extern struct nvkm_oclass *g84_fifo_oclass;
-extern struct nvkm_oclass *gf100_fifo_oclass;
-extern struct nvkm_oclass *gk104_fifo_oclass;
-extern struct nvkm_oclass *gk20a_fifo_oclass;
-extern struct nvkm_oclass *gk208_fifo_oclass;
-extern struct nvkm_oclass *gm204_fifo_oclass;
+	DECLARE_BITMAP(mask, NVKM_FIFO_CHID_NR);
+	int nr;
+	struct list_head chan;
+	spinlock_t lock;
 
-int  nvkm_fifo_uevent_ctor(struct nvkm_object *, void *, u32,
-			   struct nvkm_notify *);
-void nvkm_fifo_uevent(struct nvkm_fifo *);
+	struct nvkm_event uevent; /* async user trigger */
+	struct nvkm_event cevent; /* channel creation event */
+};
 
-void nv04_fifo_intr(struct nvkm_subdev *);
-int  nv04_fifo_context_attach(struct nvkm_object *, struct nvkm_object *);
+void nvkm_fifo_pause(struct nvkm_fifo *, unsigned long *);
+void nvkm_fifo_start(struct nvkm_fifo *, unsigned long *);
+
+void nvkm_fifo_chan_put(struct nvkm_fifo *, unsigned long flags,
+			struct nvkm_fifo_chan **);
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_inst(struct nvkm_fifo *, u64 inst, unsigned long *flags);
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_chid(struct nvkm_fifo *, int chid, unsigned long *flags);
+
+int nv04_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv10_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv17_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv40_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int nv50_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int g84_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gf100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gk104_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gk208_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gm204_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 #endif
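
[Editor's note, not part of the diff: channel lookup changes shape as well. Instead of indexing the removed fifo->channel[] array under caller-managed locking, the new helpers appear to return a channel with the fifo lock held (reported via 'flags') and hand it back through nvkm_fifo_chan_put(). A sketch assuming that locking contract:]

    /* illustrative channel lookup with the new chid/put helpers */
    static void
    example_inspect_chan(struct nvkm_fifo *fifo, int chid)
    {
    	unsigned long flags;
    	struct nvkm_fifo_chan *chan =
    		nvkm_fifo_chan_chid(fifo, chid, &flags);
    	if (chan) {
    		/* chan->chid, chan->inst, etc. are stable here */
    		nvkm_fifo_chan_put(fifo, flags, &chan);
    	}
    }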
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 7cbe202..f126e54 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -1,88 +1,46 @@
 #ifndef __NVKM_GR_H__
 #define __NVKM_GR_H__
-#include <core/engctx.h>
-
-struct nvkm_gr_chan {
-	struct nvkm_engctx base;
-};
-
-#define nvkm_gr_context_create(p,e,c,g,s,a,f,d)                          \
-	nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nvkm_gr_context_destroy(d)                                       \
-	nvkm_engctx_destroy(&(d)->base)
-#define nvkm_gr_context_init(d)                                          \
-	nvkm_engctx_init(&(d)->base)
-#define nvkm_gr_context_fini(d,s)                                        \
-	nvkm_engctx_fini(&(d)->base, (s))
-
-#define _nvkm_gr_context_dtor _nvkm_engctx_dtor
-#define _nvkm_gr_context_init _nvkm_engctx_init
-#define _nvkm_gr_context_fini _nvkm_engctx_fini
-#define _nvkm_gr_context_rd32 _nvkm_engctx_rd32
-#define _nvkm_gr_context_wr32 _nvkm_engctx_wr32
-
 #include <core/engine.h>
 
 struct nvkm_gr {
-	struct nvkm_engine base;
-
-	/* Returns chipset-specific counts of units packed into an u64.
-	 */
-	u64 (*units)(struct nvkm_gr *);
+	const struct nvkm_gr_func *func;
+	struct nvkm_engine engine;
 };
 
-static inline struct nvkm_gr *
-nvkm_gr(void *obj)
-{
-	return (void *)nvkm_engine(obj, NVDEV_ENGINE_GR);
-}
+u64 nvkm_gr_units(struct nvkm_gr *);
+int nvkm_gr_tlb_flush(struct nvkm_gr *);
 
-#define nvkm_gr_create(p,e,c,y,d)                                        \
-	nvkm_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d))
-#define nvkm_gr_destroy(d)                                               \
-	nvkm_engine_destroy(&(d)->base)
-#define nvkm_gr_init(d)                                                  \
-	nvkm_engine_init(&(d)->base)
-#define nvkm_gr_fini(d,s)                                                \
-	nvkm_engine_fini(&(d)->base, (s))
-
-#define _nvkm_gr_dtor _nvkm_engine_dtor
-#define _nvkm_gr_init _nvkm_engine_init
-#define _nvkm_gr_fini _nvkm_engine_fini
-
-extern struct nvkm_oclass nv04_gr_oclass;
-extern struct nvkm_oclass nv10_gr_oclass;
-extern struct nvkm_oclass nv20_gr_oclass;
-extern struct nvkm_oclass nv25_gr_oclass;
-extern struct nvkm_oclass nv2a_gr_oclass;
-extern struct nvkm_oclass nv30_gr_oclass;
-extern struct nvkm_oclass nv34_gr_oclass;
-extern struct nvkm_oclass nv35_gr_oclass;
-extern struct nvkm_oclass nv40_gr_oclass;
-extern struct nvkm_oclass nv50_gr_oclass;
-extern struct nvkm_oclass *gf100_gr_oclass;
-extern struct nvkm_oclass *gf108_gr_oclass;
-extern struct nvkm_oclass *gf104_gr_oclass;
-extern struct nvkm_oclass *gf110_gr_oclass;
-extern struct nvkm_oclass *gf117_gr_oclass;
-extern struct nvkm_oclass *gf119_gr_oclass;
-extern struct nvkm_oclass *gk104_gr_oclass;
-extern struct nvkm_oclass *gk20a_gr_oclass;
-extern struct nvkm_oclass *gk110_gr_oclass;
-extern struct nvkm_oclass *gk110b_gr_oclass;
-extern struct nvkm_oclass *gk208_gr_oclass;
-extern struct nvkm_oclass *gm107_gr_oclass;
-extern struct nvkm_oclass *gm204_gr_oclass;
-extern struct nvkm_oclass *gm206_gr_oclass;
-
-#include <core/enum.h>
-
-extern const struct nvkm_bitfield nv04_gr_nsource[];
-extern struct nvkm_ofuncs nv04_gr_ofuncs;
-bool nv04_gr_idle(void *obj);
-
-extern const struct nvkm_bitfield nv10_gr_intr_name[];
-extern const struct nvkm_bitfield nv10_gr_nstatus[];
-
-extern const struct nvkm_enum nv50_data_error_names[];
+int nv04_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv10_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv15_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv17_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv20_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv25_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv2a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv30_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv34_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv35_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv40_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv44_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int nv50_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int g84_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gt200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int mcp79_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gt215_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int mcp89_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf108_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf110_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf117_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gf119_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk104_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk110_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk110b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk208_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm204_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm206_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
index 4e500b3..257738e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
@@ -1,62 +1,9 @@
 #ifndef __NVKM_MPEG_H__
 #define __NVKM_MPEG_H__
-#include <core/engctx.h>
-
-struct nvkm_mpeg_chan {
-	struct nvkm_engctx base;
-};
-
-#define nvkm_mpeg_context_create(p,e,c,g,s,a,f,d)                           \
-	nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nvkm_mpeg_context_destroy(d)                                        \
-	nvkm_engctx_destroy(&(d)->base)
-#define nvkm_mpeg_context_init(d)                                           \
-	nvkm_engctx_init(&(d)->base)
-#define nvkm_mpeg_context_fini(d,s)                                         \
-	nvkm_engctx_fini(&(d)->base, (s))
-
-#define _nvkm_mpeg_context_dtor _nvkm_engctx_dtor
-#define _nvkm_mpeg_context_init _nvkm_engctx_init
-#define _nvkm_mpeg_context_fini _nvkm_engctx_fini
-#define _nvkm_mpeg_context_rd32 _nvkm_engctx_rd32
-#define _nvkm_mpeg_context_wr32 _nvkm_engctx_wr32
-
 #include <core/engine.h>
-
-struct nvkm_mpeg {
-	struct nvkm_engine base;
-};
-
-#define nvkm_mpeg_create(p,e,c,d)                                           \
-	nvkm_engine_create((p), (e), (c), true, "PMPEG", "mpeg", (d))
-#define nvkm_mpeg_destroy(d)                                                \
-	nvkm_engine_destroy(&(d)->base)
-#define nvkm_mpeg_init(d)                                                   \
-	nvkm_engine_init(&(d)->base)
-#define nvkm_mpeg_fini(d,s)                                                 \
-	nvkm_engine_fini(&(d)->base, (s))
-
-#define _nvkm_mpeg_dtor _nvkm_engine_dtor
-#define _nvkm_mpeg_init _nvkm_engine_init
-#define _nvkm_mpeg_fini _nvkm_engine_fini
-
-extern struct nvkm_oclass nv31_mpeg_oclass;
-extern struct nvkm_oclass nv40_mpeg_oclass;
-extern struct nvkm_oclass nv44_mpeg_oclass;
-extern struct nvkm_oclass nv50_mpeg_oclass;
-extern struct nvkm_oclass g84_mpeg_oclass;
-extern struct nvkm_ofuncs nv31_mpeg_ofuncs;
-extern struct nvkm_oclass nv31_mpeg_cclass;
-extern struct nvkm_oclass nv31_mpeg_sclass[];
-extern struct nvkm_oclass nv40_mpeg_sclass[];
-void nv31_mpeg_intr(struct nvkm_subdev *);
-void nv31_mpeg_tile_prog(struct nvkm_engine *, int);
-int  nv31_mpeg_init(struct nvkm_object *);
-
-extern struct nvkm_ofuncs nv50_mpeg_ofuncs;
-int  nv50_mpeg_context_ctor(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, void *, u32,
-			    struct nvkm_object **);
-void nv50_mpeg_intr(struct nvkm_subdev *);
-int  nv50_mpeg_init(struct nvkm_object *);
+int nv31_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int nv40_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int nv44_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int nv50_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
+int g84_mpeg_new(struct nvkm_device *, int index, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
index 54b7672e..08516ca 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
@@ -1,7 +1,8 @@
 #ifndef __NVKM_MSPDEC_H__
 #define __NVKM_MSPDEC_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g98_mspdec_oclass;
-extern struct nvkm_oclass gf100_mspdec_oclass;
-extern struct nvkm_oclass gk104_mspdec_oclass;
+#include <engine/falcon.h>
+int g98_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gt215_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gf100_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gk104_mspdec_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
index c6c69d0..85fd306 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
@@ -1,6 +1,7 @@
 #ifndef __NVKM_MSPPP_H__
 #define __NVKM_MSPPP_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g98_msppp_oclass;
-extern struct nvkm_oclass gf100_msppp_oclass;
+#include <engine/falcon.h>
+int g98_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gt215_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gf100_msppp_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
index 1f193b7..99757ed 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
@@ -1,7 +1,9 @@
 #ifndef __NVKM_MSVLD_H__
 #define __NVKM_MSVLD_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g98_msvld_oclass;
-extern struct nvkm_oclass gf100_msvld_oclass;
-extern struct nvkm_oclass gk104_msvld_oclass;
+#include <engine/falcon.h>
+int g98_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gt215_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int mcp89_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gf100_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gk104_msvld_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
index 93181bb..240855a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
@@ -2,33 +2,24 @@
 #define __NVKM_PM_H__
 #include <core/engine.h>
 
-struct nvkm_perfdom;
-struct nvkm_perfctr;
 struct nvkm_pm {
-	struct nvkm_engine base;
+	const struct nvkm_pm_func *func;
+	struct nvkm_engine engine;
 
-	struct nvkm_perfctx *context;
-	void *profile_data;
+	struct nvkm_object *perfmon;
 
 	struct list_head domains;
+	struct list_head sources;
 	u32 sequence;
-
-	/*XXX: temp for daemon backend */
-	u32 pwr[8];
-	u32 last;
 };
 
-static inline struct nvkm_pm *
-nvkm_pm(void *obj)
-{
-	return (void *)nvkm_engine(obj, NVDEV_ENGINE_PM);
-}
-
-extern struct nvkm_oclass *nv40_pm_oclass;
-extern struct nvkm_oclass *nv50_pm_oclass;
-extern struct nvkm_oclass *g84_pm_oclass;
-extern struct nvkm_oclass *gt215_pm_oclass;
-extern struct nvkm_oclass gf100_pm_oclass;
-extern struct nvkm_oclass gk104_pm_oclass;
-extern struct nvkm_oclass gk110_pm_oclass;
+int nv40_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int nv50_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int g84_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gt200_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gt215_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gf100_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gf108_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gf117_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
+int gk104_pm_new(struct nvkm_device *, int, struct nvkm_pm **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
index 44590a2..7317ef4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
@@ -1,5 +1,5 @@
 #ifndef __NVKM_SEC_H__
 #define __NVKM_SEC_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g98_sec_oclass;
+#include <engine/falcon.h>
+int g98_sec_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
index a529013..096e7db 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
@@ -1,50 +1,18 @@
 #ifndef __NVKM_SW_H__
 #define __NVKM_SW_H__
-#include <core/engctx.h>
-
-struct nvkm_sw_chan {
-	struct nvkm_engctx base;
-
-	int (*flip)(void *);
-	void *flip_data;
-};
-
-#define nvkm_sw_context_create(p,e,c,d)                               \
-	nvkm_engctx_create((p), (e), (c), (p), 0, 0, 0, (d))
-#define nvkm_sw_context_destroy(d)                                    \
-	nvkm_engctx_destroy(&(d)->base)
-#define nvkm_sw_context_init(d)                                       \
-	nvkm_engctx_init(&(d)->base)
-#define nvkm_sw_context_fini(d,s)                                     \
-	nvkm_engctx_fini(&(d)->base, (s))
-
-#define _nvkm_sw_context_dtor _nvkm_engctx_dtor
-#define _nvkm_sw_context_init _nvkm_engctx_init
-#define _nvkm_sw_context_fini _nvkm_engctx_fini
-
 #include <core/engine.h>
 
 struct nvkm_sw {
-	struct nvkm_engine base;
+	const struct nvkm_sw_func *func;
+	struct nvkm_engine engine;
+
+	struct list_head chan;
 };
 
-#define nvkm_sw_create(p,e,c,d)                                       \
-	nvkm_engine_create((p), (e), (c), true, "SW", "software", (d))
-#define nvkm_sw_destroy(d)                                            \
-	nvkm_engine_destroy(&(d)->base)
-#define nvkm_sw_init(d)                                               \
-	nvkm_engine_init(&(d)->base)
-#define nvkm_sw_fini(d,s)                                             \
-	nvkm_engine_fini(&(d)->base, (s))
+bool nvkm_sw_mthd(struct nvkm_sw *sw, int chid, int subc, u32 mthd, u32 data);
 
-#define _nvkm_sw_dtor _nvkm_engine_dtor
-#define _nvkm_sw_init _nvkm_engine_init
-#define _nvkm_sw_fini _nvkm_engine_fini
-
-extern struct nvkm_oclass *nv04_sw_oclass;
-extern struct nvkm_oclass *nv10_sw_oclass;
-extern struct nvkm_oclass *nv50_sw_oclass;
-extern struct nvkm_oclass *gf100_sw_oclass;
-
-void nv04_sw_intr(struct nvkm_subdev *);
+int nv04_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
+int nv10_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
+int nv50_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
+int gf100_sw_new(struct nvkm_device *, int, struct nvkm_sw **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
index 7851f18..616ea91 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
@@ -1,5 +1,5 @@
 #ifndef __NVKM_VP_H__
 #define __NVKM_VP_H__
-#include <core/engine.h>
-extern struct nvkm_oclass g84_vp_oclass;
+#include <engine/xtensa.h>
+int g84_vp_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
index 7a216cc..3128d21 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
@@ -1,35 +1,23 @@
 #ifndef __NVKM_XTENSA_H__
 #define __NVKM_XTENSA_H__
+#define nvkm_xtensa(p) container_of((p), struct nvkm_xtensa, engine)
 #include <core/engine.h>
-struct nvkm_gpuobj;
 
 struct nvkm_xtensa {
-	struct nvkm_engine base;
-
+	const struct nvkm_xtensa_func *func;
 	u32 addr;
-	struct nvkm_gpuobj *gpu_fw;
-	u32 fifo_val;
-	u32 unkd28;
+	struct nvkm_engine engine;
+
+	struct nvkm_memory *gpu_fw;
 };
 
-#define nvkm_xtensa_create(p,e,c,b,d,i,f,r)				\
-	nvkm_xtensa_create_((p), (e), (c), (b), (d), (i), (f),	\
-			       sizeof(**r),(void **)r)
+int nvkm_xtensa_new_(const struct nvkm_xtensa_func *, struct nvkm_device *,
+		     int index, bool enable, u32 addr, struct nvkm_engine **);
 
-int _nvkm_xtensa_engctx_ctor(struct nvkm_object *,
-				struct nvkm_object *,
-				struct nvkm_oclass *, void *, u32,
-				struct nvkm_object **);
-
-void _nvkm_xtensa_intr(struct nvkm_subdev *);
-int nvkm_xtensa_create_(struct nvkm_object *,
-			   struct nvkm_object *,
-			   struct nvkm_oclass *, u32, bool,
-			   const char *, const char *,
-			   int, void **);
-#define _nvkm_xtensa_dtor _nvkm_engine_dtor
-int _nvkm_xtensa_init(struct nvkm_object *);
-int _nvkm_xtensa_fini(struct nvkm_object *, bool);
-u32  _nvkm_xtensa_rd32(struct nvkm_object *, u64);
-void _nvkm_xtensa_wr32(struct nvkm_object *, u64, u32);
+struct nvkm_xtensa_func {
+	u32 pmc_enable;
+	u32 fifo_val;
+	u32 unkd28;
+	struct nvkm_sclass sclass[];
+};
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index c7a007b..d3071b5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -1,33 +1,24 @@
 #ifndef __NVKM_BAR_H__
 #define __NVKM_BAR_H__
 #include <core/subdev.h>
-struct nvkm_mem;
 struct nvkm_vma;
 
 struct nvkm_bar {
-	struct nvkm_subdev base;
+	const struct nvkm_bar_func *func;
+	struct nvkm_subdev subdev;
 
-	int  (*alloc)(struct nvkm_bar *, struct nvkm_object *,
-		      struct nvkm_mem *, struct nvkm_object **);
-
-	int  (*kmap)(struct nvkm_bar *, struct nvkm_mem *, u32 flags,
-		     struct nvkm_vma *);
-	int  (*umap)(struct nvkm_bar *, struct nvkm_mem *, u32 flags,
-		     struct nvkm_vma *);
-	void (*unmap)(struct nvkm_bar *, struct nvkm_vma *);
-	void (*flush)(struct nvkm_bar *);
+	spinlock_t lock;
 
 	/* whether the BAR supports to be ioremapped WC or should be uncached */
 	bool iomap_uncached;
 };
 
-static inline struct nvkm_bar *
-nvkm_bar(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_BAR);
-}
+void nvkm_bar_flush(struct nvkm_bar *);
+struct nvkm_vm *nvkm_bar_kmap(struct nvkm_bar *);
+int nvkm_bar_umap(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
 
-extern struct nvkm_oclass nv50_bar_oclass;
-extern struct nvkm_oclass gf100_bar_oclass;
-extern struct nvkm_oclass gk20a_bar_oclass;
+int nv50_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int g84_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
index cef287e..e39a1fea 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
@@ -3,7 +3,7 @@
 #include <core/subdev.h>
 
 struct nvkm_bios {
-	struct nvkm_subdev base;
+	struct nvkm_subdev subdev;
 	u32 size;
 	u8 *data;
 
@@ -19,14 +19,13 @@
 	} version;
 };
 
-static inline struct nvkm_bios *
-nvkm_bios(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_VBIOS);
-}
-
 u8  nvbios_checksum(const u8 *data, int size);
 u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
+int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len);
 
-extern struct nvkm_oclass nvkm_bios_oclass;
+#define nvbios_rd08(b,o) (b)->data[(o)]
+#define nvbios_rd16(b,o) get_unaligned_le16(&(b)->data[(o)])
+#define nvbios_rd32(b,o) get_unaligned_le32(&(b)->data[(o)])
+
+int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **);
 #endif
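
[Editor's note, not part of the diff: the nvbios_rd08/16/32 macros replace the old nv_ro* object accessors with direct little-endian reads of bios->data, as the bmp.h conversion below shows. A hypothetical example of the same idiom:]

    /* reads the PCI option-ROM signature bytes (0x55, 0xaa) at offset 0 */
    static inline bool
    example_rom_signature_ok(struct nvkm_bios *bios)
    {
    	return bios->size >= 2 && nvbios_rd16(bios, 0x0000) == 0xaa55;
    }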
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
index 4107aa5..3f0c7c4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
@@ -4,8 +4,8 @@
 bmp_version(struct nvkm_bios *bios)
 {
 	if (bios->bmp_offset) {
-		return nv_ro08(bios, bios->bmp_offset + 5) << 8 |
-		       nv_ro08(bios, bios->bmp_offset + 6);
+		return nvbios_rd08(bios, bios->bmp_offset + 5) << 8 |
+		       nvbios_rd08(bios, bios->bmp_offset + 6);
 	}
 
 	return 0x0000;
@@ -15,7 +15,7 @@
 bmp_mem_init_table(struct nvkm_bios *bios)
 {
 	if (bmp_version(bios) >= 0x0300)
-		return nv_ro16(bios, bios->bmp_offset + 24);
+		return nvbios_rd16(bios, bios->bmp_offset + 24);
 	return 0x0000;
 }
 
@@ -23,7 +23,7 @@
 bmp_sdr_seq_table(struct nvkm_bios *bios)
 {
 	if (bmp_version(bios) >= 0x0300)
-		return nv_ro16(bios, bios->bmp_offset + 26);
+		return nvbios_rd16(bios, bios->bmp_offset + 26);
 	return 0x0000;
 }
 
@@ -31,7 +31,7 @@
 bmp_ddr_seq_table(struct nvkm_bios *bios)
 {
 	if (bmp_version(bios) >= 0x0300)
-		return nv_ro16(bios, bios->bmp_offset + 28);
+		return nvbios_rd16(bios, bios->bmp_offset + 28);
 	return 0x0000;
 }
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
index 578a667..4dc1c8a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
@@ -1,5 +1,6 @@
 #ifndef __NVBIOS_INIT_H__
 #define __NVBIOS_INIT_H__
+
 struct nvbios_init {
 	struct nvkm_subdev *subdev;
 	struct nvkm_bios *bios;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
index 4204267..3a9abd3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
@@ -7,6 +7,11 @@
 	unsigned rammap_max;
 	union {
 		struct {
+			unsigned rammap_00_16_20:1;
+			unsigned rammap_00_16_40:1;
+			unsigned rammap_00_17_02:1;
+		};
+		struct {
 			unsigned rammap_10_04_02:1;
 			unsigned rammap_10_04_08:1;
 		};
@@ -32,15 +37,32 @@
 	unsigned ramcfg_ver;
 	unsigned ramcfg_hdr;
 	unsigned ramcfg_timing;
+	unsigned ramcfg_DLLoff;
+	unsigned ramcfg_RON;
 	union {
 		struct {
+			unsigned ramcfg_00_03_01:1;
+			unsigned ramcfg_00_03_02:1;
+			unsigned ramcfg_00_03_08:1;
+			unsigned ramcfg_00_03_10:1;
+			unsigned ramcfg_00_04_02:1;
+			unsigned ramcfg_00_04_04:1;
+			unsigned ramcfg_00_04_20:1;
+			unsigned ramcfg_00_05:8;
+			unsigned ramcfg_00_06:8;
+			unsigned ramcfg_00_07:8;
+			unsigned ramcfg_00_08:8;
+			unsigned ramcfg_00_09:8;
+			unsigned ramcfg_00_0a_0f:4;
+			unsigned ramcfg_00_0a_f0:4;
+		};
+		struct {
 			unsigned ramcfg_10_02_01:1;
 			unsigned ramcfg_10_02_02:1;
 			unsigned ramcfg_10_02_04:1;
 			unsigned ramcfg_10_02_08:1;
 			unsigned ramcfg_10_02_10:1;
 			unsigned ramcfg_10_02_20:1;
-			unsigned ramcfg_10_DLLoff:1;
 			unsigned ramcfg_10_03_0f:4;
 			unsigned ramcfg_10_04_01:1;
 			unsigned ramcfg_10_05:8;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
index 609a905..8d8ee13 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
@@ -7,6 +7,8 @@
 
 u32 nvbios_rammapEe(struct nvkm_bios *, int idx,
 		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u32 nvbios_rammapEp_from_perf(struct nvkm_bios *bios, u32 data, u8 size,
+		    struct nvbios_ramcfg *p);
 u32 nvbios_rammapEp(struct nvkm_bios *, int idx,
 		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *);
 u32 nvbios_rammapEm(struct nvkm_bios *, u16 mhz,
@@ -15,6 +17,8 @@
 u32 nvbios_rammapSe(struct nvkm_bios *, u32 data,
 		    u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
 		    u8 *ver, u8 *hdr);
+u32 nvbios_rammapSp_from_perf(struct nvkm_bios *bios, u32 data, u8 size, int idx,
+		    struct nvbios_ramcfg *p);
 u32 nvbios_rammapSp(struct nvkm_bios *, u32 data,
 		    u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
 		    u8 *ver, u8 *hdr, struct nvbios_ramcfg *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
index fba83c0..6a04d9c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
@@ -2,49 +2,23 @@
 #define __NVKM_BUS_H__
 #include <core/subdev.h>
 
-struct nvkm_bus_intr {
-	u32 stat;
-	u32 unit;
-};
-
 struct nvkm_bus {
-	struct nvkm_subdev base;
-	int (*hwsq_exec)(struct nvkm_bus *, u32 *, u32);
-	u32 hwsq_size;
+	const struct nvkm_bus_func *func;
+	struct nvkm_subdev subdev;
 };
 
-static inline struct nvkm_bus *
-nvkm_bus(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_BUS);
-}
-
-#define nvkm_bus_create(p, e, o, d)                                         \
-	nvkm_subdev_create_((p), (e), (o), 0, "PBUS", "master",             \
-			       sizeof(**d), (void **)d)
-#define nvkm_bus_destroy(p)                                                 \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_bus_init(p)                                                    \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_bus_fini(p, s)                                                 \
-	nvkm_subdev_fini(&(p)->base, (s))
-
-#define _nvkm_bus_dtor _nvkm_subdev_dtor
-#define _nvkm_bus_init _nvkm_subdev_init
-#define _nvkm_bus_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass *nv04_bus_oclass;
-extern struct nvkm_oclass *nv31_bus_oclass;
-extern struct nvkm_oclass *nv50_bus_oclass;
-extern struct nvkm_oclass *g94_bus_oclass;
-extern struct nvkm_oclass *gf100_bus_oclass;
-
 /* interface to sequencer */
 struct nvkm_hwsq;
-int  nvkm_hwsq_init(struct nvkm_bus *, struct nvkm_hwsq **);
+int  nvkm_hwsq_init(struct nvkm_subdev *, struct nvkm_hwsq **);
 int  nvkm_hwsq_fini(struct nvkm_hwsq **, bool exec);
 void nvkm_hwsq_wr32(struct nvkm_hwsq *, u32 addr, u32 data);
 void nvkm_hwsq_setf(struct nvkm_hwsq *, u8 flag, int data);
 void nvkm_hwsq_wait(struct nvkm_hwsq *, u8 flag, u8 data);
 void nvkm_hwsq_nsec(struct nvkm_hwsq *, u32 nsec);
+
+int nv04_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int nv31_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int nv50_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int g94_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
+int gf100_bus_new(struct nvkm_device *, int, struct nvkm_bus **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index f5d30385..8708f0a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -71,9 +71,10 @@
 };
 
 struct nvkm_clk {
-	struct nvkm_subdev base;
+	const struct nvkm_clk_func *func;
+	struct nvkm_subdev subdev;
 
-	struct nvkm_domain *domains;
+	const struct nvkm_domain *domains;
 	struct nvkm_pstate bstate;
 
 	struct list_head states;
@@ -94,68 +95,27 @@
 
 	bool allow_reclock;
 
-	int  (*read)(struct nvkm_clk *, enum nv_clk_src);
-	int  (*calc)(struct nvkm_clk *, struct nvkm_cstate *);
-	int  (*prog)(struct nvkm_clk *);
-	void (*tidy)(struct nvkm_clk *);
-
 	/*XXX: die, these are here *only* to support the completely
-	 *     bat-shit insane what-was-nvkm_hw.c code
+	 *     bat-shit insane what-was-nouveau_hw.c code
 	 */
 	int (*pll_calc)(struct nvkm_clk *, struct nvbios_pll *, int clk,
 			struct nvkm_pll_vals *pv);
 	int (*pll_prog)(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *pv);
 };
 
-static inline struct nvkm_clk *
-nvkm_clk(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_CLK);
-}
-
-#define nvkm_clk_create(p,e,o,i,r,s,n,d)                                  \
-	nvkm_clk_create_((p), (e), (o), (i), (r), (s), (n), sizeof(**d),  \
-			      (void **)d)
-#define nvkm_clk_destroy(p) ({                                            \
-	struct nvkm_clk *clk = (p);                                       \
-	_nvkm_clk_dtor(nv_object(clk));                                   \
-})
-#define nvkm_clk_init(p) ({                                               \
-	struct nvkm_clk *clk = (p);                                       \
-	_nvkm_clk_init(nv_object(clk));                                   \
-})
-#define nvkm_clk_fini(p,s) ({                                             \
-	struct nvkm_clk *clk = (p);                                       \
-	_nvkm_clk_fini(nv_object(clk), (s));                              \
-})
-
-int  nvkm_clk_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *,
-			   struct nvkm_domain *, struct nvkm_pstate *,
-			   int, bool, int, void **);
-void _nvkm_clk_dtor(struct nvkm_object *);
-int  _nvkm_clk_init(struct nvkm_object *);
-int  _nvkm_clk_fini(struct nvkm_object *, bool);
-
-extern struct nvkm_oclass nv04_clk_oclass;
-extern struct nvkm_oclass nv40_clk_oclass;
-extern struct nvkm_oclass *nv50_clk_oclass;
-extern struct nvkm_oclass *g84_clk_oclass;
-extern struct nvkm_oclass *mcp77_clk_oclass;
-extern struct nvkm_oclass gt215_clk_oclass;
-extern struct nvkm_oclass gf100_clk_oclass;
-extern struct nvkm_oclass gk104_clk_oclass;
-extern struct nvkm_oclass gk20a_clk_oclass;
-
-int nv04_clk_pll_set(struct nvkm_clk *, u32 type, u32 freq);
-int nv04_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *, int clk,
-		      struct nvkm_pll_vals *);
-int nv04_clk_pll_prog(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *);
-int gt215_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *,
-		       int clk, struct nvkm_pll_vals *);
-
+int nvkm_clk_read(struct nvkm_clk *, enum nv_clk_src);
 int nvkm_clk_ustate(struct nvkm_clk *, int req, int pwr);
 int nvkm_clk_astate(struct nvkm_clk *, int req, int rel, bool wait);
 int nvkm_clk_dstate(struct nvkm_clk *, int req, int rel);
 int nvkm_clk_tstate(struct nvkm_clk *, int req, int rel);
+
+int nv04_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int nv40_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int nv50_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int g84_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int mcp77_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gt215_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gf100_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gk104_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
+int gk20a_clk_new(struct nvkm_device *, int, struct nvkm_clk **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index d1bbe0d..6c1407f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -1,32 +1,31 @@
 #ifndef __NVKM_DEVINIT_H__
 #define __NVKM_DEVINIT_H__
 #include <core/subdev.h>
+struct nvkm_devinit;
 
 struct nvkm_devinit {
-	struct nvkm_subdev base;
+	const struct nvkm_devinit_func *func;
+	struct nvkm_subdev subdev;
 	bool post;
-	void (*meminit)(struct nvkm_devinit *);
-	int  (*pll_set)(struct nvkm_devinit *, u32 type, u32 freq);
-	u32  (*mmio)(struct nvkm_devinit *, u32 addr);
 };
 
-static inline struct nvkm_devinit *
-nvkm_devinit(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_DEVINIT);
-}
+u32 nvkm_devinit_mmio(struct nvkm_devinit *, u32 addr);
+int nvkm_devinit_pll_set(struct nvkm_devinit *, u32 type, u32 khz);
+void nvkm_devinit_meminit(struct nvkm_devinit *);
+u64 nvkm_devinit_disable(struct nvkm_devinit *);
+int nvkm_devinit_post(struct nvkm_devinit *, u64 *disable);
 
-extern struct nvkm_oclass *nv04_devinit_oclass;
-extern struct nvkm_oclass *nv05_devinit_oclass;
-extern struct nvkm_oclass *nv10_devinit_oclass;
-extern struct nvkm_oclass *nv1a_devinit_oclass;
-extern struct nvkm_oclass *nv20_devinit_oclass;
-extern struct nvkm_oclass *nv50_devinit_oclass;
-extern struct nvkm_oclass *g84_devinit_oclass;
-extern struct nvkm_oclass *g98_devinit_oclass;
-extern struct nvkm_oclass *gt215_devinit_oclass;
-extern struct nvkm_oclass *mcp89_devinit_oclass;
-extern struct nvkm_oclass *gf100_devinit_oclass;
-extern struct nvkm_oclass *gm107_devinit_oclass;
-extern struct nvkm_oclass *gm204_devinit_oclass;
+int nv04_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv05_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv10_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv1a_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv20_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int nv50_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int g84_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int g98_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gt215_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int mcp89_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int gm204_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 #endif
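
[Editor's note, not part of the diff: the per-object hooks (meminit, pll_set, mmio) move behind wrapper functions, and nvkm_devinit_post() additionally reports a mask through *disable. A sketch of the call pattern; the reading of 'disable' as a mask of unavailable engines is an assumption of this note:]

    static int
    example_post(struct nvkm_devinit *init)
    {
    	u64 disable = 0;
    	int ret = nvkm_devinit_post(init, &disable);
    	/* 'disable' is assumed to carry bits for absent engines */
    	return ret;
    }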
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 16da56c..85ab72c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -18,7 +18,7 @@
 #define NV_MEM_TARGET_VM          3
 #define NV_MEM_TARGET_GART        4
 
-#define NV_MEM_TYPE_VM 0x7f
+#define NVKM_RAM_TYPE_VM 0x7f
 #define NV_MEM_COMP_VM 0x03
 
 struct nvkm_mem {
@@ -46,62 +46,47 @@
 };
 
 struct nvkm_fb {
-	struct nvkm_subdev base;
-
-	bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);
+	const struct nvkm_fb_func *func;
+	struct nvkm_subdev subdev;
 
 	struct nvkm_ram *ram;
 
-	struct nvkm_mm vram;
-	struct nvkm_mm tags;
-
 	struct {
 		struct nvkm_fb_tile region[16];
 		int regions;
-		void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
-			     u32 pitch, u32 flags, struct nvkm_fb_tile *);
-		void (*comp)(struct nvkm_fb *, int i, u32 size, u32 flags,
-			     struct nvkm_fb_tile *);
-		void (*fini)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
-		void (*prog)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
 	} tile;
 };
 
-static inline struct nvkm_fb *
-nvkm_fb(void *obj)
-{
-	/* fbram uses this before device subdev pointer is valid */
-	if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
-	    nv_subidx(obj) == NVDEV_SUBDEV_FB)
-		return obj;
+bool nvkm_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
+void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
+void nvkm_fb_tile_fini(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
+void nvkm_fb_tile_prog(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
 
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_FB);
-}
-
-extern struct nvkm_oclass *nv04_fb_oclass;
-extern struct nvkm_oclass *nv10_fb_oclass;
-extern struct nvkm_oclass *nv1a_fb_oclass;
-extern struct nvkm_oclass *nv20_fb_oclass;
-extern struct nvkm_oclass *nv25_fb_oclass;
-extern struct nvkm_oclass *nv30_fb_oclass;
-extern struct nvkm_oclass *nv35_fb_oclass;
-extern struct nvkm_oclass *nv36_fb_oclass;
-extern struct nvkm_oclass *nv40_fb_oclass;
-extern struct nvkm_oclass *nv41_fb_oclass;
-extern struct nvkm_oclass *nv44_fb_oclass;
-extern struct nvkm_oclass *nv46_fb_oclass;
-extern struct nvkm_oclass *nv47_fb_oclass;
-extern struct nvkm_oclass *nv49_fb_oclass;
-extern struct nvkm_oclass *nv4e_fb_oclass;
-extern struct nvkm_oclass *nv50_fb_oclass;
-extern struct nvkm_oclass *g84_fb_oclass;
-extern struct nvkm_oclass *gt215_fb_oclass;
-extern struct nvkm_oclass *mcp77_fb_oclass;
-extern struct nvkm_oclass *mcp89_fb_oclass;
-extern struct nvkm_oclass *gf100_fb_oclass;
-extern struct nvkm_oclass *gk104_fb_oclass;
-extern struct nvkm_oclass *gk20a_fb_oclass;
-extern struct nvkm_oclass *gm107_fb_oclass;
+int nv04_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv10_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv1a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv20_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv25_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv30_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv35_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv36_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv40_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv41_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv44_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv46_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv47_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv49_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv4e_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int nv50_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int g84_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gt215_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int mcp77_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int mcp89_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 
 #include <subdev/bios.h>
 #include <subdev/bios/ramcfg.h>
@@ -112,36 +97,35 @@
 	u32 freq;
 };
 
+enum nvkm_ram_type {
+	NVKM_RAM_TYPE_UNKNOWN = 0,
+	NVKM_RAM_TYPE_STOLEN,
+	NVKM_RAM_TYPE_SGRAM,
+	NVKM_RAM_TYPE_SDRAM,
+	NVKM_RAM_TYPE_DDR1,
+	NVKM_RAM_TYPE_DDR2,
+	NVKM_RAM_TYPE_DDR3,
+	NVKM_RAM_TYPE_GDDR2,
+	NVKM_RAM_TYPE_GDDR3,
+	NVKM_RAM_TYPE_GDDR4,
+	NVKM_RAM_TYPE_GDDR5
+};
+
 struct nvkm_ram {
-	struct nvkm_object base;
-	enum {
-		NV_MEM_TYPE_UNKNOWN = 0,
-		NV_MEM_TYPE_STOLEN,
-		NV_MEM_TYPE_SGRAM,
-		NV_MEM_TYPE_SDRAM,
-		NV_MEM_TYPE_DDR1,
-		NV_MEM_TYPE_DDR2,
-		NV_MEM_TYPE_DDR3,
-		NV_MEM_TYPE_GDDR2,
-		NV_MEM_TYPE_GDDR3,
-		NV_MEM_TYPE_GDDR4,
-		NV_MEM_TYPE_GDDR5
-	} type;
-	u64 stolen;
+	const struct nvkm_ram_func *func;
+	struct nvkm_fb *fb;
+	enum nvkm_ram_type type;
 	u64 size;
-	u32 tags;
+
+#define NVKM_RAM_MM_SHIFT 12
+	struct nvkm_mm vram;
+	struct nvkm_mm tags;
+	u64 stolen;
 
 	int ranks;
 	int parts;
 	int part_mask;
 
-	int  (*get)(struct nvkm_fb *, u64 size, u32 align, u32 size_nc,
-		    u32 type, struct nvkm_mem **);
-	void (*put)(struct nvkm_fb *, struct nvkm_mem **);
-
-	int  (*calc)(struct nvkm_fb *, u32 freq);
-	int  (*prog)(struct nvkm_fb *);
-	void (*tidy)(struct nvkm_fb *);
 	u32 freq;
 	u32 mr[16];
 	u32 mr1_nuts;
@@ -151,4 +135,17 @@
 	struct nvkm_ram_data xition;
 	struct nvkm_ram_data target;
 };
+
+struct nvkm_ram_func {
+	void *(*dtor)(struct nvkm_ram *);
+	int (*init)(struct nvkm_ram *);
+
+	int (*get)(struct nvkm_ram *, u64 size, u32 align, u32 size_nc,
+		   u32 type, struct nvkm_mem **);
+	void (*put)(struct nvkm_ram *, struct nvkm_mem **);
+
+	int (*calc)(struct nvkm_ram *, u32 freq);
+	int (*prog)(struct nvkm_ram *);
+	void (*tidy)(struct nvkm_ram *);
+};
 #endif
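
[Editor's note, not part of the diff: VRAM allocation moves from nvkm_fb hooks to nvkm_ram_func, and the allocator state (the vram/tags mm, stolen size) now lives in nvkm_ram itself. A hypothetical allocation path under the new layout; the alignment and memtype arguments are assumptions:]

    static int
    example_vram_get(struct nvkm_fb *fb, u64 size, struct nvkm_mem **pmem)
    {
    	struct nvkm_ram *ram = fb->ram;
    	if (!ram || !ram->func->get)
    		return -ENODEV;
    	/* page-aligned, no non-contig chunks, memtype 0 (all assumed) */
    	return ram->func->get(ram, size, 1 << NVKM_RAM_MM_SHIFT, 0,
    			      0x00, pmem);
    }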
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
index a138478..ae201e3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
@@ -1,28 +1,16 @@
 #ifndef __NVKM_FUSE_H__
 #define __NVKM_FUSE_H__
 #include <core/subdev.h>
-#include <core/device.h>
 
 struct nvkm_fuse {
-	struct nvkm_subdev base;
+	const struct nvkm_fuse_func *func;
+	struct nvkm_subdev subdev;
+	spinlock_t lock;
 };
 
-static inline struct nvkm_fuse *
-nvkm_fuse(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_FUSE);
-}
+u32 nvkm_fuse_read(struct nvkm_fuse *, u32 addr);
 
-#define nvkm_fuse_create(p, e, o, d)                                        \
-	nvkm_fuse_create_((p), (e), (o), sizeof(**d), (void **)d)
-
-int  nvkm_fuse_create_(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, int, void **);
-void _nvkm_fuse_dtor(struct nvkm_object *);
-int  _nvkm_fuse_init(struct nvkm_object *);
-#define _nvkm_fuse_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass nv50_fuse_oclass;
-extern struct nvkm_oclass gf100_fuse_oclass;
-extern struct nvkm_oclass gm107_fuse_oclass;
+int nv50_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
+int gf100_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
+int gm107_fuse_new(struct nvkm_device *, int, struct nvkm_fuse **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
index ca5099a..9b9c6d2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
@@ -19,26 +19,21 @@
 };
 
 struct nvkm_gpio {
-	struct nvkm_subdev base;
+	const struct nvkm_gpio_func *func;
+	struct nvkm_subdev subdev;
 
 	struct nvkm_event event;
-
-	void (*reset)(struct nvkm_gpio *, u8 func);
-	int  (*find)(struct nvkm_gpio *, int idx, u8 tag, u8 line,
-		     struct dcb_gpio_func *);
-	int  (*set)(struct nvkm_gpio *, int idx, u8 tag, u8 line, int state);
-	int  (*get)(struct nvkm_gpio *, int idx, u8 tag, u8 line);
 };
 
-static inline struct nvkm_gpio *
-nvkm_gpio(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_GPIO);
-}
+void nvkm_gpio_reset(struct nvkm_gpio *, u8 func);
+int nvkm_gpio_find(struct nvkm_gpio *, int idx, u8 tag, u8 line,
+		   struct dcb_gpio_func *);
+int nvkm_gpio_set(struct nvkm_gpio *, int idx, u8 tag, u8 line, int state);
+int nvkm_gpio_get(struct nvkm_gpio *, int idx, u8 tag, u8 line);
 
-extern struct nvkm_oclass *nv10_gpio_oclass;
-extern struct nvkm_oclass *nv50_gpio_oclass;
-extern struct nvkm_oclass *g94_gpio_oclass;
-extern struct nvkm_oclass *gf110_gpio_oclass;
-extern struct nvkm_oclass *gk104_gpio_oclass;
+int nv10_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int nv50_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int g94_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int gf119_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int gk104_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
 #endif
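
[Editor's note, not part of the diff: GPIO access likewise becomes plain wrapper functions over a hidden nvkm_gpio_func. A sketch of a tag-based lookup-and-set, assuming the usual DCB convention that a line value of 0xff means "match any line":]

    static int
    example_gpio_set(struct nvkm_gpio *gpio, u8 tag, int state)
    {
    	struct dcb_gpio_func func;
    	int ret = nvkm_gpio_find(gpio, 0, tag, 0xff, &func);
    	if (ret == 0)
    		ret = nvkm_gpio_set(gpio, 0, tag, 0xff, state);
    	return ret;
    }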
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index a2e3373..6b6224d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -6,15 +6,6 @@
 #include <subdev/bios.h>
 #include <subdev/bios/i2c.h>
 
-#define NV_I2C_PORT(n)    (0x00 + (n))
-#define NV_I2C_AUX(n)     (0x10 + (n))
-#define NV_I2C_EXT(n)     (0x20 + (n))
-#define NV_I2C_DEFAULT(n) (0x80 + (n))
-
-#define NV_I2C_TYPE_DCBI2C(n) (0x0000 | (n))
-#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8)
-#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8)
-
 struct nvkm_i2c_ntfy_req {
 #define NVKM_I2C_PLUG                                                      0x01
 #define NVKM_I2C_UNPLUG                                                    0x02
@@ -29,72 +20,79 @@
 	u8 mask;
 };
 
-struct nvkm_i2c_port {
-	struct nvkm_object base;
-	struct i2c_adapter adapter;
-	struct mutex mutex;
-
-	struct list_head head;
-	u8  index;
-	int aux;
-
-	const struct nvkm_i2c_func *func;
-};
-
-struct nvkm_i2c_func {
-	void (*drive_scl)(struct nvkm_i2c_port *, int);
-	void (*drive_sda)(struct nvkm_i2c_port *, int);
-	int  (*sense_scl)(struct nvkm_i2c_port *);
-	int  (*sense_sda)(struct nvkm_i2c_port *);
-
-	int  (*aux)(struct nvkm_i2c_port *, bool, u8, u32, u8 *, u8);
-	int  (*pattern)(struct nvkm_i2c_port *, int pattern);
-	int  (*lnk_ctl)(struct nvkm_i2c_port *, int nr, int bw, bool enh);
-	int  (*drv_ctl)(struct nvkm_i2c_port *, int lane, int sw, int pe);
-};
-
-struct nvkm_i2c_board_info {
+struct nvkm_i2c_bus_probe {
 	struct i2c_board_info dev;
 	u8 udelay; /* set to 0 to use the standard delay */
 };
 
-struct nvkm_i2c {
-	struct nvkm_subdev base;
-	struct nvkm_event event;
+struct nvkm_i2c_bus {
+	const struct nvkm_i2c_bus_func *func;
+	struct nvkm_i2c_pad *pad;
+#define NVKM_I2C_BUS_CCB(n) /* 'n' is ccb index */                           (n)
+#define NVKM_I2C_BUS_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x100)
+#define NVKM_I2C_BUS_PRI /* ccb primary comm. port */                        -1
+#define NVKM_I2C_BUS_SEC /* ccb secondary comm. port */                      -2
+	int id;
 
-	struct nvkm_i2c_port *(*find)(struct nvkm_i2c *, u8 index);
-	struct nvkm_i2c_port *(*find_type)(struct nvkm_i2c *, u16 type);
-	int  (*acquire_pad)(struct nvkm_i2c_port *, unsigned long timeout);
-	void (*release_pad)(struct nvkm_i2c_port *);
-	int  (*acquire)(struct nvkm_i2c_port *, unsigned long timeout);
-	void (*release)(struct nvkm_i2c_port *);
-	int  (*identify)(struct nvkm_i2c *, int index,
-			 const char *what, struct nvkm_i2c_board_info *,
-			 bool (*match)(struct nvkm_i2c_port *,
-				       struct i2c_board_info *, void *),
-			 void *);
-
-	wait_queue_head_t wait;
-	struct list_head ports;
+	struct mutex mutex;
+	struct list_head head;
+	struct i2c_adapter i2c;
 };
 
-static inline struct nvkm_i2c *
-nvkm_i2c(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_I2C);
-}
+int nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *);
+void nvkm_i2c_bus_release(struct nvkm_i2c_bus *);
+int nvkm_i2c_bus_probe(struct nvkm_i2c_bus *, const char *,
+		       struct nvkm_i2c_bus_probe *,
+		       bool (*)(struct nvkm_i2c_bus *,
+			        struct i2c_board_info *, void *), void *);
 
-extern struct nvkm_oclass *nv04_i2c_oclass;
-extern struct nvkm_oclass *nv4e_i2c_oclass;
-extern struct nvkm_oclass *nv50_i2c_oclass;
-extern struct nvkm_oclass *g94_i2c_oclass;
-extern struct nvkm_oclass *gf110_i2c_oclass;
-extern struct nvkm_oclass *gf117_i2c_oclass;
-extern struct nvkm_oclass *gk104_i2c_oclass;
-extern struct nvkm_oclass *gm204_i2c_oclass;
+struct nvkm_i2c_aux {
+	const struct nvkm_i2c_aux_func *func;
+	struct nvkm_i2c_pad *pad;
+#define NVKM_I2C_AUX_CCB(n) /* 'n' is ccb index */                           (n)
+#define NVKM_I2C_AUX_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x100)
+	int id;
+
+	struct mutex mutex;
+	struct list_head head;
+	struct i2c_adapter i2c;
+
+	u32 intr;
+};
+
+void nvkm_i2c_aux_monitor(struct nvkm_i2c_aux *, bool monitor);
+int nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *);
+void nvkm_i2c_aux_release(struct nvkm_i2c_aux *);
+int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
+		      u32 addr, u8 *data, u8 size);
+int nvkm_i2c_aux_lnk_ctl(struct nvkm_i2c_aux *, int link_nr, int link_bw,
+			 bool enhanced_framing);
+
+struct nvkm_i2c {
+	const struct nvkm_i2c_func *func;
+	struct nvkm_subdev subdev;
+
+	struct list_head pad;
+	struct list_head bus;
+	struct list_head aux;
+
+	struct nvkm_event event;
+};
+
+struct nvkm_i2c_bus *nvkm_i2c_bus_find(struct nvkm_i2c *, int);
+struct nvkm_i2c_aux *nvkm_i2c_aux_find(struct nvkm_i2c *, int);
+
+int nv04_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int nv4e_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int nv50_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int g94_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gm204_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 
 static inline int
-nv_rdi2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg)
+nvkm_rdi2cr(struct i2c_adapter *adap, u8 addr, u8 reg)
 {
 	u8 val;
 	struct i2c_msg msgs[] = {
@@ -102,7 +100,7 @@
 		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
 	};
 
-	int ret = i2c_transfer(&port->adapter, msgs, 2);
+	int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
 	if (ret != 2)
 		return -EIO;
 
@@ -110,14 +108,14 @@
 }
 
 static inline int
-nv_wri2cr(struct nvkm_i2c_port *port, u8 addr, u8 reg, u8 val)
+nvkm_wri2cr(struct i2c_adapter *adap, u8 addr, u8 reg, u8 val)
 {
 	u8 buf[2] = { reg, val };
 	struct i2c_msg msgs[] = {
 		{ .addr = addr, .flags = 0, .len = 2, .buf = buf },
 	};
 
-	int ret = i2c_transfer(&port->adapter, msgs, 1);
+	int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
 	if (ret != 1)
 		return -EIO;
 
@@ -125,11 +123,30 @@
 }
 
 static inline bool
-nv_probe_i2c(struct nvkm_i2c_port *port, u8 addr)
+nvkm_probe_i2c(struct i2c_adapter *adap, u8 addr)
 {
-	return nv_rdi2cr(port, addr, 0) >= 0;
+	return nvkm_rdi2cr(adap, addr, 0) >= 0;
 }
 
-int nv_rdaux(struct nvkm_i2c_port *, u32 addr, u8 *data, u8 size);
-int nv_wraux(struct nvkm_i2c_port *, u32 addr, u8 *data, u8 size);
+static inline int
+nvkm_rdaux(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 size)
+{
+	int ret = nvkm_i2c_aux_acquire(aux);
+	if (ret == 0) {
+		ret = nvkm_i2c_aux_xfer(aux, true, 9, addr, data, size);
+		nvkm_i2c_aux_release(aux);
+	}
+	return ret;
+}
+
+static inline int
+nvkm_wraux(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 size)
+{
+	int ret = nvkm_i2c_aux_acquire(aux);
+	if (ret == 0) {
+		ret = nvkm_i2c_aux_xfer(aux, true, 8, addr, data, size);
+		nvkm_i2c_aux_release(aux);
+	}
+	return ret;
+}
 #endif
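
[Editor's note, not part of the diff: since the inline helpers now take a bare struct i2c_adapter rather than an nvkm_i2c_port, a caller first resolves a bus (or AUX channel) and acquires it. A hypothetical device probe via the new bus-centric interface, built only from the declarations in this hunk:]

    static bool
    example_i2c_probe(struct nvkm_i2c *i2c, u8 addr)
    {
    	struct nvkm_i2c_bus *bus =
    		nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
    	bool present = false;
    	if (bus && nvkm_i2c_bus_acquire(bus) == 0) {
    		present = nvkm_probe_i2c(&bus->i2c, addr);
    		nvkm_i2c_bus_release(bus);
    	}
    	return present;
    }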
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
index 2150d8a..9d512cd5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
@@ -2,31 +2,7 @@
 #define __NVKM_IBUS_H__
 #include <core/subdev.h>
 
-struct nvkm_ibus {
-	struct nvkm_subdev base;
-};
-
-static inline struct nvkm_ibus *
-nvkm_ibus(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_IBUS);
-}
-
-#define nvkm_ibus_create(p,e,o,d)                                           \
-	nvkm_subdev_create_((p), (e), (o), 0, "PIBUS", "ibus",              \
-			       sizeof(**d), (void **)d)
-#define nvkm_ibus_destroy(p)                                                \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_ibus_init(p)                                                   \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_ibus_fini(p,s)                                                 \
-	nvkm_subdev_fini(&(p)->base, (s))
-
-#define _nvkm_ibus_dtor _nvkm_subdev_dtor
-#define _nvkm_ibus_init _nvkm_subdev_init
-#define _nvkm_ibus_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass gf100_ibus_oclass;
-extern struct nvkm_oclass gk104_ibus_oclass;
-extern struct nvkm_oclass gk20a_ibus_oclass;
+int gf100_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
+int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
+int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
 #endif
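
This ibus hunk is the template for the refactor repeated across the remaining subdev headers: exported nvkm_oclass tables become plain constructor functions taking the device, a subdev index, and an out-pointer. By analogy with the NVKM_SUBDEV_NR rename visible in the mmu hunk below, a device-table entry would now instantiate one roughly as follows (the index constant is assumed, and the error path is a sketch):

    struct nvkm_subdev *ibus = NULL;
    int ret = gf100_ibus_new(device, NVKM_SUBDEV_IBUS, &ibus);
    if (ret)
        return ret; /* no partially-constructed object to clean up */
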
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index 1bcb763..28bc202 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -1,49 +1,29 @@
 #ifndef __NVKM_INSTMEM_H__
 #define __NVKM_INSTMEM_H__
 #include <core/subdev.h>
-
-struct nvkm_instobj {
-	struct nvkm_object base;
-	struct list_head head;
-	u32 *suspend;
-	u64 addr;
-	u32 size;
-};
-
-static inline struct nvkm_instobj *
-nv_memobj(void *obj)
-{
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!nv_iclass(obj, NV_MEMOBJ_CLASS)))
-		nv_assert("BAD CAST -> NvMemObj, %08x", nv_hclass(obj));
-#endif
-	return obj;
-}
+struct nvkm_memory;
 
 struct nvkm_instmem {
-	struct nvkm_subdev base;
-	struct list_head list;
+	const struct nvkm_instmem_func *func;
+	struct nvkm_subdev subdev;
 
+	struct list_head list;
 	u32 reserved;
-	int (*alloc)(struct nvkm_instmem *, struct nvkm_object *,
-		     u32 size, u32 align, struct nvkm_object **);
+
+	struct nvkm_memory *vbios;
+	struct nvkm_ramht  *ramht;
+	struct nvkm_memory *ramro;
+	struct nvkm_memory *ramfc;
 };
 
-static inline struct nvkm_instmem *
-nvkm_instmem(void *obj)
-{
-	/* nv04/nv40 impls need to create objects in their constructor,
-	 * which is before the subdev pointer is valid
-	 */
-	if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
-	    nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM)
-		return obj;
+u32 nvkm_instmem_rd32(struct nvkm_instmem *, u32 addr);
+void nvkm_instmem_wr32(struct nvkm_instmem *, u32 addr, u32 data);
+int nvkm_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero,
+		     struct nvkm_memory **);
 
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_INSTMEM);
-}
 
-extern struct nvkm_oclass *nv04_instmem_oclass;
-extern struct nvkm_oclass *nv40_instmem_oclass;
-extern struct nvkm_oclass *nv50_instmem_oclass;
-extern struct nvkm_oclass *gk20a_instmem_oclass;
+int nv04_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
+int nv40_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
+int nv50_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
+int gk20a_instmem_new(struct nvkm_device *, int, struct nvkm_instmem **);
 #endif
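
Instance-memory allocation moves from an ->alloc hook returning an nvkm_object to a free function returning the new nvkm_memory type, with zero-fill made explicit. A hedged sketch, assuming imem is the device's instmem pointer:

    struct nvkm_memory *inst = NULL;
    /* 0x1000 bytes, 0x100-byte alignment, zeroed on allocation */
    int ret = nvkm_instobj_new(imem, 0x1000, 0x100, true, &inst);
    if (ret)
        return ret;
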
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index cd5d29f..c773b5e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -1,31 +1,36 @@
 #ifndef __NVKM_LTC_H__
 #define __NVKM_LTC_H__
 #include <core/subdev.h>
-struct nvkm_mm_node;
+#include <core/mm.h>
 
 #define NVKM_LTC_MAX_ZBC_CNT 16
 
 struct nvkm_ltc {
-	struct nvkm_subdev base;
+	const struct nvkm_ltc_func *func;
+	struct nvkm_subdev subdev;
 
-	int  (*tags_alloc)(struct nvkm_ltc *, u32 count,
-			   struct nvkm_mm_node **);
-	void (*tags_free)(struct nvkm_ltc *, struct nvkm_mm_node **);
-	void (*tags_clear)(struct nvkm_ltc *, u32 first, u32 count);
+	u32 ltc_nr;
+	u32 lts_nr;
+
+	u32 num_tags;
+	u32 tag_base;
+	struct nvkm_mm tags;
+	struct nvkm_mm_node *tag_ram;
 
 	int zbc_min;
 	int zbc_max;
-	int (*zbc_color_get)(struct nvkm_ltc *, int index, const u32[4]);
-	int (*zbc_depth_get)(struct nvkm_ltc *, int index, const u32);
+	u32 zbc_color[NVKM_LTC_MAX_ZBC_CNT][4];
+	u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
 };
 
-static inline struct nvkm_ltc *
-nvkm_ltc(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_LTC);
-}
+int nvkm_ltc_tags_alloc(struct nvkm_ltc *, u32 count, struct nvkm_mm_node **);
+void nvkm_ltc_tags_free(struct nvkm_ltc *, struct nvkm_mm_node **);
+void nvkm_ltc_tags_clear(struct nvkm_ltc *, u32 first, u32 count);
 
-extern struct nvkm_oclass *gf100_ltc_oclass;
-extern struct nvkm_oclass *gk104_ltc_oclass;
-extern struct nvkm_oclass *gm107_ltc_oclass;
+int nvkm_ltc_zbc_color_get(struct nvkm_ltc *, int index, const u32[4]);
+int nvkm_ltc_zbc_depth_get(struct nvkm_ltc *, int index, const u32);
+
+int gf100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 #endif
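
The LTC tag hooks become first-class functions, and the tag allocator state (the nvkm_mm plus the tag_ram node) now lives directly in struct nvkm_ltc. The lifecycle the prototypes imply, assuming ltc is device->ltc and with the count purely illustrative:

    struct nvkm_mm_node *tags = NULL;
    if (nvkm_ltc_tags_alloc(ltc, 256, &tags) == 0) {
        /* tags->offset indexes the tag RAM tracked by ltc->tags */
        nvkm_ltc_tags_clear(ltc, tags->offset, 256);
        /* ... attach the tags to a compressed allocation ... */
        nvkm_ltc_tags_free(ltc, &tags);
    }
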
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 055bea7..4de05e7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -3,26 +3,19 @@
 #include <core/subdev.h>
 
 struct nvkm_mc {
-	struct nvkm_subdev base;
-	bool use_msi;
-	unsigned int irq;
-	void (*unk260)(struct nvkm_mc *, u32);
+	const struct nvkm_mc_func *func;
+	struct nvkm_subdev subdev;
 };
 
-static inline struct nvkm_mc *
-nvkm_mc(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MC);
-}
+void nvkm_mc_intr(struct nvkm_mc *, bool *handled);
+void nvkm_mc_intr_unarm(struct nvkm_mc *);
+void nvkm_mc_intr_rearm(struct nvkm_mc *);
+void nvkm_mc_unk260(struct nvkm_mc *, u32 data);
 
-extern struct nvkm_oclass *nv04_mc_oclass;
-extern struct nvkm_oclass *nv40_mc_oclass;
-extern struct nvkm_oclass *nv44_mc_oclass;
-extern struct nvkm_oclass *nv4c_mc_oclass;
-extern struct nvkm_oclass *nv50_mc_oclass;
-extern struct nvkm_oclass *g94_mc_oclass;
-extern struct nvkm_oclass *g98_mc_oclass;
-extern struct nvkm_oclass *gf100_mc_oclass;
-extern struct nvkm_oclass *gf106_mc_oclass;
-extern struct nvkm_oclass *gk20a_mc_oclass;
+int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int nv44_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int nv50_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int g98_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 3a53687..dcd3def 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -6,7 +6,7 @@
 struct nvkm_mem;
 
 struct nvkm_vm_pgt {
-	struct nvkm_gpuobj *obj[2];
+	struct nvkm_memory *mem[2];
 	u32 refcount[2];
 };
 
@@ -26,74 +26,23 @@
 
 struct nvkm_vm {
 	struct nvkm_mmu *mmu;
+
+	struct mutex mutex;
 	struct nvkm_mm mm;
 	struct kref refcount;
 
 	struct list_head pgd_list;
-	atomic_t engref[NVDEV_SUBDEV_NR];
+	atomic_t engref[NVKM_SUBDEV_NR];
 
 	struct nvkm_vm_pgt *pgt;
 	u32 fpde;
 	u32 lpde;
 };
 
-struct nvkm_mmu {
-	struct nvkm_subdev base;
-
-	u64 limit;
-	u8  dma_bits;
-	u32 pgt_bits;
-	u8  spg_shift;
-	u8  lpg_shift;
-
-	int  (*create)(struct nvkm_mmu *, u64 offset, u64 length,
-		       u64 mm_offset, struct nvkm_vm **);
-
-	void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
-			struct nvkm_gpuobj *pgt[2]);
-	void (*map)(struct nvkm_vma *, struct nvkm_gpuobj *,
-		    struct nvkm_mem *, u32 pte, u32 cnt,
-		    u64 phys, u64 delta);
-	void (*map_sg)(struct nvkm_vma *, struct nvkm_gpuobj *,
-		       struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
-	void (*unmap)(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt);
-	void (*flush)(struct nvkm_vm *);
-};
-
-static inline struct nvkm_mmu *
-nvkm_mmu(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MMU);
-}
-
-#define nvkm_mmu_create(p,e,o,i,f,d)                                      \
-	nvkm_subdev_create((p), (e), (o), 0, (i), (f), (d))
-#define nvkm_mmu_destroy(p)                                               \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_mmu_init(p)                                                  \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_mmu_fini(p,s)                                                \
-	nvkm_subdev_fini(&(p)->base, (s))
-
-#define _nvkm_mmu_dtor _nvkm_subdev_dtor
-#define _nvkm_mmu_init _nvkm_subdev_init
-#define _nvkm_mmu_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass nv04_mmu_oclass;
-extern struct nvkm_oclass nv41_mmu_oclass;
-extern struct nvkm_oclass nv44_mmu_oclass;
-extern struct nvkm_oclass nv50_mmu_oclass;
-extern struct nvkm_oclass gf100_mmu_oclass;
-
-int  nv04_vm_create(struct nvkm_mmu *, u64, u64, u64,
-		    struct nvkm_vm **);
-void nv04_mmu_dtor(struct nvkm_object *);
-
-int  nvkm_vm_create(struct nvkm_mmu *, u64 offset, u64 length, u64 mm_offset,
-		    u32 block, struct nvkm_vm **);
 int  nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
-		 struct nvkm_vm **);
+		 struct lock_class_key *, struct nvkm_vm **);
 int  nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_gpuobj *pgd);
+int  nvkm_vm_boot(struct nvkm_vm *, u64 size);
 int  nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
 		 struct nvkm_vma *);
 void nvkm_vm_put(struct nvkm_vma *);
@@ -101,4 +50,19 @@
 void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *);
 void nvkm_vm_unmap(struct nvkm_vma *);
 void nvkm_vm_unmap_at(struct nvkm_vma *, u64 offset, u64 length);
+
+struct nvkm_mmu {
+	const struct nvkm_mmu_func *func;
+	struct nvkm_subdev subdev;
+
+	u64 limit;
+	u8  dma_bits;
+	u8  lpg_shift;
+};
+
+int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 #endif
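
struct nvkm_mmu sheds its method table, and nvkm_vm_new() grows a lock_class_key argument so each VM's new mutex gets a distinct lockdep class. A sketch of reserving and releasing a VA range with the surviving helpers, assuming the NV_MEM_ACCESS_RW flag from the unshown top of this header:

    struct nvkm_vma vma = {};
    /* 1MiB of GPU VA, 12-bit (4KiB) pages, read/write access */
    int ret = nvkm_vm_get(vm, 0x100000, 12, NV_MEM_ACCESS_RW, &vma);
    if (ret == 0) {
        /* nvkm_vm_map()/nvkm_vm_map_at() would back the range with memory */
        nvkm_vm_put(&vma);
    }
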
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
index fba6134..ed02501 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
@@ -2,33 +2,5 @@
 #define __NVKM_MXM_H__
 #include <core/subdev.h>
 
-#define MXM_SANITISE_DCB 0x00000001
-
-struct nvkm_mxm {
-	struct nvkm_subdev base;
-	u32 action;
-	u8 *mxms;
-};
-
-static inline struct nvkm_mxm *
-nvkm_mxm(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_MXM);
-}
-
-#define nvkm_mxm_create(p,e,o,d)                                            \
-	nvkm_mxm_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_mxm_init(p)                                                    \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_mxm_fini(p,s)                                                  \
-	nvkm_subdev_fini(&(p)->base, (s))
-int  nvkm_mxm_create_(struct nvkm_object *, struct nvkm_object *,
-			 struct nvkm_oclass *, int, void **);
-void nvkm_mxm_destroy(struct nvkm_mxm *);
-
-#define _nvkm_mxm_dtor _nvkm_subdev_dtor
-#define _nvkm_mxm_init _nvkm_subdev_init
-#define _nvkm_mxm_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass nv50_mxm_oclass;
+int nv50_mxm_new(struct nvkm_device *, int, struct nvkm_subdev **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
new file mode 100644
index 0000000..5b3c054
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -0,0 +1,34 @@
+#ifndef __NVKM_PCI_H__
+#define __NVKM_PCI_H__
+#include <core/subdev.h>
+
+struct nvkm_pci {
+	const struct nvkm_pci_func *func;
+	struct nvkm_subdev subdev;
+	struct pci_dev *pdev;
+	int irq;
+
+	struct {
+		struct agp_bridge_data *bridge;
+		u32 mode;
+		u64 base;
+		u64 size;
+		int mtrr;
+		bool cma;
+		bool acquired;
+	} agp;
+
+	bool msi;
+};
+
+u32 nvkm_pci_rd32(struct nvkm_pci *, u16 addr);
+void nvkm_pci_wr08(struct nvkm_pci *, u16 addr, u8 data);
+void nvkm_pci_wr32(struct nvkm_pci *, u16 addr, u32 data);
+void nvkm_pci_rom_shadow(struct nvkm_pci *, bool shadow);
+
+int nv04_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int nv40_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int nv50_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+#endif
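
The new PCI subdev takes over the AGP bookkeeping that nouveau_drm used to carry (nouveau_agp.c is deleted later in this diff) and exposes accessors for the PCI register mirror. A sketch of their use, assuming pci is device->pci; the offset is illustrative:

    u32 val = nvkm_pci_rd32(pci, 0x0040);
    nvkm_pci_wr32(pci, 0x0040, val | 0x00000001);
    nvkm_pci_rom_shadow(pci, true); /* presumably re-enables ROM shadowing */
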
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 7559423..e61923d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -3,7 +3,8 @@
 #include <core/subdev.h>
 
 struct nvkm_pmu {
-	struct nvkm_subdev base;
+	const struct nvkm_pmu_func *func;
+	struct nvkm_subdev subdev;
 
 	struct {
 		u32 base;
@@ -20,24 +21,20 @@
 		u32 message;
 		u32 data[2];
 	} recv;
-
-	int  (*message)(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
-	void (*pgob)(struct nvkm_pmu *, bool);
 };
 
-static inline struct nvkm_pmu *
-nvkm_pmu(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_PMU);
-}
+int nvkm_pmu_send(struct nvkm_pmu *, u32 reply[2], u32 process,
+		  u32 message, u32 data0, u32 data1);
+void nvkm_pmu_pgob(struct nvkm_pmu *, bool enable);
 
-extern struct nvkm_oclass *gt215_pmu_oclass;
-extern struct nvkm_oclass *gf100_pmu_oclass;
-extern struct nvkm_oclass *gf110_pmu_oclass;
-extern struct nvkm_oclass *gk104_pmu_oclass;
-extern struct nvkm_oclass *gk110_pmu_oclass;
-extern struct nvkm_oclass *gk208_pmu_oclass;
-extern struct nvkm_oclass *gk20a_pmu_oclass;
+int gt215_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gf100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gf119_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gk104_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
+int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
 
 /* interface to MEMX process running on PMU */
 struct nvkm_memx;
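
nvkm_pmu_send() replaces the ->message hook with the same five-word protocol. A hedged sketch of a synchronous query; the process and message identifiers are placeholders, since the real values belong to the PMU firmware interface defined elsewhere:

    u32 reply[2] = {};
    u32 process = 0, message = 0; /* placeholders for real PMU fw IDs */
    int ret = nvkm_pmu_send(pmu, reply, process, message, 0, 0);
    /* on success, reply[0]/reply[1] carry the PMU's two response words */
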
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index 6662829..b268b96 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -2,6 +2,28 @@
 #define __NVKM_THERM_H__
 #include <core/subdev.h>
 
+#include <subdev/bios.h>
+#include <subdev/bios/therm.h>
+#include <subdev/timer.h>
+
+enum nvkm_therm_thrs_direction {
+	NVKM_THERM_THRS_FALLING = 0,
+	NVKM_THERM_THRS_RISING = 1
+};
+
+enum nvkm_therm_thrs_state {
+	NVKM_THERM_THRS_LOWER = 0,
+	NVKM_THERM_THRS_HIGHER = 1
+};
+
+enum nvkm_therm_thrs {
+	NVKM_THERM_THRS_FANBOOST = 0,
+	NVKM_THERM_THRS_DOWNCLOCK = 1,
+	NVKM_THERM_THRS_CRITICAL = 2,
+	NVKM_THERM_THRS_SHUTDOWN = 3,
+	NVKM_THERM_THRS_NR
+};
+
 enum nvkm_therm_fan_mode {
 	NVKM_THERM_CTRL_NONE = 0,
 	NVKM_THERM_CTRL_MANUAL = 1,
@@ -24,56 +46,54 @@
 };
 
 struct nvkm_therm {
-	struct nvkm_subdev base;
+	const struct nvkm_therm_func *func;
+	struct nvkm_subdev subdev;
 
-	int (*pwm_ctrl)(struct nvkm_therm *, int line, bool);
-	int (*pwm_get)(struct nvkm_therm *, int line, u32 *, u32 *);
-	int (*pwm_set)(struct nvkm_therm *, int line, u32, u32);
-	int (*pwm_clock)(struct nvkm_therm *, int line);
+	/* automatic thermal management */
+	struct nvkm_alarm alarm;
+	spinlock_t lock;
+	struct nvbios_therm_trip_point *last_trip;
+	int mode;
+	int cstate;
+	int suspend;
+
+	/* bios */
+	struct nvbios_therm_sensor bios_sensor;
+
+	/* fan priv */
+	struct nvkm_fan *fan;
+
+	/* alarms priv */
+	struct {
+		spinlock_t alarm_program_lock;
+		struct nvkm_alarm therm_poll_alarm;
+		enum nvkm_therm_thrs_state alarm_state[NVKM_THERM_THRS_NR];
+	} sensor;
+
+	/* what should be done if the card overheats */
+	struct {
+		void (*downclock)(struct nvkm_therm *, bool active);
+		void (*pause)(struct nvkm_therm *, bool active);
+	} emergency;
+
+	/* ic */
+	struct i2c_client *ic;
 
 	int (*fan_get)(struct nvkm_therm *);
 	int (*fan_set)(struct nvkm_therm *, int);
-	int (*fan_sense)(struct nvkm_therm *);
-
-	int (*temp_get)(struct nvkm_therm *);
 
 	int (*attr_get)(struct nvkm_therm *, enum nvkm_therm_attr_type);
 	int (*attr_set)(struct nvkm_therm *, enum nvkm_therm_attr_type, int);
 };
 
-static inline struct nvkm_therm *
-nvkm_therm(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_THERM);
-}
+int nvkm_therm_temp_get(struct nvkm_therm *);
+int nvkm_therm_fan_sense(struct nvkm_therm *);
+int nvkm_therm_cstate(struct nvkm_therm *, int, int);
 
-#define nvkm_therm_create(p,e,o,d)                                          \
-	nvkm_therm_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_therm_destroy(p) ({                                            \
-	struct nvkm_therm *therm = (p);                                     \
-        _nvkm_therm_dtor(nv_object(therm));                                 \
-})
-#define nvkm_therm_init(p) ({                                               \
-	struct nvkm_therm *therm = (p);                                     \
-        _nvkm_therm_init(nv_object(therm));                                 \
-})
-#define nvkm_therm_fini(p,s) ({                                             \
-	struct nvkm_therm *therm = (p);                                     \
-        _nvkm_therm_init(nv_object(therm), (s));                            \
-})
-
-int  nvkm_therm_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *, int, void **);
-void _nvkm_therm_dtor(struct nvkm_object *);
-int  _nvkm_therm_init(struct nvkm_object *);
-int  _nvkm_therm_fini(struct nvkm_object *, bool);
-
-int  nvkm_therm_cstate(struct nvkm_therm *, int, int);
-
-extern struct nvkm_oclass nv40_therm_oclass;
-extern struct nvkm_oclass nv50_therm_oclass;
-extern struct nvkm_oclass g84_therm_oclass;
-extern struct nvkm_oclass gt215_therm_oclass;
-extern struct nvkm_oclass gf110_therm_oclass;
-extern struct nvkm_oclass gm107_therm_oclass;
+int nv40_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int nv50_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int g84_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 #endif
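
struct nvkm_therm absorbs what the implementation previously hid behind a wrapper (alarm state, BIOS sensor data, emergency handlers), while temperature and fan-tach reads become plain functions. A sketch of polling them, assuming therm is device->therm and that the usual degrees-C/RPM conventions hold:

    int temp = nvkm_therm_temp_get(therm); /* <0 on error */
    int rpm  = nvkm_therm_fan_sense(therm); /* <0 if unsupported */
    if (temp >= 0 && rpm >= 0)
        pr_debug("GPU at %d degC, fan at %d RPM\n", temp, rpm);
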
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 4ad5508..62ed088 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -9,53 +9,58 @@
 };
 
 static inline void
-nvkm_alarm_init(struct nvkm_alarm *alarm,
-		   void (*func)(struct nvkm_alarm *))
+nvkm_alarm_init(struct nvkm_alarm *alarm, void (*func)(struct nvkm_alarm *))
 {
 	INIT_LIST_HEAD(&alarm->head);
 	alarm->func = func;
 }
 
-bool nvkm_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
-bool nvkm_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
-bool nvkm_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
-void nvkm_timer_alarm(void *, u32 nsec, struct nvkm_alarm *);
-void nvkm_timer_alarm_cancel(void *, struct nvkm_alarm *);
-
-#define NV_WAIT_DEFAULT 2000000000ULL
-#define nv_wait(o,a,m,v)                                                       \
-	nvkm_timer_wait_eq((o), NV_WAIT_DEFAULT, (a), (m), (v))
-#define nv_wait_ne(o,a,m,v)                                                    \
-	nvkm_timer_wait_ne((o), NV_WAIT_DEFAULT, (a), (m), (v))
-#define nv_wait_cb(o,c,d)                                                      \
-	nvkm_timer_wait_cb((o), NV_WAIT_DEFAULT, (c), (d))
-
 struct nvkm_timer {
-	struct nvkm_subdev base;
-	u64  (*read)(struct nvkm_timer *);
-	void (*alarm)(struct nvkm_timer *, u64 time, struct nvkm_alarm *);
-	void (*alarm_cancel)(struct nvkm_timer *, struct nvkm_alarm *);
+	const struct nvkm_timer_func *func;
+	struct nvkm_subdev subdev;
+
+	struct list_head alarms;
+	spinlock_t lock;
 };
 
-static inline struct nvkm_timer *
-nvkm_timer(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_TIMER);
-}
+u64 nvkm_timer_read(struct nvkm_timer *);
+void nvkm_timer_alarm(struct nvkm_timer *, u32 nsec, struct nvkm_alarm *);
+void nvkm_timer_alarm_cancel(struct nvkm_timer *, struct nvkm_alarm *);
 
-#define nvkm_timer_create(p,e,o,d)                                          \
-	nvkm_subdev_create_((p), (e), (o), 0, "PTIMER", "timer",            \
-			       sizeof(**d), (void **)d)
-#define nvkm_timer_destroy(p)                                               \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_timer_init(p)                                                  \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_timer_fini(p,s)                                                \
-	nvkm_subdev_fini(&(p)->base, (s))
+/* Delay based on GPU time (ie. PTIMER).
+ *
+ * Will return -ETIMEDOUT unless the loop was terminated with 'break',
+ * where it will return the number of nanoseconds taken instead.
+ *
+ * NVKM_DELAY can be passed for 'cond' to disable the timeout warning,
+ * which is useful for unconditional delay loops.
+ */
+#define NVKM_DELAY _warn = false;
+#define nvkm_nsec(d,n,cond...) ({                                              \
+	struct nvkm_device *_device = (d);                                     \
+	struct nvkm_timer *_tmr = _device->timer;                              \
+	u64 _nsecs = (n), _time0 = nvkm_timer_read(_tmr);                      \
+	s64 _taken = 0;                                                        \
+	bool _warn = true;                                                     \
+                                                                               \
+	do {                                                                   \
+		cond                                                           \
+	} while (_taken = nvkm_timer_read(_tmr) - _time0, _taken < _nsecs);    \
+                                                                               \
+	if (_taken >= _nsecs) {                                                \
+		if (_warn) {                                                   \
+			dev_warn(_device->dev, "timeout at %s:%d/%s()!\n",     \
+				 __FILE__, __LINE__, __func__);                \
+		}                                                              \
+		_taken = -ETIMEDOUT;                                           \
+	}                                                                      \
+	_taken;                                                                \
+})
+#define nvkm_usec(d,u,cond...) nvkm_nsec((d), (u) * 1000, ##cond)
+#define nvkm_msec(d,m,cond...) nvkm_usec((d), (m) * 1000, ##cond)
 
-int nvkm_timer_create_(struct nvkm_object *, struct nvkm_engine *,
-			  struct nvkm_oclass *, int size, void **);
-
-extern struct nvkm_oclass nv04_timer_oclass;
-extern struct nvkm_oclass gk20a_timer_oclass;
+int nv04_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
+int nv40_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
+int nv41_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
+int gk20a_timer_new(struct nvkm_device *, int, struct nvkm_timer **);
 #endif
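
The comment block above pins down the contract: the condition body runs at least once, falling out of the loop yields -ETIMEDOUT (with a warning unless NVKM_DELAY cleared _warn), and a break returns the elapsed nanoseconds. A typical register poll then reads as below; the register and bit are illustrative, and nvkm_rd32() is assumed to be the device MMIO accessor introduced elsewhere in this series:

    /* wait up to 2000ms for an (illustrative) busy bit to clear */
    if (nvkm_msec(device, 2000,
            if (!(nvkm_rd32(device, 0x002504) & 0x00000010))
                break;
        ) < 0)
        return -EBUSY; /* nvkm_msec() already warned about the timeout */
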
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
index fee09ad..ce5636f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
@@ -1,30 +1,28 @@
 #ifndef __NOUVEAU_VGA_H__
 #define __NOUVEAU_VGA_H__
-
-#include <core/os.h>
+#include <core/subdev.h>
 
 /* access to various legacy io ports */
-u8   nv_rdport(void *obj, int head, u16 port);
-void nv_wrport(void *obj, int head, u16 port, u8 value);
+u8   nvkm_rdport(struct nvkm_device *, int head, u16 port);
+void nvkm_wrport(struct nvkm_device *, int head, u16 port, u8 value);
 
 /* VGA Sequencer */
-u8   nv_rdvgas(void *obj, int head, u8 index);
-void nv_wrvgas(void *obj, int head, u8 index, u8 value);
+u8   nvkm_rdvgas(struct nvkm_device *, int head, u8 index);
+void nvkm_wrvgas(struct nvkm_device *, int head, u8 index, u8 value);
 
 /* VGA Graphics */
-u8   nv_rdvgag(void *obj, int head, u8 index);
-void nv_wrvgag(void *obj, int head, u8 index, u8 value);
+u8   nvkm_rdvgag(struct nvkm_device *, int head, u8 index);
+void nvkm_wrvgag(struct nvkm_device *, int head, u8 index, u8 value);
 
 /* VGA CRTC */
-u8   nv_rdvgac(void *obj, int head, u8 index);
-void nv_wrvgac(void *obj, int head, u8 index, u8 value);
+u8   nvkm_rdvgac(struct nvkm_device *, int head, u8 index);
+void nvkm_wrvgac(struct nvkm_device *, int head, u8 index, u8 value);
 
 /* VGA indexed port access dispatcher */
-u8   nv_rdvgai(void *obj, int head, u16 port, u8 index);
-void nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value);
+u8   nvkm_rdvgai(struct nvkm_device *, int head, u16 port, u8 index);
+void nvkm_wrvgai(struct nvkm_device *, int head, u16 port, u8 index, u8 value);
 
-bool nv_lockvgac(void *obj, bool lock);
-u8   nv_rdvgaowner(void *obj);
-void nv_wrvgaowner(void *obj, u8);
-
+bool nvkm_lockvgac(struct nvkm_device *, bool lock);
+u8   nvkm_rdvgaowner(struct nvkm_device *);
+void nvkm_wrvgaowner(struct nvkm_device *, u8);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index e3d7243..5c8a3f1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -2,19 +2,9 @@
 #define __NVKM_VOLT_H__
 #include <core/subdev.h>
 
-struct nvkm_voltage {
-	u32 uv;
-	u8  id;
-};
-
 struct nvkm_volt {
-	struct nvkm_subdev base;
-
-	int (*vid_get)(struct nvkm_volt *);
-	int (*get)(struct nvkm_volt *);
-	int (*vid_set)(struct nvkm_volt *, u8 vid);
-	int (*set)(struct nvkm_volt *, u32 uv);
-	int (*set_id)(struct nvkm_volt *, u8 id, int condition);
+	const struct nvkm_volt_func *func;
+	struct nvkm_subdev subdev;
 
 	u8 vid_mask;
 	u8 vid_nr;
@@ -24,35 +14,9 @@
 	} vid[256];
 };
 
-static inline struct nvkm_volt *
-nvkm_volt(void *obj)
-{
-	return (void *)nvkm_subdev(obj, NVDEV_SUBDEV_VOLT);
-}
+int nvkm_volt_get(struct nvkm_volt *);
+int nvkm_volt_set_id(struct nvkm_volt *, u8 id, int condition);
 
-#define nvkm_volt_create(p, e, o, d)                                        \
-	nvkm_volt_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_volt_destroy(p) ({                                             \
-	struct nvkm_volt *v = (p);                                          \
-	_nvkm_volt_dtor(nv_object(v));                                      \
-})
-#define nvkm_volt_init(p) ({                                                \
-	struct nvkm_volt *v = (p);                                          \
-	_nvkm_volt_init(nv_object(v));                                      \
-})
-#define nvkm_volt_fini(p,s)                                                 \
-	nvkm_subdev_fini((p), (s))
-
-int  nvkm_volt_create_(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, int, void **);
-void _nvkm_volt_dtor(struct nvkm_object *);
-int  _nvkm_volt_init(struct nvkm_object *);
-#define _nvkm_volt_fini _nvkm_subdev_fini
-
-extern struct nvkm_oclass nv40_volt_oclass;
-extern struct nvkm_oclass gk20a_volt_oclass;
-
-int nvkm_voltgpio_init(struct nvkm_volt *);
-int nvkm_voltgpio_get(struct nvkm_volt *);
-int nvkm_voltgpio_set(struct nvkm_volt *, u8);
+int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
 #endif
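
The voltage interface shrinks to two free functions, with the GPIO helpers dropped from the public header. Judging from the removed nvkm_voltage.uv field, the units remain microvolts; the 'condition' argument's semantics stay private to the implementation:

    int uv = nvkm_volt_get(volt); /* current voltage in uV, <0 on error */
    u8 vid = 0;                   /* illustrative VID table index */
    if (uv >= 0)
        nvkm_volt_set_id(volt, vid, 0);
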
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index d8b0891..d336c22 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -51,7 +51,7 @@
 			 * device (ie. the one that belongs to the fd it
 			 * opened)
 			 */
-			if (nvif_device_init(&cli->base.base, NULL,
+			if (nvif_device_init(&cli->base.object,
 					     NOUVEAU_ABI16_DEVICE, NV_DEVICE,
 					     &args, sizeof(args),
 					     &abi16->device) == 0)
@@ -69,28 +69,28 @@
 int
 nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
+	struct nouveau_cli *cli = (void *)abi16->device.object.client;
 	mutex_unlock(&cli->mutex);
 	return ret;
 }
 
-u16
+s32
 nouveau_abi16_swclass(struct nouveau_drm *drm)
 {
 	switch (drm->device.info.family) {
 	case NV_DEVICE_INFO_V0_TNT:
-		return 0x006e;
+		return NVIF_IOCTL_NEW_V0_SW_NV04;
 	case NV_DEVICE_INFO_V0_CELSIUS:
 	case NV_DEVICE_INFO_V0_KELVIN:
 	case NV_DEVICE_INFO_V0_RANKINE:
 	case NV_DEVICE_INFO_V0_CURIE:
-		return 0x016e;
+		return NVIF_IOCTL_NEW_V0_SW_NV10;
 	case NV_DEVICE_INFO_V0_TESLA:
-		return 0x506e;
+		return NVIF_IOCTL_NEW_V0_SW_NV50;
 	case NV_DEVICE_INFO_V0_FERMI:
 	case NV_DEVICE_INFO_V0_KEPLER:
 	case NV_DEVICE_INFO_V0_MAXWELL:
-		return 0x906e;
+		return NVIF_IOCTL_NEW_V0_SW_GF100;
 	}
 
 	return 0x0000;
@@ -100,6 +100,7 @@
 nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
 			struct nouveau_abi16_ntfy *ntfy)
 {
+	nvif_object_fini(&ntfy->object);
 	nvkm_mm_free(&chan->heap, &ntfy->node);
 	list_del(&ntfy->head);
 	kfree(ntfy);
@@ -132,7 +133,8 @@
 
 	/* destroy channel object, all children will be killed too */
 	if (chan->chan) {
-		abi16->handles &= ~(1ULL << (chan->chan->object->handle & 0xffff));
+		abi16->handles &= ~(1ULL << (chan->chan->user.handle & 0xffff));
+		nouveau_channel_idle(chan->chan);
 		nouveau_channel_del(&chan->chan);
 	}
 
@@ -143,7 +145,7 @@
 void
 nouveau_abi16_fini(struct nouveau_abi16 *abi16)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
+	struct nouveau_cli *cli = (void *)abi16->device.object.client;
 	struct nouveau_abi16_chan *chan, *temp;
 
 	/* cleanup channels */
@@ -164,7 +166,6 @@
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvif_device *device = &drm->device;
-	struct nvkm_timer *ptimer = nvxx_timer(device);
 	struct nvkm_gr *gr = nvxx_gr(device);
 	struct drm_nouveau_getparam *getparam = data;
 
@@ -173,19 +174,19 @@
 		getparam->value = device->info.chipset;
 		break;
 	case NOUVEAU_GETPARAM_PCI_VENDOR:
-		if (nv_device_is_pci(nvxx_device(device)))
+		if (nvxx_device(device)->func->pci)
 			getparam->value = dev->pdev->vendor;
 		else
 			getparam->value = 0;
 		break;
 	case NOUVEAU_GETPARAM_PCI_DEVICE:
-		if (nv_device_is_pci(nvxx_device(device)))
+		if (nvxx_device(device)->func->pci)
 			getparam->value = dev->pdev->device;
 		else
 			getparam->value = 0;
 		break;
 	case NOUVEAU_GETPARAM_BUS_TYPE:
-		if (!nv_device_is_pci(nvxx_device(device)))
+		if (!nvxx_device(device)->func->pci)
 			getparam->value = 3;
 		else
 		if (drm_pci_device_is_agp(dev))
@@ -206,7 +207,7 @@
 		getparam->value = 0; /* deprecated */
 		break;
 	case NOUVEAU_GETPARAM_PTIMER_TIME:
-		getparam->value = ptimer->read(ptimer);
+		getparam->value = nvif_device_time(device);
 		break;
 	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
 		getparam->value = 1;
@@ -215,10 +216,10 @@
 		getparam->value = 1;
 		break;
 	case NOUVEAU_GETPARAM_GRAPH_UNITS:
-		getparam->value = gr->units ? gr->units(gr) : 0;
+		getparam->value = nvkm_gr_units(gr);
 		break;
 	default:
-		NV_PRINTK(debug, cli, "unknown parameter %lld\n", getparam->param);
+		NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
 		return -EINVAL;
 	}
 
@@ -337,7 +338,7 @@
 	struct nouveau_abi16_chan *chan;
 
 	list_for_each_entry(chan, &abi16->channels, head) {
-		if (chan->chan->object->handle == NOUVEAU_ABI16_CHAN(channel))
+		if (chan->chan->user.handle == NOUVEAU_ABI16_CHAN(channel))
 			return chan;
 	}
 
@@ -365,40 +366,91 @@
 nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
 {
 	struct drm_nouveau_grobj_alloc *init = data;
-	struct {
-		struct nvif_ioctl_v0 ioctl;
-		struct nvif_ioctl_new_v0 new;
-	} args = {
-		.ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
-		.ioctl.type = NVIF_IOCTL_V0_NEW,
-		.ioctl.path_nr = 3,
-		.ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
-		.ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
-		.ioctl.path[0] = NOUVEAU_ABI16_CHAN(init->channel),
-		.new.route = NVDRM_OBJECT_ABI16,
-		.new.handle = init->handle,
-		.new.oclass = init->class,
-	};
 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_abi16_chan *chan;
+	struct nouveau_abi16_ntfy *ntfy;
 	struct nvif_client *client;
-	int ret;
+	struct nvif_sclass *sclass;
+	s32 oclass = 0;
+	int ret, i;
 
 	if (unlikely(!abi16))
 		return -ENOMEM;
 
 	if (init->handle == ~0)
 		return nouveau_abi16_put(abi16, -EINVAL);
-	client = nvif_client(nvif_object(&abi16->device));
+	client = abi16->device.object.client;
 
-	/* compatibility with userspace that assumes 506e for all chipsets */
-	if (init->class == 0x506e) {
-		init->class = nouveau_abi16_swclass(drm);
-		if (init->class == 0x906e)
-			return nouveau_abi16_put(abi16, 0);
+	chan = nouveau_abi16_chan(abi16, init->channel);
+	if (!chan)
+		return nouveau_abi16_put(abi16, -ENOENT);
+
+	ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
+	if (ret < 0)
+		return nouveau_abi16_put(abi16, ret);
+
+	if ((init->class & 0x00ff) == 0x006e) {
+		/* nvsw: compatibility with older 0x*6e class identifier */
+		for (i = 0; !oclass && i < ret; i++) {
+			switch (sclass[i].oclass) {
+			case NVIF_IOCTL_NEW_V0_SW_NV04:
+			case NVIF_IOCTL_NEW_V0_SW_NV10:
+			case NVIF_IOCTL_NEW_V0_SW_NV50:
+			case NVIF_IOCTL_NEW_V0_SW_GF100:
+				oclass = sclass[i].oclass;
+				break;
+			default:
+				break;
+			}
+		}
+	} else
+	if ((init->class & 0x00ff) == 0x00b1) {
+		/* msvld: compatibility with incorrect version exposure */
+		for (i = 0; i < ret; i++) {
+			if ((sclass[i].oclass & 0x00ff) == 0x00b1) {
+				oclass = sclass[i].oclass;
+				break;
+			}
+		}
+	} else
+	if ((init->class & 0x00ff) == 0x00b2) { /* mspdec */
+		/* mspdec: compatibility with incorrect version exposure */
+		for (i = 0; i < ret; i++) {
+			if ((sclass[i].oclass & 0x00ff) == 0x00b2) {
+				oclass = sclass[i].oclass;
+				break;
+			}
+		}
+	} else
+	if ((init->class & 0x00ff) == 0x00b3) { /* msppp */
+		/* msppp: compatibility with incorrect version exposure */
+		for (i = 0; i < ret; i++) {
+			if ((sclass[i].oclass & 0x00ff) == 0x00b3) {
+				oclass = sclass[i].oclass;
+				break;
+			}
+		}
+	} else {
+		oclass = init->class;
 	}
 
-	ret = nvif_client_ioctl(client, &args, sizeof(args));
+	nvif_object_sclass_put(&sclass);
+	if (!oclass)
+		return nouveau_abi16_put(abi16, -EINVAL);
+
+	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
+	if (!ntfy)
+		return nouveau_abi16_put(abi16, -ENOMEM);
+
+	list_add(&ntfy->head, &chan->notifiers);
+
+	client->route = NVDRM_OBJECT_ABI16;
+	ret = nvif_object_init(&chan->chan->user, init->handle, oclass,
+			       NULL, 0, &ntfy->object);
+	client->route = NVDRM_OBJECT_NVIF;
+
+	if (ret)
+		nouveau_abi16_ntfy_fini(chan, ntfy);
 	return nouveau_abi16_put(abi16, ret);
 }
 
@@ -406,27 +458,13 @@
 nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
 {
 	struct drm_nouveau_notifierobj_alloc *info = data;
-	struct {
-		struct nvif_ioctl_v0 ioctl;
-		struct nvif_ioctl_new_v0 new;
-		struct nv_dma_v0 ctxdma;
-	} args = {
-		.ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
-		.ioctl.type = NVIF_IOCTL_V0_NEW,
-		.ioctl.path_nr = 3,
-		.ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
-		.ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
-		.ioctl.path[0] = NOUVEAU_ABI16_CHAN(info->channel),
-		.new.route = NVDRM_OBJECT_ABI16,
-		.new.handle = info->handle,
-		.new.oclass = NV_DMA_IN_MEMORY,
-	};
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
 	struct nouveau_abi16_chan *chan;
 	struct nouveau_abi16_ntfy *ntfy;
 	struct nvif_device *device = &abi16->device;
 	struct nvif_client *client;
+	struct nv_dma_v0 args = {};
 	int ret;
 
 	if (unlikely(!abi16))
@@ -435,7 +473,7 @@
 	/* completely unnecessary for these chipsets... */
 	if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
 		return nouveau_abi16_put(abi16, -EINVAL);
-	client = nvif_client(nvif_object(&abi16->device));
+	client = abi16->device.object.client;
 
 	chan = nouveau_abi16_chan(abi16, info->channel);
 	if (!chan)
@@ -446,41 +484,43 @@
 		return nouveau_abi16_put(abi16, -ENOMEM);
 
 	list_add(&ntfy->head, &chan->notifiers);
-	ntfy->handle = info->handle;
 
 	ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
 			   &ntfy->node);
 	if (ret)
 		goto done;
 
-	args.ctxdma.start = ntfy->node->offset;
-	args.ctxdma.limit = ntfy->node->offset + ntfy->node->length - 1;
+	args.start = ntfy->node->offset;
+	args.limit = ntfy->node->offset + ntfy->node->length - 1;
 	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		args.ctxdma.target = NV_DMA_V0_TARGET_VM;
-		args.ctxdma.access = NV_DMA_V0_ACCESS_VM;
-		args.ctxdma.start += chan->ntfy_vma.offset;
-		args.ctxdma.limit += chan->ntfy_vma.offset;
+		args.target = NV_DMA_V0_TARGET_VM;
+		args.access = NV_DMA_V0_ACCESS_VM;
+		args.start += chan->ntfy_vma.offset;
+		args.limit += chan->ntfy_vma.offset;
 	} else
-	if (drm->agp.stat == ENABLED) {
-		args.ctxdma.target = NV_DMA_V0_TARGET_AGP;
-		args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
-		args.ctxdma.start += drm->agp.base + chan->ntfy->bo.offset;
-		args.ctxdma.limit += drm->agp.base + chan->ntfy->bo.offset;
-		client->super = true;
+	if (drm->agp.bridge) {
+		args.target = NV_DMA_V0_TARGET_AGP;
+		args.access = NV_DMA_V0_ACCESS_RDWR;
+		args.start += drm->agp.base + chan->ntfy->bo.offset;
+		args.limit += drm->agp.base + chan->ntfy->bo.offset;
 	} else {
-		args.ctxdma.target = NV_DMA_V0_TARGET_VM;
-		args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
-		args.ctxdma.start += chan->ntfy->bo.offset;
-		args.ctxdma.limit += chan->ntfy->bo.offset;
+		args.target = NV_DMA_V0_TARGET_VM;
+		args.access = NV_DMA_V0_ACCESS_RDWR;
+		args.start += chan->ntfy->bo.offset;
+		args.limit += chan->ntfy->bo.offset;
 	}
 
-	ret = nvif_client_ioctl(client, &args, sizeof(args));
+	client->route = NVDRM_OBJECT_ABI16;
+	client->super = true;
+	ret = nvif_object_init(&chan->chan->user, info->handle,
+			       NV_DMA_IN_MEMORY, &args, sizeof(args),
+			       &ntfy->object);
 	client->super = false;
+	client->route = NVDRM_OBJECT_NVIF;
 	if (ret)
 		goto done;
 
 	info->offset = ntfy->node->offset;
-
 done:
 	if (ret)
 		nouveau_abi16_ntfy_fini(chan, ntfy);
@@ -491,47 +531,28 @@
 nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
 {
 	struct drm_nouveau_gpuobj_free *fini = data;
-	struct {
-		struct nvif_ioctl_v0 ioctl;
-		struct nvif_ioctl_del del;
-	} args = {
-		.ioctl.owner = NVDRM_OBJECT_ABI16,
-		.ioctl.type = NVIF_IOCTL_V0_DEL,
-		.ioctl.path_nr = 4,
-		.ioctl.path[3] = NOUVEAU_ABI16_CLIENT,
-		.ioctl.path[2] = NOUVEAU_ABI16_DEVICE,
-		.ioctl.path[1] = NOUVEAU_ABI16_CHAN(fini->channel),
-		.ioctl.path[0] = fini->handle,
-	};
 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
 	struct nouveau_abi16_chan *chan;
 	struct nouveau_abi16_ntfy *ntfy;
-	struct nvif_client *client;
-	int ret;
+	int ret = -ENOENT;
 
 	if (unlikely(!abi16))
 		return -ENOMEM;
 
 	chan = nouveau_abi16_chan(abi16, fini->channel);
 	if (!chan)
-		return nouveau_abi16_put(abi16, -ENOENT);
-	client = nvif_client(nvif_object(&abi16->device));
+		return nouveau_abi16_put(abi16, -EINVAL);
 
 	/* synchronize with the user channel and destroy the gpu object */
 	nouveau_channel_idle(chan->chan);
 
-	ret = nvif_client_ioctl(client, &args, sizeof(args));
-	if (ret)
-		return nouveau_abi16_put(abi16, ret);
-
-	/* cleanup extra state if this object was a notifier */
 	list_for_each_entry(ntfy, &chan->notifiers, head) {
-		if (ntfy->handle == fini->handle) {
-			nvkm_mm_free(&chan->heap, &ntfy->node);
-			list_del(&ntfy->head);
+		if (ntfy->object.handle == fini->handle) {
+			nouveau_abi16_ntfy_fini(chan, ntfy);
+			ret = 0;
 			break;
 		}
 	}
 
-	return nouveau_abi16_put(abi16, 0);
+	return nouveau_abi16_put(abi16, ret);
 }
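
The rewritten ioctls stop hand-building nvif_ioctl_v0 packets; instead they briefly flip the client's route so the core can distinguish ABI16-owned children from native NVIF ones. The pattern, as it appears in nouveau_abi16_ioctl_grobj_alloc() above:

    client->route = NVDRM_OBJECT_ABI16; /* tag children as ABI16-owned */
    ret = nvif_object_init(&chan->chan->user, init->handle, oclass,
                           NULL, 0, &ntfy->object);
    client->route = NVDRM_OBJECT_NVIF;  /* restore default routing */
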
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 86eb1ca..6584557 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -13,9 +13,9 @@
 int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS);
 
 struct nouveau_abi16_ntfy {
+	struct nvif_object object;
 	struct list_head head;
 	struct nvkm_mm_node *node;
-	u32 handle;
 };
 
 struct nouveau_abi16_chan {
@@ -37,7 +37,7 @@
 struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *, struct drm_device *);
 int  nouveau_abi16_put(struct nouveau_abi16 *, int);
 void nouveau_abi16_fini(struct nouveau_abi16 *);
-u16  nouveau_abi16_swclass(struct nouveau_drm *);
+s32  nouveau_abi16_swclass(struct nouveau_drm *);
 
 #define NOUVEAU_GEM_DOMAIN_VRAM      (1 << 1)
 #define NOUVEAU_GEM_DOMAIN_GART      (1 << 2)
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 6224246..df2d981 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -372,12 +372,12 @@
 	return len;
 }
 
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
+bool nouveau_acpi_rom_supported(struct device *dev)
 {
 	acpi_status status;
 	acpi_handle dhandle, rom_handle;
 
-	dhandle = ACPI_HANDLE(&pdev->dev);
+	dhandle = ACPI_HANDLE(dev);
 	if (!dhandle)
 		return false;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 74acf0f..2f03653 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -10,7 +10,7 @@
 void nouveau_unregister_dsm_handler(void);
 void nouveau_switcheroo_optimus_dsm(void);
 int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+bool nouveau_acpi_rom_supported(struct device *);
 void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
 #else
 static inline bool nouveau_is_optimus(void) { return false; };
@@ -18,7 +18,7 @@
 static inline void nouveau_register_dsm_handler(void) {}
 static inline void nouveau_unregister_dsm_handler(void) {}
 static inline void nouveau_switcheroo_optimus_dsm(void) {}
-static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
+static inline bool nouveau_acpi_rom_supported(struct device *dev) { return false; }
 static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
 static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
deleted file mode 100644
index 0b59709..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ /dev/null
@@ -1,195 +0,0 @@
-#include <linux/module.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_agp.h"
-#include "nouveau_reg.h"
-
-#if __OS_HAS_AGP
-MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
-static int nouveau_agpmode = -1;
-module_param_named(agpmode, nouveau_agpmode, int, 0400);
-
-struct nouveau_agpmode_quirk {
-	u16 hostbridge_vendor;
-	u16 hostbridge_device;
-	u16 chip_vendor;
-	u16 chip_device;
-	int mode;
-};
-
-static struct nouveau_agpmode_quirk nouveau_agpmode_quirk_list[] = {
-	/* VIA Apollo PRO133x / GeForce FX 5600 Ultra, max agpmode 2, fdo #20341 */
-	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
-
-	{},
-};
-
-static unsigned long
-get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
-{
-	struct nvif_device *device = &drm->device;
-	struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list;
-	int agpmode = nouveau_agpmode;
-	unsigned long mode = info->mode;
-
-	/*
-	 * FW seems to be broken on nv18, it makes the card lock up
-	 * randomly.
-	 */
-	if (device->info.chipset == 0x18)
-		mode &= ~PCI_AGP_COMMAND_FW;
-
-	/*
-	 * Go through the quirks list and adjust the agpmode accordingly.
-	 */
-	while (agpmode == -1 && quirk->hostbridge_vendor) {
-		if (info->id_vendor == quirk->hostbridge_vendor &&
-		    info->id_device == quirk->hostbridge_device &&
-		    nvxx_device(device)->pdev->vendor == quirk->chip_vendor &&
-		    nvxx_device(device)->pdev->device == quirk->chip_device) {
-			agpmode = quirk->mode;
-			NV_INFO(drm, "Forcing agp mode to %dX. Use agpmode to override.\n",
-				agpmode);
-			break;
-		}
-		++quirk;
-	}
-
-	/*
-	 * AGP mode set in the command line.
-	 */
-	if (agpmode > 0) {
-		bool agpv3 = mode & 0x8;
-		int rate = agpv3 ? agpmode / 4 : agpmode;
-
-		mode = (mode & ~0x7) | (rate & 0x7);
-	}
-
-	return mode;
-}
-
-static bool
-nouveau_agp_enabled(struct nouveau_drm *drm)
-{
-	struct drm_device *dev = drm->dev;
-
-	if (!dev->pdev || !drm_pci_device_is_agp(dev) || !dev->agp)
-		return false;
-
-	if (drm->agp.stat == UNKNOWN) {
-		if (!nouveau_agpmode)
-			return false;
-#ifdef __powerpc__
-		/* Disable AGP by default on all PowerPC machines for
-		 * now -- At least some UniNorth-2 AGP bridges are
-		 * known to be broken: DMA from the host to the card
-		 * works just fine, but writeback from the card to the
-		 * host goes straight to memory untranslated bypassing
-		 * the GATT somehow, making them quite painful to deal
-		 * with...
-		 */
-		if (nouveau_agpmode == -1)
-			return false;
-#endif
-		return true;
-	}
-
-	return (drm->agp.stat == ENABLED);
-}
-#endif
-
-void
-nouveau_agp_reset(struct nouveau_drm *drm)
-{
-#if __OS_HAS_AGP
-	struct nvif_device *device = &drm->device;
-	struct drm_device *dev = drm->dev;
-	u32 save[2];
-	int ret;
-
-	if (!nouveau_agp_enabled(drm))
-		return;
-
-	/* First of all, disable fast writes, otherwise if it's
-	 * already enabled in the AGP bridge and we disable the card's
-	 * AGP controller we might be locking ourselves out of it. */
-	if ((nvif_rd32(device, NV04_PBUS_PCI_NV_19) |
-	     dev->agp->mode) & PCI_AGP_COMMAND_FW) {
-		struct drm_agp_info info;
-		struct drm_agp_mode mode;
-
-		ret = drm_agp_info(dev, &info);
-		if (ret)
-			return;
-
-		mode.mode  = get_agp_mode(drm, &info);
-		mode.mode &= ~PCI_AGP_COMMAND_FW;
-
-		ret = drm_agp_enable(dev, mode);
-		if (ret)
-			return;
-	}
-
-
-	/* clear busmaster bit, and disable AGP */
-	save[0] = nvif_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000);
-	nvif_wr32(device, NV04_PBUS_PCI_NV_19, 0);
-
-	/* reset PGRAPH, PFIFO and PTIMER */
-	save[1] = nvif_mask(device, 0x000200, 0x00011100, 0x00000000);
-	nvif_mask(device, 0x000200, 0x00011100, save[1]);
-
-	/* and restore bustmaster bit (gives effect of resetting AGP) */
-	nvif_wr32(device, NV04_PBUS_PCI_NV_1, save[0]);
-#endif
-}
-
-void
-nouveau_agp_init(struct nouveau_drm *drm)
-{
-#if __OS_HAS_AGP
-	struct drm_device *dev = drm->dev;
-	struct drm_agp_info info;
-	struct drm_agp_mode mode;
-	int ret;
-
-	if (!nouveau_agp_enabled(drm))
-		return;
-	drm->agp.stat = DISABLE;
-
-	ret = drm_agp_acquire(dev);
-	if (ret) {
-		NV_ERROR(drm, "unable to acquire AGP: %d\n", ret);
-		return;
-	}
-
-	ret = drm_agp_info(dev, &info);
-	if (ret) {
-		NV_ERROR(drm, "unable to get AGP info: %d\n", ret);
-		return;
-	}
-
-	/* see agp.h for the AGPSTAT_* modes available */
-	mode.mode = get_agp_mode(drm, &info);
-
-	ret = drm_agp_enable(dev, mode);
-	if (ret) {
-		NV_ERROR(drm, "unable to enable AGP: %d\n", ret);
-		return;
-	}
-
-	drm->agp.stat = ENABLED;
-	drm->agp.base = info.aperture_base;
-	drm->agp.size = info.aperture_size;
-#endif
-}
-
-void
-nouveau_agp_fini(struct nouveau_drm *drm)
-{
-#if __OS_HAS_AGP
-	struct drm_device *dev = drm->dev;
-	if (dev->agp && dev->agp->acquired)
-		drm_agp_release(dev);
-#endif
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.h b/drivers/gpu/drm/nouveau/nouveau_agp.h
deleted file mode 100644
index b55c086..0000000
--- a/drivers/gpu/drm/nouveau/nouveau_agp.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __NOUVEAU_AGP_H__
-#define __NOUVEAU_AGP_H__
-
-struct nouveau_drm;
-
-void nouveau_agp_reset(struct nouveau_drm *);
-void nouveau_agp_init(struct nouveau_drm *);
-void nouveau_agp_fini(struct nouveau_drm *);
-
-#endif
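
With nouveau_agp.c gone, AGP acquisition happens inside the new nvkm PCI subdev, and every drm->agp.stat == ENABLED test in the rest of this diff collapses to a NULL check on the bridge handle:

    u64 gart_base = 0, gart_size = 0;
    if (drm->agp.bridge) {            /* was: drm->agp.stat == ENABLED */
        gart_base = drm->agp.base;    /* aperture filled in by the subdev */
        gart_size = drm->agp.size;
    }
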
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index e566c5b..89eb460 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -40,7 +40,7 @@
 nv40_get_intensity(struct backlight_device *bd)
 {
 	struct nouveau_drm *drm = bl_get_data(bd);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) &
 				   NV40_PMC_BACKLIGHT_MASK) >> 16;
 
@@ -51,7 +51,7 @@
 nv40_set_intensity(struct backlight_device *bd)
 {
 	struct nouveau_drm *drm = bl_get_data(bd);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int val = bd->props.brightness;
 	int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT);
 
@@ -71,7 +71,7 @@
 nv40_backlight_init(struct drm_connector *connector)
 {
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	struct backlight_properties props;
 	struct backlight_device *bd;
 
@@ -97,7 +97,7 @@
 {
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int or = nv_encoder->or;
 	u32 div = 1025;
 	u32 val;
@@ -112,7 +112,7 @@
 {
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int or = nv_encoder->or;
 	u32 div = 1025;
 	u32 val = (bd->props.brightness * div) / 100;
@@ -133,7 +133,7 @@
 {
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int or = nv_encoder->or;
 	u32 div, val;
 
@@ -151,7 +151,7 @@
 {
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	int or = nv_encoder->or;
 	u32 div, val;
 
@@ -177,7 +177,7 @@
 nv50_backlight_init(struct drm_connector *connector)
 {
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	struct nouveau_encoder *nv_encoder;
 	struct backlight_properties props;
 	struct backlight_device *bd;
@@ -193,9 +193,9 @@
 	if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
 		return 0;
 
-	if (device->info.chipset <= 0xa0 ||
-	    device->info.chipset == 0xaa ||
-	    device->info.chipset == 0xac)
+	if (drm->device.info.chipset <= 0xa0 ||
+	    drm->device.info.chipset == 0xaa ||
+	    drm->device.info.chipset == 0xac)
 		ops = &nv50_bl_ops;
 	else
 		ops = &nva3_bl_ops;
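
The backlight conversion is mechanical: nvif_rd32()/nvif_wr32() now take an nvif_object, so callers pass &drm->device.object and read chipset information from drm->device.info rather than through the device wrapper. The resulting idiom, using a register from the hunk above:

    struct nvif_object *device = &drm->device.object; /* was &drm->device */
    u32 val = nvif_rd32(device, NV40_PMC_BACKLIGHT);
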
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0190b69..4dca65a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -215,7 +215,7 @@
 	 */
 
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	struct nvbios *bios = &drm->vbios;
 	uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
 	uint32_t sel_clk_binding, sel_clk;
@@ -318,7 +318,8 @@
 static int
 get_fp_strap(struct drm_device *dev, struct nvbios *bios)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvif_object *device = &drm->device.object;
 
 	/*
 	 * The fp strap is normally dictated by the "User Strap" in
@@ -332,7 +333,7 @@
 	if (bios->major_version < 5 && bios->data[0x48] & 0x4)
 		return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
 
-	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
+	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 		return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
 	else
 		return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
@@ -634,7 +635,7 @@
 	 */
 
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	struct nvbios *bios = &drm->vbios;
 	int cv = bios->chip_version;
 	uint16_t clktable = 0, scriptptr;
@@ -1481,22 +1482,20 @@
 			entry->dpconf.link_bw = 540000;
 			break;
 		}
-		entry->dpconf.link_nr = (conf & 0x0f000000) >> 24;
-		if (dcb->version < 0x41) {
-			switch (entry->dpconf.link_nr) {
-			case 0xf:
-				entry->dpconf.link_nr = 4;
-				break;
-			case 0x3:
-				entry->dpconf.link_nr = 2;
-				break;
-			default:
-				entry->dpconf.link_nr = 1;
-				break;
-			}
+		switch ((conf & 0x0f000000) >> 24) {
+		case 0xf:
+		case 0x4:
+			entry->dpconf.link_nr = 4;
+			break;
+		case 0x3:
+		case 0x2:
+			entry->dpconf.link_nr = 2;
+			break;
+		default:
+			entry->dpconf.link_nr = 1;
+			break;
 		}
 		link = entry->dpconf.sor.link;
-		entry->i2c_index += NV_I2C_AUX(0);
 		break;
 	case DCB_OUTPUT_TMDS:
 		if (dcb->version >= 0x40) {
@@ -1892,11 +1891,12 @@
 	idx = -1;
 	while ((conn = olddcb_conn(dev, ++idx))) {
 		if (conn[0] != 0xff) {
-			NV_INFO(drm, "DCB conn %02d: ", idx);
 			if (olddcb_conntab(dev)[3] < 4)
-				pr_cont("%04x\n", ROM16(conn[0]));
+				NV_INFO(drm, "DCB conn %02d: %04x\n",
+					idx, ROM16(conn[0]));
 			else
-				pr_cont("%08x\n", ROM32(conn[0]));
+				NV_INFO(drm, "DCB conn %02d: %08x\n",
+					idx, ROM32(conn[0]));
 		}
 	}
 	dcb_fake_connectors(bios);
@@ -1915,7 +1915,7 @@
 	 */
 
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_object *device = &drm->device.object;
 	uint8_t bytes_to_write;
 	uint16_t hwsq_entry_offset;
 	int i;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6edcce1..15057b3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -48,24 +48,19 @@
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	int i = reg - drm->tile.reg;
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
-	struct nvkm_fb_tile *tile = &pfb->tile.region[i];
-	struct nvkm_engine *engine;
+	struct nvkm_device *device = nvxx_device(&drm->device);
+	struct nvkm_fb *fb = device->fb;
+	struct nvkm_fb_tile *tile = &fb->tile.region[i];
 
 	nouveau_fence_unref(&reg->fence);
 
 	if (tile->pitch)
-		pfb->tile.fini(pfb, i, tile);
+		nvkm_fb_tile_fini(fb, i, tile);
 
 	if (pitch)
-		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
+		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
 
-	pfb->tile.prog(pfb, i, tile);
-
-	if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_GR)))
-		engine->tile_prog(engine, i);
-	if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_MPEG)))
-		engine->tile_prog(engine, i);
+	nvkm_fb_tile_prog(fb, i, tile);
 }
 
 static struct nouveau_drm_tile *
@@ -105,18 +100,18 @@
 		   u32 size, u32 pitch, u32 flags)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+	struct nvkm_fb *fb = nvxx_fb(&drm->device);
 	struct nouveau_drm_tile *tile, *found = NULL;
 	int i;
 
-	for (i = 0; i < pfb->tile.regions; i++) {
+	for (i = 0; i < fb->tile.regions; i++) {
 		tile = nv10_bo_get_tile_region(dev, i);
 
 		if (pitch && !found) {
 			found = tile;
 			continue;
 
-		} else if (tile && pfb->tile.region[i].pitch) {
+		} else if (tile && fb->tile.region[i].pitch) {
 			/* Kill an unused tile region. */
 			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
 		}
@@ -214,7 +209,7 @@
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &drm->ttm.bdev;
 
-	if (!nv_device_is_cpu_coherent(nvxx_device(&drm->device)))
+	if (!nvxx_device(&drm->device)->func->cpu_coherent)
 		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
 
 	nvbo->page_shift = 12;
@@ -471,8 +466,8 @@
 		return;
 
 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-		dma_sync_single_for_device(nv_device_base(device),
-			ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE);
+		dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
+					   PAGE_SIZE, DMA_TO_DEVICE);
 }
 
 void
@@ -491,8 +486,8 @@
 		return;
 
 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-		dma_sync_single_for_cpu(nv_device_base(device),
-			ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE);
+		dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
+					PAGE_SIZE, DMA_FROM_DEVICE);
 }
 
 int
@@ -581,10 +576,9 @@
 {
 #if __OS_HAS_AGP
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct drm_device *dev = drm->dev;
 
-	if (drm->agp.stat == ENABLED) {
-		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
+	if (drm->agp.bridge) {
+		return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
 					 page_flags, dummy_read);
 	}
 #endif
@@ -636,12 +630,12 @@
 		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 			man->func = &nouveau_gart_manager;
 		else
-		if (drm->agp.stat != ENABLED)
+		if (!drm->agp.bridge)
 			man->func = &nv04_gart_manager;
 		else
 			man->func = &ttm_bo_manager_func;
 
-		if (drm->agp.stat == ENABLED) {
+		if (drm->agp.bridge) {
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 			man->available_caching = TTM_PL_FLAG_UNCACHED |
 				TTM_PL_FLAG_WC;
@@ -1064,7 +1058,7 @@
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_channel *chan = drm->ttm.chan;
-	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
+	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nouveau_fence *fence;
 	int ret;
 
@@ -1104,7 +1098,7 @@
 	static const struct {
 		const char *name;
 		int engine;
-		u32 oclass;
+		s32 oclass;
 		int (*exec)(struct nouveau_channel *,
 			    struct ttm_buffer_object *,
 			    struct ttm_mem_reg *, struct ttm_mem_reg *);
@@ -1137,7 +1131,7 @@
 		if (chan == NULL)
 			continue;
 
-		ret = nvif_object_init(chan->object, NULL,
+		ret = nvif_object_init(&chan->user,
 				       mthd->oclass | (mthd->engine << 16),
 				       mthd->oclass, NULL, 0,
 				       &drm->ttm.copy);
@@ -1356,6 +1350,7 @@
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct nvkm_device *device = nvxx_device(&drm->device);
 	struct nvkm_mem *node = mem->mm_node;
 	int ret;
 
@@ -1372,10 +1367,10 @@
 		return 0;
 	case TTM_PL_TT:
 #if __OS_HAS_AGP
-		if (drm->agp.stat == ENABLED) {
+		if (drm->agp.bridge) {
 			mem->bus.offset = mem->start << PAGE_SHIFT;
 			mem->bus.base = drm->agp.base;
-			mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture;
+			mem->bus.is_iomem = !drm->agp.cma;
 		}
 #endif
 		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
@@ -1384,16 +1379,20 @@
 		/* fallthrough, tiled memory */
 	case TTM_PL_VRAM:
 		mem->bus.offset = mem->start << PAGE_SHIFT;
-		mem->bus.base = nv_device_resource_start(nvxx_device(&drm->device), 1);
+		mem->bus.base = device->func->resource_addr(device, 1);
 		mem->bus.is_iomem = true;
 		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 			struct nvkm_bar *bar = nvxx_bar(&drm->device);
+			int page_shift = 12;
+			if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+				page_shift = node->page_shift;
 
-			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
-					&node->bar_vma);
+			ret = nvkm_bar_umap(bar, node->size << 12, page_shift,
+					    &node->bar_vma);
 			if (ret)
 				return ret;
 
+			nvkm_vm_map(&node->bar_vma, node);
 			mem->bus.offset = node->bar_vma.offset;
 		}
 		break;
@@ -1406,14 +1405,13 @@
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
-	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct nvkm_bar *bar = nvxx_bar(&drm->device);
 	struct nvkm_mem *node = mem->mm_node;
 
 	if (!node->bar_vma.node)
 		return;
 
-	bar->unmap(bar, &node->bar_vma);
+	nvkm_vm_unmap(&node->bar_vma);
+	nvkm_vm_put(&node->bar_vma);
 }
 
 static int
@@ -1421,8 +1419,8 @@
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nvif_device *device = &drm->device;
-	u32 mappable = nv_device_resource_len(nvxx_device(device), 1) >> PAGE_SHIFT;
+	struct nvkm_device *device = nvxx_device(&drm->device);
+	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
 	int i, ret;
 
 	/* as long as the bo isn't in vram, and isn't tiled, we've got
@@ -1488,18 +1486,18 @@
 	drm = nouveau_bdev(ttm->bdev);
 	device = nvxx_device(&drm->device);
 	dev = drm->dev;
-	pdev = nv_device_base(device);
+	pdev = device->dev;
 
 	/*
 	 * Objects matching this condition have been marked as force_coherent,
 	 * so use the DMA API for them.
 	 */
-	if (!nv_device_is_cpu_coherent(device) &&
+	if (!nvxx_device(&drm->device)->func->cpu_coherent &&
 	    ttm->caching_state == tt_uncached)
 		return ttm_dma_populate(ttm_dma, dev->dev);
 
 #if __OS_HAS_AGP
-	if (drm->agp.stat == ENABLED) {
+	if (drm->agp.bridge) {
 		return ttm_agp_tt_populate(ttm);
 	}
 #endif
@@ -1553,20 +1551,20 @@
 	drm = nouveau_bdev(ttm->bdev);
 	device = nvxx_device(&drm->device);
 	dev = drm->dev;
-	pdev = nv_device_base(device);
+	pdev = device->dev;
 
 	/*
 	 * Objects matching this condition have been marked as force_coherent,
 	 * so use the DMA API for them.
 	 */
-	if (!nv_device_is_cpu_coherent(device) &&
+	if (!nvxx_device(&drm->device)->func->cpu_coherent &&
 	    ttm->caching_state == tt_uncached) {
 		ttm_dma_unpopulate(ttm_dma, dev->dev);
 		return;
 	}
 
 #if __OS_HAS_AGP
-	if (drm->agp.stat == ENABLED) {
+	if (drm->agp.bridge) {
 		ttm_agp_tt_unpopulate(ttm);
 		return;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 0589bab..ff5e59d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -24,6 +24,7 @@
 
 #include <nvif/os.h>
 #include <nvif/class.h>
+#include <nvif/ioctl.h>
 
 /*XXX*/
 #include <core/client.h>
@@ -42,20 +43,26 @@
 int
 nouveau_channel_idle(struct nouveau_channel *chan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(chan->object);
-	struct nouveau_fence *fence = NULL;
-	int ret;
+	if (likely(chan && chan->fence)) {
+		struct nouveau_cli *cli = (void *)chan->user.client;
+		struct nouveau_fence *fence = NULL;
+		int ret;
 
-	ret = nouveau_fence_new(chan, false, &fence);
-	if (!ret) {
-		ret = nouveau_fence_wait(fence, false, false);
-		nouveau_fence_unref(&fence);
+		ret = nouveau_fence_new(chan, false, &fence);
+		if (!ret) {
+			ret = nouveau_fence_wait(fence, false, false);
+			nouveau_fence_unref(&fence);
+		}
+
+		if (ret) {
+			NV_PRINTK(err, cli, "failed to idle channel "
+					    "0x%08x [%s]\n",
+				  chan->user.handle,
+				  nvxx_client(&cli->base)->name);
+			return ret;
+		}
 	}
-
-	if (ret)
-		NV_PRINTK(error, cli, "failed to idle channel 0x%08x [%s]\n",
-			  chan->object->handle, nvxx_client(&cli->base)->name);
-	return ret;
+	return 0;
 }
 
 void
@@ -63,21 +70,18 @@
 {
 	struct nouveau_channel *chan = *pchan;
 	if (chan) {
-		if (chan->fence) {
-			nouveau_channel_idle(chan);
+		if (chan->fence)
 			nouveau_fence(chan->drm)->context_del(chan);
-		}
 		nvif_object_fini(&chan->nvsw);
 		nvif_object_fini(&chan->gart);
 		nvif_object_fini(&chan->vram);
-		nvif_object_ref(NULL, &chan->object);
+		nvif_object_fini(&chan->user);
 		nvif_object_fini(&chan->push.ctxdma);
 		nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
 		nouveau_bo_unmap(chan->push.buffer);
 		if (chan->push.buffer && chan->push.buffer->pin_refcnt)
 			nouveau_bo_unpin(chan->push.buffer);
 		nouveau_bo_ref(NULL, &chan->push.buffer);
-		nvif_device_ref(NULL, &chan->device);
 		kfree(chan);
 	}
 	*pchan = NULL;
@@ -87,7 +91,7 @@
 nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 		     u32 handle, u32 size, struct nouveau_channel **pchan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+	struct nouveau_cli *cli = (void *)device->object.client;
 	struct nvkm_mmu *mmu = nvxx_mmu(device);
 	struct nv_dma_v0 args = {};
 	struct nouveau_channel *chan;
@@ -98,7 +102,7 @@
 	if (!chan)
 		return -ENOMEM;
 
-	nvif_device_ref(device, &chan->device);
+	chan->device = device;
 	chan->drm = drm;
 
 	/* allocate memory for dma push buffer */
@@ -146,7 +150,8 @@
 			 */
 			args.target = NV_DMA_V0_TARGET_PCI;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
-			args.start = nv_device_resource_start(nvxx_device(device), 1);
+			args.start = nvxx_device(device)->func->
+				resource_addr(nvxx_device(device), 1);
 			args.limit = args.start + device->info.ram_user - 1;
 		} else {
 			args.target = NV_DMA_V0_TARGET_VRAM;
@@ -155,7 +160,7 @@
 			args.limit = device->info.ram_user - 1;
 		}
 	} else {
-		if (chan->drm->agp.stat == ENABLED) {
+		if (chan->drm->agp.bridge) {
 			args.target = NV_DMA_V0_TARGET_AGP;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = chan->drm->agp.base;
@@ -169,7 +174,7 @@
 		}
 	}
 
-	ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH |
+	ret = nvif_object_init(&device->object, NVDRM_PUSH |
 			       (handle & 0xffff), NV_DMA_FROM_MEMORY,
 			       &args, sizeof(args), &chan->push.ctxdma);
 	if (ret) {
@@ -193,8 +198,9 @@
 	const u16 *oclass = oclasses;
 	union {
 		struct nv50_channel_gpfifo_v0 nv50;
+		struct fermi_channel_gpfifo_v0 fermi;
 		struct kepler_channel_gpfifo_a_v0 kepler;
-	} args, *retn;
+	} args;
 	struct nouveau_channel *chan;
 	u32 size;
 	int ret;
@@ -210,26 +216,36 @@
 		if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
 			args.kepler.version = 0;
 			args.kepler.engine  = engine;
-			args.kepler.pushbuf = chan->push.ctxdma.handle;
 			args.kepler.ilength = 0x02000;
 			args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
+			args.kepler.vm = 0;
 			size = sizeof(args.kepler);
+		} else
+		if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
+			args.fermi.version = 0;
+			args.fermi.ilength = 0x02000;
+			args.fermi.ioffset = 0x10000 + chan->push.vma.offset;
+			args.fermi.vm = 0;
+			size = sizeof(args.fermi);
 		} else {
 			args.nv50.version = 0;
-			args.nv50.pushbuf = chan->push.ctxdma.handle;
 			args.nv50.ilength = 0x02000;
 			args.nv50.ioffset = 0x10000 + chan->push.vma.offset;
+			args.nv50.pushbuf = nvif_handle(&chan->push.ctxdma);
+			args.nv50.vm = 0;
 			size = sizeof(args.nv50);
 		}
 
-		ret = nvif_object_new(nvif_object(device), handle, *oclass++,
-				      &args, size, &chan->object);
+		ret = nvif_object_init(&device->object, handle, *oclass++,
+				       &args, size, &chan->user);
 		if (ret == 0) {
-			retn = chan->object->data;
-			if (chan->object->oclass >= KEPLER_CHANNEL_GPFIFO_A)
-				chan->chid = retn->kepler.chid;
+			if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A)
+				chan->chid = args.kepler.chid;
 			else
-				chan->chid = retn->nv50.chid;
+			if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO)
+				chan->chid = args.fermi.chid;
+			else
+				chan->chid = args.nv50.chid;
 			return ret;
 		}
 	} while (*oclass);
@@ -248,7 +264,7 @@
 					NV03_CHANNEL_DMA,
 					0 };
 	const u16 *oclass = oclasses;
-	struct nv03_channel_dma_v0 args, *retn;
+	struct nv03_channel_dma_v0 args;
 	struct nouveau_channel *chan;
 	int ret;
 
@@ -260,15 +276,14 @@
 
 	/* create channel object */
 	args.version = 0;
-	args.pushbuf = chan->push.ctxdma.handle;
+	args.pushbuf = nvif_handle(&chan->push.ctxdma);
 	args.offset = chan->push.vma.offset;
 
 	do {
-		ret = nvif_object_new(nvif_object(device), handle, *oclass++,
-				      &args, sizeof(args), &chan->object);
+		ret = nvif_object_init(&device->object, handle, *oclass++,
+				       &args, sizeof(args), &chan->user);
 		if (ret == 0) {
-			retn = chan->object->data;
-			chan->chid = retn->chid;
+			chan->chid = args.chid;
 			return ret;
 		}
 	} while (ret && *oclass);
@@ -281,13 +296,12 @@
 nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 {
 	struct nvif_device *device = chan->device;
-	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nvkm_mmu *mmu = nvxx_mmu(device);
-	struct nvkm_sw_chan *swch;
 	struct nv_dma_v0 args = {};
 	int ret, i;
 
-	nvif_object_map(chan->object);
+	nvif_object_map(&chan->user);
 
 	/* allocate dma objects to cover all allowed vram, and gart */
 	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
@@ -303,9 +317,8 @@
 			args.limit = device->info.ram_user - 1;
 		}
 
-		ret = nvif_object_init(chan->object, NULL, vram,
-				       NV_DMA_IN_MEMORY, &args,
-				       sizeof(args), &chan->vram);
+		ret = nvif_object_init(&chan->user, vram, NV_DMA_IN_MEMORY,
+				       &args, sizeof(args), &chan->vram);
 		if (ret)
 			return ret;
 
@@ -315,7 +328,7 @@
 			args.start = 0;
 			args.limit = cli->vm->mmu->limit - 1;
 		} else
-		if (chan->drm->agp.stat == ENABLED) {
+		if (chan->drm->agp.bridge) {
 			args.target = NV_DMA_V0_TARGET_AGP;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
 			args.start = chan->drm->agp.base;
@@ -328,15 +341,14 @@
 			args.limit = mmu->limit - 1;
 		}
 
-		ret = nvif_object_init(chan->object, NULL, gart,
-				       NV_DMA_IN_MEMORY, &args,
-				       sizeof(args), &chan->gart);
+		ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
+				       &args, sizeof(args), &chan->gart);
 		if (ret)
 			return ret;
 	}
 
 	/* initialise dma tracking parameters */
-	switch (chan->object->oclass & 0x00ff) {
+	switch (chan->user.oclass & 0x00ff) {
 	case 0x006b:
 	case 0x006e:
 		chan->user_put = 0x40;
@@ -368,15 +380,12 @@
 
 	/* allocate software object class (used for fences on <= nv05) */
 	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
-		ret = nvif_object_init(chan->object, NULL, 0x006e, 0x006e,
+		ret = nvif_object_init(&chan->user, 0x006e,
+				       NVIF_IOCTL_NEW_V0_SW_NV04,
 				       NULL, 0, &chan->nvsw);
 		if (ret)
 			return ret;
 
-		swch = (void *)nvxx_object(&chan->nvsw)->parent;
-		swch->flip = nouveau_flip_complete;
-		swch->flip_data = chan;
-
 		ret = RING_SPACE(chan, 2);
 		if (ret)
 			return ret;
@@ -395,7 +404,7 @@
 		    u32 handle, u32 arg0, u32 arg1,
 		    struct nouveau_channel **pchan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+	struct nouveau_cli *cli = (void *)device->object.client;
 	bool super;
 	int ret;
 
@@ -405,17 +414,17 @@
 
 	ret = nouveau_channel_ind(drm, device, handle, arg0, pchan);
 	if (ret) {
-		NV_PRINTK(debug, cli, "ib channel create, %d\n", ret);
+		NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
 		ret = nouveau_channel_dma(drm, device, handle, pchan);
 		if (ret) {
-			NV_PRINTK(debug, cli, "dma channel create, %d\n", ret);
+			NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret);
 			goto done;
 		}
 	}
 
 	ret = nouveau_channel_init(*pchan, arg0, arg1);
 	if (ret) {
-		NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret);
+		NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
 		nouveau_channel_del(pchan);
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 8b3640f..2ed3241 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -37,7 +37,7 @@
 	u32 user_get;
 	u32 user_put;
 
-	struct nvif_object *object;
+	struct nvif_object user;
 };
 
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 3162040..2e7cbe9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -125,9 +125,9 @@
 	 * is handled by the SOR itself, and not required for LVDS DDC.
 	 */
 	if (nv_connector->type == DCB_CONNECTOR_eDP) {
-		panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
+		panel = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
 		if (panel == 0) {
-			gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
+			nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
 			msleep(300);
 		}
 	}
@@ -148,7 +148,7 @@
 				break;
 		} else
 		if (nv_encoder->i2c) {
-			if (nv_probe_i2c(nv_encoder->i2c, 0x50))
+			if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
 				break;
 		}
 	}
@@ -157,7 +157,7 @@
 	 * state to avoid confusing the SOR for other output types.
 	 */
 	if (!nv_encoder && panel == 0)
-		gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
+		nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
 
 	return nv_encoder;
 }
@@ -241,7 +241,7 @@
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_encoder *nv_encoder = NULL;
 	struct nouveau_encoder *nv_partner;
-	struct nvkm_i2c_port *i2c;
+	struct i2c_adapter *i2c;
 	int type;
 	int ret;
 	enum drm_connector_status conn_status = connector_status_disconnected;
@@ -259,7 +259,7 @@
 
 	nv_encoder = nouveau_connector_ddc_detect(connector);
 	if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
-		nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
+		nv_connector->edid = drm_get_edid(connector, i2c);
 		drm_mode_connector_update_edid_property(connector,
 							nv_connector->edid);
 		if (!nv_connector->edid) {
@@ -919,7 +919,7 @@
 	.force = nouveau_connector_force
 };
 
-static void
+static int
 nouveau_connector_dp_dpms(struct drm_connector *connector, int mode)
 {
 	struct nouveau_encoder *nv_encoder = NULL;
@@ -930,15 +930,15 @@
 	    nv_encoder->dcb->type == DCB_OUTPUT_DP) {
 		if (mode == DRM_MODE_DPMS_ON) {
 			u8 data = DP_SET_POWER_D0;
-			nv_wraux(nv_encoder->i2c, DP_SET_POWER, &data, 1);
+			nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
 			usleep_range(1000, 2000);
 		} else {
 			u8 data = DP_SET_POWER_D3;
-			nv_wraux(nv_encoder->i2c, DP_SET_POWER, &data, 1);
+			nvkm_wraux(nv_encoder->aux, DP_SET_POWER, &data, 1);
 		}
 	}
 
-	drm_helper_connector_dpms(connector, mode);
+	return drm_helper_connector_dpms(connector, mode);
 }
 
 static const struct drm_connector_funcs
@@ -980,29 +980,29 @@
 }
 
 static ssize_t
-nouveau_connector_aux_xfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
 {
 	struct nouveau_connector *nv_connector =
-		container_of(aux, typeof(*nv_connector), aux);
+		container_of(obj, typeof(*nv_connector), aux);
 	struct nouveau_encoder *nv_encoder;
-	struct nvkm_i2c_port *port;
+	struct nvkm_i2c_aux *aux;
 	int ret;
 
 	nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
-	if (!nv_encoder || !(port = nv_encoder->i2c))
+	if (!nv_encoder || !(aux = nv_encoder->aux))
 		return -ENODEV;
 	if (WARN_ON(msg->size > 16))
 		return -E2BIG;
 	if (msg->size == 0)
 		return msg->size;
 
-	ret = nvkm_i2c(port)->acquire(port, 0);
+	ret = nvkm_i2c_aux_acquire(aux);
 	if (ret)
 		return ret;
 
-	ret = port->func->aux(port, false, msg->request, msg->address,
-			      msg->buffer, msg->size);
-	nvkm_i2c(port)->release(port);
+	ret = nvkm_i2c_aux_xfer(aux, false, msg->request, msg->address,
+				msg->buffer, msg->size);
+	nvkm_i2c_aux_release(aux);
 	if (ret >= 0) {
 		msg->reply = ret;
 		return msg->size;
@@ -1256,8 +1256,8 @@
 		break;
 	}
 
-	ret = nvif_notify_init(&disp->disp, NULL, nouveau_connector_hotplug,
-				true, NV04_DISP_NTFY_CONN,
+	ret = nvif_notify_init(&disp->disp, nouveau_connector_hotplug, true,
+			       NV04_DISP_NTFY_CONN,
 			       &(struct nvif_notify_conn_req_v0) {
 				.mask = NVIF_NOTIFY_CONN_V0_ANY,
 				.conn = index,
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 8670d90..cc6c228 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -185,7 +185,7 @@
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-		ret = nvif_notify_init(&disp->disp, NULL,
+		ret = nvif_notify_init(&disp->disp,
 				       nouveau_display_vblank_handler, false,
 				       NV04_DISP_NTFY_VBLANK,
 				       &(struct nvif_notify_head_req_v0) {
@@ -358,6 +358,7 @@
 nouveau_display_init(struct drm_device *dev)
 {
 	struct nouveau_display *disp = nouveau_display(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_connector *connector;
 	int ret;
 
@@ -374,6 +375,8 @@
 		nvif_notify_get(&conn->hpd);
 	}
 
+	/* enable flip completion events */
+	nvif_notify_get(&drm->flip);
 	return ret;
 }
 
@@ -381,6 +384,7 @@
 nouveau_display_fini(struct drm_device *dev)
 {
 	struct nouveau_display *disp = nouveau_display(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_connector *connector;
 	int head;
 
@@ -388,6 +392,9 @@
 	for (head = 0; head < dev->mode_config.num_crtc; head++)
 		drm_vblank_off(dev, head);
 
+	/* disable flip completion events */
+	nvif_notify_put(&drm->flip);
+
 	/* disable hotplug interrupts */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
@@ -438,6 +445,7 @@
 nouveau_display_create(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvkm_device *device = nvxx_device(&drm->device);
 	struct nouveau_display *disp;
 	int ret;
 
@@ -450,7 +458,7 @@
 	drm_mode_create_dvi_i_properties(dev);
 
 	dev->mode_config.funcs = &nouveau_mode_config_funcs;
-	dev->mode_config.fb_base = nv_device_resource_start(nvxx_device(&drm->device), 1);
+	dev->mode_config.fb_base = device->func->resource_addr(device, 1);
 
 	dev->mode_config.min_width = 0;
 	dev->mode_config.min_height = 0;
@@ -494,7 +502,7 @@
 		int i;
 
 		for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
-			ret = nvif_object_init(nvif_object(&drm->device), NULL,
+			ret = nvif_object_init(&drm->device.object,
 					       NVDRM_DISPLAY, oclass[i],
 					       NULL, 0, &disp->disp);
 		}
@@ -711,7 +719,7 @@
 	chan = drm->channel;
 	if (!chan)
 		return -ENODEV;
-	cli = (void *)nvif_client(&chan->device->base);
+	cli = (void *)chan->user.client;
 
 	s = kzalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
@@ -847,10 +855,10 @@
 }
 
 int
-nouveau_flip_complete(void *data)
+nouveau_flip_complete(struct nvif_notify *notify)
 {
-	struct nouveau_channel *chan = data;
-	struct nouveau_drm *drm = chan->drm;
+	struct nouveau_drm *drm = container_of(notify, typeof(*drm), flip);
+	struct nouveau_channel *chan = drm->channel;
 	struct nouveau_page_flip_state state;
 
 	if (!nouveau_finish_page_flip(chan, &state)) {
@@ -861,7 +869,7 @@
 		}
 	}
 
-	return 0;
+	return NVIF_NOTIFY_KEEP;
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 6d9245a..d168c63 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -52,9 +52,9 @@
 {
 	uint64_t val;
 
-	val = nvif_rd32(chan, chan->user_get);
+	val = nvif_rd32(&chan->user, chan->user_get);
         if (chan->user_get_hi)
-                val |= (uint64_t)nvif_rd32(chan, chan->user_get_hi) << 32;
+                val |= (uint64_t)nvif_rd32(&chan->user, chan->user_get_hi) << 32;
 
 	/* reset counter as long as GET is still advancing, this is
 	 * to avoid misdetecting a GPU lockup if the GPU happens to
@@ -82,7 +82,7 @@
 nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
 	      int delta, int length)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
+	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nouveau_bo *pb = chan->push.buffer;
 	struct nvkm_vma *vma;
 	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
@@ -103,7 +103,7 @@
 	/* Flush writes. */
 	nouveau_bo_rd32(pb, 0);
 
-	nvif_wr32(chan, 0x8c, chan->dma.ib_put);
+	nvif_wr32(&chan->user, 0x8c, chan->dma.ib_put);
 	chan->dma.ib_free--;
 }
 
@@ -113,7 +113,7 @@
 	uint32_t cnt = 0, prev_get = 0;
 
 	while (chan->dma.ib_free < count) {
-		uint32_t get = nvif_rd32(chan, 0x88);
+		uint32_t get = nvif_rd32(&chan->user, 0x88);
 		if (get != prev_get) {
 			prev_get = get;
 			cnt = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 8da0a27..aff3a9d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -140,7 +140,7 @@
 #define WRITE_PUT(val) do {                                                    \
 	mb();                                                   \
 	nouveau_bo_rd32(chan->push.buffer, 0);                                 \
-	nvif_wr32(chan, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
+	nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
 } while (0)
 
 static inline void
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index c3ef30b..e17e15e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -31,8 +31,7 @@
 #include "nouveau_crtc.h"
 
 static void
-nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_port *auxch,
-		     u8 *dpcd)
+nouveau_dp_probe_oui(struct drm_device *dev, struct nvkm_i2c_aux *aux, u8 *dpcd)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	u8 buf[3];
@@ -40,11 +39,11 @@
 	if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
 		return;
 
-	if (!nv_rdaux(auxch, DP_SINK_OUI, buf, 3))
+	if (!nvkm_rdaux(aux, DP_SINK_OUI, buf, 3))
 		NV_DEBUG(drm, "Sink OUI: %02hx%02hx%02hx\n",
 			     buf[0], buf[1], buf[2]);
 
-	if (!nv_rdaux(auxch, DP_BRANCH_OUI, buf, 3))
+	if (!nvkm_rdaux(aux, DP_BRANCH_OUI, buf, 3))
 		NV_DEBUG(drm, "Branch OUI: %02hx%02hx%02hx\n",
 			     buf[0], buf[1], buf[2]);
 
@@ -55,15 +54,15 @@
 {
 	struct drm_device *dev = nv_encoder->base.base.dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_i2c_port *auxch;
+	struct nvkm_i2c_aux *aux;
 	u8 *dpcd = nv_encoder->dp.dpcd;
 	int ret;
 
-	auxch = nv_encoder->i2c;
-	if (!auxch)
+	aux = nv_encoder->aux;
+	if (!aux)
 		return -ENODEV;
 
-	ret = nv_rdaux(auxch, DP_DPCD_REV, dpcd, 8);
+	ret = nvkm_rdaux(aux, DP_DPCD_REV, dpcd, 8);
 	if (ret)
 		return ret;
 
@@ -84,6 +83,6 @@
 	NV_DEBUG(drm, "maximum: %dx%d\n",
 		     nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
 
-	nouveau_dp_probe_oui(dev, auxch, dpcd);
+	nouveau_dp_probe_oui(dev, aux, dpcd);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 477cbb1..ccefb64 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -32,15 +32,15 @@
 #include "drmP.h"
 #include "drm_crtc_helper.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 #include <core/option.h>
+#include <core/pci.h>
+#include <core/tegra.h>
 
 #include "nouveau_drm.h"
 #include "nouveau_dma.h"
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
-#include "nouveau_agp.h"
 #include "nouveau_vga.h"
 #include "nouveau_sysfs.h"
 #include "nouveau_hwmon.h"
@@ -105,14 +105,18 @@
 }
 
 static int
-nouveau_cli_create(u64 name, const char *sname,
+nouveau_cli_create(struct drm_device *dev, const char *sname,
 		   int size, void **pcli)
 {
 	struct nouveau_cli *cli = *pcli = kzalloc(size, GFP_KERNEL);
+	int ret;
 	if (cli) {
-		int ret = nvif_client_init(NULL, NULL, sname, name,
-					   nouveau_config, nouveau_debug,
-					  &cli->base);
+		snprintf(cli->name, sizeof(cli->name), "%s", sname);
+		cli->dev = dev;
+
+		ret = nvif_client_init(NULL, cli->name, nouveau_name(dev),
+				       nouveau_config, nouveau_debug,
+				       &cli->base);
 		if (ret == 0) {
 			mutex_init(&cli->mutex);
 			usif_client_init(cli);
@@ -134,12 +138,17 @@
 static void
 nouveau_accel_fini(struct nouveau_drm *drm)
 {
-	nouveau_channel_del(&drm->channel);
+	nouveau_channel_idle(drm->channel);
 	nvif_object_fini(&drm->ntfy);
-	nvkm_gpuobj_ref(NULL, &drm->notify);
+	nvkm_gpuobj_del(&drm->notify);
+	nvif_notify_fini(&drm->flip);
 	nvif_object_fini(&drm->nvsw);
-	nouveau_channel_del(&drm->cechan);
+	nouveau_channel_del(&drm->channel);
+
+	nouveau_channel_idle(drm->cechan);
 	nvif_object_fini(&drm->ttm.copy);
+	nouveau_channel_del(&drm->cechan);
+
 	if (drm->fence)
 		nouveau_fence(drm)->dtor(drm);
 }
@@ -148,9 +157,9 @@
 nouveau_accel_init(struct nouveau_drm *drm)
 {
 	struct nvif_device *device = &drm->device;
+	struct nvif_sclass *sclass;
 	u32 arg0, arg1;
-	u32 sclass[16];
-	int ret, i;
+	int ret, i, n;
 
 	if (nouveau_noaccel)
 		return;
@@ -159,12 +168,12 @@
 	/*XXX: this is crap, but the fence/channel stuff is a little
 	 *     backwards in some places.  this will be fixed.
 	 */
-	ret = nvif_object_sclass(&device->base, sclass, ARRAY_SIZE(sclass));
+	ret = n = nvif_object_sclass_get(&device->object, &sclass);
 	if (ret < 0)
 		return;
 
-	for (ret = -ENOSYS, i = 0; ret && i < ARRAY_SIZE(sclass); i++) {
-		switch (sclass[i]) {
+	for (ret = -ENOSYS, i = 0; i < n; i++) {
+		switch (sclass[i].oclass) {
 		case NV03_CHANNEL_DMA:
 			ret = nv04_fence_create(drm);
 			break;
@@ -191,6 +200,7 @@
 		}
 	}
 
+	nvif_object_sclass_put(&sclass);
 	if (ret) {
 		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
 		nouveau_accel_fini(drm);
@@ -231,10 +241,9 @@
 		return;
 	}
 
-	ret = nvif_object_init(drm->channel->object, NULL, NVDRM_NVSW,
+	ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
 			       nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw);
 	if (ret == 0) {
-		struct nvkm_sw_chan *swch;
 		ret = RING_SPACE(drm->channel, 2);
 		if (ret == 0) {
 			if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
@@ -246,9 +255,16 @@
 				OUT_RING  (drm->channel, 0x001f0000);
 			}
 		}
-		swch = (void *)nvxx_object(&drm->nvsw)->parent;
-		swch->flip = nouveau_flip_complete;
-		swch->flip_data = drm->channel;
+
+		ret = nvif_notify_init(&drm->nvsw, nouveau_flip_complete,
+				       false, NVSW_NTFY_UEVENT, NULL, 0, 0,
+				       &drm->flip);
+		if (ret == 0)
+			ret = nvif_notify_get(&drm->flip);
+		if (ret) {
+			nouveau_accel_fini(drm);
+			return;
+		}
 	}
 
 	if (ret) {
@@ -258,15 +274,15 @@
 	}
 
 	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
-		ret = nvkm_gpuobj_new(nvxx_object(&drm->device), NULL, 32,
-				      0, 0, &drm->notify);
+		ret = nvkm_gpuobj_new(nvxx_device(&drm->device), 32, 0, false,
+				      NULL, &drm->notify);
 		if (ret) {
 			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
 			nouveau_accel_fini(drm);
 			return;
 		}
 
-		ret = nvif_object_init(drm->channel->object, NULL, NvNotify0,
+		ret = nvif_object_init(&drm->channel->user, NvNotify0,
 				       NV_DMA_IN_MEMORY,
 				       &(struct nv_dma_v0) {
 						.target = NV_DMA_V0_TARGET_VRAM,
@@ -321,9 +337,8 @@
 		remove_conflicting_framebuffers(aper, "nouveaufb", boot);
 	kfree(aper);
 
-	ret = nvkm_device_create(pdev, NVKM_BUS_PCI,
-				 nouveau_pci_name(pdev), pci_name(pdev),
-				 nouveau_config, nouveau_debug, &device);
+	ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
+				  true, true, ~0ULL, &device);
 	if (ret)
 		return ret;
 
@@ -331,7 +346,7 @@
 
 	ret = drm_get_pci_dev(pdev, pent, &driver_pci);
 	if (ret) {
-		nvkm_object_ref(NULL, (struct nvkm_object **)&device);
+		nvkm_device_del(&device);
 		return ret;
 	}
 
@@ -371,12 +386,10 @@
 static int
 nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 {
-	struct pci_dev *pdev = dev->pdev;
 	struct nouveau_drm *drm;
 	int ret;
 
-	ret = nouveau_cli_create(nouveau_name(dev), "DRM", sizeof(*drm),
-				 (void **)&drm);
+	ret = nouveau_cli_create(dev, "DRM", sizeof(*drm), (void **)&drm);
 	if (ret)
 		return ret;
 
@@ -390,36 +403,10 @@
 
 	nouveau_get_hdmi_dev(drm);
 
-	/* make sure AGP controller is in a consistent state before we
-	 * (possibly) execute vbios init tables (see nouveau_agp.h)
-	 */
-	if (pdev && drm_pci_device_is_agp(dev) && dev->agp) {
-		const u64 enables = NV_DEVICE_V0_DISABLE_IDENTIFY |
-				    NV_DEVICE_V0_DISABLE_MMIO;
-		/* dummy device object, doesn't init anything, but allows
-		 * agp code access to registers
-		 */
-		ret = nvif_device_init(&drm->client.base.base, NULL,
-				       NVDRM_DEVICE, NV_DEVICE,
-				       &(struct nv_device_v0) {
-						.device = ~0,
-						.disable = ~enables,
-						.debug0 = ~0,
-				       }, sizeof(struct nv_device_v0),
-				       &drm->device);
-		if (ret)
-			goto fail_device;
-
-		nouveau_agp_reset(drm);
-		nvif_device_fini(&drm->device);
-	}
-
-	ret = nvif_device_init(&drm->client.base.base, NULL, NVDRM_DEVICE,
-			       NV_DEVICE,
+	ret = nvif_device_init(&drm->client.base.object,
+			       NVDRM_DEVICE, NV_DEVICE,
 			       &(struct nv_device_v0) {
 					.device = ~0,
-					.disable = 0,
-					.debug0 = 0,
 			       }, sizeof(struct nv_device_v0),
 			       &drm->device);
 	if (ret)
@@ -432,14 +419,13 @@
 	 * better fix is found - assuming there is one...
 	 */
 	if (drm->device.info.chipset == 0xc1)
-		nvif_mask(&drm->device, 0x00088080, 0x00000800, 0x00000000);
+		nvif_mask(&drm->device.object, 0x00088080, 0x00000800, 0x00000000);
 
 	nouveau_vga_init(drm);
-	nouveau_agp_init(drm);
 
 	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 		ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
-				  0x1000, &drm->client.vm);
+				  0x1000, NULL, &drm->client.vm);
 		if (ret)
 			goto fail_device;
 
@@ -486,7 +472,6 @@
 fail_bios:
 	nouveau_ttm_fini(drm);
 fail_ttm:
-	nouveau_agp_fini(drm);
 	nouveau_vga_fini(drm);
 fail_device:
 	nvif_device_fini(&drm->device);
@@ -512,7 +497,6 @@
 	nouveau_bios_takedown(dev);
 
 	nouveau_ttm_fini(drm);
-	nouveau_agp_fini(drm);
 	nouveau_vga_fini(drm);
 
 	nvif_device_fini(&drm->device);
@@ -527,15 +511,14 @@
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_client *client;
-	struct nvkm_object *device;
+	struct nvkm_device *device;
 
 	dev->irq_enabled = false;
 	client = nvxx_client(&drm->client.base);
-	device = client->device;
+	device = nvkm_device_find(client->device);
 	drm_put_dev(dev);
 
-	nvkm_object_ref(NULL, &device);
-	nvkm_object_debug();
+	nvkm_device_del(&device);
 }
 
 static void
@@ -597,7 +580,6 @@
 	if (ret)
 		goto fail_client;
 
-	nouveau_agp_fini(drm);
 	return 0;
 
 fail_client:
@@ -622,13 +604,8 @@
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_cli *cli;
 
-	NV_INFO(drm, "re-enabling device...\n");
-
-	nouveau_agp_reset(drm);
-
 	NV_INFO(drm, "resuming kernel object tree...\n");
 	nvif_client_resume(&drm->client.base);
-	nouveau_agp_init(drm);
 
 	NV_INFO(drm, "resuming client object trees...\n");
 	if (drm->fence && nouveau_fence(drm)->resume)
@@ -728,7 +705,6 @@
 		return -EBUSY;
 	}
 
-	nv_debug_level(SILENT);
 	drm_kms_helper_poll_disable(drm_dev);
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
 	nouveau_switcheroo_optimus_dsm();
@@ -762,10 +738,9 @@
 	ret = nouveau_do_resume(drm_dev, true);
 	drm_kms_helper_poll_enable(drm_dev);
 	/* do magic */
-	nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
+	nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
 	vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
 	drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
-	nv_debug_level(NORMAL);
 	return ret;
 }
 
@@ -826,8 +801,7 @@
 	get_task_comm(tmpname, current);
 	snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
 
-	ret = nouveau_cli_create(nouveau_name(dev), name, sizeof(*cli),
-			(void **)&cli);
+	ret = nouveau_cli_create(dev, name, sizeof(*cli), (void **)&cli);
 
 	if (ret)
 		goto out_suspend;
@@ -836,7 +810,7 @@
 
 	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 		ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
-				  0x1000, &cli->vm);
+				  0x1000, NULL, &cli->vm);
 		if (ret) {
 			nouveau_cli_destroy(cli);
 			goto out_suspend;
@@ -945,8 +919,8 @@
 static struct drm_driver
 driver_stub = {
 	.driver_features =
-		DRIVER_USE_AGP |
-		DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
+		DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
+		DRIVER_KMS_LEGACY_CONTEXT,
 
 	.load = nouveau_drm_load,
 	.unload = nouveau_drm_unload,
@@ -1056,18 +1030,16 @@
 };
 
 struct drm_device *
-nouveau_platform_device_create_(struct platform_device *pdev, int size,
-				void **pobject)
+nouveau_platform_device_create(struct platform_device *pdev,
+			       struct nvkm_device **pdevice)
 {
 	struct drm_device *drm;
 	int err;
 
-	err = nvkm_device_create_(pdev, NVKM_BUS_PLATFORM,
-				  nouveau_platform_name(pdev),
-				  dev_name(&pdev->dev), nouveau_config,
-				  nouveau_debug, size, pobject);
+	err = nvkm_device_tegra_new(pdev, nouveau_config, nouveau_debug,
+				    true, true, ~0ULL, pdevice);
 	if (err)
-		return ERR_PTR(err);
+		goto err_free;
 
 	drm = drm_dev_alloc(&driver_platform, &pdev->dev);
 	if (!drm) {
@@ -1085,7 +1057,7 @@
 	return drm;
 
 err_free:
-	nvkm_object_ref(NULL, (struct nvkm_object **)pobject);
+	nvkm_device_del(pdevice);
 
 	return ERR_PTR(err);
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index dd72652..3c902c2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -9,8 +9,8 @@
 #define DRIVER_DATE		"20120801"
 
 #define DRIVER_MAJOR		1
-#define DRIVER_MINOR		2
-#define DRIVER_PATCHLEVEL	2
+#define DRIVER_MINOR		3
+#define DRIVER_PATCHLEVEL	0
 
 /*
  * 1.1.1:
@@ -30,6 +30,9 @@
  *      - allow concurrent access to bo's mapped read/write.
  * 1.2.2:
  *      - add NOUVEAU_GEM_DOMAIN_COHERENT flag
+ * 1.3.0:
+ *      - NVIF ABI modified, safe because only (current) users are test
+ *        programs that get directly linked with NVKM.
  */
 
 #include <nvif/client.h>
@@ -88,6 +91,8 @@
 	void *abi16;
 	struct list_head objects;
 	struct list_head notifys;
+	char name[32];
+	struct drm_device *dev;
 };
 
 static inline struct nouveau_cli *
@@ -109,13 +114,10 @@
 	struct list_head clients;
 
 	struct {
-		enum {
-			UNKNOWN = 0,
-			DISABLE = 1,
-			ENABLED = 2
-		} stat;
+		struct agp_bridge_data *bridge;
 		u32 base;
 		u32 size;
+		bool cma;
 	} agp;
 
 	/* TTM interface support */
@@ -148,6 +150,7 @@
 	struct nouveau_fbdev *fbcon;
 	struct nvif_object nvsw;
 	struct nvif_object ntfy;
+	struct nvif_notify flip;
 
 	/* nv10-nv40 tiling regions */
 	struct {
@@ -180,22 +183,22 @@
 int nouveau_pmops_suspend(struct device *);
 int nouveau_pmops_resume(struct device *);
 
-#define nouveau_platform_device_create(p, u)                                   \
-	nouveau_platform_device_create_(p, sizeof(**u), (void **)u)
 struct drm_device *
-nouveau_platform_device_create_(struct platform_device *pdev,
-				int size, void **pobject);
+nouveau_platform_device_create(struct platform_device *, struct nvkm_device **);
 void nouveau_drm_device_remove(struct drm_device *dev);
 
 #define NV_PRINTK(l,c,f,a...) do {                                             \
 	struct nouveau_cli *_cli = (c);                                        \
-	nv_##l(_cli->base.base.priv, f, ##a);                                  \
+	dev_##l(_cli->dev->dev, "%s: "f, _cli->name, ##a);                     \
 } while(0)
-#define NV_FATAL(drm,f,a...) NV_PRINTK(fatal, &(drm)->client, f, ##a)
-#define NV_ERROR(drm,f,a...) NV_PRINTK(error, &(drm)->client, f, ##a)
+#define NV_FATAL(drm,f,a...) NV_PRINTK(crit, &(drm)->client, f, ##a)
+#define NV_ERROR(drm,f,a...) NV_PRINTK(err, &(drm)->client, f, ##a)
 #define NV_WARN(drm,f,a...) NV_PRINTK(warn, &(drm)->client, f, ##a)
 #define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a)
-#define NV_DEBUG(drm,f,a...) NV_PRINTK(debug, &(drm)->client, f, ##a)
+#define NV_DEBUG(drm,f,a...) do {                                              \
+	if (unlikely(drm_debug & DRM_UT_DRIVER))                               \
+		NV_PRINTK(info, &(drm)->client, f, ##a);                       \
+} while(0)
 
 extern int nouveau_modeset;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index c57a37e..b37da95 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -41,7 +41,9 @@
 
 	struct dcb_output *dcb;
 	int or;
-	struct nvkm_i2c_port *i2c;
+
+	struct i2c_adapter *i2c;
+	struct nvkm_i2c_aux *aux;
 
 	/* different to drm_encoder.crtc, this reflects what's
 	 * actually programmed on the hw, not the proposed crtc */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 6751553..2791701 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -84,7 +84,7 @@
 
 	if (ret != -ENODEV)
 		nouveau_fbcon_gpu_lockup(info);
-	cfb_fillrect(info, rect);
+	drm_fb_helper_cfb_fillrect(info, rect);
 }
 
 static void
@@ -116,7 +116,7 @@
 
 	if (ret != -ENODEV)
 		nouveau_fbcon_gpu_lockup(info);
-	cfb_copyarea(info, image);
+	drm_fb_helper_cfb_copyarea(info, image);
 }
 
 static void
@@ -148,7 +148,7 @@
 
 	if (ret != -ENODEV)
 		nouveau_fbcon_gpu_lockup(info);
-	cfb_imageblit(info, image);
+	drm_fb_helper_cfb_imageblit(info, image);
 }
 
 static int
@@ -197,9 +197,9 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -319,7 +319,6 @@
 	struct nouveau_channel *chan;
 	struct nouveau_bo *nvbo;
 	struct drm_mode_fb_cmd2 mode_cmd;
-	struct pci_dev *pdev = dev->pdev;
 	int size, ret;
 
 	mode_cmd.width = sizes->surface_width;
@@ -365,20 +364,13 @@
 
 	mutex_lock(&dev->struct_mutex);
 
-	info = framebuffer_alloc(0, &pdev->dev);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_unlock;
 	}
 	info->skip_vt_switch = 1;
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		framebuffer_release(info);
-		goto out_unlock;
-	}
-
 	info->par = fbcon;
 
 	nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo);
@@ -388,7 +380,6 @@
 
 	/* setup helper */
 	fbcon->helper.fb = fb;
-	fbcon->helper.fbdev = info;
 
 	strcpy(info->fix.id, "nouveaufb");
 	if (!chan)
@@ -450,15 +441,9 @@
 nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
 {
 	struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb;
-	struct fb_info *info;
 
-	if (fbcon->helper.fbdev) {
-		info = fbcon->helper.fbdev;
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&fbcon->helper);
+	drm_fb_helper_release_fbi(&fbcon->helper);
 
 	if (nouveau_fb->nvbo) {
 		nouveau_bo_unmap(nouveau_fb->nvbo);
@@ -496,7 +481,7 @@
 		console_lock();
 		if (state == FBINFO_STATE_RUNNING)
 			nouveau_fbcon_accel_restore(dev);
-		fb_set_suspend(drm->fbcon->helper.fbdev, state);
+		drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
 		if (state != FBINFO_STATE_RUNNING)
 			nouveau_fbcon_accel_save_disable(dev);
 		console_unlock();
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index c6d56be..574c36b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -169,7 +169,7 @@
 nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
 {
 	struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
-	struct nouveau_cli *cli = (void *)nvif_client(chan->object);
+	struct nouveau_cli *cli = (void *)chan->user.client;
 	int ret;
 
 	INIT_LIST_HEAD(&fctx->flip);
@@ -188,13 +188,12 @@
 	if (!priv->uevent)
 		return;
 
-	ret = nvif_notify_init(chan->object, NULL,
-			 nouveau_fence_wait_uevent_handler, false,
-			 G82_CHANNEL_DMA_V0_NTFY_UEVENT,
-			 &(struct nvif_notify_uevent_req) { },
-			 sizeof(struct nvif_notify_uevent_req),
-			 sizeof(struct nvif_notify_uevent_rep),
-			 &fctx->notify);
+	ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
+			       false, G82_CHANNEL_DMA_V0_NTFY_UEVENT,
+			       &(struct nvif_notify_uevent_req) { },
+			       sizeof(struct nvif_notify_uevent_req),
+			       sizeof(struct nvif_notify_uevent_rep),
+			       &fctx->notify);
 
 	WARN_ON(ret);
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index d9241d8..2e3a62d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -85,7 +85,7 @@
 int nv84_fence_create(struct nouveau_drm *);
 int nvc0_fence_create(struct nouveau_drm *);
 
-int nouveau_flip_complete(void *chan);
+int nouveau_flip_complete(struct nvif_notify *);
 
 struct nv84_fence_chan {
 	struct nouveau_fence_chan base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index af1ee51..2c99815 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -254,13 +254,13 @@
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+	struct nvkm_fb *fb = nvxx_fb(&drm->device);
 	struct drm_nouveau_gem_new *req = data;
 	struct nouveau_bo *nvbo = NULL;
 	int ret = 0;
 
-	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
-		NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
+	if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
+		NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
 		return -EINVAL;
 	}
 
@@ -376,7 +376,7 @@
 	ww_acquire_init(&op->ticket, &reservation_ww_class);
 retry:
 	if (++trycnt > 100000) {
-		NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__);
+		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
 		return -EINVAL;
 	}
 
@@ -387,7 +387,7 @@
 
 		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
 		if (!gem) {
-			NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle);
+			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
 			ret = -ENOENT;
 			break;
 		}
@@ -399,7 +399,7 @@
 		}
 
 		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
-			NV_PRINTK(error, cli, "multiple instances of buffer %d on "
+			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
 				      "validation list\n", b->handle);
 			drm_gem_object_unreference_unlocked(gem);
 			ret = -EINVAL;
@@ -420,7 +420,7 @@
 			}
 			if (unlikely(ret)) {
 				if (ret != -ERESTARTSYS)
-					NV_PRINTK(error, cli, "fail reserve\n");
+					NV_PRINTK(err, cli, "fail reserve\n");
 				break;
 			}
 		}
@@ -438,7 +438,7 @@
 		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
 			list_add_tail(&nvbo->entry, &gart_list);
 		else {
-			NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n",
+			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
 				 b->valid_domains);
 			list_add_tail(&nvbo->entry, &both_list);
 			ret = -EINVAL;
@@ -476,21 +476,21 @@
 					     b->write_domains,
 					     b->valid_domains);
 		if (unlikely(ret)) {
-			NV_PRINTK(error, cli, "fail set_domain\n");
+			NV_PRINTK(err, cli, "fail set_domain\n");
 			return ret;
 		}
 
 		ret = nouveau_bo_validate(nvbo, true, false);
 		if (unlikely(ret)) {
 			if (ret != -ERESTARTSYS)
-				NV_PRINTK(error, cli, "fail ttm_validate\n");
+				NV_PRINTK(err, cli, "fail ttm_validate\n");
 			return ret;
 		}
 
 		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
 		if (unlikely(ret)) {
 			if (ret != -ERESTARTSYS)
-				NV_PRINTK(error, cli, "fail post-validate sync\n");
+				NV_PRINTK(err, cli, "fail post-validate sync\n");
 			return ret;
 		}
 
@@ -537,14 +537,14 @@
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
 	if (unlikely(ret)) {
 		if (ret != -ERESTARTSYS)
-			NV_PRINTK(error, cli, "validate_init\n");
+			NV_PRINTK(err, cli, "validate_init\n");
 		return ret;
 	}
 
 	ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
 		if (ret != -ERESTARTSYS)
-			NV_PRINTK(error, cli, "validating bo list\n");
+			NV_PRINTK(err, cli, "validating bo list\n");
 		validate_fini(op, NULL, NULL);
 		return ret;
 	}
@@ -600,7 +600,7 @@
 		uint32_t data;
 
 		if (unlikely(r->bo_index > req->nr_buffers)) {
-			NV_PRINTK(error, cli, "reloc bo index invalid\n");
+			NV_PRINTK(err, cli, "reloc bo index invalid\n");
 			ret = -EINVAL;
 			break;
 		}
@@ -610,7 +610,7 @@
 			continue;
 
 		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
-			NV_PRINTK(error, cli, "reloc container bo index invalid\n");
+			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
 			ret = -EINVAL;
 			break;
 		}
@@ -618,7 +618,7 @@
 
 		if (unlikely(r->reloc_bo_offset + 4 >
 			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
-			NV_PRINTK(error, cli, "reloc outside of bo\n");
+			NV_PRINTK(err, cli, "reloc outside of bo\n");
 			ret = -EINVAL;
 			break;
 		}
@@ -627,7 +627,7 @@
 			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
 					  &nvbo->kmap);
 			if (ret) {
-				NV_PRINTK(error, cli, "failed kmap for reloc\n");
+				NV_PRINTK(err, cli, "failed kmap for reloc\n");
 				break;
 			}
 			nvbo->validate_mapped = true;
@@ -650,7 +650,7 @@
 
 		ret = ttm_bo_wait(&nvbo->bo, true, false, false);
 		if (ret) {
-			NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret);
+			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
 			break;
 		}
 
@@ -681,7 +681,7 @@
 		return -ENOMEM;
 
 	list_for_each_entry(temp, &abi16->channels, head) {
-		if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) {
+		if (temp->chan->user.handle == (NVDRM_CHAN | req->channel)) {
 			chan = temp->chan;
 			break;
 		}
@@ -696,19 +696,19 @@
 		goto out_next;
 
 	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
-		NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n",
+		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
 			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
 		return nouveau_abi16_put(abi16, -EINVAL);
 	}
 
 	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
-		NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n",
+		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
 			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
 		return nouveau_abi16_put(abi16, -EINVAL);
 	}
 
 	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
-		NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
+		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
 			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
 		return nouveau_abi16_put(abi16, -EINVAL);
 	}
@@ -726,7 +726,7 @@
 	/* Ensure all push buffers are on validate list */
 	for (i = 0; i < req->nr_push; i++) {
 		if (push[i].bo_index >= req->nr_buffers) {
-			NV_PRINTK(error, cli, "push %d buffer not in list\n", i);
+			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
 			ret = -EINVAL;
 			goto out_prevalid;
 		}
@@ -737,7 +737,7 @@
 					   req->nr_buffers, &op, &do_reloc);
 	if (ret) {
 		if (ret != -ERESTARTSYS)
-			NV_PRINTK(error, cli, "validate: %d\n", ret);
+			NV_PRINTK(err, cli, "validate: %d\n", ret);
 		goto out_prevalid;
 	}
 
@@ -745,7 +745,7 @@
 	if (do_reloc) {
 		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
 		if (ret) {
-			NV_PRINTK(error, cli, "reloc apply: %d\n", ret);
+			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
 			goto out;
 		}
 	}
@@ -753,7 +753,7 @@
 	if (chan->dma.ib_max) {
 		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
 		if (ret) {
-			NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret);
+			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
 			goto out;
 		}
 
@@ -768,7 +768,7 @@
 	if (drm->device.info.chipset >= 0x25) {
 		ret = RING_SPACE(chan, req->nr_push * 2);
 		if (ret) {
-			NV_PRINTK(error, cli, "cal_space: %d\n", ret);
+			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
 			goto out;
 		}
 
@@ -782,7 +782,7 @@
 	} else {
 		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
 		if (ret) {
-			NV_PRINTK(error, cli, "jmp_space: %d\n", ret);
+			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
 			goto out;
 		}
 
@@ -820,7 +820,7 @@
 
 	ret = nouveau_fence_new(chan, false, &fence);
 	if (ret) {
-		NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret);
+		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
 		WIND_RING(chan);
 		goto out;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 0dbe006..491c714 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -41,7 +41,7 @@
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_therm *therm = nvxx_therm(&drm->device);
-	int temp = therm->temp_get(therm);
+	int temp = nvkm_therm_temp_get(therm);
 
 	if (temp < 0)
 		return temp;
@@ -348,7 +348,7 @@
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_therm *therm = nvxx_therm(&drm->device);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
+	return snprintf(buf, PAGE_SIZE, "%d\n", nvkm_therm_fan_sense(therm));
 }
 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, nouveau_hwmon_show_fan1_input,
 			  NULL, 0);
@@ -571,7 +571,7 @@
 		return -ENOMEM;
 	hwmon->dev = dev;
 
-	if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set)
+	if (!therm || !therm->attr_get || !therm->attr_set)
 		return -ENODEV;
 
 	hwmon_dev = hwmon_device_register(&dev->pdev->dev);
@@ -588,7 +588,7 @@
 		goto error;
 
 	/* if the card has a working thermal sensor */
-	if (therm->temp_get(therm) >= 0) {
+	if (nvkm_therm_temp_get(therm) >= 0) {
 		ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
 		if (ret)
 			goto error;
@@ -606,7 +606,7 @@
 	}
 
 	/* if the card can read the fan rpm */
-	if (therm->fan_sense(therm) >= 0) {
+	if (nvkm_therm_fan_sense(therm) >= 0) {
 		ret = sysfs_create_group(&hwmon_dev->kobj,
 					 &hwmon_fan_rpm_attrgroup);
 		if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index ca0ad9d..55eb942 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -72,10 +72,8 @@
 static void
 nvkm_client_driver_fini(void *priv)
 {
-	struct nvkm_object *client = priv;
-	nvkm_client_fini(nv_client(client), false);
-	atomic_set(&client->refcount, 1);
-	nvkm_object_ref(NULL, &client);
+	struct nvkm_client *client = priv;
+	nvkm_client_del(&client);
 }
 
 static int
@@ -113,7 +111,7 @@
 	struct nvkm_client *client;
 	int ret;
 
-	ret = nvkm_client_create(name, device, cfg, dbg, &client);
+	ret = nvkm_client_new(name, device, cfg, dbg, &client);
 	*ppriv = client;
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index dcfbbfa..3eb6654 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -19,239 +19,38 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/reset.h>
-#include <linux/regulator/consumer.h>
-#include <linux/iommu.h>
-#include <soc/tegra/fuse.h>
-#include <soc/tegra/pmc.h>
-
-#include "nouveau_drm.h"
 #include "nouveau_platform.h"
 
-static int nouveau_platform_power_up(struct nouveau_platform_gpu *gpu)
-{
-	int err;
-
-	err = regulator_enable(gpu->vdd);
-	if (err)
-		goto err_power;
-
-	err = clk_prepare_enable(gpu->clk);
-	if (err)
-		goto err_clk;
-	err = clk_prepare_enable(gpu->clk_pwr);
-	if (err)
-		goto err_clk_pwr;
-	clk_set_rate(gpu->clk_pwr, 204000000);
-	udelay(10);
-
-	reset_control_assert(gpu->rst);
-	udelay(10);
-
-	err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
-	if (err)
-		goto err_clamp;
-	udelay(10);
-
-	reset_control_deassert(gpu->rst);
-	udelay(10);
-
-	return 0;
-
-err_clamp:
-	clk_disable_unprepare(gpu->clk_pwr);
-err_clk_pwr:
-	clk_disable_unprepare(gpu->clk);
-err_clk:
-	regulator_disable(gpu->vdd);
-err_power:
-	return err;
-}
-
-static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
-{
-	int err;
-
-	reset_control_assert(gpu->rst);
-	udelay(10);
-
-	clk_disable_unprepare(gpu->clk_pwr);
-	clk_disable_unprepare(gpu->clk);
-	udelay(10);
-
-	err = regulator_disable(gpu->vdd);
-	if (err)
-		return err;
-
-	return 0;
-}
-
-#if IS_ENABLED(CONFIG_IOMMU_API)
-
-static void nouveau_platform_probe_iommu(struct device *dev,
-					 struct nouveau_platform_gpu *gpu)
-{
-	int err;
-	unsigned long pgsize_bitmap;
-
-	mutex_init(&gpu->iommu.mutex);
-
-	if (iommu_present(&platform_bus_type)) {
-		gpu->iommu.domain = iommu_domain_alloc(&platform_bus_type);
-		if (IS_ERR(gpu->iommu.domain))
-			goto error;
-
-		/*
-		 * An IOMMU is only usable if it supports page sizes smaller
-		 * than or equal to the system's PAGE_SIZE, preferably with
-		 * both being equal.
-		 */
-		pgsize_bitmap = gpu->iommu.domain->ops->pgsize_bitmap;
-		if (pgsize_bitmap & PAGE_SIZE) {
-			gpu->iommu.pgshift = PAGE_SHIFT;
-		} else {
-			gpu->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
-			if (gpu->iommu.pgshift == 0) {
-				dev_warn(dev, "unsupported IOMMU page size\n");
-				goto free_domain;
-			}
-			gpu->iommu.pgshift -= 1;
-		}
-
-		err = iommu_attach_device(gpu->iommu.domain, dev);
-		if (err)
-			goto free_domain;
-
-		err = nvkm_mm_init(&gpu->iommu._mm, 0,
-				   (1ULL << 40) >> gpu->iommu.pgshift, 1);
-		if (err)
-			goto detach_device;
-
-		gpu->iommu.mm = &gpu->iommu._mm;
-	}
-
-	return;
-
-detach_device:
-	iommu_detach_device(gpu->iommu.domain, dev);
-
-free_domain:
-	iommu_domain_free(gpu->iommu.domain);
-
-error:
-	gpu->iommu.domain = NULL;
-	gpu->iommu.pgshift = 0;
-	dev_err(dev, "cannot initialize IOMMU MM\n");
-}
-
-static void nouveau_platform_remove_iommu(struct device *dev,
-					  struct nouveau_platform_gpu *gpu)
-{
-	if (gpu->iommu.domain) {
-		nvkm_mm_fini(&gpu->iommu._mm);
-		iommu_detach_device(gpu->iommu.domain, dev);
-		iommu_domain_free(gpu->iommu.domain);
-	}
-}
-
-#else
-
-static void nouveau_platform_probe_iommu(struct device *dev,
-					 struct nouveau_platform_gpu *gpu)
-{
-}
-
-static void nouveau_platform_remove_iommu(struct device *dev,
-					  struct nouveau_platform_gpu *gpu)
-{
-}
-
-#endif
-
 static int nouveau_platform_probe(struct platform_device *pdev)
 {
-	struct nouveau_platform_gpu *gpu;
-	struct nouveau_platform_device *device;
+	struct nvkm_device *device;
 	struct drm_device *drm;
-	int err;
-
-	gpu = devm_kzalloc(&pdev->dev, sizeof(*gpu), GFP_KERNEL);
-	if (!gpu)
-		return -ENOMEM;
-
-	gpu->vdd = devm_regulator_get(&pdev->dev, "vdd");
-	if (IS_ERR(gpu->vdd))
-		return PTR_ERR(gpu->vdd);
-
-	gpu->rst = devm_reset_control_get(&pdev->dev, "gpu");
-	if (IS_ERR(gpu->rst))
-		return PTR_ERR(gpu->rst);
-
-	gpu->clk = devm_clk_get(&pdev->dev, "gpu");
-	if (IS_ERR(gpu->clk))
-		return PTR_ERR(gpu->clk);
-
-	gpu->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
-	if (IS_ERR(gpu->clk_pwr))
-		return PTR_ERR(gpu->clk_pwr);
-
-	nouveau_platform_probe_iommu(&pdev->dev, gpu);
-
-	err = nouveau_platform_power_up(gpu);
-	if (err)
-		return err;
+	int ret;
 
 	drm = nouveau_platform_device_create(pdev, &device);
-	if (IS_ERR(drm)) {
-		err = PTR_ERR(drm);
-		goto power_down;
+	if (IS_ERR(drm))
+		return PTR_ERR(drm);
+
+	ret = drm_dev_register(drm, 0);
+	if (ret < 0) {
+		drm_dev_unref(drm);
+		return ret;
 	}
 
-	device->gpu = gpu;
-	device->gpu_speedo = tegra_sku_info.gpu_speedo_value;
-
-	err = drm_dev_register(drm, 0);
-	if (err < 0)
-		goto err_unref;
-
 	return 0;
-
-err_unref:
-	drm_dev_unref(drm);
-
-power_down:
-	nouveau_platform_power_down(gpu);
-	nouveau_platform_remove_iommu(&pdev->dev, gpu);
-
-	return err;
 }
 
 static int nouveau_platform_remove(struct platform_device *pdev)
 {
-	struct drm_device *drm_dev = platform_get_drvdata(pdev);
-	struct nouveau_drm *drm = nouveau_drm(drm_dev);
-	struct nvkm_device *device = nvxx_device(&drm->device);
-	struct nouveau_platform_gpu *gpu = nv_device_to_platform(device)->gpu;
-	int err;
-
-	nouveau_drm_device_remove(drm_dev);
-
-	err = nouveau_platform_power_down(gpu);
-
-	nouveau_platform_remove_iommu(&pdev->dev, gpu);
-
-	return err;
+	struct drm_device *dev = platform_get_drvdata(pdev);
+	nouveau_drm_device_remove(dev);
+	return 0;
 }
 
 #if IS_ENABLED(CONFIG_OF)
 static const struct of_device_id nouveau_platform_match[] = {
 	{ .compatible = "nvidia,gk20a" },
+	{ .compatible = "nvidia,gm20b" },
 	{ }
 };
 
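Note on the deletions above: the Tegra power-sequencing and IOMMU setup removed from probe() is assumed to move behind the nvkm device layer elsewhere in this series, leaving the platform driver as a thin create/register wrapper. For reference, a sketch of the power-up ordering the deleted code implemented (names as in the removed hunk, not part of the patch):

	regulator_enable(gpu->vdd);
	clk_prepare_enable(gpu->clk);		/* core clock */
	clk_prepare_enable(gpu->clk_pwr);	/* pwr clock, pinned to 204 MHz */
	reset_control_assert(gpu->rst);
	tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
	reset_control_deassert(gpu->rst);	/* ~10us settle between steps */
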
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h
index 392874c..f41056d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.h
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.h
@@ -19,54 +19,9 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-
 #ifndef __NOUVEAU_PLATFORM_H__
 #define __NOUVEAU_PLATFORM_H__
-
-#include "core/device.h"
-#include "core/mm.h"
-
-struct reset_control;
-struct clk;
-struct regulator;
-struct iommu_domain;
-struct platform_driver;
-
-struct nouveau_platform_gpu {
-	struct reset_control *rst;
-	struct clk *clk;
-	struct clk *clk_pwr;
-
-	struct regulator *vdd;
-
-	struct {
-		/*
-		 * Protects accesses to mm from subsystems
-		 */
-		struct mutex mutex;
-
-		struct nvkm_mm _mm;
-		/*
-		 * Just points to _mm. We need this to avoid embedding
-		 * struct nvkm_mm in os.h
-		 */
-		struct nvkm_mm *mm;
-		struct iommu_domain *domain;
-		unsigned long pgshift;
-	} iommu;
-};
-
-struct nouveau_platform_device {
-	struct nvkm_device device;
-
-	struct nouveau_platform_gpu *gpu;
-
-	int gpu_speedo;
-};
-
-#define nv_device_to_platform(d)                                               \
-	container_of(d, struct nouveau_platform_device, device)
+#include "nouveau_drm.h"
 
 extern struct platform_driver nouveau_platform_driver;
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
index 1ec8f38..d12a5fa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -165,7 +165,7 @@
 	struct nvif_device *device = &drm->device;
 
 	if (sysfs && sysfs->ctrl.priv) {
-		device_remove_file(nv_device_base(nvxx_device(device)), &dev_attr_pstate);
+		device_remove_file(nvxx_device(device)->dev, &dev_attr_pstate);
 		nvif_object_fini(&sysfs->ctrl);
 	}
 
@@ -188,11 +188,11 @@
 	if (!sysfs)
 		return -ENOMEM;
 
-	ret = nvif_object_init(nvif_object(device), NULL, NVDRM_CONTROL,
+	ret = nvif_object_init(&device->object, NVDRM_CONTROL,
 			       NVIF_IOCTL_NEW_V0_CONTROL, NULL, 0,
-			      &sysfs->ctrl);
+			       &sysfs->ctrl);
 	if (ret == 0)
-		device_create_file(nv_device_base(nvxx_device(device)), &dev_attr_pstate);
+		device_create_file(nvxx_device(device)->dev, &dev_attr_pstate);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 7464aef3..3f0fb55 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -33,8 +33,8 @@
 nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
-	man->priv = pfb;
+	struct nvkm_fb *fb = nvxx_fb(&drm->device);
+	man->priv = fb;
 	return 0;
 }
 
@@ -64,9 +64,9 @@
 			 struct ttm_mem_reg *mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
 	nvkm_mem_node_cleanup(mem->mm_node);
-	pfb->ram->put(pfb, (struct nvkm_mem **)&mem->mm_node);
+	ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
 }
 
 static int
@@ -76,7 +76,7 @@
 			 struct ttm_mem_reg *mem)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nvkm_mem *node;
 	u32 size_nc = 0;
@@ -88,9 +88,9 @@
 	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
 		size_nc = 1 << nvbo->page_shift;
 
-	ret = pfb->ram->get(pfb, mem->num_pages << PAGE_SHIFT,
-			   mem->page_alignment << PAGE_SHIFT, size_nc,
-			   (nvbo->tile_flags >> 8) & 0x3ff, &node);
+	ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
+			     mem->page_alignment << PAGE_SHIFT, size_nc,
+			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
 	if (ret) {
 		mem->mm_node = NULL;
 		return (ret == -ENOSPC) ? 0 : ret;
@@ -103,38 +103,11 @@
 	return 0;
 }
 
-static void
-nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
-{
-	struct nvkm_fb *pfb = man->priv;
-	struct nvkm_mm *mm = &pfb->vram;
-	struct nvkm_mm_node *r;
-	u32 total = 0, free = 0;
-
-	mutex_lock(&nv_subdev(pfb)->mutex);
-	list_for_each_entry(r, &mm->nodes, nl_entry) {
-		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
-		       prefix, r->type, ((u64)r->offset << 12),
-		       (((u64)r->offset + r->length) << 12));
-
-		total += r->length;
-		if (!r->type)
-			free += r->length;
-	}
-	mutex_unlock(&nv_subdev(pfb)->mutex);
-
-	printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
-	       prefix, (u64)total << 12, (u64)free << 12);
-	printk(KERN_DEBUG "%s  block: 0x%08x\n",
-	       prefix, mm->block_size << 12);
-}
-
 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
 	nouveau_vram_manager_init,
 	nouveau_vram_manager_fini,
 	nouveau_vram_manager_new,
 	nouveau_vram_manager_del,
-	nouveau_vram_manager_debug
 };
 
 static int
@@ -221,7 +194,7 @@
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
 	struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
-	struct nv04_mmu_priv *priv = (void *)mmu;
+	struct nv04_mmu *priv = (void *)mmu;
 	struct nvkm_vm *vm = NULL;
 	nvkm_vm_ref(priv->vm, &vm, NULL);
 	man->priv = vm;
@@ -362,13 +335,22 @@
 int
 nouveau_ttm_init(struct nouveau_drm *drm)
 {
+	struct nvkm_device *device = nvxx_device(&drm->device);
+	struct nvkm_pci *pci = device->pci;
 	struct drm_device *dev = drm->dev;
 	u32 bits;
 	int ret;
 
+	if (pci && pci->agp.bridge) {
+		drm->agp.bridge = pci->agp.bridge;
+		drm->agp.base = pci->agp.base;
+		drm->agp.size = pci->agp.size;
+		drm->agp.cma = pci->agp.cma;
+	}
+
 	bits = nvxx_mmu(&drm->device)->dma_bits;
-	if (nv_device_is_pci(nvxx_device(&drm->device))) {
-		if (drm->agp.stat == ENABLED ||
+	if (nvxx_device(&drm->device)->func->pci) {
+		if (drm->agp.bridge ||
 		     !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
 			bits = 32;
 
@@ -408,11 +390,11 @@
 		return ret;
 	}
 
-	drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvxx_device(&drm->device), 1),
-					 nv_device_resource_len(nvxx_device(&drm->device), 1));
+	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
+					 device->func->resource_size(device, 1));
 
 	/* GART init */
-	if (drm->agp.stat != ENABLED) {
+	if (!drm->agp.bridge) {
 		drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
 	} else {
 		drm->gem.gart_available = drm->agp.size;
@@ -433,10 +415,8 @@
 void
 nouveau_ttm_fini(struct nouveau_drm *drm)
 {
-	mutex_lock(&drm->dev->struct_mutex);
 	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
 	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
-	mutex_unlock(&drm->dev->struct_mutex);
 
 	ttm_bo_device_release(&drm->ttm.bdev);
 
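With nouveau_vram_manager_debug() gone and nvkm_fb no longer exposing get/put directly, VRAM now moves through the nvkm_ram function table. A hedged sketch of that indirection, mirroring nouveau_vram_manager_new()/del() above (the example_* names are illustrative only):

	static int
	example_vram_get(struct nvkm_ram *ram, u64 size, struct nvkm_mem **pnode)
	{
		/* PAGE_SIZE alignment, contiguous (size_nc = 0), memtype 0 */
		return ram->func->get(ram, size, PAGE_SIZE, 0, 0, pnode);
	}

	static void
	example_vram_put(struct nvkm_ram *ram, struct nvkm_mem **pnode)
	{
		ram->func->put(ram, pnode);
	}
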
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index c7592ec..af89c36 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -12,13 +12,14 @@
 static unsigned int
 nouveau_vga_set_decode(void *priv, bool state)
 {
-	struct nvif_device *device = &nouveau_drm(priv)->device;
+	struct nouveau_drm *drm = nouveau_drm(priv);
+	struct nvif_object *device = &drm->device.object;
 
-	if (device->info.family == NV_DEVICE_INFO_V0_CURIE &&
-	    device->info.chipset >= 0x4c)
+	if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE &&
+	    drm->device.info.chipset >= 0x4c)
 		nvif_wr32(device, 0x088060, state);
 	else
-	if (device->info.chipset >= 0x40)
+	if (drm->device.info.chipset >= 0x40)
 		nvif_wr32(device, 0x088054, state);
 	else
 		nvif_wr32(device, 0x001854, state);
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 495c576..789dc29 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -171,33 +171,33 @@
 		return -EINVAL;
 	}
 
-	ret = nvif_object_init(chan->object, NULL, 0x0062,
+	ret = nvif_object_init(&chan->user, 0x0062,
 			       device->info.family >= NV_DEVICE_INFO_V0_CELSIUS ?
 			       0x0062 : 0x0042, NULL, 0, &nfbdev->surf2d);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(chan->object, NULL, 0x0019, 0x0019, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x0019, 0x0019, NULL, 0,
 			       &nfbdev->clip);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(chan->object, NULL, 0x0043, 0x0043, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x0043, 0x0043, NULL, 0,
 			       &nfbdev->rop);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(chan->object, NULL, 0x0044, 0x0044, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x0044, 0x0044, NULL, 0,
 			       &nfbdev->patt);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(chan->object, NULL, 0x004a, 0x004a, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x004a, 0x004a, NULL, 0,
 			       &nfbdev->gdi);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(chan->object, NULL, 0x005f,
+	ret = nvif_object_init(&chan->user, 0x005f,
 			       device->info.chipset >= 0x11 ? 0x009f : 0x005f,
 			       NULL, 0, &nfbdev->blit);
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index c2e05e6..f3d705d 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -57,8 +57,10 @@
 static u32
 nv04_fence_read(struct nouveau_channel *chan)
 {
-	struct nvkm_fifo_chan *fifo = nvxx_fifo_chan(chan);
-	return atomic_read(&fifo->refcnt);
+	struct nv04_nvsw_get_ref_v0 args = {};
+	WARN_ON(nvif_object_mthd(&chan->nvsw, NV04_NVSW_GET_REF,
+				 &args, sizeof(args)));
+	return args.ref;
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 5e1ea1c..2c35213 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -50,7 +50,7 @@
 u32
 nv10_fence_read(struct nouveau_channel *chan)
 {
-	return nvif_rd32(chan, 0x0048);
+	return nvif_rd32(&chan->user, 0x0048);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 57860cf..80b6eb8 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -33,7 +33,7 @@
 nv17_fence_sync(struct nouveau_fence *fence,
 		struct nouveau_channel *prev, struct nouveau_channel *chan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&prev->device->base);
+	struct nouveau_cli *cli = (void *)prev->user.client;
 	struct nv10_fence_priv *priv = chan->drm->fence;
 	struct nv10_fence_chan *fctx = chan->fence;
 	u32 value;
@@ -89,7 +89,7 @@
 	fctx->base.read = nv10_fence_read;
 	fctx->base.sync = nv17_fence_sync;
 
-	ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_FROM_MEMORY,
+	ret = nvif_object_init(&chan->user, NvSema, NV_DMA_FROM_MEMORY,
 			       &(struct nv_dma_v0) {
 					.target = NV_DMA_V0_TARGET_VRAM,
 					.access = NV_DMA_V0_ACCESS_RDWR,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 981342d..4ae87ae 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -60,35 +60,39 @@
 
 struct nv50_chan {
 	struct nvif_object user;
+	struct nvif_device *device;
 };
 
 static int
-nv50_chan_create(struct nvif_object *disp, const u32 *oclass, u8 head,
-		 void *data, u32 size, struct nv50_chan *chan)
+nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
+		 const s32 *oclass, u8 head, void *data, u32 size,
+		 struct nv50_chan *chan)
 {
 	const u32 handle = (oclass[0] << 16) | head;
-	u32 sclass[8];
-	int ret, i;
+	struct nvif_sclass *sclass;
+	int ret, i, n;
 
-	ret = nvif_object_sclass(disp, sclass, ARRAY_SIZE(sclass));
-	WARN_ON(ret > ARRAY_SIZE(sclass));
+	chan->device = device;
+
+	ret = n = nvif_object_sclass_get(disp, &sclass);
 	if (ret < 0)
 		return ret;
 
 	while (oclass[0]) {
-		for (i = 0; i < ARRAY_SIZE(sclass); i++) {
-			if (sclass[i] == oclass[0]) {
-				ret = nvif_object_init(disp, NULL, handle,
-						       oclass[0], data, size,
-						       &chan->user);
+		for (i = 0; i < n; i++) {
+			if (sclass[i].oclass == oclass[0]) {
+				ret = nvif_object_init(disp, handle, oclass[0],
+						       data, size, &chan->user);
 				if (ret == 0)
 					nvif_object_map(&chan->user);
+				nvif_object_sclass_put(&sclass);
 				return ret;
 			}
 		}
 		oclass++;
 	}
 
+	nvif_object_sclass_put(&sclass);
 	return -ENOSYS;
 }
 
@@ -113,10 +117,12 @@
 }
 
 static int
-nv50_pioc_create(struct nvif_object *disp, const u32 *oclass, u8 head,
-		 void *data, u32 size, struct nv50_pioc *pioc)
+nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp,
+		 const s32 *oclass, u8 head, void *data, u32 size,
+		 struct nv50_pioc *pioc)
 {
-	return nv50_chan_create(disp, oclass, head, data, size, &pioc->base);
+	return nv50_chan_create(device, disp, oclass, head, data, size,
+				&pioc->base);
 }
 
 /******************************************************************************
@@ -128,12 +134,13 @@
 };
 
 static int
-nv50_curs_create(struct nvif_object *disp, int head, struct nv50_curs *curs)
+nv50_curs_create(struct nvif_device *device, struct nvif_object *disp,
+		 int head, struct nv50_curs *curs)
 {
 	struct nv50_disp_cursor_v0 args = {
 		.head = head,
 	};
-	static const u32 oclass[] = {
+	static const s32 oclass[] = {
 		GK104_DISP_CURSOR,
 		GF110_DISP_CURSOR,
 		GT214_DISP_CURSOR,
@@ -142,8 +149,8 @@
 		0
 	};
 
-	return nv50_pioc_create(disp, oclass, head, &args, sizeof(args),
-			       &curs->base);
+	return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
+				&curs->base);
 }
 
 /******************************************************************************
@@ -155,12 +162,13 @@
 };
 
 static int
-nv50_oimm_create(struct nvif_object *disp, int head, struct nv50_oimm *oimm)
+nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
+		 int head, struct nv50_oimm *oimm)
 {
 	struct nv50_disp_cursor_v0 args = {
 		.head = head,
 	};
-	static const u32 oclass[] = {
+	static const s32 oclass[] = {
 		GK104_DISP_OVERLAY,
 		GF110_DISP_OVERLAY,
 		GT214_DISP_OVERLAY,
@@ -169,8 +177,8 @@
 		0
 	};
 
-	return nv50_pioc_create(disp, oclass, head, &args, sizeof(args),
-			       &oimm->base);
+	return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
+				&oimm->base);
 }
 
 /******************************************************************************
@@ -194,37 +202,37 @@
 static void
 nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
 {
+	struct nvif_device *device = dmac->base.device;
+
 	nvif_object_fini(&dmac->vram);
 	nvif_object_fini(&dmac->sync);
 
 	nv50_chan_destroy(&dmac->base);
 
 	if (dmac->ptr) {
-		struct pci_dev *pdev = nvxx_device(nvif_device(disp))->pdev;
-		pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
+		struct device *dev = nvxx_device(device)->dev;
+		dma_free_coherent(dev, PAGE_SIZE, dmac->ptr, dmac->handle);
 	}
 }
 
 static int
-nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head,
-		 void *data, u32 size, u64 syncbuf,
+nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
+		 const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
 		 struct nv50_dmac *dmac)
 {
-	struct nvif_device *device = nvif_device(disp);
 	struct nv50_disp_core_channel_dma_v0 *args = data;
 	struct nvif_object pushbuf;
 	int ret;
 
 	mutex_init(&dmac->lock);
 
-	dmac->ptr = pci_alloc_consistent(nvxx_device(device)->pdev,
-					 PAGE_SIZE, &dmac->handle);
+	dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
+				       &dmac->handle, GFP_KERNEL);
 	if (!dmac->ptr)
 		return -ENOMEM;
 
-	ret = nvif_object_init(nvif_object(device), NULL,
-			       args->pushbuf, NV_DMA_FROM_MEMORY,
-			       &(struct nv_dma_v0) {
+	ret = nvif_object_init(&device->object, 0xd0000000,
+			       NV_DMA_FROM_MEMORY, &(struct nv_dma_v0) {
 					.target = NV_DMA_V0_TARGET_PCI_US,
 					.access = NV_DMA_V0_ACCESS_RD,
 					.start = dmac->handle + 0x0000,
@@ -233,13 +241,15 @@
 	if (ret)
 		return ret;
 
-	ret = nv50_chan_create(disp, oclass, head, data, size, &dmac->base);
+	args->pushbuf = nvif_handle(&pushbuf);
+
+	ret = nv50_chan_create(device, disp, oclass, head, data, size,
+			       &dmac->base);
 	nvif_object_fini(&pushbuf);
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000000,
-			       NV_DMA_IN_MEMORY,
+	ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
 			       &(struct nv_dma_v0) {
 					.target = NV_DMA_V0_TARGET_VRAM,
 					.access = NV_DMA_V0_ACCESS_RDWR,
@@ -250,8 +260,7 @@
 	if (ret)
 		return ret;
 
-	ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000001,
-			       NV_DMA_IN_MEMORY,
+	ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY,
 			       &(struct nv_dma_v0) {
 					.target = NV_DMA_V0_TARGET_VRAM,
 					.access = NV_DMA_V0_ACCESS_RDWR,
@@ -274,12 +283,13 @@
 };
 
 static int
-nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core)
+nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
+		 u64 syncbuf, struct nv50_mast *core)
 {
 	struct nv50_disp_core_channel_dma_v0 args = {
 		.pushbuf = 0xb0007d00,
 	};
-	static const u32 oclass[] = {
+	static const s32 oclass[] = {
 		GM204_DISP_CORE_CHANNEL_DMA,
 		GM107_DISP_CORE_CHANNEL_DMA,
 		GK110_DISP_CORE_CHANNEL_DMA,
@@ -293,8 +303,8 @@
 		0
 	};
 
-	return nv50_dmac_create(disp, oclass, 0, &args, sizeof(args), syncbuf,
-			       &core->base);
+	return nv50_dmac_create(device, disp, oclass, 0, &args, sizeof(args),
+				syncbuf, &core->base);
 }
 
 /******************************************************************************
@@ -308,14 +318,14 @@
 };
 
 static int
-nv50_base_create(struct nvif_object *disp, int head, u64 syncbuf,
-		 struct nv50_sync *base)
+nv50_base_create(struct nvif_device *device, struct nvif_object *disp,
+		 int head, u64 syncbuf, struct nv50_sync *base)
 {
 	struct nv50_disp_base_channel_dma_v0 args = {
 		.pushbuf = 0xb0007c00 | head,
 		.head = head,
 	};
-	static const u32 oclass[] = {
+	static const s32 oclass[] = {
 		GK110_DISP_BASE_CHANNEL_DMA,
 		GK104_DISP_BASE_CHANNEL_DMA,
 		GF110_DISP_BASE_CHANNEL_DMA,
@@ -326,7 +336,7 @@
 		0
 	};
 
-	return nv50_dmac_create(disp, oclass, head, &args, sizeof(args),
+	return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
 				syncbuf, &base->base);
 }
 
@@ -339,14 +349,14 @@
 };
 
 static int
-nv50_ovly_create(struct nvif_object *disp, int head, u64 syncbuf,
-		 struct nv50_ovly *ovly)
+nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
+		 int head, u64 syncbuf, struct nv50_ovly *ovly)
 {
 	struct nv50_disp_overlay_channel_dma_v0 args = {
 		.pushbuf = 0xb0007e00 | head,
 		.head = head,
 	};
-	static const u32 oclass[] = {
+	static const s32 oclass[] = {
 		GK104_DISP_OVERLAY_CONTROL_DMA,
 		GF110_DISP_OVERLAY_CONTROL_DMA,
 		GT214_DISP_OVERLAY_CHANNEL_DMA,
@@ -356,7 +366,7 @@
 		0
 	};
 
-	return nv50_dmac_create(disp, oclass, head, &args, sizeof(args),
+	return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
 				syncbuf, &ovly->base);
 }
 
@@ -413,6 +423,7 @@
 evo_wait(void *evoc, int nr)
 {
 	struct nv50_dmac *dmac = evoc;
+	struct nvif_device *device = dmac->base.device;
 	u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;
 
 	mutex_lock(&dmac->lock);
@@ -420,9 +431,12 @@
 		dmac->ptr[put] = 0x20000000;
 
 		nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
-		if (!nvxx_wait(&dmac->base.user, 0x0004, ~0, 0x00000000)) {
+		if (nvif_msec(device, 2000,
+			if (!nvif_rd32(&dmac->base.user, 0x0004))
+				break;
+		) < 0) {
 			mutex_unlock(&dmac->lock);
-			nv_error(nvxx_object(&dmac->base.user), "channel stalled\n");
+			printk(KERN_ERR "nouveau: evo channel stalled\n");
 			return NULL;
 		}
 
@@ -480,7 +494,10 @@
 		evo_data(push, 0x00000000);
 		evo_data(push, 0x00000000);
 		evo_kick(push, mast);
-		if (nv_wait_cb(nvxx_device(device), evo_sync_wait, disp->sync))
+		if (nvif_msec(device, 2000,
+			if (evo_sync_wait(disp->sync))
+				break;
+		) >= 0)
 			return 0;
 	}
 
@@ -535,7 +552,10 @@
 		evo_kick(push, flip.chan);
 	}
 
-	nv_wait_cb(nvxx_device(device), nv50_display_flip_wait, &flip);
+	nvif_msec(device, 2000,
+		if (nv50_display_flip_wait(&flip))
+			break;
+	);
 }
 
 int
@@ -563,7 +583,7 @@
 	if (unlikely(push == NULL))
 		return -EBUSY;
 
-	if (chan && chan->object->oclass < G82_CHANNEL_GPFIFO) {
+	if (chan && chan->user.oclass < G82_CHANNEL_GPFIFO) {
 		ret = RING_SPACE(chan, 8);
 		if (ret)
 			return ret;
@@ -577,7 +597,7 @@
 		OUT_RING  (chan, sync->addr);
 		OUT_RING  (chan, sync->data);
 	} else
-	if (chan && chan->object->oclass < FERMI_CHANNEL_GPFIFO) {
+	if (chan && chan->user.oclass < FERMI_CHANNEL_GPFIFO) {
 		u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
 		ret = RING_SPACE(chan, 12);
 		if (ret)
@@ -1408,6 +1428,8 @@
 static int
 nv50_crtc_create(struct drm_device *dev, int index)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvif_device *device = &drm->device;
 	struct nv50_disp *disp = nv50_disp(dev);
 	struct nv50_head *head;
 	struct drm_crtc *crtc;
@@ -1452,13 +1474,13 @@
 		goto out;
 
 	/* allocate cursor resources */
-	ret = nv50_curs_create(disp->disp, index, &head->curs);
+	ret = nv50_curs_create(device, disp->disp, index, &head->curs);
 	if (ret)
 		goto out;
 
 	/* allocate page flip / sync resources */
-	ret = nv50_base_create(disp->disp, index, disp->sync->bo.offset,
-			      &head->sync);
+	ret = nv50_base_create(device, disp->disp, index, disp->sync->bo.offset,
+			       &head->sync);
 	if (ret)
 		goto out;
 
@@ -1466,12 +1488,12 @@
 	head->sync.data = 0x00000000;
 
 	/* allocate overlay resources */
-	ret = nv50_oimm_create(disp->disp, index, &head->oimm);
+	ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
 	if (ret)
 		goto out;
 
-	ret = nv50_ovly_create(disp->disp, index, disp->sync->bo.offset,
-			      &head->ovly);
+	ret = nv50_ovly_create(device, disp->disp, index, disp->sync->bo.offset,
+			       &head->ovly);
 	if (ret)
 		goto out;
 
@@ -1678,6 +1700,7 @@
 {
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+	struct nvkm_i2c_bus *bus;
 	struct nouveau_encoder *nv_encoder;
 	struct drm_encoder *encoder;
 	int type = DRM_MODE_ENCODER_DAC;
@@ -1687,7 +1710,10 @@
 		return -ENOMEM;
 	nv_encoder->dcb = dcbe;
 	nv_encoder->or = ffs(dcbe->or) - 1;
-	nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
+
+	bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
+	if (bus)
+		nv_encoder->i2c = &bus->i2c;
 
 	encoder = to_drm_encoder(nv_encoder);
 	encoder->possible_crtcs = dcbe->heads;
@@ -2081,9 +2107,22 @@
 		return -ENOMEM;
 	nv_encoder->dcb = dcbe;
 	nv_encoder->or = ffs(dcbe->or) - 1;
-	nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
 	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
 
+	if (dcbe->type == DCB_OUTPUT_DP) {
+		struct nvkm_i2c_aux *aux =
+			nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
+		if (aux) {
+			nv_encoder->i2c = &aux->i2c;
+			nv_encoder->aux = aux;
+		}
+	} else {
+		struct nvkm_i2c_bus *bus =
+			nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
+		if (bus)
+			nv_encoder->i2c = &bus->i2c;
+	}
+
 	encoder = to_drm_encoder(nv_encoder);
 	encoder->possible_crtcs = dcbe->heads;
 	encoder->possible_clones = 0;
@@ -2234,18 +2273,22 @@
 {
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
 	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
-	struct nvkm_i2c_port *ddc = NULL;
+	struct nvkm_i2c_bus *bus = NULL;
+	struct nvkm_i2c_aux *aux = NULL;
+	struct i2c_adapter *ddc;
 	struct nouveau_encoder *nv_encoder;
 	struct drm_encoder *encoder;
 	int type;
 
 	switch (dcbe->type) {
 	case DCB_OUTPUT_TMDS:
-		ddc  = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(dcbe->extdev));
+		bus  = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
+		ddc  = bus ? &bus->i2c : NULL;
 		type = DRM_MODE_ENCODER_TMDS;
 		break;
 	case DCB_OUTPUT_DP:
-		ddc  = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(dcbe->extdev));
+		aux  = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
+		ddc  = aux ? &aux->i2c : NULL;
 		type = DRM_MODE_ENCODER_TMDS;
 		break;
 	default:
@@ -2258,6 +2301,7 @@
 	nv_encoder->dcb = dcbe;
 	nv_encoder->or = ffs(dcbe->or) - 1;
 	nv_encoder->i2c = ddc;
+	nv_encoder->aux = aux;
 
 	encoder = to_drm_encoder(nv_encoder);
 	encoder->possible_crtcs = dcbe->heads;
@@ -2295,7 +2339,7 @@
 		union {
 			struct nv50_dma_v0 nv50;
 			struct gf100_dma_v0 gf100;
-			struct gf110_dma_v0 gf110;
+			struct gf119_dma_v0 gf119;
 		};
 	} args = {};
 	struct nv50_fbdma *fbdma;
@@ -2331,15 +2375,15 @@
 		args.gf100.kind = kind;
 		size += sizeof(args.gf100);
 	} else {
-		args.gf110.page = GF110_DMA_V0_PAGE_LP;
-		args.gf110.kind = kind;
-		size += sizeof(args.gf110);
+		args.gf119.page = GF119_DMA_V0_PAGE_LP;
+		args.gf119.kind = kind;
+		size += sizeof(args.gf119);
 	}
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nv50_head *head = nv50_head(crtc);
-		int ret = nvif_object_init(&head->sync.base.base.user, NULL,
-					    name, NV_DMA_IN_MEMORY, &args, size,
+		int ret = nvif_object_init(&head->sync.base.base.user, name,
+					   NV_DMA_IN_MEMORY, &args, size,
 					   &fbdma->base[head->base.index]);
 		if (ret) {
 			nv50_fbdma_fini(fbdma);
@@ -2347,9 +2391,8 @@
 		}
 	}
 
-	ret = nvif_object_init(&mast->base.base.user, NULL, name,
-				NV_DMA_IN_MEMORY, &args, size,
-			       &fbdma->core);
+	ret = nvif_object_init(&mast->base.base.user, name, NV_DMA_IN_MEMORY,
+			       &args, size, &fbdma->core);
 	if (ret) {
 		nv50_fbdma_fini(fbdma);
 		return ret;
@@ -2502,14 +2545,14 @@
 		goto out;
 
 	/* allocate master evo channel */
-	ret = nv50_core_create(disp->disp, disp->sync->bo.offset,
+	ret = nv50_core_create(device, disp->disp, disp->sync->bo.offset,
 			      &disp->mast);
 	if (ret)
 		goto out;
 
 	/* create crtc objects to represent the hw heads */
 	if (disp->disp->oclass >= GF110_DISP)
-		crtcs = nvif_rd32(device, 0x022448);
+		crtcs = nvif_rd32(&device->object, 0x022448);
 	else
 		crtcs = 2;
 
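The nv50_display.c hunks above replace the old nvxx_wait()/nv_wait_cb() helpers with the nvif_msec() polling macro and switch the push buffer to dma_alloc_coherent(). A minimal sketch of the polling idiom, assuming nvif_msec(device, ms, body) re-runs body until it executes break or the timeout expires, returning a negative value on timeout:

	/* Sketch: wait up to 2s for an EVO channel to drain, as in evo_wait() */
	static int
	example_evo_drain(struct nv50_dmac *dmac)
	{
		struct nvif_device *device = dmac->base.device;

		if (nvif_msec(device, 2000,
			if (!nvif_rd32(&dmac->base.user, 0x0004))
				break;
		) < 0)
			return -ETIMEDOUT;
		return 0;
	}
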
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 901130b..e05499d 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -183,7 +183,7 @@
 		return -EINVAL;
 	}
 
-	ret = nvif_object_init(chan->object, NULL, 0x502d, 0x502d, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x502d, 0x502d, NULL, 0,
 			       &nfbdev->twod);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index a82d9ea..f0d96e5 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -51,7 +51,7 @@
 	fctx->base.read = nv10_fence_read;
 	fctx->base.sync = nv17_fence_sync;
 
-	ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_IN_MEMORY,
+	ret = nvif_object_init(&chan->user, NvSema, NV_DMA_IN_MEMORY,
 			       &(struct nv_dma_v0) {
 					.target = NV_DMA_V0_TARGET_VRAM,
 					.access = NV_DMA_V0_ACCESS_RDWR,
@@ -66,7 +66,7 @@
 		u32 start = bo->bo.mem.start * PAGE_SIZE;
 		u32 limit = start + bo->bo.mem.size - 1;
 
-		ret = nvif_object_init(chan->object, NULL, NvEvoSema0 + i,
+		ret = nvif_object_init(&chan->user, NvEvoSema0 + i,
 				       NV_DMA_IN_MEMORY, &(struct nv_dma_v0) {
 						.target = NV_DMA_V0_TARGET_VRAM,
 						.access = NV_DMA_V0_ACCESS_RDWR,
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index a03db43..412c5be 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -131,7 +131,7 @@
 int
 nv84_fence_context_new(struct nouveau_channel *chan)
 {
-	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
+	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nv84_fence_priv *priv = chan->drm->fence;
 	struct nv84_fence_chan *fctx;
 	int ret, i;
@@ -213,7 +213,7 @@
 int
 nv84_fence_create(struct nouveau_drm *drm)
 {
-	struct nvkm_fifo *pfifo = nvxx_fifo(&drm->device);
+	struct nvkm_fifo *fifo = nvxx_fifo(&drm->device);
 	struct nv84_fence_priv *priv;
 	u32 domain;
 	int ret;
@@ -228,7 +228,7 @@
 	priv->base.context_new = nv84_fence_context_new;
 	priv->base.context_del = nv84_fence_context_del;
 
-	priv->base.contexts = pfifo->max + 1;
+	priv->base.contexts = fifo->nr;
 	priv->base.context_base = fence_context_alloc(priv->base.contexts);
 	priv->base.uevent = true;
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index fcd2e5f..c97395b 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -156,7 +156,7 @@
 	struct nouveau_channel *chan = drm->channel;
 	int ret, format;
 
-	ret = nvif_object_init(chan->object, NULL, 0x902d, 0x902d, NULL, 0,
+	ret = nvif_object_init(&chan->user, 0x902d, 0x902d, NULL, 0,
 			       &nfbdev->twod);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 80b9684..1ee9294 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -29,29 +29,29 @@
 int
 nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
 {
-	return client->driver->ioctl(client->base.priv, client->super, data, size, NULL);
+	return client->driver->ioctl(client->object.priv, client->super, data, size, NULL);
 }
 
 int
 nvif_client_suspend(struct nvif_client *client)
 {
-	return client->driver->suspend(client->base.priv);
+	return client->driver->suspend(client->object.priv);
 }
 
 int
 nvif_client_resume(struct nvif_client *client)
 {
-	return client->driver->resume(client->base.priv);
+	return client->driver->resume(client->object.priv);
 }
 
 void
 nvif_client_fini(struct nvif_client *client)
 {
 	if (client->driver) {
-		client->driver->fini(client->base.priv);
+		client->driver->fini(client->object.priv);
 		client->driver = NULL;
-		client->base.parent = NULL;
-		nvif_object_fini(&client->base);
+		client->object.client = NULL;
+		nvif_object_fini(&client->object);
 	}
 }
 
@@ -68,63 +68,39 @@
 };
 
 int
-nvif_client_init(void (*dtor)(struct nvif_client *), const char *driver,
-		 const char *name, u64 device, const char *cfg, const char *dbg,
-		 struct nvif_client *client)
+nvif_client_init(const char *driver, const char *name, u64 device,
+		 const char *cfg, const char *dbg, struct nvif_client *client)
 {
+	struct {
+		struct nvif_ioctl_v0 ioctl;
+		struct nvif_ioctl_nop_v0 nop;
+	} args = {};
 	int ret, i;
 
-	ret = nvif_object_init(NULL, (void*)dtor, 0, 0, NULL, 0, &client->base);
+	ret = nvif_object_init(NULL, 0, 0, NULL, 0, &client->object);
 	if (ret)
 		return ret;
 
-	client->base.parent = &client->base;
-	client->base.handle = ~0;
-	client->object = &client->base;
+	client->object.client = client;
+	client->object.handle = ~0;
+	client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
 	client->super = true;
 
 	for (i = 0, ret = -EINVAL; (client->driver = nvif_drivers[i]); i++) {
 		if (!driver || !strcmp(client->driver->name, driver)) {
 			ret = client->driver->init(name, device, cfg, dbg,
-						  &client->base.priv);
+						  &client->object.priv);
 			if (!ret || driver)
 				break;
 		}
 	}
 
+	if (ret == 0) {
+		ret = nvif_client_ioctl(client, &args, sizeof(args));
+		client->version = args.nop.version;
+	}
+
 	if (ret)
 		nvif_client_fini(client);
 	return ret;
 }
-
-static void
-nvif_client_del(struct nvif_client *client)
-{
-	nvif_client_fini(client);
-	kfree(client);
-}
-
-int
-nvif_client_new(const char *driver, const char *name, u64 device,
-		const char *cfg, const char *dbg,
-		struct nvif_client **pclient)
-{
-	struct nvif_client *client = kzalloc(sizeof(*client), GFP_KERNEL);
-	if (client) {
-		int ret = nvif_client_init(nvif_client_del, driver, name,
-					   device, cfg, dbg, client);
-		if (ret) {
-			kfree(client);
-			client = NULL;
-		}
-		*pclient = client;
-		return ret;
-	}
-	return -ENOMEM;
-}
-
-void
-nvif_client_ref(struct nvif_client *client, struct nvif_client **pclient)
-{
-	nvif_object_ref(&client->base, (struct nvif_object **)pclient);
-}
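
With the destructor callback and the _new()/_ref() wrappers removed, callers now embed struct nvif_client and pair init with fini themselves. A hedged usage sketch of the new entry point (the surrounding variables are assumed to exist in the caller):

	struct nvif_client client;
	int ret;

	/* driver may be NULL to probe every registered nvif driver */
	ret = nvif_client_init("drm", name, device, cfg, dbg, &client);
	if (ret)
		return ret;
	/* ... use client ... */
	nvif_client_fini(&client);
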
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
index 6f72244..252d8c3 100644
--- a/drivers/gpu/drm/nouveau/nvif/device.c
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -24,55 +24,32 @@
 
 #include <nvif/device.h>
 
+u64
+nvif_device_time(struct nvif_device *device)
+{
+	struct nv_device_time_v0 args = {};
+	int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_TIME,
+				   &args, sizeof(args));
+	WARN_ON_ONCE(ret != 0);
+	return args.time;
+}
+
 void
 nvif_device_fini(struct nvif_device *device)
 {
-	nvif_object_fini(&device->base);
+	nvif_object_fini(&device->object);
 }
 
 int
-nvif_device_init(struct nvif_object *parent, void (*dtor)(struct nvif_device *),
-		 u32 handle, u32 oclass, void *data, u32 size,
-		 struct nvif_device *device)
+nvif_device_init(struct nvif_object *parent, u32 handle, s32 oclass,
+		 void *data, u32 size, struct nvif_device *device)
 {
-	int ret = nvif_object_init(parent, (void *)dtor, handle, oclass,
-				   data, size, &device->base);
+	int ret = nvif_object_init(parent, handle, oclass, data, size,
+				   &device->object);
 	if (ret == 0) {
-		device->object = &device->base;
 		device->info.version = 0;
-		ret = nvif_object_mthd(&device->base, NV_DEVICE_V0_INFO,
+		ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_INFO,
 				       &device->info, sizeof(device->info));
 	}
 	return ret;
 }
-
-static void
-nvif_device_del(struct nvif_device *device)
-{
-	nvif_device_fini(device);
-	kfree(device);
-}
-
-int
-nvif_device_new(struct nvif_object *parent, u32 handle, u32 oclass,
-		void *data, u32 size, struct nvif_device **pdevice)
-{
-	struct nvif_device *device = kzalloc(sizeof(*device), GFP_KERNEL);
-	if (device) {
-		int ret = nvif_device_init(parent, nvif_device_del, handle,
-					   oclass, data, size, device);
-		if (ret) {
-			kfree(device);
-			device = NULL;
-		}
-		*pdevice = device;
-		return ret;
-	}
-	return -ENOMEM;
-}
-
-void
-nvif_device_ref(struct nvif_device *device, struct nvif_device **pdevice)
-{
-	nvif_object_ref(&device->base, (struct nvif_object **)pdevice);
-}
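
nvif_device_time() wraps the NV_DEVICE_V0_TIME method so callers can read the device timebase without reaching into nvkm. A minimal usage sketch; the unit of the returned counter is an assumption (likely nanoseconds) and is not visible in this hunk:

	u64 start, elapsed;

	start = nvif_device_time(device);
	/* ... issue and wait on work ... */
	elapsed = nvif_device_time(device) - start;
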
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c
index 8e34748..b0787ff 100644
--- a/drivers/gpu/drm/nouveau/nvif/notify.c
+++ b/drivers/gpu/drm/nouveau/nvif/notify.c
@@ -124,7 +124,7 @@
 	}
 
 	if (!WARN_ON(notify == NULL)) {
-		struct nvif_client *client = nvif_client(notify->object);
+		struct nvif_client *client = notify->object->client;
 		if (!WARN_ON(notify->size != size)) {
 			atomic_inc(&notify->putcnt);
 			if (test_bit(NVIF_NOTIFY_WORK, &notify->flags)) {
@@ -156,7 +156,7 @@
 	if (ret >= 0 && object) {
 		ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
 		if (ret == 0) {
-			nvif_object_ref(NULL, &notify->object);
+			notify->object = NULL;
 			kfree((void *)notify->data);
 		}
 	}
@@ -164,9 +164,9 @@
 }
 
 int
-nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *),
-		 int (*func)(struct nvif_notify *), bool work, u8 event,
-		 void *data, u32 size, u32 reply, struct nvif_notify *notify)
+nvif_notify_init(struct nvif_object *object, int (*func)(struct nvif_notify *),
+		 bool work, u8 event, void *data, u32 size, u32 reply,
+		 struct nvif_notify *notify)
 {
 	struct {
 		struct nvif_ioctl_v0 ioctl;
@@ -175,11 +175,9 @@
 	} *args;
 	int ret = -ENOMEM;
 
-	notify->object = NULL;
-	nvif_object_ref(object, &notify->object);
+	notify->object = object;
 	notify->flags = 0;
 	atomic_set(&notify->putcnt, 1);
-	notify->dtor = dtor;
 	notify->func = func;
 	notify->data = NULL;
 	notify->size = reply;
@@ -211,38 +209,3 @@
 		nvif_notify_fini(notify);
 	return ret;
 }
-
-static void
-nvif_notify_del(struct nvif_notify *notify)
-{
-	nvif_notify_fini(notify);
-	kfree(notify);
-}
-
-void
-nvif_notify_ref(struct nvif_notify *notify, struct nvif_notify **pnotify)
-{
-	BUG_ON(notify != NULL);
-	if (*pnotify)
-		(*pnotify)->dtor(*pnotify);
-	*pnotify = notify;
-}
-
-int
-nvif_notify_new(struct nvif_object *object, int (*func)(struct nvif_notify *),
-		bool work, u8 type, void *data, u32 size, u32 reply,
-		struct nvif_notify **pnotify)
-{
-	struct nvif_notify *notify = kzalloc(sizeof(*notify), GFP_KERNEL);
-	if (notify) {
-		int ret = nvif_notify_init(object, nvif_notify_del, func, work,
-					   type, data, size, reply, notify);
-		if (ret) {
-			kfree(notify);
-			notify = NULL;
-		}
-		*pnotify = notify;
-		return ret;
-	}
-	return -ENOMEM;
-}
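
nvif_notify no longer takes a destructor and no longer holds a reference on its object; it simply borrows the pointer, so the notify must be torn down before the object it watches. A hedged sketch of the new pairing, with example_handler() hypothetical and assuming the usual NVIF_NOTIFY_KEEP/DROP return convention:

	static int
	example_handler(struct nvif_notify *notify)
	{
		/* handle the event, then keep the notify armed */
		return NVIF_NOTIFY_KEEP;
	}

	struct nvif_notify notify;
	int ret = nvif_notify_init(object, example_handler, false, event,
				   data, size, reply, &notify);
	if (ret)
		return ret;
	/* ... */
	nvif_notify_fini(&notify);
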
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index 3ab4e2f..c3fb6a2 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -30,47 +30,71 @@
 int
 nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
 {
-	struct nvif_client *client = nvif_client(object);
+	struct nvif_client *client = object->client;
 	union {
 		struct nvif_ioctl_v0 v0;
 	} *args = data;
 
 	if (size >= sizeof(*args) && args->v0.version == 0) {
+		if (object != &client->object)
+			args->v0.object = nvif_handle(object);
+		else
+			args->v0.object = 0;
 		args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
-		args->v0.path_nr = 0;
-		while (args->v0.path_nr < ARRAY_SIZE(args->v0.path)) {
-			args->v0.path[args->v0.path_nr++] = object->handle;
-			if (object->parent == object)
-				break;
-			object = object->parent;
-		}
 	} else
 		return -ENOSYS;
 
-	return client->driver->ioctl(client->base.priv, client->super, data, size, hack);
+	return client->driver->ioctl(client->object.priv, client->super,
+				     data, size, hack);
+}
+
+void
+nvif_object_sclass_put(struct nvif_sclass **psclass)
+{
+	kfree(*psclass);
+	*psclass = NULL;
 }
 
 int
-nvif_object_sclass(struct nvif_object *object, u32 *oclass, int count)
+nvif_object_sclass_get(struct nvif_object *object, struct nvif_sclass **psclass)
 {
 	struct {
 		struct nvif_ioctl_v0 ioctl;
 		struct nvif_ioctl_sclass_v0 sclass;
-	} *args;
-	u32 size = count * sizeof(args->sclass.oclass[0]);
-	int ret;
+	} *args = NULL;
+	int ret, cnt = 0, i;
+	u32 size;
 
-	if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
-		return -ENOMEM;
-	args->ioctl.version = 0;
-	args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
-	args->sclass.version = 0;
-	args->sclass.count = count;
+	while (1) {
+		size = sizeof(*args) + cnt * sizeof(args->sclass.oclass[0]);
+		if (!(args = kmalloc(size, GFP_KERNEL)))
+			return -ENOMEM;
+		args->ioctl.version = 0;
+		args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
+		args->sclass.version = 0;
+		args->sclass.count = cnt;
 
-	memcpy(args->sclass.oclass, oclass, size);
-	ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
-	ret = ret ? ret : args->sclass.count;
-	memcpy(oclass, args->sclass.oclass, size);
+		ret = nvif_object_ioctl(object, args, size, NULL);
+		if (ret == 0 && args->sclass.count <= cnt)
+			break;
+		cnt = args->sclass.count;
+		kfree(args);
+		if (ret != 0)
+			return ret;
+	}
+
+	*psclass = kzalloc(sizeof(**psclass) * args->sclass.count, GFP_KERNEL);
+	if (*psclass) {
+		for (i = 0; i < args->sclass.count; i++) {
+			(*psclass)[i].oclass = args->sclass.oclass[i].oclass;
+			(*psclass)[i].minver = args->sclass.oclass[i].minver;
+			(*psclass)[i].maxver = args->sclass.oclass[i].maxver;
+		}
+		ret = args->sclass.count;
+	} else {
+		ret = -ENOMEM;
+	}
+
 	kfree(args);
 	return ret;
 }
@@ -145,7 +169,7 @@
 nvif_object_unmap(struct nvif_object *object)
 {
 	if (object->map.size) {
-		struct nvif_client *client = nvif_client(object);
+		struct nvif_client *client = object->client;
 		struct {
 			struct nvif_ioctl_v0 ioctl;
 			struct nvif_ioctl_unmap unmap;
@@ -167,7 +191,7 @@
 int
 nvif_object_map(struct nvif_object *object)
 {
-	struct nvif_client *client = nvif_client(object);
+	struct nvif_client *client = object->client;
 	struct {
 		struct nvif_ioctl_v0 ioctl;
 		struct nvif_ioctl_map_v0 map;
@@ -186,119 +210,65 @@
 	return ret;
 }
 
-struct ctor {
-	struct nvif_ioctl_v0 ioctl;
-	struct nvif_ioctl_new_v0 new;
-};
-
 void
 nvif_object_fini(struct nvif_object *object)
 {
-	struct ctor *ctor = container_of(object->data, typeof(*ctor), new.data);
-	if (object->parent) {
-		struct {
-			struct nvif_ioctl_v0 ioctl;
-			struct nvif_ioctl_del del;
-		} args = {
-			.ioctl.type = NVIF_IOCTL_V0_DEL,
-		};
+	struct {
+		struct nvif_ioctl_v0 ioctl;
+		struct nvif_ioctl_del del;
+	} args = {
+		.ioctl.type = NVIF_IOCTL_V0_DEL,
+	};
 
-		nvif_object_unmap(object);
-		nvif_object_ioctl(object, &args, sizeof(args), NULL);
-		if (object->data) {
-			object->size = 0;
-			object->data = NULL;
-			kfree(ctor);
-		}
-		nvif_object_ref(NULL, &object->parent);
-	}
+	if (!object->client)
+		return;
+
+	nvif_object_unmap(object);
+	nvif_object_ioctl(object, &args, sizeof(args), NULL);
+	object->client = NULL;
 }
 
 int
-nvif_object_init(struct nvif_object *parent, void (*dtor)(struct nvif_object *),
-		 u32 handle, u32 oclass, void *data, u32 size,
-		 struct nvif_object *object)
+nvif_object_init(struct nvif_object *parent, u32 handle, s32 oclass,
+		 void *data, u32 size, struct nvif_object *object)
 {
-	struct ctor *ctor;
+	struct {
+		struct nvif_ioctl_v0 ioctl;
+		struct nvif_ioctl_new_v0 new;
+	} *args;
 	int ret = 0;
 
-	object->parent = NULL;
-	object->object = object;
-	nvif_object_ref(parent, &object->parent);
-	kref_init(&object->refcount);
+	object->client = NULL;
 	object->handle = handle;
 	object->oclass = oclass;
-	object->data = NULL;
-	object->size = 0;
-	object->dtor = dtor;
 	object->map.ptr = NULL;
 	object->map.size = 0;
 
-	if (object->parent) {
-		if (!(ctor = kmalloc(sizeof(*ctor) + size, GFP_KERNEL))) {
+	if (parent) {
+		if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) {
 			nvif_object_fini(object);
 			return -ENOMEM;
 		}
-		object->data = ctor->new.data;
-		object->size = size;
-		memcpy(object->data, data, size);
 
-		ctor->ioctl.version = 0;
-		ctor->ioctl.type = NVIF_IOCTL_V0_NEW;
-		ctor->new.version = 0;
-		ctor->new.route = NVIF_IOCTL_V0_ROUTE_NVIF;
-		ctor->new.token = (unsigned long)(void *)object;
-		ctor->new.handle = handle;
-		ctor->new.oclass = oclass;
+		args->ioctl.version = 0;
+		args->ioctl.type = NVIF_IOCTL_V0_NEW;
+		args->new.version = 0;
+		args->new.route = parent->client->route;
+		args->new.token = nvif_handle(object);
+		args->new.object = nvif_handle(object);
+		args->new.handle = handle;
+		args->new.oclass = oclass;
 
-		ret = nvif_object_ioctl(parent, ctor, sizeof(*ctor) +
-					object->size, &object->priv);
+		memcpy(args->new.data, data, size);
+		ret = nvif_object_ioctl(parent, args, sizeof(*args) + size,
+					&object->priv);
+		memcpy(data, args->new.data, size);
+		kfree(args);
+		if (ret == 0)
+			object->client = parent->client;
 	}
 
 	if (ret)
 		nvif_object_fini(object);
 	return ret;
 }
-
-static void
-nvif_object_del(struct nvif_object *object)
-{
-	nvif_object_fini(object);
-	kfree(object);
-}
-
-int
-nvif_object_new(struct nvif_object *parent, u32 handle, u32 oclass,
-		void *data, u32 size, struct nvif_object **pobject)
-{
-	struct nvif_object *object = kzalloc(sizeof(*object), GFP_KERNEL);
-	if (object) {
-		int ret = nvif_object_init(parent, nvif_object_del, handle,
-					   oclass, data, size, object);
-		if (ret) {
-			kfree(object);
-			object = NULL;
-		}
-		*pobject = object;
-		return ret;
-	}
-	return -ENOMEM;
-}
-
-static void
-nvif_object_put(struct kref *kref)
-{
-	struct nvif_object *object =
-		container_of(kref, typeof(*object), refcount);
-	object->dtor(object);
-}
-
-void
-nvif_object_ref(struct nvif_object *object, struct nvif_object **pobject)
-{
-	if (object)
-		kref_get(&object->refcount);
-	if (*pobject)
-		kref_put(&(*pobject)->refcount, nvif_object_put);
-	*pobject = object;
-}
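
nvif_object_sclass() becomes a get/put pair that sizes its own buffer: the SCLASS ioctl is retried with the count the kernel reports until the result fits, and the caller frees the list with the put call. A short caller sketch, mirroring nv50_chan_create() earlier in this diff:

	struct nvif_sclass *sclass;
	int i, n;

	n = nvif_object_sclass_get(object, &sclass);
	if (n < 0)
		return n;
	for (i = 0; i < n; i++)
		printk(KERN_DEBUG "oclass %08x (v%d..v%d)\n",
		       sclass[i].oclass, sclass[i].minver, sclass[i].maxver);
	nvif_object_sclass_put(&sclass);
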
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
index a2bdb20..7f66963 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild
@@ -1,17 +1,14 @@
 nvkm-y := nvkm/core/client.o
-nvkm-y += nvkm/core/engctx.o
 nvkm-y += nvkm/core/engine.o
 nvkm-y += nvkm/core/enum.o
 nvkm-y += nvkm/core/event.o
 nvkm-y += nvkm/core/gpuobj.o
-nvkm-y += nvkm/core/handle.o
 nvkm-y += nvkm/core/ioctl.o
+nvkm-y += nvkm/core/memory.o
 nvkm-y += nvkm/core/mm.o
-nvkm-y += nvkm/core/namedb.o
 nvkm-y += nvkm/core/notify.o
 nvkm-y += nvkm/core/object.o
+nvkm-y += nvkm/core/oproxy.o
 nvkm-y += nvkm/core/option.o
-nvkm-y += nvkm/core/parent.o
-nvkm-y += nvkm/core/printk.o
 nvkm-y += nvkm/core/ramht.o
 nvkm-y += nvkm/core/subdev.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index 878a82f..297e1e9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -23,7 +23,6 @@
  */
 #include <core/client.h>
 #include <core/device.h>
-#include <core/handle.h>
 #include <core/notify.h>
 #include <core/option.h>
 
@@ -91,7 +90,7 @@
 nvkm_client_notify_new(struct nvkm_object *object,
 		       struct nvkm_event *event, void *data, u32 size)
 {
-	struct nvkm_client *client = nvkm_client(object);
+	struct nvkm_client *client = object->client;
 	struct nvkm_client_notify *notify;
 	union {
 		struct nvif_notify_req_v0 v0;
@@ -111,11 +110,11 @@
 	if (!notify)
 		return -ENOMEM;
 
-	nv_ioctl(client, "notify new size %d\n", size);
+	nvif_ioctl(object, "notify new size %d\n", size);
 	if (nvif_unpack(req->v0, 0, 0, true)) {
-		nv_ioctl(client, "notify new vers %d reply %d route %02x "
-				 "token %llx\n", req->v0.version,
-			 req->v0.reply, req->v0.route, req->v0.token);
+		nvif_ioctl(object, "notify new vers %d reply %d route %02x "
+				   "token %llx\n", req->v0.version,
+			   req->v0.reply, req->v0.route, req->v0.token);
 		notify->version = req->v0.version;
 		notify->size = sizeof(notify->rep.v0);
 		notify->rep.v0.version = req->v0.version;
@@ -146,10 +145,10 @@
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "client devlist size %d\n", size);
+	nvif_ioctl(object, "client devlist size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "client devlist vers %d count %d\n",
-			 args->v0.version, args->v0.count);
+		nvif_ioctl(object, "client devlist vers %d count %d\n",
+			   args->v0.version, args->v0.count);
 		if (size == sizeof(args->v0.device[0]) * args->v0.count) {
 			ret = nvkm_device_list(args->v0.device, args->v0.count);
 			if (ret >= 0) {
@@ -176,91 +175,134 @@
 	return -EINVAL;
 }
 
-static void
-nvkm_client_dtor(struct nvkm_object *object)
+static int
+nvkm_client_child_new(const struct nvkm_oclass *oclass,
+		      void *data, u32 size, struct nvkm_object **pobject)
 {
-	struct nvkm_client *client = (void *)object;
-	int i;
-	for (i = 0; i < ARRAY_SIZE(client->notify); i++)
-		nvkm_client_notify_del(client, i);
-	nvkm_object_ref(NULL, &client->device);
-	nvkm_handle_destroy(client->root);
-	nvkm_namedb_destroy(&client->namedb);
+	return oclass->base.ctor(oclass, data, size, pobject);
 }
 
-static struct nvkm_oclass
-nvkm_client_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.dtor = nvkm_client_dtor,
-		.mthd = nvkm_client_mthd,
-	},
-};
-
-int
-nvkm_client_create_(const char *name, u64 devname, const char *cfg,
-		    const char *dbg, int length, void **pobject)
+static int
+nvkm_client_child_get(struct nvkm_object *object, int index,
+		      struct nvkm_oclass *oclass)
 {
-	struct nvkm_object *device;
-	struct nvkm_client *client;
-	int ret;
+	const struct nvkm_sclass *sclass;
 
-	device = (void *)nvkm_device_find(devname);
-	if (!device)
-		return -ENODEV;
+	switch (index) {
+	case 0: sclass = &nvkm_udevice_sclass; break;
+	default:
+		return -EINVAL;
+	}
 
-	ret = nvkm_namedb_create_(NULL, NULL, &nvkm_client_oclass,
-				  NV_CLIENT_CLASS, NULL,
-				  (1ULL << NVDEV_ENGINE_DEVICE),
-				  length, pobject);
-	client = *pobject;
-	if (ret)
-		return ret;
-
-	ret = nvkm_handle_create(nv_object(client), ~0, ~0, nv_object(client),
-				 &client->root);
-	if (ret)
-		return ret;
-
-	/* prevent init/fini being called, os is in charge of this */
-	atomic_set(&nv_object(client)->usecount, 2);
-
-	nvkm_object_ref(device, &client->device);
-	snprintf(client->name, sizeof(client->name), "%s", name);
-	client->debug = nvkm_dbgopt(dbg, "CLIENT");
+	oclass->ctor = nvkm_client_child_new;
+	oclass->base = *sclass;
 	return 0;
 }
 
-int
-nvkm_client_init(struct nvkm_client *client)
+static const struct nvkm_object_func
+nvkm_client_object_func = {
+	.mthd = nvkm_client_mthd,
+	.sclass = nvkm_client_child_get,
+};
+
+void
+nvkm_client_remove(struct nvkm_client *client, struct nvkm_object *object)
 {
-	int ret;
-	nv_debug(client, "init running\n");
-	ret = nvkm_handle_init(client->root);
-	nv_debug(client, "init completed with %d\n", ret);
-	return ret;
+	if (!RB_EMPTY_NODE(&object->node))
+		rb_erase(&object->node, &client->objroot);
+}
+
+bool
+nvkm_client_insert(struct nvkm_client *client, struct nvkm_object *object)
+{
+	struct rb_node **ptr = &client->objroot.rb_node;
+	struct rb_node *parent = NULL;
+
+	while (*ptr) {
+		struct nvkm_object *this =
+			container_of(*ptr, typeof(*this), node);
+		parent = *ptr;
+		if (object->object < this->object)
+			ptr = &parent->rb_left;
+		else
+		if (object->object > this->object)
+			ptr = &parent->rb_right;
+		else
+			return false;
+	}
+
+	rb_link_node(&object->node, parent, ptr);
+	rb_insert_color(&object->node, &client->objroot);
+	return true;
+}
+
+struct nvkm_object *
+nvkm_client_search(struct nvkm_client *client, u64 handle)
+{
+	struct rb_node *node = client->objroot.rb_node;
+	while (node) {
+		struct nvkm_object *object =
+			container_of(node, typeof(*object), node);
+		if (handle < object->object)
+			node = node->rb_left;
+		else
+		if (handle > object->object)
+			node = node->rb_right;
+		else
+			return object;
+	}
+	return NULL;
 }
 
 int
 nvkm_client_fini(struct nvkm_client *client, bool suspend)
 {
+	struct nvkm_object *object = &client->object;
 	const char *name[2] = { "fini", "suspend" };
-	int ret, i;
-	nv_debug(client, "%s running\n", name[suspend]);
-	nv_debug(client, "%s notify\n", name[suspend]);
+	int i;
+	nvif_debug(object, "%s notify\n", name[suspend]);
 	for (i = 0; i < ARRAY_SIZE(client->notify); i++)
 		nvkm_client_notify_put(client, i);
-	nv_debug(client, "%s object\n", name[suspend]);
-	ret = nvkm_handle_fini(client->root, suspend);
-	nv_debug(client, "%s completed with %d\n", name[suspend], ret);
-	return ret;
+	return nvkm_object_fini(&client->object, suspend);
 }
 
-const char *
-nvkm_client_name(void *obj)
+int
+nvkm_client_init(struct nvkm_client *client)
 {
-	const char *client_name = "unknown";
-	struct nvkm_client *client = nvkm_client(obj);
-	if (client)
-		client_name = client->name;
-	return client_name;
+	return nvkm_object_init(&client->object);
+}
+
+void
+nvkm_client_del(struct nvkm_client **pclient)
+{
+	struct nvkm_client *client = *pclient;
+	int i;
+	if (client) {
+		nvkm_client_fini(client, false);
+		for (i = 0; i < ARRAY_SIZE(client->notify); i++)
+			nvkm_client_notify_del(client, i);
+		nvkm_object_dtor(&client->object);
+		kfree(*pclient);
+		*pclient = NULL;
+	}
+}
+
+int
+nvkm_client_new(const char *name, u64 device, const char *cfg,
+		const char *dbg, struct nvkm_client **pclient)
+{
+	struct nvkm_oclass oclass = {};
+	struct nvkm_client *client;
+
+	if (!(client = *pclient = kzalloc(sizeof(*client), GFP_KERNEL)))
+		return -ENOMEM;
+	oclass.client = client;
+
+	nvkm_object_ctor(&nvkm_client_object_func, &oclass, &client->object);
+	snprintf(client->name, sizeof(client->name), "%s", name);
+	client->device = device;
+	client->debug = nvkm_dbgopt(dbg, "CLIENT");
+	client->objroot = RB_ROOT;
+	client->dmaroot = RB_ROOT;
+	return 0;
 }
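
Object lookup moves from the old handle/namedb machinery to a per-client rbtree keyed on the 64-bit object token, giving O(log n) resolution on ioctl paths. A sketch of the insert/search/remove trio defined above, with token standing in for the handle the caller obtained via nvif_handle():

	struct nvkm_object *found;

	if (!nvkm_client_insert(client, object))
		return -EEXIST;	/* duplicate token */

	found = nvkm_client_search(client, token);
	if (!found)
		return -ENOENT;
	nvkm_client_remove(client, found);
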
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engctx.c b/drivers/gpu/drm/nouveau/nvkm/core/engctx.c
deleted file mode 100644
index fb2acbc..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/engctx.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/engctx.h>
-#include <core/engine.h>
-#include <core/client.h>
-
-static inline int
-nvkm_engctx_exists(struct nvkm_object *parent,
-		   struct nvkm_engine *engine, void **pobject)
-{
-	struct nvkm_engctx *engctx;
-	struct nvkm_object *parctx;
-
-	list_for_each_entry(engctx, &engine->contexts, head) {
-		parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS);
-		if (parctx == parent) {
-			atomic_inc(&nv_object(engctx)->refcount);
-			*pobject = engctx;
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
-int
-nvkm_engctx_create_(struct nvkm_object *parent, struct nvkm_object *engobj,
-		    struct nvkm_oclass *oclass, struct nvkm_object *pargpu,
-		    u32 size, u32 align, u32 flags, int length, void **pobject)
-{
-	struct nvkm_client *client = nvkm_client(parent);
-	struct nvkm_engine *engine = nv_engine(engobj);
-	struct nvkm_object *engctx;
-	unsigned long save;
-	int ret;
-
-	/* check if this engine already has a context for the parent object,
-	 * and reference it instead of creating a new one
-	 */
-	spin_lock_irqsave(&engine->lock, save);
-	ret = nvkm_engctx_exists(parent, engine, pobject);
-	spin_unlock_irqrestore(&engine->lock, save);
-	if (ret)
-		return ret;
-
-	/* create the new context, supports creating both raw objects and
-	 * objects backed by instance memory
-	 */
-	if (size) {
-		ret = nvkm_gpuobj_create_(parent, engobj, oclass,
-					  NV_ENGCTX_CLASS, pargpu, size,
-					  align, flags, length, pobject);
-	} else {
-		ret = nvkm_object_create_(parent, engobj, oclass,
-					  NV_ENGCTX_CLASS, length, pobject);
-	}
-
-	engctx = *pobject;
-	if (ret)
-		return ret;
-
-	/* must take the lock again and re-check a context doesn't already
-	 * exist (in case of a race) - the lock had to be dropped before as
-	 * it's not possible to allocate the object with it held.
-	 */
-	spin_lock_irqsave(&engine->lock, save);
-	ret = nvkm_engctx_exists(parent, engine, pobject);
-	if (ret) {
-		spin_unlock_irqrestore(&engine->lock, save);
-		nvkm_object_ref(NULL, &engctx);
-		return ret;
-	}
-
-	if (client->vm)
-		atomic_inc(&client->vm->engref[nv_engidx(engine)]);
-	list_add(&nv_engctx(engctx)->head, &engine->contexts);
-	nv_engctx(engctx)->addr = ~0ULL;
-	spin_unlock_irqrestore(&engine->lock, save);
-	return 0;
-}
-
-void
-nvkm_engctx_destroy(struct nvkm_engctx *engctx)
-{
-	struct nvkm_engine *engine = engctx->gpuobj.object.engine;
-	struct nvkm_client *client = nvkm_client(engctx);
-	unsigned long save;
-
-	nvkm_gpuobj_unmap(&engctx->vma);
-	spin_lock_irqsave(&engine->lock, save);
-	list_del(&engctx->head);
-	spin_unlock_irqrestore(&engine->lock, save);
-
-	if (client->vm)
-		atomic_dec(&client->vm->engref[nv_engidx(engine)]);
-
-	if (engctx->gpuobj.size)
-		nvkm_gpuobj_destroy(&engctx->gpuobj);
-	else
-		nvkm_object_destroy(&engctx->gpuobj.object);
-}
-
-int
-nvkm_engctx_init(struct nvkm_engctx *engctx)
-{
-	struct nvkm_object *object = nv_object(engctx);
-	struct nvkm_subdev *subdev = nv_subdev(object->engine);
-	struct nvkm_object *parent;
-	struct nvkm_subdev *pardev;
-	int ret;
-
-	ret = nvkm_gpuobj_init(&engctx->gpuobj);
-	if (ret)
-		return ret;
-
-	parent = nv_pclass(object->parent, NV_PARENT_CLASS);
-	pardev = nv_subdev(parent->engine);
-	if (nv_parent(parent)->context_attach) {
-		mutex_lock(&pardev->mutex);
-		ret = nv_parent(parent)->context_attach(parent, object);
-		mutex_unlock(&pardev->mutex);
-	}
-
-	if (ret) {
-		nv_error(parent, "failed to attach %s context, %d\n",
-			 subdev->name, ret);
-		return ret;
-	}
-
-	nv_debug(parent, "attached %s context\n", subdev->name);
-	return 0;
-}
-
-int
-nvkm_engctx_fini(struct nvkm_engctx *engctx, bool suspend)
-{
-	struct nvkm_object *object = nv_object(engctx);
-	struct nvkm_subdev *subdev = nv_subdev(object->engine);
-	struct nvkm_object *parent;
-	struct nvkm_subdev *pardev;
-	int ret = 0;
-
-	parent = nv_pclass(object->parent, NV_PARENT_CLASS);
-	pardev = nv_subdev(parent->engine);
-	if (nv_parent(parent)->context_detach) {
-		mutex_lock(&pardev->mutex);
-		ret = nv_parent(parent)->context_detach(parent, suspend, object);
-		mutex_unlock(&pardev->mutex);
-	}
-
-	if (ret) {
-		nv_error(parent, "failed to detach %s context, %d\n",
-			 subdev->name, ret);
-		return ret;
-	}
-
-	nv_debug(parent, "detached %s context\n", subdev->name);
-	return nvkm_gpuobj_fini(&engctx->gpuobj, suspend);
-}
-
-int
-_nvkm_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct nvkm_engctx *engctx;
-	int ret;
-
-	ret = nvkm_engctx_create(parent, engine, oclass, NULL, 256, 256,
-				 NVOBJ_FLAG_ZERO_ALLOC, &engctx);
-	*pobject = nv_object(engctx);
-	return ret;
-}
-
-void
-_nvkm_engctx_dtor(struct nvkm_object *object)
-{
-	nvkm_engctx_destroy(nv_engctx(object));
-}
-
-int
-_nvkm_engctx_init(struct nvkm_object *object)
-{
-	return nvkm_engctx_init(nv_engctx(object));
-}
-
-int
-_nvkm_engctx_fini(struct nvkm_object *object, bool suspend)
-{
-	return nvkm_engctx_fini(nv_engctx(object), suspend);
-}
-
-struct nvkm_object *
-nvkm_engctx_get(struct nvkm_engine *engine, u64 addr)
-{
-	struct nvkm_engctx *engctx;
-	unsigned long flags;
-
-	spin_lock_irqsave(&engine->lock, flags);
-	list_for_each_entry(engctx, &engine->contexts, head) {
-		if (engctx->addr == addr) {
-			engctx->save = flags;
-			return nv_object(engctx);
-		}
-	}
-	spin_unlock_irqrestore(&engine->lock, flags);
-	return NULL;
-}
-
-void
-nvkm_engctx_put(struct nvkm_object *object)
-{
-	if (object) {
-		struct nvkm_engine *engine = nv_engine(object->engine);
-		struct nvkm_engctx *engctx = nv_engctx(object);
-		spin_unlock_irqrestore(&engine->lock, engctx->save);
-	}
-}
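
The deleted engctx code tracked one context per (engine, parent) pair on a spinlock-protected list, and had to use a check/allocate/re-check sequence because the context could not be allocated with the lock held. In generic form, the retired pattern looked like this (lookup(), allocate() and publish() are placeholders, not real functions):

	spin_lock_irqsave(&engine->lock, flags);
	found = lookup(engine, parent);
	spin_unlock_irqrestore(&engine->lock, flags);
	if (!found) {
		new = allocate();		/* may sleep, so no lock held */
		spin_lock_irqsave(&engine->lock, flags);
		found = lookup(engine, parent);	/* re-check: lost the race? */
		if (!found)
			publish(engine, new);	/* we won; make it visible */
		spin_unlock_irqrestore(&engine->lock, flags);
	}

This bookkeeping disappears along with the file; engine lifetime is handled instead by the use counting introduced in engine.c below.
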
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index 6082017..8a7bae7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -25,51 +25,141 @@
 #include <core/device.h>
 #include <core/option.h>
 
-struct nvkm_engine *
-nvkm_engine(void *obj, int idx)
+#include <subdev/fb.h>
+
+void
+nvkm_engine_unref(struct nvkm_engine **pengine)
 {
-	obj = nvkm_subdev(obj, idx);
-	if (obj && nv_iclass(obj, NV_ENGINE_CLASS))
-		return nv_engine(obj);
-	return NULL;
+	struct nvkm_engine *engine = *pengine;
+	if (engine) {
+		mutex_lock(&engine->subdev.mutex);
+		if (--engine->usecount == 0)
+			nvkm_subdev_fini(&engine->subdev, false);
+		mutex_unlock(&engine->subdev.mutex);
+		*pengine = NULL;
+	}
+}
+
+struct nvkm_engine *
+nvkm_engine_ref(struct nvkm_engine *engine)
+{
+	if (engine) {
+		mutex_lock(&engine->subdev.mutex);
+		if (++engine->usecount == 1) {
+			int ret = nvkm_subdev_init(&engine->subdev);
+			if (ret) {
+				engine->usecount--;
+				mutex_unlock(&engine->subdev.mutex);
+				return ERR_PTR(ret);
+			}
+		}
+		mutex_unlock(&engine->subdev.mutex);
+	}
+	return engine;
+}
+
+void
+nvkm_engine_tile(struct nvkm_engine *engine, int region)
+{
+	struct nvkm_fb *fb = engine->subdev.device->fb;
+	if (engine->func->tile)
+		engine->func->tile(engine, region, &fb->tile.region[region]);
+}
+
+static void
+nvkm_engine_intr(struct nvkm_subdev *subdev)
+{
+	struct nvkm_engine *engine = nvkm_engine(subdev);
+	if (engine->func->intr)
+		engine->func->intr(engine);
+}
+
+static int
+nvkm_engine_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_engine *engine = nvkm_engine(subdev);
+	if (engine->func->fini)
+		return engine->func->fini(engine, suspend);
+	return 0;
+}
+
+static int
+nvkm_engine_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_engine *engine = nvkm_engine(subdev);
+	struct nvkm_fb *fb = subdev->device->fb;
+	int ret = 0, i;
+	s64 time;
+
+	if (!engine->usecount) {
+		nvkm_trace(subdev, "init skipped, engine has no users\n");
+		return ret;
+	}
+
+	if (engine->func->oneinit && !engine->subdev.oneinit) {
+		nvkm_trace(subdev, "one-time init running...\n");
+		time = ktime_to_us(ktime_get());
+		ret = engine->func->oneinit(engine);
+		if (ret) {
+			nvkm_trace(subdev, "one-time init failed, %d\n", ret);
+			return ret;
+		}
+
+		engine->subdev.oneinit = true;
+		time = ktime_to_us(ktime_get()) - time;
+		nvkm_trace(subdev, "one-time init completed in %lldus\n", time);
+	}
+
+	if (engine->func->init)
+		ret = engine->func->init(engine);
+
+	for (i = 0; fb && i < fb->tile.regions; i++)
+		nvkm_engine_tile(engine, i);
+	return ret;
+}
+
+static void *
+nvkm_engine_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_engine *engine = nvkm_engine(subdev);
+	if (engine->func->dtor)
+		return engine->func->dtor(engine);
+	return engine;
+}
+
+static const struct nvkm_subdev_func
+nvkm_engine_func = {
+	.dtor = nvkm_engine_dtor,
+	.init = nvkm_engine_init,
+	.fini = nvkm_engine_fini,
+	.intr = nvkm_engine_intr,
+};
+
+int
+nvkm_engine_ctor(const struct nvkm_engine_func *func,
+		 struct nvkm_device *device, int index, u32 pmc_enable,
+		 bool enable, struct nvkm_engine *engine)
+{
+	nvkm_subdev_ctor(&nvkm_engine_func, device, index,
+			 pmc_enable, &engine->subdev);
+	engine->func = func;
+
+	if (!nvkm_boolopt(device->cfgopt, nvkm_subdev_name[index], enable)) {
+		nvkm_debug(&engine->subdev, "disabled\n");
+		return -ENODEV;
+	}
+
+	spin_lock_init(&engine->lock);
+	return 0;
 }
 
 int
-nvkm_engine_create_(struct nvkm_object *parent, struct nvkm_object *engobj,
-		    struct nvkm_oclass *oclass, bool enable,
-		    const char *iname, const char *fname,
-		    int length, void **pobject)
+nvkm_engine_new_(const struct nvkm_engine_func *func,
+		 struct nvkm_device *device, int index, u32 pmc_enable,
+		 bool enable, struct nvkm_engine **pengine)
 {
-	struct nvkm_engine *engine;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS,
-				  iname, fname, length, pobject);
-	engine = *pobject;
-	if (ret)
-		return ret;
-
-	if (parent) {
-		struct nvkm_device *device = nv_device(parent);
-		int engidx = nv_engidx(engine);
-
-		if (device->disable_mask & (1ULL << engidx)) {
-			if (!nvkm_boolopt(device->cfgopt, iname, false)) {
-				nv_debug(engine, "engine disabled by hw/fw\n");
-				return -ENODEV;
-			}
-
-			nv_warn(engine, "ignoring hw/fw engine disable\n");
-		}
-
-		if (!nvkm_boolopt(device->cfgopt, iname, enable)) {
-			if (!enable)
-				nv_warn(engine, "disabled, %s=1 to enable\n", iname);
-			return -ENODEV;
-		}
-	}
-
-	INIT_LIST_HEAD(&engine->contexts);
-	spin_lock_init(&engine->lock);
-	return 0;
+	if (!(*pengine = kzalloc(sizeof(**pengine), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_engine_ctor(func, device, index, pmc_enable,
+				enable, *pengine);
 }
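
Engines are now started and stopped by use count: the first nvkm_engine_ref() triggers nvkm_subdev_init() (including any one-time init), and the final nvkm_engine_unref() runs nvkm_subdev_fini(). A sketch of the expected pairing (use_engine() is illustrative only, not part of the patch):

	static int
	use_engine(struct nvkm_engine *candidate)
	{
		/* the 0 -> 1 transition initialises the engine's subdev */
		struct nvkm_engine *engine = nvkm_engine_ref(candidate);
		if (IS_ERR(engine))
			return PTR_ERR(engine);

		/* ... engine is initialised and safe to use here ... */

		/* the 1 -> 0 transition finalises it; pointer is NULLed */
		nvkm_engine_unref(&engine);
		return 0;
	}

Note that nvkm_engine_ref(NULL) simply returns NULL, so a missing optional engine passes straight through without error.
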
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/enum.c b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
index 4f92bfc..b9581fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/enum.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
@@ -38,29 +38,19 @@
 	return NULL;
 }
 
-const struct nvkm_enum *
-nvkm_enum_print(const struct nvkm_enum *en, u32 value)
-{
-	en = nvkm_enum_find(en, value);
-	if (en)
-		pr_cont("%s", en->name);
-	else
-		pr_cont("(unknown enum 0x%08x)", value);
-	return en;
-}
-
 void
-nvkm_bitfield_print(const struct nvkm_bitfield *bf, u32 value)
+nvkm_snprintbf(char *data, int size, const struct nvkm_bitfield *bf, u32 value)
 {
-	while (bf->name) {
+	bool space = false;
+	while (size >= 1 && bf->name) {
 		if (value & bf->mask) {
-			pr_cont(" %s", bf->name);
-			value &= ~bf->mask;
+			int this = snprintf(data, size, "%s%s",
+					    space ? " " : "", bf->name);
+			size -= this;
+			data += this;
+			space = true;
 		}
-
 		bf++;
 	}
-
-	if (value)
-		pr_cont(" (unknown bits 0x%08x)", value);
+	data[0] = '\0';
 }
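
nvkm_snprintbf() replaces the pr_cont()-based bitfield printer with one that renders into a caller-supplied buffer, so decoded status bits can be embedded in a single log line. Unlike the removed nvkm_bitfield_print(), bits without a table entry are silently ignored. A usage sketch (table contents and the stat value are made up for illustration):

	static const struct nvkm_bitfield example_intr[] = {
		{ .mask = 0x00000001, .name = "FAULT" },
		{ .mask = 0x00000002, .name = "PAUSED" },
		{}
	};

	char msg[64];
	u32 stat = 0x00000003;	/* hypothetical interrupt status */
	nvkm_snprintbf(msg, sizeof(msg), example_intr, stat);
	/* msg now contains "FAULT PAUSED" */
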
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
index 2eba801..c3a790e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
@@ -28,240 +28,205 @@
 #include <subdev/bar.h>
 #include <subdev/mmu.h>
 
-void
-nvkm_gpuobj_destroy(struct nvkm_gpuobj *gpuobj)
+/* fast-path, where backend is able to provide direct pointer to memory */
+static u32
+nvkm_gpuobj_rd32_fast(struct nvkm_gpuobj *gpuobj, u32 offset)
 {
-	int i;
-
-	if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
-		for (i = 0; i < gpuobj->size; i += 4)
-			nv_wo32(gpuobj, i, 0x00000000);
-	}
-
-	if (gpuobj->node)
-		nvkm_mm_free(&nv_gpuobj(gpuobj->parent)->heap, &gpuobj->node);
-
-	if (gpuobj->heap.block_size)
-		nvkm_mm_fini(&gpuobj->heap);
-
-	nvkm_object_destroy(&gpuobj->object);
+	return ioread32_native(gpuobj->map + offset);
 }
 
-int
-nvkm_gpuobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 pclass,
-		    struct nvkm_object *pargpu, u32 size, u32 align, u32 flags,
-		    int length, void **pobject)
+static void
+nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
 {
-	struct nvkm_instmem *imem = nvkm_instmem(parent);
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nvkm_gpuobj *gpuobj;
-	struct nvkm_mm *heap = NULL;
-	int ret, i;
-	u64 addr;
+	iowrite32_native(data, gpuobj->map + offset);
+}
 
-	*pobject = NULL;
+/* accessor functions for gpuobjs allocated directly from instmem */
+static u32
+nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
+{
+	return nvkm_ro32(gpuobj->memory, offset);
+}
 
-	if (pargpu) {
-		while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
-			if (nv_gpuobj(pargpu)->heap.block_size)
-				break;
-			pargpu = pargpu->parent;
-		}
+static void
+nvkm_gpuobj_heap_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
+{
+	nvkm_wo32(gpuobj->memory, offset, data);
+}
 
-		if (unlikely(pargpu == NULL)) {
-			nv_error(parent, "no gpuobj heap\n");
-			return -EINVAL;
-		}
+static const struct nvkm_gpuobj_func nvkm_gpuobj_heap;
+static void
+nvkm_gpuobj_heap_release(struct nvkm_gpuobj *gpuobj)
+{
+	gpuobj->func = &nvkm_gpuobj_heap;
+	nvkm_done(gpuobj->memory);
+}
 
-		addr =  nv_gpuobj(pargpu)->addr;
-		heap = &nv_gpuobj(pargpu)->heap;
-		atomic_inc(&parent->refcount);
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap_fast = {
+	.release = nvkm_gpuobj_heap_release,
+	.rd32 = nvkm_gpuobj_rd32_fast,
+	.wr32 = nvkm_gpuobj_wr32_fast,
+};
+
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap_slow = {
+	.release = nvkm_gpuobj_heap_release,
+	.rd32 = nvkm_gpuobj_heap_rd32,
+	.wr32 = nvkm_gpuobj_heap_wr32,
+};
+
+static void *
+nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
+{
+	gpuobj->map = nvkm_kmap(gpuobj->memory);
+	if (likely(gpuobj->map))
+		gpuobj->func = &nvkm_gpuobj_heap_fast;
+	else
+		gpuobj->func = &nvkm_gpuobj_heap_slow;
+	return gpuobj->map;
+}
+
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_heap = {
+	.acquire = nvkm_gpuobj_heap_acquire,
+};
+
+/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
+static u32
+nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
+{
+	return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset);
+}
+
+static void
+nvkm_gpuobj_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
+{
+	nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data);
+}
+
+static const struct nvkm_gpuobj_func nvkm_gpuobj_func;
+static void
+nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj)
+{
+	gpuobj->func = &nvkm_gpuobj_func;
+	nvkm_done(gpuobj->parent);
+}
+
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_fast = {
+	.release = nvkm_gpuobj_release,
+	.rd32 = nvkm_gpuobj_rd32_fast,
+	.wr32 = nvkm_gpuobj_wr32_fast,
+};
+
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_slow = {
+	.release = nvkm_gpuobj_release,
+	.rd32 = nvkm_gpuobj_rd32,
+	.wr32 = nvkm_gpuobj_wr32,
+};
+
+static void *
+nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
+{
+	gpuobj->map = nvkm_kmap(gpuobj->parent);
+	if (likely(gpuobj->map)) {
+		gpuobj->map  = (u8 *)gpuobj->map + gpuobj->node->offset;
+		gpuobj->func = &nvkm_gpuobj_fast;
 	} else {
-		ret = imem->alloc(imem, parent, size, align, &parent);
-		pargpu = parent;
-		if (ret)
-			return ret;
-
-		addr = nv_memobj(pargpu)->addr;
-		size = nv_memobj(pargpu)->size;
-
-		if (bar && bar->alloc) {
-			struct nvkm_instobj *iobj = (void *)parent;
-			struct nvkm_mem **mem = (void *)(iobj + 1);
-			struct nvkm_mem *node = *mem;
-			if (!bar->alloc(bar, parent, node, &pargpu)) {
-				nvkm_object_ref(NULL, &parent);
-				parent = pargpu;
-			}
-		}
+		gpuobj->func = &nvkm_gpuobj_slow;
 	}
-
-	ret = nvkm_object_create_(parent, engine, oclass, pclass |
-				  NV_GPUOBJ_CLASS, length, pobject);
-	nvkm_object_ref(NULL, &parent);
-	gpuobj = *pobject;
-	if (ret)
-		return ret;
-
-	gpuobj->parent = pargpu;
-	gpuobj->flags = flags;
-	gpuobj->addr = addr;
-	gpuobj->size = size;
-
-	if (heap) {
-		ret = nvkm_mm_head(heap, 0, 1, size, size, max(align, (u32)1),
-				   &gpuobj->node);
-		if (ret)
-			return ret;
-
-		gpuobj->addr += gpuobj->node->offset;
-	}
-
-	if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
-		ret = nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
-		if (ret)
-			return ret;
-	}
-
-	if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
-		for (i = 0; i < gpuobj->size; i += 4)
-			nv_wo32(gpuobj, i, 0x00000000);
-	}
-
-	return ret;
+	return gpuobj->map;
 }
 
-struct nvkm_gpuobj_class {
-	struct nvkm_object *pargpu;
-	u64 size;
-	u32 align;
-	u32 flags;
+static const struct nvkm_gpuobj_func
+nvkm_gpuobj_func = {
+	.acquire = nvkm_gpuobj_acquire,
 };
 
 static int
-_nvkm_gpuobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
+		 struct nvkm_gpuobj *parent, struct nvkm_gpuobj *gpuobj)
 {
-	struct nvkm_gpuobj_class *args = data;
-	struct nvkm_gpuobj *object;
+	u32 offset;
 	int ret;
 
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
-				 args->size, args->align, args->flags,
-				 &object);
-	*pobject = nv_object(object);
-	if (ret)
-		return ret;
+	if (parent) {
+		if (align >= 0) {
+			ret = nvkm_mm_head(&parent->heap, 0, 1, size, size,
+					   max(align, 1), &gpuobj->node);
+		} else {
+			ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size,
+					   -align, &gpuobj->node);
+		}
+		if (ret)
+			return ret;
 
-	return 0;
-}
+		gpuobj->parent = parent;
+		gpuobj->func = &nvkm_gpuobj_func;
+		gpuobj->addr = parent->addr + gpuobj->node->offset;
+		gpuobj->size = gpuobj->node->length;
 
-void
-_nvkm_gpuobj_dtor(struct nvkm_object *object)
-{
-	nvkm_gpuobj_destroy(nv_gpuobj(object));
-}
+		if (zero) {
+			nvkm_kmap(gpuobj);
+			for (offset = 0; offset < gpuobj->size; offset += 4)
+				nvkm_wo32(gpuobj, offset, 0x00000000);
+			nvkm_done(gpuobj);
+		}
+	} else {
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size,
+				      abs(align), zero, &gpuobj->memory);
+		if (ret)
+			return ret;
 
-int
-_nvkm_gpuobj_init(struct nvkm_object *object)
-{
-	return nvkm_gpuobj_init(nv_gpuobj(object));
-}
-
-int
-_nvkm_gpuobj_fini(struct nvkm_object *object, bool suspend)
-{
-	return nvkm_gpuobj_fini(nv_gpuobj(object), suspend);
-}
-
-u32
-_nvkm_gpuobj_rd32(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
-	struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
-	if (gpuobj->node)
-		addr += gpuobj->node->offset;
-	return pfuncs->rd32(gpuobj->parent, addr);
-}
-
-void
-_nvkm_gpuobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	struct nvkm_gpuobj *gpuobj = nv_gpuobj(object);
-	struct nvkm_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
-	if (gpuobj->node)
-		addr += gpuobj->node->offset;
-	pfuncs->wr32(gpuobj->parent, addr, data);
-}
-
-static struct nvkm_oclass
-_nvkm_gpuobj_oclass = {
-	.handle = 0x00000000,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpuobj_ctor,
-		.dtor = _nvkm_gpuobj_dtor,
-		.init = _nvkm_gpuobj_init,
-		.fini = _nvkm_gpuobj_fini,
-		.rd32 = _nvkm_gpuobj_rd32,
-		.wr32 = _nvkm_gpuobj_wr32,
-	},
-};
-
-int
-nvkm_gpuobj_new(struct nvkm_object *parent, struct nvkm_object *pargpu,
-		u32 size, u32 align, u32 flags,
-		struct nvkm_gpuobj **pgpuobj)
-{
-	struct nvkm_object *engine = parent;
-	struct nvkm_gpuobj_class args = {
-		.pargpu = pargpu,
-		.size = size,
-		.align = align,
-		.flags = flags,
-	};
-
-	if (!nv_iclass(engine, NV_SUBDEV_CLASS))
-		engine = &engine->engine->subdev.object;
-	BUG_ON(engine == NULL);
-
-	return nvkm_object_ctor(parent, engine, &_nvkm_gpuobj_oclass,
-				&args, sizeof(args),
-				(struct nvkm_object **)pgpuobj);
-}
-
-int
-nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u32 access, struct nvkm_vma *vma)
-{
-	struct nvkm_bar *bar = nvkm_bar(gpuobj);
-	int ret = -EINVAL;
-
-	if (bar && bar->umap) {
-		struct nvkm_instobj *iobj = (void *)
-			nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
-		struct nvkm_mem **mem = (void *)(iobj + 1);
-		ret = bar->umap(bar, *mem, access, vma);
+		gpuobj->func = &nvkm_gpuobj_heap;
+		gpuobj->addr = nvkm_memory_addr(gpuobj->memory);
+		gpuobj->size = nvkm_memory_size(gpuobj->memory);
 	}
 
+	return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
+}
+
+void
+nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj)
+{
+	struct nvkm_gpuobj *gpuobj = *pgpuobj;
+	if (gpuobj) {
+		if (gpuobj->parent)
+			nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
+		nvkm_mm_fini(&gpuobj->heap);
+		nvkm_memory_del(&gpuobj->memory);
+		kfree(*pgpuobj);
+		*pgpuobj = NULL;
+	}
+}
+
+int
+nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
+		struct nvkm_gpuobj *parent, struct nvkm_gpuobj **pgpuobj)
+{
+	struct nvkm_gpuobj *gpuobj;
+	int ret;
+
+	if (!(gpuobj = *pgpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL)))
+		return -ENOMEM;
+
+	ret = nvkm_gpuobj_ctor(device, size, align, zero, parent, gpuobj);
+	if (ret)
+		nvkm_gpuobj_del(pgpuobj);
 	return ret;
 }
 
 int
-nvkm_gpuobj_map_vm(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
-		   u32 access, struct nvkm_vma *vma)
+nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
+		u32 access, struct nvkm_vma *vma)
 {
-	struct nvkm_instobj *iobj = (void *)
-		nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
-	struct nvkm_mem **mem = (void *)(iobj + 1);
-	int ret;
-
-	ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, *mem);
-	return 0;
+	struct nvkm_memory *memory = gpuobj->memory;
+	int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
+	if (ret == 0)
+		nvkm_memory_map(memory, vma, 0);
+	return ret;
 }
 
 void
@@ -278,39 +243,13 @@
  * anywhere else.
  */
 
-static void
-nvkm_gpudup_dtor(struct nvkm_object *object)
-{
-	struct nvkm_gpuobj *gpuobj = (void *)object;
-	nvkm_object_ref(NULL, &gpuobj->parent);
-	nvkm_object_destroy(&gpuobj->object);
-}
-
-static struct nvkm_oclass
-nvkm_gpudup_oclass = {
-	.handle = NV_GPUOBJ_CLASS,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.dtor = nvkm_gpudup_dtor,
-		.init = nvkm_object_init,
-		.fini = nvkm_object_fini,
-	},
-};
-
 int
-nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_gpuobj *base,
-		struct nvkm_gpuobj **pgpuobj)
+nvkm_gpuobj_wrap(struct nvkm_memory *memory, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *gpuobj;
-	int ret;
+	if (!(*pgpuobj = kzalloc(sizeof(**pgpuobj), GFP_KERNEL)))
+		return -ENOMEM;
 
-	ret = nvkm_object_create(parent, &parent->engine->subdev.object,
-				 &nvkm_gpudup_oclass, 0, &gpuobj);
-	*pgpuobj = gpuobj;
-	if (ret)
-		return ret;
-
-	nvkm_object_ref(nv_object(base), &gpuobj->parent);
-	gpuobj->addr = base->addr;
-	gpuobj->size = base->size;
+	(*pgpuobj)->addr = nvkm_memory_addr(memory);
+	(*pgpuobj)->size = nvkm_memory_size(memory);
 	return 0;
 }
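
gpuobj allocation collapses into nvkm_gpuobj_new(): with no parent, the backing store comes from instance memory via nvkm_memory_new(); with a parent, the object is carved out of the parent's heap, and a negative align requests nvkm_mm_tail(), i.e. allocation from the end of the heap. Access goes through the acquire/release pairs above, which select the fast ioread/iowrite path whenever a direct mapping exists. A sketch of both levels (sizes chosen arbitrarily):

	struct nvkm_gpuobj *heap = NULL, *sub = NULL;
	int ret;

	/* top-level object, backed by instmem, zero-filled */
	ret = nvkm_gpuobj_new(device, 0x1000, 0x1000, true, NULL, &heap);
	if (ret)
		return ret;

	/* suballocate 0x100 bytes from the *end* of the parent's heap */
	ret = nvkm_gpuobj_new(device, 0x100, -0x100, true, heap, &sub);
	if (ret == 0) {
		nvkm_kmap(sub);
		nvkm_wo32(sub, 0x00, 0xdeadbeef);
		nvkm_done(sub);
		nvkm_gpuobj_del(&sub);
	}
	nvkm_gpuobj_del(&heap);
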
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/handle.c b/drivers/gpu/drm/nouveau/nvkm/core/handle.c
deleted file mode 100644
index dc7ff10..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/handle.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/handle.h>
-#include <core/client.h>
-
-#define hprintk(h,l,f,a...) do {                                               \
-	struct nvkm_client *c = nvkm_client((h)->object);                      \
-	struct nvkm_handle *p = (h)->parent; u32 n = p ? p->name : ~0;         \
-	nv_printk((c), l, "0x%08x:0x%08x "f, n, (h)->name, ##a);               \
-} while(0)
-
-int
-nvkm_handle_init(struct nvkm_handle *handle)
-{
-	struct nvkm_handle *item;
-	int ret;
-
-	hprintk(handle, TRACE, "init running\n");
-	ret = nvkm_object_inc(handle->object);
-	if (ret)
-		return ret;
-
-	hprintk(handle, TRACE, "init children\n");
-	list_for_each_entry(item, &handle->tree, head) {
-		ret = nvkm_handle_init(item);
-		if (ret)
-			goto fail;
-	}
-
-	hprintk(handle, TRACE, "init completed\n");
-	return 0;
-fail:
-	hprintk(handle, ERROR, "init failed with %d\n", ret);
-	list_for_each_entry_continue_reverse(item, &handle->tree, head) {
-		nvkm_handle_fini(item, false);
-	}
-
-	nvkm_object_dec(handle->object, false);
-	return ret;
-}
-
-int
-nvkm_handle_fini(struct nvkm_handle *handle, bool suspend)
-{
-	static char *name[2] = { "fini", "suspend" };
-	struct nvkm_handle *item;
-	int ret;
-
-	hprintk(handle, TRACE, "%s children\n", name[suspend]);
-	list_for_each_entry(item, &handle->tree, head) {
-		ret = nvkm_handle_fini(item, suspend);
-		if (ret && suspend)
-			goto fail;
-	}
-
-	hprintk(handle, TRACE, "%s running\n", name[suspend]);
-	if (handle->object) {
-		ret = nvkm_object_dec(handle->object, suspend);
-		if (ret && suspend)
-			goto fail;
-	}
-
-	hprintk(handle, TRACE, "%s completed\n", name[suspend]);
-	return 0;
-fail:
-	hprintk(handle, ERROR, "%s failed with %d\n", name[suspend], ret);
-	list_for_each_entry_continue_reverse(item, &handle->tree, head) {
-		int rret = nvkm_handle_init(item);
-		if (rret)
-			hprintk(handle, FATAL, "failed to restart, %d\n", rret);
-	}
-
-	return ret;
-}
-
-int
-nvkm_handle_create(struct nvkm_object *parent, u32 _parent, u32 _handle,
-		   struct nvkm_object *object, struct nvkm_handle **phandle)
-{
-	struct nvkm_object *namedb;
-	struct nvkm_handle *handle;
-	int ret;
-
-	namedb = parent;
-	while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
-		namedb = namedb->parent;
-
-	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
-	if (!handle)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&handle->head);
-	INIT_LIST_HEAD(&handle->tree);
-	handle->name = _handle;
-	handle->priv = ~0;
-
-	ret = nvkm_namedb_insert(nv_namedb(namedb), _handle, object, handle);
-	if (ret) {
-		kfree(handle);
-		return ret;
-	}
-
-	if (nv_parent(parent)->object_attach) {
-		ret = nv_parent(parent)->object_attach(parent, object, _handle);
-		if (ret < 0) {
-			nvkm_handle_destroy(handle);
-			return ret;
-		}
-
-		handle->priv = ret;
-	}
-
-	if (object != namedb) {
-		while (!nv_iclass(namedb, NV_CLIENT_CLASS))
-			namedb = namedb->parent;
-
-		handle->parent = nvkm_namedb_get(nv_namedb(namedb), _parent);
-		if (handle->parent) {
-			list_add(&handle->head, &handle->parent->tree);
-			nvkm_namedb_put(handle->parent);
-		}
-	}
-
-	hprintk(handle, TRACE, "created\n");
-	*phandle = handle;
-	return 0;
-}
-
-void
-nvkm_handle_destroy(struct nvkm_handle *handle)
-{
-	struct nvkm_handle *item, *temp;
-
-	hprintk(handle, TRACE, "destroy running\n");
-	list_for_each_entry_safe(item, temp, &handle->tree, head) {
-		nvkm_handle_destroy(item);
-	}
-	list_del(&handle->head);
-
-	if (handle->priv != ~0) {
-		struct nvkm_object *parent = handle->parent->object;
-		nv_parent(parent)->object_detach(parent, handle->priv);
-	}
-
-	hprintk(handle, TRACE, "destroy completed\n");
-	nvkm_namedb_remove(handle);
-	kfree(handle);
-}
-
-struct nvkm_object *
-nvkm_handle_ref(struct nvkm_object *parent, u32 name)
-{
-	struct nvkm_object *object = NULL;
-	struct nvkm_handle *handle;
-
-	while (!nv_iclass(parent, NV_NAMEDB_CLASS))
-		parent = parent->parent;
-
-	handle = nvkm_namedb_get(nv_namedb(parent), name);
-	if (handle) {
-		nvkm_object_ref(handle->object, &object);
-		nvkm_namedb_put(handle);
-	}
-
-	return object;
-}
-
-struct nvkm_handle *
-nvkm_handle_get_class(struct nvkm_object *engctx, u16 oclass)
-{
-	struct nvkm_namedb *namedb;
-	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
-		return nvkm_namedb_get_class(namedb, oclass);
-	return NULL;
-}
-
-struct nvkm_handle *
-nvkm_handle_get_vinst(struct nvkm_object *engctx, u64 vinst)
-{
-	struct nvkm_namedb *namedb;
-	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
-		return nvkm_namedb_get_vinst(namedb, vinst);
-	return NULL;
-}
-
-struct nvkm_handle *
-nvkm_handle_get_cinst(struct nvkm_object *engctx, u32 cinst)
-{
-	struct nvkm_namedb *namedb;
-	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
-		return nvkm_namedb_get_cinst(namedb, cinst);
-	return NULL;
-}
-
-void
-nvkm_handle_put(struct nvkm_handle *handle)
-{
-	if (handle)
-		nvkm_namedb_put(handle);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index 4459ff5..d87d6ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -24,196 +24,154 @@
 #include <core/ioctl.h>
 #include <core/client.h>
 #include <core/engine.h>
-#include <core/handle.h>
-#include <core/namedb.h>
 
 #include <nvif/unpack.h>
 #include <nvif/ioctl.h>
 
 static int
-nvkm_ioctl_nop(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_nop(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
 	union {
-		struct nvif_ioctl_nop none;
+		struct nvif_ioctl_nop_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "nop size %d\n", size);
-	if (nvif_unvers(args->none)) {
-		nv_ioctl(object, "nop\n");
+	nvif_ioctl(object, "nop size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "nop vers %lld\n", args->v0.version);
+		args->v0.version = NVIF_VERSION_LATEST;
 	}
 
 	return ret;
 }
 
 static int
-nvkm_ioctl_sclass(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_sclass(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
 	union {
 		struct nvif_ioctl_sclass_v0 v0;
 	} *args = data;
-	int ret;
+	struct nvkm_oclass oclass;
+	int ret, i = 0;
 
-	if (!nv_iclass(object, NV_PARENT_CLASS)) {
-		nv_debug(object, "cannot have children (sclass)\n");
-		return -ENODEV;
-	}
-
-	nv_ioctl(object, "sclass size %d\n", size);
+	nvif_ioctl(object, "sclass size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "sclass vers %d count %d\n",
-			 args->v0.version, args->v0.count);
-		if (size == args->v0.count * sizeof(args->v0.oclass[0])) {
-			ret = nvkm_parent_lclass(object, args->v0.oclass,
-							 args->v0.count);
-			if (ret >= 0) {
-				args->v0.count = ret;
-				ret = 0;
+		nvif_ioctl(object, "sclass vers %d count %d\n",
+			   args->v0.version, args->v0.count);
+		if (size != args->v0.count * sizeof(args->v0.oclass[0]))
+			return -EINVAL;
+
+		while (object->func->sclass &&
+		       object->func->sclass(object, i, &oclass) >= 0) {
+			if (i < args->v0.count) {
+				args->v0.oclass[i].oclass = oclass.base.oclass;
+				args->v0.oclass[i].minver = oclass.base.minver;
+				args->v0.oclass[i].maxver = oclass.base.maxver;
 			}
-		} else {
-			ret = -EINVAL;
+			i++;
 		}
+
+		args->v0.count = i;
 	}
 
 	return ret;
 }
 
 static int
-nvkm_ioctl_new(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)
 {
 	union {
 		struct nvif_ioctl_new_v0 v0;
 	} *args = data;
-	struct nvkm_client *client = nvkm_client(handle->object);
-	struct nvkm_object *engctx = NULL;
+	struct nvkm_client *client = parent->client;
 	struct nvkm_object *object = NULL;
-	struct nvkm_parent *parent;
-	struct nvkm_object *engine;
-	struct nvkm_oclass *oclass;
-	u32 _handle, _oclass;
-	int ret;
+	struct nvkm_oclass oclass;
+	int ret, i = 0;
 
-	nv_ioctl(client, "new size %d\n", size);
+	nvif_ioctl(parent, "new size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		_handle = args->v0.handle;
-		_oclass = args->v0.oclass;
+		nvif_ioctl(parent, "new vers %d handle %08x class %08x "
+				   "route %02x token %llx object %016llx\n",
+			   args->v0.version, args->v0.handle, args->v0.oclass,
+			   args->v0.route, args->v0.token, args->v0.object);
 	} else
 		return ret;
 
-	nv_ioctl(client, "new vers %d handle %08x class %08x "
-			 "route %02x token %llx\n",
-		 args->v0.version, _handle, _oclass,
-		 args->v0.route, args->v0.token);
-
-	if (!nv_iclass(handle->object, NV_PARENT_CLASS)) {
-		nv_debug(handle->object, "cannot have children (ctor)\n");
-		ret = -ENODEV;
-		goto fail_class;
+	if (!parent->func->sclass) {
+		nvif_ioctl(parent, "cannot have children\n");
+		return -EINVAL;
 	}
 
-	parent = nv_parent(handle->object);
-
-	/* check that parent supports the requested subclass */
-	ret = nvkm_parent_sclass(&parent->object, _oclass, &engine, &oclass);
-	if (ret) {
-		nv_debug(parent, "illegal class 0x%04x\n", _oclass);
-		goto fail_class;
-	}
-
-	/* make sure engine init has been completed *before* any objects
-	 * it controls are created - the constructors may depend on
-	 * state calculated at init (ie. default context construction)
-	 */
-	if (engine) {
-		ret = nvkm_object_inc(engine);
+	do {
+		memset(&oclass, 0x00, sizeof(oclass));
+		oclass.client = client;
+		oclass.handle = args->v0.handle;
+		oclass.object = args->v0.object;
+		oclass.parent = parent;
+		ret = parent->func->sclass(parent, i++, &oclass);
 		if (ret)
-			goto fail_class;
+			return ret;
+	} while (oclass.base.oclass != args->v0.oclass);
+
+	if (oclass.engine) {
+		oclass.engine = nvkm_engine_ref(oclass.engine);
+		if (IS_ERR(oclass.engine))
+			return PTR_ERR(oclass.engine);
 	}
 
-	/* if engine requires it, create a context object to insert
-	 * between the parent and its children (eg. PGRAPH context)
-	 */
-	if (engine && nv_engine(engine)->cclass) {
-		ret = nvkm_object_ctor(&parent->object, engine,
-				       nv_engine(engine)->cclass,
-				       data, size, &engctx);
-		if (ret)
-			goto fail_engctx;
-	} else {
-		nvkm_object_ref(&parent->object, &engctx);
+	ret = oclass.ctor(&oclass, data, size, &object);
+	nvkm_engine_unref(&oclass.engine);
+	if (ret == 0) {
+		ret = nvkm_object_init(object);
+		if (ret == 0) {
+			list_add(&object->head, &parent->tree);
+			object->route = args->v0.route;
+			object->token = args->v0.token;
+			object->object = args->v0.object;
+			if (nvkm_client_insert(client, object)) {
+				client->data = object;
+				return 0;
+			}
+			ret = -EEXIST;
+		}
+		nvkm_object_fini(object, false);
 	}
 
-	/* finally, create new object and bind it to its handle */
-	ret = nvkm_object_ctor(engctx, engine, oclass, data, size, &object);
-	client->data = object;
-	if (ret)
-		goto fail_ctor;
-
-	ret = nvkm_object_inc(object);
-	if (ret)
-		goto fail_init;
-
-	ret = nvkm_handle_create(&parent->object, handle->name,
-				 _handle, object, &handle);
-	if (ret)
-		goto fail_handle;
-
-	ret = nvkm_handle_init(handle);
-	handle->route = args->v0.route;
-	handle->token = args->v0.token;
-	if (ret)
-		nvkm_handle_destroy(handle);
-
-fail_handle:
-	nvkm_object_dec(object, false);
-fail_init:
-	nvkm_object_ref(NULL, &object);
-fail_ctor:
-	nvkm_object_ref(NULL, &engctx);
-fail_engctx:
-	if (engine)
-		nvkm_object_dec(engine, false);
-fail_class:
+	nvkm_object_del(&object);
 	return ret;
 }
 
 static int
-nvkm_ioctl_del(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_del(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
 	union {
 		struct nvif_ioctl_del none;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "delete size %d\n", size);
+	nvif_ioctl(object, "delete size %d\n", size);
 	if (nvif_unvers(args->none)) {
-		nv_ioctl(object, "delete\n");
-		nvkm_handle_fini(handle, false);
-		nvkm_handle_destroy(handle);
+		nvif_ioctl(object, "delete\n");
+		nvkm_object_fini(object, false);
+		nvkm_object_del(&object);
 	}
 
 	return ret;
 }
 
 static int
-nvkm_ioctl_mthd(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_mthd(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
-	struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
 	union {
 		struct nvif_ioctl_mthd_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "mthd size %d\n", size);
+	nvif_ioctl(object, "mthd size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "mthd vers %d mthd %02x\n",
-			 args->v0.version, args->v0.method);
-		if (ret = -ENODEV, ofuncs->mthd)
-			ret = ofuncs->mthd(object, args->v0.method, data, size);
+		nvif_ioctl(object, "mthd vers %d mthd %02x\n",
+			   args->v0.version, args->v0.method);
+		ret = nvkm_object_mthd(object, args->v0.method, data, size);
 	}
 
 	return ret;
@@ -221,37 +179,34 @@
 
 
 static int
-nvkm_ioctl_rd(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_rd(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
-	struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
 	union {
 		struct nvif_ioctl_rd_v0 v0;
 	} *args = data;
+	union {
+		u8  b08;
+		u16 b16;
+		u32 b32;
+	} v;
 	int ret;
 
-	nv_ioctl(object, "rd size %d\n", size);
+	nvif_ioctl(object, "rd size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "rd vers %d size %d addr %016llx\n",
-			 args->v0.version, args->v0.size, args->v0.addr);
+		nvif_ioctl(object, "rd vers %d size %d addr %016llx\n",
+			   args->v0.version, args->v0.size, args->v0.addr);
 		switch (args->v0.size) {
 		case 1:
-			if (ret = -ENODEV, ofuncs->rd08) {
-				args->v0.data = nv_ro08(object, args->v0.addr);
-				ret = 0;
-			}
+			ret = nvkm_object_rd08(object, args->v0.addr, &v.b08);
+			args->v0.data = v.b08;
 			break;
 		case 2:
-			if (ret = -ENODEV, ofuncs->rd16) {
-				args->v0.data = nv_ro16(object, args->v0.addr);
-				ret = 0;
-			}
+			ret = nvkm_object_rd16(object, args->v0.addr, &v.b16);
+			args->v0.data = v.b16;
 			break;
 		case 4:
-			if (ret = -ENODEV, ofuncs->rd32) {
-				args->v0.data = nv_ro32(object, args->v0.addr);
-				ret = 0;
-			}
+			ret = nvkm_object_rd32(object, args->v0.addr, &v.b32);
+			args->v0.data = v.b32;
 			break;
 		default:
 			ret = -EINVAL;
@@ -263,104 +218,81 @@
 }
 
 static int
-nvkm_ioctl_wr(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_wr(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
-	struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
 	union {
 		struct nvif_ioctl_wr_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "wr size %d\n", size);
+	nvif_ioctl(object, "wr size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "wr vers %d size %d addr %016llx data %08x\n",
-			 args->v0.version, args->v0.size, args->v0.addr,
-			 args->v0.data);
-		switch (args->v0.size) {
-		case 1:
-			if (ret = -ENODEV, ofuncs->wr08) {
-				nv_wo08(object, args->v0.addr, args->v0.data);
-				ret = 0;
-			}
-			break;
-		case 2:
-			if (ret = -ENODEV, ofuncs->wr16) {
-				nv_wo16(object, args->v0.addr, args->v0.data);
-				ret = 0;
-			}
-			break;
-		case 4:
-			if (ret = -ENODEV, ofuncs->wr32) {
-				nv_wo32(object, args->v0.addr, args->v0.data);
-				ret = 0;
-			}
-			break;
-		default:
-			ret = -EINVAL;
-			break;
-		}
+		nvif_ioctl(object,
+			   "wr vers %d size %d addr %016llx data %08x\n",
+			   args->v0.version, args->v0.size, args->v0.addr,
+			   args->v0.data);
+	} else
+		return ret;
+
+	switch (args->v0.size) {
+	case 1: return nvkm_object_wr08(object, args->v0.addr, args->v0.data);
+	case 2: return nvkm_object_wr16(object, args->v0.addr, args->v0.data);
+	case 4: return nvkm_object_wr32(object, args->v0.addr, args->v0.data);
+	default:
+		break;
 	}
 
-	return ret;
+	return -EINVAL;
 }
 
 static int
-nvkm_ioctl_map(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_map(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
-	struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
 	union {
 		struct nvif_ioctl_map_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "map size %d\n", size);
+	nvif_ioctl(object, "map size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "map vers %d\n", args->v0.version);
-		if (ret = -ENODEV, ofuncs->map) {
-			ret = ofuncs->map(object, &args->v0.handle,
-						  &args->v0.length);
-		}
+		nvif_ioctl(object, "map vers %d\n", args->v0.version);
+		ret = nvkm_object_map(object, &args->v0.handle,
+					      &args->v0.length);
 	}
 
 	return ret;
 }
 
 static int
-nvkm_ioctl_unmap(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_unmap(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
 	union {
 		struct nvif_ioctl_unmap none;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "unmap size %d\n", size);
+	nvif_ioctl(object, "unmap size %d\n", size);
 	if (nvif_unvers(args->none)) {
-		nv_ioctl(object, "unmap\n");
+		nvif_ioctl(object, "unmap\n");
 	}
 
 	return ret;
 }
 
 static int
-nvkm_ioctl_ntfy_new(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_new(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_object *object = handle->object;
-	struct nvkm_ofuncs *ofuncs = object->oclass->ofuncs;
 	union {
 		struct nvif_ioctl_ntfy_new_v0 v0;
 	} *args = data;
 	struct nvkm_event *event;
 	int ret;
 
-	nv_ioctl(object, "ntfy new size %d\n", size);
+	nvif_ioctl(object, "ntfy new size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "ntfy new vers %d event %02x\n",
-			 args->v0.version, args->v0.event);
-		if (ret = -ENODEV, ofuncs->ntfy)
-			ret = ofuncs->ntfy(object, args->v0.event, &event);
+		nvif_ioctl(object, "ntfy new vers %d event %02x\n",
+			   args->v0.version, args->v0.event);
+		ret = nvkm_object_ntfy(object, args->v0.event, &event);
 		if (ret == 0) {
 			ret = nvkm_client_notify_new(object, event, data, size);
 			if (ret >= 0) {
@@ -374,19 +306,18 @@
 }
 
 static int
-nvkm_ioctl_ntfy_del(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_del(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_client *client = nvkm_client(handle->object);
-	struct nvkm_object *object = handle->object;
+	struct nvkm_client *client = object->client;
 	union {
 		struct nvif_ioctl_ntfy_del_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "ntfy del size %d\n", size);
+	nvif_ioctl(object, "ntfy del size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "ntfy del vers %d index %d\n",
-			 args->v0.version, args->v0.index);
+		nvif_ioctl(object, "ntfy del vers %d index %d\n",
+			   args->v0.version, args->v0.index);
 		ret = nvkm_client_notify_del(client, args->v0.index);
 	}
 
@@ -394,19 +325,18 @@
 }
 
 static int
-nvkm_ioctl_ntfy_get(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_get(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_client *client = nvkm_client(handle->object);
-	struct nvkm_object *object = handle->object;
+	struct nvkm_client *client = object->client;
 	union {
 		struct nvif_ioctl_ntfy_get_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "ntfy get size %d\n", size);
+	nvif_ioctl(object, "ntfy get size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "ntfy get vers %d index %d\n",
-			 args->v0.version, args->v0.index);
+		nvif_ioctl(object, "ntfy get vers %d index %d\n",
+			   args->v0.version, args->v0.index);
 		ret = nvkm_client_notify_get(client, args->v0.index);
 	}
 
@@ -414,19 +344,18 @@
 }
 
 static int
-nvkm_ioctl_ntfy_put(struct nvkm_handle *handle, void *data, u32 size)
+nvkm_ioctl_ntfy_put(struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_client *client = nvkm_client(handle->object);
-	struct nvkm_object *object = handle->object;
+	struct nvkm_client *client = object->client;
 	union {
 		struct nvif_ioctl_ntfy_put_v0 v0;
 	} *args = data;
 	int ret;
 
-	nv_ioctl(object, "ntfy put size %d\n", size);
+	nvif_ioctl(object, "ntfy put size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "ntfy put vers %d index %d\n",
-			 args->v0.version, args->v0.index);
+		nvif_ioctl(object, "ntfy put vers %d index %d\n",
+			   args->v0.version, args->v0.index);
 		ret = nvkm_client_notify_put(client, args->v0.index);
 	}
 
@@ -435,7 +364,7 @@
 
 static struct {
 	int version;
-	int (*func)(struct nvkm_handle *, void *, u32);
+	int (*func)(struct nvkm_object *, void *, u32);
 }
 nvkm_ioctl_v0[] = {
 	{ 0x00, nvkm_ioctl_nop },
@@ -454,40 +383,31 @@
 };
 
 static int
-nvkm_ioctl_path(struct nvkm_handle *parent, u32 type, u32 nr, u32 *path,
+nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
 		void *data, u32 size, u8 owner, u8 *route, u64 *token)
 {
-	struct nvkm_handle *handle = parent;
-	struct nvkm_namedb *namedb;
 	struct nvkm_object *object;
 	int ret;
 
-	while ((object = parent->object), nr--) {
-		nv_ioctl(object, "path 0x%08x\n", path[nr]);
-		if (!nv_iclass(object, NV_PARENT_CLASS)) {
-			nv_debug(object, "cannot have children (path)\n");
-			return -EINVAL;
-		}
-
-		if (!(namedb = (void *)nv_pclass(object, NV_NAMEDB_CLASS)) ||
-		    !(handle = nvkm_namedb_get(namedb, path[nr]))) {
-			nv_debug(object, "handle 0x%08x not found\n", path[nr]);
-			return -ENOENT;
-		}
-		nvkm_namedb_put(handle);
-		parent = handle;
+	if (handle)
+		object = nvkm_client_search(client, handle);
+	else
+		object = &client->object;
+	if (unlikely(!object)) {
+		nvif_ioctl(&client->object, "object not found\n");
+		return -ENOENT;
 	}
 
-	if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != handle->route) {
-		nv_ioctl(object, "object route != owner\n");
+	if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) {
+		nvif_ioctl(&client->object, "route != owner\n");
 		return -EACCES;
 	}
-	*route = handle->route;
-	*token = handle->token;
+	*route = object->route;
+	*token = object->token;
 
 	if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
 		if (nvkm_ioctl_v0[type].version == 0)
-			ret = nvkm_ioctl_v0[type].func(handle, data, size);
+			ret = nvkm_ioctl_v0[type].func(object, data, size);
 	}
 
 	return ret;
@@ -497,25 +417,26 @@
 nvkm_ioctl(struct nvkm_client *client, bool supervisor,
 	   void *data, u32 size, void **hack)
 {
+	struct nvkm_object *object = &client->object;
 	union {
 		struct nvif_ioctl_v0 v0;
 	} *args = data;
 	int ret;
 
 	client->super = supervisor;
-	nv_ioctl(client, "size %d\n", size);
+	nvif_ioctl(object, "size %d\n", size);
 
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(client, "vers %d type %02x path %d owner %02x\n",
-			 args->v0.version, args->v0.type, args->v0.path_nr,
-			 args->v0.owner);
-		ret = nvkm_ioctl_path(client->root, args->v0.type,
-				      args->v0.path_nr, args->v0.path,
+		nvif_ioctl(object,
+			   "vers %d type %02x object %016llx owner %02x\n",
+			   args->v0.version, args->v0.type, args->v0.object,
+			   args->v0.owner);
+		ret = nvkm_ioctl_path(client, args->v0.object, args->v0.type,
 				      data, size, args->v0.owner,
 				      &args->v0.route, &args->v0.token);
 	}
 
-	nv_ioctl(client, "return %d\n", ret);
+	nvif_ioctl(object, "return %d\n", ret);
 	if (hack) {
 		*hack = client->data;
 		client->data = NULL;
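
Every ioctl handler now takes the object directly, and dispatch is a flat, version-checked table indexed by request type. A new request type would slot in as another table entry; the handler shape is uniform. The sketch below is hypothetical, shown only to illustrate the nvif_unpack() pattern (as elsewhere in this file, ret is assigned inside the macro):

	static int
	nvkm_ioctl_example(struct nvkm_object *object, void *data, u32 size)
	{
		union {
			struct nvif_ioctl_nop_v0 v0;	/* stand-in payload */
		} *args = data;
		int ret;

		nvif_ioctl(object, "example size %d\n", size);
		if (nvif_unpack(args->v0, 0, 0, false)) {
			/* request validated; act on args->v0 here */
		}
		return ret;
	}
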
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
new file mode 100644
index 0000000..8903c04
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include <core/memory.h>
+#include <subdev/instmem.h>
+
+void
+nvkm_memory_ctor(const struct nvkm_memory_func *func,
+		 struct nvkm_memory *memory)
+{
+	memory->func = func;
+}
+
+void
+nvkm_memory_del(struct nvkm_memory **pmemory)
+{
+	struct nvkm_memory *memory = *pmemory;
+	if (memory && !WARN_ON(!memory->func)) {
+		if (memory->func->dtor)
+			*pmemory = memory->func->dtor(memory);
+		kfree(*pmemory);
+		*pmemory = NULL;
+	}
+}
+
+int
+nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
+		u64 size, u32 align, bool zero,
+		struct nvkm_memory **pmemory)
+{
+	struct nvkm_instmem *imem = device->imem;
+	struct nvkm_memory *memory;
+	int ret = -ENOSYS;
+
+	if (unlikely(target != NVKM_MEM_TARGET_INST || !imem))
+		return -ENOSYS;
+
+	ret = nvkm_instobj_new(imem, size, align, zero, &memory);
+	if (ret)
+		return ret;
+
+	*pmemory = memory;
+	return 0;
+}
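
nvkm_memory is the new backing-store abstraction that gpuobjs (and future users) sit on top of; for now, only the instance-memory target is wired up. Expected usage, mirroring nvkm_gpuobj_ctor() above (values are arbitrary):

	struct nvkm_memory *memory = NULL;
	int ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				  0x1000, 0x100, true, &memory);
	if (ret)
		return ret;

	/* nvkm_memory_addr()/nvkm_memory_size() report placement, and the
	 * nvkm_kmap()/nvkm_wo32()/nvkm_done() accessors work as they do
	 * for gpuobjs */
	nvkm_memory_del(&memory);
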
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/mm.c b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
index 7f458df..09a1eee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/mm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
@@ -26,7 +26,7 @@
 #define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL :          \
 	list_entry((root)->nl_entry.dir, struct nvkm_mm_node, nl_entry)
 
-static void
+void
 nvkm_mm_dump(struct nvkm_mm *mm, const char *header)
 {
 	struct nvkm_mm_node *node;
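
nvkm_mm_dump() loses its static qualifier so allocator state can be dumped from other files; one likely use is logging heap state on a failed suballocation (illustrative only, reusing the nvkm_mm_head() call shape from gpuobj.c):

	ret = nvkm_mm_head(&parent->heap, 0, 1, size, size, 1, &node);
	if (ret)
		nvkm_mm_dump(&parent->heap, "suballoc failed");
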
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/namedb.c b/drivers/gpu/drm/nouveau/nvkm/core/namedb.c
deleted file mode 100644
index 6400767..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/namedb.c
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/namedb.h>
-#include <core/gpuobj.h>
-#include <core/handle.h>
-
-static struct nvkm_handle *
-nvkm_namedb_lookup(struct nvkm_namedb *namedb, u32 name)
-{
-	struct nvkm_handle *handle;
-
-	list_for_each_entry(handle, &namedb->list, node) {
-		if (handle->name == name)
-			return handle;
-	}
-
-	return NULL;
-}
-
-static struct nvkm_handle *
-nvkm_namedb_lookup_class(struct nvkm_namedb *namedb, u16 oclass)
-{
-	struct nvkm_handle *handle;
-
-	list_for_each_entry(handle, &namedb->list, node) {
-		if (nv_mclass(handle->object) == oclass)
-			return handle;
-	}
-
-	return NULL;
-}
-
-static struct nvkm_handle *
-nvkm_namedb_lookup_vinst(struct nvkm_namedb *namedb, u64 vinst)
-{
-	struct nvkm_handle *handle;
-
-	list_for_each_entry(handle, &namedb->list, node) {
-		if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
-			if (nv_gpuobj(handle->object)->addr == vinst)
-				return handle;
-		}
-	}
-
-	return NULL;
-}
-
-static struct nvkm_handle *
-nvkm_namedb_lookup_cinst(struct nvkm_namedb *namedb, u32 cinst)
-{
-	struct nvkm_handle *handle;
-
-	list_for_each_entry(handle, &namedb->list, node) {
-		if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
-			if (nv_gpuobj(handle->object)->node &&
-			    nv_gpuobj(handle->object)->node->offset == cinst)
-				return handle;
-		}
-	}
-
-	return NULL;
-}
-
-int
-nvkm_namedb_insert(struct nvkm_namedb *namedb, u32 name,
-		   struct nvkm_object *object,
-		   struct nvkm_handle *handle)
-{
-	int ret = -EEXIST;
-	write_lock_irq(&namedb->lock);
-	if (!nvkm_namedb_lookup(namedb, name)) {
-		nvkm_object_ref(object, &handle->object);
-		handle->namedb = namedb;
-		list_add(&handle->node, &namedb->list);
-		ret = 0;
-	}
-	write_unlock_irq(&namedb->lock);
-	return ret;
-}
-
-void
-nvkm_namedb_remove(struct nvkm_handle *handle)
-{
-	struct nvkm_namedb *namedb = handle->namedb;
-	struct nvkm_object *object = handle->object;
-	write_lock_irq(&namedb->lock);
-	list_del(&handle->node);
-	write_unlock_irq(&namedb->lock);
-	nvkm_object_ref(NULL, &object);
-}
-
-struct nvkm_handle *
-nvkm_namedb_get(struct nvkm_namedb *namedb, u32 name)
-{
-	struct nvkm_handle *handle;
-	read_lock(&namedb->lock);
-	handle = nvkm_namedb_lookup(namedb, name);
-	if (handle == NULL)
-		read_unlock(&namedb->lock);
-	return handle;
-}
-
-struct nvkm_handle *
-nvkm_namedb_get_class(struct nvkm_namedb *namedb, u16 oclass)
-{
-	struct nvkm_handle *handle;
-	read_lock(&namedb->lock);
-	handle = nvkm_namedb_lookup_class(namedb, oclass);
-	if (handle == NULL)
-		read_unlock(&namedb->lock);
-	return handle;
-}
-
-struct nvkm_handle *
-nvkm_namedb_get_vinst(struct nvkm_namedb *namedb, u64 vinst)
-{
-	struct nvkm_handle *handle;
-	read_lock(&namedb->lock);
-	handle = nvkm_namedb_lookup_vinst(namedb, vinst);
-	if (handle == NULL)
-		read_unlock(&namedb->lock);
-	return handle;
-}
-
-struct nvkm_handle *
-nvkm_namedb_get_cinst(struct nvkm_namedb *namedb, u32 cinst)
-{
-	struct nvkm_handle *handle;
-	read_lock(&namedb->lock);
-	handle = nvkm_namedb_lookup_cinst(namedb, cinst);
-	if (handle == NULL)
-		read_unlock(&namedb->lock);
-	return handle;
-}
-
-void
-nvkm_namedb_put(struct nvkm_handle *handle)
-{
-	if (handle)
-		read_unlock(&handle->namedb->lock);
-}
-
-int
-nvkm_namedb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 pclass,
-		    struct nvkm_oclass *sclass, u64 engcls,
-		    int length, void **pobject)
-{
-	struct nvkm_namedb *namedb;
-	int ret;
-
-	ret = nvkm_parent_create_(parent, engine, oclass, pclass |
-				  NV_NAMEDB_CLASS, sclass, engcls,
-				  length, pobject);
-	namedb = *pobject;
-	if (ret)
-		return ret;
-
-	rwlock_init(&namedb->lock);
-	INIT_LIST_HEAD(&namedb->list);
-	return 0;
-}
-
-int
-_nvkm_namedb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct nvkm_namedb *object;
-	int ret;
-
-	ret = nvkm_namedb_create(parent, engine, oclass, 0, NULL, 0, &object);
-	*pobject = nv_object(object);
-	if (ret)
-		return ret;
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index 979f362..67aa722 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -22,309 +22,243 @@
  * Authors: Ben Skeggs
  */
 #include <core/object.h>
+#include <core/client.h>
 #include <core/engine.h>
 
-#ifdef NVKM_OBJECT_MAGIC
-static struct list_head _objlist = LIST_HEAD_INIT(_objlist);
-static DEFINE_SPINLOCK(_objlist_lock);
-#endif
-
 int
-nvkm_object_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 pclass,
-		    int size, void **pobject)
+nvkm_object_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
 {
-	struct nvkm_object *object;
-
-	object = *pobject = kzalloc(size, GFP_KERNEL);
-	if (!object)
-		return -ENOMEM;
-
-	nvkm_object_ref(parent, &object->parent);
-	nvkm_object_ref(engine, (struct nvkm_object **)&object->engine);
-	object->oclass = oclass;
-	object->oclass->handle |= pclass;
-	atomic_set(&object->refcount, 1);
-	atomic_set(&object->usecount, 0);
-
-#ifdef NVKM_OBJECT_MAGIC
-	object->_magic = NVKM_OBJECT_MAGIC;
-	spin_lock(&_objlist_lock);
-	list_add(&object->list, &_objlist);
-	spin_unlock(&_objlist_lock);
-#endif
-	return 0;
+	if (likely(object->func->mthd))
+		return object->func->mthd(object, mthd, data, size);
+	return -ENODEV;
 }
 
 int
-_nvkm_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nvkm_object_ntfy(struct nvkm_object *object, u32 mthd,
+		 struct nvkm_event **pevent)
 {
-	if (size != 0)
-		return -ENOSYS;
-	return nvkm_object_create(parent, engine, oclass, 0, pobject);
-}
-
-void
-nvkm_object_destroy(struct nvkm_object *object)
-{
-#ifdef NVKM_OBJECT_MAGIC
-	spin_lock(&_objlist_lock);
-	list_del(&object->list);
-	spin_unlock(&_objlist_lock);
-#endif
-	nvkm_object_ref(NULL, (struct nvkm_object **)&object->engine);
-	nvkm_object_ref(NULL, &object->parent);
-	kfree(object);
+	if (likely(object->func->ntfy))
+		return object->func->ntfy(object, mthd, pevent);
+	return -ENODEV;
 }
 
 int
-nvkm_object_init(struct nvkm_object *object)
+nvkm_object_map(struct nvkm_object *object, u64 *addr, u32 *size)
 {
-	return 0;
+	if (likely(object->func->map))
+		return object->func->map(object, addr, size);
+	return -ENODEV;
+}
+
+int
+nvkm_object_rd08(struct nvkm_object *object, u64 addr, u8 *data)
+{
+	if (likely(object->func->rd08))
+		return object->func->rd08(object, addr, data);
+	return -ENODEV;
+}
+
+int
+nvkm_object_rd16(struct nvkm_object *object, u64 addr, u16 *data)
+{
+	if (likely(object->func->rd16))
+		return object->func->rd16(object, addr, data);
+	return -ENODEV;
+}
+
+int
+nvkm_object_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+	if (likely(object->func->rd32))
+		return object->func->rd32(object, addr, data);
+	return -ENODEV;
+}
+
+int
+nvkm_object_wr08(struct nvkm_object *object, u64 addr, u8 data)
+{
+	if (likely(object->func->wr08))
+		return object->func->wr08(object, addr, data);
+	return -ENODEV;
+}
+
+int
+nvkm_object_wr16(struct nvkm_object *object, u64 addr, u16 data)
+{
+	if (likely(object->func->wr16))
+		return object->func->wr16(object, addr, data);
+	return -ENODEV;
+}
+
+int
+nvkm_object_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+	if (likely(object->func->wr32))
+		return object->func->wr32(object, addr, data);
+	return -ENODEV;
+}
+
+int
+nvkm_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *gpuobj,
+		 int align, struct nvkm_gpuobj **pgpuobj)
+{
+	if (object->func->bind)
+		return object->func->bind(object, gpuobj, align, pgpuobj);
+	return -ENODEV;
 }
 
 int
 nvkm_object_fini(struct nvkm_object *object, bool suspend)
 {
+	const char *action = suspend ? "suspend" : "fini";
+	struct nvkm_object *child;
+	s64 time;
+	int ret;
+
+	nvif_debug(object, "%s children...\n", action);
+	time = ktime_to_us(ktime_get());
+	list_for_each_entry(child, &object->tree, head) {
+		ret = nvkm_object_fini(child, suspend);
+		if (ret && suspend)
+			goto fail_child;
+	}
+
+	nvif_debug(object, "%s running...\n", action);
+	if (object->func->fini) {
+		ret = object->func->fini(object, suspend);
+		if (ret) {
+			nvif_error(object, "%s failed with %d\n", action, ret);
+			if (suspend)
+				goto fail;
+		}
+	}
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvif_debug(object, "%s completed in %lldus\n", action, time);
 	return 0;
+
+fail:
+	if (object->func->init) {
+		int rret = object->func->init(object);
+		if (rret)
+			nvif_fatal(object, "failed to restart, %d\n", rret);
+	}
+fail_child:
+	list_for_each_entry_continue_reverse(child, &object->tree, head) {
+		nvkm_object_init(child);
+	}
+	return ret;
 }
 
-struct nvkm_ofuncs
-nvkm_object_ofuncs = {
-	.ctor = _nvkm_object_ctor,
-	.dtor = nvkm_object_destroy,
-	.init = nvkm_object_init,
-	.fini = nvkm_object_fini,
+int
+nvkm_object_init(struct nvkm_object *object)
+{
+	struct nvkm_object *child;
+	s64 time;
+	int ret;
+
+	nvif_debug(object, "init running...\n");
+	time = ktime_to_us(ktime_get());
+	if (object->func->init) {
+		ret = object->func->init(object);
+		if (ret)
+			goto fail;
+	}
+
+	nvif_debug(object, "init children...\n");
+	list_for_each_entry(child, &object->tree, head) {
+		ret = nvkm_object_init(child);
+		if (ret)
+			goto fail_child;
+	}
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvif_debug(object, "init completed in %lldus\n", time);
+	return 0;
+
+fail_child:
+	list_for_each_entry_continue_reverse(child, &object->tree, head)
+		nvkm_object_fini(child, false);
+fail:
+	nvif_error(object, "init failed with %d\n", ret);
+	if (object->func->fini)
+		object->func->fini(object, false);
+	return ret;
+}
+
+void *
+nvkm_object_dtor(struct nvkm_object *object)
+{
+	struct nvkm_object *child, *ctemp;
+	void *data = object;
+	s64 time;
+
+	nvif_debug(object, "destroy children...\n");
+	time = ktime_to_us(ktime_get());
+	list_for_each_entry_safe(child, ctemp, &object->tree, head) {
+		nvkm_object_del(&child);
+	}
+
+	nvif_debug(object, "destroy running...\n");
+	if (object->func->dtor)
+		data = object->func->dtor(object);
+	nvkm_engine_unref(&object->engine);
+	time = ktime_to_us(ktime_get()) - time;
+	nvif_debug(object, "destroy completed in %lldus...\n", time);
+	return data;
+}
+
+void
+nvkm_object_del(struct nvkm_object **pobject)
+{
+	struct nvkm_object *object = *pobject;
+	if (object && !WARN_ON(!object->func)) {
+		*pobject = nvkm_object_dtor(object);
+		nvkm_client_remove(object->client, object);
+		list_del(&object->head);
+		kfree(*pobject);
+		*pobject = NULL;
+	}
+}
+
+void
+nvkm_object_ctor(const struct nvkm_object_func *func,
+		 const struct nvkm_oclass *oclass, struct nvkm_object *object)
+{
+	object->func = func;
+	object->client = oclass->client;
+	object->engine = nvkm_engine_ref(oclass->engine);
+	object->oclass = oclass->base.oclass;
+	object->handle = oclass->handle;
+	INIT_LIST_HEAD(&object->head);
+	INIT_LIST_HEAD(&object->tree);
+	RB_CLEAR_NODE(&object->node);
+	WARN_ON(oclass->engine && !object->engine);
+}
+
+int
+nvkm_object_new_(const struct nvkm_object_func *func,
+		 const struct nvkm_oclass *oclass, void *data, u32 size,
+		 struct nvkm_object **pobject)
+{
+	if (size == 0) {
+		if (!(*pobject = kzalloc(sizeof(**pobject), GFP_KERNEL)))
+			return -ENOMEM;
+		nvkm_object_ctor(func, oclass, *pobject);
+		return 0;
+	}
+	return -ENOSYS;
+}
+
+static const struct nvkm_object_func
+nvkm_object_func = {
 };
 
 int
-nvkm_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
+nvkm_object_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+		struct nvkm_object **pobject)
 {
-	struct nvkm_ofuncs *ofuncs = oclass->ofuncs;
-	struct nvkm_object *object = NULL;
-	int ret;
-
-	ret = ofuncs->ctor(parent, engine, oclass, data, size, &object);
-	*pobject = object;
-	if (ret < 0) {
-		if (ret != -ENODEV) {
-			nv_error(parent, "failed to create 0x%08x, %d\n",
-				 oclass->handle, ret);
-		}
-
-		if (object) {
-			ofuncs->dtor(object);
-			*pobject = NULL;
-		}
-
-		return ret;
-	}
-
-	if (ret == 0) {
-		nv_trace(object, "created\n");
-		atomic_set(&object->refcount, 1);
-	}
-
-	return 0;
-}
-
-static void
-nvkm_object_dtor(struct nvkm_object *object)
-{
-	nv_trace(object, "destroying\n");
-	nv_ofuncs(object)->dtor(object);
-}
-
-void
-nvkm_object_ref(struct nvkm_object *obj, struct nvkm_object **ref)
-{
-	if (obj) {
-		atomic_inc(&obj->refcount);
-		nv_trace(obj, "inc() == %d\n", atomic_read(&obj->refcount));
-	}
-
-	if (*ref) {
-		int dead = atomic_dec_and_test(&(*ref)->refcount);
-		nv_trace(*ref, "dec() == %d\n", atomic_read(&(*ref)->refcount));
-		if (dead)
-			nvkm_object_dtor(*ref);
-	}
-
-	*ref = obj;
-}
-
-int
-nvkm_object_inc(struct nvkm_object *object)
-{
-	int ref = atomic_add_return(1, &object->usecount);
-	int ret;
-
-	nv_trace(object, "use(+1) == %d\n", atomic_read(&object->usecount));
-	if (ref != 1)
-		return 0;
-
-	nv_trace(object, "initialising...\n");
-	if (object->parent) {
-		ret = nvkm_object_inc(object->parent);
-		if (ret) {
-			nv_error(object, "parent failed, %d\n", ret);
-			goto fail_parent;
-		}
-	}
-
-	if (object->engine) {
-		mutex_lock(&nv_subdev(object->engine)->mutex);
-		ret = nvkm_object_inc(&object->engine->subdev.object);
-		mutex_unlock(&nv_subdev(object->engine)->mutex);
-		if (ret) {
-			nv_error(object, "engine failed, %d\n", ret);
-			goto fail_engine;
-		}
-	}
-
-	ret = nv_ofuncs(object)->init(object);
-	atomic_set(&object->usecount, 1);
-	if (ret) {
-		nv_error(object, "init failed, %d\n", ret);
-		goto fail_self;
-	}
-
-	nv_trace(object, "initialised\n");
-	return 0;
-
-fail_self:
-	if (object->engine) {
-		mutex_lock(&nv_subdev(object->engine)->mutex);
-		nvkm_object_dec(&object->engine->subdev.object, false);
-		mutex_unlock(&nv_subdev(object->engine)->mutex);
-	}
-fail_engine:
-	if (object->parent)
-		 nvkm_object_dec(object->parent, false);
-fail_parent:
-	atomic_dec(&object->usecount);
-	return ret;
-}
-
-static int
-nvkm_object_decf(struct nvkm_object *object)
-{
-	int ret;
-
-	nv_trace(object, "stopping...\n");
-
-	ret = nv_ofuncs(object)->fini(object, false);
-	atomic_set(&object->usecount, 0);
-	if (ret)
-		nv_warn(object, "failed fini, %d\n", ret);
-
-	if (object->engine) {
-		mutex_lock(&nv_subdev(object->engine)->mutex);
-		nvkm_object_dec(&object->engine->subdev.object, false);
-		mutex_unlock(&nv_subdev(object->engine)->mutex);
-	}
-
-	if (object->parent)
-		nvkm_object_dec(object->parent, false);
-
-	nv_trace(object, "stopped\n");
-	return 0;
-}
-
-static int
-nvkm_object_decs(struct nvkm_object *object)
-{
-	int ret, rret;
-
-	nv_trace(object, "suspending...\n");
-
-	ret = nv_ofuncs(object)->fini(object, true);
-	atomic_set(&object->usecount, 0);
-	if (ret) {
-		nv_error(object, "failed suspend, %d\n", ret);
-		return ret;
-	}
-
-	if (object->engine) {
-		mutex_lock(&nv_subdev(object->engine)->mutex);
-		ret = nvkm_object_dec(&object->engine->subdev.object, true);
-		mutex_unlock(&nv_subdev(object->engine)->mutex);
-		if (ret) {
-			nv_warn(object, "engine failed suspend, %d\n", ret);
-			goto fail_engine;
-		}
-	}
-
-	if (object->parent) {
-		ret = nvkm_object_dec(object->parent, true);
-		if (ret) {
-			nv_warn(object, "parent failed suspend, %d\n", ret);
-			goto fail_parent;
-		}
-	}
-
-	nv_trace(object, "suspended\n");
-	return 0;
-
-fail_parent:
-	if (object->engine) {
-		mutex_lock(&nv_subdev(object->engine)->mutex);
-		rret = nvkm_object_inc(&object->engine->subdev.object);
-		mutex_unlock(&nv_subdev(object->engine)->mutex);
-		if (rret)
-			nv_fatal(object, "engine failed to reinit, %d\n", rret);
-	}
-
-fail_engine:
-	rret = nv_ofuncs(object)->init(object);
-	if (rret)
-		nv_fatal(object, "failed to reinit, %d\n", rret);
-
-	return ret;
-}
-
-int
-nvkm_object_dec(struct nvkm_object *object, bool suspend)
-{
-	int ref = atomic_add_return(-1, &object->usecount);
-	int ret;
-
-	nv_trace(object, "use(-1) == %d\n", atomic_read(&object->usecount));
-
-	if (ref == 0) {
-		if (suspend)
-			ret = nvkm_object_decs(object);
-		else
-			ret = nvkm_object_decf(object);
-
-		if (ret) {
-			atomic_inc(&object->usecount);
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-void
-nvkm_object_debug(void)
-{
-#ifdef NVKM_OBJECT_MAGIC
-	struct nvkm_object *object;
-	if (!list_empty(&_objlist)) {
-		nv_fatal(NULL, "*******************************************\n");
-		nv_fatal(NULL, "* AIIIII! object(s) still exist!!!\n");
-		nv_fatal(NULL, "*******************************************\n");
-		list_for_each_entry(object, &_objlist, list) {
-			nv_fatal(object, "%p/%p/%d/%d\n",
-				 object->parent, object->engine,
-				 atomic_read(&object->refcount),
-				 atomic_read(&object->usecount));
-		}
-	}
-#endif
+	const struct nvkm_object_func *func =
+		oclass->base.func ? oclass->base.func : &nvkm_object_func;
+	return nvkm_object_new_(func, oclass, data, size, pobject);
 }
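
The rework above drops the old ofuncs/refcount machinery in favour of a single
per-object function table: every accessor (mthd, ntfy, map, rd/wr, bind) is a
thin dispatcher that returns -ENODEV when the hook is absent, and init/fini
recurse over the child tree, rolling completed siblings back on failure.  A
minimal sketch of a backend plugging into this interface; the hypo_* names are
illustrative, not part of the patch:

/* Sketch only: a trivial backend for the reworked object interface.
 * Assumes the nvkm_object/nvkm_object_func definitions added above;
 * hypo_* identifiers are hypothetical. */
#include <core/object.h>

struct hypo_object {
	struct nvkm_object base;
	u32 state;
};

static int
hypo_init(struct nvkm_object *object)
{
	struct hypo_object *hypo =
		container_of(object, struct hypo_object, base);
	hypo->state = 1;	/* bring state up; failure rolls siblings back */
	return 0;
}

static int
hypo_fini(struct nvkm_object *object, bool suspend)
{
	struct hypo_object *hypo =
		container_of(object, struct hypo_object, base);
	hypo->state = 0;	/* a failed suspend re-inits the whole tree */
	return 0;
}

static const struct nvkm_object_func
hypo_object_func = {
	.init = hypo_init,
	.fini = hypo_fini,
	/* unset hooks (mthd, map, rd32, ...) now fail cleanly with -ENODEV */
};

static int
hypo_new(const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
	struct hypo_object *hypo = kzalloc(sizeof(*hypo), GFP_KERNEL);
	if (!hypo)
		return -ENOMEM;
	nvkm_object_ctor(&hypo_object_func, oclass, &hypo->base);
	*pobject = &hypo->base;
	return 0;
}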
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
new file mode 100644
index 0000000..e31a047
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include <core/oproxy.h>
+
+static int
+nvkm_oproxy_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	return nvkm_object_mthd(nvkm_oproxy(object)->object, mthd, data, size);
+}
+
+static int
+nvkm_oproxy_ntfy(struct nvkm_object *object, u32 mthd,
+		 struct nvkm_event **pevent)
+{
+	return nvkm_object_ntfy(nvkm_oproxy(object)->object, mthd, pevent);
+}
+
+static int
+nvkm_oproxy_map(struct nvkm_object *object, u64 *addr, u32 *size)
+{
+	return nvkm_object_map(nvkm_oproxy(object)->object, addr, size);
+}
+
+static int
+nvkm_oproxy_rd08(struct nvkm_object *object, u64 addr, u8 *data)
+{
+	return nvkm_object_rd08(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_rd16(struct nvkm_object *object, u64 addr, u16 *data)
+{
+	return nvkm_object_rd16(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+	return nvkm_object_rd32(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_wr08(struct nvkm_object *object, u64 addr, u8 data)
+{
+	return nvkm_object_wr08(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_wr16(struct nvkm_object *object, u64 addr, u16 data)
+{
+	return nvkm_object_wr16(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+	return nvkm_object_wr32(nvkm_oproxy(object)->object, addr, data);
+}
+
+static int
+nvkm_oproxy_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		 int align, struct nvkm_gpuobj **pgpuobj)
+{
+	return nvkm_object_bind(nvkm_oproxy(object)->object,
+				parent, align, pgpuobj);
+}
+
+static int
+nvkm_oproxy_sclass(struct nvkm_object *object, int index,
+		   struct nvkm_oclass *oclass)
+{
+	struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+	oclass->parent = oproxy->object;
+	if (!oproxy->object->func->sclass)
+		return -ENODEV;
+	return oproxy->object->func->sclass(oproxy->object, index, oclass);
+}
+
+static int
+nvkm_oproxy_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+	int ret;
+
+	if (oproxy->func->fini[0]) {
+		ret = oproxy->func->fini[0](oproxy, suspend);
+		if (ret && suspend)
+			return ret;
+	}
+
+	if (oproxy->object->func->fini) {
+		ret = oproxy->object->func->fini(oproxy->object, suspend);
+		if (ret && suspend)
+			return ret;
+	}
+
+	if (oproxy->func->fini[1]) {
+		ret = oproxy->func->fini[1](oproxy, suspend);
+		if (ret && suspend)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+nvkm_oproxy_init(struct nvkm_object *object)
+{
+	struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+	int ret;
+
+	if (oproxy->func->init[0]) {
+		ret = oproxy->func->init[0](oproxy);
+		if (ret)
+			return ret;
+	}
+
+	if (oproxy->object->func->init) {
+		ret = oproxy->object->func->init(oproxy->object);
+		if (ret)
+			return ret;
+	}
+
+	if (oproxy->func->init[1]) {
+		ret = oproxy->func->init[1](oproxy);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void *
+nvkm_oproxy_dtor(struct nvkm_object *object)
+{
+	struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+	if (oproxy->func->dtor[0])
+		oproxy->func->dtor[0](oproxy);
+	nvkm_object_del(&oproxy->object);
+	if (oproxy->func->dtor[1])
+		oproxy->func->dtor[1](oproxy);
+	return oproxy;
+}
+
+static const struct nvkm_object_func
+nvkm_oproxy_func = {
+	.dtor = nvkm_oproxy_dtor,
+	.init = nvkm_oproxy_init,
+	.fini = nvkm_oproxy_fini,
+	.mthd = nvkm_oproxy_mthd,
+	.ntfy = nvkm_oproxy_ntfy,
+	.map = nvkm_oproxy_map,
+	.rd08 = nvkm_oproxy_rd08,
+	.rd16 = nvkm_oproxy_rd16,
+	.rd32 = nvkm_oproxy_rd32,
+	.wr08 = nvkm_oproxy_wr08,
+	.wr16 = nvkm_oproxy_wr16,
+	.wr32 = nvkm_oproxy_wr32,
+	.bind = nvkm_oproxy_bind,
+	.sclass = nvkm_oproxy_sclass,
+};
+
+void
+nvkm_oproxy_ctor(const struct nvkm_oproxy_func *func,
+		 const struct nvkm_oclass *oclass, struct nvkm_oproxy *oproxy)
+{
+	nvkm_object_ctor(&nvkm_oproxy_func, oclass, &oproxy->base);
+	oproxy->func = func;
+}
+
+int
+nvkm_oproxy_new_(const struct nvkm_oproxy_func *func,
+		 const struct nvkm_oclass *oclass, struct nvkm_oproxy **poproxy)
+{
+	if (!(*poproxy = kzalloc(sizeof(**poproxy), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_oproxy_ctor(func, oclass, *poproxy);
+	return 0;
+}
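
The new oproxy type is a transparent wrapper: every operation on the proxy is
forwarded to the wrapped oproxy->object, while the owner gets paired hooks
around the lifecycle calls (index 0 fires before the inner object, index 1
after, for dtor/init/fini).  A hedged sketch of a caller, again with
hypothetical names:

/* Sketch: wrap an existing object in a proxy that intercepts fini.
 * hypo_* names are illustrative; the oproxy API is as added above. */
#include <core/oproxy.h>

static int
hypo_prefini(struct nvkm_oproxy *oproxy, bool suspend)
{
	/* runs before the wrapped object's own fini */
	return 0;
}

static const struct nvkm_oproxy_func
hypo_oproxy_func = {
	.fini[0] = hypo_prefini,
	/* .fini[1] would run after the inner object's fini */
};

static int
hypo_wrap(const struct nvkm_oclass *oclass, struct nvkm_object *inner,
	  struct nvkm_object **pobject)
{
	struct nvkm_oproxy *oproxy;
	int ret = nvkm_oproxy_new_(&hypo_oproxy_func, oclass, &oproxy);
	if (ret)
		return ret;
	oproxy->object = inner;	/* dtor will nvkm_object_del() this for us */
	*pobject = &oproxy->base;
	return 0;
}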
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/option.c b/drivers/gpu/drm/nouveau/nvkm/core/option.c
index 19d153f..3e62cf8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/option.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/option.c
@@ -73,6 +73,24 @@
 	return value;
 }
 
+long
+nvkm_longopt(const char *optstr, const char *opt, long value)
+{
+	long result = value;
+	int arglen;
+	char *s;
+
+	optstr = nvkm_stropt(optstr, opt, &arglen);
+	if (optstr && (s = kstrndup(optstr, arglen, GFP_KERNEL))) {
+		int ret = kstrtol(s, 0, &value);
+		if (ret == 0)
+			result = value;
+		kfree(s);
+	}
+
+	return result;
+}
+
 int
 nvkm_dbgopt(const char *optstr, const char *sub)
 {
@@ -95,7 +113,7 @@
 				else if (!strncasecmpz(optstr, "warn", len))
 					level = NV_DBG_WARN;
 				else if (!strncasecmpz(optstr, "info", len))
-					level = NV_DBG_INFO_NORMAL;
+					level = NV_DBG_INFO;
 				else if (!strncasecmpz(optstr, "debug", len))
 					level = NV_DBG_DEBUG;
 				else if (!strncasecmpz(optstr, "trace", len))
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/parent.c b/drivers/gpu/drm/nouveau/nvkm/core/parent.c
deleted file mode 100644
index dd56cd1..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/parent.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/parent.h>
-#include <core/client.h>
-#include <core/engine.h>
-
-int
-nvkm_parent_sclass(struct nvkm_object *parent, u16 handle,
-		   struct nvkm_object **pengine,
-		   struct nvkm_oclass **poclass)
-{
-	struct nvkm_sclass *sclass;
-	struct nvkm_engine *engine;
-	struct nvkm_oclass *oclass;
-	u64 mask;
-
-	sclass = nv_parent(parent)->sclass;
-	while (sclass) {
-		if ((sclass->oclass->handle & 0xffff) == handle) {
-			*pengine = &parent->engine->subdev.object;
-			*poclass = sclass->oclass;
-			return 0;
-		}
-
-		sclass = sclass->sclass;
-	}
-
-	mask = nv_parent(parent)->engine;
-	while (mask) {
-		int i = __ffs64(mask);
-
-		if (nv_iclass(parent, NV_CLIENT_CLASS))
-			engine = nv_engine(nv_client(parent)->device);
-		else
-			engine = nvkm_engine(parent, i);
-
-		if (engine) {
-			oclass = engine->sclass;
-			while (oclass->ofuncs) {
-				if ((oclass->handle & 0xffff) == handle) {
-					*pengine = nv_object(engine);
-					*poclass = oclass;
-					return 0;
-				}
-				oclass++;
-			}
-		}
-
-		mask &= ~(1ULL << i);
-	}
-
-	return -EINVAL;
-}
-
-int
-nvkm_parent_lclass(struct nvkm_object *parent, u32 *lclass, int size)
-{
-	struct nvkm_sclass *sclass;
-	struct nvkm_engine *engine;
-	struct nvkm_oclass *oclass;
-	int nr = -1, i;
-	u64 mask;
-
-	sclass = nv_parent(parent)->sclass;
-	while (sclass) {
-		if (++nr < size)
-			lclass[nr] = sclass->oclass->handle & 0xffff;
-		sclass = sclass->sclass;
-	}
-
-	mask = nv_parent(parent)->engine;
-	while (i = __ffs64(mask), mask) {
-		engine = nvkm_engine(parent, i);
-		if (engine && (oclass = engine->sclass)) {
-			while (oclass->ofuncs) {
-				if (++nr < size)
-					lclass[nr] = oclass->handle & 0xffff;
-				oclass++;
-			}
-		}
-
-		mask &= ~(1ULL << i);
-	}
-
-	return nr + 1;
-}
-
-int
-nvkm_parent_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 pclass,
-		    struct nvkm_oclass *sclass, u64 engcls,
-		    int size, void **pobject)
-{
-	struct nvkm_parent *object;
-	struct nvkm_sclass *nclass;
-	int ret;
-
-	ret = nvkm_object_create_(parent, engine, oclass, pclass |
-				  NV_PARENT_CLASS, size, pobject);
-	object = *pobject;
-	if (ret)
-		return ret;
-
-	while (sclass && sclass->ofuncs) {
-		nclass = kzalloc(sizeof(*nclass), GFP_KERNEL);
-		if (!nclass)
-			return -ENOMEM;
-
-		nclass->sclass = object->sclass;
-		object->sclass = nclass;
-		nclass->engine = engine ? nv_engine(engine) : NULL;
-		nclass->oclass = sclass;
-		sclass++;
-	}
-
-	object->engine = engcls;
-	return 0;
-}
-
-void
-nvkm_parent_destroy(struct nvkm_parent *parent)
-{
-	struct nvkm_sclass *sclass;
-
-	while ((sclass = parent->sclass)) {
-		parent->sclass = sclass->sclass;
-		kfree(sclass);
-	}
-
-	nvkm_object_destroy(&parent->object);
-}
-
-
-void
-_nvkm_parent_dtor(struct nvkm_object *object)
-{
-	nvkm_parent_destroy(nv_parent(object));
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/printk.c b/drivers/gpu/drm/nouveau/nvkm/core/printk.c
deleted file mode 100644
index 4a220eb..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/core/printk.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include <core/printk.h>
-#include <core/client.h>
-#include <core/device.h>
-
-int nv_info_debug_level = NV_DBG_INFO_NORMAL;
-
-void
-nv_printk_(struct nvkm_object *object, int level, const char *fmt, ...)
-{
-	static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
-	const char *pfx;
-	char mfmt[256];
-	va_list args;
-
-	switch (level) {
-	case NV_DBG_FATAL:
-		pfx = KERN_CRIT;
-		break;
-	case NV_DBG_ERROR:
-		pfx = KERN_ERR;
-		break;
-	case NV_DBG_WARN:
-		pfx = KERN_WARNING;
-		break;
-	case NV_DBG_INFO_NORMAL:
-		pfx = KERN_INFO;
-		break;
-	case NV_DBG_DEBUG:
-	case NV_DBG_PARANOIA:
-	case NV_DBG_TRACE:
-	case NV_DBG_SPAM:
-	default:
-		pfx = KERN_DEBUG;
-		break;
-	}
-
-	if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
-		struct nvkm_object *device;
-		struct nvkm_object *subdev;
-		char obuf[64], *ofmt = "";
-
-		if (object->engine == NULL) {
-			subdev = object;
-			while (subdev && !nv_iclass(subdev, NV_SUBDEV_CLASS))
-				subdev = subdev->parent;
-		} else {
-			subdev = &object->engine->subdev.object;
-		}
-
-		device = subdev;
-		if (device->parent)
-			device = device->parent;
-
-		if (object != subdev) {
-			snprintf(obuf, sizeof(obuf), "[0x%08x]",
-				 nv_hclass(object));
-			ofmt = obuf;
-		}
-
-		if (level > nv_subdev(subdev)->debug)
-			return;
-
-		snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s][%s]%s %s", pfx,
-			 name[level], nv_subdev(subdev)->name,
-			 nv_device(device)->name, ofmt, fmt);
-	} else
-	if (object && nv_iclass(object, NV_CLIENT_CLASS)) {
-		if (level > nv_client(object)->debug)
-			return;
-
-		snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s] %s", pfx,
-			 name[level], nv_client(object)->name, fmt);
-	} else {
-		snprintf(mfmt, sizeof(mfmt), "%snouveau: %s", pfx, fmt);
-	}
-
-	va_start(args, fmt);
-	vprintk(mfmt, args);
-	va_end(args);
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
index ebd4d15..3216e15 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
@@ -22,8 +22,6 @@
 #include <core/ramht.h>
 #include <core/engine.h>
 
-#include <subdev/bar.h>
-
 static u32
 nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle)
 {
@@ -35,72 +33,130 @@
 	}
 
 	hash ^= chid << (ramht->bits - 4);
-	hash  = hash << 3;
 	return hash;
 }
 
-int
-nvkm_ramht_insert(struct nvkm_ramht *ramht, int chid, u32 handle, u32 context)
+struct nvkm_gpuobj *
+nvkm_ramht_search(struct nvkm_ramht *ramht, int chid, u32 handle)
 {
-	struct nvkm_bar *bar = nvkm_bar(ramht);
 	u32 co, ho;
 
 	co = ho = nvkm_ramht_hash(ramht, chid, handle);
 	do {
-		if (!nv_ro32(ramht, co + 4)) {
-			nv_wo32(ramht, co + 0, handle);
-			nv_wo32(ramht, co + 4, context);
-			if (bar)
-				bar->flush(bar);
-			return co;
+		if (ramht->data[co].chid == chid) {
+			if (ramht->data[co].handle == handle)
+				return ramht->data[co].inst;
 		}
 
-		co += 8;
-		if (co >= nv_gpuobj(ramht)->size)
+		if (++co >= ramht->size)
 			co = 0;
 	} while (co != ho);
 
-	return -ENOMEM;
+	return NULL;
+}
+
+static int
+nvkm_ramht_update(struct nvkm_ramht *ramht, int co, struct nvkm_object *object,
+		  int chid, int addr, u32 handle, u32 context)
+{
+	struct nvkm_ramht_data *data = &ramht->data[co];
+	u64 inst = 0x00000040; /* just non-zero for <=g8x fifo ramht */
+	int ret;
+
+	nvkm_gpuobj_del(&data->inst);
+	data->chid = chid;
+	data->handle = handle;
+
+	if (object) {
+		ret = nvkm_object_bind(object, ramht->parent, 16, &data->inst);
+		if (ret) {
+			if (ret != -ENODEV) {
+				data->chid = -1;
+				return ret;
+			}
+			data->inst = NULL;
+		}
+
+		if (data->inst) {
+			if (ramht->device->card_type >= NV_50)
+				inst = data->inst->node->offset;
+			else
+				inst = data->inst->addr;
+		}
+
+		if (addr < 0) context |= inst << -addr;
+		else          context |= inst >>  addr;
+	}
+
+	nvkm_kmap(ramht->gpuobj);
+	nvkm_wo32(ramht->gpuobj, (co << 3) + 0, handle);
+	nvkm_wo32(ramht->gpuobj, (co << 3) + 4, context);
+	nvkm_done(ramht->gpuobj);
+	return co + 1;
 }
 
 void
 nvkm_ramht_remove(struct nvkm_ramht *ramht, int cookie)
 {
-	struct nvkm_bar *bar = nvkm_bar(ramht);
-	nv_wo32(ramht, cookie + 0, 0x00000000);
-	nv_wo32(ramht, cookie + 4, 0x00000000);
-	if (bar)
-		bar->flush(bar);
+	if (--cookie >= 0)
+		nvkm_ramht_update(ramht, cookie, NULL, -1, 0, 0, 0);
 }
 
-static struct nvkm_oclass
-nvkm_ramht_oclass = {
-	.handle = 0x0000abcd,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = NULL,
-		.dtor = _nvkm_gpuobj_dtor,
-		.init = _nvkm_gpuobj_init,
-		.fini = _nvkm_gpuobj_fini,
-		.rd32 = _nvkm_gpuobj_rd32,
-		.wr32 = _nvkm_gpuobj_wr32,
-	},
-};
+int
+nvkm_ramht_insert(struct nvkm_ramht *ramht, struct nvkm_object *object,
+		  int chid, int addr, u32 handle, u32 context)
+{
+	u32 co, ho;
+
+	if (nvkm_ramht_search(ramht, chid, handle))
+		return -EEXIST;
+
+	co = ho = nvkm_ramht_hash(ramht, chid, handle);
+	do {
+		if (ramht->data[co].chid < 0) {
+			return nvkm_ramht_update(ramht, co, object, chid,
+						 addr, handle, context);
+		}
+
+		if (++co >= ramht->size)
+			co = 0;
+	} while (co != ho);
+
+	return -ENOSPC;
+}
+
+void
+nvkm_ramht_del(struct nvkm_ramht **pramht)
+{
+	struct nvkm_ramht *ramht = *pramht;
+	if (ramht) {
+		nvkm_gpuobj_del(&ramht->gpuobj);
+		kfree(*pramht);
+		*pramht = NULL;
+	}
+}
 
 int
-nvkm_ramht_new(struct nvkm_object *parent, struct nvkm_object *pargpu,
-	       u32 size, u32 align, struct nvkm_ramht **pramht)
+nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
+	       struct nvkm_gpuobj *parent, struct nvkm_ramht **pramht)
 {
 	struct nvkm_ramht *ramht;
-	int ret;
+	int ret, i;
 
-	ret = nvkm_gpuobj_create(parent, parent->engine ?
-				 &parent->engine->subdev.object : parent, /* <nv50 ramht */
-				 &nvkm_ramht_oclass, 0, pargpu, size,
-				 align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
-	*pramht = ramht;
+	if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
+					sizeof(*ramht->data), GFP_KERNEL)))
+		return -ENOMEM;
+
+	ramht->device = device;
+	ramht->parent = parent;
+	ramht->size = size >> 3;
+	ramht->bits = order_base_2(ramht->size);
+	for (i = 0; i < ramht->size; i++)
+		ramht->data[i].chid = -1;
+
+	ret = nvkm_gpuobj_new(ramht->device, size, align, true,
+			      ramht->parent, &ramht->gpuobj);
 	if (ret)
-		return ret;
-
-	ramht->bits = order_base_2(nv_gpuobj(ramht)->size >> 3);
-	return 0;
+		nvkm_ramht_del(pramht);
+	return ret;
 }
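
The rewritten RAMHT keeps a host-side shadow of the table (ramht->data[]) and
resolves collisions by linear probing from the hash: a slot with chid < 0 is
free, nvkm_ramht_insert() returns co + 1 so a valid cookie is never zero, and
nvkm_ramht_remove() undoes that offset before clearing the slot.  The probing
idiom in isolation, as a standalone sketch:

/* Sketch of the open-addressing scheme used by nvkm_ramht_insert():
 * hash, then probe linearly until a free slot or a full lap. */
#include <stdint.h>

struct slot { int chid; uint32_t handle; };

static int
probe_insert(struct slot *data, uint32_t size, uint32_t hash,
	     int chid, uint32_t handle)
{
	uint32_t co = hash % size, ho = co;
	do {
		if (data[co].chid < 0) {	/* free slot */
			data[co].chid = chid;
			data[co].handle = handle;
			return co + 1;		/* non-zero cookie */
		}
		if (++co >= size)
			co = 0;			/* wrap around */
	} while (co != ho);
	return -1;				/* table full (-ENOSPC) */
}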
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index c5fb3a79..7de9847 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -25,96 +25,178 @@
 #include <core/device.h>
 #include <core/option.h>
 
-struct nvkm_subdev *
-nvkm_subdev(void *obj, int idx)
-{
-	struct nvkm_object *object = nv_object(obj);
-	while (object && !nv_iclass(object, NV_SUBDEV_CLASS))
-		object = object->parent;
-	if (object == NULL || nv_subidx(nv_subdev(object)) != idx)
-		object = nv_device(obj)->subdev[idx];
-	return object ? nv_subdev(object) : NULL;
-}
+static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR];
+
+const char *
+nvkm_subdev_name[NVKM_SUBDEV_NR] = {
+	[NVKM_SUBDEV_BAR    ] = "bar",
+	[NVKM_SUBDEV_VBIOS  ] = "bios",
+	[NVKM_SUBDEV_BUS    ] = "bus",
+	[NVKM_SUBDEV_CLK    ] = "clk",
+	[NVKM_SUBDEV_DEVINIT] = "devinit",
+	[NVKM_SUBDEV_FB     ] = "fb",
+	[NVKM_SUBDEV_FUSE   ] = "fuse",
+	[NVKM_SUBDEV_GPIO   ] = "gpio",
+	[NVKM_SUBDEV_I2C    ] = "i2c",
+	[NVKM_SUBDEV_IBUS   ] = "priv",
+	[NVKM_SUBDEV_INSTMEM] = "imem",
+	[NVKM_SUBDEV_LTC    ] = "ltc",
+	[NVKM_SUBDEV_MC     ] = "mc",
+	[NVKM_SUBDEV_MMU    ] = "mmu",
+	[NVKM_SUBDEV_MXM    ] = "mxm",
+	[NVKM_SUBDEV_PCI    ] = "pci",
+	[NVKM_SUBDEV_PMU    ] = "pmu",
+	[NVKM_SUBDEV_THERM  ] = "therm",
+	[NVKM_SUBDEV_TIMER  ] = "tmr",
+	[NVKM_SUBDEV_VOLT   ] = "volt",
+	[NVKM_ENGINE_BSP    ] = "bsp",
+	[NVKM_ENGINE_CE0    ] = "ce0",
+	[NVKM_ENGINE_CE1    ] = "ce1",
+	[NVKM_ENGINE_CE2    ] = "ce2",
+	[NVKM_ENGINE_CIPHER ] = "cipher",
+	[NVKM_ENGINE_DISP   ] = "disp",
+	[NVKM_ENGINE_DMAOBJ ] = "dma",
+	[NVKM_ENGINE_FIFO   ] = "fifo",
+	[NVKM_ENGINE_GR     ] = "gr",
+	[NVKM_ENGINE_IFB    ] = "ifb",
+	[NVKM_ENGINE_ME     ] = "me",
+	[NVKM_ENGINE_MPEG   ] = "mpeg",
+	[NVKM_ENGINE_MSENC  ] = "msenc",
+	[NVKM_ENGINE_MSPDEC ] = "mspdec",
+	[NVKM_ENGINE_MSPPP  ] = "msppp",
+	[NVKM_ENGINE_MSVLD  ] = "msvld",
+	[NVKM_ENGINE_PM     ] = "pm",
+	[NVKM_ENGINE_SEC    ] = "sec",
+	[NVKM_ENGINE_SW     ] = "sw",
+	[NVKM_ENGINE_VIC    ] = "vic",
+	[NVKM_ENGINE_VP     ] = "vp",
+};
 
 void
-nvkm_subdev_reset(struct nvkm_object *subdev)
+nvkm_subdev_intr(struct nvkm_subdev *subdev)
 {
-	nv_trace(subdev, "resetting...\n");
-	nv_ofuncs(subdev)->fini(subdev, false);
-	nv_debug(subdev, "reset\n");
-}
-
-int
-nvkm_subdev_init(struct nvkm_subdev *subdev)
-{
-	int ret = nvkm_object_init(&subdev->object);
-	if (ret)
-		return ret;
-
-	nvkm_subdev_reset(&subdev->object);
-	return 0;
-}
-
-int
-_nvkm_subdev_init(struct nvkm_object *object)
-{
-	return nvkm_subdev_init(nv_subdev(object));
+	if (subdev->func->intr)
+		subdev->func->intr(subdev);
 }
 
 int
 nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	if (subdev->unit) {
-		nv_mask(subdev, 0x000200, subdev->unit, 0x00000000);
-		nv_mask(subdev, 0x000200, subdev->unit, subdev->unit);
+	struct nvkm_device *device = subdev->device;
+	const char *action = suspend ? "suspend" : "fini";
+	u32 pmc_enable = subdev->pmc_enable;
+	s64 time;
+
+	nvkm_trace(subdev, "%s running...\n", action);
+	time = ktime_to_us(ktime_get());
+
+	if (subdev->func->fini) {
+		int ret = subdev->func->fini(subdev, suspend);
+		if (ret) {
+			nvkm_error(subdev, "%s failed, %d\n", action, ret);
+			if (suspend)
+				return ret;
+		}
 	}
 
-	return nvkm_object_fini(&subdev->object, suspend);
+	if (pmc_enable) {
+		nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
+		nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
+		nvkm_rd32(device, 0x000200);
+	}
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
+	return 0;
 }
 
 int
-_nvkm_subdev_fini(struct nvkm_object *object, bool suspend)
+nvkm_subdev_preinit(struct nvkm_subdev *subdev)
 {
-	return nvkm_subdev_fini(nv_subdev(object), suspend);
-}
+	s64 time;
 
-void
-nvkm_subdev_destroy(struct nvkm_subdev *subdev)
-{
-	int subidx = nv_hclass(subdev) & 0xff;
-	nv_device(subdev)->subdev[subidx] = NULL;
-	nvkm_object_destroy(&subdev->object);
-}
+	nvkm_trace(subdev, "preinit running...\n");
+	time = ktime_to_us(ktime_get());
 
-void
-_nvkm_subdev_dtor(struct nvkm_object *object)
-{
-	nvkm_subdev_destroy(nv_subdev(object));
+	if (subdev->func->preinit) {
+		int ret = subdev->func->preinit(subdev);
+		if (ret) {
+			nvkm_error(subdev, "preinit failed, %d\n", ret);
+			return ret;
+		}
+	}
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvkm_trace(subdev, "preinit completed in %lldus\n", time);
+	return 0;
 }
 
 int
-nvkm_subdev_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 pclass,
-		    const char *subname, const char *sysname,
-		    int size, void **pobject)
+nvkm_subdev_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_subdev *subdev;
+	s64 time;
 	int ret;
 
-	ret = nvkm_object_create_(parent, engine, oclass, pclass |
-				  NV_SUBDEV_CLASS, size, pobject);
-	subdev = *pobject;
-	if (ret)
-		return ret;
+	nvkm_trace(subdev, "init running...\n");
+	time = ktime_to_us(ktime_get());
 
-	__mutex_init(&subdev->mutex, subname, &oclass->lock_class_key);
-	subdev->name = subname;
+	if (subdev->func->oneinit && !subdev->oneinit) {
+		s64 time;
+		nvkm_trace(subdev, "one-time init running...\n");
+		time = ktime_to_us(ktime_get());
+		ret = subdev->func->oneinit(subdev);
+		if (ret) {
+			nvkm_error(subdev, "one-time init failed, %d\n", ret);
+			return ret;
+		}
 
-	if (parent) {
-		struct nvkm_device *device = nv_device(parent);
-		subdev->debug = nvkm_dbgopt(device->dbgopt, subname);
-		subdev->mmio  = nv_subdev(device)->mmio;
+		subdev->oneinit = true;
+		time = ktime_to_us(ktime_get()) - time;
+		nvkm_trace(subdev, "one-time init completed in %lldus\n", time);
 	}
 
+	if (subdev->func->init) {
+		ret = subdev->func->init(subdev);
+		if (ret) {
+			nvkm_error(subdev, "init failed, %d\n", ret);
+			return ret;
+		}
+	}
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvkm_trace(subdev, "init completed in %lldus\n", time);
 	return 0;
 }
+
+void
+nvkm_subdev_del(struct nvkm_subdev **psubdev)
+{
+	struct nvkm_subdev *subdev = *psubdev;
+	s64 time;
+
+	if (subdev && !WARN_ON(!subdev->func)) {
+		nvkm_trace(subdev, "destroy running...\n");
+		time = ktime_to_us(ktime_get());
+		if (subdev->func->dtor)
+			*psubdev = subdev->func->dtor(subdev);
+		time = ktime_to_us(ktime_get()) - time;
+		nvkm_trace(subdev, "destroy completed in %lldus\n", time);
+		kfree(*psubdev);
+		*psubdev = NULL;
+	}
+}
+
+void
+nvkm_subdev_ctor(const struct nvkm_subdev_func *func,
+		 struct nvkm_device *device, int index, u32 pmc_enable,
+		 struct nvkm_subdev *subdev)
+{
+	const char *name = nvkm_subdev_name[index];
+	subdev->func = func;
+	subdev->device = device;
+	subdev->index = index;
+	subdev->pmc_enable = pmc_enable;
+
+	__mutex_init(&subdev->mutex, name, &nvkm_subdev_lock_class[index]);
+	subdev->debug = nvkm_dbgopt(device->dbgopt, name);
+}
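
Subdevs now carry their function table directly: oneinit runs once per device
lifetime (guarded by subdev->oneinit) before the first init, and fini pulses
the unit's bits in PMC register 0x000200 to reset the engine whenever
pmc_enable is non-zero.  A minimal backend under those assumptions, with
hypothetical names:

/* Sketch: a do-nothing subdev plugged into the reworked lifecycle.
 * hypo_* identifiers are illustrative only. */
#include <core/subdev.h>

static int
hypo_oneinit(struct nvkm_subdev *subdev)
{
	/* expensive one-time setup; never re-run across suspend/resume */
	return 0;
}

static int
hypo_init(struct nvkm_subdev *subdev)
{
	/* per-init (and per-resume) hardware programming */
	return 0;
}

static const struct nvkm_subdev_func
hypo_subdev = {
	.oneinit = hypo_oneinit,
	.init = hypo_init,
	/* no .fini hook: the core still pulses pmc_enable in 0x000200 */
};

static int
hypo_subdev_new(struct nvkm_device *device, int index,
		struct nvkm_subdev **psubdev)
{
	struct nvkm_subdev *subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
	if (!subdev)
		return -ENOMEM;
	/* pmc_enable == 0: this subdev owns no bits in 0x000200 */
	nvkm_subdev_ctor(&hypo_subdev, device, index, 0, subdev);
	*psubdev = subdev;
	return 0;
}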
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index 6bd3d75..36f7247 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -6,7 +6,7 @@
 include $(src)/nvkm/engine/cipher/Kbuild
 include $(src)/nvkm/engine/device/Kbuild
 include $(src)/nvkm/engine/disp/Kbuild
-include $(src)/nvkm/engine/dmaobj/Kbuild
+include $(src)/nvkm/engine/dma/Kbuild
 include $(src)/nvkm/engine/fifo/Kbuild
 include $(src)/nvkm/engine/gr/Kbuild
 include $(src)/nvkm/engine/mpeg/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index a0b1fd8..3ef0107 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -22,72 +22,23 @@
  * Authors: Ben Skeggs, Ilia Mirkin
  */
 #include <engine/bsp.h>
-#include <engine/xtensa.h>
 
-#include <core/engctx.h>
+#include <nvif/class.h>
 
-/*******************************************************************************
- * BSP object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_bsp_sclass[] = {
-	{ 0x74b0, &nvkm_object_ofuncs },
-	{},
+static const struct nvkm_xtensa_func
+g84_bsp = {
+	.pmc_enable = 0x04008000,
+	.fifo_val = 0x1111,
+	.unkd28 = 0x90044,
+	.sclass = {
+		{ -1, -1, NV74_BSP },
+		{}
+	}
 };
 
-/*******************************************************************************
- * BSP context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_bsp_cclass = {
-	.handle = NV_ENGCTX(BSP, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_xtensa_engctx_ctor,
-		.dtor = _nvkm_engctx_dtor,
-		.init = _nvkm_engctx_init,
-		.fini = _nvkm_engctx_fini,
-		.rd32 = _nvkm_engctx_rd32,
-		.wr32 = _nvkm_engctx_wr32,
-	},
-};
-
-/*******************************************************************************
- * BSP engine/subdev functions
- ******************************************************************************/
-
-static int
-g84_bsp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+int
+g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
 {
-	struct nvkm_xtensa *priv;
-	int ret;
-
-	ret = nvkm_xtensa_create(parent, engine, oclass, 0x103000, true,
-				 "PBSP", "bsp", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x04008000;
-	nv_engine(priv)->cclass = &g84_bsp_cclass;
-	nv_engine(priv)->sclass = g84_bsp_sclass;
-	priv->fifo_val = 0x1111;
-	priv->unkd28 = 0x90044;
-	return 0;
+	return nvkm_xtensa_new_(&g84_bsp, device, index,
+				true, 0x103000, pengine);
 }
-
-struct nvkm_oclass
-g84_bsp_oclass = {
-	.handle = NV_ENGINE(BSP, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_bsp_ctor,
-		.dtor = _nvkm_xtensa_dtor,
-		.init = _nvkm_xtensa_init,
-		.fini = _nvkm_xtensa_fini,
-		.rd32 = _nvkm_xtensa_rd32,
-		.wr32 = _nvkm_xtensa_wr32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc
index a558dfa..6226bcd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/com.fuc
@@ -24,9 +24,9 @@
  */
 
 #ifdef GT215
-.section #gt215_pce_data
+.section #gt215_ce_data
 #else
-.section #gf100_pce_data
+.section #gf100_ce_data
 #endif
 
 ctx_object:                   .b32 0
@@ -128,9 +128,9 @@
 .b16 0x800 0
 
 #ifdef GT215
-.section #gt215_pce_code
+.section #gt215_ce_code
 #else
-.section #gf100_pce_code
+.section #gf100_ce_code
 #endif
 
 main:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
index d9af6e4..05bb656 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gf100_pce_data[] = {
+uint32_t gf100_ce_data[] = {
 /* 0x0000: ctx_object */
 	0x00000000,
 /* 0x0004: ctx_query_address_high */
@@ -171,7 +171,7 @@
 	0x00000800,
 };
 
-uint32_t gf100_pce_code[] = {
+uint32_t gf100_ce_code[] = {
 /* 0x0000: main */
 	0x04fe04bd,
 	0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
index f42c0d0d..972281d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
@@ -1,4 +1,4 @@
-uint32_t gt215_pce_data[] = {
+uint32_t gt215_ce_data[] = {
 /* 0x0000: ctx_object */
 	0x00000000,
 /* 0x0004: ctx_dma */
@@ -183,7 +183,7 @@
 	0x00000800,
 };
 
-uint32_t gt215_pce_code[] = {
+uint32_t gt215_ce_code[] = {
 /* 0x0000: main */
 	0x04fe04bd,
 	0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
index 2d2e549..92a9f35 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
@@ -21,146 +21,60 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/ce.h>
-#include <engine/falcon.h>
+#include "priv.h"
 #include "fuc/gf100.fuc3.h"
 
-struct gf100_ce_priv {
-	struct nvkm_falcon base;
-};
+#include <nvif/class.h>
 
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_ce0_sclass[] = {
-	{ 0x90b5, &nvkm_object_ofuncs },
-	{},
-};
-
-static struct nvkm_oclass
-gf100_ce1_sclass[] = {
-	{ 0x90b8, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PCE context
- ******************************************************************************/
-
-static struct nvkm_ofuncs
-gf100_ce_context_ofuncs = {
-	.ctor = _nvkm_falcon_context_ctor,
-	.dtor = _nvkm_falcon_context_dtor,
-	.init = _nvkm_falcon_context_init,
-	.fini = _nvkm_falcon_context_fini,
-	.rd32 = _nvkm_falcon_context_rd32,
-	.wr32 = _nvkm_falcon_context_wr32,
-};
-
-static struct nvkm_oclass
-gf100_ce0_cclass = {
-	.handle = NV_ENGCTX(CE0, 0xc0),
-	.ofuncs = &gf100_ce_context_ofuncs,
-};
-
-static struct nvkm_oclass
-gf100_ce1_cclass = {
-	.handle = NV_ENGCTX(CE1, 0xc0),
-	.ofuncs = &gf100_ce_context_ofuncs,
-};
-
-/*******************************************************************************
- * PCE engine/subdev functions
- ******************************************************************************/
-
-static int
-gf100_ce_init(struct nvkm_object *object)
+static void
+gf100_ce_init(struct nvkm_falcon *ce)
 {
-	struct gf100_ce_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wo32(priv, 0x084, nv_engidx(&priv->base.base) - NVDEV_ENGINE_CE0);
-	return 0;
+	struct nvkm_device *device = ce->engine.subdev.device;
+	const int index = ce->engine.subdev.index - NVKM_ENGINE_CE0;
+	nvkm_wr32(device, ce->addr + 0x084, index);
 }
 
-static int
-gf100_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gf100_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x104000, true,
-				 "PCE0", "ce0", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000040;
-	nv_subdev(priv)->intr = gt215_ce_intr;
-	nv_engine(priv)->cclass = &gf100_ce0_cclass;
-	nv_engine(priv)->sclass = gf100_ce0_sclass;
-	nv_falcon(priv)->code.data = gf100_pce_code;
-	nv_falcon(priv)->code.size = sizeof(gf100_pce_code);
-	nv_falcon(priv)->data.data = gf100_pce_data;
-	nv_falcon(priv)->data.size = sizeof(gf100_pce_data);
-	return 0;
-}
-
-static int
-gf100_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gf100_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x105000, true,
-				 "PCE1", "ce1", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000080;
-	nv_subdev(priv)->intr = gt215_ce_intr;
-	nv_engine(priv)->cclass = &gf100_ce1_cclass;
-	nv_engine(priv)->sclass = gf100_ce1_sclass;
-	nv_falcon(priv)->code.data = gf100_pce_code;
-	nv_falcon(priv)->code.size = sizeof(gf100_pce_code);
-	nv_falcon(priv)->data.data = gf100_pce_data;
-	nv_falcon(priv)->data.size = sizeof(gf100_pce_data);
-	return 0;
-}
-
-struct nvkm_oclass
-gf100_ce0_oclass = {
-	.handle = NV_ENGINE(CE0, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ce0_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gf100_ce_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+gf100_ce0 = {
+	.code.data = gf100_ce_code,
+	.code.size = sizeof(gf100_ce_code),
+	.data.data = gf100_ce_data,
+	.data.size = sizeof(gf100_ce_data),
+	.pmc_enable = 0x00000040,
+	.init = gf100_ce_init,
+	.intr = gt215_ce_intr,
+	.sclass = {
+		{ -1, -1, FERMI_DMA },
+		{}
+	}
 };
 
-struct nvkm_oclass
-gf100_ce1_oclass = {
-	.handle = NV_ENGINE(CE1, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ce1_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gf100_ce_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+gf100_ce1 = {
+	.code.data = gf100_ce_code,
+	.code.size = sizeof(gf100_ce_code),
+	.data.data = gf100_ce_data,
+	.data.size = sizeof(gf100_ce_data),
+	.pmc_enable = 0x00000080,
+	.init = gf100_ce_init,
+	.intr = gt215_ce_intr,
+	.sclass = {
+		{ -1, -1, FERMI_DECOMPRESS },
+		{}
+	}
 };
+
+int
+gf100_ce_new(struct nvkm_device *device, int index,
+	     struct nvkm_engine **pengine)
+{
+	if (index == NVKM_ENGINE_CE0) {
+		return nvkm_falcon_new_(&gf100_ce0, device, index, true,
+					0x104000, pengine);
+	} else
+	if (index == NVKM_ENGINE_CE1) {
+		return nvkm_falcon_new_(&gf100_ce1, device, index, true,
+					0x105000, pengine);
+	}
+	return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
index a998932..c541a1c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
@@ -21,153 +21,47 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/ce.h>
+#include "priv.h"
 
-#include <core/engctx.h>
+#include <nvif/class.h>
 
-struct gk104_ce_priv {
-	struct nvkm_engine base;
-};
-
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_ce_sclass[] = {
-	{ 0xa0b5, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PCE context
- ******************************************************************************/
-
-static struct nvkm_ofuncs
-gk104_ce_context_ofuncs = {
-	.ctor = _nvkm_engctx_ctor,
-	.dtor = _nvkm_engctx_dtor,
-	.init = _nvkm_engctx_init,
-	.fini = _nvkm_engctx_fini,
-	.rd32 = _nvkm_engctx_rd32,
-	.wr32 = _nvkm_engctx_wr32,
-};
-
-static struct nvkm_oclass
-gk104_ce_cclass = {
-	.handle = NV_ENGCTX(CE0, 0xc0),
-	.ofuncs = &gk104_ce_context_ofuncs,
-};
-
-/*******************************************************************************
- * PCE engine/subdev functions
- ******************************************************************************/
-
-static void
-gk104_ce_intr(struct nvkm_subdev *subdev)
+void
+gk104_ce_intr(struct nvkm_engine *ce)
 {
-	const int ce = nv_subidx(subdev) - NVDEV_ENGINE_CE0;
-	struct gk104_ce_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, 0x104908 + (ce * 0x1000));
-
+	const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x1000;
+	struct nvkm_subdev *subdev = &ce->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x104908 + base);
 	if (stat) {
-		nv_warn(priv, "unhandled intr 0x%08x\n", stat);
-		nv_wr32(priv, 0x104908 + (ce * 0x1000), stat);
+		nvkm_warn(subdev, "intr %08x\n", stat);
+		nvkm_wr32(device, 0x104908 + base, stat);
 	}
 }
 
-static int
-gk104_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gk104_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE0", "ce0", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000040;
-	nv_subdev(priv)->intr = gk104_ce_intr;
-	nv_engine(priv)->cclass = &gk104_ce_cclass;
-	nv_engine(priv)->sclass = gk104_ce_sclass;
-	return 0;
-}
-
-static int
-gk104_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gk104_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE1", "ce1", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000080;
-	nv_subdev(priv)->intr = gk104_ce_intr;
-	nv_engine(priv)->cclass = &gk104_ce_cclass;
-	nv_engine(priv)->sclass = gk104_ce_sclass;
-	return 0;
-}
-
-static int
-gk104_ce2_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gk104_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE2", "ce2", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00200000;
-	nv_subdev(priv)->intr = gk104_ce_intr;
-	nv_engine(priv)->cclass = &gk104_ce_cclass;
-	nv_engine(priv)->sclass = gk104_ce_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-gk104_ce0_oclass = {
-	.handle = NV_ENGINE(CE0, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_ce0_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
+static const struct nvkm_engine_func
+gk104_ce = {
+	.intr = gk104_ce_intr,
+	.sclass = {
+		{ -1, -1, KEPLER_DMA_COPY_A },
+		{}
+	}
 };
 
-struct nvkm_oclass
-gk104_ce1_oclass = {
-	.handle = NV_ENGINE(CE1, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_ce1_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
-};
-
-struct nvkm_oclass
-gk104_ce2_oclass = {
-	.handle = NV_ENGINE(CE2, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_ce2_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
-};
+int
+gk104_ce_new(struct nvkm_device *device, int index,
+	     struct nvkm_engine **pengine)
+{
+	if (index == NVKM_ENGINE_CE0) {
+		return nvkm_engine_new_(&gk104_ce, device, index,
+					0x00000040, true, pengine);
+	} else
+	if (index == NVKM_ENGINE_CE1) {
+		return nvkm_engine_new_(&gk104_ce, device, index,
+					0x00000080, true, pengine);
+	} else
+	if (index == NVKM_ENGINE_CE2) {
+		return nvkm_engine_new_(&gk104_ce, device, index,
+					0x00200000, true, pengine);
+	}
+	return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
index 577eb2e..8eaa72a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm204.c
@@ -21,153 +21,34 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/ce.h>
+#include "priv.h"
 
-#include <core/engctx.h>
+#include <nvif/class.h>
 
-struct gm204_ce_priv {
-	struct nvkm_engine base;
-};
-
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gm204_ce_sclass[] = {
-	{ 0xb0b5, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PCE context
- ******************************************************************************/
-
-static struct nvkm_ofuncs
-gm204_ce_context_ofuncs = {
-	.ctor = _nvkm_engctx_ctor,
-	.dtor = _nvkm_engctx_dtor,
-	.init = _nvkm_engctx_init,
-	.fini = _nvkm_engctx_fini,
-	.rd32 = _nvkm_engctx_rd32,
-	.wr32 = _nvkm_engctx_wr32,
-};
-
-static struct nvkm_oclass
-gm204_ce_cclass = {
-	.handle = NV_ENGCTX(CE0, 0x24),
-	.ofuncs = &gm204_ce_context_ofuncs,
-};
-
-/*******************************************************************************
- * PCE engine/subdev functions
- ******************************************************************************/
-
-static void
-gm204_ce_intr(struct nvkm_subdev *subdev)
-{
-	const int ce = nv_subidx(subdev) - NVDEV_ENGINE_CE0;
-	struct gm204_ce_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, 0x104908 + (ce * 0x1000));
-
-	if (stat) {
-		nv_warn(priv, "unhandled intr 0x%08x\n", stat);
-		nv_wr32(priv, 0x104908 + (ce * 0x1000), stat);
+static const struct nvkm_engine_func
+gm204_ce = {
+	.intr = gk104_ce_intr,
+	.sclass = {
+		{ -1, -1, MAXWELL_DMA_COPY_A },
+		{}
 	}
-}
-
-static int
-gm204_ce0_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gm204_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE0", "ce0", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000040;
-	nv_subdev(priv)->intr = gm204_ce_intr;
-	nv_engine(priv)->cclass = &gm204_ce_cclass;
-	nv_engine(priv)->sclass = gm204_ce_sclass;
-	return 0;
-}
-
-static int
-gm204_ce1_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gm204_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE1", "ce1", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000080;
-	nv_subdev(priv)->intr = gm204_ce_intr;
-	nv_engine(priv)->cclass = &gm204_ce_cclass;
-	nv_engine(priv)->sclass = gm204_ce_sclass;
-	return 0;
-}
-
-static int
-gm204_ce2_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gm204_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCE2", "ce2", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00200000;
-	nv_subdev(priv)->intr = gm204_ce_intr;
-	nv_engine(priv)->cclass = &gm204_ce_cclass;
-	nv_engine(priv)->sclass = gm204_ce_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-gm204_ce0_oclass = {
-	.handle = NV_ENGINE(CE0, 0x24),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_ce0_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
 };
 
-struct nvkm_oclass
-gm204_ce1_oclass = {
-	.handle = NV_ENGINE(CE1, 0x24),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_ce1_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
-};
-
-struct nvkm_oclass
-gm204_ce2_oclass = {
-	.handle = NV_ENGINE(CE2, 0x24),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_ce2_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = _nvkm_engine_init,
-		.fini = _nvkm_engine_fini,
-	},
-};
+int
+gm204_ce_new(struct nvkm_device *device, int index,
+	     struct nvkm_engine **pengine)
+{
+	if (index == NVKM_ENGINE_CE0) {
+		return nvkm_engine_new_(&gm204_ce, device, index,
+					0x00000040, true, pengine);
+	} else
+	if (index == NVKM_ENGINE_CE1) {
+		return nvkm_engine_new_(&gm204_ce, device, index,
+					0x00000080, true, pengine);
+	} else
+	if (index == NVKM_ENGINE_CE2) {
+		return nvkm_engine_new_(&gm204_ce, device, index,
+					0x00200000, true, pengine);
+	}
+	return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
index d8bb429..402dcbc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
@@ -21,50 +21,15 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/ce.h>
-#include <engine/falcon.h>
-#include <engine/fifo.h>
+#include "priv.h"
 #include "fuc/gt215.fuc3.h"
 
 #include <core/client.h>
-#include <core/device.h>
 #include <core/enum.h>
+#include <core/gpuobj.h>
+#include <engine/fifo.h>
 
-struct gt215_ce_priv {
-	struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * Copy object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gt215_ce_sclass[] = {
-	{ 0x85b5, &nvkm_object_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * PCE context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gt215_ce_cclass = {
-	.handle = NV_ENGCTX(CE0, 0xa3),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-
-	},
-};
-
-/*******************************************************************************
- * PCE engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
 
 static const struct nvkm_enum
 gt215_ce_isr_error_name[] = {
@@ -75,78 +40,45 @@
 };
 
 void
-gt215_ce_intr(struct nvkm_subdev *subdev)
+gt215_ce_intr(struct nvkm_falcon *ce, struct nvkm_fifo_chan *chan)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_falcon *falcon = (void *)subdev;
-	struct nvkm_object *engctx;
-	u32 dispatch = nv_ro32(falcon, 0x01c);
-	u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
-	u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
-	u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff;
-	u32 addr = nv_ro32(falcon, 0x040) >> 16;
+	struct nvkm_subdev *subdev = &ce->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = (subdev->index - NVKM_ENGINE_CE0) * 0x1000;
+	u32 ssta = nvkm_rd32(device, 0x104040 + base) & 0x0000ffff;
+	u32 addr = nvkm_rd32(device, 0x104040 + base) >> 16;
 	u32 mthd = (addr & 0x07ff) << 2;
 	u32 subc = (addr & 0x3800) >> 11;
-	u32 data = nv_ro32(falcon, 0x044);
-	int chid;
+	u32 data = nvkm_rd32(device, 0x104044 + base);
+	const struct nvkm_enum *en =
+		nvkm_enum_find(gt215_ce_isr_error_name, ssta);
 
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
-
-	if (stat & 0x00000040) {
-		nv_error(falcon, "DISPATCH_ERROR [");
-		nvkm_enum_print(gt215_ce_isr_error_name, ssta);
-		pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
-		       chid, inst << 12, nvkm_client_name(engctx), subc,
-		       mthd, data);
-		nv_wo32(falcon, 0x004, 0x00000040);
-		stat &= ~0x00000040;
-	}
-
-	if (stat) {
-		nv_error(falcon, "unhandled intr 0x%08x\n", stat);
-		nv_wo32(falcon, 0x004, stat);
-	}
-
-	nvkm_engctx_put(engctx);
+	nvkm_error(subdev, "DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] "
+			   "subc %d mthd %04x data %08x\n", ssta,
+		   en ? en->name : "", chan ? chan->chid : -1,
+		   chan ? chan->inst->addr : 0,
+		   chan ? chan->object.client->name : "unknown",
+		   subc, mthd, data);
 }
 
-static int
-gt215_ce_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	bool enable = (nv_device(parent)->chipset != 0xaf);
-	struct gt215_ce_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x104000, enable,
-				 "PCE0", "ce0", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00802000;
-	nv_subdev(priv)->intr = gt215_ce_intr;
-	nv_engine(priv)->cclass = &gt215_ce_cclass;
-	nv_engine(priv)->sclass = gt215_ce_sclass;
-	nv_falcon(priv)->code.data = gt215_pce_code;
-	nv_falcon(priv)->code.size = sizeof(gt215_pce_code);
-	nv_falcon(priv)->data.data = gt215_pce_data;
-	nv_falcon(priv)->data.size = sizeof(gt215_pce_data);
-	return 0;
-}
-
-struct nvkm_oclass
-gt215_ce_oclass = {
-	.handle = NV_ENGINE(CE0, 0xa3),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_ce_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = _nvkm_falcon_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+gt215_ce = {
+	.code.data = gt215_ce_code,
+	.code.size = sizeof(gt215_ce_code),
+	.data.data = gt215_ce_data,
+	.data.size = sizeof(gt215_ce_data),
+	.pmc_enable = 0x00802000,
+	.intr = gt215_ce_intr,
+	.sclass = {
+		{ -1, -1, GT212_DMA },
+		{}
+	}
 };
+
+int
+gt215_ce_new(struct nvkm_device *device, int index,
+	     struct nvkm_engine **pengine)
+{
+	return nvkm_falcon_new_(&gt215_ce, device, index,
+				(device->chipset != 0xaf), 0x104000, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
new file mode 100644
index 0000000..e2fa8b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -0,0 +1,7 @@
+#ifndef __NVKM_CE_PRIV_H__
+#define __NVKM_CE_PRIV_H__
+#include <engine/ce.h>
+
+void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
+void gk104_ce_intr(struct nvkm_engine *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
index 13f3042..bfd0162 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
@@ -25,76 +25,47 @@
 #include <engine/fifo.h>
 
 #include <core/client.h>
-#include <core/engctx.h>
 #include <core/enum.h>
+#include <core/gpuobj.h>
 
-struct g84_cipher_priv {
-	struct nvkm_engine base;
-};
-
-/*******************************************************************************
- * Crypt object classes
- ******************************************************************************/
+#include <nvif/class.h>
 
 static int
-g84_cipher_object_ctor(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
+g84_cipher_oclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		       int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *obj;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 16, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, nv_mclass(obj));
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	return 0;
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16,
+				  align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-static struct nvkm_ofuncs
-g84_cipher_ofuncs = {
-	.ctor = g84_cipher_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
+static const struct nvkm_object_func
+g84_cipher_oclass_func = {
+	.bind = g84_cipher_oclass_bind,
 };
 
-static struct nvkm_oclass
-g84_cipher_sclass[] = {
-	{ 0x74c1, &g84_cipher_ofuncs },
-	{}
-};
+static int
+g84_cipher_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		       int align, struct nvkm_gpuobj **pgpuobj)
+{
+	return nvkm_gpuobj_new(object->engine->subdev.device, 256,
+			       align, true, parent, pgpuobj);
 
-/*******************************************************************************
- * PCIPHER context
- ******************************************************************************/
+}
 
-static struct nvkm_oclass
+static const struct nvkm_object_func
 g84_cipher_cclass = {
-	.handle = NV_ENGCTX(CIPHER, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_engctx_ctor,
-		.dtor = _nvkm_engctx_dtor,
-		.init = _nvkm_engctx_init,
-		.fini = _nvkm_engctx_fini,
-		.rd32 = _nvkm_engctx_rd32,
-		.wr32 = _nvkm_engctx_wr32,
-	},
+	.bind = g84_cipher_cclass_bind,
 };
 
-/*******************************************************************************
- * PCIPHER engine/subdev functions
- ******************************************************************************/
-
 static const struct nvkm_bitfield
 g84_cipher_intr_mask[] = {
 	{ 0x00000001, "INVALID_STATE" },
@@ -106,79 +77,59 @@
 };
 
 static void
-g84_cipher_intr(struct nvkm_subdev *subdev)
+g84_cipher_intr(struct nvkm_engine *cipher)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct g84_cipher_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, 0x102130);
-	u32 mthd = nv_rd32(priv, 0x102190);
-	u32 data = nv_rd32(priv, 0x102194);
-	u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff;
-	int chid;
+	struct nvkm_subdev *subdev = &cipher->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo *fifo = device->fifo;
+	struct nvkm_fifo_chan *chan;
+	u32 stat = nvkm_rd32(device, 0x102130);
+	u32 mthd = nvkm_rd32(device, 0x102190);
+	u32 data = nvkm_rd32(device, 0x102194);
+	u32 inst = nvkm_rd32(device, 0x102188) & 0x7fffffff;
+	unsigned long flags;
+	char msg[128];
 
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
-
+	chan = nvkm_fifo_chan_inst(fifo, (u64)inst << 12, &flags);
 	if (stat) {
-		nv_error(priv, "%s", "");
-		nvkm_bitfield_print(g84_cipher_intr_mask, stat);
-		pr_cont(" ch %d [0x%010llx %s] mthd 0x%04x data 0x%08x\n",
-		       chid, (u64)inst << 12, nvkm_client_name(engctx),
-		       mthd, data);
+		nvkm_snprintbf(msg, sizeof(msg), g84_cipher_intr_mask, stat);
+		nvkm_error(subdev,  "%08x [%s] ch %d [%010llx %s] "
+				    "mthd %04x data %08x\n", stat, msg,
+			   chan ? chan->chid : -1, (u64)inst << 12,
+			   chan ? chan->object.client->name : "unknown",
+			   mthd, data);
 	}
+	nvkm_fifo_chan_put(fifo, flags, &chan);
 
-	nv_wr32(priv, 0x102130, stat);
-	nv_wr32(priv, 0x10200c, 0x10);
-
-	nvkm_engctx_put(engctx);
+	nvkm_wr32(device, 0x102130, stat);
+	nvkm_wr32(device, 0x10200c, 0x10);
 }
 
 static int
-g84_cipher_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+g84_cipher_init(struct nvkm_engine *cipher)
 {
-	struct g84_cipher_priv *priv;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true,
-				 "PCIPHER", "cipher", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00004000;
-	nv_subdev(priv)->intr = g84_cipher_intr;
-	nv_engine(priv)->cclass = &g84_cipher_cclass;
-	nv_engine(priv)->sclass = g84_cipher_sclass;
+	struct nvkm_device *device = cipher->subdev.device;
+	nvkm_wr32(device, 0x102130, 0xffffffff);
+	nvkm_wr32(device, 0x102140, 0xffffffbf);
+	nvkm_wr32(device, 0x10200c, 0x00000010);
 	return 0;
 }
 
-static int
-g84_cipher_init(struct nvkm_object *object)
-{
-	struct g84_cipher_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_engine_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x102130, 0xffffffff);
-	nv_wr32(priv, 0x102140, 0xffffffbf);
-	nv_wr32(priv, 0x10200c, 0x00000010);
-	return 0;
-}
-
-struct nvkm_oclass
-g84_cipher_oclass = {
-	.handle = NV_ENGINE(CIPHER, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_cipher_ctor,
-		.dtor = _nvkm_engine_dtor,
-		.init = g84_cipher_init,
-		.fini = _nvkm_engine_fini,
-	},
+static const struct nvkm_engine_func
+g84_cipher = {
+	.init = g84_cipher_init,
+	.intr = g84_cipher_intr,
+	.cclass = &g84_cipher_cclass,
+	.sclass = {
+		{ -1, -1, NV74_CIPHER, &g84_cipher_oclass_func },
+		{}
+	}
 };
+
+int
+g84_cipher_new(struct nvkm_device *device, int index,
+	       struct nvkm_engine **pengine)
+{
+	return nvkm_engine_new_(&g84_cipher, device, index,
+				0x00004000, true, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
index de1bf09..09032ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/Kbuild
@@ -1,12 +1,6 @@
 nvkm-y += nvkm/engine/device/acpi.o
 nvkm-y += nvkm/engine/device/base.o
 nvkm-y += nvkm/engine/device/ctrl.o
-nvkm-y += nvkm/engine/device/nv04.o
-nvkm-y += nvkm/engine/device/nv10.o
-nvkm-y += nvkm/engine/device/nv20.o
-nvkm-y += nvkm/engine/device/nv30.o
-nvkm-y += nvkm/engine/device/nv40.o
-nvkm-y += nvkm/engine/device/nv50.o
-nvkm-y += nvkm/engine/device/gf100.o
-nvkm-y += nvkm/engine/device/gk104.o
-nvkm-y += nvkm/engine/device/gm100.o
+nvkm-y += nvkm/engine/device/pci.o
+nvkm-y += nvkm/engine/device/tegra.o
+nvkm-y += nvkm/engine/device/user.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
index f42706e..fdca90b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c
@@ -40,21 +40,19 @@
 }
 #endif
 
-int
-nvkm_acpi_fini(struct nvkm_device *device, bool suspend)
+void
+nvkm_acpi_fini(struct nvkm_device *device)
 {
 #ifdef CONFIG_ACPI
 	unregister_acpi_notifier(&device->acpi.nb);
 #endif
-	return 0;
 }
 
-int
+void
 nvkm_acpi_init(struct nvkm_device *device)
 {
 #ifdef CONFIG_ACPI
 	device->acpi.nb.notifier_call = nvkm_acpi_ntfy;
 	register_acpi_notifier(&device->acpi.nb);
 #endif
-	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
index 82dd359..1bbe76e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
@@ -3,6 +3,6 @@
 #include <core/os.h>
 struct nvkm_device;
 
-int nvkm_acpi_init(struct nvkm_device *);
-int nvkm_acpi_fini(struct nvkm_device *, bool);
+void nvkm_acpi_init(struct nvkm_device *);
+void nvkm_acpi_fini(struct nvkm_device *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 63d8e52..94a906b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -24,33 +24,33 @@
 #include "priv.h"
 #include "acpi.h"
 
-#include <core/client.h>
-#include <core/option.h>
 #include <core/notify.h>
-#include <core/parent.h>
-#include <subdev/bios.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
+#include <core/option.h>
 
-#include <nvif/class.h>
-#include <nvif/unpack.h>
+#include <subdev/bios.h>
 
 static DEFINE_MUTEX(nv_devices_mutex);
 static LIST_HEAD(nv_devices);
 
-struct nvkm_device *
-nvkm_device_find(u64 name)
+static struct nvkm_device *
+nvkm_device_find_locked(u64 handle)
 {
-	struct nvkm_device *device, *match = NULL;
-	mutex_lock(&nv_devices_mutex);
+	struct nvkm_device *device;
 	list_for_each_entry(device, &nv_devices, head) {
-		if (device->handle == name) {
-			match = device;
-			break;
-		}
+		if (device->handle == handle)
+			return device;
 	}
+	return NULL;
+}
+
+struct nvkm_device *
+nvkm_device_find(u64 handle)
+{
+	struct nvkm_device *device;
+	mutex_lock(&nv_devices_mutex);
+	device = nvkm_device_find_locked(handle);
 	mutex_unlock(&nv_devices_mutex);
-	return match;
+	return device;
 }
 
 int
@@ -67,280 +67,2272 @@
 	return nr;
 }
 
-/******************************************************************************
- * nvkm_devobj (0x0080): class implementation
- *****************************************************************************/
+static const struct nvkm_device_chip
+null_chipset = {
+	.name = "NULL",
+	.bios = nvkm_bios_new,
+};
 
-struct nvkm_devobj {
-	struct nvkm_parent base;
-	struct nvkm_object *subdev[NVDEV_SUBDEV_NR];
+static const struct nvkm_device_chip
+nv4_chipset = {
+	.name = "NV04",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv04_devinit_new,
+	.fb = nv04_fb_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv04_fifo_new,
+	.gr = nv04_gr_new,
+	.sw = nv04_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv5_chipset = {
+	.name = "NV05",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv05_devinit_new,
+	.fb = nv04_fb_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv04_fifo_new,
+	.gr = nv04_gr_new,
+	.sw = nv04_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv10_chipset = {
+	.name = "NV10",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.gr = nv10_gr_new,
+};
+
+static const struct nvkm_device_chip
+nv11_chipset = {
+	.name = "NV11",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv10_fifo_new,
+	.gr = nv15_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv15_chipset = {
+	.name = "NV15",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv10_fifo_new,
+	.gr = nv15_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv17_chipset = {
+	.name = "NV17",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv17_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv18_chipset = {
+	.name = "NV18",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv17_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv1a_chipset = {
+	.name = "nForce",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv1a_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv10_fifo_new,
+	.gr = nv15_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv1f_chipset = {
+	.name = "nForce2",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv1a_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv17_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv20_chipset = {
+	.name = "NV20",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv20_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv20_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv25_chipset = {
+	.name = "NV25",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv25_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv25_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv28_chipset = {
+	.name = "NV28",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv25_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv25_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv2a_chipset = {
+	.name = "NV2A",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv25_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv2a_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv30_chipset = {
+	.name = "NV30",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv30_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv30_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv31_chipset = {
+	.name = "NV31",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv30_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv30_gr_new,
+	.mpeg = nv31_mpeg_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv34_chipset = {
+	.name = "NV34",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv10_devinit_new,
+	.fb = nv10_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv34_gr_new,
+	.mpeg = nv31_mpeg_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv35_chipset = {
+	.name = "NV35",
+	.bios = nvkm_bios_new,
+	.bus = nv04_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv35_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv35_gr_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv36_chipset = {
+	.name = "NV36",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv04_clk_new,
+	.devinit = nv20_devinit_new,
+	.fb = nv36_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv04_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv04_pci_new,
+	.timer = nv04_timer_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv17_fifo_new,
+	.gr = nv35_gr_new,
+	.mpeg = nv31_mpeg_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv40_chipset = {
+	.name = "NV40",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv40_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv40_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv40_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv41_chipset = {
+	.name = "NV41",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv41_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv40_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv42_chipset = {
+	.name = "NV42",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv41_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv40_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv43_chipset = {
+	.name = "NV43",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv41_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv40_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv44_chipset = {
+	.name = "NV44",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv44_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv45_chipset = {
+	.name = "NV45",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv40_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv04_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv46_chipset = {
+	.name = "G72",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv46_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv47_chipset = {
+	.name = "G70",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv47_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv49_chipset = {
+	.name = "G71",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv49_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv4a_chipset = {
+	.name = "NV44A",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv44_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv4b_chipset = {
+	.name = "G73",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv49_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv04_mc_new,
+	.mmu = nv41_mmu_new,
+	.pci = nv40_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv40_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv4c_chipset = {
+	.name = "C61",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv46_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv4e_chipset = {
+	.name = "C51",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv4e_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv4e_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv50_chipset = {
+	.name = "G80",
+	.bar = nv50_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = nv50_bus_new,
+	.clk = nv50_clk_new,
+	.devinit = nv50_devinit_new,
+	.fb = nv50_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = nv50_gpio_new,
+	.i2c = nv50_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv50_pci_new,
+	.therm = nv50_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv50_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = nv50_fifo_new,
+	.gr = nv50_gr_new,
+	.mpeg = nv50_mpeg_new,
+	.pm = nv50_pm_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv63_chipset = {
+	.name = "C73",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv46_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv67_chipset = {
+	.name = "C67",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv46_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv68_chipset = {
+	.name = "C68",
+	.bios = nvkm_bios_new,
+	.bus = nv31_bus_new,
+	.clk = nv40_clk_new,
+	.devinit = nv1a_devinit_new,
+	.fb = nv46_fb_new,
+	.gpio = nv10_gpio_new,
+	.i2c = nv04_i2c_new,
+	.imem = nv40_instmem_new,
+	.mc = nv44_mc_new,
+	.mmu = nv44_mmu_new,
+	.pci = nv4c_pci_new,
+	.therm = nv40_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = nv04_disp_new,
+	.dma = nv04_dma_new,
+	.fifo = nv40_fifo_new,
+	.gr = nv44_gr_new,
+	.mpeg = nv44_mpeg_new,
+	.pm = nv40_pm_new,
+	.sw = nv10_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv84_chipset = {
+	.name = "G84",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = nv50_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = nv50_gpio_new,
+	.i2c = nv50_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv50_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = g84_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = g84_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv86_chipset = {
+	.name = "G86",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = nv50_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = nv50_gpio_new,
+	.i2c = nv50_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv50_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = g84_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = g84_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv92_chipset = {
+	.name = "G92",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = nv50_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = nv50_gpio_new,
+	.i2c = nv50_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv50_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = g84_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = g84_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv94_chipset = {
+	.name = "G94",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = g94_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = g84_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv96_chipset = {
+	.name = "G96",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = nv50_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = g94_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = g84_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nv98_chipset = {
+	.name = "G98",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g98_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = g94_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = g84_gr_new,
+	.mspdec = g98_mspdec_new,
+	.msppp = g98_msppp_new,
+	.msvld = g98_msvld_new,
+	.pm = g84_pm_new,
+	.sec = g98_sec_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nva0_chipset = {
+	.name = "GT200",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = g84_clk_new,
+	.devinit = g84_devinit_new,
+	.fb = g84_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = nv50_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.bsp = g84_bsp_new,
+	.cipher = g84_cipher_new,
+	.disp = gt200_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = gt200_gr_new,
+	.mpeg = g84_mpeg_new,
+	.pm = gt200_pm_new,
+	.sw = nv50_sw_new,
+	.vp = g84_vp_new,
+};
+
+static const struct nvkm_device_chip
+nva3_chipset = {
+	.name = "GT215",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = gt215_clk_new,
+	.devinit = gt215_devinit_new,
+	.fb = gt215_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gt215_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gt215_ce_new,
+	.disp = gt215_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = gt215_gr_new,
+	.mpeg = g84_mpeg_new,
+	.mspdec = gt215_mspdec_new,
+	.msppp = gt215_msppp_new,
+	.msvld = gt215_msvld_new,
+	.pm = gt215_pm_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nva5_chipset = {
+	.name = "GT216",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = gt215_clk_new,
+	.devinit = gt215_devinit_new,
+	.fb = gt215_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gt215_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gt215_ce_new,
+	.disp = gt215_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = gt215_gr_new,
+	.mspdec = gt215_mspdec_new,
+	.msppp = gt215_msppp_new,
+	.msvld = gt215_msvld_new,
+	.pm = gt215_pm_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nva8_chipset = {
+	.name = "GT218",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = gt215_clk_new,
+	.devinit = gt215_devinit_new,
+	.fb = gt215_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gt215_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gt215_ce_new,
+	.disp = gt215_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = gt215_gr_new,
+	.mspdec = gt215_mspdec_new,
+	.msppp = gt215_msppp_new,
+	.msvld = gt215_msvld_new,
+	.pm = gt215_pm_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvaa_chipset = {
+	.name = "MCP77/MCP78",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = mcp77_clk_new,
+	.devinit = g98_devinit_new,
+	.fb = mcp77_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = g94_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = gt200_gr_new,
+	.mspdec = g98_mspdec_new,
+	.msppp = g98_msppp_new,
+	.msvld = g98_msvld_new,
+	.pm = g84_pm_new,
+	.sec = g98_sec_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvac_chipset = {
+	.name = "MCP79/MCP7A",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = mcp77_clk_new,
+	.devinit = g98_devinit_new,
+	.fb = mcp77_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = g84_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.disp = g94_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = mcp79_gr_new,
+	.mspdec = g98_mspdec_new,
+	.msppp = g98_msppp_new,
+	.msvld = g98_msvld_new,
+	.pm = g84_pm_new,
+	.sec = g98_sec_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvaf_chipset = {
+	.name = "MCP89",
+	.bar = g84_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = g94_bus_new,
+	.clk = gt215_clk_new,
+	.devinit = mcp89_devinit_new,
+	.fb = mcp89_fb_new,
+	.fuse = nv50_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.imem = nv50_instmem_new,
+	.mc = g98_mc_new,
+	.mmu = nv50_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gt215_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gt215_ce_new,
+	.disp = gt215_disp_new,
+	.dma = nv50_dma_new,
+	.fifo = g84_fifo_new,
+	.gr = mcp89_gr_new,
+	.mspdec = gt215_mspdec_new,
+	.msppp = gt215_msppp_new,
+	.msvld = mcp89_msvld_new,
+	.pm = gt215_pm_new,
+	.sw = nv50_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc0_chipset = {
+	.name = "GF100",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = gf100_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.ce[1] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf100_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc1_chipset = {
+	.name = "GF108",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf108_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf108_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc3_chipset = {
+	.name = "GF106",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf104_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc4_chipset = {
+	.name = "GF104",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = gf100_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.ce[1] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf104_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvc8_chipset = {
+	.name = "GF110",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = gf100_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.ce[1] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf110_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvce_chipset = {
+	.name = "GF114",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = gf100_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.ce[1] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf104_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvcf_chipset = {
+	.name = "GF116",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = g94_gpio_new,
+	.i2c = g94_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gf100_pmu_new,
+	.therm = gt215_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.disp = gt215_disp_new,
+	.dma = gf100_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf104_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf100_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvd7_chipset = {
+	.name = "GF117",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gf119_gpio_new,
+	.i2c = gf117_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.ce[0] = gf100_ce_new,
+	.disp = gf119_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf117_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf117_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvd9_chipset = {
+	.name = "GF119",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gf100_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gf100_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gf119_gpio_new,
+	.i2c = gf119_i2c_new,
+	.ibus = gf100_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gf100_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gf119_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gf100_ce_new,
+	.disp = gf119_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gf100_fifo_new,
+	.gr = gf119_gr_new,
+	.mspdec = gf100_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gf100_msvld_new,
+	.pm = gf117_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nve4_chipset = {
+	.name = "GK104",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk104_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk104_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk104_fifo_new,
+	.gr = gk104_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.pm = gk104_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nve6_chipset = {
+	.name = "GK106",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk104_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk104_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk104_fifo_new,
+	.gr = gk104_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.pm = gk104_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nve7_chipset = {
+	.name = "GK107",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gf119_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk104_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk104_fifo_new,
+	.gr = gk104_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.pm = gk104_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvea_chipset = {
+	.name = "GK20A",
+	.bar = gk20a_bar_new,
+	.bus = gf100_bus_new,
+	.clk = gk20a_clk_new,
+	.fb = gk20a_fb_new,
+	.fuse = gf100_fuse_new,
+	.ibus = gk20a_ibus_new,
+	.imem = gk20a_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.pmu = gk20a_pmu_new,
+	.timer = gk20a_timer_new,
+	.volt = gk20a_volt_new,
+	.ce[2] = gk104_ce_new,
+	.dma = gf119_dma_new,
+	.fifo = gk20a_fifo_new,
+	.gr = gk20a_gr_new,
+	.pm = gk104_pm_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvf0_chipset = {
+	.name = "GK110",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk110_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk110_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk104_fifo_new,
+	.gr = gk110_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nvf1_chipset = {
+	.name = "GK110B",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gf119_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gf100_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk110_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk110_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk104_fifo_new,
+	.gr = gk110b_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv106_chipset = {
+	.name = "GK208B",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk208_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk110_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk208_fifo_new,
+	.gr = gk208_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv108_chipset = {
+	.name = "GK208",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gf100_devinit_new,
+	.fb = gk104_fb_new,
+	.fuse = gf100_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gk104_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gk104_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gk208_pmu_new,
+	.therm = gf119_therm_new,
+	.timer = nv41_timer_new,
+	.volt = nv40_volt_new,
+	.ce[0] = gk104_ce_new,
+	.ce[1] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gk110_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk208_fifo_new,
+	.gr = gk208_gr_new,
+	.mspdec = gk104_mspdec_new,
+	.msppp = gf100_msppp_new,
+	.msvld = gk104_msvld_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv117_chipset = {
+	.name = "GM107",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.clk = gk104_clk_new,
+	.devinit = gm107_devinit_new,
+	.fb = gm107_fb_new,
+	.fuse = gm107_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gf119_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gm107_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gm107_pmu_new,
+	.therm = gm107_therm_new,
+	.timer = gk20a_timer_new,
+	.ce[0] = gk104_ce_new,
+	.ce[2] = gk104_ce_new,
+	.disp = gm107_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gk208_fifo_new,
+	.gr = gm107_gr_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv124_chipset = {
+	.name = "GM204",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.devinit = gm204_devinit_new,
+	.fb = gm107_fb_new,
+	.fuse = gm107_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gm204_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gm107_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gm107_pmu_new,
+	.timer = gk20a_timer_new,
+	.ce[0] = gm204_ce_new,
+	.ce[1] = gm204_ce_new,
+	.ce[2] = gm204_ce_new,
+	.disp = gm204_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gm204_fifo_new,
+	.gr = gm204_gr_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv126_chipset = {
+	.name = "GM206",
+	.bar = gf100_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.devinit = gm204_devinit_new,
+	.fb = gm107_fb_new,
+	.fuse = gm107_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gm204_i2c_new,
+	.ibus = gk104_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gm107_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.mxm = nv50_mxm_new,
+	.pci = nv40_pci_new,
+	.pmu = gm107_pmu_new,
+	.timer = gk20a_timer_new,
+	.ce[0] = gm204_ce_new,
+	.ce[1] = gm204_ce_new,
+	.ce[2] = gm204_ce_new,
+	.disp = gm204_disp_new,
+	.dma = gf119_dma_new,
+	.fifo = gm204_fifo_new,
+	.gr = gm206_gr_new,
+	.sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv12b_chipset = {
+	.name = "GM20B",
+	.bar = gk20a_bar_new,
+	.bus = gf100_bus_new,
+	.fb = gk20a_fb_new,
+	.fuse = gm107_fuse_new,
+	.ibus = gk20a_ibus_new,
+	.imem = gk20a_instmem_new,
+	.ltc = gm107_ltc_new,
+	.mc = gk20a_mc_new,
+	.mmu = gf100_mmu_new,
+	.timer = gk20a_timer_new,
+	.ce[2] = gm204_ce_new,
+	.dma = gf119_dma_new,
+	.fifo = gm20b_fifo_new,
+	.gr = gm20b_gr_new,
+	.sw = gf100_sw_new,
 };
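
The nvd7..nv12b tables above, like the rest of the series, replace the old per-generation *_identify() switch ladders (the deleted gf100.c further down is one of them): each chipset gets one const struct nvkm_device_chip whose designated initializers name a constructor for every subdev/engine it has, and anything a chip lacks is simply left out, defaulting to NULL. A minimal userspace sketch of that pattern, with invented names (not the nvkm API):

#include <stdio.h>

struct chip {
	const char *name;
	int (*gr_new)(void);	/* constructor pointer; NULL => unit absent */
	int (*ce_new)(void);
};

static int my_gr_new(void) { printf("gr ctor\n"); return 0; }

static const struct chip example_chipset = {
	.name   = "EXAMPLE",
	.gr_new = my_gr_new,
	/* .ce_new omitted: implicitly NULL, i.e. no copy engine */
};

int main(void)
{
	if (example_chipset.gr_new)
		example_chipset.gr_new();
	if (!example_chipset.ce_new)
		printf("%s has no CE\n", example_chipset.name);
	return 0;
}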
 
 static int
-nvkm_devobj_info(struct nvkm_object *object, void *data, u32 size)
+nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
+		       struct nvkm_notify *notify)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct nvkm_fb *pfb = nvkm_fb(device);
-	struct nvkm_instmem *imem = nvkm_instmem(device);
-	union {
-		struct nv_device_info_v0 v0;
-	} *args = data;
-	int ret;
-
-	nv_ioctl(object, "device info size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "device info vers %d\n", args->v0.version);
-	} else
-		return ret;
-
-	switch (device->chipset) {
-	case 0x01a:
-	case 0x01f:
-	case 0x04c:
-	case 0x04e:
-	case 0x063:
-	case 0x067:
-	case 0x068:
-	case 0x0aa:
-	case 0x0ac:
-	case 0x0af:
-		args->v0.platform = NV_DEVICE_INFO_V0_IGP;
-		break;
-	default:
-		if (device->pdev) {
-			if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP))
-				args->v0.platform = NV_DEVICE_INFO_V0_AGP;
-			else
-			if (pci_is_pcie(device->pdev))
-				args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
-			else
-				args->v0.platform = NV_DEVICE_INFO_V0_PCI;
-		} else {
-			args->v0.platform = NV_DEVICE_INFO_V0_SOC;
-		}
-		break;
-	}
-
-	switch (device->card_type) {
-	case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
-	case NV_10:
-	case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
-	case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
-	case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
-	case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
-	case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
-	case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
-	case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
-	case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
-	default:
-		args->v0.family = 0;
-		break;
-	}
-
-	args->v0.chipset  = device->chipset;
-	args->v0.revision = device->chiprev;
-	if (pfb && pfb->ram)
-		args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
-	else
-		args->v0.ram_size = args->v0.ram_user = 0;
-	if (imem && args->v0.ram_size > 0)
-		args->v0.ram_user = args->v0.ram_user - imem->reserved;
-
-	return 0;
-}
-
-static int
-nvkm_devobj_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
-	switch (mthd) {
-	case NV_DEVICE_V0_INFO:
-		return nvkm_devobj_info(object, data, size);
-	default:
-		break;
+	if (!WARN_ON(size != 0)) {
+		notify->size  = 0;
+		notify->types = 1;
+		notify->index = 0;
+		return 0;
 	}
 	return -EINVAL;
 }
 
-static u8
-nvkm_devobj_rd08(struct nvkm_object *object, u64 addr)
+static const struct nvkm_event_func
+nvkm_device_event_func = {
+	.ctor = nvkm_device_event_ctor,
+};
+
+struct nvkm_subdev *
+nvkm_device_subdev(struct nvkm_device *device, int index)
 {
-	return nv_rd08(object->engine, addr);
+	struct nvkm_engine *engine;
+
+	if (device->disable_mask & (1ULL << index))
+		return NULL;
+
+	switch (index) {
+#define _(n,p,m) case NVKM_SUBDEV_##n: if (p) return (m); break
+	_(BAR    , device->bar    , &device->bar->subdev);
+	_(VBIOS  , device->bios   , &device->bios->subdev);
+	_(BUS    , device->bus    , &device->bus->subdev);
+	_(CLK    , device->clk    , &device->clk->subdev);
+	_(DEVINIT, device->devinit, &device->devinit->subdev);
+	_(FB     , device->fb     , &device->fb->subdev);
+	_(FUSE   , device->fuse   , &device->fuse->subdev);
+	_(GPIO   , device->gpio   , &device->gpio->subdev);
+	_(I2C    , device->i2c    , &device->i2c->subdev);
+	_(IBUS   , device->ibus   ,  device->ibus);
+	_(INSTMEM, device->imem   , &device->imem->subdev);
+	_(LTC    , device->ltc    , &device->ltc->subdev);
+	_(MC     , device->mc     , &device->mc->subdev);
+	_(MMU    , device->mmu    , &device->mmu->subdev);
+	_(MXM    , device->mxm    ,  device->mxm);
+	_(PCI    , device->pci    , &device->pci->subdev);
+	_(PMU    , device->pmu    , &device->pmu->subdev);
+	_(THERM  , device->therm  , &device->therm->subdev);
+	_(TIMER  , device->timer  , &device->timer->subdev);
+	_(VOLT   , device->volt   , &device->volt->subdev);
+#undef _
+	default:
+		engine = nvkm_device_engine(device, index);
+		if (engine)
+			return &engine->subdev;
+		break;
+	}
+	return NULL;
 }
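
nvkm_device_subdev() above collapses twenty "if the member is populated, return its embedded subdev" cases into a local _() macro that exists only for the switch and is #undef'd immediately after; nvkm_device_engine() below repeats the trick for engines. A self-contained imitation of the shape, with made-up types:

#include <stddef.h>

struct subdev { int id; };
struct dev { struct subdev *a, *b; };	/* optional components */
enum { IDX_A, IDX_B };

static struct subdev *
dev_subdev(struct dev *d, int index)
{
	switch (index) {
#define _(n,p,m) case n: if (p) return (m); break
	_(IDX_A, d->a, d->a);
	_(IDX_B, d->b, d->b);
#undef _
	default:
		break;
	}
	return NULL;
}

int main(void)
{
	struct subdev gr = { 1 };
	struct dev d = { .a = &gr };	/* .b absent */
	return dev_subdev(&d, IDX_B) == NULL ? 0 : 1;
}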
 
-static u16
-nvkm_devobj_rd16(struct nvkm_object *object, u64 addr)
+struct nvkm_engine *
+nvkm_device_engine(struct nvkm_device *device, int index)
 {
-	return nv_rd16(object->engine, addr);
+	if (device->disable_mask & (1ULL << index))
+		return NULL;
+
+	switch (index) {
+#define _(n,p,m) case NVKM_ENGINE_##n: if (p) return (m); break
+	_(BSP    , device->bsp    ,  device->bsp);
+	_(CE0    , device->ce[0]  ,  device->ce[0]);
+	_(CE1    , device->ce[1]  ,  device->ce[1]);
+	_(CE2    , device->ce[2]  ,  device->ce[2]);
+	_(CIPHER , device->cipher ,  device->cipher);
+	_(DISP   , device->disp   , &device->disp->engine);
+	_(DMAOBJ , device->dma    , &device->dma->engine);
+	_(FIFO   , device->fifo   , &device->fifo->engine);
+	_(GR     , device->gr     , &device->gr->engine);
+	_(IFB    , device->ifb    ,  device->ifb);
+	_(ME     , device->me     ,  device->me);
+	_(MPEG   , device->mpeg   ,  device->mpeg);
+	_(MSENC  , device->msenc  ,  device->msenc);
+	_(MSPDEC , device->mspdec ,  device->mspdec);
+	_(MSPPP  , device->msppp  ,  device->msppp);
+	_(MSVLD  , device->msvld  ,  device->msvld);
+	_(PM     , device->pm     , &device->pm->engine);
+	_(SEC    , device->sec    ,  device->sec);
+	_(SW     , device->sw     , &device->sw->engine);
+	_(VIC    , device->vic    ,  device->vic);
+	_(VP     , device->vp     ,  device->vp);
+#undef _
+	default:
+		WARN_ON(1);
+		break;
+	}
+	return NULL;
 }
 
-static u32
-nvkm_devobj_rd32(struct nvkm_object *object, u64 addr)
+int
+nvkm_device_fini(struct nvkm_device *device, bool suspend)
 {
-	return nv_rd32(object->engine, addr);
-}
+	const char *action = suspend ? "suspend" : "fini";
+	struct nvkm_subdev *subdev;
+	int ret, i;
+	s64 time;
 
-static void
-nvkm_devobj_wr08(struct nvkm_object *object, u64 addr, u8 data)
-{
-	nv_wr08(object->engine, addr, data);
-}
+	nvdev_trace(device, "%s running...\n", action);
+	time = ktime_to_us(ktime_get());
 
-static void
-nvkm_devobj_wr16(struct nvkm_object *object, u64 addr, u16 data)
-{
-	nv_wr16(object->engine, addr, data);
-}
+	nvkm_acpi_fini(device);
 
-static void
-nvkm_devobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	nv_wr32(object->engine, addr, data);
-}
+	for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) {
+		if ((subdev = nvkm_device_subdev(device, i))) {
+			ret = nvkm_subdev_fini(subdev, suspend);
+			if (ret && suspend)
+				goto fail;
+		}
+	}
 
-static int
-nvkm_devobj_map(struct nvkm_object *object, u64 *addr, u32 *size)
-{
-	struct nvkm_device *device = nv_device(object);
-	*addr = nv_device_resource_start(device, 0);
-	*size = nv_device_resource_len(device, 0);
+	if (device->func->fini)
+		device->func->fini(device, suspend);
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvdev_trace(device, "%s completed in %lldus...\n", action, time);
 	return 0;
+
+fail:
+	do {
+		if ((subdev = nvkm_device_subdev(device, i))) {
+			int rret = nvkm_subdev_init(subdev);
+			if (rret)
+				nvkm_fatal(subdev, "failed restart, %d\n", rret);
+		}
+	} while (++i < NVKM_SUBDEV_NR);
+
+	nvdev_trace(device, "%s failed with %d\n", action, ret);
+	return ret;
 }
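
Two details of nvkm_device_fini() worth noting: subdevs go down in descending index order, and an error only aborts the walk when suspending (plain fini ignores failures); on abort, the fail: loop restarts the failing subdev and everything already suspended after it, walking back up. The control flow, reduced to hypothetical helpers:

/* Reverse teardown with best-effort roll-forward on failure. */
int teardown_all(int n, int (*fini)(int), int (*init)(int))
{
	int ret, i;

	for (i = n - 1; i >= 0; i--) {
		ret = fini(i);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	do {
		init(i);	/* restart unit i and every unit above it */
	} while (++i < n);
	return ret;
}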
 
-static const u64 disable_map[] = {
-	[NVDEV_SUBDEV_VBIOS]	= NV_DEVICE_V0_DISABLE_VBIOS,
-	[NVDEV_SUBDEV_DEVINIT]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_GPIO]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_I2C]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_CLK  ]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_MXM]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_MC]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_BUS]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_TIMER]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_FB]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_LTC]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_IBUS]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_INSTMEM]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_MMU]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_BAR]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_VOLT]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_THERM]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_PMU]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_SUBDEV_FUSE]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_ENGINE_DMAOBJ]	= NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_ENGINE_PM     ]  = NV_DEVICE_V0_DISABLE_CORE,
-	[NVDEV_ENGINE_FIFO]	= NV_DEVICE_V0_DISABLE_FIFO,
-	[NVDEV_ENGINE_SW]	= NV_DEVICE_V0_DISABLE_FIFO,
-	[NVDEV_ENGINE_GR]	= NV_DEVICE_V0_DISABLE_GR,
-	[NVDEV_ENGINE_MPEG]	= NV_DEVICE_V0_DISABLE_MPEG,
-	[NVDEV_ENGINE_ME]	= NV_DEVICE_V0_DISABLE_ME,
-	[NVDEV_ENGINE_VP]	= NV_DEVICE_V0_DISABLE_VP,
-	[NVDEV_ENGINE_CIPHER]	= NV_DEVICE_V0_DISABLE_CIPHER,
-	[NVDEV_ENGINE_BSP]	= NV_DEVICE_V0_DISABLE_BSP,
-	[NVDEV_ENGINE_MSPPP]	= NV_DEVICE_V0_DISABLE_MSPPP,
-	[NVDEV_ENGINE_CE0]	= NV_DEVICE_V0_DISABLE_CE0,
-	[NVDEV_ENGINE_CE1]	= NV_DEVICE_V0_DISABLE_CE1,
-	[NVDEV_ENGINE_CE2]	= NV_DEVICE_V0_DISABLE_CE2,
-	[NVDEV_ENGINE_VIC]	= NV_DEVICE_V0_DISABLE_VIC,
-	[NVDEV_ENGINE_MSENC]	= NV_DEVICE_V0_DISABLE_MSENC,
-	[NVDEV_ENGINE_DISP]	= NV_DEVICE_V0_DISABLE_DISP,
-	[NVDEV_ENGINE_MSVLD]	= NV_DEVICE_V0_DISABLE_MSVLD,
-	[NVDEV_ENGINE_SEC]	= NV_DEVICE_V0_DISABLE_SEC,
-	[NVDEV_SUBDEV_NR]	= 0,
-};
-
-static void
-nvkm_devobj_dtor(struct nvkm_object *object)
-{
-	struct nvkm_devobj *devobj = (void *)object;
-	int i;
-
-	for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
-		nvkm_object_ref(NULL, &devobj->subdev[i]);
-
-	nvkm_parent_destroy(&devobj->base);
-}
-
-static struct nvkm_oclass
-nvkm_devobj_oclass_super = {
-	.handle = NV_DEVICE,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.dtor = nvkm_devobj_dtor,
-		.init = _nvkm_parent_init,
-		.fini = _nvkm_parent_fini,
-		.mthd = nvkm_devobj_mthd,
-		.map  = nvkm_devobj_map,
-		.rd08 = nvkm_devobj_rd08,
-		.rd16 = nvkm_devobj_rd16,
-		.rd32 = nvkm_devobj_rd32,
-		.wr08 = nvkm_devobj_wr08,
-		.wr16 = nvkm_devobj_wr16,
-		.wr32 = nvkm_devobj_wr32,
-	}
-};
-
 static int
-nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
+nvkm_device_preinit(struct nvkm_device *device)
 {
-	union {
-		struct nv_device_v0 v0;
-	} *args = data;
-	struct nvkm_client *client = nv_client(parent);
-	struct nvkm_device *device;
-	struct nvkm_devobj *devobj;
-	u32 boot0, strap;
-	u64 disable, mmio_base, mmio_size;
-	void __iomem *map;
-	int ret, i, c;
+	struct nvkm_subdev *subdev;
+	int ret, i;
+	s64 time;
 
-	nv_ioctl(parent, "create device size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create device v%d device %016llx "
-				 "disable %016llx debug0 %016llx\n",
-			 args->v0.version, args->v0.device,
-			 args->v0.disable, args->v0.debug0);
-	} else
-		return ret;
+	nvdev_trace(device, "preinit running...\n");
+	time = ktime_to_us(ktime_get());
 
-	/* give priviledged clients register access */
-	if (client->super)
-		oclass = &nvkm_devobj_oclass_super;
-
-	/* find the device subdev that matches what the client requested */
-	device = nv_device(client->device);
-	if (args->v0.device != ~0) {
-		device = nvkm_device_find(args->v0.device);
-		if (!device)
-			return -ENODEV;
+	if (device->func->preinit) {
+		ret = device->func->preinit(device);
+		if (ret)
+			goto fail;
 	}
 
-	ret = nvkm_parent_create(parent, nv_object(device), oclass, 0,
-				 nvkm_control_oclass,
-				 (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				 (1ULL << NVDEV_ENGINE_FIFO) |
-				 (1ULL << NVDEV_ENGINE_DISP) |
-				 (1ULL << NVDEV_ENGINE_PM), &devobj);
-	*pobject = nv_object(devobj);
+	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
+		if ((subdev = nvkm_device_subdev(device, i))) {
+			ret = nvkm_subdev_preinit(subdev);
+			if (ret)
+				goto fail;
+		}
+	}
+
+	ret = nvkm_devinit_post(device->devinit, &device->disable_mask);
+	if (ret)
+		goto fail;
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvdev_trace(device, "preinit completed in %lldus\n", time);
+	return 0;
+
+fail:
+	nvdev_error(device, "preinit failed with %d\n", ret);
+	return ret;
+}
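
Preinit ends by running the devinit scripts via nvkm_devinit_post(), which fills device->disable_mask with one bit per unit the board's VBIOS says is absent; nvkm_device_subdev()/nvkm_device_engine() then test that mask before handing out a pointer. The test itself, as a standalone illustration:

/* Set bit in the mask => treat the unit at that index as nonexistent. */
static inline int unit_disabled(unsigned long long mask, int index)
{
	return (mask & (1ULL << index)) != 0;
}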
+
+int
+nvkm_device_init(struct nvkm_device *device)
+{
+	struct nvkm_subdev *subdev;
+	int ret, i;
+	s64 time;
+
+	ret = nvkm_device_preinit(device);
 	if (ret)
 		return ret;
 
-	mmio_base = nv_device_resource_start(device, 0);
-	mmio_size = nv_device_resource_len(device, 0);
+	nvkm_device_fini(device, false);
 
-	/* translate api disable mask into internal mapping */
-	disable = args->v0.debug0;
-	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
-		if (args->v0.disable & disable_map[i])
-			disable |= (1ULL << i);
+	nvdev_trace(device, "init running...\n");
+	time = ktime_to_us(ktime_get());
+
+	if (device->func->init) {
+		ret = device->func->init(device);
+		if (ret)
+			goto fail;
 	}
 
+	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
+		if ((subdev = nvkm_device_subdev(device, i))) {
+			ret = nvkm_subdev_init(subdev);
+			if (ret)
+				goto fail_subdev;
+		}
+	}
+
+	nvkm_acpi_init(device);
+
+	time = ktime_to_us(ktime_get()) - time;
+	nvdev_trace(device, "init completed in %lldus\n", time);
+	return 0;
+
+fail_subdev:
+	do {
+		if ((subdev = nvkm_device_subdev(device, i)))
+			nvkm_subdev_fini(subdev, false);
+	} while (--i >= 0);
+
+fail:
+	nvdev_error(device, "init failed with %d\n", ret);
+	return ret;
+}
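
nvkm_device_init() mirrors fini: an early throwaway nvkm_device_fini(device, false) quiesces the hardware, subdevs then come up in ascending order, and fail_subdev: unwinds from the failing index back down to zero, including the half-initialized subdev itself. Stripped to hypothetical helpers:

/* Ascending bring-up with descending unwind on first error. */
int bringup_all(int n, int (*init)(int), void (*fini)(int))
{
	int ret, i;

	for (i = 0; i < n; i++) {
		ret = init(i);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	do {
		fini(i);	/* includes unit i, which may be half-up */
	} while (--i >= 0);
	return ret;
}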
+
+void
+nvkm_device_del(struct nvkm_device **pdevice)
+{
+	struct nvkm_device *device = *pdevice;
+	int i;
+	if (device) {
+		mutex_lock(&nv_devices_mutex);
+		device->disable_mask = 0;
+		for (i = NVKM_SUBDEV_NR - 1; i >= 0; i--) {
+			struct nvkm_subdev *subdev =
+				nvkm_device_subdev(device, i);
+			nvkm_subdev_del(&subdev);
+		}
+
+		nvkm_event_fini(&device->event);
+
+		if (device->pri)
+			iounmap(device->pri);
+		list_del(&device->head);
+
+		if (device->func->dtor)
+			*pdevice = device->func->dtor(device);
+		mutex_unlock(&nv_devices_mutex);
+
+		kfree(*pdevice);
+		*pdevice = NULL;
+	}
+}
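
The reason func->dtor() returns a pointer that nvkm_device_del() then kfree()s: struct nvkm_device is embedded inside a bus-specific wrapper (PCI, platform, ...), and only the bus code knows how to step from the embedded member back to the enclosing allocation. A sketch with invented types:

#include <stddef.h>
#include <stdlib.h>

struct base { void *(*dtor)(struct base *); };
struct pci_wrapper { int pci_state; struct base base; };

static void *pci_dtor(struct base *b)
{
	/* recover the address of the allocation that contains 'b' */
	return (char *)b - offsetof(struct pci_wrapper, base);
}

int main(void)
{
	struct pci_wrapper *w = malloc(sizeof(*w));
	if (!w)
		return 1;
	w->base.dtor = pci_dtor;
	free(w->base.dtor(&w->base));	/* frees 'w' itself */
	return 0;
}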
+
+int
+nvkm_device_ctor(const struct nvkm_device_func *func,
+		 const struct nvkm_device_quirk *quirk,
+		 struct device *dev, enum nvkm_device_type type, u64 handle,
+		 const char *name, const char *cfg, const char *dbg,
+		 bool detect, bool mmio, u64 subdev_mask,
+		 struct nvkm_device *device)
+{
+	struct nvkm_subdev *subdev;
+	u64 mmio_base, mmio_size;
+	u32 boot0, strap;
+	void __iomem *map;
+	int ret = -EEXIST;
+	int i;
+
+	mutex_lock(&nv_devices_mutex);
+	if (nvkm_device_find_locked(handle))
+		goto done;
+
+	device->func = func;
+	device->quirk = quirk;
+	device->dev = dev;
+	device->type = type;
+	device->handle = handle;
+	device->cfgopt = cfg;
+	device->dbgopt = dbg;
+	device->name = name;
+	list_add_tail(&device->head, &nv_devices);
+	device->debug = nvkm_dbgopt(device->dbgopt, "device");
+
+	ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
+	if (ret)
+		goto done;
+
+	mmio_base = device->func->resource_addr(device, 0);
+	mmio_size = device->func->resource_size(device, 0);
+
 	/* identify the chipset, and determine classes of subdev/engines */
-	if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY) &&
-	    !device->card_type) {
+	if (detect) {
 		map = ioremap(mmio_base, 0x102000);
-		if (map == NULL)
-			return -ENOMEM;
+		if (ret = -ENOMEM, map == NULL)
+			goto done;
 
 		/* switch mmio to cpu's native endianness */
 #ifndef __BIG_ENDIAN
@@ -397,31 +2389,83 @@
 			device->card_type = NV_04;
 		}
 
-		switch (device->card_type) {
-		case NV_04: ret = nv04_identify(device); break;
-		case NV_10:
-		case NV_11: ret = nv10_identify(device); break;
-		case NV_20: ret = nv20_identify(device); break;
-		case NV_30: ret = nv30_identify(device); break;
-		case NV_40: ret = nv40_identify(device); break;
-		case NV_50: ret = nv50_identify(device); break;
-		case NV_C0: ret = gf100_identify(device); break;
-		case NV_E0: ret = gk104_identify(device); break;
-		case GM100: ret = gm100_identify(device); break;
+		switch (device->chipset) {
+		case 0x004: device->chip = &nv4_chipset; break;
+		case 0x005: device->chip = &nv5_chipset; break;
+		case 0x010: device->chip = &nv10_chipset; break;
+		case 0x011: device->chip = &nv11_chipset; break;
+		case 0x015: device->chip = &nv15_chipset; break;
+		case 0x017: device->chip = &nv17_chipset; break;
+		case 0x018: device->chip = &nv18_chipset; break;
+		case 0x01a: device->chip = &nv1a_chipset; break;
+		case 0x01f: device->chip = &nv1f_chipset; break;
+		case 0x020: device->chip = &nv20_chipset; break;
+		case 0x025: device->chip = &nv25_chipset; break;
+		case 0x028: device->chip = &nv28_chipset; break;
+		case 0x02a: device->chip = &nv2a_chipset; break;
+		case 0x030: device->chip = &nv30_chipset; break;
+		case 0x031: device->chip = &nv31_chipset; break;
+		case 0x034: device->chip = &nv34_chipset; break;
+		case 0x035: device->chip = &nv35_chipset; break;
+		case 0x036: device->chip = &nv36_chipset; break;
+		case 0x040: device->chip = &nv40_chipset; break;
+		case 0x041: device->chip = &nv41_chipset; break;
+		case 0x042: device->chip = &nv42_chipset; break;
+		case 0x043: device->chip = &nv43_chipset; break;
+		case 0x044: device->chip = &nv44_chipset; break;
+		case 0x045: device->chip = &nv45_chipset; break;
+		case 0x046: device->chip = &nv46_chipset; break;
+		case 0x047: device->chip = &nv47_chipset; break;
+		case 0x049: device->chip = &nv49_chipset; break;
+		case 0x04a: device->chip = &nv4a_chipset; break;
+		case 0x04b: device->chip = &nv4b_chipset; break;
+		case 0x04c: device->chip = &nv4c_chipset; break;
+		case 0x04e: device->chip = &nv4e_chipset; break;
+		case 0x050: device->chip = &nv50_chipset; break;
+		case 0x063: device->chip = &nv63_chipset; break;
+		case 0x067: device->chip = &nv67_chipset; break;
+		case 0x068: device->chip = &nv68_chipset; break;
+		case 0x084: device->chip = &nv84_chipset; break;
+		case 0x086: device->chip = &nv86_chipset; break;
+		case 0x092: device->chip = &nv92_chipset; break;
+		case 0x094: device->chip = &nv94_chipset; break;
+		case 0x096: device->chip = &nv96_chipset; break;
+		case 0x098: device->chip = &nv98_chipset; break;
+		case 0x0a0: device->chip = &nva0_chipset; break;
+		case 0x0a3: device->chip = &nva3_chipset; break;
+		case 0x0a5: device->chip = &nva5_chipset; break;
+		case 0x0a8: device->chip = &nva8_chipset; break;
+		case 0x0aa: device->chip = &nvaa_chipset; break;
+		case 0x0ac: device->chip = &nvac_chipset; break;
+		case 0x0af: device->chip = &nvaf_chipset; break;
+		case 0x0c0: device->chip = &nvc0_chipset; break;
+		case 0x0c1: device->chip = &nvc1_chipset; break;
+		case 0x0c3: device->chip = &nvc3_chipset; break;
+		case 0x0c4: device->chip = &nvc4_chipset; break;
+		case 0x0c8: device->chip = &nvc8_chipset; break;
+		case 0x0ce: device->chip = &nvce_chipset; break;
+		case 0x0cf: device->chip = &nvcf_chipset; break;
+		case 0x0d7: device->chip = &nvd7_chipset; break;
+		case 0x0d9: device->chip = &nvd9_chipset; break;
+		case 0x0e4: device->chip = &nve4_chipset; break;
+		case 0x0e6: device->chip = &nve6_chipset; break;
+		case 0x0e7: device->chip = &nve7_chipset; break;
+		case 0x0ea: device->chip = &nvea_chipset; break;
+		case 0x0f0: device->chip = &nvf0_chipset; break;
+		case 0x0f1: device->chip = &nvf1_chipset; break;
+		case 0x106: device->chip = &nv106_chipset; break;
+		case 0x108: device->chip = &nv108_chipset; break;
+		case 0x117: device->chip = &nv117_chipset; break;
+		case 0x124: device->chip = &nv124_chipset; break;
+		case 0x126: device->chip = &nv126_chipset; break;
+		case 0x12b: device->chip = &nv12b_chipset; break;
 		default:
-			ret = -EINVAL;
-			break;
+			nvdev_error(device, "unknown chipset (%08x)\n", boot0);
+			goto done;
 		}
 
-		if (ret) {
-			nv_error(device, "unknown chipset, 0x%08x\n", boot0);
-			return ret;
-		}
-
-		nv_info(device, "BOOT0  : 0x%08x\n", boot0);
-		nv_info(device, "Chipset: %s (NV%02X)\n",
-			device->cname, device->chipset);
-		nv_info(device, "Family : NV%02X\n", device->card_type);
+		nvdev_info(device, "NVIDIA %s (%08x)\n",
+			   device->chip->name, boot0);
 
 		/* determine frequency of timing crystal */
 		if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
@@ -436,300 +2480,89 @@
 		case 0x00400000: device->crystal = 27000; break;
 		case 0x00400040: device->crystal = 25000; break;
 		}
-
-		nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
-	} else
-	if ( (args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY)) {
-		device->cname = "NULL";
-		device->oclass[NVDEV_SUBDEV_VBIOS] = &nvkm_bios_oclass;
+	} else {
+		device->chip = &null_chipset;
 	}
 
-	if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) &&
-	    !nv_subdev(device)->mmio) {
-		nv_subdev(device)->mmio  = ioremap(mmio_base, mmio_size);
-		if (!nv_subdev(device)->mmio) {
-			nv_error(device, "unable to map device registers\n");
+	if (!device->name)
+		device->name = device->chip->name;
+
+	if (mmio) {
+		device->pri = ioremap(mmio_base, mmio_size);
+		if (!device->pri) {
+			nvdev_error(device, "unable to map PRI\n");
 			return -ENOMEM;
 		}
 	}
 
-	/* ensure requested subsystems are available for use */
-	for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) {
-		if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
-			continue;
+	mutex_init(&device->mutex);
 
-		if (device->subdev[i]) {
-			nvkm_object_ref(device->subdev[i], &devobj->subdev[i]);
+	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
+#define _(s,m) case s:                                                         \
+	if (device->chip->m && (subdev_mask & (1ULL << (s)))) {                \
+		ret = device->chip->m(device, (s), &device->m);                \
+		if (ret) {                                                     \
+			subdev = nvkm_device_subdev(device, (s));              \
+			nvkm_subdev_del(&subdev);                              \
+			device->m = NULL;                                      \
+			if (ret != -ENODEV) {                                  \
+				nvdev_error(device, "%s ctor failed, %d\n",    \
+					    nvkm_subdev_name[s], ret);         \
+				goto done;                                     \
+			}                                                      \
+		}                                                              \
+	}                                                                      \
+	break
+		switch (i) {
+		_(NVKM_SUBDEV_BAR    ,     bar);
+		_(NVKM_SUBDEV_VBIOS  ,    bios);
+		_(NVKM_SUBDEV_BUS    ,     bus);
+		_(NVKM_SUBDEV_CLK    ,     clk);
+		_(NVKM_SUBDEV_DEVINIT, devinit);
+		_(NVKM_SUBDEV_FB     ,      fb);
+		_(NVKM_SUBDEV_FUSE   ,    fuse);
+		_(NVKM_SUBDEV_GPIO   ,    gpio);
+		_(NVKM_SUBDEV_I2C    ,     i2c);
+		_(NVKM_SUBDEV_IBUS   ,    ibus);
+		_(NVKM_SUBDEV_INSTMEM,    imem);
+		_(NVKM_SUBDEV_LTC    ,     ltc);
+		_(NVKM_SUBDEV_MC     ,      mc);
+		_(NVKM_SUBDEV_MMU    ,     mmu);
+		_(NVKM_SUBDEV_MXM    ,     mxm);
+		_(NVKM_SUBDEV_PCI    ,     pci);
+		_(NVKM_SUBDEV_PMU    ,     pmu);
+		_(NVKM_SUBDEV_THERM  ,   therm);
+		_(NVKM_SUBDEV_TIMER  ,   timer);
+		_(NVKM_SUBDEV_VOLT   ,    volt);
+		_(NVKM_ENGINE_BSP    ,     bsp);
+		_(NVKM_ENGINE_CE0    ,   ce[0]);
+		_(NVKM_ENGINE_CE1    ,   ce[1]);
+		_(NVKM_ENGINE_CE2    ,   ce[2]);
+		_(NVKM_ENGINE_CIPHER ,  cipher);
+		_(NVKM_ENGINE_DISP   ,    disp);
+		_(NVKM_ENGINE_DMAOBJ ,     dma);
+		_(NVKM_ENGINE_FIFO   ,    fifo);
+		_(NVKM_ENGINE_GR     ,      gr);
+		_(NVKM_ENGINE_IFB    ,     ifb);
+		_(NVKM_ENGINE_ME     ,      me);
+		_(NVKM_ENGINE_MPEG   ,    mpeg);
+		_(NVKM_ENGINE_MSENC  ,   msenc);
+		_(NVKM_ENGINE_MSPDEC ,  mspdec);
+		_(NVKM_ENGINE_MSPPP  ,   msppp);
+		_(NVKM_ENGINE_MSVLD  ,   msvld);
+		_(NVKM_ENGINE_PM     ,      pm);
+		_(NVKM_ENGINE_SEC    ,     sec);
+		_(NVKM_ENGINE_SW     ,      sw);
+		_(NVKM_ENGINE_VIC    ,     vic);
+		_(NVKM_ENGINE_VP     ,      vp);
+		default:
+			WARN_ON(1);
 			continue;
 		}
-
-		ret = nvkm_object_ctor(nv_object(device), NULL, oclass,
-				       NULL, i, &devobj->subdev[i]);
-		if (ret == -ENODEV)
-			continue;
-		if (ret)
-			return ret;
-
-		device->subdev[i] = devobj->subdev[i];
-
-		/* note: can't init *any* subdevs until devinit has been run
-		 * due to not knowing exactly what the vbios init tables will
-		 * mess with.  devinit also can't be run until all of its
-		 * dependencies have been created.
-		 *
-		 * this code delays init of any subdev until all of devinit's
-		 * dependencies have been created, and then initialises each
-		 * subdev in turn as they're created.
-		 */
-		while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
-			struct nvkm_object *subdev = devobj->subdev[c++];
-			if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
-				ret = nvkm_object_inc(subdev);
-				if (ret)
-					return ret;
-				atomic_dec(&nv_object(device)->usecount);
-			} else
-			if (subdev) {
-				nvkm_subdev_reset(subdev);
-			}
-		}
-	}
-
-	return 0;
-}
-
-static struct nvkm_ofuncs
-nvkm_devobj_ofuncs = {
-	.ctor = nvkm_devobj_ctor,
-	.dtor = nvkm_devobj_dtor,
-	.init = _nvkm_parent_init,
-	.fini = _nvkm_parent_fini,
-	.mthd = nvkm_devobj_mthd,
-};
-
-/******************************************************************************
- * nvkm_device: engine functions
- *****************************************************************************/
-
-struct nvkm_device *
-nv_device(void *obj)
-{
-	struct nvkm_object *device = nv_object(obj);
-	if (device->engine == NULL) {
-		while (device && device->parent)
-			device = device->parent;
-	} else {
-		device = &nv_object(obj)->engine->subdev.object;
-		if (device && device->parent)
-			device = device->parent;
-	}
-#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
-	if (unlikely(!device))
-		nv_assert("BAD CAST -> NvDevice, 0x%08x\n", nv_hclass(obj));
-#endif
-	return (void *)device;
-}
-
-static struct nvkm_oclass
-nvkm_device_sclass[] = {
-	{ 0x0080, &nvkm_devobj_ofuncs },
-	{}
-};
-
-static int
-nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
-		       struct nvkm_notify *notify)
-{
-	if (!WARN_ON(size != 0)) {
-		notify->size  = 0;
-		notify->types = 1;
-		notify->index = 0;
-		return 0;
-	}
-	return -EINVAL;
-}
-
-static const struct nvkm_event_func
-nvkm_device_event_func = {
-	.ctor = nvkm_device_event_ctor,
-};
-
-static int
-nvkm_device_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nvkm_device *device = (void *)object;
-	struct nvkm_object *subdev;
-	int ret, i;
-
-	for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
-		if ((subdev = device->subdev[i])) {
-			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
-				ret = nvkm_object_dec(subdev, suspend);
-				if (ret && suspend)
-					goto fail;
-			}
-		}
-	}
-
-	ret = nvkm_acpi_fini(device, suspend);
-fail:
-	for (; ret && i < NVDEV_SUBDEV_NR; i++) {
-		if ((subdev = device->subdev[i])) {
-			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
-				ret = nvkm_object_inc(subdev);
-				if (ret) {
-					/* XXX */
-				}
-			}
-		}
-	}
-
-	return ret;
-}
-
-static int
-nvkm_device_init(struct nvkm_object *object)
-{
-	struct nvkm_device *device = (void *)object;
-	struct nvkm_object *subdev;
-	int ret, i = 0;
-
-	ret = nvkm_acpi_init(device);
-	if (ret)
-		goto fail;
-
-	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
-		if ((subdev = device->subdev[i])) {
-			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
-				ret = nvkm_object_inc(subdev);
-				if (ret)
-					goto fail;
-			} else {
-				nvkm_subdev_reset(subdev);
-			}
-		}
+#undef _
 	}
 
 	ret = 0;
-fail:
-	for (--i; ret && i >= 0; i--) {
-		if ((subdev = device->subdev[i])) {
-			if (!nv_iclass(subdev, NV_ENGINE_CLASS))
-				nvkm_object_dec(subdev, false);
-		}
-	}
-
-	if (ret)
-		nvkm_acpi_fini(device, false);
-	return ret;
-}
-
-static void
-nvkm_device_dtor(struct nvkm_object *object)
-{
-	struct nvkm_device *device = (void *)object;
-
-	nvkm_event_fini(&device->event);
-
-	mutex_lock(&nv_devices_mutex);
-	list_del(&device->head);
-	mutex_unlock(&nv_devices_mutex);
-
-	if (nv_subdev(device)->mmio)
-		iounmap(nv_subdev(device)->mmio);
-
-	nvkm_engine_destroy(&device->engine);
-}
-
-resource_size_t
-nv_device_resource_start(struct nvkm_device *device, unsigned int bar)
-{
-	if (nv_device_is_pci(device)) {
-		return pci_resource_start(device->pdev, bar);
-	} else {
-		struct resource *res;
-		res = platform_get_resource(device->platformdev,
-					    IORESOURCE_MEM, bar);
-		if (!res)
-			return 0;
-		return res->start;
-	}
-}
-
-resource_size_t
-nv_device_resource_len(struct nvkm_device *device, unsigned int bar)
-{
-	if (nv_device_is_pci(device)) {
-		return pci_resource_len(device->pdev, bar);
-	} else {
-		struct resource *res;
-		res = platform_get_resource(device->platformdev,
-					    IORESOURCE_MEM, bar);
-		if (!res)
-			return 0;
-		return resource_size(res);
-	}
-}
-
-int
-nv_device_get_irq(struct nvkm_device *device, bool stall)
-{
-	if (nv_device_is_pci(device)) {
-		return device->pdev->irq;
-	} else {
-		return platform_get_irq_byname(device->platformdev,
-					       stall ? "stall" : "nonstall");
-	}
-}
-
-static struct nvkm_oclass
-nvkm_device_oclass = {
-	.handle = NV_ENGINE(DEVICE, 0x00),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.dtor = nvkm_device_dtor,
-		.init = nvkm_device_init,
-		.fini = nvkm_device_fini,
-	},
-};
-
-int
-nvkm_device_create_(void *dev, enum nv_bus_type type, u64 name,
-		    const char *sname, const char *cfg, const char *dbg,
-		    int length, void **pobject)
-{
-	struct nvkm_device *device;
-	int ret = -EEXIST;
-
-	mutex_lock(&nv_devices_mutex);
-	list_for_each_entry(device, &nv_devices, head) {
-		if (device->handle == name)
-			goto done;
-	}
-
-	ret = nvkm_engine_create_(NULL, NULL, &nvkm_device_oclass, true,
-				  "DEVICE", "device", length, pobject);
-	device = *pobject;
-	if (ret)
-		goto done;
-
-	switch (type) {
-	case NVKM_BUS_PCI:
-		device->pdev = dev;
-		break;
-	case NVKM_BUS_PLATFORM:
-		device->platformdev = dev;
-		break;
-	}
-	device->handle = name;
-	device->cfgopt = cfg;
-	device->dbgopt = dbg;
-	device->name = sname;
-
-	nv_subdev(device)->debug = nvkm_dbgopt(device->dbgopt, "DEVICE");
-	nv_engine(device)->sclass = nvkm_device_sclass;
-	list_add(&device->head, &nv_devices);
-
-	ret = nvkm_event_init(&nvkm_device_event_func, 1, 1, &device->event);
 done:
 	mutex_unlock(&nv_devices_mutex);
 	return ret;
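
The big per-index switch in nvkm_device_ctor() gates each constructor twice: the chip table must provide one, and the caller's subdev_mask bit must be set; a constructor returning -ENODEV means "probed, not present here" and is tolerated, while any other error aborts construction. The gating logic on its own, with illustrative names:

#include <errno.h>
#include <stddef.h>

typedef int (*ctor_fn)(int idx, void **obj);

static int build_one(ctor_fn ctor, unsigned long long mask, int idx, void **obj)
{
	int ret;

	if (!ctor || !(mask & (1ULL << idx)))
		return 0;	/* skipped: no ctor in table, or masked off */
	ret = ctor(idx, obj);
	if (ret == -ENODEV) {
		*obj = NULL;	/* unit genuinely absent: not an error */
		return 0;
	}
	return ret;		/* real failure: caller aborts */
}
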
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
index 0b794b1..cf8bc06 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#include "priv.h"
+#include "ctrl.h"
 
 #include <core/client.h>
 #include <subdev/clk.h>
@@ -31,18 +31,18 @@
 #include <nvif/unpack.h>
 
 static int
-nvkm_control_mthd_pstate_info(struct nvkm_object *object, void *data, u32 size)
+nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size)
 {
 	union {
 		struct nvif_control_pstate_info_v0 v0;
 	} *args = data;
-	struct nvkm_clk *clk = nvkm_clk(object);
+	struct nvkm_clk *clk = ctrl->device->clk;
 	int ret;
 
-	nv_ioctl(object, "control pstate info size %d\n", size);
+	nvif_ioctl(&ctrl->object, "control pstate info size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "control pstate info vers %d\n",
-			 args->v0.version);
+		nvif_ioctl(&ctrl->object, "control pstate info vers %d\n",
+			   args->v0.version);
 	} else
 		return ret;
 
@@ -64,24 +64,24 @@
 }
 
 static int
-nvkm_control_mthd_pstate_attr(struct nvkm_object *object, void *data, u32 size)
+nvkm_control_mthd_pstate_attr(struct nvkm_control *ctrl, void *data, u32 size)
 {
 	union {
 		struct nvif_control_pstate_attr_v0 v0;
 	} *args = data;
-	struct nvkm_clk *clk = nvkm_clk(object);
-	struct nvkm_domain *domain;
+	struct nvkm_clk *clk = ctrl->device->clk;
+	const struct nvkm_domain *domain;
 	struct nvkm_pstate *pstate;
 	struct nvkm_cstate *cstate;
 	int i = 0, j = -1;
 	u32 lo, hi;
 	int ret;
 
-	nv_ioctl(object, "control pstate attr size %d\n", size);
+	nvif_ioctl(&ctrl->object, "control pstate attr size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "control pstate attr vers %d state %d "
-				 "index %d\n",
-			 args->v0.version, args->v0.state, args->v0.index);
+		nvif_ioctl(&ctrl->object,
+			   "control pstate attr vers %d state %d index %d\n",
+			   args->v0.version, args->v0.state, args->v0.index);
 		if (!clk)
 			return -ENODEV;
 		if (args->v0.state < NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT)
@@ -116,7 +116,7 @@
 
 		args->v0.state = pstate->pstate;
 	} else {
-		lo = max(clk->read(clk, domain->name), 0);
+		lo = max(nvkm_clk_read(clk, domain->name), 0);
 		hi = lo;
 	}
 
@@ -137,19 +137,19 @@
 }
 
 static int
-nvkm_control_mthd_pstate_user(struct nvkm_object *object, void *data, u32 size)
+nvkm_control_mthd_pstate_user(struct nvkm_control *ctrl, void *data, u32 size)
 {
 	union {
 		struct nvif_control_pstate_user_v0 v0;
 	} *args = data;
-	struct nvkm_clk *clk = nvkm_clk(object);
+	struct nvkm_clk *clk = ctrl->device->clk;
 	int ret;
 
-	nv_ioctl(object, "control pstate user size %d\n", size);
+	nvif_ioctl(&ctrl->object, "control pstate user size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "control pstate user vers %d ustate %d "
-				 "pwrsrc %d\n", args->v0.version,
-			 args->v0.ustate, args->v0.pwrsrc);
+		nvif_ioctl(&ctrl->object,
+			   "control pstate user vers %d ustate %d pwrsrc %d\n",
+			   args->v0.version, args->v0.ustate, args->v0.pwrsrc);
 		if (!clk)
 			return -ENODEV;
 	} else
@@ -168,32 +168,44 @@
 static int
 nvkm_control_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
 {
+	struct nvkm_control *ctrl = nvkm_control(object);
 	switch (mthd) {
 	case NVIF_CONTROL_PSTATE_INFO:
-		return nvkm_control_mthd_pstate_info(object, data, size);
+		return nvkm_control_mthd_pstate_info(ctrl, data, size);
 	case NVIF_CONTROL_PSTATE_ATTR:
-		return nvkm_control_mthd_pstate_attr(object, data, size);
+		return nvkm_control_mthd_pstate_attr(ctrl, data, size);
 	case NVIF_CONTROL_PSTATE_USER:
-		return nvkm_control_mthd_pstate_user(object, data, size);
+		return nvkm_control_mthd_pstate_user(ctrl, data, size);
 	default:
 		break;
 	}
 	return -EINVAL;
 }
 
-static struct nvkm_ofuncs
-nvkm_control_ofuncs = {
-	.ctor = _nvkm_object_ctor,
-	.dtor = nvkm_object_destroy,
-	.init = nvkm_object_init,
-	.fini = nvkm_object_fini,
+static const struct nvkm_object_func
+nvkm_control = {
 	.mthd = nvkm_control_mthd,
 };
 
-struct nvkm_oclass
-nvkm_control_oclass[] = {
-	{ .handle = NVIF_IOCTL_NEW_V0_CONTROL,
-	  .ofuncs = &nvkm_control_ofuncs
-	},
-	{}
+static int
+nvkm_control_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
+		 void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_control *ctrl;
+
+	if (!(ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &ctrl->object;
+	ctrl->device = device;
+
+	nvkm_object_ctor(&nvkm_control, oclass, &ctrl->object);
+	return 0;
+}
+
+const struct nvkm_device_oclass
+nvkm_control_oclass = {
+	.base.oclass = NVIF_IOCTL_NEW_V0_CONTROL,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = nvkm_control_new,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
new file mode 100644
index 0000000..20249d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
@@ -0,0 +1,12 @@
+#ifndef __NVKM_DEVICE_CTRL_H__
+#define __NVKM_DEVICE_CTRL_H__
+#define nvkm_control(p) container_of((p), struct nvkm_control, object)
+#include <core/device.h>
+
+struct nvkm_control {
+	struct nvkm_object object;
+	struct nvkm_device *device;
+};
+
+extern const struct nvkm_device_oclass nvkm_control_oclass;
+#endif
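
The nvkm_control(p) macro in the new header is the standard container_of() downcast: given a pointer to the embedded nvkm_object, recover the enclosing nvkm_control (nvkm_control_mthd() uses it above). A minimal standalone equivalent:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct object { int handle; };
struct control { struct object object; void *device; };

static struct control *to_control(struct object *p)
{
	return container_of(p, struct control, object);
}

int main(void)
{
	struct control ctrl;
	return to_control(&ctrl.object) == &ctrl ? 0 : 1;
}
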
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c
deleted file mode 100644
index 82b38d7..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gf100.c
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/ltc.h>
-#include <subdev/ibus.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/pmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/mspdec.h>
-#include <engine/bsp.h>
-#include <engine/msvld.h>
-#include <engine/msppp.h>
-#include <engine/ce.h>
-#include <engine/disp.h>
-#include <engine/pm.h>
-
-int
-gf100_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0xc0:
-		device->cname = "GF100";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf100_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf100_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gf100_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xc4:
-		device->cname = "GF104";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf100_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gf100_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xc3:
-		device->cname = "GF106";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xce:
-		device->cname = "GF114";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf100_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gf100_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xcf:
-		device->cname = "GF116";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xc1:
-		device->cname = "GF108";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf108_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xc8:
-		device->cname = "GF110";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf100_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf100_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf100_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf110_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gf100_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xd9:
-		device->cname = "GF119";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gf110_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gf110_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf110_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf119_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gf110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	case 0xd7:
-		device->cname = "GF117";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gf110_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gf117_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gf100_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gf100_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gf100_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gf100_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gf100_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gf117_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gf100_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gf100_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gf100_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gf110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gf100_pm_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Fermi chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
deleted file mode 100644
index 6a9483f..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gk104.c
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/ltc.h>
-#include <subdev/ibus.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/pmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-#include <engine/ce.h>
-#include <engine/bsp.h>
-#include <engine/msvld.h>
-#include <engine/mspdec.h>
-#include <engine/msppp.h>
-#include <engine/pm.h>
-
-int
-gk104_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0xe4:
-		device->cname = "GK104";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk104_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk104_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk104_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk104_pm_oclass;
-		break;
-	case 0xe7:
-		device->cname = "GK107";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gf110_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk104_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk104_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk104_pm_oclass;
-		break;
-	case 0xe6:
-		device->cname = "GK106";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk104_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk104_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk104_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk104_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk104_pm_oclass;
-		break;
-	case 0xea:
-		device->cname = "GK20A";
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk20a_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &gk20a_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk20a_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk20a_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] = gk20a_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gk20a_bar_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk20a_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk20a_gr_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk104_pm_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &gk20a_volt_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk20a_pmu_oclass;
-		break;
-	case 0xf0:
-		device->cname = "GK110";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk110_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk104_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk110_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk110_pm_oclass;
-		break;
-	case 0xf1:
-		device->cname = "GK110B";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gf110_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gf106_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk110_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk104_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk110b_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] = &gk110_pm_oclass;
-		break;
-	case 0x106:
-		device->cname = "GK208B";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk208_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk208_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk208_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		break;
-	case 0x108:
-		device->cname = "GK208";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gk104_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gf100_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gf110_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gf100_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gk104_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gk104_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk208_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk208_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gk208_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gk110_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Kepler chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
deleted file mode 100644
index 70abf1e..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/ltc.h>
-#include <subdev/ibus.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/pmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-#include <engine/ce.h>
-#include <engine/bsp.h>
-#include <engine/msvld.h>
-#include <engine/mspdec.h>
-#include <engine/msppp.h>
-#include <engine/pm.h>
-
-int
-gm100_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x117:
-		device->cname = "GM107";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gf110_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gm107_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gm107_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gm107_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &gk20a_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gm107_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gm107_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk208_pmu_oclass;
-
-#if 0
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-#endif
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gk208_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gm107_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gm107_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gk104_ce0_oclass;
-#if 0
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gk104_ce1_oclass;
-#endif
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gk104_ce2_oclass;
-#if 0
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-#endif
-		break;
-	case 0x124:
-		device->cname = "GM204";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gm204_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gm107_fuse_oclass;
-#if 0
-		/* looks to be some non-trivial changes */
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		/* priv ring says no to 0x10eb14 writes */
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gm107_therm_oclass;
-#endif
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gm204_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &gk20a_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gm107_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gm107_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk208_pmu_oclass;
-#if 0
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-#endif
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gm204_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gm204_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gm204_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gm204_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gm204_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gm204_ce2_oclass;
-#if 0
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-#endif
-		break;
-	case 0x126:
-		device->cname = "GM206";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  gk104_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  gm204_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] = &gm107_fuse_oclass;
-#if 0
-		/* looks to be some non-trivial changes */
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gk104_clk_oclass;
-		/* priv ring says no to 0x10eb14 writes */
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gm107_therm_oclass;
-#endif
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gm204_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  gk20a_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  gf100_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &gk20a_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gm107_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_LTC    ] =  gm107_ltc_oclass;
-		device->oclass[NVDEV_SUBDEV_IBUS   ] = &gk104_ibus_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &gf100_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &gf100_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gk208_pmu_oclass;
-#if 0
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-#endif
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  gf110_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  gm204_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  gf100_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] =  gm206_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gm204_disp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gm204_ce0_oclass;
-		device->oclass[NVDEV_ENGINE_CE1    ] = &gm204_ce1_oclass;
-		device->oclass[NVDEV_ENGINE_CE2    ] = &gm204_ce2_oclass;
-#if 0
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &gk104_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &gf100_msppp_oclass;
-#endif
-		break;
-	default:
-		nv_fatal(device, "unknown Maxwell chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c
deleted file mode 100644
index 5a2ae04..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv04.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-
-int
-nv04_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x04:
-		device->cname = "NV04";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv04_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv04_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv04_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv04_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x05:
-		device->cname = "NV05";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv05_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv04_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv04_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv04_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown RIVA chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c
deleted file mode 100644
index 94a1ca4..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv10.c
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-
-int
-nv10_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x10:
-		device->cname = "NV10";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x15:
-		device->cname = "NV15";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x16:
-		device->cname = "NV16";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x1a:
-		device->cname = "nForce";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv1a_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x11:
-		device->cname = "NV11";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv10_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x17:
-		device->cname = "NV17";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x1f:
-		device->cname = "nForce2";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv1a_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x18:
-		device->cname = "NV18";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Celsius chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c
deleted file mode 100644
index d5ec893..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv20.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/disp.h>
-
-int
-nv20_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x20:
-		device->cname = "NV20";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv20_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv20_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x25:
-		device->cname = "NV25";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x28:
-		device->cname = "NV28";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x2a:
-		device->cname = "NV2A";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv25_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv2a_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Kelvin chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c
deleted file mode 100644
index dda0962..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv30.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/mpeg.h>
-#include <engine/disp.h>
-
-int
-nv30_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x30:
-		device->cname = "NV30";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv30_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv30_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x35:
-		device->cname = "NV35";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv04_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv35_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_gr_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x31:
-		device->cname = "NV31";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv30_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv30_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x36:
-		device->cname = "NV36";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv20_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv36_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	case 0x34:
-		device->cname = "NV34";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv04_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv10_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv04_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv10_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv04_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv17_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv34_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Rankine chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c
deleted file mode 100644
index c630136..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv40.c
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/mmu.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/mpeg.h>
-#include <engine/disp.h>
-#include <engine/pm.h>
-
-int
-nv40_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x40:
-		device->cname = "NV40";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv40_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x41:
-		device->cname = "NV41";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x42:
-		device->cname = "NV42";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x43:
-		device->cname = "NV43";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv41_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x45:
-		device->cname = "NV45";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv40_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv04_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x47:
-		device->cname = "G70";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv47_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x49:
-		device->cname = "G71";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv49_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x4b:
-		device->cname = "G73";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv40_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv49_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv41_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x44:
-		device->cname = "NV44";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv44_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x46:
-		device->cname = "G72";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x4a:
-		device->cname = "NV44A";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv44_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv44_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x4c:
-		device->cname = "C61";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x4e:
-		device->cname = "C51";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv4e_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv4e_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x63:
-		device->cname = "C73";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x67:
-		device->cname = "C67";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	case 0x68:
-		device->cname = "C68";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv04_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &nv40_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv1a_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv4c_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv31_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv46_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv40_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv44_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv04_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv40_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv10_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv44_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv04_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv40_pm_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Curie chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c
deleted file mode 100644
index 249b844..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/nv50.c
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <subdev/bios.h>
-#include <subdev/bus.h>
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-#include <subdev/fuse.h>
-#include <subdev/clk.h>
-#include <subdev/therm.h>
-#include <subdev/mxm.h>
-#include <subdev/devinit.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
-#include <subdev/pmu.h>
-#include <subdev/volt.h>
-
-#include <engine/dmaobj.h>
-#include <engine/fifo.h>
-#include <engine/sw.h>
-#include <engine/gr.h>
-#include <engine/mpeg.h>
-#include <engine/vp.h>
-#include <engine/cipher.h>
-#include <engine/sec.h>
-#include <engine/bsp.h>
-#include <engine/msvld.h>
-#include <engine/mspdec.h>
-#include <engine/msppp.h>
-#include <engine/ce.h>
-#include <engine/disp.h>
-#include <engine/pm.h>
-
-int
-nv50_identify(struct nvkm_device *device)
-{
-	switch (device->chipset) {
-	case 0x50:
-		device->cname = "G80";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  nv50_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  nv50_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  nv50_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  nv50_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv50_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  nv50_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  nv50_pm_oclass;
-		break;
-	case 0x84:
-		device->cname = "G84";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g84_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0x86:
-		device->cname = "G86";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g84_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0x92:
-		device->cname = "G92";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  nv50_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  nv50_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g84_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0x94:
-		device->cname = "G94";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g94_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g94_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0x96:
-		device->cname = "G96";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g94_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g94_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0x98:
-		device->cname = "G98";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g98_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_SEC    ] = &g98_sec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g94_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0xa0:
-		device->cname = "G200";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  nv50_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  g84_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g84_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  g84_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_VP     ] = &g84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_CIPHER ] = &g84_cipher_oclass;
-		device->oclass[NVDEV_ENGINE_BSP    ] = &g84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt200_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0xaa:
-		device->cname = "MCP77/MCP78";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  mcp77_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g98_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  mcp77_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_SEC    ] = &g98_sec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g94_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0xac:
-		device->cname = "MCP79/MCP7A";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] =  mcp77_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &g84_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  g98_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  mcp77_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_SEC    ] = &g98_sec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  g94_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  g84_pm_oclass;
-		break;
-	case 0xa3:
-		device->cname = "GT215";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gt215_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gt215_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gt215_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gt215_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MPEG   ] = &g84_mpeg_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gt215_ce_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  gt215_pm_oclass;
-		break;
-	case 0xa5:
-		device->cname = "GT216";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gt215_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gt215_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gt215_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gt215_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gt215_ce_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  gt215_pm_oclass;
-		break;
-	case 0xa8:
-		device->cname = "GT218";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gt215_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  gt215_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  gt215_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gt215_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gt215_ce_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  gt215_pm_oclass;
-		break;
-	case 0xaf:
-		device->cname = "MCP89";
-		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nvkm_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] =  g94_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] =  g94_i2c_oclass;
-		device->oclass[NVDEV_SUBDEV_FUSE   ] =  &nv50_fuse_oclass;
-		device->oclass[NVDEV_SUBDEV_CLK    ] = &gt215_clk_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &gt215_therm_oclass;
-		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
-		device->oclass[NVDEV_SUBDEV_DEVINIT] =  mcp89_devinit_oclass;
-		device->oclass[NVDEV_SUBDEV_MC     ] =  g98_mc_oclass;
-		device->oclass[NVDEV_SUBDEV_BUS    ] =  g94_bus_oclass;
-		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-		device->oclass[NVDEV_SUBDEV_FB     ] =  mcp89_fb_oclass;
-		device->oclass[NVDEV_SUBDEV_INSTMEM] =  nv50_instmem_oclass;
-		device->oclass[NVDEV_SUBDEV_MMU    ] = &nv50_mmu_oclass;
-		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
-		device->oclass[NVDEV_SUBDEV_PMU    ] =  gt215_pmu_oclass;
-		device->oclass[NVDEV_SUBDEV_VOLT   ] = &nv40_volt_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] =  nv50_dmaeng_oclass;
-		device->oclass[NVDEV_ENGINE_FIFO   ] =  g84_fifo_oclass;
-		device->oclass[NVDEV_ENGINE_SW     ] =  nv50_sw_oclass;
-		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_gr_oclass;
-		device->oclass[NVDEV_ENGINE_MSPDEC ] = &g98_mspdec_oclass;
-		device->oclass[NVDEV_ENGINE_MSVLD  ] = &g98_msvld_oclass;
-		device->oclass[NVDEV_ENGINE_MSPPP  ] = &g98_msppp_oclass;
-		device->oclass[NVDEV_ENGINE_CE0    ] = &gt215_ce_oclass;
-		device->oclass[NVDEV_ENGINE_DISP   ] =  gt215_disp_oclass;
-		device->oclass[NVDEV_ENGINE_PM     ] =  gt215_pm_oclass;
-		break;
-	default:
-		nv_fatal(device, "unknown Tesla chipset\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
new file mode 100644
index 0000000..9dd1cac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -0,0 +1,1685 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include <core/pci.h>
+#include "priv.h"
+
+struct nvkm_device_pci_device {
+	u16 device;
+	const char *name;
+	const struct nvkm_device_pci_vendor *vendor;
+};
+
+struct nvkm_device_pci_vendor {
+	u16 vendor;
+	u16 device;
+	const char *name;
+	const struct nvkm_device_quirk quirk;
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0189[] = {
+	/* Apple iMac G4 NV18 */
+	{ 0x10de, 0x0010, NULL, { .tv_gpio = 4 } },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_01f0[] = {
+	/* MSI nForce2 IGP */
+	{ 0x1462, 0x5710, NULL, { .tv_pin_mask = 0xc } },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0322[] = {
+	/* Zotac FX5200 */
+	{ 0x19da, 0x1035, NULL, { .tv_pin_mask = 0xc } },
+	{ 0x19da, 0x2035, NULL, { .tv_pin_mask = 0xc } },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_05e7[] = {
+	{ 0x10de, 0x0595, "Tesla T10 Processor" },
+	{ 0x10de, 0x068f, "Tesla T10 Processor" },
+	{ 0x10de, 0x0697, "Tesla M1060" },
+	{ 0x10de, 0x0714, "Tesla M1060" },
+	{ 0x10de, 0x0743, "Tesla M1060" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0609[] = {
+	{ 0x106b, 0x00a7, "GeForce 8800 GS" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_062e[] = {
+	{ 0x106b, 0x0605, "GeForce GT 130" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0649[] = {
+	{ 0x1043, 0x202d, "GeForce GT 220M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0652[] = {
+	{ 0x152d, 0x0850, "GeForce GT 240M LE" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0654[] = {
+	{ 0x1043, 0x14a2, "GeForce GT 320M" },
+	{ 0x1043, 0x14d2, "GeForce GT 320M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0655[] = {
+	{ 0x106b, 0x0633, "GeForce GT 120" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0656[] = {
+	{ 0x106b, 0x0693, "GeForce GT 120" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06d1[] = {
+	{ 0x10de, 0x0771, "Tesla C2050" },
+	{ 0x10de, 0x0772, "Tesla C2070" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06d2[] = {
+	{ 0x10de, 0x088f, "Tesla X2070" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06de[] = {
+	{ 0x10de, 0x0773, "Tesla S2050" },
+	{ 0x10de, 0x082f, "Tesla M2050" },
+	{ 0x10de, 0x0840, "Tesla X2070" },
+	{ 0x10de, 0x0842, "Tesla M2050" },
+	{ 0x10de, 0x0846, "Tesla M2050" },
+	{ 0x10de, 0x0866, "Tesla M2050" },
+	{ 0x10de, 0x0907, "Tesla M2050" },
+	{ 0x10de, 0x091e, "Tesla M2050" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06e8[] = {
+	{ 0x103c, 0x360b, "GeForce 9200M GE" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06f9[] = {
+	{ 0x10de, 0x060d, "Quadro FX 370 Low Profile" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_06ff[] = {
+	{ 0x10de, 0x0711, "HICx8 + Graphics" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0866[] = {
+	{ 0x106b, 0x00b1, "GeForce 9400M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0872[] = {
+	{ 0x1043, 0x1c42, "GeForce G205M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0873[] = {
+	{ 0x1043, 0x1c52, "GeForce G205M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a6e[] = {
+	{ 0x17aa, 0x3607, "Second Generation ION" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a70[] = {
+	{ 0x17aa, 0x3605, "Second Generation ION" },
+	{ 0x17aa, 0x3617, "Second Generation ION" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a73[] = {
+	{ 0x17aa, 0x3607, "Second Generation ION" },
+	{ 0x17aa, 0x3610, "Second Generation ION" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a74[] = {
+	{ 0x17aa, 0x903a, "GeForce G210" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a75[] = {
+	{ 0x17aa, 0x3605, "Second Generation ION" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0a7a[] = {
+	{ 0x1462, 0xaa51, "GeForce 405" },
+	{ 0x1462, 0xaa58, "GeForce 405" },
+	{ 0x1462, 0xac71, "GeForce 405" },
+	{ 0x1462, 0xac82, "GeForce 405" },
+	{ 0x1642, 0x3980, "GeForce 405" },
+	{ 0x17aa, 0x3950, "GeForce 405M" },
+	{ 0x17aa, 0x397d, "GeForce 405M" },
+	{ 0x1b0a, 0x90b4, "GeForce 405" },
+	{ 0x1bfd, 0x0003, "GeForce 405" },
+	{ 0x1bfd, 0x8006, "GeForce 405" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0dd8[] = {
+	{ 0x10de, 0x0914, "Quadro 2000D" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0de9[] = {
+	{ 0x1025, 0x0692, "GeForce GT 620M" },
+	{ 0x1025, 0x0725, "GeForce GT 620M" },
+	{ 0x1025, 0x0728, "GeForce GT 620M" },
+	{ 0x1025, 0x072b, "GeForce GT 620M" },
+	{ 0x1025, 0x072e, "GeForce GT 620M" },
+	{ 0x1025, 0x0753, "GeForce GT 620M" },
+	{ 0x1025, 0x0754, "GeForce GT 620M" },
+	{ 0x17aa, 0x3977, "GeForce GT 640M LE" },
+	{ 0x1b0a, 0x2210, "GeForce GT 635M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0dea[] = {
+	{ 0x17aa, 0x365a, "GeForce 615" },
+	{ 0x17aa, 0x365b, "GeForce 615" },
+	{ 0x17aa, 0x365e, "GeForce 615" },
+	{ 0x17aa, 0x3660, "GeForce 615" },
+	{ 0x17aa, 0x366c, "GeForce 615" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0df4[] = {
+	{ 0x152d, 0x0952, "GeForce GT 630M" },
+	{ 0x152d, 0x0953, "GeForce GT 630M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0fd2[] = {
+	{ 0x1028, 0x0595, "GeForce GT 640M LE" },
+	{ 0x1028, 0x05b2, "GeForce GT 640M LE" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0fe3[] = {
+	{ 0x103c, 0x2b16, "GeForce GT 745A" },
+	{ 0x17aa, 0x3675, "GeForce GT 745A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_104b[] = {
+	{ 0x1043, 0x844c, "GeForce GT 625" },
+	{ 0x1043, 0x846b, "GeForce GT 625" },
+	{ 0x1462, 0xb590, "GeForce GT 625" },
+	{ 0x174b, 0x0625, "GeForce GT 625" },
+	{ 0x174b, 0xa625, "GeForce GT 625" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1058[] = {
+	{ 0x103c, 0x2af1, "GeForce 610" },
+	{ 0x17aa, 0x3682, "GeForce 800A" },
+	{ 0x17aa, 0x3692, "GeForce 705A" },
+	{ 0x17aa, 0x3695, "GeForce 800A" },
+	{ 0x17aa, 0x36a8, "GeForce 800A" },
+	{ 0x17aa, 0x36ac, "GeForce 800A" },
+	{ 0x17aa, 0x36ad, "GeForce 800A" },
+	{ 0x705a, 0x3682, "GeForce 800A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_105b[] = {
+	{ 0x103c, 0x2afb, "GeForce 705A" },
+	{ 0x17aa, 0x36a1, "GeForce 800A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1091[] = {
+	{ 0x10de, 0x088e, "Tesla X2090" },
+	{ 0x10de, 0x0891, "Tesla X2090" },
+	{ 0x10de, 0x0974, "Tesla X2090" },
+	{ 0x10de, 0x098d, "Tesla X2090" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1096[] = {
+	{ 0x10de, 0x0911, "Tesla C2050" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1140[] = {
+	{ 0x1019, 0x999f, "GeForce GT 720M" },
+	{ 0x1025, 0x0600, "GeForce GT 620M" },
+	{ 0x1025, 0x0606, "GeForce GT 620M" },
+	{ 0x1025, 0x064a, "GeForce GT 620M" },
+	{ 0x1025, 0x064c, "GeForce GT 620M" },
+	{ 0x1025, 0x067a, "GeForce GT 620M" },
+	{ 0x1025, 0x0680, "GeForce GT 620M" },
+	{ 0x1025, 0x0686, "GeForce 710M" },
+	{ 0x1025, 0x0689, "GeForce 710M" },
+	{ 0x1025, 0x068b, "GeForce 710M" },
+	{ 0x1025, 0x068d, "GeForce 710M" },
+	{ 0x1025, 0x068e, "GeForce 710M" },
+	{ 0x1025, 0x0691, "GeForce 710M" },
+	{ 0x1025, 0x0692, "GeForce GT 620M" },
+	{ 0x1025, 0x0694, "GeForce GT 620M" },
+	{ 0x1025, 0x0702, "GeForce GT 620M" },
+	{ 0x1025, 0x0719, "GeForce GT 620M" },
+	{ 0x1025, 0x0725, "GeForce GT 620M" },
+	{ 0x1025, 0x0728, "GeForce GT 620M" },
+	{ 0x1025, 0x072b, "GeForce GT 620M" },
+	{ 0x1025, 0x072e, "GeForce GT 620M" },
+	{ 0x1025, 0x0732, "GeForce GT 620M" },
+	{ 0x1025, 0x0763, "GeForce GT 720M" },
+	{ 0x1025, 0x0773, "GeForce 710M" },
+	{ 0x1025, 0x0774, "GeForce 710M" },
+	{ 0x1025, 0x0776, "GeForce GT 720M" },
+	{ 0x1025, 0x077a, "GeForce 710M" },
+	{ 0x1025, 0x077b, "GeForce 710M" },
+	{ 0x1025, 0x077c, "GeForce 710M" },
+	{ 0x1025, 0x077d, "GeForce 710M" },
+	{ 0x1025, 0x077e, "GeForce 710M" },
+	{ 0x1025, 0x077f, "GeForce 710M" },
+	{ 0x1025, 0x0781, "GeForce GT 720M" },
+	{ 0x1025, 0x0798, "GeForce GT 720M" },
+	{ 0x1025, 0x0799, "GeForce GT 720M" },
+	{ 0x1025, 0x079b, "GeForce GT 720M" },
+	{ 0x1025, 0x079c, "GeForce GT 720M" },
+	{ 0x1025, 0x0807, "GeForce GT 720M" },
+	{ 0x1025, 0x0821, "GeForce 820M" },
+	{ 0x1025, 0x0823, "GeForce GT 720M" },
+	{ 0x1025, 0x0830, "GeForce GT 720M" },
+	{ 0x1025, 0x0833, "GeForce GT 720M" },
+	{ 0x1025, 0x0837, "GeForce GT 720M" },
+	{ 0x1025, 0x083e, "GeForce 820M" },
+	{ 0x1025, 0x0841, "GeForce 710M" },
+	{ 0x1025, 0x0853, "GeForce 820M" },
+	{ 0x1025, 0x0854, "GeForce 820M" },
+	{ 0x1025, 0x0855, "GeForce 820M" },
+	{ 0x1025, 0x0856, "GeForce 820M" },
+	{ 0x1025, 0x0857, "GeForce 820M" },
+	{ 0x1025, 0x0858, "GeForce 820M" },
+	{ 0x1025, 0x0863, "GeForce 820M" },
+	{ 0x1025, 0x0868, "GeForce 820M" },
+	{ 0x1025, 0x0869, "GeForce 810M" },
+	{ 0x1025, 0x0873, "GeForce 820M" },
+	{ 0x1025, 0x0878, "GeForce 820M" },
+	{ 0x1025, 0x087b, "GeForce 820M" },
+	{ 0x1025, 0x087f, "GeForce 820M" },
+	{ 0x1025, 0x0881, "GeForce 820M" },
+	{ 0x1025, 0x0885, "GeForce 820M" },
+	{ 0x1025, 0x088a, "GeForce 820M" },
+	{ 0x1025, 0x089b, "GeForce 820M" },
+	{ 0x1025, 0x0921, "GeForce 820M" },
+	{ 0x1025, 0x092e, "GeForce 810M" },
+	{ 0x1025, 0x092f, "GeForce 820M" },
+	{ 0x1025, 0x0932, "GeForce 820M" },
+	{ 0x1025, 0x093a, "GeForce 820M" },
+	{ 0x1025, 0x093c, "GeForce 820M" },
+	{ 0x1025, 0x093f, "GeForce 820M" },
+	{ 0x1025, 0x0941, "GeForce 820M" },
+	{ 0x1025, 0x0945, "GeForce 820M" },
+	{ 0x1025, 0x0954, "GeForce 820M" },
+	{ 0x1025, 0x0965, "GeForce 820M" },
+	{ 0x1028, 0x054d, "GeForce GT 630M" },
+	{ 0x1028, 0x054e, "GeForce GT 630M" },
+	{ 0x1028, 0x0554, "GeForce GT 620M" },
+	{ 0x1028, 0x0557, "GeForce GT 620M" },
+	{ 0x1028, 0x0562, "GeForce GT625M" },
+	{ 0x1028, 0x0565, "GeForce GT 630M" },
+	{ 0x1028, 0x0568, "GeForce GT 630M" },
+	{ 0x1028, 0x0590, "GeForce GT 630M" },
+	{ 0x1028, 0x0592, "GeForce GT625M" },
+	{ 0x1028, 0x0594, "GeForce GT625M" },
+	{ 0x1028, 0x0595, "GeForce GT625M" },
+	{ 0x1028, 0x05a2, "GeForce GT625M" },
+	{ 0x1028, 0x05b1, "GeForce GT625M" },
+	{ 0x1028, 0x05b3, "GeForce GT625M" },
+	{ 0x1028, 0x05da, "GeForce GT 630M" },
+	{ 0x1028, 0x05de, "GeForce GT 720M" },
+	{ 0x1028, 0x05e0, "GeForce GT 720M" },
+	{ 0x1028, 0x05e8, "GeForce GT 630M" },
+	{ 0x1028, 0x05f4, "GeForce GT 720M" },
+	{ 0x1028, 0x060f, "GeForce GT 720M" },
+	{ 0x1028, 0x062f, "GeForce GT 720M" },
+	{ 0x1028, 0x064e, "GeForce 820M" },
+	{ 0x1028, 0x0652, "GeForce 820M" },
+	{ 0x1028, 0x0653, "GeForce 820M" },
+	{ 0x1028, 0x0655, "GeForce 820M" },
+	{ 0x1028, 0x065e, "GeForce 820M" },
+	{ 0x1028, 0x0662, "GeForce 820M" },
+	{ 0x1028, 0x068d, "GeForce 820M" },
+	{ 0x1028, 0x06ad, "GeForce 820M" },
+	{ 0x1028, 0x06ae, "GeForce 820M" },
+	{ 0x1028, 0x06af, "GeForce 820M" },
+	{ 0x1028, 0x06b0, "GeForce 820M" },
+	{ 0x1028, 0x06c0, "GeForce 820M" },
+	{ 0x1028, 0x06c1, "GeForce 820M" },
+	{ 0x103c, 0x18ef, "GeForce GT 630M" },
+	{ 0x103c, 0x18f9, "GeForce GT 630M" },
+	{ 0x103c, 0x18fb, "GeForce GT 630M" },
+	{ 0x103c, 0x18fd, "GeForce GT 630M" },
+	{ 0x103c, 0x18ff, "GeForce GT 630M" },
+	{ 0x103c, 0x218a, "GeForce 820M" },
+	{ 0x103c, 0x21bb, "GeForce 820M" },
+	{ 0x103c, 0x21bc, "GeForce 820M" },
+	{ 0x103c, 0x220e, "GeForce 820M" },
+	{ 0x103c, 0x2210, "GeForce 820M" },
+	{ 0x103c, 0x2212, "GeForce 820M" },
+	{ 0x103c, 0x2214, "GeForce 820M" },
+	{ 0x103c, 0x2218, "GeForce 820M" },
+	{ 0x103c, 0x225b, "GeForce 820M" },
+	{ 0x103c, 0x225d, "GeForce 820M" },
+	{ 0x103c, 0x226d, "GeForce 820M" },
+	{ 0x103c, 0x226f, "GeForce 820M" },
+	{ 0x103c, 0x22d2, "GeForce 820M" },
+	{ 0x103c, 0x22d9, "GeForce 820M" },
+	{ 0x103c, 0x2335, "GeForce 820M" },
+	{ 0x103c, 0x2337, "GeForce 820M" },
+	{ 0x103c, 0x2aef, "GeForce GT 720A" },
+	{ 0x103c, 0x2af9, "GeForce 710A" },
+	{ 0x1043, 0x10dd, "NVS 5200M" },
+	{ 0x1043, 0x10ed, "NVS 5200M" },
+	{ 0x1043, 0x11fd, "GeForce GT 720M" },
+	{ 0x1043, 0x124d, "GeForce GT 720M" },
+	{ 0x1043, 0x126d, "GeForce GT 720M" },
+	{ 0x1043, 0x131d, "GeForce GT 720M" },
+	{ 0x1043, 0x13fd, "GeForce GT 720M" },
+	{ 0x1043, 0x14c7, "GeForce GT 720M" },
+	{ 0x1043, 0x1507, "GeForce GT 620M" },
+	{ 0x1043, 0x15ad, "GeForce 820M" },
+	{ 0x1043, 0x15ed, "GeForce 820M" },
+	{ 0x1043, 0x160d, "GeForce 820M" },
+	{ 0x1043, 0x163d, "GeForce 820M" },
+	{ 0x1043, 0x165d, "GeForce 820M" },
+	{ 0x1043, 0x166d, "GeForce 820M" },
+	{ 0x1043, 0x16cd, "GeForce 820M" },
+	{ 0x1043, 0x16dd, "GeForce 820M" },
+	{ 0x1043, 0x170d, "GeForce 820M" },
+	{ 0x1043, 0x176d, "GeForce 820M" },
+	{ 0x1043, 0x178d, "GeForce 820M" },
+	{ 0x1043, 0x179d, "GeForce 820M" },
+	{ 0x1043, 0x2132, "GeForce GT 620M" },
+	{ 0x1043, 0x2136, "NVS 5200M" },
+	{ 0x1043, 0x21ba, "GeForce GT 720M" },
+	{ 0x1043, 0x21fa, "GeForce GT 720M" },
+	{ 0x1043, 0x220a, "GeForce GT 720M" },
+	{ 0x1043, 0x221a, "GeForce GT 720M" },
+	{ 0x1043, 0x223a, "GeForce GT 710M" },
+	{ 0x1043, 0x224a, "GeForce GT 710M" },
+	{ 0x1043, 0x227a, "GeForce 820M" },
+	{ 0x1043, 0x228a, "GeForce 820M" },
+	{ 0x1043, 0x22fa, "GeForce 820M" },
+	{ 0x1043, 0x232a, "GeForce 820M" },
+	{ 0x1043, 0x233a, "GeForce 820M" },
+	{ 0x1043, 0x235a, "GeForce 820M" },
+	{ 0x1043, 0x236a, "GeForce 820M" },
+	{ 0x1043, 0x238a, "GeForce 820M" },
+	{ 0x1043, 0x8595, "GeForce GT 720M" },
+	{ 0x1043, 0x85ea, "GeForce GT 720M" },
+	{ 0x1043, 0x85eb, "GeForce 820M" },
+	{ 0x1043, 0x85ec, "GeForce 820M" },
+	{ 0x1043, 0x85ee, "GeForce GT 720M" },
+	{ 0x1043, 0x85f3, "GeForce 820M" },
+	{ 0x1043, 0x860e, "GeForce 820M" },
+	{ 0x1043, 0x861a, "GeForce 820M" },
+	{ 0x1043, 0x861b, "GeForce 820M" },
+	{ 0x1043, 0x8628, "GeForce 820M" },
+	{ 0x1043, 0x8643, "GeForce 820M" },
+	{ 0x1043, 0x864c, "GeForce 820M" },
+	{ 0x1043, 0x8652, "GeForce 820M" },
+	{ 0x1043, 0x8660, "GeForce 820M" },
+	{ 0x1043, 0x8661, "GeForce 820M" },
+	{ 0x105b, 0x0dac, "GeForce GT 720M" },
+	{ 0x105b, 0x0dad, "GeForce GT 720M" },
+	{ 0x105b, 0x0ef3, "GeForce GT 720M" },
+	{ 0x10cf, 0x17f5, "GeForce GT 720M" },
+	{ 0x1179, 0xfa01, "GeForce 710M" },
+	{ 0x1179, 0xfa02, "GeForce 710M" },
+	{ 0x1179, 0xfa03, "GeForce 710M" },
+	{ 0x1179, 0xfa05, "GeForce 710M" },
+	{ 0x1179, 0xfa11, "GeForce 710M" },
+	{ 0x1179, 0xfa13, "GeForce 710M" },
+	{ 0x1179, 0xfa18, "GeForce 710M" },
+	{ 0x1179, 0xfa19, "GeForce 710M" },
+	{ 0x1179, 0xfa21, "GeForce 710M" },
+	{ 0x1179, 0xfa23, "GeForce 710M" },
+	{ 0x1179, 0xfa2a, "GeForce 710M" },
+	{ 0x1179, 0xfa32, "GeForce 710M" },
+	{ 0x1179, 0xfa33, "GeForce 710M" },
+	{ 0x1179, 0xfa36, "GeForce 710M" },
+	{ 0x1179, 0xfa38, "GeForce 710M" },
+	{ 0x1179, 0xfa42, "GeForce 710M" },
+	{ 0x1179, 0xfa43, "GeForce 710M" },
+	{ 0x1179, 0xfa45, "GeForce 710M" },
+	{ 0x1179, 0xfa47, "GeForce 710M" },
+	{ 0x1179, 0xfa49, "GeForce 710M" },
+	{ 0x1179, 0xfa58, "GeForce 710M" },
+	{ 0x1179, 0xfa59, "GeForce 710M" },
+	{ 0x1179, 0xfa88, "GeForce 710M" },
+	{ 0x1179, 0xfa89, "GeForce 710M" },
+	{ 0x144d, 0xb092, "GeForce GT 620M" },
+	{ 0x144d, 0xc0d5, "GeForce GT 630M" },
+	{ 0x144d, 0xc0d7, "GeForce GT 620M" },
+	{ 0x144d, 0xc0e2, "NVS 5200M" },
+	{ 0x144d, 0xc0e3, "NVS 5200M" },
+	{ 0x144d, 0xc0e4, "NVS 5200M" },
+	{ 0x144d, 0xc10d, "GeForce 820M" },
+	{ 0x144d, 0xc652, "GeForce GT 620M" },
+	{ 0x144d, 0xc709, "GeForce 710M" },
+	{ 0x144d, 0xc711, "GeForce 710M" },
+	{ 0x144d, 0xc736, "GeForce 710M" },
+	{ 0x144d, 0xc737, "GeForce 710M" },
+	{ 0x144d, 0xc745, "GeForce 820M" },
+	{ 0x144d, 0xc750, "GeForce 820M" },
+	{ 0x1462, 0x10b8, "GeForce GT 710M" },
+	{ 0x1462, 0x10e9, "GeForce GT 720M" },
+	{ 0x1462, 0x1116, "GeForce 820M" },
+	{ 0x1462, 0xaa33, "GeForce 720M" },
+	{ 0x1462, 0xaaa2, "GeForce GT 720M" },
+	{ 0x1462, 0xaaa3, "GeForce 820M" },
+	{ 0x1462, 0xacb2, "GeForce GT 720M" },
+	{ 0x1462, 0xacc1, "GeForce GT 720M" },
+	{ 0x1462, 0xae61, "GeForce 720M" },
+	{ 0x1462, 0xae65, "GeForce GT 720M" },
+	{ 0x1462, 0xae6a, "GeForce 820M" },
+	{ 0x1462, 0xae71, "GeForce GT 720M" },
+	{ 0x14c0, 0x0083, "GeForce 820M" },
+	{ 0x152d, 0x0926, "GeForce 620M" },
+	{ 0x152d, 0x0982, "GeForce GT 630M" },
+	{ 0x152d, 0x0983, "GeForce GT 630M" },
+	{ 0x152d, 0x1005, "GeForce GT820M" },
+	{ 0x152d, 0x1012, "GeForce 710M" },
+	{ 0x152d, 0x1019, "GeForce 820M" },
+	{ 0x152d, 0x1030, "GeForce GT 630M" },
+	{ 0x152d, 0x1055, "GeForce 710M" },
+	{ 0x152d, 0x1067, "GeForce GT 720M" },
+	{ 0x152d, 0x1092, "GeForce 820M" },
+	{ 0x17aa, 0x2200, "NVS 5200M" },
+	{ 0x17aa, 0x2213, "GeForce GT 720M" },
+	{ 0x17aa, 0x2220, "GeForce GT 720M" },
+	{ 0x17aa, 0x309c, "GeForce GT 720A" },
+	{ 0x17aa, 0x30b4, "GeForce 820A" },
+	{ 0x17aa, 0x30b7, "GeForce 720A" },
+	{ 0x17aa, 0x30e4, "GeForce 820A" },
+	{ 0x17aa, 0x361b, "GeForce 820A" },
+	{ 0x17aa, 0x361c, "GeForce 820A" },
+	{ 0x17aa, 0x361d, "GeForce 820A" },
+	{ 0x17aa, 0x3656, "GeForce GT620M" },
+	{ 0x17aa, 0x365a, "GeForce 705M" },
+	{ 0x17aa, 0x365e, "GeForce 800M" },
+	{ 0x17aa, 0x3661, "GeForce 820A" },
+	{ 0x17aa, 0x366c, "GeForce 800M" },
+	{ 0x17aa, 0x3685, "GeForce 800M" },
+	{ 0x17aa, 0x3686, "GeForce 800M" },
+	{ 0x17aa, 0x3687, "GeForce 705A" },
+	{ 0x17aa, 0x3696, "GeForce 820A" },
+	{ 0x17aa, 0x369b, "GeForce 820A" },
+	{ 0x17aa, 0x369c, "GeForce 820A" },
+	{ 0x17aa, 0x369d, "GeForce 820A" },
+	{ 0x17aa, 0x369e, "GeForce 820A" },
+	{ 0x17aa, 0x36a6, "GeForce 820A" },
+	{ 0x17aa, 0x36a7, "GeForce 820A" },
+	{ 0x17aa, 0x36a9, "GeForce 820A" },
+	{ 0x17aa, 0x36af, "GeForce 820A" },
+	{ 0x17aa, 0x36b0, "GeForce 820A" },
+	{ 0x17aa, 0x36b6, "GeForce 820A" },
+	{ 0x17aa, 0x3800, "GeForce GT 720M" },
+	{ 0x17aa, 0x3801, "GeForce GT 720M" },
+	{ 0x17aa, 0x3802, "GeForce GT 720M" },
+	{ 0x17aa, 0x3803, "GeForce GT 720M" },
+	{ 0x17aa, 0x3804, "GeForce GT 720M" },
+	{ 0x17aa, 0x3806, "GeForce GT 720M" },
+	{ 0x17aa, 0x3808, "GeForce GT 720M" },
+	{ 0x17aa, 0x380d, "GeForce 820M" },
+	{ 0x17aa, 0x380e, "GeForce 820M" },
+	{ 0x17aa, 0x380f, "GeForce 820M" },
+	{ 0x17aa, 0x3811, "GeForce 820M" },
+	{ 0x17aa, 0x3812, "GeForce 820M" },
+	{ 0x17aa, 0x3813, "GeForce 820M" },
+	{ 0x17aa, 0x3816, "GeForce 820M" },
+	{ 0x17aa, 0x3817, "GeForce 820M" },
+	{ 0x17aa, 0x3818, "GeForce 820M" },
+	{ 0x17aa, 0x381a, "GeForce 820M" },
+	{ 0x17aa, 0x381c, "GeForce 820M" },
+	{ 0x17aa, 0x381d, "GeForce 820M" },
+	{ 0x17aa, 0x3901, "GeForce 610M" },
+	{ 0x17aa, 0x3902, "GeForce 710M" },
+	{ 0x17aa, 0x3903, "GeForce 710M" },
+	{ 0x17aa, 0x3904, "GeForce GT 625M" },
+	{ 0x17aa, 0x3905, "GeForce GT 720M" },
+	{ 0x17aa, 0x3907, "GeForce 820M" },
+	{ 0x17aa, 0x3910, "GeForce GT 720M" },
+	{ 0x17aa, 0x3912, "GeForce GT 720M" },
+	{ 0x17aa, 0x3913, "GeForce 820M" },
+	{ 0x17aa, 0x3915, "GeForce 820M" },
+	{ 0x17aa, 0x3983, "GeForce 610M" },
+	{ 0x17aa, 0x5001, "GeForce 610M" },
+	{ 0x17aa, 0x5003, "GeForce GT 720M" },
+	{ 0x17aa, 0x5005, "GeForce 705M" },
+	{ 0x17aa, 0x500d, "GeForce GT 620M" },
+	{ 0x17aa, 0x5014, "GeForce 710M" },
+	{ 0x17aa, 0x5017, "GeForce 710M" },
+	{ 0x17aa, 0x5019, "GeForce 710M" },
+	{ 0x17aa, 0x501a, "GeForce 710M" },
+	{ 0x17aa, 0x501f, "GeForce GT 720M" },
+	{ 0x17aa, 0x5025, "GeForce 710M" },
+	{ 0x17aa, 0x5027, "GeForce 710M" },
+	{ 0x17aa, 0x502a, "GeForce 710M" },
+	{ 0x17aa, 0x502b, "GeForce GT 720M" },
+	{ 0x17aa, 0x502d, "GeForce 710M" },
+	{ 0x17aa, 0x502e, "GeForce GT 720M" },
+	{ 0x17aa, 0x502f, "GeForce GT 720M" },
+	{ 0x17aa, 0x5030, "GeForce 705M" },
+	{ 0x17aa, 0x5031, "GeForce 705M" },
+	{ 0x17aa, 0x5032, "GeForce 820M" },
+	{ 0x17aa, 0x5033, "GeForce 820M" },
+	{ 0x17aa, 0x503e, "GeForce 710M" },
+	{ 0x17aa, 0x503f, "GeForce 820M" },
+	{ 0x17aa, 0x5040, "GeForce 820M" },
+	{ 0x1854, 0x0177, "GeForce 710M" },
+	{ 0x1854, 0x0180, "GeForce 710M" },
+	{ 0x1854, 0x0190, "GeForce GT 720M" },
+	{ 0x1854, 0x0192, "GeForce GT 720M" },
+	{ 0x1854, 0x0224, "GeForce 820M" },
+	{ 0x1b0a, 0x20dd, "GeForce GT 620M" },
+	{ 0x1b0a, 0x20df, "GeForce GT 620M" },
+	{ 0x1b0a, 0x210e, "GeForce 820M" },
+	{ 0x1b0a, 0x2202, "GeForce GT 720M" },
+	{ 0x1b0a, 0x90d7, "GeForce 820M" },
+	{ 0x1b0a, 0x90dd, "GeForce 820M" },
+	{ 0x1b50, 0x5530, "GeForce 820M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1185[] = {
+	{ 0x10de, 0x106f, "GeForce GTX 760" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1189[] = {
+	{ 0x10de, 0x1074, "GeForce GTX 760 Ti OEM" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1199[] = {
+	{ 0x1458, 0xd001, "GeForce GTX 760" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_11e3[] = {
+	{ 0x17aa, 0x3683, "GeForce GTX 760A" },
+	{}
+};
+
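+/* Each entry is { subsystem-vendor, subsystem-device, name, quirks };
+ * a NULL name falls back to the device-level name.  War00C800_0 below
+ * appears to flag a board-specific workaround for these Lenovo SKUs. */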
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_11fc[] = {
+	{ 0x17aa, 0x2211, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
+	{ 0x17aa, 0x221e, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1247[] = {
+	{ 0x1043, 0x212a, "GeForce GT 635M" },
+	{ 0x1043, 0x212b, "GeForce GT 635M" },
+	{ 0x1043, 0x212c, "GeForce GT 635M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_124d[] = {
+	{ 0x1462, 0x10cc, "GeForce GT 635M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1290[] = {
+	{ 0x103c, 0x2afa, "GeForce 730A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1292[] = {
+	{ 0x17aa, 0x3675, "GeForce GT 740A" },
+	{ 0x17aa, 0x367c, "GeForce GT 740A" },
+	{ 0x17aa, 0x3684, "GeForce GT 740A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1295[] = {
+	{ 0x103c, 0x2b0d, "GeForce 710A" },
+	{ 0x103c, 0x2b0f, "GeForce 710A" },
+	{ 0x103c, 0x2b20, "GeForce 810A" },
+	{ 0x103c, 0x2b21, "GeForce 810A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1299[] = {
+	{ 0x17aa, 0x369b, "GeForce 920A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1340[] = {
+	{ 0x103c, 0x2b2b, "GeForce 830A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1341[] = {
+	{ 0x17aa, 0x3697, "GeForce 840A" },
+	{ 0x17aa, 0x3699, "GeForce 840A" },
+	{ 0x17aa, 0x369c, "GeForce 840A" },
+	{ 0x17aa, 0x36af, "GeForce 840A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1346[] = {
+	{ 0x17aa, 0x30ba, "GeForce 930A" },
+	{ 0x17aa, 0x362c, "GeForce 930A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1347[] = {
+	{ 0x17aa, 0x36b9, "GeForce 940A" },
+	{ 0x17aa, 0x36ba, "GeForce 940A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_137a[] = {
+	{ 0x17aa, 0x2225, "Quadro K620M" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_137d[] = {
+	{ 0x17aa, 0x3699, "GeForce 940A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1391[] = {
+	{ 0x17aa, 0x3697, "GeForce GTX 850A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_1392[] = {
+	{ 0x1028, 0x066a, "GeForce GPU" },
+	{ 0x1043, 0x861e, "GeForce GTX 750 Ti" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_139a[] = {
+	{ 0x17aa, 0x36b9, "GeForce GTX 950A" },
+	{}
+};
+
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_139b[] = {
+	{ 0x1028, 0x06a3, "GeForce GTX 860M" },
+	{ 0x19da, 0xc248, "GeForce GTX 750 Ti" },
+	{}
+};
+
+static const struct nvkm_device_pci_device
+nvkm_device_pci_10de[] = {
+	{ 0x0020, "RIVA TNT" },
+	{ 0x0028, "RIVA TNT2/TNT2 Pro" },
+	{ 0x0029, "RIVA TNT2 Ultra" },
+	{ 0x002c, "Vanta/Vanta LT" },
+	{ 0x002d, "RIVA TNT2 Model 64/Model 64 Pro" },
+	{ 0x0040, "GeForce 6800 Ultra" },
+	{ 0x0041, "GeForce 6800" },
+	{ 0x0042, "GeForce 6800 LE" },
+	{ 0x0043, "GeForce 6800 XE" },
+	{ 0x0044, "GeForce 6800 XT" },
+	{ 0x0045, "GeForce 6800 GT" },
+	{ 0x0046, "GeForce 6800 GT" },
+	{ 0x0047, "GeForce 6800 GS" },
+	{ 0x0048, "GeForce 6800 XT" },
+	{ 0x004e, "Quadro FX 4000" },
+	{ 0x0090, "GeForce 7800 GTX" },
+	{ 0x0091, "GeForce 7800 GTX" },
+	{ 0x0092, "GeForce 7800 GT" },
+	{ 0x0093, "GeForce 7800 GS" },
+	{ 0x0095, "GeForce 7800 SLI" },
+	{ 0x0098, "GeForce Go 7800" },
+	{ 0x0099, "GeForce Go 7800 GTX" },
+	{ 0x009d, "Quadro FX 4500" },
+	{ 0x00a0, "Aladdin TNT2" },
+	{ 0x00c0, "GeForce 6800 GS" },
+	{ 0x00c1, "GeForce 6800" },
+	{ 0x00c2, "GeForce 6800 LE" },
+	{ 0x00c3, "GeForce 6800 XT" },
+	{ 0x00c8, "GeForce Go 6800" },
+	{ 0x00c9, "GeForce Go 6800 Ultra" },
+	{ 0x00cc, "Quadro FX Go1400" },
+	{ 0x00cd, "Quadro FX 3450/4000 SDI" },
+	{ 0x00ce, "Quadro FX 1400" },
+	{ 0x00f1, "GeForce 6600 GT" },
+	{ 0x00f2, "GeForce 6600" },
+	{ 0x00f3, "GeForce 6200" },
+	{ 0x00f4, "GeForce 6600 LE" },
+	{ 0x00f5, "GeForce 7800 GS" },
+	{ 0x00f6, "GeForce 6800 GS" },
+	{ 0x00f8, "Quadro FX 3400/Quadro FX 4000" },
+	{ 0x00f9, "GeForce 6800 Ultra" },
+	{ 0x00fa, "GeForce PCX 5750" },
+	{ 0x00fb, "GeForce PCX 5900" },
+	{ 0x00fc, "Quadro FX 330/GeForce PCX 5300" },
+	{ 0x00fd, "Quadro FX 330/Quadro NVS 280 PCI-E" },
+	{ 0x00fe, "Quadro FX 1300" },
+	{ 0x0100, "GeForce 256" },
+	{ 0x0101, "GeForce DDR" },
+	{ 0x0103, "Quadro" },
+	{ 0x0110, "GeForce2 MX/MX 400" },
+	{ 0x0111, "GeForce2 MX 100/200" },
+	{ 0x0112, "GeForce2 Go" },
+	{ 0x0113, "Quadro2 MXR/EX/Go" },
+	{ 0x0140, "GeForce 6600 GT" },
+	{ 0x0141, "GeForce 6600" },
+	{ 0x0142, "GeForce 6600 LE" },
+	{ 0x0143, "GeForce 6600 VE" },
+	{ 0x0144, "GeForce Go 6600" },
+	{ 0x0145, "GeForce 6610 XL" },
+	{ 0x0146, "GeForce Go 6600 TE/6200 TE" },
+	{ 0x0147, "GeForce 6700 XL" },
+	{ 0x0148, "GeForce Go 6600" },
+	{ 0x0149, "GeForce Go 6600 GT" },
+	{ 0x014a, "Quadro NVS 440" },
+	{ 0x014c, "Quadro FX 540M" },
+	{ 0x014d, "Quadro FX 550" },
+	{ 0x014e, "Quadro FX 540" },
+	{ 0x014f, "GeForce 6200" },
+	{ 0x0150, "GeForce2 GTS/GeForce2 Pro" },
+	{ 0x0151, "GeForce2 Ti" },
+	{ 0x0152, "GeForce2 Ultra" },
+	{ 0x0153, "Quadro2 Pro" },
+	{ 0x0160, "GeForce 6500" },
+	{ 0x0161, "GeForce 6200 TurboCache(TM)" },
+	{ 0x0162, "GeForce 6200SE TurboCache(TM)" },
+	{ 0x0163, "GeForce 6200 LE" },
+	{ 0x0164, "GeForce Go 6200" },
+	{ 0x0165, "Quadro NVS 285" },
+	{ 0x0166, "GeForce Go 6400" },
+	{ 0x0167, "GeForce Go 6200" },
+	{ 0x0168, "GeForce Go 6400" },
+	{ 0x0169, "GeForce 6250" },
+	{ 0x016a, "GeForce 7100 GS" },
+	{ 0x0170, "GeForce4 MX 460" },
+	{ 0x0171, "GeForce4 MX 440" },
+	{ 0x0172, "GeForce4 MX 420" },
+	{ 0x0173, "GeForce4 MX 440-SE" },
+	{ 0x0174, "GeForce4 440 Go" },
+	{ 0x0175, "GeForce4 420 Go" },
+	{ 0x0176, "GeForce4 420 Go 32M" },
+	{ 0x0177, "GeForce4 460 Go" },
+	{ 0x0178, "Quadro4 550 XGL" },
+	{ 0x0179, "GeForce4 440 Go 64M" },
+	{ 0x017a, "Quadro NVS 400" },
+	{ 0x017c, "Quadro4 500 GoGL" },
+	{ 0x017d, "GeForce4 410 Go 16M" },
+	{ 0x0181, "GeForce4 MX 440 with AGP8X" },
+	{ 0x0182, "GeForce4 MX 440SE with AGP8X" },
+	{ 0x0183, "GeForce4 MX 420 with AGP8X" },
+	{ 0x0185, "GeForce4 MX 4000" },
+	{ 0x0188, "Quadro4 580 XGL" },
+	{ 0x0189, "GeForce4 MX with AGP8X (Mac)", nvkm_device_pci_10de_0189 },
+	{ 0x018a, "Quadro NVS 280 SD" },
+	{ 0x018b, "Quadro4 380 XGL" },
+	{ 0x018c, "Quadro NVS 50 PCI" },
+	{ 0x0191, "GeForce 8800 GTX" },
+	{ 0x0193, "GeForce 8800 GTS" },
+	{ 0x0194, "GeForce 8800 Ultra" },
+	{ 0x0197, "Tesla C870" },
+	{ 0x019d, "Quadro FX 5600" },
+	{ 0x019e, "Quadro FX 4600" },
+	{ 0x01a0, "GeForce2 Integrated GPU" },
+	{ 0x01d0, "GeForce 7350 LE" },
+	{ 0x01d1, "GeForce 7300 LE" },
+	{ 0x01d2, "GeForce 7550 LE" },
+	{ 0x01d3, "GeForce 7300 SE/7200 GS" },
+	{ 0x01d6, "GeForce Go 7200" },
+	{ 0x01d7, "GeForce Go 7300" },
+	{ 0x01d8, "GeForce Go 7400" },
+	{ 0x01da, "Quadro NVS 110M" },
+	{ 0x01db, "Quadro NVS 120M" },
+	{ 0x01dc, "Quadro FX 350M" },
+	{ 0x01dd, "GeForce 7500 LE" },
+	{ 0x01de, "Quadro FX 350" },
+	{ 0x01df, "GeForce 7300 GS" },
+	{ 0x01f0, "GeForce4 MX Integrated GPU", nvkm_device_pci_10de_01f0 },
+	{ 0x0200, "GeForce3" },
+	{ 0x0201, "GeForce3 Ti 200" },
+	{ 0x0202, "GeForce3 Ti 500" },
+	{ 0x0203, "Quadro DCC" },
+	{ 0x0211, "GeForce 6800" },
+	{ 0x0212, "GeForce 6800 LE" },
+	{ 0x0215, "GeForce 6800 GT" },
+	{ 0x0218, "GeForce 6800 XT" },
+	{ 0x0221, "GeForce 6200" },
+	{ 0x0222, "GeForce 6200 A-LE" },
+	{ 0x0240, "GeForce 6150" },
+	{ 0x0241, "GeForce 6150 LE" },
+	{ 0x0242, "GeForce 6100" },
+	{ 0x0244, "GeForce Go 6150" },
+	{ 0x0245, "Quadro NVS 210S / GeForce 6150LE" },
+	{ 0x0247, "GeForce Go 6100" },
+	{ 0x0250, "GeForce4 Ti 4600" },
+	{ 0x0251, "GeForce4 Ti 4400" },
+	{ 0x0253, "GeForce4 Ti 4200" },
+	{ 0x0258, "Quadro4 900 XGL" },
+	{ 0x0259, "Quadro4 750 XGL" },
+	{ 0x025b, "Quadro4 700 XGL" },
+	{ 0x0280, "GeForce4 Ti 4800" },
+	{ 0x0281, "GeForce4 Ti 4200 with AGP8X" },
+	{ 0x0282, "GeForce4 Ti 4800 SE" },
+	{ 0x0286, "GeForce4 4200 Go" },
+	{ 0x0288, "Quadro4 980 XGL" },
+	{ 0x0289, "Quadro4 780 XGL" },
+	{ 0x028c, "Quadro4 700 GoGL" },
+	{ 0x0290, "GeForce 7900 GTX" },
+	{ 0x0291, "GeForce 7900 GT/GTO" },
+	{ 0x0292, "GeForce 7900 GS" },
+	{ 0x0293, "GeForce 7950 GX2" },
+	{ 0x0294, "GeForce 7950 GX2" },
+	{ 0x0295, "GeForce 7950 GT" },
+	{ 0x0297, "GeForce Go 7950 GTX" },
+	{ 0x0298, "GeForce Go 7900 GS" },
+	{ 0x0299, "Quadro NVS 510M" },
+	{ 0x029a, "Quadro FX 2500M" },
+	{ 0x029b, "Quadro FX 1500M" },
+	{ 0x029c, "Quadro FX 5500" },
+	{ 0x029d, "Quadro FX 3500" },
+	{ 0x029e, "Quadro FX 1500" },
+	{ 0x029f, "Quadro FX 4500 X2" },
+	{ 0x02e0, "GeForce 7600 GT" },
+	{ 0x02e1, "GeForce 7600 GS" },
+	{ 0x02e2, "GeForce 7300 GT" },
+	{ 0x02e3, "GeForce 7900 GS" },
+	{ 0x02e4, "GeForce 7950 GT" },
+	{ 0x0301, "GeForce FX 5800 Ultra" },
+	{ 0x0302, "GeForce FX 5800" },
+	{ 0x0308, "Quadro FX 2000" },
+	{ 0x0309, "Quadro FX 1000" },
+	{ 0x0311, "GeForce FX 5600 Ultra" },
+	{ 0x0312, "GeForce FX 5600" },
+	{ 0x0314, "GeForce FX 5600XT" },
+	{ 0x031a, "GeForce FX Go5600" },
+	{ 0x031b, "GeForce FX Go5650" },
+	{ 0x031c, "Quadro FX Go700" },
+	{ 0x0320, "GeForce FX 5200" },
+	{ 0x0321, "GeForce FX 5200 Ultra" },
+	{ 0x0322, "GeForce FX 5200", nvkm_device_pci_10de_0322 },
+	{ 0x0323, "GeForce FX 5200LE" },
+	{ 0x0324, "GeForce FX Go5200" },
+	{ 0x0325, "GeForce FX Go5250" },
+	{ 0x0326, "GeForce FX 5500" },
+	{ 0x0327, "GeForce FX 5100" },
+	{ 0x0328, "GeForce FX Go5200 32M/64M" },
+	{ 0x032a, "Quadro NVS 55/280 PCI" },
+	{ 0x032b, "Quadro FX 500/FX 600" },
+	{ 0x032c, "GeForce FX Go53xx" },
+	{ 0x032d, "GeForce FX Go5100" },
+	{ 0x0330, "GeForce FX 5900 Ultra" },
+	{ 0x0331, "GeForce FX 5900" },
+	{ 0x0332, "GeForce FX 5900XT" },
+	{ 0x0333, "GeForce FX 5950 Ultra" },
+	{ 0x0334, "GeForce FX 5900ZT" },
+	{ 0x0338, "Quadro FX 3000" },
+	{ 0x033f, "Quadro FX 700" },
+	{ 0x0341, "GeForce FX 5700 Ultra" },
+	{ 0x0342, "GeForce FX 5700" },
+	{ 0x0343, "GeForce FX 5700LE" },
+	{ 0x0344, "GeForce FX 5700VE" },
+	{ 0x0347, "GeForce FX Go5700" },
+	{ 0x0348, "GeForce FX Go5700" },
+	{ 0x034c, "Quadro FX Go1000" },
+	{ 0x034e, "Quadro FX 1100" },
+	{ 0x038b, "GeForce 7650 GS" },
+	{ 0x0390, "GeForce 7650 GS" },
+	{ 0x0391, "GeForce 7600 GT" },
+	{ 0x0392, "GeForce 7600 GS" },
+	{ 0x0393, "GeForce 7300 GT" },
+	{ 0x0394, "GeForce 7600 LE" },
+	{ 0x0395, "GeForce 7300 GT" },
+	{ 0x0397, "GeForce Go 7700" },
+	{ 0x0398, "GeForce Go 7600" },
+	{ 0x0399, "GeForce Go 7600 GT" },
+	{ 0x039c, "Quadro FX 560M" },
+	{ 0x039e, "Quadro FX 560" },
+	{ 0x03d0, "GeForce 6150SE nForce 430" },
+	{ 0x03d1, "GeForce 6100 nForce 405" },
+	{ 0x03d2, "GeForce 6100 nForce 400" },
+	{ 0x03d5, "GeForce 6100 nForce 420" },
+	{ 0x03d6, "GeForce 7025 / nForce 630a" },
+	{ 0x0400, "GeForce 8600 GTS" },
+	{ 0x0401, "GeForce 8600 GT" },
+	{ 0x0402, "GeForce 8600 GT" },
+	{ 0x0403, "GeForce 8600 GS" },
+	{ 0x0404, "GeForce 8400 GS" },
+	{ 0x0405, "GeForce 9500M GS" },
+	{ 0x0406, "GeForce 8300 GS" },
+	{ 0x0407, "GeForce 8600M GT" },
+	{ 0x0408, "GeForce 9650M GS" },
+	{ 0x0409, "GeForce 8700M GT" },
+	{ 0x040a, "Quadro FX 370" },
+	{ 0x040b, "Quadro NVS 320M" },
+	{ 0x040c, "Quadro FX 570M" },
+	{ 0x040d, "Quadro FX 1600M" },
+	{ 0x040e, "Quadro FX 570" },
+	{ 0x040f, "Quadro FX 1700" },
+	{ 0x0410, "GeForce GT 330" },
+	{ 0x0420, "GeForce 8400 SE" },
+	{ 0x0421, "GeForce 8500 GT" },
+	{ 0x0422, "GeForce 8400 GS" },
+	{ 0x0423, "GeForce 8300 GS" },
+	{ 0x0424, "GeForce 8400 GS" },
+	{ 0x0425, "GeForce 8600M GS" },
+	{ 0x0426, "GeForce 8400M GT" },
+	{ 0x0427, "GeForce 8400M GS" },
+	{ 0x0428, "GeForce 8400M G" },
+	{ 0x0429, "Quadro NVS 140M" },
+	{ 0x042a, "Quadro NVS 130M" },
+	{ 0x042b, "Quadro NVS 135M" },
+	{ 0x042c, "GeForce 9400 GT" },
+	{ 0x042d, "Quadro FX 360M" },
+	{ 0x042e, "GeForce 9300M G" },
+	{ 0x042f, "Quadro NVS 290" },
+	{ 0x0531, "GeForce 7150M / nForce 630M" },
+	{ 0x0533, "GeForce 7000M / nForce 610M" },
+	{ 0x053a, "GeForce 7050 PV / nForce 630a" },
+	{ 0x053b, "GeForce 7050 PV / nForce 630a" },
+	{ 0x053e, "GeForce 7025 / nForce 630a" },
+	{ 0x05e0, "GeForce GTX 295" },
+	{ 0x05e1, "GeForce GTX 280" },
+	{ 0x05e2, "GeForce GTX 260" },
+	{ 0x05e3, "GeForce GTX 285" },
+	{ 0x05e6, "GeForce GTX 275" },
+	{ 0x05e7, "Tesla C1060", nvkm_device_pci_10de_05e7 },
+	{ 0x05ea, "GeForce GTX 260" },
+	{ 0x05eb, "GeForce GTX 295" },
+	{ 0x05ed, "Quadroplex 2200 D2" },
+	{ 0x05f8, "Quadroplex 2200 S4" },
+	{ 0x05f9, "Quadro CX" },
+	{ 0x05fd, "Quadro FX 5800" },
+	{ 0x05fe, "Quadro FX 4800" },
+	{ 0x05ff, "Quadro FX 3800" },
+	{ 0x0600, "GeForce 8800 GTS 512" },
+	{ 0x0601, "GeForce 9800 GT" },
+	{ 0x0602, "GeForce 8800 GT" },
+	{ 0x0603, "GeForce GT 230" },
+	{ 0x0604, "GeForce 9800 GX2" },
+	{ 0x0605, "GeForce 9800 GT" },
+	{ 0x0606, "GeForce 8800 GS" },
+	{ 0x0607, "GeForce GTS 240" },
+	{ 0x0608, "GeForce 9800M GTX" },
+	{ 0x0609, "GeForce 8800M GTS", nvkm_device_pci_10de_0609 },
+	{ 0x060a, "GeForce GTX 280M" },
+	{ 0x060b, "GeForce 9800M GT" },
+	{ 0x060c, "GeForce 8800M GTX" },
+	{ 0x060d, "GeForce 8800 GS" },
+	{ 0x060f, "GeForce GTX 285M" },
+	{ 0x0610, "GeForce 9600 GSO" },
+	{ 0x0611, "GeForce 8800 GT" },
+	{ 0x0612, "GeForce 9800 GTX/9800 GTX+" },
+	{ 0x0613, "GeForce 9800 GTX+" },
+	{ 0x0614, "GeForce 9800 GT" },
+	{ 0x0615, "GeForce GTS 250" },
+	{ 0x0617, "GeForce 9800M GTX" },
+	{ 0x0618, "GeForce GTX 260M" },
+	{ 0x0619, "Quadro FX 4700 X2" },
+	{ 0x061a, "Quadro FX 3700" },
+	{ 0x061b, "Quadro VX 200" },
+	{ 0x061c, "Quadro FX 3600M" },
+	{ 0x061d, "Quadro FX 2800M" },
+	{ 0x061e, "Quadro FX 3700M" },
+	{ 0x061f, "Quadro FX 3800M" },
+	{ 0x0621, "GeForce GT 230" },
+	{ 0x0622, "GeForce 9600 GT" },
+	{ 0x0623, "GeForce 9600 GS" },
+	{ 0x0625, "GeForce 9600 GSO 512" },
+	{ 0x0626, "GeForce GT 130" },
+	{ 0x0627, "GeForce GT 140" },
+	{ 0x0628, "GeForce 9800M GTS" },
+	{ 0x062a, "GeForce 9700M GTS" },
+	{ 0x062b, "GeForce 9800M GS" },
+	{ 0x062c, "GeForce 9800M GTS" },
+	{ 0x062d, "GeForce 9600 GT" },
+	{ 0x062e, "GeForce 9600 GT", nvkm_device_pci_10de_062e },
+	{ 0x0630, "GeForce 9700 S" },
+	{ 0x0631, "GeForce GTS 160M" },
+	{ 0x0632, "GeForce GTS 150M" },
+	{ 0x0635, "GeForce 9600 GSO" },
+	{ 0x0637, "GeForce 9600 GT" },
+	{ 0x0638, "Quadro FX 1800" },
+	{ 0x063a, "Quadro FX 2700M" },
+	{ 0x0640, "GeForce 9500 GT" },
+	{ 0x0641, "GeForce 9400 GT" },
+	{ 0x0643, "GeForce 9500 GT" },
+	{ 0x0644, "GeForce 9500 GS" },
+	{ 0x0645, "GeForce 9500 GS" },
+	{ 0x0646, "GeForce GT 120" },
+	{ 0x0647, "GeForce 9600M GT" },
+	{ 0x0648, "GeForce 9600M GS" },
+	{ 0x0649, "GeForce 9600M GT", nvkm_device_pci_10de_0649 },
+	{ 0x064a, "GeForce 9700M GT" },
+	{ 0x064b, "GeForce 9500M G" },
+	{ 0x064c, "GeForce 9650M GT" },
+	{ 0x0651, "GeForce G 110M" },
+	{ 0x0652, "GeForce GT 130M", nvkm_device_pci_10de_0652 },
+	{ 0x0653, "GeForce GT 120M" },
+	{ 0x0654, "GeForce GT 220M", nvkm_device_pci_10de_0654 },
+	{ 0x0655, NULL, nvkm_device_pci_10de_0655 },
+	{ 0x0656, NULL, nvkm_device_pci_10de_0656 },
+	{ 0x0658, "Quadro FX 380" },
+	{ 0x0659, "Quadro FX 580" },
+	{ 0x065a, "Quadro FX 1700M" },
+	{ 0x065b, "GeForce 9400 GT" },
+	{ 0x065c, "Quadro FX 770M" },
+	{ 0x06c0, "GeForce GTX 480" },
+	{ 0x06c4, "GeForce GTX 465" },
+	{ 0x06ca, "GeForce GTX 480M" },
+	{ 0x06cd, "GeForce GTX 470" },
+	{ 0x06d1, "Tesla C2050 / C2070", nvkm_device_pci_10de_06d1 },
+	{ 0x06d2, "Tesla M2070", nvkm_device_pci_10de_06d2 },
+	{ 0x06d8, "Quadro 6000" },
+	{ 0x06d9, "Quadro 5000" },
+	{ 0x06da, "Quadro 5000M" },
+	{ 0x06dc, "Quadro 6000" },
+	{ 0x06dd, "Quadro 4000" },
+	{ 0x06de, "Tesla T20 Processor", nvkm_device_pci_10de_06de },
+	{ 0x06df, "Tesla M2070-Q" },
+	{ 0x06e0, "GeForce 9300 GE" },
+	{ 0x06e1, "GeForce 9300 GS" },
+	{ 0x06e2, "GeForce 8400" },
+	{ 0x06e3, "GeForce 8400 SE" },
+	{ 0x06e4, "GeForce 8400 GS" },
+	{ 0x06e5, "GeForce 9300M GS" },
+	{ 0x06e6, "GeForce G100" },
+	{ 0x06e7, "GeForce 9300 SE" },
+	{ 0x06e8, "GeForce 9200M GS", nvkm_device_pci_10de_06e8 },
+	{ 0x06e9, "GeForce 9300M GS" },
+	{ 0x06ea, "Quadro NVS 150M" },
+	{ 0x06eb, "Quadro NVS 160M" },
+	{ 0x06ec, "GeForce G 105M" },
+	{ 0x06ef, "GeForce G 103M" },
+	{ 0x06f1, "GeForce G105M" },
+	{ 0x06f8, "Quadro NVS 420" },
+	{ 0x06f9, "Quadro FX 370 LP", nvkm_device_pci_10de_06f9 },
+	{ 0x06fa, "Quadro NVS 450" },
+	{ 0x06fb, "Quadro FX 370M" },
+	{ 0x06fd, "Quadro NVS 295" },
+	{ 0x06ff, "HICx16 + Graphics", nvkm_device_pci_10de_06ff },
+	{ 0x07e0, "GeForce 7150 / nForce 630i" },
+	{ 0x07e1, "GeForce 7100 / nForce 630i" },
+	{ 0x07e2, "GeForce 7050 / nForce 630i" },
+	{ 0x07e3, "GeForce 7050 / nForce 610i" },
+	{ 0x07e5, "GeForce 7050 / nForce 620i" },
+	{ 0x0840, "GeForce 8200M" },
+	{ 0x0844, "GeForce 9100M G" },
+	{ 0x0845, "GeForce 8200M G" },
+	{ 0x0846, "GeForce 9200" },
+	{ 0x0847, "GeForce 9100" },
+	{ 0x0848, "GeForce 8300" },
+	{ 0x0849, "GeForce 8200" },
+	{ 0x084a, "nForce 730a" },
+	{ 0x084b, "GeForce 9200" },
+	{ 0x084c, "nForce 980a/780a SLI" },
+	{ 0x084d, "nForce 750a SLI" },
+	{ 0x084f, "GeForce 8100 / nForce 720a" },
+	{ 0x0860, "GeForce 9400" },
+	{ 0x0861, "GeForce 9400" },
+	{ 0x0862, "GeForce 9400M G" },
+	{ 0x0863, "GeForce 9400M" },
+	{ 0x0864, "GeForce 9300" },
+	{ 0x0865, "ION" },
+	{ 0x0866, "GeForce 9400M G", nvkm_device_pci_10de_0866 },
+	{ 0x0867, "GeForce 9400" },
+	{ 0x0868, "nForce 760i SLI" },
+	{ 0x0869, "GeForce 9400" },
+	{ 0x086a, "GeForce 9400" },
+	{ 0x086c, "GeForce 9300 / nForce 730i" },
+	{ 0x086d, "GeForce 9200" },
+	{ 0x086e, "GeForce 9100M G" },
+	{ 0x086f, "GeForce 8200M G" },
+	{ 0x0870, "GeForce 9400M" },
+	{ 0x0871, "GeForce 9200" },
+	{ 0x0872, "GeForce G102M", nvkm_device_pci_10de_0872 },
+	{ 0x0873, "GeForce G102M", nvkm_device_pci_10de_0873 },
+	{ 0x0874, "ION" },
+	{ 0x0876, "ION" },
+	{ 0x087a, "GeForce 9400" },
+	{ 0x087d, "ION" },
+	{ 0x087e, "ION LE" },
+	{ 0x087f, "ION LE" },
+	{ 0x08a0, "GeForce 320M" },
+	{ 0x08a2, "GeForce 320M" },
+	{ 0x08a3, "GeForce 320M" },
+	{ 0x08a4, "GeForce 320M" },
+	{ 0x08a5, "GeForce 320M" },
+	{ 0x0a20, "GeForce GT 220" },
+	{ 0x0a22, "GeForce 315" },
+	{ 0x0a23, "GeForce 210" },
+	{ 0x0a26, "GeForce 405" },
+	{ 0x0a27, "GeForce 405" },
+	{ 0x0a28, "GeForce GT 230M" },
+	{ 0x0a29, "GeForce GT 330M" },
+	{ 0x0a2a, "GeForce GT 230M" },
+	{ 0x0a2b, "GeForce GT 330M" },
+	{ 0x0a2c, "NVS 5100M" },
+	{ 0x0a2d, "GeForce GT 320M" },
+	{ 0x0a32, "GeForce GT 415" },
+	{ 0x0a34, "GeForce GT 240M" },
+	{ 0x0a35, "GeForce GT 325M" },
+	{ 0x0a38, "Quadro 400" },
+	{ 0x0a3c, "Quadro FX 880M" },
+	{ 0x0a60, "GeForce G210" },
+	{ 0x0a62, "GeForce 205" },
+	{ 0x0a63, "GeForce 310" },
+	{ 0x0a64, "Second Generation ION" },
+	{ 0x0a65, "GeForce 210" },
+	{ 0x0a66, "GeForce 310" },
+	{ 0x0a67, "GeForce 315" },
+	{ 0x0a68, "GeForce G105M" },
+	{ 0x0a69, "GeForce G105M" },
+	{ 0x0a6a, "NVS 2100M" },
+	{ 0x0a6c, "NVS 3100M" },
+	{ 0x0a6e, "GeForce 305M", nvkm_device_pci_10de_0a6e },
+	{ 0x0a6f, "Second Generation ION" },
+	{ 0x0a70, "GeForce 310M", nvkm_device_pci_10de_0a70 },
+	{ 0x0a71, "GeForce 305M" },
+	{ 0x0a72, "GeForce 310M" },
+	{ 0x0a73, "GeForce 305M", nvkm_device_pci_10de_0a73 },
+	{ 0x0a74, "GeForce G210M", nvkm_device_pci_10de_0a74 },
+	{ 0x0a75, "GeForce 310M", nvkm_device_pci_10de_0a75 },
+	{ 0x0a76, "Second Generation ION" },
+	{ 0x0a78, "Quadro FX 380 LP" },
+	{ 0x0a7a, "GeForce 315M", nvkm_device_pci_10de_0a7a },
+	{ 0x0a7c, "Quadro FX 380M" },
+	{ 0x0ca0, "GeForce GT 330" },
+	{ 0x0ca2, "GeForce GT 320" },
+	{ 0x0ca3, "GeForce GT 240" },
+	{ 0x0ca4, "GeForce GT 340" },
+	{ 0x0ca5, "GeForce GT 220" },
+	{ 0x0ca7, "GeForce GT 330" },
+	{ 0x0ca8, "GeForce GTS 260M" },
+	{ 0x0ca9, "GeForce GTS 250M" },
+	{ 0x0cac, "GeForce GT 220" },
+	{ 0x0caf, "GeForce GT 335M" },
+	{ 0x0cb0, "GeForce GTS 350M" },
+	{ 0x0cb1, "GeForce GTS 360M" },
+	{ 0x0cbc, "Quadro FX 1800M" },
+	{ 0x0dc0, "GeForce GT 440" },
+	{ 0x0dc4, "GeForce GTS 450" },
+	{ 0x0dc5, "GeForce GTS 450" },
+	{ 0x0dc6, "GeForce GTS 450" },
+	{ 0x0dcd, "GeForce GT 555M" },
+	{ 0x0dce, "GeForce GT 555M" },
+	{ 0x0dd1, "GeForce GTX 460M" },
+	{ 0x0dd2, "GeForce GT 445M" },
+	{ 0x0dd3, "GeForce GT 435M" },
+	{ 0x0dd6, "GeForce GT 550M" },
+	{ 0x0dd8, "Quadro 2000", nvkm_device_pci_10de_0dd8 },
+	{ 0x0dda, "Quadro 2000M" },
+	{ 0x0de0, "GeForce GT 440" },
+	{ 0x0de1, "GeForce GT 430" },
+	{ 0x0de2, "GeForce GT 420" },
+	{ 0x0de3, "GeForce GT 635M" },
+	{ 0x0de4, "GeForce GT 520" },
+	{ 0x0de5, "GeForce GT 530" },
+	{ 0x0de7, "GeForce GT 610" },
+	{ 0x0de8, "GeForce GT 620M" },
+	{ 0x0de9, "GeForce GT 630M", nvkm_device_pci_10de_0de9 },
+	{ 0x0dea, "GeForce 610M", nvkm_device_pci_10de_0dea },
+	{ 0x0deb, "GeForce GT 555M" },
+	{ 0x0dec, "GeForce GT 525M" },
+	{ 0x0ded, "GeForce GT 520M" },
+	{ 0x0dee, "GeForce GT 415M" },
+	{ 0x0def, "NVS 5400M" },
+	{ 0x0df0, "GeForce GT 425M" },
+	{ 0x0df1, "GeForce GT 420M" },
+	{ 0x0df2, "GeForce GT 435M" },
+	{ 0x0df3, "GeForce GT 420M" },
+	{ 0x0df4, "GeForce GT 540M", nvkm_device_pci_10de_0df4 },
+	{ 0x0df5, "GeForce GT 525M" },
+	{ 0x0df6, "GeForce GT 550M" },
+	{ 0x0df7, "GeForce GT 520M" },
+	{ 0x0df8, "Quadro 600" },
+	{ 0x0df9, "Quadro 500M" },
+	{ 0x0dfa, "Quadro 1000M" },
+	{ 0x0dfc, "NVS 5200M" },
+	{ 0x0e22, "GeForce GTX 460" },
+	{ 0x0e23, "GeForce GTX 460 SE" },
+	{ 0x0e24, "GeForce GTX 460" },
+	{ 0x0e30, "GeForce GTX 470M" },
+	{ 0x0e31, "GeForce GTX 485M" },
+	{ 0x0e3a, "Quadro 3000M" },
+	{ 0x0e3b, "Quadro 4000M" },
+	{ 0x0f00, "GeForce GT 630" },
+	{ 0x0f01, "GeForce GT 620" },
+	{ 0x0f02, "GeForce GT 730" },
+	{ 0x0fc0, "GeForce GT 640" },
+	{ 0x0fc1, "GeForce GT 640" },
+	{ 0x0fc2, "GeForce GT 630" },
+	{ 0x0fc6, "GeForce GTX 650" },
+	{ 0x0fc8, "GeForce GT 740" },
+	{ 0x0fc9, "GeForce GT 730" },
+	{ 0x0fcd, "GeForce GT 755M" },
+	{ 0x0fce, "GeForce GT 640M LE" },
+	{ 0x0fd1, "GeForce GT 650M" },
+	{ 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 },
+	{ 0x0fd3, "GeForce GT 640M LE" },
+	{ 0x0fd4, "GeForce GTX 660M" },
+	{ 0x0fd5, "GeForce GT 650M" },
+	{ 0x0fd8, "GeForce GT 640M" },
+	{ 0x0fd9, "GeForce GT 645M" },
+	{ 0x0fdf, "GeForce GT 740M" },
+	{ 0x0fe0, "GeForce GTX 660M" },
+	{ 0x0fe1, "GeForce GT 730M" },
+	{ 0x0fe2, "GeForce GT 745M" },
+	{ 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 },
+	{ 0x0fe4, "GeForce GT 750M" },
+	{ 0x0fe9, "GeForce GT 750M" },
+	{ 0x0fea, "GeForce GT 755M" },
+	{ 0x0fec, "GeForce 710A" },
+	{ 0x0fef, "GRID K340" },
+	{ 0x0ff2, "GRID K1" },
+	{ 0x0ff3, "Quadro K420" },
+	{ 0x0ff6, "Quadro K1100M" },
+	{ 0x0ff8, "Quadro K500M" },
+	{ 0x0ff9, "Quadro K2000D" },
+	{ 0x0ffa, "Quadro K600" },
+	{ 0x0ffb, "Quadro K2000M" },
+	{ 0x0ffc, "Quadro K1000M" },
+	{ 0x0ffd, "NVS 510" },
+	{ 0x0ffe, "Quadro K2000" },
+	{ 0x0fff, "Quadro 410" },
+	{ 0x1001, "GeForce GTX TITAN Z" },
+	{ 0x1004, "GeForce GTX 780" },
+	{ 0x1005, "GeForce GTX TITAN" },
+	{ 0x1007, "GeForce GTX 780" },
+	{ 0x1008, "GeForce GTX 780 Ti" },
+	{ 0x100a, "GeForce GTX 780 Ti" },
+	{ 0x100c, "GeForce GTX TITAN Black" },
+	{ 0x1021, "Tesla K20Xm" },
+	{ 0x1022, "Tesla K20c" },
+	{ 0x1023, "Tesla K40m" },
+	{ 0x1024, "Tesla K40c" },
+	{ 0x1026, "Tesla K20s" },
+	{ 0x1027, "Tesla K40st" },
+	{ 0x1028, "Tesla K20m" },
+	{ 0x1029, "Tesla K40s" },
+	{ 0x102a, "Tesla K40t" },
+	{ 0x102d, "Tesla K80" },
+	{ 0x103a, "Quadro K6000" },
+	{ 0x103c, "Quadro K5200" },
+	{ 0x1040, "GeForce GT 520" },
+	{ 0x1042, "GeForce 510" },
+	{ 0x1048, "GeForce 605" },
+	{ 0x1049, "GeForce GT 620" },
+	{ 0x104a, "GeForce GT 610" },
+	{ 0x104b, "GeForce GT 625 (OEM)", nvkm_device_pci_10de_104b },
+	{ 0x104c, "GeForce GT 705" },
+	{ 0x1050, "GeForce GT 520M" },
+	{ 0x1051, "GeForce GT 520MX" },
+	{ 0x1052, "GeForce GT 520M" },
+	{ 0x1054, "GeForce 410M" },
+	{ 0x1055, "GeForce 410M" },
+	{ 0x1056, "NVS 4200M" },
+	{ 0x1057, "NVS 4200M" },
+	{ 0x1058, "GeForce 610M", nvkm_device_pci_10de_1058 },
+	{ 0x1059, "GeForce 610M" },
+	{ 0x105a, "GeForce 610M" },
+	{ 0x105b, "GeForce 705M", nvkm_device_pci_10de_105b },
+	{ 0x107c, "NVS 315" },
+	{ 0x107d, "NVS 310" },
+	{ 0x1080, "GeForce GTX 580" },
+	{ 0x1081, "GeForce GTX 570" },
+	{ 0x1082, "GeForce GTX 560 Ti" },
+	{ 0x1084, "GeForce GTX 560" },
+	{ 0x1086, "GeForce GTX 570" },
+	{ 0x1087, "GeForce GTX 560 Ti" },
+	{ 0x1088, "GeForce GTX 590" },
+	{ 0x1089, "GeForce GTX 580" },
+	{ 0x108b, "GeForce GTX 580" },
+	{ 0x1091, "Tesla M2090", nvkm_device_pci_10de_1091 },
+	{ 0x1094, "Tesla M2075" },
+	{ 0x1096, "Tesla C2075", nvkm_device_pci_10de_1096 },
+	{ 0x109a, "Quadro 5010M" },
+	{ 0x109b, "Quadro 7000" },
+	{ 0x10c0, "GeForce 9300 GS" },
+	{ 0x10c3, "GeForce 8400GS" },
+	{ 0x10c5, "GeForce 405" },
+	{ 0x10d8, "NVS 300" },
+	{ 0x1140, NULL, nvkm_device_pci_10de_1140 },
+	{ 0x1180, "GeForce GTX 680" },
+	{ 0x1183, "GeForce GTX 660 Ti" },
+	{ 0x1184, "GeForce GTX 770" },
+	{ 0x1185, "GeForce GTX 660", nvkm_device_pci_10de_1185 },
+	{ 0x1187, "GeForce GTX 760" },
+	{ 0x1188, "GeForce GTX 690" },
+	{ 0x1189, "GeForce GTX 670", nvkm_device_pci_10de_1189 },
+	{ 0x118a, "GRID K520" },
+	{ 0x118e, "GeForce GTX 760 (192-bit)" },
+	{ 0x118f, "Tesla K10" },
+	{ 0x1193, "GeForce GTX 760 Ti OEM" },
+	{ 0x1194, "Tesla K8" },
+	{ 0x1195, "GeForce GTX 660" },
+	{ 0x1198, "GeForce GTX 880M" },
+	{ 0x1199, "GeForce GTX 870M", nvkm_device_pci_10de_1199 },
+	{ 0x119a, "GeForce GTX 860M" },
+	{ 0x119d, "GeForce GTX 775M" },
+	{ 0x119e, "GeForce GTX 780M" },
+	{ 0x119f, "GeForce GTX 780M" },
+	{ 0x11a0, "GeForce GTX 680M" },
+	{ 0x11a1, "GeForce GTX 670MX" },
+	{ 0x11a2, "GeForce GTX 675MX" },
+	{ 0x11a3, "GeForce GTX 680MX" },
+	{ 0x11a7, "GeForce GTX 675MX" },
+	{ 0x11b4, "Quadro K4200" },
+	{ 0x11b6, "Quadro K3100M" },
+	{ 0x11b7, "Quadro K4100M" },
+	{ 0x11b8, "Quadro K5100M" },
+	{ 0x11ba, "Quadro K5000" },
+	{ 0x11bc, "Quadro K5000M" },
+	{ 0x11bd, "Quadro K4000M" },
+	{ 0x11be, "Quadro K3000M" },
+	{ 0x11bf, "GRID K2" },
+	{ 0x11c0, "GeForce GTX 660" },
+	{ 0x11c2, "GeForce GTX 650 Ti BOOST" },
+	{ 0x11c3, "GeForce GTX 650 Ti" },
+	{ 0x11c4, "GeForce GTX 645" },
+	{ 0x11c5, "GeForce GT 740" },
+	{ 0x11c6, "GeForce GTX 650 Ti" },
+	{ 0x11c8, "GeForce GTX 650" },
+	{ 0x11cb, "GeForce GT 740" },
+	{ 0x11e0, "GeForce GTX 770M" },
+	{ 0x11e1, "GeForce GTX 765M" },
+	{ 0x11e2, "GeForce GTX 765M" },
+	{ 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 },
+	{ 0x11fa, "Quadro K4000" },
+	{ 0x11fc, "Quadro K2100M", nvkm_device_pci_10de_11fc },
+	{ 0x1200, "GeForce GTX 560 Ti" },
+	{ 0x1201, "GeForce GTX 560" },
+	{ 0x1203, "GeForce GTX 460 SE v2" },
+	{ 0x1205, "GeForce GTX 460 v2" },
+	{ 0x1206, "GeForce GTX 555" },
+	{ 0x1207, "GeForce GT 645" },
+	{ 0x1208, "GeForce GTX 560 SE" },
+	{ 0x1210, "GeForce GTX 570M" },
+	{ 0x1211, "GeForce GTX 580M" },
+	{ 0x1212, "GeForce GTX 675M" },
+	{ 0x1213, "GeForce GTX 670M" },
+	{ 0x1241, "GeForce GT 545" },
+	{ 0x1243, "GeForce GT 545" },
+	{ 0x1244, "GeForce GTX 550 Ti" },
+	{ 0x1245, "GeForce GTS 450" },
+	{ 0x1246, "GeForce GT 550M" },
+	{ 0x1247, "GeForce GT 555M", nvkm_device_pci_10de_1247 },
+	{ 0x1248, "GeForce GT 555M" },
+	{ 0x1249, "GeForce GTS 450" },
+	{ 0x124b, "GeForce GT 640" },
+	{ 0x124d, "GeForce GT 555M", nvkm_device_pci_10de_124d },
+	{ 0x1251, "GeForce GTX 560M" },
+	{ 0x1280, "GeForce GT 635" },
+	{ 0x1281, "GeForce GT 710" },
+	{ 0x1282, "GeForce GT 640" },
+	{ 0x1284, "GeForce GT 630" },
+	{ 0x1286, "GeForce GT 720" },
+	{ 0x1287, "GeForce GT 730" },
+	{ 0x1288, "GeForce GT 720" },
+	{ 0x1289, "GeForce GT 710" },
+	{ 0x1290, "GeForce GT 730M", nvkm_device_pci_10de_1290 },
+	{ 0x1291, "GeForce GT 735M" },
+	{ 0x1292, "GeForce GT 740M", nvkm_device_pci_10de_1292 },
+	{ 0x1293, "GeForce GT 730M" },
+	{ 0x1295, "GeForce 710M", nvkm_device_pci_10de_1295 },
+	{ 0x1296, "GeForce 825M" },
+	{ 0x1298, "GeForce GT 720M" },
+	{ 0x1299, "GeForce 920M", nvkm_device_pci_10de_1299 },
+	{ 0x129a, "GeForce 910M" },
+	{ 0x12b9, "Quadro K610M" },
+	{ 0x12ba, "Quadro K510M" },
+	{ 0x1340, "GeForce 830M", nvkm_device_pci_10de_1340 },
+	{ 0x1341, "GeForce 840M", nvkm_device_pci_10de_1341 },
+	{ 0x1344, "GeForce 845M" },
+	{ 0x1346, "GeForce 930M", nvkm_device_pci_10de_1346 },
+	{ 0x1347, "GeForce 940M", nvkm_device_pci_10de_1347 },
+	{ 0x137a, NULL, nvkm_device_pci_10de_137a },
+	{ 0x137d, NULL, nvkm_device_pci_10de_137d },
+	{ 0x1380, "GeForce GTX 750 Ti" },
+	{ 0x1381, "GeForce GTX 750" },
+	{ 0x1382, "GeForce GTX 745" },
+	{ 0x1390, "GeForce 845M" },
+	{ 0x1391, "GeForce GTX 850M", nvkm_device_pci_10de_1391 },
+	{ 0x1392, "GeForce GTX 860M", nvkm_device_pci_10de_1392 },
+	{ 0x1393, "GeForce 840M" },
+	{ 0x1398, "GeForce 845M" },
+	{ 0x139a, "GeForce GTX 950M", nvkm_device_pci_10de_139a },
+	{ 0x139b, "GeForce GTX 960M", nvkm_device_pci_10de_139b },
+	{ 0x139c, "GeForce 940M" },
+	{ 0x13b3, "Quadro K2200M" },
+	{ 0x13ba, "Quadro K2200" },
+	{ 0x13bb, "Quadro K620" },
+	{ 0x13bc, "Quadro K1200" },
+	{ 0x13c0, "GeForce GTX 980" },
+	{ 0x13c2, "GeForce GTX 970" },
+	{ 0x13d7, "GeForce GTX 980M" },
+	{ 0x13d8, "GeForce GTX 970M" },
+	{ 0x13d9, "GeForce GTX 965M" },
+	{ 0x1401, "GeForce GTX 960" },
+	{ 0x1617, "GeForce GTX 980M" },
+	{ 0x1618, "GeForce GTX 970M" },
+	{ 0x1619, "GeForce GTX 965M" },
+	{ 0x17c2, "GeForce GTX TITAN X" },
+	{ 0x17c8, "GeForce GTX 980 Ti" },
+	{ 0x17f0, "Quadro M6000" },
+	{}
+};
+
+static struct nvkm_device_pci *
+nvkm_device_pci(struct nvkm_device *device)
+{
+	return container_of(device, struct nvkm_device_pci, device);
+}
+
+static resource_size_t
+nvkm_device_pci_resource_addr(struct nvkm_device *device, unsigned bar)
+{
+	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+	return pci_resource_start(pdev->pdev, bar);
+}
+
+static resource_size_t
+nvkm_device_pci_resource_size(struct nvkm_device *device, unsigned bar)
+{
+	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+	return pci_resource_len(pdev->pdev, bar);
+}
+
+static void
+nvkm_device_pci_fini(struct nvkm_device *device, bool suspend)
+{
+	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
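+	/* On suspend the PCI device is disabled here and re-enabled
+	 * (restoring bus mastering) in preinit on the resume path. */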
+	if (suspend) {
+		pci_disable_device(pdev->pdev);
+		pdev->suspend = true;
+	}
+}
+
+static int
+nvkm_device_pci_preinit(struct nvkm_device *device)
+{
+	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+	if (pdev->suspend) {
+		int ret = pci_enable_device(pdev->pdev);
+		if (ret)
+			return ret;
+		pci_set_master(pdev->pdev);
+		pdev->suspend = false;
+	}
+	return 0;
+}
+
+static void *
+nvkm_device_pci_dtor(struct nvkm_device *device)
+{
+	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+	pci_disable_device(pdev->pdev);
+	return pdev;
+}
+
+static const struct nvkm_device_func
+nvkm_device_pci_func = {
+	.pci = nvkm_device_pci,
+	.dtor = nvkm_device_pci_dtor,
+	.preinit = nvkm_device_pci_preinit,
+	.fini = nvkm_device_pci_fini,
+	.resource_addr = nvkm_device_pci_resource_addr,
+	.resource_size = nvkm_device_pci_resource_size,
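+	/* Assumption from the flag name: DMA is treated as coherent with
+	 * the CPU caches everywhere except ARM, where explicit cache
+	 * maintenance is required. */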
+	.cpu_coherent = !IS_ENABLED(CONFIG_ARM),
+};
+
+int
+nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
+		    bool detect, bool mmio, u64 subdev_mask,
+		    struct nvkm_device **pdevice)
+{
+	const struct nvkm_device_quirk *quirk = NULL;
+	const struct nvkm_device_pci_device *pcid;
+	const struct nvkm_device_pci_vendor *pciv;
+	const char *name = NULL;
+	struct nvkm_device_pci *pdev;
+	int ret;
+
+	ret = pci_enable_device(pci_dev);
+	if (ret)
+		return ret;
+
+	switch (pci_dev->vendor) {
+	case 0x10de: pcid = nvkm_device_pci_10de; break;
+	default:
+		pcid = NULL;
+		break;
+	}
+
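+	/* Two-level lookup: match the PCI device ID in the vendor table,
+	 * then, if that entry chains a subsystem table, refine the name
+	 * and pick up any board-specific quirks from a matching
+	 * subsystem vendor/device pair. */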
+	while (pcid && pcid->device) {
+		if (pcid->device == pci_dev->device) {
+			pciv = pcid->vendor;
+			while (pciv && pciv->vendor) {
+				if (pciv->vendor == pci_dev->subsystem_vendor &&
+				    pciv->device == pci_dev->subsystem_device) {
+					quirk = &pciv->quirk;
+					name  =  pciv->name;
+					break;
+				}
+				pciv++;
+			}
+			if (!name)
+				name = pcid->name;
+			break;
+		}
+		pcid++;
+	}
+
+	if (!(pdev = kzalloc(sizeof(*pdev), GFP_KERNEL))) {
+		pci_disable_device(pci_dev);
+		return -ENOMEM;
+	}
+	*pdevice = &pdev->device;
+	pdev->pdev = pci_dev;
+
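+	/* The 64-bit handle encodes the PCI address as
+	 * domain << 32 | bus << 16 | slot << 8 | function. */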
+	return nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
+				pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
+				pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
+				NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
+				(u64)pci_domain_nr(pci_dev->bus) << 32 |
+				     pci_dev->bus->number << 16 |
+				     PCI_SLOT(pci_dev->devfn) << 8 |
+				     PCI_FUNC(pci_dev->devfn), name,
+				cfg, dbg, detect, mmio, subdev_mask,
+				&pdev->device);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 8d3590e..ed3ad2c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -2,15 +2,49 @@
 #define __NVKM_DEVICE_PRIV_H__
 #include <core/device.h>
 
-extern struct nvkm_oclass nvkm_control_oclass[];
+#include <subdev/bar.h>
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/clk.h>
+#include <subdev/devinit.h>
+#include <subdev/fb.h>
+#include <subdev/fuse.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/ibus.h>
+#include <subdev/instmem.h>
+#include <subdev/ltc.h>
+#include <subdev/mc.h>
+#include <subdev/mmu.h>
+#include <subdev/mxm.h>
+#include <subdev/pci.h>
+#include <subdev/pmu.h>
+#include <subdev/therm.h>
+#include <subdev/timer.h>
+#include <subdev/volt.h>
 
-int nv04_identify(struct nvkm_device *);
-int nv10_identify(struct nvkm_device *);
-int nv20_identify(struct nvkm_device *);
-int nv30_identify(struct nvkm_device *);
-int nv40_identify(struct nvkm_device *);
-int nv50_identify(struct nvkm_device *);
-int gf100_identify(struct nvkm_device *);
-int gk104_identify(struct nvkm_device *);
-int gm100_identify(struct nvkm_device *);
+#include <engine/bsp.h>
+#include <engine/ce.h>
+#include <engine/cipher.h>
+#include <engine/disp.h>
+#include <engine/dma.h>
+#include <engine/fifo.h>
+#include <engine/gr.h>
+#include <engine/mpeg.h>
+#include <engine/mspdec.h>
+#include <engine/msppp.h>
+#include <engine/msvld.h>
+#include <engine/pm.h>
+#include <engine/sec.h>
+#include <engine/sw.h>
+#include <engine/vp.h>
+
+int  nvkm_device_ctor(const struct nvkm_device_func *,
+		      const struct nvkm_device_quirk *,
+		      struct device *, enum nvkm_device_type, u64 handle,
+		      const char *name, const char *cfg, const char *dbg,
+		      bool detect, bool mmio, u64 subdev_mask,
+		      struct nvkm_device *);
+int  nvkm_device_init(struct nvkm_device *);
+int  nvkm_device_fini(struct nvkm_device *, bool suspend);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
new file mode 100644
index 0000000..da57c8a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <core/tegra.h>
+#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
+#include "priv.h"
+
+static int
+nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
+{
+	int ret;
+
+	ret = regulator_enable(tdev->vdd);
+	if (ret)
+		goto err_power;
+
+	ret = clk_prepare_enable(tdev->clk);
+	if (ret)
+		goto err_clk;
+	ret = clk_prepare_enable(tdev->clk_pwr);
+	if (ret)
+		goto err_clk_pwr;
+	clk_set_rate(tdev->clk_pwr, 204000000);
+	udelay(10);
+
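+	/* Bring-up ordering: hold the GPU in reset while the power
+	 * partition is unclamped, then release reset, with short settle
+	 * delays between each step. */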
+	reset_control_assert(tdev->rst);
+	udelay(10);
+
+	ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
+	if (ret)
+		goto err_clamp;
+	udelay(10);
+
+	reset_control_deassert(tdev->rst);
+	udelay(10);
+
+	return 0;
+
+err_clamp:
+	clk_disable_unprepare(tdev->clk_pwr);
+err_clk_pwr:
+	clk_disable_unprepare(tdev->clk);
+err_clk:
+	regulator_disable(tdev->vdd);
+err_power:
+	return ret;
+}
+
+static int
+nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
+{
+	reset_control_assert(tdev->rst);
+	udelay(10);
+
+	clk_disable_unprepare(tdev->clk_pwr);
+	clk_disable_unprepare(tdev->clk);
+	udelay(10);
+
+	return regulator_disable(tdev->vdd);
+}
+
+static void
+nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
+{
+#if IS_ENABLED(CONFIG_IOMMU_API)
+	struct device *dev = &tdev->pdev->dev;
+	unsigned long pgsize_bitmap;
+	int ret;
+
+	mutex_init(&tdev->iommu.mutex);
+
+	if (iommu_present(&platform_bus_type)) {
+		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
+		if (!tdev->iommu.domain)
+			goto error;
+
+		/*
+		 * An IOMMU is only usable if it supports page sizes smaller
+		 * than or equal to the system's PAGE_SIZE, with an exact
+		 * match being preferred.
+		 */
+		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
+		if (pgsize_bitmap & PAGE_SIZE) {
+			tdev->iommu.pgshift = PAGE_SHIFT;
+		} else {
+			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
+			if (tdev->iommu.pgshift == 0) {
+				dev_warn(dev, "unsupported IOMMU page size\n");
+				goto free_domain;
+			}
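+			/* fls() is 1-based, so subtract one to get the
+			 * shift of the largest supported page size that
+			 * is still smaller than PAGE_SIZE. */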
+			tdev->iommu.pgshift -= 1;
+		}
+
+		ret = iommu_attach_device(tdev->iommu.domain, dev);
+		if (ret)
+			goto free_domain;
+
+		ret = nvkm_mm_init(&tdev->iommu.mm, 0,
+				   (1ULL << 40) >> tdev->iommu.pgshift, 1);
+		if (ret)
+			goto detach_device;
+	}
+
+	return;
+
+detach_device:
+	iommu_detach_device(tdev->iommu.domain, dev);
+
+free_domain:
+	iommu_domain_free(tdev->iommu.domain);
+
+error:
+	tdev->iommu.domain = NULL;
+	tdev->iommu.pgshift = 0;
+	dev_err(dev, "cannot initialize IOMMU MM\n");
+#endif
+}
+
+static void
+nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
+{
+#if IS_ENABLED(CONFIG_IOMMU_API)
+	if (tdev->iommu.domain) {
+		nvkm_mm_fini(&tdev->iommu.mm);
+		iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
+		iommu_domain_free(tdev->iommu.domain);
+	}
+#endif
+}
+
+static struct nvkm_device_tegra *
+nvkm_device_tegra(struct nvkm_device *device)
+{
+	return container_of(device, struct nvkm_device_tegra, device);
+}
+
+static struct resource *
+nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
+{
+	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
+}
+
+static resource_size_t
+nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
+{
+	struct resource *res = nvkm_device_tegra_resource(device, bar);
+	return res ? res->start : 0;
+}
+
+static resource_size_t
+nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
+{
+	struct resource *res = nvkm_device_tegra_resource(device, bar);
+	return res ? resource_size(res) : 0;
+}
+
+static irqreturn_t
+nvkm_device_tegra_intr(int irq, void *arg)
+{
+	struct nvkm_device_tegra *tdev = arg;
+	struct nvkm_mc *mc = tdev->device.mc;
+	bool handled = false;
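+	/* Mask (unarm) the interrupt, dispatch to the MC subdev, then
+	 * re-arm - the usual pattern for a shared interrupt line. */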
+	if (likely(mc)) {
+		nvkm_mc_intr_unarm(mc);
+		nvkm_mc_intr(mc, &handled);
+		nvkm_mc_intr_rearm(mc);
+	}
+	return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static void
+nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
+{
+	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+	if (tdev->irq > 0) {
+		free_irq(tdev->irq, tdev);
+		tdev->irq = 0;
+	}
+}
+
+static int
+nvkm_device_tegra_init(struct nvkm_device *device)
+{
+	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+	int irq, ret;
+
+	irq = platform_get_irq_byname(tdev->pdev, "stall");
+	if (irq < 0)
+		return irq;
+
+	ret = request_irq(irq, nvkm_device_tegra_intr,
+			  IRQF_SHARED, "nvkm", tdev);
+	if (ret)
+		return ret;
+
+	tdev->irq = irq;
+	return 0;
+}
+
+static void *
+nvkm_device_tegra_dtor(struct nvkm_device *device)
+{
+	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+	nvkm_device_tegra_power_down(tdev);
+	nvkm_device_tegra_remove_iommu(tdev);
+	return tdev;
+}
+
+static const struct nvkm_device_func
+nvkm_device_tegra_func = {
+	.tegra = nvkm_device_tegra,
+	.dtor = nvkm_device_tegra_dtor,
+	.init = nvkm_device_tegra_init,
+	.fini = nvkm_device_tegra_fini,
+	.resource_addr = nvkm_device_tegra_resource_addr,
+	.resource_size = nvkm_device_tegra_resource_size,
+	.cpu_coherent = false,
+};
+
+int
+nvkm_device_tegra_new(struct platform_device *pdev,
+		      const char *cfg, const char *dbg,
+		      bool detect, bool mmio, u64 subdev_mask,
+		      struct nvkm_device **pdevice)
+{
+	struct nvkm_device_tegra *tdev;
+	int ret;
+
+	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
+		return -ENOMEM;
+	*pdevice = &tdev->device;
+	tdev->pdev = pdev;
+	tdev->irq = -1;
+
+	tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(tdev->vdd))
+		return PTR_ERR(tdev->vdd);
+
+	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
+	if (IS_ERR(tdev->rst))
+		return PTR_ERR(tdev->rst);
+
+	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
+	if (IS_ERR(tdev->clk))
+		return PTR_ERR(tdev->clk);
+
+	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
+	if (IS_ERR(tdev->clk_pwr))
+		return PTR_ERR(tdev->clk_pwr);
+
+	nvkm_device_tegra_probe_iommu(tdev);
+
+	ret = nvkm_device_tegra_power_up(tdev);
+	if (ret)
+		return ret;
+
+	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
+	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
+			       NVKM_DEVICE_TEGRA, pdev->id, NULL,
+			       cfg, dbg, detect, mmio, subdev_mask,
+			       &tdev->device);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+#else
+int
+nvkm_device_tegra_new(struct platform_device *pdev,
+		      const char *cfg, const char *dbg,
+		      bool detect, bool mmio, u64 subdev_mask,
+		      struct nvkm_device **pdevice)
+{
+	return -ENOSYS;
+}
+#endif
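
Note: nvkm_device_tegra() above is the usual embed-and-upcast idiom: the
wrapper embeds a struct nvkm_device and recovers itself from a pointer to
that member via container_of(). A minimal, self-contained userspace sketch
of the same idiom (all names below are illustrative, not the driver's):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct device { int id; };

	struct tegra_wrapper {
		int irq;
		struct device device;	/* embedded, like nvkm_device_tegra */
	};

	static struct tegra_wrapper *to_wrapper(struct device *dev)
	{
		return container_of(dev, struct tegra_wrapper, device);
	}

	int main(void)
	{
		struct tegra_wrapper w = { .irq = 42 };
		struct device *dev = &w.device;	/* only the embedded object travels */
		printf("irq=%d\n", to_wrapper(dev)->irq);	/* prints irq=42 */
		return 0;
	}

The same cast is what lets every nvkm_device_func callback take the generic
struct nvkm_device and still reach the Tegra-specific state.
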
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
new file mode 100644
index 0000000..1ae48f2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -0,0 +1,371 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define nvkm_udevice(p) container_of((p), struct nvkm_udevice, object)
+#include "priv.h"
+#include "ctrl.h"
+
+#include <core/client.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct nvkm_udevice {
+	struct nvkm_object object;
+	struct nvkm_device *device;
+};
+
+static int
+nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
+{
+	struct nvkm_object *object = &udev->object;
+	struct nvkm_device *device = udev->device;
+	struct nvkm_fb *fb = device->fb;
+	struct nvkm_instmem *imem = device->imem;
+	union {
+		struct nv_device_info_v0 v0;
+	} *args = data;
+	int ret;
+
+	nvif_ioctl(object, "device info size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "device info vers %d\n", args->v0.version);
+	} else
+		return ret;
+
+	switch (device->chipset) {
+	case 0x01a:
+	case 0x01f:
+	case 0x04c:
+	case 0x04e:
+	case 0x063:
+	case 0x067:
+	case 0x068:
+	case 0x0aa:
+	case 0x0ac:
+	case 0x0af:
+		args->v0.platform = NV_DEVICE_INFO_V0_IGP;
+		break;
+	default:
+		switch (device->type) {
+		case NVKM_DEVICE_PCI:
+			args->v0.platform = NV_DEVICE_INFO_V0_PCI;
+			break;
+		case NVKM_DEVICE_AGP:
+			args->v0.platform = NV_DEVICE_INFO_V0_AGP;
+			break;
+		case NVKM_DEVICE_PCIE:
+			args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
+			break;
+		case NVKM_DEVICE_TEGRA:
+			args->v0.platform = NV_DEVICE_INFO_V0_SOC;
+			break;
+		default:
+			WARN_ON(1);
+			break;
+		}
+		break;
+	}
+
+	switch (device->card_type) {
+	case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
+	case NV_10:
+	case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
+	case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
+	case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
+	case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
+	case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
+	case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
+	case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
+	case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
+	default:
+		args->v0.family = 0;
+		break;
+	}
+
+	args->v0.chipset  = device->chipset;
+	args->v0.revision = device->chiprev;
+	if (fb && fb->ram)
+		args->v0.ram_size = args->v0.ram_user = fb->ram->size;
+	else
+		args->v0.ram_size = args->v0.ram_user = 0;
+	if (imem && args->v0.ram_size > 0)
+		args->v0.ram_user = args->v0.ram_user - imem->reserved;
+
+	strncpy(args->v0.chip, device->chip->name, sizeof(args->v0.chip));
+	strncpy(args->v0.name, device->name, sizeof(args->v0.name));
+	return 0;
+}
+
+static int
+nvkm_udevice_time(struct nvkm_udevice *udev, void *data, u32 size)
+{
+	struct nvkm_device *device = udev->device;
+	union {
+		struct nv_device_time_v0 v0;
+	} *args = data;
+	int ret;
+
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		args->v0.time = nvkm_timer_read(device->timer);
+	}
+
+	return ret;
+}
+
+static int
+nvkm_udevice_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	switch (mthd) {
+	case NV_DEVICE_V0_INFO:
+		return nvkm_udevice_info(udev, data, size);
+	case NV_DEVICE_V0_TIME:
+		return nvkm_udevice_time(udev, data, size);
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static int
+nvkm_udevice_rd08(struct nvkm_object *object, u64 addr, u8 *data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	*data = nvkm_rd08(udev->device, addr);
+	return 0;
+}
+
+static int
+nvkm_udevice_rd16(struct nvkm_object *object, u64 addr, u16 *data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	*data = nvkm_rd16(udev->device, addr);
+	return 0;
+}
+
+static int
+nvkm_udevice_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	*data = nvkm_rd32(udev->device, addr);
+	return 0;
+}
+
+static int
+nvkm_udevice_wr08(struct nvkm_object *object, u64 addr, u8 data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	nvkm_wr08(udev->device, addr, data);
+	return 0;
+}
+
+static int
+nvkm_udevice_wr16(struct nvkm_object *object, u64 addr, u16 data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	nvkm_wr16(udev->device, addr, data);
+	return 0;
+}
+
+static int
+nvkm_udevice_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	nvkm_wr32(udev->device, addr, data);
+	return 0;
+}
+
+static int
+nvkm_udevice_map(struct nvkm_object *object, u64 *addr, u32 *size)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	struct nvkm_device *device = udev->device;
+	*addr = device->func->resource_addr(device, 0);
+	*size = device->func->resource_size(device, 0);
+	return 0;
+}
+
+static int
+nvkm_udevice_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	struct nvkm_device *device = udev->device;
+	int ret = 0;
+
+	mutex_lock(&device->mutex);
+	if (!--device->refcount) {
+		ret = nvkm_device_fini(device, suspend);
+		if (ret && suspend) {
+			device->refcount++;
+			goto done;
+		}
+	}
+
+done:
+	mutex_unlock(&device->mutex);
+	return ret;
+}
+
+static int
+nvkm_udevice_init(struct nvkm_object *object)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	struct nvkm_device *device = udev->device;
+	int ret = 0;
+
+	mutex_lock(&device->mutex);
+	if (!device->refcount++) {
+		ret = nvkm_device_init(device);
+		if (ret) {
+			device->refcount--;
+			goto done;
+		}
+	}
+
+done:
+	mutex_unlock(&device->mutex);
+	return ret;
+}
+
+static int
+nvkm_udevice_child_new(const struct nvkm_oclass *oclass,
+		       void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(oclass->parent);
+	const struct nvkm_device_oclass *sclass = oclass->priv;
+	return sclass->ctor(udev->device, oclass, data, size, pobject);
+}
+
+static int
+nvkm_udevice_child_get(struct nvkm_object *object, int index,
+		       struct nvkm_oclass *oclass)
+{
+	struct nvkm_udevice *udev = nvkm_udevice(object);
+	struct nvkm_device *device = udev->device;
+	struct nvkm_engine *engine;
+	u64 mask = (1ULL << NVKM_ENGINE_DMAOBJ) |
+		   (1ULL << NVKM_ENGINE_FIFO) |
+		   (1ULL << NVKM_ENGINE_DISP) |
+		   (1ULL << NVKM_ENGINE_PM);
+	const struct nvkm_device_oclass *sclass = NULL;
+	int i;
+
+	for (; i = __ffs64(mask), mask && !sclass; mask &= ~(1ULL << i)) {
+		if (!(engine = nvkm_device_engine(device, i)) ||
+		    !(engine->func->base.sclass))
+			continue;
+		oclass->engine = engine;
+
+		index -= engine->func->base.sclass(oclass, index, &sclass);
+	}
+
+	if (!sclass) {
+		switch (index) {
+		case 0: sclass = &nvkm_control_oclass; break;
+		default:
+			return -EINVAL;
+		}
+		oclass->base = sclass->base;
+	}
+
+	oclass->ctor = nvkm_udevice_child_new;
+	oclass->priv = sclass;
+	return 0;
+}
+
+static const struct nvkm_object_func
+nvkm_udevice_super = {
+	.init = nvkm_udevice_init,
+	.fini = nvkm_udevice_fini,
+	.mthd = nvkm_udevice_mthd,
+	.map = nvkm_udevice_map,
+	.rd08 = nvkm_udevice_rd08,
+	.rd16 = nvkm_udevice_rd16,
+	.rd32 = nvkm_udevice_rd32,
+	.wr08 = nvkm_udevice_wr08,
+	.wr16 = nvkm_udevice_wr16,
+	.wr32 = nvkm_udevice_wr32,
+	.sclass = nvkm_udevice_child_get,
+};
+
+static const struct nvkm_object_func
+nvkm_udevice = {
+	.init = nvkm_udevice_init,
+	.fini = nvkm_udevice_fini,
+	.mthd = nvkm_udevice_mthd,
+	.sclass = nvkm_udevice_child_get,
+};
+
+int
+nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+		 struct nvkm_object **pobject)
+{
+	union {
+		struct nv_device_v0 v0;
+	} *args = data;
+	struct nvkm_client *client = oclass->client;
+	struct nvkm_object *parent = &client->object;
+	const struct nvkm_object_func *func;
+	struct nvkm_udevice *udev;
+	int ret;
+
+	nvif_ioctl(parent, "create device size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create device v%d device %016llx\n",
+			   args->v0.version, args->v0.device);
+	} else
+		return ret;
+
+	/* give privileged clients register access */
+	if (client->super)
+		func = &nvkm_udevice_super;
+	else
+		func = &nvkm_udevice;
+
+	if (!(udev = kzalloc(sizeof(*udev), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(func, oclass, &udev->object);
+	*pobject = &udev->object;
+
+	/* find the device that matches what the client requested */
+	if (args->v0.device != ~0)
+		udev->device = nvkm_device_find(args->v0.device);
+	else
+		udev->device = nvkm_device_find(client->device);
+	if (!udev->device)
+		return -ENODEV;
+
+	return 0;
+}
+
+const struct nvkm_sclass
+nvkm_udevice_sclass = {
+	.oclass = NV_DEVICE,
+	.minver = 0,
+	.maxver = 0,
+	.ctor = nvkm_udevice_new,
+};
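
Note: nvkm_udevice_init()/nvkm_udevice_fini() above implement first-open /
last-close semantics: a mutex-protected refcount ensures nvkm_device_init()
runs only for the first client and nvkm_device_fini() only when the last
one goes away, with the count rolled back if init (or a suspend-time fini)
fails. A self-contained userspace analogue of that pattern, using pthreads
(all names below are illustrative):

	#include <pthread.h>

	struct refdev {
		pthread_mutex_t mutex;
		int refcount;
	};

	static int  hw_init(struct refdev *d) { (void)d; return 0; } /* stands in for nvkm_device_init() */
	static void hw_fini(struct refdev *d) { (void)d; }           /* stands in for nvkm_device_fini() */

	static int refdev_open(struct refdev *d)
	{
		int ret = 0;
		pthread_mutex_lock(&d->mutex);
		if (!d->refcount++) {		/* first opener initializes */
			ret = hw_init(d);
			if (ret)
				d->refcount--;	/* roll back on failure */
		}
		pthread_mutex_unlock(&d->mutex);
		return ret;
	}

	static void refdev_close(struct refdev *d)
	{
		pthread_mutex_lock(&d->mutex);
		if (!--d->refcount)		/* last closer finalizes */
			hw_fini(d);
		pthread_mutex_unlock(&d->mutex);
	}
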
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 16a4e2a..04f6045 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -1,29 +1,93 @@
 nvkm-y += nvkm/engine/disp/base.o
-nvkm-y += nvkm/engine/disp/conn.o
-nvkm-y += nvkm/engine/disp/outp.o
-nvkm-y += nvkm/engine/disp/outpdp.o
 nvkm-y += nvkm/engine/disp/nv04.o
 nvkm-y += nvkm/engine/disp/nv50.o
 nvkm-y += nvkm/engine/disp/g84.o
 nvkm-y += nvkm/engine/disp/g94.o
 nvkm-y += nvkm/engine/disp/gt200.o
 nvkm-y += nvkm/engine/disp/gt215.o
-nvkm-y += nvkm/engine/disp/gf110.o
+nvkm-y += nvkm/engine/disp/gf119.o
 nvkm-y += nvkm/engine/disp/gk104.o
 nvkm-y += nvkm/engine/disp/gk110.o
 nvkm-y += nvkm/engine/disp/gm107.o
 nvkm-y += nvkm/engine/disp/gm204.o
+
+nvkm-y += nvkm/engine/disp/outp.o
+nvkm-y += nvkm/engine/disp/outpdp.o
 nvkm-y += nvkm/engine/disp/dacnv50.o
-nvkm-y += nvkm/engine/disp/dport.o
-nvkm-y += nvkm/engine/disp/hdagt215.o
-nvkm-y += nvkm/engine/disp/hdagf110.o
-nvkm-y += nvkm/engine/disp/hdmig84.o
-nvkm-y += nvkm/engine/disp/hdmigt215.o
-nvkm-y += nvkm/engine/disp/hdmigf110.o
-nvkm-y += nvkm/engine/disp/hdmigk104.o
 nvkm-y += nvkm/engine/disp/piornv50.o
 nvkm-y += nvkm/engine/disp/sornv50.o
 nvkm-y += nvkm/engine/disp/sorg94.o
-nvkm-y += nvkm/engine/disp/sorgf110.o
+nvkm-y += nvkm/engine/disp/sorgf119.o
 nvkm-y += nvkm/engine/disp/sorgm204.o
+nvkm-y += nvkm/engine/disp/dport.o
+
+nvkm-y += nvkm/engine/disp/conn.o
+
+nvkm-y += nvkm/engine/disp/hdagt215.o
+nvkm-y += nvkm/engine/disp/hdagf119.o
+
+nvkm-y += nvkm/engine/disp/hdmig84.o
+nvkm-y += nvkm/engine/disp/hdmigt215.o
+nvkm-y += nvkm/engine/disp/hdmigf119.o
+nvkm-y += nvkm/engine/disp/hdmigk104.o
+
 nvkm-y += nvkm/engine/disp/vga.o
+
+nvkm-y += nvkm/engine/disp/rootnv04.o
+nvkm-y += nvkm/engine/disp/rootnv50.o
+nvkm-y += nvkm/engine/disp/rootg84.o
+nvkm-y += nvkm/engine/disp/rootg94.o
+nvkm-y += nvkm/engine/disp/rootgt200.o
+nvkm-y += nvkm/engine/disp/rootgt215.o
+nvkm-y += nvkm/engine/disp/rootgf119.o
+nvkm-y += nvkm/engine/disp/rootgk104.o
+nvkm-y += nvkm/engine/disp/rootgk110.o
+nvkm-y += nvkm/engine/disp/rootgm107.o
+nvkm-y += nvkm/engine/disp/rootgm204.o
+
+nvkm-y += nvkm/engine/disp/channv50.o
+nvkm-y += nvkm/engine/disp/changf119.o
+
+nvkm-y += nvkm/engine/disp/dmacnv50.o
+nvkm-y += nvkm/engine/disp/dmacgf119.o
+
+nvkm-y += nvkm/engine/disp/basenv50.o
+nvkm-y += nvkm/engine/disp/baseg84.o
+nvkm-y += nvkm/engine/disp/basegt200.o
+nvkm-y += nvkm/engine/disp/basegt215.o
+nvkm-y += nvkm/engine/disp/basegf119.o
+nvkm-y += nvkm/engine/disp/basegk104.o
+nvkm-y += nvkm/engine/disp/basegk110.o
+
+nvkm-y += nvkm/engine/disp/corenv50.o
+nvkm-y += nvkm/engine/disp/coreg84.o
+nvkm-y += nvkm/engine/disp/coreg94.o
+nvkm-y += nvkm/engine/disp/coregt200.o
+nvkm-y += nvkm/engine/disp/coregt215.o
+nvkm-y += nvkm/engine/disp/coregf119.o
+nvkm-y += nvkm/engine/disp/coregk104.o
+nvkm-y += nvkm/engine/disp/coregk110.o
+nvkm-y += nvkm/engine/disp/coregm107.o
+nvkm-y += nvkm/engine/disp/coregm204.o
+
+nvkm-y += nvkm/engine/disp/ovlynv50.o
+nvkm-y += nvkm/engine/disp/ovlyg84.o
+nvkm-y += nvkm/engine/disp/ovlygt200.o
+nvkm-y += nvkm/engine/disp/ovlygt215.o
+nvkm-y += nvkm/engine/disp/ovlygf119.o
+nvkm-y += nvkm/engine/disp/ovlygk104.o
+
+nvkm-y += nvkm/engine/disp/piocnv50.o
+nvkm-y += nvkm/engine/disp/piocgf119.o
+
+nvkm-y += nvkm/engine/disp/cursnv50.o
+nvkm-y += nvkm/engine/disp/cursg84.o
+nvkm-y += nvkm/engine/disp/cursgt215.o
+nvkm-y += nvkm/engine/disp/cursgf119.o
+nvkm-y += nvkm/engine/disp/cursgk104.o
+
+nvkm-y += nvkm/engine/disp/oimmnv50.o
+nvkm-y += nvkm/engine/disp/oimmg84.o
+nvkm-y += nvkm/engine/disp/oimmgt215.o
+nvkm-y += nvkm/engine/disp/oimmgf119.o
+nvkm-y += nvkm/engine/disp/oimmgk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 23d1b5c..44b6771 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -25,7 +25,9 @@
 #include "conn.h"
 #include "outp.h"
 
+#include <core/client.h>
 #include <core/notify.h>
+#include <core/oproxy.h>
 #include <subdev/bios.h>
 #include <subdev/bios/dcb.h>
 
@@ -33,7 +35,21 @@
 #include <nvif/event.h>
 #include <nvif/unpack.h>
 
-int
+static void
+nvkm_disp_vblank_fini(struct nvkm_event *event, int type, int head)
+{
+	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
+	disp->func->head.vblank_fini(disp, head);
+}
+
+static void
+nvkm_disp_vblank_init(struct nvkm_event *event, int type, int head)
+{
+	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
+	disp->func->head.vblank_init(disp, head);
+}
+
+static int
 nvkm_disp_vblank_ctor(struct nvkm_object *object, void *data, u32 size,
 		      struct nvkm_notify *notify)
 {
@@ -56,6 +72,13 @@
 	return ret;
 }
 
+static const struct nvkm_event_func
+nvkm_disp_vblank_func = {
+	.ctor = nvkm_disp_vblank_ctor,
+	.init = nvkm_disp_vblank_init,
+	.fini = nvkm_disp_vblank_fini,
+};
+
 void
 nvkm_disp_vblank(struct nvkm_disp *disp, int head)
 {
@@ -100,7 +123,7 @@
 int
 nvkm_disp_ntfy(struct nvkm_object *object, u32 type, struct nvkm_event **event)
 {
-	struct nvkm_disp *disp = (void *)object->engine;
+	struct nvkm_disp *disp = nvkm_disp(object->engine);
 	switch (type) {
 	case NV04_DISP_NTFY_VBLANK:
 		*event = &disp->vblank;
@@ -114,127 +137,303 @@
 	return -EINVAL;
 }
 
-int
-_nvkm_disp_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_disp_class_del(struct nvkm_oproxy *oproxy)
 {
-	struct nvkm_disp *disp = (void *)object;
-	struct nvkm_output *outp;
-	int ret;
-
-	list_for_each_entry(outp, &disp->outp, head) {
-		ret = nv_ofuncs(outp)->fini(nv_object(outp), suspend);
-		if (ret && suspend)
-			goto fail_outp;
-	}
-
-	return nvkm_engine_fini(&disp->base, suspend);
-
-fail_outp:
-	list_for_each_entry_continue_reverse(outp, &disp->outp, head) {
-		nv_ofuncs(outp)->init(nv_object(outp));
-	}
-
-	return ret;
+	struct nvkm_disp *disp = nvkm_disp(oproxy->base.engine);
+	mutex_lock(&disp->engine.subdev.mutex);
+	if (disp->client == oproxy)
+		disp->client = NULL;
+	mutex_unlock(&disp->engine.subdev.mutex);
 }
 
-int
-_nvkm_disp_init(struct nvkm_object *object)
+static const struct nvkm_oproxy_func
+nvkm_disp_class = {
+	.dtor[1] = nvkm_disp_class_del,
+};
+
+static int
+nvkm_disp_class_new(struct nvkm_device *device,
+		    const struct nvkm_oclass *oclass, void *data, u32 size,
+		    struct nvkm_object **pobject)
 {
-	struct nvkm_disp *disp = (void *)object;
-	struct nvkm_output *outp;
+	const struct nvkm_disp_oclass *sclass = oclass->engn;
+	struct nvkm_disp *disp = nvkm_disp(oclass->engine);
+	struct nvkm_oproxy *oproxy;
 	int ret;
 
-	ret = nvkm_engine_init(&disp->base);
+	ret = nvkm_oproxy_new_(&nvkm_disp_class, oclass, &oproxy);
 	if (ret)
 		return ret;
+	*pobject = &oproxy->base;
 
-	list_for_each_entry(outp, &disp->outp, head) {
-		ret = nv_ofuncs(outp)->init(nv_object(outp));
-		if (ret)
-			goto fail_outp;
+	mutex_lock(&disp->engine.subdev.mutex);
+	if (disp->client) {
+		mutex_unlock(&disp->engine.subdev.mutex);
+		return -EBUSY;
 	}
+	disp->client = oproxy;
+	mutex_unlock(&disp->engine.subdev.mutex);
 
-	return ret;
-
-fail_outp:
-	list_for_each_entry_continue_reverse(outp, &disp->outp, head) {
-		nv_ofuncs(outp)->fini(nv_object(outp), false);
-	}
-
-	return ret;
+	return sclass->ctor(disp, oclass, data, size, &oproxy->object);
 }
 
-void
-_nvkm_disp_dtor(struct nvkm_object *object)
+static const struct nvkm_device_oclass
+nvkm_disp_sclass = {
+	.ctor = nvkm_disp_class_new,
+};
+
+static int
+nvkm_disp_class_get(struct nvkm_oclass *oclass, int index,
+		    const struct nvkm_device_oclass **class)
 {
-	struct nvkm_disp *disp = (void *)object;
-	struct nvkm_output *outp, *outt;
+	struct nvkm_disp *disp = nvkm_disp(oclass->engine);
+	if (index == 0) {
+		const struct nvkm_disp_oclass *root = disp->func->root(disp);
+		oclass->base = root->base;
+		oclass->engn = root;
+		*class = &nvkm_disp_sclass;
+		return 0;
+	}
+	return 1;
+}
+
+static void
+nvkm_disp_intr(struct nvkm_engine *engine)
+{
+	struct nvkm_disp *disp = nvkm_disp(engine);
+	disp->func->intr(disp);
+}
+
+static int
+nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
+{
+	struct nvkm_disp *disp = nvkm_disp(engine);
+	struct nvkm_connector *conn;
+	struct nvkm_output *outp;
+
+	list_for_each_entry(outp, &disp->outp, head) {
+		nvkm_output_fini(outp);
+	}
+
+	list_for_each_entry(conn, &disp->conn, head) {
+		nvkm_connector_fini(conn);
+	}
+
+	return 0;
+}
+
+static int
+nvkm_disp_init(struct nvkm_engine *engine)
+{
+	struct nvkm_disp *disp = nvkm_disp(engine);
+	struct nvkm_connector *conn;
+	struct nvkm_output *outp;
+
+	list_for_each_entry(conn, &disp->conn, head) {
+		nvkm_connector_init(conn);
+	}
+
+	list_for_each_entry(outp, &disp->outp, head) {
+		nvkm_output_init(outp);
+	}
+
+	return 0;
+}
+
+static void *
+nvkm_disp_dtor(struct nvkm_engine *engine)
+{
+	struct nvkm_disp *disp = nvkm_disp(engine);
+	struct nvkm_connector *conn;
+	struct nvkm_output *outp;
+	void *data = disp;
+
+	if (disp->func->dtor)
+		data = disp->func->dtor(disp);
 
 	nvkm_event_fini(&disp->vblank);
 	nvkm_event_fini(&disp->hpd);
 
-	if (disp->outp.next) {
-		list_for_each_entry_safe(outp, outt, &disp->outp, head) {
-			nvkm_object_ref(NULL, (struct nvkm_object **)&outp);
-		}
+	while (!list_empty(&disp->outp)) {
+		outp = list_first_entry(&disp->outp, typeof(*outp), head);
+		list_del(&outp->head);
+		nvkm_output_del(&outp);
 	}
 
-	nvkm_engine_destroy(&disp->base);
+	while (!list_empty(&disp->conn)) {
+		conn = list_first_entry(&disp->conn, typeof(*conn), head);
+		list_del(&conn->head);
+		nvkm_connector_del(&conn);
+	}
+
+	return data;
 }
 
+static const struct nvkm_engine_func
+nvkm_disp = {
+	.dtor = nvkm_disp_dtor,
+	.init = nvkm_disp_init,
+	.fini = nvkm_disp_fini,
+	.intr = nvkm_disp_intr,
+	.base.sclass = nvkm_disp_class_get,
+};
+
 int
-nvkm_disp_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, int heads, const char *intname,
-		  const char *extname, int length, void **pobject)
+nvkm_disp_ctor(const struct nvkm_disp_func *func, struct nvkm_device *device,
+	       int index, int heads, struct nvkm_disp *disp)
 {
-	struct nvkm_disp_impl *impl = (void *)oclass;
-	struct nvkm_bios *bios = nvkm_bios(parent);
-	struct nvkm_disp *disp;
-	struct nvkm_oclass **sclass;
-	struct nvkm_object *object;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_output *outp, *outt, *pair;
+	struct nvkm_connector *conn;
+	struct nvbios_connE connE;
 	struct dcb_output dcbE;
 	u8  hpd = 0, ver, hdr;
 	u32 data;
 	int ret, i;
 
-	ret = nvkm_engine_create_(parent, engine, oclass, true, intname,
-				  extname, length, pobject);
-	disp = *pobject;
+	INIT_LIST_HEAD(&disp->outp);
+	INIT_LIST_HEAD(&disp->conn);
+	disp->func = func;
+	disp->head.nr = heads;
+
+	ret = nvkm_engine_ctor(&nvkm_disp, device, index, 0,
+			       true, &disp->engine);
 	if (ret)
 		return ret;
 
-	INIT_LIST_HEAD(&disp->outp);
-
 	/* create output objects for each display path in the vbios */
 	i = -1;
 	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+		const struct nvkm_disp_func_outp *outps;
+		int (*ctor)(struct nvkm_disp *, int, struct dcb_output *,
+			    struct nvkm_output **);
+
 		if (dcbE.type == DCB_OUTPUT_UNUSED)
 			continue;
 		if (dcbE.type == DCB_OUTPUT_EOL)
 			break;
-		data = dcbE.location << 4 | dcbE.type;
+		outp = NULL;
 
-		oclass = nvkm_output_oclass;
-		sclass = impl->outp;
-		while (sclass && sclass[0]) {
-			if (sclass[0]->handle == data) {
-				oclass = sclass[0];
-				break;
-			}
-			sclass++;
+		switch (dcbE.location) {
+		case 0: outps = &disp->func->outp.internal; break;
+		case 1: outps = &disp->func->outp.external; break;
+		default:
+			nvkm_warn(&disp->engine.subdev,
+				  "dcb %d locn %d unknown\n", i, dcbE.location);
+			continue;
 		}
 
-		nvkm_object_ctor(*pobject, NULL, oclass, &dcbE, i, &object);
+		switch (dcbE.type) {
+		case DCB_OUTPUT_ANALOG: ctor = outps->crt ; break;
+		case DCB_OUTPUT_TV    : ctor = outps->tv  ; break;
+		case DCB_OUTPUT_TMDS  : ctor = outps->tmds; break;
+		case DCB_OUTPUT_LVDS  : ctor = outps->lvds; break;
+		case DCB_OUTPUT_DP    : ctor = outps->dp  ; break;
+		default:
+			nvkm_warn(&disp->engine.subdev,
+				  "dcb %d type %d unknown\n", i, dcbE.type);
+			continue;
+		}
+
+		if (ctor)
+			ret = ctor(disp, i, &dcbE, &outp);
+		else
+			ret = -ENODEV;
+
+		if (ret) {
+			if (ret == -ENODEV) {
+				nvkm_debug(&disp->engine.subdev,
+					   "dcb %d %d/%d not supported\n",
+					   i, dcbE.location, dcbE.type);
+				continue;
+			}
+			nvkm_error(&disp->engine.subdev,
+				   "failed to create output %d\n", i);
+			nvkm_output_del(&outp);
+			continue;
+		}
+
+		list_add_tail(&outp->head, &disp->outp);
 		hpd = max(hpd, (u8)(dcbE.connector + 1));
 	}
 
+	/* create connector objects based on the outputs we support */
+	list_for_each_entry_safe(outp, outt, &disp->outp, head) {
+		/* bios data *should* give us the most useful information */
+		data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
+				     &connE);
+
+		/* no bios connector data... */
+		if (!data) {
+			/* heuristic: anything with the same ccb index is
+			 * considered to be on the same connector; any
+			 * output path without an associated ccb entry will
+			 * be put on its own connector
+			 */
+			int ccb_index = outp->info.i2c_index;
+			if (ccb_index != 0xf) {
+				list_for_each_entry(pair, &disp->outp, head) {
+					if (pair->info.i2c_index == ccb_index) {
+						outp->conn = pair->conn;
+						break;
+					}
+				}
+			}
+
+			/* connector shared with another output path */
+			if (outp->conn)
+				continue;
+
+			memset(&connE, 0x00, sizeof(connE));
+			connE.type = DCB_CONNECTOR_NONE;
+			i = -1;
+		} else {
+			i = outp->info.connector;
+		}
+
+		/* check that we haven't already created this connector */
+		list_for_each_entry(conn, &disp->conn, head) {
+			if (conn->index == outp->info.connector) {
+				outp->conn = conn;
+				break;
+			}
+		}
+
+		if (outp->conn)
+			continue;
+
+		/* apparently we need to create a new one! */
+		ret = nvkm_connector_new(disp, i, &connE, &outp->conn);
+		if (ret) {
+			nvkm_error(&disp->engine.subdev,
+				   "failed to create output %d conn: %d\n",
+				   outp->index, ret);
+			nvkm_connector_del(&outp->conn);
+			list_del(&outp->head);
+			nvkm_output_del(&outp);
+			continue;
+		}
+
+		list_add_tail(&outp->conn->head, &disp->conn);
+	}
+
 	ret = nvkm_event_init(&nvkm_disp_hpd_func, 3, hpd, &disp->hpd);
 	if (ret)
 		return ret;
 
-	ret = nvkm_event_init(impl->vblank, 1, heads, &disp->vblank);
+	ret = nvkm_event_init(&nvkm_disp_vblank_func, 1, heads, &disp->vblank);
 	if (ret)
 		return ret;
 
 	return 0;
 }
+
+int
+nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
+	       int index, int heads, struct nvkm_disp **pdisp)
+{
+	if (!(*pdisp = kzalloc(sizeof(**pdisp), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_disp_ctor(func, device, index, heads, *pdisp);
+}
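
Note: the connector pass in nvkm_disp_ctor() above falls back to a grouping
heuristic when the VBIOS has no connector entry: output paths sharing a CCB
i2c index are put on one connector, and index 0xf (no CCB entry) gets a
private one. A stripped-down sketch of just that grouping step (simplified
singly-linked list, illustrative types, no error handling):

	#include <stdlib.h>

	struct conn { int dummy; };
	struct out  { int i2c_index; struct conn *conn; struct out *next; };

	static void group_outputs(struct out *head)
	{
		for (struct out *o = head; o; o = o->next) {
			if (o->conn)			/* already grouped with an earlier path */
				continue;
			o->conn = calloc(1, sizeof(*o->conn));
			if (o->i2c_index == 0xf)	/* no ccb entry: private connector */
				continue;
			for (struct out *p = o->next; p; p = p->next)
				if (!p->conn && p->i2c_index == o->i2c_index)
					p->conn = o->conn;	/* same index, same connector */
		}
	}
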
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
new file mode 100644
index 0000000..6d17630
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/baseg84.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+g84_disp_base_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x0008c4 },
+		{ 0x0088, 0x0008d0 },
+		{ 0x008c, 0x0008dc },
+		{ 0x0090, 0x0008e4 },
+		{ 0x0094, 0x610884 },
+		{ 0x00a0, 0x6108a0 },
+		{ 0x00a4, 0x610878 },
+		{ 0x00c0, 0x61086c },
+		{ 0x00c4, 0x610800 },
+		{ 0x00c8, 0x61080c },
+		{ 0x00cc, 0x610818 },
+		{ 0x00e0, 0x610858 },
+		{ 0x00e4, 0x610860 },
+		{ 0x00e8, 0x6108ac },
+		{ 0x00ec, 0x6108b4 },
+		{ 0x00fc, 0x610824 },
+		{ 0x0100, 0x610894 },
+		{ 0x0104, 0x61082c },
+		{ 0x0110, 0x6108bc },
+		{ 0x0114, 0x61088c },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+g84_disp_base_chan_mthd = {
+	.name = "Base",
+	.addr = 0x000540,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &g84_disp_base_mthd_base },
+		{  "Image", 2, &nv50_disp_base_mthd_image },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+g84_disp_base_oclass = {
+	.base.oclass = G82_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &g84_disp_base_chan_mthd,
+	.chid = 1,
+};
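
Note: tables like g84_disp_base_mthd_base above pair each channel method
offset with the MMIO address shadowing it; nv50_disp_chan_mthd() walks such
a table and prints any method whose armed value differs from the active one.
A toy walker over the same zero-terminated {mthd, addr} layout (fake
register read, illustrative names):

	#include <stdint.h>
	#include <stdio.h>

	struct mthd_pair { uint32_t mthd, addr; };	/* the {} entry terminates, as above */

	static uint32_t fake_rd32(uint32_t addr) { return addr; }	/* stands in for nvkm_rd32() */

	static void dump_mthds(const struct mthd_pair *list, uint32_t base, uint32_t prev_off)
	{
		for (int i = 0; list[i].mthd; i++) {
			if (!list[i].addr)	/* no shadow register for this method */
				continue;
			uint32_t next = fake_rd32(list[i].addr + base);		/* armed value */
			uint32_t prev = fake_rd32(list[i].addr + base + prev_off);	/* active value */
			if (prev != next)
				printf("%04x: %08x -> %08x\n", list[i].mthd, prev, next);
		}
	}
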
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
new file mode 100644
index 0000000..ebcb925
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegf119.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gf119_disp_base_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x661080 },
+		{ 0x0084, 0x661084 },
+		{ 0x0088, 0x661088 },
+		{ 0x008c, 0x66108c },
+		{ 0x0090, 0x661090 },
+		{ 0x0094, 0x661094 },
+		{ 0x00a0, 0x6610a0 },
+		{ 0x00a4, 0x6610a4 },
+		{ 0x00c0, 0x6610c0 },
+		{ 0x00c4, 0x6610c4 },
+		{ 0x00c8, 0x6610c8 },
+		{ 0x00cc, 0x6610cc },
+		{ 0x00e0, 0x6610e0 },
+		{ 0x00e4, 0x6610e4 },
+		{ 0x00e8, 0x6610e8 },
+		{ 0x00ec, 0x6610ec },
+		{ 0x00fc, 0x6610fc },
+		{ 0x0100, 0x661100 },
+		{ 0x0104, 0x661104 },
+		{ 0x0108, 0x661108 },
+		{ 0x010c, 0x66110c },
+		{ 0x0110, 0x661110 },
+		{ 0x0114, 0x661114 },
+		{ 0x0118, 0x661118 },
+		{ 0x011c, 0x66111c },
+		{ 0x0130, 0x661130 },
+		{ 0x0134, 0x661134 },
+		{ 0x0138, 0x661138 },
+		{ 0x013c, 0x66113c },
+		{ 0x0140, 0x661140 },
+		{ 0x0144, 0x661144 },
+		{ 0x0148, 0x661148 },
+		{ 0x014c, 0x66114c },
+		{ 0x0150, 0x661150 },
+		{ 0x0154, 0x661154 },
+		{ 0x0158, 0x661158 },
+		{ 0x015c, 0x66115c },
+		{ 0x0160, 0x661160 },
+		{ 0x0164, 0x661164 },
+		{ 0x0168, 0x661168 },
+		{ 0x016c, 0x66116c },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+gf119_disp_base_mthd_image = {
+	.mthd = 0x0020,
+	.addr = 0x000020,
+	.data = {
+		{ 0x0400, 0x661400 },
+		{ 0x0404, 0x661404 },
+		{ 0x0408, 0x661408 },
+		{ 0x040c, 0x66140c },
+		{ 0x0410, 0x661410 },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+gf119_disp_base_chan_mthd = {
+	.name = "Base",
+	.addr = 0x001000,
+	.prev = -0x020000,
+	.data = {
+		{ "Global", 1, &gf119_disp_base_mthd_base },
+		{  "Image", 2, &gf119_disp_base_mthd_image },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+gf119_disp_base_oclass = {
+	.base.oclass = GF110_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &gf119_disp_dmac_func,
+	.mthd = &gf119_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c
new file mode 100644
index 0000000..780a1d9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk104.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gk104_disp_base_oclass = {
+	.base.oclass = GK104_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &gf119_disp_dmac_func,
+	.mthd = &gf119_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c
new file mode 100644
index 0000000..d8bdd24
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegk110.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gk110_disp_base_oclass = {
+	.base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &gf119_disp_dmac_func,
+	.mthd = &gf119_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c
new file mode 100644
index 0000000..93451e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt200.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt200_disp_base_oclass = {
+	.base.oclass = GT200_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &g84_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c
new file mode 100644
index 0000000..08e2b1f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegt215.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt215_disp_base_oclass = {
+	.base.oclass = GT214_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &g84_disp_base_chan_mthd,
+	.chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
new file mode 100644
index 0000000..1fd89ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_base_new(const struct nv50_disp_dmac_func *func,
+		   const struct nv50_disp_chan_mthd *mthd,
+		   struct nv50_disp_root *root, int chid,
+		   const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	union {
+		struct nv50_disp_base_channel_dma_v0 v0;
+	} *args = data;
+	struct nvkm_object *parent = oclass->parent;
+	struct nv50_disp *disp = root->disp;
+	int head, ret;
+	u64 push;
+
+	nvif_ioctl(parent, "create disp base channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create disp base channel dma vers %d "
+				   "pushbuf %016llx head %d\n",
+			   args->v0.version, args->v0.pushbuf, args->v0.head);
+		if (args->v0.head > disp->base.head.nr)
+			return -EINVAL;
+		push = args->v0.pushbuf;
+		head = args->v0.head;
+	} else
+		return ret;
+
+	return nv50_disp_dmac_new_(func, mthd, root, chid + head,
+				   head, push, oclass, pobject);
+}
+
+static const struct nv50_disp_mthd_list
+nv50_disp_base_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x0008c4 },
+		{ 0x0088, 0x0008d0 },
+		{ 0x008c, 0x0008dc },
+		{ 0x0090, 0x0008e4 },
+		{ 0x0094, 0x610884 },
+		{ 0x00a0, 0x6108a0 },
+		{ 0x00a4, 0x610878 },
+		{ 0x00c0, 0x61086c },
+		{ 0x00e0, 0x610858 },
+		{ 0x00e4, 0x610860 },
+		{ 0x00e8, 0x6108ac },
+		{ 0x00ec, 0x6108b4 },
+		{ 0x0100, 0x610894 },
+		{ 0x0110, 0x6108bc },
+		{ 0x0114, 0x61088c },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_base_mthd_image = {
+	.mthd = 0x0400,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0800, 0x6108f0 },
+		{ 0x0804, 0x6108fc },
+		{ 0x0808, 0x61090c },
+		{ 0x080c, 0x610914 },
+		{ 0x0810, 0x610904 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+nv50_disp_base_chan_mthd = {
+	.name = "Base",
+	.addr = 0x000540,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &nv50_disp_base_mthd_base },
+		{  "Image", 2, &nv50_disp_base_mthd_image },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+nv50_disp_base_oclass = {
+	.base.oclass = NV50_DISP_BASE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_base_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &nv50_disp_base_chan_mthd,
+	.chid = 1,
+};
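
Note: nv50_disp_base_new() above computes the real channel id as
'chid + head', so the per-head DMA channels sit at consecutive ids behind
the shared core channel: with .chid = 1 in the oclass, head 0 lands on id 1
and head 1 on id 2, and the other per-head classes offset from their own
base the same way. Illustratively:

	/* per-head channel id, given the class's base chid (sketch) */
	static int chan_id(int base_chid, int head)
	{
		return base_chid + head;
	}
	/* chan_id(1, 0) == 1, chan_id(1, 1) == 2 for the base channels above */
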
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
new file mode 100644
index 0000000..17a3d83
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+static void
+gf119_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
+{
+	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x610090, 0x00000001 << index, 0x00000000 << index);
+	nvkm_wr32(device, 0x61008c, 0x00000001 << index);
+}
+
+static void
+gf119_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
+{
+	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_wr32(device, 0x61008c, 0x00000001 << index);
+	nvkm_mask(device, 0x610090, 0x00000001 << index, 0x00000001 << index);
+}
+
+const struct nvkm_event_func
+gf119_disp_chan_uevent = {
+	.ctor = nv50_disp_chan_uevent_ctor,
+	.init = gf119_disp_chan_uevent_init,
+	.fini = gf119_disp_chan_uevent_fini,
+};
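
Note: gf119_disp_chan_uevent_init()/fini() above arm one channel's
completion notification by first acking any stale pending bit (a
write-one-to-clear register) and then flipping just that channel's bit in
the enable mask via nvkm_mask(), a read-modify-write. A minimal sketch of
the masking idiom over plain variables (register semantics mimicked, not
real MMIO):

	#include <stdint.h>

	static uint32_t pending, enable;	/* stand-ins for 0x61008c / 0x610090 */

	/* read-modify-write, as nvkm_mask() does */
	static void reg_mask(uint32_t *reg, uint32_t mask, uint32_t value)
	{
		*reg = (*reg & ~mask) | (value & mask);
	}

	static void chan_uevent_init(int index)
	{
		pending &= ~(1u << index);	/* models the W1C ack of 0x61008c */
		reg_mask(&enable, 1u << index, 1u << index);	/* then enable this channel */
	}

	static void chan_uevent_fini(int index)
	{
		reg_mask(&enable, 1u << index, 0);	/* disable first */
		pending &= ~(1u << index);	/* then ack anything left pending */
	}

Note the ordering: init clears before enabling so a stale event cannot fire
immediately, and fini disables before the final ack for the same reason.
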
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
new file mode 100644
index 0000000..01803c0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -0,0 +1,301 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <engine/dma.h>
+
+#include <nvif/class.h>
+#include <nvif/event.h>
+#include <nvif/unpack.h>
+
+static void
+nv50_disp_mthd_list(struct nv50_disp *disp, int debug, u32 base, int c,
+		    const struct nv50_disp_mthd_list *list, int inst)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int i;
+
+	for (i = 0; list->data[i].mthd; i++) {
+		if (list->data[i].addr) {
+			u32 next = nvkm_rd32(device, list->data[i].addr + base + 0);
+			u32 prev = nvkm_rd32(device, list->data[i].addr + base + c);
+			u32 mthd = list->data[i].mthd + (list->mthd * inst);
+			const char *name = list->data[i].name;
+			char mods[16];
+
+			if (prev != next)
+				snprintf(mods, sizeof(mods), "-> %08x", next);
+			else
+				snprintf(mods, sizeof(mods), "%13c", ' ');
+
+			nvkm_printk_(subdev, debug, info,
+				     "\t%04x: %08x %s%s%s\n",
+				     mthd, prev, mods, name ? " // " : "",
+				     name ? name : "");
+		}
+	}
+}
+
+void
+nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
+{
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	const struct nv50_disp_chan_mthd *mthd = chan->mthd;
+	const struct nv50_disp_mthd_list *list;
+	int i, j;
+
+	if (debug > subdev->debug)
+		return;
+
+	for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
+		u32 base = chan->head * mthd->addr;
+		for (j = 0; j < mthd->data[i].nr; j++, base += list->addr) {
+			const char *cname = mthd->name;
+			const char *sname = "";
+			char cname_[16], sname_[16];
+
+			if (mthd->addr) {
+				snprintf(cname_, sizeof(cname_), "%s %d",
+					 mthd->name, chan->chid);
+				cname = cname_;
+			}
+
+			if (mthd->data[i].nr > 1) {
+				snprintf(sname_, sizeof(sname_), " - %s %d",
+					 mthd->data[i].name, j);
+				sname = sname_;
+			}
+
+			nvkm_printk_(subdev, debug, info, "%s%s:\n", cname, sname);
+			nv50_disp_mthd_list(disp, debug, base, mthd->prev,
+					    list, j);
+		}
+	}
+}
+
+static void
+nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
+{
+	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000000 << index);
+	nvkm_wr32(device, 0x610020, 0x00000001 << index);
+}
+
+static void
+nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
+{
+	struct nv50_disp *disp = container_of(event, typeof(*disp), uevent);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_wr32(device, 0x610020, 0x00000001 << index);
+	nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000001 << index);
+}
+
+void
+nv50_disp_chan_uevent_send(struct nv50_disp *disp, int chid)
+{
+	struct nvif_notify_uevent_rep {
+	} rep;
+
+	nvkm_event_send(&disp->uevent, 1, chid, &rep, sizeof(rep));
+}
+
+int
+nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
+			   struct nvkm_notify *notify)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	union {
+		struct nvif_notify_uevent_req none;
+	} *args = data;
+	int ret;
+
+	if (nvif_unvers(args->none)) {
+		notify->size  = sizeof(struct nvif_notify_uevent_rep);
+		notify->types = 1;
+		notify->index = chan->chid;
+		return 0;
+	}
+
+	return ret;
+}
+
+const struct nvkm_event_func
+nv50_disp_chan_uevent = {
+	.ctor = nv50_disp_chan_uevent_ctor,
+	.init = nv50_disp_chan_uevent_init,
+	.fini = nv50_disp_chan_uevent_fini,
+};
+
+int
+nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	*data = nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
+	return 0;
+}
+
+int
+nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
+	return 0;
+}
+
+int
+nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
+		    struct nvkm_event **pevent)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	struct nv50_disp *disp = chan->root->disp;
+	switch (type) {
+	case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
+		*pevent = &disp->uevent;
+		return 0;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+int
+nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	*addr = device->func->resource_addr(device, 0) +
+		0x640000 + (chan->chid * 0x1000);
+	*size = 0x001000;
+	return 0;
+}
+
+static int
+nv50_disp_chan_child_new(const struct nvkm_oclass *oclass,
+			 void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(oclass->parent);
+	return chan->func->child_new(chan, oclass, data, size, pobject);
+}
+
+static int
+nv50_disp_chan_child_get(struct nvkm_object *object, int index,
+			 struct nvkm_oclass *oclass)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	if (chan->func->child_get) {
+		int ret = chan->func->child_get(chan, index, oclass);
+		if (ret == 0)
+			oclass->ctor = nv50_disp_chan_child_new;
+		return ret;
+	}
+	return -EINVAL;
+}
+
+static int
+nv50_disp_chan_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	chan->func->fini(chan);
+	return 0;
+}
+
+static int
+nv50_disp_chan_init(struct nvkm_object *object)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	return chan->func->init(chan);
+}
+
+static void *
+nv50_disp_chan_dtor(struct nvkm_object *object)
+{
+	struct nv50_disp_chan *chan = nv50_disp_chan(object);
+	struct nv50_disp *disp = chan->root->disp;
+	if (chan->chid >= 0)
+		disp->chan[chan->chid] = NULL;
+	return chan->func->dtor ? chan->func->dtor(chan) : chan;
+}
+
+static const struct nvkm_object_func
+nv50_disp_chan = {
+	.dtor = nv50_disp_chan_dtor,
+	.init = nv50_disp_chan_init,
+	.fini = nv50_disp_chan_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+	.ntfy = nv50_disp_chan_ntfy,
+	.map = nv50_disp_chan_map,
+	.sclass = nv50_disp_chan_child_get,
+};
+
+int
+nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
+		    const struct nv50_disp_chan_mthd *mthd,
+		    struct nv50_disp_root *root, int chid, int head,
+		    const struct nvkm_oclass *oclass,
+		    struct nv50_disp_chan *chan)
+{
+	struct nv50_disp *disp = root->disp;
+
+	nvkm_object_ctor(&nv50_disp_chan, oclass, &chan->object);
+	chan->func = func;
+	chan->mthd = mthd;
+	chan->root = root;
+	chan->chid = chid;
+	chan->head = head;
+
+	if (disp->chan[chan->chid]) {
+		chan->chid = -1;
+		return -EBUSY;
+	}
+	disp->chan[chan->chid] = chan;
+	return 0;
+}
+
+int
+nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
+		    const struct nv50_disp_chan_mthd *mthd,
+		    struct nv50_disp_root *root, int chid, int head,
+		    const struct nvkm_oclass *oclass,
+		    struct nvkm_object **pobject)
+{
+	struct nv50_disp_chan *chan;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->object;
+
+	return nv50_disp_chan_ctor(func, mthd, root, chid, head, oclass, chan);
+}
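
Note: nv50_disp_chan_ctor() above claims a fixed slot in disp->chan[] and
returns -EBUSY if another channel object already owns it; poisoning chid to
-1 on failure means the dtor's 'chid >= 0' check never releases somebody
else's slot. The claim/release protocol in isolation (simplified,
single-threaded; NR_CHAN and all names are illustrative):

	#define NR_CHAN 17

	static void *chan_slot[NR_CHAN];

	static int chan_claim(int *chid, int want, void *owner)
	{
		if (chan_slot[want]) {
			*chid = -1;	/* poison so release() is a no-op */
			return -1;	/* -EBUSY in the driver */
		}
		chan_slot[want] = owner;
		*chid = want;
		return 0;
	}

	static void chan_release(int *chid, void *owner)
	{
		if (*chid >= 0 && chan_slot[*chid] == owner)
			chan_slot[*chid] = NULL;
		*chid = -1;
	}
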
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
new file mode 100644
index 0000000..aee3748
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -0,0 +1,126 @@
+#ifndef __NV50_DISP_CHAN_H__
+#define __NV50_DISP_CHAN_H__
+#define nv50_disp_chan(p) container_of((p), struct nv50_disp_chan, object)
+#include "nv50.h"
+
+struct nv50_disp_chan {
+	const struct nv50_disp_chan_func *func;
+	const struct nv50_disp_chan_mthd *mthd;
+	struct nv50_disp_root *root;
+	int chid;
+	int head;
+
+	struct nvkm_object object;
+};
+
+struct nv50_disp_chan_func {
+	void *(*dtor)(struct nv50_disp_chan *);
+	int (*init)(struct nv50_disp_chan *);
+	void (*fini)(struct nv50_disp_chan *);
+	int (*child_get)(struct nv50_disp_chan *, int index,
+			 struct nvkm_oclass *);
+	int (*child_new)(struct nv50_disp_chan *, const struct nvkm_oclass *,
+			 void *data, u32 size, struct nvkm_object **);
+};
+
+int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *,
+			const struct nv50_disp_chan_mthd *,
+			struct nv50_disp_root *, int chid, int head,
+			const struct nvkm_oclass *, struct nv50_disp_chan *);
+int nv50_disp_chan_new_(const struct nv50_disp_chan_func *,
+			const struct nv50_disp_chan_mthd *,
+			struct nv50_disp_root *, int chid, int head,
+			const struct nvkm_oclass *, struct nvkm_object **);
+
+extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
+extern const struct nv50_disp_chan_func gf119_disp_pioc_func;
+
+extern const struct nvkm_event_func nv50_disp_chan_uevent;
+int  nv50_disp_chan_uevent_ctor(struct nvkm_object *, void *, u32,
+				struct nvkm_notify *);
+void nv50_disp_chan_uevent_send(struct nv50_disp *, int);
+
+extern const struct nvkm_event_func gf119_disp_chan_uevent;
+
+struct nv50_disp_mthd_list {
+	u32 mthd;
+	u32 addr;
+	struct {
+		u32 mthd;
+		u32 addr;
+		const char *name;
+	} data[];
+};
+
+struct nv50_disp_chan_mthd {
+	const char *name;
+	u32 addr;
+	s32 prev;
+	struct {
+		const char *name;
+		int nr;
+		const struct nv50_disp_mthd_list *mthd;
+	} data[];
+};
+
+void nv50_disp_chan_mthd(struct nv50_disp_chan *, int debug);
+
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
+extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
+
+extern const struct nv50_disp_chan_mthd g84_disp_core_chan_mthd;
+extern const struct nv50_disp_mthd_list g84_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list g84_disp_core_mthd_head;
+extern const struct nv50_disp_chan_mthd g84_disp_base_chan_mthd;
+extern const struct nv50_disp_chan_mthd g84_disp_ovly_chan_mthd;
+
+extern const struct nv50_disp_chan_mthd g94_disp_core_chan_mthd;
+
+extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior;
+extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd;
+
+extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd;
+
+struct nv50_disp_pioc_oclass {
+	int (*ctor)(const struct nv50_disp_chan_func *,
+		    const struct nv50_disp_chan_mthd *,
+		    struct nv50_disp_root *, int chid,
+		    const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+	struct nvkm_sclass base;
+	const struct nv50_disp_chan_func *func;
+	const struct nv50_disp_chan_mthd *mthd;
+	int chid;
+};
+
+extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass nv50_disp_curs_oclass;
+
+extern const struct nv50_disp_pioc_oclass g84_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass g84_disp_curs_oclass;
+
+extern const struct nv50_disp_pioc_oclass gt215_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gt215_disp_curs_oclass;
+
+extern const struct nv50_disp_pioc_oclass gf119_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gf119_disp_curs_oclass;
+
+extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass;
+
+
+int nv50_disp_curs_new(const struct nv50_disp_chan_func *,
+		       const struct nv50_disp_chan_mthd *,
+		       struct nv50_disp_root *, int chid,
+		       const struct nvkm_oclass *, void *data, u32 size,
+		       struct nvkm_object **);
+int nv50_disp_oimm_new(const struct nv50_disp_chan_func *,
+		       const struct nv50_disp_chan_mthd *,
+		       struct nv50_disp_root *, int chid,
+		       const struct nvkm_oclass *, void *data, u32 size,
+		       struct nvkm_object **);
+#endif
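
The two table types above drive the channel method dump: nv50_disp_mthd_list maps a channel's method offsets to the registers that shadow them, while nv50_disp_chan_mthd groups several lists and gives each an instance count (nr, e.g. 3 DACs). Per instance, method offsets advance by list->mthd and register addresses by list->addr. The dump routine itself is not in this hunk, but a hedged sketch of how these tables are plausibly walked (pr_debug standing in for the driver's logging):

	static void
	dump_mthd_tables(const struct nv50_disp_chan_mthd *mthd)
	{
		int i, j, k;
		for (i = 0; mthd->data[i].mthd; i++) {		/* {} terminates */
			const struct nv50_disp_mthd_list *list = mthd->data[i].mthd;
			for (j = 0; j < mthd->data[i].nr; j++) {	/* each instance */
				for (k = 0; list->data[k].mthd; k++)	/* {} terminates */
					pr_debug("%s%d: mthd %04x -> reg %06x\n",
						 mthd->data[i].name, j,
						 list->data[k].mthd + j * list->mthd,
						 list->data[k].addr + j * list->addr);
			}
		}
	}
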
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
index cf03e02..c6910d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
@@ -33,15 +33,15 @@
 nvkm_connector_hpd(struct nvkm_notify *notify)
 {
 	struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
-	struct nvkm_disp *disp = nvkm_disp(conn);
-	struct nvkm_gpio *gpio = nvkm_gpio(conn);
+	struct nvkm_disp *disp = conn->disp;
+	struct nvkm_gpio *gpio = disp->engine.subdev.device->gpio;
 	const struct nvkm_gpio_ntfy_rep *line = notify->data;
 	struct nvif_notify_conn_rep_v0 rep;
 	int index = conn->index;
 
-	DBG("HPD: %d\n", line->mask);
+	CONN_DBG(conn, "HPD: %d", line->mask);
 
-	if (!gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.index))
+	if (!nvkm_gpio_get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.index))
 		rep.mask = NVIF_NOTIFY_CONN_V0_UNPLUG;
 	else
 		rep.mask = NVIF_NOTIFY_CONN_V0_PLUG;
@@ -51,78 +51,58 @@
 	return NVKM_NOTIFY_KEEP;
 }
 
-int
-_nvkm_connector_fini(struct nvkm_object *object, bool suspend)
+void
+nvkm_connector_fini(struct nvkm_connector *conn)
 {
-	struct nvkm_connector *conn = (void *)object;
 	nvkm_notify_put(&conn->hpd);
-	return nvkm_object_fini(&conn->base, suspend);
-}
-
-int
-_nvkm_connector_init(struct nvkm_object *object)
-{
-	struct nvkm_connector *conn = (void *)object;
-	int ret = nvkm_object_init(&conn->base);
-	if (ret == 0)
-		nvkm_notify_get(&conn->hpd);
-	return ret;
 }
 
 void
-_nvkm_connector_dtor(struct nvkm_object *object)
+nvkm_connector_init(struct nvkm_connector *conn)
 {
-	struct nvkm_connector *conn = (void *)object;
-	nvkm_notify_fini(&conn->hpd);
-	nvkm_object_destroy(&conn->base);
+	nvkm_notify_get(&conn->hpd);
 }
 
-int
-nvkm_connector_create_(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass,
-		       struct nvbios_connE *info, int index,
-		       int length, void **pobject)
+void
+nvkm_connector_del(struct nvkm_connector **pconn)
+{
+	struct nvkm_connector *conn = *pconn;
+	if (conn) {
+		nvkm_notify_fini(&conn->hpd);
+		kfree(*pconn);
+		*pconn = NULL;
+	}
+}
+
+static void
+nvkm_connector_ctor(struct nvkm_disp *disp, int index,
+		    struct nvbios_connE *info, struct nvkm_connector *conn)
 {
 	static const u8 hpd[] = { 0x07, 0x08, 0x51, 0x52, 0x5e, 0x5f, 0x60 };
-	struct nvkm_disp *disp = nvkm_disp(parent);
-	struct nvkm_gpio *gpio = nvkm_gpio(parent);
-	struct nvkm_connector *conn;
-	struct nvkm_output *outp;
+	struct nvkm_gpio *gpio = disp->engine.subdev.device->gpio;
 	struct dcb_gpio_func func;
 	int ret;
 
-	list_for_each_entry(outp, &disp->outp, head) {
-		if (outp->conn && outp->conn->index == index) {
-			atomic_inc(&nv_object(outp->conn)->refcount);
-			*pobject = outp->conn;
-			return 1;
-		}
-	}
-
-	ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject);
-	conn = *pobject;
-	if (ret)
-		return ret;
-
-	conn->info = *info;
+	conn->disp = disp;
 	conn->index = index;
+	conn->info = *info;
 
-	DBG("type %02x loc %d hpd %02x dp %x di %x sr %x lcdid %x\n",
-	    info->type, info->location, info->hpd, info->dp,
-	    info->di, info->sr, info->lcdid);
+	CONN_DBG(conn, "type %02x loc %d hpd %02x dp %x di %x sr %x lcdid %x",
+		 info->type, info->location, info->hpd, info->dp,
+		 info->di, info->sr, info->lcdid);
 
 	if ((info->hpd = ffs(info->hpd))) {
 		if (--info->hpd >= ARRAY_SIZE(hpd)) {
-			ERR("hpd %02x unknown\n", info->hpd);
-			return 0;
+			CONN_ERR(conn, "hpd %02x unknown", info->hpd);
+			return;
 		}
 		info->hpd = hpd[info->hpd];
 
-		ret = gpio->find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func);
+		ret = nvkm_gpio_find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func);
 		if (ret) {
-			ERR("func %02x lookup failed, %d\n", info->hpd, ret);
-			return 0;
+			CONN_ERR(conn, "func %02x lookup failed, %d",
+				 info->hpd, ret);
+			return;
 		}
 
 		ret = nvkm_notify_init(NULL, &gpio->event, nvkm_connector_hpd,
@@ -134,41 +114,19 @@
 				       sizeof(struct nvkm_gpio_ntfy_rep),
 				       &conn->hpd);
 		if (ret) {
-			ERR("func %02x failed, %d\n", info->hpd, ret);
+			CONN_ERR(conn, "func %02x failed, %d", info->hpd, ret);
 		} else {
-			DBG("func %02x (HPD)\n", info->hpd);
+			CONN_DBG(conn, "func %02x (HPD)", info->hpd);
 		}
 	}
-
-	return 0;
 }
 
 int
-_nvkm_connector_ctor(struct nvkm_object *parent,
-		     struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *info, u32 index,
-		     struct nvkm_object **pobject)
+nvkm_connector_new(struct nvkm_disp *disp, int index,
+		   struct nvbios_connE *info, struct nvkm_connector **pconn)
 {
-	struct nvkm_connector *conn;
-	int ret;
-
-	ret = nvkm_connector_create(parent, engine, oclass, info, index, &conn);
-	*pobject = nv_object(conn);
-	if (ret)
-		return ret;
-
+	if (!(*pconn = kzalloc(sizeof(**pconn), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_connector_ctor(disp, index, info, *pconn);
 	return 0;
 }
-
-struct nvkm_oclass *
-nvkm_connector_oclass = &(struct nvkm_connector_impl) {
-	.base = {
-		.handle = 0,
-		.ofuncs = &(struct nvkm_ofuncs) {
-			.ctor = _nvkm_connector_ctor,
-			.dtor = _nvkm_connector_dtor,
-			.init = _nvkm_connector_init,
-			.fini = _nvkm_connector_fini,
-		},
-	},
-}.base;
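
Two things worth noting in the conn.c rewrite: the old refcounted nvkm_object lifetime (including the ctor's scan of disp->outp to share an existing connector) is replaced by plain kzalloc/kfree ownership, and nvkm_connector_ctor() is now void, so a bad HPD entry or failed GPIO lookup just logs and leaves the connector without a hotplug notifier instead of failing creation. The del-through-double-pointer idiom used by nvkm_connector_del() makes teardown idempotent; illustrative shape with a hypothetical type:

	void
	foo_del(struct foo **pfoo)
	{
		struct foo *foo = *pfoo;
		if (foo) {
			/* per-type cleanup would go here */
			kfree(foo);
			*pfoo = NULL;	/* a repeated del(), or one on a
					 * never-created object, is a no-op */
		}
	}
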
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
index c87a061..ed32fe7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
@@ -1,58 +1,33 @@
 #ifndef __NVKM_DISP_CONN_H__
 #define __NVKM_DISP_CONN_H__
-#include <core/object.h>
-#include <core/notify.h>
+#include <engine/disp.h>
 
+#include <core/notify.h>
 #include <subdev/bios.h>
 #include <subdev/bios/conn.h>
 
 struct nvkm_connector {
-	struct nvkm_object base;
-	struct list_head head;
-
-	struct nvbios_connE info;
+	struct nvkm_disp *disp;
 	int index;
+	struct nvbios_connE info;
 
 	struct nvkm_notify hpd;
+
+	struct list_head head;
 };
 
-#define nvkm_connector_create(p,e,c,b,i,d)                                     \
-	nvkm_connector_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
-#define nvkm_connector_destroy(d) ({                                           \
-	struct nvkm_connector *disp = (d);                                     \
-	_nvkm_connector_dtor(nv_object(disp));                                 \
-})
-#define nvkm_connector_init(d) ({                                              \
-	struct nvkm_connector *disp = (d);                                     \
-	_nvkm_connector_init(nv_object(disp));                                 \
-})
-#define nvkm_connector_fini(d,s) ({                                            \
-	struct nvkm_connector *disp = (d);                                     \
-	_nvkm_connector_fini(nv_object(disp), (s));                            \
-})
+int  nvkm_connector_new(struct nvkm_disp *, int index, struct nvbios_connE *,
+			struct nvkm_connector **);
+void nvkm_connector_del(struct nvkm_connector **);
+void nvkm_connector_init(struct nvkm_connector *);
+void nvkm_connector_fini(struct nvkm_connector *);
 
-int nvkm_connector_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *, struct nvbios_connE *,
-			   int, int, void **);
-
-int  _nvkm_connector_ctor(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, void *, u32,
-			  struct nvkm_object **);
-void _nvkm_connector_dtor(struct nvkm_object *);
-int  _nvkm_connector_init(struct nvkm_object *);
-int  _nvkm_connector_fini(struct nvkm_object *, bool);
-
-struct nvkm_connector_impl {
-	struct nvkm_oclass base;
-};
-
-#ifndef MSG
-#define MSG(l,f,a...) do {                                                     \
-	struct nvkm_connector *_conn = (void *)conn;                           \
-	nv_##l(_conn, "%02x:%02x%02x: "f, _conn->index,                        \
-	       _conn->info.location, _conn->info.type, ##a);                   \
+#define CONN_MSG(c,l,f,a...) do {                                              \
+	struct nvkm_connector *_conn = (c);                                    \
+	nvkm_##l(&_conn->disp->engine.subdev, "conn %02x:%02x%02x: "f"\n",     \
+		 _conn->index, _conn->info.location, _conn->info.type, ##a);   \
 } while(0)
-#define DBG(f,a...) MSG(debug, f, ##a)
-#define ERR(f,a...) MSG(error, f, ##a)
-#endif
+#define CONN_ERR(c,f,a...) CONN_MSG((c), error, f, ##a)
+#define CONN_DBG(c,f,a...) CONN_MSG((c), debug, f, ##a)
+#define CONN_TRACE(c,f,a...) CONN_MSG((c), trace, f, ##a)
 #endif
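
For reference, CONN_DBG(conn, "HPD: %d", mask) from conn.c expands to roughly:

	nvkm_debug(&conn->disp->engine.subdev,
		   "conn %02x:%02x%02x: HPD: %d\n",
		   conn->index, conn->info.location, conn->info.type, mask);

so every connector message carries a uniform index/location/type prefix, and callers no longer supply their own trailing newline.
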
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
new file mode 100644
index 0000000..1baa5c3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg84.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_mthd_list
+g84_disp_core_mthd_dac = {
+	.mthd = 0x0080,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0400, 0x610b58 },
+		{ 0x0404, 0x610bdc },
+		{ 0x0420, 0x610bc4 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+g84_disp_core_mthd_head = {
+	.mthd = 0x0400,
+	.addr = 0x000540,
+	.data = {
+		{ 0x0800, 0x610ad8 },
+		{ 0x0804, 0x610ad0 },
+		{ 0x0808, 0x610a48 },
+		{ 0x080c, 0x610a78 },
+		{ 0x0810, 0x610ac0 },
+		{ 0x0814, 0x610af8 },
+		{ 0x0818, 0x610b00 },
+		{ 0x081c, 0x610ae8 },
+		{ 0x0820, 0x610af0 },
+		{ 0x0824, 0x610b08 },
+		{ 0x0828, 0x610b10 },
+		{ 0x082c, 0x610a68 },
+		{ 0x0830, 0x610a60 },
+		{ 0x0834, 0x000000 },
+		{ 0x0838, 0x610a40 },
+		{ 0x0840, 0x610a24 },
+		{ 0x0844, 0x610a2c },
+		{ 0x0848, 0x610aa8 },
+		{ 0x084c, 0x610ab0 },
+		{ 0x085c, 0x610c5c },
+		{ 0x0860, 0x610a84 },
+		{ 0x0864, 0x610a90 },
+		{ 0x0868, 0x610b18 },
+		{ 0x086c, 0x610b20 },
+		{ 0x0870, 0x610ac8 },
+		{ 0x0874, 0x610a38 },
+		{ 0x0878, 0x610c50 },
+		{ 0x0880, 0x610a58 },
+		{ 0x0884, 0x610a9c },
+		{ 0x089c, 0x610c68 },
+		{ 0x08a0, 0x610a70 },
+		{ 0x08a4, 0x610a50 },
+		{ 0x08a8, 0x610ae0 },
+		{ 0x08c0, 0x610b28 },
+		{ 0x08c4, 0x610b30 },
+		{ 0x08c8, 0x610b40 },
+		{ 0x08d4, 0x610b38 },
+		{ 0x08d8, 0x610b48 },
+		{ 0x08dc, 0x610b50 },
+		{ 0x0900, 0x610a18 },
+		{ 0x0904, 0x610ab8 },
+		{ 0x0910, 0x610c70 },
+		{ 0x0914, 0x610c78 },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+g84_disp_core_chan_mthd = {
+	.name = "Core",
+	.addr = 0x000000,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &nv50_disp_core_mthd_base },
+		{    "DAC", 3, &g84_disp_core_mthd_dac  },
+		{    "SOR", 2, &nv50_disp_core_mthd_sor  },
+		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
+		{   "HEAD", 2, &g84_disp_core_mthd_head },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+g84_disp_core_oclass = {
+	.base.oclass = G82_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &nv50_disp_core_func,
+	.mthd = &g84_disp_core_chan_mthd,
+	.chid = 0,
+};
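
The per-generation core-channel tables are assembled by overriding only what changed: g84 keeps the nv50 Global/SOR/PIOR lists and swaps in its own DAC and HEAD lists, g94 below swaps only the SOR list on top of g84's, and the gf119/gk104 tables later in this patch replace the whole set for the post-Tesla register layout.
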
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
new file mode 100644
index 0000000..019379a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coreg94.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_mthd_list
+g94_disp_core_mthd_sor = {
+	.mthd = 0x0040,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0600, 0x610794 },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+g94_disp_core_chan_mthd = {
+	.name = "Core",
+	.addr = 0x000000,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &nv50_disp_core_mthd_base },
+		{    "DAC", 3, &g84_disp_core_mthd_dac  },
+		{    "SOR", 4, &g94_disp_core_mthd_sor  },
+		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
+		{   "HEAD", 2, &g84_disp_core_mthd_head },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+g94_disp_core_oclass = {
+	.base.oclass = GT206_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &nv50_disp_core_func,
+	.mthd = &g94_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
new file mode 100644
index 0000000..6b1dc70
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x660080 },
+		{ 0x0084, 0x660084 },
+		{ 0x0088, 0x660088 },
+		{ 0x008c, 0x000000 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_dac = {
+	.mthd = 0x0020,
+	.addr = 0x000020,
+	.data = {
+		{ 0x0180, 0x660180 },
+		{ 0x0184, 0x660184 },
+		{ 0x0188, 0x660188 },
+		{ 0x0190, 0x660190 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_sor = {
+	.mthd = 0x0020,
+	.addr = 0x000020,
+	.data = {
+		{ 0x0200, 0x660200 },
+		{ 0x0204, 0x660204 },
+		{ 0x0208, 0x660208 },
+		{ 0x0210, 0x660210 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_pior = {
+	.mthd = 0x0020,
+	.addr = 0x000020,
+	.data = {
+		{ 0x0300, 0x660300 },
+		{ 0x0304, 0x660304 },
+		{ 0x0308, 0x660308 },
+		{ 0x0310, 0x660310 },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+gf119_disp_core_mthd_head = {
+	.mthd = 0x0300,
+	.addr = 0x000300,
+	.data = {
+		{ 0x0400, 0x660400 },
+		{ 0x0404, 0x660404 },
+		{ 0x0408, 0x660408 },
+		{ 0x040c, 0x66040c },
+		{ 0x0410, 0x660410 },
+		{ 0x0414, 0x660414 },
+		{ 0x0418, 0x660418 },
+		{ 0x041c, 0x66041c },
+		{ 0x0420, 0x660420 },
+		{ 0x0424, 0x660424 },
+		{ 0x0428, 0x660428 },
+		{ 0x042c, 0x66042c },
+		{ 0x0430, 0x660430 },
+		{ 0x0434, 0x660434 },
+		{ 0x0438, 0x660438 },
+		{ 0x0440, 0x660440 },
+		{ 0x0444, 0x660444 },
+		{ 0x0448, 0x660448 },
+		{ 0x044c, 0x66044c },
+		{ 0x0450, 0x660450 },
+		{ 0x0454, 0x660454 },
+		{ 0x0458, 0x660458 },
+		{ 0x045c, 0x66045c },
+		{ 0x0460, 0x660460 },
+		{ 0x0468, 0x660468 },
+		{ 0x046c, 0x66046c },
+		{ 0x0470, 0x660470 },
+		{ 0x0474, 0x660474 },
+		{ 0x0480, 0x660480 },
+		{ 0x0484, 0x660484 },
+		{ 0x048c, 0x66048c },
+		{ 0x0490, 0x660490 },
+		{ 0x0494, 0x660494 },
+		{ 0x0498, 0x660498 },
+		{ 0x04b0, 0x6604b0 },
+		{ 0x04b8, 0x6604b8 },
+		{ 0x04bc, 0x6604bc },
+		{ 0x04c0, 0x6604c0 },
+		{ 0x04c4, 0x6604c4 },
+		{ 0x04c8, 0x6604c8 },
+		{ 0x04d0, 0x6604d0 },
+		{ 0x04d4, 0x6604d4 },
+		{ 0x04e0, 0x6604e0 },
+		{ 0x04e4, 0x6604e4 },
+		{ 0x04e8, 0x6604e8 },
+		{ 0x04ec, 0x6604ec },
+		{ 0x04f0, 0x6604f0 },
+		{ 0x04f4, 0x6604f4 },
+		{ 0x04f8, 0x6604f8 },
+		{ 0x04fc, 0x6604fc },
+		{ 0x0500, 0x660500 },
+		{ 0x0504, 0x660504 },
+		{ 0x0508, 0x660508 },
+		{ 0x050c, 0x66050c },
+		{ 0x0510, 0x660510 },
+		{ 0x0514, 0x660514 },
+		{ 0x0518, 0x660518 },
+		{ 0x051c, 0x66051c },
+		{ 0x052c, 0x66052c },
+		{ 0x0530, 0x660530 },
+		{ 0x054c, 0x66054c },
+		{ 0x0550, 0x660550 },
+		{ 0x0554, 0x660554 },
+		{ 0x0558, 0x660558 },
+		{ 0x055c, 0x66055c },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+gf119_disp_core_chan_mthd = {
+	.name = "Core",
+	.addr = 0x000000,
+	.prev = -0x020000,
+	.data = {
+		{ "Global", 1, &gf119_disp_core_mthd_base },
+		{    "DAC", 3, &gf119_disp_core_mthd_dac  },
+		{    "SOR", 8, &gf119_disp_core_mthd_sor  },
+		{   "PIOR", 4, &gf119_disp_core_mthd_pior },
+		{   "HEAD", 4, &gf119_disp_core_mthd_head },
+		{}
+	}
+};
+
+static void
+gf119_disp_core_fini(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	/* deactivate channel */
+	nvkm_mask(device, 0x610490, 0x00000010, 0x00000000);
+	nvkm_mask(device, 0x610490, 0x00000003, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610490) & 0x001e0000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "core fini: %08x\n",
+			   nvkm_rd32(device, 0x610490));
+	}
+
+	/* disable error reporting and completion notification */
+	nvkm_mask(device, 0x610090, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000000);
+}
+
+static int
+gf119_disp_core_init(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	/* enable error reporting */
+	nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001);
+
+	/* initialise channel for dma command submission */
+	nvkm_wr32(device, 0x610494, chan->push);
+	nvkm_wr32(device, 0x610498, 0x00010000);
+	nvkm_wr32(device, 0x61049c, 0x00000001);
+	nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000, 0x00000000);
+	nvkm_wr32(device, 0x610490, 0x01000013);
+
+	/* wait for it to go inactive */
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "core init: %08x\n",
+			   nvkm_rd32(device, 0x610490));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_dmac_func
+gf119_disp_core_func = {
+	.init = gf119_disp_core_init,
+	.fini = gf119_disp_core_fini,
+	.bind = gf119_disp_dmac_bind,
+};
+
+const struct nv50_disp_dmac_oclass
+gf119_disp_core_oclass = {
+	.base.oclass = GF110_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &gf119_disp_core_func,
+	.mthd = &gf119_disp_core_chan_mthd,
+	.chid = 0,
+};
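
gf119_disp_core_init()/fini() show the polling idiom used throughout this series: nvkm_msec(device, ms, body) re-evaluates body until it executes break or the timeout lapses, and the whole expression is negative on timeout. Generic shape, with REG and BUSY as placeholder names rather than real registers:

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, REG) & BUSY))
			break;		/* condition met within 2000ms */
	) < 0)
		return -EBUSY;		/* timed out */

Also visible here: on gf119 the core channel's control block moved from nv50's 0x610200 to 0x610490, with error-report and completion-notify enables split out into per-channel bits in 0x6100a0 and 0x610090.
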
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
new file mode 100644
index 0000000..088ab22
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk104.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gk104_disp_core_mthd_head = {
+	.mthd = 0x0300,
+	.addr = 0x000300,
+	.data = {
+		{ 0x0400, 0x660400 },
+		{ 0x0404, 0x660404 },
+		{ 0x0408, 0x660408 },
+		{ 0x040c, 0x66040c },
+		{ 0x0410, 0x660410 },
+		{ 0x0414, 0x660414 },
+		{ 0x0418, 0x660418 },
+		{ 0x041c, 0x66041c },
+		{ 0x0420, 0x660420 },
+		{ 0x0424, 0x660424 },
+		{ 0x0428, 0x660428 },
+		{ 0x042c, 0x66042c },
+		{ 0x0430, 0x660430 },
+		{ 0x0434, 0x660434 },
+		{ 0x0438, 0x660438 },
+		{ 0x0440, 0x660440 },
+		{ 0x0444, 0x660444 },
+		{ 0x0448, 0x660448 },
+		{ 0x044c, 0x66044c },
+		{ 0x0450, 0x660450 },
+		{ 0x0454, 0x660454 },
+		{ 0x0458, 0x660458 },
+		{ 0x045c, 0x66045c },
+		{ 0x0460, 0x660460 },
+		{ 0x0468, 0x660468 },
+		{ 0x046c, 0x66046c },
+		{ 0x0470, 0x660470 },
+		{ 0x0474, 0x660474 },
+		{ 0x047c, 0x66047c },
+		{ 0x0480, 0x660480 },
+		{ 0x0484, 0x660484 },
+		{ 0x0488, 0x660488 },
+		{ 0x048c, 0x66048c },
+		{ 0x0490, 0x660490 },
+		{ 0x0494, 0x660494 },
+		{ 0x0498, 0x660498 },
+		{ 0x04a0, 0x6604a0 },
+		{ 0x04b0, 0x6604b0 },
+		{ 0x04b8, 0x6604b8 },
+		{ 0x04bc, 0x6604bc },
+		{ 0x04c0, 0x6604c0 },
+		{ 0x04c4, 0x6604c4 },
+		{ 0x04c8, 0x6604c8 },
+		{ 0x04d0, 0x6604d0 },
+		{ 0x04d4, 0x6604d4 },
+		{ 0x04e0, 0x6604e0 },
+		{ 0x04e4, 0x6604e4 },
+		{ 0x04e8, 0x6604e8 },
+		{ 0x04ec, 0x6604ec },
+		{ 0x04f0, 0x6604f0 },
+		{ 0x04f4, 0x6604f4 },
+		{ 0x04f8, 0x6604f8 },
+		{ 0x04fc, 0x6604fc },
+		{ 0x0500, 0x660500 },
+		{ 0x0504, 0x660504 },
+		{ 0x0508, 0x660508 },
+		{ 0x050c, 0x66050c },
+		{ 0x0510, 0x660510 },
+		{ 0x0514, 0x660514 },
+		{ 0x0518, 0x660518 },
+		{ 0x051c, 0x66051c },
+		{ 0x0520, 0x660520 },
+		{ 0x0524, 0x660524 },
+		{ 0x052c, 0x66052c },
+		{ 0x0530, 0x660530 },
+		{ 0x054c, 0x66054c },
+		{ 0x0550, 0x660550 },
+		{ 0x0554, 0x660554 },
+		{ 0x0558, 0x660558 },
+		{ 0x055c, 0x66055c },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+gk104_disp_core_chan_mthd = {
+	.name = "Core",
+	.addr = 0x000000,
+	.prev = -0x020000,
+	.data = {
+		{ "Global", 1, &gf119_disp_core_mthd_base },
+		{    "DAC", 3, &gf119_disp_core_mthd_dac  },
+		{    "SOR", 8, &gf119_disp_core_mthd_sor  },
+		{   "PIOR", 4, &gf119_disp_core_mthd_pior },
+		{   "HEAD", 4, &gk104_disp_core_mthd_head },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+gk104_disp_core_oclass = {
+	.base.oclass = GK104_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &gf119_disp_core_func,
+	.mthd = &gk104_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c
new file mode 100644
index 0000000..df0f45c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregk110.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gk110_disp_core_oclass = {
+	.base.oclass = GK110_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &gf119_disp_core_func,
+	.mthd = &gk104_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c
new file mode 100644
index 0000000..9e27f8f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm107.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gm107_disp_core_oclass = {
+	.base.oclass = GM107_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &gf119_disp_core_func,
+	.mthd = &gk104_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c
new file mode 100644
index 0000000..222f4a8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregm204.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gm204_disp_core_oclass = {
+	.base.oclass = GM204_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &gf119_disp_core_func,
+	.mthd = &gk104_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c
new file mode 100644
index 0000000..b234547
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt200.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt200_disp_core_oclass = {
+	.base.oclass = GT200_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &nv50_disp_core_func,
+	.mthd = &g84_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c
new file mode 100644
index 0000000..8f5ba20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregt215.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt215_disp_core_oclass = {
+	.base.oclass = GT214_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &nv50_disp_core_func,
+	.mthd = &g94_disp_core_chan_mthd,
+	.chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
new file mode 100644
index 0000000..db4a9b3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/corenv50.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_core_new(const struct nv50_disp_dmac_func *func,
+		   const struct nv50_disp_chan_mthd *mthd,
+		   struct nv50_disp_root *root, int chid,
+		   const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	union {
+		struct nv50_disp_core_channel_dma_v0 v0;
+	} *args = data;
+	struct nvkm_object *parent = oclass->parent;
+	u64 push;
+	int ret;
+
+	nvif_ioctl(parent, "create disp core channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create disp core channel dma vers %d "
+				   "pushbuf %016llx\n",
+			   args->v0.version, args->v0.pushbuf);
+		push = args->v0.pushbuf;
+	} else
+		return ret;
+
+	return nv50_disp_dmac_new_(func, mthd, root, chid, 0,
+				   push, oclass, pobject);
+}
+
+const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x610bb8 },
+		{ 0x0088, 0x610b9c },
+		{ 0x008c, 0x000000 },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_dac = {
+	.mthd = 0x0080,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0400, 0x610b58 },
+		{ 0x0404, 0x610bdc },
+		{ 0x0420, 0x610828 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_sor = {
+	.mthd = 0x0040,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0600, 0x610b70 },
+		{}
+	}
+};
+
+const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_pior = {
+	.mthd = 0x0040,
+	.addr = 0x000008,
+	.data = {
+		{ 0x0700, 0x610b80 },
+		{}
+	}
+};
+
+static const struct nv50_disp_mthd_list
+nv50_disp_core_mthd_head = {
+	.mthd = 0x0400,
+	.addr = 0x000540,
+	.data = {
+		{ 0x0800, 0x610ad8 },
+		{ 0x0804, 0x610ad0 },
+		{ 0x0808, 0x610a48 },
+		{ 0x080c, 0x610a78 },
+		{ 0x0810, 0x610ac0 },
+		{ 0x0814, 0x610af8 },
+		{ 0x0818, 0x610b00 },
+		{ 0x081c, 0x610ae8 },
+		{ 0x0820, 0x610af0 },
+		{ 0x0824, 0x610b08 },
+		{ 0x0828, 0x610b10 },
+		{ 0x082c, 0x610a68 },
+		{ 0x0830, 0x610a60 },
+		{ 0x0834, 0x000000 },
+		{ 0x0838, 0x610a40 },
+		{ 0x0840, 0x610a24 },
+		{ 0x0844, 0x610a2c },
+		{ 0x0848, 0x610aa8 },
+		{ 0x084c, 0x610ab0 },
+		{ 0x0860, 0x610a84 },
+		{ 0x0864, 0x610a90 },
+		{ 0x0868, 0x610b18 },
+		{ 0x086c, 0x610b20 },
+		{ 0x0870, 0x610ac8 },
+		{ 0x0874, 0x610a38 },
+		{ 0x0880, 0x610a58 },
+		{ 0x0884, 0x610a9c },
+		{ 0x08a0, 0x610a70 },
+		{ 0x08a4, 0x610a50 },
+		{ 0x08a8, 0x610ae0 },
+		{ 0x08c0, 0x610b28 },
+		{ 0x08c4, 0x610b30 },
+		{ 0x08c8, 0x610b40 },
+		{ 0x08d4, 0x610b38 },
+		{ 0x08d8, 0x610b48 },
+		{ 0x08dc, 0x610b50 },
+		{ 0x0900, 0x610a18 },
+		{ 0x0904, 0x610ab8 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+nv50_disp_core_chan_mthd = {
+	.name = "Core",
+	.addr = 0x000000,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &nv50_disp_core_mthd_base },
+		{    "DAC", 3, &nv50_disp_core_mthd_dac  },
+		{    "SOR", 2, &nv50_disp_core_mthd_sor  },
+		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
+		{   "HEAD", 2, &nv50_disp_core_mthd_head },
+		{}
+	}
+};
+
+static void
+nv50_disp_core_fini(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	/* deactivate channel */
+	nvkm_mask(device, 0x610200, 0x00000010, 0x00000000);
+	nvkm_mask(device, 0x610200, 0x00000003, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200) & 0x001e0000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "core fini: %08x\n",
+			   nvkm_rd32(device, 0x610200));
+	}
+
+	/* disable error reporting and completion notifications */
+	nvkm_mask(device, 0x610028, 0x00010001, 0x00000000);
+}
+
+static int
+nv50_disp_core_init(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	/* enable error reporting */
+	nvkm_mask(device, 0x610028, 0x00010000, 0x00010000);
+
+	/* attempt to unstick channel from some unknown state */
+	if ((nvkm_rd32(device, 0x610200) & 0x009f0000) == 0x00020000)
+		nvkm_mask(device, 0x610200, 0x00800000, 0x00800000);
+	if ((nvkm_rd32(device, 0x610200) & 0x003f0000) == 0x00030000)
+		nvkm_mask(device, 0x610200, 0x00600000, 0x00600000);
+
+	/* initialise channel for dma command submission */
+	nvkm_wr32(device, 0x610204, chan->push);
+	nvkm_wr32(device, 0x610208, 0x00010000);
+	nvkm_wr32(device, 0x61020c, 0x00000000);
+	nvkm_mask(device, 0x610200, 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000, 0x00000000);
+	nvkm_wr32(device, 0x610200, 0x01000013);
+
+	/* wait for it to go inactive */
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200) & 0x80000000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "core init: %08x\n",
+			   nvkm_rd32(device, 0x610200));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_dmac_func
+nv50_disp_core_func = {
+	.init = nv50_disp_core_init,
+	.fini = nv50_disp_core_fini,
+	.bind = nv50_disp_dmac_bind,
+};
+
+const struct nv50_disp_dmac_oclass
+nv50_disp_core_oclass = {
+	.base.oclass = NV50_DISP_CORE_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_core_new,
+	.func = &nv50_disp_core_func,
+	.mthd = &nv50_disp_core_chan_mthd,
+	.chid = 0,
+};
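
A note on the `} else return ret;` shape in nv50_disp_core_new() (and the other constructors in this patch): nvif_unpack() is a macro, not a function, and for these calls it assigns the caller's local ret before the else branch can run, so despite appearances no uninitialised value is returned. The idiom, reduced to a hypothetical macro:

	/* Hypothetical stand-in for the nvif_unpack() idiom: the macro both
	 * tests the arguments and deposits an error code in "ret". */
	#define try_unpack(args, ret) (((ret) = validate(args)) == 0)

	int ret;
	if (try_unpack(args, ret)) {
		/* arguments valid: proceed */
	} else
		return ret;
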
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
new file mode 100644
index 0000000..dd99fc7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+g84_disp_curs_oclass = {
+	.base.oclass = G82_DISP_CURSOR,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_curs_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 7,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
new file mode 100644
index 0000000..2a1574e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gf119_disp_curs_oclass = {
+	.base.oclass = GF110_DISP_CURSOR,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_curs_new,
+	.func = &gf119_disp_pioc_func,
+	.chid = 13,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
new file mode 100644
index 0000000..28e8f06
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gk104_disp_curs_oclass = {
+	.base.oclass = GK104_DISP_CURSOR,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_curs_new,
+	.func = &gf119_disp_pioc_func,
+	.chid = 13,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
new file mode 100644
index 0000000..d8a4b9c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gt215_disp_curs_oclass = {
+	.base.oclass = GT214_DISP_CURSOR,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_curs_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 7,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
new file mode 100644
index 0000000..225858e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
+		   const struct nv50_disp_chan_mthd *mthd,
+		   struct nv50_disp_root *root, int chid,
+		   const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	union {
+		struct nv50_disp_cursor_v0 v0;
+	} *args = data;
+	struct nvkm_object *parent = oclass->parent;
+	struct nv50_disp *disp = root->disp;
+	int head, ret;
+
+	nvif_ioctl(parent, "create disp cursor size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create disp cursor vers %d head %d\n",
+			   args->v0.version, args->v0.head);
+		if (args->v0.head > disp->base.head.nr)
+			return -EINVAL;
+		head = args->v0.head;
+	} else
+		return ret;
+
+	return nv50_disp_chan_new_(func, mthd, root, chid + head,
+				   head, oclass, pobject);
+}
+
+const struct nv50_disp_pioc_oclass
+nv50_disp_curs_oclass = {
+	.base.oclass = NV50_DISP_CURSOR,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_curs_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 7,
+};
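
PIOC channels are per-head, so each class's base chid is offset by the head the object is created for: nv50_disp_chan_new_(..., chid + head, head, ...). With .chid = 7 and two heads, nv50 cursors occupy channel IDs 7 and 8; gf119 moves the cursor base to 13. The disp->chan[chid] slot check in nv50_disp_chan_ctor() is then what rejects a second cursor on the same head with -EBUSY.
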
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
index 0f7d1ec..9bfa9e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c
@@ -33,6 +33,7 @@
 int
 nv50_dac_power(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const u32 doff = outp->or * 0x800;
 	union {
 		struct nv50_disp_dac_pwr_v0 v0;
@@ -40,12 +41,12 @@
 	u32 stat;
 	int ret;
 
-	nv_ioctl(object, "disp dac pwr size %d\n", size);
+	nvif_ioctl(object, "disp dac pwr size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp dac pwr vers %d state %d data %d "
-				 "vsync %d hsync %d\n",
-			 args->v0.version, args->v0.state, args->v0.data,
-			 args->v0.vsync, args->v0.hsync);
+		nvif_ioctl(object, "disp dac pwr vers %d state %d data %d "
+				   "vsync %d hsync %d\n",
+			   args->v0.version, args->v0.state, args->v0.data,
+			   args->v0.vsync, args->v0.hsync);
 		stat  = 0x00000040 * !args->v0.state;
 		stat |= 0x00000010 * !args->v0.data;
 		stat |= 0x00000004 * !args->v0.vsync;
@@ -53,15 +54,23 @@
 	} else
 		return ret;
 
-	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
-	nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
-	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
+			break;
+	);
+	nvkm_mask(device, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
+			break;
+	);
 	return 0;
 }
 
 int
 nv50_dac_sense(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	union {
 		struct nv50_disp_dac_load_v0 v0;
 	} *args = data;
@@ -69,31 +78,49 @@
 	u32 loadval;
 	int ret;
 
-	nv_ioctl(object, "disp dac load size %d\n", size);
+	nvif_ioctl(object, "disp dac load size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp dac load vers %d data %08x\n",
-			 args->v0.version, args->v0.data);
+		nvif_ioctl(object, "disp dac load vers %d data %08x\n",
+			   args->v0.version, args->v0.data);
 		if (args->v0.data & 0xfff00000)
 			return -EINVAL;
 		loadval = args->v0.data;
 	} else
 		return ret;
 
-	nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
-	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+	nvkm_mask(device, 0x61a004 + doff, 0x807f0000, 0x80150000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
+			break;
+	);
 
-	nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
+	nvkm_wr32(device, 0x61a00c + doff, 0x00100000 | loadval);
 	mdelay(9);
 	udelay(500);
-	loadval = nv_mask(priv, 0x61a00c + doff, 0xffffffff, 0x00000000);
+	loadval = nvkm_mask(device, 0x61a00c + doff, 0xffffffff, 0x00000000);
 
-	nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
-	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+	nvkm_mask(device, 0x61a004 + doff, 0x807f0000, 0x80550000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61a004 + doff) & 0x80000000))
+			break;
+	);
 
-	nv_debug(priv, "DAC%d sense: 0x%08x\n", outp->or, loadval);
+	nvkm_debug(subdev, "DAC%d sense: %08x\n", outp->or, loadval);
 	if (!(loadval & 0x80000000))
 		return -ETIMEDOUT;
 
 	args->v0.load = (loadval & 0x38000000) >> 27;
 	return 0;
 }
+
+static const struct nvkm_output_func
+nv50_dac_output_func = {
+};
+
+int
+nv50_dac_output_new(struct nvkm_disp *disp, int index,
+		    struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+	return nvkm_output_new_(&nv50_dac_output_func, disp,
+				index, dcbE, poutp);
+}
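
nv50_dac_sense() is the DAC load-detect sequence: power the DAC into test mode (0x80150000), drive the caller-supplied load value through 0x61a00c, wait roughly 9.5ms (mdelay(9) + udelay(500)) for the lines to settle, then read the result back while clearing the register. Bit 31 of the readback appears to signal completion, and bits 29:27 are the per-line load status returned to userspace:

	if (!(loadval & 0x80000000))
		return -ETIMEDOUT;	/* sense never completed */
	args->v0.load = (loadval & 0x38000000) >> 27;	/* three status bits */
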
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
new file mode 100644
index 0000000..876b145
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/ramht.h>
+#include <subdev/timer.h>
+
+int
+gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
+		     struct nvkm_object *object, u32 handle)
+{
+	return nvkm_ramht_insert(chan->base.root->ramht, object,
+				 chan->base.chid, -9, handle,
+				 chan->base.chid << 27 | 0x00000001);
+}
+
+static void
+gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->base.chid;
+
+	/* deactivate channel */
+	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
+	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d fini: %08x\n", chid,
+			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+	}
+
+	/* disable error reporting and completion notification */
+	nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+}
+
+static int
+gf119_disp_dmac_init(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->base.chid;
+
+	/* enable error reporting */
+	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+	/* initialise channel for dma command submission */
+	nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push);
+	nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000);
+	nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001);
+	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+	nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+
+	/* wait for it to go inactive */
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d init: %08x\n", chid,
+			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_dmac_func
+gf119_disp_dmac_func = {
+	.init = gf119_disp_dmac_init,
+	.fini = gf119_disp_dmac_fini,
+	.bind = gf119_disp_dmac_bind,
+};
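
For reference, the RAMHT context word built by gf119_disp_dmac_bind()
above packs the channel id into bits 31:27 and sets bit 0 as a valid
flag.  A hypothetical helper (illustrative only, not part of the patch)
computing the same word:

	static inline u32
	gf119_dmac_ctxdma_data(int chid)
	{
		/* e.g. chid 1 -> 0x08000001 */
		return (u32)chid << 27 | 0x00000001;
	}
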
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
new file mode 100644
index 0000000..9c6645a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+#include <core/oproxy.h>
+#include <core/ramht.h>
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <engine/dma.h>
+
+struct nv50_disp_dmac_object {
+	struct nvkm_oproxy oproxy;
+	struct nv50_disp_root *root;
+	int hash;
+};
+
+static void
+nv50_disp_dmac_child_del_(struct nvkm_oproxy *base)
+{
+	struct nv50_disp_dmac_object *object =
+		container_of(base, typeof(*object), oproxy);
+	nvkm_ramht_remove(object->root->ramht, object->hash);
+}
+
+static const struct nvkm_oproxy_func
+nv50_disp_dmac_child_func_ = {
+	.dtor[0] = nv50_disp_dmac_child_del_,
+};
+
+static int
+nv50_disp_dmac_child_new_(struct nv50_disp_chan *base,
+			  const struct nvkm_oclass *oclass,
+			  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
+	struct nv50_disp_root *root = chan->base.root;
+	struct nvkm_device *device = root->disp->base.engine.subdev.device;
+	const struct nvkm_device_oclass *sclass = oclass->priv;
+	struct nv50_disp_dmac_object *object;
+	int ret;
+
+	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_oproxy_ctor(&nv50_disp_dmac_child_func_, oclass, &object->oproxy);
+	object->root = root;
+	*pobject = &object->oproxy.base;
+
+	ret = sclass->ctor(device, oclass, data, size, &object->oproxy.object);
+	if (ret)
+		return ret;
+
+	object->hash = chan->func->bind(chan, object->oproxy.object,
+					      oclass->handle);
+	if (object->hash < 0)
+		return object->hash;
+
+	return 0;
+}
+
+static int
+nv50_disp_dmac_child_get_(struct nv50_disp_chan *base, int index,
+			  struct nvkm_oclass *sclass)
+{
+	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	const struct nvkm_device_oclass *oclass = NULL;
+
+	sclass->engine = nvkm_device_engine(device, NVKM_ENGINE_DMAOBJ);
+	if (sclass->engine && sclass->engine->func->base.sclass) {
+		sclass->engine->func->base.sclass(sclass, index, &oclass);
+		if (oclass) {
+			sclass->priv = oclass;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static void
+nv50_disp_dmac_fini_(struct nv50_disp_chan *base)
+{
+	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
+	chan->func->fini(chan);
+}
+
+static int
+nv50_disp_dmac_init_(struct nv50_disp_chan *base)
+{
+	struct nv50_disp_dmac *chan = nv50_disp_dmac(base);
+	return chan->func->init(chan);
+}
+
+static void *
+nv50_disp_dmac_dtor_(struct nv50_disp_chan *base)
+{
+	return nv50_disp_dmac(base);
+}
+
+static const struct nv50_disp_chan_func
+nv50_disp_dmac_func_ = {
+	.dtor = nv50_disp_dmac_dtor_,
+	.init = nv50_disp_dmac_init_,
+	.fini = nv50_disp_dmac_fini_,
+	.child_get = nv50_disp_dmac_child_get_,
+	.child_new = nv50_disp_dmac_child_new_,
+};
+
+int
+nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
+		    const struct nv50_disp_chan_mthd *mthd,
+		    struct nv50_disp_root *root, int chid, int head, u64 push,
+		    const struct nvkm_oclass *oclass,
+		    struct nvkm_object **pobject)
+{
+	struct nvkm_device *device = root->disp->base.engine.subdev.device;
+	struct nvkm_client *client = oclass->client;
+	struct nvkm_dmaobj *dmaobj;
+	struct nv50_disp_dmac *chan;
+	int ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+	chan->func = func;
+
+	ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
+				  chid, head, oclass, &chan->base);
+	if (ret)
+		return ret;
+
+	dmaobj = nvkm_dma_search(device->dma, client, push);
+	if (!dmaobj)
+		return -ENOENT;
+
+	if (dmaobj->limit - dmaobj->start != 0xfff)
+		return -EINVAL;
+
+	switch (dmaobj->target) {
+	case NV_MEM_TARGET_VRAM:
+		chan->push = 0x00000001 | dmaobj->start >> 8;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		chan->push = 0x00000003 | dmaobj->start >> 8;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int
+nv50_disp_dmac_bind(struct nv50_disp_dmac *chan,
+		    struct nvkm_object *object, u32 handle)
+{
+	return nvkm_ramht_insert(chan->base.root->ramht, object,
+				 chan->base.chid, -10, handle,
+				 chan->base.chid << 28 |
+				 chan->base.chid);
+}
+
+static void
+nv50_disp_dmac_fini(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->base.chid;
+
+	/* deactivate channel */
+	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
+	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
+			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+	}
+
+	/* disable error reporting and completion notifications */
+	nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+}
+
+static int
+nv50_disp_dmac_init(struct nv50_disp_dmac *chan)
+{
+	struct nv50_disp *disp = chan->base.root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->base.chid;
+
+	/* enable error reporting */
+	nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
+
+	/* initialise channel for dma command submission */
+	nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push);
+	nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
+	nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
+	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+	nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);
+
+	/* wait for it to go inactive */
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
+			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_dmac_func
+nv50_disp_dmac_func = {
+	.init = nv50_disp_dmac_init,
+	.fini = nv50_disp_dmac_fini,
+	.bind = nv50_disp_dmac_bind,
+};
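
nv50_disp_dmac_new_() above requires the push-buffer DMA object to cover
exactly 4KiB (limit - start == 0xfff) and folds a memory-target selector
together with the 256-byte-aligned base address into a single word.  A
worked example with illustrative values (not from the patch), assuming a
VRAM object starting at offset 0x1000:

	u64 start = 0x1000;
	u32 push  = 0x00000001 | start >> 8;	/* VRAM target -> 0x00000011 */
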
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
new file mode 100644
index 0000000..c748ca2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -0,0 +1,91 @@
+#ifndef __NV50_DISP_DMAC_H__
+#define __NV50_DISP_DMAC_H__
+#define nv50_disp_dmac(p) container_of((p), struct nv50_disp_dmac, base)
+#include "channv50.h"
+
+struct nv50_disp_dmac {
+	const struct nv50_disp_dmac_func *func;
+	struct nv50_disp_chan base;
+	u32 push;
+};
+
+struct nv50_disp_dmac_func {
+	int  (*init)(struct nv50_disp_dmac *);
+	void (*fini)(struct nv50_disp_dmac *);
+	int  (*bind)(struct nv50_disp_dmac *, struct nvkm_object *, u32 handle);
+};
+
+int nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *,
+			const struct nv50_disp_chan_mthd *,
+			struct nv50_disp_root *, int chid, int head, u64 push,
+			const struct nvkm_oclass *, struct nvkm_object **);
+
+extern const struct nv50_disp_dmac_func nv50_disp_dmac_func;
+int nv50_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
+extern const struct nv50_disp_dmac_func nv50_disp_core_func;
+
+extern const struct nv50_disp_dmac_func gf119_disp_dmac_func;
+int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
+extern const struct nv50_disp_dmac_func gf119_disp_core_func;
+
+struct nv50_disp_dmac_oclass {
+	int (*ctor)(const struct nv50_disp_dmac_func *,
+		    const struct nv50_disp_chan_mthd *,
+		    struct nv50_disp_root *, int chid,
+		    const struct nvkm_oclass *, void *data, u32 size,
+		    struct nvkm_object **);
+	struct nvkm_sclass base;
+	const struct nv50_disp_dmac_func *func;
+	const struct nv50_disp_chan_mthd *mthd;
+	int chid;
+};
+
+int nv50_disp_core_new(const struct nv50_disp_dmac_func *,
+		       const struct nv50_disp_chan_mthd *,
+		       struct nv50_disp_root *, int chid,
+		       const struct nvkm_oclass *oclass, void *data, u32 size,
+		       struct nvkm_object **);
+int nv50_disp_base_new(const struct nv50_disp_dmac_func *,
+		       const struct nv50_disp_chan_mthd *,
+		       struct nv50_disp_root *, int chid,
+		       const struct nvkm_oclass *oclass, void *data, u32 size,
+		       struct nvkm_object **);
+int nv50_disp_ovly_new(const struct nv50_disp_dmac_func *,
+		       const struct nv50_disp_chan_mthd *,
+		       struct nv50_disp_root *, int chid,
+		       const struct nvkm_oclass *oclass, void *data, u32 size,
+		       struct nvkm_object **);
+
+extern const struct nv50_disp_dmac_oclass nv50_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass nv50_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass nv50_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass g84_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass g84_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass g84_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass g94_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gt200_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gt200_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gt200_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass gt215_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gt215_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gt215_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass gf119_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gf119_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gf119_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass gk104_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gk104_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gk104_disp_ovly_oclass;
+
+extern const struct nv50_disp_dmac_oclass gk110_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gk110_disp_base_oclass;
+
+extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gm204_disp_core_oclass;
+#endif
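
This header splits responsibilities in two: nv50_disp_dmac_func carries
the per-generation init/fini/bind hooks, while nv50_disp_dmac_oclass
carries the per-class constructor plumbing.  A minimal sketch of how the
hook side is consumed, mirroring the wrappers in dmacnv50.c above (the
function name here is hypothetical):

	static int
	example_dmac_init(struct nv50_disp_dmac *chan)
	{
		/* dispatch to the generation-specific implementation,
		 * e.g. nv50_disp_dmac_init or gf119_disp_dmac_init */
		return chan->func->init(chan);
	}
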
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 6834766..74e2f7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -48,12 +48,12 @@
 static int
 dp_set_link_config(struct dp_state *dp)
 {
-	struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
 	struct nvkm_output_dp *outp = dp->outp;
-	struct nvkm_disp *disp = nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(disp);
+	struct nvkm_disp *disp = outp->base.disp;
+	struct nvkm_subdev *subdev = &disp->engine.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_init init = {
-		.subdev = nv_subdev(disp),
+		.subdev = subdev,
 		.bios = bios,
 		.offset = 0x0000,
 		.outp = &outp->base.info,
@@ -64,33 +64,33 @@
 	u8 sink[2];
 	int ret;
 
-	DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
+	OUTP_DBG(&outp->base, "%d lanes at %d KB/s", dp->link_nr, dp->link_bw);
 
 	/* set desired link configuration on the source */
 	if ((lnkcmp = dp->outp->info.lnkcmp)) {
 		if (outp->version < 0x30) {
-			while ((dp->link_bw / 10) < nv_ro16(bios, lnkcmp))
+			while ((dp->link_bw / 10) < nvbios_rd16(bios, lnkcmp))
 				lnkcmp += 4;
-			init.offset = nv_ro16(bios, lnkcmp + 2);
+			init.offset = nvbios_rd16(bios, lnkcmp + 2);
 		} else {
-			while ((dp->link_bw / 27000) < nv_ro08(bios, lnkcmp))
+			while ((dp->link_bw / 27000) < nvbios_rd08(bios, lnkcmp))
 				lnkcmp += 3;
-			init.offset = nv_ro16(bios, lnkcmp + 1);
+			init.offset = nvbios_rd16(bios, lnkcmp + 1);
 		}
 
 		nvbios_exec(&init);
 	}
 
-	ret = impl->lnk_ctl(outp, dp->link_nr, dp->link_bw / 27000,
-			    outp->dpcd[DPCD_RC02] &
-				       DPCD_RC02_ENHANCED_FRAME_CAP);
+	ret = outp->func->lnk_ctl(outp, dp->link_nr, dp->link_bw / 27000,
+				  outp->dpcd[DPCD_RC02] &
+					     DPCD_RC02_ENHANCED_FRAME_CAP);
 	if (ret) {
 		if (ret < 0)
-			ERR("lnk_ctl failed with %d\n", ret);
+			OUTP_ERR(&outp->base, "lnk_ctl failed with %d", ret);
 		return ret;
 	}
 
-	impl->lnk_pwr(outp, dp->link_nr);
+	outp->func->lnk_pwr(outp, dp->link_nr);
 
 	/* set desired link configuration on the sink */
 	sink[0] = dp->link_bw / 27000;
@@ -98,29 +98,27 @@
 	if (outp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
 		sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
 
-	return nv_wraux(outp->base.edid, DPCD_LC00_LINK_BW_SET, sink, 2);
+	return nvkm_wraux(outp->aux, DPCD_LC00_LINK_BW_SET, sink, 2);
 }
 
 static void
 dp_set_training_pattern(struct dp_state *dp, u8 pattern)
 {
-	struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
 	struct nvkm_output_dp *outp = dp->outp;
 	u8 sink_tp;
 
-	DBG("training pattern %d\n", pattern);
-	impl->pattern(outp, pattern);
+	OUTP_DBG(&outp->base, "training pattern %d", pattern);
+	outp->func->pattern(outp, pattern);
 
-	nv_rdaux(outp->base.edid, DPCD_LC02, &sink_tp, 1);
+	nvkm_rdaux(outp->aux, DPCD_LC02, &sink_tp, 1);
 	sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
 	sink_tp |= pattern;
-	nv_wraux(outp->base.edid, DPCD_LC02, &sink_tp, 1);
+	nvkm_wraux(outp->aux, DPCD_LC02, &sink_tp, 1);
 }
 
 static int
 dp_link_train_commit(struct dp_state *dp, bool pc)
 {
-	struct nvkm_output_dp_impl *impl = (void *)nv_oclass(dp->outp);
 	struct nvkm_output_dp *outp = dp->outp;
 	int ret, i;
 
@@ -146,16 +144,17 @@
 		dp->conf[i] = (lpre << 3) | lvsw;
 		dp->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4);
 
-		DBG("config lane %d %02x %02x\n", i, dp->conf[i], lpc2);
-		impl->drv_ctl(outp, i, lvsw & 3, lpre & 3, lpc2 & 3);
+		OUTP_DBG(&outp->base, "config lane %d %02x %02x",
+			 i, dp->conf[i], lpc2);
+		outp->func->drv_ctl(outp, i, lvsw & 3, lpre & 3, lpc2 & 3);
 	}
 
-	ret = nv_wraux(outp->base.edid, DPCD_LC03(0), dp->conf, 4);
+	ret = nvkm_wraux(outp->aux, DPCD_LC03(0), dp->conf, 4);
 	if (ret)
 		return ret;
 
 	if (pc) {
-		ret = nv_wraux(outp->base.edid, DPCD_LC0F, dp->pc2conf, 2);
+		ret = nvkm_wraux(outp->aux, DPCD_LC0F, dp->pc2conf, 2);
 		if (ret)
 			return ret;
 	}
@@ -174,17 +173,18 @@
 	else
 		udelay(delay);
 
-	ret = nv_rdaux(outp->base.edid, DPCD_LS02, dp->stat, 6);
+	ret = nvkm_rdaux(outp->aux, DPCD_LS02, dp->stat, 6);
 	if (ret)
 		return ret;
 
 	if (pc) {
-		ret = nv_rdaux(outp->base.edid, DPCD_LS0C, &dp->pc2stat, 1);
+		ret = nvkm_rdaux(outp->aux, DPCD_LS0C, &dp->pc2stat, 1);
 		if (ret)
 			dp->pc2stat = 0x00;
-		DBG("status %6ph pc2 %02x\n", dp->stat, dp->pc2stat);
+		OUTP_DBG(&outp->base, "status %6ph pc2 %02x",
+			 dp->stat, dp->pc2stat);
 	} else {
-		DBG("status %6ph\n", dp->stat);
+		OUTP_DBG(&outp->base, "status %6ph", dp->stat);
 	}
 
 	return 0;
@@ -260,11 +260,11 @@
 dp_link_train_init(struct dp_state *dp, bool spread)
 {
 	struct nvkm_output_dp *outp = dp->outp;
-	struct nvkm_disp *disp = nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(disp);
+	struct nvkm_disp *disp = outp->base.disp;
+	struct nvkm_subdev *subdev = &disp->engine.subdev;
 	struct nvbios_init init = {
-		.subdev = nv_subdev(disp),
-		.bios = bios,
+		.subdev = subdev,
+		.bios = subdev->device->bios,
 		.outp = &outp->base.info,
 		.crtc = -1,
 		.execute = 1,
@@ -286,11 +286,11 @@
 dp_link_train_fini(struct dp_state *dp)
 {
 	struct nvkm_output_dp *outp = dp->outp;
-	struct nvkm_disp *disp = nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(disp);
+	struct nvkm_disp *disp = outp->base.disp;
+	struct nvkm_subdev *subdev = &disp->engine.subdev;
 	struct nvbios_init init = {
-		.subdev = nv_subdev(disp),
-		.bios = bios,
+		.subdev = subdev,
+		.bios = subdev->device->bios,
 		.outp = &outp->base.info,
 		.crtc = -1,
 		.execute = 1,
@@ -322,7 +322,7 @@
 nvkm_dp_train(struct work_struct *w)
 {
 	struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work);
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nv50_disp *disp = nv50_disp(outp->base.disp);
 	const struct dp_rates *cfg = nvkm_dp_rates;
 	struct dp_state _dp = {
 		.outp = outp,
@@ -330,11 +330,11 @@
 	u32 datarate = 0;
 	int ret;
 
-	if (!outp->base.info.location && priv->sor.magic)
-		priv->sor.magic(&outp->base);
+	if (!outp->base.info.location && disp->func->sor.magic)
+		disp->func->sor.magic(&outp->base);
 
 	/* bring capabilities within encoder limits */
-	if (nv_mclass(priv) < GF110_DISP)
+	if (disp->base.engine.subdev.device->chipset < 0xd0)
 		outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED;
 	if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) {
 		outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT;
@@ -386,12 +386,12 @@
 	/* finish link training and execute post-train script from vbios */
 	dp_set_training_pattern(dp, 0);
 	if (ret < 0)
-		ERR("link training failed\n");
+		OUTP_ERR(&outp->base, "link training failed");
 
 	dp_link_train_fini(dp);
 
 	/* signal completion and enable link interrupt handling */
-	DBG("training complete\n");
+	OUTP_DBG(&outp->base, "training complete");
 	atomic_set(&outp->lt.done, 1);
 	wake_up(&outp->lt.wait);
 	nvkm_notify_get(&outp->irq);
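
The sink-side programming in dp_set_link_config() now goes through the
nvkm_wraux() helper on outp->aux rather than the old nv_wraux() on
outp->base.edid.  A worked example of the two DPCD bytes it writes,
assuming an HBR (2.7 Gb/s) link with 4 lanes and enhanced framing
supported:

	u8 sink[2];
	sink[0] = 270000 / 27000;			/* 0x0a: LINK_BW_SET, 2.7 Gb/s */
	sink[1] = 4 | DPCD_LC01_ENHANCED_FRAME_EN;	/* 0x84: 4 lanes + enhanced framing */
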
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
index a0dcf53..3e3e592 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
@@ -22,251 +22,34 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-const struct nv50_disp_mthd_list
-g84_disp_core_mthd_dac = {
-	.mthd = 0x0080,
-	.addr = 0x000008,
-	.data = {
-		{ 0x0400, 0x610b58 },
-		{ 0x0404, 0x610bdc },
-		{ 0x0420, 0x610bc4 },
-		{}
-	}
+static const struct nv50_disp_func
+g84_disp = {
+	.intr = nv50_disp_intr,
+	.uevent = &nv50_disp_chan_uevent,
+	.super = nv50_disp_intr_supervisor,
+	.root = &g84_disp_root_oclass,
+	.head.vblank_init = nv50_disp_vblank_init,
+	.head.vblank_fini = nv50_disp_vblank_fini,
+	.head.scanoutpos = nv50_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.external.tmds = nv50_pior_output_new,
+	.outp.external.dp = nv50_pior_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 2,
+	.sor.power = nv50_sor_power,
+	.sor.hdmi = g84_hdmi_ctrl,
+	.pior.nr = 3,
+	.pior.power = nv50_pior_power,
 };
 
-const struct nv50_disp_mthd_list
-g84_disp_core_mthd_head = {
-	.mthd = 0x0400,
-	.addr = 0x000540,
-	.data = {
-		{ 0x0800, 0x610ad8 },
-		{ 0x0804, 0x610ad0 },
-		{ 0x0808, 0x610a48 },
-		{ 0x080c, 0x610a78 },
-		{ 0x0810, 0x610ac0 },
-		{ 0x0814, 0x610af8 },
-		{ 0x0818, 0x610b00 },
-		{ 0x081c, 0x610ae8 },
-		{ 0x0820, 0x610af0 },
-		{ 0x0824, 0x610b08 },
-		{ 0x0828, 0x610b10 },
-		{ 0x082c, 0x610a68 },
-		{ 0x0830, 0x610a60 },
-		{ 0x0834, 0x000000 },
-		{ 0x0838, 0x610a40 },
-		{ 0x0840, 0x610a24 },
-		{ 0x0844, 0x610a2c },
-		{ 0x0848, 0x610aa8 },
-		{ 0x084c, 0x610ab0 },
-		{ 0x085c, 0x610c5c },
-		{ 0x0860, 0x610a84 },
-		{ 0x0864, 0x610a90 },
-		{ 0x0868, 0x610b18 },
-		{ 0x086c, 0x610b20 },
-		{ 0x0870, 0x610ac8 },
-		{ 0x0874, 0x610a38 },
-		{ 0x0878, 0x610c50 },
-		{ 0x0880, 0x610a58 },
-		{ 0x0884, 0x610a9c },
-		{ 0x089c, 0x610c68 },
-		{ 0x08a0, 0x610a70 },
-		{ 0x08a4, 0x610a50 },
-		{ 0x08a8, 0x610ae0 },
-		{ 0x08c0, 0x610b28 },
-		{ 0x08c4, 0x610b30 },
-		{ 0x08c8, 0x610b40 },
-		{ 0x08d4, 0x610b38 },
-		{ 0x08d8, 0x610b48 },
-		{ 0x08dc, 0x610b50 },
-		{ 0x0900, 0x610a18 },
-		{ 0x0904, 0x610ab8 },
-		{ 0x0910, 0x610c70 },
-		{ 0x0914, 0x610c78 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_chan
-g84_disp_core_mthd_chan = {
-	.name = "Core",
-	.addr = 0x000000,
-	.data = {
-		{ "Global", 1, &nv50_disp_core_mthd_base },
-		{    "DAC", 3, &g84_disp_core_mthd_dac  },
-		{    "SOR", 2, &nv50_disp_core_mthd_sor  },
-		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
-		{   "HEAD", 2, &g84_disp_core_mthd_head },
-		{}
-	}
-};
-
-/*******************************************************************************
- * EVO sync channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-g84_disp_base_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x0008c4 },
-		{ 0x0088, 0x0008d0 },
-		{ 0x008c, 0x0008dc },
-		{ 0x0090, 0x0008e4 },
-		{ 0x0094, 0x610884 },
-		{ 0x00a0, 0x6108a0 },
-		{ 0x00a4, 0x610878 },
-		{ 0x00c0, 0x61086c },
-		{ 0x00c4, 0x610800 },
-		{ 0x00c8, 0x61080c },
-		{ 0x00cc, 0x610818 },
-		{ 0x00e0, 0x610858 },
-		{ 0x00e4, 0x610860 },
-		{ 0x00e8, 0x6108ac },
-		{ 0x00ec, 0x6108b4 },
-		{ 0x00fc, 0x610824 },
-		{ 0x0100, 0x610894 },
-		{ 0x0104, 0x61082c },
-		{ 0x0110, 0x6108bc },
-		{ 0x0114, 0x61088c },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_chan
-g84_disp_base_mthd_chan = {
-	.name = "Base",
-	.addr = 0x000540,
-	.data = {
-		{ "Global", 1, &g84_disp_base_mthd_base },
-		{  "Image", 2, &nv50_disp_base_mthd_image },
-		{}
-	}
-};
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-g84_disp_ovly_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x6109a0 },
-		{ 0x0088, 0x6109c0 },
-		{ 0x008c, 0x6109c8 },
-		{ 0x0090, 0x6109b4 },
-		{ 0x0094, 0x610970 },
-		{ 0x00a0, 0x610998 },
-		{ 0x00a4, 0x610964 },
-		{ 0x00c0, 0x610958 },
-		{ 0x00e0, 0x6109a8 },
-		{ 0x00e4, 0x6109d0 },
-		{ 0x00e8, 0x6109d8 },
-		{ 0x0100, 0x61094c },
-		{ 0x0104, 0x610984 },
-		{ 0x0108, 0x61098c },
-		{ 0x0800, 0x6109f8 },
-		{ 0x0808, 0x610a08 },
-		{ 0x080c, 0x610a10 },
-		{ 0x0810, 0x610a00 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_chan
-g84_disp_ovly_mthd_chan = {
-	.name = "Overlay",
-	.addr = 0x000540,
-	.data = {
-		{ "Global", 1, &g84_disp_ovly_mthd_base },
-		{}
-	}
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_disp_sclass[] = {
-	{ G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
-	{ G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
-	{ G82_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
-	{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
-	{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
-	{}
-};
-
-static struct nvkm_oclass
-g84_disp_main_oclass[] = {
-	{ G82_DISP, &nv50_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-g84_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+g84_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = g84_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = nv50_disp_intr;
-	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
-	priv->sclass = g84_disp_sclass;
-	priv->head.nr = 2;
-	priv->dac.nr = 3;
-	priv->sor.nr = 2;
-	priv->pior.nr = 3;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hdmi = g84_hdmi_ctrl;
-	priv->pior.power = nv50_pior_power;
-	return 0;
+	return nv50_disp_new_(&g84_disp, device, index, 2, pdisp);
 }
-
-struct nvkm_oclass *
-g84_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x82),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &nv50_disp_vblank_func,
-	.base.outp =  nv50_disp_outp_sclass,
-	.mthd.core = &g84_disp_core_mthd_chan,
-	.mthd.base = &g84_disp_base_mthd_chan,
-	.mthd.ovly = &g84_disp_ovly_mthd_chan,
-	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
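
With this conversion a display engine variant reduces to a const
function table plus a one-line constructor.  A hedged illustration of
adding another nv50-family chipset the same way (all gNN_* names are
hypothetical placeholders):

	static const struct nv50_disp_func
	gNN_disp = {
		.intr = nv50_disp_intr,
		.root = &gNN_disp_root_oclass,	/* hypothetical per-chip root class */
		/* ... remaining hooks as in g84_disp above ... */
	};

	int
	gNN_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
	{
		return nv50_disp_new_(&gNN_disp, device, index, 2, pdisp);
	}
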
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index 1ab0d0a..7a7af3b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -22,118 +22,35 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-#include "outpdp.h"
+#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-const struct nv50_disp_mthd_list
-g94_disp_core_mthd_sor = {
-	.mthd = 0x0040,
-	.addr = 0x000008,
-	.data = {
-		{ 0x0600, 0x610794 },
-		{}
-	}
+static const struct nv50_disp_func
+g94_disp = {
+	.intr = nv50_disp_intr,
+	.uevent = &nv50_disp_chan_uevent,
+	.super = nv50_disp_intr_supervisor,
+	.root = &g94_disp_root_oclass,
+	.head.vblank_init = nv50_disp_vblank_init,
+	.head.vblank_fini = nv50_disp_vblank_fini,
+	.head.scanoutpos = nv50_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = g94_sor_dp_new,
+	.outp.external.tmds = nv50_pior_output_new,
+	.outp.external.dp = nv50_pior_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hdmi = g84_hdmi_ctrl,
+	.pior.nr = 3,
+	.pior.power = nv50_pior_power,
 };
 
-const struct nv50_disp_mthd_chan
-g94_disp_core_mthd_chan = {
-	.name = "Core",
-	.addr = 0x000000,
-	.data = {
-		{ "Global", 1, &nv50_disp_core_mthd_base },
-		{    "DAC", 3, &g84_disp_core_mthd_dac  },
-		{    "SOR", 4, &g94_disp_core_mthd_sor  },
-		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
-		{   "HEAD", 2, &g84_disp_core_mthd_head },
-		{}
-	}
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-g94_disp_sclass[] = {
-	{ GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
-	{ GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
-	{ GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
-	{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
-	{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
-	{}
-};
-
-static struct nvkm_oclass
-g94_disp_main_oclass[] = {
-	{ GT206_DISP, &nv50_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-g94_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+g94_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = g94_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = nv50_disp_intr;
-	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
-	priv->sclass = g94_disp_sclass;
-	priv->head.nr = 2;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->pior.nr = 3;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hdmi = g84_hdmi_ctrl;
-	priv->pior.power = nv50_pior_power;
-	return 0;
+	return nv50_disp_new_(&g94_disp, device, index, 2, pdisp);
 }
-
-struct nvkm_oclass *
-g94_disp_outp_sclass[] = {
-	&nv50_pior_dp_impl.base.base,
-	&g94_sor_dp_impl.base.base,
-	NULL
-};
-
-struct nvkm_oclass *
-g94_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x88),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g94_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &nv50_disp_vblank_func,
-	.base.outp =  g94_disp_outp_sclass,
-	.mthd.core = &g94_disp_core_mthd_chan,
-	.mthd.base = &g84_disp_base_mthd_chan,
-	.mthd.ovly = &g84_disp_ovly_mthd_chan,
-	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
deleted file mode 100644
index 7f2f05f..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
+++ /dev/null
@@ -1,1310 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv50.h"
-#include "outp.h"
-#include "outpdp.h"
-
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <core/ramht.h>
-#include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
-#include <subdev/bios/disp.h>
-#include <subdev/bios/init.h>
-#include <subdev/bios/pll.h>
-#include <subdev/devinit.h>
-#include <subdev/timer.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-/*******************************************************************************
- * EVO channel base class
- ******************************************************************************/
-
-static void
-gf110_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
-{
-	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
-	nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000000 << index);
-	nv_wr32(priv, 0x61008c, 0x00000001 << index);
-}
-
-static void
-gf110_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
-{
-	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
-	nv_wr32(priv, 0x61008c, 0x00000001 << index);
-	nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000001 << index);
-}
-
-const struct nvkm_event_func
-gf110_disp_chan_uevent = {
-	.ctor = nv50_disp_chan_uevent_ctor,
-	.init = gf110_disp_chan_uevent_init,
-	.fini = gf110_disp_chan_uevent_fini,
-};
-
-/*******************************************************************************
- * EVO DMA channel base class
- ******************************************************************************/
-
-static int
-gf110_disp_dmac_object_attach(struct nvkm_object *parent,
-			      struct nvkm_object *object, u32 name)
-{
-	struct nv50_disp_base *base = (void *)parent->parent;
-	struct nv50_disp_chan *chan = (void *)parent;
-	u32 addr = nv_gpuobj(object)->node->offset;
-	u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
-	return nvkm_ramht_insert(base->ramht, chan->chid, name, data);
-}
-
-static void
-gf110_disp_dmac_object_detach(struct nvkm_object *parent, int cookie)
-{
-	struct nv50_disp_base *base = (void *)parent->parent;
-	nvkm_ramht_remove(base->ramht, cookie);
-}
-
-static int
-gf110_disp_dmac_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *dmac = (void *)object;
-	int chid = dmac->base.chid;
-	int ret;
-
-	ret = nv50_disp_chan_init(&dmac->base);
-	if (ret)
-		return ret;
-
-	/* enable error reporting */
-	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
-
-	/* initialise channel for dma command submission */
-	nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
-	nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
-	nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
-	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
-	nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
-	nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
-
-	/* wait for it to go inactive */
-	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
-		nv_error(dmac, "init: 0x%08x\n",
-			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-gf110_disp_dmac_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *dmac = (void *)object;
-	int chid = dmac->base.chid;
-
-	/* deactivate channel */
-	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
-	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
-	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
-		nv_error(dmac, "fini: 0x%08x\n",
-			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	/* disable error reporting and completion notification */
-	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
-	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
-
-	return nv50_disp_chan_fini(&dmac->base, suspend);
-}
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x660080 },
-		{ 0x0084, 0x660084 },
-		{ 0x0088, 0x660088 },
-		{ 0x008c, 0x000000 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_dac = {
-	.mthd = 0x0020,
-	.addr = 0x000020,
-	.data = {
-		{ 0x0180, 0x660180 },
-		{ 0x0184, 0x660184 },
-		{ 0x0188, 0x660188 },
-		{ 0x0190, 0x660190 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_sor = {
-	.mthd = 0x0020,
-	.addr = 0x000020,
-	.data = {
-		{ 0x0200, 0x660200 },
-		{ 0x0204, 0x660204 },
-		{ 0x0208, 0x660208 },
-		{ 0x0210, 0x660210 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_pior = {
-	.mthd = 0x0020,
-	.addr = 0x000020,
-	.data = {
-		{ 0x0300, 0x660300 },
-		{ 0x0304, 0x660304 },
-		{ 0x0308, 0x660308 },
-		{ 0x0310, 0x660310 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_list
-gf110_disp_core_mthd_head = {
-	.mthd = 0x0300,
-	.addr = 0x000300,
-	.data = {
-		{ 0x0400, 0x660400 },
-		{ 0x0404, 0x660404 },
-		{ 0x0408, 0x660408 },
-		{ 0x040c, 0x66040c },
-		{ 0x0410, 0x660410 },
-		{ 0x0414, 0x660414 },
-		{ 0x0418, 0x660418 },
-		{ 0x041c, 0x66041c },
-		{ 0x0420, 0x660420 },
-		{ 0x0424, 0x660424 },
-		{ 0x0428, 0x660428 },
-		{ 0x042c, 0x66042c },
-		{ 0x0430, 0x660430 },
-		{ 0x0434, 0x660434 },
-		{ 0x0438, 0x660438 },
-		{ 0x0440, 0x660440 },
-		{ 0x0444, 0x660444 },
-		{ 0x0448, 0x660448 },
-		{ 0x044c, 0x66044c },
-		{ 0x0450, 0x660450 },
-		{ 0x0454, 0x660454 },
-		{ 0x0458, 0x660458 },
-		{ 0x045c, 0x66045c },
-		{ 0x0460, 0x660460 },
-		{ 0x0468, 0x660468 },
-		{ 0x046c, 0x66046c },
-		{ 0x0470, 0x660470 },
-		{ 0x0474, 0x660474 },
-		{ 0x0480, 0x660480 },
-		{ 0x0484, 0x660484 },
-		{ 0x048c, 0x66048c },
-		{ 0x0490, 0x660490 },
-		{ 0x0494, 0x660494 },
-		{ 0x0498, 0x660498 },
-		{ 0x04b0, 0x6604b0 },
-		{ 0x04b8, 0x6604b8 },
-		{ 0x04bc, 0x6604bc },
-		{ 0x04c0, 0x6604c0 },
-		{ 0x04c4, 0x6604c4 },
-		{ 0x04c8, 0x6604c8 },
-		{ 0x04d0, 0x6604d0 },
-		{ 0x04d4, 0x6604d4 },
-		{ 0x04e0, 0x6604e0 },
-		{ 0x04e4, 0x6604e4 },
-		{ 0x04e8, 0x6604e8 },
-		{ 0x04ec, 0x6604ec },
-		{ 0x04f0, 0x6604f0 },
-		{ 0x04f4, 0x6604f4 },
-		{ 0x04f8, 0x6604f8 },
-		{ 0x04fc, 0x6604fc },
-		{ 0x0500, 0x660500 },
-		{ 0x0504, 0x660504 },
-		{ 0x0508, 0x660508 },
-		{ 0x050c, 0x66050c },
-		{ 0x0510, 0x660510 },
-		{ 0x0514, 0x660514 },
-		{ 0x0518, 0x660518 },
-		{ 0x051c, 0x66051c },
-		{ 0x052c, 0x66052c },
-		{ 0x0530, 0x660530 },
-		{ 0x054c, 0x66054c },
-		{ 0x0550, 0x660550 },
-		{ 0x0554, 0x660554 },
-		{ 0x0558, 0x660558 },
-		{ 0x055c, 0x66055c },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_chan
-gf110_disp_core_mthd_chan = {
-	.name = "Core",
-	.addr = 0x000000,
-	.data = {
-		{ "Global", 1, &gf110_disp_core_mthd_base },
-		{    "DAC", 3, &gf110_disp_core_mthd_dac  },
-		{    "SOR", 8, &gf110_disp_core_mthd_sor  },
-		{   "PIOR", 4, &gf110_disp_core_mthd_pior },
-		{   "HEAD", 4, &gf110_disp_core_mthd_head },
-		{}
-	}
-};
-
-static int
-gf110_disp_core_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *mast = (void *)object;
-	int ret;
-
-	ret = nv50_disp_chan_init(&mast->base);
-	if (ret)
-		return ret;
-
-	/* enable error reporting */
-	nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
-
-	/* initialise channel for dma command submission */
-	nv_wr32(priv, 0x610494, mast->push);
-	nv_wr32(priv, 0x610498, 0x00010000);
-	nv_wr32(priv, 0x61049c, 0x00000001);
-	nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
-	nv_wr32(priv, 0x640000, 0x00000000);
-	nv_wr32(priv, 0x610490, 0x01000013);
-
-	/* wait for it to go inactive */
-	if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
-		nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-gf110_disp_core_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *mast = (void *)object;
-
-	/* deactivate channel */
-	nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
-	nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
-	if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
-		nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	/* disable error reporting and completion notification */
-	nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
-	nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
-
-	return nv50_disp_chan_fini(&mast->base, suspend);
-}
-
-struct nv50_disp_chan_impl
-gf110_disp_core_ofuncs = {
-	.base.ctor = nv50_disp_core_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = gf110_disp_core_init,
-	.base.fini = gf110_disp_core_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 0,
-	.attach = gf110_disp_dmac_object_attach,
-	.detach = gf110_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO sync channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gf110_disp_base_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x661080 },
-		{ 0x0084, 0x661084 },
-		{ 0x0088, 0x661088 },
-		{ 0x008c, 0x66108c },
-		{ 0x0090, 0x661090 },
-		{ 0x0094, 0x661094 },
-		{ 0x00a0, 0x6610a0 },
-		{ 0x00a4, 0x6610a4 },
-		{ 0x00c0, 0x6610c0 },
-		{ 0x00c4, 0x6610c4 },
-		{ 0x00c8, 0x6610c8 },
-		{ 0x00cc, 0x6610cc },
-		{ 0x00e0, 0x6610e0 },
-		{ 0x00e4, 0x6610e4 },
-		{ 0x00e8, 0x6610e8 },
-		{ 0x00ec, 0x6610ec },
-		{ 0x00fc, 0x6610fc },
-		{ 0x0100, 0x661100 },
-		{ 0x0104, 0x661104 },
-		{ 0x0108, 0x661108 },
-		{ 0x010c, 0x66110c },
-		{ 0x0110, 0x661110 },
-		{ 0x0114, 0x661114 },
-		{ 0x0118, 0x661118 },
-		{ 0x011c, 0x66111c },
-		{ 0x0130, 0x661130 },
-		{ 0x0134, 0x661134 },
-		{ 0x0138, 0x661138 },
-		{ 0x013c, 0x66113c },
-		{ 0x0140, 0x661140 },
-		{ 0x0144, 0x661144 },
-		{ 0x0148, 0x661148 },
-		{ 0x014c, 0x66114c },
-		{ 0x0150, 0x661150 },
-		{ 0x0154, 0x661154 },
-		{ 0x0158, 0x661158 },
-		{ 0x015c, 0x66115c },
-		{ 0x0160, 0x661160 },
-		{ 0x0164, 0x661164 },
-		{ 0x0168, 0x661168 },
-		{ 0x016c, 0x66116c },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_list
-gf110_disp_base_mthd_image = {
-	.mthd = 0x0020,
-	.addr = 0x000020,
-	.data = {
-		{ 0x0400, 0x661400 },
-		{ 0x0404, 0x661404 },
-		{ 0x0408, 0x661408 },
-		{ 0x040c, 0x66140c },
-		{ 0x0410, 0x661410 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_chan
-gf110_disp_base_mthd_chan = {
-	.name = "Base",
-	.addr = 0x001000,
-	.data = {
-		{ "Global", 1, &gf110_disp_base_mthd_base },
-		{  "Image", 2, &gf110_disp_base_mthd_image },
-		{}
-	}
-};
-
-struct nv50_disp_chan_impl
-gf110_disp_base_ofuncs = {
-	.base.ctor = nv50_disp_base_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = gf110_disp_dmac_init,
-	.base.fini = gf110_disp_dmac_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 1,
-	.attach = gf110_disp_dmac_object_attach,
-	.detach = gf110_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gf110_disp_ovly_mthd_base = {
-	.mthd = 0x0000,
-	.data = {
-		{ 0x0080, 0x665080 },
-		{ 0x0084, 0x665084 },
-		{ 0x0088, 0x665088 },
-		{ 0x008c, 0x66508c },
-		{ 0x0090, 0x665090 },
-		{ 0x0094, 0x665094 },
-		{ 0x00a0, 0x6650a0 },
-		{ 0x00a4, 0x6650a4 },
-		{ 0x00b0, 0x6650b0 },
-		{ 0x00b4, 0x6650b4 },
-		{ 0x00b8, 0x6650b8 },
-		{ 0x00c0, 0x6650c0 },
-		{ 0x00e0, 0x6650e0 },
-		{ 0x00e4, 0x6650e4 },
-		{ 0x00e8, 0x6650e8 },
-		{ 0x0100, 0x665100 },
-		{ 0x0104, 0x665104 },
-		{ 0x0108, 0x665108 },
-		{ 0x010c, 0x66510c },
-		{ 0x0110, 0x665110 },
-		{ 0x0118, 0x665118 },
-		{ 0x011c, 0x66511c },
-		{ 0x0120, 0x665120 },
-		{ 0x0124, 0x665124 },
-		{ 0x0130, 0x665130 },
-		{ 0x0134, 0x665134 },
-		{ 0x0138, 0x665138 },
-		{ 0x013c, 0x66513c },
-		{ 0x0140, 0x665140 },
-		{ 0x0144, 0x665144 },
-		{ 0x0148, 0x665148 },
-		{ 0x014c, 0x66514c },
-		{ 0x0150, 0x665150 },
-		{ 0x0154, 0x665154 },
-		{ 0x0158, 0x665158 },
-		{ 0x015c, 0x66515c },
-		{ 0x0160, 0x665160 },
-		{ 0x0164, 0x665164 },
-		{ 0x0168, 0x665168 },
-		{ 0x016c, 0x66516c },
-		{ 0x0400, 0x665400 },
-		{ 0x0408, 0x665408 },
-		{ 0x040c, 0x66540c },
-		{ 0x0410, 0x665410 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_chan
-gf110_disp_ovly_mthd_chan = {
-	.name = "Overlay",
-	.addr = 0x001000,
-	.data = {
-		{ "Global", 1, &gf110_disp_ovly_mthd_base },
-		{}
-	}
-};
-
-struct nv50_disp_chan_impl
-gf110_disp_ovly_ofuncs = {
-	.base.ctor = nv50_disp_ovly_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = gf110_disp_dmac_init,
-	.base.fini = gf110_disp_dmac_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 5,
-	.attach = gf110_disp_dmac_object_attach,
-	.detach = gf110_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO PIO channel base class
- ******************************************************************************/
-
-static int
-gf110_disp_pioc_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_pioc *pioc = (void *)object;
-	int chid = pioc->base.chid;
-	int ret;
-
-	ret = nv50_disp_chan_init(&pioc->base);
-	if (ret)
-		return ret;
-
-	/* enable error reporting */
-	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
-
-	/* activate channel */
-	nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
-	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
-		nv_error(pioc, "init: 0x%08x\n",
-			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-gf110_disp_pioc_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_pioc *pioc = (void *)object;
-	int chid = pioc->base.chid;
-
-	nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
-	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
-		nv_error(pioc, "timeout: 0x%08x\n",
-			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	/* disable error reporting and completion notification */
-	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
-	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
-
-	return nv50_disp_chan_fini(&pioc->base, suspend);
-}
-
-/*******************************************************************************
- * EVO immediate overlay channel objects
- ******************************************************************************/
-
-struct nv50_disp_chan_impl
-gf110_disp_oimm_ofuncs = {
-	.base.ctor = nv50_disp_oimm_ctor,
-	.base.dtor = nv50_disp_pioc_dtor,
-	.base.init = gf110_disp_pioc_init,
-	.base.fini = gf110_disp_pioc_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 9,
-};
-
-/*******************************************************************************
- * EVO cursor channel objects
- ******************************************************************************/
-
-struct nv50_disp_chan_impl
-gf110_disp_curs_ofuncs = {
-	.base.ctor = nv50_disp_curs_ctor,
-	.base.dtor = nv50_disp_pioc_dtor,
-	.base.init = gf110_disp_pioc_init,
-	.base.fini = gf110_disp_pioc_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 13,
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-int
-gf110_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
-{
-	const u32 total  = nv_rd32(priv, 0x640414 + (head * 0x300));
-	const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
-	const u32 blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
-	union {
-		struct nv04_disp_scanoutpos_v0 v0;
-	} *args = data;
-	int ret;
-
-	nv_ioctl(object, "disp scanoutpos size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
-		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
-		args->v0.hblanke = (blanke & 0x0000ffff);
-		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
-		args->v0.hblanks = (blanks & 0x0000ffff);
-		args->v0.vtotal  = ( total & 0xffff0000) >> 16;
-		args->v0.htotal  = ( total & 0x0000ffff);
-		args->v0.time[0] = ktime_to_ns(ktime_get());
-		args->v0.vline = /* vline read locks hline */
-			nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
-		args->v0.time[1] = ktime_to_ns(ktime_get());
-		args->v0.hline =
-			nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
-	} else
-		return ret;
-
-	return 0;
-}
-
-static int
-gf110_disp_main_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_base *base = (void *)object;
-	int ret, i;
-	u32 tmp;
-
-	ret = nvkm_parent_init(&base->base);
-	if (ret)
-		return ret;
-
-	/* The below segments of code copying values from one register to
-	 * another appear to inform EVO of the display capabilities or
-	 * something similar.
-	 */
-
-	/* ... CRTC caps */
-	for (i = 0; i < priv->head.nr; i++) {
-		tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
-		nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
-		tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
-		nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
-		tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
-		nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
-	}
-
-	/* ... DAC caps */
-	for (i = 0; i < priv->dac.nr; i++) {
-		tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
-		nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
-	}
-
-	/* ... SOR caps */
-	for (i = 0; i < priv->sor.nr; i++) {
-		tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
-		nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
-	}
-
-	/* steal display away from vbios, or something like that */
-	if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
-		nv_wr32(priv, 0x6100ac, 0x00000100);
-		nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
-		if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
-			nv_error(priv, "timeout acquiring display\n");
-			return -EBUSY;
-		}
-	}
-
-	/* point at display engine memory area (hash table, objects) */
-	nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
-
-	/* enable supervisor interrupts, disable everything else */
-	nv_wr32(priv, 0x610090, 0x00000000);
-	nv_wr32(priv, 0x6100a0, 0x00000000);
-	nv_wr32(priv, 0x6100b0, 0x00000307);
-
-	/* disable underflow reporting, preventing an intermittent issue
-	 * on some gk104 boards where the production vbios left this
-	 * setting enabled by default.
-	 *
-	 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
-	 */
-	for (i = 0; i < priv->head.nr; i++)
-		nv_mask(priv, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
-
-	return 0;
-}
-
-static int
-gf110_disp_main_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_base *base = (void *)object;
-
-	/* disable all interrupts */
-	nv_wr32(priv, 0x6100b0, 0x00000000);
-
-	return nvkm_parent_fini(&base->base, suspend);
-}
-
-struct nvkm_ofuncs
-gf110_disp_main_ofuncs = {
-	.ctor = nv50_disp_main_ctor,
-	.dtor = nv50_disp_main_dtor,
-	.init = gf110_disp_main_init,
-	.fini = gf110_disp_main_fini,
-	.mthd = nv50_disp_main_mthd,
-	.ntfy = nvkm_disp_ntfy,
-};
-
-static struct nvkm_oclass
-gf110_disp_main_oclass[] = {
-	{ GF110_DISP, &gf110_disp_main_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-gf110_disp_sclass[] = {
-	{ GF110_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
-	{ GF110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
-	{ GF110_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
-	{ GF110_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
-	{ GF110_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static void
-gf110_disp_vblank_init(struct nvkm_event *event, int type, int head)
-{
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
-}
-
-static void
-gf110_disp_vblank_fini(struct nvkm_event *event, int type, int head)
-{
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
-}
-
-const struct nvkm_event_func
-gf110_disp_vblank_func = {
-	.ctor = nvkm_disp_vblank_ctor,
-	.init = gf110_disp_vblank_init,
-	.fini = gf110_disp_vblank_fini,
-};
-
-static struct nvkm_output *
-exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
-	    u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
-	    struct nvbios_outp *info)
-{
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	struct nvkm_output *outp;
-	u16 mask, type;
-
-	if (or < 4) {
-		type = DCB_OUTPUT_ANALOG;
-		mask = 0;
-	} else {
-		or -= 4;
-		switch (ctrl & 0x00000f00) {
-		case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
-		case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
-		case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
-		case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
-		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
-		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
-		default:
-			nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
-			return NULL;
-		}
-	}
-
-	mask  = 0x00c0 & (mask << 6);
-	mask |= 0x0001 << or;
-	mask |= 0x0100 << head;
-
-	list_for_each_entry(outp, &priv->base.outp, head) {
-		if ((outp->info.hasht & 0xff) == type &&
-		    (outp->info.hashm & mask) == mask) {
-			*data = nvbios_outp_match(bios, outp->info.hasht,
-							outp->info.hashm,
-						  ver, hdr, cnt, len, info);
-			if (!*data)
-				return NULL;
-			return outp;
-		}
-	}
-
-	return NULL;
-}
-
-static struct nvkm_output *
-exec_script(struct nv50_disp_priv *priv, int head, int id)
-{
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	struct nvkm_output *outp;
-	struct nvbios_outp info;
-	u8  ver, hdr, cnt, len;
-	u32 data, ctrl = 0;
-	int or;
-
-	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
-		ctrl = nv_rd32(priv, 0x640180 + (or * 0x20));
-		if (ctrl & (1 << head))
-			break;
-	}
-
-	if (or == 8)
-		return NULL;
-
-	outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
-	if (outp) {
-		struct nvbios_init init = {
-			.subdev = nv_subdev(priv),
-			.bios = bios,
-			.offset = info.script[id],
-			.outp = &outp->info,
-			.crtc = head,
-			.execute = 1,
-		};
-
-		nvbios_exec(&init);
-	}
-
-	return outp;
-}
-
-static struct nvkm_output *
-exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
-{
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	struct nvkm_output *outp;
-	struct nvbios_outp info1;
-	struct nvbios_ocfg info2;
-	u8  ver, hdr, cnt, len;
-	u32 data, ctrl = 0;
-	int or;
-
-	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
-		ctrl = nv_rd32(priv, 0x660180 + (or * 0x20));
-		if (ctrl & (1 << head))
-			break;
-	}
-
-	if (or == 8)
-		return NULL;
-
-	outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
-	if (!outp)
-		return NULL;
-
-	switch (outp->info.type) {
-	case DCB_OUTPUT_TMDS:
-		*conf = (ctrl & 0x00000f00) >> 8;
-		if (pclk >= 165000)
-			*conf |= 0x0100;
-		break;
-	case DCB_OUTPUT_LVDS:
-		*conf = priv->sor.lvdsconf;
-		break;
-	case DCB_OUTPUT_DP:
-		*conf = (ctrl & 0x00000f00) >> 8;
-		break;
-	case DCB_OUTPUT_ANALOG:
-	default:
-		*conf = 0x00ff;
-		break;
-	}
-
-	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
-	if (data && id < 0xff) {
-		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
-		if (data) {
-			struct nvbios_init init = {
-				.subdev = nv_subdev(priv),
-				.bios = bios,
-				.offset = data,
-				.outp = &outp->info,
-				.crtc = head,
-				.execute = 1,
-			};
-
-			nvbios_exec(&init);
-		}
-	}
-
-	return outp;
-}
-
-static void
-gf110_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
-{
-	exec_script(priv, head, 1);
-}
-
-static void
-gf110_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
-{
-	struct nvkm_output *outp = exec_script(priv, head, 2);
-
-	/* see note in nv50_disp_intr_unk20_0() */
-	if (outp && outp->info.type == DCB_OUTPUT_DP) {
-		struct nvkm_output_dp *outpdp = (void *)outp;
-		struct nvbios_init init = {
-			.subdev = nv_subdev(priv),
-			.bios = nvkm_bios(priv),
-			.outp = &outp->info,
-			.crtc = head,
-			.offset = outpdp->info.script[4],
-			.execute = 1,
-		};
-
-		nvbios_exec(&init);
-		atomic_set(&outpdp->lt.done, 0);
-	}
-}
-
-static void
-gf110_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
-{
-	struct nvkm_devinit *devinit = nvkm_devinit(priv);
-	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
-	if (pclk)
-		devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
-	nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
-}
-
-static void
-gf110_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
-			  struct dcb_output *outp)
-{
-	const int or = ffs(outp->or) - 1;
-	const u32 ctrl = nv_rd32(priv, 0x660200 + (or   * 0x020));
-	const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
-	const s32 vactive = nv_rd32(priv, 0x660414 + (head * 0x300)) & 0xffff;
-	const s32 vblanke = nv_rd32(priv, 0x66041c + (head * 0x300)) & 0xffff;
-	const s32 vblanks = nv_rd32(priv, 0x660420 + (head * 0x300)) & 0xffff;
-	const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
-	const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
-	const u32 hoff = (head * 0x800);
-	const u32 soff = (  or * 0x800);
-	const u32 loff = (link * 0x080) + soff;
-	const u32 symbol = 100000;
-	const u32 TU = 64;
-	u32 dpctrl = nv_rd32(priv, 0x61c10c + loff);
-	u32 clksor = nv_rd32(priv, 0x612300 + soff);
-	u32 datarate, link_nr, link_bw, bits;
-	u64 ratio, value;
-
-	link_nr  = hweight32(dpctrl & 0x000f0000);
-	link_bw  = (clksor & 0x007c0000) >> 18;
-	link_bw *= 27000;
-
-	/* symbols/hblank - algorithm taken from comments in tegra driver */
-	value = vblanke + vactive - vblanks - 7;
-	value = value * link_bw;
-	do_div(value, pclk);
-	value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
-	nv_mask(priv, 0x616620 + hoff, 0x0000ffff, value);
-
-	/* symbols/vblank - algorithm taken from comments in tegra driver */
-	value = vblanks - vblanke - 25;
-	value = value * link_bw;
-	do_div(value, pclk);
-	value = value - ((36 / link_nr) + 3) - 1;
-	nv_mask(priv, 0x616624 + hoff, 0x00ffffff, value);
-
-	/* watermark */
-	if      ((conf & 0x3c0) == 0x180) bits = 30;
-	else if ((conf & 0x3c0) == 0x140) bits = 24;
-	else                              bits = 18;
-	datarate = (pclk * bits) / 8;
-
-	ratio  = datarate;
-	ratio *= symbol;
-	do_div(ratio, link_nr * link_bw);
-
-	value  = (symbol - ratio) * TU;
-	value *= ratio;
-	do_div(value, symbol);
-	do_div(value, symbol);
-
-	value += 5;
-	value |= 0x08000000;
-
-	nv_wr32(priv, 0x616610 + hoff, value);
-}
-
-static void
-gf110_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
-{
-	struct nvkm_output *outp;
-	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
-	u32 conf, addr, data;
-
-	outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
-	if (!outp)
-		return;
-
-	/* see note in nv50_disp_intr_unk20_2() */
-	if (outp->info.type == DCB_OUTPUT_DP) {
-		u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
-		switch ((sync & 0x000003c0) >> 6) {
-		case 6: pclk = pclk * 30; break;
-		case 5: pclk = pclk * 24; break;
-		case 2:
-		default:
-			pclk = pclk * 18;
-			break;
-		}
-
-		if (nvkm_output_dp_train(outp, pclk, true))
-			ERR("link not trained before attach\n");
-	} else {
-		if (priv->sor.magic)
-			priv->sor.magic(outp);
-	}
-
-	exec_clkcmp(priv, head, 0, pclk, &conf);
-
-	if (outp->info.type == DCB_OUTPUT_ANALOG) {
-		addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
-		data = 0x00000000;
-	} else {
-		addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
-		data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
-		switch (outp->info.type) {
-		case DCB_OUTPUT_TMDS:
-			nv_mask(priv, addr, 0x007c0000, 0x00280000);
-			break;
-		case DCB_OUTPUT_DP:
-			gf110_disp_intr_unk2_2_tu(priv, head, &outp->info);
-			break;
-		default:
-			break;
-		}
-	}
-
-	nv_mask(priv, addr, 0x00000707, data);
-}
-
-static void
-gf110_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
-{
-	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
-	u32 conf;
-
-	exec_clkcmp(priv, head, 1, pclk, &conf);
-}
-
-void
-gf110_disp_intr_supervisor(struct work_struct *work)
-{
-	struct nv50_disp_priv *priv =
-		container_of(work, struct nv50_disp_priv, supervisor);
-	struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
-	u32 mask[4];
-	int head;
-
-	nv_debug(priv, "supervisor %d\n", ffs(priv->super));
-	for (head = 0; head < priv->head.nr; head++) {
-		mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
-		nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
-	}
-
-	if (priv->super & 0x00000001) {
-		nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
-		for (head = 0; head < priv->head.nr; head++) {
-			if (!(mask[head] & 0x00001000))
-				continue;
-			nv_debug(priv, "supervisor 1.0 - head %d\n", head);
-			gf110_disp_intr_unk1_0(priv, head);
-		}
-	} else
-	if (priv->super & 0x00000002) {
-		for (head = 0; head < priv->head.nr; head++) {
-			if (!(mask[head] & 0x00001000))
-				continue;
-			nv_debug(priv, "supervisor 2.0 - head %d\n", head);
-			gf110_disp_intr_unk2_0(priv, head);
-		}
-		for (head = 0; head < priv->head.nr; head++) {
-			if (!(mask[head] & 0x00010000))
-				continue;
-			nv_debug(priv, "supervisor 2.1 - head %d\n", head);
-			gf110_disp_intr_unk2_1(priv, head);
-		}
-		for (head = 0; head < priv->head.nr; head++) {
-			if (!(mask[head] & 0x00001000))
-				continue;
-			nv_debug(priv, "supervisor 2.2 - head %d\n", head);
-			gf110_disp_intr_unk2_2(priv, head);
-		}
-	} else
-	if (priv->super & 0x00000004) {
-		for (head = 0; head < priv->head.nr; head++) {
-			if (!(mask[head] & 0x00001000))
-				continue;
-			nv_debug(priv, "supervisor 3.0 - head %d\n", head);
-			gf110_disp_intr_unk4_0(priv, head);
-		}
-	}
-
-	for (head = 0; head < priv->head.nr; head++)
-		nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000);
-	nv_wr32(priv, 0x6101d0, 0x80000000);
-}
-
-static void
-gf110_disp_intr_error(struct nv50_disp_priv *priv, int chid)
-{
-	const struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
-	u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
-	u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
-	u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
-
-	nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
-		       "0x%08x 0x%08x\n",
-		 chid, (mthd & 0x0000ffc), data, mthd, unkn);
-
-	if (chid == 0) {
-		switch (mthd & 0xffc) {
-		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
-					    impl->mthd.core);
-			break;
-		default:
-			break;
-		}
-	} else
-	if (chid <= 4) {
-		switch (mthd & 0xffc) {
-		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
-					    impl->mthd.base);
-			break;
-		default:
-			break;
-		}
-	} else
-	if (chid <= 8) {
-		switch (mthd & 0xffc) {
-		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 5,
-					    impl->mthd.ovly);
-			break;
-		default:
-			break;
-		}
-	}
-
-	nv_wr32(priv, 0x61009c, (1 << chid));
-	nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
-}
-
-void
-gf110_disp_intr(struct nvkm_subdev *subdev)
-{
-	struct nv50_disp_priv *priv = (void *)subdev;
-	u32 intr = nv_rd32(priv, 0x610088);
-	int i;
-
-	if (intr & 0x00000001) {
-		u32 stat = nv_rd32(priv, 0x61008c);
-		while (stat) {
-			int chid = __ffs(stat); stat &= ~(1 << chid);
-			nv50_disp_chan_uevent_send(priv, chid);
-			nv_wr32(priv, 0x61008c, 1 << chid);
-		}
-		intr &= ~0x00000001;
-	}
-
-	if (intr & 0x00000002) {
-		u32 stat = nv_rd32(priv, 0x61009c);
-		int chid = ffs(stat) - 1;
-		if (chid >= 0)
-			gf110_disp_intr_error(priv, chid);
-		intr &= ~0x00000002;
-	}
-
-	if (intr & 0x00100000) {
-		u32 stat = nv_rd32(priv, 0x6100ac);
-		if (stat & 0x00000007) {
-			priv->super = (stat & 0x00000007);
-			schedule_work(&priv->supervisor);
-			nv_wr32(priv, 0x6100ac, priv->super);
-			stat &= ~0x00000007;
-		}
-
-		if (stat) {
-			nv_info(priv, "unknown intr24 0x%08x\n", stat);
-			nv_wr32(priv, 0x6100ac, stat);
-		}
-
-		intr &= ~0x00100000;
-	}
-
-	for (i = 0; i < priv->head.nr; i++) {
-		u32 mask = 0x01000000 << i;
-		if (mask & intr) {
-			u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
-			if (stat & 0x00000001)
-				nvkm_disp_vblank(&priv->base, i);
-			nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
-			nv_rd32(priv, 0x6100c0 + (i * 0x800));
-		}
-	}
-}
-
-static int
-gf110_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct nv50_disp_priv *priv;
-	int heads = nv_rd32(parent, 0x022448);
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, heads,
-			       "PDISP", "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gf110_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = gf110_disp_intr;
-	INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
-	priv->sclass = gf110_disp_sclass;
-	priv->head.nr = heads;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gf110_hda_eld;
-	priv->sor.hdmi = gf110_hdmi_ctrl;
-	return 0;
-}
-
-struct nvkm_oclass *
-gf110_disp_outp_sclass[] = {
-	&gf110_sor_dp_impl.base.base,
-	NULL
-};
-
-struct nvkm_oclass *
-gf110_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x90),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf110_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &gf110_disp_vblank_func,
-	.base.outp =  gf110_disp_outp_sclass,
-	.mthd.core = &gf110_disp_core_mthd_chan,
-	.mthd.base = &gf110_disp_base_mthd_chan,
-	.mthd.ovly = &gf110_disp_ovly_mthd_chan,
-	.mthd.prev = -0x020000,
-	.head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
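
The exec_lookup() helper above folds the SOR method-control value and the head/OR indices into a 16-bit mask that is compared against each output's DCB hash. The following standalone sketch re-derives that decode outside the driver; the enum, function name, and sublink comments are invented for illustration and inferred from the switch cases rather than documented.

#include <stdint.h>
#include <stdio.h>

enum dcb_type { ANALOG, LVDS, TMDS, DP, UNKNOWN };

static enum dcb_type decode_sor_mc(uint32_t ctrl, int *sublink)
{
	switch (ctrl & 0x00000f00) {
	case 0x00000000: *sublink = 1; return LVDS;
	case 0x00000100: *sublink = 1; return TMDS;	/* single link */
	case 0x00000200: *sublink = 2; return TMDS;	/* other link */
	case 0x00000500: *sublink = 3; return TMDS;	/* dual link */
	case 0x00000800: *sublink = 1; return DP;	/* link A */
	case 0x00000900: *sublink = 2; return DP;	/* link B */
	default:	 *sublink = 0; return UNKNOWN;
	}
}

int main(void)
{
	int sublink, or = 5, head = 0;	/* example indices */
	enum dcb_type type = decode_sor_mc(0x00000900, &sublink);
	uint16_t mask;

	/* same fold as exec_lookup(): SORs are numbered from 4 */
	mask  = 0x00c0 & (sublink << 6);
	mask |= 0x0001 << (or - 4);
	mask |= 0x0100 << head;

	printf("type=%d mask=0x%04x\n", (int)type, mask);
	return 0;
}

For the example inputs this yields mask 0x0182: OR bit, sublink bits, and head bit set, ready for a direct compare against hashm.
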
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
new file mode 100644
index 0000000..186fd3a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -0,0 +1,536 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+#include "rootnv50.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/devinit.h>
+
+void
+gf119_disp_vblank_init(struct nv50_disp *disp, int head)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
+}
+
+void
+gf119_disp_vblank_fini(struct nv50_disp *disp, int head)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
+}
+
+static struct nvkm_output *
+exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
+	    u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+	    struct nvbios_outp *info)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
+	struct nvkm_output *outp;
+	u16 mask, type;
+
+	if (or < 4) {
+		type = DCB_OUTPUT_ANALOG;
+		mask = 0;
+	} else {
+		or -= 4;
+		switch (ctrl & 0x00000f00) {
+		case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+		case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+		case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+		case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+		default:
+			nvkm_error(subdev, "unknown SOR mc %08x\n", ctrl);
+			return NULL;
+		}
+	}
+
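+	/* Fold type/link/head into what looks like the DCB hash layout
+	 * (low bits select the OR, bits 6:7 the sublink, bits 8:11 the
+	 * head) so it can be compared against outp->info.hashm below.
+	 */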
+	mask  = 0x00c0 & (mask << 6);
+	mask |= 0x0001 << or;
+	mask |= 0x0100 << head;
+
+	list_for_each_entry(outp, &disp->base.outp, head) {
+		if ((outp->info.hasht & 0xff) == type &&
+		    (outp->info.hashm & mask) == mask) {
+			*data = nvbios_outp_match(bios, outp->info.hasht,
+							outp->info.hashm,
+						  ver, hdr, cnt, len, info);
+			if (!*data)
+				return NULL;
+			return outp;
+		}
+	}
+
+	return NULL;
+}
+
+static struct nvkm_output *
+exec_script(struct nv50_disp *disp, int head, int id)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_output *outp;
+	struct nvbios_outp info;
+	u8  ver, hdr, cnt, len;
+	u32 data, ctrl = 0;
+	int or;
+
+	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
+		ctrl = nvkm_rd32(device, 0x640180 + (or * 0x20));
+		if (ctrl & (1 << head))
+			break;
+	}
+
+	if (or == 8)
+		return NULL;
+
+	outp = exec_lookup(disp, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
+	if (outp) {
+		struct nvbios_init init = {
+			.subdev = subdev,
+			.bios = bios,
+			.offset = info.script[id],
+			.outp = &outp->info,
+			.crtc = head,
+			.execute = 1,
+		};
+
+		nvbios_exec(&init);
+	}
+
+	return outp;
+}
+
+static struct nvkm_output *
+exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_output *outp;
+	struct nvbios_outp info1;
+	struct nvbios_ocfg info2;
+	u8  ver, hdr, cnt, len;
+	u32 data, ctrl = 0;
+	int or;
+
+	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
+		ctrl = nvkm_rd32(device, 0x660180 + (or * 0x20));
+		if (ctrl & (1 << head))
+			break;
+	}
+
+	if (or == 8)
+		return NULL;
+
+	outp = exec_lookup(disp, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
+	if (!outp)
+		return NULL;
+
+	switch (outp->info.type) {
+	case DCB_OUTPUT_TMDS:
+		*conf = (ctrl & 0x00000f00) >> 8;
+		if (pclk >= 165000)
+			*conf |= 0x0100;
+		break;
+	case DCB_OUTPUT_LVDS:
+		*conf = disp->sor.lvdsconf;
+		break;
+	case DCB_OUTPUT_DP:
+		*conf = (ctrl & 0x00000f00) >> 8;
+		break;
+	case DCB_OUTPUT_ANALOG:
+	default:
+		*conf = 0x00ff;
+		break;
+	}
+
+	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+	if (data && id < 0xff) {
+		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+		if (data) {
+			struct nvbios_init init = {
+				.subdev = subdev,
+				.bios = bios,
+				.offset = data,
+				.outp = &outp->info,
+				.crtc = head,
+				.execute = 1,
+			};
+
+			nvbios_exec(&init);
+		}
+	}
+
+	return outp;
+}
+
+static void
+gf119_disp_intr_unk1_0(struct nv50_disp *disp, int head)
+{
+	exec_script(disp, head, 1);
+}
+
+static void
+gf119_disp_intr_unk2_0(struct nv50_disp *disp, int head)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_output *outp = exec_script(disp, head, 2);
+
+	/* see note in nv50_disp_intr_unk20_0() */
+	if (outp && outp->info.type == DCB_OUTPUT_DP) {
+		struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
+		struct nvbios_init init = {
+			.subdev = subdev,
+			.bios = subdev->device->bios,
+			.outp = &outp->info,
+			.crtc = head,
+			.offset = outpdp->info.script[4],
+			.execute = 1,
+		};
+
+		nvbios_exec(&init);
+		atomic_set(&outpdp->lt.done, 0);
+	}
+}
+
+static void
+gf119_disp_intr_unk2_1(struct nv50_disp *disp, int head)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	struct nvkm_devinit *devinit = device->devinit;
+	u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
+	if (pclk)
+		nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head, pclk);
+	nvkm_wr32(device, 0x612200 + (head * 0x800), 0x00000000);
+}
+
+static void
+gf119_disp_intr_unk2_2_tu(struct nv50_disp *disp, int head,
+			  struct dcb_output *outp)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	const int or = ffs(outp->or) - 1;
+	const u32 ctrl = nvkm_rd32(device, 0x660200 + (or   * 0x020));
+	const u32 conf = nvkm_rd32(device, 0x660404 + (head * 0x300));
+	const s32 vactive = nvkm_rd32(device, 0x660414 + (head * 0x300)) & 0xffff;
+	const s32 vblanke = nvkm_rd32(device, 0x66041c + (head * 0x300)) & 0xffff;
+	const s32 vblanks = nvkm_rd32(device, 0x660420 + (head * 0x300)) & 0xffff;
+	const u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
+	const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
+	const u32 hoff = (head * 0x800);
+	const u32 soff = (  or * 0x800);
+	const u32 loff = (link * 0x080) + soff;
+	const u32 symbol = 100000;
+	const u32 TU = 64;
+	u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
+	u32 clksor = nvkm_rd32(device, 0x612300 + soff);
+	u32 datarate, link_nr, link_bw, bits;
+	u64 ratio, value;
+
+	link_nr  = hweight32(dpctrl & 0x000f0000);
+	link_bw  = (clksor & 0x007c0000) >> 18;
+	link_bw *= 27000;
+
+	/* symbols/hblank - algorithm taken from comments in tegra driver */
+	value = vblanke + vactive - vblanks - 7;
+	value = value * link_bw;
+	do_div(value, pclk);
+	value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
+	nvkm_mask(device, 0x616620 + hoff, 0x0000ffff, value);
+
+	/* symbols/vblank - algorithm taken from comments in tegra driver */
+	value = vblanks - vblanke - 25;
+	value = value * link_bw;
+	do_div(value, pclk);
+	value = value - ((36 / link_nr) + 3) - 1;
+	nvkm_mask(device, 0x616624 + hoff, 0x00ffffff, value);
+
+	/* watermark */
+	if      ((conf & 0x3c0) == 0x180) bits = 30;
+	else if ((conf & 0x3c0) == 0x140) bits = 24;
+	else                              bits = 18;
+	datarate = (pclk * bits) / 8;
+
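+	/* With symbol as the fixed-point scale, ratio below is the link
+	 * utilisation x scaled by 1e5, and the result works out to
+	 * roughly TU * x * (1 - x) + 5; inferred from the arithmetic,
+	 * the hardware meaning of the watermark is not documented here.
+	 */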
+	ratio  = datarate;
+	ratio *= symbol;
+	do_div(ratio, link_nr * link_bw);
+
+	value  = (symbol - ratio) * TU;
+	value *= ratio;
+	do_div(value, symbol);
+	do_div(value, symbol);
+
+	value += 5;
+	value |= 0x08000000;
+
+	nvkm_wr32(device, 0x616610 + hoff, value);
+}
+
+static void
+gf119_disp_intr_unk2_2(struct nv50_disp *disp, int head)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	struct nvkm_output *outp;
+	u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
+	u32 conf, addr, data;
+
+	outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
+	if (!outp)
+		return;
+
+	/* see note in nv50_disp_intr_unk20_2() */
+	if (outp->info.type == DCB_OUTPUT_DP) {
+		u32 sync = nvkm_rd32(device, 0x660404 + (head * 0x300));
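+		/* scale the pixel clock by bits per pixel (the 30/24/18
+		 * factors look like 3 components at 10/8/6 bpc) so link
+		 * training sees the data rate rather than the dot clock
+		 */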
+		switch ((sync & 0x000003c0) >> 6) {
+		case 6: pclk = pclk * 30; break;
+		case 5: pclk = pclk * 24; break;
+		case 2:
+		default:
+			pclk = pclk * 18;
+			break;
+		}
+
+		if (nvkm_output_dp_train(outp, pclk, true))
+			OUTP_ERR(outp, "link not trained before attach");
+	} else {
+		if (disp->func->sor.magic)
+			disp->func->sor.magic(outp);
+	}
+
+	exec_clkcmp(disp, head, 0, pclk, &conf);
+
+	if (outp->info.type == DCB_OUTPUT_ANALOG) {
+		addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
+		data = 0x00000000;
+	} else {
+		addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
+		data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
+		switch (outp->info.type) {
+		case DCB_OUTPUT_TMDS:
+			nvkm_mask(device, addr, 0x007c0000, 0x00280000);
+			break;
+		case DCB_OUTPUT_DP:
+			gf119_disp_intr_unk2_2_tu(disp, head, &outp->info);
+			break;
+		default:
+			break;
+		}
+	}
+
+	nvkm_mask(device, addr, 0x00000707, data);
+}
+
+static void
+gf119_disp_intr_unk4_0(struct nv50_disp *disp, int head)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	u32 pclk = nvkm_rd32(device, 0x660450 + (head * 0x300)) / 1000;
+	u32 conf;
+
+	exec_clkcmp(disp, head, 1, pclk, &conf);
+}
+
+void
+gf119_disp_intr_supervisor(struct work_struct *work)
+{
+	struct nv50_disp *disp =
+		container_of(work, struct nv50_disp, supervisor);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask[4];
+	int head;
+
+	nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super));
+	for (head = 0; head < disp->base.head.nr; head++) {
+		mask[head] = nvkm_rd32(device, 0x6101d4 + (head * 0x800));
+		nvkm_debug(subdev, "head %d: %08x\n", head, mask[head]);
+	}
+
+	if (disp->super & 0x00000001) {
+		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
+		for (head = 0; head < disp->base.head.nr; head++) {
+			if (!(mask[head] & 0x00001000))
+				continue;
+			nvkm_debug(subdev, "supervisor 1.0 - head %d\n", head);
+			gf119_disp_intr_unk1_0(disp, head);
+		}
+	} else
+	if (disp->super & 0x00000002) {
+		for (head = 0; head < disp->base.head.nr; head++) {
+			if (!(mask[head] & 0x00001000))
+				continue;
+			nvkm_debug(subdev, "supervisor 2.0 - head %d\n", head);
+			gf119_disp_intr_unk2_0(disp, head);
+		}
+		for (head = 0; head < disp->base.head.nr; head++) {
+			if (!(mask[head] & 0x00010000))
+				continue;
+			nvkm_debug(subdev, "supervisor 2.1 - head %d\n", head);
+			gf119_disp_intr_unk2_1(disp, head);
+		}
+		for (head = 0; head < disp->base.head.nr; head++) {
+			if (!(mask[head] & 0x00001000))
+				continue;
+			nvkm_debug(subdev, "supervisor 2.2 - head %d\n", head);
+			gf119_disp_intr_unk2_2(disp, head);
+		}
+	} else
+	if (disp->super & 0x00000004) {
+		for (head = 0; head < disp->base.head.nr; head++) {
+			if (!(mask[head] & 0x00001000))
+				continue;
+			nvkm_debug(subdev, "supervisor 3.0 - head %d\n", head);
+			gf119_disp_intr_unk4_0(disp, head);
+		}
+	}
+
+	for (head = 0; head < disp->base.head.nr; head++)
+		nvkm_wr32(device, 0x6101d4 + (head * 0x800), 0x00000000);
+	nvkm_wr32(device, 0x6101d0, 0x80000000);
+}
+
+static void
+gf119_disp_intr_error(struct nv50_disp *disp, int chid)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mthd = nvkm_rd32(device, 0x6101f0 + (chid * 12));
+	u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12));
+	u32 unkn = nvkm_rd32(device, 0x6101f8 + (chid * 12));
+
+	nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
+		   chid, (mthd & 0x0000ffc), data, mthd, unkn);
+
+	if (chid < ARRAY_SIZE(disp->chan)) {
+		switch (mthd & 0xffc) {
+		case 0x0080:
+			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
+			break;
+		default:
+			break;
+		}
+	}
+
+	nvkm_wr32(device, 0x61009c, (1 << chid));
+	nvkm_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
+}
+
+void
+gf119_disp_intr(struct nv50_disp *disp)
+{
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x610088);
+	int i;
+
+	if (intr & 0x00000001) {
+		u32 stat = nvkm_rd32(device, 0x61008c);
+		while (stat) {
+			int chid = __ffs(stat); stat &= ~(1 << chid);
+			nv50_disp_chan_uevent_send(disp, chid);
+			nvkm_wr32(device, 0x61008c, 1 << chid);
+		}
+		intr &= ~0x00000001;
+	}
+
+	if (intr & 0x00000002) {
+		u32 stat = nvkm_rd32(device, 0x61009c);
+		int chid = ffs(stat) - 1;
+		if (chid >= 0)
+			gf119_disp_intr_error(disp, chid);
+		intr &= ~0x00000002;
+	}
+
+	if (intr & 0x00100000) {
+		u32 stat = nvkm_rd32(device, 0x6100ac);
+		if (stat & 0x00000007) {
+			disp->super = (stat & 0x00000007);
+			schedule_work(&disp->supervisor);
+			nvkm_wr32(device, 0x6100ac, disp->super);
+			stat &= ~0x00000007;
+		}
+
+		if (stat) {
+			nvkm_warn(subdev, "intr24 %08x\n", stat);
+			nvkm_wr32(device, 0x6100ac, stat);
+		}
+
+		intr &= ~0x00100000;
+	}
+
+	for (i = 0; i < disp->base.head.nr; i++) {
+		u32 mask = 0x01000000 << i;
+		if (mask & intr) {
+			u32 stat = nvkm_rd32(device, 0x6100bc + (i * 0x800));
+			if (stat & 0x00000001)
+				nvkm_disp_vblank(&disp->base, i);
+			nvkm_mask(device, 0x6100bc + (i * 0x800), 0, 0);
+			nvkm_rd32(device, 0x6100c0 + (i * 0x800));
+		}
+	}
+}
+
+int
+gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
+		int index, struct nvkm_disp **pdisp)
+{
+	u32 heads = nvkm_rd32(device, 0x022448);
+	return nv50_disp_new_(func, device, index, heads, pdisp);
+}
+
+static const struct nv50_disp_func
+gf119_disp = {
+	.intr = gf119_disp_intr,
+	.uevent = &gf119_disp_chan_uevent,
+	.super = gf119_disp_intr_supervisor,
+	.root = &gf119_disp_root_oclass,
+	.head.vblank_init = gf119_disp_vblank_init,
+	.head.vblank_fini = gf119_disp_vblank_fini,
+	.head.scanoutpos = gf119_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = gf119_sor_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gf119_hda_eld,
+	.sor.hdmi = gf119_hdmi_ctrl,
+};
+
+int
+gf119_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+	return gf119_disp_new_(&gf119_disp, device, index, pdisp);
+}
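
gf119_disp_intr_unk2_2_tu() above boils down to fixed-point arithmetic: with symbol = 100000 as the scale, ratio is the link utilisation and the watermark comes out near TU * x * (1 - x) + 5. Below is a standalone sketch of just the watermark part, with plain 64-bit division standing in for do_div(); the mode and link numbers are purely illustrative.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t symbol = 100000, TU = 64;
	uint64_t pclk = 148500;				/* kHz, 1080p60 */
	uint64_t link_nr = 2, link_bw = 10 * 27000;	/* 2.7Gbps: 0x0a in clksor */
	uint64_t bits = 24;				/* 8bpc RGB */
	uint64_t datarate = pclk * bits / 8;

	uint64_t ratio = datarate * symbol / (link_nr * link_bw);
	uint64_t value = (symbol - ratio) * TU * ratio / symbol / symbol;

	value += 5;
	/* bit 27 appears to be an enable bit in the real register */
	printf("watermark=%llu reg=0x%08llx\n",
	       (unsigned long long)value,
	       (unsigned long long)(value | 0x08000000));
	return 0;
}

On these inputs the utilisation is 82.5% and the computed watermark is 14, comfortably inside the 64-symbol time unit.
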
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index 6f4019a..a86384b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -22,247 +22,32 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gk104_disp_core_mthd_head = {
-	.mthd = 0x0300,
-	.addr = 0x000300,
-	.data = {
-		{ 0x0400, 0x660400 },
-		{ 0x0404, 0x660404 },
-		{ 0x0408, 0x660408 },
-		{ 0x040c, 0x66040c },
-		{ 0x0410, 0x660410 },
-		{ 0x0414, 0x660414 },
-		{ 0x0418, 0x660418 },
-		{ 0x041c, 0x66041c },
-		{ 0x0420, 0x660420 },
-		{ 0x0424, 0x660424 },
-		{ 0x0428, 0x660428 },
-		{ 0x042c, 0x66042c },
-		{ 0x0430, 0x660430 },
-		{ 0x0434, 0x660434 },
-		{ 0x0438, 0x660438 },
-		{ 0x0440, 0x660440 },
-		{ 0x0444, 0x660444 },
-		{ 0x0448, 0x660448 },
-		{ 0x044c, 0x66044c },
-		{ 0x0450, 0x660450 },
-		{ 0x0454, 0x660454 },
-		{ 0x0458, 0x660458 },
-		{ 0x045c, 0x66045c },
-		{ 0x0460, 0x660460 },
-		{ 0x0468, 0x660468 },
-		{ 0x046c, 0x66046c },
-		{ 0x0470, 0x660470 },
-		{ 0x0474, 0x660474 },
-		{ 0x047c, 0x66047c },
-		{ 0x0480, 0x660480 },
-		{ 0x0484, 0x660484 },
-		{ 0x0488, 0x660488 },
-		{ 0x048c, 0x66048c },
-		{ 0x0490, 0x660490 },
-		{ 0x0494, 0x660494 },
-		{ 0x0498, 0x660498 },
-		{ 0x04a0, 0x6604a0 },
-		{ 0x04b0, 0x6604b0 },
-		{ 0x04b8, 0x6604b8 },
-		{ 0x04bc, 0x6604bc },
-		{ 0x04c0, 0x6604c0 },
-		{ 0x04c4, 0x6604c4 },
-		{ 0x04c8, 0x6604c8 },
-		{ 0x04d0, 0x6604d0 },
-		{ 0x04d4, 0x6604d4 },
-		{ 0x04e0, 0x6604e0 },
-		{ 0x04e4, 0x6604e4 },
-		{ 0x04e8, 0x6604e8 },
-		{ 0x04ec, 0x6604ec },
-		{ 0x04f0, 0x6604f0 },
-		{ 0x04f4, 0x6604f4 },
-		{ 0x04f8, 0x6604f8 },
-		{ 0x04fc, 0x6604fc },
-		{ 0x0500, 0x660500 },
-		{ 0x0504, 0x660504 },
-		{ 0x0508, 0x660508 },
-		{ 0x050c, 0x66050c },
-		{ 0x0510, 0x660510 },
-		{ 0x0514, 0x660514 },
-		{ 0x0518, 0x660518 },
-		{ 0x051c, 0x66051c },
-		{ 0x0520, 0x660520 },
-		{ 0x0524, 0x660524 },
-		{ 0x052c, 0x66052c },
-		{ 0x0530, 0x660530 },
-		{ 0x054c, 0x66054c },
-		{ 0x0550, 0x660550 },
-		{ 0x0554, 0x660554 },
-		{ 0x0558, 0x660558 },
-		{ 0x055c, 0x66055c },
-		{}
-	}
+static const struct nv50_disp_func
+gk104_disp = {
+	.intr = gf119_disp_intr,
+	.uevent = &gf119_disp_chan_uevent,
+	.super = gf119_disp_intr_supervisor,
+	.root = &gk104_disp_root_oclass,
+	.head.vblank_init = gf119_disp_vblank_init,
+	.head.vblank_fini = gf119_disp_vblank_fini,
+	.head.scanoutpos = gf119_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = gf119_sor_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gf119_hda_eld,
+	.sor.hdmi = gk104_hdmi_ctrl,
 };
 
-const struct nv50_disp_mthd_chan
-gk104_disp_core_mthd_chan = {
-	.name = "Core",
-	.addr = 0x000000,
-	.data = {
-		{ "Global", 1, &gf110_disp_core_mthd_base },
-		{    "DAC", 3, &gf110_disp_core_mthd_dac  },
-		{    "SOR", 8, &gf110_disp_core_mthd_sor  },
-		{   "PIOR", 4, &gf110_disp_core_mthd_pior },
-		{   "HEAD", 4, &gk104_disp_core_mthd_head },
-		{}
-	}
-};
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gk104_disp_ovly_mthd_base = {
-	.mthd = 0x0000,
-	.data = {
-		{ 0x0080, 0x665080 },
-		{ 0x0084, 0x665084 },
-		{ 0x0088, 0x665088 },
-		{ 0x008c, 0x66508c },
-		{ 0x0090, 0x665090 },
-		{ 0x0094, 0x665094 },
-		{ 0x00a0, 0x6650a0 },
-		{ 0x00a4, 0x6650a4 },
-		{ 0x00b0, 0x6650b0 },
-		{ 0x00b4, 0x6650b4 },
-		{ 0x00b8, 0x6650b8 },
-		{ 0x00c0, 0x6650c0 },
-		{ 0x00c4, 0x6650c4 },
-		{ 0x00e0, 0x6650e0 },
-		{ 0x00e4, 0x6650e4 },
-		{ 0x00e8, 0x6650e8 },
-		{ 0x0100, 0x665100 },
-		{ 0x0104, 0x665104 },
-		{ 0x0108, 0x665108 },
-		{ 0x010c, 0x66510c },
-		{ 0x0110, 0x665110 },
-		{ 0x0118, 0x665118 },
-		{ 0x011c, 0x66511c },
-		{ 0x0120, 0x665120 },
-		{ 0x0124, 0x665124 },
-		{ 0x0130, 0x665130 },
-		{ 0x0134, 0x665134 },
-		{ 0x0138, 0x665138 },
-		{ 0x013c, 0x66513c },
-		{ 0x0140, 0x665140 },
-		{ 0x0144, 0x665144 },
-		{ 0x0148, 0x665148 },
-		{ 0x014c, 0x66514c },
-		{ 0x0150, 0x665150 },
-		{ 0x0154, 0x665154 },
-		{ 0x0158, 0x665158 },
-		{ 0x015c, 0x66515c },
-		{ 0x0160, 0x665160 },
-		{ 0x0164, 0x665164 },
-		{ 0x0168, 0x665168 },
-		{ 0x016c, 0x66516c },
-		{ 0x0400, 0x665400 },
-		{ 0x0404, 0x665404 },
-		{ 0x0408, 0x665408 },
-		{ 0x040c, 0x66540c },
-		{ 0x0410, 0x665410 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_chan
-gk104_disp_ovly_mthd_chan = {
-	.name = "Overlay",
-	.addr = 0x001000,
-	.data = {
-		{ "Global", 1, &gk104_disp_ovly_mthd_base },
-		{}
-	}
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_disp_sclass[] = {
-	{ GK104_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
-	{ GK104_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
-	{ GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
-	{ GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
-	{ GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
-	{}
-};
-
-static struct nvkm_oclass
-gk104_disp_main_oclass[] = {
-	{ GK104_DISP, &gf110_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gk104_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gk104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int heads = nv_rd32(parent, 0x022448);
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, heads,
-			       "PDISP", "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gk104_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = gf110_disp_intr;
-	INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
-	priv->sclass = gk104_disp_sclass;
-	priv->head.nr = heads;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gf110_hda_eld;
-	priv->sor.hdmi = gk104_hdmi_ctrl;
-	return 0;
+	return gf119_disp_new_(&gk104_disp, device, index, pdisp);
 }
-
-struct nvkm_oclass *
-gk104_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x91),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &gf110_disp_vblank_func,
-	.base.outp =  gf110_disp_outp_sclass,
-	.mthd.core = &gk104_disp_core_mthd_chan,
-	.mthd.base = &gf110_disp_base_mthd_chan,
-	.mthd.ovly = &gk104_disp_ovly_mthd_chan,
-	.mthd.prev = -0x020000,
-	.head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
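
The gk104 conversion above is representative of the whole series: the per-chipset ctor/oclass boilerplate is replaced by a static const function table that mostly points at shared gf119 code, with only data (root class, HDMI control function) differing. A minimal sketch of the pattern follows; all names are invented for illustration, not the nvkm API.

#include <stdio.h>

struct disp_func {
	void (*intr)(void);
	int sor_nr;
};

static void common_intr(void) { printf("shared intr handler\n"); }

/* two "chipsets" sharing one implementation, differing only in data */
static const struct disp_func chip_a = { .intr = common_intr, .sor_nr = 4 };
static const struct disp_func chip_b = { .intr = common_intr, .sor_nr = 2 };

struct disp { const struct disp_func *func; };

static void disp_new(const struct disp_func *func, struct disp *disp)
{
	disp->func = func;	/* constructor just stores the table */
}

int main(void)
{
	struct disp d;

	disp_new(&chip_a, &d);
	d.func->intr();
	printf("sor_nr=%d\n", d.func->sor_nr);
	return 0;
}

The payoff is visible in the diffstat: each new chipset file shrinks to a table plus a one-line constructor.
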
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
index daa4b46..0d574c7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
@@ -22,82 +22,32 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk110_disp_sclass[] = {
-	{ GK110_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
-	{ GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
-	{ GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
-	{ GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
-	{ GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
-	{}
+static const struct nv50_disp_func
+gk110_disp = {
+	.intr = gf119_disp_intr,
+	.uevent = &gf119_disp_chan_uevent,
+	.super = gf119_disp_intr_supervisor,
+	.root = &gk110_disp_root_oclass,
+	.head.vblank_init = gf119_disp_vblank_init,
+	.head.vblank_fini = gf119_disp_vblank_fini,
+	.head.scanoutpos = gf119_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = gf119_sor_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gf119_hda_eld,
+	.sor.hdmi = gk104_hdmi_ctrl,
 };
 
-static struct nvkm_oclass
-gk110_disp_main_oclass[] = {
-	{ GK110_DISP, &gf110_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gk110_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gk110_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int heads = nv_rd32(parent, 0x022448);
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, heads,
-			       "PDISP", "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gk110_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = gf110_disp_intr;
-	INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
-	priv->sclass = gk110_disp_sclass;
-	priv->head.nr = heads;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gf110_hda_eld;
-	priv->sor.hdmi = gk104_hdmi_ctrl;
-	return 0;
+	return gf119_disp_new_(&gk110_disp, device, index, pdisp);
 }
-
-struct nvkm_oclass *
-gk110_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x92),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk110_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &gf110_disp_vblank_func,
-	.base.outp =  gf110_disp_outp_sclass,
-	.mthd.core = &gk104_disp_core_mthd_chan,
-	.mthd.base = &gf110_disp_base_mthd_chan,
-	.mthd.ovly = &gk104_disp_ovly_mthd_chan,
-	.mthd.prev = -0x020000,
-	.head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
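
gk110 reuses gf119_disp_intr() via the table above; that handler drains the pending-channel mask one bit at a time with __ffs(). The generic loop, sketched standalone with the GCC/Clang builtin __builtin_ctz() standing in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

static void uevent_send(int chid) { printf("uevent chid %d\n", chid); }

int main(void)
{
	uint32_t stat = 0x00000112;	/* channels 1, 4 and 8 pending */

	while (stat) {
		int chid = __builtin_ctz(stat);	/* lowest set bit */
		stat &= ~(1u << chid);		/* ack in local copy */
		uevent_send(chid);
		/* the real handler also writes 1 << chid back to 0x61008c */
	}
	return 0;
}
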
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index 881cc94..b694414 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -22,82 +22,32 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gm107_disp_sclass[] = {
-	{ GM107_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
-	{ GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
-	{ GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
-	{ GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
-	{ GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
-	{}
+static const struct nv50_disp_func
+gm107_disp = {
+	.intr = gf119_disp_intr,
+	.uevent = &gf119_disp_chan_uevent,
+	.super = gf119_disp_intr_supervisor,
+	.root = &gm107_disp_root_oclass,
+	.head.vblank_init = gf119_disp_vblank_init,
+	.head.vblank_fini = gf119_disp_vblank_fini,
+	.head.scanoutpos = gf119_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = gf119_sor_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gf119_hda_eld,
+	.sor.hdmi = gk104_hdmi_ctrl,
 };
 
-static struct nvkm_oclass
-gm107_disp_main_oclass[] = {
-	{ GM107_DISP, &gf110_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gm107_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gm107_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int heads = nv_rd32(parent, 0x022448);
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, heads,
-			       "PDISP", "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gm107_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = gf110_disp_intr;
-	INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
-	priv->sclass = gm107_disp_sclass;
-	priv->head.nr = heads;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gf110_hda_eld;
-	priv->sor.hdmi = gk104_hdmi_ctrl;
-	return 0;
+	return gf119_disp_new_(&gm107_disp, device, index, pdisp);
 }
-
-struct nvkm_oclass *
-gm107_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x07),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm107_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &gf110_disp_vblank_func,
-	.base.outp =  gf110_disp_outp_sclass,
-	.mthd.core = &gk104_disp_core_mthd_chan,
-	.mthd.base = &gf110_disp_base_mthd_chan,
-	.mthd.ovly = &gk104_disp_ovly_mthd_chan,
-	.mthd.prev = -0x020000,
-	.head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
index 67004f8..30f1987 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm204.c
@@ -22,90 +22,33 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-#include "outpdp.h"
+#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gm204_disp_sclass[] = {
-	{ GM204_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
-	{ GK110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
-	{ GK104_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
-	{ GK104_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
-	{ GK104_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
-	{}
+static const struct nv50_disp_func
+gm204_disp = {
+	.intr = gf119_disp_intr,
+	.uevent = &gf119_disp_chan_uevent,
+	.super = gf119_disp_intr_supervisor,
+	.root = &gm204_disp_root_oclass,
+	.head.vblank_init = gf119_disp_vblank_init,
+	.head.vblank_fini = gf119_disp_vblank_fini,
+	.head.scanoutpos = gf119_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = gm204_sor_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gf119_hda_eld,
+	.sor.hdmi = gk104_hdmi_ctrl,
+	.sor.magic = gm204_sor_magic,
 };
 
-static struct nvkm_oclass
-gm204_disp_main_oclass[] = {
-	{ GM204_DISP, &gf110_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gm204_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gm204_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int heads = nv_rd32(parent, 0x022448);
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, heads,
-			       "PDISP", "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gm204_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = gf110_disp_intr;
-	INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
-	priv->sclass = gm204_disp_sclass;
-	priv->head.nr = heads;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gf110_hda_eld;
-	priv->sor.hdmi = gf110_hdmi_ctrl;
-	priv->sor.magic = gm204_sor_magic;
-	return 0;
+	return gf119_disp_new_(&gm204_disp, device, index, pdisp);
 }
-
-struct nvkm_oclass *
-gm204_disp_outp_sclass[] = {
-	&gm204_sor_dp_impl.base.base,
-	NULL
-};
-
-struct nvkm_oclass *
-gm204_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x07),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &gf110_disp_vblank_func,
-	.base.outp =  gm204_disp_outp_sclass,
-	.mthd.core = &gk104_disp_core_mthd_chan,
-	.mthd.base = &gf110_disp_base_mthd_chan,
-	.mthd.ovly = &gk104_disp_ovly_mthd_chan,
-	.mthd.prev = -0x020000,
-	.head.scanoutpos = gf110_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
index a453072..6bc3bf0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c
@@ -22,127 +22,34 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-gt200_disp_ovly_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x6109a0 },
-		{ 0x0088, 0x6109c0 },
-		{ 0x008c, 0x6109c8 },
-		{ 0x0090, 0x6109b4 },
-		{ 0x0094, 0x610970 },
-		{ 0x00a0, 0x610998 },
-		{ 0x00a4, 0x610964 },
-		{ 0x00b0, 0x610c98 },
-		{ 0x00b4, 0x610ca4 },
-		{ 0x00b8, 0x610cac },
-		{ 0x00c0, 0x610958 },
-		{ 0x00e0, 0x6109a8 },
-		{ 0x00e4, 0x6109d0 },
-		{ 0x00e8, 0x6109d8 },
-		{ 0x0100, 0x61094c },
-		{ 0x0104, 0x610984 },
-		{ 0x0108, 0x61098c },
-		{ 0x0800, 0x6109f8 },
-		{ 0x0808, 0x610a08 },
-		{ 0x080c, 0x610a10 },
-		{ 0x0810, 0x610a00 },
-		{}
-	}
+static const struct nv50_disp_func
+gt200_disp = {
+	.intr = nv50_disp_intr,
+	.uevent = &nv50_disp_chan_uevent,
+	.super = nv50_disp_intr_supervisor,
+	.root = &gt200_disp_root_oclass,
+	.head.vblank_init = nv50_disp_vblank_init,
+	.head.vblank_fini = nv50_disp_vblank_fini,
+	.head.scanoutpos = nv50_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.external.tmds = nv50_pior_output_new,
+	.outp.external.dp = nv50_pior_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 2,
+	.sor.power = nv50_sor_power,
+	.sor.hdmi = g84_hdmi_ctrl,
+	.pior.nr = 3,
+	.pior.power = nv50_pior_power,
 };
 
-static const struct nv50_disp_mthd_chan
-gt200_disp_ovly_mthd_chan = {
-	.name = "Overlay",
-	.addr = 0x000540,
-	.data = {
-		{ "Global", 1, &gt200_disp_ovly_mthd_base },
-		{}
-	}
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gt200_disp_sclass[] = {
-	{ GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
-	{ GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
-	{ GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
-	{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
-	{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
-	{}
-};
-
-static struct nvkm_oclass
-gt200_disp_main_oclass[] = {
-	{ GT200_DISP, &nv50_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gt200_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gt200_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gt200_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = nv50_disp_intr;
-	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
-	priv->sclass = gt200_disp_sclass;
-	priv->head.nr = 2;
-	priv->dac.nr = 3;
-	priv->sor.nr = 2;
-	priv->pior.nr = 3;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hdmi = g84_hdmi_ctrl;
-	priv->pior.power = nv50_pior_power;
-	return 0;
+	return nv50_disp_new_(&gt200_disp, device, index, 2, pdisp);
 }
-
-struct nvkm_oclass *
-gt200_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x83),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt200_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &nv50_disp_vblank_func,
-	.base.outp =  nv50_disp_outp_sclass,
-	.mthd.core = &g84_disp_core_mthd_chan,
-	.mthd.base = &g84_disp_base_mthd_chan,
-	.mthd.ovly = &gt200_disp_ovly_mthd_chan,
-	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index 55f0d3a..9402628 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -22,83 +22,36 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "rootnv50.h"
 
-#include <nvif/class.h>
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-static struct nvkm_oclass
-gt215_disp_sclass[] = {
-	{ GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
-	{ GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
-	{ GT214_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
-	{ GT214_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
-	{ GT214_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
-	{}
+static const struct nv50_disp_func
+gt215_disp = {
+	.intr = nv50_disp_intr,
+	.uevent = &nv50_disp_chan_uevent,
+	.super = nv50_disp_intr_supervisor,
+	.root = &gt215_disp_root_oclass,
+	.head.vblank_init = nv50_disp_vblank_init,
+	.head.vblank_fini = nv50_disp_vblank_fini,
+	.head.scanoutpos = nv50_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.internal.dp = g94_sor_dp_new,
+	.outp.external.tmds = nv50_pior_output_new,
+	.outp.external.dp = nv50_pior_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 4,
+	.sor.power = nv50_sor_power,
+	.sor.hda_eld = gt215_hda_eld,
+	.sor.hdmi = gt215_hdmi_ctrl,
+	.pior.nr = 3,
+	.pior.power = nv50_pior_power,
 };
 
-static struct nvkm_oclass
-gt215_disp_main_oclass[] = {
-	{ GT214_DISP, &nv50_disp_main_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static int
-gt215_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gt215_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv50_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = gt215_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = nv50_disp_intr;
-	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
-	priv->sclass = gt215_disp_sclass;
-	priv->head.nr = 2;
-	priv->dac.nr = 3;
-	priv->sor.nr = 4;
-	priv->pior.nr = 3;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->sor.hda_eld = gt215_hda_eld;
-	priv->sor.hdmi = gt215_hdmi_ctrl;
-	priv->pior.power = nv50_pior_power;
-	return 0;
+	return nv50_disp_new_(&gt215_disp, device, index, 2, pdisp);
 }
-
-struct nvkm_oclass *
-gt215_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x85),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &nv50_disp_vblank_func,
-	.base.outp =  g94_disp_outp_sclass,
-	.mthd.core = &g94_disp_core_mthd_chan,
-	.mthd.base = &g84_disp_base_mthd_chan,
-	.mthd.ovly = &g84_disp_ovly_mthd_chan,
-	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
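
The gt215.c hunk above shows the shape of the whole series: the per-chipset
nvkm_oclass/ofuncs boilerplate (a ~60-line ctor that pokes fields into a priv
struct, plus ctor/dtor/init/fini hooks) collapses into one const function-and-
data table and a one-line constructor that hands the table to nv50_disp_new_().
A minimal sketch of that pattern, using hypothetical hw_* names rather than the
real nvkm types:

    #include <errno.h>
    #include <stdlib.h>

    /* Behaviour and simple per-chipset data live in one const table... */
    struct hw_funcs {
        void (*intr)(void *disp);   /* chipset-specific IRQ handler */
        int heads;                  /* per-chipset data */
    };

    struct hw_disp {
        const struct hw_funcs *func;
    };

    /* ...and one shared constructor binds it, replacing the per-chipset
     * ctor/dtor/init/fini quadruplets. */
    static int
    hw_disp_new_(const struct hw_funcs *func, struct hw_disp **pdisp)
    {
        struct hw_disp *disp = calloc(1, sizeof(*disp));
        if (!disp)
            return -ENOMEM;
        disp->func = func;
        *pdisp = disp;
        return 0;
    }

    static void gxxx_intr(void *disp) { /* chipset-specific work */ }

    static const struct hw_funcs gxxx_funcs = {
        .intr  = gxxx_intr,
        .heads = 2,
    };

    int gxxx_disp_new(struct hw_disp **pdisp)
    {
        return hw_disp_new_(&gxxx_funcs, pdisp);
    }

Dispatch then becomes disp->func->intr(disp) at the call site, which is exactly
what the new nv50.c wrapper functions further down in this diff do.
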
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c
deleted file mode 100644
index b9813d2..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf110.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv50.h"
-#include "outp.h"
-
-#include <core/client.h>
-#include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
-#include <subdev/timer.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-int
-gf110_hda_eld(NV50_DISP_MTHD_V1)
-{
-	union {
-		struct nv50_disp_sor_hda_eld_v0 v0;
-	} *args = data;
-	const u32 soff = outp->or * 0x030;
-	const u32 hoff = head * 0x800;
-	int ret, i;
-
-	nv_ioctl(object, "disp sor hda eld size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version);
-		if (size > 0x60)
-			return -E2BIG;
-	} else
-		return ret;
-
-	if (size && args->v0.data[0]) {
-		if (outp->info.type == DCB_OUTPUT_DP) {
-			nv_mask(priv, 0x616618 + hoff, 0x8000000c, 0x80000001);
-			nv_wait(priv, 0x616618 + hoff, 0x80000000, 0x00000000);
-		}
-		nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
-		for (i = 0; i < size; i++)
-			nv_wr32(priv, 0x10ec00 + soff, (i << 8) | args->v0.data[i]);
-		for (; i < 0x60; i++)
-			nv_wr32(priv, 0x10ec00 + soff, (i << 8));
-		nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
-	} else {
-		if (outp->info.type == DCB_OUTPUT_DP) {
-			nv_mask(priv, 0x616618 + hoff, 0x80000001, 0x80000000);
-			nv_wait(priv, 0x616618 + hoff, 0x80000000, 0x00000000);
-		}
-		nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000 | !!size);
-	}
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
new file mode 100644
index 0000000..af99efb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+#include "outp.h"
+
+#include <core/client.h>
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+gf119_hda_eld(NV50_DISP_MTHD_V1)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	union {
+		struct nv50_disp_sor_hda_eld_v0 v0;
+	} *args = data;
+	const u32 soff = outp->or * 0x030;
+	const u32 hoff = head * 0x800;
+	int ret, i;
+
+	nvif_ioctl(object, "disp sor hda eld size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, true)) {
+		nvif_ioctl(object, "disp sor hda eld vers %d\n",
+			   args->v0.version);
+		if (size > 0x60)
+			return -E2BIG;
+	} else
+		return ret;
+
+	if (size && args->v0.data[0]) {
+		if (outp->info.type == DCB_OUTPUT_DP) {
+			nvkm_mask(device, 0x616618 + hoff, 0x8000000c, 0x80000001);
+			nvkm_msec(device, 2000,
+				u32 tmp = nvkm_rd32(device, 0x616618 + hoff);
+				if (!(tmp & 0x80000000))
+					break;
+			);
+		}
+		nvkm_mask(device, 0x616548 + hoff, 0x00000070, 0x00000000);
+		for (i = 0; i < size; i++)
+			nvkm_wr32(device, 0x10ec00 + soff, (i << 8) | args->v0.data[i]);
+		for (; i < 0x60; i++)
+			nvkm_wr32(device, 0x10ec00 + soff, (i << 8));
+		nvkm_mask(device, 0x10ec10 + soff, 0x80000003, 0x80000003);
+	} else {
+		if (outp->info.type == DCB_OUTPUT_DP) {
+			nvkm_mask(device, 0x616618 + hoff, 0x80000001, 0x80000000);
+			nvkm_msec(device, 2000,
+				u32 tmp = nvkm_rd32(device, 0x616618 + hoff);
+				if (!(tmp & 0x80000000))
+					break;
+			);
+		}
+		nvkm_mask(device, 0x10ec10 + soff, 0x80000003, 0x80000000 | !!size);
+	}
+
+	return 0;
+}
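
The old nv_wait(priv, addr, mask, value) calls become open-coded polls under
nvkm_msec(device, 2000, ...), which, as used here, appears to re-evaluate its
body until the break fires or the 2000ms budget lapses, yielding a negative
value on timeout (unchecked at these call sites). An open-coded equivalent of
the idiom, with hypothetical read_reg()/now_ns() helpers standing in for the
real accessors:

    #include <stdbool.h>
    #include <stdint.h>

    extern uint32_t read_reg(uint32_t addr); /* hypothetical MMIO read */
    extern uint64_t now_ns(void);            /* hypothetical clock */

    /* Spin until bit 31 of `addr` clears or `timeout_ms` passes. */
    static bool
    poll_bit31_clear(uint32_t addr, uint64_t timeout_ms)
    {
        uint64_t deadline = now_ns() + timeout_ms * 1000000ull;
        do {
            if (!(read_reg(addr) & 0x80000000))
                return true;              /* the `break` case above */
        } while (now_ns() < deadline);
        return false;                     /* nvkm_msec's timeout case */
    }
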
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
index 891d1e7..c1590b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
@@ -33,15 +33,17 @@
 int
 gt215_hda_eld(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	union {
 		struct nv50_disp_sor_hda_eld_v0 v0;
 	} *args = data;
 	const u32 soff = outp->or * 0x800;
 	int ret, i;
 
-	nv_ioctl(object, "disp sor hda eld size %d\n", size);
+	nvif_ioctl(object, "disp sor hda eld size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version);
+		nvif_ioctl(object, "disp sor hda eld vers %d\n",
+			   args->v0.version);
 		if (size > 0x60)
 			return -E2BIG;
 	} else
@@ -49,20 +51,28 @@
 
 	if (size && args->v0.data[0]) {
 		if (outp->info.type == DCB_OUTPUT_DP) {
-			nv_mask(priv, 0x61c1e0 + soff, 0x8000000d, 0x80000001);
-			nv_wait(priv, 0x61c1e0 + soff, 0x80000000, 0x00000000);
+			nvkm_mask(device, 0x61c1e0 + soff, 0x8000000d, 0x80000001);
+			nvkm_msec(device, 2000,
+				u32 tmp = nvkm_rd32(device, 0x61c1e0 + soff);
+				if (!(tmp & 0x80000000))
+					break;
+			);
 		}
 		for (i = 0; i < size; i++)
-			nv_wr32(priv, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
+			nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
 		for (; i < 0x60; i++)
-			nv_wr32(priv, 0x61c440 + soff, (i << 8));
-		nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
+			nvkm_wr32(device, 0x61c440 + soff, (i << 8));
+		nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
 	} else {
 		if (outp->info.type == DCB_OUTPUT_DP) {
-			nv_mask(priv, 0x61c1e0 + soff, 0x80000001, 0x80000000);
-			nv_wait(priv, 0x61c1e0 + soff, 0x80000000, 0x00000000);
+			nvkm_mask(device, 0x61c1e0 + soff, 0x80000001, 0x80000000);
+			nvkm_msec(device, 2000,
+				u32 tmp = nvkm_rd32(device, 0x61c1e0 + soff);
+				if (!(tmp & 0x80000000))
+					break;
+			);
 		}
-		nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000 | !!size);
+		nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000000 | !!size);
 	}
 
 	return 0;
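
Both HDA paths write the ELD one byte per register write, encoding the buffer
index in bits 15:8 and the byte in bits 7:0, then pad out to 0x60 entries with
zeroes (sizes above 0x60 are rejected earlier with -E2BIG). Worth noting while
reading: the gt215 loop writes args->v0.data[0] at every index where the gf119
variant writes data[i]; the conversion faithfully preserves each file's
pre-existing behaviour either way. The packing, as a standalone sketch with a
hypothetical write callback:

    #include <stdint.h>

    /* Write `size` ELD bytes as (index << 8) | byte, then zero-pad the
     * remaining slots up to 0x60, matching the loops above. */
    static void
    write_eld(void (*wr)(uint32_t), const uint8_t *eld, int size)
    {
        int i;
        for (i = 0; i < size; i++)
            wr(((uint32_t)i << 8) | eld[i]);
        for (; i < 0x60; i++)
            wr((uint32_t)i << 8);
    }
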
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
index 621cb0b..ee9e800 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c
@@ -31,6 +31,7 @@
 int
 g84_hdmi_ctrl(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const u32 hoff = (head * 0x800);
 	union {
 		struct nv50_disp_sor_hdmi_pwr_v0 v0;
@@ -38,12 +39,12 @@
 	u32 ctrl;
 	int ret;
 
-	nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+	nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
-				 "max_ac_packet %d rekey %d\n",
-			 args->v0.version, args->v0.state,
-			 args->v0.max_ac_packet, args->v0.rekey);
+		nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+				   "max_ac_packet %d rekey %d\n",
+			   args->v0.version, args->v0.state,
+			   args->v0.max_ac_packet, args->v0.rekey);
 		if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
 			return -EINVAL;
 		ctrl  = 0x40000000 * !!args->v0.state;
@@ -54,38 +55,38 @@
 		return ret;
 
 	if (!(ctrl & 0x40000000)) {
-		nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
-		nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
-		nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x6165a4 + hoff, 0x40000000, 0x00000000);
+		nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000000);
 		return 0;
 	}
 
 	/* AVI InfoFrame */
-	nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x616528 + hoff, 0x000d0282);
-	nv_wr32(priv, 0x61652c + hoff, 0x0000006f);
-	nv_wr32(priv, 0x616530 + hoff, 0x00000000);
-	nv_wr32(priv, 0x616534 + hoff, 0x00000000);
-	nv_wr32(priv, 0x616538 + hoff, 0x00000000);
-	nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x616528 + hoff, 0x000d0282);
+	nvkm_wr32(device, 0x61652c + hoff, 0x0000006f);
+	nvkm_wr32(device, 0x616530 + hoff, 0x00000000);
+	nvkm_wr32(device, 0x616534 + hoff, 0x00000000);
+	nvkm_wr32(device, 0x616538 + hoff, 0x00000000);
+	nvkm_mask(device, 0x616520 + hoff, 0x00000001, 0x00000001);
 
 	/* Audio InfoFrame */
-	nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x616508 + hoff, 0x000a0184);
-	nv_wr32(priv, 0x61650c + hoff, 0x00000071);
-	nv_wr32(priv, 0x616510 + hoff, 0x00000000);
-	nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x616508 + hoff, 0x000a0184);
+	nvkm_wr32(device, 0x61650c + hoff, 0x00000071);
+	nvkm_wr32(device, 0x616510 + hoff, 0x00000000);
+	nvkm_mask(device, 0x616500 + hoff, 0x00000001, 0x00000001);
 
-	nv_mask(priv, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
-	nv_mask(priv, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
-	nv_mask(priv, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+	nvkm_mask(device, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+	nvkm_mask(device, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+	nvkm_mask(device, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
 
 	/* ??? */
-	nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
-	nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
-	nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+	nvkm_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+	nvkm_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+	nvkm_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
 
 	/* HDMI_CTRL */
-	nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, ctrl);
+	nvkm_mask(device, 0x6165a4 + hoff, 0x5f1f007f, ctrl);
 	return 0;
 }
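
The InfoFrame constants in this file are packed CEA-861 headers rather than
opaque magic: bits 7:0 carry the packet type, bits 15:8 the version, and bits
23:16 the payload length. A quick self-contained check of the two values
written above:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t
    infoframe_header(uint8_t type, uint8_t version, uint8_t length)
    {
        return ((uint32_t)length << 16) | ((uint32_t)version << 8) | type;
    }

    int main(void)
    {
        /* AVI InfoFrame: type 0x82, version 2, 13 payload bytes. */
        assert(infoframe_header(0x82, 2, 13) == 0x000d0282);
        /* Audio InfoFrame: type 0x84, version 1, 10 payload bytes. */
        assert(infoframe_header(0x84, 1, 10) == 0x000a0184);
        return 0;
    }
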
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c
deleted file mode 100644
index c284490..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf110.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv50.h"
-
-#include <core/client.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-int
-gf110_hdmi_ctrl(NV50_DISP_MTHD_V1)
-{
-	const u32 hoff = (head * 0x800);
-	union {
-		struct nv50_disp_sor_hdmi_pwr_v0 v0;
-	} *args = data;
-	u32 ctrl;
-	int ret;
-
-	nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
-				 "max_ac_packet %d rekey %d\n",
-			 args->v0.version, args->v0.state,
-			 args->v0.max_ac_packet, args->v0.rekey);
-		if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
-			return -EINVAL;
-		ctrl  = 0x40000000 * !!args->v0.state;
-		ctrl |= args->v0.max_ac_packet << 16;
-		ctrl |= args->v0.rekey;
-	} else
-		return ret;
-
-	if (!(ctrl & 0x40000000)) {
-		nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
-		nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
-		nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
-		return 0;
-	}
-
-	/* AVI InfoFrame */
-	nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x61671c + hoff, 0x000d0282);
-	nv_wr32(priv, 0x616720 + hoff, 0x0000006f);
-	nv_wr32(priv, 0x616724 + hoff, 0x00000000);
-	nv_wr32(priv, 0x616728 + hoff, 0x00000000);
-	nv_wr32(priv, 0x61672c + hoff, 0x00000000);
-	nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001);
-
-	/* ??? InfoFrame? */
-	nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x6167ac + hoff, 0x00000010);
-	nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
-
-	/* HDMI_CTRL */
-	nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl);
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c
new file mode 100644
index 0000000..b5af025
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+gf119_hdmi_ctrl(NV50_DISP_MTHD_V1)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	const u32 hoff = (head * 0x800);
+	union {
+		struct nv50_disp_sor_hdmi_pwr_v0 v0;
+	} *args = data;
+	u32 ctrl;
+	int ret;
+
+	nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+				   "max_ac_packet %d rekey %d\n",
+			   args->v0.version, args->v0.state,
+			   args->v0.max_ac_packet, args->v0.rekey);
+		if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
+			return -EINVAL;
+		ctrl  = 0x40000000 * !!args->v0.state;
+		ctrl |= args->v0.max_ac_packet << 16;
+		ctrl |= args->v0.rekey;
+	} else
+		return ret;
+
+	if (!(ctrl & 0x40000000)) {
+		nvkm_mask(device, 0x616798 + hoff, 0x40000000, 0x00000000);
+		nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000000);
+		return 0;
+	}
+
+	/* AVI InfoFrame */
+	nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x61671c + hoff, 0x000d0282);
+	nvkm_wr32(device, 0x616720 + hoff, 0x0000006f);
+	nvkm_wr32(device, 0x616724 + hoff, 0x00000000);
+	nvkm_wr32(device, 0x616728 + hoff, 0x00000000);
+	nvkm_wr32(device, 0x61672c + hoff, 0x00000000);
+	nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000001);
+
+	/* ??? InfoFrame? */
+	nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x6167ac + hoff, 0x00000010);
+	nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000001);
+
+	/* HDMI_CTRL */
+	nvkm_mask(device, 0x616798 + hoff, 0x401f007f, ctrl);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
index ca34ff8..110dc19 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c
@@ -31,6 +31,7 @@
 int
 gk104_hdmi_ctrl(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const u32 hoff = (head * 0x800);
 	const u32 hdmi = (head * 0x400);
 	union {
@@ -39,12 +40,12 @@
 	u32 ctrl;
 	int ret;
 
-	nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+	nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
-				 "max_ac_packet %d rekey %d\n",
-			 args->v0.version, args->v0.state,
-			 args->v0.max_ac_packet, args->v0.rekey);
+		nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+				   "max_ac_packet %d rekey %d\n",
+			   args->v0.version, args->v0.state,
+			   args->v0.max_ac_packet, args->v0.rekey);
 		if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
 			return -EINVAL;
 		ctrl  = 0x40000000 * !!args->v0.state;
@@ -54,30 +55,30 @@
 		return ret;
 
 	if (!(ctrl & 0x40000000)) {
-		nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
-		nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
-		nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x616798 + hoff, 0x40000000, 0x00000000);
+		nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000000);
 		return 0;
 	}
 
 	/* AVI InfoFrame */
-	nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x690008 + hdmi, 0x000d0282);
-	nv_wr32(priv, 0x69000c + hdmi, 0x0000006f);
-	nv_wr32(priv, 0x690010 + hdmi, 0x00000000);
-	nv_wr32(priv, 0x690014 + hdmi, 0x00000000);
-	nv_wr32(priv, 0x690018 + hdmi, 0x00000000);
-	nv_mask(priv, 0x690000 + hdmi, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x690008 + hdmi, 0x000d0282);
+	nvkm_wr32(device, 0x69000c + hdmi, 0x0000006f);
+	nvkm_wr32(device, 0x690010 + hdmi, 0x00000000);
+	nvkm_wr32(device, 0x690014 + hdmi, 0x00000000);
+	nvkm_wr32(device, 0x690018 + hdmi, 0x00000000);
+	nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000001);
 
 	/* ??? InfoFrame? */
-	nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x6900cc + hdmi, 0x00000010);
-	nv_mask(priv, 0x6900c0 + hdmi, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x6900cc + hdmi, 0x00000010);
+	nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000001);
 
 	/* ??? */
-	nv_wr32(priv, 0x690080 + hdmi, 0x82000000);
+	nvkm_wr32(device, 0x690080 + hdmi, 0x82000000);
 
 	/* HDMI_CTRL */
-	nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl);
+	nvkm_mask(device, 0x616798 + hoff, 0x401f007f, ctrl);
 	return 0;
 }
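
Mechanically, every nv_mask(priv, ...) in these files becomes
nvkm_mask(device, ...): the read-modify-write semantics are unchanged, but the
accessor now takes the nvkm_device directly instead of reaching through the
engine's priv object. The semantics, sketched with hypothetical rd32/wr32
helpers:

    #include <stdint.h>

    extern uint32_t rd32(uint32_t addr);            /* hypothetical */
    extern void wr32(uint32_t addr, uint32_t data); /* hypothetical */

    /* Replace the bits selected by `mask` with the corresponding bits
     * of `data`; returns the previous register value. */
    static uint32_t
    mask32(uint32_t addr, uint32_t mask, uint32_t data)
    {
        uint32_t temp = rd32(addr);
        wr32(addr, (temp & ~mask) | data);
        return temp;
    }
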
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
index b641c16..61237db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c
@@ -32,6 +32,7 @@
 int
 gt215_hdmi_ctrl(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const u32 soff = outp->or * 0x800;
 	union {
 		struct nv50_disp_sor_hdmi_pwr_v0 v0;
@@ -39,12 +40,12 @@
 	u32 ctrl;
 	int ret;
 
-	nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+	nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
-				 "max_ac_packet %d rekey %d\n",
-			 args->v0.version, args->v0.state,
-			 args->v0.max_ac_packet, args->v0.rekey);
+		nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+				   "max_ac_packet %d rekey %d\n",
+			   args->v0.version, args->v0.state,
+			   args->v0.max_ac_packet, args->v0.rekey);
 		if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
 			return -EINVAL;
 		ctrl  = 0x40000000 * !!args->v0.state;
@@ -55,38 +56,38 @@
 		return ret;
 
 	if (!(ctrl & 0x40000000)) {
-		nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
-		nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
-		nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x61c5a4 + soff, 0x40000000, 0x00000000);
+		nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000000);
 		return 0;
 	}
 
 	/* AVI InfoFrame */
-	nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x61c528 + soff, 0x000d0282);
-	nv_wr32(priv, 0x61c52c + soff, 0x0000006f);
-	nv_wr32(priv, 0x61c530 + soff, 0x00000000);
-	nv_wr32(priv, 0x61c534 + soff, 0x00000000);
-	nv_wr32(priv, 0x61c538 + soff, 0x00000000);
-	nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x61c528 + soff, 0x000d0282);
+	nvkm_wr32(device, 0x61c52c + soff, 0x0000006f);
+	nvkm_wr32(device, 0x61c530 + soff, 0x00000000);
+	nvkm_wr32(device, 0x61c534 + soff, 0x00000000);
+	nvkm_wr32(device, 0x61c538 + soff, 0x00000000);
+	nvkm_mask(device, 0x61c520 + soff, 0x00000001, 0x00000001);
 
 	/* Audio InfoFrame */
-	nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
-	nv_wr32(priv, 0x61c508 + soff, 0x000a0184);
-	nv_wr32(priv, 0x61c50c + soff, 0x00000071);
-	nv_wr32(priv, 0x61c510 + soff, 0x00000000);
-	nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x61c508 + soff, 0x000a0184);
+	nvkm_wr32(device, 0x61c50c + soff, 0x00000071);
+	nvkm_wr32(device, 0x61c510 + soff, 0x00000000);
+	nvkm_mask(device, 0x61c500 + soff, 0x00000001, 0x00000001);
 
-	nv_mask(priv, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
-	nv_mask(priv, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
-	nv_mask(priv, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+	nvkm_mask(device, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+	nvkm_mask(device, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+	nvkm_mask(device, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
 
 	/* ??? */
-	nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
-	nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
-	nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+	nvkm_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+	nvkm_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+	nvkm_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
 
 	/* HDMI_CTRL */
-	nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, ctrl);
+	nvkm_mask(device, 0x61c5a4 + soff, 0x5f1f007f, ctrl);
 	return 0;
 }
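
One per-generation detail visible across the HDMI files above: gt215 addresses
its block per SOR (soff = outp->or * 0x800 on top of 0x61cxxx), g84 and gf119
address per head (hoff = head * 0x800 on top of 0x616xxx), and gk104 keeps the
per-head 0x800 stride for HDMI_CTRL but moves the InfoFrame registers into a
separate block with a 0x400 stride (hdmi = head * 0x400 on top of 0x690000).
The offsets, restated as illustrative helpers:

    #include <stdint.h>

    /* Strides as used in the hunks; the helpers are illustrative only. */
    static uint32_t gt215_sor_reg(int sor, uint32_t reg)
    {
        return 0x61c000 + (uint32_t)sor * 0x800 + reg;   /* per-SOR  */
    }

    static uint32_t g84_head_reg(int head, uint32_t reg)
    {
        return 0x616000 + (uint32_t)head * 0x800 + reg;  /* per-head */
    }

    static uint32_t gk104_hdmi_reg(int head, uint32_t reg)
    {
        return 0x690000 + (uint32_t)head * 0x400 + reg;  /* per-head */
    }
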
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
index ff09b26..67254ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv04.c
@@ -23,183 +23,63 @@
  */
 #include "priv.h"
 
-#include <core/client.h>
-#include <core/device.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct nv04_disp_priv {
-	struct nvkm_disp base;
-};
-
-static int
-nv04_disp_scanoutpos(struct nvkm_object *object, struct nv04_disp_priv *priv,
-		     void *data, u32 size, int head)
+static const struct nvkm_disp_oclass *
+nv04_disp_root(struct nvkm_disp *disp)
 {
-	const u32 hoff = head * 0x2000;
-	union {
-		struct nv04_disp_scanoutpos_v0 v0;
-	} *args = data;
-	u32 line;
-	int ret;
-
-	nv_ioctl(object, "disp scanoutpos size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
-		args->v0.vblanks = nv_rd32(priv, 0x680800 + hoff) & 0xffff;
-		args->v0.vtotal  = nv_rd32(priv, 0x680804 + hoff) & 0xffff;
-		args->v0.vblanke = args->v0.vtotal - 1;
-
-		args->v0.hblanks = nv_rd32(priv, 0x680820 + hoff) & 0xffff;
-		args->v0.htotal  = nv_rd32(priv, 0x680824 + hoff) & 0xffff;
-		args->v0.hblanke = args->v0.htotal - 1;
-
-		/*
-		 * If output is vga instead of digital then vtotal/htotal is
-		 * invalid so we have to give up and trigger the timestamping
-		 * fallback in the drm core.
-		 */
-		if (!args->v0.vtotal || !args->v0.htotal)
-			return -ENOTSUPP;
-
-		args->v0.time[0] = ktime_to_ns(ktime_get());
-		line = nv_rd32(priv, 0x600868 + hoff);
-		args->v0.time[1] = ktime_to_ns(ktime_get());
-		args->v0.hline = (line & 0xffff0000) >> 16;
-		args->v0.vline = (line & 0x0000ffff);
-	} else
-		return ret;
-
-	return 0;
-}
-
-static int
-nv04_disp_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
-	union {
-		struct nv04_disp_mthd_v0 v0;
-	} *args = data;
-	struct nv04_disp_priv *priv = (void *)object->engine;
-	int head, ret;
-
-	nv_ioctl(object, "disp mthd size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
-			 args->v0.version, args->v0.method, args->v0.head);
-		mthd = args->v0.method;
-		head = args->v0.head;
-	} else
-		return ret;
-
-	if (head < 0 || head >= 2)
-		return -ENXIO;
-
-	switch (mthd) {
-	case NV04_DISP_SCANOUTPOS:
-		return nv04_disp_scanoutpos(object, priv, data, size, head);
-	default:
-		break;
-	}
-
-	return -EINVAL;
-}
-
-static struct nvkm_ofuncs
-nv04_disp_ofuncs = {
-	.ctor = _nvkm_object_ctor,
-	.dtor = nvkm_object_destroy,
-	.init = nvkm_object_init,
-	.fini = nvkm_object_fini,
-	.mthd = nv04_disp_mthd,
-	.ntfy = nvkm_disp_ntfy,
-};
-
-static struct nvkm_oclass
-nv04_disp_sclass[] = {
-	{ NV04_DISP, &nv04_disp_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static void
-nv04_disp_vblank_init(struct nvkm_event *event, int type, int head)
-{
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000001);
+	return &nv04_disp_root_oclass;
 }
 
 static void
-nv04_disp_vblank_fini(struct nvkm_event *event, int type, int head)
+nv04_disp_vblank_init(struct nvkm_disp *disp, int head)
 {
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000000);
+	struct nvkm_device *device = disp->engine.subdev.device;
+	nvkm_wr32(device, 0x600140 + (head * 0x2000) , 0x00000001);
 }
 
-static const struct nvkm_event_func
-nv04_disp_vblank_func = {
-	.ctor = nvkm_disp_vblank_ctor,
-	.init = nv04_disp_vblank_init,
-	.fini = nv04_disp_vblank_fini,
-};
+static void
+nv04_disp_vblank_fini(struct nvkm_disp *disp, int head)
+{
+	struct nvkm_device *device = disp->engine.subdev.device;
+	nvkm_wr32(device, 0x600140 + (head * 0x2000) , 0x00000000);
+}
 
 static void
-nv04_disp_intr(struct nvkm_subdev *subdev)
+nv04_disp_intr(struct nvkm_disp *disp)
 {
-	struct nv04_disp_priv *priv = (void *)subdev;
-	u32 crtc0 = nv_rd32(priv, 0x600100);
-	u32 crtc1 = nv_rd32(priv, 0x602100);
+	struct nvkm_subdev *subdev = &disp->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 crtc0 = nvkm_rd32(device, 0x600100);
+	u32 crtc1 = nvkm_rd32(device, 0x602100);
 	u32 pvideo;
 
 	if (crtc0 & 0x00000001) {
-		nvkm_disp_vblank(&priv->base, 0);
-		nv_wr32(priv, 0x600100, 0x00000001);
+		nvkm_disp_vblank(disp, 0);
+		nvkm_wr32(device, 0x600100, 0x00000001);
 	}
 
 	if (crtc1 & 0x00000001) {
-		nvkm_disp_vblank(&priv->base, 1);
-		nv_wr32(priv, 0x602100, 0x00000001);
+		nvkm_disp_vblank(disp, 1);
+		nvkm_wr32(device, 0x602100, 0x00000001);
 	}
 
-	if (nv_device(priv)->chipset >= 0x10 &&
-	    nv_device(priv)->chipset <= 0x40) {
-		pvideo = nv_rd32(priv, 0x8100);
+	if (device->chipset >= 0x10 && device->chipset <= 0x40) {
+		pvideo = nvkm_rd32(device, 0x8100);
 		if (pvideo & ~0x11)
-			nv_info(priv, "PVIDEO intr: %08x\n", pvideo);
-		nv_wr32(priv, 0x8100, pvideo);
+			nvkm_info(subdev, "PVIDEO intr: %08x\n", pvideo);
+		nvkm_wr32(device, 0x8100, pvideo);
 	}
 }
 
-static int
-nv04_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+static const struct nvkm_disp_func
+nv04_disp = {
+	.intr = nv04_disp_intr,
+	.root = nv04_disp_root,
+	.head.vblank_init = nv04_disp_vblank_init,
+	.head.vblank_fini = nv04_disp_vblank_fini,
+};
+
+int
+nv04_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
 {
-	struct nv04_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "DISPLAY",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = nv04_disp_sclass;
-	nv_subdev(priv)->intr = nv04_disp_intr;
-	return 0;
+	return nvkm_disp_new_(&nv04_disp, device, index, 2, pdisp);
 }
-
-struct nvkm_oclass *
-nv04_disp_oclass = &(struct nvkm_disp_impl) {
-	.base.handle = NV_ENGINE(DISP, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.vblank = &nv04_disp_vblank_func,
-}.base;
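
The nv04 conversion applies the same func-table pattern at the base nvkm_disp
level: the method/ioctl handling (scanoutpos) moves behind the new
nv04_disp_root_oclass, leaving only the interrupt handler and the vblank hooks
in this file. The vblank plumbing itself is a handful of register pokes, per
the hunks above; wr32() below is a hypothetical stand-in for the MMIO write:

    #include <stdint.h>

    extern void wr32(uint32_t addr, uint32_t data);  /* hypothetical */

    static void nv04_vblank_set(int head, int enable)
    {
        /* unmask/mask the per-head vblank interrupt */
        wr32(0x600140 + (uint32_t)head * 0x2000, enable ? 1 : 0);
    }

    static void nv04_vblank_ack(int head)
    {
        /* PCRTC0 status at 0x600100, PCRTC1 at 0x602100 */
        wr32(head ? 0x602100 : 0x600100, 1);
    }
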
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 8ba808d..32e73a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -22,1291 +22,158 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-#include "outp.h"
-#include "outpdp.h"
+#include "rootnv50.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/engctx.h>
 #include <core/enum.h>
-#include <core/handle.h>
-#include <core/ramht.h>
-#include <engine/dmaobj.h>
+#include <core/gpuobj.h>
 #include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
 #include <subdev/bios/disp.h>
 #include <subdev/bios/init.h>
 #include <subdev/bios/pll.h>
 #include <subdev/devinit.h>
-#include <subdev/fb.h>
-#include <subdev/timer.h>
 
-#include <nvif/class.h>
-#include <nvif/event.h>
-#include <nvif/unpack.h>
-
-/*******************************************************************************
- * EVO channel base class
- ******************************************************************************/
+static const struct nvkm_disp_oclass *
+nv50_disp_root_(struct nvkm_disp *base)
+{
+	return nv50_disp(base)->func->root;
+}
 
 static int
-nv50_disp_chan_create_(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, int head,
-		       int length, void **pobject)
+nv50_disp_outp_internal_crt_(struct nvkm_disp *base, int index,
+			     struct dcb_output *dcb, struct nvkm_output **poutp)
 {
-	const struct nv50_disp_chan_impl *impl = (void *)oclass->ofuncs;
-	struct nv50_disp_base *base = (void *)parent;
-	struct nv50_disp_chan *chan;
-	int chid = impl->chid + head;
+	struct nv50_disp *disp = nv50_disp(base);
+	return disp->func->outp.internal.crt(base, index, dcb, poutp);
+}
+
+static int
+nv50_disp_outp_internal_tmds_(struct nvkm_disp *base, int index,
+			      struct dcb_output *dcb,
+			      struct nvkm_output **poutp)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	return disp->func->outp.internal.tmds(base, index, dcb, poutp);
+}
+
+static int
+nv50_disp_outp_internal_lvds_(struct nvkm_disp *base, int index,
+			      struct dcb_output *dcb,
+			      struct nvkm_output **poutp)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	return disp->func->outp.internal.lvds(base, index, dcb, poutp);
+}
+
+static int
+nv50_disp_outp_internal_dp_(struct nvkm_disp *base, int index,
+			    struct dcb_output *dcb, struct nvkm_output **poutp)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	if (disp->func->outp.internal.dp)
+		return disp->func->outp.internal.dp(base, index, dcb, poutp);
+	return -ENODEV;
+}
+
+static int
+nv50_disp_outp_external_tmds_(struct nvkm_disp *base, int index,
+			      struct dcb_output *dcb,
+			      struct nvkm_output **poutp)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	if (disp->func->outp.external.tmds)
+		return disp->func->outp.external.tmds(base, index, dcb, poutp);
+	return -ENODEV;
+}
+
+static int
+nv50_disp_outp_external_dp_(struct nvkm_disp *base, int index,
+			    struct dcb_output *dcb, struct nvkm_output **poutp)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	if (disp->func->outp.external.dp)
+		return disp->func->outp.external.dp(base, index, dcb, poutp);
+	return -ENODEV;
+}
+
+static void
+nv50_disp_vblank_fini_(struct nvkm_disp *base, int head)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	disp->func->head.vblank_fini(disp, head);
+}
+
+static void
+nv50_disp_vblank_init_(struct nvkm_disp *base, int head)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	disp->func->head.vblank_init(disp, head);
+}
+
+static void
+nv50_disp_intr_(struct nvkm_disp *base)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	disp->func->intr(disp);
+}
+
+static void *
+nv50_disp_dtor_(struct nvkm_disp *base)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	nvkm_event_fini(&disp->uevent);
+	return disp;
+}
+
+static const struct nvkm_disp_func
+nv50_disp_ = {
+	.dtor = nv50_disp_dtor_,
+	.intr = nv50_disp_intr_,
+	.root = nv50_disp_root_,
+	.outp.internal.crt = nv50_disp_outp_internal_crt_,
+	.outp.internal.tmds = nv50_disp_outp_internal_tmds_,
+	.outp.internal.lvds = nv50_disp_outp_internal_lvds_,
+	.outp.internal.dp = nv50_disp_outp_internal_dp_,
+	.outp.external.tmds = nv50_disp_outp_external_tmds_,
+	.outp.external.dp = nv50_disp_outp_external_dp_,
+	.head.vblank_init = nv50_disp_vblank_init_,
+	.head.vblank_fini = nv50_disp_vblank_fini_,
+};
+
+int
+nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
+	       int index, int heads, struct nvkm_disp **pdisp)
+{
+	struct nv50_disp *disp;
 	int ret;
 
-	if (base->chan & (1 << chid))
-		return -EBUSY;
-	base->chan |= (1 << chid);
+	if (!(disp = kzalloc(sizeof(*disp), GFP_KERNEL)))
+		return -ENOMEM;
+	INIT_WORK(&disp->supervisor, func->super);
+	disp->func = func;
+	*pdisp = &disp->base;
 
-	ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
-				  (1ULL << NVDEV_ENGINE_DMAOBJ),
-				  length, pobject);
-	chan = *pobject;
+	ret = nvkm_disp_ctor(&nv50_disp_, device, index, heads, &disp->base);
 	if (ret)
 		return ret;
-	chan->chid = chid;
 
-	nv_parent(chan)->object_attach = impl->attach;
-	nv_parent(chan)->object_detach = impl->detach;
-	return 0;
-}
-
-static void
-nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
-{
-	struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
-	base->chan &= ~(1 << chan->chid);
-	nvkm_namedb_destroy(&chan->base);
-}
-
-static void
-nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
-{
-	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
-	nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000000 << index);
-	nv_wr32(priv, 0x610020, 0x00000001 << index);
-}
-
-static void
-nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
-{
-	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
-	nv_wr32(priv, 0x610020, 0x00000001 << index);
-	nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000001 << index);
+	return nvkm_event_init(func->uevent, 1, 1 + (heads * 4), &disp->uevent);
 }
 
 void
-nv50_disp_chan_uevent_send(struct nv50_disp_priv *priv, int chid)
+nv50_disp_vblank_fini(struct nv50_disp *disp, int head)
 {
-	struct nvif_notify_uevent_rep {
-	} rep;
-
-	nvkm_event_send(&priv->uevent, 1, chid, &rep, sizeof(rep));
-}
-
-int
-nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
-			   struct nvkm_notify *notify)
-{
-	struct nv50_disp_dmac *dmac = (void *)object;
-	union {
-		struct nvif_notify_uevent_req none;
-	} *args = data;
-	int ret;
-
-	if (nvif_unvers(args->none)) {
-		notify->size  = sizeof(struct nvif_notify_uevent_rep);
-		notify->types = 1;
-		notify->index = dmac->base.chid;
-		return 0;
-	}
-
-	return ret;
-}
-
-const struct nvkm_event_func
-nv50_disp_chan_uevent = {
-	.ctor = nv50_disp_chan_uevent_ctor,
-	.init = nv50_disp_chan_uevent_init,
-	.fini = nv50_disp_chan_uevent_fini,
-};
-
-int
-nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
-		    struct nvkm_event **pevent)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	switch (type) {
-	case NV50_DISP_CORE_CHANNEL_DMA_V0_NTFY_UEVENT:
-		*pevent = &priv->uevent;
-		return 0;
-	default:
-		break;
-	}
-	return -EINVAL;
-}
-
-int
-nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
-{
-	struct nv50_disp_chan *chan = (void *)object;
-	*addr = nv_device_resource_start(nv_device(object), 0) +
-		0x640000 + (chan->chid * 0x1000);
-	*size = 0x001000;
-	return 0;
-}
-
-u32
-nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_chan *chan = (void *)object;
-	return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x61002c, (4 << head), 0);
 }
 
 void
-nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
+nv50_disp_vblank_init(struct nv50_disp *disp, int head)
 {
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_chan *chan = (void *)object;
-	nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	nvkm_mask(device, 0x61002c, (4 << head), (4 << head));
 }
 
-/*******************************************************************************
- * EVO DMA channel base class
- ******************************************************************************/
-
-static int
-nv50_disp_dmac_object_attach(struct nvkm_object *parent,
-			     struct nvkm_object *object, u32 name)
-{
-	struct nv50_disp_base *base = (void *)parent->parent;
-	struct nv50_disp_chan *chan = (void *)parent;
-	u32 addr = nv_gpuobj(object)->node->offset;
-	u32 chid = chan->chid;
-	u32 data = (chid << 28) | (addr << 10) | chid;
-	return nvkm_ramht_insert(base->ramht, chid, name, data);
-}
-
-static void
-nv50_disp_dmac_object_detach(struct nvkm_object *parent, int cookie)
-{
-	struct nv50_disp_base *base = (void *)parent->parent;
-	nvkm_ramht_remove(base->ramht, cookie);
-}
-
-static int
-nv50_disp_dmac_create_(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, u32 pushbuf, int head,
-		       int length, void **pobject)
-{
-	struct nv50_disp_dmac *dmac;
-	int ret;
-
-	ret = nv50_disp_chan_create_(parent, engine, oclass, head,
-				     length, pobject);
-	dmac = *pobject;
-	if (ret)
-		return ret;
-
-	dmac->pushdma = (void *)nvkm_handle_ref(parent, pushbuf);
-	if (!dmac->pushdma)
-		return -ENOENT;
-
-	switch (nv_mclass(dmac->pushdma)) {
-	case 0x0002:
-	case 0x003d:
-		if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff)
-			return -EINVAL;
-
-		switch (dmac->pushdma->target) {
-		case NV_MEM_TARGET_VRAM:
-			dmac->push = 0x00000001 | dmac->pushdma->start >> 8;
-			break;
-		case NV_MEM_TARGET_PCI_NOSNOOP:
-			dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
-			break;
-		default:
-			return -EINVAL;
-		}
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-void
-nv50_disp_dmac_dtor(struct nvkm_object *object)
-{
-	struct nv50_disp_dmac *dmac = (void *)object;
-	nvkm_object_ref(NULL, (struct nvkm_object **)&dmac->pushdma);
-	nv50_disp_chan_destroy(&dmac->base);
-}
-
-static int
-nv50_disp_dmac_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *dmac = (void *)object;
-	int chid = dmac->base.chid;
-	int ret;
-
-	ret = nv50_disp_chan_init(&dmac->base);
-	if (ret)
-		return ret;
-
-	/* enable error reporting */
-	nv_mask(priv, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
-
-	/* initialise channel for dma command submission */
-	nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
-	nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000);
-	nv_wr32(priv, 0x61020c + (chid * 0x0010), chid);
-	nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
-	nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
-	nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013);
-
-	/* wait for it to go inactive */
-	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) {
-		nv_error(dmac, "init timeout, 0x%08x\n",
-			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-nv50_disp_dmac_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *dmac = (void *)object;
-	int chid = dmac->base.chid;
-
-	/* deactivate channel */
-	nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
-	nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
-	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) {
-		nv_error(dmac, "fini timeout, 0x%08x\n",
-			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	/* disable error reporting and completion notifications */
-	nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
-
-	return nv50_disp_chan_fini(&dmac->base, suspend);
-}
-
-/*******************************************************************************
- * EVO master channel object
- ******************************************************************************/
-
-static void
-nv50_disp_mthd_list(struct nv50_disp_priv *priv, int debug, u32 base, int c,
-		    const struct nv50_disp_mthd_list *list, int inst)
-{
-	struct nvkm_object *disp = nv_object(priv);
-	int i;
-
-	for (i = 0; list->data[i].mthd; i++) {
-		if (list->data[i].addr) {
-			u32 next = nv_rd32(priv, list->data[i].addr + base + 0);
-			u32 prev = nv_rd32(priv, list->data[i].addr + base + c);
-			u32 mthd = list->data[i].mthd + (list->mthd * inst);
-			const char *name = list->data[i].name;
-			char mods[16];
-
-			if (prev != next)
-				snprintf(mods, sizeof(mods), "-> 0x%08x", next);
-			else
-				snprintf(mods, sizeof(mods), "%13c", ' ');
-
-			nv_printk_(disp, debug, "\t0x%04x: 0x%08x %s%s%s\n",
-				   mthd, prev, mods, name ? " // " : "",
-				   name ? name : "");
-		}
-	}
-}
-
-void
-nv50_disp_mthd_chan(struct nv50_disp_priv *priv, int debug, int head,
-		    const struct nv50_disp_mthd_chan *chan)
-{
-	struct nvkm_object *disp = nv_object(priv);
-	const struct nv50_disp_impl *impl = (void *)disp->oclass;
-	const struct nv50_disp_mthd_list *list;
-	int i, j;
-
-	if (debug > nv_subdev(priv)->debug)
-		return;
-
-	for (i = 0; (list = chan->data[i].mthd) != NULL; i++) {
-		u32 base = head * chan->addr;
-		for (j = 0; j < chan->data[i].nr; j++, base += list->addr) {
-			const char *cname = chan->name;
-			const char *sname = "";
-			char cname_[16], sname_[16];
-
-			if (chan->addr) {
-				snprintf(cname_, sizeof(cname_), "%s %d",
-					 chan->name, head);
-				cname = cname_;
-			}
-
-			if (chan->data[i].nr > 1) {
-				snprintf(sname_, sizeof(sname_), " - %s %d",
-					 chan->data[i].name, j);
-				sname = sname_;
-			}
-
-			nv_printk_(disp, debug, "%s%s:\n", cname, sname);
-			nv50_disp_mthd_list(priv, debug, base, impl->mthd.prev,
-					    list, j);
-		}
-	}
-}
-
-const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x610bb8 },
-		{ 0x0088, 0x610b9c },
-		{ 0x008c, 0x000000 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_dac = {
-	.mthd = 0x0080,
-	.addr = 0x000008,
-	.data = {
-		{ 0x0400, 0x610b58 },
-		{ 0x0404, 0x610bdc },
-		{ 0x0420, 0x610828 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_sor = {
-	.mthd = 0x0040,
-	.addr = 0x000008,
-	.data = {
-		{ 0x0600, 0x610b70 },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_pior = {
-	.mthd = 0x0040,
-	.addr = 0x000008,
-	.data = {
-		{ 0x0700, 0x610b80 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_list
-nv50_disp_core_mthd_head = {
-	.mthd = 0x0400,
-	.addr = 0x000540,
-	.data = {
-		{ 0x0800, 0x610ad8 },
-		{ 0x0804, 0x610ad0 },
-		{ 0x0808, 0x610a48 },
-		{ 0x080c, 0x610a78 },
-		{ 0x0810, 0x610ac0 },
-		{ 0x0814, 0x610af8 },
-		{ 0x0818, 0x610b00 },
-		{ 0x081c, 0x610ae8 },
-		{ 0x0820, 0x610af0 },
-		{ 0x0824, 0x610b08 },
-		{ 0x0828, 0x610b10 },
-		{ 0x082c, 0x610a68 },
-		{ 0x0830, 0x610a60 },
-		{ 0x0834, 0x000000 },
-		{ 0x0838, 0x610a40 },
-		{ 0x0840, 0x610a24 },
-		{ 0x0844, 0x610a2c },
-		{ 0x0848, 0x610aa8 },
-		{ 0x084c, 0x610ab0 },
-		{ 0x0860, 0x610a84 },
-		{ 0x0864, 0x610a90 },
-		{ 0x0868, 0x610b18 },
-		{ 0x086c, 0x610b20 },
-		{ 0x0870, 0x610ac8 },
-		{ 0x0874, 0x610a38 },
-		{ 0x0880, 0x610a58 },
-		{ 0x0884, 0x610a9c },
-		{ 0x08a0, 0x610a70 },
-		{ 0x08a4, 0x610a50 },
-		{ 0x08a8, 0x610ae0 },
-		{ 0x08c0, 0x610b28 },
-		{ 0x08c4, 0x610b30 },
-		{ 0x08c8, 0x610b40 },
-		{ 0x08d4, 0x610b38 },
-		{ 0x08d8, 0x610b48 },
-		{ 0x08dc, 0x610b50 },
-		{ 0x0900, 0x610a18 },
-		{ 0x0904, 0x610ab8 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_chan
-nv50_disp_core_mthd_chan = {
-	.name = "Core",
-	.addr = 0x000000,
-	.data = {
-		{ "Global", 1, &nv50_disp_core_mthd_base },
-		{    "DAC", 3, &nv50_disp_core_mthd_dac  },
-		{    "SOR", 2, &nv50_disp_core_mthd_sor  },
-		{   "PIOR", 3, &nv50_disp_core_mthd_pior },
-		{   "HEAD", 2, &nv50_disp_core_mthd_head },
-		{}
-	}
-};
-
-int
-nv50_disp_core_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_disp_core_channel_dma_v0 v0;
-	} *args = data;
-	struct nv50_disp_dmac *mast;
-	int ret;
-
-	nv_ioctl(parent, "create disp core channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create disp core channel dma vers %d "
-				 "pushbuf %08x\n",
-			 args->v0.version, args->v0.pushbuf);
-	} else
-		return ret;
-
-	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
-				     0, sizeof(*mast), (void **)&mast);
-	*pobject = nv_object(mast);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int
-nv50_disp_core_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *mast = (void *)object;
-	int ret;
-
-	ret = nv50_disp_chan_init(&mast->base);
-	if (ret)
-		return ret;
-
-	/* enable error reporting */
-	nv_mask(priv, 0x610028, 0x00010000, 0x00010000);
-
-	/* attempt to unstick channel from some unknown state */
-	if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
-		nv_mask(priv, 0x610200, 0x00800000, 0x00800000);
-	if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000)
-		nv_mask(priv, 0x610200, 0x00600000, 0x00600000);
-
-	/* initialise channel for dma command submission */
-	nv_wr32(priv, 0x610204, mast->push);
-	nv_wr32(priv, 0x610208, 0x00010000);
-	nv_wr32(priv, 0x61020c, 0x00000000);
-	nv_mask(priv, 0x610200, 0x00000010, 0x00000010);
-	nv_wr32(priv, 0x640000, 0x00000000);
-	nv_wr32(priv, 0x610200, 0x01000013);
-
-	/* wait for it to go inactive */
-	if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) {
-		nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-nv50_disp_core_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_dmac *mast = (void *)object;
-
-	/* deactivate channel */
-	nv_mask(priv, 0x610200, 0x00000010, 0x00000000);
-	nv_mask(priv, 0x610200, 0x00000003, 0x00000000);
-	if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) {
-		nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	/* disable error reporting and completion notifications */
-	nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
-
-	return nv50_disp_chan_fini(&mast->base, suspend);
-}
-
-struct nv50_disp_chan_impl
-nv50_disp_core_ofuncs = {
-	.base.ctor = nv50_disp_core_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = nv50_disp_core_init,
-	.base.fini = nv50_disp_core_fini,
-	.base.map  = nv50_disp_chan_map,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 0,
-	.attach = nv50_disp_dmac_object_attach,
-	.detach = nv50_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO sync channel objects
- ******************************************************************************/
-
-static const struct nv50_disp_mthd_list
-nv50_disp_base_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x0008c4 },
-		{ 0x0088, 0x0008d0 },
-		{ 0x008c, 0x0008dc },
-		{ 0x0090, 0x0008e4 },
-		{ 0x0094, 0x610884 },
-		{ 0x00a0, 0x6108a0 },
-		{ 0x00a4, 0x610878 },
-		{ 0x00c0, 0x61086c },
-		{ 0x00e0, 0x610858 },
-		{ 0x00e4, 0x610860 },
-		{ 0x00e8, 0x6108ac },
-		{ 0x00ec, 0x6108b4 },
-		{ 0x0100, 0x610894 },
-		{ 0x0110, 0x6108bc },
-		{ 0x0114, 0x61088c },
-		{}
-	}
-};
-
-const struct nv50_disp_mthd_list
-nv50_disp_base_mthd_image = {
-	.mthd = 0x0400,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0800, 0x6108f0 },
-		{ 0x0804, 0x6108fc },
-		{ 0x0808, 0x61090c },
-		{ 0x080c, 0x610914 },
-		{ 0x0810, 0x610904 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_chan
-nv50_disp_base_mthd_chan = {
-	.name = "Base",
-	.addr = 0x000540,
-	.data = {
-		{ "Global", 1, &nv50_disp_base_mthd_base },
-		{  "Image", 2, &nv50_disp_base_mthd_image },
-		{}
-	}
-};
-
-int
-nv50_disp_base_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_disp_base_channel_dma_v0 v0;
-	} *args = data;
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nv50_disp_dmac *dmac;
-	int ret;
-
-	nv_ioctl(parent, "create disp base channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create disp base channel dma vers %d "
-				 "pushbuf %08x head %d\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.head);
-		if (args->v0.head > priv->head.nr)
-			return -EINVAL;
-	} else
-		return ret;
-
-	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
-				     args->v0.head, sizeof(*dmac),
-				     (void **)&dmac);
-	*pobject = nv_object(dmac);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-struct nv50_disp_chan_impl
-nv50_disp_base_ofuncs = {
-	.base.ctor = nv50_disp_base_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = nv50_disp_dmac_init,
-	.base.fini = nv50_disp_dmac_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 1,
-	.attach = nv50_disp_dmac_object_attach,
-	.detach = nv50_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO overlay channel objects
- ******************************************************************************/
-
-const struct nv50_disp_mthd_list
-nv50_disp_ovly_mthd_base = {
-	.mthd = 0x0000,
-	.addr = 0x000000,
-	.data = {
-		{ 0x0080, 0x000000 },
-		{ 0x0084, 0x0009a0 },
-		{ 0x0088, 0x0009c0 },
-		{ 0x008c, 0x0009c8 },
-		{ 0x0090, 0x6109b4 },
-		{ 0x0094, 0x610970 },
-		{ 0x00a0, 0x610998 },
-		{ 0x00a4, 0x610964 },
-		{ 0x00c0, 0x610958 },
-		{ 0x00e0, 0x6109a8 },
-		{ 0x00e4, 0x6109d0 },
-		{ 0x00e8, 0x6109d8 },
-		{ 0x0100, 0x61094c },
-		{ 0x0104, 0x610984 },
-		{ 0x0108, 0x61098c },
-		{ 0x0800, 0x6109f8 },
-		{ 0x0808, 0x610a08 },
-		{ 0x080c, 0x610a10 },
-		{ 0x0810, 0x610a00 },
-		{}
-	}
-};
-
-static const struct nv50_disp_mthd_chan
-nv50_disp_ovly_mthd_chan = {
-	.name = "Overlay",
-	.addr = 0x000540,
-	.data = {
-		{ "Global", 1, &nv50_disp_ovly_mthd_base },
-		{}
-	}
-};
-
-int
-nv50_disp_ovly_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_disp_overlay_channel_dma_v0 v0;
-	} *args = data;
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nv50_disp_dmac *dmac;
-	int ret;
-
-	nv_ioctl(parent, "create disp overlay channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create disp overlay channel dma vers %d "
-				 "pushbuf %08x head %d\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.head);
-		if (args->v0.head > priv->head.nr)
-			return -EINVAL;
-	} else
-		return ret;
-
-	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
-				     args->v0.head, sizeof(*dmac),
-				     (void **)&dmac);
-	*pobject = nv_object(dmac);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-struct nv50_disp_chan_impl
-nv50_disp_ovly_ofuncs = {
-	.base.ctor = nv50_disp_ovly_ctor,
-	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = nv50_disp_dmac_init,
-	.base.fini = nv50_disp_dmac_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 3,
-	.attach = nv50_disp_dmac_object_attach,
-	.detach = nv50_disp_dmac_object_detach,
-};
-
-/*******************************************************************************
- * EVO PIO channel base class
- ******************************************************************************/
-
-static int
-nv50_disp_pioc_create_(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, int head,
-		       int length, void **pobject)
-{
-	return nv50_disp_chan_create_(parent, engine, oclass, head,
-				      length, pobject);
-}
-
-void
-nv50_disp_pioc_dtor(struct nvkm_object *object)
-{
-	struct nv50_disp_pioc *pioc = (void *)object;
-	nv50_disp_chan_destroy(&pioc->base);
-}
-
-static int
-nv50_disp_pioc_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_pioc *pioc = (void *)object;
-	int chid = pioc->base.chid;
-	int ret;
-
-	ret = nv50_disp_chan_init(&pioc->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000);
-	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) {
-		nv_error(pioc, "timeout0: 0x%08x\n",
-			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
-		return -EBUSY;
-	}
-
-	nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001);
-	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) {
-		nv_error(pioc, "timeout1: 0x%08x\n",
-			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
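nv50_disp_pioc_init brings a PIO channel up with a write-then-poll sequence: write a control value, then spin until (reg & mask) == value, returning -EBUSY on timeout. A generic sketch of that loop with a toy register file standing in for MMIO and bounded retries in place of the real timeout:

```c
#include <stdint.h>
#include <stdbool.h>

/* Toy register file; the index hashing is only for the sketch. */
static uint32_t regs[1 << 16];
static uint32_t rd32(uint32_t a) { return regs[(a >> 2) & 0xffff]; }
static void wr32(uint32_t a, uint32_t v) { regs[(a >> 2) & 0xffff] = v; }

/* Poll until (reg & mask) == value, like nv_wait(). */
static bool poll_mask(uint32_t addr, uint32_t mask, uint32_t value)
{
	for (int i = 0; i < 1000000; i++) {
		if ((rd32(addr) & mask) == value)
			return true;
	}
	return false;
}

int main(void)
{
	uint32_t chan = 0x610200 + (5 * 0x10);	/* chid 5: overlay immediate */
	wr32(chan, 0x00000001);			/* request channel enable */
	wr32(chan, 0x00010000);			/* emulate hardware status */
	return poll_mask(chan, 0x00030000, 0x00010000) ? 0 : 1;
}
```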
-static int
-nv50_disp_pioc_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_pioc *pioc = (void *)object;
-	int chid = pioc->base.chid;
-
-	nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
-	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) {
-		nv_error(pioc, "timeout: 0x%08x\n",
-			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	return nv50_disp_chan_fini(&pioc->base, suspend);
-}
-
-/*******************************************************************************
- * EVO immediate overlay channel objects
- ******************************************************************************/
-
-int
-nv50_disp_oimm_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_disp_overlay_v0 v0;
-	} *args = data;
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nv50_disp_pioc *pioc;
-	int ret;
-
-	nv_ioctl(parent, "create disp overlay size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create disp overlay vers %d head %d\n",
-			 args->v0.version, args->v0.head);
-		if (args->v0.head > priv->head.nr)
-			return -EINVAL;
-	} else
-		return ret;
-
-	ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
-				     sizeof(*pioc), (void **)&pioc);
-	*pobject = nv_object(pioc);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-struct nv50_disp_chan_impl
-nv50_disp_oimm_ofuncs = {
-	.base.ctor = nv50_disp_oimm_ctor,
-	.base.dtor = nv50_disp_pioc_dtor,
-	.base.init = nv50_disp_pioc_init,
-	.base.fini = nv50_disp_pioc_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 5,
-};
-
-/*******************************************************************************
- * EVO cursor channel objects
- ******************************************************************************/
-
-int
-nv50_disp_curs_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_disp_cursor_v0 v0;
-	} *args = data;
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nv50_disp_pioc *pioc;
-	int ret;
-
-	nv_ioctl(parent, "create disp cursor size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create disp cursor vers %d head %d\n",
-			 args->v0.version, args->v0.head);
-		if (args->v0.head > priv->head.nr)
-			return -EINVAL;
-	} else
-		return ret;
-
-	ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
-				     sizeof(*pioc), (void **)&pioc);
-	*pobject = nv_object(pioc);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-struct nv50_disp_chan_impl
-nv50_disp_curs_ofuncs = {
-	.base.ctor = nv50_disp_curs_ctor,
-	.base.dtor = nv50_disp_pioc_dtor,
-	.base.init = nv50_disp_pioc_init,
-	.base.fini = nv50_disp_pioc_fini,
-	.base.ntfy = nv50_disp_chan_ntfy,
-	.base.map  = nv50_disp_chan_map,
-	.base.rd32 = nv50_disp_chan_rd32,
-	.base.wr32 = nv50_disp_chan_wr32,
-	.chid = 7,
-};
-
-/*******************************************************************************
- * Base display object
- ******************************************************************************/
-
-int
-nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
-{
-	const u32 blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
-	const u32 blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
-	const u32 total  = nv_rd32(priv, 0x610afc + (head * 0x540));
-	union {
-		struct nv04_disp_scanoutpos_v0 v0;
-	} *args = data;
-	int ret;
-
-	nv_ioctl(object, "disp scanoutpos size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
-		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
-		args->v0.hblanke = (blanke & 0x0000ffff);
-		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
-		args->v0.hblanks = (blanks & 0x0000ffff);
-		args->v0.vtotal  = ( total & 0xffff0000) >> 16;
-		args->v0.htotal  = ( total & 0x0000ffff);
-		args->v0.time[0] = ktime_to_ns(ktime_get());
-		args->v0.vline = /* vline read locks hline */
-			nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
-		args->v0.time[1] = ktime_to_ns(ktime_get());
-		args->v0.hline =
-			nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
-	} else
-		return ret;
-
-	return 0;
-}
-
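nv50_disp_main_scanoutpos returns blanking geometry packed as 16-bit halves (vertical in bits 31:16, horizontal in 15:0) and brackets the vline read between two timestamps, since reading vline latches hline. A short sketch of unpacking one such register (the packed value is made up):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* vblanke in 31:16, hblanke in 15:0, mirroring the 0x610aec read */
	uint32_t blanke = (1125u << 16) | 2200u;
	uint16_t vblanke = blanke >> 16;
	uint16_t hblanke = blanke & 0xffff;
	printf("vblanke %u hblanke %u\n", vblanke, hblanke);
	return 0;
}
```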
-int
-nv50_disp_main_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
-{
-	const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine);
-	union {
-		struct nv50_disp_mthd_v0 v0;
-		struct nv50_disp_mthd_v1 v1;
-	} *args = data;
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nvkm_output *outp = NULL;
-	struct nvkm_output *temp;
-	u16 type, mask = 0;
-	int head, ret;
-
-	if (mthd != NV50_DISP_MTHD)
-		return -EINVAL;
-
-	nv_ioctl(object, "disp mthd size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
-			 args->v0.version, args->v0.method, args->v0.head);
-		mthd = args->v0.method;
-		head = args->v0.head;
-	} else
-	if (nvif_unpack(args->v1, 1, 1, true)) {
-		nv_ioctl(object, "disp mthd vers %d mthd %02x "
-				 "type %04x mask %04x\n",
-			 args->v1.version, args->v1.method,
-			 args->v1.hasht, args->v1.hashm);
-		mthd = args->v1.method;
-		type = args->v1.hasht;
-		mask = args->v1.hashm;
-		head = ffs((mask >> 8) & 0x0f) - 1;
-	} else
-		return ret;
-
-	if (head < 0 || head >= priv->head.nr)
-		return -ENXIO;
-
-	if (mask) {
-		list_for_each_entry(temp, &priv->base.outp, head) {
-			if ((temp->info.hasht         == type) &&
-			    (temp->info.hashm & mask) == mask) {
-				outp = temp;
-				break;
-			}
-		}
-		if (outp == NULL)
-			return -ENXIO;
-	}
-
-	switch (mthd) {
-	case NV50_DISP_SCANOUTPOS:
-		return impl->head.scanoutpos(object, priv, data, size, head);
-	default:
-		break;
-	}
-
-	switch (mthd * !!outp) {
-	case NV50_DISP_MTHD_V1_DAC_PWR:
-		return priv->dac.power(object, priv, data, size, head, outp);
-	case NV50_DISP_MTHD_V1_DAC_LOAD:
-		return priv->dac.sense(object, priv, data, size, head, outp);
-	case NV50_DISP_MTHD_V1_SOR_PWR:
-		return priv->sor.power(object, priv, data, size, head, outp);
-	case NV50_DISP_MTHD_V1_SOR_HDA_ELD:
-		if (!priv->sor.hda_eld)
-			return -ENODEV;
-		return priv->sor.hda_eld(object, priv, data, size, head, outp);
-	case NV50_DISP_MTHD_V1_SOR_HDMI_PWR:
-		if (!priv->sor.hdmi)
-			return -ENODEV;
-		return priv->sor.hdmi(object, priv, data, size, head, outp);
-	case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: {
-		union {
-			struct nv50_disp_sor_lvds_script_v0 v0;
-		} *args = data;
-		nv_ioctl(object, "disp sor lvds script size %d\n", size);
-		if (nvif_unpack(args->v0, 0, 0, false)) {
-			nv_ioctl(object, "disp sor lvds script "
-					 "vers %d name %04x\n",
-				 args->v0.version, args->v0.script);
-			priv->sor.lvdsconf = args->v0.script;
-			return 0;
-		} else
-			return ret;
-	}
-		break;
-	case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
-		struct nvkm_output_dp *outpdp = (void *)outp;
-		union {
-			struct nv50_disp_sor_dp_pwr_v0 v0;
-		} *args = data;
-		nv_ioctl(object, "disp sor dp pwr size %d\n", size);
-		if (nvif_unpack(args->v0, 0, 0, false)) {
-			nv_ioctl(object, "disp sor dp pwr vers %d state %d\n",
-				 args->v0.version, args->v0.state);
-			if (args->v0.state == 0) {
-				nvkm_notify_put(&outpdp->irq);
-				((struct nvkm_output_dp_impl *)nv_oclass(outp))
-					->lnk_pwr(outpdp, 0);
-				atomic_set(&outpdp->lt.done, 0);
-				return 0;
-			} else
-			if (args->v0.state != 0) {
-				nvkm_output_dp_train(&outpdp->base, 0, true);
-				return 0;
-			}
-		} else
-			return ret;
-	}
-		break;
-	case NV50_DISP_MTHD_V1_PIOR_PWR:
-		if (!priv->pior.power)
-			return -ENODEV;
-		return priv->pior.power(object, priv, data, size, head, outp);
-	default:
-		break;
-	}
-
-	return -EINVAL;
-}
-
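The v1 method path addresses an output by its DCB hash: hasht selects the output type, hashm carries the OR and head bits, and the head index is recovered as ffs((mask >> 8) & 0x0f) - 1 before a linear match over the output list. A sketch of that lookup (the table values are illustrative, not real DCB entries):

```c
#include <stdio.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

struct outp { uint16_t hasht, hashm; const char *name; };

static const struct outp outps[] = {
	{ 0x0048, 0x0101, "DAC0/head0" },
	{ 0x0046, 0x0204, "SOR2/head1" },
};

int main(void)
{
	uint16_t type = 0x0046, mask = 0x0204;
	int head = ffs((mask >> 8) & 0x0f) - 1;	/* head bits live in 11:8 */

	for (unsigned i = 0; i < sizeof(outps) / sizeof(outps[0]); i++) {
		if (outps[i].hasht == type &&
		    (outps[i].hashm & mask) == mask) {
			printf("head %d -> %s\n", head, outps[i].name);
			return 0;
		}
	}
	return 1;	/* -ENXIO in the driver */
}
```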
-int
-nv50_disp_main_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nv50_disp_base *base;
-	int ret;
-
-	ret = nvkm_parent_create(parent, engine, oclass, 0,
-				 priv->sclass, 0, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
-
-	return nvkm_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
-			      &base->ramht);
-}
-
-void
-nv50_disp_main_dtor(struct nvkm_object *object)
-{
-	struct nv50_disp_base *base = (void *)object;
-	nvkm_ramht_ref(NULL, &base->ramht);
-	nvkm_parent_destroy(&base->base);
-}
-
-static int
-nv50_disp_main_init(struct nvkm_object *object)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_base *base = (void *)object;
-	int ret, i;
-	u32 tmp;
-
-	ret = nvkm_parent_init(&base->base);
-	if (ret)
-		return ret;
-
-	/* The below segments of code copying values from one register to
-	 * another appear to inform EVO of the display capabilities or
-	 * something similar.  NFI what the 0x614004 caps are for..
-	 */
-	tmp = nv_rd32(priv, 0x614004);
-	nv_wr32(priv, 0x610184, tmp);
-
-	/* ... CRTC caps */
-	for (i = 0; i < priv->head.nr; i++) {
-		tmp = nv_rd32(priv, 0x616100 + (i * 0x800));
-		nv_wr32(priv, 0x610190 + (i * 0x10), tmp);
-		tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
-		nv_wr32(priv, 0x610194 + (i * 0x10), tmp);
-		tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
-		nv_wr32(priv, 0x610198 + (i * 0x10), tmp);
-		tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
-		nv_wr32(priv, 0x61019c + (i * 0x10), tmp);
-	}
-
-	/* ... DAC caps */
-	for (i = 0; i < priv->dac.nr; i++) {
-		tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
-		nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp);
-	}
-
-	/* ... SOR caps */
-	for (i = 0; i < priv->sor.nr; i++) {
-		tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
-		nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
-	}
-
-	/* ... PIOR caps */
-	for (i = 0; i < priv->pior.nr; i++) {
-		tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
-		nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
-	}
-
-	/* steal display away from vbios, or something like that */
-	if (nv_rd32(priv, 0x610024) & 0x00000100) {
-		nv_wr32(priv, 0x610024, 0x00000100);
-		nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
-		if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
-			nv_error(priv, "timeout acquiring display\n");
-			return -EBUSY;
-		}
-	}
-
-	/* point at display engine memory area (hash table, objects) */
-	nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);
-
-	/* enable supervisor interrupts, disable everything else */
-	nv_wr32(priv, 0x61002c, 0x00000370);
-	nv_wr32(priv, 0x610028, 0x00000000);
-	return 0;
-}
-
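main_init mirrors per-unit capability registers into the EVO-visible window with different strides: head caps are read at an 0x800 stride and repacked at 0x10, DAC/SOR/PIOR caps at 0x800 in and 0x04 out. A sketch of the address arithmetic only:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Head caps: four registers per head, 0x800 apart at the source,
	 * repacked 0x10 apart at the destination (as in main_init). */
	for (int i = 0; i < 2; i++) {
		uint32_t src = 0x616100 + (i * 0x800);
		uint32_t dst = 0x610190 + (i * 0x10);
		printf("head %d: %06x -> %06x\n", i, src, dst);
	}
	/* DAC caps: one register per DAC, repacked 0x04 apart. */
	for (int i = 0; i < 3; i++)
		printf("dac %d: %06x -> %06x\n", i,
		       0x61a000 + (i * 0x800), 0x6101d0 + (i * 0x04));
	return 0;
}
```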
-static int
-nv50_disp_main_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv50_disp_priv *priv = (void *)object->engine;
-	struct nv50_disp_base *base = (void *)object;
-
-	/* disable all interrupts */
-	nv_wr32(priv, 0x610024, 0x00000000);
-	nv_wr32(priv, 0x610020, 0x00000000);
-
-	return nvkm_parent_fini(&base->base, suspend);
-}
-
-struct nvkm_ofuncs
-nv50_disp_main_ofuncs = {
-	.ctor = nv50_disp_main_ctor,
-	.dtor = nv50_disp_main_dtor,
-	.init = nv50_disp_main_init,
-	.fini = nv50_disp_main_fini,
-	.mthd = nv50_disp_main_mthd,
-	.ntfy = nvkm_disp_ntfy,
-};
-
-static struct nvkm_oclass
-nv50_disp_main_oclass[] = {
-	{ NV50_DISP, &nv50_disp_main_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-nv50_disp_sclass[] = {
-	{ NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
-	{ NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
-	{ NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
-	{ NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
-	{ NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
-	{}
-};
-
-/*******************************************************************************
- * Display context, tracks instmem allocation and prevents more than one
- * client using the display hardware at any time.
- ******************************************************************************/
-
-static int
-nv50_disp_data_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	struct nv50_disp_priv *priv = (void *)engine;
-	struct nvkm_engctx *ectx;
-	int ret = -EBUSY;
-
-	/* no context needed for channel objects... */
-	if (nv_mclass(parent) != NV_DEVICE) {
-		atomic_inc(&parent->refcount);
-		*pobject = parent;
-		return 1;
-	}
-
-	/* allocate display hardware to client */
-	mutex_lock(&nv_subdev(priv)->mutex);
-	if (list_empty(&nv_engine(priv)->contexts)) {
-		ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0x10000,
-					 0x10000, NVOBJ_FLAG_HEAP, &ectx);
-		*pobject = nv_object(ectx);
-	}
-	mutex_unlock(&nv_subdev(priv)->mutex);
-	return ret;
-}
-
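nv50_disp_data_ctor enforces single ownership of the display hardware: a context is only allocated while the engine's context list is empty, under the subdev mutex, so a second client sees -EBUSY. A pthread sketch of the same acquire-or-busy idiom (names hypothetical):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int owners;	/* stands in for the engine's context list */

/* Return 0 on success, -1 (EBUSY in the driver) if already owned. */
static int disp_acquire(void)
{
	int ret = -1;
	pthread_mutex_lock(&lock);
	if (owners == 0) {
		owners = 1;	/* the driver allocates the engctx here */
		ret = 0;
	}
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("first: %d\n", disp_acquire());	/* 0 */
	printf("second: %d\n", disp_acquire());	/* -1: display busy */
	return 0;
}
```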
-struct nvkm_oclass
-nv50_disp_cclass = {
-	.handle = NV_ENGCTX(DISP, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_disp_data_ctor,
-		.dtor = _nvkm_engctx_dtor,
-		.init = _nvkm_engctx_init,
-		.fini = _nvkm_engctx_fini,
-		.rd32 = _nvkm_engctx_rd32,
-		.wr32 = _nvkm_engctx_wr32,
-	},
-};
-
-/*******************************************************************************
- * Display engine implementation
- ******************************************************************************/
-
-static void
-nv50_disp_vblank_fini(struct nvkm_event *event, int type, int head)
-{
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_mask(disp, 0x61002c, (4 << head), 0);
-}
-
-static void
-nv50_disp_vblank_init(struct nvkm_event *event, int type, int head)
-{
-	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
-	nv_mask(disp, 0x61002c, (4 << head), (4 << head));
-}
-
-const struct nvkm_event_func
-nv50_disp_vblank_func = {
-	.ctor = nvkm_disp_vblank_ctor,
-	.init = nv50_disp_vblank_init,
-	.fini = nv50_disp_vblank_fini,
-};
-
 static const struct nvkm_enum
 nv50_disp_intr_error_type[] = {
 	{ 3, "ILLEGAL_MTHD" },
@@ -1323,70 +190,46 @@
 };
 
 static void
-nv50_disp_intr_error(struct nv50_disp_priv *priv, int chid)
+nv50_disp_intr_error(struct nv50_disp *disp, int chid)
 {
-	struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
-	u32 data = nv_rd32(priv, 0x610084 + (chid * 0x08));
-	u32 addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
+	u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
 	u32 code = (addr & 0x00ff0000) >> 16;
 	u32 type = (addr & 0x00007000) >> 12;
 	u32 mthd = (addr & 0x00000ffc);
 	const struct nvkm_enum *ec, *et;
-	char ecunk[6], etunk[6];
 
 	et = nvkm_enum_find(nv50_disp_intr_error_type, type);
-	if (!et)
-		snprintf(etunk, sizeof(etunk), "UNK%02X", type);
-
 	ec = nvkm_enum_find(nv50_disp_intr_error_code, code);
-	if (!ec)
-		snprintf(ecunk, sizeof(ecunk), "UNK%02X", code);
 
-	nv_error(priv, "%s [%s] chid %d mthd 0x%04x data 0x%08x\n",
-		 et ? et->name : etunk, ec ? ec->name : ecunk,
-		 chid, mthd, data);
+	nvkm_error(subdev,
+		   "ERROR %d [%s] %02x [%s] chid %d mthd %04x data %08x\n",
+		   type, et ? et->name : "", code, ec ? ec->name : "",
+		   chid, mthd, data);
 
-	if (chid == 0) {
+	if (chid < ARRAY_SIZE(disp->chan)) {
 		switch (mthd) {
 		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
-					    impl->mthd.core);
-			break;
-		default:
-			break;
-		}
-	} else
-	if (chid <= 2) {
-		switch (mthd) {
-		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
-					    impl->mthd.base);
-			break;
-		default:
-			break;
-		}
-	} else
-	if (chid <= 4) {
-		switch (mthd) {
-		case 0x0080:
-			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 3,
-					    impl->mthd.ovly);
+			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
 			break;
 		default:
 			break;
 		}
 	}
 
-	nv_wr32(priv, 0x610020, 0x00010000 << chid);
-	nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
+	nvkm_wr32(device, 0x610020, 0x00010000 << chid);
+	nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
 }
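The reworked error handler decodes a single 32-bit error-address word: the error code sits in bits 23:16, the type in 14:12, and the failing method in 11:2 (addr & 0xffc). A decode sketch with a made-up sample word:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t addr = 0x00803084;	/* made-up sample error word */
	uint32_t code = (addr & 0x00ff0000) >> 16;
	uint32_t type = (addr & 0x00007000) >> 12;
	uint32_t mthd = (addr & 0x00000ffc);

	/* type 3 is ILLEGAL_MTHD in nv50_disp_intr_error_type above */
	printf("type %u code %02x mthd %04x\n", type, code, mthd);
	return 0;
}
```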
 
 static struct nvkm_output *
-exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
+exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
 	    u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
 	    struct nvbios_outp *info)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvkm_output *outp;
 	u16 mask, type;
 
@@ -1403,7 +246,7 @@
 		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
 		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
 		default:
-			nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+			nvkm_error(subdev, "unknown SOR mc %08x\n", ctrl);
 			return NULL;
 		}
 		or  -= 4;
@@ -1412,9 +255,9 @@
 		type = 0x0010;
 		mask = 0;
 		switch (ctrl & 0x00000f00) {
-		case 0x00000000: type |= priv->pior.type[or]; break;
+		case 0x00000000: type |= disp->pior.type[or]; break;
 		default:
-			nv_error(priv, "unknown PIOR mc 0x%08x\n", ctrl);
+			nvkm_error(subdev, "unknown PIOR mc %08x\n", ctrl);
 			return NULL;
 		}
 	}
@@ -1423,7 +266,7 @@
 	mask |= 0x0001 << or;
 	mask |= 0x0100 << head;
 
-	list_for_each_entry(outp, &priv->base.outp, head) {
+	list_for_each_entry(outp, &disp->base.outp, head) {
 		if ((outp->info.hasht & 0xff) == type &&
 		    (outp->info.hashm & mask) == mask) {
 			*data = nvbios_outp_match(bios, outp->info.hasht,
@@ -1439,9 +282,11 @@
 }
 
 static struct nvkm_output *
-exec_script(struct nv50_disp_priv *priv, int head, int id)
+exec_script(struct nv50_disp *disp, int head, int id)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct nvkm_output *outp;
 	struct nvbios_outp info;
 	u8  ver, hdr, cnt, len;
@@ -1450,27 +295,27 @@
 	int i;
 
 	/* DAC */
-	for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++)
-		ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
+	for (i = 0; !(ctrl & (1 << head)) && i < disp->func->dac.nr; i++)
+		ctrl = nvkm_rd32(device, 0x610b5c + (i * 8));
 
 	/* SOR */
 	if (!(ctrl & (1 << head))) {
-		if (nv_device(priv)->chipset  < 0x90 ||
-		    nv_device(priv)->chipset == 0x92 ||
-		    nv_device(priv)->chipset == 0xa0) {
+		if (device->chipset  < 0x90 ||
+		    device->chipset == 0x92 ||
+		    device->chipset == 0xa0) {
 			reg = 0x610b74;
 		} else {
 			reg = 0x610798;
 		}
-		for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++)
-			ctrl = nv_rd32(priv, reg + (i * 8));
+		for (i = 0; !(ctrl & (1 << head)) && i < disp->func->sor.nr; i++)
+			ctrl = nvkm_rd32(device, reg + (i * 8));
 		i += 4;
 	}
 
 	/* PIOR */
 	if (!(ctrl & (1 << head))) {
-		for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++)
-			ctrl = nv_rd32(priv, 0x610b84 + (i * 8));
+		for (i = 0; !(ctrl & (1 << head)) && i < disp->func->pior.nr; i++)
+			ctrl = nvkm_rd32(device, 0x610b84 + (i * 8));
 		i += 8;
 	}
 
@@ -1478,10 +323,10 @@
 		return NULL;
 	i--;
 
-	outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
+	outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
 	if (outp) {
 		struct nvbios_init init = {
-			.subdev = nv_subdev(priv),
+			.subdev = subdev,
 			.bios = bios,
 			.offset = info.script[id],
 			.outp = &outp->info,
@@ -1496,9 +341,11 @@
 }
 
 static struct nvkm_output *
-exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
+exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct nvkm_output *outp;
 	struct nvbios_outp info1;
 	struct nvbios_ocfg info2;
@@ -1508,27 +355,27 @@
 	int i;
 
 	/* DAC */
-	for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++)
-		ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
+	for (i = 0; !(ctrl & (1 << head)) && i < disp->func->dac.nr; i++)
+		ctrl = nvkm_rd32(device, 0x610b58 + (i * 8));
 
 	/* SOR */
 	if (!(ctrl & (1 << head))) {
-		if (nv_device(priv)->chipset  < 0x90 ||
-		    nv_device(priv)->chipset == 0x92 ||
-		    nv_device(priv)->chipset == 0xa0) {
+		if (device->chipset  < 0x90 ||
+		    device->chipset == 0x92 ||
+		    device->chipset == 0xa0) {
 			reg = 0x610b70;
 		} else {
 			reg = 0x610794;
 		}
-		for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++)
-			ctrl = nv_rd32(priv, reg + (i * 8));
+		for (i = 0; !(ctrl & (1 << head)) && i < disp->func->sor.nr; i++)
+			ctrl = nvkm_rd32(device, reg + (i * 8));
 		i += 4;
 	}
 
 	/* PIOR */
 	if (!(ctrl & (1 << head))) {
-		for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++)
-			ctrl = nv_rd32(priv, 0x610b80 + (i * 8));
+		for (i = 0; !(ctrl & (1 << head)) && i < disp->func->pior.nr; i++)
+			ctrl = nvkm_rd32(device, 0x610b80 + (i * 8));
 		i += 8;
 	}
 
@@ -1536,7 +383,7 @@
 		return NULL;
 	i--;
 
-	outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
+	outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
 	if (!outp)
 		return NULL;
 
@@ -1548,7 +395,7 @@
 				*conf |= 0x0100;
 			break;
 		case DCB_OUTPUT_LVDS:
-			*conf = priv->sor.lvdsconf;
+			*conf = disp->sor.lvdsconf;
 			break;
 		case DCB_OUTPUT_DP:
 			*conf = (ctrl & 0x00000f00) >> 8;
@@ -1568,7 +415,7 @@
 		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
 		if (data) {
 			struct nvbios_init init = {
-				.subdev = nv_subdev(priv),
+				.subdev = subdev,
 				.bios = bios,
 				.offset = data,
 				.outp = &outp->info,
@@ -1584,15 +431,16 @@
 }
 
 static void
-nv50_disp_intr_unk10_0(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
 {
-	exec_script(priv, head, 1);
+	exec_script(disp, head, 1);
 }
 
 static void
-nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk20_0(struct nv50_disp *disp, int head)
 {
-	struct nvkm_output *outp = exec_script(priv, head, 2);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_output *outp = exec_script(disp, head, 2);
 
 	/* the binary driver does this outside of the supervisor handling
 	 * (after the third supervisor from a detach).  we (currently?)
@@ -1608,10 +456,10 @@
 	 * in a blank screen (SOR_PWR off/on can restore it)
 	 */
 	if (outp && outp->info.type == DCB_OUTPUT_DP) {
-		struct nvkm_output_dp *outpdp = (void *)outp;
+		struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
 		struct nvbios_init init = {
-			.subdev = nv_subdev(priv),
-			.bios = nvkm_bios(priv),
+			.subdev = subdev,
+			.bios = subdev->device->bios,
 			.outp = &outp->info,
 			.crtc = head,
 			.offset = outpdp->info.script[4],
@@ -1624,29 +472,32 @@
 }
 
 static void
-nv50_disp_intr_unk20_1(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk20_1(struct nv50_disp *disp, int head)
 {
-	struct nvkm_devinit *devinit = nvkm_devinit(priv);
-	u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	struct nvkm_devinit *devinit = device->devinit;
+	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
 	if (pclk)
-		devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
+		nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head, pclk);
 }
 
 static void
-nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv, int head,
+nv50_disp_intr_unk20_2_dp(struct nv50_disp *disp, int head,
 			  struct dcb_output *outp, u32 pclk)
 {
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	const int link = !(outp->sorconf.link & 1);
 	const int   or = ffs(outp->or) - 1;
 	const u32 soff = (  or * 0x800);
 	const u32 loff = (link * 0x080) + soff;
-	const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8));
+	const u32 ctrl = nvkm_rd32(device, 0x610794 + (or * 8));
 	const u32 symbol = 100000;
-	const s32 vactive = nv_rd32(priv, 0x610af8 + (head * 0x540)) & 0xffff;
-	const s32 vblanke = nv_rd32(priv, 0x610ae8 + (head * 0x540)) & 0xffff;
-	const s32 vblanks = nv_rd32(priv, 0x610af0 + (head * 0x540)) & 0xffff;
-	u32 dpctrl = nv_rd32(priv, 0x61c10c + loff);
-	u32 clksor = nv_rd32(priv, 0x614300 + soff);
+	const s32 vactive = nvkm_rd32(device, 0x610af8 + (head * 0x540)) & 0xffff;
+	const s32 vblanke = nvkm_rd32(device, 0x610ae8 + (head * 0x540)) & 0xffff;
+	const s32 vblanks = nvkm_rd32(device, 0x610af0 + (head * 0x540)) & 0xffff;
+	u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
+	u32 clksor = nvkm_rd32(device, 0x614300 + soff);
 	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
 	int TU, VTUi, VTUf, VTUa;
 	u64 link_data_rate, link_ratio, unk;
@@ -1662,14 +513,14 @@
 	value = value * link_bw;
 	do_div(value, pclk);
 	value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
-	nv_mask(priv, 0x61c1e8 + soff, 0x0000ffff, value);
+	nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, value);
 
 	/* symbols/vblank - algorithm taken from comments in tegra driver */
 	value = vblanks - vblanke - 25;
 	value = value * link_bw;
 	do_div(value, pclk);
 	value = value - ((36 / link_nr) + 3) - 1;
-	nv_mask(priv, 0x61c1ec + soff, 0x00ffffff, value);
+	nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, value);
 
 	/* watermark / activesym */
 	if      ((ctrl & 0xf0000) == 0x60000) bits = 30;
@@ -1734,7 +585,7 @@
 	}
 
 	if (!bestTU) {
-		nv_error(priv, "unable to find suitable dp config\n");
+		nvkm_error(subdev, "unable to find suitable dp config\n");
 		return;
 	}
 
@@ -1745,22 +596,23 @@
 	do_div(unk, symbol);
 	unk += 6;
 
-	nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
-	nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
+	nvkm_mask(device, 0x61c10c + loff, 0x000001fc, bestTU << 2);
+	nvkm_mask(device, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
 						   bestVTUf << 16 |
 						   bestVTUi << 8 | unk);
 }
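The DP setup above computes how many link symbols fit in a blanking period: a pixel count is scaled by link_bw/pclk with 64-bit do_div arithmetic, then fixed protocol overhead is subtracted (3 symbols when enhanced framing is on, plus 12/link_nr for the horizontal case). A sketch with plain uint64_t division; the pre-scaled pixel count comes from the elided part of the function and is assumed here, and the clock numbers are made up:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t link_bw = 270000;	/* kHz per lane (HBR); assumed */
	uint64_t pclk    = 148500;	/* kHz pixel clock; assumed */
	int link_nr      = 4;		/* lane count */
	int enhanced     = 1;		/* dpctrl & 0x00004000 above */
	uint64_t value   = 238;		/* blanking pixels; assumed input */

	/* scale pixels to link symbols, then subtract framing overhead,
	 * mirroring the do_div() sequence above */
	value = value * link_bw / pclk;
	value -= 3 * !!enhanced + 12 / link_nr;
	printf("hblank symbols: %llu\n", (unsigned long long)value);
	return 0;
}
```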
 
 static void
-nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	struct nvkm_output *outp;
-	u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
 	u32 hval, hreg = 0x614200 + (head * 0x800);
 	u32 oval, oreg;
 	u32 mask, conf;
 
-	outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
+	outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
 	if (!outp)
 		return;
 
@@ -1787,10 +639,10 @@
 		u32 ctrl, datarate;
 
 		if (outp->info.location == 0) {
-			ctrl = nv_rd32(priv, 0x610794 + soff);
+			ctrl = nvkm_rd32(device, 0x610794 + soff);
 			soff = 1;
 		} else {
-			ctrl = nv_rd32(priv, 0x610b80 + soff);
+			ctrl = nvkm_rd32(device, 0x610b80 + soff);
 			soff = 2;
 		}
 
@@ -1804,10 +656,10 @@
 		}
 
 		if (nvkm_output_dp_train(outp, datarate / soff, true))
-			ERR("link not trained before attach\n");
+			OUTP_ERR(outp, "link not trained before attach");
 	}
 
-	exec_clkcmp(priv, head, 0, pclk, &conf);
+	exec_clkcmp(disp, head, 0, pclk, &conf);
 
 	if (!outp->info.location && outp->info.type == DCB_OUTPUT_ANALOG) {
 		oreg = 0x614280 + (ffs(outp->info.or) - 1) * 0x800;
@@ -1817,7 +669,7 @@
 	} else
 	if (!outp->info.location) {
 		if (outp->info.type == DCB_OUTPUT_DP)
-			nv50_disp_intr_unk20_2_dp(priv, head, &outp->info, pclk);
+			nv50_disp_intr_unk20_2_dp(disp, head, &outp->info, pclk);
 		oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800;
 		oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
 		hval = 0x00000000;
@@ -1829,8 +681,8 @@
 		mask = 0x00000707;
 	}
 
-	nv_mask(priv, hreg, 0x0000000f, hval);
-	nv_mask(priv, oreg, mask, oval);
+	nvkm_mask(device, hreg, 0x0000000f, hval);
+	nvkm_mask(device, oreg, mask, oval);
 }
 
 /* If programming a TMDS output on a SOR that can also be configured for
@@ -1842,10 +694,11 @@
  * programmed for DisplayPort.
  */
 static void
-nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv,
+nv50_disp_intr_unk40_0_tmds(struct nv50_disp *disp,
 			    struct dcb_output *outp)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	const int link = !(outp->sorconf.link & 1);
 	const int   or = ffs(outp->or) - 1;
 	const u32 loff = (or * 0x800) + (link * 0x80);
@@ -1854,166 +707,136 @@
 	u8  ver, hdr;
 
 	if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
-		nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
+		nvkm_mask(device, 0x61c10c + loff, 0x00000001, 0x00000000);
 }
 
 static void
-nv50_disp_intr_unk40_0(struct nv50_disp_priv *priv, int head)
+nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	struct nvkm_output *outp;
-	u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+	u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
 	u32 conf;
 
-	outp = exec_clkcmp(priv, head, 1, pclk, &conf);
+	outp = exec_clkcmp(disp, head, 1, pclk, &conf);
 	if (!outp)
 		return;
 
 	if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
-		nv50_disp_intr_unk40_0_tmds(priv, &outp->info);
+		nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
 }
 
 void
 nv50_disp_intr_supervisor(struct work_struct *work)
 {
-	struct nv50_disp_priv *priv =
-		container_of(work, struct nv50_disp_priv, supervisor);
-	struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
-	u32 super = nv_rd32(priv, 0x610030);
+	struct nv50_disp *disp =
+		container_of(work, struct nv50_disp, supervisor);
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 super = nvkm_rd32(device, 0x610030);
 	int head;
 
-	nv_debug(priv, "supervisor 0x%08x 0x%08x\n", priv->super, super);
+	nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super, super);
 
-	if (priv->super & 0x00000010) {
-		nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
-		for (head = 0; head < priv->head.nr; head++) {
+	if (disp->super & 0x00000010) {
+		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
+		for (head = 0; head < disp->base.head.nr; head++) {
 			if (!(super & (0x00000020 << head)))
 				continue;
 			if (!(super & (0x00000080 << head)))
 				continue;
-			nv50_disp_intr_unk10_0(priv, head);
+			nv50_disp_intr_unk10_0(disp, head);
 		}
 	} else
-	if (priv->super & 0x00000020) {
-		for (head = 0; head < priv->head.nr; head++) {
+	if (disp->super & 0x00000020) {
+		for (head = 0; head < disp->base.head.nr; head++) {
 			if (!(super & (0x00000080 << head)))
 				continue;
-			nv50_disp_intr_unk20_0(priv, head);
+			nv50_disp_intr_unk20_0(disp, head);
 		}
-		for (head = 0; head < priv->head.nr; head++) {
+		for (head = 0; head < disp->base.head.nr; head++) {
 			if (!(super & (0x00000200 << head)))
 				continue;
-			nv50_disp_intr_unk20_1(priv, head);
+			nv50_disp_intr_unk20_1(disp, head);
 		}
-		for (head = 0; head < priv->head.nr; head++) {
+		for (head = 0; head < disp->base.head.nr; head++) {
 			if (!(super & (0x00000080 << head)))
 				continue;
-			nv50_disp_intr_unk20_2(priv, head);
+			nv50_disp_intr_unk20_2(disp, head);
 		}
 	} else
-	if (priv->super & 0x00000040) {
-		for (head = 0; head < priv->head.nr; head++) {
+	if (disp->super & 0x00000040) {
+		for (head = 0; head < disp->base.head.nr; head++) {
 			if (!(super & (0x00000080 << head)))
 				continue;
-			nv50_disp_intr_unk40_0(priv, head);
+			nv50_disp_intr_unk40_0(disp, head);
 		}
 	}
 
-	nv_wr32(priv, 0x610030, 0x80000000);
+	nvkm_wr32(device, 0x610030, 0x80000000);
 }
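The supervisor work runs in three mutually exclusive stages selected by the latched disp->super bits: 0x10 runs the pre-detach scripts, 0x20 reprograms clocks and outputs, 0x40 runs the post-update scripts, each gated per head by bits read back from 0x610030 (0x20 << head, 0x80 << head, 0x200 << head). A dispatch sketch with made-up status words:

```c
#include <stdio.h>
#include <stdint.h>

#define HEADS 2

static void stage(const char *name, int head)
{
	printf("%s head %d\n", name, head);
}

int main(void)
{
	uint32_t super_req = 0x20;	/* latched from 0x610024; made up */
	uint32_t super_sts = 0x2a0;	/* read from 0x610030; made up */

	for (int head = 0; head < HEADS; head++) {
		if (super_req & 0x10) {
			if ((super_sts & (0x20 << head)) &&
			    (super_sts & (0x80 << head)))
				stage("unk10_0", head);
		} else if (super_req & 0x20) {
			if (super_sts & (0x80 << head))
				stage("unk20_0/2", head);
			if (super_sts & (0x200 << head))
				stage("unk20_1", head);
		} else if (super_req & 0x40) {
			if (super_sts & (0x80 << head))
				stage("unk40_0", head);
		}
	}
	/* the driver then acks with a write of 0x80000000 to 0x610030 */
	return 0;
}
```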
 
 void
-nv50_disp_intr(struct nvkm_subdev *subdev)
+nv50_disp_intr(struct nv50_disp *disp)
 {
-	struct nv50_disp_priv *priv = (void *)subdev;
-	u32 intr0 = nv_rd32(priv, 0x610020);
-	u32 intr1 = nv_rd32(priv, 0x610024);
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	u32 intr0 = nvkm_rd32(device, 0x610020);
+	u32 intr1 = nvkm_rd32(device, 0x610024);
 
 	while (intr0 & 0x001f0000) {
 		u32 chid = __ffs(intr0 & 0x001f0000) - 16;
-		nv50_disp_intr_error(priv, chid);
+		nv50_disp_intr_error(disp, chid);
 		intr0 &= ~(0x00010000 << chid);
 	}
 
 	while (intr0 & 0x0000001f) {
 		u32 chid = __ffs(intr0 & 0x0000001f);
-		nv50_disp_chan_uevent_send(priv, chid);
+		nv50_disp_chan_uevent_send(disp, chid);
 		intr0 &= ~(0x00000001 << chid);
 	}
 
 	if (intr1 & 0x00000004) {
-		nvkm_disp_vblank(&priv->base, 0);
-		nv_wr32(priv, 0x610024, 0x00000004);
-		intr1 &= ~0x00000004;
+		nvkm_disp_vblank(&disp->base, 0);
+		nvkm_wr32(device, 0x610024, 0x00000004);
 	}
 
 	if (intr1 & 0x00000008) {
-		nvkm_disp_vblank(&priv->base, 1);
-		nv_wr32(priv, 0x610024, 0x00000008);
-		intr1 &= ~0x00000008;
+		nvkm_disp_vblank(&disp->base, 1);
+		nvkm_wr32(device, 0x610024, 0x00000008);
 	}
 
 	if (intr1 & 0x00000070) {
-		priv->super = (intr1 & 0x00000070);
-		schedule_work(&priv->supervisor);
-		nv_wr32(priv, 0x610024, priv->super);
-		intr1 &= ~0x00000070;
+		disp->super = (intr1 & 0x00000070);
+		schedule_work(&disp->supervisor);
+		nvkm_wr32(device, 0x610024, disp->super);
 	}
 }
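nv50_disp_intr demuxes two status words: channel-error bits live at 20:16 and channel uevents at 4:0 of 0x610020, while per-head vblank (bits 2/3) and supervisor requests (bits 6:4) arrive via 0x610024. A sketch of the consume-lowest-bit loops, using __builtin_ctz in place of __ffs:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t intr0 = 0x00050003;	/* made-up status word */

	/* channel errors: one bit per chid in 20:16 */
	while (intr0 & 0x001f0000) {
		int chid = __builtin_ctz(intr0 & 0x001f0000) - 16;
		printf("error on chid %d\n", chid);
		intr0 &= ~(0x00010000u << chid);
	}

	/* channel uevents: one bit per chid in 4:0 */
	while (intr0 & 0x0000001f) {
		int chid = __builtin_ctz(intr0 & 0x0000001f);
		printf("uevent on chid %d\n", chid);
		intr0 &= ~(1u << chid);
	}
	return 0;
}
```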
 
-static int
-nv50_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv50_disp_priv *priv;
-	int ret;
-
-	ret = nvkm_disp_create(parent, engine, oclass, 2, "PDISP",
-			       "display", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&nv50_disp_chan_uevent, 1, 9, &priv->uevent);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->sclass = nv50_disp_main_oclass;
-	nv_engine(priv)->cclass = &nv50_disp_cclass;
-	nv_subdev(priv)->intr = nv50_disp_intr;
-	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
-	priv->sclass = nv50_disp_sclass;
-	priv->head.nr = 2;
-	priv->dac.nr = 3;
-	priv->sor.nr = 2;
-	priv->pior.nr = 3;
-	priv->dac.power = nv50_dac_power;
-	priv->dac.sense = nv50_dac_sense;
-	priv->sor.power = nv50_sor_power;
-	priv->pior.power = nv50_pior_power;
-	return 0;
-}
-
-struct nvkm_oclass *
-nv50_disp_outp_sclass[] = {
-	&nv50_pior_dp_impl.base.base,
-	NULL
+static const struct nv50_disp_func
+nv50_disp = {
+	.intr = nv50_disp_intr,
+	.uevent = &nv50_disp_chan_uevent,
+	.super = nv50_disp_intr_supervisor,
+	.root = &nv50_disp_root_oclass,
+	.head.vblank_init = nv50_disp_vblank_init,
+	.head.vblank_fini = nv50_disp_vblank_fini,
+	.head.scanoutpos = nv50_disp_root_scanoutpos,
+	.outp.internal.crt = nv50_dac_output_new,
+	.outp.internal.tmds = nv50_sor_output_new,
+	.outp.internal.lvds = nv50_sor_output_new,
+	.outp.external.tmds = nv50_pior_output_new,
+	.outp.external.dp = nv50_pior_dp_new,
+	.dac.nr = 3,
+	.dac.power = nv50_dac_power,
+	.dac.sense = nv50_dac_sense,
+	.sor.nr = 2,
+	.sor.power = nv50_sor_power,
+	.pior.nr = 3,
+	.pior.power = nv50_pior_power,
 };
 
-struct nvkm_oclass *
-nv50_disp_oclass = &(struct nv50_disp_impl) {
-	.base.base.handle = NV_ENGINE(DISP, 0x50),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_disp_ctor,
-		.dtor = _nvkm_disp_dtor,
-		.init = _nvkm_disp_init,
-		.fini = _nvkm_disp_fini,
-	},
-	.base.vblank = &nv50_disp_vblank_func,
-	.base.outp =  nv50_disp_outp_sclass,
-	.mthd.core = &nv50_disp_core_mthd_chan,
-	.mthd.base = &nv50_disp_base_mthd_chan,
-	.mthd.ovly = &nv50_disp_ovly_mthd_chan,
-	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_main_scanoutpos,
-}.base.base;
+int
+nv50_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+	return nv50_disp_new_(&nv50_disp, device, index, 2, pdisp);
+}
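The net effect of the rewrite is visible right here: the old oclass/ofuncs constructor boilerplate collapses into one const nv50_disp_func table, and each chipset's entry point shrinks to a single call that binds its table to a shared constructor. A sketch of that shape with hypothetical names:

```c
#include <stdio.h>
#include <stdlib.h>

struct disp;
struct disp_func {
	void (*intr)(struct disp *);
	int dac_nr, sor_nr, pior_nr;
};

struct disp { const struct disp_func *func; int heads; };

/* shared constructor: every chipset variant funnels through here */
static int disp_new_(const struct disp_func *func, int heads,
		     struct disp **pdisp)
{
	struct disp *disp = calloc(1, sizeof(*disp));
	if (!disp)
		return -1;
	disp->func = func;
	disp->heads = heads;
	*pdisp = disp;
	return 0;
}

static void my_intr(struct disp *disp) { (void)disp; }

static const struct disp_func my_disp = {
	.intr = my_intr, .dac_nr = 3, .sor_nr = 2, .pior_nr = 3,
};

int main(void)
{
	struct disp *disp;
	if (disp_new_(&my_disp, 2, &disp))
		return 1;
	printf("heads %d dacs %d\n", disp->heads, disp->func->dac_nr);
	free(disp);
	return 0;
}
```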
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index b4ed620..aecebd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -1,17 +1,18 @@
 #ifndef __NV50_DISP_H__
 #define __NV50_DISP_H__
+#define nv50_disp(p) container_of((p), struct nv50_disp, base)
 #include "priv.h"
 struct nvkm_output;
 struct nvkm_output_dp;
 
 #define NV50_DISP_MTHD_ struct nvkm_object *object,                            \
-	struct nv50_disp_priv *priv, void *data, u32 size
+	struct nv50_disp *disp, void *data, u32 size
 #define NV50_DISP_MTHD_V0 NV50_DISP_MTHD_, int head
 #define NV50_DISP_MTHD_V1 NV50_DISP_MTHD_, int head, struct nvkm_output *outp
 
-struct nv50_disp_priv {
+struct nv50_disp {
+	const struct nv50_disp_func *func;
 	struct nvkm_disp base;
-	struct nvkm_oclass *sclass;
 
 	struct work_struct supervisor;
 	u32 super;
@@ -19,208 +20,98 @@
 	struct nvkm_event uevent;
 
 	struct {
-		int nr;
-	} head;
-	struct {
-		int nr;
-		int (*power)(NV50_DISP_MTHD_V1);
-		int (*sense)(NV50_DISP_MTHD_V1);
-	} dac;
-	struct {
-		int nr;
-		int (*power)(NV50_DISP_MTHD_V1);
-		int (*hda_eld)(NV50_DISP_MTHD_V1);
-		int (*hdmi)(NV50_DISP_MTHD_V1);
 		u32 lvdsconf;
-		void (*magic)(struct nvkm_output *);
 	} sor;
+
 	struct {
-		int nr;
-		int (*power)(NV50_DISP_MTHD_V1);
 		u8 type[3];
 	} pior;
+
+	struct nv50_disp_chan *chan[17];
 };
 
-struct nv50_disp_impl {
-	struct nvkm_disp_impl base;
-	struct {
-		const struct nv50_disp_mthd_chan *core;
-		const struct nv50_disp_mthd_chan *base;
-		const struct nv50_disp_mthd_chan *ovly;
-		int prev;
-	} mthd;
-	struct {
-		int (*scanoutpos)(NV50_DISP_MTHD_V0);
-	} head;
-};
+int nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0);
 
-int nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
-int nv50_disp_main_mthd(struct nvkm_object *, u32, void *, u32);
-
-int gf110_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
+int gf119_disp_root_scanoutpos(NV50_DISP_MTHD_V0);
 
 int nv50_dac_power(NV50_DISP_MTHD_V1);
 int nv50_dac_sense(NV50_DISP_MTHD_V1);
 
 int gt215_hda_eld(NV50_DISP_MTHD_V1);
-int gf110_hda_eld(NV50_DISP_MTHD_V1);
+int gf119_hda_eld(NV50_DISP_MTHD_V1);
 
 int g84_hdmi_ctrl(NV50_DISP_MTHD_V1);
 int gt215_hdmi_ctrl(NV50_DISP_MTHD_V1);
-int gf110_hdmi_ctrl(NV50_DISP_MTHD_V1);
+int gf119_hdmi_ctrl(NV50_DISP_MTHD_V1);
 int gk104_hdmi_ctrl(NV50_DISP_MTHD_V1);
 
 int nv50_sor_power(NV50_DISP_MTHD_V1);
 int nv50_pior_power(NV50_DISP_MTHD_V1);
 
-#include <core/parent.h>
+int nv50_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
+		   int index, int heads, struct nvkm_disp **);
+int gf119_disp_new_(const struct nv50_disp_func *, struct nvkm_device *,
+		    int index, struct nvkm_disp **);
 
-struct nv50_disp_base {
-	struct nvkm_parent base;
-	struct nvkm_ramht *ramht;
-	u32 chan;
+struct nv50_disp_func_outp {
+	int (* crt)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*  tv)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*tmds)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*lvds)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*  dp)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
 };
 
-struct nv50_disp_chan_impl {
-	struct nvkm_ofuncs base;
-	int chid;
-	int  (*attach)(struct nvkm_object *, struct nvkm_object *, u32);
-	void (*detach)(struct nvkm_object *, int);
-};
+struct nv50_disp_func {
+	void (*intr)(struct nv50_disp *);
 
-#include <core/namedb.h>
+	const struct nvkm_event_func *uevent;
+	void (*super)(struct work_struct *);
 
-struct nv50_disp_chan {
-	struct nvkm_namedb base;
-	int chid;
-};
+	const struct nvkm_disp_oclass *root;
 
-int  nv50_disp_chan_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
-int  nv50_disp_chan_map(struct nvkm_object *, u64 *, u32 *);
-u32  nv50_disp_chan_rd32(struct nvkm_object *, u64);
-void nv50_disp_chan_wr32(struct nvkm_object *, u64, u32);
-extern const struct nvkm_event_func nv50_disp_chan_uevent;
-int  nv50_disp_chan_uevent_ctor(struct nvkm_object *, void *, u32,
-				struct nvkm_notify *);
-void nv50_disp_chan_uevent_send(struct nv50_disp_priv *, int);
-
-extern const struct nvkm_event_func gf110_disp_chan_uevent;
-
-#define nv50_disp_chan_init(a)                                                 \
-	nvkm_namedb_init(&(a)->base)
-#define nv50_disp_chan_fini(a,b)                                               \
-	nvkm_namedb_fini(&(a)->base, (b))
-
-struct nv50_disp_dmac {
-	struct nv50_disp_chan base;
-	struct nvkm_dmaobj *pushdma;
-	u32 push;
-};
-
-void nv50_disp_dmac_dtor(struct nvkm_object *);
-
-struct nv50_disp_pioc {
-	struct nv50_disp_chan base;
-};
-
-void nv50_disp_pioc_dtor(struct nvkm_object *);
-
-struct nv50_disp_mthd_list {
-	u32 mthd;
-	u32 addr;
 	struct {
-		u32 mthd;
-		u32 addr;
-		const char *name;
-	} data[];
-};
+		void (*vblank_init)(struct nv50_disp *, int head);
+		void (*vblank_fini)(struct nv50_disp *, int head);
+		int (*scanoutpos)(NV50_DISP_MTHD_V0);
+	} head;
 
-struct nv50_disp_mthd_chan {
-	const char *name;
-	u32 addr;
 	struct {
-		const char *name;
+		const struct nv50_disp_func_outp internal;
+		const struct nv50_disp_func_outp external;
+	} outp;
+
+	struct {
 		int nr;
-		const struct nv50_disp_mthd_list *mthd;
-	} data[];
+		int (*power)(NV50_DISP_MTHD_V1);
+		int (*sense)(NV50_DISP_MTHD_V1);
+	} dac;
+
+	struct {
+		int nr;
+		int (*power)(NV50_DISP_MTHD_V1);
+		int (*hda_eld)(NV50_DISP_MTHD_V1);
+		int (*hdmi)(NV50_DISP_MTHD_V1);
+		void (*magic)(struct nvkm_output *);
+	} sor;
+
+	struct {
+		int nr;
+		int (*power)(NV50_DISP_MTHD_V1);
+	} pior;
 };
 
-extern struct nv50_disp_chan_impl nv50_disp_core_ofuncs;
-int nv50_disp_core_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_base;
-extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
-extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
-extern struct nv50_disp_chan_impl nv50_disp_base_ofuncs;
-int nv50_disp_base_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
-extern struct nv50_disp_chan_impl nv50_disp_ovly_ofuncs;
-int nv50_disp_ovly_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_ovly_mthd_base;
-extern struct nv50_disp_chan_impl nv50_disp_oimm_ofuncs;
-int nv50_disp_oimm_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-extern struct nv50_disp_chan_impl nv50_disp_curs_ofuncs;
-int nv50_disp_curs_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-extern struct nvkm_ofuncs nv50_disp_main_ofuncs;
-int  nv50_disp_main_ctor(struct nvkm_object *, struct nvkm_object *,
-			 struct nvkm_oclass *, void *, u32,
-			 struct nvkm_object **);
-void nv50_disp_main_dtor(struct nvkm_object *);
-extern struct nvkm_omthds nv50_disp_main_omthds[];
-extern struct nvkm_oclass nv50_disp_cclass;
-void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head,
-			 const struct nv50_disp_mthd_chan *);
+void nv50_disp_vblank_init(struct nv50_disp *, int);
+void nv50_disp_vblank_fini(struct nv50_disp *, int);
+void nv50_disp_intr(struct nv50_disp *);
 void nv50_disp_intr_supervisor(struct work_struct *);
-void nv50_disp_intr(struct nvkm_subdev *);
-extern const struct nvkm_event_func nv50_disp_vblank_func;
 
-extern const struct nv50_disp_mthd_chan g84_disp_core_mthd_chan;
-extern const struct nv50_disp_mthd_list g84_disp_core_mthd_dac;
-extern const struct nv50_disp_mthd_list g84_disp_core_mthd_head;
-extern const struct nv50_disp_mthd_chan g84_disp_base_mthd_chan;
-extern const struct nv50_disp_mthd_chan g84_disp_ovly_mthd_chan;
-
-extern const struct nv50_disp_mthd_chan g94_disp_core_mthd_chan;
-
-extern struct nv50_disp_chan_impl gf110_disp_core_ofuncs;
-extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_base;
-extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_dac;
-extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_sor;
-extern const struct nv50_disp_mthd_list gf110_disp_core_mthd_pior;
-extern struct nv50_disp_chan_impl gf110_disp_base_ofuncs;
-extern struct nv50_disp_chan_impl gf110_disp_ovly_ofuncs;
-extern const struct nv50_disp_mthd_chan gf110_disp_base_mthd_chan;
-extern struct nv50_disp_chan_impl gf110_disp_oimm_ofuncs;
-extern struct nv50_disp_chan_impl gf110_disp_curs_ofuncs;
-extern struct nvkm_ofuncs gf110_disp_main_ofuncs;
-extern struct nvkm_oclass gf110_disp_cclass;
-void gf110_disp_intr_supervisor(struct work_struct *);
-void gf110_disp_intr(struct nvkm_subdev *);
-extern const struct nvkm_event_func gf110_disp_vblank_func;
-
-extern const struct nv50_disp_mthd_chan gk104_disp_core_mthd_chan;
-extern const struct nv50_disp_mthd_chan gk104_disp_ovly_mthd_chan;
-
-extern struct nvkm_output_dp_impl nv50_pior_dp_impl;
-extern struct nvkm_oclass *nv50_disp_outp_sclass[];
-
-extern struct nvkm_output_dp_impl g94_sor_dp_impl;
-int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
-extern struct nvkm_oclass *g94_disp_outp_sclass[];
-
-extern struct nvkm_output_dp_impl gf110_sor_dp_impl;
-int gf110_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
-extern struct nvkm_oclass *gf110_disp_outp_sclass[];
-
-void gm204_sor_magic(struct nvkm_output *outp);
-extern struct nvkm_output_dp_impl gm204_sor_dp_impl;
+void gf119_disp_vblank_init(struct nv50_disp *, int);
+void gf119_disp_vblank_fini(struct nv50_disp *, int);
+void gf119_disp_intr(struct nv50_disp *);
+void gf119_disp_intr_supervisor(struct work_struct *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
new file mode 100644
index 0000000..54a4ae8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+g84_disp_oimm_oclass = {
+	.base.oclass = G82_DISP_OVERLAY,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_oimm_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
new file mode 100644
index 0000000..c658db5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gf119_disp_oimm_oclass = {
+	.base.oclass = GF110_DISP_OVERLAY,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_oimm_new,
+	.func = &gf119_disp_pioc_func,
+	.chid = 9,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
new file mode 100644
index 0000000..b1fde8c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gk104_disp_oimm_oclass = {
+	.base.oclass = GK104_DISP_OVERLAY,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_oimm_new,
+	.func = &gf119_disp_pioc_func,
+	.chid = 9,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
new file mode 100644
index 0000000..f4e7eb3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gt215_disp_oimm_oclass = {
+	.base.oclass = GT214_DISP_OVERLAY,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_oimm_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
new file mode 100644
index 0000000..cd888a1e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
+		   const struct nv50_disp_chan_mthd *mthd,
+		   struct nv50_disp_root *root, int chid,
+		   const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	union {
+		struct nv50_disp_overlay_v0 v0;
+	} *args = data;
+	struct nvkm_object *parent = oclass->parent;
+	struct nv50_disp *disp = root->disp;
+	int head, ret;
+
+	nvif_ioctl(parent, "create disp overlay size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create disp overlay vers %d head %d\n",
+			   args->v0.version, args->v0.head);
+		if (args->v0.head > disp->base.head.nr)
+			return -EINVAL;
+		head = args->v0.head;
+	} else
+		return ret;
+
+	return nv50_disp_chan_new_(func, mthd, root, chid + head,
+				   head, oclass, pobject);
+}
+
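Note the channel-id arithmetic in the constructor: nv50_disp_chan_new_ is passed chid + head, so overlay-immediate channels are allocated one per head from a fixed base (.chid = 5 here, 9 in the gf119 variant above). A one-liner sketch of the mapping:

```c
#include <stdio.h>

int main(void)
{
	int base = 5;	/* .chid in nv50_disp_oimm_oclass above */
	for (int head = 0; head < 2; head++)
		printf("head %d -> chid %d\n", head, base + head);
	return 0;
}
```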
+const struct nv50_disp_pioc_oclass
+nv50_disp_oimm_oclass = {
+	.base.oclass = NV50_DISP_OVERLAY,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_oimm_new,
+	.func = &nv50_disp_pioc_func,
+	.chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index 9224bcb..bbe5ec0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -22,121 +22,66 @@
  * Authors: Ben Skeggs
  */
 #include "outp.h"
-#include "priv.h"
 
 #include <subdev/bios.h>
-#include <subdev/bios/conn.h>
 #include <subdev/bios/dcb.h>
 #include <subdev/i2c.h>
 
-int
-_nvkm_output_fini(struct nvkm_object *object, bool suspend)
+void
+nvkm_output_fini(struct nvkm_output *outp)
 {
-	struct nvkm_output *outp = (void *)object;
-	nv_ofuncs(outp->conn)->fini(nv_object(outp->conn), suspend);
-	return nvkm_object_fini(&outp->base, suspend);
-}
-
-int
-_nvkm_output_init(struct nvkm_object *object)
-{
-	struct nvkm_output *outp = (void *)object;
-	int ret = nvkm_object_init(&outp->base);
-	if (ret == 0)
-		nv_ofuncs(outp->conn)->init(nv_object(outp->conn));
-	return 0;
+	if (outp->func->fini)
+		outp->func->fini(outp);
 }
 
 void
-_nvkm_output_dtor(struct nvkm_object *object)
+nvkm_output_init(struct nvkm_output *outp)
 {
-	struct nvkm_output *outp = (void *)object;
-	list_del(&outp->head);
-	nvkm_object_ref(NULL, (void *)&outp->conn);
-	nvkm_object_destroy(&outp->base);
+	if (outp->func->init)
+		outp->func->init(outp);
 }
 
-int
-nvkm_output_create_(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass,
-		    struct dcb_output *dcbE, int index,
-		    int length, void **pobject)
+void
+nvkm_output_del(struct nvkm_output **poutp)
 {
-	struct nvkm_disp *disp = nvkm_disp(parent);
-	struct nvkm_bios *bios = nvkm_bios(parent);
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvbios_connE connE;
-	struct nvkm_output *outp;
-	u8  ver, hdr;
-	u32 data;
-	int ret;
+	struct nvkm_output *outp = *poutp;
+	if (outp && !WARN_ON(!outp->func)) {
+		if (outp->func->dtor)
+			*poutp = outp->func->dtor(outp);
+		kfree(*poutp);
+		*poutp = NULL;
+	}
+}
 
-	ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject);
-	outp = *pobject;
-	if (ret)
-		return ret;
+void
+nvkm_output_ctor(const struct nvkm_output_func *func, struct nvkm_disp *disp,
+		 int index, struct dcb_output *dcbE, struct nvkm_output *outp)
+{
+	struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
 
-	outp->info = *dcbE;
+	outp->func = func;
+	outp->disp = disp;
 	outp->index = index;
+	outp->info = *dcbE;
+	outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
 	outp->or = ffs(outp->info.or) - 1;
 
-	DBG("type %02x loc %d or %d link %d con %x edid %x bus %d head %x\n",
-	    dcbE->type, dcbE->location, dcbE->or, dcbE->type >= 2 ?
-	    dcbE->sorconf.link : 0, dcbE->connector, dcbE->i2c_index,
-	    dcbE->bus, dcbE->heads);
-
-	if (outp->info.type != DCB_OUTPUT_DP)
-		outp->port = i2c->find(i2c, NV_I2C_PORT(outp->info.i2c_index));
-	else
-		outp->port = i2c->find(i2c, NV_I2C_AUX(outp->info.i2c_index));
-	outp->edid = outp->port;
-
-	data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr, &connE);
-	if (!data) {
-		DBG("vbios connector data not found\n");
-		memset(&connE, 0x00, sizeof(connE));
-		connE.type = DCB_CONNECTOR_NONE;
-	}
-
-	ret = nvkm_object_ctor(parent, NULL, nvkm_connector_oclass,
-			       &connE, outp->info.connector,
-			       (struct nvkm_object **)&outp->conn);
-	if (ret < 0) {
-		ERR("error %d creating connector, disabling\n", ret);
-		return ret;
-	}
-
-	list_add_tail(&outp->head, &disp->outp);
-	return 0;
+	OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
+		       "edid %x bus %d head %x",
+		 outp->info.type, outp->info.location, outp->info.or,
+		 outp->info.type >= 2 ? outp->info.sorconf.link : 0,
+		 outp->info.connector, outp->info.i2c_index,
+		 outp->info.bus, outp->info.heads);
 }
 
 int
-_nvkm_output_ctor(struct nvkm_object *parent,
-		  struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *dcbE, u32 index,
-		  struct nvkm_object **pobject)
+nvkm_output_new_(const struct nvkm_output_func *func,
+		 struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+		 struct nvkm_output **poutp)
 {
-	struct nvkm_output *outp;
-	int ret;
+	if (!(*poutp = kzalloc(sizeof(**poutp), GFP_KERNEL)))
+		return -ENOMEM;
 
-	ret = nvkm_output_create(parent, engine, oclass, dcbE, index, &outp);
-	*pobject = nv_object(outp);
-	if (ret)
-		return ret;
-
+	nvkm_output_ctor(func, disp, index, dcbE, *poutp);
 	return 0;
 }
-
-struct nvkm_oclass *
-nvkm_output_oclass = &(struct nvkm_output_impl) {
-	.base = {
-		.handle = 0,
-		.ofuncs = &(struct nvkm_ofuncs) {
-			.ctor = _nvkm_output_ctor,
-			.dtor = _nvkm_output_dtor,
-			.init = _nvkm_output_init,
-			.fini = _nvkm_output_fini,
-		},
-	},
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index d9253d2..2590fec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -1,61 +1,55 @@
 #ifndef __NVKM_DISP_OUTP_H__
 #define __NVKM_DISP_OUTP_H__
-#include <core/object.h>
+#include <engine/disp.h>
 
 #include <subdev/bios.h>
 #include <subdev/bios/dcb.h>
 
 struct nvkm_output {
-	struct nvkm_object base;
-	struct list_head head;
-
-	struct dcb_output info;
+	const struct nvkm_output_func *func;
+	struct nvkm_disp *disp;
 	int index;
+	struct dcb_output info;
+
+	/* whatever (if anything) is pointed at by the dcb device entry */
+	struct nvkm_i2c_bus *i2c;
 	int or;
 
-	struct nvkm_i2c_port *port;
-	struct nvkm_i2c_port *edid;
-
+	struct list_head head;
 	struct nvkm_connector *conn;
 };
 
-#define nvkm_output_create(p,e,c,b,i,d)                                        \
-	nvkm_output_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
-#define nvkm_output_destroy(d) ({                                              \
-	struct nvkm_output *_outp = (d);                                       \
-	_nvkm_output_dtor(nv_object(_outp));                                   \
-})
-#define nvkm_output_init(d) ({                                                 \
-	struct nvkm_output *_outp = (d);                                       \
-	_nvkm_output_init(nv_object(_outp));                                   \
-})
-#define nvkm_output_fini(d,s) ({                                               \
-	struct nvkm_output *_outp = (d);                                       \
-	_nvkm_output_fini(nv_object(_outp), (s));                              \
-})
-
-int nvkm_output_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, struct dcb_output *,
-			int, int, void **);
-
-int  _nvkm_output_ctor(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, void *, u32,
-		       struct nvkm_object **);
-void _nvkm_output_dtor(struct nvkm_object *);
-int  _nvkm_output_init(struct nvkm_object *);
-int  _nvkm_output_fini(struct nvkm_object *, bool);
-
-struct nvkm_output_impl {
-	struct nvkm_oclass base;
+struct nvkm_output_func {
+	void *(*dtor)(struct nvkm_output *);
+	void (*init)(struct nvkm_output *);
+	void (*fini)(struct nvkm_output *);
 };
 
-#ifndef MSG
-#define MSG(l,f,a...) do {                                                     \
-	struct nvkm_output *_outp = (void *)outp;                              \
-	nv_##l(_outp, "%02x:%04x:%04x: "f, _outp->index,                       \
-	       _outp->info.hasht, _outp->info.hashm, ##a);                     \
+void nvkm_output_ctor(const struct nvkm_output_func *, struct nvkm_disp *,
+		      int index, struct dcb_output *, struct nvkm_output *);
+int nvkm_output_new_(const struct nvkm_output_func *, struct nvkm_disp *,
+		     int index, struct dcb_output *, struct nvkm_output **);
+void nvkm_output_del(struct nvkm_output **);
+void nvkm_output_init(struct nvkm_output *);
+void nvkm_output_fini(struct nvkm_output *);
+
+int nv50_dac_output_new(struct nvkm_disp *, int, struct dcb_output *,
+			struct nvkm_output **);
+int nv50_sor_output_new(struct nvkm_disp *, int, struct dcb_output *,
+			struct nvkm_output **);
+int nv50_pior_output_new(struct nvkm_disp *, int, struct dcb_output *,
+			 struct nvkm_output **);
+
+u32 g94_sor_dp_lane_map(struct nvkm_device *, u8 lane);
+
+void gm204_sor_magic(struct nvkm_output *outp);
+
+#define OUTP_MSG(o,l,f,a...) do {                                              \
+	struct nvkm_output *_outp = (o);                                       \
+	nvkm_##l(&_outp->disp->engine.subdev, "outp %02x:%04x:%04x: "f"\n",    \
+		 _outp->index, _outp->info.hasht, _outp->info.hashm, ##a);     \
 } while(0)
-#define DBG(f,a...) MSG(debug, f, ##a)
-#define ERR(f,a...) MSG(error, f, ##a)
-#endif
+#define OUTP_ERR(o,f,a...) OUTP_MSG((o), error, f, ##a)
+#define OUTP_DBG(o,f,a...) OUTP_MSG((o), debug, f, ##a)
+#define OUTP_TRACE(o,f,a...) OUTP_MSG((o), trace, f, ##a)
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
index 0bde0fa..3b7a9e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.c
@@ -33,16 +33,17 @@
 int
 nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
 {
-	struct nvkm_output_dp *outp = (void *)base;
+	struct nvkm_output_dp *outp = nvkm_output_dp(base);
 	bool retrain = true;
 	u8 link[2], stat[3];
 	u32 linkrate;
 	int ret, i;
 
 	/* check that the link is trained at a high enough rate */
-	ret = nv_rdaux(outp->base.edid, DPCD_LC00_LINK_BW_SET, link, 2);
+	ret = nvkm_rdaux(outp->aux, DPCD_LC00_LINK_BW_SET, link, 2);
 	if (ret) {
-		DBG("failed to read link config, assuming no sink\n");
+		OUTP_DBG(&outp->base,
+			 "failed to read link config, assuming no sink");
 		goto done;
 	}
 
@@ -50,14 +51,15 @@
 	linkrate = (linkrate * 8) / 10; /* 8B/10B coding overhead */
 	datarate = (datarate + 9) / 10; /* -> decakilobits */
 	if (linkrate < datarate) {
-		DBG("link not trained at sufficient rate\n");
+		OUTP_DBG(&outp->base, "link not trained at sufficient rate");
 		goto done;
 	}
 
 	/* check that link is still trained */
-	ret = nv_rdaux(outp->base.edid, DPCD_LS02, stat, 3);
+	ret = nvkm_rdaux(outp->aux, DPCD_LS02, stat, 3);
 	if (ret) {
-		DBG("failed to read link status, assuming no sink\n");
+		OUTP_DBG(&outp->base,
+			 "failed to read link status, assuming no sink");
 		goto done;
 	}
 
@@ -67,13 +69,14 @@
 			if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
 			    !(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
 			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
-				DBG("lane %d not equalised\n", lane);
+				OUTP_DBG(&outp->base,
+					 "lane %d not equalised", lane);
 				goto done;
 			}
 		}
 		retrain = false;
 	} else {
-		DBG("no inter-lane alignment\n");
+		OUTP_DBG(&outp->base, "no inter-lane alignment");
 	}
 
 done:
@@ -102,150 +105,138 @@
 }
 
 static void
-nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool present)
+nvkm_output_dp_enable(struct nvkm_output_dp *outp, bool enable)
 {
-	struct nvkm_i2c_port *port = outp->base.edid;
-	if (present) {
+	struct nvkm_i2c_aux *aux = outp->aux;
+
+	if (enable) {
 		if (!outp->present) {
-			nvkm_i2c(port)->acquire_pad(port, 0);
-			DBG("aux power -> always\n");
+			OUTP_DBG(&outp->base, "aux power -> always");
+			nvkm_i2c_aux_monitor(aux, true);
 			outp->present = true;
 		}
-		nvkm_output_dp_train(&outp->base, 0, true);
-	} else {
-		if (outp->present) {
-			nvkm_i2c(port)->release_pad(port);
-			DBG("aux power -> demand\n");
-			outp->present = false;
-		}
-		atomic_set(&outp->lt.done, 0);
-	}
-}
 
-static void
-nvkm_output_dp_detect(struct nvkm_output_dp *outp)
-{
-	struct nvkm_i2c_port *port = outp->base.edid;
-	int ret = nvkm_i2c(port)->acquire_pad(port, 0);
-	if (ret == 0) {
-		ret = nv_rdaux(outp->base.edid, DPCD_RC00_DPCD_REV,
-			       outp->dpcd, sizeof(outp->dpcd));
-		nvkm_output_dp_enable(outp, ret == 0);
-		nvkm_i2c(port)->release_pad(port);
+		if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dpcd,
+				sizeof(outp->dpcd))) {
+			nvkm_output_dp_train(&outp->base, 0, true);
+			return;
+		}
 	}
+
+	if (outp->present) {
+		OUTP_DBG(&outp->base, "aux power -> demand");
+		nvkm_i2c_aux_monitor(aux, false);
+		outp->present = false;
+	}
+
+	atomic_set(&outp->lt.done, 0);
 }
 
 static int
 nvkm_output_dp_hpd(struct nvkm_notify *notify)
 {
-	struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
-	struct nvkm_output_dp *outp;
-	struct nvkm_disp *disp = nvkm_disp(conn);
 	const struct nvkm_i2c_ntfy_rep *line = notify->data;
+	struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), hpd);
+	struct nvkm_connector *conn = outp->base.conn;
+	struct nvkm_disp *disp = outp->base.disp;
 	struct nvif_notify_conn_rep_v0 rep = {};
 
-	list_for_each_entry(outp, &disp->outp, base.head) {
-		if (outp->base.conn == conn &&
-		    outp->info.type == DCB_OUTPUT_DP) {
-			DBG("HPD: %d\n", line->mask);
-			nvkm_output_dp_detect(outp);
+	OUTP_DBG(&outp->base, "HPD: %d", line->mask);
+	nvkm_output_dp_enable(outp, true);
 
-			if (line->mask & NVKM_I2C_UNPLUG)
-				rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
-			if (line->mask & NVKM_I2C_PLUG)
-				rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;
+	if (line->mask & NVKM_I2C_UNPLUG)
+		rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
+	if (line->mask & NVKM_I2C_PLUG)
+		rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;
 
-			nvkm_event_send(&disp->hpd, rep.mask, conn->index,
-					&rep, sizeof(rep));
-			return NVKM_NOTIFY_KEEP;
-		}
-	}
-
-	WARN_ON(1);
-	return NVKM_NOTIFY_DROP;
+	nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
+	return NVKM_NOTIFY_KEEP;
 }
 
 static int
 nvkm_output_dp_irq(struct nvkm_notify *notify)
 {
-	struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), irq);
-	struct nvkm_disp *disp = nvkm_disp(outp);
 	const struct nvkm_i2c_ntfy_rep *line = notify->data;
+	struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), irq);
+	struct nvkm_connector *conn = outp->base.conn;
+	struct nvkm_disp *disp = outp->base.disp;
 	struct nvif_notify_conn_rep_v0 rep = {
 		.mask = NVIF_NOTIFY_CONN_V0_IRQ,
 	};
-	int index = outp->base.info.connector;
 
-	DBG("IRQ: %d\n", line->mask);
+	OUTP_DBG(&outp->base, "IRQ: %d", line->mask);
 	nvkm_output_dp_train(&outp->base, 0, true);
 
-	nvkm_event_send(&disp->hpd, rep.mask, index, &rep, sizeof(rep));
+	nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
 	return NVKM_NOTIFY_DROP;
 }
 
-int
-_nvkm_output_dp_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_output_dp_fini(struct nvkm_output *base)
 {
-	struct nvkm_output_dp *outp = (void *)object;
+	struct nvkm_output_dp *outp = nvkm_output_dp(base);
+	nvkm_notify_put(&outp->hpd);
 	nvkm_notify_put(&outp->irq);
+	flush_work(&outp->lt.work);
 	nvkm_output_dp_enable(outp, false);
-	return nvkm_output_fini(&outp->base, suspend);
 }
 
-int
-_nvkm_output_dp_init(struct nvkm_object *object)
+static void
+nvkm_output_dp_init(struct nvkm_output *base)
 {
-	struct nvkm_output_dp *outp = (void *)object;
-	nvkm_output_dp_detect(outp);
-	return nvkm_output_init(&outp->base);
+	struct nvkm_output_dp *outp = nvkm_output_dp(base);
+	nvkm_notify_put(&outp->base.conn->hpd);
+	nvkm_output_dp_enable(outp, true);
+	nvkm_notify_get(&outp->hpd);
 }
 
-void
-_nvkm_output_dp_dtor(struct nvkm_object *object)
+static void *
+nvkm_output_dp_dtor(struct nvkm_output *base)
 {
-	struct nvkm_output_dp *outp = (void *)object;
+	struct nvkm_output_dp *outp = nvkm_output_dp(base);
+	nvkm_notify_fini(&outp->hpd);
 	nvkm_notify_fini(&outp->irq);
-	nvkm_output_destroy(&outp->base);
+	return outp;
 }
 
+static const struct nvkm_output_func
+nvkm_output_dp_func = {
+	.dtor = nvkm_output_dp_dtor,
+	.init = nvkm_output_dp_init,
+	.fini = nvkm_output_dp_fini,
+};
+
 int
-nvkm_output_dp_create_(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass,
-		       struct dcb_output *info, int index,
-		       int length, void **pobject)
+nvkm_output_dp_ctor(const struct nvkm_output_dp_func *func,
+		    struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+		    struct nvkm_i2c_aux *aux, struct nvkm_output_dp *outp)
 {
-	struct nvkm_bios *bios = nvkm_bios(parent);
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvkm_output_dp *outp;
+	struct nvkm_device *device = disp->engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_i2c *i2c = device->i2c;
 	u8  hdr, cnt, len;
 	u32 data;
 	int ret;
 
-	ret = nvkm_output_create_(parent, engine, oclass, info, index,
-				  length, pobject);
-	outp = *pobject;
-	if (ret)
-		return ret;
-
-	nvkm_notify_fini(&outp->base.conn->hpd);
-
-	/* access to the aux channel is not optional... */
-	if (!outp->base.edid) {
-		ERR("aux channel not found\n");
+	nvkm_output_ctor(&nvkm_output_dp_func, disp, index, dcbE, &outp->base);
+	outp->func = func;
+	outp->aux = aux;
+	if (!outp->aux) {
+		OUTP_ERR(&outp->base, "no aux");
 		return -ENODEV;
 	}
 
-	/* nor is the bios data for this output... */
+	/* bios data is not optional */
 	data = nvbios_dpout_match(bios, outp->base.info.hasht,
 				  outp->base.info.hashm, &outp->version,
 				  &hdr, &cnt, &len, &outp->info);
 	if (!data) {
-		ERR("no bios dp data\n");
+		OUTP_ERR(&outp->base, "no bios dp data");
 		return -ENODEV;
 	}
 
-	DBG("bios dp %02x %02x %02x %02x\n", outp->version, hdr, cnt, len);
+	OUTP_DBG(&outp->base, "bios dp %02x %02x %02x %02x",
+		 outp->version, hdr, cnt, len);
 
 	/* link training */
 	INIT_WORK(&outp->lt.work, nvkm_dp_train);
@@ -256,13 +247,13 @@
 	ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_irq, true,
 			       &(struct nvkm_i2c_ntfy_req) {
 				.mask = NVKM_I2C_IRQ,
-				.port = outp->base.edid->index,
+				.port = outp->aux->id,
 			       },
 			       sizeof(struct nvkm_i2c_ntfy_req),
 			       sizeof(struct nvkm_i2c_ntfy_rep),
 			       &outp->irq);
 	if (ret) {
-		ERR("error monitoring aux irq event: %d\n", ret);
+		OUTP_ERR(&outp->base, "error monitoring aux irq: %d", ret);
 		return ret;
 	}
 
@@ -270,13 +261,13 @@
 	ret = nvkm_notify_init(NULL, &i2c->event, nvkm_output_dp_hpd, true,
 			       &(struct nvkm_i2c_ntfy_req) {
 				.mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG,
-				.port = outp->base.edid->index,
+				.port = outp->aux->id,
 			       },
 			       sizeof(struct nvkm_i2c_ntfy_req),
 			       sizeof(struct nvkm_i2c_ntfy_rep),
-			       &outp->base.conn->hpd);
+			       &outp->hpd);
 	if (ret) {
-		ERR("error monitoring aux hpd events: %d\n", ret);
+		OUTP_ERR(&outp->base, "error monitoring aux hpd: %d", ret);
 		return ret;
 	}
 
@@ -284,18 +275,17 @@
 }
 
 int
-_nvkm_output_dp_ctor(struct nvkm_object *parent,
-		     struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *info, u32 index,
-		     struct nvkm_object **pobject)
+nvkm_output_dp_new_(const struct nvkm_output_dp_func *func,
+		    struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+		    struct nvkm_output **poutp)
 {
+	struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
+	struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, dcbE->i2c_index);
 	struct nvkm_output_dp *outp;
-	int ret;
 
-	ret = nvkm_output_dp_create(parent, engine, oclass, info, index, &outp);
-	*pobject = nv_object(outp);
-	if (ret)
-		return ret;
+	if (!(outp = kzalloc(sizeof(*outp), GFP_KERNEL)))
+		return -ENOMEM;
+	*poutp = &outp->base;
 
-	return 0;
+	return nvkm_output_dp_ctor(func, disp, index, dcbE, aux, outp);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
index 70c77ae..731136d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
@@ -1,5 +1,14 @@
 #ifndef __NVKM_DISP_OUTP_DP_H__
 #define __NVKM_DISP_OUTP_DP_H__
+#define nvkm_output_dp(p) container_of((p), struct nvkm_output_dp, base)
+#ifndef MSG
+#define MSG(l,f,a...)                                                          \
+	nvkm_##l(&outp->base.disp->engine.subdev, "%02x:%04x:%04x: "f,         \
+		 outp->base.index, outp->base.info.hasht,                      \
+		 outp->base.info.hashm, ##a)
+#define DBG(f,a...) MSG(debug, f, ##a)
+#define ERR(f,a...) MSG(error, f, ##a)
+#endif
 #include "outp.h"
 
 #include <core/notify.h>
@@ -7,12 +16,16 @@
 #include <subdev/bios/dp.h>
 
 struct nvkm_output_dp {
+	const struct nvkm_output_dp_func *func;
 	struct nvkm_output base;
 
 	struct nvbios_dpout info;
 	u8 version;
 
+	struct nvkm_i2c_aux *aux;
+
 	struct nvkm_notify irq;
+	struct nvkm_notify hpd;
 	bool present;
 	u8 dpcd[16];
 
@@ -23,34 +36,7 @@
 	} lt;
 };
 
-#define nvkm_output_dp_create(p,e,c,b,i,d)                                     \
-	nvkm_output_dp_create_((p), (e), (c), (b), (i), sizeof(**d), (void **)d)
-#define nvkm_output_dp_destroy(d) ({                                           \
-	struct nvkm_output_dp *_outp = (d);                                    \
-	_nvkm_output_dp_dtor(nv_object(_outp));                                \
-})
-#define nvkm_output_dp_init(d) ({                                              \
-	struct nvkm_output_dp *_outp = (d);                                    \
-	_nvkm_output_dp_init(nv_object(_outp));                                \
-})
-#define nvkm_output_dp_fini(d,s) ({                                            \
-	struct nvkm_output_dp *_outp = (d);                                    \
-	_nvkm_output_dp_fini(nv_object(_outp), (s));                           \
-})
-
-int nvkm_output_dp_create_(struct nvkm_object *, struct nvkm_object *,
-			   struct nvkm_oclass *, struct dcb_output *,
-			   int, int, void **);
-
-int  _nvkm_output_dp_ctor(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, void *, u32,
-			  struct nvkm_object **);
-void _nvkm_output_dp_dtor(struct nvkm_object *);
-int  _nvkm_output_dp_init(struct nvkm_object *);
-int  _nvkm_output_dp_fini(struct nvkm_object *, bool);
-
-struct nvkm_output_dp_impl {
-	struct nvkm_output_impl base;
+struct nvkm_output_dp_func {
 	int (*pattern)(struct nvkm_output_dp *, int);
 	int (*lnk_pwr)(struct nvkm_output_dp *, int nr);
 	int (*lnk_ctl)(struct nvkm_output_dp *, int nr, int bw, bool ef);
@@ -58,4 +44,25 @@
 };
 
 int nvkm_output_dp_train(struct nvkm_output *, u32 rate, bool wait);
+
+int nvkm_output_dp_ctor(const struct nvkm_output_dp_func *, struct nvkm_disp *,
+			int index, struct dcb_output *, struct nvkm_i2c_aux *,
+			struct nvkm_output_dp *);
+int nvkm_output_dp_new_(const struct nvkm_output_dp_func *, struct nvkm_disp *,
+			int index, struct dcb_output *,
+			struct nvkm_output **);
+
+int nv50_pior_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+		     struct nvkm_output **);
+
+int g94_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+		   struct nvkm_output **);
+int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
+
+int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+		     struct nvkm_output **);
+int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
+
+int  gm204_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+		      struct nvkm_output **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
new file mode 100644
index 0000000..db6234e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+g84_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x6109a0 },
+		{ 0x0088, 0x6109c0 },
+		{ 0x008c, 0x6109c8 },
+		{ 0x0090, 0x6109b4 },
+		{ 0x0094, 0x610970 },
+		{ 0x00a0, 0x610998 },
+		{ 0x00a4, 0x610964 },
+		{ 0x00c0, 0x610958 },
+		{ 0x00e0, 0x6109a8 },
+		{ 0x00e4, 0x6109d0 },
+		{ 0x00e8, 0x6109d8 },
+		{ 0x0100, 0x61094c },
+		{ 0x0104, 0x610984 },
+		{ 0x0108, 0x61098c },
+		{ 0x0800, 0x6109f8 },
+		{ 0x0808, 0x610a08 },
+		{ 0x080c, 0x610a10 },
+		{ 0x0810, 0x610a00 },
+		{}
+	}
+};
+
+const struct nv50_disp_chan_mthd
+g84_disp_ovly_chan_mthd = {
+	.name = "Overlay",
+	.addr = 0x000540,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &g84_disp_ovly_mthd_base },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+g84_disp_ovly_oclass = {
+	.base.oclass = G82_DISP_OVERLAY_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &g84_disp_ovly_chan_mthd,
+	.chid = 3,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
new file mode 100644
index 0000000..5985879
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygf119.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gf119_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.data = {
+		{ 0x0080, 0x665080 },
+		{ 0x0084, 0x665084 },
+		{ 0x0088, 0x665088 },
+		{ 0x008c, 0x66508c },
+		{ 0x0090, 0x665090 },
+		{ 0x0094, 0x665094 },
+		{ 0x00a0, 0x6650a0 },
+		{ 0x00a4, 0x6650a4 },
+		{ 0x00b0, 0x6650b0 },
+		{ 0x00b4, 0x6650b4 },
+		{ 0x00b8, 0x6650b8 },
+		{ 0x00c0, 0x6650c0 },
+		{ 0x00e0, 0x6650e0 },
+		{ 0x00e4, 0x6650e4 },
+		{ 0x00e8, 0x6650e8 },
+		{ 0x0100, 0x665100 },
+		{ 0x0104, 0x665104 },
+		{ 0x0108, 0x665108 },
+		{ 0x010c, 0x66510c },
+		{ 0x0110, 0x665110 },
+		{ 0x0118, 0x665118 },
+		{ 0x011c, 0x66511c },
+		{ 0x0120, 0x665120 },
+		{ 0x0124, 0x665124 },
+		{ 0x0130, 0x665130 },
+		{ 0x0134, 0x665134 },
+		{ 0x0138, 0x665138 },
+		{ 0x013c, 0x66513c },
+		{ 0x0140, 0x665140 },
+		{ 0x0144, 0x665144 },
+		{ 0x0148, 0x665148 },
+		{ 0x014c, 0x66514c },
+		{ 0x0150, 0x665150 },
+		{ 0x0154, 0x665154 },
+		{ 0x0158, 0x665158 },
+		{ 0x015c, 0x66515c },
+		{ 0x0160, 0x665160 },
+		{ 0x0164, 0x665164 },
+		{ 0x0168, 0x665168 },
+		{ 0x016c, 0x66516c },
+		{ 0x0400, 0x665400 },
+		{ 0x0408, 0x665408 },
+		{ 0x040c, 0x66540c },
+		{ 0x0410, 0x665410 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+gf119_disp_ovly_chan_mthd = {
+	.name = "Overlay",
+	.addr = 0x001000,
+	.prev = -0x020000,
+	.data = {
+		{ "Global", 1, &gf119_disp_ovly_mthd_base },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+gf119_disp_ovly_oclass = {
+	.base.oclass = GF110_DISP_OVERLAY_CONTROL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &gf119_disp_dmac_func,
+	.mthd = &gf119_disp_ovly_chan_mthd,
+	.chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
new file mode 100644
index 0000000..2e2dc06
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gk104_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.data = {
+		{ 0x0080, 0x665080 },
+		{ 0x0084, 0x665084 },
+		{ 0x0088, 0x665088 },
+		{ 0x008c, 0x66508c },
+		{ 0x0090, 0x665090 },
+		{ 0x0094, 0x665094 },
+		{ 0x00a0, 0x6650a0 },
+		{ 0x00a4, 0x6650a4 },
+		{ 0x00b0, 0x6650b0 },
+		{ 0x00b4, 0x6650b4 },
+		{ 0x00b8, 0x6650b8 },
+		{ 0x00c0, 0x6650c0 },
+		{ 0x00c4, 0x6650c4 },
+		{ 0x00e0, 0x6650e0 },
+		{ 0x00e4, 0x6650e4 },
+		{ 0x00e8, 0x6650e8 },
+		{ 0x0100, 0x665100 },
+		{ 0x0104, 0x665104 },
+		{ 0x0108, 0x665108 },
+		{ 0x010c, 0x66510c },
+		{ 0x0110, 0x665110 },
+		{ 0x0118, 0x665118 },
+		{ 0x011c, 0x66511c },
+		{ 0x0120, 0x665120 },
+		{ 0x0124, 0x665124 },
+		{ 0x0130, 0x665130 },
+		{ 0x0134, 0x665134 },
+		{ 0x0138, 0x665138 },
+		{ 0x013c, 0x66513c },
+		{ 0x0140, 0x665140 },
+		{ 0x0144, 0x665144 },
+		{ 0x0148, 0x665148 },
+		{ 0x014c, 0x66514c },
+		{ 0x0150, 0x665150 },
+		{ 0x0154, 0x665154 },
+		{ 0x0158, 0x665158 },
+		{ 0x015c, 0x66515c },
+		{ 0x0160, 0x665160 },
+		{ 0x0164, 0x665164 },
+		{ 0x0168, 0x665168 },
+		{ 0x016c, 0x66516c },
+		{ 0x0400, 0x665400 },
+		{ 0x0404, 0x665404 },
+		{ 0x0408, 0x665408 },
+		{ 0x040c, 0x66540c },
+		{ 0x0410, 0x665410 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+gk104_disp_ovly_chan_mthd = {
+	.name = "Overlay",
+	.addr = 0x001000,
+	.prev = -0x020000,
+	.data = {
+		{ "Global", 1, &gk104_disp_ovly_mthd_base },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+gk104_disp_ovly_oclass = {
+	.base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &gf119_disp_dmac_func,
+	.mthd = &gk104_disp_ovly_chan_mthd,
+	.chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
new file mode 100644
index 0000000..f858053
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt200.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_mthd_list
+gt200_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x6109a0 },
+		{ 0x0088, 0x6109c0 },
+		{ 0x008c, 0x6109c8 },
+		{ 0x0090, 0x6109b4 },
+		{ 0x0094, 0x610970 },
+		{ 0x00a0, 0x610998 },
+		{ 0x00a4, 0x610964 },
+		{ 0x00b0, 0x610c98 },
+		{ 0x00b4, 0x610ca4 },
+		{ 0x00b8, 0x610cac },
+		{ 0x00c0, 0x610958 },
+		{ 0x00e0, 0x6109a8 },
+		{ 0x00e4, 0x6109d0 },
+		{ 0x00e8, 0x6109d8 },
+		{ 0x0100, 0x61094c },
+		{ 0x0104, 0x610984 },
+		{ 0x0108, 0x61098c },
+		{ 0x0800, 0x6109f8 },
+		{ 0x0808, 0x610a08 },
+		{ 0x080c, 0x610a10 },
+		{ 0x0810, 0x610a00 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+gt200_disp_ovly_chan_mthd = {
+	.name = "Overlay",
+	.addr = 0x000540,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &gt200_disp_ovly_mthd_base },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+gt200_disp_ovly_oclass = {
+	.base.oclass = GT200_DISP_OVERLAY_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &gt200_disp_ovly_chan_mthd,
+	.chid = 3,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c
new file mode 100644
index 0000000..c947e1e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygt215.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gt215_disp_ovly_oclass = {
+	.base.oclass = GT214_DISP_OVERLAY_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &g84_disp_ovly_chan_mthd,
+	.chid = 3,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
new file mode 100644
index 0000000..6fa296c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_ovly_new(const struct nv50_disp_dmac_func *func,
+		   const struct nv50_disp_chan_mthd *mthd,
+		   struct nv50_disp_root *root, int chid,
+		   const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	union {
+		struct nv50_disp_overlay_channel_dma_v0 v0;
+	} *args = data;
+	struct nvkm_object *parent = oclass->parent;
+	struct nv50_disp *disp = root->disp;
+	int head, ret;
+	u64 push;
+
+	nvif_ioctl(parent, "create disp overlay channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create disp overlay channel dma vers %d "
+				   "pushbuf %016llx head %d\n",
+			   args->v0.version, args->v0.pushbuf, args->v0.head);
+		if (args->v0.head > disp->base.head.nr)
+			return -EINVAL;
+		push = args->v0.pushbuf;
+		head = args->v0.head;
+	} else
+		return ret;
+
+	return nv50_disp_dmac_new_(func, mthd, root, chid + head,
+				   head, push, oclass, pobject);
+}
+
+static const struct nv50_disp_mthd_list
+nv50_disp_ovly_mthd_base = {
+	.mthd = 0x0000,
+	.addr = 0x000000,
+	.data = {
+		{ 0x0080, 0x000000 },
+		{ 0x0084, 0x0009a0 },
+		{ 0x0088, 0x0009c0 },
+		{ 0x008c, 0x0009c8 },
+		{ 0x0090, 0x6109b4 },
+		{ 0x0094, 0x610970 },
+		{ 0x00a0, 0x610998 },
+		{ 0x00a4, 0x610964 },
+		{ 0x00c0, 0x610958 },
+		{ 0x00e0, 0x6109a8 },
+		{ 0x00e4, 0x6109d0 },
+		{ 0x00e8, 0x6109d8 },
+		{ 0x0100, 0x61094c },
+		{ 0x0104, 0x610984 },
+		{ 0x0108, 0x61098c },
+		{ 0x0800, 0x6109f8 },
+		{ 0x0808, 0x610a08 },
+		{ 0x080c, 0x610a10 },
+		{ 0x0810, 0x610a00 },
+		{}
+	}
+};
+
+static const struct nv50_disp_chan_mthd
+nv50_disp_ovly_chan_mthd = {
+	.name = "Overlay",
+	.addr = 0x000540,
+	.prev = 0x000004,
+	.data = {
+		{ "Global", 1, &nv50_disp_ovly_mthd_base },
+		{}
+	}
+};
+
+const struct nv50_disp_dmac_oclass
+nv50_disp_ovly_oclass = {
+	.base.oclass = NV50_DISP_OVERLAY_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_ovly_new,
+	.func = &nv50_disp_dmac_func,
+	.mthd = &nv50_disp_ovly_chan_mthd,
+	.chid = 3,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
new file mode 100644
index 0000000..a625a98
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+static void
+gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
+{
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->chid;
+
+	nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d fini: %08x\n", chid,
+			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+	}
+
+	/* disable error reporting and completion notification */
+	nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+}
+
+static int
+gf119_disp_pioc_init(struct nv50_disp_chan *chan)
+{
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->chid;
+
+	/* enable error reporting */
+	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+	/* activate channel */
+	nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001);
+	if (nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10));
+		if ((tmp & 0x00030000) == 0x00010000)
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d init: %08x\n", chid,
+			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_chan_func
+gf119_disp_pioc_func = {
+	.init = gf119_disp_pioc_init,
+	.fini = gf119_disp_pioc_fini,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
new file mode 100644
index 0000000..9d2618d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+static void
+nv50_disp_pioc_fini(struct nv50_disp_chan *chan)
+{
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->chid;
+
+	nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
+			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+	}
+}
+
+static int
+nv50_disp_pioc_init(struct nv50_disp_chan *chan)
+{
+	struct nv50_disp *disp = chan->root->disp;
+	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int chid = chan->chid;
+
+	nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
+			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
+	if (nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
+		if ((tmp & 0x00030000) == 0x00010000)
+			break;
+	) < 0) {
+		nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
+			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+const struct nv50_disp_chan_func
+nv50_disp_pioc_func = {
+	.init = nv50_disp_pioc_init,
+	.fini = nv50_disp_pioc_fini,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
index 2a1d887..ab524bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c
@@ -21,8 +21,8 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
 #include "outpdp.h"
+#include "nv50.h"
 
 #include <core/client.h>
 #include <subdev/i2c.h>
@@ -31,119 +31,10 @@
 #include <nvif/class.h>
 #include <nvif/unpack.h>
 
-/******************************************************************************
- * TMDS
- *****************************************************************************/
-
-static int
-nv50_pior_tmds_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *info, u32 index,
-		    struct nvkm_object **pobject)
-{
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvkm_output *outp;
-	int ret;
-
-	ret = nvkm_output_create(parent, engine, oclass, info, index, &outp);
-	*pobject = nv_object(outp);
-	if (ret)
-		return ret;
-
-	outp->edid = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(outp->info.extdev));
-	return 0;
-}
-
-struct nvkm_output_impl
-nv50_pior_tmds_impl = {
-	.base.handle = DCB_OUTPUT_TMDS | 0x0100,
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_pior_tmds_ctor,
-		.dtor = _nvkm_output_dtor,
-		.init = _nvkm_output_init,
-		.fini = _nvkm_output_fini,
-	},
-};
-
-/******************************************************************************
- * DisplayPort
- *****************************************************************************/
-
-static int
-nv50_pior_dp_pattern(struct nvkm_output_dp *outp, int pattern)
-{
-	struct nvkm_i2c_port *port = outp->base.edid;
-	if (port && port->func->pattern)
-		return port->func->pattern(port, pattern);
-	return port ? 0 : -ENODEV;
-}
-
-static int
-nv50_pior_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
-{
-	return 0;
-}
-
-static int
-nv50_pior_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
-{
-	struct nvkm_i2c_port *port = outp->base.edid;
-	if (port && port->func->lnk_ctl)
-		return port->func->lnk_ctl(port, nr, bw, ef);
-	return port ? 0 : -ENODEV;
-}
-
-static int
-nv50_pior_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
-{
-	struct nvkm_i2c_port *port = outp->base.edid;
-	if (port && port->func->drv_ctl)
-		return port->func->drv_ctl(port, ln, vs, pe);
-	return port ? 0 : -ENODEV;
-}
-
-static int
-nv50_pior_dp_ctor(struct nvkm_object *parent,
-		  struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *info, u32 index,
-		  struct nvkm_object **pobject)
-{
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvkm_output_dp *outp;
-	int ret;
-
-	ret = nvkm_output_dp_create(parent, engine, oclass, info, index, &outp);
-	*pobject = nv_object(outp);
-	if (ret)
-		return ret;
-
-	outp->base.edid = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(
-					 outp->base.info.extdev));
-	return 0;
-}
-
-struct nvkm_output_dp_impl
-nv50_pior_dp_impl = {
-	.base.base.handle = DCB_OUTPUT_DP | 0x0010,
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_pior_dp_ctor,
-		.dtor = _nvkm_output_dp_dtor,
-		.init = _nvkm_output_dp_init,
-		.fini = _nvkm_output_dp_fini,
-	},
-	.pattern = nv50_pior_dp_pattern,
-	.lnk_pwr = nv50_pior_dp_lnk_pwr,
-	.lnk_ctl = nv50_pior_dp_lnk_ctl,
-	.drv_ctl = nv50_pior_dp_drv_ctl,
-};
-
-/******************************************************************************
- * General PIOR handling
- *****************************************************************************/
-
 int
 nv50_pior_power(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	const u32 soff = outp->or * 0x800;
 	union {
 		struct nv50_disp_pior_pwr_v0 v0;
@@ -151,10 +42,10 @@
 	u32 ctrl, type;
 	int ret;
 
-	nv_ioctl(object, "disp pior pwr size %d\n", size);
+	nvif_ioctl(object, "disp pior pwr size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp pior pwr vers %d state %d type %x\n",
-			 args->v0.version, args->v0.state, args->v0.type);
+		nvif_ioctl(object, "disp pior pwr vers %d state %d type %x\n",
+			   args->v0.version, args->v0.state, args->v0.type);
 		if (args->v0.type > 0x0f)
 			return -EINVAL;
 		ctrl = !!args->v0.state;
@@ -162,9 +53,79 @@
 	} else
 		return ret;
 
-	nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
-	nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | ctrl);
-	nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
-	priv->pior.type[outp->or] = type;
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61e004 + soff) & 0x80000000))
+			break;
+	);
+	nvkm_mask(device, 0x61e004 + soff, 0x80000101, 0x80000000 | ctrl);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61e004 + soff) & 0x80000000))
+			break;
+	);
+	disp->pior.type[outp->or] = type;
 	return 0;
 }
+
+/******************************************************************************
+ * TMDS
+ *****************************************************************************/
+static const struct nvkm_output_func
+nv50_pior_output_func = {
+};
+
+int
+nv50_pior_output_new(struct nvkm_disp *disp, int index,
+		     struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+	return nvkm_output_new_(&nv50_pior_output_func, disp,
+				index, dcbE, poutp);
+}
+
+/******************************************************************************
+ * DisplayPort
+ *****************************************************************************/
+static int
+nv50_pior_output_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+{
+	return 0;
+}
+
+static int
+nv50_pior_output_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
+{
+	return 0;
+}
+
+static int
+nv50_pior_output_dp_lnk_ctl(struct nvkm_output_dp *outp,
+			    int nr, int bw, bool ef)
+{
+	int ret = nvkm_i2c_aux_lnk_ctl(outp->aux, nr, bw, ef);
+	if (ret)
+		return ret;
+	return 1;
+}
+
+static const struct nvkm_output_dp_func
+nv50_pior_output_dp_func = {
+	.pattern = nv50_pior_output_dp_pattern,
+	.lnk_pwr = nv50_pior_output_dp_lnk_pwr,
+	.lnk_ctl = nv50_pior_output_dp_lnk_ctl,
+};
+
+int
+nv50_pior_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+		 struct nvkm_output **poutp)
+{
+	struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
+	struct nvkm_i2c_aux *aux =
+		nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbE->extdev));
+	struct nvkm_output_dp *outp;
+
+	if (!(outp = kzalloc(sizeof(*outp), GFP_KERNEL)))
+		return -ENOMEM;
+	*poutp = &outp->base;
+
+	return nvkm_output_dp_ctor(&nv50_pior_output_dp_func, disp,
+				   index, dcbE, aux, outp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
index 961ce8b..c245295 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
@@ -1,42 +1,52 @@
 #ifndef __NVKM_DISP_PRIV_H__
 #define __NVKM_DISP_PRIV_H__
 #include <engine/disp.h>
+#include "outp.h"
+#include "outpdp.h"
 
-struct nvkm_disp_impl {
-	struct nvkm_oclass base;
-	struct nvkm_oclass **outp;
-	struct nvkm_oclass **conn;
-	const struct nvkm_event_func *vblank;
+int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *,
+		   int index, int heads, struct nvkm_disp *);
+int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *,
+		   int index, int heads, struct nvkm_disp **);
+void nvkm_disp_vblank(struct nvkm_disp *, int head);
+
+struct nvkm_disp_func_outp {
+	int (* crt)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*  tv)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*tmds)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*lvds)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
+	int (*  dp)(struct nvkm_disp *, int index, struct dcb_output *,
+		    struct nvkm_output **);
 };
 
-#define nvkm_disp_create(p,e,c,h,i,x,d)                                     \
-	nvkm_disp_create_((p), (e), (c), (h), (i), (x),                     \
-			     sizeof(**d), (void **)d)
-#define nvkm_disp_destroy(d) ({                                             \
-	struct nvkm_disp *disp = (d);                                       \
-	_nvkm_disp_dtor(nv_object(disp));                                   \
-})
-#define nvkm_disp_init(d) ({                                                \
-	struct nvkm_disp *disp = (d);                                       \
-	_nvkm_disp_init(nv_object(disp));                                   \
-})
-#define nvkm_disp_fini(d,s) ({                                              \
-	struct nvkm_disp *disp = (d);                                       \
-	_nvkm_disp_fini(nv_object(disp), (s));                              \
-})
+struct nvkm_disp_func {
+	void *(*dtor)(struct nvkm_disp *);
+	void (*intr)(struct nvkm_disp *);
 
-int  nvkm_disp_create_(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, int heads,
-			  const char *, const char *, int, void **);
-void _nvkm_disp_dtor(struct nvkm_object *);
-int  _nvkm_disp_init(struct nvkm_object *);
-int  _nvkm_disp_fini(struct nvkm_object *, bool);
+	const struct nvkm_disp_oclass *(*root)(struct nvkm_disp *);
 
-extern struct nvkm_oclass *nvkm_output_oclass;
-extern struct nvkm_oclass *nvkm_connector_oclass;
+	struct {
+		void (*vblank_init)(struct nvkm_disp *, int head);
+		void (*vblank_fini)(struct nvkm_disp *, int head);
+	} head;
 
-int  nvkm_disp_vblank_ctor(struct nvkm_object *, void *data, u32 size,
-			   struct nvkm_notify *);
-void nvkm_disp_vblank(struct nvkm_disp *, int head);
+	struct {
+		const struct nvkm_disp_func_outp internal;
+		const struct nvkm_disp_func_outp external;
+	} outp;
+};
+
 int  nvkm_disp_ntfy(struct nvkm_object *, u32, struct nvkm_event **);
+
+extern const struct nvkm_disp_oclass nv04_disp_root_oclass;
+
+struct nvkm_disp_oclass {
+	int (*ctor)(struct nvkm_disp *, const struct nvkm_oclass *,
+		    void *data, u32 size, struct nvkm_object **);
+	struct nvkm_sclass base;
+};
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
new file mode 100644
index 0000000..721e4f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg84.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+g84_disp_root = {
+	.init = nv50_disp_root_init,
+	.fini = nv50_disp_root_fini,
+	.dmac = {
+		&g84_disp_core_oclass,
+		&g84_disp_base_oclass,
+		&g84_disp_ovly_oclass,
+	},
+	.pioc = {
+		&g84_disp_oimm_oclass,
+		&g84_disp_curs_oclass,
+	},
+};
+
+static int
+g84_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&g84_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+g84_disp_root_oclass = {
+	.base.oclass = G82_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = g84_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
new file mode 100644
index 0000000..9493f6e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootg94.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+g94_disp_root = {
+	.init = nv50_disp_root_init,
+	.fini = nv50_disp_root_fini,
+	.dmac = {
+		&g94_disp_core_oclass,
+		&gt200_disp_base_oclass,
+		&gt200_disp_ovly_oclass,
+	},
+	.pioc = {
+		&g84_disp_oimm_oclass,
+		&g84_disp_curs_oclass,
+	},
+};
+
+static int
+g94_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&g94_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+g94_disp_root_oclass = {
+	.base.oclass = GT206_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = g94_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
new file mode 100644
index 0000000..8591726
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgf119.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+gf119_disp_root_scanoutpos(NV50_DISP_MTHD_V0)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	const u32 total  = nvkm_rd32(device, 0x640414 + (head * 0x300));
+	const u32 blanke = nvkm_rd32(device, 0x64041c + (head * 0x300));
+	const u32 blanks = nvkm_rd32(device, 0x640420 + (head * 0x300));
+	union {
+		struct nv04_disp_scanoutpos_v0 v0;
+	} *args = data;
+	int ret;
+
+	nvif_ioctl(object, "disp scanoutpos size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "disp scanoutpos vers %d\n",
+			   args->v0.version);
+		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
+		args->v0.hblanke = (blanke & 0x0000ffff);
+		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
+		args->v0.hblanks = (blanks & 0x0000ffff);
+		args->v0.vtotal  = ( total & 0xffff0000) >> 16;
+		args->v0.htotal  = ( total & 0x0000ffff);
+		args->v0.time[0] = ktime_to_ns(ktime_get());
+		args->v0.vline = /* vline read locks hline */
+			nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff;
+		args->v0.time[1] = ktime_to_ns(ktime_get());
+		args->v0.hline =
+			nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff;
+	} else
+		return ret;
+
+	return 0;
+}
+
+void
+gf119_disp_root_fini(struct nv50_disp_root *root)
+{
+	struct nvkm_device *device = root->disp->base.engine.subdev.device;
+	/* disable all interrupts */
+	nvkm_wr32(device, 0x6100b0, 0x00000000);
+}
+
+int
+gf119_disp_root_init(struct nv50_disp_root *root)
+{
+	struct nv50_disp *disp = root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	u32 tmp;
+	int i;
+
+	/* The code segments below, which copy values from one register to
+	 * another, appear to inform EVO of the display capabilities or
+	 * something similar.
+	 */
+
+	/* ... CRTC caps */
+	for (i = 0; i < disp->base.head.nr; i++) {
+		tmp = nvkm_rd32(device, 0x616104 + (i * 0x800));
+		nvkm_wr32(device, 0x6101b4 + (i * 0x800), tmp);
+		tmp = nvkm_rd32(device, 0x616108 + (i * 0x800));
+		nvkm_wr32(device, 0x6101b8 + (i * 0x800), tmp);
+		tmp = nvkm_rd32(device, 0x61610c + (i * 0x800));
+		nvkm_wr32(device, 0x6101bc + (i * 0x800), tmp);
+	}
+
+	/* ... DAC caps */
+	for (i = 0; i < disp->func->dac.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
+		nvkm_wr32(device, 0x6101c0 + (i * 0x800), tmp);
+	}
+
+	/* ... SOR caps */
+	for (i = 0; i < disp->func->sor.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
+		nvkm_wr32(device, 0x6301c4 + (i * 0x800), tmp);
+	}
+
+	/* steal display away from vbios, or something like that */
+	if (nvkm_rd32(device, 0x6100ac) & 0x00000100) {
+		nvkm_wr32(device, 0x6100ac, 0x00000100);
+		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
+		if (nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
+				break;
+		) < 0)
+			return -EBUSY;
+	}
+
+	/* point at display engine memory area (hash table, objects) */
+	nvkm_wr32(device, 0x610010, (root->instmem->addr >> 8) | 9);
+
+	/* enable supervisor interrupts, disable everything else */
+	nvkm_wr32(device, 0x610090, 0x00000000);
+	nvkm_wr32(device, 0x6100a0, 0x00000000);
+	nvkm_wr32(device, 0x6100b0, 0x00000307);
+
+	/* disable underflow reporting, preventing an intermittent issue
+	 * on some gk104 boards where the production vbios left this
+	 * setting enabled by default.
+	 *
+	 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
+	 */
+	for (i = 0; i < disp->base.head.nr; i++)
+		nvkm_mask(device, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
+
+	return 0;
+}
+
+static const struct nv50_disp_root_func
+gf119_disp_root = {
+	.init = gf119_disp_root_init,
+	.fini = gf119_disp_root_fini,
+	.dmac = {
+		&gf119_disp_core_oclass,
+		&gf119_disp_base_oclass,
+		&gf119_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gf119_disp_oimm_oclass,
+		&gf119_disp_curs_oclass,
+	},
+};
+
+static int
+gf119_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gf119_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gf119_disp_root_oclass = {
+	.base.oclass = GF110_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gf119_disp_root_new,
+};
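
gf119_disp_root_scanoutpos() above relies on each timing register packing the
vertical count in its high 16 bits and the horizontal count in its low 16
bits.  A sketch of the decode, with an assumed 1080p-style value for
illustration:

    /* Sketch: decode one packed CRTC timing word as the code above does. */
    static inline void
    unpack_timing(u32 word, u16 *v, u16 *h)
    {
            *v = (word & 0xffff0000) >> 16; /* vertical count, high half */
            *h = (word & 0x0000ffff);       /* horizontal count, low half */
    }
    /* e.g. word == 0x04650898 -> *v == 1125 lines, *h == 2200 pixels */
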
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
new file mode 100644
index 0000000..0bfdb1d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk104.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gk104_disp_root = {
+	.init = gf119_disp_root_init,
+	.fini = gf119_disp_root_fini,
+	.dmac = {
+		&gk104_disp_core_oclass,
+		&gk104_disp_base_oclass,
+		&gk104_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gk104_disp_oimm_oclass,
+		&gk104_disp_curs_oclass,
+	},
+};
+
+static int
+gk104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gk104_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gk104_disp_root_oclass = {
+	.base.oclass = GK104_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gk104_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
new file mode 100644
index 0000000..1e8dbed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgk110.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gk110_disp_root = {
+	.init = gf119_disp_root_init,
+	.fini = gf119_disp_root_fini,
+	.dmac = {
+		&gk110_disp_core_oclass,
+		&gk110_disp_base_oclass,
+		&gk104_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gk104_disp_oimm_oclass,
+		&gk104_disp_curs_oclass,
+	},
+};
+
+static int
+gk110_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gk110_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gk110_disp_root_oclass = {
+	.base.oclass = GK110_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gk110_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
new file mode 100644
index 0000000..44c55be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm107.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gm107_disp_root = {
+	.init = gf119_disp_root_init,
+	.fini = gf119_disp_root_fini,
+	.dmac = {
+		&gm107_disp_core_oclass,
+		&gk110_disp_base_oclass,
+		&gk104_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gk104_disp_oimm_oclass,
+		&gk104_disp_curs_oclass,
+	},
+};
+
+static int
+gm107_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gm107_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gm107_disp_root_oclass = {
+	.base.oclass = GM107_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gm107_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c
new file mode 100644
index 0000000..168bffe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgm204.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gm204_disp_root = {
+	.init = gf119_disp_root_init,
+	.fini = gf119_disp_root_fini,
+	.dmac = {
+		&gm204_disp_core_oclass,
+		&gk110_disp_base_oclass,
+		&gk104_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gk104_disp_oimm_oclass,
+		&gk104_disp_curs_oclass,
+	},
+};
+
+static int
+gm204_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gm204_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gm204_disp_root_oclass = {
+	.base.oclass = GM204_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gm204_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
new file mode 100644
index 0000000..124a0c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt200.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gt200_disp_root = {
+	.init = nv50_disp_root_init,
+	.fini = nv50_disp_root_fini,
+	.dmac = {
+		&gt200_disp_core_oclass,
+		&gt200_disp_base_oclass,
+		&gt200_disp_ovly_oclass,
+	},
+	.pioc = {
+		&g84_disp_oimm_oclass,
+		&g84_disp_curs_oclass,
+	},
+};
+
+static int
+gt200_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gt200_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gt200_disp_root_oclass = {
+	.base.oclass = GT200_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gt200_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
new file mode 100644
index 0000000..dff52f3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgt215.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gt215_disp_root = {
+	.init = nv50_disp_root_init,
+	.fini = nv50_disp_root_fini,
+	.dmac = {
+		&gt215_disp_core_oclass,
+		&gt215_disp_base_oclass,
+		&gt215_disp_ovly_oclass,
+	},
+	.pioc = {
+		&gt215_disp_oimm_oclass,
+		&gt215_disp_curs_oclass,
+	},
+};
+
+static int
+gt215_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&gt215_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gt215_disp_root_oclass = {
+	.base.oclass = GT214_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = gt215_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
new file mode 100644
index 0000000..62d3fb6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define nv04_disp_root(p) container_of((p), struct nv04_disp_root, object)
+#include "priv.h"
+
+#include <core/client.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct nv04_disp_root {
+	struct nvkm_object object;
+	struct nvkm_disp *disp;
+};
+
+static int
+nv04_disp_scanoutpos(struct nv04_disp_root *root,
+		     void *data, u32 size, int head)
+{
+	struct nvkm_device *device = root->disp->engine.subdev.device;
+	struct nvkm_object *object = &root->object;
+	const u32 hoff = head * 0x2000;
+	union {
+		struct nv04_disp_scanoutpos_v0 v0;
+	} *args = data;
+	u32 line;
+	int ret;
+
+	nvif_ioctl(object, "disp scanoutpos size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "disp scanoutpos vers %d\n",
+			   args->v0.version);
+		args->v0.vblanks = nvkm_rd32(device, 0x680800 + hoff) & 0xffff;
+		args->v0.vtotal  = nvkm_rd32(device, 0x680804 + hoff) & 0xffff;
+		args->v0.vblanke = args->v0.vtotal - 1;
+
+		args->v0.hblanks = nvkm_rd32(device, 0x680820 + hoff) & 0xffff;
+		args->v0.htotal  = nvkm_rd32(device, 0x680824 + hoff) & 0xffff;
+		args->v0.hblanke = args->v0.htotal - 1;
+
+		/*
+		 * If the output is VGA instead of digital, vtotal/htotal are
+		 * invalid, so we have to give up and trigger the timestamping
+		 * fallback in the DRM core.
+		 */
+		if (!args->v0.vtotal || !args->v0.htotal)
+			return -ENOTSUPP;
+
+		args->v0.time[0] = ktime_to_ns(ktime_get());
+		line = nvkm_rd32(device, 0x600868 + hoff);
+		args->v0.time[1] = ktime_to_ns(ktime_get());
+		args->v0.hline = (line & 0xffff0000) >> 16;
+		args->v0.vline = (line & 0x0000ffff);
+	} else
+		return ret;
+
+	return 0;
+}
+
+static int
+nv04_disp_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	struct nv04_disp_root *root = nv04_disp_root(object);
+	union {
+		struct nv04_disp_mthd_v0 v0;
+	} *args = data;
+	int head, ret;
+
+	nvif_ioctl(object, "disp mthd size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, true)) {
+		nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
+			   args->v0.version, args->v0.method, args->v0.head);
+		mthd = args->v0.method;
+		head = args->v0.head;
+	} else
+		return ret;
+
+	if (head < 0 || head >= 2)
+		return -ENXIO;
+
+	switch (mthd) {
+	case NV04_DISP_SCANOUTPOS:
+		return nv04_disp_scanoutpos(root, data, size, head);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static struct nvkm_object_func
+nv04_disp_root = {
+	.mthd = nv04_disp_mthd,
+	.ntfy = nvkm_disp_ntfy,
+};
+
+static int
+nv04_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		   void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nv04_disp_root *root;
+
+	if (!(root = kzalloc(sizeof(*root), GFP_KERNEL)))
+		return -ENOMEM;
+	root->disp = disp;
+	*pobject = &root->object;
+
+	nvkm_object_ctor(&nv04_disp_root, oclass, &root->object);
+	return 0;
+}
+
+const struct nvkm_disp_oclass
+nv04_disp_root_oclass = {
+	.base.oclass = NV04_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = nv04_disp_root_new,
+};
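
For context, a client reaches nv04_disp_mthd() by packing the method header
and its payload back-to-back; nvif_unpack() consumes the header and the
remainder is handed to nv04_disp_scanoutpos().  A hedged sketch of such a
call, where nvif_mthd() and the argument layout are assumed from the nvif
headers and "object" stands for the client's handle to the NV04_DISP class:

    struct {
            struct nv04_disp_mthd_v0 base;
            struct nv04_disp_scanoutpos_v0 scan;
    } args = {
            .base.method = NV04_DISP_SCANOUTPOS,
            .base.head   = 0,
    };
    int ret = nvif_mthd(object, 0, &args, sizeof(args));
    /* on success, args.scan.vline/hline and the timestamps are filled in */
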
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
new file mode 100644
index 0000000..06fb24d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+int
+nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0)
+{
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	const u32 blanke = nvkm_rd32(device, 0x610aec + (head * 0x540));
+	const u32 blanks = nvkm_rd32(device, 0x610af4 + (head * 0x540));
+	const u32 total  = nvkm_rd32(device, 0x610afc + (head * 0x540));
+	union {
+		struct nv04_disp_scanoutpos_v0 v0;
+	} *args = data;
+	int ret;
+
+	nvif_ioctl(object, "disp scanoutpos size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "disp scanoutpos vers %d\n",
+			   args->v0.version);
+		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
+		args->v0.hblanke = (blanke & 0x0000ffff);
+		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
+		args->v0.hblanks = (blanks & 0x0000ffff);
+		args->v0.vtotal  = ( total & 0xffff0000) >> 16;
+		args->v0.htotal  = ( total & 0x0000ffff);
+		args->v0.time[0] = ktime_to_ns(ktime_get());
+		args->v0.vline = /* vline read locks hline */
+			nvkm_rd32(device, 0x616340 + (head * 0x800)) & 0xffff;
+		args->v0.time[1] = ktime_to_ns(ktime_get());
+		args->v0.hline =
+			nvkm_rd32(device, 0x616344 + (head * 0x800)) & 0xffff;
+	} else
+		return ret;
+
+	return 0;
+}
+
+int
+nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	union {
+		struct nv50_disp_mthd_v0 v0;
+		struct nv50_disp_mthd_v1 v1;
+	} *args = data;
+	struct nv50_disp_root *root = nv50_disp_root(object);
+	struct nv50_disp *disp = root->disp;
+	const struct nv50_disp_func *func = disp->func;
+	struct nvkm_output *outp = NULL;
+	struct nvkm_output *temp;
+	u16 type, mask = 0;
+	int head, ret;
+
+	if (mthd != NV50_DISP_MTHD)
+		return -EINVAL;
+
+	nvif_ioctl(object, "disp mthd size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, true)) {
+		nvif_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
+			   args->v0.version, args->v0.method, args->v0.head);
+		mthd = args->v0.method;
+		head = args->v0.head;
+	} else
+	if (nvif_unpack(args->v1, 1, 1, true)) {
+		nvif_ioctl(object, "disp mthd vers %d mthd %02x "
+				   "type %04x mask %04x\n",
+			   args->v1.version, args->v1.method,
+			   args->v1.hasht, args->v1.hashm);
+		mthd = args->v1.method;
+		type = args->v1.hasht;
+		mask = args->v1.hashm;
+		head = ffs((mask >> 8) & 0x0f) - 1;
+	} else
+		return ret;
+
+	if (head < 0 || head >= disp->base.head.nr)
+		return -ENXIO;
+
+	if (mask) {
+		list_for_each_entry(temp, &disp->base.outp, head) {
+			if ((temp->info.hasht         == type) &&
+			    (temp->info.hashm & mask) == mask) {
+				outp = temp;
+				break;
+			}
+		}
+		if (outp == NULL)
+			return -ENXIO;
+	}
+
+	switch (mthd) {
+	case NV50_DISP_SCANOUTPOS:
+		return func->head.scanoutpos(object, disp, data, size, head);
+	default:
+		break;
+	}
+
+	switch (mthd * !!outp) {
+	case NV50_DISP_MTHD_V1_DAC_PWR:
+		return func->dac.power(object, disp, data, size, head, outp);
+	case NV50_DISP_MTHD_V1_DAC_LOAD:
+		return func->dac.sense(object, disp, data, size, head, outp);
+	case NV50_DISP_MTHD_V1_SOR_PWR:
+		return func->sor.power(object, disp, data, size, head, outp);
+	case NV50_DISP_MTHD_V1_SOR_HDA_ELD:
+		if (!func->sor.hda_eld)
+			return -ENODEV;
+		return func->sor.hda_eld(object, disp, data, size, head, outp);
+	case NV50_DISP_MTHD_V1_SOR_HDMI_PWR:
+		if (!func->sor.hdmi)
+			return -ENODEV;
+		return func->sor.hdmi(object, disp, data, size, head, outp);
+	case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: {
+		union {
+			struct nv50_disp_sor_lvds_script_v0 v0;
+		} *args = data;
+		nvif_ioctl(object, "disp sor lvds script size %d\n", size);
+		if (nvif_unpack(args->v0, 0, 0, false)) {
+			nvif_ioctl(object, "disp sor lvds script "
+					   "vers %d name %04x\n",
+				   args->v0.version, args->v0.script);
+			disp->sor.lvdsconf = args->v0.script;
+			return 0;
+		} else
+			return ret;
+	}
+		break;
+	case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
+		struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
+		union {
+			struct nv50_disp_sor_dp_pwr_v0 v0;
+		} *args = data;
+		nvif_ioctl(object, "disp sor dp pwr size %d\n", size);
+		if (nvif_unpack(args->v0, 0, 0, false)) {
+			nvif_ioctl(object, "disp sor dp pwr vers %d state %d\n",
+				   args->v0.version, args->v0.state);
+			if (args->v0.state == 0) {
+				nvkm_notify_put(&outpdp->irq);
+				outpdp->func->lnk_pwr(outpdp, 0);
+				atomic_set(&outpdp->lt.done, 0);
+				return 0;
+			} else
+			if (args->v0.state != 0) {
+				nvkm_output_dp_train(&outpdp->base, 0, true);
+				return 0;
+			}
+		} else
+			return ret;
+	}
+		break;
+	case NV50_DISP_MTHD_V1_PIOR_PWR:
+		if (!func->pior.power)
+			return -ENODEV;
+		return func->pior.power(object, disp, data, size, head, outp);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static int
+nv50_disp_root_dmac_new_(const struct nvkm_oclass *oclass,
+			 void *data, u32 size, struct nvkm_object **pobject)
+{
+	const struct nv50_disp_dmac_oclass *sclass = oclass->priv;
+	struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
+	return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
+			    oclass, data, size, pobject);
+}
+
+static int
+nv50_disp_root_pioc_new_(const struct nvkm_oclass *oclass,
+			 void *data, u32 size, struct nvkm_object **pobject)
+{
+	const struct nv50_disp_pioc_oclass *sclass = oclass->priv;
+	struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
+	return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
+			    oclass, data, size, pobject);
+}
+
+static int
+nv50_disp_root_child_get_(struct nvkm_object *object, int index,
+			  struct nvkm_oclass *sclass)
+{
+	struct nv50_disp_root *root = nv50_disp_root(object);
+
+	if (index < ARRAY_SIZE(root->func->dmac)) {
+		sclass->base = root->func->dmac[index]->base;
+		sclass->priv = root->func->dmac[index];
+		sclass->ctor = nv50_disp_root_dmac_new_;
+		return 0;
+	}
+
+	index -= ARRAY_SIZE(root->func->dmac);
+
+	if (index < ARRAY_SIZE(root->func->pioc)) {
+		sclass->base = root->func->pioc[index]->base;
+		sclass->priv = root->func->pioc[index];
+		sclass->ctor = nv50_disp_root_pioc_new_;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int
+nv50_disp_root_fini_(struct nvkm_object *object, bool suspend)
+{
+	struct nv50_disp_root *root = nv50_disp_root(object);
+	root->func->fini(root);
+	return 0;
+}
+
+static int
+nv50_disp_root_init_(struct nvkm_object *object)
+{
+	struct nv50_disp_root *root = nv50_disp_root(object);
+	return root->func->init(root);
+}
+
+static void *
+nv50_disp_root_dtor_(struct nvkm_object *object)
+{
+	struct nv50_disp_root *root = nv50_disp_root(object);
+	nvkm_ramht_del(&root->ramht);
+	nvkm_gpuobj_del(&root->instmem);
+	return root;
+}
+
+static const struct nvkm_object_func
+nv50_disp_root_ = {
+	.dtor = nv50_disp_root_dtor_,
+	.init = nv50_disp_root_init_,
+	.fini = nv50_disp_root_fini_,
+	.mthd = nv50_disp_root_mthd_,
+	.ntfy = nvkm_disp_ntfy,
+	.sclass = nv50_disp_root_child_get_,
+};
+
+int
+nv50_disp_root_new_(const struct nv50_disp_root_func *func,
+		    struct nvkm_disp *base, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nv50_disp *disp = nv50_disp(base);
+	struct nv50_disp_root *root;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	int ret;
+
+	if (!(root = kzalloc(sizeof(*root), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &root->object;
+
+	nvkm_object_ctor(&nv50_disp_root_, oclass, &root->object);
+	root->func = func;
+	root->disp = disp;
+
+	ret = nvkm_gpuobj_new(device, 0x10000, 0x10000,
+			      false, NULL, &root->instmem);
+	if (ret)
+		return ret;
+
+	return nvkm_ramht_new(device, 0x1000, 0, root->instmem, &root->ramht);
+}
+
+void
+nv50_disp_root_fini(struct nv50_disp_root *root)
+{
+	struct nvkm_device *device = root->disp->base.engine.subdev.device;
+	/* disable all interrupts */
+	nvkm_wr32(device, 0x610024, 0x00000000);
+	nvkm_wr32(device, 0x610020, 0x00000000);
+}
+
+int
+nv50_disp_root_init(struct nv50_disp_root *root)
+{
+	struct nv50_disp *disp = root->disp;
+	struct nvkm_device *device = disp->base.engine.subdev.device;
+	u32 tmp;
+	int i;
+
+	/* The code segments below, which copy values from one register to
+	 * another, appear to inform EVO of the display capabilities or
+	 * something similar.  No idea what the 0x614004 caps are for..
+	 */
+	tmp = nvkm_rd32(device, 0x614004);
+	nvkm_wr32(device, 0x610184, tmp);
+
+	/* ... CRTC caps */
+	for (i = 0; i < disp->base.head.nr; i++) {
+		tmp = nvkm_rd32(device, 0x616100 + (i * 0x800));
+		nvkm_wr32(device, 0x610190 + (i * 0x10), tmp);
+		tmp = nvkm_rd32(device, 0x616104 + (i * 0x800));
+		nvkm_wr32(device, 0x610194 + (i * 0x10), tmp);
+		tmp = nvkm_rd32(device, 0x616108 + (i * 0x800));
+		nvkm_wr32(device, 0x610198 + (i * 0x10), tmp);
+		tmp = nvkm_rd32(device, 0x61610c + (i * 0x800));
+		nvkm_wr32(device, 0x61019c + (i * 0x10), tmp);
+	}
+
+	/* ... DAC caps */
+	for (i = 0; i < disp->func->dac.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
+		nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
+	}
+
+	/* ... SOR caps */
+	for (i = 0; i < disp->func->sor.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
+		nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
+	}
+
+	/* ... PIOR caps */
+	for (i = 0; i < disp->func->pior.nr; i++) {
+		tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
+		nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
+	}
+
+	/* steal display away from vbios, or something like that */
+	if (nvkm_rd32(device, 0x610024) & 0x00000100) {
+		nvkm_wr32(device, 0x610024, 0x00000100);
+		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
+		if (nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
+				break;
+		) < 0)
+			return -EBUSY;
+	}
+
+	/* point at display engine memory area (hash table, objects) */
+	nvkm_wr32(device, 0x610010, (root->instmem->addr >> 8) | 9);
+
+	/* enable supervisor interrupts, disable everything else */
+	nvkm_wr32(device, 0x61002c, 0x00000370);
+	nvkm_wr32(device, 0x610028, 0x00000000);
+	return 0;
+}
+
+static const struct nv50_disp_root_func
+nv50_disp_root = {
+	.init = nv50_disp_root_init,
+	.fini = nv50_disp_root_fini,
+	.dmac = {
+		&nv50_disp_core_oclass,
+		&nv50_disp_base_oclass,
+		&nv50_disp_ovly_oclass,
+	},
+	.pioc = {
+		&nv50_disp_oimm_oclass,
+		&nv50_disp_curs_oclass,
+	},
+};
+
+static int
+nv50_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+		   void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nv50_disp_root_new_(&nv50_disp_root, disp, oclass,
+				   data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+nv50_disp_root_oclass = {
+	.base.oclass = NV50_DISP,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = nv50_disp_root_new,
+};
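
nv50_disp_root_child_get_() above flattens the five channel classes into one
index space: indices 0-2 come from func->dmac[] (core, base, overlay) and 3-4
from func->pioc[] (overlay-immediate, cursor); anything higher returns
-EINVAL, which is how enumeration terminates.  A sketch of a walker over that
space:

    /* Sketch: enumerate every child class the root exposes. */
    struct nvkm_oclass sclass;
    int index = 0;
    while (nv50_disp_root_child_get_(object, index, &sclass) == 0) {
            /* sclass.base/ctor now describe child class 'index' */
            index++;
    }
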
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
new file mode 100644
index 0000000..5b2c903
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -0,0 +1,43 @@
+#ifndef __NV50_DISP_ROOT_H__
+#define __NV50_DISP_ROOT_H__
+#define nv50_disp_root(p) container_of((p), struct nv50_disp_root, object)
+#include "nv50.h"
+#include "channv50.h"
+#include "dmacnv50.h"
+
+struct nv50_disp_root {
+	const struct nv50_disp_root_func *func;
+	struct nv50_disp *disp;
+	struct nvkm_object object;
+
+	struct nvkm_gpuobj *instmem;
+	struct nvkm_ramht *ramht;
+};
+
+struct nv50_disp_root_func {
+	int (*init)(struct nv50_disp_root *);
+	void (*fini)(struct nv50_disp_root *);
+	const struct nv50_disp_dmac_oclass *dmac[3];
+	const struct nv50_disp_pioc_oclass *pioc[2];
+};
+
+int  nv50_disp_root_new_(const struct nv50_disp_root_func *, struct nvkm_disp *,
+			 const struct nvkm_oclass *, void *data, u32 size,
+			 struct nvkm_object **);
+int  nv50_disp_root_init(struct nv50_disp_root *);
+void nv50_disp_root_fini(struct nv50_disp_root *);
+
+int  gf119_disp_root_init(struct nv50_disp_root *);
+void gf119_disp_root_fini(struct nv50_disp_root *);
+
+extern const struct nvkm_disp_oclass nv50_disp_root_oclass;
+extern const struct nvkm_disp_oclass g84_disp_root_oclass;
+extern const struct nvkm_disp_oclass g94_disp_root_oclass;
+extern const struct nvkm_disp_oclass gt200_disp_root_oclass;
+extern const struct nvkm_disp_oclass gt215_disp_root_oclass;
+extern const struct nvkm_disp_oclass gf119_disp_root_oclass;
+extern const struct nvkm_disp_oclass gk104_disp_root_oclass;
+extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
+extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
+extern const struct nvkm_disp_oclass gm204_disp_root_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
index 8918da7..1bb9d66 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c
@@ -24,7 +24,6 @@
 #include "nv50.h"
 #include "outpdp.h"
 
-#include <core/device.h>
 #include <subdev/timer.h>
 
 static inline u32
@@ -39,12 +38,33 @@
 	return g94_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
 }
 
-static inline u32
-g94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+/*******************************************************************************
+ * TMDS/LVDS
+ ******************************************************************************/
+static const struct nvkm_output_func
+g94_sor_output_func = {
+};
+
+int
+g94_sor_output_new(struct nvkm_disp *disp, int index,
+		   struct dcb_output *dcbE, struct nvkm_output **poutp)
 {
+	return nvkm_output_new_(&g94_sor_output_func, disp,
+				index, dcbE, poutp);
+}
+
+/*******************************************************************************
+ * DisplayPort
+ ******************************************************************************/
+u32
+g94_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
+{
+	static const u8 gm100[] = { 0, 8, 16, 24 };
 	static const u8 mcp89[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
-	static const u8 g94[] = { 16, 8, 0, 24 };
-	if (nv_device(priv)->chipset == 0xaf)
+	static const u8   g94[] = { 16, 8, 0, 24 };
+	if (device->chipset >= 0x110)
+		return gm100[lane];
+	if (device->chipset == 0xaf)
 		return mcp89[lane];
 	return g94[lane];
 }
@@ -52,33 +72,36 @@
 static int
 g94_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
 	const u32 loff = g94_sor_loff(outp);
-	nv_mask(priv, 0x61c10c + loff, 0x0f000000, pattern << 24);
+	nvkm_mask(device, 0x61c10c + loff, 0x0f000000, pattern << 24);
 	return 0;
 }
 
 int
 g94_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
 	const u32 soff = g94_sor_soff(outp);
 	const u32 loff = g94_sor_loff(outp);
 	u32 mask = 0, i;
 
 	for (i = 0; i < nr; i++)
-		mask |= 1 << (g94_sor_dp_lane_map(priv, i) >> 3);
+		mask |= 1 << (g94_sor_dp_lane_map(device, i) >> 3);
 
-	nv_mask(priv, 0x61c130 + loff, 0x0000000f, mask);
-	nv_mask(priv, 0x61c034 + soff, 0x80000000, 0x80000000);
-	nv_wait(priv, 0x61c034 + soff, 0x80000000, 0x00000000);
+	nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
+	nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000))
+			break;
+	);
 	return 0;
 }
 
 static int
 g94_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
 	const u32 soff = g94_sor_soff(outp);
 	const u32 loff = g94_sor_loff(outp);
 	u32 dpctrl = 0x00000000;
@@ -90,17 +113,17 @@
 	if (bw > 0x06)
 		clksor |= 0x00040000;
 
-	nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor);
-	nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+	nvkm_mask(device, 0x614300 + soff, 0x000c0000, clksor);
+	nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
 	return 0;
 }
 
 static int
 g94_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	const u32 shift = g94_sor_dp_lane_map(priv, ln);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	const u32 shift = g94_sor_dp_lane_map(device, ln);
 	const u32 loff = g94_sor_loff(outp);
 	u32 addr, data[3];
 	u8  ver, hdr, cnt, len;
@@ -109,37 +132,37 @@
 
 	addr = nvbios_dpout_match(bios, outp->base.info.hasht,
 					outp->base.info.hashm,
-				 &ver, &hdr, &cnt, &len, &info);
+				  &ver, &hdr, &cnt, &len, &info);
 	if (!addr)
 		return -ENODEV;
 
 	addr = nvbios_dpcfg_match(bios, addr, 0, vs, pe,
-				 &ver, &hdr, &cnt, &len, &ocfg);
+				  &ver, &hdr, &cnt, &len, &ocfg);
 	if (!addr)
 		return -EINVAL;
 
-	data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
-	data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
-	data[2] = nv_rd32(priv, 0x61c130 + loff);
+	data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
+	data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
+	data[2] = nvkm_rd32(device, 0x61c130 + loff);
 	if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
 		data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
-	nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
-	nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
-	nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
+	nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+	nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+	nvkm_wr32(device, 0x61c130 + loff, data[2]);
 	return 0;
 }
 
-struct nvkm_output_dp_impl
-g94_sor_dp_impl = {
-	.base.base.handle = DCB_OUTPUT_DP,
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_output_dp_ctor,
-		.dtor = _nvkm_output_dp_dtor,
-		.init = _nvkm_output_dp_init,
-		.fini = _nvkm_output_dp_fini,
-	},
+static const struct nvkm_output_dp_func
+g94_sor_dp_func = {
 	.pattern = g94_sor_dp_pattern,
 	.lnk_pwr = g94_sor_dp_lnk_pwr,
 	.lnk_ctl = g94_sor_dp_lnk_ctl,
 	.drv_ctl = g94_sor_dp_drv_ctl,
 };
+
+int
+g94_sor_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+	       struct nvkm_output **poutp)
+{
+	return nvkm_output_dp_new_(&g94_sor_dp_func, disp, index, dcbE, poutp);
+}
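
The lane map above gives each DP lane's bit offset within the per-link drive
registers; g94_sor_dp_lnk_pwr() turns those offsets into lane-enable bits by
shifting right by 3.  A worked example against the default
g94[] = { 16, 8, 0, 24 } table:

    /* Sketch: lane-enable mask for a 2-lane link on a plain g94. */
    u32 mask = 0;
    int i;
    for (i = 0; i < 2; i++)
            mask |= 1 << (g94_sor_dp_lane_map(device, i) >> 3);
    /* lanes 0,1 -> shifts 16,8 -> bits 2,1 -> mask == 0x6 */
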
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c
deleted file mode 100644
index 52fbe48..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf110.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv50.h"
-#include "outpdp.h"
-
-static inline u32
-gf110_sor_soff(struct nvkm_output_dp *outp)
-{
-	return (ffs(outp->base.info.or) - 1) * 0x800;
-}
-
-static inline u32
-gf110_sor_loff(struct nvkm_output_dp *outp)
-{
-	return gf110_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
-}
-
-static inline u32
-gf110_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
-{
-	static const u8 gf110[] = { 16, 8, 0, 24 };
-	return gf110[lane];
-}
-
-static int
-gf110_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
-{
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
-	const u32 loff = gf110_sor_loff(outp);
-	nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
-	return 0;
-}
-
-int
-gf110_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
-{
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
-	const u32 soff = gf110_sor_soff(outp);
-	const u32 loff = gf110_sor_loff(outp);
-	u32 dpctrl = 0x00000000;
-	u32 clksor = 0x00000000;
-
-	clksor |= bw << 18;
-	dpctrl |= ((1 << nr) - 1) << 16;
-	if (ef)
-		dpctrl |= 0x00004000;
-
-	nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor);
-	nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
-	return 0;
-}
-
-static int
-gf110_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
-		     int ln, int vs, int pe, int pc)
-{
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	const u32 shift = gf110_sor_dp_lane_map(priv, ln);
-	const u32 loff = gf110_sor_loff(outp);
-	u32 addr, data[4];
-	u8  ver, hdr, cnt, len;
-	struct nvbios_dpout info;
-	struct nvbios_dpcfg ocfg;
-
-	addr = nvbios_dpout_match(bios, outp->base.info.hasht,
-					outp->base.info.hashm,
-				  &ver, &hdr, &cnt, &len, &info);
-	if (!addr)
-		return -ENODEV;
-
-	addr = nvbios_dpcfg_match(bios, addr, pc, vs, pe,
-				  &ver, &hdr, &cnt, &len, &ocfg);
-	if (!addr)
-		return -EINVAL;
-
-	data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
-	data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
-	data[2] = nv_rd32(priv, 0x61c130 + loff);
-	if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
-		data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
-	nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
-	nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
-	nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
-	data[3] = nv_rd32(priv, 0x61c13c + loff) & ~(0x000000ff << shift);
-	nv_wr32(priv, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
-	return 0;
-}
-
-struct nvkm_output_dp_impl
-gf110_sor_dp_impl = {
-	.base.base.handle = DCB_OUTPUT_DP,
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_output_dp_ctor,
-		.dtor = _nvkm_output_dp_dtor,
-		.init = _nvkm_output_dp_init,
-		.fini = _nvkm_output_dp_fini,
-	},
-	.pattern = gf110_sor_dp_pattern,
-	.lnk_pwr = g94_sor_dp_lnk_pwr,
-	.lnk_ctl = gf110_sor_dp_lnk_ctl,
-	.drv_ctl = gf110_sor_dp_drv_ctl,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
new file mode 100644
index 0000000..b4b41b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+#include "outpdp.h"
+
+static inline u32
+gf119_sor_soff(struct nvkm_output_dp *outp)
+{
+	return (ffs(outp->base.info.or) - 1) * 0x800;
+}
+
+static inline u32
+gf119_sor_loff(struct nvkm_output_dp *outp)
+{
+	return gf119_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
+}
+
+static int
+gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+{
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+	const u32 loff = gf119_sor_loff(outp);
+	nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+	return 0;
+}
+
+int
+gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
+{
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+	const u32 soff = gf119_sor_soff(outp);
+	const u32 loff = gf119_sor_loff(outp);
+	u32 dpctrl = 0x00000000;
+	u32 clksor = 0x00000000;
+
+	clksor |= bw << 18;
+	dpctrl |= ((1 << nr) - 1) << 16;
+	if (ef)
+		dpctrl |= 0x00004000;
+
+	nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
+	nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
+	return 0;
+}
+
+static int
+gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
+		     int ln, int vs, int pe, int pc)
+{
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	const u32 shift = g94_sor_dp_lane_map(device, ln);
+	const u32 loff = gf119_sor_loff(outp);
+	u32 addr, data[4];
+	u8  ver, hdr, cnt, len;
+	struct nvbios_dpout info;
+	struct nvbios_dpcfg ocfg;
+
+	addr = nvbios_dpout_match(bios, outp->base.info.hasht,
+					outp->base.info.hashm,
+				  &ver, &hdr, &cnt, &len, &info);
+	if (!addr)
+		return -ENODEV;
+
+	addr = nvbios_dpcfg_match(bios, addr, pc, vs, pe,
+				  &ver, &hdr, &cnt, &len, &ocfg);
+	if (!addr)
+		return -EINVAL;
+
+	data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
+	data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
+	data[2] = nvkm_rd32(device, 0x61c130 + loff);
+	if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
+		data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
+	nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+	nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+	nvkm_wr32(device, 0x61c130 + loff, data[2]);
+	data[3] = nvkm_rd32(device, 0x61c13c + loff) & ~(0x000000ff << shift);
+	nvkm_wr32(device, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
+	return 0;
+}
+
+static const struct nvkm_output_dp_func
+gf119_sor_dp_func = {
+	.pattern = gf119_sor_dp_pattern,
+	.lnk_pwr = g94_sor_dp_lnk_pwr,
+	.lnk_ctl = gf119_sor_dp_lnk_ctl,
+	.drv_ctl = gf119_sor_dp_drv_ctl,
+};
+
+int
+gf119_sor_dp_new(struct nvkm_disp *disp, int index,
+		 struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+	return nvkm_output_dp_new_(&gf119_sor_dp_func, disp, index, dcbE, poutp);
+}
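
gf119_sor_dp_lnk_ctl() above encodes the link parameters as two masked
writes: the bandwidth code lands in bits 22:18 of the clock register (mask
0x007c0000) and the lane-enable field in bits 19:16 of the DP control
register, with bit 14 selecting enhanced framing.  A worked example, assuming
the standard DPCD bandwidth code 0x0a for 2.7Gbps:

    /* Sketch: values for a 4-lane 2.7Gbps link with enhanced framing. */
    int nr = 4, bw = 0x0a;
    u32 clksor = bw << 18;              /* == 0x00280000 */
    u32 dpctrl = ((1 << nr) - 1) << 16; /* == 0x000f0000 */
    dpctrl |= 0x00004000;               /* ef */
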
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
index 1e40dfe..029e5f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm204.c
@@ -41,17 +41,17 @@
 void
 gm204_sor_magic(struct nvkm_output *outp)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->disp->engine.subdev.device;
 	const u32 soff = outp->or * 0x100;
 	const u32 data = outp->or + 1;
 	if (outp->info.sorconf.link & 1)
-		nv_mask(priv, 0x612308 + soff, 0x0000001f, 0x00000000 | data);
+		nvkm_mask(device, 0x612308 + soff, 0x0000001f, 0x00000000 | data);
 	if (outp->info.sorconf.link & 2)
-		nv_mask(priv, 0x612388 + soff, 0x0000001f, 0x00000010 | data);
+		nvkm_mask(device, 0x612388 + soff, 0x0000001f, 0x00000010 | data);
 }
 
 static inline u32
-gm204_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+gm204_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
 {
 	return lane * 0x08;
 }
@@ -59,30 +59,33 @@
 static int
 gm204_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
 	const u32 soff = gm204_sor_soff(outp);
 	const u32 data = 0x01010101 * pattern;
 	if (outp->base.info.sorconf.link & 1)
-		nv_mask(priv, 0x61c110 + soff, 0x0f0f0f0f, data);
+		nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
 	else
-		nv_mask(priv, 0x61c12c + soff, 0x0f0f0f0f, data);
+		nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
 	return 0;
 }
 
 static int
 gm204_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
 	const u32 soff = gm204_sor_soff(outp);
 	const u32 loff = gm204_sor_loff(outp);
 	u32 mask = 0, i;
 
 	for (i = 0; i < nr; i++)
-		mask |= 1 << (gm204_sor_dp_lane_map(priv, i) >> 3);
+		mask |= 1 << (gm204_sor_dp_lane_map(device, i) >> 3);
 
-	nv_mask(priv, 0x61c130 + loff, 0x0000000f, mask);
-	nv_mask(priv, 0x61c034 + soff, 0x80000000, 0x80000000);
-	nv_wait(priv, 0x61c034 + soff, 0x80000000, 0x00000000);
+	nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
+	nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000))
+			break;
+	);
 	return 0;
 }
 
@@ -90,9 +93,9 @@
 gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
 		     int ln, int vs, int pe, int pc)
 {
-	struct nv50_disp_priv *priv = (void *)nvkm_disp(outp);
-	struct nvkm_bios *bios = nvkm_bios(priv);
-	const u32 shift = gm204_sor_dp_lane_map(priv, ln);
+	struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	const u32 shift = gm204_sor_dp_lane_map(device, ln);
 	const u32 loff = gm204_sor_loff(outp);
 	u32 addr, data[4];
 	u8  ver, hdr, cnt, len;
@@ -109,31 +112,32 @@
 				  &ver, &hdr, &cnt, &len, &ocfg);
 	if (!addr)
 		return -EINVAL;
+	ocfg.tx_pu &= 0x0f;
 
-	data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
-	data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
-	data[2] = nv_rd32(priv, 0x61c130 + loff);
-	if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
-		data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
-	nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
-	nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
-	nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
-	data[3] = nv_rd32(priv, 0x61c13c + loff) & ~(0x000000ff << shift);
-	nv_wr32(priv, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
+	data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
+	data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
+	data[2] = nvkm_rd32(device, 0x61c130 + loff);
+	if ((data[2] & 0x00000f00) < (ocfg.tx_pu << 8) || ln == 0)
+		data[2] = (data[2] & ~0x00000f00) | (ocfg.tx_pu << 8);
+	nvkm_wr32(device, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
+	nvkm_wr32(device, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
+	nvkm_wr32(device, 0x61c130 + loff, data[2]);
+	data[3] = nvkm_rd32(device, 0x61c13c + loff) & ~(0x000000ff << shift);
+	nvkm_wr32(device, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
 	return 0;
 }
 
-struct nvkm_output_dp_impl
-gm204_sor_dp_impl = {
-	.base.base.handle = DCB_OUTPUT_DP,
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_output_dp_ctor,
-		.dtor = _nvkm_output_dp_dtor,
-		.init = _nvkm_output_dp_init,
-		.fini = _nvkm_output_dp_fini,
-	},
+static const struct nvkm_output_dp_func
+gm204_sor_dp_func = {
 	.pattern = gm204_sor_dp_pattern,
 	.lnk_pwr = gm204_sor_dp_lnk_pwr,
-	.lnk_ctl = gf110_sor_dp_lnk_ctl,
+	.lnk_ctl = gf119_sor_dp_lnk_ctl,
 	.drv_ctl = gm204_sor_dp_drv_ctl,
 };
+
+int
+gm204_sor_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
+		 struct nvkm_output **poutp)
+{
+	return nvkm_output_dp_new_(&gm204_sor_dp_func, disp, index, dcbE, poutp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
index b229a31..29e0d2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c
@@ -33,6 +33,7 @@
 int
 nv50_sor_power(NV50_DISP_MTHD_V1)
 {
+	struct nvkm_device *device = disp->base.engine.subdev.device;
 	union {
 		struct nv50_disp_sor_pwr_v0 v0;
 	} *args = data;
@@ -40,17 +41,39 @@
 	u32 stat;
 	int ret;
 
-	nv_ioctl(object, "disp sor pwr size %d\n", size);
+	nvif_ioctl(object, "disp sor pwr size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "disp sor pwr vers %d state %d\n",
-			 args->v0.version, args->v0.state);
+		nvif_ioctl(object, "disp sor pwr vers %d state %d\n",
+			   args->v0.version, args->v0.state);
 		stat = !!args->v0.state;
 	} else
 		return ret;
 
-	nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
-	nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
-	nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
-	nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61c004 + soff) & 0x80000000))
+			break;
+	);
+	nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61c004 + soff) & 0x80000000))
+			break;
+	);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
+			break;
+	);
 	return 0;
 }
+
+static const struct nvkm_output_func
+nv50_sor_output_func = {
+};
+
+int
+nv50_sor_output_new(struct nvkm_disp *disp, int index,
+		    struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+	return nvkm_output_new_(&nv50_sor_output_func, disp,
+				index, dcbE, poutp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c
index c4622c7..8bff95c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c
@@ -23,131 +23,119 @@
  */
 #include <subdev/vga.h>
 
-#include <core/device.h>
-
 u8
-nv_rdport(void *obj, int head, u16 port)
+nvkm_rdport(struct nvkm_device *device, int head, u16 port)
 {
-	struct nvkm_device *device = nv_device(obj);
-
 	if (device->card_type >= NV_50)
-		return nv_rd08(obj, 0x601000 + port);
+		return nvkm_rd08(device, 0x601000 + port);
 
 	if (port == 0x03c0 || port == 0x03c1 ||	/* AR */
 	    port == 0x03c2 || port == 0x03da ||	/* INP0 */
 	    port == 0x03d4 || port == 0x03d5)	/* CR */
-		return nv_rd08(obj, 0x601000 + (head * 0x2000) + port);
+		return nvkm_rd08(device, 0x601000 + (head * 0x2000) + port);
 
 	if (port == 0x03c2 || port == 0x03cc ||	/* MISC */
 	    port == 0x03c4 || port == 0x03c5 ||	/* SR */
 	    port == 0x03ce || port == 0x03cf) {	/* GR */
 		if (device->card_type < NV_40)
 			head = 0; /* CR44 selects head */
-		return nv_rd08(obj, 0x0c0000 + (head * 0x2000) + port);
+		return nvkm_rd08(device, 0x0c0000 + (head * 0x2000) + port);
 	}
 
-	nv_error(obj, "unknown vga port 0x%04x\n", port);
 	return 0x00;
 }
 
 void
-nv_wrport(void *obj, int head, u16 port, u8 data)
+nvkm_wrport(struct nvkm_device *device, int head, u16 port, u8 data)
 {
-	struct nvkm_device *device = nv_device(obj);
-
 	if (device->card_type >= NV_50)
-		nv_wr08(obj, 0x601000 + port, data);
+		nvkm_wr08(device, 0x601000 + port, data);
 	else
 	if (port == 0x03c0 || port == 0x03c1 ||	/* AR */
 	    port == 0x03c2 || port == 0x03da ||	/* INP0 */
 	    port == 0x03d4 || port == 0x03d5)	/* CR */
-		nv_wr08(obj, 0x601000 + (head * 0x2000) + port, data);
+		nvkm_wr08(device, 0x601000 + (head * 0x2000) + port, data);
 	else
 	if (port == 0x03c2 || port == 0x03cc ||	/* MISC */
 	    port == 0x03c4 || port == 0x03c5 ||	/* SR */
 	    port == 0x03ce || port == 0x03cf) {	/* GR */
 		if (device->card_type < NV_40)
 			head = 0; /* CR44 selects head */
-		nv_wr08(obj, 0x0c0000 + (head * 0x2000) + port, data);
-	} else
-		nv_error(obj, "unknown vga port 0x%04x\n", port);
+		nvkm_wr08(device, 0x0c0000 + (head * 0x2000) + port, data);
+	}
 }
 
 u8
-nv_rdvgas(void *obj, int head, u8 index)
+nvkm_rdvgas(struct nvkm_device *device, int head, u8 index)
 {
-	nv_wrport(obj, head, 0x03c4, index);
-	return nv_rdport(obj, head, 0x03c5);
+	nvkm_wrport(device, head, 0x03c4, index);
+	return nvkm_rdport(device, head, 0x03c5);
 }
 
 void
-nv_wrvgas(void *obj, int head, u8 index, u8 value)
+nvkm_wrvgas(struct nvkm_device *device, int head, u8 index, u8 value)
 {
-	nv_wrport(obj, head, 0x03c4, index);
-	nv_wrport(obj, head, 0x03c5, value);
+	nvkm_wrport(device, head, 0x03c4, index);
+	nvkm_wrport(device, head, 0x03c5, value);
 }
 
 u8
-nv_rdvgag(void *obj, int head, u8 index)
+nvkm_rdvgag(struct nvkm_device *device, int head, u8 index)
 {
-	nv_wrport(obj, head, 0x03ce, index);
-	return nv_rdport(obj, head, 0x03cf);
+	nvkm_wrport(device, head, 0x03ce, index);
+	return nvkm_rdport(device, head, 0x03cf);
 }
 
 void
-nv_wrvgag(void *obj, int head, u8 index, u8 value)
+nvkm_wrvgag(struct nvkm_device *device, int head, u8 index, u8 value)
 {
-	nv_wrport(obj, head, 0x03ce, index);
-	nv_wrport(obj, head, 0x03cf, value);
+	nvkm_wrport(device, head, 0x03ce, index);
+	nvkm_wrport(device, head, 0x03cf, value);
 }
 
 u8
-nv_rdvgac(void *obj, int head, u8 index)
+nvkm_rdvgac(struct nvkm_device *device, int head, u8 index)
 {
-	nv_wrport(obj, head, 0x03d4, index);
-	return nv_rdport(obj, head, 0x03d5);
+	nvkm_wrport(device, head, 0x03d4, index);
+	return nvkm_rdport(device, head, 0x03d5);
 }
 
 void
-nv_wrvgac(void *obj, int head, u8 index, u8 value)
+nvkm_wrvgac(struct nvkm_device *device, int head, u8 index, u8 value)
 {
-	nv_wrport(obj, head, 0x03d4, index);
-	nv_wrport(obj, head, 0x03d5, value);
+	nvkm_wrport(device, head, 0x03d4, index);
+	nvkm_wrport(device, head, 0x03d5, value);
 }
 
 u8
-nv_rdvgai(void *obj, int head, u16 port, u8 index)
+nvkm_rdvgai(struct nvkm_device *device, int head, u16 port, u8 index)
 {
-	if (port == 0x03c4) return nv_rdvgas(obj, head, index);
-	if (port == 0x03ce) return nv_rdvgag(obj, head, index);
-	if (port == 0x03d4) return nv_rdvgac(obj, head, index);
-	nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
+	if (port == 0x03c4) return nvkm_rdvgas(device, head, index);
+	if (port == 0x03ce) return nvkm_rdvgag(device, head, index);
+	if (port == 0x03d4) return nvkm_rdvgac(device, head, index);
 	return 0x00;
 }
 
 void
-nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value)
+nvkm_wrvgai(struct nvkm_device *device, int head, u16 port, u8 index, u8 value)
 {
-	if      (port == 0x03c4) nv_wrvgas(obj, head, index, value);
-	else if (port == 0x03ce) nv_wrvgag(obj, head, index, value);
-	else if (port == 0x03d4) nv_wrvgac(obj, head, index, value);
-	else nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
+	if      (port == 0x03c4) nvkm_wrvgas(device, head, index, value);
+	else if (port == 0x03ce) nvkm_wrvgag(device, head, index, value);
+	else if (port == 0x03d4) nvkm_wrvgac(device, head, index, value);
 }
 
 bool
-nv_lockvgac(void *obj, bool lock)
+nvkm_lockvgac(struct nvkm_device *device, bool lock)
 {
-	struct nvkm_device *dev = nv_device(obj);
-
-	bool locked = !nv_rdvgac(obj, 0, 0x1f);
+	bool locked = !nvkm_rdvgac(device, 0, 0x1f);
 	u8 data = lock ? 0x99 : 0x57;
-	if (dev->card_type < NV_50)
-		nv_wrvgac(obj, 0, 0x1f, data);
+	if (device->card_type < NV_50)
+		nvkm_wrvgac(device, 0, 0x1f, data);
 	else
-		nv_wrvgac(obj, 0, 0x3f, data);
-	if (dev->chipset == 0x11) {
-		if (!(nv_rd32(obj, 0x001084) & 0x10000000))
-			nv_wrvgac(obj, 1, 0x1f, data);
+		nvkm_wrvgac(device, 0, 0x3f, data);
+	if (device->chipset == 0x11) {
+		if (!(nvkm_rd32(device, 0x001084) & 0x10000000))
+			nvkm_wrvgac(device, 1, 0x1f, data);
 	}
 	return locked;
 }
@@ -171,16 +159,16 @@
  * other values are treated as literal values to set
  */
 u8
-nv_rdvgaowner(void *obj)
+nvkm_rdvgaowner(struct nvkm_device *device)
 {
-	if (nv_device(obj)->card_type < NV_50) {
-		if (nv_device(obj)->chipset == 0x11) {
-			u32 tied = nv_rd32(obj, 0x001084) & 0x10000000;
+	if (device->card_type < NV_50) {
+		if (device->chipset == 0x11) {
+			u32 tied = nvkm_rd32(device, 0x001084) & 0x10000000;
 			if (tied == 0) {
-				u8 slA = nv_rdvgac(obj, 0, 0x28) & 0x80;
-				u8 tvA = nv_rdvgac(obj, 0, 0x33) & 0x01;
-				u8 slB = nv_rdvgac(obj, 1, 0x28) & 0x80;
-				u8 tvB = nv_rdvgac(obj, 1, 0x33) & 0x01;
+				u8 slA = nvkm_rdvgac(device, 0, 0x28) & 0x80;
+				u8 tvA = nvkm_rdvgac(device, 0, 0x33) & 0x01;
+				u8 slB = nvkm_rdvgac(device, 1, 0x28) & 0x80;
+				u8 tvB = nvkm_rdvgac(device, 1, 0x33) & 0x01;
 				if (slA && !tvA) return 0x00;
 				if (slB && !tvB) return 0x03;
 				if (slA) return 0x00;
@@ -190,30 +178,28 @@
 			return 0x04;
 		}
 
-		return nv_rdvgac(obj, 0, 0x44);
+		return nvkm_rdvgac(device, 0, 0x44);
 	}
 
-	nv_error(obj, "rdvgaowner after nv4x\n");
 	return 0x00;
 }
 
 void
-nv_wrvgaowner(void *obj, u8 select)
+nvkm_wrvgaowner(struct nvkm_device *device, u8 select)
 {
-	if (nv_device(obj)->card_type < NV_50) {
+	if (device->card_type < NV_50) {
 		u8 owner = (select == 1) ? 3 : select;
-		if (nv_device(obj)->chipset == 0x11) {
+		if (device->chipset == 0x11) {
 			/* workaround hw lockup bug */
-			nv_rdvgac(obj, 0, 0x1f);
-			nv_rdvgac(obj, 1, 0x1f);
+			nvkm_rdvgac(device, 0, 0x1f);
+			nvkm_rdvgac(device, 1, 0x1f);
 		}
 
-		nv_wrvgac(obj, 0, 0x44, owner);
+		nvkm_wrvgac(device, 0, 0x44, owner);
 
-		if (nv_device(obj)->chipset == 0x11) {
-			nv_wrvgac(obj, 0, 0x2e, owner);
-			nv_wrvgac(obj, 0, 0x2e, owner);
+		if (device->chipset == 0x11) {
+			nvkm_wrvgac(device, 0, 0x2e, owner);
+			nvkm_wrvgac(device, 0, 0x2e, owner);
 		}
-	} else
-		nv_error(obj, "wrvgaowner after nv4x\n");
+	}
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
new file mode 100644
index 0000000..c4a2ce9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/Kbuild
@@ -0,0 +1,11 @@
+nvkm-y += nvkm/engine/dma/base.o
+nvkm-y += nvkm/engine/dma/nv04.o
+nvkm-y += nvkm/engine/dma/nv50.o
+nvkm-y += nvkm/engine/dma/gf100.o
+nvkm-y += nvkm/engine/dma/gf119.o
+
+nvkm-y += nvkm/engine/dma/user.o
+nvkm-y += nvkm/engine/dma/usernv04.o
+nvkm-y += nvkm/engine/dma/usernv50.o
+nvkm-y += nvkm/engine/dma/usergf100.o
+nvkm-y += nvkm/engine/dma/usergf119.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
new file mode 100644
index 0000000..9769fc0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+#include <core/client.h>
+#include <engine/fifo.h>
+
+#include <nvif/class.h>
+
+struct nvkm_dmaobj *
+nvkm_dma_search(struct nvkm_dma *dma, struct nvkm_client *client, u64 object)
+{
+	struct rb_node *node = client->dmaroot.rb_node;
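+	/* binary-search the client's rb-tree of dma objects by handle */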
+	while (node) {
+		struct nvkm_dmaobj *dmaobj =
+			container_of(node, typeof(*dmaobj), rb);
+		if (object < dmaobj->handle)
+			node = node->rb_left;
+		else
+		if (object > dmaobj->handle)
+			node = node->rb_right;
+		else
+			return dmaobj;
+	}
+	return NULL;
+}
+
+static int
+nvkm_dma_oclass_new(struct nvkm_device *device,
+		    const struct nvkm_oclass *oclass, void *data, u32 size,
+		    struct nvkm_object **pobject)
+{
+	struct nvkm_dma *dma = nvkm_dma(oclass->engine);
+	struct nvkm_dmaobj *dmaobj = NULL;
+	struct nvkm_client *client = oclass->client;
+	struct rb_node **ptr = &client->dmaroot.rb_node;
+	struct rb_node *parent = NULL;
+	int ret;
+
+	ret = dma->func->class_new(dma, oclass, data, size, &dmaobj);
+	if (dmaobj)
+		*pobject = &dmaobj->object;
+	if (ret)
+		return ret;
+
+	dmaobj->handle = oclass->object;
+
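+	/* link the new object into the client's tree; duplicate handles
+	 * are rejected
+	 */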
+	while (*ptr) {
+		struct nvkm_dmaobj *obj = container_of(*ptr, typeof(*obj), rb);
+		parent = *ptr;
+		if (dmaobj->handle < obj->handle)
+			ptr = &parent->rb_left;
+		else
+		if (dmaobj->handle > obj->handle)
+			ptr = &parent->rb_right;
+		else
+			return -EEXIST;
+	}
+
+	rb_link_node(&dmaobj->rb, parent, ptr);
+	rb_insert_color(&dmaobj->rb, &client->dmaroot);
+	return 0;
+}
+
+static const struct nvkm_device_oclass
+nvkm_dma_oclass_base = {
+	.ctor = nvkm_dma_oclass_new,
+};
+
+static int
+nvkm_dma_oclass_fifo_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+			 struct nvkm_object **pobject)
+{
+	return nvkm_dma_oclass_new(oclass->engine->subdev.device,
+				   oclass, data, size, pobject);
+}
+
+static const struct nvkm_sclass
+nvkm_dma_sclass[] = {
+	{ 0, 0, NV_DMA_FROM_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
+	{ 0, 0, NV_DMA_TO_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
+	{ 0, 0, NV_DMA_IN_MEMORY, NULL, nvkm_dma_oclass_fifo_new },
+};
+
+static int
+nvkm_dma_oclass_base_get(struct nvkm_oclass *sclass, int index,
+			 const struct nvkm_device_oclass **class)
+{
+	const int count = ARRAY_SIZE(nvkm_dma_sclass);
+	if (index < count) {
+		const struct nvkm_sclass *oclass = &nvkm_dma_sclass[index];
+		sclass->base = oclass[0];
+		sclass->engn = oclass;
+		*class = &nvkm_dma_oclass_base;
+		return index;
+	}
+	return count;
+}
+
+static int
+nvkm_dma_oclass_fifo_get(struct nvkm_oclass *oclass, int index)
+{
+	const int count = ARRAY_SIZE(nvkm_dma_sclass);
+	if (index < count) {
+		oclass->base = nvkm_dma_sclass[index];
+		return index;
+	}
+	return count;
+}
+
+static void *
+nvkm_dma_dtor(struct nvkm_engine *engine)
+{
+	return nvkm_dma(engine);
+}
+
+static const struct nvkm_engine_func
+nvkm_dma = {
+	.dtor = nvkm_dma_dtor,
+	.base.sclass = nvkm_dma_oclass_base_get,
+	.fifo.sclass = nvkm_dma_oclass_fifo_get,
+};
+
+int
+nvkm_dma_new_(const struct nvkm_dma_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_dma **pdma)
+{
+	struct nvkm_dma *dma;
+
+	if (!(dma = *pdma = kzalloc(sizeof(*dma), GFP_KERNEL)))
+		return -ENOMEM;
+	dma->func = func;
+
+	return nvkm_engine_ctor(&nvkm_dma, device, index,
+				0, true, &dma->engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
new file mode 100644
index 0000000..efec5d3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf100.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "user.h"
+
+static const struct nvkm_dma_func
+gf100_dma = {
+	.class_new = gf100_dmaobj_new,
+};
+
+int
+gf100_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+{
+	return nvkm_dma_new_(&gf100_dma, device, index, pdma);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
new file mode 100644
index 0000000..34c7660
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/gf119.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "user.h"
+
+static const struct nvkm_dma_func
+gf119_dma = {
+	.class_new = gf119_dmaobj_new,
+};
+
+int
+gf119_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+{
+	return nvkm_dma_new_(&gf119_dma, device, index, pdma);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
new file mode 100644
index 0000000..30747a0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv04.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "user.h"
+
+static const struct nvkm_dma_func
+nv04_dma = {
+	.class_new = nv04_dmaobj_new,
+};
+
+int
+nv04_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+{
+	return nvkm_dma_new_(&nv04_dma, device, index, pdma);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
new file mode 100644
index 0000000..77aca7b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/nv50.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "user.h"
+
+static const struct nvkm_dma_func
+nv50_dma = {
+	.class_new = nv50_dmaobj_new,
+};
+
+int
+nv50_dma_new(struct nvkm_device *device, int index, struct nvkm_dma **pdma)
+{
+	return nvkm_dma_new_(&nv50_dma, device, index, pdma);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
new file mode 100644
index 0000000..deb37ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_DMA_PRIV_H__
+#define __NVKM_DMA_PRIV_H__
+#define nvkm_dma(p) container_of((p), struct nvkm_dma, engine)
+#include <engine/dma.h>
+
+struct nvkm_dmaobj_func {
+	int (*bind)(struct nvkm_dmaobj *, struct nvkm_gpuobj *, int align,
+		    struct nvkm_gpuobj **);
+};
+
+int nvkm_dma_new_(const struct nvkm_dma_func *, struct nvkm_device *,
+		  int index, struct nvkm_dma **);
+
+struct nvkm_dma_func {
+	int (*class_new)(struct nvkm_dma *, const struct nvkm_oclass *,
+			 void *data, u32 size, struct nvkm_dmaobj **);
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
new file mode 100644
index 0000000..45ab062
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "user.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nvkm_dmaobj_bind(struct nvkm_object *base, struct nvkm_gpuobj *gpuobj,
+		 int align, struct nvkm_gpuobj **pgpuobj)
+{
+	struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base);
+	return dmaobj->func->bind(dmaobj, gpuobj, align, pgpuobj);
+}
+
+static void *
+nvkm_dmaobj_dtor(struct nvkm_object *base)
+{
+	struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base);
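+	/* drop the object from the client's dma tree, if it was ever inserted */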
+	if (!RB_EMPTY_NODE(&dmaobj->rb))
+		rb_erase(&dmaobj->rb, &dmaobj->object.client->dmaroot);
+	return dmaobj;
+}
+
+static const struct nvkm_object_func
+nvkm_dmaobj_func = {
+	.dtor = nvkm_dmaobj_dtor,
+	.bind = nvkm_dmaobj_bind,
+};
+
+int
+nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
+		 const struct nvkm_oclass *oclass, void **pdata, u32 *psize,
+		 struct nvkm_dmaobj *dmaobj)
+{
+	union {
+		struct nv_dma_v0 v0;
+	} *args = *pdata;
+	struct nvkm_device *device = dma->engine.subdev.device;
+	struct nvkm_client *client = oclass->client;
+	struct nvkm_object *parent = oclass->parent;
+	struct nvkm_instmem *instmem = device->imem;
+	struct nvkm_fb *fb = device->fb;
+	void *data = *pdata;
+	u32 size = *psize;
+	int ret;
+
+	nvkm_object_ctor(&nvkm_dmaobj_func, oclass, &dmaobj->object);
+	dmaobj->func = func;
+	dmaobj->dma = dma;
+	RB_CLEAR_NODE(&dmaobj->rb);
+
+	nvif_ioctl(parent, "create dma size %d\n", *psize);
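+	/* nvif_unpack() assigns "ret" behind the scenes, hence the bare
+	 * return in the else branch below
+	 */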
+	if (nvif_unpack(args->v0, 0, 0, true)) {
+		nvif_ioctl(parent, "create dma vers %d target %d access %d "
+				   "start %016llx limit %016llx\n",
+			   args->v0.version, args->v0.target, args->v0.access,
+			   args->v0.start, args->v0.limit);
+		dmaobj->target = args->v0.target;
+		dmaobj->access = args->v0.access;
+		dmaobj->start  = args->v0.start;
+		dmaobj->limit  = args->v0.limit;
+	} else
+		return ret;
+
+	*pdata = data;
+	*psize = size;
+
+	if (dmaobj->start > dmaobj->limit)
+		return -EINVAL;
+
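+	/* translate interface targets to memory targets, enforcing access
+	 * limits on unprivileged clients
+	 */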
+	switch (dmaobj->target) {
+	case NV_DMA_V0_TARGET_VM:
+		dmaobj->target = NV_MEM_TARGET_VM;
+		break;
+	case NV_DMA_V0_TARGET_VRAM:
+		if (!client->super) {
+			if (dmaobj->limit >= fb->ram->size - instmem->reserved)
+				return -EACCES;
+			if (device->card_type >= NV_50)
+				return -EACCES;
+		}
+		dmaobj->target = NV_MEM_TARGET_VRAM;
+		break;
+	case NV_DMA_V0_TARGET_PCI:
+		if (!client->super)
+			return -EACCES;
+		dmaobj->target = NV_MEM_TARGET_PCI;
+		break;
+	case NV_DMA_V0_TARGET_PCI_US:
+	case NV_DMA_V0_TARGET_AGP:
+		if (!client->super)
+			return -EACCES;
+		dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dmaobj->access) {
+	case NV_DMA_V0_ACCESS_VM:
+		dmaobj->access = NV_MEM_ACCESS_VM;
+		break;
+	case NV_DMA_V0_ACCESS_RD:
+		dmaobj->access = NV_MEM_ACCESS_RO;
+		break;
+	case NV_DMA_V0_ACCESS_WR:
+		dmaobj->access = NV_MEM_ACCESS_WO;
+		break;
+	case NV_DMA_V0_ACCESS_RDWR:
+		dmaobj->access = NV_MEM_ACCESS_RW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
new file mode 100644
index 0000000..69a7f10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_DMA_USER_H__
+#define __NVKM_DMA_USER_H__
+#define nvkm_dmaobj(p) container_of((p), struct nvkm_dmaobj, object)
+#include "priv.h"
+
+int nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *, struct nvkm_dma *,
+		     const struct nvkm_oclass *, void **data, u32 *size,
+		     struct nvkm_dmaobj *);
+
+int nv04_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
+		    struct nvkm_dmaobj **);
+int nv50_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
+		    struct nvkm_dmaobj **);
+int gf100_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
+		     struct nvkm_dmaobj **);
+int gf119_dmaobj_new(struct nvkm_dma *, const struct nvkm_oclass *, void *, u32,
+		     struct nvkm_dmaobj **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
new file mode 100644
index 0000000..13e341c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf100.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define gf100_dmaobj(p) container_of((p), struct gf100_dmaobj, base)
+#include "user.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct gf100_dmaobj {
+	struct nvkm_dmaobj base;
+	u32 flags0;
+	u32 flags5;
+};
+
+static int
+gf100_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
+		  int align, struct nvkm_gpuobj **pgpuobj)
+{
+	struct gf100_dmaobj *dmaobj = gf100_dmaobj(base);
+	struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
+	int ret;
+
+	ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
+		nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
+		nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
+		nvkm_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
+					  upper_32_bits(dmaobj->base.start));
+		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x14, dmaobj->flags5);
+		nvkm_done(*pgpuobj);
+	}
+
+	return ret;
+}
+
+static const struct nvkm_dmaobj_func
+gf100_dmaobj_func = {
+	.bind = gf100_dmaobj_bind,
+};
+
+int
+gf100_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
+		 void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
+{
+	union {
+		struct gf100_dma_v0 v0;
+	} *args;
+	struct nvkm_object *parent = oclass->parent;
+	struct gf100_dmaobj *dmaobj;
+	u32 kind, user, unkn;
+	int ret;
+
+	if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pdmaobj = &dmaobj->base;
+
+	ret = nvkm_dmaobj_ctor(&gf100_dmaobj_func, dma, oclass,
+			       &data, &size, &dmaobj->base);
+	if (ret)
+		return ret;
+
+	args = data;
+
+	nvif_ioctl(parent, "create gf100 dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent,
+			   "create gf100 dma vers %d priv %d kind %02x\n",
+			   args->v0.version, args->v0.priv, args->v0.kind);
+		kind = args->v0.kind;
+		user = args->v0.priv;
+		unkn = 0;
+	} else
+	if (size == 0) {
+		if (dmaobj->base.target != NV_MEM_TARGET_VM) {
+			kind = GF100_DMA_V0_KIND_PITCH;
+			user = GF100_DMA_V0_PRIV_US;
+			unkn = 2;
+		} else {
+			kind = GF100_DMA_V0_KIND_VM;
+			user = GF100_DMA_V0_PRIV_VM;
+			unkn = 0;
+		}
+	} else
+		return ret;
+
+	if (user > 2)
+		return -EINVAL;
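+	/* kind at bits 22+, privilege level at bits 20-21, class in the low bits */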
+	dmaobj->flags0 |= (kind << 22) | (user << 20) | oclass->base.oclass;
+	dmaobj->flags5 |= (unkn << 16);
+
+	switch (dmaobj->base.target) {
+	case NV_MEM_TARGET_VM:
+		dmaobj->flags0 |= 0x00000000;
+		break;
+	case NV_MEM_TARGET_VRAM:
+		dmaobj->flags0 |= 0x00010000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		dmaobj->flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		dmaobj->flags0 |= 0x00030000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dmaobj->base.access) {
+	case NV_MEM_ACCESS_VM:
+		break;
+	case NV_MEM_ACCESS_RO:
+		dmaobj->flags0 |= 0x00040000;
+		break;
+	case NV_MEM_ACCESS_WO:
+	case NV_MEM_ACCESS_RW:
+		dmaobj->flags0 |= 0x00080000;
+		break;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c
new file mode 100644
index 0000000..0e1af8b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usergf119.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define gf119_dmaobj(p) container_of((p), struct gf119_dmaobj, base)
+#include "user.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct gf119_dmaobj {
+	struct nvkm_dmaobj base;
+	u32 flags0;
+};
+
+static int
+gf119_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
+		  int align, struct nvkm_gpuobj **pgpuobj)
+{
+	struct gf119_dmaobj *dmaobj = gf119_dmaobj(base);
+	struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
+	int ret;
+
+	ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
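+		/* gf119 objects appear to store base/limit in 256-byte units */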
+		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
+		nvkm_wo32(*pgpuobj, 0x04, dmaobj->base.start >> 8);
+		nvkm_wo32(*pgpuobj, 0x08, dmaobj->base.limit >> 8);
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x14, 0x00000000);
+		nvkm_done(*pgpuobj);
+	}
+
+	return ret;
+}
+
+static const struct nvkm_dmaobj_func
+gf119_dmaobj_func = {
+	.bind = gf119_dmaobj_bind,
+};
+
+int
+gf119_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
+		 void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
+{
+	union {
+		struct gf119_dma_v0 v0;
+	} *args;
+	struct nvkm_object *parent = oclass->parent;
+	struct gf119_dmaobj *dmaobj;
+	u32 kind, page;
+	int ret;
+
+	if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pdmaobj = &dmaobj->base;
+
+	ret = nvkm_dmaobj_ctor(&gf119_dmaobj_func, dma, oclass,
+			       &data, &size, &dmaobj->base);
+	if (ret)
+		return ret;
+
+	args = data;
+
+	nvif_ioctl(parent, "create gf119 dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent,
+			   "create gf119 dma vers %d page %d kind %02x\n",
+			   args->v0.version, args->v0.page, args->v0.kind);
+		kind = args->v0.kind;
+		page = args->v0.page;
+	} else
+	if (size == 0) {
+		if (dmaobj->base.target != NV_MEM_TARGET_VM) {
+			kind = GF119_DMA_V0_KIND_PITCH;
+			page = GF119_DMA_V0_PAGE_SP;
+		} else {
+			kind = GF119_DMA_V0_KIND_VM;
+			page = GF119_DMA_V0_PAGE_LP;
+		}
+	} else
+		return ret;
+
+	if (page > 1)
+		return -EINVAL;
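+	/* kind at bits 20+, page-size select at bit 6 */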
+	dmaobj->flags0 = (kind << 20) | (page << 6);
+
+	switch (dmaobj->base.target) {
+	case NV_MEM_TARGET_VRAM:
+		dmaobj->flags0 |= 0x00000009;
+		break;
+	case NV_MEM_TARGET_VM:
+	case NV_MEM_TARGET_PCI:
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		/* XXX: don't currently know how to construct a real one
+		 *      of these.  we only use them to represent pushbufs
+		 *      on these chipsets, and the classes that use them
+		 *      deal with the target themselves.
+		 */
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
new file mode 100644
index 0000000..c95942e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define nv04_dmaobj(p) container_of((p), struct nv04_dmaobj, base)
+#include "user.h"
+
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+#include <subdev/mmu/nv04.h>
+
+#include <nvif/class.h>
+
+struct nv04_dmaobj {
+	struct nvkm_dmaobj base;
+	bool clone;
+	u32 flags0;
+	u32 flags2;
+};
+
+static int
+nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
+		 int align, struct nvkm_gpuobj **pgpuobj)
+{
+	struct nv04_dmaobj *dmaobj = nv04_dmaobj(base);
+	struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
+	u64 offset = dmaobj->base.start & 0xfffff000;
+	u64 adjust = dmaobj->base.start & 0x00000fff;
+	u32 length = dmaobj->base.limit - dmaobj->base.start;
+	int ret;
+
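+	/* "clone" objects mirror the nv04 mmu's page table: wrap the pgt
+	 * itself when start == 0, otherwise resolve the pte at this offset
+	 */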
+	if (dmaobj->clone) {
+		struct nv04_mmu *mmu = nv04_mmu(device->mmu);
+		struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0];
+		if (!dmaobj->base.start)
+			return nvkm_gpuobj_wrap(pgt, pgpuobj);
+		nvkm_kmap(pgt);
+		offset  = nvkm_ro32(pgt, 8 + (offset >> 10));
+		offset &= 0xfffff000;
+		nvkm_done(pgt);
+	}
+
+	ret = nvkm_gpuobj_new(device, 16, align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | (adjust << 20));
+		nvkm_wo32(*pgpuobj, 0x04, length);
+		nvkm_wo32(*pgpuobj, 0x08, dmaobj->flags2 | offset);
+		nvkm_wo32(*pgpuobj, 0x0c, dmaobj->flags2 | offset);
+		nvkm_done(*pgpuobj);
+	}
+
+	return ret;
+}
+
+static const struct nvkm_dmaobj_func
+nv04_dmaobj_func = {
+	.bind = nv04_dmaobj_bind,
+};
+
+int
+nv04_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
+		void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
+{
+	struct nvkm_device *device = dma->engine.subdev.device;
+	struct nv04_dmaobj *dmaobj;
+	int ret;
+
+	if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pdmaobj = &dmaobj->base;
+
+	ret = nvkm_dmaobj_ctor(&nv04_dmaobj_func, dma, oclass,
+			       &data, &size, &dmaobj->base);
+	if (ret)
+		return ret;
+
+	if (dmaobj->base.target == NV_MEM_TARGET_VM) {
+		if (device->mmu->func == &nv04_mmu)
+			dmaobj->clone = true;
+		dmaobj->base.target = NV_MEM_TARGET_PCI;
+		dmaobj->base.access = NV_MEM_ACCESS_RW;
+	}
+
+	dmaobj->flags0 = oclass->base.oclass;
+	switch (dmaobj->base.target) {
+	case NV_MEM_TARGET_VRAM:
+		dmaobj->flags0 |= 0x00003000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		dmaobj->flags0 |= 0x00023000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		dmaobj->flags0 |= 0x00033000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dmaobj->base.access) {
+	case NV_MEM_ACCESS_RO:
+		dmaobj->flags0 |= 0x00004000;
+		break;
+	case NV_MEM_ACCESS_WO:
+		dmaobj->flags0 |= 0x00008000;
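+		/* fall through: write access also needs the flags2 write bit */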
+	case NV_MEM_ACCESS_RW:
+		dmaobj->flags2 |= 0x00000002;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
new file mode 100644
index 0000000..5b7ce31
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv50.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define nv50_dmaobj(p) container_of((p), struct nv50_dmaobj, base)
+#include "user.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+struct nv50_dmaobj {
+	struct nvkm_dmaobj base;
+	u32 flags0;
+	u32 flags5;
+};
+
+static int
+nv50_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
+		 int align, struct nvkm_gpuobj **pgpuobj)
+{
+	struct nv50_dmaobj *dmaobj = nv50_dmaobj(base);
+	struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
+	int ret;
+
+	ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
+		nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
+		nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
+		nvkm_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
+					  upper_32_bits(dmaobj->base.start));
+		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x14, dmaobj->flags5);
+		nvkm_done(*pgpuobj);
+	}
+
+	return ret;
+}
+
+static const struct nvkm_dmaobj_func
+nv50_dmaobj_func = {
+	.bind = nv50_dmaobj_bind,
+};
+
+int
+nv50_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
+		void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
+{
+	union {
+		struct nv50_dma_v0 v0;
+	} *args;
+	struct nvkm_object *parent = oclass->parent;
+	struct nv50_dmaobj *dmaobj;
+	u32 user, part, comp, kind;
+	int ret;
+
+	if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pdmaobj = &dmaobj->base;
+
+	ret = nvkm_dmaobj_ctor(&nv50_dmaobj_func, dma, oclass,
+			       &data, &size, &dmaobj->base);
+	if (ret)
+		return ret;
+
+	args = data;
+
+	nvif_ioctl(parent, "create nv50 dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create nv50 dma vers %d priv %d part %d "
+				   "comp %d kind %02x\n", args->v0.version,
+			   args->v0.priv, args->v0.part, args->v0.comp,
+			   args->v0.kind);
+		user = args->v0.priv;
+		part = args->v0.part;
+		comp = args->v0.comp;
+		kind = args->v0.kind;
+	} else
+	if (size == 0) {
+		if (dmaobj->base.target != NV_MEM_TARGET_VM) {
+			user = NV50_DMA_V0_PRIV_US;
+			part = NV50_DMA_V0_PART_256;
+			comp = NV50_DMA_V0_COMP_NONE;
+			kind = NV50_DMA_V0_KIND_PITCH;
+		} else {
+			user = NV50_DMA_V0_PRIV_VM;
+			part = NV50_DMA_V0_PART_VM;
+			comp = NV50_DMA_V0_COMP_VM;
+			kind = NV50_DMA_V0_KIND_VM;
+		}
+	} else
+		return ret;
+
+	if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
+		return -EINVAL;
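+	/* comp at bits 29+, kind at 22+, privilege at 20+, class in the
+	 * low bits; partition size goes into flags5
+	 */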
+	dmaobj->flags0 = (comp << 29) | (kind << 22) | (user << 20) |
+			 oclass->base.oclass;
+	dmaobj->flags5 = (part << 16);
+
+	switch (dmaobj->base.target) {
+	case NV_MEM_TARGET_VM:
+		dmaobj->flags0 |= 0x00000000;
+		break;
+	case NV_MEM_TARGET_VRAM:
+		dmaobj->flags0 |= 0x00010000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		dmaobj->flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		dmaobj->flags0 |= 0x00030000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dmaobj->base.access) {
+	case NV_MEM_ACCESS_VM:
+		break;
+	case NV_MEM_ACCESS_RO:
+		dmaobj->flags0 |= 0x00040000;
+		break;
+	case NV_MEM_ACCESS_WO:
+	case NV_MEM_ACCESS_RW:
+		dmaobj->flags0 |= 0x00080000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild
deleted file mode 100644
index 7529632..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/Kbuild
+++ /dev/null
@@ -1,5 +0,0 @@
-nvkm-y += nvkm/engine/dmaobj/base.o
-nvkm-y += nvkm/engine/dmaobj/nv04.o
-nvkm-y += nvkm/engine/dmaobj/nv50.o
-nvkm-y += nvkm/engine/dmaobj/gf100.o
-nvkm-y += nvkm/engine/dmaobj/gf110.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c
deleted file mode 100644
index a2b60d8..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/base.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/client.h>
-#include <core/device.h>
-#include <subdev/fb.h>
-#include <subdev/instmem.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static int
-nvkm_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		 struct nvkm_gpuobj **pgpuobj)
-{
-	const struct nvkm_dmaeng_impl *impl = (void *)
-		nv_oclass(nv_object(dmaobj)->engine);
-	int ret = 0;
-
-	if (nv_object(dmaobj) == parent) { /* ctor bind */
-		if (nv_mclass(parent->parent) == NV_DEVICE) {
-			/* delayed, or no, binding */
-			return 0;
-		}
-		ret = impl->bind(dmaobj, parent, pgpuobj);
-		if (ret == 0)
-			nvkm_object_ref(NULL, &parent);
-		return ret;
-	}
-
-	return impl->bind(dmaobj, parent, pgpuobj);
-}
-
-int
-nvkm_dmaobj_create_(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void **pdata, u32 *psize,
-		    int length, void **pobject)
-{
-	union {
-		struct nv_dma_v0 v0;
-	} *args = *pdata;
-	struct nvkm_instmem *instmem = nvkm_instmem(parent);
-	struct nvkm_client *client = nvkm_client(parent);
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_dmaobj *dmaobj;
-	void *data = *pdata;
-	u32 size = *psize;
-	int ret;
-
-	ret = nvkm_object_create_(parent, engine, oclass, 0, length, pobject);
-	dmaobj = *pobject;
-	if (ret)
-		return ret;
-
-	nv_ioctl(parent, "create dma size %d\n", *psize);
-	if (nvif_unpack(args->v0, 0, 0, true)) {
-		nv_ioctl(parent, "create dma vers %d target %d access %d "
-				 "start %016llx limit %016llx\n",
-			 args->v0.version, args->v0.target, args->v0.access,
-			 args->v0.start, args->v0.limit);
-		dmaobj->target = args->v0.target;
-		dmaobj->access = args->v0.access;
-		dmaobj->start  = args->v0.start;
-		dmaobj->limit  = args->v0.limit;
-	} else
-		return ret;
-
-	*pdata = data;
-	*psize = size;
-
-	if (dmaobj->start > dmaobj->limit)
-		return -EINVAL;
-
-	switch (dmaobj->target) {
-	case NV_DMA_V0_TARGET_VM:
-		dmaobj->target = NV_MEM_TARGET_VM;
-		break;
-	case NV_DMA_V0_TARGET_VRAM:
-		if (!client->super) {
-			if (dmaobj->limit >= pfb->ram->size - instmem->reserved)
-				return -EACCES;
-			if (device->card_type >= NV_50)
-				return -EACCES;
-		}
-		dmaobj->target = NV_MEM_TARGET_VRAM;
-		break;
-	case NV_DMA_V0_TARGET_PCI:
-		if (!client->super)
-			return -EACCES;
-		dmaobj->target = NV_MEM_TARGET_PCI;
-		break;
-	case NV_DMA_V0_TARGET_PCI_US:
-	case NV_DMA_V0_TARGET_AGP:
-		if (!client->super)
-			return -EACCES;
-		dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	switch (dmaobj->access) {
-	case NV_DMA_V0_ACCESS_VM:
-		dmaobj->access = NV_MEM_ACCESS_VM;
-		break;
-	case NV_DMA_V0_ACCESS_RD:
-		dmaobj->access = NV_MEM_ACCESS_RO;
-		break;
-	case NV_DMA_V0_ACCESS_WR:
-		dmaobj->access = NV_MEM_ACCESS_WO;
-		break;
-	case NV_DMA_V0_ACCESS_RDWR:
-		dmaobj->access = NV_MEM_ACCESS_RW;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return ret;
-}
-
-int
-_nvkm_dmaeng_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	const struct nvkm_dmaeng_impl *impl = (void *)oclass;
-	struct nvkm_dmaeng *dmaeng;
-	int ret;
-
-	ret = nvkm_engine_create(parent, engine, oclass, true, "DMAOBJ",
-				 "dmaobj", &dmaeng);
-	*pobject = nv_object(dmaeng);
-	if (ret)
-		return ret;
-
-	nv_engine(dmaeng)->sclass = impl->sclass;
-	dmaeng->bind = nvkm_dmaobj_bind;
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c
deleted file mode 100644
index f880e51..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf100.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct gf100_dmaobj_priv {
-	struct nvkm_dmaobj base;
-	u32 flags0;
-	u32 flags5;
-};
-
-static int
-gf100_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		  struct nvkm_gpuobj **pgpuobj)
-{
-	struct gf100_dmaobj_priv *priv = (void *)dmaobj;
-	int ret;
-
-	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-		switch (nv_mclass(parent->parent)) {
-		case GT214_DISP_CORE_CHANNEL_DMA:
-		case GT214_DISP_BASE_CHANNEL_DMA:
-		case GT214_DISP_OVERLAY_CHANNEL_DMA:
-			break;
-		default:
-			return -EINVAL;
-		}
-	} else
-		return 0;
-
-	ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
-	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
-		nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
-		nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
-		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
-					upper_32_bits(priv->base.start));
-		nv_wo32(*pgpuobj, 0x10, 0x00000000);
-		nv_wo32(*pgpuobj, 0x14, priv->flags5);
-	}
-
-	return ret;
-}
-
-static int
-gf100_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct nvkm_dmaeng *dmaeng = (void *)engine;
-	union {
-		struct gf100_dma_v0 v0;
-	} *args;
-	struct gf100_dmaobj_priv *priv;
-	u32 kind, user, unkn;
-	int ret;
-
-	ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-	args = data;
-
-	nv_ioctl(parent, "create gf100 dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create gf100 dma vers %d priv %d kind %02x\n",
-			 args->v0.version, args->v0.priv, args->v0.kind);
-		kind = args->v0.kind;
-		user = args->v0.priv;
-		unkn = 0;
-	} else
-	if (size == 0) {
-		if (priv->base.target != NV_MEM_TARGET_VM) {
-			kind = GF100_DMA_V0_KIND_PITCH;
-			user = GF100_DMA_V0_PRIV_US;
-			unkn = 2;
-		} else {
-			kind = GF100_DMA_V0_KIND_VM;
-			user = GF100_DMA_V0_PRIV_VM;
-			unkn = 0;
-		}
-	} else
-		return ret;
-
-	if (user > 2)
-		return -EINVAL;
-	priv->flags0 |= (kind << 22) | (user << 20);
-	priv->flags5 |= (unkn << 16);
-
-	switch (priv->base.target) {
-	case NV_MEM_TARGET_VM:
-		priv->flags0 |= 0x00000000;
-		break;
-	case NV_MEM_TARGET_VRAM:
-		priv->flags0 |= 0x00010000;
-		break;
-	case NV_MEM_TARGET_PCI:
-		priv->flags0 |= 0x00020000;
-		break;
-	case NV_MEM_TARGET_PCI_NOSNOOP:
-		priv->flags0 |= 0x00030000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	switch (priv->base.access) {
-	case NV_MEM_ACCESS_VM:
-		break;
-	case NV_MEM_ACCESS_RO:
-		priv->flags0 |= 0x00040000;
-		break;
-	case NV_MEM_ACCESS_WO:
-	case NV_MEM_ACCESS_RW:
-		priv->flags0 |= 0x00080000;
-		break;
-	}
-
-	return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
-}
-
-static struct nvkm_ofuncs
-gf100_dmaobj_ofuncs = {
-	.ctor =  gf100_dmaobj_ctor,
-	.dtor = _nvkm_dmaobj_dtor,
-	.init = _nvkm_dmaobj_init,
-	.fini = _nvkm_dmaobj_fini,
-};
-
-static struct nvkm_oclass
-gf100_dmaeng_sclass[] = {
-	{ NV_DMA_FROM_MEMORY, &gf100_dmaobj_ofuncs },
-	{ NV_DMA_TO_MEMORY, &gf100_dmaobj_ofuncs },
-	{ NV_DMA_IN_MEMORY, &gf100_dmaobj_ofuncs },
-	{}
-};
-
-struct nvkm_oclass *
-gf100_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
-	.base.handle = NV_ENGINE(DMAOBJ, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_dmaeng_ctor,
-		.dtor = _nvkm_dmaeng_dtor,
-		.init = _nvkm_dmaeng_init,
-		.fini = _nvkm_dmaeng_fini,
-	},
-	.sclass = gf100_dmaeng_sclass,
-	.bind = gf100_dmaobj_bind,
-}.base;
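
gf100_dmaobj_ctor above validates each unpacked field and then packs kind/user/unkn into the flags0/flags5 bitfields with fixed shifts (kind at bit 22, priv at bit 20 of flags0; unkn at bit 16 of flags5). A hedged sketch of just the packing step, with an invented helper name:

#include <errno.h>
#include <stdint.h>

static int gf100_pack_flags(uint32_t kind, uint32_t user, uint32_t unkn,
			    uint32_t *flags0, uint32_t *flags5)
{
	if (user > 2)			/* only privilege levels 0..2 exist */
		return -EINVAL;
	*flags0 = (kind << 22) | (user << 20);
	*flags5 = (unkn << 16);
	return 0;
}
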
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c
deleted file mode 100644
index bf8f0f2..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/gf110.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct gf110_dmaobj_priv {
-	struct nvkm_dmaobj base;
-	u32 flags0;
-};
-
-static int
-gf110_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		  struct nvkm_gpuobj **pgpuobj)
-{
-	struct gf110_dmaobj_priv *priv = (void *)dmaobj;
-	int ret;
-
-	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-		switch (nv_mclass(parent->parent)) {
-		case GF110_DISP_CORE_CHANNEL_DMA:
-		case GK104_DISP_CORE_CHANNEL_DMA:
-		case GK110_DISP_CORE_CHANNEL_DMA:
-		case GM107_DISP_CORE_CHANNEL_DMA:
-		case GM204_DISP_CORE_CHANNEL_DMA:
-		case GF110_DISP_BASE_CHANNEL_DMA:
-		case GK104_DISP_BASE_CHANNEL_DMA:
-		case GK110_DISP_BASE_CHANNEL_DMA:
-		case GF110_DISP_OVERLAY_CONTROL_DMA:
-		case GK104_DISP_OVERLAY_CONTROL_DMA:
-			break;
-		default:
-			return -EINVAL;
-		}
-	} else
-		return 0;
-
-	ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
-	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, priv->flags0);
-		nv_wo32(*pgpuobj, 0x04, priv->base.start >> 8);
-		nv_wo32(*pgpuobj, 0x08, priv->base.limit >> 8);
-		nv_wo32(*pgpuobj, 0x0c, 0x00000000);
-		nv_wo32(*pgpuobj, 0x10, 0x00000000);
-		nv_wo32(*pgpuobj, 0x14, 0x00000000);
-	}
-
-	return ret;
-}
-
-static int
-gf110_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct nvkm_dmaeng *dmaeng = (void *)engine;
-	union {
-		struct gf110_dma_v0 v0;
-	} *args;
-	struct gf110_dmaobj_priv *priv;
-	u32 kind, page;
-	int ret;
-
-	ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-	args = data;
-
-	nv_ioctl(parent, "create gf110 dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create gf100 dma vers %d page %d kind %02x\n",
-			 args->v0.version, args->v0.page, args->v0.kind);
-		kind = args->v0.kind;
-		page = args->v0.page;
-	} else
-	if (size == 0) {
-		if (priv->base.target != NV_MEM_TARGET_VM) {
-			kind = GF110_DMA_V0_KIND_PITCH;
-			page = GF110_DMA_V0_PAGE_SP;
-		} else {
-			kind = GF110_DMA_V0_KIND_VM;
-			page = GF110_DMA_V0_PAGE_LP;
-		}
-	} else
-		return ret;
-
-	if (page > 1)
-		return -EINVAL;
-	priv->flags0 = (kind << 20) | (page << 6);
-
-	switch (priv->base.target) {
-	case NV_MEM_TARGET_VRAM:
-		priv->flags0 |= 0x00000009;
-		break;
-	case NV_MEM_TARGET_VM:
-	case NV_MEM_TARGET_PCI:
-	case NV_MEM_TARGET_PCI_NOSNOOP:
-		/* XXX: don't currently know how to construct a real one
-		 *      of these.  we only use them to represent pushbufs
-		 *      on these chipsets, and the classes that use them
-		 *      deal with the target themselves.
-		 */
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
-}
-
-static struct nvkm_ofuncs
-gf110_dmaobj_ofuncs = {
-	.ctor =  gf110_dmaobj_ctor,
-	.dtor = _nvkm_dmaobj_dtor,
-	.init = _nvkm_dmaobj_init,
-	.fini = _nvkm_dmaobj_fini,
-};
-
-static struct nvkm_oclass
-gf110_dmaeng_sclass[] = {
-	{ NV_DMA_FROM_MEMORY, &gf110_dmaobj_ofuncs },
-	{ NV_DMA_TO_MEMORY, &gf110_dmaobj_ofuncs },
-	{ NV_DMA_IN_MEMORY, &gf110_dmaobj_ofuncs },
-	{}
-};
-
-struct nvkm_oclass *
-gf110_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
-	.base.handle = NV_ENGINE(DMAOBJ, 0xd0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_dmaeng_ctor,
-		.dtor = _nvkm_dmaeng_dtor,
-		.init = _nvkm_dmaeng_init,
-		.fini = _nvkm_dmaeng_fini,
-	},
-	.sclass = gf110_dmaeng_sclass,
-	.bind = gf110_dmaobj_bind,
-}.base;
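
The gf110 bind routine stores start and limit right-shifted by 8, i.e. in 256-byte units, so this ctxdma descriptor format can only describe 256-byte-aligned ranges. A sketch of the six-word layout it writes, with a plain array standing in for nv_wo32():

#include <stdint.h>

static void gf110_emit_ctxdma(uint32_t desc[6], uint32_t flags0,
			      uint64_t start, uint64_t limit)
{
	desc[0] = flags0;			/* kind/page/target bits */
	desc[1] = (uint32_t)(start >> 8);	/* base, 256-byte units */
	desc[2] = (uint32_t)(limit >> 8);	/* limit, 256-byte units */
	desc[3] = 0;
	desc[4] = 0;
	desc[5] = 0;
}
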
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c
deleted file mode 100644
index b4379c2..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv04.c
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-#include <subdev/mmu/nv04.h>
-
-#include <nvif/class.h>
-
-struct nv04_dmaobj_priv {
-	struct nvkm_dmaobj base;
-	bool clone;
-	u32 flags0;
-	u32 flags2;
-};
-
-static int
-nv04_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		 struct nvkm_gpuobj **pgpuobj)
-{
-	struct nv04_dmaobj_priv *priv = (void *)dmaobj;
-	struct nvkm_gpuobj *gpuobj;
-	u64 offset = priv->base.start & 0xfffff000;
-	u64 adjust = priv->base.start & 0x00000fff;
-	u32 length = priv->base.limit - priv->base.start;
-	int ret;
-
-	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-		switch (nv_mclass(parent->parent)) {
-		case NV03_CHANNEL_DMA:
-		case NV10_CHANNEL_DMA:
-		case NV17_CHANNEL_DMA:
-		case NV40_CHANNEL_DMA:
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
-
-	if (priv->clone) {
-		struct nv04_mmu_priv *mmu = nv04_mmu(dmaobj);
-		struct nvkm_gpuobj *pgt = mmu->vm->pgt[0].obj[0];
-		if (!dmaobj->start)
-			return nvkm_gpuobj_dup(parent, pgt, pgpuobj);
-		offset  = nv_ro32(pgt, 8 + (offset >> 10));
-		offset &= 0xfffff000;
-	}
-
-	ret = nvkm_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
-	*pgpuobj = gpuobj;
-	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, priv->flags0 | (adjust << 20));
-		nv_wo32(*pgpuobj, 0x04, length);
-		nv_wo32(*pgpuobj, 0x08, priv->flags2 | offset);
-		nv_wo32(*pgpuobj, 0x0c, priv->flags2 | offset);
-	}
-
-	return ret;
-}
-
-static int
-nv04_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct nvkm_dmaeng *dmaeng = (void *)engine;
-	struct nv04_mmu_priv *mmu = nv04_mmu(engine);
-	struct nv04_dmaobj_priv *priv;
-	int ret;
-
-	ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
-	*pobject = nv_object(priv);
-	if (ret || (ret = -ENOSYS, size))
-		return ret;
-
-	if (priv->base.target == NV_MEM_TARGET_VM) {
-		if (nv_object(mmu)->oclass == &nv04_mmu_oclass)
-			priv->clone = true;
-		priv->base.target = NV_MEM_TARGET_PCI;
-		priv->base.access = NV_MEM_ACCESS_RW;
-	}
-
-	priv->flags0 = nv_mclass(priv);
-	switch (priv->base.target) {
-	case NV_MEM_TARGET_VRAM:
-		priv->flags0 |= 0x00003000;
-		break;
-	case NV_MEM_TARGET_PCI:
-		priv->flags0 |= 0x00023000;
-		break;
-	case NV_MEM_TARGET_PCI_NOSNOOP:
-		priv->flags0 |= 0x00033000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	switch (priv->base.access) {
-	case NV_MEM_ACCESS_RO:
-		priv->flags0 |= 0x00004000;
-		break;
-	case NV_MEM_ACCESS_WO:
-		priv->flags0 |= 0x00008000;
-	case NV_MEM_ACCESS_RW:
-		priv->flags2 |= 0x00000002;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
-}
-
-static struct nvkm_ofuncs
-nv04_dmaobj_ofuncs = {
-	.ctor =  nv04_dmaobj_ctor,
-	.dtor = _nvkm_dmaobj_dtor,
-	.init = _nvkm_dmaobj_init,
-	.fini = _nvkm_dmaobj_fini,
-};
-
-static struct nvkm_oclass
-nv04_dmaeng_sclass[] = {
-	{ NV_DMA_FROM_MEMORY, &nv04_dmaobj_ofuncs },
-	{ NV_DMA_TO_MEMORY, &nv04_dmaobj_ofuncs },
-	{ NV_DMA_IN_MEMORY, &nv04_dmaobj_ofuncs },
-	{}
-};
-
-struct nvkm_oclass *
-nv04_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
-	.base.handle = NV_ENGINE(DMAOBJ, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_dmaeng_ctor,
-		.dtor = _nvkm_dmaeng_dtor,
-		.init = _nvkm_dmaeng_init,
-		.fini = _nvkm_dmaeng_fini,
-	},
-	.sclass = nv04_dmaeng_sclass,
-	.bind = nv04_dmaobj_bind,
-}.base;
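
nv04_dmaobj_bind splits an unaligned start address into a 4KiB-aligned offset plus a 12-bit "adjust" that lands in the high bits of the first descriptor word, since the pre-NV50 ctxdma format cannot express a byte-granular base directly. A self-contained sketch of that split:

#include <stdint.h>

struct nv04_dma_addr {
	uint64_t offset;	/* 4KiB-aligned base */
	uint32_t adjust;	/* byte offset within the first page */
	uint32_t length;	/* limit - start */
};

static struct nv04_dma_addr nv04_split(uint64_t start, uint64_t limit)
{
	struct nv04_dma_addr a = {
		.offset = start & ~0xfffULL,
		.adjust = (uint32_t)(start & 0xfff),
		.length = (uint32_t)(limit - start),
	};
	return a;
}
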
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c
deleted file mode 100644
index 4d3c828..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/nv50.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct nv50_dmaobj_priv {
-	struct nvkm_dmaobj base;
-	u32 flags0;
-	u32 flags5;
-};
-
-static int
-nv50_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
-		 struct nvkm_gpuobj **pgpuobj)
-{
-	struct nv50_dmaobj_priv *priv = (void *)dmaobj;
-	int ret;
-
-	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
-		switch (nv_mclass(parent->parent)) {
-		case NV40_CHANNEL_DMA:
-		case NV50_CHANNEL_GPFIFO:
-		case G82_CHANNEL_GPFIFO:
-		case NV50_DISP_CORE_CHANNEL_DMA:
-		case G82_DISP_CORE_CHANNEL_DMA:
-		case GT206_DISP_CORE_CHANNEL_DMA:
-		case GT200_DISP_CORE_CHANNEL_DMA:
-		case GT214_DISP_CORE_CHANNEL_DMA:
-		case NV50_DISP_BASE_CHANNEL_DMA:
-		case G82_DISP_BASE_CHANNEL_DMA:
-		case GT200_DISP_BASE_CHANNEL_DMA:
-		case GT214_DISP_BASE_CHANNEL_DMA:
-		case NV50_DISP_OVERLAY_CHANNEL_DMA:
-		case G82_DISP_OVERLAY_CHANNEL_DMA:
-		case GT200_DISP_OVERLAY_CHANNEL_DMA:
-		case GT214_DISP_OVERLAY_CHANNEL_DMA:
-			break;
-		default:
-			return -EINVAL;
-		}
-	}
-
-	ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
-	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
-		nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
-		nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
-		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
-					upper_32_bits(priv->base.start));
-		nv_wo32(*pgpuobj, 0x10, 0x00000000);
-		nv_wo32(*pgpuobj, 0x14, priv->flags5);
-	}
-
-	return ret;
-}
-
-static int
-nv50_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct nvkm_dmaeng *dmaeng = (void *)engine;
-	union {
-		struct nv50_dma_v0 v0;
-	} *args;
-	struct nv50_dmaobj_priv *priv;
-	u32 user, part, comp, kind;
-	int ret;
-
-	ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-	args = data;
-
-	nv_ioctl(parent, "create nv50 dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create nv50 dma vers %d priv %d part %d "
-				 "comp %d kind %02x\n", args->v0.version,
-			 args->v0.priv, args->v0.part, args->v0.comp,
-			 args->v0.kind);
-		user = args->v0.priv;
-		part = args->v0.part;
-		comp = args->v0.comp;
-		kind = args->v0.kind;
-	} else
-	if (size == 0) {
-		if (priv->base.target != NV_MEM_TARGET_VM) {
-			user = NV50_DMA_V0_PRIV_US;
-			part = NV50_DMA_V0_PART_256;
-			comp = NV50_DMA_V0_COMP_NONE;
-			kind = NV50_DMA_V0_KIND_PITCH;
-		} else {
-			user = NV50_DMA_V0_PRIV_VM;
-			part = NV50_DMA_V0_PART_VM;
-			comp = NV50_DMA_V0_COMP_VM;
-			kind = NV50_DMA_V0_KIND_VM;
-		}
-	} else
-		return ret;
-
-	if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
-		return -EINVAL;
-	priv->flags0 = (comp << 29) | (kind << 22) | (user << 20);
-	priv->flags5 = (part << 16);
-
-	switch (priv->base.target) {
-	case NV_MEM_TARGET_VM:
-		priv->flags0 |= 0x00000000;
-		break;
-	case NV_MEM_TARGET_VRAM:
-		priv->flags0 |= 0x00010000;
-		break;
-	case NV_MEM_TARGET_PCI:
-		priv->flags0 |= 0x00020000;
-		break;
-	case NV_MEM_TARGET_PCI_NOSNOOP:
-		priv->flags0 |= 0x00030000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	switch (priv->base.access) {
-	case NV_MEM_ACCESS_VM:
-		break;
-	case NV_MEM_ACCESS_RO:
-		priv->flags0 |= 0x00040000;
-		break;
-	case NV_MEM_ACCESS_WO:
-	case NV_MEM_ACCESS_RW:
-		priv->flags0 |= 0x00080000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
-}
-
-static struct nvkm_ofuncs
-nv50_dmaobj_ofuncs = {
-	.ctor =  nv50_dmaobj_ctor,
-	.dtor = _nvkm_dmaobj_dtor,
-	.init = _nvkm_dmaobj_init,
-	.fini = _nvkm_dmaobj_fini,
-};
-
-static struct nvkm_oclass
-nv50_dmaeng_sclass[] = {
-	{ NV_DMA_FROM_MEMORY, &nv50_dmaobj_ofuncs },
-	{ NV_DMA_TO_MEMORY, &nv50_dmaobj_ofuncs },
-	{ NV_DMA_IN_MEMORY, &nv50_dmaobj_ofuncs },
-	{}
-};
-
-struct nvkm_oclass *
-nv50_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
-	.base.handle = NV_ENGINE(DMAOBJ, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_dmaeng_ctor,
-		.dtor = _nvkm_dmaeng_dtor,
-		.init = _nvkm_dmaeng_init,
-		.fini = _nvkm_dmaeng_fini,
-	},
-	.sclass = nv50_dmaeng_sclass,
-	.bind = nv50_dmaobj_bind,
-}.base;
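
nv50_dmaobj_bind packs 40-bit addresses: the low 32 bits of limit and start each get their own word, and their 8-bit upper halves share the word at offset 0x0c (limit in bits 31:24, start in bits 7:0). A sketch of just that packing, helper name invented:

#include <stdint.h>

static void nv50_pack_range(uint32_t w[3], uint64_t start, uint64_t limit)
{
	w[0] = (uint32_t)limit;				 /* 0x04: limit[31:0] */
	w[1] = (uint32_t)start;				 /* 0x08: start[31:0] */
	w[2] = (((uint32_t)(limit >> 32) & 0xff) << 24)	 /* 0x0c: limit[39:32] */
	     |  ((uint32_t)(start >> 32) & 0xff);	 /*       start[39:32] */
}
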
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h
deleted file mode 100644
index 44ae8a0..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dmaobj/priv.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef __NVKM_DMAOBJ_PRIV_H__
-#define __NVKM_DMAOBJ_PRIV_H__
-#include <engine/dmaobj.h>
-
-#define nvkm_dmaobj_create(p,e,c,pa,sa,d)                                      \
-	nvkm_dmaobj_create_((p), (e), (c), (pa), (sa), sizeof(**d), (void **)d)
-
-int nvkm_dmaobj_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void **, u32 *,
-			int, void **);
-#define _nvkm_dmaobj_dtor nvkm_object_destroy
-#define _nvkm_dmaobj_init nvkm_object_init
-#define _nvkm_dmaobj_fini nvkm_object_fini
-
-int _nvkm_dmaeng_ctor(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, void *, u32,
-		      struct nvkm_object **);
-#define _nvkm_dmaeng_dtor _nvkm_engine_dtor
-#define _nvkm_dmaeng_init _nvkm_engine_init
-#define _nvkm_dmaeng_fini _nvkm_engine_fini
-
-struct nvkm_dmaeng_impl {
-	struct nvkm_oclass base;
-	struct nvkm_oclass *sclass;
-	int (*bind)(struct nvkm_dmaobj *, struct nvkm_object *,
-		    struct nvkm_gpuobj **);
-};
-#endif
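
The deleted nvkm_dmaobj_create() macro shows the old subclassing idiom: the wrapper passes sizeof(**d), the size of the *derived* structure, into a generic constructor that only knows the base type, letting one allocator service every subclass. A minimal userspace sketch of the same trick (names invented):

#include <errno.h>
#include <stdlib.h>

struct base { int id; };

static int obj_create_(size_t size, void **pobject)
{
	*pobject = calloc(1, size);	/* allocates the full subclass */
	return *pobject ? 0 : -ENOMEM;
}

#define obj_create(d) obj_create_(sizeof(**(d)), (void **)(d))

struct derived { struct base base; int extra; };

/* usage: struct derived *p; if (obj_create(&p) == 0) p->extra = 1; */
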
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 30958c1..7400060 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -21,40 +21,95 @@
  */
 #include <engine/falcon.h>
 
-#include <core/device.h>
+#include <core/gpuobj.h>
 #include <subdev/timer.h>
+#include <engine/fifo.h>
 
-void
-nvkm_falcon_intr(struct nvkm_subdev *subdev)
+static int
+nvkm_falcon_oclass_get(struct nvkm_oclass *oclass, int index)
 {
-	struct nvkm_falcon *falcon = (void *)subdev;
-	u32 dispatch = nv_ro32(falcon, 0x01c);
-	u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+	struct nvkm_falcon *falcon = nvkm_falcon(oclass->engine);
+	int c = 0;
+
+	while (falcon->func->sclass[c].oclass) {
+		if (c++ == index) {
+			oclass->base = falcon->func->sclass[index];
+			return index;
+		}
+	}
+
+	return c;
+}
+
+static int
+nvkm_falcon_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+			int align, struct nvkm_gpuobj **pgpuobj)
+{
+	return nvkm_gpuobj_new(object->engine->subdev.device, 256,
+			       align, true, parent, pgpuobj);
+}
+
+static const struct nvkm_object_func
+nvkm_falcon_cclass = {
+	.bind = nvkm_falcon_cclass_bind,
+};
+
+static void
+nvkm_falcon_intr(struct nvkm_engine *engine)
+{
+	struct nvkm_falcon *falcon = nvkm_falcon(engine);
+	struct nvkm_subdev *subdev = &falcon->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = falcon->addr;
+	u32 dest = nvkm_rd32(device, base + 0x01c);
+	u32 intr = nvkm_rd32(device, base + 0x008) & dest & ~(dest >> 16);
+	u32 inst = nvkm_rd32(device, base + 0x050) & 0x3fffffff;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+
+	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
+
+	if (intr & 0x00000040) {
+		if (falcon->func->intr) {
+			falcon->func->intr(falcon, chan);
+			nvkm_wr32(device, base + 0x004, 0x00000040);
+			intr &= ~0x00000040;
+		}
+	}
 
 	if (intr & 0x00000010) {
-		nv_debug(falcon, "ucode halted\n");
-		nv_wo32(falcon, 0x004, 0x00000010);
+		nvkm_debug(subdev, "ucode halted\n");
+		nvkm_wr32(device, base + 0x004, 0x00000010);
 		intr &= ~0x00000010;
 	}
 
 	if (intr)  {
-		nv_error(falcon, "unhandled intr 0x%08x\n", intr);
-		nv_wo32(falcon, 0x004, intr);
+		nvkm_error(subdev, "intr %08x\n", intr);
+		nvkm_wr32(device, base + 0x004, intr);
 	}
+
+	nvkm_fifo_chan_put(device->fifo, flags, &chan);
 }
 
-u32
-_nvkm_falcon_rd32(struct nvkm_object *object, u64 addr)
+static int
+nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
 {
-	struct nvkm_falcon *falcon = (void *)object;
-	return nv_rd32(falcon, falcon->addr + addr);
-}
+	struct nvkm_falcon *falcon = nvkm_falcon(engine);
+	struct nvkm_device *device = falcon->engine.subdev.device;
+	const u32 base = falcon->addr;
 
-void
-_nvkm_falcon_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	struct nvkm_falcon *falcon = (void *)object;
-	nv_wr32(falcon, falcon->addr + addr, data);
+	if (!suspend) {
+		nvkm_memory_del(&falcon->core);
+		if (falcon->external) {
+			vfree(falcon->data.data);
+			vfree(falcon->code.data);
+			falcon->code.data = NULL;
+		}
+	}
+
+	nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
+	nvkm_wr32(device, base + 0x014, 0xffffffff);
+	return 0;
 }
 
 static void *
@@ -67,51 +122,66 @@
 	return p;
 }
 
-int
-_nvkm_falcon_init(struct nvkm_object *object)
+static int
+nvkm_falcon_oneinit(struct nvkm_engine *engine)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct nvkm_falcon *falcon = (void *)object;
-	const struct firmware *fw;
-	char name[32] = "internal";
-	int ret, i;
+	struct nvkm_falcon *falcon = nvkm_falcon(engine);
+	struct nvkm_subdev *subdev = &falcon->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = falcon->addr;
 	u32 caps;
 
-	/* enable engine, and determine its capabilities */
-	ret = nvkm_engine_init(&falcon->base);
-	if (ret)
-		return ret;
-
+	/* determine falcon capabilities */
 	if (device->chipset <  0xa3 ||
 	    device->chipset == 0xaa || device->chipset == 0xac) {
 		falcon->version = 0;
 		falcon->secret  = (falcon->addr == 0x087000) ? 1 : 0;
 	} else {
-		caps = nv_ro32(falcon, 0x12c);
+		caps = nvkm_rd32(device, base + 0x12c);
 		falcon->version = (caps & 0x0000000f);
 		falcon->secret  = (caps & 0x00000030) >> 4;
 	}
 
-	caps = nv_ro32(falcon, 0x108);
+	caps = nvkm_rd32(device, base + 0x108);
 	falcon->code.limit = (caps & 0x000001ff) << 8;
 	falcon->data.limit = (caps & 0x0003fe00) >> 1;
 
-	nv_debug(falcon, "falcon version: %d\n", falcon->version);
-	nv_debug(falcon, "secret level: %d\n", falcon->secret);
-	nv_debug(falcon, "code limit: %d\n", falcon->code.limit);
-	nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
+	nvkm_debug(subdev, "falcon version: %d\n", falcon->version);
+	nvkm_debug(subdev, "secret level: %d\n", falcon->secret);
+	nvkm_debug(subdev, "code limit: %d\n", falcon->code.limit);
+	nvkm_debug(subdev, "data limit: %d\n", falcon->data.limit);
+	return 0;
+}
+
+static int
+nvkm_falcon_init(struct nvkm_engine *engine)
+{
+	struct nvkm_falcon *falcon = nvkm_falcon(engine);
+	struct nvkm_subdev *subdev = &falcon->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const struct firmware *fw;
+	char name[32] = "internal";
+	const u32 base = falcon->addr;
+	int ret, i;
 
 	/* wait for 'uc halted' to be signalled before continuing */
 	if (falcon->secret && falcon->version < 4) {
-		if (!falcon->version)
-			nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
-		else
-			nv_wait(falcon, 0x180, 0x80000000, 0);
-		nv_wo32(falcon, 0x004, 0x00000010);
+		if (!falcon->version) {
+			nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, base + 0x008) & 0x00000010)
+					break;
+			);
+		} else {
+			nvkm_msec(device, 2000,
+				if (!(nvkm_rd32(device, base + 0x180) & 0x80000000))
+					break;
+			);
+		}
+		nvkm_wr32(device, base + 0x004, 0x00000010);
 	}
 
 	/* disable all interrupts */
-	nv_wo32(falcon, 0x014, 0xffffffff);
+	nvkm_wr32(device, base + 0x014, 0xffffffff);
 
 	/* no default ucode provided by the engine implementation, try and
 	 * locate a "self-bootstrapping" firmware image for the engine
@@ -120,7 +190,7 @@
 		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
 			 device->chipset, falcon->addr >> 12);
 
-		ret = request_firmware(&fw, name, nv_device_base(device));
+		ret = request_firmware(&fw, name, device->dev);
 		if (ret == 0) {
 			falcon->code.data = vmemdup(fw->data, fw->size);
 			falcon->code.size = fw->size;
@@ -139,10 +209,10 @@
 		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
 			 device->chipset, falcon->addr >> 12);
 
-		ret = request_firmware(&fw, name, nv_device_base(device));
+		ret = request_firmware(&fw, name, device->dev);
 		if (ret) {
-			nv_error(falcon, "unable to load firmware data\n");
-			return ret;
+			nvkm_error(subdev, "unable to load firmware data\n");
+			return -ENODEV;
 		}
 
 		falcon->data.data = vmemdup(fw->data, fw->size);
@@ -154,10 +224,10 @@
 		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
 			 device->chipset, falcon->addr >> 12);
 
-		ret = request_firmware(&fw, name, nv_device_base(device));
+		ret = request_firmware(&fw, name, device->dev);
 		if (ret) {
-			nv_error(falcon, "unable to load firmware code\n");
-			return ret;
+			nvkm_error(subdev, "unable to load firmware code\n");
+			return -ENODEV;
 		}
 
 		falcon->code.data = vmemdup(fw->data, fw->size);
@@ -167,111 +237,117 @@
 			return -ENOMEM;
 	}
 
-	nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ?
-		 "static code/data segments" : "self-bootstrapping");
+	nvkm_debug(subdev, "firmware: %s (%s)\n", name, falcon->data.data ?
+		   "static code/data segments" : "self-bootstrapping");
 
 	/* ensure any "self-bootstrapping" firmware image is in vram */
 	if (!falcon->data.data && !falcon->core) {
-		ret = nvkm_gpuobj_new(object->parent, NULL, falcon->code.size,
-				      256, 0, &falcon->core);
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      falcon->code.size, 256, false,
+				      &falcon->core);
 		if (ret) {
-			nv_error(falcon, "core allocation failed, %d\n", ret);
+			nvkm_error(subdev, "core allocation failed, %d\n", ret);
 			return ret;
 		}
 
+		nvkm_kmap(falcon->core);
 		for (i = 0; i < falcon->code.size; i += 4)
-			nv_wo32(falcon->core, i, falcon->code.data[i / 4]);
+			nvkm_wo32(falcon->core, i, falcon->code.data[i / 4]);
+		nvkm_done(falcon->core);
 	}
 
 	/* upload firmware bootloader (or the full code segments) */
 	if (falcon->core) {
+		u64 addr = nvkm_memory_addr(falcon->core);
 		if (device->card_type < NV_C0)
-			nv_wo32(falcon, 0x618, 0x04000000);
+			nvkm_wr32(device, base + 0x618, 0x04000000);
 		else
-			nv_wo32(falcon, 0x618, 0x00000114);
-		nv_wo32(falcon, 0x11c, 0);
-		nv_wo32(falcon, 0x110, falcon->core->addr >> 8);
-		nv_wo32(falcon, 0x114, 0);
-		nv_wo32(falcon, 0x118, 0x00006610);
+			nvkm_wr32(device, base + 0x618, 0x00000114);
+		nvkm_wr32(device, base + 0x11c, 0);
+		nvkm_wr32(device, base + 0x110, addr >> 8);
+		nvkm_wr32(device, base + 0x114, 0);
+		nvkm_wr32(device, base + 0x118, 0x00006610);
 	} else {
 		if (falcon->code.size > falcon->code.limit ||
 		    falcon->data.size > falcon->data.limit) {
-			nv_error(falcon, "ucode exceeds falcon limit(s)\n");
+			nvkm_error(subdev, "ucode exceeds falcon limit(s)\n");
 			return -EINVAL;
 		}
 
 		if (falcon->version < 3) {
-			nv_wo32(falcon, 0xff8, 0x00100000);
+			nvkm_wr32(device, base + 0xff8, 0x00100000);
 			for (i = 0; i < falcon->code.size / 4; i++)
-				nv_wo32(falcon, 0xff4, falcon->code.data[i]);
+				nvkm_wr32(device, base + 0xff4, falcon->code.data[i]);
 		} else {
-			nv_wo32(falcon, 0x180, 0x01000000);
+			nvkm_wr32(device, base + 0x180, 0x01000000);
 			for (i = 0; i < falcon->code.size / 4; i++) {
 				if ((i & 0x3f) == 0)
-					nv_wo32(falcon, 0x188, i >> 6);
-				nv_wo32(falcon, 0x184, falcon->code.data[i]);
+					nvkm_wr32(device, base + 0x188, i >> 6);
+				nvkm_wr32(device, base + 0x184, falcon->code.data[i]);
 			}
 		}
 	}
 
 	/* upload data segment (if necessary), zeroing the remainder */
 	if (falcon->version < 3) {
-		nv_wo32(falcon, 0xff8, 0x00000000);
+		nvkm_wr32(device, base + 0xff8, 0x00000000);
 		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
-			nv_wo32(falcon, 0xff4, falcon->data.data[i]);
+			nvkm_wr32(device, base + 0xff4, falcon->data.data[i]);
 		for (; i < falcon->data.limit; i += 4)
-			nv_wo32(falcon, 0xff4, 0x00000000);
+			nvkm_wr32(device, base + 0xff4, 0x00000000);
 	} else {
-		nv_wo32(falcon, 0x1c0, 0x01000000);
+		nvkm_wr32(device, base + 0x1c0, 0x01000000);
 		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
-			nv_wo32(falcon, 0x1c4, falcon->data.data[i]);
+			nvkm_wr32(device, base + 0x1c4, falcon->data.data[i]);
 		for (; i < falcon->data.limit / 4; i++)
-			nv_wo32(falcon, 0x1c4, 0x00000000);
+			nvkm_wr32(device, base + 0x1c4, 0x00000000);
 	}
 
 	/* start it running */
-	nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
-	nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */
-	nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */
-	nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */
+	nvkm_wr32(device, base + 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
+	nvkm_wr32(device, base + 0x104, 0x00000000); /* ENTRY */
+	nvkm_wr32(device, base + 0x100, 0x00000002); /* TRIGGER */
+	nvkm_wr32(device, base + 0x048, 0x00000003); /* FIFO | CHSW */
+
+	if (falcon->func->init)
+		falcon->func->init(falcon);
 	return 0;
 }
 
-int
-_nvkm_falcon_fini(struct nvkm_object *object, bool suspend)
+static void *
+nvkm_falcon_dtor(struct nvkm_engine *engine)
 {
-	struct nvkm_falcon *falcon = (void *)object;
-
-	if (!suspend) {
-		nvkm_gpuobj_ref(NULL, &falcon->core);
-		if (falcon->external) {
-			vfree(falcon->data.data);
-			vfree(falcon->code.data);
-			falcon->code.data = NULL;
-		}
-	}
-
-	nv_mo32(falcon, 0x048, 0x00000003, 0x00000000);
-	nv_wo32(falcon, 0x014, 0xffffffff);
-
-	return nvkm_engine_fini(&falcon->base, suspend);
+	return nvkm_falcon(engine);
 }
 
+static const struct nvkm_engine_func
+nvkm_falcon = {
+	.dtor = nvkm_falcon_dtor,
+	.oneinit = nvkm_falcon_oneinit,
+	.init = nvkm_falcon_init,
+	.fini = nvkm_falcon_fini,
+	.intr = nvkm_falcon_intr,
+	.fifo.sclass = nvkm_falcon_oclass_get,
+	.cclass = &nvkm_falcon_cclass,
+};
+
 int
-nvkm_falcon_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 addr, bool enable,
-		    const char *iname, const char *fname,
-		    int length, void **pobject)
+nvkm_falcon_new_(const struct nvkm_falcon_func *func,
+		 struct nvkm_device *device, int index, bool enable,
+		 u32 addr, struct nvkm_engine **pengine)
 {
 	struct nvkm_falcon *falcon;
-	int ret;
 
-	ret = nvkm_engine_create_(parent, engine, oclass, enable, iname,
-				  fname, length, pobject);
-	falcon = *pobject;
-	if (ret)
-		return ret;
-
+	if (!(falcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
+		return -ENOMEM;
+	falcon->func = func;
 	falcon->addr = addr;
-	return 0;
+	falcon->code.data = func->code.data;
+	falcon->code.size = func->code.size;
+	falcon->data.data = func->data.data;
+	falcon->data.size = func->data.size;
+	*pengine = &falcon->engine;
+
+	return nvkm_engine_ctor(&nvkm_falcon, device, index, func->pmc_enable,
+				enable, &falcon->engine);
 }
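
The falcon rework replaces the old nv_wait() calls with nvkm_msec() loops that poll a register until a bit (de)asserts or a timeout expires. A userspace-flavoured sketch of the same construct, assuming a read_reg() accessor in place of nvkm_rd32():

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

extern uint32_t read_reg(uint32_t addr);	/* assumed register accessor */

static bool poll_mask(uint32_t addr, uint32_t mask, uint32_t value,
		      int timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if ((read_reg(addr) & mask) == value)
			return true;		/* condition met */
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 >= timeout_ms)
			return false;		/* timed out, like nvkm_msec */
	}
}
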
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 42891cb..74993c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -7,6 +7,24 @@
 nvkm-y += nvkm/engine/fifo/g84.o
 nvkm-y += nvkm/engine/fifo/gf100.o
 nvkm-y += nvkm/engine/fifo/gk104.o
-nvkm-y += nvkm/engine/fifo/gk20a.o
 nvkm-y += nvkm/engine/fifo/gk208.o
+nvkm-y += nvkm/engine/fifo/gk20a.o
 nvkm-y += nvkm/engine/fifo/gm204.o
+nvkm-y += nvkm/engine/fifo/gm20b.o
+
+nvkm-y += nvkm/engine/fifo/chan.o
+nvkm-y += nvkm/engine/fifo/channv50.o
+nvkm-y += nvkm/engine/fifo/chang84.o
+
+nvkm-y += nvkm/engine/fifo/dmanv04.o
+nvkm-y += nvkm/engine/fifo/dmanv10.o
+nvkm-y += nvkm/engine/fifo/dmanv17.o
+nvkm-y += nvkm/engine/fifo/dmanv40.o
+nvkm-y += nvkm/engine/fifo/dmanv50.o
+nvkm-y += nvkm/engine/fifo/dmag84.o
+
+nvkm-y += nvkm/engine/fifo/gpfifonv50.o
+nvkm-y += nvkm/engine/fifo/gpfifog84.o
+nvkm-y += nvkm/engine/fifo/gpfifogf100.o
+nvkm-y += nvkm/engine/fifo/gpfifogk104.o
+nvkm-y += nvkm/engine/fifo/gpfifogm204.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index fa223f8..1fbbfbe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -21,18 +21,75 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/fifo.h>
+#include "priv.h"
+#include "chan.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <core/notify.h>
-#include <engine/dmaobj.h>
 
-#include <nvif/class.h>
 #include <nvif/event.h>
 #include <nvif/unpack.h>
 
+void
+nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
+{
+	return fifo->func->pause(fifo, flags);
+}
+
+void
+nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
+{
+	return fifo->func->start(fifo, flags);
+}
+
+void
+nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
+		   struct nvkm_fifo_chan **pchan)
+{
+	struct nvkm_fifo_chan *chan = *pchan;
+	if (likely(chan)) {
+		*pchan = NULL;
+		spin_unlock_irqrestore(&fifo->lock, flags);
+	}
+}
+
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
+{
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	spin_lock_irqsave(&fifo->lock, flags);
+	list_for_each_entry(chan, &fifo->chan, head) {
+		if (chan->inst->addr == inst) {
+			list_del(&chan->head);
+			list_add(&chan->head, &fifo->chan);
+			*rflags = flags;
+			return chan;
+		}
+	}
+	spin_unlock_irqrestore(&fifo->lock, flags);
+	return NULL;
+}
+
+struct nvkm_fifo_chan *
+nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
+{
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	spin_lock_irqsave(&fifo->lock, flags);
+	list_for_each_entry(chan, &fifo->chan, head) {
+		if (chan->chid == chid) {
+			list_del(&chan->head);
+			list_add(&chan->head, &fifo->chan);
+			*rflags = flags;
+			return chan;
+		}
+	}
+	spin_unlock_irqrestore(&fifo->lock, flags);
+	return NULL;
+}
+
 static int
 nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
 		     struct nvkm_notify *notify)
@@ -51,126 +108,21 @@
 	.ctor = nvkm_fifo_event_ctor,
 };
 
-int
-nvkm_fifo_channel_create_(struct nvkm_object *parent,
-			  struct nvkm_object *engine,
-			  struct nvkm_oclass *oclass,
-			  int bar, u32 addr, u32 size, u32 pushbuf,
-			  u64 engmask, int len, void **ptr)
+static void
+nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
 {
-	struct nvkm_device *device = nv_device(engine);
-	struct nvkm_fifo *priv = (void *)engine;
-	struct nvkm_fifo_chan *chan;
-	struct nvkm_dmaeng *dmaeng;
-	unsigned long flags;
-	int ret;
-
-	/* create base object class */
-	ret = nvkm_namedb_create_(parent, engine, oclass, 0, NULL,
-				  engmask, len, ptr);
-	chan = *ptr;
-	if (ret)
-		return ret;
-
-	/* validate dma object representing push buffer */
-	chan->pushdma = (void *)nvkm_handle_ref(parent, pushbuf);
-	if (!chan->pushdma)
-		return -ENOENT;
-
-	dmaeng = (void *)chan->pushdma->base.engine;
-	switch (chan->pushdma->base.oclass->handle) {
-	case NV_DMA_FROM_MEMORY:
-	case NV_DMA_IN_MEMORY:
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	ret = dmaeng->bind(chan->pushdma, parent, &chan->pushgpu);
-	if (ret)
-		return ret;
-
-	/* find a free fifo channel */
-	spin_lock_irqsave(&priv->lock, flags);
-	for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) {
-		if (!priv->channel[chan->chid]) {
-			priv->channel[chan->chid] = nv_object(chan);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	if (chan->chid == priv->max) {
-		nv_error(priv, "no free channels\n");
-		return -ENOSPC;
-	}
-
-	chan->addr = nv_device_resource_start(device, bar) +
-		     addr + size * chan->chid;
-	chan->size = size;
-	nvkm_event_send(&priv->cevent, 1, 0, NULL, 0);
-	return 0;
+	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+	fifo->func->uevent_fini(fifo);
 }
 
-void
-nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *chan)
+static void
+nvkm_fifo_uevent_init(struct nvkm_event *event, int type, int index)
 {
-	struct nvkm_fifo *priv = (void *)nv_object(chan)->engine;
-	unsigned long flags;
-
-	if (chan->user)
-		iounmap(chan->user);
-
-	spin_lock_irqsave(&priv->lock, flags);
-	priv->channel[chan->chid] = NULL;
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	nvkm_gpuobj_ref(NULL, &chan->pushgpu);
-	nvkm_object_ref(NULL, (struct nvkm_object **)&chan->pushdma);
-	nvkm_namedb_destroy(&chan->namedb);
+	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+	fifo->func->uevent_init(fifo);
 }
 
-void
-_nvkm_fifo_channel_dtor(struct nvkm_object *object)
-{
-	struct nvkm_fifo_chan *chan = (void *)object;
-	nvkm_fifo_channel_destroy(chan);
-}
-
-int
-_nvkm_fifo_channel_map(struct nvkm_object *object, u64 *addr, u32 *size)
-{
-	struct nvkm_fifo_chan *chan = (void *)object;
-	*addr = chan->addr;
-	*size = chan->size;
-	return 0;
-}
-
-u32
-_nvkm_fifo_channel_rd32(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_fifo_chan *chan = (void *)object;
-	if (unlikely(!chan->user)) {
-		chan->user = ioremap(chan->addr, chan->size);
-		if (WARN_ON_ONCE(chan->user == NULL))
-			return 0;
-	}
-	return ioread32_native(chan->user + addr);
-}
-
-void
-_nvkm_fifo_channel_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	struct nvkm_fifo_chan *chan = (void *)object;
-	if (unlikely(!chan->user)) {
-		chan->user = ioremap(chan->addr, chan->size);
-		if (WARN_ON_ONCE(chan->user == NULL))
-			return;
-	}
-	iowrite32_native(data, chan->user + addr);
-}
-
-int
+static int
 nvkm_fifo_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
 		      struct nvkm_notify *notify)
 {
@@ -188,6 +140,13 @@
 	return ret;
 }
 
+static const struct nvkm_event_func
+nvkm_fifo_uevent_func = {
+	.ctor = nvkm_fifo_uevent_ctor,
+	.init = nvkm_fifo_uevent_init,
+	.fini = nvkm_fifo_uevent_fini,
+};
+
 void
 nvkm_fifo_uevent(struct nvkm_fifo *fifo)
 {
@@ -196,87 +155,123 @@
 	nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
 }
 
-int
-_nvkm_fifo_channel_ntfy(struct nvkm_object *object, u32 type,
-			struct nvkm_event **event)
+static int
+nvkm_fifo_class_new(struct nvkm_device *device,
+		    const struct nvkm_oclass *oclass, void *data, u32 size,
+		    struct nvkm_object **pobject)
 {
-	struct nvkm_fifo *fifo = (void *)object->engine;
-	switch (type) {
-	case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
-		if (nv_mclass(object) >= G82_CHANNEL_DMA) {
-			*event = &fifo->uevent;
+	const struct nvkm_fifo_chan_oclass *sclass = oclass->engn;
+	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+	return sclass->ctor(fifo, oclass, data, size, pobject);
+}
+
+static const struct nvkm_device_oclass
+nvkm_fifo_class = {
+	.ctor = nvkm_fifo_class_new,
+};
+
+static int
+nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index,
+		    const struct nvkm_device_oclass **class)
+{
+	struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+	const struct nvkm_fifo_chan_oclass *sclass;
+	int c = 0;
+
+	while ((sclass = fifo->func->chan[c])) {
+		if (c++ == index) {
+			oclass->base = sclass->base;
+			oclass->engn = sclass;
+			*class = &nvkm_fifo_class;
 			return 0;
 		}
-		break;
-	default:
-		break;
 	}
-	return -EINVAL;
+
+	return c;
+}
+
+static void
+nvkm_fifo_intr(struct nvkm_engine *engine)
+{
+	struct nvkm_fifo *fifo = nvkm_fifo(engine);
+	fifo->func->intr(fifo);
 }
 
 static int
-nvkm_fifo_chid(struct nvkm_fifo *priv, struct nvkm_object *object)
+nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
 {
-	int engidx = nv_hclass(priv) & 0xff;
-
-	while (object && object->parent) {
-		if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) &&
-		    (nv_hclass(object->parent) & 0xff) == engidx)
-			return nvkm_fifo_chan(object)->chid;
-		object = object->parent;
-	}
-
-	return -1;
+	struct nvkm_fifo *fifo = nvkm_fifo(engine);
+	if (fifo->func->fini)
+		fifo->func->fini(fifo);
+	return 0;
 }
 
-const char *
-nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid)
+static int
+nvkm_fifo_oneinit(struct nvkm_engine *engine)
 {
-	struct nvkm_fifo_chan *chan = NULL;
-	unsigned long flags;
-
-	spin_lock_irqsave(&fifo->lock, flags);
-	if (chid >= fifo->min && chid <= fifo->max)
-		chan = (void *)fifo->channel[chid];
-	spin_unlock_irqrestore(&fifo->lock, flags);
-
-	return nvkm_client_name(chan);
+	struct nvkm_fifo *fifo = nvkm_fifo(engine);
+	if (fifo->func->oneinit)
+		return fifo->func->oneinit(fifo);
+	return 0;
 }
 
-void
-nvkm_fifo_destroy(struct nvkm_fifo *priv)
+static int
+nvkm_fifo_init(struct nvkm_engine *engine)
 {
-	kfree(priv->channel);
-	nvkm_event_fini(&priv->uevent);
-	nvkm_event_fini(&priv->cevent);
-	nvkm_engine_destroy(&priv->base);
+	struct nvkm_fifo *fifo = nvkm_fifo(engine);
+	fifo->func->init(fifo);
+	return 0;
 }
 
+static void *
+nvkm_fifo_dtor(struct nvkm_engine *engine)
+{
+	struct nvkm_fifo *fifo = nvkm_fifo(engine);
+	void *data = fifo;
+	if (fifo->func->dtor)
+		data = fifo->func->dtor(fifo);
+	nvkm_event_fini(&fifo->cevent);
+	nvkm_event_fini(&fifo->uevent);
+	return data;
+}
+
+static const struct nvkm_engine_func
+nvkm_fifo = {
+	.dtor = nvkm_fifo_dtor,
+	.oneinit = nvkm_fifo_oneinit,
+	.init = nvkm_fifo_init,
+	.fini = nvkm_fifo_fini,
+	.intr = nvkm_fifo_intr,
+	.base.sclass = nvkm_fifo_class_get,
+};
+
 int
-nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass,
-		  int min, int max, int length, void **pobject)
+nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+	       int index, int nr, struct nvkm_fifo *fifo)
 {
-	struct nvkm_fifo *priv;
 	int ret;
 
-	ret = nvkm_engine_create_(parent, engine, oclass, true, "PFIFO",
-				  "fifo", length, pobject);
-	priv = *pobject;
+	fifo->func = func;
+	INIT_LIST_HEAD(&fifo->chan);
+	spin_lock_init(&fifo->lock);
+
+	if (WARN_ON(nr > NVKM_FIFO_CHID_NR))
+		fifo->nr = NVKM_FIFO_CHID_NR;
+	else
+		fifo->nr = nr;
+	bitmap_clear(fifo->mask, 0, fifo->nr);
+
+	ret = nvkm_engine_ctor(&nvkm_fifo, device, index, 0x00000100,
+			       true, &fifo->engine);
 	if (ret)
 		return ret;
 
-	priv->min = min;
-	priv->max = max;
-	priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL);
-	if (!priv->channel)
-		return -ENOMEM;
+	if (func->uevent_init) {
+		ret = nvkm_event_init(&nvkm_fifo_uevent_func, 1, 1,
+				      &fifo->uevent);
+		if (ret)
+			return ret;
+	}
 
-	ret = nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &priv->cevent);
-	if (ret)
-		return ret;
-
-	priv->chid = nvkm_fifo_chid;
-	spin_lock_init(&priv->lock);
-	return 0;
+	return nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &fifo->cevent);
 }
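
The new nvkm_fifo_chan_inst()/nvkm_fifo_chan_chid() lookups hand the fifo spinlock across the call boundary: a hit returns with the lock still held (after moving the channel to the list head as a simple MRU cache), and nvkm_fifo_chan_put() is what finally unlocks. A pthread sketch of that lock hand-off, illustrative only:

#include <pthread.h>
#include <stddef.h>

struct chan { int chid; struct chan *next; };

static pthread_mutex_t fifo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct chan *chan_list;

static struct chan *chan_get(int chid)
{
	struct chan *c;

	pthread_mutex_lock(&fifo_lock);
	for (c = chan_list; c; c = c->next) {
		if (c->chid == chid)
			return c;	/* returns WITH the lock held */
	}
	pthread_mutex_unlock(&fifo_lock);
	return NULL;
}

static void chan_put(struct chan **pc)
{
	if (*pc) {			/* only a successful get holds the lock */
		*pc = NULL;
		pthread_mutex_unlock(&fifo_lock);
	}
}

Returning with the lock held keeps the channel from disappearing while the interrupt handler uses it, at the cost of keeping the critical section open until the matching put.
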
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
new file mode 100644
index 0000000..dc6d467
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "chan.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <core/oproxy.h>
+#include <subdev/mmu.h>
+#include <engine/dma.h>
+
+struct nvkm_fifo_chan_object {
+	struct nvkm_oproxy oproxy;
+	struct nvkm_fifo_chan *chan;
+	int hash;
+};
+
+static int
+nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
+{
+	struct nvkm_fifo_chan_object *object =
+		container_of(base, typeof(*object), oproxy);
+	struct nvkm_engine *engine  = object->oproxy.object->engine;
+	struct nvkm_fifo_chan *chan = object->chan;
+	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+	const char *name = nvkm_subdev_name[engine->subdev.index];
+	int ret = 0;
+
+	if (--engn->usecount)
+		return 0;
+
+	if (chan->func->engine_fini) {
+		ret = chan->func->engine_fini(chan, engine, suspend);
+		if (ret) {
+			nvif_error(&chan->object,
+				   "detach %s failed, %d\n", name, ret);
+			return ret;
+		}
+	}
+
+	if (engn->object) {
+		ret = nvkm_object_fini(engn->object, suspend);
+		if (ret && suspend)
+			return ret;
+	}
+
+	nvif_trace(&chan->object, "detached %s\n", name);
+	return ret;
+}
+
+static int
+nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
+{
+	struct nvkm_fifo_chan_object *object =
+		container_of(base, typeof(*object), oproxy);
+	struct nvkm_engine *engine  = object->oproxy.object->engine;
+	struct nvkm_fifo_chan *chan = object->chan;
+	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+	const char *name = nvkm_subdev_name[engine->subdev.index];
+	int ret;
+
+	if (engn->usecount++)
+		return 0;
+
+	if (engn->object) {
+		ret = nvkm_object_init(engn->object);
+		if (ret)
+			return ret;
+	}
+
+	if (chan->func->engine_init) {
+		ret = chan->func->engine_init(chan, engine);
+		if (ret) {
+			nvif_error(&chan->object,
+				   "attach %s failed, %d\n", name, ret);
+			return ret;
+		}
+	}
+
+	nvif_trace(&chan->object, "attached %s\n", name);
+	return 0;
+}
+
+static void
+nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
+{
+	struct nvkm_fifo_chan_object *object =
+		container_of(base, typeof(*object), oproxy);
+	struct nvkm_engine *engine  = object->oproxy.base.engine;
+	struct nvkm_fifo_chan *chan = object->chan;
+	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+
+	if (chan->func->object_dtor)
+		chan->func->object_dtor(chan, object->hash);
+
+	if (!--engn->refcount) {
+		if (chan->func->engine_dtor)
+			chan->func->engine_dtor(chan, engine);
+		nvkm_object_del(&engn->object);
+		if (chan->vm)
+			atomic_dec(&chan->vm->engref[engine->subdev.index]);
+	}
+}
+
+static const struct nvkm_oproxy_func
+nvkm_fifo_chan_child_func = {
+	.dtor[0] = nvkm_fifo_chan_child_del,
+	.init[0] = nvkm_fifo_chan_child_init,
+	.fini[0] = nvkm_fifo_chan_child_fini,
+};
+
+static int
+nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+			 struct nvkm_object **pobject)
+{
+	struct nvkm_engine *engine = oclass->engine;
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
+	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
+	struct nvkm_fifo_chan_object *object;
+	int ret = 0;
+
+	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_oproxy_ctor(&nvkm_fifo_chan_child_func, oclass, &object->oproxy);
+	object->chan = chan;
+	*pobject = &object->oproxy.base;
+
+	if (!engn->refcount++) {
+		struct nvkm_oclass cclass = {
+			.client = oclass->client,
+			.engine = oclass->engine,
+		};
+
+		if (chan->vm)
+			atomic_inc(&chan->vm->engref[engine->subdev.index]);
+
+		if (engine->func->fifo.cclass) {
+			ret = engine->func->fifo.cclass(chan, &cclass,
+							&engn->object);
+		} else
+		if (engine->func->cclass) {
+			ret = nvkm_object_new_(engine->func->cclass, &cclass,
+					       NULL, 0, &engn->object);
+		}
+		if (ret)
+			return ret;
+
+		if (chan->func->engine_ctor) {
+			ret = chan->func->engine_ctor(chan, oclass->engine,
+						      engn->object);
+			if (ret)
+				return ret;
+		}
+	}
+
+	ret = oclass->base.ctor(&(const struct nvkm_oclass) {
+					.base = oclass->base,
+					.engn = oclass->engn,
+					.handle = oclass->handle,
+					.object = oclass->object,
+					.client = oclass->client,
+					.parent = engn->object ?
+						  engn->object :
+						  oclass->parent,
+					.engine = engine,
+				}, data, size, &object->oproxy.object);
+	if (ret)
+		return ret;
+
+	if (chan->func->object_ctor) {
+		object->hash =
+			chan->func->object_ctor(chan, object->oproxy.object);
+		if (object->hash < 0)
+			return object->hash;
+	}
+
+	return 0;
+}
+
+static int
+nvkm_fifo_chan_child_get(struct nvkm_object *object, int index,
+			 struct nvkm_oclass *oclass)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	struct nvkm_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	struct nvkm_engine *engine;
+	u64 mask = chan->engines;
+	int ret, i, c;
+
+	for (; c = 0, i = __ffs64(mask), mask; mask &= ~(1ULL << i)) {
+		if (!(engine = nvkm_device_engine(device, i)))
+			continue;
+		oclass->engine = engine;
+		oclass->base.oclass = 0;
+
+		if (engine->func->fifo.sclass) {
+			ret = engine->func->fifo.sclass(oclass, index);
+			if (oclass->base.oclass) {
+				if (!oclass->base.ctor)
+					oclass->base.ctor = nvkm_object_new;
+				oclass->ctor = nvkm_fifo_chan_child_new;
+				return 0;
+			}
+
+			index -= ret;
+			continue;
+		}
+
+		while (engine->func->sclass[c].oclass) {
+			if (c++ == index) {
+				oclass->base = engine->func->sclass[index];
+				if (!oclass->base.ctor)
+					oclass->base.ctor = nvkm_object_new;
+				oclass->ctor = nvkm_fifo_chan_child_new;
+				return 0;
+			}
+		}
+		index -= c;
+	}
+
+	return -EINVAL;
+}
+
+static int
+nvkm_fifo_chan_ntfy(struct nvkm_object *object, u32 type,
+		    struct nvkm_event **pevent)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	if (chan->func->ntfy)
+		return chan->func->ntfy(chan, type, pevent);
+	return -ENODEV;
+}
+
+static int
+nvkm_fifo_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	*addr = chan->addr;
+	*size = chan->size;
+	return 0;
+}
+
+static int
+nvkm_fifo_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	if (unlikely(!chan->user)) {
+		chan->user = ioremap(chan->addr, chan->size);
+		if (!chan->user)
+			return -ENOMEM;
+	}
+	if (unlikely(addr + 4 > chan->size))
+		return -EINVAL;
+	*data = ioread32_native(chan->user + addr);
+	return 0;
+}
+
+static int
+nvkm_fifo_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	if (unlikely(!chan->user)) {
+		chan->user = ioremap(chan->addr, chan->size);
+		if (!chan->user)
+			return -ENOMEM;
+	}
+	if (unlikely(addr + 4 > chan->size))
+		return -EINVAL;
+	iowrite32_native(data, chan->user + addr);
+	return 0;
+}
+
+static int
+nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	chan->func->fini(chan);
+	return 0;
+}
+
+static int
+nvkm_fifo_chan_init(struct nvkm_object *object)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	chan->func->init(chan);
+	return 0;
+}
+
+static void *
+nvkm_fifo_chan_dtor(struct nvkm_object *object)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	struct nvkm_fifo *fifo = chan->fifo;
+	void *data = chan->func->dtor(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fifo->lock, flags);
+	if (!list_empty(&chan->head)) {
+		__clear_bit(chan->chid, fifo->mask);
+		list_del(&chan->head);
+	}
+	spin_unlock_irqrestore(&fifo->lock, flags);
+
+	if (chan->user)
+		iounmap(chan->user);
+
+	nvkm_vm_ref(NULL, &chan->vm, NULL);
+
+	nvkm_gpuobj_del(&chan->push);
+	nvkm_gpuobj_del(&chan->inst);
+	return data;
+}
+
+static const struct nvkm_object_func
+nvkm_fifo_chan_func = {
+	.dtor = nvkm_fifo_chan_dtor,
+	.init = nvkm_fifo_chan_init,
+	.fini = nvkm_fifo_chan_fini,
+	.ntfy = nvkm_fifo_chan_ntfy,
+	.map = nvkm_fifo_chan_map,
+	.rd32 = nvkm_fifo_chan_rd32,
+	.wr32 = nvkm_fifo_chan_wr32,
+	.sclass = nvkm_fifo_chan_child_get,
+};
+
+int
+nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
+		    struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
+		    u64 vm, u64 push, u64 engines, int bar, u32 base, u32 user,
+		    const struct nvkm_oclass *oclass,
+		    struct nvkm_fifo_chan *chan)
+{
+	struct nvkm_client *client = oclass->client;
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	struct nvkm_mmu *mmu = device->mmu;
+	struct nvkm_dmaobj *dmaobj;
+	unsigned long flags;
+	int ret;
+
+	nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
+	chan->func = func;
+	chan->fifo = fifo;
+	chan->engines = engines;
+	INIT_LIST_HEAD(&chan->head);
+
+	/* instance memory */
+	ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst);
+	if (ret)
+		return ret;
+
+	/* allocate push buffer ctxdma instance */
+	if (push) {
+		dmaobj = nvkm_dma_search(device->dma, oclass->client, push);
+		if (!dmaobj)
+			return -ENOENT;
+
+		ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
+				       &chan->push);
+		if (ret)
+			return ret;
+	}
+
+	/* channel address space */
+	if (!vm && mmu) {
+		if (!client->vm || client->vm->mmu == mmu) {
+			ret = nvkm_vm_ref(client->vm, &chan->vm, NULL);
+			if (ret)
+				return ret;
+		} else {
+			return -EINVAL;
+		}
+	} else {
+		return -ENOENT;
+	}
+
+	/* allocate channel id */
+	spin_lock_irqsave(&fifo->lock, flags);
+	chan->chid = find_first_zero_bit(fifo->mask, NVKM_FIFO_CHID_NR);
+	if (chan->chid >= NVKM_FIFO_CHID_NR) {
+		spin_unlock_irqrestore(&fifo->lock, flags);
+		return -ENOSPC;
+	}
+	list_add(&chan->head, &fifo->chan);
+	__set_bit(chan->chid, fifo->mask);
+	spin_unlock_irqrestore(&fifo->lock, flags);
+
+	/* determine address of this channel's user registers */
+	chan->addr = device->func->resource_addr(device, bar) +
+		     base + user * chan->chid;
+	chan->size = user;
+
+	nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
+	return 0;
+}
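
The constructor above hands out channel IDs from a fixed-size bitmap under fifo->lock, and the dtor releases the ID only if the channel actually made it onto fifo->chan (hence the list_empty() check). A sketch of the allocate side in isolation, with illustrative names (id_alloc, nr) standing in for the nvkm ones:

	/* ID-allocator sketch: find a free bit and publish it while still
	 * holding the lock, so two constructors can't pick the same slot.
	 */
	static int
	id_alloc(unsigned long *mask, int nr, spinlock_t *lock)
	{
		unsigned long flags;
		int id;

		spin_lock_irqsave(lock, flags);
		id = find_first_zero_bit(mask, nr);
		if (id >= nr) {
			spin_unlock_irqrestore(lock, flags);
			return -ENOSPC;
		}
		__set_bit(id, mask);	/* non-atomic is safe under the lock */
		spin_unlock_irqrestore(lock, flags);
		return id;
	}
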
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
new file mode 100644
index 0000000..55dc415
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -0,0 +1,33 @@
+#ifndef __NVKM_FIFO_CHAN_H__
+#define __NVKM_FIFO_CHAN_H__
+#define nvkm_fifo_chan(p) container_of((p), struct nvkm_fifo_chan, object)
+#include "priv.h"
+
+struct nvkm_fifo_chan_func {
+	void *(*dtor)(struct nvkm_fifo_chan *);
+	void (*init)(struct nvkm_fifo_chan *);
+	void (*fini)(struct nvkm_fifo_chan *);
+	int (*ntfy)(struct nvkm_fifo_chan *, u32 type, struct nvkm_event **);
+	int  (*engine_ctor)(struct nvkm_fifo_chan *, struct nvkm_engine *,
+			    struct nvkm_object *);
+	void (*engine_dtor)(struct nvkm_fifo_chan *, struct nvkm_engine *);
+	int  (*engine_init)(struct nvkm_fifo_chan *, struct nvkm_engine *);
+	int  (*engine_fini)(struct nvkm_fifo_chan *, struct nvkm_engine *,
+			    bool suspend);
+	int  (*object_ctor)(struct nvkm_fifo_chan *, struct nvkm_object *);
+	void (*object_dtor)(struct nvkm_fifo_chan *, int);
+};
+
+int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *,
+			u32 size, u32 align, bool zero, u64 vm, u64 push,
+			u64 engines, int bar, u32 base, u32 user,
+			const struct nvkm_oclass *, struct nvkm_fifo_chan *);
+
+struct nvkm_fifo_chan_oclass {
+	int (*ctor)(struct nvkm_fifo *, const struct nvkm_oclass *,
+		    void *data, u32 size, struct nvkm_object **);
+	struct nvkm_sclass base;
+};
+
+int g84_fifo_chan_ntfy(struct nvkm_fifo_chan *, u32, struct nvkm_event **);
+#endif
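
chan.h is the seam between the generic channel core and the per-chipset backends: the core owns the nvkm_object plumbing, while each backend embeds nvkm_fifo_chan in its own struct and supplies an nvkm_fifo_chan_func. A sketch of how a backend wires itself up; xx_fifo_chan and its functions are made-up names, and the chipset files that follow do exactly this with their real ones:

	/* Backend-wiring sketch: container_of() recovers the derived
	 * channel from the embedded base, as in the #define above.
	 */
	struct xx_fifo_chan {
		struct nvkm_fifo_chan base;
		/* chipset-private state goes here */
	};
	#define xx_fifo_chan(p) container_of((p), struct xx_fifo_chan, base)

	static void
	xx_fifo_chan_init(struct nvkm_fifo_chan *base)
	{
		struct xx_fifo_chan *chan = xx_fifo_chan(base);
		/* insert the channel into the chipset's runlist */
	}

	static const struct nvkm_fifo_chan_func
	xx_fifo_chan_func = {
		.init = xx_fifo_chan_init,
		/* .dtor, .fini, .engine_ and .object_ hooks as needed */
	};
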
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
new file mode 100644
index 0000000..0430524
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+
+int
+g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
+		   struct nvkm_event **pevent)
+{
+	switch (type) {
+	case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
+		*pevent = &chan->fifo->uevent;
+		return 0;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static int
+g84_fifo_chan_engine(struct nvkm_engine *engine)
+{
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_GR    : return 0;
+	case NVKM_ENGINE_MPEG  :
+	case NVKM_ENGINE_MSPPP : return 1;
+	case NVKM_ENGINE_CE0   : return 2;
+	case NVKM_ENGINE_VP    :
+	case NVKM_ENGINE_MSPDEC: return 3;
+	case NVKM_ENGINE_CIPHER:
+	case NVKM_ENGINE_SEC   : return 4;
+	case NVKM_ENGINE_BSP   :
+	case NVKM_ENGINE_MSVLD : return 5;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+static int
+g84_fifo_chan_engine_addr(struct nvkm_engine *engine)
+{
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : return -1;
+	case NVKM_ENGINE_GR    : return 0x0020;
+	case NVKM_ENGINE_VP    :
+	case NVKM_ENGINE_MSPDEC: return 0x0040;
+	case NVKM_ENGINE_MPEG  :
+	case NVKM_ENGINE_MSPPP : return 0x0060;
+	case NVKM_ENGINE_BSP   :
+	case NVKM_ENGINE_MSVLD : return 0x0080;
+	case NVKM_ENGINE_CIPHER:
+	case NVKM_ENGINE_SEC   : return 0x00a0;
+	case NVKM_ENGINE_CE0   : return 0x00c0;
+	default:
+		WARN_ON(1);
+		return -1;
+	}
+}
+
+static int
+g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine, bool suspend)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nv50_fifo *fifo = chan->fifo;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 engn, save;
+	int offset;
+	bool done;
+
+	offset = g84_fifo_chan_engine_addr(engine);
+	if (offset < 0)
+		return 0;
+
+	engn = g84_fifo_chan_engine(engine);
+	save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
+	nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
+	done = nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
+			break;
+	) >= 0;
+	nvkm_wr32(device, 0x002520, save);
+	if (!done) {
+		nvkm_error(subdev, "channel %d [%s] unload timeout\n",
+			   chan->base.chid, chan->base.object.client->name);
+		if (suspend)
+			return -EBUSY;
+	}
+
+	nvkm_kmap(chan->eng);
+	nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+	nvkm_done(chan->eng);
+	return 0;
+}
+
+int
+g84_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
+	u64 limit, start;
+	int offset;
+
+	offset = g84_fifo_chan_engine_addr(engine);
+	if (offset < 0)
+		return 0;
+	limit = engn->addr + engn->size - 1;
+	start = engn->addr;
+
+	nvkm_kmap(chan->eng);
+	nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
+	nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
+	nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
+	nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
+					    upper_32_bits(start));
+	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+	nvkm_done(chan->eng);
+	return 0;
+}
+
+static int
+g84_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine,
+			  struct nvkm_object *object)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	int engn = engine->subdev.index;
+
+	if (g84_fifo_chan_engine_addr(engine) < 0)
+		return 0;
+
+	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+}
+
+int
+g84_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
+			  struct nvkm_object *object)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	u32 handle = object->handle;
+	u32 context;
+
+	switch (object->engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : context = 0x00000000; break;
+	case NVKM_ENGINE_GR    : context = 0x00100000; break;
+	case NVKM_ENGINE_MPEG  :
+	case NVKM_ENGINE_MSPPP : context = 0x00200000; break;
+	case NVKM_ENGINE_ME    :
+	case NVKM_ENGINE_CE0   : context = 0x00300000; break;
+	case NVKM_ENGINE_VP    :
+	case NVKM_ENGINE_MSPDEC: context = 0x00400000; break;
+	case NVKM_ENGINE_CIPHER:
+	case NVKM_ENGINE_SEC   :
+	case NVKM_ENGINE_VIC   : context = 0x00500000; break;
+	case NVKM_ENGINE_BSP   :
+	case NVKM_ENGINE_MSVLD : context = 0x00600000; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
+}
+
+static void
+g84_fifo_chan_init(struct nvkm_fifo_chan *base)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nv50_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u64 addr = chan->ramfc->addr >> 8;
+	u32 chid = chan->base.chid;
+
+	nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
+	nv50_fifo_runlist_update(fifo);
+}
+
+static const struct nvkm_fifo_chan_func
+g84_fifo_chan_func = {
+	.dtor = nv50_fifo_chan_dtor,
+	.init = g84_fifo_chan_init,
+	.fini = nv50_fifo_chan_fini,
+	.ntfy = g84_fifo_chan_ntfy,
+	.engine_ctor = g84_fifo_chan_engine_ctor,
+	.engine_dtor = nv50_fifo_chan_engine_dtor,
+	.engine_init = g84_fifo_chan_engine_init,
+	.engine_fini = g84_fifo_chan_engine_fini,
+	.object_ctor = g84_fifo_chan_object_ctor,
+	.object_dtor = nv50_fifo_chan_object_dtor,
+};
+
+int
+g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
+		   const struct nvkm_oclass *oclass,
+		   struct nv50_fifo_chan *chan)
+{
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	int ret;
+
+	ret = nvkm_fifo_chan_ctor(&g84_fifo_chan_func, &fifo->base,
+				  0x10000, 0x1000, false, vm, push,
+				  (1ULL << NVKM_ENGINE_BSP) |
+				  (1ULL << NVKM_ENGINE_CE0) |
+				  (1ULL << NVKM_ENGINE_CIPHER) |
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_ME) |
+				  (1ULL << NVKM_ENGINE_MPEG) |
+				  (1ULL << NVKM_ENGINE_MSPDEC) |
+				  (1ULL << NVKM_ENGINE_MSPPP) |
+				  (1ULL << NVKM_ENGINE_MSVLD) |
+				  (1ULL << NVKM_ENGINE_SEC) |
+				  (1ULL << NVKM_ENGINE_SW) |
+				  (1ULL << NVKM_ENGINE_VIC) |
+				  (1ULL << NVKM_ENGINE_VP),
+				  0, 0xc00000, 0x2000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x0200, 0, true, chan->base.inst,
+			      &chan->eng);
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
+			      &chan->pgd);
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, chan->base.inst,
+			      &chan->cache);
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, chan->base.inst,
+			      &chan->ramfc);
+	if (ret)
+		return ret;
+
+	ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
+	if (ret)
+		return ret;
+
+	return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+}
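
Both the g84 path above and the nv50 path below evict a context by writing the channel's instance address to 0x0032fc and polling until the engine acknowledges. nvkm_msec() is a poll-with-timeout construct: its statement body runs repeatedly until it breaks or the time budget (here 2000ms) expires, and the overall expression is negative on timeout. A stripped-down sketch of the idiom, assuming device, subdev and inst_addr are in scope:

	/* Kickoff-and-wait sketch: a negative nvkm_msec() result means the
	 * engine never acknowledged within the budget.
	 */
	nvkm_wr32(device, 0x0032fc, inst_addr >> 12);	/* request unload */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
			break;			/* engine acknowledged */
	) < 0)
		nvkm_error(subdev, "unload timeout\n");
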
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
new file mode 100644
index 0000000..7d697e2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
@@ -0,0 +1,24 @@
+#ifndef __GF100_FIFO_CHAN_H__
+#define __GF100_FIFO_CHAN_H__
+#define gf100_fifo_chan(p) container_of((p), struct gf100_fifo_chan, base)
+#include "chan.h"
+#include "gf100.h"
+
+struct gf100_fifo_chan {
+	struct nvkm_fifo_chan base;
+	struct gf100_fifo *fifo;
+
+	struct list_head head;
+	bool killed;
+
+	struct nvkm_gpuobj *pgd;
+	struct nvkm_vm *vm;
+
+	struct {
+		struct nvkm_gpuobj *inst;
+		struct nvkm_vma vma;
+	} engn[NVKM_SUBDEV_NR];
+};
+
+extern const struct nvkm_fifo_chan_oclass gf100_fifo_gpfifo_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
new file mode 100644
index 0000000..97bdddb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -0,0 +1,29 @@
+#ifndef __GK104_FIFO_CHAN_H__
+#define __GK104_FIFO_CHAN_H__
+#define gk104_fifo_chan(p) container_of((p), struct gk104_fifo_chan, base)
+#include "chan.h"
+#include "gk104.h"
+
+struct gk104_fifo_chan {
+	struct nvkm_fifo_chan base;
+	struct gk104_fifo *fifo;
+	int engine;
+
+	struct list_head head;
+	bool killed;
+
+	struct nvkm_gpuobj *pgd;
+	struct nvkm_vm *vm;
+
+	struct {
+		struct nvkm_gpuobj *inst;
+		struct nvkm_vma vma;
+	} engn[NVKM_SUBDEV_NR];
+};
+
+int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *,
+			  void *data, u32 size, struct nvkm_object **);
+
+extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass gm204_fifo_gpfifo_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
new file mode 100644
index 0000000..3361a1f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
@@ -0,0 +1,24 @@
+#ifndef __NV04_FIFO_CHAN_H__
+#define __NV04_FIFO_CHAN_H__
+#define nv04_fifo_chan(p) container_of((p), struct nv04_fifo_chan, base)
+#include "chan.h"
+#include "nv04.h"
+
+struct nv04_fifo_chan {
+	struct nvkm_fifo_chan base;
+	struct nv04_fifo *fifo;
+	u32 ramfc;
+	struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
+};
+
+extern const struct nvkm_fifo_chan_func nv04_fifo_dma_func;
+void *nv04_fifo_dma_dtor(struct nvkm_fifo_chan *);
+void nv04_fifo_dma_init(struct nvkm_fifo_chan *);
+void nv04_fifo_dma_fini(struct nvkm_fifo_chan *);
+void nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *, int);
+
+extern const struct nvkm_fifo_chan_oclass nv04_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv10_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv17_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv40_fifo_dma_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
new file mode 100644
index 0000000..25b60af
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+
+static int
+nv50_fifo_chan_engine_addr(struct nvkm_engine *engine)
+{
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : return -1;
+	case NVKM_ENGINE_GR    : return 0x0000;
+	case NVKM_ENGINE_MPEG  : return 0x0060;
+	default:
+		WARN_ON(1);
+		return -1;
+	}
+}
+
+static int
+nv50_fifo_chan_engine_fini(struct nvkm_fifo_chan *base,
+			   struct nvkm_engine *engine, bool suspend)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nv50_fifo *fifo = chan->fifo;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int offset, ret = 0;
+	u32 me;
+
+	offset = nv50_fifo_chan_engine_addr(engine);
+	if (offset < 0)
+		return 0;
+
+	/* HW bug workaround:
+	 *
+	 * PFIFO will hang forever if the connected engines don't report
+	 * that they've processed the context switch request.
+	 *
+	 * In order for the kickoff to work, we need to ensure all the
+	 * connected engines are in a state where they can answer.
+	 *
+	 * Newer chipsets don't seem to suffer from this issue, and there's
+	 * also an "ignore these engines" bitmask reg we can use if we hit
+	 * the issue there.
+	 */
+	me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);
+
+	/* do the kickoff... */
+	nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12);
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
+			break;
+	) < 0) {
+		nvkm_error(subdev, "channel %d [%s] unload timeout\n",
+			   chan->base.chid, chan->base.object.client->name);
+		if (suspend)
+			ret = -EBUSY;
+	}
+	nvkm_wr32(device, 0x00b860, me);
+
+	if (ret == 0) {
+		nvkm_kmap(chan->eng);
+		nvkm_wo32(chan->eng, offset + 0x00, 0x00000000);
+		nvkm_wo32(chan->eng, offset + 0x04, 0x00000000);
+		nvkm_wo32(chan->eng, offset + 0x08, 0x00000000);
+		nvkm_wo32(chan->eng, offset + 0x0c, 0x00000000);
+		nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+		nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+		nvkm_done(chan->eng);
+	}
+
+	return ret;
+}
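
The workaround above leans on a useful property of nvkm_mask(): it returns the register's value from before the modification, so forcing the bit on and restoring the previous state form a symmetric pair. In sketch form (grounded in the me/save usage here and in chang84.c):

	/* Save/restore sketch: nvkm_mask() reads, modifies, writes, and
	 * returns the pre-modification value.
	 */
	u32 save = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);
	/* ... perform the kickoff while the workaround bit is held ... */
	nvkm_wr32(device, 0x00b860, save);	/* restore previous state */
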
+
+static int
+nv50_fifo_chan_engine_init(struct nvkm_fifo_chan *base,
+			   struct nvkm_engine *engine)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nvkm_gpuobj *engn = chan->engn[engine->subdev.index];
+	u64 limit, start;
+	int offset;
+
+	offset = nv50_fifo_chan_engine_addr(engine);
+	if (offset < 0)
+		return 0;
+	limit = engn->addr + engn->size - 1;
+	start = engn->addr;
+
+	nvkm_kmap(chan->eng);
+	nvkm_wo32(chan->eng, offset + 0x00, 0x00190000);
+	nvkm_wo32(chan->eng, offset + 0x04, lower_32_bits(limit));
+	nvkm_wo32(chan->eng, offset + 0x08, lower_32_bits(start));
+	nvkm_wo32(chan->eng, offset + 0x0c, upper_32_bits(limit) << 24 |
+					    upper_32_bits(start));
+	nvkm_wo32(chan->eng, offset + 0x10, 0x00000000);
+	nvkm_wo32(chan->eng, offset + 0x14, 0x00000000);
+	nvkm_done(chan->eng);
+	return 0;
+}
+
+void
+nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *base,
+			   struct nvkm_engine *engine)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
+}
+
+static int
+nv50_fifo_chan_engine_ctor(struct nvkm_fifo_chan *base,
+			   struct nvkm_engine *engine,
+			   struct nvkm_object *object)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	int engn = engine->subdev.index;
+
+	if (nv50_fifo_chan_engine_addr(engine) < 0)
+		return 0;
+
+	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+}
+
+void
+nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *base, int cookie)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	nvkm_ramht_remove(chan->ramht, cookie);
+}
+
+static int
+nv50_fifo_chan_object_ctor(struct nvkm_fifo_chan *base,
+			   struct nvkm_object *object)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	u32 handle = object->handle;
+	u32 context;
+
+	switch (object->engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : context = 0x00000000; break;
+	case NVKM_ENGINE_GR    : context = 0x00100000; break;
+	case NVKM_ENGINE_MPEG  : context = 0x00200000; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context);
+}
+
+void
+nv50_fifo_chan_fini(struct nvkm_fifo_chan *base)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nv50_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 chid = chan->base.chid;
+
+	/* remove channel from runlist, fifo will unload context */
+	nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
+	nv50_fifo_runlist_update(fifo);
+	nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000);
+}
+
+static void
+nv50_fifo_chan_init(struct nvkm_fifo_chan *base)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	struct nv50_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u64 addr = chan->ramfc->addr >> 12;
+	u32 chid = chan->base.chid;
+
+	nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | addr);
+	nv50_fifo_runlist_update(fifo);
+}
+
+void *
+nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
+{
+	struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
+	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
+	nvkm_ramht_del(&chan->ramht);
+	nvkm_gpuobj_del(&chan->pgd);
+	nvkm_gpuobj_del(&chan->eng);
+	nvkm_gpuobj_del(&chan->cache);
+	nvkm_gpuobj_del(&chan->ramfc);
+	return chan;
+}
+
+static const struct nvkm_fifo_chan_func
+nv50_fifo_chan_func = {
+	.dtor = nv50_fifo_chan_dtor,
+	.init = nv50_fifo_chan_init,
+	.fini = nv50_fifo_chan_fini,
+	.engine_ctor = nv50_fifo_chan_engine_ctor,
+	.engine_dtor = nv50_fifo_chan_engine_dtor,
+	.engine_init = nv50_fifo_chan_engine_init,
+	.engine_fini = nv50_fifo_chan_engine_fini,
+	.object_ctor = nv50_fifo_chan_object_ctor,
+	.object_dtor = nv50_fifo_chan_object_dtor,
+};
+
+int
+nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
+		    const struct nvkm_oclass *oclass,
+		    struct nv50_fifo_chan *chan)
+{
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	int ret;
+
+	ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base,
+				  0x10000, 0x1000, false, vm, push,
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_SW) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_MPEG),
+				  0, 0xc00000, 0x2000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, chan->base.inst,
+			      &chan->ramfc);
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x1200, 0, true, chan->base.inst,
+			      &chan->eng);
+	if (ret)
+		return ret;
+
+	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->base.inst,
+			      &chan->pgd);
+	if (ret)
+		return ret;
+
+	ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
+	if (ret)
+		return ret;
+
+	return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+}
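
engine_init above programs a six-word context entry: a 0x00190000 header, the low halves of the context limit and start, and a word that packs both upper halves as upper_32_bits(limit) << 24 | upper_32_bits(start). A worked example with illustrative addresses:

	/* Packing example: a 64KiB context object at start = 0x123400000
	 * has limit = 0x12340ffff, so the entry words become
	 *   +0x04: 0x2340ffff  lower_32_bits(limit)
	 *   +0x08: 0x23400000  lower_32_bits(start)
	 *   +0x0c: 0x01000001  upper_32_bits(limit) << 24 |
	 *                      upper_32_bits(start)
	 */
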
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
new file mode 100644
index 0000000..4b9da46
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -0,0 +1,35 @@
+#ifndef __NV50_FIFO_CHAN_H__
+#define __NV50_FIFO_CHAN_H__
+#define nv50_fifo_chan(p) container_of((p), struct nv50_fifo_chan, base)
+#include "chan.h"
+#include "nv50.h"
+
+struct nv50_fifo_chan {
+	struct nv50_fifo *fifo;
+	struct nvkm_fifo_chan base;
+
+	struct nvkm_gpuobj *ramfc;
+	struct nvkm_gpuobj *cache;
+	struct nvkm_gpuobj *eng;
+	struct nvkm_gpuobj *pgd;
+	struct nvkm_ramht *ramht;
+	struct nvkm_vm *vm;
+
+	struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
+};
+
+int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
+			const struct nvkm_oclass *, struct nv50_fifo_chan *);
+void *nv50_fifo_chan_dtor(struct nvkm_fifo_chan *);
+void nv50_fifo_chan_fini(struct nvkm_fifo_chan *);
+void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *);
+void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
+
+int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
+		       const struct nvkm_oclass *, struct nv50_fifo_chan *);
+
+extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass;
+extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
new file mode 100644
index 0000000..a5ca52c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		 void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv50_channel_dma_v0 v0;
+	} *args = data;
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nv50_fifo_chan *chan;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d vm %llx "
+				   "pushbuf %llx offset %016llx\n",
+			   args->v0.version, args->v0.vm, args->v0.pushbuf,
+			   args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+				 oclass, chan);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+
+	nvkm_kmap(chan->ramfc);
+	nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
+	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+	nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
+	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				     (4 << 24) /* SEARCH_FULL */ |
+				     (chan->ramht->gpuobj->node->offset >> 4));
+	nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
+	nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
+	nvkm_done(chan->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+g84_fifo_dma_oclass = {
+	.base.oclass = G82_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = g84_fifo_dma_new,
+};
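
g84_fifo_dma_new() shows the argument-unmarshalling shape shared by every constructor in these files: the raw ioctl payload is viewed through a union of versioned structs, nvif_unpack() validates size/version (setting ret as a side effect, which is why the bare return ret in the else branch is well-defined), *pobject is assigned before the ctor runs so the core can destroy a partially-built channel on failure, and only then is RAMFC filled in. The skeleton, compressed:

	/* Unmarshalling sketch; struct/union names follow the code above. */
	union {
		struct nv50_channel_dma_v0 v0;
	} *args = data;
	int ret;

	if (nvif_unpack(args->v0, 0, 0, false)) {
		/* args->v0 validated: version/vm/pushbuf/offset readable */
		if (!args->v0.pushbuf)
			return -EINVAL;
	} else
		return ret;	/* nvif_unpack() stored the error here */
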
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
new file mode 100644
index 0000000..bfcc640
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+void
+nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+	nvkm_ramht_remove(imem->ramht, cookie);
+}
+
+static int
+nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
+			  struct nvkm_object *object)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+	u32 context = 0x80000000 | chan->base.chid << 24;
+	u32 handle  = object->handle;
+	int hash;
+
+	switch (object->engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : context |= 0x00000000; break;
+	case NVKM_ENGINE_GR    : context |= 0x00010000; break;
+	case NVKM_ENGINE_MPEG  : context |= 0x00020000; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+	hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
+				 handle, context);
+	mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+	return hash;
+}
+
+void
+nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nv04_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_memory *fctx = device->imem->ramfc;
+	const struct nv04_fifo_ramfc *c;
+	unsigned long flags;
+	u32 mask = fifo->base.nr - 1;
+	u32 data = chan->ramfc;
+	u32 chid;
+
+	/* prevent fifo context switches */
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
+
+	/* if this channel is active, replace it with a null context */
+	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & mask;
+	if (chid == chan->base.chid) {
+		nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
+		nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
+
+		c = fifo->ramfc;
+		do {
+			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
+			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
+			u32 rv = (nvkm_rd32(device, c->regp) &  rm) >> c->regs;
+			u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
+			nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
+		} while ((++c)->bits);
+
+		c = fifo->ramfc;
+		do {
+			nvkm_wr32(device, c->regp, 0x00000000);
+		} while ((++c)->bits);
+
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, mask);
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+	}
+
+	/* restore normal operation, after disabling dma mode */
+	nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+void
+nv04_fifo_dma_init(struct nvkm_fifo_chan *base)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nv04_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 mask = 1 << chan->base.chid;
+	unsigned long flags;
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
+}
+
+void *
+nv04_fifo_dma_dtor(struct nvkm_fifo_chan *base)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nv04_fifo *fifo = chan->fifo;
+	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
+	const struct nv04_fifo_ramfc *c = fifo->ramfc;
+
+	nvkm_kmap(imem->ramfc);
+	do {
+		nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000);
+	} while ((++c)->bits);
+	nvkm_done(imem->ramfc);
+	return chan;
+}
+
+const struct nvkm_fifo_chan_func
+nv04_fifo_dma_func = {
+	.dtor = nv04_fifo_dma_dtor,
+	.init = nv04_fifo_dma_init,
+	.fini = nv04_fifo_dma_fini,
+	.object_ctor = nv04_fifo_dma_object_ctor,
+	.object_dtor = nv04_fifo_dma_object_dtor,
+};
+
+static int
+nv04_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv03_channel_dma_v0 v0;
+	} *args = data;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nv04_fifo_chan *chan = NULL;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+				   "offset %08x\n", args->v0.version,
+			   args->v0.pushbuf, args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_SW),
+				  0, 0x800000, 0x10000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	chan->ramfc = chan->base.chid * 32;
+
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.push->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x10,
+			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			       NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	nvkm_done(imem->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv04_fifo_dma_oclass = {
+	.base.oclass = NV03_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv04_fifo_dma_new,
+};
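
nv04_fifo_dma_fini() context-switches a live channel out in software: each entry of the fifo->ramfc descriptor table names a bitfield in a PFIFO register (regp, regs, bits) and the matching position in the channel's RAMFC image (ctxp, ctxs), and the do/while loops walk the table until a zero-width terminator. A sketch of applying one descriptor, with an illustrative struct mirroring the fields the loop uses:

	/* One-descriptor sketch: copy a 'bits'-wide field from a live
	 * register into the channel's context image.
	 */
	struct ramfc_desc {		/* stand-in for nv04_fifo_ramfc */
		unsigned bits;		/* field width; 0 ends the table */
		unsigned ctxs, ctxp;	/* shift/offset in context image */
		unsigned regs, regp;	/* shift/address of live register */
	};

	static void
	save_field(struct nvkm_device *device, struct nvkm_memory *fctx,
		   const struct ramfc_desc *c, u32 base)
	{
		u32 rm = ((1ULL << c->bits) - 1) << c->regs;
		u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
		u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
		u32 cv = nvkm_ro32(fctx, c->ctxp + base) & ~cm;
		nvkm_wo32(fctx, c->ctxp + base, cv | (rv << c->ctxs));
	}
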
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
new file mode 100644
index 0000000..34f68e5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv10.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nv10_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv03_channel_dma_v0 v0;
+	} *args = data;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nv04_fifo_chan *chan = NULL;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+				   "offset %08x\n", args->v0.version,
+			   args->v0.pushbuf, args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_SW),
+				  0, 0x800000, 0x10000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	chan->ramfc = chan->base.chid * 32;
+
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
+			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			       NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	nvkm_done(imem->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv10_fifo_dma_oclass = {
+	.base.oclass = NV10_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv10_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
new file mode 100644
index 0000000..ed7cc9f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv17.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nv17_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv03_channel_dma_v0 v0;
+	} *args = data;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nv04_fifo_chan *chan = NULL;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+				   "offset %08x\n", args->v0.version,
+			   args->v0.pushbuf, args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_fifo_chan_ctor(&nv04_fifo_dma_func, &fifo->base,
+				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_MPEG) | /* NV31- */
+				  (1ULL << NVKM_ENGINE_SW),
+				  0, 0x800000, 0x10000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	chan->ramfc = chan->base.chid * 64;
+
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
+			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			       NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	nvkm_done(imem->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv17_fifo_dma_oclass = {
+	.base.oclass = NV17_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv17_fifo_dma_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
new file mode 100644
index 0000000..043b6c3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv04.h"
+#include "regsnv04.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static bool
+nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
+{
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW:
+		return false;
+	case NVKM_ENGINE_GR:
+		*reg = 0x0032e0;
+		*ctx = 0x38;
+		return true;
+	case NVKM_ENGINE_MPEG:
+		*reg = 0x00330c;
+		*ctx = 0x54;
+		return true;
+	default:
+		WARN_ON(1);
+		return false;
+	}
+}
+
+static int
+nv40_fifo_dma_engine_fini(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine, bool suspend)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nv04_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	unsigned long flags;
+	u32 reg, ctx;
+	int chid;
+
+	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+		return 0;
+
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
+
+	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
+	if (chid == chan->base.chid)
+		nvkm_wr32(device, reg, 0x00000000);
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000);
+	nvkm_done(imem->ramfc);
+
+	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
+	return 0;
+}
+
+static int
+nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nv04_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	unsigned long flags;
+	u32 inst, reg, ctx;
+	int chid;
+
+	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+		return 0;
+	inst = chan->engn[engine->subdev.index]->addr >> 4;
+
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
+
+	chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1);
+	if (chid == chan->base.chid)
+		nvkm_wr32(device, reg, inst);
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, inst);
+	nvkm_done(imem->ramfc);
+
+	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
+	return 0;
+}
+
+static void
+nv40_fifo_dma_engine_dtor(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	nvkm_gpuobj_del(&chan->engn[engine->subdev.index]);
+}
+
+static int
+nv40_fifo_dma_engine_ctor(struct nvkm_fifo_chan *base,
+			  struct nvkm_engine *engine,
+			  struct nvkm_object *object)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	const int engn = engine->subdev.index;
+	u32 reg, ctx;
+
+	if (!nv40_fifo_dma_engine(engine, &reg, &ctx))
+		return 0;
+
+	return nvkm_object_bind(object, NULL, 0, &chan->engn[engn]);
+}
+
+static int
+nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
+			  struct nvkm_object *object)
+{
+	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
+	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+	u32 context = chan->base.chid << 23;
+	u32 handle  = object->handle;
+	int hash;
+
+	switch (object->engine->subdev.index) {
+	case NVKM_ENGINE_DMAOBJ:
+	case NVKM_ENGINE_SW    : context |= 0x00000000; break;
+	case NVKM_ENGINE_GR    : context |= 0x00100000; break;
+	case NVKM_ENGINE_MPEG  : context |= 0x00200000; break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	mutex_lock(&chan->fifo->base.engine.subdev.mutex);
+	hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
+				 handle, context);
+	mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
+	return hash;
+}
+
+static const struct nvkm_fifo_chan_func
+nv40_fifo_dma_func = {
+	.dtor = nv04_fifo_dma_dtor,
+	.init = nv04_fifo_dma_init,
+	.fini = nv04_fifo_dma_fini,
+	.engine_ctor = nv40_fifo_dma_engine_ctor,
+	.engine_dtor = nv40_fifo_dma_engine_dtor,
+	.engine_init = nv40_fifo_dma_engine_init,
+	.engine_fini = nv40_fifo_dma_engine_fini,
+	.object_ctor = nv40_fifo_dma_object_ctor,
+	.object_dtor = nv04_fifo_dma_object_dtor,
+};
+
+static int
+nv40_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv03_channel_dma_v0 v0;
+	} *args = data;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nv04_fifo_chan *chan = NULL;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
+				   "offset %08x\n", args->v0.version,
+			   args->v0.pushbuf, args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_fifo_chan_ctor(&nv40_fifo_dma_func, &fifo->base,
+				  0x1000, 0x1000, false, 0, args->v0.pushbuf,
+				  (1ULL << NVKM_ENGINE_DMAOBJ) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_MPEG) |
+				  (1ULL << NVKM_ENGINE_SW),
+				  0, 0xc00000, 0x1000, oclass, &chan->base);
+	chan->fifo = fifo;
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	chan->ramfc = chan->base.chid * 128;
+
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.push->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 |
+			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			       NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
+	nvkm_done(imem->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv40_fifo_dma_oclass = {
+	.base.oclass = NV40_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv40_fifo_dma_new,
+};
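
nv40 keeps the engine context pointer in two places: a live PFIFO register while the channel is resident, and a RAMFC word otherwise, with nv40_fifo_dma_engine() translating an engine to its (reg, ctx) pair. engine_init/fini therefore pause context switching, fix the register only if the resident chid matches, and always fix the in-memory copy. Condensed, using the names from the functions above:

	/* Dual-update sketch: keep the live register and the RAMFC image
	 * in sync while PFIFO context switching is paused.
	 */
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
	if ((nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1))
	     == chan->base.chid)
		nvkm_wr32(device, reg, inst);	/* channel is resident */
	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, inst);
	nvkm_done(imem->ramfc);
	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
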
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
new file mode 100644
index 0000000..6b3b15f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		  void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv50_channel_dma_v0 v0;
+	} *args = data;
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nv50_fifo_chan *chan;
+	int ret;
+
+	nvif_ioctl(parent, "create channel dma size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel dma vers %d vm %llx "
+				   "pushbuf %llx offset %016llx\n",
+			   args->v0.version, args->v0.vm, args->v0.pushbuf,
+			   args->v0.offset);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+				  oclass, chan);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+
+	nvkm_kmap(chan->ramfc);
+	nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset));
+	nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078);
+	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+	nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff);
+	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				     (4 << 24) /* SEARCH_FULL */ |
+				     (chan->ramht->gpuobj->node->offset >> 4));
+	nvkm_done(chan->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv50_fifo_dma_oclass = {
+	.base.oclass = NV50_CHANNEL_DMA,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_fifo_dma_new,
+};
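
The 0x80 RAMFC word written by both the nv50 and g84 constructors tells PFIFO where the channel's hash table lives and how big it is. For the 0x8000-byte RAMHT created in channv50.c — 4096 eight-byte entries, so ramht->bits = 12, assuming the nv50-family eight-byte RAMHT entry layout — the arithmetic works out as follows:

	/* Control-word example for the 0x8000-byte RAMHT above:
	 *   (12 - 9) << 27  = 0x18000000   encoded table size
	 * | (     4) << 24  = 0x04000000   SEARCH_FULL mode
	 * | (offset >> 4)                  table base, 16-byte units
	 * = 0x1c000000 | (gpuobj->node->offset >> 4)
	 */
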
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
index a04920b..ff7b529 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
@@ -22,466 +22,41 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-#include "nv04.h"
-
-#include <core/client.h>
-#include <core/engctx.h>
-#include <core/ramht.h>
-#include <subdev/bar.h>
-#include <subdev/mmu.h>
-#include <subdev/timer.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static int
-g84_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent->parent;
-	struct nvkm_gpuobj *ectx = (void *)object;
-	u64 limit = ectx->addr + ectx->size - 1;
-	u64 start = ectx->addr;
-	u32 addr;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_GR    : addr = 0x0020; break;
-	case NVDEV_ENGINE_VP    :
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0040; break;
-	case NVDEV_ENGINE_MSPPP :
-	case NVDEV_ENGINE_MPEG  : addr = 0x0060; break;
-	case NVDEV_ENGINE_BSP   :
-	case NVDEV_ENGINE_MSVLD : addr = 0x0080; break;
-	case NVDEV_ENGINE_CIPHER:
-	case NVDEV_ENGINE_SEC   : addr = 0x00a0; break;
-	case NVDEV_ENGINE_CE0   : addr = 0x00c0; break;
-	default:
-		return -EINVAL;
-	}
-
-	nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-	nv_wo32(base->eng, addr + 0x00, 0x00190000);
-	nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
-	nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
-	nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
-					upper_32_bits(start));
-	nv_wo32(base->eng, addr + 0x10, 0x00000000);
-	nv_wo32(base->eng, addr + 0x14, 0x00000000);
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-g84_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_priv *priv = (void *)parent->engine;
-	struct nv50_fifo_base *base = (void *)parent->parent;
-	struct nv50_fifo_chan *chan = (void *)parent;
-	u32 addr, save, engn;
-	bool done;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_GR    : engn = 0; addr = 0x0020; break;
-	case NVDEV_ENGINE_VP    :
-	case NVDEV_ENGINE_MSPDEC: engn = 3; addr = 0x0040; break;
-	case NVDEV_ENGINE_MSPPP :
-	case NVDEV_ENGINE_MPEG  : engn = 1; addr = 0x0060; break;
-	case NVDEV_ENGINE_BSP   :
-	case NVDEV_ENGINE_MSVLD : engn = 5; addr = 0x0080; break;
-	case NVDEV_ENGINE_CIPHER:
-	case NVDEV_ENGINE_SEC   : engn = 4; addr = 0x00a0; break;
-	case NVDEV_ENGINE_CE0   : engn = 2; addr = 0x00c0; break;
-	default:
-		return -EINVAL;
-	}
-
-	save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
-	nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
-	done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
-	nv_wr32(priv, 0x002520, save);
-	if (!done) {
-		nv_error(priv, "channel %d [%s] unload timeout\n",
-			 chan->base.chid, nvkm_client_name(chan));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	nv_wo32(base->eng, addr + 0x00, 0x00000000);
-	nv_wo32(base->eng, addr + 0x04, 0x00000000);
-	nv_wo32(base->eng, addr + 0x08, 0x00000000);
-	nv_wo32(base->eng, addr + 0x0c, 0x00000000);
-	nv_wo32(base->eng, addr + 0x10, 0x00000000);
-	nv_wo32(base->eng, addr + 0x14, 0x00000000);
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-g84_fifo_object_attach(struct nvkm_object *parent,
-		       struct nvkm_object *object, u32 handle)
-{
-	struct nv50_fifo_chan *chan = (void *)parent;
-	u32 context;
-
-	if (nv_iclass(object, NV_GPUOBJ_CLASS))
-		context = nv_gpuobj(object)->node->offset >> 4;
-	else
-		context = 0x00000004; /* just non-zero */
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_DMAOBJ:
-	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
-	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
-	case NVDEV_ENGINE_MPEG  :
-	case NVDEV_ENGINE_MSPPP : context |= 0x00200000; break;
-	case NVDEV_ENGINE_ME    :
-	case NVDEV_ENGINE_CE0   : context |= 0x00300000; break;
-	case NVDEV_ENGINE_VP    :
-	case NVDEV_ENGINE_MSPDEC: context |= 0x00400000; break;
-	case NVDEV_ENGINE_CIPHER:
-	case NVDEV_ENGINE_SEC   :
-	case NVDEV_ENGINE_VIC   : context |= 0x00500000; break;
-	case NVDEV_ENGINE_BSP   :
-	case NVDEV_ENGINE_MSVLD : context |= 0x00600000; break;
-	default:
-		return -EINVAL;
-	}
-
-	return nvkm_ramht_insert(chan->ramht, 0, handle, context);
-}
-
-static int
-g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
-{
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent;
-	struct nv50_fifo_chan *chan;
-	int ret;
-
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x2000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG) |
-				       (1ULL << NVDEV_ENGINE_ME) |
-				       (1ULL << NVDEV_ENGINE_VP) |
-				       (1ULL << NVDEV_ENGINE_CIPHER) |
-				       (1ULL << NVDEV_ENGINE_SEC) |
-				       (1ULL << NVDEV_ENGINE_BSP) |
-				       (1ULL << NVDEV_ENGINE_MSVLD) |
-				       (1ULL << NVDEV_ENGINE_MSPDEC) |
-				       (1ULL << NVDEV_ENGINE_MSPPP) |
-				       (1ULL << NVDEV_ENGINE_CE0) |
-				       (1ULL << NVDEV_ENGINE_VIC), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
-			     &chan->ramht);
-	if (ret)
-		return ret;
-
-	nv_parent(chan)->context_attach = g84_fifo_context_attach;
-	nv_parent(chan)->context_detach = g84_fifo_context_detach;
-	nv_parent(chan)->object_attach = g84_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
-	nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x3c, 0x003f6078);
-	nv_wo32(base->ramfc, 0x44, 0x01003fff);
-	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-	nv_wo32(base->ramfc, 0x4c, 0xffffffff);
-	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
-	nv_wo32(base->ramfc, 0x78, 0x00000000);
-	nv_wo32(base->ramfc, 0x7c, 0x30000001);
-	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-				   (4 << 24) /* SEARCH_FULL */ |
-				   (chan->ramht->gpuobj.node->offset >> 4));
-	nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
-	nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_channel_gpfifo_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent;
-	struct nv50_fifo_chan *chan;
-	u64 ioffset, ilength;
-	int ret;
-
-	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
-				 "ioffset %016llx ilength %08x\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
-			 args->v0.ilength);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x2000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG) |
-				       (1ULL << NVDEV_ENGINE_ME) |
-				       (1ULL << NVDEV_ENGINE_VP) |
-				       (1ULL << NVDEV_ENGINE_CIPHER) |
-				       (1ULL << NVDEV_ENGINE_SEC) |
-				       (1ULL << NVDEV_ENGINE_BSP) |
-				       (1ULL << NVDEV_ENGINE_MSVLD) |
-				       (1ULL << NVDEV_ENGINE_MSPDEC) |
-				       (1ULL << NVDEV_ENGINE_MSPPP) |
-				       (1ULL << NVDEV_ENGINE_CE0) |
-				       (1ULL << NVDEV_ENGINE_VIC), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
-			     &chan->ramht);
-	if (ret)
-		return ret;
-
-	nv_parent(chan)->context_attach = g84_fifo_context_attach;
-	nv_parent(chan)->context_detach = g84_fifo_context_detach;
-	nv_parent(chan)->object_attach = g84_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
-	ioffset = args->v0.ioffset;
-	ilength = order_base_2(args->v0.ilength / 8);
-
-	nv_wo32(base->ramfc, 0x3c, 0x403f6078);
-	nv_wo32(base->ramfc, 0x44, 0x01003fff);
-	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-	nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
-	nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
-	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
-	nv_wo32(base->ramfc, 0x78, 0x00000000);
-	nv_wo32(base->ramfc, 0x7c, 0x30000001);
-	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-				   (4 << 24) /* SEARCH_FULL */ |
-				   (chan->ramht->gpuobj.node->offset >> 4));
-	nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
-	nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-g84_fifo_chan_init(struct nvkm_object *object)
-{
-	struct nv50_fifo_priv *priv = (void *)object->engine;
-	struct nv50_fifo_base *base = (void *)object->parent;
-	struct nv50_fifo_chan *chan = (void *)object;
-	struct nvkm_gpuobj *ramfc = base->ramfc;
-	u32 chid = chan->base.chid;
-	int ret;
-
-	ret = nvkm_fifo_channel_init(&chan->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
-	nv50_fifo_playlist_update(priv);
-	return 0;
-}
-
-static struct nvkm_ofuncs
-g84_fifo_ofuncs_dma = {
-	.ctor = g84_fifo_chan_ctor_dma,
-	.dtor = nv50_fifo_chan_dtor,
-	.init = g84_fifo_chan_init,
-	.fini = nv50_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_ofuncs
-g84_fifo_ofuncs_ind = {
-	.ctor = g84_fifo_chan_ctor_ind,
-	.dtor = nv50_fifo_chan_dtor,
-	.init = g84_fifo_chan_init,
-	.fini = nv50_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-g84_fifo_sclass[] = {
-	{ G82_CHANNEL_DMA, &g84_fifo_ofuncs_dma },
-	{ G82_CHANNEL_GPFIFO, &g84_fifo_ofuncs_ind },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static int
-g84_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *data, u32 size,
-		      struct nvkm_object **pobject)
-{
-	struct nv50_fifo_base *base;
-	int ret;
-
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
-				       0x1000, NVOBJ_FLAG_HEAP, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0,
-			      0, &base->pgd);
-	if (ret)
-		return ret;
-
-	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1000,
-			      0x400, NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0100,
-			      0x100, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static struct nvkm_oclass
-g84_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_fifo_context_ctor,
-		.dtor = nv50_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
+#include "channv50.h"
 
 static void
-g84_fifo_uevent_init(struct nvkm_event *event, int type, int index)
+g84_fifo_uevent_fini(struct nvkm_fifo *fifo)
 {
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x40000000, 0x40000000);
+	struct nvkm_device *device = fifo->engine.subdev.device;
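+	/* clear the 0x002140 interrupt enable bit that drives channel uevents */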
+	nvkm_mask(device, 0x002140, 0x40000000, 0x00000000);
 }
 
 static void
-g84_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
+g84_fifo_uevent_init(struct nvkm_fifo *fifo)
 {
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x40000000, 0x00000000);
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	nvkm_mask(device, 0x002140, 0x40000000, 0x40000000);
 }
 
-static const struct nvkm_event_func
-g84_fifo_uevent_func = {
-	.ctor = nvkm_fifo_uevent_ctor,
-	.init = g84_fifo_uevent_init,
-	.fini = g84_fifo_uevent_fini,
-};
-
-static int
-g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct nv50_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
-			      &priv->playlist[0]);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
-			      &priv->playlist[1]);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&g84_fifo_uevent_func, 1, 1, &priv->base.uevent);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &g84_fifo_cclass;
-	nv_engine(priv)->sclass = g84_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	return 0;
-}
-
-struct nvkm_oclass *
-g84_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_fifo_ctor,
-		.dtor = nv50_fifo_dtor,
-		.init = nv50_fifo_init,
-		.fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+g84_fifo = {
+	.dtor = nv50_fifo_dtor,
+	.oneinit = nv50_fifo_oneinit,
+	.init = nv50_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.uevent_init = g84_fifo_uevent_init,
+	.uevent_fini = g84_fifo_uevent_fini,
+	.chan = {
+		&g84_fifo_dma_oclass,
+		&g84_fifo_gpfifo_oclass,
+		NULL
 	},
 };
+
+int
+g84_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return nv50_fifo_new_(&g84_fifo, device, index, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index b745252..ff6fcbd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -21,365 +21,72 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/fifo.h>
+#include "gf100.h"
+#include "changf100.h"
 
 #include <core/client.h>
-#include <core/engctx.h>
 #include <core/enum.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <subdev/bar.h>
-#include <subdev/fb.h>
-#include <subdev/mmu.h>
-#include <subdev/timer.h>
+#include <engine/sw.h>
 
 #include <nvif/class.h>
-#include <nvif/unpack.h>
-
-struct gf100_fifo_priv {
-	struct nvkm_fifo base;
-
-	struct work_struct fault;
-	u64 mask;
-
-	struct {
-		struct nvkm_gpuobj *mem[2];
-		int active;
-		wait_queue_head_t wait;
-	} runlist;
-
-	struct {
-		struct nvkm_gpuobj *mem;
-		struct nvkm_vma bar;
-	} user;
-	int spoon_nr;
-};
-
-struct gf100_fifo_base {
-	struct nvkm_fifo_base base;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *vm;
-};
-
-struct gf100_fifo_chan {
-	struct nvkm_fifo_chan base;
-	enum {
-		STOPPED,
-		RUNNING,
-		KILLED
-	} state;
-};
-
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
 
 static void
-gf100_fifo_runlist_update(struct gf100_fifo_priv *priv)
+gf100_fifo_uevent_init(struct nvkm_fifo *fifo)
 {
-	struct nvkm_bar *bar = nvkm_bar(priv);
-	struct nvkm_gpuobj *cur;
-	int i, p;
-
-	mutex_lock(&nv_subdev(priv)->mutex);
-	cur = priv->runlist.mem[priv->runlist.active];
-	priv->runlist.active = !priv->runlist.active;
-
-	for (i = 0, p = 0; i < 128; i++) {
-		struct gf100_fifo_chan *chan = (void *)priv->base.channel[i];
-		if (chan && chan->state == RUNNING) {
-			nv_wo32(cur, p + 0, i);
-			nv_wo32(cur, p + 4, 0x00000004);
-			p += 8;
-		}
-	}
-	bar->flush(bar);
-
-	nv_wr32(priv, 0x002270, cur->addr >> 12);
-	nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
-
-	if (wait_event_timeout(priv->runlist.wait,
-			       !(nv_rd32(priv, 0x00227c) & 0x00100000),
-			       msecs_to_jiffies(2000)) == 0)
-		nv_error(priv, "runlist update timeout\n");
-	mutex_unlock(&nv_subdev(priv)->mutex);
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
 }
 
-static int
-gf100_fifo_context_attach(struct nvkm_object *parent,
-			  struct nvkm_object *object)
+static void
+gf100_fifo_uevent_fini(struct nvkm_fifo *fifo)
 {
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gf100_fifo_base *base = (void *)parent->parent;
-	struct nvkm_engctx *ectx = (void *)object;
-	u32 addr;
-	int ret;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
-	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
-	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
-	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
-	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
-	default:
-		return -EINVAL;
-	}
-
-	if (!ectx->vma.node) {
-		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
-					 NV_MEM_ACCESS_RW, &ectx->vma);
-		if (ret)
-			return ret;
-
-		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-	}
-
-	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
-	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
-	bar->flush(bar);
-	return 0;
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
 }
 
-static int
-gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			  struct nvkm_object *object)
+void
+gf100_fifo_runlist_update(struct gf100_fifo *fifo)
 {
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gf100_fifo_priv *priv = (void *)parent->engine;
-	struct gf100_fifo_base *base = (void *)parent->parent;
-	struct gf100_fifo_chan *chan = (void *)parent;
-	u32 addr;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
-	case NVDEV_ENGINE_CE0   : addr = 0x0230; break;
-	case NVDEV_ENGINE_CE1   : addr = 0x0240; break;
-	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
-	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
-	default:
-		return -EINVAL;
-	}
-
-	nv_wr32(priv, 0x002634, chan->base.chid);
-	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
-		nv_error(priv, "channel %d [%s] kick timeout\n",
-			 chan->base.chid, nvkm_client_name(chan));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	nv_wo32(base, addr + 0x00, 0x00000000);
-	nv_wo32(base, addr + 0x04, 0x00000000);
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_channel_gpfifo_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gf100_fifo_priv *priv = (void *)engine;
-	struct gf100_fifo_base *base = (void *)parent;
 	struct gf100_fifo_chan *chan;
-	u64 usermem, ioffset, ilength;
-	int ret, i;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_memory *cur;
+	int nr = 0;
 
-	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
-				 "ioffset %016llx ilength %08x\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
-			 args->v0.ilength);
-	} else
-		return ret;
+	mutex_lock(&subdev->mutex);
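+	/* alternate between the two runlist buffers; the previous list stays
+	 * intact while the new one is built
+	 */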
+	cur = fifo->runlist.mem[fifo->runlist.active];
+	fifo->runlist.active = !fifo->runlist.active;
 
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
-				       priv->user.bar.offset, 0x1000,
-				       args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_CE0) |
-				       (1ULL << NVDEV_ENGINE_CE1) |
-				       (1ULL << NVDEV_ENGINE_MSVLD) |
-				       (1ULL << NVDEV_ENGINE_MSPDEC) |
-				       (1ULL << NVDEV_ENGINE_MSPPP), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->context_attach = gf100_fifo_context_attach;
-	nv_parent(chan)->context_detach = gf100_fifo_context_detach;
-
-	usermem = chan->base.chid * 0x1000;
-	ioffset = args->v0.ioffset;
-	ilength = order_base_2(args->v0.ilength / 8);
-
-	for (i = 0; i < 0x1000; i += 4)
-		nv_wo32(priv->user.mem, usermem + i, 0x00000000);
-
-	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
-	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
-	nv_wo32(base, 0x10, 0x0000face);
-	nv_wo32(base, 0x30, 0xfffff902);
-	nv_wo32(base, 0x48, lower_32_bits(ioffset));
-	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
-	nv_wo32(base, 0x54, 0x00000002);
-	nv_wo32(base, 0x84, 0x20400000);
-	nv_wo32(base, 0x94, 0x30000001);
-	nv_wo32(base, 0x9c, 0x00000100);
-	nv_wo32(base, 0xa4, 0x1f1f1f1f);
-	nv_wo32(base, 0xa8, 0x1f1f1f1f);
-	nv_wo32(base, 0xac, 0x0000001f);
-	nv_wo32(base, 0xb8, 0xf8000000);
-	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
-	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-gf100_fifo_chan_init(struct nvkm_object *object)
-{
-	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
-	struct gf100_fifo_priv *priv = (void *)object->engine;
-	struct gf100_fifo_chan *chan = (void *)object;
-	u32 chid = chan->base.chid;
-	int ret;
-
-	ret = nvkm_fifo_channel_init(&chan->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
-
-	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
-		nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
-		gf100_fifo_runlist_update(priv);
+	nvkm_kmap(cur);
+	list_for_each_entry(chan, &fifo->chan, head) {
+		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
+		nvkm_wo32(cur, (nr * 8) + 4, 0x00000004);
+		nr++;
 	}
+	nvkm_done(cur);
 
-	return 0;
+	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
+	nvkm_wr32(device, 0x002274, 0x01f00000 | nr);
+
+	if (wait_event_timeout(fifo->runlist.wait,
+			       !(nvkm_rd32(device, 0x00227c) & 0x00100000),
+			       msecs_to_jiffies(2000)) == 0)
+		nvkm_error(subdev, "runlist update timeout\n");
+	mutex_unlock(&subdev->mutex);
 }
 
-static void gf100_fifo_intr_engine(struct gf100_fifo_priv *priv);
-
-static int
-gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend)
-{
-	struct gf100_fifo_priv *priv = (void *)object->engine;
-	struct gf100_fifo_chan *chan = (void *)object;
-	u32 chid = chan->base.chid;
-
-	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
-		nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
-		gf100_fifo_runlist_update(priv);
-	}
-
-	gf100_fifo_intr_engine(priv);
-
-	nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
-	return nvkm_fifo_channel_fini(&chan->base, suspend);
-}
-
-static struct nvkm_ofuncs
-gf100_fifo_ofuncs = {
-	.ctor = gf100_fifo_chan_ctor,
-	.dtor = _nvkm_fifo_channel_dtor,
-	.init = gf100_fifo_chan_init,
-	.fini = gf100_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-gf100_fifo_sclass[] = {
-	{ FERMI_CHANNEL_GPFIFO, &gf100_fifo_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - instmem heap and vm setup
- ******************************************************************************/
-
-static int
-gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-			struct nvkm_oclass *oclass, void *data, u32 size,
-			struct nvkm_object **pobject)
-{
-	struct gf100_fifo_base *base;
-	int ret;
-
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
-				       0x1000, NVOBJ_FLAG_ZERO_ALLOC |
-				       NVOBJ_FLAG_HEAP, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
-			      &base->pgd);
-	if (ret)
-		return ret;
-
-	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
-	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
-	nv_wo32(base, 0x0208, 0xffffffff);
-	nv_wo32(base, 0x020c, 0x000000ff);
-
-	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static void
-gf100_fifo_context_dtor(struct nvkm_object *object)
-{
-	struct gf100_fifo_base *base = (void *)object;
-	nvkm_vm_ref(NULL, &base->vm, base->pgd);
-	nvkm_gpuobj_ref(NULL, &base->pgd);
-	nvkm_fifo_context_destroy(&base->base);
-}
-
-static struct nvkm_oclass
-gf100_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fifo_context_ctor,
-		.dtor = gf100_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
 static inline int
-gf100_fifo_engidx(struct gf100_fifo_priv *priv, u32 engn)
+gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn)
 {
 	switch (engn) {
-	case NVDEV_ENGINE_GR    : engn = 0; break;
-	case NVDEV_ENGINE_MSVLD : engn = 1; break;
-	case NVDEV_ENGINE_MSPPP : engn = 2; break;
-	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
-	case NVDEV_ENGINE_CE0   : engn = 4; break;
-	case NVDEV_ENGINE_CE1   : engn = 5; break;
+	case NVKM_ENGINE_GR    : engn = 0; break;
+	case NVKM_ENGINE_MSVLD : engn = 1; break;
+	case NVKM_ENGINE_MSPPP : engn = 2; break;
+	case NVKM_ENGINE_MSPDEC: engn = 3; break;
+	case NVKM_ENGINE_CE0   : engn = 4; break;
+	case NVKM_ENGINE_CE1   : engn = 5; break;
 	default:
 		return -1;
 	}
@@ -388,95 +95,73 @@
 }
 
 static inline struct nvkm_engine *
-gf100_fifo_engine(struct gf100_fifo_priv *priv, u32 engn)
+gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn)
 {
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+
 	switch (engn) {
-	case 0: engn = NVDEV_ENGINE_GR; break;
-	case 1: engn = NVDEV_ENGINE_MSVLD; break;
-	case 2: engn = NVDEV_ENGINE_MSPPP; break;
-	case 3: engn = NVDEV_ENGINE_MSPDEC; break;
-	case 4: engn = NVDEV_ENGINE_CE0; break;
-	case 5: engn = NVDEV_ENGINE_CE1; break;
+	case 0: engn = NVKM_ENGINE_GR; break;
+	case 1: engn = NVKM_ENGINE_MSVLD; break;
+	case 2: engn = NVKM_ENGINE_MSPPP; break;
+	case 3: engn = NVKM_ENGINE_MSPDEC; break;
+	case 4: engn = NVKM_ENGINE_CE0; break;
+	case 5: engn = NVKM_ENGINE_CE1; break;
 	default:
 		return NULL;
 	}
 
-	return nvkm_engine(priv, engn);
+	return nvkm_device_engine(device, engn);
 }
 
 static void
 gf100_fifo_recover_work(struct work_struct *work)
 {
-	struct gf100_fifo_priv *priv = container_of(work, typeof(*priv), fault);
-	struct nvkm_object *engine;
+	struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_engine *engine;
 	unsigned long flags;
 	u32 engn, engm = 0;
 	u64 mask, todo;
 
-	spin_lock_irqsave(&priv->base.lock, flags);
-	mask = priv->mask;
-	priv->mask = 0ULL;
-	spin_unlock_irqrestore(&priv->base.lock, flags);
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	mask = fifo->mask;
+	fifo->mask = 0ULL;
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
 
 	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
-		engm |= 1 << gf100_fifo_engidx(priv, engn);
-	nv_mask(priv, 0x002630, engm, engm);
+		engm |= 1 << gf100_fifo_engidx(fifo, engn);
+	nvkm_mask(device, 0x002630, engm, engm);
 
 	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
-		if ((engine = (void *)nvkm_engine(priv, engn))) {
-			nv_ofuncs(engine)->fini(engine, false);
-			WARN_ON(nv_ofuncs(engine)->init(engine));
+		if ((engine = nvkm_device_engine(device, engn))) {
+			nvkm_subdev_fini(&engine->subdev, false);
+			WARN_ON(nvkm_subdev_init(&engine->subdev));
 		}
 	}
 
-	gf100_fifo_runlist_update(priv);
-	nv_wr32(priv, 0x00262c, engm);
-	nv_mask(priv, 0x002630, engm, 0x00000000);
+	gf100_fifo_runlist_update(fifo);
+	nvkm_wr32(device, 0x00262c, engm);
+	nvkm_mask(device, 0x002630, engm, 0x00000000);
 }
 
 static void
-gf100_fifo_recover(struct gf100_fifo_priv *priv, struct nvkm_engine *engine,
+gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
 		   struct gf100_fifo_chan *chan)
 {
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 chid = chan->base.chid;
-	unsigned long flags;
 
-	nv_error(priv, "%s engine fault on channel %d, recovering...\n",
-		       nv_subdev(engine)->name, chid);
+	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
+		   nvkm_subdev_name[engine->subdev.index], chid);
+	assert_spin_locked(&fifo->base.lock);
 
-	nv_mask(priv, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
-	chan->state = KILLED;
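+	/* stop the channel, drop it from the runlist, and defer the engine
+	 * reset to the fault worker
+	 */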
+	nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
+	list_del_init(&chan->head);
+	chan->killed = true;
 
-	spin_lock_irqsave(&priv->base.lock, flags);
-	priv->mask |= 1ULL << nv_engidx(engine);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	schedule_work(&priv->fault);
-}
-
-static int
-gf100_fifo_swmthd(struct gf100_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
-{
-	struct gf100_fifo_chan *chan = NULL;
-	struct nvkm_handle *bind;
-	unsigned long flags;
-	int ret = -EINVAL;
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	if (likely(chid >= priv->base.min && chid <= priv->base.max))
-		chan = (void *)priv->base.channel[chid];
-	if (unlikely(!chan))
-		goto out;
-
-	bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
-	if (likely(bind)) {
-		if (!mthd || !nv_call(bind->object, mthd, data))
-			ret = 0;
-		nvkm_namedb_put(bind);
-	}
-
-out:
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	return ret;
+	fifo->mask |= 1ULL << engine->subdev.index;
+	schedule_work(&fifo->fault);
 }
 
 static const struct nvkm_enum
@@ -486,14 +171,17 @@
 };
 
 static void
-gf100_fifo_intr_sched_ctxsw(struct gf100_fifo_priv *priv)
+gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo)
 {
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct nvkm_engine *engine;
 	struct gf100_fifo_chan *chan;
+	unsigned long flags;
 	u32 engn;
 
+	spin_lock_irqsave(&fifo->base.lock, flags);
 	for (engn = 0; engn < 6; engn++) {
-		u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04));
+		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
 		u32 busy = (stat & 0x80000000);
 		u32 save = (stat & 0x00100000); /* maybe? */
 		u32 unk0 = (stat & 0x00040000);
@@ -502,32 +190,36 @@
 		(void)save;
 
 		if (busy && unk0 && unk1) {
-			if (!(chan = (void *)priv->base.channel[chid]))
-				continue;
-			if (!(engine = gf100_fifo_engine(priv, engn)))
-				continue;
-			gf100_fifo_recover(priv, engine, chan);
+			list_for_each_entry(chan, &fifo->chan, head) {
+				if (chan->base.chid == chid) {
+					engine = gf100_fifo_engine(fifo, engn);
+					if (!engine)
+						break;
+					gf100_fifo_recover(fifo, engine, chan);
+					break;
+				}
+			}
 		}
 	}
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
 }
 
 static void
-gf100_fifo_intr_sched(struct gf100_fifo_priv *priv)
+gf100_fifo_intr_sched(struct gf100_fifo *fifo)
 {
-	u32 intr = nv_rd32(priv, 0x00254c);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x00254c);
 	u32 code = intr & 0x000000ff;
 	const struct nvkm_enum *en;
-	char enunk[6] = "";
 
 	en = nvkm_enum_find(gf100_fifo_sched_reason, code);
-	if (!en)
-		snprintf(enunk, sizeof(enunk), "UNK%02x", code);
 
-	nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);
+	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
 
 	switch (code) {
 	case 0x0a:
-		gf100_fifo_intr_sched_ctxsw(priv);
+		gf100_fifo_intr_sched_ctxsw(fifo);
 		break;
 	default:
 		break;
@@ -536,17 +228,17 @@
 
 static const struct nvkm_enum
 gf100_fifo_fault_engine[] = {
-	{ 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
-	{ 0x03, "PEEPHOLE", NULL, NVDEV_ENGINE_IFB },
-	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
-	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
-	{ 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
-	{ 0x10, "PMSVLD", NULL, NVDEV_ENGINE_MSVLD },
-	{ 0x11, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
+	{ 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
+	{ 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
+	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+	{ 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
+	{ 0x07, "PFIFO", NULL, NVKM_ENGINE_FIFO },
+	{ 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
+	{ 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
 	{ 0x13, "PCOUNTER" },
-	{ 0x14, "PMSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
-	{ 0x15, "PCE0", NULL, NVDEV_ENGINE_CE0 },
-	{ 0x16, "PCE1", NULL, NVDEV_ENGINE_CE1 },
+	{ 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
+	{ 0x15, "PCE0", NULL, NVKM_ENGINE_CE0 },
+	{ 0x16, "PCE1", NULL, NVKM_ENGINE_CE1 },
 	{ 0x17, "PDAEMON" },
 	{}
 };
@@ -594,79 +286,65 @@
 };
 
 static void
-gf100_fifo_intr_fault(struct gf100_fifo_priv *priv, int unit)
+gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
 {
-	u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
-	u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
-	u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
-	u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
 	u32 gpc    = (stat & 0x1f000000) >> 24;
 	u32 client = (stat & 0x00001f00) >> 8;
 	u32 write  = (stat & 0x00000080);
 	u32 hub    = (stat & 0x00000040);
 	u32 reason = (stat & 0x0000000f);
-	struct nvkm_object *engctx = NULL, *object;
-	struct nvkm_engine *engine = NULL;
 	const struct nvkm_enum *er, *eu, *ec;
-	char erunk[6] = "";
-	char euunk[6] = "";
-	char ecunk[6] = "";
-	char gpcid[3] = "";
+	struct nvkm_engine *engine = NULL;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	char gpcid[8] = "";
 
 	er = nvkm_enum_find(gf100_fifo_fault_reason, reason);
-	if (!er)
-		snprintf(erunk, sizeof(erunk), "UNK%02X", reason);
-
 	eu = nvkm_enum_find(gf100_fifo_fault_engine, unit);
-	if (eu) {
-		switch (eu->data2) {
-		case NVDEV_SUBDEV_BAR:
-			nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
-			break;
-		case NVDEV_SUBDEV_INSTMEM:
-			nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
-			break;
-		case NVDEV_ENGINE_IFB:
-			nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
-			break;
-		default:
-			engine = nvkm_engine(priv, eu->data2);
-			if (engine)
-				engctx = nvkm_engctx_get(engine, inst);
-			break;
-		}
-	} else {
-		snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
-	}
-
 	if (hub) {
 		ec = nvkm_enum_find(gf100_fifo_fault_hubclient, client);
 	} else {
 		ec = nvkm_enum_find(gf100_fifo_fault_gpcclient, client);
-		snprintf(gpcid, sizeof(gpcid), "%d", gpc);
+		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
 	}
 
-	if (!ec)
-		snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);
-
-	nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on "
-		       "channel 0x%010llx [%s]\n", write ? "write" : "read",
-		 (u64)vahi << 32 | valo, er ? er->name : erunk,
-		 eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
-		 ec ? ec->name : ecunk, (u64)inst << 12,
-		 nvkm_client_name(engctx));
-
-	object = engctx;
-	while (object) {
-		switch (nv_mclass(object)) {
-		case FERMI_CHANNEL_GPFIFO:
-			gf100_fifo_recover(priv, engine, (void *)object);
+	if (eu) {
+		switch (eu->data2) {
+		case NVKM_SUBDEV_BAR:
+			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
+			break;
+		case NVKM_SUBDEV_INSTMEM:
+			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
+			break;
+		case NVKM_ENGINE_IFB:
+			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
+			break;
+		default:
+			engine = nvkm_device_engine(device, eu->data2);
 			break;
 		}
-		object = object->parent;
 	}
 
-	nvkm_engctx_put(engctx);
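+	/* match the faulting instance address against the channels' instance blocks */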
+	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);
+
+	nvkm_error(subdev,
+		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
+		   "reason %02x [%s] on channel %d [%010llx %s]\n",
+		   write ? "write" : "read", (u64)vahi << 32 | valo,
+		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
+		   reason, er ? er->name : "", chan ? chan->chid : -1,
+		   (u64)inst << 12,
+		   chan ? chan->object.client->name : "unknown");
+
+	if (engine && chan)
+		gf100_fifo_recover(fifo, engine, (void *)chan);
+	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 }
 
 static const struct nvkm_bitfield
@@ -678,290 +356,288 @@
 };
 
 static void
-gf100_fifo_intr_pbdma(struct gf100_fifo_priv *priv, int unit)
+gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit)
 {
-	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
-	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
-	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
-	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0x7f;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000));
+	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
+	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
+	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0x7f;
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00003ffc);
-	u32 show = stat;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	u32 show = stat;
+	char msg[128];
 
 	if (stat & 0x00800000) {
-		if (!gf100_fifo_swmthd(priv, chid, mthd, data))
-			show &= ~0x00800000;
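+		/* software method: give the SW engine class a chance to handle it */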
+		if (device->sw) {
+			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
+				show &= ~0x00800000;
+		}
 	}
 
 	if (show) {
-		nv_error(priv, "PBDMA%d:", unit);
-		nvkm_bitfield_print(gf100_fifo_pbdma_intr, show);
-		pr_cont("\n");
-		nv_error(priv,
-			 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
-			 unit, chid,
-			 nvkm_client_name_for_fifo_chid(&priv->base, chid),
-			 subc, mthd, data);
+		nvkm_snprintbf(msg, sizeof(msg), gf100_fifo_pbdma_intr, show);
+		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
+				   "subc %d mthd %04x data %08x\n",
+			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
+			   chan ? chan->object.client->name : "unknown",
+			   subc, mthd, data);
+		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 	}
 
-	nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
-	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
+	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
+	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
 }
 
 static void
-gf100_fifo_intr_runlist(struct gf100_fifo_priv *priv)
+gf100_fifo_intr_runlist(struct gf100_fifo *fifo)
 {
-	u32 intr = nv_rd32(priv, 0x002a00);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x002a00);
 
 	if (intr & 0x10000000) {
-		wake_up(&priv->runlist.wait);
-		nv_wr32(priv, 0x002a00, 0x10000000);
+		wake_up(&fifo->runlist.wait);
+		nvkm_wr32(device, 0x002a00, 0x10000000);
 		intr &= ~0x10000000;
 	}
 
 	if (intr) {
-		nv_error(priv, "RUNLIST 0x%08x\n", intr);
-		nv_wr32(priv, 0x002a00, intr);
+		nvkm_error(subdev, "RUNLIST %08x\n", intr);
+		nvkm_wr32(device, 0x002a00, intr);
 	}
 }
 
 static void
-gf100_fifo_intr_engine_unit(struct gf100_fifo_priv *priv, int engn)
+gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn)
 {
-	u32 intr = nv_rd32(priv, 0x0025a8 + (engn * 0x04));
-	u32 inte = nv_rd32(priv, 0x002628);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
+	u32 inte = nvkm_rd32(device, 0x002628);
 	u32 unkn;
 
-	nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
+	nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);
 
 	for (unkn = 0; unkn < 8; unkn++) {
 		u32 ints = (intr >> (unkn * 0x04)) & inte;
 		if (ints & 0x1) {
-			nvkm_fifo_uevent(&priv->base);
+			nvkm_fifo_uevent(&fifo->base);
 			ints &= ~1;
 		}
 		if (ints) {
-			nv_error(priv, "ENGINE %d %d %01x", engn, unkn, ints);
-			nv_mask(priv, 0x002628, ints, 0);
+			nvkm_error(subdev, "ENGINE %d %d %01x",
+				   engn, unkn, ints);
+			nvkm_mask(device, 0x002628, ints, 0);
 		}
 	}
 }
 
-static void
-gf100_fifo_intr_engine(struct gf100_fifo_priv *priv)
+void
+gf100_fifo_intr_engine(struct gf100_fifo *fifo)
 {
-	u32 mask = nv_rd32(priv, 0x0025a4);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 mask = nvkm_rd32(device, 0x0025a4);
 	while (mask) {
 		u32 unit = __ffs(mask);
-		gf100_fifo_intr_engine_unit(priv, unit);
+		gf100_fifo_intr_engine_unit(fifo, unit);
 		mask &= ~(1 << unit);
 	}
 }
 
 static void
-gf100_fifo_intr(struct nvkm_subdev *subdev)
+gf100_fifo_intr(struct nvkm_fifo *base)
 {
-	struct gf100_fifo_priv *priv = (void *)subdev;
-	u32 mask = nv_rd32(priv, 0x002140);
-	u32 stat = nv_rd32(priv, 0x002100) & mask;
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask = nvkm_rd32(device, 0x002140);
+	u32 stat = nvkm_rd32(device, 0x002100) & mask;
 
 	if (stat & 0x00000001) {
-		u32 intr = nv_rd32(priv, 0x00252c);
-		nv_warn(priv, "INTR 0x00000001: 0x%08x\n", intr);
-		nv_wr32(priv, 0x002100, 0x00000001);
+		u32 intr = nvkm_rd32(device, 0x00252c);
+		nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
+		nvkm_wr32(device, 0x002100, 0x00000001);
 		stat &= ~0x00000001;
 	}
 
 	if (stat & 0x00000100) {
-		gf100_fifo_intr_sched(priv);
-		nv_wr32(priv, 0x002100, 0x00000100);
+		gf100_fifo_intr_sched(fifo);
+		nvkm_wr32(device, 0x002100, 0x00000100);
 		stat &= ~0x00000100;
 	}
 
 	if (stat & 0x00010000) {
-		u32 intr = nv_rd32(priv, 0x00256c);
-		nv_warn(priv, "INTR 0x00010000: 0x%08x\n", intr);
-		nv_wr32(priv, 0x002100, 0x00010000);
+		u32 intr = nvkm_rd32(device, 0x00256c);
+		nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
+		nvkm_wr32(device, 0x002100, 0x00010000);
 		stat &= ~0x00010000;
 	}
 
 	if (stat & 0x01000000) {
-		u32 intr = nv_rd32(priv, 0x00258c);
-		nv_warn(priv, "INTR 0x01000000: 0x%08x\n", intr);
-		nv_wr32(priv, 0x002100, 0x01000000);
+		u32 intr = nvkm_rd32(device, 0x00258c);
+		nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
+		nvkm_wr32(device, 0x002100, 0x01000000);
 		stat &= ~0x01000000;
 	}
 
 	if (stat & 0x10000000) {
-		u32 mask = nv_rd32(priv, 0x00259c);
+		u32 mask = nvkm_rd32(device, 0x00259c);
 		while (mask) {
 			u32 unit = __ffs(mask);
-			gf100_fifo_intr_fault(priv, unit);
-			nv_wr32(priv, 0x00259c, (1 << unit));
+			gf100_fifo_intr_fault(fifo, unit);
+			nvkm_wr32(device, 0x00259c, (1 << unit));
 			mask &= ~(1 << unit);
 		}
 		stat &= ~0x10000000;
 	}
 
 	if (stat & 0x20000000) {
-		u32 mask = nv_rd32(priv, 0x0025a0);
+		u32 mask = nvkm_rd32(device, 0x0025a0);
 		while (mask) {
 			u32 unit = __ffs(mask);
-			gf100_fifo_intr_pbdma(priv, unit);
-			nv_wr32(priv, 0x0025a0, (1 << unit));
+			gf100_fifo_intr_pbdma(fifo, unit);
+			nvkm_wr32(device, 0x0025a0, (1 << unit));
 			mask &= ~(1 << unit);
 		}
 		stat &= ~0x20000000;
 	}
 
 	if (stat & 0x40000000) {
-		gf100_fifo_intr_runlist(priv);
+		gf100_fifo_intr_runlist(fifo);
 		stat &= ~0x40000000;
 	}
 
 	if (stat & 0x80000000) {
-		gf100_fifo_intr_engine(priv);
+		gf100_fifo_intr_engine(fifo);
 		stat &= ~0x80000000;
 	}
 
 	if (stat) {
-		nv_error(priv, "INTR 0x%08x\n", stat);
-		nv_mask(priv, 0x002140, stat, 0x00000000);
-		nv_wr32(priv, 0x002100, stat);
+		nvkm_error(subdev, "INTR %08x\n", stat);
+		nvkm_mask(device, 0x002140, stat, 0x00000000);
+		nvkm_wr32(device, 0x002100, stat);
 	}
 }
 
-static void
-gf100_fifo_uevent_init(struct nvkm_event *event, int type, int index)
-{
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
-}
-
-static void
-gf100_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
-{
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
-}
-
-static const struct nvkm_event_func
-gf100_fifo_uevent_func = {
-	.ctor = nvkm_fifo_uevent_ctor,
-	.init = gf100_fifo_uevent_init,
-	.fini = gf100_fifo_uevent_fini,
-};
-
 static int
-gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+gf100_fifo_oneinit(struct nvkm_fifo *base)
 {
-	struct gf100_fifo_priv *priv;
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	int ret;
 
-	ret = nvkm_fifo_create(parent, engine, oclass, 0, 127, &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
+			      false, &fifo->runlist.mem[0]);
 	if (ret)
 		return ret;
 
-	INIT_WORK(&priv->fault, gf100_fifo_recover_work);
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
-			      &priv->runlist.mem[0]);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000,
+			      false, &fifo->runlist.mem[1]);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
-			      &priv->runlist.mem[1]);
+	init_waitqueue_head(&fifo->runlist.wait);
+
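+	/* user-visible control area: one 4KiB page per channel for all 128 channels */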
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 0x1000,
+			      0x1000, false, &fifo->user.mem);
 	if (ret)
 		return ret;
 
-	init_waitqueue_head(&priv->runlist.wait);
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0,
-			      &priv->user.mem);
+	ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
-			      &priv->user.bar);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gf100_fifo_uevent_func, 1, 1, &priv->base.uevent);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = gf100_fifo_intr;
-	nv_engine(priv)->cclass = &gf100_fifo_cclass;
-	nv_engine(priv)->sclass = gf100_fifo_sclass;
+	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
 	return 0;
 }
 
 static void
-gf100_fifo_dtor(struct nvkm_object *object)
+gf100_fifo_fini(struct nvkm_fifo *base)
 {
-	struct gf100_fifo_priv *priv = (void *)object;
-
-	nvkm_gpuobj_unmap(&priv->user.bar);
-	nvkm_gpuobj_ref(NULL, &priv->user.mem);
-	nvkm_gpuobj_ref(NULL, &priv->runlist.mem[0]);
-	nvkm_gpuobj_ref(NULL, &priv->runlist.mem[1]);
-
-	nvkm_fifo_destroy(&priv->base);
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	flush_work(&fifo->fault);
 }
 
-static int
-gf100_fifo_init(struct nvkm_object *object)
+static void
+gf100_fifo_init(struct nvkm_fifo *base)
 {
-	struct gf100_fifo_priv *priv = (void *)object;
-	int ret, i;
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	int i;
 
-	ret = nvkm_fifo_init(&priv->base);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, 0x000204, 0xffffffff);
+	nvkm_wr32(device, 0x002204, 0xffffffff);
 
-	nv_wr32(priv, 0x000204, 0xffffffff);
-	nv_wr32(priv, 0x002204, 0xffffffff);
-
-	priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204));
-	nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);
+	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x002204));
+	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
 
 	/* assign engines to PBDMAs */
-	if (priv->spoon_nr >= 3) {
-		nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */
-		nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */
-		nv_wr32(priv, 0x002210, ~(1 << 1)); /* PMSPP */
-		nv_wr32(priv, 0x002214, ~(1 << 1)); /* PMSVLD */
-		nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */
-		nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */
+	if (fifo->spoon_nr >= 3) {
+		nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
+		nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
+		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
+		nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
+		nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
+		nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
 	}
 
 	/* PBDMA[n] */
-	for (i = 0; i < priv->spoon_nr; i++) {
-		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
-		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
-		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+	for (i = 0; i < fifo->spoon_nr; i++) {
+		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
 	}
 
-	nv_mask(priv, 0x002200, 0x00000001, 0x00000001);
-	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+	nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
+	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
 
-	nv_wr32(priv, 0x002100, 0xffffffff);
-	nv_wr32(priv, 0x002140, 0x7fffffff);
-	nv_wr32(priv, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
-	return 0;
+	nvkm_wr32(device, 0x002100, 0xffffffff);
+	nvkm_wr32(device, 0x002140, 0x7fffffff);
+	nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
 }
 
-struct nvkm_oclass *
-gf100_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fifo_ctor,
-		.dtor = gf100_fifo_dtor,
-		.init = gf100_fifo_init,
-		.fini = _nvkm_fifo_fini,
+static void *
+gf100_fifo_dtor(struct nvkm_fifo *base)
+{
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	nvkm_vm_put(&fifo->user.bar);
+	nvkm_memory_del(&fifo->user.mem);
+	nvkm_memory_del(&fifo->runlist.mem[0]);
+	nvkm_memory_del(&fifo->runlist.mem[1]);
+	return fifo;
+}
+
+static const struct nvkm_fifo_func
+gf100_fifo = {
+	.dtor = gf100_fifo_dtor,
+	.oneinit = gf100_fifo_oneinit,
+	.init = gf100_fifo_init,
+	.fini = gf100_fifo_fini,
+	.intr = gf100_fifo_intr,
+	.uevent_init = gf100_fifo_uevent_init,
+	.uevent_fini = gf100_fifo_uevent_fini,
+	.chan = {
+		&gf100_fifo_gpfifo_oclass,
+		NULL
 	},
 };
+
+int
+gf100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	struct gf100_fifo *fifo;
+
+	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+		return -ENOMEM;
+	INIT_LIST_HEAD(&fifo->chan);
+	INIT_WORK(&fifo->fault, gf100_fifo_recover_work);
+	*pfifo = &fifo->base;
+
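+	/* gf100 PFIFO exposes 128 channels */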
+	return nvkm_fifo_ctor(&gf100_fifo, device, index, 128, &fifo->base);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
new file mode 100644
index 0000000..c649ca9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
@@ -0,0 +1,31 @@
+#ifndef __GF100_FIFO_H__
+#define __GF100_FIFO_H__
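+/* retrieve the gf100_fifo container from its embedded nvkm_fifo base */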
+#define gf100_fifo(p) container_of((p), struct gf100_fifo, base)
+#include "priv.h"
+
+#include <subdev/mmu.h>
+
+struct gf100_fifo {
+	struct nvkm_fifo base;
+
+	struct list_head chan;
+
+	struct work_struct fault;
+	u64 mask;
+
+	struct {
+		struct nvkm_memory *mem[2];
+		int active;
+		wait_queue_head_t wait;
+	} runlist;
+
+	struct {
+		struct nvkm_memory *mem;
+		struct nvkm_vma bar;
+	} user;
+	int spoon_nr;
+};
+
+void gf100_fifo_intr_engine(struct gf100_fifo *);
+void gf100_fifo_runlist_update(struct gf100_fifo *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index e10f964..98970a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -22,486 +22,121 @@
  * Authors: Ben Skeggs
  */
 #include "gk104.h"
+#include "changk104.h"
 
 #include <core/client.h>
-#include <core/engctx.h>
 #include <core/enum.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <subdev/bar.h>
-#include <subdev/fb.h>
-#include <subdev/mmu.h>
-#include <subdev/timer.h>
+#include <engine/sw.h>
 
 #include <nvif/class.h>
-#include <nvif/unpack.h>
 
-#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
-static const struct {
-	u64 subdev;
-	u64 mask;
-} fifo_engine[] = {
-	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW) |
-				 (1ULL << NVDEV_ENGINE_CE2)),
-	_(NVDEV_ENGINE_MSPDEC  , 0),
-	_(NVDEV_ENGINE_MSPPP   , 0),
-	_(NVDEV_ENGINE_MSVLD   , 0),
-	_(NVDEV_ENGINE_CE0     , 0),
-	_(NVDEV_ENGINE_CE1     , 0),
-	_(NVDEV_ENGINE_MSENC   , 0),
-};
-#undef _
-#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
-
-struct gk104_fifo_engn {
-	struct nvkm_gpuobj *runlist[2];
-	int cur_runlist;
-	wait_queue_head_t wait;
-};
-
-struct gk104_fifo_priv {
-	struct nvkm_fifo base;
-
-	struct work_struct fault;
-	u64 mask;
-
-	struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
-	struct {
-		struct nvkm_gpuobj *mem;
-		struct nvkm_vma bar;
-	} user;
-	int spoon_nr;
-};
-
-struct gk104_fifo_base {
-	struct nvkm_fifo_base base;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *vm;
-};
-
-struct gk104_fifo_chan {
-	struct nvkm_fifo_chan base;
-	u32 engine;
-	enum {
-		STOPPED,
-		RUNNING,
-		KILLED
-	} state;
-};
-
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static void
-gk104_fifo_runlist_update(struct gk104_fifo_priv *priv, u32 engine)
+void
+gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
 {
-	struct nvkm_bar *bar = nvkm_bar(priv);
-	struct gk104_fifo_engn *engn = &priv->engine[engine];
-	struct nvkm_gpuobj *cur;
-	int i, p;
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
+}
 
-	mutex_lock(&nv_subdev(priv)->mutex);
+void
+gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
+{
+	struct nvkm_device *device = fifo->engine.subdev.device;
+	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
+}
+
+void
+gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
+{
+	struct gk104_fifo_engn *engn = &fifo->engine[engine];
+	struct gk104_fifo_chan *chan;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_memory *cur;
+	int nr = 0;
+
+	mutex_lock(&subdev->mutex);
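+	/* each engine owns a pair of runlist buffers; alternate between them */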
 	cur = engn->runlist[engn->cur_runlist];
 	engn->cur_runlist = !engn->cur_runlist;
 
-	for (i = 0, p = 0; i < priv->base.max; i++) {
-		struct gk104_fifo_chan *chan = (void *)priv->base.channel[i];
-		if (chan && chan->state == RUNNING && chan->engine == engine) {
-			nv_wo32(cur, p + 0, i);
-			nv_wo32(cur, p + 4, 0x00000000);
-			p += 8;
-		}
+	nvkm_kmap(cur);
+	list_for_each_entry(chan, &engn->chan, head) {
+		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
+		nvkm_wo32(cur, (nr * 8) + 4, 0x00000000);
+		nr++;
 	}
-	bar->flush(bar);
+	nvkm_done(cur);
 
-	nv_wr32(priv, 0x002270, cur->addr >> 12);
-	nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
+	nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
+	nvkm_wr32(device, 0x002274, (engine << 20) | nr);
 
-	if (wait_event_timeout(engn->wait, !(nv_rd32(priv, 0x002284 +
+	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
 			       (engine * 0x08)) & 0x00100000),
 				msecs_to_jiffies(2000)) == 0)
-		nv_error(priv, "runlist %d update timeout\n", engine);
-	mutex_unlock(&nv_subdev(priv)->mutex);
-}
-
-static int
-gk104_fifo_context_attach(struct nvkm_object *parent,
-			  struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gk104_fifo_base *base = (void *)parent->parent;
-	struct nvkm_engctx *ectx = (void *)object;
-	u32 addr;
-	int ret;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW   :
-		return 0;
-	case NVDEV_ENGINE_CE0:
-	case NVDEV_ENGINE_CE1:
-	case NVDEV_ENGINE_CE2:
-		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-		return 0;
-	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
-	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
-	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
-	default:
-		return -EINVAL;
-	}
-
-	if (!ectx->vma.node) {
-		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
-					 NV_MEM_ACCESS_RW, &ectx->vma);
-		if (ret)
-			return ret;
-
-		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-	}
-
-	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
-	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			  struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gk104_fifo_priv *priv = (void *)parent->engine;
-	struct gk104_fifo_base *base = (void *)parent->parent;
-	struct gk104_fifo_chan *chan = (void *)parent;
-	u32 addr;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW    : return 0;
-	case NVDEV_ENGINE_CE0   :
-	case NVDEV_ENGINE_CE1   :
-	case NVDEV_ENGINE_CE2   : addr = 0x0000; break;
-	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
-	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
-	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
-	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
-	default:
-		return -EINVAL;
-	}
-
-	nv_wr32(priv, 0x002634, chan->base.chid);
-	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
-		nv_error(priv, "channel %d [%s] kick timeout\n",
-			 chan->base.chid, nvkm_client_name(chan));
-		if (suspend)
-			return -EBUSY;
-	}
-
-	if (addr) {
-		nv_wo32(base, addr + 0x00, 0x00000000);
-		nv_wo32(base, addr + 0x04, 0x00000000);
-		bar->flush(bar);
-	}
-
-	return 0;
-}
-
-static int
-gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
-{
-	union {
-		struct kepler_channel_gpfifo_a_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gk104_fifo_priv *priv = (void *)engine;
-	struct gk104_fifo_base *base = (void *)parent;
-	struct gk104_fifo_chan *chan;
-	u64 usermem, ioffset, ilength;
-	int ret, i;
-
-	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
-				 "ioffset %016llx ilength %08x engine %08x\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
-			 args->v0.ilength, args->v0.engine);
-	} else
-		return ret;
-
-	for (i = 0; i < FIFO_ENGINE_NR; i++) {
-		if (args->v0.engine & (1 << i)) {
-			if (nvkm_engine(parent, fifo_engine[i].subdev)) {
-				args->v0.engine = (1 << i);
-				break;
-			}
-		}
-	}
-
-	if (i == FIFO_ENGINE_NR) {
-		nv_error(priv, "unsupported engines 0x%08x\n", args->v0.engine);
-		return -ENODEV;
-	}
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
-				       priv->user.bar.offset, 0x200,
-				       args->v0.pushbuf,
-				       fifo_engine[i].mask, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->context_attach = gk104_fifo_context_attach;
-	nv_parent(chan)->context_detach = gk104_fifo_context_detach;
-	chan->engine = i;
-
-	usermem = chan->base.chid * 0x200;
-	ioffset = args->v0.ioffset;
-	ilength = order_base_2(args->v0.ilength / 8);
-
-	for (i = 0; i < 0x200; i += 4)
-		nv_wo32(priv->user.mem, usermem + i, 0x00000000);
-
-	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
-	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
-	nv_wo32(base, 0x10, 0x0000face);
-	nv_wo32(base, 0x30, 0xfffff902);
-	nv_wo32(base, 0x48, lower_32_bits(ioffset));
-	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
-	nv_wo32(base, 0x84, 0x20400000);
-	nv_wo32(base, 0x94, 0x30000001);
-	nv_wo32(base, 0x9c, 0x00000100);
-	nv_wo32(base, 0xac, 0x0000001f);
-	nv_wo32(base, 0xe8, chan->base.chid);
-	nv_wo32(base, 0xb8, 0xf8000000);
-	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
-	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-gk104_fifo_chan_init(struct nvkm_object *object)
-{
-	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
-	struct gk104_fifo_priv *priv = (void *)object->engine;
-	struct gk104_fifo_chan *chan = (void *)object;
-	u32 chid = chan->base.chid;
-	int ret;
-
-	ret = nvkm_fifo_channel_init(&chan->base);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
-	nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
-
-	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
-		nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
-		gk104_fifo_runlist_update(priv, chan->engine);
-		nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
-	}
-
-	return 0;
-}
-
-static int
-gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
-{
-	struct gk104_fifo_priv *priv = (void *)object->engine;
-	struct gk104_fifo_chan *chan = (void *)object;
-	u32 chid = chan->base.chid;
-
-	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
-		nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
-		gk104_fifo_runlist_update(priv, chan->engine);
-	}
-
-	nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
-	return nvkm_fifo_channel_fini(&chan->base, suspend);
-}
-
-struct nvkm_ofuncs
-gk104_fifo_chan_ofuncs = {
-	.ctor = gk104_fifo_chan_ctor,
-	.dtor = _nvkm_fifo_channel_dtor,
-	.init = gk104_fifo_chan_init,
-	.fini = gk104_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-gk104_fifo_sclass[] = {
-	{ KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - instmem heap and vm setup
- ******************************************************************************/
-
-static int
-gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-			struct nvkm_oclass *oclass, void *data, u32 size,
-			struct nvkm_object **pobject)
-{
-	struct gk104_fifo_base *base;
-	int ret;
-
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
-				       0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
-			      &base->pgd);
-	if (ret)
-		return ret;
-
-	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
-	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
-	nv_wo32(base, 0x0208, 0xffffffff);
-	nv_wo32(base, 0x020c, 0x000000ff);
-
-	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static void
-gk104_fifo_context_dtor(struct nvkm_object *object)
-{
-	struct gk104_fifo_base *base = (void *)object;
-	nvkm_vm_ref(NULL, &base->vm, base->pgd);
-	nvkm_gpuobj_ref(NULL, &base->pgd);
-	nvkm_fifo_context_destroy(&base->base);
-}
-
-static struct nvkm_oclass
-gk104_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_fifo_context_ctor,
-		.dtor = gk104_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static inline int
-gk104_fifo_engidx(struct gk104_fifo_priv *priv, u32 engn)
-{
-	switch (engn) {
-	case NVDEV_ENGINE_GR    :
-	case NVDEV_ENGINE_CE2   : engn = 0; break;
-	case NVDEV_ENGINE_MSVLD : engn = 1; break;
-	case NVDEV_ENGINE_MSPPP : engn = 2; break;
-	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
-	case NVDEV_ENGINE_CE0   : engn = 4; break;
-	case NVDEV_ENGINE_CE1   : engn = 5; break;
-	case NVDEV_ENGINE_MSENC : engn = 6; break;
-	default:
-		return -1;
-	}
-
-	return engn;
+		nvkm_error(subdev, "runlist %d update timeout\n", engine);
+	mutex_unlock(&subdev->mutex);
 }
 
 static inline struct nvkm_engine *
-gk104_fifo_engine(struct gk104_fifo_priv *priv, u32 engn)
+gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
 {
-	if (engn >= ARRAY_SIZE(fifo_engine))
-		return NULL;
-	return nvkm_engine(priv, fifo_engine[engn].subdev);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u64 subdevs = gk104_fifo_engine_subdev(engn);
+	if (subdevs)
+		return nvkm_device_engine(device, __ffs(subdevs));
+	return NULL;
 }
 
 static void
 gk104_fifo_recover_work(struct work_struct *work)
 {
-	struct gk104_fifo_priv *priv = container_of(work, typeof(*priv), fault);
-	struct nvkm_object *engine;
+	struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_engine *engine;
 	unsigned long flags;
 	u32 engn, engm = 0;
 	u64 mask, todo;
 
-	spin_lock_irqsave(&priv->base.lock, flags);
-	mask = priv->mask;
-	priv->mask = 0ULL;
-	spin_unlock_irqrestore(&priv->base.lock, flags);
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	mask = fifo->mask;
+	fifo->mask = 0ULL;
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
 
 	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
-		engm |= 1 << gk104_fifo_engidx(priv, engn);
-	nv_mask(priv, 0x002630, engm, engm);
+		engm |= 1 << gk104_fifo_subdev_engine(engn);
+	nvkm_mask(device, 0x002630, engm, engm);
 
 	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
-		if ((engine = (void *)nvkm_engine(priv, engn))) {
-			nv_ofuncs(engine)->fini(engine, false);
-			WARN_ON(nv_ofuncs(engine)->init(engine));
+		if ((engine = nvkm_device_engine(device, engn))) {
+			nvkm_subdev_fini(&engine->subdev, false);
+			WARN_ON(nvkm_subdev_init(&engine->subdev));
 		}
-		gk104_fifo_runlist_update(priv, gk104_fifo_engidx(priv, engn));
+		gk104_fifo_runlist_update(fifo, gk104_fifo_subdev_engine(engn));
 	}
 
-	nv_wr32(priv, 0x00262c, engm);
-	nv_mask(priv, 0x002630, engm, 0x00000000);
+	nvkm_wr32(device, 0x00262c, engm);
+	nvkm_mask(device, 0x002630, engm, 0x00000000);
 }
 
 static void
-gk104_fifo_recover(struct gk104_fifo_priv *priv, struct nvkm_engine *engine,
+gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
 		  struct gk104_fifo_chan *chan)
 {
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 chid = chan->base.chid;
-	unsigned long flags;
 
-	nv_error(priv, "%s engine fault on channel %d, recovering...\n",
-		       nv_subdev(engine)->name, chid);
+	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
+		   nvkm_subdev_name[engine->subdev.index], chid);
+	assert_spin_locked(&fifo->base.lock);
 
-	nv_mask(priv, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
-	chan->state = KILLED;
+	nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
+	list_del_init(&chan->head);
+	chan->killed = true;
 
-	spin_lock_irqsave(&priv->base.lock, flags);
-	priv->mask |= 1ULL << nv_engidx(engine);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	schedule_work(&priv->fault);
-}
-
-static int
-gk104_fifo_swmthd(struct gk104_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
-{
-	struct gk104_fifo_chan *chan = NULL;
-	struct nvkm_handle *bind;
-	unsigned long flags;
-	int ret = -EINVAL;
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	if (likely(chid >= priv->base.min && chid <= priv->base.max))
-		chan = (void *)priv->base.channel[chid];
-	if (unlikely(!chan))
-		goto out;
-
-	bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
-	if (likely(bind)) {
-		if (!mthd || !nv_call(bind->object, mthd, data))
-			ret = 0;
-		nvkm_namedb_put(bind);
-	}
-
-out:
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	return ret;
+	fifo->mask |= 1ULL << engine->subdev.index;
+	schedule_work(&fifo->fault);
 }
 
 static const struct nvkm_enum
@@ -516,18 +151,16 @@
 };
 
 static void
-gk104_fifo_intr_bind(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_bind(struct gk104_fifo *fifo)
 {
-	u32 intr = nv_rd32(priv, 0x00252c);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x00252c);
 	u32 code = intr & 0x000000ff;
-	const struct nvkm_enum *en;
-	char enunk[6] = "";
+	const struct nvkm_enum *en =
+		nvkm_enum_find(gk104_fifo_bind_reason, code);
 
-	en = nvkm_enum_find(gk104_fifo_bind_reason, code);
-	if (!en)
-		snprintf(enunk, sizeof(enunk), "UNK%02x", code);
-
-	nv_error(priv, "BIND_ERROR [ %s ]\n", en ? en->name : enunk);
+	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
 }
 
 static const struct nvkm_enum
@@ -537,14 +170,17 @@
 };
 
 static void
-gk104_fifo_intr_sched_ctxsw(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
 {
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct nvkm_engine *engine;
 	struct gk104_fifo_chan *chan;
+	unsigned long flags;
 	u32 engn;
 
-	for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
-		u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04));
+	spin_lock_irqsave(&fifo->base.lock, flags);
+	for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) {
+		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
 		u32 busy = (stat & 0x80000000);
 		u32 next = (stat & 0x07ff0000) >> 16;
 		u32 chsw = (stat & 0x00008000);
@@ -555,32 +191,35 @@
 		(void)save;
 
 		if (busy && chsw) {
-			if (!(chan = (void *)priv->base.channel[chid]))
-				continue;
-			if (!(engine = gk104_fifo_engine(priv, engn)))
-				continue;
-			gk104_fifo_recover(priv, engine, chan);
+			list_for_each_entry(chan, &fifo->engine[engn].chan, head) {
+				if (chan->base.chid == chid) {
+					engine = gk104_fifo_engine(fifo, engn);
+					if (!engine)
+						break;
+					gk104_fifo_recover(fifo, engine, chan);
+					break;
+				}
+			}
 		}
 	}
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
 }
 
 static void
-gk104_fifo_intr_sched(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_sched(struct gk104_fifo *fifo)
 {
-	u32 intr = nv_rd32(priv, 0x00254c);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x00254c);
 	u32 code = intr & 0x000000ff;
-	const struct nvkm_enum *en;
-	char enunk[6] = "";
+	const struct nvkm_enum *en =
+		nvkm_enum_find(gk104_fifo_sched_reason, code);
 
-	en = nvkm_enum_find(gk104_fifo_sched_reason, code);
-	if (!en)
-		snprintf(enunk, sizeof(enunk), "UNK%02x", code);
-
-	nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);
+	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
 
 	switch (code) {
 	case 0x0a:
-		gk104_fifo_intr_sched_ctxsw(priv);
+		gk104_fifo_intr_sched_ctxsw(fifo);
 		break;
 	default:
 		break;
@@ -588,38 +227,42 @@
 }
 
 static void
-gk104_fifo_intr_chsw(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
 {
-	u32 stat = nv_rd32(priv, 0x00256c);
-	nv_error(priv, "CHSW_ERROR 0x%08x\n", stat);
-	nv_wr32(priv, 0x00256c, stat);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x00256c);
+	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
+	nvkm_wr32(device, 0x00256c, stat);
 }
 
 static void
-gk104_fifo_intr_dropped_fault(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
 {
-	u32 stat = nv_rd32(priv, 0x00259c);
-	nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x00259c);
+	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
 }
 
 static const struct nvkm_enum
 gk104_fifo_fault_engine[] = {
-	{ 0x00, "GR", NULL, NVDEV_ENGINE_GR },
-	{ 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
-	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
-	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
-	{ 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
-	{ 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
-	{ 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
-	{ 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD },
-	{ 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP },
+	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
+	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+	{ 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
+	{ 0x07, "PBDMA0", NULL, NVKM_ENGINE_FIFO },
+	{ 0x08, "PBDMA1", NULL, NVKM_ENGINE_FIFO },
+	{ 0x09, "PBDMA2", NULL, NVKM_ENGINE_FIFO },
+	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
+	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
 	{ 0x13, "PERF" },
-	{ 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
-	{ 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 },
-	{ 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 },
+	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
+	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
+	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
 	{ 0x17, "PMU" },
-	{ 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC },
-	{ 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 },
+	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
+	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
 	{}
 };
 
@@ -708,80 +351,65 @@
 };
 
 static void
-gk104_fifo_intr_fault(struct gk104_fifo_priv *priv, int unit)
+gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
 {
-	u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
-	u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
-	u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
-	u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
 	u32 gpc    = (stat & 0x1f000000) >> 24;
 	u32 client = (stat & 0x00001f00) >> 8;
 	u32 write  = (stat & 0x00000080);
 	u32 hub    = (stat & 0x00000040);
 	u32 reason = (stat & 0x0000000f);
-	struct nvkm_object *engctx = NULL, *object;
-	struct nvkm_engine *engine = NULL;
 	const struct nvkm_enum *er, *eu, *ec;
-	char erunk[6] = "";
-	char euunk[6] = "";
-	char ecunk[6] = "";
-	char gpcid[3] = "";
+	struct nvkm_engine *engine = NULL;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	char gpcid[8] = "";
 
 	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
-	if (!er)
-		snprintf(erunk, sizeof(erunk), "UNK%02X", reason);
-
 	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
-	if (eu) {
-		switch (eu->data2) {
-		case NVDEV_SUBDEV_BAR:
-			nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
-			break;
-		case NVDEV_SUBDEV_INSTMEM:
-			nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
-			break;
-		case NVDEV_ENGINE_IFB:
-			nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
-			break;
-		default:
-			engine = nvkm_engine(priv, eu->data2);
-			if (engine)
-				engctx = nvkm_engctx_get(engine, inst);
-			break;
-		}
-	} else {
-		snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
-	}
-
 	if (hub) {
 		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
 	} else {
 		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
-		snprintf(gpcid, sizeof(gpcid), "%d", gpc);
+		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
 	}
 
-	if (!ec)
-		snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);
-
-	nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on "
-		       "channel 0x%010llx [%s]\n", write ? "write" : "read",
-		 (u64)vahi << 32 | valo, er ? er->name : erunk,
-		 eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
-		 ec ? ec->name : ecunk, (u64)inst << 12,
-		 nvkm_client_name(engctx));
-
-	object = engctx;
-	while (object) {
-		switch (nv_mclass(object)) {
-		case KEPLER_CHANNEL_GPFIFO_A:
-		case MAXWELL_CHANNEL_GPFIFO_A:
-			gk104_fifo_recover(priv, engine, (void *)object);
+	if (eu) {
+		switch (eu->data2) {
+		case NVKM_SUBDEV_BAR:
+			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
+			break;
+		case NVKM_SUBDEV_INSTMEM:
+			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
+			break;
+		case NVKM_ENGINE_IFB:
+			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
+			break;
+		default:
+			engine = nvkm_device_engine(device, eu->data2);
 			break;
 		}
-		object = object->parent;
 	}
 
-	nvkm_engctx_put(engctx);
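+	/* find the channel that owns the faulting instance block */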
+	chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);
+
+	nvkm_error(subdev,
+		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
+		   "reason %02x [%s] on channel %d [%010llx %s]\n",
+		   write ? "write" : "read", (u64)vahi << 32 | valo,
+		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
+		   reason, er ? er->name : "", chan ? chan->chid : -1,
+		   (u64)inst << 12,
+		   chan ? chan->object.client->name : "unknown");
+
+	if (engine && chan)
+		gk104_fifo_recover(fifo, engine, (void *)chan);
+	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 }
 
 static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
@@ -819,35 +447,42 @@
 };
 
 static void
-gk104_fifo_intr_pbdma_0(struct gk104_fifo_priv *priv, int unit)
+gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
 {
-	u32 mask = nv_rd32(priv, 0x04010c + (unit * 0x2000));
-	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)) & mask;
-	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
-	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
-	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
+	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
+	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
+	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
+	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00003ffc);
 	u32 show = stat;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	char msg[128];
 
 	if (stat & 0x00800000) {
-		if (!gk104_fifo_swmthd(priv, chid, mthd, data))
-			show &= ~0x00800000;
-		nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
+		if (device->sw) {
+			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
+				show &= ~0x00800000;
+		}
+		nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
 	}
 
 	if (show) {
-		nv_error(priv, "PBDMA%d:", unit);
-		nvkm_bitfield_print(gk104_fifo_pbdma_intr_0, show);
-		pr_cont("\n");
-		nv_error(priv,
-			 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
-			 unit, chid,
-			 nvkm_client_name_for_fifo_chid(&priv->base, chid),
-			 subc, mthd, data);
+		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
+		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
+				   "subc %d mthd %04x data %08x\n",
+			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
+			   chan ? chan->object.client->name : "unknown",
+			   subc, mthd, data);
+		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 	}
 
-	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
+	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
 }
 
 static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
@@ -860,280 +495,266 @@
 };
 
 static void
-gk104_fifo_intr_pbdma_1(struct gk104_fifo_priv *priv, int unit)
+gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
 {
-	u32 mask = nv_rd32(priv, 0x04014c + (unit * 0x2000));
-	u32 stat = nv_rd32(priv, 0x040148 + (unit * 0x2000)) & mask;
-	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
+	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
+	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
+	char msg[128];
 
 	if (stat) {
-		nv_error(priv, "PBDMA%d:", unit);
-		nvkm_bitfield_print(gk104_fifo_pbdma_intr_1, stat);
-		pr_cont("\n");
-		nv_error(priv, "PBDMA%d: ch %d %08x %08x\n", unit, chid,
-			 nv_rd32(priv, 0x040150 + (unit * 0x2000)),
-			 nv_rd32(priv, 0x040154 + (unit * 0x2000)));
+		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
+		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
+			   unit, stat, msg, chid,
+			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
+			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
 	}
 
-	nv_wr32(priv, 0x040148 + (unit * 0x2000), stat);
+	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
 }
 
 static void
-gk104_fifo_intr_runlist(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
 {
-	u32 mask = nv_rd32(priv, 0x002a00);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 mask = nvkm_rd32(device, 0x002a00);
 	while (mask) {
 		u32 engn = __ffs(mask);
-		wake_up(&priv->engine[engn].wait);
-		nv_wr32(priv, 0x002a00, 1 << engn);
+		wake_up(&fifo->engine[engn].wait);
+		nvkm_wr32(device, 0x002a00, 1 << engn);
 		mask &= ~(1 << engn);
 	}
 }
 
 static void
-gk104_fifo_intr_engine(struct gk104_fifo_priv *priv)
+gk104_fifo_intr_engine(struct gk104_fifo *fifo)
 {
-	nvkm_fifo_uevent(&priv->base);
+	nvkm_fifo_uevent(&fifo->base);
 }
 
-static void
-gk104_fifo_intr(struct nvkm_subdev *subdev)
+void
+gk104_fifo_intr(struct nvkm_fifo *base)
 {
-	struct gk104_fifo_priv *priv = (void *)subdev;
-	u32 mask = nv_rd32(priv, 0x002140);
-	u32 stat = nv_rd32(priv, 0x002100) & mask;
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask = nvkm_rd32(device, 0x002140);
+	u32 stat = nvkm_rd32(device, 0x002100) & mask;
 
 	if (stat & 0x00000001) {
-		gk104_fifo_intr_bind(priv);
-		nv_wr32(priv, 0x002100, 0x00000001);
+		gk104_fifo_intr_bind(fifo);
+		nvkm_wr32(device, 0x002100, 0x00000001);
 		stat &= ~0x00000001;
 	}
 
 	if (stat & 0x00000010) {
-		nv_error(priv, "PIO_ERROR\n");
-		nv_wr32(priv, 0x002100, 0x00000010);
+		nvkm_error(subdev, "PIO_ERROR\n");
+		nvkm_wr32(device, 0x002100, 0x00000010);
 		stat &= ~0x00000010;
 	}
 
 	if (stat & 0x00000100) {
-		gk104_fifo_intr_sched(priv);
-		nv_wr32(priv, 0x002100, 0x00000100);
+		gk104_fifo_intr_sched(fifo);
+		nvkm_wr32(device, 0x002100, 0x00000100);
 		stat &= ~0x00000100;
 	}
 
 	if (stat & 0x00010000) {
-		gk104_fifo_intr_chsw(priv);
-		nv_wr32(priv, 0x002100, 0x00010000);
+		gk104_fifo_intr_chsw(fifo);
+		nvkm_wr32(device, 0x002100, 0x00010000);
 		stat &= ~0x00010000;
 	}
 
 	if (stat & 0x00800000) {
-		nv_error(priv, "FB_FLUSH_TIMEOUT\n");
-		nv_wr32(priv, 0x002100, 0x00800000);
+		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
+		nvkm_wr32(device, 0x002100, 0x00800000);
 		stat &= ~0x00800000;
 	}
 
 	if (stat & 0x01000000) {
-		nv_error(priv, "LB_ERROR\n");
-		nv_wr32(priv, 0x002100, 0x01000000);
+		nvkm_error(subdev, "LB_ERROR\n");
+		nvkm_wr32(device, 0x002100, 0x01000000);
 		stat &= ~0x01000000;
 	}
 
 	if (stat & 0x08000000) {
-		gk104_fifo_intr_dropped_fault(priv);
-		nv_wr32(priv, 0x002100, 0x08000000);
+		gk104_fifo_intr_dropped_fault(fifo);
+		nvkm_wr32(device, 0x002100, 0x08000000);
 		stat &= ~0x08000000;
 	}
 
 	if (stat & 0x10000000) {
-		u32 mask = nv_rd32(priv, 0x00259c);
+		u32 mask = nvkm_rd32(device, 0x00259c);
 		while (mask) {
 			u32 unit = __ffs(mask);
-			gk104_fifo_intr_fault(priv, unit);
-			nv_wr32(priv, 0x00259c, (1 << unit));
+			gk104_fifo_intr_fault(fifo, unit);
+			nvkm_wr32(device, 0x00259c, (1 << unit));
 			mask &= ~(1 << unit);
 		}
 		stat &= ~0x10000000;
 	}
 
 	if (stat & 0x20000000) {
-		u32 mask = nv_rd32(priv, 0x0025a0);
+		u32 mask = nvkm_rd32(device, 0x0025a0);
 		while (mask) {
 			u32 unit = __ffs(mask);
-			gk104_fifo_intr_pbdma_0(priv, unit);
-			gk104_fifo_intr_pbdma_1(priv, unit);
-			nv_wr32(priv, 0x0025a0, (1 << unit));
+			gk104_fifo_intr_pbdma_0(fifo, unit);
+			gk104_fifo_intr_pbdma_1(fifo, unit);
+			nvkm_wr32(device, 0x0025a0, (1 << unit));
 			mask &= ~(1 << unit);
 		}
 		stat &= ~0x20000000;
 	}
 
 	if (stat & 0x40000000) {
-		gk104_fifo_intr_runlist(priv);
+		gk104_fifo_intr_runlist(fifo);
 		stat &= ~0x40000000;
 	}
 
 	if (stat & 0x80000000) {
-		nv_wr32(priv, 0x002100, 0x80000000);
-		gk104_fifo_intr_engine(priv);
+		nvkm_wr32(device, 0x002100, 0x80000000);
+		gk104_fifo_intr_engine(fifo);
 		stat &= ~0x80000000;
 	}
 
 	if (stat) {
-		nv_error(priv, "INTR 0x%08x\n", stat);
-		nv_mask(priv, 0x002140, stat, 0x00000000);
-		nv_wr32(priv, 0x002100, stat);
+		nvkm_error(subdev, "INTR %08x\n", stat);
+		nvkm_mask(device, 0x002140, stat, 0x00000000);
+		nvkm_wr32(device, 0x002100, stat);
 	}
 }
 
-static void
-gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
+void
+gk104_fifo_fini(struct nvkm_fifo *base)
 {
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
-}
-
-static void
-gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
-{
-	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
-	nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
-}
-
-static const struct nvkm_event_func
-gk104_fifo_uevent_func = {
-	.ctor = nvkm_fifo_uevent_ctor,
-	.init = gk104_fifo_uevent_init,
-	.fini = gk104_fifo_uevent_fini,
-};
-
-int
-gk104_fifo_fini(struct nvkm_object *object, bool suspend)
-{
-	struct gk104_fifo_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fifo_fini(&priv->base, suspend);
-	if (ret)
-		return ret;
-
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
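+	/* ensure any pending engine-recovery work has finished */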
+	flush_work(&fifo->fault);
 	/* allow mmu fault interrupts, even when we're not using fifo */
-	nv_mask(priv, 0x002140, 0x10000000, 0x10000000);
-	return 0;
+	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
 }
 
 int
-gk104_fifo_init(struct nvkm_object *object)
+gk104_fifo_oneinit(struct nvkm_fifo *base)
 {
-	struct gk104_fifo_priv *priv = (void *)object;
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	int ret, i;
 
-	ret = nvkm_fifo_init(&priv->base);
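+	/* allocate two runlist buffers per engine, so updates can double-buffer */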
+	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      0x8000, 0x1000, false,
+				      &fifo->engine[i].runlist[0]);
+		if (ret)
+			return ret;
+
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      0x8000, 0x1000, false,
+				      &fifo->engine[i].runlist[1]);
+		if (ret)
+			return ret;
+
+		init_waitqueue_head(&fifo->engine[i].wait);
+		INIT_LIST_HEAD(&fifo->engine[i].chan);
+	}
+
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+			      fifo->base.nr * 0x200, 0x1000, true,
+			      &fifo->user.mem);
 	if (ret)
 		return ret;
 
-	/* enable all available PBDMA units */
-	nv_wr32(priv, 0x000204, 0xffffffff);
-	priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
-	nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);
+	ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
+			    &fifo->user.bar);
+	if (ret)
+		return ret;
 
-	/* PBDMA[n] */
-	for (i = 0; i < priv->spoon_nr; i++) {
-		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
-		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
-		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
-	}
-
-	/* PBDMA[n].HCE */
-	for (i = 0; i < priv->spoon_nr; i++) {
-		nv_wr32(priv, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
-		nv_wr32(priv, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
-	}
-
-	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
-
-	nv_wr32(priv, 0x002100, 0xffffffff);
-	nv_wr32(priv, 0x002140, 0x7fffffff);
+	nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
 	return 0;
 }
 
 void
-gk104_fifo_dtor(struct nvkm_object *object)
+gk104_fifo_init(struct nvkm_fifo *base)
 {
-	struct gk104_fifo_priv *priv = (void *)object;
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	int i;
 
-	nvkm_gpuobj_unmap(&priv->user.bar);
-	nvkm_gpuobj_ref(NULL, &priv->user.mem);
+	/* enable all available PBDMA units */
+	nvkm_wr32(device, 0x000204, 0xffffffff);
+	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
+	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);
 
-	for (i = 0; i < FIFO_ENGINE_NR; i++) {
-		nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[1]);
-		nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[0]);
+	/* PBDMA[n] */
+	for (i = 0; i < fifo->spoon_nr; i++) {
+		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
 	}
 
-	nvkm_fifo_destroy(&priv->base);
+	/* PBDMA[n].HCE */
+	for (i = 0; i < fifo->spoon_nr; i++) {
+		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
+		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
+	}
+
+	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
+
+	nvkm_wr32(device, 0x002100, 0xffffffff);
+	nvkm_wr32(device, 0x002140, 0x7fffffff);
+}
+
+void *
+gk104_fifo_dtor(struct nvkm_fifo *base)
+{
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	int i;
+
+	nvkm_vm_put(&fifo->user.bar);
+	nvkm_memory_del(&fifo->user.mem);
+
+	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+		nvkm_memory_del(&fifo->engine[i].runlist[1]);
+		nvkm_memory_del(&fifo->engine[i].runlist[0]);
+	}
+
+	return fifo;
 }
 
 int
-gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+		int index, int nr, struct nvkm_fifo **pfifo)
 {
-	struct gk104_fifo_impl *impl = (void *)oclass;
-	struct gk104_fifo_priv *priv;
-	int ret, i;
+	struct gk104_fifo *fifo;
 
-	ret = nvkm_fifo_create(parent, engine, oclass, 0,
-			       impl->channels - 1, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+		return -ENOMEM;
+	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);
+	*pfifo = &fifo->base;
 
-	INIT_WORK(&priv->fault, gk104_fifo_recover_work);
-
-	for (i = 0; i < FIFO_ENGINE_NR; i++) {
-		ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
-				      0, &priv->engine[i].runlist[0]);
-		if (ret)
-			return ret;
-
-		ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
-				      0, &priv->engine[i].runlist[1]);
-		if (ret)
-			return ret;
-
-		init_waitqueue_head(&priv->engine[i].wait);
-	}
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, impl->channels * 0x200,
-			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
-			      &priv->user.bar);
-	if (ret)
-		return ret;
-
-	ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &priv->base.uevent);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = gk104_fifo_intr;
-	nv_engine(priv)->cclass = &gk104_fifo_cclass;
-	nv_engine(priv)->sclass = gk104_fifo_sclass;
-	return 0;
+	return nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
 }
 
-struct nvkm_oclass *
-gk104_fifo_oclass = &(struct gk104_fifo_impl) {
-	.base.handle = NV_ENGINE(FIFO, 0xe0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_fifo_ctor,
-		.dtor = gk104_fifo_dtor,
-		.init = gk104_fifo_init,
-		.fini = gk104_fifo_fini,
+static const struct nvkm_fifo_func
+gk104_fifo = {
+	.dtor = gk104_fifo_dtor,
+	.oneinit = gk104_fifo_oneinit,
+	.init = gk104_fifo_init,
+	.fini = gk104_fifo_fini,
+	.intr = gk104_fifo_intr,
+	.uevent_init = gk104_fifo_uevent_init,
+	.uevent_fini = gk104_fifo_uevent_fini,
+	.chan = {
+		&gk104_fifo_gpfifo_oclass,
+		NULL
 	},
-	.channels = 4096,
-}.base;
+};
+
+int
+gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 318d30d..5afd9b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -1,18 +1,77 @@
-#ifndef __NVKM_FIFO_NVE0_H__
-#define __NVKM_FIFO_NVE0_H__
-#include <engine/fifo.h>
+#ifndef __GK104_FIFO_H__
+#define __GK104_FIFO_H__
+#define gk104_fifo(p) container_of((p), struct gk104_fifo, base)
+#include "priv.h"
 
-int  gk104_fifo_ctor(struct nvkm_object *, struct nvkm_object *,
-		    struct nvkm_oclass *, void *, u32,
-		    struct nvkm_object **);
-void gk104_fifo_dtor(struct nvkm_object *);
-int  gk104_fifo_init(struct nvkm_object *);
-int  gk104_fifo_fini(struct nvkm_object *, bool);
+#include <subdev/mmu.h>
 
-struct gk104_fifo_impl {
-	struct nvkm_oclass base;
-	u32 channels;
+struct gk104_fifo_engn {
+	struct nvkm_memory *runlist[2];
+	int cur_runlist;
+	wait_queue_head_t wait;
+	struct list_head chan;
 };
 
-extern struct nvkm_ofuncs gk104_fifo_chan_ofuncs;
+struct gk104_fifo {
+	struct nvkm_fifo base;
+
+	struct work_struct fault;
+	u64 mask;
+
+	struct gk104_fifo_engn engine[7];
+	struct {
+		struct nvkm_memory *mem;
+		struct nvkm_vma bar;
+	} user;
+	int spoon_nr;
+};
+
+int gk104_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
+		    int index, int nr, struct nvkm_fifo **);
+void *gk104_fifo_dtor(struct nvkm_fifo *);
+int gk104_fifo_oneinit(struct nvkm_fifo *);
+void gk104_fifo_init(struct nvkm_fifo *);
+void gk104_fifo_fini(struct nvkm_fifo *);
+void gk104_fifo_intr(struct nvkm_fifo *);
+void gk104_fifo_uevent_init(struct nvkm_fifo *);
+void gk104_fifo_uevent_fini(struct nvkm_fifo *);
+void gk104_fifo_runlist_update(struct gk104_fifo *, u32 engine);
+
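+/* runlist engine index -> mask of subdevs scheduled on that runlist */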
+static inline u64
+gk104_fifo_engine_subdev(int engine)
+{
+	switch (engine) {
+	case 0: return (1ULL << NVKM_ENGINE_GR) |
+		       (1ULL << NVKM_ENGINE_SW) |
+		       (1ULL << NVKM_ENGINE_CE2);
+	case 1: return (1ULL << NVKM_ENGINE_MSPDEC);
+	case 2: return (1ULL << NVKM_ENGINE_MSPPP);
+	case 3: return (1ULL << NVKM_ENGINE_MSVLD);
+	case 4: return (1ULL << NVKM_ENGINE_CE0);
+	case 5: return (1ULL << NVKM_ENGINE_CE1);
+	case 6: return (1ULL << NVKM_ENGINE_MSENC);
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+static inline int
+gk104_fifo_subdev_engine(int subdev)
+{
+	switch (subdev) {
+	case NVKM_ENGINE_GR:
+	case NVKM_ENGINE_SW:
+	case NVKM_ENGINE_CE2   : return 0;
+	case NVKM_ENGINE_MSPDEC: return 1;
+	case NVKM_ENGINE_MSPPP : return 2;
+	case NVKM_ENGINE_MSVLD : return 3;
+	case NVKM_ENGINE_CE0   : return 4;
+	case NVKM_ENGINE_CE1   : return 5;
+	case NVKM_ENGINE_MSENC : return 6;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index 9270922..ce01c1a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -22,15 +22,25 @@
  * Authors: Ben Skeggs
  */
 #include "gk104.h"
+#include "changk104.h"
 
-struct nvkm_oclass *
-gk208_fifo_oclass = &(struct gk104_fifo_impl) {
-	.base.handle = NV_ENGINE(FIFO, 0x08),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_fifo_ctor,
-		.dtor = gk104_fifo_dtor,
-		.init = gk104_fifo_init,
-		.fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+gk208_fifo = {
+	.dtor = gk104_fifo_dtor,
+	.oneinit = gk104_fifo_oneinit,
+	.init = gk104_fifo_init,
+	.fini = gk104_fifo_fini,
+	.intr = gk104_fifo_intr,
+	.uevent_init = gk104_fifo_uevent_init,
+	.uevent_fini = gk104_fifo_uevent_fini,
+	.chan = {
+		&gk104_fifo_gpfifo_oclass,
+		NULL
 	},
-	.channels = 1024,
-}.base;
+};
+
+int
+gk208_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return gk104_fifo_new_(&gk208_fifo, device, index, 1024, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index b30dc87..b47fe98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -20,15 +20,25 @@
  * DEALINGS IN THE SOFTWARE.
  */
 #include "gk104.h"
+#include "changk104.h"
 
-struct nvkm_oclass *
-gk20a_fifo_oclass = &(struct gk104_fifo_impl) {
-	.base.handle = NV_ENGINE(FIFO, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_fifo_ctor,
-		.dtor = gk104_fifo_dtor,
-		.init = gk104_fifo_init,
-		.fini = gk104_fifo_fini,
+static const struct nvkm_fifo_func
+gk20a_fifo = {
+	.dtor = gk104_fifo_dtor,
+	.oneinit = gk104_fifo_oneinit,
+	.init = gk104_fifo_init,
+	.fini = gk104_fifo_fini,
+	.intr = gk104_fifo_intr,
+	.uevent_init = gk104_fifo_uevent_init,
+	.uevent_fini = gk104_fifo_uevent_fini,
+	.chan = {
+		&gk104_fifo_gpfifo_oclass,
+		NULL
 	},
-	.channels = 128,
-}.base;
+};
+
+int
+gk20a_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return gk104_fifo_new_(&gk20a_fifo, device, index, 128, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
index 749d525..2db629f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c
@@ -22,36 +22,25 @@
  * Authors: Ben Skeggs
  */
 #include "gk104.h"
+#include "changk104.h"
 
-#include <nvif/class.h>
-
-static struct nvkm_oclass
-gm204_fifo_sclass[] = {
-	{ MAXWELL_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
-	{}
+static const struct nvkm_fifo_func
+gm204_fifo = {
+	.dtor = gk104_fifo_dtor,
+	.oneinit = gk104_fifo_oneinit,
+	.init = gk104_fifo_init,
+	.fini = gk104_fifo_fini,
+	.intr = gk104_fifo_intr,
+	.uevent_init = gk104_fifo_uevent_init,
+	.uevent_fini = gk104_fifo_uevent_fini,
+	.chan = {
+		&gm204_fifo_gpfifo_oclass,
+		NULL
+	},
 };
 
-static int
-gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+gm204_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
 {
-	int ret = gk104_fifo_ctor(parent, engine, oclass, data, size, pobject);
-	if (ret == 0) {
-		struct gk104_fifo_priv *priv = (void *)*pobject;
-		nv_engine(priv)->sclass = gm204_fifo_sclass;
-	}
-	return ret;
+	return gk104_fifo_new_(&gm204_fifo, device, index, 4096, pfifo);
 }
-
-struct nvkm_oclass *
-gm204_fifo_oclass = &(struct gk104_fifo_impl) {
-	.base.handle = NV_ENGINE(FIFO, 0x24),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_fifo_ctor,
-		.dtor = gk104_fifo_dtor,
-		.init = gk104_fifo_init,
-		.fini = _nvkm_fifo_fini,
-	},
-	.channels = 4096,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
new file mode 100644
index 0000000..ae6375d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+static const struct nvkm_fifo_func
+gm20b_fifo = {
+	.dtor = gk104_fifo_dtor,
+	.oneinit = gk104_fifo_oneinit,
+	.init = gk104_fifo_init,
+	.fini = gk104_fifo_fini,
+	.intr = gk104_fifo_intr,
+	.uevent_init = gk104_fifo_uevent_init,
+	.uevent_fini = gk104_fifo_uevent_fini,
+	.chan = {
+		&gm204_fifo_gpfifo_oclass,
+		NULL
+	},
+};
+
+int
+gm20b_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return gk104_fifo_new_(&gm20b_fifo, device, index, 512, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
new file mode 100644
index 0000000..82013236
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+g84_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		    void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv50_channel_gpfifo_v0 v0;
+	} *args = data;
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nv50_fifo_chan *chan;
+	u64 ioffset, ilength;
+	int ret;
+
+	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+				   "pushbuf %llx ioffset %016llx "
+				   "ilength %08x\n",
+			   args->v0.version, args->v0.vm, args->v0.pushbuf,
+			   args->v0.ioffset, args->v0.ilength);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+				 oclass, chan);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	ioffset = args->v0.ioffset;
+	ilength = order_base_2(args->v0.ilength / 8);
+
+	nvkm_kmap(chan->ramfc);
+	nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
+	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+	nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
+	nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				     (4 << 24) /* SEARCH_FULL */ |
+				     (chan->ramht->gpuobj->node->offset >> 4));
+	nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
+	nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12);
+	nvkm_done(chan->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+g84_fifo_gpfifo_oclass = {
+	.base.oclass = G82_CHANNEL_GPFIFO,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = g84_fifo_gpfifo_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
new file mode 100644
index 0000000..e7cbc13
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changf100.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static u32
+gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
+{
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_SW    : return 0;
+	case NVKM_ENGINE_GR    : return 0x0210;
+	case NVKM_ENGINE_CE0   : return 0x0230;
+	case NVKM_ENGINE_CE1   : return 0x0240;
+	case NVKM_ENGINE_MSPDEC: return 0x0250;
+	case NVKM_ENGINE_MSPPP : return 0x0260;
+	case NVKM_ENGINE_MSVLD : return 0x0270;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+static int
+gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine, bool suspend)
+{
+	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	struct nvkm_subdev *subdev = &chan->fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_gpuobj *inst = chan->base.inst;
+	int ret = 0;
+
+	nvkm_wr32(device, 0x002634, chan->base.chid);
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x002634) == chan->base.chid)
+			break;
+	) < 0) {
+		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
+			   chan->base.chid, chan->base.object.client->name);
+		ret = -EBUSY;
+		if (suspend)
+			return ret;
+	}
+
+	if (offset) {
+		nvkm_kmap(inst);
+		nvkm_wo32(inst, offset + 0x00, 0x00000000);
+		nvkm_wo32(inst, offset + 0x04, 0x00000000);
+		nvkm_done(inst);
+	}
+
+	return ret;
+}
+
+static int
+gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine)
+{
+	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	struct nvkm_gpuobj *inst = chan->base.inst;
+
+	if (offset) {
+		u64 addr = chan->engn[engine->subdev.index].vma.offset;
+		nvkm_kmap(inst);
+		nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
+		nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
+		nvkm_done(inst);
+	}
+
+	return 0;
+}
+
+static void
+gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine)
+{
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
+}
+
+static int
+gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine,
+			      struct nvkm_object *object)
+{
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	int engn = engine->subdev.index;
+	int ret;
+
+	if (!gf100_fifo_gpfifo_engine_addr(engine))
+		return 0;
+
+	ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
+	if (ret)
+		return ret;
+
+	return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
+			       NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+}
+
+static void
+gf100_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
+{
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	struct gf100_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 coff = chan->base.chid * 8;
+
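+	/* drop the channel from the runlist unless recovery already removed it */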
+	if (!list_empty(&chan->head) && !chan->killed) {
+		list_del_init(&chan->head);
+		nvkm_mask(device, 0x003004 + coff, 0x00000001, 0x00000000);
+		gf100_fifo_runlist_update(fifo);
+	}
+
+	gf100_fifo_intr_engine(fifo);
+
+	nvkm_wr32(device, 0x003000 + coff, 0x00000000);
+}
+
+static void
+gf100_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
+{
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	struct gf100_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 addr = chan->base.inst->addr >> 12;
+	u32 coff = chan->base.chid * 8;
+
+	nvkm_wr32(device, 0x003000 + coff, 0xc0000000 | addr);
+
+	if (list_empty(&chan->head) && !chan->killed) {
+		list_add_tail(&chan->head, &fifo->chan);
+		nvkm_wr32(device, 0x003004 + coff, 0x001f0001);
+		gf100_fifo_runlist_update(fifo);
+	}
+}
+
+static void *
+gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
+{
+	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
+	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
+	nvkm_gpuobj_del(&chan->pgd);
+	return chan;
+}
+
+static const struct nvkm_fifo_chan_func
+gf100_fifo_gpfifo_func = {
+	.dtor = gf100_fifo_gpfifo_dtor,
+	.init = gf100_fifo_gpfifo_init,
+	.fini = gf100_fifo_gpfifo_fini,
+	.ntfy = g84_fifo_chan_ntfy,
+	.engine_ctor = gf100_fifo_gpfifo_engine_ctor,
+	.engine_dtor = gf100_fifo_gpfifo_engine_dtor,
+	.engine_init = gf100_fifo_gpfifo_engine_init,
+	.engine_fini = gf100_fifo_gpfifo_engine_fini,
+};
+
+static int
+gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		      void *data, u32 size, struct nvkm_object **pobject)
+{
+	union {
+		struct fermi_channel_gpfifo_v0 v0;
+	} *args = data;
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_object *parent = oclass->parent;
+	struct gf100_fifo_chan *chan;
+	u64 usermem, ioffset, ilength;
+	int ret, i;
+
+	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+				   "ioffset %016llx ilength %08x\n",
+			   args->v0.version, args->v0.vm, args->v0.ioffset,
+			   args->v0.ilength);
+	} else
+		return ret;
+
+	/* allocate channel */
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+	chan->fifo = fifo;
+	INIT_LIST_HEAD(&chan->head);
+
+	ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base,
+				  0x1000, 0x1000, true, args->v0.vm, 0,
+				  (1ULL << NVKM_ENGINE_CE0) |
+				  (1ULL << NVKM_ENGINE_CE1) |
+				  (1ULL << NVKM_ENGINE_GR) |
+				  (1ULL << NVKM_ENGINE_MSPDEC) |
+				  (1ULL << NVKM_ENGINE_MSPPP) |
+				  (1ULL << NVKM_ENGINE_MSVLD) |
+				  (1ULL << NVKM_ENGINE_SW),
+				  1, fifo->user.bar.offset, 0x1000,
+				  oclass, &chan->base);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+
+	/* page directory */
+	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
+	if (ret)
+		return ret;
+
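+	/* write the page directory address and VA limit into the instance block */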
+	nvkm_kmap(chan->base.inst);
+	nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
+	nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
+	nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
+	nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
+	nvkm_done(chan->base.inst);
+
+	ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+	if (ret)
+		return ret;
+
+	/* clear channel control registers */
+
+	usermem = chan->base.chid * 0x1000;
+	ioffset = args->v0.ioffset;
+	ilength = order_base_2(args->v0.ilength / 8);
+
+	nvkm_kmap(fifo->user.mem);
+	for (i = 0; i < 0x1000; i += 4)
+		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
+	nvkm_done(fifo->user.mem);
+	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
+
+	/* RAMFC */
+	nvkm_kmap(chan->base.inst);
+	nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
+	nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
+	nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
+	nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
+					 (ilength << 16));
+	nvkm_wo32(chan->base.inst, 0x54, 0x00000002);
+	nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
+	nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
+	nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
+	nvkm_wo32(chan->base.inst, 0xa4, 0x1f1f1f1f);
+	nvkm_wo32(chan->base.inst, 0xa8, 0x1f1f1f1f);
+	nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
+	nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
+	nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
+	nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
+	nvkm_done(chan->base.inst);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+gf100_fifo_gpfifo_oclass = {
+	.base.oclass = FERMI_CHANNEL_GPFIFO,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = gf100_fifo_gpfifo_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
new file mode 100644
index 0000000..0b81754
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changk104.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <subdev/fb.h>
+#include <subdev/mmu.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
+{
+	struct gk104_fifo *fifo = chan->fifo;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_client *client = chan->base.object.client;
+
+	nvkm_wr32(device, 0x002634, chan->base.chid);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
+			   chan->base.chid, client->name);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static u32
+gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
+{
+	switch (engine->subdev.index) {
+	case NVKM_ENGINE_SW    :
+	case NVKM_ENGINE_CE0   :
+	case NVKM_ENGINE_CE1   :
+	case NVKM_ENGINE_CE2   : return 0x0000;
+	case NVKM_ENGINE_GR    : return 0x0210;
+	case NVKM_ENGINE_MSPDEC: return 0x0250;
+	case NVKM_ENGINE_MSPPP : return 0x0260;
+	case NVKM_ENGINE_MSVLD : return 0x0270;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+static int
+gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine, bool suspend)
+{
+	const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct nvkm_gpuobj *inst = chan->base.inst;
+	int ret;
+
+	ret = gk104_fifo_gpfifo_kick(chan);
+	if (ret && suspend)
+		return ret;
+
+	if (offset) {
+		nvkm_kmap(inst);
+		nvkm_wo32(inst, offset + 0x00, 0x00000000);
+		nvkm_wo32(inst, offset + 0x04, 0x00000000);
+		nvkm_done(inst);
+	}
+
+	return ret;
+}
+
+static int
+gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine)
+{
+	const u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct nvkm_gpuobj *inst = chan->base.inst;
+
+	if (offset) {
+		u64 addr = chan->engn[engine->subdev.index].vma.offset;
+		nvkm_kmap(inst);
+		nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
+		nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
+		nvkm_done(inst);
+	}
+
+	return 0;
+}
+
+static void
+gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+	nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
+}
+
+static int
+gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
+			      struct nvkm_engine *engine,
+			      struct nvkm_object *object)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	int engn = engine->subdev.index;
+	int ret;
+
+	if (!gk104_fifo_gpfifo_engine_addr(engine))
+		return 0;
+
+	ret = nvkm_object_bind(object, NULL, 0, &chan->engn[engn].inst);
+	if (ret)
+		return ret;
+
+	return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
+			       NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+}
+
+static void
+gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct gk104_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 coff = chan->base.chid * 8;
+
+	if (!list_empty(&chan->head)) {
+		list_del_init(&chan->head);
+		nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
+		gk104_fifo_runlist_update(fifo, chan->engine);
+	}
+
+	nvkm_wr32(device, 0x800000 + coff, 0x00000000);
+}
+
+static void
+gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	struct gk104_fifo *fifo = chan->fifo;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	u32 addr = chan->base.inst->addr >> 12;
+	u32 coff = chan->base.chid * 8;
+
+	nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->engine << 16);
+	nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr);
+
+	if (list_empty(&chan->head) && !chan->killed) {
+		list_add_tail(&chan->head, &fifo->engine[chan->engine].chan);
+		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
+		gk104_fifo_runlist_update(fifo, chan->engine);
+		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
+	}
+}
+
+static void *
+gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
+{
+	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+	nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
+	nvkm_gpuobj_del(&chan->pgd);
+	return chan;
+}
+
+static const struct nvkm_fifo_chan_func
+gk104_fifo_gpfifo_func = {
+	.dtor = gk104_fifo_gpfifo_dtor,
+	.init = gk104_fifo_gpfifo_init,
+	.fini = gk104_fifo_gpfifo_fini,
+	.ntfy = g84_fifo_chan_ntfy,
+	.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
+	.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
+	.engine_init = gk104_fifo_gpfifo_engine_init,
+	.engine_fini = gk104_fifo_gpfifo_engine_fini,
+};
+
+int
+gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		      void *data, u32 size, struct nvkm_object **pobject)
+{
+	union {
+		struct kepler_channel_gpfifo_a_v0 v0;
+	} *args = data;
+	struct gk104_fifo *fifo = gk104_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_object *parent = oclass->parent;
+	struct gk104_fifo_chan *chan;
+	u64 usermem, ioffset, ilength;
+	u32 engines;
+	int ret, i;
+
+	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+				   "ioffset %016llx ilength %08x engine %08x\n",
+			   args->v0.version, args->v0.vm, args->v0.ioffset,
+			   args->v0.ilength, args->v0.engine);
+	} else
+		return ret;
+
+	/* determine which downstream engines are present */
+	for (i = 0, engines = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+		u64 subdevs = gk104_fifo_engine_subdev(i);
+		if (!nvkm_device_engine(device, __ffs64(subdevs)))
+			continue;
+		engines |= (1 << i);
+	}
+
+	/* if this is an engine mask query, we're done */
+	if (!args->v0.engine) {
+		args->v0.engine = engines;
+		return nvkm_object_new(oclass, NULL, 0, pobject);
+	}
+
+	/* check that we support a requested engine - note that the user
+	 * argument is a mask, allowing the user to request (for example)
+	 * *any* copy engine without caring which one is chosen.
+	 */
+	args->v0.engine &= engines;
+	if (!args->v0.engine) {
+		nvif_ioctl(parent, "no supported engine\n");
+		return -ENODEV;
+	}
+
+	/* allocate the channel */
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+	chan->fifo = fifo;
+	chan->engine = __ffs(args->v0.engine);
+	INIT_LIST_HEAD(&chan->head);
+
+	ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
+				  0x1000, 0x1000, true, args->v0.vm, 0,
+				  gk104_fifo_engine_subdev(chan->engine),
+				  1, fifo->user.bar.offset, 0x200,
+				  oclass, &chan->base);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+
+	/* page directory */
+	ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
+	if (ret)
+		return ret;
+
+	nvkm_kmap(chan->base.inst);
+	nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
+	nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
+	nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
+	nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
+	nvkm_done(chan->base.inst);
+
+	ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+	if (ret)
+		return ret;
+
+	/* clear channel control registers */
+	usermem = chan->base.chid * 0x200;
+	ioffset = args->v0.ioffset;
+	ilength = order_base_2(args->v0.ilength / 8);
+
+	nvkm_kmap(fifo->user.mem);
+	for (i = 0; i < 0x200; i += 4)
+		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
+	nvkm_done(fifo->user.mem);
+	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
+
+	/* RAMFC */
+	nvkm_kmap(chan->base.inst);
+	nvkm_wo32(chan->base.inst, 0x08, lower_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x0c, upper_32_bits(usermem));
+	nvkm_wo32(chan->base.inst, 0x10, 0x0000face);
+	nvkm_wo32(chan->base.inst, 0x30, 0xfffff902);
+	nvkm_wo32(chan->base.inst, 0x48, lower_32_bits(ioffset));
+	nvkm_wo32(chan->base.inst, 0x4c, upper_32_bits(ioffset) |
+					 (ilength << 16));
+	nvkm_wo32(chan->base.inst, 0x84, 0x20400000);
+	nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
+	nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
+	nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
+	nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid);
+	nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
+	nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
+	nvkm_wo32(chan->base.inst, 0xfc, 0x10000010); /* 0x002350 */
+	nvkm_done(chan->base.inst);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+gk104_fifo_gpfifo_oclass = {
+	.base.oclass = KEPLER_CHANNEL_GPFIFO_A,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = gk104_fifo_gpfifo_new,
+};
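
gk104_fifo_gpfifo_new() above doubles as an engine query: a request with engine == 0 simply returns the supported-engine mask, while a real allocation passes a mask in so the caller can ask for, say, any copy engine and let the kernel pick the lowest set bit via __ffs(). A userspace-side sketch of that handshake; the query stub and mask values are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* stand-in for the engine == 0 create call; the mask value is made up */
static uint32_t query_engines(void)
{
	return 0x0000000b;
}

int main(void)
{
	uint32_t wanted    = 0x0000000a; /* "any of these runlists will do" */
	uint32_t supported = query_engines();
	uint32_t usable    = wanted & supported;

	if (!usable) {
		fprintf(stderr, "no supported engine\n"); /* the -ENODEV path */
		return 1;
	}
	/* the kernel keeps only the lowest set bit, as __ffs() does above */
	printf("channel bound to engine %d\n", __builtin_ctz(usable));
	return 0;
}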
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
new file mode 100644
index 0000000..6511d6e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogm204.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changk104.h"
+
+#include <nvif/class.h>
+
+const struct nvkm_fifo_chan_oclass
+gm204_fifo_gpfifo_oclass = {
+	.base.oclass = MAXWELL_CHANNEL_GPFIFO_A,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = gk104_fifo_gpfifo_new,
+};
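
The GM204 file stays this small because Maxwell reuses the Kepler constructor wholesale; only the class id in the oclass table changes. Presumably the core walks a NULL-terminated list of these tables, matching on class id and the minver/maxver range before calling .ctor. A simplified sketch of that kind of lookup, with the dispatch structure assumed rather than taken from the nvkm source:

#include <stdint.h>
#include <stdio.h>

struct chan_oclass {
	uint32_t oclass;
	int minver, maxver;
	const char *ctor;        /* stands in for the constructor pointer */
};

static const struct chan_oclass kepler  = { 0xa06f, 0, 0, "gk104_fifo_gpfifo_new" };
static const struct chan_oclass maxwell = { 0xb06f, 0, 0, "gk104_fifo_gpfifo_new" };
static const struct chan_oclass *const chans[] = { &kepler, &maxwell, NULL };

int main(void)
{
	uint32_t want = 0xb06f; /* MAXWELL_CHANNEL_GPFIFO_A, value assumed */
	int ver = 0;

	for (const struct chan_oclass *const *c = chans; *c; c++) {
		if ((*c)->oclass == want &&
		    ver >= (*c)->minver && ver <= (*c)->maxver)
			printf("ctor: %s\n", (*c)->ctor);
	}
	return 0;
}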
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
new file mode 100644
index 0000000..a8c69f8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "channv50.h"
+
+#include <core/client.h>
+#include <core/ramht.h>
+
+#include <nvif/class.h>
+#include <nvif/unpack.h>
+
+static int
+nv50_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
+		     void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_object *parent = oclass->parent;
+	union {
+		struct nv50_channel_gpfifo_v0 v0;
+	} *args = data;
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nv50_fifo_chan *chan;
+	u64 ioffset, ilength;
+	int ret;
+
+	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+				   "pushbuf %llx ioffset %016llx "
+				   "ilength %08x\n",
+			   args->v0.version, args->v0.vm, args->v0.pushbuf,
+			   args->v0.ioffset, args->v0.ilength);
+		if (!args->v0.pushbuf)
+			return -EINVAL;
+	} else
+		return ret;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+				  oclass, chan);
+	if (ret)
+		return ret;
+
+	args->v0.chid = chan->base.chid;
+	ioffset = args->v0.ioffset;
+	ilength = order_base_2(args->v0.ilength / 8);
+
+	nvkm_kmap(chan->ramfc);
+	nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
+	nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+	nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4);
+	nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(ioffset));
+	nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+	nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+	nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+	nvkm_wo32(chan->ramfc, 0x7c, 0x30000001);
+	nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				     (4 << 24) /* SEARCH_FULL */ |
+				     (chan->ramht->gpuobj->node->offset >> 4));
+	nvkm_done(chan->ramfc);
+	return 0;
+}
+
+const struct nvkm_fifo_chan_oclass
+nv50_fifo_gpfifo_oclass = {
+	.base.oclass = NV50_CHANNEL_GPFIFO,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_fifo_gpfifo_new,
+};
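
The 0x80 write above packs the channel's RAMHT configuration into one word: a size field that appears to be log2(entries) relative to a 512-entry minimum (hence bits - 9), the SEARCH_FULL mode named in the inline comment, and the hash-table base in 16-byte units. A sketch of the packing; the field interpretations are inferred from the values written, not from documentation:

#include <stdint.h>
#include <stdio.h>

static uint32_t nv50_ramfc_ramht_word(unsigned bits, uint32_t gpuobj_offset)
{
	return ((bits - 9) << 27) |  /* size: log2(entries) - 9 (inferred) */
	       (4 << 24)          |  /* SEARCH_FULL, per the source comment */
	       (gpuobj_offset >> 4); /* base offset in 16-byte units */
}

int main(void)
{
	/* a 4KiB RAMHT holds 512 eight-byte entries, i.e. bits == 9 */
	printf("%08x\n", nv50_ramfc_ramht_word(9, 0x1000));
	return 0;
}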
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index 043e429..ad707ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -22,20 +22,17 @@
  * Authors: Ben Skeggs
  */
 #include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/engctx.h>
-#include <core/handle.h>
 #include <core/ramht.h>
-#include <subdev/instmem/nv04.h>
+#include <subdev/instmem.h>
 #include <subdev/timer.h>
+#include <engine/sw.h>
 
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static struct ramfc_desc
-nv04_ramfc[] = {
+static const struct nv04_fifo_ramfc
+nv04_fifo_ramfc[] = {
 	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
 	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
 	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
@@ -47,268 +44,19 @@
 	{}
 };
 
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-int
-nv04_fifo_object_attach(struct nvkm_object *parent,
-			struct nvkm_object *object, u32 handle)
-{
-	struct nv04_fifo_priv *priv = (void *)parent->engine;
-	struct nv04_fifo_chan *chan = (void *)parent;
-	u32 context, chid = chan->base.chid;
-	int ret;
-
-	if (nv_iclass(object, NV_GPUOBJ_CLASS))
-		context = nv_gpuobj(object)->addr >> 4;
-	else
-		context = 0x00000004; /* just non-zero */
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_DMAOBJ:
-	case NVDEV_ENGINE_SW:
-		context |= 0x00000000;
-		break;
-	case NVDEV_ENGINE_GR:
-		context |= 0x00010000;
-		break;
-	case NVDEV_ENGINE_MPEG:
-		context |= 0x00020000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	context |= 0x80000000; /* valid */
-	context |= chid << 24;
-
-	mutex_lock(&nv_subdev(priv)->mutex);
-	ret = nvkm_ramht_insert(priv->ramht, chid, handle, context);
-	mutex_unlock(&nv_subdev(priv)->mutex);
-	return ret;
-}
-
 void
-nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
+nv04_fifo_pause(struct nvkm_fifo *base, unsigned long *pflags)
+__acquires(fifo->base.lock)
 {
-	struct nv04_fifo_priv *priv = (void *)parent->engine;
-	mutex_lock(&nv_subdev(priv)->mutex);
-	nvkm_ramht_remove(priv->ramht, cookie);
-	mutex_unlock(&nv_subdev(priv)->mutex);
-}
-
-int
-nv04_fifo_context_attach(struct nvkm_object *parent,
-			 struct nvkm_object *object)
-{
-	nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
-	return 0;
-}
-
-static int
-nv04_fifo_chan_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nv04_fifo_priv *priv = (void *)engine;
-	struct nv04_fifo_chan *chan;
-	int ret;
-
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
-				       0x10000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
-	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
-	chan->ramfc = chan->base.chid * 32;
-
-	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x10,
-			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
-			     NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
-			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	return 0;
-}
-
-void
-nv04_fifo_chan_dtor(struct nvkm_object *object)
-{
-	struct nv04_fifo_priv *priv = (void *)object->engine;
-	struct nv04_fifo_chan *chan = (void *)object;
-	struct ramfc_desc *c = priv->ramfc_desc;
-
-	do {
-		nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000);
-	} while ((++c)->bits);
-
-	nvkm_fifo_channel_destroy(&chan->base);
-}
-
-int
-nv04_fifo_chan_init(struct nvkm_object *object)
-{
-	struct nv04_fifo_priv *priv = (void *)object->engine;
-	struct nv04_fifo_chan *chan = (void *)object;
-	u32 mask = 1 << chan->base.chid;
-	unsigned long flags;
-	int ret;
-
-	ret = nvkm_fifo_channel_init(&chan->base);
-	if (ret)
-		return ret;
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	nv_mask(priv, NV04_PFIFO_MODE, mask, mask);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	return 0;
-}
-
-int
-nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv04_fifo_priv *priv = (void *)object->engine;
-	struct nv04_fifo_chan *chan = (void *)object;
-	struct nvkm_gpuobj *fctx = priv->ramfc;
-	struct ramfc_desc *c;
-	unsigned long flags;
-	u32 data = chan->ramfc;
-	u32 chid;
-
-	/* prevent fifo context switches */
-	spin_lock_irqsave(&priv->base.lock, flags);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 0);
-
-	/* if this channel is active, replace it with a null context */
-	chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
-	if (chid == chan->base.chid) {
-		nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
-		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
-		nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
-
-		c = priv->ramfc_desc;
-		do {
-			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
-			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
-			u32 rv = (nv_rd32(priv, c->regp) &  rm) >> c->regs;
-			u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
-			nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
-		} while ((++c)->bits);
-
-		c = priv->ramfc_desc;
-		do {
-			nv_wr32(priv, c->regp, 0x00000000);
-		} while ((++c)->bits);
-
-		nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
-		nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
-		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
-		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
-		nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
-	}
-
-	/* restore normal operation, after disabling dma mode */
-	nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-
-	return nvkm_fifo_channel_fini(&chan->base, suspend);
-}
-
-static struct nvkm_ofuncs
-nv04_fifo_ofuncs = {
-	.ctor = nv04_fifo_chan_ctor,
-	.dtor = nv04_fifo_chan_dtor,
-	.init = nv04_fifo_chan_init,
-	.fini = nv04_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv04_fifo_sclass[] = {
-	{ NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-int
-nv04_fifo_context_ctor(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
-{
-	struct nv04_fifo_base *base;
-	int ret;
-
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
-				       0x1000, NVOBJ_FLAG_HEAP, &base);
-	*pobject = nv_object(base);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static struct nvkm_oclass
-nv04_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_context_ctor,
-		.dtor = _nvkm_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-void
-nv04_fifo_pause(struct nvkm_fifo *pfifo, unsigned long *pflags)
-__acquires(priv->base.lock)
-{
-	struct nv04_fifo_priv *priv = (void *)pfifo;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->base.lock, flags);
+	spin_lock_irqsave(&fifo->base.lock, flags);
 	*pflags = flags;
 
-	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000);
-	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
+	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
 
 	/* in some cases the puller may be left in an inconsistent state
 	 * if you try to stop it while it's busy translating handles.
@@ -319,28 +67,31 @@
 	 * to avoid this, we invalidate the most recently calculated
 	 * instance.
 	 */
-	if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0,
-			   NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
-		nv_warn(priv, "timeout idling puller\n");
+	nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
+		if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
+			break;
+	);
 
-	if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) &
+	if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
 			  NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
-		nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
 
-	nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
 }
 
 void
-nv04_fifo_start(struct nvkm_fifo *pfifo, unsigned long *pflags)
-__releases(priv->base.lock)
+nv04_fifo_start(struct nvkm_fifo *base, unsigned long *pflags)
+__releases(fifo->base.lock)
 {
-	struct nv04_fifo_priv *priv = (void *)pfifo;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	unsigned long flags = *pflags;
 
-	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001);
+	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);
 
-	spin_unlock_irqrestore(&priv->base.lock, flags);
+	spin_unlock_irqrestore(&fifo->base.lock, flags);
 }
 
 static const char *
@@ -354,61 +105,40 @@
 }
 
 static bool
-nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
+nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
 {
-	struct nv04_fifo_chan *chan = NULL;
-	struct nvkm_handle *bind;
-	const int subc = (addr >> 13) & 0x7;
-	const int mthd = addr & 0x1ffc;
+	struct nvkm_sw *sw = device->sw;
+	const int subc = (addr & 0x0000e000) >> 13;
+	const int mthd = (addr & 0x00001ffc);
+	const u32 mask = 0x0000000f << (subc * 4);
+	u32 engine = nvkm_rd32(device, 0x003280);
 	bool handled = false;
-	unsigned long flags;
-	u32 engine;
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	if (likely(chid >= priv->base.min && chid <= priv->base.max))
-		chan = (void *)priv->base.channel[chid];
-	if (unlikely(!chan))
-		goto out;
 
 	switch (mthd) {
-	case 0x0000:
-		bind = nvkm_namedb_get(nv_namedb(chan), data);
-		if (unlikely(!bind))
-			break;
-
-		if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
-			engine = 0x0000000f << (subc * 4);
-			chan->subc[subc] = data;
-			handled = true;
-
-			nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
-		}
-
-		nvkm_namedb_put(bind);
+	case 0x0000 ... 0x0000: /* subchannel's engine -> software */
+		nvkm_wr32(device, 0x003280, (engine &= ~mask));
+	case 0x0180 ... 0x01fc: /* handle -> instance */
+		data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
+	case 0x0100 ... 0x017c:
+	case 0x0200 ... 0x1ffc: /* pass method down to sw */
+		if (!(engine & mask) && sw)
+			handled = nvkm_sw_mthd(sw, chid, subc, mthd, data);
 		break;
 	default:
-		engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
-		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
-			break;
-
-		bind = nvkm_namedb_get(nv_namedb(chan), chan->subc[subc]);
-		if (likely(bind)) {
-			if (!nv_call(bind->object, mthd, data))
-				handled = true;
-			nvkm_namedb_put(bind);
-		}
 		break;
 	}
 
-out:
-	spin_unlock_irqrestore(&priv->base.lock, flags);
 	return handled;
 }
 
 static void
-nv04_fifo_cache_error(struct nvkm_device *device,
-		      struct nv04_fifo_priv *priv, u32 chid, u32 get)
+nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
 {
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	u32 pull0 = nvkm_rd32(device, 0x003250);
 	u32 mthd, data;
 	int ptr;
 
@@ -420,216 +150,214 @@
 	ptr = (get & 0x7ff) >> 2;
 
 	if (device->card_type < NV_40) {
-		mthd = nv_rd32(priv, NV04_PFIFO_CACHE1_METHOD(ptr));
-		data = nv_rd32(priv, NV04_PFIFO_CACHE1_DATA(ptr));
+		mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
+		data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
 	} else {
-		mthd = nv_rd32(priv, NV40_PFIFO_CACHE1_METHOD(ptr));
-		data = nv_rd32(priv, NV40_PFIFO_CACHE1_DATA(ptr));
+		mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
+		data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
 	}
 
-	if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
-		const char *client_name =
-			nvkm_client_name_for_fifo_chid(&priv->base, chid);
-		nv_error(priv,
-			 "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
-			 chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
-			 data);
+	if (!(pull0 & 0x00000100) ||
+	    !nv04_fifo_swmthd(device, chid, mthd, data)) {
+		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+		nvkm_error(subdev, "CACHE_ERROR - "
+			   "ch %d [%s] subc %d mthd %04x data %08x\n",
+			   chid, chan ? chan->object.client->name : "unknown",
+			   (mthd >> 13) & 7, mthd & 0x1ffc, data);
+		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 	}
 
-	nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
-	nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+	nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
 
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
-		nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
-	nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
-		nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
-	nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
+		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
+		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);
 
-	nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
-		nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
-	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
+		nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
 }
 
 static void
-nv04_fifo_dma_pusher(struct nvkm_device *device,
-		     struct nv04_fifo_priv *priv, u32 chid)
+nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
 {
-	const char *client_name;
-	u32 dma_get = nv_rd32(priv, 0x003244);
-	u32 dma_put = nv_rd32(priv, 0x003240);
-	u32 push = nv_rd32(priv, 0x003220);
-	u32 state = nv_rd32(priv, 0x003228);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 dma_get = nvkm_rd32(device, 0x003244);
+	u32 dma_put = nvkm_rd32(device, 0x003240);
+	u32 push = nvkm_rd32(device, 0x003220);
+	u32 state = nvkm_rd32(device, 0x003228);
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	const char *name;
 
-	client_name = nvkm_client_name_for_fifo_chid(&priv->base, chid);
-
+	chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+	name = chan ? chan->object.client->name : "unknown";
 	if (device->card_type == NV_50) {
-		u32 ho_get = nv_rd32(priv, 0x003328);
-		u32 ho_put = nv_rd32(priv, 0x003320);
-		u32 ib_get = nv_rd32(priv, 0x003334);
-		u32 ib_put = nv_rd32(priv, 0x003330);
+		u32 ho_get = nvkm_rd32(device, 0x003328);
+		u32 ho_put = nvkm_rd32(device, 0x003320);
+		u32 ib_get = nvkm_rd32(device, 0x003334);
+		u32 ib_put = nvkm_rd32(device, 0x003330);
 
-		nv_error(priv,
-			 "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
-			 chid, client_name, ho_get, dma_get, ho_put, dma_put,
-			 ib_get, ib_put, state, nv_dma_state_err(state), push);
+		nvkm_error(subdev, "DMA_PUSHER - "
+			   "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
+			   "ib_put %08x state %08x (err: %s) push %08x\n",
+			   chid, name, ho_get, dma_get, ho_put, dma_put,
+			   ib_get, ib_put, state, nv_dma_state_err(state),
+			   push);
 
 		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
-		nv_wr32(priv, 0x003364, 0x00000000);
+		nvkm_wr32(device, 0x003364, 0x00000000);
 		if (dma_get != dma_put || ho_get != ho_put) {
-			nv_wr32(priv, 0x003244, dma_put);
-			nv_wr32(priv, 0x003328, ho_put);
+			nvkm_wr32(device, 0x003244, dma_put);
+			nvkm_wr32(device, 0x003328, ho_put);
 		} else
 		if (ib_get != ib_put)
-			nv_wr32(priv, 0x003334, ib_put);
+			nvkm_wr32(device, 0x003334, ib_put);
 	} else {
-		nv_error(priv,
-			 "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
-			 chid, client_name, dma_get, dma_put, state,
-			 nv_dma_state_err(state), push);
+		nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
+				   "state %08x (err: %s) push %08x\n",
+			   chid, name, dma_get, dma_put, state,
+			   nv_dma_state_err(state), push);
 
 		if (dma_get != dma_put)
-			nv_wr32(priv, 0x003244, dma_put);
+			nvkm_wr32(device, 0x003244, dma_put);
 	}
+	nvkm_fifo_chan_put(&fifo->base, flags, &chan);
 
-	nv_wr32(priv, 0x003228, 0x00000000);
-	nv_wr32(priv, 0x003220, 0x00000001);
-	nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+	nvkm_wr32(device, 0x003228, 0x00000000);
+	nvkm_wr32(device, 0x003220, 0x00000001);
+	nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
 }
 
 void
-nv04_fifo_intr(struct nvkm_subdev *subdev)
+nv04_fifo_intr(struct nvkm_fifo *base)
 {
-	struct nvkm_device *device = nv_device(subdev);
-	struct nv04_fifo_priv *priv = (void *)subdev;
-	u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0);
-	u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
+	u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
 	u32 reassign, chid, get, sem;
 
-	reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
-	nv_wr32(priv, NV03_PFIFO_CACHES, 0);
+	reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
 
-	chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
-	get  = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
+	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & (fifo->base.nr - 1);
+	get  = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);
 
 	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
-		nv04_fifo_cache_error(device, priv, chid, get);
+		nv04_fifo_cache_error(fifo, chid, get);
 		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
 	}
 
 	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
-		nv04_fifo_dma_pusher(device, priv, chid);
+		nv04_fifo_dma_pusher(fifo, chid);
 		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
 	}
 
 	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
 		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
-		nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
+		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
 
-		sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
-		nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+		sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
+		nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
 
-		nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
-		nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
+		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
 	}
 
 	if (device->card_type == NV_50) {
 		if (stat & 0x00000010) {
 			stat &= ~0x00000010;
-			nv_wr32(priv, 0x002100, 0x00000010);
+			nvkm_wr32(device, 0x002100, 0x00000010);
 		}
 
 		if (stat & 0x40000000) {
-			nv_wr32(priv, 0x002100, 0x40000000);
-			nvkm_fifo_uevent(&priv->base);
+			nvkm_wr32(device, 0x002100, 0x40000000);
+			nvkm_fifo_uevent(&fifo->base);
 			stat &= ~0x40000000;
 		}
 	}
 
 	if (stat) {
-		nv_warn(priv, "unknown intr 0x%08x\n", stat);
-		nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
-		nv_wr32(priv, NV03_PFIFO_INTR_0, stat);
+		nvkm_warn(subdev, "intr %08x\n", stat);
+		nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
+		nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
 	}
 
-	nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
-}
-
-static int
-nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv04_instmem_priv *imem = nv04_instmem(parent);
-	struct nv04_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nvkm_ramht_ref(imem->ramht, &priv->ramht);
-	nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
-	nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &nv04_fifo_cclass;
-	nv_engine(priv)->sclass = nv04_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	priv->ramfc_desc = nv04_ramfc;
-	return 0;
+	nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
 }
 
 void
-nv04_fifo_dtor(struct nvkm_object *object)
+nv04_fifo_init(struct nvkm_fifo *base)
 {
-	struct nv04_fifo_priv *priv = (void *)object;
-	nvkm_gpuobj_ref(NULL, &priv->ramfc);
-	nvkm_gpuobj_ref(NULL, &priv->ramro);
-	nvkm_ramht_ref(NULL, &priv->ramht);
-	nvkm_fifo_destroy(&priv->base);
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	struct nvkm_ramht *ramht = imem->ramht;
+	struct nvkm_memory *ramro = imem->ramro;
+	struct nvkm_memory *ramfc = imem->ramfc;
+
+	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
+	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+					    ((ramht->bits - 9) << 16) |
+					    (ramht->gpuobj->addr >> 8));
+	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);
+
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
+
+	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
 }
 
 int
-nv04_fifo_init(struct nvkm_object *object)
+nv04_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+	       int index, int nr, const struct nv04_fifo_ramfc *ramfc,
+	       struct nvkm_fifo **pfifo)
 {
-	struct nv04_fifo_priv *priv = (void *)object;
+	struct nv04_fifo *fifo;
 	int ret;
 
-	ret = nvkm_fifo_init(&priv->base);
+	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+		return -ENOMEM;
+	fifo->ramfc = ramfc;
+	*pfifo = &fifo->base;
+
+	ret = nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
 	if (ret)
 		return ret;
 
-	nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
-	nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
-
-	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-				       ((priv->ramht->bits - 9) << 16) |
-				        (priv->ramht->gpuobj.addr >> 8));
-	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
-	nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
-
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
-
-	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
-	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
-
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
-	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+	set_bit(nr - 1, fifo->base.mask); /* inactive channel */
 	return 0;
 }
 
-struct nvkm_oclass *
-nv04_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_ctor,
-		.dtor = nv04_fifo_dtor,
-		.init = nv04_fifo_init,
-		.fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+nv04_fifo = {
+	.init = nv04_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.chan = {
+		&nv04_fifo_dma_oclass,
+		NULL
 	},
 };
+
+int
+nv04_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return nv04_fifo_new_(&nv04_fifo, device, index, 16,
+			      nv04_fifo_ramfc, pfifo);
+}
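
Two details of the rework are easy to miss here: chid is now masked with fifo->base.nr - 1, which only works because channel counts are powers of two, and nv04_fifo_new_() reserves the highest channel up front as the inactive/null context via set_bit(). A minimal sketch of that bookkeeping, assuming fifo->base.mask is an allocation bitmap:

#include <stdio.h>

#define NR_CHAN 16u                        /* what nv04_fifo_new() passes */

int main(void)
{
	unsigned long mask = 0;            /* stand-in for fifo->base.mask */
	unsigned chid_mask = NR_CHAN - 1;  /* 0x0f, doubles as the PUSH1 mask */

	mask |= 1UL << (NR_CHAN - 1);      /* channel 15: never handed out */

	unsigned raw = 0x1f;               /* pretend CACHE1_PUSH1 readout */
	printf("active chid %u, reserved=%d\n", raw & chid_mask,
	       !!(mask & (1UL << (raw & chid_mask))));
	return 0;
}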
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
index e0e0c47..03f6000 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
@@ -1,137 +1,9 @@
 #ifndef __NV04_FIFO_H__
 #define __NV04_FIFO_H__
-#include <engine/fifo.h>
+#define nv04_fifo(p) container_of((p), struct nv04_fifo, base)
+#include "priv.h"
 
-#define NV04_PFIFO_DELAY_0                                 0x00002040
-#define NV04_PFIFO_DMA_TIMESLICE                           0x00002044
-#define NV04_PFIFO_NEXT_CHANNEL                            0x00002050
-#define NV03_PFIFO_INTR_0                                  0x00002100
-#define NV03_PFIFO_INTR_EN_0                               0x00002140
-#    define NV_PFIFO_INTR_CACHE_ERROR                          (1<<0)
-#    define NV_PFIFO_INTR_RUNOUT                               (1<<4)
-#    define NV_PFIFO_INTR_RUNOUT_OVERFLOW                      (1<<8)
-#    define NV_PFIFO_INTR_DMA_PUSHER                          (1<<12)
-#    define NV_PFIFO_INTR_DMA_PT                              (1<<16)
-#    define NV_PFIFO_INTR_SEMAPHORE                           (1<<20)
-#    define NV_PFIFO_INTR_ACQUIRE_TIMEOUT                     (1<<24)
-#define NV03_PFIFO_RAMHT                                   0x00002210
-#define NV03_PFIFO_RAMFC                                   0x00002214
-#define NV03_PFIFO_RAMRO                                   0x00002218
-#define NV40_PFIFO_RAMFC                                   0x00002220
-#define NV03_PFIFO_CACHES                                  0x00002500
-#define NV04_PFIFO_MODE                                    0x00002504
-#define NV04_PFIFO_DMA                                     0x00002508
-#define NV04_PFIFO_SIZE                                    0x0000250c
-#define NV50_PFIFO_CTX_TABLE(c)                        (0x2600+(c)*4)
-#define NV50_PFIFO_CTX_TABLE__SIZE                                128
-#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED                  (1<<31)
-#define NV50_PFIFO_CTX_TABLE_UNK30_BAD                        (1<<30)
-#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80             0x0FFFFFFF
-#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84             0x00FFFFFF
-#define NV03_PFIFO_CACHE0_PUSH0                            0x00003000
-#define NV03_PFIFO_CACHE0_PULL0                            0x00003040
-#define NV04_PFIFO_CACHE0_PULL0                            0x00003050
-#define NV04_PFIFO_CACHE0_PULL1                            0x00003054
-#define NV03_PFIFO_CACHE1_PUSH0                            0x00003200
-#define NV03_PFIFO_CACHE1_PUSH1                            0x00003204
-#define NV03_PFIFO_CACHE1_PUSH1_DMA                            (1<<8)
-#define NV40_PFIFO_CACHE1_PUSH1_DMA                           (1<<16)
-#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000000f
-#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000001f
-#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000007f
-#define NV03_PFIFO_CACHE1_PUT                              0x00003210
-#define NV04_PFIFO_CACHE1_DMA_PUSH                         0x00003220
-#define NV04_PFIFO_CACHE1_DMA_FETCH                        0x00003224
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES         0x00000000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES        0x00000008
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES        0x00000010
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES        0x00000018
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES        0x00000020
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES        0x00000028
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES        0x00000030
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES        0x00000038
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES        0x00000040
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES        0x00000048
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES        0x00000050
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES        0x00000058
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES       0x00000060
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES       0x00000068
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES       0x00000070
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES       0x00000078
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES       0x00000080
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES       0x00000088
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES       0x00000090
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES       0x00000098
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES       0x000000A0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES       0x000000A8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES       0x000000B0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES       0x000000B8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES       0x000000C0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES       0x000000C8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES       0x000000D0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES       0x000000D8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES       0x000000E0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES       0x000000E8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES       0x000000F0
-#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES       0x000000F8
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE                 0x0000E000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES        0x00000000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES        0x00002000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES        0x00004000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES       0x00006000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES       0x00008000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES       0x0000A000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES       0x0000C000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES       0x0000E000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS             0x001F0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0           0x00000000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1           0x00010000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2           0x00020000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3           0x00030000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4           0x00040000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5           0x00050000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6           0x00060000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7           0x00070000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8           0x00080000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9           0x00090000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10          0x000A0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11          0x000B0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12          0x000C0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13          0x000D0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14          0x000E0000
-#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15          0x000F0000
-#    define NV_PFIFO_CACHE1_ENDIAN                         0x80000000
-#    define NV_PFIFO_CACHE1_LITTLE_ENDIAN                  0x7FFFFFFF
-#    define NV_PFIFO_CACHE1_BIG_ENDIAN                     0x80000000
-#define NV04_PFIFO_CACHE1_DMA_STATE                        0x00003228
-#define NV04_PFIFO_CACHE1_DMA_INSTANCE                     0x0000322c
-#define NV04_PFIFO_CACHE1_DMA_CTL                          0x00003230
-#define NV04_PFIFO_CACHE1_DMA_PUT                          0x00003240
-#define NV04_PFIFO_CACHE1_DMA_GET                          0x00003244
-#define NV10_PFIFO_CACHE1_REF_CNT                          0x00003248
-#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE                   0x0000324C
-#define NV03_PFIFO_CACHE1_PULL0                            0x00003240
-#define NV04_PFIFO_CACHE1_PULL0                            0x00003250
-#    define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED            0x00000010
-#    define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY              0x00001000
-#define NV03_PFIFO_CACHE1_PULL1                            0x00003250
-#define NV04_PFIFO_CACHE1_PULL1                            0x00003254
-#define NV04_PFIFO_CACHE1_HASH                             0x00003258
-#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT                  0x00003260
-#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP                0x00003264
-#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE                    0x00003268
-#define NV10_PFIFO_CACHE1_SEMAPHORE                        0x0000326C
-#define NV03_PFIFO_CACHE1_GET                              0x00003270
-#define NV04_PFIFO_CACHE1_ENGINE                           0x00003280
-#define NV04_PFIFO_CACHE1_DMA_DCOUNT                       0x000032A0
-#define NV40_PFIFO_GRCTX_INSTANCE                          0x000032E0
-#define NV40_PFIFO_UNK32E4                                 0x000032E4
-#define NV04_PFIFO_CACHE1_METHOD(i)                (0x00003800+(i*8))
-#define NV04_PFIFO_CACHE1_DATA(i)                  (0x00003804+(i*8))
-#define NV40_PFIFO_CACHE1_METHOD(i)                (0x00090000+(i*8))
-#define NV40_PFIFO_CACHE1_DATA(i)                  (0x00090004+(i*8))
-
-struct ramfc_desc {
+struct nv04_fifo_ramfc {
 	unsigned bits:6;
 	unsigned ctxs:5;
 	unsigned ctxp:8;
@@ -139,37 +11,13 @@
 	unsigned regp;
 };
 
-struct nv04_fifo_priv {
+struct nv04_fifo {
 	struct nvkm_fifo base;
-	struct ramfc_desc *ramfc_desc;
-	struct nvkm_ramht  *ramht;
-	struct nvkm_gpuobj *ramro;
-	struct nvkm_gpuobj *ramfc;
+	const struct nv04_fifo_ramfc *ramfc;
 };
 
-struct nv04_fifo_base {
-	struct nvkm_fifo_base base;
-};
-
-struct nv04_fifo_chan {
-	struct nvkm_fifo_chan base;
-	u32 subc[8];
-	u32 ramfc;
-};
-
-int  nv04_fifo_object_attach(struct nvkm_object *, struct nvkm_object *, u32);
-void nv04_fifo_object_detach(struct nvkm_object *, int);
-
-void nv04_fifo_chan_dtor(struct nvkm_object *);
-int  nv04_fifo_chan_init(struct nvkm_object *);
-int  nv04_fifo_chan_fini(struct nvkm_object *, bool suspend);
-
-int  nv04_fifo_context_ctor(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, void *, u32,
-			    struct nvkm_object **);
-
-void nv04_fifo_dtor(struct nvkm_object *);
-int  nv04_fifo_init(struct nvkm_object *);
-void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
-void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
+int nv04_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
+		   int index, int nr, const struct nv04_fifo_ramfc *,
+		   struct nvkm_fifo **);
+void nv04_fifo_init(struct nvkm_fifo *);
 #endif
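
The nv04_fifo_ramfc table is a declarative map from PFIFO registers to a channel's RAMFC slots, and its consumer is the context save/restore logic, the same pattern visible in the loop deleted from nv04_fifo_chan_fini() earlier in this diff. A sketch patterned on that deleted loop, with register and instmem accessors as stand-ins:

#include <stdint.h>

struct ramfc_desc {                  /* mirrors struct nv04_fifo_ramfc */
	unsigned bits:6, ctxs:5, ctxp:8, regs:5;
	unsigned regp;
};

/* Save live PFIFO register state into the channel's RAMFC image, one
 * descriptor at a time, following the removed fini path. */
static void
ramfc_save(const struct ramfc_desc *c, unsigned base,
	   uint32_t (*rd_reg)(unsigned), uint32_t (*rd_ctx)(unsigned),
	   void (*wr_ctx)(unsigned, uint32_t))
{
	do {
		uint32_t rm = ((1ULL << c->bits) - 1) << c->regs;
		uint32_t cm = ((1ULL << c->bits) - 1) << c->ctxs;
		uint32_t rv = (rd_reg(c->regp) & rm) >> c->regs;
		uint32_t cv = rd_ctx(base + c->ctxp) & ~cm;
		wr_ctx(base + c->ctxp, cv | (rv << c->ctxs));
	} while ((++c)->bits);
}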
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
index 48ce4af..f9a87de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
@@ -22,17 +22,11 @@
  * Authors: Ben Skeggs
  */
 #include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
 
-#include <core/client.h>
-#include <core/engctx.h>
-#include <core/ramht.h>
-#include <subdev/instmem/nv04.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static struct ramfc_desc
-nv10_ramfc[] = {
+static const struct nv04_fifo_ramfc
+nv10_fifo_ramfc[] = {
 	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
 	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
 	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -45,134 +39,21 @@
 	{}
 };
 
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static int
-nv10_fifo_chan_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nv04_fifo_priv *priv = (void *)engine;
-	struct nv04_fifo_chan *chan;
-	int ret;
-
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
-				       0x10000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
-	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
-	chan->ramfc = chan->base.chid * 32;
-
-	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x14,
-			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
-			     NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
-			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	return 0;
-}
-
-static struct nvkm_ofuncs
-nv10_fifo_ofuncs = {
-	.ctor = nv10_fifo_chan_ctor,
-	.dtor = nv04_fifo_chan_dtor,
-	.init = nv04_fifo_chan_init,
-	.fini = nv04_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv10_fifo_sclass[] = {
-	{ NV10_CHANNEL_DMA, &nv10_fifo_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv10_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x10),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_context_ctor,
-		.dtor = _nvkm_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
+static const struct nvkm_fifo_func
+nv10_fifo = {
+	.init = nv04_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.chan = {
+		&nv10_fifo_dma_oclass,
+		NULL
 	},
 };
 
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static int
-nv10_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+nv10_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
 {
-	struct nv04_instmem_priv *imem = nv04_instmem(parent);
-	struct nv04_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nvkm_ramht_ref(imem->ramht, &priv->ramht);
-	nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
-	nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &nv10_fifo_cclass;
-	nv_engine(priv)->sclass = nv10_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	priv->ramfc_desc = nv10_ramfc;
-	return 0;
+	return nv04_fifo_new_(&nv10_fifo, device, index, 32,
+			      nv10_fifo_ramfc, pfifo);
 }
-
-struct nvkm_oclass *
-nv10_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x10),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_fifo_ctor,
-		.dtor = nv04_fifo_dtor,
-		.init = nv04_fifo_init,
-		.fini = _nvkm_fifo_fini,
-	},
-};
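
With the ofuncs/oclass scaffolding gone, nv10 collapses to the same three declarations as nv04, differing only in channel count, ramfc layout, and dma class. Adding another pre-NV40 chipset after this rework is mostly declarative; a sketch, where nvXX and its dma oclass are placeholders rather than anything in the patch:

#include "nv04.h"          /* same includes as the chipset files above */
#include "channv04.h"
#include "regsnv04.h"

static const struct nv04_fifo_ramfc
nvXX_fifo_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	/* chipset-specific rows would follow */
	{}
};

static const struct nvkm_fifo_func
nvXX_fifo = {
	.init = nv04_fifo_init,
	.intr = nv04_fifo_intr,
	.pause = nv04_fifo_pause,
	.start = nv04_fifo_start,
	.chan = {
		&nvXX_fifo_dma_oclass,   /* hypothetical channel class */
		NULL
	},
};

int
nvXX_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return nv04_fifo_new_(&nvXX_fifo, device, index, 32,
			      nvXX_fifo_ramfc, pfifo);
}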
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
index 4a20a6f..f6d383a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
@@ -22,17 +22,14 @@
  * Authors: Ben Skeggs
  */
 #include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
 
-#include <core/client.h>
-#include <core/engctx.h>
 #include <core/ramht.h>
-#include <subdev/instmem/nv04.h>
+#include <subdev/instmem.h>
 
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static struct ramfc_desc
-nv17_ramfc[] = {
+static const struct nv04_fifo_ramfc
+nv17_fifo_ramfc[] = {
 	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
 	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
 	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -50,166 +47,51 @@
 	{}
 };
 
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static int
-nv17_fifo_chan_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+static void
+nv17_fifo_init(struct nvkm_fifo *base)
 {
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nv04_fifo_priv *priv = (void *)engine;
-	struct nv04_fifo_chan *chan;
-	int ret;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	struct nvkm_ramht *ramht = imem->ramht;
+	struct nvkm_memory *ramro = imem->ramro;
+	struct nvkm_memory *ramfc = imem->ramfc;
 
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
+	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
+	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
 
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
-				       0x10000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
-				       &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+					    ((ramht->bits - 9) << 16) |
+					    (ramht->gpuobj->addr >> 8));
+	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8 |
+					    0x00010000);
 
-	args->v0.chid = chan->base.chid;
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
 
-	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
-	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
-	chan->ramfc = chan->base.chid * 64;
+	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
 
-	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x14,
-			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
-			     NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
-			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	return 0;
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
 }
 
-static struct nvkm_ofuncs
-nv17_fifo_ofuncs = {
-	.ctor = nv17_fifo_chan_ctor,
-	.dtor = nv04_fifo_chan_dtor,
-	.init = nv04_fifo_chan_init,
-	.fini = nv04_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv17_fifo_sclass[] = {
-	{ NV17_CHANNEL_DMA, &nv17_fifo_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv17_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x17),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_context_ctor,
-		.dtor = _nvkm_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
+static const struct nvkm_fifo_func
+nv17_fifo = {
+	.init = nv17_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.chan = {
+		&nv17_fifo_dma_oclass,
+		NULL
 	},
 };
 
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static int
-nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+nv17_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
 {
-	struct nv04_instmem_priv *imem = nv04_instmem(parent);
-	struct nv04_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nvkm_ramht_ref(imem->ramht, &priv->ramht);
-	nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
-	nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &nv17_fifo_cclass;
-	nv_engine(priv)->sclass = nv17_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	priv->ramfc_desc = nv17_ramfc;
-	return 0;
+	return nv04_fifo_new_(&nv17_fifo, device, index, 32,
+			      nv17_fifo_ramfc, pfifo);
 }
-
-static int
-nv17_fifo_init(struct nvkm_object *object)
-{
-	struct nv04_fifo_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fifo_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
-	nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
-
-	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-				       ((priv->ramht->bits - 9) << 16) |
-				        (priv->ramht->gpuobj.addr >> 8));
-	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
-	nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8 | 0x00010000);
-
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
-
-	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
-	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
-
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
-	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
-	return 0;
-}
-
-struct nvkm_oclass *
-nv17_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x17),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv17_fifo_ctor,
-		.dtor = nv04_fifo_dtor,
-		.init = nv17_fifo_init,
-		.fini = _nvkm_fifo_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
index 5bfc962..8c7ba32 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -22,19 +22,15 @@
  * Authors: Ben Skeggs
  */
 #include "nv04.h"
+#include "channv04.h"
+#include "regsnv04.h"
 
-#include <core/client.h>
-#include <core/device.h>
-#include <core/engctx.h>
 #include <core/ramht.h>
 #include <subdev/fb.h>
-#include <subdev/instmem/nv04.h>
+#include <subdev/instmem.h>
 
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-static struct ramfc_desc
-nv40_ramfc[] = {
+static const struct nv04_fifo_ramfc
+nv40_fifo_ramfc[] = {
 	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
 	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
 	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -60,297 +56,72 @@
 	{}
 };
 
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
-
-static int
-nv40_fifo_object_attach(struct nvkm_object *parent,
-			struct nvkm_object *object, u32 handle)
+static void
+nv40_fifo_init(struct nvkm_fifo *base)
 {
-	struct nv04_fifo_priv *priv = (void *)parent->engine;
-	struct nv04_fifo_chan *chan = (void *)parent;
-	u32 context, chid = chan->base.chid;
-	int ret;
+	struct nv04_fifo *fifo = nv04_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_fb *fb = device->fb;
+	struct nvkm_instmem *imem = device->imem;
+	struct nvkm_ramht *ramht = imem->ramht;
+	struct nvkm_memory *ramro = imem->ramro;
+	struct nvkm_memory *ramfc = imem->ramfc;
 
-	if (nv_iclass(object, NV_GPUOBJ_CLASS))
-		context = nv_gpuobj(object)->addr >> 4;
-	else
-		context = 0x00000004; /* just non-zero */
+	nvkm_wr32(device, 0x002040, 0x000000ff);
+	nvkm_wr32(device, 0x002044, 0x2101ffff);
+	nvkm_wr32(device, 0x002058, 0x00000001);
 
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_DMAOBJ:
-	case NVDEV_ENGINE_SW:
-		context |= 0x00000000;
-		break;
-	case NVDEV_ENGINE_GR:
-		context |= 0x00100000;
-		break;
-	case NVDEV_ENGINE_MPEG:
-		context |= 0x00200000;
-		break;
-	default:
-		return -EINVAL;
-	}
+	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+					    ((ramht->bits - 9) << 16) |
+					    (ramht->gpuobj->addr >> 8));
+	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
 
-	context |= chid << 23;
-
-	mutex_lock(&nv_subdev(priv)->mutex);
-	ret = nvkm_ramht_insert(priv->ramht, chid, handle, context);
-	mutex_unlock(&nv_subdev(priv)->mutex);
-	return ret;
-}
-
-static int
-nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
-{
-	struct nv04_fifo_priv *priv = (void *)parent->engine;
-	struct nv04_fifo_chan *chan = (void *)parent;
-	unsigned long flags;
-	u32 reg, ctx;
-
-	switch (nv_engidx(engctx->engine)) {
-	case NVDEV_ENGINE_SW:
-		return 0;
-	case NVDEV_ENGINE_GR:
-		reg = 0x32e0;
-		ctx = 0x38;
-		break;
-	case NVDEV_ENGINE_MPEG:
-		reg = 0x330c;
-		ctx = 0x54;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
-	nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
-
-	if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
-		nv_wr32(priv, reg, nv_engctx(engctx)->addr);
-	nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
-
-	nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	return 0;
-}
-
-static int
-nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			 struct nvkm_object *engctx)
-{
-	struct nv04_fifo_priv *priv = (void *)parent->engine;
-	struct nv04_fifo_chan *chan = (void *)parent;
-	unsigned long flags;
-	u32 reg, ctx;
-
-	switch (nv_engidx(engctx->engine)) {
-	case NVDEV_ENGINE_SW:
-		return 0;
-	case NVDEV_ENGINE_GR:
-		reg = 0x32e0;
-		ctx = 0x38;
-		break;
-	case NVDEV_ENGINE_MPEG:
-		reg = 0x330c;
-		ctx = 0x54;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&priv->base.lock, flags);
-	nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
-
-	if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
-		nv_wr32(priv, reg, 0x00000000);
-	nv_wo32(priv->ramfc, chan->ramfc + ctx, 0x00000000);
-
-	nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&priv->base.lock, flags);
-	return 0;
-}
-
-static int
-nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
-{
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nv04_fifo_priv *priv = (void *)engine;
-	struct nv04_fifo_chan *chan;
-	int ret;
-
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x1000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->context_attach = nv40_fifo_context_attach;
-	nv_parent(chan)->context_detach = nv40_fifo_context_detach;
-	nv_parent(chan)->object_attach = nv40_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
-	chan->ramfc = chan->base.chid * 128;
-
-	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
-			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
-			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
-			     NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
-			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	nv_wo32(priv->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
-	return 0;
-}
-
-static struct nvkm_ofuncs
-nv40_fifo_ofuncs = {
-	.ctor = nv40_fifo_chan_ctor,
-	.dtor = nv04_fifo_chan_dtor,
-	.init = nv04_fifo_chan_init,
-	.fini = nv04_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv40_fifo_sclass[] = {
-	{ NV40_CHANNEL_DMA, &nv40_fifo_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv40_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fifo_context_ctor,
-		.dtor = _nvkm_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static int
-nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv04_instmem_priv *imem = nv04_instmem(parent);
-	struct nv04_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nvkm_ramht_ref(imem->ramht, &priv->ramht);
-	nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
-	nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &nv40_fifo_cclass;
-	nv_engine(priv)->sclass = nv40_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	priv->ramfc_desc = nv40_ramfc;
-	return 0;
-}
-
-static int
-nv40_fifo_init(struct nvkm_object *object)
-{
-	struct nv04_fifo_priv *priv = (void *)object;
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	int ret;
-
-	ret = nvkm_fifo_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x002040, 0x000000ff);
-	nv_wr32(priv, 0x002044, 0x2101ffff);
-	nv_wr32(priv, 0x002058, 0x00000001);
-
-	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-				       ((priv->ramht->bits - 9) << 16) |
-				        (priv->ramht->gpuobj.addr >> 8));
-	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
-
-	switch (nv_device(priv)->chipset) {
+	switch (device->chipset) {
 	case 0x47:
 	case 0x49:
 	case 0x4b:
-		nv_wr32(priv, 0x002230, 0x00000001);
+		nvkm_wr32(device, 0x002230, 0x00000001);
 	case 0x40:
 	case 0x41:
 	case 0x42:
 	case 0x43:
 	case 0x45:
 	case 0x48:
-		nv_wr32(priv, 0x002220, 0x00030002);
+		nvkm_wr32(device, 0x002220, 0x00030002);
 		break;
 	default:
-		nv_wr32(priv, 0x002230, 0x00000000);
-		nv_wr32(priv, 0x002220, ((pfb->ram->size - 512 * 1024 +
-					 priv->ramfc->addr) >> 16) |
-					0x00030000);
+		nvkm_wr32(device, 0x002230, 0x00000000);
+		nvkm_wr32(device, 0x002220, ((fb->ram->size - 512 * 1024 +
+					      nvkm_memory_addr(ramfc)) >> 16) |
+					    0x00030000);
 		break;
 	}
 
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.nr - 1);
 
-	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
-	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
 
-	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
-	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
-	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
-	return 0;
+	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
 }
 
-struct nvkm_oclass *
-nv40_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_fifo_ctor,
-		.dtor = nv04_fifo_dtor,
-		.init = nv40_fifo_init,
-		.fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+nv40_fifo = {
+	.init = nv40_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.chan = {
+		&nv40_fifo_dma_oclass,
+		NULL
 	},
 };
+
+int
+nv40_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return nv04_fifo_new_(&nv40_fifo, device, index, 32,
+			      nv40_fifo_ramfc, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index f25f0fd..66eb12c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -22,513 +22,126 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
-#include "nv04.h"
+#include "channv50.h"
 
-#include <core/client.h>
-#include <core/engctx.h>
-#include <core/ramht.h>
-#include <subdev/bar.h>
-#include <subdev/mmu.h>
-#include <subdev/timer.h>
-
-#include <nvif/class.h>
-#include <nvif/unpack.h>
-
-/*******************************************************************************
- * FIFO channel objects
- ******************************************************************************/
+#include <core/gpuobj.h>
 
 static void
-nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv)
+nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo)
 {
-	struct nvkm_bar *bar = nvkm_bar(priv);
-	struct nvkm_gpuobj *cur;
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_memory *cur;
 	int i, p;
 
-	cur = priv->playlist[priv->cur_playlist];
-	priv->cur_playlist = !priv->cur_playlist;
+	cur = fifo->runlist[fifo->cur_runlist];
+	fifo->cur_runlist = !fifo->cur_runlist;
 
-	for (i = priv->base.min, p = 0; i < priv->base.max; i++) {
-		if (nv_rd32(priv, 0x002600 + (i * 4)) & 0x80000000)
-			nv_wo32(cur, p++ * 4, i);
+	nvkm_kmap(cur);
+	for (i = 0, p = 0; i < fifo->base.nr; i++) {
+		if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000)
+			nvkm_wo32(cur, p++ * 4, i);
 	}
+	nvkm_done(cur);
 
-	bar->flush(bar);
-
-	nv_wr32(priv, 0x0032f4, cur->addr >> 12);
-	nv_wr32(priv, 0x0032ec, p);
-	nv_wr32(priv, 0x002500, 0x00000101);
+	nvkm_wr32(device, 0x0032f4, nvkm_memory_addr(cur) >> 12);
+	nvkm_wr32(device, 0x0032ec, p);
+	nvkm_wr32(device, 0x002500, 0x00000101);
 }
 
 void
-nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
+nv50_fifo_runlist_update(struct nv50_fifo *fifo)
 {
-	mutex_lock(&nv_subdev(priv)->mutex);
-	nv50_fifo_playlist_update_locked(priv);
-	mutex_unlock(&nv_subdev(priv)->mutex);
-}
-
-static int
-nv50_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent->parent;
-	struct nvkm_gpuobj *ectx = (void *)object;
-	u64 limit = ectx->addr + ectx->size - 1;
-	u64 start = ectx->addr;
-	u32 addr;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW   : return 0;
-	case NVDEV_ENGINE_GR   : addr = 0x0000; break;
-	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
-	default:
-		return -EINVAL;
-	}
-
-	nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
-	nv_wo32(base->eng, addr + 0x00, 0x00190000);
-	nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
-	nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
-	nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
-					upper_32_bits(start));
-	nv_wo32(base->eng, addr + 0x10, 0x00000000);
-	nv_wo32(base->eng, addr + 0x14, 0x00000000);
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
-			 struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_priv *priv = (void *)parent->engine;
-	struct nv50_fifo_base *base = (void *)parent->parent;
-	struct nv50_fifo_chan *chan = (void *)parent;
-	u32 addr, me;
-	int ret = 0;
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_SW   : return 0;
-	case NVDEV_ENGINE_GR   : addr = 0x0000; break;
-	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
-	default:
-		return -EINVAL;
-	}
-
-	/* HW bug workaround:
-	 *
-	 * PFIFO will hang forever if the connected engines don't report
-	 * that they've processed the context switch request.
-	 *
-	 * In order for the kickoff to work, we need to ensure all the
-	 * connected engines are in a state where they can answer.
-	 *
-	 * Newer chipsets don't seem to suffer from this issue, and well,
-	 * there's also an "ignore these engines" bitmask reg we can use
-	 * if we hit the issue there...
-	 */
-	me = nv_mask(priv, 0x00b860, 0x00000001, 0x00000001);
-
-	/* do the kickoff... */
-	nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
-	if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
-		nv_error(priv, "channel %d [%s] unload timeout\n",
-			 chan->base.chid, nvkm_client_name(chan));
-		if (suspend)
-			ret = -EBUSY;
-	}
-	nv_wr32(priv, 0x00b860, me);
-
-	if (ret == 0) {
-		nv_wo32(base->eng, addr + 0x00, 0x00000000);
-		nv_wo32(base->eng, addr + 0x04, 0x00000000);
-		nv_wo32(base->eng, addr + 0x08, 0x00000000);
-		nv_wo32(base->eng, addr + 0x0c, 0x00000000);
-		nv_wo32(base->eng, addr + 0x10, 0x00000000);
-		nv_wo32(base->eng, addr + 0x14, 0x00000000);
-		bar->flush(bar);
-	}
-
-	return ret;
-}
-
-static int
-nv50_fifo_object_attach(struct nvkm_object *parent,
-			struct nvkm_object *object, u32 handle)
-{
-	struct nv50_fifo_chan *chan = (void *)parent;
-	u32 context;
-
-	if (nv_iclass(object, NV_GPUOBJ_CLASS))
-		context = nv_gpuobj(object)->node->offset >> 4;
-	else
-		context = 0x00000004; /* just non-zero */
-
-	switch (nv_engidx(object->engine)) {
-	case NVDEV_ENGINE_DMAOBJ:
-	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
-	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
-	case NVDEV_ENGINE_MPEG  : context |= 0x00200000; break;
-	default:
-		return -EINVAL;
-	}
-
-	return nvkm_ramht_insert(chan->ramht, 0, handle, context);
-}
-
-void
-nv50_fifo_object_detach(struct nvkm_object *parent, int cookie)
-{
-	struct nv50_fifo_chan *chan = (void *)parent;
-	nvkm_ramht_remove(chan->ramht, cookie);
-}
-
-static int
-nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
-			struct nvkm_oclass *oclass, void *data, u32 size,
-			struct nvkm_object **pobject)
-{
-	union {
-		struct nv03_channel_dma_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent;
-	struct nv50_fifo_chan *chan;
-	int ret;
-
-	nv_ioctl(parent, "create channel dma size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
-				 "offset %016llx\n", args->v0.version,
-			 args->v0.pushbuf, args->v0.offset);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x2000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
-	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
-	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
-	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
-			     &chan->ramht);
-	if (ret)
-		return ret;
-
-	nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
-	nv_wo32(base->ramfc, 0x3c, 0x003f6078);
-	nv_wo32(base->ramfc, 0x44, 0x01003fff);
-	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-	nv_wo32(base->ramfc, 0x4c, 0xffffffff);
-	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
-	nv_wo32(base->ramfc, 0x78, 0x00000000);
-	nv_wo32(base->ramfc, 0x7c, 0x30000001);
-	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-				   (4 << 24) /* SEARCH_FULL */ |
-				   (chan->ramht->gpuobj.node->offset >> 4));
-	bar->flush(bar);
-	return 0;
-}
-
-static int
-nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
-			struct nvkm_oclass *oclass, void *data, u32 size,
-			struct nvkm_object **pobject)
-{
-	union {
-		struct nv50_channel_gpfifo_v0 v0;
-	} *args = data;
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_fifo_base *base = (void *)parent;
-	struct nv50_fifo_chan *chan;
-	u64 ioffset, ilength;
-	int ret;
-
-	nv_ioctl(parent, "create channel gpfifo size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
-				 "ioffset %016llx ilength %08x\n",
-			 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
-			 args->v0.ilength);
-	} else
-		return ret;
-
-	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
-				       0x2000, args->v0.pushbuf,
-				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
-				       (1ULL << NVDEV_ENGINE_SW) |
-				       (1ULL << NVDEV_ENGINE_GR) |
-				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	args->v0.chid = chan->base.chid;
-
-	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
-	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
-	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
-	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
-
-	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
-			     &chan->ramht);
-	if (ret)
-		return ret;
-
-	ioffset = args->v0.ioffset;
-	ilength = order_base_2(args->v0.ilength / 8);
-
-	nv_wo32(base->ramfc, 0x3c, 0x403f6078);
-	nv_wo32(base->ramfc, 0x44, 0x01003fff);
-	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
-	nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
-	nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
-	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
-	nv_wo32(base->ramfc, 0x78, 0x00000000);
-	nv_wo32(base->ramfc, 0x7c, 0x30000001);
-	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
-				   (4 << 24) /* SEARCH_FULL */ |
-				   (chan->ramht->gpuobj.node->offset >> 4));
-	bar->flush(bar);
-	return 0;
-}
-
-void
-nv50_fifo_chan_dtor(struct nvkm_object *object)
-{
-	struct nv50_fifo_chan *chan = (void *)object;
-	nvkm_ramht_ref(NULL, &chan->ramht);
-	nvkm_fifo_channel_destroy(&chan->base);
-}
-
-static int
-nv50_fifo_chan_init(struct nvkm_object *object)
-{
-	struct nv50_fifo_priv *priv = (void *)object->engine;
-	struct nv50_fifo_base *base = (void *)object->parent;
-	struct nv50_fifo_chan *chan = (void *)object;
-	struct nvkm_gpuobj *ramfc = base->ramfc;
-	u32 chid = chan->base.chid;
-	int ret;
-
-	ret = nvkm_fifo_channel_init(&chan->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
-	nv50_fifo_playlist_update(priv);
-	return 0;
+	mutex_lock(&fifo->base.engine.subdev.mutex);
+	nv50_fifo_runlist_update_locked(fifo);
+	mutex_unlock(&fifo->base.engine.subdev.mutex);
 }
 
 int
-nv50_fifo_chan_fini(struct nvkm_object *object, bool suspend)
+nv50_fifo_oneinit(struct nvkm_fifo *base)
 {
-	struct nv50_fifo_priv *priv = (void *)object->engine;
-	struct nv50_fifo_chan *chan = (void *)object;
-	u32 chid = chan->base.chid;
-
-	/* remove channel from playlist, fifo will unload context */
-	nv_mask(priv, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
-	nv50_fifo_playlist_update(priv);
-	nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000);
-
-	return nvkm_fifo_channel_fini(&chan->base, suspend);
-}
-
-static struct nvkm_ofuncs
-nv50_fifo_ofuncs_dma = {
-	.ctor = nv50_fifo_chan_ctor_dma,
-	.dtor = nv50_fifo_chan_dtor,
-	.init = nv50_fifo_chan_init,
-	.fini = nv50_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_ofuncs
-nv50_fifo_ofuncs_ind = {
-	.ctor = nv50_fifo_chan_ctor_ind,
-	.dtor = nv50_fifo_chan_dtor,
-	.init = nv50_fifo_chan_init,
-	.fini = nv50_fifo_chan_fini,
-	.map  = _nvkm_fifo_channel_map,
-	.rd32 = _nvkm_fifo_channel_rd32,
-	.wr32 = _nvkm_fifo_channel_wr32,
-	.ntfy = _nvkm_fifo_channel_ntfy
-};
-
-static struct nvkm_oclass
-nv50_fifo_sclass[] = {
-	{ NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma },
-	{ NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind },
-	{}
-};
-
-/*******************************************************************************
- * FIFO context - basically just the instmem reserved for the channel
- ******************************************************************************/
-
-static int
-nv50_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
-{
-	struct nv50_fifo_base *base;
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	int ret;
 
-	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
-				       0x1000, NVOBJ_FLAG_HEAP, &base);
-	*pobject = nv_object(base);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
+			      false, &fifo->runlist[0]);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x0200,
-			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0,
-			      &base->pgd);
-	if (ret)
-		return ret;
-
-	ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
-	if (ret)
-		return ret;
-
-	return 0;
+	return nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
+			       false, &fifo->runlist[1]);
 }
 
 void
-nv50_fifo_context_dtor(struct nvkm_object *object)
+nv50_fifo_init(struct nvkm_fifo *base)
 {
-	struct nv50_fifo_base *base = (void *)object;
-	nvkm_vm_ref(NULL, &base->vm, base->pgd);
-	nvkm_gpuobj_ref(NULL, &base->pgd);
-	nvkm_gpuobj_ref(NULL, &base->eng);
-	nvkm_gpuobj_ref(NULL, &base->ramfc);
-	nvkm_gpuobj_ref(NULL, &base->cache);
-	nvkm_fifo_context_destroy(&base->base);
-}
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	int i;
 
-static struct nvkm_oclass
-nv50_fifo_cclass = {
-	.handle = NV_ENGCTX(FIFO, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fifo_context_ctor,
-		.dtor = nv50_fifo_context_dtor,
-		.init = _nvkm_fifo_context_init,
-		.fini = _nvkm_fifo_context_fini,
-		.rd32 = _nvkm_fifo_context_rd32,
-		.wr32 = _nvkm_fifo_context_wr32,
-	},
-};
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
+	nvkm_wr32(device, 0x00250c, 0x6f3cfc34);
+	nvkm_wr32(device, 0x002044, 0x01003fff);
 
-/*******************************************************************************
- * PFIFO engine
- ******************************************************************************/
-
-static int
-nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv50_fifo_priv *priv;
-	int ret;
-
-	ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
-			      &priv->playlist[0]);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
-			      &priv->playlist[1]);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000100;
-	nv_subdev(priv)->intr = nv04_fifo_intr;
-	nv_engine(priv)->cclass = &nv50_fifo_cclass;
-	nv_engine(priv)->sclass = nv50_fifo_sclass;
-	priv->base.pause = nv04_fifo_pause;
-	priv->base.start = nv04_fifo_start;
-	return 0;
-}
-
-void
-nv50_fifo_dtor(struct nvkm_object *object)
-{
-	struct nv50_fifo_priv *priv = (void *)object;
-
-	nvkm_gpuobj_ref(NULL, &priv->playlist[1]);
-	nvkm_gpuobj_ref(NULL, &priv->playlist[0]);
-
-	nvkm_fifo_destroy(&priv->base);
-}
-
-int
-nv50_fifo_init(struct nvkm_object *object)
-{
-	struct nv50_fifo_priv *priv = (void *)object;
-	int ret, i;
-
-	ret = nvkm_fifo_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
-	nv_wr32(priv, 0x00250c, 0x6f3cfc34);
-	nv_wr32(priv, 0x002044, 0x01003fff);
-
-	nv_wr32(priv, 0x002100, 0xffffffff);
-	nv_wr32(priv, 0x002140, 0xbfffffff);
+	nvkm_wr32(device, 0x002100, 0xffffffff);
+	nvkm_wr32(device, 0x002140, 0xbfffffff);
 
 	for (i = 0; i < 128; i++)
-		nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
-	nv50_fifo_playlist_update_locked(priv);
+		nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000);
+	nv50_fifo_runlist_update_locked(fifo);
 
-	nv_wr32(priv, 0x003200, 0x00000001);
-	nv_wr32(priv, 0x003250, 0x00000001);
-	nv_wr32(priv, 0x002500, 0x00000001);
+	nvkm_wr32(device, 0x003200, 0x00000001);
+	nvkm_wr32(device, 0x003250, 0x00000001);
+	nvkm_wr32(device, 0x002500, 0x00000001);
+}
+
+void *
+nv50_fifo_dtor(struct nvkm_fifo *base)
+{
+	struct nv50_fifo *fifo = nv50_fifo(base);
+	nvkm_memory_del(&fifo->runlist[1]);
+	nvkm_memory_del(&fifo->runlist[0]);
+	return fifo;
+}
+
+int
+nv50_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_fifo **pfifo)
+{
+	struct nv50_fifo *fifo;
+	int ret;
+
+	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+		return -ENOMEM;
+	*pfifo = &fifo->base;
+
+	ret = nvkm_fifo_ctor(func, device, index, 128, &fifo->base);
+	if (ret)
+		return ret;
+
+	set_bit(0, fifo->base.mask); /* PIO channel */
+	set_bit(127, fifo->base.mask); /* inactive channel */
 	return 0;
 }
 
-struct nvkm_oclass *
-nv50_fifo_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(FIFO, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fifo_ctor,
-		.dtor = nv50_fifo_dtor,
-		.init = nv50_fifo_init,
-		.fini = _nvkm_fifo_fini,
+static const struct nvkm_fifo_func
+nv50_fifo = {
+	.dtor = nv50_fifo_dtor,
+	.oneinit = nv50_fifo_oneinit,
+	.init = nv50_fifo_init,
+	.intr = nv04_fifo_intr,
+	.pause = nv04_fifo_pause,
+	.start = nv04_fifo_start,
+	.chan = {
+		&nv50_fifo_dma_oclass,
+		&nv50_fifo_gpfifo_oclass,
+		NULL
 	},
 };
+
+int
+nv50_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+	return nv50_fifo_new_(&nv50_fifo, device, index, pfifo);
+}
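
nv50_fifo_runlist_update_locked() above is a double-buffer flip: the new list is built in the idle copy, then the GPU is pointed at it (the 0x0032f4/0x0032ec writes), so the hardware never reads a half-written runlist. A generic sketch of the same flip, with a hypothetical submit callback standing in for the register writes:

	/* Sketch: double-buffered list flip, mirroring
	 * nv50_fifo_runlist_update_locked().  'submit' is a hypothetical
	 * stand-in for handing the finished buffer to the hardware. */
	struct runlist_db {
		u32 *list[2];	/* two CPU-visible buffers */
		int  cur;	/* index of the buffer last handed to the GPU */
	};

	static void
	runlist_flip(struct runlist_db *db, const u32 *chid, int nr,
		     void (*submit)(const u32 *list, int nr))
	{
		int next = db->cur ^ 1;		/* build into the idle copy */
		int i;

		for (i = 0; i < nr; i++)
			db->list[next][i] = chid[i];

		submit(db->list[next], nr);	/* GPU switches to the new list */
		db->cur = next;
	}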
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
index 09ed93c..8ab5394 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
@@ -1,36 +1,19 @@
 #ifndef __NV50_FIFO_H__
 #define __NV50_FIFO_H__
-#include <engine/fifo.h>
+#define nv50_fifo(p) container_of((p), struct nv50_fifo, base)
+#include "priv.h"
 
-struct nv50_fifo_priv {
+struct nv50_fifo {
 	struct nvkm_fifo base;
-	struct nvkm_gpuobj *playlist[2];
-	int cur_playlist;
+	struct nvkm_memory *runlist[2];
+	int cur_runlist;
 };
 
-struct nv50_fifo_base {
-	struct nvkm_fifo_base base;
-	struct nvkm_gpuobj *ramfc;
-	struct nvkm_gpuobj *cache;
-	struct nvkm_gpuobj *eng;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *vm;
-};
+int nv50_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
+		   int index, struct nvkm_fifo **);
 
-struct nv50_fifo_chan {
-	struct nvkm_fifo_chan base;
-	u32 subc[8];
-	struct nvkm_ramht *ramht;
-};
-
-void nv50_fifo_playlist_update(struct nv50_fifo_priv *);
-
-void nv50_fifo_object_detach(struct nvkm_object *, int);
-void nv50_fifo_chan_dtor(struct nvkm_object *);
-int  nv50_fifo_chan_fini(struct nvkm_object *, bool);
-
-void nv50_fifo_context_dtor(struct nvkm_object *);
-
-void nv50_fifo_dtor(struct nvkm_object *);
-int  nv50_fifo_init(struct nvkm_object *);
+void *nv50_fifo_dtor(struct nvkm_fifo *);
+int nv50_fifo_oneinit(struct nvkm_fifo *);
+void nv50_fifo_init(struct nvkm_fifo *);
+void nv50_fifo_runlist_update(struct nv50_fifo *);
 #endif
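
The nv50_fifo(p) macro introduced here is the usual kernel downcast idiom: container_of() recovers the derived structure from a pointer to its embedded nvkm_fifo base, as a constant-offset subtraction computed at compile time. A short illustration (sketch only, not driver code):

	/* Sketch: container_of() walks from the embedded member back to
	 * the outer struct; container_of() comes from <linux/kernel.h>. */
	static void
	example(struct nvkm_fifo *base)
	{
		struct nv50_fifo *fifo = nv50_fifo(base);

		/* 'fifo' now exposes the derived state alongside 'base' */
		fifo->cur_runlist = 0;
	}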
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
new file mode 100644
index 0000000..cb1432e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_FIFO_PRIV_H__
+#define __NVKM_FIFO_PRIV_H__
+#define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
+#include <engine/fifo.h>
+
+int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *,
+		   int index, int nr, struct nvkm_fifo *);
+void nvkm_fifo_uevent(struct nvkm_fifo *);
+
+struct nvkm_fifo_func {
+	void *(*dtor)(struct nvkm_fifo *);
+	int (*oneinit)(struct nvkm_fifo *);
+	void (*init)(struct nvkm_fifo *);
+	void (*fini)(struct nvkm_fifo *);
+	void (*intr)(struct nvkm_fifo *);
+	void (*pause)(struct nvkm_fifo *, unsigned long *);
+	void (*start)(struct nvkm_fifo *, unsigned long *);
+	void (*uevent_init)(struct nvkm_fifo *);
+	void (*uevent_fini)(struct nvkm_fifo *);
+	const struct nvkm_fifo_chan_oclass *chan[];
+};
+
+void nv04_fifo_intr(struct nvkm_fifo *);
+void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
+void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
+#endif
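
Note that chan[] is a flexible array terminated by the NULL entry each implementation supplies (see the nv10/nv17/nv40/nv50 tables above), so no separate count needs to be carried. A sketch of how such a table is walked:

	/* Sketch: walking the NULL-terminated chan[] table of an
	 * nvkm_fifo_func to count supported channel classes. */
	static int
	count_channel_classes(const struct nvkm_fifo_func *func)
	{
		int nr = 0;

		while (func->chan[nr])	/* list ends at the NULL sentinel */
			nr++;
		return nr;
	}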
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
new file mode 100644
index 0000000..92d5622
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
@@ -0,0 +1,132 @@
+#ifndef __NV04_FIFO_REGS_H__
+#define __NV04_FIFO_REGS_H__
+
+#define NV04_PFIFO_DELAY_0                                 0x00002040
+#define NV04_PFIFO_DMA_TIMESLICE                           0x00002044
+#define NV04_PFIFO_NEXT_CHANNEL                            0x00002050
+#define NV03_PFIFO_INTR_0                                  0x00002100
+#define NV03_PFIFO_INTR_EN_0                               0x00002140
+#    define NV_PFIFO_INTR_CACHE_ERROR                          (1<<0)
+#    define NV_PFIFO_INTR_RUNOUT                               (1<<4)
+#    define NV_PFIFO_INTR_RUNOUT_OVERFLOW                      (1<<8)
+#    define NV_PFIFO_INTR_DMA_PUSHER                          (1<<12)
+#    define NV_PFIFO_INTR_DMA_PT                              (1<<16)
+#    define NV_PFIFO_INTR_SEMAPHORE                           (1<<20)
+#    define NV_PFIFO_INTR_ACQUIRE_TIMEOUT                     (1<<24)
+#define NV03_PFIFO_RAMHT                                   0x00002210
+#define NV03_PFIFO_RAMFC                                   0x00002214
+#define NV03_PFIFO_RAMRO                                   0x00002218
+#define NV40_PFIFO_RAMFC                                   0x00002220
+#define NV03_PFIFO_CACHES                                  0x00002500
+#define NV04_PFIFO_MODE                                    0x00002504
+#define NV04_PFIFO_DMA                                     0x00002508
+#define NV04_PFIFO_SIZE                                    0x0000250c
+#define NV50_PFIFO_CTX_TABLE(c)                        (0x2600+(c)*4)
+#define NV50_PFIFO_CTX_TABLE__SIZE                                128
+#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED                  (1<<31)
+#define NV50_PFIFO_CTX_TABLE_UNK30_BAD                        (1<<30)
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80             0x0FFFFFFF
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84             0x00FFFFFF
+#define NV03_PFIFO_CACHE0_PUSH0                            0x00003000
+#define NV03_PFIFO_CACHE0_PULL0                            0x00003040
+#define NV04_PFIFO_CACHE0_PULL0                            0x00003050
+#define NV04_PFIFO_CACHE0_PULL1                            0x00003054
+#define NV03_PFIFO_CACHE1_PUSH0                            0x00003200
+#define NV03_PFIFO_CACHE1_PUSH1                            0x00003204
+#define NV03_PFIFO_CACHE1_PUSH1_DMA                            (1<<8)
+#define NV40_PFIFO_CACHE1_PUSH1_DMA                           (1<<16)
+#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000000f
+#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000001f
+#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000007f
+#define NV03_PFIFO_CACHE1_PUT                              0x00003210
+#define NV04_PFIFO_CACHE1_DMA_PUSH                         0x00003220
+#define NV04_PFIFO_CACHE1_DMA_FETCH                        0x00003224
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES         0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES        0x00000008
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES        0x00000010
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES        0x00000018
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES        0x00000020
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES        0x00000028
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES        0x00000030
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES        0x00000038
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES        0x00000040
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES        0x00000048
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES        0x00000050
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES        0x00000058
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES       0x00000060
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES       0x00000068
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES       0x00000070
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES       0x00000078
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES       0x00000080
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES       0x00000088
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES       0x00000090
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES       0x00000098
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES       0x000000A0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES       0x000000A8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES       0x000000B0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES       0x000000B8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES       0x000000C0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES       0x000000C8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES       0x000000D0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES       0x000000D8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES       0x000000E0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES       0x000000E8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES       0x000000F0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES       0x000000F8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE                 0x0000E000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES        0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES        0x00002000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES        0x00004000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES       0x00006000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES       0x00008000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES       0x0000A000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES       0x0000C000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES       0x0000E000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS             0x001F0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0           0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1           0x00010000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2           0x00020000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3           0x00030000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4           0x00040000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5           0x00050000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6           0x00060000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7           0x00070000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8           0x00080000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9           0x00090000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10          0x000A0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11          0x000B0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12          0x000C0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13          0x000D0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14          0x000E0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15          0x000F0000
+#    define NV_PFIFO_CACHE1_ENDIAN                         0x80000000
+#    define NV_PFIFO_CACHE1_LITTLE_ENDIAN                  0x7FFFFFFF
+#    define NV_PFIFO_CACHE1_BIG_ENDIAN                     0x80000000
+#define NV04_PFIFO_CACHE1_DMA_STATE                        0x00003228
+#define NV04_PFIFO_CACHE1_DMA_INSTANCE                     0x0000322c
+#define NV04_PFIFO_CACHE1_DMA_CTL                          0x00003230
+#define NV04_PFIFO_CACHE1_DMA_PUT                          0x00003240
+#define NV04_PFIFO_CACHE1_DMA_GET                          0x00003244
+#define NV10_PFIFO_CACHE1_REF_CNT                          0x00003248
+#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE                   0x0000324C
+#define NV03_PFIFO_CACHE1_PULL0                            0x00003240
+#define NV04_PFIFO_CACHE1_PULL0                            0x00003250
+#    define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED            0x00000010
+#    define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY              0x00001000
+#define NV03_PFIFO_CACHE1_PULL1                            0x00003250
+#define NV04_PFIFO_CACHE1_PULL1                            0x00003254
+#define NV04_PFIFO_CACHE1_HASH                             0x00003258
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT                  0x00003260
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP                0x00003264
+#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE                    0x00003268
+#define NV10_PFIFO_CACHE1_SEMAPHORE                        0x0000326C
+#define NV03_PFIFO_CACHE1_GET                              0x00003270
+#define NV04_PFIFO_CACHE1_ENGINE                           0x00003280
+#define NV04_PFIFO_CACHE1_DMA_DCOUNT                       0x000032A0
+#define NV40_PFIFO_GRCTX_INSTANCE                          0x000032E0
+#define NV40_PFIFO_UNK32E4                                 0x000032E4
+#define NV04_PFIFO_CACHE1_METHOD(i)                (0x00003800+(i*8))
+#define NV04_PFIFO_CACHE1_DATA(i)                  (0x00003804+(i*8))
+#define NV40_PFIFO_CACHE1_METHOD(i)                (0x00090000+(i*8))
+#define NV40_PFIFO_CACHE1_DATA(i)                  (0x00090004+(i*8))
+#endif
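
These CACHE1_DMA_FETCH values are per-field settings that get OR'd together, one choice per field, exactly as the channel constructors deleted above did. For example (taken from the removed nv10/nv17 ctors):

	u32 fetch = NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
		    NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
	#ifdef __BIG_ENDIAN
		    NV_PFIFO_CACHE1_BIG_ENDIAN |
	#endif
		    NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8;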
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 2e1b92f..9ad0d0e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -1,21 +1,8 @@
-nvkm-y += nvkm/engine/gr/ctxnv40.o
-nvkm-y += nvkm/engine/gr/ctxnv50.o
-nvkm-y += nvkm/engine/gr/ctxgf100.o
-nvkm-y += nvkm/engine/gr/ctxgf108.o
-nvkm-y += nvkm/engine/gr/ctxgf104.o
-nvkm-y += nvkm/engine/gr/ctxgf110.o
-nvkm-y += nvkm/engine/gr/ctxgf117.o
-nvkm-y += nvkm/engine/gr/ctxgf119.o
-nvkm-y += nvkm/engine/gr/ctxgk104.o
-nvkm-y += nvkm/engine/gr/ctxgk20a.o
-nvkm-y += nvkm/engine/gr/ctxgk110.o
-nvkm-y += nvkm/engine/gr/ctxgk110b.o
-nvkm-y += nvkm/engine/gr/ctxgk208.o
-nvkm-y += nvkm/engine/gr/ctxgm107.o
-nvkm-y += nvkm/engine/gr/ctxgm204.o
-nvkm-y += nvkm/engine/gr/ctxgm206.o
+nvkm-y += nvkm/engine/gr/base.o
 nvkm-y += nvkm/engine/gr/nv04.o
 nvkm-y += nvkm/engine/gr/nv10.o
+nvkm-y += nvkm/engine/gr/nv15.o
+nvkm-y += nvkm/engine/gr/nv17.o
 nvkm-y += nvkm/engine/gr/nv20.o
 nvkm-y += nvkm/engine/gr/nv25.o
 nvkm-y += nvkm/engine/gr/nv2a.o
@@ -23,18 +10,43 @@
 nvkm-y += nvkm/engine/gr/nv34.o
 nvkm-y += nvkm/engine/gr/nv35.o
 nvkm-y += nvkm/engine/gr/nv40.o
+nvkm-y += nvkm/engine/gr/nv44.o
 nvkm-y += nvkm/engine/gr/nv50.o
+nvkm-y += nvkm/engine/gr/g84.o
+nvkm-y += nvkm/engine/gr/gt200.o
+nvkm-y += nvkm/engine/gr/mcp79.o
+nvkm-y += nvkm/engine/gr/gt215.o
+nvkm-y += nvkm/engine/gr/mcp89.o
 nvkm-y += nvkm/engine/gr/gf100.o
-nvkm-y += nvkm/engine/gr/gf108.o
 nvkm-y += nvkm/engine/gr/gf104.o
+nvkm-y += nvkm/engine/gr/gf108.o
 nvkm-y += nvkm/engine/gr/gf110.o
 nvkm-y += nvkm/engine/gr/gf117.o
 nvkm-y += nvkm/engine/gr/gf119.o
 nvkm-y += nvkm/engine/gr/gk104.o
-nvkm-y += nvkm/engine/gr/gk20a.o
 nvkm-y += nvkm/engine/gr/gk110.o
 nvkm-y += nvkm/engine/gr/gk110b.o
 nvkm-y += nvkm/engine/gr/gk208.o
+nvkm-y += nvkm/engine/gr/gk20a.o
 nvkm-y += nvkm/engine/gr/gm107.o
 nvkm-y += nvkm/engine/gr/gm204.o
 nvkm-y += nvkm/engine/gr/gm206.o
+nvkm-y += nvkm/engine/gr/gm20b.o
+
+nvkm-y += nvkm/engine/gr/ctxnv40.o
+nvkm-y += nvkm/engine/gr/ctxnv50.o
+nvkm-y += nvkm/engine/gr/ctxgf100.o
+nvkm-y += nvkm/engine/gr/ctxgf104.o
+nvkm-y += nvkm/engine/gr/ctxgf108.o
+nvkm-y += nvkm/engine/gr/ctxgf110.o
+nvkm-y += nvkm/engine/gr/ctxgf117.o
+nvkm-y += nvkm/engine/gr/ctxgf119.o
+nvkm-y += nvkm/engine/gr/ctxgk104.o
+nvkm-y += nvkm/engine/gr/ctxgk110.o
+nvkm-y += nvkm/engine/gr/ctxgk110b.o
+nvkm-y += nvkm/engine/gr/ctxgk208.o
+nvkm-y += nvkm/engine/gr/ctxgk20a.o
+nvkm-y += nvkm/engine/gr/ctxgm107.o
+nvkm-y += nvkm/engine/gr/ctxgm204.o
+nvkm-y += nvkm/engine/gr/ctxgm206.o
+nvkm-y += nvkm/engine/gr/ctxgm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
new file mode 100644
index 0000000..090765f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+#include <engine/fifo.h>
+
+static void
+nvkm_gr_tile(struct nvkm_engine *engine, int region, struct nvkm_fb_tile *tile)
+{
+	struct nvkm_gr *gr = nvkm_gr(engine);
+	if (gr->func->tile)
+		gr->func->tile(gr, region, tile);
+}
+
+u64
+nvkm_gr_units(struct nvkm_gr *gr)
+{
+	if (gr->func->units)
+		return gr->func->units(gr);
+	return 0;
+}
+
+int
+nvkm_gr_tlb_flush(struct nvkm_gr *gr)
+{
+	if (gr->func->tlb_flush)
+		return gr->func->tlb_flush(gr);
+	return -ENODEV;
+}
+
+static int
+nvkm_gr_oclass_get(struct nvkm_oclass *oclass, int index)
+{
+	struct nvkm_gr *gr = nvkm_gr(oclass->engine);
+	int c = 0;
+
+	if (gr->func->object_get) {
+		int ret = gr->func->object_get(gr, index, &oclass->base);
+		if (oclass->base.oclass)
+			return index;
+		return ret;
+	}
+
+	while (gr->func->sclass[c].oclass) {
+		if (c++ == index) {
+			oclass->base = gr->func->sclass[index];
+			return index;
+		}
+	}
+
+	return c;
+}
+
+static int
+nvkm_gr_cclass_new(struct nvkm_fifo_chan *chan,
+		   const struct nvkm_oclass *oclass,
+		   struct nvkm_object **pobject)
+{
+	struct nvkm_gr *gr = nvkm_gr(oclass->engine);
+	if (gr->func->chan_new)
+		return gr->func->chan_new(gr, chan, oclass, pobject);
+	return 0;
+}
+
+static void
+nvkm_gr_intr(struct nvkm_engine *engine)
+{
+	struct nvkm_gr *gr = nvkm_gr(engine);
+	gr->func->intr(gr);
+}
+
+static int
+nvkm_gr_oneinit(struct nvkm_engine *engine)
+{
+	struct nvkm_gr *gr = nvkm_gr(engine);
+	if (gr->func->oneinit)
+		return gr->func->oneinit(gr);
+	return 0;
+}
+
+static int
+nvkm_gr_init(struct nvkm_engine *engine)
+{
+	struct nvkm_gr *gr = nvkm_gr(engine);
+	return gr->func->init(gr);
+}
+
+static void *
+nvkm_gr_dtor(struct nvkm_engine *engine)
+{
+	struct nvkm_gr *gr = nvkm_gr(engine);
+	if (gr->func->dtor)
+		return gr->func->dtor(gr);
+	return gr;
+}
+
+static const struct nvkm_engine_func
+nvkm_gr = {
+	.dtor = nvkm_gr_dtor,
+	.oneinit = nvkm_gr_oneinit,
+	.init = nvkm_gr_init,
+	.intr = nvkm_gr_intr,
+	.tile = nvkm_gr_tile,
+	.fifo.cclass = nvkm_gr_cclass_new,
+	.fifo.sclass = nvkm_gr_oclass_get,
+};
+
+int
+nvkm_gr_ctor(const struct nvkm_gr_func *func, struct nvkm_device *device,
+	     int index, u32 pmc_enable, bool enable, struct nvkm_gr *gr)
+{
+	gr->func = func;
+	return nvkm_engine_ctor(&nvkm_gr, device, index, pmc_enable,
+				enable, &gr->engine);
+}
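
base.c establishes the dispatch pattern used across this series: thin nvkm_engine_func wrappers forward into the per-implementation nvkm_gr_func, with required hooks (init, intr) called unconditionally and optional ones guarded behind a NULL check that falls back to a sane default. A trimmed sketch of the idiom, with a simplified struct standing in for the real nvkm_gr_func:

	/* Sketch of the optional-hook dispatch idiom from base.c; the
	 * struct is deliberately reduced for illustration. */
	struct gr_func_sketch {
		int (*oneinit)(void *gr);	/* optional */
		int (*init)(void *gr);		/* required */
		u64 (*units)(void *gr);		/* optional */
	};

	static u64
	gr_units(const struct gr_func_sketch *func, void *gr)
	{
		if (func->units)		/* optional hook: default to 0 */
			return func->units(gr);
		return 0;
	}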
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 57e2c5b..56f392d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -23,7 +23,6 @@
  */
 #include "ctxgf100.h"
 
-#include <subdev/bar.h>
 #include <subdev/fb.h>
 #include <subdev/mc.h>
 #include <subdev/timer.h>
@@ -1005,6 +1004,7 @@
 gf100_grctx_mmio_item(struct gf100_grctx *info, u32 addr, u32 data,
 		      int shift, int buffer)
 {
+	struct nvkm_device *device = info->gr->base.engine.subdev.device;
 	if (info->data) {
 		if (shift >= 0) {
 			info->mmio->addr = addr;
@@ -1021,29 +1021,29 @@
 			return;
 	}
 
-	nv_wr32(info->priv, addr, data);
+	nvkm_wr32(device, addr, data);
 }
 
 void
 gf100_grctx_generate_bundle(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
 	mmio_refn(info, 0x408004, 0x00000000, s, b);
-	mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_refn(info, 0x418808, 0x00000000, s, b);
-	mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x41880c, 0x80000000 | (grctx->bundle_size >> s));
 }
 
 void
 gf100_grctx_generate_pagepool(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
 	mmio_refn(info, 0x40800c, 0x00000000, s, b);
 	mmio_wr32(info, 0x408010, 0x80000000);
 	mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -1053,13 +1053,13 @@
 void
 gf100_grctx_generate_attrib(struct gf100_grctx *info)
 {
-	struct gf100_gr_priv *priv = info->priv;
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv);
-	const u32 attrib = impl->attrib_nr;
-	const u32   size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+	struct gf100_gr *gr = info->gr;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	const u32 attrib = grctx->attrib_nr;
+	const u32   size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
 	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
 	int gpc, tpc;
 	u32 bo = 0;
 
@@ -1067,91 +1067,95 @@
 	mmio_refn(info, 0x419848, 0x10000000, s, b);
 	mmio_wr32(info, 0x405830, (attrib << 16));
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
 			const u32 o = TPC_UNIT(gpc, tpc, 0x0520);
 			mmio_skip(info, o, (attrib << 16) | ++bo);
 			mmio_wr32(info, o, (attrib << 16) | --bo);
-			bo += impl->attrib_nr_max;
+			bo += grctx->attrib_nr_max;
 		}
 	}
 }
 
 void
-gf100_grctx_generate_unkn(struct gf100_gr_priv *priv)
+gf100_grctx_generate_unkn(struct gf100_gr *gr)
 {
 }
 
 void
-gf100_grctx_generate_tpcid(struct gf100_gr_priv *priv)
+gf100_grctx_generate_tpcid(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int gpc, tpc, id;
 
 	for (tpc = 0, id = 0; tpc < 4; tpc++) {
-		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-			if (tpc < priv->tpc_nr[gpc]) {
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x4e8), id);
-				nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+			if (tpc < gr->tpc_nr[gpc]) {
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x4e8), id);
+				nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
 				id++;
 			}
 
-			nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
-			nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+			nvkm_wr32(device, GPC_UNIT(gpc, 0x0c08), gr->tpc_nr[gpc]);
+			nvkm_wr32(device, GPC_UNIT(gpc, 0x0c8c), gr->tpc_nr[gpc]);
 		}
 	}
 }
 
 void
-gf100_grctx_generate_r406028(struct gf100_gr_priv *priv)
+gf100_grctx_generate_r406028(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 tmp[GPC_MAX / 8] = {}, i = 0;
-	for (i = 0; i < priv->gpc_nr; i++)
-		tmp[i / 8] |= priv->tpc_nr[i] << ((i % 8) * 4);
+	for (i = 0; i < gr->gpc_nr; i++)
+		tmp[i / 8] |= gr->tpc_nr[i] << ((i % 8) * 4);
 	for (i = 0; i < 4; i++) {
-		nv_wr32(priv, 0x406028 + (i * 4), tmp[i]);
-		nv_wr32(priv, 0x405870 + (i * 4), tmp[i]);
+		nvkm_wr32(device, 0x406028 + (i * 4), tmp[i]);
+		nvkm_wr32(device, 0x405870 + (i * 4), tmp[i]);
 	}
 }
 
 void
-gf100_grctx_generate_r4060a8(struct gf100_gr_priv *priv)
+gf100_grctx_generate_r4060a8(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u8  tpcnr[GPC_MAX], data[TPC_MAX];
 	int gpc, tpc, i;
 
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
 	memset(data, 0x1f, sizeof(data));
 
 	gpc = -1;
-	for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+	for (tpc = 0; tpc < gr->tpc_total; tpc++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
 		tpcnr[gpc]--;
 		data[tpc] = gpc;
 	}
 
 	for (i = 0; i < 4; i++)
-		nv_wr32(priv, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
+		nvkm_wr32(device, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
 }
 
 void
-gf100_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
+gf100_grctx_generate_r418bb8(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 data[6] = {}, data2[2] = {};
 	u8  tpcnr[GPC_MAX];
 	u8  shift, ntpcv;
 	int gpc, tpc, i;
 
 	/* calculate first set of magics */
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
 
 	gpc = -1;
-	for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+	for (tpc = 0; tpc < gr->tpc_total; tpc++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
 		tpcnr[gpc]--;
 
@@ -1163,7 +1167,7 @@
 
 	/* and the second... */
 	shift = 0;
-	ntpcv = priv->tpc_total;
+	ntpcv = gr->tpc_total;
 	while (!(ntpcv & (1 << 4))) {
 		ntpcv <<= 1;
 		shift++;
@@ -1176,202 +1180,211 @@
 		data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
 
 	/* GPC_BROADCAST */
-	nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr);
+	nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
+		nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
 
 	/* GPC_BROADCAST.TP_BROADCAST */
-	nv_wr32(priv, 0x419bd0, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr | data2[0]);
-	nv_wr32(priv, 0x419be4, data2[1]);
+	nvkm_wr32(device, 0x419bd0, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr | data2[0]);
+	nvkm_wr32(device, 0x419be4, data2[1]);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x419b00 + (i * 4), data[i]);
+		nvkm_wr32(device, 0x419b00 + (i * 4), data[i]);
 
 	/* UNK78xx */
-	nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr);
+	nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x40780c + (i * 4), data[i]);
+		nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
 }
 
 void
-gf100_grctx_generate_r406800(struct gf100_gr_priv *priv)
+gf100_grctx_generate_r406800(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u64 tpc_mask = 0, tpc_set = 0;
 	u8  tpcnr[GPC_MAX];
 	int gpc, tpc;
 	int i, a, b;
 
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++)
-		tpc_mask |= ((1ULL << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++)
+		tpc_mask |= ((1ULL << gr->tpc_nr[gpc]) - 1) << (gpc * 8);
 
 	for (i = 0, gpc = -1, b = -1; i < 32; i++) {
-		a = (i * (priv->tpc_total - 1)) / 32;
+		a = (i * (gr->tpc_total - 1)) / 32;
 		if (a != b) {
 			b = a;
 			do {
-				gpc = (gpc + 1) % priv->gpc_nr;
+				gpc = (gpc + 1) % gr->gpc_nr;
 			} while (!tpcnr[gpc]);
-			tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+			tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
 			tpc_set |= 1ULL << ((gpc * 8) + tpc);
 		}
 
-		nv_wr32(priv, 0x406800 + (i * 0x20), lower_32_bits(tpc_set));
-		nv_wr32(priv, 0x406c00 + (i * 0x20), lower_32_bits(tpc_set ^ tpc_mask));
-		if (priv->gpc_nr > 4) {
-			nv_wr32(priv, 0x406804 + (i * 0x20), upper_32_bits(tpc_set));
-			nv_wr32(priv, 0x406c04 + (i * 0x20), upper_32_bits(tpc_set ^ tpc_mask));
+		nvkm_wr32(device, 0x406800 + (i * 0x20), lower_32_bits(tpc_set));
+		nvkm_wr32(device, 0x406c00 + (i * 0x20), lower_32_bits(tpc_set ^ tpc_mask));
+		if (gr->gpc_nr > 4) {
+			nvkm_wr32(device, 0x406804 + (i * 0x20), upper_32_bits(tpc_set));
+			nvkm_wr32(device, 0x406c04 + (i * 0x20), upper_32_bits(tpc_set ^ tpc_mask));
 		}
 	}
 }
 
 void
-gf100_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
 
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
+	nvkm_mc_unk260(device->mc, 0);
 
-	gf100_gr_mmio(priv, oclass->hub);
-	gf100_gr_mmio(priv, oclass->gpc);
-	gf100_gr_mmio(priv, oclass->zcull);
-	gf100_gr_mmio(priv, oclass->tpc);
-	gf100_gr_mmio(priv, oclass->ppc);
+	gf100_gr_mmio(gr, grctx->hub);
+	gf100_gr_mmio(gr, grctx->gpc);
+	gf100_gr_mmio(gr, grctx->zcull);
+	gf100_gr_mmio(gr, grctx->tpc);
+	gf100_gr_mmio(gr, grctx->ppc);
 
-	nv_wr32(priv, 0x404154, 0x00000000);
+	nvkm_wr32(device, 0x404154, 0x00000000);
 
-	oclass->bundle(info);
-	oclass->pagepool(info);
-	oclass->attrib(info);
-	oclass->unkn(priv);
+	grctx->bundle(info);
+	grctx->pagepool(info);
+	grctx->attrib(info);
+	grctx->unkn(gr);
 
-	gf100_grctx_generate_tpcid(priv);
-	gf100_grctx_generate_r406028(priv);
-	gf100_grctx_generate_r4060a8(priv);
-	gf100_grctx_generate_r418bb8(priv);
-	gf100_grctx_generate_r406800(priv);
+	gf100_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gf100_grctx_generate_r4060a8(gr);
+	gf100_grctx_generate_r418bb8(gr);
+	gf100_grctx_generate_r406800(gr);
 
-	gf100_gr_icmd(priv, oclass->icmd);
-	nv_wr32(priv, 0x404154, 0x00000400);
-	gf100_gr_mthd(priv, oclass->mthd);
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+	gf100_gr_icmd(gr, grctx->icmd);
+	nvkm_wr32(device, 0x404154, 0x00000400);
+	gf100_gr_mthd(gr, grctx->mthd);
+	nvkm_mc_unk260(device->mc, 1);
 }
 
 int
-gf100_grctx_generate(struct gf100_gr_priv *priv)
+gf100_grctx_generate(struct gf100_gr *gr)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
-	struct nvkm_bar *bar = nvkm_bar(priv);
-	struct nvkm_gpuobj *chan;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_memory *chan;
 	struct gf100_grctx info;
 	int ret, i;
+	u64 addr;
 
 	/* allocate memory for a "channel", which we'll use to generate
 	 * the default context values
 	 */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x80000 + priv->size,
-			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x80000 + gr->size,
+			      0x1000, true, &chan);
 	if (ret) {
-		nv_error(priv, "failed to allocate channel memory, %d\n", ret);
+		nvkm_error(subdev, "failed to allocate chan memory, %d\n", ret);
 		return ret;
 	}
 
+	addr = nvkm_memory_addr(chan);
+
 	/* PGD pointer */
-	nv_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000));
-	nv_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000));
-	nv_wo32(chan, 0x0208, 0xffffffff);
-	nv_wo32(chan, 0x020c, 0x000000ff);
+	nvkm_kmap(chan);
+	nvkm_wo32(chan, 0x0200, lower_32_bits(addr + 0x1000));
+	nvkm_wo32(chan, 0x0204, upper_32_bits(addr + 0x1000));
+	nvkm_wo32(chan, 0x0208, 0xffffffff);
+	nvkm_wo32(chan, 0x020c, 0x000000ff);
 
 	/* PGT[0] pointer */
-	nv_wo32(chan, 0x1000, 0x00000000);
-	nv_wo32(chan, 0x1004, 0x00000001 | (chan->addr + 0x2000) >> 8);
+	nvkm_wo32(chan, 0x1000, 0x00000000);
+	nvkm_wo32(chan, 0x1004, 0x00000001 | (addr + 0x2000) >> 8);
 
 	/* identity-map the whole "channel" into its own vm */
-	for (i = 0; i < chan->size / 4096; i++) {
-		u64 addr = ((chan->addr + (i * 4096)) >> 8) | 1;
-		nv_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
-		nv_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
+	for (i = 0; i < nvkm_memory_size(chan) / 4096; i++) {
+		u64 addr = ((nvkm_memory_addr(chan) + (i * 4096)) >> 8) | 1;
+		nvkm_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
+		nvkm_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
 	}
 
 	/* context pointer (virt) */
-	nv_wo32(chan, 0x0210, 0x00080004);
-	nv_wo32(chan, 0x0214, 0x00000000);
+	nvkm_wo32(chan, 0x0210, 0x00080004);
+	nvkm_wo32(chan, 0x0214, 0x00000000);
+	nvkm_done(chan);
 
-	bar->flush(bar);
-
-	nv_wr32(priv, 0x100cb8, (chan->addr + 0x1000) >> 8);
-	nv_wr32(priv, 0x100cbc, 0x80000001);
-	nv_wait(priv, 0x100c80, 0x00008000, 0x00008000);
+	nvkm_wr32(device, 0x100cb8, (addr + 0x1000) >> 8);
+	nvkm_wr32(device, 0x100cbc, 0x80000001);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100c80) & 0x00008000)
+			break;
+	);
 
 	/* setup default state for mmio list construction */
-	info.priv = priv;
-	info.data = priv->mmio_data;
-	info.mmio = priv->mmio_list;
+	info.gr = gr;
+	info.data = gr->mmio_data;
+	info.mmio = gr->mmio_list;
 	info.addr = 0x2000 + (i * 8);
 	info.buffer_nr = 0;
 
 	/* make channel current */
-	if (priv->firmware) {
-		nv_wr32(priv, 0x409840, 0x00000030);
-		nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
-		nv_wr32(priv, 0x409504, 0x00000003);
-		if (!nv_wait(priv, 0x409800, 0x00000010, 0x00000010))
-			nv_error(priv, "load_ctx timeout\n");
+	if (gr->firmware) {
+		nvkm_wr32(device, 0x409840, 0x00000030);
+		nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
+		nvkm_wr32(device, 0x409504, 0x00000003);
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800) & 0x00000010)
+				break;
+		);
 
-		nv_wo32(chan, 0x8001c, 1);
-		nv_wo32(chan, 0x80020, 0);
-		nv_wo32(chan, 0x80028, 0);
-		nv_wo32(chan, 0x8002c, 0);
-		bar->flush(bar);
+		nvkm_kmap(chan);
+		nvkm_wo32(chan, 0x8001c, 1);
+		nvkm_wo32(chan, 0x80020, 0);
+		nvkm_wo32(chan, 0x80028, 0);
+		nvkm_wo32(chan, 0x8002c, 0);
+		nvkm_done(chan);
 	} else {
-		nv_wr32(priv, 0x409840, 0x80000000);
-		nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
-		nv_wr32(priv, 0x409504, 0x00000001);
-		if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000))
-			nv_error(priv, "HUB_SET_CHAN timeout\n");
+		nvkm_wr32(device, 0x409840, 0x80000000);
+		nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
+		nvkm_wr32(device, 0x409504, 0x00000001);
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800) & 0x80000000)
+				break;
+		);
 	}
 
-	oclass->main(priv, &info);
+	grctx->main(gr, &info);
 
 	/* trigger a context unload by unsetting the "next channel valid" bit
 	 * and faking a context switch interrupt
 	 */
-	nv_mask(priv, 0x409b04, 0x80000000, 0x00000000);
-	nv_wr32(priv, 0x409000, 0x00000100);
-	if (!nv_wait(priv, 0x409b00, 0x80000000, 0x00000000)) {
-		nv_error(priv, "grctx template channel unload timeout\n");
+	nvkm_mask(device, 0x409b04, 0x80000000, 0x00000000);
+	nvkm_wr32(device, 0x409000, 0x00000100);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x409b00) & 0x80000000))
+			break;
+	) < 0) {
 		ret = -EBUSY;
 		goto done;
 	}
 
-	priv->data = kmalloc(priv->size, GFP_KERNEL);
-	if (priv->data) {
-		for (i = 0; i < priv->size; i += 4)
-			priv->data[i / 4] = nv_ro32(chan, 0x80000 + i);
+	gr->data = kmalloc(gr->size, GFP_KERNEL);
+	if (gr->data) {
+		nvkm_kmap(chan);
+		for (i = 0; i < gr->size; i += 4)
+			gr->data[i / 4] = nvkm_ro32(chan, 0x80000 + i);
+		nvkm_done(chan);
 		ret = 0;
 	} else {
 		ret = -ENOMEM;
 	}
 
 done:
-	nvkm_gpuobj_ref(NULL, &chan);
+	nvkm_memory_del(&chan);
 	return ret;
 }
 
-struct nvkm_oclass *
-gf100_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf100_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf100_grctx_generate_unkn,
 	.hub   = gf100_grctx_pack_hub,
@@ -1387,4 +1400,4 @@
 	.attrib = gf100_grctx_generate_attrib,
 	.attrib_nr_max = 0x324,
 	.attrib_nr = 0x218,
-}.base;
+};
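
The nv_wait() calls removed above become open-coded nvkm_msec() polls that break out when a register bit matches and yield a negative value on timeout, which is what the "< 0" test after the context-unload poll relies on. A self-contained userspace sketch of the same idiom, with reg_read() as a hypothetical stand-in for nvkm_rd32():

	/* Poll a register until (value & mask) == data or a deadline passes;
	 * returns 0 on success, -1 on timeout.  reg_read() is a stand-in
	 * for the real MMIO accessor and trivially succeeds here.
	 */
	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	static uint32_t reg_read(uint32_t addr)
	{
		(void)addr;
		return 0x00008000;
	}

	static int poll_mask_msec(uint32_t addr, uint32_t mask, uint32_t data,
				  long timeout_ms)
	{
		struct timespec start, now;
		clock_gettime(CLOCK_MONOTONIC, &start);
		for (;;) {
			if ((reg_read(addr) & mask) == data)
				return 0;
			clock_gettime(CLOCK_MONOTONIC, &now);
			long ms = (now.tv_sec - start.tv_sec) * 1000L +
				  (now.tv_nsec - start.tv_nsec) / 1000000L;
			if (ms >= timeout_ms)
				return -1;
		}
	}

	int main(void)
	{
		/* e.g. the 0x100c80 bit-15 wait after binding the vm above */
		if (poll_mask_msec(0x100c80, 0x00008000, 0x00008000, 2000) < 0)
			fprintf(stderr, "poll timeout\n");
		return 0;
	}
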
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 3676a33..3c64040 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -3,7 +3,7 @@
 #include "gf100.h"
 
 struct gf100_grctx {
-	struct gf100_gr_priv *priv;
+	struct gf100_gr *gr;
 	struct gf100_gr_data *data;
 	struct gf100_gr_mmio *mmio;
 	int buffer_nr;
@@ -19,12 +19,11 @@
 #define mmio_skip(a,b,c) mmio_refn((a), (b), (c), -1, -1)
 #define mmio_wr32(a,b,c) mmio_refn((a), (b), (c),  0, -1)
 
-struct gf100_grctx_oclass {
-	struct nvkm_oclass base;
+struct gf100_grctx_func {
 	/* main context generation function */
-	void  (*main)(struct gf100_gr_priv *, struct gf100_grctx *);
+	void  (*main)(struct gf100_gr *, struct gf100_grctx *);
 	/* context-specific modify-on-first-load list generation function */
-	void  (*unkn)(struct gf100_gr_priv *);
+	void  (*unkn)(struct gf100_gr *);
 	/* mmio context data */
 	const struct gf100_gr_pack *hub;
 	const struct gf100_gr_pack *gpc;
@@ -50,60 +49,61 @@
 	u32 alpha_nr;
 };
 
-static inline const struct gf100_grctx_oclass *
-gf100_grctx_impl(struct gf100_gr_priv *priv)
-{
-	return (void *)nv_engine(priv)->cclass;
-}
-
-extern struct nvkm_oclass *gf100_grctx_oclass;
-int  gf100_grctx_generate(struct gf100_gr_priv *);
-void gf100_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *);
+extern const struct gf100_grctx_func gf100_grctx;
+int  gf100_grctx_generate(struct gf100_gr *);
+void gf100_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
 void gf100_grctx_generate_bundle(struct gf100_grctx *);
 void gf100_grctx_generate_pagepool(struct gf100_grctx *);
 void gf100_grctx_generate_attrib(struct gf100_grctx *);
-void gf100_grctx_generate_unkn(struct gf100_gr_priv *);
-void gf100_grctx_generate_tpcid(struct gf100_gr_priv *);
-void gf100_grctx_generate_r406028(struct gf100_gr_priv *);
-void gf100_grctx_generate_r4060a8(struct gf100_gr_priv *);
-void gf100_grctx_generate_r418bb8(struct gf100_gr_priv *);
-void gf100_grctx_generate_r406800(struct gf100_gr_priv *);
+void gf100_grctx_generate_unkn(struct gf100_gr *);
+void gf100_grctx_generate_tpcid(struct gf100_gr *);
+void gf100_grctx_generate_r406028(struct gf100_gr *);
+void gf100_grctx_generate_r4060a8(struct gf100_gr *);
+void gf100_grctx_generate_r418bb8(struct gf100_gr *);
+void gf100_grctx_generate_r406800(struct gf100_gr *);
 
-extern struct nvkm_oclass *gf108_grctx_oclass;
+extern const struct gf100_grctx_func gf108_grctx;
 void gf108_grctx_generate_attrib(struct gf100_grctx *);
-void gf108_grctx_generate_unkn(struct gf100_gr_priv *);
+void gf108_grctx_generate_unkn(struct gf100_gr *);
 
-extern struct nvkm_oclass *gf104_grctx_oclass;
-extern struct nvkm_oclass *gf110_grctx_oclass;
+extern const struct gf100_grctx_func gf104_grctx;
+extern const struct gf100_grctx_func gf110_grctx;
 
-extern struct nvkm_oclass *gf117_grctx_oclass;
+extern const struct gf100_grctx_func gf117_grctx;
 void gf117_grctx_generate_attrib(struct gf100_grctx *);
 
-extern struct nvkm_oclass *gf119_grctx_oclass;
+extern const struct gf100_grctx_func gf119_grctx;
 
-extern struct nvkm_oclass *gk104_grctx_oclass;
-extern struct nvkm_oclass *gk20a_grctx_oclass;
-void gk104_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *);
+extern const struct gf100_grctx_func gk104_grctx;
+extern const struct gf100_grctx_func gk20a_grctx;
+void gk104_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
 void gk104_grctx_generate_bundle(struct gf100_grctx *);
 void gk104_grctx_generate_pagepool(struct gf100_grctx *);
-void gk104_grctx_generate_unkn(struct gf100_gr_priv *);
-void gk104_grctx_generate_r418bb8(struct gf100_gr_priv *);
-void gk104_grctx_generate_rop_active_fbps(struct gf100_gr_priv *);
+void gk104_grctx_generate_unkn(struct gf100_gr *);
+void gk104_grctx_generate_r418bb8(struct gf100_gr *);
+void gk104_grctx_generate_rop_active_fbps(struct gf100_gr *);
 
 
-extern struct nvkm_oclass *gk110_grctx_oclass;
-extern struct nvkm_oclass *gk110b_grctx_oclass;
-extern struct nvkm_oclass *gk208_grctx_oclass;
-
-extern struct nvkm_oclass *gm107_grctx_oclass;
 void gm107_grctx_generate_bundle(struct gf100_grctx *);
 void gm107_grctx_generate_pagepool(struct gf100_grctx *);
 void gm107_grctx_generate_attrib(struct gf100_grctx *);
 
-extern struct nvkm_oclass *gm204_grctx_oclass;
-void gm204_grctx_generate_main(struct gf100_gr_priv *, struct gf100_grctx *);
+extern const struct gf100_grctx_func gk110_grctx;
+extern const struct gf100_grctx_func gk110b_grctx;
+extern const struct gf100_grctx_func gk208_grctx;
 
-extern struct nvkm_oclass *gm206_grctx_oclass;
+extern const struct gf100_grctx_func gm107_grctx;
+void gm107_grctx_generate_bundle(struct gf100_grctx *);
+void gm107_grctx_generate_pagepool(struct gf100_grctx *);
+void gm107_grctx_generate_attrib(struct gf100_grctx *);
+
+extern const struct gf100_grctx_func gm204_grctx;
+void gm204_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
+void gm204_grctx_generate_tpcid(struct gf100_gr *);
+void gm204_grctx_generate_405b60(struct gf100_gr *);
+
+extern const struct gf100_grctx_func gm206_grctx;
+extern const struct gf100_grctx_func gm20b_grctx;
 
 /* context init value lists */
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
index c5a8d55..54fd74e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf104.c
@@ -79,17 +79,8 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf104_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xc3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf104_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf100_grctx_generate_unkn,
 	.hub   = gf100_grctx_pack_hub,
@@ -105,4 +96,4 @@
 	.attrib = gf100_grctx_generate_attrib,
 	.attrib_nr_max = 0x324,
 	.attrib_nr = 0x218,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
index 87c844a..505cdcb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
@@ -730,18 +730,18 @@
 void
 gf108_grctx_generate_attrib(struct gf100_grctx *info)
 {
-	struct gf100_gr_priv *priv = info->priv;
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv);
-	const u32  alpha = impl->alpha_nr;
-	const u32   beta = impl->attrib_nr;
-	const u32   size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+	struct gf100_gr *gr = info->gr;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	const u32  alpha = grctx->alpha_nr;
+	const u32   beta = grctx->attrib_nr;
+	const u32   size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
 	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
 	const int timeslice_mode = 1;
 	const int max_batches = 0xffff;
 	u32 bo = 0;
-	u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
+	u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
 	int gpc, tpc;
 
 	mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -749,43 +749,35 @@
 	mmio_wr32(info, 0x405830, (beta << 16) | alpha);
 	mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
 			const u32 a = alpha;
 			const u32 b =  beta;
 			const u32 t = timeslice_mode;
 			const u32 o = TPC_UNIT(gpc, tpc, 0x500);
 			mmio_skip(info, o + 0x20, (t << 28) | (b << 16) | ++bo);
 			mmio_wr32(info, o + 0x20, (t << 28) | (b << 16) | --bo);
-			bo += impl->attrib_nr_max;
+			bo += grctx->attrib_nr_max;
 			mmio_wr32(info, o + 0x44, (a << 16) | ao);
-			ao += impl->alpha_nr_max;
+			ao += grctx->alpha_nr_max;
 		}
 	}
 }
 
 void
-gf108_grctx_generate_unkn(struct gf100_gr_priv *priv)
+gf108_grctx_generate_unkn(struct gf100_gr *gr)
 {
-	nv_mask(priv, 0x418c6c, 0x00000001, 0x00000001);
-	nv_mask(priv, 0x41980c, 0x00000010, 0x00000010);
-	nv_mask(priv, 0x419814, 0x00000004, 0x00000004);
-	nv_mask(priv, 0x4064c0, 0x80000000, 0x80000000);
-	nv_mask(priv, 0x405800, 0x08000000, 0x08000000);
-	nv_mask(priv, 0x419c00, 0x00000008, 0x00000008);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x418c6c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x41980c, 0x00000010, 0x00000010);
+	nvkm_mask(device, 0x419814, 0x00000004, 0x00000004);
+	nvkm_mask(device, 0x4064c0, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x405800, 0x08000000, 0x08000000);
+	nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
 }
 
-struct nvkm_oclass *
-gf108_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xc1),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf108_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf108_grctx_generate_unkn,
 	.hub   = gf108_grctx_pack_hub,
@@ -803,4 +795,4 @@
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x324,
 	.alpha_nr = 0x218,
-}.base;
+};
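
The mmio_skip()/mmio_wr32()/mmio_refn() calls in gf108_grctx_generate_attrib() above do not touch hardware directly; they record entries into the grctx mmio list, and entries referencing an allocated buffer get that buffer's address patched into the data when the list is later applied to a channel. A minimal sketch of that record-then-replay idea, with a simplified entry layout assumed from how the macros are used (the real struct gf100_gr_mmio is defined elsewhere):

	/* Illustrative only: field names assumed, not copied from gf100.h. */
	#include <stddef.h>
	#include <stdint.h>

	struct mmio_entry {
		uint32_t addr;
		uint32_t data;
		int shift;	/* right-shift applied to the buffer address */
		int buffer;	/* buffer index, or -1 for a plain write */
	};

	static void
	mmio_replay(const struct mmio_entry *list, size_t nr,
		    const uint64_t *buf_addr,
		    void (*wr32)(uint32_t addr, uint32_t data))
	{
		size_t i;
		for (i = 0; i < nr; i++) {
			uint32_t data = list[i].data;
			if (list[i].buffer >= 0)
				data |= buf_addr[list[i].buffer] >> list[i].shift;
			wr32(list[i].addr, data);
		}
	}
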
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
index b3acd93..7df398b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf110.c
@@ -330,17 +330,8 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf110_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xc8),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf110_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf100_grctx_generate_unkn,
 	.hub   = gf100_grctx_pack_hub,
@@ -356,4 +347,4 @@
 	.attrib = gf100_grctx_generate_attrib,
 	.attrib_nr_max = 0x324,
 	.attrib_nr = 0x218,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index 9bbe2c9..b5b8759 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -182,18 +182,18 @@
 void
 gf117_grctx_generate_attrib(struct gf100_grctx *info)
 {
-	struct gf100_gr_priv *priv = info->priv;
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(priv);
-	const u32  alpha = impl->alpha_nr;
-	const u32   beta = impl->attrib_nr;
-	const u32   size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+	struct gf100_gr *gr = info->gr;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	const u32  alpha = grctx->alpha_nr;
+	const u32   beta = grctx->attrib_nr;
+	const u32   size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
 	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
 	const int timeslice_mode = 1;
 	const int max_batches = 0xffff;
 	u32 bo = 0;
-	u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
+	u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
 	int gpc, ppc;
 
 	mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -201,68 +201,60 @@
 	mmio_wr32(info, 0x405830, (beta << 16) | alpha);
 	mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++) {
-			const u32 a = alpha * priv->ppc_tpc_nr[gpc][ppc];
-			const u32 b =  beta * priv->ppc_tpc_nr[gpc][ppc];
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++) {
+			const u32 a = alpha * gr->ppc_tpc_nr[gpc][ppc];
+			const u32 b =  beta * gr->ppc_tpc_nr[gpc][ppc];
 			const u32 t = timeslice_mode;
 			const u32 o = PPC_UNIT(gpc, ppc, 0);
 			mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo);
 			mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo);
-			bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+			bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
 			mmio_wr32(info, o + 0xe4, (a << 16) | ao);
-			ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+			ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
 		}
 	}
 }
 
 void
-gf117_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	int i;
 
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
+	nvkm_mc_unk260(device->mc, 0);
 
-	gf100_gr_mmio(priv, oclass->hub);
-	gf100_gr_mmio(priv, oclass->gpc);
-	gf100_gr_mmio(priv, oclass->zcull);
-	gf100_gr_mmio(priv, oclass->tpc);
-	gf100_gr_mmio(priv, oclass->ppc);
+	gf100_gr_mmio(gr, grctx->hub);
+	gf100_gr_mmio(gr, grctx->gpc);
+	gf100_gr_mmio(gr, grctx->zcull);
+	gf100_gr_mmio(gr, grctx->tpc);
+	gf100_gr_mmio(gr, grctx->ppc);
 
-	nv_wr32(priv, 0x404154, 0x00000000);
+	nvkm_wr32(device, 0x404154, 0x00000000);
 
-	oclass->bundle(info);
-	oclass->pagepool(info);
-	oclass->attrib(info);
-	oclass->unkn(priv);
+	grctx->bundle(info);
+	grctx->pagepool(info);
+	grctx->attrib(info);
+	grctx->unkn(gr);
 
-	gf100_grctx_generate_tpcid(priv);
-	gf100_grctx_generate_r406028(priv);
-	gf100_grctx_generate_r4060a8(priv);
-	gk104_grctx_generate_r418bb8(priv);
-	gf100_grctx_generate_r406800(priv);
+	gf100_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gf100_grctx_generate_r4060a8(gr);
+	gk104_grctx_generate_r418bb8(gr);
+	gf100_grctx_generate_r406800(gr);
 
 	for (i = 0; i < 8; i++)
-		nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
 
-	gf100_gr_icmd(priv, oclass->icmd);
-	nv_wr32(priv, 0x404154, 0x00000400);
-	gf100_gr_mthd(priv, oclass->mthd);
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+	gf100_gr_icmd(gr, grctx->icmd);
+	nvkm_wr32(device, 0x404154, 0x00000400);
+	gf100_gr_mthd(gr, grctx->mthd);
+	nvkm_mc_unk260(device->mc, 1);
 }
 
-struct nvkm_oclass *
-gf117_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xd7),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf117_grctx = {
 	.main  = gf117_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gf117_grctx_pack_hub,
@@ -281,4 +273,4 @@
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x324,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
index 8d87614..605185b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf119.c
@@ -498,17 +498,8 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf119_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xd9),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gf119_grctx = {
 	.main  = gf100_grctx_generate_main,
 	.unkn  = gf108_grctx_generate_unkn,
 	.hub   = gf119_grctx_pack_hub,
@@ -526,4 +517,4 @@
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x324,
 	.alpha_nr = 0x218,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index b12f6a9..a843e36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -843,27 +843,27 @@
 void
 gk104_grctx_generate_bundle(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
-	const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
-				    impl->bundle_size / 0x20);
-	const u32 token_limit = impl->bundle_token_limit;
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
+	const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
+				    grctx->bundle_size / 0x20);
+	const u32 token_limit = grctx->bundle_token_limit;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
 	mmio_refn(info, 0x408004, 0x00000000, s, b);
-	mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_refn(info, 0x418808, 0x00000000, s, b);
-	mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x41880c, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
 }
 
 void
 gk104_grctx_generate_pagepool(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
 	mmio_refn(info, 0x40800c, 0x00000000, s, b);
 	mmio_wr32(info, 0x408010, 0x80000000);
 	mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -872,31 +872,33 @@
 }
 
 void
-gk104_grctx_generate_unkn(struct gf100_gr_priv *priv)
+gk104_grctx_generate_unkn(struct gf100_gr *gr)
 {
-	nv_mask(priv, 0x418c6c, 0x00000001, 0x00000001);
-	nv_mask(priv, 0x41980c, 0x00000010, 0x00000010);
-	nv_mask(priv, 0x41be08, 0x00000004, 0x00000004);
-	nv_mask(priv, 0x4064c0, 0x80000000, 0x80000000);
-	nv_mask(priv, 0x405800, 0x08000000, 0x08000000);
-	nv_mask(priv, 0x419c00, 0x00000008, 0x00000008);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_mask(device, 0x418c6c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x41980c, 0x00000010, 0x00000010);
+	nvkm_mask(device, 0x41be08, 0x00000004, 0x00000004);
+	nvkm_mask(device, 0x4064c0, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x405800, 0x08000000, 0x08000000);
+	nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
 }
 
 void
-gk104_grctx_generate_r418bb8(struct gf100_gr_priv *priv)
+gk104_grctx_generate_r418bb8(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 data[6] = {}, data2[2] = {};
 	u8  tpcnr[GPC_MAX];
 	u8  shift, ntpcv;
 	int gpc, tpc, i;
 
 	/* calculate first set of magics */
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
 
 	gpc = -1;
-	for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+	for (tpc = 0; tpc < gr->tpc_total; tpc++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
 		tpcnr[gpc]--;
 
@@ -908,7 +910,7 @@
 
 	/* and the second... */
 	shift = 0;
-	ntpcv = priv->tpc_total;
+	ntpcv = gr->tpc_total;
 	while (!(ntpcv & (1 << 4))) {
 		ntpcv <<= 1;
 		shift++;
@@ -921,86 +923,79 @@
 		data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
 
 	/* GPC_BROADCAST */
-	nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr);
+	nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
+		nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
 
 	/* GPC_BROADCAST.TP_BROADCAST */
-	nv_wr32(priv, 0x41bfd0, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr | data2[0]);
-	nv_wr32(priv, 0x41bfe4, data2[1]);
+	nvkm_wr32(device, 0x41bfd0, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr | data2[0]);
+	nvkm_wr32(device, 0x41bfe4, data2[1]);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x41bf00 + (i * 4), data[i]);
+		nvkm_wr32(device, 0x41bf00 + (i * 4), data[i]);
 
 	/* UNK78xx */
-	nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) |
-				 priv->magic_not_rop_nr);
+	nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
+				 gr->magic_not_rop_nr);
 	for (i = 0; i < 6; i++)
-		nv_wr32(priv, 0x40780c + (i * 4), data[i]);
+		nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
 }
 
 void
-gk104_grctx_generate_rop_active_fbps(struct gf100_gr_priv *priv)
+gk104_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
 {
-	const u32 fbp_count = nv_rd32(priv, 0x120074);
-	nv_mask(priv, 0x408850, 0x0000000f, fbp_count); /* zrop */
-	nv_mask(priv, 0x408958, 0x0000000f, fbp_count); /* crop */
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 fbp_count = nvkm_rd32(device, 0x120074);
+	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
 }
 
 void
-gk104_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	int i;
 
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
+	nvkm_mc_unk260(device->mc, 0);
 
-	gf100_gr_mmio(priv, oclass->hub);
-	gf100_gr_mmio(priv, oclass->gpc);
-	gf100_gr_mmio(priv, oclass->zcull);
-	gf100_gr_mmio(priv, oclass->tpc);
-	gf100_gr_mmio(priv, oclass->ppc);
+	gf100_gr_mmio(gr, grctx->hub);
+	gf100_gr_mmio(gr, grctx->gpc);
+	gf100_gr_mmio(gr, grctx->zcull);
+	gf100_gr_mmio(gr, grctx->tpc);
+	gf100_gr_mmio(gr, grctx->ppc);
 
-	nv_wr32(priv, 0x404154, 0x00000000);
+	nvkm_wr32(device, 0x404154, 0x00000000);
 
-	oclass->bundle(info);
-	oclass->pagepool(info);
-	oclass->attrib(info);
-	oclass->unkn(priv);
+	grctx->bundle(info);
+	grctx->pagepool(info);
+	grctx->attrib(info);
+	grctx->unkn(gr);
 
-	gf100_grctx_generate_tpcid(priv);
-	gf100_grctx_generate_r406028(priv);
-	gk104_grctx_generate_r418bb8(priv);
-	gf100_grctx_generate_r406800(priv);
+	gf100_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gk104_grctx_generate_r418bb8(gr);
+	gf100_grctx_generate_r406800(gr);
 
 	for (i = 0; i < 8; i++)
-		nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
 
-	nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
-	gk104_grctx_generate_rop_active_fbps(priv);
-	nv_mask(priv, 0x419f78, 0x00000001, 0x00000000);
+	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+	gk104_grctx_generate_rop_active_fbps(gr);
+	nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
 
-	gf100_gr_icmd(priv, oclass->icmd);
-	nv_wr32(priv, 0x404154, 0x00000400);
-	gf100_gr_mthd(priv, oclass->mthd);
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+	gf100_gr_icmd(gr, grctx->icmd);
+	nvkm_wr32(device, 0x404154, 0x00000400);
+	gf100_gr_mthd(gr, grctx->mthd);
+	nvkm_mc_unk260(device->mc, 1);
 
-	nv_mask(priv, 0x418800, 0x00200000, 0x00200000);
-	nv_mask(priv, 0x41be10, 0x00800000, 0x00800000);
+	nvkm_mask(device, 0x418800, 0x00200000, 0x00200000);
+	nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
 }
 
-struct nvkm_oclass *
-gk104_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xe4),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gk104_grctx = {
 	.main  = gk104_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk104_grctx_pack_hub,
@@ -1021,4 +1016,4 @@
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index b3f58be..7b95ec2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -808,17 +808,8 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gk110_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xf0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gk110_grctx = {
 	.main  = gk104_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk110_grctx_pack_hub,
@@ -839,4 +830,4 @@
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index b11c267..048b115 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -69,17 +69,8 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gk110b_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xf1),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gk110b_grctx = {
 	.main  = gk104_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk110_grctx_pack_hub,
@@ -100,4 +91,4 @@
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index 6e8ce9f..67b7a1b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -530,17 +530,8 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
-struct nvkm_oclass *
-gk208_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0x08),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gk208_grctx = {
 	.main  = gk104_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gk208_grctx_pack_hub,
@@ -561,4 +552,4 @@
 	.attrib_nr = 0x218,
 	.alpha_nr_max = 0x7ff,
 	.alpha_nr = 0x648,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index 2f241f6..ddaa16a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -20,34 +20,60 @@
  * DEALINGS IN THE SOFTWARE.
  */
 #include "ctxgf100.h"
+#include "gf100.h"
 
-static const struct gf100_gr_pack
-gk20a_grctx_pack_mthd[] = {
-	{ gk104_grctx_init_a097_0, 0xa297 },
-	{ gf100_grctx_init_902d_0, 0x902d },
-	{}
-};
+#include <subdev/mc.h>
 
-struct nvkm_oclass *
-gk20a_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-	.main  = gk104_grctx_generate_main,
+static void
+gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	int idle_timeout_save;
+	int i;
+
+	gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+
+	gf100_gr_wait_idle(gr);
+
+	idle_timeout_save = nvkm_rd32(device, 0x404154);
+	nvkm_wr32(device, 0x404154, 0x00000000);
+
+	grctx->attrib(info);
+
+	grctx->unkn(gr);
+
+	gf100_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gk104_grctx_generate_r418bb8(gr);
+	gf100_grctx_generate_r406800(gr);
+
+	for (i = 0; i < 8; i++)
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+
+	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+
+	gk104_grctx_generate_rop_active_fbps(gr);
+
+	nvkm_mask(device, 0x5044b0, 0x8000000, 0x8000000);
+
+	gf100_gr_wait_idle(gr);
+
+	nvkm_wr32(device, 0x404154, idle_timeout_save);
+	gf100_gr_wait_idle(gr);
+
+	gf100_gr_mthd(gr, gr->fuc_method);
+	gf100_gr_wait_idle(gr);
+
+	gf100_gr_icmd(gr, gr->fuc_bundle);
+	grctx->pagepool(info);
+	grctx->bundle(info);
+}
+
+const struct gf100_grctx_func
+gk20a_grctx = {
+	.main  = gk20a_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
-	.hub   = gk104_grctx_pack_hub,
-	.gpc   = gk104_grctx_pack_gpc,
-	.zcull = gf100_grctx_pack_zcull,
-	.tpc   = gk104_grctx_pack_tpc,
-	.ppc   = gk104_grctx_pack_ppc,
-	.icmd  = gk104_grctx_pack_icmd,
-	.mthd  = gk20a_grctx_pack_mthd,
 	.bundle = gk104_grctx_generate_bundle,
 	.bundle_size = 0x1800,
 	.bundle_min_gpm_fifo_depth = 0x62,
@@ -59,4 +85,4 @@
 	.attrib_nr = 0x240,
 	.alpha_nr_max = 0x648 + (0x648 / 2),
 	.alpha_nr = 0x648,
-}.base;
+};
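
Unlike the other chipsets, gk20a above replays firmware-provided lists (gr->fuc_sw_ctx, gr->fuc_method, gr->fuc_bundle) through the same gf100_gr_mmio()/gf100_gr_mthd()/gf100_gr_icmd() helpers that elsewhere consume the static context tables. A sketch of the table walk those helpers imply, with field names assumed rather than copied from the headers: each init entry describes a run of registers (base, count, stride) written with one value, and a pack chains init tables together.

	/* Illustrative pack/init walk; layouts assumed, not from gf100.h. */
	#include <stdint.h>

	struct gr_init {
		uint32_t addr;
		uint8_t  count;	/* 0 terminates an init table */
		uint8_t  pitch;	/* byte stride between registers */
		uint32_t data;
	};

	struct gr_pack {
		const struct gr_init *init;	/* NULL terminates the list */
	};

	static void
	gr_pack_replay(const struct gr_pack *pack,
		       void (*wr32)(uint32_t addr, uint32_t data))
	{
		for (; pack->init; pack++) {
			const struct gr_init *init;
			for (init = pack->init; init->count; init++) {
				uint32_t addr = init->addr;
				int i;
				for (i = 0; i < init->count; i++, addr += init->pitch)
					wr32(addr, init->data);
			}
		}
	}
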
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index fbeaae3..95f59e3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -863,27 +863,27 @@
 void
 gm107_grctx_generate_bundle(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
-	const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
-				    impl->bundle_size / 0x20);
-	const u32 token_limit = impl->bundle_token_limit;
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
+	const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
+				    grctx->bundle_size / 0x20);
+	const u32 token_limit = grctx->bundle_token_limit;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
 	mmio_refn(info, 0x408004, 0x00000000, s, b);
-	mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_refn(info, 0x418e24, 0x00000000, s, b);
-	mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s));
+	mmio_wr32(info, 0x418e28, 0x80000000 | (grctx->bundle_size >> s));
 	mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
 }
 
 void
 gm107_grctx_generate_pagepool(struct gf100_grctx *info)
 {
-	const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->priv);
+	const struct gf100_grctx_func *grctx = info->gr->func->grctx;
 	const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
 	const int s = 8;
-	const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+	const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
 	mmio_refn(info, 0x40800c, 0x00000000, s, b);
 	mmio_wr32(info, 0x408010, 0x80000000);
 	mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -895,17 +895,17 @@
 void
 gm107_grctx_generate_attrib(struct gf100_grctx *info)
 {
-	struct gf100_gr_priv *priv = info->priv;
-	const struct gf100_grctx_oclass *impl = (void *)gf100_grctx_impl(priv);
-	const u32  alpha = impl->alpha_nr;
-	const u32 attrib = impl->attrib_nr;
-	const u32   size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+	struct gf100_gr *gr = info->gr;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	const u32  alpha = grctx->alpha_nr;
+	const u32 attrib = grctx->attrib_nr;
+	const u32   size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
 	const u32 access = NV_MEM_ACCESS_RW;
 	const int s = 12;
-	const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+	const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
 	const int max_batches = 0xffff;
 	u32 bo = 0;
-	u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
+	u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
 	int gpc, ppc, n = 0;
 
 	mmio_refn(info, 0x418810, 0x80000000, s, b);
@@ -914,97 +914,90 @@
 	mmio_wr32(info, 0x405830, (attrib << 16) | alpha);
 	mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++, n++) {
-			const u32 as =  alpha * priv->ppc_tpc_nr[gpc][ppc];
-			const u32 bs = attrib * priv->ppc_tpc_nr[gpc][ppc];
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
+			const u32 as =  alpha * gr->ppc_tpc_nr[gpc][ppc];
+			const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
 			const u32 u = 0x418ea0 + (n * 0x04);
 			const u32 o = PPC_UNIT(gpc, ppc, 0);
 			mmio_wr32(info, o + 0xc0, bs);
 			mmio_wr32(info, o + 0xf4, bo);
-			bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+			bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
 			mmio_wr32(info, o + 0xe4, as);
 			mmio_wr32(info, o + 0xf8, ao);
-			ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+			ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
 			mmio_wr32(info, u, ((bs / 3 /*XXX*/) << 16) | bs);
 		}
 	}
 }
 
-static void
-gm107_grctx_generate_tpcid(struct gf100_gr_priv *priv)
+void
+gm107_grctx_generate_tpcid(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int gpc, tpc, id;
 
 	for (tpc = 0, id = 0; tpc < 4; tpc++) {
-		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-			if (tpc < priv->tpc_nr[gpc]) {
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
-				nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+			if (tpc < gr->tpc_nr[gpc]) {
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
+				nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
 				id++;
 			}
 
-			nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
-			nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+			nvkm_wr32(device, GPC_UNIT(gpc, 0x0c08), gr->tpc_nr[gpc]);
+			nvkm_wr32(device, GPC_UNIT(gpc, 0x0c8c), gr->tpc_nr[gpc]);
 		}
 	}
 }
 
 static void
-gm107_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	int i;
 
-	gf100_gr_mmio(priv, oclass->hub);
-	gf100_gr_mmio(priv, oclass->gpc);
-	gf100_gr_mmio(priv, oclass->zcull);
-	gf100_gr_mmio(priv, oclass->tpc);
-	gf100_gr_mmio(priv, oclass->ppc);
+	gf100_gr_mmio(gr, grctx->hub);
+	gf100_gr_mmio(gr, grctx->gpc);
+	gf100_gr_mmio(gr, grctx->zcull);
+	gf100_gr_mmio(gr, grctx->tpc);
+	gf100_gr_mmio(gr, grctx->ppc);
 
-	nv_wr32(priv, 0x404154, 0x00000000);
+	nvkm_wr32(device, 0x404154, 0x00000000);
 
-	oclass->bundle(info);
-	oclass->pagepool(info);
-	oclass->attrib(info);
-	oclass->unkn(priv);
+	grctx->bundle(info);
+	grctx->pagepool(info);
+	grctx->attrib(info);
+	grctx->unkn(gr);
 
-	gm107_grctx_generate_tpcid(priv);
-	gf100_grctx_generate_r406028(priv);
-	gk104_grctx_generate_r418bb8(priv);
-	gf100_grctx_generate_r406800(priv);
+	gm107_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gk104_grctx_generate_r418bb8(gr);
+	gf100_grctx_generate_r406800(gr);
 
-	nv_wr32(priv, 0x4064d0, 0x00000001);
+	nvkm_wr32(device, 0x4064d0, 0x00000001);
 	for (i = 1; i < 8; i++)
-		nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
-	nv_wr32(priv, 0x406500, 0x00000001);
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+	nvkm_wr32(device, 0x406500, 0x00000001);
 
-	nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
+	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
 
-	gk104_grctx_generate_rop_active_fbps(priv);
+	gk104_grctx_generate_rop_active_fbps(gr);
 
-	gf100_gr_icmd(priv, oclass->icmd);
-	nv_wr32(priv, 0x404154, 0x00000400);
-	gf100_gr_mthd(priv, oclass->mthd);
+	gf100_gr_icmd(gr, grctx->icmd);
+	nvkm_wr32(device, 0x404154, 0x00000400);
+	gf100_gr_mthd(gr, grctx->mthd);
 
-	nv_mask(priv, 0x419e00, 0x00808080, 0x00808080);
-	nv_mask(priv, 0x419ccc, 0x80000000, 0x80000000);
-	nv_mask(priv, 0x419f80, 0x80000000, 0x80000000);
-	nv_mask(priv, 0x419f88, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x419e00, 0x00808080, 0x00808080);
+	nvkm_mask(device, 0x419ccc, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x419f80, 0x80000000, 0x80000000);
+	nvkm_mask(device, 0x419f88, 0x80000000, 0x80000000);
 }
 
-struct nvkm_oclass *
-gm107_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0x08),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gm107_grctx = {
 	.main  = gm107_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gm107_grctx_pack_hub,
@@ -1025,4 +1018,4 @@
 	.attrib_nr = 0xaa0,
 	.alpha_nr_max = 0x1800,
 	.alpha_nr = 0x1000,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
index ea8e661..170cbfd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm204.c
@@ -918,17 +918,18 @@
  * PGRAPH context implementation
  ******************************************************************************/
 
-static void
-gm204_grctx_generate_tpcid(struct gf100_gr_priv *priv)
+void
+gm204_grctx_generate_tpcid(struct gf100_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int gpc, tpc, id;
 
 	for (tpc = 0, id = 0; tpc < 4; tpc++) {
-		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-			if (tpc < priv->tpc_nr[gpc]) {
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
-				nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
-				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+		for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+			if (tpc < gr->tpc_nr[gpc]) {
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
+				nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+				nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), id);
 				id++;
 			}
 		}
@@ -936,101 +937,95 @@
 }
 
 static void
-gm204_grctx_generate_rop_active_fbps(struct gf100_gr_priv *priv)
+gm204_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
 {
-	const u32 fbp_count = nv_rd32(priv, 0x12006c);
-	nv_mask(priv, 0x408850, 0x0000000f, fbp_count); /* zrop */
-	nv_mask(priv, 0x408958, 0x0000000f, fbp_count); /* crop */
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 fbp_count = nvkm_rd32(device, 0x12006c);
+	nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+	nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
 }
 
-static void
-gm204_grctx_generate_405b60(struct gf100_gr_priv *priv)
+void
+gm204_grctx_generate_405b60(struct gf100_gr *gr)
 {
-	const u32 dist_nr = DIV_ROUND_UP(priv->tpc_total, 4);
-	u32 dist[TPC_MAX] = {};
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
+	u32 dist[TPC_MAX / 4] = {};
 	u32 gpcs[GPC_MAX] = {};
 	u8  tpcnr[GPC_MAX];
 	int tpc, gpc, i;
 
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
 
 	/* won't result in the same distribution as the binary driver where
 	 * some of the gpcs have more tpcs than others, but this shall do
 	 * for the moment.  the code for earlier gpus has this issue too.
 	 */
-	for (gpc = -1, i = 0; i < priv->tpc_total; i++) {
+	for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while(!tpcnr[gpc]);
-		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
 		dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
 		gpcs[gpc] |= i << (tpc * 8);
 	}
 
 	for (i = 0; i < dist_nr; i++)
-		nv_wr32(priv, 0x405b60 + (i * 4), dist[i]);
-	for (i = 0; i < priv->gpc_nr; i++)
-		nv_wr32(priv, 0x405ba0 + (i * 4), gpcs[i]);
+		nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
+	for (i = 0; i < gr->gpc_nr; i++)
+		nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
 }
 
 void
-gm204_grctx_generate_main(struct gf100_gr_priv *priv, struct gf100_grctx *info)
+gm204_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
 {
-	struct gf100_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
 	u32 tmp;
 	int i;
 
-	gf100_gr_mmio(priv, oclass->hub);
-	gf100_gr_mmio(priv, oclass->gpc);
-	gf100_gr_mmio(priv, oclass->zcull);
-	gf100_gr_mmio(priv, oclass->tpc);
-	gf100_gr_mmio(priv, oclass->ppc);
+	gf100_gr_mmio(gr, grctx->hub);
+	gf100_gr_mmio(gr, grctx->gpc);
+	gf100_gr_mmio(gr, grctx->zcull);
+	gf100_gr_mmio(gr, grctx->tpc);
+	gf100_gr_mmio(gr, grctx->ppc);
 
-	nv_wr32(priv, 0x404154, 0x00000000);
+	nvkm_wr32(device, 0x404154, 0x00000000);
 
-	oclass->bundle(info);
-	oclass->pagepool(info);
-	oclass->attrib(info);
-	oclass->unkn(priv);
+	grctx->bundle(info);
+	grctx->pagepool(info);
+	grctx->attrib(info);
+	grctx->unkn(gr);
 
-	gm204_grctx_generate_tpcid(priv);
-	gf100_grctx_generate_r406028(priv);
-	gk104_grctx_generate_r418bb8(priv);
+	gm204_grctx_generate_tpcid(gr);
+	gf100_grctx_generate_r406028(gr);
+	gk104_grctx_generate_r418bb8(gr);
 
 	for (i = 0; i < 8; i++)
-		nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
-	nv_wr32(priv, 0x406500, 0x00000000);
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+	nvkm_wr32(device, 0x406500, 0x00000000);
 
-	nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
+	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
 
-	gm204_grctx_generate_rop_active_fbps(priv);
+	gm204_grctx_generate_rop_active_fbps(gr);
 
-	for (tmp = 0, i = 0; i < priv->gpc_nr; i++)
-		tmp |= ((1 << priv->tpc_nr[i]) - 1) << (i * 4);
-	nv_wr32(priv, 0x4041c4, tmp);
+	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
+		tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
+	nvkm_wr32(device, 0x4041c4, tmp);
 
-	gm204_grctx_generate_405b60(priv);
+	gm204_grctx_generate_405b60(gr);
 
-	gf100_gr_icmd(priv, oclass->icmd);
-	nv_wr32(priv, 0x404154, 0x00000800);
-	gf100_gr_mthd(priv, oclass->mthd);
+	gf100_gr_icmd(gr, grctx->icmd);
+	nvkm_wr32(device, 0x404154, 0x00000800);
+	gf100_gr_mthd(gr, grctx->mthd);
 
-	nv_mask(priv, 0x418e94, 0xffffffff, 0xc4230000);
-	nv_mask(priv, 0x418e4c, 0xffffffff, 0x70000000);
+	nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000);
+	nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000);
 }
 
-struct nvkm_oclass *
-gm204_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0x24),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gm204_grctx = {
 	.main  = gm204_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gm204_grctx_pack_hub,
@@ -1051,4 +1046,4 @@
 	.attrib_nr = 0x400,
 	.alpha_nr_max = 0x1800,
 	.alpha_nr = 0x1000,
-}.base;
+};
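
The round-robin distribution loop in gm204_grctx_generate_405b60() above is easier to follow in isolation. Below is a minimal user-space sketch, not kernel code: the GPC/TPC counts are invented (the real ones live in struct gf100_gr), but the packing is the same — one byte per TPC, (gpc << 4) | tpc, four TPCs per 32-bit word written at 0x405b60.

#include <stdio.h>

#define GPC_NR    4
#define TPC_TOTAL 10	/* sum of tpc_nr[] below */

int main(void)
{
	const int tpc_nr[GPC_NR] = { 3, 3, 2, 2 };	/* invented per-GPC TPC counts */
	unsigned int dist[(TPC_TOTAL + 3) / 4] = { 0 };
	int tpcnr[GPC_NR], gpc, tpc, i;

	for (i = 0; i < GPC_NR; i++)
		tpcnr[i] = tpc_nr[i];

	/* walk the GPCs round-robin, skipping any that have run out of TPCs */
	for (gpc = -1, i = 0; i < TPC_TOTAL; i++) {
		do {
			gpc = (gpc + 1) % GPC_NR;
		} while (!tpcnr[gpc]);
		tpc = tpc_nr[gpc] - tpcnr[gpc]--;

		/* one byte per TPC, (gpc << 4) | tpc, four TPCs per word */
		dist[i / 4] |= (unsigned int)((gpc << 4) | tpc) << ((i % 4) * 8);
	}

	for (i = 0; i < (TPC_TOTAL + 3) / 4; i++)
		printf("0x405b60+%02x = %08x\n", i * 4, dist[i]);
	return 0;
}

With these counts the walk yields dist[0] = 0x30201000: TPC0 of each GPC is placed in turn before any GPC's TPC1, which is the "not the same distribution as the binary driver" behaviour the in-code comment concedes.
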
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
index 91ec416..d6be603 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm206.c
@@ -49,17 +49,8 @@
 	{}
 };
 
-struct nvkm_oclass *
-gm206_grctx_oclass = &(struct gf100_grctx_oclass) {
-	.base.handle = NV_ENGCTX(GR, 0x26),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_context_ctor,
-		.dtor = gf100_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+const struct gf100_grctx_func
+gm206_grctx = {
 	.main  = gm204_grctx_generate_main,
 	.unkn  = gk104_grctx_generate_unkn,
 	.hub   = gm204_grctx_pack_hub,
@@ -80,4 +71,4 @@
 	.attrib_nr = 0x400,
 	.alpha_nr_max = 0x1800,
 	.alpha_nr = 0x1000,
-}.base;
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
new file mode 100644
index 0000000..6702604
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "ctxgf100.h"
+
+static void
+gm20b_grctx_generate_r406028(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	u32 tpc_per_gpc = 0;
+	int i;
+
+	for (i = 0; i < gr->gpc_nr; i++)
+		tpc_per_gpc |= gr->tpc_nr[i] << (4 * i);
+
+	nvkm_wr32(device, 0x406028, tpc_per_gpc);
+	nvkm_wr32(device, 0x405870, tpc_per_gpc);
+}
+
+static void
+gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	int idle_timeout_save;
+	int i, tmp;
+
+	gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+
+	gf100_gr_wait_idle(gr);
+
+	idle_timeout_save = nvkm_rd32(device, 0x404154);
+	nvkm_wr32(device, 0x404154, 0x00000000);
+
+	grctx->attrib(info);
+
+	grctx->unkn(gr);
+
+	gm204_grctx_generate_tpcid(gr);
+	gm20b_grctx_generate_r406028(gr);
+	gk104_grctx_generate_r418bb8(gr);
+
+	for (i = 0; i < 8; i++)
+		nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+
+	nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+
+	gk104_grctx_generate_rop_active_fbps(gr);
+	nvkm_wr32(device, 0x408908, nvkm_rd32(device, 0x410108) | 0x80000000);
+
+	for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
+		tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
+	nvkm_wr32(device, 0x4041c4, tmp);
+
+	gm204_grctx_generate_405b60(gr);
+
+	gf100_gr_wait_idle(gr);
+
+	nvkm_wr32(device, 0x404154, idle_timeout_save);
+	gf100_gr_wait_idle(gr);
+
+	gf100_gr_mthd(gr, gr->fuc_method);
+	gf100_gr_wait_idle(gr);
+
+	gf100_gr_icmd(gr, gr->fuc_bundle);
+	grctx->pagepool(info);
+	grctx->bundle(info);
+}
+
+const struct gf100_grctx_func
+gm20b_grctx = {
+	.main  = gm20b_grctx_generate_main,
+	.unkn  = gk104_grctx_generate_unkn,
+	.bundle = gm107_grctx_generate_bundle,
+	.bundle_size = 0x1800,
+	.bundle_min_gpm_fifo_depth = 0x182,
+	.bundle_token_limit = 0x1c0,
+	.pagepool = gm107_grctx_generate_pagepool,
+	.pagepool_size = 0x8000,
+	.attrib = gm107_grctx_generate_attrib,
+	.attrib_nr_max = 0x600,
+	.attrib_nr = 0x400,
+	.alpha_nr_max = 0xc00,
+	.alpha_nr = 0x800,
+};
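
gm20b_grctx_generate_r406028() and the 0x4041c4 write in gm20b_grctx_generate_main() both nibble-pack per-GPC TPC information, in two different ways. A standalone sketch with invented counts (gpc_nr and tpc_nr are properties of the real hardware) makes the difference visible: 0x406028/0x405870 get a 4-bit TPC count per GPC, while 0x4041c4 gets a 4-bit mask of present TPCs per GPC.

#include <stdio.h>

int main(void)
{
	const int gpc_nr = 2;
	const int tpc_nr[2] = { 2, 1 };		/* invented per-GPC TPC counts */
	unsigned int tpc_per_gpc = 0, tpc_mask = 0;
	int i;

	for (i = 0; i < gpc_nr; i++) {
		/* 0x406028/0x405870: 4-bit TPC count per GPC */
		tpc_per_gpc |= (unsigned int)tpc_nr[i] << (4 * i);
		/* 0x4041c4: 4-bit mask of present TPCs per GPC */
		tpc_mask |= ((1u << tpc_nr[i]) - 1) << (i * 4);
	}

	printf("0x406028 = %08x\n", tpc_per_gpc);	/* 00000012 */
	printf("0x4041c4 = %08x\n", tpc_mask);		/* 00000013 */
	return 0;
}
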
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
index dc31462..80a6b01 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.c
@@ -111,7 +111,6 @@
 
 #include "ctxnv40.h"
 #include "nv40.h"
-#include <core/device.h>
 
 /* TODO:
  *  - get vs count from 0x1540
@@ -583,13 +582,13 @@
 
 	offset += 0x0280/4;
 	for (i = 0; i < 16; i++, offset += 2)
-		nv_wo32(obj, offset * 4, 0x3f800000);
+		nvkm_wo32(obj, offset * 4, 0x3f800000);
 
 	for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
 		for (i = 0; i < vs_nr_b0 * 6; i += 6)
-			nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
+			nvkm_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
 		for (i = 0; i < vs_nr_b1 * 4; i += 4)
-			nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
+			nvkm_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
 	}
 }
 
@@ -675,7 +674,7 @@
 	struct nvkm_grctx ctx = {
 		.device = device,
 		.mode = NVKM_GRCTX_PROG,
-		.data = ctxprog,
+		.ucode = ctxprog,
 		.ctxprog_max = 256,
 	};
 
@@ -684,9 +683,9 @@
 
 	nv40_grctx_generate(&ctx);
 
-	nv_wr32(device, 0x400324, 0);
+	nvkm_wr32(device, 0x400324, 0);
 	for (i = 0; i < ctx.ctxprog_len; i++)
-		nv_wr32(device, 0x400328, ctxprog[i]);
+		nvkm_wr32(device, 0x400328, ctxprog[i]);
 	*size = ctx.ctxvals_pos * 4;
 
 	kfree(ctxprog);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
index 8a89961..50e808e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
@@ -9,7 +9,8 @@
 		NVKM_GRCTX_PROG,
 		NVKM_GRCTX_VALS
 	} mode;
-	void *data;
+	u32 *ucode;
+	struct nvkm_gpuobj *data;
 
 	u32 ctxprog_max;
 	u32 ctxprog_len;
@@ -22,7 +23,7 @@
 static inline void
 cp_out(struct nvkm_grctx *ctx, u32 inst)
 {
-	u32 *ctxprog = ctx->data;
+	u32 *ctxprog = ctx->ucode;
 
 	if (ctx->mode != NVKM_GRCTX_PROG)
 		return;
@@ -56,7 +57,7 @@
 static inline void
 cp_name(struct nvkm_grctx *ctx, int name)
 {
-	u32 *ctxprog = ctx->data;
+	u32 *ctxprog = ctx->ucode;
 	int i;
 
 	if (ctx->mode != NVKM_GRCTX_PROG)
@@ -124,6 +125,6 @@
 	reg = (reg - 0x00400000) / 4;
 	reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
 
-	nv_wo32(ctx->data, reg * 4, val);
+	nvkm_wo32(ctx->data, reg * 4, val);
 }
 #endif
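
The header change above replaces the old untyped data pointer with two fields: ucode, a plain u32 buffer that collects ctxprog opcodes in NVKM_GRCTX_PROG mode, and data, now a struct nvkm_gpuobj written through nvkm_wo32() in NVKM_GRCTX_VALS mode. As a rough user-space sketch of the PROG-mode append that cp_out() performs — struct layout and opcode words here are made up, and the real cp_out() guards ctxprog_max differently than the simple error print below:

#include <stdio.h>

enum grctx_mode { GRCTX_PROG, GRCTX_VALS };

struct grctx {
	enum grctx_mode mode;
	unsigned int *ucode;	/* ctxprog opcode stream (PROG mode only) */
	int ctxprog_max;
	int ctxprog_len;
};

static void cp_out(struct grctx *ctx, unsigned int inst)
{
	if (ctx->mode != GRCTX_PROG)
		return;
	if (ctx->ctxprog_len == ctx->ctxprog_max) {	/* would overflow */
		fprintf(stderr, "ctxprog overflow\n");
		return;
	}
	ctx->ucode[ctx->ctxprog_len++] = inst;
}

int main(void)
{
	unsigned int buf[256];
	struct grctx ctx = { GRCTX_PROG, buf, 256, 0 };
	int i;

	cp_out(&ctx, 0x00600005);	/* made-up opcode words */
	cp_out(&ctx, 0x00800001);

	/* mirrors the upload loop in ctxnv40.c: each word goes to 0x400328 */
	for (i = 0; i < ctx.ctxprog_len; i++)
		printf("0x400328 <- %08x\n", ctx.ucode[i]);
	return 0;
}
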
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
index 9c9528d..1e13278 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv50.c
@@ -107,7 +107,6 @@
 
 #include "ctxnv40.h"
 
-#include <core/device.h>
 #include <subdev/fb.h>
 
 #define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
@@ -269,7 +268,7 @@
 	struct nvkm_grctx ctx = {
 		.device = device,
 		.mode = NVKM_GRCTX_PROG,
-		.data = ctxprog,
+		.ucode = ctxprog,
 		.ctxprog_max = 512,
 	};
 
@@ -277,9 +276,9 @@
 		return -ENOMEM;
 	nv50_grctx_generate(&ctx);
 
-	nv_wr32(device, 0x400324, 0);
+	nvkm_wr32(device, 0x400324, 0);
 	for (i = 0; i < ctx.ctxprog_len; i++)
-		nv_wr32(device, 0x400328, ctxprog[i]);
+		nvkm_wr32(device, 0x400328, ctxprog[i]);
 	*size = ctx.ctxvals_pos * 4;
 	kfree(ctxprog);
 	return 0;
@@ -299,7 +298,7 @@
 	struct nvkm_device *device = ctx->device;
 	int i, j;
 	int offset, base;
-	u32 units = nv_rd32 (ctx->device, 0x1540);
+	u32 units = nvkm_rd32(device, 0x1540);
 
 	/* 0800: DISPATCH */
 	cp_ctx(ctx, 0x400808, 7);
@@ -570,7 +569,7 @@
 		else if (device->chipset < 0xa0)
 			gr_def(ctx, 0x407d08, 0x00390040);
 		else {
-			if (nvkm_fb(device)->ram->type != NV_MEM_TYPE_GDDR5)
+			if (device->fb->ram->type != NVKM_RAM_TYPE_GDDR5)
 				gr_def(ctx, 0x407d08, 0x003d0040);
 			else
 				gr_def(ctx, 0x407d08, 0x003c0040);
@@ -784,9 +783,10 @@
 static void
 dd_emit(struct nvkm_grctx *ctx, int num, u32 val) {
 	int i;
-	if (val && ctx->mode == NVKM_GRCTX_VALS)
+	if (val && ctx->mode == NVKM_GRCTX_VALS) {
 		for (i = 0; i < num; i++)
-			nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+			nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+	}
 	ctx->ctxvals_pos += num;
 }
 
@@ -1156,9 +1156,10 @@
 static void
 xf_emit(struct nvkm_grctx *ctx, int num, u32 val) {
 	int i;
-	if (val && ctx->mode == NVKM_GRCTX_VALS)
+	if (val && ctx->mode == NVKM_GRCTX_VALS) {
 		for (i = 0; i < num; i++)
-			nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
+			nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
+	}
 	ctx->ctxvals_pos += num << 3;
 }
 
@@ -1190,7 +1191,7 @@
 	int i;
 	int offset;
 	int size = 0;
-	u32 units = nv_rd32 (ctx->device, 0x1540);
+	u32 units = nvkm_rd32(device, 0x1540);
 
 	offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
 	ctx->ctxvals_base = offset;
@@ -3273,7 +3274,7 @@
 	struct nvkm_device *device = ctx->device;
 	int i;
 	u32 offset;
-	u32 units = nv_rd32 (ctx->device, 0x1540);
+	u32 units = nvkm_rd32(device, 0x1540);
 	int size = 0;
 
 	offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
new file mode 100644
index 0000000..ce91330
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/g84.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+#include <subdev/timer.h>
+
+static const struct nvkm_bitfield nv50_gr_status[] = {
+	{ 0x00000001, "BUSY" }, /* set when any bit is set */
+	{ 0x00000002, "DISPATCH" },
+	{ 0x00000004, "UNK2" },
+	{ 0x00000008, "UNK3" },
+	{ 0x00000010, "UNK4" },
+	{ 0x00000020, "UNK5" },
+	{ 0x00000040, "M2MF" },
+	{ 0x00000080, "UNK7" },
+	{ 0x00000100, "CTXPROG" },
+	{ 0x00000200, "VFETCH" },
+	{ 0x00000400, "CCACHE_PREGEOM" },
+	{ 0x00000800, "STRMOUT_VATTR_POSTGEOM" },
+	{ 0x00001000, "VCLIP" },
+	{ 0x00002000, "RATTR_APLANE" },
+	{ 0x00004000, "TRAST" },
+	{ 0x00008000, "CLIPID" },
+	{ 0x00010000, "ZCULL" },
+	{ 0x00020000, "ENG2D" },
+	{ 0x00040000, "RMASK" },
+	{ 0x00080000, "TPC_RAST" },
+	{ 0x00100000, "TPC_PROP" },
+	{ 0x00200000, "TPC_TEX" },
+	{ 0x00400000, "TPC_GEOM" },
+	{ 0x00800000, "TPC_MP" },
+	{ 0x01000000, "ROP" },
+	{}
+};
+
+static const struct nvkm_bitfield
+nv50_gr_vstatus_0[] = {
+	{ 0x01, "VFETCH" },
+	{ 0x02, "CCACHE" },
+	{ 0x04, "PREGEOM" },
+	{ 0x08, "POSTGEOM" },
+	{ 0x10, "VATTR" },
+	{ 0x20, "STRMOUT" },
+	{ 0x40, "VCLIP" },
+	{}
+};
+
+static const struct nvkm_bitfield
+nv50_gr_vstatus_1[] = {
+	{ 0x01, "TPC_RAST" },
+	{ 0x02, "TPC_PROP" },
+	{ 0x04, "TPC_TEX" },
+	{ 0x08, "TPC_GEOM" },
+	{ 0x10, "TPC_MP" },
+	{}
+};
+
+static const struct nvkm_bitfield
+nv50_gr_vstatus_2[] = {
+	{ 0x01, "RATTR" },
+	{ 0x02, "APLANE" },
+	{ 0x04, "TRAST" },
+	{ 0x08, "CLIPID" },
+	{ 0x10, "ZCULL" },
+	{ 0x20, "ENG2D" },
+	{ 0x40, "RMASK" },
+	{ 0x80, "ROP" },
+	{}
+};
+
+static void
+nvkm_gr_vstatus_print(struct nv50_gr *gr, int r,
+		      const struct nvkm_bitfield *units, u32 status)
+{
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	u32 stat = status;
+	u8  mask = 0x00;
+	char msg[64];
+	int i;
+
+	for (i = 0; units[i].name && status; i++) {
+		if ((status & 7) == 1)
+			mask |= (1 << i);
+		status >>= 3;
+	}
+
+	nvkm_snprintbf(msg, sizeof(msg), units, mask);
+	nvkm_error(subdev, "PGRAPH_VSTATUS%d: %08x [%s]\n", r, stat, msg);
+}
+
+int
+g84_gr_tlb_flush(struct nvkm_gr *base)
+{
+	struct nv50_gr *gr = nv50_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_timer *tmr = device->timer;
+	bool idle, timeout = false;
+	unsigned long flags;
+	char status[128];
+	u64 start;
+	u32 tmp;
+
+	spin_lock_irqsave(&gr->lock, flags);
+	nvkm_mask(device, 0x400500, 0x00000001, 0x00000000);
+
+	start = nvkm_timer_read(tmr);
+	do {
+		idle = true;
+
+		for (tmp = nvkm_rd32(device, 0x400380); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+
+		for (tmp = nvkm_rd32(device, 0x400384); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+
+		for (tmp = nvkm_rd32(device, 0x400388); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+	} while (!idle &&
+		 !(timeout = nvkm_timer_read(tmr) - start > 2000000000));
+
+	if (timeout) {
+		nvkm_error(subdev, "PGRAPH TLB flush idle timeout fail\n");
+
+		tmp = nvkm_rd32(device, 0x400700);
+		nvkm_snprintbf(status, sizeof(status), nv50_gr_status, tmp);
+		nvkm_error(subdev, "PGRAPH_STATUS %08x [%s]\n", tmp, status);
+
+		nvkm_gr_vstatus_print(gr, 0, nv50_gr_vstatus_0,
+				       nvkm_rd32(device, 0x400380));
+		nvkm_gr_vstatus_print(gr, 1, nv50_gr_vstatus_1,
+				       nvkm_rd32(device, 0x400384));
+		nvkm_gr_vstatus_print(gr, 2, nv50_gr_vstatus_2,
+				       nvkm_rd32(device, 0x400388));
+	}
+
+
+	nvkm_wr32(device, 0x100c80, 0x00000001);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+			break;
+	);
+	nvkm_mask(device, 0x400500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&gr->lock, flags);
+	return timeout ? -EBUSY : 0;
+}
+
+static const struct nvkm_gr_func
+g84_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.tlb_flush = g84_gr_tlb_flush,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{ -1, -1, 0x8297, &nv50_gr_object },
+		{}
+	}
+};
+
+int
+g84_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&g84_gr, device, index, pgr);
+}
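
The VSTATUS handling in the new g84.c is compact enough to miss: each functional unit occupies a 3-bit field in the 0x400380/0x400384/0x400388 registers, and a field value of 1 means that unit is busy. That one test is both the idle check in g84_gr_tlb_flush() and the fold into a per-unit mask in nvkm_gr_vstatus_print(). A standalone sketch, using the VSTATUS0 unit names from above and a made-up register value:

#include <stdio.h>

static const char *const units[] = {
	"VFETCH", "CCACHE", "PREGEOM", "POSTGEOM",
	"VATTR", "STRMOUT", "VCLIP",
};

int main(void)
{
	unsigned int status = 0x00048001;	/* invented VSTATUS0 value */
	unsigned int stat = status, mask = 0;
	int i;

	/* fold 3-bit fields into one bit per unit; value 1 == busy */
	for (i = 0; i < 7 && stat; i++) {
		if ((stat & 7) == 1)
			mask |= 1u << i;
		stat >>= 3;
	}

	printf("VSTATUS %08x busy units:", status);
	for (i = 0; i < 7; i++)
		if (mask & (1u << i))
			printf(" %s", units[i]);
	printf("\n");
	return 0;
}

The same (tmp & 7) == 1 test is what the flush loop uses to decide the engine is still busy before it finally pokes 0x100c80 to kick off the TLB flush.
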
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index ca11ddb..f1358a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -26,13 +26,12 @@
 #include "fuc/os.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
 #include <core/option.h>
-#include <engine/fifo.h>
 #include <subdev/fb.h>
 #include <subdev/mc.h>
+#include <subdev/pmu.h>
 #include <subdev/timer.h>
+#include <engine/fifo.h>
 
 #include <nvif/class.h>
 #include <nvif/unpack.h>
@@ -42,35 +41,36 @@
  ******************************************************************************/
 
 static void
-gf100_gr_zbc_clear_color(struct gf100_gr_priv *priv, int zbc)
+gf100_gr_zbc_clear_color(struct gf100_gr *gr, int zbc)
 {
-	if (priv->zbc_color[zbc].format) {
-		nv_wr32(priv, 0x405804, priv->zbc_color[zbc].ds[0]);
-		nv_wr32(priv, 0x405808, priv->zbc_color[zbc].ds[1]);
-		nv_wr32(priv, 0x40580c, priv->zbc_color[zbc].ds[2]);
-		nv_wr32(priv, 0x405810, priv->zbc_color[zbc].ds[3]);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	if (gr->zbc_color[zbc].format) {
+		nvkm_wr32(device, 0x405804, gr->zbc_color[zbc].ds[0]);
+		nvkm_wr32(device, 0x405808, gr->zbc_color[zbc].ds[1]);
+		nvkm_wr32(device, 0x40580c, gr->zbc_color[zbc].ds[2]);
+		nvkm_wr32(device, 0x405810, gr->zbc_color[zbc].ds[3]);
 	}
-	nv_wr32(priv, 0x405814, priv->zbc_color[zbc].format);
-	nv_wr32(priv, 0x405820, zbc);
-	nv_wr32(priv, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */
+	nvkm_wr32(device, 0x405814, gr->zbc_color[zbc].format);
+	nvkm_wr32(device, 0x405820, zbc);
+	nvkm_wr32(device, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */
 }
 
 static int
-gf100_gr_zbc_color_get(struct gf100_gr_priv *priv, int format,
+gf100_gr_zbc_color_get(struct gf100_gr *gr, int format,
 		       const u32 ds[4], const u32 l2[4])
 {
-	struct nvkm_ltc *ltc = nvkm_ltc(priv);
+	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
 	int zbc = -ENOSPC, i;
 
 	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
-		if (priv->zbc_color[i].format) {
-			if (priv->zbc_color[i].format != format)
+		if (gr->zbc_color[i].format) {
+			if (gr->zbc_color[i].format != format)
 				continue;
-			if (memcmp(priv->zbc_color[i].ds, ds, sizeof(
-				   priv->zbc_color[i].ds)))
+			if (memcmp(gr->zbc_color[i].ds, ds, sizeof(
+				   gr->zbc_color[i].ds)))
 				continue;
-			if (memcmp(priv->zbc_color[i].l2, l2, sizeof(
-				   priv->zbc_color[i].l2))) {
+			if (memcmp(gr->zbc_color[i].l2, l2, sizeof(
+				   gr->zbc_color[i].l2))) {
 				WARN_ON(1);
 				return -EINVAL;
 			}
@@ -83,38 +83,39 @@
 	if (zbc < 0)
 		return zbc;
 
-	memcpy(priv->zbc_color[zbc].ds, ds, sizeof(priv->zbc_color[zbc].ds));
-	memcpy(priv->zbc_color[zbc].l2, l2, sizeof(priv->zbc_color[zbc].l2));
-	priv->zbc_color[zbc].format = format;
-	ltc->zbc_color_get(ltc, zbc, l2);
-	gf100_gr_zbc_clear_color(priv, zbc);
+	memcpy(gr->zbc_color[zbc].ds, ds, sizeof(gr->zbc_color[zbc].ds));
+	memcpy(gr->zbc_color[zbc].l2, l2, sizeof(gr->zbc_color[zbc].l2));
+	gr->zbc_color[zbc].format = format;
+	nvkm_ltc_zbc_color_get(ltc, zbc, l2);
+	gf100_gr_zbc_clear_color(gr, zbc);
 	return zbc;
 }
 
 static void
-gf100_gr_zbc_clear_depth(struct gf100_gr_priv *priv, int zbc)
+gf100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc)
 {
-	if (priv->zbc_depth[zbc].format)
-		nv_wr32(priv, 0x405818, priv->zbc_depth[zbc].ds);
-	nv_wr32(priv, 0x40581c, priv->zbc_depth[zbc].format);
-	nv_wr32(priv, 0x405820, zbc);
-	nv_wr32(priv, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	if (gr->zbc_depth[zbc].format)
+		nvkm_wr32(device, 0x405818, gr->zbc_depth[zbc].ds);
+	nvkm_wr32(device, 0x40581c, gr->zbc_depth[zbc].format);
+	nvkm_wr32(device, 0x405820, zbc);
+	nvkm_wr32(device, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */
 }
 
 static int
-gf100_gr_zbc_depth_get(struct gf100_gr_priv *priv, int format,
+gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format,
 		       const u32 ds, const u32 l2)
 {
-	struct nvkm_ltc *ltc = nvkm_ltc(priv);
+	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
 	int zbc = -ENOSPC, i;
 
 	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
-		if (priv->zbc_depth[i].format) {
-			if (priv->zbc_depth[i].format != format)
+		if (gr->zbc_depth[i].format) {
+			if (gr->zbc_depth[i].format != format)
 				continue;
-			if (priv->zbc_depth[i].ds != ds)
+			if (gr->zbc_depth[i].ds != ds)
 				continue;
-			if (priv->zbc_depth[i].l2 != l2) {
+			if (gr->zbc_depth[i].l2 != l2) {
 				WARN_ON(1);
 				return -EINVAL;
 			}
@@ -127,11 +128,11 @@
 	if (zbc < 0)
 		return zbc;
 
-	priv->zbc_depth[zbc].format = format;
-	priv->zbc_depth[zbc].ds = ds;
-	priv->zbc_depth[zbc].l2 = l2;
-	ltc->zbc_depth_get(ltc, zbc, l2);
-	gf100_gr_zbc_clear_depth(priv, zbc);
+	gr->zbc_depth[zbc].format = format;
+	gr->zbc_depth[zbc].ds = ds;
+	gr->zbc_depth[zbc].l2 = l2;
+	nvkm_ltc_zbc_depth_get(ltc, zbc, l2);
+	gf100_gr_zbc_clear_depth(gr, zbc);
 	return zbc;
 }
 
@@ -142,7 +143,7 @@
 static int
 gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
 {
-	struct gf100_gr_priv *priv = (void *)object->engine;
+	struct gf100_gr *gr = (void *)object->engine;
 	union {
 		struct fermi_a_zbc_color_v0 v0;
 	} *args = data;
@@ -169,7 +170,7 @@
 		case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8:
 		case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10:
 		case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11:
-			ret = gf100_gr_zbc_color_get(priv, args->v0.format,
+			ret = gf100_gr_zbc_color_get(gr, args->v0.format,
 							   args->v0.ds,
 							   args->v0.l2);
 			if (ret >= 0) {
@@ -188,7 +189,7 @@
 static int
 gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
 {
-	struct gf100_gr_priv *priv = (void *)object->engine;
+	struct gf100_gr *gr = (void *)object->engine;
 	union {
 		struct fermi_a_zbc_depth_v0 v0;
 	} *args = data;
@@ -197,7 +198,7 @@
 	if (nvif_unpack(args->v0, 0, 0, false)) {
 		switch (args->v0.format) {
 		case FERMI_A_ZBC_DEPTH_V0_FMT_FP32:
-			ret = gf100_gr_zbc_depth_get(priv, args->v0.format,
+			ret = gf100_gr_zbc_depth_get(gr, args->v0.format,
 							   args->v0.ds,
 							   args->v0.l2);
 			return (ret >= 0) ? 0 : -ENOSPC;
@@ -223,106 +224,176 @@
 	return -EINVAL;
 }
 
-struct nvkm_ofuncs
-gf100_fermi_ofuncs = {
-	.ctor = _nvkm_object_ctor,
-	.dtor = nvkm_object_destroy,
-	.init = nvkm_object_init,
-	.fini = nvkm_object_fini,
+const struct nvkm_object_func
+gf100_fermi = {
 	.mthd = gf100_fermi_mthd,
 };
 
-static int
-gf100_gr_set_shader_exceptions(struct nvkm_object *object, u32 mthd,
-			       void *pdata, u32 size)
+static void
+gf100_gr_mthd_set_shader_exceptions(struct nvkm_device *device, u32 data)
 {
-	struct gf100_gr_priv *priv = (void *)object->engine;
-	if (size >= sizeof(u32)) {
-		u32 data = *(u32 *)pdata ? 0xffffffff : 0x00000000;
-		nv_wr32(priv, 0x419e44, data);
-		nv_wr32(priv, 0x419e4c, data);
-		return 0;
-	}
-	return -EINVAL;
+	nvkm_wr32(device, 0x419e44, data ? 0xffffffff : 0x00000000);
+	nvkm_wr32(device, 0x419e4c, data ? 0xffffffff : 0x00000000);
 }
 
-struct nvkm_omthds
-gf100_gr_9097_omthds[] = {
-	{ 0x1528, 0x1528, gf100_gr_set_shader_exceptions },
-	{}
-};
+static bool
+gf100_gr_mthd_sw(struct nvkm_device *device, u16 class, u32 mthd, u32 data)
+{
+	switch (class & 0x00ff) {
+	case 0x97:
+	case 0xc0:
+		switch (mthd) {
+		case 0x1528:
+			gf100_gr_mthd_set_shader_exceptions(device, data);
+			return true;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+	return false;
+}
 
-struct nvkm_omthds
-gf100_gr_90c0_omthds[] = {
-	{ 0x1528, 0x1528, gf100_gr_set_shader_exceptions },
-	{}
-};
+static int
+gf100_gr_object_get(struct nvkm_gr *base, int index, struct nvkm_sclass *sclass)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+	int c = 0;
 
-struct nvkm_oclass
-gf100_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
-	{ FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
+	while (gr->func->sclass[c].oclass) {
+		if (c++ == index) {
+			*sclass = gr->func->sclass[index];
+			return index;
+		}
+	}
+
+	return c;
+}
 
 /*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
-int
-gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *args, u32 size,
-		      struct nvkm_object **pobject)
+static int
+gf100_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		   int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_vm *vm = nvkm_client(parent)->vm;
-	struct gf100_gr_priv *priv = (void *)engine;
-	struct gf100_gr_data *data = priv->mmio_data;
-	struct gf100_gr_mmio *mmio = priv->mmio_list;
-	struct gf100_gr_chan *chan;
+	struct gf100_gr_chan *chan = gf100_gr_chan(object);
+	struct gf100_gr *gr = chan->gr;
 	int ret, i;
 
-	/* allocate memory for context, and fill with default values */
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL,
-				     priv->size, 0x100,
-				     NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
+			      align, false, parent, pgpuobj);
 	if (ret)
 		return ret;
 
+	nvkm_kmap(*pgpuobj);
+	for (i = 0; i < gr->size; i += 4)
+		nvkm_wo32(*pgpuobj, i, gr->data[i / 4]);
+
+	if (!gr->firmware) {
+		nvkm_wo32(*pgpuobj, 0x00, chan->mmio_nr / 2);
+		nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma.offset >> 8);
+	} else {
+		nvkm_wo32(*pgpuobj, 0xf4, 0);
+		nvkm_wo32(*pgpuobj, 0xf8, 0);
+		nvkm_wo32(*pgpuobj, 0x10, chan->mmio_nr / 2);
+		nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma.offset));
+		nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma.offset));
+		nvkm_wo32(*pgpuobj, 0x1c, 1);
+		nvkm_wo32(*pgpuobj, 0x20, 0);
+		nvkm_wo32(*pgpuobj, 0x28, 0);
+		nvkm_wo32(*pgpuobj, 0x2c, 0);
+	}
+	nvkm_done(*pgpuobj);
+	return 0;
+}
+
+static void *
+gf100_gr_chan_dtor(struct nvkm_object *object)
+{
+	struct gf100_gr_chan *chan = gf100_gr_chan(object);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
+		if (chan->data[i].vma.node) {
+			nvkm_vm_unmap(&chan->data[i].vma);
+			nvkm_vm_put(&chan->data[i].vma);
+		}
+		nvkm_memory_del(&chan->data[i].mem);
+	}
+
+	if (chan->mmio_vma.node) {
+		nvkm_vm_unmap(&chan->mmio_vma);
+		nvkm_vm_put(&chan->mmio_vma);
+	}
+	nvkm_memory_del(&chan->mmio);
+	return chan;
+}
+
+static const struct nvkm_object_func
+gf100_gr_chan = {
+	.dtor = gf100_gr_chan_dtor,
+	.bind = gf100_gr_chan_bind,
+};
+
+static int
+gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		  const struct nvkm_oclass *oclass,
+		  struct nvkm_object **pobject)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+	struct gf100_gr_data *data = gr->mmio_data;
+	struct gf100_gr_mmio *mmio = gr->mmio_list;
+	struct gf100_gr_chan *chan;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	int ret, i;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&gf100_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	*pobject = &chan->object;
+
 	/* allocate memory for a "mmio list" buffer that's used by the HUB
 	 * fuc to modify some per-context register settings on first load
 	 * of the context.
 	 */
-	ret = nvkm_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0,
-			      &chan->mmio);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x100,
+			      false, &chan->mmio);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm,
-				 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
-				 &chan->mmio_vma);
+	ret = nvkm_vm_get(fifoch->vm, 0x1000, 12, NV_MEM_ACCESS_RW |
+			  NV_MEM_ACCESS_SYS, &chan->mmio_vma);
 	if (ret)
 		return ret;
 
+	nvkm_memory_map(chan->mmio, &chan->mmio_vma, 0);
+
 	/* allocate buffers referenced by mmio list */
-	for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
-		ret = nvkm_gpuobj_new(nv_object(chan), NULL, data->size,
-				      data->align, 0, &chan->data[i].mem);
+	for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) {
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      data->size, data->align, false,
+				      &chan->data[i].mem);
 		if (ret)
 			return ret;
 
-		ret = nvkm_gpuobj_map_vm(chan->data[i].mem, vm, data->access,
-					 &chan->data[i].vma);
+		ret = nvkm_vm_get(fifoch->vm,
+				  nvkm_memory_size(chan->data[i].mem), 12,
+				  data->access, &chan->data[i].vma);
 		if (ret)
 			return ret;
 
+		nvkm_memory_map(chan->data[i].mem, &chan->data[i].vma, 0);
 		data++;
 	}
 
 	/* finally, fill in the mmio list and point the context at it */
-	for (i = 0; mmio->addr && i < ARRAY_SIZE(priv->mmio_list); i++) {
+	nvkm_kmap(chan->mmio);
+	for (i = 0; mmio->addr && i < ARRAY_SIZE(gr->mmio_list); i++) {
 		u32 addr = mmio->addr;
 		u32 data = mmio->data;
 
@@ -331,49 +402,14 @@
 			data |= info >> mmio->shift;
 		}
 
-		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
-		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
+		nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
+		nvkm_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
 		mmio++;
 	}
-
-	for (i = 0; i < priv->size; i += 4)
-		nv_wo32(chan, i, priv->data[i / 4]);
-
-	if (!priv->firmware) {
-		nv_wo32(chan, 0x00, chan->mmio_nr / 2);
-		nv_wo32(chan, 0x04, chan->mmio_vma.offset >> 8);
-	} else {
-		nv_wo32(chan, 0xf4, 0);
-		nv_wo32(chan, 0xf8, 0);
-		nv_wo32(chan, 0x10, chan->mmio_nr / 2);
-		nv_wo32(chan, 0x14, lower_32_bits(chan->mmio_vma.offset));
-		nv_wo32(chan, 0x18, upper_32_bits(chan->mmio_vma.offset));
-		nv_wo32(chan, 0x1c, 1);
-		nv_wo32(chan, 0x20, 0);
-		nv_wo32(chan, 0x28, 0);
-		nv_wo32(chan, 0x2c, 0);
-	}
-
+	nvkm_done(chan->mmio);
 	return 0;
 }
 
-void
-gf100_gr_context_dtor(struct nvkm_object *object)
-{
-	struct gf100_gr_chan *chan = (void *)object;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
-		nvkm_gpuobj_unmap(&chan->data[i].vma);
-		nvkm_gpuobj_ref(NULL, &chan->data[i].mem);
-	}
-
-	nvkm_gpuobj_unmap(&chan->mmio_vma);
-	nvkm_gpuobj_ref(NULL, &chan->mmio);
-
-	nvkm_gr_context_destroy(&chan->base);
-}
-
 /*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
@@ -635,7 +671,7 @@
  ******************************************************************************/
 
 void
-gf100_gr_zbc_init(struct gf100_gr_priv *priv)
+gf100_gr_zbc_init(struct gf100_gr *gr)
 {
 	const u32  zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
 			      0x00000000, 0x00000000, 0x00000000, 0x00000000 };
@@ -645,22 +681,22 @@
 			      0x00000000, 0x00000000, 0x00000000, 0x00000000 };
 	const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
 			      0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 };
-	struct nvkm_ltc *ltc = nvkm_ltc(priv);
+	struct nvkm_ltc *ltc = gr->base.engine.subdev.device->ltc;
 	int index;
 
-	if (!priv->zbc_color[0].format) {
-		gf100_gr_zbc_color_get(priv, 1,  & zero[0],   &zero[4]);
-		gf100_gr_zbc_color_get(priv, 2,  &  one[0],    &one[4]);
-		gf100_gr_zbc_color_get(priv, 4,  &f32_0[0],  &f32_0[4]);
-		gf100_gr_zbc_color_get(priv, 4,  &f32_1[0],  &f32_1[4]);
-		gf100_gr_zbc_depth_get(priv, 1, 0x00000000, 0x00000000);
-		gf100_gr_zbc_depth_get(priv, 1, 0x3f800000, 0x3f800000);
+	if (!gr->zbc_color[0].format) {
+		gf100_gr_zbc_color_get(gr, 1,  & zero[0],   &zero[4]);
+		gf100_gr_zbc_color_get(gr, 2,  &  one[0],    &one[4]);
+		gf100_gr_zbc_color_get(gr, 4,  &f32_0[0],  &f32_0[4]);
+		gf100_gr_zbc_color_get(gr, 4,  &f32_1[0],  &f32_1[4]);
+		gf100_gr_zbc_depth_get(gr, 1, 0x00000000, 0x00000000);
+		gf100_gr_zbc_depth_get(gr, 1, 0x3f800000, 0x3f800000);
 	}
 
 	for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
-		gf100_gr_zbc_clear_color(priv, index);
+		gf100_gr_zbc_clear_color(gr, index);
 	for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
-		gf100_gr_zbc_clear_depth(priv, index);
+		gf100_gr_zbc_clear_depth(gr, index);
 }
 
 /**
@@ -669,8 +705,10 @@
  * progress.
  */
 int
-gf100_gr_wait_idle(struct gf100_gr_priv *priv)
+gf100_gr_wait_idle(struct gf100_gr *gr)
 {
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
 	bool gr_enabled, ctxsw_active, gr_busy;
 
@@ -679,24 +717,26 @@
 		 * required to make sure FIFO_ENGINE_STATUS (0x2640) is
 		 * up-to-date
 		 */
-		nv_rd32(priv, 0x400700);
+		nvkm_rd32(device, 0x400700);
 
-		gr_enabled = nv_rd32(priv, 0x200) & 0x1000;
-		ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000;
-		gr_busy = nv_rd32(priv, 0x40060c) & 0x1;
+		gr_enabled = nvkm_rd32(device, 0x200) & 0x1000;
+		ctxsw_active = nvkm_rd32(device, 0x2640) & 0x8000;
+		gr_busy = nvkm_rd32(device, 0x40060c) & 0x1;
 
 		if (!gr_enabled || (!gr_busy && !ctxsw_active))
 			return 0;
 	} while (time_before(jiffies, end_jiffies));
 
-	nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
-		 gr_enabled, ctxsw_active, gr_busy);
+	nvkm_error(subdev,
+		   "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
+		   gr_enabled, ctxsw_active, gr_busy);
 	return -EAGAIN;
 }
 
 void
-gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
+gf100_gr_mmio(struct gf100_gr *gr, const struct gf100_gr_pack *p)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_gr_pack *pack;
 	const struct gf100_gr_init *init;
 
@@ -704,49 +744,54 @@
 		u32 next = init->addr + init->count * init->pitch;
 		u32 addr = init->addr;
 		while (addr < next) {
-			nv_wr32(priv, addr, init->data);
+			nvkm_wr32(device, addr, init->data);
 			addr += init->pitch;
 		}
 	}
 }
 
 void
-gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
+gf100_gr_icmd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_gr_pack *pack;
 	const struct gf100_gr_init *init;
 	u32 data = 0;
 
-	nv_wr32(priv, 0x400208, 0x80000000);
+	nvkm_wr32(device, 0x400208, 0x80000000);
 
 	pack_for_each_init(init, pack, p) {
 		u32 next = init->addr + init->count * init->pitch;
 		u32 addr = init->addr;
 
 		if ((pack == p && init == p->init) || data != init->data) {
-			nv_wr32(priv, 0x400204, init->data);
+			nvkm_wr32(device, 0x400204, init->data);
 			data = init->data;
 		}
 
 		while (addr < next) {
-			nv_wr32(priv, 0x400200, addr);
+			nvkm_wr32(device, 0x400200, addr);
 			/**
 			 * Wait for GR to go idle after submitting a
 			 * GO_IDLE bundle
 			 */
 			if ((addr & 0xffff) == 0xe100)
-				gf100_gr_wait_idle(priv);
-			nv_wait(priv, 0x400700, 0x00000004, 0x00000000);
+				gf100_gr_wait_idle(gr);
+			nvkm_msec(device, 2000,
+				if (!(nvkm_rd32(device, 0x400700) & 0x00000004))
+					break;
+			);
 			addr += init->pitch;
 		}
 	}
 
-	nv_wr32(priv, 0x400208, 0x00000000);
+	nvkm_wr32(device, 0x400208, 0x00000000);
 }
 
 void
-gf100_gr_mthd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
+gf100_gr_mthd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_gr_pack *pack;
 	const struct gf100_gr_init *init;
 	u32 data = 0;
@@ -757,79 +802,75 @@
 		u32 addr = init->addr;
 
 		if ((pack == p && init == p->init) || data != init->data) {
-			nv_wr32(priv, 0x40448c, init->data);
+			nvkm_wr32(device, 0x40448c, init->data);
 			data = init->data;
 		}
 
 		while (addr < next) {
-			nv_wr32(priv, 0x404488, ctrl | (addr << 14));
+			nvkm_wr32(device, 0x404488, ctrl | (addr << 14));
 			addr += init->pitch;
 		}
 	}
 }
 
 u64
-gf100_gr_units(struct nvkm_gr *gr)
+gf100_gr_units(struct nvkm_gr *base)
 {
-	struct gf100_gr_priv *priv = (void *)gr;
+	struct gf100_gr *gr = gf100_gr(base);
 	u64 cfg;
 
-	cfg  = (u32)priv->gpc_nr;
-	cfg |= (u32)priv->tpc_total << 8;
-	cfg |= (u64)priv->rop_nr << 32;
+	cfg  = (u32)gr->gpc_nr;
+	cfg |= (u32)gr->tpc_total << 8;
+	cfg |= (u64)gr->rop_nr << 32;
 
 	return cfg;
 }
 
-static const struct nvkm_enum gk104_sked_error[] = {
-	{ 7, "CONSTANT_BUFFER_SIZE" },
-	{ 9, "LOCAL_MEMORY_SIZE_POS" },
-	{ 10, "LOCAL_MEMORY_SIZE_NEG" },
-	{ 11, "WARP_CSTACK_SIZE" },
-	{ 12, "TOTAL_TEMP_SIZE" },
-	{ 13, "REGISTER_COUNT" },
-	{ 18, "TOTAL_THREADS" },
-	{ 20, "PROGRAM_OFFSET" },
-	{ 21, "SHARED_MEMORY_SIZE" },
-	{ 25, "SHARED_CONFIG_TOO_SMALL" },
-	{ 26, "TOTAL_REGISTER_COUNT" },
+static const struct nvkm_bitfield gk104_sked_error[] = {
+	{ 0x00000080, "CONSTANT_BUFFER_SIZE" },
+	{ 0x00000200, "LOCAL_MEMORY_SIZE_POS" },
+	{ 0x00000400, "LOCAL_MEMORY_SIZE_NEG" },
+	{ 0x00000800, "WARP_CSTACK_SIZE" },
+	{ 0x00001000, "TOTAL_TEMP_SIZE" },
+	{ 0x00002000, "REGISTER_COUNT" },
+	{ 0x00040000, "TOTAL_THREADS" },
+	{ 0x00100000, "PROGRAM_OFFSET" },
+	{ 0x00200000, "SHARED_MEMORY_SIZE" },
+	{ 0x02000000, "SHARED_CONFIG_TOO_SMALL" },
+	{ 0x04000000, "TOTAL_REGISTER_COUNT" },
 	{}
 };
 
-static const struct nvkm_enum gf100_gpc_rop_error[] = {
-	{ 1, "RT_PITCH_OVERRUN" },
-	{ 4, "RT_WIDTH_OVERRUN" },
-	{ 5, "RT_HEIGHT_OVERRUN" },
-	{ 7, "ZETA_STORAGE_TYPE_MISMATCH" },
-	{ 8, "RT_STORAGE_TYPE_MISMATCH" },
-	{ 10, "RT_LINEAR_MISMATCH" },
+static const struct nvkm_bitfield gf100_gpc_rop_error[] = {
+	{ 0x00000002, "RT_PITCH_OVERRUN" },
+	{ 0x00000010, "RT_WIDTH_OVERRUN" },
+	{ 0x00000020, "RT_HEIGHT_OVERRUN" },
+	{ 0x00000080, "ZETA_STORAGE_TYPE_MISMATCH" },
+	{ 0x00000100, "RT_STORAGE_TYPE_MISMATCH" },
+	{ 0x00000400, "RT_LINEAR_MISMATCH" },
 	{}
 };
 
 static void
-gf100_gr_trap_gpc_rop(struct gf100_gr_priv *priv, int gpc)
+gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
 {
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	char error[128];
 	u32 trap[4];
-	int i;
 
-	trap[0] = nv_rd32(priv, GPC_UNIT(gpc, 0x0420));
-	trap[1] = nv_rd32(priv, GPC_UNIT(gpc, 0x0434));
-	trap[2] = nv_rd32(priv, GPC_UNIT(gpc, 0x0438));
-	trap[3] = nv_rd32(priv, GPC_UNIT(gpc, 0x043c));
+	trap[0] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0420)) & 0x3fffffff;
+	trap[1] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0434));
+	trap[2] = nvkm_rd32(device, GPC_UNIT(gpc, 0x0438));
+	trap[3] = nvkm_rd32(device, GPC_UNIT(gpc, 0x043c));
 
-	nv_error(priv, "GPC%d/PROP trap:", gpc);
-	for (i = 0; i <= 29; ++i) {
-		if (!(trap[0] & (1 << i)))
-			continue;
-		pr_cont(" ");
-		nvkm_enum_print(gf100_gpc_rop_error, i);
-	}
-	pr_cont("\n");
+	nvkm_snprintbf(error, sizeof(error), gf100_gpc_rop_error, trap[0]);
 
-	nv_error(priv, "x = %u, y = %u, format = %x, storage type = %x\n",
-		 trap[1] & 0xffff, trap[1] >> 16, (trap[2] >> 8) & 0x3f,
-		 trap[3] & 0xff);
-	nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+	nvkm_error(subdev, "GPC%d/PROP trap: %08x [%s] x = %u, y = %u, "
+			   "format = %x, storage type = %x\n",
+		   gpc, trap[0], error, trap[1] & 0xffff, trap[1] >> 16,
+		   (trap[2] >> 8) & 0x3f, trap[3] & 0xff);
+	nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
 }
 
 static const struct nvkm_enum gf100_mp_warp_error[] = {
@@ -852,401 +893,418 @@
 };
 
 static void
-gf100_gr_trap_mp(struct gf100_gr_priv *priv, int gpc, int tpc)
+gf100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
 {
-	u32 werr = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x648));
-	u32 gerr = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x650));
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x648));
+	u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x650));
+	const struct nvkm_enum *warp;
+	char glob[128];
 
-	nv_error(priv, "GPC%i/TPC%i/MP trap:", gpc, tpc);
-	nvkm_bitfield_print(gf100_mp_global_error, gerr);
-	if (werr) {
-		pr_cont(" ");
-		nvkm_enum_print(gf100_mp_warp_error, werr & 0xffff);
-	}
-	pr_cont("\n");
+	nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr);
+	warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff);
 
-	nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x648), 0x00000000);
-	nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x650), gerr);
+	nvkm_error(subdev, "GPC%i/TPC%i/MP trap: "
+			   "global %08x [%s] warp %04x [%s]\n",
+		   gpc, tpc, gerr, glob, werr, warp ? warp->name : "");
+
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x648), 0x00000000);
+	nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x650), gerr);
 }
 
 static void
-gf100_gr_trap_tpc(struct gf100_gr_priv *priv, int gpc, int tpc)
+gf100_gr_trap_tpc(struct gf100_gr *gr, int gpc, int tpc)
 {
-	u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0508));
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0508));
 
 	if (stat & 0x00000001) {
-		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0224));
-		nv_error(priv, "GPC%d/TPC%d/TEX: 0x%08x\n", gpc, tpc, trap);
-		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000);
+		u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0224));
+		nvkm_error(subdev, "GPC%d/TPC%d/TEX: %08x\n", gpc, tpc, trap);
+		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000);
 		stat &= ~0x00000001;
 	}
 
 	if (stat & 0x00000002) {
-		gf100_gr_trap_mp(priv, gpc, tpc);
+		gf100_gr_trap_mp(gr, gpc, tpc);
 		stat &= ~0x00000002;
 	}
 
 	if (stat & 0x00000004) {
-		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0084));
-		nv_error(priv, "GPC%d/TPC%d/POLY: 0x%08x\n", gpc, tpc, trap);
-		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000);
+		u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x0084));
+		nvkm_error(subdev, "GPC%d/TPC%d/POLY: %08x\n", gpc, tpc, trap);
+		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000);
 		stat &= ~0x00000004;
 	}
 
 	if (stat & 0x00000008) {
-		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x048c));
-		nv_error(priv, "GPC%d/TPC%d/L1C: 0x%08x\n", gpc, tpc, trap);
-		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000);
+		u32 trap = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x048c));
+		nvkm_error(subdev, "GPC%d/TPC%d/L1C: %08x\n", gpc, tpc, trap);
+		nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000);
 		stat &= ~0x00000008;
 	}
 
 	if (stat) {
-		nv_error(priv, "GPC%d/TPC%d/0x%08x: unknown\n", gpc, tpc, stat);
+		nvkm_error(subdev, "GPC%d/TPC%d/%08x: unknown\n", gpc, tpc, stat);
 	}
 }
 
 static void
-gf100_gr_trap_gpc(struct gf100_gr_priv *priv, int gpc)
+gf100_gr_trap_gpc(struct gf100_gr *gr, int gpc)
 {
-	u32 stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90));
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, GPC_UNIT(gpc, 0x2c90));
 	int tpc;
 
 	if (stat & 0x00000001) {
-		gf100_gr_trap_gpc_rop(priv, gpc);
+		gf100_gr_trap_gpc_rop(gr, gpc);
 		stat &= ~0x00000001;
 	}
 
 	if (stat & 0x00000002) {
-		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0900));
-		nv_error(priv, "GPC%d/ZCULL: 0x%08x\n", gpc, trap);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0900));
+		nvkm_error(subdev, "GPC%d/ZCULL: %08x\n", gpc, trap);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
 		stat &= ~0x00000002;
 	}
 
 	if (stat & 0x00000004) {
-		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x1028));
-		nv_error(priv, "GPC%d/CCACHE: 0x%08x\n", gpc, trap);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x1028));
+		nvkm_error(subdev, "GPC%d/CCACHE: %08x\n", gpc, trap);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
 		stat &= ~0x00000004;
 	}
 
 	if (stat & 0x00000008) {
-		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0824));
-		nv_error(priv, "GPC%d/ESETUP: 0x%08x\n", gpc, trap);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		u32 trap = nvkm_rd32(device, GPC_UNIT(gpc, 0x0824));
+		nvkm_error(subdev, "GPC%d/ESETUP: %08x\n", gpc, trap);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
 		stat &= ~0x00000009;
 	}
 
-	for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+	for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
 		u32 mask = 0x00010000 << tpc;
 		if (stat & mask) {
-			gf100_gr_trap_tpc(priv, gpc, tpc);
-			nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), mask);
+			gf100_gr_trap_tpc(gr, gpc, tpc);
+			nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), mask);
 			stat &= ~mask;
 		}
 	}
 
 	if (stat) {
-		nv_error(priv, "GPC%d/0x%08x: unknown\n", gpc, stat);
+		nvkm_error(subdev, "GPC%d/%08x: unknown\n", gpc, stat);
 	}
 }
 
 static void
-gf100_gr_trap_intr(struct gf100_gr_priv *priv)
+gf100_gr_trap_intr(struct gf100_gr *gr)
 {
-	u32 trap = nv_rd32(priv, 0x400108);
-	int rop, gpc, i;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 trap = nvkm_rd32(device, 0x400108);
+	int rop, gpc;
 
 	if (trap & 0x00000001) {
-		u32 stat = nv_rd32(priv, 0x404000);
-		nv_error(priv, "DISPATCH 0x%08x\n", stat);
-		nv_wr32(priv, 0x404000, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000001);
+		u32 stat = nvkm_rd32(device, 0x404000);
+		nvkm_error(subdev, "DISPATCH %08x\n", stat);
+		nvkm_wr32(device, 0x404000, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000001);
 		trap &= ~0x00000001;
 	}
 
 	if (trap & 0x00000002) {
-		u32 stat = nv_rd32(priv, 0x404600);
-		nv_error(priv, "M2MF 0x%08x\n", stat);
-		nv_wr32(priv, 0x404600, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000002);
+		u32 stat = nvkm_rd32(device, 0x404600);
+		nvkm_error(subdev, "M2MF %08x\n", stat);
+		nvkm_wr32(device, 0x404600, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000002);
 		trap &= ~0x00000002;
 	}
 
 	if (trap & 0x00000008) {
-		u32 stat = nv_rd32(priv, 0x408030);
-		nv_error(priv, "CCACHE 0x%08x\n", stat);
-		nv_wr32(priv, 0x408030, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000008);
+		u32 stat = nvkm_rd32(device, 0x408030);
+		nvkm_error(subdev, "CCACHE %08x\n", stat);
+		nvkm_wr32(device, 0x408030, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000008);
 		trap &= ~0x00000008;
 	}
 
 	if (trap & 0x00000010) {
-		u32 stat = nv_rd32(priv, 0x405840);
-		nv_error(priv, "SHADER 0x%08x\n", stat);
-		nv_wr32(priv, 0x405840, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000010);
+		u32 stat = nvkm_rd32(device, 0x405840);
+		nvkm_error(subdev, "SHADER %08x\n", stat);
+		nvkm_wr32(device, 0x405840, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000010);
 		trap &= ~0x00000010;
 	}
 
 	if (trap & 0x00000040) {
-		u32 stat = nv_rd32(priv, 0x40601c);
-		nv_error(priv, "UNK6 0x%08x\n", stat);
-		nv_wr32(priv, 0x40601c, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000040);
+		u32 stat = nvkm_rd32(device, 0x40601c);
+		nvkm_error(subdev, "UNK6 %08x\n", stat);
+		nvkm_wr32(device, 0x40601c, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000040);
 		trap &= ~0x00000040;
 	}
 
 	if (trap & 0x00000080) {
-		u32 stat = nv_rd32(priv, 0x404490);
-		nv_error(priv, "MACRO 0x%08x\n", stat);
-		nv_wr32(priv, 0x404490, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x00000080);
+		u32 stat = nvkm_rd32(device, 0x404490);
+		nvkm_error(subdev, "MACRO %08x\n", stat);
+		nvkm_wr32(device, 0x404490, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x00000080);
 		trap &= ~0x00000080;
 	}
 
 	if (trap & 0x00000100) {
-		u32 stat = nv_rd32(priv, 0x407020);
+		u32 stat = nvkm_rd32(device, 0x407020) & 0x3fffffff;
+		char sked[128];
 
-		nv_error(priv, "SKED:");
-		for (i = 0; i <= 29; ++i) {
-			if (!(stat & (1 << i)))
-				continue;
-			pr_cont(" ");
-			nvkm_enum_print(gk104_sked_error, i);
-		}
-		pr_cont("\n");
+		nvkm_snprintbf(sked, sizeof(sked), gk104_sked_error, stat);
+		nvkm_error(subdev, "SKED: %08x [%s]\n", stat, sked);
 
-		if (stat & 0x3fffffff)
-			nv_wr32(priv, 0x407020, 0x40000000);
-		nv_wr32(priv, 0x400108, 0x00000100);
+		if (stat)
+			nvkm_wr32(device, 0x407020, 0x40000000);
+		nvkm_wr32(device, 0x400108, 0x00000100);
 		trap &= ~0x00000100;
 	}
 
 	if (trap & 0x01000000) {
-		u32 stat = nv_rd32(priv, 0x400118);
-		for (gpc = 0; stat && gpc < priv->gpc_nr; gpc++) {
+		u32 stat = nvkm_rd32(device, 0x400118);
+		for (gpc = 0; stat && gpc < gr->gpc_nr; gpc++) {
 			u32 mask = 0x00000001 << gpc;
 			if (stat & mask) {
-				gf100_gr_trap_gpc(priv, gpc);
-				nv_wr32(priv, 0x400118, mask);
+				gf100_gr_trap_gpc(gr, gpc);
+				nvkm_wr32(device, 0x400118, mask);
 				stat &= ~mask;
 			}
 		}
-		nv_wr32(priv, 0x400108, 0x01000000);
+		nvkm_wr32(device, 0x400108, 0x01000000);
 		trap &= ~0x01000000;
 	}
 
 	if (trap & 0x02000000) {
-		for (rop = 0; rop < priv->rop_nr; rop++) {
-			u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
-			u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
-			nv_error(priv, "ROP%d 0x%08x 0x%08x\n",
+		for (rop = 0; rop < gr->rop_nr; rop++) {
+			u32 statz = nvkm_rd32(device, ROP_UNIT(rop, 0x070));
+			u32 statc = nvkm_rd32(device, ROP_UNIT(rop, 0x144));
+			nvkm_error(subdev, "ROP%d %08x %08x\n",
 				 rop, statz, statc);
-			nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
-			nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
+			nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
+			nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
 		}
-		nv_wr32(priv, 0x400108, 0x02000000);
+		nvkm_wr32(device, 0x400108, 0x02000000);
 		trap &= ~0x02000000;
 	}
 
 	if (trap) {
-		nv_error(priv, "TRAP UNHANDLED 0x%08x\n", trap);
-		nv_wr32(priv, 0x400108, trap);
+		nvkm_error(subdev, "TRAP UNHANDLED %08x\n", trap);
+		nvkm_wr32(device, 0x400108, trap);
 	}
 }
 
 static void
-gf100_gr_ctxctl_debug_unit(struct gf100_gr_priv *priv, u32 base)
+gf100_gr_ctxctl_debug_unit(struct gf100_gr *gr, u32 base)
 {
-	nv_error(priv, "%06x - done 0x%08x\n", base,
-		 nv_rd32(priv, base + 0x400));
-	nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
-		 nv_rd32(priv, base + 0x800), nv_rd32(priv, base + 0x804),
-		 nv_rd32(priv, base + 0x808), nv_rd32(priv, base + 0x80c));
-	nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
-		 nv_rd32(priv, base + 0x810), nv_rd32(priv, base + 0x814),
-		 nv_rd32(priv, base + 0x818), nv_rd32(priv, base + 0x81c));
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	nvkm_error(subdev, "%06x - done %08x\n", base,
+		   nvkm_rd32(device, base + 0x400));
+	nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base,
+		   nvkm_rd32(device, base + 0x800),
+		   nvkm_rd32(device, base + 0x804),
+		   nvkm_rd32(device, base + 0x808),
+		   nvkm_rd32(device, base + 0x80c));
+	nvkm_error(subdev, "%06x - stat %08x %08x %08x %08x\n", base,
+		   nvkm_rd32(device, base + 0x810),
+		   nvkm_rd32(device, base + 0x814),
+		   nvkm_rd32(device, base + 0x818),
+		   nvkm_rd32(device, base + 0x81c));
 }
 
 void
-gf100_gr_ctxctl_debug(struct gf100_gr_priv *priv)
+gf100_gr_ctxctl_debug(struct gf100_gr *gr)
 {
-	u32 gpcnr = nv_rd32(priv, 0x409604) & 0xffff;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	u32 gpcnr = nvkm_rd32(device, 0x409604) & 0xffff;
 	u32 gpc;
 
-	gf100_gr_ctxctl_debug_unit(priv, 0x409000);
+	gf100_gr_ctxctl_debug_unit(gr, 0x409000);
 	for (gpc = 0; gpc < gpcnr; gpc++)
-		gf100_gr_ctxctl_debug_unit(priv, 0x502000 + (gpc * 0x8000));
+		gf100_gr_ctxctl_debug_unit(gr, 0x502000 + (gpc * 0x8000));
 }
 
 static void
-gf100_gr_ctxctl_isr(struct gf100_gr_priv *priv)
+gf100_gr_ctxctl_isr(struct gf100_gr *gr)
 {
-	u32 stat = nv_rd32(priv, 0x409c18);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x409c18);
 
 	if (stat & 0x00000001) {
-		u32 code = nv_rd32(priv, 0x409814);
+		u32 code = nvkm_rd32(device, 0x409814);
 		if (code == E_BAD_FWMTHD) {
-			u32 class = nv_rd32(priv, 0x409808);
-			u32  addr = nv_rd32(priv, 0x40980c);
+			u32 class = nvkm_rd32(device, 0x409808);
+			u32  addr = nvkm_rd32(device, 0x40980c);
 			u32  subc = (addr & 0x00070000) >> 16;
 			u32  mthd = (addr & 0x00003ffc);
-			u32  data = nv_rd32(priv, 0x409810);
+			u32  data = nvkm_rd32(device, 0x409810);
 
-			nv_error(priv, "FECS MTHD subc %d class 0x%04x "
-				       "mthd 0x%04x data 0x%08x\n",
-				 subc, class, mthd, data);
+			nvkm_error(subdev, "FECS MTHD subc %d class %04x "
+					   "mthd %04x data %08x\n",
+				   subc, class, mthd, data);
 
-			nv_wr32(priv, 0x409c20, 0x00000001);
+			nvkm_wr32(device, 0x409c20, 0x00000001);
 			stat &= ~0x00000001;
 		} else {
-			nv_error(priv, "FECS ucode error %d\n", code);
+			nvkm_error(subdev, "FECS ucode error %d\n", code);
 		}
 	}
 
 	if (stat & 0x00080000) {
-		nv_error(priv, "FECS watchdog timeout\n");
-		gf100_gr_ctxctl_debug(priv);
-		nv_wr32(priv, 0x409c20, 0x00080000);
+		nvkm_error(subdev, "FECS watchdog timeout\n");
+		gf100_gr_ctxctl_debug(gr);
+		nvkm_wr32(device, 0x409c20, 0x00080000);
 		stat &= ~0x00080000;
 	}
 
 	if (stat) {
-		nv_error(priv, "FECS 0x%08x\n", stat);
-		gf100_gr_ctxctl_debug(priv);
-		nv_wr32(priv, 0x409c20, stat);
+		nvkm_error(subdev, "FECS %08x\n", stat);
+		gf100_gr_ctxctl_debug(gr);
+		nvkm_wr32(device, 0x409c20, stat);
 	}
 }
 
 static void
-gf100_gr_intr(struct nvkm_subdev *subdev)
+gf100_gr_intr(struct nvkm_gr *base)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct nvkm_handle *handle;
-	struct gf100_gr_priv *priv = (void *)subdev;
-	u64 inst = nv_rd32(priv, 0x409b00) & 0x0fffffff;
-	u32 stat = nv_rd32(priv, 0x400100);
-	u32 addr = nv_rd32(priv, 0x400704);
+	struct gf100_gr *gr = gf100_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo_chan *chan;
+	unsigned long flags;
+	u64 inst = nvkm_rd32(device, 0x409b00) & 0x0fffffff;
+	u32 stat = nvkm_rd32(device, 0x400100);
+	u32 addr = nvkm_rd32(device, 0x400704);
 	u32 mthd = (addr & 0x00003ffc);
 	u32 subc = (addr & 0x00070000) >> 16;
-	u32 data = nv_rd32(priv, 0x400708);
-	u32 code = nv_rd32(priv, 0x400110);
+	u32 data = nvkm_rd32(device, 0x400708);
+	u32 code = nvkm_rd32(device, 0x400110);
 	u32 class;
-	int chid;
+	const char *name = "unknown";
+	int chid = -1;
 
-	if (nv_device(priv)->card_type < NV_E0 || subc < 4)
-		class = nv_rd32(priv, 0x404200 + (subc * 4));
+	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
+	if (chan) {
+		name = chan->object.client->name;
+		chid = chan->chid;
+	}
+
+	if (device->card_type < NV_E0 || subc < 4)
+		class = nvkm_rd32(device, 0x404200 + (subc * 4));
 	else
 		class = 0x0000;
 
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
-
 	if (stat & 0x00000001) {
 		/*
 		 * notifier interrupt, only needed for cyclestats
 		 * can be safely ignored
 		 */
-		nv_wr32(priv, 0x400100, 0x00000001);
+		nvkm_wr32(device, 0x400100, 0x00000001);
 		stat &= ~0x00000001;
 	}
 
 	if (stat & 0x00000010) {
-		handle = nvkm_handle_get_class(engctx, class);
-		if (!handle || nv_call(handle->object, mthd, data)) {
-			nv_error(priv,
-				 "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-				 chid, inst << 12, nvkm_client_name(engctx),
-				 subc, class, mthd, data);
+		if (!gf100_gr_mthd_sw(device, class, mthd, data)) {
+			nvkm_error(subdev, "ILLEGAL_MTHD ch %d [%010llx %s] "
+				   "subc %d class %04x mthd %04x data %08x\n",
+				   chid, inst << 12, name, subc,
+				   class, mthd, data);
 		}
-		nvkm_handle_put(handle);
-		nv_wr32(priv, 0x400100, 0x00000010);
+		nvkm_wr32(device, 0x400100, 0x00000010);
 		stat &= ~0x00000010;
 	}
 
 	if (stat & 0x00000020) {
-		nv_error(priv,
-			 "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, inst << 12, nvkm_client_name(engctx), subc,
-			 class, mthd, data);
-		nv_wr32(priv, 0x400100, 0x00000020);
+		nvkm_error(subdev, "ILLEGAL_CLASS ch %d [%010llx %s] "
+			   "subc %d class %04x mthd %04x data %08x\n",
+			   chid, inst << 12, name, subc, class, mthd, data);
+		nvkm_wr32(device, 0x400100, 0x00000020);
 		stat &= ~0x00000020;
 	}
 
 	if (stat & 0x00100000) {
-		nv_error(priv, "DATA_ERROR [");
-		nvkm_enum_print(nv50_data_error_names, code);
-		pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			chid, inst << 12, nvkm_client_name(engctx), subc,
-			class, mthd, data);
-		nv_wr32(priv, 0x400100, 0x00100000);
+		const struct nvkm_enum *en =
+			nvkm_enum_find(nv50_data_error_names, code);
+		nvkm_error(subdev, "DATA_ERROR %08x [%s] ch %d [%010llx %s] "
+				   "subc %d class %04x mthd %04x data %08x\n",
+			   code, en ? en->name : "", chid, inst << 12,
+			   name, subc, class, mthd, data);
+		nvkm_wr32(device, 0x400100, 0x00100000);
 		stat &= ~0x00100000;
 	}
 
 	if (stat & 0x00200000) {
-		nv_error(priv, "TRAP ch %d [0x%010llx %s]\n", chid, inst << 12,
-			 nvkm_client_name(engctx));
-		gf100_gr_trap_intr(priv);
-		nv_wr32(priv, 0x400100, 0x00200000);
+		nvkm_error(subdev, "TRAP ch %d [%010llx %s]\n",
+			   chid, inst << 12, name);
+		gf100_gr_trap_intr(gr);
+		nvkm_wr32(device, 0x400100, 0x00200000);
 		stat &= ~0x00200000;
 	}
 
 	if (stat & 0x00080000) {
-		gf100_gr_ctxctl_isr(priv);
-		nv_wr32(priv, 0x400100, 0x00080000);
+		gf100_gr_ctxctl_isr(gr);
+		nvkm_wr32(device, 0x400100, 0x00080000);
 		stat &= ~0x00080000;
 	}
 
 	if (stat) {
-		nv_error(priv, "unknown stat 0x%08x\n", stat);
-		nv_wr32(priv, 0x400100, stat);
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_wr32(device, 0x400100, stat);
 	}
 
-	nv_wr32(priv, 0x400500, 0x00010001);
-	nvkm_engctx_put(engctx);
+	nvkm_wr32(device, 0x400500, 0x00010001);
+	nvkm_fifo_chan_put(device->fifo, flags, &chan);
 }
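
The interrupt handler above replaces the old engctx get/put with a FIFO channel lookup keyed on the faulting instance address; the lookup takes a lock (returned via flags) that must be dropped again with nvkm_fifo_chan_put(). A sketch of the lookup/use/put sequence, using the signatures visible in this patch:

	unsigned long flags;
	struct nvkm_fifo_chan *chan =
		nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
	const char *name = chan ? chan->object.client->name : "unknown";
	int chid = chan ? chan->chid : -1;

	/* ... log name/chid against the fault ... */

	nvkm_fifo_chan_put(device->fifo, flags, &chan);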
 
 void
-gf100_gr_init_fw(struct gf100_gr_priv *priv, u32 fuc_base,
+gf100_gr_init_fw(struct gf100_gr *gr, u32 fuc_base,
 		 struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int i;
 
-	nv_wr32(priv, fuc_base + 0x01c0, 0x01000000);
+	nvkm_wr32(device, fuc_base + 0x01c0, 0x01000000);
 	for (i = 0; i < data->size / 4; i++)
-		nv_wr32(priv, fuc_base + 0x01c4, data->data[i]);
+		nvkm_wr32(device, fuc_base + 0x01c4, data->data[i]);
 
-	nv_wr32(priv, fuc_base + 0x0180, 0x01000000);
+	nvkm_wr32(device, fuc_base + 0x0180, 0x01000000);
 	for (i = 0; i < code->size / 4; i++) {
 		if ((i & 0x3f) == 0)
-			nv_wr32(priv, fuc_base + 0x0188, i >> 6);
-		nv_wr32(priv, fuc_base + 0x0184, code->data[i]);
+			nvkm_wr32(device, fuc_base + 0x0188, i >> 6);
+		nvkm_wr32(device, fuc_base + 0x0184, code->data[i]);
 	}
 
 	/* code must be padded to 0x40 words */
 	for (; i & 0x3f; i++)
-		nv_wr32(priv, fuc_base + 0x0184, 0);
+		nvkm_wr32(device, fuc_base + 0x0184, 0);
 }
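
gf100_gr_init_fw() streams a falcon image through auto-incrementing data ports: the data segment goes in as-is, while the code upload re-writes the block index every 64 words and pads the image out to a 0x40-word boundary. Usage as it appears later in this patch (FECS at 0x409000, GPCCS at 0x41a000):

	nvkm_mc_unk260(device->mc, 0);
	gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c, &gr->fuc409d);
	gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac, &gr->fuc41ad);
	nvkm_mc_unk260(device->mc, 1);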
 
 static void
-gf100_gr_init_csdata(struct gf100_gr_priv *priv,
+gf100_gr_init_csdata(struct gf100_gr *gr,
 		     const struct gf100_gr_pack *pack,
 		     u32 falcon, u32 starstar, u32 base)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	const struct gf100_gr_pack *iter;
 	const struct gf100_gr_init *init;
 	u32 addr = ~0, prev = ~0, xfer = 0;
 	u32 star, temp;
 
-	nv_wr32(priv, falcon + 0x01c0, 0x02000000 + starstar);
-	star = nv_rd32(priv, falcon + 0x01c4);
-	temp = nv_rd32(priv, falcon + 0x01c4);
+	nvkm_wr32(device, falcon + 0x01c0, 0x02000000 + starstar);
+	star = nvkm_rd32(device, falcon + 0x01c4);
+	temp = nvkm_rd32(device, falcon + 0x01c4);
 	if (temp > star)
 		star = temp;
-	nv_wr32(priv, falcon + 0x01c0, 0x01000000 + star);
+	nvkm_wr32(device, falcon + 0x01c0, 0x01000000 + star);
 
 	pack_for_each_init(init, iter, pack) {
 		u32 head = init->addr - base;
@@ -1255,7 +1313,7 @@
 			if (head != prev + 4 || xfer >= 32) {
 				if (xfer) {
 					u32 data = ((--xfer << 26) | addr);
-					nv_wr32(priv, falcon + 0x01c4, data);
+					nvkm_wr32(device, falcon + 0x01c4, data);
 					star += 4;
 				}
 				addr = head;
@@ -1267,157 +1325,166 @@
 		}
 	}
 
-	nv_wr32(priv, falcon + 0x01c4, (--xfer << 26) | addr);
-	nv_wr32(priv, falcon + 0x01c0, 0x01000004 + starstar);
-	nv_wr32(priv, falcon + 0x01c4, star + 4);
+	nvkm_wr32(device, falcon + 0x01c4, (--xfer << 26) | addr);
+	nvkm_wr32(device, falcon + 0x01c0, 0x01000004 + starstar);
+	nvkm_wr32(device, falcon + 0x01c4, star + 4);
 }
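
The stream written to falcon+0x01c4 above appears to encode each run of consecutive context-state registers as a single word: count-1 in the top six bits, start address in the low 26, which is why the loop flushes whenever addresses stop being contiguous or a run reaches 32 entries. As a sketch (helper name hypothetical):

	static u32
	csdata_encode_run(u32 addr, u32 xfer)	/* xfer = run length, <= 32 */
	{
		return ((xfer - 1) << 26) | addr;
	}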
 
 int
-gf100_gr_init_ctxctl(struct gf100_gr_priv *priv)
+gf100_gr_init_ctxctl(struct gf100_gr *gr)
 {
-	struct gf100_gr_oclass *oclass = (void *)nv_object(priv)->oclass;
-	struct gf100_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass;
+	const struct gf100_grctx_func *grctx = gr->func->grctx;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	int i;
 
-	if (priv->firmware) {
+	if (gr->firmware) {
 		/* load fuc microcode */
-		nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
-		gf100_gr_init_fw(priv, 0x409000, &priv->fuc409c,
-						 &priv->fuc409d);
-		gf100_gr_init_fw(priv, 0x41a000, &priv->fuc41ac,
-						 &priv->fuc41ad);
-		nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+		nvkm_mc_unk260(device->mc, 0);
+		gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c, &gr->fuc409d);
+		gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac, &gr->fuc41ad);
+		nvkm_mc_unk260(device->mc, 1);
 
 		/* start both of them running */
-		nv_wr32(priv, 0x409840, 0xffffffff);
-		nv_wr32(priv, 0x41a10c, 0x00000000);
-		nv_wr32(priv, 0x40910c, 0x00000000);
-		nv_wr32(priv, 0x41a100, 0x00000002);
-		nv_wr32(priv, 0x409100, 0x00000002);
-		if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
-			nv_warn(priv, "0x409800 wait failed\n");
-
-		nv_wr32(priv, 0x409840, 0xffffffff);
-		nv_wr32(priv, 0x409500, 0x7fffffff);
-		nv_wr32(priv, 0x409504, 0x00000021);
-
-		nv_wr32(priv, 0x409840, 0xffffffff);
-		nv_wr32(priv, 0x409500, 0x00000000);
-		nv_wr32(priv, 0x409504, 0x00000010);
-		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-			nv_error(priv, "fuc09 req 0x10 timeout\n");
+		nvkm_wr32(device, 0x409840, 0xffffffff);
+		nvkm_wr32(device, 0x41a10c, 0x00000000);
+		nvkm_wr32(device, 0x40910c, 0x00000000);
+		nvkm_wr32(device, 0x41a100, 0x00000002);
+		nvkm_wr32(device, 0x409100, 0x00000002);
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800) & 0x00000001)
+				break;
+		) < 0)
 			return -EBUSY;
-		}
-		priv->size = nv_rd32(priv, 0x409800);
 
-		nv_wr32(priv, 0x409840, 0xffffffff);
-		nv_wr32(priv, 0x409500, 0x00000000);
-		nv_wr32(priv, 0x409504, 0x00000016);
-		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-			nv_error(priv, "fuc09 req 0x16 timeout\n");
+		nvkm_wr32(device, 0x409840, 0xffffffff);
+		nvkm_wr32(device, 0x409500, 0x7fffffff);
+		nvkm_wr32(device, 0x409504, 0x00000021);
+
+		nvkm_wr32(device, 0x409840, 0xffffffff);
+		nvkm_wr32(device, 0x409500, 0x00000000);
+		nvkm_wr32(device, 0x409504, 0x00000010);
+		if (nvkm_msec(device, 2000,
+			if ((gr->size = nvkm_rd32(device, 0x409800)))
+				break;
+		) < 0)
 			return -EBUSY;
-		}
 
-		nv_wr32(priv, 0x409840, 0xffffffff);
-		nv_wr32(priv, 0x409500, 0x00000000);
-		nv_wr32(priv, 0x409504, 0x00000025);
-		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-			nv_error(priv, "fuc09 req 0x25 timeout\n");
+		nvkm_wr32(device, 0x409840, 0xffffffff);
+		nvkm_wr32(device, 0x409500, 0x00000000);
+		nvkm_wr32(device, 0x409504, 0x00000016);
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800))
+				break;
+		) < 0)
 			return -EBUSY;
+
+		nvkm_wr32(device, 0x409840, 0xffffffff);
+		nvkm_wr32(device, 0x409500, 0x00000000);
+		nvkm_wr32(device, 0x409504, 0x00000025);
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800))
+				break;
+		) < 0)
+			return -EBUSY;
+
+		if (device->chipset >= 0xe0) {
+			nvkm_wr32(device, 0x409800, 0x00000000);
+			nvkm_wr32(device, 0x409500, 0x00000001);
+			nvkm_wr32(device, 0x409504, 0x00000030);
+			if (nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, 0x409800))
+					break;
+			) < 0)
+				return -EBUSY;
+
+			nvkm_wr32(device, 0x409810, 0xb00095c8);
+			nvkm_wr32(device, 0x409800, 0x00000000);
+			nvkm_wr32(device, 0x409500, 0x00000001);
+			nvkm_wr32(device, 0x409504, 0x00000031);
+			if (nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, 0x409800))
+					break;
+			) < 0)
+				return -EBUSY;
+
+			nvkm_wr32(device, 0x409810, 0x00080420);
+			nvkm_wr32(device, 0x409800, 0x00000000);
+			nvkm_wr32(device, 0x409500, 0x00000001);
+			nvkm_wr32(device, 0x409504, 0x00000032);
+			if (nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, 0x409800))
+					break;
+			) < 0)
+				return -EBUSY;
+
+			nvkm_wr32(device, 0x409614, 0x00000070);
+			nvkm_wr32(device, 0x409614, 0x00000770);
+			nvkm_wr32(device, 0x40802c, 0x00000001);
 		}
 
-		if (nv_device(priv)->chipset >= 0xe0) {
-			nv_wr32(priv, 0x409800, 0x00000000);
-			nv_wr32(priv, 0x409500, 0x00000001);
-			nv_wr32(priv, 0x409504, 0x00000030);
-			if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-				nv_error(priv, "fuc09 req 0x30 timeout\n");
-				return -EBUSY;
-			}
-
-			nv_wr32(priv, 0x409810, 0xb00095c8);
-			nv_wr32(priv, 0x409800, 0x00000000);
-			nv_wr32(priv, 0x409500, 0x00000001);
-			nv_wr32(priv, 0x409504, 0x00000031);
-			if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-				nv_error(priv, "fuc09 req 0x31 timeout\n");
-				return -EBUSY;
-			}
-
-			nv_wr32(priv, 0x409810, 0x00080420);
-			nv_wr32(priv, 0x409800, 0x00000000);
-			nv_wr32(priv, 0x409500, 0x00000001);
-			nv_wr32(priv, 0x409504, 0x00000032);
-			if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
-				nv_error(priv, "fuc09 req 0x32 timeout\n");
-				return -EBUSY;
-			}
-
-			nv_wr32(priv, 0x409614, 0x00000070);
-			nv_wr32(priv, 0x409614, 0x00000770);
-			nv_wr32(priv, 0x40802c, 0x00000001);
-		}
-
-		if (priv->data == NULL) {
-			int ret = gf100_grctx_generate(priv);
+		if (gr->data == NULL) {
+			int ret = gf100_grctx_generate(gr);
 			if (ret) {
-				nv_error(priv, "failed to construct context\n");
+				nvkm_error(subdev, "failed to construct context\n");
 				return ret;
 			}
 		}
 
 		return 0;
 	} else
-	if (!oclass->fecs.ucode) {
+	if (!gr->func->fecs.ucode) {
 		return -ENOSYS;
 	}
 
 	/* load HUB microcode */
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 0);
-	nv_wr32(priv, 0x4091c0, 0x01000000);
-	for (i = 0; i < oclass->fecs.ucode->data.size / 4; i++)
-		nv_wr32(priv, 0x4091c4, oclass->fecs.ucode->data.data[i]);
+	nvkm_mc_unk260(device->mc, 0);
+	nvkm_wr32(device, 0x4091c0, 0x01000000);
+	for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++)
+		nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]);
 
-	nv_wr32(priv, 0x409180, 0x01000000);
-	for (i = 0; i < oclass->fecs.ucode->code.size / 4; i++) {
+	nvkm_wr32(device, 0x409180, 0x01000000);
+	for (i = 0; i < gr->func->fecs.ucode->code.size / 4; i++) {
 		if ((i & 0x3f) == 0)
-			nv_wr32(priv, 0x409188, i >> 6);
-		nv_wr32(priv, 0x409184, oclass->fecs.ucode->code.data[i]);
+			nvkm_wr32(device, 0x409188, i >> 6);
+		nvkm_wr32(device, 0x409184, gr->func->fecs.ucode->code.data[i]);
 	}
 
 	/* load GPC microcode */
-	nv_wr32(priv, 0x41a1c0, 0x01000000);
-	for (i = 0; i < oclass->gpccs.ucode->data.size / 4; i++)
-		nv_wr32(priv, 0x41a1c4, oclass->gpccs.ucode->data.data[i]);
+	nvkm_wr32(device, 0x41a1c0, 0x01000000);
+	for (i = 0; i < gr->func->gpccs.ucode->data.size / 4; i++)
+		nvkm_wr32(device, 0x41a1c4, gr->func->gpccs.ucode->data.data[i]);
 
-	nv_wr32(priv, 0x41a180, 0x01000000);
-	for (i = 0; i < oclass->gpccs.ucode->code.size / 4; i++) {
+	nvkm_wr32(device, 0x41a180, 0x01000000);
+	for (i = 0; i < gr->func->gpccs.ucode->code.size / 4; i++) {
 		if ((i & 0x3f) == 0)
-			nv_wr32(priv, 0x41a188, i >> 6);
-		nv_wr32(priv, 0x41a184, oclass->gpccs.ucode->code.data[i]);
+			nvkm_wr32(device, 0x41a188, i >> 6);
+		nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]);
 	}
-	nvkm_mc(priv)->unk260(nvkm_mc(priv), 1);
+	nvkm_mc_unk260(device->mc, 1);
 
 	/* load register lists */
-	gf100_gr_init_csdata(priv, cclass->hub, 0x409000, 0x000, 0x000000);
-	gf100_gr_init_csdata(priv, cclass->gpc, 0x41a000, 0x000, 0x418000);
-	gf100_gr_init_csdata(priv, cclass->tpc, 0x41a000, 0x004, 0x419800);
-	gf100_gr_init_csdata(priv, cclass->ppc, 0x41a000, 0x008, 0x41be00);
+	gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
+	gf100_gr_init_csdata(gr, grctx->gpc, 0x41a000, 0x000, 0x418000);
+	gf100_gr_init_csdata(gr, grctx->tpc, 0x41a000, 0x004, 0x419800);
+	gf100_gr_init_csdata(gr, grctx->ppc, 0x41a000, 0x008, 0x41be00);
 
 	/* start HUB ucode running, it'll init the GPCs */
-	nv_wr32(priv, 0x40910c, 0x00000000);
-	nv_wr32(priv, 0x409100, 0x00000002);
-	if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
-		nv_error(priv, "HUB_INIT timed out\n");
-		gf100_gr_ctxctl_debug(priv);
+	nvkm_wr32(device, 0x40910c, 0x00000000);
+	nvkm_wr32(device, 0x409100, 0x00000002);
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x409800) & 0x80000000)
+			break;
+	) < 0) {
+		gf100_gr_ctxctl_debug(gr);
 		return -EBUSY;
 	}
 
-	priv->size = nv_rd32(priv, 0x409804);
-	if (priv->data == NULL) {
-		int ret = gf100_grctx_generate(priv);
+	gr->size = nvkm_rd32(device, 0x409804);
+	if (gr->data == NULL) {
+		int ret = gf100_grctx_generate(gr);
 		if (ret) {
-			nv_error(priv, "failed to construct context\n");
+			nvkm_error(subdev, "failed to construct context\n");
 			return ret;
 		}
 	}
@@ -1425,143 +1492,160 @@
 	return 0;
 }
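
All of the nv_wait()/nv_wait_ne() polling in the old version is replaced by nvkm_msec(), which re-evaluates its statement body until it executes break or the time budget expires, returning a negative value on timeout. The recurring pattern from the function above:

	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x409800) & 0x80000000)
			break;
	) < 0)
		return -EBUSY;	/* timed out waiting for HUB_INIT */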
 
-int
-gf100_gr_init(struct nvkm_object *object)
+static int
+gf100_gr_oneinit(struct nvkm_gr *base)
 {
-	struct gf100_gr_oclass *oclass = (void *)object->oclass;
-	struct gf100_gr_priv *priv = (void *)object;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
-	u32 data[TPC_MAX / 8] = {};
-	u8  tpcnr[GPC_MAX];
-	int gpc, tpc, rop;
-	int ret, i;
+	struct gf100_gr *gr = gf100_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	int ret, i, j;
 
-	ret = nvkm_gr_init(&priv->base);
+	nvkm_pmu_pgob(device->pmu, false);
+
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
+			      &gr->unk4188b4);
 	if (ret)
 		return ret;
 
-	nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0888), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x088c), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
-	nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
+			      &gr->unk4188b8);
+	if (ret)
+		return ret;
 
-	gf100_gr_mmio(priv, oclass->mmio);
+	nvkm_kmap(gr->unk4188b4);
+	for (i = 0; i < 0x1000; i += 4)
+		nvkm_wo32(gr->unk4188b4, i, 0x00000010);
+	nvkm_done(gr->unk4188b4);
 
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
-		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
-		} while (!tpcnr[gpc]);
-		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+	nvkm_kmap(gr->unk4188b8);
+	for (i = 0; i < 0x1000; i += 4)
+		nvkm_wo32(gr->unk4188b8, i, 0x00000010);
+	nvkm_done(gr->unk4188b8);
 
-		data[i / 8] |= tpc << ((i % 8) * 4);
-	}
-
-	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
-	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
-	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
-	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
-			priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			priv->tpc_total);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
-	}
-
-	if (nv_device(priv)->chipset != 0xd7)
-		nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
-	else
-		nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
-
-	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
-
-	nv_wr32(priv, 0x400500, 0x00010001);
-
-	nv_wr32(priv, 0x400100, 0xffffffff);
-	nv_wr32(priv, 0x40013c, 0xffffffff);
-
-	nv_wr32(priv, 0x409c24, 0x000f0000);
-	nv_wr32(priv, 0x404000, 0xc0000000);
-	nv_wr32(priv, 0x404600, 0xc0000000);
-	nv_wr32(priv, 0x408030, 0xc0000000);
-	nv_wr32(priv, 0x40601c, 0xc0000000);
-	nv_wr32(priv, 0x404490, 0xc0000000);
-	nv_wr32(priv, 0x406018, 0xc0000000);
-	nv_wr32(priv, 0x405840, 0xc0000000);
-	nv_wr32(priv, 0x405844, 0x00ffffff);
-	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
-	nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
-
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+	gr->rop_nr = (nvkm_rd32(device, 0x409604) & 0x001f0000) >> 16;
+	gr->gpc_nr =  nvkm_rd32(device, 0x409604) & 0x0000001f;
+	for (i = 0; i < gr->gpc_nr; i++) {
+		gr->tpc_nr[i]  = nvkm_rd32(device, GPC_UNIT(i, 0x2608));
+		gr->tpc_total += gr->tpc_nr[i];
+		gr->ppc_nr[i]  = gr->func->ppc_nr;
+		for (j = 0; j < gr->ppc_nr[i]; j++) {
+			u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4)));
+			gr->ppc_tpc_nr[i][j] = hweight8(mask);
 		}
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
 	}
 
-	for (rop = 0; rop < priv->rop_nr; rop++) {
-		nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+	/*XXX: these need figuring out... though it might not even matter */
+	switch (device->chipset) {
+	case 0xc0:
+		if (gr->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
+			gr->magic_not_rop_nr = 0x07;
+		} else
+		if (gr->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
+			gr->magic_not_rop_nr = 0x05;
+		} else
+		if (gr->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
+			gr->magic_not_rop_nr = 0x06;
+		}
+		break;
+	case 0xc3: /* 450, 4/0/0/0, 2 */
+		gr->magic_not_rop_nr = 0x03;
+		break;
+	case 0xc4: /* 460, 3/4/0/0, 4 */
+		gr->magic_not_rop_nr = 0x01;
+		break;
+	case 0xc1: /* 2/0/0/0, 1 */
+		gr->magic_not_rop_nr = 0x01;
+		break;
+	case 0xc8: /* 4/4/3/4, 5 */
+		gr->magic_not_rop_nr = 0x06;
+		break;
+	case 0xce: /* 4/4/0/0, 4 */
+		gr->magic_not_rop_nr = 0x03;
+		break;
+	case 0xcf: /* 4/0/0/0, 3 */
+		gr->magic_not_rop_nr = 0x03;
+		break;
+	case 0xd7:
+	case 0xd9: /* 1/0/0/0, 1 */
+	case 0xea: /* gk20a */
+	case 0x12b: /* gm20b */
+		gr->magic_not_rop_nr = 0x01;
+		break;
 	}
 
-	nv_wr32(priv, 0x400108, 0xffffffff);
-	nv_wr32(priv, 0x400138, 0xffffffff);
-	nv_wr32(priv, 0x400118, 0xffffffff);
-	nv_wr32(priv, 0x400130, 0xffffffff);
-	nv_wr32(priv, 0x40011c, 0xffffffff);
-	nv_wr32(priv, 0x400134, 0xffffffff);
-
-	nv_wr32(priv, 0x400054, 0x34ce3464);
-
-	gf100_gr_zbc_init(priv);
-
-	return gf100_gr_init_ctxctl(priv);
+	return 0;
 }
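
The unk4188b4/unk4188b8 buffers are now plain struct nvkm_memory objects, filled via the kmap/wo32/done accessor triple rather than nv_wo32() on a gpuobj. The pattern, assuming a struct nvkm_memory *memory already allocated with nvkm_memory_new():

	int i;

	nvkm_kmap(memory);
	for (i = 0; i < 0x1000; i += 4)
		nvkm_wo32(memory, i, 0x00000010);	/* 32-bit word writes */
	nvkm_done(memory);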
 
-static void
+int
+gf100_gr_init_(struct nvkm_gr *base)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+	nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
+	return gr->func->init(gr);
+}
+
+void
 gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
 {
 	kfree(fuc->data);
 	fuc->data = NULL;
 }
 
+void *
+gf100_gr_dtor(struct nvkm_gr *base)
+{
+	struct gf100_gr *gr = gf100_gr(base);
+
+	if (gr->func->dtor)
+		gr->func->dtor(gr);
+	kfree(gr->data);
+
+	gf100_gr_dtor_fw(&gr->fuc409c);
+	gf100_gr_dtor_fw(&gr->fuc409d);
+	gf100_gr_dtor_fw(&gr->fuc41ac);
+	gf100_gr_dtor_fw(&gr->fuc41ad);
+
+	nvkm_memory_del(&gr->unk4188b8);
+	nvkm_memory_del(&gr->unk4188b4);
+	return gr;
+}
+
+static const struct nvkm_gr_func
+gf100_gr_ = {
+	.dtor = gf100_gr_dtor,
+	.oneinit = gf100_gr_oneinit,
+	.init = gf100_gr_init_,
+	.intr = gf100_gr_intr,
+	.units = gf100_gr_units,
+	.chan_new = gf100_gr_chan_new,
+	.object_get = gf100_gr_object_get,
+};
+
 int
-gf100_gr_ctor_fw(struct gf100_gr_priv *priv, const char *fwname,
+gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
 		 struct gf100_gr_fuc *fuc)
 {
-	struct nvkm_device *device = nv_device(priv);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	const struct firmware *fw;
-	char f[32];
+	char f[64];
+	char cname[16];
 	int ret;
+	int i;
 
-	snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
-	ret = request_firmware(&fw, f, nv_device_base(device));
+	/* Convert device name to lowercase */
+	strncpy(cname, device->chip->name, sizeof(cname));
+	cname[sizeof(cname) - 1] = '\0';
+	i = strlen(cname);
+	while (i) {
+		--i;
+		cname[i] = tolower(cname[i]);
+	}
+
+	snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
+	ret = request_firmware(&fw, f, device->dev);
 	if (ret) {
-		snprintf(f, sizeof(f), "nouveau/%s", fwname);
-		ret = request_firmware(&fw, f, nv_device_base(device));
-		if (ret) {
-			nv_error(priv, "failed to load %s\n", fwname);
-			return ret;
-		}
+		nvkm_error(subdev, "failed to load %s\n", fwname);
+		return ret;
 	}
 
 	fuc->size = fw->size;
@@ -1570,126 +1654,150 @@
 	return (fuc->data != NULL) ? 0 : -ENOMEM;
 }
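
Firmware is now requested from the nvidia/<chip>/<name>.bin namespace keyed on the lowercased chip name, rather than the old nouveau/nv%02x_<name> scheme keyed on the chipset id. For example, fwname "fecs_inst" on a gf100 board would resolve to nvidia/gf100/fecs_inst.bin via:

	snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
	ret = request_firmware(&fw, f, device->dev);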
 
-void
-gf100_gr_dtor(struct nvkm_object *object)
+int
+gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
+	      int index, struct gf100_gr *gr)
 {
-	struct gf100_gr_priv *priv = (void *)object;
+	int ret;
 
-	kfree(priv->data);
+	gr->func = func;
+	gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
+				    func->fecs.ucode == NULL);
 
-	gf100_gr_dtor_fw(&priv->fuc409c);
-	gf100_gr_dtor_fw(&priv->fuc409d);
-	gf100_gr_dtor_fw(&priv->fuc41ac);
-	gf100_gr_dtor_fw(&priv->fuc41ad);
+	ret = nvkm_gr_ctor(&gf100_gr_, device, index, 0x08001000,
+			   gr->firmware || func->fecs.ucode != NULL,
+			   &gr->base);
+	if (ret)
+		return ret;
 
-	nvkm_gpuobj_ref(NULL, &priv->unk4188b8);
-	nvkm_gpuobj_ref(NULL, &priv->unk4188b4);
+	if (gr->firmware) {
+		nvkm_info(&gr->base.engine.subdev, "using external firmware\n");
+		if (gf100_gr_ctor_fw(gr, "fecs_inst", &gr->fuc409c) ||
+		    gf100_gr_ctor_fw(gr, "fecs_data", &gr->fuc409d) ||
+		    gf100_gr_ctor_fw(gr, "gpccs_inst", &gr->fuc41ac) ||
+		    gf100_gr_ctor_fw(gr, "gpccs_data", &gr->fuc41ad))
+			return -ENODEV;
+	}
 
-	nvkm_gr_destroy(&priv->base);
+	return 0;
 }
 
 int
-gf100_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *bclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_gr **pgr)
 {
-	struct gf100_gr_oclass *oclass = (void *)bclass;
-	struct nvkm_device *device = nv_device(parent);
-	struct gf100_gr_priv *priv;
-	bool use_ext_fw, enable;
-	int ret, i, j;
+	struct gf100_gr *gr;
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	*pgr = &gr->base;
+	return gf100_gr_ctor(func, device, index, gr);
+}
 
-	use_ext_fw = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
-				  oclass->fecs.ucode == NULL);
-	enable = use_ext_fw || oclass->fecs.ucode != NULL;
+int
+gf100_gr_init(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+	u32 data[TPC_MAX / 8] = {};
+	u8  tpcnr[GPC_MAX];
+	int gpc, tpc, rop;
+	int i;
 
-	ret = nvkm_gr_create(parent, engine, bclass, enable, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
 
-	nv_subdev(priv)->unit = 0x08001000;
-	nv_subdev(priv)->intr = gf100_gr_intr;
+	gf100_gr_mmio(gr, gr->func->mmio);
 
-	priv->base.units = gf100_gr_units;
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+		do {
+			gpc = (gpc + 1) % gr->gpc_nr;
+		} while (!tpcnr[gpc]);
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
-	if (use_ext_fw) {
-		nv_info(priv, "using external firmware\n");
-		if (gf100_gr_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
-		    gf100_gr_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
-		    gf100_gr_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
-		    gf100_gr_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
-			return -ENODEV;
-		priv->firmware = true;
+		data[i / 8] |= tpc << ((i % 8) * 4);
 	}
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
-			      &priv->unk4188b4);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
-			      &priv->unk4188b8);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < 0x1000; i += 4) {
-		nv_wo32(priv->unk4188b4, i, 0x00000010);
-		nv_wo32(priv->unk4188b8, i, 0x00000010);
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+			gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
-	priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
-	priv->gpc_nr =  nv_rd32(priv, 0x409604) & 0x0000001f;
-	for (i = 0; i < priv->gpc_nr; i++) {
-		priv->tpc_nr[i]  = nv_rd32(priv, GPC_UNIT(i, 0x2608));
-		priv->tpc_total += priv->tpc_nr[i];
-		priv->ppc_nr[i]  = oclass->ppc_nr;
-		for (j = 0; j < priv->ppc_nr[i]; j++) {
-			u8 mask = nv_rd32(priv, GPC_UNIT(i, 0x0c30 + (j * 4)));
-			priv->ppc_tpc_nr[i][j] = hweight8(mask);
+	if (device->chipset != 0xd7)
+		nvkm_wr32(device, GPC_BCAST(0x1bd4), magicgpc918);
+	else
+		nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+
+	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+
+	nvkm_wr32(device, 0x400500, 0x00010001);
+
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
+
+	nvkm_wr32(device, 0x409c24, 0x000f0000);
+	nvkm_wr32(device, 0x404000, 0xc0000000);
+	nvkm_wr32(device, 0x404600, 0xc0000000);
+	nvkm_wr32(device, 0x408030, 0xc0000000);
+	nvkm_wr32(device, 0x40601c, 0xc0000000);
+	nvkm_wr32(device, 0x404490, 0xc0000000);
+	nvkm_wr32(device, 0x406018, 0xc0000000);
+	nvkm_wr32(device, 0x405840, 0xc0000000);
+	nvkm_wr32(device, 0x405844, 0x00ffffff);
+	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
 		}
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
 	}
 
-	/*XXX: these need figuring out... though it might not even matter */
-	switch (nv_device(priv)->chipset) {
-	case 0xc0:
-		if (priv->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
-			priv->magic_not_rop_nr = 0x07;
-		} else
-		if (priv->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
-			priv->magic_not_rop_nr = 0x05;
-		} else
-		if (priv->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
-			priv->magic_not_rop_nr = 0x06;
-		}
-		break;
-	case 0xc3: /* 450, 4/0/0/0, 2 */
-		priv->magic_not_rop_nr = 0x03;
-		break;
-	case 0xc4: /* 460, 3/4/0/0, 4 */
-		priv->magic_not_rop_nr = 0x01;
-		break;
-	case 0xc1: /* 2/0/0/0, 1 */
-		priv->magic_not_rop_nr = 0x01;
-		break;
-	case 0xc8: /* 4/4/3/4, 5 */
-		priv->magic_not_rop_nr = 0x06;
-		break;
-	case 0xce: /* 4/4/0/0, 4 */
-		priv->magic_not_rop_nr = 0x03;
-		break;
-	case 0xcf: /* 4/0/0/0, 3 */
-		priv->magic_not_rop_nr = 0x03;
-		break;
-	case 0xd7:
-	case 0xd9: /* 1/0/0/0, 1 */
-		priv->magic_not_rop_nr = 0x01;
-		break;
+	for (rop = 0; rop < gr->rop_nr; rop++) {
+		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
 	}
 
-	nv_engine(priv)->cclass = *oclass->cclass;
-	nv_engine(priv)->sclass =  oclass->sclass;
-	return 0;
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400118, 0xffffffff);
+	nvkm_wr32(device, 0x400130, 0xffffffff);
+	nvkm_wr32(device, 0x40011c, 0xffffffff);
+	nvkm_wr32(device, 0x400134, 0xffffffff);
+
+	nvkm_wr32(device, 0x400054, 0x34ce3464);
+
+	gf100_gr_zbc_init(gr);
+
+	return gf100_gr_init_ctxctl(gr);
 }
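
The TPC setup in gf100_gr_init() deals TPCs out round-robin across the GPCs and packs each one's per-GPC index as a 4-bit nibble, eight per 32-bit word, into GPC_BCAST(0x0980)..0x098c. The distribution loop in isolation:

	u32 data[TPC_MAX / 8] = {};
	u8  tpcnr[GPC_MAX];
	int i, gpc, tpc;

	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
		do {					/* next GPC with TPCs left */
			gpc = (gpc + 1) % gr->gpc_nr;
		} while (!tpcnr[gpc]);
		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;	/* index within GPC */

		data[i / 8] |= tpc << ((i % 8) * 4);	/* 4-bit nibble pack */
	}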
 
 #include "fuc/hubgf100.fuc3.h"
@@ -1712,18 +1820,24 @@
 	.data.size = sizeof(gf100_grgpc_data),
 };
 
-struct nvkm_oclass *
-gf100_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf100_grctx_oclass,
-	.sclass =  gf100_gr_sclass,
+static const struct gf100_gr_func
+gf100_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf100_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+	.grctx = &gf100_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gf100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf100_gr, device, index, pgr);
+}
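
From here on, every per-chipset gr file in this series converges on the same boilerplate: a const gf100_gr_func table naming the init routine, register lists, ucode, grctx and class list, plus a one-line constructor. A hypothetical new variant (all gfxxx_* names below are placeholders) would look like:

	static const struct gf100_gr_func
	gfxxx_gr = {
		.init = gf100_gr_init,
		.mmio = gfxxx_gr_pack_mmio,	/* chip-specific register packs */
		.fecs.ucode = &gf100_gr_fecs_ucode,
		.gpccs.ucode = &gf100_gr_gpccs_ucode,
		.grctx = &gfxxx_grctx,
		.sclass = {
			{ -1, -1, FERMI_TWOD_A },
			{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
			{ -1, -1, FERMI_A, &gf100_fermi },
			{ -1, -1, FERMI_COMPUTE_A },
			{}
		}
	};

	int
	gfxxx_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
	{
		return gf100_gr_new_(&gfxxx_gr, device, index, pgr);
	}
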
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index c9533fd..4611961 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -21,11 +21,14 @@
  *
  * Authors: Ben Skeggs
  */
-#ifndef __NVC0_GR_H__
-#define __NVC0_GR_H__
-#include <engine/gr.h>
+#ifndef __GF100_GR_H__
+#define __GF100_GR_H__
+#define gf100_gr(p) container_of((p), struct gf100_gr, base)
+#include "priv.h"
 
+#include <core/gpuobj.h>
 #include <subdev/ltc.h>
+#include <subdev/mmu.h>
 
 #define GPC_MAX 32
 #define TPC_MAX (GPC_MAX * 8)
@@ -67,7 +70,8 @@
 	u32 l2;
 };
 
-struct gf100_gr_priv {
+struct gf100_gr {
+	const struct gf100_gr_func *func;
 	struct nvkm_gr base;
 
 	struct gf100_gr_fuc fuc409c;
@@ -76,6 +80,15 @@
 	struct gf100_gr_fuc fuc41ad;
 	bool firmware;
 
+	/*
+	 * Used if the register packs are loaded from NVIDIA fw instead of
+	 * using hardcoded arrays.
+	 */
+	struct gf100_gr_pack *fuc_sw_nonctx;
+	struct gf100_gr_pack *fuc_sw_ctx;
+	struct gf100_gr_pack *fuc_bundle;
+	struct gf100_gr_pack *fuc_method;
+
 	struct gf100_gr_zbc_color zbc_color[NVKM_LTC_MAX_ZBC_CNT];
 	struct gf100_gr_zbc_depth zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
 
@@ -86,8 +99,8 @@
 	u8 ppc_nr[GPC_MAX];
 	u8 ppc_tpc_nr[GPC_MAX][4];
 
-	struct nvkm_gpuobj *unk4188b4;
-	struct nvkm_gpuobj *unk4188b8;
+	struct nvkm_memory *unk4188b4;
+	struct nvkm_memory *unk4188b8;
 
 	struct gf100_gr_data mmio_data[4];
 	struct gf100_gr_mmio mmio_list[4096/8];
@@ -97,48 +110,65 @@
 	u8 magic_not_rop_nr;
 };
 
-struct gf100_gr_chan {
-	struct nvkm_gr_chan base;
+int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *,
+		  int, struct gf100_gr *);
+int gf100_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
+		  int, struct nvkm_gr **);
+void *gf100_gr_dtor(struct nvkm_gr *);
 
-	struct nvkm_gpuobj *mmio;
+struct gf100_gr_func {
+	void (*dtor)(struct gf100_gr *);
+	int (*init)(struct gf100_gr *);
+	void (*init_gpc_mmu)(struct gf100_gr *);
+	void (*set_hww_esr_report_mask)(struct gf100_gr *);
+	const struct gf100_gr_pack *mmio;
+	struct {
+		struct gf100_gr_ucode *ucode;
+	} fecs;
+	struct {
+		struct gf100_gr_ucode *ucode;
+	} gpccs;
+	int ppc_nr;
+	const struct gf100_grctx_func *grctx;
+	struct nvkm_sclass sclass[];
+};
+
+int gf100_gr_init(struct gf100_gr *);
+
+int gk104_gr_init(struct gf100_gr *);
+
+int gk20a_gr_new_(const struct gf100_gr_func *, struct nvkm_device *,
+		  int, struct nvkm_gr **);
+void gk20a_gr_dtor(struct gf100_gr *);
+int gk20a_gr_init(struct gf100_gr *);
+
+int gm204_gr_init(struct gf100_gr *);
+
+#define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
+
+struct gf100_gr_chan {
+	struct nvkm_object object;
+	struct gf100_gr *gr;
+
+	struct nvkm_memory *mmio;
 	struct nvkm_vma mmio_vma;
 	int mmio_nr;
+
 	struct {
-		struct nvkm_gpuobj *mem;
+		struct nvkm_memory *mem;
 		struct nvkm_vma vma;
 	} data[4];
 };
 
-int  gf100_gr_context_ctor(struct nvkm_object *, struct nvkm_object *,
-			     struct nvkm_oclass *, void *, u32,
-			     struct nvkm_object **);
-void gf100_gr_context_dtor(struct nvkm_object *);
+void gf100_gr_ctxctl_debug(struct gf100_gr *);
 
-void gf100_gr_ctxctl_debug(struct gf100_gr_priv *);
-
+void gf100_gr_dtor_fw(struct gf100_gr_fuc *);
+int  gf100_gr_ctor_fw(struct gf100_gr *, const char *,
+		      struct gf100_gr_fuc *);
 u64  gf100_gr_units(struct nvkm_gr *);
-int  gf100_gr_ctor(struct nvkm_object *, struct nvkm_object *,
-		     struct nvkm_oclass *, void *data, u32 size,
-		     struct nvkm_object **);
-void gf100_gr_dtor(struct nvkm_object *);
-int  gf100_gr_init(struct nvkm_object *);
-void gf100_gr_zbc_init(struct gf100_gr_priv *);
+void gf100_gr_zbc_init(struct gf100_gr *);
 
-int  gk104_gr_ctor(struct nvkm_object *, struct nvkm_object *,
-		     struct nvkm_oclass *, void *data, u32 size,
-		     struct nvkm_object **);
-int  gk104_gr_init(struct nvkm_object *);
-
-int  gm204_gr_init(struct nvkm_object *);
-
-extern struct nvkm_ofuncs gf100_fermi_ofuncs;
-
-extern struct nvkm_oclass gf100_gr_sclass[];
-extern struct nvkm_omthds gf100_gr_9097_omthds[];
-extern struct nvkm_omthds gf100_gr_90c0_omthds[];
-extern struct nvkm_oclass gf110_gr_sclass[];
-extern struct nvkm_oclass gk110_gr_sclass[];
-extern struct nvkm_oclass gm204_gr_sclass[];
+extern const struct nvkm_object_func gf100_fermi;
 
 struct gf100_gr_init {
 	u32 addr;
@@ -167,25 +197,11 @@
 extern struct gf100_gr_ucode gk110_gr_fecs_ucode;
 extern struct gf100_gr_ucode gk110_gr_gpccs_ucode;
 
-struct gf100_gr_oclass {
-	struct nvkm_oclass base;
-	struct nvkm_oclass **cclass;
-	struct nvkm_oclass *sclass;
-	const struct gf100_gr_pack *mmio;
-	struct {
-		struct gf100_gr_ucode *ucode;
-	} fecs;
-	struct {
-		struct gf100_gr_ucode *ucode;
-	} gpccs;
-	int ppc_nr;
-};
-
-int  gf100_gr_wait_idle(struct gf100_gr_priv *);
-void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
-void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
-void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
-int  gf100_gr_init_ctxctl(struct gf100_gr_priv *);
+int  gf100_gr_wait_idle(struct gf100_gr *);
+void gf100_gr_mmio(struct gf100_gr *, const struct gf100_gr_pack *);
+void gf100_gr_icmd(struct gf100_gr *, const struct gf100_gr_pack *);
+void gf100_gr_mthd(struct gf100_gr *, const struct gf100_gr_pack *);
+int  gf100_gr_init_ctxctl(struct gf100_gr *);
 
 /* register init value lists */
 
@@ -261,7 +277,7 @@
 extern const struct gf100_gr_init gm107_gr_init_l1c_0[];
 extern const struct gf100_gr_init gm107_gr_init_wwdx_0[];
 extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
-void gm107_gr_init_bios(struct gf100_gr_priv *);
+void gm107_gr_init_bios(struct gf100_gr *);
 
 extern const struct gf100_gr_pack gm204_gr_pack_mmio[];
 #endif
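
The gf100_gr() and gf100_gr_chan() macros in this header are plain container_of() downcasts: given a pointer to the embedded base object, they recover the containing structure. A sketch mirroring gf100_gr_init_() above (callback name hypothetical):

	#define gf100_gr(p) container_of((p), struct gf100_gr, base)

	static int
	example_init(struct nvkm_gr *base)
	{
		struct gf100_gr *gr = gf100_gr(base);	/* downcast */
		return gr->func->init(gr);
	}
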
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 20d3b85..8f253e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -24,6 +24,8 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <nvif/class.h>
+
 /*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
@@ -110,18 +112,24 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf104_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xc3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf104_grctx_oclass,
-	.sclass = gf100_gr_sclass,
+static const struct gf100_gr_func
+gf104_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf104_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+	.grctx = &gf104_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gf104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf104_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 8df7342..815a5aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -27,20 +27,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf108_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
-	{ FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -117,18 +103,25 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf108_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xc1),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf108_grctx_oclass,
-	.sclass = gf108_gr_sclass,
+static const struct gf100_gr_func
+gf108_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf108_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+	.grctx = &gf108_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_B, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gf108_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf108_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index ef76e2d..d131874 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -27,21 +27,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-struct nvkm_oclass
-gf110_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
-	{ FERMI_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_C, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ FERMI_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -99,18 +84,26 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf110_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xc8),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf110_grctx_oclass,
-	.sclass = gf110_gr_sclass,
+static const struct gf100_gr_func
+gf110_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf110_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+	.grctx = &gf110_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_B, &gf100_fermi },
+		{ -1, -1, FERMI_C, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gf110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf110_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 871ac5f..28483d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -24,6 +24,8 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <nvif/class.h>
+
 /*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
@@ -118,19 +120,27 @@
 	.data.size = sizeof(gf117_grgpc_data),
 };
 
-struct nvkm_oclass *
-gf117_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xd7),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf117_grctx_oclass,
-	.sclass = gf110_gr_sclass,
+static const struct gf100_gr_func
+gf117_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf117_gr_pack_mmio,
 	.fecs.ucode = &gf117_gr_fecs_ucode,
 	.gpccs.ucode = &gf117_gr_gpccs_ucode,
 	.ppc_nr = 1,
-}.base;
+	.grctx = &gf117_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_B, &gf100_fermi },
+		{ -1, -1, FERMI_C, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gf117_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf117_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index e6dd651..9811a72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -24,6 +24,8 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <nvif/class.h>
+
 /*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
@@ -173,18 +175,26 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf119_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xd9),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gf100_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gf119_grctx_oclass,
-	.sclass = gf110_gr_sclass,
+static const struct gf100_gr_func
+gf119_gr = {
+	.init = gf100_gr_init,
 	.mmio = gf119_gr_pack_mmio,
 	.fecs.ucode = &gf100_gr_fecs_ucode,
 	.gpccs.ucode = &gf100_gr_gpccs_ucode,
-}.base;
+	.grctx = &gf119_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
+		{ -1, -1, FERMI_A, &gf100_fermi },
+		{ -1, -1, FERMI_B, &gf100_fermi },
+		{ -1, -1, FERMI_C, &gf100_fermi },
+		{ -1, -1, FERMI_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gf119_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gf119_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 46f7844..abf5492 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -24,24 +24,9 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
-#include <subdev/pmu.h>
-
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
-	{ KEPLER_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ KEPLER_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -193,132 +178,112 @@
  ******************************************************************************/
 
 int
-gk104_gr_init(struct nvkm_object *object)
+gk104_gr_init(struct gf100_gr *gr)
 {
-	struct gf100_gr_oclass *oclass = (void *)object->oclass;
-	struct gf100_gr_priv *priv = (void *)object;
-	struct nvkm_pmu *pmu = nvkm_pmu(priv);
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
 	u32 data[TPC_MAX / 8] = {};
 	u8  tpcnr[GPC_MAX];
 	int gpc, tpc, rop;
-	int ret, i;
+	int i;
 
-	if (pmu)
-		pmu->pgob(pmu, false);
+	nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
 
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
+	gf100_gr_mmio(gr, gr->func->mmio);
 
-	nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0888), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x088c), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
-	nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
-
-	gf100_gr_mmio(priv, oclass->mmio);
-
-	nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
+	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
 
 	memset(data, 0x00, sizeof(data));
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
-		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
 		data[i / 8] |= tpc << ((i % 8) * 4);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
-	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
-	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
-	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
-			priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			priv->tpc_total);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+			gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
-	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
+	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
 
-	nv_wr32(priv, 0x400500, 0x00010001);
+	nvkm_wr32(device, 0x400500, 0x00010001);
 
-	nv_wr32(priv, 0x400100, 0xffffffff);
-	nv_wr32(priv, 0x40013c, 0xffffffff);
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
 
-	nv_wr32(priv, 0x409ffc, 0x00000000);
-	nv_wr32(priv, 0x409c14, 0x00003e3e);
-	nv_wr32(priv, 0x409c24, 0x000f0001);
-	nv_wr32(priv, 0x404000, 0xc0000000);
-	nv_wr32(priv, 0x404600, 0xc0000000);
-	nv_wr32(priv, 0x408030, 0xc0000000);
-	nv_wr32(priv, 0x404490, 0xc0000000);
-	nv_wr32(priv, 0x406018, 0xc0000000);
-	nv_wr32(priv, 0x407020, 0x40000000);
-	nv_wr32(priv, 0x405840, 0xc0000000);
-	nv_wr32(priv, 0x405844, 0x00ffffff);
-	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
-	nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
+	nvkm_wr32(device, 0x409ffc, 0x00000000);
+	nvkm_wr32(device, 0x409c14, 0x00003e3e);
+	nvkm_wr32(device, 0x409c24, 0x000f0001);
+	nvkm_wr32(device, 0x404000, 0xc0000000);
+	nvkm_wr32(device, 0x404600, 0xc0000000);
+	nvkm_wr32(device, 0x408030, 0xc0000000);
+	nvkm_wr32(device, 0x404490, 0xc0000000);
+	nvkm_wr32(device, 0x406018, 0xc0000000);
+	nvkm_wr32(device, 0x407020, 0x40000000);
+	nvkm_wr32(device, 0x405840, 0xc0000000);
+	nvkm_wr32(device, 0x405844, 0x00ffffff);
+	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+	nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x3038), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x3038), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
 		}
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
 	}
 
-	for (rop = 0; rop < priv->rop_nr; rop++) {
-		nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+	for (rop = 0; rop < gr->rop_nr; rop++) {
+		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0xc0000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0xc0000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
 	}
 
-	nv_wr32(priv, 0x400108, 0xffffffff);
-	nv_wr32(priv, 0x400138, 0xffffffff);
-	nv_wr32(priv, 0x400118, 0xffffffff);
-	nv_wr32(priv, 0x400130, 0xffffffff);
-	nv_wr32(priv, 0x40011c, 0xffffffff);
-	nv_wr32(priv, 0x400134, 0xffffffff);
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400118, 0xffffffff);
+	nvkm_wr32(device, 0x400130, 0xffffffff);
+	nvkm_wr32(device, 0x40011c, 0xffffffff);
+	nvkm_wr32(device, 0x400134, 0xffffffff);
 
-	nv_wr32(priv, 0x400054, 0x34ce3464);
+	nvkm_wr32(device, 0x400054, 0x34ce3464);
 
-	gf100_gr_zbc_init(priv);
+	gf100_gr_zbc_init(gr);
 
-	return gf100_gr_init_ctxctl(priv);
-}
-
-int
-gk104_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct nvkm_pmu *pmu = nvkm_pmu(parent);
-	if (pmu)
-		pmu->pgob(pmu, false);
-	return gf100_gr_ctor(parent, engine, oclass, data, size, pobject);
+	return gf100_gr_init_ctxctl(gr);
 }
 
 #include "fuc/hubgk104.fuc3.h"
@@ -341,19 +306,25 @@
 	.data.size = sizeof(gk104_grgpc_data),
 };
 
-struct nvkm_oclass *
-gk104_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xe4),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gk104_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gk104_grctx_oclass,
-	.sclass = gk104_gr_sclass,
+static const struct gf100_gr_func
+gk104_gr = {
+	.init = gk104_gr_init,
 	.mmio = gk104_gr_pack_mmio,
 	.fecs.ucode = &gk104_gr_fecs_ucode,
 	.gpccs.ucode = &gk104_gr_gpccs_ucode,
 	.ppc_nr = 1,
-}.base;
+	.grctx = &gk104_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
+		{ -1, -1, KEPLER_A, &gf100_fermi },
+		{ -1, -1, KEPLER_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gk104_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gk104_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index f4cd8e5..32aa294 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -29,19 +29,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-struct nvkm_oclass
-gk110_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
-	{ KEPLER_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ KEPLER_COMPUTE_B, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -193,19 +180,25 @@
 	.data.size = sizeof(gk110_grgpc_data),
 };
 
-struct nvkm_oclass *
-gk110_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xf0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gk104_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gk110_grctx_oclass,
-	.sclass =  gk110_gr_sclass,
+static const struct gf100_gr_func
+gk110_gr = {
+	.init = gk104_gr_init,
 	.mmio = gk110_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
 	.gpccs.ucode = &gk110_gr_gpccs_ucode,
 	.ppc_nr = 2,
-}.base;
+	.grctx = &gk110_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, KEPLER_B, &gf100_fermi },
+		{ -1, -1, KEPLER_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gk110_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gk110_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 9ff9eab..22f88af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -24,6 +24,8 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <nvif/class.h>
+
 /*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
@@ -98,19 +100,25 @@
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gk110b_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xf1),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gk104_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gk110b_grctx_oclass,
-	.sclass =  gk110_gr_sclass,
+static const struct gf100_gr_func
+gk110b_gr = {
+	.init = gk104_gr_init,
 	.mmio = gk110b_gr_pack_mmio,
 	.fecs.ucode = &gk110_gr_fecs_ucode,
 	.gpccs.ucode = &gk110_gr_gpccs_ucode,
 	.ppc_nr = 2,
-}.base;
+	.grctx = &gk110b_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, KEPLER_B, &gf100_fermi },
+		{ -1, -1, KEPLER_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gk110b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gk110b_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 85f44a3..ee7554f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -29,19 +29,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk208_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
-	{ KEPLER_B, &gf100_fermi_ofuncs },
-	{ KEPLER_COMPUTE_B, &nvkm_object_ofuncs },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -172,19 +159,25 @@
 	.data.size = sizeof(gk208_grgpc_data),
 };
 
-struct nvkm_oclass *
-gk208_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0x08),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gk104_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gk208_grctx_oclass,
-	.sclass =  gk208_gr_sclass,
+static const struct gf100_gr_func
+gk208_gr = {
+	.init = gk104_gr_init,
 	.mmio = gk208_gr_pack_mmio,
 	.fecs.ucode = &gk208_gr_fecs_ucode,
 	.gpccs.ucode = &gk208_gr_gpccs_ucode,
 	.ppc_nr = 1,
-}.base;
+	.grctx = &gk208_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, KEPLER_B, &gf100_fermi },
+		{ -1, -1, KEPLER_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gk208_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gk208_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 40ff5eb..b8758d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,28 +22,335 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <subdev/timer.h>
+
 #include <nvif/class.h>
 
-static struct nvkm_oclass
-gk20a_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
-	{ KEPLER_C, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ KEPLER_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
+static void
+gk20a_gr_init_dtor(struct gf100_gr_pack *pack)
+{
+	vfree(pack);
+}
+
+struct gk20a_fw_av
+{
+	u32 addr;
+	u32 data;
 };
 
-struct nvkm_oclass *
-gk20a_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gk104_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gk20a_grctx_oclass,
-	.sclass = gk20a_gr_sclass,
-	.mmio = gk104_gr_pack_mmio,
+static struct gf100_gr_pack *
+gk20a_gr_av_to_init(struct gf100_gr_fuc *fuc)
+{
+	struct gf100_gr_init *init;
+	struct gf100_gr_pack *pack;
+	const int nent = (fuc->size / sizeof(struct gk20a_fw_av));
+	int i;
+
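+	/* the trailing zeroed pack and zeroed init entry terminate the lists */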
+	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
+	if (!pack)
+		return ERR_PTR(-ENOMEM);
+
+	init = (void *)(pack + 2);
+
+	pack[0].init = init;
+
+	for (i = 0; i < nent; i++) {
+		struct gf100_gr_init *ent = &init[i];
+		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc->data)[i];
+
+		ent->addr = av->addr;
+		ent->data = av->data;
+		ent->count = 1;
+		ent->pitch = 1;
+	}
+
+	return pack;
+}
+
+struct gk20a_fw_aiv
+{
+	u32 addr;
+	u32 index;
+	u32 data;
+};
+
+static struct gf100_gr_pack *
+gk20a_gr_aiv_to_init(struct gf100_gr_fuc *fuc)
+{
+	struct gf100_gr_init *init;
+	struct gf100_gr_pack *pack;
+	const int nent = (fuc->size / sizeof(struct gk20a_fw_aiv));
+	int i;
+
+	pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
+	if (!pack)
+		return ERR_PTR(-ENOMEM);
+
+	init = (void *)(pack + 2);
+
+	pack[0].init = init;
+
+	for (i = 0; i < nent; i++) {
+		struct gf100_gr_init *ent = &init[i];
+		struct gk20a_fw_aiv *av = &((struct gk20a_fw_aiv *)fuc->data)[i];
+
+		ent->addr = av->addr;
+		ent->data = av->data;
+		ent->count = 1;
+		ent->pitch = 1;
+	}
+
+	return pack;
+}
+
+static struct gf100_gr_pack *
+gk20a_gr_av_to_method(struct gf100_gr_fuc *fuc)
+{
+	struct gf100_gr_init *init;
+	struct gf100_gr_pack *pack;
+	/* We don't expect to initialize more than 16 classes here... */
+	static const unsigned int max_classes = 16;
+	const int nent = (fuc->size / sizeof(struct gk20a_fw_av));
+	int i, classidx = 0;
+	u32 prevclass = 0;
+
+	pack = vzalloc((sizeof(*pack) * max_classes) +
+		       (sizeof(*init) * (nent + 1)));
+	if (!pack)
+		return ERR_PTR(-ENOMEM);
+
+	init = (void *)(pack + max_classes);
+
+	for (i = 0; i < nent; i++) {
+		struct gf100_gr_init *ent = &init[i];
+		struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc->data)[i];
+		u32 class = av->addr & 0xffff;
+		u32 addr = (av->addr & 0xffff0000) >> 14;
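+		/* high 16 bits hold the method offset in dwords; >> 14 converts it to a byte offset */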
+
+		if (prevclass != class) {
+			pack[classidx].init = ent;
+			pack[classidx].type = class;
+			prevclass = class;
+			if (++classidx >= max_classes) {
+				vfree(pack);
+				return ERR_PTR(-ENOSPC);
+			}
+		}
+
+		ent->addr = addr;
+		ent->data = av->data;
+		ent->count = 1;
+		ent->pitch = 1;
+	}
+
+	return pack;
+}
+
+static int
+gk20a_gr_wait_mem_scrubbing(struct gf100_gr *gr)
+{
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+
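+	/* FALCON_DMACTL bits 1:2 stay set while DMEM/IMEM scrubbing is in progress */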
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x40910c) & 0x00000006))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "FECS mem scrubbing timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x41a10c) & 0x00000006))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "GPCCS mem scrubbing timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static void
+gk20a_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, 0x419e44, 0x1ffffe);
+	nvkm_wr32(device, 0x419e4c, 0x7f);
+}
+
+int
+gk20a_gr_init(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
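+	/* reciprocal of tpc_total in 0.23 fixed point, rounded up */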
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+	u32 data[TPC_MAX / 8] = {};
+	u8  tpcnr[GPC_MAX];
+	int gpc, tpc;
+	int ret, i;
+
+	/* Clear SCC RAM */
+	nvkm_wr32(device, 0x40802c, 0x1);
+
+	gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+
+	ret = gk20a_gr_wait_mem_scrubbing(gr);
+	if (ret)
+		return ret;
+
+	ret = gf100_gr_wait_idle(gr);
+	if (ret)
+		return ret;
+
+	/* MMU debug buffer */
+	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
+
+	if (gr->func->init_gpc_mmu)
+		gr->func->init_gpc_mmu(gr);
+
+	/* Set the PE as stream master */
+	nvkm_mask(device, 0x503018, 0x1, 0x1);
+
+	/* Zcull init */
+	memset(data, 0x00, sizeof(data));
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
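+	/* assign each TPC a 4-bit slot, eight slots per zcull mapping register */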
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+		do {
+			gpc = (gpc + 1) % gr->gpc_nr;
+		} while (!tpcnr[gpc]);
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+		data[i / 8] |= tpc << ((i % 8) * 4);
+	}
+
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			  gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+			  gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	}
+
+	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+
+	/* Enable FIFO access */
+	nvkm_wr32(device, 0x400500, 0x00010001);
+
+	/* Enable interrupts */
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
+
+	/* Enable FECS error interrupts */
+	nvkm_wr32(device, 0x409c24, 0x000f0000);
+
+	/* Enable hardware warning exceptions */
+	nvkm_wr32(device, 0x404000, 0xc0000000);
+	nvkm_wr32(device, 0x404600, 0xc0000000);
+
+	if (gr->func->set_hww_esr_report_mask)
+		gr->func->set_hww_esr_report_mask(gr);
+
+	/* Enable TPC exceptions per GPC */
+	nvkm_wr32(device, 0x419d0c, 0x2);
+	nvkm_wr32(device, 0x41ac94, (((1 << gr->tpc_total) - 1) & 0xff) << 16);
+
+	/* Reset and enable all exceptions */
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400118, 0xffffffff);
+	nvkm_wr32(device, 0x400130, 0xffffffff);
+	nvkm_wr32(device, 0x40011c, 0xffffffff);
+	nvkm_wr32(device, 0x400134, 0xffffffff);
+
+	gf100_gr_zbc_init(gr);
+
+	return gf100_gr_init_ctxctl(gr);
+}
+
+void
+gk20a_gr_dtor(struct gf100_gr *gr)
+{
+	gk20a_gr_init_dtor(gr->fuc_method);
+	gk20a_gr_init_dtor(gr->fuc_bundle);
+	gk20a_gr_init_dtor(gr->fuc_sw_ctx);
+	gk20a_gr_init_dtor(gr->fuc_sw_nonctx);
+}
+
+int
+gk20a_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_gr **pgr)
+{
+	struct gf100_gr_fuc fuc;
+	struct gf100_gr *gr;
+	int ret;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	*pgr = &gr->base;
+
+	ret = gf100_gr_ctor(func, device, index, gr);
+	if (ret)
+		return ret;
+
+	ret = gf100_gr_ctor_fw(gr, "sw_nonctx", &fuc);
+	if (ret)
+		return ret;
+	gr->fuc_sw_nonctx = gk20a_gr_av_to_init(&fuc);
+	gf100_gr_dtor_fw(&fuc);
+	if (IS_ERR(gr->fuc_sw_nonctx))
+		return PTR_ERR(gr->fuc_sw_nonctx);
+
+	ret = gf100_gr_ctor_fw(gr, "sw_ctx", &fuc);
+	if (ret)
+		return ret;
+	gr->fuc_sw_ctx = gk20a_gr_aiv_to_init(&fuc);
+	gf100_gr_dtor_fw(&fuc);
+	if (IS_ERR(gr->fuc_sw_ctx))
+		return PTR_ERR(gr->fuc_sw_ctx);
+
+	ret = gf100_gr_ctor_fw(gr, "sw_bundle_init", &fuc);
+	if (ret)
+		return ret;
+	gr->fuc_bundle = gk20a_gr_av_to_init(&fuc);
+	gf100_gr_dtor_fw(&fuc);
+	if (IS_ERR(gr->fuc_bundle))
+		return PTR_ERR(gr->fuc_bundle);
+
+	ret = gf100_gr_ctor_fw(gr, "sw_method_init", &fuc);
+	if (ret)
+		return ret;
+	gr->fuc_method = gk20a_gr_av_to_method(&fuc);
+	gf100_gr_dtor_fw(&fuc);
+	if (IS_ERR(gr->fuc_method))
+		return PTR_ERR(gr->fuc_method);
+
+	return 0;
+}
+
+static const struct gf100_gr_func
+gk20a_gr = {
+	.dtor = gk20a_gr_dtor,
+	.init = gk20a_gr_init,
+	.set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
 	.ppc_nr = 1,
-}.base;
+	.grctx = &gk20a_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
+		{ -1, -1, KEPLER_C, &gf100_fermi },
+		{ -1, -1, KEPLER_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gk20a_gr_new_(&gk20a_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index a5ebd45..56e9602 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -30,19 +30,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gm107_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
-	{ MAXWELL_A, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ MAXWELL_COMPUTE_A, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -292,7 +279,7 @@
  ******************************************************************************/
 
 void
-gm107_gr_init_bios(struct gf100_gr_priv *priv)
+gm107_gr_init_bios(struct gf100_gr *gr)
 {
 	static const struct {
 		u32 ctrl;
@@ -304,7 +291,8 @@
 		{ 0x419af0, 0x419af4 },
 		{ 0x419af8, 0x419afc },
 	};
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	struct nvbios_P0260E infoE;
 	struct nvbios_P0260X infoX;
 	int E = -1, X;
@@ -312,124 +300,119 @@
 
 	while (nvbios_P0260Ep(bios, ++E, &ver, &hdr, &infoE)) {
 		if (X = -1, E < ARRAY_SIZE(regs)) {
-			nv_wr32(priv, regs[E].ctrl, infoE.data);
+			nvkm_wr32(device, regs[E].ctrl, infoE.data);
 			while (nvbios_P0260Xp(bios, ++X, &ver, &hdr, &infoX))
-				nv_wr32(priv, regs[E].data, infoX.data);
+				nvkm_wr32(device, regs[E].data, infoX.data);
 		}
 	}
 }
 
 int
-gm107_gr_init(struct nvkm_object *object)
+gm107_gr_init(struct gf100_gr *gr)
 {
-	struct gf100_gr_oclass *oclass = (void *)object->oclass;
-	struct gf100_gr_priv *priv = (void *)object;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
 	u32 data[TPC_MAX / 8] = {};
 	u8  tpcnr[GPC_MAX];
 	int gpc, tpc, ppc, rop;
-	int ret, i;
+	int i;
 
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
+	nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
 
-	nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0890), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x0894), 0x00000000);
-	nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
-	nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
+	gf100_gr_mmio(gr, gr->func->mmio);
 
-	gf100_gr_mmio(priv, oclass->mmio);
+	gm107_gr_init_bios(gr);
 
-	gm107_gr_init_bios(priv);
-
-	nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
+	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
 
 	memset(data, 0x00, sizeof(data));
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
-		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
 		data[i / 8] |= tpc << ((i % 8) * 4);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
-	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
-	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
-	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
-			priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			priv->tpc_total);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+			gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
-	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
+	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
 
-	nv_wr32(priv, 0x400500, 0x00010001);
+	nvkm_wr32(device, 0x400500, 0x00010001);
 
-	nv_wr32(priv, 0x400100, 0xffffffff);
-	nv_wr32(priv, 0x40013c, 0xffffffff);
-	nv_wr32(priv, 0x400124, 0x00000002);
-	nv_wr32(priv, 0x409c24, 0x000e0000);
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
+	nvkm_wr32(device, 0x400124, 0x00000002);
+	nvkm_wr32(device, 0x409c24, 0x000e0000);
 
-	nv_wr32(priv, 0x404000, 0xc0000000);
-	nv_wr32(priv, 0x404600, 0xc0000000);
-	nv_wr32(priv, 0x408030, 0xc0000000);
-	nv_wr32(priv, 0x404490, 0xc0000000);
-	nv_wr32(priv, 0x406018, 0xc0000000);
-	nv_wr32(priv, 0x407020, 0x40000000);
-	nv_wr32(priv, 0x405840, 0xc0000000);
-	nv_wr32(priv, 0x405844, 0x00ffffff);
-	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
+	nvkm_wr32(device, 0x404000, 0xc0000000);
+	nvkm_wr32(device, 0x404600, 0xc0000000);
+	nvkm_wr32(device, 0x408030, 0xc0000000);
+	nvkm_wr32(device, 0x404490, 0xc0000000);
+	nvkm_wr32(device, 0x406018, 0xc0000000);
+	nvkm_wr32(device, 0x407020, 0x40000000);
+	nvkm_wr32(device, 0x405840, 0xc0000000);
+	nvkm_wr32(device, 0x405844, 0x00ffffff);
+	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (ppc = 0; ppc < 2 /* priv->ppc_nr[gpc] */; ppc++)
-			nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (ppc = 0; ppc < 2 /* gr->ppc_nr[gpc] */; ppc++)
+			nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
 		}
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
 	}
 
-	for (rop = 0; rop < priv->rop_nr; rop++) {
-		nv_wr32(priv, ROP_UNIT(rop, 0x144), 0x40000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x070), 0x40000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+	for (rop = 0; rop < gr->rop_nr; rop++) {
+		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
 	}
 
-	nv_wr32(priv, 0x400108, 0xffffffff);
-	nv_wr32(priv, 0x400138, 0xffffffff);
-	nv_wr32(priv, 0x400118, 0xffffffff);
-	nv_wr32(priv, 0x400130, 0xffffffff);
-	nv_wr32(priv, 0x40011c, 0xffffffff);
-	nv_wr32(priv, 0x400134, 0xffffffff);
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400118, 0xffffffff);
+	nvkm_wr32(device, 0x400130, 0xffffffff);
+	nvkm_wr32(device, 0x40011c, 0xffffffff);
+	nvkm_wr32(device, 0x400134, 0xffffffff);
 
-	nv_wr32(priv, 0x400054, 0x2c350f63);
+	nvkm_wr32(device, 0x400054, 0x2c350f63);
 
-	gf100_gr_zbc_init(priv);
+	gf100_gr_zbc_init(gr);
 
-	return gf100_gr_init_ctxctl(priv);
+	return gf100_gr_init_ctxctl(gr);
 }
 
 #include "fuc/hubgm107.fuc5.h"
@@ -452,19 +435,25 @@
 	.data.size = sizeof(gm107_grgpc_data),
 };
 
-struct nvkm_oclass *
-gm107_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0x07),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gm107_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gm107_grctx_oclass,
-	.sclass =  gm107_gr_sclass,
+static const struct gf100_gr_func
+gm107_gr = {
+	.init = gm107_gr_init,
 	.mmio = gm107_gr_pack_mmio,
 	.fecs.ucode = &gm107_gr_fecs_ucode,
 	.gpccs.ucode = &gm107_gr_gpccs_ucode,
 	.ppc_nr = 2,
-}.base;
+	.grctx = &gm107_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, MAXWELL_A, &gf100_fermi },
+		{ -1, -1, MAXWELL_COMPUTE_A },
+		{}
+	}
+};
+
+int
+gm107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gm107_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
index fdb1dcf..90381dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
@@ -27,19 +27,6 @@
 #include <nvif/class.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-struct nvkm_oclass
-gm204_gr_sclass[] = {
-	{ FERMI_TWOD_A, &nvkm_object_ofuncs },
-	{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
-	{ MAXWELL_B, &gf100_fermi_ofuncs, gf100_gr_9097_omthds },
-	{ MAXWELL_COMPUTE_B, &nvkm_object_ofuncs, gf100_gr_90c0_omthds },
-	{}
-};
-
-/*******************************************************************************
  * PGRAPH register lists
  ******************************************************************************/
 
@@ -243,144 +230,144 @@
  ******************************************************************************/
 
 static int
-gm204_gr_init_ctxctl(struct gf100_gr_priv *priv)
+gm204_gr_init_ctxctl(struct gf100_gr *gr)
 {
 	return 0;
 }
 
 int
-gm204_gr_init(struct nvkm_object *object)
+gm204_gr_init(struct gf100_gr *gr)
 {
-	struct gf100_gr_oclass *oclass = (void *)object->oclass;
-	struct gf100_gr_priv *priv = (void *)object;
-	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
-	u32 data[TPC_MAX / 8] = {};
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+	u32 data[TPC_MAX / 8] = {}, tmp;
 	u8  tpcnr[GPC_MAX];
 	int gpc, tpc, ppc, rop;
-	int ret, i;
-	u32 tmp;
+	int i;
 
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
-
-	tmp = nv_rd32(priv, 0x100c80); /*XXX: mask? */
-	nv_wr32(priv, 0x418880, 0x00001000 | (tmp & 0x00000fff));
-	nv_wr32(priv, 0x418890, 0x00000000);
-	nv_wr32(priv, 0x418894, 0x00000000);
-	nv_wr32(priv, 0x4188b4, priv->unk4188b4->addr >> 8);
-	nv_wr32(priv, 0x4188b8, priv->unk4188b8->addr >> 8);
-	nv_mask(priv, 0x4188b0, 0x00040000, 0x00040000);
+	tmp = nvkm_rd32(device, 0x100c80); /*XXX: mask? */
+	nvkm_wr32(device, 0x418880, 0x00001000 | (tmp & 0x00000fff));
+	nvkm_wr32(device, 0x418890, 0x00000000);
+	nvkm_wr32(device, 0x418894, 0x00000000);
+	nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(gr->unk4188b8) >> 8);
+	nvkm_mask(device, 0x4188b0, 0x00040000, 0x00040000);
 
 	/*XXX: belongs in fb */
-	nv_wr32(priv, 0x100cc8, priv->unk4188b4->addr >> 8);
-	nv_wr32(priv, 0x100ccc, priv->unk4188b8->addr >> 8);
-	nv_mask(priv, 0x100cc4, 0x00040000, 0x00040000);
+	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
+	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
+	nvkm_mask(device, 0x100cc4, 0x00040000, 0x00040000);
 
-	gf100_gr_mmio(priv, oclass->mmio);
+	gf100_gr_mmio(gr, gr->func->mmio);
 
-	gm107_gr_init_bios(priv);
+	gm107_gr_init_bios(gr);
 
-	nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
+	nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
 
 	memset(data, 0x00, sizeof(data));
-	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
 		do {
-			gpc = (gpc + 1) % priv->gpc_nr;
+			gpc = (gpc + 1) % gr->gpc_nr;
 		} while (!tpcnr[gpc]);
-		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
 
 		data[i / 8] |= tpc << ((i % 8) * 4);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
-	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
-	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
-	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
+	nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+	nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+	nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+	nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0914),
-			priv->magic_not_rop_nr << 8 | priv->tpc_nr[gpc]);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 |
-			priv->tpc_total);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+			gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+			gr->tpc_total);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
-	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
-	nv_wr32(priv, GPC_BCAST(0x033c), nv_rd32(priv, 0x100804));
+	nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+	nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+	nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
 
-	nv_wr32(priv, 0x400500, 0x00010001);
-	nv_wr32(priv, 0x400100, 0xffffffff);
-	nv_wr32(priv, 0x40013c, 0xffffffff);
-	nv_wr32(priv, 0x400124, 0x00000002);
-	nv_wr32(priv, 0x409c24, 0x000e0000);
-	nv_wr32(priv, 0x405848, 0xc0000000);
-	nv_wr32(priv, 0x40584c, 0x00000001);
-	nv_wr32(priv, 0x404000, 0xc0000000);
-	nv_wr32(priv, 0x404600, 0xc0000000);
-	nv_wr32(priv, 0x408030, 0xc0000000);
-	nv_wr32(priv, 0x404490, 0xc0000000);
-	nv_wr32(priv, 0x406018, 0xc0000000);
-	nv_wr32(priv, 0x407020, 0x40000000);
-	nv_wr32(priv, 0x405840, 0xc0000000);
-	nv_wr32(priv, 0x405844, 0x00ffffff);
-	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
+	nvkm_wr32(device, 0x400500, 0x00010001);
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
+	nvkm_wr32(device, 0x400124, 0x00000002);
+	nvkm_wr32(device, 0x409c24, 0x000e0000);
+	nvkm_wr32(device, 0x405848, 0xc0000000);
+	nvkm_wr32(device, 0x40584c, 0x00000001);
+	nvkm_wr32(device, 0x404000, 0xc0000000);
+	nvkm_wr32(device, 0x404600, 0xc0000000);
+	nvkm_wr32(device, 0x408030, 0xc0000000);
+	nvkm_wr32(device, 0x404490, 0xc0000000);
+	nvkm_wr32(device, 0x406018, 0xc0000000);
+	nvkm_wr32(device, 0x407020, 0x40000000);
+	nvkm_wr32(device, 0x405840, 0xc0000000);
+	nvkm_wr32(device, 0x405844, 0x00ffffff);
+	nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
 
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++)
-			nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
-		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
-			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
+	for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+		for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++)
+			nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+			nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
 		}
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
-		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
 	}
 
-	for (rop = 0; rop < priv->rop_nr; rop++) {
-		nv_wr32(priv, ROP_UNIT(rop, 0x144), 0x40000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x070), 0x40000000);
-		nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
-		nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+	for (rop = 0; rop < gr->rop_nr; rop++) {
+		nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
 	}
 
-	nv_wr32(priv, 0x400108, 0xffffffff);
-	nv_wr32(priv, 0x400138, 0xffffffff);
-	nv_wr32(priv, 0x400118, 0xffffffff);
-	nv_wr32(priv, 0x400130, 0xffffffff);
-	nv_wr32(priv, 0x40011c, 0xffffffff);
-	nv_wr32(priv, 0x400134, 0xffffffff);
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400118, 0xffffffff);
+	nvkm_wr32(device, 0x400130, 0xffffffff);
+	nvkm_wr32(device, 0x40011c, 0xffffffff);
+	nvkm_wr32(device, 0x400134, 0xffffffff);
 
-	nv_wr32(priv, 0x400054, 0x2c350f63);
+	nvkm_wr32(device, 0x400054, 0x2c350f63);
 
-	gf100_gr_zbc_init(priv);
+	gf100_gr_zbc_init(gr);
 
-	return gm204_gr_init_ctxctl(priv);
+	return gm204_gr_init_ctxctl(gr);
 }
 
-struct nvkm_oclass *
-gm204_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0x24),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gm204_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gm204_grctx_oclass,
-	.sclass =  gm204_gr_sclass,
+static const struct gf100_gr_func
+gm204_gr = {
+	.init = gm204_gr_init,
 	.mmio = gm204_gr_pack_mmio,
 	.ppc_nr = 2,
-}.base;
+	.grctx = &gm204_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, MAXWELL_B, &gf100_fermi },
+		{ -1, -1, MAXWELL_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gm204_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gm204_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
index 04b9733..341dc560 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm206.c
@@ -24,17 +24,25 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
-struct nvkm_oclass *
-gm206_gr_oclass = &(struct gf100_gr_oclass) {
-	.base.handle = NV_ENGINE(GR, 0x26),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_gr_ctor,
-		.dtor = gf100_gr_dtor,
-		.init = gm204_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
-	.cclass = &gm206_grctx_oclass,
-	.sclass =  gm204_gr_sclass,
+#include <nvif/class.h>
+
+static const struct gf100_gr_func
+gm206_gr = {
+	.init = gm204_gr_init,
 	.mmio = gm204_gr_pack_mmio,
 	.ppc_nr = 2,
-}.base;
+	.grctx = &gm206_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, MAXWELL_B, &gf100_fermi },
+		{ -1, -1, MAXWELL_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gm206_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gf100_gr_new_(&gm206_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
new file mode 100644
index 0000000..65b6e3d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+
+static void
+gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	u32 val;
+
+	/* TODO this needs to be removed once secure boot works */
+	if (1) {
+		nvkm_wr32(device, 0x100ce4, 0xffffffff);
+	}
+
+	/* TODO update once secure boot works */
+	val = nvkm_rd32(device, 0x100c80);
+	val &= 0xf000087f;
+	nvkm_wr32(device, 0x418880, val);
+	nvkm_wr32(device, 0x418890, 0);
+	nvkm_wr32(device, 0x418894, 0);
+
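+	/* mirror the fb unit's MMU debug buffer setup into PGRAPH */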
+	nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
+	nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
+	nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
+
+	nvkm_wr32(device, 0x4188ac, nvkm_rd32(device, 0x100800));
+}
+
+static void
+gm20b_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	nvkm_wr32(device, 0x419e44, 0xdffffe);
+	nvkm_wr32(device, 0x419e4c, 0x5);
+}
+
+static const struct gf100_gr_func
+gm20b_gr = {
+	.dtor = gk20a_gr_dtor,
+	.init = gk20a_gr_init,
+	.init_gpc_mmu = gm20b_gr_init_gpc_mmu,
+	.set_hww_esr_report_mask = gm20b_gr_set_hww_esr_report_mask,
+	.ppc_nr = 1,
+	.grctx = &gm20b_grctx,
+	.sclass = {
+		{ -1, -1, FERMI_TWOD_A },
+		{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+		{ -1, -1, MAXWELL_B, &gf100_fermi },
+		{ -1, -1, MAXWELL_COMPUTE_B },
+		{}
+	}
+};
+
+int
+gm20b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return gk20a_gr_new_(&gm20b_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
new file mode 100644
index 0000000..2e68919
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt200.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+static const struct nvkm_gr_func
+gt200_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.tlb_flush = g84_gr_tlb_flush,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{ -1, -1, 0x8397, &nv50_gr_object },
+		{}
+	}
+};
+
+int
+gt200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&gt200_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
new file mode 100644
index 0000000..2bf7aac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gt215.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+static const struct nvkm_gr_func
+gt215_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.tlb_flush = g84_gr_tlb_flush,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{ -1, -1, 0x8597, &nv50_gr_object },
+		{ -1, -1, 0x85c0, &nv50_gr_object },
+		{}
+	}
+};
+
+int
+gt215_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&gt215_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
new file mode 100644
index 0000000..95d5219
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp79.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+static const struct nvkm_gr_func
+mcp79_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{ -1, -1, 0x8397, &nv50_gr_object },
+		{}
+	}
+};
+
+int
+mcp79_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&mcp79_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
new file mode 100644
index 0000000..027b58e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/mcp89.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv50.h"
+
+static const struct nvkm_gr_func
+mcp89_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.tlb_flush = g84_gr_tlb_flush,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{ -1, -1, 0x85c0, &nv50_gr_object },
+		{ -1, -1, 0x8697, &nv50_gr_object },
+		{}
+	}
+};
+
+int
+mcp89_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&mcp89_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
index 2614510..426ba00 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
@@ -21,13 +21,13 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include <engine/gr.h>
+#include "priv.h"
 #include "regs.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 #include <subdev/instmem.h>
 #include <subdev/timer.h>
 
@@ -346,25 +346,23 @@
 	NV04_PGRAPH_DEBUG_3
 };
 
-struct nv04_gr_priv {
+#define nv04_gr(p) container_of((p), struct nv04_gr, base)
+
+struct nv04_gr {
 	struct nvkm_gr base;
 	struct nv04_gr_chan *chan[16];
 	spinlock_t lock;
 };
 
+#define nv04_gr_chan(p) container_of((p), struct nv04_gr_chan, object)
+
 struct nv04_gr_chan {
-	struct nvkm_object base;
+	struct nvkm_object object;
+	struct nv04_gr *gr;
 	int chid;
 	u32 nv04[ARRAY_SIZE(nv04_gr_ctx_regs)];
 };
 
-
-static inline struct nv04_gr_priv *
-nv04_gr_priv(struct nv04_gr_chan *chan)
-{
-	return (void *)nv_object(chan)->engine;
-}
-
 /*******************************************************************************
  * Graphics object classes
  ******************************************************************************/
@@ -444,35 +442,34 @@
  */
 
 static void
-nv04_gr_set_ctx1(struct nvkm_object *object, u32 mask, u32 value)
+nv04_gr_set_ctx1(struct nvkm_device *device, u32 inst, u32 mask, u32 value)
 {
-	struct nv04_gr_priv *priv = (void *)object->engine;
-	int subc = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
+	int subc = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
 	u32 tmp;
 
-	tmp  = nv_ro32(object, 0x00);
+	tmp  = nvkm_rd32(device, 0x700000 + inst);
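+	/* object instances are accessed through the PRAMIN aperture at 0x700000 */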
 	tmp &= ~mask;
 	tmp |= value;
-	nv_wo32(object, 0x00, tmp);
+	nvkm_wr32(device, 0x700000 + inst, tmp);
 
-	nv_wr32(priv, NV04_PGRAPH_CTX_SWITCH1, tmp);
-	nv_wr32(priv, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
+	nvkm_wr32(device, NV04_PGRAPH_CTX_SWITCH1, tmp);
+	nvkm_wr32(device, NV04_PGRAPH_CTX_CACHE1 + (subc << 2), tmp);
 }
 
 static void
-nv04_gr_set_ctx_val(struct nvkm_object *object, u32 mask, u32 value)
+nv04_gr_set_ctx_val(struct nvkm_device *device, u32 inst, u32 mask, u32 value)
 {
 	int class, op, valid = 1;
 	u32 tmp, ctx1;
 
-	ctx1 = nv_ro32(object, 0x00);
+	ctx1 = nvkm_rd32(device, 0x700000 + inst);
 	class = ctx1 & 0xff;
 	op = (ctx1 >> 15) & 7;
 
-	tmp = nv_ro32(object, 0x0c);
+	tmp = nvkm_rd32(device, 0x70000c + inst);
 	tmp &= ~mask;
 	tmp |= value;
-	nv_wo32(object, 0x0c, tmp);
+	nvkm_wr32(device, 0x70000c + inst, tmp);
 
 	/* check for valid surf2d/surf_dst/surf_color */
 	if (!(tmp & 0x02000000))
@@ -504,527 +501,567 @@
 		break;
 	}
 
-	nv04_gr_set_ctx1(object, 0x01000000, valid << 24);
+	nv04_gr_set_ctx1(device, inst, 0x01000000, valid << 24);
 }
 
-static int
-nv04_gr_mthd_set_operation(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
+static bool
+nv04_gr_mthd_set_operation(struct nvkm_device *device, u32 inst, u32 data)
 {
-	u32 class = nv_ro32(object, 0) & 0xff;
-	u32 data = *(u32 *)args;
+	u8 class = nvkm_rd32(device, 0x700000) & 0x000000ff;
 	if (data > 5)
-		return 1;
+		return false;
 	/* Old versions of the objects only accept first three operations. */
 	if (data > 2 && class < 0x40)
-		return 1;
-	nv04_gr_set_ctx1(object, 0x00038000, data << 15);
+		return false;
+	nv04_gr_set_ctx1(device, inst, 0x00038000, data << 15);
 	/* changing operation changes set of objects needed for validation */
-	nv04_gr_set_ctx_val(object, 0, 0);
-	return 0;
+	nv04_gr_set_ctx_val(device, inst, 0, 0);
+	return true;
 }
 
-static int
-nv04_gr_mthd_surf3d_clip_h(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
+static bool
+nv04_gr_mthd_surf3d_clip_h(struct nvkm_device *device, u32 inst, u32 data)
 {
-	struct nv04_gr_priv *priv = (void *)object->engine;
-	u32 data = *(u32 *)args;
 	u32 min = data & 0xffff, max;
 	u32 w = data >> 16;
 	if (min & 0x8000)
 		/* too large */
-		return 1;
+		return false;
 	if (w & 0x8000)
 		/* yes, it accepts negative for some reason. */
 		w |= 0xffff0000;
 	max = min + w;
 	max &= 0x3ffff;
-	nv_wr32(priv, 0x40053c, min);
-	nv_wr32(priv, 0x400544, max);
-	return 0;
+	nvkm_wr32(device, 0x40053c, min);
+	nvkm_wr32(device, 0x400544, max);
+	return true;
 }
 
-static int
-nv04_gr_mthd_surf3d_clip_v(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
+static bool
+nv04_gr_mthd_surf3d_clip_v(struct nvkm_device *device, u32 inst, u32 data)
 {
-	struct nv04_gr_priv *priv = (void *)object->engine;
-	u32 data = *(u32 *)args;
 	u32 min = data & 0xffff, max;
 	u32 w = data >> 16;
 	if (min & 0x8000)
 		/* too large */
-		return 1;
+		return false;
 	if (w & 0x8000)
 		/* yes, it accepts negative for some reason. */
 		w |= 0xffff0000;
 	max = min + w;
 	max &= 0x3ffff;
-	nv_wr32(priv, 0x400540, min);
-	nv_wr32(priv, 0x400548, max);
-	return 0;
+	nvkm_wr32(device, 0x400540, min);
+	nvkm_wr32(device, 0x400548, max);
+	return true;
 }
 
-static u16
-nv04_gr_mthd_bind_class(struct nvkm_object *object, u32 *args, u32 size)
+static u8
+nv04_gr_mthd_bind_class(struct nvkm_device *device, u32 inst)
 {
-	struct nvkm_instmem *imem = nvkm_instmem(object);
-	u32 inst = *(u32 *)args << 4;
-	return nv_ro32(imem, inst);
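+	/* the class id is the low byte of the object's first word in PRAMIN */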
+	return nvkm_rd32(device, 0x700000 + (inst << 4));
 }
 
-static int
-nv04_gr_mthd_bind_surf2d(struct nvkm_object *object, u32 mthd,
-			    void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf2d(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx1(object, 0x00004000, 0);
-		nv04_gr_set_ctx_val(object, 0x02000000, 0);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
+		return true;
 	case 0x42:
-		nv04_gr_set_ctx1(object, 0x00004000, 0);
-		nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_surf2d_swzsurf(struct nvkm_object *object, u32 mthd,
-				 void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf2d_swzsurf(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx1(object, 0x00004000, 0);
-		nv04_gr_set_ctx_val(object, 0x02000000, 0);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
+		return true;
 	case 0x42:
-		nv04_gr_set_ctx1(object, 0x00004000, 0);
-		nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x00004000, 0);
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+		return true;
 	case 0x52:
-		nv04_gr_set_ctx1(object, 0x00004000, 0x00004000);
-		nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x00004000, 0x00004000);
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv01_gr_mthd_bind_patt(struct nvkm_object *object, u32 mthd,
-		       void *args, u32 size)
+static bool
+nv01_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x08000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0);
+		return true;
 	case 0x18:
-		nv04_gr_set_ctx_val(object, 0x08000000, 0x08000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_patt(struct nvkm_object *object, u32 mthd,
-		       void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_patt(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x08000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0);
+		return true;
 	case 0x44:
-		nv04_gr_set_ctx_val(object, 0x08000000, 0x08000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x08000000, 0x08000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_rop(struct nvkm_object *object, u32 mthd,
-		      void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_rop(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x10000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x10000000, 0);
+		return true;
 	case 0x43:
-		nv04_gr_set_ctx_val(object, 0x10000000, 0x10000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x10000000, 0x10000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_beta1(struct nvkm_object *object, u32 mthd,
-			void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_beta1(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x20000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x20000000, 0);
+		return true;
 	case 0x12:
-		nv04_gr_set_ctx_val(object, 0x20000000, 0x20000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x20000000, 0x20000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_beta4(struct nvkm_object *object, u32 mthd,
-			void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_beta4(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x40000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x40000000, 0);
+		return true;
 	case 0x72:
-		nv04_gr_set_ctx_val(object, 0x40000000, 0x40000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x40000000, 0x40000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_surf_dst(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf_dst(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x02000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
+		return true;
 	case 0x58:
-		nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_surf_src(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf_src(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x04000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0);
+		return true;
 	case 0x59:
-		nv04_gr_set_ctx_val(object, 0x04000000, 0x04000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_surf_color(struct nvkm_object *object, u32 mthd,
-			     void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf_color(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x02000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0);
+		return true;
 	case 0x5a:
-		nv04_gr_set_ctx_val(object, 0x02000000, 0x02000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x02000000, 0x02000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv04_gr_mthd_bind_surf_zeta(struct nvkm_object *object, u32 mthd,
-			    void *args, u32 size)
+static bool
+nv04_gr_mthd_bind_surf_zeta(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx_val(object, 0x04000000, 0);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0);
+		return true;
 	case 0x5b:
-		nv04_gr_set_ctx_val(object, 0x04000000, 0x04000000);
-		return 0;
+		nv04_gr_set_ctx_val(device, inst, 0x04000000, 0x04000000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv01_gr_mthd_bind_clip(struct nvkm_object *object, u32 mthd,
-		       void *args, u32 size)
+static bool
+nv01_gr_mthd_bind_clip(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx1(object, 0x2000, 0);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x2000, 0);
+		return true;
 	case 0x19:
-		nv04_gr_set_ctx1(object, 0x2000, 0x2000);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x2000, 0x2000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static int
-nv01_gr_mthd_bind_chroma(struct nvkm_object *object, u32 mthd,
-			 void *args, u32 size)
+static bool
+nv01_gr_mthd_bind_chroma(struct nvkm_device *device, u32 inst, u32 data)
 {
-	switch (nv04_gr_mthd_bind_class(object, args, size)) {
+	switch (nv04_gr_mthd_bind_class(device, data)) {
 	case 0x30:
-		nv04_gr_set_ctx1(object, 0x1000, 0);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x1000, 0);
+		return true;
 	/* Yes, for some reason even the old versions of objects
 	 * accept 0x57 and not 0x17. Consistency be damned.
 	 */
 	case 0x57:
-		nv04_gr_set_ctx1(object, 0x1000, 0x1000);
-		return 0;
+		nv04_gr_set_ctx1(device, inst, 0x1000, 0x1000);
+		return true;
 	}
-	return 1;
+	return false;
 }
 
-static struct nvkm_omthds
-nv03_gr_gdi_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_patt },
-	{ 0x0188, 0x0188, nv04_gr_mthd_bind_rop },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_beta1 },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_surf_dst },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv03_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_patt; break;
+	case 0x0188: func = nv04_gr_mthd_bind_rop; break;
+	case 0x018c: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0190: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_gdi_omthds[] = {
-	{ 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_gdi(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv01_gr_blit_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
-	{ 0x018c, 0x018c, nv01_gr_mthd_bind_patt },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf_dst },
-	{ 0x019c, 0x019c, nv04_gr_mthd_bind_surf_src },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv01_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+	case 0x018c: func = nv01_gr_mthd_bind_patt; break;
+	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x019c: func = nv04_gr_mthd_bind_surf_src; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_blit_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_patt },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_beta4 },
-	{ 0x019c, 0x019c, nv04_gr_mthd_bind_surf2d },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_blit(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+	case 0x018c: func = nv04_gr_mthd_bind_patt; break;
+	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0198: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x019c: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_iifc_omthds[] = {
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_chroma },
-	{ 0x018c, 0x018c, nv01_gr_mthd_bind_clip },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_patt },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_rop },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_beta1 },
-	{ 0x019c, 0x019c, nv04_gr_mthd_bind_beta4 },
-	{ 0x01a0, 0x01a0, nv04_gr_mthd_bind_surf2d_swzsurf },
-	{ 0x03e4, 0x03e4, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_iifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0188: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x018c: func = nv01_gr_mthd_bind_clip; break;
+	case 0x0190: func = nv04_gr_mthd_bind_patt; break;
+	case 0x0194: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0198: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x019c: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x01a0: func = nv04_gr_mthd_bind_surf2d_swzsurf; break;
+	case 0x03e4: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv01_gr_ifc_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
-	{ 0x018c, 0x018c, nv01_gr_mthd_bind_patt },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf_dst },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv01_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+	case 0x018c: func = nv01_gr_mthd_bind_patt; break;
+	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_ifc_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_patt },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_rop },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta1 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_beta4 },
-	{ 0x019c, 0x019c, nv04_gr_mthd_bind_surf2d },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_ifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+	case 0x018c: func = nv04_gr_mthd_bind_patt; break;
+	case 0x0190: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0198: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x019c: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv03_gr_sifc_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv03_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_sifc_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_chroma },
-	{ 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_sifc(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_chroma; break;
+	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv03_gr_sifm_omthds[] = {
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst },
-	{ 0x0304, 0x0304, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv03_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x0304: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_sifm_omthds[] = {
-	{ 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
-	{ 0x0304, 0x0304, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_sifm(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x0304: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_surf3d_omthds[] = {
-	{ 0x02f8, 0x02f8, nv04_gr_mthd_surf3d_clip_h },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_surf3d_clip_v },
-	{}
-};
+static bool
+nv04_gr_mthd_surf3d(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x02f8: func = nv04_gr_mthd_surf3d_clip_h; break;
+	case 0x02fc: func = nv04_gr_mthd_surf3d_clip_v; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv03_gr_ttri_omthds[] = {
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_clip },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_surf_color },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_surf_zeta },
-	{}
-};
+static bool
+nv03_gr_mthd_ttri(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0188: func = nv01_gr_mthd_bind_clip; break;
+	case 0x018c: func = nv04_gr_mthd_bind_surf_color; break;
+	case 0x0190: func = nv04_gr_mthd_bind_surf_zeta; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv01_gr_prim_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_clip },
-	{ 0x0188, 0x0188, nv01_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_surf_dst },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv01_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_clip; break;
+	case 0x0188: func = nv01_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_surf_dst; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
 
-static struct nvkm_omthds
-nv04_gr_prim_omthds[] = {
-	{ 0x0184, 0x0184, nv01_gr_mthd_bind_clip },
-	{ 0x0188, 0x0188, nv04_gr_mthd_bind_patt },
-	{ 0x018c, 0x018c, nv04_gr_mthd_bind_rop },
-	{ 0x0190, 0x0190, nv04_gr_mthd_bind_beta1 },
-	{ 0x0194, 0x0194, nv04_gr_mthd_bind_beta4 },
-	{ 0x0198, 0x0198, nv04_gr_mthd_bind_surf2d },
-	{ 0x02fc, 0x02fc, nv04_gr_mthd_set_operation },
-	{}
-};
+static bool
+nv04_gr_mthd_prim(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32);
+	switch (mthd) {
+	case 0x0184: func = nv01_gr_mthd_bind_clip; break;
+	case 0x0188: func = nv04_gr_mthd_bind_patt; break;
+	case 0x018c: func = nv04_gr_mthd_bind_rop; break;
+	case 0x0190: func = nv04_gr_mthd_bind_beta1; break;
+	case 0x0194: func = nv04_gr_mthd_bind_beta4; break;
+	case 0x0198: func = nv04_gr_mthd_bind_surf2d; break;
+	case 0x02fc: func = nv04_gr_mthd_set_operation; break;
+	default:
+		return false;
+	}
+	return func(device, inst, data);
+}
+
+static bool
+nv04_gr_mthd(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
+{
+	bool (*func)(struct nvkm_device *, u32, u32, u32);
+	switch (nvkm_rd32(device, 0x700000 + inst) & 0x000000ff) {
+	case 0x1c ... 0x1e:
+		   func = nv01_gr_mthd_prim; break;
+	case 0x1f: func = nv01_gr_mthd_blit; break;
+	case 0x21: func = nv01_gr_mthd_ifc; break;
+	case 0x36: func = nv03_gr_mthd_sifc; break;
+	case 0x37: func = nv03_gr_mthd_sifm; break;
+	case 0x48: func = nv03_gr_mthd_ttri; break;
+	case 0x4a: func = nv04_gr_mthd_gdi; break;
+	case 0x4b: func = nv03_gr_mthd_gdi; break;
+	case 0x53: func = nv04_gr_mthd_surf3d; break;
+	case 0x5c ... 0x5e:
+		   func = nv04_gr_mthd_prim; break;
+	case 0x5f: func = nv04_gr_mthd_blit; break;
+	case 0x60: func = nv04_gr_mthd_iifc; break;
+	case 0x61: func = nv04_gr_mthd_ifc; break;
+	case 0x76: func = nv04_gr_mthd_sifc; break;
+	case 0x77: func = nv04_gr_mthd_sifm; break;
+	default:
+		return false;
+	}
+	return func(device, inst, mthd, data);
+}
 
 static int
-nv04_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+nv04_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		    int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *obj;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 16, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, nv_mclass(obj));
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,
+				  false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
 #ifdef __BIG_ENDIAN
-	nv_mo32(obj, 0x00, 0x00080000, 0x00080000);
+		nvkm_mo32(*pgpuobj, 0x08, 0x00080000, 0x00080000);
 #endif
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	return 0;
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-struct nvkm_ofuncs
-nv04_gr_ofuncs = {
-	.ctor = nv04_gr_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_oclass
-nv04_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
-	{ 0x0017, &nv04_gr_ofuncs }, /* chroma */
-	{ 0x0018, &nv04_gr_ofuncs }, /* pattern (nv01) */
-	{ 0x0019, &nv04_gr_ofuncs }, /* clip */
-	{ 0x001c, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* line */
-	{ 0x001d, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* tri */
-	{ 0x001e, &nv04_gr_ofuncs, nv01_gr_prim_omthds }, /* rect */
-	{ 0x001f, &nv04_gr_ofuncs, nv01_gr_blit_omthds },
-	{ 0x0021, &nv04_gr_ofuncs, nv01_gr_ifc_omthds },
-	{ 0x0030, &nv04_gr_ofuncs }, /* null */
-	{ 0x0036, &nv04_gr_ofuncs, nv03_gr_sifc_omthds },
-	{ 0x0037, &nv04_gr_ofuncs, nv03_gr_sifm_omthds },
-	{ 0x0038, &nv04_gr_ofuncs }, /* dvd subpicture */
-	{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
-	{ 0x0042, &nv04_gr_ofuncs }, /* surf2d */
-	{ 0x0043, &nv04_gr_ofuncs }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
-	{ 0x0048, &nv04_gr_ofuncs, nv03_gr_ttri_omthds },
-	{ 0x004a, &nv04_gr_ofuncs, nv04_gr_gdi_omthds },
-	{ 0x004b, &nv04_gr_ofuncs, nv03_gr_gdi_omthds },
-	{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
-	{ 0x0053, &nv04_gr_ofuncs, nv04_gr_surf3d_omthds },
-	{ 0x0054, &nv04_gr_ofuncs }, /* ttri */
-	{ 0x0055, &nv04_gr_ofuncs }, /* mtri */
-	{ 0x0057, &nv04_gr_ofuncs }, /* chroma */
-	{ 0x0058, &nv04_gr_ofuncs }, /* surf_dst */
-	{ 0x0059, &nv04_gr_ofuncs }, /* surf_src */
-	{ 0x005a, &nv04_gr_ofuncs }, /* surf_color */
-	{ 0x005b, &nv04_gr_ofuncs }, /* surf_zeta */
-	{ 0x005c, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* line */
-	{ 0x005d, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* tri */
-	{ 0x005e, &nv04_gr_ofuncs, nv04_gr_prim_omthds }, /* rect */
-	{ 0x005f, &nv04_gr_ofuncs, nv04_gr_blit_omthds },
-	{ 0x0060, &nv04_gr_ofuncs, nv04_gr_iifc_omthds },
-	{ 0x0061, &nv04_gr_ofuncs, nv04_gr_ifc_omthds },
-	{ 0x0064, &nv04_gr_ofuncs }, /* iifc (nv05) */
-	{ 0x0065, &nv04_gr_ofuncs }, /* ifc (nv05) */
-	{ 0x0066, &nv04_gr_ofuncs }, /* sifc (nv05) */
-	{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
-	{ 0x0076, &nv04_gr_ofuncs, nv04_gr_sifc_omthds },
-	{ 0x0077, &nv04_gr_ofuncs, nv04_gr_sifm_omthds },
-	{},
+const struct nvkm_object_func
+nv04_gr_object = {
+	.bind = nv04_gr_object_bind,
 };
 
 /*******************************************************************************
@@ -1032,13 +1069,14 @@
  ******************************************************************************/
 
 static struct nv04_gr_chan *
-nv04_gr_channel(struct nv04_gr_priv *priv)
+nv04_gr_channel(struct nv04_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	struct nv04_gr_chan *chan = NULL;
-	if (nv_rd32(priv, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
-		int chid = nv_rd32(priv, NV04_PGRAPH_CTX_USER) >> 24;
-		if (chid < ARRAY_SIZE(priv->chan))
-			chan = priv->chan[chid];
+	if (nvkm_rd32(device, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
+		int chid = nvkm_rd32(device, NV04_PGRAPH_CTX_USER) >> 24;
+		if (chid < ARRAY_SIZE(gr->chan))
+			chan = gr->chan[chid];
 	}
 	return chan;
 }
@@ -1046,55 +1084,52 @@
 static int
 nv04_gr_load_context(struct nv04_gr_chan *chan, int chid)
 {
-	struct nv04_gr_priv *priv = nv04_gr_priv(chan);
+	struct nvkm_device *device = chan->gr->base.engine.subdev.device;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
-		nv_wr32(priv, nv04_gr_ctx_regs[i], chan->nv04[i]);
+		nvkm_wr32(device, nv04_gr_ctx_regs[i], chan->nv04[i]);
 
-	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
-	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
-	nv_mask(priv, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
+	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
+	nvkm_mask(device, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
 	return 0;
 }
 
 static int
 nv04_gr_unload_context(struct nv04_gr_chan *chan)
 {
-	struct nv04_gr_priv *priv = nv04_gr_priv(chan);
+	struct nvkm_device *device = chan->gr->base.engine.subdev.device;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
-		chan->nv04[i] = nv_rd32(priv, nv04_gr_ctx_regs[i]);
+		chan->nv04[i] = nvkm_rd32(device, nv04_gr_ctx_regs[i]);
 
-	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
-	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
+	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
+	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
 	return 0;
 }
 
 static void
-nv04_gr_context_switch(struct nv04_gr_priv *priv)
+nv04_gr_context_switch(struct nv04_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	struct nv04_gr_chan *prev = NULL;
 	struct nv04_gr_chan *next = NULL;
-	unsigned long flags;
 	int chid;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	nv04_gr_idle(priv);
+	nv04_gr_idle(&gr->base);
 
 	/* If previous context is valid, we need to save it */
-	prev = nv04_gr_channel(priv);
+	prev = nv04_gr_channel(gr);
 	if (prev)
 		nv04_gr_unload_context(prev);
 
 	/* load context for next channel */
-	chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
-	next = priv->chan[chid];
+	chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
+	next = gr->chan[chid];
 	if (next)
 		nv04_gr_load_context(next, chid);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static u32 *ctx_reg(struct nv04_gr_chan *chan, u32 reg)
@@ -1109,98 +1144,85 @@
 	return NULL;
 }
 
-static int
-nv04_gr_context_ctor(struct nvkm_object *parent,
-		     struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+static void *
+nv04_gr_chan_dtor(struct nvkm_object *object)
 {
-	struct nvkm_fifo_chan *fifo = (void *)parent;
-	struct nv04_gr_priv *priv = (void *)engine;
-	struct nv04_gr_chan *chan;
+	struct nv04_gr_chan *chan = nv04_gr_chan(object);
+	struct nv04_gr *gr = chan->gr;
 	unsigned long flags;
-	int ret;
 
-	ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
+	spin_lock_irqsave(&gr->lock, flags);
+	gr->chan[chan->chid] = NULL;
+	spin_unlock_irqrestore(&gr->lock, flags);
+	return chan;
+}
 
-	spin_lock_irqsave(&priv->lock, flags);
-	if (priv->chan[fifo->chid]) {
-		*pobject = nv_object(priv->chan[fifo->chid]);
-		atomic_inc(&(*pobject)->refcount);
-		spin_unlock_irqrestore(&priv->lock, flags);
-		nvkm_object_destroy(&chan->base);
-		return 1;
-	}
+static int
+nv04_gr_chan_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nv04_gr_chan *chan = nv04_gr_chan(object);
+	struct nv04_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	unsigned long flags;
 
-	*ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
-
-	priv->chan[fifo->chid] = chan;
-	chan->chid = fifo->chid;
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_lock_irqsave(&gr->lock, flags);
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+	if (nv04_gr_channel(gr) == chan)
+		nv04_gr_unload_context(chan);
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&gr->lock, flags);
 	return 0;
 }
 
-static void
-nv04_gr_context_dtor(struct nvkm_object *object)
-{
-	struct nv04_gr_priv *priv = (void *)object->engine;
-	struct nv04_gr_chan *chan = (void *)object;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	priv->chan[chan->chid] = NULL;
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	nvkm_object_destroy(&chan->base);
-}
+static const struct nvkm_object_func
+nv04_gr_chan = {
+	.dtor = nv04_gr_chan_dtor,
+	.fini = nv04_gr_chan_fini,
+};
 
 static int
-nv04_gr_context_fini(struct nvkm_object *object, bool suspend)
+nv04_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
-	struct nv04_gr_priv *priv = (void *)object->engine;
-	struct nv04_gr_chan *chan = (void *)object;
+	struct nv04_gr *gr = nv04_gr(base);
+	struct nv04_gr_chan *chan;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-	if (nv04_gr_channel(priv) == chan)
-		nv04_gr_unload_context(chan);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv04_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
 
-	return nvkm_object_fini(&chan->base, suspend);
+	*ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
+
+	spin_lock_irqsave(&gr->lock, flags);
+	gr->chan[chan->chid] = chan;
+	spin_unlock_irqrestore(&gr->lock, flags);
+	return 0;
 }
 
-static struct nvkm_oclass
-nv04_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_gr_context_ctor,
-		.dtor = nv04_gr_context_dtor,
-		.init = nvkm_object_init,
-		.fini = nv04_gr_context_fini,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
 bool
-nv04_gr_idle(void *obj)
+nv04_gr_idle(struct nvkm_gr *gr)
 {
-	struct nvkm_gr *gr = nvkm_gr(obj);
+	struct nvkm_subdev *subdev = &gr->engine.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 mask = 0xffffffff;
 
-	if (nv_device(obj)->card_type == NV_40)
+	if (device->card_type == NV_40)
 		mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
 
-	if (!nv_wait(gr, NV04_PGRAPH_STATUS, mask, 0)) {
-		nv_error(gr, "idle timed out with status 0x%08x\n",
-			 nv_rd32(gr, NV04_PGRAPH_STATUS));
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, NV04_PGRAPH_STATUS) & mask))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "idle timed out with status %08x\n",
+			   nvkm_rd32(device, NV04_PGRAPH_STATUS));
 		return false;
 	}
 
@@ -1247,136 +1269,159 @@
 };
 
 static void
-nv04_gr_intr(struct nvkm_subdev *subdev)
+nv04_gr_intr(struct nvkm_gr *base)
 {
-	struct nv04_gr_priv *priv = (void *)subdev;
-	struct nv04_gr_chan *chan = NULL;
-	struct nvkm_namedb *namedb = NULL;
-	struct nvkm_handle *handle = NULL;
-	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
-	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
-	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
-	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	struct nv04_gr *gr = nv04_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
+	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
+	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
 	u32 chid = (addr & 0x0f000000) >> 24;
 	u32 subc = (addr & 0x0000e000) >> 13;
 	u32 mthd = (addr & 0x00001ffc);
-	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
-	u32 class = nv_rd32(priv, 0x400180 + subc * 4) & 0xff;
-	u32 inst = (nv_rd32(priv, 0x40016c) & 0xffff) << 4;
+	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nvkm_rd32(device, 0x400180 + subc * 4) & 0xff;
+	u32 inst = (nvkm_rd32(device, 0x40016c) & 0xffff) << 4;
 	u32 show = stat;
+	char msg[128], src[128], sta[128];
+	struct nv04_gr_chan *chan;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	chan = priv->chan[chid];
-	if (chan)
-		namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_lock_irqsave(&gr->lock, flags);
+	chan = gr->chan[chid];
 
 	if (stat & NV_PGRAPH_INTR_NOTIFY) {
 		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
-			handle = nvkm_namedb_get_vinst(namedb, inst);
-			if (handle && !nv_call(handle->object, mthd, data))
+			if (!nv04_gr_mthd(device, inst, mthd, data))
 				show &= ~NV_PGRAPH_INTR_NOTIFY;
 		}
 	}
 
 	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
-		nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+		nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
 		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
 		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-		nv04_gr_context_switch(priv);
+		nv04_gr_context_switch(gr);
 	}
 
-	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
-	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
+	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
 
 	if (show) {
-		nv_error(priv, "%s", "");
-		nvkm_bitfield_print(nv04_gr_intr_name, show);
-		pr_cont(" nsource:");
-		nvkm_bitfield_print(nv04_gr_nsource, nsource);
-		pr_cont(" nstatus:");
-		nvkm_bitfield_print(nv04_gr_nstatus, nstatus);
-		pr_cont("\n");
-		nv_error(priv,
-			 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, nvkm_client_name(chan), subc, class, mthd,
-			 data);
+		nvkm_snprintbf(msg, sizeof(msg), nv04_gr_intr_name, show);
+		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
+		nvkm_snprintbf(sta, sizeof(sta), nv04_gr_nstatus, nstatus);
+		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
+				   "nstatus %08x [%s] ch %d [%s] subc %d "
+				   "class %04x mthd %04x data %08x\n",
+			   show, msg, nsource, src, nstatus, sta, chid,
+			   chan ? chan->object.client->name : "unknown",
+			   subc, class, mthd, data);
 	}
 
-	nvkm_namedb_put(handle);
+	spin_unlock_irqrestore(&gr->lock, flags);
 }
 
 static int
-nv04_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+nv04_gr_init(struct nvkm_gr *base)
 {
-	struct nv04_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv04_gr_intr;
-	nv_engine(priv)->cclass = &nv04_gr_cclass;
-	nv_engine(priv)->sclass = nv04_gr_sclass;
-	spin_lock_init(&priv->lock);
-	return 0;
-}
-
-static int
-nv04_gr_init(struct nvkm_object *object)
-{
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nv04_gr_priv *priv = (void *)engine;
-	int ret;
-
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
+	struct nv04_gr *gr = nv04_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 
 	/* Enable PGRAPH interrupts */
-	nv_wr32(priv, NV03_PGRAPH_INTR, 0xFFFFFFFF);
-	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR, 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
-	nv_wr32(priv, NV04_PGRAPH_VALID1, 0);
-	nv_wr32(priv, NV04_PGRAPH_VALID2, 0);
-	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x000001FF);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x1231c000);
+	nvkm_wr32(device, NV04_PGRAPH_VALID1, 0);
+	nvkm_wr32(device, NV04_PGRAPH_VALID2, 0);
+	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x000001FF);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x1231c000);
 	/*1231C000 blob, 001 haiku*/
 	/*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x72111100);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x72111100);
 	/*0x72111100 blob , 01 haiku*/
-	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
+	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
 	/*haiku same*/
 
-	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
+	/*nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
 	/*haiku and blob 10d4*/
 
-	nv_wr32(priv, NV04_PGRAPH_STATE        , 0xFFFFFFFF);
-	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL  , 0x10000100);
-	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
+	nvkm_wr32(device, NV04_PGRAPH_STATE        , 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_CTX_CONTROL  , 0x10000100);
+	nvkm_mask(device, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
 
 	/* These don't belong here, they're part of a per-channel context */
-	nv_wr32(priv, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
-	nv_wr32(priv, NV04_PGRAPH_BETA_AND     , 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_BETA_AND     , 0xFFFFFFFF);
 	return 0;
 }
 
-struct nvkm_oclass
-nv04_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_gr_ctor,
-		.dtor = _nvkm_gr_dtor,
-		.init = nv04_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+static const struct nvkm_gr_func
+nv04_gr = {
+	.init = nv04_gr_init,
+	.intr = nv04_gr_intr,
+	.chan_new = nv04_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0017, &nv04_gr_object }, /* chroma */
+		{ -1, -1, 0x0018, &nv04_gr_object }, /* pattern (nv01) */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x001c, &nv04_gr_object }, /* line */
+		{ -1, -1, 0x001d, &nv04_gr_object }, /* tri */
+		{ -1, -1, 0x001e, &nv04_gr_object }, /* rect */
+		{ -1, -1, 0x001f, &nv04_gr_object },
+		{ -1, -1, 0x0021, &nv04_gr_object },
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0036, &nv04_gr_object },
+		{ -1, -1, 0x0037, &nv04_gr_object },
+		{ -1, -1, 0x0038, &nv04_gr_object }, /* dvd subpicture */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0042, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
+		{ -1, -1, 0x0048, &nv04_gr_object },
+		{ -1, -1, 0x004a, &nv04_gr_object },
+		{ -1, -1, 0x004b, &nv04_gr_object },
+		{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x0053, &nv04_gr_object },
+		{ -1, -1, 0x0054, &nv04_gr_object }, /* ttri */
+		{ -1, -1, 0x0055, &nv04_gr_object }, /* mtri */
+		{ -1, -1, 0x0057, &nv04_gr_object }, /* chroma */
+		{ -1, -1, 0x0058, &nv04_gr_object }, /* surf_dst */
+		{ -1, -1, 0x0059, &nv04_gr_object }, /* surf_src */
+		{ -1, -1, 0x005a, &nv04_gr_object }, /* surf_color */
+		{ -1, -1, 0x005b, &nv04_gr_object }, /* surf_zeta */
+		{ -1, -1, 0x005c, &nv04_gr_object }, /* line */
+		{ -1, -1, 0x005d, &nv04_gr_object }, /* tri */
+		{ -1, -1, 0x005e, &nv04_gr_object }, /* rect */
+		{ -1, -1, 0x005f, &nv04_gr_object },
+		{ -1, -1, 0x0060, &nv04_gr_object },
+		{ -1, -1, 0x0061, &nv04_gr_object },
+		{ -1, -1, 0x0064, &nv04_gr_object }, /* iifc (nv05) */
+		{ -1, -1, 0x0065, &nv04_gr_object }, /* ifc (nv05) */
+		{ -1, -1, 0x0066, &nv04_gr_object }, /* sifc (nv05) */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0076, &nv04_gr_object },
+		{ -1, -1, 0x0077, &nv04_gr_object },
+		{}
+	}
 };
+
+int
+nv04_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	struct nv04_gr *gr;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	spin_lock_init(&gr->lock);
+	*pgr = &gr->base;
+
+	return nvkm_gr_ctor(&nv04_gr, device, index, 0x00001000,
+			    true, &gr->base);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
index d6ace41..4542867 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
@@ -21,13 +21,13 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include <engine/gr.h>
+#include "nv10.h"
 #include "regs.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 #include <subdev/fb.h>
 
 struct pipe_state {
@@ -386,14 +386,19 @@
 	0x00400a04,
 };
 
-struct nv10_gr_priv {
+#define nv10_gr(p) container_of((p), struct nv10_gr, base)
+
+struct nv10_gr {
 	struct nvkm_gr base;
 	struct nv10_gr_chan *chan[32];
 	spinlock_t lock;
 };
 
+#define nv10_gr_chan(p) container_of((p), struct nv10_gr_chan, object)
+
 struct nv10_gr_chan {
-	struct nvkm_object base;
+	struct nvkm_object object;
+	struct nv10_gr *gr;
 	int chid;
 	int nv10[ARRAY_SIZE(nv10_gr_ctx_regs)];
 	int nv17[ARRAY_SIZE(nv17_gr_ctx_regs)];
@@ -402,214 +407,151 @@
 };
 
 
-static inline struct nv10_gr_priv *
-nv10_gr_priv(struct nv10_gr_chan *chan)
-{
-	return (void *)nv_object(chan)->engine;
-}
-
 /*******************************************************************************
  * Graphics object classes
  ******************************************************************************/
 
-#define PIPE_SAVE(priv, state, addr)					\
+#define PIPE_SAVE(gr, state, addr)					\
 	do {								\
 		int __i;						\
-		nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
 		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
-			state[__i] = nv_rd32(priv, NV10_PGRAPH_PIPE_DATA); \
+			state[__i] = nvkm_rd32(device, NV10_PGRAPH_PIPE_DATA); \
 	} while (0)
 
-#define PIPE_RESTORE(priv, state, addr)					\
+#define PIPE_RESTORE(gr, state, addr)					\
 	do {								\
 		int __i;						\
-		nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
 		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
-			nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, state[__i]); \
+			nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, state[__i]); \
 	} while (0)
 
-static struct nvkm_oclass
-nv10_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
-	{ 0x004a, &nv04_gr_ofuncs }, /* gdi */
-	{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
-	{ 0x005f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0062, &nv04_gr_ofuncs }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0093, &nv04_gr_ofuncs }, /* surf3d */
-	{ 0x0094, &nv04_gr_ofuncs }, /* ttri */
-	{ 0x0095, &nv04_gr_ofuncs }, /* mtri */
-	{ 0x0056, &nv04_gr_ofuncs }, /* celcius */
-	{},
-};
-
-static struct nvkm_oclass
-nv15_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
-	{ 0x004a, &nv04_gr_ofuncs }, /* gdi */
-	{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
-	{ 0x005f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0062, &nv04_gr_ofuncs }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0093, &nv04_gr_ofuncs }, /* surf3d */
-	{ 0x0094, &nv04_gr_ofuncs }, /* ttri */
-	{ 0x0095, &nv04_gr_ofuncs }, /* mtri */
-	{ 0x0096, &nv04_gr_ofuncs }, /* celcius */
-	{},
-};
-
-static int
-nv17_gr_mthd_lma_window(struct nvkm_object *object, u32 mthd,
-			void *args, u32 size)
+static void
+nv17_gr_mthd_lma_window(struct nv10_gr_chan *chan, u32 mthd, u32 data)
 {
-	struct nv10_gr_chan *chan = (void *)object->parent;
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nvkm_device *device = chan->object.engine->subdev.device;
+	struct nvkm_gr *gr = &chan->gr->base;
 	struct pipe_state *pipe = &chan->pipe_state;
 	u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
 	u32 xfmode0, xfmode1;
-	u32 data = *(u32 *)args;
 	int i;
 
 	chan->lma_window[(mthd - 0x1638) / 4] = data;
 
 	if (mthd != 0x1644)
-		return 0;
+		return;
 
-	nv04_gr_idle(priv);
+	nv04_gr_idle(gr);
 
-	PIPE_SAVE(priv, pipe_0x0040, 0x0040);
-	PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
+	PIPE_SAVE(device, pipe_0x0040, 0x0040);
+	PIPE_SAVE(device, pipe->pipe_0x0200, 0x0200);
 
-	PIPE_RESTORE(priv, chan->lma_window, 0x6790);
+	PIPE_RESTORE(device, chan->lma_window, 0x6790);
 
-	nv04_gr_idle(priv);
+	nv04_gr_idle(gr);
 
-	xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
-	xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
+	xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0);
+	xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1);
 
-	PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
-	PIPE_SAVE(priv, pipe_0x64c0, 0x64c0);
-	PIPE_SAVE(priv, pipe_0x6ab0, 0x6ab0);
-	PIPE_SAVE(priv, pipe_0x6a80, 0x6a80);
+	PIPE_SAVE(device, pipe->pipe_0x4400, 0x4400);
+	PIPE_SAVE(device, pipe_0x64c0, 0x64c0);
+	PIPE_SAVE(device, pipe_0x6ab0, 0x6ab0);
+	PIPE_SAVE(device, pipe_0x6a80, 0x6a80);
 
-	nv04_gr_idle(priv);
+	nv04_gr_idle(gr);
 
-	nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
-	nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
 	for (i = 0; i < 4; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
 	for (i = 0; i < 4; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
 	for (i = 0; i < 3; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
 	for (i = 0; i < 3; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
-	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008);
 
-	PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
+	PIPE_RESTORE(device, pipe->pipe_0x0200, 0x0200);
 
-	nv04_gr_idle(priv);
+	nv04_gr_idle(gr);
 
-	PIPE_RESTORE(priv, pipe_0x0040, 0x0040);
+	PIPE_RESTORE(device, pipe_0x0040, 0x0040);
 
-	nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
-	nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1);
 
-	PIPE_RESTORE(priv, pipe_0x64c0, 0x64c0);
-	PIPE_RESTORE(priv, pipe_0x6ab0, 0x6ab0);
-	PIPE_RESTORE(priv, pipe_0x6a80, 0x6a80);
-	PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
+	PIPE_RESTORE(device, pipe_0x64c0, 0x64c0);
+	PIPE_RESTORE(device, pipe_0x6ab0, 0x6ab0);
+	PIPE_RESTORE(device, pipe_0x6a80, 0x6a80);
+	PIPE_RESTORE(device, pipe->pipe_0x4400, 0x4400);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
-	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
 
-	nv04_gr_idle(priv);
-
-	return 0;
+	nv04_gr_idle(gr);
 }
 
-static int
-nv17_gr_mthd_lma_enable(struct nvkm_object *object, u32 mthd,
-			void *args, u32 size)
+static void
+nv17_gr_mthd_lma_enable(struct nv10_gr_chan *chan, u32 mthd, u32 data)
 {
-	struct nv10_gr_chan *chan = (void *)object->parent;
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nvkm_device *device = chan->object.engine->subdev.device;
+	struct nvkm_gr *gr = &chan->gr->base;
 
-	nv04_gr_idle(priv);
+	nv04_gr_idle(gr);
 
-	nv_mask(priv, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
-	nv_mask(priv, 0x4006b0, 0x08000000, 0x08000000);
-	return 0;
+	nvkm_mask(device, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
+	nvkm_mask(device, 0x4006b0, 0x08000000, 0x08000000);
 }
 
-static struct nvkm_omthds
-nv17_celcius_omthds[] = {
-	{ 0x1638, 0x1638, nv17_gr_mthd_lma_window },
-	{ 0x163c, 0x163c, nv17_gr_mthd_lma_window },
-	{ 0x1640, 0x1640, nv17_gr_mthd_lma_window },
-	{ 0x1644, 0x1644, nv17_gr_mthd_lma_window },
-	{ 0x1658, 0x1658, nv17_gr_mthd_lma_enable },
-	{}
-};
+static bool
+nv17_gr_mthd_celcius(struct nv10_gr_chan *chan, u32 mthd, u32 data)
+{
+	void (*func)(struct nv10_gr_chan *, u32, u32);
+	switch (mthd) {
+	case 0x1638 ... 0x1644:
+		     func = nv17_gr_mthd_lma_window; break;
+	case 0x1658: func = nv17_gr_mthd_lma_enable; break;
+	default:
+		return false;
+	}
+	func(chan, mthd, data);
+	return true;
+}
 
-static struct nvkm_oclass
-nv17_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
-	{ 0x004a, &nv04_gr_ofuncs }, /* gdi */
-	{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
-	{ 0x005f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0062, &nv04_gr_ofuncs }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs }, /* blit */
-	{ 0x0093, &nv04_gr_ofuncs }, /* surf3d */
-	{ 0x0094, &nv04_gr_ofuncs }, /* ttri */
-	{ 0x0095, &nv04_gr_ofuncs }, /* mtri */
-	{ 0x0099, &nv04_gr_ofuncs, nv17_celcius_omthds },
-	{},
-};
+static bool
+nv10_gr_mthd(struct nv10_gr_chan *chan, u8 class, u32 mthd, u32 data)
+{
+	bool (*func)(struct nv10_gr_chan *, u32, u32);
+	switch (class) {
+	case 0x99: func = nv17_gr_mthd_celcius; break;
+	default:
+		return false;
+	}
+	return func(chan, mthd, data);
+}
 
 /*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
 static struct nv10_gr_chan *
-nv10_gr_channel(struct nv10_gr_priv *priv)
+nv10_gr_channel(struct nv10_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	struct nv10_gr_chan *chan = NULL;
-	if (nv_rd32(priv, 0x400144) & 0x00010000) {
-		int chid = nv_rd32(priv, 0x400148) >> 24;
-		if (chid < ARRAY_SIZE(priv->chan))
-			chan = priv->chan[chid];
+	if (nvkm_rd32(device, 0x400144) & 0x00010000) {
+		int chid = nvkm_rd32(device, 0x400148) >> 24;
+		if (chid < ARRAY_SIZE(gr->chan))
+			chan = gr->chan[chid];
 	}
 	return chan;
 }
@@ -617,75 +559,78 @@
 static void
 nv10_gr_save_pipe(struct nv10_gr_chan *chan)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
 	struct pipe_state *pipe = &chan->pipe_state;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 
-	PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
-	PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
-	PIPE_SAVE(priv, pipe->pipe_0x6400, 0x6400);
-	PIPE_SAVE(priv, pipe->pipe_0x6800, 0x6800);
-	PIPE_SAVE(priv, pipe->pipe_0x6c00, 0x6c00);
-	PIPE_SAVE(priv, pipe->pipe_0x7000, 0x7000);
-	PIPE_SAVE(priv, pipe->pipe_0x7400, 0x7400);
-	PIPE_SAVE(priv, pipe->pipe_0x7800, 0x7800);
-	PIPE_SAVE(priv, pipe->pipe_0x0040, 0x0040);
-	PIPE_SAVE(priv, pipe->pipe_0x0000, 0x0000);
+	PIPE_SAVE(gr, pipe->pipe_0x4400, 0x4400);
+	PIPE_SAVE(gr, pipe->pipe_0x0200, 0x0200);
+	PIPE_SAVE(gr, pipe->pipe_0x6400, 0x6400);
+	PIPE_SAVE(gr, pipe->pipe_0x6800, 0x6800);
+	PIPE_SAVE(gr, pipe->pipe_0x6c00, 0x6c00);
+	PIPE_SAVE(gr, pipe->pipe_0x7000, 0x7000);
+	PIPE_SAVE(gr, pipe->pipe_0x7400, 0x7400);
+	PIPE_SAVE(gr, pipe->pipe_0x7800, 0x7800);
+	PIPE_SAVE(gr, pipe->pipe_0x0040, 0x0040);
+	PIPE_SAVE(gr, pipe->pipe_0x0000, 0x0000);
 }
 
 static void
 nv10_gr_load_pipe(struct nv10_gr_chan *chan)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
 	struct pipe_state *pipe = &chan->pipe_state;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 xfmode0, xfmode1;
 	int i;
 
-	nv04_gr_idle(priv);
+	nv04_gr_idle(&gr->base);
 	/* XXX check haiku comments */
-	xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
-	xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
-	nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
-	nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+	xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0);
+	xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, 0x10000000);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
 	for (i = 0; i < 4; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
 	for (i = 0; i < 4; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
 	for (i = 0; i < 3; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
 	for (i = 0; i < 3; i++)
-		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+		nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000000);
 
-	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
-	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+	nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, 0x00000008);
 
 
-	PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
-	nv04_gr_idle(priv);
+	PIPE_RESTORE(gr, pipe->pipe_0x0200, 0x0200);
+	nv04_gr_idle(&gr->base);
 
 	/* restore XFMODE */
-	nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
-	nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
-	PIPE_RESTORE(priv, pipe->pipe_0x6400, 0x6400);
-	PIPE_RESTORE(priv, pipe->pipe_0x6800, 0x6800);
-	PIPE_RESTORE(priv, pipe->pipe_0x6c00, 0x6c00);
-	PIPE_RESTORE(priv, pipe->pipe_0x7000, 0x7000);
-	PIPE_RESTORE(priv, pipe->pipe_0x7400, 0x7400);
-	PIPE_RESTORE(priv, pipe->pipe_0x7800, 0x7800);
-	PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
-	PIPE_RESTORE(priv, pipe->pipe_0x0000, 0x0000);
-	PIPE_RESTORE(priv, pipe->pipe_0x0040, 0x0040);
-	nv04_gr_idle(priv);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0);
+	nvkm_wr32(device, NV10_PGRAPH_XFMODE1, xfmode1);
+	PIPE_RESTORE(gr, pipe->pipe_0x6400, 0x6400);
+	PIPE_RESTORE(gr, pipe->pipe_0x6800, 0x6800);
+	PIPE_RESTORE(gr, pipe->pipe_0x6c00, 0x6c00);
+	PIPE_RESTORE(gr, pipe->pipe_0x7000, 0x7000);
+	PIPE_RESTORE(gr, pipe->pipe_0x7400, 0x7400);
+	PIPE_RESTORE(gr, pipe->pipe_0x7800, 0x7800);
+	PIPE_RESTORE(gr, pipe->pipe_0x4400, 0x4400);
+	PIPE_RESTORE(gr, pipe->pipe_0x0000, 0x0000);
+	PIPE_RESTORE(gr, pipe->pipe_0x0040, 0x0040);
+	nv04_gr_idle(&gr->base);
 }
 
 static void
 nv10_gr_create_pipe(struct nv10_gr_chan *chan)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	struct pipe_state *pipe_state = &chan->pipe_state;
 	u32 *pipe_state_addr;
 	int i;
@@ -698,7 +643,7 @@
 		u32 *__end_addr = pipe_state->pipe_##addr + \
 				ARRAY_SIZE(pipe_state->pipe_##addr); \
 		if (pipe_state_addr != __end_addr) \
-			nv_error(priv, "incomplete pipe init for 0x%x :  %p/%p\n", \
+			nvkm_error(subdev, "incomplete pipe init for 0x%x :  %p/%p\n", \
 				addr, pipe_state_addr, __end_addr); \
 	} while (0)
 #define NV_WRITE_PIPE_INIT(value) *(pipe_state_addr++) = value
@@ -838,33 +783,36 @@
 }
 
 static int
-nv10_gr_ctx_regs_find_offset(struct nv10_gr_priv *priv, int reg)
+nv10_gr_ctx_regs_find_offset(struct nv10_gr *gr, int reg)
 {
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	int i;
 	for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++) {
 		if (nv10_gr_ctx_regs[i] == reg)
 			return i;
 	}
-	nv_error(priv, "unknown offset nv10_ctx_regs %d\n", reg);
+	nvkm_error(subdev, "unknown offset nv10_ctx_regs %d\n", reg);
 	return -1;
 }
 
 static int
-nv17_gr_ctx_regs_find_offset(struct nv10_gr_priv *priv, int reg)
+nv17_gr_ctx_regs_find_offset(struct nv10_gr *gr, int reg)
 {
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
 	int i;
 	for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++) {
 		if (nv17_gr_ctx_regs[i] == reg)
 			return i;
 	}
-	nv_error(priv, "unknown offset nv17_ctx_regs %d\n", reg);
+	nvkm_error(subdev, "unknown offset nv17_ctx_regs %d\n", reg);
 	return -1;
 }
 
 static void
 nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
 	u32 ctx_user, ctx_switch[5];
 	int i, subchan = -1;
@@ -876,7 +824,7 @@
 
 	/* Look for a celsius object */
 	for (i = 0; i < 8; i++) {
-		int class = nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
+		int class = nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
 
 		if (class == 0x56 || class == 0x96 || class == 0x99) {
 			subchan = i;
@@ -888,159 +836,183 @@
 		return;
 
 	/* Save the current ctx object */
-	ctx_user = nv_rd32(priv, NV10_PGRAPH_CTX_USER);
+	ctx_user = nvkm_rd32(device, NV10_PGRAPH_CTX_USER);
 	for (i = 0; i < 5; i++)
-		ctx_switch[i] = nv_rd32(priv, NV10_PGRAPH_CTX_SWITCH(i));
+		ctx_switch[i] = nvkm_rd32(device, NV10_PGRAPH_CTX_SWITCH(i));
 
 	/* Save the FIFO state */
-	st2 = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2);
-	st2_dl = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DL);
-	st2_dh = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DH);
-	fifo_ptr = nv_rd32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR);
+	st2 = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2);
+	st2_dl = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DL);
+	st2_dh = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_ST2_DH);
+	fifo_ptr = nvkm_rd32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR);
 
 	for (i = 0; i < ARRAY_SIZE(fifo); i++)
-		fifo[i] = nv_rd32(priv, 0x4007a0 + 4 * i);
+		fifo[i] = nvkm_rd32(device, 0x4007a0 + 4 * i);
 
 	/* Switch to the celsius subchannel */
 	for (i = 0; i < 5; i++)
-		nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i),
-			nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(subchan, i)));
-	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
+		nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i),
+			nvkm_rd32(device, NV10_PGRAPH_CTX_CACHE(subchan, i)));
+	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
 
 	/* Inject NV10TCL_DMA_VTXBUF */
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2,
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2,
 		0x2c000000 | chid << 20 | subchan << 16 | 0x18c);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
-	nv_mask(priv, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
+	nvkm_mask(device, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
 
 	/* Restore the FIFO state */
 	for (i = 0; i < ARRAY_SIZE(fifo); i++)
-		nv_wr32(priv, 0x4007a0 + 4 * i, fifo[i]);
+		nvkm_wr32(device, 0x4007a0 + 4 * i, fifo[i]);
 
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, st2);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, st2);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
 
 	/* Restore the current ctx object */
 	for (i = 0; i < 5; i++)
-		nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
-	nv_wr32(priv, NV10_PGRAPH_CTX_USER, ctx_user);
+		nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_USER, ctx_user);
 }
 
 static int
 nv10_gr_load_context(struct nv10_gr_chan *chan, int chid)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 inst;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
-		nv_wr32(priv, nv10_gr_ctx_regs[i], chan->nv10[i]);
+		nvkm_wr32(device, nv10_gr_ctx_regs[i], chan->nv10[i]);
 
-	if (nv_device(priv)->card_type >= NV_11 &&
-	    nv_device(priv)->chipset >= 0x17) {
+	if (device->card_type >= NV_11 && device->chipset >= 0x17) {
 		for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
-			nv_wr32(priv, nv17_gr_ctx_regs[i], chan->nv17[i]);
+			nvkm_wr32(device, nv17_gr_ctx_regs[i], chan->nv17[i]);
 	}
 
 	nv10_gr_load_pipe(chan);
 
-	inst = nv_rd32(priv, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
+	inst = nvkm_rd32(device, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
 	nv10_gr_load_dma_vtxbuf(chan, chid, inst);
 
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
-	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
-	nv_mask(priv, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
+	nvkm_mask(device, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
 	return 0;
 }
 
 static int
 nv10_gr_unload_context(struct nv10_gr_chan *chan)
 {
-	struct nv10_gr_priv *priv = nv10_gr_priv(chan);
+	struct nv10_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(nv10_gr_ctx_regs); i++)
-		chan->nv10[i] = nv_rd32(priv, nv10_gr_ctx_regs[i]);
+		chan->nv10[i] = nvkm_rd32(device, nv10_gr_ctx_regs[i]);
 
-	if (nv_device(priv)->card_type >= NV_11 &&
-	    nv_device(priv)->chipset >= 0x17) {
+	if (device->card_type >= NV_11 && device->chipset >= 0x17) {
 		for (i = 0; i < ARRAY_SIZE(nv17_gr_ctx_regs); i++)
-			chan->nv17[i] = nv_rd32(priv, nv17_gr_ctx_regs[i]);
+			chan->nv17[i] = nvkm_rd32(device, nv17_gr_ctx_regs[i]);
 	}
 
 	nv10_gr_save_pipe(chan);
 
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
-	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
 	return 0;
 }
 
 static void
-nv10_gr_context_switch(struct nv10_gr_priv *priv)
+nv10_gr_context_switch(struct nv10_gr *gr)
 {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	struct nv10_gr_chan *prev = NULL;
 	struct nv10_gr_chan *next = NULL;
-	unsigned long flags;
 	int chid;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	nv04_gr_idle(priv);
+	nv04_gr_idle(&gr->base);
 
 	/* If previous context is valid, we need to save it */
-	prev = nv10_gr_channel(priv);
+	prev = nv10_gr_channel(gr);
 	if (prev)
 		nv10_gr_unload_context(prev);
 
 	/* load context for next channel */
-	chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
-	next = priv->chan[chid];
+	chid = (nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
+	next = gr->chan[chid];
 	if (next)
 		nv10_gr_load_context(next, chid);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
+static int
+nv10_gr_chan_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nv10_gr_chan *chan = nv10_gr_chan(object);
+	struct nv10_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gr->lock, flags);
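+	/* pause PGRAPH fifo access while we check for and unload this channel's context */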
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+	if (nv10_gr_channel(gr) == chan)
+		nv10_gr_unload_context(chan);
+	nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&gr->lock, flags);
+	return 0;
+}
+
+static void *
+nv10_gr_chan_dtor(struct nvkm_object *object)
+{
+	struct nv10_gr_chan *chan = nv10_gr_chan(object);
+	struct nv10_gr *gr = chan->gr;
+	unsigned long flags;
+
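+	/* unpublish the channel under gr->lock so the interrupt handler stops seeing it */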
+	spin_lock_irqsave(&gr->lock, flags);
+	gr->chan[chan->chid] = NULL;
+	spin_unlock_irqrestore(&gr->lock, flags);
+	return chan;
+}
+
+static const struct nvkm_object_func
+nv10_gr_chan = {
+	.dtor = nv10_gr_chan_dtor,
+	.fini = nv10_gr_chan_fini,
+};
+
 #define NV_WRITE_CTX(reg, val) do { \
-	int offset = nv10_gr_ctx_regs_find_offset(priv, reg); \
+	int offset = nv10_gr_ctx_regs_find_offset(gr, reg); \
 	if (offset > 0) \
 		chan->nv10[offset] = val; \
 	} while (0)
 
 #define NV17_WRITE_CTX(reg, val) do { \
-	int offset = nv17_gr_ctx_regs_find_offset(priv, reg); \
+	int offset = nv17_gr_ctx_regs_find_offset(gr, reg); \
 	if (offset > 0) \
 		chan->nv17[offset] = val; \
 	} while (0)
 
-static int
-nv10_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+int
+nv10_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
-	struct nvkm_fifo_chan *fifo = (void *)parent;
-	struct nv10_gr_priv *priv = (void *)engine;
+	struct nv10_gr *gr = nv10_gr(base);
 	struct nv10_gr_chan *chan;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	unsigned long flags;
-	int ret;
 
-	ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	if (priv->chan[fifo->chid]) {
-		*pobject = nv_object(priv->chan[fifo->chid]);
-		atomic_inc(&(*pobject)->refcount);
-		spin_unlock_irqrestore(&priv->lock, flags);
-		nvkm_object_destroy(&chan->base);
-		return 1;
-	}
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv10_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
 
 	NV_WRITE_CTX(0x00400e88, 0x08000000);
 	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
@@ -1049,12 +1021,11 @@
 	NV_WRITE_CTX(0x00400e14, 0x00001000);
 	NV_WRITE_CTX(0x00400e30, 0x00080008);
 	NV_WRITE_CTX(0x00400e34, 0x00080008);
-	if (nv_device(priv)->card_type >= NV_11 &&
-	    nv_device(priv)->chipset >= 0x17) {
+	if (device->card_type >= NV_11 && device->chipset >= 0x17) {
 		/* is it really needed ??? */
 		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
-					nv_rd32(priv, NV10_PGRAPH_DEBUG_4));
-		NV17_WRITE_CTX(0x004006b0, nv_rd32(priv, 0x004006b0));
+			       nvkm_rd32(device, NV10_PGRAPH_DEBUG_4));
+		NV17_WRITE_CTX(0x004006b0, nvkm_rd32(device, 0x004006b0));
 		NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
 		NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
 		NV17_WRITE_CTX(0x00400ec0, 0x00000080);
@@ -1064,74 +1035,32 @@
 
 	nv10_gr_create_pipe(chan);
 
-	priv->chan[fifo->chid] = chan;
-	chan->chid = fifo->chid;
-	spin_unlock_irqrestore(&priv->lock, flags);
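+	/* publish the channel only after its context image has been fully built */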
+	spin_lock_irqsave(&gr->lock, flags);
+	gr->chan[chan->chid] = chan;
+	spin_unlock_irqrestore(&gr->lock, flags);
 	return 0;
 }
 
-static void
-nv10_gr_context_dtor(struct nvkm_object *object)
-{
-	struct nv10_gr_priv *priv = (void *)object->engine;
-	struct nv10_gr_chan *chan = (void *)object;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	priv->chan[chan->chid] = NULL;
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	nvkm_object_destroy(&chan->base);
-}
-
-static int
-nv10_gr_context_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv10_gr_priv *priv = (void *)object->engine;
-	struct nv10_gr_chan *chan = (void *)object;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-	if (nv10_gr_channel(priv) == chan)
-		nv10_gr_unload_context(chan);
-	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	return nvkm_object_fini(&chan->base, suspend);
-}
-
-static struct nvkm_oclass
-nv10_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x10),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_gr_context_ctor,
-		.dtor = nv10_gr_context_dtor,
-		.init = nvkm_object_init,
-		.fini = nv10_gr_context_fini,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static void
-nv10_gr_tile_prog(struct nvkm_engine *engine, int i)
+void
+nv10_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
 {
-	struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
-	struct nvkm_fifo *pfifo = nvkm_fifo(engine);
-	struct nv10_gr_priv *priv = (void *)engine;
+	struct nv10_gr *gr = nv10_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
 	unsigned long flags;
 
-	pfifo->pause(pfifo, &flags);
-	nv04_gr_idle(priv);
+	nvkm_fifo_pause(fifo, &flags);
+	nv04_gr_idle(&gr->base);
 
-	nv_wr32(priv, NV10_PGRAPH_TLIMIT(i), tile->limit);
-	nv_wr32(priv, NV10_PGRAPH_TSIZE(i), tile->pitch);
-	nv_wr32(priv, NV10_PGRAPH_TILE(i), tile->addr);
+	nvkm_wr32(device, NV10_PGRAPH_TLIMIT(i), tile->limit);
+	nvkm_wr32(device, NV10_PGRAPH_TSIZE(i), tile->pitch);
+	nvkm_wr32(device, NV10_PGRAPH_TILE(i), tile->addr);
 
-	pfifo->start(pfifo, &flags);
+	nvkm_fifo_start(fifo, &flags);
 }
 
 const struct nvkm_bitfield nv10_gr_intr_name[] = {
@@ -1148,168 +1077,145 @@
 	{}
 };
 
-static void
-nv10_gr_intr(struct nvkm_subdev *subdev)
+void
+nv10_gr_intr(struct nvkm_gr *base)
 {
-	struct nv10_gr_priv *priv = (void *)subdev;
-	struct nv10_gr_chan *chan = NULL;
-	struct nvkm_namedb *namedb = NULL;
-	struct nvkm_handle *handle = NULL;
-	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
-	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
-	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
-	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	struct nv10_gr *gr = nv10_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
+	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
+	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
 	u32 chid = (addr & 0x01f00000) >> 20;
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00001ffc);
-	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
-	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
+	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xfff;
 	u32 show = stat;
+	char msg[128], src[128], sta[128];
+	struct nv10_gr_chan *chan;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	chan = priv->chan[chid];
-	if (chan)
-		namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
-	spin_unlock_irqrestore(&priv->lock, flags);
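+	/* gr->lock is now held across the whole handler, so the channel can't be destroyed under us */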
+	spin_lock_irqsave(&gr->lock, flags);
+	chan = gr->chan[chid];
 
 	if (stat & NV_PGRAPH_INTR_ERROR) {
 		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
-			handle = nvkm_namedb_get_class(namedb, class);
-			if (handle && !nv_call(handle->object, mthd, data))
+			if (!nv10_gr_mthd(chan, class, mthd, data))
 				show &= ~NV_PGRAPH_INTR_ERROR;
 		}
 	}
 
 	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
-		nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+		nvkm_wr32(device, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
 		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
 		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-		nv10_gr_context_switch(priv);
+		nv10_gr_context_switch(gr);
 	}
 
-	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
-	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
+	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
 
 	if (show) {
-		nv_error(priv, "%s", "");
-		nvkm_bitfield_print(nv10_gr_intr_name, show);
-		pr_cont(" nsource:");
-		nvkm_bitfield_print(nv04_gr_nsource, nsource);
-		pr_cont(" nstatus:");
-		nvkm_bitfield_print(nv10_gr_nstatus, nstatus);
-		pr_cont("\n");
-		nv_error(priv,
-			 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, nvkm_client_name(chan), subc, class, mthd,
-			 data);
+		nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
+		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
+		nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
+		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
+				   "nstatus %08x [%s] ch %d [%s] subc %d "
+				   "class %04x mthd %04x data %08x\n",
+			   show, msg, nsource, src, nstatus, sta, chid,
+			   chan ? chan->object.client->name : "unknown",
+			   subc, class, mthd, data);
 	}
 
-	nvkm_namedb_put(handle);
+	spin_unlock_irqrestore(&gr->lock, flags);
 }
 
-static int
-nv10_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+int
+nv10_gr_init(struct nvkm_gr *base)
 {
-	struct nv10_gr_priv *priv;
-	int ret;
+	struct nv10_gr *gr = nv10_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv10_gr_intr;
-	nv_engine(priv)->cclass = &nv10_gr_cclass;
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x00118700);
+	/* nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
 
-	if (nv_device(priv)->chipset <= 0x10)
-		nv_engine(priv)->sclass = nv10_gr_sclass;
-	else
-	if (nv_device(priv)->chipset <  0x17 ||
-	    nv_device(priv)->card_type < NV_11)
-		nv_engine(priv)->sclass = nv15_gr_sclass;
-	else
-		nv_engine(priv)->sclass = nv17_gr_sclass;
-
-	nv_engine(priv)->tile_prog = nv10_gr_tile_prog;
-	spin_lock_init(&priv->lock);
-	return 0;
-}
-
-static void
-nv10_gr_dtor(struct nvkm_object *object)
-{
-	struct nv10_gr_priv *priv = (void *)object;
-	nvkm_gr_destroy(&priv->base);
-}
-
-static int
-nv10_gr_init(struct nvkm_object *object)
-{
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	struct nv10_gr_priv *priv = (void *)engine;
-	int ret, i;
-
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
-	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
-	/* nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
-
-	if (nv_device(priv)->card_type >= NV_11 &&
-	    nv_device(priv)->chipset >= 0x17) {
-		nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000);
-		nv_wr32(priv, 0x400a10, 0x03ff3fb6);
-		nv_wr32(priv, 0x400838, 0x002f8684);
-		nv_wr32(priv, 0x40083c, 0x00115f3f);
-		nv_wr32(priv, 0x4006b0, 0x40000020);
+	if (device->card_type >= NV_11 && device->chipset >= 0x17) {
+		nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x1f000000);
+		nvkm_wr32(device, 0x400a10, 0x03ff3fb6);
+		nvkm_wr32(device, 0x400838, 0x002f8684);
+		nvkm_wr32(device, 0x40083c, 0x00115f3f);
+		nvkm_wr32(device, 0x4006b0, 0x40000020);
 	} else {
-		nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
+		nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00000000);
 	}
 
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < pfb->tile.regions; i++)
-		engine->tile_prog(engine, i);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
+	nvkm_wr32(device, NV10_PGRAPH_STATE, 0xFFFFFFFF);
 
-	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
-	nv_wr32(priv, NV10_PGRAPH_STATE, 0xFFFFFFFF);
-
-	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
-	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
+	nvkm_mask(device, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nvkm_wr32(device, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
 	return 0;
 }
 
-static int
-nv10_gr_fini(struct nvkm_object *object, bool suspend)
+int
+nv10_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_gr **pgr)
 {
-	struct nv10_gr_priv *priv = (void *)object;
-	return nvkm_gr_fini(&priv->base, suspend);
+	struct nv10_gr *gr;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	spin_lock_init(&gr->lock);
+	*pgr = &gr->base;
+
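+	/* 0x00001000 is PGRAPH's PMC unit mask (interrupt/enable bit) */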
+	return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
 }
 
-struct nvkm_oclass
-nv10_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x10),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_gr_ctor,
-		.dtor = nv10_gr_dtor,
-		.init = nv10_gr_init,
-		.fini = nv10_gr_fini,
-	},
+static const struct nvkm_gr_func
+nv10_gr = {
+	.init = nv10_gr_init,
+	.intr = nv10_gr_intr,
+	.tile = nv10_gr_tile,
+	.chan_new = nv10_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x005f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
+		{ -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
+		{ -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
+		{ -1, -1, 0x0056, &nv04_gr_object }, /* celsius */
+		{}
+	}
 };
+
+int
+nv10_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv10_gr_new_(&nv10_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
new file mode 100644
index 0000000..d7c3d86
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
@@ -0,0 +1,13 @@
+#ifndef __NV10_GR_H__
+#define __NV10_GR_H__
+#include "priv.h"
+
+int nv10_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+		 struct nvkm_gr **);
+int nv10_gr_init(struct nvkm_gr *);
+void nv10_gr_intr(struct nvkm_gr *);
+void nv10_gr_tile(struct nvkm_gr *, int, struct nvkm_fb_tile *);
+
+int nv10_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+		     const struct nvkm_oclass *, struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
new file mode 100644
index 0000000..3e2c685
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv15.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "nv10.h"
+
+static const struct nvkm_gr_func
+nv15_gr = {
+	.init = nv10_gr_init,
+	.intr = nv10_gr_intr,
+	.tile = nv10_gr_tile,
+	.chan_new = nv10_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x005f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
+		{ -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
+		{ -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
+		{ -1, -1, 0x0096, &nv04_gr_object }, /* celsius */
+		{}
+	}
+};
+
+int
+nv15_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv10_gr_new_(&nv15_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
new file mode 100644
index 0000000..12437d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv17.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "nv10.h"
+
+static const struct nvkm_gr_func
+nv17_gr = {
+	.init = nv10_gr_init,
+	.intr = nv10_gr_intr,
+	.tile = nv10_gr_tile,
+	.chan_new = nv10_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x005f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* blit */
+		{ -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
+		{ -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
+		{ -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
+		{ -1, -1, 0x0099, &nv04_gr_object }, /* celsius */
+		{}
+	}
+};
+
+int
+nv17_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv10_gr_new_(&nv17_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index 1713ffb..5caef65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -2,375 +2,374 @@
 #include "regs.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 #include <subdev/fb.h>
 #include <subdev/timer.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv20_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
-	{ 0x0096, &nv04_gr_ofuncs, NULL }, /* celcius */
-	{ 0x0097, &nv04_gr_ofuncs, NULL }, /* kelvin */
-	{ 0x009e, &nv04_gr_ofuncs, NULL }, /* swzsurf */
-	{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
-	{},
-};
-
-/*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
-static int
-nv20_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+int
+nv20_gr_chan_init(struct nvkm_object *object)
 {
+	struct nv20_gr_chan *chan = nv20_gr_chan(object);
+	struct nv20_gr *gr = chan->gr;
+	u32 inst = nvkm_memory_addr(chan->inst);
+
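+	/* point this channel's slot in the context table at its instance (addr >> 4) */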
+	nvkm_kmap(gr->ctxtab);
+	nvkm_wo32(gr->ctxtab, chan->chid * 4, inst >> 4);
+	nvkm_done(gr->ctxtab);
+	return 0;
+}
+
+int
+nv20_gr_chan_fini(struct nvkm_object *object, bool suspend)
+{
+	struct nv20_gr_chan *chan = nv20_gr_chan(object);
+	struct nv20_gr *gr = chan->gr;
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	u32 inst = nvkm_memory_addr(chan->inst);
+	int chid = -1;
+
+	nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);
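+	/* if this channel's context is live on PGRAPH, force a context save before releasing it */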
+	if (nvkm_rd32(device, 0x400144) & 0x00010000)
+		chid = (nvkm_rd32(device, 0x400148) & 0x1f000000) >> 24;
+	if (chan->chid == chid) {
+		nvkm_wr32(device, 0x400784, inst >> 4);
+		nvkm_wr32(device, 0x400788, 0x00000002);
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x400700))
+				break;
+		);
+		nvkm_wr32(device, 0x400144, 0x10000000);
+		nvkm_mask(device, 0x400148, 0xff000000, 0x1f000000);
+	}
+	nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
+
+	nvkm_kmap(gr->ctxtab);
+	nvkm_wo32(gr->ctxtab, chan->chid * 4, 0x00000000);
+	nvkm_done(gr->ctxtab);
+	return 0;
+}
+
+void *
+nv20_gr_chan_dtor(struct nvkm_object *object)
+{
+	struct nv20_gr_chan *chan = nv20_gr_chan(object);
+	nvkm_memory_del(&chan->inst);
+	return chan;
+}
+
+static const struct nvkm_object_func
+nv20_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
+};
+
+static int
+nv20_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
+{
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x37f0,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv20_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
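+	/* per-channel PGRAPH context image: 0x37f0 bytes, 16-byte aligned */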
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x37f0, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x033c, 0xffff0000);
-	nv_wo32(chan, 0x03a0, 0x0fff0000);
-	nv_wo32(chan, 0x03a4, 0x0fff0000);
-	nv_wo32(chan, 0x047c, 0x00000101);
-	nv_wo32(chan, 0x0490, 0x00000111);
-	nv_wo32(chan, 0x04a8, 0x44400000);
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0000, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x033c, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x03a0, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x03a4, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x047c, 0x00000101);
+	nvkm_wo32(chan->inst, 0x0490, 0x00000111);
+	nvkm_wo32(chan->inst, 0x04a8, 0x44400000);
 	for (i = 0x04d4; i <= 0x04e0; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x04f4; i <= 0x0500; i += 4)
-		nv_wo32(chan, i, 0x00080000);
+		nvkm_wo32(chan->inst, i, 0x00080000);
 	for (i = 0x050c; i <= 0x0518; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x051c; i <= 0x0528; i += 4)
-		nv_wo32(chan, i, 0x000105b8);
+		nvkm_wo32(chan->inst, i, 0x000105b8);
 	for (i = 0x052c; i <= 0x0538; i += 4)
-		nv_wo32(chan, i, 0x00080008);
+		nvkm_wo32(chan->inst, i, 0x00080008);
 	for (i = 0x055c; i <= 0x0598; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x05a4, 0x4b7fffff);
-	nv_wo32(chan, 0x05fc, 0x00000001);
-	nv_wo32(chan, 0x0604, 0x00004000);
-	nv_wo32(chan, 0x0610, 0x00000001);
-	nv_wo32(chan, 0x0618, 0x00040000);
-	nv_wo32(chan, 0x061c, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x05a4, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x05fc, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0604, 0x00004000);
+	nvkm_wo32(chan->inst, 0x0610, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0618, 0x00040000);
+	nvkm_wo32(chan->inst, 0x061c, 0x00010000);
 	for (i = 0x1c1c; i <= 0x248c; i += 16) {
-		nv_wo32(chan, (i + 0), 0x10700ff9);
-		nv_wo32(chan, (i + 4), 0x0436086c);
-		nv_wo32(chan, (i + 8), 0x000c001b);
+		nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
+		nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
+		nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
 	}
-	nv_wo32(chan, 0x281c, 0x3f800000);
-	nv_wo32(chan, 0x2830, 0x3f800000);
-	nv_wo32(chan, 0x285c, 0x40000000);
-	nv_wo32(chan, 0x2860, 0x3f800000);
-	nv_wo32(chan, 0x2864, 0x3f000000);
-	nv_wo32(chan, 0x286c, 0x40000000);
-	nv_wo32(chan, 0x2870, 0x3f800000);
-	nv_wo32(chan, 0x2878, 0xbf800000);
-	nv_wo32(chan, 0x2880, 0xbf800000);
-	nv_wo32(chan, 0x34a4, 0x000fe000);
-	nv_wo32(chan, 0x3530, 0x000003f8);
-	nv_wo32(chan, 0x3540, 0x002fe000);
+	nvkm_wo32(chan->inst, 0x281c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2830, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x285c, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2860, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2864, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x286c, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2870, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2878, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x2880, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x34a4, 0x000fe000);
+	nvkm_wo32(chan->inst, 0x3530, 0x000003f8);
+	nvkm_wo32(chan->inst, 0x3540, 0x002fe000);
 	for (i = 0x355c; i <= 0x3578; i += 4)
-		nv_wo32(chan, i, 0x001c527c);
+		nvkm_wo32(chan->inst, i, 0x001c527c);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-int
-nv20_gr_context_init(struct nvkm_object *object)
-{
-	struct nv20_gr_priv *priv = (void *)object->engine;
-	struct nv20_gr_chan *chan = (void *)object;
-	int ret;
-
-	ret = nvkm_gr_context_init(&chan->base);
-	if (ret)
-		return ret;
-
-	nv_wo32(priv->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4);
-	return 0;
-}
-
-int
-nv20_gr_context_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nv20_gr_priv *priv = (void *)object->engine;
-	struct nv20_gr_chan *chan = (void *)object;
-	int chid = -1;
-
-	nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
-	if (nv_rd32(priv, 0x400144) & 0x00010000)
-		chid = (nv_rd32(priv, 0x400148) & 0x1f000000) >> 24;
-	if (chan->chid == chid) {
-		nv_wr32(priv, 0x400784, nv_gpuobj(chan)->addr >> 4);
-		nv_wr32(priv, 0x400788, 0x00000002);
-		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
-		nv_wr32(priv, 0x400144, 0x10000000);
-		nv_mask(priv, 0x400148, 0xff000000, 0x1f000000);
-	}
-	nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
-
-	nv_wo32(priv->ctxtab, chan->chid * 4, 0x00000000);
-	return nvkm_gr_context_fini(&chan->base, suspend);
-}
-
-static struct nvkm_oclass
-nv20_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x20),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv20_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
 void
-nv20_gr_tile_prog(struct nvkm_engine *engine, int i)
+nv20_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
 {
-	struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
-	struct nvkm_fifo *pfifo = nvkm_fifo(engine);
-	struct nv20_gr_priv *priv = (void *)engine;
+	struct nv20_gr *gr = nv20_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
 	unsigned long flags;
 
-	pfifo->pause(pfifo, &flags);
-	nv04_gr_idle(priv);
+	nvkm_fifo_pause(fifo, &flags);
+	nv04_gr_idle(&gr->base);
 
-	nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
-	nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
-	nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
+	nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
+	nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
+	nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
 
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->limit);
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->pitch);
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->limit);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->pitch);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->addr);
 
-	if (nv_device(engine)->chipset != 0x34) {
-		nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
+	if (device->chipset != 0x34) {
+		nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, tile->zcomp);
 	}
 
-	pfifo->start(pfifo, &flags);
+	nvkm_fifo_start(fifo, &flags);
 }
 
 void
-nv20_gr_intr(struct nvkm_subdev *subdev)
+nv20_gr_intr(struct nvkm_gr *base)
 {
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct nvkm_handle *handle;
-	struct nv20_gr_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
-	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
-	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
-	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	struct nv20_gr *gr = nv20_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo_chan *chan;
+	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
+	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
+	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
 	u32 chid = (addr & 0x01f00000) >> 20;
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00001ffc);
-	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
-	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
+	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xfff;
 	u32 show = stat;
+	char msg[128], src[128], sta[128];
+	unsigned long flags;
 
-	engctx = nvkm_engctx_get(engine, chid);
-	if (stat & NV_PGRAPH_INTR_ERROR) {
-		if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-			handle = nvkm_handle_get_class(engctx, class);
-			if (handle && !nv_call(handle->object, mthd, data))
-				show &= ~NV_PGRAPH_INTR_ERROR;
-			nvkm_handle_put(handle);
-		}
-	}
+	chan = nvkm_fifo_chan_chid(device->fifo, chid, &flags);
 
-	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
-	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
+	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
 
 	if (show) {
-		nv_error(priv, "%s", "");
-		nvkm_bitfield_print(nv10_gr_intr_name, show);
-		pr_cont(" nsource:");
-		nvkm_bitfield_print(nv04_gr_nsource, nsource);
-		pr_cont(" nstatus:");
-		nvkm_bitfield_print(nv10_gr_nstatus, nstatus);
-		pr_cont("\n");
-		nv_error(priv,
-			 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, nvkm_client_name(engctx), subc, class, mthd,
-			 data);
+		nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
+		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
+		nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
+		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
+				   "nstatus %08x [%s] ch %d [%s] subc %d "
+				   "class %04x mthd %04x data %08x\n",
+			   show, msg, nsource, src, nstatus, sta, chid,
+			   chan ? chan->object.client->name : "unknown",
+			   subc, class, mthd, data);
 	}
 
-	nvkm_engctx_put(engctx);
-}
-
-static int
-nv20_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv20_gr_cclass;
-	nv_engine(priv)->sclass = nv20_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
-}
-
-void
-nv20_gr_dtor(struct nvkm_object *object)
-{
-	struct nv20_gr_priv *priv = (void *)object;
-	nvkm_gpuobj_ref(NULL, &priv->ctxtab);
-	nvkm_gr_destroy(&priv->base);
+	nvkm_fifo_chan_put(device->fifo, flags, &chan);
 }
 
 int
-nv20_gr_init(struct nvkm_object *object)
+nv20_gr_oneinit(struct nvkm_gr *base)
 {
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nv20_gr_priv *priv = (void *)engine;
-	struct nvkm_fb *pfb = nvkm_fb(object);
+	struct nv20_gr *gr = nv20_gr(base);
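+	/* 32-entry table of per-channel context addresses, programmed into NV20_PGRAPH_CHANNEL_CTX_TABLE at init */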
+	return nvkm_memory_new(gr->base.engine.subdev.device,
+			       NVKM_MEM_TARGET_INST, 32 * 4, 16,
+			       true, &gr->ctxtab);
+}
+
+int
+nv20_gr_init(struct nvkm_gr *base)
+{
+	struct nv20_gr *gr = nv20_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	u32 tmp, vramsz;
-	int ret, i;
+	int i;
 
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, NV20_PGRAPH_CHANNEL_CTX_TABLE,
+			  nvkm_memory_addr(gr->ctxtab) >> 4);
 
-	nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
-
-	if (nv_device(priv)->chipset == 0x20) {
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
+	if (device->chipset == 0x20) {
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
 		for (i = 0; i < 15; i++)
-			nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
-		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
+			nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000);
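+		/* poll up to 2000ms for PGRAPH status (0x400700) to clear */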
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x400700))
+				break;
+		);
 	} else {
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
 		for (i = 0; i < 32; i++)
-			nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
-		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
+			nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000);
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x400700))
+				break;
+		);
 	}
 
-	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
-	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
-	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
-	nv_wr32(priv, 0x40009C           , 0x00000040);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x00118700);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
+	nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00000000);
+	nvkm_wr32(device, 0x40009C           , 0x00000040);
 
-	if (nv_device(priv)->chipset >= 0x25) {
-		nv_wr32(priv, 0x400890, 0x00a8cfff);
-		nv_wr32(priv, 0x400610, 0x304B1FB6);
-		nv_wr32(priv, 0x400B80, 0x1cbd3883);
-		nv_wr32(priv, 0x400B84, 0x44000000);
-		nv_wr32(priv, 0x400098, 0x40000080);
-		nv_wr32(priv, 0x400B88, 0x000000ff);
+	if (device->chipset >= 0x25) {
+		nvkm_wr32(device, 0x400890, 0x00a8cfff);
+		nvkm_wr32(device, 0x400610, 0x304B1FB6);
+		nvkm_wr32(device, 0x400B80, 0x1cbd3883);
+		nvkm_wr32(device, 0x400B84, 0x44000000);
+		nvkm_wr32(device, 0x400098, 0x40000080);
+		nvkm_wr32(device, 0x400B88, 0x000000ff);
 
 	} else {
-		nv_wr32(priv, 0x400880, 0x0008c7df);
-		nv_wr32(priv, 0x400094, 0x00000005);
-		nv_wr32(priv, 0x400B80, 0x45eae20e);
-		nv_wr32(priv, 0x400B84, 0x24000000);
-		nv_wr32(priv, 0x400098, 0x00000040);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
+		nvkm_wr32(device, 0x400880, 0x0008c7df);
+		nvkm_wr32(device, 0x400094, 0x00000005);
+		nvkm_wr32(device, 0x400B80, 0x45eae20e);
+		nvkm_wr32(device, 0x400B84, 0x24000000);
+		nvkm_wr32(device, 0x400098, 0x00000040);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000030);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000030);
 	}
 
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < pfb->tile.regions; i++)
-		engine->tile_prog(engine, i);
+	nvkm_wr32(device, 0x4009a0, nvkm_rd32(device, 0x100324));
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, nvkm_rd32(device, 0x100324));
 
-	nv_wr32(priv, 0x4009a0, nv_rd32(priv, 0x100324));
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, nv_rd32(priv, 0x100324));
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nvkm_wr32(device, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
 
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
-	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
-
-	tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) & 0x0007ff00;
-	nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
-	tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) | 0x00020100;
-	nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
+	tmp = nvkm_rd32(device, NV10_PGRAPH_SURFACE) & 0x0007ff00;
+	nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);
+	tmp = nvkm_rd32(device, NV10_PGRAPH_SURFACE) | 0x00020100;
+	nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);
 
 	/* begin RAM config */
-	vramsz = nv_device_resource_len(nv_device(priv), 0) - 1;
-	nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
-	nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100200));
-	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
-	nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100204));
-	nv_wr32(priv, 0x400820, 0);
-	nv_wr32(priv, 0x400824, 0);
-	nv_wr32(priv, 0x400864, vramsz - 1);
-	nv_wr32(priv, 0x400868, vramsz - 1);
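+	/* assumption: BAR1 (resource 1) is the VRAM aperture; its size bounds the RAM config below */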
+	vramsz = device->func->resource_size(device, 1) - 1;
+	nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
+	nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , nvkm_rd32(device, 0x100200));
+	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+	nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , nvkm_rd32(device, 0x100204));
+	nvkm_wr32(device, 0x400820, 0);
+	nvkm_wr32(device, 0x400824, 0);
+	nvkm_wr32(device, 0x400864, vramsz - 1);
+	nvkm_wr32(device, 0x400868, vramsz - 1);
 
 	/* interesting.. the below overwrites some of the tile setup above.. */
-	nv_wr32(priv, 0x400B20, 0x00000000);
-	nv_wr32(priv, 0x400B04, 0xFFFFFFFF);
+	nvkm_wr32(device, 0x400B20, 0x00000000);
+	nvkm_wr32(device, 0x400B04, 0xFFFFFFFF);
 
-	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
-	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
-	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
-	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
+	nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
+	nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
+	nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
+	nvkm_wr32(device, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
 	return 0;
 }
 
-struct nvkm_oclass
-nv20_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x20),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv20_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv20_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+void *
+nv20_gr_dtor(struct nvkm_gr *base)
+{
+	struct nv20_gr *gr = nv20_gr(base);
+	nvkm_memory_del(&gr->ctxtab);
+	return gr;
+}
+
+int
+nv20_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_gr **pgr)
+{
+	struct nv20_gr *gr;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	*pgr = &gr->base;
+
+	return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
+}
+
+static const struct nvkm_gr_func
+nv20_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv20_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv20_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x0096, &nv04_gr_object }, /* celsius */
+		{ -1, -1, 0x0097, &nv04_gr_object }, /* kelvin */
+		{ -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{}
+	}
 };
+
+int
+nv20_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv20_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
index ac4dc04..cdf4501 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
@@ -1,26 +1,33 @@
 #ifndef __NV20_GR_H__
 #define __NV20_GR_H__
-#include <engine/gr.h>
+#define nv20_gr(p) container_of((p), struct nv20_gr, base)
+#include "priv.h"
 
-struct nv20_gr_priv {
+struct nv20_gr {
 	struct nvkm_gr base;
-	struct nvkm_gpuobj *ctxtab;
+	struct nvkm_memory *ctxtab;
 };
 
+int nv20_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *,
+		 int, struct nvkm_gr **);
+void *nv20_gr_dtor(struct nvkm_gr *);
+int nv20_gr_oneinit(struct nvkm_gr *);
+int nv20_gr_init(struct nvkm_gr *);
+void nv20_gr_intr(struct nvkm_gr *);
+void nv20_gr_tile(struct nvkm_gr *, int, struct nvkm_fb_tile *);
+
+int nv30_gr_init(struct nvkm_gr *);
+
+#define nv20_gr_chan(p) container_of((p), struct nv20_gr_chan, object)
+
 struct nv20_gr_chan {
-	struct nvkm_gr_chan base;
+	struct nvkm_object object;
+	struct nv20_gr *gr;
 	int chid;
+	struct nvkm_memory *inst;
 };
 
-extern struct nvkm_oclass nv25_gr_sclass[];
-int  nv20_gr_context_init(struct nvkm_object *);
-int  nv20_gr_context_fini(struct nvkm_object *, bool);
-
-void nv20_gr_tile_prog(struct nvkm_engine *, int);
-void nv20_gr_intr(struct nvkm_subdev *);
-
-void nv20_gr_dtor(struct nvkm_object *);
-int  nv20_gr_init(struct nvkm_object *);
-
-int  nv30_gr_init(struct nvkm_object *);
+void *nv20_gr_chan_dtor(struct nvkm_object *);
+int nv20_gr_chan_init(struct nvkm_object *);
+int nv20_gr_chan_fini(struct nvkm_object *, bool);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
index bc36251..6c4a008 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
@@ -1,158 +1,134 @@
 #include "nv20.h"
 #include "regs.h"
 
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
-
-/*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-struct nvkm_oclass
-nv25_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
-	{ 0x0096, &nv04_gr_ofuncs, NULL }, /* celcius */
-	{ 0x009e, &nv04_gr_ofuncs, NULL }, /* swzsurf */
-	{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x0597, &nv04_gr_ofuncs, NULL }, /* kelvin */
-	{},
-};
+#include <engine/fifo/chan.h>
 
 /*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
+static const struct nvkm_object_func
+nv25_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
+};
+
 static int
-nv25_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv25_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x3724,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv25_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x3724, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x035c, 0xffff0000);
-	nv_wo32(chan, 0x03c0, 0x0fff0000);
-	nv_wo32(chan, 0x03c4, 0x0fff0000);
-	nv_wo32(chan, 0x049c, 0x00000101);
-	nv_wo32(chan, 0x04b0, 0x00000111);
-	nv_wo32(chan, 0x04c8, 0x00000080);
-	nv_wo32(chan, 0x04cc, 0xffff0000);
-	nv_wo32(chan, 0x04d0, 0x00000001);
-	nv_wo32(chan, 0x04e4, 0x44400000);
-	nv_wo32(chan, 0x04fc, 0x4b800000);
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x035c, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x03c0, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x03c4, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x049c, 0x00000101);
+	nvkm_wo32(chan->inst, 0x04b0, 0x00000111);
+	nvkm_wo32(chan->inst, 0x04c8, 0x00000080);
+	nvkm_wo32(chan->inst, 0x04cc, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x04d0, 0x00000001);
+	nvkm_wo32(chan->inst, 0x04e4, 0x44400000);
+	nvkm_wo32(chan->inst, 0x04fc, 0x4b800000);
 	for (i = 0x0510; i <= 0x051c; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x0530; i <= 0x053c; i += 4)
-		nv_wo32(chan, i, 0x00080000);
+		nvkm_wo32(chan->inst, i, 0x00080000);
 	for (i = 0x0548; i <= 0x0554; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x0558; i <= 0x0564; i += 4)
-		nv_wo32(chan, i, 0x000105b8);
+		nvkm_wo32(chan->inst, i, 0x000105b8);
 	for (i = 0x0568; i <= 0x0574; i += 4)
-		nv_wo32(chan, i, 0x00080008);
+		nvkm_wo32(chan->inst, i, 0x00080008);
 	for (i = 0x0598; i <= 0x05d4; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x05e0, 0x4b7fffff);
-	nv_wo32(chan, 0x0620, 0x00000080);
-	nv_wo32(chan, 0x0624, 0x30201000);
-	nv_wo32(chan, 0x0628, 0x70605040);
-	nv_wo32(chan, 0x062c, 0xb0a09080);
-	nv_wo32(chan, 0x0630, 0xf0e0d0c0);
-	nv_wo32(chan, 0x0664, 0x00000001);
-	nv_wo32(chan, 0x066c, 0x00004000);
-	nv_wo32(chan, 0x0678, 0x00000001);
-	nv_wo32(chan, 0x0680, 0x00040000);
-	nv_wo32(chan, 0x0684, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x05e0, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x0620, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0624, 0x30201000);
+	nvkm_wo32(chan->inst, 0x0628, 0x70605040);
+	nvkm_wo32(chan->inst, 0x062c, 0xb0a09080);
+	nvkm_wo32(chan->inst, 0x0630, 0xf0e0d0c0);
+	nvkm_wo32(chan->inst, 0x0664, 0x00000001);
+	nvkm_wo32(chan->inst, 0x066c, 0x00004000);
+	nvkm_wo32(chan->inst, 0x0678, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0680, 0x00040000);
+	nvkm_wo32(chan->inst, 0x0684, 0x00010000);
 	for (i = 0x1b04; i <= 0x2374; i += 16) {
-		nv_wo32(chan, (i + 0), 0x10700ff9);
-		nv_wo32(chan, (i + 4), 0x0436086c);
-		nv_wo32(chan, (i + 8), 0x000c001b);
+		nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
+		nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
+		nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
 	}
-	nv_wo32(chan, 0x2704, 0x3f800000);
-	nv_wo32(chan, 0x2718, 0x3f800000);
-	nv_wo32(chan, 0x2744, 0x40000000);
-	nv_wo32(chan, 0x2748, 0x3f800000);
-	nv_wo32(chan, 0x274c, 0x3f000000);
-	nv_wo32(chan, 0x2754, 0x40000000);
-	nv_wo32(chan, 0x2758, 0x3f800000);
-	nv_wo32(chan, 0x2760, 0xbf800000);
-	nv_wo32(chan, 0x2768, 0xbf800000);
-	nv_wo32(chan, 0x308c, 0x000fe000);
-	nv_wo32(chan, 0x3108, 0x000003f8);
-	nv_wo32(chan, 0x3468, 0x002fe000);
+	nvkm_wo32(chan->inst, 0x2704, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2718, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2744, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2748, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x274c, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x2754, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2758, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2760, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x2768, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x308c, 0x000fe000);
+	nvkm_wo32(chan->inst, 0x3108, 0x000003f8);
+	nvkm_wo32(chan->inst, 0x3468, 0x002fe000);
 	for (i = 0x3484; i <= 0x34a0; i += 4)
-		nv_wo32(chan, i, 0x001c527c);
+		nvkm_wo32(chan->inst, i, 0x001c527c);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-static struct nvkm_oclass
-nv25_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x25),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv25_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static int
-nv25_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv25_gr_cclass;
-	nv_engine(priv)->sclass = nv25_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
-}
-
-struct nvkm_oclass
-nv25_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x25),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv25_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv20_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+static const struct nvkm_gr_func
+nv25_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv20_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv25_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
+		{ -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{ -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
+		{}
+	}
 };
+
+int
+nv25_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv25_gr, device, index, pgr);
+}
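
[Editor's note: mechanically, every context-image writer in this series follows the same shape. The object-relative nv_wo32(chan, ...) calls become nvkm_wo32(chan->inst, ...) on the backing nvkm_memory, bracketed by a single nvkm_kmap()/nvkm_done() pair, which by all appearances acquires and releases the mapping once per burst rather than per write. Distilled from the hunk above, with declarations and error handling trimmed:

	int i;

	nvkm_kmap(chan->inst);			/* map the instance once */
	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
	for (i = 0x0510; i <= 0x051c; i += 4)
		nvkm_wo32(chan->inst, i, 0x00030303);
	nvkm_done(chan->inst);			/* flush and unmap */
]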
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
index 22a5096..3cad26d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
@@ -1,125 +1,125 @@
 #include "nv20.h"
 #include "regs.h"
 
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 
 /*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
+static const struct nvkm_object_func
+nv2a_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
+};
+
 static int
-nv2a_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv2a_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x36b0,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv2a_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x36b0, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x033c, 0xffff0000);
-	nv_wo32(chan, 0x03a0, 0x0fff0000);
-	nv_wo32(chan, 0x03a4, 0x0fff0000);
-	nv_wo32(chan, 0x047c, 0x00000101);
-	nv_wo32(chan, 0x0490, 0x00000111);
-	nv_wo32(chan, 0x04a8, 0x44400000);
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0000, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x033c, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x03a0, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x03a4, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x047c, 0x00000101);
+	nvkm_wo32(chan->inst, 0x0490, 0x00000111);
+	nvkm_wo32(chan->inst, 0x04a8, 0x44400000);
 	for (i = 0x04d4; i <= 0x04e0; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x04f4; i <= 0x0500; i += 4)
-		nv_wo32(chan, i, 0x00080000);
+		nvkm_wo32(chan->inst, i, 0x00080000);
 	for (i = 0x050c; i <= 0x0518; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x051c; i <= 0x0528; i += 4)
-		nv_wo32(chan, i, 0x000105b8);
+		nvkm_wo32(chan->inst, i, 0x000105b8);
 	for (i = 0x052c; i <= 0x0538; i += 4)
-		nv_wo32(chan, i, 0x00080008);
+		nvkm_wo32(chan->inst, i, 0x00080008);
 	for (i = 0x055c; i <= 0x0598; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x05a4, 0x4b7fffff);
-	nv_wo32(chan, 0x05fc, 0x00000001);
-	nv_wo32(chan, 0x0604, 0x00004000);
-	nv_wo32(chan, 0x0610, 0x00000001);
-	nv_wo32(chan, 0x0618, 0x00040000);
-	nv_wo32(chan, 0x061c, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x05a4, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x05fc, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0604, 0x00004000);
+	nvkm_wo32(chan->inst, 0x0610, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0618, 0x00040000);
+	nvkm_wo32(chan->inst, 0x061c, 0x00010000);
 	for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
-		nv_wo32(chan, (i + 0), 0x10700ff9);
-		nv_wo32(chan, (i + 4), 0x0436086c);
-		nv_wo32(chan, (i + 8), 0x000c001b);
+		nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
+		nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
+		nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
 	}
-	nv_wo32(chan, 0x269c, 0x3f800000);
-	nv_wo32(chan, 0x26b0, 0x3f800000);
-	nv_wo32(chan, 0x26dc, 0x40000000);
-	nv_wo32(chan, 0x26e0, 0x3f800000);
-	nv_wo32(chan, 0x26e4, 0x3f000000);
-	nv_wo32(chan, 0x26ec, 0x40000000);
-	nv_wo32(chan, 0x26f0, 0x3f800000);
-	nv_wo32(chan, 0x26f8, 0xbf800000);
-	nv_wo32(chan, 0x2700, 0xbf800000);
-	nv_wo32(chan, 0x3024, 0x000fe000);
-	nv_wo32(chan, 0x30a0, 0x000003f8);
-	nv_wo32(chan, 0x33fc, 0x002fe000);
+	nvkm_wo32(chan->inst, 0x269c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x26b0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x26dc, 0x40000000);
+	nvkm_wo32(chan->inst, 0x26e0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x26e4, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x26ec, 0x40000000);
+	nvkm_wo32(chan->inst, 0x26f0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x26f8, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x2700, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x3024, 0x000fe000);
+	nvkm_wo32(chan->inst, 0x30a0, 0x000003f8);
+	nvkm_wo32(chan->inst, 0x33fc, 0x002fe000);
 	for (i = 0x341c; i <= 0x3438; i += 4)
-		nv_wo32(chan, i, 0x001c527c);
+		nvkm_wo32(chan->inst, i, 0x001c527c);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-static struct nvkm_oclass
-nv2a_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x2a),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv2a_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static int
-nv2a_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv2a_gr_cclass;
-	nv_engine(priv)->sclass = nv25_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
-}
-
-struct nvkm_oclass
-nv2a_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x2a),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv2a_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv20_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+static const struct nvkm_gr_func
+nv2a_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv20_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv2a_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
+		{ -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{ -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
+		{}
+	}
 };
+
+int
+nv2a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv2a_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index dcc84eb..69de8c62 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -1,231 +1,198 @@
 #include "nv20.h"
 #include "regs.h"
 
-#include <core/device.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
+#include <engine/fifo/chan.h>
 #include <subdev/fb.h>
 
 /*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv30_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
-	{ 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
-	{ 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
-	{ 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
-	{ 0x0397, &nv04_gr_ofuncs, NULL }, /* rankine */
-	{},
-};
-
-/*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
+static const struct nvkm_object_func
+nv30_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
+};
+
 static int
-nv30_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x5f48,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv30_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x5f48, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x0410, 0x00000101);
-	nv_wo32(chan, 0x0424, 0x00000111);
-	nv_wo32(chan, 0x0428, 0x00000060);
-	nv_wo32(chan, 0x0444, 0x00000080);
-	nv_wo32(chan, 0x0448, 0xffff0000);
-	nv_wo32(chan, 0x044c, 0x00000001);
-	nv_wo32(chan, 0x0460, 0x44400000);
-	nv_wo32(chan, 0x048c, 0xffff0000);
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x0410, 0x00000101);
+	nvkm_wo32(chan->inst, 0x0424, 0x00000111);
+	nvkm_wo32(chan->inst, 0x0428, 0x00000060);
+	nvkm_wo32(chan->inst, 0x0444, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0448, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x044c, 0x00000001);
+	nvkm_wo32(chan->inst, 0x0460, 0x44400000);
+	nvkm_wo32(chan->inst, 0x048c, 0xffff0000);
 	for (i = 0x04e0; i < 0x04e8; i += 4)
-		nv_wo32(chan, i, 0x0fff0000);
-	nv_wo32(chan, 0x04ec, 0x00011100);
+		nvkm_wo32(chan->inst, i, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x04ec, 0x00011100);
 	for (i = 0x0508; i < 0x0548; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x0550, 0x4b7fffff);
-	nv_wo32(chan, 0x058c, 0x00000080);
-	nv_wo32(chan, 0x0590, 0x30201000);
-	nv_wo32(chan, 0x0594, 0x70605040);
-	nv_wo32(chan, 0x0598, 0xb8a89888);
-	nv_wo32(chan, 0x059c, 0xf8e8d8c8);
-	nv_wo32(chan, 0x05b0, 0xb0000000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x0550, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x058c, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0590, 0x30201000);
+	nvkm_wo32(chan->inst, 0x0594, 0x70605040);
+	nvkm_wo32(chan->inst, 0x0598, 0xb8a89888);
+	nvkm_wo32(chan->inst, 0x059c, 0xf8e8d8c8);
+	nvkm_wo32(chan->inst, 0x05b0, 0xb0000000);
 	for (i = 0x0600; i < 0x0640; i += 4)
-		nv_wo32(chan, i, 0x00010588);
+		nvkm_wo32(chan->inst, i, 0x00010588);
 	for (i = 0x0640; i < 0x0680; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x06c0; i < 0x0700; i += 4)
-		nv_wo32(chan, i, 0x0008aae4);
+		nvkm_wo32(chan->inst, i, 0x0008aae4);
 	for (i = 0x0700; i < 0x0740; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x0740; i < 0x0780; i += 4)
-		nv_wo32(chan, i, 0x00080008);
-	nv_wo32(chan, 0x085c, 0x00040000);
-	nv_wo32(chan, 0x0860, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x00080008);
+	nvkm_wo32(chan->inst, 0x085c, 0x00040000);
+	nvkm_wo32(chan->inst, 0x0860, 0x00010000);
 	for (i = 0x0864; i < 0x0874; i += 4)
-		nv_wo32(chan, i, 0x00040004);
+		nvkm_wo32(chan->inst, i, 0x00040004);
 	for (i = 0x1f18; i <= 0x3088 ; i += 16) {
-		nv_wo32(chan, i + 0, 0x10700ff9);
-		nv_wo32(chan, i + 1, 0x0436086c);
-		nv_wo32(chan, i + 2, 0x000c001b);
+		nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
+		nvkm_wo32(chan->inst, i + 1, 0x0436086c);
+		nvkm_wo32(chan->inst, i + 2, 0x000c001b);
 	}
 	for (i = 0x30b8; i < 0x30c8; i += 4)
-		nv_wo32(chan, i, 0x0000ffff);
-	nv_wo32(chan, 0x344c, 0x3f800000);
-	nv_wo32(chan, 0x3808, 0x3f800000);
-	nv_wo32(chan, 0x381c, 0x3f800000);
-	nv_wo32(chan, 0x3848, 0x40000000);
-	nv_wo32(chan, 0x384c, 0x3f800000);
-	nv_wo32(chan, 0x3850, 0x3f000000);
-	nv_wo32(chan, 0x3858, 0x40000000);
-	nv_wo32(chan, 0x385c, 0x3f800000);
-	nv_wo32(chan, 0x3864, 0xbf800000);
-	nv_wo32(chan, 0x386c, 0xbf800000);
+		nvkm_wo32(chan->inst, i, 0x0000ffff);
+	nvkm_wo32(chan->inst, 0x344c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3808, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x381c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3848, 0x40000000);
+	nvkm_wo32(chan->inst, 0x384c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3850, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x3858, 0x40000000);
+	nvkm_wo32(chan->inst, 0x385c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3864, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x386c, 0xbf800000);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-static struct nvkm_oclass
-nv30_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x30),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv30_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static int
-nv30_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv30_gr_cclass;
-	nv_engine(priv)->sclass = nv30_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
-}
-
 int
-nv30_gr_init(struct nvkm_object *object)
+nv30_gr_init(struct nvkm_gr *base)
 {
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nv20_gr_priv *priv = (void *)engine;
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	int ret, i;
+	struct nv20_gr *gr = nv20_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, NV20_PGRAPH_CHANNEL_CTX_TABLE,
+			  nvkm_memory_addr(gr->ctxtab) >> 4);
 
-	nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
+	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
-	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
-	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+	nvkm_wr32(device, 0x400890, 0x01b463ff);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
+	nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000);
+	nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
+	nvkm_wr32(device, 0x400B80, 0x1003d888);
+	nvkm_wr32(device, 0x400B84, 0x0c000000);
+	nvkm_wr32(device, 0x400098, 0x00000000);
+	nvkm_wr32(device, 0x40009C, 0x0005ad00);
+	nvkm_wr32(device, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
+	nvkm_wr32(device, 0x4000a0, 0x00000000);
+	nvkm_wr32(device, 0x4000a4, 0x00000008);
+	nvkm_wr32(device, 0x4008a8, 0xb784a400);
+	nvkm_wr32(device, 0x400ba0, 0x002f8685);
+	nvkm_wr32(device, 0x400ba4, 0x00231f3f);
+	nvkm_wr32(device, 0x4008a4, 0x40000020);
 
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
-	nv_wr32(priv, 0x400890, 0x01b463ff);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
-	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
-	nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
-	nv_wr32(priv, 0x400B80, 0x1003d888);
-	nv_wr32(priv, 0x400B84, 0x0c000000);
-	nv_wr32(priv, 0x400098, 0x00000000);
-	nv_wr32(priv, 0x40009C, 0x0005ad00);
-	nv_wr32(priv, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
-	nv_wr32(priv, 0x4000a0, 0x00000000);
-	nv_wr32(priv, 0x4000a4, 0x00000008);
-	nv_wr32(priv, 0x4008a8, 0xb784a400);
-	nv_wr32(priv, 0x400ba0, 0x002f8685);
-	nv_wr32(priv, 0x400ba4, 0x00231f3f);
-	nv_wr32(priv, 0x4008a4, 0x40000020);
-
-	if (nv_device(priv)->chipset == 0x34) {
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00200201);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000008);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000032);
-		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
-		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000002);
+	if (device->chipset == 0x34) {
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00200201);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000008);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000032);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
+		nvkm_wr32(device, NV10_PGRAPH_RDI_DATA , 0x00000002);
 	}
 
-	nv_wr32(priv, 0x4000c0, 0x00000016);
+	nvkm_wr32(device, 0x4000c0, 0x00000016);
 
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < pfb->tile.regions; i++)
-		engine->tile_prog(engine, i);
-
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
-	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
-	nv_wr32(priv, 0x0040075c             , 0x00000001);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nvkm_wr32(device, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
+	nvkm_wr32(device, 0x0040075c             , 0x00000001);
 
 	/* begin RAM config */
-	/* vramsz = pci_resource_len(priv->dev->pdev, 0) - 1; */
-	nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
-	nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
-	if (nv_device(priv)->chipset != 0x34) {
-		nv_wr32(priv, 0x400750, 0x00EA0000);
-		nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100200));
-		nv_wr32(priv, 0x400750, 0x00EA0004);
-		nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100204));
+	/* vramsz = pci_resource_len(gr->dev->pdev, 1) - 1; */
+	nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
+	nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
+	if (device->chipset != 0x34) {
+		nvkm_wr32(device, 0x400750, 0x00EA0000);
+		nvkm_wr32(device, 0x400754, nvkm_rd32(device, 0x100200));
+		nvkm_wr32(device, 0x400750, 0x00EA0004);
+		nvkm_wr32(device, 0x400754, nvkm_rd32(device, 0x100204));
 	}
+
 	return 0;
 }
 
-struct nvkm_oclass
-nv30_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x30),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv30_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv30_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+static const struct nvkm_gr_func
+nv30_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv30_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv30_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{ -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
+		{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
+		{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
+		{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+		{ -1, -1, 0x0397, &nv04_gr_object }, /* rankine */
+		{}
+	}
 };
+
+int
+nv30_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv30_gr, device, index, pgr);
+}
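
[Editor's note: two things change in nv30_gr_init() beyond renames. First, register access is now device-relative: nv_rd32(priv, ...)/nv_wr32(priv, ...) become nvkm_rd32(device, ...)/nvkm_wr32(device, ...) with the device fetched once from gr->base.engine.subdev. Second, the explicit "turn all the tiling regions off" loop over pfb->tile.regions is gone; with .tile now an nvkm_gr_func entry, that walk presumably lives in the shared gr code. The accessor change in miniature, using the RAM-config mirror from the hunk above:

	struct nvkm_device *device = gr->base.engine.subdev.device;

	/* mirror the FB size/config registers into PGRAPH */
	nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
	nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
]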
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 985b7f3..2207dac2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -1,159 +1,135 @@
 #include "nv20.h"
 #include "regs.h"
 
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
-
-/*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv34_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
-	{ 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
-	{ 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
-	{ 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
-	{ 0x0697, &nv04_gr_ofuncs, NULL }, /* rankine */
-	{},
-};
+#include <engine/fifo/chan.h>
 
 /*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
+static const struct nvkm_object_func
+nv34_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
+};
+
 static int
-nv34_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x46dc,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv34_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x46dc, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x040c, 0x01000101);
-	nv_wo32(chan, 0x0420, 0x00000111);
-	nv_wo32(chan, 0x0424, 0x00000060);
-	nv_wo32(chan, 0x0440, 0x00000080);
-	nv_wo32(chan, 0x0444, 0xffff0000);
-	nv_wo32(chan, 0x0448, 0x00000001);
-	nv_wo32(chan, 0x045c, 0x44400000);
-	nv_wo32(chan, 0x0480, 0xffff0000);
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x040c, 0x01000101);
+	nvkm_wo32(chan->inst, 0x0420, 0x00000111);
+	nvkm_wo32(chan->inst, 0x0424, 0x00000060);
+	nvkm_wo32(chan->inst, 0x0440, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0444, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x0448, 0x00000001);
+	nvkm_wo32(chan->inst, 0x045c, 0x44400000);
+	nvkm_wo32(chan->inst, 0x0480, 0xffff0000);
 	for (i = 0x04d4; i < 0x04dc; i += 4)
-		nv_wo32(chan, i, 0x0fff0000);
-	nv_wo32(chan, 0x04e0, 0x00011100);
+		nvkm_wo32(chan->inst, i, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x04e0, 0x00011100);
 	for (i = 0x04fc; i < 0x053c; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x0544, 0x4b7fffff);
-	nv_wo32(chan, 0x057c, 0x00000080);
-	nv_wo32(chan, 0x0580, 0x30201000);
-	nv_wo32(chan, 0x0584, 0x70605040);
-	nv_wo32(chan, 0x0588, 0xb8a89888);
-	nv_wo32(chan, 0x058c, 0xf8e8d8c8);
-	nv_wo32(chan, 0x05a0, 0xb0000000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x0544, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x057c, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0580, 0x30201000);
+	nvkm_wo32(chan->inst, 0x0584, 0x70605040);
+	nvkm_wo32(chan->inst, 0x0588, 0xb8a89888);
+	nvkm_wo32(chan->inst, 0x058c, 0xf8e8d8c8);
+	nvkm_wo32(chan->inst, 0x05a0, 0xb0000000);
 	for (i = 0x05f0; i < 0x0630; i += 4)
-		nv_wo32(chan, i, 0x00010588);
+		nvkm_wo32(chan->inst, i, 0x00010588);
 	for (i = 0x0630; i < 0x0670; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x06b0; i < 0x06f0; i += 4)
-		nv_wo32(chan, i, 0x0008aae4);
+		nvkm_wo32(chan->inst, i, 0x0008aae4);
 	for (i = 0x06f0; i < 0x0730; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x0730; i < 0x0770; i += 4)
-		nv_wo32(chan, i, 0x00080008);
-	nv_wo32(chan, 0x0850, 0x00040000);
-	nv_wo32(chan, 0x0854, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x00080008);
+	nvkm_wo32(chan->inst, 0x0850, 0x00040000);
+	nvkm_wo32(chan->inst, 0x0854, 0x00010000);
 	for (i = 0x0858; i < 0x0868; i += 4)
-		nv_wo32(chan, i, 0x00040004);
+		nvkm_wo32(chan->inst, i, 0x00040004);
 	for (i = 0x15ac; i <= 0x271c ; i += 16) {
-		nv_wo32(chan, i + 0, 0x10700ff9);
-		nv_wo32(chan, i + 1, 0x0436086c);
-		nv_wo32(chan, i + 2, 0x000c001b);
+		nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
+		nvkm_wo32(chan->inst, i + 1, 0x0436086c);
+		nvkm_wo32(chan->inst, i + 2, 0x000c001b);
 	}
 	for (i = 0x274c; i < 0x275c; i += 4)
-		nv_wo32(chan, i, 0x0000ffff);
-	nv_wo32(chan, 0x2ae0, 0x3f800000);
-	nv_wo32(chan, 0x2e9c, 0x3f800000);
-	nv_wo32(chan, 0x2eb0, 0x3f800000);
-	nv_wo32(chan, 0x2edc, 0x40000000);
-	nv_wo32(chan, 0x2ee0, 0x3f800000);
-	nv_wo32(chan, 0x2ee4, 0x3f000000);
-	nv_wo32(chan, 0x2eec, 0x40000000);
-	nv_wo32(chan, 0x2ef0, 0x3f800000);
-	nv_wo32(chan, 0x2ef8, 0xbf800000);
-	nv_wo32(chan, 0x2f00, 0xbf800000);
+		nvkm_wo32(chan->inst, i, 0x0000ffff);
+	nvkm_wo32(chan->inst, 0x2ae0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2e9c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2eb0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2edc, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2ee0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2ee4, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x2eec, 0x40000000);
+	nvkm_wo32(chan->inst, 0x2ef0, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x2ef8, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x2f00, 0xbf800000);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-static struct nvkm_oclass
-nv34_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x34),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv34_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static int
-nv34_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv34_gr_cclass;
-	nv_engine(priv)->sclass = nv34_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
-}
-
-struct nvkm_oclass
-nv34_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x34),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv34_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv30_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+static const struct nvkm_gr_func
+nv34_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv30_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv34_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{ -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
+		{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
+		{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
+		{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+		{ -1, -1, 0x0697, &nv04_gr_object }, /* rankine */
+		{}
+	}
 };
+
+int
+nv34_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv34_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 707625f..740df0f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -1,159 +1,135 @@
 #include "nv20.h"
 #include "regs.h"
 
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
-
-/*******************************************************************************
- * Graphics object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-nv35_gr_sclass[] = {
-	{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
-	{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
-	{ 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
-	{ 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
-	{ 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
-	{ 0x0497, &nv04_gr_ofuncs, NULL }, /* rankine */
-	{},
-};
+#include <engine/fifo/chan.h>
 
 /*******************************************************************************
  * PGRAPH context
  ******************************************************************************/
 
+static const struct nvkm_object_func
+nv35_gr_chan = {
+	.dtor = nv20_gr_chan_dtor,
+	.init = nv20_gr_chan_init,
+	.fini = nv20_gr_chan_fini,
+};
+
 static int
-nv35_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv35_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
 {
+	struct nv20_gr *gr = nv20_gr(base);
 	struct nv20_gr_chan *chan;
 	int ret, i;
 
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x577c,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv35_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	chan->chid = fifoch->chid;
+	*pobject = &chan->object;
+
+	ret = nvkm_memory_new(gr->base.engine.subdev.device,
+			      NVKM_MEM_TARGET_INST, 0x577c, 16, true,
+			      &chan->inst);
 	if (ret)
 		return ret;
 
-	chan->chid = nvkm_fifo_chan(parent)->chid;
-
-	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
-	nv_wo32(chan, 0x040c, 0x00000101);
-	nv_wo32(chan, 0x0420, 0x00000111);
-	nv_wo32(chan, 0x0424, 0x00000060);
-	nv_wo32(chan, 0x0440, 0x00000080);
-	nv_wo32(chan, 0x0444, 0xffff0000);
-	nv_wo32(chan, 0x0448, 0x00000001);
-	nv_wo32(chan, 0x045c, 0x44400000);
-	nv_wo32(chan, 0x0488, 0xffff0000);
+	nvkm_kmap(chan->inst);
+	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
+	nvkm_wo32(chan->inst, 0x040c, 0x00000101);
+	nvkm_wo32(chan->inst, 0x0420, 0x00000111);
+	nvkm_wo32(chan->inst, 0x0424, 0x00000060);
+	nvkm_wo32(chan->inst, 0x0440, 0x00000080);
+	nvkm_wo32(chan->inst, 0x0444, 0xffff0000);
+	nvkm_wo32(chan->inst, 0x0448, 0x00000001);
+	nvkm_wo32(chan->inst, 0x045c, 0x44400000);
+	nvkm_wo32(chan->inst, 0x0488, 0xffff0000);
 	for (i = 0x04dc; i < 0x04e4; i += 4)
-		nv_wo32(chan, i, 0x0fff0000);
-	nv_wo32(chan, 0x04e8, 0x00011100);
+		nvkm_wo32(chan->inst, i, 0x0fff0000);
+	nvkm_wo32(chan->inst, 0x04e8, 0x00011100);
 	for (i = 0x0504; i < 0x0544; i += 4)
-		nv_wo32(chan, i, 0x07ff0000);
-	nv_wo32(chan, 0x054c, 0x4b7fffff);
-	nv_wo32(chan, 0x0588, 0x00000080);
-	nv_wo32(chan, 0x058c, 0x30201000);
-	nv_wo32(chan, 0x0590, 0x70605040);
-	nv_wo32(chan, 0x0594, 0xb8a89888);
-	nv_wo32(chan, 0x0598, 0xf8e8d8c8);
-	nv_wo32(chan, 0x05ac, 0xb0000000);
+		nvkm_wo32(chan->inst, i, 0x07ff0000);
+	nvkm_wo32(chan->inst, 0x054c, 0x4b7fffff);
+	nvkm_wo32(chan->inst, 0x0588, 0x00000080);
+	nvkm_wo32(chan->inst, 0x058c, 0x30201000);
+	nvkm_wo32(chan->inst, 0x0590, 0x70605040);
+	nvkm_wo32(chan->inst, 0x0594, 0xb8a89888);
+	nvkm_wo32(chan->inst, 0x0598, 0xf8e8d8c8);
+	nvkm_wo32(chan->inst, 0x05ac, 0xb0000000);
 	for (i = 0x0604; i < 0x0644; i += 4)
-		nv_wo32(chan, i, 0x00010588);
+		nvkm_wo32(chan->inst, i, 0x00010588);
 	for (i = 0x0644; i < 0x0684; i += 4)
-		nv_wo32(chan, i, 0x00030303);
+		nvkm_wo32(chan->inst, i, 0x00030303);
 	for (i = 0x06c4; i < 0x0704; i += 4)
-		nv_wo32(chan, i, 0x0008aae4);
+		nvkm_wo32(chan->inst, i, 0x0008aae4);
 	for (i = 0x0704; i < 0x0744; i += 4)
-		nv_wo32(chan, i, 0x01012000);
+		nvkm_wo32(chan->inst, i, 0x01012000);
 	for (i = 0x0744; i < 0x0784; i += 4)
-		nv_wo32(chan, i, 0x00080008);
-	nv_wo32(chan, 0x0860, 0x00040000);
-	nv_wo32(chan, 0x0864, 0x00010000);
+		nvkm_wo32(chan->inst, i, 0x00080008);
+	nvkm_wo32(chan->inst, 0x0860, 0x00040000);
+	nvkm_wo32(chan->inst, 0x0864, 0x00010000);
 	for (i = 0x0868; i < 0x0878; i += 4)
-		nv_wo32(chan, i, 0x00040004);
+		nvkm_wo32(chan->inst, i, 0x00040004);
 	for (i = 0x1f1c; i <= 0x308c ; i += 16) {
-		nv_wo32(chan, i + 0, 0x10700ff9);
-		nv_wo32(chan, i + 4, 0x0436086c);
-		nv_wo32(chan, i + 8, 0x000c001b);
+		nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
+		nvkm_wo32(chan->inst, i + 4, 0x0436086c);
+		nvkm_wo32(chan->inst, i + 8, 0x000c001b);
 	}
 	for (i = 0x30bc; i < 0x30cc; i += 4)
-		nv_wo32(chan, i, 0x0000ffff);
-	nv_wo32(chan, 0x3450, 0x3f800000);
-	nv_wo32(chan, 0x380c, 0x3f800000);
-	nv_wo32(chan, 0x3820, 0x3f800000);
-	nv_wo32(chan, 0x384c, 0x40000000);
-	nv_wo32(chan, 0x3850, 0x3f800000);
-	nv_wo32(chan, 0x3854, 0x3f000000);
-	nv_wo32(chan, 0x385c, 0x40000000);
-	nv_wo32(chan, 0x3860, 0x3f800000);
-	nv_wo32(chan, 0x3868, 0xbf800000);
-	nv_wo32(chan, 0x3870, 0xbf800000);
+		nvkm_wo32(chan->inst, i, 0x0000ffff);
+	nvkm_wo32(chan->inst, 0x3450, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x380c, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3820, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x384c, 0x40000000);
+	nvkm_wo32(chan->inst, 0x3850, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3854, 0x3f000000);
+	nvkm_wo32(chan->inst, 0x385c, 0x40000000);
+	nvkm_wo32(chan->inst, 0x3860, 0x3f800000);
+	nvkm_wo32(chan->inst, 0x3868, 0xbf800000);
+	nvkm_wo32(chan->inst, 0x3870, 0xbf800000);
+	nvkm_done(chan->inst);
 	return 0;
 }
 
-static struct nvkm_oclass
-nv35_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x35),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv35_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = nv20_gr_context_init,
-		.fini = nv20_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
-};
-
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static int
-nv35_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv20_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv20_gr_intr;
-	nv_engine(priv)->cclass = &nv35_gr_cclass;
-	nv_engine(priv)->sclass = nv35_gr_sclass;
-	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
-	return 0;
-}
-
-struct nvkm_oclass
-nv35_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x35),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv35_gr_ctor,
-		.dtor = nv20_gr_dtor,
-		.init = nv30_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+static const struct nvkm_gr_func
+nv35_gr = {
+	.dtor = nv20_gr_dtor,
+	.oneinit = nv20_gr_oneinit,
+	.init = nv30_gr_init,
+	.intr = nv20_gr_intr,
+	.tile = nv20_gr_tile,
+	.chan_new = nv35_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
+		{ -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
+		{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
+		{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
+		{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
+		{ -1, -1, 0x0497, &nv04_gr_object }, /* rankine */
+		{}
+	}
 };
+
+int
+nv35_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv20_gr_new_(&nv35_gr, device, index, pgr);
+}
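
[Editor's note: the nv40 diff that follows departs from the nv2x/nv3x pattern in two ways worth flagging before reading it. Channel objects are no longer backed by a context image at create time; the image is allocated in the .bind hook, and each channel is threaded onto a per-engine list under engine.lock, presumably so other paths (interrupt handling, context switching) can map an instance address back to a channel. And the open-coded nv_wait() poll in the fini path becomes an nvkm_msec() loop with an explicit break condition. The list handling in miniature, from the hunks below:

	unsigned long flags;

	spin_lock_irqsave(&gr->base.engine.lock, flags);
	list_add(&chan->head, &gr->chan);	/* ctor; dtor does list_del */
	spin_unlock_irqrestore(&gr->base.engine.lock, flags);
]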
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index 7e19379..ffa902e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -25,26 +25,15 @@
 #include "regs.h"
 
 #include <core/client.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/timer.h>
 #include <engine/fifo.h>
 
-struct nv40_gr_priv {
-	struct nvkm_gr base;
-	u32 size;
-};
-
-struct nv40_gr_chan {
-	struct nvkm_gr_chan base;
-};
-
-static u64
+u64
 nv40_gr_units(struct nvkm_gr *gr)
 {
-	struct nv40_gr_priv *priv = (void *)gr;
-
-	return nv_rd32(priv, 0x1540);
+	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
 }
 
 /*******************************************************************************
@@ -52,80 +41,29 @@
  ******************************************************************************/
 
 static int
-nv40_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+nv40_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		    int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *obj;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 20, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, nv_mclass(obj));
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 20, align,
+				  false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
 #ifdef __BIG_ENDIAN
-	nv_mo32(obj, 0x08, 0x01000000, 0x01000000);
+		nvkm_mo32(*pgpuobj, 0x08, 0x01000000, 0x01000000);
 #endif
-	nv_wo32(obj, 0x0c, 0x00000000);
-	nv_wo32(obj, 0x10, 0x00000000);
-	return 0;
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-static struct nvkm_ofuncs
-nv40_gr_ofuncs = {
-	.ctor = nv40_gr_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_oclass
-nv40_gr_sclass[] = {
-	{ 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
-	{ 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
-	{ 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
-	{ 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
-	{ 0x4097, &nv40_gr_ofuncs, NULL }, /* curie */
-	{},
-};
-
-static struct nvkm_oclass
-nv44_gr_sclass[] = {
-	{ 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
-	{ 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
-	{ 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
-	{ 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
-	{ 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
-	{ 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
-	{ 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
-	{ 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
-	{ 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
-	{ 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
-	{ 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
-	{ 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
-	{ 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
-	{ 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
-	{ 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
-	{ 0x4497, &nv40_gr_ofuncs, NULL }, /* curie */
-	{},
+const struct nvkm_object_func
+nv40_gr_object = {
+	.bind = nv40_gr_object_bind,
 };
 
 /*******************************************************************************
@@ -133,361 +71,334 @@
  ******************************************************************************/
 
 static int
-nv40_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv40_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		  int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nv40_gr_priv *priv = (void *)engine;
-	struct nv40_gr_chan *chan;
-	int ret;
-
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, priv->size,
-				     16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	nv40_grctx_fill(nv_device(priv), nv_gpuobj(chan));
-	nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4);
-	return 0;
+	struct nv40_gr_chan *chan = nv40_gr_chan(object);
+	struct nv40_gr *gr = chan->gr;
+	int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
+				  align, true, parent, pgpuobj);
+	if (ret == 0) {
+		chan->inst = (*pgpuobj)->addr;
+		nvkm_kmap(*pgpuobj);
+		nv40_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00000, chan->inst >> 4);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
 static int
-nv40_gr_context_fini(struct nvkm_object *object, bool suspend)
+nv40_gr_chan_fini(struct nvkm_object *object, bool suspend)
 {
-	struct nv40_gr_priv *priv = (void *)object->engine;
-	struct nv40_gr_chan *chan = (void *)object;
-	u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
+	struct nv40_gr_chan *chan = nv40_gr_chan(object);
+	struct nv40_gr *gr = chan->gr;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 inst = 0x01000000 | chan->inst >> 4;
 	int ret = 0;
 
-	nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);
 
-	if (nv_rd32(priv, 0x40032c) == inst) {
+	if (nvkm_rd32(device, 0x40032c) == inst) {
 		if (suspend) {
-			nv_wr32(priv, 0x400720, 0x00000000);
-			nv_wr32(priv, 0x400784, inst);
-			nv_mask(priv, 0x400310, 0x00000020, 0x00000020);
-			nv_mask(priv, 0x400304, 0x00000001, 0x00000001);
-			if (!nv_wait(priv, 0x400300, 0x00000001, 0x00000000)) {
-				u32 insn = nv_rd32(priv, 0x400308);
-				nv_warn(priv, "ctxprog timeout 0x%08x\n", insn);
+			nvkm_wr32(device, 0x400720, 0x00000000);
+			nvkm_wr32(device, 0x400784, inst);
+			nvkm_mask(device, 0x400310, 0x00000020, 0x00000020);
+			nvkm_mask(device, 0x400304, 0x00000001, 0x00000001);
+			if (nvkm_msec(device, 2000,
+				if (!(nvkm_rd32(device, 0x400300) & 0x00000001))
+					break;
+			) < 0) {
+				u32 insn = nvkm_rd32(device, 0x400308);
+				nvkm_warn(subdev, "ctxprog timeout %08x\n", insn);
 				ret = -EBUSY;
 			}
 		}
 
-		nv_mask(priv, 0x40032c, 0x01000000, 0x00000000);
+		nvkm_mask(device, 0x40032c, 0x01000000, 0x00000000);
 	}
 
-	if (nv_rd32(priv, 0x400330) == inst)
-		nv_mask(priv, 0x400330, 0x01000000, 0x00000000);
+	if (nvkm_rd32(device, 0x400330) == inst)
+		nvkm_mask(device, 0x400330, 0x01000000, 0x00000000);
 
-	nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x400720, 0x00000001, 0x00000001);
 	return ret;
 }
 
-static struct nvkm_oclass
-nv40_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = nv40_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+static void *
+nv40_gr_chan_dtor(struct nvkm_object *object)
+{
+	struct nv40_gr_chan *chan = nv40_gr_chan(object);
+	unsigned long flags;
+	spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
+	list_del(&chan->head);
+	spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
+	return chan;
+}
+
+static const struct nvkm_object_func
+nv40_gr_chan = {
+	.dtor = nv40_gr_chan_dtor,
+	.fini = nv40_gr_chan_fini,
+	.bind = nv40_gr_chan_bind,
 };
 
+int
+nv40_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
+{
+	struct nv40_gr *gr = nv40_gr(base);
+	struct nv40_gr_chan *chan;
+	unsigned long flags;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv40_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	*pobject = &chan->object;
+
+	spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
+	list_add(&chan->head, &gr->chan);
+	spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
+	return 0;
+}
+
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
 static void
-nv40_gr_tile_prog(struct nvkm_engine *engine, int i)
+nv40_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
 {
-	struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
-	struct nvkm_fifo *pfifo = nvkm_fifo(engine);
-	struct nv40_gr_priv *priv = (void *)engine;
+	struct nv40_gr *gr = nv40_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
 	unsigned long flags;
 
-	pfifo->pause(pfifo, &flags);
-	nv04_gr_idle(priv);
+	nvkm_fifo_pause(fifo, &flags);
+	nv04_gr_idle(&gr->base);
 
-	switch (nv_device(priv)->chipset) {
+	switch (device->chipset) {
 	case 0x40:
 	case 0x41:
 	case 0x42:
 	case 0x43:
 	case 0x45:
-	case 0x4e:
-		nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
-		nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
-		nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
-		nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
-		nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
-		nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
-		switch (nv_device(priv)->chipset) {
+		nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
+		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
+		switch (device->chipset) {
 		case 0x40:
 		case 0x45:
-			nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
-			nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
+			nvkm_wr32(device, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+			nvkm_wr32(device, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
 			break;
 		case 0x41:
 		case 0x42:
 		case 0x43:
-			nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
-			nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
+			nvkm_wr32(device, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
+			nvkm_wr32(device, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
 			break;
 		default:
 			break;
 		}
 		break;
-	case 0x44:
-	case 0x4a:
-		nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
-		nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
-		nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
-		break;
-	case 0x46:
-	case 0x4c:
 	case 0x47:
 	case 0x49:
 	case 0x4b:
-	case 0x63:
-	case 0x67:
-	case 0x68:
-		nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
-		nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
-		nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
-		nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
-		nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
-		nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
-		switch (nv_device(priv)->chipset) {
-		case 0x47:
-		case 0x49:
-		case 0x4b:
-			nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
-			nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
-			break;
-		default:
-			break;
-		}
+		nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch);
+		nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit);
+		nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr);
+		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
+		nvkm_wr32(device, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
+		nvkm_wr32(device, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
 		break;
 	default:
+		WARN_ON(1);
 		break;
 	}
 
-	pfifo->start(pfifo, &flags);
+	nvkm_fifo_start(fifo, &flags);
 }
 
-static void
-nv40_gr_intr(struct nvkm_subdev *subdev)
+void
+nv40_gr_intr(struct nvkm_gr *base)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct nvkm_handle *handle = NULL;
-	struct nv40_gr_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
-	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
-	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
-	u32 inst = nv_rd32(priv, 0x40032c) & 0x000fffff;
-	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	struct nv40_gr *gr = nv40_gr(base);
+	struct nv40_gr_chan *temp, *chan = NULL;
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, NV03_PGRAPH_INTR);
+	u32 nsource = nvkm_rd32(device, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nvkm_rd32(device, NV03_PGRAPH_NSTATUS);
+	u32 inst = nvkm_rd32(device, 0x40032c) & 0x000fffff;
+	u32 addr = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_ADDR);
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00001ffc);
-	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
-	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xffff;
+	u32 data = nvkm_rd32(device, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nvkm_rd32(device, 0x400160 + subc * 4) & 0xffff;
 	u32 show = stat;
-	int chid;
+	char msg[128], src[128], sta[128];
+	unsigned long flags;
 
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
+	spin_lock_irqsave(&gr->base.engine.lock, flags);
+	list_for_each_entry(temp, &gr->chan, head) {
+		if (temp->inst >> 4 == inst) {
+			chan = temp;
+			list_del(&chan->head);
+			list_add(&chan->head, &gr->chan);
+			break;
+		}
+	}
 
 	if (stat & NV_PGRAPH_INTR_ERROR) {
-		if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-			handle = nvkm_handle_get_class(engctx, class);
-			if (handle && !nv_call(handle->object, mthd, data))
-				show &= ~NV_PGRAPH_INTR_ERROR;
-			nvkm_handle_put(handle);
-		}
-
 		if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
-			nv_mask(priv, 0x402000, 0, 0);
+			nvkm_mask(device, 0x402000, 0, 0);
 		}
 	}
 
-	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
-	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+	nvkm_wr32(device, NV03_PGRAPH_INTR, stat);
+	nvkm_wr32(device, NV04_PGRAPH_FIFO, 0x00000001);
 
 	if (show) {
-		nv_error(priv, "%s", "");
-		nvkm_bitfield_print(nv10_gr_intr_name, show);
-		pr_cont(" nsource:");
-		nvkm_bitfield_print(nv04_gr_nsource, nsource);
-		pr_cont(" nstatus:");
-		nvkm_bitfield_print(nv10_gr_nstatus, nstatus);
-		pr_cont("\n");
-		nv_error(priv,
-			 "ch %d [0x%08x %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, inst << 4, nvkm_client_name(engctx), subc,
-			 class, mthd, data);
+		nvkm_snprintbf(msg, sizeof(msg), nv10_gr_intr_name, show);
+		nvkm_snprintbf(src, sizeof(src), nv04_gr_nsource, nsource);
+		nvkm_snprintbf(sta, sizeof(sta), nv10_gr_nstatus, nstatus);
+		nvkm_error(subdev, "intr %08x [%s] nsource %08x [%s] "
+				   "nstatus %08x [%s] ch %d [%08x %s] subc %d "
+				   "class %04x mthd %04x data %08x\n",
+			   show, msg, nsource, src, nstatus, sta,
+			   chan ? chan->fifo->chid : -1, inst << 4,
+			   chan ? chan->fifo->object.client->name : "unknown",
+			   subc, class, mthd, data);
 	}
 
-	nvkm_engctx_put(engctx);
+	spin_unlock_irqrestore(&gr->base.engine.lock, flags);
 }
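
The rewritten nv40_gr_intr() above no longer goes through nvkm_engctx_get(); it resolves the faulting channel itself by scanning gr->chan for a matching instance address under the engine lock, and the list_del()/list_add() pair rotates the hit to the front so repeated faults from the same channel are found on the first comparison. A self-contained sketch of that move-to-front scan, with plain pointers standing in for <linux/list.h> and hypothetical names:

#include <stdio.h>

struct chan {
	unsigned inst;			/* as in nv40_gr_chan.inst */
	struct chan *prev, *next;
};

static struct chan *chan_lookup(struct chan *head, unsigned inst)
{
	for (struct chan *c = head->next; c != head; c = c->next) {
		if (c->inst != inst)
			continue;
		/* list_del(&chan->head) */
		c->prev->next = c->next;
		c->next->prev = c->prev;
		/* list_add(&chan->head, &gr->chan): relink at the front */
		c->next = head->next;
		c->prev = head;
		head->next->prev = c;
		head->next = c;
		return c;
	}
	return NULL;
}

static void chan_add(struct chan *head, struct chan *c)
{
	c->next = head->next;
	c->prev = head;
	head->next->prev = c;
	head->next = c;
}

int main(void)
{
	struct chan head = { 0, &head, &head };
	struct chan a = { 0x1000 }, b = { 0x2000 };

	chan_add(&head, &a);
	chan_add(&head, &b);		/* list order: b, a */

	chan_lookup(&head, 0x1000);	/* finds a, rotates it forward */
	printf("front inst: %x\n", head.next->inst);	/* prints 1000 */
	return 0;
}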
 
-static int
-nv40_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+int
+nv40_gr_init(struct nvkm_gr *base)
 {
-	struct nv40_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00001000;
-	nv_subdev(priv)->intr = nv40_gr_intr;
-	nv_engine(priv)->cclass = &nv40_gr_cclass;
-	if (nv44_gr_class(priv))
-		nv_engine(priv)->sclass = nv44_gr_sclass;
-	else
-		nv_engine(priv)->sclass = nv40_gr_sclass;
-	nv_engine(priv)->tile_prog = nv40_gr_tile_prog;
-
-	priv->base.units = nv40_gr_units;
-	return 0;
-}
-
-static int
-nv40_gr_init(struct nvkm_object *object)
-{
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	struct nv40_gr_priv *priv = (void *)engine;
+	struct nv40_gr *gr = nv40_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int ret, i, j;
 	u32 vramsz;
 
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
-
 	/* generate and upload context program */
-	ret = nv40_grctx_init(nv_device(priv), &priv->size);
+	ret = nv40_grctx_init(device, &gr->size);
 	if (ret)
 		return ret;
 
 	/* No context present currently */
-	nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
+	nvkm_wr32(device, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
 
-	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
-	nv_wr32(priv, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
+	nvkm_wr32(device, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nvkm_wr32(device, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
-	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
-	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
-	nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+	nvkm_wr32(device, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
+	nvkm_wr32(device, NV10_PGRAPH_DEBUG_4, 0x00008000);
+	nvkm_wr32(device, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
 
-	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
-	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
+	nvkm_wr32(device, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+	nvkm_wr32(device, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
 
-	j = nv_rd32(priv, 0x1540) & 0xff;
+	j = nvkm_rd32(device, 0x1540) & 0xff;
 	if (j) {
 		for (i = 0; !(j & 1); j >>= 1, i++)
 			;
-		nv_wr32(priv, 0x405000, i);
+		nvkm_wr32(device, 0x405000, i);
 	}
 
-	if (nv_device(priv)->chipset == 0x40) {
-		nv_wr32(priv, 0x4009b0, 0x83280fff);
-		nv_wr32(priv, 0x4009b4, 0x000000a0);
+	if (device->chipset == 0x40) {
+		nvkm_wr32(device, 0x4009b0, 0x83280fff);
+		nvkm_wr32(device, 0x4009b4, 0x000000a0);
 	} else {
-		nv_wr32(priv, 0x400820, 0x83280eff);
-		nv_wr32(priv, 0x400824, 0x000000a0);
+		nvkm_wr32(device, 0x400820, 0x83280eff);
+		nvkm_wr32(device, 0x400824, 0x000000a0);
 	}
 
-	switch (nv_device(priv)->chipset) {
+	switch (device->chipset) {
 	case 0x40:
 	case 0x45:
-		nv_wr32(priv, 0x4009b8, 0x0078e366);
-		nv_wr32(priv, 0x4009bc, 0x0000014c);
+		nvkm_wr32(device, 0x4009b8, 0x0078e366);
+		nvkm_wr32(device, 0x4009bc, 0x0000014c);
 		break;
 	case 0x41:
 	case 0x42: /* pciid also 0x00Cx */
 	/* case 0x0120: XXX (pciid) */
-		nv_wr32(priv, 0x400828, 0x007596ff);
-		nv_wr32(priv, 0x40082c, 0x00000108);
+		nvkm_wr32(device, 0x400828, 0x007596ff);
+		nvkm_wr32(device, 0x40082c, 0x00000108);
 		break;
 	case 0x43:
-		nv_wr32(priv, 0x400828, 0x0072cb77);
-		nv_wr32(priv, 0x40082c, 0x00000108);
+		nvkm_wr32(device, 0x400828, 0x0072cb77);
+		nvkm_wr32(device, 0x40082c, 0x00000108);
 		break;
 	case 0x44:
 	case 0x46: /* G72 */
 	case 0x4a:
 	case 0x4c: /* G7x-based C51 */
 	case 0x4e:
-		nv_wr32(priv, 0x400860, 0);
-		nv_wr32(priv, 0x400864, 0);
+		nvkm_wr32(device, 0x400860, 0);
+		nvkm_wr32(device, 0x400864, 0);
 		break;
 	case 0x47: /* G70 */
 	case 0x49: /* G71 */
 	case 0x4b: /* G73 */
-		nv_wr32(priv, 0x400828, 0x07830610);
-		nv_wr32(priv, 0x40082c, 0x0000016A);
+		nvkm_wr32(device, 0x400828, 0x07830610);
+		nvkm_wr32(device, 0x40082c, 0x0000016A);
 		break;
 	default:
 		break;
 	}
 
-	nv_wr32(priv, 0x400b38, 0x2ffff800);
-	nv_wr32(priv, 0x400b3c, 0x00006000);
+	nvkm_wr32(device, 0x400b38, 0x2ffff800);
+	nvkm_wr32(device, 0x400b3c, 0x00006000);
 
 	/* Tiling-related stuff. */
-	switch (nv_device(priv)->chipset) {
+	switch (device->chipset) {
 	case 0x44:
 	case 0x4a:
-		nv_wr32(priv, 0x400bc4, 0x1003d888);
-		nv_wr32(priv, 0x400bbc, 0xb7a7b500);
+		nvkm_wr32(device, 0x400bc4, 0x1003d888);
+		nvkm_wr32(device, 0x400bbc, 0xb7a7b500);
 		break;
 	case 0x46:
-		nv_wr32(priv, 0x400bc4, 0x0000e024);
-		nv_wr32(priv, 0x400bbc, 0xb7a7b520);
+		nvkm_wr32(device, 0x400bc4, 0x0000e024);
+		nvkm_wr32(device, 0x400bbc, 0xb7a7b520);
 		break;
 	case 0x4c:
 	case 0x4e:
 	case 0x67:
-		nv_wr32(priv, 0x400bc4, 0x1003d888);
-		nv_wr32(priv, 0x400bbc, 0xb7a7b540);
+		nvkm_wr32(device, 0x400bc4, 0x1003d888);
+		nvkm_wr32(device, 0x400bbc, 0xb7a7b540);
 		break;
 	default:
 		break;
 	}
 
-	/* Turn all the tiling regions off. */
-	for (i = 0; i < pfb->tile.regions; i++)
-		engine->tile_prog(engine, i);
-
 	/* begin RAM config */
-	vramsz = nv_device_resource_len(nv_device(priv), 0) - 1;
-	switch (nv_device(priv)->chipset) {
+	vramsz = device->func->resource_size(device, 1) - 1;
+	switch (device->chipset) {
 	case 0x40:
-		nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
-		nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
-		nv_wr32(priv, 0x4069A4, nv_rd32(priv, 0x100200));
-		nv_wr32(priv, 0x4069A8, nv_rd32(priv, 0x100204));
-		nv_wr32(priv, 0x400820, 0);
-		nv_wr32(priv, 0x400824, 0);
-		nv_wr32(priv, 0x400864, vramsz);
-		nv_wr32(priv, 0x400868, vramsz);
+		nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
+		nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
+		nvkm_wr32(device, 0x4069A4, nvkm_rd32(device, 0x100200));
+		nvkm_wr32(device, 0x4069A8, nvkm_rd32(device, 0x100204));
+		nvkm_wr32(device, 0x400820, 0);
+		nvkm_wr32(device, 0x400824, 0);
+		nvkm_wr32(device, 0x400864, vramsz);
+		nvkm_wr32(device, 0x400868, vramsz);
 		break;
 	default:
-		switch (nv_device(priv)->chipset) {
+		switch (device->chipset) {
 		case 0x41:
 		case 0x42:
 		case 0x43:
@@ -495,33 +406,70 @@
 		case 0x4e:
 		case 0x44:
 		case 0x4a:
-			nv_wr32(priv, 0x4009F0, nv_rd32(priv, 0x100200));
-			nv_wr32(priv, 0x4009F4, nv_rd32(priv, 0x100204));
+			nvkm_wr32(device, 0x4009F0, nvkm_rd32(device, 0x100200));
+			nvkm_wr32(device, 0x4009F4, nvkm_rd32(device, 0x100204));
 			break;
 		default:
-			nv_wr32(priv, 0x400DF0, nv_rd32(priv, 0x100200));
-			nv_wr32(priv, 0x400DF4, nv_rd32(priv, 0x100204));
+			nvkm_wr32(device, 0x400DF0, nvkm_rd32(device, 0x100200));
+			nvkm_wr32(device, 0x400DF4, nvkm_rd32(device, 0x100204));
 			break;
 		}
-		nv_wr32(priv, 0x4069F0, nv_rd32(priv, 0x100200));
-		nv_wr32(priv, 0x4069F4, nv_rd32(priv, 0x100204));
-		nv_wr32(priv, 0x400840, 0);
-		nv_wr32(priv, 0x400844, 0);
-		nv_wr32(priv, 0x4008A0, vramsz);
-		nv_wr32(priv, 0x4008A4, vramsz);
+		nvkm_wr32(device, 0x4069F0, nvkm_rd32(device, 0x100200));
+		nvkm_wr32(device, 0x4069F4, nvkm_rd32(device, 0x100204));
+		nvkm_wr32(device, 0x400840, 0);
+		nvkm_wr32(device, 0x400844, 0);
+		nvkm_wr32(device, 0x4008A0, vramsz);
+		nvkm_wr32(device, 0x4008A4, vramsz);
 		break;
 	}
 
 	return 0;
 }
 
-struct nvkm_oclass
-nv40_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_gr_ctor,
-		.dtor = _nvkm_gr_dtor,
-		.init = nv40_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+int
+nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_gr **pgr)
+{
+	struct nv40_gr *gr;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	*pgr = &gr->base;
+	INIT_LIST_HEAD(&gr->chan);
+
+	return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
+}
+
+static const struct nvkm_gr_func
+nv40_gr = {
+	.init = nv40_gr_init,
+	.intr = nv40_gr_intr,
+	.tile = nv40_gr_tile,
+	.units = nv40_gr_units,
+	.chan_new = nv40_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv40_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv40_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv40_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv40_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv40_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv40_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv40_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv40_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv40_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv40_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv40_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv40_gr_object }, /* imageblit */
+		{ -1, -1, 0x3062, &nv40_gr_object }, /* surf2d (nv40) */
+		{ -1, -1, 0x3089, &nv40_gr_object }, /* sifm (nv40) */
+		{ -1, -1, 0x309e, &nv40_gr_object }, /* swzsurf (nv40) */
+		{ -1, -1, 0x4097, &nv40_gr_object }, /* curie */
+		{}
+	}
 };
+
+int
+nv40_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv40_gr_new_(&nv40_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
index d852bd6..2812ed1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
@@ -1,22 +1,45 @@
 #ifndef __NV40_GR_H__
 #define __NV40_GR_H__
-#include <engine/gr.h>
+#define nv40_gr(p) container_of((p), struct nv40_gr, base)
+#include "priv.h"
 
-#include <core/device.h>
-struct nvkm_gpuobj;
+struct nv40_gr {
+	struct nvkm_gr base;
+	u32 size;
+	struct list_head chan;
+};
+
+int nv40_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+		 struct nvkm_gr **);
+int nv40_gr_init(struct nvkm_gr *);
+void nv40_gr_intr(struct nvkm_gr *);
+u64 nv40_gr_units(struct nvkm_gr *);
+
+#define nv40_gr_chan(p) container_of((p), struct nv40_gr_chan, object)
+
+struct nv40_gr_chan {
+	struct nvkm_object object;
+	struct nv40_gr *gr;
+	struct nvkm_fifo_chan *fifo;
+	u32 inst;
+	struct list_head head;
+};
+
+int nv40_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+		     const struct nvkm_oclass *, struct nvkm_object **);
+
+extern const struct nvkm_object_func nv40_gr_object;
 
 /* returns 1 if the device is one of the nv4x variants using the 0x4497
  * object class, which also helps to determine a number of other hardware
  * features
  */
 static inline int
-nv44_gr_class(void *priv)
+nv44_gr_class(struct nvkm_device *device)
 {
-	struct nvkm_device *device = nv_device(priv);
-
 	if ((device->chipset & 0xf0) == 0x60)
 		return 1;
 
-	return !(0x0baf & (1 << (device->chipset & 0x0f)));
+	return !(0x0aaf & (1 << (device->chipset & 0x0f)));
 }
 
 int  nv40_grctx_init(struct nvkm_device *, u32 *size);
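
The nv44_gr_class() hunk above is a functional change, not just churn: each mask bit stands for a chipset's low nibble, with a set bit meaning the nv40-era 0x4097 class and a clear bit the 0x4497 class, so going from 0x0baf to 0x0aaf flips exactly one bit (bit 8) and reclassifies chipset 0x48 as a 0x4497 user. A quick standalone check, reusing the inline's logic verbatim:

#include <stdio.h>

static int nv44_class(unsigned chipset, unsigned mask)
{
	if ((chipset & 0xf0) == 0x60)
		return 1;
	return !(mask & (1u << (chipset & 0x0f)));
}

int main(void)
{
	for (unsigned chipset = 0x40; chipset <= 0x4f; chipset++)
		printf("0x%02x: old=%d new=%d\n", chipset,
		       nv44_class(chipset, 0x0baf),	/* before this patch */
		       nv44_class(chipset, 0x0aaf));	/* after */
	return 0;
}

Running the loop confirms that everything else in the 0x40..0x4f range keeps its old classification.
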
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
new file mode 100644
index 0000000..45ff802
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv44.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "nv40.h"
+#include "regs.h"
+
+#include <subdev/fb.h>
+#include <engine/fifo.h>
+
+static void
+nv44_gr_tile(struct nvkm_gr *base, int i, struct nvkm_fb_tile *tile)
+{
+	struct nv40_gr *gr = nv40_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
+	unsigned long flags;
+
+	nvkm_fifo_pause(fifo, &flags);
+	nv04_gr_idle(&gr->base);
+
+	switch (device->chipset) {
+	case 0x44:
+	case 0x4a:
+		nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
+		break;
+	case 0x46:
+	case 0x4c:
+	case 0x63:
+	case 0x67:
+	case 0x68:
+		nvkm_wr32(device, NV47_PGRAPH_TSIZE(i), tile->pitch);
+		nvkm_wr32(device, NV47_PGRAPH_TLIMIT(i), tile->limit);
+		nvkm_wr32(device, NV47_PGRAPH_TILE(i), tile->addr);
+		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
+		break;
+	case 0x4e:
+		nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nvkm_wr32(device, NV20_PGRAPH_TILE(i), tile->addr);
+		nvkm_wr32(device, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nvkm_wr32(device, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nvkm_wr32(device, NV40_PGRAPH_TILE1(i), tile->addr);
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	nvkm_fifo_start(fifo, &flags);
+}
+
+static const struct nvkm_gr_func
+nv44_gr = {
+	.init = nv40_gr_init,
+	.intr = nv40_gr_intr,
+	.tile = nv44_gr_tile,
+	.units = nv40_gr_units,
+	.chan_new = nv40_gr_chan_new,
+	.sclass = {
+		{ -1, -1, 0x0012, &nv40_gr_object }, /* beta1 */
+		{ -1, -1, 0x0019, &nv40_gr_object }, /* clip */
+		{ -1, -1, 0x0030, &nv40_gr_object }, /* null */
+		{ -1, -1, 0x0039, &nv40_gr_object }, /* m2mf */
+		{ -1, -1, 0x0043, &nv40_gr_object }, /* rop */
+		{ -1, -1, 0x0044, &nv40_gr_object }, /* patt */
+		{ -1, -1, 0x004a, &nv40_gr_object }, /* gdi */
+		{ -1, -1, 0x0062, &nv40_gr_object }, /* surf2d */
+		{ -1, -1, 0x0072, &nv40_gr_object }, /* beta4 */
+		{ -1, -1, 0x0089, &nv40_gr_object }, /* sifm */
+		{ -1, -1, 0x008a, &nv40_gr_object }, /* ifc */
+		{ -1, -1, 0x009f, &nv40_gr_object }, /* imageblit */
+		{ -1, -1, 0x3062, &nv40_gr_object }, /* surf2d (nv40) */
+		{ -1, -1, 0x3089, &nv40_gr_object }, /* sifm (nv40) */
+		{ -1, -1, 0x309e, &nv40_gr_object }, /* swzsurf (nv40) */
+		{ -1, -1, 0x4497, &nv40_gr_object }, /* curie */
+		{}
+	}
+};
+
+int
+nv44_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv40_gr_new_(&nv44_gr, device, index, pgr);
+}
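
nv44.c leans on the nv40_gr() macro from nv40.h, which is just container_of(): struct nvkm_gr is embedded inside struct nv40_gr, and the wrapper recovers the outer structure from a pointer to the base. A minimal standalone sketch of the idiom; outer/to_outer are made-up names, and the real kernel macro comes from <linux/kernel.h>.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int id; };			/* plays struct nvkm_gr */

struct outer {
	struct base base;
	unsigned size;				/* plays nv40_gr.size */
};

#define to_outer(p) container_of((p), struct outer, base)

int main(void)
{
	struct outer o = { .base = { .id = 1 }, .size = 0x1000 };
	struct base *b = &o.base;		/* callers pass base pointers around */

	printf("size = 0x%x\n", to_outer(b)->size);
	return 0;
}

Because the downcast is explicit and type-checked at the offsetof() site, the conversion can drop the old "(void *)object" casts scattered through the previous code.
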
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
index 270d7cd..b19b912 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
@@ -24,27 +24,13 @@
 #include "nv50.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
-#include <subdev/timer.h>
 
-struct nv50_gr_priv {
-	struct nvkm_gr base;
-	spinlock_t lock;
-	u32 size;
-};
-
-struct nv50_gr_chan {
-	struct nvkm_gr_chan base;
-};
-
-static u64
+u64
 nv50_gr_units(struct nvkm_gr *gr)
 {
-	struct nv50_gr_priv *priv = (void *)gr;
-
-	return nv_rd32(priv, 0x1540);
+	return nvkm_rd32(gr->engine.subdev.device, 0x1540);
 }
 
 /*******************************************************************************
@@ -52,86 +38,25 @@
  ******************************************************************************/
 
 static int
-nv50_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 size,
-		    struct nvkm_object **pobject)
+nv50_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		    int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *obj;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 16, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, nv_mclass(obj));
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	return 0;
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16,
+				  align, false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-static struct nvkm_ofuncs
-nv50_gr_ofuncs = {
-	.ctor = nv50_gr_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_oclass
-nv50_gr_sclass[] = {
-	{ 0x0030, &nv50_gr_ofuncs },
-	{ 0x502d, &nv50_gr_ofuncs },
-	{ 0x5039, &nv50_gr_ofuncs },
-	{ 0x5097, &nv50_gr_ofuncs },
-	{ 0x50c0, &nv50_gr_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-g84_gr_sclass[] = {
-	{ 0x0030, &nv50_gr_ofuncs },
-	{ 0x502d, &nv50_gr_ofuncs },
-	{ 0x5039, &nv50_gr_ofuncs },
-	{ 0x50c0, &nv50_gr_ofuncs },
-	{ 0x8297, &nv50_gr_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-gt200_gr_sclass[] = {
-	{ 0x0030, &nv50_gr_ofuncs },
-	{ 0x502d, &nv50_gr_ofuncs },
-	{ 0x5039, &nv50_gr_ofuncs },
-	{ 0x50c0, &nv50_gr_ofuncs },
-	{ 0x8397, &nv50_gr_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-gt215_gr_sclass[] = {
-	{ 0x0030, &nv50_gr_ofuncs },
-	{ 0x502d, &nv50_gr_ofuncs },
-	{ 0x5039, &nv50_gr_ofuncs },
-	{ 0x50c0, &nv50_gr_ofuncs },
-	{ 0x8597, &nv50_gr_ofuncs },
-	{ 0x85c0, &nv50_gr_ofuncs },
-	{}
-};
-
-static struct nvkm_oclass
-mcp89_gr_sclass[] = {
-	{ 0x0030, &nv50_gr_ofuncs },
-	{ 0x502d, &nv50_gr_ofuncs },
-	{ 0x5039, &nv50_gr_ofuncs },
-	{ 0x50c0, &nv50_gr_ofuncs },
-	{ 0x85c0, &nv50_gr_ofuncs },
-	{ 0x8697, &nv50_gr_ofuncs },
-	{}
+const struct nvkm_object_func
+nv50_gr_object = {
+	.bind = nv50_gr_object_bind,
 };
 
 /*******************************************************************************
@@ -139,161 +64,44 @@
  ******************************************************************************/
 
 static int
-nv50_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+nv50_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		  int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nv50_gr_priv *priv = (void *)engine;
-	struct nv50_gr_chan *chan;
-	int ret;
-
-	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, priv->size,
-				     0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	nv50_grctx_fill(nv_device(priv), nv_gpuobj(chan));
-	return 0;
+	struct nv50_gr *gr = nv50_gr_chan(object)->gr;
+	int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
+				  align, true, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nv50_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-static struct nvkm_oclass
-nv50_gr_cclass = {
-	.handle = NV_ENGCTX(GR, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_gr_context_ctor,
-		.dtor = _nvkm_gr_context_dtor,
-		.init = _nvkm_gr_context_init,
-		.fini = _nvkm_gr_context_fini,
-		.rd32 = _nvkm_gr_context_rd32,
-		.wr32 = _nvkm_gr_context_wr32,
-	},
+static const struct nvkm_object_func
+nv50_gr_chan = {
+	.bind = nv50_gr_chan_bind,
 };
 
+int
+nv50_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
+{
+	struct nv50_gr *gr = nv50_gr(base);
+	struct nv50_gr_chan *chan;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv50_gr_chan, oclass, &chan->object);
+	chan->gr = gr;
+	*pobject = &chan->object;
+	return 0;
+}
+
 /*******************************************************************************
  * PGRAPH engine/subdev functions
  ******************************************************************************/
 
-static const struct nvkm_bitfield nv50_pgr_status[] = {
-	{ 0x00000001, "BUSY" }, /* set when any bit is set */
-	{ 0x00000002, "DISPATCH" },
-	{ 0x00000004, "UNK2" },
-	{ 0x00000008, "UNK3" },
-	{ 0x00000010, "UNK4" },
-	{ 0x00000020, "UNK5" },
-	{ 0x00000040, "M2MF" },
-	{ 0x00000080, "UNK7" },
-	{ 0x00000100, "CTXPROG" },
-	{ 0x00000200, "VFETCH" },
-	{ 0x00000400, "CCACHE_PREGEOM" },
-	{ 0x00000800, "STRMOUT_VATTR_POSTGEOM" },
-	{ 0x00001000, "VCLIP" },
-	{ 0x00002000, "RATTR_APLANE" },
-	{ 0x00004000, "TRAST" },
-	{ 0x00008000, "CLIPID" },
-	{ 0x00010000, "ZCULL" },
-	{ 0x00020000, "ENG2D" },
-	{ 0x00040000, "RMASK" },
-	{ 0x00080000, "TPC_RAST" },
-	{ 0x00100000, "TPC_PROP" },
-	{ 0x00200000, "TPC_TEX" },
-	{ 0x00400000, "TPC_GEOM" },
-	{ 0x00800000, "TPC_MP" },
-	{ 0x01000000, "ROP" },
-	{}
-};
-
-static const char *const nv50_pgr_vstatus_0[] = {
-	"VFETCH", "CCACHE", "PREGEOM", "POSTGEOM", "VATTR", "STRMOUT", "VCLIP",
-	NULL
-};
-
-static const char *const nv50_pgr_vstatus_1[] = {
-	"TPC_RAST", "TPC_PROP", "TPC_TEX", "TPC_GEOM", "TPC_MP", NULL
-};
-
-static const char *const nv50_pgr_vstatus_2[] = {
-	"RATTR", "APLANE", "TRAST", "CLIPID", "ZCULL", "ENG2D", "RMASK",
-	"ROP", NULL
-};
-
-static void
-nvkm_pgr_vstatus_print(struct nv50_gr_priv *priv, int r,
-		       const char *const units[], u32 status)
-{
-	int i;
-
-	nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);
-
-	for (i = 0; units[i] && status; i++) {
-		if ((status & 7) == 1)
-			pr_cont(" %s", units[i]);
-		status >>= 3;
-	}
-	if (status)
-		pr_cont(" (invalid: 0x%x)", status);
-	pr_cont("\n");
-}
-
-static int
-g84_gr_tlb_flush(struct nvkm_engine *engine)
-{
-	struct nvkm_timer *ptimer = nvkm_timer(engine);
-	struct nv50_gr_priv *priv = (void *)engine;
-	bool idle, timeout = false;
-	unsigned long flags;
-	u64 start;
-	u32 tmp;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	nv_mask(priv, 0x400500, 0x00000001, 0x00000000);
-
-	start = ptimer->read(ptimer);
-	do {
-		idle = true;
-
-		for (tmp = nv_rd32(priv, 0x400380); tmp && idle; tmp >>= 3) {
-			if ((tmp & 7) == 1)
-				idle = false;
-		}
-
-		for (tmp = nv_rd32(priv, 0x400384); tmp && idle; tmp >>= 3) {
-			if ((tmp & 7) == 1)
-				idle = false;
-		}
-
-		for (tmp = nv_rd32(priv, 0x400388); tmp && idle; tmp >>= 3) {
-			if ((tmp & 7) == 1)
-				idle = false;
-		}
-	} while (!idle &&
-		 !(timeout = ptimer->read(ptimer) - start > 2000000000));
-
-	if (timeout) {
-		nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");
-
-		tmp = nv_rd32(priv, 0x400700);
-		nv_error(priv, "PGRAPH_STATUS  : 0x%08x", tmp);
-		nvkm_bitfield_print(nv50_pgr_status, tmp);
-		pr_cont("\n");
-
-		nvkm_pgr_vstatus_print(priv, 0, nv50_pgr_vstatus_0,
-				       nv_rd32(priv, 0x400380));
-		nvkm_pgr_vstatus_print(priv, 1, nv50_pgr_vstatus_1,
-				       nv_rd32(priv, 0x400384));
-		nvkm_pgr_vstatus_print(priv, 2, nv50_pgr_vstatus_2,
-				       nv_rd32(priv, 0x400388));
-	}
-
-
-	nv_wr32(priv, 0x100c80, 0x00000001);
-	if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000))
-		nv_error(priv, "vm flush timeout\n");
-	nv_mask(priv, 0x400500, 0x00000001, 0x00000001);
-	spin_unlock_irqrestore(&priv->lock, flags);
-	return timeout ? -EBUSY : 0;
-}
-
 static const struct nvkm_bitfield nv50_mp_exec_errors[] = {
 	{ 0x01, "STACK_UNDERFLOW" },
 	{ 0x02, "STACK_MISMATCH" },
@@ -427,157 +235,172 @@
 };
 
 static void
-nv50_priv_prop_trap(struct nv50_gr_priv *priv,
-		    u32 ustatus_addr, u32 ustatus, u32 tp)
+nv50_gr_prop_trap(struct nv50_gr *gr, u32 ustatus_addr, u32 ustatus, u32 tp)
 {
-	u32 e0c = nv_rd32(priv, ustatus_addr + 0x04);
-	u32 e10 = nv_rd32(priv, ustatus_addr + 0x08);
-	u32 e14 = nv_rd32(priv, ustatus_addr + 0x0c);
-	u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
-	u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
-	u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
-	u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 e0c = nvkm_rd32(device, ustatus_addr + 0x04);
+	u32 e10 = nvkm_rd32(device, ustatus_addr + 0x08);
+	u32 e14 = nvkm_rd32(device, ustatus_addr + 0x0c);
+	u32 e18 = nvkm_rd32(device, ustatus_addr + 0x10);
+	u32 e1c = nvkm_rd32(device, ustatus_addr + 0x14);
+	u32 e20 = nvkm_rd32(device, ustatus_addr + 0x18);
+	u32 e24 = nvkm_rd32(device, ustatus_addr + 0x1c);
+	char msg[128];
 
 	/* CUDA memory: l[], g[] or stack. */
 	if (ustatus & 0x00000080) {
 		if (e18 & 0x80000000) {
 			/* g[] read fault? */
-			nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
+			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
 					 tp, e14, e10 | ((e18 >> 24) & 0x1f));
 			e18 &= ~0x1f000000;
 		} else if (e18 & 0xc) {
 			/* g[] write fault? */
-			nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
+			nvkm_error(subdev, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
 				 tp, e14, e10 | ((e18 >> 7) & 0x1f));
 			e18 &= ~0x00000f80;
 		} else {
-			nv_error(priv, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
+			nvkm_error(subdev, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
 				 tp, e14, e10);
 		}
 		ustatus &= ~0x00000080;
 	}
 	if (ustatus) {
-		nv_error(priv, "TRAP_PROP - TP %d -", tp);
-		nvkm_bitfield_print(nv50_gr_trap_prop, ustatus);
-		pr_cont(" - Address %02x%08x\n", e14, e10);
+		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_trap_prop, ustatus);
+		nvkm_error(subdev, "TRAP_PROP - TP %d - %08x [%s] - "
+				   "Address %02x%08x\n",
+			   tp, ustatus, msg, e14, e10);
 	}
-	nv_error(priv, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+	nvkm_error(subdev, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
 		 tp, e0c, e18, e1c, e20, e24);
 }
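
The error paths above swap the old nvkm_bitfield_print()/pr_cont() piecewise printing for nvkm_snprintbf(), which renders the set bits of a status word into a caller-supplied buffer so the whole diagnostic fits in a single nvkm_error() line. A userspace approximation of that formatting (snprintbf() here is a sketch, not the nvkm implementation), fed two of the nv50_mp_exec_errors entries from this file:

#include <stdio.h>

struct bitfield {
	unsigned mask;
	const char *name;
};

static void snprintbf(char *buf, size_t len,
		      const struct bitfield *bf, unsigned value)
{
	size_t n = 0;

	buf[0] = '\0';
	for (; bf->name; bf++) {
		if (!(value & bf->mask))
			continue;
		n += snprintf(buf + n, n < len ? len - n : 0, "%s%s",
			      n ? " " : "", bf->name);
	}
}

int main(void)
{
	static const struct bitfield mp_exec[] = {
		{ 0x01, "STACK_UNDERFLOW" },
		{ 0x02, "STACK_MISMATCH" },
		{ 0, NULL }
	};
	char msg[128];

	snprintbf(msg, sizeof(msg), mp_exec, 0x03);
	printf("%08x [%s]\n", 0x03, msg);
	/* -> 00000003 [STACK_UNDERFLOW STACK_MISMATCH] */
	return 0;
}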
 
 static void
-nv50_priv_mp_trap(struct nv50_gr_priv *priv, int tpid, int display)
+nv50_gr_mp_trap(struct nv50_gr *gr, int tpid, int display)
 {
-	u32 units = nv_rd32(priv, 0x1540);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 units = nvkm_rd32(device, 0x1540);
 	u32 addr, mp10, status, pc, oplow, ophigh;
+	char msg[128];
 	int i;
 	int mps = 0;
 	for (i = 0; i < 4; i++) {
 		if (!(units & 1 << (i+24)))
 			continue;
-		if (nv_device(priv)->chipset < 0xa0)
+		if (device->chipset < 0xa0)
 			addr = 0x408200 + (tpid << 12) + (i << 7);
 		else
 			addr = 0x408100 + (tpid << 11) + (i << 7);
-		mp10 = nv_rd32(priv, addr + 0x10);
-		status = nv_rd32(priv, addr + 0x14);
+		mp10 = nvkm_rd32(device, addr + 0x10);
+		status = nvkm_rd32(device, addr + 0x14);
 		if (!status)
 			continue;
 		if (display) {
-			nv_rd32(priv, addr + 0x20);
-			pc = nv_rd32(priv, addr + 0x24);
-			oplow = nv_rd32(priv, addr + 0x70);
-			ophigh = nv_rd32(priv, addr + 0x74);
-			nv_error(priv, "TRAP_MP_EXEC - "
-					"TP %d MP %d:", tpid, i);
-			nvkm_bitfield_print(nv50_mp_exec_errors, status);
-			pr_cont(" at %06x warp %d, opcode %08x %08x\n",
-					pc&0xffffff, pc >> 24,
-					oplow, ophigh);
+			nvkm_rd32(device, addr + 0x20);
+			pc = nvkm_rd32(device, addr + 0x24);
+			oplow = nvkm_rd32(device, addr + 0x70);
+			ophigh = nvkm_rd32(device, addr + 0x74);
+			nvkm_snprintbf(msg, sizeof(msg),
+				       nv50_mp_exec_errors, status);
+			nvkm_error(subdev, "TRAP_MP_EXEC - TP %d MP %d: "
+					   "%08x [%s] at %06x warp %d, "
+					   "opcode %08x %08x\n",
+				   tpid, i, status, msg, pc & 0xffffff,
+				   pc >> 24, oplow, ophigh);
 		}
-		nv_wr32(priv, addr + 0x10, mp10);
-		nv_wr32(priv, addr + 0x14, 0);
+		nvkm_wr32(device, addr + 0x10, mp10);
+		nvkm_wr32(device, addr + 0x14, 0);
 		mps++;
 	}
 	if (!mps && display)
-		nv_error(priv, "TRAP_MP_EXEC - TP %d: "
+		nvkm_error(subdev, "TRAP_MP_EXEC - TP %d: "
 				"No MPs claiming errors?\n", tpid);
 }
 
 static void
-nv50_priv_tp_trap(struct nv50_gr_priv *priv, int type, u32 ustatus_old,
+nv50_gr_tp_trap(struct nv50_gr *gr, int type, u32 ustatus_old,
 		  u32 ustatus_new, int display, const char *name)
 {
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 units = nvkm_rd32(device, 0x1540);
 	int tps = 0;
-	u32 units = nv_rd32(priv, 0x1540);
 	int i, r;
+	char msg[128];
 	u32 ustatus_addr, ustatus;
 	for (i = 0; i < 16; i++) {
 		if (!(units & (1 << i)))
 			continue;
-		if (nv_device(priv)->chipset < 0xa0)
+		if (device->chipset < 0xa0)
 			ustatus_addr = ustatus_old + (i << 12);
 		else
 			ustatus_addr = ustatus_new + (i << 11);
-		ustatus = nv_rd32(priv, ustatus_addr) & 0x7fffffff;
+		ustatus = nvkm_rd32(device, ustatus_addr) & 0x7fffffff;
 		if (!ustatus)
 			continue;
 		tps++;
 		switch (type) {
 		case 6: /* texture error... unknown for now */
 			if (display) {
-				nv_error(priv, "magic set %d:\n", i);
+				nvkm_error(subdev, "magic set %d:\n", i);
 				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
-					nv_error(priv, "\t0x%08x: 0x%08x\n", r,
-						nv_rd32(priv, r));
+					nvkm_error(subdev, "\t%08x: %08x\n", r,
+						   nvkm_rd32(device, r));
 				if (ustatus) {
-					nv_error(priv, "%s - TP%d:", name, i);
-					nvkm_bitfield_print(nv50_tex_traps,
-							       ustatus);
-					pr_cont("\n");
+					nvkm_snprintbf(msg, sizeof(msg),
+						       nv50_tex_traps, ustatus);
+					nvkm_error(subdev,
+						   "%s - TP%d: %08x [%s]\n",
+						   name, i, ustatus, msg);
 					ustatus = 0;
 				}
 			}
 			break;
 		case 7: /* MP error */
 			if (ustatus & 0x04030000) {
-				nv50_priv_mp_trap(priv, i, display);
+				nv50_gr_mp_trap(gr, i, display);
 				ustatus &= ~0x04030000;
 			}
 			if (ustatus && display) {
-				nv_error(priv, "%s - TP%d:", name, i);
-				nvkm_bitfield_print(nv50_mpc_traps, ustatus);
-				pr_cont("\n");
+				nvkm_snprintbf(msg, sizeof(msg),
+					       nv50_mpc_traps, ustatus);
+				nvkm_error(subdev, "%s - TP%d: %08x [%s]\n",
+					   name, i, ustatus, msg);
 				ustatus = 0;
 			}
 			break;
 		case 8: /* PROP error */
 			if (display)
-				nv50_priv_prop_trap(
-						priv, ustatus_addr, ustatus, i);
+				nv50_gr_prop_trap(
+						gr, ustatus_addr, ustatus, i);
 			ustatus = 0;
 			break;
 		}
 		if (ustatus) {
 			if (display)
-				nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+				nvkm_error(subdev, "%s - TP%d: Unhandled ustatus %08x\n", name, i, ustatus);
 		}
-		nv_wr32(priv, ustatus_addr, 0xc0000000);
+		nvkm_wr32(device, ustatus_addr, 0xc0000000);
 	}
 
 	if (!tps && display)
-		nv_warn(priv, "%s - No TPs claiming errors?\n", name);
+		nvkm_warn(subdev, "%s - No TPs claiming errors?\n", name);
 }
 
 static int
-nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display,
-		     int chid, u64 inst, struct nvkm_object *engctx)
+nv50_gr_trap_handler(struct nv50_gr *gr, u32 display,
+		     int chid, u64 inst, const char *name)
 {
-	u32 status = nv_rd32(priv, 0x400108);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 status = nvkm_rd32(device, 0x400108);
 	u32 ustatus;
+	char msg[128];
 
 	if (!status && display) {
-		nv_error(priv, "TRAP: no units reporting traps?\n");
+		nvkm_error(subdev, "TRAP: no units reporting traps?\n");
 		return 1;
 	}
 
@@ -585,71 +408,72 @@
 	 * COND, QUERY. If you get a trap from it, the command is still stuck
 	 * in DISPATCH and you need to do something about it. */
 	if (status & 0x001) {
-		ustatus = nv_rd32(priv, 0x400804) & 0x7fffffff;
+		ustatus = nvkm_rd32(device, 0x400804) & 0x7fffffff;
 		if (!ustatus && display) {
-			nv_error(priv, "TRAP_DISPATCH - no ustatus?\n");
+			nvkm_error(subdev, "TRAP_DISPATCH - no ustatus?\n");
 		}
 
-		nv_wr32(priv, 0x400500, 0x00000000);
+		nvkm_wr32(device, 0x400500, 0x00000000);
 
 		/* Known to be triggered by screwed up NOTIFY and COND... */
 		if (ustatus & 0x00000001) {
-			u32 addr = nv_rd32(priv, 0x400808);
+			u32 addr = nvkm_rd32(device, 0x400808);
 			u32 subc = (addr & 0x00070000) >> 16;
 			u32 mthd = (addr & 0x00001ffc);
-			u32 datal = nv_rd32(priv, 0x40080c);
-			u32 datah = nv_rd32(priv, 0x400810);
-			u32 class = nv_rd32(priv, 0x400814);
-			u32 r848 = nv_rd32(priv, 0x400848);
+			u32 datal = nvkm_rd32(device, 0x40080c);
+			u32 datah = nvkm_rd32(device, 0x400810);
+			u32 class = nvkm_rd32(device, 0x400814);
+			u32 r848 = nvkm_rd32(device, 0x400848);
 
-			nv_error(priv, "TRAP DISPATCH_FAULT\n");
+			nvkm_error(subdev, "TRAP DISPATCH_FAULT\n");
 			if (display && (addr & 0x80000000)) {
-				nv_error(priv,
-					 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x%08x 400808 0x%08x 400848 0x%08x\n",
-					 chid, inst,
-					 nvkm_client_name(engctx), subc,
-					 class, mthd, datah, datal, addr, r848);
+				nvkm_error(subdev,
+					   "ch %d [%010llx %s] subc %d "
+					   "class %04x mthd %04x data %08x%08x "
+					   "400808 %08x 400848 %08x\n",
+					   chid, inst, name, subc, class, mthd,
+					   datah, datal, addr, r848);
 			} else
 			if (display) {
-				nv_error(priv, "no stuck command?\n");
+				nvkm_error(subdev, "no stuck command?\n");
 			}
 
-			nv_wr32(priv, 0x400808, 0);
-			nv_wr32(priv, 0x4008e8, nv_rd32(priv, 0x4008e8) & 3);
-			nv_wr32(priv, 0x400848, 0);
+			nvkm_wr32(device, 0x400808, 0);
+			nvkm_wr32(device, 0x4008e8, nvkm_rd32(device, 0x4008e8) & 3);
+			nvkm_wr32(device, 0x400848, 0);
 			ustatus &= ~0x00000001;
 		}
 
 		if (ustatus & 0x00000002) {
-			u32 addr = nv_rd32(priv, 0x40084c);
+			u32 addr = nvkm_rd32(device, 0x40084c);
 			u32 subc = (addr & 0x00070000) >> 16;
 			u32 mthd = (addr & 0x00001ffc);
-			u32 data = nv_rd32(priv, 0x40085c);
-			u32 class = nv_rd32(priv, 0x400814);
+			u32 data = nvkm_rd32(device, 0x40085c);
+			u32 class = nvkm_rd32(device, 0x400814);
 
-			nv_error(priv, "TRAP DISPATCH_QUERY\n");
+			nvkm_error(subdev, "TRAP DISPATCH_QUERY\n");
 			if (display && (addr & 0x80000000)) {
-				nv_error(priv,
-					 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x 40084c 0x%08x\n",
-					 chid, inst,
-					 nvkm_client_name(engctx), subc,
-					 class, mthd, data, addr);
+				nvkm_error(subdev,
+					   "ch %d [%010llx %s] subc %d "
+					   "class %04x mthd %04x data %08x "
+					   "40084c %08x\n", chid, inst, name,
+					   subc, class, mthd, data, addr);
 			} else
 			if (display) {
-				nv_error(priv, "no stuck command?\n");
+				nvkm_error(subdev, "no stuck command?\n");
 			}
 
-			nv_wr32(priv, 0x40084c, 0);
+			nvkm_wr32(device, 0x40084c, 0);
 			ustatus &= ~0x00000002;
 		}
 
 		if (ustatus && display) {
-			nv_error(priv, "TRAP_DISPATCH (unknown "
-				      "0x%08x)\n", ustatus);
+			nvkm_error(subdev, "TRAP_DISPATCH "
+					   "(unknown %08x)\n", ustatus);
 		}
 
-		nv_wr32(priv, 0x400804, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x001);
+		nvkm_wr32(device, 0x400804, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x001);
 		status &= ~0x001;
 		if (!status)
 			return 0;
@@ -657,81 +481,91 @@
 
 	/* M2MF: Memory to memory copy engine. */
 	if (status & 0x002) {
-		u32 ustatus = nv_rd32(priv, 0x406800) & 0x7fffffff;
+		u32 ustatus = nvkm_rd32(device, 0x406800) & 0x7fffffff;
 		if (display) {
-			nv_error(priv, "TRAP_M2MF");
-			nvkm_bitfield_print(nv50_gr_trap_m2mf, ustatus);
-			pr_cont("\n");
-			nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n",
-				nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808),
-				nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810));
-
+			nvkm_snprintbf(msg, sizeof(msg),
+				       nv50_gr_trap_m2mf, ustatus);
+			nvkm_error(subdev, "TRAP_M2MF %08x [%s]\n",
+				   ustatus, msg);
+			nvkm_error(subdev, "TRAP_M2MF %08x %08x %08x %08x\n",
+				   nvkm_rd32(device, 0x406804),
+				   nvkm_rd32(device, 0x406808),
+				   nvkm_rd32(device, 0x40680c),
+				   nvkm_rd32(device, 0x406810));
 		}
 
 		/* No sane way found yet -- just reset the bugger. */
-		nv_wr32(priv, 0x400040, 2);
-		nv_wr32(priv, 0x400040, 0);
-		nv_wr32(priv, 0x406800, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x002);
+		nvkm_wr32(device, 0x400040, 2);
+		nvkm_wr32(device, 0x400040, 0);
+		nvkm_wr32(device, 0x406800, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x002);
 		status &= ~0x002;
 	}
 
 	/* VFETCH: Fetches data from vertex buffers. */
 	if (status & 0x004) {
-		u32 ustatus = nv_rd32(priv, 0x400c04) & 0x7fffffff;
+		u32 ustatus = nvkm_rd32(device, 0x400c04) & 0x7fffffff;
 		if (display) {
-			nv_error(priv, "TRAP_VFETCH");
-			nvkm_bitfield_print(nv50_gr_trap_vfetch, ustatus);
-			pr_cont("\n");
-			nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n",
-				nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08),
-				nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10));
+			nvkm_snprintbf(msg, sizeof(msg),
+				       nv50_gr_trap_vfetch, ustatus);
+			nvkm_error(subdev, "TRAP_VFETCH %08x [%s]\n",
+				   ustatus, msg);
+			nvkm_error(subdev, "TRAP_VFETCH %08x %08x %08x %08x\n",
+				   nvkm_rd32(device, 0x400c00),
+				   nvkm_rd32(device, 0x400c08),
+				   nvkm_rd32(device, 0x400c0c),
+				   nvkm_rd32(device, 0x400c10));
 		}
 
-		nv_wr32(priv, 0x400c04, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x004);
+		nvkm_wr32(device, 0x400c04, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x004);
 		status &= ~0x004;
 	}
 
 	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
 	if (status & 0x008) {
-		ustatus = nv_rd32(priv, 0x401800) & 0x7fffffff;
+		ustatus = nvkm_rd32(device, 0x401800) & 0x7fffffff;
 		if (display) {
-			nv_error(priv, "TRAP_STRMOUT");
-			nvkm_bitfield_print(nv50_gr_trap_strmout, ustatus);
-			pr_cont("\n");
-			nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n",
-				nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808),
-				nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810));
-
+			nvkm_snprintbf(msg, sizeof(msg),
+				       nv50_gr_trap_strmout, ustatus);
+			nvkm_error(subdev, "TRAP_STRMOUT %08x [%s]\n",
+				   ustatus, msg);
+			nvkm_error(subdev, "TRAP_STRMOUT %08x %08x %08x %08x\n",
+				   nvkm_rd32(device, 0x401804),
+				   nvkm_rd32(device, 0x401808),
+				   nvkm_rd32(device, 0x40180c),
+				   nvkm_rd32(device, 0x401810));
 		}
 
 		/* No sane way found yet -- just reset the bugger. */
-		nv_wr32(priv, 0x400040, 0x80);
-		nv_wr32(priv, 0x400040, 0);
-		nv_wr32(priv, 0x401800, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x008);
+		nvkm_wr32(device, 0x400040, 0x80);
+		nvkm_wr32(device, 0x400040, 0);
+		nvkm_wr32(device, 0x401800, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x008);
 		status &= ~0x008;
 	}
 
 	/* CCACHE: Handles code and c[] caches and fills them. */
 	if (status & 0x010) {
-		ustatus = nv_rd32(priv, 0x405018) & 0x7fffffff;
+		ustatus = nvkm_rd32(device, 0x405018) & 0x7fffffff;
 		if (display) {
-			nv_error(priv, "TRAP_CCACHE");
-			nvkm_bitfield_print(nv50_gr_trap_ccache, ustatus);
-			pr_cont("\n");
-			nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x"
-				     " %08x %08x %08x\n",
-				nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004),
-				nv_rd32(priv, 0x405008), nv_rd32(priv, 0x40500c),
-				nv_rd32(priv, 0x405010), nv_rd32(priv, 0x405014),
-				nv_rd32(priv, 0x40501c));
-
+			nvkm_snprintbf(msg, sizeof(msg),
+				       nv50_gr_trap_ccache, ustatus);
+			nvkm_error(subdev, "TRAP_CCACHE %08x [%s]\n",
+				   ustatus, msg);
+			nvkm_error(subdev, "TRAP_CCACHE %08x %08x %08x %08x "
+					   "%08x %08x %08x\n",
+				   nvkm_rd32(device, 0x405000),
+				   nvkm_rd32(device, 0x405004),
+				   nvkm_rd32(device, 0x405008),
+				   nvkm_rd32(device, 0x40500c),
+				   nvkm_rd32(device, 0x405010),
+				   nvkm_rd32(device, 0x405014),
+				   nvkm_rd32(device, 0x40501c));
 		}
 
-		nv_wr32(priv, 0x405018, 0xc0000000);
-		nv_wr32(priv, 0x400108, 0x010);
+		nvkm_wr32(device, 0x405018, 0xc0000000);
+		nvkm_wr32(device, 0x400108, 0x010);
 		status &= ~0x010;
 	}
 
@@ -739,239 +573,174 @@
 	 * remaining, so try to handle it anyway. Perhaps related to that
 	 * unknown DMA slot on tesla? */
 	if (status & 0x20) {
-		ustatus = nv_rd32(priv, 0x402000) & 0x7fffffff;
+		ustatus = nvkm_rd32(device, 0x402000) & 0x7fffffff;
 		if (display)
-			nv_error(priv, "TRAP_UNKC04 0x%08x\n", ustatus);
-		nv_wr32(priv, 0x402000, 0xc0000000);
+			nvkm_error(subdev, "TRAP_UNKC04 %08x\n", ustatus);
+		nvkm_wr32(device, 0x402000, 0xc0000000);
 		/* no status modification on purpose */
 	}
 
 	/* TEXTURE: CUDA texturing units */
 	if (status & 0x040) {
-		nv50_priv_tp_trap(priv, 6, 0x408900, 0x408600, display,
+		nv50_gr_tp_trap(gr, 6, 0x408900, 0x408600, display,
 				    "TRAP_TEXTURE");
-		nv_wr32(priv, 0x400108, 0x040);
+		nvkm_wr32(device, 0x400108, 0x040);
 		status &= ~0x040;
 	}
 
 	/* MP: CUDA execution engines. */
 	if (status & 0x080) {
-		nv50_priv_tp_trap(priv, 7, 0x408314, 0x40831c, display,
+		nv50_gr_tp_trap(gr, 7, 0x408314, 0x40831c, display,
 				    "TRAP_MP");
-		nv_wr32(priv, 0x400108, 0x080);
+		nvkm_wr32(device, 0x400108, 0x080);
 		status &= ~0x080;
 	}
 
 	/* PROP:  Handles TP-initiated uncached memory accesses:
 	 * l[], g[], stack, 2d surfaces, render targets. */
 	if (status & 0x100) {
-		nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display,
+		nv50_gr_tp_trap(gr, 8, 0x408e08, 0x408708, display,
 				    "TRAP_PROP");
-		nv_wr32(priv, 0x400108, 0x100);
+		nvkm_wr32(device, 0x400108, 0x100);
 		status &= ~0x100;
 	}
 
 	if (status) {
 		if (display)
-			nv_error(priv, "TRAP: unknown 0x%08x\n", status);
-		nv_wr32(priv, 0x400108, status);
+			nvkm_error(subdev, "TRAP: unknown %08x\n", status);
+		nvkm_wr32(device, 0x400108, status);
 	}
 
 	return 1;
 }
 
-static void
-nv50_gr_intr(struct nvkm_subdev *subdev)
+void
+nv50_gr_intr(struct nvkm_gr *base)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct nvkm_handle *handle = NULL;
-	struct nv50_gr_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, 0x400100);
-	u32 inst = nv_rd32(priv, 0x40032c) & 0x0fffffff;
-	u32 addr = nv_rd32(priv, 0x400704);
+	struct nv50_gr *gr = nv50_gr(base);
+	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo_chan *chan;
+	u32 stat = nvkm_rd32(device, 0x400100);
+	u32 inst = nvkm_rd32(device, 0x40032c) & 0x0fffffff;
+	u32 addr = nvkm_rd32(device, 0x400704);
 	u32 subc = (addr & 0x00070000) >> 16;
 	u32 mthd = (addr & 0x00001ffc);
-	u32 data = nv_rd32(priv, 0x400708);
-	u32 class = nv_rd32(priv, 0x400814);
+	u32 data = nvkm_rd32(device, 0x400708);
+	u32 class = nvkm_rd32(device, 0x400814);
 	u32 show = stat, show_bitfield = stat;
-	int chid;
+	const struct nvkm_enum *en;
+	unsigned long flags;
+	const char *name = "unknown";
+	char msg[128];
+	int chid = -1;
 
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
-
-	if (stat & 0x00000010) {
-		handle = nvkm_handle_get_class(engctx, class);
-		if (handle && !nv_call(handle->object, mthd, data))
-			show &= ~0x00000010;
-		nvkm_handle_put(handle);
+	chan = nvkm_fifo_chan_inst(device->fifo, (u64)inst << 12, &flags);
+	if (chan)  {
+		name = chan->object.client->name;
+		chid = chan->chid;
 	}
 
 	if (show & 0x00100000) {
-		u32 ecode = nv_rd32(priv, 0x400110);
-		nv_error(priv, "DATA_ERROR ");
-		nvkm_enum_print(nv50_data_error_names, ecode);
-		pr_cont("\n");
+		u32 ecode = nvkm_rd32(device, 0x400110);
+		en = nvkm_enum_find(nv50_data_error_names, ecode);
+		nvkm_error(subdev, "DATA_ERROR %08x [%s]\n",
+			   ecode, en ? en->name : "");
 		show_bitfield &= ~0x00100000;
 	}
 
 	if (stat & 0x00200000) {
-		if (!nv50_gr_trap_handler(priv, show, chid, (u64)inst << 12,
-					  engctx))
+		if (!nv50_gr_trap_handler(gr, show, chid, (u64)inst << 12, name))
 			show &= ~0x00200000;
 		show_bitfield &= ~0x00200000;
 	}
 
-	nv_wr32(priv, 0x400100, stat);
-	nv_wr32(priv, 0x400500, 0x00010001);
+	nvkm_wr32(device, 0x400100, stat);
+	nvkm_wr32(device, 0x400500, 0x00010001);
 
 	if (show) {
 		show &= show_bitfield;
-		if (show) {
-			nv_error(priv, "%s", "");
-			nvkm_bitfield_print(nv50_gr_intr_name, show);
-			pr_cont("\n");
-		}
-		nv_error(priv,
-			 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
-			 chid, (u64)inst << 12, nvkm_client_name(engctx),
-			 subc, class, mthd, data);
+		nvkm_snprintbf(msg, sizeof(msg), nv50_gr_intr_name, show);
+		nvkm_error(subdev, "%08x [%s] ch %d [%010llx %s] subc %d "
+				   "class %04x mthd %04x data %08x\n",
+			   stat, msg, chid, (u64)inst << 12, name,
+			   subc, class, mthd, data);
 	}
 
-	if (nv_rd32(priv, 0x400824) & (1 << 31))
-		nv_wr32(priv, 0x400824, nv_rd32(priv, 0x400824) & ~(1 << 31));
+	if (nvkm_rd32(device, 0x400824) & (1 << 31))
+		nvkm_wr32(device, 0x400824, nvkm_rd32(device, 0x400824) & ~(1 << 31));
 
-	nvkm_engctx_put(engctx);
+	nvkm_fifo_chan_put(device->fifo, flags, &chan);
 }
 
-static int
-nv50_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+int
+nv50_gr_init(struct nvkm_gr *base)
 {
-	struct nv50_gr_priv *priv;
-	int ret;
-
-	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00201000;
-	nv_subdev(priv)->intr = nv50_gr_intr;
-	nv_engine(priv)->cclass = &nv50_gr_cclass;
-
-	priv->base.units = nv50_gr_units;
-
-	switch (nv_device(priv)->chipset) {
-	case 0x50:
-		nv_engine(priv)->sclass = nv50_gr_sclass;
-		break;
-	case 0x84:
-	case 0x86:
-	case 0x92:
-	case 0x94:
-	case 0x96:
-	case 0x98:
-		nv_engine(priv)->sclass = g84_gr_sclass;
-		break;
-	case 0xa0:
-	case 0xaa:
-	case 0xac:
-		nv_engine(priv)->sclass = gt200_gr_sclass;
-		break;
-	case 0xa3:
-	case 0xa5:
-	case 0xa8:
-		nv_engine(priv)->sclass = gt215_gr_sclass;
-		break;
-	case 0xaf:
-		nv_engine(priv)->sclass = mcp89_gr_sclass;
-		break;
-
-	}
-
-	/* unfortunate hw bug workaround... */
-	if (nv_device(priv)->chipset != 0x50 &&
-	    nv_device(priv)->chipset != 0xac)
-		nv_engine(priv)->tlb_flush = g84_gr_tlb_flush;
-
-	spin_lock_init(&priv->lock);
-	return 0;
-}
-
-static int
-nv50_gr_init(struct nvkm_object *object)
-{
-	struct nv50_gr_priv *priv = (void *)object;
+	struct nv50_gr *gr = nv50_gr(base);
+	struct nvkm_device *device = gr->base.engine.subdev.device;
 	int ret, units, i;
 
-	ret = nvkm_gr_init(&priv->base);
-	if (ret)
-		return ret;
-
 	/* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
-	nv_wr32(priv, 0x40008c, 0x00000004);
+	nvkm_wr32(device, 0x40008c, 0x00000004);
 
 	/* reset/enable traps and interrupts */
-	nv_wr32(priv, 0x400804, 0xc0000000);
-	nv_wr32(priv, 0x406800, 0xc0000000);
-	nv_wr32(priv, 0x400c04, 0xc0000000);
-	nv_wr32(priv, 0x401800, 0xc0000000);
-	nv_wr32(priv, 0x405018, 0xc0000000);
-	nv_wr32(priv, 0x402000, 0xc0000000);
+	nvkm_wr32(device, 0x400804, 0xc0000000);
+	nvkm_wr32(device, 0x406800, 0xc0000000);
+	nvkm_wr32(device, 0x400c04, 0xc0000000);
+	nvkm_wr32(device, 0x401800, 0xc0000000);
+	nvkm_wr32(device, 0x405018, 0xc0000000);
+	nvkm_wr32(device, 0x402000, 0xc0000000);
 
-	units = nv_rd32(priv, 0x001540);
+	units = nvkm_rd32(device, 0x001540);
 	for (i = 0; i < 16; i++) {
 		if (!(units & (1 << i)))
 			continue;
 
-		if (nv_device(priv)->chipset < 0xa0) {
-			nv_wr32(priv, 0x408900 + (i << 12), 0xc0000000);
-			nv_wr32(priv, 0x408e08 + (i << 12), 0xc0000000);
-			nv_wr32(priv, 0x408314 + (i << 12), 0xc0000000);
+		if (device->chipset < 0xa0) {
+			nvkm_wr32(device, 0x408900 + (i << 12), 0xc0000000);
+			nvkm_wr32(device, 0x408e08 + (i << 12), 0xc0000000);
+			nvkm_wr32(device, 0x408314 + (i << 12), 0xc0000000);
 		} else {
-			nv_wr32(priv, 0x408600 + (i << 11), 0xc0000000);
-			nv_wr32(priv, 0x408708 + (i << 11), 0xc0000000);
-			nv_wr32(priv, 0x40831c + (i << 11), 0xc0000000);
+			nvkm_wr32(device, 0x408600 + (i << 11), 0xc0000000);
+			nvkm_wr32(device, 0x408708 + (i << 11), 0xc0000000);
+			nvkm_wr32(device, 0x40831c + (i << 11), 0xc0000000);
 		}
 	}
 
-	nv_wr32(priv, 0x400108, 0xffffffff);
-	nv_wr32(priv, 0x400138, 0xffffffff);
-	nv_wr32(priv, 0x400100, 0xffffffff);
-	nv_wr32(priv, 0x40013c, 0xffffffff);
-	nv_wr32(priv, 0x400500, 0x00010001);
+	nvkm_wr32(device, 0x400108, 0xffffffff);
+	nvkm_wr32(device, 0x400138, 0xffffffff);
+	nvkm_wr32(device, 0x400100, 0xffffffff);
+	nvkm_wr32(device, 0x40013c, 0xffffffff);
+	nvkm_wr32(device, 0x400500, 0x00010001);
 
 	/* upload context program, initialise ctxctl defaults */
-	ret = nv50_grctx_init(nv_device(priv), &priv->size);
+	ret = nv50_grctx_init(device, &gr->size);
 	if (ret)
 		return ret;
 
-	nv_wr32(priv, 0x400824, 0x00000000);
-	nv_wr32(priv, 0x400828, 0x00000000);
-	nv_wr32(priv, 0x40082c, 0x00000000);
-	nv_wr32(priv, 0x400830, 0x00000000);
-	nv_wr32(priv, 0x40032c, 0x00000000);
-	nv_wr32(priv, 0x400330, 0x00000000);
+	nvkm_wr32(device, 0x400824, 0x00000000);
+	nvkm_wr32(device, 0x400828, 0x00000000);
+	nvkm_wr32(device, 0x40082c, 0x00000000);
+	nvkm_wr32(device, 0x400830, 0x00000000);
+	nvkm_wr32(device, 0x40032c, 0x00000000);
+	nvkm_wr32(device, 0x400330, 0x00000000);
 
 	/* some unknown zcull magic */
-	switch (nv_device(priv)->chipset & 0xf0) {
+	switch (device->chipset & 0xf0) {
 	case 0x50:
 	case 0x80:
 	case 0x90:
-		nv_wr32(priv, 0x402ca8, 0x00000800);
+		nvkm_wr32(device, 0x402ca8, 0x00000800);
 		break;
 	case 0xa0:
 	default:
-		if (nv_device(priv)->chipset == 0xa0 ||
-		    nv_device(priv)->chipset == 0xaa ||
-		    nv_device(priv)->chipset == 0xac) {
-			nv_wr32(priv, 0x402ca8, 0x00000802);
+		if (device->chipset == 0xa0 ||
+		    device->chipset == 0xaa ||
+		    device->chipset == 0xac) {
+			nvkm_wr32(device, 0x402ca8, 0x00000802);
 		} else {
-			nv_wr32(priv, 0x402cc0, 0x00000000);
-			nv_wr32(priv, 0x402ca8, 0x00000002);
+			nvkm_wr32(device, 0x402cc0, 0x00000000);
+			nvkm_wr32(device, 0x402ca8, 0x00000002);
 		}
 
 		break;
@@ -979,21 +748,47 @@
 
 	/* zero out zcull regions */
 	for (i = 0; i < 8; i++) {
-		nv_wr32(priv, 0x402c20 + (i * 0x10), 0x00000000);
-		nv_wr32(priv, 0x402c24 + (i * 0x10), 0x00000000);
-		nv_wr32(priv, 0x402c28 + (i * 0x10), 0x00000000);
-		nv_wr32(priv, 0x402c2c + (i * 0x10), 0x00000000);
+		nvkm_wr32(device, 0x402c20 + (i * 0x10), 0x00000000);
+		nvkm_wr32(device, 0x402c24 + (i * 0x10), 0x00000000);
+		nvkm_wr32(device, 0x402c28 + (i * 0x10), 0x00000000);
+		nvkm_wr32(device, 0x402c2c + (i * 0x10), 0x00000000);
 	}
+
 	return 0;
 }
 
-struct nvkm_oclass
-nv50_gr_oclass = {
-	.handle = NV_ENGINE(GR, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_gr_ctor,
-		.dtor = _nvkm_gr_dtor,
-		.init = nv50_gr_init,
-		.fini = _nvkm_gr_fini,
-	},
+int
+nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_gr **pgr)
+{
+	struct nv50_gr *gr;
+
+	if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
+		return -ENOMEM;
+	spin_lock_init(&gr->lock);
+	*pgr = &gr->base;
+
+	return nvkm_gr_ctor(func, device, index, 0x00201000, true, &gr->base);
+}
+
+static const struct nvkm_gr_func
+nv50_gr = {
+	.init = nv50_gr_init,
+	.intr = nv50_gr_intr,
+	.chan_new = nv50_gr_chan_new,
+	.units = nv50_gr_units,
+	.sclass = {
+		{ -1, -1, 0x0030, &nv50_gr_object },
+		{ -1, -1, 0x502d, &nv50_gr_object },
+		{ -1, -1, 0x5039, &nv50_gr_object },
+		{ -1, -1, 0x5097, &nv50_gr_object },
+		{ -1, -1, 0x50c0, &nv50_gr_object },
+		{}
+	}
 };
+
+int
+nv50_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+	return nv50_gr_new_(&nv50_gr, device, index, pgr);
+}
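
The split above is the pattern this refactor establishes throughout: nv50_gr_new_() owns allocation and base construction, while each chipset supplies only an nvkm_gr_func table. A hedged sketch of how a later chipset variant plugs in, reusing only hooks that this file and nv50.h export (the chipset name and the exact hook selection here are illustrative assumptions, not part of this patch):

    /* hypothetical later-chipset variant; illustration only */
    static const struct nvkm_gr_func
    gxx_gr = {
            .init = nv50_gr_init,          /* shared init, declared in nv50.h */
            .intr = nv50_gr_intr,
            .chan_new = nv50_gr_chan_new,
            .tlb_flush = g84_gr_tlb_flush, /* the hw-bug workaround hook */
            .units = nv50_gr_units,
            .sclass = {
                    { -1, -1, 0x0030, &nv50_gr_object },
                    {}
            }
    };

    int
    gxx_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
    {
            return nv50_gr_new_(&gxx_gr, device, index, pgr);
    }
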
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
index bcf786f..45eec83 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
@@ -1,8 +1,34 @@
 #ifndef __NV50_GR_H__
 #define __NV50_GR_H__
-#include <engine/gr.h>
-struct nvkm_device;
-struct nvkm_gpuobj;
+#define nv50_gr(p) container_of((p), struct nv50_gr, base)
+#include "priv.h"
+
+struct nv50_gr {
+	struct nvkm_gr base;
+	const struct nv50_gr_func *func;
+	spinlock_t lock;
+	u32 size;
+};
+
+int nv50_gr_new_(const struct nvkm_gr_func *, struct nvkm_device *, int index,
+		 struct nvkm_gr **);
+int nv50_gr_init(struct nvkm_gr *);
+void nv50_gr_intr(struct nvkm_gr *);
+u64 nv50_gr_units(struct nvkm_gr *);
+
+int g84_gr_tlb_flush(struct nvkm_gr *);
+
+#define nv50_gr_chan(p) container_of((p), struct nv50_gr_chan, object)
+
+struct nv50_gr_chan {
+	struct nvkm_object object;
+	struct nv50_gr *gr;
+};
+
+int nv50_gr_chan_new(struct nvkm_gr *, struct nvkm_fifo_chan *,
+		     const struct nvkm_oclass *, struct nvkm_object **);
+
+extern const struct nvkm_object_func nv50_gr_object;
 
 int  nv50_grctx_init(struct nvkm_device *, u32 *size);
 void nv50_grctx_fill(struct nvkm_device *, struct nvkm_gpuobj *);
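
The nv50_gr(p) macro above is the usual container_of() upcast: given a pointer to the embedded nvkm_gr base, it recovers the enclosing nv50_gr. A minimal standalone restatement of the idiom with stand-in types (not nvkm's):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct base { int id; };
    struct wrapper {
            struct base base;   /* embedded by value, never a pointer */
            unsigned size;
    };
    #define to_wrapper(p) container_of((p), struct wrapper, base)

    /* any function handed only a struct base * can reach the wrapper,
     * which is how nv50_gr(), nv31_mpeg() and nv44_mpeg_chan() are used */
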
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
new file mode 100644
index 0000000..a234590
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -0,0 +1,38 @@
+#ifndef __NVKM_GR_PRIV_H__
+#define __NVKM_GR_PRIV_H__
+#define nvkm_gr(p) container_of((p), struct nvkm_gr, engine)
+#include <engine/gr.h>
+#include <core/enum.h>
+struct nvkm_fb_tile;
+struct nvkm_fifo_chan;
+
+int nvkm_gr_ctor(const struct nvkm_gr_func *, struct nvkm_device *,
+		 int index, u32 pmc_enable, bool enable,
+		 struct nvkm_gr *);
+
+bool nv04_gr_idle(struct nvkm_gr *);
+
+struct nvkm_gr_func {
+	void *(*dtor)(struct nvkm_gr *);
+	int (*oneinit)(struct nvkm_gr *);
+	int (*init)(struct nvkm_gr *);
+	void (*intr)(struct nvkm_gr *);
+	void (*tile)(struct nvkm_gr *, int region, struct nvkm_fb_tile *);
+	int (*tlb_flush)(struct nvkm_gr *);
+	int (*chan_new)(struct nvkm_gr *, struct nvkm_fifo_chan *,
+			const struct nvkm_oclass *, struct nvkm_object **);
+	int (*object_get)(struct nvkm_gr *, int, struct nvkm_sclass *);
+	/* Returns chipset-specific counts of units packed into a u64.
+	 */
+	u64 (*units)(struct nvkm_gr *);
+	struct nvkm_sclass sclass[];
+};
+
+extern const struct nvkm_bitfield nv04_gr_nsource[];
+extern const struct nvkm_object_func nv04_gr_object;
+
+extern const struct nvkm_bitfield nv10_gr_intr_name[];
+extern const struct nvkm_bitfield nv10_gr_nstatus[];
+
+extern const struct nvkm_enum nv50_data_error_names[];
+#endif
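
Note the flexible array member closing nvkm_gr_func: sclass[] is a sentinel-terminated table, ended by the empty {} initializer seen in each chipset file. A standalone sketch of walking such a table (field names mirror the initializers in this patch; the lookup loop itself is an assumption about usage, not copied code):

    struct sclass_entry {
            int minver, maxver;
            unsigned oclass;
            const void *func;
    };

    static const struct sclass_entry *
    sclass_find(const struct sclass_entry *s, unsigned oclass)
    {
            for (; s->oclass; s++) {        /* {} sentinel has oclass == 0 */
                    if (s->oclass == oclass)
                            return s;
            }
            return 0;
    }
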
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
index 0df889f..34ff001 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
@@ -21,74 +21,24 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/mpeg.h>
+#include "priv.h"
 
-struct g84_mpeg_priv {
-	struct nvkm_mpeg base;
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+g84_mpeg = {
+	.init = nv50_mpeg_init,
+	.intr = nv50_mpeg_intr,
+	.cclass = &nv50_mpeg_cclass,
+	.sclass = {
+		{ -1, -1, G82_MPEG, &nv31_mpeg_object },
+		{}
+	}
 };
 
-struct g84_mpeg_chan {
-	struct nvkm_mpeg_chan base;
-};
-
-/*******************************************************************************
- * MPEG object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_mpeg_sclass[] = {
-	{ 0x8274, &nv50_mpeg_ofuncs },
-	{}
-};
-
-/*******************************************************************************
- * PMPEG context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_mpeg_cclass = {
-	.handle = NV_ENGCTX(MPEG, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_mpeg_context_ctor,
-		.dtor = _nvkm_mpeg_context_dtor,
-		.init = _nvkm_mpeg_context_init,
-		.fini = _nvkm_mpeg_context_fini,
-		.rd32 = _nvkm_mpeg_context_rd32,
-		.wr32 = _nvkm_mpeg_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMPEG engine/subdev functions
- ******************************************************************************/
-
-static int
-g84_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+g84_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
 {
-	struct g84_mpeg_priv *priv;
-	int ret;
-
-	ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000002;
-	nv_subdev(priv)->intr = nv50_mpeg_intr;
-	nv_engine(priv)->cclass = &g84_mpeg_cclass;
-	nv_engine(priv)->sclass = g84_mpeg_sclass;
-	return 0;
+	return nvkm_engine_new_(&g84_mpeg, device, index, 0x00000002,
+				true, pmpeg);
 }
-
-struct nvkm_oclass
-g84_mpeg_oclass = {
-	.handle = NV_ENGINE(MPEG, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_mpeg_ctor,
-		.dtor = _nvkm_mpeg_dtor,
-		.init = nv50_mpeg_init,
-		.fini = _nvkm_mpeg_fini,
-	},
-};
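
Two construction styles coexist after this rework: g84 carries no private state, so nvkm_engine_new_() can allocate a bare nvkm_engine for it, whereas nv31/nv44 below embed the engine inside a larger struct and construct it in place with nvkm_engine_ctor(). A standalone sketch of the embedding pattern (stand-in types, not the nvkm signatures):

    #include <stdlib.h>

    struct engine { int index; };
    struct my_engine {
            struct engine base;     /* embedded so container_of() works */
            int private_state;
    };

    /* mirrors nv31_mpeg_new_(): allocate the wrapper, publish the
     * embedded base, then construct the base in place */
    static int my_engine_new(struct engine **pengine)
    {
            struct my_engine *e = calloc(1, sizeof(*e));
            if (!e)
                    return -1;      /* stands in for -ENOMEM */
            *pengine = &e->base;
            e->base.index = 0;      /* stands in for the base ctor */
            return 0;
    }
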
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index b5bef07..d4d8942 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -24,281 +24,271 @@
 #include "nv31.h"
 
 #include <core/client.h>
-#include <core/handle.h>
-#include <engine/fifo.h>
-#include <subdev/instmem.h>
+#include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/timer.h>
+#include <engine/fifo.h>
+
+#include <nvif/class.h>
 
 /*******************************************************************************
  * MPEG object classes
  ******************************************************************************/
 
 static int
-nv31_mpeg_object_ctor(struct nvkm_object *parent,
-		      struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *data, u32 size,
-		      struct nvkm_object **pobject)
+nv31_mpeg_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		      int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_gpuobj *obj;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 20, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, nv_mclass(obj));
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	return 0;
-}
-
-static int
-nv31_mpeg_mthd_dma(struct nvkm_object *object, u32 mthd, void *arg, u32 len)
-{
-	struct nvkm_instmem *imem = nvkm_instmem(object);
-	struct nv31_mpeg_priv *priv = (void *)object->engine;
-	u32 inst = *(u32 *)arg << 4;
-	u32 dma0 = nv_ro32(imem, inst + 0);
-	u32 dma1 = nv_ro32(imem, inst + 4);
-	u32 dma2 = nv_ro32(imem, inst + 8);
-	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
-	u32 size = dma1 + 1;
-
-	/* only allow linear DMA objects */
-	if (!(dma0 & 0x00002000))
-		return -EINVAL;
-
-	if (mthd == 0x0190) {
-		/* DMA_CMD */
-		nv_mask(priv, 0x00b300, 0x00010000, (dma0 & 0x00030000) ? 0x00010000 : 0);
-		nv_wr32(priv, 0x00b334, base);
-		nv_wr32(priv, 0x00b324, size);
-	} else
-	if (mthd == 0x01a0) {
-		/* DMA_DATA */
-		nv_mask(priv, 0x00b300, 0x00020000, (dma0 & 0x00030000) ? 0x00020000 : 0);
-		nv_wr32(priv, 0x00b360, base);
-		nv_wr32(priv, 0x00b364, size);
-	} else {
-		/* DMA_IMAGE, VRAM only */
-		if (dma0 & 0x00030000)
-			return -EINVAL;
-
-		nv_wr32(priv, 0x00b370, base);
-		nv_wr32(priv, 0x00b374, size);
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,
+				  false, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
+		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
+		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nvkm_done(*pgpuobj);
 	}
-
-	return 0;
+	return ret;
 }
 
-struct nvkm_ofuncs
-nv31_mpeg_ofuncs = {
-	.ctor = nv31_mpeg_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_omthds
-nv31_mpeg_omthds[] = {
-	{ 0x0190, 0x0190, nv31_mpeg_mthd_dma },
-	{ 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
-	{ 0x01b0, 0x01b0, nv31_mpeg_mthd_dma },
-	{}
-};
-
-struct nvkm_oclass
-nv31_mpeg_sclass[] = {
-	{ 0x3174, &nv31_mpeg_ofuncs, nv31_mpeg_omthds },
-	{}
+const struct nvkm_object_func
+nv31_mpeg_object = {
+	.bind = nv31_mpeg_object_bind,
 };
 
 /*******************************************************************************
  * PMPEG context
  ******************************************************************************/
 
-static int
-nv31_mpeg_context_ctor(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
+static void *
+nv31_mpeg_chan_dtor(struct nvkm_object *object)
 {
-	struct nv31_mpeg_priv *priv = (void *)engine;
+	struct nv31_mpeg_chan *chan = nv31_mpeg_chan(object);
+	struct nv31_mpeg *mpeg = chan->mpeg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
+	if (mpeg->chan == chan)
+		mpeg->chan = NULL;
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
+	return chan;
+}
+
+static const struct nvkm_object_func
+nv31_mpeg_chan = {
+	.dtor = nv31_mpeg_chan_dtor,
+};
+
+int
+nv31_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
+		   const struct nvkm_oclass *oclass,
+		   struct nvkm_object **pobject)
+{
+	struct nv31_mpeg *mpeg = nv31_mpeg(oclass->engine);
 	struct nv31_mpeg_chan *chan;
 	unsigned long flags;
-	int ret;
+	int ret = -EBUSY;
 
-	ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv31_mpeg_chan, oclass, &chan->object);
+	chan->mpeg = mpeg;
+	chan->fifo = fifoch;
+	*pobject = &chan->object;
 
-	spin_lock_irqsave(&nv_engine(priv)->lock, flags);
-	if (priv->chan) {
-		spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
-		nvkm_object_destroy(&chan->base);
-		*pobject = NULL;
-		return -EBUSY;
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
+	if (!mpeg->chan) {
+		mpeg->chan = chan;
+		ret = 0;
 	}
-	priv->chan = chan;
-	spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
-	return 0;
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
+	return ret;
 }
 
-static void
-nv31_mpeg_context_dtor(struct nvkm_object *object)
-{
-	struct nv31_mpeg_priv *priv = (void *)object->engine;
-	struct nv31_mpeg_chan *chan = (void *)object;
-	unsigned long flags;
-
-	spin_lock_irqsave(&nv_engine(priv)->lock, flags);
-	priv->chan = NULL;
-	spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
-	nvkm_object_destroy(&chan->base);
-}
-
-struct nvkm_oclass
-nv31_mpeg_cclass = {
-	.handle = NV_ENGCTX(MPEG, 0x31),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv31_mpeg_context_ctor,
-		.dtor = nv31_mpeg_context_dtor,
-		.init = nvkm_object_init,
-		.fini = nvkm_object_fini,
-	},
-};
-
 /*******************************************************************************
  * PMPEG engine/subdev functions
  ******************************************************************************/
 
 void
-nv31_mpeg_tile_prog(struct nvkm_engine *engine, int i)
+nv31_mpeg_tile(struct nvkm_engine *engine, int i, struct nvkm_fb_tile *tile)
 {
-	struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i];
-	struct nv31_mpeg_priv *priv = (void *)engine;
+	struct nv31_mpeg *mpeg = nv31_mpeg(engine);
+	struct nvkm_device *device = mpeg->engine.subdev.device;
 
-	nv_wr32(priv, 0x00b008 + (i * 0x10), tile->pitch);
-	nv_wr32(priv, 0x00b004 + (i * 0x10), tile->limit);
-	nv_wr32(priv, 0x00b000 + (i * 0x10), tile->addr);
+	nvkm_wr32(device, 0x00b008 + (i * 0x10), tile->pitch);
+	nvkm_wr32(device, 0x00b004 + (i * 0x10), tile->limit);
+	nvkm_wr32(device, 0x00b000 + (i * 0x10), tile->addr);
 }
 
-void
-nv31_mpeg_intr(struct nvkm_subdev *subdev)
+static bool
+nv31_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
 {
-	struct nv31_mpeg_priv *priv = (void *)subdev;
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_handle *handle;
-	struct nvkm_object *engctx;
-	u32 stat = nv_rd32(priv, 0x00b100);
-	u32 type = nv_rd32(priv, 0x00b230);
-	u32 mthd = nv_rd32(priv, 0x00b234);
-	u32 data = nv_rd32(priv, 0x00b238);
+	u32 inst = data << 4;
+	u32 dma0 = nvkm_rd32(device, 0x700000 + inst);
+	u32 dma1 = nvkm_rd32(device, 0x700004 + inst);
+	u32 dma2 = nvkm_rd32(device, 0x700008 + inst);
+	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
+	u32 size = dma1 + 1;
+
+	/* only allow linear DMA objects */
+	if (!(dma0 & 0x00002000))
+		return false;
+
+	if (mthd == 0x0190) {
+		/* DMA_CMD */
+		nvkm_mask(device, 0x00b300, 0x00010000,
+				  (dma0 & 0x00030000) ? 0x00010000 : 0);
+		nvkm_wr32(device, 0x00b334, base);
+		nvkm_wr32(device, 0x00b324, size);
+	} else
+	if (mthd == 0x01a0) {
+		/* DMA_DATA */
+		nvkm_mask(device, 0x00b300, 0x00020000,
+				  (dma0 & 0x00030000) ? 0x00020000 : 0);
+		nvkm_wr32(device, 0x00b360, base);
+		nvkm_wr32(device, 0x00b364, size);
+	} else {
+		/* DMA_IMAGE, VRAM only */
+		if (dma0 & 0x00030000)
+			return false;
+
+		nvkm_wr32(device, 0x00b370, base);
+		nvkm_wr32(device, 0x00b374, size);
+	}
+
+	return true;
+}
+
+static bool
+nv31_mpeg_mthd(struct nv31_mpeg *mpeg, u32 mthd, u32 data)
+{
+	struct nvkm_device *device = mpeg->engine.subdev.device;
+	switch (mthd) {
+	case 0x190:
+	case 0x1a0:
+	case 0x1b0:
+		return mpeg->func->mthd_dma(device, mthd, data);
+	default:
+		break;
+	}
+	return false;
+}
+
+static void
+nv31_mpeg_intr(struct nvkm_engine *engine)
+{
+	struct nv31_mpeg *mpeg = nv31_mpeg(engine);
+	struct nvkm_subdev *subdev = &mpeg->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x00b100);
+	u32 type = nvkm_rd32(device, 0x00b230);
+	u32 mthd = nvkm_rd32(device, 0x00b234);
+	u32 data = nvkm_rd32(device, 0x00b238);
 	u32 show = stat;
 	unsigned long flags;
 
-	spin_lock_irqsave(&nv_engine(priv)->lock, flags);
-	engctx = nv_object(priv->chan);
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
 
 	if (stat & 0x01000000) {
 		/* happens on initial binding of the object */
 		if (type == 0x00000020 && mthd == 0x0000) {
-			nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
+			nvkm_mask(device, 0x00b308, 0x00000000, 0x00000000);
 			show &= ~0x01000000;
 		}
 
-		if (type == 0x00000010 && engctx) {
-			handle = nvkm_handle_get_class(engctx, 0x3174);
-			if (handle && !nv_call(handle->object, mthd, data))
+		if (type == 0x00000010) {
+			if (!nv31_mpeg_mthd(mpeg, mthd, data))
 				show &= ~0x01000000;
-			nvkm_handle_put(handle);
 		}
 	}
 
-	nv_wr32(priv, 0x00b100, stat);
-	nv_wr32(priv, 0x00b230, 0x00000001);
+	nvkm_wr32(device, 0x00b100, stat);
+	nvkm_wr32(device, 0x00b230, 0x00000001);
 
 	if (show) {
-		nv_error(priv, "ch %d [%s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-			 pfifo->chid(pfifo, engctx),
-			 nvkm_client_name(engctx), stat, type, mthd, data);
+		nvkm_error(subdev, "ch %d [%s] %08x %08x %08x %08x\n",
+			   mpeg->chan ? mpeg->chan->fifo->chid : -1,
+			   mpeg->chan ? mpeg->chan->object.client->name :
+			   "unknown", stat, type, mthd, data);
 	}
 
-	spin_unlock_irqrestore(&nv_engine(priv)->lock, flags);
-}
-
-static int
-nv31_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv31_mpeg_priv *priv;
-	int ret;
-
-	ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000002;
-	nv_subdev(priv)->intr = nv31_mpeg_intr;
-	nv_engine(priv)->cclass = &nv31_mpeg_cclass;
-	nv_engine(priv)->sclass = nv31_mpeg_sclass;
-	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
-	return 0;
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
 }
 
 int
-nv31_mpeg_init(struct nvkm_object *object)
+nv31_mpeg_init(struct nvkm_engine *mpeg)
 {
-	struct nvkm_engine *engine = nv_engine(object);
-	struct nv31_mpeg_priv *priv = (void *)object;
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	int ret, i;
-
-	ret = nvkm_mpeg_init(&priv->base);
-	if (ret)
-		return ret;
+	struct nvkm_subdev *subdev = &mpeg->subdev;
+	struct nvkm_device *device = subdev->device;
 
 	/* VPE init */
-	nv_wr32(priv, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
-	nv_wr32(priv, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
-
-	for (i = 0; i < pfb->tile.regions; i++)
-		engine->tile_prog(engine, i);
+	nvkm_wr32(device, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
+	nvkm_wr32(device, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
 
 	/* PMPEG init */
-	nv_wr32(priv, 0x00b32c, 0x00000000);
-	nv_wr32(priv, 0x00b314, 0x00000100);
-	nv_wr32(priv, 0x00b220, 0x00000031);
-	nv_wr32(priv, 0x00b300, 0x02001ec1);
-	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+	nvkm_wr32(device, 0x00b32c, 0x00000000);
+	nvkm_wr32(device, 0x00b314, 0x00000100);
+	nvkm_wr32(device, 0x00b220, 0x00000031);
+	nvkm_wr32(device, 0x00b300, 0x02001ec1);
+	nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);
 
-	nv_wr32(priv, 0x00b100, 0xffffffff);
-	nv_wr32(priv, 0x00b140, 0xffffffff);
+	nvkm_wr32(device, 0x00b100, 0xffffffff);
+	nvkm_wr32(device, 0x00b140, 0xffffffff);
 
-	if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
-		nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x00b200) & 0x00000001))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "timeout %08x\n",
+			   nvkm_rd32(device, 0x00b200));
 		return -EBUSY;
 	}
 
 	return 0;
 }
 
-struct nvkm_oclass
-nv31_mpeg_oclass = {
-	.handle = NV_ENGINE(MPEG, 0x31),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv31_mpeg_ctor,
-		.dtor = _nvkm_mpeg_dtor,
-		.init = nv31_mpeg_init,
-		.fini = _nvkm_mpeg_fini,
-	},
+static void *
+nv31_mpeg_dtor(struct nvkm_engine *engine)
+{
+	return nv31_mpeg(engine);
+}
+
+static const struct nvkm_engine_func
+nv31_mpeg_ = {
+	.dtor = nv31_mpeg_dtor,
+	.init = nv31_mpeg_init,
+	.intr = nv31_mpeg_intr,
+	.tile = nv31_mpeg_tile,
+	.fifo.cclass = nv31_mpeg_chan_new,
+	.sclass = {
+		{ -1, -1, NV31_MPEG, &nv31_mpeg_object },
+		{}
+	}
 };
+
+int
+nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_engine **pmpeg)
+{
+	struct nv31_mpeg *mpeg;
+
+	if (!(mpeg = kzalloc(sizeof(*mpeg), GFP_KERNEL)))
+		return -ENOMEM;
+	mpeg->func = func;
+	*pmpeg = &mpeg->engine;
+
+	return nvkm_engine_ctor(&nv31_mpeg_, device, index, 0x00000002,
+				true, &mpeg->engine);
+}
+
+static const struct nv31_mpeg_func
+nv31_mpeg = {
+	.mthd_dma = nv31_mpeg_mthd_dma,
+};
+
+int
+nv31_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+{
+	return nv31_mpeg_new_(&nv31_mpeg, device, index, pmpeg);
+}
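
The init path above also shows the new timeout idiom: the old one-line nv_wait() becomes an open-coded poll under nvkm_msec(device, 2000, ...), whose body runs until it breaks out or the time budget lapses, with a negative return signalling timeout. A standalone stand-in for the same idiom (plain C; the real macro's signature and return convention are only sketched here):

    #include <stdbool.h>
    #include <time.h>

    static long now_ms(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    /* poll until done() succeeds or budget_ms expires; returns < 0 on
     * timeout, mirroring the nvkm_msec(...) < 0 checks above */
    static long poll_msec(long budget_ms, bool (*done)(void *), void *arg)
    {
            long deadline = now_ms() + budget_ms;
            while (!done(arg)) {
                    if (now_ms() > deadline)
                            return -1;
            }
            return 0;
    }
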
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
index 782b796..d3bb34f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
@@ -1,13 +1,30 @@
 #ifndef __NV31_MPEG_H__
 #define __NV31_MPEG_H__
+#define nv31_mpeg(p) container_of((p), struct nv31_mpeg, engine)
+#include "priv.h"
 #include <engine/mpeg.h>
 
-struct nv31_mpeg_chan {
-	struct nvkm_object base;
-};
-
-struct nv31_mpeg_priv {
-	struct nvkm_mpeg base;
+struct nv31_mpeg {
+	const struct nv31_mpeg_func *func;
+	struct nvkm_engine engine;
 	struct nv31_mpeg_chan *chan;
 };
+
+int nv31_mpeg_new_(const struct nv31_mpeg_func *, struct nvkm_device *,
+		   int index, struct nvkm_engine **);
+
+struct nv31_mpeg_func {
+	bool (*mthd_dma)(struct nvkm_device *, u32 mthd, u32 data);
+};
+
+#define nv31_mpeg_chan(p) container_of((p), struct nv31_mpeg_chan, object)
+
+struct nv31_mpeg_chan {
+	struct nvkm_object object;
+	struct nv31_mpeg *mpeg;
+	struct nvkm_fifo_chan *fifo;
+};
+
+int nv31_mpeg_chan_new(struct nvkm_fifo_chan *, const struct nvkm_oclass *,
+		       struct nvkm_object **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
index 9508bf9..16de5bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
@@ -25,110 +25,53 @@
 
 #include <subdev/instmem.h>
 
-/*******************************************************************************
- * MPEG object classes
- ******************************************************************************/
+#include <nvif/class.h>
 
-static int
-nv40_mpeg_mthd_dma(struct nvkm_object *object, u32 mthd, void *arg, u32 len)
+bool
+nv40_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
 {
-	struct nvkm_instmem *imem = nvkm_instmem(object);
-	struct nv31_mpeg_priv *priv = (void *)object->engine;
-	u32 inst = *(u32 *)arg << 4;
-	u32 dma0 = nv_ro32(imem, inst + 0);
-	u32 dma1 = nv_ro32(imem, inst + 4);
-	u32 dma2 = nv_ro32(imem, inst + 8);
+	struct nvkm_instmem *imem = device->imem;
+	u32 inst = data << 4;
+	u32 dma0 = nvkm_instmem_rd32(imem, inst + 0);
+	u32 dma1 = nvkm_instmem_rd32(imem, inst + 4);
+	u32 dma2 = nvkm_instmem_rd32(imem, inst + 8);
 	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
 	u32 size = dma1 + 1;
 
 	/* only allow linear DMA objects */
 	if (!(dma0 & 0x00002000))
-		return -EINVAL;
+		return false;
 
 	if (mthd == 0x0190) {
 		/* DMA_CMD */
-		nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
-		nv_wr32(priv, 0x00b334, base);
-		nv_wr32(priv, 0x00b324, size);
+		nvkm_mask(device, 0x00b300, 0x00030000, (dma0 & 0x00030000));
+		nvkm_wr32(device, 0x00b334, base);
+		nvkm_wr32(device, 0x00b324, size);
 	} else
 	if (mthd == 0x01a0) {
 		/* DMA_DATA */
-		nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
-		nv_wr32(priv, 0x00b360, base);
-		nv_wr32(priv, 0x00b364, size);
+		nvkm_mask(device, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
+		nvkm_wr32(device, 0x00b360, base);
+		nvkm_wr32(device, 0x00b364, size);
 	} else {
 		/* DMA_IMAGE, VRAM only */
 		if (dma0 & 0x00030000)
-			return -EINVAL;
+			return false;
 
-		nv_wr32(priv, 0x00b370, base);
-		nv_wr32(priv, 0x00b374, size);
+		nvkm_wr32(device, 0x00b370, base);
+		nvkm_wr32(device, 0x00b374, size);
 	}
 
-	return 0;
+	return true;
 }
 
-static struct nvkm_omthds
-nv40_mpeg_omthds[] = {
-	{ 0x0190, 0x0190, nv40_mpeg_mthd_dma },
-	{ 0x01a0, 0x01a0, nv40_mpeg_mthd_dma },
-	{ 0x01b0, 0x01b0, nv40_mpeg_mthd_dma },
-	{}
+static const struct nv31_mpeg_func
+nv40_mpeg = {
+	.mthd_dma = nv40_mpeg_mthd_dma,
 };
 
-struct nvkm_oclass
-nv40_mpeg_sclass[] = {
-	{ 0x3174, &nv31_mpeg_ofuncs, nv40_mpeg_omthds },
-	{}
-};
-
-/*******************************************************************************
- * PMPEG engine/subdev functions
- ******************************************************************************/
-
-static void
-nv40_mpeg_intr(struct nvkm_subdev *subdev)
+int
+nv40_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
 {
-	struct nv31_mpeg_priv *priv = (void *)subdev;
-	u32 stat;
-
-	if ((stat = nv_rd32(priv, 0x00b100)))
-		nv31_mpeg_intr(subdev);
-
-	if ((stat = nv_rd32(priv, 0x00b800))) {
-		nv_error(priv, "PMSRCH 0x%08x\n", stat);
-		nv_wr32(priv, 0x00b800, stat);
-	}
+	return nv31_mpeg_new_(&nv40_mpeg, device, index, pmpeg);
 }
-
-static int
-nv40_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv31_mpeg_priv *priv;
-	int ret;
-
-	ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000002;
-	nv_subdev(priv)->intr = nv40_mpeg_intr;
-	nv_engine(priv)->cclass = &nv31_mpeg_cclass;
-	nv_engine(priv)->sclass = nv40_mpeg_sclass;
-	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
-	return 0;
-}
-
-struct nvkm_oclass
-nv40_mpeg_oclass = {
-	.handle = NV_ENGINE(MPEG, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_mpeg_ctor,
-		.dtor = _nvkm_mpeg_dtor,
-		.init = nv31_mpeg_init,
-		.fini = _nvkm_mpeg_fini,
-	},
-};
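
nv31 and nv40 now share everything except how the DMA object is fetched from instance memory: nv31 reads it through the 0x700000 PRAMIN window, nv40 through the instmem accessor, and both decode the same three words. A standalone restatement of that decode (the bit layout is copied from the code above; the struct is an illustration):

    #include <stdbool.h>
    typedef unsigned int u32;

    struct dma_obj { u32 base; u32 size; bool vram; };

    static bool dma_decode(u32 dma0, u32 dma1, u32 dma2, struct dma_obj *o)
    {
            if (!(dma0 & 0x00002000))       /* only linear DMA objects */
                    return false;
            o->base = (dma2 & 0xfffff000) | (dma0 >> 20);
            o->size = dma1 + 1;
            /* target is VRAM when the 0x00030000 bits are clear */
            o->vram = !(dma0 & 0x00030000);
            return true;
    }
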
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index 4720ac8..d433cfa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -21,165 +21,197 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/mpeg.h>
+#define nv44_mpeg(p) container_of((p), struct nv44_mpeg, engine)
+#include "priv.h"
 
 #include <core/client.h>
-#include <core/handle.h>
+#include <core/gpuobj.h>
 #include <engine/fifo.h>
 
-struct nv44_mpeg_priv {
-	struct nvkm_mpeg base;
-};
+#include <nvif/class.h>
 
-struct nv44_mpeg_chan {
-	struct nvkm_mpeg_chan base;
+struct nv44_mpeg {
+	struct nvkm_engine engine;
+	struct list_head chan;
 };
 
 /*******************************************************************************
  * PMPEG context
  ******************************************************************************/
+#define nv44_mpeg_chan(p) container_of((p), struct nv44_mpeg_chan, object)
 
-static int
-nv44_mpeg_context_ctor(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
-{
-	struct nv44_mpeg_chan *chan;
-	int ret;
-
-	ret = nvkm_mpeg_context_create(parent, engine, oclass, NULL, 264 * 4,
-				       16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
-	return 0;
-}
-
-static int
-nv44_mpeg_context_fini(struct nvkm_object *object, bool suspend)
-{
-
-	struct nv44_mpeg_priv *priv = (void *)object->engine;
-	struct nv44_mpeg_chan *chan = (void *)object;
-	u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
-
-	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
-	if (nv_rd32(priv, 0x00b318) == inst)
-		nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
-	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
-	return 0;
-}
-
-static struct nvkm_oclass
-nv44_mpeg_cclass = {
-	.handle = NV_ENGCTX(MPEG, 0x44),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv44_mpeg_context_ctor,
-		.dtor = _nvkm_mpeg_context_dtor,
-		.init = _nvkm_mpeg_context_init,
-		.fini = nv44_mpeg_context_fini,
-		.rd32 = _nvkm_mpeg_context_rd32,
-		.wr32 = _nvkm_mpeg_context_wr32,
-	},
+struct nv44_mpeg_chan {
+	struct nvkm_object object;
+	struct nv44_mpeg *mpeg;
+	struct nvkm_fifo_chan *fifo;
+	struct list_head head;
+	u32 inst;
 };
 
+static int
+nv44_mpeg_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		    int align, struct nvkm_gpuobj **pgpuobj)
+{
+	struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
+	int ret = nvkm_gpuobj_new(chan->object.engine->subdev.device, 264 * 4,
+				  align, true, parent, pgpuobj);
+	if (ret == 0) {
+		chan->inst = (*pgpuobj)->addr;
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x78, 0x02001ec1);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
+}
+
+static int
+nv44_mpeg_chan_fini(struct nvkm_object *object, bool suspend)
+{
+
+	struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
+	struct nv44_mpeg *mpeg = chan->mpeg;
+	struct nvkm_device *device = mpeg->engine.subdev.device;
+	u32 inst = 0x80000000 | (chan->inst >> 4);
+
+	nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000000);
+	if (nvkm_rd32(device, 0x00b318) == inst)
+		nvkm_mask(device, 0x00b318, 0x80000000, 0x00000000);
+	nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);
+	return 0;
+}
+
+static void *
+nv44_mpeg_chan_dtor(struct nvkm_object *object)
+{
+	struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
+	struct nv44_mpeg *mpeg = chan->mpeg;
+	unsigned long flags;
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
+	list_del(&chan->head);
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
+	return chan;
+}
+
+static const struct nvkm_object_func
+nv44_mpeg_chan = {
+	.dtor = nv44_mpeg_chan_dtor,
+	.fini = nv44_mpeg_chan_fini,
+	.bind = nv44_mpeg_chan_bind,
+};
+
+static int
+nv44_mpeg_chan_new(struct nvkm_fifo_chan *fifoch,
+		   const struct nvkm_oclass *oclass,
+		   struct nvkm_object **pobject)
+{
+	struct nv44_mpeg *mpeg = nv44_mpeg(oclass->engine);
+	struct nv44_mpeg_chan *chan;
+	unsigned long flags;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nv44_mpeg_chan, oclass, &chan->object);
+	chan->mpeg = mpeg;
+	chan->fifo = fifoch;
+	*pobject = &chan->object;
+
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
+	list_add(&chan->head, &mpeg->chan);
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
+	return 0;
+}
+
 /*******************************************************************************
  * PMPEG engine/subdev functions
  ******************************************************************************/
 
-static void
-nv44_mpeg_intr(struct nvkm_subdev *subdev)
+static bool
+nv44_mpeg_mthd(struct nvkm_device *device, u32 mthd, u32 data)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct nvkm_handle *handle;
-	struct nv44_mpeg_priv *priv = (void *)subdev;
-	u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
-	u32 stat = nv_rd32(priv, 0x00b100);
-	u32 type = nv_rd32(priv, 0x00b230);
-	u32 mthd = nv_rd32(priv, 0x00b234);
-	u32 data = nv_rd32(priv, 0x00b238);
-	u32 show = stat;
-	int chid;
+	switch (mthd) {
+	case 0x190:
+	case 0x1a0:
+	case 0x1b0:
+		return nv40_mpeg_mthd_dma(device, mthd, data);
+	default:
+		break;
+	}
+	return false;
+}
 
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
+static void
+nv44_mpeg_intr(struct nvkm_engine *engine)
+{
+	struct nv44_mpeg *mpeg = nv44_mpeg(engine);
+	struct nvkm_subdev *subdev = &mpeg->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nv44_mpeg_chan *temp, *chan = NULL;
+	unsigned long flags;
+	u32 inst = nvkm_rd32(device, 0x00b318) & 0x000fffff;
+	u32 stat = nvkm_rd32(device, 0x00b100);
+	u32 type = nvkm_rd32(device, 0x00b230);
+	u32 mthd = nvkm_rd32(device, 0x00b234);
+	u32 data = nvkm_rd32(device, 0x00b238);
+	u32 show = stat;
+
+	spin_lock_irqsave(&mpeg->engine.lock, flags);
+	list_for_each_entry(temp, &mpeg->chan, head) {
+		if (temp->inst >> 4 == inst) {
+			chan = temp;
+			list_del(&chan->head);
+			list_add(&chan->head, &mpeg->chan);
+			break;
+		}
+	}
 
 	if (stat & 0x01000000) {
 		/* happens on initial binding of the object */
 		if (type == 0x00000020 && mthd == 0x0000) {
-			nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
+			nvkm_mask(device, 0x00b308, 0x00000000, 0x00000000);
 			show &= ~0x01000000;
 		}
 
 		if (type == 0x00000010) {
-			handle = nvkm_handle_get_class(engctx, 0x3174);
-			if (handle && !nv_call(handle->object, mthd, data))
+			if (!nv44_mpeg_mthd(subdev->device, mthd, data))
 				show &= ~0x01000000;
-			nvkm_handle_put(handle);
 		}
 	}
 
-	nv_wr32(priv, 0x00b100, stat);
-	nv_wr32(priv, 0x00b230, 0x00000001);
+	nvkm_wr32(device, 0x00b100, stat);
+	nvkm_wr32(device, 0x00b230, 0x00000001);
 
 	if (show) {
-		nv_error(priv,
-			 "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-			 chid, inst << 4, nvkm_client_name(engctx), stat,
-			 type, mthd, data);
+		nvkm_error(subdev, "ch %d [%08x %s] %08x %08x %08x %08x\n",
+			   chan ? chan->fifo->chid : -1, inst << 4,
+			   chan ? chan->object.client->name : "unknown",
+			   stat, type, mthd, data);
 	}
 
-	nvkm_engctx_put(engctx);
+	spin_unlock_irqrestore(&mpeg->engine.lock, flags);
 }
 
-static void
-nv44_mpeg_me_intr(struct nvkm_subdev *subdev)
-{
-	struct nv44_mpeg_priv *priv = (void *)subdev;
-	u32 stat;
-
-	if ((stat = nv_rd32(priv, 0x00b100)))
-		nv44_mpeg_intr(subdev);
-
-	if ((stat = nv_rd32(priv, 0x00b800))) {
-		nv_error(priv, "PMSRCH 0x%08x\n", stat);
-		nv_wr32(priv, 0x00b800, stat);
+static const struct nvkm_engine_func
+nv44_mpeg = {
+	.init = nv31_mpeg_init,
+	.intr = nv44_mpeg_intr,
+	.tile = nv31_mpeg_tile,
+	.fifo.cclass = nv44_mpeg_chan_new,
+	.sclass = {
+		{ -1, -1, NV31_MPEG, &nv31_mpeg_object },
+		{}
 	}
-}
-
-static int
-nv44_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv44_mpeg_priv *priv;
-	int ret;
-
-	ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000002;
-	nv_subdev(priv)->intr = nv44_mpeg_me_intr;
-	nv_engine(priv)->cclass = &nv44_mpeg_cclass;
-	nv_engine(priv)->sclass = nv40_mpeg_sclass;
-	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
-	return 0;
-}
-
-struct nvkm_oclass
-nv44_mpeg_oclass = {
-	.handle = NV_ENGINE(MPEG, 0x44),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv44_mpeg_ctor,
-		.dtor = _nvkm_mpeg_dtor,
-		.init = nv31_mpeg_init,
-		.fini = _nvkm_mpeg_fini,
-	},
 };
+
+int
+nv44_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+{
+	struct nv44_mpeg *mpeg;
+
+	if (!(mpeg = kzalloc(sizeof(*mpeg), GFP_KERNEL)))
+		return -ENOMEM;
+	INIT_LIST_HEAD(&mpeg->chan);
+	*pmpeg = &mpeg->engine;
+
+	return nvkm_engine_ctor(&nv44_mpeg, device, index, 0x00000002,
+				true, &mpeg->engine);
+}
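
Unlike nv31, nv44 contexts live in per-channel instance memory, so the interrupt handler must map the hardware's current instance back to a channel: it scans mpeg->chan under engine.lock and rotates a hit to the list head as a small MRU optimisation. The same lookup restated over a plain singly-linked list (standalone illustration; the driver itself uses <linux/list.h>):

    struct chan { unsigned inst; struct chan *next; };

    /* find by instance, then move the hit to the front (MRU), mirroring
     * the list_del()/list_add() pair in nv44_mpeg_intr() */
    static struct chan *chan_lookup(struct chan **head, unsigned inst)
    {
            struct chan **pp = head, *c;
            while ((c = *pp)) {
                    if (c->inst >> 4 == inst) {
                            *pp = c->next;     /* unlink */
                            c->next = *head;   /* relink at head */
                            *head = c;
                            return c;
                    }
                    pp = &c->next;
            }
            return 0;
    }
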
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
index b3463f3..c3a85df 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
@@ -21,98 +21,35 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/mpeg.h>
+#include "priv.h"
 
-#include <subdev/bar.h>
+#include <core/gpuobj.h>
 #include <subdev/timer.h>
 
-struct nv50_mpeg_priv {
-	struct nvkm_mpeg base;
-};
-
-struct nv50_mpeg_chan {
-	struct nvkm_mpeg_chan base;
-};
-
-/*******************************************************************************
- * MPEG object classes
- ******************************************************************************/
-
-static int
-nv50_mpeg_object_ctor(struct nvkm_object *parent,
-		      struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *data, u32 size,
-		      struct nvkm_object **pobject)
-{
-	struct nvkm_gpuobj *obj;
-	int ret;
-
-	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
-				 16, 16, 0, &obj);
-	*pobject = nv_object(obj);
-	if (ret)
-		return ret;
-
-	nv_wo32(obj, 0x00, nv_mclass(obj));
-	nv_wo32(obj, 0x04, 0x00000000);
-	nv_wo32(obj, 0x08, 0x00000000);
-	nv_wo32(obj, 0x0c, 0x00000000);
-	return 0;
-}
-
-struct nvkm_ofuncs
-nv50_mpeg_ofuncs = {
-	.ctor = nv50_mpeg_object_ctor,
-	.dtor = _nvkm_gpuobj_dtor,
-	.init = _nvkm_gpuobj_init,
-	.fini = _nvkm_gpuobj_fini,
-	.rd32 = _nvkm_gpuobj_rd32,
-	.wr32 = _nvkm_gpuobj_wr32,
-};
-
-static struct nvkm_oclass
-nv50_mpeg_sclass[] = {
-	{ 0x3174, &nv50_mpeg_ofuncs },
-	{}
-};
+#include <nvif/class.h>
 
 /*******************************************************************************
  * PMPEG context
  ******************************************************************************/
 
-int
-nv50_mpeg_context_ctor(struct nvkm_object *parent,
-		       struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, void *data, u32 size,
-		       struct nvkm_object **pobject)
+static int
+nv50_mpeg_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+		      int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct nv50_mpeg_chan *chan;
-	int ret;
-
-	ret = nvkm_mpeg_context_create(parent, engine, oclass, NULL, 128 * 4,
-				       0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	nv_wo32(chan, 0x0070, 0x00801ec1);
-	nv_wo32(chan, 0x007c, 0x0000037c);
-	bar->flush(bar);
-	return 0;
+	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 128 * 4,
+				  align, true, parent, pgpuobj);
+	if (ret == 0) {
+		nvkm_kmap(*pgpuobj);
+		nvkm_wo32(*pgpuobj, 0x70, 0x00801ec1);
+		nvkm_wo32(*pgpuobj, 0x7c, 0x0000037c);
+		nvkm_done(*pgpuobj);
+	}
+	return ret;
 }
 
-static struct nvkm_oclass
+const struct nvkm_object_func
 nv50_mpeg_cclass = {
-	.handle = NV_ENGCTX(MPEG, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_mpeg_context_ctor,
-		.dtor = _nvkm_mpeg_context_dtor,
-		.init = _nvkm_mpeg_context_init,
-		.fini = _nvkm_mpeg_context_fini,
-		.rd32 = _nvkm_mpeg_context_rd32,
-		.wr32 = _nvkm_mpeg_context_wr32,
-	},
+	.bind = nv50_mpeg_cclass_bind,
 };
 
 /*******************************************************************************
@@ -120,106 +57,79 @@
  ******************************************************************************/
 
 void
-nv50_mpeg_intr(struct nvkm_subdev *subdev)
+nv50_mpeg_intr(struct nvkm_engine *mpeg)
 {
-	struct nv50_mpeg_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, 0x00b100);
-	u32 type = nv_rd32(priv, 0x00b230);
-	u32 mthd = nv_rd32(priv, 0x00b234);
-	u32 data = nv_rd32(priv, 0x00b238);
+	struct nvkm_subdev *subdev = &mpeg->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x00b100);
+	u32 type = nvkm_rd32(device, 0x00b230);
+	u32 mthd = nvkm_rd32(device, 0x00b234);
+	u32 data = nvkm_rd32(device, 0x00b238);
 	u32 show = stat;
 
 	if (stat & 0x01000000) {
 		/* happens on initial binding of the object */
 		if (type == 0x00000020 && mthd == 0x0000) {
-			nv_wr32(priv, 0x00b308, 0x00000100);
+			nvkm_wr32(device, 0x00b308, 0x00000100);
 			show &= ~0x01000000;
 		}
 	}
 
 	if (show) {
-		nv_info(priv, "0x%08x 0x%08x 0x%08x 0x%08x\n",
-			stat, type, mthd, data);
+		nvkm_info(subdev, "%08x %08x %08x %08x\n",
+			  stat, type, mthd, data);
 	}
 
-	nv_wr32(priv, 0x00b100, stat);
-	nv_wr32(priv, 0x00b230, 0x00000001);
-}
-
-static void
-nv50_vpe_intr(struct nvkm_subdev *subdev)
-{
-	struct nv50_mpeg_priv *priv = (void *)subdev;
-
-	if (nv_rd32(priv, 0x00b100))
-		nv50_mpeg_intr(subdev);
-
-	if (nv_rd32(priv, 0x00b800)) {
-		u32 stat = nv_rd32(priv, 0x00b800);
-		nv_info(priv, "PMSRCH: 0x%08x\n", stat);
-		nv_wr32(priv, 0xb800, stat);
-	}
-}
-
-static int
-nv50_mpeg_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv50_mpeg_priv *priv;
-	int ret;
-
-	ret = nvkm_mpeg_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00400002;
-	nv_subdev(priv)->intr = nv50_vpe_intr;
-	nv_engine(priv)->cclass = &nv50_mpeg_cclass;
-	nv_engine(priv)->sclass = nv50_mpeg_sclass;
-	return 0;
+	nvkm_wr32(device, 0x00b100, stat);
+	nvkm_wr32(device, 0x00b230, 0x00000001);
 }
 
 int
-nv50_mpeg_init(struct nvkm_object *object)
+nv50_mpeg_init(struct nvkm_engine *mpeg)
 {
-	struct nv50_mpeg_priv *priv = (void *)object;
-	int ret;
+	struct nvkm_subdev *subdev = &mpeg->subdev;
+	struct nvkm_device *device = subdev->device;
 
-	ret = nvkm_mpeg_init(&priv->base);
-	if (ret)
-		return ret;
+	nvkm_wr32(device, 0x00b32c, 0x00000000);
+	nvkm_wr32(device, 0x00b314, 0x00000100);
+	nvkm_wr32(device, 0x00b0e0, 0x0000001a);
 
-	nv_wr32(priv, 0x00b32c, 0x00000000);
-	nv_wr32(priv, 0x00b314, 0x00000100);
-	nv_wr32(priv, 0x00b0e0, 0x0000001a);
+	nvkm_wr32(device, 0x00b220, 0x00000044);
+	nvkm_wr32(device, 0x00b300, 0x00801ec1);
+	nvkm_wr32(device, 0x00b390, 0x00000000);
+	nvkm_wr32(device, 0x00b394, 0x00000000);
+	nvkm_wr32(device, 0x00b398, 0x00000000);
+	nvkm_mask(device, 0x00b32c, 0x00000001, 0x00000001);
 
-	nv_wr32(priv, 0x00b220, 0x00000044);
-	nv_wr32(priv, 0x00b300, 0x00801ec1);
-	nv_wr32(priv, 0x00b390, 0x00000000);
-	nv_wr32(priv, 0x00b394, 0x00000000);
-	nv_wr32(priv, 0x00b398, 0x00000000);
-	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+	nvkm_wr32(device, 0x00b100, 0xffffffff);
+	nvkm_wr32(device, 0x00b140, 0xffffffff);
 
-	nv_wr32(priv, 0x00b100, 0xffffffff);
-	nv_wr32(priv, 0x00b140, 0xffffffff);
-
-	if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
-		nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x00b200) & 0x00000001))
+			break;
+	) < 0) {
+		nvkm_error(subdev, "timeout %08x\n",
+			   nvkm_rd32(device, 0x00b200));
 		return -EBUSY;
 	}
 
 	return 0;
 }
 
-struct nvkm_oclass
-nv50_mpeg_oclass = {
-	.handle = NV_ENGINE(MPEG, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_mpeg_ctor,
-		.dtor = _nvkm_mpeg_dtor,
-		.init = nv50_mpeg_init,
-		.fini = _nvkm_mpeg_fini,
-	},
+static const struct nvkm_engine_func
+nv50_mpeg = {
+	.init = nv50_mpeg_init,
+	.intr = nv50_mpeg_intr,
+	.cclass = &nv50_mpeg_cclass,
+	.sclass = {
+		{ -1, -1, NV31_MPEG, &nv31_mpeg_object },
+		{}
+	}
 };
+
+int
+nv50_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
+{
+	return nvkm_engine_new_(&nv50_mpeg, device, index, 0x00400002,
+				true, pmpeg);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
new file mode 100644
index 0000000..d575310
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
@@ -0,0 +1,16 @@
+#ifndef __NVKM_MPEG_PRIV_H__
+#define __NVKM_MPEG_PRIV_H__
+#include <engine/mpeg.h>
+struct nvkm_fifo_chan;
+
+int nv31_mpeg_init(struct nvkm_engine *);
+void nv31_mpeg_tile(struct nvkm_engine *, int, struct nvkm_fb_tile *);
+extern const struct nvkm_object_func nv31_mpeg_object;
+
+bool nv40_mpeg_mthd_dma(struct nvkm_device *, u32, u32);
+
+int nv50_mpeg_init(struct nvkm_engine *);
+void nv50_mpeg_intr(struct nvkm_engine *);
+
+extern const struct nvkm_object_func nv50_mpeg_cclass;
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
index c59c83a..1a71511 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/Kbuild
@@ -1,3 +1,5 @@
+nvkm-y += nvkm/engine/mspdec/base.o
 nvkm-y += nvkm/engine/mspdec/g98.o
+nvkm-y += nvkm/engine/mspdec/gt215.o
 nvkm-y += nvkm/engine/mspdec/gf100.o
 nvkm-y += nvkm/engine/mspdec/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
new file mode 100644
index 0000000..80211f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/base.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+int
+nvkm_mspdec_new_(const struct nvkm_falcon_func *func,
+		 struct nvkm_device *device, int index,
+		 struct nvkm_engine **pengine)
+{
+	return nvkm_falcon_new_(func, device, index, true, 0x085000, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
index 2174577..1f1a99e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
@@ -21,89 +21,31 @@
  *
  * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
  */
-#include <engine/mspdec.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct g98_mspdec_priv {
-	struct nvkm_falcon base;
-};
+#include <nvif/class.h>
 
-/*******************************************************************************
- * MSPDEC object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_mspdec_sclass[] = {
-	{ 0x88b2, &nvkm_object_ofuncs },
-	{ 0x85b2, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSPDEC context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_mspdec_cclass = {
-	.handle = NV_ENGCTX(MSPDEC, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSPDEC engine/subdev functions
- ******************************************************************************/
-
-static int
-g98_mspdec_init(struct nvkm_object *object)
+void
+g98_mspdec_init(struct nvkm_falcon *mspdec)
 {
-	struct g98_mspdec_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x085010, 0x0000ffd2);
-	nv_wr32(priv, 0x08501c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = mspdec->engine.subdev.device;
+	nvkm_wr32(device, 0x085010, 0x0000ffd2);
+	nvkm_wr32(device, 0x08501c, 0x0000fff2);
 }
 
-static int
-g98_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct g98_mspdec_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true,
-				 "PMSPDEC", "mspdec", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x01020000;
-	nv_engine(priv)->cclass = &g98_mspdec_cclass;
-	nv_engine(priv)->sclass = g98_mspdec_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-g98_mspdec_oclass = {
-	.handle = NV_ENGINE(MSPDEC, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g98_mspdec_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = g98_mspdec_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+g98_mspdec = {
+	.pmc_enable = 0x01020000,
+	.init = g98_mspdec_init,
+	.sclass = {
+		{ -1, -1, G98_MSPDEC },
+		{}
+	}
 };
+
+int
+g98_mspdec_new(struct nvkm_device *device, int index,
+	       struct nvkm_engine **pengine)
+{
+	return nvkm_mspdec_new_(&g98_mspdec, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
index c814a5f..371fd6c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
@@ -21,89 +21,31 @@
  *
  * Authors: Maarten Lankhorst
  */
-#include <engine/mspdec.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct gf100_mspdec_priv {
-	struct nvkm_falcon base;
-};
+#include <nvif/class.h>
 
-/*******************************************************************************
- * MSPDEC object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_mspdec_sclass[] = {
-	{ 0x90b2, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSPDEC context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_mspdec_cclass = {
-	.handle = NV_ENGCTX(MSPDEC, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSPDEC engine/subdev functions
- ******************************************************************************/
-
-static int
-gf100_mspdec_init(struct nvkm_object *object)
+void
+gf100_mspdec_init(struct nvkm_falcon *mspdec)
 {
-	struct gf100_mspdec_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x085010, 0x0000fff2);
-	nv_wr32(priv, 0x08501c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = mspdec->engine.subdev.device;
+	nvkm_wr32(device, 0x085010, 0x0000fff2);
+	nvkm_wr32(device, 0x08501c, 0x0000fff2);
 }
 
-static int
-gf100_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct gf100_mspdec_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true,
-				 "PMSPDEC", "mspdec", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00020000;
-	nv_subdev(priv)->intr = nvkm_falcon_intr;
-	nv_engine(priv)->cclass = &gf100_mspdec_cclass;
-	nv_engine(priv)->sclass = gf100_mspdec_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-gf100_mspdec_oclass = {
-	.handle = NV_ENGINE(MSPDEC, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_mspdec_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gf100_mspdec_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+gf100_mspdec = {
+	.pmc_enable = 0x00020000,
+	.init = gf100_mspdec_init,
+	.sclass = {
+		{ -1, -1, GF100_MSPDEC },
+		{}
+	}
 };
+
+int
+gf100_mspdec_new(struct nvkm_device *device, int index,
+		 struct nvkm_engine **pengine)
+{
+	return nvkm_mspdec_new_(&gf100_mspdec, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
index 9799206..de804a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
@@ -21,89 +21,23 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/mspdec.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct gk104_mspdec_priv {
-	struct nvkm_falcon base;
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gk104_mspdec = {
+	.pmc_enable = 0x00020000,
+	.init = gf100_mspdec_init,
+	.sclass = {
+		{ -1, -1, GK104_MSPDEC },
+		{}
+	}
 };
 
-/*******************************************************************************
- * MSPDEC object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_mspdec_sclass[] = {
-	{ 0x95b2, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSPDEC context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_mspdec_cclass = {
-	.handle = NV_ENGCTX(MSPDEC, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSPDEC engine/subdev functions
- ******************************************************************************/
-
-static int
-gk104_mspdec_init(struct nvkm_object *object)
+int
+gk104_mspdec_new(struct nvkm_device *device, int index,
+		 struct nvkm_engine **pengine)
 {
-	struct gk104_mspdec_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x085010, 0x0000fff2);
-	nv_wr32(priv, 0x08501c, 0x0000fff2);
-	return 0;
+	return nvkm_mspdec_new_(&gk104_mspdec, device, index, pengine);
 }
-
-static int
-gk104_mspdec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
-{
-	struct gk104_mspdec_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x085000, true,
-				 "PMSPDEC", "mspdec", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00020000;
-	nv_subdev(priv)->intr = nvkm_falcon_intr;
-	nv_engine(priv)->cclass = &gk104_mspdec_cclass;
-	nv_engine(priv)->sclass = gk104_mspdec_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-gk104_mspdec_oclass = {
-	.handle = NV_ENGINE(MSPDEC, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_mspdec_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gk104_mspdec_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
new file mode 100644
index 0000000..8356317
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gt215_mspdec = {
+	.pmc_enable = 0x01020000,
+	.init = g98_mspdec_init,
+	.sclass = {
+		{ -1, -1, GT212_MSPDEC },
+		{}
+	}
+};
+
+int
+gt215_mspdec_new(struct nvkm_device *device, int index,
+		 struct nvkm_engine **pengine)

+{
+	return nvkm_mspdec_new_(&gt215_mspdec, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
new file mode 100644
index 0000000..d518af4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
@@ -0,0 +1,11 @@
+#ifndef __NVKM_MSPDEC_PRIV_H__
+#define __NVKM_MSPDEC_PRIV_H__
+#include <engine/mspdec.h>
+
+int nvkm_mspdec_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
+		     int index, struct nvkm_engine **);
+
+void g98_mspdec_init(struct nvkm_falcon *);
+
+void gf100_mspdec_init(struct nvkm_falcon *);
+#endif
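Note: priv.h is what lets later chipsets reuse earlier init hooks --
gk104.c above points .init at gf100_mspdec_init, and gt215.c at
g98_mspdec_init.  Adding support for a further chipset would reduce to a
file like this (a sketch under that assumption; "gxxxx" and its class
are placeholders, not a real patch):

	#include "priv.h"
	#include <nvif/class.h>

	static const struct nvkm_falcon_func
	gxxxx_mspdec = {
		.pmc_enable = 0x00020000,	/* chipset-specific mask */
		.init = gf100_mspdec_init,	/* reuse a shared hook */
		.sclass = {
			/* { -1, -1, <chipset class> }, */
			{}
		}
	};

	int
	gxxxx_mspdec_new(struct nvkm_device *device, int index,
			 struct nvkm_engine **pengine)
	{
		return nvkm_mspdec_new_(&gxxxx_mspdec, device, index, pengine);
	}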
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
index 4576a9e..3ea7eaf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/Kbuild
@@ -1,2 +1,4 @@
+nvkm-y += nvkm/engine/msppp/base.o
 nvkm-y += nvkm/engine/msppp/g98.o
+nvkm-y += nvkm/engine/msppp/gt215.o
 nvkm-y += nvkm/engine/msppp/gf100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
new file mode 100644
index 0000000..bfae5e60
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/base.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+int
+nvkm_msppp_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
+		int index, struct nvkm_engine **pengine)
+{
+	return nvkm_falcon_new_(func, device, index, true, 0x086000, pengine);
+}
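Note: each engine's base.c pins down its falcon MMIO window exactly once
-- 0x086000 for msppp here, 0x084000 for msvld below, and 0x085000 for
mspdec (visible in the 0x0850xx register writes above) -- so the chipset
files never repeat it.  Condensed as a standalone table (values copied
from the hunks; the code itself is only illustrative):

	#include <stdio.h>

	static const struct { const char *engine; unsigned int base; } falcon[] = {
		{ "msvld",  0x084000 },
		{ "mspdec", 0x085000 },
		{ "msppp",  0x086000 },
	};

	int main(void)
	{
		for (unsigned int i = 0; i < 3; i++)
			printf("%-6s falcon base 0x%06x\n",
			       falcon[i].engine, falcon[i].base);
		return 0;
	}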
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
index 7a602a2..73f633a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
@@ -21,89 +21,31 @@
  *
  * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
  */
-#include <engine/msppp.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct g98_msppp_priv {
-	struct nvkm_falcon base;
-};
+#include <nvif/class.h>
 
-/*******************************************************************************
- * MSPPP object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_msppp_sclass[] = {
-	{ 0x88b3, &nvkm_object_ofuncs },
-	{ 0x85b3, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSPPP context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_msppp_cclass = {
-	.handle = NV_ENGCTX(MSPPP, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSPPP engine/subdev functions
- ******************************************************************************/
-
-static int
-g98_msppp_init(struct nvkm_object *object)
+void
+g98_msppp_init(struct nvkm_falcon *msppp)
 {
-	struct g98_msppp_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x086010, 0x0000ffd2);
-	nv_wr32(priv, 0x08601c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = msppp->engine.subdev.device;
+	nvkm_wr32(device, 0x086010, 0x0000ffd2);
+	nvkm_wr32(device, 0x08601c, 0x0000fff2);
 }
 
-static int
-g98_msppp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct g98_msppp_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x086000, true,
-				 "PMSPPP", "msppp", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00400002;
-	nv_engine(priv)->cclass = &g98_msppp_cclass;
-	nv_engine(priv)->sclass = g98_msppp_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-g98_msppp_oclass = {
-	.handle = NV_ENGINE(MSPPP, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g98_msppp_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = g98_msppp_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+g98_msppp = {
+	.pmc_enable = 0x00400002,
+	.init = g98_msppp_init,
+	.sclass = {
+		{ -1, -1, G98_MSPPP },
+		{}
+	}
 };
+
+int
+g98_msppp_new(struct nvkm_device *device, int index,
+	      struct nvkm_engine **pengine)
+{
+	return nvkm_msppp_new_(&g98_msppp, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
index 6047bae..c42c0c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
@@ -21,89 +21,31 @@
  *
  * Authors: Maarten Lankhorst
  */
-#include <engine/msppp.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct gf100_msppp_priv {
-	struct nvkm_falcon base;
-};
+#include <nvif/class.h>
 
-/*******************************************************************************
- * MSPPP object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_msppp_sclass[] = {
-	{ 0x90b3, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSPPP context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_msppp_cclass = {
-	.handle = NV_ENGCTX(MSPPP, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSPPP engine/subdev functions
- ******************************************************************************/
-
-static int
-gf100_msppp_init(struct nvkm_object *object)
+static void
+gf100_msppp_init(struct nvkm_falcon *msppp)
 {
-	struct gf100_msppp_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x086010, 0x0000fff2);
-	nv_wr32(priv, 0x08601c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = msppp->engine.subdev.device;
+	nvkm_wr32(device, 0x086010, 0x0000fff2);
+	nvkm_wr32(device, 0x08601c, 0x0000fff2);
 }
 
-static int
-gf100_msppp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gf100_msppp_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x086000, true,
-				 "PMSPPP", "msppp", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00000002;
-	nv_subdev(priv)->intr = nvkm_falcon_intr;
-	nv_engine(priv)->cclass = &gf100_msppp_cclass;
-	nv_engine(priv)->sclass = gf100_msppp_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-gf100_msppp_oclass = {
-	.handle = NV_ENGINE(MSPPP, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_msppp_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gf100_msppp_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+gf100_msppp = {
+	.pmc_enable = 0x00000002,
+	.init = gf100_msppp_init,
+	.sclass = {
+		{ -1, -1, GF100_MSPPP },
+		{}
+	}
 };
+
+int
+gf100_msppp_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
+{
+	return nvkm_msppp_new_(&gf100_msppp, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
new file mode 100644
index 0000000..00e7795
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gt215_msppp = {
+	.pmc_enable = 0x00400002,
+	.init = g98_msppp_init,
+	.sclass = {
+		{ -1, -1, GT212_MSPPP },
+		{}
+	}
+};
+
+int
+gt215_msppp_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
+{
+	return nvkm_msppp_new_(&gt215_msppp, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
new file mode 100644
index 0000000..37a91f9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
@@ -0,0 +1,9 @@
+#ifndef __NVKM_MSPPP_PRIV_H__
+#define __NVKM_MSPPP_PRIV_H__
+#include <engine/msppp.h>
+
+int nvkm_msppp_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
+		    int index, struct nvkm_engine **);
+
+void g98_msppp_init(struct nvkm_falcon *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
index 0c98110..28c8ecd2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/Kbuild
@@ -1,3 +1,6 @@
+nvkm-y += nvkm/engine/msvld/base.o
 nvkm-y += nvkm/engine/msvld/g98.o
+nvkm-y += nvkm/engine/msvld/gt215.o
+nvkm-y += nvkm/engine/msvld/mcp89.o
 nvkm-y += nvkm/engine/msvld/gf100.o
 nvkm-y += nvkm/engine/msvld/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
new file mode 100644
index 0000000..745bbb6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/base.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+int
+nvkm_msvld_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
+		int index, struct nvkm_engine **pengine)
+{
+	return nvkm_falcon_new_(func, device, index, true, 0x084000, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
index c8a6b4e..47e2929 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
@@ -21,90 +21,31 @@
  *
  * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
  */
-#include <engine/msvld.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct g98_msvld_priv {
-	struct nvkm_falcon base;
-};
+#include <nvif/class.h>
 
-/*******************************************************************************
- * MSVLD object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_msvld_sclass[] = {
-	{ 0x88b1, &nvkm_object_ofuncs },
-	{ 0x85b1, &nvkm_object_ofuncs },
-	{ 0x86b1, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSVLD context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_msvld_cclass = {
-	.handle = NV_ENGCTX(MSVLD, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSVLD engine/subdev functions
- ******************************************************************************/
-
-static int
-g98_msvld_init(struct nvkm_object *object)
+void
+g98_msvld_init(struct nvkm_falcon *msvld)
 {
-	struct g98_msvld_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x084010, 0x0000ffd2);
-	nv_wr32(priv, 0x08401c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = msvld->engine.subdev.device;
+	nvkm_wr32(device, 0x084010, 0x0000ffd2);
+	nvkm_wr32(device, 0x08401c, 0x0000fff2);
 }
 
-static int
-g98_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct g98_msvld_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true,
-				 "PMSVLD", "msvld", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x04008000;
-	nv_engine(priv)->cclass = &g98_msvld_cclass;
-	nv_engine(priv)->sclass = g98_msvld_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-g98_msvld_oclass = {
-	.handle = NV_ENGINE(MSVLD, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g98_msvld_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = g98_msvld_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+g98_msvld = {
+	.pmc_enable = 0x04008000,
+	.init = g98_msvld_init,
+	.sclass = {
+		{ -1, -1, G98_MSVLD },
+		{}
+	}
 };
+
+int
+g98_msvld_new(struct nvkm_device *device, int index,
+	      struct nvkm_engine **pengine)
+{
+	return nvkm_msvld_new_(&g98_msvld, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
index b8d1e0f..1ac581b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
@@ -21,89 +21,31 @@
  *
  * Authors: Maarten Lankhorst
  */
-#include <engine/msvld.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct gf100_msvld_priv {
-	struct nvkm_falcon base;
-};
+#include <nvif/class.h>
 
-/*******************************************************************************
- * MSVLD object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_msvld_sclass[] = {
-	{ 0x90b1, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSVLD context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gf100_msvld_cclass = {
-	.handle = NV_ENGCTX(MSVLD, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSVLD engine/subdev functions
- ******************************************************************************/
-
-static int
-gf100_msvld_init(struct nvkm_object *object)
+void
+gf100_msvld_init(struct nvkm_falcon *msvld)
 {
-	struct gf100_msvld_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x084010, 0x0000fff2);
-	nv_wr32(priv, 0x08401c, 0x0000fff2);
-	return 0;
+	struct nvkm_device *device = msvld->engine.subdev.device;
+	nvkm_wr32(device, 0x084010, 0x0000fff2);
+	nvkm_wr32(device, 0x08401c, 0x0000fff2);
 }
 
-static int
-gf100_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gf100_msvld_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true,
-				 "PMSVLD", "msvld", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00008000;
-	nv_subdev(priv)->intr = nvkm_falcon_intr;
-	nv_engine(priv)->cclass = &gf100_msvld_cclass;
-	nv_engine(priv)->sclass = gf100_msvld_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-gf100_msvld_oclass = {
-	.handle = NV_ENGINE(MSVLD, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_msvld_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gf100_msvld_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+gf100_msvld = {
+	.pmc_enable = 0x00008000,
+	.init = gf100_msvld_init,
+	.sclass = {
+		{ -1, -1, GF100_MSVLD },
+		{}
+	}
 };
+
+int
+gf100_msvld_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
+{
+	return nvkm_msvld_new_(&gf100_msvld, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
index a0b0927..4bba16e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
@@ -21,89 +21,23 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/msvld.h>
-#include <engine/falcon.h>
+#include "priv.h"
 
-struct gk104_msvld_priv {
-	struct nvkm_falcon base;
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gk104_msvld = {
+	.pmc_enable = 0x00008000,
+	.init = gf100_msvld_init,
+	.sclass = {
+		{ -1, -1, GK104_MSVLD },
+		{}
+	}
 };
 
-/*******************************************************************************
- * MSVLD object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_msvld_sclass[] = {
-	{ 0x95b1, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PMSVLD context
- ******************************************************************************/
-
-static struct nvkm_oclass
-gk104_msvld_cclass = {
-	.handle = NV_ENGCTX(MSVLD, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PMSVLD engine/subdev functions
- ******************************************************************************/
-
-static int
-gk104_msvld_init(struct nvkm_object *object)
+int
+gk104_msvld_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
 {
-	struct gk104_msvld_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_falcon_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x084010, 0x0000fff2);
-	nv_wr32(priv, 0x08401c, 0x0000fff2);
-	return 0;
+	return nvkm_msvld_new_(&gk104_msvld, device, index, pengine);
 }
-
-static int
-gk104_msvld_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gk104_msvld_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x084000, true,
-				 "PMSVLD", "msvld", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00008000;
-	nv_subdev(priv)->intr = nvkm_falcon_intr;
-	nv_engine(priv)->cclass = &gk104_msvld_cclass;
-	nv_engine(priv)->sclass = gk104_msvld_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-gk104_msvld_oclass = {
-	.handle = NV_ENGINE(MSVLD, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_msvld_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = gk104_msvld_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
new file mode 100644
index 0000000..e17cb56
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+gt215_msvld = {
+	.pmc_enable = 0x04008000,
+	.init = g98_msvld_init,
+	.sclass = {
+		{ -1, -1, GT212_MSVLD },
+		{}
+	}
+};
+
+int
+gt215_msvld_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
+{
+	return nvkm_msvld_new_(&gt215_msvld, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
new file mode 100644
index 0000000..511800f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_falcon_func
+mcp89_msvld = {
+	.pmc_enable = 0x04008000,
+	.init = g98_msvld_init,
+	.sclass = {
+		{ -1, -1, IGT21A_MSVLD },
+		{}
+	}
+};
+
+int
+mcp89_msvld_new(struct nvkm_device *device, int index,
+		struct nvkm_engine **pengine)
+{
+	return nvkm_msvld_new_(&mcp89_msvld, device, index, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
new file mode 100644
index 0000000..9dc1da6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
@@ -0,0 +1,11 @@
+#ifndef __NVKM_MSVLD_PRIV_H__
+#define __NVKM_MSVLD_PRIV_H__
+#include <engine/msvld.h>
+
+int nvkm_msvld_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
+		    int index, struct nvkm_engine **);
+
+void g98_msvld_init(struct nvkm_falcon *);
+
+void gf100_msvld_init(struct nvkm_falcon *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
index 413b609..1614d38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/Kbuild
@@ -1,9 +1,10 @@
 nvkm-y += nvkm/engine/pm/base.o
-nvkm-y += nvkm/engine/pm/daemon.o
 nvkm-y += nvkm/engine/pm/nv40.o
 nvkm-y += nvkm/engine/pm/nv50.o
 nvkm-y += nvkm/engine/pm/g84.o
+nvkm-y += nvkm/engine/pm/gt200.o
 nvkm-y += nvkm/engine/pm/gt215.o
 nvkm-y += nvkm/engine/pm/gf100.o
+nvkm-y += nvkm/engine/pm/gf108.o
+nvkm-y += nvkm/engine/pm/gf117.o
 nvkm-y += nvkm/engine/pm/gk104.o
-nvkm-y += nvkm/engine/pm/gk110.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 4cf36a3..0db9be2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -24,370 +24,751 @@
 #include "priv.h"
 
 #include <core/client.h>
-#include <core/device.h>
 #include <core/option.h>
 
 #include <nvif/class.h>
 #include <nvif/ioctl.h>
 #include <nvif/unpack.h>
 
-#define QUAD_MASK 0x0f
-#define QUAD_FREE 0x01
-
-static struct nvkm_perfsig *
-nvkm_perfsig_find_(struct nvkm_perfdom *dom, const char *name, u32 size)
+static u8
+nvkm_pm_count_perfdom(struct nvkm_pm *pm)
 {
-	char path[64];
+	struct nvkm_perfdom *dom;
+	u8 domain_nr = 0;
+
+	list_for_each_entry(dom, &pm->domains, head)
+		domain_nr++;
+	return domain_nr;
+}
+
+static u16
+nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
+{
+	u16 signal_nr = 0;
 	int i;
 
-	if (name[0] != '/') {
+	if (dom) {
 		for (i = 0; i < dom->signal_nr; i++) {
-			if ( dom->signal[i].name &&
-			    !strncmp(name, dom->signal[i].name, size))
-				return &dom->signal[i];
+			if (dom->signal[i].name)
+				signal_nr++;
 		}
-	} else {
-		for (i = 0; i < dom->signal_nr; i++) {
-			snprintf(path, sizeof(path), "/%s/%02x", dom->name, i);
-			if (!strncmp(name, path, size))
-				return &dom->signal[i];
+	}
+	return signal_nr;
+}
+
+static struct nvkm_perfdom *
+nvkm_perfdom_find(struct nvkm_pm *pm, int di)
+{
+	struct nvkm_perfdom *dom;
+	int tmp = 0;
+
+	list_for_each_entry(dom, &pm->domains, head) {
+		if (tmp++ == di)
+			return dom;
+	}
+	return NULL;
+}
+
+struct nvkm_perfsig *
+nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
+{
+	struct nvkm_perfdom *dom = *pdom;
+
+	if (dom == NULL) {
+		dom = nvkm_perfdom_find(pm, di);
+		if (dom == NULL)
+			return NULL;
+		*pdom = dom;
+	}
+
+	if (!dom->signal[si].name)
+		return NULL;
+	return &dom->signal[si];
+}
+
+static u8
+nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
+{
+	u8 source_nr = 0, i;
+
+	for (i = 0; i < ARRAY_SIZE(sig->source); i++) {
+		if (sig->source[i])
+			source_nr++;
+	}
+	return source_nr;
+}
+
+static struct nvkm_perfsrc *
+nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
+{
+	struct nvkm_perfsrc *src;
+	bool found = false;
+	int tmp = 1; /* source IDs start from 1 */
+	u8 i;
+
+	for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
+		if (sig->source[i] == si) {
+			found = true;
+			break;
+		}
+	}
+
+	if (found) {
+		list_for_each_entry(src, &pm->sources, head) {
+			if (tmp++ == si)
+				return src;
 		}
 	}
 
 	return NULL;
 }
 
-struct nvkm_perfsig *
-nvkm_perfsig_find(struct nvkm_pm *ppm, const char *name, u32 size,
-		  struct nvkm_perfdom **pdom)
+static int
+nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
 {
-	struct nvkm_perfdom *dom = *pdom;
+	struct nvkm_subdev *subdev = &pm->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_perfdom *dom = NULL;
 	struct nvkm_perfsig *sig;
+	struct nvkm_perfsrc *src;
+	u32 mask, value;
+	int i, j;
 
-	if (dom == NULL) {
-		list_for_each_entry(dom, &ppm->domains, head) {
-			sig = nvkm_perfsig_find_(dom, name, size);
-			if (sig) {
-				*pdom = dom;
-				return sig;
-			}
+	for (i = 0; i < 4; i++) {
+		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
+			sig = nvkm_perfsig_find(pm, ctr->domain,
+						ctr->signal[i], &dom);
+			if (!sig)
+				return -EINVAL;
+
+			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
+			if (!src)
+				return -EINVAL;
+
+			/* set enable bit if needed */
+			mask = value = 0x00000000;
+			if (src->enable)
+				mask = value = 0x80000000;
+			mask  |= (src->mask << src->shift);
+			value |= ((ctr->source[i][j] >> 32) << src->shift);
+
+			/* enable the source */
+			nvkm_mask(device, src->addr, mask, value);
+			nvkm_debug(subdev,
+				   "enabled source %08x %08x %08x\n",
+				   src->addr, mask, value);
 		}
-
-		return NULL;
 	}
-
-	return nvkm_perfsig_find_(dom, name, size);
+	return 0;
 }
 
-struct nvkm_perfctr *
-nvkm_perfsig_wrap(struct nvkm_pm *ppm, const char *name,
-		  struct nvkm_perfdom **pdom)
+static int
+nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
 {
+	struct nvkm_subdev *subdev = &pm->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_perfdom *dom = NULL;
 	struct nvkm_perfsig *sig;
-	struct nvkm_perfctr *ctr;
+	struct nvkm_perfsrc *src;
+	u32 mask;
+	int i, j;
 
-	sig = nvkm_perfsig_find(ppm, name, strlen(name), pdom);
-	if (!sig)
-		return NULL;
+	for (i = 0; i < 4; i++) {
+		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
+			sig = nvkm_perfsig_find(pm, ctr->domain,
+						ctr->signal[i], &dom);
+			if (!sig)
+				return -EINVAL;
 
-	ctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
-	if (ctr) {
-		ctr->signal[0] = sig;
-		ctr->logic_op = 0xaaaa;
+			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
+			if (!src)
+				return -EINVAL;
+
+			/* unset enable bit if needed */
+			mask = 0x00000000;
+			if (src->enable)
+				mask = 0x80000000;
+			mask |= (src->mask << src->shift);
+
+			/* disable the source */
+			nvkm_mask(device, src->addr, mask, 0);
+			nvkm_debug(subdev, "disabled source %08x %08x\n",
+				   src->addr, mask);
+		}
 	}
-
-	return ctr;
+	return 0;
 }
 
 /*******************************************************************************
- * Perfmon object classes
+ * Perfdom object classes
  ******************************************************************************/
 static int
-nvkm_perfctr_query(struct nvkm_object *object, void *data, u32 size)
+nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
 {
 	union {
-		struct nvif_perfctr_query_v0 v0;
+		struct nvif_perfdom_init none;
 	} *args = data;
-	struct nvkm_device *device = nv_device(object);
-	struct nvkm_pm *ppm = (void *)object->engine;
-	struct nvkm_perfdom *dom = NULL, *chk;
-	const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
-	const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
-	const char *name;
-	int tmp = 0, di, si;
-	int ret;
+	struct nvkm_object *object = &dom->object;
+	struct nvkm_pm *pm = dom->perfmon->pm;
+	int ret, i;
 
-	nv_ioctl(object, "perfctr query size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "perfctr query vers %d iter %08x\n",
-			 args->v0.version, args->v0.iter);
-		di = (args->v0.iter & 0xff000000) >> 24;
-		si = (args->v0.iter & 0x00ffffff) - 1;
-	} else
-		return ret;
-
-	list_for_each_entry(chk, &ppm->domains, head) {
-		if (tmp++ == di) {
-			dom = chk;
-			break;
-		}
-	}
-
-	if (dom == NULL || si >= (int)dom->signal_nr)
-		return -EINVAL;
-
-	if (si >= 0) {
-		if (raw || !(name = dom->signal[si].name)) {
-			snprintf(args->v0.name, sizeof(args->v0.name),
-				 "/%s/%02x", dom->name, si);
-		} else {
-			strncpy(args->v0.name, name, sizeof(args->v0.name));
-		}
-	}
-
-	do {
-		while (++si < dom->signal_nr) {
-			if (all || dom->signal[si].name) {
-				args->v0.iter = (di << 24) | ++si;
-				return 0;
-			}
-		}
-		si = -1;
-		di = di + 1;
-		dom = list_entry(dom->head.next, typeof(*dom), head);
-	} while (&dom->head != &ppm->domains);
-
-	args->v0.iter = 0xffffffff;
-	return 0;
-}
-
-static int
-nvkm_perfctr_sample(struct nvkm_object *object, void *data, u32 size)
-{
-	union {
-		struct nvif_perfctr_sample none;
-	} *args = data;
-	struct nvkm_pm *ppm = (void *)object->engine;
-	struct nvkm_perfctr *ctr, *tmp;
-	struct nvkm_perfdom *dom;
-	int ret;
-
-	nv_ioctl(object, "perfctr sample size %d\n", size);
+	nvif_ioctl(object, "perfdom init size %d\n", size);
 	if (nvif_unvers(args->none)) {
-		nv_ioctl(object, "perfctr sample\n");
+		nvif_ioctl(object, "perfdom init\n");
 	} else
 		return ret;
-	ppm->sequence++;
 
-	list_for_each_entry(dom, &ppm->domains, head) {
-		/* sample previous batch of counters */
-		if (dom->quad != QUAD_MASK) {
-			dom->func->next(ppm, dom);
-			tmp = NULL;
-			while (!list_empty(&dom->list)) {
-				ctr = list_first_entry(&dom->list,
-						       typeof(*ctr), head);
-				if (ctr->slot < 0) break;
-				if ( tmp && tmp == ctr) break;
-				if (!tmp) tmp = ctr;
-				dom->func->read(ppm, dom, ctr);
-				ctr->slot  = -1;
-				list_move_tail(&ctr->head, &dom->list);
-			}
+	for (i = 0; i < 4; i++) {
+		if (dom->ctr[i]) {
+			dom->func->init(pm, dom, dom->ctr[i]);
+
+			/* enable sources */
+			nvkm_perfsrc_enable(pm, dom->ctr[i]);
 		}
-
-		dom->quad = QUAD_MASK;
-
-		/* setup next batch of counters for sampling */
-		list_for_each_entry(ctr, &dom->list, head) {
-			ctr->slot = ffs(dom->quad) - 1;
-			if (ctr->slot < 0)
-				break;
-			dom->quad &= ~(QUAD_FREE << ctr->slot);
-			dom->func->init(ppm, dom, ctr);
-		}
-
-		if (dom->quad != QUAD_MASK)
-			dom->func->next(ppm, dom);
 	}
 
+	/* start next batch of counters for sampling */
+	dom->func->next(pm, dom);
+	return 0;
+}
+
+static int
+nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
+{
+	union {
+		struct nvif_perfdom_sample none;
+	} *args = data;
+	struct nvkm_object *object = &dom->object;
+	struct nvkm_pm *pm = dom->perfmon->pm;
+	int ret;
+
+	nvif_ioctl(object, "perfdom sample size %d\n", size);
+	if (nvif_unvers(args->none)) {
+		nvif_ioctl(object, "perfdom sample\n");
+	} else
+		return ret;
+	pm->sequence++;
+
+	/* sample previous batch of counters */
+	list_for_each_entry(dom, &pm->domains, head)
+		dom->func->next(pm, dom);
+
 	return 0;
 }
 
 static int
-nvkm_perfctr_read(struct nvkm_object *object, void *data, u32 size)
+nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
 {
 	union {
-		struct nvif_perfctr_read_v0 v0;
+		struct nvif_perfdom_read_v0 v0;
 	} *args = data;
-	struct nvkm_perfctr *ctr = (void *)object;
-	int ret;
+	struct nvkm_object *object = &dom->object;
+	struct nvkm_pm *pm = dom->perfmon->pm;
+	int ret, i;
 
-	nv_ioctl(object, "perfctr read size %d\n", size);
+	nvif_ioctl(object, "perfdom read size %d\n", size);
 	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(object, "perfctr read vers %d\n", args->v0.version);
+		nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
 	} else
 		return ret;
 
-	if (!ctr->clk)
+	for (i = 0; i < 4; i++) {
+		if (dom->ctr[i])
+			dom->func->read(pm, dom, dom->ctr[i]);
+	}
+
+	if (!dom->clk)
 		return -EAGAIN;
 
-	args->v0.clk = ctr->clk;
-	args->v0.ctr = ctr->ctr;
+	for (i = 0; i < 4; i++)
+		if (dom->ctr[i])
+			args->v0.ctr[i] = dom->ctr[i]->ctr;
+	args->v0.clk = dom->clk;
 	return 0;
 }
 
 static int
-nvkm_perfctr_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
 {
+	struct nvkm_perfdom *dom = nvkm_perfdom(object);
 	switch (mthd) {
-	case NVIF_PERFCTR_V0_QUERY:
-		return nvkm_perfctr_query(object, data, size);
-	case NVIF_PERFCTR_V0_SAMPLE:
-		return nvkm_perfctr_sample(object, data, size);
-	case NVIF_PERFCTR_V0_READ:
-		return nvkm_perfctr_read(object, data, size);
+	case NVIF_PERFDOM_V0_INIT:
+		return nvkm_perfdom_init(dom, data, size);
+	case NVIF_PERFDOM_V0_SAMPLE:
+		return nvkm_perfdom_sample(dom, data, size);
+	case NVIF_PERFDOM_V0_READ:
+		return nvkm_perfdom_read(dom, data, size);
 	default:
 		break;
 	}
 	return -EINVAL;
 }
 
-static void
-nvkm_perfctr_dtor(struct nvkm_object *object)
+static void *
+nvkm_perfdom_dtor(struct nvkm_object *object)
 {
-	struct nvkm_perfctr *ctr = (void *)object;
-	if (ctr->head.next)
-		list_del(&ctr->head);
-	nvkm_object_destroy(&ctr->base);
+	struct nvkm_perfdom *dom = nvkm_perfdom(object);
+	struct nvkm_pm *pm = dom->perfmon->pm;
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		struct nvkm_perfctr *ctr = dom->ctr[i];
+		if (ctr) {
+			nvkm_perfsrc_disable(pm, ctr);
+			if (ctr->head.next)
+				list_del(&ctr->head);
+		}
+		kfree(ctr);
+	}
+
+	return dom;
 }
 
 static int
-nvkm_perfctr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
+		 struct nvkm_perfsig *signal[4], u64 source[4][8],
+		 u16 logic_op, struct nvkm_perfctr **pctr)
 {
-	union {
-		struct nvif_perfctr_v0 v0;
-	} *args = data;
-	struct nvkm_pm *ppm = (void *)engine;
-	struct nvkm_perfdom *dom = NULL;
-	struct nvkm_perfsig *sig[4] = {};
 	struct nvkm_perfctr *ctr;
-	int ret, i;
+	int i, j;
 
-	nv_ioctl(parent, "create perfctr size %d\n", size);
-	if (nvif_unpack(args->v0, 0, 0, false)) {
-		nv_ioctl(parent, "create perfctr vers %d logic_op %04x\n",
-			 args->v0.version, args->v0.logic_op);
-	} else
-		return ret;
+	if (!dom)
+		return -EINVAL;
 
-	for (i = 0; i < ARRAY_SIZE(args->v0.name) && args->v0.name[i][0]; i++) {
-		sig[i] = nvkm_perfsig_find(ppm, args->v0.name[i],
-					   strnlen(args->v0.name[i],
-						   sizeof(args->v0.name[i])),
-					   &dom);
-		if (!sig[i])
-			return -EINVAL;
+	ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
+	if (!ctr)
+		return -ENOMEM;
+
+	ctr->domain   = domain;
+	ctr->logic_op = logic_op;
+	ctr->slot     = slot;
+	for (i = 0; i < 4; i++) {
+		if (signal[i]) {
+			ctr->signal[i] = signal[i] - dom->signal;
+			for (j = 0; j < 8; j++)
+				ctr->source[i][j] = source[i][j];
+		}
 	}
+	list_add_tail(&ctr->head, &dom->list);
 
-	ret = nvkm_object_create(parent, engine, oclass, 0, &ctr);
-	*pobject = nv_object(ctr);
-	if (ret)
-		return ret;
-
-	ctr->slot = -1;
-	ctr->logic_op = args->v0.logic_op;
-	ctr->signal[0] = sig[0];
-	ctr->signal[1] = sig[1];
-	ctr->signal[2] = sig[2];
-	ctr->signal[3] = sig[3];
-	if (dom)
-		list_add_tail(&ctr->head, &dom->list);
 	return 0;
 }
 
-static struct nvkm_ofuncs
-nvkm_perfctr_ofuncs = {
-	.ctor = nvkm_perfctr_ctor,
-	.dtor = nvkm_perfctr_dtor,
-	.init = nvkm_object_init,
-	.fini = nvkm_object_fini,
-	.mthd = nvkm_perfctr_mthd,
+static const struct nvkm_object_func
+nvkm_perfdom = {
+	.dtor = nvkm_perfdom_dtor,
+	.mthd = nvkm_perfdom_mthd,
 };
 
-struct nvkm_oclass
-nvkm_pm_sclass[] = {
-	{ .handle = NVIF_IOCTL_NEW_V0_PERFCTR,
-	  .ofuncs = &nvkm_perfctr_ofuncs,
-	},
-	{},
-};
+static int
+nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
+		  const struct nvkm_oclass *oclass, void *data, u32 size,
+		  struct nvkm_object **pobject)
+{
+	union {
+		struct nvif_perfdom_v0 v0;
+	} *args = data;
+	struct nvkm_pm *pm = perfmon->pm;
+	struct nvkm_object *parent = oclass->parent;
+	struct nvkm_perfdom *sdom = NULL;
+	struct nvkm_perfctr *ctr[4] = {};
+	struct nvkm_perfdom *dom;
+	int c, s, m;
+	int ret;
+
+	nvif_ioctl(parent, "create perfdom size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
+			   args->v0.version, args->v0.domain, args->v0.mode);
+	} else
+		return ret;
+
+	for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
+		struct nvkm_perfsig *sig[4] = {};
+		u64 src[4][8] = {};
+
+		for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
+			sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
+						   args->v0.ctr[c].signal[s],
+						   &sdom);
+			if (args->v0.ctr[c].signal[s] && !sig[s])
+				return -EINVAL;
+
+			for (m = 0; m < 8; m++) {
+				src[s][m] = args->v0.ctr[c].source[s][m];
+				if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
+							            src[s][m]))
+					return -EINVAL;
+			}
+		}
+
+		ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
+				       args->v0.ctr[c].logic_op, &ctr[c]);
+		if (ret)
+			return ret;
+	}
+
+	if (!sdom)
+		return -EINVAL;
+
+	if (!(dom = kzalloc(sizeof(*dom), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nvkm_perfdom, oclass, &dom->object);
+	dom->perfmon = perfmon;
+	*pobject = &dom->object;
+
+	dom->func = sdom->func;
+	dom->addr = sdom->addr;
+	dom->mode = args->v0.mode;
+	for (c = 0; c < ARRAY_SIZE(ctr); c++)
+		dom->ctr[c] = ctr[c];
+	return 0;
+}
 
 /*******************************************************************************
- * PPM context
+ * Perfmon object classes
  ******************************************************************************/
-static void
-nvkm_perfctx_dtor(struct nvkm_object *object)
+static int
+nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
+			       void *data, u32 size)
 {
-	struct nvkm_pm *ppm = (void *)object->engine;
-	struct nvkm_perfctx *ctx = (void *)object;
+	union {
+		struct nvif_perfmon_query_domain_v0 v0;
+	} *args = data;
+	struct nvkm_object *object = &perfmon->object;
+	struct nvkm_pm *pm = perfmon->pm;
+	struct nvkm_perfdom *dom;
+	u8 domain_nr;
+	int di, ret;
 
-	mutex_lock(&nv_subdev(ppm)->mutex);
-	nvkm_engctx_destroy(&ctx->base);
-	if (ppm->context == ctx)
-		ppm->context = NULL;
-	mutex_unlock(&nv_subdev(ppm)->mutex);
+	nvif_ioctl(object, "perfmon query domain size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
+			   args->v0.version, args->v0.iter);
+		di = (args->v0.iter & 0xff) - 1;
+	} else
+		return ret;
+
+	domain_nr = nvkm_pm_count_perfdom(pm);
+	if (di >= (int)domain_nr)
+		return -EINVAL;
+
+	if (di >= 0) {
+		dom = nvkm_perfdom_find(pm, di);
+		if (dom == NULL)
+			return -EINVAL;
+
+		args->v0.id         = di;
+		args->v0.signal_nr  = nvkm_perfdom_count_perfsig(dom);
+		strncpy(args->v0.name, dom->name, sizeof(args->v0.name));
+
+		/* Currently only global counters (PCOUNTER) are implemented,
+		 * but this will be different for local counters (MP). */
+		args->v0.counter_nr = 4;
+	}
+
+	if (++di < domain_nr) {
+		args->v0.iter = ++di;
+		return 0;
+	}
+
+	args->v0.iter = 0xff;
+	return 0;
 }
 
 static int
-nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
+			       void *data, u32 size)
 {
-	struct nvkm_pm *ppm = (void *)engine;
-	struct nvkm_perfctx *ctx;
-	int ret;
+	union {
+		struct nvif_perfmon_query_signal_v0 v0;
+	} *args = data;
+	struct nvkm_object *object = &perfmon->object;
+	struct nvkm_pm *pm = perfmon->pm;
+	struct nvkm_device *device = pm->engine.subdev.device;
+	struct nvkm_perfdom *dom;
+	struct nvkm_perfsig *sig;
+	const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
+	const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
+	int ret, si;
 
-	ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0, 0, 0, &ctx);
-	*pobject = nv_object(ctx);
-	if (ret)
+	nvif_ioctl(object, "perfmon query signal size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object,
+			   "perfmon query signal vers %d dom %d iter %04x\n",
+			   args->v0.version, args->v0.domain, args->v0.iter);
+		si = (args->v0.iter & 0xffff) - 1;
+	} else
 		return ret;
 
-	mutex_lock(&nv_subdev(ppm)->mutex);
-	if (ppm->context == NULL)
-		ppm->context = ctx;
-	if (ctx != ppm->context)
-		ret = -EBUSY;
-	mutex_unlock(&nv_subdev(ppm)->mutex);
+	dom = nvkm_perfdom_find(pm, args->v0.domain);
+	if (dom == NULL || si >= (int)dom->signal_nr)
+		return -EINVAL;
 
-	return ret;
+	if (si >= 0) {
+		sig = &dom->signal[si];
+		if (raw || !sig->name) {
+			snprintf(args->v0.name, sizeof(args->v0.name),
+				 "/%s/%02x", dom->name, si);
+		} else {
+			strncpy(args->v0.name, sig->name,
+				sizeof(args->v0.name));
+		}
+
+		args->v0.signal = si;
+		args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
+	}
+
+	while (++si < dom->signal_nr) {
+		if (all || dom->signal[si].name) {
+			args->v0.iter = ++si;
+			return 0;
+		}
+	}
+
+	args->v0.iter = 0xffff;
+	return 0;
 }
 
-struct nvkm_oclass
-nvkm_pm_cclass = {
-	.handle = NV_ENGCTX(PM, 0x00),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nvkm_perfctx_ctor,
-		.dtor = nvkm_perfctx_dtor,
-		.init = _nvkm_engctx_init,
-		.fini = _nvkm_engctx_fini,
-	},
+static int
+nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
+			       void *data, u32 size)
+{
+	union {
+		struct nvif_perfmon_query_source_v0 v0;
+	} *args = data;
+	struct nvkm_object *object = &perfmon->object;
+	struct nvkm_pm *pm = perfmon->pm;
+	struct nvkm_perfdom *dom = NULL;
+	struct nvkm_perfsig *sig;
+	struct nvkm_perfsrc *src;
+	u8 source_nr = 0;
+	int si, ret;
+
+	nvif_ioctl(object, "perfmon query source size %d\n", size);
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		nvif_ioctl(object,
+			   "perfmon source vers %d dom %d sig %02x iter %02x\n",
+			   args->v0.version, args->v0.domain, args->v0.signal,
+			   args->v0.iter);
+		si = (args->v0.iter & 0xff) - 1;
+	} else
+		return ret;
+
+	sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
+	if (!sig)
+		return -EINVAL;
+
+	source_nr = nvkm_perfsig_count_perfsrc(sig);
+	if (si >= (int)source_nr)
+		return -EINVAL;
+
+	if (si >= 0) {
+		src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
+		if (!src)
+			return -EINVAL;
+
+		args->v0.source = sig->source[si];
+		args->v0.mask   = src->mask;
+		strncpy(args->v0.name, src->name, sizeof(args->v0.name));
+	}
+
+	if (++si < source_nr) {
+		args->v0.iter = ++si;
+		return 0;
+	}
+
+	args->v0.iter = 0xff;
+	return 0;
+}
+
+static int
+nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
+	switch (mthd) {
+	case NVIF_PERFMON_V0_QUERY_DOMAIN:
+		return nvkm_perfmon_mthd_query_domain(perfmon, data, size);
+	case NVIF_PERFMON_V0_QUERY_SIGNAL:
+		return nvkm_perfmon_mthd_query_signal(perfmon, data, size);
+	case NVIF_PERFMON_V0_QUERY_SOURCE:
+		return nvkm_perfmon_mthd_query_source(perfmon, data, size);
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static int
+nvkm_perfmon_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+		       struct nvkm_object **pobject)
+{
+	struct nvkm_perfmon *perfmon = nvkm_perfmon(oclass->parent);
+	return nvkm_perfdom_new_(perfmon, oclass, data, size, pobject);
+}
+
+static int
+nvkm_perfmon_child_get(struct nvkm_object *object, int index,
+		       struct nvkm_oclass *oclass)
+{
+	if (index == 0) {
+		oclass->base.oclass = NVIF_IOCTL_NEW_V0_PERFDOM;
+		oclass->base.minver = 0;
+		oclass->base.maxver = 0;
+		oclass->ctor = nvkm_perfmon_child_new;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static void *
+nvkm_perfmon_dtor(struct nvkm_object *object)
+{
+	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
+	struct nvkm_pm *pm = perfmon->pm;
+	mutex_lock(&pm->engine.subdev.mutex);
+	if (pm->perfmon == &perfmon->object)
+		pm->perfmon = NULL;
+	mutex_unlock(&pm->engine.subdev.mutex);
+	return perfmon;
+}
+
+static struct nvkm_object_func
+nvkm_perfmon = {
+	.dtor = nvkm_perfmon_dtor,
+	.mthd = nvkm_perfmon_mthd,
+	.sclass = nvkm_perfmon_child_get,
 };
 
+static int
+nvkm_perfmon_new(struct nvkm_pm *pm, const struct nvkm_oclass *oclass,
+		 void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_perfmon *perfmon;
+
+	if (!(perfmon = kzalloc(sizeof(*perfmon), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_object_ctor(&nvkm_perfmon, oclass, &perfmon->object);
+	perfmon->pm = pm;
+	*pobject = &perfmon->object;
+	return 0;
+}
+
 /*******************************************************************************
  * PPM engine/subdev functions
  ******************************************************************************/
+
+static int
+nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
+		   void *data, u32 size, struct nvkm_object **pobject)
+{
+	struct nvkm_pm *pm = nvkm_pm(oclass->engine);
+	int ret;
+
+	ret = nvkm_perfmon_new(pm, oclass, data, size, pobject);
+	if (ret)
+		return ret;
+
+	mutex_lock(&pm->engine.subdev.mutex);
+	if (pm->perfmon == NULL)
+		pm->perfmon = *pobject;
+	ret = (pm->perfmon == *pobject) ? 0 : -EBUSY;
+	mutex_unlock(&pm->engine.subdev.mutex);
+	return ret;
+}
+
+static const struct nvkm_device_oclass
+nvkm_pm_oclass = {
+	.base.oclass = NVIF_IOCTL_NEW_V0_PERFMON,
+	.base.minver = -1,
+	.base.maxver = -1,
+	.ctor = nvkm_pm_oclass_new,
+};
+
+static int
+nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
+		   const struct nvkm_device_oclass **class)
+{
+	if (index == 0) {
+		oclass->base = nvkm_pm_oclass.base;
+		*class = &nvkm_pm_oclass;
+		return index;
+	}
+	return 1;
+}
+
 int
-nvkm_perfdom_new(struct nvkm_pm *ppm, const char *name, u32 mask,
+nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
+		 const struct nvkm_specsrc *spec)
+{
+	const struct nvkm_specsrc *ssrc;
+	const struct nvkm_specmux *smux;
+	struct nvkm_perfsrc *src;
+	u8 source_nr = 0;
+
+	if (!spec) {
+		/* No sources are defined for this signal. */
+		return 0;
+	}
+
+	ssrc = spec;
+	while (ssrc->name) {
+		smux = ssrc->mux;
+		while (smux->name) {
+			bool found = false;
+			u8 source_id = 0;
+			u32 len;
+
+			list_for_each_entry(src, &pm->sources, head) {
+				if (src->addr == ssrc->addr &&
+				    src->shift == smux->shift) {
+					found = true;
+					break;
+				}
+				source_id++;
+			}
+
+			if (!found) {
+				src = kzalloc(sizeof(*src), GFP_KERNEL);
+				if (!src)
+					return -ENOMEM;
+
+				src->addr   = ssrc->addr;
+				src->mask   = smux->mask;
+				src->shift  = smux->shift;
+				src->enable = smux->enable;
+
+				len = strlen(ssrc->name) +
+				      strlen(smux->name) + 2;
+				src->name = kzalloc(len, GFP_KERNEL);
+				if (!src->name) {
+					kfree(src);
+					return -ENOMEM;
+				}
+				snprintf(src->name, len, "%s_%s", ssrc->name,
+					 smux->name);
+
+				list_add_tail(&src->head, &pm->sources);
+			}
+
+			sig->source[source_nr++] = source_id + 1;
+			smux++;
+		}
+		ssrc++;
+	}
+
+	return 0;
+}
+
+int
+nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
 		 u32 base, u32 size_unit, u32 size_domain,
 		 const struct nvkm_specdom *spec)
 {
 	const struct nvkm_specdom *sdom;
 	const struct nvkm_specsig *ssig;
 	struct nvkm_perfdom *dom;
-	int i;
+	int ret, i;
 
 	for (i = 0; i == 0 || mask; i++) {
 		u32 addr = base + (i * size_unit);
@@ -410,16 +791,20 @@
 					 "%s/%02x", name, (int)(sdom - spec));
 			}
 
-			list_add_tail(&dom->head, &ppm->domains);
+			list_add_tail(&dom->head, &pm->domains);
 			INIT_LIST_HEAD(&dom->list);
 			dom->func = sdom->func;
 			dom->addr = addr;
-			dom->quad = QUAD_MASK;
 			dom->signal_nr = sdom->signal_nr;
 
 			ssig = (sdom++)->signal;
 			while (ssig->name) {
-				dom->signal[ssig->signal].name = ssig->name;
+				struct nvkm_perfsig *sig =
+					&dom->signal[ssig->signal];
+				sig->name = ssig->name;
+				ret = nvkm_perfsrc_new(pm, sig, ssig->source);
+				if (ret)
+					return ret;
 				ssig++;
 			}
 
@@ -432,47 +817,49 @@
 	return 0;
 }
 
-int
-_nvkm_pm_fini(struct nvkm_object *object, bool suspend)
+static int
+nvkm_pm_fini(struct nvkm_engine *engine, bool suspend)
 {
-	struct nvkm_pm *ppm = (void *)object;
-	return nvkm_engine_fini(&ppm->base, suspend);
+	struct nvkm_pm *pm = nvkm_pm(engine);
+	if (pm->func->fini)
+		pm->func->fini(pm);
+	return 0;
 }
 
-int
-_nvkm_pm_init(struct nvkm_object *object)
+static void *
+nvkm_pm_dtor(struct nvkm_engine *engine)
 {
-	struct nvkm_pm *ppm = (void *)object;
-	return nvkm_engine_init(&ppm->base);
-}
+	struct nvkm_pm *pm = nvkm_pm(engine);
+	struct nvkm_perfdom *dom, *next_dom;
+	struct nvkm_perfsrc *src, *next_src;
 
-void
-_nvkm_pm_dtor(struct nvkm_object *object)
-{
-	struct nvkm_pm *ppm = (void *)object;
-	struct nvkm_perfdom *dom, *tmp;
-
-	list_for_each_entry_safe(dom, tmp, &ppm->domains, head) {
+	list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
 		list_del(&dom->head);
 		kfree(dom);
 	}
 
-	nvkm_engine_destroy(&ppm->base);
+	list_for_each_entry_safe(src, next_src, &pm->sources, head) {
+		list_del(&src->head);
+		kfree(src->name);
+		kfree(src);
+	}
+
+	return pm;
 }
 
+static const struct nvkm_engine_func
+nvkm_pm = {
+	.dtor = nvkm_pm_dtor,
+	.fini = nvkm_pm_fini,
+	.base.sclass = nvkm_pm_oclass_get,
+};
+
 int
-nvkm_pm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_pm *pm)
 {
-	struct nvkm_pm *ppm;
-	int ret;
-
-	ret = nvkm_engine_create_(parent, engine, oclass, true, "PPM",
-				  "pm", length, pobject);
-	ppm = *pobject;
-	if (ret)
-		return ret;
-
-	INIT_LIST_HEAD(&ppm->domains);
-	return 0;
+	pm->func = func;
+	INIT_LIST_HEAD(&pm->domains);
+	INIT_LIST_HEAD(&pm->sources);
+	return nvkm_engine_ctor(&nvkm_pm, device, index, 0, true, &pm->engine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c
deleted file mode 100644
index a7a5f3a..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/daemon.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-static void
-pwr_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
-		 struct nvkm_perfctr *ctr)
-{
-	u32 mask = 0x00000000;
-	u32 ctrl = 0x00000001;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(ctr->signal) && ctr->signal[i]; i++)
-		mask |= 1 << (ctr->signal[i] - dom->signal);
-
-	nv_wr32(ppm, 0x10a504 + (ctr->slot * 0x10), mask);
-	nv_wr32(ppm, 0x10a50c + (ctr->slot * 0x10), ctrl);
-	nv_wr32(ppm, 0x10a50c + (ppm->last * 0x10), 0x00000003);
-}
-
-static void
-pwr_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
-		 struct nvkm_perfctr *ctr)
-{
-	ctr->ctr = ppm->pwr[ctr->slot];
-	ctr->clk = ppm->pwr[ppm->last];
-}
-
-static void
-pwr_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom)
-{
-	int i;
-
-	for (i = 0; i <= ppm->last; i++) {
-		ppm->pwr[i] = nv_rd32(ppm, 0x10a508 + (i * 0x10));
-		nv_wr32(ppm, 0x10a508 + (i * 0x10), 0x80000000);
-	}
-}
-
-static const struct nvkm_funcdom
-pwr_perfctr_func = {
-	.init = pwr_perfctr_init,
-	.read = pwr_perfctr_read,
-	.next = pwr_perfctr_next,
-};
-
-const struct nvkm_specdom
-gt215_pm_pwr[] = {
-	{ 0x20, (const struct nvkm_specsig[]) {
-			{ 0x00, "pwr_gr_idle" },
-			{ 0x04, "pwr_bsp_idle" },
-			{ 0x05, "pwr_vp_idle" },
-			{ 0x06, "pwr_ppp_idle" },
-			{ 0x13, "pwr_ce0_idle" },
-			{}
-		}, &pwr_perfctr_func },
-	{}
-};
-
-const struct nvkm_specdom
-gf100_pm_pwr[] = {
-	{ 0x20, (const struct nvkm_specsig[]) {
-			{ 0x00, "pwr_gr_idle" },
-			{ 0x04, "pwr_bsp_idle" },
-			{ 0x05, "pwr_vp_idle" },
-			{ 0x06, "pwr_ppp_idle" },
-			{ 0x13, "pwr_ce0_idle" },
-			{ 0x14, "pwr_ce1_idle" },
-			{}
-		}, &pwr_perfctr_func },
-	{}
-};
-
-const struct nvkm_specdom
-gk104_pm_pwr[] = {
-	{ 0x20, (const struct nvkm_specsig[]) {
-			{ 0x00, "pwr_gr_idle" },
-			{ 0x04, "pwr_bsp_idle" },
-			{ 0x05, "pwr_vp_idle" },
-			{ 0x06, "pwr_ppp_idle" },
-			{ 0x13, "pwr_ce0_idle" },
-			{ 0x14, "pwr_ce1_idle" },
-			{ 0x15, "pwr_ce2_idle" },
-			{}
-		}, &pwr_perfctr_func },
-	{}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
index d54c670..6e441dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/g84.c
@@ -23,15 +23,121 @@
  */
 #include "nv40.h"
 
+const struct nvkm_specsrc
+g84_vfetch_sources[] = {
+	{ 0x400c0c, (const struct nvkm_specmux[]) {
+			{ 0x3, 0, "unk0" },
+			{}
+		}, "pgraph_vfetch_unk0c" },
+	{}
+};
+
+static const struct nvkm_specsrc
+g84_prop_sources[] = {
+	{ 0x408e50, (const struct nvkm_specmux[]) {
+			{ 0x1f, 0, "sel", true },
+			{}
+		}, "pgraph_tpc0_prop_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+g84_crop_sources[] = {
+	{ 0x407008, (const struct nvkm_specmux[]) {
+			{ 0xf, 0, "sel0", true },
+			{ 0x7, 16, "sel1", true },
+			{}
+		}, "pgraph_rop0_crop_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+g84_tex_sources[] = {
+	{ 0x408808, (const struct nvkm_specmux[]) {
+			{ 0xfffff, 0, "unk0" },
+			{}
+		}, "pgraph_tpc0_tex_unk08" },
+	{}
+};
+
 static const struct nvkm_specdom
 g84_pm[] = {
 	{ 0x20, (const struct nvkm_specsig[]) {
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x20, (const struct nvkm_specsig[]) {
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0xbd, "pc01_gr_idle" },
+			{ 0x5e, "pc01_strmout_00" },
+			{ 0x5f, "pc01_strmout_01" },
+			{ 0xd2, "pc01_trast_00" },
+			{ 0xd3, "pc01_trast_01" },
+			{ 0xd4, "pc01_trast_02" },
+			{ 0xd5, "pc01_trast_03" },
+			{ 0xd8, "pc01_trast_04" },
+			{ 0xd9, "pc01_trast_05" },
+			{ 0x5c, "pc01_vattr_00" },
+			{ 0x5d, "pc01_vattr_01" },
+			{ 0x66, "pc01_vfetch_00", g84_vfetch_sources },
+			{ 0x67, "pc01_vfetch_01", g84_vfetch_sources },
+			{ 0x68, "pc01_vfetch_02", g84_vfetch_sources },
+			{ 0x69, "pc01_vfetch_03", g84_vfetch_sources },
+			{ 0x6a, "pc01_vfetch_04", g84_vfetch_sources },
+			{ 0x6b, "pc01_vfetch_05", g84_vfetch_sources },
+			{ 0x6c, "pc01_vfetch_06", g84_vfetch_sources },
+			{ 0x6d, "pc01_vfetch_07", g84_vfetch_sources },
+			{ 0x6e, "pc01_vfetch_08", g84_vfetch_sources },
+			{ 0x6f, "pc01_vfetch_09", g84_vfetch_sources },
+			{ 0x70, "pc01_vfetch_0a", g84_vfetch_sources },
+			{ 0x71, "pc01_vfetch_0b", g84_vfetch_sources },
+			{ 0x72, "pc01_vfetch_0c", g84_vfetch_sources },
+			{ 0x73, "pc01_vfetch_0d", g84_vfetch_sources },
+			{ 0x74, "pc01_vfetch_0e", g84_vfetch_sources },
+			{ 0x75, "pc01_vfetch_0f", g84_vfetch_sources },
+			{ 0x76, "pc01_vfetch_10", g84_vfetch_sources },
+			{ 0x77, "pc01_vfetch_11", g84_vfetch_sources },
+			{ 0x78, "pc01_vfetch_12", g84_vfetch_sources },
+			{ 0x79, "pc01_vfetch_13", g84_vfetch_sources },
+			{ 0x7a, "pc01_vfetch_14", g84_vfetch_sources },
+			{ 0x7b, "pc01_vfetch_15", g84_vfetch_sources },
+			{ 0x7c, "pc01_vfetch_16", g84_vfetch_sources },
+			{ 0x7d, "pc01_vfetch_17", g84_vfetch_sources },
+			{ 0x7e, "pc01_vfetch_18", g84_vfetch_sources },
+			{ 0x7f, "pc01_vfetch_19", g84_vfetch_sources },
+			{ 0x07, "pc01_zcull_00", nv50_zcull_sources },
+			{ 0x08, "pc01_zcull_01", nv50_zcull_sources },
+			{ 0x09, "pc01_zcull_02", nv50_zcull_sources },
+			{ 0x0a, "pc01_zcull_03", nv50_zcull_sources },
+			{ 0x0b, "pc01_zcull_04", nv50_zcull_sources },
+			{ 0x0c, "pc01_zcull_05", nv50_zcull_sources },
+			{ 0xa4, "pc01_unk00" },
+			{ 0xec, "pc01_trailer" },
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x20, (const struct nvkm_specsig[]) {
+	{ 0xa0, (const struct nvkm_specsig[]) {
+			{ 0x30, "pc02_crop_00", g84_crop_sources },
+			{ 0x31, "pc02_crop_01", g84_crop_sources },
+			{ 0x32, "pc02_crop_02", g84_crop_sources },
+			{ 0x33, "pc02_crop_03", g84_crop_sources },
+			{ 0x00, "pc02_prop_00", g84_prop_sources },
+			{ 0x01, "pc02_prop_01", g84_prop_sources },
+			{ 0x02, "pc02_prop_02", g84_prop_sources },
+			{ 0x03, "pc02_prop_03", g84_prop_sources },
+			{ 0x04, "pc02_prop_04", g84_prop_sources },
+			{ 0x05, "pc02_prop_05", g84_prop_sources },
+			{ 0x06, "pc02_prop_06", g84_prop_sources },
+			{ 0x07, "pc02_prop_07", g84_prop_sources },
+			{ 0x48, "pc02_tex_00", g84_tex_sources },
+			{ 0x49, "pc02_tex_01", g84_tex_sources },
+			{ 0x4a, "pc02_tex_02", g84_tex_sources },
+			{ 0x4b, "pc02_tex_03", g84_tex_sources },
+			{ 0x1a, "pc02_tex_04", g84_tex_sources },
+			{ 0x1b, "pc02_tex_05", g84_tex_sources },
+			{ 0x1c, "pc02_tex_06", g84_tex_sources },
+			{ 0x44, "pc02_zrop_00", nv50_zrop_sources },
+			{ 0x45, "pc02_zrop_01", nv50_zrop_sources },
+			{ 0x46, "pc02_zrop_02", nv50_zrop_sources },
+			{ 0x47, "pc02_zrop_03", nv50_zrop_sources },
+			{ 0x8c, "pc02_trailer" },
 			{}
 		}, &nv40_perfctr_func },
 	{ 0x20, (const struct nvkm_specsig[]) {
@@ -52,14 +158,8 @@
 	{}
 };
 
-struct nvkm_oclass *
-g84_pm_oclass = &(struct nv40_pm_oclass) {
-	.base.handle = NV_ENGINE(PM, 0x84),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = _nvkm_pm_fini,
-	},
-	.doms = g84_pm,
-}.base;
+int
+g84_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return nv40_pm_new_(g84_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
index 008fed7..d2901e9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.c
@@ -23,62 +23,146 @@
  */
 #include "gf100.h"
 
+const struct nvkm_specsrc
+gf100_pbfb_sources[] = {
+	{ 0x10f100, (const struct nvkm_specmux[]) {
+			{ 0x1, 0, "unk0" },
+			{ 0x3f, 4, "unk4" },
+			{}
+		}, "pbfb_broadcast_pm_unk100" },
+	{}
+};
+
+const struct nvkm_specsrc
+gf100_pmfb_sources[] = {
+	{ 0x140028, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{ 0x7, 16, "unk16" },
+			{ 0x3, 24, "unk24" },
+			{ 0x2, 29, "unk29" },
+			{}
+		}, "pmfb0_pm_unk28" },
+	{}
+};
+
+static const struct nvkm_specsrc
+gf100_l1_sources[] = {
+	{ 0x5044a8, (const struct nvkm_specmux[]) {
+			{ 0x3f, 0, "sel", true },
+			{}
+		}, "pgraph_gpc0_tpc0_l1_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+gf100_tex_sources[] = {
+	{ 0x5042c0, (const struct nvkm_specmux[]) {
+			{ 0xf, 0, "sel0", true },
+			{ 0x7, 8, "sel1", true },
+			{}
+		}, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
+	{}
+};
+
+static const struct nvkm_specsrc
+gf100_unk400_sources[] = {
+	{ 0x50440c, (const struct nvkm_specmux[]) {
+			{ 0x3f, 0, "sel", true },
+			{}
+		}, "pgraph_gpc0_tpc0_unk400_pm_mux" },
+	{}
+};
+
 static const struct nvkm_specdom
 gf100_pm_hub[] = {
 	{}
 };
 
-static const struct nvkm_specdom
+const struct nvkm_specdom
 gf100_pm_gpc[] = {
+	{ 0xe0, (const struct nvkm_specsig[]) {
+			{ 0x00, "gpc00_l1_00", gf100_l1_sources },
+			{ 0x01, "gpc00_l1_01", gf100_l1_sources },
+			{ 0x02, "gpc00_l1_02", gf100_l1_sources },
+			{ 0x03, "gpc00_l1_03", gf100_l1_sources },
+			{ 0x05, "gpc00_l1_04", gf100_l1_sources },
+			{ 0x06, "gpc00_l1_05", gf100_l1_sources },
+			{ 0x0a, "gpc00_tex_00", gf100_tex_sources },
+			{ 0x0b, "gpc00_tex_01", gf100_tex_sources },
+			{ 0x0c, "gpc00_tex_02", gf100_tex_sources },
+			{ 0x0d, "gpc00_tex_03", gf100_tex_sources },
+			{ 0x0e, "gpc00_tex_04", gf100_tex_sources },
+			{ 0x0f, "gpc00_tex_05", gf100_tex_sources },
+			{ 0x10, "gpc00_tex_06", gf100_tex_sources },
+			{ 0x11, "gpc00_tex_07", gf100_tex_sources },
+			{ 0x12, "gpc00_tex_08", gf100_tex_sources },
+			{ 0x26, "gpc00_unk400_00", gf100_unk400_sources },
+			{}
+		}, &gf100_perfctr_func },
 	{}
 };
 
-static const struct nvkm_specdom
+const struct nvkm_specdom
 gf100_pm_part[] = {
+	{ 0xe0, (const struct nvkm_specsig[]) {
+			{ 0x0f, "part00_pbfb_00", gf100_pbfb_sources },
+			{ 0x10, "part00_pbfb_01", gf100_pbfb_sources },
+			{ 0x21, "part00_pmfb_00", gf100_pmfb_sources },
+			{ 0x04, "part00_pmfb_01", gf100_pmfb_sources },
+			{ 0x00, "part00_pmfb_02", gf100_pmfb_sources },
+			{ 0x02, "part00_pmfb_03", gf100_pmfb_sources },
+			{ 0x01, "part00_pmfb_04", gf100_pmfb_sources },
+			{ 0x2e, "part00_pmfb_05", gf100_pmfb_sources },
+			{ 0x2f, "part00_pmfb_06", gf100_pmfb_sources },
+			{ 0x1b, "part00_pmfb_07", gf100_pmfb_sources },
+			{ 0x1c, "part00_pmfb_08", gf100_pmfb_sources },
+			{ 0x1d, "part00_pmfb_09", gf100_pmfb_sources },
+			{ 0x1e, "part00_pmfb_0a", gf100_pmfb_sources },
+			{ 0x1f, "part00_pmfb_0b", gf100_pmfb_sources },
+			{}
+		}, &gf100_perfctr_func },
 	{}
 };
 
 static void
-gf100_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+gf100_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
 		   struct nvkm_perfctr *ctr)
 {
-	struct gf100_pm_priv *priv = (void *)ppm;
-	struct gf100_pm_cntr *cntr = (void *)ctr;
+	struct nvkm_device *device = pm->engine.subdev.device;
 	u32 log = ctr->logic_op;
 	u32 src = 0x00000000;
 	int i;
 
-	for (i = 0; i < 4 && ctr->signal[i]; i++)
-		src |= (ctr->signal[i] - dom->signal) << (i * 8);
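+	/* Pack the four signal selector indices into src, one byte each. */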
+	for (i = 0; i < 4; i++)
+		src |= ctr->signal[i] << (i * 8);
 
-	nv_wr32(priv, dom->addr + 0x09c, 0x00040002);
-	nv_wr32(priv, dom->addr + 0x100, 0x00000000);
-	nv_wr32(priv, dom->addr + 0x040 + (cntr->base.slot * 0x08), src);
-	nv_wr32(priv, dom->addr + 0x044 + (cntr->base.slot * 0x08), log);
+	nvkm_wr32(device, dom->addr + 0x09c, 0x00040002 | (dom->mode << 3));
+	nvkm_wr32(device, dom->addr + 0x100, 0x00000000);
+	nvkm_wr32(device, dom->addr + 0x040 + (ctr->slot * 0x08), src);
+	nvkm_wr32(device, dom->addr + 0x044 + (ctr->slot * 0x08), log);
 }
 
 static void
-gf100_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+gf100_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
 		   struct nvkm_perfctr *ctr)
 {
-	struct gf100_pm_priv *priv = (void *)ppm;
-	struct gf100_pm_cntr *cntr = (void *)ctr;
+	struct nvkm_device *device = pm->engine.subdev.device;
 
-	switch (cntr->base.slot) {
-	case 0: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x08c); break;
-	case 1: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x088); break;
-	case 2: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x080); break;
-	case 3: cntr->base.ctr = nv_rd32(priv, dom->addr + 0x090); break;
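+	/* The counter value registers are not in slot order, hence the map. */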
+	switch (ctr->slot) {
+	case 0: ctr->ctr = nvkm_rd32(device, dom->addr + 0x08c); break;
+	case 1: ctr->ctr = nvkm_rd32(device, dom->addr + 0x088); break;
+	case 2: ctr->ctr = nvkm_rd32(device, dom->addr + 0x080); break;
+	case 3: ctr->ctr = nvkm_rd32(device, dom->addr + 0x090); break;
 	}
-	cntr->base.clk = nv_rd32(priv, dom->addr + 0x070);
+	dom->clk = nvkm_rd32(device, dom->addr + 0x070);
 }
 
 static void
-gf100_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom)
+gf100_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
 {
-	struct gf100_pm_priv *priv = (void *)ppm;
-	nv_wr32(priv, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
-	nv_wr32(priv, dom->addr + 0x0ec, 0x00000011);
+	struct nvkm_device *device = pm->engine.subdev.device;
+	nvkm_wr32(device, dom->addr + 0x06c, dom->signal_nr - 0x40 + 0x27);
+	nvkm_wr32(device, dom->addr + 0x0ec, 0x00000011);
 }
 
 const struct nvkm_funcdom
@@ -88,72 +172,72 @@
 	.next = gf100_perfctr_next,
 };
 
-int
-gf100_pm_fini(struct nvkm_object *object, bool suspend)
+static void
+gf100_pm_fini(struct nvkm_pm *pm)
 {
-	struct gf100_pm_priv *priv = (void *)object;
-	nv_mask(priv, 0x000200, 0x10000000, 0x00000000);
-	nv_mask(priv, 0x000200, 0x10000000, 0x10000000);
-	return nvkm_pm_fini(&priv->base, suspend);
+	struct nvkm_device *device = pm->engine.subdev.device;
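+	/* Pulse the engine's bit in PMC_ENABLE (0x200) to reset counter state. */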
+	nvkm_mask(device, 0x000200, 0x10000000, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x10000000, 0x10000000);
 }
 
-static int
-gf100_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+static const struct nvkm_pm_func
+gf100_pm_ = {
+	.fini = gf100_pm_fini,
+};
+
+int
+gf100_pm_new_(const struct gf100_pm_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_pm **ppm)
 {
-	struct gf100_pm_priv *priv;
+	struct nvkm_pm *pm;
 	u32 mask;
 	int ret;
 
-	ret = nvkm_pm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(pm = *ppm = kzalloc(sizeof(*pm), GFP_KERNEL)))
+		return -ENOMEM;
 
-	ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gf100_pm_pwr);
+	ret = nvkm_pm_ctor(&gf100_pm_, device, index, pm);
 	if (ret)
 		return ret;
 
 	/* HUB */
-	ret = nvkm_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
-			       gf100_pm_hub);
+	ret = nvkm_perfdom_new(pm, "hub", 0, 0x1b0000, 0, 0x200,
+			       func->doms_hub);
 	if (ret)
 		return ret;
 
 	/* GPC */
-	mask  = (1 << nv_rd32(priv, 0x022430)) - 1;
-	mask &= ~nv_rd32(priv, 0x022504);
-	mask &= ~nv_rd32(priv, 0x022584);
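+	/* Build the mask of usable GPCs from the unit count, dropping any
+	 * units the hardware flags as unavailable. */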
+	mask  = (1 << nvkm_rd32(device, 0x022430)) - 1;
+	mask &= ~nvkm_rd32(device, 0x022504);
+	mask &= ~nvkm_rd32(device, 0x022584);
 
-	ret = nvkm_perfdom_new(&priv->base, "gpc", mask, 0x180000,
-			       0x1000, 0x200, gf100_pm_gpc);
+	ret = nvkm_perfdom_new(pm, "gpc", mask, 0x180000,
+			       0x1000, 0x200, func->doms_gpc);
 	if (ret)
 		return ret;
 
 	/* PART */
-	mask  = (1 << nv_rd32(priv, 0x022438)) - 1;
-	mask &= ~nv_rd32(priv, 0x022548);
-	mask &= ~nv_rd32(priv, 0x0225c8);
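+	/* Likewise for the memory partitions. */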
+	mask  = (1 << nvkm_rd32(device, 0x022438)) - 1;
+	mask &= ~nvkm_rd32(device, 0x022548);
+	mask &= ~nvkm_rd32(device, 0x0225c8);
 
-	ret = nvkm_perfdom_new(&priv->base, "part", mask, 0x1a0000,
-			       0x1000, 0x200, gf100_pm_part);
+	ret = nvkm_perfdom_new(pm, "part", mask, 0x1a0000,
+			       0x1000, 0x200, func->doms_part);
 	if (ret)
 		return ret;
 
-	nv_engine(priv)->cclass = &nvkm_pm_cclass;
-	nv_engine(priv)->sclass =  nvkm_pm_sclass;
-	priv->base.last = 7;
 	return 0;
 }
 
-struct nvkm_oclass
-gf100_pm_oclass = {
-	.handle = NV_ENGINE(PM, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = gf100_pm_fini,
-	},
+static const struct gf100_pm_func
+gf100_pm = {
+	.doms_gpc = gf100_pm_gpc,
+	.doms_hub = gf100_pm_hub,
+	.doms_part = gf100_pm_part,
 };
+
+int
+gf100_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return gf100_pm_new_(&gf100_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
index 6a01fc7..56d0344 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
@@ -2,14 +2,18 @@
 #define __NVKM_PM_NVC0_H__
 #include "priv.h"
 
-struct gf100_pm_priv {
-	struct nvkm_pm base;
+struct gf100_pm_func {
+	const struct nvkm_specdom *doms_hub;
+	const struct nvkm_specdom *doms_gpc;
+	const struct nvkm_specdom *doms_part;
 };
 
-struct gf100_pm_cntr {
-	struct nvkm_perfctr base;
-};
+int gf100_pm_new_(const struct gf100_pm_func *, struct nvkm_device *,
+		  int index, struct nvkm_pm **);
 
 extern const struct nvkm_funcdom gf100_perfctr_func;
-int gf100_pm_fini(struct nvkm_object *, bool);
+extern const struct nvkm_specdom gf100_pm_gpc[];
+
+extern const struct nvkm_specsrc gf100_pbfb_sources[];
+extern const struct nvkm_specsrc gf100_pmfb_sources[];
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
new file mode 100644
index 0000000..49b24c9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf108.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2015 Samuel Pitoiset
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Samuel Pitoiset
+ */
+#include "gf100.h"
+
+static const struct nvkm_specdom
+gf108_pm_hub[] = {
+	{}
+};
+
+static const struct nvkm_specdom
+gf108_pm_part[] = {
+	{ 0xe0, (const struct nvkm_specsig[]) {
+			{ 0x14, "part00_pbfb_00", gf100_pbfb_sources },
+			{ 0x15, "part00_pbfb_01", gf100_pbfb_sources },
+			{ 0x20, "part00_pbfb_02", gf100_pbfb_sources },
+			{ 0x21, "part00_pbfb_03", gf100_pbfb_sources },
+			{ 0x01, "part00_pmfb_00", gf100_pmfb_sources },
+			{ 0x04, "part00_pmfb_01", gf100_pmfb_sources },
+			{ 0x05, "part00_pmfb_02", gf100_pmfb_sources},
+			{ 0x07, "part00_pmfb_03", gf100_pmfb_sources },
+			{ 0x0d, "part00_pmfb_04", gf100_pmfb_sources },
+			{ 0x12, "part00_pmfb_05", gf100_pmfb_sources },
+			{ 0x13, "part00_pmfb_06", gf100_pmfb_sources },
+			{ 0x2c, "part00_pmfb_07", gf100_pmfb_sources },
+			{ 0x2d, "part00_pmfb_08", gf100_pmfb_sources },
+			{ 0x2e, "part00_pmfb_09", gf100_pmfb_sources },
+			{ 0x2f, "part00_pmfb_0a", gf100_pmfb_sources },
+			{ 0x30, "part00_pmfb_0b", gf100_pmfb_sources },
+			{}
+		}, &gf100_perfctr_func },
+	{}
+};
+
+static const struct gf100_pm_func
+gf108_pm = {
+	.doms_gpc = gf100_pm_gpc,
+	.doms_hub = gf108_pm_hub,
+	.doms_part = gf108_pm_part,
+};
+
+int
+gf108_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return gf100_pm_new_(&gf108_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
new file mode 100644
index 0000000..9170025
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf117.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2015 Samuel Pitoiset
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Samuel Pitoiset
+ */
+#include "gf100.h"
+
+static const struct nvkm_specsrc
+gf117_pmfb_sources[] = {
+	{ 0x140028, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{ 0x7, 16, "unk16" },
+			{ 0x3, 24, "unk24" },
+			{ 0x2, 28, "unk28" },
+			{}
+		}, "pmfb0_pm_unk28" },
+	{ 0x14125c, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pmfb0_subp0_pm_unk25c" },
+	{}
+};
+
+static const struct nvkm_specdom
+gf117_pm_hub[] = {
+	{}
+};
+
+static const struct nvkm_specdom
+gf117_pm_part[] = {
+	{ 0xe0, (const struct nvkm_specsig[]) {
+			{ 0x00, "part00_pbfb_00", gf100_pbfb_sources },
+			{ 0x01, "part00_pbfb_01", gf100_pbfb_sources },
+			{ 0x12, "part00_pmfb_00", gf117_pmfb_sources },
+			{ 0x15, "part00_pmfb_01", gf117_pmfb_sources },
+			{ 0x16, "part00_pmfb_02", gf117_pmfb_sources },
+			{ 0x18, "part00_pmfb_03", gf117_pmfb_sources },
+			{ 0x1e, "part00_pmfb_04", gf117_pmfb_sources },
+			{ 0x23, "part00_pmfb_05", gf117_pmfb_sources },
+			{ 0x24, "part00_pmfb_06", gf117_pmfb_sources },
+			{ 0x0c, "part00_pmfb_07", gf117_pmfb_sources },
+			{ 0x0d, "part00_pmfb_08", gf117_pmfb_sources },
+			{ 0x0e, "part00_pmfb_09", gf117_pmfb_sources },
+			{ 0x0f, "part00_pmfb_0a", gf117_pmfb_sources },
+			{ 0x10, "part00_pmfb_0b", gf117_pmfb_sources },
+			{}
+		}, &gf100_perfctr_func },
+	{}
+};
+
+static const struct gf100_pm_func
+gf117_pm = {
+	.doms_gpc = gf100_pm_gpc,
+	.doms_hub = gf117_pm_hub,
+	.doms_part = gf117_pm_part,
+};
+
+int
+gf117_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return gf100_pm_new_(&gf117_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
index 75b9ff3..07f946d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk104.c
@@ -23,6 +23,52 @@
  */
 #include "gf100.h"
 
+static const struct nvkm_specsrc
+gk104_pmfb_sources[] = {
+	{ 0x140028, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{ 0x7, 16, "unk16" },
+			{ 0x3, 24, "unk24" },
+			{ 0x2, 28, "unk28" },
+			{}
+		}, "pmfb0_pm_unk28" },
+	{ 0x14125c, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pmfb0_subp0_pm_unk25c" },
+	{ 0x14165c, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pmfb0_subp1_pm_unk25c" },
+	{ 0x141a5c, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pmfb0_subp2_pm_unk25c" },
+	{ 0x141e5c, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pmfb0_subp3_pm_unk25c" },
+	{}
+};
+
+static const struct nvkm_specsrc
+gk104_tex_sources[] = {
+	{ 0x5042c0, (const struct nvkm_specmux[]) {
+			{ 0xf, 0, "sel0", true },
+			{ 0x7, 8, "sel1", true },
+			{}
+		}, "pgraph_gpc0_tpc0_tex_pm_mux_c_d" },
+	{ 0x5042c8, (const struct nvkm_specmux[]) {
+			{ 0x1f, 0, "sel", true },
+			{}
+		}, "pgraph_gpc0_tpc0_tex_pm_unkc8" },
+	{ 0x5042b8, (const struct nvkm_specmux[]) {
+			{ 0xff, 0, "sel", true },
+			{}
+		}, "pgraph_gpc0_tpc0_tex_pm_unkb8" },
+	{}
+};
+
 static const struct nvkm_specdom
 gk104_pm_hub[] = {
 	{ 0x60, (const struct nvkm_specsig[]) {
@@ -69,12 +115,51 @@
 			{ 0xc7, "gpc00_user_0" },
 			{}
 		}, &gf100_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &gf100_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{ 0x00, "gpc02_tex_00", gk104_tex_sources },
+			{ 0x01, "gpc02_tex_01", gk104_tex_sources },
+			{ 0x02, "gpc02_tex_02", gk104_tex_sources },
+			{ 0x03, "gpc02_tex_03", gk104_tex_sources },
+			{ 0x04, "gpc02_tex_04", gk104_tex_sources },
+			{ 0x05, "gpc02_tex_05", gk104_tex_sources },
+			{ 0x06, "gpc02_tex_06", gk104_tex_sources },
+			{ 0x07, "gpc02_tex_07", gk104_tex_sources },
+			{ 0x08, "gpc02_tex_08", gk104_tex_sources },
+			{ 0x0a, "gpc02_tex_0a", gk104_tex_sources },
+			{ 0x0b, "gpc02_tex_0b", gk104_tex_sources },
+			{ 0x0d, "gpc02_tex_0c", gk104_tex_sources },
+			{ 0x0c, "gpc02_tex_0d", gk104_tex_sources },
+			{ 0x0e, "gpc02_tex_0e", gk104_tex_sources },
+			{ 0x0f, "gpc02_tex_0f", gk104_tex_sources },
+			{ 0x10, "gpc02_tex_10", gk104_tex_sources },
+			{ 0x11, "gpc02_tex_11", gk104_tex_sources },
+			{ 0x12, "gpc02_tex_12", gk104_tex_sources },
+			{}
+		}, &gf100_perfctr_func },
 	{}
 };
 
 static const struct nvkm_specdom
 gk104_pm_part[] = {
 	{ 0x60, (const struct nvkm_specsig[]) {
+			{ 0x00, "part00_pbfb_00", gf100_pbfb_sources },
+			{ 0x01, "part00_pbfb_01", gf100_pbfb_sources },
+			{ 0x0c, "part00_pmfb_00", gk104_pmfb_sources },
+			{ 0x0d, "part00_pmfb_01", gk104_pmfb_sources },
+			{ 0x0e, "part00_pmfb_02", gk104_pmfb_sources },
+			{ 0x0f, "part00_pmfb_03", gk104_pmfb_sources },
+			{ 0x10, "part00_pmfb_04", gk104_pmfb_sources },
+			{ 0x12, "part00_pmfb_05", gk104_pmfb_sources },
+			{ 0x15, "part00_pmfb_06", gk104_pmfb_sources },
+			{ 0x16, "part00_pmfb_07", gk104_pmfb_sources },
+			{ 0x18, "part00_pmfb_08", gk104_pmfb_sources },
+			{ 0x21, "part00_pmfb_09", gk104_pmfb_sources },
+			{ 0x25, "part00_pmfb_0a", gk104_pmfb_sources },
+			{ 0x26, "part00_pmfb_0b", gk104_pmfb_sources },
+			{ 0x27, "part00_pmfb_0c", gk104_pmfb_sources },
 			{ 0x47, "part00_user_0" },
 			{}
 		}, &gf100_perfctr_func },
@@ -85,64 +170,15 @@
 	{}
 };
 
-static int
-gk104_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct gf100_pm_priv *priv;
-	u32 mask;
-	int ret;
-
-	ret = nvkm_pm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	/* PDAEMON */
-	ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gk104_pm_pwr);
-	if (ret)
-		return ret;
-
-	/* HUB */
-	ret = nvkm_perfdom_new(&priv->base, "hub", 0, 0x1b0000, 0, 0x200,
-			       gk104_pm_hub);
-	if (ret)
-		return ret;
-
-	/* GPC */
-	mask  = (1 << nv_rd32(priv, 0x022430)) - 1;
-	mask &= ~nv_rd32(priv, 0x022504);
-	mask &= ~nv_rd32(priv, 0x022584);
-
-	ret = nvkm_perfdom_new(&priv->base, "gpc", mask, 0x180000,
-			       0x1000, 0x200, gk104_pm_gpc);
-	if (ret)
-		return ret;
-
-	/* PART */
-	mask  = (1 << nv_rd32(priv, 0x022438)) - 1;
-	mask &= ~nv_rd32(priv, 0x022548);
-	mask &= ~nv_rd32(priv, 0x0225c8);
-
-	ret = nvkm_perfdom_new(&priv->base, "part", mask, 0x1a0000,
-			       0x1000, 0x200, gk104_pm_part);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->cclass = &nvkm_pm_cclass;
-	nv_engine(priv)->sclass =  nvkm_pm_sclass;
-	priv->base.last = 7;
-	return 0;
-}
-
-struct nvkm_oclass
-gk104_pm_oclass = {
-	.handle = NV_ENGINE(PM, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = gf100_pm_fini,
-	},
+static const struct gf100_pm_func
+gk104_pm = {
+	.doms_gpc = gk104_pm_gpc,
+	.doms_hub = gk104_pm_hub,
+	.doms_part = gk104_pm_part,
 };
+
+int
+gk104_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return gf100_pm_new_(&gk104_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c
deleted file mode 100644
index 6820176..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gk110.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "gf100.h"
-
-static int
-gk110_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct gf100_pm_priv *priv;
-	int ret;
-
-	ret = nvkm_pm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0, gk104_pm_pwr);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->cclass = &nvkm_pm_cclass;
-	nv_engine(priv)->sclass =  nvkm_pm_sclass;
-	return 0;
-}
-
-struct nvkm_oclass
-gk110_pm_oclass = {
-	.handle = NV_ENGINE(PM, 0xf0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk110_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = gf100_pm_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
new file mode 100644
index 0000000..5cf5dd5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt200.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2015 Nouveau project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Samuel Pitoiset
+ */
+#include "nv40.h"
+
+const struct nvkm_specsrc
+gt200_crop_sources[] = {
+	{ 0x407008, (const struct nvkm_specmux[]) {
+			{ 0xf, 0, "sel0", true },
+			{ 0x1f, 16, "sel1", true },
+			{}
+		}, "pgraph_rop0_crop_pm_mux" },
+	{}
+};
+
+const struct nvkm_specsrc
+gt200_prop_sources[] = {
+	{ 0x408750, (const struct nvkm_specmux[]) {
+			{ 0x3f, 0, "sel", true },
+			{}
+		}, "pgraph_tpc0_prop_pm_mux" },
+	{}
+};
+
+const struct nvkm_specsrc
+gt200_tex_sources[] = {
+	{ 0x408508, (const struct nvkm_specmux[]) {
+			{ 0xfffff, 0, "unk0" },
+			{}
+		}, "pgraph_tpc0_tex_unk08" },
+	{}
+};
+
+static const struct nvkm_specdom
+gt200_pm[] = {
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0xc9, "pc01_gr_idle" },
+			{ 0x84, "pc01_strmout_00" },
+			{ 0x85, "pc01_strmout_01" },
+			{ 0xde, "pc01_trast_00" },
+			{ 0xdf, "pc01_trast_01" },
+			{ 0xe0, "pc01_trast_02" },
+			{ 0xe1, "pc01_trast_03" },
+			{ 0xe4, "pc01_trast_04" },
+			{ 0xe5, "pc01_trast_05" },
+			{ 0x82, "pc01_vattr_00" },
+			{ 0x83, "pc01_vattr_01" },
+			{ 0x46, "pc01_vfetch_00", g84_vfetch_sources },
+			{ 0x47, "pc01_vfetch_01", g84_vfetch_sources },
+			{ 0x48, "pc01_vfetch_02", g84_vfetch_sources },
+			{ 0x49, "pc01_vfetch_03", g84_vfetch_sources },
+			{ 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
+			{ 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
+			{ 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
+			{ 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
+			{ 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
+			{ 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
+			{ 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
+			{ 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
+			{ 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
+			{ 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
+			{ 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
+			{ 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
+			{ 0x56, "pc01_vfetch_10", g84_vfetch_sources },
+			{ 0x57, "pc01_vfetch_11", g84_vfetch_sources },
+			{ 0x58, "pc01_vfetch_12", g84_vfetch_sources },
+			{ 0x59, "pc01_vfetch_13", g84_vfetch_sources },
+			{ 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
+			{ 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
+			{ 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
+			{ 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
+			{ 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
+			{ 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
+			{ 0x07, "pc01_zcull_00", nv50_zcull_sources },
+			{ 0x08, "pc01_zcull_01", nv50_zcull_sources },
+			{ 0x09, "pc01_zcull_02", nv50_zcull_sources },
+			{ 0x0a, "pc01_zcull_03", nv50_zcull_sources },
+			{ 0x0b, "pc01_zcull_04", nv50_zcull_sources },
+			{ 0x0c, "pc01_zcull_05", nv50_zcull_sources },
+			{ 0xb0, "pc01_unk00" },
+			{ 0xec, "pc01_trailer" },
+			{}
+		}, &nv40_perfctr_func },
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0x55, "pc02_crop_00", gt200_crop_sources },
+			{ 0x56, "pc02_crop_01", gt200_crop_sources },
+			{ 0x57, "pc02_crop_02", gt200_crop_sources },
+			{ 0x58, "pc02_crop_03", gt200_crop_sources },
+			{ 0x00, "pc02_prop_00", gt200_prop_sources },
+			{ 0x01, "pc02_prop_01", gt200_prop_sources },
+			{ 0x02, "pc02_prop_02", gt200_prop_sources },
+			{ 0x03, "pc02_prop_03", gt200_prop_sources },
+			{ 0x04, "pc02_prop_04", gt200_prop_sources },
+			{ 0x05, "pc02_prop_05", gt200_prop_sources },
+			{ 0x06, "pc02_prop_06", gt200_prop_sources },
+			{ 0x07, "pc02_prop_07", gt200_prop_sources },
+			{ 0x78, "pc02_tex_00", gt200_tex_sources },
+			{ 0x79, "pc02_tex_01", gt200_tex_sources },
+			{ 0x7a, "pc02_tex_02", gt200_tex_sources },
+			{ 0x7b, "pc02_tex_03", gt200_tex_sources },
+			{ 0x32, "pc02_tex_04", gt200_tex_sources },
+			{ 0x33, "pc02_tex_05", gt200_tex_sources },
+			{ 0x34, "pc02_tex_06", gt200_tex_sources },
+			{ 0x74, "pc02_zrop_00", nv50_zrop_sources },
+			{ 0x75, "pc02_zrop_01", nv50_zrop_sources },
+			{ 0x76, "pc02_zrop_02", nv50_zrop_sources },
+			{ 0x77, "pc02_zrop_03", nv50_zrop_sources },
+			{ 0xec, "pc02_trailer" },
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{ 0x20, (const struct nvkm_specsig[]) {
+			{}
+		}, &nv40_perfctr_func },
+	{}
+};
+
+int
+gt200_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return nv40_pm_new_(gt200_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
index d065bfc..c9227ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gt215.c
@@ -23,15 +23,94 @@
  */
 #include "nv40.h"
 
+static const struct nvkm_specsrc
+gt215_zcull_sources[] = {
+	{ 0x402ca4, (const struct nvkm_specmux[]) {
+			{ 0x7fff, 0, "unk0" },
+			{ 0xff, 24, "unk24" },
+			{}
+		}, "pgraph_zcull_pm_unka4" },
+	{}
+};
+
 static const struct nvkm_specdom
 gt215_pm[] = {
 	{ 0x20, (const struct nvkm_specsig[]) {
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x20, (const struct nvkm_specsig[]) {
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0xcb, "pc01_gr_idle" },
+			{ 0x86, "pc01_strmout_00" },
+			{ 0x87, "pc01_strmout_01" },
+			{ 0xe0, "pc01_trast_00" },
+			{ 0xe1, "pc01_trast_01" },
+			{ 0xe2, "pc01_trast_02" },
+			{ 0xe3, "pc01_trast_03" },
+			{ 0xe6, "pc01_trast_04" },
+			{ 0xe7, "pc01_trast_05" },
+			{ 0x84, "pc01_vattr_00" },
+			{ 0x85, "pc01_vattr_01" },
+			{ 0x46, "pc01_vfetch_00", g84_vfetch_sources },
+			{ 0x47, "pc01_vfetch_01", g84_vfetch_sources },
+			{ 0x48, "pc01_vfetch_02", g84_vfetch_sources },
+			{ 0x49, "pc01_vfetch_03", g84_vfetch_sources },
+			{ 0x4a, "pc01_vfetch_04", g84_vfetch_sources },
+			{ 0x4b, "pc01_vfetch_05", g84_vfetch_sources },
+			{ 0x4c, "pc01_vfetch_06", g84_vfetch_sources },
+			{ 0x4d, "pc01_vfetch_07", g84_vfetch_sources },
+			{ 0x4e, "pc01_vfetch_08", g84_vfetch_sources },
+			{ 0x4f, "pc01_vfetch_09", g84_vfetch_sources },
+			{ 0x50, "pc01_vfetch_0a", g84_vfetch_sources },
+			{ 0x51, "pc01_vfetch_0b", g84_vfetch_sources },
+			{ 0x52, "pc01_vfetch_0c", g84_vfetch_sources },
+			{ 0x53, "pc01_vfetch_0d", g84_vfetch_sources },
+			{ 0x54, "pc01_vfetch_0e", g84_vfetch_sources },
+			{ 0x55, "pc01_vfetch_0f", g84_vfetch_sources },
+			{ 0x56, "pc01_vfetch_10", g84_vfetch_sources },
+			{ 0x57, "pc01_vfetch_11", g84_vfetch_sources },
+			{ 0x58, "pc01_vfetch_12", g84_vfetch_sources },
+			{ 0x59, "pc01_vfetch_13", g84_vfetch_sources },
+			{ 0x5a, "pc01_vfetch_14", g84_vfetch_sources },
+			{ 0x5b, "pc01_vfetch_15", g84_vfetch_sources },
+			{ 0x5c, "pc01_vfetch_16", g84_vfetch_sources },
+			{ 0x5d, "pc01_vfetch_17", g84_vfetch_sources },
+			{ 0x5e, "pc01_vfetch_18", g84_vfetch_sources },
+			{ 0x5f, "pc01_vfetch_19", g84_vfetch_sources },
+			{ 0x07, "pc01_zcull_00", gt215_zcull_sources },
+			{ 0x08, "pc01_zcull_01", gt215_zcull_sources },
+			{ 0x09, "pc01_zcull_02", gt215_zcull_sources },
+			{ 0x0a, "pc01_zcull_03", gt215_zcull_sources },
+			{ 0x0b, "pc01_zcull_04", gt215_zcull_sources },
+			{ 0x0c, "pc01_zcull_05", gt215_zcull_sources },
+			{ 0xb2, "pc01_unk00" },
+			{ 0xec, "pc01_trailer" },
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x20, (const struct nvkm_specsig[]) {
+	{ 0xe0, (const struct nvkm_specsig[]) {
+			{ 0x64, "pc02_crop_00", gt200_crop_sources },
+			{ 0x65, "pc02_crop_01", gt200_crop_sources },
+			{ 0x66, "pc02_crop_02", gt200_crop_sources },
+			{ 0x67, "pc02_crop_03", gt200_crop_sources },
+			{ 0x00, "pc02_prop_00", gt200_prop_sources },
+			{ 0x01, "pc02_prop_01", gt200_prop_sources },
+			{ 0x02, "pc02_prop_02", gt200_prop_sources },
+			{ 0x03, "pc02_prop_03", gt200_prop_sources },
+			{ 0x04, "pc02_prop_04", gt200_prop_sources },
+			{ 0x05, "pc02_prop_05", gt200_prop_sources },
+			{ 0x06, "pc02_prop_06", gt200_prop_sources },
+			{ 0x07, "pc02_prop_07", gt200_prop_sources },
+			{ 0x80, "pc02_tex_00", gt200_tex_sources },
+			{ 0x81, "pc02_tex_01", gt200_tex_sources },
+			{ 0x82, "pc02_tex_02", gt200_tex_sources },
+			{ 0x83, "pc02_tex_03", gt200_tex_sources },
+			{ 0x3a, "pc02_tex_04", gt200_tex_sources },
+			{ 0x3b, "pc02_tex_05", gt200_tex_sources },
+			{ 0x3c, "pc02_tex_06", gt200_tex_sources },
+			{ 0x7c, "pc02_zrop_00", nv50_zrop_sources },
+			{ 0x7d, "pc02_zrop_01", nv50_zrop_sources },
+			{ 0x7e, "pc02_zrop_02", nv50_zrop_sources },
+			{ 0x7f, "pc02_zrop_03", nv50_zrop_sources },
+			{ 0xcc, "pc02_trailer" },
 			{}
 		}, &nv40_perfctr_func },
 	{ 0x20, (const struct nvkm_specsig[]) {
@@ -52,32 +131,8 @@
 	{}
 };
 
-static int
-gt215_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **object)
+int
+gt215_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
 {
-	int ret = nv40_pm_ctor(parent, engine, oclass, data, size, object);
-	if (ret == 0) {
-		struct nv40_pm_priv *priv = (void *)*object;
-		ret = nvkm_perfdom_new(&priv->base, "pwr", 0, 0, 0, 0,
-				       gt215_pm_pwr);
-		if (ret)
-			return ret;
-
-		priv->base.last = 3;
-	}
-	return ret;
+	return nv40_pm_new_(gt215_pm, device, index, ppm);
 }
-
-struct nvkm_oclass *
-gt215_pm_oclass = &(struct nv40_pm_oclass) {
-	.base.handle = NV_ENGINE(PM, 0xa3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = _nvkm_pm_fini,
-	},
-	.doms = gt215_pm,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
index ff22f06..4bef72a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
@@ -24,46 +24,44 @@
 #include "nv40.h"
 
 static void
-nv40_perfctr_init(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+nv40_perfctr_init(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
 		  struct nvkm_perfctr *ctr)
 {
-	struct nv40_pm_priv *priv = (void *)ppm;
-	struct nv40_pm_cntr *cntr = (void *)ctr;
+	struct nvkm_device *device = pm->engine.subdev.device;
 	u32 log = ctr->logic_op;
 	u32 src = 0x00000000;
 	int i;
 
-	for (i = 0; i < 4 && ctr->signal[i]; i++)
-		src |= (ctr->signal[i] - dom->signal) << (i * 8);
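+	/* Each of the four counter inputs selects a signal by index,
+	 * one byte each. */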
+	for (i = 0; i < 4; i++)
+		src |= ctr->signal[i] << (i * 8);
 
-	nv_wr32(priv, 0x00a7c0 + dom->addr, 0x00000001);
-	nv_wr32(priv, 0x00a400 + dom->addr + (cntr->base.slot * 0x40), src);
-	nv_wr32(priv, 0x00a420 + dom->addr + (cntr->base.slot * 0x40), log);
+	nvkm_wr32(device, 0x00a7c0 + dom->addr, 0x00000001 | (dom->mode << 4));
+	nvkm_wr32(device, 0x00a400 + dom->addr + (ctr->slot * 0x40), src);
+	nvkm_wr32(device, 0x00a420 + dom->addr + (ctr->slot * 0x40), log);
 }
 
 static void
-nv40_perfctr_read(struct nvkm_pm *ppm, struct nvkm_perfdom *dom,
+nv40_perfctr_read(struct nvkm_pm *pm, struct nvkm_perfdom *dom,
 		  struct nvkm_perfctr *ctr)
 {
-	struct nv40_pm_priv *priv = (void *)ppm;
-	struct nv40_pm_cntr *cntr = (void *)ctr;
+	struct nvkm_device *device = pm->engine.subdev.device;
 
-	switch (cntr->base.slot) {
-	case 0: cntr->base.ctr = nv_rd32(priv, 0x00a700 + dom->addr); break;
-	case 1: cntr->base.ctr = nv_rd32(priv, 0x00a6c0 + dom->addr); break;
-	case 2: cntr->base.ctr = nv_rd32(priv, 0x00a680 + dom->addr); break;
-	case 3: cntr->base.ctr = nv_rd32(priv, 0x00a740 + dom->addr); break;
+	switch (ctr->slot) {
+	case 0: ctr->ctr = nvkm_rd32(device, 0x00a700 + dom->addr); break;
+	case 1: ctr->ctr = nvkm_rd32(device, 0x00a6c0 + dom->addr); break;
+	case 2: ctr->ctr = nvkm_rd32(device, 0x00a680 + dom->addr); break;
+	case 3: ctr->ctr = nvkm_rd32(device, 0x00a740 + dom->addr); break;
 	}
-	cntr->base.clk = nv_rd32(priv, 0x00a600 + dom->addr);
+	dom->clk = nvkm_rd32(device, 0x00a600 + dom->addr);
 }
 
 static void
-nv40_perfctr_next(struct nvkm_pm *ppm, struct nvkm_perfdom *dom)
+nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
 {
-	struct nv40_pm_priv *priv = (void *)ppm;
-	if (priv->sequence != ppm->sequence) {
-		nv_wr32(priv, 0x400084, 0x00000020);
-		priv->sequence = ppm->sequence;
+	struct nvkm_device *device = pm->engine.subdev.device;
+	struct nv40_pm *nv40pm = nv40_pm(pm);
+
+	if (nv40pm->sequence != pm->sequence) {
+		nvkm_wr32(device, 0x400084, 0x00000020);
+		nv40pm->sequence = pm->sequence;
 	}
 }
 
@@ -74,6 +72,28 @@
 	.next = nv40_perfctr_next,
 };
 
+static const struct nvkm_pm_func
+nv40_pm_ = {
+};
+
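+/* Common constructor for NV40-style PM: allocate the wrapper, construct the
+ * base engine, then register a single counter domain from the given spec. */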
+int
+nv40_pm_new_(const struct nvkm_specdom *doms, struct nvkm_device *device,
+	     int index, struct nvkm_pm **ppm)
+{
+	struct nv40_pm *pm;
+	int ret;
+
+	if (!(pm = kzalloc(sizeof(*pm), GFP_KERNEL)))
+		return -ENOMEM;
+	*ppm = &pm->base;
+
+	ret = nvkm_pm_ctor(&nv40_pm_, device, index, &pm->base);
+	if (ret)
+		return ret;
+
+	return nvkm_perfdom_new(&pm->base, "pc", 0, 0, 0, 4, doms);
+}
+
 static const struct nvkm_specdom
 nv40_pm[] = {
 	{ 0x20, (const struct nvkm_specsig[]) {
@@ -95,36 +115,7 @@
 };
 
 int
-nv40_pm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+nv40_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
 {
-	struct nv40_pm_oclass *mclass = (void *)oclass;
-	struct nv40_pm_priv *priv;
-	int ret;
-
-	ret = nvkm_pm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	ret = nvkm_perfdom_new(&priv->base, "pm", 0, 0, 0, 4, mclass->doms);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->cclass = &nvkm_pm_cclass;
-	nv_engine(priv)->sclass =  nvkm_pm_sclass;
-	return 0;
+	return nv40_pm_new_(nv40_pm, device, index, ppm);
 }
-
-struct nvkm_oclass *
-nv40_pm_oclass = &(struct nv40_pm_oclass) {
-	.base.handle = NV_ENGINE(PM, 0x40),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = _nvkm_pm_fini,
-	},
-	.doms = nv40_pm,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
index 2338e15..da481ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
@@ -1,24 +1,14 @@
 #ifndef __NVKM_PM_NV40_H__
 #define __NVKM_PM_NV40_H__
+#define nv40_pm(p) container_of((p), struct nv40_pm, base)
 #include "priv.h"
 
-struct nv40_pm_oclass {
-	struct nvkm_oclass base;
-	const struct nvkm_specdom *doms;
-};
-
-struct nv40_pm_priv {
+struct nv40_pm {
 	struct nvkm_pm base;
 	u32 sequence;
 };
 
-int nv40_pm_ctor(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, void *data, u32 size,
-		      struct nvkm_object **pobject);
-
-struct nv40_pm_cntr {
-	struct nvkm_perfctr base;
-};
-
+int nv40_pm_new_(const struct nvkm_specdom *, struct nvkm_device *,
+		 int index, struct nvkm_pm **);
 extern const struct nvkm_funcdom nv40_perfctr_func;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
index 6af83b5..cc5a41d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv50.c
@@ -23,35 +23,153 @@
  */
 #include "nv40.h"
 
+const struct nvkm_specsrc
+nv50_zcull_sources[] = {
+	{ 0x402ca4, (const struct nvkm_specmux[]) {
+			{ 0x7fff, 0, "unk0" },
+			{}
+		}, "pgraph_zcull_pm_unka4" },
+	{}
+};
+
+const struct nvkm_specsrc
+nv50_zrop_sources[] = {
+	{ 0x40708c, (const struct nvkm_specmux[]) {
+			{ 0xf, 0, "sel0", true },
+			{ 0xf, 16, "sel1", true },
+			{}
+		}, "pgraph_rop0_zrop_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+nv50_prop_sources[] = {
+	{ 0x40be50, (const struct nvkm_specmux[]) {
+			{ 0x1f, 0, "sel", true },
+			{}
+		}, "pgraph_tpc3_prop_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+nv50_crop_sources[] = {
+	{ 0x407008, (const struct nvkm_specmux[]) {
+			{ 0x7, 0, "sel0", true },
+			{ 0x7, 16, "sel1", true },
+			{}
+		}, "pgraph_rop0_crop_pm_mux" },
+	{}
+};
+
+static const struct nvkm_specsrc
+nv50_tex_sources[] = {
+	{ 0x40b808, (const struct nvkm_specmux[]) {
+			{ 0x3fff, 0, "unk0" },
+			{}
+		}, "pgraph_tpc3_tex_unk08" },
+	{}
+};
+
+static const struct nvkm_specsrc
+nv50_vfetch_sources[] = {
+	{ 0x400c0c, (const struct nvkm_specmux[]) {
+			{ 0x1, 0, "unk0" },
+			{}
+		}, "pgraph_vfetch_unk0c" },
+	{}
+};
+
 static const struct nvkm_specdom
 nv50_pm[] = {
-	{ 0x040, (const struct nvkm_specsig[]) {
+	{ 0x20, (const struct nvkm_specsig[]) {
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x100, (const struct nvkm_specsig[]) {
-			{ 0xc8, "gr_idle" },
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0xc8, "pc01_gr_idle" },
+			{ 0x7f, "pc01_strmout_00" },
+			{ 0x80, "pc01_strmout_01" },
+			{ 0xdc, "pc01_trast_00" },
+			{ 0xdd, "pc01_trast_01" },
+			{ 0xde, "pc01_trast_02" },
+			{ 0xdf, "pc01_trast_03" },
+			{ 0xe2, "pc01_trast_04" },
+			{ 0xe3, "pc01_trast_05" },
+			{ 0x7c, "pc01_vattr_00" },
+			{ 0x7d, "pc01_vattr_01" },
+			{ 0x26, "pc01_vfetch_00", nv50_vfetch_sources },
+			{ 0x27, "pc01_vfetch_01", nv50_vfetch_sources },
+			{ 0x28, "pc01_vfetch_02", nv50_vfetch_sources },
+			{ 0x29, "pc01_vfetch_03", nv50_vfetch_sources },
+			{ 0x2a, "pc01_vfetch_04", nv50_vfetch_sources },
+			{ 0x2b, "pc01_vfetch_05", nv50_vfetch_sources },
+			{ 0x2c, "pc01_vfetch_06", nv50_vfetch_sources },
+			{ 0x2d, "pc01_vfetch_07", nv50_vfetch_sources },
+			{ 0x2e, "pc01_vfetch_08", nv50_vfetch_sources },
+			{ 0x2f, "pc01_vfetch_09", nv50_vfetch_sources },
+			{ 0x30, "pc01_vfetch_0a", nv50_vfetch_sources },
+			{ 0x31, "pc01_vfetch_0b", nv50_vfetch_sources },
+			{ 0x32, "pc01_vfetch_0c", nv50_vfetch_sources },
+			{ 0x33, "pc01_vfetch_0d", nv50_vfetch_sources },
+			{ 0x34, "pc01_vfetch_0e", nv50_vfetch_sources },
+			{ 0x35, "pc01_vfetch_0f", nv50_vfetch_sources },
+			{ 0x36, "pc01_vfetch_10", nv50_vfetch_sources },
+			{ 0x37, "pc01_vfetch_11", nv50_vfetch_sources },
+			{ 0x38, "pc01_vfetch_12", nv50_vfetch_sources },
+			{ 0x39, "pc01_vfetch_13", nv50_vfetch_sources },
+			{ 0x3a, "pc01_vfetch_14", nv50_vfetch_sources },
+			{ 0x3b, "pc01_vfetch_15", nv50_vfetch_sources },
+			{ 0x3c, "pc01_vfetch_16", nv50_vfetch_sources },
+			{ 0x3d, "pc01_vfetch_17", nv50_vfetch_sources },
+			{ 0x3e, "pc01_vfetch_18", nv50_vfetch_sources },
+			{ 0x3f, "pc01_vfetch_19", nv50_vfetch_sources },
+			{ 0x20, "pc01_zcull_00", nv50_zcull_sources },
+			{ 0x21, "pc01_zcull_01", nv50_zcull_sources },
+			{ 0x22, "pc01_zcull_02", nv50_zcull_sources },
+			{ 0x23, "pc01_zcull_03", nv50_zcull_sources },
+			{ 0x24, "pc01_zcull_04", nv50_zcull_sources },
+			{ 0x25, "pc01_zcull_05", nv50_zcull_sources },
+			{ 0xae, "pc01_unk00" },
+			{ 0xee, "pc01_trailer" },
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x100, (const struct nvkm_specsig[]) {
+	{ 0xf0, (const struct nvkm_specsig[]) {
+			{ 0x52, "pc02_crop_00", nv50_crop_sources },
+			{ 0x53, "pc02_crop_01", nv50_crop_sources },
+			{ 0x54, "pc02_crop_02", nv50_crop_sources },
+			{ 0x55, "pc02_crop_03", nv50_crop_sources },
+			{ 0x00, "pc02_prop_00", nv50_prop_sources },
+			{ 0x01, "pc02_prop_01", nv50_prop_sources },
+			{ 0x02, "pc02_prop_02", nv50_prop_sources },
+			{ 0x03, "pc02_prop_03", nv50_prop_sources },
+			{ 0x04, "pc02_prop_04", nv50_prop_sources },
+			{ 0x05, "pc02_prop_05", nv50_prop_sources },
+			{ 0x06, "pc02_prop_06", nv50_prop_sources },
+			{ 0x07, "pc02_prop_07", nv50_prop_sources },
+			{ 0x70, "pc02_tex_00", nv50_tex_sources },
+			{ 0x71, "pc02_tex_01", nv50_tex_sources },
+			{ 0x72, "pc02_tex_02", nv50_tex_sources },
+			{ 0x73, "pc02_tex_03", nv50_tex_sources },
+			{ 0x40, "pc02_tex_04", nv50_tex_sources },
+			{ 0x41, "pc02_tex_05", nv50_tex_sources },
+			{ 0x42, "pc02_tex_06", nv50_tex_sources },
+			{ 0x6c, "pc02_zrop_00", nv50_zrop_sources },
+			{ 0x6d, "pc02_zrop_01", nv50_zrop_sources },
+			{ 0x6e, "pc02_zrop_02", nv50_zrop_sources },
+			{ 0x6f, "pc02_zrop_03", nv50_zrop_sources },
+			{ 0xee, "pc02_trailer" },
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x020, (const struct nvkm_specsig[]) {
+	{ 0x20, (const struct nvkm_specsig[]) {
 			{}
 		}, &nv40_perfctr_func },
-	{ 0x040, (const struct nvkm_specsig[]) {
+	{ 0x20, (const struct nvkm_specsig[]) {
 			{}
 		}, &nv40_perfctr_func },
 	{}
 };
 
-struct nvkm_oclass *
-nv50_pm_oclass = &(struct nv40_pm_oclass) {
-	.base.handle = NV_ENGINE(PM, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_pm_ctor,
-		.dtor = _nvkm_pm_dtor,
-		.init = _nvkm_pm_init,
-		.fini = _nvkm_pm_fini,
-	},
-	.doms = nv50_pm,
-}.base;
+int
+nv50_pm_new(struct nvkm_device *device, int index, struct nvkm_pm **ppm)
+{
+	return nv40_pm_new_(nv50_pm, device, index, ppm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
index 1e6eff2..d7b81cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
@@ -1,58 +1,85 @@
 #ifndef __NVKM_PM_PRIV_H__
 #define __NVKM_PM_PRIV_H__
+#define nvkm_pm(p) container_of((p), struct nvkm_pm, engine)
 #include <engine/pm.h>
 
+int nvkm_pm_ctor(const struct nvkm_pm_func *, struct nvkm_device *,
+		 int index, struct nvkm_pm *);
+
+struct nvkm_pm_func {
+	void (*fini)(struct nvkm_pm *);
+};
+
 struct nvkm_perfctr {
-	struct nvkm_object base;
 	struct list_head head;
-	struct nvkm_perfsig *signal[4];
+	u8 domain;
+	u8  signal[4];
+	u64 source[4][8];
 	int slot;
 	u32 logic_op;
-	u32 clk;
 	u32 ctr;
 };
 
-extern struct nvkm_oclass nvkm_pm_sclass[];
-
-#include <core/engctx.h>
-
-struct nvkm_perfctx {
-	struct nvkm_engctx base;
+struct nvkm_specmux {
+	u32 mask;
+	u8 shift;
+	const char *name;
+	bool enable;
 };
 
-extern struct nvkm_oclass nvkm_pm_cclass;
+struct nvkm_specsrc {
+	u32 addr;
+	const struct nvkm_specmux *mux;
+	const char *name;
+};
+
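+/* Runtime instance of a specsrc/specmux pair, kept on the pm->sources list. */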
+struct nvkm_perfsrc {
+	struct list_head head;
+	char *name;
+	u32 addr;
+	u32 mask;
+	u8 shift;
+	bool enable;
+};
+
+extern const struct nvkm_specsrc nv50_zcull_sources[];
+extern const struct nvkm_specsrc nv50_zrop_sources[];
+extern const struct nvkm_specsrc g84_vfetch_sources[];
+extern const struct nvkm_specsrc gt200_crop_sources[];
+extern const struct nvkm_specsrc gt200_prop_sources[];
+extern const struct nvkm_specsrc gt200_tex_sources[];
 
 struct nvkm_specsig {
 	u8 signal;
 	const char *name;
+	const struct nvkm_specsrc *source;
 };
 
 struct nvkm_perfsig {
 	const char *name;
+	u8 source[8];
 };
 
-struct nvkm_perfdom;
-struct nvkm_perfctr *
-nvkm_perfsig_wrap(struct nvkm_pm *, const char *, struct nvkm_perfdom **);
-
 struct nvkm_specdom {
 	u16 signal_nr;
 	const struct nvkm_specsig *signal;
 	const struct nvkm_funcdom *func;
 };
 
-extern const struct nvkm_specdom gt215_pm_pwr[];
-extern const struct nvkm_specdom gf100_pm_pwr[];
-extern const struct nvkm_specdom gk104_pm_pwr[];
+#define nvkm_perfdom(p) container_of((p), struct nvkm_perfdom, object)
 
 struct nvkm_perfdom {
+	struct nvkm_object object;
+	struct nvkm_perfmon *perfmon;
 	struct list_head head;
 	struct list_head list;
 	const struct nvkm_funcdom *func;
+	struct nvkm_perfctr *ctr[4];
 	char name[32];
 	u32 addr;
-	u8  quad;
-	u32 signal_nr;
+	u8  mode;
+	u32 clk;
+	u16 signal_nr;
 	struct nvkm_perfsig signal[];
 };
 
@@ -67,24 +94,10 @@
 int nvkm_perfdom_new(struct nvkm_pm *, const char *, u32, u32, u32, u32,
 		     const struct nvkm_specdom *);
 
-#define nvkm_pm_create(p,e,o,d)                                        \
-	nvkm_pm_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_pm_dtor(p) ({                                             \
-	struct nvkm_pm *c = (p);                                       \
-	_nvkm_pm_dtor(nv_object(c));                                   \
-})
-#define nvkm_pm_init(p) ({                                             \
-	struct nvkm_pm *c = (p);                                       \
-	_nvkm_pm_init(nv_object(c));                                   \
-})
-#define nvkm_pm_fini(p,s) ({                                           \
-	struct nvkm_pm *c = (p);                                       \
-	_nvkm_pm_fini(nv_object(c), (s));                              \
-})
+#define nvkm_perfmon(p) container_of((p), struct nvkm_perfmon, object)
 
-int nvkm_pm_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, int, void **);
-void _nvkm_pm_dtor(struct nvkm_object *);
-int  _nvkm_pm_init(struct nvkm_object *);
-int  _nvkm_pm_fini(struct nvkm_object *, bool);
+struct nvkm_perfmon {
+	struct nvkm_object object;
+	struct nvkm_pm *pm;
+};
 #endif
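
The reworked priv.h drops the old nvkm_object base-class plumbing in favour of plain embedded structs and container_of() downcast macros (nvkm_pm(), nvkm_perfdom(), nvkm_perfmon()). A minimal runnable sketch of that pattern, using simplified stand-in types rather than the real nvkm structures:

    /* Standalone illustration of the container_of() accessors above.
     * "struct perfdom" is a hypothetical stand-in, not the nvkm type. */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct object { int handle; };

    struct perfdom {
        struct object object;   /* embedded base, as in nvkm_perfdom */
        const char *name;
    };

    #define perfdom(p) container_of((p), struct perfdom, object)

    int main(void)
    {
        struct perfdom dom = { .object = { .handle = 1 }, .name = "pc01" };
        struct object *base = &dom.object; /* what core code passes around */

        /* recover the derived type from the embedded base pointer */
        printf("%s\n", perfdom(base)->name);
        return 0;
    }
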
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s
index 06ee060..66b147b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s
@@ -1,5 +1,5 @@
 /*
- *  fuc microcode for g98 psec engine
+ *  fuc microcode for g98 sec engine
  *  Copyright (C) 2010  Marcin Kościelnicki
  *
  *  This program is free software; you can redistribute it and/or modify
@@ -17,7 +17,7 @@
  *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
-.section #g98_psec_data
+.section #g98_sec_data
 
 ctx_dma:
 ctx_dma_query:		.b32 0
@@ -94,7 +94,7 @@
 
 .align 0x100
 
-.section #g98_psec_code
+.section #g98_sec_code
 
 	// $r0 is always set to 0 in our code - this allows some space savings.
 	clear b32 $r0
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
index 5d65c4f..eca6222 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
@@ -1,4 +1,4 @@
-uint32_t g98_psec_data[] = {
+uint32_t g98_sec_data[] = {
 /* 0x0000: ctx_dma */
 /* 0x0000: ctx_dma_query */
 	0x00000000,
@@ -150,7 +150,7 @@
 	0x00000000,
 };
 
-uint32_t g98_psec_code[] = {
+uint32_t g98_sec_code[] = {
 	0x17f004bd,
 	0x0010fe35,
 	0xf10004fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
index 9d5c1b8..995c2c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
@@ -22,47 +22,14 @@
  * Authors: Ben Skeggs
  */
 #include <engine/sec.h>
-#include <engine/falcon.h>
+#include <engine/fifo.h>
 #include "fuc/g98.fuc0s.h"
 
 #include <core/client.h>
 #include <core/enum.h>
-#include <engine/fifo.h>
+#include <core/gpuobj.h>
 
-struct g98_sec_priv {
-	struct nvkm_falcon base;
-};
-
-/*******************************************************************************
- * Crypt object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_sec_sclass[] = {
-	{ 0x88b4, &nvkm_object_ofuncs },
-	{},
-};
-
-/*******************************************************************************
- * PSEC context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g98_sec_cclass = {
-	.handle = NV_ENGCTX(SEC, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_falcon_context_ctor,
-		.dtor = _nvkm_falcon_context_dtor,
-		.init = _nvkm_falcon_context_init,
-		.fini = _nvkm_falcon_context_fini,
-		.rd32 = _nvkm_falcon_context_rd32,
-		.wr32 = _nvkm_falcon_context_wr32,
-	},
-};
-
-/*******************************************************************************
- * PSEC engine/subdev functions
- ******************************************************************************/
+#include <nvif/class.h>
 
 static const struct nvkm_enum g98_sec_isr_error_name[] = {
 	{ 0x0000, "ILLEGAL_MTHD" },
@@ -73,77 +40,44 @@
 };
 
 static void
-g98_sec_intr(struct nvkm_subdev *subdev)
+g98_sec_intr(struct nvkm_falcon *sec, struct nvkm_fifo_chan *chan)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
-	struct nvkm_engine *engine = nv_engine(subdev);
-	struct nvkm_object *engctx;
-	struct g98_sec_priv *priv = (void *)subdev;
-	u32 disp = nv_rd32(priv, 0x08701c);
-	u32 stat = nv_rd32(priv, 0x087008) & disp & ~(disp >> 16);
-	u32 inst = nv_rd32(priv, 0x087050) & 0x3fffffff;
-	u32 ssta = nv_rd32(priv, 0x087040) & 0x0000ffff;
-	u32 addr = nv_rd32(priv, 0x087040) >> 16;
+	struct nvkm_subdev *subdev = &sec->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 ssta = nvkm_rd32(device, 0x087040) & 0x0000ffff;
+	u32 addr = nvkm_rd32(device, 0x087040) >> 16;
 	u32 mthd = (addr & 0x07ff) << 2;
 	u32 subc = (addr & 0x3800) >> 11;
-	u32 data = nv_rd32(priv, 0x087044);
-	int chid;
+	u32 data = nvkm_rd32(device, 0x087044);
+	const struct nvkm_enum *en =
+		nvkm_enum_find(g98_sec_isr_error_name, ssta);
 
-	engctx = nvkm_engctx_get(engine, inst);
-	chid   = pfifo->chid(pfifo, engctx);
-
-	if (stat & 0x00000040) {
-		nv_error(priv, "DISPATCH_ERROR [");
-		nvkm_enum_print(g98_sec_isr_error_name, ssta);
-		pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
-		       chid, (u64)inst << 12, nvkm_client_name(engctx),
-		       subc, mthd, data);
-		nv_wr32(priv, 0x087004, 0x00000040);
-		stat &= ~0x00000040;
-	}
-
-	if (stat) {
-		nv_error(priv, "unhandled intr 0x%08x\n", stat);
-		nv_wr32(priv, 0x087004, stat);
-	}
-
-	nvkm_engctx_put(engctx);
+	nvkm_error(subdev, "DISPATCH_ERROR %04x [%s] ch %d [%010llx %s] "
+			   "subc %d mthd %04x data %08x\n", ssta,
+		   en ? en->name : "UNKNOWN", chan ? chan->chid : -1,
+		   chan ? chan->inst->addr : 0,
+		   chan ? chan->object.client->name : "unknown",
+		   subc, mthd, data);
 }
 
-static int
-g98_sec_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct g98_sec_priv *priv;
-	int ret;
-
-	ret = nvkm_falcon_create(parent, engine, oclass, 0x087000, true,
-				 "PSEC", "sec", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x00004000;
-	nv_subdev(priv)->intr = g98_sec_intr;
-	nv_engine(priv)->cclass = &g98_sec_cclass;
-	nv_engine(priv)->sclass = g98_sec_sclass;
-	nv_falcon(priv)->code.data = g98_psec_code;
-	nv_falcon(priv)->code.size = sizeof(g98_psec_code);
-	nv_falcon(priv)->data.data = g98_psec_data;
-	nv_falcon(priv)->data.size = sizeof(g98_psec_data);
-	return 0;
-}
-
-struct nvkm_oclass
-g98_sec_oclass = {
-	.handle = NV_ENGINE(SEC, 0x98),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g98_sec_ctor,
-		.dtor = _nvkm_falcon_dtor,
-		.init = _nvkm_falcon_init,
-		.fini = _nvkm_falcon_fini,
-		.rd32 = _nvkm_falcon_rd32,
-		.wr32 = _nvkm_falcon_wr32,
-	},
+static const struct nvkm_falcon_func
+g98_sec = {
+	.code.data = g98_sec_code,
+	.code.size = sizeof(g98_sec_code),
+	.data.data = g98_sec_data,
+	.data.size = sizeof(g98_sec_data),
+	.pmc_enable = 0x00004000,
+	.intr = g98_sec_intr,
+	.sclass = {
+		{ -1, -1, G98_SEC },
+		{}
+	}
 };
+
+int
+g98_sec_new(struct nvkm_device *device, int index,
+	    struct nvkm_engine **pengine)
+{
+	return nvkm_falcon_new_(&g98_sec, device, index,
+				true, 0x087000, pengine);
+}
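
g98.c now describes the engine exactly once, in a const struct nvkm_falcon_func, and its constructor shrinks to a one-line call into the common falcon code. The idiom — one static read-only description table per chipset plus a thin *_new() that binds it to a device — looks roughly like this standalone sketch (all types here are simplified stand-ins, not the nvkm API):

    #include <stdio.h>

    struct device { const char *name; };

    struct engine_func {
        void (*intr)(struct device *);
        unsigned pmc_enable;
    };

    static void sec_intr(struct device *dev)
    {
        printf("%s: sec interrupt\n", dev->name);
    }

    /* one static, read-only description per chipset ... */
    static const struct engine_func g98_sec = {
        .intr = sec_intr,
        .pmc_enable = 0x00004000,
    };

    /* ... and a constructor that just binds it to a device instance */
    static int sec_new(struct device *dev, const struct engine_func **pfunc)
    {
        (void)dev;
        *pfunc = &g98_sec;
        return 0;
    }

    int main(void)
    {
        struct device dev = { .name = "g98" };
        const struct engine_func *func;

        if (sec_new(&dev, &func) == 0)
            func->intr(&dev);
        return 0;
    }
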
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
index bdc3a059..1c291e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/Kbuild
@@ -1,4 +1,9 @@
+nvkm-y += nvkm/engine/sw/base.o
 nvkm-y += nvkm/engine/sw/nv04.o
 nvkm-y += nvkm/engine/sw/nv10.o
 nvkm-y += nvkm/engine/sw/nv50.o
 nvkm-y += nvkm/engine/sw/gf100.o
+
+nvkm-y += nvkm/engine/sw/chan.o
+
+nvkm-y += nvkm/engine/sw/nvsw.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
new file mode 100644
index 0000000..53c1f7e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+#include "chan.h"
+
+#include <engine/fifo.h>
+
+bool
+nvkm_sw_mthd(struct nvkm_sw *sw, int chid, int subc, u32 mthd, u32 data)
+{
+	struct nvkm_sw_chan *chan;
+	bool handled = false;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sw->engine.lock, flags);
+	list_for_each_entry(chan, &sw->chan, head) {
+		if (chan->fifo->chid == chid) {
+			handled = nvkm_sw_chan_mthd(chan, subc, mthd, data);
+			list_del(&chan->head);
+			list_add(&chan->head, &sw->chan);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&sw->engine.lock, flags);
+	return handled;
+}
+
+static int
+nvkm_sw_oclass_new(const struct nvkm_oclass *oclass, void *data, u32 size,
+		   struct nvkm_object **pobject)
+{
+	struct nvkm_sw_chan *chan = nvkm_sw_chan(oclass->parent);
+	const struct nvkm_sw_chan_sclass *sclass = oclass->engn;
+	return sclass->ctor(chan, oclass, data, size, pobject);
+}
+
+static int
+nvkm_sw_oclass_get(struct nvkm_oclass *oclass, int index)
+{
+	struct nvkm_sw *sw = nvkm_sw(oclass->engine);
+	int c = 0;
+
+	while (sw->func->sclass[c].ctor) {
+		if (c++ == index) {
+			oclass->engn = &sw->func->sclass[index];
+			oclass->base =  sw->func->sclass[index].base;
+			oclass->base.ctor = nvkm_sw_oclass_new;
+			return index;
+		}
+	}
+
+	return c;
+}
+
+static int
+nvkm_sw_cclass_get(struct nvkm_fifo_chan *fifoch,
+		   const struct nvkm_oclass *oclass,
+		   struct nvkm_object **pobject)
+{
+	struct nvkm_sw *sw = nvkm_sw(oclass->engine);
+	return sw->func->chan_new(sw, fifoch, oclass, pobject);
+}
+
+static void *
+nvkm_sw_dtor(struct nvkm_engine *engine)
+{
+	return nvkm_sw(engine);
+}
+
+static const struct nvkm_engine_func
+nvkm_sw = {
+	.dtor = nvkm_sw_dtor,
+	.fifo.cclass = nvkm_sw_cclass_get,
+	.fifo.sclass = nvkm_sw_oclass_get,
+};
+
+int
+nvkm_sw_new_(const struct nvkm_sw_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_sw **psw)
+{
+	struct nvkm_sw *sw;
+
+	if (!(sw = *psw = kzalloc(sizeof(*sw), GFP_KERNEL)))
+		return -ENOMEM;
+	INIT_LIST_HEAD(&sw->chan);
+	sw->func = func;
+
+	return nvkm_engine_ctor(&nvkm_sw, device, index, 0, true, &sw->engine);
+}
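
nvkm_sw_mthd() walks sw->chan under the engine lock and, on a hit, rotates the matching channel to the front of the list (the list_del()/list_add() pair is equivalent to list_move()), so back-to-back methods from the same channel are found on the first compare. A standalone sketch of that move-to-front lookup, with a plain singly-linked list standing in for <linux/list.h>:

    #include <stdio.h>

    struct chan {
        int chid;
        struct chan *next;
    };

    static struct chan *lookup_mru(struct chan **head, int chid)
    {
        struct chan **pp = head, *c;

        for (c = *head; c; pp = &c->next, c = c->next) {
            if (c->chid == chid) {
                *pp = c->next;     /* unlink (list_del) */
                c->next = *head;   /* re-add at head (list_add) */
                *head = c;
                return c;
            }
        }
        return NULL;
    }

    int main(void)
    {
        struct chan c2 = { 2, NULL }, c1 = { 1, &c2 }, c0 = { 0, &c1 };
        struct chan *head = &c0;

        lookup_mru(&head, 2);  /* hit on channel 2 ... */
        printf("front is now chid %d\n", head->chid);  /* ... prints 2 */
        return 0;
    }
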
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
new file mode 100644
index 0000000..d082f4f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "chan.h"
+
+#include <core/notify.h>
+#include <engine/fifo.h>
+
+#include <nvif/event.h>
+#include <nvif/unpack.h>
+
+bool
+nvkm_sw_chan_mthd(struct nvkm_sw_chan *chan, int subc, u32 mthd, u32 data)
+{
+	switch (mthd) {
+	case 0x0000:
+		return true;
+	case 0x0500:
+		nvkm_event_send(&chan->event, 1, 0, NULL, 0);
+		return true;
+	default:
+		if (chan->func->mthd)
+			return chan->func->mthd(chan, subc, mthd, data);
+		break;
+	}
+	return false;
+}
+
+static int
+nvkm_sw_chan_event_ctor(struct nvkm_object *object, void *data, u32 size,
+			struct nvkm_notify *notify)
+{
+	union {
+		struct nvif_notify_uevent_req none;
+	} *req = data;
+	int ret;
+
+	if (nvif_unvers(req->none)) {
+		notify->size  = sizeof(struct nvif_notify_uevent_rep);
+		notify->types = 1;
+		notify->index = 0;
+	}
+
+	return ret;
+}
+
+static const struct nvkm_event_func
+nvkm_sw_chan_event = {
+	.ctor = nvkm_sw_chan_event_ctor,
+};
+
+static void *
+nvkm_sw_chan_dtor(struct nvkm_object *object)
+{
+	struct nvkm_sw_chan *chan = nvkm_sw_chan(object);
+	struct nvkm_sw *sw = chan->sw;
+	unsigned long flags;
+	void *data = chan;
+
+	if (chan->func->dtor)
+		data = chan->func->dtor(chan);
+	nvkm_event_fini(&chan->event);
+
+	spin_lock_irqsave(&sw->engine.lock, flags);
+	list_del(&chan->head);
+	spin_unlock_irqrestore(&sw->engine.lock, flags);
+	return data;
+}
+
+static const struct nvkm_object_func
+nvkm_sw_chan = {
+	.dtor = nvkm_sw_chan_dtor,
+};
+
+int
+nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *func, struct nvkm_sw *sw,
+		  struct nvkm_fifo_chan *fifo, const struct nvkm_oclass *oclass,
+		  struct nvkm_sw_chan *chan)
+{
+	unsigned long flags;
+
+	nvkm_object_ctor(&nvkm_sw_chan, oclass, &chan->object);
+	chan->func = func;
+	chan->sw = sw;
+	chan->fifo = fifo;
+	spin_lock_irqsave(&sw->engine.lock, flags);
+	list_add(&chan->head, &sw->chan);
+	spin_unlock_irqrestore(&sw->engine.lock, flags);
+
+	return nvkm_event_init(&nvkm_sw_chan_event, 1, 1, &chan->event);
+}
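
Channel lifetime here is symmetric: the constructor links the channel onto sw->chan under the engine lock, and the destructor unlinks it under the same lock before handing the pointer back for kfree(). (Note that `ret` in nvkm_sw_chan_event_ctor() only looks uninitialized; in this era of the nvif headers, nvif_unvers() expands to an expression that assigns it.) A simplified standalone model of the locked add/remove, with a pthread mutex standing in for the engine spinlock:

    #include <pthread.h>
    #include <stdlib.h>

    struct chan {
        struct chan *next;
        struct chan **pprev;
    };

    static pthread_mutex_t engine_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct chan *chan_list;

    static struct chan *chan_ctor(void)
    {
        struct chan *chan = calloc(1, sizeof(*chan));

        if (!chan)
            return NULL;
        pthread_mutex_lock(&engine_lock);
        chan->next = chan_list;          /* link at head, under the lock */
        chan->pprev = &chan_list;
        if (chan_list)
            chan_list->pprev = &chan->next;
        chan_list = chan;
        pthread_mutex_unlock(&engine_lock);
        return chan;
    }

    static void chan_dtor(struct chan *chan)
    {
        pthread_mutex_lock(&engine_lock);
        *chan->pprev = chan->next;       /* unlink under the same lock */
        if (chan->next)
            chan->next->pprev = chan->pprev;
        pthread_mutex_unlock(&engine_lock);
        free(chan);  /* the nvkm dtor instead returns the pointer for kfree */
    }

    int main(void)
    {
        struct chan *c = chan_ctor();

        if (c)
            chan_dtor(c);
        return 0;
    }
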
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
new file mode 100644
index 0000000..6608bf6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_SW_CHAN_H__
+#define __NVKM_SW_CHAN_H__
+#define nvkm_sw_chan(p) container_of((p), struct nvkm_sw_chan, object)
+#include "priv.h"
+#include <core/event.h>
+
+struct nvkm_sw_chan {
+	const struct nvkm_sw_chan_func *func;
+	struct nvkm_object object;
+	struct nvkm_sw *sw;
+	struct nvkm_fifo_chan *fifo;
+	struct list_head head;
+
+	struct nvkm_event event;
+};
+
+struct nvkm_sw_chan_func {
+	void *(*dtor)(struct nvkm_sw_chan *);
+	bool (*mthd)(struct nvkm_sw_chan *, int subc, u32 mthd, u32 data);
+};
+
+int nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *, struct nvkm_sw *,
+		      struct nvkm_fifo_chan *, const struct nvkm_oclass *,
+		      struct nvkm_sw_chan *);
+bool nvkm_sw_chan_mthd(struct nvkm_sw_chan *, int subc, u32 mthd, u32 data);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
index 533d5d8..b01ef7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
@@ -23,119 +23,133 @@
  */
 #include "nv50.h"
 
+#include <core/gpuobj.h>
 #include <subdev/bar.h>
+#include <engine/disp.h>
+#include <engine/fifo.h>
 
-/*******************************************************************************
- * software object classes
- ******************************************************************************/
-
-static int
-gf100_sw_mthd_vblsem_offset(struct nvkm_object *object, u32 mthd,
-			    void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	u64 data = *(u32 *)args;
-	if (mthd == 0x0400) {
-		chan->vblank.offset &= 0x00ffffffffULL;
-		chan->vblank.offset |= data << 32;
-	} else {
-		chan->vblank.offset &= 0xff00000000ULL;
-		chan->vblank.offset |= data;
-	}
-	return 0;
-}
-
-static int
-gf100_sw_mthd_mp_control(struct nvkm_object *object, u32 mthd,
-			 void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine;
-	u32 data = *(u32 *)args;
-
-	switch (mthd) {
-	case 0x600:
-		nv_wr32(priv, 0x419e00, data); /* MP.PM_UNK000 */
-		break;
-	case 0x644:
-		if (data & ~0x1ffffe)
-			return -EINVAL;
-		nv_wr32(priv, 0x419e44, data); /* MP.TRAP_WARP_ERROR_EN */
-		break;
-	case 0x6ac:
-		nv_wr32(priv, 0x419eac, data); /* MP.PM_UNK0AC */
-		break;
-	default:
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static struct nvkm_omthds
-gf100_sw_omthds[] = {
-	{ 0x0400, 0x0400, gf100_sw_mthd_vblsem_offset },
-	{ 0x0404, 0x0404, gf100_sw_mthd_vblsem_offset },
-	{ 0x0408, 0x0408, nv50_sw_mthd_vblsem_value },
-	{ 0x040c, 0x040c, nv50_sw_mthd_vblsem_release },
-	{ 0x0500, 0x0500, nv50_sw_mthd_flip },
-	{ 0x0600, 0x0600, gf100_sw_mthd_mp_control },
-	{ 0x0644, 0x0644, gf100_sw_mthd_mp_control },
-	{ 0x06ac, 0x06ac, gf100_sw_mthd_mp_control },
-	{}
-};
-
-static struct nvkm_oclass
-gf100_sw_sclass[] = {
-	{ 0x906e, &nvkm_object_ofuncs, gf100_sw_omthds },
-	{}
-};
+#include <nvif/event.h>
+#include <nvif/ioctl.h>
 
 /*******************************************************************************
  * software context
  ******************************************************************************/
 
 static int
-gf100_sw_vblsem_release(struct nvkm_notify *notify)
+gf100_sw_chan_vblsem_release(struct nvkm_notify *notify)
 {
 	struct nv50_sw_chan *chan =
 		container_of(notify, typeof(*chan), vblank.notify[notify->index]);
-	struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine;
-	struct nvkm_bar *bar = nvkm_bar(priv);
+	struct nvkm_sw *sw = chan->base.sw;
+	struct nvkm_device *device = sw->engine.subdev.device;
+	u32 inst = chan->base.fifo->inst->addr >> 12;
 
-	nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
-	bar->flush(bar);
-	nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
-	nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
-	nv_wr32(priv, 0x060014, chan->vblank.value);
+	nvkm_wr32(device, 0x001718, 0x80000000 | inst);
+	nvkm_bar_flush(device->bar);
+	nvkm_wr32(device, 0x06000c, upper_32_bits(chan->vblank.offset));
+	nvkm_wr32(device, 0x060010, lower_32_bits(chan->vblank.offset));
+	nvkm_wr32(device, 0x060014, chan->vblank.value);
 
 	return NVKM_NOTIFY_DROP;
 }
 
-static struct nv50_sw_cclass
-gf100_sw_cclass = {
-	.base.handle = NV_ENGCTX(SW, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_sw_context_ctor,
-		.dtor = nv50_sw_context_dtor,
-		.init = _nvkm_sw_context_init,
-		.fini = _nvkm_sw_context_fini,
-	},
-	.vblank = gf100_sw_vblsem_release,
+static bool
+gf100_sw_chan_mthd(struct nvkm_sw_chan *base, int subc, u32 mthd, u32 data)
+{
+	struct nv50_sw_chan *chan = nv50_sw_chan(base);
+	struct nvkm_engine *engine = chan->base.object.engine;
+	struct nvkm_device *device = engine->subdev.device;
+	switch (mthd) {
+	case 0x0400:
+		chan->vblank.offset &= 0x00ffffffffULL;
+		chan->vblank.offset |= (u64)data << 32;
+		return true;
+	case 0x0404:
+		chan->vblank.offset &= 0xff00000000ULL;
+		chan->vblank.offset |= data;
+		return true;
+	case 0x0408:
+		chan->vblank.value = data;
+		return true;
+	case 0x040c:
+		if (data < device->disp->vblank.index_nr) {
+			nvkm_notify_get(&chan->vblank.notify[data]);
+			return true;
+		}
+		break;
+	case 0x600: /* MP.PM_UNK000 */
+		nvkm_wr32(device, 0x419e00, data);
+		return true;
+	case 0x644: /* MP.TRAP_WARP_ERROR_EN */
+		if (!(data & ~0x001ffffe)) {
+			nvkm_wr32(device, 0x419e44, data);
+			return true;
+		}
+		break;
+	case 0x6ac: /* MP.PM_UNK0AC */
+		nvkm_wr32(device, 0x419eac, data);
+		return true;
+	default:
+		break;
+	}
+	return false;
+}
+
+static const struct nvkm_sw_chan_func
+gf100_sw_chan = {
+	.dtor = nv50_sw_chan_dtor,
+	.mthd = gf100_sw_chan_mthd,
 };
 
+static int
+gf100_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifoch,
+		  const struct nvkm_oclass *oclass,
+		  struct nvkm_object **pobject)
+{
+	struct nvkm_disp *disp = sw->engine.subdev.device->disp;
+	struct nv50_sw_chan *chan;
+	int ret, i;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_sw_chan_ctor(&gf100_sw_chan, sw, fifoch, oclass,
+				&chan->base);
+	if (ret)
+		return ret;
+
+	for (i = 0; disp && i < disp->vblank.index_nr; i++) {
+		ret = nvkm_notify_init(NULL, &disp->vblank,
+				       gf100_sw_chan_vblsem_release, false,
+				       &(struct nvif_notify_head_req_v0) {
+					.head = i,
+				       },
+				       sizeof(struct nvif_notify_head_req_v0),
+				       sizeof(struct nvif_notify_head_rep_v0),
+				       &chan->vblank.notify[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 /*******************************************************************************
  * software engine/subdev functions
  ******************************************************************************/
 
-struct nvkm_oclass *
-gf100_sw_oclass = &(struct nv50_sw_oclass) {
-	.base.handle = NV_ENGINE(SW, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_sw_ctor,
-		.dtor = _nvkm_sw_dtor,
-		.init = _nvkm_sw_init,
-		.fini = _nvkm_sw_fini,
-	},
-	.cclass = &gf100_sw_cclass.base,
-	.sclass =  gf100_sw_sclass,
-}.base;
+static const struct nvkm_sw_func
+gf100_sw = {
+	.chan_new = gf100_sw_chan_new,
+	.sclass = {
+		{ nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_GF100 } },
+		{}
+	}
+};
+
+int
+gf100_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+{
+	return nvkm_sw_new_(&gf100_sw, device, index, psw);
+}
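
gf100_sw_chan_mthd() assembles the 64-bit vblank semaphore offset from two 32-bit method writes: 0x0400 carries the high word and 0x0404 the low word, and the (u64) cast before the shift is what keeps the high bits from being dropped. A standalone sketch of the assembly:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t offset;

    static void mthd(uint32_t m, uint32_t data)
    {
        switch (m) {
        case 0x0400:  /* high word of the semaphore offset */
            offset &= 0x00ffffffffULL;
            offset |= (uint64_t)data << 32;  /* cast before shift */
            break;
        case 0x0404:  /* low word */
            offset &= 0xff00000000ULL;
            offset |= data;
            break;
        }
    }

    int main(void)
    {
        mthd(0x0400, 0x12);
        mthd(0x0404, 0x34567890);
        printf("0x%llx\n", (unsigned long long)offset);  /* 0x1234567890 */
        return 0;
    }
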
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
index 8970244..445217f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
@@ -21,15 +21,18 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/sw.h>
-#include <engine/fifo.h>
+#define nv04_sw_chan(p) container_of((p), struct nv04_sw_chan, base)
+#include "priv.h"
+#include "chan.h"
+#include "nvsw.h"
 
-struct nv04_sw_priv {
-	struct nvkm_sw base;
-};
+#include <nvif/class.h>
+#include <nvif/ioctl.h>
+#include <nvif/unpack.h>
 
 struct nv04_sw_chan {
 	struct nvkm_sw_chan base;
+	atomic_t ref;
 };
 
 /*******************************************************************************
@@ -37,103 +40,99 @@
  ******************************************************************************/
 
 static int
-nv04_sw_set_ref(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+nv04_nvsw_mthd_get_ref(struct nvkm_nvsw *nvsw, void *data, u32 size)
 {
-	struct nvkm_object *channel = (void *)nv_engctx(object->parent);
-	struct nvkm_fifo_chan *fifo = (void *)channel->parent;
-	atomic_set(&fifo->refcnt, *(u32*)data);
-	return 0;
+	struct nv04_sw_chan *chan = nv04_sw_chan(nvsw->chan);
+	union {
+		struct nv04_nvsw_get_ref_v0 v0;
+	} *args = data;
+	int ret;
+
+	if (nvif_unpack(args->v0, 0, 0, false)) {
+		args->v0.ref = atomic_read(&chan->ref);
+	}
+
+	return ret;
 }
 
 static int
-nv04_sw_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size)
+nv04_nvsw_mthd(struct nvkm_nvsw *nvsw, u32 mthd, void *data, u32 size)
 {
-	struct nv04_sw_chan *chan = (void *)nv_engctx(object->parent);
-	if (chan->base.flip)
-		return chan->base.flip(chan->base.flip_data);
+	switch (mthd) {
+	case NV04_NVSW_GET_REF:
+		return nv04_nvsw_mthd_get_ref(nvsw, data, size);
+	default:
+		break;
+	}
 	return -EINVAL;
 }
 
-static struct nvkm_omthds
-nv04_sw_omthds[] = {
-	{ 0x0150, 0x0150, nv04_sw_set_ref },
-	{ 0x0500, 0x0500, nv04_sw_flip },
-	{}
+static const struct nvkm_nvsw_func
+nv04_nvsw = {
+	.mthd = nv04_nvsw_mthd,
 };
 
-static struct nvkm_oclass
-nv04_sw_sclass[] = {
-	{ 0x006e, &nvkm_object_ofuncs, nv04_sw_omthds },
-	{}
-};
+static int
+nv04_nvsw_new(struct nvkm_sw_chan *chan, const struct nvkm_oclass *oclass,
+	      void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nvkm_nvsw_new_(&nv04_nvsw, chan, oclass, data, size, pobject);
+}
 
 /*******************************************************************************
  * software context
  ******************************************************************************/
 
-static int
-nv04_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+static bool
+nv04_sw_chan_mthd(struct nvkm_sw_chan *base, int subc, u32 mthd, u32 data)
 {
-	struct nv04_sw_chan *chan;
-	int ret;
+	struct nv04_sw_chan *chan = nv04_sw_chan(base);
 
-	ret = nvkm_sw_context_create(parent, engine, oclass, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
+	switch (mthd) {
+	case 0x0150:
+		atomic_set(&chan->ref, data);
+		return true;
+	default:
+		break;
+	}
 
-	return 0;
+	return false;
 }
 
-static struct nvkm_oclass
-nv04_sw_cclass = {
-	.handle = NV_ENGCTX(SW, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_sw_context_ctor,
-		.dtor = _nvkm_sw_context_dtor,
-		.init = _nvkm_sw_context_init,
-		.fini = _nvkm_sw_context_fini,
-	},
+static const struct nvkm_sw_chan_func
+nv04_sw_chan = {
+	.mthd = nv04_sw_chan_mthd,
 };
 
+static int
+nv04_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifo,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
+{
+	struct nv04_sw_chan *chan;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	atomic_set(&chan->ref, 0);
+	*pobject = &chan->base.object;
+
+	return nvkm_sw_chan_ctor(&nv04_sw_chan, sw, fifo, oclass, &chan->base);
+}
+
 /*******************************************************************************
  * software engine/subdev functions
  ******************************************************************************/
 
-void
-nv04_sw_intr(struct nvkm_subdev *subdev)
-{
-	nv_mask(subdev, 0x000100, 0x80000000, 0x00000000);
-}
-
-static int
-nv04_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv04_sw_priv *priv;
-	int ret;
-
-	ret = nvkm_sw_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->cclass = &nv04_sw_cclass;
-	nv_engine(priv)->sclass = nv04_sw_sclass;
-	nv_subdev(priv)->intr = nv04_sw_intr;
-	return 0;
-}
-
-struct nvkm_oclass *
-nv04_sw_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(SW, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_sw_ctor,
-		.dtor = _nvkm_sw_dtor,
-		.init = _nvkm_sw_init,
-		.fini = _nvkm_sw_fini,
-	},
+static const struct nvkm_sw_func
+nv04_sw = {
+	.chan_new = nv04_sw_chan_new,
+	.sclass = {
+		{ nv04_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV04 } },
+		{}
+	}
 };
+
+int
+nv04_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+{
+	return nvkm_sw_new_(&nv04_sw, device, index, psw);
+}
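
On nv04, software method 0x0150 stores a reference value into an atomic_t on the channel, and the NV04_NVSW_GET_REF object method reads it back through the nvif unpack machinery (as in chan.c, `ret` is assigned inside the nvif_unpack() macro). A minimal model of the store/load pair, with C11 atomics standing in for the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint ref;

    static void sw_mthd_0150(unsigned data)
    {
        atomic_store(&ref, data);   /* atomic_set(&chan->ref, data) */
    }

    static unsigned nvsw_get_ref(void)
    {
        return atomic_load(&ref);   /* atomic_read(&chan->ref) */
    }

    int main(void)
    {
        sw_mthd_0150(42);
        printf("%u\n", nvsw_get_ref());
        return 0;
    }
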
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
index c61153a..adf70d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
@@ -21,102 +21,48 @@
  *
  * Authors: Ben Skeggs
  */
-#include <engine/sw.h>
+#include "priv.h"
+#include "chan.h"
+#include "nvsw.h"
 
-struct nv10_sw_priv {
-	struct nvkm_sw base;
-};
-
-struct nv10_sw_chan {
-	struct nvkm_sw_chan base;
-};
-
-/*******************************************************************************
- * software object classes
- ******************************************************************************/
-
-static int
-nv10_sw_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size)
-{
-	struct nv10_sw_chan *chan = (void *)nv_engctx(object->parent);
-	if (chan->base.flip)
-		return chan->base.flip(chan->base.flip_data);
-	return -EINVAL;
-}
-
-static struct nvkm_omthds
-nv10_sw_omthds[] = {
-	{ 0x0500, 0x0500, nv10_sw_flip },
-	{}
-};
-
-static struct nvkm_oclass
-nv10_sw_sclass[] = {
-	{ 0x016e, &nvkm_object_ofuncs, nv10_sw_omthds },
-	{}
-};
+#include <nvif/ioctl.h>
 
 /*******************************************************************************
  * software context
  ******************************************************************************/
 
-static int
-nv10_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
-{
-	struct nv10_sw_chan *chan;
-	int ret;
-
-	ret = nvkm_sw_context_create(parent, engine, oclass, &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static struct nvkm_oclass
-nv10_sw_cclass = {
-	.handle = NV_ENGCTX(SW, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_sw_context_ctor,
-		.dtor = _nvkm_sw_context_dtor,
-		.init = _nvkm_sw_context_init,
-		.fini = _nvkm_sw_context_fini,
-	},
+static const struct nvkm_sw_chan_func
+nv10_sw_chan = {
 };
 
+static int
+nv10_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifo,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
+{
+	struct nvkm_sw_chan *chan;
+
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->object;
+
+	return nvkm_sw_chan_ctor(&nv10_sw_chan, sw, fifo, oclass, chan);
+}
+
 /*******************************************************************************
  * software engine/subdev functions
  ******************************************************************************/
 
-static int
-nv10_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
-{
-	struct nv10_sw_priv *priv;
-	int ret;
-
-	ret = nvkm_sw_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->cclass = &nv10_sw_cclass;
-	nv_engine(priv)->sclass = nv10_sw_sclass;
-	nv_subdev(priv)->intr = nv04_sw_intr;
-	return 0;
-}
-
-struct nvkm_oclass *
-nv10_sw_oclass = &(struct nvkm_oclass) {
-	.handle = NV_ENGINE(SW, 0x10),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_sw_ctor,
-		.dtor = _nvkm_sw_dtor,
-		.init = _nvkm_sw_init,
-		.fini = _nvkm_sw_fini,
-	},
+static const struct nvkm_sw_func
+nv10_sw = {
+	.chan_new = nv10_sw_chan_new,
+	.sclass = {
+		{ nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV10 } },
+		{}
+	}
 };
+
+int
+nv10_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
+{
+	return nvkm_sw_new_(&nv10_sw, device, index, psw);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
index 401fcd7..a381196 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
@@ -23,153 +23,98 @@
  */
 #include "nv50.h"
 
-#include <core/device.h>
-#include <core/handle.h>
-#include <core/namedb.h>
+#include <core/gpuobj.h>
 #include <engine/disp.h>
+#include <engine/fifo/chan.h>
 #include <subdev/bar.h>
 
 #include <nvif/event.h>
-
-/*******************************************************************************
- * software object classes
- ******************************************************************************/
-
-static int
-nv50_sw_mthd_dma_vblsem(struct nvkm_object *object, u32 mthd,
-			void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	struct nvkm_fifo_chan *fifo = (void *)nv_object(chan)->parent;
-	struct nvkm_handle *handle;
-	int ret = -EINVAL;
-
-	handle = nvkm_namedb_get(nv_namedb(fifo), *(u32 *)args);
-	if (!handle)
-		return -ENOENT;
-
-	if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
-		struct nvkm_gpuobj *gpuobj = nv_gpuobj(handle->object);
-		chan->vblank.ctxdma = gpuobj->node->offset >> 4;
-		ret = 0;
-	}
-	nvkm_namedb_put(handle);
-	return ret;
-}
-
-static int
-nv50_sw_mthd_vblsem_offset(struct nvkm_object *object, u32 mthd,
-			   void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	chan->vblank.offset = *(u32 *)args;
-	return 0;
-}
-
-int
-nv50_sw_mthd_vblsem_value(struct nvkm_object *object, u32 mthd,
-			  void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	chan->vblank.value = *(u32 *)args;
-	return 0;
-}
-
-int
-nv50_sw_mthd_vblsem_release(struct nvkm_object *object, u32 mthd,
-			    void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	u32 head = *(u32 *)args;
-	if (head >= nvkm_disp(chan)->vblank.index_nr)
-		return -EINVAL;
-
-	nvkm_notify_get(&chan->vblank.notify[head]);
-	return 0;
-}
-
-int
-nv50_sw_mthd_flip(struct nvkm_object *object, u32 mthd, void *args, u32 size)
-{
-	struct nv50_sw_chan *chan = (void *)nv_engctx(object->parent);
-	if (chan->base.flip)
-		return chan->base.flip(chan->base.flip_data);
-	return -EINVAL;
-}
-
-static struct nvkm_omthds
-nv50_sw_omthds[] = {
-	{ 0x018c, 0x018c, nv50_sw_mthd_dma_vblsem },
-	{ 0x0400, 0x0400, nv50_sw_mthd_vblsem_offset },
-	{ 0x0404, 0x0404, nv50_sw_mthd_vblsem_value },
-	{ 0x0408, 0x0408, nv50_sw_mthd_vblsem_release },
-	{ 0x0500, 0x0500, nv50_sw_mthd_flip },
-	{}
-};
-
-static struct nvkm_oclass
-nv50_sw_sclass[] = {
-	{ 0x506e, &nvkm_object_ofuncs, nv50_sw_omthds },
-	{}
-};
+#include <nvif/ioctl.h>
 
 /*******************************************************************************
  * software context
  ******************************************************************************/
 
 static int
-nv50_sw_vblsem_release(struct nvkm_notify *notify)
+nv50_sw_chan_vblsem_release(struct nvkm_notify *notify)
 {
 	struct nv50_sw_chan *chan =
 		container_of(notify, typeof(*chan), vblank.notify[notify->index]);
-	struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine;
-	struct nvkm_bar *bar = nvkm_bar(priv);
+	struct nvkm_sw *sw = chan->base.sw;
+	struct nvkm_device *device = sw->engine.subdev.device;
 
-	nv_wr32(priv, 0x001704, chan->vblank.channel);
-	nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
-	bar->flush(bar);
+	nvkm_wr32(device, 0x001704, chan->base.fifo->inst->addr >> 12);
+	nvkm_wr32(device, 0x001710, 0x80000000 | chan->vblank.ctxdma);
+	nvkm_bar_flush(device->bar);
 
-	if (nv_device(priv)->chipset == 0x50) {
-		nv_wr32(priv, 0x001570, chan->vblank.offset);
-		nv_wr32(priv, 0x001574, chan->vblank.value);
+	if (device->chipset == 0x50) {
+		nvkm_wr32(device, 0x001570, chan->vblank.offset);
+		nvkm_wr32(device, 0x001574, chan->vblank.value);
 	} else {
-		nv_wr32(priv, 0x060010, chan->vblank.offset);
-		nv_wr32(priv, 0x060014, chan->vblank.value);
+		nvkm_wr32(device, 0x060010, chan->vblank.offset);
+		nvkm_wr32(device, 0x060014, chan->vblank.value);
 	}
 
 	return NVKM_NOTIFY_DROP;
 }
 
-void
-nv50_sw_context_dtor(struct nvkm_object *object)
+static bool
+nv50_sw_chan_mthd(struct nvkm_sw_chan *base, int subc, u32 mthd, u32 data)
 {
-	struct nv50_sw_chan *chan = (void *)object;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(chan->vblank.notify); i++)
-		nvkm_notify_fini(&chan->vblank.notify[i]);
-
-	nvkm_sw_context_destroy(&chan->base);
+	struct nv50_sw_chan *chan = nv50_sw_chan(base);
+	struct nvkm_engine *engine = chan->base.object.engine;
+	struct nvkm_device *device = engine->subdev.device;
+	switch (mthd) {
+	case 0x018c: chan->vblank.ctxdma = data; return true;
+	case 0x0400: chan->vblank.offset = data; return true;
+	case 0x0404: chan->vblank.value  = data; return true;
+	case 0x0408:
+		if (data < device->disp->vblank.index_nr) {
+			nvkm_notify_get(&chan->vblank.notify[data]);
+			return true;
+		}
+		break;
+	default:
+		break;
+	}
+	return false;
 }
 
-int
-nv50_sw_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, void *data, u32 size,
-		     struct nvkm_object **pobject)
+void *
+nv50_sw_chan_dtor(struct nvkm_sw_chan *base)
 {
-	struct nvkm_disp *pdisp = nvkm_disp(parent);
-	struct nv50_sw_cclass *pclass = (void *)oclass;
+	struct nv50_sw_chan *chan = nv50_sw_chan(base);
+	int i;
+	for (i = 0; i < ARRAY_SIZE(chan->vblank.notify); i++)
+		nvkm_notify_fini(&chan->vblank.notify[i]);
+	return chan;
+}
+
+static const struct nvkm_sw_chan_func
+nv50_sw_chan = {
+	.dtor = nv50_sw_chan_dtor,
+	.mthd = nv50_sw_chan_mthd,
+};
+
+static int
+nv50_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifoch,
+		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
+{
+	struct nvkm_disp *disp = sw->engine.subdev.device->disp;
 	struct nv50_sw_chan *chan;
 	int ret, i;
 
-	ret = nvkm_sw_context_create(parent, engine, oclass, &chan);
-	*pobject = nv_object(chan);
+	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &chan->base.object;
+
+	ret = nvkm_sw_chan_ctor(&nv50_sw_chan, sw, fifoch, oclass, &chan->base);
 	if (ret)
 		return ret;
 
-	for (i = 0; pdisp && i < pdisp->vblank.index_nr; i++) {
-		ret = nvkm_notify_init(NULL, &pdisp->vblank, pclass->vblank,
-				       false,
+	for (i = 0; disp && i < disp->vblank.index_nr; i++) {
+		ret = nvkm_notify_init(NULL, &disp->vblank,
+				       nv50_sw_chan_vblsem_release, false,
 				       &(struct nvif_notify_head_req_v0) {
 					.head = i,
 				       },
@@ -180,55 +125,24 @@
 			return ret;
 	}
 
-	chan->vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
 	return 0;
 }
 
-static struct nv50_sw_cclass
-nv50_sw_cclass = {
-	.base.handle = NV_ENGCTX(SW, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_sw_context_ctor,
-		.dtor = nv50_sw_context_dtor,
-		.init = _nvkm_sw_context_init,
-		.fini = _nvkm_sw_context_fini,
-	},
-	.vblank = nv50_sw_vblsem_release,
-};
-
 /*******************************************************************************
  * software engine/subdev functions
  ******************************************************************************/
 
+static const struct nvkm_sw_func
+nv50_sw = {
+	.chan_new = nv50_sw_chan_new,
+	.sclass = {
+		{ nvkm_nvsw_new, { -1, -1, NVIF_IOCTL_NEW_V0_SW_NV50 } },
+		{}
+	}
+};
+
 int
-nv50_sw_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+nv50_sw_new(struct nvkm_device *device, int index, struct nvkm_sw **psw)
 {
-	struct nv50_sw_oclass *pclass = (void *)oclass;
-	struct nv50_sw_priv *priv;
-	int ret;
-
-	ret = nvkm_sw_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_engine(priv)->cclass = pclass->cclass;
-	nv_engine(priv)->sclass = pclass->sclass;
-	nv_subdev(priv)->intr = nv04_sw_intr;
-	return 0;
+	return nvkm_sw_new_(&nv50_sw, device, index, psw);
 }
-
-struct nvkm_oclass *
-nv50_sw_oclass = &(struct nv50_sw_oclass) {
-	.base.handle = NV_ENGINE(SW, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_sw_ctor,
-		.dtor = _nvkm_sw_dtor,
-		.init = _nvkm_sw_init,
-		.fini = _nvkm_sw_fini,
-	},
-	.cclass = &nv50_sw_cclass.base,
-	.sclass =  nv50_sw_sclass,
-}.base;
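
nv50_sw_chan_dtor() finalizes the vblank notifiers and then returns the *container* pointer, which is what the core ultimately kfree()s — this matters whenever the embedded base is not the first member of the containing struct. A standalone sketch of that convention (the base is deliberately placed last here, purely to show why returning the container matters):

    #include <stddef.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct base { int dummy; };

    struct nv50_chan {
        int notify[4];     /* stand-in for the vblank notifiers */
        struct base base;  /* base deliberately not first */
    };

    static void *nv50_chan_dtor(struct base *b)
    {
        struct nv50_chan *chan = container_of(b, struct nv50_chan, base);
        /* nvkm_notify_fini() on each notifier would go here */
        return chan;  /* correct address for free(); b is offset into it */
    }

    int main(void)
    {
        struct nv50_chan *chan = calloc(1, sizeof(*chan));

        if (chan)
            free(nv50_chan_dtor(&chan->base));
        return 0;
    }
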
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
index d8adc11..25cdfde 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
@@ -1,45 +1,20 @@
 #ifndef __NVKM_SW_NV50_H__
 #define __NVKM_SW_NV50_H__
-#include <engine/sw.h>
+#define nv50_sw_chan(p) container_of((p), struct nv50_sw_chan, base)
+#include "priv.h"
+#include "chan.h"
+#include "nvsw.h"
 #include <core/notify.h>
 
-struct nv50_sw_oclass {
-	struct nvkm_oclass base;
-	struct nvkm_oclass *cclass;
-	struct nvkm_oclass *sclass;
-};
-
-struct nv50_sw_priv {
-	struct nvkm_sw base;
-};
-
-int  nv50_sw_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-
-struct nv50_sw_cclass {
-	struct nvkm_oclass base;
-	int (*vblank)(struct nvkm_notify *);
-};
-
 struct nv50_sw_chan {
 	struct nvkm_sw_chan base;
 	struct {
 		struct nvkm_notify notify[4];
-		u32 channel;
 		u32 ctxdma;
 		u64 offset;
 		u32 value;
 	} vblank;
 };
 
-int  nv50_sw_context_ctor(struct nvkm_object *,
-				struct nvkm_object *,
-				struct nvkm_oclass *, void *, u32,
-				struct nvkm_object **);
-void nv50_sw_context_dtor(struct nvkm_object *);
-
-int nv50_sw_mthd_vblsem_value(struct nvkm_object *, u32, void *, u32);
-int nv50_sw_mthd_vblsem_release(struct nvkm_object *, u32, void *, u32);
-int nv50_sw_mthd_flip(struct nvkm_object *, u32, void *, u32);
+void *nv50_sw_chan_dtor(struct nvkm_sw_chan *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c
new file mode 100644
index 0000000..66cf986
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nvsw.h"
+#include "chan.h"
+
+#include <nvif/class.h>
+
+static int
+nvkm_nvsw_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
+{
+	struct nvkm_nvsw *nvsw = nvkm_nvsw(object);
+	if (nvsw->func->mthd)
+		return nvsw->func->mthd(nvsw, mthd, data, size);
+	return -ENODEV;
+}
+
+static int
+nvkm_nvsw_ntfy_(struct nvkm_object *object, u32 mthd,
+		struct nvkm_event **pevent)
+{
+	struct nvkm_nvsw *nvsw = nvkm_nvsw(object);
+	switch (mthd) {
+	case NVSW_NTFY_UEVENT:
+		*pevent = &nvsw->chan->event;
+		return 0;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static const struct nvkm_object_func
+nvkm_nvsw_ = {
+	.mthd = nvkm_nvsw_mthd_,
+	.ntfy = nvkm_nvsw_ntfy_,
+};
+
+int
+nvkm_nvsw_new_(const struct nvkm_nvsw_func *func, struct nvkm_sw_chan *chan,
+	       const struct nvkm_oclass *oclass, void *data, u32 size,
+	       struct nvkm_object **pobject)
+{
+	struct nvkm_nvsw *nvsw;
+
+	if (!(nvsw = kzalloc(sizeof(*nvsw), GFP_KERNEL)))
+		return -ENOMEM;
+	*pobject = &nvsw->object;
+
+	nvkm_object_ctor(&nvkm_nvsw_, oclass, &nvsw->object);
+	nvsw->func = func;
+	nvsw->chan = chan;
+	return 0;
+}
+
+static const struct nvkm_nvsw_func
+nvkm_nvsw = {
+};
+
+int
+nvkm_nvsw_new(struct nvkm_sw_chan *chan, const struct nvkm_oclass *oclass,
+	      void *data, u32 size, struct nvkm_object **pobject)
+{
+	return nvkm_nvsw_new_(&nvkm_nvsw, chan, oclass, data, size, pobject);
+}
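
nvkm_nvsw_mthd_() is an optional-hook dispatcher: it forwards to the per-chipset mthd implementation when one exists, and otherwise fails with -ENODEV rather than calling through NULL (the base nvkm_nvsw func table is intentionally empty). A minimal standalone model:

    #include <errno.h>
    #include <stdio.h>

    struct nvsw_func {
        int (*mthd)(unsigned mthd, void *data);
    };

    struct nvsw {
        const struct nvsw_func *func;
    };

    static int nvsw_mthd(struct nvsw *nvsw, unsigned mthd, void *data)
    {
        if (nvsw->func->mthd)
            return nvsw->func->mthd(mthd, data);
        return -ENODEV;  /* base class implements no methods */
    }

    /* empty func table, like the base nvkm_nvsw above */
    static const struct nvsw_func base_nvsw = { .mthd = NULL };

    int main(void)
    {
        struct nvsw obj = { .func = &base_nvsw };

        printf("%d\n", nvsw_mthd(&obj, 0x150, NULL));  /* -ENODEV */
        return 0;
    }
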
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
new file mode 100644
index 0000000..943ef4c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
@@ -0,0 +1,21 @@
+#ifndef __NVKM_NVSW_H__
+#define __NVKM_NVSW_H__
+#define nvkm_nvsw(p) container_of((p), struct nvkm_nvsw, object)
+#include "priv.h"
+
+struct nvkm_nvsw {
+	struct nvkm_object object;
+	const struct nvkm_nvsw_func *func;
+	struct nvkm_sw_chan *chan;
+};
+
+struct nvkm_nvsw_func {
+	int (*mthd)(struct nvkm_nvsw *, u32 mthd, void *data, u32 size);
+};
+
+int nvkm_nvsw_new_(const struct nvkm_nvsw_func *, struct nvkm_sw_chan *,
+		   const struct nvkm_oclass *, void *data, u32 size,
+		   struct nvkm_object **pobject);
+int nvkm_nvsw_new(struct nvkm_sw_chan *, const struct nvkm_oclass *,
+		  void *data, u32 size, struct nvkm_object **pobject);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
new file mode 100644
index 0000000..0ef1318
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
@@ -0,0 +1,21 @@
+#ifndef __NVKM_SW_PRIV_H__
+#define __NVKM_SW_PRIV_H__
+#define nvkm_sw(p) container_of((p), struct nvkm_sw, engine)
+#include <engine/sw.h>
+struct nvkm_sw_chan;
+
+int nvkm_sw_new_(const struct nvkm_sw_func *, struct nvkm_device *,
+		 int index, struct nvkm_sw **);
+
+struct nvkm_sw_chan_sclass {
+	int (*ctor)(struct nvkm_sw_chan *, const struct nvkm_oclass *,
+		    void *data, u32 size, struct nvkm_object **);
+	struct nvkm_sclass base;
+};
+
+struct nvkm_sw_func {
+	int (*chan_new)(struct nvkm_sw *, struct nvkm_fifo_chan *,
+			const struct nvkm_oclass *, struct nvkm_object **);
+	const struct nvkm_sw_chan_sclass sclass[];
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
index 45f4e18..4188c77 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
@@ -22,72 +22,23 @@
  * Authors: Ben Skeggs, Ilia Mirkin
  */
 #include <engine/vp.h>
-#include <engine/xtensa.h>
 
-#include <core/engctx.h>
+#include <nvif/class.h>
 
-/*******************************************************************************
- * VP object classes
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_vp_sclass[] = {
-	{ 0x7476, &nvkm_object_ofuncs },
-	{},
+static const struct nvkm_xtensa_func
+g84_vp = {
+	.pmc_enable = 0x01020000,
+	.fifo_val = 0x111,
+	.unkd28 = 0x9c544,
+	.sclass = {
+		{ -1, -1, NV74_VP2 },
+		{}
+	}
 };
 
-/*******************************************************************************
- * PVP context
- ******************************************************************************/
-
-static struct nvkm_oclass
-g84_vp_cclass = {
-	.handle = NV_ENGCTX(VP, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_xtensa_engctx_ctor,
-		.dtor = _nvkm_engctx_dtor,
-		.init = _nvkm_engctx_init,
-		.fini = _nvkm_engctx_fini,
-		.rd32 = _nvkm_engctx_rd32,
-		.wr32 = _nvkm_engctx_wr32,
-	},
-};
-
-/*******************************************************************************
- * PVP engine/subdev functions
- ******************************************************************************/
-
-static int
-g84_vp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	    struct nvkm_oclass *oclass, void *data, u32 size,
-	    struct nvkm_object **pobject)
+int
+g84_vp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
 {
-	struct nvkm_xtensa *priv;
-	int ret;
-
-	ret = nvkm_xtensa_create(parent, engine, oclass, 0xf000, true,
-				 "PVP", "vp", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->unit = 0x01020000;
-	nv_engine(priv)->cclass = &g84_vp_cclass;
-	nv_engine(priv)->sclass = g84_vp_sclass;
-	priv->fifo_val = 0x111;
-	priv->unkd28 = 0x9c544;
-	return 0;
+	return nvkm_xtensa_new_(&g84_vp, device, index,
+				true, 0x00f000, pengine);
 }
-
-struct nvkm_oclass
-g84_vp_oclass = {
-	.handle = NV_ENGINE(VP, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_vp_ctor,
-		.dtor = _nvkm_xtensa_dtor,
-		.init = _nvkm_xtensa_init,
-		.fini = _nvkm_xtensa_fini,
-		.rd32 = _nvkm_xtensa_rd32,
-		.wr32 = _nvkm_xtensa_wr32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
index cea90df..a3d4f5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
@@ -20,153 +20,173 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 #include <engine/xtensa.h>
-#include <core/device.h>
 
-#include <core/engctx.h>
+#include <core/gpuobj.h>
+#include <engine/fifo.h>
 
-u32
-_nvkm_xtensa_rd32(struct nvkm_object *object, u64 addr)
+static int
+nvkm_xtensa_oclass_get(struct nvkm_oclass *oclass, int index)
 {
-	struct nvkm_xtensa *xtensa = (void *)object;
-	return nv_rd32(xtensa, xtensa->addr + addr);
+	struct nvkm_xtensa *xtensa = nvkm_xtensa(oclass->engine);
+	int c = 0;
+
+	while (xtensa->func->sclass[c].oclass) {
+		if (c++ == index) {
+			oclass->base = xtensa->func->sclass[index];
+			return index;
+		}
+	}
+
+	return c;
 }
 
-void
-_nvkm_xtensa_wr32(struct nvkm_object *object, u64 addr, u32 data)
+static int
+nvkm_xtensa_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
+			int align, struct nvkm_gpuobj **pgpuobj)
 {
-	struct nvkm_xtensa *xtensa = (void *)object;
-	nv_wr32(xtensa, xtensa->addr + addr, data);
+	return nvkm_gpuobj_new(object->engine->subdev.device, 0x10000, align,
+			       true, parent, pgpuobj);
 }
 
-int
-_nvkm_xtensa_engctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-			 struct nvkm_oclass *oclass, void *data, u32 size,
-			 struct nvkm_object **pobject)
-{
-	struct nvkm_engctx *engctx;
-	int ret;
+static const struct nvkm_object_func
+nvkm_xtensa_cclass = {
+	.bind = nvkm_xtensa_cclass_bind,
+};
 
-	ret = nvkm_engctx_create(parent, engine, oclass, NULL, 0x10000, 0x1000,
-				 NVOBJ_FLAG_ZERO_ALLOC, &engctx);
-	*pobject = nv_object(engctx);
-	return ret;
-}
-
-void
-_nvkm_xtensa_intr(struct nvkm_subdev *subdev)
+static void
+nvkm_xtensa_intr(struct nvkm_engine *engine)
 {
-	struct nvkm_xtensa *xtensa = (void *)subdev;
-	u32 unk104 = nv_ro32(xtensa, 0xd04);
-	u32 intr = nv_ro32(xtensa, 0xc20);
-	u32 chan = nv_ro32(xtensa, 0xc28);
-	u32 unk10c = nv_ro32(xtensa, 0xd0c);
+	struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
+	struct nvkm_subdev *subdev = &xtensa->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = xtensa->addr;
+	u32 unk104 = nvkm_rd32(device, base + 0xd04);
+	u32 intr = nvkm_rd32(device, base + 0xc20);
+	u32 chan = nvkm_rd32(device, base + 0xc28);
+	u32 unk10c = nvkm_rd32(device, base + 0xd0c);
 
 	if (intr & 0x10)
-		nv_warn(xtensa, "Watchdog interrupt, engine hung.\n");
-	nv_wo32(xtensa, 0xc20, intr);
-	intr = nv_ro32(xtensa, 0xc20);
+		nvkm_warn(subdev, "Watchdog interrupt, engine hung.\n");
+	nvkm_wr32(device, base + 0xc20, intr);
+	intr = nvkm_rd32(device, base + 0xc20);
 	if (unk104 == 0x10001 && unk10c == 0x200 && chan && !intr) {
-		nv_debug(xtensa, "Enabling FIFO_CTRL\n");
-		nv_mask(xtensa, xtensa->addr + 0xd94, 0, xtensa->fifo_val);
+		nvkm_debug(subdev, "Enabling FIFO_CTRL\n");
+		nvkm_mask(device, xtensa->addr + 0xd94, 0, xtensa->func->fifo_val);
 	}
 }
 
-int
-nvkm_xtensa_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, u32 addr, bool enable,
-		    const char *iname, const char *fname,
-		    int length, void **pobject)
+static int
+nvkm_xtensa_fini(struct nvkm_engine *engine, bool suspend)
 {
-	struct nvkm_xtensa *xtensa;
-	int ret;
+	struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
+	struct nvkm_device *device = xtensa->engine.subdev.device;
+	const u32 base = xtensa->addr;
 
-	ret = nvkm_engine_create_(parent, engine, oclass, enable, iname,
-				  fname, length, pobject);
-	xtensa = *pobject;
-	if (ret)
-		return ret;
+	nvkm_wr32(device, base + 0xd84, 0); /* INTR_EN */
+	nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */
 
-	nv_subdev(xtensa)->intr = _nvkm_xtensa_intr;
-	xtensa->addr = addr;
+	if (!suspend)
+		nvkm_memory_del(&xtensa->gpu_fw);
 	return 0;
 }
 
-int
-_nvkm_xtensa_init(struct nvkm_object *object)
+static int
+nvkm_xtensa_init(struct nvkm_engine *engine)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct nvkm_xtensa *xtensa = (void *)object;
+	struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
+	struct nvkm_subdev *subdev = &xtensa->engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = xtensa->addr;
 	const struct firmware *fw;
 	char name[32];
 	int i, ret;
+	u64 addr, size;
 	u32 tmp;
 
-	ret = nvkm_engine_init(&xtensa->base);
-	if (ret)
-		return ret;
-
 	if (!xtensa->gpu_fw) {
 		snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
 			 xtensa->addr >> 12);
 
-		ret = request_firmware(&fw, name, nv_device_base(device));
+		ret = request_firmware(&fw, name, device->dev);
 		if (ret) {
-			nv_warn(xtensa, "unable to load firmware %s\n", name);
+			nvkm_warn(subdev, "unable to load firmware %s\n", name);
 			return ret;
 		}
 
 		if (fw->size > 0x40000) {
-			nv_warn(xtensa, "firmware %s too large\n", name);
+			nvkm_warn(subdev, "firmware %s too large\n", name);
 			release_firmware(fw);
 			return -EINVAL;
 		}
 
-		ret = nvkm_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
+		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+				      0x40000, 0x1000, false,
 				      &xtensa->gpu_fw);
 		if (ret) {
 			release_firmware(fw);
 			return ret;
 		}
 
-		nv_debug(xtensa, "Loading firmware to address: 0x%llx\n",
-			 xtensa->gpu_fw->addr);
-
+		nvkm_kmap(xtensa->gpu_fw);
 		for (i = 0; i < fw->size / 4; i++)
-			nv_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
+			nvkm_wo32(xtensa->gpu_fw, i * 4, *((u32 *)fw->data + i));
+		nvkm_done(xtensa->gpu_fw);
 		release_firmware(fw);
 	}
 
-	nv_wo32(xtensa, 0xd10, 0x1fffffff); /* ?? */
-	nv_wo32(xtensa, 0xd08, 0x0fffffff); /* ?? */
+	addr = nvkm_memory_addr(xtensa->gpu_fw);
+	size = nvkm_memory_size(xtensa->gpu_fw);
 
-	nv_wo32(xtensa, 0xd28, xtensa->unkd28); /* ?? */
-	nv_wo32(xtensa, 0xc20, 0x3f); /* INTR */
-	nv_wo32(xtensa, 0xd84, 0x3f); /* INTR_EN */
+	nvkm_wr32(device, base + 0xd10, 0x1fffffff); /* ?? */
+	nvkm_wr32(device, base + 0xd08, 0x0fffffff); /* ?? */
 
-	nv_wo32(xtensa, 0xcc0, xtensa->gpu_fw->addr >> 8); /* XT_REGION_BASE */
-	nv_wo32(xtensa, 0xcc4, 0x1c); /* XT_REGION_SETUP */
-	nv_wo32(xtensa, 0xcc8, xtensa->gpu_fw->size >> 8); /* XT_REGION_LIMIT */
+	nvkm_wr32(device, base + 0xd28, xtensa->func->unkd28); /* ?? */
+	nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
+	nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */
 
-	tmp = nv_rd32(xtensa, 0x0);
-	nv_wo32(xtensa, 0xde0, tmp); /* SCRATCH_H2X */
+	nvkm_wr32(device, base + 0xcc0, addr >> 8); /* XT_REGION_BASE */
+	nvkm_wr32(device, base + 0xcc4, 0x1c); /* XT_REGION_SETUP */
+	nvkm_wr32(device, base + 0xcc8, size >> 8); /* XT_REGION_LIMIT */
 
-	nv_wo32(xtensa, 0xce8, 0xf); /* XT_REGION_SETUP */
+	tmp = nvkm_rd32(device, 0x0);
+	nvkm_wr32(device, base + 0xde0, tmp); /* SCRATCH_H2X */
 
-	nv_wo32(xtensa, 0xc20, 0x3f); /* INTR */
-	nv_wo32(xtensa, 0xd84, 0x3f); /* INTR_EN */
+	nvkm_wr32(device, base + 0xce8, 0xf); /* XT_REGION_SETUP */
+
+	nvkm_wr32(device, base + 0xc20, 0x3f); /* INTR */
+	nvkm_wr32(device, base + 0xd84, 0x3f); /* INTR_EN */
 	return 0;
 }
 
-int
-_nvkm_xtensa_fini(struct nvkm_object *object, bool suspend)
+static void *
+nvkm_xtensa_dtor(struct nvkm_engine *engine)
 {
-	struct nvkm_xtensa *xtensa = (void *)object;
+	return nvkm_xtensa(engine);
+}
 
-	nv_wo32(xtensa, 0xd84, 0); /* INTR_EN */
-	nv_wo32(xtensa, 0xd94, 0); /* FIFO_CTRL */
+static const struct nvkm_engine_func
+nvkm_xtensa = {
+	.dtor = nvkm_xtensa_dtor,
+	.init = nvkm_xtensa_init,
+	.fini = nvkm_xtensa_fini,
+	.intr = nvkm_xtensa_intr,
+	.fifo.sclass = nvkm_xtensa_oclass_get,
+	.cclass = &nvkm_xtensa_cclass,
+};
 
-	if (!suspend)
-		nvkm_gpuobj_ref(NULL, &xtensa->gpu_fw);
+int
+nvkm_xtensa_new_(const struct nvkm_xtensa_func *func,
+		 struct nvkm_device *device, int index, bool enable,
+		 u32 addr, struct nvkm_engine **pengine)
+{
+	struct nvkm_xtensa *xtensa;
 
-	return nvkm_engine_fini(&xtensa->base, suspend);
+	if (!(xtensa = kzalloc(sizeof(*xtensa), GFP_KERNEL)))
+		return -ENOMEM;
+	xtensa->func = func;
+	xtensa->addr = addr;
+	*pengine = &xtensa->engine;
+
+	return nvkm_engine_ctor(&nvkm_xtensa, device, index, func->pmc_enable,
+				enable, &xtensa->engine);
 }
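
[Note: the xtensa rewrite above follows the new nvkm shape: a const function table attached to an embedded base object, with container_of() recovering the derived type, and a trivial allocation in nvkm_xtensa_new_(). A minimal standalone sketch of that pattern; all names below are illustrative, only the shape matches nvkm.]

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct engine;

    struct engine_func {
            void *(*dtor)(struct engine *);
            int (*init)(struct engine *);
    };

    struct engine {
            const struct engine_func *func;
    };

    struct xtensa {
            struct engine engine;   /* embedded base object */
            unsigned int addr;
    };
    #define to_xtensa(p) container_of((p), struct xtensa, engine)

    static int xtensa_init(struct engine *engine)
    {
            printf("init xtensa at %#x\n", to_xtensa(engine)->addr);
            return 0;
    }

    static void *xtensa_dtor(struct engine *engine)
    {
            /* hand the containing object back so the caller can free it */
            return to_xtensa(engine);
    }

    static const struct engine_func xtensa_func = {
            .dtor = xtensa_dtor,
            .init = xtensa_init,
    };

    int main(void)
    {
            struct xtensa *x = calloc(1, sizeof(*x));
            if (!x)
                    return 1;
            x->engine.func = &xtensa_func;
            x->addr = 0x103000;
            x->engine.func->init(&x->engine);
            free(x->engine.func->dtor(&x->engine));
            return 0;
    }

The dtor-returns-pointer convention, visible in nvkm_xtensa_dtor() above, lets generic teardown code free the derived object without knowing its concrete type.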
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index a1bb3e4..ee2c38f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -13,6 +13,7 @@
 include $(src)/nvkm/subdev/mc/Kbuild
 include $(src)/nvkm/subdev/mmu/Kbuild
 include $(src)/nvkm/subdev/mxm/Kbuild
+include $(src)/nvkm/subdev/pci/Kbuild
 include $(src)/nvkm/subdev/pmu/Kbuild
 include $(src)/nvkm/subdev/therm/Kbuild
 include $(src)/nvkm/subdev/timer/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index 1ab554a..1e138b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -1,4 +1,5 @@
 nvkm-y += nvkm/subdev/bar/base.o
 nvkm-y += nvkm/subdev/bar/nv50.o
+nvkm-y += nvkm/subdev/bar/g84.o
 nvkm-y += nvkm/subdev/bar/gf100.o
 nvkm-y += nvkm/subdev/bar/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index 3502d00..a9433ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -23,122 +23,61 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-#include <subdev/fb.h>
-#include <subdev/mmu.h>
+void
+nvkm_bar_flush(struct nvkm_bar *bar)
+{
+	if (bar && bar->func->flush)
+		bar->func->flush(bar);
+}
 
-struct nvkm_barobj {
-	struct nvkm_object base;
-	struct nvkm_vma vma;
-	void __iomem *iomem;
-};
+struct nvkm_vm *
+nvkm_bar_kmap(struct nvkm_bar *bar)
+{
+	/* disallow kmap() until after vm has been bootstrapped */
+	if (bar && bar->func->kmap && bar->subdev.oneinit)
+		return bar->func->kmap(bar);
+	return NULL;
+}
+
+int
+nvkm_bar_umap(struct nvkm_bar *bar, u64 size, int type, struct nvkm_vma *vma)
+{
+	return bar->func->umap(bar, size, type, vma);
+}
 
 static int
-nvkm_barobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
+nvkm_bar_oneinit(struct nvkm_subdev *subdev)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_bar *bar = nvkm_bar(device);
-	struct nvkm_mem *mem = data;
-	struct nvkm_barobj *barobj;
-	int ret;
-
-	ret = nvkm_object_create(parent, engine, oclass, 0, &barobj);
-	*pobject = nv_object(barobj);
-	if (ret)
-		return ret;
-
-	ret = bar->kmap(bar, mem, NV_MEM_ACCESS_RW, &barobj->vma);
-	if (ret)
-		return ret;
-
-	barobj->iomem = ioremap(nv_device_resource_start(device, 3) +
-				(u32)barobj->vma.offset, mem->size << 12);
-	if (!barobj->iomem) {
-		nv_warn(bar, "PRAMIN ioremap failed\n");
-		return -ENOMEM;
-	}
-
-	return 0;
+	struct nvkm_bar *bar = nvkm_bar(subdev);
+	return bar->func->oneinit(bar);
 }
 
-static void
-nvkm_barobj_dtor(struct nvkm_object *object)
+static int
+nvkm_bar_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_bar *bar = nvkm_bar(object);
-	struct nvkm_barobj *barobj = (void *)object;
-	if (barobj->vma.node) {
-		if (barobj->iomem)
-			iounmap(barobj->iomem);
-		bar->unmap(bar, &barobj->vma);
-	}
-	nvkm_object_destroy(&barobj->base);
+	struct nvkm_bar *bar = nvkm_bar(subdev);
+	return bar->func->init(bar);
 }
 
-static u32
-nvkm_barobj_rd32(struct nvkm_object *object, u64 addr)
+static void *
+nvkm_bar_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_barobj *barobj = (void *)object;
-	return ioread32_native(barobj->iomem + addr);
+	struct nvkm_bar *bar = nvkm_bar(subdev);
+	return bar->func->dtor(bar);
 }
 
-static void
-nvkm_barobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	struct nvkm_barobj *barobj = (void *)object;
-	iowrite32_native(data, barobj->iomem + addr);
-}
-
-static struct nvkm_oclass
-nvkm_barobj_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nvkm_barobj_ctor,
-		.dtor = nvkm_barobj_dtor,
-		.init = nvkm_object_init,
-		.fini = nvkm_object_fini,
-		.rd32 = nvkm_barobj_rd32,
-		.wr32 = nvkm_barobj_wr32,
-	},
+static const struct nvkm_subdev_func
+nvkm_bar = {
+	.dtor = nvkm_bar_dtor,
+	.oneinit = nvkm_bar_oneinit,
+	.init = nvkm_bar_init,
 };
 
-int
-nvkm_bar_alloc(struct nvkm_bar *bar, struct nvkm_object *parent,
-	       struct nvkm_mem *mem, struct nvkm_object **pobject)
-{
-	struct nvkm_object *gpuobj;
-	int ret = nvkm_object_ctor(parent, &parent->engine->subdev.object,
-				   &nvkm_barobj_oclass, mem, 0, &gpuobj);
-	if (ret == 0)
-		*pobject = gpuobj;
-	return ret;
-}
-
-int
-nvkm_bar_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
-{
-	struct nvkm_bar *bar;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "BARCTL",
-				  "bar", length, pobject);
-	bar = *pobject;
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 void
-nvkm_bar_destroy(struct nvkm_bar *bar)
+nvkm_bar_ctor(const struct nvkm_bar_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_bar *bar)
 {
-	nvkm_subdev_destroy(&bar->base);
-}
-
-void
-_nvkm_bar_dtor(struct nvkm_object *object)
-{
-	struct nvkm_bar *bar = (void *)object;
-	nvkm_bar_destroy(bar);
+	nvkm_subdev_ctor(&nvkm_bar, device, index, 0, &bar->subdev);
+	bar->func = func;
+	spin_lock_init(&bar->lock);
 }
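
[Note: the new bar/base.c is reduced to thin wrappers that dispatch through bar->func, tolerating both a missing subdev and an unimplemented method (and gating kmap() on subdev.oneinit). A standalone sketch of the NULL-tolerant wrapper style, with illustrative types.]

    #include <stdio.h>

    struct bar;

    struct bar_func {
            void (*flush)(struct bar *);    /* optional per-chipset op */
    };

    struct bar {
            const struct bar_func *func;
    };

    static void bar_flush(struct bar *bar)
    {
            if (bar && bar->func->flush)
                    bar->func->flush(bar);  /* else: silent no-op */
    }

    static void g84_flush(struct bar *bar)
    {
            (void)bar;
            puts("flush");
    }

    int main(void)
    {
            static const struct bar_func g84 = { .flush = g84_flush };
            struct bar bar = { .func = &g84 };

            bar_flush(&bar);        /* dispatches to g84_flush() */
            bar_flush(NULL);        /* safe no-op */
            return 0;
    }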
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
new file mode 100644
index 0000000..ef71713
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+
+#include <subdev/timer.h>
+
+void
+g84_bar_flush(struct nvkm_bar *bar)
+{
+	struct nvkm_device *device = bar->subdev.device;
+	unsigned long flags;
+	spin_lock_irqsave(&bar->lock, flags);
+	nvkm_wr32(device, 0x070000, 0x00000001);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x070000) & 0x00000002))
+			break;
+	);
+	spin_unlock_irqrestore(&bar->lock, flags);
+}
+
+static const struct nvkm_bar_func
+g84_bar_func = {
+	.dtor = nv50_bar_dtor,
+	.oneinit = nv50_bar_oneinit,
+	.init = nv50_bar_init,
+	.kmap = nv50_bar_kmap,
+	.umap = nv50_bar_umap,
+	.flush = g84_bar_flush,
+};
+
+int
+g84_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+	return nv50_bar_new_(&g84_bar_func, device, index, 0x200, pbar);
+}
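
[Note: g84_bar_flush() above swaps the old nv_wait() for nvkm_msec(), a bounded poll that runs its statement body until `break` or until the budget (2000ms here) expires, returning a negative value on timeout, as the `< 0` check in nv50_bar_init() later in this patch confirms. A standalone model of that contract, with a callback standing in for the statement body; names and the exact return convention are assumptions based on the usage visible here.]

    #include <stdio.h>
    #include <time.h>

    static long long now_ns(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    /* >= 0: time left when the condition hit; < 0: timed out */
    static long long poll_msec(long long msec, int (*done)(void *), void *arg)
    {
            long long limit = now_ns() + msec * 1000000LL;
            long long left;

            while ((left = limit - now_ns()) >= 0) {
                    if (done(arg))
                            return left;
            }
            return -1;
    }

    static int flush_done(void *arg)
    {
            /* stands in for !(nvkm_rd32(device, 0x070000) & 0x00000002) */
            return --*(int *)arg == 0;
    }

    int main(void)
    {
            int busy_reads = 1000;

            if (poll_msec(2000, flush_done, &busy_reads) < 0)
                    puts("flush timeout");
            else
                    puts("flushed");
            return 0;
    }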
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index 12a1aeb..c794b2c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -21,101 +21,60 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "gf100.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/mmu.h>
 
-struct gf100_bar_priv_vm {
-	struct nvkm_gpuobj *mem;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *vm;
-};
-
-struct gf100_bar_priv {
-	struct nvkm_bar base;
-	spinlock_t lock;
-	struct gf100_bar_priv_vm bar[2];
-};
-
-static int
-gf100_bar_kmap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
-	       struct nvkm_vma *vma)
+static struct nvkm_vm *
+gf100_bar_kmap(struct nvkm_bar *base)
 {
-	struct gf100_bar_priv *priv = (void *)bar;
-	int ret;
+	return gf100_bar(base)->bar[0].vm;
+}
 
-	ret = nvkm_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
+int
+gf100_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
+{
+	struct gf100_bar *bar = gf100_bar(base);
+	return nvkm_vm_get(bar->bar[1].vm, size, type, NV_MEM_ACCESS_RW, vma);
 }
 
 static int
-gf100_bar_umap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
-	       struct nvkm_vma *vma)
+gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
+		  struct lock_class_key *key, int bar_nr)
 {
-	struct gf100_bar_priv *priv = (void *)bar;
-	int ret;
-
-	ret = nvkm_vm_get(priv->bar[1].vm, mem->size << 12,
-			  mem->page_shift, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
-}
-
-static void
-gf100_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
-{
-	nvkm_vm_unmap(vma);
-	nvkm_vm_put(vma);
-}
-
-static int
-gf100_bar_ctor_vm(struct gf100_bar_priv *priv, struct gf100_bar_priv_vm *bar_vm,
-		  int bar_nr)
-{
-	struct nvkm_device *device = nv_device(&priv->base);
+	struct nvkm_device *device = bar->base.subdev.device;
 	struct nvkm_vm *vm;
 	resource_size_t bar_len;
 	int ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, false,
 			      &bar_vm->mem);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
-			      &bar_vm->pgd);
+	ret = nvkm_gpuobj_new(device, 0x8000, 0, false, NULL, &bar_vm->pgd);
 	if (ret)
 		return ret;
 
-	bar_len = nv_device_resource_len(device, bar_nr);
+	bar_len = device->func->resource_size(device, bar_nr);
 
-	ret = nvkm_vm_new(device, 0, bar_len, 0, &vm);
+	ret = nvkm_vm_new(device, 0, bar_len, 0, key, &vm);
 	if (ret)
 		return ret;
 
-	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
+	atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
 
 	/*
 	 * Bootstrap page table lookup.
 	 */
 	if (bar_nr == 3) {
-		ret = nvkm_gpuobj_new(nv_object(priv), NULL,
-				      (bar_len >> 12) * 8, 0x1000,
-				      NVOBJ_FLAG_ZERO_ALLOC,
-				      &vm->pgt[0].obj[0]);
-		vm->pgt[0].refcount[0] = 1;
-		if (ret)
+		ret = nvkm_vm_boot(vm, bar_len);
+		if (ret) {
+			nvkm_vm_ref(NULL, &vm, NULL);
 			return ret;
+		}
 	}
 
 	ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->pgd);
@@ -123,97 +82,101 @@
 	if (ret)
 		return ret;
 
-	nv_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
-	nv_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
-	nv_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
-	nv_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
+	nvkm_kmap(bar_vm->mem);
+	nvkm_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
+	nvkm_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
+	nvkm_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
+	nvkm_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
+	nvkm_done(bar_vm->mem);
 	return 0;
 }
 
 int
-gf100_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+gf100_bar_oneinit(struct nvkm_bar *base)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct gf100_bar_priv *priv;
-	bool has_bar3 = nv_device_resource_len(device, 3) != 0;
+	static struct lock_class_key bar1_lock;
+	static struct lock_class_key bar3_lock;
+	struct gf100_bar *bar = gf100_bar(base);
 	int ret;
 
-	ret = nvkm_bar_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
 	/* BAR3 */
-	if (has_bar3) {
-		ret = gf100_bar_ctor_vm(priv, &priv->bar[0], 3);
+	if (bar->base.func->kmap) {
+		ret = gf100_bar_ctor_vm(bar, &bar->bar[0], &bar3_lock, 3);
 		if (ret)
 			return ret;
 	}
 
 	/* BAR1 */
-	ret = gf100_bar_ctor_vm(priv, &priv->bar[1], 1);
+	ret = gf100_bar_ctor_vm(bar, &bar->bar[1], &bar1_lock, 1);
 	if (ret)
 		return ret;
 
-	if (has_bar3) {
-		priv->base.alloc = nvkm_bar_alloc;
-		priv->base.kmap = gf100_bar_kmap;
-	}
-	priv->base.umap = gf100_bar_umap;
-	priv->base.unmap = gf100_bar_unmap;
-	priv->base.flush = g84_bar_flush;
-	spin_lock_init(&priv->lock);
 	return 0;
 }
 
-void
-gf100_bar_dtor(struct nvkm_object *object)
-{
-	struct gf100_bar_priv *priv = (void *)object;
-
-	nvkm_vm_ref(NULL, &priv->bar[1].vm, priv->bar[1].pgd);
-	nvkm_gpuobj_ref(NULL, &priv->bar[1].pgd);
-	nvkm_gpuobj_ref(NULL, &priv->bar[1].mem);
-
-	if (priv->bar[0].vm) {
-		nvkm_gpuobj_ref(NULL, &priv->bar[0].vm->pgt[0].obj[0]);
-		nvkm_vm_ref(NULL, &priv->bar[0].vm, priv->bar[0].pgd);
-	}
-	nvkm_gpuobj_ref(NULL, &priv->bar[0].pgd);
-	nvkm_gpuobj_ref(NULL, &priv->bar[0].mem);
-
-	nvkm_bar_destroy(&priv->base);
-}
-
 int
-gf100_bar_init(struct nvkm_object *object)
+gf100_bar_init(struct nvkm_bar *base)
 {
-	struct gf100_bar_priv *priv = (void *)object;
-	int ret;
+	struct gf100_bar *bar = gf100_bar(base);
+	struct nvkm_device *device = bar->base.subdev.device;
+	u32 addr;
 
-	ret = nvkm_bar_init(&priv->base);
-	if (ret)
-		return ret;
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
 
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
+	addr = nvkm_memory_addr(bar->bar[1].mem) >> 12;
+	nvkm_wr32(device, 0x001704, 0x80000000 | addr);
 
-	nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
-	if (priv->bar[0].mem)
-		nv_wr32(priv, 0x001714,
-			0xc0000000 | priv->bar[0].mem->addr >> 12);
+	if (bar->bar[0].mem) {
+		addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
+		nvkm_wr32(device, 0x001714, 0xc0000000 | addr);
+	}
+
 	return 0;
 }
 
-struct nvkm_oclass
-gf100_bar_oclass = {
-	.handle = NV_SUBDEV(BAR, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_bar_ctor,
-		.dtor = gf100_bar_dtor,
-		.init = gf100_bar_init,
-		.fini = _nvkm_bar_fini,
-	},
+void *
+gf100_bar_dtor(struct nvkm_bar *base)
+{
+	struct gf100_bar *bar = gf100_bar(base);
+
+	nvkm_vm_ref(NULL, &bar->bar[1].vm, bar->bar[1].pgd);
+	nvkm_gpuobj_del(&bar->bar[1].pgd);
+	nvkm_memory_del(&bar->bar[1].mem);
+
+	if (bar->bar[0].vm) {
+		nvkm_memory_del(&bar->bar[0].vm->pgt[0].mem[0]);
+		nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].pgd);
+	}
+	nvkm_gpuobj_del(&bar->bar[0].pgd);
+	nvkm_memory_del(&bar->bar[0].mem);
+	return bar;
+}
+
+int
+gf100_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_bar **pbar)
+{
+	struct gf100_bar *bar;
+	if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_bar_ctor(func, device, index, &bar->base);
+	*pbar = &bar->base;
+	return 0;
+}
+
+static const struct nvkm_bar_func
+gf100_bar_func = {
+	.dtor = gf100_bar_dtor,
+	.oneinit = gf100_bar_oneinit,
+	.init = gf100_bar_init,
+	.kmap = gf100_bar_kmap,
+	.umap = gf100_bar_umap,
+	.flush = g84_bar_flush,
 };
+
+int
+gf100_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+	return gf100_bar_new_(&gf100_bar_func, device, index, pbar);
+}
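
[Note: the gf100 rewrite splits the old ctor into oneinit (one-time VM construction, with a static lock_class_key per BAR VM so lockdep can tell the two apart) and init (register programming re-run on every resume); BAR3 is only built when the chipset provides a kmap method. A minimal sketch of the oneinit/init split, illustrative names only.]

    #include <stdbool.h>
    #include <stdio.h>

    struct subdev {
            bool oneinit;
            int (*oneinit_fn)(struct subdev *);
            int (*init_fn)(struct subdev *);
    };

    static int subdev_init(struct subdev *sd)
    {
            if (!sd->oneinit) {             /* expensive, once per boot */
                    int ret = sd->oneinit_fn(sd);
                    if (ret)
                            return ret;
                    sd->oneinit = true;
            }
            return sd->init_fn(sd);         /* cheap, every resume */
    }

    static int bar_oneinit(struct subdev *sd)
    {
            (void)sd;
            puts("build BAR VMs");
            return 0;
    }

    static int bar_init(struct subdev *sd)
    {
            (void)sd;
            puts("program BAR registers");
            return 0;
    }

    int main(void)
    {
            struct subdev bar = {
                    .oneinit_fn = bar_oneinit,
                    .init_fn = bar_init,
            };

            subdev_init(&bar);      /* boot: builds VMs, then programs registers */
            subdev_init(&bar);      /* resume: programs registers only */
            return 0;
    }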
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
new file mode 100644
index 0000000..f7dea69
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
@@ -0,0 +1,23 @@
+#ifndef __GF100_BAR_H__
+#define __GF100_BAR_H__
+#define gf100_bar(p) container_of((p), struct gf100_bar, base)
+#include "priv.h"
+
+struct gf100_bar_vm {
+	struct nvkm_memory *mem;
+	struct nvkm_gpuobj *pgd;
+	struct nvkm_vm *vm;
+};
+
+struct gf100_bar {
+	struct nvkm_bar base;
+	struct gf100_bar_vm bar[2];
+};
+
+int gf100_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
+		   int, struct nvkm_bar **);
+void *gf100_bar_dtor(struct nvkm_bar *);
+int gf100_bar_oneinit(struct nvkm_bar *);
+int gf100_bar_init(struct nvkm_bar *);
+int gf100_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
index 148f739..9232fab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
@@ -19,32 +19,22 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include "priv.h"
+#include "gf100.h"
+
+static const struct nvkm_bar_func
+gk20a_bar_func = {
+	.dtor = gf100_bar_dtor,
+	.oneinit = gf100_bar_oneinit,
+	.init = gf100_bar_init,
+	.umap = gf100_bar_umap,
+	.flush = g84_bar_flush,
+};
 
 int
-gk20a_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+gk20a_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
 {
-	struct nvkm_bar *bar;
-	int ret;
-
-	ret = gf100_bar_ctor(parent, engine, oclass, data, size, pobject);
-	if (ret)
-		return ret;
-
-	bar = (struct nvkm_bar *)*pobject;
-	bar->iomap_uncached = true;
-	return 0;
+	int ret = gf100_bar_new_(&gk20a_bar_func, device, index, pbar);
+	if (ret == 0)
+		(*pbar)->iomap_uncached = true;
+	return ret;
 }
-
-struct nvkm_oclass
-gk20a_bar_oclass = {
-	.handle = NV_SUBDEV(BAR, 0xea),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_bar_ctor,
-		.dtor = gf100_bar_dtor,
-		.init = gf100_bar_init,
-		.fini = _nvkm_bar_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 8548adb..370dcd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -21,251 +21,196 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "nv50.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/mmu.h>
 #include <subdev/timer.h>
 
-struct nv50_bar_priv {
-	struct nvkm_bar base;
-	spinlock_t lock;
-	struct nvkm_gpuobj *mem;
-	struct nvkm_gpuobj *pad;
-	struct nvkm_gpuobj *pgd;
-	struct nvkm_vm *bar1_vm;
-	struct nvkm_gpuobj *bar1;
-	struct nvkm_vm *bar3_vm;
-	struct nvkm_gpuobj *bar3;
-};
-
-static int
-nv50_bar_kmap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
-	      struct nvkm_vma *vma)
+struct nvkm_vm *
+nv50_bar_kmap(struct nvkm_bar *base)
 {
-	struct nv50_bar_priv *priv = (void *)bar;
-	int ret;
-
-	ret = nvkm_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
+	return nv50_bar(base)->bar3_vm;
 }
 
-static int
-nv50_bar_umap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags,
-	      struct nvkm_vma *vma)
+int
+nv50_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
 {
-	struct nv50_bar_priv *priv = (void *)bar;
-	int ret;
-
-	ret = nvkm_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
-	if (ret)
-		return ret;
-
-	nvkm_vm_map(vma, mem);
-	return 0;
+	struct nv50_bar *bar = nv50_bar(base);
+	return nvkm_vm_get(bar->bar1_vm, size, type, NV_MEM_ACCESS_RW, vma);
 }
 
 static void
-nv50_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma)
+nv50_bar_flush(struct nvkm_bar *base)
 {
-	nvkm_vm_unmap(vma);
-	nvkm_vm_put(vma);
-}
-
-static void
-nv50_bar_flush(struct nvkm_bar *bar)
-{
-	struct nv50_bar_priv *priv = (void *)bar;
+	struct nv50_bar *bar = nv50_bar(base);
+	struct nvkm_device *device = bar->base.subdev.device;
 	unsigned long flags;
-	spin_lock_irqsave(&priv->lock, flags);
-	nv_wr32(priv, 0x00330c, 0x00000001);
-	if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
-		nv_warn(priv, "flush timeout\n");
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_lock_irqsave(&bar->base.lock, flags);
+	nvkm_wr32(device, 0x00330c, 0x00000001);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x00330c) & 0x00000002))
+			break;
+	);
+	spin_unlock_irqrestore(&bar->base.lock, flags);
 }
 
-void
-g84_bar_flush(struct nvkm_bar *bar)
+int
+nv50_bar_oneinit(struct nvkm_bar *base)
 {
-	struct nv50_bar_priv *priv = (void *)bar;
-	unsigned long flags;
-	spin_lock_irqsave(&priv->lock, flags);
-	nv_wr32(bar, 0x070000, 0x00000001);
-	if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000))
-		nv_warn(priv, "flush timeout\n");
-	spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static int
-nv50_bar_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_object *heap;
+	struct nv50_bar *bar = nv50_bar(base);
+	struct nvkm_device *device = bar->base.subdev.device;
+	static struct lock_class_key bar1_lock;
+	static struct lock_class_key bar3_lock;
 	struct nvkm_vm *vm;
-	struct nv50_bar_priv *priv;
 	u64 start, limit;
 	int ret;
 
-	ret = nvkm_bar_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
-			      NVOBJ_FLAG_HEAP, &priv->mem);
-	heap = nv_object(priv->mem);
+	ret = nvkm_gpuobj_new(device, bar->pgd_addr, 0, false, bar->mem,
+			      &bar->pad);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), heap,
-			      (device->chipset == 0x50) ? 0x1400 : 0x0200,
-			      0, 0, &priv->pad);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), heap, 0x4000, 0, 0, &priv->pgd);
+	ret = nvkm_gpuobj_new(device, 0x4000, 0, false, bar->mem, &bar->pgd);
 	if (ret)
 		return ret;
 
 	/* BAR3 */
 	start = 0x0100000000ULL;
-	limit = start + nv_device_resource_len(device, 3);
+	limit = start + device->func->resource_size(device, 3);
 
-	ret = nvkm_vm_new(device, start, limit, start, &vm);
+	ret = nvkm_vm_new(device, start, limit, start, &bar3_lock, &vm);
 	if (ret)
 		return ret;
 
-	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
+	atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
 
-	ret = nvkm_gpuobj_new(nv_object(priv), heap,
-			      ((limit-- - start) >> 12) * 8, 0x1000,
-			      NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
-	vm->pgt[0].refcount[0] = 1;
+	ret = nvkm_vm_boot(vm, limit-- - start);
 	if (ret)
 		return ret;
 
-	ret = nvkm_vm_ref(vm, &priv->bar3_vm, priv->pgd);
+	ret = nvkm_vm_ref(vm, &bar->bar3_vm, bar->pgd);
 	nvkm_vm_ref(NULL, &vm, NULL);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
+	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar3);
 	if (ret)
 		return ret;
 
-	nv_wo32(priv->bar3, 0x00, 0x7fc00000);
-	nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
-	nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
-	nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
-				  upper_32_bits(start));
-	nv_wo32(priv->bar3, 0x10, 0x00000000);
-	nv_wo32(priv->bar3, 0x14, 0x00000000);
+	nvkm_kmap(bar->bar3);
+	nvkm_wo32(bar->bar3, 0x00, 0x7fc00000);
+	nvkm_wo32(bar->bar3, 0x04, lower_32_bits(limit));
+	nvkm_wo32(bar->bar3, 0x08, lower_32_bits(start));
+	nvkm_wo32(bar->bar3, 0x0c, upper_32_bits(limit) << 24 |
+				   upper_32_bits(start));
+	nvkm_wo32(bar->bar3, 0x10, 0x00000000);
+	nvkm_wo32(bar->bar3, 0x14, 0x00000000);
+	nvkm_done(bar->bar3);
 
 	/* BAR1 */
 	start = 0x0000000000ULL;
-	limit = start + nv_device_resource_len(device, 1);
+	limit = start + device->func->resource_size(device, 1);
 
-	ret = nvkm_vm_new(device, start, limit--, start, &vm);
+	ret = nvkm_vm_new(device, start, limit--, start, &bar1_lock, &vm);
 	if (ret)
 		return ret;
 
-	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);
+	atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
 
-	ret = nvkm_vm_ref(vm, &priv->bar1_vm, priv->pgd);
+	ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->pgd);
 	nvkm_vm_ref(NULL, &vm, NULL);
 	if (ret)
 		return ret;
 
-	ret = nvkm_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
+	ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar1);
 	if (ret)
 		return ret;
 
-	nv_wo32(priv->bar1, 0x00, 0x7fc00000);
-	nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
-	nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
-	nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
-				  upper_32_bits(start));
-	nv_wo32(priv->bar1, 0x10, 0x00000000);
-	nv_wo32(priv->bar1, 0x14, 0x00000000);
-
-	priv->base.alloc = nvkm_bar_alloc;
-	priv->base.kmap = nv50_bar_kmap;
-	priv->base.umap = nv50_bar_umap;
-	priv->base.unmap = nv50_bar_unmap;
-	if (device->chipset == 0x50)
-		priv->base.flush = nv50_bar_flush;
-	else
-		priv->base.flush = g84_bar_flush;
-	spin_lock_init(&priv->lock);
+	nvkm_kmap(bar->bar1);
+	nvkm_wo32(bar->bar1, 0x00, 0x7fc00000);
+	nvkm_wo32(bar->bar1, 0x04, lower_32_bits(limit));
+	nvkm_wo32(bar->bar1, 0x08, lower_32_bits(start));
+	nvkm_wo32(bar->bar1, 0x0c, upper_32_bits(limit) << 24 |
+				   upper_32_bits(start));
+	nvkm_wo32(bar->bar1, 0x10, 0x00000000);
+	nvkm_wo32(bar->bar1, 0x14, 0x00000000);
+	nvkm_done(bar->bar1);
 	return 0;
 }
 
-static void
-nv50_bar_dtor(struct nvkm_object *object)
+int
+nv50_bar_init(struct nvkm_bar *base)
 {
-	struct nv50_bar_priv *priv = (void *)object;
-	nvkm_gpuobj_ref(NULL, &priv->bar1);
-	nvkm_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
-	nvkm_gpuobj_ref(NULL, &priv->bar3);
-	if (priv->bar3_vm) {
-		nvkm_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
-		nvkm_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
-	}
-	nvkm_gpuobj_ref(NULL, &priv->pgd);
-	nvkm_gpuobj_ref(NULL, &priv->pad);
-	nvkm_gpuobj_ref(NULL, &priv->mem);
-	nvkm_bar_destroy(&priv->base);
-}
+	struct nv50_bar *bar = nv50_bar(base);
+	struct nvkm_device *device = bar->base.subdev.device;
+	int i;
 
-static int
-nv50_bar_init(struct nvkm_object *object)
-{
-	struct nv50_bar_priv *priv = (void *)object;
-	int ret, i;
-
-	ret = nvkm_bar_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
-	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
-	nv_wr32(priv, 0x100c80, 0x00060001);
-	if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000)) {
-		nv_error(priv, "vm flush timeout\n");
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
+	nvkm_wr32(device, 0x100c80, 0x00060001);
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+			break;
+	) < 0)
 		return -EBUSY;
-	}
 
-	nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12);
-	nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
-	nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
-	nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
+	nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
+	nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
+	nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
+	nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar3->node->offset >> 4);
 	for (i = 0; i < 8; i++)
-		nv_wr32(priv, 0x001900 + (i * 4), 0x00000000);
+		nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
 	return 0;
 }
 
-static int
-nv50_bar_fini(struct nvkm_object *object, bool suspend)
+void *
+nv50_bar_dtor(struct nvkm_bar *base)
 {
-	struct nv50_bar_priv *priv = (void *)object;
-	return nvkm_bar_fini(&priv->base, suspend);
+	struct nv50_bar *bar = nv50_bar(base);
+	nvkm_gpuobj_del(&bar->bar1);
+	nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
+	nvkm_gpuobj_del(&bar->bar3);
+	if (bar->bar3_vm) {
+		nvkm_memory_del(&bar->bar3_vm->pgt[0].mem[0]);
+		nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
+	}
+	nvkm_gpuobj_del(&bar->pgd);
+	nvkm_gpuobj_del(&bar->pad);
+	nvkm_gpuobj_del(&bar->mem);
+	return bar;
 }
 
-struct nvkm_oclass
-nv50_bar_oclass = {
-	.handle = NV_SUBDEV(BAR, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_bar_ctor,
-		.dtor = nv50_bar_dtor,
-		.init = nv50_bar_init,
-		.fini = nv50_bar_fini,
-	},
+int
+nv50_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
+	      int index, u32 pgd_addr, struct nvkm_bar **pbar)
+{
+	struct nv50_bar *bar;
+	if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_bar_ctor(func, device, index, &bar->base);
+	bar->pgd_addr = pgd_addr;
+	*pbar = &bar->base;
+	return 0;
+}
+
+static const struct nvkm_bar_func
+nv50_bar_func = {
+	.dtor = nv50_bar_dtor,
+	.oneinit = nv50_bar_oneinit,
+	.init = nv50_bar_init,
+	.kmap = nv50_bar_kmap,
+	.umap = nv50_bar_umap,
+	.flush = nv50_bar_flush,
 };
+
+int
+nv50_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+	return nv50_bar_new_(&nv50_bar_func, device, index, 0x1400, pbar);
+}
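
[Note: throughout this patch, bare nv_wo32() calls become nvkm_kmap()/nvkm_wo32()/nvkm_done() brackets: map the backing object once, perform all the 32-bit writes, then unmap. A standalone model of the bracketed-accessor shape; the struct and semantics here are illustrative, not nvkm's.]

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct memory {
            uint8_t buf[64];        /* stands in for a VRAM-backed object */
            int mapped;
    };

    static void mem_kmap(struct memory *m) { m->mapped = 1; }
    static void mem_done(struct memory *m) { m->mapped = 0; }

    static void mem_wo32(struct memory *m, uint32_t off, uint32_t data)
    {
            if (m->mapped)  /* writes are only legal inside the bracket */
                    memcpy(&m->buf[off], &data, sizeof(data));
    }

    int main(void)
    {
            struct memory ctxdma = { .mapped = 0 };
            uint32_t first;

            mem_kmap(&ctxdma);
            mem_wo32(&ctxdma, 0x00, 0x7fc00000);    /* cf. the BAR ctxdma writes above */
            mem_wo32(&ctxdma, 0x04, 0xffffffff);
            mem_wo32(&ctxdma, 0x08, 0x00000000);
            mem_done(&ctxdma);

            memcpy(&first, ctxdma.buf, sizeof(first));
            printf("%08x\n", first);
            return 0;
    }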
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
new file mode 100644
index 0000000..1eb764f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
@@ -0,0 +1,26 @@
+#ifndef __NV50_BAR_H__
+#define __NV50_BAR_H__
+#define nv50_bar(p) container_of((p), struct nv50_bar, base)
+#include "priv.h"
+
+struct nv50_bar {
+	struct nvkm_bar base;
+	u32 pgd_addr;
+	struct nvkm_gpuobj *mem;
+	struct nvkm_gpuobj *pad;
+	struct nvkm_gpuobj *pgd;
+	struct nvkm_vm *bar1_vm;
+	struct nvkm_gpuobj *bar1;
+	struct nvkm_vm *bar3_vm;
+	struct nvkm_gpuobj *bar3;
+};
+
+int nv50_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
+		  int, u32 pgd_addr, struct nvkm_bar **);
+void *nv50_bar_dtor(struct nvkm_bar *);
+int nv50_bar_oneinit(struct nvkm_bar *);
+int nv50_bar_init(struct nvkm_bar *);
+struct nvkm_vm *nv50_bar_kmap(struct nvkm_bar *);
+int nv50_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
+void nv50_bar_unmap(struct nvkm_bar *, struct nvkm_vma *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
index aa85f61..d834ef2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
@@ -1,30 +1,19 @@
 #ifndef __NVKM_BAR_PRIV_H__
 #define __NVKM_BAR_PRIV_H__
+#define nvkm_bar(p) container_of((p), struct nvkm_bar, subdev)
 #include <subdev/bar.h>
 
-#define nvkm_bar_create(p,e,o,d)                                            \
-	nvkm_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_bar_init(p)                                                    \
-	nvkm_subdev_init(&(p)->base)
-#define nvkm_bar_fini(p,s)                                                  \
-	nvkm_subdev_fini(&(p)->base, (s))
+void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *,
+		   int, struct nvkm_bar *);
 
-int nvkm_bar_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, int, void **);
-void nvkm_bar_destroy(struct nvkm_bar *);
-
-void _nvkm_bar_dtor(struct nvkm_object *);
-#define _nvkm_bar_init _nvkm_subdev_init
-#define _nvkm_bar_fini _nvkm_subdev_fini
-
-int  nvkm_bar_alloc(struct nvkm_bar *, struct nvkm_object *,
-		    struct nvkm_mem *, struct nvkm_object **);
+struct nvkm_bar_func {
+	void *(*dtor)(struct nvkm_bar *);
+	int (*oneinit)(struct nvkm_bar *);
+	int (*init)(struct nvkm_bar *);
+	struct nvkm_vm *(*kmap)(struct nvkm_bar *);
+	int  (*umap)(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
+	void (*flush)(struct nvkm_bar *);
+};
 
 void g84_bar_flush(struct nvkm_bar *);
-
-int gf100_bar_ctor(struct nvkm_object *, struct nvkm_object *,
-		  struct nvkm_oclass *, void *, u32,
-		  struct nvkm_object **);
-void gf100_bar_dtor(struct nvkm_object *);
-int gf100_bar_init(struct nvkm_object *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c
index 08eb03f..43f0ba1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0203.c
@@ -33,14 +33,14 @@
 
 	if (!bit_entry(bios, 'M', &bit_M)) {
 		if (bit_M.version == 2 && bit_M.length > 0x04)
-			data = nv_ro16(bios, bit_M.offset + 0x03);
+			data = nvbios_rd16(bios, bit_M.offset + 0x03);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0x00);
+			*ver = nvbios_rd08(bios, data + 0x00);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, data + 0x01);
-				*len = nv_ro08(bios, data + 0x02);
-				*cnt = nv_ro08(bios, data + 0x03);
+				*hdr = nvbios_rd08(bios, data + 0x01);
+				*len = nvbios_rd08(bios, data + 0x02);
+				*cnt = nvbios_rd08(bios, data + 0x03);
 				return data;
 			default:
 				break;
@@ -59,8 +59,8 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->type    = nv_ro08(bios, data + 0x04);
-		info->pointer = nv_ro16(bios, data + 0x05);
+		info->type    = nvbios_rd08(bios, data + 0x04);
+		info->pointer = nvbios_rd16(bios, data + 0x05);
 		break;
 	default:
 		break;
@@ -89,9 +89,9 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->type  = (nv_ro08(bios, data + 0x00) & 0x0f) >> 0;
-		info->strap = (nv_ro08(bios, data + 0x00) & 0xf0) >> 4;
-		info->group = (nv_ro08(bios, data + 0x01) & 0x0f) >> 0;
+		info->type  = (nvbios_rd08(bios, data + 0x00) & 0x0f) >> 0;
+		info->strap = (nvbios_rd08(bios, data + 0x00) & 0xf0) >> 4;
+		info->group = (nvbios_rd08(bios, data + 0x01) & 0x0f) >> 0;
 		return data;
 	default:
 		break;
@@ -103,12 +103,13 @@
 nvbios_M0203Em(struct nvkm_bios *bios, u8 ramcfg, u8 *ver, u8 *hdr,
 	       struct nvbios_M0203E *info)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	struct nvbios_M0203T M0203T;
 	u8  cnt, len, idx = 0xff;
 	u32 data;
 
 	if (!nvbios_M0203Tp(bios, ver, hdr, &cnt, &len, &M0203T)) {
-		nv_warn(bios, "M0203T not found\n");
+		nvkm_warn(subdev, "M0203T not found\n");
 		return 0x00000000;
 	}
 
@@ -119,7 +120,7 @@
 				continue;
 			return data;
 		default:
-			nv_warn(bios, "M0203T type %02x\n", M0203T.type);
+			nvkm_warn(subdev, "M0203T type %02x\n", M0203T.type);
 			return 0x00000000;
 		}
 	}
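
[Note: these vbios parsers all share the `switch (!!data * *ver)` idiom: when the table lookup failed, data is 0 and the switch value collapses to 0, so a single switch covers both "table absent" and "version unknown". A standalone demo, taking the version by value instead of by pointer.]

    #include <stdio.h>

    static unsigned int parse(unsigned int data, unsigned char ver)
    {
            switch (!!data * ver) {
            case 0x10:
                    printf("v1.0 table at %#x\n", data);
                    return data;
            default:
                    return 0;       /* no table, or unhandled version */
            }
    }

    int main(void)
    {
            parse(0x1234, 0x10);    /* hits case 0x10 */
            parse(0x0000, 0x10);    /* no table: 0 * 0x10 == 0, default */
            parse(0x1234, 0x20);    /* unknown version: default */
            return 0;
    }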
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c
index e1a8ad5..293a6af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c
@@ -34,16 +34,16 @@
 
 	if (!bit_entry(bios, 'M', &bit_M)) {
 		if (bit_M.version == 2 && bit_M.length > 0x08)
-			data = nv_ro32(bios, bit_M.offset + 0x05);
+			data = nvbios_rd32(bios, bit_M.offset + 0x05);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0x00);
+			*ver = nvbios_rd08(bios, data + 0x00);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, data + 0x01);
-				*len = nv_ro08(bios, data + 0x02);
-				*ssz = nv_ro08(bios, data + 0x03);
-				*snr = nv_ro08(bios, data + 0x04);
-				*cnt = nv_ro08(bios, data + 0x05);
+				*hdr = nvbios_rd08(bios, data + 0x01);
+				*len = nvbios_rd08(bios, data + 0x02);
+				*ssz = nvbios_rd08(bios, data + 0x03);
+				*snr = nvbios_rd08(bios, data + 0x04);
+				*cnt = nvbios_rd08(bios, data + 0x05);
 				return data;
 			default:
 				break;
@@ -63,7 +63,7 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->freq = nv_ro16(bios, data + 0x06);
+		info->freq = nvbios_rd16(bios, data + 0x06);
 		break;
 	default:
 		break;
@@ -96,7 +96,7 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->type = nv_ro08(bios, data + 0x00) & 0x0f;
+		info->type = nvbios_rd08(bios, data + 0x00) & 0x0f;
 		return data;
 	default:
 		break;
@@ -126,7 +126,7 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->data = nv_ro08(bios, data + 0x00);
+		info->data = nvbios_rd08(bios, data + 0x00);
 		return data;
 	default:
 		break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c
index 3026920..95d49a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c
@@ -34,16 +34,16 @@
 
 	if (!bit_entry(bios, 'M', &bit_M)) {
 		if (bit_M.version == 2 && bit_M.length > 0x0c)
-			data = nv_ro32(bios, bit_M.offset + 0x09);
+			data = nvbios_rd32(bios, bit_M.offset + 0x09);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0x00);
+			*ver = nvbios_rd08(bios, data + 0x00);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, data + 0x01);
-				*len = nv_ro08(bios, data + 0x02);
-				*ssz = nv_ro08(bios, data + 0x03);
+				*hdr = nvbios_rd08(bios, data + 0x01);
+				*len = nvbios_rd08(bios, data + 0x02);
+				*ssz = nvbios_rd08(bios, data + 0x03);
 				*snr = 1;
-				*cnt = nv_ro08(bios, data + 0x04);
+				*cnt = nvbios_rd08(bios, data + 0x04);
 				return data;
 			default:
 				break;
@@ -78,12 +78,12 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->v00_40 = (nv_ro08(bios, data + 0x00) & 0x40) >> 6;
-		info->bits   =  nv_ro08(bios, data + 0x00) & 0x3f;
-		info->modulo =  nv_ro08(bios, data + 0x01);
-		info->v02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
-		info->v02_07 =  nv_ro08(bios, data + 0x02) & 0x07;
-		info->v03    =  nv_ro08(bios, data + 0x03);
+		info->v00_40 = (nvbios_rd08(bios, data + 0x00) & 0x40) >> 6;
+		info->bits   =  nvbios_rd08(bios, data + 0x00) & 0x3f;
+		info->modulo =  nvbios_rd08(bios, data + 0x01);
+		info->v02_40 = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
+		info->v02_07 =  nvbios_rd08(bios, data + 0x02) & 0x07;
+		info->v03    =  nvbios_rd08(bios, data + 0x03);
 		return data;
 	default:
 		break;
@@ -122,7 +122,7 @@
 				u32 mask = (1ULL << M0209E.bits) - 1;
 				u16  off = bits / 8;
 				u8   mod = bits % 8;
-				info->data[i] = nv_ro32(bios, data + off);
+				info->data[i] = nvbios_rd32(bios, data + off);
 				info->data[i] = info->data[i] >> mod;
 				info->data[i] = info->data[i] & mask;
 			}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c
index b72edcf..3f7db3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/P0260.c
@@ -34,15 +34,15 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2 && bit_P.length > 0x63)
-			data = nv_ro32(bios, bit_P.offset + 0x60);
+			data = nvbios_rd32(bios, bit_P.offset + 0x60);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0);
+			*ver = nvbios_rd08(bios, data + 0);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, data + 1);
-				*cnt = nv_ro08(bios, data + 2);
+				*hdr = nvbios_rd08(bios, data + 1);
+				*cnt = nvbios_rd08(bios, data + 2);
 				*len = 4;
-				*xnr = nv_ro08(bios, data + 3);
+				*xnr = nvbios_rd08(bios, data + 3);
 				*xsz = 4;
 				return data;
 			default:
@@ -72,7 +72,7 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->data = nv_ro32(bios, data);
+		info->data = nvbios_rd32(bios, data);
 		return data;
 	default:
 		break;
@@ -98,7 +98,7 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x10:
-		info->data = nv_ro32(bios, data);
+		info->data = nvbios_rd32(bios, data);
 		return data;
 	default:
 		break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index 8db204f..7953689 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -53,6 +53,20 @@
 }
 
 int
+nvbios_memcmp(struct nvkm_bios *bios, u32 addr, const char *str, u32 len)
+{
+	unsigned char c1, c2;
+
+	while (len--) {
+		c1 = nvbios_rd08(bios, addr++);
+		c2 = *(str++);
+		if (c1 != c2)
+			return c1 - c2;
+	}
+	return 0;
+}
+
+int
 nvbios_extend(struct nvkm_bios *bios, u32 length)
 {
 	if (bios->size < length) {
@@ -69,62 +83,29 @@
 	return 0;
 }
 
-static u8
-nvkm_bios_rd08(struct nvkm_object *object, u64 addr)
+static void *
+nvkm_bios_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_bios *bios = (void *)object;
-	return bios->data[addr];
+	struct nvkm_bios *bios = nvkm_bios(subdev);
+	kfree(bios->data);
+	return bios;
 }
 
-static u16
-nvkm_bios_rd16(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_bios *bios = (void *)object;
-	return get_unaligned_le16(&bios->data[addr]);
-}
+static const struct nvkm_subdev_func
+nvkm_bios = {
+	.dtor = nvkm_bios_dtor,
+};
 
-static u32
-nvkm_bios_rd32(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_bios *bios = (void *)object;
-	return get_unaligned_le32(&bios->data[addr]);
-}
-
-static void
-nvkm_bios_wr08(struct nvkm_object *object, u64 addr, u8 data)
-{
-	struct nvkm_bios *bios = (void *)object;
-	bios->data[addr] = data;
-}
-
-static void
-nvkm_bios_wr16(struct nvkm_object *object, u64 addr, u16 data)
-{
-	struct nvkm_bios *bios = (void *)object;
-	put_unaligned_le16(data, &bios->data[addr]);
-}
-
-static void
-nvkm_bios_wr32(struct nvkm_object *object, u64 addr, u32 data)
-{
-	struct nvkm_bios *bios = (void *)object;
-	put_unaligned_le32(data, &bios->data[addr]);
-}
-
-static int
-nvkm_bios_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
 {
 	struct nvkm_bios *bios;
 	struct bit_entry bit_i;
 	int ret;
 
-	ret = nvkm_subdev_create(parent, engine, oclass, 0,
-				 "VBIOS", "bios", &bios);
-	*pobject = nv_object(bios);
-	if (ret)
-		return ret;
+	if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_bios, device, index, 0, &bios->subdev);
 
 	ret = nvbios_shadow(bios);
 	if (ret)
@@ -134,73 +115,33 @@
 	bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
 					  "\xff\x7f""NV\0", 5);
 	if (bios->bmp_offset) {
-		nv_info(bios, "BMP version %x.%x\n",
-			bmp_version(bios) >> 8,
-			bmp_version(bios) & 0xff);
+		nvkm_debug(&bios->subdev, "BMP version %x.%x\n",
+			   bmp_version(bios) >> 8,
+			   bmp_version(bios) & 0xff);
 	}
 
 	bios->bit_offset = nvbios_findstr(bios->data, bios->size,
 					  "\xff\xb8""BIT", 5);
 	if (bios->bit_offset)
-		nv_info(bios, "BIT signature found\n");
+		nvkm_debug(&bios->subdev, "BIT signature found\n");
 
 	/* determine the vbios version number */
 	if (!bit_entry(bios, 'i', &bit_i) && bit_i.length >= 4) {
-		bios->version.major = nv_ro08(bios, bit_i.offset + 3);
-		bios->version.chip  = nv_ro08(bios, bit_i.offset + 2);
-		bios->version.minor = nv_ro08(bios, bit_i.offset + 1);
-		bios->version.micro = nv_ro08(bios, bit_i.offset + 0);
-		bios->version.patch = nv_ro08(bios, bit_i.offset + 4);
+		bios->version.major = nvbios_rd08(bios, bit_i.offset + 3);
+		bios->version.chip  = nvbios_rd08(bios, bit_i.offset + 2);
+		bios->version.minor = nvbios_rd08(bios, bit_i.offset + 1);
+		bios->version.micro = nvbios_rd08(bios, bit_i.offset + 0);
+		bios->version.patch = nvbios_rd08(bios, bit_i.offset + 4);
 	} else
 	if (bmp_version(bios)) {
-		bios->version.major = nv_ro08(bios, bios->bmp_offset + 13);
-		bios->version.chip  = nv_ro08(bios, bios->bmp_offset + 12);
-		bios->version.minor = nv_ro08(bios, bios->bmp_offset + 11);
-		bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10);
+		bios->version.major = nvbios_rd08(bios, bios->bmp_offset + 13);
+		bios->version.chip  = nvbios_rd08(bios, bios->bmp_offset + 12);
+		bios->version.minor = nvbios_rd08(bios, bios->bmp_offset + 11);
+		bios->version.micro = nvbios_rd08(bios, bios->bmp_offset + 10);
 	}
 
-	nv_info(bios, "version %02x.%02x.%02x.%02x.%02x\n",
-		bios->version.major, bios->version.chip,
-		bios->version.minor, bios->version.micro, bios->version.patch);
-
+	nvkm_info(&bios->subdev, "version %02x.%02x.%02x.%02x.%02x\n",
+		  bios->version.major, bios->version.chip,
+		  bios->version.minor, bios->version.micro, bios->version.patch);
 	return 0;
 }
-
-static void
-nvkm_bios_dtor(struct nvkm_object *object)
-{
-	struct nvkm_bios *bios = (void *)object;
-	kfree(bios->data);
-	nvkm_subdev_destroy(&bios->base);
-}
-
-static int
-nvkm_bios_init(struct nvkm_object *object)
-{
-	struct nvkm_bios *bios = (void *)object;
-	return nvkm_subdev_init(&bios->base);
-}
-
-static int
-nvkm_bios_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nvkm_bios *bios = (void *)object;
-	return nvkm_subdev_fini(&bios->base, suspend);
-}
-
-struct nvkm_oclass
-nvkm_bios_oclass = {
-	.handle = NV_SUBDEV(VBIOS, 0x00),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nvkm_bios_ctor,
-		.dtor = nvkm_bios_dtor,
-		.init = nvkm_bios_init,
-		.fini = nvkm_bios_fini,
-		.rd08 = nvkm_bios_rd08,
-		.rd16 = nvkm_bios_rd16,
-		.rd32 = nvkm_bios_rd32,
-		.wr08 = nvkm_bios_wr08,
-		.wr16 = nvkm_bios_wr16,
-		.wr32 = nvkm_bios_wr32,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c
index eab54049..070ff33 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c
@@ -28,18 +28,18 @@
 bit_entry(struct nvkm_bios *bios, u8 id, struct bit_entry *bit)
 {
 	if (likely(bios->bit_offset)) {
-		u8  entries = nv_ro08(bios, bios->bit_offset + 10);
+		u8  entries = nvbios_rd08(bios, bios->bit_offset + 10);
 		u32 entry   = bios->bit_offset + 12;
 		while (entries--) {
-			if (nv_ro08(bios, entry + 0) == id) {
-				bit->id      = nv_ro08(bios, entry + 0);
-				bit->version = nv_ro08(bios, entry + 1);
-				bit->length  = nv_ro16(bios, entry + 2);
-				bit->offset  = nv_ro16(bios, entry + 4);
+			if (nvbios_rd08(bios, entry + 0) == id) {
+				bit->id      = nvbios_rd08(bios, entry + 0);
+				bit->version = nvbios_rd08(bios, entry + 1);
+				bit->length  = nvbios_rd16(bios, entry + 2);
+				bit->offset  = nvbios_rd16(bios, entry + 4);
 				return 0;
 			}
 
-			entry += nv_ro08(bios, bios->bit_offset + 9);
+			entry += nvbios_rd08(bios, bios->bit_offset + 9);
 		}
 
 		return -ENOENT;
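
[Note: bit_entry() above walks the BIT table: the entry stride comes from header byte 9, the count from byte 10, entries start at offset 12, and each record is [id:8][version:8][length:16][offset:16], little-endian. A standalone parser over a fabricated image, mirroring those reads; helper names are illustrative.]

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t rd08(const uint8_t *img, uint32_t a) { return img[a]; }

    static uint16_t rd16(const uint8_t *img, uint32_t a)
    {
            return img[a] | img[a + 1] << 8;        /* unaligned little-endian */
    }

    static int bit_entry(const uint8_t *img, uint32_t bit, uint8_t id,
                         uint16_t *offset)
    {
            uint8_t stride = rd08(img, bit + 9);
            uint8_t entries = rd08(img, bit + 10);
            uint32_t entry = bit + 12;

            while (entries--) {
                    if (rd08(img, entry + 0) == id) {
                            *offset = rd16(img, entry + 4);
                            return 0;
                    }
                    entry += stride;
            }
            return -1;      /* -ENOENT in the kernel */
    }

    int main(void)
    {
            uint8_t img[64] = { 0 };
            uint16_t off;

            img[9] = 6;             /* 6-byte entries */
            img[10] = 2;            /* two of them */
            img[12 + 6] = 'P';      /* second entry: id 'P'... */
            img[12 + 6 + 4] = 0x30; /* ...table at 0x0030 */

            if (!bit_entry(img, 0, 'P', &off))
                    printf("BIT 'P' table at %#x\n", off);
            return 0;
    }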
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
index 12e9585..3756ec9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
@@ -34,17 +34,17 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2)
-			boost = nv_ro16(bios, bit_P.offset + 0x30);
+			boost = nvbios_rd16(bios, bit_P.offset + 0x30);
 
 		if (boost) {
-			*ver = nv_ro08(bios, boost + 0);
+			*ver = nvbios_rd08(bios, boost + 0);
 			switch (*ver) {
 			case 0x11:
-				*hdr = nv_ro08(bios, boost + 1);
-				*cnt = nv_ro08(bios, boost + 5);
-				*len = nv_ro08(bios, boost + 2);
-				*snr = nv_ro08(bios, boost + 4);
-				*ssz = nv_ro08(bios, boost + 3);
+				*hdr = nvbios_rd08(bios, boost + 1);
+				*cnt = nvbios_rd08(bios, boost + 5);
+				*len = nvbios_rd08(bios, boost + 2);
+				*snr = nvbios_rd08(bios, boost + 4);
+				*ssz = nvbios_rd08(bios, boost + 3);
 				return boost;
 			default:
 				break;
@@ -78,9 +78,9 @@
 	u16 data = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
-		info->min    =  nv_ro16(bios, data + 0x02) * 1000;
-		info->max    =  nv_ro16(bios, data + 0x04) * 1000;
+		info->pstate = (nvbios_rd16(bios, data + 0x00) & 0x01e0) >> 5;
+		info->min    =  nvbios_rd16(bios, data + 0x02) * 1000;
+		info->max    =  nvbios_rd16(bios, data + 0x04) * 1000;
 	}
 	return data;
 }
@@ -117,10 +117,10 @@
 	data = nvbios_boostSe(bios, idx, data, ver, hdr, cnt, len);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->domain  = nv_ro08(bios, data + 0x00);
-		info->percent = nv_ro08(bios, data + 0x01);
-		info->min     = nv_ro16(bios, data + 0x02) * 1000;
-		info->max     = nv_ro16(bios, data + 0x04) * 1000;
+		info->domain  = nvbios_rd08(bios, data + 0x00);
+		info->percent = nvbios_rd08(bios, data + 0x01);
+		info->min     = nvbios_rd16(bios, data + 0x02) * 1000;
+		info->max     = nvbios_rd16(bios, data + 0x04) * 1000;
 	}
 	return data;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c
index 706a165..2768234 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/conn.c
@@ -30,12 +30,12 @@
 {
 	u32 dcb = dcb_table(bios, ver, hdr, cnt, len);
 	if (dcb && *ver >= 0x30 && *hdr >= 0x16) {
-		u32 data = nv_ro16(bios, dcb + 0x14);
+		u32 data = nvbios_rd16(bios, dcb + 0x14);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0);
-			*hdr = nv_ro08(bios, data + 1);
-			*cnt = nv_ro08(bios, data + 2);
-			*len = nv_ro08(bios, data + 3);
+			*ver = nvbios_rd08(bios, data + 0);
+			*hdr = nvbios_rd08(bios, data + 1);
+			*cnt = nvbios_rd08(bios, data + 2);
+			*len = nvbios_rd08(bios, data + 3);
 			return data;
 		}
 	}
@@ -77,18 +77,18 @@
 	switch (!!data * *ver) {
 	case 0x30:
 	case 0x40:
-		info->type     =  nv_ro08(bios, data + 0x00);
-		info->location =  nv_ro08(bios, data + 0x01) & 0x0f;
-		info->hpd      = (nv_ro08(bios, data + 0x01) & 0x30) >> 4;
-		info->dp       = (nv_ro08(bios, data + 0x01) & 0xc0) >> 6;
+		info->type     =  nvbios_rd08(bios, data + 0x00);
+		info->location =  nvbios_rd08(bios, data + 0x01) & 0x0f;
+		info->hpd      = (nvbios_rd08(bios, data + 0x01) & 0x30) >> 4;
+		info->dp       = (nvbios_rd08(bios, data + 0x01) & 0xc0) >> 6;
 		if (*len < 4)
 			return data;
-		info->hpd     |= (nv_ro08(bios, data + 0x02) & 0x03) << 2;
-		info->dp      |=  nv_ro08(bios, data + 0x02) & 0x0c;
-		info->di       = (nv_ro08(bios, data + 0x02) & 0xf0) >> 4;
-		info->hpd     |= (nv_ro08(bios, data + 0x03) & 0x07) << 4;
-		info->sr       = (nv_ro08(bios, data + 0x03) & 0x08) >> 3;
-		info->lcdid    = (nv_ro08(bios, data + 0x03) & 0x70) >> 4;
+		info->hpd     |= (nvbios_rd08(bios, data + 0x02) & 0x03) << 2;
+		info->dp      |=  nvbios_rd08(bios, data + 0x02) & 0x0c;
+		info->di       = (nvbios_rd08(bios, data + 0x02) & 0xf0) >> 4;
+		info->hpd     |= (nvbios_rd08(bios, data + 0x03) & 0x07) << 4;
+		info->sr       = (nvbios_rd08(bios, data + 0x03) & 0x08) >> 3;
+		info->lcdid    = (nvbios_rd08(bios, data + 0x03) & 0x70) >> 4;
 		return data;
 	default:
 		break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
index 16f7ad8..32e0162 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
@@ -34,17 +34,17 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2)
-			cstep = nv_ro16(bios, bit_P.offset + 0x34);
+			cstep = nvbios_rd16(bios, bit_P.offset + 0x34);
 
 		if (cstep) {
-			*ver = nv_ro08(bios, cstep + 0);
+			*ver = nvbios_rd08(bios, cstep + 0);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, cstep + 1);
-				*cnt = nv_ro08(bios, cstep + 3);
-				*len = nv_ro08(bios, cstep + 2);
-				*xnr = nv_ro08(bios, cstep + 5);
-				*xsz = nv_ro08(bios, cstep + 4);
+				*hdr = nvbios_rd08(bios, cstep + 1);
+				*cnt = nvbios_rd08(bios, cstep + 3);
+				*len = nvbios_rd08(bios, cstep + 2);
+				*xnr = nvbios_rd08(bios, cstep + 5);
+				*xsz = nvbios_rd08(bios, cstep + 4);
 				return cstep;
 			default:
 				break;
@@ -75,8 +75,8 @@
 	u16 data = nvbios_cstepEe(bios, idx, ver, hdr);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->pstate = (nv_ro16(bios, data + 0x00) & 0x01e0) >> 5;
-		info->index   = nv_ro08(bios, data + 0x03);
+		info->pstate = (nvbios_rd16(bios, data + 0x00) & 0x01e0) >> 5;
+		info->index   = nvbios_rd08(bios, data + 0x03);
 	}
 	return data;
 }
@@ -113,10 +113,10 @@
 	u16 data = nvbios_cstepXe(bios, idx, ver, hdr);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->freq    = nv_ro16(bios, data + 0x00) * 1000;
-		info->unkn[0] = nv_ro08(bios, data + 0x02);
-		info->unkn[1] = nv_ro08(bios, data + 0x03);
-		info->voltage = nv_ro08(bios, data + 0x04);
+		info->freq    = nvbios_rd16(bios, data + 0x00) * 1000;
+		info->unkn[0] = nvbios_rd08(bios, data + 0x02);
+		info->unkn[1] = nvbios_rd08(bios, data + 0x03);
+		info->voltage = nvbios_rd08(bios, data + 0x04);
 	}
 	return data;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
index 8d78140..8304b80 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dcb.c
@@ -24,38 +24,37 @@
 #include <subdev/bios.h>
 #include <subdev/bios/dcb.h>
 
-#include <core/device.h>
-
 u16
 dcb_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
-	struct nvkm_device *device = nv_device(bios);
+	struct nvkm_subdev *subdev = &bios->subdev;
+	struct nvkm_device *device = subdev->device;
 	u16 dcb = 0x0000;
 
 	if (device->card_type > NV_04)
-		dcb = nv_ro16(bios, 0x36);
+		dcb = nvbios_rd16(bios, 0x36);
 	if (!dcb) {
-		nv_warn(bios, "DCB table not found\n");
+		nvkm_warn(subdev, "DCB table not found\n");
 		return dcb;
 	}
 
-	*ver = nv_ro08(bios, dcb);
+	*ver = nvbios_rd08(bios, dcb);
 
 	if (*ver >= 0x42) {
-		nv_warn(bios, "DCB version 0x%02x unknown\n", *ver);
+		nvkm_warn(subdev, "DCB version 0x%02x unknown\n", *ver);
 		return 0x0000;
 	} else
 	if (*ver >= 0x30) {
-		if (nv_ro32(bios, dcb + 6) == 0x4edcbdcb) {
-			*hdr = nv_ro08(bios, dcb + 1);
-			*cnt = nv_ro08(bios, dcb + 2);
-			*len = nv_ro08(bios, dcb + 3);
+		if (nvbios_rd32(bios, dcb + 6) == 0x4edcbdcb) {
+			*hdr = nvbios_rd08(bios, dcb + 1);
+			*cnt = nvbios_rd08(bios, dcb + 2);
+			*len = nvbios_rd08(bios, dcb + 3);
 			return dcb;
 		}
 	} else
 	if (*ver >= 0x20) {
-		if (nv_ro32(bios, dcb + 4) == 0x4edcbdcb) {
-			u16 i2c = nv_ro16(bios, dcb + 2);
+		if (nvbios_rd32(bios, dcb + 4) == 0x4edcbdcb) {
+			u16 i2c = nvbios_rd16(bios, dcb + 2);
 			*hdr = 8;
 			*cnt = (i2c - dcb) / 8;
 			*len = 8;
@@ -63,8 +62,8 @@
 		}
 	} else
 	if (*ver >= 0x15) {
-		if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7)) {
-			u16 i2c = nv_ro16(bios, dcb + 2);
+		if (!nvbios_memcmp(bios, dcb - 7, "DEV_REC", 7)) {
+			u16 i2c = nvbios_rd16(bios, dcb + 2);
 			*hdr = 4;
 			*cnt = (i2c - dcb) / 10;
 			*len = 10;
@@ -88,11 +87,11 @@
 		 *
 		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
 		 */
-		nv_warn(bios, "DCB contains no useful data\n");
+		nvkm_debug(subdev, "DCB contains no useful data\n");
 		return 0x0000;
 	}
 
-	nv_warn(bios, "DCB header validation failed\n");
+	nvkm_warn(subdev, "DCB header validation failed\n");
 	return 0x0000;
 }
 
@@ -126,7 +125,7 @@
 	memset(outp, 0x00, sizeof(*outp));
 	if (dcb) {
 		if (*ver >= 0x20) {
-			u32 conn = nv_ro32(bios, dcb + 0x00);
+			u32 conn = nvbios_rd32(bios, dcb + 0x00);
 			outp->or        = (conn & 0x0f000000) >> 24;
 			outp->location  = (conn & 0x00300000) >> 20;
 			outp->bus       = (conn & 0x000f0000) >> 16;
@@ -140,7 +139,7 @@
 		}
 
 		if (*ver >= 0x40) {
-			u32 conf = nv_ro32(bios, dcb + 0x04);
+			u32 conf = nvbios_rd32(bios, dcb + 0x04);
 			switch (outp->type) {
 			case DCB_OUTPUT_DP:
 				switch (conf & 0x00e00000) {
@@ -156,20 +155,19 @@
 					break;
 				}
 
-				outp->dpconf.link_nr = (conf & 0x0f000000) >> 24;
-				if (*ver < 0x41) {
-					switch (outp->dpconf.link_nr) {
-					case 0x0f:
-						outp->dpconf.link_nr = 4;
-						break;
-					case 0x03:
-						outp->dpconf.link_nr = 2;
-						break;
-					case 0x01:
-					default:
-						outp->dpconf.link_nr = 1;
-						break;
-					}
+				switch ((conf & 0x0f000000) >> 24) {
+				case 0xf:
+				case 0x4:
+					outp->dpconf.link_nr = 4;
+					break;
+				case 0x3:
+				case 0x2:
+					outp->dpconf.link_nr = 2;
+					break;
+				case 0x1:
+				default:
+					outp->dpconf.link_nr = 1;
+					break;
 				}
 
 				/* fall-through... */
@@ -215,14 +213,14 @@
 	u16 outp;
 
 	while ((outp = dcb_outp(bios, ++idx, &ver, &len))) {
-		if (nv_ro32(bios, outp) == 0x00000000)
+		if (nvbios_rd32(bios, outp) == 0x00000000)
 			break; /* seen on an NV11 with DCB v1.5 */
-		if (nv_ro32(bios, outp) == 0xffffffff)
+		if (nvbios_rd32(bios, outp) == 0xffffffff)
 			break; /* seen on an NV17 with DCB v2.0 */
 
-		if (nv_ro08(bios, outp) == DCB_OUTPUT_UNUSED)
+		if (nvbios_rd08(bios, outp) == DCB_OUTPUT_UNUSED)
 			continue;
-		if (nv_ro08(bios, outp) == DCB_OUTPUT_EOL)
+		if (nvbios_rd08(bios, outp) == DCB_OUTPUT_EOL)
 			break;
 
 		ret = exec(bios, data, idx, outp);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
index 262c410..a5e9213 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
@@ -33,17 +33,17 @@
 
 	if (!bit_entry(bios, 'U', &U)) {
 		if (U.version == 1) {
-			u16 data = nv_ro16(bios, U.offset);
+			u16 data = nvbios_rd16(bios, U.offset);
 			if (data) {
-				*ver = nv_ro08(bios, data + 0x00);
+				*ver = nvbios_rd08(bios, data + 0x00);
 				switch (*ver) {
 				case 0x20:
 				case 0x21:
 				case 0x22:
-					*hdr = nv_ro08(bios, data + 0x01);
-					*len = nv_ro08(bios, data + 0x02);
-					*cnt = nv_ro08(bios, data + 0x03);
-					*sub = nv_ro08(bios, data + 0x04);
+					*hdr = nvbios_rd08(bios, data + 0x01);
+					*len = nvbios_rd08(bios, data + 0x02);
+					*cnt = nvbios_rd08(bios, data + 0x03);
+					*sub = nvbios_rd08(bios, data + 0x04);
 					return data;
 				default:
 					break;
@@ -72,7 +72,7 @@
 {
 	u16 data = nvbios_disp_entry(bios, idx, ver, len, sub);
 	if (data && *len >= 2) {
-		info->data = nv_ro16(bios, data + 0);
+		info->data = nvbios_rd16(bios, data + 0);
 		return data;
 	}
 	return 0x0000;
@@ -85,7 +85,7 @@
 	struct nvbios_disp info;
 	u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info);
 	if (data) {
-		*cnt = nv_ro08(bios, info.data + 0x05);
+		*cnt = nvbios_rd08(bios, info.data + 0x05);
 		*len = 0x06;
 		data = info.data;
 	}
@@ -98,15 +98,15 @@
 {
 	u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
 	if (data && *hdr >= 0x0a) {
-		info->type      = nv_ro16(bios, data + 0x00);
-		info->mask      = nv_ro32(bios, data + 0x02);
+		info->type      = nvbios_rd16(bios, data + 0x00);
+		info->mask      = nvbios_rd32(bios, data + 0x02);
 		if (*ver <= 0x20) /* match any link */
 			info->mask |= 0x00c0;
-		info->script[0] = nv_ro16(bios, data + 0x06);
-		info->script[1] = nv_ro16(bios, data + 0x08);
+		info->script[0] = nvbios_rd16(bios, data + 0x06);
+		info->script[1] = nvbios_rd16(bios, data + 0x08);
 		info->script[2] = 0x0000;
 		if (*hdr >= 0x0c)
-			info->script[2] = nv_ro16(bios, data + 0x0a);
+			info->script[2] = nvbios_rd16(bios, data + 0x0a);
 		return data;
 	}
 	return 0x0000;
@@ -141,9 +141,9 @@
 {
 	u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
 	if (data) {
-		info->match     = nv_ro16(bios, data + 0x00);
-		info->clkcmp[0] = nv_ro16(bios, data + 0x02);
-		info->clkcmp[1] = nv_ro16(bios, data + 0x04);
+		info->match     = nvbios_rd16(bios, data + 0x00);
+		info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
+		info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
 	}
 	return data;
 }
@@ -164,8 +164,8 @@
 nvbios_oclk_match(struct nvkm_bios *bios, u16 cmp, u32 khz)
 {
 	while (cmp) {
-		if (khz / 10 >= nv_ro16(bios, cmp + 0x00))
-			return  nv_ro16(bios, cmp + 0x02);
+		if (khz / 10 >= nvbios_rd16(bios, cmp + 0x00))
+			return  nvbios_rd16(bios, cmp + 0x02);
 		cmp += 0x04;
 	}
 	return 0x0000;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index 95970fa..0533247 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -32,17 +32,17 @@
 
 	if (!bit_entry(bios, 'd', &d)) {
 		if (d.version == 1 && d.length >= 2) {
-			u16 data = nv_ro16(bios, d.offset);
+			u16 data = nvbios_rd16(bios, d.offset);
 			if (data) {
-				*ver = nv_ro08(bios, data + 0x00);
+				*ver = nvbios_rd08(bios, data + 0x00);
 				switch (*ver) {
 				case 0x21:
 				case 0x30:
 				case 0x40:
 				case 0x41:
-					*hdr = nv_ro08(bios, data + 0x01);
-					*len = nv_ro08(bios, data + 0x02);
-					*cnt = nv_ro08(bios, data + 0x03);
+					*hdr = nvbios_rd08(bios, data + 0x01);
+					*len = nvbios_rd08(bios, data + 0x02);
+					*cnt = nvbios_rd08(bios, data + 0x03);
 					return data;
 				default:
 					break;
@@ -60,17 +60,17 @@
 {
 	u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len);
 	if (data && idx < *cnt) {
-		u16 outp = nv_ro16(bios, data + *hdr + idx * *len);
+		u16 outp = nvbios_rd16(bios, data + *hdr + idx * *len);
 		switch (*ver * !!outp) {
 		case 0x21:
 		case 0x30:
-			*hdr = nv_ro08(bios, data + 0x04);
-			*len = nv_ro08(bios, data + 0x05);
-			*cnt = nv_ro08(bios, outp + 0x04);
+			*hdr = nvbios_rd08(bios, data + 0x04);
+			*len = nvbios_rd08(bios, data + 0x05);
+			*cnt = nvbios_rd08(bios, outp + 0x04);
 			break;
 		case 0x40:
 		case 0x41:
-			*hdr = nv_ro08(bios, data + 0x04);
+			*hdr = nvbios_rd08(bios, data + 0x04);
 			*cnt = 0;
 			*len = 0;
 			break;
@@ -91,31 +91,31 @@
 	u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
 	memset(info, 0x00, sizeof(*info));
 	if (data && *ver) {
-		info->type = nv_ro16(bios, data + 0x00);
-		info->mask = nv_ro16(bios, data + 0x02);
+		info->type = nvbios_rd16(bios, data + 0x00);
+		info->mask = nvbios_rd16(bios, data + 0x02);
 		switch (*ver) {
 		case 0x21:
 		case 0x30:
-			info->flags     = nv_ro08(bios, data + 0x05);
-			info->script[0] = nv_ro16(bios, data + 0x06);
-			info->script[1] = nv_ro16(bios, data + 0x08);
-			info->lnkcmp    = nv_ro16(bios, data + 0x0a);
+			info->flags     = nvbios_rd08(bios, data + 0x05);
+			info->script[0] = nvbios_rd16(bios, data + 0x06);
+			info->script[1] = nvbios_rd16(bios, data + 0x08);
+			info->lnkcmp    = nvbios_rd16(bios, data + 0x0a);
 			if (*len >= 0x0f) {
-				info->script[2] = nv_ro16(bios, data + 0x0c);
-				info->script[3] = nv_ro16(bios, data + 0x0e);
+				info->script[2] = nvbios_rd16(bios, data + 0x0c);
+				info->script[3] = nvbios_rd16(bios, data + 0x0e);
 			}
 			if (*len >= 0x11)
-				info->script[4] = nv_ro16(bios, data + 0x10);
+				info->script[4] = nvbios_rd16(bios, data + 0x10);
 			break;
 		case 0x40:
 		case 0x41:
-			info->flags     = nv_ro08(bios, data + 0x04);
-			info->script[0] = nv_ro16(bios, data + 0x05);
-			info->script[1] = nv_ro16(bios, data + 0x07);
-			info->lnkcmp    = nv_ro16(bios, data + 0x09);
-			info->script[2] = nv_ro16(bios, data + 0x0b);
-			info->script[3] = nv_ro16(bios, data + 0x0d);
-			info->script[4] = nv_ro16(bios, data + 0x0f);
+			info->flags     = nvbios_rd08(bios, data + 0x04);
+			info->script[0] = nvbios_rd16(bios, data + 0x05);
+			info->script[1] = nvbios_rd16(bios, data + 0x07);
+			info->lnkcmp    = nvbios_rd16(bios, data + 0x09);
+			info->script[2] = nvbios_rd16(bios, data + 0x0b);
+			info->script[3] = nvbios_rd16(bios, data + 0x0d);
+			info->script[4] = nvbios_rd16(bios, data + 0x0f);
 			break;
 		default:
 			data = 0x0000;
@@ -147,8 +147,9 @@
 	if (*ver >= 0x40) {
 		outp = nvbios_dp_table(bios, ver, hdr, cnt, len);
 		*hdr = *hdr + (*len * *cnt);
-		*len = nv_ro08(bios, outp + 0x06);
-		*cnt = nv_ro08(bios, outp + 0x07);
+		*len = nvbios_rd08(bios, outp + 0x06);
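+		/* byte 0x07 is the per-set entry count; byte 0x05 appears
+		 * to be the number of sets, so multiply for the total
+		 */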
+		*cnt = nvbios_rd08(bios, outp + 0x07) *
+		       nvbios_rd08(bios, outp + 0x05);
 	}
 
 	if (idx < *cnt)
@@ -167,17 +168,17 @@
 	if (data) {
 		switch (*ver) {
 		case 0x21:
-			info->dc    = nv_ro08(bios, data + 0x02);
-			info->pe    = nv_ro08(bios, data + 0x03);
-			info->tx_pu = nv_ro08(bios, data + 0x04);
+			info->dc    = nvbios_rd08(bios, data + 0x02);
+			info->pe    = nvbios_rd08(bios, data + 0x03);
+			info->tx_pu = nvbios_rd08(bios, data + 0x04);
 			break;
 		case 0x30:
 		case 0x40:
 		case 0x41:
-			info->pc    = nv_ro08(bios, data + 0x00);
-			info->dc    = nv_ro08(bios, data + 0x01);
-			info->pe    = nv_ro08(bios, data + 0x02);
-			info->tx_pu = nv_ro08(bios, data + 0x03) & 0x0f;
+			info->pc    = nvbios_rd08(bios, data + 0x00);
+			info->dc    = nvbios_rd08(bios, data + 0x01);
+			info->pe    = nvbios_rd08(bios, data + 0x02);
+			info->tx_pu = nvbios_rd08(bios, data + 0x03);
 			break;
 		default:
 			data = 0x0000;
@@ -196,17 +197,15 @@
 	u16 data;
 
 	if (*ver >= 0x30) {
-		/*XXX: there's a second set of these on at least 4.1, that
-		 *     i've witnessed nvidia using instead of the first
-		 *     on gm204.  figure out what/why
-		 */
 		const u8 vsoff[] = { 0, 4, 7, 9 };
 		idx = (pc * 10) + vsoff[vs] + pe;
+		if (*ver >= 0x40 && *hdr >= 0x12)
+			idx += nvbios_rd08(bios, outp + 0x11) * 40;
 	} else {
 		while ((data = nvbios_dpcfg_entry(bios, outp, ++idx,
 						  ver, hdr, cnt, len))) {
-			if (nv_ro08(bios, data + 0x00) == vs &&
-			    nv_ro08(bios, data + 0x01) == pe)
+			if (nvbios_rd08(bios, data + 0x00) == vs &&
+			    nvbios_rd08(bios, data + 0x01) == pe)
 				break;
 		}
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
index a8503a1..c9e6f6f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/extdev.c
@@ -35,14 +35,14 @@
 	if (!dcb || (dcb_ver != 0x30 && dcb_ver != 0x40))
 		return 0x0000;
 
-	extdev = nv_ro16(bios, dcb + 18);
+	extdev = nvbios_rd16(bios, dcb + 18);
 	if (!extdev)
 		return 0x0000;
 
-	*ver = nv_ro08(bios, extdev + 0);
-	*hdr = nv_ro08(bios, extdev + 1);
-	*cnt = nv_ro08(bios, extdev + 2);
-	*len = nv_ro08(bios, extdev + 3);
+	*ver = nvbios_rd08(bios, extdev + 0);
+	*hdr = nvbios_rd08(bios, extdev + 1);
+	*cnt = nvbios_rd08(bios, extdev + 2);
+	*len = nvbios_rd08(bios, extdev + 3);
 	return extdev + *hdr;
 }
 
@@ -60,9 +60,9 @@
 extdev_parse_entry(struct nvkm_bios *bios, u16 offset,
 		   struct nvbios_extdev_func *entry)
 {
-	entry->type = nv_ro08(bios, offset + 0);
-	entry->addr = nv_ro08(bios, offset + 1);
-	entry->bus = (nv_ro08(bios, offset + 2) >> 4) & 1;
+	entry->type = nvbios_rd08(bios, offset + 0);
+	entry->addr = nvbios_rd08(bios, offset + 1);
+	entry->bus = (nvbios_rd08(bios, offset + 2) >> 4) & 1;
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
index 8dba70d..43006db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
@@ -33,15 +33,15 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2 && bit_P.length >= 0x5a)
-			fan = nv_ro16(bios, bit_P.offset + 0x58);
+			fan = nvbios_rd16(bios, bit_P.offset + 0x58);
 
 		if (fan) {
-			*ver = nv_ro08(bios, fan + 0);
+			*ver = nvbios_rd08(bios, fan + 0);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, fan + 1);
-				*len = nv_ro08(bios, fan + 2);
-				*cnt = nv_ro08(bios, fan + 3);
+				*hdr = nvbios_rd08(bios, fan + 1);
+				*len = nvbios_rd08(bios, fan + 2);
+				*cnt = nvbios_rd08(bios, fan + 3);
 				return fan;
 			default:
 				break;
@@ -69,7 +69,7 @@
 
 	u16 data = nvbios_fan_entry(bios, 0, &ver, &hdr, &cnt, &len);
 	if (data) {
-		u8 type = nv_ro08(bios, data + 0x00);
+		u8 type = nvbios_rd08(bios, data + 0x00);
 		switch (type) {
 		case 0:
 			fan->type = NVBIOS_THERM_FAN_TOGGLE;
@@ -83,10 +83,10 @@
 			fan->type = NVBIOS_THERM_FAN_UNK;
 		}
 
-		fan->min_duty = nv_ro08(bios, data + 0x02);
-		fan->max_duty = nv_ro08(bios, data + 0x03);
+		fan->min_duty = nvbios_rd08(bios, data + 0x02);
+		fan->max_duty = nvbios_rd08(bios, data + 0x03);
 
-		fan->pwm_freq = nv_ro32(bios, data + 0x0b) & 0xffffff;
+		fan->pwm_freq = nvbios_rd32(bios, data + 0x0b) & 0xffffff;
 	}
 
 	return data;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c
index 8ce154d..2107b55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c
@@ -33,22 +33,22 @@
 	u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
 	if (dcb) {
 		if (*ver >= 0x30 && *hdr >= 0x0c)
-			data = nv_ro16(bios, dcb + 0x0a);
+			data = nvbios_rd16(bios, dcb + 0x0a);
 		else
-		if (*ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
-			data = nv_ro16(bios, dcb - 0x0f);
+		if (*ver >= 0x22 && nvbios_rd08(bios, dcb - 1) >= 0x13)
+			data = nvbios_rd16(bios, dcb - 0x0f);
 
 		if (data) {
-			*ver = nv_ro08(bios, data + 0x00);
+			*ver = nvbios_rd08(bios, data + 0x00);
 			if (*ver < 0x30) {
 				*hdr = 3;
-				*cnt = nv_ro08(bios, data + 0x02);
-				*len = nv_ro08(bios, data + 0x01);
+				*cnt = nvbios_rd08(bios, data + 0x02);
+				*len = nvbios_rd08(bios, data + 0x01);
 			} else
 			if (*ver <= 0x41) {
-				*hdr = nv_ro08(bios, data + 0x01);
-				*cnt = nv_ro08(bios, data + 0x02);
-				*len = nv_ro08(bios, data + 0x03);
+				*hdr = nvbios_rd08(bios, data + 0x01);
+				*cnt = nvbios_rd08(bios, data + 0x02);
+				*len = nvbios_rd08(bios, data + 0x03);
 			} else {
 				data = 0x0000;
 			}
@@ -81,7 +81,7 @@
 	u16 data = dcb_gpio_entry(bios, idx, ent, ver, len);
 	if (data) {
 		if (*ver < 0x40) {
-			u16 info = nv_ro16(bios, data);
+			u16 info = nvbios_rd16(bios, data);
 			*gpio = (struct dcb_gpio_func) {
 				.line = (info & 0x001f) >> 0,
 				.func = (info & 0x07e0) >> 5,
@@ -91,7 +91,7 @@
 			};
 		} else
 		if (*ver < 0x41) {
-			u32 info = nv_ro32(bios, data);
+			u32 info = nvbios_rd32(bios, data);
 			*gpio = (struct dcb_gpio_func) {
 				.line = (info & 0x0000001f) >> 0,
 				.func = (info & 0x0000ff00) >> 8,
@@ -100,8 +100,8 @@
 				.param = !!(info & 0x80000000),
 			};
 		} else {
-			u32 info = nv_ro32(bios, data + 0);
-			u8 info1 = nv_ro32(bios, data + 4);
+			u32 info = nvbios_rd32(bios, data + 0);
+			u8 info1 = nvbios_rd32(bios, data + 4);
 			*gpio = (struct dcb_gpio_func) {
 				.line = (info & 0x0000003f) >> 0,
 				.func = (info & 0x0000ff00) >> 8,
@@ -131,8 +131,8 @@
 	/* DCB 2.2, fixed TVDAC GPIO data */
 	if ((data = dcb_table(bios, ver, &hdr, &cnt, len))) {
 		if (*ver >= 0x22 && *ver < 0x30 && func == DCB_GPIO_TVDAC0) {
-			u8 conf = nv_ro08(bios, data - 5);
-			u8 addr = nv_ro08(bios, data - 4);
+			u8 conf = nvbios_rd08(bios, data - 5);
+			u8 addr = nvbios_rd08(bios, data - 4);
 			if (conf & 0x01) {
 				*gpio = (struct dcb_gpio_func) {
 					.func = DCB_GPIO_TVDAC0,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
index c4e1f08..0fc60be 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c
@@ -32,21 +32,21 @@
 	u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
 	if (dcb) {
 		if (*ver >= 0x15)
-			i2c = nv_ro16(bios, dcb + 2);
+			i2c = nvbios_rd16(bios, dcb + 2);
 		if (*ver >= 0x30)
-			i2c = nv_ro16(bios, dcb + 4);
+			i2c = nvbios_rd16(bios, dcb + 4);
 	}
 
 	if (i2c && *ver >= 0x42) {
-		nv_warn(bios, "ccb %02x not supported\n", *ver);
+		nvkm_warn(&bios->subdev, "ccb %02x not supported\n", *ver);
 		return 0x0000;
 	}
 
 	if (i2c && *ver >= 0x30) {
-		*ver = nv_ro08(bios, i2c + 0);
-		*hdr = nv_ro08(bios, i2c + 1);
-		*cnt = nv_ro08(bios, i2c + 2);
-		*len = nv_ro08(bios, i2c + 3);
+		*ver = nvbios_rd08(bios, i2c + 0);
+		*hdr = nvbios_rd08(bios, i2c + 1);
+		*cnt = nvbios_rd08(bios, i2c + 2);
+		*len = nvbios_rd08(bios, i2c + 3);
 	} else {
 		*ver = *ver; /* use DCB version */
 		*hdr = 0;
@@ -70,13 +70,14 @@
 int
 dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	u8  ver, len;
 	u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
 	if (ent) {
 		if (ver >= 0x41) {
-			u32 ent_value = nv_ro32(bios, ent);
-			u8 i2c_port = (ent_value >> 27) & 0x1f;
-			u8 dpaux_port = (ent_value >> 22) & 0x1f;
+			u32 ent_value = nvbios_rd32(bios, ent);
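+			/* DCB 4.1: i2c port in bits 4:0, aux port in bits 9:5 */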
+			u8 i2c_port = (ent_value >> 0) & 0x1f;
+			u8 dpaux_port = (ent_value >> 5) & 0x1f;
 			/* value 0x1f means unused according to DCB 4.x spec */
 			if (i2c_port == 0x1f && dpaux_port == 0x1f)
 				info->type = DCB_I2C_UNUSED;
@@ -84,9 +85,9 @@
 				info->type = DCB_I2C_PMGR;
 		} else
 		if (ver >= 0x30) {
-			info->type = nv_ro08(bios, ent + 0x03);
+			info->type = nvbios_rd08(bios, ent + 0x03);
 		} else {
-			info->type = nv_ro08(bios, ent + 0x03) & 0x07;
+			info->type = nvbios_rd08(bios, ent + 0x03) & 0x07;
 			if (info->type == 0x07)
 				info->type = DCB_I2C_UNUSED;
 		}
@@ -98,27 +99,27 @@
 
 		switch (info->type) {
 		case DCB_I2C_NV04_BIT:
-			info->drive = nv_ro08(bios, ent + 0);
-			info->sense = nv_ro08(bios, ent + 1);
+			info->drive = nvbios_rd08(bios, ent + 0);
+			info->sense = nvbios_rd08(bios, ent + 1);
 			return 0;
 		case DCB_I2C_NV4E_BIT:
-			info->drive = nv_ro08(bios, ent + 1);
+			info->drive = nvbios_rd08(bios, ent + 1);
 			return 0;
 		case DCB_I2C_NVIO_BIT:
-			info->drive = nv_ro08(bios, ent + 0) & 0x0f;
-			if (nv_ro08(bios, ent + 1) & 0x01)
-				info->share = nv_ro08(bios, ent + 1) >> 1;
+			info->drive = nvbios_rd08(bios, ent + 0) & 0x0f;
+			if (nvbios_rd08(bios, ent + 1) & 0x01)
+				info->share = nvbios_rd08(bios, ent + 1) >> 1;
 			return 0;
 		case DCB_I2C_NVIO_AUX:
-			info->auxch = nv_ro08(bios, ent + 0) & 0x0f;
-			if (nv_ro08(bios, ent + 1) & 0x01)
+			info->auxch = nvbios_rd08(bios, ent + 0) & 0x0f;
+			if (nvbios_rd08(bios, ent + 1) & 0x01)
 				info->share = info->auxch;
 			return 0;
 		case DCB_I2C_PMGR:
-			info->drive = (nv_ro16(bios, ent + 0) & 0x01f) >> 0;
+			info->drive = (nvbios_rd16(bios, ent + 0) & 0x01f) >> 0;
 			if (info->drive == 0x1f)
 				info->drive = DCB_I2C_UNUSED;
-			info->auxch = (nv_ro16(bios, ent + 0) & 0x3e0) >> 5;
+			info->auxch = (nvbios_rd16(bios, ent + 0) & 0x3e0) >> 5;
 			if (info->auxch == 0x1f)
 				info->auxch = DCB_I2C_UNUSED;
 			info->share = info->auxch;
@@ -126,7 +127,7 @@
 		case DCB_I2C_UNUSED:
 			return 0;
 		default:
-			nv_warn(bios, "unknown i2c type %d\n", info->type);
+			nvkm_warn(subdev, "unknown i2c type %d\n", info->type);
 			info->type = DCB_I2C_UNUSED;
 			return 0;
 		}
@@ -136,21 +137,21 @@
 		/* BMP (from v4.0) has i2c info in the structure; it's in a
 		 * fixed location on earlier VBIOS
 		 */
-		if (nv_ro08(bios, bios->bmp_offset + 5) < 4)
+		if (nvbios_rd08(bios, bios->bmp_offset + 5) < 4)
 			ent = 0x0048;
 		else
 			ent = 0x0036 + bios->bmp_offset;
 
 		if (idx == 0) {
-			info->drive = nv_ro08(bios, ent + 4);
+			info->drive = nvbios_rd08(bios, ent + 4);
 			if (!info->drive) info->drive = 0x3f;
-			info->sense = nv_ro08(bios, ent + 5);
+			info->sense = nvbios_rd08(bios, ent + 5);
 			if (!info->sense) info->sense = 0x3e;
 		} else
 		if (idx == 1) {
-			info->drive = nv_ro08(bios, ent + 6);
+			info->drive = nvbios_rd08(bios, ent + 6);
 			if (!info->drive) info->drive = 0x37;
-			info->sense = nv_ro08(bios, ent + 7);
+			info->sense = nvbios_rd08(bios, ent + 7);
 			if (!info->sense) info->sense = 0x36;
 		}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
index 1815540..74b14cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
@@ -29,20 +29,21 @@
 static bool
 nvbios_imagen(struct nvkm_bios *bios, struct nvbios_image *image)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	struct nvbios_pcirT pcir;
 	struct nvbios_npdeT npde;
 	u8  ver;
 	u16 hdr;
 	u32 data;
 
-	switch ((data = nv_ro16(bios, image->base + 0x00))) {
+	switch ((data = nvbios_rd16(bios, image->base + 0x00))) {
 	case 0xaa55:
 	case 0xbb77:
 	case 0x4e56: /* NV */
 		break;
 	default:
-		nv_debug(bios, "%08x: ROM signature (%04x) unknown\n",
-			 image->base, data);
+		nvkm_debug(subdev, "%08x: ROM signature (%04x) unknown\n",
+			   image->base, data);
 		return false;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index f4611e3..65af314 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -31,18 +31,18 @@
 #include <subdev/bios/init.h>
 #include <subdev/bios/ramcfg.h>
 
-#include <core/device.h>
 #include <subdev/devinit.h>
 #include <subdev/gpio.h>
 #include <subdev/i2c.h>
 #include <subdev/vga.h>
 
 #define bioslog(lvl, fmt, args...) do {                                        \
-	nv_printk(init->bios, lvl, "0x%04x[%c]: "fmt, init->offset,            \
-		  init_exec(init) ? '0' + (init->nested - 1) : ' ', ##args);   \
+	nvkm_printk(init->subdev, lvl, info, "0x%04x[%c]: "fmt,                \
+		    init->offset, init_exec(init) ?                            \
+		    '0' + (init->nested - 1) : ' ', ##args);                   \
 } while(0)
 #define cont(fmt, args...) do {                                                \
-	if (nv_subdev(init->bios)->debug >= NV_DBG_TRACE)                      \
+	if (init->subdev->debug >= NV_DBG_TRACE)                               \
 		printk(fmt, ##args);                                           \
 } while(0)
 #define trace(fmt, args...) bioslog(TRACE, fmt, ##args)
@@ -141,7 +141,7 @@
 static inline u32
 init_nvreg(struct nvbios_init *init, u32 reg)
 {
-	struct nvkm_devinit *devinit = nvkm_devinit(init->bios);
+	struct nvkm_devinit *devinit = init->bios->subdev.device->devinit;
 
 	/* C51 (at least) sometimes has the lower bits set which the VBIOS
 	 * interprets to mean that access needs to go through certain IO
@@ -154,7 +154,7 @@
 	/* GF8+ display scripts need register addresses mangled a bit to
 	 * select a specific CRTC/OR
 	 */
-	if (nv_device(init->bios)->card_type >= NV_50) {
+	if (init->bios->subdev.device->card_type >= NV_50) {
 		if (reg & 0x80000000) {
 			reg += init_crtc(init) * 0x800;
 			reg &= ~0x80000000;
@@ -173,35 +173,36 @@
 	if (reg & ~0x00fffffc)
 		warn("unknown bits in register 0x%08x\n", reg);
 
-	if (devinit->mmio)
-		reg = devinit->mmio(devinit, reg);
-	return reg;
+	return nvkm_devinit_mmio(devinit, reg);
 }
 
 static u32
 init_rd32(struct nvbios_init *init, u32 reg)
 {
+	struct nvkm_device *device = init->bios->subdev.device;
 	reg = init_nvreg(init, reg);
 	if (reg != ~0 && init_exec(init))
-		return nv_rd32(init->subdev, reg);
+		return nvkm_rd32(device, reg);
 	return 0x00000000;
 }
 
 static void
 init_wr32(struct nvbios_init *init, u32 reg, u32 val)
 {
+	struct nvkm_device *device = init->bios->subdev.device;
 	reg = init_nvreg(init, reg);
 	if (reg != ~0 && init_exec(init))
-		nv_wr32(init->subdev, reg, val);
+		nvkm_wr32(device, reg, val);
 }
 
 static u32
 init_mask(struct nvbios_init *init, u32 reg, u32 mask, u32 val)
 {
+	struct nvkm_device *device = init->bios->subdev.device;
 	reg = init_nvreg(init, reg);
 	if (reg != ~0 && init_exec(init)) {
-		u32 tmp = nv_rd32(init->subdev, reg);
-		nv_wr32(init->subdev, reg, (tmp & ~mask) | val);
+		u32 tmp = nvkm_rd32(device, reg);
+		nvkm_wr32(device, reg, (tmp & ~mask) | val);
 		return tmp;
 	}
 	return 0x00000000;
@@ -211,7 +212,7 @@
 init_rdport(struct nvbios_init *init, u16 port)
 {
 	if (init_exec(init))
-		return nv_rdport(init->subdev, init->crtc, port);
+		return nvkm_rdport(init->subdev->device, init->crtc, port);
 	return 0x00;
 }
 
@@ -219,7 +220,7 @@
 init_wrport(struct nvbios_init *init, u16 port, u8 value)
 {
 	if (init_exec(init))
-		nv_wrport(init->subdev, init->crtc, port, value);
+		nvkm_wrport(init->subdev->device, init->crtc, port, value);
 }
 
 static u8
@@ -228,7 +229,7 @@
 	struct nvkm_subdev *subdev = init->subdev;
 	if (init_exec(init)) {
 		int head = init->crtc < 0 ? 0 : init->crtc;
-		return nv_rdvgai(subdev, head, port, index);
+		return nvkm_rdvgai(subdev->device, head, port, index);
 	}
 	return 0x00;
 }
@@ -236,80 +237,80 @@
 static void
 init_wrvgai(struct nvbios_init *init, u16 port, u8 index, u8 value)
 {
+	struct nvkm_device *device = init->subdev->device;
+
 	/* force head 0 for updates to cr44, it only exists on first head */
-	if (nv_device(init->subdev)->card_type < NV_50) {
+	if (device->card_type < NV_50) {
 		if (port == 0x03d4 && index == 0x44)
 			init->crtc = 0;
 	}
 
 	if (init_exec(init)) {
 		int head = init->crtc < 0 ? 0 : init->crtc;
-		nv_wrvgai(init->subdev, head, port, index, value);
+		nvkm_wrvgai(device, head, port, index, value);
 	}
 
 	/* select head 1 if cr44 write selected it */
-	if (nv_device(init->subdev)->card_type < NV_50) {
+	if (device->card_type < NV_50) {
 		if (port == 0x03d4 && index == 0x44 && value == 3)
 			init->crtc = 1;
 	}
 }
 
-static struct nvkm_i2c_port *
+static struct i2c_adapter *
 init_i2c(struct nvbios_init *init, int index)
 {
-	struct nvkm_i2c *i2c = nvkm_i2c(init->bios);
+	struct nvkm_i2c *i2c = init->bios->subdev.device->i2c;
+	struct nvkm_i2c_bus *bus;
 
 	if (index == 0xff) {
-		index = NV_I2C_DEFAULT(0);
+		index = NVKM_I2C_BUS_PRI;
 		if (init->outp && init->outp->i2c_upper_default)
-			index = NV_I2C_DEFAULT(1);
-	} else
-	if (index < 0) {
-		if (!init->outp) {
-			if (init_exec(init))
-				error("script needs output for i2c\n");
-			return NULL;
-		}
-
-		if (index == -2 && init->outp->location) {
-			index = NV_I2C_TYPE_EXTAUX(init->outp->extdev);
-			return i2c->find_type(i2c, index);
-		}
-
-		index = init->outp->i2c_index;
-		if (init->outp->type == DCB_OUTPUT_DP)
-			index += NV_I2C_AUX(0);
+			index = NVKM_I2C_BUS_SEC;
 	}
 
-	return i2c->find(i2c, index);
+	bus = nvkm_i2c_bus_find(i2c, index);
+	return bus ? &bus->i2c : NULL;
 }
 
 static int
 init_rdi2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg)
 {
-	struct nvkm_i2c_port *port = init_i2c(init, index);
-	if (port && init_exec(init))
-		return nv_rdi2cr(port, addr, reg);
+	struct i2c_adapter *adap = init_i2c(init, index);
+	if (adap && init_exec(init))
+		return nvkm_rdi2cr(adap, addr, reg);
 	return -ENODEV;
 }
 
 static int
 init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
 {
-	struct nvkm_i2c_port *port = init_i2c(init, index);
-	if (port && init_exec(init))
-		return nv_wri2cr(port, addr, reg, val);
+	struct i2c_adapter *adap = init_i2c(init, index);
+	if (adap && init_exec(init))
+		return nvkm_wri2cr(adap, addr, reg, val);
 	return -ENODEV;
 }
 
+static struct nvkm_i2c_aux *
+init_aux(struct nvbios_init *init)
+{
+	struct nvkm_i2c *i2c = init->bios->subdev.device->i2c;
+	if (!init->outp) {
+		if (init_exec(init))
+			error("script needs output for aux\n");
+		return NULL;
+	}
+	return nvkm_i2c_aux_find(i2c, init->outp->i2c_index);
+}
+
 static u8
 init_rdauxr(struct nvbios_init *init, u32 addr)
 {
-	struct nvkm_i2c_port *port = init_i2c(init, -2);
+	struct nvkm_i2c_aux *aux = init_aux(init);
 	u8 data;
 
-	if (port && init_exec(init)) {
-		int ret = nv_rdaux(port, addr, &data, 1);
+	if (aux && init_exec(init)) {
+		int ret = nvkm_rdaux(aux, addr, &data, 1);
 		if (ret == 0)
 			return data;
 		trace("auxch read failed with %d\n", ret);
@@ -321,9 +322,9 @@
 static int
 init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
 {
-	struct nvkm_i2c_port *port = init_i2c(init, -2);
-	if (port && init_exec(init)) {
-		int ret = nv_wraux(port, addr, &data, 1);
+	struct nvkm_i2c_aux *aux = init_aux(init);
+	if (aux && init_exec(init)) {
+		int ret = nvkm_wraux(aux, addr, &data, 1);
 		if (ret)
 			trace("auxch write failed with %d\n", ret);
 		return ret;
@@ -334,9 +335,9 @@
 static void
 init_prog_pll(struct nvbios_init *init, u32 id, u32 freq)
 {
-	struct nvkm_devinit *devinit = nvkm_devinit(init->bios);
-	if (devinit->pll_set && init_exec(init)) {
-		int ret = devinit->pll_set(devinit, id, freq);
+	struct nvkm_devinit *devinit = init->bios->subdev.device->devinit;
+	if (init_exec(init)) {
+		int ret = nvkm_devinit_pll_set(devinit, id, freq);
 		if (ret)
 			warn("failed to prog pll 0x%08x to %dkHz\n", id, freq);
 	}
@@ -371,7 +372,7 @@
 	u16 len, data = init_table(bios, &len);
 	if (data) {
 		if (len >= offset + 2) {
-			data = nv_ro16(bios, data + offset);
+			data = nvbios_rd16(bios, data + offset);
 			if (data)
 				return data;
 
@@ -407,12 +408,12 @@
 			return 0x0000;
 
 		data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
-		return nv_ro16(bios, data + (index * 2));
+		return nvbios_rd16(bios, data + (index * 2));
 	}
 
 	data = init_script_table(&init);
 	if (data)
-		return nv_ro16(bios, data + (index * 2));
+		return nvbios_rd16(bios, data + (index * 2));
 
 	return 0x0000;
 }
@@ -422,7 +423,7 @@
 {
 	u16 len, data = init_table(bios, &len);
 	if (data && len >= 16)
-		return nv_ro16(bios, data + 14);
+		return nvbios_rd16(bios, data + 14);
 	return 0x0000;
 }
 
@@ -454,9 +455,9 @@
 	struct nvkm_bios *bios = init->bios;
 	u16 table = init_xlat_table(init);
 	if (table) {
-		u16 data = nv_ro16(bios, table + (index * 2));
+		u16 data = nvbios_rd16(bios, table + (index * 2));
 		if (data)
-			return nv_ro08(bios, data + offset);
+			return nvbios_rd08(bios, data + offset);
 		warn("xlat table pointer %d invalid\n", index);
 	}
 	return 0x00;
@@ -472,9 +473,9 @@
 	struct nvkm_bios *bios = init->bios;
 	u16 table = init_condition_table(init);
 	if (table) {
-		u32 reg = nv_ro32(bios, table + (cond * 12) + 0);
-		u32 msk = nv_ro32(bios, table + (cond * 12) + 4);
-		u32 val = nv_ro32(bios, table + (cond * 12) + 8);
+		u32 reg = nvbios_rd32(bios, table + (cond * 12) + 0);
+		u32 msk = nvbios_rd32(bios, table + (cond * 12) + 4);
+		u32 val = nvbios_rd32(bios, table + (cond * 12) + 8);
 		trace("\t[0x%02x] (R[0x%06x] & 0x%08x) == 0x%08x\n",
 		      cond, reg, msk, val);
 		return (init_rd32(init, reg) & msk) == val;
@@ -488,10 +489,10 @@
 	struct nvkm_bios *bios = init->bios;
 	u16 table = init_io_condition_table(init);
 	if (table) {
-		u16 port = nv_ro16(bios, table + (cond * 5) + 0);
-		u8 index = nv_ro08(bios, table + (cond * 5) + 2);
-		u8  mask = nv_ro08(bios, table + (cond * 5) + 3);
-		u8 value = nv_ro08(bios, table + (cond * 5) + 4);
+		u16 port = nvbios_rd16(bios, table + (cond * 5) + 0);
+		u8 index = nvbios_rd08(bios, table + (cond * 5) + 2);
+		u8  mask = nvbios_rd08(bios, table + (cond * 5) + 3);
+		u8 value = nvbios_rd08(bios, table + (cond * 5) + 4);
 		trace("\t[0x%02x] (0x%04x[0x%02x] & 0x%02x) == 0x%02x\n",
 		      cond, port, index, mask, value);
 		return (init_rdvgai(init, port, index) & mask) == value;
@@ -505,15 +506,15 @@
 	struct nvkm_bios *bios = init->bios;
 	u16 table = init_io_flag_condition_table(init);
 	if (table) {
-		u16 port = nv_ro16(bios, table + (cond * 9) + 0);
-		u8 index = nv_ro08(bios, table + (cond * 9) + 2);
-		u8  mask = nv_ro08(bios, table + (cond * 9) + 3);
-		u8 shift = nv_ro08(bios, table + (cond * 9) + 4);
-		u16 data = nv_ro16(bios, table + (cond * 9) + 5);
-		u8 dmask = nv_ro08(bios, table + (cond * 9) + 7);
-		u8 value = nv_ro08(bios, table + (cond * 9) + 8);
+		u16 port = nvbios_rd16(bios, table + (cond * 9) + 0);
+		u8 index = nvbios_rd08(bios, table + (cond * 9) + 2);
+		u8  mask = nvbios_rd08(bios, table + (cond * 9) + 3);
+		u8 shift = nvbios_rd08(bios, table + (cond * 9) + 4);
+		u16 data = nvbios_rd16(bios, table + (cond * 9) + 5);
+		u8 dmask = nvbios_rd08(bios, table + (cond * 9) + 7);
+		u8 value = nvbios_rd08(bios, table + (cond * 9) + 8);
 		u8 ioval = (init_rdvgai(init, port, index) & mask) >> shift;
-		return (nv_ro08(bios, data + ioval) & dmask) == value;
+		return (nvbios_rd08(bios, data + ioval) & dmask) == value;
 	}
 	return false;
 }
@@ -573,7 +574,7 @@
 static void
 init_reserved(struct nvbios_init *init)
 {
-	u8 opcode = nv_ro08(init->bios, init->offset);
+	u8 opcode = nvbios_rd08(init->bios, init->offset);
 	u8 length, i;
 
 	switch (opcode) {
@@ -587,7 +588,7 @@
 
 	trace("RESERVED 0x%02x\t", opcode);
 	for (i = 1; i < length; i++)
-		cont(" 0x%02x", nv_ro08(init->bios, init->offset + i));
+		cont(" 0x%02x", nvbios_rd08(init->bios, init->offset + i));
 	cont("\n");
 	init->offset += length;
 }
@@ -611,12 +612,12 @@
 init_io_restrict_prog(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8 index = nv_ro08(bios, init->offset + 3);
-	u8  mask = nv_ro08(bios, init->offset + 4);
-	u8 shift = nv_ro08(bios, init->offset + 5);
-	u8 count = nv_ro08(bios, init->offset + 6);
-	u32  reg = nv_ro32(bios, init->offset + 7);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 3);
+	u8  mask = nvbios_rd08(bios, init->offset + 4);
+	u8 shift = nvbios_rd08(bios, init->offset + 5);
+	u8 count = nvbios_rd08(bios, init->offset + 6);
+	u32  reg = nvbios_rd32(bios, init->offset + 7);
 	u8 conf, i;
 
 	trace("IO_RESTRICT_PROG\tR[0x%06x] = "
@@ -626,7 +627,7 @@
 
 	conf = (init_rdvgai(init, port, index) & mask) >> shift;
 	for (i = 0; i < count; i++) {
-		u32 data = nv_ro32(bios, init->offset);
+		u32 data = nvbios_rd32(bios, init->offset);
 
 		if (i == conf) {
 			trace("\t0x%08x *\n", data);
@@ -648,7 +649,7 @@
 init_repeat(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 count = nv_ro08(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 1);
 	u16 repeat = init->repeat;
 
 	trace("REPEAT\t0x%02x\n", count);
@@ -674,13 +675,13 @@
 init_io_restrict_pll(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8 index = nv_ro08(bios, init->offset + 3);
-	u8  mask = nv_ro08(bios, init->offset + 4);
-	u8 shift = nv_ro08(bios, init->offset + 5);
-	s8  iofc = nv_ro08(bios, init->offset + 6);
-	u8 count = nv_ro08(bios, init->offset + 7);
-	u32  reg = nv_ro32(bios, init->offset + 8);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 3);
+	u8  mask = nvbios_rd08(bios, init->offset + 4);
+	u8 shift = nvbios_rd08(bios, init->offset + 5);
+	s8  iofc = nvbios_rd08(bios, init->offset + 6);
+	u8 count = nvbios_rd08(bios, init->offset + 7);
+	u32  reg = nvbios_rd32(bios, init->offset + 8);
 	u8 conf, i;
 
 	trace("IO_RESTRICT_PLL\tR[0x%06x] =PLL= "
@@ -690,7 +691,7 @@
 
 	conf = (init_rdvgai(init, port, index) & mask) >> shift;
 	for (i = 0; i < count; i++) {
-		u32 freq = nv_ro16(bios, init->offset) * 10;
+		u32 freq = nvbios_rd16(bios, init->offset) * 10;
 
 		if (i == conf) {
 			trace("\t%dkHz *\n", freq);
@@ -730,12 +731,12 @@
 init_copy(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u8 shift = nv_ro08(bios, init->offset + 5);
-	u8 smask = nv_ro08(bios, init->offset + 6);
-	u16 port = nv_ro16(bios, init->offset + 7);
-	u8 index = nv_ro08(bios, init->offset + 9);
-	u8  mask = nv_ro08(bios, init->offset + 10);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u8 shift = nvbios_rd08(bios, init->offset + 5);
+	u8 smask = nvbios_rd08(bios, init->offset + 6);
+	u16 port = nvbios_rd16(bios, init->offset + 7);
+	u8 index = nvbios_rd08(bios, init->offset + 9);
+	u8  mask = nvbios_rd08(bios, init->offset + 10);
 	u8  data;
 
 	trace("COPY\t0x%04x[0x%02x] &= 0x%02x |= "
@@ -769,7 +770,7 @@
 init_io_flag_condition(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 cond = nv_ro08(bios, init->offset + 1);
+	u8 cond = nvbios_rd08(bios, init->offset + 1);
 
 	trace("IO_FLAG_CONDITION\t0x%02x\n", cond);
 	init->offset += 2;
@@ -787,8 +788,8 @@
 {
 	struct nvkm_bios *bios = init->bios;
 	struct nvbios_dpout info;
-	u8  cond = nv_ro08(bios, init->offset + 1);
-	u8  unkn = nv_ro08(bios, init->offset + 2);
+	u8  cond = nvbios_rd08(bios, init->offset + 1);
+	u8  unkn = nvbios_rd08(bios, init->offset + 2);
 	u8  ver, hdr, cnt, len;
 	u16 data;
 
@@ -834,7 +835,7 @@
 init_io_mask_or(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
 	u8    or = init_or(init);
 	u8  data;
 
@@ -853,7 +854,7 @@
 init_io_or(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
 	u8    or = init_or(init);
 	u8  data;
 
@@ -872,8 +873,8 @@
 init_andn_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u32 mask = nv_ro32(bios, init->offset + 5);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u32 mask = nvbios_rd32(bios, init->offset + 5);
 
 	trace("ANDN_REG\tR[0x%06x] &= ~0x%08x\n", reg, mask);
 	init->offset += 9;
@@ -889,8 +890,8 @@
 init_or_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u32 mask = nv_ro32(bios, init->offset + 5);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u32 mask = nvbios_rd32(bios, init->offset + 5);
 
 	trace("OR_REG\tR[0x%06x] |= 0x%08x\n", reg, mask);
 	init->offset += 9;
@@ -906,19 +907,19 @@
 init_idx_addr_latched(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 creg = nv_ro32(bios, init->offset + 1);
-	u32 dreg = nv_ro32(bios, init->offset + 5);
-	u32 mask = nv_ro32(bios, init->offset + 9);
-	u32 data = nv_ro32(bios, init->offset + 13);
-	u8 count = nv_ro08(bios, init->offset + 17);
+	u32 creg = nvbios_rd32(bios, init->offset + 1);
+	u32 dreg = nvbios_rd32(bios, init->offset + 5);
+	u32 mask = nvbios_rd32(bios, init->offset + 9);
+	u32 data = nvbios_rd32(bios, init->offset + 13);
+	u8 count = nvbios_rd08(bios, init->offset + 17);
 
 	trace("INDEX_ADDRESS_LATCHED\tR[0x%06x] : R[0x%06x]\n", creg, dreg);
 	trace("\tCTRL &= 0x%08x |= 0x%08x\n", mask, data);
 	init->offset += 18;
 
 	while (count--) {
-		u8 iaddr = nv_ro08(bios, init->offset + 0);
-		u8 idata = nv_ro08(bios, init->offset + 1);
+		u8 iaddr = nvbios_rd08(bios, init->offset + 0);
+		u8 idata = nvbios_rd08(bios, init->offset + 1);
 
 		trace("\t[0x%02x] = 0x%02x\n", iaddr, idata);
 		init->offset += 2;
@@ -936,12 +937,12 @@
 init_io_restrict_pll2(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8 index = nv_ro08(bios, init->offset + 3);
-	u8  mask = nv_ro08(bios, init->offset + 4);
-	u8 shift = nv_ro08(bios, init->offset + 5);
-	u8 count = nv_ro08(bios, init->offset + 6);
-	u32  reg = nv_ro32(bios, init->offset + 7);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 3);
+	u8  mask = nvbios_rd08(bios, init->offset + 4);
+	u8 shift = nvbios_rd08(bios, init->offset + 5);
+	u8 count = nvbios_rd08(bios, init->offset + 6);
+	u32  reg = nvbios_rd32(bios, init->offset + 7);
 	u8  conf, i;
 
 	trace("IO_RESTRICT_PLL2\t"
@@ -951,7 +952,7 @@
 
 	conf = (init_rdvgai(init, port, index) & mask) >> shift;
 	for (i = 0; i < count; i++) {
-		u32 freq = nv_ro32(bios, init->offset);
+		u32 freq = nvbios_rd32(bios, init->offset);
 		if (i == conf) {
 			trace("\t%dkHz *\n", freq);
 			init_prog_pll(init, reg, freq);
@@ -971,8 +972,8 @@
 init_pll2(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u32 freq = nv_ro32(bios, init->offset + 5);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u32 freq = nvbios_rd32(bios, init->offset + 5);
 
 	trace("PLL2\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
 	init->offset += 9;
@@ -988,17 +989,17 @@
 init_i2c_byte(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
-	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
-	u8 count = nv_ro08(bios, init->offset + 3);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
+	u8  addr = nvbios_rd08(bios, init->offset + 2) >> 1;
+	u8 count = nvbios_rd08(bios, init->offset + 3);
 
 	trace("I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
 	init->offset += 4;
 
 	while (count--) {
-		u8  reg = nv_ro08(bios, init->offset + 0);
-		u8 mask = nv_ro08(bios, init->offset + 1);
-		u8 data = nv_ro08(bios, init->offset + 2);
+		u8  reg = nvbios_rd08(bios, init->offset + 0);
+		u8 mask = nvbios_rd08(bios, init->offset + 1);
+		u8 data = nvbios_rd08(bios, init->offset + 2);
 		int val;
 
 		trace("\t[0x%02x] &= 0x%02x |= 0x%02x\n", reg, mask, data);
@@ -1019,16 +1020,16 @@
 init_zm_i2c_byte(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
-	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
-	u8 count = nv_ro08(bios, init->offset + 3);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
+	u8  addr = nvbios_rd08(bios, init->offset + 2) >> 1;
+	u8 count = nvbios_rd08(bios, init->offset + 3);
 
 	trace("ZM_I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
 	init->offset += 4;
 
 	while (count--) {
-		u8  reg = nv_ro08(bios, init->offset + 0);
-		u8 data = nv_ro08(bios, init->offset + 1);
+		u8  reg = nvbios_rd08(bios, init->offset + 0);
+		u8 data = nvbios_rd08(bios, init->offset + 1);
 
 		trace("\t[0x%02x] = 0x%02x\n", reg, data);
 		init->offset += 2;
@@ -1045,28 +1046,28 @@
 init_zm_i2c(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
-	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
-	u8 count = nv_ro08(bios, init->offset + 3);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
+	u8  addr = nvbios_rd08(bios, init->offset + 2) >> 1;
+	u8 count = nvbios_rd08(bios, init->offset + 3);
 	u8 data[256], i;
 
 	trace("ZM_I2C\tI2C[0x%02x][0x%02x]\n", index, addr);
 	init->offset += 4;
 
 	for (i = 0; i < count; i++) {
-		data[i] = nv_ro08(bios, init->offset);
+		data[i] = nvbios_rd08(bios, init->offset);
 		trace("\t0x%02x\n", data[i]);
 		init->offset++;
 	}
 
 	if (init_exec(init)) {
-		struct nvkm_i2c_port *port = init_i2c(init, index);
+		struct i2c_adapter *adap = init_i2c(init, index);
 		struct i2c_msg msg = {
 			.addr = addr, .flags = 0, .len = count, .buf = data,
 		};
 		int ret;
 
-		if (port && (ret = i2c_transfer(&port->adapter, &msg, 1)) != 1)
+		if (adap && (ret = i2c_transfer(adap, &msg, 1)) != 1)
 			warn("i2c wr failed, %d\n", ret);
 	}
 }
@@ -1079,10 +1080,10 @@
 init_tmds(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 tmds = nv_ro08(bios, init->offset + 1);
-	u8 addr = nv_ro08(bios, init->offset + 2);
-	u8 mask = nv_ro08(bios, init->offset + 3);
-	u8 data = nv_ro08(bios, init->offset + 4);
+	u8 tmds = nvbios_rd08(bios, init->offset + 1);
+	u8 addr = nvbios_rd08(bios, init->offset + 2);
+	u8 mask = nvbios_rd08(bios, init->offset + 3);
+	u8 data = nvbios_rd08(bios, init->offset + 4);
 	u32 reg = init_tmds_reg(init, tmds);
 
 	trace("TMDS\tT[0x%02x][0x%02x] &= 0x%02x |= 0x%02x\n",
@@ -1105,16 +1106,16 @@
 init_zm_tmds_group(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8  tmds = nv_ro08(bios, init->offset + 1);
-	u8 count = nv_ro08(bios, init->offset + 2);
+	u8  tmds = nvbios_rd08(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 2);
 	u32  reg = init_tmds_reg(init, tmds);
 
 	trace("TMDS_ZM_GROUP\tT[0x%02x]\n", tmds);
 	init->offset += 3;
 
 	while (count--) {
-		u8 addr = nv_ro08(bios, init->offset + 0);
-		u8 data = nv_ro08(bios, init->offset + 1);
+		u8 addr = nvbios_rd08(bios, init->offset + 0);
+		u8 data = nvbios_rd08(bios, init->offset + 1);
 
 		trace("\t[0x%02x] = 0x%02x\n", addr, data);
 		init->offset += 2;
@@ -1132,10 +1133,10 @@
 init_cr_idx_adr_latch(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 addr0 = nv_ro08(bios, init->offset + 1);
-	u8 addr1 = nv_ro08(bios, init->offset + 2);
-	u8  base = nv_ro08(bios, init->offset + 3);
-	u8 count = nv_ro08(bios, init->offset + 4);
+	u8 addr0 = nvbios_rd08(bios, init->offset + 1);
+	u8 addr1 = nvbios_rd08(bios, init->offset + 2);
+	u8  base = nvbios_rd08(bios, init->offset + 3);
+	u8 count = nvbios_rd08(bios, init->offset + 4);
 	u8 save0;
 
 	trace("CR_INDEX_ADDR C[%02x] C[%02x]\n", addr0, addr1);
@@ -1143,7 +1144,7 @@
 
 	save0 = init_rdvgai(init, 0x03d4, addr0);
 	while (count--) {
-		u8 data = nv_ro08(bios, init->offset);
+		u8 data = nvbios_rd08(bios, init->offset);
 
 		trace("\t\t[0x%02x] = 0x%02x\n", base, data);
 		init->offset += 1;
@@ -1162,9 +1163,9 @@
 init_cr(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 addr = nv_ro08(bios, init->offset + 1);
-	u8 mask = nv_ro08(bios, init->offset + 2);
-	u8 data = nv_ro08(bios, init->offset + 3);
+	u8 addr = nvbios_rd08(bios, init->offset + 1);
+	u8 mask = nvbios_rd08(bios, init->offset + 2);
+	u8 data = nvbios_rd08(bios, init->offset + 3);
 	u8 val;
 
 	trace("CR\t\tC[0x%02x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
@@ -1182,8 +1183,8 @@
 init_zm_cr(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 addr = nv_ro08(bios, init->offset + 1);
-	u8 data = nv_ro08(bios, init->offset + 2);
+	u8 addr = nvbios_rd08(bios, init->offset + 1);
+	u8 data = nvbios_rd08(bios, init->offset + 2);
 
 	trace("ZM_CR\tC[0x%02x] = 0x%02x\n", addr,  data);
 	init->offset += 3;
@@ -1199,14 +1200,14 @@
 init_zm_cr_group(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 count = nv_ro08(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 1);
 
 	trace("ZM_CR_GROUP\n");
 	init->offset += 2;
 
 	while (count--) {
-		u8 addr = nv_ro08(bios, init->offset + 0);
-		u8 data = nv_ro08(bios, init->offset + 1);
+		u8 addr = nvbios_rd08(bios, init->offset + 0);
+		u8 data = nvbios_rd08(bios, init->offset + 1);
 
 		trace("\t\tC[0x%02x] = 0x%02x\n", addr, data);
 		init->offset += 2;
@@ -1223,8 +1224,8 @@
 init_condition_time(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8  cond = nv_ro08(bios, init->offset + 1);
-	u8 retry = nv_ro08(bios, init->offset + 2);
+	u8  cond = nvbios_rd08(bios, init->offset + 1);
+	u8 retry = nvbios_rd08(bios, init->offset + 2);
 	u8  wait = min((u16)retry * 50, 100);
 
 	trace("CONDITION_TIME\t0x%02x 0x%02x\n", cond, retry);
@@ -1250,7 +1251,7 @@
 init_ltime(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 msec = nv_ro16(bios, init->offset + 1);
+	u16 msec = nvbios_rd16(bios, init->offset + 1);
 
 	trace("LTIME\t0x%04x\n", msec);
 	init->offset += 3;
@@ -1267,14 +1268,14 @@
 init_zm_reg_sequence(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 base = nv_ro32(bios, init->offset + 1);
-	u8 count = nv_ro08(bios, init->offset + 5);
+	u32 base = nvbios_rd32(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 5);
 
 	trace("ZM_REG_SEQUENCE\t0x%02x\n", count);
 	init->offset += 6;
 
 	while (count--) {
-		u32 data = nv_ro32(bios, init->offset);
+		u32 data = nvbios_rd32(bios, init->offset);
 
 		trace("\t\tR[0x%06x] = 0x%08x\n", base, data);
 		init->offset += 4;
@@ -1292,9 +1293,9 @@
 init_pll_indirect(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u16 addr = nv_ro16(bios, init->offset + 5);
-	u32 freq = (u32)nv_ro16(bios, addr) * 1000;
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u16 addr = nvbios_rd16(bios, init->offset + 5);
+	u32 freq = (u32)nvbios_rd16(bios, addr) * 1000;
 
 	trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
 	      reg, addr, freq);
@@ -1311,9 +1312,9 @@
 init_zm_reg_indirect(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u16 addr = nv_ro16(bios, init->offset + 5);
-	u32 data = nv_ro32(bios, addr);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u16 addr = nvbios_rd16(bios, init->offset + 5);
+	u32 data = nvbios_rd32(bios, addr);
 
 	trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
 	      reg, addr, data);
@@ -1330,7 +1331,7 @@
 init_sub_direct(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 addr = nv_ro16(bios, init->offset + 1);
+	u16 addr = nvbios_rd16(bios, init->offset + 1);
 	u16 save;
 
 	trace("SUB_DIRECT\t0x%04x\n", addr);
@@ -1356,7 +1357,7 @@
 init_jump(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 offset = nv_ro16(bios, init->offset + 1);
+	u16 offset = nvbios_rd16(bios, init->offset + 1);
 
 	trace("JUMP\t0x%04x\n", offset);
 
@@ -1374,11 +1375,11 @@
 init_i2c_if(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
-	u8  addr = nv_ro08(bios, init->offset + 2);
-	u8   reg = nv_ro08(bios, init->offset + 3);
-	u8  mask = nv_ro08(bios, init->offset + 4);
-	u8  data = nv_ro08(bios, init->offset + 5);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
+	u8  addr = nvbios_rd08(bios, init->offset + 2);
+	u8   reg = nvbios_rd08(bios, init->offset + 3);
+	u8  mask = nvbios_rd08(bios, init->offset + 4);
+	u8  data = nvbios_rd08(bios, init->offset + 5);
 	u8 value;
 
 	trace("I2C_IF\tI2C[0x%02x][0x%02x][0x%02x] & 0x%02x == 0x%02x\n",
@@ -1401,12 +1402,12 @@
 init_copy_nv_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  sreg = nv_ro32(bios, init->offset + 1);
-	u8  shift = nv_ro08(bios, init->offset + 5);
-	u32 smask = nv_ro32(bios, init->offset + 6);
-	u32  sxor = nv_ro32(bios, init->offset + 10);
-	u32  dreg = nv_ro32(bios, init->offset + 14);
-	u32 dmask = nv_ro32(bios, init->offset + 18);
+	u32  sreg = nvbios_rd32(bios, init->offset + 1);
+	u8  shift = nvbios_rd08(bios, init->offset + 5);
+	u32 smask = nvbios_rd32(bios, init->offset + 6);
+	u32  sxor = nvbios_rd32(bios, init->offset + 10);
+	u32  dreg = nvbios_rd32(bios, init->offset + 14);
+	u32 dmask = nvbios_rd32(bios, init->offset + 18);
 	u32 data;
 
 	trace("COPY_NV_REG\tR[0x%06x] &= 0x%08x |= "
@@ -1427,9 +1428,9 @@
 init_zm_index_io(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8 index = nv_ro08(bios, init->offset + 3);
-	u8  data = nv_ro08(bios, init->offset + 4);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 3);
+	u8  data = nvbios_rd08(bios, init->offset + 4);
 
 	trace("ZM_INDEX_IO\tI[0x%04x][0x%02x] = 0x%02x\n", port, index, data);
 	init->offset += 5;
@@ -1444,14 +1445,14 @@
 static void
 init_compute_mem(struct nvbios_init *init)
 {
-	struct nvkm_devinit *devinit = nvkm_devinit(init->bios);
+	struct nvkm_devinit *devinit = init->bios->subdev.device->devinit;
 
 	trace("COMPUTE_MEM\n");
 	init->offset += 1;
 
 	init_exec_force(init, true);
-	if (init_exec(init) && devinit->meminit)
-		devinit->meminit(devinit);
+	if (init_exec(init))
+		nvkm_devinit_meminit(devinit);
 	init_exec_force(init, false);
 }
 
@@ -1463,9 +1464,9 @@
 init_reset(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32   reg = nv_ro32(bios, init->offset + 1);
-	u32 data1 = nv_ro32(bios, init->offset + 5);
-	u32 data2 = nv_ro32(bios, init->offset + 9);
+	u32   reg = nvbios_rd32(bios, init->offset + 1);
+	u32 data1 = nvbios_rd32(bios, init->offset + 5);
+	u32 data2 = nvbios_rd32(bios, init->offset + 9);
 	u32 savepci19;
 
 	trace("RESET\tR[0x%08x] = 0x%08x, 0x%08x", reg, data1, data2);
@@ -1513,14 +1514,14 @@
 
 	mdata = init_configure_mem_clk(init);
 	sdata = bmp_sdr_seq_table(bios);
-	if (nv_ro08(bios, mdata) & 0x01)
+	if (nvbios_rd08(bios, mdata) & 0x01)
 		sdata = bmp_ddr_seq_table(bios);
 	mdata += 6; /* skip to data */
 
 	data = init_rdvgai(init, 0x03c4, 0x01);
 	init_wrvgai(init, 0x03c4, 0x01, data | 0x20);
 
-	for (; (addr = nv_ro32(bios, sdata)) != 0xffffffff; sdata += 4) {
+	for (; (addr = nvbios_rd32(bios, sdata)) != 0xffffffff; sdata += 4) {
 		switch (addr) {
 		case 0x10021c: /* CKE_NORMAL */
 		case 0x1002d0: /* CMD_REFRESH */
@@ -1528,7 +1529,7 @@
 			data = 0x00000001;
 			break;
 		default:
-			data = nv_ro32(bios, mdata);
+			data = nvbios_rd32(bios, mdata);
 			mdata += 4;
 			if (data == 0xffffffff)
 				continue;
@@ -1563,12 +1564,12 @@
 	mdata = init_configure_mem_clk(init);
 
 	/* NVPLL */
-	clock = nv_ro16(bios, mdata + 4) * 10;
+	clock = nvbios_rd16(bios, mdata + 4) * 10;
 	init_prog_pll(init, 0x680500, clock);
 
 	/* MPLL */
-	clock = nv_ro16(bios, mdata + 2) * 10;
-	if (nv_ro08(bios, mdata) & 0x01)
+	clock = nvbios_rd16(bios, mdata + 2) * 10;
+	if (nvbios_rd08(bios, mdata) & 0x01)
 		clock *= 2;
 	init_prog_pll(init, 0x680504, clock);
 
@@ -1609,9 +1610,9 @@
 init_io(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8  mask = nv_ro16(bios, init->offset + 3);
-	u8  data = nv_ro16(bios, init->offset + 4);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8  mask = nvbios_rd16(bios, init->offset + 3);
+	u8  data = nvbios_rd16(bios, init->offset + 4);
 	u8 value;
 
 	trace("IO\t\tI[0x%04x] &= 0x%02x |= 0x%02x\n", port, mask, data);
@@ -1621,7 +1622,7 @@
 	 * needed some day..  it's almost certainly wrong, but, it also
 	 * somehow makes things work...
 	 */
-	if (nv_device(init->bios)->card_type >= NV_50 &&
+	if (bios->subdev.device->card_type >= NV_50 &&
 	    port == 0x03c3 && data == 0x01) {
 		init_mask(init, 0x614100, 0xf0800000, 0x00800000);
 		init_mask(init, 0x00e18c, 0x00020000, 0x00020000);
@@ -1649,7 +1650,7 @@
 init_sub(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
+	u8 index = nvbios_rd08(bios, init->offset + 1);
 	u16 addr, save;
 
 	trace("SUB\t0x%02x\n", index);
@@ -1676,8 +1677,8 @@
 init_ram_condition(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8  mask = nv_ro08(bios, init->offset + 1);
-	u8 value = nv_ro08(bios, init->offset + 2);
+	u8  mask = nvbios_rd08(bios, init->offset + 1);
+	u8 value = nvbios_rd08(bios, init->offset + 2);
 
 	trace("RAM_CONDITION\t"
 	      "(R[0x100000] & 0x%02x) == 0x%02x\n", mask, value);
@@ -1695,9 +1696,9 @@
 init_nv_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u32 mask = nv_ro32(bios, init->offset + 5);
-	u32 data = nv_ro32(bios, init->offset + 9);
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u32 mask = nvbios_rd32(bios, init->offset + 5);
+	u32 data = nvbios_rd32(bios, init->offset + 9);
 
 	trace("NV_REG\tR[0x%06x] &= 0x%08x |= 0x%08x\n", reg, mask, data);
 	init->offset += 13;
@@ -1713,15 +1714,15 @@
 init_macro(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8  macro = nv_ro08(bios, init->offset + 1);
+	u8  macro = nvbios_rd08(bios, init->offset + 1);
 	u16 table;
 
 	trace("MACRO\t0x%02x\n", macro);
 
 	table = init_macro_table(init);
 	if (table) {
-		u32 addr = nv_ro32(bios, table + (macro * 8) + 0);
-		u32 data = nv_ro32(bios, table + (macro * 8) + 4);
+		u32 addr = nvbios_rd32(bios, table + (macro * 8) + 0);
+		u32 data = nvbios_rd32(bios, table + (macro * 8) + 4);
 		trace("\t\tR[0x%06x] = 0x%08x\n", addr, data);
 		init_wr32(init, addr, data);
 	}
@@ -1742,6 +1743,24 @@
 }
 
 /**
+ * INIT_STRAP_CONDITION - opcode 0x73
+ *
+ */
+static void
+init_strap_condition(struct nvbios_init *init)
+{
+	struct nvkm_bios *bios = init->bios;
+	u32 mask = nvbios_rd32(bios, init->offset + 1);
+	u32 value = nvbios_rd32(bios, init->offset + 5);
+
+	trace("STRAP_CONDITION\t(R[0x101000] & 0x%08x) == 0x%08x\n",
+	      mask, value);
+	init->offset += 9;
+
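+	/* 0x101000 is the hardware strap register; disable execution of
+	 * subsequent opcodes when the masked strap value doesn't match
+	 */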
+	if ((init_rd32(init, 0x101000) & mask) != value)
+		init_exec_set(init, false);
+}
+
+/**
  * INIT_TIME - opcode 0x74
  *
  */
@@ -1749,7 +1768,7 @@
 init_time(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 usec = nv_ro16(bios, init->offset + 1);
+	u16 usec = nvbios_rd16(bios, init->offset + 1);
 
 	trace("TIME\t0x%04x\n", usec);
 	init->offset += 3;
@@ -1770,7 +1789,7 @@
 init_condition(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 cond = nv_ro08(bios, init->offset + 1);
+	u8 cond = nvbios_rd08(bios, init->offset + 1);
 
 	trace("CONDITION\t0x%02x\n", cond);
 	init->offset += 2;
@@ -1787,7 +1806,7 @@
 init_io_condition(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 cond = nv_ro08(bios, init->offset + 1);
+	u8 cond = nvbios_rd08(bios, init->offset + 1);
 
 	trace("IO_CONDITION\t0x%02x\n", cond);
 	init->offset += 2;
@@ -1797,6 +1816,23 @@
 }
 
 /**
+ * INIT_ZM_REG16 - opcode 0x77
+ *
+ */
+static void
+init_zm_reg16(struct nvbios_init *init)
+{
+	struct nvkm_bios *bios = init->bios;
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u16 data = nvbios_rd16(bios, init->offset + 5);
+
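+	/* the 16-bit immediate is zero-extended into a 32-bit register write */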
+	trace("ZM_REG16\tR[0x%06x] = 0x%04x\n", addr, data);
+	init->offset += 7;
+
+	init_wr32(init, addr, data);
+}
+
+/**
  * INIT_INDEX_IO - opcode 0x78
  *
  */
@@ -1804,10 +1840,10 @@
 init_index_io(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u16 port = nv_ro16(bios, init->offset + 1);
-	u8 index = nv_ro16(bios, init->offset + 3);
-	u8  mask = nv_ro08(bios, init->offset + 4);
-	u8  data = nv_ro08(bios, init->offset + 5);
+	u16 port = nvbios_rd16(bios, init->offset + 1);
+	u8 index = nvbios_rd16(bios, init->offset + 3);
+	u8  mask = nvbios_rd08(bios, init->offset + 4);
+	u8  data = nvbios_rd08(bios, init->offset + 5);
 	u8 value;
 
 	trace("INDEX_IO\tI[0x%04x][0x%02x] &= 0x%02x |= 0x%02x\n",
@@ -1826,8 +1862,8 @@
 init_pll(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32  reg = nv_ro32(bios, init->offset + 1);
-	u32 freq = nv_ro16(bios, init->offset + 5) * 10;
+	u32  reg = nvbios_rd32(bios, init->offset + 1);
+	u32 freq = nvbios_rd16(bios, init->offset + 5) * 10;
 
 	trace("PLL\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
 	init->offset += 7;
@@ -1843,8 +1879,8 @@
 init_zm_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u32 data = nv_ro32(bios, init->offset + 5);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u32 data = nvbios_rd32(bios, init->offset + 5);
 
 	trace("ZM_REG\tR[0x%06x] = 0x%08x\n", addr, data);
 	init->offset += 9;
@@ -1863,7 +1899,7 @@
 init_ram_restrict_pll(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8  type = nv_ro08(bios, init->offset + 1);
+	u8  type = nvbios_rd08(bios, init->offset + 1);
 	u8 count = init_ram_restrict_group_count(init);
 	u8 strap = init_ram_restrict(init);
 	u8 cconf;
@@ -1872,7 +1908,7 @@
 	init->offset += 2;
 
 	for (cconf = 0; cconf < count; cconf++) {
-		u32 freq = nv_ro32(bios, init->offset);
+		u32 freq = nvbios_rd32(bios, init->offset);
 
 		if (cconf == strap) {
 			trace("%dkHz *\n", freq);
@@ -1892,13 +1928,13 @@
 static void
 init_gpio(struct nvbios_init *init)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(init->bios);
+	struct nvkm_gpio *gpio = init->bios->subdev.device->gpio;
 
 	trace("GPIO\n");
 	init->offset += 1;
 
-	if (init_exec(init) && gpio && gpio->reset)
-		gpio->reset(gpio, DCB_GPIO_UNUSED);
+	if (init_exec(init))
+		nvkm_gpio_reset(gpio, DCB_GPIO_UNUSED);
 }
 
 /**
@@ -1909,9 +1945,9 @@
 init_ram_restrict_zm_reg_group(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u8  incr = nv_ro08(bios, init->offset + 5);
-	u8   num = nv_ro08(bios, init->offset + 6);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u8  incr = nvbios_rd08(bios, init->offset + 5);
+	u8   num = nvbios_rd08(bios, init->offset + 6);
 	u8 count = init_ram_restrict_group_count(init);
 	u8 index = init_ram_restrict(init);
 	u8 i, j;
@@ -1923,7 +1959,7 @@
 	for (i = 0; i < num; i++) {
 		trace("\tR[0x%06x] = {\n", addr);
 		for (j = 0; j < count; j++) {
-			u32 data = nv_ro32(bios, init->offset);
+			u32 data = nvbios_rd32(bios, init->offset);
 
 			if (j == index) {
 				trace("\t\t0x%08x *\n", data);
@@ -1947,8 +1983,8 @@
 init_copy_zm_reg(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 sreg = nv_ro32(bios, init->offset + 1);
-	u32 dreg = nv_ro32(bios, init->offset + 5);
+	u32 sreg = nvbios_rd32(bios, init->offset + 1);
+	u32 dreg = nvbios_rd32(bios, init->offset + 5);
 
 	trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", dreg, sreg);
 	init->offset += 9;
@@ -1964,14 +2000,14 @@
 init_zm_reg_group(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u8 count = nv_ro08(bios, init->offset + 5);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 5);
 
 	trace("ZM_REG_GROUP\tR[0x%06x] =\n", addr);
 	init->offset += 6;
 
 	while (count--) {
-		u32 data = nv_ro32(bios, init->offset);
+		u32 data = nvbios_rd32(bios, init->offset);
 		trace("\t0x%08x\n", data);
 		init_wr32(init, addr, data);
 		init->offset += 4;
@@ -1986,13 +2022,13 @@
 init_xlat(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 saddr = nv_ro32(bios, init->offset + 1);
-	u8 sshift = nv_ro08(bios, init->offset + 5);
-	u8  smask = nv_ro08(bios, init->offset + 6);
-	u8  index = nv_ro08(bios, init->offset + 7);
-	u32 daddr = nv_ro32(bios, init->offset + 8);
-	u32 dmask = nv_ro32(bios, init->offset + 12);
-	u8  shift = nv_ro08(bios, init->offset + 16);
+	u32 saddr = nvbios_rd32(bios, init->offset + 1);
+	u8 sshift = nvbios_rd08(bios, init->offset + 5);
+	u8  smask = nvbios_rd08(bios, init->offset + 6);
+	u8  index = nvbios_rd08(bios, init->offset + 7);
+	u32 daddr = nvbios_rd32(bios, init->offset + 8);
+	u32 dmask = nvbios_rd32(bios, init->offset + 12);
+	u8  shift = nvbios_rd08(bios, init->offset + 16);
 	u32 data;
 
 	trace("INIT_XLAT\tR[0x%06x] &= 0x%08x |= "
@@ -2014,9 +2050,9 @@
 init_zm_mask_add(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u32 mask = nv_ro32(bios, init->offset + 5);
-	u32  add = nv_ro32(bios, init->offset + 9);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u32 mask = nvbios_rd32(bios, init->offset + 5);
+	u32  add = nvbios_rd32(bios, init->offset + 9);
 	u32 data;
 
 	trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add);
@@ -2035,15 +2071,15 @@
 init_auxch(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u8 count = nv_ro08(bios, init->offset + 5);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 5);
 
 	trace("AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
 	init->offset += 6;
 
 	while (count--) {
-		u8 mask = nv_ro08(bios, init->offset + 0);
-		u8 data = nv_ro08(bios, init->offset + 1);
+		u8 mask = nvbios_rd08(bios, init->offset + 0);
+		u8 data = nvbios_rd08(bios, init->offset + 1);
 		trace("\tAUX[0x%08x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
 		mask = init_rdauxr(init, addr) & mask;
 		init_wrauxr(init, addr, mask | data);
@@ -2059,14 +2095,14 @@
 init_zm_auxch(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u32 addr = nv_ro32(bios, init->offset + 1);
-	u8 count = nv_ro08(bios, init->offset + 5);
+	u32 addr = nvbios_rd32(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 5);
 
 	trace("ZM_AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
 	init->offset += 6;
 
 	while (count--) {
-		u8 data = nv_ro08(bios, init->offset + 0);
+		u8 data = nvbios_rd08(bios, init->offset + 0);
 		trace("\tAUX[0x%08x] = 0x%02x\n", addr, data);
 		init_wrauxr(init, addr, data);
 		init->offset += 1;
@@ -2081,21 +2117,21 @@
 init_i2c_long_if(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	u8 index = nv_ro08(bios, init->offset + 1);
-	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
-	u8 reglo = nv_ro08(bios, init->offset + 3);
-	u8 reghi = nv_ro08(bios, init->offset + 4);
-	u8  mask = nv_ro08(bios, init->offset + 5);
-	u8  data = nv_ro08(bios, init->offset + 6);
-	struct nvkm_i2c_port *port;
+	u8 index = nvbios_rd08(bios, init->offset + 1);
+	u8  addr = nvbios_rd08(bios, init->offset + 2) >> 1;
+	u8 reglo = nvbios_rd08(bios, init->offset + 3);
+	u8 reghi = nvbios_rd08(bios, init->offset + 4);
+	u8  mask = nvbios_rd08(bios, init->offset + 5);
+	u8  data = nvbios_rd08(bios, init->offset + 6);
+	struct i2c_adapter *adap;
 
 	trace("I2C_LONG_IF\t"
 	      "I2C[0x%02x][0x%02x][0x%02x%02x] & 0x%02x == 0x%02x\n",
 	      index, addr, reglo, reghi, mask, data);
 	init->offset += 7;
 
-	port = init_i2c(init, index);
-	if (port) {
+	adap = init_i2c(init, index);
+	if (adap) {
 		u8 i[2] = { reghi, reglo };
 		u8 o[1] = {};
 		struct i2c_msg msg[] = {
@@ -2104,7 +2140,7 @@
 		};
 		int ret;
 
-		ret = i2c_transfer(&port->adapter, msg, 2);
+		ret = i2c_transfer(adap, msg, 2);
 		if (ret == 2 && ((o[0] & mask) == data))
 			return;
 	}
@@ -2120,9 +2156,9 @@
 init_gpio_ne(struct nvbios_init *init)
 {
 	struct nvkm_bios *bios = init->bios;
-	struct nvkm_gpio *gpio = nvkm_gpio(bios);
+	struct nvkm_gpio *gpio = bios->subdev.device->gpio;
 	struct dcb_gpio_func func;
-	u8 count = nv_ro08(bios, init->offset + 1);
+	u8 count = nvbios_rd08(bios, init->offset + 1);
 	u8 idx = 0, ver, len;
 	u16 data, i;
 
@@ -2130,21 +2166,21 @@
 	init->offset += 2;
 
 	for (i = init->offset; i < init->offset + count; i++)
-		cont("0x%02x ", nv_ro08(bios, i));
+		cont("0x%02x ", nvbios_rd08(bios, i));
 	cont("\n");
 
 	while ((data = dcb_gpio_parse(bios, 0, idx++, &ver, &len, &func))) {
 		if (func.func != DCB_GPIO_UNUSED) {
 			for (i = init->offset; i < init->offset + count; i++) {
-				if (func.func == nv_ro08(bios, i))
+				if (func.func == nvbios_rd08(bios, i))
 					break;
 			}
 
 			trace("\tFUNC[0x%02x]", func.func);
 			if (i == (init->offset + count)) {
 				cont(" *");
-				if (init_exec(init) && gpio && gpio->reset)
-					gpio->reset(gpio, func.func);
+				if (init_exec(init))
+					nvkm_gpio_reset(gpio, func.func);
 			}
 			cont("\n");
 		}
@@ -2202,9 +2238,11 @@
 	[0x6f] = { init_macro },
 	[0x71] = { init_done },
 	[0x72] = { init_resume },
+	[0x73] = { init_strap_condition },
 	[0x74] = { init_time },
 	[0x75] = { init_condition },
 	[0x76] = { init_io_condition },
+	[0x77] = { init_zm_reg16 },
 	[0x78] = { init_index_io },
 	[0x79] = { init_pll },
 	[0x7a] = { init_zm_reg },
@@ -2232,7 +2270,7 @@
 {
 	init->nested++;
 	while (init->offset) {
-		u8 opcode = nv_ro08(init->bios, init->offset);
+		u8 opcode = nvbios_rd08(init->bios, init->offset);
 		if (opcode >= init_opcode_nr || !init_opcode[opcode].exec) {
 			error("unknown opcode 0x%02x\n", opcode);
 			return -EINVAL;
@@ -2247,13 +2285,13 @@
 int
 nvbios_init(struct nvkm_subdev *subdev, bool execute)
 {
-	struct nvkm_bios *bios = nvkm_bios(subdev);
+	struct nvkm_bios *bios = subdev->device->bios;
 	int ret = 0;
 	int i = -1;
 	u16 data;
 
 	if (execute)
-		nv_info(bios, "running init tables\n");
+		nvkm_debug(subdev, "running init tables\n");
 	while (!ret && (data = (init_script(bios, ++i)))) {
 		struct nvbios_init init = {
 			.subdev = subdev,
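
For readers tracking the mechanical rename from nv_ro{08,16,32}() to
nvbios_rd{08,16,32}() throughout this series: both families read
little-endian values out of the shadowed VBIOS image, with out-of-range
reads yielding zero.  A minimal standalone sketch of how the three widths
relate (illustrative names only; this is not the kernel's implementation,
which also fetches image data on demand):

    #include <stdint.h>

    struct vbios {
            const uint8_t *data;    /* shadowed BIOS image */
            uint32_t size;          /* bytes valid in data[] */
    };

    /* Out-of-range reads return zero, mirroring the driver's behaviour. */
    static uint8_t vbios_rd08(const struct vbios *b, uint32_t addr)
    {
            return addr < b->size ? b->data[addr] : 0x00;
    }

    /* Wider reads are little-endian compositions of byte reads. */
    static uint16_t vbios_rd16(const struct vbios *b, uint32_t addr)
    {
            return vbios_rd08(b, addr) |
                   (uint16_t)vbios_rd08(b, addr + 1) << 8;
    }

    static uint32_t vbios_rd32(const struct vbios *b, uint32_t addr)
    {
            return vbios_rd16(b, addr) |
                   (uint32_t)vbios_rd16(b, addr + 2) << 16;
    }
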
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
index c4087df..3ddf093 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c
@@ -28,17 +28,18 @@
 u16
 mxm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	struct bit_entry x;
 
 	if (bit_entry(bios, 'x', &x)) {
-		nv_debug(bios, "BIT 'x' table not present\n");
+		nvkm_debug(subdev, "BIT 'x' table not present\n");
 		return 0x0000;
 	}
 
 	*ver = x.version;
 	*hdr = x.length;
 	if (*ver != 1 || *hdr < 3) {
-		nv_warn(bios, "BIT 'x' table %d/%d unknown\n", *ver, *hdr);
+		nvkm_warn(subdev, "BIT 'x' table %d/%d unknown\n", *ver, *hdr);
 		return 0x0000;
 	}
 
@@ -73,23 +74,24 @@
 u8
 mxm_sor_map(struct nvkm_bios *bios, u8 conn)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	u8  ver, hdr;
 	u16 mxm = mxm_table(bios, &ver, &hdr);
 	if (mxm && hdr >= 6) {
-		u16 map = nv_ro16(bios, mxm + 4);
+		u16 map = nvbios_rd16(bios, mxm + 4);
 		if (map) {
-			ver = nv_ro08(bios, map);
+			ver = nvbios_rd08(bios, map);
 			if (ver == 0x10) {
-				if (conn < nv_ro08(bios, map + 3)) {
-					map += nv_ro08(bios, map + 1);
+				if (conn < nvbios_rd08(bios, map + 3)) {
+					map += nvbios_rd08(bios, map + 1);
 					map += conn;
-					return nv_ro08(bios, map);
+					return nvbios_rd08(bios, map);
 				}
 
 				return 0x00;
 			}
 
-			nv_warn(bios, "unknown sor map v%02x\n", ver);
+			nvkm_warn(subdev, "unknown sor map v%02x\n", ver);
 		}
 	}
 
@@ -102,30 +104,31 @@
 	if (bios->version.chip == 0x98)
 		return g98_sor_map[conn];
 
-	nv_warn(bios, "missing sor map\n");
+	nvkm_warn(subdev, "missing sor map\n");
 	return 0x00;
 }
 
 u8
 mxm_ddc_map(struct nvkm_bios *bios, u8 port)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	u8  ver, hdr;
 	u16 mxm = mxm_table(bios, &ver, &hdr);
 	if (mxm && hdr >= 8) {
-		u16 map = nv_ro16(bios, mxm + 6);
+		u16 map = nvbios_rd16(bios, mxm + 6);
 		if (map) {
-			ver = nv_ro08(bios, map);
+			ver = nvbios_rd08(bios, map);
 			if (ver == 0x10) {
-				if (port < nv_ro08(bios, map + 3)) {
-					map += nv_ro08(bios, map + 1);
+				if (port < nvbios_rd08(bios, map + 3)) {
+					map += nvbios_rd08(bios, map + 1);
 					map += port;
-					return nv_ro08(bios, map);
+					return nvbios_rd08(bios, map);
 				}
 
 				return 0x00;
 			}
 
-			nv_warn(bios, "unknown ddc map v%02x\n", ver);
+			nvkm_warn(subdev, "unknown ddc map v%02x\n", ver);
 		}
 	}
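
Both mxm_sor_map() and mxm_ddc_map() above follow the same shape: locate
the MXM table, dereference a 16-bit sub-table pointer, check the
sub-table's version byte, bounds-check the requested index against the
entry count, and step past the header to the entry.  A hedged sketch of
that lookup pattern, reusing the illustrative vbios helpers from the
sketch after init.c:

    /* Versioned sub-table lookup: tbl[0] = version, tbl[1] = header
     * length, tbl[3] = entry count; entries follow the header.  Returns
     * 0x00 for unknown versions or out-of-range indices, as the driver
     * does.
     */
    static uint8_t map_lookup(const struct vbios *b, uint32_t tbl, uint8_t idx)
    {
            if (vbios_rd08(b, tbl) != 0x10) /* only v1.0 understood */
                    return 0x00;
            if (idx >= vbios_rd08(b, tbl + 3))
                    return 0x00;
            return vbios_rd08(b, tbl + vbios_rd08(b, tbl + 1) + idx);
    }
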
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c
index fd7dd71..955df29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/npde.c
@@ -32,12 +32,13 @@
 	u8  ver; u16 hdr;
 	u32 data = nvbios_pcirTp(bios, base, &ver, &hdr, &pcir);
 	if (data = (data + hdr + 0x0f) & ~0x0f, data) {
-		switch (nv_ro32(bios, data + 0x00)) {
+		switch (nvbios_rd32(bios, data + 0x00)) {
 		case 0x4544504e: /* NPDE */
 			break;
 		default:
-			nv_debug(bios, "%08x: NPDE signature (%08x) unknown\n",
-				 data, nv_ro32(bios, data + 0x00));
+			nvkm_debug(&bios->subdev,
+				   "%08x: NPDE signature (%08x) unknown\n",
+				   data, nvbios_rd32(bios, data + 0x00));
 			data = 0;
 			break;
 		}
@@ -51,8 +52,8 @@
 	u32 data = nvbios_npdeTe(bios, base);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->image_size = nv_ro16(bios, data + 0x08) * 512;
-		info->last = nv_ro08(bios, data + 0x0a) & 0x80;
+		info->image_size = nvbios_rd16(bios, data + 0x08) * 512;
+		info->last = nvbios_rd08(bios, data + 0x0a) & 0x80;
 	}
 	return data;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c
index df59787..67cb3aeb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pcir.c
@@ -27,19 +27,20 @@
 u32
 nvbios_pcirTe(struct nvkm_bios *bios, u32 base, u8 *ver, u16 *hdr)
 {
-	u32 data = nv_ro16(bios, base + 0x18);
+	u32 data = nvbios_rd16(bios, base + 0x18);
 	if (data) {
 		data += base;
-		switch (nv_ro32(bios, data + 0x00)) {
+		switch (nvbios_rd32(bios, data + 0x00)) {
 		case 0x52494350: /* PCIR */
 		case 0x53494752: /* RGIS */
 		case 0x5344504e: /* NPDS */
-			*hdr = nv_ro16(bios, data + 0x0a);
-			*ver = nv_ro08(bios, data + 0x0c);
+			*hdr = nvbios_rd16(bios, data + 0x0a);
+			*ver = nvbios_rd08(bios, data + 0x0c);
 			break;
 		default:
-			nv_debug(bios, "%08x: PCIR signature (%08x) unknown\n",
-				 data, nv_ro32(bios, data + 0x00));
+			nvkm_debug(&bios->subdev,
+				   "%08x: PCIR signature (%08x) unknown\n",
+				   data, nvbios_rd32(bios, data + 0x00));
 			data = 0;
 			break;
 		}
@@ -54,15 +55,15 @@
 	u32 data = nvbios_pcirTe(bios, base, ver, hdr);
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
-		info->vendor_id = nv_ro16(bios, data + 0x04);
-		info->device_id = nv_ro16(bios, data + 0x06);
-		info->class_code[0] = nv_ro08(bios, data + 0x0d);
-		info->class_code[1] = nv_ro08(bios, data + 0x0e);
-		info->class_code[2] = nv_ro08(bios, data + 0x0f);
-		info->image_size = nv_ro16(bios, data + 0x10) * 512;
-		info->image_rev = nv_ro16(bios, data + 0x12);
-		info->image_type = nv_ro08(bios, data + 0x14);
-		info->last = nv_ro08(bios, data + 0x15) & 0x80;
+		info->vendor_id = nvbios_rd16(bios, data + 0x04);
+		info->device_id = nvbios_rd16(bios, data + 0x06);
+		info->class_code[0] = nvbios_rd08(bios, data + 0x0d);
+		info->class_code[1] = nvbios_rd08(bios, data + 0x0e);
+		info->class_code[2] = nvbios_rd08(bios, data + 0x0f);
+		info->image_size = nvbios_rd16(bios, data + 0x10) * 512;
+		info->image_rev = nvbios_rd16(bios, data + 0x12);
+		info->image_type = nvbios_rd08(bios, data + 0x14);
+		info->last = nvbios_rd08(bios, data + 0x15) & 0x80;
 	}
 	return data;
 }
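
The magic numbers compared against here are ASCII signatures read as
little-endian 32-bit words: 0x52494350 is "PCIR", 0x53494752 is "RGIS",
0x5344504e is "NPDS", and 0x4544504e (in npde.c above) is "NPDE".  A
quick standalone check, independent of the kernel:

    #include <stdio.h>
    #include <stdint.h>

    /* Build the u32 that a little-endian byte-wise read of s yields. */
    static uint32_t fourcc(const char s[4])
    {
            return (uint8_t)s[0] |
                   (uint32_t)(uint8_t)s[1] << 8 |
                   (uint32_t)(uint8_t)s[2] << 16 |
                   (uint32_t)(uint8_t)s[3] << 24;
    }

    int main(void)
    {
            printf("%08x\n", (unsigned)fourcc("PCIR"));     /* 52494350 */
            printf("%08x\n", (unsigned)fourcc("NPDE"));     /* 4544504e */
            return 0;
    }
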
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
index 382ae9c..aa7e33b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
@@ -25,8 +25,6 @@
 #include <subdev/bios/bit.h>
 #include <subdev/bios/perf.h>
 
-#include <core/device.h>
-
 u16
 nvbios_perf_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
 		  u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
@@ -36,22 +34,22 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version <= 2) {
-			perf = nv_ro16(bios, bit_P.offset + 0);
+			perf = nvbios_rd16(bios, bit_P.offset + 0);
 			if (perf) {
-				*ver = nv_ro08(bios, perf + 0);
-				*hdr = nv_ro08(bios, perf + 1);
+				*ver = nvbios_rd08(bios, perf + 0);
+				*hdr = nvbios_rd08(bios, perf + 1);
 				if (*ver >= 0x40 && *ver < 0x41) {
-					*cnt = nv_ro08(bios, perf + 5);
-					*len = nv_ro08(bios, perf + 2);
-					*snr = nv_ro08(bios, perf + 4);
-					*ssz = nv_ro08(bios, perf + 3);
+					*cnt = nvbios_rd08(bios, perf + 5);
+					*len = nvbios_rd08(bios, perf + 2);
+					*snr = nvbios_rd08(bios, perf + 4);
+					*ssz = nvbios_rd08(bios, perf + 3);
 					return perf;
 				} else
 				if (*ver >= 0x20 && *ver < 0x40) {
-					*cnt = nv_ro08(bios, perf + 2);
-					*len = nv_ro08(bios, perf + 3);
-					*snr = nv_ro08(bios, perf + 4);
-					*ssz = nv_ro08(bios, perf + 5);
+					*cnt = nvbios_rd08(bios, perf + 2);
+					*len = nvbios_rd08(bios, perf + 3);
+					*snr = nvbios_rd08(bios, perf + 4);
+					*ssz = nvbios_rd08(bios, perf + 5);
 					return perf;
 				}
 			}
@@ -59,13 +57,13 @@
 	}
 
 	if (bios->bmp_offset) {
-		if (nv_ro08(bios, bios->bmp_offset + 6) >= 0x25) {
-			perf = nv_ro16(bios, bios->bmp_offset + 0x94);
+		if (nvbios_rd08(bios, bios->bmp_offset + 6) >= 0x25) {
+			perf = nvbios_rd16(bios, bios->bmp_offset + 0x94);
 			if (perf) {
-				*hdr = nv_ro08(bios, perf + 0);
-				*ver = nv_ro08(bios, perf + 1);
-				*cnt = nv_ro08(bios, perf + 2);
-				*len = nv_ro08(bios, perf + 3);
+				*hdr = nvbios_rd08(bios, perf + 0);
+				*ver = nvbios_rd08(bios, perf + 1);
+				*cnt = nvbios_rd08(bios, perf + 2);
+				*len = nvbios_rd08(bios, perf + 3);
 				*snr = 0;
 				*ssz = 0;
 				return perf;
@@ -98,55 +96,55 @@
 {
 	u16 perf = nvbios_perf_entry(bios, idx, ver, hdr, cnt, len);
 	memset(info, 0x00, sizeof(*info));
-	info->pstate = nv_ro08(bios, perf + 0x00);
+	info->pstate = nvbios_rd08(bios, perf + 0x00);
 	switch (!!perf * *ver) {
 	case 0x12:
 	case 0x13:
 	case 0x14:
-		info->core     = nv_ro32(bios, perf + 0x01) * 10;
-		info->memory   = nv_ro32(bios, perf + 0x05) * 20;
-		info->fanspeed = nv_ro08(bios, perf + 0x37);
+		info->core     = nvbios_rd32(bios, perf + 0x01) * 10;
+		info->memory   = nvbios_rd32(bios, perf + 0x05) * 20;
+		info->fanspeed = nvbios_rd08(bios, perf + 0x37);
 		if (*hdr > 0x38)
-			info->voltage = nv_ro08(bios, perf + 0x38);
+			info->voltage = nvbios_rd08(bios, perf + 0x38);
 		break;
 	case 0x21:
 	case 0x23:
 	case 0x24:
-		info->fanspeed = nv_ro08(bios, perf + 0x04);
-		info->voltage  = nv_ro08(bios, perf + 0x05);
-		info->shader   = nv_ro16(bios, perf + 0x06) * 1000;
+		info->fanspeed = nvbios_rd08(bios, perf + 0x04);
+		info->voltage  = nvbios_rd08(bios, perf + 0x05);
+		info->shader   = nvbios_rd16(bios, perf + 0x06) * 1000;
 		info->core     = info->shader + (signed char)
-				 nv_ro08(bios, perf + 0x08) * 1000;
-		switch (nv_device(bios)->chipset) {
+				 nvbios_rd08(bios, perf + 0x08) * 1000;
+		switch (bios->subdev.device->chipset) {
 		case 0x49:
 		case 0x4b:
-			info->memory = nv_ro16(bios, perf + 0x0b) * 1000;
+			info->memory = nvbios_rd16(bios, perf + 0x0b) * 1000;
 			break;
 		default:
-			info->memory = nv_ro16(bios, perf + 0x0b) * 2000;
+			info->memory = nvbios_rd16(bios, perf + 0x0b) * 2000;
 			break;
 		}
 		break;
 	case 0x25:
-		info->fanspeed = nv_ro08(bios, perf + 0x04);
-		info->voltage  = nv_ro08(bios, perf + 0x05);
-		info->core     = nv_ro16(bios, perf + 0x06) * 1000;
-		info->shader   = nv_ro16(bios, perf + 0x0a) * 1000;
-		info->memory   = nv_ro16(bios, perf + 0x0c) * 1000;
+		info->fanspeed = nvbios_rd08(bios, perf + 0x04);
+		info->voltage  = nvbios_rd08(bios, perf + 0x05);
+		info->core     = nvbios_rd16(bios, perf + 0x06) * 1000;
+		info->shader   = nvbios_rd16(bios, perf + 0x0a) * 1000;
+		info->memory   = nvbios_rd16(bios, perf + 0x0c) * 1000;
 		break;
 	case 0x30:
-		info->script   = nv_ro16(bios, perf + 0x02);
+		info->script   = nvbios_rd16(bios, perf + 0x02);
 	case 0x35:
-		info->fanspeed = nv_ro08(bios, perf + 0x06);
-		info->voltage  = nv_ro08(bios, perf + 0x07);
-		info->core     = nv_ro16(bios, perf + 0x08) * 1000;
-		info->shader   = nv_ro16(bios, perf + 0x0a) * 1000;
-		info->memory   = nv_ro16(bios, perf + 0x0c) * 1000;
-		info->vdec     = nv_ro16(bios, perf + 0x10) * 1000;
-		info->disp     = nv_ro16(bios, perf + 0x14) * 1000;
+		info->fanspeed = nvbios_rd08(bios, perf + 0x06);
+		info->voltage  = nvbios_rd08(bios, perf + 0x07);
+		info->core     = nvbios_rd16(bios, perf + 0x08) * 1000;
+		info->shader   = nvbios_rd16(bios, perf + 0x0a) * 1000;
+		info->memory   = nvbios_rd16(bios, perf + 0x0c) * 1000;
+		info->vdec     = nvbios_rd16(bios, perf + 0x10) * 1000;
+		info->disp     = nvbios_rd16(bios, perf + 0x14) * 1000;
 		break;
 	case 0x40:
-		info->voltage  = nv_ro08(bios, perf + 0x02);
+		info->voltage  = nvbios_rd08(bios, perf + 0x02);
 		break;
 	default:
 		return 0x0000;
@@ -175,7 +173,7 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	case 0x40:
-		info->v40.freq = (nv_ro16(bios, data + 0x00) & 0x3fff) * 1000;
+		info->v40.freq = (nvbios_rd16(bios, data + 0x00) & 0x3fff) * 1000;
 		break;
 	default:
 		break;
@@ -193,7 +191,7 @@
 		return -ENODEV;
 
 	if (ver >= 0x20 && ver < 0x40 && hdr > 6)
-		fan->pwm_divisor = nv_ro16(bios, perf + 6);
+		fan->pwm_divisor = nvbios_rd16(bios, perf + 6);
 	else
 		fan->pwm_divisor = 0;
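
A note on the switch (!!perf * *ver) construct used by the perf parsers
above (and again in pmu.c and rammap.c below): !!perf collapses the table
offset to 0 or 1, so a missing table multiplies the version down to zero
and lands in the default case, while a present table dispatches on the
version byte unchanged.  A minimal demonstration of the idiom:

    #include <assert.h>
    #include <stdint.h>

    static int parse(uint16_t data, uint8_t ver)
    {
            switch (!!data * ver) {
            case 0x40:
                    return 1;       /* table present, version 4.0 */
            default:
                    return 0;       /* table absent, or unknown version */
            }
    }

    int main(void)
    {
            assert(parse(0x1234, 0x40) == 1);
            assert(parse(0x0000, 0x40) == 0);       /* 0 * 0x40 == 0 */
            return 0;
    }
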
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
index ebd402e..125ec2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
@@ -27,7 +27,6 @@
 #include <subdev/bios/pll.h>
 #include <subdev/vga.h>
 
-#include <core/device.h>
 
 struct pll_mapping {
 	u8  type;
@@ -84,20 +83,20 @@
 	struct bit_entry bit_C;
 
 	if (!bit_entry(bios, 'C', &bit_C) && bit_C.length >= 10) {
-		u16 data = nv_ro16(bios, bit_C.offset + 8);
+		u16 data = nvbios_rd16(bios, bit_C.offset + 8);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0);
-			*hdr = nv_ro08(bios, data + 1);
-			*len = nv_ro08(bios, data + 2);
-			*cnt = nv_ro08(bios, data + 3);
+			*ver = nvbios_rd08(bios, data + 0);
+			*hdr = nvbios_rd08(bios, data + 1);
+			*len = nvbios_rd08(bios, data + 2);
+			*cnt = nvbios_rd08(bios, data + 3);
 			return data;
 		}
 	}
 
 	if (bmp_version(bios) >= 0x0524) {
-		u16 data = nv_ro16(bios, bios->bmp_offset + 142);
+		u16 data = nvbios_rd16(bios, bios->bmp_offset + 142);
 		if (data) {
-			*ver = nv_ro08(bios, data + 0);
+			*ver = nvbios_rd08(bios, data + 0);
 			*hdr = 1;
 			*cnt = 1;
 			*len = 0x18;
@@ -112,7 +111,8 @@
 static struct pll_mapping *
 pll_map(struct nvkm_bios *bios)
 {
-	switch (nv_device(bios)->card_type) {
+	struct nvkm_device *device = bios->subdev.device;
+	switch (device->card_type) {
 	case NV_04:
 	case NV_10:
 	case NV_11:
@@ -123,12 +123,12 @@
 	case NV_40:
 		return nv40_pll_mapping;
 	case NV_50:
-		if (nv_device(bios)->chipset == 0x50)
+		if (device->chipset == 0x50)
 			return nv50_pll_mapping;
 		else
-		if (nv_device(bios)->chipset <  0xa3 ||
-		    nv_device(bios)->chipset == 0xaa ||
-		    nv_device(bios)->chipset == 0xac)
+		if (device->chipset <  0xa3 ||
+		    device->chipset == 0xaa ||
+		    device->chipset == 0xac)
 			return g84_pll_mapping;
 	default:
 		return NULL;
@@ -146,8 +146,8 @@
 	if (data && *ver >= 0x30) {
 		data += hdr;
 		while (cnt--) {
-			if (nv_ro32(bios, data + 3) == reg) {
-				*type = nv_ro08(bios, data + 0);
+			if (nvbios_rd32(bios, data + 3) == reg) {
+				*type = nvbios_rd08(bios, data + 0);
 				return data;
 			}
 			data += *len;
@@ -161,7 +161,7 @@
 			u16 addr = (data += hdr);
 			*type = map->type;
 			while (cnt--) {
-				if (nv_ro32(bios, data) == map->reg)
+				if (nvbios_rd32(bios, data) == map->reg)
 					return data;
 				data += *len;
 			}
@@ -188,8 +188,8 @@
 	if (data && *ver >= 0x30) {
 		data += hdr;
 		while (cnt--) {
-			if (nv_ro08(bios, data + 0) == type) {
-				*reg = nv_ro32(bios, data + 3);
+			if (nvbios_rd08(bios, data + 0) == type) {
+				*reg = nvbios_rd32(bios, data + 3);
 				return data;
 			}
 			data += *len;
@@ -203,7 +203,7 @@
 			u16 addr = (data += hdr);
 			*reg = map->reg;
 			while (cnt--) {
-				if (nv_ro32(bios, data) == map->reg)
+				if (nvbios_rd32(bios, data) == map->reg)
 					return data;
 				data += *len;
 			}
@@ -222,6 +222,8 @@
 int
 nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
+	struct nvkm_device *device = subdev->device;
 	u8  ver, len;
 	u32 reg = type;
 	u16 data;
@@ -245,12 +247,12 @@
 		break;
 	case 0x10:
 	case 0x11:
-		info->vco1.min_freq = nv_ro32(bios, data + 0);
-		info->vco1.max_freq = nv_ro32(bios, data + 4);
-		info->vco2.min_freq = nv_ro32(bios, data + 8);
-		info->vco2.max_freq = nv_ro32(bios, data + 12);
-		info->vco1.min_inputfreq = nv_ro32(bios, data + 16);
-		info->vco2.min_inputfreq = nv_ro32(bios, data + 20);
+		info->vco1.min_freq = nvbios_rd32(bios, data + 0);
+		info->vco1.max_freq = nvbios_rd32(bios, data + 4);
+		info->vco2.min_freq = nvbios_rd32(bios, data + 8);
+		info->vco2.max_freq = nvbios_rd32(bios, data + 12);
+		info->vco1.min_inputfreq = nvbios_rd32(bios, data + 16);
+		info->vco2.min_inputfreq = nvbios_rd32(bios, data + 20);
 		info->vco1.max_inputfreq = INT_MAX;
 		info->vco2.max_inputfreq = INT_MAX;
 
@@ -291,82 +293,82 @@
 		break;
 	case 0x20:
 	case 0x21:
-		info->vco1.min_freq = nv_ro16(bios, data + 4) * 1000;
-		info->vco1.max_freq = nv_ro16(bios, data + 6) * 1000;
-		info->vco2.min_freq = nv_ro16(bios, data + 8) * 1000;
-		info->vco2.max_freq = nv_ro16(bios, data + 10) * 1000;
-		info->vco1.min_inputfreq = nv_ro16(bios, data + 12) * 1000;
-		info->vco2.min_inputfreq = nv_ro16(bios, data + 14) * 1000;
-		info->vco1.max_inputfreq = nv_ro16(bios, data + 16) * 1000;
-		info->vco2.max_inputfreq = nv_ro16(bios, data + 18) * 1000;
-		info->vco1.min_n = nv_ro08(bios, data + 20);
-		info->vco1.max_n = nv_ro08(bios, data + 21);
-		info->vco1.min_m = nv_ro08(bios, data + 22);
-		info->vco1.max_m = nv_ro08(bios, data + 23);
-		info->vco2.min_n = nv_ro08(bios, data + 24);
-		info->vco2.max_n = nv_ro08(bios, data + 25);
-		info->vco2.min_m = nv_ro08(bios, data + 26);
-		info->vco2.max_m = nv_ro08(bios, data + 27);
+		info->vco1.min_freq = nvbios_rd16(bios, data + 4) * 1000;
+		info->vco1.max_freq = nvbios_rd16(bios, data + 6) * 1000;
+		info->vco2.min_freq = nvbios_rd16(bios, data + 8) * 1000;
+		info->vco2.max_freq = nvbios_rd16(bios, data + 10) * 1000;
+		info->vco1.min_inputfreq = nvbios_rd16(bios, data + 12) * 1000;
+		info->vco2.min_inputfreq = nvbios_rd16(bios, data + 14) * 1000;
+		info->vco1.max_inputfreq = nvbios_rd16(bios, data + 16) * 1000;
+		info->vco2.max_inputfreq = nvbios_rd16(bios, data + 18) * 1000;
+		info->vco1.min_n = nvbios_rd08(bios, data + 20);
+		info->vco1.max_n = nvbios_rd08(bios, data + 21);
+		info->vco1.min_m = nvbios_rd08(bios, data + 22);
+		info->vco1.max_m = nvbios_rd08(bios, data + 23);
+		info->vco2.min_n = nvbios_rd08(bios, data + 24);
+		info->vco2.max_n = nvbios_rd08(bios, data + 25);
+		info->vco2.min_m = nvbios_rd08(bios, data + 26);
+		info->vco2.max_m = nvbios_rd08(bios, data + 27);
 
-		info->max_p = nv_ro08(bios, data + 29);
+		info->max_p = nvbios_rd08(bios, data + 29);
 		info->max_p_usable = info->max_p;
 		if (bios->version.chip < 0x60)
 			info->max_p_usable = 0x6;
-		info->bias_p = nv_ro08(bios, data + 30);
+		info->bias_p = nvbios_rd08(bios, data + 30);
 
 		if (len > 0x22)
-			info->refclk = nv_ro32(bios, data + 31);
+			info->refclk = nvbios_rd32(bios, data + 31);
 		break;
 	case 0x30:
-		data = nv_ro16(bios, data + 1);
+		data = nvbios_rd16(bios, data + 1);
 
-		info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000;
-		info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000;
-		info->vco2.min_freq = nv_ro16(bios, data + 4) * 1000;
-		info->vco2.max_freq = nv_ro16(bios, data + 6) * 1000;
-		info->vco1.min_inputfreq = nv_ro16(bios, data + 8) * 1000;
-		info->vco2.min_inputfreq = nv_ro16(bios, data + 10) * 1000;
-		info->vco1.max_inputfreq = nv_ro16(bios, data + 12) * 1000;
-		info->vco2.max_inputfreq = nv_ro16(bios, data + 14) * 1000;
-		info->vco1.min_n = nv_ro08(bios, data + 16);
-		info->vco1.max_n = nv_ro08(bios, data + 17);
-		info->vco1.min_m = nv_ro08(bios, data + 18);
-		info->vco1.max_m = nv_ro08(bios, data + 19);
-		info->vco2.min_n = nv_ro08(bios, data + 20);
-		info->vco2.max_n = nv_ro08(bios, data + 21);
-		info->vco2.min_m = nv_ro08(bios, data + 22);
-		info->vco2.max_m = nv_ro08(bios, data + 23);
-		info->max_p_usable = info->max_p = nv_ro08(bios, data + 25);
-		info->bias_p = nv_ro08(bios, data + 27);
-		info->refclk = nv_ro32(bios, data + 28);
+		info->vco1.min_freq = nvbios_rd16(bios, data + 0) * 1000;
+		info->vco1.max_freq = nvbios_rd16(bios, data + 2) * 1000;
+		info->vco2.min_freq = nvbios_rd16(bios, data + 4) * 1000;
+		info->vco2.max_freq = nvbios_rd16(bios, data + 6) * 1000;
+		info->vco1.min_inputfreq = nvbios_rd16(bios, data + 8) * 1000;
+		info->vco2.min_inputfreq = nvbios_rd16(bios, data + 10) * 1000;
+		info->vco1.max_inputfreq = nvbios_rd16(bios, data + 12) * 1000;
+		info->vco2.max_inputfreq = nvbios_rd16(bios, data + 14) * 1000;
+		info->vco1.min_n = nvbios_rd08(bios, data + 16);
+		info->vco1.max_n = nvbios_rd08(bios, data + 17);
+		info->vco1.min_m = nvbios_rd08(bios, data + 18);
+		info->vco1.max_m = nvbios_rd08(bios, data + 19);
+		info->vco2.min_n = nvbios_rd08(bios, data + 20);
+		info->vco2.max_n = nvbios_rd08(bios, data + 21);
+		info->vco2.min_m = nvbios_rd08(bios, data + 22);
+		info->vco2.max_m = nvbios_rd08(bios, data + 23);
+		info->max_p_usable = info->max_p = nvbios_rd08(bios, data + 25);
+		info->bias_p = nvbios_rd08(bios, data + 27);
+		info->refclk = nvbios_rd32(bios, data + 28);
 		break;
 	case 0x40:
-		info->refclk = nv_ro16(bios, data + 9) * 1000;
-		data = nv_ro16(bios, data + 1);
+		info->refclk = nvbios_rd16(bios, data + 9) * 1000;
+		data = nvbios_rd16(bios, data + 1);
 
-		info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000;
-		info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000;
-		info->vco1.min_inputfreq = nv_ro16(bios, data + 4) * 1000;
-		info->vco1.max_inputfreq = nv_ro16(bios, data + 6) * 1000;
-		info->vco1.min_m = nv_ro08(bios, data + 8);
-		info->vco1.max_m = nv_ro08(bios, data + 9);
-		info->vco1.min_n = nv_ro08(bios, data + 10);
-		info->vco1.max_n = nv_ro08(bios, data + 11);
-		info->min_p = nv_ro08(bios, data + 12);
-		info->max_p = nv_ro08(bios, data + 13);
+		info->vco1.min_freq = nvbios_rd16(bios, data + 0) * 1000;
+		info->vco1.max_freq = nvbios_rd16(bios, data + 2) * 1000;
+		info->vco1.min_inputfreq = nvbios_rd16(bios, data + 4) * 1000;
+		info->vco1.max_inputfreq = nvbios_rd16(bios, data + 6) * 1000;
+		info->vco1.min_m = nvbios_rd08(bios, data + 8);
+		info->vco1.max_m = nvbios_rd08(bios, data + 9);
+		info->vco1.min_n = nvbios_rd08(bios, data + 10);
+		info->vco1.max_n = nvbios_rd08(bios, data + 11);
+		info->min_p = nvbios_rd08(bios, data + 12);
+		info->max_p = nvbios_rd08(bios, data + 13);
 		break;
 	default:
-		nv_error(bios, "unknown pll limits version 0x%02x\n", ver);
+		nvkm_error(subdev, "unknown pll limits version 0x%02x\n", ver);
 		return -EINVAL;
 	}
 
 	if (!info->refclk) {
-		info->refclk = nv_device(bios)->crystal;
+		info->refclk = device->crystal;
 		if (bios->version.chip == 0x51) {
-			u32 sel_clk = nv_rd32(bios, 0x680524);
+			u32 sel_clk = nvkm_rd32(device, 0x680524);
 			if ((info->reg == 0x680508 && sel_clk & 0x20) ||
 			    (info->reg == 0x680520 && sel_clk & 0x80)) {
-				if (nv_rdvgac(bios, 0, 0x27) < 0xa3)
+				if (nvkm_rdvgac(device, 0, 0x27) < 0xa3)
 					info->refclk = 200000;
 				else
 					info->refclk = 25000;
@@ -380,8 +382,8 @@
 	 * with an empty limit table (seen on nv18)
 	 */
 	if (!info->vco1.max_freq) {
-		info->vco1.max_freq = nv_ro32(bios, bios->bmp_offset + 67);
-		info->vco1.min_freq = nv_ro32(bios, bios->bmp_offset + 71);
+		info->vco1.max_freq = nvbios_rd32(bios, bios->bmp_offset + 67);
+		info->vco1.min_freq = nvbios_rd32(bios, bios->bmp_offset + 71);
 		if (bmp_version(bios) < 0x0506) {
 			info->vco1.max_freq = 256000;
 			info->vco1.min_freq = 128000;
@@ -393,7 +395,7 @@
 		info->vco1.max_n = 0xff;
 		info->vco1.min_m = 0x1;
 
-		if (nv_device(bios)->crystal == 13500) {
+		if (device->crystal == 13500) {
 			/* nv05 does this, nv11 doesn't, nv10 unknown */
 			if (bios->version.chip < 0x11)
 				info->vco1.min_m = 0x7;
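
For context on what these parsed limits feed: a classic two-stage NVIDIA
PLL generates refclk * N / M in its VCO and post-divides by 2^P, and the
table above supplies the legal N/M/P ranges plus the input/output
frequency windows for each VCO.  A hedged sketch of the arithmetic the
clock code performs against these limits (the real driver iterates the
parsed ranges to find the closest match; this is only the frequency
formula):

    #include <stdint.h>

    /* Single-VCO output in kHz: (refclk * N / M) >> P.  Callers clamp
     * N, M and P to the vco1 min/max fields parsed above.
     */
    static uint32_t pll_calc(uint32_t refclk_khz, uint32_t n, uint32_t m,
                             uint32_t p)
    {
            return (refclk_khz * n / m) >> p;
    }
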
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
index 20c5ce0..441ec45 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
@@ -49,12 +49,12 @@
 
 	if (!bit_entry(bios, 'p', &bit_p)) {
 		if (bit_p.version == 2 && bit_p.length >= 4)
-			data = nv_ro32(bios, bit_p.offset + 0x00);
+			data = nvbios_rd32(bios, bit_p.offset + 0x00);
 		if ((data = weirdo_pointer(bios, data))) {
-			*ver = nv_ro08(bios, data + 0x00); /* maybe? */
-			*hdr = nv_ro08(bios, data + 0x01);
-			*len = nv_ro08(bios, data + 0x02);
-			*cnt = nv_ro08(bios, data + 0x03);
+			*ver = nvbios_rd08(bios, data + 0x00); /* maybe? */
+			*hdr = nvbios_rd08(bios, data + 0x01);
+			*len = nvbios_rd08(bios, data + 0x02);
+			*cnt = nvbios_rd08(bios, data + 0x03);
 		}
 	}
 
@@ -95,8 +95,8 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!data * *ver) {
 	default:
-		info->type = nv_ro08(bios, data + 0x00);
-		info->data = nv_ro32(bios, data + 0x02);
+		info->type = nvbios_rd08(bios, data + 0x00);
+		info->data = nvbios_rd32(bios, data + 0x02);
 		break;
 	}
 	return data;
@@ -112,21 +112,21 @@
 	while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) {
 		if ( pmuE.type == type &&
 		    (data = weirdo_pointer(bios, pmuE.data))) {
-			info->init_addr_pmu = nv_ro32(bios, data + 0x08);
-			info->args_addr_pmu = nv_ro32(bios, data + 0x0c);
+			info->init_addr_pmu = nvbios_rd32(bios, data + 0x08);
+			info->args_addr_pmu = nvbios_rd32(bios, data + 0x0c);
 			info->boot_addr     = data + 0x30;
-			info->boot_addr_pmu = nv_ro32(bios, data + 0x10) +
-					      nv_ro32(bios, data + 0x18);
-			info->boot_size     = nv_ro32(bios, data + 0x1c) -
-					      nv_ro32(bios, data + 0x18);
+			info->boot_addr_pmu = nvbios_rd32(bios, data + 0x10) +
+					      nvbios_rd32(bios, data + 0x18);
+			info->boot_size     = nvbios_rd32(bios, data + 0x1c) -
+					      nvbios_rd32(bios, data + 0x18);
 			info->code_addr     = info->boot_addr + info->boot_size;
 			info->code_addr_pmu = info->boot_addr_pmu +
 					      info->boot_size;
-			info->code_size     = nv_ro32(bios, data + 0x20);
+			info->code_size     = nvbios_rd32(bios, data + 0x20);
 			info->data_addr     = data + 0x30 +
-					      nv_ro32(bios, data + 0x24);
-			info->data_addr_pmu = nv_ro32(bios, data + 0x28);
-			info->data_size     = nv_ro32(bios, data + 0x2c);
+					      nvbios_rd32(bios, data + 0x24);
+			info->data_addr_pmu = nvbios_rd32(bios, data + 0x28);
+			info->data_size     = nvbios_rd32(bios, data + 0x2c);
 			return true;
 		}
 	}
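
The pointer arithmetic above decodes a PMU firmware image descriptor: the
words at +0x18/+0x1c bound the bootloader, +0x20 is the code size,
+0x24/+0x28/+0x2c locate the data segment, and in-image addresses are
data + 0x30 plus the stated offsets.  Restated as a sketch (field names
are guesses mirroring the reads above; the format itself is not formally
documented, as the "maybe?" in nvbios_pmuTe hints; reuses the vbios
helpers from the earlier sketch):

    #include <stdint.h>

    struct pmu_img {
            uint32_t boot_addr, boot_size;
            uint32_t code_addr, code_size;
    };

    /* d is the descriptor's offset inside the BIOS image. */
    static void pmu_unpack(const struct vbios *b, uint32_t d,
                           struct pmu_img *i)
    {
            i->boot_addr = d + 0x30;
            i->boot_size = vbios_rd32(b, d + 0x1c) - vbios_rd32(b, d + 0x18);
            i->code_addr = i->boot_addr + i->boot_size;
            i->code_size = vbios_rd32(b, d + 0x20);
    }
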
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
index 95e4fa1..e0ec2a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
@@ -1,5 +1,6 @@
 #ifndef __NVKM_BIOS_PRIV_H__
 #define __NVKM_BIOS_PRIV_H__
+#define nvkm_bios(p) container_of((p), struct nvkm_bios, subdev)
 #include <subdev/bios.h>
 
 struct nvbios_source {
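
The nvkm_bios() macro added to priv.h is the standard container_of()
pattern: given a pointer to the embedded subdev member, recover the
enclosing nvkm_bios by subtracting the member's offset within the struct.
A freestanding illustration with stand-in types (the kernel's
container_of additionally type-checks the pointer):

    #include <assert.h>
    #include <stddef.h>

    struct subdev { int id; };
    struct bios   { int data; struct subdev subdev; };

    /* Subtract the member's offset to get back to the container. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
            struct bios b = { .data = 42 };
            struct subdev *s = &b.subdev;
            assert(container_of(s, struct bios, subdev) == &b);
            return 0;
    }
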
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c
index a17b221..d5222af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/ramcfg.c
@@ -29,7 +29,7 @@
 static u8
 nvbios_ramcfg_strap(struct nvkm_subdev *subdev)
 {
-	return (nv_rd32(subdev, 0x101000) & 0x0000003c) >> 2;
+	return (nvkm_rd32(subdev->device, 0x101000) & 0x0000003c) >> 2;
 }
 
 u8
@@ -39,9 +39,9 @@
 
 	if (!bit_entry(bios, 'M', &bit_M)) {
 		if (bit_M.version == 1 && bit_M.length >= 5)
-			return nv_ro08(bios, bit_M.offset + 2);
+			return nvbios_rd08(bios, bit_M.offset + 2);
 		if (bit_M.version == 2 && bit_M.length >= 3)
-			return nv_ro08(bios, bit_M.offset + 0);
+			return nvbios_rd08(bios, bit_M.offset + 0);
 	}
 
 	return 0x00;
@@ -50,7 +50,7 @@
 u8
 nvbios_ramcfg_index(struct nvkm_subdev *subdev)
 {
-	struct nvkm_bios *bios = nvkm_bios(subdev);
+	struct nvkm_bios *bios = subdev->device->bios;
 	u8 strap = nvbios_ramcfg_strap(subdev);
 	u32 xlat = 0x00000000;
 	struct bit_entry bit_M;
@@ -59,7 +59,7 @@
 
 	if (!bit_entry(bios, 'M', &bit_M)) {
 		if (bit_M.version == 1 && bit_M.length >= 5)
-			xlat = nv_ro16(bios, bit_M.offset + 3);
+			xlat = nvbios_rd16(bios, bit_M.offset + 3);
 		if (bit_M.version == 2 && bit_M.length >= 3) {
 			/*XXX: is M ever shorter than this?
 			 *     if not - what is xlat used for now?
@@ -68,11 +68,11 @@
 			if (bit_M.length >= 7 &&
 			    nvbios_M0203Em(bios, strap, &ver, &hdr, &M0203E))
 				return M0203E.group;
-			xlat = nv_ro16(bios, bit_M.offset + 1);
+			xlat = nvbios_rd16(bios, bit_M.offset + 1);
 		}
 	}
 
 	if (xlat)
-		strap = nv_ro08(bios, xlat + strap);
+		strap = nvbios_rd08(bios, xlat + strap);
 	return strap;
 }
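
nvbios_ramcfg_index() above distils to: read the board strap bits
(register 0x101000, bits 5:2, the same register the new
INIT_STRAP_CONDITION opcode in init.c tests), then optionally remap them
through a translation table supplied by the BIT 'M' chunk.  A sketch of
the translation step, reusing the illustrative vbios helpers from
earlier:

    /* Strap translation: the raw strap indexes a byte table when the
     * BIT 'M' chunk supplies one (xlat != 0), else it is used directly.
     */
    static uint8_t ramcfg_index(const struct vbios *b, uint8_t strap,
                                uint16_t xlat)
    {
            return xlat ? vbios_rd08(b, xlat + strap) : strap;
    }
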
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
index 8b17bb4..f0e1fc74 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
@@ -34,18 +34,18 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2)
-			rammap = nv_ro16(bios, bit_P.offset + 4);
+			rammap = nvbios_rd16(bios, bit_P.offset + 4);
 
 		if (rammap) {
-			*ver = nv_ro08(bios, rammap + 0);
+			*ver = nvbios_rd08(bios, rammap + 0);
 			switch (*ver) {
 			case 0x10:
 			case 0x11:
-				*hdr = nv_ro08(bios, rammap + 1);
-				*cnt = nv_ro08(bios, rammap + 5);
-				*len = nv_ro08(bios, rammap + 2);
-				*snr = nv_ro08(bios, rammap + 4);
-				*ssz = nv_ro08(bios, rammap + 3);
+				*hdr = nvbios_rd08(bios, rammap + 1);
+				*cnt = nvbios_rd08(bios, rammap + 5);
+				*len = nvbios_rd08(bios, rammap + 2);
+				*snr = nvbios_rd08(bios, rammap + 4);
+				*ssz = nvbios_rd08(bios, rammap + 3);
 				return rammap;
 			default:
 				break;
@@ -72,6 +72,21 @@
 	return 0x0000;
 }
 
+/* Pretend a performance mode is also a rammap entry; this helps to
+ * coalesce entries later on. */
+u32
+nvbios_rammapEp_from_perf(struct nvkm_bios *bios, u32 data, u8 size,
+		struct nvbios_ramcfg *p)
+{
+	memset(p, 0x00, sizeof(*p));
+
+	p->rammap_00_16_20 = (nvbios_rd08(bios, data + 0x16) & 0x20) >> 5;
+	p->rammap_00_16_40 = (nvbios_rd08(bios, data + 0x16) & 0x40) >> 6;
+	p->rammap_00_17_02 = (nvbios_rd08(bios, data + 0x17) & 0x02) >> 1;
+
+	return data;
+}
+
 u32
 nvbios_rammapEp(struct nvkm_bios *bios, int idx,
 		u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ramcfg *p)
@@ -82,18 +97,18 @@
 	p->rammap_hdr = *hdr;
 	switch (!!data * *ver) {
 	case 0x10:
-		p->rammap_min      =  nv_ro16(bios, data + 0x00);
-		p->rammap_max      =  nv_ro16(bios, data + 0x02);
-		p->rammap_10_04_02 = (nv_ro08(bios, data + 0x04) & 0x02) >> 1;
-		p->rammap_10_04_08 = (nv_ro08(bios, data + 0x04) & 0x08) >> 3;
+		p->rammap_min      =  nvbios_rd16(bios, data + 0x00);
+		p->rammap_max      =  nvbios_rd16(bios, data + 0x02);
+		p->rammap_10_04_02 = (nvbios_rd08(bios, data + 0x04) & 0x02) >> 1;
+		p->rammap_10_04_08 = (nvbios_rd08(bios, data + 0x04) & 0x08) >> 3;
 		break;
 	case 0x11:
-		p->rammap_min      =  nv_ro16(bios, data + 0x00);
-		p->rammap_max      =  nv_ro16(bios, data + 0x02);
-		p->rammap_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
-		p->rammap_11_08_0c = (nv_ro08(bios, data + 0x08) & 0x0c) >> 2;
-		p->rammap_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
-		temp = nv_ro32(bios, data + 0x09);
+		p->rammap_min      =  nvbios_rd16(bios, data + 0x00);
+		p->rammap_max      =  nvbios_rd16(bios, data + 0x02);
+		p->rammap_11_08_01 = (nvbios_rd08(bios, data + 0x08) & 0x01) >> 0;
+		p->rammap_11_08_0c = (nvbios_rd08(bios, data + 0x08) & 0x0c) >> 2;
+		p->rammap_11_08_10 = (nvbios_rd08(bios, data + 0x08) & 0x10) >> 4;
+		temp = nvbios_rd32(bios, data + 0x09);
 		p->rammap_11_09_01ff = (temp & 0x000001ff) >> 0;
 		p->rammap_11_0a_03fe = (temp & 0x0003fe00) >> 9;
 		p->rammap_11_0a_0400 = (temp & 0x00040000) >> 18;
@@ -102,10 +117,10 @@
 		p->rammap_11_0b_0200 = (temp & 0x02000000) >> 25;
 		p->rammap_11_0b_0400 = (temp & 0x04000000) >> 26;
 		p->rammap_11_0b_0800 = (temp & 0x08000000) >> 27;
-		p->rammap_11_0d    =  nv_ro08(bios, data + 0x0d);
-		p->rammap_11_0e    =  nv_ro08(bios, data + 0x0e);
-		p->rammap_11_0f    =  nv_ro08(bios, data + 0x0f);
-		p->rammap_11_11_0c = (nv_ro08(bios, data + 0x11) & 0x0c) >> 2;
+		p->rammap_11_0d    =  nvbios_rd08(bios, data + 0x0d);
+		p->rammap_11_0e    =  nvbios_rd08(bios, data + 0x0e);
+		p->rammap_11_0f    =  nvbios_rd08(bios, data + 0x0f);
+		p->rammap_11_11_0c = (nvbios_rd08(bios, data + 0x11) & 0x0c) >> 2;
 		break;
 	default:
 		data = 0;
@@ -141,6 +156,36 @@
 }
 
 u32
+nvbios_rammapSp_from_perf(struct nvkm_bios *bios, u32 data, u8 size, int idx,
+		struct nvbios_ramcfg *p)
+{
+	data += (idx * size);
+
+	if (size < 11)
+		return 0x00000000;
+
+	p->ramcfg_ver = 0;
+	p->ramcfg_timing   =  nvbios_rd08(bios, data + 0x01);
+	p->ramcfg_00_03_01 = (nvbios_rd08(bios, data + 0x03) & 0x01) >> 0;
+	p->ramcfg_00_03_02 = (nvbios_rd08(bios, data + 0x03) & 0x02) >> 1;
+	p->ramcfg_DLLoff   = (nvbios_rd08(bios, data + 0x03) & 0x04) >> 2;
+	p->ramcfg_00_03_08 = (nvbios_rd08(bios, data + 0x03) & 0x08) >> 3;
+	p->ramcfg_RON      = (nvbios_rd08(bios, data + 0x03) & 0x10) >> 4;
+	p->ramcfg_00_04_02 = (nvbios_rd08(bios, data + 0x04) & 0x02) >> 1;
+	p->ramcfg_00_04_04 = (nvbios_rd08(bios, data + 0x04) & 0x04) >> 2;
+	p->ramcfg_00_04_20 = (nvbios_rd08(bios, data + 0x04) & 0x20) >> 5;
+	p->ramcfg_00_05    = (nvbios_rd08(bios, data + 0x05) & 0xff) >> 0;
+	p->ramcfg_00_06    = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
+	p->ramcfg_00_07    = (nvbios_rd08(bios, data + 0x07) & 0xff) >> 0;
+	p->ramcfg_00_08    = (nvbios_rd08(bios, data + 0x08) & 0xff) >> 0;
+	p->ramcfg_00_09    = (nvbios_rd08(bios, data + 0x09) & 0xff) >> 0;
+	p->ramcfg_00_0a_0f = (nvbios_rd08(bios, data + 0x0a) & 0x0f) >> 0;
+	p->ramcfg_00_0a_f0 = (nvbios_rd08(bios, data + 0x0a) & 0xf0) >> 4;
+
+	return data;
+}
+
+u32
 nvbios_rammapSp(struct nvkm_bios *bios, u32 data,
 		u8 ever, u8 ehdr, u8 ecnt, u8 elen, int idx,
 		u8 *ver, u8 *hdr, struct nvbios_ramcfg *p)
@@ -150,58 +195,58 @@
 	p->ramcfg_hdr = *hdr;
 	switch (!!data * *ver) {
 	case 0x10:
-		p->ramcfg_timing   =  nv_ro08(bios, data + 0x01);
-		p->ramcfg_10_02_01 = (nv_ro08(bios, data + 0x02) & 0x01) >> 0;
-		p->ramcfg_10_02_02 = (nv_ro08(bios, data + 0x02) & 0x02) >> 1;
-		p->ramcfg_10_02_04 = (nv_ro08(bios, data + 0x02) & 0x04) >> 2;
-		p->ramcfg_10_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3;
-		p->ramcfg_10_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4;
-		p->ramcfg_10_02_20 = (nv_ro08(bios, data + 0x02) & 0x20) >> 5;
-		p->ramcfg_10_DLLoff = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
-		p->ramcfg_10_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0;
-		p->ramcfg_10_04_01 = (nv_ro08(bios, data + 0x04) & 0x01) >> 0;
-		p->ramcfg_10_05    = (nv_ro08(bios, data + 0x05) & 0xff) >> 0;
-		p->ramcfg_10_06    = (nv_ro08(bios, data + 0x06) & 0xff) >> 0;
-		p->ramcfg_10_07    = (nv_ro08(bios, data + 0x07) & 0xff) >> 0;
-		p->ramcfg_10_08    = (nv_ro08(bios, data + 0x08) & 0xff) >> 0;
-		p->ramcfg_10_09_0f = (nv_ro08(bios, data + 0x09) & 0x0f) >> 0;
-		p->ramcfg_10_09_f0 = (nv_ro08(bios, data + 0x09) & 0xf0) >> 4;
+		p->ramcfg_timing   =  nvbios_rd08(bios, data + 0x01);
+		p->ramcfg_10_02_01 = (nvbios_rd08(bios, data + 0x02) & 0x01) >> 0;
+		p->ramcfg_10_02_02 = (nvbios_rd08(bios, data + 0x02) & 0x02) >> 1;
+		p->ramcfg_10_02_04 = (nvbios_rd08(bios, data + 0x02) & 0x04) >> 2;
+		p->ramcfg_10_02_08 = (nvbios_rd08(bios, data + 0x02) & 0x08) >> 3;
+		p->ramcfg_10_02_10 = (nvbios_rd08(bios, data + 0x02) & 0x10) >> 4;
+		p->ramcfg_10_02_20 = (nvbios_rd08(bios, data + 0x02) & 0x20) >> 5;
+		p->ramcfg_DLLoff   = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
+		p->ramcfg_10_03_0f = (nvbios_rd08(bios, data + 0x03) & 0x0f) >> 0;
+		p->ramcfg_10_04_01 = (nvbios_rd08(bios, data + 0x04) & 0x01) >> 0;
+		p->ramcfg_10_05    = (nvbios_rd08(bios, data + 0x05) & 0xff) >> 0;
+		p->ramcfg_10_06    = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
+		p->ramcfg_10_07    = (nvbios_rd08(bios, data + 0x07) & 0xff) >> 0;
+		p->ramcfg_10_08    = (nvbios_rd08(bios, data + 0x08) & 0xff) >> 0;
+		p->ramcfg_10_09_0f = (nvbios_rd08(bios, data + 0x09) & 0x0f) >> 0;
+		p->ramcfg_10_09_f0 = (nvbios_rd08(bios, data + 0x09) & 0xf0) >> 4;
 		break;
 	case 0x11:
-		p->ramcfg_timing   =  nv_ro08(bios, data + 0x00);
-		p->ramcfg_11_01_01 = (nv_ro08(bios, data + 0x01) & 0x01) >> 0;
-		p->ramcfg_11_01_02 = (nv_ro08(bios, data + 0x01) & 0x02) >> 1;
-		p->ramcfg_11_01_04 = (nv_ro08(bios, data + 0x01) & 0x04) >> 2;
-		p->ramcfg_11_01_08 = (nv_ro08(bios, data + 0x01) & 0x08) >> 3;
-		p->ramcfg_11_01_10 = (nv_ro08(bios, data + 0x01) & 0x10) >> 4;
-		p->ramcfg_11_01_20 = (nv_ro08(bios, data + 0x01) & 0x20) >> 5;
-		p->ramcfg_11_01_40 = (nv_ro08(bios, data + 0x01) & 0x40) >> 6;
-		p->ramcfg_11_01_80 = (nv_ro08(bios, data + 0x01) & 0x80) >> 7;
-		p->ramcfg_11_02_03 = (nv_ro08(bios, data + 0x02) & 0x03) >> 0;
-		p->ramcfg_11_02_04 = (nv_ro08(bios, data + 0x02) & 0x04) >> 2;
-		p->ramcfg_11_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3;
-		p->ramcfg_11_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4;
-		p->ramcfg_11_02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
-		p->ramcfg_11_02_80 = (nv_ro08(bios, data + 0x02) & 0x80) >> 7;
-		p->ramcfg_11_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0;
-		p->ramcfg_11_03_30 = (nv_ro08(bios, data + 0x03) & 0x30) >> 4;
-		p->ramcfg_11_03_c0 = (nv_ro08(bios, data + 0x03) & 0xc0) >> 6;
-		p->ramcfg_11_03_f0 = (nv_ro08(bios, data + 0x03) & 0xf0) >> 4;
-		p->ramcfg_11_04    = (nv_ro08(bios, data + 0x04) & 0xff) >> 0;
-		p->ramcfg_11_06    = (nv_ro08(bios, data + 0x06) & 0xff) >> 0;
-		p->ramcfg_11_07_02 = (nv_ro08(bios, data + 0x07) & 0x02) >> 1;
-		p->ramcfg_11_07_04 = (nv_ro08(bios, data + 0x07) & 0x04) >> 2;
-		p->ramcfg_11_07_08 = (nv_ro08(bios, data + 0x07) & 0x08) >> 3;
-		p->ramcfg_11_07_10 = (nv_ro08(bios, data + 0x07) & 0x10) >> 4;
-		p->ramcfg_11_07_40 = (nv_ro08(bios, data + 0x07) & 0x40) >> 6;
-		p->ramcfg_11_07_80 = (nv_ro08(bios, data + 0x07) & 0x80) >> 7;
-		p->ramcfg_11_08_01 = (nv_ro08(bios, data + 0x08) & 0x01) >> 0;
-		p->ramcfg_11_08_02 = (nv_ro08(bios, data + 0x08) & 0x02) >> 1;
-		p->ramcfg_11_08_04 = (nv_ro08(bios, data + 0x08) & 0x04) >> 2;
-		p->ramcfg_11_08_08 = (nv_ro08(bios, data + 0x08) & 0x08) >> 3;
-		p->ramcfg_11_08_10 = (nv_ro08(bios, data + 0x08) & 0x10) >> 4;
-		p->ramcfg_11_08_20 = (nv_ro08(bios, data + 0x08) & 0x20) >> 5;
-		p->ramcfg_11_09    = (nv_ro08(bios, data + 0x09) & 0xff) >> 0;
+		p->ramcfg_timing   =  nvbios_rd08(bios, data + 0x00);
+		p->ramcfg_11_01_01 = (nvbios_rd08(bios, data + 0x01) & 0x01) >> 0;
+		p->ramcfg_11_01_02 = (nvbios_rd08(bios, data + 0x01) & 0x02) >> 1;
+		p->ramcfg_11_01_04 = (nvbios_rd08(bios, data + 0x01) & 0x04) >> 2;
+		p->ramcfg_11_01_08 = (nvbios_rd08(bios, data + 0x01) & 0x08) >> 3;
+		p->ramcfg_11_01_10 = (nvbios_rd08(bios, data + 0x01) & 0x10) >> 4;
+		p->ramcfg_11_01_20 = (nvbios_rd08(bios, data + 0x01) & 0x20) >> 5;
+		p->ramcfg_11_01_40 = (nvbios_rd08(bios, data + 0x01) & 0x40) >> 6;
+		p->ramcfg_11_01_80 = (nvbios_rd08(bios, data + 0x01) & 0x80) >> 7;
+		p->ramcfg_11_02_03 = (nvbios_rd08(bios, data + 0x02) & 0x03) >> 0;
+		p->ramcfg_11_02_04 = (nvbios_rd08(bios, data + 0x02) & 0x04) >> 2;
+		p->ramcfg_11_02_08 = (nvbios_rd08(bios, data + 0x02) & 0x08) >> 3;
+		p->ramcfg_11_02_10 = (nvbios_rd08(bios, data + 0x02) & 0x10) >> 4;
+		p->ramcfg_11_02_40 = (nvbios_rd08(bios, data + 0x02) & 0x40) >> 6;
+		p->ramcfg_11_02_80 = (nvbios_rd08(bios, data + 0x02) & 0x80) >> 7;
+		p->ramcfg_11_03_0f = (nvbios_rd08(bios, data + 0x03) & 0x0f) >> 0;
+		p->ramcfg_11_03_30 = (nvbios_rd08(bios, data + 0x03) & 0x30) >> 4;
+		p->ramcfg_11_03_c0 = (nvbios_rd08(bios, data + 0x03) & 0xc0) >> 6;
+		p->ramcfg_11_03_f0 = (nvbios_rd08(bios, data + 0x03) & 0xf0) >> 4;
+		p->ramcfg_11_04    = (nvbios_rd08(bios, data + 0x04) & 0xff) >> 0;
+		p->ramcfg_11_06    = (nvbios_rd08(bios, data + 0x06) & 0xff) >> 0;
+		p->ramcfg_11_07_02 = (nvbios_rd08(bios, data + 0x07) & 0x02) >> 1;
+		p->ramcfg_11_07_04 = (nvbios_rd08(bios, data + 0x07) & 0x04) >> 2;
+		p->ramcfg_11_07_08 = (nvbios_rd08(bios, data + 0x07) & 0x08) >> 3;
+		p->ramcfg_11_07_10 = (nvbios_rd08(bios, data + 0x07) & 0x10) >> 4;
+		p->ramcfg_11_07_40 = (nvbios_rd08(bios, data + 0x07) & 0x40) >> 6;
+		p->ramcfg_11_07_80 = (nvbios_rd08(bios, data + 0x07) & 0x80) >> 7;
+		p->ramcfg_11_08_01 = (nvbios_rd08(bios, data + 0x08) & 0x01) >> 0;
+		p->ramcfg_11_08_02 = (nvbios_rd08(bios, data + 0x08) & 0x02) >> 1;
+		p->ramcfg_11_08_04 = (nvbios_rd08(bios, data + 0x08) & 0x04) >> 2;
+		p->ramcfg_11_08_08 = (nvbios_rd08(bios, data + 0x08) & 0x08) >> 3;
+		p->ramcfg_11_08_10 = (nvbios_rd08(bios, data + 0x08) & 0x10) >> 4;
+		p->ramcfg_11_08_20 = (nvbios_rd08(bios, data + 0x08) & 0x20) >> 5;
+		p->ramcfg_11_09    = (nvbios_rd08(bios, data + 0x09) & 0xff) >> 0;
 		break;
 	default:
 		data = 0;
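
The long runs of (nvbios_rd08(bios, data + off) & mask) >> shift above
all instantiate one pattern: extract a named bitfield from a byte at a
fixed offset in the table entry, with the shift equal to the index of the
mask's lowest set bit.  A generic helper expressing the same operation,
reusing the illustrative vbios helpers from earlier (the driver
deliberately writes each field out longhand so every offset stays
greppable):

    /* Pull the field selected by mask out of the byte at data + off;
     * __builtin_ctz(mask) is the shift down to bit zero (GCC/Clang).
     */
    static uint8_t field(const struct vbios *b, uint32_t data,
                         uint32_t off, uint8_t mask)
    {
            return (vbios_rd08(b, data + off) & mask) >> __builtin_ctz(mask);
    }

    /* e.g. ramcfg_11_07_10 == field(b, data, 0x07, 0x10) */
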
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 8c2b7cb..792f017 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -23,13 +23,11 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/image.h>
 
 struct shadow {
-	struct nvkm_oclass base;
 	u32 skip;
 	const struct nvbios_source *func;
 	void *data;
@@ -38,9 +36,8 @@
 };
 
 static bool
-shadow_fetch(struct nvkm_bios *bios, u32 upto)
+shadow_fetch(struct nvkm_bios *bios, struct shadow *mthd, u32 upto)
 {
-	struct shadow *mthd = (void *)nv_object(bios)->oclass;
 	const u32 limit = (upto + 3) & ~3;
 	const u32 start = bios->size;
 	void *data = mthd->data;
@@ -51,65 +48,35 @@
 	return bios->size >= limit;
 }
 
-static u8
-shadow_rd08(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_bios *bios = (void *)object;
-	if (shadow_fetch(bios, addr + 1))
-		return bios->data[addr];
-	return 0x00;
-}
-
-static u16
-shadow_rd16(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_bios *bios = (void *)object;
-	if (shadow_fetch(bios, addr + 2))
-		return get_unaligned_le16(&bios->data[addr]);
-	return 0x0000;
-}
-
-static u32
-shadow_rd32(struct nvkm_object *object, u64 addr)
-{
-	struct nvkm_bios *bios = (void *)object;
-	if (shadow_fetch(bios, addr + 4))
-		return get_unaligned_le32(&bios->data[addr]);
-	return 0x00000000;
-}
-
-static struct nvkm_oclass
-shadow_class = {
-	.handle = NV_SUBDEV(VBIOS, 0x00),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.rd08 = shadow_rd08,
-		.rd16 = shadow_rd16,
-		.rd32 = shadow_rd32,
-	},
-};
-
 static int
-shadow_image(struct nvkm_bios *bios, int idx, struct shadow *mthd)
+shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
 	struct nvbios_image image;
 	int score = 1;
 
-	if (!nvbios_image(bios, idx, &image)) {
-		nv_debug(bios, "image %d invalid\n", idx);
+	if (!shadow_fetch(bios, mthd, offset + 0x1000)) {
+		nvkm_debug(subdev, "%08x: header fetch failed\n", offset);
 		return 0;
 	}
-	nv_debug(bios, "%08x: type %02x, %d bytes\n",
-		 image.base, image.type, image.size);
 
-	if (!shadow_fetch(bios, image.size)) {
-		nv_debug(bios, "%08x: fetch failed\n", image.base);
+	if (!nvbios_image(bios, idx, &image)) {
+		nvkm_debug(subdev, "image %d invalid\n", idx);
+		return 0;
+	}
+	nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
+		   image.base, image.type, image.size);
+
+	if (!shadow_fetch(bios, mthd, image.size)) {
+		nvkm_debug(subdev, "%08x: fetch failed\n", image.base);
 		return 0;
 	}
 
 	switch (image.type) {
 	case 0x00:
 		if (nvbios_checksum(&bios->data[image.base], image.size)) {
-			nv_debug(bios, "%08x: checksum failed\n", image.base);
+			nvkm_debug(subdev, "%08x: checksum failed\n",
+				   image.base);
 			if (mthd->func->rw)
 				score += 1;
 			score += 1;
@@ -123,28 +90,17 @@
 	}
 
 	if (!image.last)
-		score += shadow_image(bios, idx + 1, mthd);
+		score += shadow_image(bios, idx + 1, offset + image.size, mthd);
 	return score;
 }
 
 static int
-shadow_score(struct nvkm_bios *bios, struct shadow *mthd)
-{
-	struct nvkm_oclass *oclass = nv_object(bios)->oclass;
-	int score;
-	nv_object(bios)->oclass = &mthd->base;
-	score = shadow_image(bios, 0, mthd);
-	nv_object(bios)->oclass = oclass;
-	return score;
-
-}
-
-static int
 shadow_method(struct nvkm_bios *bios, struct shadow *mthd, const char *name)
 {
 	const struct nvbios_source *func = mthd->func;
+	struct nvkm_subdev *subdev = &bios->subdev;
 	if (func->name) {
-		nv_debug(bios, "trying %s...\n", name ? name : func->name);
+		nvkm_debug(subdev, "trying %s...\n", name ? name : func->name);
 		if (func->init) {
 			mthd->data = func->init(bios, name);
 			if (IS_ERR(mthd->data)) {
@@ -152,10 +108,10 @@
 				return 0;
 			}
 		}
-		mthd->score = shadow_score(bios, mthd);
+		mthd->score = shadow_image(bios, 0, 0, mthd);
 		if (func->fini)
 			func->fini(mthd->data);
-		nv_debug(bios, "scored %d\n", mthd->score);
+		nvkm_debug(subdev, "scored %d\n", mthd->score);
 		mthd->data = bios->data;
 		mthd->size = bios->size;
 		bios->data  = NULL;
@@ -178,7 +134,7 @@
 static void *
 shadow_fw_init(struct nvkm_bios *bios, const char *name)
 {
-	struct device *dev = &nv_device(bios)->pdev->dev;
+	struct device *dev = bios->subdev.device->dev;
 	const struct firmware *fw;
 	int ret = request_firmware(&fw, name, dev);
 	if (ret)
@@ -198,22 +154,24 @@
 int
 nvbios_shadow(struct nvkm_bios *bios)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
+	struct nvkm_device *device = subdev->device;
 	struct shadow mthds[] = {
-		{ shadow_class, 0, &nvbios_of },
-		{ shadow_class, 0, &nvbios_ramin },
-		{ shadow_class, 0, &nvbios_rom },
-		{ shadow_class, 0, &nvbios_acpi_fast },
-		{ shadow_class, 4, &nvbios_acpi_slow },
-		{ shadow_class, 1, &nvbios_pcirom },
-		{ shadow_class, 1, &nvbios_platform },
-		{ shadow_class }
-	}, *mthd = mthds, *best = NULL;
+		{ 0, &nvbios_of },
+		{ 0, &nvbios_ramin },
+		{ 0, &nvbios_rom },
+		{ 0, &nvbios_acpi_fast },
+		{ 4, &nvbios_acpi_slow },
+		{ 1, &nvbios_pcirom },
+		{ 1, &nvbios_platform },
+		{}
+	}, *mthd, *best = NULL;
 	const char *optarg;
 	char *source;
 	int optlen;
 
 	/* handle user-specified bios source */
-	optarg = nvkm_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
+	optarg = nvkm_stropt(device->cfgopt, "NvBios", &optlen);
 	source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
 	if (source) {
 		/* try to match one of the built-in methods */
@@ -234,7 +192,7 @@
 		}
 
 		if (!best->score) {
-			nv_error(bios, "%s invalid\n", source);
+			nvkm_error(subdev, "%s invalid\n", source);
 			kfree(source);
 			source = NULL;
 		}
@@ -259,12 +217,12 @@
 	}
 
 	if (!best->score) {
-		nv_fatal(bios, "unable to locate usable image\n");
+		nvkm_error(subdev, "unable to locate usable image\n");
 		return -EINVAL;
 	}
 
-	nv_info(bios, "using image from %s\n", best->func ?
-		best->func->name : source);
+	nvkm_debug(subdev, "using image from %s\n", best->func ?
+		   best->func->name : source);
 	bios->data = best->data;
 	bios->size = best->size;
 	kfree(source);
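
The shadowing strategy after this rework: each candidate source
(OpenFirmware, PRAMIN, PROM, ACPI fast/slow, PCI ROM, platform ROM) is
tried in turn, the image chain it yields is validated and scored (known
type, checksum, whether the source is read-write), and the best-scoring
copy wins; a method's skip threshold lets slow sources be bypassed once
something good enough has been found.  A sketch of that control flow
(illustrative types; the driver additionally handles the user-forced
NvBios case shown above):

    #include <stddef.h>

    struct method {
            int skip;               /* only try if best score < skip */
            int (*score)(void);     /* fetch and validate an image chain */
    };

    static const struct method *
    pick_best(const struct method *m, int n)
    {
            const struct method *best = NULL;
            int best_score = 0, s, i;

            for (i = 0; i < n; i++) {
                    if (m[i].skip && best_score >= m[i].skip)
                            continue;       /* good enough already */
                    s = m[i].score();
                    if (s > best_score) {
                            best_score = s;
                            best = &m[i];
                    }
            }
            return best;
    }
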
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
index f9d0eb5..8fecb5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
@@ -22,14 +22,12 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-
 #if defined(CONFIG_ACPI) && defined(CONFIG_X86)
 int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+bool nouveau_acpi_rom_supported(struct device *);
 #else
 static inline bool
-nouveau_acpi_rom_supported(struct pci_dev *pdev)
+nouveau_acpi_rom_supported(struct device *dev)
 {
 	return false;
 }
@@ -90,7 +88,7 @@
 static void *
 acpi_init(struct nvkm_bios *bios, const char *name)
 {
-	if (!nouveau_acpi_rom_supported(nv_device(bios)->pdev))
+	if (!nouveau_acpi_rom_supported(bios->subdev.device->dev))
 		return ERR_PTR(-ENODEV);
 	return NULL;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
index 4c19a7d..bd60d7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
@@ -21,8 +21,7 @@
  *
  */
 #include "priv.h"
-
-#include <core/device.h>
+#include <core/pci.h>
 
 #if defined(__powerpc__)
 struct priv {
@@ -44,7 +43,7 @@
 static void *
 of_init(struct nvkm_bios *bios, const char *name)
 {
-	struct pci_dev *pdev = nv_device(bios)->pdev;
+	struct pci_dev *pdev = bios->subdev.device->func->pci(bios->subdev.device)->pdev;
 	struct device_node *dn;
 	struct priv *priv;
 	if (!(dn = pci_device_to_OF_node(pdev)))
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
index 1b04548..9b91da0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
@@ -22,7 +22,7 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
+#include <core/pci.h>
 
 struct priv {
 	struct pci_dev *pdev;
@@ -53,10 +53,16 @@
 static void *
 pcirom_init(struct nvkm_bios *bios, const char *name)
 {
-	struct pci_dev *pdev = nv_device(bios)->pdev;
+	struct nvkm_device *device = bios->subdev.device;
 	struct priv *priv = NULL;
+	struct pci_dev *pdev;
 	int ret;
 
+	if (device->func->pci)
+		pdev = device->func->pci(device)->pdev;
+	else
+		return ERR_PTR(-ENODEV);
+
 	if (!(ret = pci_enable_rom(pdev))) {
 		if (ret = -ENOMEM,
 		    (priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
@@ -85,10 +91,16 @@
 static void *
 platform_init(struct nvkm_bios *bios, const char *name)
 {
-	struct pci_dev *pdev = nv_device(bios)->pdev;
+	struct nvkm_device *device = bios->subdev.device;
+	struct pci_dev *pdev;
 	struct priv *priv;
 	int ret = -ENOMEM;
 
+	if (device->func->pci)
+		pdev = device->func->pci(device)->pdev;
+	else
+		return ERR_PTR(-ENODEV);
+
 	if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
 		if (ret = -ENODEV,
 		    (priv->rom = pci_platform_rom(pdev, &priv->size)))
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
index abe8ae4..0f537c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
@@ -22,8 +22,6 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-
 struct priv {
 	struct nvkm_bios *bios;
 	u32 bar0;
@@ -32,10 +30,11 @@
 static u32
 pramin_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 {
+	struct nvkm_device *device = bios->subdev.device;
 	u32 i;
 	if (offset + length <= 0x00100000) {
 		for (i = offset; i < offset + length; i += 4)
-			*(u32 *)&bios->data[i] = nv_rd32(bios, 0x700000 + i);
+			*(u32 *)&bios->data[i] = nvkm_rd32(device, 0x700000 + i);
 		return length;
 	}
 	return 0;
@@ -46,7 +45,8 @@
 {
 	struct priv *priv = data;
 	if (priv) {
-		nv_wr32(priv->bios, 0x001700, priv->bar0);
+		struct nvkm_device *device = priv->bios->subdev.device;
+		nvkm_wr32(device, 0x001700, priv->bar0);
 		kfree(priv);
 	}
 }
@@ -54,21 +54,23 @@
 static void *
 pramin_init(struct nvkm_bios *bios, const char *name)
 {
+	struct nvkm_subdev *subdev = &bios->subdev;
+	struct nvkm_device *device = subdev->device;
 	struct priv *priv = NULL;
 	u64 addr = 0;
 
 	/* PRAMIN always potentially available prior to nv50 */
-	if (nv_device(bios)->card_type < NV_50)
+	if (device->card_type < NV_50)
 		return NULL;
 
 	/* we can't get the bios image pointer without PDISP */
-	if (nv_device(bios)->card_type >= GM100)
-		addr = nv_rd32(bios, 0x021c04);
+	if (device->card_type >= GM100)
+		addr = nvkm_rd32(device, 0x021c04);
 	else
-	if (nv_device(bios)->card_type >= NV_C0)
-		addr = nv_rd32(bios, 0x022500);
+	if (device->card_type >= NV_C0)
+		addr = nvkm_rd32(device, 0x022500);
 	if (addr & 0x00000001) {
-		nv_debug(bios, "... display disabled\n");
+		nvkm_debug(subdev, "... display disabled\n");
 		return ERR_PTR(-ENODEV);
 	}
 
@@ -76,32 +78,32 @@
 	 * important as we don't want to be touching vram on an
 	 * uninitialised board
 	 */
-	addr = nv_rd32(bios, 0x619f04);
+	addr = nvkm_rd32(device, 0x619f04);
 	if (!(addr & 0x00000008)) {
-		nv_debug(bios, "... not enabled\n");
+		nvkm_debug(subdev, "... not enabled\n");
 		return ERR_PTR(-ENODEV);
 	}
 	if ( (addr & 0x00000003) != 1) {
-		nv_debug(bios, "... not in vram\n");
+		nvkm_debug(subdev, "... not in vram\n");
 		return ERR_PTR(-ENODEV);
 	}
 
 	/* some alternate method inherited from xf86-video-nv... */
 	addr = (addr & 0xffffff00) << 8;
 	if (!addr) {
-		addr  = (u64)nv_rd32(bios, 0x001700) << 16;
+		addr  = (u64)nvkm_rd32(device, 0x001700) << 16;
 		addr += 0xf0000;
 	}
 
 	/* modify bar0 PRAMIN window to cover the bios image */
 	if (!(priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
-		nv_error(bios, "... out of memory\n");
+		nvkm_error(subdev, "... out of memory\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
 	priv->bios = bios;
-	priv->bar0 = nv_rd32(bios, 0x001700);
-	nv_wr32(bios, 0x001700, addr >> 16);
+	priv->bar0 = nvkm_rd32(device, 0x001700);
+	nvkm_wr32(device, 0x001700, addr >> 16);
 	return priv;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
index 6ec3b23..ffa4b39 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowrom.c
@@ -22,15 +22,16 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
+#include <subdev/pci.h>
 
 static u32
 prom_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 {
+	struct nvkm_device *device = data;
 	u32 i;
 	if (offset + length <= 0x00100000) {
 		for (i = offset; i < offset + length; i += 4)
-			*(u32 *)&bios->data[i] = nv_rd32(bios, 0x300000 + i);
+			*(u32 *)&bios->data[i] = nvkm_rd32(device, 0x300000 + i);
 		return length;
 	}
 	return 0;
@@ -39,25 +40,18 @@
 static void
 prom_fini(void *data)
 {
-	struct nvkm_bios *bios = data;
-	if (nv_device(bios)->card_type < NV_50)
-		nv_mask(bios, 0x001850, 0x00000001, 0x00000001);
-	else
-		nv_mask(bios, 0x088050, 0x00000001, 0x00000001);
+	struct nvkm_device *device = data;
+	nvkm_pci_rom_shadow(device->pci, true);
 }
 
 static void *
 prom_init(struct nvkm_bios *bios, const char *name)
 {
-	if (nv_device(bios)->card_type < NV_50) {
-		if (nv_device(bios)->card_type == NV_40 &&
-		    nv_device(bios)->chipset >= 0x4c)
-			return ERR_PTR(-ENODEV);
-		nv_mask(bios, 0x001850, 0x00000001, 0x00000000);
-	} else {
-		nv_mask(bios, 0x088050, 0x00000001, 0x00000000);
-	}
-	return bios;
+	struct nvkm_device *device = bios->subdev.device;
+	if (device->card_type == NV_40 && device->chipset >= 0x4c)
+		return ERR_PTR(-ENODEV);
+	nvkm_pci_rom_shadow(device->pci, false);
+	return device;
 }
 
 const struct nvbios_source
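
prom_init()/prom_fini() above now route the PROM shadow enable/disable through nvkm_pci_rom_shadow() instead of poking 0x001850/0x088050 directly. The helper's body is not among the hunks shown here; the following is a plausible reconstruction from the registers the removed code used, offered purely as a sketch (the per-card-type offset split is an assumption):

    /* Hypothetical sketch -- the real helper lives in nvkm/subdev/pci.
     * Bit 0 set routes VBIOS fetches to the shadow copy instead of PROM. */
    void
    nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow)
    {
        struct nvkm_device *device = pci->subdev.device;
        u32 addr = device->card_type < NV_50 ? 0x001850 : 0x088050;
        nvkm_mask(device, addr, 0x00000001, shadow ? 0x00000001 : 0x00000000);
    }
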
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
index 249ff6d..a54cfec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/therm.c
@@ -25,8 +25,6 @@
 #include <subdev/bios/bit.h>
 #include <subdev/bios/therm.h>
 
-#include <core/device.h>
-
 static u16
 therm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
 {
@@ -35,24 +33,24 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 1)
-			therm = nv_ro16(bios, bit_P.offset + 12);
+			therm = nvbios_rd16(bios, bit_P.offset + 12);
 		else if (bit_P.version == 2)
-			therm = nv_ro16(bios, bit_P.offset + 16);
+			therm = nvbios_rd16(bios, bit_P.offset + 16);
 		else
-			nv_error(bios,
-				"unknown offset for thermal in BIT P %d\n",
-				bit_P.version);
+			nvkm_error(&bios->subdev,
+				   "unknown offset for thermal in BIT P %d\n",
+				   bit_P.version);
 	}
 
 	/* exit now if we haven't found the thermal table */
 	if (!therm)
 		return 0x0000;
 
-	*ver = nv_ro08(bios, therm + 0);
-	*hdr = nv_ro08(bios, therm + 1);
-	*len = nv_ro08(bios, therm + 2);
-	*cnt = nv_ro08(bios, therm + 3);
-	return therm + nv_ro08(bios, therm + 1);
+	*ver = nvbios_rd08(bios, therm + 0);
+	*hdr = nvbios_rd08(bios, therm + 1);
+	*len = nvbios_rd08(bios, therm + 2);
+	*cnt = nvbios_rd08(bios, therm + 3);
+	return therm + nvbios_rd08(bios, therm + 1);
 }
 
 static u16
@@ -83,9 +81,9 @@
 	sensor_section = -1;
 	i = 0;
 	while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
-		s16 value = nv_ro16(bios, entry + 1);
+		s16 value = nvbios_rd16(bios, entry + 1);
 
-		switch (nv_ro08(bios, entry + 0)) {
+		switch (nvbios_rd08(bios, entry + 0)) {
 		case 0x0:
 			thrs_section = value;
 			if (value > 0)
@@ -94,7 +92,7 @@
 		case 0x01:
 			sensor_section++;
 			if (sensor_section == 0) {
-				offset = ((s8) nv_ro08(bios, entry + 2)) / 2;
+				offset = ((s8) nvbios_rd08(bios, entry + 2)) / 2;
 				sensor->offset_constant = offset;
 			}
 			break;
@@ -165,9 +163,9 @@
 	fan->nr_fan_trip = 0;
 	fan->fan_mode = NVBIOS_THERM_FAN_OTHER;
 	while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
-		s16 value = nv_ro16(bios, entry + 1);
+		s16 value = nvbios_rd16(bios, entry + 1);
 
-		switch (nv_ro08(bios, entry + 0)) {
+		switch (nvbios_rd08(bios, entry + 0)) {
 		case 0x22:
 			fan->min_duty = value & 0xff;
 			fan->max_duty = (value & 0xff00) >> 8;
@@ -198,14 +196,14 @@
 		case 0x46:
 			if (fan->fan_mode > NVBIOS_THERM_FAN_LINEAR)
 				fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
-			fan->linear_min_temp = nv_ro08(bios, entry + 1);
-			fan->linear_max_temp = nv_ro08(bios, entry + 2);
+			fan->linear_min_temp = nvbios_rd08(bios, entry + 1);
+			fan->linear_max_temp = nvbios_rd08(bios, entry + 2);
 			break;
 		}
 	}
 
 	/* starting from fermi, fan management is always linear */
-	if (nv_device(bios)->card_type >= NV_C0 &&
+	if (bios->subdev.device->card_type >= NV_C0 &&
 		fan->fan_mode == NVBIOS_THERM_FAN_OTHER) {
 		fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
index 763fd29..99f6432 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
@@ -34,27 +34,27 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 1)
-			timing = nv_ro16(bios, bit_P.offset + 4);
+			timing = nvbios_rd16(bios, bit_P.offset + 4);
 		else
 		if (bit_P.version == 2)
-			timing = nv_ro16(bios, bit_P.offset + 8);
+			timing = nvbios_rd16(bios, bit_P.offset + 8);
 
 		if (timing) {
-			*ver = nv_ro08(bios, timing + 0);
+			*ver = nvbios_rd08(bios, timing + 0);
 			switch (*ver) {
 			case 0x10:
-				*hdr = nv_ro08(bios, timing + 1);
-				*cnt = nv_ro08(bios, timing + 2);
-				*len = nv_ro08(bios, timing + 3);
+				*hdr = nvbios_rd08(bios, timing + 1);
+				*cnt = nvbios_rd08(bios, timing + 2);
+				*len = nvbios_rd08(bios, timing + 3);
 				*snr = 0;
 				*ssz = 0;
 				return timing;
 			case 0x20:
-				*hdr = nv_ro08(bios, timing + 1);
-				*cnt = nv_ro08(bios, timing + 5);
-				*len = nv_ro08(bios, timing + 2);
-				*snr = nv_ro08(bios, timing + 4);
-				*ssz = nv_ro08(bios, timing + 3);
+				*hdr = nvbios_rd08(bios, timing + 1);
+				*cnt = nvbios_rd08(bios, timing + 5);
+				*len = nvbios_rd08(bios, timing + 2);
+				*snr = nvbios_rd08(bios, timing + 4);
+				*ssz = nvbios_rd08(bios, timing + 3);
 				return timing;
 			default:
 				break;
@@ -90,18 +90,20 @@
 	p->timing_hdr = *hdr;
 	switch (!!data * *ver) {
 	case 0x10:
-		p->timing_10_WR    = nv_ro08(bios, data + 0x00);
-		p->timing_10_WTR   = nv_ro08(bios, data + 0x01);
-		p->timing_10_CL    = nv_ro08(bios, data + 0x02);
-		p->timing_10_RC    = nv_ro08(bios, data + 0x03);
-		p->timing_10_RFC   = nv_ro08(bios, data + 0x05);
-		p->timing_10_RAS   = nv_ro08(bios, data + 0x07);
-		p->timing_10_RP    = nv_ro08(bios, data + 0x09);
-		p->timing_10_RCDRD = nv_ro08(bios, data + 0x0a);
-		p->timing_10_RCDWR = nv_ro08(bios, data + 0x0b);
-		p->timing_10_RRD   = nv_ro08(bios, data + 0x0c);
-		p->timing_10_13    = nv_ro08(bios, data + 0x0d);
-		p->timing_10_ODT   = nv_ro08(bios, data + 0x0e) & 0x07;
+		p->timing_10_WR    = nvbios_rd08(bios, data + 0x00);
+		p->timing_10_WTR   = nvbios_rd08(bios, data + 0x01);
+		p->timing_10_CL    = nvbios_rd08(bios, data + 0x02);
+		p->timing_10_RC    = nvbios_rd08(bios, data + 0x03);
+		p->timing_10_RFC   = nvbios_rd08(bios, data + 0x05);
+		p->timing_10_RAS   = nvbios_rd08(bios, data + 0x07);
+		p->timing_10_RP    = nvbios_rd08(bios, data + 0x09);
+		p->timing_10_RCDRD = nvbios_rd08(bios, data + 0x0a);
+		p->timing_10_RCDWR = nvbios_rd08(bios, data + 0x0b);
+		p->timing_10_RRD   = nvbios_rd08(bios, data + 0x0c);
+		p->timing_10_13    = nvbios_rd08(bios, data + 0x0d);
+		p->timing_10_ODT   = nvbios_rd08(bios, data + 0x0e) & 0x07;
+		if (p->ramcfg_ver >= 0x10)
+			p->ramcfg_RON = nvbios_rd08(bios, data + 0x0e) & 0x07;
 
 		p->timing_10_24  = 0xff;
 		p->timing_10_21  = 0;
@@ -112,45 +114,45 @@
 
 		switch (min_t(u8, *hdr, 25)) {
 		case 25:
-			p->timing_10_24  = nv_ro08(bios, data + 0x18);
+			p->timing_10_24  = nvbios_rd08(bios, data + 0x18);
 		case 24:
 		case 23:
 		case 22:
-			p->timing_10_21  = nv_ro08(bios, data + 0x15);
+			p->timing_10_21  = nvbios_rd08(bios, data + 0x15);
 		case 21:
-			p->timing_10_20  = nv_ro08(bios, data + 0x14);
+			p->timing_10_20  = nvbios_rd08(bios, data + 0x14);
 		case 20:
-			p->timing_10_CWL = nv_ro08(bios, data + 0x13);
+			p->timing_10_CWL = nvbios_rd08(bios, data + 0x13);
 		case 19:
-			p->timing_10_18  = nv_ro08(bios, data + 0x12);
+			p->timing_10_18  = nvbios_rd08(bios, data + 0x12);
 		case 18:
 		case 17:
-			p->timing_10_16  = nv_ro08(bios, data + 0x10);
+			p->timing_10_16  = nvbios_rd08(bios, data + 0x10);
 		}
 
 		break;
 	case 0x20:
-		p->timing[0] = nv_ro32(bios, data + 0x00);
-		p->timing[1] = nv_ro32(bios, data + 0x04);
-		p->timing[2] = nv_ro32(bios, data + 0x08);
-		p->timing[3] = nv_ro32(bios, data + 0x0c);
-		p->timing[4] = nv_ro32(bios, data + 0x10);
-		p->timing[5] = nv_ro32(bios, data + 0x14);
-		p->timing[6] = nv_ro32(bios, data + 0x18);
-		p->timing[7] = nv_ro32(bios, data + 0x1c);
-		p->timing[8] = nv_ro32(bios, data + 0x20);
-		p->timing[9] = nv_ro32(bios, data + 0x24);
-		p->timing[10] = nv_ro32(bios, data + 0x28);
-		p->timing_20_2e_03 = (nv_ro08(bios, data + 0x2e) & 0x03) >> 0;
-		p->timing_20_2e_30 = (nv_ro08(bios, data + 0x2e) & 0x30) >> 4;
-		p->timing_20_2e_c0 = (nv_ro08(bios, data + 0x2e) & 0xc0) >> 6;
-		p->timing_20_2f_03 = (nv_ro08(bios, data + 0x2f) & 0x03) >> 0;
-		temp = nv_ro16(bios, data + 0x2c);
+		p->timing[0] = nvbios_rd32(bios, data + 0x00);
+		p->timing[1] = nvbios_rd32(bios, data + 0x04);
+		p->timing[2] = nvbios_rd32(bios, data + 0x08);
+		p->timing[3] = nvbios_rd32(bios, data + 0x0c);
+		p->timing[4] = nvbios_rd32(bios, data + 0x10);
+		p->timing[5] = nvbios_rd32(bios, data + 0x14);
+		p->timing[6] = nvbios_rd32(bios, data + 0x18);
+		p->timing[7] = nvbios_rd32(bios, data + 0x1c);
+		p->timing[8] = nvbios_rd32(bios, data + 0x20);
+		p->timing[9] = nvbios_rd32(bios, data + 0x24);
+		p->timing[10] = nvbios_rd32(bios, data + 0x28);
+		p->timing_20_2e_03 = (nvbios_rd08(bios, data + 0x2e) & 0x03) >> 0;
+		p->timing_20_2e_30 = (nvbios_rd08(bios, data + 0x2e) & 0x30) >> 4;
+		p->timing_20_2e_c0 = (nvbios_rd08(bios, data + 0x2e) & 0xc0) >> 6;
+		p->timing_20_2f_03 = (nvbios_rd08(bios, data + 0x2f) & 0x03) >> 0;
+		temp = nvbios_rd16(bios, data + 0x2c);
 		p->timing_20_2c_003f = (temp & 0x003f) >> 0;
 		p->timing_20_2c_1fc0 = (temp & 0x1fc0) >> 6;
-		p->timing_20_30_07 = (nv_ro08(bios, data + 0x30) & 0x07) >> 0;
-		p->timing_20_30_f8 = (nv_ro08(bios, data + 0x30) & 0xf8) >> 3;
-		temp = nv_ro16(bios, data + 0x31);
+		p->timing_20_30_07 = (nvbios_rd08(bios, data + 0x30) & 0x07) >> 0;
+		p->timing_20_30_f8 = (nvbios_rd08(bios, data + 0x30) & 0xf8) >> 3;
+		temp = nvbios_rd16(bios, data + 0x31);
 		p->timing_20_31_0007 = (temp & 0x0007) >> 0;
 		p->timing_20_31_0078 = (temp & 0x0078) >> 3;
 		p->timing_20_31_0780 = (temp & 0x0780) >> 7;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
index e95b69f..2f13db7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vmap.c
@@ -33,15 +33,15 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2) {
-			vmap = nv_ro16(bios, bit_P.offset + 0x20);
+			vmap = nvbios_rd16(bios, bit_P.offset + 0x20);
 			if (vmap) {
-				*ver = nv_ro08(bios, vmap + 0);
+				*ver = nvbios_rd08(bios, vmap + 0);
 				switch (*ver) {
 				case 0x10:
 				case 0x20:
-					*hdr = nv_ro08(bios, vmap + 1);
-					*cnt = nv_ro08(bios, vmap + 3);
-					*len = nv_ro08(bios, vmap + 2);
+					*hdr = nvbios_rd08(bios, vmap + 1);
+					*cnt = nvbios_rd08(bios, vmap + 3);
+					*len = nvbios_rd08(bios, vmap + 2);
 					return vmap;
 				default:
 					break;
@@ -88,23 +88,23 @@
 	switch (!!vmap * *ver) {
 	case 0x10:
 		info->link   = 0xff;
-		info->min    = nv_ro32(bios, vmap + 0x00);
-		info->max    = nv_ro32(bios, vmap + 0x04);
-		info->arg[0] = nv_ro32(bios, vmap + 0x08);
-		info->arg[1] = nv_ro32(bios, vmap + 0x0c);
-		info->arg[2] = nv_ro32(bios, vmap + 0x10);
+		info->min    = nvbios_rd32(bios, vmap + 0x00);
+		info->max    = nvbios_rd32(bios, vmap + 0x04);
+		info->arg[0] = nvbios_rd32(bios, vmap + 0x08);
+		info->arg[1] = nvbios_rd32(bios, vmap + 0x0c);
+		info->arg[2] = nvbios_rd32(bios, vmap + 0x10);
 		break;
 	case 0x20:
-		info->unk0   = nv_ro08(bios, vmap + 0x00);
-		info->link   = nv_ro08(bios, vmap + 0x01);
-		info->min    = nv_ro32(bios, vmap + 0x02);
-		info->max    = nv_ro32(bios, vmap + 0x06);
-		info->arg[0] = nv_ro32(bios, vmap + 0x0a);
-		info->arg[1] = nv_ro32(bios, vmap + 0x0e);
-		info->arg[2] = nv_ro32(bios, vmap + 0x12);
-		info->arg[3] = nv_ro32(bios, vmap + 0x16);
-		info->arg[4] = nv_ro32(bios, vmap + 0x1a);
-		info->arg[5] = nv_ro32(bios, vmap + 0x1e);
+		info->unk0   = nvbios_rd08(bios, vmap + 0x00);
+		info->link   = nvbios_rd08(bios, vmap + 0x01);
+		info->min    = nvbios_rd32(bios, vmap + 0x02);
+		info->max    = nvbios_rd32(bios, vmap + 0x06);
+		info->arg[0] = nvbios_rd32(bios, vmap + 0x0a);
+		info->arg[1] = nvbios_rd32(bios, vmap + 0x0e);
+		info->arg[2] = nvbios_rd32(bios, vmap + 0x12);
+		info->arg[3] = nvbios_rd32(bios, vmap + 0x16);
+		info->arg[4] = nvbios_rd32(bios, vmap + 0x1a);
+		info->arg[5] = nvbios_rd32(bios, vmap + 0x1e);
 		break;
 	}
 	return vmap;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
index 8454ab7..615804c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
@@ -33,30 +33,30 @@
 
 	if (!bit_entry(bios, 'P', &bit_P)) {
 		if (bit_P.version == 2)
-			volt = nv_ro16(bios, bit_P.offset + 0x0c);
+			volt = nvbios_rd16(bios, bit_P.offset + 0x0c);
 		else
 		if (bit_P.version == 1)
-			volt = nv_ro16(bios, bit_P.offset + 0x10);
+			volt = nvbios_rd16(bios, bit_P.offset + 0x10);
 
 		if (volt) {
-			*ver = nv_ro08(bios, volt + 0);
+			*ver = nvbios_rd08(bios, volt + 0);
 			switch (*ver) {
 			case 0x12:
 				*hdr = 5;
-				*cnt = nv_ro08(bios, volt + 2);
-				*len = nv_ro08(bios, volt + 1);
+				*cnt = nvbios_rd08(bios, volt + 2);
+				*len = nvbios_rd08(bios, volt + 1);
 				return volt;
 			case 0x20:
-				*hdr = nv_ro08(bios, volt + 1);
-				*cnt = nv_ro08(bios, volt + 2);
-				*len = nv_ro08(bios, volt + 3);
+				*hdr = nvbios_rd08(bios, volt + 1);
+				*cnt = nvbios_rd08(bios, volt + 2);
+				*len = nvbios_rd08(bios, volt + 3);
 				return volt;
 			case 0x30:
 			case 0x40:
 			case 0x50:
-				*hdr = nv_ro08(bios, volt + 1);
-				*cnt = nv_ro08(bios, volt + 3);
-				*len = nv_ro08(bios, volt + 2);
+				*hdr = nvbios_rd08(bios, volt + 1);
+				*cnt = nvbios_rd08(bios, volt + 3);
+				*len = nvbios_rd08(bios, volt + 2);
 				return volt;
 			}
 		}
@@ -73,28 +73,28 @@
 	memset(info, 0x00, sizeof(*info));
 	switch (!!volt * *ver) {
 	case 0x12:
-		info->vidmask = nv_ro08(bios, volt + 0x04);
+		info->vidmask = nvbios_rd08(bios, volt + 0x04);
 		break;
 	case 0x20:
-		info->vidmask = nv_ro08(bios, volt + 0x05);
+		info->vidmask = nvbios_rd08(bios, volt + 0x05);
 		break;
 	case 0x30:
-		info->vidmask = nv_ro08(bios, volt + 0x04);
+		info->vidmask = nvbios_rd08(bios, volt + 0x04);
 		break;
 	case 0x40:
-		info->base    = nv_ro32(bios, volt + 0x04);
-		info->step    = nv_ro16(bios, volt + 0x08);
-		info->vidmask = nv_ro08(bios, volt + 0x0b);
+		info->base    = nvbios_rd32(bios, volt + 0x04);
+		info->step    = nvbios_rd16(bios, volt + 0x08);
+		info->vidmask = nvbios_rd08(bios, volt + 0x0b);
 		/*XXX*/
 		info->min     = 0;
 		info->max     = info->base;
 		break;
 	case 0x50:
-		info->vidmask = nv_ro08(bios, volt + 0x06);
-		info->min     = nv_ro32(bios, volt + 0x0a);
-		info->max     = nv_ro32(bios, volt + 0x0e);
-		info->base    = nv_ro32(bios, volt + 0x12) & 0x00ffffff;
-		info->step    = nv_ro16(bios, volt + 0x16);
+		info->vidmask = nvbios_rd08(bios, volt + 0x06);
+		info->min     = nvbios_rd32(bios, volt + 0x0a);
+		info->max     = nvbios_rd32(bios, volt + 0x0e);
+		info->base    = nvbios_rd32(bios, volt + 0x12) & 0x00ffffff;
+		info->step    = nvbios_rd16(bios, volt + 0x16);
 		break;
 	}
 	return volt;
@@ -121,12 +121,12 @@
 	switch (!!volt * *ver) {
 	case 0x12:
 	case 0x20:
-		info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
-		info->vid     = nv_ro08(bios, volt + 0x01);
+		info->voltage = nvbios_rd08(bios, volt + 0x00) * 10000;
+		info->vid     = nvbios_rd08(bios, volt + 0x01);
 		break;
 	case 0x30:
-		info->voltage = nv_ro08(bios, volt + 0x00) * 10000;
-		info->vid     = nv_ro08(bios, volt + 0x01) >> 2;
+		info->voltage = nvbios_rd08(bios, volt + 0x00) * 10000;
+		info->vid     = nvbios_rd08(bios, volt + 0x01) >> 2;
 		break;
 	case 0x40:
 	case 0x50:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c
index 63a5e1b..250fc42 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/xpio.c
@@ -30,12 +30,12 @@
 {
 	u16 data = dcb_gpio_table(bios, ver, hdr, cnt, len);
 	if (data && *ver >= 0x40 && *hdr >= 0x06) {
-		u16 xpio = nv_ro16(bios, data + 0x04);
+		u16 xpio = nvbios_rd16(bios, data + 0x04);
 		if (xpio) {
-			*ver = nv_ro08(bios, data + 0x00);
-			*hdr = nv_ro08(bios, data + 0x01);
-			*cnt = nv_ro08(bios, data + 0x02);
-			*len = nv_ro08(bios, data + 0x03);
+			*ver = nvbios_rd08(bios, data + 0x00);
+			*hdr = nvbios_rd08(bios, data + 0x01);
+			*cnt = nvbios_rd08(bios, data + 0x02);
+			*len = nvbios_rd08(bios, data + 0x03);
 			return xpio;
 		}
 	}
@@ -48,12 +48,12 @@
 {
 	u16 data = dcb_xpiod_table(bios, ver, hdr, cnt, len);
 	if (data && idx < *cnt) {
-		u16 xpio = nv_ro16(bios, data + *hdr + (idx * *len));
+		u16 xpio = nvbios_rd16(bios, data + *hdr + (idx * *len));
 		if (xpio) {
-			*ver = nv_ro08(bios, data + 0x00);
-			*hdr = nv_ro08(bios, data + 0x01);
-			*cnt = nv_ro08(bios, data + 0x02);
-			*len = nv_ro08(bios, data + 0x03);
+			*ver = nvbios_rd08(bios, data + 0x00);
+			*hdr = nvbios_rd08(bios, data + 0x01);
+			*cnt = nvbios_rd08(bios, data + 0x02);
+			*len = nvbios_rd08(bios, data + 0x03);
 			return xpio;
 		}
 	}
@@ -66,9 +66,9 @@
 {
 	u16 data = dcb_xpio_table(bios, idx, ver, hdr, cnt, len);
 	if (data && *len >= 6) {
-		info->type = nv_ro08(bios, data + 0x04);
-		info->addr = nv_ro08(bios, data + 0x05);
-		info->flags = nv_ro08(bios, data + 0x06);
+		info->type = nvbios_rd08(bios, data + 0x04);
+		info->addr = nvbios_rd08(bios, data + 0x05);
+		info->flags = nvbios_rd08(bios, data + 0x06);
 	}
 	return 0x0000;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
index 83d80b1..5fa9e91 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/Kbuild
@@ -1,3 +1,4 @@
+nvkm-y += nvkm/subdev/bus/base.o
 nvkm-y += nvkm/subdev/bus/hwsq.o
 nvkm-y += nvkm/subdev/bus/nv04.o
 nvkm-y += nvkm/subdev/bus/nv31.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
new file mode 100644
index 0000000..dc5a10f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static void
+nvkm_bus_intr(struct nvkm_subdev *subdev)
+{
+	struct nvkm_bus *bus = nvkm_bus(subdev);
+	bus->func->intr(bus);
+}
+
+static int
+nvkm_bus_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_bus *bus = nvkm_bus(subdev);
+	bus->func->init(bus);
+	return 0;
+}
+
+static void *
+nvkm_bus_dtor(struct nvkm_subdev *subdev)
+{
+	return nvkm_bus(subdev);
+}
+
+static const struct nvkm_subdev_func
+nvkm_bus = {
+	.dtor = nvkm_bus_dtor,
+	.init = nvkm_bus_init,
+	.intr = nvkm_bus_intr,
+};
+
+int
+nvkm_bus_new_(const struct nvkm_bus_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_bus **pbus)
+{
+	struct nvkm_bus *bus;
+	if (!(bus = *pbus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_bus, device, index, 0, &bus->subdev);
+	bus->func = func;
+	return 0;
+}
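
With the generic constructor in place, each chipset file shrinks to a static nvkm_bus_func table plus a one-line *_bus_new() wrapper, as g94.c and the other implementations below show. A hypothetical new chipset that reuses the nv50 hardware routines would plug in the same way (gxyz is a made-up name):

    static const struct nvkm_bus_func
    gxyz_bus = {
        .init = nv50_bus_init,
        .intr = nv50_bus_intr,
    };

    int
    gxyz_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
    {
        return nvkm_bus_new_(&gxyz_bus, device, index, pbus);
    }
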
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
index cbe699e..9700b5c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/g94.c
@@ -22,37 +22,43 @@
  * Authors: Martin Peres <martin.peres@labri.fr>
  *          Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
 #include <subdev/timer.h>
 
 static int
-g94_bus_hwsq_exec(struct nvkm_bus *pbus, u32 *data, u32 size)
+g94_bus_hwsq_exec(struct nvkm_bus *bus, u32 *data, u32 size)
 {
-	struct nv50_bus_priv *priv = (void *)pbus;
+	struct nvkm_device *device = bus->subdev.device;
 	int i;
 
-	nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
-	nv_wr32(pbus, 0x001304, 0x00000000);
-	nv_wr32(pbus, 0x001318, 0x00000000);
+	nvkm_mask(device, 0x001098, 0x00000008, 0x00000000);
+	nvkm_wr32(device, 0x001304, 0x00000000);
+	nvkm_wr32(device, 0x001318, 0x00000000);
 	for (i = 0; i < size; i++)
-		nv_wr32(priv, 0x080000 + (i * 4), data[i]);
-	nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
-	nv_wr32(pbus, 0x00130c, 0x00000001);
+		nvkm_wr32(device, 0x080000 + (i * 4), data[i]);
+	nvkm_mask(device, 0x001098, 0x00000018, 0x00000018);
+	nvkm_wr32(device, 0x00130c, 0x00000001);
 
-	return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x001308) & 0x00000100))
+			break;
+	) < 0)
+		return -ETIMEDOUT;
+
+	return 0;
 }
 
-struct nvkm_oclass *
-g94_bus_oclass = &(struct nv04_bus_impl) {
-	.base.handle = NV_SUBDEV(BUS, 0x94),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_bus_ctor,
-		.dtor = _nvkm_bus_dtor,
-		.init = nv50_bus_init,
-		.fini = _nvkm_bus_fini,
-	},
+static const struct nvkm_bus_func
+g94_bus = {
+	.init = nv50_bus_init,
 	.intr = nv50_bus_intr,
 	.hwsq_exec = g94_bus_hwsq_exec,
 	.hwsq_size = 128,
-}.base;
+};
+
+int
+g94_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+{
+	return nvkm_bus_new_(&g94_bus, device, index, pbus);
+}
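
Note the nv_wait() conversion above: the old one-liner becomes an explicit nvkm_msec() poll that spins for up to 2000ms until bit 8 of 0x001308 clears, yielding a negative value on timeout. A standalone analogue of that poll-with-budget idiom in plain C (names and the always-true condition are illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Poll cond() until it holds or budget_ms expires; returns 0 on
     * success and -1 on timeout, matching the "< 0" test above. */
    static int poll_msec(long budget_ms, bool (*cond)(void))
    {
        struct timespec start, now;
        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
            if (cond())
                return 0;
            clock_gettime(CLOCK_MONOTONIC, &now);
        } while ((now.tv_sec - start.tv_sec) * 1000 +
                 (now.tv_nsec - start.tv_nsec) / 1000000 < budget_ms);
        return -1;
    }

    /* Stand-in for reading the hardware status register. */
    static bool hwsq_idle(void) { return true; }

    int main(void)
    {
        if (poll_msec(2000, hwsq_idle) < 0)
            return 1;   /* would be -ETIMEDOUT in the kernel code */
        puts("sequencer idle");
        return 0;
    }
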
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
index ebc63ba..e0930d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
@@ -22,59 +22,54 @@
  * Authors: Martin Peres <martin.peres@labri.fr>
  *          Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
 static void
-gf100_bus_intr(struct nvkm_subdev *subdev)
+gf100_bus_intr(struct nvkm_bus *bus)
 {
-	struct nvkm_bus *pbus = nvkm_bus(subdev);
-	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+	struct nvkm_subdev *subdev = &bus->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
 
 	if (stat & 0x0000000e) {
-		u32 addr = nv_rd32(pbus, 0x009084);
-		u32 data = nv_rd32(pbus, 0x009088);
+		u32 addr = nvkm_rd32(device, 0x009084);
+		u32 data = nvkm_rd32(device, 0x009088);
 
-		nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x [ %s%s%s]\n",
-			 (addr & 0x00000002) ? "write" : "read", data,
-			 (addr & 0x00fffffc),
-			 (stat & 0x00000002) ? "!ENGINE " : "",
-			 (stat & 0x00000004) ? "IBUS " : "",
-			 (stat & 0x00000008) ? "TIMEOUT " : "");
+		nvkm_error(subdev,
+			   "MMIO %s of %08x FAULT at %06x [ %s%s%s]\n",
+			   (addr & 0x00000002) ? "write" : "read", data,
+			   (addr & 0x00fffffc),
+			   (stat & 0x00000002) ? "!ENGINE " : "",
+			   (stat & 0x00000004) ? "IBUS " : "",
+			   (stat & 0x00000008) ? "TIMEOUT " : "");
 
-		nv_wr32(pbus, 0x009084, 0x00000000);
-		nv_wr32(pbus, 0x001100, (stat & 0x0000000e));
+		nvkm_wr32(device, 0x009084, 0x00000000);
+		nvkm_wr32(device, 0x001100, (stat & 0x0000000e));
 		stat &= ~0x0000000e;
 	}
 
 	if (stat) {
-		nv_error(pbus, "unknown intr 0x%08x\n", stat);
-		nv_mask(pbus, 0x001140, stat, 0x00000000);
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_mask(device, 0x001140, stat, 0x00000000);
 	}
 }
 
-static int
-gf100_bus_init(struct nvkm_object *object)
+static void
+gf100_bus_init(struct nvkm_bus *bus)
 {
-	struct nv04_bus_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_bus_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x001100, 0xffffffff);
-	nv_wr32(priv, 0x001140, 0x0000000e);
-	return 0;
+	struct nvkm_device *device = bus->subdev.device;
+	nvkm_wr32(device, 0x001100, 0xffffffff);
+	nvkm_wr32(device, 0x001140, 0x0000000e);
 }
 
-struct nvkm_oclass *
-gf100_bus_oclass = &(struct nv04_bus_impl) {
-	.base.handle = NV_SUBDEV(BUS, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_bus_ctor,
-		.dtor = _nvkm_bus_dtor,
-		.init = gf100_bus_init,
-		.fini = _nvkm_bus_fini,
-	},
+static const struct nvkm_bus_func
+gf100_bus = {
+	.init = gf100_bus_init,
 	.intr = gf100_bus_intr,
-}.base;
+};
+
+int
+gf100_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+{
+	return nvkm_bus_new_(&gf100_bus, device, index, pbus);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
index 7622b41..79f1cf5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#include <subdev/bus.h>
+#include "priv.h"
 
 struct nvkm_hwsq {
-	struct nvkm_bus *pbus;
+	struct nvkm_subdev *subdev;
 	u32 addr;
 	u32 data;
 	struct {
@@ -41,13 +41,13 @@
 }
 
 int
-nvkm_hwsq_init(struct nvkm_bus *pbus, struct nvkm_hwsq **phwsq)
+nvkm_hwsq_init(struct nvkm_subdev *subdev, struct nvkm_hwsq **phwsq)
 {
 	struct nvkm_hwsq *hwsq;
 
 	hwsq = *phwsq = kmalloc(sizeof(*hwsq), GFP_KERNEL);
 	if (hwsq) {
-		hwsq->pbus = pbus;
+		hwsq->subdev = subdev;
 		hwsq->addr = ~0;
 		hwsq->data = ~0;
 		memset(hwsq->c.data, 0x7f, sizeof(hwsq->c.data));
@@ -63,21 +63,23 @@
 	struct nvkm_hwsq *hwsq = *phwsq;
 	int ret = 0, i;
 	if (hwsq) {
-		struct nvkm_bus *pbus = hwsq->pbus;
+		struct nvkm_subdev *subdev = hwsq->subdev;
+		struct nvkm_bus *bus = subdev->device->bus;
 		hwsq->c.size = (hwsq->c.size + 4) / 4;
-		if (hwsq->c.size <= pbus->hwsq_size) {
+		if (hwsq->c.size <= bus->func->hwsq_size) {
 			if (exec)
-				ret = pbus->hwsq_exec(pbus, (u32 *)hwsq->c.data,
-						      hwsq->c.size);
+				ret = bus->func->hwsq_exec(bus,
+							   (u32 *)hwsq->c.data,
+								  hwsq->c.size);
 			if (ret)
-				nv_error(pbus, "hwsq exec failed: %d\n", ret);
+				nvkm_error(subdev, "hwsq exec failed: %d\n", ret);
 		} else {
-			nv_error(pbus, "hwsq ucode too large\n");
+			nvkm_error(subdev, "hwsq ucode too large\n");
 			ret = -ENOSPC;
 		}
 
 		for (i = 0; ret && i < hwsq->c.size; i++)
-			nv_error(pbus, "\t0x%08x\n", ((u32 *)hwsq->c.data)[i]);
+			nvkm_error(subdev, "\t%08x\n", ((u32 *)hwsq->c.data)[i]);
 
 		*phwsq = NULL;
 		kfree(hwsq);
@@ -88,7 +90,7 @@
 void
 nvkm_hwsq_wr32(struct nvkm_hwsq *hwsq, u32 addr, u32 data)
 {
-	nv_debug(hwsq->pbus, "R[%06x] = 0x%08x\n", addr, data);
+	nvkm_debug(hwsq->subdev, "R[%06x] = %08x\n", addr, data);
 
 	if (hwsq->data != data) {
 		if ((data & 0xffff0000) != (hwsq->data & 0xffff0000)) {
@@ -113,7 +115,7 @@
 void
 nvkm_hwsq_setf(struct nvkm_hwsq *hwsq, u8 flag, int data)
 {
-	nv_debug(hwsq->pbus, " FLAG[%02x] = %d\n", flag, data);
+	nvkm_debug(hwsq->subdev, " FLAG[%02x] = %d\n", flag, data);
 	flag += 0x80;
 	if (data >= 0)
 		flag += 0x20;
@@ -125,7 +127,7 @@
 void
 nvkm_hwsq_wait(struct nvkm_hwsq *hwsq, u8 flag, u8 data)
 {
-	nv_debug(hwsq->pbus, " WAIT[%02x] = %d\n", flag, data);
+	nvkm_debug(hwsq->subdev, " WAIT[%02x] = %d\n", flag, data);
 	hwsq_cmd(hwsq, 3, (u8[]){ 0x5f, flag, data });
 }
 
@@ -138,6 +140,6 @@
 		shift++;
 	}
 
-	nv_debug(hwsq->pbus, "    DELAY = %d ns\n", nsec);
+	nvkm_debug(hwsq->subdev, "    DELAY = %d ns\n", nsec);
 	hwsq_cmd(hwsq, 1, (u8[]){ 0x00 | (shift << 2) | usec });
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
index ebf709c..8117ec5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
@@ -59,10 +59,9 @@
 static inline int
 hwsq_init(struct hwsq *ram, struct nvkm_subdev *subdev)
 {
-	struct nvkm_bus *pbus = nvkm_bus(subdev);
 	int ret;
 
-	ret = nvkm_hwsq_init(pbus, &ram->hwsq);
+	ret = nvkm_hwsq_init(subdev, &ram->hwsq);
 	if (ret)
 		return ret;
 
@@ -85,8 +84,9 @@
 static inline u32
 hwsq_rd32(struct hwsq *ram, struct hwsq_reg *reg)
 {
+	struct nvkm_device *device = ram->subdev->device;
 	if (reg->sequence != ram->sequence)
-		reg->data = nv_rd32(ram->subdev, reg->addr);
+		reg->data = nvkm_rd32(device, reg->addr);
 	return reg->data;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
index 19c8e50..c80b967 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c
@@ -22,73 +22,53 @@
  * Authors: Martin Peres <martin.peres@labri.fr>
  *          Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
+
+#include <subdev/gpio.h>
 
 static void
-nv04_bus_intr(struct nvkm_subdev *subdev)
+nv04_bus_intr(struct nvkm_bus *bus)
 {
-	struct nvkm_bus *pbus = nvkm_bus(subdev);
-	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+	struct nvkm_subdev *subdev = &bus->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
 
 	if (stat & 0x00000001) {
-		nv_error(pbus, "BUS ERROR\n");
+		nvkm_error(subdev, "BUS ERROR\n");
 		stat &= ~0x00000001;
-		nv_wr32(pbus, 0x001100, 0x00000001);
+		nvkm_wr32(device, 0x001100, 0x00000001);
 	}
 
 	if (stat & 0x00000110) {
-		subdev = nvkm_subdev(subdev, NVDEV_SUBDEV_GPIO);
-		if (subdev && subdev->intr)
-			subdev->intr(subdev);
+		struct nvkm_gpio *gpio = device->gpio;
+		if (gpio)
+			nvkm_subdev_intr(&gpio->subdev);
 		stat &= ~0x00000110;
-		nv_wr32(pbus, 0x001100, 0x00000110);
+		nvkm_wr32(device, 0x001100, 0x00000110);
 	}
 
 	if (stat) {
-		nv_error(pbus, "unknown intr 0x%08x\n", stat);
-		nv_mask(pbus, 0x001140, stat, 0x00000000);
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_mask(device, 0x001140, stat, 0x00000000);
 	}
 }
 
-static int
-nv04_bus_init(struct nvkm_object *object)
+static void
+nv04_bus_init(struct nvkm_bus *bus)
 {
-	struct nv04_bus_priv *priv = (void *)object;
-
-	nv_wr32(priv, 0x001100, 0xffffffff);
-	nv_wr32(priv, 0x001140, 0x00000111);
-
-	return nvkm_bus_init(&priv->base);
+	struct nvkm_device *device = bus->subdev.device;
+	nvkm_wr32(device, 0x001100, 0xffffffff);
+	nvkm_wr32(device, 0x001140, 0x00000111);
 }
 
+static const struct nvkm_bus_func
+nv04_bus = {
+	.init = nv04_bus_init,
+	.intr = nv04_bus_intr,
+};
+
 int
-nv04_bus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+nv04_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
 {
-	struct nv04_bus_impl *impl = (void *)oclass;
-	struct nv04_bus_priv *priv;
-	int ret;
-
-	ret = nvkm_bus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = impl->intr;
-	priv->base.hwsq_exec = impl->hwsq_exec;
-	priv->base.hwsq_size = impl->hwsq_size;
-	return 0;
+	return nvkm_bus_new_(&nv04_bus, device, index, pbus);
 }
-
-struct nvkm_oclass *
-nv04_bus_oclass = &(struct nv04_bus_impl) {
-	.base.handle = NV_SUBDEV(BUS, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_bus_ctor,
-		.dtor = _nvkm_bus_dtor,
-		.init = nv04_bus_init,
-		.fini = _nvkm_bus_fini,
-	},
-	.intr = nv04_bus_intr,
-}.base;
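
All four bus implementations share the same interrupt discipline, visible in the rewritten handler above: read pending & enabled, service the bits you recognise (acknowledging each by writing it back to 0x001100), then mask off anything left over so an unknown source cannot storm. A freestanding model of that demux loop (the fake latch/enable variables stand in for MMIO):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pending = 0x00000111;  /* stands in for 0x001100 */
    static uint32_t enabled = 0x00000111;  /* stands in for 0x001140 */

    static void bus_intr(void)
    {
        uint32_t stat = pending & enabled;

        if (stat & 0x00000001) {           /* known: bus error */
            puts("BUS ERROR");
            pending &= ~0x00000001;        /* ack via 0x001100 */
            stat &= ~0x00000001;
        }
        if (stat & 0x00000110) {           /* known: forward to GPIO */
            puts("gpio intr");
            pending &= ~0x00000110;
            stat &= ~0x00000110;
        }
        if (stat) {                        /* unknown: log, then mask */
            printf("intr %08x\n", (unsigned)stat);
            enabled &= ~stat;              /* like mask(0x001140, stat, 0) */
        }
    }

    int main(void) { bus_intr(); return 0; }
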
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h
deleted file mode 100644
index 3ddc8f9..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef __NVKM_BUS_NV04_H__
-#define __NVKM_BUS_NV04_H__
-#include <subdev/bus.h>
-
-struct nv04_bus_priv {
-	struct nvkm_bus base;
-};
-
-int  nv04_bus_ctor(struct nvkm_object *, struct nvkm_object *,
-		   struct nvkm_oclass *, void *, u32,
-		   struct nvkm_object **);
-int  nv50_bus_init(struct nvkm_object *);
-void nv50_bus_intr(struct nvkm_subdev *);
-
-struct nv04_bus_impl {
-	struct nvkm_oclass base;
-	void (*intr)(struct nvkm_subdev *);
-	int  (*hwsq_exec)(struct nvkm_bus *, u32 *, u32);
-	u32  hwsq_size;
-};
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
index c5739bce..5153d89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv31.c
@@ -22,70 +22,67 @@
  * Authors: Martin Peres <martin.peres@labri.fr>
  *          Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
+
+#include <subdev/gpio.h>
+#include <subdev/therm.h>
 
 static void
-nv31_bus_intr(struct nvkm_subdev *subdev)
+nv31_bus_intr(struct nvkm_bus *bus)
 {
-	struct nvkm_bus *pbus = nvkm_bus(subdev);
-	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
-	u32 gpio = nv_rd32(pbus, 0x001104) & nv_rd32(pbus, 0x001144);
+	struct nvkm_subdev *subdev = &bus->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
+	u32 gpio = nvkm_rd32(device, 0x001104) & nvkm_rd32(device, 0x001144);
 
 	if (gpio) {
-		subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_GPIO);
-		if (subdev && subdev->intr)
-			subdev->intr(subdev);
+		struct nvkm_gpio *gpio = device->gpio;
+		if (gpio)
+			nvkm_subdev_intr(&gpio->subdev);
 	}
 
 	if (stat & 0x00000008) {  /* NV41- */
-		u32 addr = nv_rd32(pbus, 0x009084);
-		u32 data = nv_rd32(pbus, 0x009088);
+		u32 addr = nvkm_rd32(device, 0x009084);
+		u32 data = nvkm_rd32(device, 0x009088);
 
-		nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
-			 (addr & 0x00000002) ? "write" : "read", data,
-			 (addr & 0x00fffffc));
+		nvkm_error(subdev, "MMIO %s of %08x FAULT at %06x\n",
+			   (addr & 0x00000002) ? "write" : "read", data,
+			   (addr & 0x00fffffc));
 
 		stat &= ~0x00000008;
-		nv_wr32(pbus, 0x001100, 0x00000008);
+		nvkm_wr32(device, 0x001100, 0x00000008);
 	}
 
 	if (stat & 0x00070000) {
-		subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_THERM);
-		if (subdev && subdev->intr)
-			subdev->intr(subdev);
+		struct nvkm_therm *therm = device->therm;
+		if (therm)
+			nvkm_subdev_intr(&therm->subdev);
 		stat &= ~0x00070000;
-		nv_wr32(pbus, 0x001100, 0x00070000);
+		nvkm_wr32(device, 0x001100, 0x00070000);
 	}
 
 	if (stat) {
-		nv_error(pbus, "unknown intr 0x%08x\n", stat);
-		nv_mask(pbus, 0x001140, stat, 0x00000000);
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_mask(device, 0x001140, stat, 0x00000000);
 	}
 }
 
-static int
-nv31_bus_init(struct nvkm_object *object)
+static void
+nv31_bus_init(struct nvkm_bus *bus)
 {
-	struct nv04_bus_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_bus_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x001100, 0xffffffff);
-	nv_wr32(priv, 0x001140, 0x00070008);
-	return 0;
+	struct nvkm_device *device = bus->subdev.device;
+	nvkm_wr32(device, 0x001100, 0xffffffff);
+	nvkm_wr32(device, 0x001140, 0x00070008);
 }
 
-struct nvkm_oclass *
-nv31_bus_oclass = &(struct nv04_bus_impl) {
-	.base.handle = NV_SUBDEV(BUS, 0x31),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_bus_ctor,
-		.dtor = _nvkm_bus_dtor,
-		.init = nv31_bus_init,
-		.fini = _nvkm_bus_fini,
-	},
+static const struct nvkm_bus_func
+nv31_bus = {
+	.init = nv31_bus_init,
 	.intr = nv31_bus_intr,
-}.base;
+};
+
+int
+nv31_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+{
+	return nvkm_bus_new_(&nv31_bus, device, index, pbus);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
index 1987863..19e10fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv50.c
@@ -22,83 +22,84 @@
  * Authors: Martin Peres <martin.peres@labri.fr>
  *          Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
+#include <subdev/therm.h>
 #include <subdev/timer.h>
 
 static int
-nv50_bus_hwsq_exec(struct nvkm_bus *pbus, u32 *data, u32 size)
+nv50_bus_hwsq_exec(struct nvkm_bus *bus, u32 *data, u32 size)
 {
-	struct nv50_bus_priv *priv = (void *)pbus;
+	struct nvkm_device *device = bus->subdev.device;
 	int i;
 
-	nv_mask(pbus, 0x001098, 0x00000008, 0x00000000);
-	nv_wr32(pbus, 0x001304, 0x00000000);
+	nvkm_mask(device, 0x001098, 0x00000008, 0x00000000);
+	nvkm_wr32(device, 0x001304, 0x00000000);
 	for (i = 0; i < size; i++)
-		nv_wr32(priv, 0x001400 + (i * 4), data[i]);
-	nv_mask(pbus, 0x001098, 0x00000018, 0x00000018);
-	nv_wr32(pbus, 0x00130c, 0x00000003);
+		nvkm_wr32(device, 0x001400 + (i * 4), data[i]);
+	nvkm_mask(device, 0x001098, 0x00000018, 0x00000018);
+	nvkm_wr32(device, 0x00130c, 0x00000003);
 
-	return nv_wait(pbus, 0x001308, 0x00000100, 0x00000000) ? 0 : -ETIMEDOUT;
-}
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x001308) & 0x00000100))
+			break;
+	) < 0)
+		return -ETIMEDOUT;
 
-void
-nv50_bus_intr(struct nvkm_subdev *subdev)
-{
-	struct nvkm_bus *pbus = nvkm_bus(subdev);
-	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
-
-	if (stat & 0x00000008) {
-		u32 addr = nv_rd32(pbus, 0x009084);
-		u32 data = nv_rd32(pbus, 0x009088);
-
-		nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
-			 (addr & 0x00000002) ? "write" : "read", data,
-			 (addr & 0x00fffffc));
-
-		stat &= ~0x00000008;
-		nv_wr32(pbus, 0x001100, 0x00000008);
-	}
-
-	if (stat & 0x00010000) {
-		subdev = nvkm_subdev(pbus, NVDEV_SUBDEV_THERM);
-		if (subdev && subdev->intr)
-			subdev->intr(subdev);
-		stat &= ~0x00010000;
-		nv_wr32(pbus, 0x001100, 0x00010000);
-	}
-
-	if (stat) {
-		nv_error(pbus, "unknown intr 0x%08x\n", stat);
-		nv_mask(pbus, 0x001140, stat, 0);
-	}
-}
-
-int
-nv50_bus_init(struct nvkm_object *object)
-{
-	struct nv04_bus_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_bus_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x001100, 0xffffffff);
-	nv_wr32(priv, 0x001140, 0x00010008);
 	return 0;
 }
 
-struct nvkm_oclass *
-nv50_bus_oclass = &(struct nv04_bus_impl) {
-	.base.handle = NV_SUBDEV(BUS, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_bus_ctor,
-		.dtor = _nvkm_bus_dtor,
-		.init = nv50_bus_init,
-		.fini = _nvkm_bus_fini,
-	},
+void
+nv50_bus_intr(struct nvkm_bus *bus)
+{
+	struct nvkm_subdev *subdev = &bus->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
+
+	if (stat & 0x00000008) {
+		u32 addr = nvkm_rd32(device, 0x009084);
+		u32 data = nvkm_rd32(device, 0x009088);
+
+		nvkm_error(subdev, "MMIO %s of %08x FAULT at %06x\n",
+			   (addr & 0x00000002) ? "write" : "read", data,
+			   (addr & 0x00fffffc));
+
+		stat &= ~0x00000008;
+		nvkm_wr32(device, 0x001100, 0x00000008);
+	}
+
+	if (stat & 0x00010000) {
+		struct nvkm_therm *therm = device->therm;
+		if (therm)
+			nvkm_subdev_intr(&therm->subdev);
+		stat &= ~0x00010000;
+		nvkm_wr32(device, 0x001100, 0x00010000);
+	}
+
+	if (stat) {
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_mask(device, 0x001140, stat, 0);
+	}
+}
+
+void
+nv50_bus_init(struct nvkm_bus *bus)
+{
+	struct nvkm_device *device = bus->subdev.device;
+	nvkm_wr32(device, 0x001100, 0xffffffff);
+	nvkm_wr32(device, 0x001140, 0x00010008);
+}
+
+static const struct nvkm_bus_func
+nv50_bus = {
+	.init = nv50_bus_init,
 	.intr = nv50_bus_intr,
 	.hwsq_exec = nv50_bus_hwsq_exec,
 	.hwsq_size = 64,
-}.base;
+};
+
+int
+nv50_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
+{
+	return nvkm_bus_new_(&nv50_bus, device, index, pbus);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
new file mode 100644
index 0000000..a130f2c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
@@ -0,0 +1,18 @@
+#ifndef __NVKM_BUS_PRIV_H__
+#define __NVKM_BUS_PRIV_H__
+#define nvkm_bus(p) container_of((p), struct nvkm_bus, subdev)
+#include <subdev/bus.h>
+
+struct nvkm_bus_func {
+	void (*init)(struct nvkm_bus *);
+	void (*intr)(struct nvkm_bus *);
+	int (*hwsq_exec)(struct nvkm_bus *, u32 *, u32);
+	u32 hwsq_size;
+};
+
+int nvkm_bus_new_(const struct nvkm_bus_func *, struct nvkm_device *, int,
+		  struct nvkm_bus **);
+
+void nv50_bus_init(struct nvkm_bus *);
+void nv50_bus_intr(struct nvkm_bus *);
+#endif
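
The nvkm_bus(p) macro above is the standard container_of() upcast: the generic subdev hooks receive the embedded nvkm_subdev pointer, and the macro subtracts the member offset to recover the enclosing nvkm_bus. A freestanding demonstration of why the arithmetic works (simplified types):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct subdev { int index; };
    struct bus    { int hwsq_size; struct subdev subdev; };

    int main(void)
    {
        struct bus b = { .hwsq_size = 64 };
        struct subdev *sd = &b.subdev;   /* what a hook would receive */
        struct bus *back = container_of(sd, struct bus, subdev);
        printf("%d\n", back->hwsq_size); /* prints 64 */
        return 0;
    }
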
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
index 9c2f688..ed7717b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
@@ -8,5 +8,6 @@
 nvkm-y += nvkm/subdev/clk/gf100.o
 nvkm-y += nvkm/subdev/clk/gk104.o
 nvkm-y += nvkm/subdev/clk/gk20a.o
+
 nvkm-y += nvkm/subdev/clk/pllnv04.o
 nvkm-y += nvkm/subdev/clk/pllgt215.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 39a83d8..dc8682c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -21,7 +21,8 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/clk.h>
+#include "priv.h"
+
 #include <subdev/bios.h>
 #include <subdev/bios/boost.h>
 #include <subdev/bios/cstep.h>
@@ -30,7 +31,6 @@
 #include <subdev/therm.h>
 #include <subdev/volt.h>
 
-#include <core/device.h>
 #include <core/option.h>
 
 /******************************************************************************
@@ -40,7 +40,7 @@
 nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust,
 		u8 pstate, u8 domain, u32 input)
 {
-	struct nvkm_bios *bios = nvkm_bios(clk);
+	struct nvkm_bios *bios = clk->subdev.device->bios;
 	struct nvbios_boostE boostE;
 	u8  ver, hdr, cnt, len;
 	u16 data;
@@ -77,8 +77,10 @@
 static int
 nvkm_cstate_prog(struct nvkm_clk *clk, struct nvkm_pstate *pstate, int cstatei)
 {
-	struct nvkm_therm *ptherm = nvkm_therm(clk);
-	struct nvkm_volt *volt = nvkm_volt(clk);
+	struct nvkm_subdev *subdev = &clk->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_therm *therm = device->therm;
+	struct nvkm_volt *volt = device->volt;
 	struct nvkm_cstate *cstate;
 	int ret;
 
@@ -88,41 +90,41 @@
 		cstate = &pstate->base;
 	}
 
-	if (ptherm) {
-		ret = nvkm_therm_cstate(ptherm, pstate->fanspeed, +1);
+	if (therm) {
+		ret = nvkm_therm_cstate(therm, pstate->fanspeed, +1);
 		if (ret && ret != -ENODEV) {
-			nv_error(clk, "failed to raise fan speed: %d\n", ret);
+			nvkm_error(subdev, "failed to raise fan speed: %d\n", ret);
 			return ret;
 		}
 	}
 
 	if (volt) {
-		ret = volt->set_id(volt, cstate->voltage, +1);
+		ret = nvkm_volt_set_id(volt, cstate->voltage, +1);
 		if (ret && ret != -ENODEV) {
-			nv_error(clk, "failed to raise voltage: %d\n", ret);
+			nvkm_error(subdev, "failed to raise voltage: %d\n", ret);
 			return ret;
 		}
 	}
 
-	ret = clk->calc(clk, cstate);
+	ret = clk->func->calc(clk, cstate);
 	if (ret == 0) {
-		ret = clk->prog(clk);
-		clk->tidy(clk);
+		ret = clk->func->prog(clk);
+		clk->func->tidy(clk);
 	}
 
 	if (volt) {
-		ret = volt->set_id(volt, cstate->voltage, -1);
+		ret = nvkm_volt_set_id(volt, cstate->voltage, -1);
 		if (ret && ret != -ENODEV)
-			nv_error(clk, "failed to lower voltage: %d\n", ret);
+			nvkm_error(subdev, "failed to lower voltage: %d\n", ret);
 	}
 
-	if (ptherm) {
-		ret = nvkm_therm_cstate(ptherm, pstate->fanspeed, -1);
+	if (therm) {
+		ret = nvkm_therm_cstate(therm, pstate->fanspeed, -1);
 		if (ret && ret != -ENODEV)
-			nv_error(clk, "failed to lower fan speed: %d\n", ret);
+			nvkm_error(subdev, "failed to lower fan speed: %d\n", ret);
 	}
 
-	return 0;
+	return ret;
 }
 
 static void
@@ -135,8 +137,8 @@
 static int
 nvkm_cstate_new(struct nvkm_clk *clk, int idx, struct nvkm_pstate *pstate)
 {
-	struct nvkm_bios *bios = nvkm_bios(clk);
-	struct nvkm_domain *domain = clk->domains;
+	struct nvkm_bios *bios = clk->subdev.device->bios;
+	const struct nvkm_domain *domain = clk->domains;
 	struct nvkm_cstate *cstate = NULL;
 	struct nvbios_cstepX cstepX;
 	u8  ver, hdr;
@@ -172,7 +174,8 @@
 static int
 nvkm_pstate_prog(struct nvkm_clk *clk, int pstatei)
 {
-	struct nvkm_fb *pfb = nvkm_fb(clk);
+	struct nvkm_subdev *subdev = &clk->subdev;
+	struct nvkm_ram *ram = subdev->device->fb->ram;
 	struct nvkm_pstate *pstate;
 	int ret, idx = 0;
 
@@ -181,17 +184,17 @@
 			break;
 	}
 
-	nv_debug(clk, "setting performance state %d\n", pstatei);
+	nvkm_debug(subdev, "setting performance state %d\n", pstatei);
 	clk->pstate = pstatei;
 
-	if (pfb->ram && pfb->ram->calc) {
+	if (ram && ram->func->calc) {
 		int khz = pstate->base.domain[nv_clk_src_mem];
 		do {
-			ret = pfb->ram->calc(pfb, khz);
+			ret = ram->func->calc(ram, khz);
 			if (ret == 0)
-				ret = pfb->ram->prog(pfb);
+				ret = ram->func->prog(ram);
 		} while (ret > 0);
-		pfb->ram->tidy(pfb);
+		ram->func->tidy(ram);
 	}
 
 	return nvkm_cstate_prog(clk, pstate, 0);
@@ -201,31 +204,32 @@
 nvkm_pstate_work(struct work_struct *work)
 {
 	struct nvkm_clk *clk = container_of(work, typeof(*clk), work);
+	struct nvkm_subdev *subdev = &clk->subdev;
 	int pstate;
 
 	if (!atomic_xchg(&clk->waiting, 0))
 		return;
 	clk->pwrsrc = power_supply_is_system_supplied();
 
-	nv_trace(clk, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
-		 clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
-		 clk->astate, clk->tstate, clk->dstate);
+	nvkm_trace(subdev, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
+		   clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
+		   clk->astate, clk->tstate, clk->dstate);
 
 	pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
 	if (clk->state_nr && pstate != -1) {
 		pstate = (pstate < 0) ? clk->astate : pstate;
-		pstate = min(pstate, clk->state_nr - 1 - clk->tstate);
+		pstate = min(pstate, clk->state_nr - 1 + clk->tstate);
 		pstate = max(pstate, clk->dstate);
 	} else {
 		pstate = clk->pstate = -1;
 	}
 
-	nv_trace(clk, "-> %d\n", pstate);
+	nvkm_trace(subdev, "-> %d\n", pstate);
 	if (pstate != clk->pstate) {
 		int ret = nvkm_pstate_prog(clk, pstate);
 		if (ret) {
-			nv_error(clk, "error setting pstate %d: %d\n",
-				 pstate, ret);
+			nvkm_error(subdev, "error setting pstate %d: %d\n",
+				   pstate, ret);
 		}
 	}
 
@@ -246,8 +250,9 @@
 static void
 nvkm_pstate_info(struct nvkm_clk *clk, struct nvkm_pstate *pstate)
 {
-	struct nvkm_domain *clock = clk->domains - 1;
+	const struct nvkm_domain *clock = clk->domains - 1;
 	struct nvkm_cstate *cstate;
+	struct nvkm_subdev *subdev = &clk->subdev;
 	char info[3][32] = { "", "", "" };
 	char name[4] = "--";
 	int i = -1;
@@ -261,12 +266,12 @@
 		if (hi == 0)
 			continue;
 
-		nv_debug(clk, "%02x: %10d KHz\n", clock->name, lo);
+		nvkm_debug(subdev, "%02x: %10d KHz\n", clock->name, lo);
 		list_for_each_entry(cstate, &pstate->list, head) {
 			u32 freq = cstate->domain[clock->name];
 			lo = min(lo, freq);
 			hi = max(hi, freq);
-			nv_debug(clk, "%10d KHz\n", freq);
+			nvkm_debug(subdev, "%10d KHz\n", freq);
 		}
 
 		if (clock->mname && ++i < ARRAY_SIZE(info)) {
@@ -282,7 +287,7 @@
 		}
 	}
 
-	nv_info(clk, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
+	nvkm_debug(subdev, "%s: %s %s %s\n", name, info[0], info[1], info[2]);
 }
 
 static void
@@ -301,8 +306,8 @@
 static int
 nvkm_pstate_new(struct nvkm_clk *clk, int idx)
 {
-	struct nvkm_bios *bios = nvkm_bios(clk);
-	struct nvkm_domain *domain = clk->domains - 1;
+	struct nvkm_bios *bios = clk->subdev.device->bios;
+	const struct nvkm_domain *domain = clk->domains - 1;
 	struct nvkm_pstate *pstate;
 	struct nvkm_cstate *cstate;
 	struct nvbios_cstepE cstepE;
@@ -471,32 +476,37 @@
  *****************************************************************************/
 
 int
-_nvkm_clk_fini(struct nvkm_object *object, bool suspend)
+nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
 {
-	struct nvkm_clk *clk = (void *)object;
-	nvkm_notify_put(&clk->pwrsrc_ntfy);
-	return nvkm_subdev_fini(&clk->base, suspend);
+	return clk->func->read(clk, src);
 }
 
-int
-_nvkm_clk_init(struct nvkm_object *object)
+static int
+nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	struct nvkm_clk *clk = (void *)object;
-	struct nvkm_domain *clock = clk->domains;
-	int ret;
+	struct nvkm_clk *clk = nvkm_clk(subdev);
+	nvkm_notify_put(&clk->pwrsrc_ntfy);
+	flush_work(&clk->work);
+	if (clk->func->fini)
+		clk->func->fini(clk);
+	return 0;
+}
 
-	ret = nvkm_subdev_init(&clk->base);
-	if (ret)
-		return ret;
+static int
+nvkm_clk_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_clk *clk = nvkm_clk(subdev);
+	const struct nvkm_domain *clock = clk->domains;
+	int ret;
 
 	memset(&clk->bstate, 0x00, sizeof(clk->bstate));
 	INIT_LIST_HEAD(&clk->bstate.list);
 	clk->bstate.pstate = 0xff;
 
 	while (clock->name != nv_clk_src_max) {
-		ret = clk->read(clk, clock->name);
+		ret = nvkm_clk_read(clk, clock->name);
 		if (ret < 0) {
-			nv_error(clk, "%02x freq unknown\n", clock->name);
+			nvkm_error(subdev, "%02x freq unknown\n", clock->name);
 			return ret;
 		}
 		clk->bstate.base.domain[clock->name] = ret;
@@ -505,6 +515,9 @@
 
 	nvkm_pstate_info(clk, &clk->bstate);
 
+	if (clk->func->init)
+		return clk->func->init(clk);
+
 	clk->astate = clk->state_nr - 1;
 	clk->tstate = 0;
 	clk->dstate = 0;
@@ -513,61 +526,63 @@
 	return 0;
 }
 
-void
-_nvkm_clk_dtor(struct nvkm_object *object)
+static void *
+nvkm_clk_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_clk *clk = (void *)object;
+	struct nvkm_clk *clk = nvkm_clk(subdev);
 	struct nvkm_pstate *pstate, *temp;
 
 	nvkm_notify_fini(&clk->pwrsrc_ntfy);
 
+	/* Early return if the pstates have been provided statically */
+	if (clk->func->pstates)
+		return clk;
+
 	list_for_each_entry_safe(pstate, temp, &clk->states, head) {
 		nvkm_pstate_del(pstate);
 	}
 
-	nvkm_subdev_destroy(&clk->base);
+	return clk;
 }
 
+static const struct nvkm_subdev_func
+nvkm_clk = {
+	.dtor = nvkm_clk_dtor,
+	.init = nvkm_clk_init,
+	.fini = nvkm_clk_fini,
+};
+
 int
-nvkm_clk_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, struct nvkm_domain *clocks,
-		 struct nvkm_pstate *pstates, int nb_pstates,
-		 bool allow_reclock, int length, void **object)
+nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
+	      int index, bool allow_reclock, struct nvkm_clk *clk)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_clk *clk;
 	int ret, idx, arglen;
 	const char *mode;
 
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "CLK",
-				  "clock", length, object);
-	clk = *object;
-	if (ret)
-		return ret;
-
+	nvkm_subdev_ctor(&nvkm_clk, device, index, 0, &clk->subdev);
+	clk->func = func;
 	INIT_LIST_HEAD(&clk->states);
-	clk->domains = clocks;
+	clk->domains = func->domains;
 	clk->ustate_ac = -1;
 	clk->ustate_dc = -1;
+	clk->allow_reclock = allow_reclock;
 
 	INIT_WORK(&clk->work, nvkm_pstate_work);
 	init_waitqueue_head(&clk->wait);
 	atomic_set(&clk->waiting, 0);
 
 	/* If no pstates are provided, try and fetch them from the BIOS */
-	if (!pstates) {
+	if (!func->pstates) {
 		idx = 0;
 		do {
 			ret = nvkm_pstate_new(clk, idx++);
 		} while (ret == 0);
 	} else {
-		for (idx = 0; idx < nb_pstates; idx++)
-			list_add_tail(&pstates[idx].head, &clk->states);
-		clk->state_nr = nb_pstates;
+		for (idx = 0; idx < func->nr_pstates; idx++)
+			list_add_tail(&func->pstates[idx].head, &clk->states);
+		clk->state_nr = func->nr_pstates;
 	}
 
-	clk->allow_reclock = allow_reclock;
-
 	ret = nvkm_notify_init(NULL, &device->event, nvkm_clk_pwrsrc, true,
 			       NULL, 0, 0, &clk->pwrsrc_ntfy);
 	if (ret)
@@ -589,3 +604,12 @@
 
 	return 0;
 }
+
+int
+nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
+	      int index, bool allow_reclock, struct nvkm_clk **pclk)
+{
+	if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
+}
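
The clk.c hunks above replace the old nvkm_object lifecycle hooks (_nvkm_clk_init/_nvkm_clk_fini/_nvkm_clk_dtor) with a subdev function table plus a two-step constructor: nvkm_clk_ctor() initialises an nvkm_clk that is embedded in a larger structure, while nvkm_clk_new_() allocates a bare one and delegates to it. A minimal sketch of how a chipset driver plugs into this, assuming a hypothetical gxxx_clk implementation (the gxxx_* names below are illustrative, not from the patch):

    static const struct nvkm_clk_func
    gxxx_clk = {
    	.read = gxxx_clk_read,   /* report a domain's current frequency */
    	.calc = gxxx_clk_calc,   /* compute coefficients for a cstate */
    	.prog = gxxx_clk_prog,   /* program the computed coefficients */
    	.tidy = gxxx_clk_tidy,   /* discard cached calc state */
    	.domains = {
    		{ nv_clk_src_crystal, 0xff },
    		{ nv_clk_src_max }   /* sentinel terminator */
    	}
    };

    int
    gxxx_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
    {
    	/* no subclass state needed, so let the base code allocate */
    	return nvkm_clk_new_(&gxxx_clk, device, index, false, pclk);
    }
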
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
index 4c90b97..347da9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/g84.c
@@ -23,25 +23,26 @@
  */
 #include "nv50.h"
 
-static struct nvkm_domain
-g84_domains[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
-	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
-	{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
-	{ nv_clk_src_vdec   , 0xff },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+g84_clk = {
+	.read = nv50_clk_read,
+	.calc = nv50_clk_calc,
+	.prog = nv50_clk_prog,
+	.tidy = nv50_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+		{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
+		{ nv_clk_src_vdec   , 0xff },
+		{ nv_clk_src_max }
+	}
 };
 
-struct nvkm_oclass *
-g84_clk_oclass = &(struct nv50_clk_oclass) {
-	.base.handle = NV_SUBDEV(CLK, 0x84),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-	.domains = g84_domains,
-}.base;
+int
+g84_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+	return nv50_clk_new_(&g84_clk, device, index,
+			     (device->chipset == 0xa0), pclk);
+}
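
g84 keeps the nv50 read/calc/prog/tidy implementations and only supplies its own domain table; reclocking is enabled solely on chipset 0xa0, per the (device->chipset == 0xa0) argument to nv50_clk_new_(). The domains[] tables now embedded in each nvkm_clk_func are consumed by the base code's init path: the list is walked until the nv_clk_src_max sentinel. A sketch restating that consumption loop from the nvkm_clk_init() hunk earlier, for clarity:

    const struct nvkm_domain *clock = clk->domains;
    while (clock->name != nv_clk_src_max) {
    	int khz = nvkm_clk_read(clk, clock->name);
    	if (khz < 0)
    		return khz;   /* this domain's frequency could not be read */
    	clk->bstate.base.domain[clock->name] = khz;
    	clock++;
    }
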
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 3d7330d..a52b7e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/clk.h>
+#define gf100_clk(p) container_of((p), struct gf100_clk, base)
+#include "priv.h"
 #include "pll.h"
 
-#include <core/device.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/timer.h>
@@ -38,29 +38,29 @@
 	u32 coef;
 };
 
-struct gf100_clk_priv {
+struct gf100_clk {
 	struct nvkm_clk base;
 	struct gf100_clk_info eng[16];
 };
 
-static u32 read_div(struct gf100_clk_priv *, int, u32, u32);
+static u32 read_div(struct gf100_clk *, int, u32, u32);
 
 static u32
-read_vco(struct gf100_clk_priv *priv, u32 dsrc)
+read_vco(struct gf100_clk *clk, u32 dsrc)
 {
-	struct nvkm_clk *clk = &priv->base;
-	u32 ssrc = nv_rd32(priv, dsrc);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ssrc = nvkm_rd32(device, dsrc);
 	if (!(ssrc & 0x00000100))
-		return clk->read(clk, nv_clk_src_sppll0);
-	return clk->read(clk, nv_clk_src_sppll1);
+		return nvkm_clk_read(&clk->base, nv_clk_src_sppll0);
+	return nvkm_clk_read(&clk->base, nv_clk_src_sppll1);
 }
 
 static u32
-read_pll(struct gf100_clk_priv *priv, u32 pll)
+read_pll(struct gf100_clk *clk, u32 pll)
 {
-	struct nvkm_clk *clk = &priv->base;
-	u32 ctrl = nv_rd32(priv, pll + 0x00);
-	u32 coef = nv_rd32(priv, pll + 0x04);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, pll + 0x00);
+	u32 coef = nvkm_rd32(device, pll + 0x04);
 	u32 P = (coef & 0x003f0000) >> 16;
 	u32 N = (coef & 0x0000ff00) >> 8;
 	u32 M = (coef & 0x000000ff) >> 0;
@@ -72,20 +72,20 @@
 	switch (pll) {
 	case 0x00e800:
 	case 0x00e820:
-		sclk = nv_device(priv)->crystal;
+		sclk = device->crystal;
 		P = 1;
 		break;
 	case 0x132000:
-		sclk = clk->read(clk, nv_clk_src_mpllsrc);
+		sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrc);
 		break;
 	case 0x132020:
-		sclk = clk->read(clk, nv_clk_src_mpllsrcref);
+		sclk = nvkm_clk_read(&clk->base, nv_clk_src_mpllsrcref);
 		break;
 	case 0x137000:
 	case 0x137020:
 	case 0x137040:
 	case 0x1370e0:
-		sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
+		sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
 		break;
 	default:
 		return 0;
@@ -95,46 +95,48 @@
 }
 
 static u32
-read_div(struct gf100_clk_priv *priv, int doff, u32 dsrc, u32 dctl)
+read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
 {
-	u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
-	u32 sctl = nv_rd32(priv, dctl + (doff * 4));
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
+	u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
 
 	switch (ssrc & 0x00000003) {
 	case 0:
 		if ((ssrc & 0x00030000) != 0x00030000)
-			return nv_device(priv)->crystal;
+			return device->crystal;
 		return 108000;
 	case 2:
 		return 100000;
 	case 3:
 		if (sctl & 0x80000000) {
-			u32 sclk = read_vco(priv, dsrc + (doff * 4));
+			u32 sclk = read_vco(clk, dsrc + (doff * 4));
 			u32 sdiv = (sctl & 0x0000003f) + 2;
 			return (sclk * 2) / sdiv;
 		}
 
-		return read_vco(priv, dsrc + (doff * 4));
+		return read_vco(clk, dsrc + (doff * 4));
 	default:
 		return 0;
 	}
 }
 
 static u32
-read_clk(struct gf100_clk_priv *priv, int clk)
+read_clk(struct gf100_clk *clk, int idx)
 {
-	u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
-	u32 ssel = nv_rd32(priv, 0x137100);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
+	u32 ssel = nvkm_rd32(device, 0x137100);
 	u32 sclk, sdiv;
 
-	if (ssel & (1 << clk)) {
-		if (clk < 7)
-			sclk = read_pll(priv, 0x137000 + (clk * 0x20));
+	if (ssel & (1 << idx)) {
+		if (idx < 7)
+			sclk = read_pll(clk, 0x137000 + (idx * 0x20));
 		else
-			sclk = read_pll(priv, 0x1370e0);
+			sclk = read_pll(clk, 0x1370e0);
 		sdiv = ((sctl & 0x00003f00) >> 8) + 2;
 	} else {
-		sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+		sclk = read_div(clk, idx, 0x137160, 0x1371d0);
 		sdiv = ((sctl & 0x0000003f) >> 0) + 2;
 	}
 
@@ -145,10 +147,11 @@
 }
 
 static int
-gf100_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gf100_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct nvkm_device *device = nv_device(clk);
-	struct gf100_clk_priv *priv = (void *)clk;
+	struct gf100_clk *clk = gf100_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 
 	switch (src) {
 	case nv_clk_src_crystal:
@@ -156,47 +159,47 @@
 	case nv_clk_src_href:
 		return 100000;
 	case nv_clk_src_sppll0:
-		return read_pll(priv, 0x00e800);
+		return read_pll(clk, 0x00e800);
 	case nv_clk_src_sppll1:
-		return read_pll(priv, 0x00e820);
+		return read_pll(clk, 0x00e820);
 
 	case nv_clk_src_mpllsrcref:
-		return read_div(priv, 0, 0x137320, 0x137330);
+		return read_div(clk, 0, 0x137320, 0x137330);
 	case nv_clk_src_mpllsrc:
-		return read_pll(priv, 0x132020);
+		return read_pll(clk, 0x132020);
 	case nv_clk_src_mpll:
-		return read_pll(priv, 0x132000);
+		return read_pll(clk, 0x132000);
 	case nv_clk_src_mdiv:
-		return read_div(priv, 0, 0x137300, 0x137310);
+		return read_div(clk, 0, 0x137300, 0x137310);
 	case nv_clk_src_mem:
-		if (nv_rd32(priv, 0x1373f0) & 0x00000002)
-			return clk->read(clk, nv_clk_src_mpll);
-		return clk->read(clk, nv_clk_src_mdiv);
+		if (nvkm_rd32(device, 0x1373f0) & 0x00000002)
+			return nvkm_clk_read(&clk->base, nv_clk_src_mpll);
+		return nvkm_clk_read(&clk->base, nv_clk_src_mdiv);
 
 	case nv_clk_src_gpc:
-		return read_clk(priv, 0x00);
+		return read_clk(clk, 0x00);
 	case nv_clk_src_rop:
-		return read_clk(priv, 0x01);
+		return read_clk(clk, 0x01);
 	case nv_clk_src_hubk07:
-		return read_clk(priv, 0x02);
+		return read_clk(clk, 0x02);
 	case nv_clk_src_hubk06:
-		return read_clk(priv, 0x07);
+		return read_clk(clk, 0x07);
 	case nv_clk_src_hubk01:
-		return read_clk(priv, 0x08);
+		return read_clk(clk, 0x08);
 	case nv_clk_src_copy:
-		return read_clk(priv, 0x09);
+		return read_clk(clk, 0x09);
 	case nv_clk_src_daemon:
-		return read_clk(priv, 0x0c);
+		return read_clk(clk, 0x0c);
 	case nv_clk_src_vdec:
-		return read_clk(priv, 0x0e);
+		return read_clk(clk, 0x0e);
 	default:
-		nv_error(clk, "invalid clock source %d\n", src);
+		nvkm_error(subdev, "invalid clock source %d\n", src);
 		return -EINVAL;
 	}
 }
 
 static u32
-calc_div(struct gf100_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+calc_div(struct gf100_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
 {
 	u32 div = min((ref * 2) / freq, (u32)65);
 	if (div < 2)
@@ -207,7 +210,7 @@
 }
 
 static u32
-calc_src(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+calc_src(struct gf100_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
 {
 	u32 sclk;
 
@@ -229,28 +232,29 @@
 	}
 
 	/* otherwise, calculate the closest divider */
-	sclk = read_vco(priv, 0x137160 + (clk * 4));
-	if (clk < 7)
-		sclk = calc_div(priv, clk, sclk, freq, ddiv);
+	sclk = read_vco(clk, 0x137160 + (idx * 4));
+	if (idx < 7)
+		sclk = calc_div(clk, idx, sclk, freq, ddiv);
 	return sclk;
 }
 
 static u32
-calc_pll(struct gf100_clk_priv *priv, int clk, u32 freq, u32 *coef)
+calc_pll(struct gf100_clk *clk, int idx, u32 freq, u32 *coef)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_pll limits;
 	int N, M, P, ret;
 
-	ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+	ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
 	if (ret)
 		return 0;
 
-	limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
+	limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
 	if (!limits.refclk)
 		return 0;
 
-	ret = gt215_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+	ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
 	if (ret <= 0)
 		return 0;
 
@@ -259,10 +263,9 @@
 }
 
 static int
-calc_clk(struct gf100_clk_priv *priv,
-	 struct nvkm_cstate *cstate, int clk, int dom)
+calc_clk(struct gf100_clk *clk, struct nvkm_cstate *cstate, int idx, int dom)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
+	struct gf100_clk_info *info = &clk->eng[idx];
 	u32 freq = cstate->domain[dom];
 	u32 src0, div0, div1D, div1P = 0;
 	u32 clk0, clk1 = 0;
@@ -272,16 +275,16 @@
 		return 0;
 
 	/* first possible path, using only dividers */
-	clk0 = calc_src(priv, clk, freq, &src0, &div0);
-	clk0 = calc_div(priv, clk, clk0, freq, &div1D);
+	clk0 = calc_src(clk, idx, freq, &src0, &div0);
+	clk0 = calc_div(clk, idx, clk0, freq, &div1D);
 
 	/* see if we can get any closer using PLLs */
-	if (clk0 != freq && (0x00004387 & (1 << clk))) {
-		if (clk <= 7)
-			clk1 = calc_pll(priv, clk, freq, &info->coef);
+	if (clk0 != freq && (0x00004387 & (1 << idx))) {
+		if (idx <= 7)
+			clk1 = calc_pll(clk, idx, freq, &info->coef);
 		else
 			clk1 = cstate->domain[nv_clk_src_hubk06];
-		clk1 = calc_div(priv, clk, clk1, freq, &div1P);
+		clk1 = calc_div(clk, idx, clk1, freq, &div1P);
 	}
 
 	/* select the method which gets closest to target freq */
@@ -303,7 +306,7 @@
 			info->mdiv |= 0x80000000;
 			info->mdiv |= div1P << 8;
 		}
-		info->ssel = (1 << clk);
+		info->ssel = (1 << idx);
 		info->freq = clk1;
 	}
 
@@ -311,81 +314,96 @@
 }
 
 static int
-gf100_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gf100_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct gf100_clk_priv *priv = (void *)clk;
+	struct gf100_clk *clk = gf100_clk(base);
 	int ret;
 
-	if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
-	    (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
-	    (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
-	    (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
-	    (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
-	    (ret = calc_clk(priv, cstate, 0x09, nv_clk_src_copy)) ||
-	    (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
-	    (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
+	if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
+	    (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
+	    (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
+	    (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
+	    (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
+	    (ret = calc_clk(clk, cstate, 0x09, nv_clk_src_copy)) ||
+	    (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_daemon)) ||
+	    (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
 		return ret;
 
 	return 0;
 }
 
 static void
-gf100_clk_prog_0(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_0(struct gf100_clk *clk, int idx)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
-	if (clk < 7 && !info->ssel) {
-		nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
-		nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
+	struct gf100_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
+	if (idx < 7 && !info->ssel) {
+		nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x80003f3f, info->ddiv);
+		nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
 	}
 }
 
 static void
-gf100_clk_prog_1(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_1(struct gf100_clk *clk, int idx)
 {
-	nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
-	nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
+			break;
+	);
 }
 
 static void
-gf100_clk_prog_2(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_2(struct gf100_clk *clk, int idx)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
-	const u32 addr = 0x137000 + (clk * 0x20);
-	if (clk <= 7) {
-		nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
-		nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
+	struct gf100_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
+	const u32 addr = 0x137000 + (idx * 0x20);
+	if (idx <= 7) {
+		nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
+		nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
 		if (info->coef) {
-			nv_wr32(priv, addr + 0x04, info->coef);
-			nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
-			nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
-			nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
+			nvkm_wr32(device, addr + 0x04, info->coef);
+			nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+			nvkm_msec(device, 2000,
+				if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
+					break;
+			);
+			nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
 		}
 	}
 }
 
 static void
-gf100_clk_prog_3(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_3(struct gf100_clk *clk, int idx)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
+	struct gf100_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (info->ssel) {
-		nv_mask(priv, 0x137100, (1 << clk), info->ssel);
-		nv_wait(priv, 0x137100, (1 << clk), info->ssel);
+		nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
+		nvkm_msec(device, 2000,
+			u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
+			if (tmp == info->ssel)
+				break;
+		);
 	}
 }
 
 static void
-gf100_clk_prog_4(struct gf100_clk_priv *priv, int clk)
+gf100_clk_prog_4(struct gf100_clk *clk, int idx)
 {
-	struct gf100_clk_info *info = &priv->eng[clk];
-	nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+	struct gf100_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f3f, info->mdiv);
 }
 
 static int
-gf100_clk_prog(struct nvkm_clk *clk)
+gf100_clk_prog(struct nvkm_clk *base)
 {
-	struct gf100_clk_priv *priv = (void *)clk;
+	struct gf100_clk *clk = gf100_clk(base);
 	struct {
-		void (*exec)(struct gf100_clk_priv *, int);
+		void (*exec)(struct gf100_clk *, int);
 	} stage[] = {
 		{ gf100_clk_prog_0 }, /* div programming */
 		{ gf100_clk_prog_1 }, /* select div mode */
@@ -396,10 +414,10 @@
 	int i, j;
 
 	for (i = 0; i < ARRAY_SIZE(stage); i++) {
-		for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
-			if (!priv->eng[j].freq)
+		for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
+			if (!clk->eng[j].freq)
 				continue;
-			stage[i].exec(priv, j);
+			stage[i].exec(clk, j);
 		}
 	}
 
@@ -407,56 +425,42 @@
 }
 
 static void
-gf100_clk_tidy(struct nvkm_clk *clk)
+gf100_clk_tidy(struct nvkm_clk *base)
 {
-	struct gf100_clk_priv *priv = (void *)clk;
-	memset(priv->eng, 0x00, sizeof(priv->eng));
+	struct gf100_clk *clk = gf100_clk(base);
+	memset(clk->eng, 0x00, sizeof(clk->eng));
 }
 
-static struct nvkm_domain
-gf100_domain[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_hubk06 , 0x00 },
-	{ nv_clk_src_hubk01 , 0x01 },
-	{ nv_clk_src_copy   , 0x02 },
-	{ nv_clk_src_gpc    , 0x03, 0, "core", 2000 },
-	{ nv_clk_src_rop    , 0x04 },
-	{ nv_clk_src_mem    , 0x05, 0, "memory", 1000 },
-	{ nv_clk_src_vdec   , 0x06 },
-	{ nv_clk_src_daemon , 0x0a },
-	{ nv_clk_src_hubk07 , 0x0b },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+gf100_clk = {
+	.read = gf100_clk_read,
+	.calc = gf100_clk_calc,
+	.prog = gf100_clk_prog,
+	.tidy = gf100_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_hubk06 , 0x00 },
+		{ nv_clk_src_hubk01 , 0x01 },
+		{ nv_clk_src_copy   , 0x02 },
+		{ nv_clk_src_gpc    , 0x03, 0, "core", 2000 },
+		{ nv_clk_src_rop    , 0x04 },
+		{ nv_clk_src_mem    , 0x05, 0, "memory", 1000 },
+		{ nv_clk_src_vdec   , 0x06 },
+		{ nv_clk_src_daemon , 0x0a },
+		{ nv_clk_src_hubk07 , 0x0b },
+		{ nv_clk_src_max }
+	}
 };
 
-static int
-gf100_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+gf100_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct gf100_clk_priv *priv;
-	int ret;
+	struct gf100_clk *clk;
 
-	ret = nvkm_clk_create(parent, engine, oclass, gf100_domain,
-			      NULL, 0, false, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	*pclk = &clk->base;
 
-	priv->base.read = gf100_clk_read;
-	priv->base.calc = gf100_clk_calc;
-	priv->base.prog = gf100_clk_prog;
-	priv->base.tidy = gf100_clk_tidy;
-	return 0;
+	return nvkm_clk_ctor(&gf100_clk, device, index, false, &clk->base);
 }
-
-struct nvkm_oclass
-gf100_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-};
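
gf100.c introduces the container_of() downcast macro, gf100_clk(p), in place of the old (void *) casts from nvkm_object. The idiom recovers the containing structure from a pointer to its embedded base and, unlike a blind cast, ties the conversion to a named member at compile time. A self-contained userspace illustration of the same idiom (struct names here are illustrative only):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct base { int id; };
    struct derived {
    	struct base base;   /* embedded base, as nvkm_clk is in gf100_clk */
    	int extra;
    };

    #define to_derived(p) container_of((p), struct derived, base)

    int main(void)
    {
    	struct derived d = { .base = { .id = 1 }, .extra = 42 };
    	struct base *b = &d.base;             /* what the core hands back */
    	printf("%d\n", to_derived(b)->extra); /* prints 42 */
    	return 0;
    }
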
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index e9b2310..396f7e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/clk.h>
+#define gk104_clk(p) container_of((p), struct gk104_clk, base)
+#include "priv.h"
 #include "pll.h"
 
-#include <core/device.h>
 #include <subdev/timer.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
@@ -38,28 +38,30 @@
 	u32 coef;
 };
 
-struct gk104_clk_priv {
+struct gk104_clk {
 	struct nvkm_clk base;
 	struct gk104_clk_info eng[16];
 };
 
-static u32 read_div(struct gk104_clk_priv *, int, u32, u32);
-static u32 read_pll(struct gk104_clk_priv *, u32);
+static u32 read_div(struct gk104_clk *, int, u32, u32);
+static u32 read_pll(struct gk104_clk *, u32);
 
 static u32
-read_vco(struct gk104_clk_priv *priv, u32 dsrc)
+read_vco(struct gk104_clk *clk, u32 dsrc)
 {
-	u32 ssrc = nv_rd32(priv, dsrc);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ssrc = nvkm_rd32(device, dsrc);
 	if (!(ssrc & 0x00000100))
-		return read_pll(priv, 0x00e800);
-	return read_pll(priv, 0x00e820);
+		return read_pll(clk, 0x00e800);
+	return read_pll(clk, 0x00e820);
 }
 
 static u32
-read_pll(struct gk104_clk_priv *priv, u32 pll)
+read_pll(struct gk104_clk *clk, u32 pll)
 {
-	u32 ctrl = nv_rd32(priv, pll + 0x00);
-	u32 coef = nv_rd32(priv, pll + 0x04);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, pll + 0x00);
+	u32 coef = nvkm_rd32(device, pll + 0x04);
 	u32 P = (coef & 0x003f0000) >> 16;
 	u32 N = (coef & 0x0000ff00) >> 8;
 	u32 M = (coef & 0x000000ff) >> 0;
@@ -72,22 +74,22 @@
 	switch (pll) {
 	case 0x00e800:
 	case 0x00e820:
-		sclk = nv_device(priv)->crystal;
+		sclk = device->crystal;
 		P = 1;
 		break;
 	case 0x132000:
-		sclk = read_pll(priv, 0x132020);
+		sclk = read_pll(clk, 0x132020);
 		P = (coef & 0x10000000) ? 2 : 1;
 		break;
 	case 0x132020:
-		sclk = read_div(priv, 0, 0x137320, 0x137330);
-		fN   = nv_rd32(priv, pll + 0x10) >> 16;
+		sclk = read_div(clk, 0, 0x137320, 0x137330);
+		fN   = nvkm_rd32(device, pll + 0x10) >> 16;
 		break;
 	case 0x137000:
 	case 0x137020:
 	case 0x137040:
 	case 0x1370e0:
-		sclk = read_div(priv, (pll & 0xff) / 0x20, 0x137120, 0x137140);
+		sclk = read_div(clk, (pll & 0xff) / 0x20, 0x137120, 0x137140);
 		break;
 	default:
 		return 0;
@@ -101,70 +103,73 @@
 }
 
 static u32
-read_div(struct gk104_clk_priv *priv, int doff, u32 dsrc, u32 dctl)
+read_div(struct gk104_clk *clk, int doff, u32 dsrc, u32 dctl)
 {
-	u32 ssrc = nv_rd32(priv, dsrc + (doff * 4));
-	u32 sctl = nv_rd32(priv, dctl + (doff * 4));
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
+	u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
 
 	switch (ssrc & 0x00000003) {
 	case 0:
 		if ((ssrc & 0x00030000) != 0x00030000)
-			return nv_device(priv)->crystal;
+			return device->crystal;
 		return 108000;
 	case 2:
 		return 100000;
 	case 3:
 		if (sctl & 0x80000000) {
-			u32 sclk = read_vco(priv, dsrc + (doff * 4));
+			u32 sclk = read_vco(clk, dsrc + (doff * 4));
 			u32 sdiv = (sctl & 0x0000003f) + 2;
 			return (sclk * 2) / sdiv;
 		}
 
-		return read_vco(priv, dsrc + (doff * 4));
+		return read_vco(clk, dsrc + (doff * 4));
 	default:
 		return 0;
 	}
 }
 
 static u32
-read_mem(struct gk104_clk_priv *priv)
+read_mem(struct gk104_clk *clk)
 {
-	switch (nv_rd32(priv, 0x1373f4) & 0x0000000f) {
-	case 1: return read_pll(priv, 0x132020);
-	case 2: return read_pll(priv, 0x132000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	switch (nvkm_rd32(device, 0x1373f4) & 0x0000000f) {
+	case 1: return read_pll(clk, 0x132020);
+	case 2: return read_pll(clk, 0x132000);
 	default:
 		return 0;
 	}
 }
 
 static u32
-read_clk(struct gk104_clk_priv *priv, int clk)
+read_clk(struct gk104_clk *clk, int idx)
 {
-	u32 sctl = nv_rd32(priv, 0x137250 + (clk * 4));
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 sctl = nvkm_rd32(device, 0x137250 + (idx * 4));
 	u32 sclk, sdiv;
 
-	if (clk < 7) {
-		u32 ssel = nv_rd32(priv, 0x137100);
-		if (ssel & (1 << clk)) {
-			sclk = read_pll(priv, 0x137000 + (clk * 0x20));
+	if (idx < 7) {
+		u32 ssel = nvkm_rd32(device, 0x137100);
+		if (ssel & (1 << idx)) {
+			sclk = read_pll(clk, 0x137000 + (idx * 0x20));
 			sdiv = 1;
 		} else {
-			sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
 			sdiv = 0;
 		}
 	} else {
-		u32 ssrc = nv_rd32(priv, 0x137160 + (clk * 0x04));
+		u32 ssrc = nvkm_rd32(device, 0x137160 + (idx * 0x04));
 		if ((ssrc & 0x00000003) == 0x00000003) {
-			sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
 			if (ssrc & 0x00000100) {
 				if (ssrc & 0x40000000)
-					sclk = read_pll(priv, 0x1370e0);
+					sclk = read_pll(clk, 0x1370e0);
 				sdiv = 1;
 			} else {
 				sdiv = 0;
 			}
 		} else {
-			sclk = read_div(priv, clk, 0x137160, 0x1371d0);
+			sclk = read_div(clk, idx, 0x137160, 0x1371d0);
 			sdiv = 0;
 		}
 	}
@@ -181,10 +186,11 @@
 }
 
 static int
-gk104_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gk104_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct nvkm_device *device = nv_device(clk);
-	struct gk104_clk_priv *priv = (void *)clk;
+	struct gk104_clk *clk = gk104_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 
 	switch (src) {
 	case nv_clk_src_crystal:
@@ -192,29 +198,29 @@
 	case nv_clk_src_href:
 		return 100000;
 	case nv_clk_src_mem:
-		return read_mem(priv);
+		return read_mem(clk);
 	case nv_clk_src_gpc:
-		return read_clk(priv, 0x00);
+		return read_clk(clk, 0x00);
 	case nv_clk_src_rop:
-		return read_clk(priv, 0x01);
+		return read_clk(clk, 0x01);
 	case nv_clk_src_hubk07:
-		return read_clk(priv, 0x02);
+		return read_clk(clk, 0x02);
 	case nv_clk_src_hubk06:
-		return read_clk(priv, 0x07);
+		return read_clk(clk, 0x07);
 	case nv_clk_src_hubk01:
-		return read_clk(priv, 0x08);
+		return read_clk(clk, 0x08);
 	case nv_clk_src_daemon:
-		return read_clk(priv, 0x0c);
+		return read_clk(clk, 0x0c);
 	case nv_clk_src_vdec:
-		return read_clk(priv, 0x0e);
+		return read_clk(clk, 0x0e);
 	default:
-		nv_error(clk, "invalid clock source %d\n", src);
+		nvkm_error(subdev, "invalid clock source %d\n", src);
 		return -EINVAL;
 	}
 }
 
 static u32
-calc_div(struct gk104_clk_priv *priv, int clk, u32 ref, u32 freq, u32 *ddiv)
+calc_div(struct gk104_clk *clk, int idx, u32 ref, u32 freq, u32 *ddiv)
 {
 	u32 div = min((ref * 2) / freq, (u32)65);
 	if (div < 2)
@@ -225,7 +231,7 @@
 }
 
 static u32
-calc_src(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+calc_src(struct gk104_clk *clk, int idx, u32 freq, u32 *dsrc, u32 *ddiv)
 {
 	u32 sclk;
 
@@ -247,28 +253,29 @@
 	}
 
 	/* otherwise, calculate the closest divider */
-	sclk = read_vco(priv, 0x137160 + (clk * 4));
-	if (clk < 7)
-		sclk = calc_div(priv, clk, sclk, freq, ddiv);
+	sclk = read_vco(clk, 0x137160 + (idx * 4));
+	if (idx < 7)
+		sclk = calc_div(clk, idx, sclk, freq, ddiv);
 	return sclk;
 }
 
 static u32
-calc_pll(struct gk104_clk_priv *priv, int clk, u32 freq, u32 *coef)
+calc_pll(struct gk104_clk *clk, int idx, u32 freq, u32 *coef)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_pll limits;
 	int N, M, P, ret;
 
-	ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+	ret = nvbios_pll_parse(bios, 0x137000 + (idx * 0x20), &limits);
 	if (ret)
 		return 0;
 
-	limits.refclk = read_div(priv, clk, 0x137120, 0x137140);
+	limits.refclk = read_div(clk, idx, 0x137120, 0x137140);
 	if (!limits.refclk)
 		return 0;
 
-	ret = gt215_pll_calc(nv_subdev(priv), &limits, freq, &N, NULL, &M, &P);
+	ret = gt215_pll_calc(subdev, &limits, freq, &N, NULL, &M, &P);
 	if (ret <= 0)
 		return 0;
 
@@ -277,10 +284,10 @@
 }
 
 static int
-calc_clk(struct gk104_clk_priv *priv,
-	 struct nvkm_cstate *cstate, int clk, int dom)
+calc_clk(struct gk104_clk *clk,
+	 struct nvkm_cstate *cstate, int idx, int dom)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
+	struct gk104_clk_info *info = &clk->eng[idx];
 	u32 freq = cstate->domain[dom];
 	u32 src0, div0, div1D, div1P = 0;
 	u32 clk0, clk1 = 0;
@@ -290,16 +297,16 @@
 		return 0;
 
 	/* first possible path, using only dividers */
-	clk0 = calc_src(priv, clk, freq, &src0, &div0);
-	clk0 = calc_div(priv, clk, clk0, freq, &div1D);
+	clk0 = calc_src(clk, idx, freq, &src0, &div0);
+	clk0 = calc_div(clk, idx, clk0, freq, &div1D);
 
 	/* see if we can get any closer using PLLs */
-	if (clk0 != freq && (0x0000ff87 & (1 << clk))) {
-		if (clk <= 7)
-			clk1 = calc_pll(priv, clk, freq, &info->coef);
+	if (clk0 != freq && (0x0000ff87 & (1 << idx))) {
+		if (idx <= 7)
+			clk1 = calc_pll(clk, idx, freq, &info->coef);
 		else
 			clk1 = cstate->domain[nv_clk_src_hubk06];
-		clk1 = calc_div(priv, clk, clk1, freq, &div1P);
+		clk1 = calc_div(clk, idx, clk1, freq, &div1P);
 	}
 
 	/* select the method which gets closest to target freq */
@@ -320,7 +327,7 @@
 			info->mdiv |= 0x80000000;
 			info->mdiv |= div1P << 8;
 		}
-		info->ssel = (1 << clk);
+		info->ssel = (1 << idx);
 		info->dsrc = 0x40000100;
 		info->freq = clk1;
 	}
@@ -329,98 +336,115 @@
 }
 
 static int
-gk104_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gk104_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct gk104_clk_priv *priv = (void *)clk;
+	struct gk104_clk *clk = gk104_clk(base);
 	int ret;
 
-	if ((ret = calc_clk(priv, cstate, 0x00, nv_clk_src_gpc)) ||
-	    (ret = calc_clk(priv, cstate, 0x01, nv_clk_src_rop)) ||
-	    (ret = calc_clk(priv, cstate, 0x02, nv_clk_src_hubk07)) ||
-	    (ret = calc_clk(priv, cstate, 0x07, nv_clk_src_hubk06)) ||
-	    (ret = calc_clk(priv, cstate, 0x08, nv_clk_src_hubk01)) ||
-	    (ret = calc_clk(priv, cstate, 0x0c, nv_clk_src_daemon)) ||
-	    (ret = calc_clk(priv, cstate, 0x0e, nv_clk_src_vdec)))
+	if ((ret = calc_clk(clk, cstate, 0x00, nv_clk_src_gpc)) ||
+	    (ret = calc_clk(clk, cstate, 0x01, nv_clk_src_rop)) ||
+	    (ret = calc_clk(clk, cstate, 0x02, nv_clk_src_hubk07)) ||
+	    (ret = calc_clk(clk, cstate, 0x07, nv_clk_src_hubk06)) ||
+	    (ret = calc_clk(clk, cstate, 0x08, nv_clk_src_hubk01)) ||
+	    (ret = calc_clk(clk, cstate, 0x0c, nv_clk_src_daemon)) ||
+	    (ret = calc_clk(clk, cstate, 0x0e, nv_clk_src_vdec)))
 		return ret;
 
 	return 0;
 }
 
 static void
-gk104_clk_prog_0(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_0(struct gk104_clk *clk, int idx)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
+	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (!info->ssel) {
-		nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x8000003f, info->ddiv);
-		nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
+		nvkm_mask(device, 0x1371d0 + (idx * 0x04), 0x8000003f, info->ddiv);
+		nvkm_wr32(device, 0x137160 + (idx * 0x04), info->dsrc);
 	}
 }
 
 static void
-gk104_clk_prog_1_0(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_1_0(struct gk104_clk *clk, int idx)
 {
-	nv_mask(priv, 0x137100, (1 << clk), 0x00000000);
-	nv_wait(priv, 0x137100, (1 << clk), 0x00000000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x137100, (1 << idx), 0x00000000);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x137100) & (1 << idx)))
+			break;
+	);
 }
 
 static void
-gk104_clk_prog_1_1(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_1_1(struct gk104_clk *clk, int idx)
 {
-	nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000000);
 }
 
 static void
-gk104_clk_prog_2(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_2(struct gk104_clk *clk, int idx)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
-	const u32 addr = 0x137000 + (clk * 0x20);
-	nv_mask(priv, addr + 0x00, 0x00000004, 0x00000000);
-	nv_mask(priv, addr + 0x00, 0x00000001, 0x00000000);
+	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
+	const u32 addr = 0x137000 + (idx * 0x20);
+	nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000000);
+	nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000000);
 	if (info->coef) {
-		nv_wr32(priv, addr + 0x04, info->coef);
-		nv_mask(priv, addr + 0x00, 0x00000001, 0x00000001);
-		nv_wait(priv, addr + 0x00, 0x00020000, 0x00020000);
-		nv_mask(priv, addr + 0x00, 0x00020004, 0x00000004);
+		nvkm_wr32(device, addr + 0x04, info->coef);
+		nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
+				break;
+		);
+		nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
 	}
 }
 
 static void
-gk104_clk_prog_3(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_3(struct gk104_clk *clk, int idx)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
+	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (info->ssel)
-		nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f00, info->mdiv);
+		nvkm_mask(device, 0x137250 + (idx * 0x04), 0x00003f00, info->mdiv);
 	else
-		nv_mask(priv, 0x137250 + (clk * 0x04), 0x0000003f, info->mdiv);
+		nvkm_mask(device, 0x137250 + (idx * 0x04), 0x0000003f, info->mdiv);
 }
 
 static void
-gk104_clk_prog_4_0(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_4_0(struct gk104_clk *clk, int idx)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
+	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (info->ssel) {
-		nv_mask(priv, 0x137100, (1 << clk), info->ssel);
-		nv_wait(priv, 0x137100, (1 << clk), info->ssel);
+		nvkm_mask(device, 0x137100, (1 << idx), info->ssel);
+		nvkm_msec(device, 2000,
+			u32 tmp = nvkm_rd32(device, 0x137100) & (1 << idx);
+			if (tmp == info->ssel)
+				break;
+		);
 	}
 }
 
 static void
-gk104_clk_prog_4_1(struct gk104_clk_priv *priv, int clk)
+gk104_clk_prog_4_1(struct gk104_clk *clk, int idx)
 {
-	struct gk104_clk_info *info = &priv->eng[clk];
+	struct gk104_clk_info *info = &clk->eng[idx];
+	struct nvkm_device *device = clk->base.subdev.device;
 	if (info->ssel) {
-		nv_mask(priv, 0x137160 + (clk * 0x04), 0x40000000, 0x40000000);
-		nv_mask(priv, 0x137160 + (clk * 0x04), 0x00000100, 0x00000100);
+		nvkm_mask(device, 0x137160 + (idx * 0x04), 0x40000000, 0x40000000);
+		nvkm_mask(device, 0x137160 + (idx * 0x04), 0x00000100, 0x00000100);
 	}
 }
 
 static int
-gk104_clk_prog(struct nvkm_clk *clk)
+gk104_clk_prog(struct nvkm_clk *base)
 {
-	struct gk104_clk_priv *priv = (void *)clk;
+	struct gk104_clk *clk = gk104_clk(base);
 	struct {
 		u32 mask;
-		void (*exec)(struct gk104_clk_priv *, int);
+		void (*exec)(struct gk104_clk *, int);
 	} stage[] = {
 		{ 0x007f, gk104_clk_prog_0   }, /* div programming */
 		{ 0x007f, gk104_clk_prog_1_0 }, /* select div mode */
@@ -433,12 +457,12 @@
 	int i, j;
 
 	for (i = 0; i < ARRAY_SIZE(stage); i++) {
-		for (j = 0; j < ARRAY_SIZE(priv->eng); j++) {
+		for (j = 0; j < ARRAY_SIZE(clk->eng); j++) {
 			if (!(stage[i].mask & (1 << j)))
 				continue;
-			if (!priv->eng[j].freq)
+			if (!clk->eng[j].freq)
 				continue;
-			stage[i].exec(priv, j);
+			stage[i].exec(clk, j);
 		}
 	}
 
@@ -446,55 +470,41 @@
 }
 
 static void
-gk104_clk_tidy(struct nvkm_clk *clk)
+gk104_clk_tidy(struct nvkm_clk *base)
 {
-	struct gk104_clk_priv *priv = (void *)clk;
-	memset(priv->eng, 0x00, sizeof(priv->eng));
+	struct gk104_clk *clk = gk104_clk(base);
+	memset(clk->eng, 0x00, sizeof(clk->eng));
 }
 
-static struct nvkm_domain
-gk104_domain[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_gpc    , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
-	{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
-	{ nv_clk_src_rop    , 0x02, NVKM_CLK_DOM_FLAG_CORE },
-	{ nv_clk_src_mem    , 0x03, 0, "memory", 500 },
-	{ nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
-	{ nv_clk_src_hubk01 , 0x05 },
-	{ nv_clk_src_vdec   , 0x06 },
-	{ nv_clk_src_daemon , 0x07 },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+gk104_clk = {
+	.read = gk104_clk_read,
+	.calc = gk104_clk_calc,
+	.prog = gk104_clk_prog,
+	.tidy = gk104_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_gpc    , 0x00, NVKM_CLK_DOM_FLAG_CORE, "core", 2000 },
+		{ nv_clk_src_hubk07 , 0x01, NVKM_CLK_DOM_FLAG_CORE },
+		{ nv_clk_src_rop    , 0x02, NVKM_CLK_DOM_FLAG_CORE },
+		{ nv_clk_src_mem    , 0x03, 0, "memory", 500 },
+		{ nv_clk_src_hubk06 , 0x04, NVKM_CLK_DOM_FLAG_CORE },
+		{ nv_clk_src_hubk01 , 0x05 },
+		{ nv_clk_src_vdec   , 0x06 },
+		{ nv_clk_src_daemon , 0x07 },
+		{ nv_clk_src_max }
+	}
 };
 
-static int
-gk104_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+gk104_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct gk104_clk_priv *priv;
-	int ret;
+	struct gk104_clk *clk;
 
-	ret = nvkm_clk_create(parent, engine, oclass, gk104_domain,
-			      NULL, 0, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	*pclk = &clk->base;
 
-	priv->base.read = gk104_clk_read;
-	priv->base.calc = gk104_clk_calc;
-	priv->base.prog = gk104_clk_prog;
-	priv->base.tidy = gk104_clk_tidy;
-	return 0;
+	return nvkm_clk_ctor(&gk104_clk, device, index, true, &clk->base);
 }
-
-struct nvkm_oclass
-gk104_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-};
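
Several hunks above and below replace the old nv_wait(priv, reg, mask, val) helper with open-coded nvkm_msec()/nvkm_usec() polls. As used in this patch, the macro re-evaluates its statement body until the body executes break or the timeout expires, and evaluates negative on timeout (see the gk20a lock wait below, which tests for < 0). A rough userspace analogue of the idiom, assuming only standard C and POSIX clocks:

    #include <stdbool.h>
    #include <time.h>

    /* Poll done(arg) for up to timeout_ms milliseconds; returns 0 on
     * success, -1 on timeout, mirroring how nvkm_msec(...) < 0 is tested. */
    static int poll_msec(long timeout_ms, bool (*done)(void *), void *arg)
    {
    	struct timespec ts;
    	long now, deadline;

    	clock_gettime(CLOCK_MONOTONIC, &ts);
    	deadline = ts.tv_sec * 1000L + ts.tv_nsec / 1000000L + timeout_ms;
    	for (;;) {
    		if (done(arg))
    			return 0;   /* condition met: the macro's break */
    		clock_gettime(CLOCK_MONOTONIC, &ts);
    		now = ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    		if (now >= deadline)
    			return -1;  /* timed out */
    	}
    }
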
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index 65c5327..254094a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -22,15 +22,12 @@
  * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
  *
  */
-#include <subdev/clk.h>
+#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
+#include "priv.h"
+
+#include <core/tegra.h>
 #include <subdev/timer.h>
 
-#include <core/device.h>
-
-#ifdef __KERNEL__
-#include <nouveau_platform.h>
-#endif
-
 #define MHZ (1000 * 1000)
 
 #define MASK(w)	((1 << w) - 1)
@@ -117,41 +114,42 @@
 	.min_pl = 1, .max_pl = 32,
 };
 
-struct gk20a_clk_priv {
+struct gk20a_clk {
 	struct nvkm_clk base;
 	const struct gk20a_clk_pllg_params *params;
 	u32 m, n, pl;
 	u32 parent_rate;
 };
-#define to_gk20a_clk(base) container_of(base, struct gk20a_clk_priv, base)
 
 static void
-gk20a_pllg_read_mnp(struct gk20a_clk_priv *priv)
+gk20a_pllg_read_mnp(struct gk20a_clk *clk)
 {
+	struct nvkm_device *device = clk->base.subdev.device;
 	u32 val;
 
-	val = nv_rd32(priv, GPCPLL_COEFF);
-	priv->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
-	priv->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
-	priv->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
+	val = nvkm_rd32(device, GPCPLL_COEFF);
+	clk->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
+	clk->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
+	clk->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
 }
 
 static u32
-gk20a_pllg_calc_rate(struct gk20a_clk_priv *priv)
+gk20a_pllg_calc_rate(struct gk20a_clk *clk)
 {
 	u32 rate;
 	u32 divider;
 
-	rate = priv->parent_rate * priv->n;
-	divider = priv->m * pl_to_div[priv->pl];
+	rate = clk->parent_rate * clk->n;
+	divider = clk->m * pl_to_div[clk->pl];
 	do_div(rate, divider);
 
 	return rate / 2;
 }
 
 static int
-gk20a_pllg_calc_mnp(struct gk20a_clk_priv *priv, unsigned long rate)
+gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
 {
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	u32 target_clk_f, ref_clk_f, target_freq;
 	u32 min_vco_f, max_vco_f;
 	u32 low_pl, high_pl, best_pl;
@@ -163,13 +161,13 @@
 	u32 pl;
 
 	target_clk_f = rate * 2 / MHZ;
-	ref_clk_f = priv->parent_rate / MHZ;
+	ref_clk_f = clk->parent_rate / MHZ;
 
-	max_vco_f = priv->params->max_vco;
-	min_vco_f = priv->params->min_vco;
-	best_m = priv->params->max_m;
-	best_n = priv->params->min_n;
-	best_pl = priv->params->min_pl;
+	max_vco_f = clk->params->max_vco;
+	min_vco_f = clk->params->min_vco;
+	best_m = clk->params->max_m;
+	best_n = clk->params->min_n;
+	best_pl = clk->params->min_pl;
 
 	target_vco_f = target_clk_f + target_clk_f / 50;
 	if (max_vco_f < target_vco_f)
@@ -177,13 +175,13 @@
 
 	/* min_pl <= high_pl <= max_pl */
 	high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
-	high_pl = min(high_pl, priv->params->max_pl);
-	high_pl = max(high_pl, priv->params->min_pl);
+	high_pl = min(high_pl, clk->params->max_pl);
+	high_pl = max(high_pl, clk->params->min_pl);
 
 	/* min_pl <= low_pl <= max_pl */
 	low_pl = min_vco_f / target_vco_f;
-	low_pl = min(low_pl, priv->params->max_pl);
-	low_pl = max(low_pl, priv->params->min_pl);
+	low_pl = min(low_pl, clk->params->max_pl);
+	low_pl = max(low_pl, clk->params->min_pl);
 
 	/* Find Indices of high_pl and low_pl */
 	for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
@@ -199,30 +197,30 @@
 		}
 	}
 
-	nv_debug(priv, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
-		 pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
+	nvkm_debug(subdev, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
+		   pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
 
 	/* Select lowest possible VCO */
 	for (pl = low_pl; pl <= high_pl; pl++) {
 		target_vco_f = target_clk_f * pl_to_div[pl];
-		for (m = priv->params->min_m; m <= priv->params->max_m; m++) {
+		for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
 			u_f = ref_clk_f / m;
 
-			if (u_f < priv->params->min_u)
+			if (u_f < clk->params->min_u)
 				break;
-			if (u_f > priv->params->max_u)
+			if (u_f > clk->params->max_u)
 				continue;
 
 			n = (target_vco_f * m) / ref_clk_f;
 			n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;
 
-			if (n > priv->params->max_n)
+			if (n > clk->params->max_n)
 				break;
 
 			for (; n <= n2; n++) {
-				if (n < priv->params->min_n)
+				if (n < clk->params->min_n)
 					continue;
-				if (n > priv->params->max_n)
+				if (n > clk->params->max_n)
 					break;
 
 				vco_f = ref_clk_f * n / m;
@@ -250,71 +248,75 @@
 	WARN_ON(best_delta == ~0);
 
 	if (best_delta != 0)
-		nv_debug(priv, "no best match for target @ %dMHz on gpc_pll",
-			 target_clk_f);
+		nvkm_debug(subdev,
+			   "no best match for target @ %dMHz on gpc_pll",
+			   target_clk_f);
 
-	priv->m = best_m;
-	priv->n = best_n;
-	priv->pl = best_pl;
+	clk->m = best_m;
+	clk->n = best_n;
+	clk->pl = best_pl;
 
-	target_freq = gk20a_pllg_calc_rate(priv) / MHZ;
+	target_freq = gk20a_pllg_calc_rate(clk) / MHZ;
 
-	nv_debug(priv, "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
-		 target_freq, priv->m, priv->n, priv->pl, pl_to_div[priv->pl]);
+	nvkm_debug(subdev,
+		   "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
+		   target_freq, clk->m, clk->n, clk->pl, pl_to_div[clk->pl]);
 	return 0;
 }
 
 static int
-gk20a_pllg_slide(struct gk20a_clk_priv *priv, u32 n)
+gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
 {
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 val;
 	int ramp_timeout;
 
 	/* get old coefficients */
-	val = nv_rd32(priv, GPCPLL_COEFF);
+	val = nvkm_rd32(device, GPCPLL_COEFF);
 	/* do nothing if NDIV is the same */
 	if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
 		return 0;
 
 	/* setup */
-	nv_mask(priv, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
+	nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
 		0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
-	nv_mask(priv, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
+	nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
 		0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
 
 	/* pll slowdown mode */
-	nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
+	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
 		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
 		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
 
 	/* new ndiv ready for ramp */
-	val = nv_rd32(priv, GPCPLL_COEFF);
+	val = nvkm_rd32(device, GPCPLL_COEFF);
 	val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
 	val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
 	udelay(1);
-	nv_wr32(priv, GPCPLL_COEFF, val);
+	nvkm_wr32(device, GPCPLL_COEFF, val);
 
 	/* dynamic ramp to new ndiv */
-	val = nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
+	val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
 	val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
 	udelay(1);
-	nv_wr32(priv, GPCPLL_NDIV_SLOWDOWN, val);
+	nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);
 
 	for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
 		udelay(1);
-		val = nv_rd32(priv, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
+		val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
 		if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
 			break;
 	}
 
 	/* exit slowdown mode */
-	nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
+	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
 		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
 		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
-	nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
+	nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
 
 	if (ramp_timeout <= 0) {
-		nv_error(priv, "gpcpll dynamic ramp timeout\n");
+		nvkm_error(subdev, "gpcpll dynamic ramp timeout\n");
 		return -ETIMEDOUT;
 	}
 
@@ -322,149 +324,147 @@
 }
 
 static void
-_gk20a_pllg_enable(struct gk20a_clk_priv *priv)
+_gk20a_pllg_enable(struct gk20a_clk *clk)
 {
-	nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
-	nv_rd32(priv, GPCPLL_CFG);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
+	nvkm_rd32(device, GPCPLL_CFG);
 }
 
 static void
-_gk20a_pllg_disable(struct gk20a_clk_priv *priv)
+_gk20a_pllg_disable(struct gk20a_clk *clk)
 {
-	nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
-	nv_rd32(priv, GPCPLL_CFG);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
+	nvkm_rd32(device, GPCPLL_CFG);
 }
 
 static int
-_gk20a_pllg_program_mnp(struct gk20a_clk_priv *priv, bool allow_slide)
+_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
 {
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 val, cfg;
 	u32 m_old, pl_old, n_lo;
 
 	/* get old coefficients */
-	val = nv_rd32(priv, GPCPLL_COEFF);
+	val = nvkm_rd32(device, GPCPLL_COEFF);
 	m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
 	pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
 
 	/* do NDIV slide if there is no change in M and PL */
-	cfg = nv_rd32(priv, GPCPLL_CFG);
-	if (allow_slide && priv->m == m_old && priv->pl == pl_old &&
+	cfg = nvkm_rd32(device, GPCPLL_CFG);
+	if (allow_slide && clk->m == m_old && clk->pl == pl_old &&
 	    (cfg & GPCPLL_CFG_ENABLE)) {
-		return gk20a_pllg_slide(priv, priv->n);
+		return gk20a_pllg_slide(clk, clk->n);
 	}
 
 	/* slide down to NDIV_LO */
-	n_lo = DIV_ROUND_UP(m_old * priv->params->min_vco,
-			    priv->parent_rate / MHZ);
+	n_lo = DIV_ROUND_UP(m_old * clk->params->min_vco,
+			    clk->parent_rate / MHZ);
 	if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
-		int ret = gk20a_pllg_slide(priv, n_lo);
+		int ret = gk20a_pllg_slide(clk, n_lo);
 
 		if (ret)
 			return ret;
 	}
 
 	/* split FO-to-bypass jump in halves by setting out divider 1:2 */
 	/* split FO-to-bypass jump in halves by setting out divider 1:2 */
-	nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
 		0x2 << GPC2CLK_OUT_VCODIV_SHIFT);
 
 	/* put PLL in bypass before programming it */
-	val = nv_rd32(priv, SEL_VCO);
+	val = nvkm_rd32(device, SEL_VCO);
 	val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
 	udelay(2);
-	nv_wr32(priv, SEL_VCO, val);
+	nvkm_wr32(device, SEL_VCO, val);
 
 	/* get out from IDDQ */
-	val = nv_rd32(priv, GPCPLL_CFG);
+	val = nvkm_rd32(device, GPCPLL_CFG);
 	if (val & GPCPLL_CFG_IDDQ) {
 		val &= ~GPCPLL_CFG_IDDQ;
-		nv_wr32(priv, GPCPLL_CFG, val);
-		nv_rd32(priv, GPCPLL_CFG);
+		nvkm_wr32(device, GPCPLL_CFG, val);
+		nvkm_rd32(device, GPCPLL_CFG);
 		udelay(2);
 	}
 
-	_gk20a_pllg_disable(priv);
+	_gk20a_pllg_disable(clk);
 
-	nv_debug(priv, "%s: m=%d n=%d pl=%d\n", __func__, priv->m, priv->n,
-		 priv->pl);
+	nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
+		   clk->m, clk->n, clk->pl);
 
-	n_lo = DIV_ROUND_UP(priv->m * priv->params->min_vco,
-			    priv->parent_rate / MHZ);
-	val = priv->m << GPCPLL_COEFF_M_SHIFT;
-	val |= (allow_slide ? n_lo : priv->n) << GPCPLL_COEFF_N_SHIFT;
-	val |= priv->pl << GPCPLL_COEFF_P_SHIFT;
-	nv_wr32(priv, GPCPLL_COEFF, val);
+	n_lo = DIV_ROUND_UP(clk->m * clk->params->min_vco,
+			    clk->parent_rate / MHZ);
+	val = clk->m << GPCPLL_COEFF_M_SHIFT;
+	val |= (allow_slide ? n_lo : clk->n) << GPCPLL_COEFF_N_SHIFT;
+	val |= clk->pl << GPCPLL_COEFF_P_SHIFT;
+	nvkm_wr32(device, GPCPLL_COEFF, val);
 
-	_gk20a_pllg_enable(priv);
+	_gk20a_pllg_enable(clk);
 
-	val = nv_rd32(priv, GPCPLL_CFG);
+	val = nvkm_rd32(device, GPCPLL_CFG);
 	if (val & GPCPLL_CFG_LOCK_DET_OFF) {
 		val &= ~GPCPLL_CFG_LOCK_DET_OFF;
-		nv_wr32(priv, GPCPLL_CFG, val);
+		nvkm_wr32(device, GPCPLL_CFG, val);
 	}
 
-	if (!nvkm_timer_wait_eq(priv, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK,
-				GPCPLL_CFG_LOCK)) {
-		nv_error(priv, "%s: timeout waiting for pllg lock\n", __func__);
+	if (nvkm_usec(device, 300,
+		if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
+			break;
+	) < 0)
 		return -ETIMEDOUT;
-	}
 
 	/* switch to VCO mode */
-	nv_mask(priv, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+	nvkm_mask(device, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
 
 	/* restore out divider 1:1 */
-	val = nv_rd32(priv, GPC2CLK_OUT);
+	val = nvkm_rd32(device, GPC2CLK_OUT);
 	val &= ~GPC2CLK_OUT_VCODIV_MASK;
 	udelay(2);
-	nv_wr32(priv, GPC2CLK_OUT, val);
+	nvkm_wr32(device, GPC2CLK_OUT, val);
 
 	/* slide up to new NDIV */
-	return allow_slide ? gk20a_pllg_slide(priv, priv->n) : 0;
+	return allow_slide ? gk20a_pllg_slide(clk, clk->n) : 0;
 }
 
 static int
-gk20a_pllg_program_mnp(struct gk20a_clk_priv *priv)
+gk20a_pllg_program_mnp(struct gk20a_clk *clk)
 {
 	int err;
 
-	err = _gk20a_pllg_program_mnp(priv, true);
+	err = _gk20a_pllg_program_mnp(clk, true);
 	if (err)
-		err = _gk20a_pllg_program_mnp(priv, false);
+		err = _gk20a_pllg_program_mnp(clk, false);
 
 	return err;
 }
 
 static void
-gk20a_pllg_disable(struct gk20a_clk_priv *priv)
+gk20a_pllg_disable(struct gk20a_clk *clk)
 {
+	struct nvkm_device *device = clk->base.subdev.device;
 	u32 val;
 
 	/* slide to VCO min */
-	val = nv_rd32(priv, GPCPLL_CFG);
+	val = nvkm_rd32(device, GPCPLL_CFG);
 	if (val & GPCPLL_CFG_ENABLE) {
 		u32 coeff, m, n_lo;
 
-		coeff = nv_rd32(priv, GPCPLL_COEFF);
+		coeff = nvkm_rd32(device, GPCPLL_COEFF);
 		m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
-		n_lo = DIV_ROUND_UP(m * priv->params->min_vco,
-				    priv->parent_rate / MHZ);
-		gk20a_pllg_slide(priv, n_lo);
+		n_lo = DIV_ROUND_UP(m * clk->params->min_vco,
+				    clk->parent_rate / MHZ);
+		gk20a_pllg_slide(clk, n_lo);
 	}
 
 	/* put PLL in bypass before disabling it */
-	nv_mask(priv, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
 
-	_gk20a_pllg_disable(priv);
+	_gk20a_pllg_disable(clk);
 }
 
 #define GK20A_CLK_GPC_MDIV 1000
 
-static struct nvkm_domain
-gk20a_domains[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
-	{ nv_clk_src_max }
-};
-
 static struct nvkm_pstate
 gk20a_pstates[] = {
 	{
@@ -560,87 +560,99 @@
 };
 
 static int
-gk20a_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct gk20a_clk_priv *priv = (void *)clk;
+	struct gk20a_clk *clk = gk20a_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 
 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(clk)->crystal;
+		return device->crystal;
 	case nv_clk_src_gpc:
-		gk20a_pllg_read_mnp(priv);
-		return gk20a_pllg_calc_rate(priv) / GK20A_CLK_GPC_MDIV;
+		gk20a_pllg_read_mnp(clk);
+		return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
 	default:
-		nv_error(clk, "invalid clock source %d\n", src);
+		nvkm_error(subdev, "invalid clock source %d\n", src);
 		return -EINVAL;
 	}
 }
 
 static int
-gk20a_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct gk20a_clk_priv *priv = (void *)clk;
+	struct gk20a_clk *clk = gk20a_clk(base);
 
-	return gk20a_pllg_calc_mnp(priv, cstate->domain[nv_clk_src_gpc] *
+	return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
 					 GK20A_CLK_GPC_MDIV);
 }
 
 static int
-gk20a_clk_prog(struct nvkm_clk *clk)
+gk20a_clk_prog(struct nvkm_clk *base)
 {
-	struct gk20a_clk_priv *priv = (void *)clk;
+	struct gk20a_clk *clk = gk20a_clk(base);
 
-	return gk20a_pllg_program_mnp(priv);
+	return gk20a_pllg_program_mnp(clk);
 }
 
 static void
-gk20a_clk_tidy(struct nvkm_clk *clk)
+gk20a_clk_tidy(struct nvkm_clk *base)
 {
 }
 
-static int
-gk20a_clk_fini(struct nvkm_object *object, bool suspend)
+static void
+gk20a_clk_fini(struct nvkm_clk *base)
 {
-	struct gk20a_clk_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_clk_fini(&priv->base, false);
-
-	gk20a_pllg_disable(priv);
-
-	return ret;
+	struct gk20a_clk *clk = gk20a_clk(base);
+	gk20a_pllg_disable(clk);
 }
 
 static int
-gk20a_clk_init(struct nvkm_object *object)
+gk20a_clk_init(struct nvkm_clk *base)
 {
-	struct gk20a_clk_priv *priv = (void *)object;
+	struct gk20a_clk *clk = gk20a_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	int ret;
 
-	nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
+	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
 
-	ret = nvkm_clk_init(&priv->base);
-	if (ret)
-		return ret;
-
-	ret = gk20a_clk_prog(&priv->base);
+	ret = gk20a_clk_prog(&clk->base);
 	if (ret) {
-		nv_error(priv, "cannot initialize clock\n");
+		nvkm_error(subdev, "cannot initialize clock\n");
 		return ret;
 	}
 
 	return 0;
 }
 
-static int
-gk20a_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+static const struct nvkm_clk_func
+gk20a_clk = {
+	.init = gk20a_clk_init,
+	.fini = gk20a_clk_fini,
+	.read = gk20a_clk_read,
+	.calc = gk20a_clk_calc,
+	.prog = gk20a_clk_prog,
+	.tidy = gk20a_clk_tidy,
+	.pstates = gk20a_pstates,
+	.nr_pstates = ARRAY_SIZE(gk20a_pstates),
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
+		{ nv_clk_src_max }
+	}
+};
+
+int
+gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct gk20a_clk_priv *priv;
-	struct nouveau_platform_device *plat;
-	int ret;
-	int i;
+	struct nvkm_device_tegra *tdev = device->func->tegra(device);
+	struct gk20a_clk *clk;
+	int ret, i;
+
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	*pclk = &clk->base;
 
 	/* Finish initializing the pstates */
 	for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) {
@@ -648,33 +660,11 @@
 		gk20a_pstates[i].pstate = i + 1;
 	}
 
-	ret = nvkm_clk_create(parent, engine, oclass, gk20a_domains,
-			      gk20a_pstates, ARRAY_SIZE(gk20a_pstates),
-			      true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	clk->params = &gk20a_pllg_params;
+	clk->parent_rate = clk_get_rate(tdev->clk);
 
-	priv->params = &gk20a_pllg_params;
-
-	plat = nv_device_to_platform(nv_device(parent));
-	priv->parent_rate = clk_get_rate(plat->gpu->clk);
-	nv_info(priv, "parent clock rate: %d Mhz\n", priv->parent_rate / MHZ);
-
-	priv->base.read = gk20a_clk_read;
-	priv->base.calc = gk20a_clk_calc;
-	priv->base.prog = gk20a_clk_prog;
-	priv->base.tidy = gk20a_clk_tidy;
-	return 0;
+	ret = nvkm_clk_ctor(&gk20a_clk, device, index, true, &clk->base);
+	nvkm_info(&clk->base.subdev, "parent clock rate: %d MHz\n",
+		  clk->parent_rate / MHZ);
+	return ret;
 }
-
-struct nvkm_oclass
-gk20a_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0xea),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_clk_ctor,
-		.dtor = _nvkm_subdev_dtor,
-		.init = gk20a_clk_init,
-		.fini = gk20a_clk_fini,
-	},
-};
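
The gk20a conversion above is the template for the whole series: the chip-specific state embeds struct nvkm_clk as "base", and the old (void *)clk downcasts become an explicit container_of() via the gk20a_clk() macro. A minimal, self-contained sketch of the idiom follows; the struct fields are illustrative stand-ins, not the full nvkm definitions.

	#include <stddef.h>

	typedef unsigned int u32;

	/* as in <linux/kernel.h> */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct nvkm_clk {
		int dummy;
	};

	struct gk20a_clk {
		struct nvkm_clk base;	/* embedded base object */
		u32 parent_rate;
	};

	#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)

	static u32
	gk20a_clk_parent_rate(struct nvkm_clk *base)
	{
		/* recover the containing gk20a_clk from the embedded
		 * base pointer handed to the nvkm_clk_func hooks */
		struct gk20a_clk *clk = gk20a_clk(base);
		return clk->parent_rate;
	}
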
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 065e9f5..07feae6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -22,56 +22,58 @@
  * Authors: Ben Skeggs
  *          Roy Spliet
  */
+#define gt215_clk(p) container_of((p), struct gt215_clk, base)
 #include "gt215.h"
 #include "pll.h"
 
-#include <core/device.h>
 #include <engine/fifo.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/timer.h>
 
-struct gt215_clk_priv {
+struct gt215_clk {
 	struct nvkm_clk base;
 	struct gt215_clk_info eng[nv_clk_src_max];
 };
 
-static u32 read_clk(struct gt215_clk_priv *, int, bool);
-static u32 read_pll(struct gt215_clk_priv *, int, u32);
+static u32 read_clk(struct gt215_clk *, int, bool);
+static u32 read_pll(struct gt215_clk *, int, u32);
 
 static u32
-read_vco(struct gt215_clk_priv *priv, int clk)
+read_vco(struct gt215_clk *clk, int idx)
 {
-	u32 sctl = nv_rd32(priv, 0x4120 + (clk * 4));
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
 
 	switch (sctl & 0x00000030) {
 	case 0x00000000:
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	case 0x00000020:
-		return read_pll(priv, 0x41, 0x00e820);
+		return read_pll(clk, 0x41, 0x00e820);
 	case 0x00000030:
-		return read_pll(priv, 0x42, 0x00e8a0);
+		return read_pll(clk, 0x42, 0x00e8a0);
 	default:
 		return 0;
 	}
 }
 
 static u32
-read_clk(struct gt215_clk_priv *priv, int clk, bool ignore_en)
+read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
 {
+	struct nvkm_device *device = clk->base.subdev.device;
 	u32 sctl, sdiv, sclk;
 
 	/* refclk for the 0xe8xx plls is a fixed frequency */
-	if (clk >= 0x40) {
-		if (nv_device(priv)->chipset == 0xaf) {
+	if (idx >= 0x40) {
+		if (device->chipset == 0xaf) {
 			/* no joke.. seriously.. sigh.. */
-			return nv_rd32(priv, 0x00471c) * 1000;
+			return nvkm_rd32(device, 0x00471c) * 1000;
 		}
 
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	}
 
-	sctl = nv_rd32(priv, 0x4120 + (clk * 4));
+	sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
 	if (!ignore_en && !(sctl & 0x00000100))
 		return 0;
 
@@ -83,7 +85,7 @@
 	switch (sctl & 0x00003000) {
 	case 0x00000000:
 		if (!(sctl & 0x00000200))
-			return nv_device(priv)->crystal;
+			return device->crystal;
 		return 0;
 	case 0x00002000:
 		if (sctl & 0x00000040)
@@ -94,7 +96,7 @@
 		if (!(sctl & 0x00000001))
 			return 0;
 
-		sclk = read_vco(priv, clk);
+		sclk = read_vco(clk, idx);
 		sdiv = ((sctl & 0x003f0000) >> 16) + 2;
 		return (sclk * 2) / sdiv;
 	default:
@@ -103,14 +105,15 @@
 }
 
 static u32
-read_pll(struct gt215_clk_priv *priv, int clk, u32 pll)
+read_pll(struct gt215_clk *clk, int idx, u32 pll)
 {
-	u32 ctrl = nv_rd32(priv, pll + 0);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, pll + 0);
 	u32 sclk = 0, P = 1, N = 1, M = 1;
 
 	if (!(ctrl & 0x00000008)) {
 		if (ctrl & 0x00000001) {
-			u32 coef = nv_rd32(priv, pll + 4);
+			u32 coef = nvkm_rd32(device, pll + 4);
 			M = (coef & 0x000000ff) >> 0;
 			N = (coef & 0x0000ff00) >> 8;
 			P = (coef & 0x003f0000) >> 16;
@@ -121,10 +124,10 @@
 			if ((pll & 0x00ff00) == 0x00e800)
 				P = 1;
 
-			sclk = read_clk(priv, 0x00 + clk, false);
+			sclk = read_clk(clk, 0x00 + idx, false);
 		}
 	} else {
-		sclk = read_clk(priv, 0x10 + clk, false);
+		sclk = read_clk(clk, 0x10 + idx, false);
 	}
 
 	if (M * P)
@@ -134,41 +137,43 @@
 }
 
 static int
-gt215_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+gt215_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct gt215_clk_priv *priv = (void *)clk;
+	struct gt215_clk *clk = gt215_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 hsrc;
 
 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	case nv_clk_src_core:
 	case nv_clk_src_core_intm:
-		return read_pll(priv, 0x00, 0x4200);
+		return read_pll(clk, 0x00, 0x4200);
 	case nv_clk_src_shader:
-		return read_pll(priv, 0x01, 0x4220);
+		return read_pll(clk, 0x01, 0x4220);
 	case nv_clk_src_mem:
-		return read_pll(priv, 0x02, 0x4000);
+		return read_pll(clk, 0x02, 0x4000);
 	case nv_clk_src_disp:
-		return read_clk(priv, 0x20, false);
+		return read_clk(clk, 0x20, false);
 	case nv_clk_src_vdec:
-		return read_clk(priv, 0x21, false);
+		return read_clk(clk, 0x21, false);
 	case nv_clk_src_daemon:
-		return read_clk(priv, 0x25, false);
+		return read_clk(clk, 0x25, false);
 	case nv_clk_src_host:
-		hsrc = (nv_rd32(priv, 0xc040) & 0x30000000) >> 28;
+		hsrc = (nvkm_rd32(device, 0xc040) & 0x30000000) >> 28;
 		switch (hsrc) {
 		case 0:
-			return read_clk(priv, 0x1d, false);
+			return read_clk(clk, 0x1d, false);
 		case 2:
 		case 3:
 			return 277000;
 		default:
-			nv_error(clk, "unknown HOST clock source %d\n", hsrc);
+			nvkm_error(subdev, "unknown HOST clock source %d\n", hsrc);
 			return -EINVAL;
 		}
 	default:
-		nv_error(clk, "invalid clock source %d\n", src);
+		nvkm_error(subdev, "invalid clock source %d\n", src);
 		return -EINVAL;
 	}
 
@@ -176,10 +181,10 @@
 }
 
 int
-gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
+gt215_clk_info(struct nvkm_clk *base, int idx, u32 khz,
 	       struct gt215_clk_info *info)
 {
-	struct gt215_clk_priv *priv = (void *)clock;
+	struct gt215_clk *clk = gt215_clk(base);
 	u32 oclk, sclk, sdiv;
 	s32 diff;
 
@@ -196,7 +201,7 @@
 		info->clk = 0x00002140;
 		return khz;
 	default:
-		sclk = read_vco(priv, clk);
+		sclk = read_vco(clk, idx);
 		sdiv = min((sclk * 2) / khz, (u32)65);
 		oclk = (sclk * 2) / sdiv;
 		diff = ((khz + 3000) - oclk);
@@ -224,11 +229,11 @@
 }
 
 int
-gt215_pll_info(struct nvkm_clk *clock, int clk, u32 pll, u32 khz,
+gt215_pll_info(struct nvkm_clk *base, int idx, u32 pll, u32 khz,
 	       struct gt215_clk_info *info)
 {
-	struct nvkm_bios *bios = nvkm_bios(clock);
-	struct gt215_clk_priv *priv = (void *)clock;
+	struct gt215_clk *clk = gt215_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	struct nvbios_pll limits;
 	int P, N, M, diff;
 	int ret;
@@ -237,22 +242,22 @@
 
 	/* If we can get within [-2, 3) MHz of a divider, we'll disable the
 	 * PLL and use the divider instead. */
-	ret = gt215_clk_info(clock, clk, khz, info);
+	ret = gt215_clk_info(&clk->base, idx, khz, info);
 	diff = khz - ret;
 	if (!pll || (diff >= -2000 && diff < 3000)) {
 		goto out;
 	}
 
 	/* Try with PLL */
-	ret = nvbios_pll_parse(bios, pll, &limits);
+	ret = nvbios_pll_parse(subdev->device->bios, pll, &limits);
 	if (ret)
 		return ret;
 
-	ret = gt215_clk_info(clock, clk - 0x10, limits.refclk, info);
+	ret = gt215_clk_info(&clk->base, idx - 0x10, limits.refclk, info);
 	if (ret != limits.refclk)
 		return -EINVAL;
 
-	ret = gt215_pll_calc(nv_subdev(priv), &limits, khz, &N, NULL, &M, &P);
+	ret = gt215_pll_calc(subdev, &limits, khz, &N, NULL, &M, &P);
 	if (ret >= 0) {
 		info->pll = (P << 16) | (N << 8) | M;
 	}
@@ -263,22 +268,22 @@
 }
 
 static int
-calc_clk(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate,
-	 int clk, u32 pll, int idx)
+calc_clk(struct gt215_clk *clk, struct nvkm_cstate *cstate,
+	 int idx, u32 pll, int dom)
 {
-	int ret = gt215_pll_info(&priv->base, clk, pll, cstate->domain[idx],
-				 &priv->eng[idx]);
+	int ret = gt215_pll_info(&clk->base, idx, pll, cstate->domain[dom],
+				 &clk->eng[dom]);
 	if (ret >= 0)
 		return 0;
 	return ret;
 }
 
 static int
-calc_host(struct gt215_clk_priv *priv, struct nvkm_cstate *cstate)
+calc_host(struct gt215_clk *clk, struct nvkm_cstate *cstate)
 {
 	int ret = 0;
 	u32 kHz = cstate->domain[nv_clk_src_host];
-	struct gt215_clk_info *info = &priv->eng[nv_clk_src_host];
+	struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
 
 	if (kHz == 277000) {
 		info->clk = 0;
@@ -288,7 +293,7 @@
 
 	info->host_out = NVA3_HOST_CLK;
 
-	ret = gt215_clk_info(&priv->base, 0x1d, kHz, info);
+	ret = gt215_clk_info(&clk->base, 0x1d, kHz, info);
 	if (ret >= 0)
 		return 0;
 
@@ -298,21 +303,33 @@
 int
 gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(clk);
+	struct nvkm_device *device = clk->subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
 
 	/* halt and idle execution engines */
-	nv_mask(clk, 0x020060, 0x00070000, 0x00000000);
-	nv_mask(clk, 0x002504, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x020060, 0x00070000, 0x00000000);
+	nvkm_mask(device, 0x002504, 0x00000001, 0x00000001);
 	/* Wait until the interrupt handler is finished */
-	if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000))
+	if (nvkm_msec(device, 2000,
+		if (!nvkm_rd32(device, 0x000100))
+			break;
+	) < 0)
 		return -EBUSY;
 
-	if (pfifo)
-		pfifo->pause(pfifo, flags);
+	if (fifo)
+		nvkm_fifo_pause(fifo, flags);
 
-	if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010))
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x002504) & 0x00000010)
+			break;
+	) < 0)
 		return -EIO;
-	if (!nv_wait(clk, 0x00251c, 0x0000003f, 0x0000003f))
+
+	if (nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, 0x002504) & 0x0000003f;
+		if (tmp == 0x0000003f)
+			break;
+	) < 0)
 		return -EIO;
 
 	return 0;
@@ -321,86 +338,94 @@
 void
 gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags)
 {
-	struct nvkm_fifo *pfifo = nvkm_fifo(clk);
+	struct nvkm_device *device = clk->subdev.device;
+	struct nvkm_fifo *fifo = device->fifo;
 
-	if (pfifo && flags)
-		pfifo->start(pfifo, flags);
+	if (fifo && flags)
+		nvkm_fifo_start(fifo, flags);
 
-	nv_mask(clk, 0x002504, 0x00000001, 0x00000000);
-	nv_mask(clk, 0x020060, 0x00070000, 0x00040000);
+	nvkm_mask(device, 0x002504, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x020060, 0x00070000, 0x00040000);
 }
 
 static void
-disable_clk_src(struct gt215_clk_priv *priv, u32 src)
+disable_clk_src(struct gt215_clk *clk, u32 src)
 {
-	nv_mask(priv, src, 0x00000100, 0x00000000);
-	nv_mask(priv, src, 0x00000001, 0x00000000);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, src, 0x00000100, 0x00000000);
+	nvkm_mask(device, src, 0x00000001, 0x00000000);
 }
 
 static void
-prog_pll(struct gt215_clk_priv *priv, int clk, u32 pll, int idx)
+prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom)
 {
-	struct gt215_clk_info *info = &priv->eng[idx];
-	const u32 src0 = 0x004120 + (clk * 4);
-	const u32 src1 = 0x004160 + (clk * 4);
+	struct gt215_clk_info *info = &clk->eng[dom];
+	struct nvkm_device *device = clk->base.subdev.device;
+	const u32 src0 = 0x004120 + (idx * 4);
+	const u32 src1 = 0x004160 + (idx * 4);
 	const u32 ctrl = pll + 0;
 	const u32 coef = pll + 4;
 	u32 bypass;
 
 	if (info->pll) {
 		/* Always start from a non-PLL clock */
-		bypass = nv_rd32(priv, ctrl)  & 0x00000008;
+		bypass = nvkm_rd32(device, ctrl)  & 0x00000008;
 		if (!bypass) {
-			nv_mask(priv, src1, 0x00000101, 0x00000101);
-			nv_mask(priv, ctrl, 0x00000008, 0x00000008);
+			nvkm_mask(device, src1, 0x00000101, 0x00000101);
+			nvkm_mask(device, ctrl, 0x00000008, 0x00000008);
 			udelay(20);
 		}
 
-		nv_mask(priv, src0, 0x003f3141, 0x00000101 | info->clk);
-		nv_wr32(priv, coef, info->pll);
-		nv_mask(priv, ctrl, 0x00000015, 0x00000015);
-		nv_mask(priv, ctrl, 0x00000010, 0x00000000);
-		if (!nv_wait(priv, ctrl, 0x00020000, 0x00020000)) {
-			nv_mask(priv, ctrl, 0x00000010, 0x00000010);
-			nv_mask(priv, src0, 0x00000101, 0x00000000);
+		nvkm_mask(device, src0, 0x003f3141, 0x00000101 | info->clk);
+		nvkm_wr32(device, coef, info->pll);
+		nvkm_mask(device, ctrl, 0x00000015, 0x00000015);
+		nvkm_mask(device, ctrl, 0x00000010, 0x00000000);
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, ctrl) & 0x00020000)
+				break;
+		) < 0) {
+			nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
+			nvkm_mask(device, src0, 0x00000101, 0x00000000);
 			return;
 		}
-		nv_mask(priv, ctrl, 0x00000010, 0x00000010);
-		nv_mask(priv, ctrl, 0x00000008, 0x00000000);
-		disable_clk_src(priv, src1);
+		nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
+		nvkm_mask(device, ctrl, 0x00000008, 0x00000000);
+		disable_clk_src(clk, src1);
 	} else {
-		nv_mask(priv, src1, 0x003f3141, 0x00000101 | info->clk);
-		nv_mask(priv, ctrl, 0x00000018, 0x00000018);
+		nvkm_mask(device, src1, 0x003f3141, 0x00000101 | info->clk);
+		nvkm_mask(device, ctrl, 0x00000018, 0x00000018);
 		udelay(20);
-		nv_mask(priv, ctrl, 0x00000001, 0x00000000);
-		disable_clk_src(priv, src0);
+		nvkm_mask(device, ctrl, 0x00000001, 0x00000000);
+		disable_clk_src(clk, src0);
 	}
 }
 
 static void
-prog_clk(struct gt215_clk_priv *priv, int clk, int idx)
+prog_clk(struct gt215_clk *clk, int idx, int dom)
 {
-	struct gt215_clk_info *info = &priv->eng[idx];
-	nv_mask(priv, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | info->clk);
+	struct gt215_clk_info *info = &clk->eng[dom];
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk);
 }
 
 static void
-prog_host(struct gt215_clk_priv *priv)
+prog_host(struct gt215_clk *clk)
 {
-	struct gt215_clk_info *info = &priv->eng[nv_clk_src_host];
-	u32 hsrc = (nv_rd32(priv, 0xc040));
+	struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 hsrc = (nvkm_rd32(device, 0xc040));
 
 	switch (info->host_out) {
 	case NVA3_HOST_277:
 		if ((hsrc & 0x30000000) == 0) {
-			nv_wr32(priv, 0xc040, hsrc | 0x20000000);
-			disable_clk_src(priv, 0x4194);
+			nvkm_wr32(device, 0xc040, hsrc | 0x20000000);
+			disable_clk_src(clk, 0x4194);
 		}
 		break;
 	case NVA3_HOST_CLK:
-		prog_clk(priv, 0x1d, nv_clk_src_host);
+		prog_clk(clk, 0x1d, nv_clk_src_host);
 		if ((hsrc & 0x30000000) >= 0x20000000) {
-			nv_wr32(priv, 0xc040, hsrc & ~0x30000000);
+			nvkm_wr32(device, 0xc040, hsrc & ~0x30000000);
 		}
 		break;
 	default:
@@ -408,44 +433,45 @@
 	}
 
 	/* This seems to be a clock gating factor on idle, always set to 64 */
-	nv_wr32(priv, 0xc044, 0x3e);
+	nvkm_wr32(device, 0xc044, 0x3e);
 }
 
 static void
-prog_core(struct gt215_clk_priv *priv, int idx)
+prog_core(struct gt215_clk *clk, int dom)
 {
-	struct gt215_clk_info *info = &priv->eng[idx];
-	u32 fb_delay = nv_rd32(priv, 0x10002c);
+	struct gt215_clk_info *info = &clk->eng[dom];
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 fb_delay = nvkm_rd32(device, 0x10002c);
 
 	if (fb_delay < info->fb_delay)
-		nv_wr32(priv, 0x10002c, info->fb_delay);
+		nvkm_wr32(device, 0x10002c, info->fb_delay);
 
-	prog_pll(priv, 0x00, 0x004200, idx);
+	prog_pll(clk, 0x00, 0x004200, dom);
 
 	if (fb_delay > info->fb_delay)
-		nv_wr32(priv, 0x10002c, info->fb_delay);
+		nvkm_wr32(device, 0x10002c, info->fb_delay);
 }
 
 static int
-gt215_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+gt215_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct gt215_clk_priv *priv = (void *)clk;
-	struct gt215_clk_info *core = &priv->eng[nv_clk_src_core];
+	struct gt215_clk *clk = gt215_clk(base);
+	struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
 	int ret;
 
-	if ((ret = calc_clk(priv, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
-	    (ret = calc_clk(priv, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
-	    (ret = calc_clk(priv, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
-	    (ret = calc_clk(priv, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
-	    (ret = calc_host(priv, cstate)))
+	if ((ret = calc_clk(clk, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
+	    (ret = calc_clk(clk, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
+	    (ret = calc_clk(clk, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
+	    (ret = calc_clk(clk, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
+	    (ret = calc_host(clk, cstate)))
 		return ret;
 
 	/* XXX: Should be reading the highest bit in the VBIOS clock to decide
 	 * whether to use a PLL or not... but using a PLL defeats the purpose */
 	if (core->pll) {
-		ret = gt215_clk_info(clk, 0x10,
+		ret = gt215_clk_info(&clk->base, 0x10,
 				     cstate->domain[nv_clk_src_core_intm],
-				     &priv->eng[nv_clk_src_core_intm]);
+				     &clk->eng[nv_clk_src_core_intm]);
 		if (ret < 0)
 			return ret;
 	}
@@ -454,81 +480,67 @@
 }
 
 static int
-gt215_clk_prog(struct nvkm_clk *clk)
+gt215_clk_prog(struct nvkm_clk *base)
 {
-	struct gt215_clk_priv *priv = (void *)clk;
-	struct gt215_clk_info *core = &priv->eng[nv_clk_src_core];
+	struct gt215_clk *clk = gt215_clk(base);
+	struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
 	int ret = 0;
 	unsigned long flags;
 	unsigned long *f = &flags;
 
-	ret = gt215_clk_pre(clk, f);
+	ret = gt215_clk_pre(&clk->base, f);
 	if (ret)
 		goto out;
 
 	if (core->pll)
-		prog_core(priv, nv_clk_src_core_intm);
+		prog_core(clk, nv_clk_src_core_intm);
 
-	prog_core(priv,  nv_clk_src_core);
-	prog_pll(priv, 0x01, 0x004220, nv_clk_src_shader);
-	prog_clk(priv, 0x20, nv_clk_src_disp);
-	prog_clk(priv, 0x21, nv_clk_src_vdec);
-	prog_host(priv);
+	prog_core(clk,  nv_clk_src_core);
+	prog_pll(clk, 0x01, 0x004220, nv_clk_src_shader);
+	prog_clk(clk, 0x20, nv_clk_src_disp);
+	prog_clk(clk, 0x21, nv_clk_src_vdec);
+	prog_host(clk);
 
 out:
 	if (ret == -EBUSY)
 		f = NULL;
 
-	gt215_clk_post(clk, f);
+	gt215_clk_post(&clk->base, f);
 	return ret;
 }
 
 static void
-gt215_clk_tidy(struct nvkm_clk *clk)
+gt215_clk_tidy(struct nvkm_clk *base)
 {
 }
 
-static struct nvkm_domain
-gt215_domain[] = {
-	{ nv_clk_src_crystal  , 0xff },
-	{ nv_clk_src_core     , 0x00, 0, "core", 1000 },
-	{ nv_clk_src_shader   , 0x01, 0, "shader", 1000 },
-	{ nv_clk_src_mem      , 0x02, 0, "memory", 1000 },
-	{ nv_clk_src_vdec     , 0x03 },
-	{ nv_clk_src_disp     , 0x04 },
-	{ nv_clk_src_host     , 0x05 },
-	{ nv_clk_src_core_intm, 0x06 },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+gt215_clk = {
+	.read = gt215_clk_read,
+	.calc = gt215_clk_calc,
+	.prog = gt215_clk_prog,
+	.tidy = gt215_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal  , 0xff },
+		{ nv_clk_src_core     , 0x00, 0, "core", 1000 },
+		{ nv_clk_src_shader   , 0x01, 0, "shader", 1000 },
+		{ nv_clk_src_mem      , 0x02, 0, "memory", 1000 },
+		{ nv_clk_src_vdec     , 0x03 },
+		{ nv_clk_src_disp     , 0x04 },
+		{ nv_clk_src_host     , 0x05 },
+		{ nv_clk_src_core_intm, 0x06 },
+		{ nv_clk_src_max }
+	}
 };
 
-static int
-gt215_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+gt215_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct gt215_clk_priv *priv;
-	int ret;
+	struct gt215_clk *clk;
 
-	ret = nvkm_clk_create(parent, engine, oclass, gt215_domain,
-			      NULL, 0, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	*pclk = &clk->base;
 
-	priv->base.read = gt215_clk_read;
-	priv->base.calc = gt215_clk_calc;
-	priv->base.prog = gt215_clk_prog;
-	priv->base.tidy = gt215_clk_tidy;
-	return 0;
+	return nvkm_clk_ctor(&gt215_clk, device, index, true, &clk->base);
 }
-
-struct nvkm_oclass
-gt215_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0xa3),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-};
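
Besides the container_of() change, gt215.c swaps the old nv_wait(clk, reg, mask, value) helper for open-coded nvkm_msec() loops: the body polls a register, break signals success, and a negative return means the 2000ms budget expired. Below is a rough functional model of that contract, assuming kernel-context helpers (udelay and the nvkm register accessors); the real nvkm_msec is a statement-expression macro from the timer subdev, not this function.

	/* poll cond() for up to 'ms' milliseconds; >= 0 on success,
	 * negative on timeout -- the same contract the nvkm_msec()
	 * call sites above rely on */
	static int
	poll_msec(struct nvkm_device *device, int ms,
		  bool (*cond)(struct nvkm_device *))
	{
		int us;
		for (us = 0; us < ms * 1000; us++) {
			if (cond(device))
				return us;
			udelay(1);
		}
		return -ETIMEDOUT;
	}

	static bool
	gt215_intr_idle(struct nvkm_device *device)
	{
		/* mirrors the first wait in gt215_clk_pre() */
		return nvkm_rd32(device, 0x000100) == 0;
	}

	/* usage:
	 *	if (poll_msec(device, 2000, gt215_intr_idle) < 0)
	 *		return -EBUSY;
	 */
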
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
index b447d9c..8865b59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
@@ -1,6 +1,6 @@
 #ifndef __NVKM_CLK_NVA3_H__
 #define __NVKM_CLK_NVA3_H__
-#include <subdev/clk.h>
+#include "priv.h"
 
 struct gt215_clk_info {
 	u32 clk;
@@ -13,6 +13,6 @@
 };
 
 int  gt215_pll_info(struct nvkm_clk *, int, u32, u32, struct gt215_clk_info *);
-int  gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags);
-void gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags);
+int  gt215_clk_pre(struct nvkm_clk *, unsigned long *flags);
+void gt215_clk_post(struct nvkm_clk *, unsigned long *flags);
 #endif
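
The gt215_clk_pre()/gt215_clk_post() pair declared here brackets every reclock on this family (gt215 and mcp77 both use it): pre halts the execution engines and pauses the fifo, post restarts them. The call sites above share one convention worth spelling out: if pre fails with -EBUSY the fifo was never paused, so post must be called with NULL flags. A condensed sketch of that calling pattern:

	static int
	reclock_sketch(struct nvkm_clk *clk)
	{
		unsigned long flags;
		unsigned long *f = &flags;
		int ret;

		ret = gt215_clk_pre(clk, f);
		if (ret)
			goto out;

		/* ... program PLLs/dividers while the engines are idle ... */

	out:
		if (ret == -EBUSY)
			f = NULL;	/* fifo was never paused; skip restart */
		gt215_clk_post(clk, f);
		return ret;
	}
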
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
index c54417b..1c21b8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
@@ -21,15 +21,15 @@
  *
  * Authors: Ben Skeggs
  */
+#define mcp77_clk(p) container_of((p), struct mcp77_clk, base)
 #include "gt215.h"
 #include "pll.h"
 
-#include <core/device.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/timer.h>
 
-struct mcp77_clk_priv {
+struct mcp77_clk {
 	struct nvkm_clk base;
 	enum nv_clk_src csrc, ssrc, vsrc;
 	u32 cctrl, sctrl;
@@ -39,27 +39,29 @@
 };
 
 static u32
-read_div(struct nvkm_clk *clk)
+read_div(struct mcp77_clk *clk)
 {
-	return nv_rd32(clk, 0x004600);
+	struct nvkm_device *device = clk->base.subdev.device;
+	return nvkm_rd32(device, 0x004600);
 }
 
 static u32
-read_pll(struct nvkm_clk *clk, u32 base)
+read_pll(struct mcp77_clk *clk, u32 base)
 {
-	u32 ctrl = nv_rd32(clk, base + 0);
-	u32 coef = nv_rd32(clk, base + 4);
-	u32 ref = clk->read(clk, nv_clk_src_href);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, base + 0);
+	u32 coef = nvkm_rd32(device, base + 4);
+	u32 ref = nvkm_clk_read(&clk->base, nv_clk_src_href);
 	u32 post_div = 0;
 	u32 clock = 0;
 	int N1, M1;
 
 	switch (base){
 	case 0x4020:
-		post_div = 1 << ((nv_rd32(clk, 0x4070) & 0x000f0000) >> 16);
+		post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
 		break;
 	case 0x4028:
-		post_div = (nv_rd32(clk, 0x4040) & 0x000f0000) >> 16;
+		post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
 		break;
 	default:
 		break;
@@ -76,59 +78,61 @@
 }
 
 static int
-mcp77_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+mcp77_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct mcp77_clk_priv *priv = (void *)clk;
-	u32 mast = nv_rd32(clk, 0x00c054);
+	struct mcp77_clk *clk = mcp77_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mast = nvkm_rd32(device, 0x00c054);
 	u32 P = 0;
 
 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	case nv_clk_src_href:
 		return 100000; /* PCIE reference clock */
 	case nv_clk_src_hclkm4:
-		return clk->read(clk, nv_clk_src_href) * 4;
+		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 4;
 	case nv_clk_src_hclkm2d3:
-		return clk->read(clk, nv_clk_src_href) * 2 / 3;
+		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 2 / 3;
 	case nv_clk_src_host:
 		switch (mast & 0x000c0000) {
-		case 0x00000000: return clk->read(clk, nv_clk_src_hclkm2d3);
+		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
 		case 0x00040000: break;
-		case 0x00080000: return clk->read(clk, nv_clk_src_hclkm4);
-		case 0x000c0000: return clk->read(clk, nv_clk_src_cclk);
+		case 0x00080000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
+		case 0x000c0000: return nvkm_clk_read(&clk->base, nv_clk_src_cclk);
 		}
 		break;
 	case nv_clk_src_core:
-		P = (nv_rd32(clk, 0x004028) & 0x00070000) >> 16;
+		P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
 
 		switch (mast & 0x00000003) {
-		case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
+		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
 		case 0x00000001: return 0;
-		case 0x00000002: return clk->read(clk, nv_clk_src_hclkm4) >> P;
+		case 0x00000002: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4) >> P;
 		case 0x00000003: return read_pll(clk, 0x004028) >> P;
 		}
 		break;
 	case nv_clk_src_cclk:
 		if ((mast & 0x03000000) != 0x03000000)
-			return clk->read(clk, nv_clk_src_core);
+			return nvkm_clk_read(&clk->base, nv_clk_src_core);
 
 		if ((mast & 0x00000200) == 0x00000000)
-			return clk->read(clk, nv_clk_src_core);
+			return nvkm_clk_read(&clk->base, nv_clk_src_core);
 
 		switch (mast & 0x00000c00) {
-		case 0x00000000: return clk->read(clk, nv_clk_src_href);
-		case 0x00000400: return clk->read(clk, nv_clk_src_hclkm4);
-		case 0x00000800: return clk->read(clk, nv_clk_src_hclkm2d3);
+		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
+		case 0x00000400: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
+		case 0x00000800: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
 		default: return 0;
 		}
 	case nv_clk_src_shader:
-		P = (nv_rd32(clk, 0x004020) & 0x00070000) >> 16;
+		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
 		switch (mast & 0x00000030) {
 		case 0x00000000:
 			if (mast & 0x00000040)
-				return clk->read(clk, nv_clk_src_href) >> P;
-			return clk->read(clk, nv_clk_src_crystal) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
+			return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
 		case 0x00000010: break;
 		case 0x00000020: return read_pll(clk, 0x004028) >> P;
 		case 0x00000030: return read_pll(clk, 0x004020) >> P;
@@ -142,7 +146,7 @@
 
 		switch (mast & 0x00400000) {
 		case 0x00400000:
-			return clk->read(clk, nv_clk_src_core) >> P;
+			return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
 			break;
 		default:
 			return 500000 >> P;
@@ -153,29 +157,28 @@
 		break;
 	}
 
-	nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
 	return 0;
 }
 
 static u32
-calc_pll(struct mcp77_clk_priv *priv, u32 reg,
+calc_pll(struct mcp77_clk *clk, u32 reg,
 	 u32 clock, int *N, int *M, int *P)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	struct nvbios_pll pll;
-	struct nvkm_clk *clk = &priv->base;
 	int ret;
 
-	ret = nvbios_pll_parse(bios, reg, &pll);
+	ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
 	if (ret)
 		return 0;
 
 	pll.vco2.max_freq = 0;
-	pll.refclk = clk->read(clk, nv_clk_src_href);
+	pll.refclk = nvkm_clk_read(&clk->base, nv_clk_src_href);
 	if (!pll.refclk)
 		return 0;
 
-	return nv04_pll_calc(nv_subdev(priv), &pll, clock, N, M, NULL, NULL, P);
+	return nv04_pll_calc(subdev, &pll, clock, N, M, NULL, NULL, P);
 }
 
 static inline u32
@@ -197,26 +200,27 @@
 }
 
 static int
-mcp77_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+mcp77_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct mcp77_clk_priv *priv = (void *)clk;
+	struct mcp77_clk *clk = mcp77_clk(base);
 	const int shader = cstate->domain[nv_clk_src_shader];
 	const int core = cstate->domain[nv_clk_src_core];
 	const int vdec = cstate->domain[nv_clk_src_vdec];
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	u32 out = 0, clock = 0;
 	int N, M, P1, P2 = 0;
 	int divs = 0;
 
 	/* cclk: find suitable source, disable PLL if we can */
-	if (core < clk->read(clk, nv_clk_src_hclkm4))
-		out = calc_P(clk->read(clk, nv_clk_src_hclkm4), core, &divs);
+	if (core < nvkm_clk_read(&clk->base, nv_clk_src_hclkm4))
+		out = calc_P(nvkm_clk_read(&clk->base, nv_clk_src_hclkm4), core, &divs);
 
 	/* Calculate clock * 2, so shader clock can use it too */
-	clock = calc_pll(priv, 0x4028, (core << 1), &N, &M, &P1);
+	clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);
 
 	if (abs(core - out) <= abs(core - (clock >> 1))) {
-		priv->csrc = nv_clk_src_hclkm4;
-		priv->cctrl = divs << 16;
+		clk->csrc = nv_clk_src_hclkm4;
+		clk->cctrl = divs << 16;
 	} else {
 		/* NVCTRL is actually used _after_ NVPOST, and after what we
 		 * call NVPLL. To make matters worse, NVPOST is an integer
@@ -226,31 +230,31 @@
 			P1 = 2;
 		}
 
-		priv->csrc = nv_clk_src_core;
-		priv->ccoef = (N << 8) | M;
+		clk->csrc = nv_clk_src_core;
+		clk->ccoef = (N << 8) | M;
 
-		priv->cctrl = (P2 + 1) << 16;
-		priv->cpost = (1 << P1) << 16;
+		clk->cctrl = (P2 + 1) << 16;
+		clk->cpost = (1 << P1) << 16;
 	}
 
 	/* sclk: nvpll + divisor, href or spll */
 	out = 0;
-	if (shader == clk->read(clk, nv_clk_src_href)) {
-		priv->ssrc = nv_clk_src_href;
+	if (shader == nvkm_clk_read(&clk->base, nv_clk_src_href)) {
+		clk->ssrc = nv_clk_src_href;
 	} else {
-		clock = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
-		if (priv->csrc == nv_clk_src_core)
+		clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
+		if (clk->csrc == nv_clk_src_core)
 			out = calc_P((core << 1), shader, &divs);
 
 		if (abs(shader - out) <=
 		    abs(shader - clock) &&
 		   (divs + P2) <= 7) {
-			priv->ssrc = nv_clk_src_core;
-			priv->sctrl = (divs + P2) << 16;
+			clk->ssrc = nv_clk_src_core;
+			clk->sctrl = (divs + P2) << 16;
 		} else {
-			priv->ssrc = nv_clk_src_shader;
-			priv->scoef = (N << 8) | M;
-			priv->sctrl = P1 << 16;
+			clk->ssrc = nv_clk_src_shader;
+			clk->scoef = (N << 8) | M;
+			clk->sctrl = P1 << 16;
 		}
 	}
 
@@ -258,172 +262,162 @@
 	out = calc_P(core, vdec, &divs);
 	clock = calc_P(500000, vdec, &P1);
 	if (abs(vdec - out) <= abs(vdec - clock)) {
-		priv->vsrc = nv_clk_src_cclk;
-		priv->vdiv = divs << 16;
+		clk->vsrc = nv_clk_src_cclk;
+		clk->vdiv = divs << 16;
 	} else {
-		priv->vsrc = nv_clk_src_vdec;
-		priv->vdiv = P1 << 16;
+		clk->vsrc = nv_clk_src_vdec;
+		clk->vdiv = P1 << 16;
 	}
 
 	/* Print strategy! */
-	nv_debug(priv, "nvpll: %08x %08x %08x\n",
-			priv->ccoef, priv->cpost, priv->cctrl);
-	nv_debug(priv, " spll: %08x %08x %08x\n",
-			priv->scoef, priv->spost, priv->sctrl);
-	nv_debug(priv, " vdiv: %08x\n", priv->vdiv);
-	if (priv->csrc == nv_clk_src_hclkm4)
-		nv_debug(priv, "core: hrefm4\n");
+	nvkm_debug(subdev, "nvpll: %08x %08x %08x\n",
+		   clk->ccoef, clk->cpost, clk->cctrl);
+	nvkm_debug(subdev, " spll: %08x %08x %08x\n",
+		   clk->scoef, clk->spost, clk->sctrl);
+	nvkm_debug(subdev, " vdiv: %08x\n", clk->vdiv);
+	if (clk->csrc == nv_clk_src_hclkm4)
+		nvkm_debug(subdev, "core: hrefm4\n");
 	else
-		nv_debug(priv, "core: nvpll\n");
+		nvkm_debug(subdev, "core: nvpll\n");
 
-	if (priv->ssrc == nv_clk_src_hclkm4)
-		nv_debug(priv, "shader: hrefm4\n");
-	else if (priv->ssrc == nv_clk_src_core)
-		nv_debug(priv, "shader: nvpll\n");
+	if (clk->ssrc == nv_clk_src_hclkm4)
+		nvkm_debug(subdev, "shader: hrefm4\n");
+	else if (clk->ssrc == nv_clk_src_core)
+		nvkm_debug(subdev, "shader: nvpll\n");
 	else
-		nv_debug(priv, "shader: spll\n");
+		nvkm_debug(subdev, "shader: spll\n");
 
-	if (priv->vsrc == nv_clk_src_hclkm4)
-		nv_debug(priv, "vdec: 500MHz\n");
+	if (clk->vsrc == nv_clk_src_hclkm4)
+		nvkm_debug(subdev, "vdec: 500MHz\n");
 	else
-		nv_debug(priv, "vdec: core\n");
+		nvkm_debug(subdev, "vdec: core\n");
 
 	return 0;
 }
 
 static int
-mcp77_clk_prog(struct nvkm_clk *clk)
+mcp77_clk_prog(struct nvkm_clk *base)
 {
-	struct mcp77_clk_priv *priv = (void *)clk;
+	struct mcp77_clk *clk = mcp77_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 pllmask = 0, mast;
 	unsigned long flags;
 	unsigned long *f = &flags;
 	int ret = 0;
 
-	ret = gt215_clk_pre(clk, f);
+	ret = gt215_clk_pre(&clk->base, f);
 	if (ret)
 		goto out;
 
 	/* First switch to safe clocks: href */
-	mast = nv_mask(clk, 0xc054, 0x03400e70, 0x03400640);
+	mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
 	mast &= ~0x00400e73;
 	mast |= 0x03000000;
 
-	switch (priv->csrc) {
+	switch (clk->csrc) {
 	case nv_clk_src_hclkm4:
-		nv_mask(clk, 0x4028, 0x00070000, priv->cctrl);
+		nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
 		mast |= 0x00000002;
 		break;
 	case nv_clk_src_core:
-		nv_wr32(clk, 0x402c, priv->ccoef);
-		nv_wr32(clk, 0x4028, 0x80000000 | priv->cctrl);
-		nv_wr32(clk, 0x4040, priv->cpost);
+		nvkm_wr32(device, 0x402c, clk->ccoef);
+		nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
+		nvkm_wr32(device, 0x4040, clk->cpost);
 		pllmask |= (0x3 << 8);
 		mast |= 0x00000003;
 		break;
 	default:
-		nv_warn(priv,"Reclocking failed: unknown core clock\n");
+		nvkm_warn(subdev, "Reclocking failed: unknown core clock\n");
 		goto resume;
 	}
 
-	switch (priv->ssrc) {
+	switch (clk->ssrc) {
 	case nv_clk_src_href:
-		nv_mask(clk, 0x4020, 0x00070000, 0x00000000);
+		nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
 		/* mast |= 0x00000000; */
 		break;
 	case nv_clk_src_core:
-		nv_mask(clk, 0x4020, 0x00070000, priv->sctrl);
+		nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
 		mast |= 0x00000020;
 		break;
 	case nv_clk_src_shader:
-		nv_wr32(clk, 0x4024, priv->scoef);
-		nv_wr32(clk, 0x4020, 0x80000000 | priv->sctrl);
-		nv_wr32(clk, 0x4070, priv->spost);
+		nvkm_wr32(device, 0x4024, clk->scoef);
+		nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
+		nvkm_wr32(device, 0x4070, clk->spost);
 		pllmask |= (0x3 << 12);
 		mast |= 0x00000030;
 		break;
 	default:
-		nv_warn(priv,"Reclocking failed: unknown sclk clock\n");
+		nvkm_warn(subdev, "Reclocking failed: unknown sclk clock\n");
 		goto resume;
 	}
 
-	if (!nv_wait(clk, 0x004080, pllmask, pllmask)) {
-		nv_warn(priv,"Reclocking failed: unstable PLLs\n");
+	if (nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, 0x004080) & pllmask;
+		if (tmp == pllmask)
+			break;
+	) < 0)
 		goto resume;
-	}
 
-	switch (priv->vsrc) {
+	switch (clk->vsrc) {
 	case nv_clk_src_cclk:
 		mast |= 0x00400000;
 	default:
-		nv_wr32(clk, 0x4600, priv->vdiv);
+		nvkm_wr32(device, 0x4600, clk->vdiv);
 	}
 
-	nv_wr32(clk, 0xc054, mast);
+	nvkm_wr32(device, 0xc054, mast);
 
 resume:
 	/* Disable some PLLs and dividers when unused */
-	if (priv->csrc != nv_clk_src_core) {
-		nv_wr32(clk, 0x4040, 0x00000000);
-		nv_mask(clk, 0x4028, 0x80000000, 0x00000000);
+	if (clk->csrc != nv_clk_src_core) {
+		nvkm_wr32(device, 0x4040, 0x00000000);
+		nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
 	}
 
-	if (priv->ssrc != nv_clk_src_shader) {
-		nv_wr32(clk, 0x4070, 0x00000000);
-		nv_mask(clk, 0x4020, 0x80000000, 0x00000000);
+	if (clk->ssrc != nv_clk_src_shader) {
+		nvkm_wr32(device, 0x4070, 0x00000000);
+		nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
 	}
 
 out:
 	if (ret == -EBUSY)
 		f = NULL;
 
-	gt215_clk_post(clk, f);
+	gt215_clk_post(&clk->base, f);
 	return ret;
 }
 
 static void
-mcp77_clk_tidy(struct nvkm_clk *clk)
+mcp77_clk_tidy(struct nvkm_clk *base)
 {
 }
 
-static struct nvkm_domain
-mcp77_domains[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
-	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
-	{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+mcp77_clk = {
+	.read = mcp77_clk_read,
+	.calc = mcp77_clk_calc,
+	.prog = mcp77_clk_prog,
+	.tidy = mcp77_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+		{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
+		{ nv_clk_src_max }
+	}
 };
 
-static int
-mcp77_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct mcp77_clk_priv *priv;
-	int ret;
+	struct mcp77_clk *clk;
 
-	ret = nvkm_clk_create(parent, engine, oclass, mcp77_domains,
-			      NULL, 0, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	*pclk = &clk->base;
 
-	priv->base.read = mcp77_clk_read;
-	priv->base.calc = mcp77_clk_calc;
-	priv->base.prog = mcp77_clk_prog;
-	priv->base.tidy = mcp77_clk_tidy;
-	return 0;
+	return nvkm_clk_ctor(&mcp77_clk, device, index, true, &clk->base);
 }
-
-struct nvkm_oclass *
-mcp77_clk_oclass = &(struct nvkm_oclass) {
-	.handle = NV_SUBDEV(CLK, 0xaa),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = mcp77_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-};
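
For reference, the read_pll()/calc_pll() arithmetic used throughout mcp77.c and its relatives reduces to out = ref * N / M, followed by a post-divider. A small helper with a worked example; this is a sketch of the math only, and overflow handling is omitted for brevity.

	/* PLL output in kHz from the reference clock and the decoded
	 * N/M/post-divider fields, as read_pll() above computes it.
	 * Example: ref = 100000 kHz (href), N = 60, M = 5, post = 4
	 *          -> (100000 * 60) / 5 / 4 = 300000 kHz. */
	static u32
	pll_out_khz(u32 ref, u32 N, u32 M, u32 post_div)
	{
		if (!M || !post_div)
			return 0;
		return ref * N / M / post_div;
	}
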
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
index 63dbbb5..b280f85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv04.c
@@ -21,23 +21,19 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/clk.h>
+#include "priv.h"
 #include "pll.h"
 
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/devinit/nv04.h>
 
-struct nv04_clk_priv {
-	struct nvkm_clk base;
-};
-
 int
 nv04_clk_pll_calc(struct nvkm_clk *clock, struct nvbios_pll *info,
 		  int clk, struct nvkm_pll_vals *pv)
 {
 	int N1, M1, N2, M2, P;
-	int ret = nv04_pll_calc(nv_subdev(clock), info, clk, &N1, &M1, &N2, &M2, &P);
+	int ret = nv04_pll_calc(&clock->subdev, info, clk, &N1, &M1, &N2, &M2, &P);
 	if (ret) {
 		pv->refclk = info->refclk;
 		pv->N1 = N1;
@@ -52,8 +48,9 @@
 int
 nv04_clk_pll_prog(struct nvkm_clk *clk, u32 reg1, struct nvkm_pll_vals *pv)
 {
-	struct nvkm_devinit *devinit = nvkm_devinit(clk);
-	int cv = nvkm_bios(clk)->version.chip;
+	struct nvkm_device *device = clk->subdev.device;
+	struct nvkm_devinit *devinit = device->devinit;
+	int cv = device->bios->version.chip;
 
 	if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
 	    cv >= 0x40) {
@@ -67,37 +64,20 @@
 	return 0;
 }
 
-static struct nvkm_domain
-nv04_domain[] = {
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+nv04_clk = {
+	.domains = {
+		{ nv_clk_src_max }
+	}
 };
 
-static int
-nv04_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+nv04_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
 {
-	struct nv04_clk_priv *priv;
-	int ret;
-
-	ret = nvkm_clk_create(parent, engine, oclass, nv04_domain,
-			      NULL, 0, false, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.pll_calc = nv04_clk_pll_calc;
-	priv->base.pll_prog = nv04_clk_pll_prog;
-	return 0;
+	int ret = nvkm_clk_new_(&nv04_clk, device, index, false, pclk);
+	if (ret == 0) {
+		(*pclk)->pll_calc = nv04_clk_pll_calc;
+		(*pclk)->pll_prog = nv04_clk_pll_prog;
+	}
+	return ret;
 }
-
-struct nvkm_oclass
-nv04_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-};
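
nv04 carries no private state, so it can use the allocate-and-construct helper nvkm_clk_new_() directly instead of the kzalloc()+nvkm_clk_ctor() sequence the stateful chips use. The helper is presumably little more than the following sketch; the signature is taken from the call above, the body is an assumption.

	int
	nvkm_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
		      int index, bool allow_reclock, struct nvkm_clk **pclk)
	{
		/* allocate just the base object and construct it in place */
		if (!(*pclk = kzalloc(sizeof(**pclk), GFP_KERNEL)))
			return -ENOMEM;
		return nvkm_clk_ctor(func, device, index, allow_reclock, *pclk);
	}
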
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
index ed83813..2ab9b9b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv40.c
@@ -21,14 +21,14 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/clk.h>
+#define nv40_clk(p) container_of((p), struct nv40_clk, base)
+#include "priv.h"
 #include "pll.h"
 
-#include <core/device.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 
-struct nv40_clk_priv {
+struct nv40_clk {
 	struct nvkm_clk base;
 	u32 ctrl;
 	u32 npll_ctrl;
@@ -36,64 +36,56 @@
 	u32 spll;
 };
 
-static struct nvkm_domain
-nv40_domain[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
-	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
-	{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
-	{ nv_clk_src_max }
-};
-
 static u32
-read_pll_1(struct nv40_clk_priv *priv, u32 reg)
+read_pll_1(struct nv40_clk *clk, u32 reg)
 {
-	u32 ctrl = nv_rd32(priv, reg + 0x00);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, reg + 0x00);
 	int P = (ctrl & 0x00070000) >> 16;
 	int N = (ctrl & 0x0000ff00) >> 8;
 	int M = (ctrl & 0x000000ff) >> 0;
-	u32 ref = 27000, clk = 0;
+	u32 ref = 27000, khz = 0;
 
 	if (ctrl & 0x80000000)
-		clk = ref * N / M;
+		khz = ref * N / M;
 
-	return clk >> P;
+	return khz >> P;
 }
 
 static u32
-read_pll_2(struct nv40_clk_priv *priv, u32 reg)
+read_pll_2(struct nv40_clk *clk, u32 reg)
 {
-	u32 ctrl = nv_rd32(priv, reg + 0x00);
-	u32 coef = nv_rd32(priv, reg + 0x04);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 ctrl = nvkm_rd32(device, reg + 0x00);
+	u32 coef = nvkm_rd32(device, reg + 0x04);
 	int N2 = (coef & 0xff000000) >> 24;
 	int M2 = (coef & 0x00ff0000) >> 16;
 	int N1 = (coef & 0x0000ff00) >> 8;
 	int M1 = (coef & 0x000000ff) >> 0;
 	int P = (ctrl & 0x00070000) >> 16;
-	u32 ref = 27000, clk = 0;
+	u32 ref = 27000, khz = 0;
 
 	if ((ctrl & 0x80000000) && M1) {
-		clk = ref * N1 / M1;
+		khz = ref * N1 / M1;
 		if ((ctrl & 0x40000100) == 0x40000000) {
 			if (M2)
-				clk = clk * N2 / M2;
+				khz = khz * N2 / M2;
 			else
-				clk = 0;
+				khz = 0;
 		}
 	}
 
-	return clk >> P;
+	return khz >> P;
 }
 
 static u32
-read_clk(struct nv40_clk_priv *priv, u32 src)
+read_clk(struct nv40_clk *clk, u32 src)
 {
 	switch (src) {
 	case 3:
-		return read_pll_2(priv, 0x004000);
+		return read_pll_2(clk, 0x004000);
 	case 2:
-		return read_pll_1(priv, 0x004008);
+		return read_pll_1(clk, 0x004008);
 	default:
 		break;
 	}
@@ -102,46 +94,48 @@
 }
 
 static int
-nv40_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+nv40_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct nv40_clk_priv *priv = (void *)clk;
-	u32 mast = nv_rd32(priv, 0x00c040);
+	struct nv40_clk *clk = nv40_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mast = nvkm_rd32(device, 0x00c040);
 
 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	case nv_clk_src_href:
 		return 100000; /*XXX: PCIE/AGP differ*/
 	case nv_clk_src_core:
-		return read_clk(priv, (mast & 0x00000003) >> 0);
+		return read_clk(clk, (mast & 0x00000003) >> 0);
 	case nv_clk_src_shader:
-		return read_clk(priv, (mast & 0x00000030) >> 4);
+		return read_clk(clk, (mast & 0x00000030) >> 4);
 	case nv_clk_src_mem:
-		return read_pll_2(priv, 0x4020);
+		return read_pll_2(clk, 0x4020);
 	default:
 		break;
 	}
 
-	nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
 	return -EINVAL;
 }
 
 static int
-nv40_clk_calc_pll(struct nv40_clk_priv *priv, u32 reg, u32 clk,
+nv40_clk_calc_pll(struct nv40_clk *clk, u32 reg, u32 khz,
 		  int *N1, int *M1, int *N2, int *M2, int *log2P)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	struct nvbios_pll pll;
 	int ret;
 
-	ret = nvbios_pll_parse(bios, reg, &pll);
+	ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
 	if (ret)
 		return ret;
 
-	if (clk < pll.vco1.max_freq)
+	if (khz < pll.vco1.max_freq)
 		pll.vco2.max_freq = 0;
 
-	ret = nv04_pll_calc(nv_subdev(priv), &pll, clk, N1, M1, N2, M2, log2P);
+	ret = nv04_pll_calc(subdev, &pll, khz, N1, M1, N2, M2, log2P);
 	if (ret == 0)
 		return -ERANGE;
 
@@ -149,93 +143,90 @@
 }
 
 static int
-nv40_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+nv40_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct nv40_clk_priv *priv = (void *)clk;
+	struct nv40_clk *clk = nv40_clk(base);
 	int gclk = cstate->domain[nv_clk_src_core];
 	int sclk = cstate->domain[nv_clk_src_shader];
 	int N1, M1, N2, M2, log2P;
 	int ret;
 
 	/* core/geometric clock */
-	ret = nv40_clk_calc_pll(priv, 0x004000, gclk,
+	ret = nv40_clk_calc_pll(clk, 0x004000, gclk,
 				&N1, &M1, &N2, &M2, &log2P);
 	if (ret < 0)
 		return ret;
 
 	if (N2 == M2) {
-		priv->npll_ctrl = 0x80000100 | (log2P << 16);
-		priv->npll_coef = (N1 << 8) | M1;
+		clk->npll_ctrl = 0x80000100 | (log2P << 16);
+		clk->npll_coef = (N1 << 8) | M1;
 	} else {
-		priv->npll_ctrl = 0xc0000000 | (log2P << 16);
-		priv->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
+		clk->npll_ctrl = 0xc0000000 | (log2P << 16);
+		clk->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
 	}
 
 	/* use the second pll for shader/rop clock, if it differs from core */
 	if (sclk && sclk != gclk) {
-		ret = nv40_clk_calc_pll(priv, 0x004008, sclk,
+		ret = nv40_clk_calc_pll(clk, 0x004008, sclk,
 					&N1, &M1, NULL, NULL, &log2P);
 		if (ret < 0)
 			return ret;
 
-		priv->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
-		priv->ctrl = 0x00000223;
+		clk->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
+		clk->ctrl = 0x00000223;
 	} else {
-		priv->spll = 0x00000000;
-		priv->ctrl = 0x00000333;
+		clk->spll = 0x00000000;
+		clk->ctrl = 0x00000333;
 	}
 
 	return 0;
 }
 
 static int
-nv40_clk_prog(struct nvkm_clk *clk)
+nv40_clk_prog(struct nvkm_clk *base)
 {
-	struct nv40_clk_priv *priv = (void *)clk;
-	nv_mask(priv, 0x00c040, 0x00000333, 0x00000000);
-	nv_wr32(priv, 0x004004, priv->npll_coef);
-	nv_mask(priv, 0x004000, 0xc0070100, priv->npll_ctrl);
-	nv_mask(priv, 0x004008, 0xc007ffff, priv->spll);
+	struct nv40_clk *clk = nv40_clk(base);
+	struct nvkm_device *device = clk->base.subdev.device;
+	nvkm_mask(device, 0x00c040, 0x00000333, 0x00000000);
+	nvkm_wr32(device, 0x004004, clk->npll_coef);
+	nvkm_mask(device, 0x004000, 0xc0070100, clk->npll_ctrl);
+	nvkm_mask(device, 0x004008, 0xc007ffff, clk->spll);
 	mdelay(5);
-	nv_mask(priv, 0x00c040, 0x00000333, priv->ctrl);
+	nvkm_mask(device, 0x00c040, 0x00000333, clk->ctrl);
 	return 0;
 }
 
 static void
-nv40_clk_tidy(struct nvkm_clk *clk)
+nv40_clk_tidy(struct nvkm_clk *obj)
 {
 }
 
-static int
-nv40_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct nv40_clk_priv *priv;
-	int ret;
-
-	ret = nvkm_clk_create(parent, engine, oclass, nv40_domain,
-			      NULL, 0, true, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.pll_calc = nv04_clk_pll_calc;
-	priv->base.pll_prog = nv04_clk_pll_prog;
-	priv->base.read = nv40_clk_read;
-	priv->base.calc = nv40_clk_calc;
-	priv->base.prog = nv40_clk_prog;
-	priv->base.tidy = nv40_clk_tidy;
-	return 0;
-}
-
-struct nvkm_oclass
-nv40_clk_oclass = {
-	.handle = NV_SUBDEV(CLK, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
+static const struct nvkm_clk_func
+nv40_clk = {
+	.read = nv40_clk_read,
+	.calc = nv40_clk_calc,
+	.prog = nv40_clk_prog,
+	.tidy = nv40_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+		{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
+		{ nv_clk_src_max }
+	}
 };
+
+int
+nv40_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+	struct nv40_clk *clk;
+
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	clk->base.pll_calc = nv04_clk_pll_calc;
+	clk->base.pll_prog = nv04_clk_pll_prog;
+	*pclk = &clk->base;
+
+	return nvkm_clk_ctor(&nv40_clk, device, index, true, &clk->base);
+}
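
The nv40 PLL coefficient word packs both VCO stages into one register, matching the shifts in read_pll_2() and nv40_clk_calc() above: bits 31:24 hold N2, 23:16 M2, 15:8 N1, and 7:0 M1. A pair of helpers making that layout explicit:

	static u32
	nv40_pll_pack(int N1, int M1, int N2, int M2)
	{
		return (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
	}

	static void
	nv40_pll_unpack(u32 coef, int *N1, int *M1, int *N2, int *M2)
	{
		*N2 = (coef & 0xff000000) >> 24;
		*M2 = (coef & 0x00ff0000) >> 16;
		*N1 = (coef & 0x0000ff00) >> 8;
		*M1 = (coef & 0x000000ff) >> 0;
	}
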
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
index 9b4ffd6..5841f29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.c
@@ -25,38 +25,39 @@
 #include "pll.h"
 #include "seq.h"
 
-#include <core/device.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 
 static u32
-read_div(struct nv50_clk_priv *priv)
+read_div(struct nv50_clk *clk)
 {
-	switch (nv_device(priv)->chipset) {
+	struct nvkm_device *device = clk->base.subdev.device;
+	switch (device->chipset) {
 	case 0x50: /* it exists, but only has bit 31, not the dividers.. */
 	case 0x84:
 	case 0x86:
 	case 0x98:
 	case 0xa0:
-		return nv_rd32(priv, 0x004700);
+		return nvkm_rd32(device, 0x004700);
 	case 0x92:
 	case 0x94:
 	case 0x96:
-		return nv_rd32(priv, 0x004800);
+		return nvkm_rd32(device, 0x004800);
 	default:
 		return 0x00000000;
 	}
 }
 
 static u32
-read_pll_src(struct nv50_clk_priv *priv, u32 base)
+read_pll_src(struct nv50_clk *clk, u32 base)
 {
-	struct nvkm_clk *clk = &priv->base;
-	u32 coef, ref = clk->read(clk, nv_clk_src_crystal);
-	u32 rsel = nv_rd32(priv, 0x00e18c);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 coef, ref = nvkm_clk_read(&clk->base, nv_clk_src_crystal);
+	u32 rsel = nvkm_rd32(device, 0x00e18c);
 	int P, N, M, id;
 
-	switch (nv_device(priv)->chipset) {
+	switch (device->chipset) {
 	case 0x50:
 	case 0xa0:
 		switch (base) {
@@ -65,11 +66,11 @@
 		case 0x4008: id = !!(rsel & 0x00000008); break;
 		case 0x4030: id = 0; break;
 		default:
-			nv_error(priv, "ref: bad pll 0x%06x\n", base);
+			nvkm_error(subdev, "ref: bad pll %06x\n", base);
 			return 0;
 		}
 
-		coef = nv_rd32(priv, 0x00e81c + (id * 0x0c));
+		coef = nvkm_rd32(device, 0x00e81c + (id * 0x0c));
 		ref *=  (coef & 0x01000000) ? 2 : 4;
 		P    =  (coef & 0x00070000) >> 16;
 		N    = ((coef & 0x0000ff00) >> 8) + 1;
@@ -78,7 +79,7 @@
 	case 0x84:
 	case 0x86:
 	case 0x92:
-		coef = nv_rd32(priv, 0x00e81c);
+		coef = nvkm_rd32(device, 0x00e81c);
 		P    = (coef & 0x00070000) >> 16;
 		N    = (coef & 0x0000ff00) >> 8;
 		M    = (coef & 0x000000ff) >> 0;
@@ -86,26 +87,26 @@
 	case 0x94:
 	case 0x96:
 	case 0x98:
-		rsel = nv_rd32(priv, 0x00c050);
+		rsel = nvkm_rd32(device, 0x00c050);
 		switch (base) {
 		case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
 		case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
 		case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
 		case 0x4030: rsel = 3; break;
 		default:
-			nv_error(priv, "ref: bad pll 0x%06x\n", base);
+			nvkm_error(subdev, "ref: bad pll %06x\n", base);
 			return 0;
 		}
 
 		switch (rsel) {
 		case 0: id = 1; break;
-		case 1: return clk->read(clk, nv_clk_src_crystal);
-		case 2: return clk->read(clk, nv_clk_src_href);
+		case 1: return nvkm_clk_read(&clk->base, nv_clk_src_crystal);
+		case 2: return nvkm_clk_read(&clk->base, nv_clk_src_href);
 		case 3: id = 0; break;
 		}
 
-		coef =  nv_rd32(priv, 0x00e81c + (id * 0x28));
-		P    = (nv_rd32(priv, 0x00e824 + (id * 0x28)) >> 16) & 7;
+		coef =  nvkm_rd32(device, 0x00e81c + (id * 0x28));
+		P    = (nvkm_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
 		P   += (coef & 0x00070000) >> 16;
 		N    = (coef & 0x0000ff00) >> 8;
 		M    = (coef & 0x000000ff) >> 0;
@@ -121,10 +122,11 @@
 }
 
 static u32
-read_pll_ref(struct nv50_clk_priv *priv, u32 base)
+read_pll_ref(struct nv50_clk *clk, u32 base)
 {
-	struct nvkm_clk *clk = &priv->base;
-	u32 src, mast = nv_rd32(priv, 0x00c040);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 src, mast = nvkm_rd32(device, 0x00c040);
 
 	switch (base) {
 	case 0x004028:
@@ -140,33 +142,33 @@
 		src = !!(mast & 0x02000000);
 		break;
 	case 0x00e810:
-		return clk->read(clk, nv_clk_src_crystal);
+		return nvkm_clk_read(&clk->base, nv_clk_src_crystal);
 	default:
-		nv_error(priv, "bad pll 0x%06x\n", base);
+		nvkm_error(subdev, "bad pll %06x\n", base);
 		return 0;
 	}
 
 	if (src)
-		return clk->read(clk, nv_clk_src_href);
+		return nvkm_clk_read(&clk->base, nv_clk_src_href);
 
-	return read_pll_src(priv, base);
+	return read_pll_src(clk, base);
 }
 
 static u32
-read_pll(struct nv50_clk_priv *priv, u32 base)
+read_pll(struct nv50_clk *clk, u32 base)
 {
-	struct nvkm_clk *clk = &priv->base;
-	u32 mast = nv_rd32(priv, 0x00c040);
-	u32 ctrl = nv_rd32(priv, base + 0);
-	u32 coef = nv_rd32(priv, base + 4);
-	u32 ref = read_pll_ref(priv, base);
+	struct nvkm_device *device = clk->base.subdev.device;
+	u32 mast = nvkm_rd32(device, 0x00c040);
+	u32 ctrl = nvkm_rd32(device, base + 0);
+	u32 coef = nvkm_rd32(device, base + 4);
+	u32 ref = read_pll_ref(clk, base);
 	u32 freq = 0;
 	int N1, N2, M1, M2;
 
 	if (base == 0x004028 && (mast & 0x00100000)) {
 		/* wtf, appears to only disable post-divider on gt200 */
-		if (nv_device(priv)->chipset != 0xa0)
-			return clk->read(clk, nv_clk_src_dom6);
+		if (device->chipset != 0xa0)
+			return nvkm_clk_read(&clk->base, nv_clk_src_dom6);
 	}
 
 	N2 = (coef & 0xff000000) >> 24;
@@ -186,71 +188,73 @@
 	return freq;
 }
 
-static int
-nv50_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
+int
+nv50_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
 {
-	struct nv50_clk_priv *priv = (void *)clk;
-	u32 mast = nv_rd32(priv, 0x00c040);
+	struct nv50_clk *clk = nv50_clk(base);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 mast = nvkm_rd32(device, 0x00c040);
 	u32 P = 0;
 
 	switch (src) {
 	case nv_clk_src_crystal:
-		return nv_device(priv)->crystal;
+		return device->crystal;
 	case nv_clk_src_href:
 		return 100000; /* PCIE reference clock */
 	case nv_clk_src_hclk:
-		return div_u64((u64)clk->read(clk, nv_clk_src_href) * 27778, 10000);
+		return div_u64((u64)nvkm_clk_read(&clk->base, nv_clk_src_href) * 27778, 10000);
 	case nv_clk_src_hclkm3:
-		return clk->read(clk, nv_clk_src_hclk) * 3;
+		return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3;
 	case nv_clk_src_hclkm3d2:
-		return clk->read(clk, nv_clk_src_hclk) * 3 / 2;
+		return nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3 / 2;
 	case nv_clk_src_host:
 		switch (mast & 0x30000000) {
-		case 0x00000000: return clk->read(clk, nv_clk_src_href);
+		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
 		case 0x10000000: break;
 		case 0x20000000: /* !0x50 */
-		case 0x30000000: return clk->read(clk, nv_clk_src_hclk);
+		case 0x30000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk);
 		}
 		break;
 	case nv_clk_src_core:
 		if (!(mast & 0x00100000))
-			P = (nv_rd32(priv, 0x004028) & 0x00070000) >> 16;
+			P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;
 		switch (mast & 0x00000003) {
-		case 0x00000000: return clk->read(clk, nv_clk_src_crystal) >> P;
-		case 0x00000001: return clk->read(clk, nv_clk_src_dom6);
-		case 0x00000002: return read_pll(priv, 0x004020) >> P;
-		case 0x00000003: return read_pll(priv, 0x004028) >> P;
+		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
+		case 0x00000001: return nvkm_clk_read(&clk->base, nv_clk_src_dom6);
+		case 0x00000002: return read_pll(clk, 0x004020) >> P;
+		case 0x00000003: return read_pll(clk, 0x004028) >> P;
 		}
 		break;
 	case nv_clk_src_shader:
-		P = (nv_rd32(priv, 0x004020) & 0x00070000) >> 16;
+		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
 		switch (mast & 0x00000030) {
 		case 0x00000000:
 			if (mast & 0x00000080)
-				return clk->read(clk, nv_clk_src_host) >> P;
-			return clk->read(clk, nv_clk_src_crystal) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_host) >> P;
+			return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
 		case 0x00000010: break;
-		case 0x00000020: return read_pll(priv, 0x004028) >> P;
-		case 0x00000030: return read_pll(priv, 0x004020) >> P;
+		case 0x00000020: return read_pll(clk, 0x004028) >> P;
+		case 0x00000030: return read_pll(clk, 0x004020) >> P;
 		}
 		break;
 	case nv_clk_src_mem:
-		P = (nv_rd32(priv, 0x004008) & 0x00070000) >> 16;
-		if (nv_rd32(priv, 0x004008) & 0x00000200) {
+		P = (nvkm_rd32(device, 0x004008) & 0x00070000) >> 16;
+		if (nvkm_rd32(device, 0x004008) & 0x00000200) {
 			switch (mast & 0x0000c000) {
 			case 0x00000000:
-				return clk->read(clk, nv_clk_src_crystal) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
 			case 0x00008000:
 			case 0x0000c000:
-				return clk->read(clk, nv_clk_src_href) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
 			}
 		} else {
-			return read_pll(priv, 0x004008) >> P;
+			return read_pll(clk, 0x004008) >> P;
 		}
 		break;
 	case nv_clk_src_vdec:
-		P = (read_div(priv) & 0x00000700) >> 8;
-		switch (nv_device(priv)->chipset) {
+		P = (read_div(clk) & 0x00000700) >> 8;
+		switch (device->chipset) {
 		case 0x84:
 		case 0x86:
 		case 0x92:
@@ -259,51 +263,51 @@
 		case 0xa0:
 			switch (mast & 0x00000c00) {
 			case 0x00000000:
-				if (nv_device(priv)->chipset == 0xa0) /* wtf?? */
-					return clk->read(clk, nv_clk_src_core) >> P;
-				return clk->read(clk, nv_clk_src_crystal) >> P;
+				if (device->chipset == 0xa0) /* wtf?? */
+					return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
 			case 0x00000400:
 				return 0;
 			case 0x00000800:
 				if (mast & 0x01000000)
-					return read_pll(priv, 0x004028) >> P;
-				return read_pll(priv, 0x004030) >> P;
+					return read_pll(clk, 0x004028) >> P;
+				return read_pll(clk, 0x004030) >> P;
 			case 0x00000c00:
-				return clk->read(clk, nv_clk_src_core) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
 			}
 			break;
 		case 0x98:
 			switch (mast & 0x00000c00) {
 			case 0x00000000:
-				return clk->read(clk, nv_clk_src_core) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
 			case 0x00000400:
 				return 0;
 			case 0x00000800:
-				return clk->read(clk, nv_clk_src_hclkm3d2) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2) >> P;
 			case 0x00000c00:
-				return clk->read(clk, nv_clk_src_mem) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_mem) >> P;
 			}
 			break;
 		}
 		break;
 	case nv_clk_src_dom6:
-		switch (nv_device(priv)->chipset) {
+		switch (device->chipset) {
 		case 0x50:
 		case 0xa0:
-			return read_pll(priv, 0x00e810) >> 2;
+			return read_pll(clk, 0x00e810) >> 2;
 		case 0x84:
 		case 0x86:
 		case 0x92:
 		case 0x94:
 		case 0x96:
 		case 0x98:
-			P = (read_div(priv) & 0x00000007) >> 0;
+			P = (read_div(clk) & 0x00000007) >> 0;
 			switch (mast & 0x0c000000) {
-			case 0x00000000: return clk->read(clk, nv_clk_src_href);
+			case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
 			case 0x04000000: break;
-			case 0x08000000: return clk->read(clk, nv_clk_src_hclk);
+			case 0x08000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclk);
 			case 0x0c000000:
-				return clk->read(clk, nv_clk_src_hclkm3) >> P;
+				return nvkm_clk_read(&clk->base, nv_clk_src_hclkm3) >> P;
 			}
 			break;
 		default:
@@ -313,27 +317,27 @@
 		break;
 	}
 
-	nv_debug(priv, "unknown clock source %d 0x%08x\n", src, mast);
+	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
 	return -EINVAL;
 }
 
 static u32
-calc_pll(struct nv50_clk_priv *priv, u32 reg, u32 clk, int *N, int *M, int *P)
+calc_pll(struct nv50_clk *clk, u32 reg, u32 idx, int *N, int *M, int *P)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &clk->base.subdev;
 	struct nvbios_pll pll;
 	int ret;
 
-	ret = nvbios_pll_parse(bios, reg, &pll);
+	ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
 	if (ret)
 		return 0;
 
 	pll.vco2.max_freq = 0;
-	pll.refclk = read_pll_ref(priv, reg);
+	pll.refclk = read_pll_ref(clk, reg);
 	if (!pll.refclk)
 		return 0;
 
-	return nv04_pll_calc(nv_subdev(priv), &pll, clk, N, M, NULL, NULL, P);
+	return nv04_pll_calc(subdev, &pll, idx, N, M, NULL, NULL, P);
 }
 
 static inline u32
@@ -360,11 +364,13 @@
 	return ((a / 1000) == (b / 1000));
 }
 
-static int
-nv50_clk_calc(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
+int
+nv50_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
 {
-	struct nv50_clk_priv *priv = (void *)clk;
-	struct nv50_clk_hwsq *hwsq = &priv->hwsq;
+	struct nv50_clk *clk = nv50_clk(base);
+	struct nv50_clk_hwsq *hwsq = &clk->hwsq;
+	struct nvkm_subdev *subdev = &clk->base.subdev;
+	struct nvkm_device *device = subdev->device;
 	const int shader = cstate->domain[nv_clk_src_shader];
 	const int core = cstate->domain[nv_clk_src_core];
 	const int vdec = cstate->domain[nv_clk_src_vdec];
@@ -375,7 +381,7 @@
 	int freq, out;
 
 	/* prepare a hwsq script from which we'll perform the reclock */
-	out = clk_init(hwsq, nv_subdev(clk));
+	out = clk_init(hwsq, subdev);
 	if (out)
 		return out;
 
@@ -393,15 +399,15 @@
 		freq = calc_div(core, vdec, &P1);
 
 		/* see how close we can get using xpll/hclk as a source */
-		if (nv_device(priv)->chipset != 0x98)
-			out = read_pll(priv, 0x004030);
+		if (device->chipset != 0x98)
+			out = read_pll(clk, 0x004030);
 		else
-			out = clk->read(clk, nv_clk_src_hclkm3d2);
+			out = nvkm_clk_read(&clk->base, nv_clk_src_hclkm3d2);
 		out = calc_div(out, vdec, &P2);
 
 		/* select whichever gets us closest */
 		if (abs(vdec - freq) <= abs(vdec - out)) {
-			if (nv_device(priv)->chipset != 0x98)
+			if (device->chipset != 0x98)
 				mastv |= 0x00000c00;
 			divsv |= P1 << 8;
 		} else {
@@ -417,14 +423,14 @@
 	 * of the host clock frequency
 	 */
 	if (dom6) {
-		if (clk_same(dom6, clk->read(clk, nv_clk_src_href))) {
+		if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_href))) {
 			mastv |= 0x00000000;
 		} else
-		if (clk_same(dom6, clk->read(clk, nv_clk_src_hclk))) {
+		if (clk_same(dom6, nvkm_clk_read(&clk->base, nv_clk_src_hclk))) {
 			mastv |= 0x08000000;
 		} else {
-			freq = clk->read(clk, nv_clk_src_hclk) * 3;
-			freq = calc_div(freq, dom6, &P1);
+			freq = nvkm_clk_read(&clk->base, nv_clk_src_hclk) * 3;
+			calc_div(freq, dom6, &P1);
 
 			mastv |= 0x0c000000;
 			divsv |= P1;
@@ -444,13 +450,13 @@
 	/* core/shader: disconnect nvclk/sclk from their PLLs (nvclk to dom6,
 	 * sclk to hclk) before reprogramming
 	 */
-	if (nv_device(priv)->chipset < 0x92)
+	if (device->chipset < 0x92)
 		clk_mask(hwsq, mast, 0x001000b0, 0x00100080);
 	else
 		clk_mask(hwsq, mast, 0x000000b3, 0x00000081);
 
 	/* core: for the moment at least, always use nvpll */
-	freq = calc_pll(priv, 0x4028, core, &N, &M, &P1);
+	freq = calc_pll(clk, 0x4028, core, &N, &M, &P1);
 	if (freq == 0)
 		return -ERANGE;
 
@@ -468,7 +474,7 @@
 		clk_mask(hwsq, spll[0], 0xc03f0100, (P1 << 19) | (P1 << 16));
 		clk_mask(hwsq, mast, 0x00100033, 0x00000023);
 	} else {
-		freq = calc_pll(priv, 0x4020, shader, &N, &M, &P1);
+		freq = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
 		if (freq == 0)
 			return -ERANGE;
 
@@ -485,77 +491,71 @@
 	return 0;
 }
 
-static int
-nv50_clk_prog(struct nvkm_clk *clk)
+int
+nv50_clk_prog(struct nvkm_clk *base)
 {
-	struct nv50_clk_priv *priv = (void *)clk;
-	return clk_exec(&priv->hwsq, true);
+	struct nv50_clk *clk = nv50_clk(base);
+	return clk_exec(&clk->hwsq, true);
 }
 
-static void
-nv50_clk_tidy(struct nvkm_clk *clk)
+void
+nv50_clk_tidy(struct nvkm_clk *base)
 {
-	struct nv50_clk_priv *priv = (void *)clk;
-	clk_exec(&priv->hwsq, false);
+	struct nv50_clk *clk = nv50_clk(base);
+	clk_exec(&clk->hwsq, false);
 }
 
 int
-nv50_clk_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+nv50_clk_new_(const struct nvkm_clk_func *func, struct nvkm_device *device,
+	      int index, bool allow_reclock, struct nvkm_clk **pclk)
 {
-	struct nv50_clk_oclass *pclass = (void *)oclass;
-	struct nv50_clk_priv *priv;
+	struct nv50_clk *clk;
 	int ret;
 
-	ret = nvkm_clk_create(parent, engine, oclass, pclass->domains,
-			      NULL, 0, false, &priv);
-	*pobject = nv_object(priv);
+	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
+		return -ENOMEM;
+	ret = nvkm_clk_ctor(func, device, index, allow_reclock, &clk->base);
+	*pclk = &clk->base;
 	if (ret)
 		return ret;
 
-	priv->hwsq.r_fifo = hwsq_reg(0x002504);
-	priv->hwsq.r_spll[0] = hwsq_reg(0x004020);
-	priv->hwsq.r_spll[1] = hwsq_reg(0x004024);
-	priv->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
-	priv->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
-	switch (nv_device(priv)->chipset) {
+	clk->hwsq.r_fifo = hwsq_reg(0x002504);
+	clk->hwsq.r_spll[0] = hwsq_reg(0x004020);
+	clk->hwsq.r_spll[1] = hwsq_reg(0x004024);
+	clk->hwsq.r_nvpll[0] = hwsq_reg(0x004028);
+	clk->hwsq.r_nvpll[1] = hwsq_reg(0x00402c);
+	switch (device->chipset) {
 	case 0x92:
 	case 0x94:
 	case 0x96:
-		priv->hwsq.r_divs = hwsq_reg(0x004800);
+		clk->hwsq.r_divs = hwsq_reg(0x004800);
 		break;
 	default:
-		priv->hwsq.r_divs = hwsq_reg(0x004700);
+		clk->hwsq.r_divs = hwsq_reg(0x004700);
 		break;
 	}
-	priv->hwsq.r_mast = hwsq_reg(0x00c040);
-
-	priv->base.read = nv50_clk_read;
-	priv->base.calc = nv50_clk_calc;
-	priv->base.prog = nv50_clk_prog;
-	priv->base.tidy = nv50_clk_tidy;
+	clk->hwsq.r_mast = hwsq_reg(0x00c040);
 	return 0;
 }
 
-static struct nvkm_domain
-nv50_domains[] = {
-	{ nv_clk_src_crystal, 0xff },
-	{ nv_clk_src_href   , 0xff },
-	{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
-	{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
-	{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
-	{ nv_clk_src_max }
+static const struct nvkm_clk_func
+nv50_clk = {
+	.read = nv50_clk_read,
+	.calc = nv50_clk_calc,
+	.prog = nv50_clk_prog,
+	.tidy = nv50_clk_tidy,
+	.domains = {
+		{ nv_clk_src_crystal, 0xff },
+		{ nv_clk_src_href   , 0xff },
+		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
+		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
+		{ nv_clk_src_mem    , 0xff, 0, "memory", 1000 },
+		{ nv_clk_src_max }
+	}
 };
 
-struct nvkm_oclass *
-nv50_clk_oclass = &(struct nv50_clk_oclass) {
-	.base.handle = NV_SUBDEV(CLK, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_clk_ctor,
-		.dtor = _nvkm_clk_dtor,
-		.init = _nvkm_clk_init,
-		.fini = _nvkm_clk_fini,
-	},
-	.domains = nv50_domains,
-}.base;
+int
+nv50_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+	return nv50_clk_new_(&nv50_clk, device, index, false, pclk);
+}
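
The calc/prog/tidy triple that nv50 now routes through nvkm_clk_func implements a two-phase reclock: calc records the register writes into the hwsq script, prog submits that script, and tidy throws it away when something aborts in between. A caller-side sketch of how the hooks compose (illustrative only; the real call sites are in the clk base code, not in this patch):

/* Sketch: how calc/prog/tidy are expected to be driven. */
static int
reclock(struct nvkm_clk *clk, struct nvkm_cstate *cstate)
{
	int ret = clk->func->calc(clk, cstate);	/* build the hwsq script */
	if (ret == 0)
		ret = clk->func->prog(clk);	/* execute it */
	clk->func->tidy(clk);			/* discard leftover state */
	return ret;
}
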
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
index 0ead76a..d3c7fb6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
@@ -1,7 +1,9 @@
-#ifndef __NVKM_CLK_NV50_H__
-#define __NVKM_CLK_NV50_H__
+#ifndef __NV50_CLK_H__
+#define __NV50_CLK_H__
+#define nv50_clk(p) container_of((p), struct nv50_clk, base)
+#include "priv.h"
+
 #include <subdev/bus/hwsq.h>
-#include <subdev/clk.h>
 
 struct nv50_clk_hwsq {
 	struct hwsq base;
@@ -12,17 +14,15 @@
 	struct hwsq_reg r_mast;
 };
 
-struct nv50_clk_priv {
+struct nv50_clk {
 	struct nvkm_clk base;
 	struct nv50_clk_hwsq hwsq;
 };
 
-int  nv50_clk_ctor(struct nvkm_object *, struct nvkm_object *,
-		     struct nvkm_oclass *, void *, u32,
-		     struct nvkm_object **);
-
-struct nv50_clk_oclass {
-	struct nvkm_oclass base;
-	struct nvkm_domain *domains;
-};
+int nv50_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
+		  bool, struct nvkm_clk **);
+int nv50_clk_read(struct nvkm_clk *, enum nv_clk_src);
+int nv50_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
+int nv50_clk_prog(struct nvkm_clk *);
+void nv50_clk_tidy(struct nvkm_clk *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
index 783a3e7..c6fccd6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllgt215.c
@@ -79,7 +79,7 @@
 	}
 
 	if (unlikely(best_err == ~0)) {
-		nv_error(subdev, "unable to find matching pll values\n");
+		nvkm_error(subdev, "unable to find matching pll values\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
index f229289..5ad6787 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pllnv04.c
@@ -37,7 +37,7 @@
 	 * "clk" parameter in kHz
 	 * returns calculated clock
 	 */
-	struct nvkm_bios *bios = nvkm_bios(subdev);
+	struct nvkm_bios *bios = subdev->device->bios;
 	int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
 	int minM = info->vco1.min_m, maxM = info->vco1.max_m;
 	int minN = info->vco1.min_n, maxN = info->vco1.max_n;
@@ -136,7 +136,7 @@
 	 * "clk" parameter in kHz
 	 * returns calculated clock
 	 */
-	int chip_version = nvkm_bios(subdev)->version.chip;
+	int chip_version = subdev->device->bios->version.chip;
 	int minvco1 = info->vco1.min_freq, maxvco1 = info->vco1.max_freq;
 	int minvco2 = info->vco2.min_freq, maxvco2 = info->vco2.max_freq;
 	int minU1 = info->vco1.min_inputfreq, minU2 = info->vco2.min_inputfreq;
@@ -240,6 +240,6 @@
 	}
 
 	if (!ret)
-		nv_error(subdev, "unable to compute acceptable pll values\n");
+		nvkm_error(subdev, "unable to compute acceptable pll values\n");
 	return ret;
 }
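
For orientation, the N/M/P values these calculators hand back combine as a fractional multiplier feeding a power-of-two post divider, with the two-stage variant chaining a second N/M pair ahead of the divider. A helper showing the arithmetic (illustrative only; frequencies in kHz, per the "clk" parameter convention noted in the comments above):

/* Output frequency for nv04_pll_calc() results; pass N2 = M2 = 1 for
 * a single-stage PLL.  do_div() is from <asm/div64.h>.
 */
static u32
pll_freq_khz(u32 refclk, int N1, int M1, int N2, int M2, int P)
{
	u64 vco = (u64)refclk * N1 * N2;
	do_div(vco, M1 * M2);
	return (u32)(vco >> P);
}
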
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
new file mode 100644
index 0000000..51eafc0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_CLK_PRIV_H__
+#define __NVKM_CLK_PRIV_H__
+#define nvkm_clk(p) container_of((p), struct nvkm_clk, subdev)
+#include <subdev/clk.h>
+
+struct nvkm_clk_func {
+	int (*init)(struct nvkm_clk *);
+	void (*fini)(struct nvkm_clk *);
+	int (*read)(struct nvkm_clk *, enum nv_clk_src);
+	int (*calc)(struct nvkm_clk *, struct nvkm_cstate *);
+	int (*prog)(struct nvkm_clk *);
+	void (*tidy)(struct nvkm_clk *);
+	struct nvkm_pstate *pstates;
+	int nr_pstates;
+	struct nvkm_domain domains[];
+};
+
+int nvkm_clk_ctor(const struct nvkm_clk_func *, struct nvkm_device *, int,
+		  bool allow_reclock, struct nvkm_clk *);
+int nvkm_clk_new_(const struct nvkm_clk_func *, struct nvkm_device *, int,
+		  bool allow_reclock, struct nvkm_clk **);
+
+int nv04_clk_pll_calc(struct nvkm_clk *, struct nvbios_pll *, int clk,
+		      struct nvkm_pll_vals *);
+int nv04_clk_pll_prog(struct nvkm_clk *, u32 reg1, struct nvkm_pll_vals *);
+#endif
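
This new header captures the object-model shape the whole series converts to: one const per-chipset function table plus a container_of() downcast macro, replacing the old oclass/ofuncs indirection. The base-class entry points the converted code now calls become one-line dispatchers along these lines (sketch; the real bodies live in clk/base.c, outside this hunk):

/* Assumed shape of the wrapper that replaces the old clk->read() hook. */
int
nvkm_clk_read(struct nvkm_clk *clk, enum nv_clk_src src)
{
	return clk->func->read(clk, src);
}
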
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
index b0d7c5f..5f25402 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
@@ -23,74 +23,108 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/vga.h>
 
-int
-_nvkm_devinit_fini(struct nvkm_object *object, bool suspend)
+u32
+nvkm_devinit_mmio(struct nvkm_devinit *init, u32 addr)
 {
-	struct nvkm_devinit *devinit = (void *)object;
-
-	/* force full reinit on resume */
-	if (suspend)
-		devinit->post = true;
-
-	/* unlock the extended vga crtc regs */
-	nv_lockvgac(devinit, false);
-
-	return nvkm_subdev_fini(&devinit->base, suspend);
+	if (init->func->mmio)
+		addr = init->func->mmio(init, addr);
+	return addr;
 }
 
 int
-_nvkm_devinit_init(struct nvkm_object *object)
+nvkm_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 khz)
 {
-	struct nvkm_devinit_impl *impl = (void *)object->oclass;
-	struct nvkm_devinit *devinit = (void *)object;
-	int ret;
-
-	ret = nvkm_subdev_init(&devinit->base);
-	if (ret)
-		return ret;
-
-	ret = impl->post(&devinit->base, devinit->post);
-	if (ret)
-		return ret;
-
-	if (impl->disable)
-		nv_device(devinit)->disable_mask |= impl->disable(devinit);
-	return 0;
+	return init->func->pll_set(init, type, khz);
 }
 
 void
-_nvkm_devinit_dtor(struct nvkm_object *object)
+nvkm_devinit_meminit(struct nvkm_devinit *init)
 {
-	struct nvkm_devinit *devinit = (void *)object;
+	if (init->func->meminit)
+		init->func->meminit(init);
+}
 
-	/* lock crtc regs */
-	nv_lockvgac(devinit, true);
-
-	nvkm_subdev_destroy(&devinit->base);
+u64
+nvkm_devinit_disable(struct nvkm_devinit *init)
+{
+	if (init && init->func->disable)
+		return init->func->disable(init);
+	return 0;
 }
 
 int
-nvkm_devinit_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, int size, void **pobject)
+nvkm_devinit_post(struct nvkm_devinit *init, u64 *disable)
 {
-	struct nvkm_devinit_impl *impl = (void *)oclass;
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_devinit *devinit;
-	int ret;
+	int ret = 0;
+	if (init && init->func->post)
+		ret = init->func->post(init, init->post);
+	*disable = nvkm_devinit_disable(init);
+	return ret;
+}
 
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "DEVINIT",
-				  "init", size, pobject);
-	devinit = *pobject;
-	if (ret)
-		return ret;
-
-	devinit->post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
-	devinit->meminit = impl->meminit;
-	devinit->pll_set = impl->pll_set;
-	devinit->mmio    = impl->mmio;
+static int
+nvkm_devinit_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_devinit *init = nvkm_devinit(subdev);
+	/* force full reinit on resume */
+	if (suspend)
+		init->post = true;
 	return 0;
 }
+
+static int
+nvkm_devinit_preinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_devinit *init = nvkm_devinit(subdev);
+
+	if (init->func->preinit)
+		init->func->preinit(init);
+
+	/* unlock the extended vga crtc regs */
+	nvkm_lockvgac(subdev->device, false);
+	return 0;
+}
+
+static int
+nvkm_devinit_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_devinit *init = nvkm_devinit(subdev);
+	if (init->func->init)
+		init->func->init(init);
+	return 0;
+}
+
+static void *
+nvkm_devinit_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_devinit *init = nvkm_devinit(subdev);
+	void *data = init;
+
+	if (init->func->dtor)
+		data = init->func->dtor(init);
+
+	/* lock crtc regs */
+	nvkm_lockvgac(subdev->device, true);
+	return data;
+}
+
+static const struct nvkm_subdev_func
+nvkm_devinit = {
+	.dtor = nvkm_devinit_dtor,
+	.preinit = nvkm_devinit_preinit,
+	.init = nvkm_devinit_init,
+	.fini = nvkm_devinit_fini,
+};
+
+void
+nvkm_devinit_ctor(const struct nvkm_devinit_func *func,
+		  struct nvkm_device *device, int index,
+		  struct nvkm_devinit *init)
+{
+	nvkm_subdev_ctor(&nvkm_devinit, device, index, 0, &init->subdev);
+	init->func = func;
+	init->post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
+}
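
Worth noting: nvkm_devinit_mmio(), nvkm_devinit_disable() and nvkm_devinit_post() above all tolerate a NULL subdev and a missing hook, so callers need no per-generation knowledge. A hypothetical consumer of the disable mask (the real call site is in the device core, not in this file):

/* Hypothetical caller showing how the returned engine mask is used. */
static int
example_post(struct nvkm_device *device)
{
	u64 disable;
	int ret = nvkm_devinit_post(device->devinit, &disable);
	if (ret)
		return ret;
	if (disable & (1ULL << NVKM_ENGINE_DISP)) {
		/* straps say display is fused off; skip that engine */
	}
	return 0;
}
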
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
index 36684c3..6c5bbff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
@@ -23,7 +23,6 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include <core/device.h>
 #include <subdev/fb/regsnv04.h>
 
 #define NV04_PFB_DEBUG_0					0x00100080
@@ -48,8 +47,8 @@
 static inline struct io_mapping *
 fbmem_init(struct nvkm_device *dev)
 {
-	return io_mapping_create_wc(nv_device_resource_start(dev, 1),
-				    nv_device_resource_len(dev, 1));
+	return io_mapping_create_wc(dev->func->resource_addr(dev, 1),
+				    dev->func->resource_size(dev, 1));
 }
 
 static inline void
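
fbmem_init() now reaches BAR1 through the per-bus function table instead of the removed nv_device_resource_*() helpers. For a PCI-attached GPU the two hooks would reduce to something like the following (hypothetical: assumes a struct nvkm_device_pci wrapper holding the pci_dev at ->pdev; the real implementations live in the device/bus code):

/* Assumed per-bus resource hooks for PCI. */
static resource_size_t
pci_resource_addr(struct nvkm_device *device, unsigned bar)
{
	return pci_resource_start(nvkm_device_pci(device)->pdev, bar);
}

static resource_size_t
pci_resource_size(struct nvkm_device *device, unsigned bar)
{
	return pci_resource_len(nvkm_device_pci(device)->pdev, bar);
}
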
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
index ca776ce..e895289 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
@@ -27,40 +27,42 @@
 #include <subdev/bios/init.h>
 
 static u64
-g84_devinit_disable(struct nvkm_devinit *devinit)
+g84_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r001540 = nv_rd32(priv, 0x001540);
-	u32 r00154c = nv_rd32(priv, 0x00154c);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r001540 = nvkm_rd32(device, 0x001540);
+	u32 r00154c = nvkm_rd32(device, 0x00154c);
 	u64 disable = 0ULL;
 
 	if (!(r001540 & 0x40000000)) {
-		disable |= (1ULL << NVDEV_ENGINE_MPEG);
-		disable |= (1ULL << NVDEV_ENGINE_VP);
-		disable |= (1ULL << NVDEV_ENGINE_BSP);
-		disable |= (1ULL << NVDEV_ENGINE_CIPHER);
+		disable |= (1ULL << NVKM_ENGINE_MPEG);
+		disable |= (1ULL << NVKM_ENGINE_VP);
+		disable |= (1ULL << NVKM_ENGINE_BSP);
+		disable |= (1ULL << NVKM_ENGINE_CIPHER);
 	}
 
 	if (!(r00154c & 0x00000004))
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 	if (!(r00154c & 0x00000020))
-		disable |= (1ULL << NVDEV_ENGINE_BSP);
+		disable |= (1ULL << NVKM_ENGINE_BSP);
 	if (!(r00154c & 0x00000040))
-		disable |= (1ULL << NVDEV_ENGINE_CIPHER);
+		disable |= (1ULL << NVKM_ENGINE_CIPHER);
 
 	return disable;
 }
 
-struct nvkm_oclass *
-g84_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x84),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+g84_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
 	.pll_set = nv50_devinit_pll_set,
 	.disable = g84_devinit_disable,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+g84_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&g84_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
index d29bace..a9d4584 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
@@ -27,39 +27,41 @@
 #include <subdev/bios/init.h>
 
 static u64
-g98_devinit_disable(struct nvkm_devinit *devinit)
+g98_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r001540 = nv_rd32(priv, 0x001540);
-	u32 r00154c = nv_rd32(priv, 0x00154c);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r001540 = nvkm_rd32(device, 0x001540);
+	u32 r00154c = nvkm_rd32(device, 0x00154c);
 	u64 disable = 0ULL;
 
 	if (!(r001540 & 0x40000000)) {
-		disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
-		disable |= (1ULL << NVDEV_ENGINE_MSVLD);
-		disable |= (1ULL << NVDEV_ENGINE_MSPPP);
+		disable |= (1ULL << NVKM_ENGINE_MSPDEC);
+		disable |= (1ULL << NVKM_ENGINE_MSVLD);
+		disable |= (1ULL << NVKM_ENGINE_MSPPP);
 	}
 
 	if (!(r00154c & 0x00000004))
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 	if (!(r00154c & 0x00000020))
-		disable |= (1ULL << NVDEV_ENGINE_MSVLD);
+		disable |= (1ULL << NVKM_ENGINE_MSVLD);
 	if (!(r00154c & 0x00000040))
-		disable |= (1ULL << NVDEV_ENGINE_SEC);
+		disable |= (1ULL << NVKM_ENGINE_SEC);
 
 	return disable;
 }
 
-struct nvkm_oclass *
-g98_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x98),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+g98_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
 	.pll_set = nv50_devinit_pll_set,
 	.disable = g98_devinit_disable,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+g98_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&g98_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
index c61102f..22b0140 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
@@ -29,19 +29,19 @@
 #include <subdev/clk/pll.h>
 
 int
-gf100_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
+gf100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
 	struct nvbios_pll info;
 	int N, fN, M, P;
 	int ret;
 
-	ret = nvbios_pll_parse(bios, type, &info);
+	ret = nvbios_pll_parse(device->bios, type, &info);
 	if (ret)
 		return ret;
 
-	ret = gt215_pll_calc(nv_subdev(devinit), &info, freq, &N, &fN, &M, &P);
+	ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
 	if (ret < 0)
 		return ret;
 
@@ -50,12 +50,12 @@
 	case PLL_VPLL1:
 	case PLL_VPLL2:
 	case PLL_VPLL3:
-		nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
-		nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
-		nv_wr32(priv, info.reg + 0x10, fN << 16);
+		nvkm_mask(device, info.reg + 0x0c, 0x00000000, 0x00000100);
+		nvkm_wr32(device, info.reg + 0x04, (P << 16) | (N << 8) | M);
+		nvkm_wr32(device, info.reg + 0x10, fN << 16);
 		break;
 	default:
-		nv_warn(priv, "0x%08x/%dKhz unimplemented\n", type, freq);
+		nvkm_warn(subdev, "%08x/%dkHz unimplemented\n", type, freq);
 		ret = -EINVAL;
 		break;
 	}
@@ -64,64 +64,44 @@
 }
 
 static u64
-gf100_devinit_disable(struct nvkm_devinit *devinit)
+gf100_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r022500 = nv_rd32(priv, 0x022500);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r022500 = nvkm_rd32(device, 0x022500);
 	u64 disable = 0ULL;
 
 	if (r022500 & 0x00000001)
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 
 	if (r022500 & 0x00000002) {
-		disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
-		disable |= (1ULL << NVDEV_ENGINE_MSPPP);
+		disable |= (1ULL << NVKM_ENGINE_MSPDEC);
+		disable |= (1ULL << NVKM_ENGINE_MSPPP);
 	}
 
 	if (r022500 & 0x00000004)
-		disable |= (1ULL << NVDEV_ENGINE_MSVLD);
+		disable |= (1ULL << NVKM_ENGINE_MSVLD);
 	if (r022500 & 0x00000008)
-		disable |= (1ULL << NVDEV_ENGINE_MSENC);
+		disable |= (1ULL << NVKM_ENGINE_MSENC);
 	if (r022500 & 0x00000100)
-		disable |= (1ULL << NVDEV_ENGINE_CE0);
+		disable |= (1ULL << NVKM_ENGINE_CE0);
 	if (r022500 & 0x00000200)
-		disable |= (1ULL << NVDEV_ENGINE_CE1);
+		disable |= (1ULL << NVKM_ENGINE_CE1);
 
 	return disable;
 }
 
-int
-gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 size,
-		   struct nvkm_object **pobject)
-{
-	struct nvkm_devinit_impl *impl = (void *)oclass;
-	struct nv50_devinit_priv *priv;
-	u64 disable;
-	int ret;
-
-	ret = nvkm_devinit_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	disable = impl->disable(&priv->base);
-	if (disable & (1ULL << NVDEV_ENGINE_DISP))
-		priv->base.post = true;
-
-	return 0;
-}
-
-struct nvkm_oclass *
-gf100_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+gf100_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
 	.pll_set = gf100_devinit_pll_set,
 	.disable = gf100_devinit_disable,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+gf100_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&gf100_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index 87ca0ec..2be98bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -27,33 +27,35 @@
 #include <subdev/bios/init.h>
 
 u64
-gm107_devinit_disable(struct nvkm_devinit *devinit)
+gm107_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r021c00 = nv_rd32(priv, 0x021c00);
-	u32 r021c04 = nv_rd32(priv, 0x021c04);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r021c00 = nvkm_rd32(device, 0x021c00);
+	u32 r021c04 = nvkm_rd32(device, 0x021c04);
 	u64 disable = 0ULL;
 
 	if (r021c00 & 0x00000001)
-		disable |= (1ULL << NVDEV_ENGINE_CE0);
+		disable |= (1ULL << NVKM_ENGINE_CE0);
 	if (r021c00 & 0x00000004)
-		disable |= (1ULL << NVDEV_ENGINE_CE2);
+		disable |= (1ULL << NVKM_ENGINE_CE2);
 	if (r021c04 & 0x00000001)
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 
 	return disable;
 }
 
-struct nvkm_oclass *
-gm107_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x07),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+gm107_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
 	.pll_set = gf100_devinit_pll_set,
 	.disable = gm107_devinit_disable,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+gm107_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&gm107_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
index 1076fcf..2b9c3f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
@@ -28,69 +28,74 @@
 #include <subdev/bios/pmu.h>
 
 static void
-pmu_code(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len, bool sec)
+pmu_code(struct nv50_devinit *init, u32 pmu, u32 img, u32 len, bool sec)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_device *device = init->base.subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	int i;
 
-	nv_wr32(priv, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);
+	nvkm_wr32(device, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);
 	for (i = 0; i < len; i += 4) {
 		if ((i & 0xff) == 0)
-			nv_wr32(priv, 0x10a188, (pmu + i) >> 8);
-		nv_wr32(priv, 0x10a184, nv_ro32(bios, img + i));
+			nvkm_wr32(device, 0x10a188, (pmu + i) >> 8);
+		nvkm_wr32(device, 0x10a184, nvbios_rd32(bios, img + i));
 	}
 
 	while (i & 0xff) {
-		nv_wr32(priv, 0x10a184, 0x00000000);
+		nvkm_wr32(device, 0x10a184, 0x00000000);
 		i += 4;
 	}
 }
 
 static void
-pmu_data(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len)
+pmu_data(struct nv50_devinit *init, u32 pmu, u32 img, u32 len)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_device *device = init->base.subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	int i;
 
-	nv_wr32(priv, 0x10a1c0, 0x01000000 | pmu);
+	nvkm_wr32(device, 0x10a1c0, 0x01000000 | pmu);
 	for (i = 0; i < len; i += 4)
-		nv_wr32(priv, 0x10a1c4, nv_ro32(bios, img + i));
+		nvkm_wr32(device, 0x10a1c4, nvbios_rd32(bios, img + i));
 }
 
 static u32
-pmu_args(struct nv50_devinit_priv *priv, u32 argp, u32 argi)
+pmu_args(struct nv50_devinit *init, u32 argp, u32 argi)
 {
-	nv_wr32(priv, 0x10a1c0, argp);
-	nv_wr32(priv, 0x10a1c0, nv_rd32(priv, 0x10a1c4) + argi);
-	return nv_rd32(priv, 0x10a1c4);
+	struct nvkm_device *device = init->base.subdev.device;
+	nvkm_wr32(device, 0x10a1c0, argp);
+	nvkm_wr32(device, 0x10a1c0, nvkm_rd32(device, 0x10a1c4) + argi);
+	return nvkm_rd32(device, 0x10a1c4);
 }
 
 static void
-pmu_exec(struct nv50_devinit_priv *priv, u32 init_addr)
+pmu_exec(struct nv50_devinit *init, u32 init_addr)
 {
-	nv_wr32(priv, 0x10a104, init_addr);
-	nv_wr32(priv, 0x10a10c, 0x00000000);
-	nv_wr32(priv, 0x10a100, 0x00000002);
+	struct nvkm_device *device = init->base.subdev.device;
+	nvkm_wr32(device, 0x10a104, init_addr);
+	nvkm_wr32(device, 0x10a10c, 0x00000000);
+	nvkm_wr32(device, 0x10a100, 0x00000002);
 }
 
 static int
-pmu_load(struct nv50_devinit_priv *priv, u8 type, bool post,
+pmu_load(struct nv50_devinit *init, u8 type, bool post,
 	 u32 *init_addr_pmu, u32 *args_addr_pmu)
 {
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &init->base.subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_pmuR pmu;
 
 	if (!nvbios_pmuRm(bios, type, &pmu)) {
-		nv_error(priv, "VBIOS PMU fuc %02x not found\n", type);
+		nvkm_error(subdev, "VBIOS PMU fuc %02x not found\n", type);
 		return -EINVAL;
 	}
 
 	if (!post)
 		return 0;
 
-	pmu_code(priv, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false);
-	pmu_code(priv, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true);
-	pmu_data(priv, pmu.data_addr_pmu, pmu.data_addr, pmu.data_size);
+	pmu_code(init, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false);
+	pmu_code(init, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true);
+	pmu_data(init, pmu.data_addr_pmu, pmu.data_addr, pmu.data_size);
 
 	if (init_addr_pmu) {
 		*init_addr_pmu = pmu.init_addr_pmu;
@@ -98,75 +103,79 @@
 		return 0;
 	}
 
-	return pmu_exec(priv, pmu.init_addr_pmu), 0;
+	return pmu_exec(init, pmu.init_addr_pmu), 0;
 }
 
 static int
-gm204_devinit_post(struct nvkm_subdev *subdev, bool post)
+gm204_devinit_post(struct nvkm_devinit *base, bool post)
 {
-	struct nv50_devinit_priv *priv = (void *)nvkm_devinit(subdev);
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nv50_devinit *init = nv50_devinit(base);
+	struct nvkm_subdev *subdev = &init->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct bit_entry bit_I;
-	u32 init, args;
+	u32 exec, args;
 	int ret;
 
 	if (bit_entry(bios, 'I', &bit_I) || bit_I.version != 1 ||
 					    bit_I.length < 0x1c) {
-		nv_error(priv, "VBIOS PMU init data not found\n");
+		nvkm_error(subdev, "VBIOS PMU init data not found\n");
 		return -EINVAL;
 	}
 
 	/* reset PMU and load init table parser ucode */
 	if (post) {
-		nv_mask(priv, 0x000200, 0x00002000, 0x00000000);
-		nv_mask(priv, 0x000200, 0x00002000, 0x00002000);
-		nv_rd32(priv, 0x000200);
-		while (nv_rd32(priv, 0x10a10c) & 0x00000006) {
+		nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
+		nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
+		nvkm_rd32(device, 0x000200);
+		while (nvkm_rd32(device, 0x10a10c) & 0x00000006) {
 		}
 	}
 
-	ret = pmu_load(priv, 0x04, post, &init, &args);
+	ret = pmu_load(init, 0x04, post, &exec, &args);
 	if (ret)
 		return ret;
 
 	/* upload first chunk of init data */
 	if (post) {
-		u32 pmu = pmu_args(priv, args + 0x08, 0x08);
-		u32 img = nv_ro16(bios, bit_I.offset + 0x14);
-		u32 len = nv_ro16(bios, bit_I.offset + 0x16);
-		pmu_data(priv, pmu, img, len);
+		u32 pmu = pmu_args(init, args + 0x08, 0x08);
+		u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
+		u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
+		pmu_data(init, pmu, img, len);
 	}
 
 	/* upload second chunk of init data */
 	if (post) {
-		u32 pmu = pmu_args(priv, args + 0x08, 0x10);
-		u32 img = nv_ro16(bios, bit_I.offset + 0x18);
-		u32 len = nv_ro16(bios, bit_I.offset + 0x1a);
-		pmu_data(priv, pmu, img, len);
+		u32 pmu = pmu_args(init, args + 0x08, 0x10);
+		u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
+		u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
+		pmu_data(init, pmu, img, len);
 	}
 
 	/* execute init tables */
 	if (post) {
-		nv_wr32(priv, 0x10a040, 0x00005000);
-		pmu_exec(priv, init);
-		while (!(nv_rd32(priv, 0x10a040) & 0x00002000)) {
+		nvkm_wr32(device, 0x10a040, 0x00005000);
+		pmu_exec(init, exec);
+		while (!(nvkm_rd32(device, 0x10a040) & 0x00002000)) {
 		}
 	}
 
 	/* load and execute some other ucode image (bios therm?) */
-	return pmu_load(priv, 0x01, post, NULL, NULL);
+	return pmu_load(init, 0x01, post, NULL, NULL);
 }
 
-struct nvkm_oclass *
-gm204_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x07),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+gm204_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = gm204_devinit_post,
 	.pll_set = gf100_devinit_pll_set,
 	.disable = gm107_devinit_disable,
-	.post = gm204_devinit_post,
-}.base;
+};
+
+int
+gm204_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&gm204_devinit, device, index, pinit);
+}
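
Two aspects of this PMU-driven POST deserve a note. The upload helpers drive what look like the falcon's auto-incrementing code/data ports (0x10a180 control, 0x10a184 data, a tag write to 0x10a188 every 256 bytes, and the 0x10a1c0/0x10a1c4 pair for data memory, as inferred from the helpers above), and the post routine then spins on 0x10a10c and 0x10a040 with unbounded busy-waits. If a bounded wait were preferred, a sketch (the iteration count is a guess, not a measured value):

/* Bounded replacement for the empty polls above.
 * udelay() is from <linux/delay.h>.
 */
static int
pmu_wait_idle(struct nvkm_device *device)
{
	int i;
	for (i = 0; i < 2000000; i++) {
		if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
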
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
index 6a3e8d4..9a8522f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
@@ -29,32 +29,32 @@
 #include <subdev/clk/pll.h>
 
 int
-gt215_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
+gt215_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
 	struct nvbios_pll info;
 	int N, fN, M, P;
 	int ret;
 
-	ret = nvbios_pll_parse(bios, type, &info);
+	ret = nvbios_pll_parse(device->bios, type, &info);
 	if (ret)
 		return ret;
 
-	ret = gt215_pll_calc(nv_subdev(devinit), &info, freq, &N, &fN, &M, &P);
+	ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
 	if (ret < 0)
 		return ret;
 
 	switch (info.type) {
 	case PLL_VPLL0:
 	case PLL_VPLL1:
-		nv_wr32(priv, info.reg + 0, 0x50000610);
-		nv_mask(priv, info.reg + 4, 0x003fffff,
-					    (P << 16) | (M << 8) | N);
-		nv_wr32(priv, info.reg + 8, fN);
+		nvkm_wr32(device, info.reg + 0, 0x50000610);
+		nvkm_mask(device, info.reg + 4, 0x003fffff,
+						(P << 16) | (M << 8) | N);
+		nvkm_wr32(device, info.reg + 8, fN);
 		break;
 	default:
-		nv_warn(priv, "0x%08x/%dKhz unimplemented\n", type, freq);
+		nvkm_warn(subdev, "%08x/%dkHz unimplemented\n", type, freq);
 		ret = -EINVAL;
 		break;
 	}
@@ -63,24 +63,24 @@
 }
 
 static u64
-gt215_devinit_disable(struct nvkm_devinit *devinit)
+gt215_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r001540 = nv_rd32(priv, 0x001540);
-	u32 r00154c = nv_rd32(priv, 0x00154c);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r001540 = nvkm_rd32(device, 0x001540);
+	u32 r00154c = nvkm_rd32(device, 0x00154c);
 	u64 disable = 0ULL;
 
 	if (!(r001540 & 0x40000000)) {
-		disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
-		disable |= (1ULL << NVDEV_ENGINE_MSPPP);
+		disable |= (1ULL << NVKM_ENGINE_MSPDEC);
+		disable |= (1ULL << NVKM_ENGINE_MSPPP);
 	}
 
 	if (!(r00154c & 0x00000004))
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 	if (!(r00154c & 0x00000020))
-		disable |= (1ULL << NVDEV_ENGINE_MSVLD);
+		disable |= (1ULL << NVKM_ENGINE_MSVLD);
 	if (!(r00154c & 0x00000200))
-		disable |= (1ULL << NVDEV_ENGINE_CE0);
+		disable |= (1ULL << NVKM_ENGINE_CE0);
 
 	return disable;
 }
@@ -99,9 +99,10 @@
 };
 
 static u32
-gt215_devinit_mmio(struct nvkm_devinit *devinit, u32 addr)
+gt215_devinit_mmio(struct nvkm_devinit *base, u32 addr)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
+	struct nv50_devinit *init = nv50_devinit(base);
+	struct nvkm_device *device = init->base.subdev.device;
 	u32 *mmio = gt215_devinit_mmio_part;
 
 	/* the init tables on some boards have INIT_RAM_RESTRICT_ZM_REG_GROUP
@@ -113,7 +114,7 @@
 	 *
 	 * the binary driver avoids touching these registers at all, however,
 	 * the video bios doesn't care and does what the scripts say.  it's
-	 * presumed that the io-port access to priv registers isn't effected
+	 * presumed that the io-port access to init registers isn't affected
 	 * by the screw-up bug mentioned above.
 	 *
 	 * really, a new opcode should've been invented to handle these
@@ -122,9 +123,9 @@
 	while (mmio[0]) {
 		if (addr >= mmio[0] && addr <= mmio[1]) {
 			u32 part = (addr / mmio[2]) & 7;
-			if (!priv->r001540)
-				priv->r001540 = nv_rd32(priv, 0x001540);
-			if (part >= hweight8((priv->r001540 >> 16) & 0xff))
+			if (!init->r001540)
+				init->r001540 = nvkm_rd32(device, 0x001540);
+			if (part >= hweight8((init->r001540 >> 16) & 0xff))
 				return ~0;
 			return addr;
 		}
@@ -134,17 +135,19 @@
 	return addr;
 }
 
-struct nvkm_oclass *
-gt215_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0xa3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+gt215_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
+	.mmio = gt215_devinit_mmio,
 	.pll_set = gt215_devinit_pll_set,
 	.disable = gt215_devinit_disable,
-	.mmio    = gt215_devinit_mmio,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+gt215_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&gt215_devinit, device, index, pinit);
+}
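
The mmio hook gt215 implements here pairs with the nvkm_devinit_mmio() wrapper added to base.c earlier in this series: the VBIOS init-table interpreter offers each scripted register address to the hook, which may pass it through or return ~0 to veto the access on boards with fused-off RAM partitions. Roughly (hypothetical helper; the real interpreter lives in subdev/bios/init.c):

/* Assumed shape of the interpreter side of the mmio hook. */
static void
init_wr32(struct nvkm_devinit *init, u32 addr, u32 data)
{
	addr = nvkm_devinit_mmio(init, addr);
	if (addr != ~0)
		nvkm_wr32(init->subdev.device, addr, data);
}
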
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
index 55cf48b..ce4f718 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
@@ -27,40 +27,42 @@
 #include <subdev/bios/init.h>
 
 static u64
-mcp89_devinit_disable(struct nvkm_devinit *devinit)
+mcp89_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r001540 = nv_rd32(priv, 0x001540);
-	u32 r00154c = nv_rd32(priv, 0x00154c);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r001540 = nvkm_rd32(device, 0x001540);
+	u32 r00154c = nvkm_rd32(device, 0x00154c);
 	u64 disable = 0;
 
 	if (!(r001540 & 0x40000000)) {
-		disable |= (1ULL << NVDEV_ENGINE_MSPDEC);
-		disable |= (1ULL << NVDEV_ENGINE_MSPPP);
+		disable |= (1ULL << NVKM_ENGINE_MSPDEC);
+		disable |= (1ULL << NVKM_ENGINE_MSPPP);
 	}
 
 	if (!(r00154c & 0x00000004))
-		disable |= (1ULL << NVDEV_ENGINE_DISP);
+		disable |= (1ULL << NVKM_ENGINE_DISP);
 	if (!(r00154c & 0x00000020))
-		disable |= (1ULL << NVDEV_ENGINE_MSVLD);
+		disable |= (1ULL << NVKM_ENGINE_MSVLD);
 	if (!(r00154c & 0x00000040))
-		disable |= (1ULL << NVDEV_ENGINE_VIC);
+		disable |= (1ULL << NVKM_ENGINE_VIC);
 	if (!(r00154c & 0x00000200))
-		disable |= (1ULL << NVDEV_ENGINE_CE0);
+		disable |= (1ULL << NVKM_ENGINE_CE0);
 
 	return disable;
 }
 
-struct nvkm_oclass *
-mcp89_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0xaf),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+mcp89_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
 	.pll_set = gt215_devinit_pll_set,
 	.disable = mcp89_devinit_disable,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+mcp89_devinit_new(struct nvkm_device *device, int index,
+		struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&mcp89_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
index 03a0da8..c8d4553 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
@@ -33,25 +33,26 @@
 #include <subdev/vga.h>
 
 static void
-nv04_devinit_meminit(struct nvkm_devinit *devinit)
+nv04_devinit_meminit(struct nvkm_devinit *init)
 {
-	struct nv04_devinit_priv *priv = (void *)devinit;
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 patt = 0xdeadbeef;
 	struct io_mapping *fb;
 	int i;
 
 	/* Map the framebuffer aperture */
-	fb = fbmem_init(nv_device(priv));
+	fb = fbmem_init(device);
 	if (!fb) {
-		nv_error(priv, "failed to map fb\n");
+		nvkm_error(subdev, "failed to map fb\n");
 		return;
 	}
 
 	/* Sequencer and refresh off */
-	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20);
-	nv_mask(priv, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);
+	nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) | 0x20);
+	nvkm_mask(device, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);
 
-	nv_mask(priv, NV04_PFB_BOOT_0, ~0,
+	nvkm_mask(device, NV04_PFB_BOOT_0, ~0,
 		      NV04_PFB_BOOT_0_RAM_AMOUNT_16MB |
 		      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
 		      NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT);
@@ -62,49 +63,49 @@
 	fbmem_poke(fb, 0x400000, patt + 1);
 
 	if (fbmem_peek(fb, 0) == patt + 1) {
-		nv_mask(priv, NV04_PFB_BOOT_0,
+		nvkm_mask(device, NV04_PFB_BOOT_0,
 			      NV04_PFB_BOOT_0_RAM_TYPE,
 			      NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT);
-		nv_mask(priv, NV04_PFB_DEBUG_0,
+		nvkm_mask(device, NV04_PFB_DEBUG_0,
 			      NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
 
 		for (i = 0; i < 4; i++)
 			fbmem_poke(fb, 4 * i, patt);
 
 		if ((fbmem_peek(fb, 0xc) & 0xffff) != (patt & 0xffff))
-			nv_mask(priv, NV04_PFB_BOOT_0,
+			nvkm_mask(device, NV04_PFB_BOOT_0,
 				      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
 				      NV04_PFB_BOOT_0_RAM_AMOUNT,
 				      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
 	} else
 	if ((fbmem_peek(fb, 0xc) & 0xffff0000) != (patt & 0xffff0000)) {
-		nv_mask(priv, NV04_PFB_BOOT_0,
+		nvkm_mask(device, NV04_PFB_BOOT_0,
 			      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
 			      NV04_PFB_BOOT_0_RAM_AMOUNT,
 			      NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
 	} else
 	if (fbmem_peek(fb, 0) != patt) {
 		if (fbmem_readback(fb, 0x800000, patt))
-			nv_mask(priv, NV04_PFB_BOOT_0,
+			nvkm_mask(device, NV04_PFB_BOOT_0,
 				      NV04_PFB_BOOT_0_RAM_AMOUNT,
 				      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
 		else
-			nv_mask(priv, NV04_PFB_BOOT_0,
+			nvkm_mask(device, NV04_PFB_BOOT_0,
 				      NV04_PFB_BOOT_0_RAM_AMOUNT,
 				      NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
 
-		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
+		nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
 			      NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT);
 	} else
 	if (!fbmem_readback(fb, 0x800000, patt)) {
-		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+		nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
 			      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
 
 	}
 
 	/* Refresh on, sequencer on */
-	nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
-	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20);
+	nvkm_mask(device, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+	nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) & ~0x20);
 	fbmem_fini(fb);
 }
 
@@ -139,11 +140,12 @@
 }
 
 void
-setPLL_single(struct nvkm_devinit *devinit, u32 reg,
+setPLL_single(struct nvkm_devinit *init, u32 reg,
 	      struct nvkm_pll_vals *pv)
 {
-	int chip_version = nvkm_bios(devinit)->version.chip;
-	uint32_t oldpll = nv_rd32(devinit, reg);
+	struct nvkm_device *device = init->subdev.device;
+	int chip_version = device->bios->version.chip;
+	uint32_t oldpll = nvkm_rd32(device, reg);
 	int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
 	uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
 	uint32_t saved_powerctrl_1 = 0;
@@ -153,30 +155,30 @@
 		return;	/* already set */
 
 	if (shift_powerctrl_1 >= 0) {
-		saved_powerctrl_1 = nv_rd32(devinit, 0x001584);
-		nv_wr32(devinit, 0x001584,
+		saved_powerctrl_1 = nvkm_rd32(device, 0x001584);
+		nvkm_wr32(device, 0x001584,
 			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
 			1 << shift_powerctrl_1);
 	}
 
 	if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
 		/* upclock -- write new post divider first */
-		nv_wr32(devinit, reg, pv->log2P << 16 | (oldpll & 0xffff));
+		nvkm_wr32(device, reg, pv->log2P << 16 | (oldpll & 0xffff));
 	else
 		/* downclock -- write new NM first */
-		nv_wr32(devinit, reg, (oldpll & 0xffff0000) | pv->NM1);
+		nvkm_wr32(device, reg, (oldpll & 0xffff0000) | pv->NM1);
 
 	if ((chip_version < 0x17 || chip_version == 0x1a) &&
 	    chip_version != 0x11)
 		/* wait a bit on older chips */
 		msleep(64);
-	nv_rd32(devinit, reg);
+	nvkm_rd32(device, reg);
 
 	/* then write the other half as well */
-	nv_wr32(devinit, reg, pll);
+	nvkm_wr32(device, reg, pll);
 
 	if (shift_powerctrl_1 >= 0)
-		nv_wr32(devinit, 0x001584, saved_powerctrl_1);
+		nvkm_wr32(device, 0x001584, saved_powerctrl_1);
 }
 
 static uint32_t
@@ -193,14 +195,15 @@
 }
 
 void
-setPLL_double_highregs(struct nvkm_devinit *devinit, u32 reg1,
+setPLL_double_highregs(struct nvkm_devinit *init, u32 reg1,
 		       struct nvkm_pll_vals *pv)
 {
-	int chip_version = nvkm_bios(devinit)->version.chip;
+	struct nvkm_device *device = init->subdev.device;
+	int chip_version = device->bios->version.chip;
 	bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
 	uint32_t reg2 = reg1 + ((reg1 == 0x680520) ? 0x5c : 0x70);
-	uint32_t oldpll1 = nv_rd32(devinit, reg1);
-	uint32_t oldpll2 = !nv3035 ? nv_rd32(devinit, reg2) : 0;
+	uint32_t oldpll1 = nvkm_rd32(device, reg1);
+	uint32_t oldpll2 = !nv3035 ? nvkm_rd32(device, reg2) : 0;
 	uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
 	uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
 	uint32_t oldramdac580 = 0, ramdac580 = 0;
@@ -215,7 +218,7 @@
 		pll2 = 0;
 	}
 	if (chip_version > 0x40 && reg1 >= 0x680508) { /* !nv40 */
-		oldramdac580 = nv_rd32(devinit, 0x680580);
+		oldramdac580 = nvkm_rd32(device, 0x680580);
 		ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
 		if (oldramdac580 != ramdac580)
 			oldpll1 = ~0;	/* force mismatch */
@@ -231,8 +234,8 @@
 		return;	/* already set */
 
 	if (shift_powerctrl_1 >= 0) {
-		saved_powerctrl_1 = nv_rd32(devinit, 0x001584);
-		nv_wr32(devinit, 0x001584,
+		saved_powerctrl_1 = nvkm_rd32(device, 0x001584);
+		nvkm_wr32(device, 0x001584,
 			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
 			1 << shift_powerctrl_1);
 	}
@@ -251,26 +254,26 @@
 			shift_c040 += 2;
 		}
 
-		savedc040 = nv_rd32(devinit, 0xc040);
+		savedc040 = nvkm_rd32(device, 0xc040);
 		if (shift_c040 != 14)
-			nv_wr32(devinit, 0xc040, savedc040 & ~(3 << shift_c040));
+			nvkm_wr32(device, 0xc040, savedc040 & ~(3 << shift_c040));
 	}
 
 	if (oldramdac580 != ramdac580)
-		nv_wr32(devinit, 0x680580, ramdac580);
+		nvkm_wr32(device, 0x680580, ramdac580);
 
 	if (!nv3035)
-		nv_wr32(devinit, reg2, pll2);
-	nv_wr32(devinit, reg1, pll1);
+		nvkm_wr32(device, reg2, pll2);
+	nvkm_wr32(device, reg1, pll1);
 
 	if (shift_powerctrl_1 >= 0)
-		nv_wr32(devinit, 0x001584, saved_powerctrl_1);
+		nvkm_wr32(device, 0x001584, saved_powerctrl_1);
 	if (chip_version >= 0x40)
-		nv_wr32(devinit, 0xc040, savedc040);
+		nvkm_wr32(device, 0xc040, savedc040);
 }
 
 void
-setPLL_double_lowregs(struct nvkm_devinit *devinit, u32 NMNMreg,
+setPLL_double_lowregs(struct nvkm_devinit *init, u32 NMNMreg,
 		      struct nvkm_pll_vals *pv)
 {
 	/* When setting PLLs, there is a merry game of disabling and enabling
@@ -280,10 +283,10 @@
 	 * combined herein. Without luck it deviates from each card's formula
 	 * so as to not work on any :)
 	 */
-
+	struct nvkm_device *device = init->subdev.device;
 	uint32_t Preg = NMNMreg - 4;
 	bool mpll = Preg == 0x4020;
-	uint32_t oldPval = nv_rd32(devinit, Preg);
+	uint32_t oldPval = nvkm_rd32(device, Preg);
 	uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
 	uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
 			0xc << 28 | pv->log2P << 16;
@@ -292,7 +295,7 @@
 	uint32_t maskc040 = ~(3 << 14), savedc040;
 	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
 
-	if (nv_rd32(devinit, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
+	if (nvkm_rd32(device, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
 		return;
 
 	if (Preg == 0x4000)
@@ -304,7 +307,7 @@
 		struct nvbios_pll info;
 		uint8_t Pval2;
 
-		if (nvbios_pll_parse(nvkm_bios(devinit), Preg, &info))
+		if (nvbios_pll_parse(device->bios, Preg, &info))
 			return;
 
 		Pval2 = pv->log2P + info.bias_p;
@@ -312,47 +315,48 @@
 			Pval2 = info.max_p;
 		Pval |= 1 << 28 | Pval2 << 20;
 
-		saved4600 = nv_rd32(devinit, 0x4600);
-		nv_wr32(devinit, 0x4600, saved4600 | 8 << 28);
+		saved4600 = nvkm_rd32(device, 0x4600);
+		nvkm_wr32(device, 0x4600, saved4600 | 8 << 28);
 	}
 	if (single_stage)
 		Pval |= mpll ? 1 << 12 : 1 << 8;
 
-	nv_wr32(devinit, Preg, oldPval | 1 << 28);
-	nv_wr32(devinit, Preg, Pval & ~(4 << 28));
+	nvkm_wr32(device, Preg, oldPval | 1 << 28);
+	nvkm_wr32(device, Preg, Pval & ~(4 << 28));
 	if (mpll) {
 		Pval |= 8 << 20;
-		nv_wr32(devinit, 0x4020, Pval & ~(0xc << 28));
-		nv_wr32(devinit, 0x4038, Pval & ~(0xc << 28));
+		nvkm_wr32(device, 0x4020, Pval & ~(0xc << 28));
+		nvkm_wr32(device, 0x4038, Pval & ~(0xc << 28));
 	}
 
-	savedc040 = nv_rd32(devinit, 0xc040);
-	nv_wr32(devinit, 0xc040, savedc040 & maskc040);
+	savedc040 = nvkm_rd32(device, 0xc040);
+	nvkm_wr32(device, 0xc040, savedc040 & maskc040);
 
-	nv_wr32(devinit, NMNMreg, NMNM);
+	nvkm_wr32(device, NMNMreg, NMNM);
 	if (NMNMreg == 0x4024)
-		nv_wr32(devinit, 0x403c, NMNM);
+		nvkm_wr32(device, 0x403c, NMNM);
 
-	nv_wr32(devinit, Preg, Pval);
+	nvkm_wr32(device, Preg, Pval);
 	if (mpll) {
 		Pval &= ~(8 << 20);
-		nv_wr32(devinit, 0x4020, Pval);
-		nv_wr32(devinit, 0x4038, Pval);
-		nv_wr32(devinit, 0x4600, saved4600);
+		nvkm_wr32(device, 0x4020, Pval);
+		nvkm_wr32(device, 0x4038, Pval);
+		nvkm_wr32(device, 0x4600, saved4600);
 	}
 
-	nv_wr32(devinit, 0xc040, savedc040);
+	nvkm_wr32(device, 0xc040, savedc040);
 
 	if (mpll) {
-		nv_wr32(devinit, 0x4020, Pval & ~(1 << 28));
-		nv_wr32(devinit, 0x4038, Pval & ~(1 << 28));
+		nvkm_wr32(device, 0x4020, Pval & ~(1 << 28));
+		nvkm_wr32(device, 0x4038, Pval & ~(1 << 28));
 	}
 }
 
 int
 nv04_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
 {
-	struct nvkm_bios *bios = nvkm_bios(devinit);
+	struct nvkm_subdev *subdev = &devinit->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvkm_pll_vals pv;
 	struct nvbios_pll info;
 	int cv = bios->version.chip;
@@ -363,8 +367,7 @@
 	if (ret)
 		return ret;
 
-	ret = nv04_pll_calc(nv_subdev(devinit), &info, freq,
-			    &N1, &M1, &N2, &M2, &P);
+	ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P);
 	if (!ret)
 		return -EINVAL;
 
@@ -388,83 +391,76 @@
 }
 
 int
-nv04_devinit_fini(struct nvkm_object *object, bool suspend)
+nv04_devinit_post(struct nvkm_devinit *init, bool execute)
 {
-	struct nv04_devinit_priv *priv = (void *)object;
-	int ret;
-
-	/* make i2c busses accessible */
-	nv_mask(priv, 0x000200, 0x00000001, 0x00000001);
-
-	ret = nvkm_devinit_fini(&priv->base, suspend);
-	if (ret)
-		return ret;
-
-	/* unslave crtcs */
-	if (priv->owner < 0)
-		priv->owner = nv_rdvgaowner(priv);
-	nv_wrvgaowner(priv, 0);
-	return 0;
-}
-
-int
-nv04_devinit_init(struct nvkm_object *object)
-{
-	struct nv04_devinit_priv *priv = (void *)object;
-
-	if (!priv->base.post) {
-		u32 htotal = nv_rdvgac(priv, 0, 0x06);
-		htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x01) << 8;
-		htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x20) << 4;
-		htotal |= (nv_rdvgac(priv, 0, 0x25) & 0x01) << 10;
-		htotal |= (nv_rdvgac(priv, 0, 0x41) & 0x01) << 11;
-		if (!htotal) {
-			nv_info(priv, "adaptor not initialised\n");
-			priv->base.post = true;
-		}
-	}
-
-	return nvkm_devinit_init(&priv->base);
+	return nvbios_init(&init->subdev, execute);
 }
 
 void
-nv04_devinit_dtor(struct nvkm_object *object)
+nv04_devinit_preinit(struct nvkm_devinit *base)
 {
-	struct nv04_devinit_priv *priv = (void *)object;
+	struct nv04_devinit *init = nv04_devinit(base);
+	struct nvkm_subdev *subdev = &init->base.subdev;
+	struct nvkm_device *device = subdev->device;
 
+	/* make i2c busses accessible */
+	nvkm_mask(device, 0x000200, 0x00000001, 0x00000001);
+
+	/* unslave crtcs */
+	if (init->owner < 0)
+		init->owner = nvkm_rdvgaowner(device);
+	nvkm_wrvgaowner(device, 0);
+
+	if (!init->base.post) {
+		u32 htotal = nvkm_rdvgac(device, 0, 0x06);
+		htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x01) << 8;
+		htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x20) << 4;
+		htotal |= (nvkm_rdvgac(device, 0, 0x25) & 0x01) << 10;
+		htotal |= (nvkm_rdvgac(device, 0, 0x41) & 0x01) << 11;
+		if (!htotal) {
+			nvkm_debug(subdev, "adaptor not initialised\n");
+			init->base.post = true;
+		}
+	}
+}
+
+void *
+nv04_devinit_dtor(struct nvkm_devinit *base)
+{
+	struct nv04_devinit *init = nv04_devinit(base);
 	/* restore vga owner saved at first init */
-	nv_wrvgaowner(priv, priv->owner);
-
-	nvkm_devinit_destroy(&priv->base);
+	nvkm_wrvgaowner(init->base.subdev.device, init->owner);
+	return init;
 }
 
 int
-nv04_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv04_devinit_new_(const struct nvkm_devinit_func *func,
+		  struct nvkm_device *device, int index,
+		  struct nvkm_devinit **pinit)
 {
-	struct nv04_devinit_priv *priv;
-	int ret;
+	struct nv04_devinit *init;
 
-	ret = nvkm_devinit_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(init = kzalloc(sizeof(*init), GFP_KERNEL)))
+		return -ENOMEM;
+	*pinit = &init->base;
 
-	priv->owner = -1;
+	nvkm_devinit_ctor(func, device, index, &init->base);
+	init->owner = -1;
 	return 0;
 }
 
-struct nvkm_oclass *
-nv04_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_devinit_ctor,
-		.dtor = nv04_devinit_dtor,
-		.init = nv04_devinit_init,
-		.fini = nv04_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv04_devinit = {
+	.dtor = nv04_devinit_dtor,
+	.preinit = nv04_devinit_preinit,
+	.post = nv04_devinit_post,
 	.meminit = nv04_devinit_meminit,
 	.pll_set = nv04_devinit_pll_set,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv04_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv04_devinit_new_(&nv04_devinit, device, index, pinit);
+}
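
The register layout all of the setPLL helpers above manipulate packs log2P into bits 16-18 and the N/M pair into the low sixteen bits; a decoding helper makes the bit-fiddling explicit (illustrative, derived directly from the masks in the code above):

/* Decode an nv04-style PLL register into its coefficients. */
static void
pll_decode(u32 pll, int *N, int *M, int *log2P)
{
	*log2P = (pll >> 16) & 0x07;
	*N = (pll >> 8) & 0xff;
	*M = pll & 0xff;
}
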
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
index 7c63abf..4a87c8c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
@@ -1,19 +1,19 @@
-#ifndef __NVKM_DEVINIT_NV04_H__
-#define __NVKM_DEVINIT_NV04_H__
+#ifndef __NV04_DEVINIT_H__
+#define __NV04_DEVINIT_H__
+#define nv04_devinit(p) container_of((p), struct nv04_devinit, base)
 #include "priv.h"
 struct nvkm_pll_vals;
 
-struct nv04_devinit_priv {
+struct nv04_devinit {
 	struct nvkm_devinit base;
 	int owner;
 };
 
-int  nv04_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, void *, u32,
-		       struct nvkm_object **);
-void nv04_devinit_dtor(struct nvkm_object *);
-int  nv04_devinit_init(struct nvkm_object *);
-int  nv04_devinit_fini(struct nvkm_object *, bool);
+int nv04_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *,
+		      int, struct nvkm_devinit **);
+void *nv04_devinit_dtor(struct nvkm_devinit *);
+void nv04_devinit_preinit(struct nvkm_devinit *);
+void nv04_devinit_fini(struct nvkm_devinit *);
 int  nv04_devinit_pll_set(struct nvkm_devinit *, u32, u32);
 
 void setPLL_single(struct nvkm_devinit *, u32, struct nvkm_pll_vals *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
index def8649..9891ead 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv05.c
@@ -32,7 +32,7 @@
 #include <subdev/vga.h>
 
 static void
-nv05_devinit_meminit(struct nvkm_devinit *devinit)
+nv05_devinit_meminit(struct nvkm_devinit *init)
 {
 	static const u8 default_config_tab[][2] = {
 		{ 0x24, 0x00 },
@@ -44,8 +44,9 @@
 		{ 0x06, 0x00 },
 		{ 0x00, 0x00 }
 	};
-	struct nv04_devinit_priv *priv = (void *)devinit;
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct io_mapping *fb;
 	u32 patt = 0xdeadbeef;
 	u16 data;
@@ -53,88 +54,90 @@
 	int i, v;
 
 	/* Map the framebuffer aperture */
-	fb = fbmem_init(nv_device(priv));
+	fb = fbmem_init(device);
 	if (!fb) {
-		nv_error(priv, "failed to map fb\n");
+		nvkm_error(subdev, "failed to map fb\n");
 		return;
 	}
 
-	strap = (nv_rd32(priv, 0x101000) & 0x0000003c) >> 2;
+	strap = (nvkm_rd32(device, 0x101000) & 0x0000003c) >> 2;
 	if ((data = bmp_mem_init_table(bios))) {
-		ramcfg[0] = nv_ro08(bios, data + 2 * strap + 0);
-		ramcfg[1] = nv_ro08(bios, data + 2 * strap + 1);
+		ramcfg[0] = nvbios_rd08(bios, data + 2 * strap + 0);
+		ramcfg[1] = nvbios_rd08(bios, data + 2 * strap + 1);
 	} else {
 		ramcfg[0] = default_config_tab[strap][0];
 		ramcfg[1] = default_config_tab[strap][1];
 	}
 
 	/* Sequencer off */
-	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20);
+	nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) | 0x20);
 
-	if (nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
+	if (nvkm_rd32(device, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
 		goto out;
 
-	nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+	nvkm_mask(device, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
 
 	/* If present load the hardcoded scrambling table */
 	if (data) {
 		for (i = 0, data += 0x10; i < 8; i++, data += 4) {
-			u32 scramble = nv_ro32(bios, data);
-			nv_wr32(priv, NV04_PFB_SCRAMBLE(i), scramble);
+			u32 scramble = nvbios_rd32(bios, data);
+			nvkm_wr32(device, NV04_PFB_SCRAMBLE(i), scramble);
 		}
 	}
 
 	/* Set memory type/width/length defaults depending on the straps */
-	nv_mask(priv, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);
+	nvkm_mask(device, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);
 
 	if (ramcfg[1] & 0x80)
-		nv_mask(priv, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);
+		nvkm_mask(device, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);
 
-	nv_mask(priv, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
-	nv_mask(priv, NV04_PFB_CFG1, 0, 1);
+	nvkm_mask(device, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
+	nvkm_mask(device, NV04_PFB_CFG1, 0, 1);
 
 	/* Probe memory bus width */
 	for (i = 0; i < 4; i++)
 		fbmem_poke(fb, 4 * i, patt);
 
 	if (fbmem_peek(fb, 0xc) != patt)
-		nv_mask(priv, NV04_PFB_BOOT_0,
+		nvkm_mask(device, NV04_PFB_BOOT_0,
 			  NV04_PFB_BOOT_0_RAM_WIDTH_128, 0);
 
 	/* Probe memory length */
-	v = nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;
+	v = nvkm_rd32(device, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;
 
 	if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB &&
 	    (!fbmem_readback(fb, 0x1000000, ++patt) ||
 	     !fbmem_readback(fb, 0, ++patt)))
-		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+		nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
 			  NV04_PFB_BOOT_0_RAM_AMOUNT_16MB);
 
 	if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB &&
 	    !fbmem_readback(fb, 0x800000, ++patt))
-		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+		nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
 			  NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
 
 	if (!fbmem_readback(fb, 0x400000, ++patt))
-		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+		nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
 			  NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
 
 out:
 	/* Sequencer on */
-	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20);
+	nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) & ~0x20);
 	fbmem_fini(fb);
 }
 
-struct nvkm_oclass *
-nv05_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x05),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_devinit_ctor,
-		.dtor = nv04_devinit_dtor,
-		.init = nv04_devinit_init,
-		.fini = nv04_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv05_devinit = {
+	.dtor = nv04_devinit_dtor,
+	.preinit = nv04_devinit_preinit,
+	.post = nv04_devinit_post,
 	.meminit = nv05_devinit_meminit,
 	.pll_set = nv04_devinit_pll_set,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv05_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv04_devinit_new_(&nv05_devinit, device, index, pinit);
+}
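
[Annotation] nv05_devinit_meminit() sizes the board the only way this hardware allows: write a pattern near a candidate boundary, write somewhere else so a stale value can't fool the check, then see whether the pattern survives. A rough userspace model of that probe-by-writeback ladder; absent memory is modelled here as address aliasing, which is one plausible failure mode, while the real code only relies on the pattern not reading back.

    /*
     * Illustration only: the idea behind fbmem_readback().
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define REAL_SIZE (8u << 20)	/* pretend 8 MiB are actually fitted */

    static uint8_t vram[REAL_SIZE];

    static void poke(uint32_t off, uint32_t v)
    {
    	memcpy(&vram[off % REAL_SIZE], &v, 4);	/* aliasing model */
    }

    static bool readback(uint32_t off, uint32_t patt)
    {
    	uint32_t v;
    	poke(off, patt);
    	poke(0, 0);	/* second write, so a stale value can't fool us */
    	memcpy(&v, &vram[off % REAL_SIZE], 4);
    	return v == patt;
    }

    int main(void)
    {
    	uint32_t patt = 0xdeadbeef, size;

    	/* halve the candidate until the top half reads back, like the
    	 * nv05 code's 16MB -> 8MB -> 4MB ladder */
    	for (size = 32u << 20; size > (4u << 20); size >>= 1) {
    		if (readback(size >> 1, ++patt))
    			break;
    	}
    	printf("detected %u MiB\n", size >> 20);
    	return 0;
    }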
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
index 7aabc1b..570822f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv10.c
@@ -30,33 +30,33 @@
 #include <subdev/bios/init.h>
 
 static void
-nv10_devinit_meminit(struct nvkm_devinit *devinit)
+nv10_devinit_meminit(struct nvkm_devinit *init)
 {
-	struct nv04_devinit_priv *priv = (void *)devinit;
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
 	static const int mem_width[] = { 0x10, 0x00, 0x20 };
 	int mem_width_count;
 	uint32_t patt = 0xdeadbeef;
 	struct io_mapping *fb;
 	int i, j, k;
 
-	if (nv_device(priv)->card_type >= NV_11 &&
-	    nv_device(priv)->chipset >= 0x17)
+	if (device->card_type >= NV_11 && device->chipset >= 0x17)
 		mem_width_count = 3;
 	else
 		mem_width_count = 2;
 
 	/* Map the framebuffer aperture */
-	fb = fbmem_init(nv_device(priv));
+	fb = fbmem_init(device);
 	if (!fb) {
-		nv_error(priv, "failed to map fb\n");
+		nvkm_error(subdev, "failed to map fb\n");
 		return;
 	}
 
-	nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
+	nvkm_wr32(device, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
 
 	/* Probe memory bus width */
 	for (i = 0; i < mem_width_count; i++) {
-		nv_mask(priv, NV04_PFB_CFG0, 0x30, mem_width[i]);
+		nvkm_mask(device, NV04_PFB_CFG0, 0x30, mem_width[i]);
 
 		for (j = 0; j < 4; j++) {
 			for (k = 0; k < 4; k++)
@@ -75,7 +75,7 @@
 
 	/* Probe amount of installed memory */
 	for (i = 0; i < 4; i++) {
-		int off = nv_rd32(priv, 0x10020c) - 0x100000;
+		int off = nvkm_rd32(device, 0x10020c) - 0x100000;
 
 		fbmem_poke(fb, off, patt);
 		fbmem_poke(fb, 0, 0);
@@ -90,22 +90,24 @@
 	}
 
 	/* IC missing - disable the upper half memory space. */
-	nv_mask(priv, NV04_PFB_CFG0, 0x1000, 0);
+	nvkm_mask(device, NV04_PFB_CFG0, 0x1000, 0);
 
 amount_found:
 	fbmem_fini(fb);
 }
 
-struct nvkm_oclass *
-nv10_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x10),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_devinit_ctor,
-		.dtor = nv04_devinit_dtor,
-		.init = nv04_devinit_init,
-		.fini = nv04_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv10_devinit = {
+	.dtor = nv04_devinit_dtor,
+	.preinit = nv04_devinit_preinit,
+	.post = nv04_devinit_post,
 	.meminit = nv10_devinit_meminit,
 	.pll_set = nv04_devinit_pll_set,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv10_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv04_devinit_new_(&nv10_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
index 9f36fff..fefafec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv1a.c
@@ -26,15 +26,17 @@
 #include <subdev/bios.h>
 #include <subdev/bios/init.h>
 
-struct nvkm_oclass *
-nv1a_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x1a),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_devinit_ctor,
-		.dtor = nv04_devinit_dtor,
-		.init = nv04_devinit_init,
-		.fini = nv04_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv1a_devinit = {
+	.dtor = nv04_devinit_dtor,
+	.preinit = nv04_devinit_preinit,
+	.post = nv04_devinit_post,
 	.pll_set = nv04_devinit_pll_set,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv1a_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv04_devinit_new_(&nv1a_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
index 02fcfd9..4ef04e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv20.c
@@ -30,48 +30,50 @@
 #include <subdev/bios/init.h>
 
 static void
-nv20_devinit_meminit(struct nvkm_devinit *devinit)
+nv20_devinit_meminit(struct nvkm_devinit *init)
 {
-	struct nv04_devinit_priv *priv = (void *)devinit;
-	struct nvkm_device *device = nv_device(priv);
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
 	uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900);
 	uint32_t amount, off;
 	struct io_mapping *fb;
 
 	/* Map the framebuffer aperture */
-	fb = fbmem_init(nv_device(priv));
+	fb = fbmem_init(device);
 	if (!fb) {
-		nv_error(priv, "failed to map fb\n");
+		nvkm_error(subdev, "failed to map fb\n");
 		return;
 	}
 
-	nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
+	nvkm_wr32(device, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
 
 	/* Allow full addressing */
-	nv_mask(priv, NV04_PFB_CFG0, 0, mask);
+	nvkm_mask(device, NV04_PFB_CFG0, 0, mask);
 
-	amount = nv_rd32(priv, 0x10020c);
+	amount = nvkm_rd32(device, 0x10020c);
 	for (off = amount; off > 0x2000000; off -= 0x2000000)
 		fbmem_poke(fb, off - 4, off);
 
-	amount = nv_rd32(priv, 0x10020c);
+	amount = nvkm_rd32(device, 0x10020c);
 	if (amount != fbmem_peek(fb, amount - 4))
 		/* IC missing - disable the upper half memory space. */
-		nv_mask(priv, NV04_PFB_CFG0, mask, 0);
+		nvkm_mask(device, NV04_PFB_CFG0, mask, 0);
 
 	fbmem_fini(fb);
 }
 
-struct nvkm_oclass *
-nv20_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x20),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_devinit_ctor,
-		.dtor = nv04_devinit_dtor,
-		.init = nv04_devinit_init,
-		.fini = nv04_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv20_devinit = {
+	.dtor = nv04_devinit_dtor,
+	.preinit = nv04_devinit_preinit,
+	.post = nv04_devinit_post,
 	.meminit = nv20_devinit_meminit,
 	.pll_set = nv04_devinit_pll_set,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv20_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv04_devinit_new_(&nv20_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
index 26b7cb1..337c2c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
@@ -29,47 +29,48 @@
 #include <subdev/bios/init.h>
 #include <subdev/bios/pll.h>
 #include <subdev/clk/pll.h>
-#include <subdev/ibus.h>
 #include <subdev/vga.h>
 
 int
-nv50_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
+nv50_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	struct nvkm_bios *bios = nvkm_bios(priv);
+	struct nvkm_subdev *subdev = &init->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct nvbios_pll info;
 	int N1, M1, N2, M2, P;
 	int ret;
 
 	ret = nvbios_pll_parse(bios, type, &info);
 	if (ret) {
-		nv_error(devinit, "failed to retrieve pll data, %d\n", ret);
+		nvkm_error(subdev, "failed to retrieve pll data, %d\n", ret);
 		return ret;
 	}
 
-	ret = nv04_pll_calc(nv_subdev(devinit), &info, freq, &N1, &M1, &N2, &M2, &P);
+	ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P);
 	if (!ret) {
-		nv_error(devinit, "failed pll calculation\n");
+		nvkm_error(subdev, "failed pll calculation\n");
 		return ret;
 	}
 
 	switch (info.type) {
 	case PLL_VPLL0:
 	case PLL_VPLL1:
-		nv_wr32(priv, info.reg + 0, 0x10000611);
-		nv_mask(priv, info.reg + 4, 0x00ff00ff, (M1 << 16) | N1);
-		nv_mask(priv, info.reg + 8, 0x7fff00ff, (P  << 28) |
-							(M2 << 16) | N2);
+		nvkm_wr32(device, info.reg + 0, 0x10000611);
+		nvkm_mask(device, info.reg + 4, 0x00ff00ff, (M1 << 16) | N1);
+		nvkm_mask(device, info.reg + 8, 0x7fff00ff, (P  << 28) |
+							    (M2 << 16) | N2);
 		break;
 	case PLL_MEMORY:
-		nv_mask(priv, info.reg + 0, 0x01ff0000, (P << 22) |
-						        (info.bias_p << 19) |
-							(P << 16));
-		nv_wr32(priv, info.reg + 4, (N1 << 8) | M1);
+		nvkm_mask(device, info.reg + 0, 0x01ff0000,
+					        (P << 22) |
+						(info.bias_p << 19) |
+						(P << 16));
+		nvkm_wr32(device, info.reg + 4, (N1 << 8) | M1);
 		break;
 	default:
-		nv_mask(priv, info.reg + 0, 0x00070000, (P << 16));
-		nv_wr32(priv, info.reg + 4, (N1 << 8) | M1);
+		nvkm_mask(device, info.reg + 0, 0x00070000, (P << 16));
+		nvkm_wr32(device, info.reg + 4, (N1 << 8) | M1);
 		break;
 	}
 
@@ -77,57 +78,68 @@
 }
 
 static u64
-nv50_devinit_disable(struct nvkm_devinit *devinit)
+nv50_devinit_disable(struct nvkm_devinit *init)
 {
-	struct nv50_devinit_priv *priv = (void *)devinit;
-	u32 r001540 = nv_rd32(priv, 0x001540);
+	struct nvkm_device *device = init->subdev.device;
+	u32 r001540 = nvkm_rd32(device, 0x001540);
 	u64 disable = 0ULL;
 
 	if (!(r001540 & 0x40000000))
-		disable |= (1ULL << NVDEV_ENGINE_MPEG);
+		disable |= (1ULL << NVKM_ENGINE_MPEG);
 
 	return disable;
 }
 
-int
-nv50_devinit_init(struct nvkm_object *object)
+void
+nv50_devinit_preinit(struct nvkm_devinit *base)
 {
-	struct nvkm_bios *bios = nvkm_bios(object);
-	struct nvkm_ibus *ibus = nvkm_ibus(object);
-	struct nv50_devinit_priv *priv = (void *)object;
+	struct nv50_devinit *init = nv50_devinit(base);
+	struct nvkm_subdev *subdev = &init->base.subdev;
+	struct nvkm_device *device = subdev->device;
+
+	/* our heuristics can't detect whether the board has had its
+	 * devinit scripts executed or not if the display engine is
+	 * missing; assume it's a secondary gpu which requires post
+	 */
+	if (!init->base.post) {
+		u64 disable = nvkm_devinit_disable(&init->base);
+		if (disable & (1ULL << NVKM_ENGINE_DISP))
+			init->base.post = true;
+	}
+
+	/* magic to detect whether or not x86 vbios code has executed
+	 * the devinit scripts to initialise the board
+	 */
+	if (!init->base.post) {
+		if (!nvkm_rdvgac(device, 0, 0x00) &&
+		    !nvkm_rdvgac(device, 0, 0x1a)) {
+			nvkm_debug(subdev, "adaptor not initialised\n");
+			init->base.post = true;
+		}
+	}
+}
+
+void
+nv50_devinit_init(struct nvkm_devinit *base)
+{
+	struct nv50_devinit *init = nv50_devinit(base);
+	struct nvkm_subdev *subdev = &init->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct nvbios_outp info;
 	struct dcb_output outp;
 	u8  ver = 0xff, hdr, cnt, len;
-	int ret, i = 0;
-
-	if (!priv->base.post) {
-		if (!nv_rdvgac(priv, 0, 0x00) &&
-		    !nv_rdvgac(priv, 0, 0x1a)) {
-			nv_info(priv, "adaptor not initialised\n");
-			priv->base.post = true;
-		}
-	}
-
-	/* some boards appear to require certain priv register timeouts
- * to be bumped before running devinit scripts.  not a clue why
-	 * the vbios engineers didn't make the scripts just work...
-	 */
-	if (priv->base.post && ibus)
-		nv_ofuncs(ibus)->init(nv_object(ibus));
-
-	ret = nvkm_devinit_init(&priv->base);
-	if (ret)
-		return ret;
+	int i = 0;
 
 	/* if we ran the init tables, we have to execute the first script
 	 * pointer of each dcb entry's display encoder table in order
 	 * to properly initialise each encoder.
 	 */
-	while (priv->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) {
+	while (init->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) {
 		if (nvbios_outp_match(bios, outp.hasht, outp.hashm,
 				      &ver, &hdr, &cnt, &len, &info)) {
-			struct nvbios_init init = {
-				.subdev = nv_subdev(priv),
+			struct nvbios_init exec = {
+				.subdev = subdev,
 				.bios = bios,
 				.offset = info.script[0],
 				.outp = &outp,
@@ -135,40 +147,39 @@
 				.execute = 1,
 			};
 
-			nvbios_exec(&init);
+			nvbios_exec(&exec);
 		}
 		i++;
 	}
-
-	return 0;
 }
 
 int
-nv50_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv50_devinit_new_(const struct nvkm_devinit_func *func,
+		  struct nvkm_device *device, int index,
+		  struct nvkm_devinit **pinit)
 {
-	struct nv50_devinit_priv *priv;
-	int ret;
+	struct nv50_devinit *init;
 
-	ret = nvkm_devinit_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(init = kzalloc(sizeof(*init), GFP_KERNEL)))
+		return -ENOMEM;
+	*pinit = &init->base;
 
+	nvkm_devinit_ctor(func, device, index, &init->base);
 	return 0;
 }
 
-struct nvkm_oclass *
-nv50_devinit_oclass = &(struct nvkm_devinit_impl) {
-	.base.handle = NV_SUBDEV(DEVINIT, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_devinit_ctor,
-		.dtor = _nvkm_devinit_dtor,
-		.init = nv50_devinit_init,
-		.fini = _nvkm_devinit_fini,
-	},
+static const struct nvkm_devinit_func
+nv50_devinit = {
+	.preinit = nv50_devinit_preinit,
+	.init = nv50_devinit_init,
+	.post = nv04_devinit_post,
 	.pll_set = nv50_devinit_pll_set,
 	.disable = nv50_devinit_disable,
-	.post = nvbios_init,
-}.base;
+};
+
+int
+nv50_devinit_new(struct nvkm_device *device, int index,
+		 struct nvkm_devinit **pinit)
+{
+	return nv50_devinit_new_(&nv50_devinit, device, index, pinit);
+}
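
[Annotation] The VPLL path above packs the two coefficient pairs into the words written at info.reg + 4 and info.reg + 8. Below is a worked example of that packing using the usual two-stage relation freq = refclk * N1/M1 * N2/M2 >> P; the reference clock and the N/M/P values are invented, since nv04_pll_calc() is what really chooses them.

    /*
     * Illustration only: VPLL coefficient packing as in
     * nv50_devinit_pll_set().  refclk and N/M/P are example values.
     */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	const uint64_t refclk = 27000;	/* kHz, an assumed crystal */
    	int N1 = 200, M1 = 20, N2 = 1, M2 = 1, P = 2;

    	uint32_t coef1 = (M1 << 16) | N1;		/* reg+4, mask 0x00ff00ff */
    	uint32_t coef2 = (P << 28) | (M2 << 16) | N2;	/* reg+8, mask 0x7fff00ff */
    	uint64_t freq = refclk * N1 / M1 * N2 / M2 >> P;

    	printf("coef1=%08x coef2=%08x -> %llu kHz\n",
    	       coef1, coef2, (unsigned long long)freq);	/* 67500 kHz */
    	return 0;
    }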
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index 9243521c..5de70a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -1,16 +1,17 @@
-#ifndef __NVKM_DEVINIT_NV50_H__
-#define __NVKM_DEVINIT_NV50_H__
+#ifndef __NV50_DEVINIT_H__
+#define __NV50_DEVINIT_H__
+#define nv50_devinit(p) container_of((p), struct nv50_devinit, base)
 #include "priv.h"
 
-struct nv50_devinit_priv {
+struct nv50_devinit {
 	struct nvkm_devinit base;
 	u32 r001540;
 };
 
-int  nv50_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, void *, u32,
-		       struct nvkm_object **);
-int  nv50_devinit_init(struct nvkm_object *);
+int nv50_devinit_new_(const struct nvkm_devinit_func *, struct nvkm_device *,
+		      int, struct nvkm_devinit **);
+void nv50_devinit_preinit(struct nvkm_devinit *);
+void nv50_devinit_init(struct nvkm_devinit *);
 int  nv50_devinit_pll_set(struct nvkm_devinit *, u32, u32);
 
 int  gt215_devinit_pll_set(struct nvkm_devinit *, u32, u32);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
index bb51a95..e1f6ae5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
@@ -1,34 +1,21 @@
 #ifndef __NVKM_DEVINIT_PRIV_H__
 #define __NVKM_DEVINIT_PRIV_H__
+#define nvkm_devinit(p) container_of((p), struct nvkm_devinit, subdev)
 #include <subdev/devinit.h>
 
-struct nvkm_devinit_impl {
-	struct nvkm_oclass base;
+struct nvkm_devinit_func {
+	void *(*dtor)(struct nvkm_devinit *);
+	void (*preinit)(struct nvkm_devinit *);
+	void (*init)(struct nvkm_devinit *);
+	int  (*post)(struct nvkm_devinit *, bool post);
+	u32  (*mmio)(struct nvkm_devinit *, u32);
 	void (*meminit)(struct nvkm_devinit *);
 	int  (*pll_set)(struct nvkm_devinit *, u32 type, u32 freq);
 	u64  (*disable)(struct nvkm_devinit *);
-	u32  (*mmio)(struct nvkm_devinit *, u32);
-	int  (*post)(struct nvkm_subdev *, bool);
 };
 
-#define nvkm_devinit_create(p,e,o,d)                                        \
-	nvkm_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_devinit_destroy(p) ({                                          \
-	struct nvkm_devinit *d = (p);                                       \
-	_nvkm_devinit_dtor(nv_object(d));                                   \
-})
-#define nvkm_devinit_init(p) ({                                             \
-	struct nvkm_devinit *d = (p);                                       \
-	_nvkm_devinit_init(nv_object(d));                                   \
-})
-#define nvkm_devinit_fini(p,s) ({                                           \
-	struct nvkm_devinit *d = (p);                                       \
-	_nvkm_devinit_fini(nv_object(d), (s));                              \
-})
+void nvkm_devinit_ctor(const struct nvkm_devinit_func *, struct nvkm_device *,
+		       int index, struct nvkm_devinit *);
 
-int nvkm_devinit_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, int, void **);
-void _nvkm_devinit_dtor(struct nvkm_object *);
-int _nvkm_devinit_init(struct nvkm_object *);
-int _nvkm_devinit_fini(struct nvkm_object *, bool suspend);
+int nv04_devinit_post(struct nvkm_devinit *, bool);
 #endif
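
[Annotation] priv.h now describes a devinit implementation purely as data: one const nvkm_devinit_func per chipset, shared by every instance, with the base object pointing at it. The dispatch pattern, reduced to a standalone sketch with stand-in types and names mirrored from the driver:

    /*
     * Illustration only: const-vtable dispatch replacing the old
     * oclass/ofuncs machinery.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct devinit;

    struct devinit_func {
    	void (*preinit)(struct devinit *);
    };

    struct devinit {
    	const struct devinit_func *func;	/* set once by the ctor */
    	bool post;
    };

    static void nv04_preinit(struct devinit *init)
    {
    	init->post = true;	/* pretend the htotal heuristic fired */
    }

    static const struct devinit_func nv04_func = {
    	.preinit = nv04_preinit,
    };

    int main(void)
    {
    	struct devinit init = { .func = &nv04_func };

    	if (init.func->preinit)	/* hooks a chipset doesn't need stay NULL */
    		init.func->preinit(&init);
    	printf("post = %d\n", init.post);
    	return 0;
    }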
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index d6be4c6c..0810570 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -23,6 +23,8 @@
 nvkm-y += nvkm/subdev/fb/gk104.o
 nvkm-y += nvkm/subdev/fb/gk20a.o
 nvkm-y += nvkm/subdev/fb/gm107.o
+
+nvkm-y += nvkm/subdev/fb/ram.o
 nvkm-y += nvkm/subdev/fb/ramnv04.o
 nvkm-y += nvkm/subdev/fb/ramnv10.o
 nvkm-y += nvkm/subdev/fb/ramnv1a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 61fde43..a719b9b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -22,144 +22,151 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
+#include "ram.h"
 
 #include <subdev/bios.h>
 #include <subdev/bios/M0203.h>
+#include <engine/gr.h>
+#include <engine/mpeg.h>
+
+bool
+nvkm_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
+{
+	return fb->func->memtype_valid(fb, memtype);
+}
+
+void
+nvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
+{
+	fb->func->tile.fini(fb, region, tile);
+}
+
+void
+nvkm_fb_tile_init(struct nvkm_fb *fb, int region, u32 addr, u32 size,
+		  u32 pitch, u32 flags, struct nvkm_fb_tile *tile)
+{
+	fb->func->tile.init(fb, region, addr, size, pitch, flags, tile);
+}
+
+void
+nvkm_fb_tile_prog(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
+{
+	struct nvkm_device *device = fb->subdev.device;
+	if (fb->func->tile.prog) {
+		fb->func->tile.prog(fb, region, tile);
+		if (device->gr)
+			nvkm_engine_tile(&device->gr->engine, region);
+		if (device->mpeg)
+			nvkm_engine_tile(device->mpeg, region);
+	}
+}
 
 int
 nvkm_fb_bios_memtype(struct nvkm_bios *bios)
 {
-	const u8 ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
+	struct nvkm_subdev *subdev = &bios->subdev;
+	struct nvkm_device *device = subdev->device;
+	const u8 ramcfg = (nvkm_rd32(device, 0x101000) & 0x0000003c) >> 2;
 	struct nvbios_M0203E M0203E;
 	u8 ver, hdr;
 
 	if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
 		switch (M0203E.type) {
-		case M0203E_TYPE_DDR2 : return NV_MEM_TYPE_DDR2;
-		case M0203E_TYPE_DDR3 : return NV_MEM_TYPE_DDR3;
-		case M0203E_TYPE_GDDR3: return NV_MEM_TYPE_GDDR3;
-		case M0203E_TYPE_GDDR5: return NV_MEM_TYPE_GDDR5;
+		case M0203E_TYPE_DDR2 : return NVKM_RAM_TYPE_DDR2;
+		case M0203E_TYPE_DDR3 : return NVKM_RAM_TYPE_DDR3;
+		case M0203E_TYPE_GDDR3: return NVKM_RAM_TYPE_GDDR3;
+		case M0203E_TYPE_GDDR5: return NVKM_RAM_TYPE_GDDR5;
 		default:
-			nv_warn(bios, "M0203E type %02x\n", M0203E.type);
-			return NV_MEM_TYPE_UNKNOWN;
+			nvkm_warn(subdev, "M0203E type %02x\n", M0203E.type);
+			return NVKM_RAM_TYPE_UNKNOWN;
 		}
 	}
 
-	nv_warn(bios, "M0203E not matched!\n");
-	return NV_MEM_TYPE_UNKNOWN;
+	nvkm_warn(subdev, "M0203E not matched!\n");
+	return NVKM_RAM_TYPE_UNKNOWN;
 }
 
-int
-_nvkm_fb_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_fb_intr(struct nvkm_subdev *subdev)
 {
-	struct nvkm_fb *pfb = (void *)object;
-	int ret;
-
-	if (pfb->ram) {
-		ret = nv_ofuncs(pfb->ram)->fini(nv_object(pfb->ram), suspend);
-		if (ret && suspend)
-			return ret;
-	}
-
-	return nvkm_subdev_fini(&pfb->base, suspend);
+	struct nvkm_fb *fb = nvkm_fb(subdev);
+	if (fb->func->intr)
+		fb->func->intr(fb);
 }
 
-int
-_nvkm_fb_init(struct nvkm_object *object)
+static int
+nvkm_fb_oneinit(struct nvkm_subdev *subdev)
 {
-	struct nvkm_fb *pfb = (void *)object;
-	int ret, i;
-
-	ret = nvkm_subdev_init(&pfb->base);
-	if (ret)
-		return ret;
-
-	if (pfb->ram) {
-		ret = nv_ofuncs(pfb->ram)->init(nv_object(pfb->ram));
-		if (ret)
+	struct nvkm_fb *fb = nvkm_fb(subdev);
+	if (fb->func->ram_new) {
+		int ret = fb->func->ram_new(fb, &fb->ram);
+		if (ret) {
+			nvkm_error(subdev, "vram setup failed, %d\n", ret);
 			return ret;
+		}
 	}
-
-	for (i = 0; i < pfb->tile.regions; i++)
-		pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
-
 	return 0;
 }
 
-void
-_nvkm_fb_dtor(struct nvkm_object *object)
+static int
+nvkm_fb_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_fb *pfb = (void *)object;
-	int i;
+	struct nvkm_fb *fb = nvkm_fb(subdev);
+	int ret, i;
 
-	for (i = 0; i < pfb->tile.regions; i++)
-		pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
-	nvkm_mm_fini(&pfb->tags);
-
-	if (pfb->ram) {
-		nvkm_mm_fini(&pfb->vram);
-		nvkm_object_ref(NULL, (struct nvkm_object **)&pfb->ram);
+	if (fb->ram) {
+		ret = nvkm_ram_init(fb->ram);
+		if (ret)
+			return ret;
 	}
 
-	nvkm_subdev_destroy(&pfb->base);
+	for (i = 0; i < fb->tile.regions; i++)
+		fb->func->tile.prog(fb, i, &fb->tile.region[i]);
+
+	if (fb->func->init)
+		fb->func->init(fb);
+	return 0;
+}
+
+static void *
+nvkm_fb_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_fb *fb = nvkm_fb(subdev);
+	int i;
+
+	for (i = 0; i < fb->tile.regions; i++)
+		fb->func->tile.fini(fb, i, &fb->tile.region[i]);
+
+	nvkm_ram_del(&fb->ram);
+
+	if (fb->func->dtor)
+		return fb->func->dtor(fb);
+	return fb;
+}
+
+static const struct nvkm_subdev_func
+nvkm_fb = {
+	.dtor = nvkm_fb_dtor,
+	.oneinit = nvkm_fb_oneinit,
+	.init = nvkm_fb_init,
+	.intr = nvkm_fb_intr,
+};
+
+void
+nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_fb *fb)
+{
+	nvkm_subdev_ctor(&nvkm_fb, device, index, 0, &fb->subdev);
+	fb->func = func;
+	fb->tile.regions = fb->func->tile.regions;
 }
 
 int
-nvkm_fb_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_fb **pfb)
 {
-	struct nvkm_fb_impl *impl = (void *)oclass;
-	static const char *name[] = {
-		[NV_MEM_TYPE_UNKNOWN] = "unknown",
-		[NV_MEM_TYPE_STOLEN ] = "stolen system memory",
-		[NV_MEM_TYPE_SGRAM  ] = "SGRAM",
-		[NV_MEM_TYPE_SDRAM  ] = "SDRAM",
-		[NV_MEM_TYPE_DDR1   ] = "DDR1",
-		[NV_MEM_TYPE_DDR2   ] = "DDR2",
-		[NV_MEM_TYPE_DDR3   ] = "DDR3",
-		[NV_MEM_TYPE_GDDR2  ] = "GDDR2",
-		[NV_MEM_TYPE_GDDR3  ] = "GDDR3",
-		[NV_MEM_TYPE_GDDR4  ] = "GDDR4",
-		[NV_MEM_TYPE_GDDR5  ] = "GDDR5",
-	};
-	struct nvkm_object *ram;
-	struct nvkm_fb *pfb;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PFB", "fb",
-				  length, pobject);
-	pfb = *pobject;
-	if (ret)
-		return ret;
-
-	pfb->memtype_valid = impl->memtype;
-
-	if (!impl->ram)
-		return 0;
-
-	ret = nvkm_object_ctor(nv_object(pfb), NULL, impl->ram, NULL, 0, &ram);
-	if (ret) {
-		nv_fatal(pfb, "error detecting memory configuration!!\n");
-		return ret;
-	}
-
-	pfb->ram = (void *)ram;
-
-	if (!nvkm_mm_initialised(&pfb->vram)) {
-		ret = nvkm_mm_init(&pfb->vram, 0, pfb->ram->size >> 12, 1);
-		if (ret)
-			return ret;
-	}
-
-	if (!nvkm_mm_initialised(&pfb->tags)) {
-		ret = nvkm_mm_init(&pfb->tags, 0, pfb->ram->tags ?
-				   ++pfb->ram->tags : 0, 1);
-		if (ret)
-			return ret;
-	}
-
-	nv_info(pfb, "RAM type: %s\n", name[pfb->ram->type]);
-	nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram->size >> 20));
-	nv_info(pfb, "   ZCOMP: %d tags\n", pfb->ram->tags);
+	if (!(*pfb = kzalloc(sizeof(**pfb), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_fb_ctor(func, device, index, *pfb);
 	return 0;
 }
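
[Annotation] base.c splits allocation from initialisation on purpose: nvkm_fb_new_() does the kzalloc for plain devices, while subclasses with extra state (gf100_fb and friends, below) allocate their own wrapper and call nvkm_fb_ctor() on the embedded base. A compact userspace rendering of that two-step pattern, with stand-in types and calloc() playing the role of kzalloc():

    /*
     * Illustration only: the new_()/ctor() split used across this series.
     */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fb_func { int id; };

    struct fb {
    	const struct fb_func *func;
    	int index;
    };

    static void fb_ctor(const struct fb_func *func, int index, struct fb *fb)
    {
    	fb->func = func;
    	fb->index = index;
    }

    /* plain devices: the base struct is the whole object */
    static int fb_new_(const struct fb_func *func, int index, struct fb **pfb)
    {
    	if (!(*pfb = calloc(1, sizeof(**pfb))))
    		return -ENOMEM;
    	fb_ctor(func, index, *pfb);
    	return 0;
    }

    /* devices with private state embed the base in their own wrapper */
    struct gf100_fb {
    	struct fb base;
    	void *r100c10_page;
    };

    static int gf100_fb_new_(const struct fb_func *func, int index,
    			 struct fb **pfb)
    {
    	struct gf100_fb *fb = calloc(1, sizeof(*fb));

    	if (!fb)
    		return -ENOMEM;
    	fb_ctor(func, index, &fb->base);
    	*pfb = &fb->base;
    	return 0;
    }

    int main(void)
    {
    	static const struct fb_func func = { .id = 1 };
    	struct fb *a = NULL, *b = NULL;

    	fb_new_(&func, 0, &a);
    	gf100_fb_new_(&func, 1, &b);
    	printf("%d %d\n", a->index, b->index);
    	free(a);
    	free(b);	/* base is the wrapper's first member here */
    	return 0;
    }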
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
index 6c968d1..9c28392 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
@@ -22,17 +22,16 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-g84_fb_oclass = &(struct nv50_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x84),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fb_ctor,
-		.dtor = nv50_fb_dtor,
-		.init = nv50_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv50_fb_memtype_valid,
-	.base.ram = &nv50_ram_oclass,
+static const struct nv50_fb_func
+g84_fb = {
+	.ram_new = nv50_ram_new,
 	.trap = 0x001d07ff,
-}.base.base;
+};
+
+int
+g84_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nv50_fb_new_(&g84_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
index 15b462a..79b523a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
@@ -22,7 +22,7 @@
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  * 	    Roy Spliet <rspliet@eclipso.eu>
  */
-#include "priv.h"
+#include "ram.h"
 
 struct ramxlat {
 	int id;
@@ -42,9 +42,9 @@
 
 static const struct ramxlat
 ramgddr3_cl_lo[] = {
-	{ 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 },
+	{ 5, 5 }, { 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 }, { 12, 8 },
 	/* the below are mentioned in some, but not all, gddr3 docs */
-	{ 12, 4 }, { 13, 5 }, { 14, 6 },
+	{ 13, 9 }, { 14, 6 },
 	/* XXX: Per Samsung docs, are these used? They overlap with Qimonda */
 	/* { 4, 4 }, { 5, 5 }, { 6, 6 }, { 12, 8 }, { 13, 9 }, { 14, 10 },
 	 * { 15, 11 }, */
@@ -61,24 +61,25 @@
 static const struct ramxlat
 ramgddr3_wr_lo[] = {
 	{ 5, 2 }, { 7, 4 }, { 8, 5 }, { 9, 6 }, { 10, 7 },
-	{ 11, 0 },
+	{ 11, 0 }, { 13, 1 },
 	/* the below are mentioned in some, but not all, gddr3 docs */
-	{ 4, 1 }, { 6, 3 }, { 12, 1 }, { 13 , 2 },
+	{ 4, 1 }, { 6, 3 }, { 12, 1 },
 	{ -1 }
 };
 
 int
 nvkm_gddr3_calc(struct nvkm_ram *ram)
 {
-	int CL, WR, CWL, DLL = 0, ODT = 0, hi;
+	int CL, WR, CWL, DLL = 0, ODT = 0, RON, hi;
 
 	switch (ram->next->bios.timing_ver) {
 	case 0x10:
 		CWL = ram->next->bios.timing_10_CWL;
 		CL  = ram->next->bios.timing_10_CL;
 		WR  = ram->next->bios.timing_10_WR;
-		DLL = !ram->next->bios.ramcfg_10_DLLoff;
+		DLL = !ram->next->bios.ramcfg_DLLoff;
 		ODT = ram->next->bios.timing_10_ODT;
+		RON = ram->next->bios.ramcfg_RON;
 		break;
 	case 0x20:
 		CWL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
@@ -89,6 +90,7 @@
 		ODT =  (ram->mr[1] & 0x004) >> 2 |
 		       (ram->mr[1] & 0x040) >> 5 |
 		       (ram->mr[1] & 0x200) >> 7;
+		RON = !((ram->mr[1] & 0x300) >> 8);
 		break;
 	default:
 		return -ENOSYS;
@@ -107,7 +109,7 @@
 
 	ram->mr[1] &= ~0x3fc;
 	ram->mr[1] |= (ODT & 0x03) << 2;
-	ram->mr[1] |= (ODT & 0x03) << 8;
+	ram->mr[1] |= (RON & 0x03) << 8;
 	ram->mr[1] |= (WR  & 0x03) << 4;
 	ram->mr[1] |= (WR  & 0x04) << 5;
 	ram->mr[1] |= !DLL << 6;
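
[Annotation] After the ramxlat lookups, nvkm_gddr3_calc() scatters the translated fields across MR1: ODT in bits 2-3, WR split between bits 4-5 and bit 7, the DLL-disable in bit 6, and now RON (instead of a duplicated ODT) in bits 8-9. The packing arithmetic on its own, with example post-ramxlat field values:

    /*
     * Illustration only: the MR1 bit-scatter from nvkm_gddr3_calc().
     * WR/ODT/RON are example encodings, not real timings.
     */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	int WR = 5, ODT = 1, RON = 0, DLL = 1;
    	uint32_t mr1 = 0;

    	mr1 &= ~0x3fc;			/* clear bits 2..9 */
    	mr1 |= (ODT & 0x03) << 2;
    	mr1 |= (RON & 0x03) << 8;
    	mr1 |= (WR  & 0x03) << 4;	/* low two WR bits at 4..5 */
    	mr1 |= (WR  & 0x04) << 5;	/* third WR bit lands at bit 7 */
    	mr1 |= !DLL << 6;		/* DLL on -> bit 6 clear */

    	printf("MR1 = 0x%03x\n", mr1);	/* 0x094 for these values */
    	return 0;
    }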
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
index f6f9eee..24f83b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#include "priv.h"
+#include "ram.h"
 
 /* binary driver only executes this path if the condition (a) is true
  * for any configuration (combination of rammap+ramcfg+timing) that
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index d51aa02..008bb98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -22,101 +22,90 @@
  * Authors: Ben Skeggs
  */
 #include "gf100.h"
-
-#include <core/device.h>
+#include "ram.h"
 
 extern const u8 gf100_pte_storage_type_map[256];
 
 bool
-gf100_fb_memtype_valid(struct nvkm_fb *pfb, u32 tile_flags)
+gf100_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
 {
 	u8 memtype = (tile_flags & 0x0000ff00) >> 8;
 	return likely((gf100_pte_storage_type_map[memtype] != 0xff));
 }
 
-static void
-gf100_fb_intr(struct nvkm_subdev *subdev)
+void
+gf100_fb_intr(struct nvkm_fb *base)
 {
-	struct gf100_fb_priv *priv = (void *)subdev;
-	u32 intr = nv_rd32(priv, 0x000100);
-	if (intr & 0x08000000) {
-		nv_debug(priv, "PFFB intr\n");
-		intr &= ~0x08000000;
-	}
-	if (intr & 0x00002000) {
-		nv_debug(priv, "PBFB intr\n");
-		intr &= ~0x00002000;
-	}
-}
-
-int
-gf100_fb_init(struct nvkm_object *object)
-{
-	struct gf100_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
-
-	if (priv->r100c10_page)
-		nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
-
-	nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
-	return 0;
+	struct gf100_fb *fb = gf100_fb(base);
+	struct nvkm_subdev *subdev = &fb->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 intr = nvkm_rd32(device, 0x000100);
+	if (intr & 0x08000000)
+		nvkm_debug(subdev, "PFFB intr\n");
+	if (intr & 0x00002000)
+		nvkm_debug(subdev, "PBFB intr\n");
 }
 
 void
-gf100_fb_dtor(struct nvkm_object *object)
+gf100_fb_init(struct nvkm_fb *base)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct gf100_fb_priv *priv = (void *)object;
+	struct gf100_fb *fb = gf100_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
 
-	if (priv->r100c10_page) {
-		dma_unmap_page(nv_device_base(device), priv->r100c10, PAGE_SIZE,
+	if (fb->r100c10_page)
+		nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
+
+	nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
+}
+
+void *
+gf100_fb_dtor(struct nvkm_fb *base)
+{
+	struct gf100_fb *fb = gf100_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
+
+	if (fb->r100c10_page) {
+		dma_unmap_page(device->dev, fb->r100c10, PAGE_SIZE,
 			       DMA_BIDIRECTIONAL);
-		__free_page(priv->r100c10_page);
+		__free_page(fb->r100c10_page);
 	}
 
-	nvkm_fb_destroy(&priv->base);
+	return fb;
 }
 
 int
-gf100_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_fb **pfb)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct gf100_fb_priv *priv;
-	int ret;
+	struct gf100_fb *fb;
 
-	ret = nvkm_fb_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_fb_ctor(func, device, index, &fb->base);
+	*pfb = &fb->base;
 
-	priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (priv->r100c10_page) {
-		priv->r100c10 = dma_map_page(nv_device_base(device),
-					     priv->r100c10_page, 0, PAGE_SIZE,
-					     DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(nv_device_base(device), priv->r100c10))
+	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (fb->r100c10_page) {
+		fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
+					   PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(device->dev, fb->r100c10))
 			return -EFAULT;
 	}
 
-	nv_subdev(priv)->intr = gf100_fb_intr;
 	return 0;
 }
 
-struct nvkm_oclass *
-gf100_fb_oclass = &(struct nvkm_fb_impl) {
-	.base.handle = NV_SUBDEV(FB, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fb_ctor,
-		.dtor = gf100_fb_dtor,
-		.init = gf100_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.memtype = gf100_fb_memtype_valid,
-	.ram = &gf100_ram_oclass,
-}.base;
+static const struct nvkm_fb_func
+gf100_fb = {
+	.dtor = gf100_fb_dtor,
+	.init = gf100_fb_init,
+	.intr = gf100_fb_intr,
+	.ram_new = gf100_ram_new,
+	.memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gf100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return gf100_fb_new_(&gf100_fb, device, index, pfb);
+}
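
[Annotation] gf100_fb_memtype_valid() reduces validity to a single table lookup: the memtype byte pulled out of tile_flags indexes a 256-entry map owned by the VM code, and 0xff is the "unsupported" sentinel. The shape of that check, with an invented table in place of the real gf100_pte_storage_type_map:

    /*
     * Illustration only: validity-by-sentinel lookup.  Table contents
     * are made up.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint8_t type_map[256];

    static bool memtype_valid(uint32_t tile_flags)
    {
    	uint8_t memtype = (tile_flags & 0x0000ff00) >> 8;
    	return type_map[memtype] != 0xff;
    }

    int main(void)
    {
    	memset(type_map, 0xff, sizeof(type_map));	/* default: invalid */
    	type_map[0x00] = 0x00;	/* pretend plain linear is supported */

    	printf("%d %d\n", memtype_valid(0x0000), memtype_valid(0xfe00));
    	return 0;
    }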
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 0af4da2..2160e5a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -1,28 +1,17 @@
 #ifndef __NVKM_RAM_NVC0_H__
 #define __NVKM_RAM_NVC0_H__
+#define gf100_fb(p) container_of((p), struct gf100_fb, base)
 #include "priv.h"
-#include "nv50.h"
 
-struct gf100_fb_priv {
+struct gf100_fb {
 	struct nvkm_fb base;
 	struct page *r100c10_page;
 	dma_addr_t r100c10;
 };
 
-int  gf100_fb_ctor(struct nvkm_object *, struct nvkm_object *,
-		  struct nvkm_oclass *, void *, u32,
-		  struct nvkm_object **);
-void gf100_fb_dtor(struct nvkm_object *);
-int  gf100_fb_init(struct nvkm_object *);
-bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
-
-#define gf100_ram_create(p,e,o,m,d)                                             \
-	gf100_ram_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
-int  gf100_ram_create_(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, u32, int, void **);
-int  gf100_ram_get(struct nvkm_fb *, u64, u32, u32, u32,
-		  struct nvkm_mem **);
-void gf100_ram_put(struct nvkm_fb *, struct nvkm_mem **);
-
-int  gk104_ram_init(struct nvkm_object*);
+int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *,
+		  int index, struct nvkm_fb **);
+void *gf100_fb_dtor(struct nvkm_fb *);
+void gf100_fb_init(struct nvkm_fb *);
+void gf100_fb_intr(struct nvkm_fb *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index 1c08317..0edb3c3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -22,16 +22,19 @@
  * Authors: Ben Skeggs
  */
 #include "gf100.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-gk104_fb_oclass = &(struct nvkm_fb_impl) {
-	.base.handle = NV_SUBDEV(FB, 0xe0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fb_ctor,
-		.dtor = gf100_fb_dtor,
-		.init = gf100_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.memtype = gf100_fb_memtype_valid,
-	.ram = &gk104_ram_oclass,
-}.base;
+static const struct nvkm_fb_func
+gk104_fb = {
+	.dtor = gf100_fb_dtor,
+	.init = gf100_fb_init,
+	.intr = gf100_fb_intr,
+	.ram_new = gk104_ram_new,
+	.memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gk104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return gf100_fb_new_(&gk104_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index a5d7857..81447eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -19,50 +19,23 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include "gf100.h"
+#include "priv.h"
 
-struct gk20a_fb_priv {
-	struct nvkm_fb base;
+static void
+gk20a_fb_init(struct nvkm_fb *fb)
+{
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
+}
+
+static const struct nvkm_fb_func
+gk20a_fb = {
+	.init = gk20a_fb_init,
+	.memtype_valid = gf100_fb_memtype_valid,
 };
 
-static int
-gk20a_fb_init(struct nvkm_object *object)
+int
+gk20a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
 {
-	struct gk20a_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
-	return 0;
+	return nvkm_fb_new_(&gk20a_fb, device, index, pfb);
 }
-
-static int
-gk20a_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct gk20a_fb_priv *priv;
-	int ret;
-
-	ret = nvkm_fb_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-struct nvkm_oclass *
-gk20a_fb_oclass = &(struct nvkm_fb_impl) {
-	.base.handle = NV_SUBDEV(FB, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = gk20a_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.memtype = gf100_fb_memtype_valid,
-}.base;
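
[Annotation] gk20a is the interesting outlier here: as a Tegra GPU it has no dedicated VRAM, so its func table simply omits .ram_new, and the guard in nvkm_fb_oneinit() (earlier in this series) skips VRAM setup entirely. That guard, reduced to a sketch with stand-in types:

    /*
     * Illustration only: optional hooks in the fb func table.
     */
    #include <stdio.h>

    struct fb;

    struct fb_func {
    	int (*ram_new)(struct fb *);	/* NULL on VRAM-less chips */
    };

    struct fb { const struct fb_func *func; };

    static int fb_oneinit(struct fb *fb)
    {
    	if (fb->func->ram_new)		/* dGPU: go detect the VRAM */
    		return fb->func->ram_new(fb);
    	return 0;			/* gk20a: system memory only */
    }

    int main(void)
    {
    	static const struct fb_func gk20a_func = { 0 };	/* no .ram_new */
    	struct fb fb = { .func = &gk20a_func };

    	printf("oneinit = %d\n", fb_oneinit(&fb));
    	return 0;
    }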
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index 843f935..2a91df8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -22,16 +22,19 @@
  * Authors: Ben Skeggs
  */
 #include "gf100.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-gm107_fb_oclass = &(struct nvkm_fb_impl) {
-	.base.handle = NV_SUBDEV(FB, 0x07),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fb_ctor,
-		.dtor = gf100_fb_dtor,
-		.init = gf100_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.memtype = gf100_fb_memtype_valid,
-	.ram = &gm107_ram_oclass,
-}.base;
+static const struct nvkm_fb_func
+gm107_fb = {
+	.dtor = gf100_fb_dtor,
+	.init = gf100_fb_init,
+	.intr = gf100_fb_intr,
+	.ram_new = gm107_ram_new,
+	.memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gm107_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return gf100_fb_new_(&gm107_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
index dd9b8a0..ebb3060 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
@@ -22,17 +22,16 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-gt215_fb_oclass = &(struct nv50_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0xa3),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fb_ctor,
-		.dtor = nv50_fb_dtor,
-		.init = nv50_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv50_fb_memtype_valid,
-	.base.ram = &gt215_ram_oclass,
+static const struct nv50_fb_func
+gt215_fb = {
+	.ram_new = gt215_ram_new,
 	.trap = 0x000d0fff,
-}.base.base;
+};
+
+int
+gt215_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nv50_fb_new_(&gt215_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
index 7be4a47..73b3b86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp77.c
@@ -22,17 +22,16 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-mcp77_fb_oclass = &(struct nv50_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0xaa),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fb_ctor,
-		.dtor = nv50_fb_dtor,
-		.init = nv50_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv50_fb_memtype_valid,
-	.base.ram = &mcp77_ram_oclass,
+static const struct nv50_fb_func
+mcp77_fb = {
+	.ram_new = mcp77_ram_new,
 	.trap = 0x001d07ff,
-}.base.base;
+};
+
+int
+mcp77_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nv50_fb_new_(&mcp77_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
index 2d00656..6d11e32 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/mcp89.c
@@ -22,17 +22,16 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-mcp89_fb_oclass = &(struct nv50_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0xaf),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fb_ctor,
-		.dtor = nv50_fb_dtor,
-		.init = nv50_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv50_fb_memtype_valid,
-	.base.ram = &mcp77_ram_oclass,
+static const struct nv50_fb_func
+mcp89_fb = {
+	.ram_new = mcp77_ram_new,
 	.trap = 0x089d1fff,
-}.base.base;
+};
+
+int
+mcp89_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nv50_fb_new_(&mcp89_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
index c063dec..8ff2e5d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
@@ -21,67 +21,39 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 #include "regsnv04.h"
 
 bool
-nv04_fb_memtype_valid(struct nvkm_fb *pfb, u32 tile_flags)
+nv04_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
 {
 	if (!(tile_flags & 0xff00))
 		return true;
-
 	return false;
 }
 
-static int
-nv04_fb_init(struct nvkm_object *object)
+static void
+nv04_fb_init(struct nvkm_fb *fb)
 {
-	struct nv04_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
+	struct nvkm_device *device = fb->subdev.device;
 
 	/* This is what the DDX did for NV_ARCH_04, but an mmio-trace shows
 	 * nvidia reading PFB_CFG_0, then writing back its original value.
 	 * (which was 0x701114 in this case)
 	 */
-	nv_wr32(priv, NV04_PFB_CFG0, 0x1114);
-	return 0;
+	nvkm_wr32(device, NV04_PFB_CFG0, 0x1114);
 }
 
+static const struct nvkm_fb_func
+nv04_fb = {
+	.init = nv04_fb_init,
+	.ram_new = nv04_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
 int
-nv04_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+nv04_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
 {
-	struct nv04_fb_impl *impl = (void *)oclass;
-	struct nv04_fb_priv *priv;
-	int ret;
-
-	ret = nvkm_fb_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.tile.regions = impl->tile.regions;
-	priv->base.tile.init = impl->tile.init;
-	priv->base.tile.comp = impl->tile.comp;
-	priv->base.tile.fini = impl->tile.fini;
-	priv->base.tile.prog = impl->tile.prog;
-	return 0;
+	return nvkm_fb_new_(&nv04_fb, device, index, pfb);
 }
-
-struct nvkm_oclass *
-nv04_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x04),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv04_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv04_ram_oclass,
-}.base.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h
deleted file mode 100644
index caa0d03..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef __NVKM_FB_NV04_H__
-#define __NVKM_FB_NV04_H__
-#include "priv.h"
-
-struct nv04_fb_priv {
-	struct nvkm_fb base;
-};
-
-int  nv04_fb_ctor(struct nvkm_object *, struct nvkm_object *,
-		  struct nvkm_oclass *, void *, u32,
-		  struct nvkm_object **);
-
-struct nv04_fb_impl {
-	struct nvkm_fb_impl base;
-	struct {
-		int regions;
-		void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
-			     u32 pitch, u32 flags, struct nvkm_fb_tile *);
-		void (*comp)(struct nvkm_fb *, int i, u32 size, u32 flags,
-			     struct nvkm_fb_tile *);
-		void (*fini)(struct nvkm_fb *, int i,
-			     struct nvkm_fb_tile *);
-		void (*prog)(struct nvkm_fb *, int i,
-			     struct nvkm_fb_tile *);
-	} tile;
-};
-
-void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
-void nv10_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
-void nv10_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
-
-void nv20_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
-void nv20_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
-void nv20_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
-
-int  nv30_fb_init(struct nvkm_object *);
-void nv30_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
-
-void nv40_fb_tile_comp(struct nvkm_fb *, int i, u32 size, u32 flags,
-		       struct nvkm_fb_tile *);
-
-int  nv41_fb_init(struct nvkm_object *);
-void nv41_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
-
-int  nv44_fb_init(struct nvkm_object *);
-void nv44_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
-
-void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
-		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
index f3530e4a..e8c44f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
@@ -23,10 +23,11 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 void
-nv10_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv10_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nvkm_fb_tile *tile)
 {
 	tile->addr  = 0x80000000 | addr;
@@ -35,7 +36,7 @@
 }
 
 void
-nv10_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv10_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
 	tile->addr  = 0;
 	tile->limit = 0;
@@ -44,27 +45,27 @@
 }
 
 void
-nv10_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv10_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
-	nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
-	nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
-	nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
-	nv_rd32(pfb, 0x100240 + (i * 0x10));
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_wr32(device, 0x100244 + (i * 0x10), tile->limit);
+	nvkm_wr32(device, 0x100248 + (i * 0x10), tile->pitch);
+	nvkm_wr32(device, 0x100240 + (i * 0x10), tile->addr);
+	nvkm_rd32(device, 0x100240 + (i * 0x10));
 }
 
-struct nvkm_oclass *
-nv10_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x10),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = _nvkm_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv10_ram_oclass,
+static const struct nvkm_fb_func
+nv10_fb = {
 	.tile.regions = 8,
 	.tile.init = nv10_fb_tile_init,
 	.tile.fini = nv10_fb_tile_fini,
 	.tile.prog = nv10_fb_tile_prog,
-}.base.base;
+	.ram_new = nv10_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv10_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv10_fb, device, index, pfb);
+}
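
[Annotation] nv10_fb_tile_prog() ends with a read of the register it just wrote; on real hardware that trailing read flushes the posted writes so the tile is actually programmed before the function returns. The write-write-write-read sequence against a fake register file:

    /*
     * Illustration only: tile programming with a posted-write flush, as
     * in nv10_fb_tile_prog().  Here the read just proves the values landed.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regs[0x1000];	/* backs MMIO range 0x100000..0x103fff */

    static void wr32(uint32_t addr, uint32_t v) { regs[(addr - 0x100000) / 4] = v; }
    static uint32_t rd32(uint32_t addr) { return regs[(addr - 0x100000) / 4]; }

    struct tile { uint32_t addr, limit, pitch; };

    static void tile_prog(int i, const struct tile *t)
    {
    	wr32(0x100244 + (i * 0x10), t->limit);
    	wr32(0x100248 + (i * 0x10), t->pitch);
    	wr32(0x100240 + (i * 0x10), t->addr);	/* enable bit rides in addr */
    	rd32(0x100240 + (i * 0x10));		/* posted-write flush */
    }

    int main(void)
    {
    	struct tile t = {
    		.addr  = 0x80000000 | 0x1000,	/* bit 31 = region enabled */
    		.limit = 0x1fff,
    		.pitch = 0x100,
    	};

    	tile_prog(0, &t);
    	printf("addr reg = %08x\n", rd32(0x100240));
    	return 0;
    }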
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
index 83bcb73..2ae0beb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
@@ -23,21 +23,21 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-nv1a_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x1a),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = _nvkm_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv1a_ram_oclass,
+static const struct nvkm_fb_func
+nv1a_fb = {
 	.tile.regions = 8,
 	.tile.init = nv10_fb_tile_init,
 	.tile.fini = nv10_fb_tile_fini,
 	.tile.prog = nv10_fb_tile_prog,
-}.base.base;
+	.ram_new = nv1a_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv1a_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv1a_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
index e37084b..126865d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
@@ -23,28 +23,29 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 void
-nv20_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv20_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nvkm_fb_tile *tile)
 {
 	tile->addr  = 0x00000001 | addr;
 	tile->limit = max(1u, addr + size) - 1;
 	tile->pitch = pitch;
 	if (flags & 4) {
-		pfb->tile.comp(pfb, i, size, flags, tile);
+		fb->func->tile.comp(fb, i, size, flags, tile);
 		tile->addr |= 2;
 	}
 }
 
 static void
-nv20_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv20_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
+	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
 		else              tile->zcomp = 0x04000000; /* Z24S8 */
 		tile->zcomp |= tile->tag->offset;
@@ -56,39 +57,39 @@
 }
 
 void
-nv20_fb_tile_fini(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv20_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
 	tile->addr  = 0;
 	tile->limit = 0;
 	tile->pitch = 0;
 	tile->zcomp = 0;
-	nvkm_mm_free(&pfb->tags, &tile->tag);
+	nvkm_mm_free(&fb->ram->tags, &tile->tag);
 }
 
 void
-nv20_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv20_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
-	nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
-	nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
-	nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
-	nv_rd32(pfb, 0x100240 + (i * 0x10));
-	nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_wr32(device, 0x100244 + (i * 0x10), tile->limit);
+	nvkm_wr32(device, 0x100248 + (i * 0x10), tile->pitch);
+	nvkm_wr32(device, 0x100240 + (i * 0x10), tile->addr);
+	nvkm_rd32(device, 0x100240 + (i * 0x10));
+	nvkm_wr32(device, 0x100300 + (i * 0x04), tile->zcomp);
 }
 
-struct nvkm_oclass *
-nv20_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x20),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = _nvkm_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv20_fb = {
 	.tile.regions = 8,
 	.tile.init = nv20_fb_tile_init,
 	.tile.comp = nv20_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv20_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv20_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv20_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
index bc9f54f..c56746d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
@@ -23,15 +23,16 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 static void
-nv25_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
+	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
 		else              tile->zcomp = 0x00200000; /* Z24S8 */
 		tile->zcomp |= tile->tag->offset;
@@ -41,20 +42,19 @@
 	}
 }
 
-struct nvkm_oclass *
-nv25_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x25),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = _nvkm_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv25_fb = {
 	.tile.regions = 8,
 	.tile.init = nv20_fb_tile_init,
 	.tile.comp = nv25_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv20_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv25_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv25_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
index 09ebb94..2a7c483 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
@@ -23,20 +23,19 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
-
-#include <core/device.h>
+#include "priv.h"
+#include "ram.h"
 
 void
-nv30_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv30_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nvkm_fb_tile *tile)
 {
 	/* for performance, select alternate bank offset for zeta */
 	if (!(flags & 4)) {
 		tile->addr = (0 << 4);
 	} else {
-		if (pfb->tile.comp) /* z compression */
-			pfb->tile.comp(pfb, i, size, flags, tile);
+		if (fb->func->tile.comp) /* z compression */
+			fb->func->tile.comp(fb, i, size, flags, tile);
 		tile->addr = (1 << 4);
 	}
 
@@ -47,12 +46,12 @@
 }
 
 static void
-nv30_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv30_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
+	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
 		else           tile->zcomp |= 0x02000000; /* Z24S8 */
 		tile->zcomp |= ((tile->tag->offset           ) >> 6);
@@ -64,23 +63,24 @@
 }
 
 static int
-calc_bias(struct nv04_fb_priv *priv, int k, int i, int j)
+calc_bias(struct nvkm_fb *fb, int k, int i, int j)
 {
-	struct nvkm_device *device = nv_device(priv);
+	struct nvkm_device *device = fb->subdev.device;
 	int b = (device->chipset > 0x30 ?
-		 nv_rd32(priv, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
+		 nvkm_rd32(device, 0x122c + 0x10 * k + 0x4 * j) >>
+			(4 * (i ^ 1)) :
 		 0) & 0xf;
 
 	return 2 * (b & 0x8 ? b - 0x10 : b);
 }
 
 static int
-calc_ref(struct nv04_fb_priv *priv, int l, int k, int i)
+calc_ref(struct nvkm_fb *fb, int l, int k, int i)
 {
 	int j, x = 0;
 
 	for (j = 0; j < 4; j++) {
-		int m = (l >> (8 * i) & 0xff) + calc_bias(priv, k, i, j);
+		int m = (l >> (8 * i) & 0xff) + calc_bias(fb, k, i, j);
 
 		x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
 	}
@@ -88,16 +88,11 @@
 	return x;
 }
 
-int
-nv30_fb_init(struct nvkm_object *object)
+void
+nv30_fb_init(struct nvkm_fb *fb)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct nv04_fb_priv *priv = (void *)object;
-	int ret, i, j;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
+	struct nvkm_device *device = fb->subdev.device;
+	int i, j;
 
 	/* Init the memory timing regs at 0x10037c/0x1003ac */
 	if (device->chipset == 0x30 ||
@@ -105,36 +100,34 @@
 	    device->chipset == 0x35) {
 		/* Related to ROP count */
 		int n = (device->chipset == 0x31 ? 2 : 4);
-		int l = nv_rd32(priv, 0x1003d0);
+		int l = nvkm_rd32(device, 0x1003d0);
 
 		for (i = 0; i < n; i++) {
 			for (j = 0; j < 3; j++)
-				nv_wr32(priv, 0x10037c + 0xc * i + 0x4 * j,
-					calc_ref(priv, l, 0, j));
+				nvkm_wr32(device, 0x10037c + 0xc * i + 0x4 * j,
+					  calc_ref(fb, l, 0, j));
 
 			for (j = 0; j < 2; j++)
-				nv_wr32(priv, 0x1003ac + 0x8 * i + 0x4 * j,
-					calc_ref(priv, l, 1, j));
+				nvkm_wr32(device, 0x1003ac + 0x8 * i + 0x4 * j,
+					  calc_ref(fb, l, 1, j));
 		}
 	}
-
-	return 0;
 }
 
-struct nvkm_oclass *
-nv30_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x30),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv30_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv30_fb = {
+	.init = nv30_fb_init,
 	.tile.regions = 8,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv30_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv20_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv30_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv30_fb, device, index, pfb);
+}
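
calc_bias() above reads a 4-bit field out of a per-chipset register and treats it as a two's-complement nibble, doubling the result; 0x8..0xf therefore map to -16..-2 and 0x0..0x7 to 0..14. A small self-contained check of that mapping:

#include <assert.h>

/* mirror of the expression in calc_bias(): sign-extend a nibble, x2 */
static int
bias_from_nibble(int b)
{
	return 2 * (b & 0x8 ? b - 0x10 : b);
}

int
main(void)
{
	assert(bias_from_nibble(0x0) ==   0);
	assert(bias_from_nibble(0x7) ==  14);
	assert(bias_from_nibble(0x8) == -16);
	assert(bias_from_nibble(0xf) ==  -2);
	return 0;
}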
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
index c01dc18..1604b37 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
@@ -23,15 +23,16 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 static void
-nv35_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
+	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
 		else           tile->zcomp |= 0x08000000; /* Z24S8 */
 		tile->zcomp |= ((tile->tag->offset           ) >> 6);
@@ -42,20 +43,20 @@
 	}
 }
 
-struct nvkm_oclass *
-nv35_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x35),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv30_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv35_fb = {
+	.init = nv30_fb_init,
 	.tile.regions = 8,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv35_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv20_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv35_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv35_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
index cad75a1..80cc0a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
@@ -23,15 +23,16 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 static void
-nv36_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x40);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x40);
-	if (!nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x40);
+	if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
 		else           tile->zcomp |= 0x20000000; /* Z24S8 */
 		tile->zcomp |= ((tile->tag->offset           ) >> 6);
@@ -42,20 +43,20 @@
 	}
 }
 
-struct nvkm_oclass *
-nv36_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x36),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv30_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv20_ram_oclass,
+static const struct nvkm_fb_func
+nv36_fb = {
+	.init = nv30_fb_init,
 	.tile.regions = 8,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv36_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv20_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv36_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv36_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
index dbe5c19..deec46a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
@@ -23,16 +23,17 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 void
-nv40_fb_tile_comp(struct nvkm_fb *pfb, int i, u32 size, u32 flags,
+nv40_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
 		  struct nvkm_fb_tile *tile)
 {
 	u32 tiles = DIV_ROUND_UP(size, 0x80);
-	u32 tags  = round_up(tiles / pfb->ram->parts, 0x100);
+	u32 tags  = round_up(tiles / fb->ram->parts, 0x100);
 	if ( (flags & 2) &&
-	    !nvkm_mm_head(&pfb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+	    !nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
 		tile->zcomp  = 0x28000000; /* Z24S8_SPLIT_GRAD */
 		tile->zcomp |= ((tile->tag->offset           ) >> 8);
 		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
@@ -42,34 +43,26 @@
 	}
 }
 
-static int
-nv40_fb_init(struct nvkm_object *object)
+static void
+nv40_fb_init(struct nvkm_fb *fb)
 {
-	struct nv04_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
-	return 0;
+	nvkm_mask(fb->subdev.device, 0x10033c, 0x00008000, 0x00000000);
 }
 
-struct nvkm_oclass *
-nv40_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x40),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv40_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv40_ram_oclass,
+static const struct nvkm_fb_func
+nv40_fb = {
+	.init = nv40_fb_init,
 	.tile.regions = 8,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv40_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv20_fb_tile_prog,
-}.base.base;
+	.ram_new = nv40_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv40_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv40_fb, device, index, pfb);
+}
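
nv40_fb_init() above collapses to a single nvkm_mask() call: a read-modify-write that clears the bits in the mask argument before ORing in the data, returning the old value. A standalone sketch of that accessor against a fake register file (reg_read/reg_write are stand-ins, not the nvkm accessors):

#include <stdint.h>

static uint32_t regs[0x200000 >> 2];	/* fake register file */

static uint32_t reg_read(uint32_t addr) { return regs[addr >> 2]; }
static void reg_write(uint32_t addr, uint32_t data) { regs[addr >> 2] = data; }

static uint32_t
reg_mask(uint32_t addr, uint32_t mask, uint32_t data)
{
	uint32_t temp = reg_read(addr);

	reg_write(addr, (temp & ~mask) | data);
	return temp;	/* old value, as nvkm_mask() returns */
}

int
main(void)
{
	reg_write(0x10033c, 0x0000ffff);
	reg_mask(0x10033c, 0x00008000, 0x00000000);	/* clear bit 15 */
	return reg_read(0x10033c) == 0x00007fff ? 0 : 1;
}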
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h
deleted file mode 100644
index 6021826..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __NVKM_FB_NV40_H__
-#define __NVKM_FB_NV40_H__
-#include "priv.h"
-
-struct nv40_ram {
-	struct nvkm_ram base;
-	u32 ctrl;
-	u32 coef;
-};
-
-int  nv40_ram_calc(struct nvkm_fb *, u32);
-int  nv40_ram_prog(struct nvkm_fb *);
-void nv40_ram_tidy(struct nvkm_fb *);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
index d9e1a40..79e57dd5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
@@ -23,46 +23,40 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 void
-nv41_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv41_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
-	nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
-	nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
-	nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
-	nv_rd32(pfb, 0x100600 + (i * 0x10));
-	nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp);
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_wr32(device, 0x100604 + (i * 0x10), tile->limit);
+	nvkm_wr32(device, 0x100608 + (i * 0x10), tile->pitch);
+	nvkm_wr32(device, 0x100600 + (i * 0x10), tile->addr);
+	nvkm_rd32(device, 0x100600 + (i * 0x10));
+	nvkm_wr32(device, 0x100700 + (i * 0x04), tile->zcomp);
 }
 
-int
-nv41_fb_init(struct nvkm_object *object)
+void
+nv41_fb_init(struct nvkm_fb *fb)
 {
-	struct nv04_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x100800, 0x00000001);
-	return 0;
+	nvkm_wr32(fb->subdev.device, 0x100800, 0x00000001);
 }
 
-struct nvkm_oclass *
-nv41_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x41),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv41_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv41_ram_oclass,
+static const struct nvkm_fb_func
+nv41_fb = {
+	.init = nv41_fb_init,
 	.tile.regions = 12,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv40_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv41_fb_tile_prog,
-}.base.base;
+	.ram_new = nv41_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv41_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv41_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
index 20b97c8..06246cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
@@ -23,10 +23,11 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 static void
-nv44_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv44_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nvkm_fb_tile *tile)
 {
 	tile->addr  = 0x00000001; /* mode = vram */
@@ -36,42 +37,36 @@
 }
 
 void
-nv44_fb_tile_prog(struct nvkm_fb *pfb, int i, struct nvkm_fb_tile *tile)
+nv44_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
 {
-	nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
-	nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
-	nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
-	nv_rd32(pfb, 0x100600 + (i * 0x10));
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_wr32(device, 0x100604 + (i * 0x10), tile->limit);
+	nvkm_wr32(device, 0x100608 + (i * 0x10), tile->pitch);
+	nvkm_wr32(device, 0x100600 + (i * 0x10), tile->addr);
+	nvkm_rd32(device, 0x100600 + (i * 0x10));
 }
 
-int
-nv44_fb_init(struct nvkm_object *object)
+void
+nv44_fb_init(struct nvkm_fb *fb)
 {
-	struct nv04_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x100850, 0x80000000);
-	nv_wr32(priv, 0x100800, 0x00000001);
-	return 0;
+	struct nvkm_device *device = fb->subdev.device;
+	nvkm_wr32(device, 0x100850, 0x80000000);
+	nvkm_wr32(device, 0x100800, 0x00000001);
 }
 
-struct nvkm_oclass *
-nv44_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x44),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv44_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv44_ram_oclass,
+static const struct nvkm_fb_func
+nv44_fb = {
+	.init = nv44_fb_init,
 	.tile.regions = 12,
 	.tile.init = nv44_fb_tile_init,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv44_fb_tile_prog,
-}.base.base;
+	.ram_new = nv44_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv44_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv44_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
index 5bfac38..3598a1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
@@ -23,10 +23,11 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
 void
-nv46_fb_tile_init(struct nvkm_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+nv46_fb_tile_init(struct nvkm_fb *fb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nvkm_fb_tile *tile)
 {
 	/* for performance, select alternate bank offset for zeta */
@@ -39,19 +40,19 @@
 	tile->pitch = pitch;
 }
 
-struct nvkm_oclass *
-nv46_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x46),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv44_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv44_ram_oclass,
+static const struct nvkm_fb_func
+nv46_fb = {
+	.init = nv44_fb_init,
 	.tile.regions = 15,
 	.tile.init = nv46_fb_tile_init,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv44_fb_tile_prog,
-}.base.base;
+	.ram_new = nv44_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv46_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv46_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
index d3b3988..c505e44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
@@ -23,22 +23,23 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-nv47_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x47),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv41_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv41_ram_oclass,
+static const struct nvkm_fb_func
+nv47_fb = {
+	.init = nv41_fb_init,
 	.tile.regions = 15,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv40_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv41_fb_tile_prog,
-}.base.base;
+	.ram_new = nv41_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv47_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv47_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
index 236e36c..7b91b9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
@@ -23,22 +23,23 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-nv49_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x49),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv41_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv49_ram_oclass,
+static const struct nvkm_fb_func
+nv49_fb = {
+	.init = nv41_fb_init,
 	.tile.regions = 15,
 	.tile.init = nv30_fb_tile_init,
 	.tile.comp = nv40_fb_tile_comp,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv41_fb_tile_prog,
-}.base.base;
+	.ram_new = nv49_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv49_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv49_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
index 1352b6a..4e98210 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
@@ -23,21 +23,22 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "nv04.h"
+#include "priv.h"
+#include "ram.h"
 
-struct nvkm_oclass *
-nv4e_fb_oclass = &(struct nv04_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x4e),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_fb_ctor,
-		.dtor = _nvkm_fb_dtor,
-		.init = nv44_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv04_fb_memtype_valid,
-	.base.ram = &nv4e_ram_oclass,
+static const struct nvkm_fb_func
+nv4e_fb = {
+	.init = nv44_fb_init,
 	.tile.regions = 12,
 	.tile.init = nv46_fb_tile_init,
 	.tile.fini = nv20_fb_tile_fini,
 	.tile.prog = nv44_fb_tile_prog,
-}.base.base;
+	.ram_new = nv44_ram_new,
+	.memtype_valid = nv04_fb_memtype_valid,
+};
+
+int
+nv4e_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nvkm_fb_new_(&nv4e_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index 0480ce5..f5edfad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -22,11 +22,11 @@
  * Authors: Ben Skeggs
  */
 #include "nv50.h"
+#include "ram.h"
 
 #include <core/client.h>
-#include <core/device.h>
-#include <core/engctx.h>
 #include <core/enum.h>
+#include <engine/fifo.h>
 
 int
 nv50_fb_memtype[0x80] = {
@@ -40,130 +40,139 @@
 	1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
 };
 
-bool
-nv50_fb_memtype_valid(struct nvkm_fb *pfb, u32 memtype)
+static int
+nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
+{
+	struct nv50_fb *fb = nv50_fb(base);
+	return fb->func->ram_new(&fb->base, pram);
+}
+
+static bool
+nv50_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
 {
 	return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
 }
 
 static const struct nvkm_enum vm_dispatch_subclients[] = {
-	{ 0x00000000, "GRCTX", NULL },
-	{ 0x00000001, "NOTIFY", NULL },
-	{ 0x00000002, "QUERY", NULL },
-	{ 0x00000003, "COND", NULL },
-	{ 0x00000004, "M2M_IN", NULL },
-	{ 0x00000005, "M2M_OUT", NULL },
-	{ 0x00000006, "M2M_NOTIFY", NULL },
+	{ 0x00000000, "GRCTX" },
+	{ 0x00000001, "NOTIFY" },
+	{ 0x00000002, "QUERY" },
+	{ 0x00000003, "COND" },
+	{ 0x00000004, "M2M_IN" },
+	{ 0x00000005, "M2M_OUT" },
+	{ 0x00000006, "M2M_NOTIFY" },
 	{}
 };
 
 static const struct nvkm_enum vm_ccache_subclients[] = {
-	{ 0x00000000, "CB", NULL },
-	{ 0x00000001, "TIC", NULL },
-	{ 0x00000002, "TSC", NULL },
+	{ 0x00000000, "CB" },
+	{ 0x00000001, "TIC" },
+	{ 0x00000002, "TSC" },
 	{}
 };
 
 static const struct nvkm_enum vm_prop_subclients[] = {
-	{ 0x00000000, "RT0", NULL },
-	{ 0x00000001, "RT1", NULL },
-	{ 0x00000002, "RT2", NULL },
-	{ 0x00000003, "RT3", NULL },
-	{ 0x00000004, "RT4", NULL },
-	{ 0x00000005, "RT5", NULL },
-	{ 0x00000006, "RT6", NULL },
-	{ 0x00000007, "RT7", NULL },
-	{ 0x00000008, "ZETA", NULL },
-	{ 0x00000009, "LOCAL", NULL },
-	{ 0x0000000a, "GLOBAL", NULL },
-	{ 0x0000000b, "STACK", NULL },
-	{ 0x0000000c, "DST2D", NULL },
+	{ 0x00000000, "RT0" },
+	{ 0x00000001, "RT1" },
+	{ 0x00000002, "RT2" },
+	{ 0x00000003, "RT3" },
+	{ 0x00000004, "RT4" },
+	{ 0x00000005, "RT5" },
+	{ 0x00000006, "RT6" },
+	{ 0x00000007, "RT7" },
+	{ 0x00000008, "ZETA" },
+	{ 0x00000009, "LOCAL" },
+	{ 0x0000000a, "GLOBAL" },
+	{ 0x0000000b, "STACK" },
+	{ 0x0000000c, "DST2D" },
 	{}
 };
 
 static const struct nvkm_enum vm_pfifo_subclients[] = {
-	{ 0x00000000, "PUSHBUF", NULL },
-	{ 0x00000001, "SEMAPHORE", NULL },
+	{ 0x00000000, "PUSHBUF" },
+	{ 0x00000001, "SEMAPHORE" },
 	{}
 };
 
 static const struct nvkm_enum vm_bar_subclients[] = {
-	{ 0x00000000, "FB", NULL },
-	{ 0x00000001, "IN", NULL },
+	{ 0x00000000, "FB" },
+	{ 0x00000001, "IN" },
 	{}
 };
 
 static const struct nvkm_enum vm_client[] = {
-	{ 0x00000000, "STRMOUT", NULL },
+	{ 0x00000000, "STRMOUT" },
 	{ 0x00000003, "DISPATCH", vm_dispatch_subclients },
-	{ 0x00000004, "PFIFO_WRITE", NULL },
+	{ 0x00000004, "PFIFO_WRITE" },
 	{ 0x00000005, "CCACHE", vm_ccache_subclients },
-	{ 0x00000006, "PMSPPP", NULL },
-	{ 0x00000007, "CLIPID", NULL },
-	{ 0x00000008, "PFIFO_READ", NULL },
-	{ 0x00000009, "VFETCH", NULL },
-	{ 0x0000000a, "TEXTURE", NULL },
+	{ 0x00000006, "PMSPPP" },
+	{ 0x00000007, "CLIPID" },
+	{ 0x00000008, "PFIFO_READ" },
+	{ 0x00000009, "VFETCH" },
+	{ 0x0000000a, "TEXTURE" },
 	{ 0x0000000b, "PROP", vm_prop_subclients },
-	{ 0x0000000c, "PVP", NULL },
-	{ 0x0000000d, "PBSP", NULL },
-	{ 0x0000000e, "PCRYPT", NULL },
-	{ 0x0000000f, "PCOUNTER", NULL },
-	{ 0x00000011, "PDAEMON", NULL },
+	{ 0x0000000c, "PVP" },
+	{ 0x0000000d, "PBSP" },
+	{ 0x0000000e, "PCRYPT" },
+	{ 0x0000000f, "PCOUNTER" },
+	{ 0x00000011, "PDAEMON" },
 	{}
 };
 
 static const struct nvkm_enum vm_engine[] = {
-	{ 0x00000000, "PGRAPH", NULL, NVDEV_ENGINE_GR },
-	{ 0x00000001, "PVP", NULL, NVDEV_ENGINE_VP },
-	{ 0x00000004, "PEEPHOLE", NULL },
-	{ 0x00000005, "PFIFO", vm_pfifo_subclients, NVDEV_ENGINE_FIFO },
+	{ 0x00000000, "PGRAPH" },
+	{ 0x00000001, "PVP" },
+	{ 0x00000004, "PEEPHOLE" },
+	{ 0x00000005, "PFIFO", vm_pfifo_subclients },
 	{ 0x00000006, "BAR", vm_bar_subclients },
-	{ 0x00000008, "PMSPPP", NULL, NVDEV_ENGINE_MSPPP },
-	{ 0x00000008, "PMPEG", NULL, NVDEV_ENGINE_MPEG },
-	{ 0x00000009, "PBSP", NULL, NVDEV_ENGINE_BSP },
-	{ 0x0000000a, "PCRYPT", NULL, NVDEV_ENGINE_CIPHER },
-	{ 0x0000000b, "PCOUNTER", NULL },
-	{ 0x0000000c, "SEMAPHORE_BG", NULL },
-	{ 0x0000000d, "PCE0", NULL, NVDEV_ENGINE_CE0 },
-	{ 0x0000000e, "PDAEMON", NULL },
+	{ 0x00000008, "PMSPPP" },
+	{ 0x00000008, "PMPEG" },
+	{ 0x00000009, "PBSP" },
+	{ 0x0000000a, "PCRYPT" },
+	{ 0x0000000b, "PCOUNTER" },
+	{ 0x0000000c, "SEMAPHORE_BG" },
+	{ 0x0000000d, "PCE0" },
+	{ 0x0000000e, "PDAEMON" },
 	{}
 };
 
 static const struct nvkm_enum vm_fault[] = {
-	{ 0x00000000, "PT_NOT_PRESENT", NULL },
-	{ 0x00000001, "PT_TOO_SHORT", NULL },
-	{ 0x00000002, "PAGE_NOT_PRESENT", NULL },
-	{ 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
-	{ 0x00000004, "PAGE_READ_ONLY", NULL },
-	{ 0x00000006, "NULL_DMAOBJ", NULL },
-	{ 0x00000007, "WRONG_MEMTYPE", NULL },
-	{ 0x0000000b, "VRAM_LIMIT", NULL },
-	{ 0x0000000f, "DMAOBJ_LIMIT", NULL },
+	{ 0x00000000, "PT_NOT_PRESENT" },
+	{ 0x00000001, "PT_TOO_SHORT" },
+	{ 0x00000002, "PAGE_NOT_PRESENT" },
+	{ 0x00000003, "PAGE_SYSTEM_ONLY" },
+	{ 0x00000004, "PAGE_READ_ONLY" },
+	{ 0x00000006, "NULL_DMAOBJ" },
+	{ 0x00000007, "WRONG_MEMTYPE" },
+	{ 0x0000000b, "VRAM_LIMIT" },
+	{ 0x0000000f, "DMAOBJ_LIMIT" },
 	{}
 };
 
 static void
-nv50_fb_intr(struct nvkm_subdev *subdev)
+nv50_fb_intr(struct nvkm_fb *base)
 {
-	struct nvkm_device *device = nv_device(subdev);
-	struct nvkm_engine *engine;
-	struct nv50_fb_priv *priv = (void *)subdev;
-	const struct nvkm_enum *en, *cl;
-	struct nvkm_object *engctx = NULL;
-	u32 trap[6], idx, chan;
+	struct nv50_fb *fb = nv50_fb(base);
+	struct nvkm_subdev *subdev = &fb->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_fifo *fifo = device->fifo;
+	struct nvkm_fifo_chan *chan;
+	const struct nvkm_enum *en, *re, *cl, *sc;
+	u32 trap[6], idx, inst;
 	u8 st0, st1, st2, st3;
+	unsigned long flags;
 	int i;
 
-	idx = nv_rd32(priv, 0x100c90);
+	idx = nvkm_rd32(device, 0x100c90);
 	if (!(idx & 0x80000000))
 		return;
 	idx &= 0x00ffffff;
 
 	for (i = 0; i < 6; i++) {
-		nv_wr32(priv, 0x100c90, idx | i << 24);
-		trap[i] = nv_rd32(priv, 0x100c94);
+		nvkm_wr32(device, 0x100c90, idx | i << 24);
+		trap[i] = nvkm_rd32(device, 0x100c94);
 	}
-	nv_wr32(priv, 0x100c90, idx | 0x80000000);
+	nvkm_wr32(device, 0x100c90, idx | 0x80000000);
 
 	/* decode status bits into something more useful */
 	if (device->chipset  < 0xa3 ||
@@ -178,143 +187,103 @@
 		st2 = (trap[0] & 0x00ff0000) >> 16;
 		st3 = (trap[0] & 0xff000000) >> 24;
 	}
-	chan = (trap[2] << 16) | trap[1];
+	inst = ((trap[2] << 16) | trap[1]) << 12;
 
 	en = nvkm_enum_find(vm_engine, st0);
-
-	if (en && en->data2) {
-		const struct nvkm_enum *orig_en = en;
-		while (en->name && en->value == st0 && en->data2) {
-			engine = nvkm_engine(subdev, en->data2);
-			/*XXX: clean this up */
-			if (!engine && en->data2 == NVDEV_ENGINE_BSP)
-				engine = nvkm_engine(subdev, NVDEV_ENGINE_MSVLD);
-			if (!engine && en->data2 == NVDEV_ENGINE_CIPHER)
-				engine = nvkm_engine(subdev, NVDEV_ENGINE_SEC);
-			if (!engine && en->data2 == NVDEV_ENGINE_VP)
-				engine = nvkm_engine(subdev, NVDEV_ENGINE_MSPDEC);
-			if (engine) {
-				engctx = nvkm_engctx_get(engine, chan);
-				if (engctx)
-					break;
-			}
-			en++;
-		}
-		if (!engctx)
-			en = orig_en;
-	}
-
-	nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x [%s] ",
-		 (trap[5] & 0x00000100) ? "read" : "write",
-		 trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan,
-		 nvkm_client_name(engctx));
-
-	nvkm_engctx_put(engctx);
-
-	if (en)
-		pr_cont("%s/", en->name);
-	else
-		pr_cont("%02x/", st0);
-
+	re = nvkm_enum_find(vm_fault , st1);
 	cl = nvkm_enum_find(vm_client, st2);
-	if (cl)
-		pr_cont("%s/", cl->name);
-	else
-		pr_cont("%02x/", st2);
+	if      (cl && cl->data) sc = nvkm_enum_find(cl->data, st3);
+	else if (en && en->data) sc = nvkm_enum_find(en->data, st3);
+	else                     sc = NULL;
 
-	if      (cl && cl->data) cl = nvkm_enum_find(cl->data, st3);
-	else if (en && en->data) cl = nvkm_enum_find(en->data, st3);
-	else                     cl = NULL;
-	if (cl)
-		pr_cont("%s", cl->name);
-	else
-		pr_cont("%02x", st3);
-
-	pr_cont(" reason: ");
-	en = nvkm_enum_find(vm_fault, st1);
-	if (en)
-		pr_cont("%s\n", en->name);
-	else
-		pr_cont("0x%08x\n", st1);
+	chan = nvkm_fifo_chan_inst(fifo, inst, &flags);
+	nvkm_error(subdev, "trapped %s at %02x%04x%04x on channel %d [%08x %s] "
+			   "engine %02x [%s] client %02x [%s] "
+			   "subclient %02x [%s] reason %08x [%s]\n",
+		   (trap[5] & 0x00000100) ? "read" : "write",
+		   trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff,
+		   chan ? chan->chid : -1, inst,
+		   chan ? chan->object.client->name : "unknown",
+		   st0, en ? en->name : "",
+		   st2, cl ? cl->name : "", st3, sc ? sc->name : "",
+		   st1, re ? re->name : "");
+	nvkm_fifo_chan_put(fifo, flags, &chan);
 }
 
-int
-nv50_fb_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+static void
+nv50_fb_init(struct nvkm_fb *base)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nv50_fb_priv *priv;
-	int ret;
-
-	ret = nvkm_fb_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (priv->r100c08_page) {
-		priv->r100c08 = dma_map_page(nv_device_base(device),
-					     priv->r100c08_page, 0, PAGE_SIZE,
-					     DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(nv_device_base(device), priv->r100c08))
-			return -EFAULT;
-	} else {
-		nv_warn(priv, "failed 0x100c08 page alloc\n");
-	}
-
-	nv_subdev(priv)->intr = nv50_fb_intr;
-	return 0;
-}
-
-void
-nv50_fb_dtor(struct nvkm_object *object)
-{
-	struct nvkm_device *device = nv_device(object);
-	struct nv50_fb_priv *priv = (void *)object;
-
-	if (priv->r100c08_page) {
-		dma_unmap_page(nv_device_base(device), priv->r100c08, PAGE_SIZE,
-			       DMA_BIDIRECTIONAL);
-		__free_page(priv->r100c08_page);
-	}
-
-	nvkm_fb_destroy(&priv->base);
-}
-
-int
-nv50_fb_init(struct nvkm_object *object)
-{
-	struct nv50_fb_impl *impl = (void *)object->oclass;
-	struct nv50_fb_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_fb_init(&priv->base);
-	if (ret)
-		return ret;
+	struct nv50_fb *fb = nv50_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
 
 	/* Not a clue what this is exactly.  Without pointing it at a
 	 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
 	 * cause IOMMU "read from address 0" errors (rh#561267)
 	 */
-	nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
+	nvkm_wr32(device, 0x100c08, fb->r100c08 >> 8);
 
 	/* This is needed to get meaningful information from 100c90
 	 * on traps. No idea what these values mean exactly. */
-	nv_wr32(priv, 0x100c90, impl->trap);
+	nvkm_wr32(device, 0x100c90, fb->func->trap);
+}
+
+static void *
+nv50_fb_dtor(struct nvkm_fb *base)
+{
+	struct nv50_fb *fb = nv50_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
+
+	if (fb->r100c08_page) {
+		dma_unmap_page(device->dev, fb->r100c08, PAGE_SIZE,
+			       DMA_BIDIRECTIONAL);
+		__free_page(fb->r100c08_page);
+	}
+
+	return fb;
+}
+
+static const struct nvkm_fb_func
+nv50_fb_ = {
+	.dtor = nv50_fb_dtor,
+	.init = nv50_fb_init,
+	.intr = nv50_fb_intr,
+	.ram_new = nv50_fb_ram_new,
+	.memtype_valid = nv50_fb_memtype_valid,
+};
+
+int
+nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_fb **pfb)
+{
+	struct nv50_fb *fb;
+
+	if (!(fb = kzalloc(sizeof(*fb), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_fb_ctor(&nv50_fb_, device, index, &fb->base);
+	fb->func = func;
+	*pfb = &fb->base;
+
+	fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (fb->r100c08_page) {
+		fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
+					   PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(device->dev, fb->r100c08))
+			return -EFAULT;
+	} else {
+		nvkm_warn(&fb->base.subdev, "failed 100c08 page alloc\n");
+	}
+
 	return 0;
 }
 
-struct nvkm_oclass *
-nv50_fb_oclass = &(struct nv50_fb_impl) {
-	.base.base.handle = NV_SUBDEV(FB, 0x50),
-	.base.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fb_ctor,
-		.dtor = nv50_fb_dtor,
-		.init = nv50_fb_init,
-		.fini = _nvkm_fb_fini,
-	},
-	.base.memtype = nv50_fb_memtype_valid,
-	.base.ram = &nv50_ram_oclass,
+static const struct nv50_fb_func
+nv50_fb = {
+	.ram_new = nv50_ram_new,
 	.trap = 0x000707ff,
-}.base.base;
+};
+
+int
+nv50_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+	return nv50_fb_new_(&nv50_fb, device, index, pfb);
+}
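
The rewritten nv50_fb_intr() above resolves each status byte against a sentinel-terminated { value, name } table so the whole trap can be reported in one nvkm_error() call instead of a chain of pr_cont()s. A generic sketch of that lookup idiom (plain C; the real helper is nvkm_enum_find):

#include <stdio.h>

struct name_enum {
	unsigned value;
	const char *name;
};

static const struct name_enum vm_fault[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x07, "WRONG_MEMTYPE" },
	{}	/* sentinel: name == NULL ends the table */
};

static const struct name_enum *
enum_find(const struct name_enum *en, unsigned value)
{
	while (en->name) {
		if (en->value == value)
			return en;
		en++;
	}
	return NULL;
}

int
main(void)
{
	const struct name_enum *re = enum_find(vm_fault, 0x07);

	printf("reason %02x [%s]\n", 0x07, re ? re->name : "");
	return 0;
}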
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
index f3cde3f..faa88c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
@@ -1,31 +1,21 @@
 #ifndef __NVKM_FB_NV50_H__
 #define __NVKM_FB_NV50_H__
+#define nv50_fb(p) container_of((p), struct nv50_fb, base)
 #include "priv.h"
 
-struct nv50_fb_priv {
+struct nv50_fb {
+	const struct nv50_fb_func *func;
 	struct nvkm_fb base;
 	struct page *r100c08_page;
 	dma_addr_t r100c08;
 };
 
-int  nv50_fb_ctor(struct nvkm_object *, struct nvkm_object *,
-		  struct nvkm_oclass *, void *, u32,
-		  struct nvkm_object **);
-void nv50_fb_dtor(struct nvkm_object *);
-int  nv50_fb_init(struct nvkm_object *);
-
-struct nv50_fb_impl {
-	struct nvkm_fb_impl base;
+struct nv50_fb_func {
+	int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
 	u32 trap;
 };
 
-#define nv50_ram_create(p,e,o,d)                                               \
-	nv50_ram_create_((p), (e), (o), sizeof(**d), (void **)d)
-int  nv50_ram_create_(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, int, void **);
-int  nv50_ram_get(struct nvkm_fb *, u64 size, u32 align, u32 ncmin,
-		  u32 memtype, struct nvkm_mem **);
-void nv50_ram_put(struct nvkm_fb *, struct nvkm_mem **);
-void __nv50_ram_put(struct nvkm_fb *, struct nvkm_mem *);
+int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, int index,
+		 struct nvkm_fb **pfb);
 extern int nv50_fb_memtype[0x80];
 #endif
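
The nv50_fb(p) macro above is the usual container_of() downcast: given a pointer to the embedded base member, recover the wrapping structure. A self-contained illustration with a local container_of definition:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int x; };

struct derived {
	int extra;
	struct base base;	/* embedded by value, not a pointer */
};

static struct derived *
to_derived(struct base *b)
{
	return container_of(b, struct derived, base);
}

int
main(void)
{
	struct derived d = { .extra = 1 };

	return to_derived(&d.base)->extra - 1;	/* exits 0 */
}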
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 485c4b6..62b9feb5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -1,73 +1,62 @@
 #ifndef __NVKM_FB_PRIV_H__
 #define __NVKM_FB_PRIV_H__
+#define nvkm_fb(p) container_of((p), struct nvkm_fb, subdev)
 #include <subdev/fb.h>
 struct nvkm_bios;
 
-#define nvkm_ram_create(p,e,o,d)                                            \
-	nvkm_object_create_((p), (e), (o), 0, sizeof(**d), (void **)d)
-#define nvkm_ram_destroy(p)                                                 \
-	nvkm_object_destroy(&(p)->base)
-#define nvkm_ram_init(p)                                                    \
-	nvkm_object_init(&(p)->base)
-#define nvkm_ram_fini(p,s)                                                  \
-	nvkm_object_fini(&(p)->base, (s))
+struct nvkm_fb_func {
+	void *(*dtor)(struct nvkm_fb *);
+	void (*init)(struct nvkm_fb *);
+	void (*intr)(struct nvkm_fb *);
 
-#define nvkm_ram_create_(p,e,o,s,d)                                         \
-	nvkm_object_create_((p), (e), (o), 0, (s), (void **)d)
-#define _nvkm_ram_dtor nvkm_object_destroy
-#define _nvkm_ram_init nvkm_object_init
-#define _nvkm_ram_fini nvkm_object_fini
+	struct {
+		int regions;
+		void (*init)(struct nvkm_fb *, int i, u32 addr, u32 size,
+			     u32 pitch, u32 flags, struct nvkm_fb_tile *);
+		void (*comp)(struct nvkm_fb *, int i, u32 size, u32 flags,
+			     struct nvkm_fb_tile *);
+		void (*fini)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+		void (*prog)(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+	} tile;
 
-extern struct nvkm_oclass nv04_ram_oclass;
-extern struct nvkm_oclass nv10_ram_oclass;
-extern struct nvkm_oclass nv1a_ram_oclass;
-extern struct nvkm_oclass nv20_ram_oclass;
-extern struct nvkm_oclass nv40_ram_oclass;
-extern struct nvkm_oclass nv41_ram_oclass;
-extern struct nvkm_oclass nv44_ram_oclass;
-extern struct nvkm_oclass nv49_ram_oclass;
-extern struct nvkm_oclass nv4e_ram_oclass;
-extern struct nvkm_oclass nv50_ram_oclass;
-extern struct nvkm_oclass gt215_ram_oclass;
-extern struct nvkm_oclass mcp77_ram_oclass;
-extern struct nvkm_oclass gf100_ram_oclass;
-extern struct nvkm_oclass gk104_ram_oclass;
-extern struct nvkm_oclass gm107_ram_oclass;
+	int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
 
-int nvkm_sddr2_calc(struct nvkm_ram *ram);
-int nvkm_sddr3_calc(struct nvkm_ram *ram);
-int nvkm_gddr3_calc(struct nvkm_ram *ram);
-int nvkm_gddr5_calc(struct nvkm_ram *ram, bool nuts);
-
-#define nvkm_fb_create(p,e,c,d)                                             \
-	nvkm_fb_create_((p), (e), (c), sizeof(**d), (void **)d)
-#define nvkm_fb_destroy(p) ({                                               \
-	struct nvkm_fb *pfb = (p);                                          \
-	_nvkm_fb_dtor(nv_object(pfb));                                      \
-})
-#define nvkm_fb_init(p) ({                                                  \
-	struct nvkm_fb *pfb = (p);                                          \
-	_nvkm_fb_init(nv_object(pfb));                                      \
-})
-#define nvkm_fb_fini(p,s) ({                                                \
-	struct nvkm_fb *pfb = (p);                                          \
-	_nvkm_fb_fini(nv_object(pfb), (s));                                 \
-})
-
-int nvkm_fb_create_(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, int, void **);
-void _nvkm_fb_dtor(struct nvkm_object *);
-int  _nvkm_fb_init(struct nvkm_object *);
-int  _nvkm_fb_fini(struct nvkm_object *, bool);
-
-struct nvkm_fb_impl {
-	struct nvkm_oclass base;
-	struct nvkm_oclass *ram;
-	bool (*memtype)(struct nvkm_fb *, u32);
+	bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);
 };
 
-bool nv04_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
-bool nv50_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
+void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
+		  int index, struct nvkm_fb *);
+int nvkm_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *device,
+		 int index, struct nvkm_fb **);
+int nvkm_fb_bios_memtype(struct nvkm_bios *);
 
-int  nvkm_fb_bios_memtype(struct nvkm_bios *);
+bool nv04_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
+
+void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
+void nv10_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+void nv10_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+void nv20_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
+void nv20_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
+void nv20_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+void nv30_fb_init(struct nvkm_fb *);
+void nv30_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
+
+void nv40_fb_tile_comp(struct nvkm_fb *, int i, u32 size, u32 flags,
+		       struct nvkm_fb_tile *);
+
+void nv41_fb_init(struct nvkm_fb *);
+void nv41_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+void nv44_fb_init(struct nvkm_fb *);
+void nv44_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+
+void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nvkm_fb_tile *);
+
+bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
new file mode 100644
index 0000000..c17d559
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ram.h"
+
+int
+nvkm_ram_init(struct nvkm_ram *ram)
+{
+	if (ram->func->init)
+		return ram->func->init(ram);
+	return 0;
+}
+
+void
+nvkm_ram_del(struct nvkm_ram **pram)
+{
+	struct nvkm_ram *ram = *pram;
+	if (ram && !WARN_ON(!ram->func)) {
+		if (ram->func->dtor)
+			*pram = ram->func->dtor(ram);
+		nvkm_mm_fini(&ram->tags);
+		nvkm_mm_fini(&ram->vram);
+		kfree(*pram);
+		*pram = NULL;
+	}
+}
+
+int
+nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
+	      enum nvkm_ram_type type, u64 size, u32 tags,
+	      struct nvkm_ram *ram)
+{
+	static const char *name[] = {
+		[NVKM_RAM_TYPE_UNKNOWN] = "of unknown memory type",
+		[NVKM_RAM_TYPE_STOLEN ] = "stolen system memory",
+		[NVKM_RAM_TYPE_SGRAM  ] = "SGRAM",
+		[NVKM_RAM_TYPE_SDRAM  ] = "SDRAM",
+		[NVKM_RAM_TYPE_DDR1   ] = "DDR1",
+		[NVKM_RAM_TYPE_DDR2   ] = "DDR2",
+		[NVKM_RAM_TYPE_DDR3   ] = "DDR3",
+		[NVKM_RAM_TYPE_GDDR2  ] = "GDDR2",
+		[NVKM_RAM_TYPE_GDDR3  ] = "GDDR3",
+		[NVKM_RAM_TYPE_GDDR4  ] = "GDDR4",
+		[NVKM_RAM_TYPE_GDDR5  ] = "GDDR5",
+	};
+	struct nvkm_subdev *subdev = &fb->subdev;
+	int ret;
+
+	nvkm_info(subdev, "%d MiB %s\n", (int)(size >> 20), name[type]);
+	ram->func = func;
+	ram->fb = fb;
+	ram->type = type;
+	ram->size = size;
+
+	if (!nvkm_mm_initialised(&ram->vram)) {
+		ret = nvkm_mm_init(&ram->vram, 0, size >> NVKM_RAM_MM_SHIFT, 1);
+		if (ret)
+			return ret;
+	}
+
+	if (!nvkm_mm_initialised(&ram->tags)) {
+		ret = nvkm_mm_init(&ram->tags, 0, tags ? ++tags : 0, 1);
+		if (ret)
+			return ret;
+
+		nvkm_debug(subdev, "%d compression tags\n", tags);
+	}
+
+	return 0;
+}
+
+int
+nvkm_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
+	      enum nvkm_ram_type type, u64 size, u32 tags,
+	      struct nvkm_ram **pram)
+{
+	if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_ram_ctor(func, fb, type, size, tags, *pram);
+}
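
nvkm_ram_ctor()/nvkm_ram_new_() above split construction in two so that a subclass (gf100_ram, nv40_ram, ...) can embed struct nvkm_ram and still reuse the common setup, while plain cases just allocate and delegate. A minimal sketch of the split, with hypothetical names:

#include <errno.h>
#include <stdlib.h>

struct ram {
	unsigned long long size;
};

/* fills in storage the caller already owns */
static int
ram_ctor(unsigned long long size, struct ram *ram)
{
	ram->size = size;
	return 0;
}

/* "allocate, then ctor" for the non-subclassed case */
static int
ram_new_(unsigned long long size, struct ram **pram)
{
	if (!(*pram = calloc(1, sizeof(**pram))))
		return -ENOMEM;
	return ram_ctor(size, *pram);
}

/* a subclass embeds the base and calls the ctor on it directly */
struct gf100_ram {
	struct ram base;
	unsigned maskaddr;
};

int
main(void)
{
	struct gf100_ram gf100;
	struct ram *ram = NULL;
	int ret = ram_ctor(1ULL << 30, &gf100.base);	/* embedded base */

	if (ret == 0)
		ret = ram_new_(1ULL << 30, &ram);	/* heap-allocated */
	free(ram);
	return ret;
}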
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
new file mode 100644
index 0000000..f816cbf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -0,0 +1,50 @@
+#ifndef __NVKM_FB_RAM_PRIV_H__
+#define __NVKM_FB_RAM_PRIV_H__
+#include "priv.h"
+
+int  nvkm_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
+		   enum nvkm_ram_type, u64 size, u32 tags,
+		   struct nvkm_ram *);
+int  nvkm_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
+		   enum nvkm_ram_type, u64 size, u32 tags,
+		   struct nvkm_ram **);
+void nvkm_ram_del(struct nvkm_ram **);
+int  nvkm_ram_init(struct nvkm_ram *);
+
+extern const struct nvkm_ram_func nv04_ram_func;
+
+int  nv50_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
+		   struct nvkm_ram *);
+int  nv50_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
+void nv50_ram_put(struct nvkm_ram *, struct nvkm_mem **);
+void __nv50_ram_put(struct nvkm_ram *, struct nvkm_mem *);
+
+int  gf100_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
+		    u32, struct nvkm_ram *);
+int  gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
+void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);
+
+int  gk104_ram_init(struct nvkm_ram *ram);
+
+/* RAM type-specific MR calculation routines */
+int nvkm_sddr2_calc(struct nvkm_ram *);
+int nvkm_sddr3_calc(struct nvkm_ram *);
+int nvkm_gddr3_calc(struct nvkm_ram *);
+int nvkm_gddr5_calc(struct nvkm_ram *, bool nuts);
+
+int nv04_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv10_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv1a_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv20_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv40_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv41_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv44_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv49_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv4e_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int nv50_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gt215_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int mcp77_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
index f343682..9ef9d6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
@@ -1,10 +1,11 @@
 #ifndef __NVKM_FBRAM_FUC_H__
 #define __NVKM_FBRAM_FUC_H__
+#include <subdev/fb.h>
 #include <subdev/pmu.h>
 
 struct ramfuc {
 	struct nvkm_memx *memx;
-	struct nvkm_fb *pfb;
+	struct nvkm_fb *fb;
 	int sequence;
 };
 
@@ -54,17 +55,14 @@
 }
 
 static inline int
-ramfuc_init(struct ramfuc *ram, struct nvkm_fb *pfb)
+ramfuc_init(struct ramfuc *ram, struct nvkm_fb *fb)
 {
-	struct nvkm_pmu *pmu = nvkm_pmu(pfb);
-	int ret;
-
-	ret = nvkm_memx_init(pmu, &ram->memx);
+	int ret = nvkm_memx_init(fb->subdev.device->pmu, &ram->memx);
 	if (ret)
 		return ret;
 
 	ram->sequence++;
-	ram->pfb = pfb;
+	ram->fb = fb;
 	return 0;
 }
 
@@ -72,9 +70,9 @@
 ramfuc_exec(struct ramfuc *ram, bool exec)
 {
 	int ret = 0;
-	if (ram->pfb) {
+	if (ram->fb) {
 		ret = nvkm_memx_fini(&ram->memx, exec);
-		ram->pfb = NULL;
+		ram->fb = NULL;
 	}
 	return ret;
 }
@@ -82,8 +80,9 @@
 static inline u32
 ramfuc_rd32(struct ramfuc *ram, struct ramfuc_reg *reg)
 {
+	struct nvkm_device *device = ram->fb->subdev.device;
 	if (reg->sequence != ram->sequence)
-		reg->data = nv_rd32(ram->pfb, reg->addr);
+		reg->data = nvkm_rd32(device, reg->addr);
 	return reg->data;
 }
 
@@ -144,11 +143,9 @@
 }
 
 static inline int
-ramfuc_train_result(struct nvkm_fb *pfb, u32 *result, u32 rsize)
+ramfuc_train_result(struct nvkm_fb *fb, u32 *result, u32 rsize)
 {
-	struct nvkm_pmu *pmu = nvkm_pmu(pfb);
-
-	return nvkm_memx_train_result(pmu, result, rsize);
+	return nvkm_memx_train_result(fb->subdev.device->pmu, result, rsize);
 }
 
 static inline void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index de9f395..772425c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs
  */
-#include "gf100.h"
+#define gf100_ram(p) container_of((p), struct gf100_ram, base)
+#include "ram.h"
 #include "ramfuc.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
@@ -108,9 +108,10 @@
 gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
 {
 	struct gf100_ram *ram = container_of(fuc, typeof(*ram), fuc);
-	struct nvkm_fb *pfb = nvkm_fb(ram);
-	u32 part = nv_rd32(pfb, 0x022438), i;
-	u32 mask = nv_rd32(pfb, 0x022554);
+	struct nvkm_fb *fb = ram->base.fb;
+	struct nvkm_device *device = fb->subdev.device;
+	u32 part = nvkm_rd32(device, 0x022438), i;
+	u32 mask = nvkm_rd32(device, 0x022554);
 	u32 addr = 0x110974;
 
 	ram_wr32(fuc, 0x10f910, magic);
@@ -124,12 +125,14 @@
 }
 
 static int
-gf100_ram_calc(struct nvkm_fb *pfb, u32 freq)
+gf100_ram_calc(struct nvkm_ram *base, u32 freq)
 {
-	struct nvkm_clk *clk = nvkm_clk(pfb);
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct gf100_ram *ram = (void *)pfb->ram;
+	struct gf100_ram *ram = gf100_ram(base);
 	struct gf100_ramfuc *fuc = &ram->fuc;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_clk *clk = device->clk;
+	struct nvkm_bios *bios = device->bios;
 	struct nvbios_ramcfg cfg;
 	u8  ver, cnt, len, strap;
 	struct {
@@ -145,37 +148,37 @@
 	rammap.data = nvbios_rammapEm(bios, freq / 1000, &ver, &rammap.size,
 				      &cnt, &ramcfg.size, &cfg);
 	if (!rammap.data || ver != 0x10 || rammap.size < 0x0e) {
-		nv_error(pfb, "invalid/missing rammap entry\n");
+		nvkm_error(subdev, "invalid/missing rammap entry\n");
 		return -EINVAL;
 	}
 
 	/* locate specific data set for the attached memory */
-	strap = nvbios_ramcfg_index(nv_subdev(pfb));
+	strap = nvbios_ramcfg_index(subdev);
 	if (strap >= cnt) {
-		nv_error(pfb, "invalid ramcfg strap\n");
+		nvkm_error(subdev, "invalid ramcfg strap\n");
 		return -EINVAL;
 	}
 
 	ramcfg.data = rammap.data + rammap.size + (strap * ramcfg.size);
 	if (!ramcfg.data || ver != 0x10 || ramcfg.size < 0x0e) {
-		nv_error(pfb, "invalid/missing ramcfg entry\n");
+		nvkm_error(subdev, "invalid/missing ramcfg entry\n");
 		return -EINVAL;
 	}
 
 	/* lookup memory timings, if bios says they're present */
-	strap = nv_ro08(bios, ramcfg.data + 0x01);
+	strap = nvbios_rd08(bios, ramcfg.data + 0x01);
 	if (strap != 0xff) {
 		timing.data = nvbios_timingEe(bios, strap, &ver, &timing.size,
 					      &cnt, &len);
 		if (!timing.data || ver != 0x10 || timing.size < 0x19) {
-			nv_error(pfb, "invalid/missing timing entry\n");
+			nvkm_error(subdev, "invalid/missing timing entry\n");
 			return -EINVAL;
 		}
 	} else {
 		timing.data = 0;
 	}
 
-	ret = ram_init(fuc, pfb);
+	ret = ram_init(fuc, ram->base.fb);
 	if (ret)
 		return ret;
 
@@ -184,9 +187,9 @@
 
 	/* determine target mclk configuration */
 	if (!(ram_rd32(fuc, 0x137300) & 0x00000100))
-		ref = clk->read(clk, nv_clk_src_sppll0);
+		ref = nvkm_clk_read(clk, nv_clk_src_sppll0);
 	else
-		ref = clk->read(clk, nv_clk_src_sppll1);
+		ref = nvkm_clk_read(clk, nv_clk_src_sppll1);
 	div = max(min((ref * 2) / freq, (u32)65), (u32)2) - 2;
 	out = (ref * 2) / (div + 2);
 	mode = freq != out;
@@ -210,10 +213,10 @@
 
 	if (mode == 1 && from == 0) {
 		/* calculate refpll */
-		ret = gt215_pll_calc(nv_subdev(pfb), &ram->refpll,
-				     ram->mempll.refclk, &N1, NULL, &M1, &P);
+		ret = gt215_pll_calc(subdev, &ram->refpll, ram->mempll.refclk,
+				     &N1, NULL, &M1, &P);
 		if (ret <= 0) {
-			nv_error(pfb, "unable to calc refpll\n");
+			nvkm_error(subdev, "unable to calc refpll\n");
 			return ret ? ret : -ERANGE;
 		}
 
@@ -225,10 +228,10 @@
 		ram_wait(fuc, 0x137390, 0x00020000, 0x00020000, 64000);
 
 		/* calculate mempll */
-		ret = gt215_pll_calc(nv_subdev(pfb), &ram->mempll, freq,
+		ret = gt215_pll_calc(subdev, &ram->mempll, freq,
 				     &N1, NULL, &M1, &P);
 		if (ret <= 0) {
-			nv_error(pfb, "unable to calc refpll\n");
+			nvkm_error(subdev, "unable to calc mempll\n");
 			return ret ? ret : -ERANGE;
 		}
 
@@ -402,49 +405,48 @@
 }
 
 static int
-gf100_ram_prog(struct nvkm_fb *pfb)
+gf100_ram_prog(struct nvkm_ram *base)
 {
-	struct nvkm_device *device = nv_device(pfb);
-	struct gf100_ram *ram = (void *)pfb->ram;
-	struct gf100_ramfuc *fuc = &ram->fuc;
-	ram_exec(fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
+	struct gf100_ram *ram = gf100_ram(base);
+	struct nvkm_device *device = ram->base.fb->subdev.device;
+	ram_exec(&ram->fuc, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
 	return 0;
 }
 
 static void
-gf100_ram_tidy(struct nvkm_fb *pfb)
+gf100_ram_tidy(struct nvkm_ram *base)
 {
-	struct gf100_ram *ram = (void *)pfb->ram;
-	struct gf100_ramfuc *fuc = &ram->fuc;
-	ram_exec(fuc, false);
+	struct gf100_ram *ram = gf100_ram(base);
+	ram_exec(&ram->fuc, false);
 }
 
 extern const u8 gf100_pte_storage_type_map[256];
 
 void
-gf100_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
+gf100_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
 {
-	struct nvkm_ltc *ltc = nvkm_ltc(pfb);
+	struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
 	struct nvkm_mem *mem = *pmem;
 
 	*pmem = NULL;
 	if (unlikely(mem == NULL))
 		return;
 
-	mutex_lock(&pfb->base.mutex);
+	mutex_lock(&ram->fb->subdev.mutex);
 	if (mem->tag)
-		ltc->tags_free(ltc, &mem->tag);
-	__nv50_ram_put(pfb, mem);
-	mutex_unlock(&pfb->base.mutex);
+		nvkm_ltc_tags_free(ltc, &mem->tag);
+	__nv50_ram_put(ram, mem);
+	mutex_unlock(&ram->fb->subdev.mutex);
 
 	kfree(mem);
 }
 
 int
-gf100_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
+gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 	      u32 memtype, struct nvkm_mem **pmem)
 {
-	struct nvkm_mm *mm = &pfb->vram;
+	struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
+	struct nvkm_mm *mm = &ram->vram;
 	struct nvkm_mm_node *r;
 	struct nvkm_mem *mem;
 	int type = (memtype & 0x0ff);
@@ -452,9 +454,9 @@
 	const bool comp = gf100_pte_storage_type_map[type] != type;
 	int ret;
 
-	size  >>= 12;
-	align >>= 12;
-	ncmin >>= 12;
+	size  >>= NVKM_RAM_MM_SHIFT;
+	align >>= NVKM_RAM_MM_SHIFT;
+	ncmin >>= NVKM_RAM_MM_SHIFT;
 	if (!ncmin)
 		ncmin = size;
 
@@ -465,14 +467,12 @@
 	INIT_LIST_HEAD(&mem->regions);
 	mem->size = size;
 
-	mutex_lock(&pfb->base.mutex);
+	mutex_lock(&ram->fb->subdev.mutex);
 	if (comp) {
-		struct nvkm_ltc *ltc = nvkm_ltc(pfb);
-
 		/* compression only works with lpages */
-		if (align == (1 << (17 - 12))) {
+		if (align == (1 << (17 - NVKM_RAM_MM_SHIFT))) {
 			int n = size >> 5;
-			ltc->tags_alloc(ltc, n, &mem->tag);
+			nvkm_ltc_tags_alloc(ltc, n, &mem->tag);
 		}
 
 		if (unlikely(!mem->tag))
@@ -486,178 +486,173 @@
 		else
 			ret = nvkm_mm_head(mm, 0, 1, size, ncmin, align, &r);
 		if (ret) {
-			mutex_unlock(&pfb->base.mutex);
-			pfb->ram->put(pfb, &mem);
+			mutex_unlock(&ram->fb->subdev.mutex);
+			ram->func->put(ram, &mem);
 			return ret;
 		}
 
 		list_add_tail(&r->rl_entry, &mem->regions);
 		size -= r->length;
 	} while (size);
-	mutex_unlock(&pfb->base.mutex);
+	mutex_unlock(&ram->fb->subdev.mutex);
 
 	r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
-	mem->offset = (u64)r->offset << 12;
+	mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
 	*pmem = mem;
 	return 0;
 }
 
-int
-gf100_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, u32 maskaddr, int size,
-		  void **pobject)
+static int
+gf100_ram_init(struct nvkm_ram *base)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct nvkm_ram *ram;
-	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-	u32 parts = nv_rd32(pfb, 0x022438);
-	u32 pmask = nv_rd32(pfb, maskaddr);
-	u32 bsize = nv_rd32(pfb, 0x10f20c);
-	u32 offset, length;
-	bool uniform = true;
-	int ret, part;
+	static const u8  train0[] = {
+		0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
+		0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
+	};
+	static const u32 train1[] = {
+		0x00000000, 0xffffffff,
+		0x55555555, 0xaaaaaaaa,
+		0x33333333, 0xcccccccc,
+		0xf0f0f0f0, 0x0f0f0f0f,
+		0x00ff00ff, 0xff00ff00,
+		0x0000ffff, 0xffff0000,
+	};
+	struct gf100_ram *ram = gf100_ram(base);
+	struct nvkm_device *device = ram->base.fb->subdev.device;
+	int i;
 
-	ret = nvkm_ram_create_(parent, engine, oclass, size, pobject);
-	ram = *pobject;
+	switch (ram->base.type) {
+	case NVKM_RAM_TYPE_GDDR5:
+		break;
+	default:
+		return 0;
+	}
+
+	/* prepare for ddr link training, and load training patterns */
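+	/* the 12-entry patterns are cycled across all 0x30 entries */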
+	for (i = 0; i < 0x30; i++) {
+		nvkm_wr32(device, 0x10f968, 0x00000000 | (i << 8));
+		nvkm_wr32(device, 0x10f96c, 0x00000000 | (i << 8));
+		nvkm_wr32(device, 0x10f920, 0x00000100 | train0[i % 12]);
+		nvkm_wr32(device, 0x10f924, 0x00000100 | train0[i % 12]);
+		nvkm_wr32(device, 0x10f918,              train1[i % 12]);
+		nvkm_wr32(device, 0x10f91c,              train1[i % 12]);
+		nvkm_wr32(device, 0x10f920, 0x00000000 | train0[i % 12]);
+		nvkm_wr32(device, 0x10f924, 0x00000000 | train0[i % 12]);
+		nvkm_wr32(device, 0x10f918,              train1[i % 12]);
+		nvkm_wr32(device, 0x10f91c,              train1[i % 12]);
+	}
+
+	return 0;
+}
+
+static const struct nvkm_ram_func
+gf100_ram_func = {
+	.init = gf100_ram_init,
+	.get = gf100_ram_get,
+	.put = gf100_ram_put,
+	.calc = gf100_ram_calc,
+	.prog = gf100_ram_prog,
+	.tidy = gf100_ram_tidy,
+};
+
+int
+gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
+	       u32 maskaddr, struct nvkm_ram *ram)
+{
+	struct nvkm_subdev *subdev = &fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
+	const u32 rsvd_head = ( 256 * 1024); /* vga memory */
+	const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
+	u32 parts = nvkm_rd32(device, 0x022438);
+	u32 pmask = nvkm_rd32(device, maskaddr);
+	u64 bsize = (u64)nvkm_rd32(device, 0x10f20c) << 20;
+	u64 psize, size = 0;
+	enum nvkm_ram_type type = nvkm_fb_bios_memtype(bios);
+	bool uniform = true;
+	int ret, i;
+
+	nvkm_debug(subdev, "100800: %08x\n", nvkm_rd32(device, 0x100800));
+	nvkm_debug(subdev, "parts %08x mask %08x\n", parts, pmask);
+
+	/* read amount of vram attached to each memory controller */
+	for (i = 0; i < parts; i++) {
+		if (pmask & (1 << i))
+			continue;
+
+		psize = (u64)nvkm_rd32(device, 0x11020c + (i * 0x1000)) << 20;
+		if (psize != bsize) {
+			if (psize < bsize)
+				bsize = psize;
+			uniform = false;
+		}
+
+		nvkm_debug(subdev, "%d: %d MiB\n", i, (u32)(psize >> 20));
+		size += psize;
+	}
+
+	ret = nvkm_ram_ctor(func, fb, type, size, 0, ram);
 	if (ret)
 		return ret;
 
-	nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800));
-	nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask);
-
-	ram->type = nvkm_fb_bios_memtype(bios);
-	ram->ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1;
-
-	/* read amount of vram attached to each memory controller */
-	for (part = 0; part < parts; part++) {
-		if (!(pmask & (1 << part))) {
-			u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000));
-			if (psize != bsize) {
-				if (psize < bsize)
-					bsize = psize;
-				uniform = false;
-			}
-
-			nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize);
-			ram->size += (u64)psize << 20;
-		}
-	}
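+	/* nvkm_ram_ctor presumably set up a default mm; drop it so it can
+	 * be rebuilt below with the reserved areas (and any holes) honoured
+	 */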
+	nvkm_mm_fini(&ram->vram);
 
 	/* if all controllers have the same amount attached, there are no holes */
 	if (uniform) {
-		offset = rsvd_head;
-		length = (ram->size >> 12) - rsvd_head - rsvd_tail;
-		ret = nvkm_mm_init(&pfb->vram, offset, length, 1);
+		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+				   (size - rsvd_head - rsvd_tail) >>
+				   NVKM_RAM_MM_SHIFT, 1);
+		if (ret)
+			return ret;
 	} else {
 		/* otherwise, address lowest common amount from 0GiB */
-		ret = nvkm_mm_init(&pfb->vram, rsvd_head,
-				   (bsize << 8) * parts - rsvd_head, 1);
+		ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+				   ((bsize * parts) - rsvd_head) >>
+				   NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
 			return ret;
 
 		/* and the rest starting from (8GiB + common_size) */
-		offset = (0x0200000000ULL >> 12) + (bsize << 8);
-		length = (ram->size >> 12) - ((bsize * parts) << 8) - rsvd_tail;
-
-		ret = nvkm_mm_init(&pfb->vram, offset, length, 1);
+		ret = nvkm_mm_init(&ram->vram, (0x0200000000ULL + bsize) >>
+				   NVKM_RAM_MM_SHIFT,
+				   (size - (bsize * parts) - rsvd_tail) >>
+				   NVKM_RAM_MM_SHIFT, 1);
 		if (ret)
-			nvkm_mm_fini(&pfb->vram);
+			return ret;
 	}
 
-	if (ret)
-		return ret;
-
-	ram->get = gf100_ram_get;
-	ram->put = gf100_ram_put;
+	ram->ranks = (nvkm_rd32(device, 0x10f200) & 0x00000004) ? 2 : 1;
 	return 0;
 }
 
-static int
-gf100_ram_init(struct nvkm_object *object)
+int
+gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = (void *)object->parent;
-	struct gf100_ram *ram = (void *)object;
-	int ret, i;
-
-	ret = nvkm_ram_init(&ram->base);
-	if (ret)
-		return ret;
-
-	/* prepare for ddr link training, and load training patterns */
-	switch (ram->base.type) {
-	case NV_MEM_TYPE_GDDR5: {
-		static const u8  train0[] = {
-			0x00, 0xff, 0x55, 0xaa, 0x33, 0xcc,
-			0x00, 0xff, 0xff, 0x00, 0xff, 0x00,
-		};
-		static const u32 train1[] = {
-			0x00000000, 0xffffffff,
-			0x55555555, 0xaaaaaaaa,
-			0x33333333, 0xcccccccc,
-			0xf0f0f0f0, 0x0f0f0f0f,
-			0x00ff00ff, 0xff00ff00,
-			0x0000ffff, 0xffff0000,
-		};
-
-		for (i = 0; i < 0x30; i++) {
-			nv_wr32(pfb, 0x10f968, 0x00000000 | (i << 8));
-			nv_wr32(pfb, 0x10f96c, 0x00000000 | (i << 8));
-			nv_wr32(pfb, 0x10f920, 0x00000100 | train0[i % 12]);
-			nv_wr32(pfb, 0x10f924, 0x00000100 | train0[i % 12]);
-			nv_wr32(pfb, 0x10f918,              train1[i % 12]);
-			nv_wr32(pfb, 0x10f91c,              train1[i % 12]);
-			nv_wr32(pfb, 0x10f920, 0x00000000 | train0[i % 12]);
-			nv_wr32(pfb, 0x10f924, 0x00000000 | train0[i % 12]);
-			nv_wr32(pfb, 0x10f918,              train1[i % 12]);
-			nv_wr32(pfb, 0x10f91c,              train1[i % 12]);
-		}
-	}	break;
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-static int
-gf100_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nvkm_bios *bios = nvkm_bios(parent);
+	struct nvkm_subdev *subdev = &fb->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct gf100_ram *ram;
 	int ret;
 
-	ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram);
-	*pobject = nv_object(ram);
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+
+	ret = gf100_ram_ctor(&gf100_ram_func, fb, 0x022554, &ram->base);
 	if (ret)
 		return ret;
 
 	ret = nvbios_pll_parse(bios, 0x0c, &ram->refpll);
 	if (ret) {
-		nv_error(ram, "mclk refpll data not found\n");
+		nvkm_error(subdev, "mclk refpll data not found\n");
 		return ret;
 	}
 
 	ret = nvbios_pll_parse(bios, 0x04, &ram->mempll);
 	if (ret) {
-		nv_error(ram, "mclk pll data not found\n");
+		nvkm_error(subdev, "mclk pll data not found\n");
 		return ret;
 	}
 
-	switch (ram->base.type) {
-	case NV_MEM_TYPE_GDDR5:
-		ram->base.calc = gf100_ram_calc;
-		ram->base.prog = gf100_ram_prog;
-		ram->base.tidy = gf100_ram_tidy;
-		break;
-	default:
-		nv_warn(ram, "reclocking of this ram type unsupported\n");
-		return 0;
-	}
-
 	ram->fuc.r_0x10fe20 = ramfuc_reg(0x10fe20);
 	ram->fuc.r_0x10fe24 = ramfuc_reg(0x10fe24);
 	ram->fuc.r_0x137320 = ramfuc_reg(0x137320);
@@ -718,14 +713,3 @@
 	ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
 	return 0;
 }
-
-struct nvkm_oclass
-gf100_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ram_ctor,
-		.dtor = _nvkm_ram_dtor,
-		.init = gf100_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 1ef15c3..9893556 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs
  */
+#define gk104_ram(p) container_of((p), struct gk104_ram, base)
+#include "ram.h"
 #include "ramfuc.h"
-#include "gf100.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/init.h>
@@ -229,8 +229,9 @@
 gk104_ram_nuts(struct gk104_ram *ram, struct ramfuc_reg *reg,
 	       u32 _mask, u32 _data, u32 _copy)
 {
-	struct gk104_fb_priv *priv = (void *)nvkm_fb(ram);
+	struct nvkm_fb *fb = ram->base.fb;
 	struct ramfuc *fuc = &ram->fuc.base;
+	struct nvkm_device *device = fb->subdev.device;
 	u32 addr = 0x110000 + (reg->addr & 0xfff);
 	u32 mask = _mask | _copy;
 	u32 data = (_data & _mask) | (reg->data & _copy);
@@ -238,7 +239,7 @@
 
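+	/* apply the masked update only to the partitions flagged in pnuts */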
 	for (i = 0; i < 16; i++, addr += 0x1000) {
 		if (ram->pnuts & (1 << i)) {
-			u32 prev = nv_rd32(priv, addr);
+			u32 prev = nvkm_rd32(device, addr);
 			u32 next = (prev & ~mask) | data;
 			nvkm_memx_wr32(fuc->memx, addr, next);
 		}
@@ -248,9 +249,8 @@
 	gk104_ram_nuts((s), &(s)->fuc.r_##r, (m), (d), (c))
 
 static int
-gk104_ram_calc_gddr5(struct nvkm_fb *pfb, u32 freq)
+gk104_ram_calc_gddr5(struct gk104_ram *ram, u32 freq)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
 	struct gk104_ramfuc *fuc = &ram->fuc;
 	struct nvkm_ram_data *next = ram->base.next;
 	int vc = !next->bios.ramcfg_11_02_08;
@@ -674,9 +674,8 @@
  ******************************************************************************/
 
 static int
-gk104_ram_calc_sddr3(struct nvkm_fb *pfb, u32 freq)
+gk104_ram_calc_sddr3(struct gk104_ram *ram, u32 freq)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
 	struct gk104_ramfuc *fuc = &ram->fuc;
 	const u32 rcoef = ((  ram->P1 << 16) | (ram->N1 << 8) | ram->M1);
 	const u32 runk0 = ram->fN1 << 16;
@@ -926,9 +925,9 @@
  ******************************************************************************/
 
 static int
-gk104_ram_calc_data(struct nvkm_fb *pfb, u32 khz, struct nvkm_ram_data *data)
+gk104_ram_calc_data(struct gk104_ram *ram, u32 khz, struct nvkm_ram_data *data)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
 	struct nvkm_ram_data *cfg;
 	u32 mhz = khz / 1000;
 
@@ -941,19 +940,19 @@
 		}
 	}
 
-	nv_error(ram, "ramcfg data for %dMHz not found\n", mhz);
+	nvkm_error(subdev, "ramcfg data for %dMHz not found\n", mhz);
 	return -EINVAL;
 }
 
 static int
-gk104_ram_calc_xits(struct nvkm_fb *pfb, struct nvkm_ram_data *next)
+gk104_ram_calc_xits(struct gk104_ram *ram, struct nvkm_ram_data *next)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
 	struct gk104_ramfuc *fuc = &ram->fuc;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
 	int refclk, i;
 	int ret;
 
-	ret = ram_init(fuc, pfb);
+	ret = ram_init(fuc, ram->base.fb);
 	if (ret)
 		return ret;
 
@@ -973,11 +972,11 @@
 		refclk = fuc->mempll.refclk;
 
 	/* calculate refpll coefficients */
-	ret = gt215_pll_calc(nv_subdev(pfb), &fuc->refpll, refclk, &ram->N1,
+	ret = gt215_pll_calc(subdev, &fuc->refpll, refclk, &ram->N1,
 			     &ram->fN1, &ram->M1, &ram->P1);
 	fuc->mempll.refclk = ret;
 	if (ret <= 0) {
-		nv_error(pfb, "unable to calc refpll\n");
+		nvkm_error(subdev, "unable to calc refpll\n");
 		return -EINVAL;
 	}
 
@@ -990,10 +989,10 @@
 		fuc->mempll.min_p = 1;
 		fuc->mempll.max_p = 2;
 
-		ret = gt215_pll_calc(nv_subdev(pfb), &fuc->mempll, next->freq,
+		ret = gt215_pll_calc(subdev, &fuc->mempll, next->freq,
 				     &ram->N2, NULL, &ram->M2, &ram->P2);
 		if (ret <= 0) {
-			nv_error(pfb, "unable to calc mempll\n");
+			nvkm_error(subdev, "unable to calc mempll\n");
 			return -EINVAL;
 		}
 	}
@@ -1005,15 +1004,15 @@
 	ram->base.freq = next->freq;
 
 	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR3:
+	case NVKM_RAM_TYPE_DDR3:
 		ret = nvkm_sddr3_calc(&ram->base);
 		if (ret == 0)
-			ret = gk104_ram_calc_sddr3(pfb, next->freq);
+			ret = gk104_ram_calc_sddr3(ram, next->freq);
 		break;
-	case NV_MEM_TYPE_GDDR5:
+	case NVKM_RAM_TYPE_GDDR5:
 		ret = nvkm_gddr5_calc(&ram->base, ram->pnuts != 0);
 		if (ret == 0)
-			ret = gk104_ram_calc_gddr5(pfb, next->freq);
+			ret = gk104_ram_calc_gddr5(ram, next->freq);
 		break;
 	default:
 		ret = -ENOSYS;
@@ -1024,21 +1023,22 @@
 }
 
 static int
-gk104_ram_calc(struct nvkm_fb *pfb, u32 freq)
+gk104_ram_calc(struct nvkm_ram *base, u32 freq)
 {
-	struct nvkm_clk *clk = nvkm_clk(pfb);
-	struct gk104_ram *ram = (void *)pfb->ram;
+	struct gk104_ram *ram = gk104_ram(base);
+	struct nvkm_clk *clk = ram->base.fb->subdev.device->clk;
 	struct nvkm_ram_data *xits = &ram->base.xition;
 	struct nvkm_ram_data *copy;
 	int ret;
 
 	if (ram->base.next == NULL) {
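+		/* first call: look up bios data for both the current and
+		 * target frequencies
+		 */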
-		ret = gk104_ram_calc_data(pfb, clk->read(clk, nv_clk_src_mem),
+		ret = gk104_ram_calc_data(ram,
+					  nvkm_clk_read(clk, nv_clk_src_mem),
 					  &ram->base.former);
 		if (ret)
 			return ret;
 
-		ret = gk104_ram_calc_data(pfb, freq, &ram->base.target);
+		ret = gk104_ram_calc_data(ram, freq, &ram->base.target);
 		if (ret)
 			return ret;
 
@@ -1062,13 +1062,13 @@
 		ram->base.next = &ram->base.target;
 	}
 
-	return gk104_ram_calc_xits(pfb, ram->base.next);
+	return gk104_ram_calc_xits(ram, ram->base.next);
 }
 
 static void
-gk104_ram_prog_0(struct nvkm_fb *pfb, u32 freq)
+gk104_ram_prog_0(struct gk104_ram *ram, u32 freq)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
+	struct nvkm_device *device = ram->base.fb->subdev.device;
 	struct nvkm_ram_data *cfg;
 	u32 mhz = freq / 1000;
 	u32 mask, data;
@@ -1090,31 +1090,31 @@
 		data |= cfg->bios.rammap_11_09_01ff;
 		mask |= 0x000001ff;
 	}
-	nv_mask(pfb, 0x10f468, mask, data);
+	nvkm_mask(device, 0x10f468, mask, data);
 
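+	/* "mask = 0, data = 0" below uses the comma operator to reset the
+	 * accumulators before each conditional update; it is intentional,
+	 * not a typo
+	 */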
 	if (mask = 0, data = 0, ram->diff.rammap_11_0a_0400) {
 		data |= cfg->bios.rammap_11_0a_0400;
 		mask |= 0x00000001;
 	}
-	nv_mask(pfb, 0x10f420, mask, data);
+	nvkm_mask(device, 0x10f420, mask, data);
 
 	if (mask = 0, data = 0, ram->diff.rammap_11_0a_0800) {
 		data |= cfg->bios.rammap_11_0a_0800;
 		mask |= 0x00000001;
 	}
-	nv_mask(pfb, 0x10f430, mask, data);
+	nvkm_mask(device, 0x10f430, mask, data);
 
 	if (mask = 0, data = 0, ram->diff.rammap_11_0b_01f0) {
 		data |= cfg->bios.rammap_11_0b_01f0;
 		mask |= 0x0000001f;
 	}
-	nv_mask(pfb, 0x10f400, mask, data);
+	nvkm_mask(device, 0x10f400, mask, data);
 
 	if (mask = 0, data = 0, ram->diff.rammap_11_0b_0200) {
 		data |= cfg->bios.rammap_11_0b_0200 << 9;
 		mask |= 0x00000200;
 	}
-	nv_mask(pfb, 0x10f410, mask, data);
+	nvkm_mask(device, 0x10f410, mask, data);
 
 	if (mask = 0, data = 0, ram->diff.rammap_11_0d) {
 		data |= cfg->bios.rammap_11_0d << 16;
@@ -1124,7 +1124,7 @@
 		data |= cfg->bios.rammap_11_0f << 8;
 		mask |= 0x0000ff00;
 	}
-	nv_mask(pfb, 0x10f440, mask, data);
+	nvkm_mask(device, 0x10f440, mask, data);
 
 	if (mask = 0, data = 0, ram->diff.rammap_11_0e) {
 		data |= cfg->bios.rammap_11_0e << 8;
@@ -1138,15 +1138,15 @@
 		data |= cfg->bios.rammap_11_0b_0400 << 5;
 		mask |= 0x00000020;
 	}
-	nv_mask(pfb, 0x10f444, mask, data);
+	nvkm_mask(device, 0x10f444, mask, data);
 }
 
 static int
-gk104_ram_prog(struct nvkm_fb *pfb)
+gk104_ram_prog(struct nvkm_ram *base)
 {
-	struct nvkm_device *device = nv_device(pfb);
-	struct gk104_ram *ram = (void *)pfb->ram;
+	struct gk104_ram *ram = gk104_ram(base);
 	struct gk104_ramfuc *fuc = &ram->fuc;
+	struct nvkm_device *device = ram->base.fb->subdev.device;
 	struct nvkm_ram_data *next = ram->base.next;
 
 	if (!nvkm_boolopt(device->cfgopt, "NvMemExec", true)) {
@@ -1154,20 +1154,19 @@
 		return (ram->base.next == &ram->base.xition);
 	}
 
-	gk104_ram_prog_0(pfb, 1000);
+	gk104_ram_prog_0(ram, 1000);
 	ram_exec(fuc, true);
-	gk104_ram_prog_0(pfb, next->freq);
+	gk104_ram_prog_0(ram, next->freq);
 
 	return (ram->base.next == &ram->base.xition);
 }
 
 static void
-gk104_ram_tidy(struct nvkm_fb *pfb)
+gk104_ram_tidy(struct nvkm_ram *base)
 {
-	struct gk104_ram *ram = (void *)pfb->ram;
-	struct gk104_ramfuc *fuc = &ram->fuc;
+	struct gk104_ram *ram = gk104_ram(base);
 	ram->base.next = NULL;
-	ram_exec(fuc, false);
+	ram_exec(&ram->fuc, false);
 }
 
 struct gk104_ram_train {
@@ -1183,10 +1182,10 @@
 };
 
 static int
-gk104_ram_train_type(struct nvkm_fb *pfb, int i, u8 ramcfg,
+gk104_ram_train_type(struct nvkm_ram *ram, int i, u8 ramcfg,
 		     struct gk104_ram_train *train)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
+	struct nvkm_bios *bios = ram->fb->subdev.device->bios;
 	struct nvbios_M0205E M0205E;
 	struct nvbios_M0205S M0205S;
 	struct nvbios_M0209E M0209E;
@@ -1244,33 +1243,35 @@
 }
 
 static int
-gk104_ram_train_init_0(struct nvkm_fb *pfb, struct gk104_ram_train *train)
+gk104_ram_train_init_0(struct nvkm_ram *ram, struct gk104_ram_train *train)
 {
+	struct nvkm_subdev *subdev = &ram->fb->subdev;
+	struct nvkm_device *device = subdev->device;
 	int i, j;
 
 	if ((train->mask & 0x03d3) != 0x03d3) {
-		nv_warn(pfb, "missing link training data\n");
+		nvkm_warn(subdev, "missing link training data\n");
 		return -EINVAL;
 	}
 
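+	/* upload the vbios-provided training patterns */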
 	for (i = 0; i < 0x30; i++) {
 		for (j = 0; j < 8; j += 4) {
-			nv_wr32(pfb, 0x10f968 + j, 0x00000000 | (i << 8));
-			nv_wr32(pfb, 0x10f920 + j, 0x00000000 |
+			nvkm_wr32(device, 0x10f968 + j, 0x00000000 | (i << 8));
+			nvkm_wr32(device, 0x10f920 + j, 0x00000000 |
 						   train->type08.data[i] << 4 |
 						   train->type06.data[i]);
-			nv_wr32(pfb, 0x10f918 + j, train->type00.data[i]);
-			nv_wr32(pfb, 0x10f920 + j, 0x00000100 |
+			nvkm_wr32(device, 0x10f918 + j, train->type00.data[i]);
+			nvkm_wr32(device, 0x10f920 + j, 0x00000100 |
 						   train->type09.data[i] << 4 |
 						   train->type07.data[i]);
-			nv_wr32(pfb, 0x10f918 + j, train->type01.data[i]);
+			nvkm_wr32(device, 0x10f918 + j, train->type01.data[i]);
 		}
 	}
 
 	for (j = 0; j < 8; j += 4) {
 		for (i = 0; i < 0x100; i++) {
-			nv_wr32(pfb, 0x10f968 + j, i);
-			nv_wr32(pfb, 0x10f900 + j, train->type04.data[i]);
+			nvkm_wr32(device, 0x10f968 + j, i);
+			nvkm_wr32(device, 0x10f900 + j, train->type04.data[i]);
 		}
 	}
 
@@ -1278,23 +1279,24 @@
 }
 
 static int
-gk104_ram_train_init(struct nvkm_fb *pfb)
+gk104_ram_train_init(struct nvkm_ram *ram)
 {
-	u8 ramcfg = nvbios_ramcfg_index(nv_subdev(pfb));
+	u8 ramcfg = nvbios_ramcfg_index(&ram->fb->subdev);
 	struct gk104_ram_train *train;
-	int ret = -ENOMEM, i;
+	int ret, i;
 
-	if ((train = kzalloc(sizeof(*train), GFP_KERNEL))) {
-		for (i = 0; i < 0x100; i++) {
-			ret = gk104_ram_train_type(pfb, i, ramcfg, train);
-			if (ret && ret != -ENOENT)
-				break;
-		}
+	if (!(train = kzalloc(sizeof(*train), GFP_KERNEL)))
+		return -ENOMEM;
+
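+	/* gather training data for each ramcfg entry; -ENOENT just means
+	 * an entry is absent
+	 */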
+	for (i = 0; i < 0x100; i++) {
+		ret = gk104_ram_train_type(ram, i, ramcfg, train);
+		if (ret && ret != -ENOENT)
+			break;
 	}
 
-	switch (pfb->ram->type) {
-	case NV_MEM_TYPE_GDDR5:
-		ret = gk104_ram_train_init_0(pfb, train);
+	switch (ram->type) {
+	case NVKM_RAM_TYPE_GDDR5:
+		ret = gk104_ram_train_init_0(ram, train);
 		break;
 	default:
 		ret = 0;
@@ -1306,18 +1308,14 @@
 }
 
 int
-gk104_ram_init(struct nvkm_object *object)
+gk104_ram_init(struct nvkm_ram *ram)
 {
-	struct nvkm_fb *pfb = (void *)object->parent;
-	struct gk104_ram *ram   = (void *)object;
-	struct nvkm_bios *bios = nvkm_bios(pfb);
+	struct nvkm_subdev *subdev = &ram->fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	u8  ver, hdr, cnt, len, snr, ssz;
 	u32 data, save;
-	int ret, i;
-
-	ret = nvkm_ram_init(&ram->base);
-	if (ret)
-		return ret;
+	int i;
 
 	/* run a bunch of tables from the rammap table.  there are actually
 	 * individual pointers for each rammap entry too, but, nvidia
@@ -1334,33 +1332,32 @@
 	if (!data || hdr < 0x15)
 		return -EINVAL;
 
-	cnt  = nv_ro08(bios, data + 0x14); /* guess at count */
-	data = nv_ro32(bios, data + 0x10); /* guess u32... */
-	save = nv_rd32(pfb, 0x10f65c) & 0x000000f0;
+	cnt  = nvbios_rd08(bios, data + 0x14); /* guess at count */
+	data = nvbios_rd32(bios, data + 0x10); /* guess u32... */
+	save = nvkm_rd32(device, 0x10f65c) & 0x000000f0;
 	for (i = 0; i < cnt; i++, data += 4) {
 		if (i != save >> 4) {
-			nv_mask(pfb, 0x10f65c, 0x000000f0, i << 4);
+			nvkm_mask(device, 0x10f65c, 0x000000f0, i << 4);
 			nvbios_exec(&(struct nvbios_init) {
-					.subdev = nv_subdev(pfb),
+					.subdev = subdev,
 					.bios = bios,
-					.offset = nv_ro32(bios, data),
+					.offset = nvbios_rd32(bios, data),
 					.execute = 1,
 				    });
 		}
 	}
-	nv_mask(pfb, 0x10f65c, 0x000000f0, save);
-	nv_mask(pfb, 0x10f584, 0x11000000, 0x00000000);
-	nv_wr32(pfb, 0x10ecc0, 0xffffffff);
-	nv_mask(pfb, 0x10f160, 0x00000010, 0x00000010);
+	nvkm_mask(device, 0x10f65c, 0x000000f0, save);
+	nvkm_mask(device, 0x10f584, 0x11000000, 0x00000000);
+	nvkm_wr32(device, 0x10ecc0, 0xffffffff);
+	nvkm_mask(device, 0x10f160, 0x00000010, 0x00000010);
 
-	return gk104_ram_train_init(pfb);
+	return gk104_ram_train_init(ram);
 }
 
 static int
 gk104_ram_ctor_data(struct gk104_ram *ram, u8 ramcfg, int i)
 {
-	struct nvkm_fb *pfb = (void *)nv_object(ram)->parent;
-	struct nvkm_bios *bios = nvkm_bios(pfb);
+	struct nvkm_bios *bios = ram->base.fb->subdev.device->bios;
 	struct nvkm_ram_data *cfg;
 	struct nvbios_ramcfg *d = &ram->diff;
 	struct nvbios_ramcfg *p, *n;
@@ -1426,63 +1423,64 @@
 	return ret;
 }
 
-static void
-gk104_ram_dtor(struct nvkm_object *object)
+static void *
+gk104_ram_dtor(struct nvkm_ram *base)
 {
-	struct gk104_ram *ram = (void *)object;
+	struct gk104_ram *ram = gk104_ram(base);
 	struct nvkm_ram_data *cfg, *tmp;
 
 	list_for_each_entry_safe(cfg, tmp, &ram->cfg, head) {
 		kfree(cfg);
 	}
 
-	nvkm_ram_destroy(&ram->base);
+	return ram;
 }
 
-static int
-gk104_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+static const struct nvkm_ram_func
+gk104_ram_func = {
+	.dtor = gk104_ram_dtor,
+	.init = gk104_ram_init,
+	.get = gf100_ram_get,
+	.put = gf100_ram_put,
+	.calc = gk104_ram_calc,
+	.prog = gk104_ram_prog,
+	.tidy = gk104_ram_tidy,
+};
+
+int
+gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct nvkm_gpio *gpio = nvkm_gpio(pfb);
+	struct nvkm_subdev *subdev = &fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_gpio *gpio = device->gpio;
 	struct dcb_gpio_func func;
 	struct gk104_ram *ram;
 	int ret, i;
-	u8  ramcfg = nvbios_ramcfg_index(nv_subdev(pfb));
+	u8  ramcfg = nvbios_ramcfg_index(subdev);
 	u32 tmp;
 
-	ret = gf100_ram_create(parent, engine, oclass, 0x022554, &ram);
-	*pobject = nv_object(ram);
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+
+	ret = gf100_ram_ctor(&gk104_ram_func, fb, 0x022554, &ram->base);
 	if (ret)
 		return ret;
 
 	INIT_LIST_HEAD(&ram->cfg);
 
-	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR3:
-	case NV_MEM_TYPE_GDDR5:
-		ram->base.calc = gk104_ram_calc;
-		ram->base.prog = gk104_ram_prog;
-		ram->base.tidy = gk104_ram_tidy;
-		break;
-	default:
-		nv_warn(pfb, "reclocking of this RAM type is unsupported\n");
-		break;
-	}
-
 	/* calculate a mask of differently configured memory partitions,
 	 * because, of course, reclocking wasn't complicated enough
 	 * already without having to treat some of them differently to
 	 * the others....
 	 */
-	ram->parts = nv_rd32(pfb, 0x022438);
-	ram->pmask = nv_rd32(pfb, 0x022554);
+	ram->parts = nvkm_rd32(device, 0x022438);
+	ram->pmask = nvkm_rd32(device, 0x022554);
 	ram->pnuts = 0;
 	for (i = 0, tmp = 0; i < ram->parts; i++) {
 		if (!(ram->pmask & (1 << i))) {
-			u32 cfg1 = nv_rd32(pfb, 0x110204 + (i * 0x1000));
+			u32 cfg1 = nvkm_rd32(device, 0x110204 + (i * 0x1000));
 			if (tmp && tmp != cfg1) {
 				ram->pnuts |= (1 << i);
 				continue;
@@ -1505,7 +1503,7 @@
 	for (i = 0; !ret; i++) {
 		ret = gk104_ram_ctor_data(ram, ramcfg, i);
 		if (ret && ret != -ENOENT) {
-			nv_error(pfb, "failed to parse ramcfg data\n");
+			nvkm_error(subdev, "failed to parse ramcfg data\n");
 			return ret;
 		}
 	}
@@ -1513,25 +1511,25 @@
 	/* parse bios data for both pll's */
 	ret = nvbios_pll_parse(bios, 0x0c, &ram->fuc.refpll);
 	if (ret) {
-		nv_error(pfb, "mclk refpll data not found\n");
+		nvkm_error(subdev, "mclk refpll data not found\n");
 		return ret;
 	}
 
 	ret = nvbios_pll_parse(bios, 0x04, &ram->fuc.mempll);
 	if (ret) {
-		nv_error(pfb, "mclk pll data not found\n");
+		nvkm_error(subdev, "mclk pll data not found\n");
 		return ret;
 	}
 
 	/* lookup memory voltage gpios */
-	ret = gpio->find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
+	ret = nvkm_gpio_find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
 	if (ret == 0) {
 		ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (func.line * 0x04));
 		ram->fuc.r_funcMV[0] = (func.log[0] ^ 2) << 12;
 		ram->fuc.r_funcMV[1] = (func.log[1] ^ 2) << 12;
 	}
 
-	ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+	ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
 	if (ret == 0) {
 		ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (func.line * 0x04));
 		ram->fuc.r_func2E[0] = (func.log[0] ^ 2) << 12;
@@ -1588,7 +1586,7 @@
 	ram->fuc.r_0x10f914 = ramfuc_reg(0x10f914);
 
 	switch (ram->base.type) {
-	case NV_MEM_TYPE_GDDR5:
+	case NVKM_RAM_TYPE_GDDR5:
 		ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
 		ram->fuc.r_mr[1] = ramfuc_reg(0x10f330);
 		ram->fuc.r_mr[2] = ramfuc_reg(0x10f334);
@@ -1600,7 +1598,7 @@
 		ram->fuc.r_mr[8] = ramfuc_reg(0x10f354);
 		ram->fuc.r_mr[15] = ramfuc_reg(0x10f34c);
 		break;
-	case NV_MEM_TYPE_DDR3:
+	case NVKM_RAM_TYPE_DDR3:
 		ram->fuc.r_mr[0] = ramfuc_reg(0x10f300);
 		ram->fuc.r_mr[2] = ramfuc_reg(0x10f320);
 		break;
@@ -1626,14 +1624,3 @@
 	ram->fuc.r_0x100750 = ramfuc_reg(0x100750);
 	return 0;
 }
-
-struct nvkm_oclass
-gk104_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_ram_ctor,
-		.dtor = gk104_ram_dtor,
-		.init = gk104_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index a298b39f..43d807f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -21,35 +21,20 @@
  *
  * Authors: Ben Skeggs
  */
-#include "gf100.h"
+#include "ram.h"
 
-struct gm107_ram {
-	struct nvkm_ram base;
+static const struct nvkm_ram_func
+gm107_ram_func = {
+	.init = gk104_ram_init,
+	.get = gf100_ram_get,
+	.put = gf100_ram_put,
 };
 
-static int
-gm107_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct gm107_ram *ram;
-	int ret;
+	if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
+		return -ENOMEM;
 
-	ret = gf100_ram_create(parent, engine, oclass, 0x021c14, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
-
-	return 0;
+	return gf100_ram_ctor(&gm107_ram_func, fb, 0x021c14, *pram);
 }
-
-struct nvkm_oclass
-gm107_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm107_ram_ctor,
-		.dtor = _nvkm_ram_dtor,
-		.init = gk104_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
index 2417640..5c08ae8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
@@ -22,11 +22,10 @@
  * Authors: Ben Skeggs
  * 	    Roy Spliet <rspliet@eclipso.eu>
  */
-
+#define gt215_ram(p) container_of((p), struct gt215_ram, base)
+#include "ram.h"
 #include "ramfuc.h"
-#include "nv50.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/M0205.h>
@@ -154,14 +153,14 @@
  * Link training for (at least) DDR3
  */
 int
-gt215_link_train(struct nvkm_fb *pfb)
+gt215_link_train(struct gt215_ram *ram)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct gt215_ram *ram = (void *)pfb->ram;
-	struct nvkm_clk *clk = nvkm_clk(pfb);
 	struct gt215_ltrain *train = &ram->ltrain;
-	struct nvkm_device *device = nv_device(pfb);
 	struct gt215_ramfuc *fuc = &ram->fuc;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_clk *clk = device->clk;
 	u32 *result, r1700;
 	int ret, i;
 	struct nvbios_M0205T M0205T = { 0 };
@@ -182,27 +181,29 @@
 
 	/* Clock speeds for training and back */
 	nvbios_M0205Tp(bios, &ver, &hdr, &cnt, &len, &snr, &ssz, &M0205T);
-	if (M0205T.freq == 0)
+	if (M0205T.freq == 0) {
+		kfree(result);
 		return -ENOENT;
+	}
 
-	clk_current = clk->read(clk, nv_clk_src_mem);
+	clk_current = nvkm_clk_read(clk, nv_clk_src_mem);
 
 	ret = gt215_clk_pre(clk, f);
 	if (ret)
 		goto out;
 
 	/* First: clock up/down */
-	ret = ram->base.calc(pfb, (u32) M0205T.freq * 1000);
+	ret = ram->base.func->calc(&ram->base, (u32) M0205T.freq * 1000);
 	if (ret)
 		goto out;
 
 	/* Do this *after* calc, eliminates write in script */
-	nv_wr32(pfb, 0x111400, 0x00000000);
+	nvkm_wr32(device, 0x111400, 0x00000000);
 	/* XXX: Magic writes that improve train reliability? */
-	nv_mask(pfb, 0x100674, 0x0000ffff, 0x00000000);
-	nv_mask(pfb, 0x1005e4, 0x0000ffff, 0x00000000);
-	nv_mask(pfb, 0x100b0c, 0x000000ff, 0x00000000);
-	nv_wr32(pfb, 0x100c04, 0x00000400);
+	nvkm_mask(device, 0x100674, 0x0000ffff, 0x00000000);
+	nvkm_mask(device, 0x1005e4, 0x0000ffff, 0x00000000);
+	nvkm_mask(device, 0x100b0c, 0x000000ff, 0x00000000);
+	nvkm_wr32(device, 0x100c04, 0x00000400);
 
 	/* Now the training script */
 	r1700 = ram_rd32(fuc, 0x001700);
@@ -235,22 +236,22 @@
 
 	ram_exec(fuc, true);
 
-	ram->base.calc(pfb, clk_current);
+	ram->base.func->calc(&ram->base, clk_current);
 	ram_exec(fuc, true);
 
 	/* Post-processing, avoids flicker */
-	nv_mask(pfb, 0x616308, 0x10, 0x10);
-	nv_mask(pfb, 0x616b08, 0x10, 0x10);
+	nvkm_mask(device, 0x616308, 0x10, 0x10);
+	nvkm_mask(device, 0x616b08, 0x10, 0x10);
 
 	gt215_clk_post(clk, f);
 
-	ram_train_result(pfb, result, 64);
+	ram_train_result(ram->base.fb, result, 64);
 	for (i = 0; i < 64; i++)
-		nv_debug(pfb, "Train: %08x", result[i]);
+		nvkm_debug(subdev, "Train: %08x\n", result[i]);
 	gt215_link_train_calc(result, train);
 
-	nv_debug(pfb, "Train: %08x %08x %08x", train->r_100720,
-			train->r_1111e0, train->r_111400);
+	nvkm_debug(subdev, "Train: %08x %08x %08x\n", train->r_100720,
+		   train->r_1111e0, train->r_111400);
 
 	kfree(result);
 
@@ -265,11 +266,12 @@
 	train->state = NVA3_TRAIN_UNSUPPORTED;
 
 	gt215_clk_post(clk, f);
+	kfree(result);
 	return ret;
 }
 
 int
-gt215_link_train_init(struct nvkm_fb *pfb)
+gt215_link_train_init(struct gt215_ram *ram)
 {
 	static const u32 pattern[16] = {
 		0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
@@ -277,9 +279,9 @@
 		0x33333333, 0x55555555, 0x77777777, 0x66666666,
 		0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
 	};
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct gt215_ram *ram = (void *)pfb->ram;
 	struct gt215_ltrain *train = &ram->ltrain;
+	struct nvkm_device *device = ram->base.fb->subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	struct nvkm_mem *mem;
 	struct nvbios_M0205E M0205E;
 	u8 ver, hdr, cnt, len;
@@ -298,48 +300,47 @@
 
 	train->state = NVA3_TRAIN_ONCE;
 
-	ret = pfb->ram->get(pfb, 0x8000, 0x10000, 0, 0x800, &ram->ltrain.mem);
+	ret = ram->base.func->get(&ram->base, 0x8000, 0x10000, 0, 0x800,
+				  &ram->ltrain.mem);
 	if (ret)
 		return ret;
 
 	mem = ram->ltrain.mem;
 
-	nv_wr32(pfb, 0x100538, 0x10000000 | (mem->offset >> 16));
-	nv_wr32(pfb, 0x1005a8, 0x0000ffff);
-	nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
+	nvkm_wr32(device, 0x100538, 0x10000000 | (mem->offset >> 16));
+	nvkm_wr32(device, 0x1005a8, 0x0000ffff);
+	nvkm_mask(device, 0x10f800, 0x00000001, 0x00000001);
 
 	for (i = 0; i < 0x30; i++) {
-		nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
-		nv_wr32(pfb, 0x10f900, pattern[i % 16]);
+		nvkm_wr32(device, 0x10f8c0, (i << 8) | i);
+		nvkm_wr32(device, 0x10f900, pattern[i % 16]);
 	}
 
 	for (i = 0; i < 0x30; i++) {
-		nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
-		nv_wr32(pfb, 0x10f920, pattern[i % 16]);
+		nvkm_wr32(device, 0x10f8e0, (i << 8) | i);
+		nvkm_wr32(device, 0x10f920, pattern[i % 16]);
 	}
 
 	/* And upload the pattern */
-	r001700 = nv_rd32(pfb, 0x1700);
-	nv_wr32(pfb, 0x1700, mem->offset >> 16);
+	r001700 = nvkm_rd32(device, 0x1700);
+	nvkm_wr32(device, 0x1700, mem->offset >> 16);
 	for (i = 0; i < 16; i++)
-		nv_wr32(pfb, 0x700000 + (i << 2), pattern[i]);
+		nvkm_wr32(device, 0x700000 + (i << 2), pattern[i]);
 	for (i = 0; i < 16; i++)
-		nv_wr32(pfb, 0x700100 + (i << 2), pattern[i]);
-	nv_wr32(pfb, 0x1700, r001700);
+		nvkm_wr32(device, 0x700100 + (i << 2), pattern[i]);
+	nvkm_wr32(device, 0x1700, r001700);
 
-	train->r_100720 = nv_rd32(pfb, 0x100720);
-	train->r_1111e0 = nv_rd32(pfb, 0x1111e0);
-	train->r_111400 = nv_rd32(pfb, 0x111400);
+	train->r_100720 = nvkm_rd32(device, 0x100720);
+	train->r_1111e0 = nvkm_rd32(device, 0x1111e0);
+	train->r_111400 = nvkm_rd32(device, 0x111400);
 	return 0;
 }
 
 void
-gt215_link_train_fini(struct nvkm_fb *pfb)
+gt215_link_train_fini(struct gt215_ram *ram)
 {
-	struct gt215_ram *ram = (void *)pfb->ram;
-
 	if (ram->ltrain.mem)
-		pfb->ram->put(pfb, &ram->ltrain.mem);
+		ram->base.func->put(&ram->base, &ram->ltrain.mem);
 }
 
 /*
@@ -347,24 +348,25 @@
  */
 #define T(t) cfg->timing_10_##t
 static int
-gt215_ram_timing_calc(struct nvkm_fb *pfb, u32 *timing)
+gt215_ram_timing_calc(struct gt215_ram *ram, u32 *timing)
 {
-	struct gt215_ram *ram = (void *)pfb->ram;
 	struct nvbios_ramcfg *cfg = &ram->base.target.bios;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
 	int tUNK_base, tUNK_40_0, prevCL;
 	u32 cur2, cur3, cur7, cur8;
 
-	cur2 = nv_rd32(pfb, 0x100228);
-	cur3 = nv_rd32(pfb, 0x10022c);
-	cur7 = nv_rd32(pfb, 0x10023c);
-	cur8 = nv_rd32(pfb, 0x100240);
+	cur2 = nvkm_rd32(device, 0x100228);
+	cur3 = nvkm_rd32(device, 0x10022c);
+	cur7 = nvkm_rd32(device, 0x10023c);
+	cur8 = nvkm_rd32(device, 0x100240);
 
 
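+	/* if the vbios didn't supply CWL, derive it from the current state */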
 	switch ((!T(CWL)) * ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
+	case NVKM_RAM_TYPE_DDR2:
 		T(CWL) = T(CL) - 1;
 		break;
-	case NV_MEM_TYPE_GDDR3:
+	case NVKM_RAM_TYPE_GDDR3:
 		T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
 		break;
 	}
@@ -402,8 +404,8 @@
 	timing[8] = cur8 & 0xffffff00;
 
 	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
-	case NV_MEM_TYPE_GDDR3:
+	case NVKM_RAM_TYPE_DDR2:
+	case NVKM_RAM_TYPE_GDDR3:
 		tUNK_40_0 = prevCL - (cur8 & 0xff);
 		if (tUNK_40_0 > 0)
 			timing[8] |= T(CL);
@@ -412,11 +414,11 @@
 		break;
 	}
 
-	nv_debug(pfb, "Entry: 220: %08x %08x %08x %08x\n",
-			timing[0], timing[1], timing[2], timing[3]);
-	nv_debug(pfb, "  230: %08x %08x %08x %08x\n",
-			timing[4], timing[5], timing[6], timing[7]);
-	nv_debug(pfb, "  240: %08x\n", timing[8]);
+	nvkm_debug(subdev, "Entry: 220: %08x %08x %08x %08x\n",
+		   timing[0], timing[1], timing[2], timing[3]);
+	nvkm_debug(subdev, "  230: %08x %08x %08x %08x\n",
+		   timing[4], timing[5], timing[6], timing[7]);
+	nvkm_debug(subdev, "  240: %08x\n", timing[8]);
 	return 0;
 }
 #undef T
@@ -466,13 +468,13 @@
 static void
 gt215_ram_fbvref(struct gt215_ramfuc *fuc, u32 val)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(fuc->base.pfb);
+	struct nvkm_gpio *gpio = fuc->base.fb->subdev.device->gpio;
 	struct dcb_gpio_func func;
 	u32 reg, sh, gpio_val;
 	int ret;
 
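+	/* only touch the gpio if the current FBVREF state differs from the
+	 * requested one
+	 */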
-	if (gpio->get(gpio, 0, 0x2e, DCB_GPIO_UNUSED) != val) {
-		ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+	if (nvkm_gpio_get(gpio, 0, 0x2e, DCB_GPIO_UNUSED) != val) {
+		ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
 		if (ret)
 			return;
 
@@ -487,12 +489,14 @@
 }
 
 static int
-gt215_ram_calc(struct nvkm_fb *pfb, u32 freq)
+gt215_ram_calc(struct nvkm_ram *base, u32 freq)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct gt215_ram *ram = (void *)pfb->ram;
+	struct gt215_ram *ram = gt215_ram(base);
 	struct gt215_ramfuc *fuc = &ram->fuc;
 	struct gt215_ltrain *train = &ram->ltrain;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct gt215_clk_info mclk;
 	struct nvkm_ram_data *next;
 	u8  ver, hdr, cnt, len, strap;
@@ -508,28 +512,27 @@
 	ram->base.next = next;
 
 	if (ram->ltrain.state == NVA3_TRAIN_ONCE)
-		gt215_link_train(pfb);
+		gt215_link_train(ram);
 
 	/* lookup memory config data relevant to the target frequency */
-	i = 0;
 	data = nvbios_rammapEm(bios, freq / 1000, &ver, &hdr, &cnt, &len,
 			       &next->bios);
 	if (!data || ver != 0x10 || hdr < 0x05) {
-		nv_error(pfb, "invalid/missing rammap entry\n");
+		nvkm_error(subdev, "invalid/missing rammap entry\n");
 		return -EINVAL;
 	}
 
 	/* locate specific data set for the attached memory */
-	strap = nvbios_ramcfg_index(nv_subdev(pfb));
+	strap = nvbios_ramcfg_index(subdev);
 	if (strap >= cnt) {
-		nv_error(pfb, "invalid ramcfg strap\n");
+		nvkm_error(subdev, "invalid ramcfg strap\n");
 		return -EINVAL;
 	}
 
 	data = nvbios_rammapSp(bios, data, ver, hdr, cnt, len, strap,
 			       &ver, &hdr, &next->bios);
 	if (!data || ver != 0x10 || hdr < 0x09) {
-		nv_error(pfb, "invalid/missing ramcfg entry\n");
+		nvkm_error(subdev, "invalid/missing ramcfg entry\n");
 		return -EINVAL;
 	}
 
@@ -539,20 +542,20 @@
 				       &ver, &hdr, &cnt, &len,
 				       &next->bios);
 		if (!data || ver != 0x10 || hdr < 0x17) {
-			nv_error(pfb, "invalid/missing timing entry\n");
+			nvkm_error(subdev, "invalid/missing timing entry\n");
 			return -EINVAL;
 		}
 	}
 
-	ret = gt215_pll_info(nvkm_clk(pfb), 0x12, 0x4000, freq, &mclk);
+	ret = gt215_pll_info(device->clk, 0x12, 0x4000, freq, &mclk);
 	if (ret < 0) {
-		nv_error(pfb, "failed mclk calculation\n");
+		nvkm_error(subdev, "failed mclk calculation\n");
 		return ret;
 	}
 
-	gt215_ram_timing_calc(pfb, timing);
+	gt215_ram_timing_calc(ram, timing);
 
-	ret = ram_init(fuc, pfb);
+	ret = ram_init(fuc, ram->base.fb);
 	if (ret)
 		return ret;
 
@@ -562,13 +565,13 @@
 	ram->base.mr[2] = ram_rd32(fuc, mr[2]);
 
 	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
+	case NVKM_RAM_TYPE_DDR2:
 		ret = nvkm_sddr2_calc(&ram->base);
 		break;
-	case NV_MEM_TYPE_DDR3:
+	case NVKM_RAM_TYPE_DDR3:
 		ret = nvkm_sddr3_calc(&ram->base);
 		break;
-	case NV_MEM_TYPE_GDDR3:
+	case NVKM_RAM_TYPE_GDDR3:
 		ret = nvkm_gddr3_calc(&ram->base);
 		break;
 	default:
@@ -579,7 +582,7 @@
 	if (ret)
 		return ret;
 
-	/* XXX: where the fuck does 750MHz come from? */
+	/* XXX: 750MHz seems rather arbitrary */
 	if (freq <= 750000) {
 		r004018 = 0x10000000;
 		r100760 = 0x22222222;
@@ -590,7 +593,7 @@
 		r100da0 = 0x00000000;
 	}
 
-	if (!next->bios.ramcfg_10_DLLoff)
+	if (!next->bios.ramcfg_DLLoff)
 		r004018 |= 0x00004000;
 
 	/* pll2pll requires switching to a safe clock first */
@@ -623,18 +626,18 @@
 	ram_nsec(fuc, 2000);
 
 	if (!next->bios.ramcfg_10_02_10) {
-		if (ram->base.type == NV_MEM_TYPE_GDDR3)
+		if (ram->base.type == NVKM_RAM_TYPE_GDDR3)
 			ram_mask(fuc, 0x111100, 0x04020000, 0x00020000);
 		else
 			ram_mask(fuc, 0x111100, 0x04020000, 0x04020000);
 	}
 
 	/* If we're disabling the DLL, do it now */
-	switch (next->bios.ramcfg_10_DLLoff * ram->base.type) {
-	case NV_MEM_TYPE_DDR3:
+	switch (next->bios.ramcfg_DLLoff * ram->base.type) {
+	case NVKM_RAM_TYPE_DDR3:
 		nvkm_sddr3_dll_disable(fuc, ram->base.mr);
 		break;
-	case NV_MEM_TYPE_GDDR3:
+	case NVKM_RAM_TYPE_GDDR3:
 		nvkm_gddr3_dll_disable(fuc, ram->base.mr);
 		break;
 	}
@@ -650,7 +653,7 @@
 	ram_wr32(fuc, 0x1002dc, 0x00000001);
 	ram_nsec(fuc, 2000);
 
-	if (nv_device(pfb)->chipset == 0xa3 && freq <= 500000)
+	if (device->chipset == 0xa3 && freq <= 500000)
 		ram_mask(fuc, 0x100700, 0x00000006, 0x00000006);
 
 	/* Fiddle with clocks */
@@ -708,7 +711,7 @@
 		ram_mask(fuc, 0x1007e0, 0x22222222, r100760);
 	}
 
-	if (nv_device(pfb)->chipset == 0xa3 && freq > 500000) {
+	if (device->chipset == 0xa3 && freq > 500000) {
 		ram_mask(fuc, 0x100700, 0x00000006, 0x00000000);
 	}
 
@@ -752,11 +755,11 @@
 
 	if (next->bios.ramcfg_10_02_04) {
 		switch (ram->base.type) {
-		case NV_MEM_TYPE_DDR3:
-			if (nv_device(pfb)->chipset != 0xa8)
+		case NVKM_RAM_TYPE_DDR3:
+			if (device->chipset != 0xa8)
 				r111100 |= 0x00000004;
 			/* no break */
-		case NV_MEM_TYPE_DDR2:
+		case NVKM_RAM_TYPE_DDR2:
 			r111100 |= 0x08000000;
 			break;
 		default:
@@ -764,12 +767,12 @@
 		}
 	} else {
 		switch (ram->base.type) {
-		case NV_MEM_TYPE_DDR2:
+		case NVKM_RAM_TYPE_DDR2:
 			r111100 |= 0x1a800000;
 			unk714  |= 0x00000010;
 			break;
-		case NV_MEM_TYPE_DDR3:
-			if (nv_device(pfb)->chipset == 0xa8) {
+		case NVKM_RAM_TYPE_DDR3:
+			if (device->chipset == 0xa8) {
 				r111100 |=  0x08000000;
 			} else {
 				r111100 &= ~0x00000004;
@@ -777,7 +780,7 @@
 			}
 			unk714  |= 0x00000010;
 			break;
-		case NV_MEM_TYPE_GDDR3:
+		case NVKM_RAM_TYPE_GDDR3:
 			r111100 |= 0x30000000;
 			unk714  |= 0x00000020;
 			break;
@@ -810,16 +813,16 @@
 		gt215_ram_fbvref(fuc, 1);
 
 	/* Reset DLL */
-	if (!next->bios.ramcfg_10_DLLoff)
+	if (!next->bios.ramcfg_DLLoff)
 		nvkm_sddr2_dll_reset(fuc);
 
-	if (ram->base.type == NV_MEM_TYPE_GDDR3) {
+	if (ram->base.type == NVKM_RAM_TYPE_GDDR3) {
 		ram_nsec(fuc, 31000);
 	} else {
 		ram_nsec(fuc, 14000);
 	}
 
-	if (ram->base.type == NV_MEM_TYPE_DDR3) {
+	if (ram->base.type == NVKM_RAM_TYPE_DDR3) {
 		ram_wr32(fuc, 0x100264, 0x1);
 		ram_nsec(fuc, 2000);
 	}
@@ -855,24 +858,24 @@
 }
 
 static int
-gt215_ram_prog(struct nvkm_fb *pfb)
+gt215_ram_prog(struct nvkm_ram *base)
 {
-	struct nvkm_device *device = nv_device(pfb);
-	struct gt215_ram *ram = (void *)pfb->ram;
+	struct gt215_ram *ram = gt215_ram(base);
 	struct gt215_ramfuc *fuc = &ram->fuc;
+	struct nvkm_device *device = ram->base.fb->subdev.device;
 	bool exec = nvkm_boolopt(device->cfgopt, "NvMemExec", true);
 
 	if (exec) {
-		nv_mask(pfb, 0x001534, 0x2, 0x2);
+		nvkm_mask(device, 0x001534, 0x2, 0x2);
 
 		ram_exec(fuc, true);
 
 		/* Post-processing, avoids flicker */
-		nv_mask(pfb, 0x002504, 0x1, 0x0);
-		nv_mask(pfb, 0x001534, 0x2, 0x0);
+		nvkm_mask(device, 0x002504, 0x1, 0x0);
+		nvkm_mask(device, 0x001534, 0x2, 0x0);
 
-		nv_mask(pfb, 0x616308, 0x10, 0x10);
-		nv_mask(pfb, 0x616b08, 0x10, 0x10);
+		nvkm_mask(device, 0x616308, 0x10, 0x10);
+		nvkm_mask(device, 0x616b08, 0x10, 0x10);
 	} else {
 		ram_exec(fuc, false);
 	}
@@ -880,69 +883,56 @@
 }
 
 static void
-gt215_ram_tidy(struct nvkm_fb *pfb)
+gt215_ram_tidy(struct nvkm_ram *base)
 {
-	struct gt215_ram *ram = (void *)pfb->ram;
-	struct gt215_ramfuc *fuc = &ram->fuc;
-	ram_exec(fuc, false);
+	struct gt215_ram *ram = gt215_ram(base);
+	ram_exec(&ram->fuc, false);
 }
 
 static int
-gt215_ram_init(struct nvkm_object *object)
+gt215_ram_init(struct nvkm_ram *base)
 {
-	struct nvkm_fb *pfb = (void *)object->parent;
-	struct gt215_ram   *ram = (void *)object;
-	int ret;
-
-	ret = nvkm_ram_init(&ram->base);
-	if (ret)
-		return ret;
-
-	gt215_link_train_init(pfb);
+	struct gt215_ram *ram = gt215_ram(base);
+	gt215_link_train_init(ram);
 	return 0;
 }
 
-static int
-gt215_ram_fini(struct nvkm_object *object, bool suspend)
+static void *
+gt215_ram_dtor(struct nvkm_ram *base)
 {
-	struct nvkm_fb *pfb = (void *)object->parent;
-
-	if (!suspend)
-		gt215_link_train_fini(pfb);
-
-	return 0;
+	struct gt215_ram *ram = gt215_ram(base);
+	gt215_link_train_fini(ram);
+	return ram;
 }
 
-static int
-gt215_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 datasize,
-	       struct nvkm_object **pobject)
+static const struct nvkm_ram_func
+gt215_ram_func = {
+	.dtor = gt215_ram_dtor,
+	.init = gt215_ram_init,
+	.get = nv50_ram_get,
+	.put = nv50_ram_put,
+	.calc = gt215_ram_calc,
+	.prog = gt215_ram_prog,
+	.tidy = gt215_ram_tidy,
+};
+
+int
+gt215_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_gpio *gpio = nvkm_gpio(pfb);
+	struct nvkm_gpio *gpio = fb->subdev.device->gpio;
 	struct dcb_gpio_func func;
 	struct gt215_ram *ram;
-	int ret, i;
 	u32 reg, shift;
+	int ret, i;
 
-	ret = nv50_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+
+	ret = nv50_ram_ctor(&gt215_ram_func, fb, &ram->base);
 	if (ret)
 		return ret;
 
-	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
-	case NV_MEM_TYPE_DDR3:
-	case NV_MEM_TYPE_GDDR3:
-		ram->base.calc = gt215_ram_calc;
-		ram->base.prog = gt215_ram_prog;
-		ram->base.tidy = gt215_ram_tidy;
-		break;
-	default:
-		nv_warn(ram, "reclocking of this ram type unsupported\n");
-		return 0;
-	}
-
 	ram->fuc.r_0x001610 = ramfuc_reg(0x001610);
 	ram->fuc.r_0x001700 = ramfuc_reg(0x001700);
 	ram->fuc.r_0x002504 = ramfuc_reg(0x002504);
@@ -992,7 +982,7 @@
 		ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4);
 	}
 
-	ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+	ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
 	if (ret == 0) {
 		nv50_gpio_location(func.line, &reg, &shift);
 		ram->fuc.r_gpioFBVREF = ramfuc_reg(reg);
@@ -1000,13 +990,3 @@
 
 	return 0;
 }
-
-struct nvkm_oclass
-gt215_ram_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_ram_ctor,
-		.dtor = _nvkm_ram_dtor,
-		.init = gt215_ram_init,
-		.fini = gt215_ram_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
index abc18e8..0a0e44b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
@@ -21,81 +21,67 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
+#define mcp77_ram(p) container_of((p), struct mcp77_ram, base)
+#include "ram.h"
 
-struct mcp77_ram_priv {
+struct mcp77_ram {
 	struct nvkm_ram base;
 	u64 poller_base;
 };
 
 static int
-mcp77_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 datasize,
-	       struct nvkm_object **pobject)
+mcp77_ram_init(struct nvkm_ram *base)
 {
-	u32 rsvd_head = ( 256 * 1024); /* vga memory */
-	u32 rsvd_tail = (1024 * 1024); /* vbios etc */
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct mcp77_ram_priv *priv;
-	int ret;
-
-	ret = nvkm_ram_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.type   = NV_MEM_TYPE_STOLEN;
-	priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
-	priv->base.size   = (u64)nv_rd32(pfb, 0x100e14) << 12;
-
-	rsvd_tail += 0x1000;
-	priv->poller_base = priv->base.size - rsvd_tail;
-
-	ret = nvkm_mm_init(&pfb->vram, rsvd_head >> 12,
-			   (priv->base.size  - (rsvd_head + rsvd_tail)) >> 12,
-			   1);
-	if (ret)
-		return ret;
-
-	priv->base.get = nv50_ram_get;
-	priv->base.put = nv50_ram_put;
-	return 0;
-}
-
-static int
-mcp77_ram_init(struct nvkm_object *object)
-{
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	struct mcp77_ram_priv *priv = (void *)object;
-	int ret;
-	u64 dniso, hostnb, flush;
-
-	ret = nvkm_ram_init(&priv->base);
-	if (ret)
-		return ret;
-
-	dniso  = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1;
-	hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1;
-	flush  = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1;
+	struct mcp77_ram *ram = mcp77_ram(base);
+	struct nvkm_device *device = ram->base.fb->subdev.device;
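+	/* the poller read addresses appear to be encoded as 32-byte units
+	 * back from the end of vram
+	 */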
+	u32 dniso  = ((ram->base.size - (ram->poller_base + 0x00)) >> 5) - 1;
+	u32 hostnb = ((ram->base.size - (ram->poller_base + 0x20)) >> 5) - 1;
+	u32 flush  = ((ram->base.size - (ram->poller_base + 0x40)) >> 5) - 1;
 
 	/* Enable NISO poller for various clients and set their associated
 	 * read address, only for MCP77/78 and MCP79/7A. (fd#25701)
 	 */
-	nv_wr32(pfb, 0x100c18, dniso);
-	nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001);
-	nv_wr32(pfb, 0x100c1c, hostnb);
-	nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002);
-	nv_wr32(pfb, 0x100c24, flush);
-	nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000);
+	nvkm_wr32(device, 0x100c18, dniso);
+	nvkm_mask(device, 0x100c14, 0x00000000, 0x00000001);
+	nvkm_wr32(device, 0x100c1c, hostnb);
+	nvkm_mask(device, 0x100c14, 0x00000000, 0x00000002);
+	nvkm_wr32(device, 0x100c24, flush);
+	nvkm_mask(device, 0x100c14, 0x00000000, 0x00010000);
 	return 0;
 }
 
-struct nvkm_oclass
-mcp77_ram_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = mcp77_ram_ctor,
-		.dtor = _nvkm_ram_dtor,
-		.init = mcp77_ram_init,
-		.fini = _nvkm_ram_fini,
-	},
+static const struct nvkm_ram_func
+mcp77_ram_func = {
+	.init = mcp77_ram_init,
+	.get = nv50_ram_get,
+	.put = nv50_ram_put,
 };
+
+int
+mcp77_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+	struct nvkm_device *device = fb->subdev.device;
+	u32 rsvd_head = ( 256 * 1024); /* vga memory */
+	u32 rsvd_tail = (1024 * 1024) + 0x1000; /* vbios etc + poller mem */
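+	/* stolen memory base/size are reported by the hardware in 4KiB units */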
+	u64 base = (u64)nvkm_rd32(device, 0x100e10) << 12;
+	u64 size = (u64)nvkm_rd32(device, 0x100e14) << 12;
+	struct mcp77_ram *ram;
+	int ret;
+
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+
+	ret = nvkm_ram_ctor(&mcp77_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
+			    size, 0, &ram->base);
+	if (ret)
+		return ret;
+
+	ram->poller_base = size - rsvd_tail;
+	ram->base.stolen = base;
+	nvkm_mm_fini(&ram->base.vram);
+
+	return nvkm_mm_init(&ram->base.vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+			    (size - rsvd_head - rsvd_tail) >>
+			    NVKM_RAM_MM_SHIFT, 1);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
index 855de16..6f053a0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
@@ -21,59 +21,45 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "ram.h"
 #include "regsnv04.h"
 
-static int
-nv04_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
-	u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0);
-	int ret;
+const struct nvkm_ram_func
+nv04_ram_func = {
+};
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
+int
+nv04_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+	struct nvkm_device *device = fb->subdev.device;
+	u32 boot0 = nvkm_rd32(device, NV04_PFB_BOOT_0);
+	u64 size;
+	enum nvkm_ram_type type;
 
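+	/* boot0 bit 8 apparently selects a direct size encoding in bits 15:12 */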
 	if (boot0 & 0x00000100) {
-		ram->size  = ((boot0 >> 12) & 0xf) * 2 + 2;
-		ram->size *= 1024 * 1024;
+		size  = ((boot0 >> 12) & 0xf) * 2 + 2;
+		size *= 1024 * 1024;
 	} else {
 		switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
 		case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
-			ram->size = 32 * 1024 * 1024;
+			size = 32 * 1024 * 1024;
 			break;
 		case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
-			ram->size = 16 * 1024 * 1024;
+			size = 16 * 1024 * 1024;
 			break;
 		case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
-			ram->size = 8 * 1024 * 1024;
+			size = 8 * 1024 * 1024;
 			break;
 		case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
-			ram->size = 4 * 1024 * 1024;
+			size = 4 * 1024 * 1024;
 			break;
 		}
 	}
 
 	if ((boot0 & 0x00000038) <= 0x10)
-		ram->type = NV_MEM_TYPE_SGRAM;
+		type = NVKM_RAM_TYPE_SGRAM;
 	else
-		ram->type = NV_MEM_TYPE_SDRAM;
+		type = NVKM_RAM_TYPE_SDRAM;
 
-	return 0;
+	return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
 }
-
-struct nvkm_oclass
-nv04_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
index 3b8a1ed..dfd155c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
@@ -21,39 +21,20 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "ram.h"
 
-static int
-nv10_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv10_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
-	u32 cfg0 = nv_rd32(pfb, 0x100200);
-	int ret;
-
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
+	struct nvkm_device *device = fb->subdev.device;
+	u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	u32 cfg0 = nvkm_rd32(device, 0x100200);
+	enum nvkm_ram_type type;
 
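+	/* 0x100200 bit 0 apparently distinguishes DDR1 from SDRAM parts */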
 	if (cfg0 & 0x00000001)
-		ram->type = NV_MEM_TYPE_DDR1;
+		type = NVKM_RAM_TYPE_DDR1;
 	else
-		ram->type = NV_MEM_TYPE_SDRAM;
+		type = NVKM_RAM_TYPE_SDRAM;
 
-	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	return 0;
+	return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
 }
-
-struct nvkm_oclass
-nv10_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv10_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
index fbae05d..3c6a871 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
@@ -21,33 +21,21 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "ram.h"
 
-#include <core/device.h>
-
-static int
-nv1a_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv1a_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
 	struct pci_dev *bridge;
 	u32 mem, mib;
-	int ret;
 
 	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
 	if (!bridge) {
-		nv_fatal(pfb, "no bridge device\n");
+		nvkm_error(&fb->subdev, "no bridge device\n");
 		return -ENODEV;
 	}
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
-
-	if (nv_device(pfb)->chipset == 0x1a) {
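+	/* the amount of stolen memory is read from the host bridge's pci
+	 * config space
+	 */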
+	if (fb->subdev.device->chipset == 0x1a) {
 		pci_read_config_dword(bridge, 0x7c, &mem);
 		mib = ((mem >> 6) & 31) + 1;
 	} else {
@@ -55,18 +43,6 @@
 		mib = ((mem >> 4) & 127) + 1;
 	}
 
-	ram->type = NV_MEM_TYPE_STOLEN;
-	ram->size = mib * 1024 * 1024;
-	return 0;
+	return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
+			     mib * 1024 * 1024, 0, pram);
 }
-
-struct nvkm_oclass
-nv1a_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv1a_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
index d9e7187..747e47c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
@@ -21,42 +21,29 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "ram.h"
 
-static int
-nv20_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv20_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
-	u32 pbus1218 = nv_rd32(pfb, 0x001218);
+	struct nvkm_device *device = fb->subdev.device;
+	u32 pbus1218 =  nvkm_rd32(device, 0x001218);
+	u32     size = (nvkm_rd32(device, 0x10020c) & 0xff000000);
+	u32     tags =  nvkm_rd32(device, 0x100320);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	switch (pbus1218 & 0x00000300) {
+	case 0x00000000: type = NVKM_RAM_TYPE_SDRAM; break;
+	case 0x00000100: type = NVKM_RAM_TYPE_DDR1 ; break;
+	case 0x00000200: type = NVKM_RAM_TYPE_GDDR3; break;
+	case 0x00000300: type = NVKM_RAM_TYPE_GDDR2; break;
+	}
+
+	ret = nvkm_ram_new_(&nv04_ram_func, fb, type, size, tags, pram);
 	if (ret)
 		return ret;
 
-	switch (pbus1218 & 0x00000300) {
-	case 0x00000000: ram->type = NV_MEM_TYPE_SDRAM; break;
-	case 0x00000100: ram->type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000200: ram->type = NV_MEM_TYPE_GDDR3; break;
-	case 0x00000300: ram->type = NV_MEM_TYPE_GDDR2; break;
-	}
-	ram->size  = (nv_rd32(pfb, 0x10020c) & 0xff000000);
-	ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->tags  = nv_rd32(pfb, 0x100320);
+	(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
 	return 0;
 }
-
-struct nvkm_oclass
-nv20_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv20_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
index 3d31fa4..56f8cff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
@@ -21,9 +21,8 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv40.h"
+#include "ramnv40.h"
 
-#include <core/device.h>
 #include <subdev/bios.h>
 #include <subdev/bios/bit.h>
 #include <subdev/bios/init.h>
@@ -31,23 +30,23 @@
 #include <subdev/clk/pll.h>
 #include <subdev/timer.h>
 
-int
-nv40_ram_calc(struct nvkm_fb *pfb, u32 freq)
+static int
+nv40_ram_calc(struct nvkm_ram *base, u32 freq)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct nv40_ram *ram = (void *)pfb->ram;
+	struct nv40_ram *ram = nv40_ram(base);
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_pll pll;
 	int N1, M1, N2, M2;
 	int log2P, ret;
 
 	ret = nvbios_pll_parse(bios, 0x04, &pll);
 	if (ret) {
-		nv_error(pfb, "mclk pll data not found\n");
+		nvkm_error(subdev, "mclk pll data not found\n");
 		return ret;
 	}
 
-	ret = nv04_pll_calc(nv_subdev(pfb), &pll, freq,
-			    &N1, &M1, &N2, &M2, &log2P);
+	ret = nv04_pll_calc(subdev, &pll, freq, &N1, &M1, &N2, &M2, &log2P);
 	if (ret < 0)
 		return ret;
 
@@ -64,11 +63,13 @@
 	return 0;
 }
 
-int
-nv40_ram_prog(struct nvkm_fb *pfb)
+static int
+nv40_ram_prog(struct nvkm_ram *base)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct nv40_ram *ram = (void *)pfb->ram;
+	struct nv40_ram *ram = nv40_ram(base);
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_bios *bios = device->bios;
 	struct bit_entry M;
 	u32 crtc_mask = 0;
 	u8  sr1[2];
@@ -76,12 +77,12 @@
 
 	/* determine which CRTCs are active, fetch VGA_SR1 for each */
 	for (i = 0; i < 2; i++) {
-		u32 vbl = nv_rd32(pfb, 0x600808 + (i * 0x2000));
+		u32 vbl = nvkm_rd32(device, 0x600808 + (i * 0x2000));
 		u32 cnt = 0;
 		do {
-			if (vbl != nv_rd32(pfb, 0x600808 + (i * 0x2000))) {
-				nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
-				sr1[i] = nv_rd08(pfb, 0x0c03c5 + (i * 0x2000));
+			if (vbl != nvkm_rd32(device, 0x600808 + (i * 0x2000))) {
+				nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+				sr1[i] = nvkm_rd08(device, 0x0c03c5 + (i * 0x2000));
 				if (!(sr1[i] & 0x20))
 					crtc_mask |= (1 << i);
 				break;
@@ -94,55 +95,66 @@
 	for (i = 0; i < 2; i++) {
 		if (!(crtc_mask & (1 << i)))
 			continue;
-		nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
-		nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
-		nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
-		nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
+
+		nvkm_msec(device, 2000,
+			u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
+			if (!(tmp & 0x00010000))
+				break;
+		);
+
+		nvkm_msec(device, 2000,
+			u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
+			if ( (tmp & 0x00010000))
+				break;
+		);
+
+		nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+		nvkm_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
 	}
 
 	/* prepare ram for reclocking */
-	nv_wr32(pfb, 0x1002d4, 0x00000001); /* precharge */
-	nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
-	nv_wr32(pfb, 0x1002d0, 0x00000001); /* refresh */
-	nv_mask(pfb, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
-	nv_wr32(pfb, 0x1002dc, 0x00000001); /* enable self-refresh */
+	nvkm_wr32(device, 0x1002d4, 0x00000001); /* precharge */
+	nvkm_wr32(device, 0x1002d0, 0x00000001); /* refresh */
+	nvkm_wr32(device, 0x1002d0, 0x00000001); /* refresh */
+	nvkm_mask(device, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
+	nvkm_wr32(device, 0x1002dc, 0x00000001); /* enable self-refresh */
 
 	/* change the PLL of each memory partition */
-	nv_mask(pfb, 0x00c040, 0x0000c000, 0x00000000);
-	switch (nv_device(pfb)->chipset) {
+	nvkm_mask(device, 0x00c040, 0x0000c000, 0x00000000);
+	switch (device->chipset) {
 	case 0x40:
 	case 0x45:
 	case 0x41:
 	case 0x42:
 	case 0x47:
-		nv_mask(pfb, 0x004044, 0xc0771100, ram->ctrl);
-		nv_mask(pfb, 0x00402c, 0xc0771100, ram->ctrl);
-		nv_wr32(pfb, 0x004048, ram->coef);
-		nv_wr32(pfb, 0x004030, ram->coef);
+		nvkm_mask(device, 0x004044, 0xc0771100, ram->ctrl);
+		nvkm_mask(device, 0x00402c, 0xc0771100, ram->ctrl);
+		nvkm_wr32(device, 0x004048, ram->coef);
+		nvkm_wr32(device, 0x004030, ram->coef);
 	case 0x43:
 	case 0x49:
 	case 0x4b:
-		nv_mask(pfb, 0x004038, 0xc0771100, ram->ctrl);
-		nv_wr32(pfb, 0x00403c, ram->coef);
+		nvkm_mask(device, 0x004038, 0xc0771100, ram->ctrl);
+		nvkm_wr32(device, 0x00403c, ram->coef);
 	default:
-		nv_mask(pfb, 0x004020, 0xc0771100, ram->ctrl);
-		nv_wr32(pfb, 0x004024, ram->coef);
+		nvkm_mask(device, 0x004020, 0xc0771100, ram->ctrl);
+		nvkm_wr32(device, 0x004024, ram->coef);
 		break;
 	}
 	udelay(100);
-	nv_mask(pfb, 0x00c040, 0x0000c000, 0x0000c000);
+	nvkm_mask(device, 0x00c040, 0x0000c000, 0x0000c000);
 
 	/* re-enable normal operation of memory controller */
-	nv_wr32(pfb, 0x1002dc, 0x00000000);
-	nv_mask(pfb, 0x100210, 0x80000000, 0x80000000);
+	nvkm_wr32(device, 0x1002dc, 0x00000000);
+	nvkm_mask(device, 0x100210, 0x80000000, 0x80000000);
 	udelay(100);
 
 	/* execute memory reset script from vbios */
 	if (!bit_entry(bios, 'M', &M)) {
 		struct nvbios_init init = {
-			.subdev = nv_subdev(pfb),
+			.subdev = subdev,
 			.bios = bios,
-			.offset = nv_ro16(bios, M.offset + 0x00),
+			.offset = nvbios_rd16(bios, M.offset + 0x00),
 			.execute = 1,
 		};
 
@@ -155,58 +167,64 @@
 	for (i = 0; i < 2; i++) {
 		if (!(crtc_mask & (1 << i)))
 			continue;
-		nv_wait(pfb, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
-		nv_wr08(pfb, 0x0c03c4 + (i * 0x2000), 0x01);
-		nv_wr08(pfb, 0x0c03c5 + (i * 0x2000), sr1[i]);
+
+		nvkm_msec(device, 2000,
+			u32 tmp = nvkm_rd32(device, 0x600808 + (i * 0x2000));
+			if ( (tmp & 0x00010000))
+				break;
+		);
+
+		nvkm_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+		nvkm_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i]);
 	}
 
 	return 0;
 }
 
-void
-nv40_ram_tidy(struct nvkm_fb *pfb)
+static void
+nv40_ram_tidy(struct nvkm_ram *base)
 {
 }
 
-static int
-nv40_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+static const struct nvkm_ram_func
+nv40_ram_func = {
+	.calc = nv40_ram_calc,
+	.prog = nv40_ram_prog,
+	.tidy = nv40_ram_tidy,
+};
+
+int
+nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type type, u64 size,
+	      u32 tags, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
 	struct nv40_ram *ram;
-	u32 pbus1218 = nv_rd32(pfb, 0x001218);
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+	return nvkm_ram_ctor(&nv40_ram_func, fb, type, size, tags, &ram->base);
+}
+
+int
+nv40_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+	struct nvkm_device *device = fb->subdev.device;
+	u32 pbus1218 = nvkm_rd32(device, 0x001218);
+	u32     size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	u32     tags = nvkm_rd32(device, 0x100320);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	switch (pbus1218 & 0x00000300) {
+	case 0x00000000: type = NVKM_RAM_TYPE_SDRAM; break;
+	case 0x00000100: type = NVKM_RAM_TYPE_DDR1 ; break;
+	case 0x00000200: type = NVKM_RAM_TYPE_GDDR3; break;
+	case 0x00000300: type = NVKM_RAM_TYPE_DDR2 ; break;
+	}
+
+	ret = nv40_ram_new_(fb, type, size, tags, pram);
 	if (ret)
 		return ret;
 
-	switch (pbus1218 & 0x00000300) {
-	case 0x00000000: ram->base.type = NV_MEM_TYPE_SDRAM; break;
-	case 0x00000100: ram->base.type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000200: ram->base.type = NV_MEM_TYPE_GDDR3; break;
-	case 0x00000300: ram->base.type = NV_MEM_TYPE_DDR2; break;
-	}
-
-	ram->base.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->base.tags  =  nv_rd32(pfb, 0x100320);
-	ram->base.calc = nv40_ram_calc;
-	ram->base.prog = nv40_ram_prog;
-	ram->base.tidy = nv40_ram_tidy;
+	(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
 	return 0;
 }
-
-
-struct nvkm_oclass
-nv40_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
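
The nv_wait() vblank polls above become nvkm_msec(device, 2000, ...) blocks: the macro keeps re-running its statement body until the body break;s out or roughly the given number of milliseconds has passed. Below is a rough userspace approximation of that shape; POLL_MSEC and read_reg() are stand-ins I am assuming for illustration, not the real nvkm timer plumbing.

    #include <unistd.h>

    static unsigned read_reg(unsigned addr)
    {
        (void)addr;
        return 0;           /* pretend the vblank bit is already clear */
    }

    #define POLL_MSEC(ms, body) do {                                  \
        long _left = (ms);                                            \
        while (_left--) {                                             \
            body;               /* a 'break' here ends the poll */    \
            usleep(1000);       /* ~1ms per iteration */              \
        }                                                             \
    } while (0)

    int main(void)
    {
        /* wait up to 2000ms for bit 16 of a stand-in register to clear */
        POLL_MSEC(2000,
            unsigned tmp = read_reg(0x600808);
            if (!(tmp & 0x00010000))
                break;
        );
        return 0;
    }
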
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
new file mode 100644
index 0000000..8a05245
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
@@ -0,0 +1,14 @@
+#ifndef __NV40_FB_RAM_H__
+#define __NV40_FB_RAM_H__
+#define nv40_ram(p) container_of((p), struct nv40_ram, base)
+#include "ram.h"
+
+struct nv40_ram {
+	struct nvkm_ram base;
+	u32 ctrl;
+	u32 coef;
+};
+
+int nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type, u64, u32,
+		  struct nvkm_ram **);
+#endif
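
The new header relies on the container_of() downcast: the func-table hooks receive the embedded struct nvkm_ram *, and nv40_ram() recovers the enclosing chipset struct by subtracting the member offset, which is only safe because base is embedded by value. A compilable userspace sketch of the same pattern; the struct and function names here are stand-ins, and container_of is defined the way the kernel defines it.

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct base_ram { unsigned long long size; };

    struct wrapped_ram {
        struct base_ram base;       /* must be embedded, not pointed to */
        unsigned ctrl, coef;
    };

    #define wrapped(p) container_of((p), struct wrapped_ram, base)

    static int calc(struct base_ram *base, unsigned freq)
    {
        struct wrapped_ram *ram = wrapped(base);  /* recover the wrapper */
        ram->ctrl = freq;
        return 0;
    }

    int main(void)
    {
        struct wrapped_ram ram = { .base = { .size = 1 } };
        calc(&ram.base, 100);
        printf("ctrl = %u\n", ram.ctrl);
        return 0;
    }
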
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
index 33c612b..114828b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
@@ -21,46 +21,29 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv40.h"
+#include "ramnv40.h"
 
-static int
-nv41_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv41_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nv40_ram *ram;
-	u32 pfb474 = nv_rd32(pfb, 0x100474);
+	struct nvkm_device *device = fb->subdev.device;
+	u32  size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	u32  tags = nvkm_rd32(device, 0x100320);
+	u32 fb474 = nvkm_rd32(device, 0x100474);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	if (fb474 & 0x00000004)
+		type = NVKM_RAM_TYPE_GDDR3;
+	if (fb474 & 0x00000002)
+		type = NVKM_RAM_TYPE_DDR2;
+	if (fb474 & 0x00000001)
+		type = NVKM_RAM_TYPE_DDR1;
+
+	ret = nv40_ram_new_(fb, type, size, tags, pram);
 	if (ret)
 		return ret;
 
-	if (pfb474 & 0x00000004)
-		ram->base.type = NV_MEM_TYPE_GDDR3;
-	if (pfb474 & 0x00000002)
-		ram->base.type = NV_MEM_TYPE_DDR2;
-	if (pfb474 & 0x00000001)
-		ram->base.type = NV_MEM_TYPE_DDR1;
-
-	ram->base.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->base.tags  =  nv_rd32(pfb, 0x100320);
-	ram->base.calc = nv40_ram_calc;
-	ram->base.prog = nv40_ram_prog;
-	ram->base.tidy = nv40_ram_tidy;
+	(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
 	return 0;
 }
-
-struct nvkm_oclass
-nv41_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv41_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
index f575a72..bc56fbf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
@@ -21,44 +21,22 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv40.h"
+#include "ramnv40.h"
 
-static int
-nv44_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv44_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nv40_ram *ram;
-	u32 pfb474 = nv_rd32(pfb, 0x100474);
-	int ret;
+	struct nvkm_device *device = fb->subdev.device;
+	u32  size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	u32 fb474 = nvkm_rd32(device, 0x100474);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
+	if (fb474 & 0x00000004)
+		type = NVKM_RAM_TYPE_GDDR3;
+	if (fb474 & 0x00000002)
+		type = NVKM_RAM_TYPE_DDR2;
+	if (fb474 & 0x00000001)
+		type = NVKM_RAM_TYPE_DDR1;
 
-	if (pfb474 & 0x00000004)
-		ram->base.type = NV_MEM_TYPE_GDDR3;
-	if (pfb474 & 0x00000002)
-		ram->base.type = NV_MEM_TYPE_DDR2;
-	if (pfb474 & 0x00000001)
-		ram->base.type = NV_MEM_TYPE_DDR1;
-
-	ram->base.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->base.calc = nv40_ram_calc;
-	ram->base.prog = nv40_ram_prog;
-	ram->base.tidy = nv40_ram_tidy;
-	return 0;
+	return nv40_ram_new_(fb, type, size, 0, pram);
 }
-
-struct nvkm_oclass
-nv44_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv44_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
index 51b44cd..c01f4b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
@@ -21,46 +21,29 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv40.h"
+#include "ramnv40.h"
 
-static int
-nv49_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv49_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nv40_ram *ram;
-	u32 pfb914 = nv_rd32(pfb, 0x100914);
+	struct nvkm_device *device = fb->subdev.device;
+	u32  size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	u32  tags = nvkm_rd32(device, 0x100320);
+	u32 fb914 = nvkm_rd32(device, 0x100914);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
 
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
-
-	switch (pfb914 & 0x00000003) {
-	case 0x00000000: ram->base.type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000001: ram->base.type = NV_MEM_TYPE_DDR2; break;
-	case 0x00000002: ram->base.type = NV_MEM_TYPE_GDDR3; break;
+	switch (fb914 & 0x00000003) {
+	case 0x00000000: type = NVKM_RAM_TYPE_DDR1 ; break;
+	case 0x00000001: type = NVKM_RAM_TYPE_DDR2 ; break;
+	case 0x00000002: type = NVKM_RAM_TYPE_GDDR3; break;
 	case 0x00000003: break;
 	}
 
-	ram->base.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->base.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	ram->base.tags  =  nv_rd32(pfb, 0x100320);
-	ram->base.calc = nv40_ram_calc;
-	ram->base.prog = nv40_ram_prog;
-	ram->base.tidy = nv40_ram_tidy;
+	ret = nv40_ram_new_(fb, type, size, tags, pram);
+	if (ret)
+		return ret;
+
+	(*pram)->parts = (nvkm_rd32(device, 0x100200) & 0x00000003) + 1;
 	return 0;
 }
-
-struct nvkm_oclass
-nv49_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv49_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
index f3ed1c6..fa3c2e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
@@ -21,34 +21,13 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "ram.h"
 
-static int
-nv4e_ram_create(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+int
+nv4e_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
-	int ret;
-
-	ret = nvkm_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
-	if (ret)
-		return ret;
-
-	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	ram->type = NV_MEM_TYPE_STOLEN;
-	return 0;
+	struct nvkm_device *device = fb->subdev.device;
+	u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
+	return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_UNKNOWN,
+			     size, 0, pram);
 }
-
-struct nvkm_oclass
-nv4e_ram_oclass = {
-	.handle = 0,
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv4e_ram_create,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
index d2c81dd..9197e0e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
@@ -21,14 +21,16 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
+#define nv50_ram(p) container_of((p), struct nv50_ram, base)
+#include "ram.h"
 #include "ramseq.h"
+#include "nv50.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/perf.h>
 #include <subdev/bios/pll.h>
+#include <subdev/bios/rammap.h>
 #include <subdev/bios/timing.h>
 #include <subdev/clk/pll.h>
 
@@ -38,11 +40,20 @@
 	struct hwsq_reg r_0x004008;
 	struct hwsq_reg r_0x00400c;
 	struct hwsq_reg r_0x00c040;
+	struct hwsq_reg r_0x100200;
 	struct hwsq_reg r_0x100210;
+	struct hwsq_reg r_0x10021c;
 	struct hwsq_reg r_0x1002d0;
 	struct hwsq_reg r_0x1002d4;
 	struct hwsq_reg r_0x1002dc;
-	struct hwsq_reg r_0x100da0[8];
+	struct hwsq_reg r_0x10053c;
+	struct hwsq_reg r_0x1005a0;
+	struct hwsq_reg r_0x1005a4;
+	struct hwsq_reg r_0x100710;
+	struct hwsq_reg r_0x100714;
+	struct hwsq_reg r_0x100718;
+	struct hwsq_reg r_0x10071c;
+	struct hwsq_reg r_0x100da0;
 	struct hwsq_reg r_0x100e20;
 	struct hwsq_reg r_0x100e24;
 	struct hwsq_reg r_0x611200;
@@ -55,64 +66,181 @@
 	struct nv50_ramseq hwsq;
 };
 
-#define QFX5800NVA0 1
+#define T(t) cfg->timing_10_##t
+static int
+nv50_ram_timing_calc(struct nv50_ram *ram, u32 *timing)
+{
+	struct nvbios_ramcfg *cfg = &ram->base.target.bios;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 cur2, cur4, cur7, cur8;
+	u8 unkt3b;
+
+	cur2 = nvkm_rd32(device, 0x100228);
+	cur4 = nvkm_rd32(device, 0x100230);
+	cur7 = nvkm_rd32(device, 0x10023c);
+	cur8 = nvkm_rd32(device, 0x100240);
+
+	switch ((!T(CWL)) * ram->base.type) {
+	case NVKM_RAM_TYPE_DDR2:
+		T(CWL) = T(CL) - 1;
+		break;
+	case NVKM_RAM_TYPE_GDDR3:
+		T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
+		break;
+	}
+
+	/* XXX: N=1 is not proper statistics */
+	if (device->chipset == 0xa0) {
+		unkt3b = 0x19 + ram->base.next->bios.rammap_00_16_40;
+		timing[6] = (0x2d + T(CL) - T(CWL) +
+				ram->base.next->bios.rammap_00_16_40) << 16 |
+			    T(CWL) << 8 |
+			    (0x2f + T(CL) - T(CWL));
+	} else {
+		unkt3b = 0x16;
+		timing[6] = (0x2b + T(CL) - T(CWL)) << 16 |
+			    max_t(s8, T(CWL) - 2, 1) << 8 |
+			    (0x2e + T(CL) - T(CWL));
+	}
+
+	timing[0] = (T(RP) << 24 | T(RAS) << 16 | T(RFC) << 8 | T(RC));
+	timing[1] = (T(WR) + 1 + T(CWL)) << 24 |
+		    max_t(u8, T(18), 1) << 16 |
+		    (T(WTR) + 1 + T(CWL)) << 8 |
+		    (3 + T(CL) - T(CWL));
+	timing[2] = (T(CWL) - 1) << 24 |
+		    (T(RRD) << 16) |
+		    (T(RCDWR) << 8) |
+		    T(RCDRD);
+	timing[3] = (unkt3b - 2 + T(CL)) << 24 |
+		    unkt3b << 16 |
+		    (T(CL) - 1) << 8 |
+		    (T(CL) - 1);
+	timing[4] = (cur4 & 0xffff0000) |
+		    T(13) << 8 |
+		    T(13);
+	timing[5] = T(RFC) << 24 |
+		    max_t(u8, T(RCDRD), T(RCDWR)) << 16 |
+		    T(RP);
+	/* Timing 6 is already done above */
+	timing[7] = (cur7 & 0xff00ffff) | (T(CL) - 1) << 16;
+	timing[8] = (cur8 & 0xffffff00);
+
+	/* XXX: P.version == 1 only has DDR2 and GDDR3? */
+	if (ram->base.type == NVKM_RAM_TYPE_DDR2) {
+		timing[5] |= (T(CL) + 3) << 8;
+		timing[8] |= (T(CL) - 4);
+	} else
+	if (ram->base.type == NVKM_RAM_TYPE_GDDR3) {
+		timing[5] |= (T(CL) + 2) << 8;
+		timing[8] |= (T(CL) - 2);
+	}
+
+	nvkm_debug(subdev, " 220: %08x %08x %08x %08x\n",
+		   timing[0], timing[1], timing[2], timing[3]);
+	nvkm_debug(subdev, " 230: %08x %08x %08x %08x\n",
+		   timing[4], timing[5], timing[6], timing[7]);
+	nvkm_debug(subdev, " 240: %08x\n", timing[8]);
+	return 0;
+}
+#undef T
+
+static void
+nvkm_sddr2_dll_reset(struct nv50_ramseq *hwsq)
+{
+	ram_mask(hwsq, mr[0], 0x100, 0x100);
+	ram_mask(hwsq, mr[0], 0x100, 0x000);
+	ram_nsec(hwsq, 24000);
+}
 
 static int
-nv50_ram_calc(struct nvkm_fb *pfb, u32 freq)
+nv50_ram_calc(struct nvkm_ram *base, u32 freq)
 {
-	struct nvkm_bios *bios = nvkm_bios(pfb);
-	struct nv50_ram *ram = (void *)pfb->ram;
+	struct nv50_ram *ram = nv50_ram(base);
 	struct nv50_ramseq *hwsq = &ram->hwsq;
+	struct nvkm_subdev *subdev = &ram->base.fb->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	struct nvbios_perfE perfE;
 	struct nvbios_pll mpll;
-	struct {
-		u32 data;
-		u8  size;
-	} ramcfg, timing;
-	u8  ver, hdr, cnt, len, strap;
+	struct nvkm_ram_data *next;
+	u8  ver, hdr, cnt, len, strap, size;
+	u32 data;
+	u32 r100da0, r004008, unk710, unk714, unk718, unk71c;
 	int N1, M1, N2, M2, P;
 	int ret, i;
+	u32 timing[9];
+
+	next = &ram->base.target;
+	next->freq = freq;
+	ram->base.next = next;
 
 	/* lookup closest matching performance table entry for frequency */
 	i = 0;
 	do {
-		ramcfg.data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
-					    &ramcfg.size, &perfE);
-		if (!ramcfg.data || (ver < 0x25 || ver >= 0x40) ||
-		    (ramcfg.size < 2)) {
-			nv_error(pfb, "invalid/missing perftab entry\n");
+		data = nvbios_perfEp(bios, i++, &ver, &hdr, &cnt,
+				     &size, &perfE);
+		if (!data || (ver < 0x25 || ver >= 0x40) ||
+		    (size < 2)) {
+			nvkm_error(subdev, "invalid/missing perftab entry\n");
 			return -EINVAL;
 		}
 	} while (perfE.memory < freq);
 
+	nvbios_rammapEp_from_perf(bios, data, hdr, &next->bios);
+
 	/* locate specific data set for the attached memory */
-	strap = nvbios_ramcfg_index(nv_subdev(pfb));
+	strap = nvbios_ramcfg_index(subdev);
 	if (strap >= cnt) {
-		nv_error(pfb, "invalid ramcfg strap\n");
+		nvkm_error(subdev, "invalid ramcfg strap\n");
 		return -EINVAL;
 	}
 
-	ramcfg.data += hdr + (strap * ramcfg.size);
-
-	/* lookup memory timings, if bios says they're present */
-	strap = nv_ro08(bios, ramcfg.data + 0x01);
-	if (strap != 0xff) {
-		timing.data = nvbios_timingEe(bios, strap, &ver, &hdr,
-					      &cnt, &len);
-		if (!timing.data || ver != 0x10 || hdr < 0x12) {
-			nv_error(pfb, "invalid/missing timing entry "
-				 "%02x %04x %02x %02x\n",
-				 strap, timing.data, ver, hdr);
-			return -EINVAL;
-		}
-	} else {
-		timing.data = 0;
+	data = nvbios_rammapSp_from_perf(bios, data + hdr, size, strap,
+			&next->bios);
+	if (!data) {
+		nvkm_error(subdev, "invalid/missing rammap entry ");
+		return -EINVAL;
 	}
 
-	ret = ram_init(hwsq, nv_subdev(pfb));
+	/* lookup memory timings, if bios says they're present */
+	if (next->bios.ramcfg_timing != 0xff) {
+		data = nvbios_timingEp(bios, next->bios.ramcfg_timing,
+					&ver, &hdr, &cnt, &len, &next->bios);
+		if (!data || ver != 0x10 || hdr < 0x12) {
+			nvkm_error(subdev, "invalid/missing timing entry "
+				 "%02x %04x %02x %02x\n",
+				 strap, data, ver, hdr);
+			return -EINVAL;
+		}
+	}
+
+	nv50_ram_timing_calc(ram, timing);
+
+	ret = ram_init(hwsq, subdev);
 	if (ret)
 		return ret;
 
+	/* Determine ram-specific MR values */
+	ram->base.mr[0] = ram_rd32(hwsq, mr[0]);
+	ram->base.mr[1] = ram_rd32(hwsq, mr[1]);
+	ram->base.mr[2] = ram_rd32(hwsq, mr[2]);
+
+	switch (ram->base.type) {
+	case NVKM_RAM_TYPE_GDDR3:
+		ret = nvkm_gddr3_calc(&ram->base);
+		break;
+	default:
+		ret = -ENOSYS;
+		break;
+	}
+
+	if (ret)
+		return ret;
+
+	/* Always disable this bit during reclock */
+	ram_mask(hwsq, 0x100200, 0x00000800, 0x00000000);
+
 	ram_wait(hwsq, 0x01, 0x00); /* wait for !vblank */
 	ram_wait(hwsq, 0x01, 0x01); /* wait for vblank */
 	ram_wr32(hwsq, 0x611200, 0x00003300);
@@ -120,6 +248,7 @@
 	ram_nsec(hwsq, 8000);
 	ram_setf(hwsq, 0x10, 0x00); /* disable fb */
 	ram_wait(hwsq, 0x00, 0x01); /* wait for fb disabled */
+	ram_nsec(hwsq, 2000);
 
 	ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
 	ram_wr32(hwsq, 0x1002d0, 0x00000001); /* refresh */
@@ -129,97 +258,149 @@
 
 	ret = nvbios_pll_parse(bios, 0x004008, &mpll);
 	mpll.vco2.max_freq = 0;
-	if (ret == 0) {
-		ret = nv04_pll_calc(nv_subdev(pfb), &mpll, freq,
+	if (ret >= 0) {
+		ret = nv04_pll_calc(subdev, &mpll, freq,
 				    &N1, &M1, &N2, &M2, &P);
-		if (ret == 0)
+		if (ret <= 0)
 			ret = -EINVAL;
 	}
 
 	if (ret < 0)
 		return ret;
 
+	/* XXX: 750MHz seems rather arbitrary */
+	if (freq <= 750000) {
+		r100da0 = 0x00000010;
+		r004008 = 0x90000000;
+	} else {
+		r100da0 = 0x00000000;
+		r004008 = 0x80000000;
+	}
+
+	r004008 |= (mpll.bias_p << 19) | (P << 22) | (P << 16);
+
 	ram_mask(hwsq, 0x00c040, 0xc000c000, 0x0000c000);
-	ram_mask(hwsq, 0x004008, 0x00000200, 0x00000200);
+	/* XXX: Is rammap_00_16_40 the DLL bit we've seen in GT215? Why does
+	 * it have a different rammap bit from DLLoff? */
+	ram_mask(hwsq, 0x004008, 0x00004200, 0x00000200 |
+			next->bios.rammap_00_16_40 << 14);
 	ram_mask(hwsq, 0x00400c, 0x0000ffff, (N1 << 8) | M1);
-	ram_mask(hwsq, 0x004008, 0x81ff0000, 0x80000000 | (mpll.bias_p << 19) |
-					     (P << 22) | (P << 16));
-#if QFX5800NVA0
-	for (i = 0; i < 8; i++)
-		ram_mask(hwsq, 0x100da0[i], 0x00000000, 0x00000000); /*XXX*/
-#endif
-	ram_nsec(hwsq, 96000); /*XXX*/
+	ram_mask(hwsq, 0x004008, 0x91ff0000, r004008);
+	if (subdev->device->chipset >= 0x96)
+		ram_wr32(hwsq, 0x100da0, r100da0);
+	ram_nsec(hwsq, 64000); /*XXX*/
+	ram_nsec(hwsq, 32000); /*XXX*/
+
 	ram_mask(hwsq, 0x004008, 0x00002200, 0x00002000);
 
 	ram_wr32(hwsq, 0x1002dc, 0x00000000); /* disable self-refresh */
+	ram_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge */
 	ram_wr32(hwsq, 0x100210, 0x80000000); /* enable auto-refresh */
 
 	ram_nsec(hwsq, 12000);
 
 	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
+	case NVKM_RAM_TYPE_DDR2:
 		ram_nuke(hwsq, mr[0]); /* force update */
 		ram_mask(hwsq, mr[0], 0x000, 0x000);
 		break;
-	case NV_MEM_TYPE_GDDR3:
-		ram_mask(hwsq, mr[2], 0x000, 0x000);
+	case NVKM_RAM_TYPE_GDDR3:
+		ram_nuke(hwsq, mr[1]); /* force update */
+		ram_wr32(hwsq, mr[1], ram->base.mr[1]);
 		ram_nuke(hwsq, mr[0]); /* force update */
-		ram_mask(hwsq, mr[0], 0x000, 0x000);
+		ram_wr32(hwsq, mr[0], ram->base.mr[0]);
 		break;
 	default:
 		break;
 	}
 
-	ram_mask(hwsq, timing[3], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[1], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[6], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[7], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[8], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[2], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[4], 0x00000000, 0x00000000); /*XXX*/
-	ram_mask(hwsq, timing[5], 0x00000000, 0x00000000); /*XXX*/
+	ram_mask(hwsq, timing[3], 0xffffffff, timing[3]);
+	ram_mask(hwsq, timing[1], 0xffffffff, timing[1]);
+	ram_mask(hwsq, timing[6], 0xffffffff, timing[6]);
+	ram_mask(hwsq, timing[7], 0xffffffff, timing[7]);
+	ram_mask(hwsq, timing[8], 0xffffffff, timing[8]);
+	ram_mask(hwsq, timing[0], 0xffffffff, timing[0]);
+	ram_mask(hwsq, timing[2], 0xffffffff, timing[2]);
+	ram_mask(hwsq, timing[4], 0xffffffff, timing[4]);
+	ram_mask(hwsq, timing[5], 0xffffffff, timing[5]);
 
-	ram_mask(hwsq, timing[0], 0x00000000, 0x00000000); /*XXX*/
+	if (!next->bios.ramcfg_00_03_02)
+		ram_mask(hwsq, 0x10021c, 0x00010000, 0x00000000);
+	ram_mask(hwsq, 0x100200, 0x00001000, !next->bios.ramcfg_00_04_02 << 12);
 
-#if QFX5800NVA0
-	ram_nuke(hwsq, 0x100e24);
-	ram_mask(hwsq, 0x100e24, 0x00000000, 0x00000000);
-	ram_nuke(hwsq, 0x100e20);
-	ram_mask(hwsq, 0x100e20, 0x00000000, 0x00000000);
-#endif
+	/* XXX: A lot of this could be "chipset"/"ram type" specific stuff */
+	unk710  = ram_rd32(hwsq, 0x100710) & ~0x00000101;
+	unk714  = ram_rd32(hwsq, 0x100714) & ~0xf0000020;
+	unk718  = ram_rd32(hwsq, 0x100718) & ~0x00000100;
+	unk71c  = ram_rd32(hwsq, 0x10071c) & ~0x00000100;
 
-	ram_mask(hwsq, mr[0], 0x100, 0x100);
-	ram_mask(hwsq, mr[0], 0x100, 0x000);
+	if ( next->bios.ramcfg_00_03_01)
+		unk71c |= 0x00000100;
+	if ( next->bios.ramcfg_00_03_02)
+		unk710 |= 0x00000100;
+	if (!next->bios.ramcfg_00_03_08) {
+		unk710 |= 0x1;
+		unk714 |= 0x20;
+	}
+	if ( next->bios.ramcfg_00_04_04)
+		unk714 |= 0x70000000;
+	if ( next->bios.ramcfg_00_04_20)
+		unk718 |= 0x00000100;
+
+	ram_mask(hwsq, 0x100714, 0xffffffff, unk714);
+	ram_mask(hwsq, 0x10071c, 0xffffffff, unk71c);
+	ram_mask(hwsq, 0x100718, 0xffffffff, unk718);
+	ram_mask(hwsq, 0x100710, 0xffffffff, unk710);
+
+	if (next->bios.rammap_00_16_20) {
+		ram_wr32(hwsq, 0x1005a0, next->bios.ramcfg_00_07 << 16 |
+					 next->bios.ramcfg_00_06 << 8 |
+					 next->bios.ramcfg_00_05);
+		ram_wr32(hwsq, 0x1005a4, next->bios.ramcfg_00_09 << 8 |
+					 next->bios.ramcfg_00_08);
+		ram_mask(hwsq, 0x10053c, 0x00001000, 0x00000000);
+	} else {
+		ram_mask(hwsq, 0x10053c, 0x00001000, 0x00001000);
+	}
+	ram_mask(hwsq, mr[1], 0xffffffff, ram->base.mr[1]);
+
+	/* Reset DLL */
+	if (!next->bios.ramcfg_DLLoff)
+		nvkm_sddr2_dll_reset(hwsq);
 
 	ram_setf(hwsq, 0x10, 0x01); /* enable fb */
 	ram_wait(hwsq, 0x00, 0x00); /* wait for fb enabled */
 	ram_wr32(hwsq, 0x611200, 0x00003330);
 	ram_wr32(hwsq, 0x002504, 0x00000000); /* un-block fifo */
+
+	if (next->bios.rammap_00_17_02)
+		ram_mask(hwsq, 0x100200, 0x00000800, 0x00000800);
+	if (!next->bios.rammap_00_16_40)
+		ram_mask(hwsq, 0x004008, 0x00004000, 0x00000000);
+	if (next->bios.ramcfg_00_03_02)
+		ram_mask(hwsq, 0x10021c, 0x00010000, 0x00010000);
+
 	return 0;
 }
 
 static int
-nv50_ram_prog(struct nvkm_fb *pfb)
+nv50_ram_prog(struct nvkm_ram *base)
 {
-	struct nvkm_device *device = nv_device(pfb);
-	struct nv50_ram *ram = (void *)pfb->ram;
-	struct nv50_ramseq *hwsq = &ram->hwsq;
-
-	ram_exec(hwsq, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
+	struct nv50_ram *ram = nv50_ram(base);
+	struct nvkm_device *device = ram->base.fb->subdev.device;
+	ram_exec(&ram->hwsq, nvkm_boolopt(device->cfgopt, "NvMemExec", true));
 	return 0;
 }
 
 static void
-nv50_ram_tidy(struct nvkm_fb *pfb)
+nv50_ram_tidy(struct nvkm_ram *base)
 {
-	struct nv50_ram *ram = (void *)pfb->ram;
-	struct nv50_ramseq *hwsq = &ram->hwsq;
-	ram_exec(hwsq, false);
+	struct nv50_ram *ram = nv50_ram(base);
+	ram_exec(&ram->hwsq, false);
 }
 
 void
-__nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem *mem)
+__nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem)
 {
 	struct nvkm_mm_node *this;
 
@@ -227,14 +408,14 @@
 		this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
 
 		list_del(&this->rl_entry);
-		nvkm_mm_free(&pfb->vram, &this);
+		nvkm_mm_free(&ram->vram, &this);
 	}
 
-	nvkm_mm_free(&pfb->tags, &mem->tag);
+	nvkm_mm_free(&ram->tags, &mem->tag);
 }
 
 void
-nv50_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
+nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
 {
 	struct nvkm_mem *mem = *pmem;
 
@@ -242,19 +423,19 @@
 	if (unlikely(mem == NULL))
 		return;
 
-	mutex_lock(&pfb->base.mutex);
-	__nv50_ram_put(pfb, mem);
-	mutex_unlock(&pfb->base.mutex);
+	mutex_lock(&ram->fb->subdev.mutex);
+	__nv50_ram_put(ram, mem);
+	mutex_unlock(&ram->fb->subdev.mutex);
 
 	kfree(mem);
 }
 
 int
-nv50_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
+nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
 	     u32 memtype, struct nvkm_mem **pmem)
 {
-	struct nvkm_mm *heap = &pfb->vram;
-	struct nvkm_mm *tags = &pfb->tags;
+	struct nvkm_mm *heap = &ram->vram;
+	struct nvkm_mm *tags = &ram->tags;
 	struct nvkm_mm_node *r;
 	struct nvkm_mem *mem;
 	int comp = (memtype & 0x300) >> 8;
@@ -262,17 +443,17 @@
 	int back = (memtype & 0x800);
 	int min, max, ret;
 
-	max = (size >> 12);
-	min = ncmin ? (ncmin >> 12) : max;
-	align >>= 12;
+	max = (size >> NVKM_RAM_MM_SHIFT);
+	min = ncmin ? (ncmin >> NVKM_RAM_MM_SHIFT) : max;
+	align >>= NVKM_RAM_MM_SHIFT;
 
 	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
 	if (!mem)
 		return -ENOMEM;
 
-	mutex_lock(&pfb->base.mutex);
+	mutex_lock(&ram->fb->subdev.mutex);
 	if (comp) {
-		if (align == 16) {
+		if (align == (1 << (16 - NVKM_RAM_MM_SHIFT))) {
 			int n = (max >> 4) * comp;
 
 			ret = nvkm_mm_head(tags, 0, 1, n, n, 1, &mem->tag);
@@ -295,34 +476,45 @@
 		else
 			ret = nvkm_mm_head(heap, 0, type, max, min, align, &r);
 		if (ret) {
-			mutex_unlock(&pfb->base.mutex);
-			pfb->ram->put(pfb, &mem);
+			mutex_unlock(&ram->fb->subdev.mutex);
+			ram->func->put(ram, &mem);
 			return ret;
 		}
 
 		list_add_tail(&r->rl_entry, &mem->regions);
 		max -= r->length;
 	} while (max);
-	mutex_unlock(&pfb->base.mutex);
+	mutex_unlock(&ram->fb->subdev.mutex);
 
 	r = list_first_entry(&mem->regions, struct nvkm_mm_node, rl_entry);
-	mem->offset = (u64)r->offset << 12;
+	mem->offset = (u64)r->offset << NVKM_RAM_MM_SHIFT;
 	*pmem = mem;
 	return 0;
 }
 
+static const struct nvkm_ram_func
+nv50_ram_func = {
+	.get = nv50_ram_get,
+	.put = nv50_ram_put,
+	.calc = nv50_ram_calc,
+	.prog = nv50_ram_prog,
+	.tidy = nv50_ram_tidy,
+};
+
 static u32
-nv50_fb_vram_rblock(struct nvkm_fb *pfb, struct nvkm_ram *ram)
+nv50_fb_vram_rblock(struct nvkm_ram *ram)
 {
+	struct nvkm_subdev *subdev = &ram->fb->subdev;
+	struct nvkm_device *device = subdev->device;
 	int colbits, rowbitsa, rowbitsb, banks;
 	u64 rowsize, predicted;
 	u32 r0, r4, rt, rblock_size;
 
-	r0 = nv_rd32(pfb, 0x100200);
-	r4 = nv_rd32(pfb, 0x100204);
-	rt = nv_rd32(pfb, 0x100250);
-	nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n",
-		 r0, r4, rt, nv_rd32(pfb, 0x001540));
+	r0 = nvkm_rd32(device, 0x100200);
+	r4 = nvkm_rd32(device, 0x100204);
+	rt = nvkm_rd32(device, 0x100250);
+	nvkm_debug(subdev, "memcfg %08x %08x %08x %08x\n",
+		   r0, r4, rt, nvkm_rd32(device, 0x001540));
 
 	colbits  =  (r4 & 0x0000f000) >> 12;
 	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
@@ -335,103 +527,94 @@
 		predicted += rowsize << rowbitsb;
 
 	if (predicted != ram->size) {
-		nv_warn(pfb, "memory controller reports %d MiB VRAM\n",
-			(u32)(ram->size >> 20));
+		nvkm_warn(subdev, "memory controller reports %d MiB VRAM\n",
+			  (u32)(ram->size >> 20));
 	}
 
 	rblock_size = rowsize;
 	if (rt & 1)
 		rblock_size *= 3;
 
-	nv_debug(pfb, "rblock %d bytes\n", rblock_size);
+	nvkm_debug(subdev, "rblock %d bytes\n", rblock_size);
 	return rblock_size;
 }
 
 int
-nv50_ram_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
+nv50_ram_ctor(const struct nvkm_ram_func *func,
+	      struct nvkm_fb *fb, struct nvkm_ram *ram)
 {
-	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-	struct nvkm_bios *bios = nvkm_bios(parent);
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ram *ram;
+	struct nvkm_device *device = fb->subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	const u32 rsvd_head = ( 256 * 1024); /* vga memory */
+	const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
+	u64 size = nvkm_rd32(device, 0x10020c);
+	u32 tags = nvkm_rd32(device, 0x100320);
+	enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
 	int ret;
 
-	ret = nvkm_ram_create_(parent, engine, oclass, length, pobject);
-	ram = *pobject;
-	if (ret)
-		return ret;
-
-	ram->size = nv_rd32(pfb, 0x10020c);
-	ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
-
-	ram->part_mask = (nv_rd32(pfb, 0x001540) & 0x00ff0000) >> 16;
-	ram->parts = hweight8(ram->part_mask);
-
-	switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
-	case 0: ram->type = NV_MEM_TYPE_DDR1; break;
+	switch (nvkm_rd32(device, 0x100714) & 0x00000007) {
+	case 0: type = NVKM_RAM_TYPE_DDR1; break;
 	case 1:
-		if (nvkm_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
-			ram->type = NV_MEM_TYPE_DDR3;
+		if (nvkm_fb_bios_memtype(bios) == NVKM_RAM_TYPE_DDR3)
+			type = NVKM_RAM_TYPE_DDR3;
 		else
-			ram->type = NV_MEM_TYPE_DDR2;
+			type = NVKM_RAM_TYPE_DDR2;
 		break;
-	case 2: ram->type = NV_MEM_TYPE_GDDR3; break;
-	case 3: ram->type = NV_MEM_TYPE_GDDR4; break;
-	case 4: ram->type = NV_MEM_TYPE_GDDR5; break;
+	case 2: type = NVKM_RAM_TYPE_GDDR3; break;
+	case 3: type = NVKM_RAM_TYPE_GDDR4; break;
+	case 4: type = NVKM_RAM_TYPE_GDDR5; break;
 	default:
 		break;
 	}
 
-	ret = nvkm_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
-			   (rsvd_head + rsvd_tail),
-			   nv50_fb_vram_rblock(pfb, ram) >> 12);
+	size = (size & 0x000000ff) << 32 | (size & 0xffffff00);
+
+	ret = nvkm_ram_ctor(func, fb, type, size, tags, ram);
 	if (ret)
 		return ret;
 
-	ram->ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
-	ram->tags  =  nv_rd32(pfb, 0x100320);
-	ram->get = nv50_ram_get;
-	ram->put = nv50_ram_put;
-	return 0;
+	ram->part_mask = (nvkm_rd32(device, 0x001540) & 0x00ff0000) >> 16;
+	ram->parts = hweight8(ram->part_mask);
+	ram->ranks = (nvkm_rd32(device, 0x100200) & 0x4) ? 2 : 1;
+	nvkm_mm_fini(&ram->vram);
+
+	return nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+			    (size - rsvd_head - rsvd_tail) >> NVKM_RAM_MM_SHIFT,
+			    nv50_fb_vram_rblock(ram) >> NVKM_RAM_MM_SHIFT);
 }
 
-static int
-nv50_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 datasize,
-	      struct nvkm_object **pobject)
+int
+nv50_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
 {
 	struct nv50_ram *ram;
 	int ret, i;
 
-	ret = nv50_ram_create(parent, engine, oclass, &ram);
-	*pobject = nv_object(ram);
+	if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+		return -ENOMEM;
+	*pram = &ram->base;
+
+	ret = nv50_ram_ctor(&nv50_ram_func, fb, &ram->base);
 	if (ret)
 		return ret;
 
-	switch (ram->base.type) {
-	case NV_MEM_TYPE_DDR2:
-	case NV_MEM_TYPE_GDDR3:
-		ram->base.calc = nv50_ram_calc;
-		ram->base.prog = nv50_ram_prog;
-		ram->base.tidy = nv50_ram_tidy;
-		break;
-	default:
-		nv_warn(ram, "reclocking of this ram type unsupported\n");
-		return 0;
-	}
-
 	ram->hwsq.r_0x002504 = hwsq_reg(0x002504);
 	ram->hwsq.r_0x00c040 = hwsq_reg(0x00c040);
 	ram->hwsq.r_0x004008 = hwsq_reg(0x004008);
 	ram->hwsq.r_0x00400c = hwsq_reg(0x00400c);
+	ram->hwsq.r_0x100200 = hwsq_reg(0x100200);
 	ram->hwsq.r_0x100210 = hwsq_reg(0x100210);
+	ram->hwsq.r_0x10021c = hwsq_reg(0x10021c);
 	ram->hwsq.r_0x1002d0 = hwsq_reg(0x1002d0);
 	ram->hwsq.r_0x1002d4 = hwsq_reg(0x1002d4);
 	ram->hwsq.r_0x1002dc = hwsq_reg(0x1002dc);
-	for (i = 0; i < 8; i++)
-		ram->hwsq.r_0x100da0[i] = hwsq_reg(0x100da0 + (i * 0x04));
+	ram->hwsq.r_0x10053c = hwsq_reg(0x10053c);
+	ram->hwsq.r_0x1005a0 = hwsq_reg(0x1005a0);
+	ram->hwsq.r_0x1005a4 = hwsq_reg(0x1005a4);
+	ram->hwsq.r_0x100710 = hwsq_reg(0x100710);
+	ram->hwsq.r_0x100714 = hwsq_reg(0x100714);
+	ram->hwsq.r_0x100718 = hwsq_reg(0x100718);
+	ram->hwsq.r_0x10071c = hwsq_reg(0x10071c);
+	ram->hwsq.r_0x100da0 = hwsq_stride(0x100da0, 4, ram->base.part_mask);
 	ram->hwsq.r_0x100e20 = hwsq_reg(0x100e20);
 	ram->hwsq.r_0x100e24 = hwsq_reg(0x100e24);
 	ram->hwsq.r_0x611200 = hwsq_reg(0x611200);
@@ -453,13 +636,3 @@
 
 	return 0;
 }
-
-struct nvkm_oclass
-nv50_ram_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_ram_ctor,
-		.dtor = _nvkm_ram_dtor,
-		.init = _nvkm_ram_init,
-		.fini = _nvkm_ram_fini,
-	}
-};
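
One detail in the constructor above: the eight-entry r_0x100da0[] array collapses into a single hwsq_stride(0x100da0, 4, ram->base.part_mask) register, i.e. one logical register replicated at a fixed stride for each memory partition whose bit is set in the mask. A toy expansion of that idea, with wr32() standing in for the real sequencer write:

    #include <stdio.h>

    static void wr32(unsigned addr, unsigned data)
    {
        printf("wr 0x%06x <- 0x%08x\n", addr, data);
    }

    static void
    stride_wr(unsigned addr, unsigned stride, unsigned mask, unsigned data)
    {
        unsigned i;
        for (i = 0; mask; i++, mask >>= 1) {
            if (mask & 1)
                wr32(addr + i * stride, data);
        }
    }

    int main(void)
    {
        /* partitions 0 and 2 present: writes 0x100da0 and 0x100da8 */
        stride_wr(0x100da0, 4, 0x5, 0x00000010);
        return 0;
    }
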
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
index afab42d..86bf674 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr2.c
@@ -65,7 +65,7 @@
 	case 0x10:
 		CL  = ram->next->bios.timing_10_CL;
 		WR  = ram->next->bios.timing_10_WR;
-		DLL = !ram->next->bios.ramcfg_10_DLLoff;
+		DLL = !ram->next->bios.ramcfg_DLLoff;
 		ODT = ram->next->bios.timing_10_ODT & 3;
 		break;
 	case 0x20:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
index 1084435..b4edc97 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/sddr3.c
@@ -53,7 +53,7 @@
 ramddr3_wr[] = {
 	{ 5, 1 }, { 6, 2 }, { 7, 3 }, { 8, 4 }, { 10, 5 }, { 12, 6 },
 	/* the below are mentioned in some, but not all, ddr3 docs */
-	{ 14, 7 }, { 16, 0 },
+	{ 14, 7 }, { 15, 7 }, { 16, 0 },
 	{ -1 }
 };
 
@@ -61,7 +61,7 @@
 ramddr3_cwl[] = {
 	{ 5, 0 }, { 6, 1 }, { 7, 2 }, { 8, 3 },
 	/* the below are mentioned in some, but not all, ddr3 docs */
-	{ 9, 4 },
+	{ 9, 4 }, { 10, 5 },
 	{ -1 }
 };
 
@@ -79,7 +79,7 @@
 		CWL = ram->next->bios.timing_10_CWL;
 		CL  = ram->next->bios.timing_10_CL;
 		WR  = ram->next->bios.timing_10_WR;
-		DLL = !ram->next->bios.ramcfg_10_DLLoff;
+		DLL = !ram->next->bios.ramcfg_DLLoff;
 		ODT = ram->next->bios.timing_10_ODT;
 		break;
 	case 0x20:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
index b7b7193..f414497 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
@@ -21,31 +21,34 @@
  *
  * Authors: Martin Peres
  */
-#include <subdev/fuse.h>
+#include "priv.h"
 
-int
-_nvkm_fuse_init(struct nvkm_object *object)
+u32
+nvkm_fuse_read(struct nvkm_fuse *fuse, u32 addr)
 {
-	struct nvkm_fuse *fuse = (void *)object;
-	return nvkm_subdev_init(&fuse->base);
+	return fuse->func->read(fuse, addr);
 }
 
-void
-_nvkm_fuse_dtor(struct nvkm_object *object)
+static void *
+nvkm_fuse_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_fuse *fuse = (void *)object;
-	nvkm_subdev_destroy(&fuse->base);
+	return nvkm_fuse(subdev);
 }
 
+static const struct nvkm_subdev_func
+nvkm_fuse = {
+	.dtor = nvkm_fuse_dtor,
+};
+
 int
-nvkm_fuse_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_fuse_new_(const struct nvkm_fuse_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_fuse **pfuse)
 {
 	struct nvkm_fuse *fuse;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "FUSE",
-				  "fuse", length, pobject);
-	fuse = *pobject;
-	return ret;
+	if (!(fuse = *pfuse = kzalloc(sizeof(*fuse), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_fuse, device, index, 0, &fuse->subdev);
+	fuse->func = func;
+	spin_lock_init(&fuse->lock);
+	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
index 393ef3a0..13671fe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gf100.c
@@ -23,56 +23,31 @@
  */
 #include "priv.h"
 
-struct gf100_fuse_priv {
-	struct nvkm_fuse base;
-
-	spinlock_t fuse_enable_lock;
-};
-
 static u32
-gf100_fuse_rd32(struct nvkm_object *object, u64 addr)
+gf100_fuse_read(struct nvkm_fuse *fuse, u32 addr)
 {
-	struct gf100_fuse_priv *priv = (void *)object;
+	struct nvkm_device *device = fuse->subdev.device;
 	unsigned long flags;
 	u32 fuse_enable, unk, val;
 
 	/* racy if another part of nvkm starts writing to these regs */
-	spin_lock_irqsave(&priv->fuse_enable_lock, flags);
-	fuse_enable = nv_mask(priv, 0x22400, 0x800, 0x800);
-	unk = nv_mask(priv, 0x21000, 0x1, 0x1);
-	val = nv_rd32(priv, 0x21100 + addr);
-	nv_wr32(priv, 0x21000, unk);
-	nv_wr32(priv, 0x22400, fuse_enable);
-	spin_unlock_irqrestore(&priv->fuse_enable_lock, flags);
+	spin_lock_irqsave(&fuse->lock, flags);
+	fuse_enable = nvkm_mask(device, 0x022400, 0x800, 0x800);
+	unk = nvkm_mask(device, 0x021000, 0x1, 0x1);
+	val = nvkm_rd32(device, 0x021100 + addr);
+	nvkm_wr32(device, 0x021000, unk);
+	nvkm_wr32(device, 0x022400, fuse_enable);
+	spin_unlock_irqrestore(&fuse->lock, flags);
 	return val;
 }
 
-
-static int
-gf100_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct gf100_fuse_priv *priv;
-	int ret;
-
-	ret = nvkm_fuse_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	spin_lock_init(&priv->fuse_enable_lock);
-	return 0;
-}
-
-struct nvkm_oclass
-gf100_fuse_oclass = {
-	.handle = NV_SUBDEV(FUSE, 0xC0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_fuse_ctor,
-		.dtor = _nvkm_fuse_dtor,
-		.init = _nvkm_fuse_init,
-		.fini = _nvkm_fuse_fini,
-		.rd32 = gf100_fuse_rd32,
-	},
+static const struct nvkm_fuse_func
+gf100_fuse = {
+	.read = gf100_fuse_read,
 };
+
+int
+gf100_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
+{
+	return nvkm_fuse_new_(&gf100_fuse, device, index, pfuse);
+}
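
gf100_fuse_read() is a save/override/restore sequence: nvkm_mask() returns the old register value, the fuse block is forced on for the read, and both control registers are put back exactly as found, all under the spinlock because another thread touching 0x022400 or 0x021000 concurrently would corrupt the restore. A stripped-down sketch of the idiom, with a regs[] array standing in for MMIO and the lock omitted for brevity:

    #include <stdio.h>

    static unsigned regs[3];       /* [0] enable, [1] unk, [2] fuse data */

    static unsigned rmw(int r, unsigned clr, unsigned set)
    {
        unsigned old = regs[r];
        regs[r] = (old & ~clr) | set;
        return old;                /* caller restores this afterwards */
    }

    static unsigned fuse_read(void)
    {
        unsigned en  = rmw(0, 0x800, 0x800);   /* force fuse block on */
        unsigned unk = rmw(1, 0x001, 0x001);
        unsigned val = regs[2];                /* the actual fuse read */
        regs[1] = unk;                         /* restore previous state */
        regs[0] = en;
        return val;
    }

    int main(void)
    {
        regs[2] = 0xdeadbeef;
        printf("0x%08x\n", fuse_read());
        return 0;
    }
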
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
index 0b256aa..9aff4ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
@@ -23,40 +23,20 @@
  */
 #include "priv.h"
 
-struct gm107_fuse_priv {
-	struct nvkm_fuse base;
-};
-
 static u32
-gm107_fuse_rd32(struct nvkm_object *object, u64 addr)
+gm107_fuse_read(struct nvkm_fuse *fuse, u32 addr)
 {
-	struct gf100_fuse_priv *priv = (void *)object;
-	return nv_rd32(priv, 0x21100 + addr);
+	struct nvkm_device *device = fuse->subdev.device;
+	return nvkm_rd32(device, 0x021100 + addr);
 }
 
-
-static int
-gm107_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct gm107_fuse_priv *priv;
-	int ret;
-
-	ret = nvkm_fuse_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-
-	return ret;
-}
-
-struct nvkm_oclass
-gm107_fuse_oclass = {
-	.handle = NV_SUBDEV(FUSE, 0x117),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm107_fuse_ctor,
-		.dtor = _nvkm_fuse_dtor,
-		.init = _nvkm_fuse_init,
-		.fini = _nvkm_fuse_fini,
-		.rd32 = gm107_fuse_rd32,
-	},
+static const struct nvkm_fuse_func
+gm107_fuse = {
+	.read = gm107_fuse_read,
 };
+
+int
+gm107_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
+{
+	return nvkm_fuse_new_(&gm107_fuse, device, index, pfuse);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
index 0d2afc4..514c193 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/nv50.c
@@ -23,54 +23,29 @@
  */
 #include "priv.h"
 
-struct nv50_fuse_priv {
-	struct nvkm_fuse base;
-
-	spinlock_t fuse_enable_lock;
-};
-
 static u32
-nv50_fuse_rd32(struct nvkm_object *object, u64 addr)
+nv50_fuse_read(struct nvkm_fuse *fuse, u32 addr)
 {
-	struct nv50_fuse_priv *priv = (void *)object;
+	struct nvkm_device *device = fuse->subdev.device;
 	unsigned long flags;
 	u32 fuse_enable, val;
 
 	/* racy if another part of nvkm starts writing to this reg */
-	spin_lock_irqsave(&priv->fuse_enable_lock, flags);
-	fuse_enable = nv_mask(priv, 0x1084, 0x800, 0x800);
-	val = nv_rd32(priv, 0x21000 + addr);
-	nv_wr32(priv, 0x1084, fuse_enable);
-	spin_unlock_irqrestore(&priv->fuse_enable_lock, flags);
+	spin_lock_irqsave(&fuse->lock, flags);
+	fuse_enable = nvkm_mask(device, 0x001084, 0x800, 0x800);
+	val = nvkm_rd32(device, 0x021000 + addr);
+	nvkm_wr32(device, 0x001084, fuse_enable);
+	spin_unlock_irqrestore(&fuse->lock, flags);
 	return val;
 }
 
-
-static int
-nv50_fuse_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nv50_fuse_priv *priv;
-	int ret;
-
-	ret = nvkm_fuse_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	spin_lock_init(&priv->fuse_enable_lock);
-	return 0;
-}
-
-struct nvkm_oclass
-nv50_fuse_oclass = {
-	.handle = NV_SUBDEV(FUSE, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_fuse_ctor,
-		.dtor = _nvkm_fuse_dtor,
-		.init = _nvkm_fuse_init,
-		.fini = _nvkm_fuse_fini,
-		.rd32 = nv50_fuse_rd32,
-	},
+static const struct nvkm_fuse_func
+nv50_fuse = {
+	.read = &nv50_fuse_read,
 };
+
+int
+nv50_fuse_new(struct nvkm_device *device, int index, struct nvkm_fuse **pfuse)
+{
+	return nvkm_fuse_new_(&nv50_fuse, device, index, pfuse);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
index 7e050f7..b0390b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
@@ -1,7 +1,12 @@
 #ifndef __NVKM_FUSE_PRIV_H__
 #define __NVKM_FUSE_PRIV_H__
+#define nvkm_fuse(p) container_of((p), struct nvkm_fuse, subdev)
 #include <subdev/fuse.h>
 
-int _nvkm_fuse_init(struct nvkm_object *object);
-void _nvkm_fuse_dtor(struct nvkm_object *object);
+struct nvkm_fuse_func {
+	u32 (*read)(struct nvkm_fuse *, u32 addr);
+};
+
+int nvkm_fuse_new_(const struct nvkm_fuse_func *, struct nvkm_device *,
+		   int index, struct nvkm_fuse **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
index ea42a9e..e52c5e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
@@ -2,5 +2,5 @@
 nvkm-y += nvkm/subdev/gpio/nv10.o
 nvkm-y += nvkm/subdev/gpio/nv50.o
 nvkm-y += nvkm/subdev/gpio/g94.o
-nvkm-y += nvkm/subdev/gpio/gf110.o
+nvkm-y += nvkm/subdev/gpio/gf119.o
 nvkm-y += nvkm/subdev/gpio/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
index dea5816..d45ec99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
@@ -23,28 +23,33 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <core/notify.h>
 
 static int
 nvkm_gpio_drive(struct nvkm_gpio *gpio, int idx, int line, int dir, int out)
 {
-	const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
-	return impl->drive ? impl->drive(gpio, line, dir, out) : -ENODEV;
+	return gpio->func->drive(gpio, line, dir, out);
 }
 
 static int
 nvkm_gpio_sense(struct nvkm_gpio *gpio, int idx, int line)
 {
-	const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
-	return impl->sense ? impl->sense(gpio, line) : -ENODEV;
+	return gpio->func->sense(gpio, line);
 }
 
-static int
+void
+nvkm_gpio_reset(struct nvkm_gpio *gpio, u8 func)
+{
+	if (gpio->func->reset)
+		gpio->func->reset(gpio, func);
+}
+
+int
 nvkm_gpio_find(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line,
 	       struct dcb_gpio_func *func)
 {
-	struct nvkm_bios *bios = nvkm_bios(gpio);
+	struct nvkm_device *device = gpio->subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	u8  ver, len;
 	u16 data;
 
@@ -56,11 +61,11 @@
 		return 0;
 
 	/* Apple iMac G4 NV18 */
-	if (nv_device_match(nv_object(gpio), 0x0189, 0x10de, 0x0010)) {
+	if (device->quirk && device->quirk->tv_gpio) {
 		if (tag == DCB_GPIO_TVDAC0) {
 			*func = (struct dcb_gpio_func) {
 				.func = DCB_GPIO_TVDAC0,
-				.line = 4,
+				.line = device->quirk->tv_gpio,
 				.log[0] = 0,
 				.log[1] = 1,
 			};
@@ -71,7 +76,7 @@
 	return -ENOENT;
 }
 
-static int
+int
 nvkm_gpio_set(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line, int state)
 {
 	struct dcb_gpio_func func;
@@ -87,7 +92,7 @@
 	return ret;
 }
 
-static int
+int
 nvkm_gpio_get(struct nvkm_gpio *gpio, int idx, u8 tag, u8 line)
 {
 	struct dcb_gpio_func func;
@@ -107,16 +112,14 @@
 nvkm_gpio_intr_fini(struct nvkm_event *event, int type, int index)
 {
 	struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event);
-	const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
-	impl->intr_mask(gpio, type, 1 << index, 0);
+	gpio->func->intr_mask(gpio, type, 1 << index, 0);
 }
 
 static void
 nvkm_gpio_intr_init(struct nvkm_event *event, int type, int index)
 {
 	struct nvkm_gpio *gpio = container_of(event, typeof(*gpio), event);
-	const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
-	impl->intr_mask(gpio, type, 1 << index, 1 << index);
+	gpio->func->intr_mask(gpio, type, 1 << index, 1 << index);
 }
 
 static int
@@ -133,16 +136,22 @@
 	return -EINVAL;
 }
 
+static const struct nvkm_event_func
+nvkm_gpio_intr_func = {
+	.ctor = nvkm_gpio_intr_ctor,
+	.init = nvkm_gpio_intr_init,
+	.fini = nvkm_gpio_intr_fini,
+};
+
 static void
 nvkm_gpio_intr(struct nvkm_subdev *subdev)
 {
 	struct nvkm_gpio *gpio = nvkm_gpio(subdev);
-	const struct nvkm_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
 	u32 hi, lo, i;
 
-	impl->intr_stat(gpio, &hi, &lo);
+	gpio->func->intr_stat(gpio, &hi, &lo);
 
-	for (i = 0; (hi | lo) && i < impl->lines; i++) {
+	for (i = 0; (hi | lo) && i < gpio->func->lines; i++) {
 		struct nvkm_gpio_ntfy_rep rep = {
 			.mask = (NVKM_GPIO_HI * !!(hi & (1 << i))) |
 				(NVKM_GPIO_LO * !!(lo & (1 << i))),
@@ -151,24 +160,15 @@
 	}
 }
 
-static const struct nvkm_event_func
-nvkm_gpio_intr_func = {
-	.ctor = nvkm_gpio_intr_ctor,
-	.init = nvkm_gpio_intr_init,
-	.fini = nvkm_gpio_intr_fini,
-};
-
-int
-_nvkm_gpio_fini(struct nvkm_object *object, bool suspend)
+static int
+nvkm_gpio_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	const struct nvkm_gpio_impl *impl = (void *)object->oclass;
-	struct nvkm_gpio *gpio = nvkm_gpio(object);
-	u32 mask = (1 << impl->lines) - 1;
+	struct nvkm_gpio *gpio = nvkm_gpio(subdev);
+	u32 mask = (1 << gpio->func->lines) - 1;
 
-	impl->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0);
-	impl->intr_stat(gpio, &mask, &mask);
-
-	return nvkm_subdev_fini(&gpio->base, suspend);
+	gpio->func->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0);
+	gpio->func->intr_stat(gpio, &mask, &mask);
+	return 0;
 }
 
 static struct dmi_system_id gpio_reset_ids[] = {
@@ -182,70 +182,43 @@
 	{ }
 };
 
-int
-_nvkm_gpio_init(struct nvkm_object *object)
+static int
+nvkm_gpio_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(object);
-	int ret;
-
-	ret = nvkm_subdev_init(&gpio->base);
-	if (ret)
-		return ret;
-
-	if (gpio->reset && dmi_check_system(gpio_reset_ids))
-		gpio->reset(gpio, DCB_GPIO_UNUSED);
-
-	return ret;
+	struct nvkm_gpio *gpio = nvkm_gpio(subdev);
+	if (dmi_check_system(gpio_reset_ids))
+		nvkm_gpio_reset(gpio, DCB_GPIO_UNUSED);
+	return 0;
 }
 
-void
-_nvkm_gpio_dtor(struct nvkm_object *object)
+static void *
+nvkm_gpio_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_gpio *gpio = (void *)object;
+	struct nvkm_gpio *gpio = nvkm_gpio(subdev);
 	nvkm_event_fini(&gpio->event);
-	nvkm_subdev_destroy(&gpio->base);
+	return gpio;
 }
 
-int
-nvkm_gpio_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, int length, void **pobject)
-{
-	const struct nvkm_gpio_impl *impl = (void *)oclass;
-	struct nvkm_gpio *gpio;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "GPIO",
-				  "gpio", length, pobject);
-	gpio = *pobject;
-	if (ret)
-		return ret;
-
-	gpio->find = nvkm_gpio_find;
-	gpio->set  = nvkm_gpio_set;
-	gpio->get  = nvkm_gpio_get;
-	gpio->reset = impl->reset;
-
-	ret = nvkm_event_init(&nvkm_gpio_intr_func, 2, impl->lines,
-			      &gpio->event);
-	if (ret)
-		return ret;
-
-	nv_subdev(gpio)->intr = nvkm_gpio_intr;
-	return 0;
-}
+static const struct nvkm_subdev_func
+nvkm_gpio = {
+	.dtor = nvkm_gpio_dtor,
+	.init = nvkm_gpio_init,
+	.fini = nvkm_gpio_fini,
+	.intr = nvkm_gpio_intr,
+};
 
 int
-_nvkm_gpio_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+nvkm_gpio_new_(const struct nvkm_gpio_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_gpio **pgpio)
 {
 	struct nvkm_gpio *gpio;
-	int ret;
 
-	ret = nvkm_gpio_create(parent, engine, oclass, &gpio);
-	*pobject = nv_object(gpio);
-	if (ret)
-		return ret;
+	if (!(gpio = *pgpio = kzalloc(sizeof(*gpio), GFP_KERNEL)))
+		return -ENOMEM;
 
-	return 0;
+	nvkm_subdev_ctor(&nvkm_gpio, device, index, 0, &gpio->subdev);
+	gpio->func = func;
+
+	return nvkm_event_init(&nvkm_gpio_intr_func, 2, func->lines,
+			       &gpio->event);
 }
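
The hunk above replaces the old pattern of recovering an implementation table by casting the object class (`nv_object(gpio)->oclass`) with a `const` function table stored on the object at construction and dispatched as `gpio->func->intr_mask(...)`. Below is a minimal userspace sketch of that pattern, assuming invented demo names (`gpio_func`, `gpio_new`, `demo_intr_mask`) rather than the real nvkm API; it is an illustration of the dispatch style, not kernel code.

#include <stdio.h>
#include <stdlib.h>

struct gpio;

struct gpio_func {
	int lines;
	void (*intr_mask)(struct gpio *, int line, int on);
};

struct gpio {
	const struct gpio_func *func;	/* set once in the constructor */
};

static void
demo_intr_mask(struct gpio *gpio, int line, int on)
{
	printf("mask line %d -> %s\n", line, on ? "on" : "off");
}

static const struct gpio_func demo_func = {
	.lines = 32,
	.intr_mask = demo_intr_mask,
};

static int
gpio_new(const struct gpio_func *func, struct gpio **pgpio)
{
	/* mirrors the shape of nvkm_gpio_new_(): allocate, attach table */
	if (!(*pgpio = calloc(1, sizeof(**pgpio))))
		return -1;
	(*pgpio)->func = func;
	return 0;
}

int
main(void)
{
	struct gpio *gpio;
	if (gpio_new(&demo_func, &gpio))
		return 1;
	gpio->func->intr_mask(gpio, 3, 1);	/* one indirection, no casts */
	free(gpio);
	return 0;
}
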
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
index 12b3e01..6dcda55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/g94.c
@@ -26,21 +26,23 @@
 void
 g94_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
 {
-	u32 intr0 = nv_rd32(gpio, 0x00e054);
-	u32 intr1 = nv_rd32(gpio, 0x00e074);
-	u32 stat0 = nv_rd32(gpio, 0x00e050) & intr0;
-	u32 stat1 = nv_rd32(gpio, 0x00e070) & intr1;
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 intr0 = nvkm_rd32(device, 0x00e054);
+	u32 intr1 = nvkm_rd32(device, 0x00e074);
+	u32 stat0 = nvkm_rd32(device, 0x00e050) & intr0;
+	u32 stat1 = nvkm_rd32(device, 0x00e070) & intr1;
 	*lo = (stat1 & 0xffff0000) | (stat0 >> 16);
 	*hi = (stat1 << 16) | (stat0 & 0x0000ffff);
-	nv_wr32(gpio, 0x00e054, intr0);
-	nv_wr32(gpio, 0x00e074, intr1);
+	nvkm_wr32(device, 0x00e054, intr0);
+	nvkm_wr32(device, 0x00e074, intr1);
 }
 
 void
 g94_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
 {
-	u32 inte0 = nv_rd32(gpio, 0x00e050);
-	u32 inte1 = nv_rd32(gpio, 0x00e070);
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 inte0 = nvkm_rd32(device, 0x00e050);
+	u32 inte1 = nvkm_rd32(device, 0x00e070);
 	if (type & NVKM_GPIO_LO)
 		inte0 = (inte0 & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
@@ -51,23 +53,22 @@
 		inte1 = (inte1 & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
 		inte1 = (inte1 & ~mask) | data;
-	nv_wr32(gpio, 0x00e050, inte0);
-	nv_wr32(gpio, 0x00e070, inte1);
+	nvkm_wr32(device, 0x00e050, inte0);
+	nvkm_wr32(device, 0x00e070, inte1);
 }
 
-struct nvkm_oclass *
-g94_gpio_oclass = &(struct nvkm_gpio_impl) {
-	.base.handle = NV_SUBDEV(GPIO, 0x94),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpio_ctor,
-		.dtor = _nvkm_gpio_dtor,
-		.init = _nvkm_gpio_init,
-		.fini = _nvkm_gpio_fini,
-	},
+static const struct nvkm_gpio_func
+g94_gpio = {
 	.lines = 32,
 	.intr_stat = g94_gpio_intr_stat,
 	.intr_mask = g94_gpio_intr_mask,
 	.drive = nv50_gpio_drive,
 	.sense = nv50_gpio_sense,
 	.reset = nv50_gpio_reset,
-}.base;
+};
+
+int
+g94_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+	return nvkm_gpio_new_(&g94_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c
deleted file mode 100644
index 2c3bb25..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf110.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-void
-gf110_gpio_reset(struct nvkm_gpio *gpio, u8 match)
-{
-	struct nvkm_bios *bios = nvkm_bios(gpio);
-	u8 ver, len;
-	u16 entry;
-	int ent = -1;
-
-	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
-		u32 data = nv_ro32(bios, entry);
-		u8  line =   (data & 0x0000003f);
-		u8  defs = !!(data & 0x00000080);
-		u8  func =   (data & 0x0000ff00) >> 8;
-		u8  unk0 =   (data & 0x00ff0000) >> 16;
-		u8  unk1 =   (data & 0x1f000000) >> 24;
-
-		if ( func  == DCB_GPIO_UNUSED ||
-		    (match != DCB_GPIO_UNUSED && match != func))
-			continue;
-
-		gpio->set(gpio, 0, func, line, defs);
-
-		nv_mask(gpio, 0x00d610 + (line * 4), 0xff, unk0);
-		if (unk1--)
-			nv_mask(gpio, 0x00d740 + (unk1 * 4), 0xff, line);
-	}
-}
-
-int
-gf110_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
-{
-	u32 data = ((dir ^ 1) << 13) | (out << 12);
-	nv_mask(gpio, 0x00d610 + (line * 4), 0x00003000, data);
-	nv_mask(gpio, 0x00d604, 0x00000001, 0x00000001); /* update? */
-	return 0;
-}
-
-int
-gf110_gpio_sense(struct nvkm_gpio *gpio, int line)
-{
-	return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000);
-}
-
-struct nvkm_oclass *
-gf110_gpio_oclass = &(struct nvkm_gpio_impl) {
-	.base.handle = NV_SUBDEV(GPIO, 0xd0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpio_ctor,
-		.dtor = _nvkm_gpio_dtor,
-		.init = _nvkm_gpio_init,
-		.fini = _nvkm_gpio_fini,
-	},
-	.lines = 32,
-	.intr_stat = g94_gpio_intr_stat,
-	.intr_mask = g94_gpio_intr_mask,
-	.drive = gf110_gpio_drive,
-	.sense = gf110_gpio_sense,
-	.reset = gf110_gpio_reset,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
new file mode 100644
index 0000000..bb7400d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+void
+gf119_gpio_reset(struct nvkm_gpio *gpio, u8 match)
+{
+	struct nvkm_device *device = gpio->subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	u8 ver, len;
+	u16 entry;
+	int ent = -1;
+
+	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
+		u32 data = nvbios_rd32(bios, entry);
+		u8  line =   (data & 0x0000003f);
+		u8  defs = !!(data & 0x00000080);
+		u8  func =   (data & 0x0000ff00) >> 8;
+		u8  unk0 =   (data & 0x00ff0000) >> 16;
+		u8  unk1 =   (data & 0x1f000000) >> 24;
+
+		if ( func  == DCB_GPIO_UNUSED ||
+		    (match != DCB_GPIO_UNUSED && match != func))
+			continue;
+
+		nvkm_gpio_set(gpio, 0, func, line, defs);
+
+		nvkm_mask(device, 0x00d610 + (line * 4), 0xff, unk0);
+		if (unk1--)
+			nvkm_mask(device, 0x00d740 + (unk1 * 4), 0xff, line);
+	}
+}
+
+int
+gf119_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
+{
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 data = ((dir ^ 1) << 13) | (out << 12);
+	nvkm_mask(device, 0x00d610 + (line * 4), 0x00003000, data);
+	nvkm_mask(device, 0x00d604, 0x00000001, 0x00000001); /* update? */
+	return 0;
+}
+
+int
+gf119_gpio_sense(struct nvkm_gpio *gpio, int line)
+{
+	struct nvkm_device *device = gpio->subdev.device;
+	return !!(nvkm_rd32(device, 0x00d610 + (line * 4)) & 0x00004000);
+}
+
+static const struct nvkm_gpio_func
+gf119_gpio = {
+	.lines = 32,
+	.intr_stat = g94_gpio_intr_stat,
+	.intr_mask = g94_gpio_intr_mask,
+	.drive = gf119_gpio_drive,
+	.sense = gf119_gpio_sense,
+	.reset = gf119_gpio_reset,
+};
+
+int
+gf119_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+	return nvkm_gpio_new_(&gf119_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
index 42fd2fa..3f45afd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
@@ -26,21 +26,23 @@
 static void
 gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
 {
-	u32 intr0 = nv_rd32(gpio, 0x00dc00);
-	u32 intr1 = nv_rd32(gpio, 0x00dc80);
-	u32 stat0 = nv_rd32(gpio, 0x00dc08) & intr0;
-	u32 stat1 = nv_rd32(gpio, 0x00dc88) & intr1;
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 intr0 = nvkm_rd32(device, 0x00dc00);
+	u32 intr1 = nvkm_rd32(device, 0x00dc80);
+	u32 stat0 = nvkm_rd32(device, 0x00dc08) & intr0;
+	u32 stat1 = nvkm_rd32(device, 0x00dc88) & intr1;
 	*lo = (stat1 & 0xffff0000) | (stat0 >> 16);
 	*hi = (stat1 << 16) | (stat0 & 0x0000ffff);
-	nv_wr32(gpio, 0x00dc00, intr0);
-	nv_wr32(gpio, 0x00dc80, intr1);
+	nvkm_wr32(device, 0x00dc00, intr0);
+	nvkm_wr32(device, 0x00dc80, intr1);
 }
 
 void
 gk104_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
 {
-	u32 inte0 = nv_rd32(gpio, 0x00dc08);
-	u32 inte1 = nv_rd32(gpio, 0x00dc88);
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 inte0 = nvkm_rd32(device, 0x00dc08);
+	u32 inte1 = nvkm_rd32(device, 0x00dc88);
 	if (type & NVKM_GPIO_LO)
 		inte0 = (inte0 & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
@@ -51,23 +53,22 @@
 		inte1 = (inte1 & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
 		inte1 = (inte1 & ~mask) | data;
-	nv_wr32(gpio, 0x00dc08, inte0);
-	nv_wr32(gpio, 0x00dc88, inte1);
+	nvkm_wr32(device, 0x00dc08, inte0);
+	nvkm_wr32(device, 0x00dc88, inte1);
 }
 
-struct nvkm_oclass *
-gk104_gpio_oclass = &(struct nvkm_gpio_impl) {
-	.base.handle = NV_SUBDEV(GPIO, 0xe0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpio_ctor,
-		.dtor = _nvkm_gpio_dtor,
-		.init = _nvkm_gpio_init,
-		.fini = _nvkm_gpio_fini,
-	},
+static const struct nvkm_gpio_func
+gk104_gpio = {
 	.lines = 32,
 	.intr_stat = gk104_gpio_intr_stat,
 	.intr_mask = gk104_gpio_intr_mask,
-	.drive = gf110_gpio_drive,
-	.sense = gf110_gpio_sense,
-	.reset = gf110_gpio_reset,
-}.base;
+	.drive = gf119_gpio_drive,
+	.sense = gf119_gpio_sense,
+	.reset = gf119_gpio_reset,
+};
+
+int
+gk104_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+	return nvkm_gpio_new_(&gk104_gpio, device, index, pgpio);
+}
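
In both g94_gpio_intr_stat() and gk104_gpio_intr_stat() above, each 32-bit status word covers 16 GPIO lines, with "hi" events in its low half and "lo" events in its high half; the two shifts fold the pair into one 32-bit mask per direction. This standalone sketch checks that shuffle with assumed demo values (the register layout is taken from the hunks above, everything else is invented for the test):

#include <assert.h>
#include <stdint.h>

static void
split(uint32_t stat0, uint32_t stat1, uint32_t *hi, uint32_t *lo)
{
	/* same expressions as the kernel code above */
	*lo = (stat1 & 0xffff0000) | (stat0 >> 16);
	*hi = (stat1 << 16) | (stat0 & 0x0000ffff);
}

int
main(void)
{
	uint32_t hi, lo;

	/* line 0 went hi (stat0 bit 0), line 31 went lo (stat1 bit 31) */
	split(0x00000001, 0x80000000, &hi, &lo);
	assert(hi == 0x00000001);	/* bit 0: line 0 hi */
	assert(lo == 0x80000000);	/* bit 31: line 31 lo */

	/* line 16 went hi: stat1 bit 0 lands at hi bit 16 */
	split(0x00000000, 0x00000001, &hi, &lo);
	assert(hi == 0x00010000);
	assert(lo == 0x00000000);
	return 0;
}
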
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
index 2b29515..ae3499b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv10.c
@@ -28,19 +28,20 @@
 static int
 nv10_gpio_sense(struct nvkm_gpio *gpio, int line)
 {
+	struct nvkm_device *device = gpio->subdev.device;
 	if (line < 2) {
 		line = line * 16;
-		line = nv_rd32(gpio, 0x600818) >> line;
+		line = nvkm_rd32(device, 0x600818) >> line;
 		return !!(line & 0x0100);
 	} else
 	if (line < 10) {
 		line = (line - 2) * 4;
-		line = nv_rd32(gpio, 0x60081c) >> line;
+		line = nvkm_rd32(device, 0x60081c) >> line;
 		return !!(line & 0x04);
 	} else
 	if (line < 14) {
 		line = (line - 10) * 4;
-		line = nv_rd32(gpio, 0x600850) >> line;
+		line = nvkm_rd32(device, 0x600850) >> line;
 		return !!(line & 0x04);
 	}
 
@@ -50,6 +51,7 @@
 static int
 nv10_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
 {
+	struct nvkm_device *device = gpio->subdev.device;
 	u32 reg, mask, data;
 
 	if (line < 2) {
@@ -73,43 +75,44 @@
 		return -EINVAL;
 	}
 
-	nv_mask(gpio, reg, mask << line, data << line);
+	nvkm_mask(device, reg, mask << line, data << line);
 	return 0;
 }
 
 static void
 nv10_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
 {
-	u32 intr = nv_rd32(gpio, 0x001104);
-	u32 stat = nv_rd32(gpio, 0x001144) & intr;
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 intr = nvkm_rd32(device, 0x001104);
+	u32 stat = nvkm_rd32(device, 0x001144) & intr;
 	*lo = (stat & 0xffff0000) >> 16;
 	*hi = (stat & 0x0000ffff);
-	nv_wr32(gpio, 0x001104, intr);
+	nvkm_wr32(device, 0x001104, intr);
 }
 
 static void
 nv10_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
 {
-	u32 inte = nv_rd32(gpio, 0x001144);
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 inte = nvkm_rd32(device, 0x001144);
 	if (type & NVKM_GPIO_LO)
 		inte = (inte & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
 		inte = (inte & ~mask) | data;
-	nv_wr32(gpio, 0x001144, inte);
+	nvkm_wr32(device, 0x001144, inte);
 }
 
-struct nvkm_oclass *
-nv10_gpio_oclass = &(struct nvkm_gpio_impl) {
-	.base.handle = NV_SUBDEV(GPIO, 0x10),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpio_ctor,
-		.dtor = _nvkm_gpio_dtor,
-		.init = _nvkm_gpio_init,
-		.fini = _nvkm_gpio_fini,
-	},
+static const struct nvkm_gpio_func
+nv10_gpio = {
 	.lines = 16,
 	.intr_stat = nv10_gpio_intr_stat,
 	.intr_mask = nv10_gpio_intr_mask,
 	.drive = nv10_gpio_drive,
 	.sense = nv10_gpio_sense,
-}.base;
+};
+
+int
+nv10_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+	return nvkm_gpio_new_(&nv10_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
index 6a03103..8996649 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c
@@ -26,14 +26,15 @@
 void
 nv50_gpio_reset(struct nvkm_gpio *gpio, u8 match)
 {
-	struct nvkm_bios *bios = nvkm_bios(gpio);
+	struct nvkm_device *device = gpio->subdev.device;
+	struct nvkm_bios *bios = device->bios;
 	u8 ver, len;
 	u16 entry;
 	int ent = -1;
 
 	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
 		static const u32 regs[] = { 0xe100, 0xe28c };
-		u32 data = nv_ro32(bios, entry);
+		u32 data = nvbios_rd32(bios, entry);
 		u8  line =   (data & 0x0000001f);
 		u8  func =   (data & 0x0000ff00) >> 8;
 		u8  defs = !!(data & 0x01000000);
@@ -47,9 +48,9 @@
 		    (match != DCB_GPIO_UNUSED && match != func))
 			continue;
 
-		gpio->set(gpio, 0, func, line, defs);
+		nvkm_gpio_set(gpio, 0, func, line, defs);
 
-		nv_mask(gpio, reg, 0x00010001 << lsh, val << lsh);
+		nvkm_mask(device, reg, 0x00010001 << lsh, val << lsh);
 	}
 }
 
@@ -69,60 +70,63 @@
 int
 nv50_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
 {
+	struct nvkm_device *device = gpio->subdev.device;
 	u32 reg, shift;
 
 	if (nv50_gpio_location(line, &reg, &shift))
 		return -EINVAL;
 
-	nv_mask(gpio, reg, 3 << shift, (((dir ^ 1) << 1) | out) << shift);
+	nvkm_mask(device, reg, 3 << shift, (((dir ^ 1) << 1) | out) << shift);
 	return 0;
 }
 
 int
 nv50_gpio_sense(struct nvkm_gpio *gpio, int line)
 {
+	struct nvkm_device *device = gpio->subdev.device;
 	u32 reg, shift;
 
 	if (nv50_gpio_location(line, &reg, &shift))
 		return -EINVAL;
 
-	return !!(nv_rd32(gpio, reg) & (4 << shift));
+	return !!(nvkm_rd32(device, reg) & (4 << shift));
 }
 
 static void
 nv50_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
 {
-	u32 intr = nv_rd32(gpio, 0x00e054);
-	u32 stat = nv_rd32(gpio, 0x00e050) & intr;
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 intr = nvkm_rd32(device, 0x00e054);
+	u32 stat = nvkm_rd32(device, 0x00e050) & intr;
 	*lo = (stat & 0xffff0000) >> 16;
 	*hi = (stat & 0x0000ffff);
-	nv_wr32(gpio, 0x00e054, intr);
+	nvkm_wr32(device, 0x00e054, intr);
 }
 
 static void
 nv50_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
 {
-	u32 inte = nv_rd32(gpio, 0x00e050);
+	struct nvkm_device *device = gpio->subdev.device;
+	u32 inte = nvkm_rd32(device, 0x00e050);
 	if (type & NVKM_GPIO_LO)
 		inte = (inte & ~(mask << 16)) | (data << 16);
 	if (type & NVKM_GPIO_HI)
 		inte = (inte & ~mask) | data;
-	nv_wr32(gpio, 0x00e050, inte);
+	nvkm_wr32(device, 0x00e050, inte);
 }
 
-struct nvkm_oclass *
-nv50_gpio_oclass = &(struct nvkm_gpio_impl) {
-	.base.handle = NV_SUBDEV(GPIO, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_gpio_ctor,
-		.dtor = _nvkm_gpio_dtor,
-		.init = _nvkm_gpio_init,
-		.fini = _nvkm_gpio_fini,
-	},
+static const struct nvkm_gpio_func
+nv50_gpio = {
 	.lines = 16,
 	.intr_stat = nv50_gpio_intr_stat,
 	.intr_mask = nv50_gpio_intr_mask,
 	.drive = nv50_gpio_drive,
 	.sense = nv50_gpio_sense,
 	.reset = nv50_gpio_reset,
-}.base;
+};
+
+int
+nv50_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+	return nvkm_gpio_new_(&nv50_gpio, device, index, pgpio);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
index 382f8d4..371bcdb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
@@ -1,33 +1,9 @@
 #ifndef __NVKM_GPIO_PRIV_H__
 #define __NVKM_GPIO_PRIV_H__
+#define nvkm_gpio(p) container_of((p), struct nvkm_gpio, subdev)
 #include <subdev/gpio.h>
 
-#define nvkm_gpio_create(p,e,o,d)                                           \
-	nvkm_gpio_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_gpio_destroy(p) ({                                             \
-	struct nvkm_gpio *gpio = (p);                                       \
-	_nvkm_gpio_dtor(nv_object(gpio));                                   \
-})
-#define nvkm_gpio_init(p) ({                                                \
-	struct nvkm_gpio *gpio = (p);                                       \
-	_nvkm_gpio_init(nv_object(gpio));                                   \
-})
-#define nvkm_gpio_fini(p,s) ({                                              \
-	struct nvkm_gpio *gpio = (p);                                       \
-	_nvkm_gpio_fini(nv_object(gpio), (s));                              \
-})
-
-int  nvkm_gpio_create_(struct nvkm_object *, struct nvkm_object *,
-			  struct nvkm_oclass *, int, void **);
-int  _nvkm_gpio_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-void _nvkm_gpio_dtor(struct nvkm_object *);
-int  _nvkm_gpio_init(struct nvkm_object *);
-int  _nvkm_gpio_fini(struct nvkm_object *, bool);
-
-struct nvkm_gpio_impl {
-	struct nvkm_oclass base;
+struct nvkm_gpio_func {
 	int lines;
 
 	/* read and ack pending interrupts, returning only data
@@ -51,6 +27,9 @@
 	void (*reset)(struct nvkm_gpio *, u8);
 };
 
+int nvkm_gpio_new_(const struct nvkm_gpio_func *, struct nvkm_device *,
+		   int index, struct nvkm_gpio **);
+
 void nv50_gpio_reset(struct nvkm_gpio *, u8);
 int  nv50_gpio_drive(struct nvkm_gpio *, int, int, int);
 int  nv50_gpio_sense(struct nvkm_gpio *, int);
@@ -58,7 +37,7 @@
 void g94_gpio_intr_stat(struct nvkm_gpio *, u32 *, u32 *);
 void g94_gpio_intr_mask(struct nvkm_gpio *, u32, u32, u32);
 
-void gf110_gpio_reset(struct nvkm_gpio *, u8);
-int  gf110_gpio_drive(struct nvkm_gpio *, int, int, int);
-int  gf110_gpio_sense(struct nvkm_gpio *, int);
+void gf119_gpio_reset(struct nvkm_gpio *, u8);
+int  gf119_gpio_drive(struct nvkm_gpio *, int, int, int);
+int  gf119_gpio_sense(struct nvkm_gpio *, int);
 #endif
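
The new `nvkm_gpio(p)` macro in priv.h recovers the outer gpio object from a pointer to its embedded subdev via container_of(), replacing the old `nv_object()` casts. A minimal userspace model of that trick, with demo type names standing in for the real nvkm structures:

#include <assert.h>
#include <stddef.h>

/* simplified; the kernel's container_of() also type-checks the member */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct subdev { int index; };

struct gpio {
	struct subdev subdev;	/* embedded base object */
	int lines;
};

#define to_gpio(p) container_of((p), struct gpio, subdev)

int
main(void)
{
	struct gpio gpio = { .subdev = { .index = 7 }, .lines = 32 };
	struct subdev *sd = &gpio.subdev;	/* what the callbacks receive */

	assert(to_gpio(sd) == &gpio);		/* outer object recovered */
	assert(to_gpio(sd)->lines == 32);
	return 0;
}
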
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index d683074..1f730613 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -1,16 +1,30 @@
 nvkm-y += nvkm/subdev/i2c/base.o
-nvkm-y += nvkm/subdev/i2c/anx9805.o
-nvkm-y += nvkm/subdev/i2c/aux.o
-nvkm-y += nvkm/subdev/i2c/bit.o
-nvkm-y += nvkm/subdev/i2c/pad.o
-nvkm-y += nvkm/subdev/i2c/padnv04.o
-nvkm-y += nvkm/subdev/i2c/padg94.o
-nvkm-y += nvkm/subdev/i2c/padgm204.o
 nvkm-y += nvkm/subdev/i2c/nv04.o
 nvkm-y += nvkm/subdev/i2c/nv4e.o
 nvkm-y += nvkm/subdev/i2c/nv50.o
 nvkm-y += nvkm/subdev/i2c/g94.o
-nvkm-y += nvkm/subdev/i2c/gf110.o
 nvkm-y += nvkm/subdev/i2c/gf117.o
+nvkm-y += nvkm/subdev/i2c/gf119.o
 nvkm-y += nvkm/subdev/i2c/gk104.o
 nvkm-y += nvkm/subdev/i2c/gm204.o
+
+nvkm-y += nvkm/subdev/i2c/pad.o
+nvkm-y += nvkm/subdev/i2c/padnv04.o
+nvkm-y += nvkm/subdev/i2c/padnv4e.o
+nvkm-y += nvkm/subdev/i2c/padnv50.o
+nvkm-y += nvkm/subdev/i2c/padg94.o
+nvkm-y += nvkm/subdev/i2c/padgf119.o
+nvkm-y += nvkm/subdev/i2c/padgm204.o
+
+nvkm-y += nvkm/subdev/i2c/bus.o
+nvkm-y += nvkm/subdev/i2c/busnv04.o
+nvkm-y += nvkm/subdev/i2c/busnv4e.o
+nvkm-y += nvkm/subdev/i2c/busnv50.o
+nvkm-y += nvkm/subdev/i2c/busgf119.o
+nvkm-y += nvkm/subdev/i2c/bit.o
+
+nvkm-y += nvkm/subdev/i2c/aux.o
+nvkm-y += nvkm/subdev/i2c/auxg94.o
+nvkm-y += nvkm/subdev/i2c/auxgm204.o
+
+nvkm-y += nvkm/subdev/i2c/anx9805.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
index d17dd1c..b7b01c3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
@@ -21,184 +21,57 @@
  *
  * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
-#include "port.h"
+#define anx9805_pad(p) container_of((p), struct anx9805_pad, base)
+#define anx9805_bus(p) container_of((p), struct anx9805_bus, base)
+#define anx9805_aux(p) container_of((p), struct anx9805_aux, base)
+#include "aux.h"
+#include "bus.h"
 
-struct anx9805_i2c_port {
-	struct nvkm_i2c_port base;
-	u32 addr;
-	u32 ctrl;
+struct anx9805_pad {
+	struct nvkm_i2c_pad base;
+	struct nvkm_i2c_bus *bus;
+	u8 addr;
+};
+
+struct anx9805_bus {
+	struct nvkm_i2c_bus base;
+	struct anx9805_pad *pad;
+	u8 addr;
 };
 
 static int
-anx9805_train(struct nvkm_i2c_port *port, int link_nr, int link_bw, bool enh)
+anx9805_bus_xfer(struct nvkm_i2c_bus *base, struct i2c_msg *msgs, int num)
 {
-	struct anx9805_i2c_port *chan = (void *)port;
-	struct nvkm_i2c_port *mast = (void *)nv_object(chan)->parent;
-	u8 tmp, i;
-
-	DBG("ANX9805 train %d 0x%02x %d\n", link_nr, link_bw, enh);
-
-	nv_wri2cr(mast, chan->addr, 0xa0, link_bw);
-	nv_wri2cr(mast, chan->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
-	nv_wri2cr(mast, chan->addr, 0xa2, 0x01);
-	nv_wri2cr(mast, chan->addr, 0xa8, 0x01);
-
-	i = 0;
-	while ((tmp = nv_rdi2cr(mast, chan->addr, 0xa8)) & 0x01) {
-		mdelay(5);
-		if (i++ == 100) {
-			nv_error(port, "link training timed out\n");
-			return -ETIMEDOUT;
-		}
-	}
-
-	if (tmp & 0x70) {
-		nv_error(port, "link training failed: 0x%02x\n", tmp);
-		return -EIO;
-	}
-
-	return 1;
-}
-
-static int
-anx9805_aux(struct nvkm_i2c_port *port, bool retry,
-	    u8 type, u32 addr, u8 *data, u8 size)
-{
-	struct anx9805_i2c_port *chan = (void *)port;
-	struct nvkm_i2c_port *mast = (void *)nv_object(chan)->parent;
-	int i, ret = -ETIMEDOUT;
-	u8 buf[16] = {};
-	u8 tmp;
-
-	DBG("%02x %05x %d\n", type, addr, size);
-
-	tmp = nv_rdi2cr(mast, chan->ctrl, 0x07) & ~0x04;
-	nv_wri2cr(mast, chan->ctrl, 0x07, tmp | 0x04);
-	nv_wri2cr(mast, chan->ctrl, 0x07, tmp);
-	nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
-
-	nv_wri2cr(mast, chan->addr, 0xe4, 0x80);
-	if (!(type & 1)) {
-		memcpy(buf, data, size);
-		DBG("%16ph", buf);
-		for (i = 0; i < size; i++)
-			nv_wri2cr(mast, chan->addr, 0xf0 + i, buf[i]);
-	}
-	nv_wri2cr(mast, chan->addr, 0xe5, ((size - 1) << 4) | type);
-	nv_wri2cr(mast, chan->addr, 0xe6, (addr & 0x000ff) >>  0);
-	nv_wri2cr(mast, chan->addr, 0xe7, (addr & 0x0ff00) >>  8);
-	nv_wri2cr(mast, chan->addr, 0xe8, (addr & 0xf0000) >> 16);
-	nv_wri2cr(mast, chan->addr, 0xe9, 0x01);
-
-	i = 0;
-	while ((tmp = nv_rdi2cr(mast, chan->addr, 0xe9)) & 0x01) {
-		mdelay(5);
-		if (i++ == 32)
-			goto done;
-	}
-
-	if ((tmp = nv_rdi2cr(mast, chan->ctrl, 0xf7)) & 0x01) {
-		ret = -EIO;
-		goto done;
-	}
-
-	if (type & 1) {
-		for (i = 0; i < size; i++)
-			buf[i] = nv_rdi2cr(mast, chan->addr, 0xf0 + i);
-		DBG("%16ph", buf);
-		memcpy(data, buf, size);
-	}
-
-	ret = 0;
-done:
-	nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
-	return ret;
-}
-
-static const struct nvkm_i2c_func
-anx9805_aux_func = {
-	.aux = anx9805_aux,
-	.lnk_ctl = anx9805_train,
-};
-
-static int
-anx9805_aux_chan_ctor(struct nvkm_object *parent,
-		      struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *data, u32 index,
-		      struct nvkm_object **pobject)
-{
-	struct nvkm_i2c_port *mast = (void *)parent;
-	struct anx9805_i2c_port *chan;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_aux_algo, &anx9805_aux_func,
-				   &chan);
-	*pobject = nv_object(chan);
-	if (ret)
-		return ret;
-
-	switch ((oclass->handle & 0xff00) >> 8) {
-	case 0x0d:
-		chan->addr = 0x38;
-		chan->ctrl = 0x39;
-		break;
-	case 0x0e:
-		chan->addr = 0x3c;
-		chan->ctrl = 0x3b;
-		break;
-	default:
-		BUG_ON(1);
-	}
-
-	if (mast->adapter.algo == &i2c_bit_algo) {
-		struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
-		algo->udelay = max(algo->udelay, 40);
-	}
-
-	return 0;
-}
-
-static struct nvkm_ofuncs
-anx9805_aux_ofuncs = {
-	.ctor =  anx9805_aux_chan_ctor,
-	.dtor = _nvkm_i2c_port_dtor,
-	.init = _nvkm_i2c_port_init,
-	.fini = _nvkm_i2c_port_fini,
-};
-
-static int
-anx9805_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
-{
-	struct anx9805_i2c_port *port = adap->algo_data;
-	struct nvkm_i2c_port *mast = (void *)nv_object(port)->parent;
+	struct anx9805_bus *bus = anx9805_bus(base);
+	struct anx9805_pad *pad = bus->pad;
+	struct i2c_adapter *adap = &pad->bus->i2c;
 	struct i2c_msg *msg = msgs;
 	int ret = -ETIMEDOUT;
 	int i, j, cnt = num;
 	u8 seg = 0x00, off = 0x00, tmp;
 
-	tmp = nv_rdi2cr(mast, port->ctrl, 0x07) & ~0x10;
-	nv_wri2cr(mast, port->ctrl, 0x07, tmp | 0x10);
-	nv_wri2cr(mast, port->ctrl, 0x07, tmp);
-	nv_wri2cr(mast, port->addr, 0x43, 0x05);
+	tmp = nvkm_rdi2cr(adap, pad->addr, 0x07) & ~0x10;
+	nvkm_wri2cr(adap, pad->addr, 0x07, tmp | 0x10);
+	nvkm_wri2cr(adap, pad->addr, 0x07, tmp);
+	nvkm_wri2cr(adap, bus->addr, 0x43, 0x05);
 	mdelay(5);
 
 	while (cnt--) {
 		if ( (msg->flags & I2C_M_RD) && msg->addr == 0x50) {
-			nv_wri2cr(mast, port->addr, 0x40, msg->addr << 1);
-			nv_wri2cr(mast, port->addr, 0x41, seg);
-			nv_wri2cr(mast, port->addr, 0x42, off);
-			nv_wri2cr(mast, port->addr, 0x44, msg->len);
-			nv_wri2cr(mast, port->addr, 0x45, 0x00);
-			nv_wri2cr(mast, port->addr, 0x43, 0x01);
+			nvkm_wri2cr(adap, bus->addr, 0x40, msg->addr << 1);
+			nvkm_wri2cr(adap, bus->addr, 0x41, seg);
+			nvkm_wri2cr(adap, bus->addr, 0x42, off);
+			nvkm_wri2cr(adap, bus->addr, 0x44, msg->len);
+			nvkm_wri2cr(adap, bus->addr, 0x45, 0x00);
+			nvkm_wri2cr(adap, bus->addr, 0x43, 0x01);
 			for (i = 0; i < msg->len; i++) {
 				j = 0;
-				while (nv_rdi2cr(mast, port->addr, 0x46) & 0x10) {
+				while (nvkm_rdi2cr(adap, bus->addr, 0x46) & 0x10) {
 					mdelay(5);
 					if (j++ == 32)
 						goto done;
 				}
-				msg->buf[i] = nv_rdi2cr(mast, port->addr, 0x47);
+				msg->buf[i] = nvkm_rdi2cr(adap, bus->addr, 0x47);
 			}
 		} else
 		if (!(msg->flags & I2C_M_RD)) {
@@ -217,76 +90,189 @@
 
 	ret = num;
 done:
-	nv_wri2cr(mast, port->addr, 0x43, 0x00);
+	nvkm_wri2cr(adap, bus->addr, 0x43, 0x00);
 	return ret;
 }
 
-static u32
-anx9805_func(struct i2c_adapter *adap)
-{
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-static const struct i2c_algorithm
-anx9805_i2c_algo = {
-	.master_xfer = anx9805_xfer,
-	.functionality = anx9805_func
-};
-
-static const struct nvkm_i2c_func
-anx9805_i2c_func = {
+static const struct nvkm_i2c_bus_func
+anx9805_bus_func = {
+	.xfer = anx9805_bus_xfer,
 };
 
 static int
-anx9805_ddc_port_ctor(struct nvkm_object *parent,
-		      struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, void *data, u32 index,
-		      struct nvkm_object **pobject)
+anx9805_bus_new(struct nvkm_i2c_pad *base, int id, u8 drive,
+		struct nvkm_i2c_bus **pbus)
 {
-	struct nvkm_i2c_port *mast = (void *)parent;
-	struct anx9805_i2c_port *port;
+	struct anx9805_pad *pad = anx9805_pad(base);
+	struct anx9805_bus *bus;
 	int ret;
 
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &anx9805_i2c_algo, &anx9805_i2c_func, &port);
-	*pobject = nv_object(port);
+	if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &bus->base;
+	bus->pad = pad;
+
+	ret = nvkm_i2c_bus_ctor(&anx9805_bus_func, &pad->base, id, &bus->base);
 	if (ret)
 		return ret;
 
-	switch ((oclass->handle & 0xff00) >> 8) {
-	case 0x0d:
-		port->addr = 0x3d;
-		port->ctrl = 0x39;
-		break;
-	case 0x0e:
-		port->addr = 0x3f;
-		port->ctrl = 0x3b;
-		break;
+	switch (pad->addr) {
+	case 0x39: bus->addr = 0x3d; break;
+	case 0x3b: bus->addr = 0x3f; break;
 	default:
-		BUG_ON(1);
-	}
-
-	if (mast->adapter.algo == &i2c_bit_algo) {
-		struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
-		algo->udelay = max(algo->udelay, 40);
+		return -ENOSYS;
 	}
 
 	return 0;
 }
 
-static struct nvkm_ofuncs
-anx9805_ddc_ofuncs = {
-	.ctor =  anx9805_ddc_port_ctor,
-	.dtor = _nvkm_i2c_port_dtor,
-	.init = _nvkm_i2c_port_init,
-	.fini = _nvkm_i2c_port_fini,
+struct anx9805_aux {
+	struct nvkm_i2c_aux base;
+	struct anx9805_pad *pad;
+	u8 addr;
 };
 
-struct nvkm_oclass
-nvkm_anx9805_sclass[] = {
-	{ .handle = NV_I2C_TYPE_EXTDDC(0x0d), .ofuncs = &anx9805_ddc_ofuncs },
-	{ .handle = NV_I2C_TYPE_EXTAUX(0x0d), .ofuncs = &anx9805_aux_ofuncs },
-	{ .handle = NV_I2C_TYPE_EXTDDC(0x0e), .ofuncs = &anx9805_ddc_ofuncs },
-	{ .handle = NV_I2C_TYPE_EXTAUX(0x0e), .ofuncs = &anx9805_aux_ofuncs },
-	{}
+static int
+anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
+		 u8 type, u32 addr, u8 *data, u8 size)
+{
+	struct anx9805_aux *aux = anx9805_aux(base);
+	struct anx9805_pad *pad = aux->pad;
+	struct i2c_adapter *adap = &pad->bus->i2c;
+	int i, ret = -ETIMEDOUT;
+	u8 buf[16] = {};
+	u8 tmp;
+
+	AUX_DBG(&aux->base, "%02x %05x %d", type, addr, size);
+
+	tmp = nvkm_rdi2cr(adap, pad->addr, 0x07) & ~0x04;
+	nvkm_wri2cr(adap, pad->addr, 0x07, tmp | 0x04);
+	nvkm_wri2cr(adap, pad->addr, 0x07, tmp);
+	nvkm_wri2cr(adap, pad->addr, 0xf7, 0x01);
+
+	nvkm_wri2cr(adap, aux->addr, 0xe4, 0x80);
+	if (!(type & 1)) {
+		memcpy(buf, data, size);
+		AUX_DBG(&aux->base, "%16ph", buf);
+		for (i = 0; i < size; i++)
+			nvkm_wri2cr(adap, aux->addr, 0xf0 + i, buf[i]);
+	}
+	nvkm_wri2cr(adap, aux->addr, 0xe5, ((size - 1) << 4) | type);
+	nvkm_wri2cr(adap, aux->addr, 0xe6, (addr & 0x000ff) >>  0);
+	nvkm_wri2cr(adap, aux->addr, 0xe7, (addr & 0x0ff00) >>  8);
+	nvkm_wri2cr(adap, aux->addr, 0xe8, (addr & 0xf0000) >> 16);
+	nvkm_wri2cr(adap, aux->addr, 0xe9, 0x01);
+
+	i = 0;
+	while ((tmp = nvkm_rdi2cr(adap, aux->addr, 0xe9)) & 0x01) {
+		mdelay(5);
+		if (i++ == 32)
+			goto done;
+	}
+
+	if ((tmp = nvkm_rdi2cr(adap, pad->addr, 0xf7)) & 0x01) {
+		ret = -EIO;
+		goto done;
+	}
+
+	if (type & 1) {
+		for (i = 0; i < size; i++)
+			buf[i] = nvkm_rdi2cr(adap, aux->addr, 0xf0 + i);
+		AUX_DBG(&aux->base, "%16ph", buf);
+		memcpy(data, buf, size);
+	}
+
+	ret = 0;
+done:
+	nvkm_wri2cr(adap, pad->addr, 0xf7, 0x01);
+	return ret;
+}
+
+static int
+anx9805_aux_lnk_ctl(struct nvkm_i2c_aux *base,
+		    int link_nr, int link_bw, bool enh)
+{
+	struct anx9805_aux *aux = anx9805_aux(base);
+	struct anx9805_pad *pad = aux->pad;
+	struct i2c_adapter *adap = &pad->bus->i2c;
+	u8 tmp, i;
+
+	AUX_DBG(&aux->base, "ANX9805 train %d %02x %d",
+		link_nr, link_bw, enh);
+
+	nvkm_wri2cr(adap, aux->addr, 0xa0, link_bw);
+	nvkm_wri2cr(adap, aux->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
+	nvkm_wri2cr(adap, aux->addr, 0xa2, 0x01);
+	nvkm_wri2cr(adap, aux->addr, 0xa8, 0x01);
+
+	i = 0;
+	while ((tmp = nvkm_rdi2cr(adap, aux->addr, 0xa8)) & 0x01) {
+		mdelay(5);
+		if (i++ == 100) {
+			AUX_ERR(&aux->base, "link training timeout");
+			return -ETIMEDOUT;
+		}
+	}
+
+	if (tmp & 0x70) {
+		AUX_ERR(&aux->base, "link training failed");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static const struct nvkm_i2c_aux_func
+anx9805_aux_func = {
+	.xfer = anx9805_aux_xfer,
+	.lnk_ctl = anx9805_aux_lnk_ctl,
 };
+
+static int
+anx9805_aux_new(struct nvkm_i2c_pad *base, int id, u8 drive,
+		struct nvkm_i2c_aux **pbus)
+{
+	struct anx9805_pad *pad = anx9805_pad(base);
+	struct anx9805_aux *aux;
+	int ret;
+
+	if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &aux->base;
+	aux->pad = pad;
+
+	ret = nvkm_i2c_aux_ctor(&anx9805_aux_func, &pad->base, id, &aux->base);
+	if (ret)
+		return ret;
+
+	switch (pad->addr) {
+	case 0x39: aux->addr = 0x38; break;
+	case 0x3b: aux->addr = 0x3c; break;
+	default:
+		return -ENOSYS;
+	}
+
+	return 0;
+}
+
+static const struct nvkm_i2c_pad_func
+anx9805_pad_func = {
+	.bus_new_4 = anx9805_bus_new,
+	.aux_new_6 = anx9805_aux_new,
+};
+
+int
+anx9805_pad_new(struct nvkm_i2c_bus *bus, int id, u8 addr,
+		struct nvkm_i2c_pad **ppad)
+{
+	struct anx9805_pad *pad;
+
+	if (!(pad = kzalloc(sizeof(*pad), GFP_KERNEL)))
+		return -ENOMEM;
+	*ppad = &pad->base;
+
+	nvkm_i2c_pad_ctor(&anx9805_pad_func, bus->pad->i2c, id, &pad->base);
+	pad->bus = bus;
+	pad->addr = addr;
+	return 0;
+}
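
The anx9805 rewrite above stops encoding chip addresses in oclass handles: the pad object carries the control address, and the bus (DDC) and aux clients derive their own addresses from it (0x39 pairs with 0x3d/0x38, 0x3b with 0x3f/0x3c). A small sketch of that derivation, with an invented helper name; the address pairs are taken from the switch statements above, the rest is demo scaffolding:

#include <assert.h>

static int
anx9805_addrs(unsigned char pad_addr, unsigned char *ddc, unsigned char *aux)
{
	switch (pad_addr) {
	case 0x39: *ddc = 0x3d; *aux = 0x38; return 0;
	case 0x3b: *ddc = 0x3f; *aux = 0x3c; return 0;
	default:   return -1;	/* unknown pad: mirrors the -ENOSYS above */
	}
}

int
main(void)
{
	unsigned char ddc, aux;
	assert(anx9805_addrs(0x39, &ddc, &aux) == 0 && ddc == 0x3d && aux == 0x38);
	assert(anx9805_addrs(0x3b, &ddc, &aux) == 0 && ddc == 0x3f && aux == 0x3c);
	assert(anx9805_addrs(0x40, &ddc, &aux) < 0);
	return 0;
}
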
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index 1c18860..f0851d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -21,50 +21,17 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
-
-int
-nv_rdaux(struct nvkm_i2c_port *port, u32 addr, u8 *data, u8 size)
-{
-	struct nvkm_i2c *i2c = nvkm_i2c(port);
-	if (port->func->aux) {
-		int ret = i2c->acquire(port, 0);
-		if (ret == 0) {
-			ret = port->func->aux(port, true, 9, addr, data, size);
-			i2c->release(port);
-		}
-		return ret;
-	}
-	return -ENODEV;
-}
-
-int
-nv_wraux(struct nvkm_i2c_port *port, u32 addr, u8 *data, u8 size)
-{
-	struct nvkm_i2c *i2c = nvkm_i2c(port);
-	if (port->func->aux) {
-		int ret = i2c->acquire(port, 0);
-		if (ret == 0) {
-			ret = port->func->aux(port, true, 8, addr, data, size);
-			i2c->release(port);
-		}
-		return ret;
-	}
-	return -ENODEV;
-}
+#include "aux.h"
+#include "pad.h"
 
 static int
-aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 {
-	struct nvkm_i2c_port *port = adap->algo_data;
-	struct nvkm_i2c *i2c = nvkm_i2c(port);
+	struct nvkm_i2c_aux *aux = container_of(adap, typeof(*aux), i2c);
 	struct i2c_msg *msg = msgs;
 	int ret, mcnt = num;
 
-	if (!port->func->aux)
-		return -ENODEV;
-
-	ret = i2c->acquire(port, 0);
+	ret = nvkm_i2c_aux_acquire(aux);
 	if (ret)
 		return ret;
 
@@ -84,9 +51,9 @@
 			if (mcnt || remaining > 16)
 				cmd |= 4; /* MOT */
 
-			ret = port->func->aux(port, true, cmd, msg->addr, ptr, cnt);
+			ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, cnt);
 			if (ret < 0) {
-				i2c->release(port);
+				nvkm_i2c_aux_release(aux);
 				return ret;
 			}
 
@@ -97,17 +64,111 @@
 		msg++;
 	}
 
-	i2c->release(port);
+	nvkm_i2c_aux_release(aux);
 	return num;
 }
 
 static u32
-aux_func(struct i2c_adapter *adap)
+nvkm_i2c_aux_i2c_func(struct i2c_adapter *adap)
 {
 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
-const struct i2c_algorithm nvkm_i2c_aux_algo = {
-	.master_xfer = aux_xfer,
-	.functionality = aux_func
+const struct i2c_algorithm
+nvkm_i2c_aux_i2c_algo = {
+	.master_xfer = nvkm_i2c_aux_i2c_xfer,
+	.functionality = nvkm_i2c_aux_i2c_func
 };
+
+void
+nvkm_i2c_aux_monitor(struct nvkm_i2c_aux *aux, bool monitor)
+{
+	struct nvkm_i2c_pad *pad = aux->pad;
+	AUX_TRACE(aux, "monitor: %s", monitor ? "yes" : "no");
+	if (monitor)
+		nvkm_i2c_pad_mode(pad, NVKM_I2C_PAD_AUX);
+	else
+		nvkm_i2c_pad_mode(pad, NVKM_I2C_PAD_OFF);
+}
+
+void
+nvkm_i2c_aux_release(struct nvkm_i2c_aux *aux)
+{
+	struct nvkm_i2c_pad *pad = aux->pad;
+	AUX_TRACE(aux, "release");
+	nvkm_i2c_pad_release(pad);
+	mutex_unlock(&aux->mutex);
+}
+
+int
+nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *aux)
+{
+	struct nvkm_i2c_pad *pad = aux->pad;
+	int ret;
+	AUX_TRACE(aux, "acquire");
+	mutex_lock(&aux->mutex);
+	ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX);
+	if (ret)
+		mutex_unlock(&aux->mutex);
+	return ret;
+}
+
+int
+nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type,
+		  u32 addr, u8 *data, u8 size)
+{
+	return aux->func->xfer(aux, retry, type, addr, data, size);
+}
+
+int
+nvkm_i2c_aux_lnk_ctl(struct nvkm_i2c_aux *aux, int nr, int bw, bool ef)
+{
+	if (aux->func->lnk_ctl)
+		return aux->func->lnk_ctl(aux, nr, bw, ef);
+	return -ENODEV;
+}
+
+void
+nvkm_i2c_aux_del(struct nvkm_i2c_aux **paux)
+{
+	struct nvkm_i2c_aux *aux = *paux;
+	if (aux && !WARN_ON(!aux->func)) {
+		AUX_TRACE(aux, "dtor");
+		list_del(&aux->head);
+		i2c_del_adapter(&aux->i2c);
+		kfree(*paux);
+		*paux = NULL;
+	}
+}
+
+int
+nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *func,
+		  struct nvkm_i2c_pad *pad, int id,
+		  struct nvkm_i2c_aux *aux)
+{
+	struct nvkm_device *device = pad->i2c->subdev.device;
+
+	aux->func = func;
+	aux->pad = pad;
+	aux->id = id;
+	mutex_init(&aux->mutex);
+	list_add_tail(&aux->head, &pad->i2c->aux);
+	AUX_TRACE(aux, "ctor");
+
+	snprintf(aux->i2c.name, sizeof(aux->i2c.name), "nvkm-%s-aux-%04x",
+		 dev_name(device->dev), id);
+	aux->i2c.owner = THIS_MODULE;
+	aux->i2c.dev.parent = device->dev;
+	aux->i2c.algo = &nvkm_i2c_aux_i2c_algo;
+	return i2c_add_adapter(&aux->i2c);
+}
+
+int
+nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *func,
+		  struct nvkm_i2c_pad *pad, int id,
+		  struct nvkm_i2c_aux **paux)
+{
+	if (!(*paux = kzalloc(sizeof(**paux), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_i2c_aux_ctor(func, pad, id, *paux);
+}
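
nvkm_i2c_aux_acquire() above takes the per-channel mutex, then tries to claim the shared pad, and drops the mutex again if the pad cannot be had, so a failed acquire never leaves the lock held. A hedged userspace sketch of that lock discipline, using pthreads in place of kernel mutexes and a flag in place of pad arbitration:

#include <pthread.h>
#include <stdio.h>

struct aux {
	pthread_mutex_t mutex;
	int pad_busy;		/* stand-in for nvkm_i2c_pad_acquire() */
};

static int
aux_acquire(struct aux *aux)
{
	pthread_mutex_lock(&aux->mutex);
	if (aux->pad_busy) {		/* pad arbitration failed */
		pthread_mutex_unlock(&aux->mutex);
		return -1;		/* caller sees the error, lock is free */
	}
	return 0;			/* caller must aux_release() */
}

static void
aux_release(struct aux *aux)
{
	pthread_mutex_unlock(&aux->mutex);
}

int
main(void)
{
	struct aux aux = { PTHREAD_MUTEX_INITIALIZER, 0 };

	if (aux_acquire(&aux) == 0) {
		puts("transaction would run here");
		aux_release(&aux);
	}
	return 0;
}
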
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
new file mode 100644
index 0000000..35a892e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -0,0 +1,30 @@
+#ifndef __NVKM_I2C_AUX_H__
+#define __NVKM_I2C_AUX_H__
+#include "pad.h"
+
+struct nvkm_i2c_aux_func {
+	int  (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
+		     u32 addr, u8 *data, u8 size);
+	int  (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw,
+			bool enhanced_framing);
+};
+
+int nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
+		      int id, struct nvkm_i2c_aux *);
+int nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
+		      int id, struct nvkm_i2c_aux **);
+void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
+int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
+		      u32 addr, u8 *data, u8 size);
+
+int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
+int gm204_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
+
+#define AUX_MSG(b,l,f,a...) do {                                               \
+	struct nvkm_i2c_aux *_aux = (b);                                       \
+	nvkm_##l(&_aux->pad->i2c->subdev, "aux %04x: "f"\n", _aux->id, ##a);   \
+} while(0)
+#define AUX_ERR(b,f,a...) AUX_MSG((b), error, f, ##a)
+#define AUX_DBG(b,f,a...) AUX_MSG((b), debug, f, ##a)
+#define AUX_TRACE(b,f,a...) AUX_MSG((b), trace, f, ##a)
+#endif
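
The AUX_MSG family above builds the "aux %04x:" prefix in one variadic macro and lets the per-level wrappers pick the severity (note the GNU named-varargs `a...`/`##a` extension, which the kernel uses throughout). A standalone model with fprintf standing in for the nvkm loggers; everything here is demo scaffolding:

#include <stdio.h>

struct aux { int id; };

#define AUX_MSG(b, l, f, a...) do {                                    \
	struct aux *_aux = (b);                                        \
	fprintf(stderr, "%s: aux %04x: " f "\n", l, _aux->id, ##a);    \
} while (0)
#define AUX_ERR(b, f, a...) AUX_MSG((b), "error", f, ##a)
#define AUX_DBG(b, f, a...) AUX_MSG((b), "debug", f, ##a)

int
main(void)
{
	struct aux aux = { .id = 0x1234 };
	AUX_DBG(&aux, "%02x %05x %d", 9, 0x1000, 16);
	AUX_ERR(&aux, "timeout %08x", 0xdeadbeefu);
	return 0;
}
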
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
new file mode 100644
index 0000000..954f5b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define g94_i2c_aux(p) container_of((p), struct g94_i2c_aux, base)
+#include "aux.h"
+
+struct g94_i2c_aux {
+	struct nvkm_i2c_aux base;
+	int ch;
+};
+
+static void
+g94_i2c_aux_fini(struct g94_i2c_aux *aux)
+{
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	nvkm_mask(device, 0x00e4e4 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+}
+
+static int
+g94_i2c_aux_init(struct g94_i2c_aux *aux)
+{
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	const u32 unksel = 1; /* nfi which to use, or if it matters.. */
+	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
+	const u32 urep = unksel ? 0x01000000 : 0x02000000;
+	u32 ctrl, timeout;
+
+	/* wait up to 1ms for any previous transaction to be done... */
+	timeout = 1000;
+	do {
+		ctrl = nvkm_rd32(device, 0x00e4e4 + (aux->ch * 0x50));
+		udelay(1);
+		if (!timeout--) {
+			AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
+			return -EBUSY;
+		}
+	} while (ctrl & 0x03010000);
+
+	/* set some magic, and wait up to 1ms for it to appear */
+	nvkm_mask(device, 0x00e4e4 + (aux->ch * 0x50), 0x00300000, ureq);
+	timeout = 1000;
+	do {
+		ctrl = nvkm_rd32(device, 0x00e4e4 + (aux->ch * 0x50));
+		udelay(1);
+		if (!timeout--) {
+			AUX_ERR(&aux->base, "magic wait %08x", ctrl);
+			g94_i2c_aux_fini(aux);
+			return -EBUSY;
+		}
+	} while ((ctrl & 0x03000000) != urep);
+
+	return 0;
+}
+
+static int
+g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
+		 u8 type, u32 addr, u8 *data, u8 size)
+{
+	struct g94_i2c_aux *aux = g94_i2c_aux(obj);
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	const u32 base = aux->ch * 0x50;
+	u32 ctrl, stat, timeout, retries;
+	u32 xbuf[4] = {};
+	int ret, i;
+
+	AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
+
+	ret = g94_i2c_aux_init(aux);
+	if (ret < 0)
+		goto out;
+
+	stat = nvkm_rd32(device, 0x00e4e8 + base);
+	if (!(stat & 0x10000000)) {
+		AUX_TRACE(&aux->base, "sink not detected");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	if (!(type & 1)) {
+		memcpy(xbuf, data, size);
+		for (i = 0; i < 16; i += 4) {
+			AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
+			nvkm_wr32(device, 0x00e4c0 + base + i, xbuf[i / 4]);
+		}
+	}
+
+	ctrl  = nvkm_rd32(device, 0x00e4e4 + base);
+	ctrl &= ~0x0001f0ff;
+	ctrl |= type << 12;
+	ctrl |= size - 1;
+	nvkm_wr32(device, 0x00e4e0 + base, addr);
+
+	/* (maybe) retry transaction a number of times on failure... */
+	for (retries = 0; !ret && retries < 32; retries++) {
+		/* reset, and delay a while if this is a retry */
+		nvkm_wr32(device, 0x00e4e4 + base, 0x80000000 | ctrl);
+		nvkm_wr32(device, 0x00e4e4 + base, 0x00000000 | ctrl);
+		if (retries)
+			udelay(400);
+
+		/* transaction request, wait up to 1ms for it to complete */
+		nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl);
+
+		timeout = 1000;
+		do {
+			ctrl = nvkm_rd32(device, 0x00e4e4 + base);
+			udelay(1);
+			if (!timeout--) {
+				AUX_ERR(&aux->base, "timeout %08x", ctrl);
+				ret = -EIO;
+				goto out;
+			}
+		} while (ctrl & 0x00010000);
+		ret = 1;
+
+		/* read status, and check if transaction completed ok */
+		stat = nvkm_mask(device, 0x00e4e8 + base, 0, 0);
+		if ((stat & 0x000f0000) == 0x00080000 ||
+		    (stat & 0x000f0000) == 0x00020000)
+			ret = retry ? 0 : 1;
+		if ((stat & 0x00000100))
+			ret = -ETIMEDOUT;
+		if ((stat & 0x00000e00))
+			ret = -EIO;
+
+		AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
+	}
+
+	if (type & 1) {
+		for (i = 0; i < 16; i += 4) {
+			xbuf[i / 4] = nvkm_rd32(device, 0x00e4d0 + base + i);
+			AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
+		}
+		memcpy(data, xbuf, size);
+	}
+
+out:
+	g94_i2c_aux_fini(aux);
+	return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+}
+
+static const struct nvkm_i2c_aux_func
+g94_i2c_aux_func = {
+	.xfer = g94_i2c_aux_xfer,
+};
+
+int
+g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+		struct nvkm_i2c_aux **paux)
+{
+	struct g94_i2c_aux *aux;
+
+	if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
+		return -ENOMEM;
+	*paux = &aux->base;
+
+	nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base);
+	aux->ch = drive;
+	aux->base.intr = 1 << aux->ch;
+	return 0;
+}
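
g94_i2c_aux_init() and _xfer() above poll hardware with a fixed budget: re-read the register, delay 1us per iteration, and fail once roughly 1ms has elapsed. A sketch of that bounded-poll idiom, with the register read stubbed by a counter so the demo is self-contained:

#include <stdio.h>

static unsigned int fake_reads;

static unsigned int
rd_ctrl(void)
{
	/* pretend the busy bit (0x00010000) clears after 100 reads */
	return ++fake_reads < 100 ? 0x00010000 : 0;
}

static int
wait_idle(void)
{
	unsigned int ctrl, timeout = 1000;	/* ~1ms at 1us per poll */

	do {
		ctrl = rd_ctrl();
		/* udelay(1) would sit here in the kernel version */
		if (!timeout--) {
			fprintf(stderr, "timeout %08x\n", ctrl);
			return -1;
		}
	} while (ctrl & 0x00010000);
	return 0;
}

int
main(void)
{
	return wait_idle() ? 1 : 0;
}
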
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c
new file mode 100644
index 0000000..bed231b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define gm204_i2c_aux(p) container_of((p), struct gm204_i2c_aux, base)
+#include "aux.h"
+
+struct gm204_i2c_aux {
+	struct nvkm_i2c_aux base;
+	int ch;
+};
+
+static void
+gm204_i2c_aux_fini(struct gm204_i2c_aux *aux)
+{
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+}
+
+static int
+gm204_i2c_aux_init(struct gm204_i2c_aux *aux)
+{
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	const u32 unksel = 1; /* nfi which to use, or if it matters.. */
+	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
+	const u32 urep = unksel ? 0x01000000 : 0x02000000;
+	u32 ctrl, timeout;
+
+	/* wait up to 1ms for any previous transaction to be done... */
+	timeout = 1000;
+	do {
+		ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
+		udelay(1);
+		if (!timeout--) {
+			AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
+			return -EBUSY;
+		}
+	} while (ctrl & 0x03010000);
+
+	/* set some magic, and wait up to 1ms for it to appear */
+	nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq);
+	timeout = 1000;
+	do {
+		ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
+		udelay(1);
+		if (!timeout--) {
+			AUX_ERR(&aux->base, "magic wait %08x", ctrl);
+			gm204_i2c_aux_fini(aux);
+			return -EBUSY;
+		}
+	} while ((ctrl & 0x03000000) != urep);
+
+	return 0;
+}
+
+static int
+gm204_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
+		   u8 type, u32 addr, u8 *data, u8 size)
+{
+	struct gm204_i2c_aux *aux = gm204_i2c_aux(obj);
+	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	const u32 base = aux->ch * 0x50;
+	u32 ctrl, stat, timeout, retries;
+	u32 xbuf[4] = {};
+	int ret, i;
+
+	AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
+
+	ret = gm204_i2c_aux_init(aux);
+	if (ret < 0)
+		goto out;
+
+	stat = nvkm_rd32(device, 0x00d958 + base);
+	if (!(stat & 0x10000000)) {
+		AUX_TRACE(&aux->base, "sink not detected");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	if (!(type & 1)) {
+		memcpy(xbuf, data, size);
+		for (i = 0; i < 16; i += 4) {
+			AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
+			nvkm_wr32(device, 0x00d930 + base + i, xbuf[i / 4]);
+		}
+	}
+
+	ctrl  = nvkm_rd32(device, 0x00d954 + base);
+	ctrl &= ~0x0001f0ff;
+	ctrl |= type << 12;
+	ctrl |= size - 1;
+	nvkm_wr32(device, 0x00d950 + base, addr);
+
+	/* (maybe) retry transaction a number of times on failure... */
+	for (retries = 0; !ret && retries < 32; retries++) {
+		/* reset, and delay a while if this is a retry */
+		nvkm_wr32(device, 0x00d954 + base, 0x80000000 | ctrl);
+		nvkm_wr32(device, 0x00d954 + base, 0x00000000 | ctrl);
+		if (retries)
+			udelay(400);
+
+		/* transaction request, wait up to 1ms for it to complete */
+		nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl);
+
+		timeout = 1000;
+		do {
+			ctrl = nvkm_rd32(device, 0x00d954 + base);
+			udelay(1);
+			if (!timeout--) {
+				AUX_ERR(&aux->base, "timeout %08x", ctrl);
+				ret = -EIO;
+				goto out;
+			}
+		} while (ctrl & 0x00010000);
+		ret = 1;
+
+		/* read status, and check if transaction completed ok */
+		stat = nvkm_mask(device, 0x00d958 + base, 0, 0);
+		if ((stat & 0x000f0000) == 0x00080000 ||
+		    (stat & 0x000f0000) == 0x00020000)
+			ret = retry ? 0 : 1;
+		if ((stat & 0x00000100))
+			ret = -ETIMEDOUT;
+		if ((stat & 0x00000e00))
+			ret = -EIO;
+
+		AUX_TRACE(&aux->base, "%02d %08x %08x", retries, ctrl, stat);
+	}
+
+	if (type & 1) {
+		for (i = 0; i < 16; i += 4) {
+			xbuf[i / 4] = nvkm_rd32(device, 0x00d940 + base + i);
+			AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
+		}
+		memcpy(data, xbuf, size);
+	}
+
+out:
+	gm204_i2c_aux_fini(aux);
+	return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+}
+
+static const struct nvkm_i2c_aux_func
+gm204_i2c_aux_func = {
+	.xfer = gm204_i2c_aux_xfer,
+};
+
+int
+gm204_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+		struct nvkm_i2c_aux **paux)
+{
+	struct gm204_i2c_aux *aux;
+
+	if (!(aux = kzalloc(sizeof(*aux), GFP_KERNEL)))
+		return -ENOMEM;
+	*paux = &aux->base;
+
+	nvkm_i2c_aux_ctor(&gm204_i2c_aux_func, pad, index, &aux->base);
+	aux->ch = drive;
+	aux->base.intr = 1 << aux->ch;
+	return 0;
+}
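
The gm204 AUX engine above is almost a register-window clone of g94: the logic is identical, only the 0x00d9xx offsets replace the 0x00e4xx family. The comparison below collects the bases from the two hunks side by side with a parameterized lookup, one plausible way the duplication could be factored if the hardware stays this regular; the struct and field names are invented for the sketch:

#include <stdio.h>

struct aux_regs {
	unsigned int wdata;	/* write fifo   */
	unsigned int rdata;	/* read fifo    */
	unsigned int addr;	/* aux address  */
	unsigned int ctrl;	/* control      */
	unsigned int stat;	/* status       */
};

static const struct aux_regs g94_regs   = { 0x00e4c0, 0x00e4d0, 0x00e4e0, 0x00e4e4, 0x00e4e8 };
static const struct aux_regs gm204_regs = { 0x00d930, 0x00d940, 0x00d950, 0x00d954, 0x00d958 };

int
main(void)
{
	/* each channel adds ch * 0x50 on top of these bases */
	printf("g94 ctrl ch3:   %#08x\n", g94_regs.ctrl + 3 * 0x50);
	printf("gm204 ctrl ch3: %#08x\n", gm204_regs.ctrl + 3 * 0x50);
	return 0;
}
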
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index 9200f12..243a71f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -22,328 +22,91 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
+#include "aux.h"
+#include "bus.h"
 #include "pad.h"
 
-#include <core/device.h>
 #include <core/notify.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/dcb.h>
+#include <subdev/bios/i2c.h>
 
-/******************************************************************************
- * interface to linux i2c bit-banging algorithm
- *****************************************************************************/
-
-#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
-#define CSTMSEL true
-#else
-#define CSTMSEL false
-#endif
-
-static int
-nvkm_i2c_pre_xfer(struct i2c_adapter *adap)
+static struct nvkm_i2c_pad *
+nvkm_i2c_pad_find(struct nvkm_i2c *i2c, int id)
 {
-	struct i2c_algo_bit_data *bit = adap->algo_data;
-	struct nvkm_i2c_port *port = bit->data;
-	return nvkm_i2c(port)->acquire(port, bit->timeout);
-}
+	struct nvkm_i2c_pad *pad;
 
-static void
-nvkm_i2c_post_xfer(struct i2c_adapter *adap)
-{
-	struct i2c_algo_bit_data *bit = adap->algo_data;
-	struct nvkm_i2c_port *port = bit->data;
-	return nvkm_i2c(port)->release(port);
-}
-
-static void
-nvkm_i2c_setscl(void *data, int state)
-{
-	struct nvkm_i2c_port *port = data;
-	port->func->drive_scl(port, state);
-}
-
-static void
-nvkm_i2c_setsda(void *data, int state)
-{
-	struct nvkm_i2c_port *port = data;
-	port->func->drive_sda(port, state);
-}
-
-static int
-nvkm_i2c_getscl(void *data)
-{
-	struct nvkm_i2c_port *port = data;
-	return port->func->sense_scl(port);
-}
-
-static int
-nvkm_i2c_getsda(void *data)
-{
-	struct nvkm_i2c_port *port = data;
-	return port->func->sense_sda(port);
-}
-
-/******************************************************************************
- * base i2c "port" class implementation
- *****************************************************************************/
-
-int
-_nvkm_i2c_port_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nvkm_i2c_port *port = (void *)object;
-	struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
-	nv_ofuncs(pad)->fini(nv_object(pad), suspend);
-	return nvkm_object_fini(&port->base, suspend);
-}
-
-void
-_nvkm_i2c_port_dtor(struct nvkm_object *object)
-{
-	struct nvkm_i2c_port *port = (void *)object;
-	i2c_del_adapter(&port->adapter);
-	nvkm_object_destroy(&port->base);
-}
-
-int
-nvkm_i2c_port_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		      struct nvkm_oclass *oclass, u8 index,
-		      const struct i2c_algorithm *algo,
-		      const struct nvkm_i2c_func *func,
-		      int size, void **pobject)
-{
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvkm_i2c_port *port;
-	int ret;
-
-	ret = nvkm_object_create_(parent, engine, oclass, 0, size, pobject);
-	port = *pobject;
-	if (ret)
-		return ret;
-
-	snprintf(port->adapter.name, sizeof(port->adapter.name),
-		 "nvkm-%s-%d", device->name, index);
-	port->adapter.owner = THIS_MODULE;
-	port->adapter.dev.parent = nv_device_base(device);
-	port->index = index;
-	port->aux = -1;
-	port->func = func;
-	mutex_init(&port->mutex);
-
-	if ( algo == &nvkm_i2c_bit_algo &&
-	    !nvkm_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) {
-		struct i2c_algo_bit_data *bit;
-
-		bit = kzalloc(sizeof(*bit), GFP_KERNEL);
-		if (!bit)
-			return -ENOMEM;
-
-		bit->udelay = 10;
-		bit->timeout = usecs_to_jiffies(2200);
-		bit->data = port;
-		bit->pre_xfer = nvkm_i2c_pre_xfer;
-		bit->post_xfer = nvkm_i2c_post_xfer;
-		bit->setsda = nvkm_i2c_setsda;
-		bit->setscl = nvkm_i2c_setscl;
-		bit->getsda = nvkm_i2c_getsda;
-		bit->getscl = nvkm_i2c_getscl;
-
-		port->adapter.algo_data = bit;
-		ret = i2c_bit_add_bus(&port->adapter);
-	} else {
-		port->adapter.algo_data = port;
-		port->adapter.algo = algo;
-		ret = i2c_add_adapter(&port->adapter);
+	list_for_each_entry(pad, &i2c->pad, head) {
+		if (pad->id == id)
+			return pad;
 	}
 
-	if (ret == 0)
-		list_add_tail(&port->head, &i2c->ports);
-	return ret;
+	return NULL;
 }
 
-/******************************************************************************
- * base i2c subdev class implementation
- *****************************************************************************/
-
-static struct nvkm_i2c_port *
-nvkm_i2c_find(struct nvkm_i2c *i2c, u8 index)
+struct nvkm_i2c_bus *
+nvkm_i2c_bus_find(struct nvkm_i2c *i2c, int id)
 {
-	struct nvkm_bios *bios = nvkm_bios(i2c);
-	struct nvkm_i2c_port *port;
+	struct nvkm_bios *bios = i2c->subdev.device->bios;
+	struct nvkm_i2c_bus *bus;
 
-	if (index == NV_I2C_DEFAULT(0) ||
-	    index == NV_I2C_DEFAULT(1)) {
+	if (id == NVKM_I2C_BUS_PRI || id == NVKM_I2C_BUS_SEC) {
 		u8  ver, hdr, cnt, len;
 		u16 i2c = dcb_i2c_table(bios, &ver, &hdr, &cnt, &len);
 		if (i2c && ver >= 0x30) {
-			u8 auxidx = nv_ro08(bios, i2c + 4);
-			if (index == NV_I2C_DEFAULT(0))
-				index = (auxidx & 0x0f) >> 0;
+			u8 auxidx = nvbios_rd08(bios, i2c + 4);
+			if (id == NVKM_I2C_BUS_PRI)
+				id = NVKM_I2C_BUS_CCB((auxidx & 0x0f) >> 0);
 			else
-				index = (auxidx & 0xf0) >> 4;
+				id = NVKM_I2C_BUS_CCB((auxidx & 0xf0) >> 4);
 		} else {
-			index = 2;
+			id = NVKM_I2C_BUS_CCB(2);
 		}
 	}
 
-	list_for_each_entry(port, &i2c->ports, head) {
-		if (port->index == index)
-			return port;
+	list_for_each_entry(bus, &i2c->bus, head) {
+		if (bus->id == id)
+			return bus;
 	}
 
 	return NULL;
 }
 
-static struct nvkm_i2c_port *
-nvkm_i2c_find_type(struct nvkm_i2c *i2c, u16 type)
+struct nvkm_i2c_aux *
+nvkm_i2c_aux_find(struct nvkm_i2c *i2c, int id)
 {
-	struct nvkm_i2c_port *port;
+	struct nvkm_i2c_aux *aux;
 
-	list_for_each_entry(port, &i2c->ports, head) {
-		if (nv_hclass(port) == type)
-			return port;
+	list_for_each_entry(aux, &i2c->aux, head) {
+		if (aux->id == id)
+			return aux;
 	}
 
 	return NULL;
 }
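+
+/* The pad/bus/aux lookup helpers above all follow the same pattern: walk
+ * the subdev's list and match on the encoded id.  As an illustrative
+ * example, resolving the primary DDC bus named by the VBIOS:
+ *
+ *	struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
+ *	if (!bus)
+ *		return -ENODEV;
+ */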
 
 static void
-nvkm_i2c_release_pad(struct nvkm_i2c_port *port)
-{
-	struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
-	struct nvkm_i2c *i2c = nvkm_i2c(port);
-
-	if (atomic_dec_and_test(&nv_object(pad)->usecount)) {
-		nv_ofuncs(pad)->fini(nv_object(pad), false);
-		wake_up_all(&i2c->wait);
-	}
-}
-
-static int
-nvkm_i2c_try_acquire_pad(struct nvkm_i2c_port *port)
-{
-	struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
-
-	if (atomic_add_return(1, &nv_object(pad)->usecount) != 1) {
-		struct nvkm_object *owner = (void *)pad->port;
-		do {
-			if (owner == (void *)port)
-				return 0;
-			owner = owner->parent;
-		} while(owner);
-		nvkm_i2c_release_pad(port);
-		return -EBUSY;
-	}
-
-	pad->next = port;
-	nv_ofuncs(pad)->init(nv_object(pad));
-	return 0;
-}
-
-static int
-nvkm_i2c_acquire_pad(struct nvkm_i2c_port *port, unsigned long timeout)
-{
-	struct nvkm_i2c *i2c = nvkm_i2c(port);
-
-	if (timeout) {
-		if (wait_event_timeout(i2c->wait,
-				       nvkm_i2c_try_acquire_pad(port) == 0,
-				       timeout) == 0)
-			return -EBUSY;
-	} else {
-		wait_event(i2c->wait, nvkm_i2c_try_acquire_pad(port) == 0);
-	}
-
-	return 0;
-}
-
-static void
-nvkm_i2c_release(struct nvkm_i2c_port *port)
-__releases(pad->mutex)
-{
-	nvkm_i2c(port)->release_pad(port);
-	mutex_unlock(&port->mutex);
-}
-
-static int
-nvkm_i2c_acquire(struct nvkm_i2c_port *port, unsigned long timeout)
-__acquires(pad->mutex)
-{
-	int ret;
-	mutex_lock(&port->mutex);
-	if ((ret = nvkm_i2c(port)->acquire_pad(port, timeout)))
-		mutex_unlock(&port->mutex);
-	return ret;
-}
-
-static int
-nvkm_i2c_identify(struct nvkm_i2c *i2c, int index, const char *what,
-		  struct nvkm_i2c_board_info *info,
-		  bool (*match)(struct nvkm_i2c_port *,
-				struct i2c_board_info *, void *), void *data)
-{
-	struct nvkm_i2c_port *port = nvkm_i2c_find(i2c, index);
-	int i;
-
-	if (!port) {
-		nv_debug(i2c, "no bus when probing %s on %d\n", what, index);
-		return -ENODEV;
-	}
-
-	nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index);
-	for (i = 0; info[i].dev.addr; i++) {
-		u8 orig_udelay = 0;
-
-		if ((port->adapter.algo == &i2c_bit_algo) &&
-		    (info[i].udelay != 0)) {
-			struct i2c_algo_bit_data *algo = port->adapter.algo_data;
-			nv_debug(i2c, "using custom udelay %d instead of %d\n",
-			         info[i].udelay, algo->udelay);
-			orig_udelay = algo->udelay;
-			algo->udelay = info[i].udelay;
-		}
-
-		if (nv_probe_i2c(port, info[i].dev.addr) &&
-		    (!match || match(port, &info[i].dev, data))) {
-			nv_info(i2c, "detected %s: %s\n", what,
-				info[i].dev.type);
-			return i;
-		}
-
-		if (orig_udelay) {
-			struct i2c_algo_bit_data *algo = port->adapter.algo_data;
-			algo->udelay = orig_udelay;
-		}
-	}
-
-	nv_debug(i2c, "no devices found.\n");
-	return -ENODEV;
-}
-
-static void
-nvkm_i2c_intr_fini(struct nvkm_event *event, int type, int index)
+nvkm_i2c_intr_fini(struct nvkm_event *event, int type, int id)
 {
 	struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event);
-	struct nvkm_i2c_port *port = i2c->find(i2c, index);
-	const struct nvkm_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
-	if (port && port->aux >= 0)
-		impl->aux_mask(i2c, type, 1 << port->aux, 0);
+	struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, id);
+	if (aux)
+		i2c->func->aux_mask(i2c, type, aux->intr, 0);
 }
 
 static void
-nvkm_i2c_intr_init(struct nvkm_event *event, int type, int index)
+nvkm_i2c_intr_init(struct nvkm_event *event, int type, int id)
 {
 	struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event);
-	struct nvkm_i2c_port *port = i2c->find(i2c, index);
-	const struct nvkm_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
-	if (port && port->aux >= 0)
-		impl->aux_mask(i2c, type, 1 << port->aux, 1 << port->aux);
+	struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, id);
+	if (aux)
+		i2c->func->aux_mask(i2c, type, aux->intr, aux->intr);
 }
 
 static int
 nvkm_i2c_intr_ctor(struct nvkm_object *object, void *data, u32 size,
-		      struct nvkm_notify *notify)
+		   struct nvkm_notify *notify)
 {
 	struct nvkm_i2c_ntfy_req *req = data;
 	if (!WARN_ON(size != sizeof(*req))) {
@@ -355,38 +118,6 @@
 	return -EINVAL;
 }
 
-static void
-nvkm_i2c_intr(struct nvkm_subdev *subdev)
-{
-	struct nvkm_i2c_impl *impl = (void *)nv_oclass(subdev);
-	struct nvkm_i2c *i2c = nvkm_i2c(subdev);
-	struct nvkm_i2c_port *port;
-	u32 hi, lo, rq, tx, e;
-
-	if (impl->aux_stat) {
-		impl->aux_stat(i2c, &hi, &lo, &rq, &tx);
-		if (hi || lo || rq || tx) {
-			list_for_each_entry(port, &i2c->ports, head) {
-				if (e = 0, port->aux < 0)
-					continue;
-
-				if (hi & (1 << port->aux)) e |= NVKM_I2C_PLUG;
-				if (lo & (1 << port->aux)) e |= NVKM_I2C_UNPLUG;
-				if (rq & (1 << port->aux)) e |= NVKM_I2C_IRQ;
-				if (tx & (1 << port->aux)) e |= NVKM_I2C_DONE;
-				if (e) {
-					struct nvkm_i2c_ntfy_rep rep = {
-						.mask = e,
-					};
-					nvkm_event_send(&i2c->event, rep.mask,
-							port->index, &rep,
-							sizeof(rep));
-				}
-			}
-		}
-	}
-}
-
 static const struct nvkm_event_func
 nvkm_i2c_intr_func = {
 	.ctor = nvkm_i2c_intr_ctor,
@@ -394,229 +125,272 @@
 	.fini = nvkm_i2c_intr_fini,
 };
 
-int
-_nvkm_i2c_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_i2c_intr(struct nvkm_subdev *subdev)
 {
-	struct nvkm_i2c_impl *impl = (void *)nv_oclass(object);
-	struct nvkm_i2c *i2c = (void *)object;
-	struct nvkm_i2c_port *port;
-	u32 mask;
-	int ret;
+	struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+	struct nvkm_i2c_aux *aux;
+	u32 hi, lo, rq, tx;
 
-	list_for_each_entry(port, &i2c->ports, head) {
-		ret = nv_ofuncs(port)->fini(nv_object(port), suspend);
-		if (ret && suspend)
-			goto fail;
-	}
+	if (!i2c->func->aux_stat)
+		return;
 
-	if ((mask = (1 << impl->aux) - 1), impl->aux_stat) {
-		impl->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);
-		impl->aux_stat(i2c, &mask, &mask, &mask, &mask);
-	}
+	i2c->func->aux_stat(i2c, &hi, &lo, &rq, &tx);
+	if (!hi && !lo && !rq && !tx)
+		return;
 
-	return nvkm_subdev_fini(&i2c->base, suspend);
-fail:
-	list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
-		nv_ofuncs(port)->init(nv_object(port));
-	}
-
-	return ret;
-}
-
-int
-_nvkm_i2c_init(struct nvkm_object *object)
-{
-	struct nvkm_i2c *i2c = (void *)object;
-	struct nvkm_i2c_port *port;
-	int ret;
-
-	ret = nvkm_subdev_init(&i2c->base);
-	if (ret == 0) {
-		list_for_each_entry(port, &i2c->ports, head) {
-			ret = nv_ofuncs(port)->init(nv_object(port));
-			if (ret)
-				goto fail;
+	list_for_each_entry(aux, &i2c->aux, head) {
+		u32 mask = 0;
+		if (hi & aux->intr) mask |= NVKM_I2C_PLUG;
+		if (lo & aux->intr) mask |= NVKM_I2C_UNPLUG;
+		if (rq & aux->intr) mask |= NVKM_I2C_IRQ;
+		if (tx & aux->intr) mask |= NVKM_I2C_DONE;
+		if (mask) {
+			struct nvkm_i2c_ntfy_rep rep = {
+				.mask = mask,
+			};
+			nvkm_event_send(&i2c->event, rep.mask, aux->id,
+					&rep, sizeof(rep));
 		}
 	}
-
-	return ret;
-fail:
-	list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
-		nv_ofuncs(port)->fini(nv_object(port), false);
-	}
-
-	return ret;
 }
 
-void
-_nvkm_i2c_dtor(struct nvkm_object *object)
+static int
+nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	struct nvkm_i2c *i2c = (void *)object;
-	struct nvkm_i2c_port *port, *temp;
+	struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+	struct nvkm_i2c_pad *pad;
+	u32 mask;
+
+	if ((mask = (1 << i2c->func->aux) - 1), i2c->func->aux_stat) {
+		i2c->func->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);
+		i2c->func->aux_stat(i2c, &mask, &mask, &mask, &mask);
+	}
+
+	list_for_each_entry(pad, &i2c->pad, head) {
+		nvkm_i2c_pad_fini(pad);
+	}
+
+	return 0;
+}
+
+static int
+nvkm_i2c_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+	struct nvkm_i2c_bus *bus;
+	struct nvkm_i2c_pad *pad;
+
+	list_for_each_entry(pad, &i2c->pad, head) {
+		nvkm_i2c_pad_init(pad);
+	}
+
+	list_for_each_entry(bus, &i2c->bus, head) {
+		nvkm_i2c_bus_init(bus);
+	}
+
+	return 0;
+}
+
+static void *
+nvkm_i2c_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_i2c *i2c = nvkm_i2c(subdev);
 
 	nvkm_event_fini(&i2c->event);
 
-	list_for_each_entry_safe(port, temp, &i2c->ports, head) {
-		nvkm_object_ref(NULL, (struct nvkm_object **)&port);
+	while (!list_empty(&i2c->aux)) {
+		struct nvkm_i2c_aux *aux =
+			list_first_entry(&i2c->aux, typeof(*aux), head);
+		nvkm_i2c_aux_del(&aux);
 	}
 
-	nvkm_subdev_destroy(&i2c->base);
+	while (!list_empty(&i2c->bus)) {
+		struct nvkm_i2c_bus *bus =
+			list_first_entry(&i2c->bus, typeof(*bus), head);
+		nvkm_i2c_bus_del(&bus);
+	}
+
+	while (!list_empty(&i2c->pad)) {
+		struct nvkm_i2c_pad *pad =
+			list_first_entry(&i2c->pad, typeof(*pad), head);
+		nvkm_i2c_pad_del(&pad);
+	}
+
+	return i2c;
 }
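+
+/* Note the teardown order: aux channels and buses hang off pads, so they
+ * are deleted first and the pads last, the reverse of how nvkm_i2c_new_()
+ * below creates them.
+ */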
 
-static struct nvkm_oclass *
-nvkm_i2c_extdev_sclass[] = {
-	nvkm_anx9805_sclass,
+static const struct nvkm_subdev_func
+nvkm_i2c = {
+	.dtor = nvkm_i2c_dtor,
+	.init = nvkm_i2c_init,
+	.fini = nvkm_i2c_fini,
+	.intr = nvkm_i2c_intr,
 };
 
-static void
-nvkm_i2c_create_port(struct nvkm_i2c *i2c, int index, u8 type,
-		     struct dcb_i2c_entry *info)
-{
-	const struct nvkm_i2c_impl *impl = (void *)nv_oclass(i2c);
-	struct nvkm_oclass *oclass;
-	struct nvkm_object *parent;
-	struct nvkm_object *object;
-	int ret, pad;
-
-	if (info->share != DCB_I2C_UNUSED) {
-		pad    = info->share;
-		oclass = impl->pad_s;
-	} else {
-		if (type != DCB_I2C_NVIO_AUX)
-			pad = 0x100 + info->drive;
-		else
-			pad = 0x100 + info->auxch;
-		oclass = impl->pad_x;
-	}
-
-	ret = nvkm_object_ctor(nv_object(i2c), NULL, oclass,
-			       NULL, pad, &parent);
-	if (ret < 0)
-		return;
-
-	oclass = impl->sclass;
-	do {
-		ret = -EINVAL;
-		if (oclass->handle == type) {
-			ret = nvkm_object_ctor(parent, NULL, oclass,
-					       info, index, &object);
-		}
-	} while (ret && (++oclass)->handle);
-
-	nvkm_object_ref(NULL, &parent);
+static const struct nvkm_i2c_drv {
+	u8 bios;
+	u8 addr;
+	int (*pad_new)(struct nvkm_i2c_bus *, int id, u8 addr,
+		       struct nvkm_i2c_pad **);
 }
+nvkm_i2c_drv[] = {
+	{ 0x0d, 0x39, anx9805_pad_new },
+	{ 0x0e, 0x3b, anx9805_pad_new },
+	{}
+};
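+
+/* The table above maps DCB external-encoder ids to pad drivers: the match
+ * loop in nvkm_i2c_new_() compares dcbE.extdev against the bios field and
+ * stops at the empty terminator.  Both entries here select the anx9805
+ * driver, at device addresses 0x39 and 0x3b.
+ */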
 
 int
-nvkm_i2c_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_i2c **pi2c)
 {
-	struct nvkm_bios *bios = nvkm_bios(parent);
+	struct nvkm_bios *bios = device->bios;
 	struct nvkm_i2c *i2c;
-	struct nvkm_object *object;
-	struct dcb_i2c_entry info;
-	int ret, i, j, index = -1;
-	struct dcb_output outp;
-	u8  ver, hdr;
-	u32 data;
+	struct dcb_i2c_entry ccbE;
+	struct dcb_output dcbE;
+	u8 ver, hdr;
+	int ret, i;
 
-	ret = nvkm_subdev_create(parent, engine, oclass, 0, "I2C", "i2c", &i2c);
-	*pobject = nv_object(i2c);
-	if (ret)
-		return ret;
+	if (!(i2c = *pi2c = kzalloc(sizeof(*i2c), GFP_KERNEL)))
+		return -ENOMEM;
 
-	nv_subdev(i2c)->intr = nvkm_i2c_intr;
-	i2c->find = nvkm_i2c_find;
-	i2c->find_type = nvkm_i2c_find_type;
-	i2c->acquire_pad = nvkm_i2c_acquire_pad;
-	i2c->release_pad = nvkm_i2c_release_pad;
-	i2c->acquire = nvkm_i2c_acquire;
-	i2c->release = nvkm_i2c_release;
-	i2c->identify = nvkm_i2c_identify;
-	init_waitqueue_head(&i2c->wait);
-	INIT_LIST_HEAD(&i2c->ports);
+	nvkm_subdev_ctor(&nvkm_i2c, device, index, 0, &i2c->subdev);
+	i2c->func = func;
+	INIT_LIST_HEAD(&i2c->pad);
+	INIT_LIST_HEAD(&i2c->bus);
+	INIT_LIST_HEAD(&i2c->aux);
 
-	while (!dcb_i2c_parse(bios, ++index, &info)) {
-		switch (info.type) {
-		case DCB_I2C_NV04_BIT:
-		case DCB_I2C_NV4E_BIT:
-		case DCB_I2C_NVIO_BIT:
-			nvkm_i2c_create_port(i2c, NV_I2C_PORT(index),
-					     info.type, &info);
-			break;
-		case DCB_I2C_NVIO_AUX:
-			nvkm_i2c_create_port(i2c, NV_I2C_AUX(index),
-					     info.type, &info);
-			break;
-		case DCB_I2C_PMGR:
-			if (info.drive != DCB_I2C_UNUSED) {
-				nvkm_i2c_create_port(i2c, NV_I2C_PORT(index),
-						     DCB_I2C_NVIO_BIT, &info);
-			}
-			if (info.auxch != DCB_I2C_UNUSED) {
-				nvkm_i2c_create_port(i2c, NV_I2C_AUX(index),
-						     DCB_I2C_NVIO_AUX, &info);
-			}
-			break;
-		case DCB_I2C_UNUSED:
-		default:
-			continue;
-		}
-	}
-
-	/* in addition to the busses specified in the i2c table, there
-	 * may be ddc/aux channels hiding behind external tmds/dp/etc
-	 * transmitters.
-	 */
-	index = NV_I2C_EXT(0);
 	i = -1;
-	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &outp))) {
-		if (!outp.location || !outp.extdev)
-			continue;
+	while (!dcb_i2c_parse(bios, ++i, &ccbE)) {
+		struct nvkm_i2c_pad *pad = NULL;
+		struct nvkm_i2c_bus *bus = NULL;
+		struct nvkm_i2c_aux *aux = NULL;
 
-		switch (outp.type) {
-		case DCB_OUTPUT_TMDS:
-			info.type = NV_I2C_TYPE_EXTDDC(outp.extdev);
-			break;
-		case DCB_OUTPUT_DP:
-			info.type = NV_I2C_TYPE_EXTAUX(outp.extdev);
-			break;
-		default:
+		nvkm_debug(&i2c->subdev, "ccb %02x: type %02x drive %02x "
+			   "sense %02x share %02x auxch %02x\n", i, ccbE.type,
+			   ccbE.drive, ccbE.sense, ccbE.share, ccbE.auxch);
+
+		if (ccbE.share != DCB_I2C_UNUSED) {
+			const int id = NVKM_I2C_PAD_HYBRID(ccbE.share);
+			if (!(pad = nvkm_i2c_pad_find(i2c, id)))
+				ret = func->pad_s_new(i2c, id, &pad);
+			else
+				ret = 0;
+		} else {
+			ret = func->pad_x_new(i2c, NVKM_I2C_PAD_CCB(i), &pad);
+		}
+
+		if (ret) {
+			nvkm_error(&i2c->subdev, "ccb %02x pad, %d\n", i, ret);
+			nvkm_i2c_pad_del(&pad);
 			continue;
 		}
 
-		ret = -ENODEV;
-		j = -1;
-		while (ret && ++j < ARRAY_SIZE(nvkm_i2c_extdev_sclass)) {
-			parent = nv_object(i2c->find(i2c, outp.i2c_index));
-			oclass = nvkm_i2c_extdev_sclass[j];
-			do {
-				if (oclass->handle != info.type)
-					continue;
-				ret = nvkm_object_ctor(parent, NULL, oclass,
-						       NULL, index++, &object);
-			} while (ret && (++oclass)->handle);
+		if (pad->func->bus_new_0 && ccbE.type == DCB_I2C_NV04_BIT) {
+			ret = pad->func->bus_new_0(pad, NVKM_I2C_BUS_CCB(i),
+						   ccbE.drive,
+						   ccbE.sense, &bus);
+		} else
+		if (pad->func->bus_new_4 &&
+		    ( ccbE.type == DCB_I2C_NV4E_BIT ||
+		      ccbE.type == DCB_I2C_NVIO_BIT ||
+		     (ccbE.type == DCB_I2C_PMGR &&
+		      ccbE.drive != DCB_I2C_UNUSED))) {
+			ret = pad->func->bus_new_4(pad, NVKM_I2C_BUS_CCB(i),
+						   ccbE.drive, &bus);
+		}
+
+		if (ret) {
+			nvkm_error(&i2c->subdev, "ccb %02x bus, %d\n", i, ret);
+			nvkm_i2c_bus_del(&bus);
+		}
+
+		if (pad->func->aux_new_6 &&
+		    ( ccbE.type == DCB_I2C_NVIO_AUX ||
+		     (ccbE.type == DCB_I2C_PMGR &&
+		      ccbE.auxch != DCB_I2C_UNUSED))) {
+			ret = pad->func->aux_new_6(pad, NVKM_I2C_BUS_CCB(i),
+						   ccbE.auxch, &aux);
+		} else {
+			ret = 0;
+		}
+
+		if (ret) {
+			nvkm_error(&i2c->subdev, "ccb %02x aux, %d\n", i, ret);
+			nvkm_i2c_aux_del(&aux);
+		}
+
+		if (ccbE.type != DCB_I2C_UNUSED && !bus && !aux) {
+			nvkm_warn(&i2c->subdev, "ccb %02x was ignored\n", i);
+			continue;
 		}
 	}
 
-	ret = nvkm_event_init(&nvkm_i2c_intr_func, 4, index, &i2c->event);
-	if (ret)
-		return ret;
+	i = -1;
+	while (dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE)) {
+		const struct nvkm_i2c_drv *drv = nvkm_i2c_drv;
+		struct nvkm_i2c_bus *bus;
+		struct nvkm_i2c_pad *pad;
 
-	return 0;
-}
+		/* internal outputs handled by native i2c busses (above) */
+		if (!dcbE.location)
+			continue;
 
-int
-_nvkm_i2c_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nvkm_i2c *i2c;
-	int ret;
+		/* we need an i2c bus to talk to the external encoder */
+		bus = nvkm_i2c_bus_find(i2c, dcbE.i2c_index);
+		if (!bus) {
+			nvkm_debug(&i2c->subdev, "dcb %02x no bus\n", i);
+			continue;
+		}
 
-	ret = nvkm_i2c_create(parent, engine, oclass, &i2c);
-	*pobject = nv_object(i2c);
-	if (ret)
-		return ret;
+		/* ... and a driver for it */
+		while (drv->pad_new) {
+			if (drv->bios == dcbE.extdev)
+				break;
+			drv++;
+		}
 
-	return 0;
+		if (!drv->pad_new) {
+			nvkm_debug(&i2c->subdev, "dcb %02x drv %02x unknown\n",
+				   i, dcbE.extdev);
+			continue;
+		}
+
+		/* find/create an instance of the driver */
+		pad = nvkm_i2c_pad_find(i2c, NVKM_I2C_PAD_EXT(dcbE.extdev));
+		if (!pad) {
+			const int id = NVKM_I2C_PAD_EXT(dcbE.extdev);
+			ret = drv->pad_new(bus, id, drv->addr, &pad);
+			if (ret) {
+				nvkm_error(&i2c->subdev, "dcb %02x pad, %d\n",
+					   i, ret);
+				nvkm_i2c_pad_del(&pad);
+				continue;
+			}
+		}
+
+		/* create any i2c bus / aux channel required by the output */
+		if (pad->func->aux_new_6 && dcbE.type == DCB_OUTPUT_DP) {
+			const int id = NVKM_I2C_AUX_EXT(dcbE.extdev);
+			struct nvkm_i2c_aux *aux = NULL;
+			ret = pad->func->aux_new_6(pad, id, 0, &aux);
+			if (ret) {
+				nvkm_error(&i2c->subdev, "dcb %02x aux, %d\n",
+					   i, ret);
+				nvkm_i2c_aux_del(&aux);
+			}
+		} else
+		if (pad->func->bus_new_4) {
+			const int id = NVKM_I2C_BUS_EXT(dcbE.extdev);
+			struct nvkm_i2c_bus *bus = NULL;
+			ret = pad->func->bus_new_4(pad, id, 0, &bus);
+			if (ret) {
+				nvkm_error(&i2c->subdev, "dcb %02x bus, %d\n",
+					   i, ret);
+				nvkm_i2c_bus_del(&bus);
+			}
+		}
+	}
+
+	return nvkm_event_init(&nvkm_i2c_intr_func, 4, i, &i2c->event);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c
index 861a453..cdce11b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bit.c
@@ -9,7 +9,7 @@
  * Software is furnished to do so, subject to the following conditions:
  *
  * The above copyright notice and this permission notice shall be included in
  * all copies or substantial portions of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
@@ -21,7 +21,7 @@
  *
  * Authors: Ben Skeggs
  */
-#include "priv.h"
+#include "bus.h"
 
 #ifdef CONFIG_NOUVEAU_I2C_INTERNAL
 #define T_TIMEOUT  2200000
@@ -29,205 +29,188 @@
 #define T_HOLD     5000
 
 static inline void
-i2c_drive_scl(struct nvkm_i2c_port *port, int state)
+nvkm_i2c_drive_scl(struct nvkm_i2c_bus *bus, int state)
 {
-	port->func->drive_scl(port, state);
+	bus->func->drive_scl(bus, state);
 }
 
 static inline void
-i2c_drive_sda(struct nvkm_i2c_port *port, int state)
+nvkm_i2c_drive_sda(struct nvkm_i2c_bus *bus, int state)
 {
-	port->func->drive_sda(port, state);
+	bus->func->drive_sda(bus, state);
 }
 
 static inline int
-i2c_sense_scl(struct nvkm_i2c_port *port)
+nvkm_i2c_sense_scl(struct nvkm_i2c_bus *bus)
 {
-	return port->func->sense_scl(port);
+	return bus->func->sense_scl(bus);
 }
 
 static inline int
-i2c_sense_sda(struct nvkm_i2c_port *port)
+nvkm_i2c_sense_sda(struct nvkm_i2c_bus *bus)
 {
-	return port->func->sense_sda(port);
+	return bus->func->sense_sda(bus);
 }
 
 static void
-i2c_delay(struct nvkm_i2c_port *port, u32 nsec)
+nvkm_i2c_delay(struct nvkm_i2c_bus *bus, u32 nsec)
 {
 	udelay((nsec + 500) / 1000);
 }
 
 static bool
-i2c_raise_scl(struct nvkm_i2c_port *port)
+nvkm_i2c_raise_scl(struct nvkm_i2c_bus *bus)
 {
 	u32 timeout = T_TIMEOUT / T_RISEFALL;
 
-	i2c_drive_scl(port, 1);
+	nvkm_i2c_drive_scl(bus, 1);
 	do {
-		i2c_delay(port, T_RISEFALL);
-	} while (!i2c_sense_scl(port) && --timeout);
+		nvkm_i2c_delay(bus, T_RISEFALL);
+	} while (!nvkm_i2c_sense_scl(bus) && --timeout);
 
 	return timeout != 0;
 }
 
 static int
-i2c_start(struct nvkm_i2c_port *port)
+i2c_start(struct nvkm_i2c_bus *bus)
 {
 	int ret = 0;
 
-	if (!i2c_sense_scl(port) ||
-	    !i2c_sense_sda(port)) {
-		i2c_drive_scl(port, 0);
-		i2c_drive_sda(port, 1);
-		if (!i2c_raise_scl(port))
+	if (!nvkm_i2c_sense_scl(bus) ||
+	    !nvkm_i2c_sense_sda(bus)) {
+		nvkm_i2c_drive_scl(bus, 0);
+		nvkm_i2c_drive_sda(bus, 1);
+		if (!nvkm_i2c_raise_scl(bus))
 			ret = -EBUSY;
 	}
 
-	i2c_drive_sda(port, 0);
-	i2c_delay(port, T_HOLD);
-	i2c_drive_scl(port, 0);
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_drive_sda(bus, 0);
+	nvkm_i2c_delay(bus, T_HOLD);
+	nvkm_i2c_drive_scl(bus, 0);
+	nvkm_i2c_delay(bus, T_HOLD);
 	return ret;
 }
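+
+/* i2c_start() generates the standard start condition - SDA pulled low
+ * while SCL is high, then SCL dropped - with a 5us (T_HOLD) delay
+ * between edges, recovering the bus first if either line reads low.
+ */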
 
 static void
-i2c_stop(struct nvkm_i2c_port *port)
+i2c_stop(struct nvkm_i2c_bus *bus)
 {
-	i2c_drive_scl(port, 0);
-	i2c_drive_sda(port, 0);
-	i2c_delay(port, T_RISEFALL);
+	nvkm_i2c_drive_scl(bus, 0);
+	nvkm_i2c_drive_sda(bus, 0);
+	nvkm_i2c_delay(bus, T_RISEFALL);
 
-	i2c_drive_scl(port, 1);
-	i2c_delay(port, T_HOLD);
-	i2c_drive_sda(port, 1);
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_drive_scl(bus, 1);
+	nvkm_i2c_delay(bus, T_HOLD);
+	nvkm_i2c_drive_sda(bus, 1);
+	nvkm_i2c_delay(bus, T_HOLD);
 }
 
 static int
-i2c_bitw(struct nvkm_i2c_port *port, int sda)
+i2c_bitw(struct nvkm_i2c_bus *bus, int sda)
 {
-	i2c_drive_sda(port, sda);
-	i2c_delay(port, T_RISEFALL);
+	nvkm_i2c_drive_sda(bus, sda);
+	nvkm_i2c_delay(bus, T_RISEFALL);
 
-	if (!i2c_raise_scl(port))
+	if (!nvkm_i2c_raise_scl(bus))
 		return -ETIMEDOUT;
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_delay(bus, T_HOLD);
 
-	i2c_drive_scl(port, 0);
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_drive_scl(bus, 0);
+	nvkm_i2c_delay(bus, T_HOLD);
 	return 0;
 }
 
 static int
-i2c_bitr(struct nvkm_i2c_port *port)
+i2c_bitr(struct nvkm_i2c_bus *bus)
 {
 	int sda;
 
-	i2c_drive_sda(port, 1);
-	i2c_delay(port, T_RISEFALL);
+	nvkm_i2c_drive_sda(bus, 1);
+	nvkm_i2c_delay(bus, T_RISEFALL);
 
-	if (!i2c_raise_scl(port))
+	if (!nvkm_i2c_raise_scl(bus))
 		return -ETIMEDOUT;
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_delay(bus, T_HOLD);
 
-	sda = i2c_sense_sda(port);
+	sda = nvkm_i2c_sense_sda(bus);
 
-	i2c_drive_scl(port, 0);
-	i2c_delay(port, T_HOLD);
+	nvkm_i2c_drive_scl(bus, 0);
+	nvkm_i2c_delay(bus, T_HOLD);
 	return sda;
 }
 
 static int
-i2c_get_byte(struct nvkm_i2c_port *port, u8 *byte, bool last)
+nvkm_i2c_get_byte(struct nvkm_i2c_bus *bus, u8 *byte, bool last)
 {
 	int i, bit;
 
 	*byte = 0;
 	for (i = 7; i >= 0; i--) {
-		bit = i2c_bitr(port);
+		bit = i2c_bitr(bus);
 		if (bit < 0)
 			return bit;
 		*byte |= bit << i;
 	}
 
-	return i2c_bitw(port, last ? 1 : 0);
+	return i2c_bitw(bus, last ? 1 : 0);
 }
 
 static int
-i2c_put_byte(struct nvkm_i2c_port *port, u8 byte)
+nvkm_i2c_put_byte(struct nvkm_i2c_bus *bus, u8 byte)
 {
 	int i, ret;
 	for (i = 7; i >= 0; i--) {
-		ret = i2c_bitw(port, !!(byte & (1 << i)));
+		ret = i2c_bitw(bus, !!(byte & (1 << i)));
 		if (ret < 0)
 			return ret;
 	}
 
-	ret = i2c_bitr(port);
+	ret = i2c_bitr(bus);
 	if (ret == 1) /* nack */
 		ret = -EIO;
 	return ret;
 }
 
 static int
-i2c_addr(struct nvkm_i2c_port *port, struct i2c_msg *msg)
+i2c_addr(struct nvkm_i2c_bus *bus, struct i2c_msg *msg)
 {
 	u32 addr = msg->addr << 1;
 	if (msg->flags & I2C_M_RD)
 		addr |= 1;
-	return i2c_put_byte(port, addr);
+	return nvkm_i2c_put_byte(bus, addr);
 }
 
-static int
-i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+int
+nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *bus, struct i2c_msg *msgs, int num)
 {
-	struct nvkm_i2c_port *port = adap->algo_data;
 	struct i2c_msg *msg = msgs;
 	int ret = 0, mcnt = num;
 
-	ret = nvkm_i2c(port)->acquire(port, nsecs_to_jiffies(T_TIMEOUT));
-	if (ret)
-		return ret;
-
 	while (!ret && mcnt--) {
 		u8 remaining = msg->len;
 		u8 *ptr = msg->buf;
 
-		ret = i2c_start(port);
+		ret = i2c_start(bus);
 		if (ret == 0)
-			ret = i2c_addr(port, msg);
+			ret = i2c_addr(bus, msg);
 
 		if (msg->flags & I2C_M_RD) {
 			while (!ret && remaining--)
-				ret = i2c_get_byte(port, ptr++, !remaining);
+				ret = nvkm_i2c_get_byte(bus, ptr++, !remaining);
 		} else {
 			while (!ret && remaining--)
-				ret = i2c_put_byte(port, *ptr++);
+				ret = nvkm_i2c_put_byte(bus, *ptr++);
 		}
 
 		msg++;
 	}
 
-	i2c_stop(port);
-	nvkm_i2c(port)->release(port);
+	i2c_stop(bus);
 	return (ret < 0) ? ret : num;
 }
 #else
-static int
-i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+int
+nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *bus, struct i2c_msg *msgs, int num)
 {
 	return -ENODEV;
 }
 #endif
-
-static u32
-i2c_bit_func(struct i2c_adapter *adap)
-{
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-const struct i2c_algorithm nvkm_i2c_bit_algo = {
-	.master_xfer = i2c_bit_xfer,
-	.functionality = i2c_bit_func
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
new file mode 100644
index 0000000..807a2b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "bus.h"
+#include "pad.h"
+
+#include <core/option.h>
+
+/*******************************************************************************
+ * i2c-algo-bit
+ ******************************************************************************/
+static int
+nvkm_i2c_bus_pre_xfer(struct i2c_adapter *adap)
+{
+	struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
+	return nvkm_i2c_bus_acquire(bus);
+}
+
+static void
+nvkm_i2c_bus_post_xfer(struct i2c_adapter *adap)
+{
+	struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
+	return nvkm_i2c_bus_release(bus);
+}
+
+static void
+nvkm_i2c_bus_setscl(void *data, int state)
+{
+	struct nvkm_i2c_bus *bus = data;
+	bus->func->drive_scl(bus, state);
+}
+
+static void
+nvkm_i2c_bus_setsda(void *data, int state)
+{
+	struct nvkm_i2c_bus *bus = data;
+	bus->func->drive_sda(bus, state);
+}
+
+static int
+nvkm_i2c_bus_getscl(void *data)
+{
+	struct nvkm_i2c_bus *bus = data;
+	return bus->func->sense_scl(bus);
+}
+
+static int
+nvkm_i2c_bus_getsda(void *data)
+{
+	struct nvkm_i2c_bus *bus = data;
+	return bus->func->sense_sda(bus);
+}
+
+/*******************************************************************************
+ * !i2c-algo-bit (off-chip i2c bus / hw i2c / internal bit-banging algo)
+ ******************************************************************************/
+static int
+nvkm_i2c_bus_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+	struct nvkm_i2c_bus *bus = container_of(adap, typeof(*bus), i2c);
+	int ret;
+
+	ret = nvkm_i2c_bus_acquire(bus);
+	if (ret)
+		return ret;
+
+	ret = bus->func->xfer(bus, msgs, num);
+	nvkm_i2c_bus_release(bus);
+	return ret;
+}
+
+static u32
+nvkm_i2c_bus_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm
+nvkm_i2c_bus_algo = {
+	.master_xfer = nvkm_i2c_bus_xfer,
+	.functionality = nvkm_i2c_bus_func,
+};
+
+/*******************************************************************************
+ * nvkm_i2c_bus base
+ ******************************************************************************/
+void
+nvkm_i2c_bus_init(struct nvkm_i2c_bus *bus)
+{
+	BUS_TRACE(bus, "init");
+	if (bus->func->init)
+		bus->func->init(bus);
+}
+
+void
+nvkm_i2c_bus_release(struct nvkm_i2c_bus *bus)
+{
+	struct nvkm_i2c_pad *pad = bus->pad;
+	BUS_TRACE(bus, "release");
+	nvkm_i2c_pad_release(pad);
+	mutex_unlock(&bus->mutex);
+}
+
+int
+nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *bus)
+{
+	struct nvkm_i2c_pad *pad = bus->pad;
+	int ret;
+	BUS_TRACE(bus, "acquire");
+	mutex_lock(&bus->mutex);
+	ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C);
+	if (ret)
+		mutex_unlock(&bus->mutex);
+	return ret;
+}
+
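+/* The mutex serializes users of this bus, while nvkm_i2c_pad_acquire()
+ * arbitrates the shared pad between i2c and aux use.  Callers that go
+ * through the registered adapter get this automatically (pre/post_xfer,
+ * or nvkm_i2c_bus_xfer); anything driving the bus directly must bracket
+ * the access itself, e.g.:
+ *
+ *	if (nvkm_i2c_bus_acquire(bus) == 0) {
+ *		bus->func->xfer(bus, msgs, num);
+ *		nvkm_i2c_bus_release(bus);
+ *	}
+ */
+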
+int
+nvkm_i2c_bus_probe(struct nvkm_i2c_bus *bus, const char *what,
+		   struct nvkm_i2c_bus_probe *info,
+		   bool (*match)(struct nvkm_i2c_bus *,
+				 struct i2c_board_info *, void *), void *data)
+{
+	int i;
+
+	BUS_DBG(bus, "probing %ss", what);
+	for (i = 0; info[i].dev.addr; i++) {
+		u8 orig_udelay = 0;
+
+		if ((bus->i2c.algo == &i2c_bit_algo) && (info[i].udelay != 0)) {
+			struct i2c_algo_bit_data *algo = bus->i2c.algo_data;
+			BUS_DBG(bus, "%dus delay instead of %dus",
+				     info[i].udelay, algo->udelay);
+			orig_udelay = algo->udelay;
+			algo->udelay = info[i].udelay;
+		}
+
+		if (nvkm_probe_i2c(&bus->i2c, info[i].dev.addr) &&
+		    (!match || match(bus, &info[i].dev, data))) {
+			BUS_DBG(bus, "detected %s: %s",
+				what, info[i].dev.type);
+			return i;
+		}
+
+		if (orig_udelay) {
+			struct i2c_algo_bit_data *algo = bus->i2c.algo_data;
+			algo->udelay = orig_udelay;
+		}
+	}
+
+	BUS_DBG(bus, "no devices found.");
+	return -ENODEV;
+}
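+
+/* A minimal sketch of using the probe helper, with a hypothetical device
+ * list (the address and name here are purely illustrative):
+ *
+ *	static struct nvkm_i2c_bus_probe info[] = {
+ *		{ { I2C_BOARD_INFO("ads7828", 0x48) }, 0 },
+ *		{}
+ *	};
+ *	int match = nvkm_i2c_bus_probe(bus, "sensor", info, NULL, NULL);
+ *
+ * A negative return means nothing answered; otherwise the index of the
+ * entry that responded is returned.
+ */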
+
+void
+nvkm_i2c_bus_del(struct nvkm_i2c_bus **pbus)
+{
+	struct nvkm_i2c_bus *bus = *pbus;
+	if (bus && !WARN_ON(!bus->func)) {
+		BUS_TRACE(bus, "dtor");
+		list_del(&bus->head);
+		i2c_del_adapter(&bus->i2c);
+		kfree(bus->i2c.algo_data);
+		kfree(*pbus);
+		*pbus = NULL;
+	}
+}
+
+int
+nvkm_i2c_bus_ctor(const struct nvkm_i2c_bus_func *func,
+		  struct nvkm_i2c_pad *pad, int id,
+		  struct nvkm_i2c_bus *bus)
+{
+	struct nvkm_device *device = pad->i2c->subdev.device;
+	struct i2c_algo_bit_data *bit;
+#ifndef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
+	const bool internal = false;
+#else
+	const bool internal = true;
+#endif
+	int ret;
+
+	bus->func = func;
+	bus->pad = pad;
+	bus->id = id;
+	mutex_init(&bus->mutex);
+	list_add_tail(&bus->head, &pad->i2c->bus);
+	BUS_TRACE(bus, "ctor");
+
+	snprintf(bus->i2c.name, sizeof(bus->i2c.name), "nvkm-%s-bus-%04x",
+		 dev_name(device->dev), id);
+	bus->i2c.owner = THIS_MODULE;
+	bus->i2c.dev.parent = device->dev;
+
+	if ( bus->func->drive_scl &&
+	    !nvkm_boolopt(device->cfgopt, "NvI2C", internal)) {
+		if (!(bit = kzalloc(sizeof(*bit), GFP_KERNEL)))
+			return -ENOMEM;
+		bit->udelay = 10;
+		bit->timeout = usecs_to_jiffies(2200);
+		bit->data = bus;
+		bit->pre_xfer = nvkm_i2c_bus_pre_xfer;
+		bit->post_xfer = nvkm_i2c_bus_post_xfer;
+		bit->setscl = nvkm_i2c_bus_setscl;
+		bit->setsda = nvkm_i2c_bus_setsda;
+		bit->getscl = nvkm_i2c_bus_getscl;
+		bit->getsda = nvkm_i2c_bus_getsda;
+		bus->i2c.algo_data = bit;
+		ret = i2c_bit_add_bus(&bus->i2c);
+	} else {
+		bus->i2c.algo = &nvkm_i2c_bus_algo;
+		ret = i2c_add_adapter(&bus->i2c);
+	}
+
+	return ret;
+}
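+
+/* Whether a bus uses the kernel's i2c-algo-bit or the driver's own
+ * algorithm is selected above via the "NvI2C" option, whose compile-time
+ * default comes from CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT.
+ */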
+
+int
+nvkm_i2c_bus_new_(const struct nvkm_i2c_bus_func *func,
+		  struct nvkm_i2c_pad *pad, int id,
+		  struct nvkm_i2c_bus **pbus)
+{
+	if (!(*pbus = kzalloc(sizeof(**pbus), GFP_KERNEL)))
+		return -ENOMEM;
+	return nvkm_i2c_bus_ctor(func, pad, id, *pbus);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
new file mode 100644
index 0000000..e1be14c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
@@ -0,0 +1,37 @@
+#ifndef __NVKM_I2C_BUS_H__
+#define __NVKM_I2C_BUS_H__
+#include "pad.h"
+
+struct nvkm_i2c_bus_func {
+	void (*init)(struct nvkm_i2c_bus *);
+	void (*drive_scl)(struct nvkm_i2c_bus *, int state);
+	void (*drive_sda)(struct nvkm_i2c_bus *, int state);
+	int (*sense_scl)(struct nvkm_i2c_bus *);
+	int (*sense_sda)(struct nvkm_i2c_bus *);
+	int (*xfer)(struct nvkm_i2c_bus *, struct i2c_msg *, int num);
+};
+
+int nvkm_i2c_bus_ctor(const struct nvkm_i2c_bus_func *, struct nvkm_i2c_pad *,
+		      int id, struct nvkm_i2c_bus *);
+int nvkm_i2c_bus_new_(const struct nvkm_i2c_bus_func *, struct nvkm_i2c_pad *,
+		      int id, struct nvkm_i2c_bus **);
+void nvkm_i2c_bus_del(struct nvkm_i2c_bus **);
+void nvkm_i2c_bus_init(struct nvkm_i2c_bus *);
+
+int nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *, struct i2c_msg *, int);
+
+int nv04_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, u8,
+		     struct nvkm_i2c_bus **);
+
+int nv4e_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_bus **);
+int nv50_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_bus **);
+int gf119_i2c_bus_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_bus **);
+
+#define BUS_MSG(b,l,f,a...) do {                                               \
+	struct nvkm_i2c_bus *_bus = (b);                                       \
+	nvkm_##l(&_bus->pad->i2c->subdev, "bus %04x: "f"\n", _bus->id, ##a);   \
+} while(0)
+#define BUS_ERR(b,f,a...) BUS_MSG((b), error, f, ##a)
+#define BUS_DBG(b,f,a...) BUS_MSG((b), debug, f, ##a)
+#define BUS_TRACE(b,f,a...) BUS_MSG((b), trace, f, ##a)
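+
+/* For example, BUS_DBG(bus, "probing %ss", what) expands to a
+ * nvkm_debug() call on the owning subdev, prefixed with the bus id
+ * printed in hex.
+ */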
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c
new file mode 100644
index 0000000..96bbdda
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busgf119.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define gf119_i2c_bus(p) container_of((p), struct gf119_i2c_bus, base)
+#include "bus.h"
+
+struct gf119_i2c_bus {
+	struct nvkm_i2c_bus base;
+	u32 addr;
+};
+
+static void
+gf119_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
+{
+	struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_mask(device, bus->addr, 0x00000001, state ? 0x00000001 : 0);
+}
+
+static void
+gf119_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
+{
+	struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_mask(device, bus->addr, 0x00000002, state ? 0x00000002 : 0);
+}
+
+static int
+gf119_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
+{
+	struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00000010);
+}
+
+static int
+gf119_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
+{
+	struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00000020);
+}
+
+static void
+gf119_i2c_bus_init(struct nvkm_i2c_bus *base)
+{
+	struct gf119_i2c_bus *bus = gf119_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_wr32(device, bus->addr, 0x00000007);
+}
+
+static const struct nvkm_i2c_bus_func
+gf119_i2c_bus_func = {
+	.init = gf119_i2c_bus_init,
+	.drive_scl = gf119_i2c_bus_drive_scl,
+	.drive_sda = gf119_i2c_bus_drive_sda,
+	.sense_scl = gf119_i2c_bus_sense_scl,
+	.sense_sda = gf119_i2c_bus_sense_sda,
+	.xfer = nvkm_i2c_bit_xfer,
+};
+
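+/* Register layout, per the accessors above: bits 0/1 drive SCL/SDA, bits
+ * 4/5 sense them, and init writes 0x00000007 to drive both lines high
+ * (bit 2 presumably an enable).  Each bus occupies a 0x20-byte stride
+ * from 0x00d014, as set up below.
+ */
+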
+int
+gf119_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
+		 struct nvkm_i2c_bus **pbus)
+{
+	struct gf119_i2c_bus *bus;
+
+	if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &bus->base;
+
+	nvkm_i2c_bus_ctor(&gf119_i2c_bus_func, pad, id, &bus->base);
+	bus->addr = 0x00d014 + (drive * 0x20);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c
new file mode 100644
index 0000000..a58db15
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv04.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define nv04_i2c_bus(p) container_of((p), struct nv04_i2c_bus, base)
+#include "bus.h"
+
+#include <subdev/vga.h>
+
+struct nv04_i2c_bus {
+	struct nvkm_i2c_bus base;
+	u8 drive;
+	u8 sense;
+};
+
+static void
+nv04_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	u8 val = nvkm_rdvgac(device, 0, bus->drive);
+	if (state) val |= 0x20;
+	else	   val &= 0xdf;
+	nvkm_wrvgac(device, 0, bus->drive, val | 0x01);
+}
+
+static void
+nv04_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	u8 val = nvkm_rdvgac(device, 0, bus->drive);
+	if (state) val |= 0x10;
+	else	   val &= 0xef;
+	nvkm_wrvgac(device, 0, bus->drive, val | 0x01);
+}
+
+static int
+nv04_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
+{
+	struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rdvgac(device, 0, bus->sense) & 0x04);
+}
+
+static int
+nv04_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
+{
+	struct nv04_i2c_bus *bus = nv04_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rdvgac(device, 0, bus->sense) & 0x08);
+}
+
+static const struct nvkm_i2c_bus_func
+nv04_i2c_bus_func = {
+	.drive_scl = nv04_i2c_bus_drive_scl,
+	.drive_sda = nv04_i2c_bus_drive_sda,
+	.sense_scl = nv04_i2c_bus_sense_scl,
+	.sense_sda = nv04_i2c_bus_sense_sda,
+	.xfer = nvkm_i2c_bit_xfer,
+};
+
+int
+nv04_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive, u8 sense,
+		 struct nvkm_i2c_bus **pbus)
+{
+	struct nv04_i2c_bus *bus;
+
+	if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &bus->base;
+
+	nvkm_i2c_bus_ctor(&nv04_i2c_bus_func, pad, id, &bus->base);
+	bus->drive = drive;
+	bus->sense = sense;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c
new file mode 100644
index 0000000..cdd73dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv4e.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define nv4e_i2c_bus(p) container_of((p), struct nv4e_i2c_bus, base)
+#include "bus.h"
+
+struct nv4e_i2c_bus {
+	struct nvkm_i2c_bus base;
+	u32 addr;
+};
+
+static void
+nv4e_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_mask(device, bus->addr, 0x2f, state ? 0x21 : 0x01);
+}
+
+static void
+nv4e_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_mask(device, bus->addr, 0x1f, state ? 0x11 : 0x01);
+}
+
+static int
+nv4e_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
+{
+	struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00040000);
+}
+
+static int
+nv4e_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
+{
+	struct nv4e_i2c_bus *bus = nv4e_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00080000);
+}
+
+static const struct nvkm_i2c_bus_func
+nv4e_i2c_bus_func = {
+	.drive_scl = nv4e_i2c_bus_drive_scl,
+	.drive_sda = nv4e_i2c_bus_drive_sda,
+	.sense_scl = nv4e_i2c_bus_sense_scl,
+	.sense_sda = nv4e_i2c_bus_sense_sda,
+	.xfer = nvkm_i2c_bit_xfer,
+};
+
+int
+nv4e_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
+		 struct nvkm_i2c_bus **pbus)
+{
+	struct nv4e_i2c_bus *bus;
+
+	if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &bus->base;
+
+	nvkm_i2c_bus_ctor(&nv4e_i2c_bus_func, pad, id, &bus->base);
+	bus->addr = 0x600800 + drive;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c
new file mode 100644
index 0000000..8db8399
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/busnv50.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#define nv50_i2c_bus(p) container_of((p), struct nv50_i2c_bus, base)
+#include "bus.h"
+
+#include <subdev/vga.h>
+
+struct nv50_i2c_bus {
+	struct nvkm_i2c_bus base;
+	u32 addr;
+	u32 data;
+};
+
+static void
+nv50_i2c_bus_drive_scl(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	if (state) bus->data |= 0x01;
+	else	   bus->data &= 0xfe;
+	nvkm_wr32(device, bus->addr, bus->data);
+}
+
+static void
+nv50_i2c_bus_drive_sda(struct nvkm_i2c_bus *base, int state)
+{
+	struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	if (state) bus->data |= 0x02;
+	else	   bus->data &= 0xfd;
+	nvkm_wr32(device, bus->addr, bus->data);
+}
+
+static int
+nv50_i2c_bus_sense_scl(struct nvkm_i2c_bus *base)
+{
+	struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00000001);
+}
+
+static int
+nv50_i2c_bus_sense_sda(struct nvkm_i2c_bus *base)
+{
+	struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	return !!(nvkm_rd32(device, bus->addr) & 0x00000002);
+}
+
+static void
+nv50_i2c_bus_init(struct nvkm_i2c_bus *base)
+{
+	struct nv50_i2c_bus *bus = nv50_i2c_bus(base);
+	struct nvkm_device *device = bus->base.pad->i2c->subdev.device;
+	nvkm_wr32(device, bus->addr, (bus->data = 0x00000007));
+}
+
+static const struct nvkm_i2c_bus_func
+nv50_i2c_bus_func = {
+	.init = nv50_i2c_bus_init,
+	.drive_scl = nv50_i2c_bus_drive_scl,
+	.drive_sda = nv50_i2c_bus_drive_sda,
+	.sense_scl = nv50_i2c_bus_sense_scl,
+	.sense_sda = nv50_i2c_bus_sense_sda,
+	.xfer = nvkm_i2c_bit_xfer,
+};
+
+int
+nv50_i2c_bus_new(struct nvkm_i2c_pad *pad, int id, u8 drive,
+		 struct nvkm_i2c_bus **pbus)
+{
+	static const u32 addr[] = {
+		0x00e138, 0x00e150, 0x00e168, 0x00e180,
+		0x00e254, 0x00e274, 0x00e764, 0x00e780,
+		0x00e79c, 0x00e7b8
+	};
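+	/* nv50 has no regular register stride; the table above maps the
+	 * DCB drive index to the fixed per-bus offsets.
+	 */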
+	struct nv50_i2c_bus *bus;
+
+	if (drive >= ARRAY_SIZE(addr)) {
+		nvkm_warn(&pad->i2c->subdev, "bus %d unknown\n", drive);
+		return -ENODEV;
+	}
+
+	if (!(bus = kzalloc(sizeof(*bus), GFP_KERNEL)))
+		return -ENOMEM;
+	*pbus = &bus->base;
+
+	nvkm_i2c_bus_ctor(&nv50_i2c_bus_func, pad, id, &bus->base);
+	bus->addr = addr[drive];
+	bus->data = 0x00000007;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
index 2a2dd47..bb2a31d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c
@@ -21,26 +21,29 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
+#include "priv.h"
+#include "pad.h"
 
 void
 g94_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
 {
-	u32 intr = nv_rd32(i2c, 0x00e06c);
-	u32 stat = nv_rd32(i2c, 0x00e068) & intr, i;
+	struct nvkm_device *device = i2c->subdev.device;
+	u32 intr = nvkm_rd32(device, 0x00e06c);
+	u32 stat = nvkm_rd32(device, 0x00e068) & intr, i;
 	for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
 		if ((stat & (1 << (i * 4)))) *hi |= 1 << i;
 		if ((stat & (2 << (i * 4)))) *lo |= 1 << i;
 		if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
 		if ((stat & (8 << (i * 4)))) *tx |= 1 << i;
 	}
-	nv_wr32(i2c, 0x00e06c, intr);
+	nvkm_wr32(device, 0x00e06c, intr);
 }
 
 void
 g94_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
 {
-	u32 temp = nv_rd32(i2c, 0x00e068), i;
+	struct nvkm_device *device = i2c->subdev.device;
+	u32 temp = nvkm_rd32(device, 0x00e068), i;
 	for (i = 0; i < 8; i++) {
 		if (mask & (1 << i)) {
 			if (!(data & (1 << i))) {
@@ -50,230 +53,20 @@
 			temp |= type << (i * 4);
 		}
 	}
-	nv_wr32(i2c, 0x00e068, temp);
-}
-
-#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
-#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
-
-static void
-auxch_fini(struct nvkm_i2c *aux, int ch)
-{
-	nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
-}
-
-static int
-auxch_init(struct nvkm_i2c *aux, int ch)
-{
-	const u32 unksel = 1; /* nfi which to use, or if it matters.. */
-	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
-	const u32 urep = unksel ? 0x01000000 : 0x02000000;
-	u32 ctrl, timeout;
-
-	/* wait up to 1ms for any previous transaction to be done... */
-	timeout = 1000;
-	do {
-		ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
-		udelay(1);
-		if (!timeout--) {
-			AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
-			return -EBUSY;
-		}
-	} while (ctrl & 0x03010000);
-
-	/* set some magic, and wait up to 1ms for it to appear */
-	nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
-	timeout = 1000;
-	do {
-		ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
-		udelay(1);
-		if (!timeout--) {
-			AUX_ERR("magic wait 0x%08x\n", ctrl);
-			auxch_fini(aux, ch);
-			return -EBUSY;
-		}
-	} while ((ctrl & 0x03000000) != urep);
-
-	return 0;
-}
-
-int
-g94_aux(struct nvkm_i2c_port *base, bool retry,
-	 u8 type, u32 addr, u8 *data, u8 size)
-{
-	struct nvkm_i2c *aux = nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	u32 ctrl, stat, timeout, retries;
-	u32 xbuf[4] = {};
-	int ch = port->addr;
-	int ret, i;
-
-	AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
-
-	ret = auxch_init(aux, ch);
-	if (ret)
-		goto out;
-
-	stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50));
-	if (!(stat & 0x10000000)) {
-		AUX_DBG("sink not detected\n");
-		ret = -ENXIO;
-		goto out;
-	}
-
-	if (!(type & 1)) {
-		memcpy(xbuf, data, size);
-		for (i = 0; i < 16; i += 4) {
-			AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
-			nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
-		}
-	}
-
-	ctrl  = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
-	ctrl &= ~0x0001f0ff;
-	ctrl |= type << 12;
-	ctrl |= size - 1;
-	nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);
-
-	/* (maybe) retry transaction a number of times on failure... */
-	for (retries = 0; !ret && retries < 32; retries++) {
-		/* reset, and delay a while if this is a retry */
-		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
-		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
-		if (retries)
-			udelay(400);
-
-		/* transaction request, wait up to 1ms for it to complete */
-		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
-
-		timeout = 1000;
-		do {
-			ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
-			udelay(1);
-			if (!timeout--) {
-				AUX_ERR("tx req timeout 0x%08x\n", ctrl);
-				ret = -EIO;
-				goto out;
-			}
-		} while (ctrl & 0x00010000);
-		ret = 1;
-
-		/* read status, and check if transaction completed ok */
-		stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
-		if ((stat & 0x000f0000) == 0x00080000 ||
-		    (stat & 0x000f0000) == 0x00020000)
-			ret = retry ? 0 : 1;
-		if ((stat & 0x00000100))
-			ret = -ETIMEDOUT;
-		if ((stat & 0x00000e00))
-			ret = -EIO;
-
-		AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
-	}
-
-	if (type & 1) {
-		for (i = 0; i < 16; i += 4) {
-			xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i);
-			AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
-		}
-		memcpy(data, xbuf, size);
-	}
-
-out:
-	auxch_fini(aux, ch);
-	return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+	nvkm_wr32(device, 0x00e068, temp);
 }
 
 static const struct nvkm_i2c_func
-g94_i2c_func = {
-	.drive_scl = nv50_i2c_drive_scl,
-	.drive_sda = nv50_i2c_drive_sda,
-	.sense_scl = nv50_i2c_sense_scl,
-	.sense_sda = nv50_i2c_sense_sda,
-};
-
-static int
-g94_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 index,
-		  struct nvkm_object **pobject)
-{
-	struct dcb_i2c_entry *info = data;
-	struct nv50_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_bit_algo, &g94_i2c_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	if (info->drive >= nv50_i2c_addr_nr)
-		return -EINVAL;
-
-	port->state = 7;
-	port->addr = nv50_i2c_addr[info->drive];
-	return 0;
-}
-
-static const struct nvkm_i2c_func
-g94_aux_func = {
-	.aux       = g94_aux,
-};
-
-int
-g94_aux_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 index,
-		  struct nvkm_object **pobject)
-{
-	struct dcb_i2c_entry *info = data;
-	struct nv50_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_aux_algo, &g94_aux_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	port->base.aux = info->auxch;
-	port->addr = info->auxch;
-	return 0;
-}
-
-static struct nvkm_oclass
-g94_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = g94_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = nv50_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = g94_aux_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = _nvkm_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-g94_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0x94),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = g94_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-	.pad_s = &g94_i2c_pad_oclass,
+g94_i2c = {
+	.pad_x_new = g94_i2c_pad_x_new,
+	.pad_s_new = g94_i2c_pad_s_new,
 	.aux = 4,
 	.aux_stat = g94_aux_stat,
 	.aux_mask = g94_aux_mask,
-}.base;
+};
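+
+/* With the rework, each chip supplies only this small function table;
+ * the common nvkm_i2c_new_() constructor does the rest.
+ */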
+
+int
+g94_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+	return nvkm_i2c_new_(&g94_i2c, device, index, pi2c);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c
deleted file mode 100644
index 4d4ac66..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf110.c
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv50.h"
-
-static int
-gf110_i2c_sense_scl(struct nvkm_i2c_port *base)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00000010);
-}
-
-static int
-gf110_i2c_sense_sda(struct nvkm_i2c_port *base)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00000020);
-}
-
-static const struct nvkm_i2c_func
-gf110_i2c_func = {
-	.drive_scl = nv50_i2c_drive_scl,
-	.drive_sda = nv50_i2c_drive_sda,
-	.sense_scl = gf110_i2c_sense_scl,
-	.sense_sda = gf110_i2c_sense_sda,
-};
-
-int
-gf110_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 index,
-		    struct nvkm_object **pobject)
-{
-	struct dcb_i2c_entry *info = data;
-	struct nv50_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_bit_algo, &gf110_i2c_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	port->state = 0x00000007;
-	port->addr = 0x00d014 + (info->drive * 0x20);
-	return 0;
-}
-
-struct nvkm_oclass
-gf110_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = gf110_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = nv50_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = g94_aux_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = _nvkm_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-gf110_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0xd0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = gf110_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-	.pad_s = &g94_i2c_pad_oclass,
-	.aux = 4,
-	.aux_stat = g94_aux_stat,
-	.aux_mask = g94_aux_mask,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
index e290b40..ae4aad3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf117.c
@@ -21,18 +21,16 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
+#include "priv.h"
+#include "pad.h"
 
-struct nvkm_oclass *
-gf117_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0xd7),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = gf110_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-	.pad_s = &nv04_i2c_pad_oclass,
-}.base;
+static const struct nvkm_i2c_func
+gf117_i2c = {
+	.pad_x_new = gf119_i2c_pad_x_new,
+};
+
+int
+gf117_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+	return nvkm_i2c_new_(&gf117_i2c, device, index, pi2c);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
new file mode 100644
index 0000000..6f2b02a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gf119.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "pad.h"
+
+static const struct nvkm_i2c_func
+gf119_i2c = {
+	.pad_x_new = gf119_i2c_pad_x_new,
+	.pad_s_new = gf119_i2c_pad_s_new,
+	.aux = 4,
+	.aux_stat = g94_aux_stat,
+	.aux_mask = g94_aux_mask,
+};
+
+int
+gf119_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+	return nvkm_i2c_new_(&gf119_i2c, device, index, pi2c);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
index 1a46490..f9f6bf4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c
@@ -21,26 +21,29 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
+#include "priv.h"
+#include "pad.h"
 
 void
 gk104_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
 {
-	u32 intr = nv_rd32(i2c, 0x00dc60);
-	u32 stat = nv_rd32(i2c, 0x00dc68) & intr, i;
+	struct nvkm_device *device = i2c->subdev.device;
+	u32 intr = nvkm_rd32(device, 0x00dc60);
+	u32 stat = nvkm_rd32(device, 0x00dc68) & intr, i;
 	for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
 		if ((stat & (1 << (i * 4)))) *hi |= 1 << i;
 		if ((stat & (2 << (i * 4)))) *lo |= 1 << i;
 		if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
 		if ((stat & (8 << (i * 4)))) *tx |= 1 << i;
 	}
-	nv_wr32(i2c, 0x00dc60, intr);
+	nvkm_wr32(device, 0x00dc60, intr);
 }
 
 void
 gk104_aux_mask(struct nvkm_i2c *i2c, u32 type, u32 mask, u32 data)
 {
-	u32 temp = nv_rd32(i2c, 0x00dc68), i;
+	struct nvkm_device *device = i2c->subdev.device;
+	u32 temp = nvkm_rd32(device, 0x00dc68), i;
 	for (i = 0; i < 8; i++) {
 		if (mask & (1 << i)) {
 			if (!(data & (1 << i))) {
@@ -50,22 +53,20 @@
 			temp |= type << (i * 4);
 		}
 	}
-	nv_wr32(i2c, 0x00dc68, temp);
+	nvkm_wr32(device, 0x00dc68, temp);
 }
 
-struct nvkm_oclass *
-gk104_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0xe0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = gf110_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-	.pad_s = &g94_i2c_pad_oclass,
+static const struct nvkm_i2c_func
+gk104_i2c = {
+	.pad_x_new = gf119_i2c_pad_x_new,
+	.pad_s_new = gf119_i2c_pad_s_new,
 	.aux = 4,
 	.aux_stat = gk104_aux_stat,
 	.aux_mask = gk104_aux_mask,
-}.base;
+};
+
+int
+gk104_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+	return nvkm_i2c_new_(&gk104_i2c, device, index, pi2c);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
index ab64237..ff9f7d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm204.c
@@ -21,199 +21,20 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
-
-#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
-#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
-
-static void
-auxch_fini(struct nvkm_i2c *aux, int ch)
-{
-	nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00310000, 0x00000000);
-}
-
-static int
-auxch_init(struct nvkm_i2c *aux, int ch)
-{
-	const u32 unksel = 1; /* nfi which to use, or if it matters.. */
-	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
-	const u32 urep = unksel ? 0x01000000 : 0x02000000;
-	u32 ctrl, timeout;
-
-	/* wait up to 1ms for any previous transaction to be done... */
-	timeout = 1000;
-	do {
-		ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
-		udelay(1);
-		if (!timeout--) {
-			AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
-			return -EBUSY;
-		}
-	} while (ctrl & 0x03010000);
-
-	/* set some magic, and wait up to 1ms for it to appear */
-	nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00300000, ureq);
-	timeout = 1000;
-	do {
-		ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
-		udelay(1);
-		if (!timeout--) {
-			AUX_ERR("magic wait 0x%08x\n", ctrl);
-			auxch_fini(aux, ch);
-			return -EBUSY;
-		}
-	} while ((ctrl & 0x03000000) != urep);
-
-	return 0;
-}
-
-int
-gm204_aux(struct nvkm_i2c_port *base, bool retry,
-	 u8 type, u32 addr, u8 *data, u8 size)
-{
-	struct nvkm_i2c *aux = nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	u32 ctrl, stat, timeout, retries;
-	u32 xbuf[4] = {};
-	int ch = port->addr;
-	int ret, i;
-
-	AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
-
-	ret = auxch_init(aux, ch);
-	if (ret)
-		goto out;
-
-	stat = nv_rd32(aux, 0x00d958 + (ch * 0x50));
-	if (!(stat & 0x10000000)) {
-		AUX_DBG("sink not detected\n");
-		ret = -ENXIO;
-		goto out;
-	}
-
-	if (!(type & 1)) {
-		memcpy(xbuf, data, size);
-		for (i = 0; i < 16; i += 4) {
-			AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
-			nv_wr32(aux, 0x00d930 + (ch * 0x50) + i, xbuf[i / 4]);
-		}
-	}
-
-	ctrl  = nv_rd32(aux, 0x00d954 + (ch * 0x50));
-	ctrl &= ~0x0001f0ff;
-	ctrl |= type << 12;
-	ctrl |= size - 1;
-	nv_wr32(aux, 0x00d950 + (ch * 0x50), addr);
-
-	/* (maybe) retry transaction a number of times on failure... */
-	for (retries = 0; !ret && retries < 32; retries++) {
-		/* reset, and delay a while if this is a retry */
-		nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x80000000 | ctrl);
-		nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00000000 | ctrl);
-		if (retries)
-			udelay(400);
-
-		/* transaction request, wait up to 1ms for it to complete */
-		nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00010000 | ctrl);
-
-		timeout = 1000;
-		do {
-			ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
-			udelay(1);
-			if (!timeout--) {
-				AUX_ERR("tx req timeout 0x%08x\n", ctrl);
-				ret = -EIO;
-				goto out;
-			}
-		} while (ctrl & 0x00010000);
-		ret = 1;
-
-		/* read status, and check if transaction completed ok */
-		stat = nv_mask(aux, 0x00d958 + (ch * 0x50), 0, 0);
-		if ((stat & 0x000f0000) == 0x00080000 ||
-		    (stat & 0x000f0000) == 0x00020000)
-			ret = retry ? 0 : 1;
-		if ((stat & 0x00000100))
-			ret = -ETIMEDOUT;
-		if ((stat & 0x00000e00))
-			ret = -EIO;
-
-		AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
-	}
-
-	if (type & 1) {
-		for (i = 0; i < 16; i += 4) {
-			xbuf[i / 4] = nv_rd32(aux, 0x00d940 + (ch * 0x50) + i);
-			AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
-		}
-		memcpy(data, xbuf, size);
-	}
-
-out:
-	auxch_fini(aux, ch);
-	return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
-}
+#include "priv.h"
+#include "pad.h"
 
 static const struct nvkm_i2c_func
-gm204_aux_func = {
-	.aux       = gm204_aux,
-};
-
-int
-gm204_aux_port_ctor(struct nvkm_object *parent,
-		    struct nvkm_object *engine,
-		    struct nvkm_oclass *oclass, void *data, u32 index,
-		    struct nvkm_object **pobject)
-{
-	struct dcb_i2c_entry *info = data;
-	struct nv50_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_aux_algo, &gm204_aux_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	port->base.aux = info->auxch;
-	port->addr = info->auxch;
-	return 0;
-}
-
-struct nvkm_oclass
-gm204_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = gf110_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = nv50_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = gm204_aux_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = _nvkm_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-gm204_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0x24),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = gm204_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-	.pad_s = &gm204_i2c_pad_oclass,
+gm204_i2c = {
+	.pad_x_new = gf119_i2c_pad_x_new,
+	.pad_s_new = gm204_i2c_pad_s_new,
 	.aux = 8,
 	.aux_stat = gk104_aux_stat,
 	.aux_mask = gk104_aux_mask,
-}.base;
+};
+
+int
+gm204_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+	return nvkm_i2c_new_(&gm204_i2c, device, index, pi2c);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
index 4cdf1c4..18776f4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv04.c
@@ -22,107 +22,15 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
-
-#include <subdev/vga.h>
-
-struct nv04_i2c_priv {
-	struct nvkm_i2c base;
-};
-
-struct nv04_i2c_port {
-	struct nvkm_i2c_port base;
-	u8 drive;
-	u8 sense;
-};
-
-static void
-nv04_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
-{
-	struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv04_i2c_port *port = (void *)base;
-	u8 val = nv_rdvgac(priv, 0, port->drive);
-	if (state) val |= 0x20;
-	else	   val &= 0xdf;
-	nv_wrvgac(priv, 0, port->drive, val | 0x01);
-}
-
-static void
-nv04_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
-{
-	struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv04_i2c_port *port = (void *)base;
-	u8 val = nv_rdvgac(priv, 0, port->drive);
-	if (state) val |= 0x10;
-	else	   val &= 0xef;
-	nv_wrvgac(priv, 0, port->drive, val | 0x01);
-}
-
-static int
-nv04_i2c_sense_scl(struct nvkm_i2c_port *base)
-{
-	struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv04_i2c_port *port = (void *)base;
-	return !!(nv_rdvgac(priv, 0, port->sense) & 0x04);
-}
-
-static int
-nv04_i2c_sense_sda(struct nvkm_i2c_port *base)
-{
-	struct nv04_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv04_i2c_port *port = (void *)base;
-	return !!(nv_rdvgac(priv, 0, port->sense) & 0x08);
-}
+#include "pad.h"
 
 static const struct nvkm_i2c_func
-nv04_i2c_func = {
-	.drive_scl = nv04_i2c_drive_scl,
-	.drive_sda = nv04_i2c_drive_sda,
-	.sense_scl = nv04_i2c_sense_scl,
-	.sense_sda = nv04_i2c_sense_sda,
+nv04_i2c = {
+	.pad_x_new = nv04_i2c_pad_new,
 };
 
-static int
-nv04_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 index,
-		   struct nvkm_object **pobject)
+int
+nv04_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
 {
-	struct dcb_i2c_entry *info = data;
-	struct nv04_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_bit_algo, &nv04_i2c_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	port->drive = info->drive;
-	port->sense = info->sense;
-	return 0;
+	return nvkm_i2c_new_(&nv04_i2c, device, index, pi2c);
 }
-
-static struct nvkm_oclass
-nv04_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV04_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = nv04_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = _nvkm_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-nv04_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = nv04_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
index 046fe5e..6b762f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv4e.c
@@ -22,99 +22,15 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
-
-#include <subdev/vga.h>
-
-struct nv4e_i2c_priv {
-	struct nvkm_i2c base;
-};
-
-struct nv4e_i2c_port {
-	struct nvkm_i2c_port base;
-	u32 addr;
-};
-
-static void
-nv4e_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
-{
-	struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv4e_i2c_port *port = (void *)base;
-	nv_mask(priv, port->addr, 0x2f, state ? 0x21 : 0x01);
-}
-
-static void
-nv4e_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
-{
-	struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv4e_i2c_port *port = (void *)base;
-	nv_mask(priv, port->addr, 0x1f, state ? 0x11 : 0x01);
-}
-
-static int
-nv4e_i2c_sense_scl(struct nvkm_i2c_port *base)
-{
-	struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv4e_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00040000);
-}
-
-static int
-nv4e_i2c_sense_sda(struct nvkm_i2c_port *base)
-{
-	struct nv4e_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv4e_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00080000);
-}
+#include "pad.h"
 
 static const struct nvkm_i2c_func
-nv4e_i2c_func = {
-	.drive_scl = nv4e_i2c_drive_scl,
-	.drive_sda = nv4e_i2c_drive_sda,
-	.sense_scl = nv4e_i2c_sense_scl,
-	.sense_sda = nv4e_i2c_sense_sda,
+nv4e_i2c = {
+	.pad_x_new = nv4e_i2c_pad_new,
 };
 
-static int
-nv4e_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 index,
-		   struct nvkm_object **pobject)
+int
+nv4e_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
 {
-	struct dcb_i2c_entry *info = data;
-	struct nv4e_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_bit_algo, &nv4e_i2c_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	port->addr = 0x600800 + info->drive;
-	return 0;
+	return nvkm_i2c_new_(&nv4e_i2c, device, index, pi2c);
 }
-
-static struct nvkm_oclass
-nv4e_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV4E_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = nv4e_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = _nvkm_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-nv4e_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0x4e),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = nv4e_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
index fba5b26..75640ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.c
@@ -21,113 +21,16 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv50.h"
-
-void
-nv50_i2c_drive_scl(struct nvkm_i2c_port *base, int state)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	if (state) port->state |= 0x01;
-	else	   port->state &= 0xfe;
-	nv_wr32(priv, port->addr, port->state);
-}
-
-void
-nv50_i2c_drive_sda(struct nvkm_i2c_port *base, int state)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	if (state) port->state |= 0x02;
-	else	   port->state &= 0xfd;
-	nv_wr32(priv, port->addr, port->state);
-}
-
-int
-nv50_i2c_sense_scl(struct nvkm_i2c_port *base)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00000001);
-}
-
-int
-nv50_i2c_sense_sda(struct nvkm_i2c_port *base)
-{
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(base);
-	struct nv50_i2c_port *port = (void *)base;
-	return !!(nv_rd32(priv, port->addr) & 0x00000002);
-}
+#include "priv.h"
+#include "pad.h"
 
 static const struct nvkm_i2c_func
-nv50_i2c_func = {
-	.drive_scl = nv50_i2c_drive_scl,
-	.drive_sda = nv50_i2c_drive_sda,
-	.sense_scl = nv50_i2c_sense_scl,
-	.sense_sda = nv50_i2c_sense_sda,
+nv50_i2c = {
+	.pad_x_new = nv50_i2c_pad_new,
 };
 
-const u32 nv50_i2c_addr[] = {
-	0x00e138, 0x00e150, 0x00e168, 0x00e180,
-	0x00e254, 0x00e274, 0x00e764, 0x00e780,
-	0x00e79c, 0x00e7b8
-};
-const int nv50_i2c_addr_nr = ARRAY_SIZE(nv50_i2c_addr);
-
-static int
-nv50_i2c_port_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 index,
-		   struct nvkm_object **pobject)
-{
-	struct dcb_i2c_entry *info = data;
-	struct nv50_i2c_port *port;
-	int ret;
-
-	ret = nvkm_i2c_port_create(parent, engine, oclass, index,
-				   &nvkm_i2c_bit_algo, &nv50_i2c_func, &port);
-	*pobject = nv_object(port);
-	if (ret)
-		return ret;
-
-	if (info->drive >= nv50_i2c_addr_nr)
-		return -EINVAL;
-
-	port->state = 0x00000007;
-	port->addr = nv50_i2c_addr[info->drive];
-	return 0;
-}
-
 int
-nv50_i2c_port_init(struct nvkm_object *object)
+nv50_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
 {
-	struct nv50_i2c_priv *priv = (void *)nvkm_i2c(object);
-	struct nv50_i2c_port *port = (void *)object;
-	nv_wr32(priv, port->addr, port->state);
-	return nvkm_i2c_port_init(&port->base);
+	return nvkm_i2c_new_(&nv50_i2c, device, index, pi2c);
 }
-
-static struct nvkm_oclass
-nv50_i2c_sclass[] = {
-	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
-	  .ofuncs = &(struct nvkm_ofuncs) {
-		  .ctor = nv50_i2c_port_ctor,
-		  .dtor = _nvkm_i2c_port_dtor,
-		  .init = nv50_i2c_port_init,
-		  .fini = _nvkm_i2c_port_fini,
-	  },
-	},
-	{}
-};
-
-struct nvkm_oclass *
-nv50_i2c_oclass = &(struct nvkm_i2c_impl) {
-	.base.handle = NV_SUBDEV(I2C, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_ctor,
-		.dtor = _nvkm_i2c_dtor,
-		.init = _nvkm_i2c_init,
-		.fini = _nvkm_i2c_fini,
-	},
-	.sclass = nv50_i2c_sclass,
-	.pad_x = &nv04_i2c_pad_oclass,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h
deleted file mode 100644
index b3139e7..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/nv50.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __NV50_I2C_H__
-#define __NV50_I2C_H__
-#include "priv.h"
-
-struct nv50_i2c_priv {
-	struct nvkm_i2c base;
-};
-
-struct nv50_i2c_port {
-	struct nvkm_i2c_port base;
-	u32 addr;
-	u32 state;
-};
-
-extern const u32 nv50_i2c_addr[];
-extern const int nv50_i2c_addr_nr;
-int  nv50_i2c_port_init(struct nvkm_object *);
-int  nv50_i2c_sense_scl(struct nvkm_i2c_port *);
-int  nv50_i2c_sense_sda(struct nvkm_i2c_port *);
-void nv50_i2c_drive_scl(struct nvkm_i2c_port *, int state);
-void nv50_i2c_drive_sda(struct nvkm_i2c_port *, int state);
-
-int  g94_aux_port_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-void g94_i2c_acquire(struct nvkm_i2c_port *);
-void g94_i2c_release(struct nvkm_i2c_port *);
-
-int  gf110_i2c_port_ctor(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, void *, u32,
-			struct nvkm_object **);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c
index a242eeb..2c5fcb9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2014 Red Hat Inc.
+ * Copyright 2015 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -19,65 +19,98 @@
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
- * Authors: Ben Skeggs
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
  */
 #include "pad.h"
 
-int
-_nvkm_i2c_pad_fini(struct nvkm_object *object, bool suspend)
+static void
+nvkm_i2c_pad_mode_locked(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
 {
-	struct nvkm_i2c_pad *pad = (void *)object;
-	DBG("-> NULL\n");
-	pad->port = NULL;
-	return nvkm_object_fini(&pad->base, suspend);
+	PAD_TRACE(pad, "-> %s", (mode == NVKM_I2C_PAD_AUX) ? "aux" :
+			      (mode == NVKM_I2C_PAD_I2C) ? "i2c" : "off");
+	if (pad->func->mode)
+		pad->func->mode(pad, mode);
+}
+
+void
+nvkm_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
+{
+	PAD_TRACE(pad, "mode %d", mode);
+	mutex_lock(&pad->mutex);
+	nvkm_i2c_pad_mode_locked(pad, mode);
+	pad->mode = mode;
+	mutex_unlock(&pad->mutex);
+}
+
+void
+nvkm_i2c_pad_release(struct nvkm_i2c_pad *pad)
+{
+	PAD_TRACE(pad, "release");
+	if (pad->mode == NVKM_I2C_PAD_OFF)
+		nvkm_i2c_pad_mode_locked(pad, pad->mode);
+	mutex_unlock(&pad->mutex);
 }
 
 int
-_nvkm_i2c_pad_init(struct nvkm_object *object)
+nvkm_i2c_pad_acquire(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
 {
-	struct nvkm_i2c_pad *pad = (void *)object;
-	DBG("-> PORT:%02x\n", pad->next->index);
-	pad->port = pad->next;
-	return nvkm_object_init(&pad->base);
-}
-
-int
-nvkm_i2c_pad_create_(struct nvkm_object *parent,
-		     struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, int index,
-		     int size, void **pobject)
-{
-	struct nvkm_i2c *i2c = nvkm_i2c(parent);
-	struct nvkm_i2c_port *port;
-	struct nvkm_i2c_pad *pad;
-	int ret;
-
-	list_for_each_entry(port, &i2c->ports, head) {
-		pad = nvkm_i2c_pad(port);
-		if (pad->index == index) {
-			atomic_inc(&nv_object(pad)->refcount);
-			*pobject = pad;
-			return 1;
+	PAD_TRACE(pad, "acquire");
+	mutex_lock(&pad->mutex);
+	if (pad->mode != mode) {
+		if (pad->mode != NVKM_I2C_PAD_OFF) {
+			mutex_unlock(&pad->mutex);
+			return -EBUSY;
 		}
+		nvkm_i2c_pad_mode_locked(pad, mode);
 	}
-
-	ret = nvkm_object_create_(parent, engine, oclass, 0, size, pobject);
-	pad = *pobject;
-	if (ret)
-		return ret;
-
-	pad->index = index;
 	return 0;
 }
 
-int
-_nvkm_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 index,
-		   struct nvkm_object **pobject)
+void
+nvkm_i2c_pad_fini(struct nvkm_i2c_pad *pad)
 {
-	struct nvkm_i2c_pad *pad;
-	int ret;
-	ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
-	*pobject = nv_object(pad);
-	return ret;
+	PAD_TRACE(pad, "fini");
+	nvkm_i2c_pad_mode_locked(pad, NVKM_I2C_PAD_OFF);
+}
+
+void
+nvkm_i2c_pad_init(struct nvkm_i2c_pad *pad)
+{
+	PAD_TRACE(pad, "init");
+	nvkm_i2c_pad_mode_locked(pad, pad->mode);
+}
+
+void
+nvkm_i2c_pad_del(struct nvkm_i2c_pad **ppad)
+{
+	struct nvkm_i2c_pad *pad = *ppad;
+	if (pad) {
+		PAD_TRACE(pad, "dtor");
+		list_del(&pad->head);
+		kfree(pad);
+		pad = NULL;
+	}
+}
+
+void
+nvkm_i2c_pad_ctor(const struct nvkm_i2c_pad_func *func, struct nvkm_i2c *i2c,
+		  int id, struct nvkm_i2c_pad *pad)
+{
+	pad->func = func;
+	pad->i2c = i2c;
+	pad->id = id;
+	pad->mode = NVKM_I2C_PAD_OFF;
+	mutex_init(&pad->mutex);
+	list_add_tail(&pad->head, &i2c->pad);
+	PAD_TRACE(pad, "ctor");
+}
+
+int
+nvkm_i2c_pad_new_(const struct nvkm_i2c_pad_func *func, struct nvkm_i2c *i2c,
+		  int id, struct nvkm_i2c_pad **ppad)
+{
+	if (!(*ppad = kzalloc(sizeof(**ppad), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_i2c_pad_ctor(func, i2c, id, *ppad);
+	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
index f3422cc..9eeb992 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
@@ -1,56 +1,67 @@
 #ifndef __NVKM_I2C_PAD_H__
 #define __NVKM_I2C_PAD_H__
-#include "priv.h"
+#include <subdev/i2c.h>
 
 struct nvkm_i2c_pad {
-	struct nvkm_object base;
-	int index;
-	struct nvkm_i2c_port *port;
-	struct nvkm_i2c_port *next;
+	const struct nvkm_i2c_pad_func *func;
+	struct nvkm_i2c *i2c;
+#define NVKM_I2C_PAD_HYBRID(n) /* 'n' is hw pad index */                     (n)
+#define NVKM_I2C_PAD_CCB(n) /* 'n' is ccb index */                 ((n) + 0x100)
+#define NVKM_I2C_PAD_EXT(n) /* 'n' is dcb external encoder type */ ((n) + 0x200)
+	int id;
+
+	enum nvkm_i2c_pad_mode {
+		NVKM_I2C_PAD_OFF,
+		NVKM_I2C_PAD_I2C,
+		NVKM_I2C_PAD_AUX,
+	} mode;
+	struct mutex mutex;
+	struct list_head head;
 };
 
-static inline struct nvkm_i2c_pad *
-nvkm_i2c_pad(struct nvkm_i2c_port *port)
-{
-	struct nvkm_object *pad = nv_object(port);
-	while (!nv_iclass(pad->parent, NV_SUBDEV_CLASS))
-		pad = pad->parent;
-	return (void *)pad;
-}
+struct nvkm_i2c_pad_func {
+	int (*bus_new_0)(struct nvkm_i2c_pad *, int id, u8 drive, u8 sense,
+			 struct nvkm_i2c_bus **);
+	int (*bus_new_4)(struct nvkm_i2c_pad *, int id, u8 drive,
+			 struct nvkm_i2c_bus **);
 
-#define nvkm_i2c_pad_create(p,e,o,i,d)                                         \
-	nvkm_i2c_pad_create_((p), (e), (o), (i), sizeof(**d), (void **)d)
-#define nvkm_i2c_pad_destroy(p) ({                                             \
-	struct nvkm_i2c_pad *_p = (p);                                         \
-	_nvkm_i2c_pad_dtor(nv_object(_p));                                     \
-})
-#define nvkm_i2c_pad_init(p) ({                                                \
-	struct nvkm_i2c_pad *_p = (p);                                         \
-	_nvkm_i2c_pad_init(nv_object(_p));                                     \
-})
-#define nvkm_i2c_pad_fini(p,s) ({                                              \
-	struct nvkm_i2c_pad *_p = (p);                                         \
-	_nvkm_i2c_pad_fini(nv_object(_p), (s));                                \
-})
+	int (*aux_new_6)(struct nvkm_i2c_pad *, int id, u8 drive,
+			 struct nvkm_i2c_aux **);
 
-int nvkm_i2c_pad_create_(struct nvkm_object *, struct nvkm_object *,
-			 struct nvkm_oclass *, int index, int, void **);
+	void (*mode)(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
+};
 
-int _nvkm_i2c_pad_ctor(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, void *, u32,
-		       struct nvkm_object **);
-#define _nvkm_i2c_pad_dtor nvkm_object_destroy
-int _nvkm_i2c_pad_init(struct nvkm_object *);
-int _nvkm_i2c_pad_fini(struct nvkm_object *, bool);
+void nvkm_i2c_pad_ctor(const struct nvkm_i2c_pad_func *, struct nvkm_i2c *,
+		       int id, struct nvkm_i2c_pad *);
+int nvkm_i2c_pad_new_(const struct nvkm_i2c_pad_func *, struct nvkm_i2c *,
+		      int id, struct nvkm_i2c_pad **);
+void nvkm_i2c_pad_del(struct nvkm_i2c_pad **);
+void nvkm_i2c_pad_init(struct nvkm_i2c_pad *);
+void nvkm_i2c_pad_fini(struct nvkm_i2c_pad *);
+void nvkm_i2c_pad_mode(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
+int nvkm_i2c_pad_acquire(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
+void nvkm_i2c_pad_release(struct nvkm_i2c_pad *);
 
-#ifndef MSG
-#define MSG(l,f,a...) do {                                                     \
-	struct nvkm_i2c_pad *_pad = (void *)pad;                               \
-	nv_##l(_pad, "PAD:%c:%02x: "f,                                         \
-	       _pad->index >= 0x100 ? 'X' : 'S',                               \
-	       _pad->index >= 0x100 ? _pad->index - 0x100 : _pad->index, ##a); \
+void g94_i2c_pad_mode(struct nvkm_i2c_pad *, enum nvkm_i2c_pad_mode);
+
+int nv04_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int nv4e_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int nv50_i2c_pad_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int g94_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gf119_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gm204_i2c_pad_x_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+
+int g94_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gf119_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+int gm204_i2c_pad_s_new(struct nvkm_i2c *, int, struct nvkm_i2c_pad **);
+
+int anx9805_pad_new(struct nvkm_i2c_bus *, int, u8, struct nvkm_i2c_pad **);
+
+#define PAD_MSG(p,l,f,a...) do {                                               \
+	struct nvkm_i2c_pad *_pad = (p);                                       \
+	nvkm_##l(&_pad->i2c->subdev, "pad %04x: "f"\n", _pad->id, ##a);        \
 } while(0)
-#define DBG(f,a...) MSG(debug, f, ##a)
-#define ERR(f,a...) MSG(error, f, ##a)
-#endif
+#define PAD_ERR(p,f,a...) PAD_MSG((p), error, f, ##a)
+#define PAD_DBG(p,f,a...) PAD_MSG((p), debug, f, ##a)
+#define PAD_TRACE(p,f,a...) PAD_MSG((p), trace, f, ##a)
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
index e9832f7..5904bc5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padg94.c
@@ -22,64 +22,55 @@
  * Authors: Ben Skeggs
  */
 #include "pad.h"
+#include "aux.h"
+#include "bus.h"
 
-struct g94_i2c_pad {
-	struct nvkm_i2c_pad base;
-	int addr;
-};
-
-static int
-g94_i2c_pad_fini(struct nvkm_object *object, bool suspend)
+void
+g94_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
 {
-	struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
-	struct g94_i2c_pad *pad = (void *)object;
-	nv_mask(i2c, 0x00e50c + pad->addr, 0x00000001, 0x00000001);
-	return nvkm_i2c_pad_fini(&pad->base, suspend);
-}
+	struct nvkm_subdev *subdev = &pad->i2c->subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = (pad->id - NVKM_I2C_PAD_HYBRID(0)) * 0x50;
 
-static int
-g94_i2c_pad_init(struct nvkm_object *object)
-{
-	struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
-	struct g94_i2c_pad *pad = (void *)object;
-
-	switch (nv_oclass(pad->base.next)->handle) {
-	case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX):
-		nv_mask(i2c, 0x00e500 + pad->addr, 0x0000c003, 0x00000002);
+	switch (mode) {
+	case NVKM_I2C_PAD_OFF:
+		nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000001);
 		break;
-	case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT):
+	case NVKM_I2C_PAD_I2C:
+		nvkm_mask(device, 0x00e500 + base, 0x0000c003, 0x0000c001);
+		nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000000);
+		break;
+	case NVKM_I2C_PAD_AUX:
+		nvkm_mask(device, 0x00e500 + base, 0x0000c003, 0x00000002);
+		nvkm_mask(device, 0x00e50c + base, 0x00000001, 0x00000000);
+		break;
 	default:
-		nv_mask(i2c, 0x00e500 + pad->addr, 0x0000c003, 0x0000c001);
+		WARN_ON(1);
 		break;
 	}
-
-	nv_mask(i2c, 0x00e50c + pad->addr, 0x00000001, 0x00000000);
-	return nvkm_i2c_pad_init(&pad->base);
 }
 
-static int
-g94_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 index,
-		 struct nvkm_object **pobject)
-{
-	struct g94_i2c_pad *pad;
-	int ret;
-
-	ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
-	*pobject = nv_object(pad);
-	if (ret)
-		return ret;
-
-	pad->addr = index * 0x50;
-	return 0;
-}
-
-struct nvkm_oclass
-g94_i2c_pad_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g94_i2c_pad_ctor,
-		.dtor = _nvkm_i2c_pad_dtor,
-		.init = g94_i2c_pad_init,
-		.fini = g94_i2c_pad_fini,
-	},
+static const struct nvkm_i2c_pad_func
+g94_i2c_pad_s_func = {
+	.bus_new_4 = nv50_i2c_bus_new,
+	.aux_new_6 = g94_i2c_aux_new,
+	.mode = g94_i2c_pad_mode,
 };
+
+int
+g94_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&g94_i2c_pad_s_func, i2c, id, ppad);
+}
+
+static const struct nvkm_i2c_pad_func
+g94_i2c_pad_x_func = {
+	.bus_new_4 = nv50_i2c_bus_new,
+	.aux_new_6 = g94_i2c_aux_new,
+};
+
+int
+g94_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&g94_i2c_pad_x_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
new file mode 100644
index 0000000..d53212f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "pad.h"
+#include "aux.h"
+#include "bus.h"
+
+static const struct nvkm_i2c_pad_func
+gf119_i2c_pad_s_func = {
+	.bus_new_4 = gf119_i2c_bus_new,
+	.aux_new_6 = g94_i2c_aux_new,
+	.mode = g94_i2c_pad_mode,
+};
+
+int
+gf119_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&gf119_i2c_pad_s_func, i2c, id, ppad);
+}
+
+static const struct nvkm_i2c_pad_func
+gf119_i2c_pad_x_func = {
+	.bus_new_4 = gf119_i2c_bus_new,
+	.aux_new_6 = g94_i2c_aux_new,
+};
+
+int
+gf119_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&gf119_i2c_pad_x_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
index be59040..24a4d76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgm204.c
@@ -22,64 +22,55 @@
  * Authors: Ben Skeggs
  */
 #include "pad.h"
+#include "aux.h"
+#include "bus.h"
 
-struct gm204_i2c_pad {
-	struct nvkm_i2c_pad base;
-	int addr;
-};
-
-static int
-gm204_i2c_pad_fini(struct nvkm_object *object, bool suspend)
+static void
+gm204_i2c_pad_mode(struct nvkm_i2c_pad *pad, enum nvkm_i2c_pad_mode mode)
 {
-	struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
-	struct gm204_i2c_pad *pad = (void *)object;
-	nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000001);
-	return nvkm_i2c_pad_fini(&pad->base, suspend);
-}
+	struct nvkm_subdev *subdev = &pad->i2c->subdev;
+	struct nvkm_device *device = subdev->device;
+	const u32 base = (pad->id - NVKM_I2C_PAD_HYBRID(0)) * 0x50;
 
-static int
-gm204_i2c_pad_init(struct nvkm_object *object)
-{
-	struct nvkm_i2c *i2c = (void *)nvkm_i2c(object);
-	struct gm204_i2c_pad *pad = (void *)object;
-
-	switch (nv_oclass(pad->base.next)->handle) {
-	case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX):
-		nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x00000002);
+	switch (mode) {
+	case NVKM_I2C_PAD_OFF:
+		nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000001);
 		break;
-	case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT):
+	case NVKM_I2C_PAD_I2C:
+		nvkm_mask(device, 0x00d970 + base, 0x0000c003, 0x0000c001);
+		nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000000);
+		break;
+	case NVKM_I2C_PAD_AUX:
+		nvkm_mask(device, 0x00d970 + base, 0x0000c003, 0x00000002);
+		nvkm_mask(device, 0x00d97c + base, 0x00000001, 0x00000000);
+		break;
 	default:
-		nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x0000c001);
+		WARN_ON(1);
 		break;
 	}
-
-	nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000000);
-	return nvkm_i2c_pad_init(&pad->base);
 }
 
-static int
-gm204_i2c_pad_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 index,
-		   struct nvkm_object **pobject)
-{
-	struct gm204_i2c_pad *pad;
-	int ret;
-
-	ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
-	*pobject = nv_object(pad);
-	if (ret)
-		return ret;
-
-	pad->addr = index * 0x50;
-	return 0;
-}
-
-struct nvkm_oclass
-gm204_i2c_pad_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm204_i2c_pad_ctor,
-		.dtor = _nvkm_i2c_pad_dtor,
-		.init = gm204_i2c_pad_init,
-		.fini = gm204_i2c_pad_fini,
-	},
+static const struct nvkm_i2c_pad_func
+gm204_i2c_pad_s_func = {
+	.bus_new_4 = gf119_i2c_bus_new,
+	.aux_new_6 = gm204_i2c_aux_new,
+	.mode = gm204_i2c_pad_mode,
 };
+
+int
+gm204_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&gm204_i2c_pad_s_func, i2c, id, ppad);
+}
+
+static const struct nvkm_i2c_pad_func
+gm204_i2c_pad_x_func = {
+	.bus_new_4 = gf119_i2c_bus_new,
+	.aux_new_6 = gm204_i2c_aux_new,
+};
+
+int
+gm204_i2c_pad_x_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&gm204_i2c_pad_x_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c
index 22c7daa..310046a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv04.c
@@ -22,13 +22,15 @@
  * Authors: Ben Skeggs
  */
 #include "pad.h"
+#include "bus.h"
 
-struct nvkm_oclass
-nv04_i2c_pad_oclass = {
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_i2c_pad_ctor,
-		.dtor = _nvkm_i2c_pad_dtor,
-		.init = _nvkm_i2c_pad_init,
-		.fini = _nvkm_i2c_pad_fini,
-	},
+static const struct nvkm_i2c_pad_func
+nv04_i2c_pad_func = {
+	.bus_new_0 = nv04_i2c_bus_new,
 };
+
+int
+nv04_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&nv04_i2c_pad_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c
new file mode 100644
index 0000000..dda6fc0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv4e.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "pad.h"
+#include "bus.h"
+
+static const struct nvkm_i2c_pad_func
+nv4e_i2c_pad_func = {
+	.bus_new_4 = nv4e_i2c_bus_new,
+};
+
+int
+nv4e_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&nv4e_i2c_pad_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c
new file mode 100644
index 0000000..a03f25b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padnv50.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "pad.h"
+#include "bus.h"
+
+static const struct nvkm_i2c_pad_func
+nv50_i2c_pad_func = {
+	.bus_new_4 = nv50_i2c_bus_new,
+};
+
+int
+nv50_i2c_pad_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
+{
+	return nvkm_i2c_pad_new_(&nv50_i2c_pad_func, i2c, id, ppad);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h
deleted file mode 100644
index 586f53d..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/port.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __NVKM_I2C_PORT_H__
-#define __NVKM_I2C_PORT_H__
-#include "priv.h"
-
-#ifndef MSG
-#define MSG(l,f,a...) do {                                                     \
-	struct nvkm_i2c_port *_port = (void *)port;                         \
-	nv_##l(_port, "PORT:%02x: "f, _port->index, ##a);                      \
-} while(0)
-#define DBG(f,a...) MSG(debug, f, ##a)
-#define ERR(f,a...) MSG(error, f, ##a)
-#endif
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
index 6586e15..bf655a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
@@ -1,69 +1,14 @@
 #ifndef __NVKM_I2C_PRIV_H__
 #define __NVKM_I2C_PRIV_H__
+#define nvkm_i2c(p) container_of((p), struct nvkm_i2c, subdev)
 #include <subdev/i2c.h>
 
-extern struct nvkm_oclass nv04_i2c_pad_oclass;
-extern struct nvkm_oclass g94_i2c_pad_oclass;
-extern struct nvkm_oclass gm204_i2c_pad_oclass;
+int nvkm_i2c_new_(const struct nvkm_i2c_func *, struct nvkm_device *,
+		  int index, struct nvkm_i2c **);
 
-#define nvkm_i2c_port_create(p,e,o,i,a,f,d)                                 \
-	nvkm_i2c_port_create_((p), (e), (o), (i), (a), (f),                 \
-				 sizeof(**d), (void **)d)
-#define nvkm_i2c_port_destroy(p) ({                                         \
-	struct nvkm_i2c_port *port = (p);                                   \
-	_nvkm_i2c_port_dtor(nv_object(i2c));                                \
-})
-#define nvkm_i2c_port_init(p)                                               \
-	nvkm_object_init(&(p)->base)
-#define nvkm_i2c_port_fini(p,s)                                             \
-	nvkm_object_fini(&(p)->base, (s))
-
-int nvkm_i2c_port_create_(struct nvkm_object *, struct nvkm_object *,
-			     struct nvkm_oclass *, u8,
-			     const struct i2c_algorithm *,
-			     const struct nvkm_i2c_func *,
-			     int, void **);
-void _nvkm_i2c_port_dtor(struct nvkm_object *);
-#define _nvkm_i2c_port_init nvkm_object_init
-int  _nvkm_i2c_port_fini(struct nvkm_object *, bool);
-
-#define nvkm_i2c_create(p,e,o,d)                                            \
-	nvkm_i2c_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_i2c_destroy(p) ({                                              \
-	struct nvkm_i2c *i2c = (p);                                         \
-	_nvkm_i2c_dtor(nv_object(i2c));                                     \
-})
-#define nvkm_i2c_init(p) ({                                                 \
-	struct nvkm_i2c *i2c = (p);                                         \
-	_nvkm_i2c_init(nv_object(i2c));                                     \
-})
-#define nvkm_i2c_fini(p,s) ({                                               \
-	struct nvkm_i2c *i2c = (p);                                         \
-	_nvkm_i2c_fini(nv_object(i2c), (s));                                \
-})
-
-int nvkm_i2c_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, int, void **);
-int  _nvkm_i2c_ctor(struct nvkm_object *, struct nvkm_object *,
-		       struct nvkm_oclass *, void *, u32,
-		       struct nvkm_object **);
-void _nvkm_i2c_dtor(struct nvkm_object *);
-int  _nvkm_i2c_init(struct nvkm_object *);
-int  _nvkm_i2c_fini(struct nvkm_object *, bool);
-
-extern struct nvkm_oclass nvkm_anx9805_sclass[];
-extern struct nvkm_oclass gf110_i2c_sclass[];
-
-extern const struct i2c_algorithm nvkm_i2c_bit_algo;
-extern const struct i2c_algorithm nvkm_i2c_aux_algo;
-
-struct nvkm_i2c_impl {
-	struct nvkm_oclass base;
-
-	/* supported i2c port classes */
-	struct nvkm_oclass *sclass;
-	struct nvkm_oclass *pad_x;
-	struct nvkm_oclass *pad_s;
+struct nvkm_i2c_func {
+	int (*pad_x_new)(struct nvkm_i2c *, int id, struct nvkm_i2c_pad **);
+	int (*pad_s_new)(struct nvkm_i2c *, int id, struct nvkm_i2c_pad **);
 
 	/* number of native dp aux channels present */
 	int aux;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index 8e578f8..37a0496 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -23,55 +23,54 @@
  */
 #include <subdev/ibus.h>
 
-struct gf100_ibus_priv {
-	struct nvkm_ibus base;
-};
-
 static void
-gf100_ibus_intr_hub(struct gf100_ibus_priv *priv, int i)
+gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0400));
-	u32 data = nv_rd32(priv, 0x122124 + (i * 0x0400));
-	u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0400));
-	nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0400));
+	u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
+	u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
+	nvkm_error(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 static void
-gf100_ibus_intr_rop(struct gf100_ibus_priv *priv, int i)
+gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0400));
-	u32 data = nv_rd32(priv, 0x124124 + (i * 0x0400));
-	u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0400));
-	nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0400));
+	u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
+	u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
+	nvkm_error(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 static void
-gf100_ibus_intr_gpc(struct gf100_ibus_priv *priv, int i)
+gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0400));
-	u32 data = nv_rd32(priv, 0x128124 + (i * 0x0400));
-	u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0400));
-	nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0400));
+	u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
+	u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
+	nvkm_error(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 static void
-gf100_ibus_intr(struct nvkm_subdev *subdev)
+gf100_ibus_intr(struct nvkm_subdev *ibus)
 {
-	struct gf100_ibus_priv *priv = (void *)subdev;
-	u32 intr0 = nv_rd32(priv, 0x121c58);
-	u32 intr1 = nv_rd32(priv, 0x121c5c);
-	u32 hubnr = nv_rd32(priv, 0x121c70);
-	u32 ropnr = nv_rd32(priv, 0x121c74);
-	u32 gpcnr = nv_rd32(priv, 0x121c78);
+	struct nvkm_device *device = ibus->device;
+	u32 intr0 = nvkm_rd32(device, 0x121c58);
+	u32 intr1 = nvkm_rd32(device, 0x121c5c);
+	u32 hubnr = nvkm_rd32(device, 0x121c70);
+	u32 ropnr = nvkm_rd32(device, 0x121c74);
+	u32 gpcnr = nvkm_rd32(device, 0x121c78);
 	u32 i;
 
 	for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
 		u32 stat = 0x00000100 << i;
 		if (intr0 & stat) {
-			gf100_ibus_intr_hub(priv, i);
+			gf100_ibus_intr_hub(ibus, i);
 			intr0 &= ~stat;
 		}
 	}
@@ -79,7 +78,7 @@
 	for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
 		u32 stat = 0x00010000 << i;
 		if (intr0 & stat) {
-			gf100_ibus_intr_rop(priv, i);
+			gf100_ibus_intr_rop(ibus, i);
 			intr0 &= ~stat;
 		}
 	}
@@ -87,36 +86,24 @@
 	for (i = 0; intr1 && i < gpcnr; i++) {
 		u32 stat = 0x00000001 << i;
 		if (intr1 & stat) {
-			gf100_ibus_intr_gpc(priv, i);
+			gf100_ibus_intr_gpc(ibus, i);
 			intr1 &= ~stat;
 		}
 	}
 }
 
-static int
-gf100_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+static const struct nvkm_subdev_func
+gf100_ibus = {
+	.intr = gf100_ibus_intr,
+};
+
+int
+gf100_ibus_new(struct nvkm_device *device, int index,
+	       struct nvkm_subdev **pibus)
 {
-	struct gf100_ibus_priv *priv;
-	int ret;
-
-	ret = nvkm_ibus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = gf100_ibus_intr;
+	struct nvkm_subdev *ibus;
+	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&gf100_ibus, device, index, 0, ibus);
 	return 0;
 }
-
-struct nvkm_oclass
-gf100_ibus_oclass = {
-	.handle = NV_SUBDEV(IBUS, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ibus_ctor,
-		.dtor = _nvkm_ibus_dtor,
-		.init = _nvkm_ibus_init,
-		.fini = _nvkm_ibus_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index 7b6e9a6..ba33609 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -23,55 +23,54 @@
  */
 #include <subdev/ibus.h>
 
-struct gk104_ibus_priv {
-	struct nvkm_ibus base;
-};
-
 static void
-gk104_ibus_intr_hub(struct gk104_ibus_priv *priv, int i)
+gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0800));
-	u32 data = nv_rd32(priv, 0x122124 + (i * 0x0800));
-	u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0800));
-	nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0800));
+	u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
+	u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
+	nvkm_error(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 static void
-gk104_ibus_intr_rop(struct gk104_ibus_priv *priv, int i)
+gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0800));
-	u32 data = nv_rd32(priv, 0x124124 + (i * 0x0800));
-	u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0800));
-	nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0800));
+	u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
+	u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
+	nvkm_error(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 static void
-gk104_ibus_intr_gpc(struct gk104_ibus_priv *priv, int i)
+gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
 {
-	u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0800));
-	u32 data = nv_rd32(priv, 0x128124 + (i * 0x0800));
-	u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0800));
-	nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
-	nv_mask(priv, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
+	struct nvkm_device *device = ibus->device;
+	u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0800));
+	u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
+	u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
+	nvkm_error(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+	nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 static void
-gk104_ibus_intr(struct nvkm_subdev *subdev)
+gk104_ibus_intr(struct nvkm_subdev *ibus)
 {
-	struct gk104_ibus_priv *priv = (void *)subdev;
-	u32 intr0 = nv_rd32(priv, 0x120058);
-	u32 intr1 = nv_rd32(priv, 0x12005c);
-	u32 hubnr = nv_rd32(priv, 0x120070);
-	u32 ropnr = nv_rd32(priv, 0x120074);
-	u32 gpcnr = nv_rd32(priv, 0x120078);
+	struct nvkm_device *device = ibus->device;
+	u32 intr0 = nvkm_rd32(device, 0x120058);
+	u32 intr1 = nvkm_rd32(device, 0x12005c);
+	u32 hubnr = nvkm_rd32(device, 0x120070);
+	u32 ropnr = nvkm_rd32(device, 0x120074);
+	u32 gpcnr = nvkm_rd32(device, 0x120078);
 	u32 i;
 
 	for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
 		u32 stat = 0x00000100 << i;
 		if (intr0 & stat) {
-			gk104_ibus_intr_hub(priv, i);
+			gk104_ibus_intr_hub(ibus, i);
 			intr0 &= ~stat;
 		}
 	}
@@ -79,7 +78,7 @@
 	for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
 		u32 stat = 0x00010000 << i;
 		if (intr0 & stat) {
-			gk104_ibus_intr_rop(priv, i);
+			gk104_ibus_intr_rop(ibus, i);
 			intr0 &= ~stat;
 		}
 	}
@@ -87,53 +86,40 @@
 	for (i = 0; intr1 && i < gpcnr; i++) {
 		u32 stat = 0x00000001 << i;
 		if (intr1 & stat) {
-			gk104_ibus_intr_gpc(priv, i);
+			gk104_ibus_intr_gpc(ibus, i);
 			intr1 &= ~stat;
 		}
 	}
 }
 
 static int
-gk104_ibus_init(struct nvkm_object *object)
+gk104_ibus_init(struct nvkm_subdev *ibus)
 {
-	struct gk104_ibus_priv *priv = (void *)object;
-	int ret = nvkm_ibus_init(&priv->base);
-	if (ret == 0) {
-		nv_mask(priv, 0x122318, 0x0003ffff, 0x00001000);
-		nv_mask(priv, 0x12231c, 0x0003ffff, 0x00000200);
-		nv_mask(priv, 0x122310, 0x0003ffff, 0x00000800);
-		nv_mask(priv, 0x122348, 0x0003ffff, 0x00000100);
-		nv_mask(priv, 0x1223b0, 0x0003ffff, 0x00000fff);
-		nv_mask(priv, 0x122348, 0x0003ffff, 0x00000200);
-		nv_mask(priv, 0x122358, 0x0003ffff, 0x00002880);
-	}
-	return ret;
-}
-
-static int
-gk104_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct gk104_ibus_priv *priv;
-	int ret;
-
-	ret = nvkm_ibus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = gk104_ibus_intr;
+	struct nvkm_device *device = ibus->device;
+	nvkm_mask(device, 0x122318, 0x0003ffff, 0x00001000);
+	nvkm_mask(device, 0x12231c, 0x0003ffff, 0x00000200);
+	nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
+	nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000100);
+	nvkm_mask(device, 0x1223b0, 0x0003ffff, 0x00000fff);
+	nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000200);
+	nvkm_mask(device, 0x122358, 0x0003ffff, 0x00002880);
 	return 0;
 }
 
-struct nvkm_oclass
-gk104_ibus_oclass = {
-	.handle = NV_SUBDEV(IBUS, 0xe0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk104_ibus_ctor,
-		.dtor = _nvkm_ibus_dtor,
-		.init = gk104_ibus_init,
-		.fini = _nvkm_ibus_fini,
-	},
+static const struct nvkm_subdev_func
+gk104_ibus = {
+	.preinit = gk104_ibus_init,
+	.init = gk104_ibus_init,
+	.intr = gk104_ibus_intr,
 };
+
+int
+gk104_ibus_new(struct nvkm_device *device, int index,
+	       struct nvkm_subdev **pibus)
+{
+	struct nvkm_subdev *ibus;
+	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&gk104_ibus, device, index, 0, ibus);
+	return 0;
+}
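
gk104_ibus_intr() above walks the HUB/ROP/GPC status words one unit bit at a time, clearing each bit after its handler runs so the loop can terminate as soon as the word drains. A stand-alone sketch of that dispatch idiom (handle_unit and the status value are made up for illustration):

#include <stdint.h>
#include <stdio.h>

static void
handle_unit(int i)
{
        printf("unit %d serviced\n", i);
}

/* one handler call per set bit, mirroring the gk104 loop shape:
 * stop once the status word is drained or every unit reported by
 * the hardware has been checked */
static void
dispatch(uint32_t status, unsigned nr_units)
{
        unsigned i;
        for (i = 0; status && i < nr_units; i++) {
                uint32_t bit = 1u << i;
                if (status & bit) {
                        handle_unit(i);
                        status &= ~bit;
                }
        }
}

int
main(void)
{
        dispatch(0x0000000a, 8); /* units 1 and 3 pending */
        return 0;
}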
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index 24dcdfb..3484079 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -22,89 +22,68 @@
 #include <subdev/ibus.h>
 #include <subdev/timer.h>
 
-struct gk20a_ibus_priv {
-	struct nvkm_ibus base;
-};
-
 static void
-gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
+gk20a_ibus_init_ibus_ring(struct nvkm_subdev *ibus)
 {
-	nv_mask(priv, 0x137250, 0x3f, 0);
+	struct nvkm_device *device = ibus->device;
+	nvkm_mask(device, 0x137250, 0x3f, 0);
 
-	nv_mask(priv, 0x000200, 0x20, 0);
+	nvkm_mask(device, 0x000200, 0x20, 0);
 	usleep_range(20, 30);
-	nv_mask(priv, 0x000200, 0x20, 0x20);
+	nvkm_mask(device, 0x000200, 0x20, 0x20);
 
-	nv_wr32(priv, 0x12004c, 0x4);
-	nv_wr32(priv, 0x122204, 0x2);
-	nv_rd32(priv, 0x122204);
+	nvkm_wr32(device, 0x12004c, 0x4);
+	nvkm_wr32(device, 0x122204, 0x2);
+	nvkm_rd32(device, 0x122204);
 
 	/*
 	 * Bug: increase clock timeout to avoid operation failure at high
 	 * gpcclk rate.
 	 */
-	nv_wr32(priv, 0x122354, 0x800);
-	nv_wr32(priv, 0x128328, 0x800);
-	nv_wr32(priv, 0x124320, 0x800);
+	nvkm_wr32(device, 0x122354, 0x800);
+	nvkm_wr32(device, 0x128328, 0x800);
+	nvkm_wr32(device, 0x124320, 0x800);
 }
 
 static void
-gk20a_ibus_intr(struct nvkm_subdev *subdev)
+gk20a_ibus_intr(struct nvkm_subdev *ibus)
 {
-	struct gk20a_ibus_priv *priv = (void *)subdev;
-	u32 status0 = nv_rd32(priv, 0x120058);
+	struct nvkm_device *device = ibus->device;
+	u32 status0 = nvkm_rd32(device, 0x120058);
 
 	if (status0 & 0x7) {
-		nv_debug(priv, "resetting priv ring\n");
-		gk20a_ibus_init_priv_ring(priv);
+		nvkm_debug(ibus, "resetting ibus ring\n");
+		gk20a_ibus_init_ibus_ring(ibus);
 	}
 
 	/* Acknowledge interrupt */
-	nv_mask(priv, 0x12004c, 0x2, 0x2);
-
-	if (!nv_wait(subdev, 0x12004c, 0x3f, 0x00))
-		nv_warn(priv, "timeout waiting for ringmaster ack\n");
+	nvkm_mask(device, 0x12004c, 0x2, 0x2);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
+			break;
+	);
 }
 
 static int
-gk20a_ibus_init(struct nvkm_object *object)
+gk20a_ibus_init(struct nvkm_subdev *ibus)
 {
-	struct gk20a_ibus_priv *priv = (void *)object;
-	int ret;
-
-	ret = _nvkm_ibus_init(object);
-	if (ret)
-		return ret;
-
-	gk20a_ibus_init_priv_ring(priv);
-
+	gk20a_ibus_init_ibus_ring(ibus);
 	return 0;
 }
 
-static int
-gk20a_ibus_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
-{
-	struct gk20a_ibus_priv *priv;
-	int ret;
-
-	ret = nvkm_ibus_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = gk20a_ibus_intr;
-	return 0;
-}
-
-struct nvkm_oclass
-gk20a_ibus_oclass = {
-	.handle = NV_SUBDEV(IBUS, 0xea),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_ibus_ctor,
-		.dtor = _nvkm_ibus_dtor,
-		.init = gk20a_ibus_init,
-		.fini = _nvkm_ibus_fini,
-	},
+static const struct nvkm_subdev_func
+gk20a_ibus = {
+	.init = gk20a_ibus_init,
+	.intr = gk20a_ibus_intr,
 };
+
+int
+gk20a_ibus_new(struct nvkm_device *device, int index,
+	       struct nvkm_subdev **pibus)
+{
+	struct nvkm_subdev *ibus;
+	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&gk20a_ibus, device, index, 0, ibus);
+	return 0;
+}
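
Note the gk20a ack path above: the old nv_wait() helper is replaced by nvkm_msec(), which repeatedly runs its body until the body executes break or the time budget expires. A rough user-space equivalent is sketched below; poll_msec() and ring_idle() are invented names, and this only approximates the kernel macro's semantics.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t
now_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* poll cond() until it returns true or timeout_ms elapses; returns
 * 0 on success, -1 on timeout -- roughly what the nvkm_msec() call
 * above expresses inline */
static int
poll_msec(unsigned timeout_ms, int (*cond)(void))
{
        uint64_t deadline = now_ns() + (uint64_t)timeout_ms * 1000000ull;
        do {
                if (cond())
                        return 0;
        } while (now_ns() < deadline);
        return -1;
}

static int
ring_idle(void)
{
        /* stand-in for (nvkm_rd32(device, 0x12004c) & 0x3f) == 0 */
        return 1;
}

int
main(void)
{
        if (poll_msec(2000, ring_idle))
                fprintf(stderr, "timeout waiting for ringmaster ack\n");
        return 0;
}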
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index d16358c..895ba74 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -23,124 +23,291 @@
  */
 #include "priv.h"
 
-#include <core/engine.h>
+#include <core/memory.h>
+#include <subdev/bar.h>
 
 /******************************************************************************
  * instmem object base implementation
  *****************************************************************************/
+#define nvkm_instobj(p) container_of((p), struct nvkm_instobj, memory)
 
-void
-_nvkm_instobj_dtor(struct nvkm_object *object)
+struct nvkm_instobj {
+	struct nvkm_memory memory;
+	struct nvkm_memory *parent;
+	struct nvkm_instmem *imem;
+	struct list_head head;
+	u32 *suspend;
+	void __iomem *map;
+};
+
+static enum nvkm_memory_target
+nvkm_instobj_target(struct nvkm_memory *memory)
 {
-	struct nvkm_instmem *imem = nvkm_instmem(object);
-	struct nvkm_instobj *iobj = (void *)object;
-
-	mutex_lock(&nv_subdev(imem)->mutex);
-	list_del(&iobj->head);
-	mutex_unlock(&nv_subdev(imem)->mutex);
-
-	return nvkm_object_destroy(&iobj->base);
+	memory = nvkm_instobj(memory)->parent;
+	return nvkm_memory_target(memory);
 }
 
-int
-nvkm_instobj_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, int length, void **pobject)
+static u64
+nvkm_instobj_addr(struct nvkm_memory *memory)
 {
-	struct nvkm_instmem *imem = nvkm_instmem(parent);
+	memory = nvkm_instobj(memory)->parent;
+	return nvkm_memory_addr(memory);
+}
+
+static u64
+nvkm_instobj_size(struct nvkm_memory *memory)
+{
+	memory = nvkm_instobj(memory)->parent;
+	return nvkm_memory_size(memory);
+}
+
+static void
+nvkm_instobj_release(struct nvkm_memory *memory)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	nvkm_bar_flush(iobj->imem->subdev.device->bar);
+}
+
+static void __iomem *
+nvkm_instobj_acquire(struct nvkm_memory *memory)
+{
+	return nvkm_instobj(memory)->map;
+}
+
+static u32
+nvkm_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+{
+	return ioread32_native(nvkm_instobj(memory)->map + offset);
+}
+
+static void
+nvkm_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+{
+	iowrite32_native(data, nvkm_instobj(memory)->map + offset);
+}
+
+static void
+nvkm_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+{
+	memory = nvkm_instobj(memory)->parent;
+	nvkm_memory_map(memory, vma, offset);
+}
+
+static void *
+nvkm_instobj_dtor(struct nvkm_memory *memory)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	list_del(&iobj->head);
+	nvkm_memory_del(&iobj->parent);
+	return iobj;
+}
+
+const struct nvkm_memory_func
+nvkm_instobj_func = {
+	.dtor = nvkm_instobj_dtor,
+	.target = nvkm_instobj_target,
+	.addr = nvkm_instobj_addr,
+	.size = nvkm_instobj_size,
+	.acquire = nvkm_instobj_acquire,
+	.release = nvkm_instobj_release,
+	.rd32 = nvkm_instobj_rd32,
+	.wr32 = nvkm_instobj_wr32,
+	.map = nvkm_instobj_map,
+};
+
+static void
+nvkm_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+{
+	memory = nvkm_instobj(memory)->parent;
+	nvkm_memory_boot(memory, vm);
+}
+
+static void
+nvkm_instobj_release_slow(struct nvkm_memory *memory)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	nvkm_instobj_release(memory);
+	nvkm_done(iobj->parent);
+}
+
+static void __iomem *
+nvkm_instobj_acquire_slow(struct nvkm_memory *memory)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	iobj->map = nvkm_kmap(iobj->parent);
+	if (iobj->map)
+		memory->func = &nvkm_instobj_func;
+	return iobj->map;
+}
+
+static u32
+nvkm_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	return nvkm_ro32(iobj->parent, offset);
+}
+
+static void
+nvkm_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
+{
+	struct nvkm_instobj *iobj = nvkm_instobj(memory);
+	return nvkm_wo32(iobj->parent, offset, data);
+}
+
+const struct nvkm_memory_func
+nvkm_instobj_func_slow = {
+	.dtor = nvkm_instobj_dtor,
+	.target = nvkm_instobj_target,
+	.addr = nvkm_instobj_addr,
+	.size = nvkm_instobj_size,
+	.boot = nvkm_instobj_boot,
+	.acquire = nvkm_instobj_acquire_slow,
+	.release = nvkm_instobj_release_slow,
+	.rd32 = nvkm_instobj_rd32_slow,
+	.wr32 = nvkm_instobj_wr32_slow,
+	.map = nvkm_instobj_map,
+};
+
+int
+nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
+		 struct nvkm_memory **pmemory)
+{
+	struct nvkm_memory *memory = NULL;
 	struct nvkm_instobj *iobj;
+	u32 offset;
 	int ret;
 
-	ret = nvkm_object_create_(parent, engine, oclass, NV_MEMOBJ_CLASS,
-				  length, pobject);
-	iobj = *pobject;
+	ret = imem->func->memory_new(imem, size, align, zero, &memory);
 	if (ret)
-		return ret;
+		goto done;
 
-	mutex_lock(&imem->base.mutex);
-	list_add(&iobj->head, &imem->list);
-	mutex_unlock(&imem->base.mutex);
-	return 0;
+	if (!imem->func->persistent) {
+		if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL))) {
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
+		iobj->parent = memory;
+		iobj->imem = imem;
+		list_add_tail(&iobj->head, &imem->list);
+		memory = &iobj->memory;
+	}
+
+	if (!imem->func->zero && zero) {
+		void __iomem *map = nvkm_kmap(memory);
+		if (unlikely(!map)) {
+			for (offset = 0; offset < size; offset += 4)
+				nvkm_wo32(memory, offset, 0x00000000);
+		} else {
+			memset_io(map, 0x00, size);
+		}
+		nvkm_done(memory);
+	}
+
+done:
+	if (ret)
+		nvkm_memory_del(&memory);
+	*pmemory = memory;
+	return ret;
 }
 
 /******************************************************************************
  * instmem subdev base implementation
  *****************************************************************************/
 
-static int
-nvkm_instmem_alloc(struct nvkm_instmem *imem, struct nvkm_object *parent,
-		   u32 size, u32 align, struct nvkm_object **pobject)
+u32
+nvkm_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
 {
-	struct nvkm_instmem_impl *impl = (void *)imem->base.object.oclass;
-	struct nvkm_instobj_args args = { .size = size, .align = align };
-	return nvkm_object_ctor(parent, &parent->engine->subdev.object,
-				impl->instobj, &args, sizeof(args), pobject);
+	return imem->func->rd32(imem, addr);
 }
 
-int
-_nvkm_instmem_fini(struct nvkm_object *object, bool suspend)
+void
+nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
 {
-	struct nvkm_instmem *imem = (void *)object;
+	return imem->func->wr32(imem, addr, data);
+}
+
+static int
+nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_instmem *imem = nvkm_instmem(subdev);
 	struct nvkm_instobj *iobj;
-	int i, ret = 0;
+	int i;
+
+	if (imem->func->fini)
+		imem->func->fini(imem);
 
 	if (suspend) {
-		mutex_lock(&imem->base.mutex);
 		list_for_each_entry(iobj, &imem->list, head) {
-			iobj->suspend = vmalloc(iobj->size);
-			if (!iobj->suspend) {
-				ret = -ENOMEM;
-				break;
-			}
+			struct nvkm_memory *memory = iobj->parent;
+			u64 size = nvkm_memory_size(memory);
 
-			for (i = 0; i < iobj->size; i += 4)
-				iobj->suspend[i / 4] = nv_ro32(iobj, i);
+			iobj->suspend = vmalloc(size);
+			if (!iobj->suspend)
+				return -ENOMEM;
+
+			for (i = 0; i < size; i += 4)
+				iobj->suspend[i / 4] = nvkm_ro32(memory, i);
 		}
-		mutex_unlock(&imem->base.mutex);
-		if (ret)
-			return ret;
 	}
 
-	return nvkm_subdev_fini(&imem->base, suspend);
+	return 0;
 }
 
-int
-_nvkm_instmem_init(struct nvkm_object *object)
+static int
+nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
 {
-	struct nvkm_instmem *imem = (void *)object;
+	struct nvkm_instmem *imem = nvkm_instmem(subdev);
+	if (imem->func->oneinit)
+		return imem->func->oneinit(imem);
+	return 0;
+}
+
+static int
+nvkm_instmem_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_instmem *imem = nvkm_instmem(subdev);
 	struct nvkm_instobj *iobj;
-	int ret, i;
+	int i;
 
-	ret = nvkm_subdev_init(&imem->base);
-	if (ret)
-		return ret;
-
-	mutex_lock(&imem->base.mutex);
 	list_for_each_entry(iobj, &imem->list, head) {
 		if (iobj->suspend) {
-			for (i = 0; i < iobj->size; i += 4)
-				nv_wo32(iobj, i, iobj->suspend[i / 4]);
+			struct nvkm_memory *memory = iobj->parent;
+			u64 size = nvkm_memory_size(memory);
+			for (i = 0; i < size; i += 4)
+				nvkm_wo32(memory, i, iobj->suspend[i / 4]);
 			vfree(iobj->suspend);
 			iobj->suspend = NULL;
 		}
 	}
-	mutex_unlock(&imem->base.mutex);
+
 	return 0;
 }
 
-int
-nvkm_instmem_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		     struct nvkm_oclass *oclass, int length, void **pobject)
+static void *
+nvkm_instmem_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_instmem *imem;
-	int ret;
+	struct nvkm_instmem *imem = nvkm_instmem(subdev);
+	if (imem->func->dtor)
+		return imem->func->dtor(imem);
+	return imem;
+}
 
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "INSTMEM",
-				  "instmem", length, pobject);
-	imem = *pobject;
-	if (ret)
-		return ret;
+static const struct nvkm_subdev_func
+nvkm_instmem = {
+	.dtor = nvkm_instmem_dtor,
+	.oneinit = nvkm_instmem_oneinit,
+	.init = nvkm_instmem_init,
+	.fini = nvkm_instmem_fini,
+};
 
+void
+nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
+		  struct nvkm_device *device, int index,
+		  struct nvkm_instmem *imem)
+{
+	nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev);
+	imem->func = func;
 	INIT_LIST_HEAD(&imem->list);
-	imem->alloc = nvkm_instmem_alloc;
-	return 0;
 }
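
One subtle piece of the new base.c: nvkm_instobj_acquire_slow() installs nvkm_instobj_func on the memory object once nvkm_kmap() of the parent yields a direct pointer, so every subsequent acquire/rd32/wr32 takes the fast ioread/iowrite path without re-checking. A compact user-space sketch of that self-specialising ops table follows; for brevity the swap happens inside rd32 rather than in a separate acquire step, and all names are illustrative.

#include <stdio.h>

struct memory;

struct memory_func {
        unsigned (*rd32)(struct memory *, unsigned offset);
};

struct memory {
        const struct memory_func *func;
        unsigned *map;          /* direct mapping, once established */
        unsigned backing[4];    /* stand-in for the parent object */
};

static unsigned
fast_rd32(struct memory *m, unsigned offset)
{
        return m->map[offset / 4];
}

static const struct memory_func
fast = {
        .rd32 = fast_rd32,
};

/* slow path: establish the mapping, then swap in the fast table so
 * later accesses never reach this function again -- the same trick
 * nvkm_instobj_acquire_slow() plays with nvkm_instobj_func */
static unsigned
slow_rd32(struct memory *m, unsigned offset)
{
        if (!m->map) {
                m->map = m->backing;    /* "kmap" succeeded */
                m->func = &fast;
        }
        return m->func->rd32(m, offset);
}

static const struct memory_func
slow = {
        .rd32 = slow_rd32,
};

int
main(void)
{
        struct memory m = { .func = &slow, .backing = { 7, 8, 9, 10 } };
        printf("%u\n", m.func->rd32(&m, 4));    /* slow, installs fast */
        printf("%u\n", m.func->rd32(&m, 8));    /* fast path from here on */
        return 0;
}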
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index dd0994d..cd7feb1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -37,32 +37,27 @@
  * to use more "relaxed" allocation parameters when using the DMA API, since we
  * never need a kernel mapping.
  */
-
-#include <subdev/fb.h>
-#include <core/mm.h>
-#include <core/device.h>
-
-#ifdef __KERNEL__
-#include <linux/dma-attrs.h>
-#include <linux/iommu.h>
-#include <nouveau_platform.h>
-#endif
-
+#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
 #include "priv.h"
 
-struct gk20a_instobj_priv {
-	struct nvkm_instobj base;
-	/* Must be second member here - see nouveau_gpuobj_map_vm() */
-	struct nvkm_mem *mem;
-	/* Pointed by mem */
-	struct nvkm_mem _mem;
+#include <core/memory.h>
+#include <core/mm.h>
+#include <core/tegra.h>
+#include <subdev/fb.h>
+
+#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
+
+struct gk20a_instobj {
+	struct nvkm_memory memory;
+	struct gk20a_instmem *imem;
+	struct nvkm_mem mem;
 };
 
 /*
  * Used for objects allocated using the DMA API
  */
 struct gk20a_instobj_dma {
-	struct gk20a_instobj_priv base;
+	struct gk20a_instobj base;
 
 	void *cpuaddr;
 	dma_addr_t handle;
@@ -73,14 +68,15 @@
  * Used for objects flattened using the IOMMU API
  */
 struct gk20a_instobj_iommu {
-	struct gk20a_instobj_priv base;
+	struct gk20a_instobj base;
 
 	/* array of base.mem->size pages */
 	struct page *pages[];
 };
 
-struct gk20a_instmem_priv {
+struct gk20a_instmem {
 	struct nvkm_instmem base;
+	unsigned long lock_flags;
 	spinlock_t lock;
 	u64 addr;
 
@@ -94,6 +90,41 @@
 	struct dma_attrs attrs;
 };
 
+static enum nvkm_memory_target
+gk20a_instobj_target(struct nvkm_memory *memory)
+{
+	return NVKM_MEM_TARGET_HOST;
+}
+
+static u64
+gk20a_instobj_addr(struct nvkm_memory *memory)
+{
+	return gk20a_instobj(memory)->mem.offset;
+}
+
+static u64
+gk20a_instobj_size(struct nvkm_memory *memory)
+{
+	return (u64)gk20a_instobj(memory)->mem.size << 12;
+}
+
+static void __iomem *
+gk20a_instobj_acquire(struct nvkm_memory *memory)
+{
+	struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;
+	unsigned long flags;
+	spin_lock_irqsave(&imem->lock, flags);
+	imem->lock_flags = flags;
+	return NULL;
+}
+
+static void
+gk20a_instobj_release(struct nvkm_memory *memory)
+{
+	struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;
+	spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
+}
+
 /*
  * Use PRAMIN to read/write data and avoid coherency issues.
  * PRAMIN uses the GPU path and ensures data will always be coherent.
@@ -104,160 +136,170 @@
  */
 
 static u32
-gk20a_instobj_rd32(struct nvkm_object *object, u64 offset)
+gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
 {
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct gk20a_instobj_priv *node = (void *)object;
-	unsigned long flags;
-	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+	struct gk20a_instobj *node = gk20a_instobj(memory);
+	struct gk20a_instmem *imem = node->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
+	u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
 	u32 data;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	if (unlikely(priv->addr != base)) {
-		nv_wr32(priv, 0x001700, base >> 16);
-		priv->addr = base;
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
 	}
-	data = nv_rd32(priv, 0x700000 + addr);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	data = nvkm_rd32(device, 0x700000 + addr);
 	return data;
 }
 
 static void
-gk20a_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
+gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct gk20a_instobj_priv *node = (void *)object;
-	unsigned long flags;
-	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+	struct gk20a_instobj *node = gk20a_instobj(memory);
+	struct gk20a_instmem *imem = node->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
+	u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	if (unlikely(priv->addr != base)) {
-		nv_wr32(priv, 0x001700, base >> 16);
-		priv->addr = base;
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
 	}
-	nv_wr32(priv, 0x700000 + addr, data);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	nvkm_wr32(device, 0x700000 + addr, data);
 }
 
 static void
-gk20a_instobj_dtor_dma(struct gk20a_instobj_priv *_node)
+gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+{
+	struct gk20a_instobj *node = gk20a_instobj(memory);
+	nvkm_vm_map_at(vma, offset, &node->mem);
+}
+
+static void
+gk20a_instobj_dtor_dma(struct gk20a_instobj *_node)
 {
 	struct gk20a_instobj_dma *node = (void *)_node;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
-	struct device *dev = nv_device_base(nv_device(priv));
+	struct gk20a_instmem *imem = _node->imem;
+	struct device *dev = imem->base.subdev.device->dev;
 
 	if (unlikely(!node->cpuaddr))
 		return;
 
-	dma_free_attrs(dev, _node->mem->size << PAGE_SHIFT, node->cpuaddr,
-		       node->handle, &priv->attrs);
+	dma_free_attrs(dev, _node->mem.size << PAGE_SHIFT, node->cpuaddr,
+		       node->handle, &imem->attrs);
 }
 
 static void
-gk20a_instobj_dtor_iommu(struct gk20a_instobj_priv *_node)
+gk20a_instobj_dtor_iommu(struct gk20a_instobj *_node)
 {
 	struct gk20a_instobj_iommu *node = (void *)_node;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+	struct gk20a_instmem *imem = _node->imem;
 	struct nvkm_mm_node *r;
 	int i;
 
-	if (unlikely(list_empty(&_node->mem->regions)))
+	if (unlikely(list_empty(&_node->mem.regions)))
 		return;
 
-	r = list_first_entry(&_node->mem->regions, struct nvkm_mm_node,
+	r = list_first_entry(&_node->mem.regions, struct nvkm_mm_node,
 			     rl_entry);
 
 	/* clear bit 34 to unmap pages */
-	r->offset &= ~BIT(34 - priv->iommu_pgshift);
+	r->offset &= ~BIT(34 - imem->iommu_pgshift);
 
 	/* Unmap pages from GPU address space and free them */
-	for (i = 0; i < _node->mem->size; i++) {
-		iommu_unmap(priv->domain,
-			    (r->offset + i) << priv->iommu_pgshift, PAGE_SIZE);
+	for (i = 0; i < _node->mem.size; i++) {
+		iommu_unmap(imem->domain,
+			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
 		__free_page(node->pages[i]);
 	}
 
 	/* Release area from GPU address space */
-	mutex_lock(priv->mm_mutex);
-	nvkm_mm_free(priv->mm, &r);
-	mutex_unlock(priv->mm_mutex);
+	mutex_lock(imem->mm_mutex);
+	nvkm_mm_free(imem->mm, &r);
+	mutex_unlock(imem->mm_mutex);
 }
 
-static void
-gk20a_instobj_dtor(struct nvkm_object *object)
+static void *
+gk20a_instobj_dtor(struct nvkm_memory *memory)
 {
-	struct gk20a_instobj_priv *node = (void *)object;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
+	struct gk20a_instobj *node = gk20a_instobj(memory);
+	struct gk20a_instmem *imem = node->imem;
 
-	if (priv->domain)
+	if (imem->domain)
 		gk20a_instobj_dtor_iommu(node);
 	else
 		gk20a_instobj_dtor_dma(node);
 
-	nvkm_instobj_destroy(&node->base);
+	return node;
 }
 
+static const struct nvkm_memory_func
+gk20a_instobj_func = {
+	.dtor = gk20a_instobj_dtor,
+	.target = gk20a_instobj_target,
+	.addr = gk20a_instobj_addr,
+	.size = gk20a_instobj_size,
+	.acquire = gk20a_instobj_acquire,
+	.release = gk20a_instobj_release,
+	.rd32 = gk20a_instobj_rd32,
+	.wr32 = gk20a_instobj_wr32,
+	.map = gk20a_instobj_map,
+};
+
 static int
-gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
-		       struct nvkm_oclass *oclass, u32 npages, u32 align,
-		       struct gk20a_instobj_priv **_node)
+gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
+		       struct gk20a_instobj **_node)
 {
 	struct gk20a_instobj_dma *node;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
-	struct device *dev = nv_device_base(nv_device(parent));
-	int ret;
+	struct nvkm_subdev *subdev = &imem->base.subdev;
+	struct device *dev = subdev->device->dev;
 
-	ret = nvkm_instobj_create_(parent, engine, oclass, sizeof(*node),
-				   (void **)&node);
+	if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
+		return -ENOMEM;
 	*_node = &node->base;
-	if (ret)
-		return ret;
 
 	node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
 					&node->handle, GFP_KERNEL,
-					&priv->attrs);
+					&imem->attrs);
 	if (!node->cpuaddr) {
-		nv_error(priv, "cannot allocate DMA memory\n");
+		nvkm_error(subdev, "cannot allocate DMA memory\n");
 		return -ENOMEM;
 	}
 
 	/* alignment check */
 	if (unlikely(node->handle & (align - 1)))
-		nv_warn(priv, "memory not aligned as requested: %pad (0x%x)\n",
-			&node->handle, align);
+		nvkm_warn(subdev,
+			  "memory not aligned as requested: %pad (0x%x)\n",
+			  &node->handle, align);
 
 	/* present memory for being mapped using small pages */
 	node->r.type = 12;
 	node->r.offset = node->handle >> 12;
 	node->r.length = (npages << PAGE_SHIFT) >> 12;
 
-	node->base._mem.offset = node->handle;
+	node->base.mem.offset = node->handle;
 
-	INIT_LIST_HEAD(&node->base._mem.regions);
-	list_add_tail(&node->r.rl_entry, &node->base._mem.regions);
+	INIT_LIST_HEAD(&node->base.mem.regions);
+	list_add_tail(&node->r.rl_entry, &node->base.mem.regions);
 
 	return 0;
 }
 
 static int
-gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
-			 struct nvkm_oclass *oclass, u32 npages, u32 align,
-			 struct gk20a_instobj_priv **_node)
+gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
+			 struct gk20a_instobj **_node)
 {
 	struct gk20a_instobj_iommu *node;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
+	struct nvkm_subdev *subdev = &imem->base.subdev;
 	struct nvkm_mm_node *r;
 	int ret;
 	int i;
 
-	ret = nvkm_instobj_create_(parent, engine, oclass,
-				sizeof(*node) + sizeof(node->pages[0]) * npages,
-				(void **)&node);
+	if (!(node = kzalloc(sizeof(*node) +
+			     sizeof(node->pages[0]) * npages, GFP_KERNEL)))
+		return -ENOMEM;
 	*_node = &node->base;
-	if (ret)
-		return ret;
 
 	/* Allocate backing memory */
 	for (i = 0; i < npages; i++) {
@@ -270,48 +312,48 @@
 		node->pages[i] = p;
 	}
 
-	mutex_lock(priv->mm_mutex);
+	mutex_lock(imem->mm_mutex);
 	/* Reserve area from GPU address space */
-	ret = nvkm_mm_head(priv->mm, 0, 1, npages, npages,
-			   align >> priv->iommu_pgshift, &r);
-	mutex_unlock(priv->mm_mutex);
+	ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
+			   align >> imem->iommu_pgshift, &r);
+	mutex_unlock(imem->mm_mutex);
 	if (ret) {
-		nv_error(priv, "virtual space is full!\n");
+		nvkm_error(subdev, "virtual space is full!\n");
 		goto free_pages;
 	}
 
 	/* Map into GPU address space */
 	for (i = 0; i < npages; i++) {
 		struct page *p = node->pages[i];
-		u32 offset = (r->offset + i) << priv->iommu_pgshift;
+		u32 offset = (r->offset + i) << imem->iommu_pgshift;
 
-		ret = iommu_map(priv->domain, offset, page_to_phys(p),
+		ret = iommu_map(imem->domain, offset, page_to_phys(p),
 				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
 		if (ret < 0) {
-			nv_error(priv, "IOMMU mapping failure: %d\n", ret);
+			nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
 
 			while (i-- > 0) {
 				offset -= PAGE_SIZE;
-				iommu_unmap(priv->domain, offset, PAGE_SIZE);
+				iommu_unmap(imem->domain, offset, PAGE_SIZE);
 			}
 			goto release_area;
 		}
 	}
 
 	/* Bit 34 tells that an address is to be resolved through the IOMMU */
-	r->offset |= BIT(34 - priv->iommu_pgshift);
+	r->offset |= BIT(34 - imem->iommu_pgshift);
 
-	node->base._mem.offset = ((u64)r->offset) << priv->iommu_pgshift;
+	node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
 
-	INIT_LIST_HEAD(&node->base._mem.regions);
-	list_add_tail(&r->rl_entry, &node->base._mem.regions);
+	INIT_LIST_HEAD(&node->base.mem.regions);
+	list_add_tail(&r->rl_entry, &node->base.mem.regions);
 
 	return 0;
 
 release_area:
-	mutex_lock(priv->mm_mutex);
-	nvkm_mm_free(priv->mm, &r);
-	mutex_unlock(priv->mm_mutex);
+	mutex_lock(imem->mm_mutex);
+	nvkm_mm_free(imem->mm, &r);
+	mutex_unlock(imem->mm_mutex);
 
 free_pages:
 	for (i = 0; i < npages && node->pages[i] != NULL; i++)
@@ -321,120 +363,92 @@
 }
 
 static int
-gk20a_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 _size,
-		   struct nvkm_object **pobject)
+gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+		  struct nvkm_memory **pmemory)
 {
-	struct nvkm_instobj_args *args = data;
-	struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
-	struct gk20a_instobj_priv *node;
-	u32 size, align;
+	struct gk20a_instmem *imem = gk20a_instmem(base);
+	struct gk20a_instobj *node = NULL;
+	struct nvkm_subdev *subdev = &imem->base.subdev;
 	int ret;
 
-	nv_debug(parent, "%s (%s): size: %x align: %x\n", __func__,
-		 priv->domain ? "IOMMU" : "DMA", args->size, args->align);
+	nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
+		   imem->domain ? "IOMMU" : "DMA", size, align);
 
 	/* Round size and align to page bounds */
-	size = max(roundup(args->size, PAGE_SIZE), PAGE_SIZE);
-	align = max(roundup(args->align, PAGE_SIZE), PAGE_SIZE);
+	size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
+	align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);
 
-	if (priv->domain)
-		ret = gk20a_instobj_ctor_iommu(parent, engine, oclass,
-					      size >> PAGE_SHIFT, align, &node);
+	if (imem->domain)
+		ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
+					       align, &node);
 	else
-		ret = gk20a_instobj_ctor_dma(parent, engine, oclass,
-					     size >> PAGE_SHIFT, align, &node);
-	*pobject = nv_object(node);
+		ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
+					     align, &node);
+	*pmemory = node ? &node->memory : NULL;
 	if (ret)
 		return ret;
 
-	node->mem = &node->_mem;
+	nvkm_memory_ctor(&gk20a_instobj_func, &node->memory);
+	node->imem = imem;
 
 	/* present memory for being mapped using small pages */
-	node->mem->size = size >> 12;
-	node->mem->memtype = 0;
-	node->mem->page_shift = 12;
+	node->mem.size = size >> 12;
+	node->mem.memtype = 0;
+	node->mem.page_shift = 12;
 
-	node->base.addr = node->mem->offset;
-	node->base.size = size;
-
-	nv_debug(parent, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
-		 size, align, node->mem->offset);
+	nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
+		   size, align, node->mem.offset);
 
 	return 0;
 }
 
-static struct nvkm_instobj_impl
-gk20a_instobj_oclass = {
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_instobj_ctor,
-		.dtor = gk20a_instobj_dtor,
-		.init = _nvkm_instobj_init,
-		.fini = _nvkm_instobj_fini,
-		.rd32 = gk20a_instobj_rd32,
-		.wr32 = gk20a_instobj_wr32,
-	},
-};
-
-
-
-static int
-gk20a_instmem_fini(struct nvkm_object *object, bool suspend)
+static void
+gk20a_instmem_fini(struct nvkm_instmem *base)
 {
-	struct gk20a_instmem_priv *priv = (void *)object;
-	priv->addr = ~0ULL;
-	return nvkm_instmem_fini(&priv->base, suspend);
+	gk20a_instmem(base)->addr = ~0ULL;
 }
 
-static int
-gk20a_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, void *data, u32 size,
-		   struct nvkm_object **pobject)
+static const struct nvkm_instmem_func
+gk20a_instmem = {
+	.fini = gk20a_instmem_fini,
+	.memory_new = gk20a_instobj_new,
+	.persistent = true,
+	.zero = false,
+};
+
+int
+gk20a_instmem_new(struct nvkm_device *device, int index,
+		  struct nvkm_instmem **pimem)
 {
-	struct gk20a_instmem_priv *priv;
-	struct nouveau_platform_device *plat;
-	int ret;
+	struct nvkm_device_tegra *tdev = device->func->tegra(device);
+	struct gk20a_instmem *imem;
 
-	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
+	spin_lock_init(&imem->lock);
+	*pimem = &imem->base;
 
-	spin_lock_init(&priv->lock);
+	if (tdev->iommu.domain) {
+		imem->domain = tdev->iommu.domain;
+		imem->mm = &tdev->iommu.mm;
+		imem->iommu_pgshift = tdev->iommu.pgshift;
+		imem->mm_mutex = &tdev->iommu.mutex;
 
-	plat = nv_device_to_platform(nv_device(parent));
-	if (plat->gpu->iommu.domain) {
-		priv->domain = plat->gpu->iommu.domain;
-		priv->mm = plat->gpu->iommu.mm;
-		priv->iommu_pgshift = plat->gpu->iommu.pgshift;
-		priv->mm_mutex = &plat->gpu->iommu.mutex;
-
-		nv_info(priv, "using IOMMU\n");
+		nvkm_info(&imem->base.subdev, "using IOMMU\n");
 	} else {
-		init_dma_attrs(&priv->attrs);
+		init_dma_attrs(&imem->attrs);
 		/*
 		 * We will access instmem through PRAMIN and thus do not need a
 		 * consistent CPU pointer or kernel mapping
 		 */
-		dma_set_attr(DMA_ATTR_NON_CONSISTENT, &priv->attrs);
-		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &priv->attrs);
-		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &priv->attrs);
-		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &priv->attrs);
+		dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
+		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
+		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
+		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
 
-		nv_info(priv, "using DMA API\n");
+		nvkm_info(&imem->base.subdev, "using DMA API\n");
 	}
 
 	return 0;
 }
-
-struct nvkm_oclass *
-gk20a_instmem_oclass = &(struct nvkm_instmem_impl) {
-	.base.handle = NV_SUBDEV(INSTMEM, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_instmem_ctor,
-		.dtor = _nvkm_instmem_dtor,
-		.init = _nvkm_instmem_init,
-		.fini = gk20a_instmem_fini,
-	},
-	.instobj = &gk20a_instobj_oclass.base,
-}.base;
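
The gk20a (and nv50) rd32/wr32 paths above go through the PRAMIN window: the register at 0x001700 selects which 1MiB-aligned region of backing memory appears at BAR offset 0x700000, and the current base is cached in imem->addr so back-to-back accesses inside one window skip the reprogram. Here is a sketch of that banked-window idiom against a fake VRAM array; the masks match the code above, everything else (struct pramin, pramin_rd32) is illustrative.

#include <stdint.h>
#include <stdio.h>

#define WINDOW_MASK 0xffffff00000ull    /* 1MiB-aligned window base */
#define OFFSET_MASK 0x000000fffffull    /* offset within the window */

struct pramin {
        uint64_t cur_base;              /* cached window base (reg 0x001700) */
        const uint32_t *backing;        /* stand-in for VRAM */
};

static uint32_t
pramin_rd32(struct pramin *p, uint64_t obj_offset, uint64_t addr)
{
        uint64_t full = obj_offset + addr;
        uint64_t base = full & WINDOW_MASK;
        uint64_t off  = full & OFFSET_MASK;

        if (p->cur_base != base) {
                /* nvkm_wr32(device, 0x001700, base >> 16) in the driver */
                p->cur_base = base;
        }
        return p->backing[(base + off) / 4];
}

int
main(void)
{
        static const uint32_t vram[64] = { [5] = 0xdeadbeef };
        struct pramin p = { .cur_base = ~0ull, .backing = vram };

        printf("%08x\n", pramin_rd32(&p, 0x10, 0x4));
        return 0;
}

Note also that the locking that used to live inside rd32/wr32 has moved out to acquire()/release(), which is why the new accessors can drop their spin_lock_irqsave pairs.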
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 282143f..6133c8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -21,173 +21,207 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#define nv04_instmem(p) container_of((p), struct nv04_instmem, base)
+#include "priv.h"
 
+#include <core/memory.h>
 #include <core/ramht.h>
 
+struct nv04_instmem {
+	struct nvkm_instmem base;
+	struct nvkm_mm heap;
+};
+
 /******************************************************************************
  * instmem object implementation
  *****************************************************************************/
+#define nv04_instobj(p) container_of((p), struct nv04_instobj, memory)
+
+struct nv04_instobj {
+	struct nvkm_memory memory;
+	struct nv04_instmem *imem;
+	struct nvkm_mm_node *node;
+};
+
+static enum nvkm_memory_target
+nv04_instobj_target(struct nvkm_memory *memory)
+{
+	return NVKM_MEM_TARGET_INST;
+}
+
+static u64
+nv04_instobj_addr(struct nvkm_memory *memory)
+{
+	return nv04_instobj(memory)->node->offset;
+}
+
+static u64
+nv04_instobj_size(struct nvkm_memory *memory)
+{
+	return nv04_instobj(memory)->node->length;
+}
+
+static void __iomem *
+nv04_instobj_acquire(struct nvkm_memory *memory)
+{
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	struct nvkm_device *device = iobj->imem->base.subdev.device;
+	return device->pri + 0x700000 + iobj->node->offset;
+}
+
+static void
+nv04_instobj_release(struct nvkm_memory *memory)
+{
+}
 
 static u32
-nv04_instobj_rd32(struct nvkm_object *object, u64 addr)
+nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
 {
-	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv04_instobj_priv *node = (void *)object;
-	return nv_ro32(priv, node->mem->offset + addr);
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	struct nvkm_device *device = iobj->imem->base.subdev.device;
+	return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
 }
 
 static void
-nv04_instobj_wr32(struct nvkm_object *object, u64 addr, u32 data)
+nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv04_instobj_priv *node = (void *)object;
-	nv_wo32(priv, node->mem->offset + addr, data);
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	struct nvkm_device *device = iobj->imem->base.subdev.device;
+	nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
 }
 
-static void
-nv04_instobj_dtor(struct nvkm_object *object)
+static void *
+nv04_instobj_dtor(struct nvkm_memory *memory)
 {
-	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv04_instobj_priv *node = (void *)object;
-	struct nvkm_subdev *subdev = (void *)priv;
-
-	mutex_lock(&subdev->mutex);
-	nvkm_mm_free(&priv->heap, &node->mem);
-	mutex_unlock(&subdev->mutex);
-
-	nvkm_instobj_destroy(&node->base);
+	struct nv04_instobj *iobj = nv04_instobj(memory);
+	mutex_lock(&iobj->imem->base.subdev.mutex);
+	nvkm_mm_free(&iobj->imem->heap, &iobj->node);
+	mutex_unlock(&iobj->imem->base.subdev.mutex);
+	return iobj;
 }
 
+static const struct nvkm_memory_func
+nv04_instobj_func = {
+	.dtor = nv04_instobj_dtor,
+	.target = nv04_instobj_target,
+	.size = nv04_instobj_size,
+	.addr = nv04_instobj_addr,
+	.acquire = nv04_instobj_acquire,
+	.release = nv04_instobj_release,
+	.rd32 = nv04_instobj_rd32,
+	.wr32 = nv04_instobj_wr32,
+};
+
 static int
-nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+		 struct nvkm_memory **pmemory)
 {
-	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
-	struct nv04_instobj_priv *node;
-	struct nvkm_instobj_args *args = data;
-	struct nvkm_subdev *subdev = (void *)priv;
+	struct nv04_instmem *imem = nv04_instmem(base);
+	struct nv04_instobj *iobj;
 	int ret;
 
-	if (!args->align)
-		args->align = 1;
+	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pmemory = &iobj->memory;
 
-	ret = nvkm_instobj_create(parent, engine, oclass, &node);
-	*pobject = nv_object(node);
-	if (ret)
-		return ret;
+	nvkm_memory_ctor(&nv04_instobj_func, &iobj->memory);
+	iobj->imem = imem;
 
-	mutex_lock(&subdev->mutex);
-	ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
-			   args->align, &node->mem);
-	mutex_unlock(&subdev->mutex);
-	if (ret)
-		return ret;
-
-	node->base.addr = node->mem->offset;
-	node->base.size = node->mem->length;
-	return 0;
+	mutex_lock(&imem->base.subdev.mutex);
+	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
+			   align ? align : 1, &iobj->node);
+	mutex_unlock(&imem->base.subdev.mutex);
+	return ret;
 }
 
-struct nvkm_instobj_impl
-nv04_instobj_oclass = {
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_instobj_ctor,
-		.dtor = nv04_instobj_dtor,
-		.init = _nvkm_instobj_init,
-		.fini = _nvkm_instobj_fini,
-		.rd32 = nv04_instobj_rd32,
-		.wr32 = nv04_instobj_wr32,
-	},
-};
-
 /******************************************************************************
  * instmem subdev implementation
  *****************************************************************************/
 
 static u32
-nv04_instmem_rd32(struct nvkm_object *object, u64 addr)
+nv04_instmem_rd32(struct nvkm_instmem *imem, u32 addr)
 {
-	return nv_rd32(object, 0x700000 + addr);
+	return nvkm_rd32(imem->subdev.device, 0x700000 + addr);
 }
 
 static void
-nv04_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data)
+nv04_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
 {
-	return nv_wr32(object, 0x700000 + addr, data);
-}
-
-void
-nv04_instmem_dtor(struct nvkm_object *object)
-{
-	struct nv04_instmem_priv *priv = (void *)object;
-	nvkm_gpuobj_ref(NULL, &priv->ramfc);
-	nvkm_gpuobj_ref(NULL, &priv->ramro);
-	nvkm_ramht_ref(NULL, &priv->ramht);
-	nvkm_gpuobj_ref(NULL, &priv->vbios);
-	nvkm_mm_fini(&priv->heap);
-	if (priv->iomem)
-		iounmap(priv->iomem);
-	nvkm_instmem_destroy(&priv->base);
+	nvkm_wr32(imem->subdev.device, 0x700000 + addr, data);
 }
 
 static int
-nv04_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv04_instmem_oneinit(struct nvkm_instmem *base)
 {
-	struct nv04_instmem_priv *priv;
+	struct nv04_instmem *imem = nv04_instmem(base);
+	struct nvkm_device *device = imem->base.subdev.device;
 	int ret;
 
-	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
 	/* PRAMIN aperture maps over the end of VRAM, reserve it */
-	priv->base.reserved = 512 * 1024;
+	imem->base.reserved = 512 * 1024;
 
-	ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+	ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
 	if (ret)
 		return ret;
 
 	/* 0x00000-0x10000: reserve for probable vbios image */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
-			      &priv->vbios);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
+			      &imem->base.vbios);
 	if (ret)
 		return ret;
 
 	/* 0x10000-0x18000: reserve for RAMHT */
-	ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
+	ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
 	if (ret)
 		return ret;
 
 	/* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x00800, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00800, 0, true,
+			      &imem->base.ramfc);
 	if (ret)
 		return ret;
 
 	/* 0x18800-0x18a00: reserve for RAMRO */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x00200, 0, 0,
-			      &priv->ramro);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00200, 0, false,
+			      &imem->base.ramro);
 	if (ret)
 		return ret;
 
 	return 0;
 }
 
-struct nvkm_oclass *
-nv04_instmem_oclass = &(struct nvkm_instmem_impl) {
-	.base.handle = NV_SUBDEV(INSTMEM, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_instmem_ctor,
-		.dtor = nv04_instmem_dtor,
-		.init = _nvkm_instmem_init,
-		.fini = _nvkm_instmem_fini,
-		.rd32 = nv04_instmem_rd32,
-		.wr32 = nv04_instmem_wr32,
-	},
-	.instobj = &nv04_instobj_oclass.base,
-}.base;
+static void *
+nv04_instmem_dtor(struct nvkm_instmem *base)
+{
+	struct nv04_instmem *imem = nv04_instmem(base);
+	nvkm_memory_del(&imem->base.ramfc);
+	nvkm_memory_del(&imem->base.ramro);
+	nvkm_ramht_del(&imem->base.ramht);
+	nvkm_memory_del(&imem->base.vbios);
+	nvkm_mm_fini(&imem->heap);
+	return imem;
+}
+
+static const struct nvkm_instmem_func
+nv04_instmem = {
+	.dtor = nv04_instmem_dtor,
+	.oneinit = nv04_instmem_oneinit,
+	.rd32 = nv04_instmem_rd32,
+	.wr32 = nv04_instmem_wr32,
+	.memory_new = nv04_instobj_new,
+	.persistent = false,
+	.zero = false,
+};
+
+int
+nv04_instmem_new(struct nvkm_device *device, int index,
+		 struct nvkm_instmem **pimem)
+{
+	struct nv04_instmem *imem;
+
+	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_instmem_ctor(&nv04_instmem, device, index, &imem->base);
+	*pimem = &imem->base;
+	return 0;
+}
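
Throughout these files the chipset type is now recovered from a generic nvkm_memory/nvkm_instmem pointer with container_of() rather than a blind cast, which stays correct no matter where the embedded base member sits in the wrapper. A self-contained illustration (struct memory and node_offset are toy stand-ins):

#include <stddef.h>
#include <stdio.h>

/* minimal container_of, as in the kernel's <linux/kernel.h> */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct memory {
        int dummy;
};

struct nv04_instobj {
        struct memory memory;   /* embedded base object */
        int node_offset;        /* chipset-specific state */
};

#define nv04_instobj(p) container_of((p), struct nv04_instobj, memory)

int
main(void)
{
        struct nv04_instobj iobj = { .node_offset = 0x1000 };
        struct memory *m = &iobj.memory;        /* generic handle */

        /* recover the wrapper from the base pointer, as the driver does */
        printf("offset %#x\n", nv04_instobj(m)->node_offset);
        return 0;
}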
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h
deleted file mode 100644
index 42b6c92..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef __NV04_INSTMEM_H__
-#define __NV04_INSTMEM_H__
-#include "priv.h"
-
-#include <core/mm.h>
-
-extern struct nvkm_instobj_impl nv04_instobj_oclass;
-
-struct nv04_instmem_priv {
-	struct nvkm_instmem base;
-
-	void __iomem *iomem;
-	struct nvkm_mm heap;
-
-	struct nvkm_gpuobj *vbios;
-	struct nvkm_ramht  *ramht;
-	struct nvkm_gpuobj *ramro;
-	struct nvkm_gpuobj *ramfc;
-};
-
-static inline struct nv04_instmem_priv *
-nv04_instmem(void *obj)
-{
-	return (void *)nvkm_instmem(obj);
-}
-
-struct nv04_instobj_priv {
-	struct nvkm_instobj base;
-	struct nvkm_mm_node *mem;
-};
-
-void nv04_instmem_dtor(struct nvkm_object *);
-
-int nv04_instmem_alloc(struct nvkm_instmem *, struct nvkm_object *,
-		       u32 size, u32 align, struct nvkm_object **pobject);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index b42b858..c054387 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -21,116 +21,239 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#define nv40_instmem(p) container_of((p), struct nv40_instmem, base)
+#include "priv.h"
 
+#include <core/memory.h>
 #include <core/ramht.h>
 #include <engine/gr/nv40.h>
 
+struct nv40_instmem {
+	struct nvkm_instmem base;
+	struct nvkm_mm heap;
+	void __iomem *iomem;
+};
+
+/******************************************************************************
+ * instmem object implementation
+ *****************************************************************************/
+#define nv40_instobj(p) container_of((p), struct nv40_instobj, memory)
+
+struct nv40_instobj {
+	struct nvkm_memory memory;
+	struct nv40_instmem *imem;
+	struct nvkm_mm_node *node;
+};
+
+static enum nvkm_memory_target
+nv40_instobj_target(struct nvkm_memory *memory)
+{
+	return NVKM_MEM_TARGET_INST;
+}
+
+static u64
+nv40_instobj_addr(struct nvkm_memory *memory)
+{
+	return nv40_instobj(memory)->node->offset;
+}
+
+static u64
+nv40_instobj_size(struct nvkm_memory *memory)
+{
+	return nv40_instobj(memory)->node->length;
+}
+
+static void __iomem *
+nv40_instobj_acquire(struct nvkm_memory *memory)
+{
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	return iobj->imem->iomem + iobj->node->offset;
+}
+
+static void
+nv40_instobj_release(struct nvkm_memory *memory)
+{
+}
+
+static u32
+nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+{
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
+}
+
+static void
+nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+{
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
+}
+
+static void *
+nv40_instobj_dtor(struct nvkm_memory *memory)
+{
+	struct nv40_instobj *iobj = nv40_instobj(memory);
+	mutex_lock(&iobj->imem->base.subdev.mutex);
+	nvkm_mm_free(&iobj->imem->heap, &iobj->node);
+	mutex_unlock(&iobj->imem->base.subdev.mutex);
+	return iobj;
+}
+
+static const struct nvkm_memory_func
+nv40_instobj_func = {
+	.dtor = nv40_instobj_dtor,
+	.target = nv40_instobj_target,
+	.size = nv40_instobj_size,
+	.addr = nv40_instobj_addr,
+	.acquire = nv40_instobj_acquire,
+	.release = nv40_instobj_release,
+	.rd32 = nv40_instobj_rd32,
+	.wr32 = nv40_instobj_wr32,
+};
+
+static int
+nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+		 struct nvkm_memory **pmemory)
+{
+	struct nv40_instmem *imem = nv40_instmem(base);
+	struct nv40_instobj *iobj;
+	int ret;
+
+	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pmemory = &iobj->memory;
+
+	nvkm_memory_ctor(&nv40_instobj_func, &iobj->memory);
+	iobj->imem = imem;
+
+	mutex_lock(&imem->base.subdev.mutex);
+	ret = nvkm_mm_head(&imem->heap, 0, 1, size, size,
+			   align ? align : 1, &iobj->node);
+	mutex_unlock(&imem->base.subdev.mutex);
+	return ret;
+}
+
 /******************************************************************************
  * instmem subdev implementation
  *****************************************************************************/
 
 static u32
-nv40_instmem_rd32(struct nvkm_object *object, u64 addr)
+nv40_instmem_rd32(struct nvkm_instmem *base, u32 addr)
 {
-	struct nv04_instmem_priv *priv = (void *)object;
-	return ioread32_native(priv->iomem + addr);
+	return ioread32_native(nv40_instmem(base)->iomem + addr);
 }
 
 static void
-nv40_instmem_wr32(struct nvkm_object *object, u64 addr, u32 data)
+nv40_instmem_wr32(struct nvkm_instmem *base, u32 addr, u32 data)
 {
-	struct nv04_instmem_priv *priv = (void *)object;
-	iowrite32_native(data, priv->iomem + addr);
+	iowrite32_native(data, nv40_instmem(base)->iomem + addr);
 }
 
 static int
-nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv40_instmem_oneinit(struct nvkm_instmem *base)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nv04_instmem_priv *priv;
-	int ret, bar, vs;
-
-	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	/* map bar */
-	if (nv_device_resource_len(device, 2))
-		bar = 2;
-	else
-		bar = 3;
-
-	priv->iomem = ioremap(nv_device_resource_start(device, bar),
-			      nv_device_resource_len(device, bar));
-	if (!priv->iomem) {
-		nv_error(priv, "unable to map PRAMIN BAR\n");
-		return -EFAULT;
-	}
+	struct nv40_instmem *imem = nv40_instmem(base);
+	struct nvkm_device *device = imem->base.subdev.device;
+	int ret, vs;
 
 	/* PRAMIN aperture maps over the end of vram, reserve enough space
 	 * to fit graphics contexts for every channel, the magics come
 	 * from engine/gr/nv40.c
 	 */
-	vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
-	if      (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
-	else if (device->chipset  < 0x43) priv->base.reserved = 0x4f00 * vs;
-	else if (nv44_gr_class(priv))  priv->base.reserved = 0x4980 * vs;
-	else				  priv->base.reserved = 0x4a40 * vs;
-	priv->base.reserved += 16 * 1024;
-	priv->base.reserved *= 32;		/* per-channel */
-	priv->base.reserved += 512 * 1024;	/* pci(e)gart table */
-	priv->base.reserved += 512 * 1024;	/* object storage */
+	vs = hweight8((nvkm_rd32(device, 0x001540) & 0x0000ff00) >> 8);
+	if      (device->chipset == 0x40) imem->base.reserved = 0x6aa0 * vs;
+	else if (device->chipset  < 0x43) imem->base.reserved = 0x4f00 * vs;
+	else if (nv44_gr_class(device))   imem->base.reserved = 0x4980 * vs;
+	else				  imem->base.reserved = 0x4a40 * vs;
+	imem->base.reserved += 16 * 1024;
+	imem->base.reserved *= 32;		/* per-channel */
+	imem->base.reserved += 512 * 1024;	/* pci(e)gart table */
+	imem->base.reserved += 512 * 1024;	/* object storage */
+	imem->base.reserved = round_up(imem->base.reserved, 4096);
 
-	priv->base.reserved = round_up(priv->base.reserved, 4096);
-
-	ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+	ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
 	if (ret)
 		return ret;
 
 	/* 0x00000-0x10000: reserve for probable vbios image */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
-			      &priv->vbios);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
+			      &imem->base.vbios);
 	if (ret)
 		return ret;
 
 	/* 0x10000-0x18000: reserve for RAMHT */
-	ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
+	ret = nvkm_ramht_new(device, 0x08000, 0, NULL, &imem->base.ramht);
 	if (ret)
 		return ret;
 
 	/* 0x18000-0x18200: reserve for RAMRO
 	 * 0x18200-0x20000: padding
 	 */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
-			      &priv->ramro);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x08000, 0, false,
+			      &imem->base.ramro);
 	if (ret)
 		return ret;
 
 	/* 0x20000-0x21000: reserve for RAMFC
 	 * 0x21000-0x40000: padding and some unknown crap
 	 */
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x20000, 0, true,
+			      &imem->base.ramfc);
 	if (ret)
 		return ret;
 
 	return 0;
 }
 
-struct nvkm_oclass *
-nv40_instmem_oclass = &(struct nvkm_instmem_impl) {
-	.base.handle = NV_SUBDEV(INSTMEM, 0x40),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_instmem_ctor,
-		.dtor = nv04_instmem_dtor,
-		.init = _nvkm_instmem_init,
-		.fini = _nvkm_instmem_fini,
-		.rd32 = nv40_instmem_rd32,
-		.wr32 = nv40_instmem_wr32,
-	},
-	.instobj = &nv04_instobj_oclass.base,
-}.base;
+static void *
+nv40_instmem_dtor(struct nvkm_instmem *base)
+{
+	struct nv40_instmem *imem = nv40_instmem(base);
+	nvkm_memory_del(&imem->base.ramfc);
+	nvkm_memory_del(&imem->base.ramro);
+	nvkm_ramht_del(&imem->base.ramht);
+	nvkm_memory_del(&imem->base.vbios);
+	nvkm_mm_fini(&imem->heap);
+	if (imem->iomem)
+		iounmap(imem->iomem);
+	return imem;
+}
+
+static const struct nvkm_instmem_func
+nv40_instmem = {
+	.dtor = nv40_instmem_dtor,
+	.oneinit = nv40_instmem_oneinit,
+	.rd32 = nv40_instmem_rd32,
+	.wr32 = nv40_instmem_wr32,
+	.memory_new = nv40_instobj_new,
+	.persistent = false,
+	.zero = false,
+};
+
+int
+nv40_instmem_new(struct nvkm_device *device, int index,
+		 struct nvkm_instmem **pimem)
+{
+	struct nv40_instmem *imem;
+	int bar;
+
+	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_instmem_ctor(&nv40_instmem, device, index, &imem->base);
+	*pimem = &imem->base;
+
+	/* map bar */
+	if (device->func->resource_size(device, 2))
+		bar = 2;
+	else
+		bar = 3;
+
+	imem->iomem = ioremap(device->func->resource_addr(device, bar),
+			      device->func->resource_size(device, bar));
+	if (!imem->iomem) {
+		nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
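
The reservation arithmetic in nv40_instmem_oneinit() is worth spelling out: hweight8() of bits 8-15 of register 0x001540 counts enabled units, the per-channel context is a chipset-dependent magic scaled by that count plus 16KiB, the per-channel total is multiplied by 32 channels, and two fixed 512KiB regions are added before rounding up to a page. A sketch of the maths for the 0x4a40 branch; nv40_reserved is an invented helper and the register value is made up.

#include <stdint.h>
#include <stdio.h>

/* population count of a byte, the kernel's hweight8() */
static unsigned
hweight8(uint8_t v)
{
        unsigned n = 0;
        while (v) {
                n += v & 1;
                v >>= 1;
        }
        return n;
}

/* round up to a power-of-two boundary */
static uint32_t
round_up_to(uint32_t v, uint32_t a)
{
        return (v + a - 1) & ~(a - 1);
}

/* mirror the nv40 sizing for a chipset that takes the 0x4a40 branch */
static uint32_t
nv40_reserved(uint32_t reg_001540)
{
        unsigned vs = hweight8((reg_001540 & 0x0000ff00) >> 8);
        uint32_t reserved = 0x4a40 * vs;

        reserved += 16 * 1024;          /* per-channel overhead */
        reserved *= 32;                 /* per-channel -> all channels */
        reserved += 512 * 1024;         /* pci(e)gart table */
        reserved += 512 * 1024;         /* object storage */
        return round_up_to(reserved, 4096);
}

int
main(void)
{
        printf("%u KiB reserved\n", nv40_reserved(0x00000300) / 1024);
        return 0;
}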
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 8404143..6d512c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -21,149 +21,229 @@
  *
  * Authors: Ben Skeggs
  */
+#define nv50_instmem(p) container_of((p), struct nv50_instmem, base)
 #include "priv.h"
 
+#include <core/memory.h>
+#include <subdev/bar.h>
 #include <subdev/fb.h>
+#include <subdev/mmu.h>
 
-struct nv50_instmem_priv {
+struct nv50_instmem {
 	struct nvkm_instmem base;
+	unsigned long lock_flags;
 	spinlock_t lock;
 	u64 addr;
 };
 
-struct nv50_instobj_priv {
-	struct nvkm_instobj base;
-	struct nvkm_mem *mem;
-};
-
 /******************************************************************************
  * instmem object implementation
  *****************************************************************************/
+#define nv50_instobj(p) container_of((p), struct nv50_instobj, memory)
+
+struct nv50_instobj {
+	struct nvkm_memory memory;
+	struct nv50_instmem *imem;
+	struct nvkm_mem *mem;
+	struct nvkm_vma bar;
+	void *map;
+};
+
+static enum nvkm_memory_target
+nv50_instobj_target(struct nvkm_memory *memory)
+{
+	return NVKM_MEM_TARGET_VRAM;
+}
+
+static u64
+nv50_instobj_addr(struct nvkm_memory *memory)
+{
+	return nv50_instobj(memory)->mem->offset;
+}
+
+static u64
+nv50_instobj_size(struct nvkm_memory *memory)
+{
+	return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT;
+}
+
+static void
+nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+{
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nvkm_subdev *subdev = &iobj->imem->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	u64 size = nvkm_memory_size(memory);
+	void __iomem *map;
+	int ret;
+
+	iobj->map = ERR_PTR(-ENOMEM);
+
+	ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &iobj->bar);
+	if (ret == 0) {
+		map = ioremap(device->func->resource_addr(device, 3) +
+			      (u32)iobj->bar.offset, size);
+		if (map) {
+			nvkm_memory_map(memory, &iobj->bar, 0);
+			iobj->map = map;
+		} else {
+			nvkm_warn(subdev, "PRAMIN ioremap failed\n");
+			nvkm_vm_put(&iobj->bar);
+		}
+	} else {
+		nvkm_warn(subdev, "PRAMIN exhausted\n");
+	}
+}
+
+static void
+nv50_instobj_release(struct nvkm_memory *memory)
+{
+	struct nv50_instmem *imem = nv50_instobj(memory)->imem;
+	spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
+}
+
+static void __iomem *
+nv50_instobj_acquire(struct nvkm_memory *memory)
+{
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nv50_instmem *imem = iobj->imem;
+	struct nvkm_bar *bar = imem->base.subdev.device->bar;
+	struct nvkm_vm *vm;
+	unsigned long flags;
+
+	if (!iobj->map && (vm = nvkm_bar_kmap(bar)))
+		nvkm_memory_boot(memory, vm);
+	if (!IS_ERR_OR_NULL(iobj->map))
+		return iobj->map;
+
+	spin_lock_irqsave(&imem->lock, flags);
+	imem->lock_flags = flags;
+	return NULL;
+}
 
 static u32
-nv50_instobj_rd32(struct nvkm_object *object, u64 offset)
+nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
 {
-	struct nv50_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv50_instobj_priv *node = (void *)object;
-	unsigned long flags;
-	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nv50_instmem *imem = iobj->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
+	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
 	u32 data;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	if (unlikely(priv->addr != base)) {
-		nv_wr32(priv, 0x001700, base >> 16);
-		priv->addr = base;
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
 	}
-	data = nv_rd32(priv, 0x700000 + addr);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	data = nvkm_rd32(device, 0x700000 + addr);
 	return data;
 }
 
 static void
-nv50_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
+nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
 {
-	struct nv50_instmem_priv *priv = (void *)nvkm_instmem(object);
-	struct nv50_instobj_priv *node = (void *)object;
-	unsigned long flags;
-	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
-	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nv50_instmem *imem = iobj->imem;
+	struct nvkm_device *device = imem->base.subdev.device;
+	u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
+	u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	if (unlikely(priv->addr != base)) {
-		nv_wr32(priv, 0x001700, base >> 16);
-		priv->addr = base;
+	if (unlikely(imem->addr != base)) {
+		nvkm_wr32(device, 0x001700, base >> 16);
+		imem->addr = base;
 	}
-	nv_wr32(priv, 0x700000 + addr, data);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	nvkm_wr32(device, 0x700000 + addr, data);
 }
 
 static void
-nv50_instobj_dtor(struct nvkm_object *object)
+nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
 {
-	struct nv50_instobj_priv *node = (void *)object;
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	pfb->ram->put(pfb, &node->mem);
-	nvkm_instobj_destroy(&node->base);
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	nvkm_vm_map_at(vma, offset, iobj->mem);
 }
 
+static void *
+nv50_instobj_dtor(struct nvkm_memory *memory)
+{
+	struct nv50_instobj *iobj = nv50_instobj(memory);
+	struct nvkm_ram *ram = iobj->imem->base.subdev.device->fb->ram;
+	if (!IS_ERR_OR_NULL(iobj->map)) {
+		nvkm_vm_put(&iobj->bar);
+		iounmap(iobj->map);
+	}
+	ram->func->put(ram, &iobj->mem);
+	return iobj;
+}
+
+static const struct nvkm_memory_func
+nv50_instobj_func = {
+	.dtor = nv50_instobj_dtor,
+	.target = nv50_instobj_target,
+	.size = nv50_instobj_size,
+	.addr = nv50_instobj_addr,
+	.boot = nv50_instobj_boot,
+	.acquire = nv50_instobj_acquire,
+	.release = nv50_instobj_release,
+	.rd32 = nv50_instobj_rd32,
+	.wr32 = nv50_instobj_wr32,
+	.map = nv50_instobj_map,
+};
+
 static int
-nv50_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
+		 struct nvkm_memory **pmemory)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_instobj_args *args = data;
-	struct nv50_instobj_priv *node;
+	struct nv50_instmem *imem = nv50_instmem(base);
+	struct nv50_instobj *iobj;
+	struct nvkm_ram *ram = imem->base.subdev.device->fb->ram;
 	int ret;
 
-	args->size  = max((args->size  + 4095) & ~4095, (u32)4096);
-	args->align = max((args->align + 4095) & ~4095, (u32)4096);
+	if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
+		return -ENOMEM;
+	*pmemory = &iobj->memory;
 
-	ret = nvkm_instobj_create(parent, engine, oclass, &node);
-	*pobject = nv_object(node);
+	nvkm_memory_ctor(&nv50_instobj_func, &iobj->memory);
+	iobj->imem = imem;
+
+	size  = max((size  + 4095) & ~4095, (u32)4096);
+	align = max((align + 4095) & ~4095, (u32)4096);
+
+	ret = ram->func->get(ram, size, align, 0, 0x800, &iobj->mem);
 	if (ret)
 		return ret;
 
-	ret = pfb->ram->get(pfb, args->size, args->align, 0, 0x800, &node->mem);
-	if (ret)
-		return ret;
-
-	node->base.addr = node->mem->offset;
-	node->base.size = node->mem->size << 12;
-	node->mem->page_shift = 12;
+	iobj->mem->page_shift = 12;
 	return 0;
 }
 
-static struct nvkm_instobj_impl
-nv50_instobj_oclass = {
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_instobj_ctor,
-		.dtor = nv50_instobj_dtor,
-		.init = _nvkm_instobj_init,
-		.fini = _nvkm_instobj_fini,
-		.rd32 = nv50_instobj_rd32,
-		.wr32 = nv50_instobj_wr32,
-	},
-};
-
 /******************************************************************************
  * instmem subdev implementation
  *****************************************************************************/
 
-static int
-nv50_instmem_fini(struct nvkm_object *object, bool suspend)
+static void
+nv50_instmem_fini(struct nvkm_instmem *base)
 {
-	struct nv50_instmem_priv *priv = (void *)object;
-	priv->addr = ~0ULL;
-	return nvkm_instmem_fini(&priv->base, suspend);
+	nv50_instmem(base)->addr = ~0ULL;
 }
 
-static int
-nv50_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, void *data, u32 size,
-		  struct nvkm_object **pobject)
+static const struct nvkm_instmem_func
+nv50_instmem = {
+	.fini = nv50_instmem_fini,
+	.memory_new = nv50_instobj_new,
+	.persistent = false,
+	.zero = false,
+};
+
+int
+nv50_instmem_new(struct nvkm_device *device, int index,
+		 struct nvkm_instmem **pimem)
 {
-	struct nv50_instmem_priv *priv;
-	int ret;
+	struct nv50_instmem *imem;
 
-	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	spin_lock_init(&priv->lock);
+	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
+	spin_lock_init(&imem->lock);
+	*pimem = &imem->base;
 	return 0;
 }
-
-struct nvkm_oclass *
-nv50_instmem_oclass = &(struct nvkm_instmem_impl) {
-	.base.handle = NV_SUBDEV(INSTMEM, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_instmem_ctor,
-		.dtor = _nvkm_instmem_dtor,
-		.init = _nvkm_instmem_init,
-		.fini = nv50_instmem_fini,
-	},
-	.instobj = &nv50_instobj_oclass.base,
-}.base;
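
nv50's memory accessors are now bracketed by acquire()/release(): acquire() hands back a direct BAR mapping when one exists (booting it on first use), and otherwise takes the spinlock and returns NULL, steering the caller to the indirect rd32()/wr32() path through the 0x700000 PRAMIN window. Stashing the irqsave flags in the instmem structure works because the same lock serializes everyone on the slow path. A simplified userspace model of that contract (a pthread mutex standing in for the spinlock; the real release() restores the saved flags unconditionally):

#include <pthread.h>
#include <stdint.h>

struct obj {
	pthread_mutex_t lock;	/* plays the nv50_instmem spinlock */
	uint32_t *map;		/* direct mapping; NULL until booted */
	uint32_t window[32];	/* stands in for the PRAMIN aperture */
};

static uint32_t *
obj_acquire(struct obj *obj)
{
	if (obj->map)
		return obj->map;	/* fast path: direct access */
	pthread_mutex_lock(&obj->lock);	/* slow path: serialize the window */
	return NULL;
}

static void
obj_release(struct obj *obj)
{
	if (!obj->map)
		pthread_mutex_unlock(&obj->lock);
}

static uint32_t
obj_rd32(struct obj *obj, unsigned word)
{
	/* the real driver repoints 0x001700 at the right 1MiB page first */
	return obj->window[word];
}

A caller then reads as: if acquire() returns a pointer, dereference it; otherwise fall back to obj_rd32(); either way, finish with obj_release().
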
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index b10e292..ace4471 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -1,54 +1,20 @@
 #ifndef __NVKM_INSTMEM_PRIV_H__
 #define __NVKM_INSTMEM_PRIV_H__
+#define nvkm_instmem(p) container_of((p), struct nvkm_instmem, subdev)
 #include <subdev/instmem.h>
 
-struct nvkm_instobj_impl {
-	struct nvkm_oclass base;
+struct nvkm_instmem_func {
+	void *(*dtor)(struct nvkm_instmem *);
+	int (*oneinit)(struct nvkm_instmem *);
+	void (*fini)(struct nvkm_instmem *);
+	u32  (*rd32)(struct nvkm_instmem *, u32 addr);
+	void (*wr32)(struct nvkm_instmem *, u32 addr, u32 data);
+	int (*memory_new)(struct nvkm_instmem *, u32 size, u32 align,
+			  bool zero, struct nvkm_memory **);
+	bool persistent;
+	bool zero;
 };
 
-struct nvkm_instobj_args {
-	u32 size;
-	u32 align;
-};
-
-#define nvkm_instobj_create(p,e,o,d)                                        \
-	nvkm_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_instobj_destroy(p) ({                                          \
-	struct nvkm_instobj *iobj = (p);                                    \
-	_nvkm_instobj_dtor(nv_object(iobj));                                \
-})
-#define nvkm_instobj_init(p)                                                \
-	nvkm_object_init(&(p)->base)
-#define nvkm_instobj_fini(p,s)                                              \
-	nvkm_object_fini(&(p)->base, (s))
-
-int  nvkm_instobj_create_(struct nvkm_object *, struct nvkm_object *,
-			     struct nvkm_oclass *, int, void **);
-void _nvkm_instobj_dtor(struct nvkm_object *);
-#define _nvkm_instobj_init nvkm_object_init
-#define _nvkm_instobj_fini nvkm_object_fini
-
-struct nvkm_instmem_impl {
-	struct nvkm_oclass base;
-	struct nvkm_oclass *instobj;
-};
-
-#define nvkm_instmem_create(p,e,o,d)                                        \
-	nvkm_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_instmem_destroy(p)                                             \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_instmem_init(p) ({                                             \
-	struct nvkm_instmem *imem = (p);                                    \
-	_nvkm_instmem_init(nv_object(imem));                                \
-})
-#define nvkm_instmem_fini(p,s) ({                                           \
-	struct nvkm_instmem *imem = (p);                                    \
-	_nvkm_instmem_fini(nv_object(imem), (s));                           \
-})
-
-int nvkm_instmem_create_(struct nvkm_object *, struct nvkm_object *,
-			    struct nvkm_oclass *, int, void **);
-#define _nvkm_instmem_dtor _nvkm_subdev_dtor
-int _nvkm_instmem_init(struct nvkm_object *);
-int _nvkm_instmem_fini(struct nvkm_object *, bool);
+void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
+		       int index, struct nvkm_instmem *);
 #endif
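
The header now boils a subdev implementation down to one flat function table plus policy flags, replacing the oclass/ofuncs scaffolding deleted below it. Optional hooks stay NULL and get checked at the call site, while persistent/zero are plain data consulted by common code. An illustrative stand-in for that shape (the NULL-checked dispatch mirrors common-code convention; base.c itself is outside this hunk):

#include <stdbool.h>
#include <stddef.h>

struct instmem;

struct instmem_func {
	void (*fini)(struct instmem *);			/* optional hook */
	int  (*memory_new)(struct instmem *, unsigned size,
			   unsigned align, bool zero, void **pmemory);
	bool persistent;				/* policy, not code */
	bool zero;
};

struct instmem {
	const struct instmem_func *func;
};

static void
instmem_fini(struct instmem *imem)
{
	if (imem->func->fini)	/* dispatch only when implemented */
		imem->func->fini(imem);
}
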
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 2fb87fb..930d25b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -23,102 +23,110 @@
  */
 #include "priv.h"
 
-static int
+#include <subdev/fb.h>
+
+int
 nvkm_ltc_tags_alloc(struct nvkm_ltc *ltc, u32 n, struct nvkm_mm_node **pnode)
 {
-	struct nvkm_ltc_priv *priv = (void *)ltc;
-	int ret;
-
-	ret = nvkm_mm_head(&priv->tags, 0, 1, n, n, 1, pnode);
+	int ret = nvkm_mm_head(&ltc->tags, 0, 1, n, n, 1, pnode);
 	if (ret)
 		*pnode = NULL;
-
 	return ret;
 }
 
-static void
+void
 nvkm_ltc_tags_free(struct nvkm_ltc *ltc, struct nvkm_mm_node **pnode)
 {
-	struct nvkm_ltc_priv *priv = (void *)ltc;
-	nvkm_mm_free(&priv->tags, pnode);
+	nvkm_mm_free(&ltc->tags, pnode);
 }
 
-static void
+void
 nvkm_ltc_tags_clear(struct nvkm_ltc *ltc, u32 first, u32 count)
 {
-	const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
-	struct nvkm_ltc_priv *priv = (void *)ltc;
 	const u32 limit = first + count - 1;
 
-	BUG_ON((first > limit) || (limit >= priv->num_tags));
+	BUG_ON((first > limit) || (limit >= ltc->num_tags));
 
-	impl->cbc_clear(priv, first, limit);
-	impl->cbc_wait(priv);
+	ltc->func->cbc_clear(ltc, first, limit);
+	ltc->func->cbc_wait(ltc);
 }
 
-static int
+int
 nvkm_ltc_zbc_color_get(struct nvkm_ltc *ltc, int index, const u32 color[4])
 {
-	const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
-	struct nvkm_ltc_priv *priv = (void *)ltc;
-	memcpy(priv->zbc_color[index], color, sizeof(priv->zbc_color[index]));
-	impl->zbc_clear_color(priv, index, color);
-	return index;
-}
-
-static int
-nvkm_ltc_zbc_depth_get(struct nvkm_ltc *ltc, int index, const u32 depth)
-{
-	const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
-	struct nvkm_ltc_priv *priv = (void *)ltc;
-	priv->zbc_depth[index] = depth;
-	impl->zbc_clear_depth(priv, index, depth);
+	memcpy(ltc->zbc_color[index], color, sizeof(ltc->zbc_color[index]));
+	ltc->func->zbc_clear_color(ltc, index, color);
 	return index;
 }
 
 int
-_nvkm_ltc_init(struct nvkm_object *object)
+nvkm_ltc_zbc_depth_get(struct nvkm_ltc *ltc, int index, const u32 depth)
 {
-	const struct nvkm_ltc_impl *impl = (void *)nv_oclass(object);
-	struct nvkm_ltc_priv *priv = (void *)object;
-	int ret, i;
+	ltc->zbc_depth[index] = depth;
+	ltc->func->zbc_clear_depth(ltc, index, depth);
+	return index;
+}
 
-	ret = nvkm_subdev_init(&priv->base.base);
-	if (ret)
-		return ret;
+static void
+nvkm_ltc_intr(struct nvkm_subdev *subdev)
+{
+	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
+	ltc->func->intr(ltc);
+}
 
-	for (i = priv->base.zbc_min; i <= priv->base.zbc_max; i++) {
-		impl->zbc_clear_color(priv, i, priv->zbc_color[i]);
-		impl->zbc_clear_depth(priv, i, priv->zbc_depth[i]);
+static int
+nvkm_ltc_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
+	return ltc->func->oneinit(ltc);
+}
+
+static int
+nvkm_ltc_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
+	int i;
+
+	for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
+		ltc->func->zbc_clear_color(ltc, i, ltc->zbc_color[i]);
+		ltc->func->zbc_clear_depth(ltc, i, ltc->zbc_depth[i]);
 	}
 
+	ltc->func->init(ltc);
 	return 0;
 }
 
-int
-nvkm_ltc_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
+static void *
+nvkm_ltc_dtor(struct nvkm_subdev *subdev)
 {
-	const struct nvkm_ltc_impl *impl = (void *)oclass;
-	struct nvkm_ltc_priv *priv;
-	int ret;
+	struct nvkm_ltc *ltc = nvkm_ltc(subdev);
+	struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
+	nvkm_mm_fini(&ltc->tags);
+	if (ram)
+		nvkm_mm_free(&ram->vram, &ltc->tag_ram);
+	return ltc;
+}
 
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PLTCG",
-				  "l2c", length, pobject);
-	priv = *pobject;
-	if (ret)
-		return ret;
+static const struct nvkm_subdev_func
+nvkm_ltc = {
+	.dtor = nvkm_ltc_dtor,
+	.oneinit = nvkm_ltc_oneinit,
+	.init = nvkm_ltc_init,
+	.intr = nvkm_ltc_intr,
+};
 
-	memset(priv->zbc_color, 0x00, sizeof(priv->zbc_color));
-	memset(priv->zbc_depth, 0x00, sizeof(priv->zbc_depth));
+int
+nvkm_ltc_new_(const struct nvkm_ltc_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_ltc **pltc)
+{
+	struct nvkm_ltc *ltc;
 
-	priv->base.base.intr = impl->intr;
-	priv->base.tags_alloc = nvkm_ltc_tags_alloc;
-	priv->base.tags_free = nvkm_ltc_tags_free;
-	priv->base.tags_clear = nvkm_ltc_tags_clear;
-	priv->base.zbc_min = 1; /* reserve 0 for disabled */
-	priv->base.zbc_max = min(impl->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1;
-	priv->base.zbc_color_get = nvkm_ltc_zbc_color_get;
-	priv->base.zbc_depth_get = nvkm_ltc_zbc_depth_get;
+	if (!(ltc = *pltc = kzalloc(sizeof(*ltc), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_subdev_ctor(&nvkm_ltc, device, index, 0, &ltc->subdev);
+	ltc->func = func;
+	ltc->zbc_min = 1; /* reserve 0 for disabled */
+	ltc->zbc_max = min(func->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1;
 	return 0;
 }
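
The tail of nvkm_ltc_new_() encodes the ZBC slot policy: index 0 is reserved to mean "ZBC disabled", so with func->zbc = 16 on the generations in this series the usable range is 1..15. A tiny worked example (MAX_ZBC stands in for NVKM_LTC_MAX_ZBC_CNT, whose value is outside this hunk):

#include <stdio.h>

#define MAX_ZBC 16	/* assumption for the example */

int main(void)
{
	int zbc = 16;		/* func->zbc for gf100/gk104/gm107 */
	int zbc_min = 1;	/* reserve 0 for disabled */
	int zbc_max = (zbc < MAX_ZBC ? zbc : MAX_ZBC) - 1;

	printf("usable ZBC slots: %d..%d\n", zbc_min, zbc_max);
	return 0;
}
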
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index 7fb5ea0..45ac765 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -28,38 +28,47 @@
 #include <subdev/timer.h>
 
 void
-gf100_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit)
+gf100_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit)
 {
-	nv_wr32(priv, 0x17e8cc, start);
-	nv_wr32(priv, 0x17e8d0, limit);
-	nv_wr32(priv, 0x17e8c8, 0x00000004);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_wr32(device, 0x17e8cc, start);
+	nvkm_wr32(device, 0x17e8d0, limit);
+	nvkm_wr32(device, 0x17e8c8, 0x00000004);
 }
 
 void
-gf100_ltc_cbc_wait(struct nvkm_ltc_priv *priv)
+gf100_ltc_cbc_wait(struct nvkm_ltc *ltc)
 {
+	struct nvkm_device *device = ltc->subdev.device;
 	int c, s;
-	for (c = 0; c < priv->ltc_nr; c++) {
-		for (s = 0; s < priv->lts_nr; s++)
-			nv_wait(priv, 0x1410c8 + c * 0x2000 + s * 0x400, ~0, 0);
+	for (c = 0; c < ltc->ltc_nr; c++) {
+		for (s = 0; s < ltc->lts_nr; s++) {
+			const u32 addr = 0x1410c8 + (c * 0x2000) + (s * 0x400);
+			nvkm_msec(device, 2000,
+				if (!nvkm_rd32(device, addr))
+					break;
+			);
+		}
 	}
 }
 
 void
-gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4])
+gf100_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
 {
-	nv_mask(priv, 0x17ea44, 0x0000000f, i);
-	nv_wr32(priv, 0x17ea48, color[0]);
-	nv_wr32(priv, 0x17ea4c, color[1]);
-	nv_wr32(priv, 0x17ea50, color[2]);
-	nv_wr32(priv, 0x17ea54, color[3]);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_mask(device, 0x17ea44, 0x0000000f, i);
+	nvkm_wr32(device, 0x17ea48, color[0]);
+	nvkm_wr32(device, 0x17ea4c, color[1]);
+	nvkm_wr32(device, 0x17ea50, color[2]);
+	nvkm_wr32(device, 0x17ea54, color[3]);
 }
 
 void
-gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth)
+gf100_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
 {
-	nv_mask(priv, 0x17ea44, 0x0000000f, i);
-	nv_wr32(priv, 0x17ea58, depth);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_mask(device, 0x17ea44, 0x0000000f, i);
+	nvkm_wr32(device, 0x17ea58, depth);
 }
 
 static const struct nvkm_bitfield
@@ -81,88 +90,60 @@
 };
 
 static void
-gf100_ltc_lts_intr(struct nvkm_ltc_priv *priv, int ltc, int lts)
+gf100_ltc_lts_intr(struct nvkm_ltc *ltc, int c, int s)
 {
-	u32 base = 0x141000 + (ltc * 0x2000) + (lts * 0x400);
-	u32 intr = nv_rd32(priv, base + 0x020);
+	struct nvkm_subdev *subdev = &ltc->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 base = 0x141000 + (c * 0x2000) + (s * 0x400);
+	u32 intr = nvkm_rd32(device, base + 0x020);
 	u32 stat = intr & 0x0000ffff;
+	char msg[128];
 
 	if (stat) {
-		nv_info(priv, "LTC%d_LTS%d:", ltc, lts);
-		nvkm_bitfield_print(gf100_ltc_lts_intr_name, stat);
-		pr_cont("\n");
+		nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat);
+		nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, stat, msg);
 	}
 
-	nv_wr32(priv, base + 0x020, intr);
+	nvkm_wr32(device, base + 0x020, intr);
 }
 
 void
-gf100_ltc_intr(struct nvkm_subdev *subdev)
+gf100_ltc_intr(struct nvkm_ltc *ltc)
 {
-	struct nvkm_ltc_priv *priv = (void *)subdev;
+	struct nvkm_device *device = ltc->subdev.device;
 	u32 mask;
 
-	mask = nv_rd32(priv, 0x00017c);
+	mask = nvkm_rd32(device, 0x00017c);
 	while (mask) {
-		u32 lts, ltc = __ffs(mask);
-		for (lts = 0; lts < priv->lts_nr; lts++)
-			gf100_ltc_lts_intr(priv, ltc, lts);
-		mask &= ~(1 << ltc);
+		u32 s, c = __ffs(mask);
+		for (s = 0; s < ltc->lts_nr; s++)
+			gf100_ltc_lts_intr(ltc, c, s);
+		mask &= ~(1 << c);
 	}
 }
 
-static int
-gf100_ltc_init(struct nvkm_object *object)
-{
-	struct nvkm_ltc_priv *priv = (void *)object;
-	u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
-	int ret;
-
-	ret = nvkm_ltc_init(priv);
-	if (ret)
-		return ret;
-
-	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
-	nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
-	nv_wr32(priv, 0x17e8d4, priv->tag_base);
-	nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
-	return 0;
-}
-
-void
-gf100_ltc_dtor(struct nvkm_object *object)
-{
-	struct nvkm_fb *pfb = nvkm_fb(object);
-	struct nvkm_ltc_priv *priv = (void *)object;
-
-	nvkm_mm_fini(&priv->tags);
-	if (pfb->ram)
-		nvkm_mm_free(&pfb->vram, &priv->tag_ram);
-
-	nvkm_ltc_destroy(priv);
-}
-
 /* TODO: Figure out tag memory details and drop the over-cautious allocation.
  */
 int
-gf100_ltc_init_tag_ram(struct nvkm_fb *pfb, struct nvkm_ltc_priv *priv)
+gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
 {
+	struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
 	u32 tag_size, tag_margin, tag_align;
 	int ret;
 
 	/* No VRAM, no tags for now. */
-	if (!pfb->ram) {
-		priv->num_tags = 0;
+	if (!ram) {
+		ltc->num_tags = 0;
 		goto mm_init;
 	}
 
 	/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
-	priv->num_tags = (pfb->ram->size >> 17) / 4;
-	if (priv->num_tags > (1 << 17))
-		priv->num_tags = 1 << 17; /* we have 17 bits in PTE */
-	priv->num_tags = (priv->num_tags + 63) & ~63; /* round up to 64 */
+	ltc->num_tags = (ram->size >> 17) / 4;
+	if (ltc->num_tags > (1 << 17))
+		ltc->num_tags = 1 << 17; /* we have 17 bits in PTE */
+	ltc->num_tags = (ltc->num_tags + 63) & ~63; /* round up to 64 */
 
-	tag_align = priv->ltc_nr * 0x800;
+	tag_align = ltc->ltc_nr * 0x800;
 	tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;
 
 	/* 4 part 4 sub: 0x2000 bytes for 56 tags */
@@ -173,72 +154,71 @@
 	 *
 	 * For 4 GiB of memory we'll have 8192 tags which makes 3 MiB, < 0.1 %.
 	 */
-	tag_size  = (priv->num_tags / 64) * 0x6000 + tag_margin;
+	tag_size  = (ltc->num_tags / 64) * 0x6000 + tag_margin;
 	tag_size += tag_align;
 	tag_size  = (tag_size + 0xfff) >> 12; /* round up */
 
-	ret = nvkm_mm_tail(&pfb->vram, 1, 1, tag_size, tag_size, 1,
-			   &priv->tag_ram);
+	ret = nvkm_mm_tail(&ram->vram, 1, 1, tag_size, tag_size, 1,
+			   &ltc->tag_ram);
 	if (ret) {
-		priv->num_tags = 0;
+		ltc->num_tags = 0;
 	} else {
-		u64 tag_base = ((u64)priv->tag_ram->offset << 12) + tag_margin;
+		u64 tag_base = ((u64)ltc->tag_ram->offset << 12) + tag_margin;
 
 		tag_base += tag_align - 1;
-		ret = do_div(tag_base, tag_align);
+		do_div(tag_base, tag_align);
 
-		priv->tag_base = tag_base;
+		ltc->tag_base = tag_base;
 	}
 
 mm_init:
-	ret = nvkm_mm_init(&priv->tags, 0, priv->num_tags, 1);
-	return ret;
+	return nvkm_mm_init(&ltc->tags, 0, ltc->num_tags, 1);
 }
 
 int
-gf100_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+gf100_ltc_oneinit(struct nvkm_ltc *ltc)
 {
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ltc_priv *priv;
-	u32 parts, mask;
-	int ret, i;
+	struct nvkm_device *device = ltc->subdev.device;
+	const u32 parts = nvkm_rd32(device, 0x022438);
+	const u32  mask = nvkm_rd32(device, 0x022554);
+	const u32 slice = nvkm_rd32(device, 0x17e8dc) >> 28;
+	int i;
 
-	ret = nvkm_ltc_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	parts = nv_rd32(priv, 0x022438);
-	mask = nv_rd32(priv, 0x022554);
 	for (i = 0; i < parts; i++) {
 		if (!(mask & (1 << i)))
-			priv->ltc_nr++;
+			ltc->ltc_nr++;
 	}
-	priv->lts_nr = nv_rd32(priv, 0x17e8dc) >> 28;
+	ltc->lts_nr = slice;
 
-	ret = gf100_ltc_init_tag_ram(pfb, priv);
-	if (ret)
-		return ret;
-
-	nv_subdev(priv)->intr = gf100_ltc_intr;
-	return 0;
+	return gf100_ltc_oneinit_tag_ram(ltc);
 }
 
-struct nvkm_oclass *
-gf100_ltc_oclass = &(struct nvkm_ltc_impl) {
-	.base.handle = NV_SUBDEV(LTC, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ltc_ctor,
-		.dtor = gf100_ltc_dtor,
-		.init = gf100_ltc_init,
-		.fini = _nvkm_ltc_fini,
-	},
+static void
+gf100_ltc_init(struct nvkm_ltc *ltc)
+{
+	struct nvkm_device *device = ltc->subdev.device;
+	u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
+
+	nvkm_mask(device, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
+	nvkm_wr32(device, 0x17e8d8, ltc->ltc_nr);
+	nvkm_wr32(device, 0x17e8d4, ltc->tag_base);
+	nvkm_mask(device, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
+}
+
+static const struct nvkm_ltc_func
+gf100_ltc = {
+	.oneinit = gf100_ltc_oneinit,
+	.init = gf100_ltc_init,
 	.intr = gf100_ltc_intr,
 	.cbc_clear = gf100_ltc_cbc_clear,
 	.cbc_wait = gf100_ltc_cbc_wait,
 	.zbc = 16,
 	.zbc_clear_color = gf100_ltc_zbc_clear_color,
 	.zbc_clear_depth = gf100_ltc_zbc_clear_depth,
-}.base;
+};
+
+int
+gf100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+	return nvkm_ltc_new_(&gf100_ltc, device, index, pltc);
+}
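
The old nv_wait() calls become nvkm_msec() blocks: poll the per-slice CBC status register until it reads zero, giving up after a budget of 2000 (milliseconds, going by the macro's name) — the body runs repeatedly until it break;s or time runs out. A self-contained userspace analogue of that bounded poll:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t
now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Returns true if cond(arg) became true within timeout_ms. */
static bool
poll_msec(bool (*cond)(void *), void *arg, unsigned timeout_ms)
{
	uint64_t deadline = now_ns() + (uint64_t)timeout_ms * 1000000ull;

	do {
		if (cond(arg))
			return true;
	} while (now_ns() < deadline);
	return false;
}
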
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
index d53959b..839e6b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gk104.c
@@ -23,37 +23,32 @@
  */
 #include "priv.h"
 
-static int
-gk104_ltc_init(struct nvkm_object *object)
+static void
+gk104_ltc_init(struct nvkm_ltc *ltc)
 {
-	struct nvkm_ltc_priv *priv = (void *)object;
-	u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
-	int ret;
+	struct nvkm_device *device = ltc->subdev.device;
+	u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
 
-	ret = nvkm_ltc_init(priv);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
-	nv_wr32(priv, 0x17e000, priv->ltc_nr);
-	nv_wr32(priv, 0x17e8d4, priv->tag_base);
-	nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
-	return 0;
+	nvkm_wr32(device, 0x17e8d8, ltc->ltc_nr);
+	nvkm_wr32(device, 0x17e000, ltc->ltc_nr);
+	nvkm_wr32(device, 0x17e8d4, ltc->tag_base);
+	nvkm_mask(device, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
 }
 
-struct nvkm_oclass *
-gk104_ltc_oclass = &(struct nvkm_ltc_impl) {
-	.base.handle = NV_SUBDEV(LTC, 0xe4),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_ltc_ctor,
-		.dtor = gf100_ltc_dtor,
-		.init = gk104_ltc_init,
-		.fini = _nvkm_ltc_fini,
-	},
+static const struct nvkm_ltc_func
+gk104_ltc = {
+	.oneinit = gf100_ltc_oneinit,
+	.init = gk104_ltc_init,
 	.intr = gf100_ltc_intr,
 	.cbc_clear = gf100_ltc_cbc_clear,
 	.cbc_wait = gf100_ltc_cbc_wait,
 	.zbc = 16,
 	.zbc_clear_color = gf100_ltc_zbc_clear_color,
 	.zbc_clear_depth = gf100_ltc_zbc_clear_depth,
-}.base;
+};
+
+int
+gk104_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+	return nvkm_ltc_new_(&gk104_ltc, device, index, pltc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index 6b3f6f4..389331b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -27,127 +27,121 @@
 #include <subdev/timer.h>
 
 static void
-gm107_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit)
+gm107_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit)
 {
-	nv_wr32(priv, 0x17e270, start);
-	nv_wr32(priv, 0x17e274, limit);
-	nv_wr32(priv, 0x17e26c, 0x00000004);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_wr32(device, 0x17e270, start);
+	nvkm_wr32(device, 0x17e274, limit);
+	nvkm_wr32(device, 0x17e26c, 0x00000004);
 }
 
 static void
-gm107_ltc_cbc_wait(struct nvkm_ltc_priv *priv)
+gm107_ltc_cbc_wait(struct nvkm_ltc *ltc)
 {
+	struct nvkm_device *device = ltc->subdev.device;
 	int c, s;
-	for (c = 0; c < priv->ltc_nr; c++) {
-		for (s = 0; s < priv->lts_nr; s++)
-			nv_wait(priv, 0x14046c + c * 0x2000 + s * 0x200, ~0, 0);
+	for (c = 0; c < ltc->ltc_nr; c++) {
+		for (s = 0; s < ltc->lts_nr; s++) {
+			const u32 addr = 0x14046c + (c * 0x2000) + (s * 0x200);
+			nvkm_msec(device, 2000,
+				if (!nvkm_rd32(device, addr))
+					break;
+			);
+		}
 	}
 }
 
 static void
-gm107_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4])
+gm107_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
 {
-	nv_mask(priv, 0x17e338, 0x0000000f, i);
-	nv_wr32(priv, 0x17e33c, color[0]);
-	nv_wr32(priv, 0x17e340, color[1]);
-	nv_wr32(priv, 0x17e344, color[2]);
-	nv_wr32(priv, 0x17e348, color[3]);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_mask(device, 0x17e338, 0x0000000f, i);
+	nvkm_wr32(device, 0x17e33c, color[0]);
+	nvkm_wr32(device, 0x17e340, color[1]);
+	nvkm_wr32(device, 0x17e344, color[2]);
+	nvkm_wr32(device, 0x17e348, color[3]);
 }
 
 static void
-gm107_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth)
+gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
 {
-	nv_mask(priv, 0x17e338, 0x0000000f, i);
-	nv_wr32(priv, 0x17e34c, depth);
+	struct nvkm_device *device = ltc->subdev.device;
+	nvkm_mask(device, 0x17e338, 0x0000000f, i);
+	nvkm_wr32(device, 0x17e34c, depth);
 }
 
 static void
-gm107_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts)
+gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
 {
-	u32 base = 0x140000 + (ltc * 0x2000) + (lts * 0x400);
-	u32 stat = nv_rd32(priv, base + 0x00c);
+	struct nvkm_subdev *subdev = &ltc->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 base = 0x140000 + (c * 0x2000) + (s * 0x400);
+	u32 stat = nvkm_rd32(device, base + 0x00c);
 
 	if (stat) {
-		nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", ltc, lts, stat);
-		nv_wr32(priv, base + 0x00c, stat);
+		nvkm_error(subdev, "LTC%d_LTS%d: %08x\n", c, s, stat);
+		nvkm_wr32(device, base + 0x00c, stat);
 	}
 }
 
 static void
-gm107_ltc_intr(struct nvkm_subdev *subdev)
+gm107_ltc_intr(struct nvkm_ltc *ltc)
 {
-	struct nvkm_ltc_priv *priv = (void *)subdev;
+	struct nvkm_device *device = ltc->subdev.device;
 	u32 mask;
 
-	mask = nv_rd32(priv, 0x00017c);
+	mask = nvkm_rd32(device, 0x00017c);
 	while (mask) {
-		u32 lts, ltc = __ffs(mask);
-		for (lts = 0; lts < priv->lts_nr; lts++)
-			gm107_ltc_lts_isr(priv, ltc, lts);
-		mask &= ~(1 << ltc);
+		u32 s, c = __ffs(mask);
+		for (s = 0; s < ltc->lts_nr; s++)
+			gm107_ltc_lts_isr(ltc, c, s);
+		mask &= ~(1 << c);
 	}
 }
 
 static int
-gm107_ltc_init(struct nvkm_object *object)
+gm107_ltc_oneinit(struct nvkm_ltc *ltc)
 {
-	struct nvkm_ltc_priv *priv = (void *)object;
-	u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
-	int ret;
+	struct nvkm_device *device = ltc->subdev.device;
+	const u32 parts = nvkm_rd32(device, 0x022438);
+	const u32  mask = nvkm_rd32(device, 0x021c14);
+	const u32 slice = nvkm_rd32(device, 0x17e280) >> 28;
+	int i;
 
-	ret = nvkm_ltc_init(priv);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x17e27c, priv->ltc_nr);
-	nv_wr32(priv, 0x17e278, priv->tag_base);
-	nv_mask(priv, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
-	return 0;
-}
-
-static int
-gm107_ltc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nvkm_fb *pfb = nvkm_fb(parent);
-	struct nvkm_ltc_priv *priv;
-	u32 parts, mask;
-	int ret, i;
-
-	ret = nvkm_ltc_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	parts = nv_rd32(priv, 0x022438);
-	mask = nv_rd32(priv, 0x021c14);
 	for (i = 0; i < parts; i++) {
 		if (!(mask & (1 << i)))
-			priv->ltc_nr++;
+			ltc->ltc_nr++;
 	}
-	priv->lts_nr = nv_rd32(priv, 0x17e280) >> 28;
+	ltc->lts_nr = slice;
 
-	ret = gf100_ltc_init_tag_ram(pfb, priv);
-	if (ret)
-		return ret;
-
-	return 0;
+	return gf100_ltc_oneinit_tag_ram(ltc);
 }
 
-struct nvkm_oclass *
-gm107_ltc_oclass = &(struct nvkm_ltc_impl) {
-	.base.handle = NV_SUBDEV(LTC, 0xff),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm107_ltc_ctor,
-		.dtor = gf100_ltc_dtor,
-		.init = gm107_ltc_init,
-		.fini = _nvkm_ltc_fini,
-	},
+static void
+gm107_ltc_init(struct nvkm_ltc *ltc)
+{
+	struct nvkm_device *device = ltc->subdev.device;
+	u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001);
+
+	nvkm_wr32(device, 0x17e27c, ltc->ltc_nr);
+	nvkm_wr32(device, 0x17e278, ltc->tag_base);
+	nvkm_mask(device, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
+}
+
+static const struct nvkm_ltc_func
+gm107_ltc = {
+	.oneinit = gm107_ltc_oneinit,
+	.init = gm107_ltc_init,
 	.intr = gm107_ltc_intr,
 	.cbc_clear = gm107_ltc_cbc_clear,
 	.cbc_wait = gm107_ltc_cbc_wait,
 	.zbc = 16,
 	.zbc_clear_color = gm107_ltc_zbc_clear_color,
 	.zbc_clear_depth = gm107_ltc_zbc_clear_depth,
-}.base;
+};
+
+int
+gm107_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+	return nvkm_ltc_new_(&gm107_ltc, device, index, pltc);
+}
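
Both LTC interrupt handlers share the same top-level scan: read the per-LTC pending mask, service the lowest set bit found with __ffs(), clear it, and loop. A self-contained model of that scan (__builtin_ctz standing in for the kernel's __ffs()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0x00000005;	/* LTC units 0 and 2 pending */

	while (mask) {
		uint32_t c = __builtin_ctz(mask);	/* lowest set bit */
		printf("service LTC%u\n", c);
		mask &= ~(1u << c);			/* clear and rescan */
	}
	return 0;
}
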
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 09537d7..4e05037 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -1,69 +1,29 @@
 #ifndef __NVKM_LTC_PRIV_H__
 #define __NVKM_LTC_PRIV_H__
+#define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev)
 #include <subdev/ltc.h>
 
-#include <core/mm.h>
-struct nvkm_fb;
+int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *,
+		  int index, struct nvkm_ltc **);
 
-struct nvkm_ltc_priv {
-	struct nvkm_ltc base;
-	u32 ltc_nr;
-	u32 lts_nr;
+struct nvkm_ltc_func {
+	int  (*oneinit)(struct nvkm_ltc *);
+	void (*init)(struct nvkm_ltc *);
+	void (*intr)(struct nvkm_ltc *);
 
-	u32 num_tags;
-	u32 tag_base;
-	struct nvkm_mm tags;
-	struct nvkm_mm_node *tag_ram;
-
-	u32 zbc_color[NVKM_LTC_MAX_ZBC_CNT][4];
-	u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
-};
-
-#define nvkm_ltc_create(p,e,o,d)                                               \
-	nvkm_ltc_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_ltc_destroy(p) ({                                                 \
-	struct nvkm_ltc_priv *_priv = (p);                                     \
-	_nvkm_ltc_dtor(nv_object(_priv));                                      \
-})
-#define nvkm_ltc_init(p) ({                                                    \
-	struct nvkm_ltc_priv *_priv = (p);                                     \
-	_nvkm_ltc_init(nv_object(_priv));                                      \
-})
-#define nvkm_ltc_fini(p,s) ({                                                  \
-	struct nvkm_ltc_priv *_priv = (p);                                     \
-	_nvkm_ltc_fini(nv_object(_priv), (s));                                 \
-})
-
-int  nvkm_ltc_create_(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, int, void **);
-
-#define _nvkm_ltc_dtor _nvkm_subdev_dtor
-int _nvkm_ltc_init(struct nvkm_object *);
-#define _nvkm_ltc_fini _nvkm_subdev_fini
-
-int  gf100_ltc_ctor(struct nvkm_object *, struct nvkm_object *,
-		    struct nvkm_oclass *, void *, u32,
-		    struct nvkm_object **);
-void gf100_ltc_dtor(struct nvkm_object *);
-int  gf100_ltc_init_tag_ram(struct nvkm_fb *, struct nvkm_ltc_priv *);
-int  gf100_ltc_tags_alloc(struct nvkm_ltc *, u32, struct nvkm_mm_node **);
-void gf100_ltc_tags_free(struct nvkm_ltc *, struct nvkm_mm_node **);
-
-struct nvkm_ltc_impl {
-	struct nvkm_oclass base;
-	void (*intr)(struct nvkm_subdev *);
-
-	void (*cbc_clear)(struct nvkm_ltc_priv *, u32 start, u32 limit);
-	void (*cbc_wait)(struct nvkm_ltc_priv *);
+	void (*cbc_clear)(struct nvkm_ltc *, u32 start, u32 limit);
+	void (*cbc_wait)(struct nvkm_ltc *);
 
 	int zbc;
-	void (*zbc_clear_color)(struct nvkm_ltc_priv *, int, const u32[4]);
-	void (*zbc_clear_depth)(struct nvkm_ltc_priv *, int, const u32);
+	void (*zbc_clear_color)(struct nvkm_ltc *, int, const u32[4]);
+	void (*zbc_clear_depth)(struct nvkm_ltc *, int, const u32);
 };
 
-void gf100_ltc_intr(struct nvkm_subdev *);
-void gf100_ltc_cbc_clear(struct nvkm_ltc_priv *, u32, u32);
-void gf100_ltc_cbc_wait(struct nvkm_ltc_priv *);
-void gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *, int, const u32[4]);
-void gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *, int, const u32);
+int gf100_ltc_oneinit(struct nvkm_ltc *);
+int gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *);
+void gf100_ltc_intr(struct nvkm_ltc *);
+void gf100_ltc_cbc_clear(struct nvkm_ltc *, u32, u32);
+void gf100_ltc_cbc_wait(struct nvkm_ltc *);
+void gf100_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
+void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
 #endif
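
Each rewritten priv.h now opens with a downcast macro — nvkm_ltc(p) here, nvkm_instmem(p) and nvkm_mc(p) in their headers — built on container_of(), which recovers the containing object from a pointer to its embedded subdev. A self-contained demonstration (classic offsetof form; the kernel's macro adds type checking):

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct subdev { int index; };
struct ltc { struct subdev subdev; int ltc_nr; };

#define to_ltc(p) container_of((p), struct ltc, subdev)

int main(void)
{
	struct ltc ltc = { .subdev = { .index = 3 }, .ltc_nr = 2 };
	struct subdev *sd = &ltc.subdev;

	assert(to_ltc(sd) == &ltc);	/* recovers the container */
	return 0;
}
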
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 721643f..bef325d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -1,11 +1,7 @@
 nvkm-y += nvkm/subdev/mc/base.o
 nvkm-y += nvkm/subdev/mc/nv04.o
-nvkm-y += nvkm/subdev/mc/nv40.o
 nvkm-y += nvkm/subdev/mc/nv44.o
-nvkm-y += nvkm/subdev/mc/nv4c.o
 nvkm-y += nvkm/subdev/mc/nv50.o
-nvkm-y += nvkm/subdev/mc/g94.o
 nvkm-y += nvkm/subdev/mc/g98.o
 nvkm-y += nvkm/subdev/mc/gf100.o
-nvkm-y += nvkm/subdev/mc/gf106.o
 nvkm-y += nvkm/subdev/mc/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 5b051a2..954fbbe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -23,147 +23,101 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <core/option.h>
 
-static inline void
-nvkm_mc_unk260(struct nvkm_mc *pmc, u32 data)
+void
+nvkm_mc_unk260(struct nvkm_mc *mc, u32 data)
 {
-	const struct nvkm_mc_oclass *impl = (void *)nv_oclass(pmc);
-	if (impl->unk260)
-		impl->unk260(pmc, data);
-}
-
-static inline u32
-nvkm_mc_intr_mask(struct nvkm_mc *pmc)
-{
-	u32 intr = nv_rd32(pmc, 0x000100);
-	if (intr == 0xffffffff) /* likely fallen off the bus */
-		intr = 0x00000000;
-	return intr;
-}
-
-static irqreturn_t
-nvkm_mc_intr(int irq, void *arg)
-{
-	struct nvkm_mc *pmc = arg;
-	const struct nvkm_mc_oclass *oclass = (void *)nv_object(pmc)->oclass;
-	const struct nvkm_mc_intr *map = oclass->intr;
-	struct nvkm_subdev *unit;
-	u32 intr;
-
-	nv_wr32(pmc, 0x000140, 0x00000000);
-	nv_rd32(pmc, 0x000140);
-	intr = nvkm_mc_intr_mask(pmc);
-	if (pmc->use_msi)
-		oclass->msi_rearm(pmc);
-
-	if (intr) {
-		u32 stat = intr = nvkm_mc_intr_mask(pmc);
-		while (map->stat) {
-			if (intr & map->stat) {
-				unit = nvkm_subdev(pmc, map->unit);
-				if (unit && unit->intr)
-					unit->intr(unit);
-				stat &= ~map->stat;
-			}
-			map++;
-		}
-
-		if (stat)
-			nv_error(pmc, "unknown intr 0x%08x\n", stat);
-	}
-
-	nv_wr32(pmc, 0x000140, 0x00000001);
-	return intr ? IRQ_HANDLED : IRQ_NONE;
-}
-
-int
-_nvkm_mc_fini(struct nvkm_object *object, bool suspend)
-{
-	struct nvkm_mc *pmc = (void *)object;
-	nv_wr32(pmc, 0x000140, 0x00000000);
-	return nvkm_subdev_fini(&pmc->base, suspend);
-}
-
-int
-_nvkm_mc_init(struct nvkm_object *object)
-{
-	struct nvkm_mc *pmc = (void *)object;
-	int ret = nvkm_subdev_init(&pmc->base);
-	if (ret)
-		return ret;
-	nv_wr32(pmc, 0x000140, 0x00000001);
-	return 0;
+	if (mc->func->unk260)
+		mc->func->unk260(mc, data);
 }
 
 void
-_nvkm_mc_dtor(struct nvkm_object *object)
+nvkm_mc_intr_unarm(struct nvkm_mc *mc)
 {
-	struct nvkm_device *device = nv_device(object);
-	struct nvkm_mc *pmc = (void *)object;
-	free_irq(pmc->irq, pmc);
-	if (pmc->use_msi)
-		pci_disable_msi(device->pdev);
-	nvkm_subdev_destroy(&pmc->base);
+	return mc->func->intr_unarm(mc);
 }
 
-int
-nvkm_mc_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *bclass, int length, void **pobject)
+void
+nvkm_mc_intr_rearm(struct nvkm_mc *mc)
 {
-	const struct nvkm_mc_oclass *oclass = (void *)bclass;
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_mc *pmc;
-	int ret;
+	return mc->func->intr_rearm(mc);
+}
 
-	ret = nvkm_subdev_create_(parent, engine, bclass, 0, "PMC",
-				  "master", length, pobject);
-	pmc = *pobject;
-	if (ret)
-		return ret;
+static u32
+nvkm_mc_intr_mask(struct nvkm_mc *mc)
+{
+	u32 intr = mc->func->intr_mask(mc);
+	if (WARN_ON_ONCE(intr == 0xffffffff))
+		intr = 0; /* likely fallen off the bus */
+	return intr;
+}
 
-	pmc->unk260 = nvkm_mc_unk260;
+void
+nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
+{
+	struct nvkm_device *device = mc->subdev.device;
+	struct nvkm_subdev *subdev;
+	const struct nvkm_mc_intr *map = mc->func->intr;
+	u32 stat, intr;
 
-	if (nv_device_is_pci(device)) {
-		switch (device->pdev->device & 0x0ff0) {
-		case 0x00f0:
-		case 0x02e0:
-			/* BR02? NFI how these would be handled yet exactly */
-			break;
-		default:
-			switch (device->chipset) {
-			case 0xaa:
-				/* reported broken, nv also disable it */
-				break;
-			default:
-				pmc->use_msi = true;
-				break;
-			}
+	stat = intr = nvkm_mc_intr_mask(mc);
+	while (map->stat) {
+		if (intr & map->stat) {
+			subdev = nvkm_device_subdev(device, map->unit);
+			if (subdev)
+				nvkm_subdev_intr(subdev);
+			stat &= ~map->stat;
 		}
-
-		pmc->use_msi = nvkm_boolopt(device->cfgopt, "NvMSI",
-					    pmc->use_msi);
-
-		if (pmc->use_msi && oclass->msi_rearm) {
-			pmc->use_msi = pci_enable_msi(device->pdev) == 0;
-			if (pmc->use_msi) {
-				nv_info(pmc, "MSI interrupts enabled\n");
-				oclass->msi_rearm(pmc);
-			}
-		} else {
-			pmc->use_msi = false;
-		}
+		map++;
 	}
 
-	ret = nv_device_get_irq(device, true);
-	if (ret < 0)
-		return ret;
-	pmc->irq = ret;
+	if (stat)
+		nvkm_error(&mc->subdev, "intr %08x\n", stat);
+	*handled = intr != 0;
+}
 
-	ret = request_irq(pmc->irq, nvkm_mc_intr, IRQF_SHARED, "nvkm", pmc);
-	if (ret < 0)
-		return ret;
+static int
+nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_mc *mc = nvkm_mc(subdev);
+	nvkm_mc_intr_unarm(mc);
+	return 0;
+}
 
+static int
+nvkm_mc_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_mc *mc = nvkm_mc(subdev);
+	if (mc->func->init)
+		mc->func->init(mc);
+	nvkm_mc_intr_rearm(mc);
+	return 0;
+}
+
+static void *
+nvkm_mc_dtor(struct nvkm_subdev *subdev)
+{
+	return nvkm_mc(subdev);
+}
+
+static const struct nvkm_subdev_func
+nvkm_mc = {
+	.dtor = nvkm_mc_dtor,
+	.init = nvkm_mc_init,
+	.fini = nvkm_mc_fini,
+};
+
+int
+nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_mc **pmc)
+{
+	struct nvkm_mc *mc;
+
+	if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_subdev_ctor(&nvkm_mc, device, index, 0, &mc->subdev);
+	mc->func = func;
 	return 0;
 }
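
With request_irq() and the MSI handling gone from the subdev, the MC now only exports the unarm/dispatch/rearm hooks; the actual IRQ handler (not part of this hunk) is expected to drive them in that order and translate *handled into IRQ_HANDLED/IRQ_NONE. A hedged sketch of that caller, using only the functions declared above:

static irqreturn_t
example_isr(int irq, void *arg)		/* hypothetical caller */
{
	struct nvkm_mc *mc = arg;
	bool handled = false;

	nvkm_mc_intr_unarm(mc);		/* mask, and flush the write */
	nvkm_mc_intr(mc, &handled);	/* walk func->intr[], dispatch units */
	nvkm_mc_intr_rearm(mc);		/* unmask again */
	return handled ? IRQ_HANDLED : IRQ_NONE;
}
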
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c
deleted file mode 100644
index f042e7d..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv04.h"
-
-struct nvkm_oclass *
-g94_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x94),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
-	.intr = nv50_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
index 8ab7f12..7344ad6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
@@ -21,38 +21,40 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
 static const struct nvkm_mc_intr
 g98_mc_intr[] = {
-	{ 0x04000000, NVDEV_ENGINE_DISP },  /* DISP first, so pageflip timestamps work */
-	{ 0x00000001, NVDEV_ENGINE_MSPPP },
-	{ 0x00000100, NVDEV_ENGINE_FIFO },
-	{ 0x00001000, NVDEV_ENGINE_GR },
-	{ 0x00004000, NVDEV_ENGINE_SEC },	/* NV84:NVA3 */
-	{ 0x00008000, NVDEV_ENGINE_MSVLD },
-	{ 0x00020000, NVDEV_ENGINE_MSPDEC },
-	{ 0x00040000, NVDEV_SUBDEV_PMU },	/* NVA3:NVC0 */
-	{ 0x00080000, NVDEV_SUBDEV_THERM },	/* NVA3:NVC0 */
-	{ 0x00100000, NVDEV_SUBDEV_TIMER },
-	{ 0x00200000, NVDEV_SUBDEV_GPIO },	/* PMGR->GPIO */
-	{ 0x00200000, NVDEV_SUBDEV_I2C }, 	/* PMGR->I2C/AUX */
-	{ 0x00400000, NVDEV_ENGINE_CE0 },	/* NVA3-     */
-	{ 0x10000000, NVDEV_SUBDEV_BUS },
-	{ 0x80000000, NVDEV_ENGINE_SW },
-	{ 0x0042d101, NVDEV_SUBDEV_FB },
+	{ 0x04000000, NVKM_ENGINE_DISP },  /* DISP first, so pageflip timestamps work */
+	{ 0x00000001, NVKM_ENGINE_MSPPP },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00004000, NVKM_ENGINE_SEC },	/* NV84:NVA3 */
+	{ 0x00008000, NVKM_ENGINE_MSVLD },
+	{ 0x00020000, NVKM_ENGINE_MSPDEC },
+	{ 0x00040000, NVKM_SUBDEV_PMU },	/* NVA3:NVC0 */
+	{ 0x00080000, NVKM_SUBDEV_THERM },	/* NVA3:NVC0 */
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{ 0x00200000, NVKM_SUBDEV_GPIO },	/* PMGR->GPIO */
+	{ 0x00200000, NVKM_SUBDEV_I2C }, 	/* PMGR->I2C/AUX */
+	{ 0x00400000, NVKM_ENGINE_CE0 },	/* NVA3-     */
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x80000000, NVKM_ENGINE_SW },
+	{ 0x0042d101, NVKM_SUBDEV_FB },
 	{},
 };
 
-struct nvkm_oclass *
-g98_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x98),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
+static const struct nvkm_mc_func
+g98_mc = {
+	.init = nv50_mc_init,
 	.intr = g98_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-}.base;
+	.intr_unarm = nv04_mc_intr_unarm,
+	.intr_rearm = nv04_mc_intr_rearm,
+	.intr_mask = nv04_mc_intr_mask,
+};
+
+int
+g98_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&g98_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
index 2425984..122fe69 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
@@ -21,56 +21,77 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
 const struct nvkm_mc_intr
 gf100_mc_intr[] = {
-	{ 0x04000000, NVDEV_ENGINE_DISP },  /* DISP first, so pageflip timestamps work. */
-	{ 0x00000001, NVDEV_ENGINE_MSPPP },
-	{ 0x00000020, NVDEV_ENGINE_CE0 },
-	{ 0x00000040, NVDEV_ENGINE_CE1 },
-	{ 0x00000080, NVDEV_ENGINE_CE2 },
-	{ 0x00000100, NVDEV_ENGINE_FIFO },
-	{ 0x00001000, NVDEV_ENGINE_GR },
-	{ 0x00002000, NVDEV_SUBDEV_FB },
-	{ 0x00008000, NVDEV_ENGINE_MSVLD },
-	{ 0x00040000, NVDEV_SUBDEV_THERM },
-	{ 0x00020000, NVDEV_ENGINE_MSPDEC },
-	{ 0x00100000, NVDEV_SUBDEV_TIMER },
-	{ 0x00200000, NVDEV_SUBDEV_GPIO },	/* PMGR->GPIO */
-	{ 0x00200000, NVDEV_SUBDEV_I2C },	/* PMGR->I2C/AUX */
-	{ 0x01000000, NVDEV_SUBDEV_PMU },
-	{ 0x02000000, NVDEV_SUBDEV_LTC },
-	{ 0x08000000, NVDEV_SUBDEV_FB },
-	{ 0x10000000, NVDEV_SUBDEV_BUS },
-	{ 0x40000000, NVDEV_SUBDEV_IBUS },
-	{ 0x80000000, NVDEV_ENGINE_SW },
+	{ 0x04000000, NVKM_ENGINE_DISP },  /* DISP first, so pageflip timestamps work. */
+	{ 0x00000001, NVKM_ENGINE_MSPPP },
+	{ 0x00000020, NVKM_ENGINE_CE0 },
+	{ 0x00000040, NVKM_ENGINE_CE1 },
+	{ 0x00000080, NVKM_ENGINE_CE2 },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00002000, NVKM_SUBDEV_FB },
+	{ 0x00008000, NVKM_ENGINE_MSVLD },
+	{ 0x00040000, NVKM_SUBDEV_THERM },
+	{ 0x00020000, NVKM_ENGINE_MSPDEC },
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{ 0x00200000, NVKM_SUBDEV_GPIO },	/* PMGR->GPIO */
+	{ 0x00200000, NVKM_SUBDEV_I2C },	/* PMGR->I2C/AUX */
+	{ 0x01000000, NVKM_SUBDEV_PMU },
+	{ 0x02000000, NVKM_SUBDEV_LTC },
+	{ 0x08000000, NVKM_SUBDEV_FB },
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x40000000, NVKM_SUBDEV_IBUS },
+	{ 0x80000000, NVKM_ENGINE_SW },
 	{},
 };
 
-static void
-gf100_mc_msi_rearm(struct nvkm_mc *pmc)
+void
+gf100_mc_intr_unarm(struct nvkm_mc *mc)
 {
-	struct nv04_mc_priv *priv = (void *)pmc;
-	nv_wr32(priv, 0x088704, 0x00000000);
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000140, 0x00000000);
+	nvkm_wr32(device, 0x000144, 0x00000000);
+	nvkm_rd32(device, 0x000140);
 }
 
 void
-gf100_mc_unk260(struct nvkm_mc *pmc, u32 data)
+gf100_mc_intr_rearm(struct nvkm_mc *mc)
 {
-	nv_wr32(pmc, 0x000260, data);
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000140, 0x00000001);
+	nvkm_wr32(device, 0x000144, 0x00000001);
 }
 
-struct nvkm_oclass *
-gf100_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
+u32
+gf100_mc_intr_mask(struct nvkm_mc *mc)
+{
+	struct nvkm_device *device = mc->subdev.device;
+	u32 intr0 = nvkm_rd32(device, 0x000100);
+	u32 intr1 = nvkm_rd32(device, 0x000104);
+	return intr0 | intr1;
+}
+
+void
+gf100_mc_unk260(struct nvkm_mc *mc, u32 data)
+{
+	nvkm_wr32(mc->subdev.device, 0x000260, data);
+}
+
+static const struct nvkm_mc_func
+gf100_mc = {
+	.init = nv50_mc_init,
 	.intr = gf100_mc_intr,
-	.msi_rearm = gf100_mc_msi_rearm,
+	.intr_unarm = gf100_mc_intr_unarm,
+	.intr_rearm = gf100_mc_intr_rearm,
+	.intr_mask = gf100_mc_intr_mask,
 	.unk260 = gf100_mc_unk260,
-}.base;
+};
+
+int
+gf100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&gf100_mc, device, index, pmc);
+}
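
Fermi has a second top-level interrupt line, so unarm/rearm touch both 0x000140 and 0x000144, and intr_mask() simply ORs the two status words — the dispatch table only cares which bits are set, not which line delivered them. A trivial illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t intr0 = 0x00000100;	/* e.g. FIFO on the first line */
	uint32_t intr1 = 0x02000000;	/* e.g. LTC on the second line */

	printf("combined mask: %08x\n", intr0 | intr1);
	return 0;
}
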
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c
deleted file mode 100644
index 8d2a8f4..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf106.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv04.h"
-
-struct nvkm_oclass *
-gf106_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0xc3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
-	.intr = gf100_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-	.unk260 = gf100_mc_unk260,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
index 43b2774..d92efb3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
@@ -21,17 +21,19 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
-struct nvkm_oclass *
-gk20a_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
+static const struct nvkm_mc_func
+gk20a_mc = {
+	.init = nv50_mc_init,
 	.intr = gf100_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-}.base;
+	.intr_unarm = gf100_mc_intr_unarm,
+	.intr_rearm = gf100_mc_intr_rearm,
+	.intr_mask = gf100_mc_intr_mask,
+};
+
+int
+gk20a_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&gk20a_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
index 3271382..d282ec1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
@@ -21,58 +21,63 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
 const struct nvkm_mc_intr
 nv04_mc_intr[] = {
-	{ 0x00000001, NVDEV_ENGINE_MPEG },	/* NV17- MPEG/ME */
-	{ 0x00000100, NVDEV_ENGINE_FIFO },
-	{ 0x00001000, NVDEV_ENGINE_GR },
-	{ 0x00010000, NVDEV_ENGINE_DISP },
-	{ 0x00020000, NVDEV_ENGINE_VP },	/* NV40- */
-	{ 0x00100000, NVDEV_SUBDEV_TIMER },
-	{ 0x01000000, NVDEV_ENGINE_DISP },	/* NV04- PCRTC0 */
-	{ 0x02000000, NVDEV_ENGINE_DISP },	/* NV11- PCRTC1 */
-	{ 0x10000000, NVDEV_SUBDEV_BUS },
-	{ 0x80000000, NVDEV_ENGINE_SW },
+	{ 0x00000001, NVKM_ENGINE_MPEG },	/* NV17- MPEG/ME */
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00010000, NVKM_ENGINE_DISP },
+	{ 0x00020000, NVKM_ENGINE_VP },	/* NV40- */
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{ 0x01000000, NVKM_ENGINE_DISP },	/* NV04- PCRTC0 */
+	{ 0x02000000, NVKM_ENGINE_DISP },	/* NV11- PCRTC1 */
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x80000000, NVKM_ENGINE_SW },
 	{}
 };
 
-int
-nv04_mc_init(struct nvkm_object *object)
+void
+nv04_mc_intr_unarm(struct nvkm_mc *mc)
 {
-	struct nv04_mc_priv *priv = (void *)object;
-
-	nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
-	nv_wr32(priv, 0x001850, 0x00000001); /* disable rom access */
-
-	return nvkm_mc_init(&priv->base);
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000140, 0x00000000);
+	nvkm_rd32(device, 0x000140);
 }
 
-int
-nv04_mc_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	     struct nvkm_oclass *oclass, void *data, u32 size,
-	     struct nvkm_object **pobject)
+void
+nv04_mc_intr_rearm(struct nvkm_mc *mc)
 {
-	struct nv04_mc_priv *priv;
-	int ret;
-
-	ret = nvkm_mc_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	return 0;
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000140, 0x00000001);
 }
 
-struct nvkm_oclass *
-nv04_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x04),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv04_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
+u32
+nv04_mc_intr_mask(struct nvkm_mc *mc)
+{
+	return nvkm_rd32(mc->subdev.device, 0x000100);
+}
+
+void
+nv04_mc_init(struct nvkm_mc *mc)
+{
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000200, 0xffffffff); /* everything enabled */
+	nvkm_wr32(device, 0x001850, 0x00000001); /* disable rom access */
+}
+
+static const struct nvkm_mc_func
+nv04_mc = {
+	.init = nv04_mc_init,
 	.intr = nv04_mc_intr,
-}.base;
+	.intr_unarm = nv04_mc_intr_unarm,
+	.intr_rearm = nv04_mc_intr_rearm,
+	.intr_mask = nv04_mc_intr_mask,
+};
+
+int
+nv04_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&nv04_mc, device, index, pmc);
+}
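
The intr[] tables end with an empty terminator and are walked bit-by-bit by nvkm_mc_intr(): matched bits are cleared from stat, and whatever survives the walk is logged as unknown. A self-contained model of that walk (unit names illustrative, not the real enum values):

#include <stdint.h>
#include <stdio.h>

struct intr_map { uint32_t stat; const char *unit; };

static const struct intr_map map[] = {
	{ 0x00000100, "FIFO" },
	{ 0x00001000, "GR" },
	{ 0x00100000, "TIMER" },
	{ 0 }	/* terminator, as in the real tables */
};

int main(void)
{
	uint32_t intr = 0x00001100, stat = intr;
	const struct intr_map *m;

	for (m = map; m->stat; m++) {
		if (intr & m->stat) {
			printf("dispatch to %s\n", m->unit);
			stat &= ~m->stat;	/* accounted for */
		}
	}
	if (stat)
		printf("unknown intr %08x\n", stat);
	return 0;
}
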
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h
deleted file mode 100644
index 411de3d..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __NVKM_MC_NV04_H__
-#define __NVKM_MC_NV04_H__
-#include "priv.h"
-
-struct nv04_mc_priv {
-	struct nvkm_mc base;
-};
-
-int  nv04_mc_ctor(struct nvkm_object *, struct nvkm_object *,
-		  struct nvkm_oclass *, void *, u32,
-		  struct nvkm_object **);
-
-extern const struct nvkm_mc_intr nv04_mc_intr[];
-int  nv04_mc_init(struct nvkm_object *);
-void nv40_mc_msi_rearm(struct nvkm_mc *);
-int  nv44_mc_init(struct nvkm_object *object);
-int  nv50_mc_init(struct nvkm_object *);
-extern const struct nvkm_mc_intr nv50_mc_intr[];
-extern const struct nvkm_mc_intr gf100_mc_intr[];
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c
deleted file mode 100644
index b761305..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv40.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "nv04.h"
-
-void
-nv40_mc_msi_rearm(struct nvkm_mc *pmc)
-{
-	struct nv04_mc_priv *priv = (void *)pmc;
-	nv_wr08(priv, 0x088068, 0xff);
-}
-
-struct nvkm_oclass *
-nv40_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x40),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv04_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
-	.intr = nv04_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
index 2c7f7c7..9a3ac99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
@@ -21,33 +21,33 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
-int
-nv44_mc_init(struct nvkm_object *object)
+void
+nv44_mc_init(struct nvkm_mc *mc)
 {
-	struct nv04_mc_priv *priv = (void *)object;
-	u32 tmp = nv_rd32(priv, 0x10020c);
+	struct nvkm_device *device = mc->subdev.device;
+	u32 tmp = nvkm_rd32(device, 0x10020c);
 
-	nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
+	nvkm_wr32(device, 0x000200, 0xffffffff); /* everything enabled */
 
-	nv_wr32(priv, 0x001700, tmp);
-	nv_wr32(priv, 0x001704, 0);
-	nv_wr32(priv, 0x001708, 0);
-	nv_wr32(priv, 0x00170c, tmp);
-
-	return nvkm_mc_init(&priv->base);
+	nvkm_wr32(device, 0x001700, tmp);
+	nvkm_wr32(device, 0x001704, 0);
+	nvkm_wr32(device, 0x001708, 0);
+	nvkm_wr32(device, 0x00170c, tmp);
 }
 
-struct nvkm_oclass *
-nv44_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x44),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv44_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
+static const struct nvkm_mc_func
+nv44_mc = {
+	.init = nv44_mc_init,
 	.intr = nv04_mc_intr,
-	.msi_rearm = nv40_mc_msi_rearm,
-}.base;
+	.intr_unarm = nv04_mc_intr_unarm,
+	.intr_rearm = nv04_mc_intr_rearm,
+	.intr_mask = nv04_mc_intr_mask,
+};
+
+int
+nv44_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+	return nvkm_mc_new_(&nv44_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c
deleted file mode 100644
index c0aac7e..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright 2014 Ilia Mirkin
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ilia Mirkin
- */
-#include "nv04.h"
-
-struct nvkm_oclass *
-nv4c_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x4c),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv44_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
-	.intr = nv04_mc_intr,
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
index 40e3019..5f27d7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
@@ -21,52 +21,44 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
-
-#include <core/device.h>
+#include "priv.h"
 
 const struct nvkm_mc_intr
 nv50_mc_intr[] = {
-	{ 0x04000000, NVDEV_ENGINE_DISP },  /* DISP before FIFO, so pageflip-timestamping works! */
-	{ 0x00000001, NVDEV_ENGINE_MPEG },
-	{ 0x00000100, NVDEV_ENGINE_FIFO },
-	{ 0x00001000, NVDEV_ENGINE_GR },
-	{ 0x00004000, NVDEV_ENGINE_CIPHER },	/* NV84- */
-	{ 0x00008000, NVDEV_ENGINE_BSP },	/* NV84- */
-	{ 0x00020000, NVDEV_ENGINE_VP },	/* NV84- */
-	{ 0x00100000, NVDEV_SUBDEV_TIMER },
-	{ 0x00200000, NVDEV_SUBDEV_GPIO },	/* PMGR->GPIO */
-	{ 0x00200000, NVDEV_SUBDEV_I2C }, 	/* PMGR->I2C/AUX */
-	{ 0x10000000, NVDEV_SUBDEV_BUS },
-	{ 0x80000000, NVDEV_ENGINE_SW },
-	{ 0x0002d101, NVDEV_SUBDEV_FB },
+	{ 0x04000000, NVKM_ENGINE_DISP },  /* DISP before FIFO, so pageflip-timestamping works! */
+	{ 0x00000001, NVKM_ENGINE_MPEG },
+	{ 0x00000100, NVKM_ENGINE_FIFO },
+	{ 0x00001000, NVKM_ENGINE_GR },
+	{ 0x00004000, NVKM_ENGINE_CIPHER },	/* NV84- */
+	{ 0x00008000, NVKM_ENGINE_BSP },	/* NV84- */
+	{ 0x00020000, NVKM_ENGINE_VP },	/* NV84- */
+	{ 0x00100000, NVKM_SUBDEV_TIMER },
+	{ 0x00200000, NVKM_SUBDEV_GPIO },	/* PMGR->GPIO */
+	{ 0x00200000, NVKM_SUBDEV_I2C },	/* PMGR->I2C/AUX */
+	{ 0x10000000, NVKM_SUBDEV_BUS },
+	{ 0x80000000, NVKM_ENGINE_SW },
+	{ 0x0002d101, NVKM_SUBDEV_FB },
 	{},
 };
 
-static void
-nv50_mc_msi_rearm(struct nvkm_mc *pmc)
+void
+nv50_mc_init(struct nvkm_mc *mc)
 {
-	struct nvkm_device *device = nv_device(pmc);
-	pci_write_config_byte(device->pdev, 0x68, 0xff);
+	struct nvkm_device *device = mc->subdev.device;
+	nvkm_wr32(device, 0x000200, 0xffffffff); /* everything on */
 }
 
+static const struct nvkm_mc_func
+nv50_mc = {
+	.init = nv50_mc_init,
+	.intr = nv50_mc_intr,
+	.intr_unarm = nv04_mc_intr_unarm,
+	.intr_rearm = nv04_mc_intr_rearm,
+	.intr_mask = nv04_mc_intr_mask,
+};
+
 int
-nv50_mc_init(struct nvkm_object *object)
+nv50_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
 {
-	struct nv04_mc_priv *priv = (void *)object;
-	nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */
-	return nvkm_mc_init(&priv->base);
+	return nvkm_mc_new_(&nv50_mc, device, index, pmc);
 }
-
-struct nvkm_oclass *
-nv50_mc_oclass = &(struct nvkm_mc_oclass) {
-	.base.handle = NV_SUBDEV(MC, 0x50),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mc_ctor,
-		.dtor = _nvkm_mc_dtor,
-		.init = nv50_mc_init,
-		.fini = _nvkm_mc_fini,
-	},
-	.intr = nv50_mc_intr,
-	.msi_rearm = nv50_mc_msi_rearm,
-}.base;
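
The nvkm_mc_intr tables above pair a PMC interrupt status bit with the engine
or subdev that services it, and the empty entry presumably terminates the
walk.  A hedged sketch of that table walk (the walker itself is not part of
these hunks, and the notification step is elided):

/* sketch: match pending bits against a chip's intr table */
static void
example_mc_dispatch(struct nvkm_mc *mc, u32 stat)
{
	const struct nvkm_mc_intr *map = mc->func->intr;

	for (; map->stat; map++) {
		if (stat & map->stat) {
			/* notify map->unit (e.g. NVKM_ENGINE_FIFO) */
			stat &= ~map->stat;
		}
	}
	if (stat)
		nvkm_error(&mc->subdev, "unknown intr %08x\n", stat);
}
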
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index d2cad07..307f6c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -1,36 +1,42 @@
 #ifndef __NVKM_MC_PRIV_H__
 #define __NVKM_MC_PRIV_H__
+#define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
 #include <subdev/mc.h>
 
-#define nvkm_mc_create(p,e,o,d)                                             \
-	nvkm_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_mc_destroy(p) ({                                               \
-	struct nvkm_mc *pmc = (p); _nvkm_mc_dtor(nv_object(pmc));        \
-})
-#define nvkm_mc_init(p) ({                                                  \
-	struct nvkm_mc *pmc = (p); _nvkm_mc_init(nv_object(pmc));        \
-})
-#define nvkm_mc_fini(p,s) ({                                                \
-	struct nvkm_mc *pmc = (p); _nvkm_mc_fini(nv_object(pmc), (s));   \
-})
-
-int  nvkm_mc_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, int, void **);
-void _nvkm_mc_dtor(struct nvkm_object *);
-int  _nvkm_mc_init(struct nvkm_object *);
-int  _nvkm_mc_fini(struct nvkm_object *, bool);
+int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *,
+		 int index, struct nvkm_mc **);
 
 struct nvkm_mc_intr {
 	u32 stat;
 	u32 unit;
 };
 
-struct nvkm_mc_oclass {
-	struct nvkm_oclass base;
+struct nvkm_mc_func {
+	void (*init)(struct nvkm_mc *);
 	const struct nvkm_mc_intr *intr;
-	void (*msi_rearm)(struct nvkm_mc *);
+	/* disable reporting of interrupts to host */
+	void (*intr_unarm)(struct nvkm_mc *);
+	/* enable reporting of interrupts to host */
+	void (*intr_rearm)(struct nvkm_mc *);
+	/* retrieve pending interrupt mask (NV_PMC_INTR) */
+	u32 (*intr_mask)(struct nvkm_mc *);
 	void (*unk260)(struct nvkm_mc *, u32);
 };
 
+void nv04_mc_init(struct nvkm_mc *);
+extern const struct nvkm_mc_intr nv04_mc_intr[];
+void nv04_mc_intr_unarm(struct nvkm_mc *);
+void nv04_mc_intr_rearm(struct nvkm_mc *);
+u32 nv04_mc_intr_mask(struct nvkm_mc *);
+
+void nv44_mc_init(struct nvkm_mc *);
+
+void nv50_mc_init(struct nvkm_mc *);
+extern const struct nvkm_mc_intr nv50_mc_intr[];
+
+extern const struct nvkm_mc_intr gf100_mc_intr[];
+void gf100_mc_intr_unarm(struct nvkm_mc *);
+void gf100_mc_intr_rearm(struct nvkm_mc *);
+u32 gf100_mc_intr_mask(struct nvkm_mc *);
 void gf100_mc_unk260(struct nvkm_mc *, u32);
 #endif
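
With the hooks exported here, supporting a further nv04-family variant
reduces to a func table and a one-line constructor.  A hypothetical example
(the "nvxx" names are invented for illustration):

static const struct nvkm_mc_func
nvxx_mc = {
	.init = nv04_mc_init,
	.intr = nv04_mc_intr,
	.intr_unarm = nv04_mc_intr_unarm,
	.intr_rearm = nv04_mc_intr_rearm,
	.intr_mask = nv04_mc_intr_mask,
};

int
nvxx_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
{
	return nvkm_mc_new_(&nvxx_mc, device, index, pmc);
}
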
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 277b6ec..e04a229 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -21,10 +21,10 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/mmu.h>
-#include <subdev/fb.h>
+#include "priv.h"
 
 #include <core/gpuobj.h>
+#include <subdev/fb.h>
 
 void
 nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
@@ -32,12 +32,12 @@
 	struct nvkm_vm *vm = vma->vm;
 	struct nvkm_mmu *mmu = vm->mmu;
 	struct nvkm_mm_node *r;
-	int big = vma->node->type != mmu->spg_shift;
+	int big = vma->node->type != mmu->func->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
-	u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (mmu->pgt_bits - bits);
+	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (mmu->func->pgt_bits - bits);
 	u32 end, len;
 
 	delta = 0;
@@ -46,14 +46,14 @@
 		u32 num  = r->length >> bits;
 
 		while (num) {
-			struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+			struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
 
 			end = (pte + num);
 			if (unlikely(end >= max))
 				end = max;
 			len = end - pte;
 
-			mmu->map(vma, pgt, node, pte, len, phys, delta);
+			mmu->func->map(vma, pgt, node, pte, len, phys, delta);
 
 			num -= len;
 			pte += len;
@@ -67,7 +67,7 @@
 		}
 	}
 
-	mmu->flush(vm);
+	mmu->func->flush(vm);
 }
 
 static void
@@ -76,20 +76,20 @@
 {
 	struct nvkm_vm *vm = vma->vm;
 	struct nvkm_mmu *mmu = vm->mmu;
-	int big = vma->node->type != mmu->spg_shift;
+	int big = vma->node->type != mmu->func->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num  = length >> vma->node->type;
-	u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (mmu->pgt_bits - bits);
+	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (mmu->func->pgt_bits - bits);
 	unsigned m, sglen;
 	u32 end, len;
 	int i;
 	struct scatterlist *sg;
 
 	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
-		struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
 		sglen = sg_dma_len(sg) >> PAGE_SHIFT;
 
 		end = pte + sglen;
@@ -100,7 +100,7 @@
 		for (m = 0; m < len; m++) {
 			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
 
-			mmu->map_sg(vma, pgt, mem, pte, 1, &addr);
+			mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
 			num--;
 			pte++;
 
@@ -115,7 +115,7 @@
 			for (; m < sglen; m++) {
 				dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
 
-				mmu->map_sg(vma, pgt, mem, pte, 1, &addr);
+				mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
 				num--;
 				pte++;
 				if (num == 0)
@@ -125,7 +125,7 @@
 
 	}
 finish:
-	mmu->flush(vm);
+	mmu->func->flush(vm);
 }
 
 static void
@@ -135,24 +135,24 @@
 	struct nvkm_vm *vm = vma->vm;
 	struct nvkm_mmu *mmu = vm->mmu;
 	dma_addr_t *list = mem->pages;
-	int big = vma->node->type != mmu->spg_shift;
+	int big = vma->node->type != mmu->func->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num  = length >> vma->node->type;
-	u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (mmu->pgt_bits - bits);
+	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (mmu->func->pgt_bits - bits);
 	u32 end, len;
 
 	while (num) {
-		struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
 
 		end = (pte + num);
 		if (unlikely(end >= max))
 			end = max;
 		len = end - pte;
 
-		mmu->map_sg(vma, pgt, mem, pte, len, list);
+		mmu->func->map_sg(vma, pgt, mem, pte, len, list);
 
 		num  -= len;
 		pte  += len;
@@ -163,7 +163,7 @@
 		}
 	}
 
-	mmu->flush(vm);
+	mmu->func->flush(vm);
 }
 
 void
@@ -183,24 +183,24 @@
 {
 	struct nvkm_vm *vm = vma->vm;
 	struct nvkm_mmu *mmu = vm->mmu;
-	int big = vma->node->type != mmu->spg_shift;
+	int big = vma->node->type != mmu->func->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num  = length >> vma->node->type;
-	u32 pde  = (offset >> mmu->pgt_bits) - vm->fpde;
-	u32 pte  = (offset & ((1 << mmu->pgt_bits) - 1)) >> bits;
-	u32 max  = 1 << (mmu->pgt_bits - bits);
+	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (mmu->func->pgt_bits - bits);
 	u32 end, len;
 
 	while (num) {
-		struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
 
 		end = (pte + num);
 		if (unlikely(end >= max))
 			end = max;
 		len = end - pte;
 
-		mmu->unmap(pgt, pte, len);
+		mmu->func->unmap(vma, pgt, pte, len);
 
 		num -= len;
 		pte += len;
@@ -210,7 +210,7 @@
 		}
 	}
 
-	mmu->flush(vm);
+	mmu->func->flush(vm);
 }
 
 void
@@ -225,7 +225,7 @@
 	struct nvkm_mmu *mmu = vm->mmu;
 	struct nvkm_vm_pgd *vpgd;
 	struct nvkm_vm_pgt *vpgt;
-	struct nvkm_gpuobj *pgt;
+	struct nvkm_memory *pgt;
 	u32 pde;
 
 	for (pde = fpde; pde <= lpde; pde++) {
@@ -233,16 +233,14 @@
 		if (--vpgt->refcount[big])
 			continue;
 
-		pgt = vpgt->obj[big];
-		vpgt->obj[big] = NULL;
+		pgt = vpgt->mem[big];
+		vpgt->mem[big] = NULL;
 
 		list_for_each_entry(vpgd, &vm->pgd_list, head) {
-			mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
+			mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
 		}
 
-		mutex_unlock(&nv_subdev(mmu)->mutex);
-		nvkm_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&nv_subdev(mmu)->mutex);
+		nvkm_memory_del(&pgt);
 	}
 }
 
@@ -252,34 +250,23 @@
 	struct nvkm_mmu *mmu = vm->mmu;
 	struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
 	struct nvkm_vm_pgd *vpgd;
-	struct nvkm_gpuobj *pgt;
-	int big = (type != mmu->spg_shift);
+	int big = (type != mmu->func->spg_shift);
 	u32 pgt_size;
 	int ret;
 
-	pgt_size  = (1 << (mmu->pgt_bits + 12)) >> type;
+	pgt_size  = (1 << (mmu->func->pgt_bits + 12)) >> type;
 	pgt_size *= 8;
 
-	mutex_unlock(&nv_subdev(mmu)->mutex);
-	ret = nvkm_gpuobj_new(nv_object(vm->mmu), NULL, pgt_size, 0x1000,
-			      NVOBJ_FLAG_ZERO_ALLOC, &pgt);
-	mutex_lock(&nv_subdev(mmu)->mutex);
+	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+			      pgt_size, 0x1000, true, &vpgt->mem[big]);
 	if (unlikely(ret))
 		return ret;
 
-	/* someone beat us to filling the PDE while we didn't have the lock */
-	if (unlikely(vpgt->refcount[big]++)) {
-		mutex_unlock(&nv_subdev(mmu)->mutex);
-		nvkm_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&nv_subdev(mmu)->mutex);
-		return 0;
-	}
-
-	vpgt->obj[big] = pgt;
 	list_for_each_entry(vpgd, &vm->pgd_list, head) {
-		mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
+		mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
 	}
 
+	vpgt->refcount[big]++;
 	return 0;
 }
 
@@ -293,20 +280,20 @@
 	u32 fpde, lpde, pde;
 	int ret;
 
-	mutex_lock(&nv_subdev(mmu)->mutex);
+	mutex_lock(&vm->mutex);
 	ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
 			   &vma->node);
 	if (unlikely(ret != 0)) {
-		mutex_unlock(&nv_subdev(mmu)->mutex);
+		mutex_unlock(&vm->mutex);
 		return ret;
 	}
 
-	fpde = (vma->node->offset >> mmu->pgt_bits);
-	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits;
+	fpde = (vma->node->offset >> mmu->func->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
 
 	for (pde = fpde; pde <= lpde; pde++) {
 		struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
-		int big = (vma->node->type != mmu->spg_shift);
+		int big = (vma->node->type != mmu->func->spg_shift);
 
 		if (likely(vpgt->refcount[big])) {
 			vpgt->refcount[big]++;
@@ -318,11 +305,11 @@
 			if (pde != fpde)
 				nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
 			nvkm_mm_free(&vm->mm, &vma->node);
-			mutex_unlock(&nv_subdev(mmu)->mutex);
+			mutex_unlock(&vm->mutex);
 			return ret;
 		}
 	}
-	mutex_unlock(&nv_subdev(mmu)->mutex);
+	mutex_unlock(&vm->mutex);
 
 	vma->vm = NULL;
 	nvkm_vm_ref(vm, &vma->vm, NULL);
@@ -334,27 +321,49 @@
 void
 nvkm_vm_put(struct nvkm_vma *vma)
 {
-	struct nvkm_vm *vm = vma->vm;
-	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_mmu *mmu;
+	struct nvkm_vm *vm;
 	u32 fpde, lpde;
 
 	if (unlikely(vma->node == NULL))
 		return;
-	fpde = (vma->node->offset >> mmu->pgt_bits);
-	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits;
+	vm = vma->vm;
+	mmu = vm->mmu;
 
-	mutex_lock(&nv_subdev(mmu)->mutex);
-	nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->spg_shift, fpde, lpde);
+	fpde = (vma->node->offset >> mmu->func->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
+
+	mutex_lock(&vm->mutex);
+	nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
 	nvkm_mm_free(&vm->mm, &vma->node);
-	mutex_unlock(&nv_subdev(mmu)->mutex);
+	mutex_unlock(&vm->mutex);
 
 	nvkm_vm_ref(NULL, &vma->vm, NULL);
 }
 
 int
-nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
-	       u32 block, struct nvkm_vm **pvm)
+nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
 {
+	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_memory *pgt;
+	int ret;
+
+	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+			      (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
+	if (ret == 0) {
+		vm->pgt[0].refcount[0] = 1;
+		vm->pgt[0].mem[0] = pgt;
+		nvkm_memory_boot(pgt, vm);
+	}
+
+	return ret;
+}
+
+int
+nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
+	       u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
+{
+	static struct lock_class_key _key;
 	struct nvkm_vm *vm;
 	u64 mm_length = (offset + length) - mm_offset;
 	int ret;
@@ -363,11 +372,12 @@
 	if (!vm)
 		return -ENOMEM;
 
+	__mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
 	INIT_LIST_HEAD(&vm->pgd_list);
 	vm->mmu = mmu;
 	kref_init(&vm->refcount);
-	vm->fpde = offset >> (mmu->pgt_bits + 12);
-	vm->lpde = (offset + length - 1) >> (mmu->pgt_bits + 12);
+	vm->fpde = offset >> (mmu->func->pgt_bits + 12);
+	vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);
 
 	vm->pgt  = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
 	if (!vm->pgt) {
@@ -390,10 +400,12 @@
 
 int
 nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
-	    struct nvkm_vm **pvm)
+	    struct lock_class_key *key, struct nvkm_vm **pvm)
 {
-	struct nvkm_mmu *mmu = nvkm_mmu(device);
-	return mmu->create(mmu, offset, length, mm_offset, pvm);
+	struct nvkm_mmu *mmu = device->mmu;
+	if (!mmu->func->create)
+		return -EINVAL;
+	return mmu->func->create(mmu, offset, length, mm_offset, key, pvm);
 }
 
 static int
@@ -410,38 +422,33 @@
 	if (!vpgd)
 		return -ENOMEM;
 
-	nvkm_gpuobj_ref(pgd, &vpgd->obj);
+	vpgd->obj = pgd;
 
-	mutex_lock(&nv_subdev(mmu)->mutex);
+	mutex_lock(&vm->mutex);
 	for (i = vm->fpde; i <= vm->lpde; i++)
-		mmu->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
+		mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
 	list_add(&vpgd->head, &vm->pgd_list);
-	mutex_unlock(&nv_subdev(mmu)->mutex);
+	mutex_unlock(&vm->mutex);
 	return 0;
 }
 
 static void
 nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
 {
-	struct nvkm_mmu *mmu = vm->mmu;
 	struct nvkm_vm_pgd *vpgd, *tmp;
-	struct nvkm_gpuobj *pgd = NULL;
 
 	if (!mpgd)
 		return;
 
-	mutex_lock(&nv_subdev(mmu)->mutex);
+	mutex_lock(&vm->mutex);
 	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
 		if (vpgd->obj == mpgd) {
-			pgd = vpgd->obj;
 			list_del(&vpgd->head);
 			kfree(vpgd);
 			break;
 		}
 	}
-	mutex_unlock(&nv_subdev(mmu)->mutex);
-
-	nvkm_gpuobj_ref(NULL, &pgd);
+	mutex_unlock(&vm->mutex);
 }
 
 static void
@@ -478,3 +485,58 @@
 	*ptr = ref;
 	return 0;
 }
+
+static int
+nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+	if (mmu->func->oneinit)
+		return mmu->func->oneinit(mmu);
+	return 0;
+}
+
+static int
+nvkm_mmu_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+	if (mmu->func->init)
+		mmu->func->init(mmu);
+	return 0;
+}
+
+static void *
+nvkm_mmu_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
+	if (mmu->func->dtor)
+		return mmu->func->dtor(mmu);
+	return mmu;
+}
+
+static const struct nvkm_subdev_func
+nvkm_mmu = {
+	.dtor = nvkm_mmu_dtor,
+	.oneinit = nvkm_mmu_oneinit,
+	.init = nvkm_mmu_init,
+};
+
+void
+nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_mmu *mmu)
+{
+	nvkm_subdev_ctor(&nvkm_mmu, device, index, 0, &mmu->subdev);
+	mmu->func = func;
+	mmu->limit = func->limit;
+	mmu->dma_bits = func->dma_bits;
+	mmu->lpg_shift = func->lpg_shift;
+}
+
+int
+nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_mmu **pmmu)
+{
+	if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_mmu_ctor(func, device, index, *pmmu);
+	return 0;
+}
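
Every map/unmap path above splits a VM offset (expressed in small-page
units) into a page-directory index, a page-table index, and a per-table
limit using pgt_bits; the code additionally rebases pde by vm->fpde.  A
standalone worked example using gf100's parameters from the next hunk
(pgt_bits = 27 - 12, 4KiB pages so bits = 0):

#include <stdio.h>

int
main(void)
{
	unsigned int pgt_bits = 27 - 12; /* gf100_mmu.pgt_bits */
	unsigned int bits = 12 - 12;     /* vma->node->type - 12 */
	unsigned int offset = 0x12345;   /* address in 4KiB units */

	unsigned int pde = offset >> pgt_bits;
	unsigned int pte = (offset & ((1 << pgt_bits) - 1)) >> bits;
	unsigned int max = 1 << (pgt_bits - bits);

	/* prints: pde=0x2 pte=0x2345 max=32768 */
	printf("pde=%#x pte=%#x max=%u\n", pde, pte, max);
	return 0;
}
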
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 294cda3..7ac507c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -21,19 +21,14 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
+#include "priv.h"
+
 #include <subdev/fb.h>
 #include <subdev/ltc.h>
 #include <subdev/timer.h>
 
 #include <core/gpuobj.h>
 
-struct gf100_mmu_priv {
-	struct nvkm_mmu base;
-};
-
-
 /* Map from compressed to corresponding uncompressed storage type.
  * The value 0xff represents an invalid storage type.
  */
@@ -75,17 +70,19 @@
 
 
 static void
-gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_gpuobj *pgt[2])
+gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_memory *pgt[2])
 {
 	u32 pde[2] = { 0, 0 };
 
 	if (pgt[0])
-		pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
+		pde[1] = 0x00000001 | (nvkm_memory_addr(pgt[0]) >> 8);
 	if (pgt[1])
-		pde[0] = 0x00000001 | (pgt[1]->addr >> 8);
+		pde[0] = 0x00000001 | (nvkm_memory_addr(pgt[1]) >> 8);
 
-	nv_wo32(pgd, (index * 8) + 0, pde[0]);
-	nv_wo32(pgd, (index * 8) + 4, pde[1]);
+	nvkm_kmap(pgd);
+	nvkm_wo32(pgd, (index * 8) + 0, pde[0]);
+	nvkm_wo32(pgd, (index * 8) + 4, pde[1]);
+	nvkm_done(pgd);
 }
 
 static inline u64
@@ -103,7 +100,7 @@
 }
 
 static void
-gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+gf100_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	     struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
 	u64 next = 1 << (vma->node->type - 8);
@@ -112,126 +109,113 @@
 	pte <<= 3;
 
 	if (mem->tag) {
-		struct nvkm_ltc *ltc = nvkm_ltc(vma->vm->mmu);
+		struct nvkm_ltc *ltc = vma->vm->mmu->subdev.device->ltc;
 		u32 tag = mem->tag->offset + (delta >> 17);
 		phys |= (u64)tag << (32 + 12);
 		next |= (u64)1   << (32 + 12);
-		ltc->tags_clear(ltc, tag, cnt);
+		nvkm_ltc_tags_clear(ltc, tag, cnt);
 	}
 
+	nvkm_kmap(pgt);
 	while (cnt--) {
-		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
-		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
 		phys += next;
 		pte  += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
-gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 		struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
 	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
 	/* compressed storage types are invalid for system memory */
 	u32 memtype = gf100_pte_storage_type_map[mem->memtype & 0xff];
 
+	nvkm_kmap(pgt);
 	pte <<= 3;
 	while (cnt--) {
 		u64 phys = gf100_vm_addr(vma, *list++, memtype, target);
-		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
-		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
 		pte += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
-gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+gf100_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
+	nvkm_kmap(pgt);
 	pte <<= 3;
 	while (cnt--) {
-		nv_wo32(pgt, pte + 0, 0x00000000);
-		nv_wo32(pgt, pte + 4, 0x00000000);
+		nvkm_wo32(pgt, pte + 0, 0x00000000);
+		nvkm_wo32(pgt, pte + 4, 0x00000000);
 		pte += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
 gf100_vm_flush(struct nvkm_vm *vm)
 {
-	struct gf100_mmu_priv *priv = (void *)vm->mmu;
-	struct nvkm_bar *bar = nvkm_bar(priv);
+	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_device *device = mmu->subdev.device;
 	struct nvkm_vm_pgd *vpgd;
 	u32 type;
 
-	bar->flush(bar);
-
 	type = 0x00000001; /* PAGE_ALL */
-	if (atomic_read(&vm->engref[NVDEV_SUBDEV_BAR]))
+	if (atomic_read(&vm->engref[NVKM_SUBDEV_BAR]))
 		type |= 0x00000004; /* HUB_ONLY */
 
-	mutex_lock(&nv_subdev(priv)->mutex);
+	mutex_lock(&mmu->subdev.mutex);
 	list_for_each_entry(vpgd, &vm->pgd_list, head) {
 		/* looks like maybe a "free flush slots" counter, the
 		 * faster you write to 0x100cbc the more it decreases
 		 */
-		if (!nv_wait_ne(priv, 0x100c80, 0x00ff0000, 0x00000000)) {
-			nv_error(priv, "vm timeout 0: 0x%08x %d\n",
-				 nv_rd32(priv, 0x100c80), type);
-		}
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
+				break;
+		);
 
-		nv_wr32(priv, 0x100cb8, vpgd->obj->addr >> 8);
-		nv_wr32(priv, 0x100cbc, 0x80000000 | type);
+		nvkm_wr32(device, 0x100cb8, vpgd->obj->addr >> 8);
+		nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
 
 		/* wait for flush to be queued? */
-		if (!nv_wait(priv, 0x100c80, 0x00008000, 0x00008000)) {
-			nv_error(priv, "vm timeout 1: 0x%08x %d\n",
-				 nv_rd32(priv, 0x100c80), type);
-		}
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x100c80) & 0x00008000)
+				break;
+		);
 	}
-	mutex_unlock(&nv_subdev(priv)->mutex);
+	mutex_unlock(&mmu->subdev.mutex);
 }
 
 static int
 gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
-		struct nvkm_vm **pvm)
+		struct lock_class_key *key, struct nvkm_vm **pvm)
 {
-	return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, pvm);
+	return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, key, pvm);
 }
 
-static int
-gf100_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct gf100_mmu_priv *priv;
-	int ret;
-
-	ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.limit = 1ULL << 40;
-	priv->base.dma_bits = 40;
-	priv->base.pgt_bits  = 27 - 12;
-	priv->base.spg_shift = 12;
-	priv->base.lpg_shift = 17;
-	priv->base.create = gf100_vm_create;
-	priv->base.map_pgt = gf100_vm_map_pgt;
-	priv->base.map = gf100_vm_map;
-	priv->base.map_sg = gf100_vm_map_sg;
-	priv->base.unmap = gf100_vm_unmap;
-	priv->base.flush = gf100_vm_flush;
-	return 0;
-}
-
-struct nvkm_oclass
-gf100_mmu_oclass = {
-	.handle = NV_SUBDEV(MMU, 0xc0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf100_mmu_ctor,
-		.dtor = _nvkm_mmu_dtor,
-		.init = _nvkm_mmu_init,
-		.fini = _nvkm_mmu_fini,
-	},
+static const struct nvkm_mmu_func
+gf100_mmu = {
+	.limit = (1ULL << 40),
+	.dma_bits = 40,
+	.pgt_bits  = 27 - 12,
+	.spg_shift = 12,
+	.lpg_shift = 17,
+	.create = gf100_vm_create,
+	.map_pgt = gf100_vm_map_pgt,
+	.map = gf100_vm_map,
+	.map_sg = gf100_vm_map_sg,
+	.unmap = gf100_vm_unmap,
+	.flush = gf100_vm_flush,
 };
+
+int
+gf100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	return nvkm_mmu_new_(&gf100_mmu, device, index, pmmu);
+}
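
gf100_vm_map_pgt above packs each PDE as two 32-bit words: bit 0 reads as a
present bit and the page-table address is stored shifted right by 8, so the
tables must be at least 256-byte aligned; word 0 carries the large-page
table, word 1 the small-page one.  A sketch of just the encoding (an
interpretation of the code above; bit meanings beyond bit 0 are not spelled
out there):

static void
example_gf100_encode_pde(u32 pde[2], u64 lpt_addr, u64 spt_addr)
{
	pde[0] = lpt_addr ? 0x00000001 | (u32)(lpt_addr >> 8) : 0;
	pde[1] = spt_addr ? 0x00000001 | (u32)(spt_addr >> 8) : 0;
}
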
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
index fe93ea2..37927c3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
@@ -23,7 +23,6 @@
  */
 #include "nv04.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 
 #define NV04_PDMA_SIZE (128 * 1024 * 1024)
@@ -34,30 +33,34 @@
  ******************************************************************************/
 
 static void
-nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
 	pte = 0x00008 + (pte * 4);
+	nvkm_kmap(pgt);
 	while (cnt) {
 		u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
 		u32 phys = (u32)*list++;
 		while (cnt && page--) {
-			nv_wo32(pgt, pte, phys | 3);
+			nvkm_wo32(pgt, pte, phys | 3);
 			phys += NV04_PDMA_PAGE;
 			pte += 4;
 			cnt -= 1;
 		}
 	}
+	nvkm_done(pgt);
 }
 
 static void
-nv04_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv04_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
 	pte = 0x00008 + (pte * 4);
+	nvkm_kmap(pgt);
 	while (cnt--) {
-		nv_wo32(pgt, pte, 0x00000000);
+		nvkm_wo32(pgt, pte, 0x00000000);
 		pte += 4;
 	}
+	nvkm_done(pgt);
 }
 
 static void
@@ -66,86 +69,81 @@
 }
 
 /*******************************************************************************
- * VM object
- ******************************************************************************/
-
-int
-nv04_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mmstart,
-	       struct nvkm_vm **pvm)
-{
-	return -EINVAL;
-}
-
-/*******************************************************************************
  * MMU subdev
  ******************************************************************************/
 
 static int
-nv04_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+nv04_mmu_oneinit(struct nvkm_mmu *base)
 {
-	struct nv04_mmu_priv *priv;
-	struct nvkm_gpuobj *dma;
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
+	struct nvkm_memory *dma;
 	int ret;
 
-	ret = nvkm_mmu_create(parent, engine, oclass, "PCIGART",
-			      "pcigart", &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL,
+			     &mmu->vm);
 	if (ret)
 		return ret;
 
-	priv->base.create = nv04_vm_create;
-	priv->base.limit = NV04_PDMA_SIZE;
-	priv->base.dma_bits = 32;
-	priv->base.pgt_bits = 32 - 12;
-	priv->base.spg_shift = 12;
-	priv->base.lpg_shift = 12;
-	priv->base.map_sg = nv04_vm_map_sg;
-	priv->base.unmap = nv04_vm_unmap;
-	priv->base.flush = nv04_vm_flush;
-
-	ret = nvkm_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
-			     &priv->vm);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL,
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
 			      (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
-			      16, NVOBJ_FLAG_ZERO_ALLOC,
-			      &priv->vm->pgt[0].obj[0]);
-	dma = priv->vm->pgt[0].obj[0];
-	priv->vm->pgt[0].refcount[0] = 1;
+			      16, true, &dma);
+	mmu->vm->pgt[0].mem[0] = dma;
+	mmu->vm->pgt[0].refcount[0] = 1;
 	if (ret)
 		return ret;
 
-	nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
-	nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
+	nvkm_kmap(dma);
+	nvkm_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
+	nvkm_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
+	nvkm_done(dma);
 	return 0;
 }
 
-void
-nv04_mmu_dtor(struct nvkm_object *object)
+void *
+nv04_mmu_dtor(struct nvkm_mmu *base)
 {
-	struct nv04_mmu_priv *priv = (void *)object;
-	if (priv->vm) {
-		nvkm_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
-		nvkm_vm_ref(NULL, &priv->vm, NULL);
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
+	if (mmu->vm) {
+		nvkm_memory_del(&mmu->vm->pgt[0].mem[0]);
+		nvkm_vm_ref(NULL, &mmu->vm, NULL);
 	}
-	if (priv->nullp) {
-		pci_free_consistent(nv_device(priv)->pdev, 16 * 1024,
-				    priv->nullp, priv->null);
+	if (mmu->nullp) {
+		dma_free_coherent(device->dev, 16 * 1024,
+				  mmu->nullp, mmu->null);
 	}
-	nvkm_mmu_destroy(&priv->base);
+	return mmu;
 }
 
-struct nvkm_oclass
-nv04_mmu_oclass = {
-	.handle = NV_SUBDEV(MMU, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_mmu_ctor,
-		.dtor = nv04_mmu_dtor,
-		.init = _nvkm_mmu_init,
-		.fini = _nvkm_mmu_fini,
-	},
+int
+nv04_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_mmu **pmmu)
+{
+	struct nv04_mmu *mmu;
+	if (!(mmu = kzalloc(sizeof(*mmu), GFP_KERNEL)))
+		return -ENOMEM;
+	*pmmu = &mmu->base;
+	nvkm_mmu_ctor(func, device, index, &mmu->base);
+	return 0;
+}
+
+const struct nvkm_mmu_func
+nv04_mmu = {
+	.oneinit = nv04_mmu_oneinit,
+	.dtor = nv04_mmu_dtor,
+	.limit = NV04_PDMA_SIZE,
+	.dma_bits = 32,
+	.pgt_bits = 32 - 12,
+	.spg_shift = 12,
+	.lpg_shift = 12,
+	.map_sg = nv04_vm_map_sg,
+	.unmap = nv04_vm_unmap,
+	.flush = nv04_vm_flush,
 };
+
+int
+nv04_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	return nv04_mmu_new_(&nv04_mmu, device, index, pmmu);
+}
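
The GART object built in nv04_mmu_oneinit above is a classic NV04 DMA
object: two header words (flags 0x0002103d plus the limit NV04_PDMA_SIZE - 1)
followed by one 32-bit PTE per 4KiB page starting at offset 0x8.  A sketch
of a single-PTE update using the accessors from this patch; the helper is
hypothetical and the low two PTE bits are assumed to mean present and
read/write:

static void
example_nv04_write_pte(struct nvkm_memory *pgt, u32 idx, u32 phys)
{
	nvkm_kmap(pgt);
	nvkm_wo32(pgt, 0x00008 + idx * 4, phys | 3);
	nvkm_done(pgt);
}
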
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
index 7bf6f4b..363e33b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
@@ -1,19 +1,18 @@
 #ifndef __NV04_MMU_PRIV__
 #define __NV04_MMU_PRIV__
+#define nv04_mmu(p) container_of((p), struct nv04_mmu, base)
+#include "priv.h"
 
-#include <subdev/mmu.h>
-
-struct nv04_mmu_priv {
+struct nv04_mmu {
 	struct nvkm_mmu base;
 	struct nvkm_vm *vm;
 	dma_addr_t null;
 	void *nullp;
 };
 
-static inline struct nv04_mmu_priv *
-nv04_mmu(void *obj)
-{
-	return (void *)nvkm_mmu(obj);
-}
+int nv04_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
+		  int index, struct nvkm_mmu **);
+void *nv04_mmu_dtor(struct nvkm_mmu *);
 
+extern const struct nvkm_mmu_func nv04_mmu;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
index 61ee3ab..c6a26f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -23,7 +23,6 @@
  */
 #include "nv04.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 #include <core/option.h>
 #include <subdev/timer.h>
@@ -36,45 +35,50 @@
  ******************************************************************************/
 
 static void
-nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
 	pte = pte * 4;
+	nvkm_kmap(pgt);
 	while (cnt) {
 		u32 page = PAGE_SIZE / NV41_GART_PAGE;
 		u64 phys = (u64)*list++;
 		while (cnt && page--) {
-			nv_wo32(pgt, pte, (phys >> 7) | 1);
+			nvkm_wo32(pgt, pte, (phys >> 7) | 1);
 			phys += NV41_GART_PAGE;
 			pte += 4;
 			cnt -= 1;
 		}
 	}
+	nvkm_done(pgt);
 }
 
 static void
-nv41_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv41_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
 	pte = pte * 4;
+	nvkm_kmap(pgt);
 	while (cnt--) {
-		nv_wo32(pgt, pte, 0x00000000);
+		nvkm_wo32(pgt, pte, 0x00000000);
 		pte += 4;
 	}
+	nvkm_done(pgt);
 }
 
 static void
 nv41_vm_flush(struct nvkm_vm *vm)
 {
-	struct nv04_mmu_priv *priv = (void *)vm->mmu;
+	struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
+	struct nvkm_device *device = mmu->base.subdev.device;
 
-	mutex_lock(&nv_subdev(priv)->mutex);
-	nv_wr32(priv, 0x100810, 0x00000022);
-	if (!nv_wait(priv, 0x100810, 0x00000020, 0x00000020)) {
-		nv_warn(priv, "flush timeout, 0x%08x\n",
-			nv_rd32(priv, 0x100810));
-	}
-	nv_wr32(priv, 0x100810, 0x00000000);
-	mutex_unlock(&nv_subdev(priv)->mutex);
+	mutex_lock(&mmu->base.subdev.mutex);
+	nvkm_wr32(device, 0x100810, 0x00000022);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100810) & 0x00000020)
+			break;
+	);
+	nvkm_wr32(device, 0x100810, 0x00000000);
+	mutex_unlock(&mmu->base.subdev.mutex);
 }
 
 /*******************************************************************************
@@ -82,76 +86,56 @@
  ******************************************************************************/
 
 static int
-nv41_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+nv41_mmu_oneinit(struct nvkm_mmu *base)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nv04_mmu_priv *priv;
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
 	int ret;
 
-	if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
-	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) {
-		return nvkm_object_ctor(parent, engine, &nv04_mmu_oclass,
-					data, size, pobject);
-	}
-
-	ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART",
-			      "pciegart", &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL,
+			     &mmu->vm);
 	if (ret)
 		return ret;
 
-	priv->base.create = nv04_vm_create;
-	priv->base.limit = NV41_GART_SIZE;
-	priv->base.dma_bits = 39;
-	priv->base.pgt_bits = 32 - 12;
-	priv->base.spg_shift = 12;
-	priv->base.lpg_shift = 12;
-	priv->base.map_sg = nv41_vm_map_sg;
-	priv->base.unmap = nv41_vm_unmap;
-	priv->base.flush = nv41_vm_flush;
-
-	ret = nvkm_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096,
-			     &priv->vm);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL,
-			      (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16,
-			      NVOBJ_FLAG_ZERO_ALLOC,
-			      &priv->vm->pgt[0].obj[0]);
-	priv->vm->pgt[0].refcount[0] = 1;
-	if (ret)
-		return ret;
-
-	return 0;
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+			      (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, true,
+			      &mmu->vm->pgt[0].mem[0]);
+	mmu->vm->pgt[0].refcount[0] = 1;
+	return ret;
 }
 
-static int
-nv41_mmu_init(struct nvkm_object *object)
+static void
+nv41_mmu_init(struct nvkm_mmu *base)
 {
-	struct nv04_mmu_priv *priv = (void *)object;
-	struct nvkm_gpuobj *dma = priv->vm->pgt[0].obj[0];
-	int ret;
-
-	ret = nvkm_mmu_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_wr32(priv, 0x100800, dma->addr | 0x00000002);
-	nv_mask(priv, 0x10008c, 0x00000100, 0x00000100);
-	nv_wr32(priv, 0x100820, 0x00000000);
-	return 0;
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
+	struct nvkm_memory *dma = mmu->vm->pgt[0].mem[0];
+	nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma));
+	nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
+	nvkm_wr32(device, 0x100820, 0x00000000);
 }
 
-struct nvkm_oclass
-nv41_mmu_oclass = {
-	.handle = NV_SUBDEV(MMU, 0x41),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv41_mmu_ctor,
-		.dtor = nv04_mmu_dtor,
-		.init = nv41_mmu_init,
-		.fini = _nvkm_mmu_fini,
-	},
+static const struct nvkm_mmu_func
+nv41_mmu = {
+	.dtor = nv04_mmu_dtor,
+	.oneinit = nv41_mmu_oneinit,
+	.init = nv41_mmu_init,
+	.limit = NV41_GART_SIZE,
+	.dma_bits = 39,
+	.pgt_bits = 32 - 12,
+	.spg_shift = 12,
+	.lpg_shift = 12,
+	.map_sg = nv41_vm_map_sg,
+	.unmap = nv41_vm_unmap,
+	.flush = nv41_vm_flush,
 };
+
+int
+nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	if (device->type == NVKM_DEVICE_AGP ||
+	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
+		return nv04_mmu_new(device, index, pmmu);
+
+	return nv04_mmu_new_(&nv41_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
index b90ded1..a648c23 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
@@ -23,7 +23,6 @@
  */
 #include "nv04.h"
 
-#include <core/device.h>
 #include <core/gpuobj.h>
 #include <core/option.h>
 #include <subdev/timer.h>
@@ -36,16 +35,16 @@
  ******************************************************************************/
 
 static void
-nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
+nv44_vm_fill(struct nvkm_memory *pgt, dma_addr_t null,
 	     dma_addr_t *list, u32 pte, u32 cnt)
 {
 	u32 base = (pte << 2) & ~0x0000000f;
 	u32 tmp[4];
 
-	tmp[0] = nv_ro32(pgt, base + 0x0);
-	tmp[1] = nv_ro32(pgt, base + 0x4);
-	tmp[2] = nv_ro32(pgt, base + 0x8);
-	tmp[3] = nv_ro32(pgt, base + 0xc);
+	tmp[0] = nvkm_ro32(pgt, base + 0x0);
+	tmp[1] = nvkm_ro32(pgt, base + 0x4);
+	tmp[2] = nvkm_ro32(pgt, base + 0x8);
+	tmp[3] = nvkm_ro32(pgt, base + 0xc);
 
 	while (cnt--) {
 		u32 addr = list ? (*list++ >> 12) : (null >> 12);
@@ -75,24 +74,25 @@
 		}
 	}
 
-	nv_wo32(pgt, base + 0x0, tmp[0]);
-	nv_wo32(pgt, base + 0x4, tmp[1]);
-	nv_wo32(pgt, base + 0x8, tmp[2]);
-	nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
+	nvkm_wo32(pgt, base + 0x0, tmp[0]);
+	nvkm_wo32(pgt, base + 0x4, tmp[1]);
+	nvkm_wo32(pgt, base + 0x8, tmp[2]);
+	nvkm_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
 }
 
 static void
-nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
-	struct nv04_mmu_priv *priv = (void *)vma->vm->mmu;
+	struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
 	u32 tmp[4];
 	int i;
 
+	nvkm_kmap(pgt);
 	if (pte & 3) {
 		u32  max = 4 - (pte & 3);
 		u32 part = (cnt > max) ? max : cnt;
-		nv44_vm_fill(pgt, priv->null, list, pte, part);
+		nv44_vm_fill(pgt, mmu->null, list, pte, part);
 		pte  += part;
 		list += part;
 		cnt  -= part;
@@ -101,51 +101,57 @@
 	while (cnt >= 4) {
 		for (i = 0; i < 4; i++)
 			tmp[i] = *list++ >> 12;
-		nv_wo32(pgt, pte++ * 4, tmp[0] >>  0 | tmp[1] << 27);
-		nv_wo32(pgt, pte++ * 4, tmp[1] >>  5 | tmp[2] << 22);
-		nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
-		nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
+		nvkm_wo32(pgt, pte++ * 4, tmp[0] >>  0 | tmp[1] << 27);
+		nvkm_wo32(pgt, pte++ * 4, tmp[1] >>  5 | tmp[2] << 22);
+		nvkm_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+		nvkm_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
 		cnt -= 4;
 	}
 
 	if (cnt)
-		nv44_vm_fill(pgt, priv->null, list, pte, cnt);
+		nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
+	nvkm_done(pgt);
 }
 
 static void
-nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv44_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
-	struct nv04_mmu_priv *priv = (void *)nvkm_mmu(pgt);
+	struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
 
+	nvkm_kmap(pgt);
 	if (pte & 3) {
 		u32  max = 4 - (pte & 3);
 		u32 part = (cnt > max) ? max : cnt;
-		nv44_vm_fill(pgt, priv->null, NULL, pte, part);
+		nv44_vm_fill(pgt, mmu->null, NULL, pte, part);
 		pte  += part;
 		cnt  -= part;
 	}
 
 	while (cnt >= 4) {
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
 		cnt -= 4;
 	}
 
 	if (cnt)
-		nv44_vm_fill(pgt, priv->null, NULL, pte, cnt);
+		nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);
+	nvkm_done(pgt);
 }
 
 static void
 nv44_vm_flush(struct nvkm_vm *vm)
 {
-	struct nv04_mmu_priv *priv = (void *)vm->mmu;
-	nv_wr32(priv, 0x100814, priv->base.limit - NV44_GART_PAGE);
-	nv_wr32(priv, 0x100808, 0x00000020);
-	if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001))
-		nv_error(priv, "timeout: 0x%08x\n", nv_rd32(priv, 0x100808));
-	nv_wr32(priv, 0x100808, 0x00000000);
+	struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
+	struct nvkm_device *device = mmu->base.subdev.device;
+	nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
+	nvkm_wr32(device, 0x100808, 0x00000020);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100808) & 0x00000001)
+			break;
+	);
+	nvkm_wr32(device, 0x100808, 0x00000000);
 }
 
 /*******************************************************************************
@@ -153,95 +159,78 @@
  ******************************************************************************/
 
 static int
-nv44_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+nv44_mmu_oneinit(struct nvkm_mmu *base)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nv04_mmu_priv *priv;
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
 	int ret;
 
-	if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
-	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true)) {
-		return nvkm_object_ctor(parent, engine, &nv04_mmu_oclass,
-					data, size, pobject);
+	mmu->nullp = dma_alloc_coherent(device->dev, 16 * 1024,
+					&mmu->null, GFP_KERNEL);
+	if (!mmu->nullp) {
+		nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n");
+		mmu->null = 0;
 	}
 
-	ret = nvkm_mmu_create(parent, engine, oclass, "PCIEGART",
-			      "pciegart", &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096, NULL,
+			     &mmu->vm);
 	if (ret)
 		return ret;
 
-	priv->base.create = nv04_vm_create;
-	priv->base.limit = NV44_GART_SIZE;
-	priv->base.dma_bits = 39;
-	priv->base.pgt_bits = 32 - 12;
-	priv->base.spg_shift = 12;
-	priv->base.lpg_shift = 12;
-	priv->base.map_sg = nv44_vm_map_sg;
-	priv->base.unmap = nv44_vm_unmap;
-	priv->base.flush = nv44_vm_flush;
-
-	priv->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &priv->null);
-	if (!priv->nullp) {
-		nv_error(priv, "unable to allocate dummy pages\n");
-		return -ENOMEM;
-	}
-
-	ret = nvkm_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096,
-			     &priv->vm);
-	if (ret)
-		return ret;
-
-	ret = nvkm_gpuobj_new(nv_object(priv), NULL,
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
 			      (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
-			      512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
-			      &priv->vm->pgt[0].obj[0]);
-	priv->vm->pgt[0].refcount[0] = 1;
-	if (ret)
-		return ret;
-
-	return 0;
+			      512 * 1024, true,
+			      &mmu->vm->pgt[0].mem[0]);
+	mmu->vm->pgt[0].refcount[0] = 1;
+	return ret;
 }
 
-static int
-nv44_mmu_init(struct nvkm_object *object)
+static void
+nv44_mmu_init(struct nvkm_mmu *base)
 {
-	struct nv04_mmu_priv *priv = (void *)object;
-	struct nvkm_gpuobj *gart = priv->vm->pgt[0].obj[0];
+	struct nv04_mmu *mmu = nv04_mmu(base);
+	struct nvkm_device *device = mmu->base.subdev.device;
+	struct nvkm_memory *gart = mmu->vm->pgt[0].mem[0];
 	u32 addr;
-	int ret;
-
-	ret = nvkm_mmu_init(&priv->base);
-	if (ret)
-		return ret;
 
 	/* calculate vram address of this PRAMIN block; the object must
 	 * be allocated with 512KiB alignment, and must not exceed a
 	 * total size of 512KiB, for this to work correctly
 	 */
-	addr  = nv_rd32(priv, 0x10020c);
-	addr -= ((gart->addr >> 19) + 1) << 19;
+	addr  = nvkm_rd32(device, 0x10020c);
+	addr -= ((nvkm_memory_addr(gart) >> 19) + 1) << 19;
 
-	nv_wr32(priv, 0x100850, 0x80000000);
-	nv_wr32(priv, 0x100818, priv->null);
-	nv_wr32(priv, 0x100804, NV44_GART_SIZE);
-	nv_wr32(priv, 0x100850, 0x00008000);
-	nv_mask(priv, 0x10008c, 0x00000200, 0x00000200);
-	nv_wr32(priv, 0x100820, 0x00000000);
-	nv_wr32(priv, 0x10082c, 0x00000001);
-	nv_wr32(priv, 0x100800, addr | 0x00000010);
-	return 0;
+	nvkm_wr32(device, 0x100850, 0x80000000);
+	nvkm_wr32(device, 0x100818, mmu->null);
+	nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
+	nvkm_wr32(device, 0x100850, 0x00008000);
+	nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
+	nvkm_wr32(device, 0x100820, 0x00000000);
+	nvkm_wr32(device, 0x10082c, 0x00000001);
+	nvkm_wr32(device, 0x100800, addr | 0x00000010);
 }
 
-struct nvkm_oclass
-nv44_mmu_oclass = {
-	.handle = NV_SUBDEV(MMU, 0x44),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv44_mmu_ctor,
-		.dtor = nv04_mmu_dtor,
-		.init = nv44_mmu_init,
-		.fini = _nvkm_mmu_fini,
-	},
+static const struct nvkm_mmu_func
+nv44_mmu = {
+	.dtor = nv04_mmu_dtor,
+	.oneinit = nv44_mmu_oneinit,
+	.init = nv44_mmu_init,
+	.limit = NV44_GART_SIZE,
+	.dma_bits = 39,
+	.pgt_bits = 32 - 12,
+	.spg_shift = 12,
+	.lpg_shift = 12,
+	.map_sg = nv44_vm_map_sg,
+	.unmap = nv44_vm_unmap,
+	.flush = nv44_vm_flush,
 };
+
+int
+nv44_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	if (device->type == NVKM_DEVICE_AGP ||
+	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
+		return nv04_mmu_new(device, index, pmmu);
+
+	return nv04_mmu_new_(&nv44_mmu, device, index, pmmu);
+}
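
nv44_vm_map_sg and nv44_vm_fill above handle the unusual nv44 PTE layout:
four 27-bit page frame numbers (the DMA address shifted right by 12, with
dma_bits = 39 leaving 27 significant bits) are bit-packed into four
consecutive 32-bit words, which is why unaligned head and tail entries go
through the read-modify-write nv44_vm_fill path.  A standalone sketch of
the packing; the 0x40000000 bit is assumed to validate the 4-entry group:

#include <stdint.h>

static void
example_nv44_pack(uint32_t out[4], const uint32_t pfn[4])
{
	out[0] = pfn[0] >>  0 | pfn[1] << 27;
	out[1] = pfn[1] >>  5 | pfn[2] << 22;
	out[2] = pfn[2] >> 10 | pfn[3] << 17;
	out[3] = pfn[3] >> 15 | 0x40000000; /* assumed group-valid bit */
}
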
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index b83550f..a1f8d65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -21,31 +21,28 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/mmu.h>
-#include <subdev/bar.h>
+#include "priv.h"
+
+#include <core/gpuobj.h>
 #include <subdev/fb.h>
 #include <subdev/timer.h>
-
-#include <core/engine.h>
-#include <core/gpuobj.h>
-
-struct nv50_mmu_priv {
-	struct nvkm_mmu base;
-};
+#include <engine/gr.h>
 
 static void
-nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_gpuobj *pgt[2])
+nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_memory *pgt[2])
 {
 	u64 phys = 0xdeadcafe00000000ULL;
 	u32 coverage = 0;
 
 	if (pgt[0]) {
-		phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */
-		coverage = (pgt[0]->size >> 3) << 12;
+		/* present, 4KiB pages */
+		phys = 0x00000003 | nvkm_memory_addr(pgt[0]);
+		coverage = (nvkm_memory_size(pgt[0]) >> 3) << 12;
 	} else
 	if (pgt[1]) {
-		phys = 0x00000001 | pgt[1]->addr; /* present */
-		coverage = (pgt[1]->size >> 3) << 16;
+		/* present, 64KiB pages */
+		phys = 0x00000001 | nvkm_memory_addr(pgt[1]);
+		coverage = (nvkm_memory_size(pgt[1]) >> 3) << 16;
 	}
 
 	if (phys & 1) {
@@ -57,8 +54,10 @@
 			phys |= 0x20;
 	}
 
-	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
-	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+	nvkm_kmap(pgd);
+	nvkm_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
+	nvkm_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+	nvkm_done(pgd);
 }
 
 static inline u64
@@ -75,17 +74,18 @@
 }
 
 static void
-nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv50_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	    struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
+	struct nvkm_ram *ram = vma->vm->mmu->subdev.device->fb->ram;
 	u32 comp = (mem->memtype & 0x180) >> 7;
 	u32 block, target;
 	int i;
 
 	/* IGPs don't have real VRAM, re-target to stolen system memory */
 	target = 0;
-	if (nvkm_fb(vma->vm->mmu)->ram->stolen) {
-		phys += nvkm_fb(vma->vm->mmu)->ram->stolen;
+	if (ram->stolen) {
+		phys += ram->stolen;
 		target = 3;
 	}
 
@@ -93,6 +93,7 @@
 	pte <<= 3;
 	cnt <<= 3;
 
+	nvkm_kmap(pgt);
 	while (cnt) {
 		u32 offset_h = upper_32_bits(phys);
 		u32 offset_l = lower_32_bits(phys);
@@ -113,129 +114,118 @@
 		}
 
 		while (block) {
-			nv_wo32(pgt, pte + 0, offset_l);
-			nv_wo32(pgt, pte + 4, offset_h);
+			nvkm_wo32(pgt, pte + 0, offset_l);
+			nvkm_wo32(pgt, pte + 4, offset_h);
 			pte += 8;
 			block -= 8;
 		}
 	}
+	nvkm_done(pgt);
 }
 
 static void
-nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
 	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
 	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
 	pte <<= 3;
+	nvkm_kmap(pgt);
 	while (cnt--) {
 		u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
-		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
-		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
 		pte += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
-nv50_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv50_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
 {
 	pte <<= 3;
+	nvkm_kmap(pgt);
 	while (cnt--) {
-		nv_wo32(pgt, pte + 0, 0x00000000);
-		nv_wo32(pgt, pte + 4, 0x00000000);
+		nvkm_wo32(pgt, pte + 0, 0x00000000);
+		nvkm_wo32(pgt, pte + 4, 0x00000000);
 		pte += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
 nv50_vm_flush(struct nvkm_vm *vm)
 {
-	struct nv50_mmu_priv *priv = (void *)vm->mmu;
-	struct nvkm_bar *bar = nvkm_bar(priv);
-	struct nvkm_engine *engine;
+	struct nvkm_mmu *mmu = vm->mmu;
+	struct nvkm_subdev *subdev = &mmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	int i, vme;
 
-	bar->flush(bar);
-
-	mutex_lock(&nv_subdev(priv)->mutex);
-	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
+	mutex_lock(&subdev->mutex);
+	for (i = 0; i < NVKM_SUBDEV_NR; i++) {
 		if (!atomic_read(&vm->engref[i]))
 			continue;
 
 		/* unfortunate hw bug workaround... */
-		engine = nvkm_engine(priv, i);
-		if (engine && engine->tlb_flush) {
-			engine->tlb_flush(engine);
-			continue;
+		if (i == NVKM_ENGINE_GR && device->gr) {
+			int ret = nvkm_gr_tlb_flush(device->gr);
+			if (ret != -ENODEV)
+				continue;
 		}
 
 		switch (i) {
-		case NVDEV_ENGINE_GR    : vme = 0x00; break;
-		case NVDEV_ENGINE_VP    :
-		case NVDEV_ENGINE_MSPDEC: vme = 0x01; break;
-		case NVDEV_SUBDEV_BAR   : vme = 0x06; break;
-		case NVDEV_ENGINE_MSPPP :
-		case NVDEV_ENGINE_MPEG  : vme = 0x08; break;
-		case NVDEV_ENGINE_BSP   :
-		case NVDEV_ENGINE_MSVLD : vme = 0x09; break;
-		case NVDEV_ENGINE_CIPHER:
-		case NVDEV_ENGINE_SEC   : vme = 0x0a; break;
-		case NVDEV_ENGINE_CE0   : vme = 0x0d; break;
+		case NVKM_ENGINE_GR    : vme = 0x00; break;
+		case NVKM_ENGINE_VP    :
+		case NVKM_ENGINE_MSPDEC: vme = 0x01; break;
+		case NVKM_SUBDEV_BAR   : vme = 0x06; break;
+		case NVKM_ENGINE_MSPPP :
+		case NVKM_ENGINE_MPEG  : vme = 0x08; break;
+		case NVKM_ENGINE_BSP   :
+		case NVKM_ENGINE_MSVLD : vme = 0x09; break;
+		case NVKM_ENGINE_CIPHER:
+		case NVKM_ENGINE_SEC   : vme = 0x0a; break;
+		case NVKM_ENGINE_CE0   : vme = 0x0d; break;
 		default:
 			continue;
 		}
 
-		nv_wr32(priv, 0x100c80, (vme << 16) | 1);
-		if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000))
-			nv_error(priv, "vm flush timeout: engine %d\n", vme);
+		nvkm_wr32(device, 0x100c80, (vme << 16) | 1);
+		if (nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+				break;
+		) < 0)
+			nvkm_error(subdev, "vm flush timeout: engine %d\n", vme);
 	}
-	mutex_unlock(&nv_subdev(priv)->mutex);
+	mutex_unlock(&subdev->mutex);
 }
 
 static int
-nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length,
-	       u64 mm_offset, struct nvkm_vm **pvm)
+nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
+	       struct lock_class_key *key, struct nvkm_vm **pvm)
 {
-	u32 block = (1 << (mmu->pgt_bits + 12));
+	u32 block = (1 << (mmu->func->pgt_bits + 12));
 	if (block > length)
 		block = length;
 
-	return nvkm_vm_create(mmu, offset, length, mm_offset, block, pvm);
+	return nvkm_vm_create(mmu, offset, length, mm_offset, block, key, pvm);
 }
 
-static int
-nv50_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
-{
-	struct nv50_mmu_priv *priv;
-	int ret;
-
-	ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.limit = 1ULL << 40;
-	priv->base.dma_bits = 40;
-	priv->base.pgt_bits  = 29 - 12;
-	priv->base.spg_shift = 12;
-	priv->base.lpg_shift = 16;
-	priv->base.create = nv50_vm_create;
-	priv->base.map_pgt = nv50_vm_map_pgt;
-	priv->base.map = nv50_vm_map;
-	priv->base.map_sg = nv50_vm_map_sg;
-	priv->base.unmap = nv50_vm_unmap;
-	priv->base.flush = nv50_vm_flush;
-	return 0;
-}
-
-struct nvkm_oclass
-nv50_mmu_oclass = {
-	.handle = NV_SUBDEV(MMU, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_mmu_ctor,
-		.dtor = _nvkm_mmu_dtor,
-		.init = _nvkm_mmu_init,
-		.fini = _nvkm_mmu_fini,
-	},
+static const struct nvkm_mmu_func
+nv50_mmu = {
+	.limit = (1ULL << 40),
+	.dma_bits = 40,
+	.pgt_bits  = 29 - 12,
+	.spg_shift = 12,
+	.lpg_shift = 16,
+	.create = nv50_vm_create,
+	.map_pgt = nv50_vm_map_pgt,
+	.map = nv50_vm_map,
+	.map_sg = nv50_vm_map_sg,
+	.unmap = nv50_vm_unmap,
+	.flush = nv50_vm_flush,
 };
+
+int
+nv50_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+	return nvkm_mmu_new_(&nv50_mmu, device, index, pmmu);
+}
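
For reference, the nv50_vm_flush() conversion above replaces nv_wait() with the new nvkm_msec() loop, which takes the poll body as a statement and returns a negative value on timeout. A minimal userspace model of that idiom, assuming hypothetical poll_msec() and fake_rd32() helpers rather than the real nvkm API:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint32_t fake_rd32(uint32_t addr) { (void)addr; return 0; }

static int64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

/* Poll until done() or timeout; >= 0 on success, negative on timeout,
 * matching the sign convention nvkm_msec() uses in the hunks above. */
static int64_t poll_msec(int64_t timeout_ms, bool (*done)(void))
{
	const int64_t start = now_ns();
	do {
		if (done())
			return now_ns() - start;	/* time taken */
	} while (now_ns() - start < timeout_ms * 1000000);
	return -1;
}

/* the condition nv50_vm_flush() waits for: flush-busy bit clear */
static bool flush_done(void)
{
	return !(fake_rd32(0x100c80) & 0x00000001);
}

int main(void)
{
	return poll_msec(2000, flush_done) < 0;	/* nonzero on timeout */
}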
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
new file mode 100644
index 0000000..27cedc6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -0,0 +1,39 @@
+#ifndef __NVKM_MMU_PRIV_H__
+#define __NVKM_MMU_PRIV_H__
+#define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev)
+#include <subdev/mmu.h>
+
+void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *,
+		   int index, struct nvkm_mmu *);
+int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
+		  int index, struct nvkm_mmu **);
+
+struct nvkm_mmu_func {
+	void *(*dtor)(struct nvkm_mmu *);
+	int (*oneinit)(struct nvkm_mmu *);
+	void (*init)(struct nvkm_mmu *);
+
+	u64 limit;
+	u8  dma_bits;
+	u32 pgt_bits;
+	u8  spg_shift;
+	u8  lpg_shift;
+
+	int  (*create)(struct nvkm_mmu *, u64 offset, u64 length, u64 mm_offset,
+		       struct lock_class_key *, struct nvkm_vm **);
+
+	void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
+			struct nvkm_memory *pgt[2]);
+	void (*map)(struct nvkm_vma *, struct nvkm_memory *,
+		    struct nvkm_mem *, u32 pte, u32 cnt,
+		    u64 phys, u64 delta);
+	void (*map_sg)(struct nvkm_vma *, struct nvkm_memory *,
+		       struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
+	void (*unmap)(struct nvkm_vma *, struct nvkm_memory *pgt,
+		      u32 pte, u32 cnt);
+	void (*flush)(struct nvkm_vm *);
+};
+
+int nvkm_vm_create(struct nvkm_mmu *, u64, u64, u64, u32,
+		   struct lock_class_key *, struct nvkm_vm **);
+#endif
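
The new priv.h captures the shape this series converts each subdev to: chip-specific behaviour moves out of per-object constructors into a static const function table, and the constructor shrinks to an allocate-and-wire helper. A self-contained sketch of that pattern, with toy_mmu and toy_mmu_func as hypothetical stand-ins for nvkm_mmu and nvkm_mmu_func:

#include <stdlib.h>

struct toy_mmu;

struct toy_mmu_func {
	unsigned long long limit;
	void (*flush)(struct toy_mmu *);
};

struct toy_mmu {
	const struct toy_mmu_func *func;
};

static void toy_nv50_flush(struct toy_mmu *mmu) { (void)mmu; }

/* one table per chip generation, analogous to nv50_mmu above */
static const struct toy_mmu_func toy_nv50_mmu = {
	.limit = 1ULL << 40,
	.flush = toy_nv50_flush,
};

static int toy_mmu_new_(const struct toy_mmu_func *func, struct toy_mmu **pmmu)
{
	if (!(*pmmu = calloc(1, sizeof(**pmmu))))
		return -1;
	(*pmmu)->func = func;	/* per-chip behaviour lives in the table */
	return 0;
}

int main(void)
{
	struct toy_mmu *mmu;
	if (toy_mmu_new_(&toy_nv50_mmu, &mmu))
		return 1;
	mmu->func->flush(mmu);
	free(mmu);
	return 0;
}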
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
index 0ca9dca..9700a76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
@@ -23,14 +23,13 @@
  */
 #include "mxms.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/mxm.h>
 #include <subdev/i2c.h>
 
 static bool
-mxm_shadow_rom_fetch(struct nvkm_i2c_port *i2c, u8 addr,
+mxm_shadow_rom_fetch(struct nvkm_i2c_bus *bus, u8 addr,
 		     u8 offset, u8 size, u8 *data)
 {
 	struct i2c_msg msgs[] = {
@@ -38,27 +37,28 @@
 		{ .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
 	};
 
-	return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
+	return i2c_transfer(&bus->i2c, msgs, 2) == 2;
 }
 
 static bool
 mxm_shadow_rom(struct nvkm_mxm *mxm, u8 version)
 {
-	struct nvkm_bios *bios = nvkm_bios(mxm);
-	struct nvkm_i2c *i2c = nvkm_i2c(mxm);
-	struct nvkm_i2c_port *port = NULL;
+	struct nvkm_device *device = mxm->subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_i2c *i2c = device->i2c;
+	struct nvkm_i2c_bus *bus = NULL;
 	u8 i2cidx, mxms[6], addr, size;
 
 	i2cidx = mxm_ddc_map(bios, 1 /* LVDS_DDC */) & 0x0f;
 	if (i2cidx < 0x0f)
-		port = i2c->find(i2c, i2cidx);
-	if (!port)
+		bus = nvkm_i2c_bus_find(i2c, i2cidx);
+	if (!bus)
 		return false;
 
 	addr = 0x54;
-	if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms)) {
+	if (!mxm_shadow_rom_fetch(bus, addr, 0, 6, mxms)) {
 		addr = 0x56;
-		if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms))
+		if (!mxm_shadow_rom_fetch(bus, addr, 0, 6, mxms))
 			return false;
 	}
 
@@ -67,7 +67,7 @@
 	mxm->mxms = kmalloc(size, GFP_KERNEL);
 
 	if (mxm->mxms &&
-	    mxm_shadow_rom_fetch(port, addr, 0, size, mxm->mxms))
+	    mxm_shadow_rom_fetch(bus, addr, 0, size, mxm->mxms))
 		return true;
 
 	kfree(mxm->mxms);
@@ -79,7 +79,8 @@
 static bool
 mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
 {
-	struct nvkm_device *device = nv_device(mxm);
+	struct nvkm_subdev *subdev = &mxm->subdev;
+	struct nvkm_device *device = subdev->device;
 	static char muid[] = {
 		0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
 		0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
@@ -94,7 +95,7 @@
 	acpi_handle handle;
 	int rev;
 
-	handle = ACPI_HANDLE(nv_device_base(device));
+	handle = ACPI_HANDLE(device->dev);
 	if (!handle)
 		return false;
 
@@ -106,7 +107,7 @@
 	rev = (version & 0xf0) << 4 | (version & 0x0f);
 	obj = acpi_evaluate_dsm(handle, muid, rev, 0x00000010, &argv4);
 	if (!obj) {
-		nv_debug(mxm, "DSM MXMS failed\n");
+		nvkm_debug(subdev, "DSM MXMS failed\n");
 		return false;
 	}
 
@@ -114,7 +115,8 @@
 		mxm->mxms = kmemdup(obj->buffer.pointer,
 					 obj->buffer.length, GFP_KERNEL);
 	} else if (obj->type == ACPI_TYPE_INTEGER) {
-		nv_debug(mxm, "DSM MXMS returned 0x%llx\n", obj->integer.value);
+		nvkm_debug(subdev, "DSM MXMS returned 0x%llx\n",
+			   obj->integer.value);
 	}
 
 	ACPI_FREE(obj);
@@ -129,6 +131,7 @@
 static u8
 wmi_wmmx_mxmi(struct nvkm_mxm *mxm, u8 version)
 {
+	struct nvkm_subdev *subdev = &mxm->subdev;
 	u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 };
 	struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args };
 	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -137,18 +140,18 @@
 
 	status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
 	if (ACPI_FAILURE(status)) {
-		nv_debug(mxm, "WMMX MXMI returned %d\n", status);
+		nvkm_debug(subdev, "WMMX MXMI returned %d\n", status);
 		return 0x00;
 	}
 
 	obj = retn.pointer;
 	if (obj->type == ACPI_TYPE_INTEGER) {
 		version = obj->integer.value;
-		nv_debug(mxm, "WMMX MXMI version %d.%d\n",
-			     (version >> 4), version & 0x0f);
+		nvkm_debug(subdev, "WMMX MXMI version %d.%d\n",
+			   (version >> 4), version & 0x0f);
 	} else {
 		version = 0;
-		nv_debug(mxm, "WMMX MXMI returned non-integer\n");
+		nvkm_debug(subdev, "WMMX MXMI returned non-integer\n");
 	}
 
 	kfree(obj);
@@ -158,6 +161,7 @@
 static bool
 mxm_shadow_wmi(struct nvkm_mxm *mxm, u8 version)
 {
+	struct nvkm_subdev *subdev = &mxm->subdev;
 	u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
 	struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
 	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -165,7 +169,7 @@
 	acpi_status status;
 
 	if (!wmi_has_guid(WMI_WMMX_GUID)) {
-		nv_debug(mxm, "WMMX GUID not found\n");
+		nvkm_debug(subdev, "WMMX GUID not found\n");
 		return false;
 	}
 
@@ -177,7 +181,7 @@
 
 	status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
 	if (ACPI_FAILURE(status)) {
-		nv_debug(mxm, "WMMX MXMS returned %d\n", status);
+		nvkm_debug(subdev, "WMMX MXMS returned %d\n", status);
 		return false;
 	}
 
@@ -211,7 +215,7 @@
 {
 	struct mxm_shadow_h *shadow = _mxm_shadow;
 	do {
-		nv_debug(mxm, "checking %s\n", shadow->name);
+		nvkm_debug(&mxm->subdev, "checking %s\n", shadow->name);
 		if (shadow->exec(mxm, version)) {
 			if (mxms_valid(mxm))
 				return 0;
@@ -222,33 +226,33 @@
 	return -ENOENT;
 }
 
+static const struct nvkm_subdev_func
+nvkm_mxm = {
+};
+
 int
-nvkm_mxm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm)
 {
-	struct nvkm_device *device = nv_device(parent);
-	struct nvkm_bios *bios = nvkm_bios(device);
+	struct nvkm_bios *bios = device->bios;
 	struct nvkm_mxm *mxm;
 	u8  ver, len;
 	u16 data;
-	int ret;
 
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "MXM", "mxm",
-				  length, pobject);
-	mxm = *pobject;
-	if (ret)
-		return ret;
+	if (!(mxm = *pmxm = kzalloc(sizeof(*mxm), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_subdev_ctor(&nvkm_mxm, device, index, 0, &mxm->subdev);
 
 	data = mxm_table(bios, &ver, &len);
-	if (!data || !(ver = nv_ro08(bios, data))) {
-		nv_debug(mxm, "no VBIOS data, nothing to do\n");
+	if (!data || !(ver = nvbios_rd08(bios, data))) {
+		nvkm_debug(&mxm->subdev, "no VBIOS data, nothing to do\n");
 		return 0;
 	}
 
-	nv_info(mxm, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
+	nvkm_info(&mxm->subdev, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
 
 	if (mxm_shadow(mxm, ver)) {
-		nv_info(mxm, "failed to locate valid SIS\n");
+		nvkm_warn(&mxm->subdev, "failed to locate valid SIS\n");
 #if 0
 		/* we should, perhaps, fall back to some kind of limited
 		 * mode here if the x86 vbios hasn't already done the
@@ -261,8 +265,8 @@
 #endif
 	}
 
-	nv_info(mxm, "MXMS Version %d.%d\n",
-		mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff);
+	nvkm_debug(&mxm->subdev, "MXMS Version %d.%d\n",
+		   mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff);
 	mxms_foreach(mxm, 0, NULL, NULL);
 
 	if (nvkm_boolopt(device->cfgopt, "NvMXMDCB", true))
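
mxm_shadow(), partially visible above, walks the _mxm_shadow handler table and tries each way of obtaining the MXMS data in turn, which is why the conversion only has to touch the logging inside each handler. A standalone model of that walk, with try_rom, try_dsm and try_wmi as hypothetical stand-ins for the real ROM/DSM/WMI fetchers (the driver additionally validates each result with mxms_valid() and frees rejected buffers):

#include <stdbool.h>
#include <stdio.h>

struct shadow_method {
	const char *name;
	bool (*exec)(void);	/* try to fetch the MXMS table this way */
};

static bool try_rom(void) { return false; }
static bool try_dsm(void) { return false; }
static bool try_wmi(void) { return true;  }

static const struct shadow_method methods[] = {
	{ "ROM", try_rom },
	{ "DSM", try_dsm },
	{ "WMI", try_wmi },
	{ NULL, NULL },
};

static int shadow(void)
{
	const struct shadow_method *m = methods;
	while (m->name) {
		printf("checking %s\n", m->name);
		if (m->exec())
			return 0;	/* first source with valid data wins */
		m++;
	}
	return -1;	/* -ENOENT in the real code */
}

int main(void) { return shadow(); }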
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
index a9b1d63..45a2f8e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c
@@ -47,7 +47,7 @@
 		break;
 	}
 
-	nv_debug(mxm, "unknown version %d.%d\n", mxms[4], mxms[5]);
+	nvkm_debug(&mxm->subdev, "unknown version %d.%d\n", mxms[4], mxms[5]);
 	return 0x0000;
 }
 
@@ -71,7 +71,7 @@
 	while (size--)
 		sum += *mxms++;
 	if (sum) {
-		nv_debug(mxm, "checksum invalid\n");
+		nvkm_debug(&mxm->subdev, "checksum invalid\n");
 		return false;
 	}
 	return true;
@@ -82,7 +82,7 @@
 {
 	u8 *mxms = mxms_data(mxm);
 	if (*(u32 *)mxms != 0x5f4d584d) {
-		nv_debug(mxm, "signature invalid\n");
+		nvkm_debug(&mxm->subdev, "signature invalid\n");
 		return false;
 	}
 
@@ -96,6 +96,7 @@
 mxms_foreach(struct nvkm_mxm *mxm, u8 types,
 	     bool (*exec)(struct nvkm_mxm *, u8 *, void *), void *info)
 {
+	struct nvkm_subdev *subdev = &mxm->subdev;
 	u8 *mxms = mxms_data(mxm);
 	u8 *desc = mxms + mxms_headerlen(mxm);
 	u8 *fini = desc + mxms_structlen(mxm) - 1;
@@ -140,29 +141,28 @@
 			entries   = desc[1] & 0x07;
 			break;
 		default:
-			nv_debug(mxm, "unknown descriptor type %d\n", type);
+			nvkm_debug(subdev, "unknown descriptor type %d\n", type);
 			return false;
 		}
 
-		if (nv_subdev(mxm)->debug >= NV_DBG_DEBUG && (exec == NULL)) {
-			static const char * mxms_desc_name[] = {
+		if (mxm->subdev.debug >= NV_DBG_DEBUG && (exec == NULL)) {
+			static const char * mxms_desc[] = {
 				"ODS", "SCCS", "TS", "IPS",
 				"GSD", "VSS", "BCS", "FCS",
 			};
 			u8 *dump = desc;
+			char data[32], *ptr;
 			int i, j;
 
-			nv_debug(mxm, "%4s: ", mxms_desc_name[type]);
-			for (j = headerlen - 1; j >= 0; j--)
-				pr_cont("%02x", dump[j]);
-			pr_cont("\n");
+			for (j = headerlen - 1, ptr = data; j >= 0; j--)
+				ptr += sprintf(ptr, "%02x", dump[j]);
 			dump += headerlen;
 
+			nvkm_debug(subdev, "%4s: %s\n", mxms_desc[type], data);
 			for (i = 0; i < entries; i++, dump += recordlen) {
-				nv_debug(mxm, "      ");
-				for (j = recordlen - 1; j >= 0; j--)
-					pr_cont("%02x", dump[j]);
-				pr_cont("\n");
+				for (j = recordlen - 1, ptr = data; j >= 0; j--)
+					ptr += sprintf(ptr, "%02x", dump[j]);
+				nvkm_debug(subdev, "      %s\n", data);
 			}
 		}
 
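
The mxms_foreach() change above drops the pr_cont() continuations, which can interleave with output from other CPUs, and instead renders each record into a local buffer emitted with a single nvkm_debug() call. A compilable model of the buffer-building step (dump_record() is a hypothetical name; the 32-byte buffer matches the driver and assumes records of at most 15 bytes, two hex chars each plus the NUL):

#include <stdio.h>

static void dump_record(const unsigned char *rec, int len)
{
	char data[32], *ptr = data;
	int j;

	for (j = len - 1; j >= 0; j--)	/* most-significant byte first */
		ptr += sprintf(ptr, "%02x", rec[j]);
	printf("record: %s\n", data);	/* one log call per record */
}

int main(void)
{
	unsigned char rec[] = { 0x12, 0x34, 0xab, 0xcd };
	dump_record(rec, sizeof(rec));	/* prints "record: cdab3412" */
	return 0;
}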
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
index 4ef8040..333e0c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
@@ -1,6 +1,6 @@
 #ifndef __NVMXM_MXMS_H__
 #define __NVMXM_MXMS_H__
-#include <subdev/mxm.h>
+#include "priv.h"
 
 struct mxms_odev {
 	u8 outp_type;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
index f20e4ca..db14fad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
@@ -28,10 +28,6 @@
 #include <subdev/bios/dcb.h>
 #include <subdev/bios/mxm.h>
 
-struct nv50_mxm_priv {
-	struct nvkm_mxm base;
-};
-
 struct context {
 	u32 *outp;
 	struct mxms_odev desc;
@@ -53,7 +49,7 @@
 static bool
 mxm_match_dcb(struct nvkm_mxm *mxm, u8 *data, void *info)
 {
-	struct nvkm_bios *bios = nvkm_bios(mxm);
+	struct nvkm_bios *bios = mxm->subdev.device->bios;
 	struct context *ctx = info;
 	u64 desc = *(u64 *)data;
 
@@ -107,8 +103,8 @@
 	 * if one isn't found, disable it.
 	 */
 	if (mxms_foreach(mxm, 0x01, mxm_match_dcb, &ctx)) {
-		nv_debug(mxm, "disable %d: 0x%08x 0x%08x\n",
-			idx, ctx.outp[0], ctx.outp[1]);
+		nvkm_debug(&mxm->subdev, "disable %d: %08x %08x\n",
+			   idx, ctx.outp[0], ctx.outp[1]);
 		ctx.outp[0] |= 0x0000000f;
 		return 0;
 	}
@@ -180,20 +176,22 @@
 static bool
 mxm_show_unmatched(struct nvkm_mxm *mxm, u8 *data, void *info)
 {
+	struct nvkm_subdev *subdev = &mxm->subdev;
 	u64 desc = *(u64 *)data;
 	if ((desc & 0xf0) != 0xf0)
-		nv_info(mxm, "unmatched output device 0x%016llx\n", desc);
+		nvkm_info(subdev, "unmatched output device %016llx\n", desc);
 	return true;
 }
 
 static void
 mxm_dcb_sanitise(struct nvkm_mxm *mxm)
 {
-	struct nvkm_bios *bios = nvkm_bios(mxm);
+	struct nvkm_subdev *subdev = &mxm->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 	u8  ver, hdr, cnt, len;
 	u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
 	if (dcb == 0x0000 || ver != 0x40) {
-		nv_debug(mxm, "unsupported DCB version\n");
+		nvkm_debug(subdev, "unsupported DCB version\n");
 		return;
 	}
 
@@ -201,31 +199,20 @@
 	mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL);
 }
 
-static int
-nv50_mxm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	      struct nvkm_oclass *oclass, void *data, u32 size,
-	      struct nvkm_object **pobject)
+int
+nv50_mxm_new(struct nvkm_device *device, int index, struct nvkm_subdev **pmxm)
 {
-	struct nv50_mxm_priv *priv;
+	struct nvkm_mxm *mxm;
 	int ret;
 
-	ret = nvkm_mxm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_mxm_new_(device, index, &mxm);
+	if (mxm)
+		*pmxm = &mxm->subdev;
 	if (ret)
 		return ret;
 
-	if (priv->base.action & MXM_SANITISE_DCB)
-		mxm_dcb_sanitise(&priv->base);
+	if (mxm->action & MXM_SANITISE_DCB)
+		mxm_dcb_sanitise(mxm);
+
 	return 0;
 }
-
-struct nvkm_oclass
-nv50_mxm_oclass = {
-	.handle = NV_SUBDEV(MXM, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_mxm_ctor,
-		.dtor = _nvkm_mxm_dtor,
-		.init = _nvkm_mxm_init,
-		.fini = _nvkm_mxm_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
new file mode 100644
index 0000000..7d97015
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
@@ -0,0 +1,15 @@
+#ifndef __NVKM_MXM_PRIV_H__
+#define __NVKM_MXM_PRIV_H__
+#define nvkm_mxm(p) container_of((p), struct nvkm_mxm, subdev)
+#include <subdev/mxm.h>
+
+#define MXM_SANITISE_DCB 0x00000001
+
+struct nvkm_mxm {
+	struct nvkm_subdev subdev;
+	u32 action;
+	u8 *mxms;
+};
+
+int nvkm_mxm_new_(struct nvkm_device *, int index, struct nvkm_mxm **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
new file mode 100644
index 0000000..99672c3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -0,0 +1,7 @@
+nvkm-y += nvkm/subdev/pci/agp.o
+nvkm-y += nvkm/subdev/pci/base.o
+nvkm-y += nvkm/subdev/pci/nv04.o
+nvkm-y += nvkm/subdev/pci/nv40.o
+nvkm-y += nvkm/subdev/pci/nv4c.o
+nvkm-y += nvkm/subdev/pci/nv50.o
+nvkm-y += nvkm/subdev/pci/gf100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c
new file mode 100644
index 0000000..814cb51
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2015 Nouveau Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "agp.h"
+#ifdef __NVKM_PCI_AGP_H__
+#include <core/option.h>
+
+struct nvkm_device_agp_quirk {
+	u16 hostbridge_vendor;
+	u16 hostbridge_device;
+	u16 chip_vendor;
+	u16 chip_device;
+	int mode;
+};
+
+static const struct nvkm_device_agp_quirk
+nvkm_device_agp_quirks[] = {
+	/* VIA Apollo PRO133x / GeForce FX 5600 Ultra - fdo#20341 */
+	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
+	{},
+};
+
+void
+nvkm_agp_fini(struct nvkm_pci *pci)
+{
+	if (pci->agp.acquired) {
+		agp_backend_release(pci->agp.bridge);
+		pci->agp.acquired = false;
+	}
+}
+
+/* Ensure the AGP controller is in a consistent state in case we need to
+ * execute the VBIOS DEVINIT scripts.
+ */
+void
+nvkm_agp_preinit(struct nvkm_pci *pci)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	u32 mode = nvkm_pci_rd32(pci, 0x004c);
+	u32 save[2];
+
+	/* First of all, disable fast writes; otherwise, if they're already
+	 * enabled in the AGP bridge and we disable the card's AGP
+	 * controller, we might lock ourselves out of it.
+	 */
+	if ((mode | pci->agp.mode) & PCI_AGP_COMMAND_FW) {
+		mode = pci->agp.mode & ~PCI_AGP_COMMAND_FW;
+		agp_enable(pci->agp.bridge, mode);
+	}
+
+	/* clear busmaster bit, and disable AGP */
+	save[0] = nvkm_pci_rd32(pci, 0x0004);
+	nvkm_pci_wr32(pci, 0x0004, save[0] & ~0x00000004);
+	nvkm_pci_wr32(pci, 0x004c, 0x00000000);
+
+	/* reset PGRAPH, PFIFO and PTIMER */
+	save[1] = nvkm_mask(device, 0x000200, 0x00011100, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00011100, save[1]);
+
+	/* and restore busmaster bit (gives effect of resetting AGP) */
+	nvkm_pci_wr32(pci, 0x0004, save[0]);
+}
+
+int
+nvkm_agp_init(struct nvkm_pci *pci)
+{
+	if (!agp_backend_acquire(pci->pdev)) {
+		nvkm_error(&pci->subdev, "failed to acquire agp\n");
+		return -ENODEV;
+	}
+
+	agp_enable(pci->agp.bridge, pci->agp.mode);
+	pci->agp.acquired = true;
+	return 0;
+}
+
+void
+nvkm_agp_dtor(struct nvkm_pci *pci)
+{
+	arch_phys_wc_del(pci->agp.mtrr);
+}
+
+void
+nvkm_agp_ctor(struct nvkm_pci *pci)
+{
+	const struct nvkm_device_agp_quirk *quirk = nvkm_device_agp_quirks;
+	struct nvkm_subdev *subdev = &pci->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct agp_kern_info info;
+	int mode = -1;
+
+#ifdef __powerpc__
+	/* Disable AGP by default on all PowerPC machines for now. At
+	 * least some UniNorth-2 AGP bridges are known to be broken:
+	 * DMA from the host to the card works just fine, but writeback
+	 * from the card to the host goes straight to memory
+	 * untranslated, bypassing the GATT somehow, making them quite
+	 * painful to deal with.
+	 */
+	mode = 0;
+#endif
+	mode = nvkm_longopt(device->cfgopt, "NvAGP", mode);
+
+	/* acquire the bridge temporarily, so that we can copy its info */
+	if (!(pci->agp.bridge = agp_backend_acquire(pci->pdev))) {
+		nvkm_warn(subdev, "failed to acquire agp\n");
+		return;
+	}
+	agp_copy_info(pci->agp.bridge, &info);
+	agp_backend_release(pci->agp.bridge);
+
+	pci->agp.mode = info.mode;
+	pci->agp.base = info.aper_base;
+	pci->agp.size = info.aper_size * 1024 * 1024;
+	pci->agp.cma  = info.cant_use_aperture;
+	pci->agp.mtrr = -1;
+
+	/* determine if bridge + chipset combination needs a workaround */
+	while (quirk->hostbridge_vendor) {
+		if (info.device->vendor == quirk->hostbridge_vendor &&
+		    info.device->device == quirk->hostbridge_device &&
+		    pci->pdev->vendor == quirk->chip_vendor &&
+		    pci->pdev->device == quirk->chip_device) {
+			nvkm_info(subdev, "forcing default agp mode to %dX, "
+					  "use NvAGP=<mode> to override\n",
+				  quirk->mode);
+			mode = quirk->mode;
+			break;
+		}
+		quirk++;
+	}
+
+	/* apply quirk / user-specified mode */
+	if (mode >= 1) {
+		if (pci->agp.mode & 0x00000008)
+			mode /= 4; /* AGPv3 */
+		pci->agp.mode &= ~0x00000007;
+		pci->agp.mode |= (mode & 0x7);
+	} else
+	if (mode == 0) {
+		pci->agp.bridge = NULL;
+		return;
+	}
+
+	/* fast writes appear to be broken on nv18; they make the card
+	 * lock up randomly.
+	 */
+	if (device->chipset == 0x18)
+		pci->agp.mode &= ~PCI_AGP_COMMAND_FW;
+
+	pci->agp.mtrr = arch_phys_wc_add(pci->agp.base, pci->agp.size);
+}
+#endif
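
nvkm_agp_ctor() above matches the host bridge and GPU against a zero-terminated quirk table before settling on an AGP mode. The lookup reduces to the following standalone sketch, where struct agp_quirk mirrors nvkm_device_agp_quirk and the single entry reuses the VIA Apollo PRO133x / GeForce FX 5600 Ultra IDs from the driver (0x1106 and 0x10de being the numeric VIA/NVIDIA vendor IDs):

#include <stdint.h>
#include <stdio.h>

struct agp_quirk {
	uint16_t host_vendor, host_device;	/* AGP host bridge */
	uint16_t chip_vendor, chip_device;	/* the GPU itself  */
	int mode;				/* forced AGP rate */
};

static const struct agp_quirk quirks[] = {
	{ 0x1106, 0x0691, 0x10de, 0x0311, 2 },	/* VIA + FX 5600 Ultra */
	{ 0 },					/* zero terminator     */
};

static int quirk_mode(uint16_t hv, uint16_t hd, uint16_t cv, uint16_t cd)
{
	const struct agp_quirk *q;
	for (q = quirks; q->host_vendor; q++) {
		if (q->host_vendor == hv && q->host_device == hd &&
		    q->chip_vendor == cv && q->chip_device == cd)
			return q->mode;	/* workaround needed */
	}
	return -1;	/* no quirk: keep the bridge's reported mode */
}

int main(void)
{
	printf("mode %d\n", quirk_mode(0x1106, 0x0691, 0x10de, 0x0311));
	return 0;
}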
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
new file mode 100644
index 0000000..df2dd08
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
@@ -0,0 +1,18 @@
+#include "priv.h"
+#if defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))
+#ifndef __NVKM_PCI_AGP_H__
+#define __NVKM_PCI_AGP_H__
+
+void nvkm_agp_ctor(struct nvkm_pci *);
+void nvkm_agp_dtor(struct nvkm_pci *);
+void nvkm_agp_preinit(struct nvkm_pci *);
+int nvkm_agp_init(struct nvkm_pci *);
+void nvkm_agp_fini(struct nvkm_pci *);
+#endif
+#else
+static inline void nvkm_agp_ctor(struct nvkm_pci *pci) {}
+static inline void nvkm_agp_dtor(struct nvkm_pci *pci) {}
+static inline void nvkm_agp_preinit(struct nvkm_pci *pci) {}
+static inline int nvkm_agp_init(struct nvkm_pci *pci) { return -ENOSYS; }
+static inline void nvkm_agp_fini(struct nvkm_pci *pci) {}
+#endif
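
agp.h follows the usual kernel pattern for optional features: real prototypes when AGP support is reachable (including the CONFIG_AGP_MODULE && MODULE case), and empty static inline stubs otherwise, so base.c can call nvkm_agp_*() unconditionally with no #ifdefs at the call sites. The pattern in isolation, with widget_init and HAVE_WIDGET as hypothetical names (build with -DHAVE_WIDGET only when a real implementation is linked in):

#include <stdio.h>

#if defined(HAVE_WIDGET)
void widget_init(void);			/* real implementation elsewhere */
#else
static inline void widget_init(void) {}	/* compiles away to nothing */
#endif

int main(void)
{
	widget_init();	/* identical call site either way */
	printf("ok\n");
	return 0;
}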
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
new file mode 100644
index 0000000..d1c148e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+#include "agp.h"
+
+#include <core/option.h>
+#include <core/pci.h>
+#include <subdev/mc.h>
+
+u32
+nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
+{
+	return pci->func->rd32(pci, addr);
+}
+
+void
+nvkm_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
+{
+	pci->func->wr08(pci, addr, data);
+}
+
+void
+nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
+{
+	pci->func->wr32(pci, addr, data);
+}
+
+void
+nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow)
+{
+	u32 data = nvkm_pci_rd32(pci, 0x0050);
+	if (shadow)
+		data |=  0x00000001;
+	else
+		data &= ~0x00000001;
+	nvkm_pci_wr32(pci, 0x0050, data);
+}
+
+static irqreturn_t
+nvkm_pci_intr(int irq, void *arg)
+{
+	struct nvkm_pci *pci = arg;
+	struct nvkm_mc *mc = pci->subdev.device->mc;
+	bool handled = false;
+	if (likely(mc)) {
+		nvkm_mc_intr_unarm(mc);
+		if (pci->msi)
+			pci->func->msi_rearm(pci);
+		nvkm_mc_intr(mc, &handled);
+		nvkm_mc_intr_rearm(mc);
+	}
+	return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int
+nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_pci *pci = nvkm_pci(subdev);
+
+	if (pci->irq >= 0) {
+		free_irq(pci->irq, pci);
+		pci->irq = -1;
+	}
+
+	if (pci->agp.bridge)
+		nvkm_agp_fini(pci);
+
+	return 0;
+}
+
+static int
+nvkm_pci_preinit(struct nvkm_subdev *subdev)
+{
+	struct nvkm_pci *pci = nvkm_pci(subdev);
+	if (pci->agp.bridge)
+		nvkm_agp_preinit(pci);
+	return 0;
+}
+
+static int
+nvkm_pci_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_pci *pci = nvkm_pci(subdev);
+	struct pci_dev *pdev = pci->pdev;
+	int ret;
+
+	if (pci->agp.bridge) {
+		ret = nvkm_agp_init(pci);
+		if (ret)
+			return ret;
+	}
+
+	ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
+	if (ret)
+		return ret;
+
+	pci->irq = pdev->irq;
+	return ret;
+}
+
+static void *
+nvkm_pci_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_pci *pci = nvkm_pci(subdev);
+	nvkm_agp_dtor(pci);
+	if (pci->msi)
+		pci_disable_msi(pci->pdev);
+	return nvkm_pci(subdev);
+}
+
+static const struct nvkm_subdev_func
+nvkm_pci_func = {
+	.dtor = nvkm_pci_dtor,
+	.preinit = nvkm_pci_preinit,
+	.init = nvkm_pci_init,
+	.fini = nvkm_pci_fini,
+};
+
+int
+nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_pci **ppci)
+{
+	struct nvkm_pci *pci;
+
+	if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_pci_func, device, index, 0, &pci->subdev);
+	pci->func = func;
+	pci->pdev = device->func->pci(device)->pdev;
+	pci->irq = -1;
+
+	if (device->type == NVKM_DEVICE_AGP)
+		nvkm_agp_ctor(pci);
+
+	switch (pci->pdev->device & 0x0ff0) {
+	case 0x00f0:
+	case 0x02e0:
+		/* BR02? No idea yet how exactly these should be handled */
+		break;
+	default:
+		switch (device->chipset) {
+		case 0xaa:
+			/* reported broken, nv also disables it */
+			break;
+		default:
+			pci->msi = true;
+			break;
+		}
+	}
+
+	pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
+	if (pci->msi && func->msi_rearm) {
+		pci->msi = pci_enable_msi(pci->pdev) == 0;
+		if (pci->msi)
+			nvkm_debug(&pci->subdev, "MSI enabled\n");
+	} else {
+		pci->msi = false;
+	}
+
+	return 0;
+}
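
nvkm_pci_intr() above encodes the interrupt flow: sources are unarmed, the MSI is re-armed before dispatch (MSIs behave edge-like, so re-arming first means an event arriving mid-handler can still raise a fresh message), the MC dispatches to subdevs, then sources are re-armed. A sketch of just that sequencing, with mc_intr_unarm() and friends as hypothetical stand-ins for the nvkm_mc_* calls:

#include <stdbool.h>
#include <stdio.h>

static void mc_intr_unarm(void)  { puts("mask interrupt sources"); }
static void msi_rearm(void)      { puts("rearm MSI"); }
static void mc_intr(bool *h)     { puts("dispatch to subdevs"); *h = true; }
static void mc_intr_rearm(void)  { puts("unmask interrupt sources"); }

static int irq_handler(bool msi)
{
	bool handled = false;
	mc_intr_unarm();
	if (msi)
		msi_rearm();	/* before dispatch, not after */
	mc_intr(&handled);
	mc_intr_rearm();
	return handled ? 1 : 0;	/* IRQ_HANDLED : IRQ_NONE */
}

int main(void) { return !irq_handler(true); }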
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
new file mode 100644
index 0000000..86f8226
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gf100.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static void
+gf100_pci_msi_rearm(struct nvkm_pci *pci)
+{
+	nvkm_pci_wr08(pci, 0x0704, 0xff);
+}
+
+static const struct nvkm_pci_func
+gf100_pci_func = {
+	.rd32 = nv40_pci_rd32,
+	.wr08 = nv40_pci_wr08,
+	.wr32 = nv40_pci_wr32,
+	.msi_rearm = gf100_pci_msi_rearm,
+};
+
+int
+gf100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+	return nvkm_pci_new_(&gf100_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
new file mode 100644
index 0000000..5b1ed42
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv04.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static u32
+nv04_pci_rd32(struct nvkm_pci *pci, u16 addr)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	return nvkm_rd32(device, 0x001800 + addr);
+}
+
+static void
+nv04_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	nvkm_wr08(device, 0x001800 + addr, data);
+}
+
+static void
+nv04_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	nvkm_wr32(device, 0x001800 + addr, data);
+}
+
+static const struct nvkm_pci_func
+nv04_pci_func = {
+	.rd32 = nv04_pci_rd32,
+	.wr08 = nv04_pci_wr08,
+	.wr32 = nv04_pci_wr32,
+};
+
+int
+nv04_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+	return nvkm_pci_new_(&nv04_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
new file mode 100644
index 0000000..090a187
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv40.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+u32
+nv40_pci_rd32(struct nvkm_pci *pci, u16 addr)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	return nvkm_rd32(device, 0x088000 + addr);
+}
+
+void
+nv40_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	nvkm_wr08(device, 0x088000 + addr, data);
+}
+
+void
+nv40_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	nvkm_wr32(device, 0x088000 + addr, data);
+}
+
+static void
+nv40_pci_msi_rearm(struct nvkm_pci *pci)
+{
+	nvkm_pci_wr08(pci, 0x0068, 0xff);
+}
+
+static const struct nvkm_pci_func
+nv40_pci_func = {
+	.rd32 = nv40_pci_rd32,
+	.wr08 = nv40_pci_wr08,
+	.wr32 = nv40_pci_wr32,
+	.msi_rearm = nv40_pci_msi_rearm,
+};
+
+int
+nv40_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+	return nvkm_pci_new_(&nv40_pci_func, device, index, ppci);
+}
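
The nv04 and nv40 implementations differ only in where the mirrored PCI config window sits in MMIO: 0x001800 on the earlier chips versus 0x088000 from nv40 onwards (nv4c, nv50 and gf100 all reuse the nv40 accessors). That dispatch, modelled without the function tables; the ">= 0x40" cut-off is a simplification, since the driver selects per-chip tables instead:

#include <stdint.h>
#include <stdio.h>

static uint32_t window_base(int chipset)
{
	return chipset >= 0x40 ? 0x088000 : 0x001800;
}

static uint32_t pci_cfg_mmio_addr(int chipset, uint16_t cfg_addr)
{
	return window_base(chipset) + cfg_addr;
}

int main(void)
{
	/* e.g. the PCI command register (0x04) on an nv40-class chip */
	printf("%#x\n", (unsigned)pci_cfg_mmio_addr(0x40, 0x0004));
	return 0;	/* prints 0x88004 */
}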
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
new file mode 100644
index 0000000..1f1b26b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv4c.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static const struct nvkm_pci_func
+nv4c_pci_func = {
+	.rd32 = nv40_pci_rd32,
+	.wr08 = nv40_pci_wr08,
+	.wr32 = nv40_pci_wr32,
+};
+
+int
+nv4c_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+	return nvkm_pci_new_(&nv4c_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c
new file mode 100644
index 0000000..3e167d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/nv50.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+#include <core/pci.h>
+
+/* MSI re-arm through the PRI appears to be broken on the original G80,
+ * so we access it via alternate PCI config space mechanisms.
+ */
+static void
+nv50_pci_msi_rearm(struct nvkm_pci *pci)
+{
+	struct nvkm_device *device = pci->subdev.device;
+	struct pci_dev *pdev = device->func->pci(device)->pdev;
+	pci_write_config_byte(pdev, 0x68, 0xff);
+}
+
+static const struct nvkm_pci_func
+nv50_pci_func = {
+	.rd32 = nv40_pci_rd32,
+	.wr08 = nv40_pci_wr08,
+	.wr32 = nv40_pci_wr32,
+	.msi_rearm = nv50_pci_msi_rearm,
+};
+
+int
+nv50_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+	return nvkm_pci_new_(&nv50_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
new file mode 100644
index 0000000..d22c2c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -0,0 +1,19 @@
+#ifndef __NVKM_PCI_PRIV_H__
+#define __NVKM_PCI_PRIV_H__
+#define nvkm_pci(p) container_of((p), struct nvkm_pci, subdev)
+#include <subdev/pci.h>
+
+int nvkm_pci_new_(const struct nvkm_pci_func *, struct nvkm_device *,
+		  int index, struct nvkm_pci **);
+
+struct nvkm_pci_func {
+	u32 (*rd32)(struct nvkm_pci *, u16 addr);
+	void (*wr08)(struct nvkm_pci *, u16 addr, u8 data);
+	void (*wr32)(struct nvkm_pci *, u16 addr, u32 data);
+	void (*msi_rearm)(struct nvkm_pci *);
+};
+
+u32 nv40_pci_rd32(struct nvkm_pci *, u16);
+void nv40_pci_wr08(struct nvkm_pci *, u16, u8);
+void nv40_pci_wr32(struct nvkm_pci *, u16, u32);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
index 7081d6a..88b643b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/Kbuild
@@ -2,8 +2,9 @@
 nvkm-y += nvkm/subdev/pmu/memx.o
 nvkm-y += nvkm/subdev/pmu/gt215.o
 nvkm-y += nvkm/subdev/pmu/gf100.o
-nvkm-y += nvkm/subdev/pmu/gf110.o
+nvkm-y += nvkm/subdev/pmu/gf119.o
 nvkm-y += nvkm/subdev/pmu/gk104.o
 nvkm-y += nvkm/subdev/pmu/gk110.o
 nvkm-y += nvkm/subdev/pmu/gk208.o
 nvkm-y += nvkm/subdev/pmu/gk20a.o
+nvkm-y += nvkm/subdev/pmu/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index 054b2d2..27a79c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -28,21 +28,25 @@
 void
 nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 {
-	const struct nvkm_pmu_impl *impl = (void *)nv_oclass(pmu);
-	if (impl->pgob)
-		impl->pgob(pmu, enable);
+	if (pmu->func->pgob)
+		pmu->func->pgob(pmu, enable);
 }
 
-static int
+int
 nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
 	      u32 process, u32 message, u32 data0, u32 data1)
 {
-	struct nvkm_subdev *subdev = nv_subdev(pmu);
+	struct nvkm_subdev *subdev = &pmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 addr;
 
 	/* wait for a free slot in the fifo */
-	addr  = nv_rd32(pmu, 0x10a4a0);
-	if (!nv_wait_ne(pmu, 0x10a4b0, 0xffffffff, addr ^ 8))
+	addr  = nvkm_rd32(device, 0x10a4a0);
+	if (nvkm_msec(device, 2000,
+		u32 tmp = nvkm_rd32(device, 0x10a4b0);
+		if (tmp != (addr ^ 8))
+			break;
+	) < 0)
 		return -EBUSY;
 
 	/* we currently only support a single process at a time waiting
@@ -57,20 +61,20 @@
 
 	/* acquire data segment access */
 	do {
-		nv_wr32(pmu, 0x10a580, 0x00000001);
-	} while (nv_rd32(pmu, 0x10a580) != 0x00000001);
+		nvkm_wr32(device, 0x10a580, 0x00000001);
+	} while (nvkm_rd32(device, 0x10a580) != 0x00000001);
 
 	/* write the packet */
-	nv_wr32(pmu, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
+	nvkm_wr32(device, 0x10a1c0, 0x01000000 | (((addr & 0x07) << 4) +
 				pmu->send.base));
-	nv_wr32(pmu, 0x10a1c4, process);
-	nv_wr32(pmu, 0x10a1c4, message);
-	nv_wr32(pmu, 0x10a1c4, data0);
-	nv_wr32(pmu, 0x10a1c4, data1);
-	nv_wr32(pmu, 0x10a4a0, (addr + 1) & 0x0f);
+	nvkm_wr32(device, 0x10a1c4, process);
+	nvkm_wr32(device, 0x10a1c4, message);
+	nvkm_wr32(device, 0x10a1c4, data0);
+	nvkm_wr32(device, 0x10a1c4, data1);
+	nvkm_wr32(device, 0x10a4a0, (addr + 1) & 0x0f);
 
 	/* release data segment access */
-	nv_wr32(pmu, 0x10a580, 0x00000000);
+	nvkm_wr32(device, 0x10a580, 0x00000000);
 
 	/* wait for reply, if requested */
 	if (reply) {
@@ -87,29 +91,31 @@
 nvkm_pmu_recv(struct work_struct *work)
 {
 	struct nvkm_pmu *pmu = container_of(work, struct nvkm_pmu, recv.work);
+	struct nvkm_subdev *subdev = &pmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 process, message, data0, data1;
 
 	/* nothing to do if GET == PUT */
-	u32 addr =  nv_rd32(pmu, 0x10a4cc);
-	if (addr == nv_rd32(pmu, 0x10a4c8))
+	u32 addr =  nvkm_rd32(device, 0x10a4cc);
+	if (addr == nvkm_rd32(device, 0x10a4c8))
 		return;
 
 	/* acquire data segment access */
 	do {
-		nv_wr32(pmu, 0x10a580, 0x00000002);
-	} while (nv_rd32(pmu, 0x10a580) != 0x00000002);
+		nvkm_wr32(device, 0x10a580, 0x00000002);
+	} while (nvkm_rd32(device, 0x10a580) != 0x00000002);
 
 	/* read the packet */
-	nv_wr32(pmu, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
+	nvkm_wr32(device, 0x10a1c0, 0x02000000 | (((addr & 0x07) << 4) +
 				pmu->recv.base));
-	process = nv_rd32(pmu, 0x10a1c4);
-	message = nv_rd32(pmu, 0x10a1c4);
-	data0   = nv_rd32(pmu, 0x10a1c4);
-	data1   = nv_rd32(pmu, 0x10a1c4);
-	nv_wr32(pmu, 0x10a4cc, (addr + 1) & 0x0f);
+	process = nvkm_rd32(device, 0x10a1c4);
+	message = nvkm_rd32(device, 0x10a1c4);
+	data0   = nvkm_rd32(device, 0x10a1c4);
+	data1   = nvkm_rd32(device, 0x10a1c4);
+	nvkm_wr32(device, 0x10a4cc, (addr + 1) & 0x0f);
 
 	/* release data segment access */
-	nv_wr32(pmu, 0x10a580, 0x00000000);
+	nvkm_wr32(device, 0x10a580, 0x00000000);
 
 	/* wake process if it's waiting on a synchronous reply */
 	if (pmu->recv.process) {
@@ -126,143 +132,149 @@
 	/* right now there are no other expected responses from the engine,
 	 * so assume that any unexpected message is an error.
 	 */
-	nv_warn(pmu, "%c%c%c%c 0x%08x 0x%08x 0x%08x 0x%08x\n",
-		(char)((process & 0x000000ff) >>  0),
-		(char)((process & 0x0000ff00) >>  8),
-		(char)((process & 0x00ff0000) >> 16),
-		(char)((process & 0xff000000) >> 24),
-		process, message, data0, data1);
+	nvkm_warn(subdev, "%c%c%c%c %08x %08x %08x %08x\n",
+		  (char)((process & 0x000000ff) >>  0),
+		  (char)((process & 0x0000ff00) >>  8),
+		  (char)((process & 0x00ff0000) >> 16),
+		  (char)((process & 0xff000000) >> 24),
+		  process, message, data0, data1);
 }
 
 static void
 nvkm_pmu_intr(struct nvkm_subdev *subdev)
 {
-	struct nvkm_pmu *pmu = (void *)subdev;
-	u32 disp = nv_rd32(pmu, 0x10a01c);
-	u32 intr = nv_rd32(pmu, 0x10a008) & disp & ~(disp >> 16);
+	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+	struct nvkm_device *device = pmu->subdev.device;
+	u32 disp = nvkm_rd32(device, 0x10a01c);
+	u32 intr = nvkm_rd32(device, 0x10a008) & disp & ~(disp >> 16);
 
 	if (intr & 0x00000020) {
-		u32 stat = nv_rd32(pmu, 0x10a16c);
+		u32 stat = nvkm_rd32(device, 0x10a16c);
 		if (stat & 0x80000000) {
-			nv_error(pmu, "UAS fault at 0x%06x addr 0x%08x\n",
-				 stat & 0x00ffffff, nv_rd32(pmu, 0x10a168));
-			nv_wr32(pmu, 0x10a16c, 0x00000000);
+			nvkm_error(subdev, "UAS fault at %06x addr %08x\n",
+				   stat & 0x00ffffff,
+				   nvkm_rd32(device, 0x10a168));
+			nvkm_wr32(device, 0x10a16c, 0x00000000);
 			intr &= ~0x00000020;
 		}
 	}
 
 	if (intr & 0x00000040) {
 		schedule_work(&pmu->recv.work);
-		nv_wr32(pmu, 0x10a004, 0x00000040);
+		nvkm_wr32(device, 0x10a004, 0x00000040);
 		intr &= ~0x00000040;
 	}
 
 	if (intr & 0x00000080) {
-		nv_info(pmu, "wr32 0x%06x 0x%08x\n", nv_rd32(pmu, 0x10a7a0),
-						     nv_rd32(pmu, 0x10a7a4));
-		nv_wr32(pmu, 0x10a004, 0x00000080);
+		nvkm_info(subdev, "wr32 %06x %08x\n",
+			  nvkm_rd32(device, 0x10a7a0),
+			  nvkm_rd32(device, 0x10a7a4));
+		nvkm_wr32(device, 0x10a004, 0x00000080);
 		intr &= ~0x00000080;
 	}
 
 	if (intr) {
-		nv_error(pmu, "intr 0x%08x\n", intr);
-		nv_wr32(pmu, 0x10a004, intr);
+		nvkm_error(subdev, "intr %08x\n", intr);
+		nvkm_wr32(device, 0x10a004, intr);
 	}
 }
 
-int
-_nvkm_pmu_fini(struct nvkm_object *object, bool suspend)
+static int
+nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	struct nvkm_pmu *pmu = (void *)object;
+	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+	struct nvkm_device *device = pmu->subdev.device;
 
-	nv_wr32(pmu, 0x10a014, 0x00000060);
+	nvkm_wr32(device, 0x10a014, 0x00000060);
 	flush_work(&pmu->recv.work);
-
-	return nvkm_subdev_fini(&pmu->base, suspend);
+	return 0;
 }
 
-int
-_nvkm_pmu_init(struct nvkm_object *object)
+static int
+nvkm_pmu_init(struct nvkm_subdev *subdev)
 {
-	const struct nvkm_pmu_impl *impl = (void *)object->oclass;
-	struct nvkm_pmu *pmu = (void *)object;
-	int ret, i;
-
-	ret = nvkm_subdev_init(&pmu->base);
-	if (ret)
-		return ret;
-
-	nv_subdev(pmu)->intr = nvkm_pmu_intr;
-	pmu->message = nvkm_pmu_send;
-	pmu->pgob = nvkm_pmu_pgob;
+	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+	struct nvkm_device *device = pmu->subdev.device;
+	int i;
 
 	/* prevent previous ucode from running, wait for idle, reset */
-	nv_wr32(pmu, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
-	nv_wait(pmu, 0x10a04c, 0xffffffff, 0x00000000);
-	nv_mask(pmu, 0x000200, 0x00002000, 0x00000000);
-	nv_mask(pmu, 0x000200, 0x00002000, 0x00002000);
-	nv_rd32(pmu, 0x000200);
-	nv_wait(pmu, 0x10a10c, 0x00000006, 0x00000000);
+	nvkm_wr32(device, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
+	nvkm_msec(device, 2000,
+		if (!nvkm_rd32(device, 0x10a04c))
+			break;
+	);
+	nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
+	nvkm_rd32(device, 0x000200);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
+			break;
+	);
 
 	/* upload data segment */
-	nv_wr32(pmu, 0x10a1c0, 0x01000000);
-	for (i = 0; i < impl->data.size / 4; i++)
-		nv_wr32(pmu, 0x10a1c4, impl->data.data[i]);
+	nvkm_wr32(device, 0x10a1c0, 0x01000000);
+	for (i = 0; i < pmu->func->data.size / 4; i++)
+		nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);
 
 	/* upload code segment */
-	nv_wr32(pmu, 0x10a180, 0x01000000);
-	for (i = 0; i < impl->code.size / 4; i++) {
+	nvkm_wr32(device, 0x10a180, 0x01000000);
+	for (i = 0; i < pmu->func->code.size / 4; i++) {
 		if ((i & 0x3f) == 0)
-			nv_wr32(pmu, 0x10a188, i >> 6);
-		nv_wr32(pmu, 0x10a184, impl->code.data[i]);
+			nvkm_wr32(device, 0x10a188, i >> 6);
+		nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
 	}
 
 	/* start it running */
-	nv_wr32(pmu, 0x10a10c, 0x00000000);
-	nv_wr32(pmu, 0x10a104, 0x00000000);
-	nv_wr32(pmu, 0x10a100, 0x00000002);
+	nvkm_wr32(device, 0x10a10c, 0x00000000);
+	nvkm_wr32(device, 0x10a104, 0x00000000);
+	nvkm_wr32(device, 0x10a100, 0x00000002);
 
 	/* wait for valid host->pmu ring configuration */
-	if (!nv_wait_ne(pmu, 0x10a4d0, 0xffffffff, 0x00000000))
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x10a4d0))
+			break;
+	) < 0)
 		return -EBUSY;
-	pmu->send.base = nv_rd32(pmu, 0x10a4d0) & 0x0000ffff;
-	pmu->send.size = nv_rd32(pmu, 0x10a4d0) >> 16;
+	pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
+	pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;
 
 	/* wait for valid pmu->host ring configuration */
-	if (!nv_wait_ne(pmu, 0x10a4dc, 0xffffffff, 0x00000000))
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x10a4dc))
+			break;
+	) < 0)
 		return -EBUSY;
-	pmu->recv.base = nv_rd32(pmu, 0x10a4dc) & 0x0000ffff;
-	pmu->recv.size = nv_rd32(pmu, 0x10a4dc) >> 16;
+	pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
+	pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
 
-	nv_wr32(pmu, 0x10a010, 0x000000e0);
+	nvkm_wr32(device, 0x10a010, 0x000000e0);
 	return 0;
 }
 
+static void *
+nvkm_pmu_dtor(struct nvkm_subdev *subdev)
+{
+	return nvkm_pmu(subdev);
+}
+
+static const struct nvkm_subdev_func
+nvkm_pmu = {
+	.dtor = nvkm_pmu_dtor,
+	.init = nvkm_pmu_init,
+	.fini = nvkm_pmu_fini,
+	.intr = nvkm_pmu_intr,
+};
+
 int
-nvkm_pmu_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, int length, void **pobject)
+nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
+	      int index, struct nvkm_pmu **ppmu)
 {
 	struct nvkm_pmu *pmu;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PMU",
-				  "pmu", length, pobject);
-	pmu = *pobject;
-	if (ret)
-		return ret;
-
+	if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_subdev_ctor(&nvkm_pmu, device, index, 0, &pmu->subdev);
+	pmu->func = func;
 	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
 	init_waitqueue_head(&pmu->recv.wait);
 	return 0;
 }
-
-int
-_nvkm_pmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct nvkm_pmu *pmu;
-	int ret = nvkm_pmu_create(parent, engine, oclass, &pmu);
-	*pobject = nv_object(pmu);
-	return ret;
-}
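
The converted nvkm_pmu_send()/nvkm_pmu_recv() keep the host<->PMU mailbox protocol intact: eight 16-byte slots (the "(addr & 0x07) << 4" offset), 4-bit GET/PUT counters advanced with "& 0x0f" so the extra bit tracks wraps, empty when GET == PUT, and an apparent full condition of GET == PUT ^ 8, which is what send polls to clear before queueing. Those index rules in a standalone model:

#include <stdint.h>
#include <stdio.h>

#define SLOT_COUNT 8	/* (addr & 0x07) << 4 selects a 16-byte slot */
#define IDX_MASK  0x0f	/* 4-bit counter: 3 slot bits plus a wrap bit */

static uint32_t idx_next(uint32_t idx) { return (idx + 1) & IDX_MASK; }
static uint32_t idx_slot(uint32_t idx) { return idx & (SLOT_COUNT - 1); }

/* GET == PUT: empty ("nothing to do if GET == PUT" in nvkm_pmu_recv) */
static int ring_empty(uint32_t get, uint32_t put) { return get == put; }

/* GET == PUT ^ 8: full (what nvkm_pmu_send waits to clear) */
static int ring_full(uint32_t get, uint32_t put) { return get == (put ^ 8); }

int main(void)
{
	uint32_t get = 0, put = 0;
	int i;

	for (i = 0; i < SLOT_COUNT; i++)	/* fill all eight slots */
		put = idx_next(put);

	printf("slot=%u empty=%d full=%d\n",
	       (unsigned)idx_slot(put), ring_empty(get, put),
	       ring_full(get, put));		/* slot=0 empty=0 full=1 */
	return 0;
}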
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4 b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4
deleted file mode 100644
index ae9c3f1..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#define NVKM_PPWR_CHIPSET GF119
-#define HW_TICKS_PER_US 324
-
-//#define NVKM_FALCON_PC24
-#define NVKM_FALCON_UNSHIFTED_IO
-//#define NVKM_FALCON_MMIO_UAS
-//#define NVKM_FALCON_MMIO_TRAP
-
-#include "macros.fuc"
-
-.section #gf110_pmu_data
-#define INCLUDE_PROC
-#include "kernel.fuc"
-#include "arith.fuc"
-#include "host.fuc"
-#include "memx.fuc"
-#include "perf.fuc"
-#include "i2c_.fuc"
-#include "test.fuc"
-#include "idle.fuc"
-#undef INCLUDE_PROC
-
-#define INCLUDE_DATA
-#include "kernel.fuc"
-#include "arith.fuc"
-#include "host.fuc"
-#include "memx.fuc"
-#include "perf.fuc"
-#include "i2c_.fuc"
-#include "test.fuc"
-#include "idle.fuc"
-#undef INCLUDE_DATA
-.align 256
-
-.section #gf110_pmu_code
-#define INCLUDE_CODE
-#include "kernel.fuc"
-#include "arith.fuc"
-#include "host.fuc"
-#include "memx.fuc"
-#include "perf.fuc"
-#include "i2c_.fuc"
-#include "test.fuc"
-#include "idle.fuc"
-#undef INCLUDE_CODE
-.align 256
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h
deleted file mode 100644
index a0c499e..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf110.fuc4.h
+++ /dev/null
@@ -1,1795 +0,0 @@
-uint32_t gf110_pmu_data[] = {
-/* 0x0000: proc_kern */
-	0x52544e49,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-/* 0x0058: proc_list_head */
-	0x54534f48,
-	0x0000049d,
-	0x00000446,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x584d454d,
-	0x0000068b,
-	0x0000067d,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x46524550,
-	0x0000068f,
-	0x0000068d,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x5f433249,
-	0x00000aaa,
-	0x0000094d,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x54534554,
-	0x00000acd,
-	0x00000aac,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x454c4449,
-	0x00000ad9,
-	0x00000ad7,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-/* 0x0268: proc_list_tail */
-/* 0x0268: time_prev */
-	0x00000000,
-/* 0x026c: time_next */
-	0x00000000,
-/* 0x0270: fifo_queue */
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-/* 0x02f0: rfifo_queue */
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-/* 0x0370: memx_func_head */
-	0x00000001,
-	0x00000000,
-	0x000004d3,
-/* 0x037c: memx_func_next */
-	0x00000002,
-	0x00000000,
-	0x00000554,
-	0x00000003,
-	0x00000002,
-	0x000005d8,
-	0x00040004,
-	0x00000000,
-	0x000005f4,
-	0x00010005,
-	0x00000000,
-	0x0000060e,
-	0x00010006,
-	0x00000000,
-	0x000005d3,
-	0x00000007,
-	0x00000000,
-	0x00000619,
-/* 0x03c4: memx_func_tail */
-/* 0x03c4: memx_ts_start */
-	0x00000000,
-/* 0x03c8: memx_ts_end */
-	0x00000000,
-/* 0x03cc: memx_data_head */
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-/* 0x0bcc: memx_data_tail */
-/* 0x0bcc: memx_train_head */
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-/* 0x0ccc: memx_train_tail */
-/* 0x0ccc: i2c_scl_map */
-	0x00000400,
-	0x00000800,
-	0x00001000,
-	0x00002000,
-	0x00004000,
-	0x00008000,
-	0x00010000,
-	0x00020000,
-	0x00040000,
-	0x00080000,
-/* 0x0cf4: i2c_sda_map */
-	0x00100000,
-	0x00200000,
-	0x00400000,
-	0x00800000,
-	0x01000000,
-	0x02000000,
-	0x04000000,
-	0x08000000,
-	0x10000000,
-	0x20000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-};
-
-uint32_t gf110_pmu_code[] = {
-	0x034d0ef5,
-/* 0x0004: rd32 */
-	0x07a007f1,
-	0xbd000ed0,
-	0x01d7f004,
-	0xf101d3f0,
-	0xd007ac07,
-	0x04bd000d,
-/* 0x001c: rd32_wait */
-	0x07acd7f1,
-	0xf100ddcf,
-	0xf47000d4,
-	0xd7f1f51b,
-	0xddcf07a4,
-/* 0x0033: wr32 */
-	0xf100f800,
-	0xd007a007,
-	0x04bd000e,
-	0x07a407f1,
-	0xbd000dd0,
-	0x02d7f004,
-	0xf0f0d5f0,
-	0x07f101d3,
-	0x0dd007ac,
-/* 0x0057: wr32_wait */
-	0xf104bd00,
-	0xcf07acd7,
-	0xd4f100dd,
-	0x1bf47000,
-/* 0x0067: nsec */
-	0xf900f8f5,
-	0xf080f990,
-	0x88cf2c87,
-/* 0x0071: nsec_loop */
-	0x2c97f000,
-	0xbb0099cf,
-	0x9eb80298,
-	0xf41ef406,
-	0x90fc80fc,
-/* 0x0086: wait */
-	0x90f900f8,
-	0x87f080f9,
-	0x0088cf2c,
-/* 0x0090: wait_loop */
-	0xf402eeb9,
-	0xdab90421,
-	0x04adfd02,
-	0xf406acb8,
-	0x97f0120b,
-	0x0099cf2c,
-	0xb80298bb,
-	0x1ef4069b,
-/* 0x00b1: wait_done */
-	0xfc80fce2,
-/* 0x00b7: intr_watchdog */
-	0x9800f890,
-	0x96b003e9,
-	0x2a0bf400,
-	0xbb9a0a98,
-	0x1cf4029a,
-	0x01d7f00f,
-	0x028c21f5,
-	0x0ef494bd,
-/* 0x00d5: intr_watchdog_next_time */
-	0x9b0a9815,
-	0xf400a6b0,
-	0x9ab8090b,
-	0x061cf406,
-/* 0x00e4: intr_watchdog_next_time_set */
-/* 0x00e7: intr_watchdog_next_proc */
-	0x809b0980,
-	0xe0b603e9,
-	0x68e6b158,
-	0xc61bf402,
-/* 0x00f6: intr */
-	0x00f900f8,
-	0x80f904bd,
-	0xa0f990f9,
-	0xc0f9b0f9,
-	0xe0f9d0f9,
-	0xf7f0f0f9,
-	0x0188fe00,
-	0x87f180f9,
-	0x88cf05d0,
-	0x0180b600,
-	0x05d007f1,
-	0xbd0008d0,
-	0x0887f004,
-	0xc40088cf,
-	0x0bf40289,
-	0x9b008020,
-	0xf458e7f0,
-	0x0998b721,
-	0x0096b09b,
-	0xf00e0bf4,
-	0x09d03407,
-	0x8004bd00,
-/* 0x014e: intr_skip_watchdog */
-	0x89e49a09,
-	0x0bf40800,
-	0x8897f13c,
-	0x0099cf06,
-	0xf4029ac4,
-	0xc7f1260b,
-	0xcccf04c0,
-	0xf1c0f900,
-	0xf14f48e7,
-	0xf05453e3,
-	0x21f500d7,
-	0xc0fc02f1,
-	0x04c007f1,
-	0xbd000cd0,
-/* 0x0185: intr_subintr_skip_fifo */
-	0x8807f104,
-	0x0009d006,
-/* 0x018e: intr_skip_subintr */
-	0x89c404bd,
-	0x070bf420,
-	0xffbfa4f1,
-/* 0x0198: intr_skip_pause */
-	0xf44089c4,
-	0xa4f1070b,
-/* 0x01a2: intr_skip_user0 */
-	0x07f0ffbf,
-	0x0008d004,
-	0x80fc04bd,
-	0xfc0088fe,
-	0xfce0fcf0,
-	0xfcc0fcd0,
-	0xfca0fcb0,
-	0xfc80fc90,
-	0x0032f400,
-/* 0x01c6: ticks_from_ns */
-	0xc0f901f8,
-	0xd7f1b0f9,
-	0xd3f00144,
-	0xb321f500,
-	0xe8ccec03,
-	0x00b4b003,
-	0xec120bf4,
-	0xf103e8ee,
-	0xf00144d7,
-	0x21f500d3,
-/* 0x01ee: ticks_from_ns_quit */
-	0xceb903b3,
-	0xfcb0fc02,
-/* 0x01f7: ticks_from_us */
-	0xf900f8c0,
-	0xf1b0f9c0,
-	0xf00144d7,
-	0x21f500d3,
-	0xceb903b3,
-	0x00b4b002,
-	0xbd050bf4,
-/* 0x0211: ticks_from_us_quit */
-	0xfcb0fce4,
-/* 0x0217: ticks_to_us */
-	0xf100f8c0,
-	0xf00144d7,
-	0xedff00d3,
-/* 0x0223: timer */
-	0xf900f8ec,
-	0xf480f990,
-	0xf8981032,
-	0x0086b003,
-	0xbd531cf4,
-	0x3807f084,
-	0xbd0008d0,
-	0x3487f004,
-	0x980088cf,
-	0x98bb9a09,
-	0x00e9bb02,
-	0xf003fe80,
-	0x88cf0887,
-	0x0284f000,
-	0xf0201bf4,
-	0x88cf3487,
-	0x06e0b800,
-	0xb8090bf4,
-	0x1cf406e8,
-/* 0x026d: timer_reset */
-	0x3407f00e,
-	0xbd000ed0,
-	0x9a0e8004,
-/* 0x0278: timer_enable */
-	0xf00187f0,
-	0x08d03807,
-/* 0x0283: timer_done */
-	0xf404bd00,
-	0x80fc1031,
-	0x00f890fc,
-/* 0x028c: send_proc */
-	0x90f980f9,
-	0x9805e898,
-	0x86f004e9,
-	0x0689b804,
-	0xc42a0bf4,
-	0x88940398,
-	0x1880b604,
-	0x98008ebb,
-	0x8a8000fa,
-	0x018d8000,
-	0x80028c80,
-	0x90b6038b,
-	0x0794f001,
-	0xf404e980,
-/* 0x02c6: send_done */
-	0x90fc0231,
-	0x00f880fc,
-/* 0x02cc: find */
-	0x87f080f9,
-	0x0131f458,
-/* 0x02d4: find_loop */
-	0xb8008a98,
-	0x0bf406ae,
-	0x5880b610,
-	0x026886b1,
-	0xf4f01bf4,
-/* 0x02ea: find_done */
-	0x8eb90132,
-	0xf880fc02,
-/* 0x02f1: send */
-	0xcc21f500,
-	0x9701f402,
-/* 0x02fa: recv */
-	0x90f900f8,
-	0xe89880f9,
-	0x04e99805,
-	0xb80132f4,
-	0x0bf40689,
-	0x0389c43d,
-	0xf00180b6,
-	0xe8800784,
-	0x02ea9805,
-	0x8ffef0f9,
-	0xb9f0f901,
-	0x999402ef,
-	0x00e9bb04,
-	0x9818e0b6,
-	0xec9803eb,
-	0x01ed9802,
-	0xf900ee98,
-	0xfef0fca5,
-	0x31f400f8,
-/* 0x0347: recv_done */
-	0xfcf0fc01,
-	0xf890fc80,
-/* 0x034d: init */
-	0x0817f100,
-	0x0011cf01,
-	0x010911e7,
-	0xfe0814b6,
-	0x17f10014,
-	0x13f000e0,
-	0x1c07f000,
-	0xbd0001d0,
-	0xff17f004,
-	0xd01407f0,
-	0x04bd0001,
-	0xf10217f0,
-	0xf0080015,
-	0x01d01007,
-	0xf104bd00,
-	0xf000f617,
-	0x10fe0013,
-	0x1031f400,
-	0xf00117f0,
-	0x01d03807,
-	0xf004bd00,
-/* 0x03a2: init_proc */
-	0xf19858f7,
-	0x0016b001,
-	0xf9fa0bf4,
-	0x58f0b615,
-/* 0x03b3: mulu32_32_64 */
-	0xf9f20ef4,
-	0xf920f910,
-	0x9540f930,
-	0xd29510e1,
-	0xbdc4bd10,
-	0xc0edffb4,
-	0xb9301dff,
-	0x34f10234,
-	0x34b6ffff,
-	0x1045b610,
-	0xbb00c3bb,
-	0xe2ff01b4,
-	0x0234b930,
-	0xffff34f1,
-	0xb61034b6,
-	0xc3bb1045,
-	0x01b4bb00,
-	0xbb3012ff,
-	0x40fc00b3,
-	0x20fc30fc,
-	0x00f810fc,
-/* 0x0404: host_send */
-	0x04b017f1,
-	0xf10011cf,
-	0xcf04a027,
-	0x12b80022,
-	0x2f0bf406,
-	0x94071ec4,
-	0xe0b704ee,
-	0xeb980270,
-	0x02ec9803,
-	0x9801ed98,
-	0x21f500ee,
-	0x10b602f1,
-	0x0f1ec401,
-	0x04b007f1,
-	0xbd000ed0,
-	0xc30ef404,
-/* 0x0444: host_send_done */
-/* 0x0446: host_recv */
-	0x17f100f8,
-	0x13f14e49,
-	0xe1b85254,
-	0xb30bf406,
-/* 0x0454: host_recv_wait */
-	0x04cc17f1,
-	0xf10011cf,
-	0xcf04c827,
-	0x16f00022,
-	0x0612b808,
-	0xc4ec0bf4,
-	0x34b60723,
-	0xf030b704,
-	0x033b8002,
-	0x80023c80,
-	0x3e80013d,
-	0x0120b600,
-	0xf10f24f0,
-	0xd004c807,
-	0x04bd0002,
-	0xf04027f0,
-	0x02d00007,
-	0xf804bd00,
-/* 0x049d: host_init */
-	0x8017f100,
-	0x1014b600,
-	0x027015f1,
-	0x04d007f1,
-	0xbd0001d0,
-	0x8017f104,
-	0x1014b600,
-	0x02f015f1,
-	0x04dc07f1,
-	0xbd0001d0,
-	0x0117f004,
-	0x04c407f1,
-	0xbd0001d0,
-/* 0x04d3: memx_func_enter */
-	0xf100f804,
-	0xf1162067,
-	0xf1f55d77,
-	0xb9ffff73,
-	0x21f4026e,
-	0x02d8b904,
-	0xf90487fd,
-	0xfc80f960,
-	0xf4e0fcd0,
-	0x77f13321,
-	0x73f1fffe,
-	0x6eb9ffff,
-	0x0421f402,
-	0xfd02d8b9,
-	0x60f90487,
-	0xd0fc80f9,
-	0x21f4e0fc,
-	0xf067f133,
-	0x026eb926,
-	0xb90421f4,
-	0x87fd02d8,
-	0xf960f904,
-	0xfcd0fc80,
-	0x3321f4e0,
-	0xf10467f0,
-	0xd007e007,
-	0x04bd0006,
-/* 0x053c: memx_func_enter_wait */
-	0x07c067f1,
-	0xf00066cf,
-	0x0bf40464,
-	0x2c67f0f6,
-	0x800066cf,
-	0x00f8f106,
-/* 0x0554: memx_func_leave */
-	0xcf2c67f0,
-	0x06800066,
-	0x0467f0f2,
-	0x07e407f1,
-	0xbd0006d0,
-/* 0x0569: memx_func_leave_wait */
-	0xc067f104,
-	0x0066cf07,
-	0xf40464f0,
-	0x67f1f61b,
-	0x77f126f0,
-	0x73f00001,
-	0x026eb900,
-	0xb90421f4,
-	0x87fd02d8,
-	0xf960f905,
-	0xfcd0fc80,
-	0x3321f4e0,
-	0x162067f1,
-	0xf4026eb9,
-	0xd8b90421,
-	0x0587fd02,
-	0x80f960f9,
-	0xe0fcd0fc,
-	0xf13321f4,
-	0xf00aa277,
-	0x6eb90073,
-	0x0421f402,
-	0xfd02d8b9,
-	0x60f90587,
-	0xd0fc80f9,
-	0x21f4e0fc,
-/* 0x05d3: memx_func_wait_vblank */
-	0xb600f833,
-	0x00f80410,
-/* 0x05d8: memx_func_wr32 */
-	0x98001698,
-	0x10b60115,
-	0xf960f908,
-	0xfcd0fc50,
-	0x3321f4e0,
-	0xf40242b6,
-	0x00f8e91b,
-/* 0x05f4: memx_func_wait */
-	0xcf2c87f0,
-	0x1e980088,
-	0x011d9800,
-	0x98021c98,
-	0x10b6031b,
-	0x8621f410,
-/* 0x060e: memx_func_delay */
-	0x1e9800f8,
-	0x0410b600,
-	0xf86721f4,
-/* 0x0619: memx_func_train */
-/* 0x061b: memx_exec */
-	0xf900f800,
-	0xb9d0f9e0,
-	0xb2b902c1,
-/* 0x0625: memx_exec_next */
-	0x00139802,
-	0xe70410b6,
-	0xe701f034,
-	0xb601e033,
-	0x30f00132,
-	0xde35980c,
-	0x12b855f9,
-	0xe41ef406,
-	0x98f10b98,
-	0xcbbbf20c,
-	0xc4b7f102,
-	0x00bbcf07,
-	0xe0fcd0fc,
-	0x02f121f5,
-/* 0x065e: memx_info */
-	0xc67000f8,
-	0x0e0bf401,
-/* 0x0664: memx_info_data */
-	0x03ccc7f1,
-	0x0800b7f1,
-/* 0x066f: memx_info_train */
-	0xf10b0ef4,
-	0xf10bccc7,
-/* 0x0677: memx_info_send */
-	0xf50100b7,
-	0xf802f121,
-/* 0x067d: memx_recv */
-	0x01d6b000,
-	0xb09b0bf4,
-	0x0bf400d6,
-/* 0x068b: memx_init */
-	0xf800f8d8,
-/* 0x068d: perf_recv */
-/* 0x068f: perf_init */
-	0xf800f800,
-/* 0x0691: i2c_drive_scl */
-	0x0036b000,
-	0xf10e0bf4,
-	0xd007e007,
-	0x04bd0001,
-/* 0x06a2: i2c_drive_scl_lo */
-	0x07f100f8,
-	0x01d007e4,
-	0xf804bd00,
-/* 0x06ad: i2c_drive_sda */
-	0x0036b000,
-	0xf10e0bf4,
-	0xd007e007,
-	0x04bd0002,
-/* 0x06be: i2c_drive_sda_lo */
-	0x07f100f8,
-	0x02d007e4,
-	0xf804bd00,
-/* 0x06c9: i2c_sense_scl */
-	0x0132f400,
-	0x07c437f1,
-	0xfd0033cf,
-	0x0bf40431,
-	0x0131f406,
-/* 0x06dc: i2c_sense_scl_done */
-/* 0x06de: i2c_sense_sda */
-	0x32f400f8,
-	0xc437f101,
-	0x0033cf07,
-	0xf40432fd,
-	0x31f4060b,
-/* 0x06f1: i2c_sense_sda_done */
-/* 0x06f3: i2c_raise_scl */
-	0xf900f801,
-	0x9847f140,
-	0x0137f008,
-	0x069121f5,
-/* 0x0700: i2c_raise_scl_wait */
-	0x03e8e7f1,
-	0xf56721f4,
-	0xf406c921,
-	0x42b60901,
-	0xef1bf401,
-/* 0x0714: i2c_raise_scl_done */
-	0x00f840fc,
-/* 0x0718: i2c_start */
-	0x06c921f5,
-	0xf50d11f4,
-	0xf406de21,
-	0x0ef40611,
-/* 0x0729: i2c_start_rep */
-	0x0037f030,
-	0x069121f5,
-	0xf50137f0,
-	0xbb06ad21,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x06f321f5,
-	0xf40464b6,
-/* 0x0756: i2c_start_send */
-	0x37f01f11,
-	0xad21f500,
-	0x88e7f106,
-	0x6721f413,
-	0xf50037f0,
-	0xf1069121,
-	0xf41388e7,
-/* 0x0772: i2c_start_out */
-	0x00f86721,
-/* 0x0774: i2c_stop */
-	0xf50037f0,
-	0xf0069121,
-	0x21f50037,
-	0xe7f106ad,
-	0x21f403e8,
-	0x0137f067,
-	0x069121f5,
-	0x1388e7f1,
-	0xf06721f4,
-	0x21f50137,
-	0xe7f106ad,
-	0x21f41388,
-/* 0x07a7: i2c_bitw */
-	0xf500f867,
-	0xf106ad21,
-	0xf403e8e7,
-	0x76bb6721,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0xf550fc04,
-	0xb606f321,
-	0x11f40464,
-	0x88e7f118,
-	0x6721f413,
-	0xf50037f0,
-	0xf1069121,
-	0xf41388e7,
-/* 0x07e6: i2c_bitw_out */
-	0x00f86721,
-/* 0x07e8: i2c_bitr */
-	0xf50137f0,
-	0xf106ad21,
-	0xf403e8e7,
-	0x76bb6721,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0xf550fc04,
-	0xb606f321,
-	0x11f40464,
-	0xde21f51b,
-	0x0037f006,
-	0x069121f5,
-	0x1388e7f1,
-	0xf06721f4,
-	0x31f4013c,
-/* 0x082d: i2c_bitr_done */
-/* 0x082f: i2c_get_byte */
-	0xf000f801,
-	0x47f00057,
-/* 0x0835: i2c_get_byte_next */
-	0x0154b608,
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0xe821f550,
-	0x0464b607,
-	0xfd2b11f4,
-	0x42b60553,
-	0xd81bf401,
-	0xbb0137f0,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x07a721f5,
-/* 0x087f: i2c_get_byte_done */
-	0xf80464b6,
-/* 0x0881: i2c_put_byte */
-	0x0847f000,
-/* 0x0884: i2c_put_byte_next */
-	0xff0142b6,
-	0x76bb3854,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0xf550fc04,
-	0xb607a721,
-	0x11f40464,
-	0x0046b034,
-	0xbbd81bf4,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x07e821f5,
-	0xf40464b6,
-	0x76bb0f11,
-	0x0136b000,
-	0xf4061bf4,
-/* 0x08da: i2c_put_byte_done */
-	0x00f80132,
-/* 0x08dc: i2c_addr */
-	0xb60076bb,
-	0x50f90465,
-	0xbb046594,
-	0x50bd0256,
-	0xfc0475fd,
-	0x1821f550,
-	0x0464b607,
-	0xe72911f4,
-	0xb6012ec3,
-	0x53fd0134,
-	0x0076bb05,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x21f550fc,
-	0x64b60881,
-/* 0x0921: i2c_addr_done */
-/* 0x0923: i2c_acquire_addr */
-	0xc700f804,
-	0xe4b6f8ce,
-	0x14e0b705,
-/* 0x092f: i2c_acquire */
-	0xf500f8d0,
-	0xf4092321,
-	0xd9f00421,
-	0x3321f403,
-/* 0x093e: i2c_release */
-	0x21f500f8,
-	0x21f40923,
-	0x03daf004,
-	0xf83321f4,
-/* 0x094d: i2c_recv */
-	0x0132f400,
-	0xb6f8c1c7,
-	0x16b00214,
-	0x3a1ff528,
-	0xf413a001,
-	0x0032980c,
-	0x0ccc13a0,
-	0xf4003198,
-	0xd0f90231,
-	0xd0f9e0f9,
-	0x000067f1,
-	0x100063f1,
-	0xbb016792,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x092f21f5,
-	0xfc0464b6,
-	0x00d6b0d0,
-	0x00b31bf5,
-	0xbb0057f0,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x08dc21f5,
-	0xf50464b6,
-	0xc700d011,
-	0x76bbe0c5,
-	0x0465b600,
-	0x659450f9,
-	0x0256bb04,
-	0x75fd50bd,
-	0xf550fc04,
-	0xb6088121,
-	0x11f50464,
-	0x57f000ad,
-	0x0076bb01,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x21f550fc,
-	0x64b608dc,
-	0x8a11f504,
-	0x0076bb00,
-	0xf90465b6,
-	0x04659450,
-	0xbd0256bb,
-	0x0475fd50,
-	0x21f550fc,
-	0x64b6082f,
-	0x6a11f404,
-	0xbbe05bcb,
-	0x65b60076,
-	0x9450f904,
-	0x56bb0465,
-	0xfd50bd02,
-	0x50fc0475,
-	0x077421f5,
-	0xb90464b6,
-	0x74bd025b,
-/* 0x0a53: i2c_recv_not_rd08 */
-	0xb0430ef4,
-	0x1bf401d6,
-	0x0057f03d,
-	0x08dc21f5,
-	0xc73311f4,
-	0x21f5e0c5,
-	0x11f40881,
-	0x0057f029,
-	0x08dc21f5,
-	0xc71f11f4,
-	0x21f5e0b5,
-	0x11f40881,
-	0x7421f515,
-	0xc774bd07,
-	0x1bf408c5,
-	0x0232f409,
-/* 0x0a93: i2c_recv_not_wr08 */
-/* 0x0a93: i2c_recv_done */
-	0xc7030ef4,
-	0x21f5f8ce,
-	0xe0fc093e,
-	0x12f4d0fc,
-	0x027cb90a,
-	0x02f121f5,
-/* 0x0aa8: i2c_recv_exit */
-/* 0x0aaa: i2c_init */
-	0x00f800f8,
-/* 0x0aac: test_recv */
-	0x05d817f1,
-	0xb60011cf,
-	0x07f10110,
-	0x01d005d8,
-	0xf104bd00,
-	0xf1d900e7,
-	0xf5134fe3,
-	0xf8022321,
-/* 0x0acd: test_init */
-	0x00e7f100,
-	0x2321f508,
-/* 0x0ad7: idle_recv */
-	0xf800f802,
-/* 0x0ad9: idle */
-	0x0031f400,
-	0x05d417f1,
-	0xb60011cf,
-	0x07f10110,
-	0x01d005d4,
-/* 0x0aef: idle_loop */
-	0xf004bd00,
-	0x32f45817,
-/* 0x0af5: idle_proc */
-/* 0x0af5: idle_proc_exec */
-	0xb910f902,
-	0x21f5021e,
-	0x10fc02fa,
-	0xf40911f4,
-	0x0ef40231,
-/* 0x0b09: idle_proc_next */
-	0x5810b6ef,
-	0xf4061fb8,
-	0x02f4e61b,
-	0x0028f4dd,
-	0x00c10ef4,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-	0x00000000,
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4 b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4
new file mode 100644
index 0000000..2f28c7e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#define NVKM_PPWR_CHIPSET GF119
+#define HW_TICKS_PER_US 324
+
+//#define NVKM_FALCON_PC24
+#define NVKM_FALCON_UNSHIFTED_IO
+//#define NVKM_FALCON_MMIO_UAS
+//#define NVKM_FALCON_MMIO_TRAP
+
+#include "macros.fuc"
+
+.section #gf119_pmu_data
+#define INCLUDE_PROC
+#include "kernel.fuc"
+#include "arith.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "i2c_.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_PROC
+
+#define INCLUDE_DATA
+#include "kernel.fuc"
+#include "arith.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "i2c_.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_DATA
+.align 256
+
+.section #gf119_pmu_code
+#define INCLUDE_CODE
+#include "kernel.fuc"
+#include "arith.fuc"
+#include "host.fuc"
+#include "memx.fuc"
+#include "perf.fuc"
+#include "i2c_.fuc"
+#include "test.fuc"
+#include "idle.fuc"
+#undef INCLUDE_CODE
+.align 256
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
new file mode 100644
index 0000000..31552af
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
@@ -0,0 +1,1795 @@
+uint32_t gf119_pmu_data[] = {
+/* 0x0000: proc_kern */
+	0x52544e49,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0058: proc_list_head */
+	0x54534f48,
+	0x0000049d,
+	0x00000446,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x584d454d,
+	0x0000068b,
+	0x0000067d,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x46524550,
+	0x0000068f,
+	0x0000068d,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x5f433249,
+	0x00000aaa,
+	0x0000094d,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x54534554,
+	0x00000acd,
+	0x00000aac,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x454c4449,
+	0x00000ad9,
+	0x00000ad7,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0268: proc_list_tail */
+/* 0x0268: time_prev */
+	0x00000000,
+/* 0x026c: time_next */
+	0x00000000,
+/* 0x0270: fifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x02f0: rfifo_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0370: memx_func_head */
+	0x00000001,
+	0x00000000,
+	0x000004d3,
+/* 0x037c: memx_func_next */
+	0x00000002,
+	0x00000000,
+	0x00000554,
+	0x00000003,
+	0x00000002,
+	0x000005d8,
+	0x00040004,
+	0x00000000,
+	0x000005f4,
+	0x00010005,
+	0x00000000,
+	0x0000060e,
+	0x00010006,
+	0x00000000,
+	0x000005d3,
+	0x00000007,
+	0x00000000,
+	0x00000619,
+/* 0x03c4: memx_func_tail */
+/* 0x03c4: memx_ts_start */
+	0x00000000,
+/* 0x03c8: memx_ts_end */
+	0x00000000,
+/* 0x03cc: memx_data_head */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0bcc: memx_data_tail */
+/* 0x0bcc: memx_train_head */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0ccc: memx_train_tail */
+/* 0x0ccc: i2c_scl_map */
+	0x00000400,
+	0x00000800,
+	0x00001000,
+	0x00002000,
+	0x00004000,
+	0x00008000,
+	0x00010000,
+	0x00020000,
+	0x00040000,
+	0x00080000,
+/* 0x0cf4: i2c_sda_map */
+	0x00100000,
+	0x00200000,
+	0x00400000,
+	0x00800000,
+	0x01000000,
+	0x02000000,
+	0x04000000,
+	0x08000000,
+	0x10000000,
+	0x20000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
+
+uint32_t gf119_pmu_code[] = {
+	0x034d0ef5,
+/* 0x0004: rd32 */
+	0x07a007f1,
+	0xbd000ed0,
+	0x01d7f004,
+	0xf101d3f0,
+	0xd007ac07,
+	0x04bd000d,
+/* 0x001c: rd32_wait */
+	0x07acd7f1,
+	0xf100ddcf,
+	0xf47000d4,
+	0xd7f1f51b,
+	0xddcf07a4,
+/* 0x0033: wr32 */
+	0xf100f800,
+	0xd007a007,
+	0x04bd000e,
+	0x07a407f1,
+	0xbd000dd0,
+	0x02d7f004,
+	0xf0f0d5f0,
+	0x07f101d3,
+	0x0dd007ac,
+/* 0x0057: wr32_wait */
+	0xf104bd00,
+	0xcf07acd7,
+	0xd4f100dd,
+	0x1bf47000,
+/* 0x0067: nsec */
+	0xf900f8f5,
+	0xf080f990,
+	0x88cf2c87,
+/* 0x0071: nsec_loop */
+	0x2c97f000,
+	0xbb0099cf,
+	0x9eb80298,
+	0xf41ef406,
+	0x90fc80fc,
+/* 0x0086: wait */
+	0x90f900f8,
+	0x87f080f9,
+	0x0088cf2c,
+/* 0x0090: wait_loop */
+	0xf402eeb9,
+	0xdab90421,
+	0x04adfd02,
+	0xf406acb8,
+	0x97f0120b,
+	0x0099cf2c,
+	0xb80298bb,
+	0x1ef4069b,
+/* 0x00b1: wait_done */
+	0xfc80fce2,
+/* 0x00b7: intr_watchdog */
+	0x9800f890,
+	0x96b003e9,
+	0x2a0bf400,
+	0xbb9a0a98,
+	0x1cf4029a,
+	0x01d7f00f,
+	0x028c21f5,
+	0x0ef494bd,
+/* 0x00d5: intr_watchdog_next_time */
+	0x9b0a9815,
+	0xf400a6b0,
+	0x9ab8090b,
+	0x061cf406,
+/* 0x00e4: intr_watchdog_next_time_set */
+/* 0x00e7: intr_watchdog_next_proc */
+	0x809b0980,
+	0xe0b603e9,
+	0x68e6b158,
+	0xc61bf402,
+/* 0x00f6: intr */
+	0x00f900f8,
+	0x80f904bd,
+	0xa0f990f9,
+	0xc0f9b0f9,
+	0xe0f9d0f9,
+	0xf7f0f0f9,
+	0x0188fe00,
+	0x87f180f9,
+	0x88cf05d0,
+	0x0180b600,
+	0x05d007f1,
+	0xbd0008d0,
+	0x0887f004,
+	0xc40088cf,
+	0x0bf40289,
+	0x9b008020,
+	0xf458e7f0,
+	0x0998b721,
+	0x0096b09b,
+	0xf00e0bf4,
+	0x09d03407,
+	0x8004bd00,
+/* 0x014e: intr_skip_watchdog */
+	0x89e49a09,
+	0x0bf40800,
+	0x8897f13c,
+	0x0099cf06,
+	0xf4029ac4,
+	0xc7f1260b,
+	0xcccf04c0,
+	0xf1c0f900,
+	0xf14f48e7,
+	0xf05453e3,
+	0x21f500d7,
+	0xc0fc02f1,
+	0x04c007f1,
+	0xbd000cd0,
+/* 0x0185: intr_subintr_skip_fifo */
+	0x8807f104,
+	0x0009d006,
+/* 0x018e: intr_skip_subintr */
+	0x89c404bd,
+	0x070bf420,
+	0xffbfa4f1,
+/* 0x0198: intr_skip_pause */
+	0xf44089c4,
+	0xa4f1070b,
+/* 0x01a2: intr_skip_user0 */
+	0x07f0ffbf,
+	0x0008d004,
+	0x80fc04bd,
+	0xfc0088fe,
+	0xfce0fcf0,
+	0xfcc0fcd0,
+	0xfca0fcb0,
+	0xfc80fc90,
+	0x0032f400,
+/* 0x01c6: ticks_from_ns */
+	0xc0f901f8,
+	0xd7f1b0f9,
+	0xd3f00144,
+	0xb321f500,
+	0xe8ccec03,
+	0x00b4b003,
+	0xec120bf4,
+	0xf103e8ee,
+	0xf00144d7,
+	0x21f500d3,
+/* 0x01ee: ticks_from_ns_quit */
+	0xceb903b3,
+	0xfcb0fc02,
+/* 0x01f7: ticks_from_us */
+	0xf900f8c0,
+	0xf1b0f9c0,
+	0xf00144d7,
+	0x21f500d3,
+	0xceb903b3,
+	0x00b4b002,
+	0xbd050bf4,
+/* 0x0211: ticks_from_us_quit */
+	0xfcb0fce4,
+/* 0x0217: ticks_to_us */
+	0xf100f8c0,
+	0xf00144d7,
+	0xedff00d3,
+/* 0x0223: timer */
+	0xf900f8ec,
+	0xf480f990,
+	0xf8981032,
+	0x0086b003,
+	0xbd531cf4,
+	0x3807f084,
+	0xbd0008d0,
+	0x3487f004,
+	0x980088cf,
+	0x98bb9a09,
+	0x00e9bb02,
+	0xf003fe80,
+	0x88cf0887,
+	0x0284f000,
+	0xf0201bf4,
+	0x88cf3487,
+	0x06e0b800,
+	0xb8090bf4,
+	0x1cf406e8,
+/* 0x026d: timer_reset */
+	0x3407f00e,
+	0xbd000ed0,
+	0x9a0e8004,
+/* 0x0278: timer_enable */
+	0xf00187f0,
+	0x08d03807,
+/* 0x0283: timer_done */
+	0xf404bd00,
+	0x80fc1031,
+	0x00f890fc,
+/* 0x028c: send_proc */
+	0x90f980f9,
+	0x9805e898,
+	0x86f004e9,
+	0x0689b804,
+	0xc42a0bf4,
+	0x88940398,
+	0x1880b604,
+	0x98008ebb,
+	0x8a8000fa,
+	0x018d8000,
+	0x80028c80,
+	0x90b6038b,
+	0x0794f001,
+	0xf404e980,
+/* 0x02c6: send_done */
+	0x90fc0231,
+	0x00f880fc,
+/* 0x02cc: find */
+	0x87f080f9,
+	0x0131f458,
+/* 0x02d4: find_loop */
+	0xb8008a98,
+	0x0bf406ae,
+	0x5880b610,
+	0x026886b1,
+	0xf4f01bf4,
+/* 0x02ea: find_done */
+	0x8eb90132,
+	0xf880fc02,
+/* 0x02f1: send */
+	0xcc21f500,
+	0x9701f402,
+/* 0x02fa: recv */
+	0x90f900f8,
+	0xe89880f9,
+	0x04e99805,
+	0xb80132f4,
+	0x0bf40689,
+	0x0389c43d,
+	0xf00180b6,
+	0xe8800784,
+	0x02ea9805,
+	0x8ffef0f9,
+	0xb9f0f901,
+	0x999402ef,
+	0x00e9bb04,
+	0x9818e0b6,
+	0xec9803eb,
+	0x01ed9802,
+	0xf900ee98,
+	0xfef0fca5,
+	0x31f400f8,
+/* 0x0347: recv_done */
+	0xfcf0fc01,
+	0xf890fc80,
+/* 0x034d: init */
+	0x0817f100,
+	0x0011cf01,
+	0x010911e7,
+	0xfe0814b6,
+	0x17f10014,
+	0x13f000e0,
+	0x1c07f000,
+	0xbd0001d0,
+	0xff17f004,
+	0xd01407f0,
+	0x04bd0001,
+	0xf10217f0,
+	0xf0080015,
+	0x01d01007,
+	0xf104bd00,
+	0xf000f617,
+	0x10fe0013,
+	0x1031f400,
+	0xf00117f0,
+	0x01d03807,
+	0xf004bd00,
+/* 0x03a2: init_proc */
+	0xf19858f7,
+	0x0016b001,
+	0xf9fa0bf4,
+	0x58f0b615,
+/* 0x03b3: mulu32_32_64 */
+	0xf9f20ef4,
+	0xf920f910,
+	0x9540f930,
+	0xd29510e1,
+	0xbdc4bd10,
+	0xc0edffb4,
+	0xb9301dff,
+	0x34f10234,
+	0x34b6ffff,
+	0x1045b610,
+	0xbb00c3bb,
+	0xe2ff01b4,
+	0x0234b930,
+	0xffff34f1,
+	0xb61034b6,
+	0xc3bb1045,
+	0x01b4bb00,
+	0xbb3012ff,
+	0x40fc00b3,
+	0x20fc30fc,
+	0x00f810fc,
+/* 0x0404: host_send */
+	0x04b017f1,
+	0xf10011cf,
+	0xcf04a027,
+	0x12b80022,
+	0x2f0bf406,
+	0x94071ec4,
+	0xe0b704ee,
+	0xeb980270,
+	0x02ec9803,
+	0x9801ed98,
+	0x21f500ee,
+	0x10b602f1,
+	0x0f1ec401,
+	0x04b007f1,
+	0xbd000ed0,
+	0xc30ef404,
+/* 0x0444: host_send_done */
+/* 0x0446: host_recv */
+	0x17f100f8,
+	0x13f14e49,
+	0xe1b85254,
+	0xb30bf406,
+/* 0x0454: host_recv_wait */
+	0x04cc17f1,
+	0xf10011cf,
+	0xcf04c827,
+	0x16f00022,
+	0x0612b808,
+	0xc4ec0bf4,
+	0x34b60723,
+	0xf030b704,
+	0x033b8002,
+	0x80023c80,
+	0x3e80013d,
+	0x0120b600,
+	0xf10f24f0,
+	0xd004c807,
+	0x04bd0002,
+	0xf04027f0,
+	0x02d00007,
+	0xf804bd00,
+/* 0x049d: host_init */
+	0x8017f100,
+	0x1014b600,
+	0x027015f1,
+	0x04d007f1,
+	0xbd0001d0,
+	0x8017f104,
+	0x1014b600,
+	0x02f015f1,
+	0x04dc07f1,
+	0xbd0001d0,
+	0x0117f004,
+	0x04c407f1,
+	0xbd0001d0,
+/* 0x04d3: memx_func_enter */
+	0xf100f804,
+	0xf1162067,
+	0xf1f55d77,
+	0xb9ffff73,
+	0x21f4026e,
+	0x02d8b904,
+	0xf90487fd,
+	0xfc80f960,
+	0xf4e0fcd0,
+	0x77f13321,
+	0x73f1fffe,
+	0x6eb9ffff,
+	0x0421f402,
+	0xfd02d8b9,
+	0x60f90487,
+	0xd0fc80f9,
+	0x21f4e0fc,
+	0xf067f133,
+	0x026eb926,
+	0xb90421f4,
+	0x87fd02d8,
+	0xf960f904,
+	0xfcd0fc80,
+	0x3321f4e0,
+	0xf10467f0,
+	0xd007e007,
+	0x04bd0006,
+/* 0x053c: memx_func_enter_wait */
+	0x07c067f1,
+	0xf00066cf,
+	0x0bf40464,
+	0x2c67f0f6,
+	0x800066cf,
+	0x00f8f106,
+/* 0x0554: memx_func_leave */
+	0xcf2c67f0,
+	0x06800066,
+	0x0467f0f2,
+	0x07e407f1,
+	0xbd0006d0,
+/* 0x0569: memx_func_leave_wait */
+	0xc067f104,
+	0x0066cf07,
+	0xf40464f0,
+	0x67f1f61b,
+	0x77f126f0,
+	0x73f00001,
+	0x026eb900,
+	0xb90421f4,
+	0x87fd02d8,
+	0xf960f905,
+	0xfcd0fc80,
+	0x3321f4e0,
+	0x162067f1,
+	0xf4026eb9,
+	0xd8b90421,
+	0x0587fd02,
+	0x80f960f9,
+	0xe0fcd0fc,
+	0xf13321f4,
+	0xf00aa277,
+	0x6eb90073,
+	0x0421f402,
+	0xfd02d8b9,
+	0x60f90587,
+	0xd0fc80f9,
+	0x21f4e0fc,
+/* 0x05d3: memx_func_wait_vblank */
+	0xb600f833,
+	0x00f80410,
+/* 0x05d8: memx_func_wr32 */
+	0x98001698,
+	0x10b60115,
+	0xf960f908,
+	0xfcd0fc50,
+	0x3321f4e0,
+	0xf40242b6,
+	0x00f8e91b,
+/* 0x05f4: memx_func_wait */
+	0xcf2c87f0,
+	0x1e980088,
+	0x011d9800,
+	0x98021c98,
+	0x10b6031b,
+	0x8621f410,
+/* 0x060e: memx_func_delay */
+	0x1e9800f8,
+	0x0410b600,
+	0xf86721f4,
+/* 0x0619: memx_func_train */
+/* 0x061b: memx_exec */
+	0xf900f800,
+	0xb9d0f9e0,
+	0xb2b902c1,
+/* 0x0625: memx_exec_next */
+	0x00139802,
+	0xe70410b6,
+	0xe701f034,
+	0xb601e033,
+	0x30f00132,
+	0xde35980c,
+	0x12b855f9,
+	0xe41ef406,
+	0x98f10b98,
+	0xcbbbf20c,
+	0xc4b7f102,
+	0x00bbcf07,
+	0xe0fcd0fc,
+	0x02f121f5,
+/* 0x065e: memx_info */
+	0xc67000f8,
+	0x0e0bf401,
+/* 0x0664: memx_info_data */
+	0x03ccc7f1,
+	0x0800b7f1,
+/* 0x066f: memx_info_train */
+	0xf10b0ef4,
+	0xf10bccc7,
+/* 0x0677: memx_info_send */
+	0xf50100b7,
+	0xf802f121,
+/* 0x067d: memx_recv */
+	0x01d6b000,
+	0xb09b0bf4,
+	0x0bf400d6,
+/* 0x068b: memx_init */
+	0xf800f8d8,
+/* 0x068d: perf_recv */
+/* 0x068f: perf_init */
+	0xf800f800,
+/* 0x0691: i2c_drive_scl */
+	0x0036b000,
+	0xf10e0bf4,
+	0xd007e007,
+	0x04bd0001,
+/* 0x06a2: i2c_drive_scl_lo */
+	0x07f100f8,
+	0x01d007e4,
+	0xf804bd00,
+/* 0x06ad: i2c_drive_sda */
+	0x0036b000,
+	0xf10e0bf4,
+	0xd007e007,
+	0x04bd0002,
+/* 0x06be: i2c_drive_sda_lo */
+	0x07f100f8,
+	0x02d007e4,
+	0xf804bd00,
+/* 0x06c9: i2c_sense_scl */
+	0x0132f400,
+	0x07c437f1,
+	0xfd0033cf,
+	0x0bf40431,
+	0x0131f406,
+/* 0x06dc: i2c_sense_scl_done */
+/* 0x06de: i2c_sense_sda */
+	0x32f400f8,
+	0xc437f101,
+	0x0033cf07,
+	0xf40432fd,
+	0x31f4060b,
+/* 0x06f1: i2c_sense_sda_done */
+/* 0x06f3: i2c_raise_scl */
+	0xf900f801,
+	0x9847f140,
+	0x0137f008,
+	0x069121f5,
+/* 0x0700: i2c_raise_scl_wait */
+	0x03e8e7f1,
+	0xf56721f4,
+	0xf406c921,
+	0x42b60901,
+	0xef1bf401,
+/* 0x0714: i2c_raise_scl_done */
+	0x00f840fc,
+/* 0x0718: i2c_start */
+	0x06c921f5,
+	0xf50d11f4,
+	0xf406de21,
+	0x0ef40611,
+/* 0x0729: i2c_start_rep */
+	0x0037f030,
+	0x069121f5,
+	0xf50137f0,
+	0xbb06ad21,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x06f321f5,
+	0xf40464b6,
+/* 0x0756: i2c_start_send */
+	0x37f01f11,
+	0xad21f500,
+	0x88e7f106,
+	0x6721f413,
+	0xf50037f0,
+	0xf1069121,
+	0xf41388e7,
+/* 0x0772: i2c_start_out */
+	0x00f86721,
+/* 0x0774: i2c_stop */
+	0xf50037f0,
+	0xf0069121,
+	0x21f50037,
+	0xe7f106ad,
+	0x21f403e8,
+	0x0137f067,
+	0x069121f5,
+	0x1388e7f1,
+	0xf06721f4,
+	0x21f50137,
+	0xe7f106ad,
+	0x21f41388,
+/* 0x07a7: i2c_bitw */
+	0xf500f867,
+	0xf106ad21,
+	0xf403e8e7,
+	0x76bb6721,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb606f321,
+	0x11f40464,
+	0x88e7f118,
+	0x6721f413,
+	0xf50037f0,
+	0xf1069121,
+	0xf41388e7,
+/* 0x07e6: i2c_bitw_out */
+	0x00f86721,
+/* 0x07e8: i2c_bitr */
+	0xf50137f0,
+	0xf106ad21,
+	0xf403e8e7,
+	0x76bb6721,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb606f321,
+	0x11f40464,
+	0xde21f51b,
+	0x0037f006,
+	0x069121f5,
+	0x1388e7f1,
+	0xf06721f4,
+	0x31f4013c,
+/* 0x082d: i2c_bitr_done */
+/* 0x082f: i2c_get_byte */
+	0xf000f801,
+	0x47f00057,
+/* 0x0835: i2c_get_byte_next */
+	0x0154b608,
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0xe821f550,
+	0x0464b607,
+	0xfd2b11f4,
+	0x42b60553,
+	0xd81bf401,
+	0xbb0137f0,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x07a721f5,
+/* 0x087f: i2c_get_byte_done */
+	0xf80464b6,
+/* 0x0881: i2c_put_byte */
+	0x0847f000,
+/* 0x0884: i2c_put_byte_next */
+	0xff0142b6,
+	0x76bb3854,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb607a721,
+	0x11f40464,
+	0x0046b034,
+	0xbbd81bf4,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x07e821f5,
+	0xf40464b6,
+	0x76bb0f11,
+	0x0136b000,
+	0xf4061bf4,
+/* 0x08da: i2c_put_byte_done */
+	0x00f80132,
+/* 0x08dc: i2c_addr */
+	0xb60076bb,
+	0x50f90465,
+	0xbb046594,
+	0x50bd0256,
+	0xfc0475fd,
+	0x1821f550,
+	0x0464b607,
+	0xe72911f4,
+	0xb6012ec3,
+	0x53fd0134,
+	0x0076bb05,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b60881,
+/* 0x0921: i2c_addr_done */
+/* 0x0923: i2c_acquire_addr */
+	0xc700f804,
+	0xe4b6f8ce,
+	0x14e0b705,
+/* 0x092f: i2c_acquire */
+	0xf500f8d0,
+	0xf4092321,
+	0xd9f00421,
+	0x3321f403,
+/* 0x093e: i2c_release */
+	0x21f500f8,
+	0x21f40923,
+	0x03daf004,
+	0xf83321f4,
+/* 0x094d: i2c_recv */
+	0x0132f400,
+	0xb6f8c1c7,
+	0x16b00214,
+	0x3a1ff528,
+	0xf413a001,
+	0x0032980c,
+	0x0ccc13a0,
+	0xf4003198,
+	0xd0f90231,
+	0xd0f9e0f9,
+	0x000067f1,
+	0x100063f1,
+	0xbb016792,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x092f21f5,
+	0xfc0464b6,
+	0x00d6b0d0,
+	0x00b31bf5,
+	0xbb0057f0,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x08dc21f5,
+	0xf50464b6,
+	0xc700d011,
+	0x76bbe0c5,
+	0x0465b600,
+	0x659450f9,
+	0x0256bb04,
+	0x75fd50bd,
+	0xf550fc04,
+	0xb6088121,
+	0x11f50464,
+	0x57f000ad,
+	0x0076bb01,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b608dc,
+	0x8a11f504,
+	0x0076bb00,
+	0xf90465b6,
+	0x04659450,
+	0xbd0256bb,
+	0x0475fd50,
+	0x21f550fc,
+	0x64b6082f,
+	0x6a11f404,
+	0xbbe05bcb,
+	0x65b60076,
+	0x9450f904,
+	0x56bb0465,
+	0xfd50bd02,
+	0x50fc0475,
+	0x077421f5,
+	0xb90464b6,
+	0x74bd025b,
+/* 0x0a53: i2c_recv_not_rd08 */
+	0xb0430ef4,
+	0x1bf401d6,
+	0x0057f03d,
+	0x08dc21f5,
+	0xc73311f4,
+	0x21f5e0c5,
+	0x11f40881,
+	0x0057f029,
+	0x08dc21f5,
+	0xc71f11f4,
+	0x21f5e0b5,
+	0x11f40881,
+	0x7421f515,
+	0xc774bd07,
+	0x1bf408c5,
+	0x0232f409,
+/* 0x0a93: i2c_recv_not_wr08 */
+/* 0x0a93: i2c_recv_done */
+	0xc7030ef4,
+	0x21f5f8ce,
+	0xe0fc093e,
+	0x12f4d0fc,
+	0x027cb90a,
+	0x02f121f5,
+/* 0x0aa8: i2c_recv_exit */
+/* 0x0aaa: i2c_init */
+	0x00f800f8,
+/* 0x0aac: test_recv */
+	0x05d817f1,
+	0xb60011cf,
+	0x07f10110,
+	0x01d005d8,
+	0xf104bd00,
+	0xf1d900e7,
+	0xf5134fe3,
+	0xf8022321,
+/* 0x0acd: test_init */
+	0x00e7f100,
+	0x2321f508,
+/* 0x0ad7: idle_recv */
+	0xf800f802,
+/* 0x0ad9: idle */
+	0x0031f400,
+	0x05d417f1,
+	0xb60011cf,
+	0x07f10110,
+	0x01d005d4,
+/* 0x0aef: idle_loop */
+	0xf004bd00,
+	0x32f45817,
+/* 0x0af5: idle_proc */
+/* 0x0af5: idle_proc_exec */
+	0xb910f902,
+	0x21f5021e,
+	0x10fc02fa,
+	0xf40911f4,
+	0x0ef40231,
+/* 0x0b09: idle_proc_next */
+	0x5810b6ef,
+	0xf4061fb8,
+	0x02f4e61b,
+	0x0028f4dd,
+	0x00c10ef4,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index 78a4ea0..aeb8ccd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -24,17 +24,16 @@
 #include "priv.h"
 #include "fuc/gf100.fuc3.h"
 
-struct nvkm_oclass *
-gf100_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xc0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = _nvkm_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
+static const struct nvkm_pmu_func
+gf100_pmu = {
 	.code.data = gf100_pmu_code,
 	.code.size = sizeof(gf100_pmu_code),
 	.data.data = gf100_pmu_data,
 	.data.size = sizeof(gf100_pmu_data),
-}.base;
+};
+
+int
+gf100_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gf100_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c
deleted file mode 100644
index 6b3a238..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2013 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-#include "fuc/gf110.fuc4.h"
-
-struct nvkm_oclass *
-gf110_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xd0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = _nvkm_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
-	.code.data = gf110_pmu_code,
-	.code.size = sizeof(gf110_pmu_code),
-	.data.data = gf110_pmu_data,
-	.data.size = sizeof(gf110_pmu_data),
-}.base;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
new file mode 100644
index 0000000..fbc88d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "fuc/gf119.fuc4.h"
+
+static const struct nvkm_pmu_func
+gf119_pmu = {
+	.code.data = gf119_pmu_code,
+	.code.size = sizeof(gf119_pmu_code),
+	.data.data = gf119_pmu_data,
+	.data.size = sizeof(gf119_pmu_data),
+};
+
+int
+gf119_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gf119_pmu, device, index, ppmu);
+}
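
The gf110 PMU returns here under its correct chipset name, gf119, and the oclass boilerplate is replaced by a per-chipset constructor that hands a static nvkm_pmu_func to nvkm_pmu_new_(). That common constructor lives in subdev/pmu/base.c, outside this diff, so the following is only a hedged sketch of its likely shape, inferred from the gk20a constructor and the priv.h changes further down; the file-local "nvkm_pmu" subdev-func table is an assumption.

	int
	nvkm_pmu_new_(const struct nvkm_pmu_func *func,
		      struct nvkm_device *device, int index,
		      struct nvkm_pmu **ppmu)
	{
		struct nvkm_pmu *pmu;
		/* allocate the subdev and publish it to the caller */
		if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
			return -ENOMEM;
		pmu->func = func;	/* per-chipset firmware image and hooks */
		/* "nvkm_pmu" = assumed file-local nvkm_subdev_func table */
		nvkm_subdev_ctor(&nvkm_pmu, device, index, 0, &pmu->subdev);
		return 0;
	}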
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 28fdb8e..e33f5c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -21,47 +21,97 @@
  *
  * Authors: Ben Skeggs
  */
-#define gf110_pmu_code gk104_pmu_code
-#define gf110_pmu_data gk104_pmu_data
+#define gf119_pmu_code gk104_pmu_code
+#define gf119_pmu_data gk104_pmu_data
 #include "priv.h"
-#include "fuc/gf110.fuc4.h"
+#include "fuc/gf119.fuc4.h"
+
+#include <core/option.h>
+#include <subdev/timer.h>
+
+static void
+magic_(struct nvkm_device *device, u32 ctrl, int size)
+{
+	nvkm_wr32(device, 0x00c800, 0x00000000);
+	nvkm_wr32(device, 0x00c808, 0x00000000);
+	nvkm_wr32(device, 0x00c800, ctrl);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x00c800) & 0x40000000) {
+			while (size--)
+				nvkm_wr32(device, 0x00c804, 0x00000000);
+			break;
+		}
+	);
+	nvkm_wr32(device, 0x00c800, 0x00000000);
+}
+
+static void
+magic(struct nvkm_device *device, u32 ctrl)
+{
+	magic_(device, 0x8000a41f | ctrl, 6);
+	magic_(device, 0x80000421 | ctrl, 1);
+}
 
 static void
 gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 {
-	nv_mask(pmu, 0x000200, 0x00001000, 0x00000000);
-	nv_rd32(pmu, 0x000200);
-	nv_mask(pmu, 0x000200, 0x08000000, 0x08000000);
+	struct nvkm_device *device = pmu->subdev.device;
+
+	nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
+	nvkm_rd32(device, 0x000200);
+	nvkm_mask(device, 0x000200, 0x08000000, 0x08000000);
 	msleep(50);
 
-	nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000002);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000002);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
 
-	nv_mask(pmu, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000);
+	nvkm_mask(device, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000);
 	msleep(50);
 
-	nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000000);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
 
-	nv_mask(pmu, 0x000200, 0x08000000, 0x00000000);
-	nv_mask(pmu, 0x000200, 0x00001000, 0x00001000);
-	nv_rd32(pmu, 0x000200);
+	nvkm_mask(device, 0x000200, 0x08000000, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
+	nvkm_rd32(device, 0x000200);
+
+	if (nvkm_boolopt(device->cfgopt, "War00C800_0",
+	    device->quirk ? device->quirk->War00C800_0 : false)) {
+		nvkm_info(&pmu->subdev, "hw bug workaround enabled\n");
+		switch (device->chipset) {
+		case 0xe4:
+			magic(device, 0x04000000);
+			magic(device, 0x06000000);
+			magic(device, 0x0c000000);
+			magic(device, 0x0e000000);
+			break;
+		case 0xe6:
+			magic(device, 0x02000000);
+			magic(device, 0x04000000);
+			magic(device, 0x0a000000);
+			break;
+		case 0xe7:
+			magic(device, 0x02000000);
+			break;
+		default:
+			break;
+		}
+	}
 }
 
-struct nvkm_oclass *
-gk104_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xe4),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = _nvkm_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
+static const struct nvkm_pmu_func
+gk104_pmu = {
 	.code.data = gk104_pmu_code,
 	.code.size = sizeof(gk104_pmu_code),
 	.data.data = gk104_pmu_data,
 	.data.size = sizeof(gk104_pmu_data),
 	.pgob = gk104_pmu_pgob,
-}.base;
+};
+
+int
+gk104_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gk104_pmu, device, index, ppmu);
+}
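
gk104 additionally gains the "War00C800_0" PGOB workaround, enabled via a boot option or a per-board quirk, and the old nv_wait() polls become nvkm_msec() loops. The macro re-runs its body until the body executes break or the timeout (in milliseconds) expires; by my reading of <subdev/timer.h> it evaluates negative on timeout, so callers can test the result -- a hedged idiom sketch, not code from this patch:

	/* poll up to 2000ms for the status bit; bail out on timeout */
	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x00c800) & 0x40000000)
			break;
	) < 0)
		return -ETIMEDOUT;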
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
index 89bb94b..ae25524 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -21,16 +21,17 @@
  *
  * Authors: Ben Skeggs
  */
-#define gf110_pmu_code gk110_pmu_code
-#define gf110_pmu_data gk110_pmu_data
+#define gf119_pmu_code gk110_pmu_code
+#define gf119_pmu_data gk110_pmu_data
 #include "priv.h"
-#include "fuc/gf110.fuc4.h"
+#include "fuc/gf119.fuc4.h"
 
 #include <subdev/timer.h>
 
 void
 gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
 {
+	struct nvkm_device *device = pmu->subdev.device;
 	static const struct {
 		u32 addr;
 		u32 data;
@@ -54,42 +55,44 @@
 	};
 	int i;
 
-	nv_mask(pmu, 0x000200, 0x00001000, 0x00000000);
-	nv_rd32(pmu, 0x000200);
-	nv_mask(pmu, 0x000200, 0x08000000, 0x08000000);
+	nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
+	nvkm_rd32(device, 0x000200);
+	nvkm_mask(device, 0x000200, 0x08000000, 0x08000000);
 	msleep(50);
 
-	nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000002);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000002);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
 
-	nv_mask(pmu, 0x0206b4, 0x00000000, 0x00000000);
+	nvkm_mask(device, 0x0206b4, 0x00000000, 0x00000000);
 	for (i = 0; i < ARRAY_SIZE(magic); i++) {
-		nv_wr32(pmu, magic[i].addr, magic[i].data);
-		nv_wait(pmu, magic[i].addr, 0x80000000, 0x00000000);
+		nvkm_wr32(device, magic[i].addr, magic[i].data);
+		nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, magic[i].addr) & 0x80000000))
+				break;
+		);
 	}
 
-	nv_mask(pmu, 0x10a78c, 0x00000002, 0x00000000);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000001);
-	nv_mask(pmu, 0x10a78c, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000002, 0x00000000);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000001);
+	nvkm_mask(device, 0x10a78c, 0x00000001, 0x00000000);
 
-	nv_mask(pmu, 0x000200, 0x08000000, 0x00000000);
-	nv_mask(pmu, 0x000200, 0x00001000, 0x00001000);
-	nv_rd32(pmu, 0x000200);
+	nvkm_mask(device, 0x000200, 0x08000000, 0x00000000);
+	nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
+	nvkm_rd32(device, 0x000200);
 }
 
-struct nvkm_oclass *
-gk110_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xf0),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = _nvkm_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
+static const struct nvkm_pmu_func
+gk110_pmu = {
 	.code.data = gk110_pmu_code,
 	.code.size = sizeof(gk110_pmu_code),
 	.data.data = gk110_pmu_data,
 	.data.size = sizeof(gk110_pmu_data),
 	.pgob = gk110_pmu_pgob,
-}.base;
+};
+
+int
+gk110_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gk110_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index b14134e..3b49176 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -24,18 +24,17 @@
 #include "priv.h"
 #include "fuc/gk208.fuc5.h"
 
-struct nvkm_oclass *
-gk208_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0x00),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = _nvkm_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
+static const struct nvkm_pmu_func
+gk208_pmu = {
 	.code.data = gk208_pmu_code,
 	.code.size = sizeof(gk208_pmu_code),
 	.data.data = gk208_pmu_data,
 	.data.size = sizeof(gk208_pmu_data),
 	.pgob = gk110_pmu_pgob,
-}.base;
+};
+
+int
+gk208_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gk208_pmu, device, index, ppmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index 594f746..6689d02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -19,6 +19,7 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
+#define gk20a_pmu(p) container_of((p), struct gk20a_pmu, base.subdev)
 #include "priv.h"
 
 #include <subdev/clk.h>
@@ -35,7 +36,7 @@
 	unsigned int avg_load;
 };
 
-struct gk20a_pmu_priv {
+struct gk20a_pmu {
 	struct nvkm_pmu base;
 	struct nvkm_alarm alarm;
 	struct gk20a_pmu_dvfs_data *data;
@@ -48,28 +49,28 @@
 };
 
 static int
-gk20a_pmu_dvfs_target(struct gk20a_pmu_priv *priv, int *state)
+gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)
 {
-	struct nvkm_clk *clk = nvkm_clk(priv);
+	struct nvkm_clk *clk = pmu->base.subdev.device->clk;
 
 	return nvkm_clk_astate(clk, *state, 0, false);
 }
 
 static int
-gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu_priv *priv, int *state)
+gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)
 {
-	struct nvkm_clk *clk = nvkm_clk(priv);
+	struct nvkm_clk *clk = pmu->base.subdev.device->clk;
 
 	*state = clk->pstate;
 	return 0;
 }
 
 static int
-gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu_priv *priv,
+gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,
 				int *state, int load)
 {
-	struct gk20a_pmu_dvfs_data *data = priv->data;
-	struct nvkm_clk *clk = nvkm_clk(priv);
+	struct gk20a_pmu_dvfs_data *data = pmu->data;
+	struct nvkm_clk *clk = pmu->base.subdev.device->clk;
 	int cur_level, level;
 
 	/* For GK20A, the performance level is directly mapped to pstate */
@@ -84,7 +85,8 @@
 		level = min(clk->state_nr - 1, level);
 	}
 
-	nv_trace(priv, "cur level = %d, new level = %d\n", cur_level, level);
+	nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",
+		   cur_level, level);
 
 	*state = level;
 
@@ -95,30 +97,35 @@
 }
 
 static int
-gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu_priv *priv,
+gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
 			      struct gk20a_pmu_dvfs_dev_status *status)
 {
-	status->busy = nv_rd32(priv, 0x10a508 + (BUSY_SLOT * 0x10));
-	status->total= nv_rd32(priv, 0x10a508 + (CLK_SLOT * 0x10));
+	struct nvkm_device *device = pmu->base.subdev.device;
+	status->busy = nvkm_rd32(device, 0x10a508 + (BUSY_SLOT * 0x10));
+	status->total = nvkm_rd32(device, 0x10a508 + (CLK_SLOT * 0x10));
 	return 0;
 }
 
 static void
-gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu_priv *priv)
+gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
 {
-	nv_wr32(priv, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000);
-	nv_wr32(priv, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000);
+	struct nvkm_device *device = pmu->base.subdev.device;
+	nvkm_wr32(device, 0x10a508 + (BUSY_SLOT * 0x10), 0x80000000);
+	nvkm_wr32(device, 0x10a508 + (CLK_SLOT * 0x10), 0x80000000);
 }
 
 static void
 gk20a_pmu_dvfs_work(struct nvkm_alarm *alarm)
 {
-	struct gk20a_pmu_priv *priv =
-		container_of(alarm, struct gk20a_pmu_priv, alarm);
-	struct gk20a_pmu_dvfs_data *data = priv->data;
+	struct gk20a_pmu *pmu =
+		container_of(alarm, struct gk20a_pmu, alarm);
+	struct gk20a_pmu_dvfs_data *data = pmu->data;
 	struct gk20a_pmu_dvfs_dev_status status;
-	struct nvkm_clk *clk = nvkm_clk(priv);
-	struct nvkm_volt *volt = nvkm_volt(priv);
+	struct nvkm_subdev *subdev = &pmu->base.subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_clk *clk = device->clk;
+	struct nvkm_timer *tmr = device->timer;
+	struct nvkm_volt *volt = device->volt;
 	u32 utilization = 0;
 	int state, ret;
 
@@ -129,9 +136,9 @@
 	if (!clk || !volt)
 		goto resched;
 
-	ret = gk20a_pmu_dvfs_get_dev_status(priv, &status);
+	ret = gk20a_pmu_dvfs_get_dev_status(pmu, &status);
 	if (ret) {
-		nv_warn(priv, "failed to get device status\n");
+		nvkm_warn(subdev, "failed to get device status\n");
 		goto resched;
 	}
 
@@ -140,56 +147,52 @@
 
 	data->avg_load = (data->p_smooth * data->avg_load) + utilization;
 	data->avg_load /= data->p_smooth + 1;
-	nv_trace(priv, "utilization = %d %%, avg_load = %d %%\n",
-			utilization, data->avg_load);
+	nvkm_trace(subdev, "utilization = %d %%, avg_load = %d %%\n",
+		   utilization, data->avg_load);
 
-	ret = gk20a_pmu_dvfs_get_cur_state(priv, &state);
+	ret = gk20a_pmu_dvfs_get_cur_state(pmu, &state);
 	if (ret) {
-		nv_warn(priv, "failed to get current state\n");
+		nvkm_warn(subdev, "failed to get current state\n");
 		goto resched;
 	}
 
-	if (gk20a_pmu_dvfs_get_target_state(priv, &state, data->avg_load)) {
-		nv_trace(priv, "set new state to %d\n", state);
-		gk20a_pmu_dvfs_target(priv, &state);
+	if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) {
+		nvkm_trace(subdev, "set new state to %d\n", state);
+		gk20a_pmu_dvfs_target(pmu, &state);
 	}
 
 resched:
-	gk20a_pmu_dvfs_reset_dev_status(priv);
-	nvkm_timer_alarm(priv, 100000000, alarm);
+	gk20a_pmu_dvfs_reset_dev_status(pmu);
+	nvkm_timer_alarm(tmr, 100000000, alarm);
 }
 
 static int
-gk20a_pmu_fini(struct nvkm_object *object, bool suspend)
+gk20a_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	struct nvkm_pmu *pmu = (void *)object;
-	struct gk20a_pmu_priv *priv = (void *)pmu;
+	struct gk20a_pmu *pmu = gk20a_pmu(subdev);
+	nvkm_timer_alarm_cancel(subdev->device->timer, &pmu->alarm);
+	return 0;
+}
 
-	nvkm_timer_alarm_cancel(priv, &priv->alarm);
-
-	return nvkm_subdev_fini(&pmu->base, suspend);
+static void *
+gk20a_pmu_dtor(struct nvkm_subdev *subdev)
+{
+	return gk20a_pmu(subdev);
 }
 
 static int
-gk20a_pmu_init(struct nvkm_object *object)
+gk20a_pmu_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_pmu *pmu = (void *)object;
-	struct gk20a_pmu_priv *priv = (void *)pmu;
-	int ret;
-
-	ret = nvkm_subdev_init(&pmu->base);
-	if (ret)
-		return ret;
-
-	pmu->pgob = nvkm_pmu_pgob;
+	struct gk20a_pmu *pmu = gk20a_pmu(subdev);
+	struct nvkm_device *device = pmu->base.subdev.device;
 
 	/* init pwr perf counter */
-	nv_wr32(pmu, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001);
-	nv_wr32(pmu, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002);
-	nv_wr32(pmu, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003);
+	nvkm_wr32(device, 0x10a504 + (BUSY_SLOT * 0x10), 0x00200001);
+	nvkm_wr32(device, 0x10a50c + (BUSY_SLOT * 0x10), 0x00000002);
+	nvkm_wr32(device, 0x10a50c + (CLK_SLOT * 0x10), 0x00000003);
 
-	nvkm_timer_alarm(pmu, 2000000000, &priv->alarm);
-	return ret;
+	nvkm_timer_alarm(device->timer, 2000000000, &pmu->alarm);
+	return 0;
 }
 
 static struct gk20a_pmu_dvfs_data
@@ -199,32 +202,26 @@
 	.p_smooth = 1,
 };
 
-static int
-gk20a_pmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+static const struct nvkm_subdev_func
+gk20a_pmu = {
+	.init = gk20a_pmu_init,
+	.fini = gk20a_pmu_fini,
+	.dtor = gk20a_pmu_dtor,
+};
+
+int
+gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
 {
-	struct gk20a_pmu_priv *priv;
-	int ret;
+	static const struct nvkm_pmu_func func = {};
+	struct gk20a_pmu *pmu;
 
-	ret = nvkm_pmu_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
+		return -ENOMEM;
+	pmu->base.func = &func;
+	*ppmu = &pmu->base;
 
-	priv->data = &gk20a_dvfs_data;
-
-	nvkm_alarm_init(&priv->alarm, gk20a_pmu_dvfs_work);
+	nvkm_subdev_ctor(&gk20a_pmu, device, index, 0, &pmu->base.subdev);
+	pmu->data = &gk20a_dvfs_data;
+	nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
 	return 0;
 }
-
-struct nvkm_oclass *
-gk20a_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xea),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = gk20a_pmu_init,
-		.fini = gk20a_pmu_fini,
-	},
-}.base;
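
The gk20a DVFS worker smooths utilization with an integer exponential moving average, avg_load = (p_smooth * avg_load + utilization) / (p_smooth + 1); with the table's p_smooth = 1 this is an even blend, so avg_load = 40 and a fresh sample of 80 settle at (1 * 40 + 80) / 2 = 60. Note also how the object dance changes: the old nvkm_object casts give way to the gk20a_pmu() container_of() accessor defined at the top of the file.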
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
new file mode 100644
index 0000000..31b8692
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#define gk208_pmu_code gm107_pmu_code
+#define gk208_pmu_data gm107_pmu_data
+#include "fuc/gk208.fuc5.h"
+
+static const struct nvkm_pmu_func
+gm107_pmu = {
+	.code.data = gm107_pmu_code,
+	.code.size = sizeof(gm107_pmu_code),
+	.data.data = gm107_pmu_data,
+	.data.size = sizeof(gm107_pmu_data),
+};
+
+int
+gm107_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gm107_pmu, device, index, ppmu);
+}
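
gm107 carries no PMU code of its own; the #define lines alias the symbols before including the gk208 firmware header, so the gk208_pmu_code[]/gk208_pmu_data[] arrays that header emits are compiled under gm107_* names and the chip simply reuses the gk208 ucode image.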
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index 30aaeb2..8ba7fa4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -24,26 +24,25 @@
 #include "priv.h"
 #include "fuc/gt215.fuc3.h"
 
-static int
-gt215_pmu_init(struct nvkm_object *object)
+static void
+gt215_pmu_reset(struct nvkm_pmu *pmu)
 {
-	struct nvkm_pmu *pmu = (void *)object;
-	nv_mask(pmu, 0x022210, 0x00000001, 0x00000000);
-	nv_mask(pmu, 0x022210, 0x00000001, 0x00000001);
-	return nvkm_pmu_init(pmu);
+	struct nvkm_device *device = pmu->subdev.device;
+	nvkm_mask(device, 0x022210, 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x022210, 0x00000001, 0x00000001);
 }
 
-struct nvkm_oclass *
-gt215_pmu_oclass = &(struct nvkm_pmu_impl) {
-	.base.handle = NV_SUBDEV(PMU, 0xa3),
-	.base.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = _nvkm_pmu_ctor,
-		.dtor = _nvkm_pmu_dtor,
-		.init = gt215_pmu_init,
-		.fini = _nvkm_pmu_fini,
-	},
+static const struct nvkm_pmu_func
+gt215_pmu = {
+	.reset = gt215_pmu_reset,
 	.code.data = gt215_pmu_code,
 	.code.size = sizeof(gt215_pmu_code),
 	.data.data = gt215_pmu_data,
 	.data.size = sizeof(gt215_pmu_data),
-}.base;
+};
+
+int
+gt215_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
+{
+	return nvkm_pmu_new_(&gt215_pmu, device, index, ppmu);
+}
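
For gt215, the register strobe that used to live in gt215_pmu_init() (toggling bit 0 of 0x022210) becomes a .reset hook in nvkm_pmu_func; the base code, not shown in this diff, presumably invokes it before uploading the ucode, so chip code no longer has to override init wholesale just to reset the falcon.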
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
index b75c5b8..e6f7416 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
@@ -2,8 +2,6 @@
 #define __NVKM_PMU_MEMX_H__
 #include "priv.h"
 
-#include <core/device.h>
-
 struct nvkm_memx {
 	struct nvkm_pmu *pmu;
 	u32 base;
@@ -18,13 +16,13 @@
 static void
 memx_out(struct nvkm_memx *memx)
 {
-	struct nvkm_pmu *pmu = memx->pmu;
+	struct nvkm_device *device = memx->pmu->subdev.device;
 	int i;
 
 	if (memx->c.mthd) {
-		nv_wr32(pmu, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
+		nvkm_wr32(device, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
 		for (i = 0; i < memx->c.size; i++)
-			nv_wr32(pmu, 0x10a1c4, memx->c.data[i]);
+			nvkm_wr32(device, 0x10a1c4, memx->c.data[i]);
 		memx->c.mthd = 0;
 		memx->c.size = 0;
 	}
@@ -44,12 +42,13 @@
 int
 nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx)
 {
+	struct nvkm_device *device = pmu->subdev.device;
 	struct nvkm_memx *memx;
 	u32 reply[2];
 	int ret;
 
-	ret = pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
-			   MEMX_INFO_DATA, 0);
+	ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
+			    MEMX_INFO_DATA, 0);
 	if (ret)
 		return ret;
 
@@ -62,9 +61,9 @@
 
 	/* acquire data segment access */
 	do {
-		nv_wr32(pmu, 0x10a580, 0x00000003);
-	} while (nv_rd32(pmu, 0x10a580) != 0x00000003);
-	nv_wr32(pmu, 0x10a1c0, 0x01000000 | memx->base);
+		nvkm_wr32(device, 0x10a580, 0x00000003);
+	} while (nvkm_rd32(device, 0x10a580) != 0x00000003);
+	nvkm_wr32(device, 0x10a1c0, 0x01000000 | memx->base);
 	return 0;
 }
 
@@ -73,23 +72,25 @@
 {
 	struct nvkm_memx *memx = *pmemx;
 	struct nvkm_pmu *pmu = memx->pmu;
+	struct nvkm_subdev *subdev = &pmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 finish, reply[2];
 
 	/* flush the cache... */
 	memx_out(memx);
 
 	/* release data segment access */
-	finish = nv_rd32(pmu, 0x10a1c0) & 0x00ffffff;
-	nv_wr32(pmu, 0x10a580, 0x00000000);
+	finish = nvkm_rd32(device, 0x10a1c0) & 0x00ffffff;
+	nvkm_wr32(device, 0x10a580, 0x00000000);
 
 	/* call MEMX process to execute the script, and wait for reply */
 	if (exec) {
-		pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
-			     memx->base, finish);
+		nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
+			      memx->base, finish);
 	}
 
-	nv_debug(memx->pmu, "Exec took %uns, PMU_IN %08x\n",
-		 reply[0], reply[1]);
+	nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
+		   reply[0], reply[1]);
 	kfree(memx);
 	return 0;
 }
@@ -97,7 +98,7 @@
 void
 nvkm_memx_wr32(struct nvkm_memx *memx, u32 addr, u32 data)
 {
-	nv_debug(memx->pmu, "R[%06x] = 0x%08x\n", addr, data);
+	nvkm_debug(&memx->pmu->subdev, "R[%06x] = %08x\n", addr, data);
 	memx_cmd(memx, MEMX_WR32, 2, (u32[]){ addr, data });
 }
 
@@ -105,8 +106,8 @@
 nvkm_memx_wait(struct nvkm_memx *memx,
 		  u32 addr, u32 mask, u32 data, u32 nsec)
 {
-	nv_debug(memx->pmu, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
-				addr, mask, data, nsec);
+	nvkm_debug(&memx->pmu->subdev, "R[%06x] & %08x == %08x, %d us\n",
+		   addr, mask, data, nsec);
 	memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
 	memx_out(memx); /* fuc can't handle multiple */
 }
@@ -114,7 +115,7 @@
 void
 nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
 {
-	nv_debug(memx->pmu, "    DELAY = %d ns\n", nsec);
+	nvkm_debug(&memx->pmu->subdev, "    DELAY = %d ns\n", nsec);
 	memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
 	memx_out(memx); /* fuc can't handle multiple */
 }
@@ -122,16 +123,17 @@
 void
 nvkm_memx_wait_vblank(struct nvkm_memx *memx)
 {
-	struct nvkm_pmu *pmu = memx->pmu;
+	struct nvkm_subdev *subdev = &memx->pmu->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 heads, x, y, px = 0;
 	int i, head_sync;
 
-	if (nv_device(pmu)->chipset < 0xd0) {
-		heads = nv_rd32(pmu, 0x610050);
+	if (device->chipset < 0xd0) {
+		heads = nvkm_rd32(device, 0x610050);
 		for (i = 0; i < 2; i++) {
 			/* Heuristic: sync to head with biggest resolution */
 			if (heads & (2 << (i << 3))) {
-				x = nv_rd32(pmu, 0x610b40 + (0x540 * i));
+				x = nvkm_rd32(device, 0x610b40 + (0x540 * i));
 				y = (x & 0xffff0000) >> 16;
 				x &= 0x0000ffff;
 				if ((x * y) > px) {
@@ -143,11 +145,11 @@
 	}
 
 	if (px == 0) {
-		nv_debug(memx->pmu, "WAIT VBLANK !NO ACTIVE HEAD\n");
+		nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
 		return;
 	}
 
-	nv_debug(memx->pmu, "WAIT VBLANK HEAD%d\n", head_sync);
+	nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
 	memx_cmd(memx, MEMX_VBLANK, 1, (u32[]){ head_sync });
 	memx_out(memx); /* fuc can't handle multiple */
 }
@@ -155,18 +157,19 @@
 void
 nvkm_memx_train(struct nvkm_memx *memx)
 {
-	nv_debug(memx->pmu, "   MEM TRAIN\n");
+	nvkm_debug(&memx->pmu->subdev, "   MEM TRAIN\n");
 	memx_cmd(memx, MEMX_TRAIN, 0, NULL);
 }
 
 int
 nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
 {
+	struct nvkm_device *device = pmu->subdev.device;
 	u32 reply[2], base, size, i;
 	int ret;
 
-	ret = pmu->message(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
-			   MEMX_INFO_TRAIN, 0);
+	ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
+			    MEMX_INFO_TRAIN, 0);
 	if (ret)
 		return ret;
 
@@ -176,10 +179,10 @@
 		return -ENOMEM;
 
 	/* read the packet */
-	nv_wr32(pmu, 0x10a1c0, 0x02000000 | base);
+	nvkm_wr32(device, 0x10a1c0, 0x02000000 | base);
 
 	for (i = 0; i < size; i++)
-		res[i] = nv_rd32(pmu, 0x10a1c4);
+		res[i] = nvkm_rd32(device, 0x10a1c4);
 
 	return 0;
 }
@@ -187,14 +190,14 @@
 void
 nvkm_memx_block(struct nvkm_memx *memx)
 {
-	nv_debug(memx->pmu, "   HOST BLOCKED\n");
+	nvkm_debug(&memx->pmu->subdev, "   HOST BLOCKED\n");
 	memx_cmd(memx, MEMX_ENTER, 0, NULL);
 }
 
 void
 nvkm_memx_unblock(struct nvkm_memx *memx)
 {
-	nv_debug(memx->pmu, "   HOST UNBLOCKED\n");
+	nvkm_debug(&memx->pmu->subdev, "   HOST UNBLOCKED\n");
 	memx_cmd(memx, MEMX_LEAVE, 0, NULL);
 }
 #endif
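
memx scripts keep the same flow -- open a script, queue commands, then hand it to the PMU -- but now reach the PMU through nvkm_pmu_send() instead of the removed pmu->message pointer. A hedged usage sketch built from the signatures above (the register addresses are placeholders, and the nvkm_memx_fini() prototype is inferred from its body):

	struct nvkm_memx *memx;
	int ret;

	ret = nvkm_memx_init(pmu, &memx);
	if (ret)
		return ret;
	nvkm_memx_wr32(memx, 0x100200, 0x00000001);	/* queue a write */
	nvkm_memx_wait(memx, 0x100200, 0x80000000,	/* wait for ack */
		       0x80000000, 1000);
	nvkm_memx_nsec(memx, 2000);			/* settle delay */
	return nvkm_memx_fini(&memx, true);		/* execute on PMU */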
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 799e7c8..f38c88f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -1,38 +1,20 @@
 #ifndef __NVKM_PMU_PRIV_H__
 #define __NVKM_PMU_PRIV_H__
+#define nvkm_pmu(p) container_of((p), struct nvkm_pmu, subdev)
 #include <subdev/pmu.h>
 #include <subdev/pmu/fuc/os.h>
 
-#define nvkm_pmu_create(p, e, o, d)                                         \
-	nvkm_pmu_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nvkm_pmu_destroy(p)                                                 \
-	nvkm_subdev_destroy(&(p)->base)
-#define nvkm_pmu_init(p) ({                                                 \
-	struct nvkm_pmu *_pmu = (p);                                       \
-	_nvkm_pmu_init(nv_object(_pmu));                                   \
-})
-#define nvkm_pmu_fini(p,s) ({                                               \
-	struct nvkm_pmu *_pmu = (p);                                       \
-	_nvkm_pmu_fini(nv_object(_pmu), (s));                              \
-})
+int nvkm_pmu_new_(const struct nvkm_pmu_func *, struct nvkm_device *,
+		  int index, struct nvkm_pmu **);
 
-int nvkm_pmu_create_(struct nvkm_object *, struct nvkm_object *,
-			struct nvkm_oclass *, int, void **);
+struct nvkm_pmu_func {
+	void (*reset)(struct nvkm_pmu *);
 
-int _nvkm_pmu_ctor(struct nvkm_object *, struct nvkm_object *,
-		      struct nvkm_oclass *, void *, u32,
-		      struct nvkm_object **);
-#define _nvkm_pmu_dtor _nvkm_subdev_dtor
-int _nvkm_pmu_init(struct nvkm_object *);
-int _nvkm_pmu_fini(struct nvkm_object *, bool);
-void nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable);
-
-struct nvkm_pmu_impl {
-	struct nvkm_oclass base;
 	struct {
 		u32 *data;
 		u32  size;
 	} code;
+
 	struct {
 		u32 *data;
 		u32  size;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
index 5837cf1..135758b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
@@ -9,5 +9,5 @@
 nvkm-y += nvkm/subdev/therm/nv50.o
 nvkm-y += nvkm/subdev/therm/g84.o
 nvkm-y += nvkm/subdev/therm/gt215.o
-nvkm-y += nvkm/subdev/therm/gf110.o
+nvkm-y += nvkm/subdev/therm/gf119.o
 nvkm-y += nvkm/subdev/therm/gm107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index ec327cb..949dc61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -23,21 +23,26 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
+int
+nvkm_therm_temp_get(struct nvkm_therm *therm)
+{
+	if (therm->func->temp_get)
+		return therm->func->temp_get(therm);
+	return -ENODEV;
+}
 
 static int
 nvkm_therm_update_trip(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_trip_point *trip = priv->fan->bios.trip,
+	struct nvbios_therm_trip_point *trip = therm->fan->bios.trip,
 				       *cur_trip = NULL,
-				       *last_trip = priv->last_trip;
-	u8  temp = therm->temp_get(therm);
+				       *last_trip = therm->last_trip;
+	u8  temp = therm->func->temp_get(therm);
 	u16 duty, i;
 
 	/* look for the trip point corresponding to the current temperature */
 	cur_trip = NULL;
-	for (i = 0; i < priv->fan->bios.nr_fan_trip; i++) {
+	for (i = 0; i < therm->fan->bios.nr_fan_trip; i++) {
 		if (temp >= trip[i].temp)
 			cur_trip = &trip[i];
 	}
@@ -49,10 +54,10 @@
 
 	if (cur_trip) {
 		duty = cur_trip->fan_duty;
-		priv->last_trip = cur_trip;
+		therm->last_trip = cur_trip;
 	} else {
 		duty = 0;
-		priv->last_trip = NULL;
+		therm->last_trip = NULL;
 	}
 
 	return duty;
@@ -61,51 +66,50 @@
 static int
 nvkm_therm_update_linear(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	u8  linear_min_temp = priv->fan->bios.linear_min_temp;
-	u8  linear_max_temp = priv->fan->bios.linear_max_temp;
-	u8  temp = therm->temp_get(therm);
+	u8  linear_min_temp = therm->fan->bios.linear_min_temp;
+	u8  linear_max_temp = therm->fan->bios.linear_max_temp;
+	u8  temp = therm->func->temp_get(therm);
 	u16 duty;
 
 	/* handle the non-linear part first */
 	if (temp < linear_min_temp)
-		return priv->fan->bios.min_duty;
+		return therm->fan->bios.min_duty;
 	else if (temp > linear_max_temp)
-		return priv->fan->bios.max_duty;
+		return therm->fan->bios.max_duty;
 
 	/* we are in the linear zone */
 	duty  = (temp - linear_min_temp);
-	duty *= (priv->fan->bios.max_duty - priv->fan->bios.min_duty);
+	duty *= (therm->fan->bios.max_duty - therm->fan->bios.min_duty);
 	duty /= (linear_max_temp - linear_min_temp);
-	duty += priv->fan->bios.min_duty;
+	duty += therm->fan->bios.min_duty;
 	return duty;
 }
 
 static void
 nvkm_therm_update(struct nvkm_therm *therm, int mode)
 {
-	struct nvkm_timer *ptimer = nvkm_timer(therm);
-	struct nvkm_therm_priv *priv = (void *)therm;
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_timer *tmr = subdev->device->timer;
 	unsigned long flags;
 	bool immd = true;
 	bool poll = true;
 	int duty = -1;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&therm->lock, flags);
 	if (mode < 0)
-		mode = priv->mode;
-	priv->mode = mode;
+		mode = therm->mode;
+	therm->mode = mode;
 
 	switch (mode) {
 	case NVKM_THERM_CTRL_MANUAL:
-		ptimer->alarm_cancel(ptimer, &priv->alarm);
+		nvkm_timer_alarm_cancel(tmr, &therm->alarm);
 		duty = nvkm_therm_fan_get(therm);
 		if (duty < 0)
 			duty = 100;
 		poll = false;
 		break;
 	case NVKM_THERM_CTRL_AUTO:
-		switch(priv->fan->bios.fan_mode) {
+		switch(therm->fan->bios.fan_mode) {
 		case NVBIOS_THERM_FAN_TRIP:
 			duty = nvkm_therm_update_trip(therm);
 			break;
@@ -113,8 +117,8 @@
 			duty = nvkm_therm_update_linear(therm);
 			break;
 		case NVBIOS_THERM_FAN_OTHER:
-			if (priv->cstate)
-				duty = priv->cstate;
+			if (therm->cstate)
+				duty = therm->cstate;
 			poll = false;
 			break;
 		}
@@ -122,29 +126,29 @@
 		break;
 	case NVKM_THERM_CTRL_NONE:
 	default:
-		ptimer->alarm_cancel(ptimer, &priv->alarm);
+		nvkm_timer_alarm_cancel(tmr, &therm->alarm);
 		poll = false;
 	}
 
-	if (list_empty(&priv->alarm.head) && poll)
-		ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm);
-	spin_unlock_irqrestore(&priv->lock, flags);
+	if (list_empty(&therm->alarm.head) && poll)
+		nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
+	spin_unlock_irqrestore(&therm->lock, flags);
 
 	if (duty >= 0) {
-		nv_debug(therm, "FAN target request: %d%%\n", duty);
+		nvkm_debug(subdev, "FAN target request: %d%%\n", duty);
 		nvkm_therm_fan_set(therm, immd, duty);
 	}
 }
 
 int
-nvkm_therm_cstate(struct nvkm_therm *ptherm, int fan, int dir)
+nvkm_therm_cstate(struct nvkm_therm *therm, int fan, int dir)
 {
-	struct nvkm_therm_priv *priv = (void *)ptherm;
-	if (!dir || (dir < 0 && fan < priv->cstate) ||
-		    (dir > 0 && fan > priv->cstate)) {
-		nv_debug(ptherm, "default fan speed -> %d%%\n", fan);
-		priv->cstate = fan;
-		nvkm_therm_update(ptherm, -1);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	if (!dir || (dir < 0 && fan < therm->cstate) ||
+		    (dir > 0 && fan > therm->cstate)) {
+		nvkm_debug(subdev, "default fan speed -> %d%%\n", fan);
+		therm->cstate = fan;
+		nvkm_therm_update(therm, -1);
 	}
 	return 0;
 }
@@ -152,16 +156,16 @@
 static void
 nvkm_therm_alarm(struct nvkm_alarm *alarm)
 {
-	struct nvkm_therm_priv *priv =
-	       container_of(alarm, struct nvkm_therm_priv, alarm);
-	nvkm_therm_update(&priv->base, -1);
+	struct nvkm_therm *therm =
+	       container_of(alarm, struct nvkm_therm, alarm);
+	nvkm_therm_update(therm, -1);
 }
 
 int
 nvkm_therm_fan_mode(struct nvkm_therm *therm, int mode)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_device *device = nv_device(therm);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
 	static const char *name[] = {
 		"disabled",
 		"manual",
@@ -171,51 +175,49 @@
 	/* The default PPWR ucode on fermi interferes with fan management */
 	if ((mode >= ARRAY_SIZE(name)) ||
 	    (mode != NVKM_THERM_CTRL_NONE && device->card_type >= NV_C0 &&
-	     !nvkm_subdev(device, NVDEV_SUBDEV_PMU)))
+	     !device->pmu))
 		return -EINVAL;
 
 	/* do not allow automatic fan management if the thermal sensor is
 	 * not available */
-	if (mode == NVKM_THERM_CTRL_AUTO && therm->temp_get(therm) < 0)
+	if (mode == NVKM_THERM_CTRL_AUTO &&
+	    therm->func->temp_get(therm) < 0)
 		return -EINVAL;
 
-	if (priv->mode == mode)
+	if (therm->mode == mode)
 		return 0;
 
-	nv_info(therm, "fan management: %s\n", name[mode]);
+	nvkm_debug(subdev, "fan management: %s\n", name[mode]);
 	nvkm_therm_update(therm, mode);
 	return 0;
 }
 
 int
-nvkm_therm_attr_get(struct nvkm_therm *therm,
-		       enum nvkm_therm_attr_type type)
+nvkm_therm_attr_get(struct nvkm_therm *therm, enum nvkm_therm_attr_type type)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-
 	switch (type) {
 	case NVKM_THERM_ATTR_FAN_MIN_DUTY:
-		return priv->fan->bios.min_duty;
+		return therm->fan->bios.min_duty;
 	case NVKM_THERM_ATTR_FAN_MAX_DUTY:
-		return priv->fan->bios.max_duty;
+		return therm->fan->bios.max_duty;
 	case NVKM_THERM_ATTR_FAN_MODE:
-		return priv->mode;
+		return therm->mode;
 	case NVKM_THERM_ATTR_THRS_FAN_BOOST:
-		return priv->bios_sensor.thrs_fan_boost.temp;
+		return therm->bios_sensor.thrs_fan_boost.temp;
 	case NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST:
-		return priv->bios_sensor.thrs_fan_boost.hysteresis;
+		return therm->bios_sensor.thrs_fan_boost.hysteresis;
 	case NVKM_THERM_ATTR_THRS_DOWN_CLK:
-		return priv->bios_sensor.thrs_down_clock.temp;
+		return therm->bios_sensor.thrs_down_clock.temp;
 	case NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST:
-		return priv->bios_sensor.thrs_down_clock.hysteresis;
+		return therm->bios_sensor.thrs_down_clock.hysteresis;
 	case NVKM_THERM_ATTR_THRS_CRITICAL:
-		return priv->bios_sensor.thrs_critical.temp;
+		return therm->bios_sensor.thrs_critical.temp;
 	case NVKM_THERM_ATTR_THRS_CRITICAL_HYST:
-		return priv->bios_sensor.thrs_critical.hysteresis;
+		return therm->bios_sensor.thrs_critical.hysteresis;
 	case NVKM_THERM_ATTR_THRS_SHUTDOWN:
-		return priv->bios_sensor.thrs_shutdown.temp;
+		return therm->bios_sensor.thrs_shutdown.temp;
 	case NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST:
-		return priv->bios_sensor.thrs_shutdown.hysteresis;
+		return therm->bios_sensor.thrs_shutdown.hysteresis;
 	}
 
 	return -EINVAL;
@@ -225,143 +227,156 @@
 nvkm_therm_attr_set(struct nvkm_therm *therm,
 		    enum nvkm_therm_attr_type type, int value)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-
 	switch (type) {
 	case NVKM_THERM_ATTR_FAN_MIN_DUTY:
 		if (value < 0)
 			value = 0;
-		if (value > priv->fan->bios.max_duty)
-			value = priv->fan->bios.max_duty;
-		priv->fan->bios.min_duty = value;
+		if (value > therm->fan->bios.max_duty)
+			value = therm->fan->bios.max_duty;
+		therm->fan->bios.min_duty = value;
 		return 0;
 	case NVKM_THERM_ATTR_FAN_MAX_DUTY:
 		if (value < 0)
 			value = 0;
-		if (value < priv->fan->bios.min_duty)
-			value = priv->fan->bios.min_duty;
-		priv->fan->bios.max_duty = value;
+		if (value < therm->fan->bios.min_duty)
+			value = therm->fan->bios.min_duty;
+		therm->fan->bios.max_duty = value;
 		return 0;
 	case NVKM_THERM_ATTR_FAN_MODE:
 		return nvkm_therm_fan_mode(therm, value);
 	case NVKM_THERM_ATTR_THRS_FAN_BOOST:
-		priv->bios_sensor.thrs_fan_boost.temp = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_fan_boost.temp = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST:
-		priv->bios_sensor.thrs_fan_boost.hysteresis = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_fan_boost.hysteresis = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_DOWN_CLK:
-		priv->bios_sensor.thrs_down_clock.temp = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_down_clock.temp = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST:
-		priv->bios_sensor.thrs_down_clock.hysteresis = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_down_clock.hysteresis = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_CRITICAL:
-		priv->bios_sensor.thrs_critical.temp = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_critical.temp = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_CRITICAL_HYST:
-		priv->bios_sensor.thrs_critical.hysteresis = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_critical.hysteresis = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_SHUTDOWN:
-		priv->bios_sensor.thrs_shutdown.temp = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_shutdown.temp = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	case NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST:
-		priv->bios_sensor.thrs_shutdown.hysteresis = value;
-		priv->sensor.program_alarms(therm);
+		therm->bios_sensor.thrs_shutdown.hysteresis = value;
+		therm->func->program_alarms(therm);
 		return 0;
 	}
 
 	return -EINVAL;
 }
 
-int
-_nvkm_therm_init(struct nvkm_object *object)
+static void
+nvkm_therm_intr(struct nvkm_subdev *subdev)
 {
-	struct nvkm_therm *therm = (void *)object;
-	struct nvkm_therm_priv *priv = (void *)therm;
-	int ret;
-
-	ret = nvkm_subdev_init(&therm->base);
-	if (ret)
-		return ret;
-
-	if (priv->suspend >= 0) {
-		/* restore the pwm value only when on manual or auto mode */
-		if (priv->suspend > 0)
-			nvkm_therm_fan_set(therm, true, priv->fan->percent);
-
-		nvkm_therm_fan_mode(therm, priv->suspend);
-	}
-	nvkm_therm_sensor_init(therm);
-	nvkm_therm_fan_init(therm);
-	return 0;
+	struct nvkm_therm *therm = nvkm_therm(subdev);
+	if (therm->func->intr)
+		therm->func->intr(therm);
 }
 
-int
-_nvkm_therm_fini(struct nvkm_object *object, bool suspend)
+static int
+nvkm_therm_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	struct nvkm_therm *therm = (void *)object;
-	struct nvkm_therm_priv *priv = (void *)therm;
+	struct nvkm_therm *therm = nvkm_therm(subdev);
+
+	if (therm->func->fini)
+		therm->func->fini(therm);
 
 	nvkm_therm_fan_fini(therm, suspend);
 	nvkm_therm_sensor_fini(therm, suspend);
+
 	if (suspend) {
-		priv->suspend = priv->mode;
-		priv->mode = NVKM_THERM_CTRL_NONE;
+		therm->suspend = therm->mode;
+		therm->mode = NVKM_THERM_CTRL_NONE;
 	}
 
-	return nvkm_subdev_fini(&therm->base, suspend);
-}
-
-int
-nvkm_therm_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		   struct nvkm_oclass *oclass, int length, void **pobject)
-{
-	struct nvkm_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "PTHERM",
-				  "therm", length, pobject);
-	priv = *pobject;
-	if (ret)
-		return ret;
-
-	nvkm_alarm_init(&priv->alarm, nvkm_therm_alarm);
-	spin_lock_init(&priv->lock);
-	spin_lock_init(&priv->sensor.alarm_program_lock);
-
-	priv->base.fan_get = nvkm_therm_fan_user_get;
-	priv->base.fan_set = nvkm_therm_fan_user_set;
-	priv->base.fan_sense = nvkm_therm_fan_sense;
-	priv->base.attr_get = nvkm_therm_attr_get;
-	priv->base.attr_set = nvkm_therm_attr_set;
-	priv->mode = priv->suspend = -1; /* undefined */
 	return 0;
 }
 
-int
-nvkm_therm_preinit(struct nvkm_therm *therm)
+static int
+nvkm_therm_oneinit(struct nvkm_subdev *subdev)
 {
+	struct nvkm_therm *therm = nvkm_therm(subdev);
 	nvkm_therm_sensor_ctor(therm);
 	nvkm_therm_ic_ctor(therm);
 	nvkm_therm_fan_ctor(therm);
-
 	nvkm_therm_fan_mode(therm, NVKM_THERM_CTRL_AUTO);
 	nvkm_therm_sensor_preinit(therm);
 	return 0;
 }
 
-void
-_nvkm_therm_dtor(struct nvkm_object *object)
+static int
+nvkm_therm_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_therm_priv *priv = (void *)object;
-	kfree(priv->fan);
-	nvkm_subdev_destroy(&priv->base.base);
+	struct nvkm_therm *therm = nvkm_therm(subdev);
+
+	therm->func->init(therm);
+
+	if (therm->suspend >= 0) {
+		/* restore the pwm value only when on manual or auto mode */
+		if (therm->suspend > 0)
+			nvkm_therm_fan_set(therm, true, therm->fan->percent);
+
+		nvkm_therm_fan_mode(therm, therm->suspend);
+	}
+
+	nvkm_therm_sensor_init(therm);
+	nvkm_therm_fan_init(therm);
+	return 0;
+}
+
+static void *
+nvkm_therm_dtor(struct nvkm_subdev *subdev)
+{
+	struct nvkm_therm *therm = nvkm_therm(subdev);
+	kfree(therm->fan);
+	return therm;
+}
+
+static const struct nvkm_subdev_func
+nvkm_therm = {
+	.dtor = nvkm_therm_dtor,
+	.oneinit = nvkm_therm_oneinit,
+	.init = nvkm_therm_init,
+	.fini = nvkm_therm_fini,
+	.intr = nvkm_therm_intr,
+};
+
+int
+nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
+		int index, struct nvkm_therm **ptherm)
+{
+	struct nvkm_therm *therm;
+
+	if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_subdev_ctor(&nvkm_therm, device, index, 0, &therm->subdev);
+	therm->func = func;
+
+	nvkm_alarm_init(&therm->alarm, nvkm_therm_alarm);
+	spin_lock_init(&therm->lock);
+	spin_lock_init(&therm->sensor.alarm_program_lock);
+
+	therm->fan_get = nvkm_therm_fan_user_get;
+	therm->fan_set = nvkm_therm_fan_user_set;
+	therm->attr_get = nvkm_therm_attr_get;
+	therm->attr_set = nvkm_therm_attr_set;
+	therm->mode = therm->suspend = -1; /* undefined */
+	return 0;
 }
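
nvkm_therm_update_linear() interpolates fan duty across the BIOS window [linear_min_temp, linear_max_temp]. With the defaults installed in fan.c below (min_duty 0, max_duty 100, window 40..85 degC), a 60 degC reading gives duty = (60 - 40) * (100 - 0) / (85 - 40) + 0 = 44% after integer truncation; anything below 40 degC pins to min_duty, anything above 85 degC to max_duty.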
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
index 434fa74..91198d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
@@ -32,8 +32,8 @@
 nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
 {
 	struct nvkm_therm *therm = fan->parent;
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_timer *ptimer = nvkm_timer(priv);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_timer *tmr = subdev->device->timer;
 	unsigned long flags;
 	int ret = 0;
 	int duty;
@@ -45,7 +45,7 @@
 	target = max_t(u8, target, fan->bios.min_duty);
 	target = min_t(u8, target, fan->bios.max_duty);
 	if (fan->percent != target) {
-		nv_debug(therm, "FAN target: %d\n", target);
+		nvkm_debug(subdev, "FAN target: %d\n", target);
 		fan->percent = target;
 	}
 
@@ -70,7 +70,7 @@
 		duty = target;
 	}
 
-	nv_debug(therm, "FAN update: %d\n", duty);
+	nvkm_debug(subdev, "FAN update: %d\n", duty);
 	ret = fan->set(therm, duty);
 	if (ret) {
 		spin_unlock_irqrestore(&fan->lock, flags);
@@ -95,7 +95,7 @@
 		else
 			delay = bump_period;
 
-		ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm);
+		nvkm_timer_alarm(tmr, delay * 1000 * 1000, &fan->alarm);
 	}
 
 	return ret;
@@ -111,48 +111,51 @@
 int
 nvkm_therm_fan_get(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	return priv->fan->get(therm);
+	return therm->fan->get(therm);
 }
 
 int
 nvkm_therm_fan_set(struct nvkm_therm *therm, bool immediate, int percent)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	return nvkm_fan_update(priv->fan, immediate, percent);
+	return nvkm_fan_update(therm->fan, immediate, percent);
 }
 
 int
 nvkm_therm_fan_sense(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_timer *ptimer = nvkm_timer(therm);
-	struct nvkm_gpio *gpio = nvkm_gpio(therm);
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvkm_timer *tmr = device->timer;
+	struct nvkm_gpio *gpio = device->gpio;
 	u32 cycles, cur, prev;
 	u64 start, end, tach;
 
-	if (priv->fan->tach.func == DCB_GPIO_UNUSED)
+	if (therm->func->fan_sense)
+		return therm->func->fan_sense(therm);
+
+	if (therm->fan->tach.func == DCB_GPIO_UNUSED)
 		return -ENODEV;
 
 	/* Time a complete rotation and extrapolate to RPM:
 	 * When the fan spins, it changes the value of GPIO FAN_SENSE.
 	 * We get 4 changes (0 -> 1 -> 0 -> 1) per complete rotation.
 	 */
-	start = ptimer->read(ptimer);
-	prev = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
+	start = nvkm_timer_read(tmr);
+	prev = nvkm_gpio_get(gpio, 0, therm->fan->tach.func,
+				      therm->fan->tach.line);
 	cycles = 0;
 	do {
 		usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
 
-		cur = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
+		cur = nvkm_gpio_get(gpio, 0, therm->fan->tach.func,
+					     therm->fan->tach.line);
 		if (prev != cur) {
 			if (!start)
-				start = ptimer->read(ptimer);
+				start = nvkm_timer_read(tmr);
 			cycles++;
 			prev = cur;
 		}
-	} while (cycles < 5 && ptimer->read(ptimer) - start < 250000000);
-	end = ptimer->read(ptimer);
+	} while (cycles < 5 && nvkm_timer_read(tmr) - start < 250000000);
+	end = nvkm_timer_read(tmr);
 
 	if (cycles == 5) {
 		tach = (u64)60000000000ULL;
@@ -171,9 +174,7 @@
 int
 nvkm_therm_fan_user_set(struct nvkm_therm *therm, int percent)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-
-	if (priv->mode != NVKM_THERM_CTRL_MANUAL)
+	if (therm->mode != NVKM_THERM_CTRL_MANUAL)
 		return -EINVAL;
 
 	return nvkm_therm_fan_set(therm, true, percent);
@@ -182,29 +183,25 @@
 static void
 nvkm_therm_fan_set_defaults(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-
-	priv->fan->bios.pwm_freq = 0;
-	priv->fan->bios.min_duty = 0;
-	priv->fan->bios.max_duty = 100;
-	priv->fan->bios.bump_period = 500;
-	priv->fan->bios.slow_down_period = 2000;
-	priv->fan->bios.linear_min_temp = 40;
-	priv->fan->bios.linear_max_temp = 85;
+	therm->fan->bios.pwm_freq = 0;
+	therm->fan->bios.min_duty = 0;
+	therm->fan->bios.max_duty = 100;
+	therm->fan->bios.bump_period = 500;
+	therm->fan->bios.slow_down_period = 2000;
+	therm->fan->bios.linear_min_temp = 40;
+	therm->fan->bios.linear_max_temp = 85;
 }
 
 static void
 nvkm_therm_fan_safety_checks(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
+	if (therm->fan->bios.min_duty > 100)
+		therm->fan->bios.min_duty = 100;
+	if (therm->fan->bios.max_duty > 100)
+		therm->fan->bios.max_duty = 100;
 
-	if (priv->fan->bios.min_duty > 100)
-		priv->fan->bios.min_duty = 100;
-	if (priv->fan->bios.max_duty > 100)
-		priv->fan->bios.max_duty = 100;
-
-	if (priv->fan->bios.min_duty > priv->fan->bios.max_duty)
-		priv->fan->bios.min_duty = priv->fan->bios.max_duty;
+	if (therm->fan->bios.min_duty > therm->fan->bios.max_duty)
+		therm->fan->bios.min_duty = therm->fan->bios.max_duty;
 }
 
 int
@@ -216,29 +213,28 @@
 int
 nvkm_therm_fan_fini(struct nvkm_therm *therm, bool suspend)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_timer *ptimer = nvkm_timer(therm);
-
+	struct nvkm_timer *tmr = therm->subdev.device->timer;
 	if (suspend)
-		ptimer->alarm_cancel(ptimer, &priv->fan->alarm);
+		nvkm_timer_alarm_cancel(tmr, &therm->fan->alarm);
 	return 0;
 }
 
 int
 nvkm_therm_fan_ctor(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_gpio *gpio = nvkm_gpio(therm);
-	struct nvkm_bios *bios = nvkm_bios(therm);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvkm_gpio *gpio = device->gpio;
+	struct nvkm_bios *bios = device->bios;
 	struct dcb_gpio_func func;
 	int ret;
 
 	/* attempt to locate a drivable fan, and determine control method */
-	ret = gpio->find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
+	ret = nvkm_gpio_find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
 	if (ret == 0) {
 		/* FIXME: is this really the place to perform such checks ? */
 		if (func.line != 16 && func.log[0] & DCB_GPIO_LOG_DIR_IN) {
-			nv_debug(therm, "GPIO_FAN is in input mode\n");
+			nvkm_debug(subdev, "GPIO_FAN is in input mode\n");
 			ret = -EINVAL;
 		} else {
 			ret = nvkm_fanpwm_create(therm, &func);
@@ -254,28 +250,29 @@
 			return ret;
 	}
 
-	nv_info(therm, "FAN control: %s\n", priv->fan->type);
+	nvkm_debug(subdev, "FAN control: %s\n", therm->fan->type);
 
 	/* read the current speed, it is useful when resuming */
-	priv->fan->percent = nvkm_therm_fan_get(therm);
+	therm->fan->percent = nvkm_therm_fan_get(therm);
 
 	/* attempt to detect a tachometer connection */
-	ret = gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &priv->fan->tach);
+	ret = nvkm_gpio_find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff,
+			     &therm->fan->tach);
 	if (ret)
-		priv->fan->tach.func = DCB_GPIO_UNUSED;
+		therm->fan->tach.func = DCB_GPIO_UNUSED;
 
 	/* initialise fan bump/slow update handling */
-	priv->fan->parent = therm;
-	nvkm_alarm_init(&priv->fan->alarm, nvkm_fan_alarm);
-	spin_lock_init(&priv->fan->lock);
+	therm->fan->parent = therm;
+	nvkm_alarm_init(&therm->fan->alarm, nvkm_fan_alarm);
+	spin_lock_init(&therm->fan->lock);
 
 	/* other random init... */
 	nvkm_therm_fan_set_defaults(therm);
-	nvbios_perf_fan_parse(bios, &priv->fan->perf);
-	if (!nvbios_fan_parse(bios, &priv->fan->bios)) {
-		nv_debug(therm, "parsing the fan table failed\n");
-		if (nvbios_therm_fan_parse(bios, &priv->fan->bios))
-			nv_error(therm, "parsing both fan tables failed\n");
+	nvbios_perf_fan_parse(bios, &therm->fan->perf);
+	if (!nvbios_fan_parse(bios, &therm->fan->bios)) {
+		nvkm_debug(subdev, "parsing the fan table failed\n");
+		if (nvbios_therm_fan_parse(bios, &therm->fan->bios))
+			nvkm_error(subdev, "parsing both fan tables failed\n");
 	}
 	nvkm_therm_fan_safety_checks(therm);
 	return 0;
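
The tachometer path in nvkm_therm_fan_sense() times five FAN_SENSE edges: at four transitions per rotation, five observed changes bracket roughly one full turn, so RPM falls out as 60e9 ns divided by the measured window. The closing arithmetic sits past the end of this hunk (the 60000000000ULL constant is the last visible piece), so that final step is inferred rather than quoted.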
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c
index 534e597..8ae300f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fannil.c
@@ -38,11 +38,10 @@
 int
 nvkm_fannil_create(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
 	struct nvkm_fan *priv;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	tpriv->fan = priv;
+	therm->fan = priv;
 	if (!priv)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c
index bde5cea..340f37a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fanpwm.c
@@ -24,13 +24,12 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/fan.h>
 #include <subdev/gpio.h>
 
-struct nvkm_fanpwm_priv {
+struct nvkm_fanpwm {
 	struct nvkm_fan base;
 	struct dcb_gpio_func func;
 };
@@ -38,76 +37,74 @@
 static int
 nvkm_fanpwm_get(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_fanpwm_priv *priv = (void *)tpriv->fan;
-	struct nvkm_gpio *gpio = nvkm_gpio(therm);
-	int card_type = nv_device(therm)->card_type;
+	struct nvkm_fanpwm *fan = (void *)therm->fan;
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvkm_gpio *gpio = device->gpio;
+	int card_type = device->card_type;
 	u32 divs, duty;
 	int ret;
 
-	ret = therm->pwm_get(therm, priv->func.line, &divs, &duty);
+	ret = therm->func->pwm_get(therm, fan->func.line, &divs, &duty);
 	if (ret == 0 && divs) {
 		divs = max(divs, duty);
-		if (card_type <= NV_40 || (priv->func.log[0] & 1))
+		if (card_type <= NV_40 || (fan->func.log[0] & 1))
 			duty = divs - duty;
 		return (duty * 100) / divs;
 	}
 
-	return gpio->get(gpio, 0, priv->func.func, priv->func.line) * 100;
+	return nvkm_gpio_get(gpio, 0, fan->func.func, fan->func.line) * 100;
 }
 
 static int
 nvkm_fanpwm_set(struct nvkm_therm *therm, int percent)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_fanpwm_priv *priv = (void *)tpriv->fan;
-	int card_type = nv_device(therm)->card_type;
+	struct nvkm_fanpwm *fan = (void *)therm->fan;
+	int card_type = therm->subdev.device->card_type;
 	u32 divs, duty;
 	int ret;
 
-	divs = priv->base.perf.pwm_divisor;
-	if (priv->base.bios.pwm_freq) {
+	divs = fan->base.perf.pwm_divisor;
+	if (fan->base.bios.pwm_freq) {
 		divs = 1;
-		if (therm->pwm_clock)
-			divs = therm->pwm_clock(therm, priv->func.line);
-		divs /= priv->base.bios.pwm_freq;
+		if (therm->func->pwm_clock)
+			divs = therm->func->pwm_clock(therm, fan->func.line);
+		divs /= fan->base.bios.pwm_freq;
 	}
 
 	duty = ((divs * percent) + 99) / 100;
-	if (card_type <= NV_40 || (priv->func.log[0] & 1))
+	if (card_type <= NV_40 || (fan->func.log[0] & 1))
 		duty = divs - duty;
 
-	ret = therm->pwm_set(therm, priv->func.line, divs, duty);
+	ret = therm->func->pwm_set(therm, fan->func.line, divs, duty);
 	if (ret == 0)
-		ret = therm->pwm_ctrl(therm, priv->func.line, true);
+		ret = therm->func->pwm_ctrl(therm, fan->func.line, true);
 	return ret;
 }
 
 int
 nvkm_fanpwm_create(struct nvkm_therm *therm, struct dcb_gpio_func *func)
 {
-	struct nvkm_device *device = nv_device(therm);
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_bios *bios = nvkm_bios(therm);
-	struct nvkm_fanpwm_priv *priv;
-	struct nvbios_therm_fan fan;
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_fanpwm *fan;
+	struct nvbios_therm_fan info = {};
 	u32 divs, duty;
 
-	nvbios_fan_parse(bios, &fan);
+	nvbios_fan_parse(bios, &info);
 
 	if (!nvkm_boolopt(device->cfgopt, "NvFanPWM", func->param) ||
-	    !therm->pwm_ctrl || fan.type == NVBIOS_THERM_FAN_TOGGLE ||
-	     therm->pwm_get(therm, func->line, &divs, &duty) == -ENODEV)
+	    !therm->func->pwm_ctrl || info.type == NVBIOS_THERM_FAN_TOGGLE ||
+	     therm->func->pwm_get(therm, func->line, &divs, &duty) == -ENODEV)
 		return -ENODEV;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	tpriv->fan = &priv->base;
-	if (!priv)
+	fan = kzalloc(sizeof(*fan), GFP_KERNEL);
+	therm->fan = &fan->base;
+	if (!fan)
 		return -ENOMEM;
 
-	priv->base.type = "PWM";
-	priv->base.get = nvkm_fanpwm_get;
-	priv->base.set = nvkm_fanpwm_set;
-	priv->func = *func;
+	fan->base.type = "PWM";
+	fan->base.get = nvkm_fanpwm_get;
+	fan->base.set = nvkm_fanpwm_set;
+	fan->func = *func;
 	return 0;
 }
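
In nvkm_fanpwm_set() the duty rounds up via ((divs * percent) + 99) / 100 -- a ceiling divide, so e.g. 1% of a 137-tick divider still yields (137 + 99) / 100 = 2 ticks rather than truncating to 1 -- and is then inverted on NV40-and-earlier boards, or whenever bit 0 of the DCB GPIO log marks the line active-low.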
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
index 4ce041e..59701b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
@@ -26,7 +26,7 @@
 #include <subdev/gpio.h>
 #include <subdev/timer.h>
 
-struct nvkm_fantog_priv {
+struct nvkm_fantog {
 	struct nvkm_fan base;
 	struct nvkm_alarm alarm;
 	spinlock_t lock;
@@ -36,83 +36,81 @@
 };
 
 static void
-nvkm_fantog_update(struct nvkm_fantog_priv *priv, int percent)
+nvkm_fantog_update(struct nvkm_fantog *fan, int percent)
 {
-	struct nvkm_therm_priv *tpriv = (void *)priv->base.parent;
-	struct nvkm_timer *ptimer = nvkm_timer(tpriv);
-	struct nvkm_gpio *gpio = nvkm_gpio(tpriv);
+	struct nvkm_therm *therm = fan->base.parent;
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvkm_timer *tmr = device->timer;
+	struct nvkm_gpio *gpio = device->gpio;
 	unsigned long flags;
 	int duty;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&fan->lock, flags);
 	if (percent < 0)
-		percent = priv->percent;
-	priv->percent = percent;
+		percent = fan->percent;
+	fan->percent = percent;
 
-	duty = !gpio->get(gpio, 0, DCB_GPIO_FAN, 0xff);
-	gpio->set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
+	duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff);
+	nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
 
-	if (list_empty(&priv->alarm.head) && percent != (duty * 100)) {
-		u64 next_change = (percent * priv->period_us) / 100;
+	if (list_empty(&fan->alarm.head) && percent != (duty * 100)) {
+		u64 next_change = (percent * fan->period_us) / 100;
 		if (!duty)
-			next_change = priv->period_us - next_change;
-		ptimer->alarm(ptimer, next_change * 1000, &priv->alarm);
+			next_change = fan->period_us - next_change;
+		nvkm_timer_alarm(tmr, next_change * 1000, &fan->alarm);
 	}
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&fan->lock, flags);
 }
 
 static void
 nvkm_fantog_alarm(struct nvkm_alarm *alarm)
 {
-	struct nvkm_fantog_priv *priv =
-	       container_of(alarm, struct nvkm_fantog_priv, alarm);
-	nvkm_fantog_update(priv, -1);
+	struct nvkm_fantog *fan =
+	       container_of(alarm, struct nvkm_fantog, alarm);
+	nvkm_fantog_update(fan, -1);
 }
 
 static int
 nvkm_fantog_get(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_fantog_priv *priv = (void *)tpriv->fan;
-	return priv->percent;
+	struct nvkm_fantog *fan = (void *)therm->fan;
+	return fan->percent;
 }
 
 static int
 nvkm_fantog_set(struct nvkm_therm *therm, int percent)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_fantog_priv *priv = (void *)tpriv->fan;
-	if (therm->pwm_ctrl)
-		therm->pwm_ctrl(therm, priv->func.line, false);
-	nvkm_fantog_update(priv, percent);
+	struct nvkm_fantog *fan = (void *)therm->fan;
+	if (therm->func->pwm_ctrl)
+		therm->func->pwm_ctrl(therm, fan->func.line, false);
+	nvkm_fantog_update(fan, percent);
 	return 0;
 }
 
 int
 nvkm_fantog_create(struct nvkm_therm *therm, struct dcb_gpio_func *func)
 {
-	struct nvkm_therm_priv *tpriv = (void *)therm;
-	struct nvkm_fantog_priv *priv;
+	struct nvkm_fantog *fan;
 	int ret;
 
-	if (therm->pwm_ctrl) {
-		ret = therm->pwm_ctrl(therm, func->line, false);
+	if (therm->func->pwm_ctrl) {
+		ret = therm->func->pwm_ctrl(therm, func->line, false);
 		if (ret)
 			return ret;
 	}
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	tpriv->fan = &priv->base;
-	if (!priv)
+	fan = kzalloc(sizeof(*fan), GFP_KERNEL);
+	therm->fan = &fan->base;
+	if (!fan)
 		return -ENOMEM;
 
-	priv->base.type = "toggle";
-	priv->base.get = nvkm_fantog_get;
-	priv->base.set = nvkm_fantog_set;
-	nvkm_alarm_init(&priv->alarm, nvkm_fantog_alarm);
-	priv->period_us = 100000; /* 10Hz */
-	priv->percent = 100;
-	priv->func = *func;
-	spin_lock_init(&priv->lock);
+	fan->base.type = "toggle";
+	fan->base.get = nvkm_fantog_get;
+	fan->base.set = nvkm_fantog_set;
+	nvkm_alarm_init(&fan->alarm, nvkm_fantog_alarm);
+	fan->period_us = 100000; /* 10Hz */
+	fan->percent = 100;
+	fan->func = *func;
+	spin_lock_init(&fan->lock);
 	return 0;
 }
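
For reference, the toggle-fan logic above is a software PWM: each alarm flips the GPIO, and the time until the next flip is the on- or off-fraction of a fixed period. A minimal standalone sketch of that arithmetic (the helper name and the values in main are illustrative, not part of the driver):

#include <stdio.h>

/* Sketch: time (in us) to spend in the state just written to the GPIO,
 * given the requested duty cycle in percent and a fixed toggle period.
 * Mirrors the next_change computation in nvkm_fantog_update(). */
static unsigned long
fantog_next_change_us(int percent, int duty, unsigned long period_us)
{
	unsigned long next_change = (percent * period_us) / 100;
	if (!duty)	/* GPIO was just driven low, wait the off-fraction */
		next_change = period_us - next_change;
	return next_change;
}

int main(void)
{
	/* 30% fan at a 100000 us (10 Hz) period: 30 ms high, 70 ms low */
	printf("high for %lu us\n", fantog_next_change_us(30, 1, 100000));
	printf("low  for %lu us\n", fantog_next_change_us(30, 0, 100000));
	return 0;
}
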
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
index 85b5d0c..86e8193 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
@@ -26,17 +26,13 @@
 
 #include <subdev/fuse.h>
 
-struct g84_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
 int
 g84_temp_get(struct nvkm_therm *therm)
 {
-	struct nvkm_fuse *fuse = nvkm_fuse(therm);
+	struct nvkm_device *device = therm->subdev.device;
 
-	if (nv_ro32(fuse, 0x1a8) == 1)
-		return nv_rd32(therm, 0x20400);
+	if (nvkm_fuse_read(device->fuse, 0x1a8) == 1)
+		return nvkm_rd32(device, 0x20400);
 	else
 		return -ENODEV;
 }
@@ -44,12 +40,12 @@
 void
 g84_sensor_setup(struct nvkm_therm *therm)
 {
-	struct nvkm_fuse *fuse = nvkm_fuse(therm);
+	struct nvkm_device *device = therm->subdev.device;
 
 	/* enable temperature reading for cards with insane defaults */
-	if (nv_ro32(fuse, 0x1a8) == 1) {
-		nv_mask(therm, 0x20008, 0x80008000, 0x80000000);
-		nv_mask(therm, 0x2000c, 0x80000003, 0x00000000);
+	if (nvkm_fuse_read(device->fuse, 0x1a8) == 1) {
+		nvkm_mask(device, 0x20008, 0x80008000, 0x80000000);
+		nvkm_mask(device, 0x2000c, 0x80000003, 0x00000000);
 		mdelay(20); /* wait for the temperature to stabilize */
 	}
 }
@@ -57,36 +53,40 @@
 static void
 g84_therm_program_alarms(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+	spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);
 
 	/* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
-	nv_wr32(therm, 0x20000, 0x000003ff);
+	nvkm_wr32(device, 0x20000, 0x000003ff);
 
 	/* shutdown: The computer should be shutdown when reached */
-	nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis);
-	nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp);
+	nvkm_wr32(device, 0x20484, sensor->thrs_shutdown.hysteresis);
+	nvkm_wr32(device, 0x20480, sensor->thrs_shutdown.temp);
 
 	/* THRS_1 : fan boost */
-	nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp);
+	nvkm_wr32(device, 0x204c4, sensor->thrs_fan_boost.temp);
 
 	/* THRS_2 : critical */
-	nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp);
+	nvkm_wr32(device, 0x204c0, sensor->thrs_critical.temp);
 
 	/* THRS_4 : down clock */
-	nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp);
-	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+	nvkm_wr32(device, 0x20414, sensor->thrs_down_clock.temp);
+	spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
 
-	nv_debug(therm,
-		 "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
-		 sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
-		 sensor->thrs_down_clock.temp,
-		 sensor->thrs_down_clock.hysteresis,
-		 sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
-		 sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+	nvkm_debug(subdev,
+		   "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+		   sensor->thrs_fan_boost.temp,
+		   sensor->thrs_fan_boost.hysteresis,
+		   sensor->thrs_down_clock.temp,
+		   sensor->thrs_down_clock.hysteresis,
+		   sensor->thrs_critical.temp,
+		   sensor->thrs_critical.hysteresis,
+		   sensor->thrs_shutdown.temp,
+		   sensor->thrs_shutdown.hysteresis);
 
 }
 
@@ -97,24 +97,25 @@
 				   const struct nvbios_therm_threshold *thrs,
 				   enum nvkm_therm_thrs thrs_name)
 {
+	struct nvkm_device *device = therm->subdev.device;
 	enum nvkm_therm_thrs_direction direction;
 	enum nvkm_therm_thrs_state prev_state, new_state;
 	int temp, cur;
 
 	prev_state = nvkm_therm_sensor_get_threshold_state(therm, thrs_name);
-	temp = nv_rd32(therm, thrs_reg);
+	temp = nvkm_rd32(device, thrs_reg);
 
 	/* program the next threshold */
 	if (temp == thrs->temp) {
-		nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
+		nvkm_wr32(device, thrs_reg, thrs->temp - thrs->hysteresis);
 		new_state = NVKM_THERM_THRS_HIGHER;
 	} else {
-		nv_wr32(therm, thrs_reg, thrs->temp);
+		nvkm_wr32(device, thrs_reg, thrs->temp);
 		new_state = NVKM_THERM_THRS_LOWER;
 	}
 
 	/* fix the state (in case someone reprogrammed the alarms) */
-	cur = therm->temp_get(therm);
+	cur = therm->func->temp_get(therm);
 	if (new_state == NVKM_THERM_THRS_LOWER && cur > thrs->temp)
 		new_state = NVKM_THERM_THRS_HIGHER;
 	else if (new_state == NVKM_THERM_THRS_HIGHER &&
@@ -135,17 +136,17 @@
 }
 
 static void
-g84_therm_intr(struct nvkm_subdev *subdev)
+g84_therm_intr(struct nvkm_therm *therm)
 {
-	struct nvkm_therm *therm = nvkm_therm(subdev);
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
 	unsigned long flags;
 	uint32_t intr;
 
-	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+	spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);
 
-	intr = nv_rd32(therm, 0x20100) & 0x3ff;
+	intr = nvkm_rd32(device, 0x20100) & 0x3ff;
 
 	/* THRS_4: downclock */
 	if (intr & 0x002) {
@@ -180,87 +181,66 @@
 	}
 
 	if (intr)
-		nv_error(therm, "unhandled intr 0x%08x\n", intr);
+		nvkm_error(subdev, "intr %08x\n", intr);
 
 	/* ACK everything */
-	nv_wr32(therm, 0x20100, 0xffffffff);
-	nv_wr32(therm, 0x1100, 0x10000); /* PBUS */
+	nvkm_wr32(device, 0x20100, 0xffffffff);
+	nvkm_wr32(device, 0x1100, 0x10000); /* PBUS */
 
-	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+	spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
 }
 
-static int
-g84_therm_init(struct nvkm_object *object)
+void
+g84_therm_fini(struct nvkm_therm *therm)
 {
-	struct g84_therm_priv *priv = (void *)object;
-	int ret;
+	struct nvkm_device *device = therm->subdev.device;
 
-	ret = nvkm_therm_init(&priv->base.base);
-	if (ret)
-		return ret;
-
-	g84_sensor_setup(&priv->base.base);
-	return 0;
-}
-
-static int
-g84_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
-{
-	struct g84_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
-	priv->base.base.pwm_get = nv50_fan_pwm_get;
-	priv->base.base.pwm_set = nv50_fan_pwm_set;
-	priv->base.base.pwm_clock = nv50_fan_pwm_clock;
-	priv->base.base.temp_get = g84_temp_get;
-	priv->base.sensor.program_alarms = g84_therm_program_alarms;
-	nv_subdev(priv)->intr = g84_therm_intr;
-
-	/* init the thresholds */
-	nvkm_therm_sensor_set_threshold_state(&priv->base.base,
-					      NVKM_THERM_THRS_SHUTDOWN,
-					      NVKM_THERM_THRS_LOWER);
-	nvkm_therm_sensor_set_threshold_state(&priv->base.base,
-					      NVKM_THERM_THRS_FANBOOST,
-					      NVKM_THERM_THRS_LOWER);
-	nvkm_therm_sensor_set_threshold_state(&priv->base.base,
-					      NVKM_THERM_THRS_CRITICAL,
-					      NVKM_THERM_THRS_LOWER);
-	nvkm_therm_sensor_set_threshold_state(&priv->base.base,
-					      NVKM_THERM_THRS_DOWNCLOCK,
-					      NVKM_THERM_THRS_LOWER);
-
-	return nvkm_therm_preinit(&priv->base.base);
-}
-
-int
-g84_therm_fini(struct nvkm_object *object, bool suspend)
-{
 	/* Disable PTherm IRQs */
-	nv_wr32(object, 0x20000, 0x00000000);
+	nvkm_wr32(device, 0x20000, 0x00000000);
 
 	/* ACK all PTherm IRQs */
-	nv_wr32(object, 0x20100, 0xffffffff);
-	nv_wr32(object, 0x1100, 0x10000); /* PBUS */
-
-	return _nvkm_therm_fini(object, suspend);
+	nvkm_wr32(device, 0x20100, 0xffffffff);
+	nvkm_wr32(device, 0x1100, 0x10000); /* PBUS */
 }
 
-struct nvkm_oclass
-g84_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0x84),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = g84_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = g84_therm_init,
-		.fini = g84_therm_fini,
-	},
+static void
+g84_therm_init(struct nvkm_therm *therm)
+{
+	g84_sensor_setup(therm);
+}
+
+static const struct nvkm_therm_func
+g84_therm = {
+	.init = g84_therm_init,
+	.fini = g84_therm_fini,
+	.intr = g84_therm_intr,
+	.pwm_ctrl = nv50_fan_pwm_ctrl,
+	.pwm_get = nv50_fan_pwm_get,
+	.pwm_set = nv50_fan_pwm_set,
+	.pwm_clock = nv50_fan_pwm_clock,
+	.temp_get = g84_temp_get,
+	.program_alarms = g84_therm_program_alarms,
 };
+
+int
+g84_therm_new(struct nvkm_device *device, int index, struct nvkm_therm **ptherm)
+{
+	struct nvkm_therm *therm;
+	int ret;
+
+	ret = nvkm_therm_new_(&g84_therm, device, index, &therm);
+	*ptherm = therm;
+	if (ret)
+		return ret;
+
+	/* init the thresholds */
+	nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_SHUTDOWN,
+						     NVKM_THERM_THRS_LOWER);
+	nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_FANBOOST,
+						     NVKM_THERM_THRS_LOWER);
+	nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_CRITICAL,
+						     NVKM_THERM_THRS_LOWER);
+	nvkm_therm_sensor_set_threshold_state(therm, NVKM_THERM_THRS_DOWNCLOCK,
+						     NVKM_THERM_THRS_LOWER);
+	return 0;
+}
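
The ctor/oclass boilerplate removed above is replaced throughout this series by a shared const function table plus a small chip-specific constructor. Stripped of the nvkm specifics, the pattern reduces to the following sketch (all names here are illustrative):

#include <stdlib.h>

struct widget;

/* One read-only ops table per chip generation, shared by all instances. */
struct widget_func {
	void (*init)(struct widget *);
	void (*fini)(struct widget *);
};

struct widget {
	const struct widget_func *func;	/* behaviour */
	int index;			/* per-instance state */
};

static void g84_widget_init(struct widget *w) { /* hw setup */ }
static void g84_widget_fini(struct widget *w) { /* hw teardown */ }

static const struct widget_func g84_widget = {
	.init = g84_widget_init,
	.fini = g84_widget_fini,
};

/* Common constructor: allocate instance state, attach the const table. */
static int
widget_new_(const struct widget_func *func, int index, struct widget **pwidget)
{
	struct widget *w = calloc(1, sizeof(*w));
	if (!w)
		return -1;
	w->func = func;
	w->index = index;
	*pwidget = w;
	return 0;
}

int g84_widget_new(int index, struct widget **pwidget)
{
	return widget_new_(&g84_widget, index, pwidget);
}

The payoff is that per-object state shrinks to what actually varies per instance, while everything chip-specific lives in one const table that the compiler can place in read-only data.
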
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c
deleted file mode 100644
index 46b7e65..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf110.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-#include "priv.h"
-
-#include <core/device.h>
-
-struct gf110_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
-static int
-pwm_info(struct nvkm_therm *therm, int line)
-{
-	u32 gpio = nv_rd32(therm, 0x00d610 + (line * 0x04));
-
-	switch (gpio & 0x000000c0) {
-	case 0x00000000: /* normal mode, possibly pwm forced off by us */
-	case 0x00000040: /* nvio special */
-		switch (gpio & 0x0000001f) {
-		case 0x00: return 2;
-		case 0x19: return 1;
-		case 0x1c: return 0;
-		case 0x1e: return 2;
-		default:
-			break;
-		}
-	default:
-		break;
-	}
-
-	nv_error(therm, "GPIO %d unknown PWM: 0x%08x\n", line, gpio);
-	return -ENODEV;
-}
-
-static int
-gf110_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
-{
-	u32 data = enable ? 0x00000040 : 0x00000000;
-	int indx = pwm_info(therm, line);
-	if (indx < 0)
-		return indx;
-	else if (indx < 2)
-		nv_mask(therm, 0x00d610 + (line * 0x04), 0x000000c0, data);
-	/* nothing to do for indx == 2, it seems hardwired to PTHERM */
-	return 0;
-}
-
-static int
-gf110_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
-{
-	int indx = pwm_info(therm, line);
-	if (indx < 0)
-		return indx;
-	else if (indx < 2) {
-		if (nv_rd32(therm, 0x00d610 + (line * 0x04)) & 0x00000040) {
-			*divs = nv_rd32(therm, 0x00e114 + (indx * 8));
-			*duty = nv_rd32(therm, 0x00e118 + (indx * 8));
-			return 0;
-		}
-	} else if (indx == 2) {
-		*divs = nv_rd32(therm, 0x0200d8) & 0x1fff;
-		*duty = nv_rd32(therm, 0x0200dc) & 0x1fff;
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-static int
-gf110_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
-{
-	int indx = pwm_info(therm, line);
-	if (indx < 0)
-		return indx;
-	else if (indx < 2) {
-		nv_wr32(therm, 0x00e114 + (indx * 8), divs);
-		nv_wr32(therm, 0x00e118 + (indx * 8), duty | 0x80000000);
-	} else if (indx == 2) {
-		nv_mask(therm, 0x0200d8, 0x1fff, divs); /* keep the high bits */
-		nv_wr32(therm, 0x0200dc, duty | 0x40000000);
-	}
-	return 0;
-}
-
-static int
-gf110_fan_pwm_clock(struct nvkm_therm *therm, int line)
-{
-	int indx = pwm_info(therm, line);
-	if (indx < 0)
-		return 0;
-	else if (indx < 2)
-		return (nv_device(therm)->crystal * 1000) / 20;
-	else
-		return nv_device(therm)->crystal * 1000 / 10;
-}
-
-int
-gf110_therm_init(struct nvkm_object *object)
-{
-	struct gf110_therm_priv *priv = (void *)object;
-	int ret;
-
-	ret = nvkm_therm_init(&priv->base.base);
-	if (ret)
-		return ret;
-
-	/* enable fan tach, count revolutions per-second */
-	nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
-	if (priv->base.fan->tach.func != DCB_GPIO_UNUSED) {
-		nv_mask(priv, 0x00d79c, 0x000000ff, priv->base.fan->tach.line);
-		nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
-		nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
-	}
-	nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
-
-	return 0;
-}
-
-static int
-gf110_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gf110_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	g84_sensor_setup(&priv->base.base);
-
-	priv->base.base.pwm_ctrl = gf110_fan_pwm_ctrl;
-	priv->base.base.pwm_get = gf110_fan_pwm_get;
-	priv->base.base.pwm_set = gf110_fan_pwm_set;
-	priv->base.base.pwm_clock = gf110_fan_pwm_clock;
-	priv->base.base.temp_get = g84_temp_get;
-	priv->base.base.fan_sense = gt215_therm_fan_sense;
-	priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
-	return nvkm_therm_preinit(&priv->base.base);
-}
-
-struct nvkm_oclass
-gf110_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0xd0),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gf110_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = gf110_therm_init,
-		.fini = g84_therm_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
new file mode 100644
index 0000000..06dcfd6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+static int
+pwm_info(struct nvkm_therm *therm, int line)
+{
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 gpio = nvkm_rd32(device, 0x00d610 + (line * 0x04));
+
+	switch (gpio & 0x000000c0) {
+	case 0x00000000: /* normal mode, possibly pwm forced off by us */
+	case 0x00000040: /* nvio special */
+		switch (gpio & 0x0000001f) {
+		case 0x00: return 2;
+		case 0x19: return 1;
+		case 0x1c: return 0;
+		case 0x1e: return 2;
+		default:
+			break;
+		}
+	default:
+		break;
+	}
+
+	nvkm_error(subdev, "GPIO %d unknown PWM: %08x\n", line, gpio);
+	return -ENODEV;
+}
+
+static int
+gf119_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
+{
+	struct nvkm_device *device = therm->subdev.device;
+	u32 data = enable ? 0x00000040 : 0x00000000;
+	int indx = pwm_info(therm, line);
+	if (indx < 0)
+		return indx;
+	else if (indx < 2)
+		nvkm_mask(device, 0x00d610 + (line * 0x04), 0x000000c0, data);
+	/* nothing to do for indx == 2, it seems hardwired to PTHERM */
+	return 0;
+}
+
+static int
+gf119_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
+{
+	struct nvkm_device *device = therm->subdev.device;
+	int indx = pwm_info(therm, line);
+	if (indx < 0)
+		return indx;
+	else if (indx < 2) {
+		if (nvkm_rd32(device, 0x00d610 + (line * 0x04)) & 0x00000040) {
+			*divs = nvkm_rd32(device, 0x00e114 + (indx * 8));
+			*duty = nvkm_rd32(device, 0x00e118 + (indx * 8));
+			return 0;
+		}
+	} else if (indx == 2) {
+		*divs = nvkm_rd32(device, 0x0200d8) & 0x1fff;
+		*duty = nvkm_rd32(device, 0x0200dc) & 0x1fff;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int
+gf119_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
+{
+	struct nvkm_device *device = therm->subdev.device;
+	int indx = pwm_info(therm, line);
+	if (indx < 0)
+		return indx;
+	else if (indx < 2) {
+		nvkm_wr32(device, 0x00e114 + (indx * 8), divs);
+		nvkm_wr32(device, 0x00e118 + (indx * 8), duty | 0x80000000);
+	} else if (indx == 2) {
+		nvkm_mask(device, 0x0200d8, 0x1fff, divs); /* keep the high bits */
+		nvkm_wr32(device, 0x0200dc, duty | 0x40000000);
+	}
+	return 0;
+}
+
+static int
+gf119_fan_pwm_clock(struct nvkm_therm *therm, int line)
+{
+	struct nvkm_device *device = therm->subdev.device;
+	int indx = pwm_info(therm, line);
+	if (indx < 0)
+		return 0;
+	else if (indx < 2)
+		return (device->crystal * 1000) / 20;
+	else
+		return device->crystal * 1000 / 10;
+}
+
+void
+gf119_therm_init(struct nvkm_therm *therm)
+{
+	struct nvkm_device *device = therm->subdev.device;
+
+	g84_sensor_setup(therm);
+
+	/* enable fan tach, count revolutions per-second */
+	nvkm_mask(device, 0x00e720, 0x00000003, 0x00000002);
+	if (therm->fan->tach.func != DCB_GPIO_UNUSED) {
+		nvkm_mask(device, 0x00d79c, 0x000000ff, therm->fan->tach.line);
+		nvkm_wr32(device, 0x00e724, device->crystal * 1000);
+		nvkm_mask(device, 0x00e720, 0x00000001, 0x00000001);
+	}
+	nvkm_mask(device, 0x00e720, 0x00000002, 0x00000000);
+}
+
+static const struct nvkm_therm_func
+gf119_therm = {
+	.init = gf119_therm_init,
+	.fini = g84_therm_fini,
+	.pwm_ctrl = gf119_fan_pwm_ctrl,
+	.pwm_get = gf119_fan_pwm_get,
+	.pwm_set = gf119_fan_pwm_set,
+	.pwm_clock = gf119_fan_pwm_clock,
+	.temp_get = g84_temp_get,
+	.fan_sense = gt215_therm_fan_sense,
+	.program_alarms = nvkm_therm_program_alarms_polling,
+};
+
+int
+gf119_therm_new(struct nvkm_device *device, int index,
+	       struct nvkm_therm **ptherm)
+{
+	return nvkm_therm_new_(&gf119_therm, device, index, ptherm);
+}
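
The pwm_clock hook above only reports the source clock; the actual PWM output frequency follows from the divider programmed through pwm_set. A rough worked example, assuming a 27 MHz crystal and an arbitrary divider (both values are illustrative, the crystal is board-specific):

#include <stdio.h>

int main(void)
{
	unsigned crystal_khz = 27000;	/* assumed 27 MHz crystal */
	unsigned pwm_clock = (crystal_khz * 1000) / 20;	/* gf119, indx < 2 */
	unsigned divs = 2500;		/* example divider from pwm_set */

	/* PWM output frequency = source clock / divider */
	printf("source %u Hz, output %u Hz\n", pwm_clock, pwm_clock / divs);
	/* -> 1350000 Hz source, 540 Hz fan PWM with these numbers */
	return 0;
}
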
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
index 2fd110f..86848ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm107.c
@@ -23,12 +23,6 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-
-struct gm107_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
 static int
 gm107_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
 {
@@ -39,55 +33,43 @@
 static int
 gm107_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
 {
-	*divs = nv_rd32(therm, 0x10eb20) & 0x1fff;
-	*duty = nv_rd32(therm, 0x10eb24) & 0x1fff;
+	struct nvkm_device *device = therm->subdev.device;
+	*divs = nvkm_rd32(device, 0x10eb20) & 0x1fff;
+	*duty = nvkm_rd32(device, 0x10eb24) & 0x1fff;
 	return 0;
 }
 
 static int
 gm107_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
 {
-	nv_mask(therm, 0x10eb10, 0x1fff, divs); /* keep the high bits */
-	nv_wr32(therm, 0x10eb14, duty | 0x80000000);
+	struct nvkm_device *device = therm->subdev.device;
+	nvkm_mask(device, 0x10eb10, 0x1fff, divs); /* keep the high bits */
+	nvkm_wr32(device, 0x10eb14, duty | 0x80000000);
 	return 0;
 }
 
 static int
 gm107_fan_pwm_clock(struct nvkm_therm *therm, int line)
 {
-	return nv_device(therm)->crystal * 1000;
+	return therm->subdev.device->crystal * 1000;
 }
 
-static int
-gm107_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gm107_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.base.pwm_ctrl = gm107_fan_pwm_ctrl;
-	priv->base.base.pwm_get = gm107_fan_pwm_get;
-	priv->base.base.pwm_set = gm107_fan_pwm_set;
-	priv->base.base.pwm_clock = gm107_fan_pwm_clock;
-	priv->base.base.temp_get = g84_temp_get;
-	priv->base.base.fan_sense = gt215_therm_fan_sense;
-	priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
-	return nvkm_therm_preinit(&priv->base.base);
-}
-
-struct nvkm_oclass
-gm107_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0x117),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gm107_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = gf110_therm_init,
-		.fini = g84_therm_fini,
-	},
+static const struct nvkm_therm_func
+gm107_therm = {
+	.init = gf119_therm_init,
+	.fini = g84_therm_fini,
+	.pwm_ctrl = gm107_fan_pwm_ctrl,
+	.pwm_get = gm107_fan_pwm_get,
+	.pwm_set = gm107_fan_pwm_set,
+	.pwm_clock = gm107_fan_pwm_clock,
+	.temp_get = g84_temp_get,
+	.fan_sense = gt215_therm_fan_sense,
+	.program_alarms = nvkm_therm_program_alarms_polling,
 };
+
+int
+gm107_therm_new(struct nvkm_device *device, int index,
+		struct nvkm_therm **ptherm)
+{
+	return nvkm_therm_new_(&gm107_therm, device, index, ptherm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
index e99be20..c08097f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
@@ -23,78 +23,53 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
 #include <subdev/gpio.h>
 
-struct gt215_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
 int
 gt215_therm_fan_sense(struct nvkm_therm *therm)
 {
-	u32 tach = nv_rd32(therm, 0x00e728) & 0x0000ffff;
-	u32 ctrl = nv_rd32(therm, 0x00e720);
+	struct nvkm_device *device = therm->subdev.device;
+	u32 tach = nvkm_rd32(device, 0x00e728) & 0x0000ffff;
+	u32 ctrl = nvkm_rd32(device, 0x00e720);
 	if (ctrl & 0x00000001)
 		return tach * 60 / 2;
 	return -ENODEV;
 }
 
-static int
-gt215_therm_init(struct nvkm_object *object)
+static void
+gt215_therm_init(struct nvkm_therm *therm)
 {
-	struct gt215_therm_priv *priv = (void *)object;
-	struct dcb_gpio_func *tach = &priv->base.fan->tach;
-	int ret;
+	struct nvkm_device *device = therm->subdev.device;
+	struct dcb_gpio_func *tach = &therm->fan->tach;
 
-	ret = nvkm_therm_init(&priv->base.base);
-	if (ret)
-		return ret;
-
-	g84_sensor_setup(&priv->base.base);
+	g84_sensor_setup(therm);
 
 	/* enable fan tach, count revolutions per-second */
-	nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
+	nvkm_mask(device, 0x00e720, 0x00000003, 0x00000002);
 	if (tach->func != DCB_GPIO_UNUSED) {
-		nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
-		nv_mask(priv, 0x00e720, 0x001f0000, tach->line << 16);
-		nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
+		nvkm_wr32(device, 0x00e724, device->crystal * 1000);
+		nvkm_mask(device, 0x00e720, 0x001f0000, tach->line << 16);
+		nvkm_mask(device, 0x00e720, 0x00000001, 0x00000001);
 	}
-	nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
-
-	return 0;
+	nvkm_mask(device, 0x00e720, 0x00000002, 0x00000000);
 }
 
-static int
-gt215_therm_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		 struct nvkm_oclass *oclass, void *data, u32 size,
-		 struct nvkm_object **pobject)
-{
-	struct gt215_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
-	priv->base.base.pwm_get = nv50_fan_pwm_get;
-	priv->base.base.pwm_set = nv50_fan_pwm_set;
-	priv->base.base.pwm_clock = nv50_fan_pwm_clock;
-	priv->base.base.temp_get = g84_temp_get;
-	priv->base.base.fan_sense = gt215_therm_fan_sense;
-	priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
-	return nvkm_therm_preinit(&priv->base.base);
-}
-
-struct nvkm_oclass
-gt215_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0xa3),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gt215_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = gt215_therm_init,
-		.fini = g84_therm_fini,
-	},
+static const struct nvkm_therm_func
+gt215_therm = {
+	.init = gt215_therm_init,
+	.fini = g84_therm_fini,
+	.pwm_ctrl = nv50_fan_pwm_ctrl,
+	.pwm_get = nv50_fan_pwm_get,
+	.pwm_set = nv50_fan_pwm_set,
+	.pwm_clock = nv50_fan_pwm_clock,
+	.temp_get = g84_temp_get,
+	.fan_sense = gt215_therm_fan_sense,
+	.program_alarms = nvkm_therm_program_alarms_polling,
 };
+
+int
+gt215_therm_new(struct nvkm_device *device, int index,
+	       struct nvkm_therm **ptherm)
+{
+	return nvkm_therm_new_(&gt215_therm, device, index, ptherm);
+}
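
gt215_therm_fan_sense converts a pulses-per-second tach count into RPM; the divide by two reflects the usual two tach pulses per fan revolution. In isolation, the conversion is just:

#include <stdio.h>

/* Sketch of the RPM conversion in gt215_therm_fan_sense(): tach counts
 * pulses per second, a typical fan emits 2 pulses per revolution, and
 * RPM is revolutions per minute. */
static int tach_to_rpm(unsigned tach)
{
	return tach * 60 / 2;
}

int main(void)
{
	printf("%d RPM\n", tach_to_rpm(40));	/* 40 pulses/s -> 1200 RPM */
	return 0;
}
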
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
index 09fc460..6e0ddc1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
@@ -27,16 +27,16 @@
 #include <subdev/i2c.h>
 
 static bool
-probe_monitoring_device(struct nvkm_i2c_port *i2c,
+probe_monitoring_device(struct nvkm_i2c_bus *bus,
 			struct i2c_board_info *info, void *data)
 {
-	struct nvkm_therm_priv *priv = data;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nvkm_therm *therm = data;
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
 	struct i2c_client *client;
 
 	request_module("%s%s", I2C_MODULE_PREFIX, info->type);
 
-	client = i2c_new_device(&i2c->adapter, info);
+	client = i2c_new_device(&bus->i2c, info);
 	if (!client)
 		return false;
 
@@ -46,15 +46,15 @@
 		return false;
 	}
 
-	nv_info(priv,
-		"Found an %s at address 0x%x (controlled by lm_sensors, "
-		"temp offset %+i C)\n",
-		info->type, info->addr, sensor->offset_constant);
-	priv->ic = client;
+	nvkm_debug(&therm->subdev,
+		   "Found an %s at address 0x%x (controlled by lm_sensors, "
+		   "temp offset %+i C)\n",
+		   info->type, info->addr, sensor->offset_constant);
+	therm->ic = client;
 	return true;
 }
 
-static struct nvkm_i2c_board_info
+static struct nvkm_i2c_bus_probe
 nv_board_infos[] = {
 	{ { I2C_BOARD_INFO("w83l785ts", 0x2d) }, 0 },
 	{ { I2C_BOARD_INFO("w83781d", 0x2d) }, 0  },
@@ -82,38 +82,43 @@
 void
 nvkm_therm_ic_ctor(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_bios *bios = nvkm_bios(therm);
-	struct nvkm_i2c *i2c = nvkm_i2c(therm);
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvkm_bios *bios = device->bios;
+	struct nvkm_i2c *i2c = device->i2c;
+	struct nvkm_i2c_bus *bus;
 	struct nvbios_extdev_func extdev_entry;
 
+	bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
+	if (!bus)
+		return;
+
 	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
-		struct nvkm_i2c_board_info board[] = {
+		struct nvkm_i2c_bus_probe board[] = {
 		  { { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) }, 0},
 		  { }
 		};
 
-		i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
-			      board, probe_monitoring_device, therm);
-		if (priv->ic)
+		nvkm_i2c_bus_probe(bus, "monitoring device", board,
+				   probe_monitoring_device, therm);
+		if (therm->ic)
 			return;
 	}
 
 	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) {
-		struct nvkm_i2c_board_info board[] = {
+		struct nvkm_i2c_bus_probe board[] = {
 		  { { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) }, 20 },
 		  { }
 		};
 
-		i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
-			      board, probe_monitoring_device, therm);
-		if (priv->ic)
+		nvkm_i2c_bus_probe(bus, "monitoring device", board,
+				   probe_monitoring_device, therm);
+		if (therm->ic)
 			return;
 	}
 
 	/* The vbios doesn't provide the address of an existing monitoring
 	   device. Let's try our static list.
 	 */
-	i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
-		      nv_board_infos, probe_monitoring_device, therm);
+	nvkm_i2c_bus_probe(bus, "monitoring device", nv_board_infos,
+			   probe_monitoring_device, therm);
 }
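
Both the VBIOS-directed paths and the static fallback above funnel through the same helper: walk a NULL-terminated list of candidate I2C devices and stop at the first one the probe callback accepts. A generic sketch of that shape (the types and the fake probe are illustrative, not the nvkm API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct probe_entry {
	const char *type;	/* driver name; NULL type terminates */
	int addr;		/* i2c address */
};

typedef bool (*probe_fn)(const struct probe_entry *, void *data);

/* Walk the candidates in order; stop at the first the callback accepts. */
static const struct probe_entry *
probe_list(const struct probe_entry *list, probe_fn probe, void *data)
{
	for (; list->type; list++) {
		if (probe(list, data))
			return list;
	}
	return NULL;	/* nothing answered on the bus */
}

/* Stand-in for probe_monitoring_device(): pretend 0x2e answers. */
static bool fake_probe(const struct probe_entry *e, void *data)
{
	(void)data;
	return e->addr == 0x2e;
}

int main(void)
{
	static const struct probe_entry board[] = {
		{ "w83l785ts", 0x2d }, { "adt7473", 0x2e }, { NULL, 0 },
	};
	const struct probe_entry *hit = probe_list(board, fake_probe, NULL);
	printf("found %s\n", hit ? hit->type : "nothing");
	return 0;
}
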
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
index 8496fff..6326fdc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv40.c
@@ -24,26 +24,17 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-
-struct nv40_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
 enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 };
 
 static enum nv40_sensor_style
 nv40_sensor_style(struct nvkm_therm *therm)
 {
-	struct nvkm_device *device = nv_device(therm);
-
-	switch (device->chipset) {
+	switch (therm->subdev.device->chipset) {
 	case 0x43:
 	case 0x44:
 	case 0x4a:
 	case 0x47:
 		return OLD_STYLE;
-
 	case 0x46:
 	case 0x49:
 	case 0x4b:
@@ -61,18 +52,19 @@
 static int
 nv40_sensor_setup(struct nvkm_therm *therm)
 {
+	struct nvkm_device *device = therm->subdev.device;
 	enum nv40_sensor_style style = nv40_sensor_style(therm);
 
 	/* enable ADC readout and disable the ALARM threshold */
 	if (style == NEW_STYLE) {
-		nv_mask(therm, 0x15b8, 0x80000000, 0);
-		nv_wr32(therm, 0x15b0, 0x80003fff);
+		nvkm_mask(device, 0x15b8, 0x80000000, 0);
+		nvkm_wr32(device, 0x15b0, 0x80003fff);
 		mdelay(20); /* wait for the temperature to stabilize */
-		return nv_rd32(therm, 0x15b4) & 0x3fff;
+		return nvkm_rd32(device, 0x15b4) & 0x3fff;
 	} else if (style == OLD_STYLE) {
-		nv_wr32(therm, 0x15b0, 0xff);
+		nvkm_wr32(device, 0x15b0, 0xff);
 		mdelay(20); /* wait for the temperature to stabilize */
-		return nv_rd32(therm, 0x15b4) & 0xff;
+		return nvkm_rd32(device, 0x15b4) & 0xff;
 	} else
 		return -ENODEV;
 }
@@ -80,17 +72,17 @@
 static int
 nv40_temp_get(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
 	enum nv40_sensor_style style = nv40_sensor_style(therm);
 	int core_temp;
 
 	if (style == NEW_STYLE) {
-		nv_wr32(therm, 0x15b0, 0x80003fff);
-		core_temp = nv_rd32(therm, 0x15b4) & 0x3fff;
+		nvkm_wr32(device, 0x15b0, 0x80003fff);
+		core_temp = nvkm_rd32(device, 0x15b4) & 0x3fff;
 	} else if (style == OLD_STYLE) {
-		nv_wr32(therm, 0x15b0, 0xff);
-		core_temp = nv_rd32(therm, 0x15b4) & 0xff;
+		nvkm_wr32(device, 0x15b0, 0xff);
+		core_temp = nvkm_rd32(device, 0x15b4) & 0xff;
 	} else
 		return -ENODEV;
 
@@ -113,11 +105,13 @@
 static int
 nv40_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
 {
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
 	u32 mask = enable ? 0x80000000 : 0x0000000;
-	if      (line == 2) nv_mask(therm, 0x0010f0, 0x80000000, mask);
-	else if (line == 9) nv_mask(therm, 0x0015f4, 0x80000000, mask);
+	if      (line == 2) nvkm_mask(device, 0x0010f0, 0x80000000, mask);
+	else if (line == 9) nvkm_mask(device, 0x0015f4, 0x80000000, mask);
 	else {
-		nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+		nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
 		return -ENODEV;
 	}
 	return 0;
@@ -126,8 +120,10 @@
 static int
 nv40_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
 {
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
 	if (line == 2) {
-		u32 reg = nv_rd32(therm, 0x0010f0);
+		u32 reg = nvkm_rd32(device, 0x0010f0);
 		if (reg & 0x80000000) {
 			*duty = (reg & 0x7fff0000) >> 16;
 			*divs = (reg & 0x00007fff);
@@ -135,14 +131,14 @@
 		}
 	} else
 	if (line == 9) {
-		u32 reg = nv_rd32(therm, 0x0015f4);
+		u32 reg = nvkm_rd32(device, 0x0015f4);
 		if (reg & 0x80000000) {
-			*divs = nv_rd32(therm, 0x0015f8);
+			*divs = nvkm_rd32(device, 0x0015f8);
 			*duty = (reg & 0x7fffffff);
 			return 0;
 		}
 	} else {
-		nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+		nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
 		return -ENODEV;
 	}
 
@@ -152,14 +148,16 @@
 static int
 nv40_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
 {
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
 	if (line == 2) {
-		nv_mask(therm, 0x0010f0, 0x7fff7fff, (duty << 16) | divs);
+		nvkm_mask(device, 0x0010f0, 0x7fff7fff, (duty << 16) | divs);
 	} else
 	if (line == 9) {
-		nv_wr32(therm, 0x0015f8, divs);
-		nv_mask(therm, 0x0015f4, 0x7fffffff, duty);
+		nvkm_wr32(device, 0x0015f8, divs);
+		nvkm_mask(device, 0x0015f4, 0x7fffffff, duty);
 	} else {
-		nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+		nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", line);
 		return -ENODEV;
 	}
 
@@ -167,59 +165,40 @@
 }
 
 void
-nv40_therm_intr(struct nvkm_subdev *subdev)
+nv40_therm_intr(struct nvkm_therm *therm)
 {
-	struct nvkm_therm *therm = nvkm_therm(subdev);
-	uint32_t stat = nv_rd32(therm, 0x1100);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_device *device = subdev->device;
+	uint32_t stat = nvkm_rd32(device, 0x1100);
 
 	/* processing */
 
 	/* ack all IRQs */
-	nv_wr32(therm, 0x1100, 0x70000);
+	nvkm_wr32(device, 0x1100, 0x70000);
 
-	nv_error(therm, "THERM received an IRQ: stat = %x\n", stat);
+	nvkm_error(subdev, "THERM received an IRQ: stat = %x\n", stat);
 }
 
-static int
-nv40_therm_ctor(struct nvkm_object *parent,
-		struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+static void
+nv40_therm_init(struct nvkm_therm *therm)
 {
-	struct nv40_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.base.pwm_ctrl = nv40_fan_pwm_ctrl;
-	priv->base.base.pwm_get = nv40_fan_pwm_get;
-	priv->base.base.pwm_set = nv40_fan_pwm_set;
-	priv->base.base.temp_get = nv40_temp_get;
-	priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
-	nv_subdev(priv)->intr = nv40_therm_intr;
-	return nvkm_therm_preinit(&priv->base.base);
-}
-
-static int
-nv40_therm_init(struct nvkm_object *object)
-{
-	struct nvkm_therm *therm = (void *)object;
-
 	nv40_sensor_setup(therm);
-
-	return _nvkm_therm_init(object);
 }
 
-struct nvkm_oclass
-nv40_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = nv40_therm_init,
-		.fini = _nvkm_therm_fini,
-	},
+static const struct nvkm_therm_func
+nv40_therm = {
+	.init = nv40_therm_init,
+	.intr = nv40_therm_intr,
+	.pwm_ctrl = nv40_fan_pwm_ctrl,
+	.pwm_get = nv40_fan_pwm_get,
+	.pwm_set = nv40_fan_pwm_set,
+	.temp_get = nv40_temp_get,
+	.program_alarms = nvkm_therm_program_alarms_polling,
 };
+
+int
+nv40_therm_new(struct nvkm_device *device, int index,
+	       struct nvkm_therm **ptherm)
+{
+	return nvkm_therm_new_(&nv40_therm, device, index, ptherm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
index 1ef59e8..9b57b43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/nv50.c
@@ -24,15 +24,11 @@
  */
 #include "priv.h"
 
-#include <core/device.h>
-
-struct nv50_therm_priv {
-	struct nvkm_therm_priv base;
-};
-
 static int
 pwm_info(struct nvkm_therm *therm, int *line, int *ctrl, int *indx)
 {
+	struct nvkm_subdev *subdev = &therm->subdev;
+
 	if (*line == 0x04) {
 		*ctrl = 0x00e100;
 		*line = 4;
@@ -48,7 +44,7 @@
 		*line = 0;
 		*indx = 0;
 	} else {
-		nv_error(therm, "unknown pwm ctrl for gpio %d\n", *line);
+		nvkm_error(subdev, "unknown pwm ctrl for gpio %d\n", *line);
 		return -ENODEV;
 	}
 
@@ -58,23 +54,25 @@
 int
 nv50_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
 {
+	struct nvkm_device *device = therm->subdev.device;
 	u32 data = enable ? 0x00000001 : 0x00000000;
 	int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
 	if (ret == 0)
-		nv_mask(therm, ctrl, 0x00010001 << line, data << line);
+		nvkm_mask(device, ctrl, 0x00010001 << line, data << line);
 	return ret;
 }
 
 int
 nv50_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
 {
+	struct nvkm_device *device = therm->subdev.device;
 	int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
 	if (ret)
 		return ret;
 
-	if (nv_rd32(therm, ctrl) & (1 << line)) {
-		*divs = nv_rd32(therm, 0x00e114 + (id * 8));
-		*duty = nv_rd32(therm, 0x00e118 + (id * 8));
+	if (nvkm_rd32(device, ctrl) & (1 << line)) {
+		*divs = nvkm_rd32(device, 0x00e114 + (id * 8));
+		*duty = nvkm_rd32(device, 0x00e118 + (id * 8));
 		return 0;
 	}
 
@@ -84,36 +82,36 @@
 int
 nv50_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
 {
+	struct nvkm_device *device = therm->subdev.device;
 	int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
 	if (ret)
 		return ret;
 
-	nv_wr32(therm, 0x00e114 + (id * 8), divs);
-	nv_wr32(therm, 0x00e118 + (id * 8), duty | 0x80000000);
+	nvkm_wr32(device, 0x00e114 + (id * 8), divs);
+	nvkm_wr32(device, 0x00e118 + (id * 8), duty | 0x80000000);
 	return 0;
 }
 
 int
 nv50_fan_pwm_clock(struct nvkm_therm *therm, int line)
 {
-	int chipset = nv_device(therm)->chipset;
-	int crystal = nv_device(therm)->crystal;
+	struct nvkm_device *device = therm->subdev.device;
 	int pwm_clock;
 
 	/* determine the PWM source clock */
-	if (chipset > 0x50 && chipset < 0x94) {
-		u8 pwm_div = nv_rd32(therm, 0x410c);
-		if (nv_rd32(therm, 0xc040) & 0x800000) {
+	if (device->chipset > 0x50 && device->chipset < 0x94) {
+		u8 pwm_div = nvkm_rd32(device, 0x410c);
+		if (nvkm_rd32(device, 0xc040) & 0x800000) {
 			/* Use the HOST clock (100 MHz)
 			* Where does this constant (2.4) come from? */
 			pwm_clock = (100000000 >> pwm_div) * 10 / 24;
 		} else {
 			/* Where does this constant (20) come from? */
-			pwm_clock = (crystal * 1000) >> pwm_div;
+			pwm_clock = (device->crystal * 1000) >> pwm_div;
 			pwm_clock /= 20;
 		}
 	} else {
-		pwm_clock = (crystal * 1000) / 20;
+		pwm_clock = (device->crystal * 1000) / 20;
 	}
 
 	return pwm_clock;
@@ -122,18 +120,19 @@
 static void
 nv50_sensor_setup(struct nvkm_therm *therm)
 {
-	nv_mask(therm, 0x20010, 0x40000000, 0x0);
+	struct nvkm_device *device = therm->subdev.device;
+	nvkm_mask(device, 0x20010, 0x40000000, 0x0);
 	mdelay(20); /* wait for the temperature to stabilize */
 }
 
 static int
 nv50_temp_get(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nvkm_device *device = therm->subdev.device;
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
 	int core_temp;
 
-	core_temp = nv_rd32(therm, 0x20014) & 0x3fff;
+	core_temp = nvkm_rd32(device, 0x20014) & 0x3fff;
 
 	/* if the slope or the offset is unset, do not use the sensor */
 	if (!sensor->slope_div || !sensor->slope_mult ||
@@ -151,48 +150,27 @@
 	return core_temp;
 }
 
-static int
-nv50_therm_ctor(struct nvkm_object *parent,
-		struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+static void
+nv50_therm_init(struct nvkm_therm *therm)
 {
-	struct nv50_therm_priv *priv;
-	int ret;
-
-	ret = nvkm_therm_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
-	priv->base.base.pwm_get = nv50_fan_pwm_get;
-	priv->base.base.pwm_set = nv50_fan_pwm_set;
-	priv->base.base.pwm_clock = nv50_fan_pwm_clock;
-	priv->base.base.temp_get = nv50_temp_get;
-	priv->base.sensor.program_alarms = nvkm_therm_program_alarms_polling;
-	nv_subdev(priv)->intr = nv40_therm_intr;
-
-	return nvkm_therm_preinit(&priv->base.base);
-}
-
-static int
-nv50_therm_init(struct nvkm_object *object)
-{
-	struct nvkm_therm *therm = (void *)object;
-
 	nv50_sensor_setup(therm);
-
-	return _nvkm_therm_init(object);
 }
 
-struct nvkm_oclass
-nv50_therm_oclass = {
-	.handle = NV_SUBDEV(THERM, 0x50),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv50_therm_ctor,
-		.dtor = _nvkm_therm_dtor,
-		.init = nv50_therm_init,
-		.fini = _nvkm_therm_fini,
-	},
+static const struct nvkm_therm_func
+nv50_therm = {
+	.init = nv50_therm_init,
+	.intr = nv40_therm_intr,
+	.pwm_ctrl = nv50_fan_pwm_ctrl,
+	.pwm_get = nv50_fan_pwm_get,
+	.pwm_set = nv50_fan_pwm_set,
+	.pwm_clock = nv50_fan_pwm_clock,
+	.temp_get = nv50_temp_get,
+	.program_alarms = nvkm_therm_program_alarms_polling,
 };
+
+int
+nv50_therm_new(struct nvkm_device *device, int index,
+	       struct nvkm_therm **ptherm)
+{
+	return nvkm_therm_new_(&nv50_therm, device, index, ptherm);
+}
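
For the chipsets that take the 100 MHz host-clock branch above, the 2.4 constant asked about in the comment is applied as *10/24 in integer math. A worked example of both branches, assuming a 27 MHz crystal and a pwm_div of 2 (illustrative values; the real ones come from the hardware):

#include <stdio.h>

int main(void)
{
	unsigned crystal = 27000;	/* kHz, assumed */
	unsigned pwm_div = 2;		/* value read from 0x410c, assumed */

	/* host-clock branch: (100 MHz >> div) / 2.4, done as *10/24 */
	unsigned host = (100000000u >> pwm_div) * 10 / 24;
	/* crystal branch: (crystal kHz * 1000) >> div, then / 20 */
	unsigned xtal = ((crystal * 1000) >> pwm_div) / 20;

	printf("host-derived pwm clock: %u Hz\n", host);	/* 10416666 */
	printf("xtal-derived pwm clock: %u Hz\n", xtal);	/* 337500 */
	return 0;
}
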
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
index 916a149..235a5d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
@@ -1,5 +1,6 @@
 #ifndef __NVTHERM_PRIV_H__
 #define __NVTHERM_PRIV_H__
+#define nvkm_therm(p) container_of((p), struct nvkm_therm, subdev)
 /*
  * Copyright 2012 The Nouveau community
  *
@@ -28,8 +29,9 @@
 #include <subdev/bios/extdev.h>
 #include <subdev/bios/gpio.h>
 #include <subdev/bios/perf.h>
-#include <subdev/bios/therm.h>
-#include <subdev/timer.h>
+
+int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *,
+		    int index, struct nvkm_therm **);
 
 struct nvkm_fan {
 	struct nvkm_therm *parent;
@@ -48,59 +50,6 @@
 	struct dcb_gpio_func tach;
 };
 
-enum nvkm_therm_thrs_direction {
-	NVKM_THERM_THRS_FALLING = 0,
-	NVKM_THERM_THRS_RISING = 1
-};
-
-enum nvkm_therm_thrs_state {
-	NVKM_THERM_THRS_LOWER = 0,
-	NVKM_THERM_THRS_HIGHER = 1
-};
-
-enum nvkm_therm_thrs {
-	NVKM_THERM_THRS_FANBOOST = 0,
-	NVKM_THERM_THRS_DOWNCLOCK = 1,
-	NVKM_THERM_THRS_CRITICAL = 2,
-	NVKM_THERM_THRS_SHUTDOWN = 3,
-	NVKM_THERM_THRS_NR
-};
-
-struct nvkm_therm_priv {
-	struct nvkm_therm base;
-
-	/* automatic thermal management */
-	struct nvkm_alarm alarm;
-	spinlock_t lock;
-	struct nvbios_therm_trip_point *last_trip;
-	int mode;
-	int cstate;
-	int suspend;
-
-	/* bios */
-	struct nvbios_therm_sensor bios_sensor;
-
-	/* fan priv */
-	struct nvkm_fan *fan;
-
-	/* alarms priv */
-	struct {
-		spinlock_t alarm_program_lock;
-		struct nvkm_alarm therm_poll_alarm;
-		enum nvkm_therm_thrs_state alarm_state[NVKM_THERM_THRS_NR];
-		void (*program_alarms)(struct nvkm_therm *);
-	} sensor;
-
-	/* what should be done if the card overheats */
-	struct {
-		void (*downclock)(struct nvkm_therm *, bool active);
-		void (*pause)(struct nvkm_therm *, bool active);
-	} emergency;
-
-	/* ic */
-	struct i2c_client *ic;
-};
-
 int nvkm_therm_fan_mode(struct nvkm_therm *, int mode);
 int nvkm_therm_attr_get(struct nvkm_therm *, enum nvkm_therm_attr_type);
 int nvkm_therm_attr_set(struct nvkm_therm *, enum nvkm_therm_attr_type, int);
@@ -117,8 +66,6 @@
 int nvkm_therm_fan_user_get(struct nvkm_therm *);
 int nvkm_therm_fan_user_set(struct nvkm_therm *, int percent);
 
-int nvkm_therm_fan_sense(struct nvkm_therm *);
-
 int nvkm_therm_preinit(struct nvkm_therm *);
 
 int  nvkm_therm_sensor_init(struct nvkm_therm *);
@@ -134,18 +81,37 @@
 			     enum nvkm_therm_thrs_direction);
 void nvkm_therm_program_alarms_polling(struct nvkm_therm *);
 
-void nv40_therm_intr(struct nvkm_subdev *);
+struct nvkm_therm_func {
+	void (*init)(struct nvkm_therm *);
+	void (*fini)(struct nvkm_therm *);
+	void (*intr)(struct nvkm_therm *);
+
+	int (*pwm_ctrl)(struct nvkm_therm *, int line, bool);
+	int (*pwm_get)(struct nvkm_therm *, int line, u32 *, u32 *);
+	int (*pwm_set)(struct nvkm_therm *, int line, u32, u32);
+	int (*pwm_clock)(struct nvkm_therm *, int line);
+
+	int (*temp_get)(struct nvkm_therm *);
+
+	int (*fan_sense)(struct nvkm_therm *);
+
+	void (*program_alarms)(struct nvkm_therm *);
+};
+
+void nv40_therm_intr(struct nvkm_therm *);
+
 int  nv50_fan_pwm_ctrl(struct nvkm_therm *, int, bool);
 int  nv50_fan_pwm_get(struct nvkm_therm *, int, u32 *, u32 *);
 int  nv50_fan_pwm_set(struct nvkm_therm *, int, u32, u32);
 int  nv50_fan_pwm_clock(struct nvkm_therm *, int);
+
 int  g84_temp_get(struct nvkm_therm *);
 void g84_sensor_setup(struct nvkm_therm *);
-int  g84_therm_fini(struct nvkm_object *, bool suspend);
+void g84_therm_fini(struct nvkm_therm *);
 
 int gt215_therm_fan_sense(struct nvkm_therm *);
 
-int gf110_therm_init(struct nvkm_object *);
+void gf119_therm_init(struct nvkm_therm *);
 
 int nvkm_fanpwm_create(struct nvkm_therm *, struct dcb_gpio_func *);
 int nvkm_fantog_create(struct nvkm_therm *, struct dcb_gpio_func *);
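
The new nvkm_therm(p) macro at the top of this header is the standard container_of idiom: given a pointer to the embedded subdev, recover the enclosing therm. Outside the kernel the same trick reduces to this sketch (simplified, without the kernel's type checking):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct subdev { int index; };

struct therm {
	struct subdev subdev;	/* embedded base object */
	int mode;
};

#define to_therm(p) container_of((p), struct therm, subdev)

int main(void)
{
	struct therm t = { .subdev = { .index = 7 }, .mode = 1 };
	struct subdev *sd = &t.subdev;		/* what callbacks receive */
	printf("mode=%d\n", to_therm(sd)->mode);	/* back to the therm */
	return 0;
}
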
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
index aa13744..b9703c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
@@ -26,29 +26,25 @@
 static void
 nvkm_therm_temp_set_defaults(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
+	therm->bios_sensor.offset_constant = 0;
 
-	priv->bios_sensor.offset_constant = 0;
+	therm->bios_sensor.thrs_fan_boost.temp = 90;
+	therm->bios_sensor.thrs_fan_boost.hysteresis = 3;
 
-	priv->bios_sensor.thrs_fan_boost.temp = 90;
-	priv->bios_sensor.thrs_fan_boost.hysteresis = 3;
+	therm->bios_sensor.thrs_down_clock.temp = 95;
+	therm->bios_sensor.thrs_down_clock.hysteresis = 3;
 
-	priv->bios_sensor.thrs_down_clock.temp = 95;
-	priv->bios_sensor.thrs_down_clock.hysteresis = 3;
+	therm->bios_sensor.thrs_critical.temp = 105;
+	therm->bios_sensor.thrs_critical.hysteresis = 5;
 
-	priv->bios_sensor.thrs_critical.temp = 105;
-	priv->bios_sensor.thrs_critical.hysteresis = 5;
-
-	priv->bios_sensor.thrs_shutdown.temp = 135;
-	priv->bios_sensor.thrs_shutdown.hysteresis = 5; /*not that it matters */
+	therm->bios_sensor.thrs_shutdown.temp = 135;
+	therm->bios_sensor.thrs_shutdown.hysteresis = 5; /* not that it matters */
 }
 
-
 static void
 nvkm_therm_temp_safety_checks(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *s = &priv->bios_sensor;
+	struct nvbios_therm_sensor *s = &therm->bios_sensor;
 
 	/* enforce a minimum hysteresis on thresholds */
 	s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2);
@@ -63,8 +59,7 @@
 				      enum nvkm_therm_thrs thrs,
 				      enum nvkm_therm_thrs_state st)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	priv->sensor.alarm_state[thrs] = st;
+	therm->sensor.alarm_state[thrs] = st;
 }
 
 /* must be called with alarm_program_lock taken ! */
@@ -72,8 +67,7 @@
 nvkm_therm_sensor_get_threshold_state(struct nvkm_therm *therm,
 				      enum nvkm_therm_thrs thrs)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	return priv->sensor.alarm_state[thrs];
+	return therm->sensor.alarm_state[thrs];
 }
 
 static void
@@ -87,22 +81,23 @@
 nvkm_therm_sensor_event(struct nvkm_therm *therm, enum nvkm_therm_thrs thrs,
 			enum nvkm_therm_thrs_direction dir)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
+	struct nvkm_subdev *subdev = &therm->subdev;
 	bool active;
 	const char *thresolds[] = {
 		"fanboost", "downclock", "critical", "shutdown"
 	};
-	int temperature = therm->temp_get(therm);
+	int temperature = therm->func->temp_get(therm);
 
 	if (thrs < 0 || thrs > 3)
 		return;
 
 	if (dir == NVKM_THERM_THRS_FALLING)
-		nv_info(therm, "temperature (%i C) went below the '%s' threshold\n",
-			temperature, thresolds[thrs]);
+		nvkm_info(subdev,
+			  "temperature (%i C) went below the '%s' threshold\n",
+			  temperature, thresolds[thrs]);
 	else
-		nv_info(therm, "temperature (%i C) hit the '%s' threshold\n",
-			temperature, thresolds[thrs]);
+		nvkm_info(subdev, "temperature (%i C) hit the '%s' threshold\n",
+			  temperature, thresolds[thrs]);
 
 	active = (dir == NVKM_THERM_THRS_RISING);
 	switch (thrs) {
@@ -113,12 +108,12 @@
 		}
 		break;
 	case NVKM_THERM_THRS_DOWNCLOCK:
-		if (priv->emergency.downclock)
-			priv->emergency.downclock(therm, active);
+		if (therm->emergency.downclock)
+			therm->emergency.downclock(therm, active);
 		break;
 	case NVKM_THERM_THRS_CRITICAL:
-		if (priv->emergency.pause)
-			priv->emergency.pause(therm, active);
+		if (therm->emergency.pause)
+			therm->emergency.pause(therm, active);
 		break;
 	case NVKM_THERM_THRS_SHUTDOWN:
 		if (active) {
@@ -145,7 +140,7 @@
 {
 	enum nvkm_therm_thrs_direction direction;
 	enum nvkm_therm_thrs_state prev_state, new_state;
-	int temp = therm->temp_get(therm);
+	int temp = therm->func->temp_get(therm);
 
 	prev_state = nvkm_therm_sensor_get_threshold_state(therm, thrs_name);
 
@@ -166,19 +161,19 @@
 static void
 alarm_timer_callback(struct nvkm_alarm *alarm)
 {
-	struct nvkm_therm_priv *priv =
-	container_of(alarm, struct nvkm_therm_priv, sensor.therm_poll_alarm);
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
-	struct nvkm_timer *ptimer = nvkm_timer(priv);
-	struct nvkm_therm *therm = &priv->base;
+	struct nvkm_therm *therm =
+		container_of(alarm, struct nvkm_therm, sensor.therm_poll_alarm);
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
+	struct nvkm_timer *tmr = therm->subdev.device->timer;
 	unsigned long flags;
 
-	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+	spin_lock_irqsave(&therm->sensor.alarm_program_lock, flags);
 
 	nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
 					  NVKM_THERM_THRS_FANBOOST);
 
-	nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_down_clock,
+	nvkm_therm_threshold_hyst_polling(therm,
+					  &sensor->thrs_down_clock,
 					  NVKM_THERM_THRS_DOWNCLOCK);
 
 	nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_critical,
@@ -187,46 +182,45 @@
 	nvkm_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
 					  NVKM_THERM_THRS_SHUTDOWN);
 
-	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+	spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
 
 	/* schedule the next poll in one second */
-	if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
-		ptimer->alarm(ptimer, 1000000000ULL, alarm);
+	if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head))
+		nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
 }
 
 void
 nvkm_therm_program_alarms_polling(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nvbios_therm_sensor *sensor = &therm->bios_sensor;
 
-	nv_debug(therm,
-		 "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
-		 sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
-		 sensor->thrs_down_clock.temp,
-		 sensor->thrs_down_clock.hysteresis,
-		 sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
-		 sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+	nvkm_debug(&therm->subdev,
+		   "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+		   sensor->thrs_fan_boost.temp,
+		   sensor->thrs_fan_boost.hysteresis,
+		   sensor->thrs_down_clock.temp,
+		   sensor->thrs_down_clock.hysteresis,
+		   sensor->thrs_critical.temp,
+		   sensor->thrs_critical.hysteresis,
+		   sensor->thrs_shutdown.temp,
+		   sensor->thrs_shutdown.hysteresis);
 
-	alarm_timer_callback(&priv->sensor.therm_poll_alarm);
+	alarm_timer_callback(&therm->sensor.therm_poll_alarm);
 }
 
 int
 nvkm_therm_sensor_init(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	priv->sensor.program_alarms(therm);
+	therm->func->program_alarms(therm);
 	return 0;
 }
 
 int
 nvkm_therm_sensor_fini(struct nvkm_therm *therm, bool suspend)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_timer *ptimer = nvkm_timer(therm);
-
+	struct nvkm_timer *tmr = therm->subdev.device->timer;
 	if (suspend)
-		ptimer->alarm_cancel(ptimer, &priv->sensor.therm_poll_alarm);
+		nvkm_timer_alarm_cancel(tmr, &therm->sensor.therm_poll_alarm);
 	return 0;
 }
 
@@ -235,24 +229,24 @@
 {
 	const char *sensor_avail = "yes";
 
-	if (therm->temp_get(therm) < 0)
+	if (therm->func->temp_get(therm) < 0)
 		sensor_avail = "no";
 
-	nv_info(therm, "internal sensor: %s\n", sensor_avail);
+	nvkm_debug(&therm->subdev, "internal sensor: %s\n", sensor_avail);
 }
 
 int
 nvkm_therm_sensor_ctor(struct nvkm_therm *therm)
 {
-	struct nvkm_therm_priv *priv = (void *)therm;
-	struct nvkm_bios *bios = nvkm_bios(therm);
+	struct nvkm_subdev *subdev = &therm->subdev;
+	struct nvkm_bios *bios = subdev->device->bios;
 
-	nvkm_alarm_init(&priv->sensor.therm_poll_alarm, alarm_timer_callback);
+	nvkm_alarm_init(&therm->sensor.therm_poll_alarm, alarm_timer_callback);
 
 	nvkm_therm_temp_set_defaults(therm);
 	if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE,
-				      &priv->bios_sensor))
-		nv_error(therm, "nvbios_therm_sensor_parse failed\n");
+				      &therm->bios_sensor))
+		nvkm_error(subdev, "nvbios_therm_sensor_parse failed\n");
 	nvkm_therm_temp_safety_checks(therm);
 
 	return 0;
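
The polling path above reschedules itself once per second and feeds each threshold through a hysteresis check, so a temperature hovering at a limit does not generate an event storm. The state machine, reduced to its core (names and sample values illustrative):

#include <stdio.h>

enum state { LOWER, HIGHER };

/* Rise only at temp >= threshold; fall only once temp has dropped
 * below threshold - hysteresis; anything in between keeps the state. */
static enum state
hyst_update(enum state prev, int temp, int thrs, int hyst)
{
	if (prev == LOWER && temp >= thrs)
		return HIGHER;
	if (prev == HIGHER && temp < thrs - hyst)
		return LOWER;
	return prev;
}

int main(void)
{
	enum state s = LOWER;
	int samples[] = { 88, 91, 89, 88, 86 };	/* thrs 90, hyst 3 */
	for (int i = 0; i < 5; i++) {
		s = hyst_update(s, samples[i], 90, 3);
		printf("%d C -> %s\n", samples[i],
		       s == HIGHER ? "HIGHER" : "LOWER");
	}
	return 0;
}
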
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
index d1d38b4..e436f0f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/Kbuild
@@ -1,3 +1,5 @@
 nvkm-y += nvkm/subdev/timer/base.o
 nvkm-y += nvkm/subdev/timer/nv04.o
+nvkm-y += nvkm/subdev/timer/nv40.o
+nvkm-y += nvkm/subdev/timer/nv41.o
 nvkm-y += nvkm/subdev/timer/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index d894061..d4dae1f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -21,73 +21,131 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/timer.h>
+#include "priv.h"
 
-bool
-nvkm_timer_wait_eq(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
+u64
+nvkm_timer_read(struct nvkm_timer *tmr)
 {
-	struct nvkm_timer *ptimer = nvkm_timer(obj);
-	u64 time0;
-
-	time0 = ptimer->read(ptimer);
-	do {
-		if (nv_iclass(obj, NV_SUBDEV_CLASS)) {
-			if ((nv_rd32(obj, addr) & mask) == data)
-				return true;
-		} else {
-			if ((nv_ro32(obj, addr) & mask) == data)
-				return true;
-		}
-	} while (ptimer->read(ptimer) - time0 < nsec);
-
-	return false;
-}
-
-bool
-nvkm_timer_wait_ne(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
-{
-	struct nvkm_timer *ptimer = nvkm_timer(obj);
-	u64 time0;
-
-	time0 = ptimer->read(ptimer);
-	do {
-		if (nv_iclass(obj, NV_SUBDEV_CLASS)) {
-			if ((nv_rd32(obj, addr) & mask) != data)
-				return true;
-		} else {
-			if ((nv_ro32(obj, addr) & mask) != data)
-				return true;
-		}
-	} while (ptimer->read(ptimer) - time0 < nsec);
-
-	return false;
-}
-
-bool
-nvkm_timer_wait_cb(void *obj, u64 nsec, bool (*func)(void *), void *data)
-{
-	struct nvkm_timer *ptimer = nvkm_timer(obj);
-	u64 time0;
-
-	time0 = ptimer->read(ptimer);
-	do {
-		if (func(data) == true)
-			return true;
-	} while (ptimer->read(ptimer) - time0 < nsec);
-
-	return false;
+	return tmr->func->read(tmr);
 }
 
 void
-nvkm_timer_alarm(void *obj, u32 nsec, struct nvkm_alarm *alarm)
+nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
 {
-	struct nvkm_timer *ptimer = nvkm_timer(obj);
-	ptimer->alarm(ptimer, nsec, alarm);
+	struct nvkm_alarm *alarm, *atemp;
+	unsigned long flags;
+	LIST_HEAD(exec);
+
+	/* move any due alarms off the pending list */
+	spin_lock_irqsave(&tmr->lock, flags);
+	list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
+		if (alarm->timestamp <= nvkm_timer_read(tmr))
+			list_move_tail(&alarm->head, &exec);
+	}
+
+	/* reschedule interrupt for next alarm time */
+	if (!list_empty(&tmr->alarms)) {
+		alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head);
+		tmr->func->alarm_init(tmr, alarm->timestamp);
+	} else {
+		tmr->func->alarm_fini(tmr);
+	}
+	spin_unlock_irqrestore(&tmr->lock, flags);
+
+	/* execute any pending alarm handlers */
+	list_for_each_entry_safe(alarm, atemp, &exec, head) {
+		list_del_init(&alarm->head);
+		alarm->func(alarm);
+	}
 }
 
 void
-nvkm_timer_alarm_cancel(void *obj, struct nvkm_alarm *alarm)
+nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm)
 {
-	struct nvkm_timer *ptimer = nvkm_timer(obj);
-	ptimer->alarm_cancel(ptimer, alarm);
+	struct nvkm_alarm *list;
+	unsigned long flags;
+
+	alarm->timestamp = nvkm_timer_read(tmr) + nsec;
+
+	/* append new alarm to list, in soonest-alarm-first order */
+	spin_lock_irqsave(&tmr->lock, flags);
+	if (!nsec) {
+		if (!list_empty(&alarm->head))
+			list_del(&alarm->head);
+	} else {
+		list_for_each_entry(list, &tmr->alarms, head) {
+			if (list->timestamp > alarm->timestamp)
+				break;
+		}
+		list_add_tail(&alarm->head, &list->head);
+	}
+	spin_unlock_irqrestore(&tmr->lock, flags);
+
+	/* process pending alarms */
+	nvkm_timer_alarm_trigger(tmr);
+}
+
+void
+nvkm_timer_alarm_cancel(struct nvkm_timer *tmr, struct nvkm_alarm *alarm)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&tmr->lock, flags);
+	list_del_init(&alarm->head);
+	spin_unlock_irqrestore(&tmr->lock, flags);
+}
+
+static void
+nvkm_timer_intr(struct nvkm_subdev *subdev)
+{
+	struct nvkm_timer *tmr = nvkm_timer(subdev);
+	tmr->func->intr(tmr);
+}
+
+static int
+nvkm_timer_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+	struct nvkm_timer *tmr = nvkm_timer(subdev);
+	tmr->func->alarm_fini(tmr);
+	return 0;
+}
+
+static int
+nvkm_timer_init(struct nvkm_subdev *subdev)
+{
+	struct nvkm_timer *tmr = nvkm_timer(subdev);
+	if (tmr->func->init)
+		tmr->func->init(tmr);
+	tmr->func->time(tmr, ktime_to_ns(ktime_get()));
+	nvkm_timer_alarm_trigger(tmr);
+	return 0;
+}
+
+static void *
+nvkm_timer_dtor(struct nvkm_subdev *subdev)
+{
+	return nvkm_timer(subdev);
+}
+
+static const struct nvkm_subdev_func
+nvkm_timer = {
+	.dtor = nvkm_timer_dtor,
+	.init = nvkm_timer_init,
+	.fini = nvkm_timer_fini,
+	.intr = nvkm_timer_intr,
+};
+
+int
+nvkm_timer_new_(const struct nvkm_timer_func *func, struct nvkm_device *device,
+		int index, struct nvkm_timer **ptmr)
+{
+	struct nvkm_timer *tmr;
+
+	if (!(tmr = *ptmr = kzalloc(sizeof(*tmr), GFP_KERNEL)))
+		return -ENOMEM;
+
+	nvkm_subdev_ctor(&nvkm_timer, device, index, 0, &tmr->subdev);
+	tmr->func = func;
+	INIT_LIST_HEAD(&tmr->alarms);
+	spin_lock_init(&tmr->lock);
+	return 0;
 }
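
[annotation] The list-based alarm bookkeeping now lives entirely in timer/base.c, so chipset code only supplies the alarm_init/alarm_fini hooks. A minimal sketch of how a consumer drives the consolidated API (hypothetical `my_poller` names; the one-second re-arm mirrors the therm polling code earlier in this series):

	/* Sketch, assuming the nvkm_alarm_init()/nvkm_timer_alarm() calls
	 * shown in this patch: a self-rearming one-second poll. */
	struct my_poller {
		struct nvkm_timer *tmr;
		struct nvkm_alarm alarm;
	};

	static void
	my_poll(struct nvkm_alarm *alarm)
	{
		struct my_poller *p = container_of(alarm, struct my_poller, alarm);
		/* ... periodic work ... */
		nvkm_timer_alarm(p->tmr, 1000000000ULL, alarm); /* re-arm in ~1s */
	}

	static void
	my_poll_start(struct my_poller *p)
	{
		nvkm_alarm_init(&p->alarm, my_poll);
		nvkm_timer_alarm(p->tmr, 1000000000ULL, &p->alarm);
	}

Re-arming from inside the handler is safe here because nvkm_timer_alarm_trigger() does list_del_init() on the alarm before invoking its callback.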
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
index 80e3806..9ed5f64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/gk20a.c
@@ -21,36 +21,19 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
 
-static int
-gk20a_timer_init(struct nvkm_object *object)
-{
-	struct nv04_timer_priv *priv = (void *)object;
-	u32 hi = upper_32_bits(priv->suspend_time);
-	u32 lo = lower_32_bits(priv->suspend_time);
-	int ret;
-
-	ret = nvkm_timer_init(&priv->base);
-	if (ret)
-		return ret;
-
-	nv_debug(priv, "time low        : 0x%08x\n", lo);
-	nv_debug(priv, "time high       : 0x%08x\n", hi);
-
-	/* restore the time before suspend */
-	nv_wr32(priv, NV04_PTIMER_TIME_1, hi);
-	nv_wr32(priv, NV04_PTIMER_TIME_0, lo);
-	return 0;
-}
-
-struct nvkm_oclass
-gk20a_timer_oclass = {
-	.handle = NV_SUBDEV(TIMER, 0xff),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_timer_ctor,
-		.dtor = nv04_timer_dtor,
-		.init = gk20a_timer_init,
-		.fini = nv04_timer_fini,
-	}
+static const struct nvkm_timer_func
+gk20a_timer = {
+	.intr = nv04_timer_intr,
+	.read = nv04_timer_read,
+	.time = nv04_timer_time,
+	.alarm_init = nv04_timer_alarm_init,
+	.alarm_fini = nv04_timer_alarm_fini,
 };
+
+int
+gk20a_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+{
+	return nvkm_timer_new_(&gk20a_timer, device, index, ptmr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
index 6b7facb..7b9ce87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
@@ -21,165 +21,92 @@
  *
  * Authors: Ben Skeggs
  */
-#include "nv04.h"
+#include "priv.h"
+#include "regsnv04.h"
 
-#include <core/device.h>
-
-static u64
-nv04_timer_read(struct nvkm_timer *ptimer)
+void
+nv04_timer_time(struct nvkm_timer *tmr, u64 time)
 {
-	struct nv04_timer_priv *priv = (void *)ptimer;
+	struct nvkm_subdev *subdev = &tmr->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 hi = upper_32_bits(time);
+	u32 lo = lower_32_bits(time);
+
+	nvkm_debug(subdev, "time low        : %08x\n", lo);
+	nvkm_debug(subdev, "time high       : %08x\n", hi);
+
+	nvkm_wr32(device, NV04_PTIMER_TIME_1, hi);
+	nvkm_wr32(device, NV04_PTIMER_TIME_0, lo);
+}
+
+u64
+nv04_timer_read(struct nvkm_timer *tmr)
+{
+	struct nvkm_device *device = tmr->subdev.device;
 	u32 hi, lo;
 
 	do {
-		hi = nv_rd32(priv, NV04_PTIMER_TIME_1);
-		lo = nv_rd32(priv, NV04_PTIMER_TIME_0);
-	} while (hi != nv_rd32(priv, NV04_PTIMER_TIME_1));
+		hi = nvkm_rd32(device, NV04_PTIMER_TIME_1);
+		lo = nvkm_rd32(device, NV04_PTIMER_TIME_0);
+	} while (hi != nvkm_rd32(device, NV04_PTIMER_TIME_1));
 
 	return ((u64)hi << 32 | lo);
 }
 
-static void
-nv04_timer_alarm_trigger(struct nvkm_timer *ptimer)
+void
+nv04_timer_alarm_fini(struct nvkm_timer *tmr)
 {
-	struct nv04_timer_priv *priv = (void *)ptimer;
-	struct nvkm_alarm *alarm, *atemp;
-	unsigned long flags;
-	LIST_HEAD(exec);
-
-	/* move any due alarms off the pending list */
-	spin_lock_irqsave(&priv->lock, flags);
-	list_for_each_entry_safe(alarm, atemp, &priv->alarms, head) {
-		if (alarm->timestamp <= ptimer->read(ptimer))
-			list_move_tail(&alarm->head, &exec);
-	}
-
-	/* reschedule interrupt for next alarm time */
-	if (!list_empty(&priv->alarms)) {
-		alarm = list_first_entry(&priv->alarms, typeof(*alarm), head);
-		nv_wr32(priv, NV04_PTIMER_ALARM_0, alarm->timestamp);
-		nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000001);
-	} else {
-		nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
-	}
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* execute any pending alarm handlers */
-	list_for_each_entry_safe(alarm, atemp, &exec, head) {
-		list_del_init(&alarm->head);
-		alarm->func(alarm);
-	}
+	struct nvkm_device *device = tmr->subdev.device;
+	nvkm_wr32(device, NV04_PTIMER_INTR_EN_0, 0x00000000);
 }
 
-static void
-nv04_timer_alarm(struct nvkm_timer *ptimer, u64 time, struct nvkm_alarm *alarm)
+void
+nv04_timer_alarm_init(struct nvkm_timer *tmr, u32 time)
 {
-	struct nv04_timer_priv *priv = (void *)ptimer;
-	struct nvkm_alarm *list;
-	unsigned long flags;
-
-	alarm->timestamp = ptimer->read(ptimer) + time;
-
-	/* append new alarm to list, in soonest-alarm-first order */
-	spin_lock_irqsave(&priv->lock, flags);
-	if (!time) {
-		if (!list_empty(&alarm->head))
-			list_del(&alarm->head);
-	} else {
-		list_for_each_entry(list, &priv->alarms, head) {
-			if (list->timestamp > alarm->timestamp)
-				break;
-		}
-		list_add_tail(&alarm->head, &list->head);
-	}
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* process pending alarms */
-	nv04_timer_alarm_trigger(ptimer);
+	struct nvkm_device *device = tmr->subdev.device;
+	nvkm_wr32(device, NV04_PTIMER_ALARM_0, time);
+	nvkm_wr32(device, NV04_PTIMER_INTR_EN_0, 0x00000001);
 }
 
-static void
-nv04_timer_alarm_cancel(struct nvkm_timer *ptimer, struct nvkm_alarm *alarm)
+void
+nv04_timer_intr(struct nvkm_timer *tmr)
 {
-	struct nv04_timer_priv *priv = (void *)ptimer;
-	unsigned long flags;
-	spin_lock_irqsave(&priv->lock, flags);
-	list_del_init(&alarm->head);
-	spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void
-nv04_timer_intr(struct nvkm_subdev *subdev)
-{
-	struct nv04_timer_priv *priv = (void *)subdev;
-	u32 stat = nv_rd32(priv, NV04_PTIMER_INTR_0);
+	struct nvkm_subdev *subdev = &tmr->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0);
 
 	if (stat & 0x00000001) {
-		nv04_timer_alarm_trigger(&priv->base);
-		nv_wr32(priv, NV04_PTIMER_INTR_0, 0x00000001);
+		nvkm_timer_alarm_trigger(tmr);
+		nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001);
 		stat &= ~0x00000001;
 	}
 
 	if (stat) {
-		nv_error(priv, "unknown stat 0x%08x\n", stat);
-		nv_wr32(priv, NV04_PTIMER_INTR_0, stat);
+		nvkm_error(subdev, "intr %08x\n", stat);
+		nvkm_wr32(device, NV04_PTIMER_INTR_0, stat);
 	}
 }
 
-int
-nv04_timer_fini(struct nvkm_object *object, bool suspend)
+static void
+nv04_timer_init(struct nvkm_timer *tmr)
 {
-	struct nv04_timer_priv *priv = (void *)object;
-	if (suspend)
-		priv->suspend_time = nv04_timer_read(&priv->base);
-	nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
-	return nvkm_timer_fini(&priv->base, suspend);
-}
-
-static int
-nv04_timer_init(struct nvkm_object *object)
-{
-	struct nvkm_device *device = nv_device(object);
-	struct nv04_timer_priv *priv = (void *)object;
-	u32 m = 1, f, n, d, lo, hi;
-	int ret;
-
-	ret = nvkm_timer_init(&priv->base);
-	if (ret)
-		return ret;
+	struct nvkm_subdev *subdev = &tmr->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 f = 0; /*XXX: nvclk */
+	u32 n, d;
 
 	/* aim for 31.25MHz, which gives us nanosecond timestamps */
 	d = 1000000 / 32;
+	n = f;
 
-	/* determine base clock for timer source */
-#if 0 /*XXX*/
-	if (device->chipset < 0x40) {
-		n = nvkm_hw_get_clock(device, PLL_CORE);
-	} else
-#endif
-	if (device->chipset <= 0x40) {
-		/*XXX: figure this out */
-		f = -1;
-		n = 0;
-	} else {
-		f = device->crystal;
-		n = f;
-		while (n < (d * 2)) {
-			n += (n / m);
-			m++;
+	if (!f) {
+		n = nvkm_rd32(device, NV04_PTIMER_NUMERATOR);
+		d = nvkm_rd32(device, NV04_PTIMER_DENOMINATOR);
+		if (!n || !d) {
+			n = 1;
+			d = 1;
 		}
-
-		nv_wr32(priv, 0x009220, m - 1);
-	}
-
-	if (!n) {
-		nv_warn(priv, "unknown input clock freq\n");
-		if (!nv_rd32(priv, NV04_PTIMER_NUMERATOR) ||
-		    !nv_rd32(priv, NV04_PTIMER_DENOMINATOR)) {
-			nv_wr32(priv, NV04_PTIMER_NUMERATOR, 1);
-			nv_wr32(priv, NV04_PTIMER_DENOMINATOR, 1);
-		}
-		return 0;
+		nvkm_warn(subdev, "unknown input clock freq\n");
 	}
 
 	/* reduce ratio to acceptable values */
@@ -198,65 +125,27 @@
 		d >>= 1;
 	}
 
-	/* restore the time before suspend */
-	lo = priv->suspend_time;
-	hi = (priv->suspend_time >> 32);
+	nvkm_debug(subdev, "input frequency : %dHz\n", f);
+	nvkm_debug(subdev, "numerator       : %08x\n", n);
+	nvkm_debug(subdev, "denominator     : %08x\n", d);
+	nvkm_debug(subdev, "timer frequency : %dHz\n", f * d / n);
 
-	nv_debug(priv, "input frequency : %dHz\n", f);
-	nv_debug(priv, "input multiplier: %d\n", m);
-	nv_debug(priv, "numerator       : 0x%08x\n", n);
-	nv_debug(priv, "denominator     : 0x%08x\n", d);
-	nv_debug(priv, "timer frequency : %dHz\n", (f * m) * d / n);
-	nv_debug(priv, "time low        : 0x%08x\n", lo);
-	nv_debug(priv, "time high       : 0x%08x\n", hi);
-
-	nv_wr32(priv, NV04_PTIMER_NUMERATOR, n);
-	nv_wr32(priv, NV04_PTIMER_DENOMINATOR, d);
-	nv_wr32(priv, NV04_PTIMER_INTR_0, 0xffffffff);
-	nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
-	nv_wr32(priv, NV04_PTIMER_TIME_1, hi);
-	nv_wr32(priv, NV04_PTIMER_TIME_0, lo);
-	return 0;
+	nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
+	nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
 }
 
-void
-nv04_timer_dtor(struct nvkm_object *object)
-{
-	struct nv04_timer_priv *priv = (void *)object;
-	return nvkm_timer_destroy(&priv->base);
-}
+static const struct nvkm_timer_func
+nv04_timer = {
+	.init = nv04_timer_init,
+	.intr = nv04_timer_intr,
+	.read = nv04_timer_read,
+	.time = nv04_timer_time,
+	.alarm_init = nv04_timer_alarm_init,
+	.alarm_fini = nv04_timer_alarm_fini,
+};
 
 int
-nv04_timer_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+nv04_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
 {
-	struct nv04_timer_priv *priv;
-	int ret;
-
-	ret = nvkm_timer_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
-
-	priv->base.base.intr = nv04_timer_intr;
-	priv->base.read = nv04_timer_read;
-	priv->base.alarm = nv04_timer_alarm;
-	priv->base.alarm_cancel = nv04_timer_alarm_cancel;
-	priv->suspend_time = 0;
-
-	INIT_LIST_HEAD(&priv->alarms);
-	spin_lock_init(&priv->lock);
-	return 0;
+	return nvkm_timer_new_(&nv04_timer, device, index, ptmr);
 }
-
-struct nvkm_oclass
-nv04_timer_oclass = {
-	.handle = NV_SUBDEV(TIMER, 0x04),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv04_timer_ctor,
-		.dtor = nv04_timer_dtor,
-		.init = nv04_timer_init,
-		.fini = nv04_timer_fini,
-	}
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h
deleted file mode 100644
index 89996a9..0000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __NVKM_TIMER_NV04_H__
-#define __NVKM_TIMER_NV04_H__
-#include "priv.h"
-
-#define NV04_PTIMER_INTR_0      0x009100
-#define NV04_PTIMER_INTR_EN_0   0x009140
-#define NV04_PTIMER_NUMERATOR   0x009200
-#define NV04_PTIMER_DENOMINATOR 0x009210
-#define NV04_PTIMER_TIME_0      0x009400
-#define NV04_PTIMER_TIME_1      0x009410
-#define NV04_PTIMER_ALARM_0     0x009420
-
-struct nv04_timer_priv {
-	struct nvkm_timer base;
-	struct list_head alarms;
-	spinlock_t lock;
-	u64 suspend_time;
-};
-
-int  nv04_timer_ctor(struct nvkm_object *, struct nvkm_object *,
-		     struct nvkm_oclass *, void *, u32,
-		     struct nvkm_object **);
-void nv04_timer_dtor(struct nvkm_object *);
-int  nv04_timer_fini(struct nvkm_object *, bool);
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
new file mode 100644
index 0000000..bb99a15
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv40.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "regsnv04.h"
+
+static void
+nv40_timer_init(struct nvkm_timer *tmr)
+{
+	struct nvkm_subdev *subdev = &tmr->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 f = 0; /*XXX: figure this out */
+	u32 n, d;
+
+	/* aim for 31.25MHz, which gives us nanosecond timestamps */
+	d = 1000000 / 32;
+	n = f;
+
+	if (!f) {
+		n = nvkm_rd32(device, NV04_PTIMER_NUMERATOR);
+		d = nvkm_rd32(device, NV04_PTIMER_DENOMINATOR);
+		if (!n || !d) {
+			n = 1;
+			d = 1;
+		}
+		nvkm_warn(subdev, "unknown input clock freq\n");
+	}
+
+	/* reduce ratio to acceptable values */
+	while (((n % 5) == 0) && ((d % 5) == 0)) {
+		n /= 5;
+		d /= 5;
+	}
+
+	while (((n % 2) == 0) && ((d % 2) == 0)) {
+		n /= 2;
+		d /= 2;
+	}
+
+	while (n > 0xffff || d > 0xffff) {
+		n >>= 1;
+		d >>= 1;
+	}
+
+	nvkm_debug(subdev, "input frequency : %dHz\n", f);
+	nvkm_debug(subdev, "numerator       : %08x\n", n);
+	nvkm_debug(subdev, "denominator     : %08x\n", d);
+	nvkm_debug(subdev, "timer frequency : %dHz\n", f * d / n);
+
+	nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
+	nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
+}
+
+static const struct nvkm_timer_func
+nv40_timer = {
+	.init = nv40_timer_init,
+	.intr = nv04_timer_intr,
+	.read = nv04_timer_read,
+	.time = nv04_timer_time,
+	.alarm_init = nv04_timer_alarm_init,
+	.alarm_fini = nv04_timer_alarm_fini,
+};
+
+int
+nv40_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+{
+	return nvkm_timer_new_(&nv40_timer, device, index, ptmr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
new file mode 100644
index 0000000..3cf9ec1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv41.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "regsnv04.h"
+
+static void
+nv41_timer_init(struct nvkm_timer *tmr)
+{
+	struct nvkm_subdev *subdev = &tmr->subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 f = device->crystal;
+	u32 m = 1, n, d;
+
+	/* aim for 31.25MHz, which gives us nanosecond timestamps */
+	d = 1000000 / 32;
+	n = f;
+
+	while (n < (d * 2)) {
+		n += (n / m);
+		m++;
+	}
+
+	/* reduce ratio to acceptable values */
+	while (((n % 5) == 0) && ((d % 5) == 0)) {
+		n /= 5;
+		d /= 5;
+	}
+
+	while (((n % 2) == 0) && ((d % 2) == 0)) {
+		n /= 2;
+		d /= 2;
+	}
+
+	while (n > 0xffff || d > 0xffff) {
+		n >>= 1;
+		d >>= 1;
+	}
+
+	nvkm_debug(subdev, "input frequency : %dHz\n", f);
+	nvkm_debug(subdev, "input multiplier: %d\n", m);
+	nvkm_debug(subdev, "numerator       : %08x\n", n);
+	nvkm_debug(subdev, "denominator     : %08x\n", d);
+	nvkm_debug(subdev, "timer frequency : %dHz\n", (f * m) * d / n);
+
+	nvkm_wr32(device, 0x009220, m - 1);
+	nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n);
+	nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d);
+}
+
+static const struct nvkm_timer_func
+nv41_timer = {
+	.init = nv41_timer_init,
+	.intr = nv04_timer_intr,
+	.read = nv04_timer_read,
+	.time = nv04_timer_time,
+	.alarm_init = nv04_timer_alarm_init,
+	.alarm_fini = nv04_timer_alarm_fini,
+};
+
+int
+nv41_timer_new(struct nvkm_device *device, int index, struct nvkm_timer **ptmr)
+{
+	return nvkm_timer_new_(&nv41_timer, device, index, ptmr);
+}
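
[annotation] The nv41 path derives the timer ratio from the crystal frequency; the reduction loops are simply a cheap way of shrinking n and d so both fit the 16-bit numerator/denominator registers, preserving the ratio exactly in the /5 and /2 passes and trading a little precision for range in the final shift. The arithmetic in isolation (a sketch, no hardware access):

	/* Sketch of the ratio reduction used above: strip common factors
	 * of 5 and 2, then shift both down until they fit in 16 bits. */
	static void
	reduce_ratio(u32 *n, u32 *d)
	{
		while (*n % 5 == 0 && *d % 5 == 0) { *n /= 5; *d /= 5; }
		while (*n % 2 == 0 && *d % 2 == 0) { *n /= 2; *d /= 2; }
		while (*n > 0xffff || *d > 0xffff) { *n >>= 1; *d >>= 1; }
	}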
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
index 08e29a3..f820ca2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
@@ -1,4 +1,26 @@
 #ifndef __NVKM_TIMER_PRIV_H__
 #define __NVKM_TIMER_PRIV_H__
+#define nvkm_timer(p) container_of((p), struct nvkm_timer, subdev)
 #include <subdev/timer.h>
+
+int nvkm_timer_new_(const struct nvkm_timer_func *, struct nvkm_device *,
+		    int index, struct nvkm_timer **);
+
+struct nvkm_timer_func {
+	void (*init)(struct nvkm_timer *);
+	void (*intr)(struct nvkm_timer *);
+	u64 (*read)(struct nvkm_timer *);
+	void (*time)(struct nvkm_timer *, u64 time);
+	void (*alarm_init)(struct nvkm_timer *, u32 time);
+	void (*alarm_fini)(struct nvkm_timer *);
+};
+
+void nvkm_timer_alarm_trigger(struct nvkm_timer *);
+
+void nv04_timer_fini(struct nvkm_timer *);
+void nv04_timer_intr(struct nvkm_timer *);
+void nv04_timer_time(struct nvkm_timer *, u64);
+u64 nv04_timer_read(struct nvkm_timer *);
+void nv04_timer_alarm_init(struct nvkm_timer *, u32);
+void nv04_timer_alarm_fini(struct nvkm_timer *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
new file mode 100644
index 0000000..10bef85
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
@@ -0,0 +1,7 @@
+#define NV04_PTIMER_INTR_0      0x009100
+#define NV04_PTIMER_INTR_EN_0   0x009140
+#define NV04_PTIMER_NUMERATOR   0x009200
+#define NV04_PTIMER_DENOMINATOR 0x009210
+#define NV04_PTIMER_TIME_0      0x009400
+#define NV04_PTIMER_TIME_1      0x009410
+#define NV04_PTIMER_ALARM_0     0x009420
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 39f1580..4752dbd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -21,49 +21,45 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/volt.h>
+#include "priv.h"
+
 #include <subdev/bios.h>
 #include <subdev/bios/vmap.h>
 #include <subdev/bios/volt.h>
 
-static int
+int
 nvkm_volt_get(struct nvkm_volt *volt)
 {
-	if (volt->vid_get) {
-		int ret = volt->vid_get(volt), i;
-		if (ret >= 0) {
-			for (i = 0; i < volt->vid_nr; i++) {
-				if (volt->vid[i].vid == ret)
-					return volt->vid[i].uv;
-			}
-			ret = -EINVAL;
+	int ret = volt->func->vid_get(volt), i;
+	if (ret >= 0) {
+		for (i = 0; i < volt->vid_nr; i++) {
+			if (volt->vid[i].vid == ret)
+				return volt->vid[i].uv;
 		}
-		return ret;
+		ret = -EINVAL;
 	}
-	return -ENODEV;
+	return ret;
 }
 
 static int
 nvkm_volt_set(struct nvkm_volt *volt, u32 uv)
 {
-	if (volt->vid_set) {
-		int i, ret = -EINVAL;
-		for (i = 0; i < volt->vid_nr; i++) {
-			if (volt->vid[i].uv == uv) {
-				ret = volt->vid_set(volt, volt->vid[i].vid);
-				nv_debug(volt, "set %duv: %d\n", uv, ret);
-				break;
-			}
+	struct nvkm_subdev *subdev = &volt->subdev;
+	int i, ret = -EINVAL;
+	for (i = 0; i < volt->vid_nr; i++) {
+		if (volt->vid[i].uv == uv) {
+			ret = volt->func->vid_set(volt, volt->vid[i].vid);
+			nvkm_debug(subdev, "set %duv: %d\n", uv, ret);
+			break;
 		}
-		return ret;
 	}
-	return -ENODEV;
+	return ret;
 }
 
 static int
 nvkm_volt_map(struct nvkm_volt *volt, u8 id)
 {
-	struct nvkm_bios *bios = nvkm_bios(volt);
+	struct nvkm_bios *bios = volt->subdev.device->bios;
 	struct nvbios_vmap_entry info;
 	u8  ver, len;
 	u16 vmap;
@@ -82,10 +78,15 @@
 	return id ? id * 10000 : -ENODEV;
 }
 
-static int
+int
 nvkm_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
 {
-	int ret = nvkm_volt_map(volt, id);
+	int ret;
+
+	if (volt->func->set_id)
+		return volt->func->set_id(volt, id, condition);
+
+	ret = nvkm_volt_map(volt, id);
 	if (ret >= 0) {
 		int prev = nvkm_volt_get(volt);
 		if (!condition || prev < 0 ||
@@ -134,51 +135,41 @@
 	}
 }
 
-int
-_nvkm_volt_init(struct nvkm_object *object)
+static int
+nvkm_volt_init(struct nvkm_subdev *subdev)
 {
-	struct nvkm_volt *volt = (void *)object;
-	int ret;
-
-	ret = nvkm_subdev_init(&volt->base);
-	if (ret)
-		return ret;
-
-	ret = volt->get(volt);
+	struct nvkm_volt *volt = nvkm_volt(subdev);
+	int ret = nvkm_volt_get(volt);
 	if (ret < 0) {
 		if (ret != -ENODEV)
-			nv_debug(volt, "current voltage unknown\n");
+			nvkm_debug(subdev, "current voltage unknown\n");
 		return 0;
 	}
-
-	nv_info(volt, "GPU voltage: %duv\n", ret);
+	nvkm_debug(subdev, "current voltage: %duv\n", ret);
 	return 0;
 }
 
-void
-_nvkm_volt_dtor(struct nvkm_object *object)
+static void *
+nvkm_volt_dtor(struct nvkm_subdev *subdev)
 {
-	struct nvkm_volt *volt = (void *)object;
-	nvkm_subdev_destroy(&volt->base);
+	return nvkm_volt(subdev);
 }
 
-int
-nvkm_volt_create_(struct nvkm_object *parent, struct nvkm_object *engine,
-		  struct nvkm_oclass *oclass, int length, void **pobject)
+static const struct nvkm_subdev_func
+nvkm_volt = {
+	.dtor = nvkm_volt_dtor,
+	.init = nvkm_volt_init,
+};
+
+void
+nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_volt *volt)
 {
-	struct nvkm_bios *bios = nvkm_bios(parent);
-	struct nvkm_volt *volt;
-	int ret, i;
+	struct nvkm_bios *bios = device->bios;
+	int i;
 
-	ret = nvkm_subdev_create_(parent, engine, oclass, 0, "VOLT",
-				  "voltage", length, pobject);
-	volt = *pobject;
-	if (ret)
-		return ret;
-
-	volt->get = nvkm_volt_get;
-	volt->set = nvkm_volt_set;
-	volt->set_id = nvkm_volt_set_id;
+	nvkm_subdev_ctor(&nvkm_volt, device, index, 0, &volt->subdev);
+	volt->func = func;
 
 	/* Assuming the non-bios device should build the voltage table later */
 	if (bios)
@@ -186,19 +177,18 @@
 
 	if (volt->vid_nr) {
 		for (i = 0; i < volt->vid_nr; i++) {
-			nv_debug(volt, "VID %02x: %duv\n",
-				 volt->vid[i].vid, volt->vid[i].uv);
-		}
-
-		/*XXX: this is an assumption.. there probably exists boards
-		 * out there with i2c-connected voltage controllers too..
-		 */
-		ret = nvkm_voltgpio_init(volt);
-		if (ret == 0) {
-			volt->vid_get = nvkm_voltgpio_get;
-			volt->vid_set = nvkm_voltgpio_set;
+			nvkm_debug(&volt->subdev, "VID %02x: %duv\n",
+				   volt->vid[i].vid, volt->vid[i].uv);
 		}
 	}
+}
 
-	return ret;
+int
+nvkm_volt_new_(const struct nvkm_volt_func *func, struct nvkm_device *device,
+	       int index, struct nvkm_volt **pvolt)
+{
+	if (!(*pvolt = kzalloc(sizeof(**pvolt), GFP_KERNEL)))
+		return -ENOMEM;
+	nvkm_volt_ctor(func, device, index, *pvolt);
+	return 0;
 }
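
[annotation] nvkm_volt_set_id() keeps the old condition semantics: 0 always applies the mapped voltage, a negative condition only steps the voltage down, and a positive one only steps it up relative to the current reading. A hedged usage sketch (hypothetical caller name):

	/* Sketch: apply vid-map entry `id` only if it would raise the
	 * voltage above what the regulator currently reports. */
	static int
	bump_voltage(struct nvkm_volt *volt, u8 id)
	{
		return nvkm_volt_set_id(volt, id, +1); /* +1: increase only */
	}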
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
index 871fd51..fd56c64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
@@ -19,10 +19,10 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include <subdev/volt.h>
-#ifdef __KERNEL__
-#include <nouveau_platform.h>
-#endif
+#define gk20a_volt(p) container_of((p), struct gk20a_volt, base)
+#include "priv.h"
+
+#include <core/tegra.h>
 
 struct cvb_coef {
 	int c0;
@@ -33,7 +33,7 @@
 	int c5;
 };
 
-struct gk20a_volt_priv {
+struct gk20a_volt {
 	struct nvkm_volt base;
 	struct regulator *vdd;
 };
@@ -101,43 +101,45 @@
 }
 
 static int
-gk20a_volt_vid_get(struct nvkm_volt *volt)
+gk20a_volt_vid_get(struct nvkm_volt *base)
 {
-	struct gk20a_volt_priv *priv = (void *)volt;
+	struct gk20a_volt *volt = gk20a_volt(base);
 	int i, uv;
 
-	uv = regulator_get_voltage(priv->vdd);
+	uv = regulator_get_voltage(volt->vdd);
 
-	for (i = 0; i < volt->vid_nr; i++)
-		if (volt->vid[i].uv >= uv)
+	for (i = 0; i < volt->base.vid_nr; i++)
+		if (volt->base.vid[i].uv >= uv)
 			return i;
 
 	return -EINVAL;
 }
 
 static int
-gk20a_volt_vid_set(struct nvkm_volt *volt, u8 vid)
+gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
 {
-	struct gk20a_volt_priv *priv = (void *)volt;
+	struct gk20a_volt *volt = gk20a_volt(base);
+	struct nvkm_subdev *subdev = &volt->base.subdev;
 
-	nv_debug(volt, "set voltage as %duv\n", volt->vid[vid].uv);
-	return regulator_set_voltage(priv->vdd, volt->vid[vid].uv, 1200000);
+	nvkm_debug(subdev, "set voltage as %duv\n", volt->base.vid[vid].uv);
+	return regulator_set_voltage(volt->vdd, volt->base.vid[vid].uv, 1200000);
 }
 
 static int
-gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition)
+gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition)
 {
-	struct gk20a_volt_priv *priv = (void *)volt;
-	int prev_uv = regulator_get_voltage(priv->vdd);
-	int target_uv = volt->vid[id].uv;
+	struct gk20a_volt *volt = gk20a_volt(base);
+	struct nvkm_subdev *subdev = &volt->base.subdev;
+	int prev_uv = regulator_get_voltage(volt->vdd);
+	int target_uv = volt->base.vid[id].uv;
 	int ret;
 
-	nv_debug(volt, "prev=%d, target=%d, condition=%d\n",
-			prev_uv, target_uv, condition);
+	nvkm_debug(subdev, "prev=%d, target=%d, condition=%d\n",
+		   prev_uv, target_uv, condition);
 	if (!condition ||
 		(condition < 0 && target_uv < prev_uv) ||
 		(condition > 0 && target_uv > prev_uv)) {
-		ret = gk20a_volt_vid_set(volt, volt->vid[id].vid);
+		ret = gk20a_volt_vid_set(&volt->base, volt->base.vid[id].vid);
 	} else {
 		ret = 0;
 	}
@@ -145,53 +147,42 @@
 	return ret;
 }
 
-static int
-gk20a_volt_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-		struct nvkm_oclass *oclass, void *data, u32 size,
-		struct nvkm_object **pobject)
+static const struct nvkm_volt_func
+gk20a_volt = {
+	.vid_get = gk20a_volt_vid_get,
+	.vid_set = gk20a_volt_vid_set,
+	.set_id = gk20a_volt_set_id,
+};
+
+int
+gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
 {
-	struct gk20a_volt_priv *priv;
-	struct nvkm_volt *volt;
-	struct nouveau_platform_device *plat;
-	int i, ret, uv;
+	struct nvkm_device_tegra *tdev = device->func->tegra(device);
+	struct gk20a_volt *volt;
+	int i, uv;
 
-	ret = nvkm_volt_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
-	if (ret)
-		return ret;
+	if (!(volt = kzalloc(sizeof(*volt), GFP_KERNEL)))
+		return -ENOMEM;
 
-	volt = &priv->base;
+	nvkm_volt_ctor(&gk20a_volt, device, index, &volt->base);
+	*pvolt = &volt->base;
 
-	plat = nv_device_to_platform(nv_device(parent));
+	uv = regulator_get_voltage(tdev->vdd);
+	nvkm_info(&volt->base.subdev, "The default voltage is %duV\n", uv);
 
-	uv = regulator_get_voltage(plat->gpu->vdd);
-	nv_info(priv, "The default voltage is %duV\n", uv);
+	volt->vdd = tdev->vdd;
 
-	priv->vdd = plat->gpu->vdd;
-	priv->base.vid_get = gk20a_volt_vid_get;
-	priv->base.vid_set = gk20a_volt_vid_set;
-	priv->base.set_id = gk20a_volt_set_id;
-
-	volt->vid_nr = ARRAY_SIZE(gk20a_cvb_coef);
-	nv_debug(priv, "%s - vid_nr = %d\n", __func__, volt->vid_nr);
-	for (i = 0; i < volt->vid_nr; i++) {
-		volt->vid[i].vid = i;
-		volt->vid[i].uv = gk20a_volt_calc_voltage(&gk20a_cvb_coef[i],
-					plat->gpu_speedo);
-		nv_debug(priv, "%2d: vid=%d, uv=%d\n", i, volt->vid[i].vid,
-					volt->vid[i].uv);
+	volt->base.vid_nr = ARRAY_SIZE(gk20a_cvb_coef);
+	nvkm_debug(&volt->base.subdev, "%s - vid_nr = %d\n", __func__,
+		   volt->base.vid_nr);
+	for (i = 0; i < volt->base.vid_nr; i++) {
+		volt->base.vid[i].vid = i;
+		volt->base.vid[i].uv =
+			gk20a_volt_calc_voltage(&gk20a_cvb_coef[i],
+						tdev->gpu_speedo);
+		nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i,
+			   volt->base.vid[i].vid, volt->base.vid[i].uv);
 	}
 
 	return 0;
 }
-
-struct nvkm_oclass
-gk20a_volt_oclass = {
-	.handle = NV_SUBDEV(VOLT, 0xea),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = gk20a_volt_ctor,
-		.dtor = _nvkm_volt_dtor,
-		.init = _nvkm_volt_init,
-		.fini = _nvkm_volt_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
index b778deb..d2bac1d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gpio.c
@@ -34,13 +34,13 @@
 int
 nvkm_voltgpio_get(struct nvkm_volt *volt)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(volt);
+	struct nvkm_gpio *gpio = volt->subdev.device->gpio;
 	u8 vid = 0;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(tags); i++) {
 		if (volt->vid_mask & (1 << i)) {
-			int ret = gpio->get(gpio, 0, tags[i], 0xff);
+			int ret = nvkm_gpio_get(gpio, 0, tags[i], 0xff);
 			if (ret < 0)
 				return ret;
 			vid |= ret << i;
@@ -53,12 +53,12 @@
 int
 nvkm_voltgpio_set(struct nvkm_volt *volt, u8 vid)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(volt);
+	struct nvkm_gpio *gpio = volt->subdev.device->gpio;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(tags); i++, vid >>= 1) {
 		if (volt->vid_mask & (1 << i)) {
-			int ret = gpio->set(gpio, 0, tags[i], 0xff, vid & 1);
+			int ret = nvkm_gpio_set(gpio, 0, tags[i], 0xff, vid & 1);
 			if (ret < 0)
 				return ret;
 		}
@@ -70,7 +70,8 @@
 int
 nvkm_voltgpio_init(struct nvkm_volt *volt)
 {
-	struct nvkm_gpio *gpio = nvkm_gpio(volt);
+	struct nvkm_subdev *subdev = &volt->subdev;
+	struct nvkm_gpio *gpio = subdev->device->gpio;
 	struct dcb_gpio_func func;
 	int i;
 
@@ -82,11 +83,11 @@
 	 */
 	for (i = 0; i < ARRAY_SIZE(tags); i++) {
 		if (volt->vid_mask & (1 << i)) {
-			int ret = gpio->find(gpio, 0, tags[i], 0xff, &func);
+			int ret = nvkm_gpio_find(gpio, 0, tags[i], 0xff, &func);
 			if (ret) {
 				if (ret != -ENOENT)
 					return ret;
-				nv_debug(volt, "VID bit %d has no GPIO\n", i);
+				nvkm_debug(subdev, "VID bit %d has no GPIO\n", i);
 				volt->vid_mask &= ~(1 << i);
 			}
 		}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
index 0ac5a3f..23409387 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/nv40.c
@@ -21,35 +21,24 @@
  *
  * Authors: Ben Skeggs
  */
-#include <subdev/volt.h>
+#include "priv.h"
 
-struct nv40_volt_priv {
-	struct nvkm_volt base;
+static const struct nvkm_volt_func
+nv40_volt = {
+	.vid_get = nvkm_voltgpio_get,
+	.vid_set = nvkm_voltgpio_set,
 };
 
-static int
-nv40_volt_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
-	       struct nvkm_oclass *oclass, void *data, u32 size,
-	       struct nvkm_object **pobject)
+int
+nv40_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
 {
-	struct nv40_volt_priv *priv;
+	struct nvkm_volt *volt;
 	int ret;
 
-	ret = nvkm_volt_create(parent, engine, oclass, &priv);
-	*pobject = nv_object(priv);
+	ret = nvkm_volt_new_(&nv40_volt, device, index, &volt);
+	*pvolt = volt;
 	if (ret)
 		return ret;
 
-	return 0;
+	return nvkm_voltgpio_init(volt);
 }
-
-struct nvkm_oclass
-nv40_volt_oclass = {
-	.handle = NV_SUBDEV(VOLT, 0x40),
-	.ofuncs = &(struct nvkm_ofuncs) {
-		.ctor = nv40_volt_ctor,
-		.dtor = _nvkm_volt_dtor,
-		.init = _nvkm_volt_init,
-		.fini = _nvkm_volt_fini,
-	},
-};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
new file mode 100644
index 0000000..394f37c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -0,0 +1,20 @@
+#ifndef __NVKM_VOLT_PRIV_H__
+#define __NVKM_VOLT_PRIV_H__
+#define nvkm_volt(p) container_of((p), struct nvkm_volt, subdev)
+#include <subdev/volt.h>
+
+void nvkm_volt_ctor(const struct nvkm_volt_func *, struct nvkm_device *,
+		    int index, struct nvkm_volt *);
+int nvkm_volt_new_(const struct nvkm_volt_func *, struct nvkm_device *,
+		   int index, struct nvkm_volt **);
+
+struct nvkm_volt_func {
+	int (*vid_get)(struct nvkm_volt *);
+	int (*vid_set)(struct nvkm_volt *, u8 vid);
+	int (*set_id)(struct nvkm_volt *, u8 id, int condition);
+};
+
+int nvkm_voltgpio_init(struct nvkm_volt *);
+int nvkm_voltgpio_get(struct nvkm_volt *);
+int nvkm_voltgpio_set(struct nvkm_volt *, u8);
+#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 23d9c92..9a4ba4f 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -388,11 +388,13 @@
 	copy_timings_drm_to_omap(&omap_crtc->timings, mode);
 }
 
-static void omap_crtc_atomic_begin(struct drm_crtc *crtc)
+static void omap_crtc_atomic_begin(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *old_crtc_state)
 {
 }
 
-static void omap_crtc_atomic_flush(struct drm_crtc *crtc)
+static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *old_crtc_state)
 {
 	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
 
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 720d16b..b8e4cde 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -86,11 +86,11 @@
 	/* Note: to properly handle manual update displays, we wrap the
 	 * basic fbdev ops which write to the framebuffer
 	 */
-	.fb_read = fb_sys_read,
-	.fb_write = fb_sys_write,
-	.fb_fillrect = sys_fillrect,
-	.fb_copyarea = sys_copyarea,
-	.fb_imageblit = sys_imageblit,
+	.fb_read = drm_fb_helper_sys_read,
+	.fb_write = drm_fb_helper_sys_write,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
@@ -179,10 +179,10 @@
 
 	mutex_lock(&dev->struct_mutex);
 
-	fbi = framebuffer_alloc(0, dev->dev);
-	if (!fbi) {
+	fbi = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(fbi)) {
 		dev_err(dev->dev, "failed to allocate fb info\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(fbi);
 		goto fail_unlock;
 	}
 
@@ -190,7 +190,6 @@
 
 	fbdev->fb = fb;
 	helper->fb = fb;
-	helper->fbdev = fbi;
 
 	fbi->par = helper;
 	fbi->flags = FBINFO_DEFAULT;
@@ -198,12 +197,6 @@
 
 	strcpy(fbi->fix.id, MODULE_NAME);
 
-	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto fail_unlock;
-	}
-
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
 
@@ -236,8 +229,9 @@
 fail:
 
 	if (ret) {
-		if (fbi)
-			framebuffer_release(fbi);
+
+		drm_fb_helper_release_fbi(helper);
+
 		if (fb) {
 			drm_framebuffer_unregister_private(fb);
 			drm_framebuffer_remove(fb);
@@ -312,17 +306,11 @@
 	struct omap_drm_private *priv = dev->dev_private;
 	struct drm_fb_helper *helper = priv->fbdev;
 	struct omap_fbdev *fbdev;
-	struct fb_info *fbi;
 
 	DBG();
 
-	fbi = helper->fbdev;
-
-	/* only cleanup framebuffer if it is present */
-	if (fbi) {
-		unregister_framebuffer(fbi);
-		framebuffer_release(fbi);
-	}
+	drm_fb_helper_unregister_fbi(helper);
+	drm_fb_helper_release_fbi(helper);
 
 	drm_fb_helper_fini(helper);
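
[annotation] The omap_fbdev conversion replaces open-coded framebuffer_alloc()/fb_alloc_cmap()/framebuffer_release() with the drm_fb_helper_* wrappers, which also take over the helper->fbdev assignment. The paired calls, in sketch form based only on the functions used in this hunk:

	/* Sketch of the setup/teardown pairing introduced above. */
	struct fb_info *fbi = drm_fb_helper_alloc_fbi(helper); /* fb_info + cmap */
	if (IS_ERR(fbi))
		return PTR_ERR(fbi);
	/* ... on teardown or in the error path: ... */
	drm_fb_helper_unregister_fbi(helper); /* undoes register_framebuffer() */
	drm_fb_helper_release_fbi(helper);    /* frees cmap and fb_info */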
 
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 6d64c7b..7d4704b 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -18,13 +18,21 @@
 	  that it can be automatically turned off when the panel goes into a
 	  low power state.
 
-config DRM_PANEL_LD9040
-	tristate "LD9040 RGB/SPI panel"
+config DRM_PANEL_SAMSUNG_LD9040
+	tristate "Samsung LD9040 RGB/SPI panel"
 	depends on OF && SPI
 	select VIDEOMODE_HELPERS
 
-config DRM_PANEL_S6E8AA0
-	tristate "S6E8AA0 DSI video mode panel"
+config DRM_PANEL_LG_LG4573
+	tristate "LG4573 RGB/SPI panel"
+	depends on OF && SPI
+	select VIDEOMODE_HELPERS
+	help
+	  Say Y here if you want to enable support for the LG4573 RGB panel.
+	  To compile this driver as a module, choose M here.
+
+config DRM_PANEL_SAMSUNG_S6E8AA0
+	tristate "Samsung S6E8AA0 DSI video mode panel"
 	depends on OF
 	select DRM_MIPI_DSI
 	select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 4b2a043..d0f016d 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
-obj-$(CONFIG_DRM_PANEL_LD9040) += panel-ld9040.o
-obj-$(CONFIG_DRM_PANEL_S6E8AA0) += panel-s6e8aa0.o
+obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
 obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-ld9040.c
deleted file mode 100644
index 9c27bde..0000000
--- a/drivers/gpu/drm/panel/panel-ld9040.c
+++ /dev/null
@@ -1,389 +0,0 @@
-/*
- * ld9040 AMOLED LCD drm_panel driver.
- *
- * Copyright (c) 2014 Samsung Electronics Co., Ltd
- * Derived from drivers/video/backlight/ld9040.c
- *
- * Andrzej Hajda <a.hajda@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <drm/drmP.h>
-#include <drm/drm_panel.h>
-
-#include <linux/gpio/consumer.h>
-#include <linux/regulator/consumer.h>
-#include <linux/spi/spi.h>
-
-#include <video/mipi_display.h>
-#include <video/of_videomode.h>
-#include <video/videomode.h>
-
-/* Manufacturer Command Set */
-#define MCS_MANPWR		0xb0
-#define MCS_ELVSS_ON		0xb1
-#define MCS_USER_SETTING	0xf0
-#define MCS_DISPCTL		0xf2
-#define MCS_POWER_CTRL		0xf4
-#define MCS_GTCON		0xf7
-#define MCS_PANEL_CONDITION	0xf8
-#define MCS_GAMMA_SET1		0xf9
-#define MCS_GAMMA_CTRL		0xfb
-
-/* array of gamma tables for gamma value 2.2 */
-static u8 const ld9040_gammas[25][22] = {
-	{ 0xf9, 0x00, 0x13, 0xb2, 0xba, 0xd2, 0x00, 0x30, 0x00, 0xaf, 0xc0,
-	  0xb8, 0xcd, 0x00, 0x3d, 0x00, 0xa8, 0xb8, 0xb7, 0xcd, 0x00, 0x44 },
-	{ 0xf9, 0x00, 0x13, 0xb9, 0xb9, 0xd0, 0x00, 0x3c, 0x00, 0xaf, 0xbf,
-	  0xb6, 0xcb, 0x00, 0x4b, 0x00, 0xa8, 0xb9, 0xb5, 0xcc, 0x00, 0x52 },
-	{ 0xf9, 0x00, 0x13, 0xba, 0xb9, 0xcd, 0x00, 0x41, 0x00, 0xb0, 0xbe,
-	  0xb5, 0xc9, 0x00, 0x51, 0x00, 0xa9, 0xb9, 0xb5, 0xca, 0x00, 0x57 },
-	{ 0xf9, 0x00, 0x13, 0xb9, 0xb8, 0xcd, 0x00, 0x46, 0x00, 0xb1, 0xbc,
-	  0xb5, 0xc8, 0x00, 0x56, 0x00, 0xaa, 0xb8, 0xb4, 0xc9, 0x00, 0x5d },
-	{ 0xf9, 0x00, 0x13, 0xba, 0xb8, 0xcb, 0x00, 0x4b, 0x00, 0xb3, 0xbc,
-	  0xb4, 0xc7, 0x00, 0x5c, 0x00, 0xac, 0xb8, 0xb4, 0xc8, 0x00, 0x62 },
-	{ 0xf9, 0x00, 0x13, 0xbb, 0xb7, 0xca, 0x00, 0x4f, 0x00, 0xb4, 0xbb,
-	  0xb3, 0xc7, 0x00, 0x60, 0x00, 0xad, 0xb8, 0xb4, 0xc7, 0x00, 0x67 },
-	{ 0xf9, 0x00, 0x47, 0xba, 0xb6, 0xca, 0x00, 0x53, 0x00, 0xb5, 0xbb,
-	  0xb3, 0xc6, 0x00, 0x65, 0x00, 0xae, 0xb8, 0xb3, 0xc7, 0x00, 0x6c },
-	{ 0xf9, 0x00, 0x71, 0xbb, 0xb5, 0xc8, 0x00, 0x57, 0x00, 0xb5, 0xbb,
-	  0xb0, 0xc5, 0x00, 0x6a, 0x00, 0xae, 0xb9, 0xb1, 0xc6, 0x00, 0x70 },
-	{ 0xf9, 0x00, 0x7b, 0xbb, 0xb4, 0xc8, 0x00, 0x5b, 0x00, 0xb5, 0xba,
-	  0xb1, 0xc4, 0x00, 0x6e, 0x00, 0xae, 0xb9, 0xb0, 0xc5, 0x00, 0x75 },
-	{ 0xf9, 0x00, 0x82, 0xba, 0xb4, 0xc7, 0x00, 0x5f, 0x00, 0xb5, 0xba,
-	  0xb0, 0xc3, 0x00, 0x72, 0x00, 0xae, 0xb8, 0xb0, 0xc3, 0x00, 0x7a },
-	{ 0xf9, 0x00, 0x89, 0xba, 0xb3, 0xc8, 0x00, 0x62, 0x00, 0xb6, 0xba,
-	  0xaf, 0xc3, 0x00, 0x76, 0x00, 0xaf, 0xb7, 0xae, 0xc4, 0x00, 0x7e },
-	{ 0xf9, 0x00, 0x8b, 0xb9, 0xb3, 0xc7, 0x00, 0x65, 0x00, 0xb7, 0xb8,
-	  0xaf, 0xc3, 0x00, 0x7a, 0x00, 0x80, 0xb6, 0xae, 0xc4, 0x00, 0x81 },
-	{ 0xf9, 0x00, 0x93, 0xba, 0xb3, 0xc5, 0x00, 0x69, 0x00, 0xb8, 0xb9,
-	  0xae, 0xc1, 0x00, 0x7f, 0x00, 0xb0, 0xb6, 0xae, 0xc3, 0x00, 0x85 },
-	{ 0xf9, 0x00, 0x97, 0xba, 0xb2, 0xc5, 0x00, 0x6c, 0x00, 0xb8, 0xb8,
-	  0xae, 0xc1, 0x00, 0x82, 0x00, 0xb0, 0xb6, 0xae, 0xc2, 0x00, 0x89 },
-	{ 0xf9, 0x00, 0x9a, 0xba, 0xb1, 0xc4, 0x00, 0x6f, 0x00, 0xb8, 0xb8,
-	  0xad, 0xc0, 0x00, 0x86, 0x00, 0xb0, 0xb7, 0xad, 0xc0, 0x00, 0x8d },
-	{ 0xf9, 0x00, 0x9c, 0xb9, 0xb0, 0xc4, 0x00, 0x72, 0x00, 0xb8, 0xb8,
-	  0xac, 0xbf, 0x00, 0x8a, 0x00, 0xb0, 0xb6, 0xac, 0xc0, 0x00, 0x91 },
-	{ 0xf9, 0x00, 0x9e, 0xba, 0xb0, 0xc2, 0x00, 0x75, 0x00, 0xb9, 0xb8,
-	  0xab, 0xbe, 0x00, 0x8e, 0x00, 0xb0, 0xb6, 0xac, 0xbf, 0x00, 0x94 },
-	{ 0xf9, 0x00, 0xa0, 0xb9, 0xaf, 0xc3, 0x00, 0x77, 0x00, 0xb9, 0xb7,
-	  0xab, 0xbe, 0x00, 0x90, 0x00, 0xb0, 0xb6, 0xab, 0xbf, 0x00, 0x97 },
-	{ 0xf9, 0x00, 0xa2, 0xb9, 0xaf, 0xc2, 0x00, 0x7a, 0x00, 0xb9, 0xb7,
-	  0xaa, 0xbd, 0x00, 0x94, 0x00, 0xb0, 0xb5, 0xab, 0xbf, 0x00, 0x9a },
-	{ 0xf9, 0x00, 0xa4, 0xb9, 0xaf, 0xc1, 0x00, 0x7d, 0x00, 0xb9, 0xb6,
-	  0xaa, 0xbb, 0x00, 0x97, 0x00, 0xb1, 0xb5, 0xaa, 0xbf, 0x00, 0x9d },
-	{ 0xf9, 0x00, 0xa4, 0xb8, 0xb0, 0xbf, 0x00, 0x80, 0x00, 0xb8, 0xb6,
-	  0xaa, 0xbc, 0x00, 0x9a, 0x00, 0xb0, 0xb5, 0xab, 0xbd, 0x00, 0xa0 },
-	{ 0xf9, 0x00, 0xa8, 0xb8, 0xae, 0xbe, 0x00, 0x84, 0x00, 0xb9, 0xb7,
-	  0xa8, 0xbc, 0x00, 0x9d, 0x00, 0xb2, 0xb5, 0xaa, 0xbc, 0x00, 0xa4 },
-	{ 0xf9, 0x00, 0xa9, 0xb6, 0xad, 0xbf, 0x00, 0x86, 0x00, 0xb8, 0xb5,
-	  0xa8, 0xbc, 0x00, 0xa0, 0x00, 0xb3, 0xb3, 0xa9, 0xbc, 0x00, 0xa7 },
-	{ 0xf9, 0x00, 0xa9, 0xb7, 0xae, 0xbd, 0x00, 0x89, 0x00, 0xb7, 0xb6,
-	  0xa8, 0xba, 0x00, 0xa4, 0x00, 0xb1, 0xb4, 0xaa, 0xbb, 0x00, 0xaa },
-	{ 0xf9, 0x00, 0xa7, 0xb4, 0xae, 0xbf, 0x00, 0x91, 0x00, 0xb2, 0xb4,
-	  0xaa, 0xbb, 0x00, 0xac, 0x00, 0xb3, 0xb1, 0xaa, 0xbc, 0x00, 0xb3 },
-};
-
-struct ld9040 {
-	struct device *dev;
-	struct drm_panel panel;
-
-	struct regulator_bulk_data supplies[2];
-	struct gpio_desc *reset_gpio;
-	u32 power_on_delay;
-	u32 reset_delay;
-	struct videomode vm;
-	u32 width_mm;
-	u32 height_mm;
-
-	int brightness;
-
-	/* This field is tested by functions directly accessing bus before
-	 * transfer, transfer is skipped if it is set. In case of transfer
-	 * failure or unexpected response the field is set to error value.
-	 * Such construct allows to eliminate many checks in higher level
-	 * functions.
-	 */
-	int error;
-};
-
-static inline struct ld9040 *panel_to_ld9040(struct drm_panel *panel)
-{
-	return container_of(panel, struct ld9040, panel);
-}
-
-static int ld9040_clear_error(struct ld9040 *ctx)
-{
-	int ret = ctx->error;
-
-	ctx->error = 0;
-	return ret;
-}
-
-static int ld9040_spi_write_word(struct ld9040 *ctx, u16 data)
-{
-	struct spi_device *spi = to_spi_device(ctx->dev);
-	struct spi_transfer xfer = {
-		.len		= 2,
-		.tx_buf		= &data,
-	};
-	struct spi_message msg;
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&xfer, &msg);
-
-	return spi_sync(spi, &msg);
-}
-
-static void ld9040_dcs_write(struct ld9040 *ctx, const u8 *data, size_t len)
-{
-	int ret = 0;
-
-	if (ctx->error < 0 || len == 0)
-		return;
-
-	dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", (int)len, data);
-	ret = ld9040_spi_write_word(ctx, *data);
-
-	while (!ret && --len) {
-		++data;
-		ret = ld9040_spi_write_word(ctx, *data | 0x100);
-	}
-
-	if (ret) {
-		dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret,
-			(int)len, data);
-		ctx->error = ret;
-	}
-
-	usleep_range(300, 310);
-}
-
-#define ld9040_dcs_write_seq_static(ctx, seq...) \
-({\
-	static const u8 d[] = { seq };\
-	ld9040_dcs_write(ctx, d, ARRAY_SIZE(d));\
-})
-
-static void ld9040_brightness_set(struct ld9040 *ctx)
-{
-	ld9040_dcs_write(ctx, ld9040_gammas[ctx->brightness],
-			 ARRAY_SIZE(ld9040_gammas[ctx->brightness]));
-
-	ld9040_dcs_write_seq_static(ctx, MCS_GAMMA_CTRL, 0x02, 0x5a);
-}
-
-static void ld9040_init(struct ld9040 *ctx)
-{
-	ld9040_dcs_write_seq_static(ctx, MCS_USER_SETTING, 0x5a, 0x5a);
-	ld9040_dcs_write_seq_static(ctx, MCS_PANEL_CONDITION,
-		0x05, 0x65, 0x96, 0x71, 0x7d, 0x19, 0x3b, 0x0d,
-		0x19, 0x7e, 0x0d, 0xe2, 0x00, 0x00, 0x7e, 0x7d,
-		0x07, 0x07, 0x20, 0x20, 0x20, 0x02, 0x02);
-	ld9040_dcs_write_seq_static(ctx, MCS_DISPCTL,
-		0x02, 0x08, 0x08, 0x10, 0x10);
-	ld9040_dcs_write_seq_static(ctx, MCS_MANPWR, 0x04);
-	ld9040_dcs_write_seq_static(ctx, MCS_POWER_CTRL,
-		0x0a, 0x87, 0x25, 0x6a, 0x44, 0x02, 0x88);
-	ld9040_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0d, 0x00, 0x16);
-	ld9040_dcs_write_seq_static(ctx, MCS_GTCON, 0x09, 0x00, 0x00);
-	ld9040_brightness_set(ctx);
-	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
-	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
-}
-
-static int ld9040_power_on(struct ld9040 *ctx)
-{
-	int ret;
-
-	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
-	if (ret < 0)
-		return ret;
-
-	msleep(ctx->power_on_delay);
-	gpiod_set_value(ctx->reset_gpio, 0);
-	msleep(ctx->reset_delay);
-	gpiod_set_value(ctx->reset_gpio, 1);
-	msleep(ctx->reset_delay);
-
-	return 0;
-}
-
-static int ld9040_power_off(struct ld9040 *ctx)
-{
-	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
-}
-
-static int ld9040_disable(struct drm_panel *panel)
-{
-	return 0;
-}
-
-static int ld9040_unprepare(struct drm_panel *panel)
-{
-	struct ld9040 *ctx = panel_to_ld9040(panel);
-
-	msleep(120);
-	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF);
-	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
-	msleep(40);
-
-	ld9040_clear_error(ctx);
-
-	return ld9040_power_off(ctx);
-}
-
-static int ld9040_prepare(struct drm_panel *panel)
-{
-	struct ld9040 *ctx = panel_to_ld9040(panel);
-	int ret;
-
-	ret = ld9040_power_on(ctx);
-	if (ret < 0)
-		return ret;
-
-	ld9040_init(ctx);
-
-	ret = ld9040_clear_error(ctx);
-
-	if (ret < 0)
-		ld9040_unprepare(panel);
-
-	return ret;
-}
-
-static int ld9040_enable(struct drm_panel *panel)
-{
-	return 0;
-}
-
-static int ld9040_get_modes(struct drm_panel *panel)
-{
-	struct drm_connector *connector = panel->connector;
-	struct ld9040 *ctx = panel_to_ld9040(panel);
-	struct drm_display_mode *mode;
-
-	mode = drm_mode_create(connector->dev);
-	if (!mode) {
-		DRM_ERROR("failed to create a new display mode\n");
-		return 0;
-	}
-
-	drm_display_mode_from_videomode(&ctx->vm, mode);
-	mode->width_mm = ctx->width_mm;
-	mode->height_mm = ctx->height_mm;
-	connector->display_info.width_mm = mode->width_mm;
-	connector->display_info.height_mm = mode->height_mm;
-
-	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-	drm_mode_probed_add(connector, mode);
-
-	return 1;
-}
-
-static const struct drm_panel_funcs ld9040_drm_funcs = {
-	.disable = ld9040_disable,
-	.unprepare = ld9040_unprepare,
-	.prepare = ld9040_prepare,
-	.enable = ld9040_enable,
-	.get_modes = ld9040_get_modes,
-};
-
-static int ld9040_parse_dt(struct ld9040 *ctx)
-{
-	struct device *dev = ctx->dev;
-	struct device_node *np = dev->of_node;
-	int ret;
-
-	ret = of_get_videomode(np, &ctx->vm, 0);
-	if (ret < 0)
-		return ret;
-
-	of_property_read_u32(np, "power-on-delay", &ctx->power_on_delay);
-	of_property_read_u32(np, "reset-delay", &ctx->reset_delay);
-	of_property_read_u32(np, "panel-width-mm", &ctx->width_mm);
-	of_property_read_u32(np, "panel-height-mm", &ctx->height_mm);
-
-	return 0;
-}
-
-static int ld9040_probe(struct spi_device *spi)
-{
-	struct device *dev = &spi->dev;
-	struct ld9040 *ctx;
-	int ret;
-
-	ctx = devm_kzalloc(dev, sizeof(struct ld9040), GFP_KERNEL);
-	if (!ctx)
-		return -ENOMEM;
-
-	spi_set_drvdata(spi, ctx);
-
-	ctx->dev = dev;
-	ctx->brightness = ARRAY_SIZE(ld9040_gammas) - 1;
-
-	ret = ld9040_parse_dt(ctx);
-	if (ret < 0)
-		return ret;
-
-	ctx->supplies[0].supply = "vdd3";
-	ctx->supplies[1].supply = "vci";
-	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
-				      ctx->supplies);
-	if (ret < 0)
-		return ret;
-
-	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
-	if (IS_ERR(ctx->reset_gpio)) {
-		dev_err(dev, "cannot get reset-gpios %ld\n",
-			PTR_ERR(ctx->reset_gpio));
-		return PTR_ERR(ctx->reset_gpio);
-	}
-
-	spi->bits_per_word = 9;
-	ret = spi_setup(spi);
-	if (ret < 0) {
-		dev_err(dev, "spi setup failed.\n");
-		return ret;
-	}
-
-	drm_panel_init(&ctx->panel);
-	ctx->panel.dev = dev;
-	ctx->panel.funcs = &ld9040_drm_funcs;
-
-	return drm_panel_add(&ctx->panel);
-}
-
-static int ld9040_remove(struct spi_device *spi)
-{
-	struct ld9040 *ctx = spi_get_drvdata(spi);
-
-	ld9040_power_off(ctx);
-	drm_panel_remove(&ctx->panel);
-
-	return 0;
-}
-
-static const struct of_device_id ld9040_of_match[] = {
-	{ .compatible = "samsung,ld9040" },
-	{ }
-};
-MODULE_DEVICE_TABLE(of, ld9040_of_match);
-
-static struct spi_driver ld9040_driver = {
-	.probe = ld9040_probe,
-	.remove = ld9040_remove,
-	.driver = {
-		.name = "ld9040",
-		.owner = THIS_MODULE,
-		.of_match_table = ld9040_of_match,
-	},
-};
-module_spi_driver(ld9040_driver);
-
-MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
-MODULE_DESCRIPTION("ld9040 LCD Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
new file mode 100644
index 0000000..a7b4939
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2015 Heiko Schocher <hs@denx.de>
+ *
+ * from:
+ * drivers/gpu/drm/panel/panel-ld9040.c
+ * ld9040 AMOLED LCD drm_panel driver.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ * Derived from drivers/video/backlight/ld9040.c
+ *
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+struct lg4573 {
+	struct drm_panel panel;
+	struct spi_device *spi;
+	struct videomode vm;
+};
+
+static inline struct lg4573 *panel_to_lg4573(struct drm_panel *panel)
+{
+	return container_of(panel, struct lg4573, panel);
+}
+
+static int lg4573_spi_write_u16(struct lg4573 *ctx, u16 data)
+{
+	struct spi_transfer xfer = {
+		.len = 2,
+	};
+	u16 temp = cpu_to_be16(data);
+	struct spi_message msg;
+
+	dev_dbg(ctx->panel.dev, "writing data: %x\n", data);
+	xfer.tx_buf = &temp;
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+
+	return spi_sync(ctx->spi, &msg);
+}
+
+static int lg4573_spi_write_u16_array(struct lg4573 *ctx, const u16 *buffer,
+				      unsigned int count)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < count; i++) {
+		ret = lg4573_spi_write_u16(ctx, buffer[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
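+/*
+ * Wire-format note (inferred from the command tables below, so read it
+ * as a sketch): every 16-bit word sent to the panel carries a tag in
+ * its high byte -- 0x70 marks the low byte as a DCS command, 0x72
+ * marks it as a parameter byte for the preceding command.
+ */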
+static int lg4573_spi_write_dcs(struct lg4573 *ctx, u8 dcs)
+{
+	return lg4573_spi_write_u16(ctx, (0x70 << 8 | dcs));
+}
+
+static int lg4573_display_on(struct lg4573 *ctx)
+{
+	int ret;
+
+	ret = lg4573_spi_write_dcs(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
+	if (ret)
+		return ret;
+
+	msleep(5);
+
+	return lg4573_spi_write_dcs(ctx, MIPI_DCS_SET_DISPLAY_ON);
+}
+
+static int lg4573_display_off(struct lg4573 *ctx)
+{
+	int ret;
+
+	ret = lg4573_spi_write_dcs(ctx, MIPI_DCS_SET_DISPLAY_OFF);
+	if (ret)
+		return ret;
+
+	msleep(120);
+
+	return lg4573_spi_write_dcs(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
+}
+
+static int lg4573_display_mode_settings(struct lg4573 *ctx)
+{
+	static const u16 display_mode_settings[] = {
+		0x703A, 0x7270, 0x70B1, 0x7208,
+		0x723B, 0x720F, 0x70B2, 0x7200,
+		0x72C8, 0x70B3, 0x7200, 0x70B4,
+		0x7200, 0x70B5, 0x7242, 0x7210,
+		0x7210, 0x7200, 0x7220, 0x70B6,
+		0x720B, 0x720F, 0x723C, 0x7213,
+		0x7213, 0x72E8, 0x70B7, 0x7246,
+		0x7206, 0x720C, 0x7200, 0x7200,
+	};
+
+	dev_dbg(ctx->panel.dev, "transfer display mode settings\n");
+	return lg4573_spi_write_u16_array(ctx, display_mode_settings,
+					  ARRAY_SIZE(display_mode_settings));
+}
+
+static int lg4573_power_settings(struct lg4573 *ctx)
+{
+	static const u16 power_settings[] = {
+		0x70C0, 0x7201, 0x7211, 0x70C3,
+		0x7207, 0x7203, 0x7204, 0x7204,
+		0x7204, 0x70C4, 0x7212, 0x7224,
+		0x7218, 0x7218, 0x7202, 0x7249,
+		0x70C5, 0x726F, 0x70C6, 0x7241,
+		0x7263,
+	};
+
+	dev_dbg(ctx->panel.dev, "transfer power settings\n");
+	return lg4573_spi_write_u16_array(ctx, power_settings,
+					  ARRAY_SIZE(power_settings));
+}
+
+static int lg4573_gamma_settings(struct lg4573 *ctx)
+{
+	static const u16 gamma_settings[] = {
+		0x70D0, 0x7203, 0x7207, 0x7273,
+		0x7235, 0x7200, 0x7201, 0x7220,
+		0x7200, 0x7203, 0x70D1, 0x7203,
+		0x7207, 0x7273, 0x7235, 0x7200,
+		0x7201, 0x7220, 0x7200, 0x7203,
+		0x70D2, 0x7203, 0x7207, 0x7273,
+		0x7235, 0x7200, 0x7201, 0x7220,
+		0x7200, 0x7203, 0x70D3, 0x7203,
+		0x7207, 0x7273, 0x7235, 0x7200,
+		0x7201, 0x7220, 0x7200, 0x7203,
+		0x70D4, 0x7203, 0x7207, 0x7273,
+		0x7235, 0x7200, 0x7201, 0x7220,
+		0x7200, 0x7203, 0x70D5, 0x7203,
+		0x7207, 0x7273, 0x7235, 0x7200,
+		0x7201, 0x7220, 0x7200, 0x7203,
+	};
+
+	dev_dbg(ctx->panel.dev, "transfer gamma settings\n");
+	return lg4573_spi_write_u16_array(ctx, gamma_settings,
+					  ARRAY_SIZE(gamma_settings));
+}
+
+static int lg4573_init(struct lg4573 *ctx)
+{
+	int ret;
+
+	dev_dbg(ctx->panel.dev, "initializing LCD\n");
+
+	ret = lg4573_display_mode_settings(ctx);
+	if (ret)
+		return ret;
+
+	ret = lg4573_power_settings(ctx);
+	if (ret)
+		return ret;
+
+	return lg4573_gamma_settings(ctx);
+}
+
+static int lg4573_power_on(struct lg4573 *ctx)
+{
+	return lg4573_display_on(ctx);
+}
+
+static int lg4573_disable(struct drm_panel *panel)
+{
+	struct lg4573 *ctx = panel_to_lg4573(panel);
+
+	return lg4573_display_off(ctx);
+}
+
+static int lg4573_enable(struct drm_panel *panel)
+{
+	struct lg4573 *ctx = panel_to_lg4573(panel);
+	int ret;
+
+	/* program display mode, power and gamma settings before display-on */
+	ret = lg4573_init(ctx);
+	if (ret)
+		return ret;
+
+	return lg4573_power_on(ctx);
+}
+
+static const struct drm_display_mode default_mode = {
+	.clock = 27000,
+	.hdisplay = 480,
+	.hsync_start = 480 + 10,
+	.hsync_end = 480 + 10 + 59,
+	.htotal = 480 + 10 + 59 + 10,
+	.vdisplay = 800,
+	.vsync_start = 800 + 15,
+	.vsync_end = 800 + 15 + 15,
+	.vtotal = 800 + 15 + 15 + 15,
+	.vrefresh = 60,
+};
+
+static int lg4573_get_modes(struct drm_panel *panel)
+{
+	struct drm_connector *connector = panel->connector;
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	if (!mode) {
+		dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+			default_mode.hdisplay, default_mode.vdisplay,
+			default_mode.vrefresh);
+		return -ENOMEM;
+	}
+
+	drm_mode_set_name(mode);
+
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	drm_mode_probed_add(connector, mode);
+
+	panel->connector->display_info.width_mm = 61;
+	panel->connector->display_info.height_mm = 103;
+
+	return 1;
+}
+
+static const struct drm_panel_funcs lg4573_drm_funcs = {
+	.disable = lg4573_disable,
+	.enable = lg4573_enable,
+	.get_modes = lg4573_get_modes,
+};
+
+static int lg4573_probe(struct spi_device *spi)
+{
+	struct lg4573 *ctx;
+	int ret;
+
+	ctx = devm_kzalloc(&spi->dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->spi = spi;
+
+	spi_set_drvdata(spi, ctx);
+	spi->bits_per_word = 8;
+
+	ret = spi_setup(spi);
+	if (ret < 0) {
+		dev_err(&spi->dev, "SPI setup failed: %d\n", ret);
+		return ret;
+	}
+
+	drm_panel_init(&ctx->panel);
+	ctx->panel.dev = &spi->dev;
+	ctx->panel.funcs = &lg4573_drm_funcs;
+
+	return drm_panel_add(&ctx->panel);
+}
+
+static int lg4573_remove(struct spi_device *spi)
+{
+	struct lg4573 *ctx = spi_get_drvdata(spi);
+
+	lg4573_display_off(ctx);
+	drm_panel_remove(&ctx->panel);
+
+	return 0;
+}
+
+static const struct of_device_id lg4573_of_match[] = {
+	{ .compatible = "lg,lg4573" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, lg4573_of_match);
+
+static struct spi_driver lg4573_driver = {
+	.probe = lg4573_probe,
+	.remove = lg4573_remove,
+	.driver = {
+		.name = "lg4573",
+		.owner = THIS_MODULE,
+		.of_match_table = lg4573_of_match,
+	},
+};
+module_spi_driver(lg4573_driver);
+
+MODULE_AUTHOR("Heiko Schocher <hs@denx.de>");
+MODULE_DESCRIPTION("lg4573 LCD Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-s6e8aa0.c
deleted file mode 100644
index 3005110..0000000
--- a/drivers/gpu/drm/panel/panel-s6e8aa0.c
+++ /dev/null
@@ -1,1067 +0,0 @@
-/*
- * MIPI-DSI based s6e8aa0 AMOLED LCD 5.3 inch panel driver.
- *
- * Copyright (c) 2013 Samsung Electronics Co., Ltd
- *
- * Inki Dae, <inki.dae@samsung.com>
- * Donghwa Lee, <dh09.lee@samsung.com>
- * Joongmock Shin <jmock.shin@samsung.com>
- * Eunchul Kim <chulspro.kim@samsung.com>
- * Tomasz Figa <t.figa@samsung.com>
- * Andrzej Hajda <a.hajda@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <drm/drmP.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
-
-#include <linux/gpio/consumer.h>
-#include <linux/regulator/consumer.h>
-
-#include <video/mipi_display.h>
-#include <video/of_videomode.h>
-#include <video/videomode.h>
-
-#define LDI_MTP_LENGTH			24
-#define GAMMA_LEVEL_NUM			25
-#define GAMMA_TABLE_LEN			26
-
-#define PANELCTL_SS_MASK		(1 << 5)
-#define PANELCTL_SS_1_800		(0 << 5)
-#define PANELCTL_SS_800_1		(1 << 5)
-#define PANELCTL_GTCON_MASK		(7 << 2)
-#define PANELCTL_GTCON_110		(6 << 2)
-#define PANELCTL_GTCON_111		(7 << 2)
-
-#define PANELCTL_CLK1_CON_MASK		(7 << 3)
-#define PANELCTL_CLK1_000		(0 << 3)
-#define PANELCTL_CLK1_001		(1 << 3)
-#define PANELCTL_CLK2_CON_MASK		(7 << 0)
-#define PANELCTL_CLK2_000		(0 << 0)
-#define PANELCTL_CLK2_001		(1 << 0)
-
-#define PANELCTL_INT1_CON_MASK		(7 << 3)
-#define PANELCTL_INT1_000		(0 << 3)
-#define PANELCTL_INT1_001		(1 << 3)
-#define PANELCTL_INT2_CON_MASK		(7 << 0)
-#define PANELCTL_INT2_000		(0 << 0)
-#define PANELCTL_INT2_001		(1 << 0)
-
-#define PANELCTL_BICTL_CON_MASK		(7 << 3)
-#define PANELCTL_BICTL_000		(0 << 3)
-#define PANELCTL_BICTL_001		(1 << 3)
-#define PANELCTL_BICTLB_CON_MASK	(7 << 0)
-#define PANELCTL_BICTLB_000		(0 << 0)
-#define PANELCTL_BICTLB_001		(1 << 0)
-
-#define PANELCTL_EM_CLK1_CON_MASK	(7 << 3)
-#define PANELCTL_EM_CLK1_110		(6 << 3)
-#define PANELCTL_EM_CLK1_111		(7 << 3)
-#define PANELCTL_EM_CLK1B_CON_MASK	(7 << 0)
-#define PANELCTL_EM_CLK1B_110		(6 << 0)
-#define PANELCTL_EM_CLK1B_111		(7 << 0)
-
-#define PANELCTL_EM_CLK2_CON_MASK	(7 << 3)
-#define PANELCTL_EM_CLK2_110		(6 << 3)
-#define PANELCTL_EM_CLK2_111		(7 << 3)
-#define PANELCTL_EM_CLK2B_CON_MASK	(7 << 0)
-#define PANELCTL_EM_CLK2B_110		(6 << 0)
-#define PANELCTL_EM_CLK2B_111		(7 << 0)
-
-#define PANELCTL_EM_INT1_CON_MASK	(7 << 3)
-#define PANELCTL_EM_INT1_000		(0 << 3)
-#define PANELCTL_EM_INT1_001		(1 << 3)
-#define PANELCTL_EM_INT2_CON_MASK	(7 << 0)
-#define PANELCTL_EM_INT2_000		(0 << 0)
-#define PANELCTL_EM_INT2_001		(1 << 0)
-
-#define AID_DISABLE			(0x4)
-#define AID_1				(0x5)
-#define AID_2				(0x6)
-#define AID_3				(0x7)
-
-typedef u8 s6e8aa0_gamma_table[GAMMA_TABLE_LEN];
-
-struct s6e8aa0_variant {
-	u8 version;
-	const s6e8aa0_gamma_table *gamma_tables;
-};
-
-struct s6e8aa0 {
-	struct device *dev;
-	struct drm_panel panel;
-
-	struct regulator_bulk_data supplies[2];
-	struct gpio_desc *reset_gpio;
-	u32 power_on_delay;
-	u32 reset_delay;
-	u32 init_delay;
-	bool flip_horizontal;
-	bool flip_vertical;
-	struct videomode vm;
-	u32 width_mm;
-	u32 height_mm;
-
-	u8 version;
-	u8 id;
-	const struct s6e8aa0_variant *variant;
-	int brightness;
-
-	/* This field is tested by functions directly accessing DSI bus before
-	 * transfer, transfer is skipped if it is set. In case of transfer
-	 * failure or unexpected response the field is set to error value.
-	 * Such construct allows to eliminate many checks in higher level
-	 * functions.
-	 */
-	int error;
-};
-
-static inline struct s6e8aa0 *panel_to_s6e8aa0(struct drm_panel *panel)
-{
-	return container_of(panel, struct s6e8aa0, panel);
-}
-
-static int s6e8aa0_clear_error(struct s6e8aa0 *ctx)
-{
-	int ret = ctx->error;
-
-	ctx->error = 0;
-	return ret;
-}
-
-static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len)
-{
-	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-	ssize_t ret;
-
-	if (ctx->error < 0)
-		return;
-
-	ret = mipi_dsi_dcs_write_buffer(dsi, data, len);
-	if (ret < 0) {
-		dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret,
-			(int)len, data);
-		ctx->error = ret;
-	}
-}
-
-static int s6e8aa0_dcs_read(struct s6e8aa0 *ctx, u8 cmd, void *data, size_t len)
-{
-	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-	int ret;
-
-	if (ctx->error < 0)
-		return ctx->error;
-
-	ret = mipi_dsi_dcs_read(dsi, cmd, data, len);
-	if (ret < 0) {
-		dev_err(ctx->dev, "error %d reading dcs seq(%#x)\n", ret, cmd);
-		ctx->error = ret;
-	}
-
-	return ret;
-}
-
-#define s6e8aa0_dcs_write_seq(ctx, seq...) \
-({\
-	const u8 d[] = { seq };\
-	BUILD_BUG_ON_MSG(ARRAY_SIZE(d) > 64, "DCS sequence too big for stack");\
-	s6e8aa0_dcs_write(ctx, d, ARRAY_SIZE(d));\
-})
-
-#define s6e8aa0_dcs_write_seq_static(ctx, seq...) \
-({\
-	static const u8 d[] = { seq };\
-	s6e8aa0_dcs_write(ctx, d, ARRAY_SIZE(d));\
-})
-
-static void s6e8aa0_apply_level_1_key(struct s6e8aa0 *ctx)
-{
-	s6e8aa0_dcs_write_seq_static(ctx, 0xf0, 0x5a, 0x5a);
-}
-
-static void s6e8aa0_panel_cond_set_v142(struct s6e8aa0 *ctx)
-{
-	static const u8 aids[] = {
-		0x04, 0x04, 0x04, 0x04, 0x04, 0x60, 0x80, 0xA0
-	};
-	u8 aid = aids[ctx->id >> 5];
-	u8 cfg = 0x3d;
-	u8 clk_con = 0xc8;
-	u8 int_con = 0x08;
-	u8 bictl_con = 0x48;
-	u8 em_clk1_con = 0xff;
-	u8 em_clk2_con = 0xff;
-	u8 em_int_con = 0xc8;
-
-	if (ctx->flip_vertical) {
-		/* GTCON */
-		cfg &= ~(PANELCTL_GTCON_MASK);
-		cfg |= (PANELCTL_GTCON_110);
-	}
-
-	if (ctx->flip_horizontal) {
-		/* SS */
-		cfg &= ~(PANELCTL_SS_MASK);
-		cfg |= (PANELCTL_SS_1_800);
-	}
-
-	if (ctx->flip_horizontal || ctx->flip_vertical) {
-		/* CLK1,2_CON */
-		clk_con &= ~(PANELCTL_CLK1_CON_MASK |
-			PANELCTL_CLK2_CON_MASK);
-		clk_con |= (PANELCTL_CLK1_000 | PANELCTL_CLK2_001);
-
-		/* INT1,2_CON */
-		int_con &= ~(PANELCTL_INT1_CON_MASK |
-			PANELCTL_INT2_CON_MASK);
-		int_con |= (PANELCTL_INT1_000 | PANELCTL_INT2_001);
-
-		/* BICTL,B_CON */
-		bictl_con &= ~(PANELCTL_BICTL_CON_MASK |
-			PANELCTL_BICTLB_CON_MASK);
-		bictl_con |= (PANELCTL_BICTL_000 |
-			PANELCTL_BICTLB_001);
-
-		/* EM_CLK1,1B_CON */
-		em_clk1_con &= ~(PANELCTL_EM_CLK1_CON_MASK |
-			PANELCTL_EM_CLK1B_CON_MASK);
-		em_clk1_con |= (PANELCTL_EM_CLK1_110 |
-			PANELCTL_EM_CLK1B_110);
-
-		/* EM_CLK2,2B_CON */
-		em_clk2_con &= ~(PANELCTL_EM_CLK2_CON_MASK |
-			PANELCTL_EM_CLK2B_CON_MASK);
-		em_clk2_con |= (PANELCTL_EM_CLK2_110 |
-			PANELCTL_EM_CLK2B_110);
-
-		/* EM_INT1,2_CON */
-		em_int_con &= ~(PANELCTL_EM_INT1_CON_MASK |
-			PANELCTL_EM_INT2_CON_MASK);
-		em_int_con |= (PANELCTL_EM_INT1_000 |
-			PANELCTL_EM_INT2_001);
-	}
-
-	s6e8aa0_dcs_write_seq(ctx,
-		0xf8, cfg, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00,
-		0x3c, 0x78, 0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00,
-		0x00, 0x20, aid, 0x08, 0x6e, 0x00, 0x00, 0x00,
-		0x02, 0x07, 0x07, 0x23, 0x23, 0xc0, clk_con, int_con,
-		bictl_con, 0xc1, 0x00, 0xc1, em_clk1_con, em_clk2_con,
-		em_int_con);
-}
-
-static void s6e8aa0_panel_cond_set(struct s6e8aa0 *ctx)
-{
-	if (ctx->version < 142)
-		s6e8aa0_dcs_write_seq_static(ctx,
-			0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x94, 0x00,
-			0x3c, 0x78, 0x10, 0x27, 0x08, 0x6e, 0x00, 0x00,
-			0x00, 0x00, 0x04, 0x08, 0x6e, 0x00, 0x00, 0x00,
-			0x00, 0x07, 0x07, 0x23, 0x6e, 0xc0, 0xc1, 0x01,
-			0x81, 0xc1, 0x00, 0xc3, 0xf6, 0xf6, 0xc1
-		);
-	else
-		s6e8aa0_panel_cond_set_v142(ctx);
-}
-
-static void s6e8aa0_display_condition_set(struct s6e8aa0 *ctx)
-{
-	s6e8aa0_dcs_write_seq_static(ctx, 0xf2, 0x80, 0x03, 0x0d);
-}
-
-static void s6e8aa0_etc_source_control(struct s6e8aa0 *ctx)
-{
-	s6e8aa0_dcs_write_seq_static(ctx, 0xf6, 0x00, 0x02, 0x00);
-}
-
-static void s6e8aa0_etc_pentile_control(struct s6e8aa0 *ctx)
-{
-	static const u8 pent32[] = {
-		0xb6, 0x0c, 0x02, 0x03, 0x32, 0xc0, 0x44, 0x44, 0xc0, 0x00
-	};
-
-	static const u8 pent142[] = {
-		0xb6, 0x0c, 0x02, 0x03, 0x32, 0xff, 0x44, 0x44, 0xc0, 0x00
-	};
-
-	if (ctx->version < 142)
-		s6e8aa0_dcs_write(ctx, pent32, ARRAY_SIZE(pent32));
-	else
-		s6e8aa0_dcs_write(ctx, pent142, ARRAY_SIZE(pent142));
-}
-
-static void s6e8aa0_etc_power_control(struct s6e8aa0 *ctx)
-{
-	static const u8 pwr142[] = {
-		0xf4, 0xcf, 0x0a, 0x12, 0x10, 0x1e, 0x33, 0x02
-	};
-
-	static const u8 pwr32[] = {
-		0xf4, 0xcf, 0x0a, 0x15, 0x10, 0x19, 0x33, 0x02
-	};
-
-	if (ctx->version < 142)
-		s6e8aa0_dcs_write(ctx, pwr32, ARRAY_SIZE(pwr32));
-	else
-		s6e8aa0_dcs_write(ctx, pwr142, ARRAY_SIZE(pwr142));
-}
-
-static void s6e8aa0_etc_elvss_control(struct s6e8aa0 *ctx)
-{
-	u8 id = ctx->id ? 0 : 0x95;
-
-	s6e8aa0_dcs_write_seq(ctx, 0xb1, 0x04, id);
-}
-
-static void s6e8aa0_elvss_nvm_set_v142(struct s6e8aa0 *ctx)
-{
-	u8 br;
-
-	switch (ctx->brightness) {
-	case 0 ... 6: /* 30cd ~ 100cd */
-		br = 0xdf;
-		break;
-	case 7 ... 11: /* 120cd ~ 150cd */
-		br = 0xdd;
-		break;
-	case 12 ... 15: /* 180cd ~ 210cd */
-	default:
-		br = 0xd9;
-		break;
-	case 16 ... 24: /* 240cd ~ 300cd */
-		br = 0xd0;
-		break;
-	}
-
-	s6e8aa0_dcs_write_seq(ctx, 0xd9, 0x14, 0x40, 0x0c, 0xcb, 0xce, 0x6e,
-		0xc4, 0x0f, 0x40, 0x41, br, 0x00, 0x60, 0x19);
-}
-
-static void s6e8aa0_elvss_nvm_set(struct s6e8aa0 *ctx)
-{
-	if (ctx->version < 142)
-		s6e8aa0_dcs_write_seq_static(ctx,
-			0xd9, 0x14, 0x40, 0x0c, 0xcb, 0xce, 0x6e, 0xc4, 0x07,
-			0x40, 0x41, 0xc1, 0x00, 0x60, 0x19);
-	else
-		s6e8aa0_elvss_nvm_set_v142(ctx);
-};
-
-static void s6e8aa0_apply_level_2_key(struct s6e8aa0 *ctx)
-{
-	s6e8aa0_dcs_write_seq_static(ctx, 0xfc, 0x5a, 0x5a);
-}
-
-static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v142[GAMMA_LEVEL_NUM] = {
-	{
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x62, 0x55, 0x55,
-		0xaf, 0xb1, 0xb1, 0xbd, 0xce, 0xb7, 0x9a, 0xb1,
-		0x90, 0xb2, 0xc4, 0xae, 0x00, 0x60, 0x00, 0x40,
-		0x00, 0x70,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x74, 0x68, 0x69,
-		0xb8, 0xc1, 0xb7, 0xbd, 0xcd, 0xb8, 0x93, 0xab,
-		0x88, 0xb4, 0xc4, 0xb1, 0x00, 0x6b, 0x00, 0x4d,
-		0x00, 0x7d,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x95, 0x8a, 0x89,
-		0xb4, 0xc6, 0xb2, 0xc5, 0xd2, 0xbf, 0x90, 0xa8,
-		0x85, 0xb5, 0xc4, 0xb3, 0x00, 0x7b, 0x00, 0x5d,
-		0x00, 0x8f,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9f, 0x98, 0x92,
-		0xb3, 0xc4, 0xb0, 0xbc, 0xcc, 0xb4, 0x91, 0xa6,
-		0x87, 0xb5, 0xc5, 0xb4, 0x00, 0x87, 0x00, 0x6a,
-		0x00, 0x9e,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x99, 0x93, 0x8b,
-		0xb2, 0xc2, 0xb0, 0xbd, 0xce, 0xb4, 0x90, 0xa6,
-		0x87, 0xb3, 0xc3, 0xb2, 0x00, 0x8d, 0x00, 0x70,
-		0x00, 0xa4,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xa5, 0x99,
-		0xb2, 0xc2, 0xb0, 0xbb, 0xcd, 0xb1, 0x93, 0xa7,
-		0x8a, 0xb2, 0xc1, 0xb0, 0x00, 0x92, 0x00, 0x75,
-		0x00, 0xaa,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa0, 0xa0, 0x93,
-		0xb6, 0xc4, 0xb4, 0xb5, 0xc8, 0xaa, 0x94, 0xa9,
-		0x8c, 0xb2, 0xc0, 0xb0, 0x00, 0x97, 0x00, 0x7a,
-		0x00, 0xaf,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xa7, 0x96,
-		0xb3, 0xc2, 0xb0, 0xba, 0xcb, 0xb0, 0x94, 0xa8,
-		0x8c, 0xb0, 0xbf, 0xaf, 0x00, 0x9f, 0x00, 0x83,
-		0x00, 0xb9,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9d, 0xa2, 0x90,
-		0xb6, 0xc5, 0xb3, 0xb8, 0xc9, 0xae, 0x94, 0xa8,
-		0x8d, 0xaf, 0xbd, 0xad, 0x00, 0xa4, 0x00, 0x88,
-		0x00, 0xbf,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa6, 0xac, 0x97,
-		0xb4, 0xc4, 0xb1, 0xbb, 0xcb, 0xb2, 0x93, 0xa7,
-		0x8d, 0xae, 0xbc, 0xad, 0x00, 0xa7, 0x00, 0x8c,
-		0x00, 0xc3,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa2, 0xa9, 0x93,
-		0xb6, 0xc5, 0xb2, 0xba, 0xc9, 0xb0, 0x93, 0xa7,
-		0x8d, 0xae, 0xbb, 0xac, 0x00, 0xab, 0x00, 0x90,
-		0x00, 0xc8,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9e, 0xa6, 0x8f,
-		0xb7, 0xc6, 0xb3, 0xb8, 0xc8, 0xb0, 0x93, 0xa6,
-		0x8c, 0xae, 0xbb, 0xad, 0x00, 0xae, 0x00, 0x93,
-		0x00, 0xcc,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xab, 0xb4, 0x9c,
-		0xb3, 0xc3, 0xaf, 0xb7, 0xc7, 0xaf, 0x93, 0xa6,
-		0x8c, 0xaf, 0xbc, 0xad, 0x00, 0xb1, 0x00, 0x97,
-		0x00, 0xcf,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa6, 0xb1, 0x98,
-		0xb1, 0xc2, 0xab, 0xba, 0xc9, 0xb2, 0x93, 0xa6,
-		0x8d, 0xae, 0xba, 0xab, 0x00, 0xb5, 0x00, 0x9b,
-		0x00, 0xd4,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xae, 0x94,
-		0xb2, 0xc3, 0xac, 0xbb, 0xca, 0xb4, 0x91, 0xa4,
-		0x8a, 0xae, 0xba, 0xac, 0x00, 0xb8, 0x00, 0x9e,
-		0x00, 0xd8,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xab, 0xb7, 0x9c,
-		0xae, 0xc0, 0xa9, 0xba, 0xc9, 0xb3, 0x92, 0xa5,
-		0x8b, 0xad, 0xb9, 0xab, 0x00, 0xbb, 0x00, 0xa1,
-		0x00, 0xdc,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb4, 0x97,
-		0xb0, 0xc1, 0xaa, 0xb9, 0xc8, 0xb2, 0x92, 0xa5,
-		0x8c, 0xae, 0xb9, 0xab, 0x00, 0xbe, 0x00, 0xa4,
-		0x00, 0xdf,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xb0, 0x94,
-		0xb0, 0xc2, 0xab, 0xbb, 0xc9, 0xb3, 0x91, 0xa4,
-		0x8b, 0xad, 0xb8, 0xaa, 0x00, 0xc1, 0x00, 0xa8,
-		0x00, 0xe2,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xb0, 0x94,
-		0xae, 0xbf, 0xa8, 0xb9, 0xc8, 0xb3, 0x92, 0xa4,
-		0x8b, 0xad, 0xb7, 0xa9, 0x00, 0xc4, 0x00, 0xab,
-		0x00, 0xe6,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb6, 0x98,
-		0xaf, 0xc0, 0xa8, 0xb8, 0xc7, 0xb2, 0x93, 0xa5,
-		0x8d, 0xad, 0xb7, 0xa9, 0x00, 0xc7, 0x00, 0xae,
-		0x00, 0xe9,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb3, 0x95,
-		0xaf, 0xc1, 0xa9, 0xb9, 0xc8, 0xb3, 0x92, 0xa4,
-		0x8b, 0xad, 0xb7, 0xaa, 0x00, 0xc9, 0x00, 0xb0,
-		0x00, 0xec,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb3, 0x95,
-		0xac, 0xbe, 0xa6, 0xbb, 0xc9, 0xb4, 0x90, 0xa3,
-		0x8a, 0xad, 0xb7, 0xa9, 0x00, 0xcc, 0x00, 0xb4,
-		0x00, 0xf0,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa0, 0xb0, 0x91,
-		0xae, 0xc0, 0xa6, 0xba, 0xc8, 0xb4, 0x91, 0xa4,
-		0x8b, 0xad, 0xb7, 0xa9, 0x00, 0xcf, 0x00, 0xb7,
-		0x00, 0xf3,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb8, 0x98,
-		0xab, 0xbd, 0xa4, 0xbb, 0xc9, 0xb5, 0x91, 0xa3,
-		0x8b, 0xac, 0xb6, 0xa8, 0x00, 0xd1, 0x00, 0xb9,
-		0x00, 0xf6,
-	}, {
-		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb5, 0x95,
-		0xa9, 0xbc, 0xa1, 0xbb, 0xc9, 0xb5, 0x91, 0xa3,
-		0x8a, 0xad, 0xb6, 0xa8, 0x00, 0xd6, 0x00, 0xbf,
-		0x00, 0xfc,
-	},
-};
-
-static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v96[GAMMA_LEVEL_NUM] = {
-	{
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
-		0xdf, 0x1f, 0xd7, 0xdc, 0xb7, 0xe1, 0xc0, 0xaf,
-		0xc4, 0xd2, 0xd0, 0xcf, 0x00, 0x4d, 0x00, 0x40,
-		0x00, 0x5f,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
-		0xd5, 0x35, 0xcf, 0xdc, 0xc1, 0xe1, 0xbf, 0xb3,
-		0xc1, 0xd2, 0xd1, 0xce,	0x00, 0x53, 0x00, 0x46,
-		0x00, 0x67,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
-		0xd2, 0x64, 0xcf, 0xdb, 0xc6, 0xe1, 0xbd, 0xb3,
-		0xbd, 0xd2, 0xd2, 0xce,	0x00, 0x59, 0x00, 0x4b,
-		0x00, 0x6e,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
-		0xd0, 0x7c, 0xcf, 0xdb, 0xc9, 0xe0, 0xbc, 0xb4,
-		0xbb, 0xcf, 0xd1, 0xcc, 0x00, 0x5f, 0x00, 0x50,
-		0x00, 0x75,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
-		0xd0, 0x8e, 0xd1, 0xdb, 0xcc, 0xdf, 0xbb, 0xb6,
-		0xb9, 0xd0, 0xd1, 0xcd,	0x00, 0x63, 0x00, 0x54,
-		0x00, 0x7a,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
-		0xd1, 0x9e, 0xd5, 0xda, 0xcd, 0xdd, 0xbb, 0xb7,
-		0xb9, 0xce, 0xce, 0xc9,	0x00, 0x68, 0x00, 0x59,
-		0x00, 0x81,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
-		0xd0, 0xa5, 0xd6, 0xda, 0xcf, 0xdd, 0xbb, 0xb7,
-		0xb8, 0xcc, 0xcd, 0xc7,	0x00, 0x6c, 0x00, 0x5c,
-		0x00, 0x86,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xfe,
-		0xd0, 0xae, 0xd7, 0xd9, 0xd0, 0xdb, 0xb9, 0xb6,
-		0xb5, 0xca, 0xcc, 0xc5,	0x00, 0x74, 0x00, 0x63,
-		0x00, 0x90,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xf9,
-		0xcf, 0xb0, 0xd6, 0xd9, 0xd1, 0xdb, 0xb9, 0xb6,
-		0xb4, 0xca, 0xcb, 0xc5,	0x00, 0x77, 0x00, 0x66,
-		0x00, 0x94,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xf7,
-		0xcf, 0xb3, 0xd7, 0xd8, 0xd1, 0xd9, 0xb7, 0xb6,
-		0xb3, 0xc9, 0xca, 0xc3,	0x00, 0x7b, 0x00, 0x69,
-		0x00, 0x99,
-
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xfd, 0x2f, 0xf7,
-		0xdf, 0xb5, 0xd6, 0xd8, 0xd1, 0xd8, 0xb6, 0xb5,
-		0xb2, 0xca, 0xcb, 0xc4,	0x00, 0x7e, 0x00, 0x6c,
-		0x00, 0x9d,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xfa, 0x2f, 0xf5,
-		0xce, 0xb6, 0xd5, 0xd7, 0xd2, 0xd8, 0xb6, 0xb4,
-		0xb0, 0xc7, 0xc9, 0xc1,	0x00, 0x84, 0x00, 0x71,
-		0x00, 0xa5,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf7, 0x2f, 0xf2,
-		0xce, 0xb9, 0xd5, 0xd8, 0xd2, 0xd8, 0xb4, 0xb4,
-		0xaf, 0xc7, 0xc9, 0xc1,	0x00, 0x87, 0x00, 0x73,
-		0x00, 0xa8,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf5, 0x2f, 0xf0,
-		0xdf, 0xba, 0xd5, 0xd7, 0xd2, 0xd7, 0xb4, 0xb4,
-		0xaf, 0xc5, 0xc7, 0xbf,	0x00, 0x8a, 0x00, 0x76,
-		0x00, 0xac,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf2, 0x2f, 0xed,
-		0xcE, 0xbb, 0xd4, 0xd6, 0xd2, 0xd6, 0xb5, 0xb4,
-		0xaF, 0xc5, 0xc7, 0xbf,	0x00, 0x8c, 0x00, 0x78,
-		0x00, 0xaf,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xef, 0x2f, 0xeb,
-		0xcd, 0xbb, 0xd2, 0xd7, 0xd3, 0xd6, 0xb3, 0xb4,
-		0xae, 0xc5, 0xc6, 0xbe,	0x00, 0x91, 0x00, 0x7d,
-		0x00, 0xb6,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xee, 0x2f, 0xea,
-		0xce, 0xbd, 0xd4, 0xd6, 0xd2, 0xd5, 0xb2, 0xb3,
-		0xad, 0xc3, 0xc4, 0xbb,	0x00, 0x94, 0x00, 0x7f,
-		0x00, 0xba,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xec, 0x2f, 0xe8,
-		0xce, 0xbe, 0xd3, 0xd6, 0xd3, 0xd5, 0xb2, 0xb2,
-		0xac, 0xc3, 0xc5, 0xbc,	0x00, 0x96, 0x00, 0x81,
-		0x00, 0xbd,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xeb, 0x2f, 0xe7,
-		0xce, 0xbf, 0xd3, 0xd6, 0xd2, 0xd5, 0xb1, 0xb2,
-		0xab, 0xc2, 0xc4, 0xbb,	0x00, 0x99, 0x00, 0x83,
-		0x00, 0xc0,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xef, 0x5f, 0xe9,
-		0xca, 0xbf, 0xd3, 0xd5, 0xd2, 0xd4, 0xb2, 0xb2,
-		0xab, 0xc1, 0xc4, 0xba,	0x00, 0x9b, 0x00, 0x85,
-		0x00, 0xc3,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xea, 0x5f, 0xe8,
-		0xee, 0xbf, 0xd2, 0xd5, 0xd2, 0xd4, 0xb1, 0xb2,
-		0xab, 0xc1, 0xc2, 0xb9,	0x00, 0x9D, 0x00, 0x87,
-		0x00, 0xc6,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe9, 0x5f, 0xe7,
-		0xcd, 0xbf, 0xd2, 0xd6, 0xd2, 0xd4, 0xb1, 0xb2,
-		0xab, 0xbe, 0xc0, 0xb7,	0x00, 0xa1, 0x00, 0x8a,
-		0x00, 0xca,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe8, 0x61, 0xe6,
-		0xcd, 0xbf, 0xd1, 0xd6, 0xd3, 0xd4, 0xaf, 0xb0,
-		0xa9, 0xbe, 0xc1, 0xb7,	0x00, 0xa3, 0x00, 0x8b,
-		0x00, 0xce,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe8, 0x62, 0xe5,
-		0xcc, 0xc0, 0xd0, 0xd6, 0xd2, 0xd4, 0xaf, 0xb1,
-		0xa9, 0xbd, 0xc0, 0xb6,	0x00, 0xa5, 0x00, 0x8d,
-		0x00, 0xd0,
-	}, {
-		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe7, 0x7f, 0xe3,
-		0xcc, 0xc1, 0xd0, 0xd5, 0xd3, 0xd3, 0xae, 0xaf,
-		0xa8, 0xbe, 0xc0, 0xb7,	0x00, 0xa8, 0x00, 0x90,
-		0x00, 0xd3,
-	}
-};
-
-static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v32[GAMMA_LEVEL_NUM] = {
-	{
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0x72, 0x5e, 0x6b,
-		0xa1, 0xa7, 0x9a, 0xb4, 0xcb, 0xb8, 0x92, 0xac,
-		0x97, 0xb4, 0xc3, 0xb5, 0x00, 0x4e, 0x00, 0x37,
-		0x00, 0x58,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0x85, 0x71, 0x7d,
-		0xa6, 0xb6, 0xa1, 0xb5, 0xca, 0xba, 0x93, 0xac,
-		0x98, 0xb2, 0xc0, 0xaf, 0x00, 0x59, 0x00, 0x43,
-		0x00, 0x64,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa4, 0x94, 0x9e,
-		0xa0, 0xbb, 0x9c, 0xc3, 0xd2, 0xc6, 0x93, 0xaa,
-		0x95, 0xb7, 0xc2, 0xb4, 0x00, 0x65, 0x00, 0x50,
-		0x00, 0x74,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xa1, 0xa6,
-		0xa0, 0xb9, 0x9b, 0xc3, 0xd1, 0xc8, 0x90, 0xa6,
-		0x90, 0xbb, 0xc3, 0xb7, 0x00, 0x6f, 0x00, 0x5b,
-		0x00, 0x80,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa6, 0x9d, 0x9f,
-		0x9f, 0xb8, 0x9a, 0xc7, 0xd5, 0xcc, 0x90, 0xa5,
-		0x8f, 0xb8, 0xc1, 0xb6, 0x00, 0x74, 0x00, 0x60,
-		0x00, 0x85,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb3, 0xae, 0xae,
-		0x9e, 0xb7, 0x9a, 0xc8, 0xd6, 0xce, 0x91, 0xa6,
-		0x90, 0xb6, 0xc0, 0xb3, 0x00, 0x78, 0x00, 0x65,
-		0x00, 0x8a,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xa9, 0xa8,
-		0xa3, 0xb9, 0x9e, 0xc4, 0xd3, 0xcb, 0x94, 0xa6,
-		0x90, 0xb6, 0xbf, 0xb3, 0x00, 0x7c, 0x00, 0x69,
-		0x00, 0x8e,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xaf, 0xaf, 0xa9,
-		0xa5, 0xbc, 0xa2, 0xc7, 0xd5, 0xcd, 0x93, 0xa5,
-		0x8f, 0xb4, 0xbd, 0xb1, 0x00, 0x83, 0x00, 0x70,
-		0x00, 0x96,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xab, 0xa3,
-		0xaa, 0xbf, 0xa7, 0xc5, 0xd3, 0xcb, 0x93, 0xa5,
-		0x8f, 0xb2, 0xbb, 0xb0, 0x00, 0x86, 0x00, 0x74,
-		0x00, 0x9b,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb1, 0xb5, 0xab,
-		0xab, 0xc0, 0xa9, 0xc7, 0xd4, 0xcc, 0x94, 0xa4,
-		0x8f, 0xb1, 0xbb, 0xaf, 0x00, 0x8a, 0x00, 0x77,
-		0x00, 0x9e,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb2, 0xa7,
-		0xae, 0xc2, 0xab, 0xc5, 0xd3, 0xca, 0x93, 0xa4,
-		0x8f, 0xb1, 0xba, 0xae, 0x00, 0x8d, 0x00, 0x7b,
-		0x00, 0xa2,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xaf, 0xa3,
-		0xb0, 0xc3, 0xae, 0xc4, 0xd1, 0xc8, 0x93, 0xa4,
-		0x8f, 0xb1, 0xba, 0xaf, 0x00, 0x8f, 0x00, 0x7d,
-		0x00, 0xa5,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb4, 0xbd, 0xaf,
-		0xae, 0xc1, 0xab, 0xc2, 0xd0, 0xc6, 0x94, 0xa4,
-		0x8f, 0xb1, 0xba, 0xaf, 0x00, 0x92, 0x00, 0x80,
-		0x00, 0xa8,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xb9, 0xac,
-		0xad, 0xc1, 0xab, 0xc4, 0xd1, 0xc7, 0x95, 0xa4,
-		0x90, 0xb0, 0xb9, 0xad, 0x00, 0x95, 0x00, 0x84,
-		0x00, 0xac,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb6, 0xa7,
-		0xaf, 0xc2, 0xae, 0xc5, 0xd1, 0xc7, 0x93, 0xa3,
-		0x8e, 0xb0, 0xb9, 0xad, 0x00, 0x98, 0x00, 0x86,
-		0x00, 0xaf,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb4, 0xbf, 0xaf,
-		0xad, 0xc1, 0xab, 0xc3, 0xd0, 0xc6, 0x94, 0xa3,
-		0x8f, 0xaf, 0xb8, 0xac, 0x00, 0x9a, 0x00, 0x89,
-		0x00, 0xb2,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xbc, 0xac,
-		0xaf, 0xc2, 0xad, 0xc2, 0xcf, 0xc4, 0x94, 0xa3,
-		0x90, 0xaf, 0xb8, 0xad, 0x00, 0x9c, 0x00, 0x8b,
-		0x00, 0xb5,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb9, 0xa7,
-		0xb1, 0xc4, 0xaf, 0xc3, 0xcf, 0xc5, 0x94, 0xa3,
-		0x8f, 0xae, 0xb7, 0xac, 0x00, 0x9f, 0x00, 0x8e,
-		0x00, 0xb8,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb9, 0xa7,
-		0xaf, 0xc2, 0xad, 0xc1, 0xce, 0xc3, 0x95, 0xa3,
-		0x90, 0xad, 0xb6, 0xab, 0x00, 0xa2, 0x00, 0x91,
-		0x00, 0xbb,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb1, 0xbe, 0xac,
-		0xb1, 0xc4, 0xaf, 0xc1, 0xcd, 0xc1, 0x95, 0xa4,
-		0x91, 0xad, 0xb6, 0xab, 0x00, 0xa4, 0x00, 0x93,
-		0x00, 0xbd,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbb, 0xa8,
-		0xb3, 0xc5, 0xb2, 0xc1, 0xcd, 0xc2, 0x95, 0xa3,
-		0x90, 0xad, 0xb6, 0xab, 0x00, 0xa6, 0x00, 0x95,
-		0x00, 0xc0,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbb, 0xa8,
-		0xb0, 0xc3, 0xaf, 0xc2, 0xce, 0xc2, 0x94, 0xa2,
-		0x90, 0xac, 0xb6, 0xab, 0x00, 0xa8, 0x00, 0x98,
-		0x00, 0xc3,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xb8, 0xa5,
-		0xb3, 0xc5, 0xb2, 0xc1, 0xcc, 0xc0, 0x95, 0xa2,
-		0x90, 0xad, 0xb6, 0xab, 0x00, 0xaa, 0x00, 0x9a,
-		0x00, 0xc5,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xc0, 0xac,
-		0xb0, 0xc3, 0xaf, 0xc1, 0xcd, 0xc1, 0x95, 0xa2,
-		0x90, 0xac, 0xb5, 0xa9, 0x00, 0xac, 0x00, 0x9c,
-		0x00, 0xc8,
-	}, {
-		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbd, 0xa8,
-		0xaf, 0xc2, 0xaf, 0xc1, 0xcc, 0xc0, 0x95, 0xa2,
-		0x90, 0xac, 0xb5, 0xaa, 0x00, 0xb1, 0x00, 0xa1,
-		0x00, 0xcc,
-	},
-};
-
-static const struct s6e8aa0_variant s6e8aa0_variants[] = {
-	{
-		.version = 32,
-		.gamma_tables = s6e8aa0_gamma_tables_v32,
-	}, {
-		.version = 96,
-		.gamma_tables = s6e8aa0_gamma_tables_v96,
-	}, {
-		.version = 142,
-		.gamma_tables = s6e8aa0_gamma_tables_v142,
-	}, {
-		.version = 210,
-		.gamma_tables = s6e8aa0_gamma_tables_v142,
-	}
-};
-
-static void s6e8aa0_brightness_set(struct s6e8aa0 *ctx)
-{
-	const u8 *gamma;
-
-	if (ctx->error)
-		return;
-
-	gamma = ctx->variant->gamma_tables[ctx->brightness];
-
-	if (ctx->version >= 142)
-		s6e8aa0_elvss_nvm_set(ctx);
-
-	s6e8aa0_dcs_write(ctx, gamma, GAMMA_TABLE_LEN);
-
-	/* update gamma table. */
-	s6e8aa0_dcs_write_seq_static(ctx, 0xf7, 0x03);
-}
-
-static void s6e8aa0_panel_init(struct s6e8aa0 *ctx)
-{
-	s6e8aa0_apply_level_1_key(ctx);
-	s6e8aa0_apply_level_2_key(ctx);
-	msleep(20);
-
-	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
-	msleep(40);
-
-	s6e8aa0_panel_cond_set(ctx);
-	s6e8aa0_display_condition_set(ctx);
-	s6e8aa0_brightness_set(ctx);
-	s6e8aa0_etc_source_control(ctx);
-	s6e8aa0_etc_pentile_control(ctx);
-	s6e8aa0_elvss_nvm_set(ctx);
-	s6e8aa0_etc_power_control(ctx);
-	s6e8aa0_etc_elvss_control(ctx);
-	msleep(ctx->init_delay);
-}
-
-static void s6e8aa0_set_maximum_return_packet_size(struct s6e8aa0 *ctx,
-						   u16 size)
-{
-	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-	int ret;
-
-	if (ctx->error < 0)
-		return;
-
-	ret = mipi_dsi_set_maximum_return_packet_size(dsi, size);
-	if (ret < 0) {
-		dev_err(ctx->dev,
-			"error %d setting maximum return packet size to %d\n",
-			ret, size);
-		ctx->error = ret;
-	}
-}
-
-static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx)
-{
-	u8 id[3];
-	int ret, i;
-
-	ret = s6e8aa0_dcs_read(ctx, 0xd1, id, ARRAY_SIZE(id));
-	if (ret < ARRAY_SIZE(id) || id[0] == 0x00) {
-		dev_err(ctx->dev, "read id failed\n");
-		ctx->error = -EIO;
-		return;
-	}
-
-	dev_info(ctx->dev, "ID: 0x%2x, 0x%2x, 0x%2x\n", id[0], id[1], id[2]);
-
-	for (i = 0; i < ARRAY_SIZE(s6e8aa0_variants); ++i) {
-		if (id[1] == s6e8aa0_variants[i].version)
-			break;
-	}
-	if (i >= ARRAY_SIZE(s6e8aa0_variants)) {
-		dev_err(ctx->dev, "unsupported display version %d\n", id[1]);
-		ctx->error = -EINVAL;
-		return;
-	}
-
-	ctx->variant = &s6e8aa0_variants[i];
-	ctx->version = id[1];
-	ctx->id = id[2];
-}
-
-static void s6e8aa0_set_sequence(struct s6e8aa0 *ctx)
-{
-	s6e8aa0_set_maximum_return_packet_size(ctx, 3);
-	s6e8aa0_read_mtp_id(ctx);
-	s6e8aa0_panel_init(ctx);
-	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
-}
-
-static int s6e8aa0_power_on(struct s6e8aa0 *ctx)
-{
-	int ret;
-
-	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
-	if (ret < 0)
-		return ret;
-
-	msleep(ctx->power_on_delay);
-
-	gpiod_set_value(ctx->reset_gpio, 0);
-	usleep_range(10000, 11000);
-	gpiod_set_value(ctx->reset_gpio, 1);
-
-	msleep(ctx->reset_delay);
-
-	return 0;
-}
-
-static int s6e8aa0_power_off(struct s6e8aa0 *ctx)
-{
-	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
-}
-
-static int s6e8aa0_disable(struct drm_panel *panel)
-{
-	return 0;
-}
-
-static int s6e8aa0_unprepare(struct drm_panel *panel)
-{
-	struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
-
-	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
-	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF);
-	msleep(40);
-
-	s6e8aa0_clear_error(ctx);
-
-	return s6e8aa0_power_off(ctx);
-}
-
-static int s6e8aa0_prepare(struct drm_panel *panel)
-{
-	struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
-	int ret;
-
-	ret = s6e8aa0_power_on(ctx);
-	if (ret < 0)
-		return ret;
-
-	s6e8aa0_set_sequence(ctx);
-	ret = ctx->error;
-
-	if (ret < 0)
-		s6e8aa0_unprepare(panel);
-
-	return ret;
-}
-
-static int s6e8aa0_enable(struct drm_panel *panel)
-{
-	return 0;
-}
-
-static int s6e8aa0_get_modes(struct drm_panel *panel)
-{
-	struct drm_connector *connector = panel->connector;
-	struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
-	struct drm_display_mode *mode;
-
-	mode = drm_mode_create(connector->dev);
-	if (!mode) {
-		DRM_ERROR("failed to create a new display mode\n");
-		return 0;
-	}
-
-	drm_display_mode_from_videomode(&ctx->vm, mode);
-	mode->width_mm = ctx->width_mm;
-	mode->height_mm = ctx->height_mm;
-	connector->display_info.width_mm = mode->width_mm;
-	connector->display_info.height_mm = mode->height_mm;
-
-	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
-	drm_mode_probed_add(connector, mode);
-
-	return 1;
-}
-
-static const struct drm_panel_funcs s6e8aa0_drm_funcs = {
-	.disable = s6e8aa0_disable,
-	.unprepare = s6e8aa0_unprepare,
-	.prepare = s6e8aa0_prepare,
-	.enable = s6e8aa0_enable,
-	.get_modes = s6e8aa0_get_modes,
-};
-
-static int s6e8aa0_parse_dt(struct s6e8aa0 *ctx)
-{
-	struct device *dev = ctx->dev;
-	struct device_node *np = dev->of_node;
-	int ret;
-
-	ret = of_get_videomode(np, &ctx->vm, 0);
-	if (ret < 0)
-		return ret;
-
-	of_property_read_u32(np, "power-on-delay", &ctx->power_on_delay);
-	of_property_read_u32(np, "reset-delay", &ctx->reset_delay);
-	of_property_read_u32(np, "init-delay", &ctx->init_delay);
-	of_property_read_u32(np, "panel-width-mm", &ctx->width_mm);
-	of_property_read_u32(np, "panel-height-mm", &ctx->height_mm);
-
-	ctx->flip_horizontal = of_property_read_bool(np, "flip-horizontal");
-	ctx->flip_vertical = of_property_read_bool(np, "flip-vertical");
-
-	return 0;
-}
-
-static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
-{
-	struct device *dev = &dsi->dev;
-	struct s6e8aa0 *ctx;
-	int ret;
-
-	ctx = devm_kzalloc(dev, sizeof(struct s6e8aa0), GFP_KERNEL);
-	if (!ctx)
-		return -ENOMEM;
-
-	mipi_dsi_set_drvdata(dsi, ctx);
-
-	ctx->dev = dev;
-
-	dsi->lanes = 4;
-	dsi->format = MIPI_DSI_FMT_RGB888;
-	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
-		| MIPI_DSI_MODE_VIDEO_HFP | MIPI_DSI_MODE_VIDEO_HBP
-		| MIPI_DSI_MODE_VIDEO_HSA | MIPI_DSI_MODE_EOT_PACKET
-		| MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_AUTO_VERT;
-
-	ret = s6e8aa0_parse_dt(ctx);
-	if (ret < 0)
-		return ret;
-
-	ctx->supplies[0].supply = "vdd3";
-	ctx->supplies[1].supply = "vci";
-	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
-				      ctx->supplies);
-	if (ret < 0) {
-		dev_err(dev, "failed to get regulators: %d\n", ret);
-		return ret;
-	}
-
-	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
-	if (IS_ERR(ctx->reset_gpio)) {
-		dev_err(dev, "cannot get reset-gpios %ld\n",
-			PTR_ERR(ctx->reset_gpio));
-		return PTR_ERR(ctx->reset_gpio);
-	}
-
-	ctx->brightness = GAMMA_LEVEL_NUM - 1;
-
-	drm_panel_init(&ctx->panel);
-	ctx->panel.dev = dev;
-	ctx->panel.funcs = &s6e8aa0_drm_funcs;
-
-	ret = drm_panel_add(&ctx->panel);
-	if (ret < 0)
-		return ret;
-
-	ret = mipi_dsi_attach(dsi);
-	if (ret < 0)
-		drm_panel_remove(&ctx->panel);
-
-	return ret;
-}
-
-static int s6e8aa0_remove(struct mipi_dsi_device *dsi)
-{
-	struct s6e8aa0 *ctx = mipi_dsi_get_drvdata(dsi);
-
-	mipi_dsi_detach(dsi);
-	drm_panel_remove(&ctx->panel);
-
-	return 0;
-}
-
-static const struct of_device_id s6e8aa0_of_match[] = {
-	{ .compatible = "samsung,s6e8aa0" },
-	{ }
-};
-MODULE_DEVICE_TABLE(of, s6e8aa0_of_match);
-
-static struct mipi_dsi_driver s6e8aa0_driver = {
-	.probe = s6e8aa0_probe,
-	.remove = s6e8aa0_remove,
-	.driver = {
-		.name = "panel_s6e8aa0",
-		.of_match_table = s6e8aa0_of_match,
-	},
-};
-module_mipi_dsi_driver(s6e8aa0_driver);
-
-MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_AUTHOR("Joongmock Shin <jmock.shin@samsung.com>");
-MODULE_AUTHOR("Eunchul Kim <chulspro.kim@samsung.com>");
-MODULE_AUTHOR("Tomasz Figa <t.figa@samsung.com>");
-MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
-MODULE_DESCRIPTION("MIPI-DSI based s6e8aa0 AMOLED LCD Panel Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
new file mode 100644
index 0000000..b202377
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -0,0 +1,392 @@
+/*
+ * ld9040 AMOLED LCD drm_panel driver.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ * Derived from drivers/video/backlight/ld9040.c
+ *
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+/* Manufacturer Command Set */
+#define MCS_MANPWR		0xb0
+#define MCS_ELVSS_ON		0xb1
+#define MCS_USER_SETTING	0xf0
+#define MCS_DISPCTL		0xf2
+#define MCS_POWER_CTRL		0xf4
+#define MCS_GTCON		0xf7
+#define MCS_PANEL_CONDITION	0xf8
+#define MCS_GAMMA_SET1		0xf9
+#define MCS_GAMMA_CTRL		0xfb
+
+/* array of gamma tables for gamma value 2.2 */
+static u8 const ld9040_gammas[25][22] = {
+	{ 0xf9, 0x00, 0x13, 0xb2, 0xba, 0xd2, 0x00, 0x30, 0x00, 0xaf, 0xc0,
+	  0xb8, 0xcd, 0x00, 0x3d, 0x00, 0xa8, 0xb8, 0xb7, 0xcd, 0x00, 0x44 },
+	{ 0xf9, 0x00, 0x13, 0xb9, 0xb9, 0xd0, 0x00, 0x3c, 0x00, 0xaf, 0xbf,
+	  0xb6, 0xcb, 0x00, 0x4b, 0x00, 0xa8, 0xb9, 0xb5, 0xcc, 0x00, 0x52 },
+	{ 0xf9, 0x00, 0x13, 0xba, 0xb9, 0xcd, 0x00, 0x41, 0x00, 0xb0, 0xbe,
+	  0xb5, 0xc9, 0x00, 0x51, 0x00, 0xa9, 0xb9, 0xb5, 0xca, 0x00, 0x57 },
+	{ 0xf9, 0x00, 0x13, 0xb9, 0xb8, 0xcd, 0x00, 0x46, 0x00, 0xb1, 0xbc,
+	  0xb5, 0xc8, 0x00, 0x56, 0x00, 0xaa, 0xb8, 0xb4, 0xc9, 0x00, 0x5d },
+	{ 0xf9, 0x00, 0x13, 0xba, 0xb8, 0xcb, 0x00, 0x4b, 0x00, 0xb3, 0xbc,
+	  0xb4, 0xc7, 0x00, 0x5c, 0x00, 0xac, 0xb8, 0xb4, 0xc8, 0x00, 0x62 },
+	{ 0xf9, 0x00, 0x13, 0xbb, 0xb7, 0xca, 0x00, 0x4f, 0x00, 0xb4, 0xbb,
+	  0xb3, 0xc7, 0x00, 0x60, 0x00, 0xad, 0xb8, 0xb4, 0xc7, 0x00, 0x67 },
+	{ 0xf9, 0x00, 0x47, 0xba, 0xb6, 0xca, 0x00, 0x53, 0x00, 0xb5, 0xbb,
+	  0xb3, 0xc6, 0x00, 0x65, 0x00, 0xae, 0xb8, 0xb3, 0xc7, 0x00, 0x6c },
+	{ 0xf9, 0x00, 0x71, 0xbb, 0xb5, 0xc8, 0x00, 0x57, 0x00, 0xb5, 0xbb,
+	  0xb0, 0xc5, 0x00, 0x6a, 0x00, 0xae, 0xb9, 0xb1, 0xc6, 0x00, 0x70 },
+	{ 0xf9, 0x00, 0x7b, 0xbb, 0xb4, 0xc8, 0x00, 0x5b, 0x00, 0xb5, 0xba,
+	  0xb1, 0xc4, 0x00, 0x6e, 0x00, 0xae, 0xb9, 0xb0, 0xc5, 0x00, 0x75 },
+	{ 0xf9, 0x00, 0x82, 0xba, 0xb4, 0xc7, 0x00, 0x5f, 0x00, 0xb5, 0xba,
+	  0xb0, 0xc3, 0x00, 0x72, 0x00, 0xae, 0xb8, 0xb0, 0xc3, 0x00, 0x7a },
+	{ 0xf9, 0x00, 0x89, 0xba, 0xb3, 0xc8, 0x00, 0x62, 0x00, 0xb6, 0xba,
+	  0xaf, 0xc3, 0x00, 0x76, 0x00, 0xaf, 0xb7, 0xae, 0xc4, 0x00, 0x7e },
+	{ 0xf9, 0x00, 0x8b, 0xb9, 0xb3, 0xc7, 0x00, 0x65, 0x00, 0xb7, 0xb8,
+	  0xaf, 0xc3, 0x00, 0x7a, 0x00, 0x80, 0xb6, 0xae, 0xc4, 0x00, 0x81 },
+	{ 0xf9, 0x00, 0x93, 0xba, 0xb3, 0xc5, 0x00, 0x69, 0x00, 0xb8, 0xb9,
+	  0xae, 0xc1, 0x00, 0x7f, 0x00, 0xb0, 0xb6, 0xae, 0xc3, 0x00, 0x85 },
+	{ 0xf9, 0x00, 0x97, 0xba, 0xb2, 0xc5, 0x00, 0x6c, 0x00, 0xb8, 0xb8,
+	  0xae, 0xc1, 0x00, 0x82, 0x00, 0xb0, 0xb6, 0xae, 0xc2, 0x00, 0x89 },
+	{ 0xf9, 0x00, 0x9a, 0xba, 0xb1, 0xc4, 0x00, 0x6f, 0x00, 0xb8, 0xb8,
+	  0xad, 0xc0, 0x00, 0x86, 0x00, 0xb0, 0xb7, 0xad, 0xc0, 0x00, 0x8d },
+	{ 0xf9, 0x00, 0x9c, 0xb9, 0xb0, 0xc4, 0x00, 0x72, 0x00, 0xb8, 0xb8,
+	  0xac, 0xbf, 0x00, 0x8a, 0x00, 0xb0, 0xb6, 0xac, 0xc0, 0x00, 0x91 },
+	{ 0xf9, 0x00, 0x9e, 0xba, 0xb0, 0xc2, 0x00, 0x75, 0x00, 0xb9, 0xb8,
+	  0xab, 0xbe, 0x00, 0x8e, 0x00, 0xb0, 0xb6, 0xac, 0xbf, 0x00, 0x94 },
+	{ 0xf9, 0x00, 0xa0, 0xb9, 0xaf, 0xc3, 0x00, 0x77, 0x00, 0xb9, 0xb7,
+	  0xab, 0xbe, 0x00, 0x90, 0x00, 0xb0, 0xb6, 0xab, 0xbf, 0x00, 0x97 },
+	{ 0xf9, 0x00, 0xa2, 0xb9, 0xaf, 0xc2, 0x00, 0x7a, 0x00, 0xb9, 0xb7,
+	  0xaa, 0xbd, 0x00, 0x94, 0x00, 0xb0, 0xb5, 0xab, 0xbf, 0x00, 0x9a },
+	{ 0xf9, 0x00, 0xa4, 0xb9, 0xaf, 0xc1, 0x00, 0x7d, 0x00, 0xb9, 0xb6,
+	  0xaa, 0xbb, 0x00, 0x97, 0x00, 0xb1, 0xb5, 0xaa, 0xbf, 0x00, 0x9d },
+	{ 0xf9, 0x00, 0xa4, 0xb8, 0xb0, 0xbf, 0x00, 0x80, 0x00, 0xb8, 0xb6,
+	  0xaa, 0xbc, 0x00, 0x9a, 0x00, 0xb0, 0xb5, 0xab, 0xbd, 0x00, 0xa0 },
+	{ 0xf9, 0x00, 0xa8, 0xb8, 0xae, 0xbe, 0x00, 0x84, 0x00, 0xb9, 0xb7,
+	  0xa8, 0xbc, 0x00, 0x9d, 0x00, 0xb2, 0xb5, 0xaa, 0xbc, 0x00, 0xa4 },
+	{ 0xf9, 0x00, 0xa9, 0xb6, 0xad, 0xbf, 0x00, 0x86, 0x00, 0xb8, 0xb5,
+	  0xa8, 0xbc, 0x00, 0xa0, 0x00, 0xb3, 0xb3, 0xa9, 0xbc, 0x00, 0xa7 },
+	{ 0xf9, 0x00, 0xa9, 0xb7, 0xae, 0xbd, 0x00, 0x89, 0x00, 0xb7, 0xb6,
+	  0xa8, 0xba, 0x00, 0xa4, 0x00, 0xb1, 0xb4, 0xaa, 0xbb, 0x00, 0xaa },
+	{ 0xf9, 0x00, 0xa7, 0xb4, 0xae, 0xbf, 0x00, 0x91, 0x00, 0xb2, 0xb4,
+	  0xaa, 0xbb, 0x00, 0xac, 0x00, 0xb3, 0xb1, 0xaa, 0xbc, 0x00, 0xb3 },
+};
+
+struct ld9040 {
+	struct device *dev;
+	struct drm_panel panel;
+
+	struct regulator_bulk_data supplies[2];
+	struct gpio_desc *reset_gpio;
+	u32 power_on_delay;
+	u32 reset_delay;
+	struct videomode vm;
+	u32 width_mm;
+	u32 height_mm;
+
+	int brightness;
+
+	/* This field is tested by functions that access the bus directly
+	 * before a transfer; the transfer is skipped if it is set. On a
+	 * transfer failure or an unexpected response the field is set to
+	 * an error value. This construct eliminates many error checks in
+	 * higher-level functions.
+	 */
+	int error;
+};
+
+static inline struct ld9040 *panel_to_ld9040(struct drm_panel *panel)
+{
+	return container_of(panel, struct ld9040, panel);
+}
+
+static int ld9040_clear_error(struct ld9040 *ctx)
+{
+	int ret = ctx->error;
+
+	ctx->error = 0;
+	return ret;
+}
+
+static int ld9040_spi_write_word(struct ld9040 *ctx, u16 data)
+{
+	struct spi_device *spi = to_spi_device(ctx->dev);
+	struct spi_transfer xfer = {
+		.len		= 2,
+		.tx_buf		= &data,
+	};
+	struct spi_message msg;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+
+	return spi_sync(spi, &msg);
+}
+
+static void ld9040_dcs_write(struct ld9040 *ctx, const u8 *data, size_t len)
+{
+	int ret = 0;
+
+	if (ctx->error < 0 || len == 0)
+		return;
+
+	dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", (int)len, data);
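+	/* 9-bit words: the D/C bit (bit 8) is clear for the leading command
+	 * byte and set for the parameter bytes that follow.
+	 */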
+	ret = ld9040_spi_write_word(ctx, *data);
+
+	while (!ret && --len) {
+		++data;
+		ret = ld9040_spi_write_word(ctx, *data | 0x100);
+	}
+
+	if (ret) {
+		dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret,
+			(int)len, data);
+		ctx->error = ret;
+	}
+
+	usleep_range(300, 310);
+}
+
+#define ld9040_dcs_write_seq_static(ctx, seq...) \
+({\
+	static const u8 d[] = { seq };\
+	ld9040_dcs_write(ctx, d, ARRAY_SIZE(d));\
+})
+
+static void ld9040_brightness_set(struct ld9040 *ctx)
+{
+	ld9040_dcs_write(ctx, ld9040_gammas[ctx->brightness],
+			 ARRAY_SIZE(ld9040_gammas[ctx->brightness]));
+
+	ld9040_dcs_write_seq_static(ctx, MCS_GAMMA_CTRL, 0x02, 0x5a);
+}
+
+static void ld9040_init(struct ld9040 *ctx)
+{
+	ld9040_dcs_write_seq_static(ctx, MCS_USER_SETTING, 0x5a, 0x5a);
+	ld9040_dcs_write_seq_static(ctx, MCS_PANEL_CONDITION,
+		0x05, 0x65, 0x96, 0x71, 0x7d, 0x19, 0x3b, 0x0d,
+		0x19, 0x7e, 0x0d, 0xe2, 0x00, 0x00, 0x7e, 0x7d,
+		0x07, 0x07, 0x20, 0x20, 0x20, 0x02, 0x02);
+	ld9040_dcs_write_seq_static(ctx, MCS_DISPCTL,
+		0x02, 0x08, 0x08, 0x10, 0x10);
+	ld9040_dcs_write_seq_static(ctx, MCS_MANPWR, 0x04);
+	ld9040_dcs_write_seq_static(ctx, MCS_POWER_CTRL,
+		0x0a, 0x87, 0x25, 0x6a, 0x44, 0x02, 0x88);
+	ld9040_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0d, 0x00, 0x16);
+	ld9040_dcs_write_seq_static(ctx, MCS_GTCON, 0x09, 0x00, 0x00);
+	ld9040_brightness_set(ctx);
+	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
+	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
+}
+
+static int ld9040_power_on(struct ld9040 *ctx)
+{
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+	if (ret < 0)
+		return ret;
+
+	msleep(ctx->power_on_delay);
+	gpiod_set_value(ctx->reset_gpio, 0);
+	msleep(ctx->reset_delay);
+	gpiod_set_value(ctx->reset_gpio, 1);
+	msleep(ctx->reset_delay);
+
+	return 0;
+}
+
+static int ld9040_power_off(struct ld9040 *ctx)
+{
+	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int ld9040_disable(struct drm_panel *panel)
+{
+	return 0;
+}
+
+static int ld9040_unprepare(struct drm_panel *panel)
+{
+	struct ld9040 *ctx = panel_to_ld9040(panel);
+
+	msleep(120);
+	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF);
+	ld9040_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
+	msleep(40);
+
+	ld9040_clear_error(ctx);
+
+	return ld9040_power_off(ctx);
+}
+
+static int ld9040_prepare(struct drm_panel *panel)
+{
+	struct ld9040 *ctx = panel_to_ld9040(panel);
+	int ret;
+
+	ret = ld9040_power_on(ctx);
+	if (ret < 0)
+		return ret;
+
+	ld9040_init(ctx);
+
+	ret = ld9040_clear_error(ctx);
+
+	if (ret < 0)
+		ld9040_unprepare(panel);
+
+	return ret;
+}
+
+static int ld9040_enable(struct drm_panel *panel)
+{
+	return 0;
+}
+
+static int ld9040_get_modes(struct drm_panel *panel)
+{
+	struct drm_connector *connector = panel->connector;
+	struct ld9040 *ctx = panel_to_ld9040(panel);
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_create(connector->dev);
+	if (!mode) {
+		DRM_ERROR("failed to create a new display mode\n");
+		return 0;
+	}
+
+	drm_display_mode_from_videomode(&ctx->vm, mode);
+	mode->width_mm = ctx->width_mm;
+	mode->height_mm = ctx->height_mm;
+	connector->display_info.width_mm = mode->width_mm;
+	connector->display_info.height_mm = mode->height_mm;
+
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	drm_mode_probed_add(connector, mode);
+
+	return 1;
+}
+
+static const struct drm_panel_funcs ld9040_drm_funcs = {
+	.disable = ld9040_disable,
+	.unprepare = ld9040_unprepare,
+	.prepare = ld9040_prepare,
+	.enable = ld9040_enable,
+	.get_modes = ld9040_get_modes,
+};
+
+static int ld9040_parse_dt(struct ld9040 *ctx)
+{
+	struct device *dev = ctx->dev;
+	struct device_node *np = dev->of_node;
+	int ret;
+
+	ret = of_get_videomode(np, &ctx->vm, 0);
+	if (ret < 0)
+		return ret;
+
+	of_property_read_u32(np, "power-on-delay", &ctx->power_on_delay);
+	of_property_read_u32(np, "reset-delay", &ctx->reset_delay);
+	of_property_read_u32(np, "panel-width-mm", &ctx->width_mm);
+	of_property_read_u32(np, "panel-height-mm", &ctx->height_mm);
+
+	return 0;
+}
+
+static int ld9040_probe(struct spi_device *spi)
+{
+	struct device *dev = &spi->dev;
+	struct ld9040 *ctx;
+	int ret;
+
+	ctx = devm_kzalloc(dev, sizeof(struct ld9040), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	spi_set_drvdata(spi, ctx);
+
+	ctx->dev = dev;
+	ctx->brightness = ARRAY_SIZE(ld9040_gammas) - 1;
+
+	ret = ld9040_parse_dt(ctx);
+	if (ret < 0)
+		return ret;
+
+	ctx->supplies[0].supply = "vdd3";
+	ctx->supplies[1].supply = "vci";
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+				      ctx->supplies);
+	if (ret < 0)
+		return ret;
+
+	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(ctx->reset_gpio)) {
+		dev_err(dev, "cannot get reset-gpios: %ld\n",
+			PTR_ERR(ctx->reset_gpio));
+		return PTR_ERR(ctx->reset_gpio);
+	}
+
+	spi->bits_per_word = 9;
+	ret = spi_setup(spi);
+	if (ret < 0) {
+		dev_err(dev, "SPI setup failed: %d\n", ret);
+		return ret;
+	}
+
+	drm_panel_init(&ctx->panel);
+	ctx->panel.dev = dev;
+	ctx->panel.funcs = &ld9040_drm_funcs;
+
+	return drm_panel_add(&ctx->panel);
+}
+
+static int ld9040_remove(struct spi_device *spi)
+{
+	struct ld9040 *ctx = spi_get_drvdata(spi);
+
+	ld9040_power_off(ctx);
+	drm_panel_remove(&ctx->panel);
+
+	return 0;
+}
+
+static const struct of_device_id ld9040_of_match[] = {
+	{ .compatible = "samsung,ld9040" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ld9040_of_match);
+
+static struct spi_driver ld9040_driver = {
+	.probe = ld9040_probe,
+	.remove = ld9040_remove,
+	.driver = {
+		.name = "panel-samsung-ld9040",
+		.owner = THIS_MODULE,
+		.of_match_table = ld9040_of_match,
+	},
+};
+module_spi_driver(ld9040_driver);
+
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_DESCRIPTION("ld9040 LCD Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
new file mode 100644
index 0000000..a188a39
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -0,0 +1,1072 @@
+/*
+ * MIPI-DSI based s6e8aa0 AMOLED LCD 5.3 inch panel driver.
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd
+ *
+ * Inki Dae, <inki.dae@samsung.com>
+ * Donghwa Lee, <dh09.lee@samsung.com>
+ * Joongmock Shin <jmock.shin@samsung.com>
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ * Tomasz Figa <t.figa@samsung.com>
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <drm/drmP.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+#define LDI_MTP_LENGTH			24
+#define GAMMA_LEVEL_NUM			25
+#define GAMMA_TABLE_LEN			26
+
+#define PANELCTL_SS_MASK		(1 << 5)
+#define PANELCTL_SS_1_800		(0 << 5)
+#define PANELCTL_SS_800_1		(1 << 5)
+#define PANELCTL_GTCON_MASK		(7 << 2)
+#define PANELCTL_GTCON_110		(6 << 2)
+#define PANELCTL_GTCON_111		(7 << 2)
+
+#define PANELCTL_CLK1_CON_MASK		(7 << 3)
+#define PANELCTL_CLK1_000		(0 << 3)
+#define PANELCTL_CLK1_001		(1 << 3)
+#define PANELCTL_CLK2_CON_MASK		(7 << 0)
+#define PANELCTL_CLK2_000		(0 << 0)
+#define PANELCTL_CLK2_001		(1 << 0)
+
+#define PANELCTL_INT1_CON_MASK		(7 << 3)
+#define PANELCTL_INT1_000		(0 << 3)
+#define PANELCTL_INT1_001		(1 << 3)
+#define PANELCTL_INT2_CON_MASK		(7 << 0)
+#define PANELCTL_INT2_000		(0 << 0)
+#define PANELCTL_INT2_001		(1 << 0)
+
+#define PANELCTL_BICTL_CON_MASK		(7 << 3)
+#define PANELCTL_BICTL_000		(0 << 3)
+#define PANELCTL_BICTL_001		(1 << 3)
+#define PANELCTL_BICTLB_CON_MASK	(7 << 0)
+#define PANELCTL_BICTLB_000		(0 << 0)
+#define PANELCTL_BICTLB_001		(1 << 0)
+
+#define PANELCTL_EM_CLK1_CON_MASK	(7 << 3)
+#define PANELCTL_EM_CLK1_110		(6 << 3)
+#define PANELCTL_EM_CLK1_111		(7 << 3)
+#define PANELCTL_EM_CLK1B_CON_MASK	(7 << 0)
+#define PANELCTL_EM_CLK1B_110		(6 << 0)
+#define PANELCTL_EM_CLK1B_111		(7 << 0)
+
+#define PANELCTL_EM_CLK2_CON_MASK	(7 << 3)
+#define PANELCTL_EM_CLK2_110		(6 << 3)
+#define PANELCTL_EM_CLK2_111		(7 << 3)
+#define PANELCTL_EM_CLK2B_CON_MASK	(7 << 0)
+#define PANELCTL_EM_CLK2B_110		(6 << 0)
+#define PANELCTL_EM_CLK2B_111		(7 << 0)
+
+#define PANELCTL_EM_INT1_CON_MASK	(7 << 3)
+#define PANELCTL_EM_INT1_000		(0 << 3)
+#define PANELCTL_EM_INT1_001		(1 << 3)
+#define PANELCTL_EM_INT2_CON_MASK	(7 << 0)
+#define PANELCTL_EM_INT2_000		(0 << 0)
+#define PANELCTL_EM_INT2_001		(1 << 0)
+
+#define AID_DISABLE			(0x4)
+#define AID_1				(0x5)
+#define AID_2				(0x6)
+#define AID_3				(0x7)
+
+typedef u8 s6e8aa0_gamma_table[GAMMA_TABLE_LEN];
+
+struct s6e8aa0_variant {
+	u8 version;
+	const s6e8aa0_gamma_table *gamma_tables;
+};
+
+struct s6e8aa0 {
+	struct device *dev;
+	struct drm_panel panel;
+
+	struct regulator_bulk_data supplies[2];
+	struct gpio_desc *reset_gpio;
+	u32 power_on_delay;
+	u32 reset_delay;
+	u32 init_delay;
+	bool flip_horizontal;
+	bool flip_vertical;
+	struct videomode vm;
+	u32 width_mm;
+	u32 height_mm;
+
+	u8 version;
+	u8 id;
+	const struct s6e8aa0_variant *variant;
+	int brightness;
+
+	/* This field is tested by functions that access the DSI bus directly
+	 * before a transfer; the transfer is skipped if it is set. On a
+	 * transfer failure or an unexpected response the field is set to
+	 * an error value. This construct eliminates many error checks in
+	 * higher-level functions.
+	 */
+	int error;
+};
+
+static inline struct s6e8aa0 *panel_to_s6e8aa0(struct drm_panel *panel)
+{
+	return container_of(panel, struct s6e8aa0, panel);
+}
+
+static int s6e8aa0_clear_error(struct s6e8aa0 *ctx)
+{
+	int ret = ctx->error;
+
+	ctx->error = 0;
+	return ret;
+}
+
+static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len)
+{
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+	ssize_t ret;
+
+	if (ctx->error < 0)
+		return;
+
+	ret = mipi_dsi_dcs_write_buffer(dsi, data, len);
+	if (ret < 0) {
+		dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret,
+			(int)len, data);
+		ctx->error = ret;
+	}
+}
+
+static int s6e8aa0_dcs_read(struct s6e8aa0 *ctx, u8 cmd, void *data, size_t len)
+{
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+	int ret;
+
+	if (ctx->error < 0)
+		return ctx->error;
+
+	ret = mipi_dsi_dcs_read(dsi, cmd, data, len);
+	if (ret < 0) {
+		dev_err(ctx->dev, "error %d reading dcs seq(%#x)\n", ret, cmd);
+		ctx->error = ret;
+	}
+
+	return ret;
+}
+
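+/* builds the sequence on the stack; for constant sequences prefer the
+ * _static variant below, which keeps the bytes in static storage
+ */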
+#define s6e8aa0_dcs_write_seq(ctx, seq...) \
+({\
+	const u8 d[] = { seq };\
+	BUILD_BUG_ON_MSG(ARRAY_SIZE(d) > 64, "DCS sequence too big for stack");\
+	s6e8aa0_dcs_write(ctx, d, ARRAY_SIZE(d));\
+})
+
+#define s6e8aa0_dcs_write_seq_static(ctx, seq...) \
+({\
+	static const u8 d[] = { seq };\
+	s6e8aa0_dcs_write(ctx, d, ARRAY_SIZE(d));\
+})
+
+static void s6e8aa0_apply_level_1_key(struct s6e8aa0 *ctx)
+{
+	s6e8aa0_dcs_write_seq_static(ctx, 0xf0, 0x5a, 0x5a);
+}
+
+static void s6e8aa0_panel_cond_set_v142(struct s6e8aa0 *ctx)
+{
+	static const u8 aids[] = {
+		0x04, 0x04, 0x04, 0x04, 0x04, 0x60, 0x80, 0xA0
+	};
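+	/* the top three bits of the panel id select the AID setting */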
+	u8 aid = aids[ctx->id >> 5];
+	u8 cfg = 0x3d;
+	u8 clk_con = 0xc8;
+	u8 int_con = 0x08;
+	u8 bictl_con = 0x48;
+	u8 em_clk1_con = 0xff;
+	u8 em_clk2_con = 0xff;
+	u8 em_int_con = 0xc8;
+
+	if (ctx->flip_vertical) {
+		/* GTCON */
+		cfg &= ~(PANELCTL_GTCON_MASK);
+		cfg |= (PANELCTL_GTCON_110);
+	}
+
+	if (ctx->flip_horizontal) {
+		/* SS */
+		cfg &= ~(PANELCTL_SS_MASK);
+		cfg |= (PANELCTL_SS_1_800);
+	}
+
+	if (ctx->flip_horizontal || ctx->flip_vertical) {
+		/* CLK1,2_CON */
+		clk_con &= ~(PANELCTL_CLK1_CON_MASK |
+			PANELCTL_CLK2_CON_MASK);
+		clk_con |= (PANELCTL_CLK1_000 | PANELCTL_CLK2_001);
+
+		/* INT1,2_CON */
+		int_con &= ~(PANELCTL_INT1_CON_MASK |
+			PANELCTL_INT2_CON_MASK);
+		int_con |= (PANELCTL_INT1_000 | PANELCTL_INT2_001);
+
+		/* BICTL,B_CON */
+		bictl_con &= ~(PANELCTL_BICTL_CON_MASK |
+			PANELCTL_BICTLB_CON_MASK);
+		bictl_con |= (PANELCTL_BICTL_000 |
+			PANELCTL_BICTLB_001);
+
+		/* EM_CLK1,1B_CON */
+		em_clk1_con &= ~(PANELCTL_EM_CLK1_CON_MASK |
+			PANELCTL_EM_CLK1B_CON_MASK);
+		em_clk1_con |= (PANELCTL_EM_CLK1_110 |
+			PANELCTL_EM_CLK1B_110);
+
+		/* EM_CLK2,2B_CON */
+		em_clk2_con &= ~(PANELCTL_EM_CLK2_CON_MASK |
+			PANELCTL_EM_CLK2B_CON_MASK);
+		em_clk2_con |= (PANELCTL_EM_CLK2_110 |
+			PANELCTL_EM_CLK2B_110);
+
+		/* EM_INT1,2_CON */
+		em_int_con &= ~(PANELCTL_EM_INT1_CON_MASK |
+			PANELCTL_EM_INT2_CON_MASK);
+		em_int_con |= (PANELCTL_EM_INT1_000 |
+			PANELCTL_EM_INT2_001);
+	}
+
+	s6e8aa0_dcs_write_seq(ctx,
+		0xf8, cfg, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00,
+		0x3c, 0x78, 0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00,
+		0x00, 0x20, aid, 0x08, 0x6e, 0x00, 0x00, 0x00,
+		0x02, 0x07, 0x07, 0x23, 0x23, 0xc0, clk_con, int_con,
+		bictl_con, 0xc1, 0x00, 0xc1, em_clk1_con, em_clk2_con,
+		em_int_con);
+}
+
+static void s6e8aa0_panel_cond_set(struct s6e8aa0 *ctx)
+{
+	if (ctx->version < 142)
+		s6e8aa0_dcs_write_seq_static(ctx,
+			0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x94, 0x00,
+			0x3c, 0x78, 0x10, 0x27, 0x08, 0x6e, 0x00, 0x00,
+			0x00, 0x00, 0x04, 0x08, 0x6e, 0x00, 0x00, 0x00,
+			0x00, 0x07, 0x07, 0x23, 0x6e, 0xc0, 0xc1, 0x01,
+			0x81, 0xc1, 0x00, 0xc3, 0xf6, 0xf6, 0xc1
+		);
+	else
+		s6e8aa0_panel_cond_set_v142(ctx);
+}
+
+static void s6e8aa0_display_condition_set(struct s6e8aa0 *ctx)
+{
+	s6e8aa0_dcs_write_seq_static(ctx, 0xf2, 0x80, 0x03, 0x0d);
+}
+
+static void s6e8aa0_etc_source_control(struct s6e8aa0 *ctx)
+{
+	s6e8aa0_dcs_write_seq_static(ctx, 0xf6, 0x00, 0x02, 0x00);
+}
+
+static void s6e8aa0_etc_pentile_control(struct s6e8aa0 *ctx)
+{
+	static const u8 pent32[] = {
+		0xb6, 0x0c, 0x02, 0x03, 0x32, 0xc0, 0x44, 0x44, 0xc0, 0x00
+	};
+
+	static const u8 pent142[] = {
+		0xb6, 0x0c, 0x02, 0x03, 0x32, 0xff, 0x44, 0x44, 0xc0, 0x00
+	};
+
+	if (ctx->version < 142)
+		s6e8aa0_dcs_write(ctx, pent32, ARRAY_SIZE(pent32));
+	else
+		s6e8aa0_dcs_write(ctx, pent142, ARRAY_SIZE(pent142));
+}
+
+static void s6e8aa0_etc_power_control(struct s6e8aa0 *ctx)
+{
+	static const u8 pwr142[] = {
+		0xf4, 0xcf, 0x0a, 0x12, 0x10, 0x1e, 0x33, 0x02
+	};
+
+	static const u8 pwr32[] = {
+		0xf4, 0xcf, 0x0a, 0x15, 0x10, 0x19, 0x33, 0x02
+	};
+
+	if (ctx->version < 142)
+		s6e8aa0_dcs_write(ctx, pwr32, ARRAY_SIZE(pwr32));
+	else
+		s6e8aa0_dcs_write(ctx, pwr142, ARRAY_SIZE(pwr142));
+}
+
+static void s6e8aa0_etc_elvss_control(struct s6e8aa0 *ctx)
+{
+	u8 id = ctx->id ? 0 : 0x95;
+
+	s6e8aa0_dcs_write_seq(ctx, 0xb1, 0x04, id);
+}
+
+static void s6e8aa0_elvss_nvm_set_v142(struct s6e8aa0 *ctx)
+{
+	u8 br;
+
+	switch (ctx->brightness) {
+	case 0 ... 6: /* 30cd ~ 100cd */
+		br = 0xdf;
+		break;
+	case 7 ... 11: /* 120cd ~ 150cd */
+		br = 0xdd;
+		break;
+	case 12 ... 15: /* 180cd ~ 210cd */
+	default:
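+		/* out-of-range brightness codes fall back to this setting */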
+		br = 0xd9;
+		break;
+	case 16 ... 24: /* 240cd ~ 300cd */
+		br = 0xd0;
+		break;
+	}
+
+	s6e8aa0_dcs_write_seq(ctx, 0xd9, 0x14, 0x40, 0x0c, 0xcb, 0xce, 0x6e,
+		0xc4, 0x0f, 0x40, 0x41, br, 0x00, 0x60, 0x19);
+}
+
+static void s6e8aa0_elvss_nvm_set(struct s6e8aa0 *ctx)
+{
+	if (ctx->version < 142)
+		s6e8aa0_dcs_write_seq_static(ctx,
+			0xd9, 0x14, 0x40, 0x0c, 0xcb, 0xce, 0x6e, 0xc4, 0x07,
+			0x40, 0x41, 0xc1, 0x00, 0x60, 0x19);
+	else
+		s6e8aa0_elvss_nvm_set_v142(ctx);
+}
+
+static void s6e8aa0_apply_level_2_key(struct s6e8aa0 *ctx)
+{
+	s6e8aa0_dcs_write_seq_static(ctx, 0xfc, 0x5a, 0x5a);
+}
+
+static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v142[GAMMA_LEVEL_NUM] = {
+	{
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x62, 0x55, 0x55,
+		0xaf, 0xb1, 0xb1, 0xbd, 0xce, 0xb7, 0x9a, 0xb1,
+		0x90, 0xb2, 0xc4, 0xae, 0x00, 0x60, 0x00, 0x40,
+		0x00, 0x70,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x74, 0x68, 0x69,
+		0xb8, 0xc1, 0xb7, 0xbd, 0xcd, 0xb8, 0x93, 0xab,
+		0x88, 0xb4, 0xc4, 0xb1, 0x00, 0x6b, 0x00, 0x4d,
+		0x00, 0x7d,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x95, 0x8a, 0x89,
+		0xb4, 0xc6, 0xb2, 0xc5, 0xd2, 0xbf, 0x90, 0xa8,
+		0x85, 0xb5, 0xc4, 0xb3, 0x00, 0x7b, 0x00, 0x5d,
+		0x00, 0x8f,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9f, 0x98, 0x92,
+		0xb3, 0xc4, 0xb0, 0xbc, 0xcc, 0xb4, 0x91, 0xa6,
+		0x87, 0xb5, 0xc5, 0xb4, 0x00, 0x87, 0x00, 0x6a,
+		0x00, 0x9e,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x99, 0x93, 0x8b,
+		0xb2, 0xc2, 0xb0, 0xbd, 0xce, 0xb4, 0x90, 0xa6,
+		0x87, 0xb3, 0xc3, 0xb2, 0x00, 0x8d, 0x00, 0x70,
+		0x00, 0xa4,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xa5, 0x99,
+		0xb2, 0xc2, 0xb0, 0xbb, 0xcd, 0xb1, 0x93, 0xa7,
+		0x8a, 0xb2, 0xc1, 0xb0, 0x00, 0x92, 0x00, 0x75,
+		0x00, 0xaa,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa0, 0xa0, 0x93,
+		0xb6, 0xc4, 0xb4, 0xb5, 0xc8, 0xaa, 0x94, 0xa9,
+		0x8c, 0xb2, 0xc0, 0xb0, 0x00, 0x97, 0x00, 0x7a,
+		0x00, 0xaf,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xa7, 0x96,
+		0xb3, 0xc2, 0xb0, 0xba, 0xcb, 0xb0, 0x94, 0xa8,
+		0x8c, 0xb0, 0xbf, 0xaf, 0x00, 0x9f, 0x00, 0x83,
+		0x00, 0xb9,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9d, 0xa2, 0x90,
+		0xb6, 0xc5, 0xb3, 0xb8, 0xc9, 0xae, 0x94, 0xa8,
+		0x8d, 0xaf, 0xbd, 0xad, 0x00, 0xa4, 0x00, 0x88,
+		0x00, 0xbf,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa6, 0xac, 0x97,
+		0xb4, 0xc4, 0xb1, 0xbb, 0xcb, 0xb2, 0x93, 0xa7,
+		0x8d, 0xae, 0xbc, 0xad, 0x00, 0xa7, 0x00, 0x8c,
+		0x00, 0xc3,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa2, 0xa9, 0x93,
+		0xb6, 0xc5, 0xb2, 0xba, 0xc9, 0xb0, 0x93, 0xa7,
+		0x8d, 0xae, 0xbb, 0xac, 0x00, 0xab, 0x00, 0x90,
+		0x00, 0xc8,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0x9e, 0xa6, 0x8f,
+		0xb7, 0xc6, 0xb3, 0xb8, 0xc8, 0xb0, 0x93, 0xa6,
+		0x8c, 0xae, 0xbb, 0xad, 0x00, 0xae, 0x00, 0x93,
+		0x00, 0xcc,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xab, 0xb4, 0x9c,
+		0xb3, 0xc3, 0xaf, 0xb7, 0xc7, 0xaf, 0x93, 0xa6,
+		0x8c, 0xaf, 0xbc, 0xad, 0x00, 0xb1, 0x00, 0x97,
+		0x00, 0xcf,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa6, 0xb1, 0x98,
+		0xb1, 0xc2, 0xab, 0xba, 0xc9, 0xb2, 0x93, 0xa6,
+		0x8d, 0xae, 0xba, 0xab, 0x00, 0xb5, 0x00, 0x9b,
+		0x00, 0xd4,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xae, 0x94,
+		0xb2, 0xc3, 0xac, 0xbb, 0xca, 0xb4, 0x91, 0xa4,
+		0x8a, 0xae, 0xba, 0xac, 0x00, 0xb8, 0x00, 0x9e,
+		0x00, 0xd8,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xab, 0xb7, 0x9c,
+		0xae, 0xc0, 0xa9, 0xba, 0xc9, 0xb3, 0x92, 0xa5,
+		0x8b, 0xad, 0xb9, 0xab, 0x00, 0xbb, 0x00, 0xa1,
+		0x00, 0xdc,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb4, 0x97,
+		0xb0, 0xc1, 0xaa, 0xb9, 0xc8, 0xb2, 0x92, 0xa5,
+		0x8c, 0xae, 0xb9, 0xab, 0x00, 0xbe, 0x00, 0xa4,
+		0x00, 0xdf,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xb0, 0x94,
+		0xb0, 0xc2, 0xab, 0xbb, 0xc9, 0xb3, 0x91, 0xa4,
+		0x8b, 0xad, 0xb8, 0xaa, 0x00, 0xc1, 0x00, 0xa8,
+		0x00, 0xe2,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa3, 0xb0, 0x94,
+		0xae, 0xbf, 0xa8, 0xb9, 0xc8, 0xb3, 0x92, 0xa4,
+		0x8b, 0xad, 0xb7, 0xa9, 0x00, 0xc4, 0x00, 0xab,
+		0x00, 0xe6,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb6, 0x98,
+		0xaf, 0xc0, 0xa8, 0xb8, 0xc7, 0xb2, 0x93, 0xa5,
+		0x8d, 0xad, 0xb7, 0xa9, 0x00, 0xc7, 0x00, 0xae,
+		0x00, 0xe9,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb3, 0x95,
+		0xaf, 0xc1, 0xa9, 0xb9, 0xc8, 0xb3, 0x92, 0xa4,
+		0x8b, 0xad, 0xb7, 0xaa, 0x00, 0xc9, 0x00, 0xb0,
+		0x00, 0xec,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb3, 0x95,
+		0xac, 0xbe, 0xa6, 0xbb, 0xc9, 0xb4, 0x90, 0xa3,
+		0x8a, 0xad, 0xb7, 0xa9, 0x00, 0xcc, 0x00, 0xb4,
+		0x00, 0xf0,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa0, 0xb0, 0x91,
+		0xae, 0xc0, 0xa6, 0xba, 0xc8, 0xb4, 0x91, 0xa4,
+		0x8b, 0xad, 0xb7, 0xa9, 0x00, 0xcf, 0x00, 0xb7,
+		0x00, 0xf3,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa7, 0xb8, 0x98,
+		0xab, 0xbd, 0xa4, 0xbb, 0xc9, 0xb5, 0x91, 0xa3,
+		0x8b, 0xac, 0xb6, 0xa8, 0x00, 0xd1, 0x00, 0xb9,
+		0x00, 0xf6,
+	}, {
+		0xfa, 0x01, 0x71, 0x31, 0x7b, 0xa4, 0xb5, 0x95,
+		0xa9, 0xbc, 0xa1, 0xbb, 0xc9, 0xb5, 0x91, 0xa3,
+		0x8a, 0xad, 0xb6, 0xa8, 0x00, 0xd6, 0x00, 0xbf,
+		0x00, 0xfc,
+	},
+};
+
+static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v96[GAMMA_LEVEL_NUM] = {
+	{
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+		0xdf, 0x1f, 0xd7, 0xdc, 0xb7, 0xe1, 0xc0, 0xaf,
+		0xc4, 0xd2, 0xd0, 0xcf, 0x00, 0x4d, 0x00, 0x40,
+		0x00, 0x5f,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+		0xd5, 0x35, 0xcf, 0xdc, 0xc1, 0xe1, 0xbf, 0xb3,
+		0xc1, 0xd2, 0xd1, 0xce,	0x00, 0x53, 0x00, 0x46,
+		0x00, 0x67,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+		0xd2, 0x64, 0xcf, 0xdb, 0xc6, 0xe1, 0xbd, 0xb3,
+		0xbd, 0xd2, 0xd2, 0xce,	0x00, 0x59, 0x00, 0x4b,
+		0x00, 0x6e,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+		0xd0, 0x7c, 0xcf, 0xdb, 0xc9, 0xe0, 0xbc, 0xb4,
+		0xbb, 0xcf, 0xd1, 0xcc, 0x00, 0x5f, 0x00, 0x50,
+		0x00, 0x75,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+		0xd0, 0x8e, 0xd1, 0xdb, 0xcc, 0xdf, 0xbb, 0xb6,
+		0xb9, 0xd0, 0xd1, 0xcd,	0x00, 0x63, 0x00, 0x54,
+		0x00, 0x7a,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+		0xd1, 0x9e, 0xd5, 0xda, 0xcd, 0xdd, 0xbb, 0xb7,
+		0xb9, 0xce, 0xce, 0xc9,	0x00, 0x68, 0x00, 0x59,
+		0x00, 0x81,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x00, 0xff,
+		0xd0, 0xa5, 0xd6, 0xda, 0xcf, 0xdd, 0xbb, 0xb7,
+		0xb8, 0xcc, 0xcd, 0xc7,	0x00, 0x6c, 0x00, 0x5c,
+		0x00, 0x86,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xfe,
+		0xd0, 0xae, 0xd7, 0xd9, 0xd0, 0xdb, 0xb9, 0xb6,
+		0xb5, 0xca, 0xcc, 0xc5,	0x00, 0x74, 0x00, 0x63,
+		0x00, 0x90,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xf9,
+		0xcf, 0xb0, 0xd6, 0xd9, 0xd1, 0xdb, 0xb9, 0xb6,
+		0xb4, 0xca, 0xcb, 0xc5,	0x00, 0x77, 0x00, 0x66,
+		0x00, 0x94,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xff, 0x1f, 0xf7,
+		0xcf, 0xb3, 0xd7, 0xd8, 0xd1, 0xd9, 0xb7, 0xb6,
+		0xb3, 0xc9, 0xca, 0xc3,	0x00, 0x7b, 0x00, 0x69,
+		0x00, 0x99,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xfd, 0x2f, 0xf7,
+		0xdf, 0xb5, 0xd6, 0xd8, 0xd1, 0xd8, 0xb6, 0xb5,
+		0xb2, 0xca, 0xcb, 0xc4,	0x00, 0x7e, 0x00, 0x6c,
+		0x00, 0x9d,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xfa, 0x2f, 0xf5,
+		0xce, 0xb6, 0xd5, 0xd7, 0xd2, 0xd8, 0xb6, 0xb4,
+		0xb0, 0xc7, 0xc9, 0xc1,	0x00, 0x84, 0x00, 0x71,
+		0x00, 0xa5,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf7, 0x2f, 0xf2,
+		0xce, 0xb9, 0xd5, 0xd8, 0xd2, 0xd8, 0xb4, 0xb4,
+		0xaf, 0xc7, 0xc9, 0xc1,	0x00, 0x87, 0x00, 0x73,
+		0x00, 0xa8,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf5, 0x2f, 0xf0,
+		0xdf, 0xba, 0xd5, 0xd7, 0xd2, 0xd7, 0xb4, 0xb4,
+		0xaf, 0xc5, 0xc7, 0xbf,	0x00, 0x8a, 0x00, 0x76,
+		0x00, 0xac,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xf2, 0x2f, 0xed,
+		0xce, 0xbb, 0xd4, 0xd6, 0xd2, 0xd6, 0xb5, 0xb4,
+		0xaf, 0xc5, 0xc7, 0xbf,	0x00, 0x8c, 0x00, 0x78,
+		0x00, 0xaf,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xef, 0x2f, 0xeb,
+		0xcd, 0xbb, 0xd2, 0xd7, 0xd3, 0xd6, 0xb3, 0xb4,
+		0xae, 0xc5, 0xc6, 0xbe,	0x00, 0x91, 0x00, 0x7d,
+		0x00, 0xb6,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xee, 0x2f, 0xea,
+		0xce, 0xbd, 0xd4, 0xd6, 0xd2, 0xd5, 0xb2, 0xb3,
+		0xad, 0xc3, 0xc4, 0xbb,	0x00, 0x94, 0x00, 0x7f,
+		0x00, 0xba,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xec, 0x2f, 0xe8,
+		0xce, 0xbe, 0xd3, 0xd6, 0xd3, 0xd5, 0xb2, 0xb2,
+		0xac, 0xc3, 0xc5, 0xbc,	0x00, 0x96, 0x00, 0x81,
+		0x00, 0xbd,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xeb, 0x2f, 0xe7,
+		0xce, 0xbf, 0xd3, 0xd6, 0xd2, 0xd5, 0xb1, 0xb2,
+		0xab, 0xc2, 0xc4, 0xbb,	0x00, 0x99, 0x00, 0x83,
+		0x00, 0xc0,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xef, 0x5f, 0xe9,
+		0xca, 0xbf, 0xd3, 0xd5, 0xd2, 0xd4, 0xb2, 0xb2,
+		0xab, 0xc1, 0xc4, 0xba,	0x00, 0x9b, 0x00, 0x85,
+		0x00, 0xc3,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xea, 0x5f, 0xe8,
+		0xee, 0xbf, 0xd2, 0xd5, 0xd2, 0xd4, 0xb1, 0xb2,
+		0xab, 0xc1, 0xc2, 0xb9,	0x00, 0x9d, 0x00, 0x87,
+		0x00, 0xc6,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe9, 0x5f, 0xe7,
+		0xcd, 0xbf, 0xd2, 0xd6, 0xd2, 0xd4, 0xb1, 0xb2,
+		0xab, 0xbe, 0xc0, 0xb7,	0x00, 0xa1, 0x00, 0x8a,
+		0x00, 0xca,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe8, 0x61, 0xe6,
+		0xcd, 0xbf, 0xd1, 0xd6, 0xd3, 0xd4, 0xaf, 0xb0,
+		0xa9, 0xbe, 0xc1, 0xb7,	0x00, 0xa3, 0x00, 0x8b,
+		0x00, 0xce,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe8, 0x62, 0xe5,
+		0xcc, 0xc0, 0xd0, 0xd6, 0xd2, 0xd4, 0xaf, 0xb1,
+		0xa9, 0xbd, 0xc0, 0xb6,	0x00, 0xa5, 0x00, 0x8d,
+		0x00, 0xd0,
+	}, {
+		0xfa, 0x01, 0x1f, 0x1f, 0x1f, 0xe7, 0x7f, 0xe3,
+		0xcc, 0xc1, 0xd0, 0xd5, 0xd3, 0xd3, 0xae, 0xaf,
+		0xa8, 0xbe, 0xc0, 0xb7,	0x00, 0xa8, 0x00, 0x90,
+		0x00, 0xd3,
+	}
+};
+
+static const s6e8aa0_gamma_table s6e8aa0_gamma_tables_v32[GAMMA_LEVEL_NUM] = {
+	{
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0x72, 0x5e, 0x6b,
+		0xa1, 0xa7, 0x9a, 0xb4, 0xcb, 0xb8, 0x92, 0xac,
+		0x97, 0xb4, 0xc3, 0xb5, 0x00, 0x4e, 0x00, 0x37,
+		0x00, 0x58,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0x85, 0x71, 0x7d,
+		0xa6, 0xb6, 0xa1, 0xb5, 0xca, 0xba, 0x93, 0xac,
+		0x98, 0xb2, 0xc0, 0xaf, 0x00, 0x59, 0x00, 0x43,
+		0x00, 0x64,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa4, 0x94, 0x9e,
+		0xa0, 0xbb, 0x9c, 0xc3, 0xd2, 0xc6, 0x93, 0xaa,
+		0x95, 0xb7, 0xc2, 0xb4, 0x00, 0x65, 0x00, 0x50,
+		0x00, 0x74,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xa1, 0xa6,
+		0xa0, 0xb9, 0x9b, 0xc3, 0xd1, 0xc8, 0x90, 0xa6,
+		0x90, 0xbb, 0xc3, 0xb7, 0x00, 0x6f, 0x00, 0x5b,
+		0x00, 0x80,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa6, 0x9d, 0x9f,
+		0x9f, 0xb8, 0x9a, 0xc7, 0xd5, 0xcc, 0x90, 0xa5,
+		0x8f, 0xb8, 0xc1, 0xb6, 0x00, 0x74, 0x00, 0x60,
+		0x00, 0x85,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb3, 0xae, 0xae,
+		0x9e, 0xb7, 0x9a, 0xc8, 0xd6, 0xce, 0x91, 0xa6,
+		0x90, 0xb6, 0xc0, 0xb3, 0x00, 0x78, 0x00, 0x65,
+		0x00, 0x8a,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xa9, 0xa8,
+		0xa3, 0xb9, 0x9e, 0xc4, 0xd3, 0xcb, 0x94, 0xa6,
+		0x90, 0xb6, 0xbf, 0xb3, 0x00, 0x7c, 0x00, 0x69,
+		0x00, 0x8e,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xaf, 0xaf, 0xa9,
+		0xa5, 0xbc, 0xa2, 0xc7, 0xd5, 0xcd, 0x93, 0xa5,
+		0x8f, 0xb4, 0xbd, 0xb1, 0x00, 0x83, 0x00, 0x70,
+		0x00, 0x96,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xab, 0xa3,
+		0xaa, 0xbf, 0xa7, 0xc5, 0xd3, 0xcb, 0x93, 0xa5,
+		0x8f, 0xb2, 0xbb, 0xb0, 0x00, 0x86, 0x00, 0x74,
+		0x00, 0x9b,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb1, 0xb5, 0xab,
+		0xab, 0xc0, 0xa9, 0xc7, 0xd4, 0xcc, 0x94, 0xa4,
+		0x8f, 0xb1, 0xbb, 0xaf, 0x00, 0x8a, 0x00, 0x77,
+		0x00, 0x9e,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb2, 0xa7,
+		0xae, 0xc2, 0xab, 0xc5, 0xd3, 0xca, 0x93, 0xa4,
+		0x8f, 0xb1, 0xba, 0xae, 0x00, 0x8d, 0x00, 0x7b,
+		0x00, 0xa2,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xaf, 0xa3,
+		0xb0, 0xc3, 0xae, 0xc4, 0xd1, 0xc8, 0x93, 0xa4,
+		0x8f, 0xb1, 0xba, 0xaf, 0x00, 0x8f, 0x00, 0x7d,
+		0x00, 0xa5,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb4, 0xbd, 0xaf,
+		0xae, 0xc1, 0xab, 0xc2, 0xd0, 0xc6, 0x94, 0xa4,
+		0x8f, 0xb1, 0xba, 0xaf, 0x00, 0x92, 0x00, 0x80,
+		0x00, 0xa8,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xb9, 0xac,
+		0xad, 0xc1, 0xab, 0xc4, 0xd1, 0xc7, 0x95, 0xa4,
+		0x90, 0xb0, 0xb9, 0xad, 0x00, 0x95, 0x00, 0x84,
+		0x00, 0xac,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb6, 0xa7,
+		0xaf, 0xc2, 0xae, 0xc5, 0xd1, 0xc7, 0x93, 0xa3,
+		0x8e, 0xb0, 0xb9, 0xad, 0x00, 0x98, 0x00, 0x86,
+		0x00, 0xaf,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb4, 0xbf, 0xaf,
+		0xad, 0xc1, 0xab, 0xc3, 0xd0, 0xc6, 0x94, 0xa3,
+		0x8f, 0xaf, 0xb8, 0xac, 0x00, 0x9a, 0x00, 0x89,
+		0x00, 0xb2,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xbc, 0xac,
+		0xaf, 0xc2, 0xad, 0xc2, 0xcf, 0xc4, 0x94, 0xa3,
+		0x90, 0xaf, 0xb8, 0xad, 0x00, 0x9c, 0x00, 0x8b,
+		0x00, 0xb5,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb9, 0xa7,
+		0xb1, 0xc4, 0xaf, 0xc3, 0xcf, 0xc5, 0x94, 0xa3,
+		0x8f, 0xae, 0xb7, 0xac, 0x00, 0x9f, 0x00, 0x8e,
+		0x00, 0xb8,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xb9, 0xa7,
+		0xaf, 0xc2, 0xad, 0xc1, 0xce, 0xc3, 0x95, 0xa3,
+		0x90, 0xad, 0xb6, 0xab, 0x00, 0xa2, 0x00, 0x91,
+		0x00, 0xbb,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb1, 0xbe, 0xac,
+		0xb1, 0xc4, 0xaf, 0xc1, 0xcd, 0xc1, 0x95, 0xa4,
+		0x91, 0xad, 0xb6, 0xab, 0x00, 0xa4, 0x00, 0x93,
+		0x00, 0xbd,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbb, 0xa8,
+		0xb3, 0xc5, 0xb2, 0xc1, 0xcd, 0xc2, 0x95, 0xa3,
+		0x90, 0xad, 0xb6, 0xab, 0x00, 0xa6, 0x00, 0x95,
+		0x00, 0xc0,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbb, 0xa8,
+		0xb0, 0xc3, 0xaf, 0xc2, 0xce, 0xc2, 0x94, 0xa2,
+		0x90, 0xac, 0xb6, 0xab, 0x00, 0xa8, 0x00, 0x98,
+		0x00, 0xc3,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xa9, 0xb8, 0xa5,
+		0xb3, 0xc5, 0xb2, 0xc1, 0xcc, 0xc0, 0x95, 0xa2,
+		0x90, 0xad, 0xb6, 0xab, 0x00, 0xaa, 0x00, 0x9a,
+		0x00, 0xc5,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xb0, 0xc0, 0xac,
+		0xb0, 0xc3, 0xaf, 0xc1, 0xcd, 0xc1, 0x95, 0xa2,
+		0x90, 0xac, 0xb5, 0xa9, 0x00, 0xac, 0x00, 0x9c,
+		0x00, 0xc8,
+	}, {
+		0xfa, 0x01, 0x43, 0x14, 0x45, 0xad, 0xbd, 0xa8,
+		0xaf, 0xc2, 0xaf, 0xc1, 0xcc, 0xc0, 0x95, 0xa2,
+		0x90, 0xac, 0xb5, 0xaa, 0x00, 0xb1, 0x00, 0xa1,
+		0x00, 0xcc,
+	},
+};
+
+static const struct s6e8aa0_variant s6e8aa0_variants[] = {
+	{
+		.version = 32,
+		.gamma_tables = s6e8aa0_gamma_tables_v32,
+	}, {
+		.version = 96,
+		.gamma_tables = s6e8aa0_gamma_tables_v96,
+	}, {
+		.version = 142,
+		.gamma_tables = s6e8aa0_gamma_tables_v142,
+	}, {
+		.version = 210,
+		.gamma_tables = s6e8aa0_gamma_tables_v142,
+	}
+};
+
+static void s6e8aa0_brightness_set(struct s6e8aa0 *ctx)
+{
+	const u8 *gamma;
+
+	if (ctx->error)
+		return;
+
+	gamma = ctx->variant->gamma_tables[ctx->brightness];
+
+	if (ctx->version >= 142)
+		s6e8aa0_elvss_nvm_set(ctx);
+
+	s6e8aa0_dcs_write(ctx, gamma, GAMMA_TABLE_LEN);
+
+	/* update gamma table. */
+	s6e8aa0_dcs_write_seq_static(ctx, 0xf7, 0x03);
+}
+
+static void s6e8aa0_panel_init(struct s6e8aa0 *ctx)
+{
+	s6e8aa0_apply_level_1_key(ctx);
+	s6e8aa0_apply_level_2_key(ctx);
+	msleep(20);
+
+	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
+	msleep(40);
+
+	s6e8aa0_panel_cond_set(ctx);
+	s6e8aa0_display_condition_set(ctx);
+	s6e8aa0_brightness_set(ctx);
+	s6e8aa0_etc_source_control(ctx);
+	s6e8aa0_etc_pentile_control(ctx);
+	s6e8aa0_elvss_nvm_set(ctx);
+	s6e8aa0_etc_power_control(ctx);
+	s6e8aa0_etc_elvss_control(ctx);
+	msleep(ctx->init_delay);
+}
+
+static void s6e8aa0_set_maximum_return_packet_size(struct s6e8aa0 *ctx,
+						   u16 size)
+{
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+	int ret;
+
+	if (ctx->error < 0)
+		return;
+
+	ret = mipi_dsi_set_maximum_return_packet_size(dsi, size);
+	if (ret < 0) {
+		dev_err(ctx->dev,
+			"error %d setting maximum return packet size to %d\n",
+			ret, size);
+		ctx->error = ret;
+	}
+}
+
+static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx)
+{
+	u8 id[3];
+	int ret, i;
+
+	ret = s6e8aa0_dcs_read(ctx, 0xd1, id, ARRAY_SIZE(id));
+	if (ret < ARRAY_SIZE(id) || id[0] == 0x00) {
+		dev_err(ctx->dev, "read id failed\n");
+		ctx->error = -EIO;
+		return;
+	}
+
+	dev_info(ctx->dev, "ID: 0x%02x, 0x%02x, 0x%02x\n", id[0], id[1], id[2]);
+
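+	/* id[1] selects the panel variant; id[2] later feeds ELVSS control */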
+	for (i = 0; i < ARRAY_SIZE(s6e8aa0_variants); ++i) {
+		if (id[1] == s6e8aa0_variants[i].version)
+			break;
+	}
+	if (i >= ARRAY_SIZE(s6e8aa0_variants)) {
+		dev_err(ctx->dev, "unsupported display version %d\n", id[1]);
+		ctx->error = -EINVAL;
+		return;
+	}
+
+	ctx->variant = &s6e8aa0_variants[i];
+	ctx->version = id[1];
+	ctx->id = id[2];
+}
+
+static void s6e8aa0_set_sequence(struct s6e8aa0 *ctx)
+{
+	s6e8aa0_set_maximum_return_packet_size(ctx, 3);
+	s6e8aa0_read_mtp_id(ctx);
+	s6e8aa0_panel_init(ctx);
+	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
+}
+
+static int s6e8aa0_power_on(struct s6e8aa0 *ctx)
+{
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+	if (ret < 0)
+		return ret;
+
+	msleep(ctx->power_on_delay);
+
+	gpiod_set_value(ctx->reset_gpio, 0);
+	usleep_range(10000, 11000);
+	gpiod_set_value(ctx->reset_gpio, 1);
+
+	msleep(ctx->reset_delay);
+
+	return 0;
+}
+
+static int s6e8aa0_power_off(struct s6e8aa0 *ctx)
+{
+	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int s6e8aa0_disable(struct drm_panel *panel)
+{
+	return 0;
+}
+
+static int s6e8aa0_unprepare(struct drm_panel *panel)
+{
+	struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
+
+	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
+	s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF);
+	msleep(40);
+
+	s6e8aa0_clear_error(ctx);
+
+	return s6e8aa0_power_off(ctx);
+}
+
+static int s6e8aa0_prepare(struct drm_panel *panel)
+{
+	struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
+	int ret;
+
+	ret = s6e8aa0_power_on(ctx);
+	if (ret < 0)
+		return ret;
+
+	s6e8aa0_set_sequence(ctx);
+	ret = ctx->error;
+
+	if (ret < 0)
+		s6e8aa0_unprepare(panel);
+
+	return ret;
+}
+
+static int s6e8aa0_enable(struct drm_panel *panel)
+{
+	return 0;
+}
+
+static int s6e8aa0_get_modes(struct drm_panel *panel)
+{
+	struct drm_connector *connector = panel->connector;
+	struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_create(connector->dev);
+	if (!mode) {
+		DRM_ERROR("failed to create a new display mode\n");
+		return 0;
+	}
+
+	drm_display_mode_from_videomode(&ctx->vm, mode);
+	mode->width_mm = ctx->width_mm;
+	mode->height_mm = ctx->height_mm;
+	connector->display_info.width_mm = mode->width_mm;
+	connector->display_info.height_mm = mode->height_mm;
+
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	drm_mode_probed_add(connector, mode);
+
+	return 1;
+}
+
+static const struct drm_panel_funcs s6e8aa0_drm_funcs = {
+	.disable = s6e8aa0_disable,
+	.unprepare = s6e8aa0_unprepare,
+	.prepare = s6e8aa0_prepare,
+	.enable = s6e8aa0_enable,
+	.get_modes = s6e8aa0_get_modes,
+};
+
+static int s6e8aa0_parse_dt(struct s6e8aa0 *ctx)
+{
+	struct device *dev = ctx->dev;
+	struct device_node *np = dev->of_node;
+	int ret;
+
+	ret = of_get_videomode(np, &ctx->vm, 0);
+	if (ret < 0)
+		return ret;
+
+	of_property_read_u32(np, "power-on-delay", &ctx->power_on_delay);
+	of_property_read_u32(np, "reset-delay", &ctx->reset_delay);
+	of_property_read_u32(np, "init-delay", &ctx->init_delay);
+	of_property_read_u32(np, "panel-width-mm", &ctx->width_mm);
+	of_property_read_u32(np, "panel-height-mm", &ctx->height_mm);
+
+	ctx->flip_horizontal = of_property_read_bool(np, "flip-horizontal");
+	ctx->flip_vertical = of_property_read_bool(np, "flip-vertical");
+
+	return 0;
+}
+
+static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
+{
+	struct device *dev = &dsi->dev;
+	struct s6e8aa0 *ctx;
+	int ret;
+
+	ctx = devm_kzalloc(dev, sizeof(struct s6e8aa0), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	mipi_dsi_set_drvdata(dsi, ctx);
+
+	ctx->dev = dev;
+
+	dsi->lanes = 4;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
+		| MIPI_DSI_MODE_VIDEO_HFP | MIPI_DSI_MODE_VIDEO_HBP
+		| MIPI_DSI_MODE_VIDEO_HSA | MIPI_DSI_MODE_EOT_PACKET
+		| MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_AUTO_VERT;
+
+	ret = s6e8aa0_parse_dt(ctx);
+	if (ret < 0)
+		return ret;
+
+	ctx->supplies[0].supply = "vdd3";
+	ctx->supplies[1].supply = "vci";
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+				      ctx->supplies);
+	if (ret < 0) {
+		dev_err(dev, "failed to get regulators: %d\n", ret);
+		return ret;
+	}
+
+	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(ctx->reset_gpio)) {
+		dev_err(dev, "cannot get reset-gpios %ld\n",
+			PTR_ERR(ctx->reset_gpio));
+		return PTR_ERR(ctx->reset_gpio);
+	}
+
+	ctx->brightness = GAMMA_LEVEL_NUM - 1;
+
+	drm_panel_init(&ctx->panel);
+	ctx->panel.dev = dev;
+	ctx->panel.funcs = &s6e8aa0_drm_funcs;
+
+	ret = drm_panel_add(&ctx->panel);
+	if (ret < 0)
+		return ret;
+
+	ret = mipi_dsi_attach(dsi);
+	if (ret < 0)
+		drm_panel_remove(&ctx->panel);
+
+	return ret;
+}
+
+static int s6e8aa0_remove(struct mipi_dsi_device *dsi)
+{
+	struct s6e8aa0 *ctx = mipi_dsi_get_drvdata(dsi);
+
+	mipi_dsi_detach(dsi);
+	drm_panel_remove(&ctx->panel);
+
+	return 0;
+}
+
+static const struct of_device_id s6e8aa0_of_match[] = {
+	{ .compatible = "samsung,s6e8aa0" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, s6e8aa0_of_match);
+
+static struct mipi_dsi_driver s6e8aa0_driver = {
+	.probe = s6e8aa0_probe,
+	.remove = s6e8aa0_remove,
+	.driver = {
+		.name = "panel-samsung-s6e8aa0",
+		.of_match_table = s6e8aa0_of_match,
+	},
+};
+module_mipi_dsi_driver(s6e8aa0_driver);
+
+MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joongmock Shin <jmock.shin@samsung.com>");
+MODULE_AUTHOR("Eunchul Kim <chulspro.kim@samsung.com>");
+MODULE_AUTHOR("Tomasz Figa <t.figa@samsung.com>");
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_DESCRIPTION("MIPI-DSI based s6e8aa0 AMOLED LCD Panel Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index f94201b..f97b73e 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -713,7 +713,12 @@
 	.hactive = { 1280, 1280, 1280 },
 	.hfront_porch = { 1, 1, 10 },
 	.hback_porch = { 1, 1, 10 },
-	.hsync_len = { 52, 158, 661 },
+	/*
+	 * According to the data sheet, the minimum horizontal blanking interval
+	 * is 54 clocks (1 + 52 + 1), but tests with a Nitrogen6X have shown the
+	 * minimum working horizontal blanking interval to be 60 clocks.
+	 */
+	.hsync_len = { 58, 158, 661 },
 	.vactive = { 800, 800, 800 },
 	.vfront_porch = { 1, 1, 10 },
 	.vback_porch = { 1, 1, 10 },
@@ -729,6 +734,7 @@
 		.width = 151,
 		.height = 94,
 	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
 };
 
 static const struct display_timing hannstar_hsd100pxn1_timing = {
@@ -943,6 +949,60 @@
 	},
 };
 
+static const struct drm_display_mode nec_nl4827hc19_05b_mode = {
+	.clock = 10870,
+	.hdisplay = 480,
+	.hsync_start = 480 + 2,
+	.hsync_end = 480 + 2 + 41,
+	.htotal = 480 + 2 + 41 + 2,
+	.vdisplay = 272,
+	.vsync_start = 272 + 2,
+	.vsync_end = 272 + 2 + 4,
+	.vtotal = 272 + 2 + 4 + 2,
+	.vrefresh = 74,
+};
+
+static const struct panel_desc nec_nl4827hc19_05b = {
+	.modes = &nec_nl4827hc19_05b_mode,
+	.num_modes = 1,
+	.bpc = 8,
+	.size = {
+		.width = 95,
+		.height = 54,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
+static const struct display_timing okaya_rs800480t_7x0gp_timing = {
+	.pixelclock = { 30000000, 30000000, 40000000 },
+	.hactive = { 800, 800, 800 },
+	.hfront_porch = { 40, 40, 40 },
+	.hback_porch = { 40, 40, 40 },
+	.hsync_len = { 1, 48, 48 },
+	.vactive = { 480, 480, 480 },
+	.vfront_porch = { 13, 13, 13 },
+	.vback_porch = { 29, 29, 29 },
+	.vsync_len = { 3, 3, 3 },
+	.flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc okaya_rs800480t_7x0gp = {
+	.timings = &okaya_rs800480t_7x0gp_timing,
+	.num_timings = 1,
+	.bpc = 6,
+	.size = {
+		.width = 154,
+		.height = 87,
+	},
+	.delay = {
+		.prepare = 41,
+		.enable = 50,
+		.unprepare = 41,
+		.disable = 50,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
 static const struct drm_display_mode ortustech_com43h4m85ulc_mode  = {
 	.clock = 25000,
 	.hdisplay = 480,
@@ -1113,6 +1173,12 @@
 		.compatible = "lg,lp129qe",
 		.data = &lg_lp129qe,
 	}, {
+		.compatible = "nec,nl4827hc19-05b",
+		.data = &nec_nl4827hc19_05b,
+	}, {
+		.compatible = "okaya,rs800480t-7x0gp",
+		.data = &okaya_rs800480t_7x0gp,
+	}, {
 		.compatible = "ortustech,com43h4m85ulc",
 		.data = &ortustech_com43h4m85ulc,
 	}, {
@@ -1169,6 +1235,34 @@
 	unsigned int lanes;
 };
 
+static const struct drm_display_mode auo_b080uan01_mode = {
+	.clock = 154500,
+	.hdisplay = 1200,
+	.hsync_start = 1200 + 62,
+	.hsync_end = 1200 + 62 + 4,
+	.htotal = 1200 + 62 + 4 + 62,
+	.vdisplay = 1920,
+	.vsync_start = 1920 + 9,
+	.vsync_end = 1920 + 9 + 2,
+	.vtotal = 1920 + 9 + 2 + 8,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc_dsi auo_b080uan01 = {
+	.desc = {
+		.modes = &auo_b080uan01_mode,
+		.num_modes = 1,
+		.bpc = 8,
+		.size = {
+			.width = 108,
+			.height = 272,
+		},
+	},
+	.flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS,
+	.format = MIPI_DSI_FMT_RGB888,
+	.lanes = 4,
+};
+
 static const struct drm_display_mode lg_ld070wx3_sl01_mode = {
 	.clock = 71000,
 	.hdisplay = 800,
@@ -1256,6 +1350,9 @@
 
 static const struct of_device_id dsi_of_match[] = {
 	{
+		.compatible = "auo,b080uan01",
+		.data = &auo_b080uan01
+	}, {
 		.compatible = "lg,ld070wx3-sl01",
 		.data = &lg_ld070wx3_sl01
 	}, {
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 6b6e57e..41c422f 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -197,7 +197,7 @@
 {
 	struct qxl_fbdev *qfbdev = info->par;
 
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 	qxl_dirty_update(qfbdev, rect->dx, rect->dy, rect->width,
 			 rect->height);
 }
@@ -207,7 +207,7 @@
 {
 	struct qxl_fbdev *qfbdev = info->par;
 
-	sys_copyarea(info, area);
+	drm_fb_helper_sys_copyarea(info, area);
 	qxl_dirty_update(qfbdev, area->dx, area->dy, area->width,
 			 area->height);
 }
@@ -217,7 +217,7 @@
 {
 	struct qxl_fbdev *qfbdev = info->par;
 
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 	qxl_dirty_update(qfbdev, image->dx, image->dy, image->width,
 			 image->height);
 }
@@ -345,7 +345,6 @@
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct drm_gem_object *gobj = NULL;
 	struct qxl_bo *qbo = NULL;
-	struct device *device = &qdev->pdev->dev;
 	int ret;
 	int size;
 	int bpp = sizes->surface_bpp;
@@ -374,9 +373,9 @@
 		 shadow);
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 
-	info = framebuffer_alloc(0, device);
-	if (info == NULL) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(&qfbdev->helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_unref;
 	}
 
@@ -388,7 +387,7 @@
 
 	/* setup helper with fb data */
 	qfbdev->helper.fb = fb;
-	qfbdev->helper.fbdev = info;
+
 	qfbdev->shadow = shadow;
 	strcpy(info->fix.id, "qxldrmfb");
 
@@ -410,11 +409,6 @@
 			       sizes->fb_height);
 
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out_unref;
-	}
 	info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
 	info->apertures->ranges[0].size = qdev->vram_size;
 
@@ -423,13 +417,7 @@
 
 	if (info->screen_base == NULL) {
 		ret = -ENOSPC;
-		goto out_unref;
-	}
-
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unref;
+		goto out_destroy_fbi;
 	}
 
 	info->fbdefio = &qxl_defio;
@@ -441,6 +429,8 @@
 	DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
 	return 0;
 
+out_destroy_fbi:
+	drm_fb_helper_release_fbi(&qfbdev->helper);
 out_unref:
 	if (qbo) {
 		ret = qxl_bo_reserve(qbo, false);
@@ -479,15 +469,11 @@
 
 static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
 {
-	struct fb_info *info;
 	struct qxl_framebuffer *qfb = &qfbdev->qfb;
 
-	if (qfbdev->helper.fbdev) {
-		info = qfbdev->helper.fbdev;
+	drm_fb_helper_unregister_fbi(&qfbdev->helper);
+	drm_fb_helper_release_fbi(&qfbdev->helper);
 
-		unregister_framebuffer(info);
-		framebuffer_release(info);
-	}
 	if (qfb->obj) {
 		qxlfb_destroy_pinned_object(qfb->obj);
 		qfb->obj = NULL;
@@ -557,7 +543,7 @@
 
 void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
 {
-	fb_set_suspend(qdev->mode_info.qfbdev->helper.fbdev, state);
+	drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state);
 }
 
 bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj)
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 6d6f33d..b28370e 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -272,7 +272,6 @@
 		return;
 	dev_err(qdev->dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
-		mutex_lock(&qdev->ddev->struct_mutex);
 		dev_err(qdev->dev, "%p %p %lu %lu force free\n",
 			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
 			*((unsigned long *)&bo->gem_base.refcount));
@@ -280,8 +279,7 @@
 		list_del_init(&bo->list);
 		mutex_unlock(&qdev->gem.mutex);
 		/* this should unref the ttm bo */
-		drm_gem_object_unreference(&bo->gem_base);
-		mutex_unlock(&qdev->ddev->struct_mutex);
+		drm_gem_object_unreference_unlocked(&bo->gem_base);
 	}
 }
 
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index f81e0d7..9cd49c5 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -171,8 +171,9 @@
 		return -E2BIG;
 
 	tx_buf[0] = msg->address & 0xff;
-	tx_buf[1] = msg->address >> 8;
-	tx_buf[2] = msg->request << 4;
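+	/*
+	 * DP AUX addresses are 20 bits wide; the top nibble shares tx_buf[2]
+	 * with the 4-bit request code.
+	 */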
+	tx_buf[1] = (msg->address >> 8) & 0xff;
+	tx_buf[2] = (msg->request << 4) |
+		((msg->address >> 16) & 0xf);
 	tx_buf[3] = msg->size ? (msg->size - 1) : 0;
 
 	switch (msg->request & ~DP_AUX_I2C_MOT) {
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 44480c1..7520727 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -76,16 +76,35 @@
 
 struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev)
 {
-	int i;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	struct radeon_encoder_atom_dig *dig;
+	struct r600_audio_pin *pin = NULL;
+	int i, pin_count;
 
 	dce6_afmt_get_connected_pins(rdev);
 
 	for (i = 0; i < rdev->audio.num_pins; i++) {
-		if (rdev->audio.pin[i].connected)
-			return &rdev->audio.pin[i];
+		if (rdev->audio.pin[i].connected) {
+			pin = &rdev->audio.pin[i];
+			pin_count = 0;
+
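+			/* count the digital encoders already using this pin */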
+			list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+				if (radeon_encoder_is_digital(encoder)) {
+					radeon_encoder = to_radeon_encoder(encoder);
+					dig = radeon_encoder->enc_priv;
+					if (dig->pin == pin)
+						pin_count++;
+				}
+			}
+
+			if (pin_count == 0)
+				return pin;
+		}
 	}
-	DRM_ERROR("No connected audio pins found!\n");
-	return NULL;
+	if (!pin)
+		DRM_ERROR("No connected audio pins found!\n");
+	return pin;
 }
 
 void dce6_afmt_select_pin(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index fbc8d88..2c02e99 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -522,13 +522,15 @@
 		return err;
 	}
 
-	if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
-		if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
-			frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
-		else
-			frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
-	} else {
-		frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+	if (radeon_encoder->output_csc != RADEON_OUTPUT_CSC_BYPASS) {
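+		/* only pick a quantization range when the CSC is active */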
+		if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
+			if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
+				frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
+			else
+				frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
+		} else {
+			frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+		}
 	}
 
 	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index c097d3a..a9b01bc 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3387,6 +3387,14 @@
 	    rdev->pdev->subsystem_device == 0x30ae)
 		return;
 
+	/* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS480 &&
+	    rdev->pdev->subsystem_vendor == 0x103c &&
+	    rdev->pdev->subsystem_device == 0x280a)
+		return;
+
 	/* DYN CLK 1 */
 	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
 	if (table)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 94b21ae..5a2cafb 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -95,6 +95,11 @@
 			if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
 				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 			} else if (radeon_dp_needs_link_train(radeon_connector)) {
+				/* Don't try to start link training before we
+				 * have the DPCD. */
+				if (!radeon_dp_getdpcd(radeon_connector))
+					return;
+
 				/* set it to OFF so that drm_helper_connector_dpms()
 				 * won't return immediately since the current state
 				 * is ON at this point.
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index fcbd60b..3b0c229 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -116,8 +116,8 @@
 	       AUX_SW_WR_BYTES(bytes));
 
 	/* write the data header into the registers */
-	/* request, addres, msg size */
-	byte = (msg->request << 4);
+	/* request, address, msg size */
+	byte = (msg->request << 4) | ((msg->address >> 16) & 0xf);
 	WREG32(AUX_SW_DATA + aux_offset[instance],
 	       AUX_SW_DATA_MASK(byte) | AUX_SW_AUTOINCREMENT_DISABLE);
 
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 257b10be..5e09c06 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -246,9 +246,10 @@
 	kfree(radeon_connector);
 }
 
-static void radeon_connector_dpms(struct drm_connector *connector, int mode)
+static int radeon_connector_dpms(struct drm_connector *connector, int mode)
 {
 	DRM_DEBUG_KMS("\n");
+	return 0;
 }
 
 static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
@@ -284,11 +285,10 @@
 
 	drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
 	drm_mode_connector_set_path_property(connector, pathprop);
-	drm_reinit_primary_mode_group(dev);
 
-	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_lock_all(dev);
 	radeon_fb_add_connector(rdev, connector);
-	mutex_unlock(&dev->mode_config.mutex);
+	drm_modeset_unlock_all(dev);
 
 	drm_connector_register(connector);
 	return connector;
@@ -303,14 +303,12 @@
 
 	drm_connector_unregister(connector);
 	/* need to nuke the connector */
-	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_lock_all(dev);
 	/* dpms off */
 	radeon_fb_remove_connector(rdev, connector);
 
 	drm_connector_cleanup(connector);
-	mutex_unlock(&dev->mode_config.mutex);
-	drm_reinit_primary_mode_group(dev);
-
+	drm_modeset_unlock_all(dev);
 
 	kfree(connector);
 	DRM_DEBUG_KMS("\n");
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index aeb6767..7214858 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -82,9 +82,9 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = radeon_fb_helper_set_par,
-	.fb_fillrect = cfb_fillrect,
-	.fb_copyarea = cfb_copyarea,
-	.fb_imageblit = cfb_imageblit,
+	.fb_fillrect = drm_fb_helper_cfb_fillrect,
+	.fb_copyarea = drm_fb_helper_cfb_copyarea,
+	.fb_imageblit = drm_fb_helper_cfb_imageblit,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -227,7 +227,6 @@
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct drm_gem_object *gobj = NULL;
 	struct radeon_bo *rbo = NULL;
-	struct device *device = &rdev->pdev->dev;
 	int ret;
 	unsigned long tmp;
 
@@ -250,9 +249,9 @@
 	rbo = gem_to_radeon_bo(gobj);
 
 	/* okay we have an object now allocate the framebuffer */
-	info = framebuffer_alloc(0, device);
-	if (info == NULL) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_unref;
 	}
 
@@ -262,14 +261,13 @@
 	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
 	if (ret) {
 		DRM_ERROR("failed to initialize framebuffer %d\n", ret);
-		goto out_unref;
+		goto out_destroy_fbi;
 	}
 
 	fb = &rfbdev->rfb.base;
 
 	/* setup helper */
 	rfbdev->helper.fb = fb;
-	rfbdev->helper.fbdev = info;
 
 	memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
 
@@ -289,11 +287,6 @@
 	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
 
 	/* setup aperture base/size for vesafb takeover */
-	info->apertures = alloc_apertures(1);
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out_unref;
-	}
 	info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
 	info->apertures->ranges[0].size = rdev->mc.aper_size;
 
@@ -301,13 +294,7 @@
 
 	if (info->screen_base == NULL) {
 		ret = -ENOSPC;
-		goto out_unref;
-	}
-
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unref;
+		goto out_destroy_fbi;
 	}
 
 	DRM_INFO("fb mappable at 0x%lX\n",  info->fix.smem_start);
@@ -319,6 +306,8 @@
 	vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
 	return 0;
 
+out_destroy_fbi:
+	drm_fb_helper_release_fbi(helper);
 out_unref:
 	if (rbo) {
 
@@ -339,17 +328,10 @@
 
 static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
 {
-	struct fb_info *info;
 	struct radeon_framebuffer *rfb = &rfbdev->rfb;
 
-	if (rfbdev->helper.fbdev) {
-		info = rfbdev->helper.fbdev;
-
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&rfbdev->helper);
+	drm_fb_helper_release_fbi(&rfbdev->helper);
 
 	if (rfb->obj) {
 		radeonfb_destroy_pinned_object(rfb->obj);
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index e476c33..9a4d69e 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -845,7 +845,8 @@
 		hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
 		break;
 
-	case KGD_ENGINE_SDMA:
+	case KGD_ENGINE_SDMA1:
+	case KGD_ENGINE_SDMA2:
 		hdr = (const union radeon_firmware_header *)
 							rdev->sdma_fw->data;
 		break;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 6763627..d302488 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -419,7 +419,6 @@
 	}
 	dev_err(rdev->dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
-		mutex_lock(&rdev->ddev->struct_mutex);
 		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
 			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
 			*((unsigned long *)&bo->gem_base.refcount));
@@ -427,8 +426,7 @@
 		list_del_init(&bo->list);
 		mutex_unlock(&bo->rdev->gem.mutex);
 		/* this should unref the ttm bo */
-		drm_gem_object_unreference(&bo->gem_base);
-		mutex_unlock(&rdev->ddev->struct_mutex);
+		drm_gem_object_unreference_unlocked(&bo->gem_base);
 	}
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index c1ba83a..05751f3 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -253,7 +253,6 @@
 	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
 		return;
 
-	mutex_lock(&rdev->ddev->struct_mutex);
 	down_write(&rdev->pm.mclk_lock);
 	mutex_lock(&rdev->ring_lock);
 
@@ -268,7 +267,6 @@
 			/* needs a GPU reset dont reset here */
 			mutex_unlock(&rdev->ring_lock);
 			up_write(&rdev->pm.mclk_lock);
-			mutex_unlock(&rdev->ddev->struct_mutex);
 			return;
 		}
 	}
@@ -304,7 +302,6 @@
 
 	mutex_unlock(&rdev->ring_lock);
 	up_write(&rdev->pm.mclk_lock);
-	mutex_unlock(&rdev->ddev->struct_mutex);
 }
 
 static void radeon_pm_print_states(struct radeon_device *rdev)
@@ -1062,7 +1059,6 @@
 		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
 	}
 
-	mutex_lock(&rdev->ddev->struct_mutex);
 	down_write(&rdev->pm.mclk_lock);
 	mutex_lock(&rdev->ring_lock);
 
@@ -1113,7 +1109,6 @@
 done:
 	mutex_unlock(&rdev->ring_lock);
 	up_write(&rdev->pm.mclk_lock);
-	mutex_unlock(&rdev->ddev->struct_mutex);
 }
 
 void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 65d6ba6..48cb199 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -496,7 +496,8 @@
 	return true;
 }
 
-static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc)
+static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
+				      struct drm_crtc_state *old_crtc_state)
 {
 	struct drm_pending_vblank_event *event = crtc->state->event;
 	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
@@ -512,7 +513,8 @@
 	}
 }
 
-static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc)
+static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
+				      struct drm_crtc_state *old_crtc_state)
 {
 	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
 
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 5b0dc0f..f261512 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -37,9 +37,9 @@
 static struct fb_ops rockchip_drm_fbdev_ops = {
 	.owner		= THIS_MODULE,
 	.fb_mmap	= rockchip_fbdev_mmap,
-	.fb_fillrect	= cfb_fillrect,
-	.fb_copyarea	= cfb_copyarea,
-	.fb_imageblit	= cfb_imageblit,
+	.fb_fillrect	= drm_fb_helper_cfb_fillrect,
+	.fb_copyarea	= drm_fb_helper_cfb_copyarea,
+	.fb_imageblit	= drm_fb_helper_cfb_imageblit,
 	.fb_check_var	= drm_fb_helper_check_var,
 	.fb_set_par	= drm_fb_helper_set_par,
 	.fb_blank	= drm_fb_helper_blank,
@@ -77,10 +77,10 @@
 
 	private->fbdev_bo = &rk_obj->base;
 
-	fbi = framebuffer_alloc(0, dev->dev);
-	if (!fbi) {
-		dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
-		ret = -ENOMEM;
+	fbi = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(fbi)) {
+		dev_err(dev->dev, "Failed to create framebuffer info.\n");
+		ret = PTR_ERR(fbi);
 		goto err_rockchip_gem_free_object;
 	}
 
@@ -89,21 +89,13 @@
 	if (IS_ERR(helper->fb)) {
 		dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
 		ret = PTR_ERR(helper->fb);
-		goto err_framebuffer_release;
+		goto err_release_fbi;
 	}
 
-	helper->fbdev = fbi;
-
 	fbi->par = helper;
 	fbi->flags = FBINFO_FLAG_DEFAULT;
 	fbi->fbops = &rockchip_drm_fbdev_ops;
 
-	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
-	if (ret) {
-		dev_err(dev->dev, "Failed to allocate color map.\n");
-		goto err_drm_framebuffer_unref;
-	}
-
 	fb = helper->fb;
 	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
@@ -124,10 +116,8 @@
 
 	return 0;
 
-err_drm_framebuffer_unref:
-	drm_framebuffer_unreference(helper->fb);
-err_framebuffer_release:
-	framebuffer_release(fbi);
+err_release_fbi:
+	drm_fb_helper_release_fbi(helper);
 err_rockchip_gem_free_object:
 	rockchip_gem_free_object(&rk_obj->base);
 	return ret;
@@ -190,21 +180,8 @@
 
 	helper = &private->fbdev_helper;
 
-	if (helper->fbdev) {
-		struct fb_info *info;
-		int ret;
-
-		info = helper->fbdev;
-		ret = unregister_framebuffer(info);
-		if (ret < 0)
-			DRM_DEBUG_KMS("failed unregister_framebuffer() - %d\n",
-				      ret);
-
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(helper);
+	drm_fb_helper_release_fbi(helper);
 
 	if (helper->fb)
 		drm_framebuffer_unreference(helper->fb);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index eba5f8a..a6d9104 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -200,13 +200,10 @@
 	struct drm_gem_object *obj;
 	int ret;
 
-	mutex_lock(&dev->struct_mutex);
-
 	obj = drm_gem_object_lookup(dev, file_priv, handle);
 	if (!obj) {
 		DRM_ERROR("failed to lookup gem object.\n");
-		ret = -EINVAL;
-		goto unlock;
+		return -EINVAL;
 	}
 
 	ret = drm_gem_create_mmap_offset(obj);
@@ -217,10 +214,9 @@
 	DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
 
 out:
-	drm_gem_object_unreference(obj);
-unlock:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ret;
 }
 
 /*
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 34b78e7..5d8ae5e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -50,6 +50,8 @@
 
 #define VOP_WIN_SET(x, win, name, v) \
 		REG_SET(x, win->base, win->phy->name, v, RELAXED)
+#define VOP_SCL_SET(x, win, name, v) \
+		REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
 #define VOP_CTRL_SET(x, name, v) \
 		REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)
 
@@ -164,7 +166,37 @@
 	struct vop_reg vpost_st_end;
 };
 
+struct vop_scl_regs {
+	struct vop_reg cbcr_vsd_mode;
+	struct vop_reg cbcr_vsu_mode;
+	struct vop_reg cbcr_hsd_mode;
+	struct vop_reg cbcr_ver_scl_mode;
+	struct vop_reg cbcr_hor_scl_mode;
+	struct vop_reg yrgb_vsd_mode;
+	struct vop_reg yrgb_vsu_mode;
+	struct vop_reg yrgb_hsd_mode;
+	struct vop_reg yrgb_ver_scl_mode;
+	struct vop_reg yrgb_hor_scl_mode;
+	struct vop_reg line_load_mode;
+	struct vop_reg cbcr_axi_gather_num;
+	struct vop_reg yrgb_axi_gather_num;
+	struct vop_reg vsd_cbcr_gt2;
+	struct vop_reg vsd_cbcr_gt4;
+	struct vop_reg vsd_yrgb_gt2;
+	struct vop_reg vsd_yrgb_gt4;
+	struct vop_reg bic_coe_sel;
+	struct vop_reg cbcr_axi_gather_en;
+	struct vop_reg yrgb_axi_gather_en;
+
+	struct vop_reg lb_mode;
+	struct vop_reg scale_yrgb_x;
+	struct vop_reg scale_yrgb_y;
+	struct vop_reg scale_cbcr_x;
+	struct vop_reg scale_cbcr_y;
+};
+
 struct vop_win_phy {
+	const struct vop_scl_regs *scl;
 	const uint32_t *data_formats;
 	uint32_t nformats;
 
@@ -222,7 +254,36 @@
 	DRM_FORMAT_BGR565,
 };
 
+static const struct vop_scl_regs win_full_scl = {
+	.cbcr_vsd_mode = VOP_REG(WIN0_CTRL1, 0x1, 31),
+	.cbcr_vsu_mode = VOP_REG(WIN0_CTRL1, 0x1, 30),
+	.cbcr_hsd_mode = VOP_REG(WIN0_CTRL1, 0x3, 28),
+	.cbcr_ver_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 26),
+	.cbcr_hor_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 24),
+	.yrgb_vsd_mode = VOP_REG(WIN0_CTRL1, 0x1, 23),
+	.yrgb_vsu_mode = VOP_REG(WIN0_CTRL1, 0x1, 22),
+	.yrgb_hsd_mode = VOP_REG(WIN0_CTRL1, 0x3, 20),
+	.yrgb_ver_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 18),
+	.yrgb_hor_scl_mode = VOP_REG(WIN0_CTRL1, 0x3, 16),
+	.line_load_mode = VOP_REG(WIN0_CTRL1, 0x1, 15),
+	.cbcr_axi_gather_num = VOP_REG(WIN0_CTRL1, 0x7, 12),
+	.yrgb_axi_gather_num = VOP_REG(WIN0_CTRL1, 0xf, 8),
+	.vsd_cbcr_gt2 = VOP_REG(WIN0_CTRL1, 0x1, 7),
+	.vsd_cbcr_gt4 = VOP_REG(WIN0_CTRL1, 0x1, 6),
+	.vsd_yrgb_gt2 = VOP_REG(WIN0_CTRL1, 0x1, 5),
+	.vsd_yrgb_gt4 = VOP_REG(WIN0_CTRL1, 0x1, 4),
+	.bic_coe_sel = VOP_REG(WIN0_CTRL1, 0x3, 2),
+	.cbcr_axi_gather_en = VOP_REG(WIN0_CTRL1, 0x1, 1),
+	.yrgb_axi_gather_en = VOP_REG(WIN0_CTRL1, 0x1, 0),
+	.lb_mode = VOP_REG(WIN0_CTRL0, 0x7, 5),
+	.scale_yrgb_x = VOP_REG(WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
+	.scale_yrgb_y = VOP_REG(WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
+	.scale_cbcr_x = VOP_REG(WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
+	.scale_cbcr_y = VOP_REG(WIN0_SCL_FACTOR_CBR, 0xffff, 16),
+};
+
 static const struct vop_win_phy win01_data = {
+	.scl = &win_full_scl,
 	.data_formats = formats_01,
 	.nformats = ARRAY_SIZE(formats_01),
 	.enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
@@ -279,6 +340,12 @@
 	{DSP_CTRL0, 0x00000000},
 	{WIN0_CTRL0, 0x00000080},
 	{WIN1_CTRL0, 0x00000080},
+	/* TODO: Win2/3 support a multiple-area function, but we haven't found
+	 * a suitable way to use it yet, so just treat them like the other
+	 * windows, with only area 0 enabled.
+	 */
+	{WIN2_CTRL0, 0x00000010},
+	{WIN3_CTRL0, 0x00000010},
 };
 
 /*
@@ -393,6 +460,18 @@
 	}
 }
 
+static bool is_yuv_support(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV24:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static bool is_alpha_support(uint32_t format)
 {
 	switch (format) {
@@ -404,6 +483,126 @@
 	}
 }
 
+static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
+				  uint32_t dst, bool is_horizontal,
+				  int vsu_mode, int *vskiplines)
+{
+	uint16_t val = 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT;
+
+	if (is_horizontal) {
+		if (mode == SCALE_UP)
+			val = GET_SCL_FT_BIC(src, dst);
+		else if (mode == SCALE_DOWN)
+			val = GET_SCL_FT_BILI_DN(src, dst);
+	} else {
+		if (mode == SCALE_UP) {
+			if (vsu_mode == SCALE_UP_BIL)
+				val = GET_SCL_FT_BILI_UP(src, dst);
+			else
+				val = GET_SCL_FT_BIC(src, dst);
+		} else if (mode == SCALE_DOWN) {
+			if (vskiplines) {
+				*vskiplines = scl_get_vskiplines(src, dst);
+				val = scl_get_bili_dn_vskip(src, dst,
+							    *vskiplines);
+			} else {
+				val = GET_SCL_FT_BILI_DN(src, dst);
+			}
+		}
+	}
+
+	return val;
+}
+
+static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
+			     uint32_t src_w, uint32_t src_h, uint32_t dst_w,
+			     uint32_t dst_h, uint32_t pixel_format)
+{
+	uint16_t yrgb_hor_scl_mode, yrgb_ver_scl_mode;
+	uint16_t cbcr_hor_scl_mode = SCALE_NONE;
+	uint16_t cbcr_ver_scl_mode = SCALE_NONE;
+	int hsub = drm_format_horz_chroma_subsampling(pixel_format);
+	int vsub = drm_format_vert_chroma_subsampling(pixel_format);
+	bool is_yuv = is_yuv_support(pixel_format);
+	uint16_t cbcr_src_w = src_w / hsub;
+	uint16_t cbcr_src_h = src_h / vsub;
+	uint16_t vsu_mode;
+	uint16_t lb_mode;
+	uint32_t val;
+	int vskiplines = 0;
+
+	if (dst_w > 3840) {
+		DRM_ERROR("Maximum destination width (3840) exceeded\n");
+		return;
+	}
+
+	yrgb_hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
+	yrgb_ver_scl_mode = scl_get_scl_mode(src_h, dst_h);
+
+	if (is_yuv) {
+		cbcr_hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
+		cbcr_ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
+		if (cbcr_hor_scl_mode == SCALE_DOWN)
+			lb_mode = scl_vop_cal_lb_mode(dst_w, true);
+		else
+			lb_mode = scl_vop_cal_lb_mode(cbcr_src_w, true);
+	} else {
+		if (yrgb_hor_scl_mode == SCALE_DOWN)
+			lb_mode = scl_vop_cal_lb_mode(dst_w, false);
+		else
+			lb_mode = scl_vop_cal_lb_mode(src_w, false);
+	}
+
+	VOP_SCL_SET(vop, win, lb_mode, lb_mode);
+	if (lb_mode == LB_RGB_3840X2) {
+		if (yrgb_ver_scl_mode != SCALE_NONE) {
+			DRM_ERROR("yrgb vertical scaling not allowed\n");
+			return;
+		}
+		if (cbcr_ver_scl_mode != SCALE_NONE) {
+			DRM_ERROR("cbcr vertical scaling not allowed\n");
+			return;
+		}
+		vsu_mode = SCALE_UP_BIL;
+	} else if (lb_mode == LB_RGB_2560X4) {
+		vsu_mode = SCALE_UP_BIL;
+	} else {
+		vsu_mode = SCALE_UP_BIC;
+	}
+
+	val = scl_vop_cal_scale(yrgb_hor_scl_mode, src_w, dst_w,
+				true, 0, NULL);
+	VOP_SCL_SET(vop, win, scale_yrgb_x, val);
+	val = scl_vop_cal_scale(yrgb_ver_scl_mode, src_h, dst_h,
+				false, vsu_mode, &vskiplines);
+	VOP_SCL_SET(vop, win, scale_yrgb_y, val);
+
+	VOP_SCL_SET(vop, win, vsd_yrgb_gt4, vskiplines == 4);
+	VOP_SCL_SET(vop, win, vsd_yrgb_gt2, vskiplines == 2);
+
+	VOP_SCL_SET(vop, win, yrgb_hor_scl_mode, yrgb_hor_scl_mode);
+	VOP_SCL_SET(vop, win, yrgb_ver_scl_mode, yrgb_ver_scl_mode);
+	VOP_SCL_SET(vop, win, yrgb_hsd_mode, SCALE_DOWN_BIL);
+	VOP_SCL_SET(vop, win, yrgb_vsd_mode, SCALE_DOWN_BIL);
+	VOP_SCL_SET(vop, win, yrgb_vsu_mode, vsu_mode);
+	if (is_yuv) {
+		val = scl_vop_cal_scale(cbcr_hor_scl_mode, cbcr_src_w,
+					dst_w, true, 0, NULL);
+		VOP_SCL_SET(vop, win, scale_cbcr_x, val);
+		val = scl_vop_cal_scale(cbcr_ver_scl_mode, cbcr_src_h,
+					dst_h, false, vsu_mode, &vskiplines);
+		VOP_SCL_SET(vop, win, scale_cbcr_y, val);
+
+		VOP_SCL_SET(vop, win, vsd_cbcr_gt4, vskiplines == 4);
+		VOP_SCL_SET(vop, win, vsd_cbcr_gt2, vskiplines == 2);
+		VOP_SCL_SET(vop, win, cbcr_hor_scl_mode, cbcr_hor_scl_mode);
+		VOP_SCL_SET(vop, win, cbcr_ver_scl_mode, cbcr_ver_scl_mode);
+		VOP_SCL_SET(vop, win, cbcr_hsd_mode, SCALE_DOWN_BIL);
+		VOP_SCL_SET(vop, win, cbcr_vsd_mode, SCALE_DOWN_BIL);
+		VOP_SCL_SET(vop, win, cbcr_vsu_mode, vsu_mode);
+	}
+}
+
 static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
 {
 	unsigned long flags;
@@ -478,6 +677,7 @@
 		goto err_disable_aclk;
 	}
 
+	memcpy(vop->regs, vop->regsbak, vop->len);
 	/*
 	 * At this point the VOP clock and IOMMU are enabled, so accessing the VOP regs is safe.
 	 */
@@ -598,17 +798,22 @@
 	struct vop *vop = to_vop(crtc);
 	struct drm_gem_object *obj;
 	struct rockchip_gem_object *rk_obj;
+	struct drm_gem_object *uv_obj;
+	struct rockchip_gem_object *rk_uv_obj;
 	unsigned long offset;
 	unsigned int actual_w;
 	unsigned int actual_h;
 	unsigned int dsp_stx;
 	unsigned int dsp_sty;
 	unsigned int y_vir_stride;
+	unsigned int uv_vir_stride = 0;
 	dma_addr_t yrgb_mst;
+	dma_addr_t uv_mst = 0;
 	enum vop_data_format format;
 	uint32_t val;
 	bool is_alpha;
 	bool rb_swap;
+	bool is_yuv;
 	bool visible;
 	int ret;
 	struct drm_rect dest = {
@@ -629,11 +834,15 @@
 		.y2 = crtc->mode.vdisplay,
 	};
 	bool can_position = plane->type != DRM_PLANE_TYPE_PRIMARY;
+	int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
+					DRM_PLANE_HELPER_NO_SCALING;
+	int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
+					DRM_PLANE_HELPER_NO_SCALING;
 
 	ret = drm_plane_helper_check_update(plane, crtc, fb,
 					    &src, &dest, &clip,
-					    DRM_PLANE_HELPER_NO_SCALING,
-					    DRM_PLANE_HELPER_NO_SCALING,
+					    min_scale,
+					    max_scale,
 					    can_position, false, &visible);
 	if (ret)
 		return ret;
@@ -643,6 +852,8 @@
 
 	is_alpha = is_alpha_support(fb->pixel_format);
 	rb_swap = has_rb_swapped(fb->pixel_format);
+	is_yuv = is_yuv_support(fb->pixel_format);
+
 	format = vop_convert_format(fb->pixel_format);
 	if (format < 0)
 		return format;
@@ -655,19 +866,46 @@
 
 	rk_obj = to_rockchip_obj(obj);
 
+	if (is_yuv) {
+		/*
+		 * Src.x1 can be odd when do clip, but yuv plane start point
+		 * Src.x1 can be odd after clipping, but the YUV plane start
+		 * point must be aligned to 2 pixels.
+		val = (src.x1 >> 16) % 2;
+		src.x1 += val << 16;
+		src.x2 += val << 16;
+	}
+
 	actual_w = (src.x2 - src.x1) >> 16;
 	actual_h = (src.y2 - src.y1) >> 16;
-	crtc_x = max(0, crtc_x);
-	crtc_y = max(0, crtc_y);
 
-	dsp_stx = crtc_x + crtc->mode.htotal - crtc->mode.hsync_start;
-	dsp_sty = crtc_y + crtc->mode.vtotal - crtc->mode.vsync_start;
+	dsp_stx = dest.x1 + crtc->mode.htotal - crtc->mode.hsync_start;
+	dsp_sty = dest.y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
 
-	offset = (src.x1 >> 16) * (fb->bits_per_pixel >> 3);
+	offset = (src.x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0);
 	offset += (src.y1 >> 16) * fb->pitches[0];
-	yrgb_mst = rk_obj->dma_addr + offset;
 
-	y_vir_stride = fb->pitches[0] / (fb->bits_per_pixel >> 3);
+	yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
+	y_vir_stride = fb->pitches[0] >> 2;
+
+	if (is_yuv) {
+		int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
+		int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
+		int bpp = drm_format_plane_cpp(fb->pixel_format, 1);
+
+		uv_obj = rockchip_fb_get_gem_obj(fb, 1);
+		if (!uv_obj) {
+			DRM_ERROR("fail to get uv object from framebuffer\n");
+			return -EINVAL;
+		}
+		rk_uv_obj = to_rockchip_obj(uv_obj);
+		uv_vir_stride = fb->pitches[1] >> 2;
+
+		offset = (src.x1 >> 16) * bpp / hsub;
+		offset += (src.y1 >> 16) * fb->pitches[1] / vsub;
+
+		uv_mst = rk_uv_obj->dma_addr + offset + fb->offsets[1];
+	}
 
 	/*
 	 * If this plane update changes the plane's framebuffer, (or more
@@ -704,9 +942,22 @@
 	VOP_WIN_SET(vop, win, format, format);
 	VOP_WIN_SET(vop, win, yrgb_vir, y_vir_stride);
 	VOP_WIN_SET(vop, win, yrgb_mst, yrgb_mst);
+	if (is_yuv) {
+		VOP_WIN_SET(vop, win, uv_vir, uv_vir_stride);
+		VOP_WIN_SET(vop, win, uv_mst, uv_mst);
+	}
+
+	if (win->phy->scl)
+		scl_vop_cal_scl_fac(vop, win, actual_w, actual_h,
+				    dest.x2 - dest.x1, dest.y2 - dest.y1,
+				    fb->pixel_format);
+
 	val = (actual_h - 1) << 16;
 	val |= (actual_w - 1) & 0xffff;
 	VOP_WIN_SET(vop, win, act_info, val);
+
+	val = (dest.y2 - dest.y1 - 1) << 16;
+	val |= (dest.x2 - dest.x1 - 1) & 0xffff;
 	VOP_WIN_SET(vop, win, dsp_info, val);
 	val = (dsp_sty - 1) << 16;
 	val |= (dsp_stx - 1) & 0xffff;
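For readers following the YUV changes above: the new code derives each plane's start address from the clipped source origin, the per-plane pitch, and the chroma subsampling factors. A minimal standalone sketch of the same arithmetic for NV12 (hsub = vsub = 2, 1 byte per Y sample, 2 bytes per interleaved UV pair); the concrete values are made up for illustration, this is not driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t x = 64, y = 32;     /* clipped source origin, x already even */
	uint32_t pitch_y = 1920;     /* fb->pitches[0] */
	uint32_t pitch_uv = 1920;    /* fb->pitches[1] */
	int hsub = 2, vsub = 2;      /* NV12 chroma subsampling */

	/* Y plane: 1 byte per pixel, full resolution */
	uint32_t off_y = x * 1 + y * pitch_y;
	/* UV plane: 2 bytes per UV pair, half resolution in both axes */
	uint32_t off_uv = x * 2 / hsub + y * pitch_uv / vsub;

	printf("Y offset = %u, UV offset = %u\n", off_y, off_uv);
	return 0;
}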
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 63e9b3a..a2d4ddb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -198,4 +198,92 @@
 	ALPHA_SRC_GLOBAL,
 };
 
+enum scale_mode {
+	SCALE_NONE = 0x0,
+	SCALE_UP   = 0x1,
+	SCALE_DOWN = 0x2
+};
+
+enum lb_mode {
+	LB_YUV_3840X5 = 0x0,
+	LB_YUV_2560X8 = 0x1,
+	LB_RGB_3840X2 = 0x2,
+	LB_RGB_2560X4 = 0x3,
+	LB_RGB_1920X5 = 0x4,
+	LB_RGB_1280X8 = 0x5
+};
+
+enum scale_up_mode {
+	SCALE_UP_BIL = 0x0,
+	SCALE_UP_BIC = 0x1
+};
+
+enum scale_down_mode {
+	SCALE_DOWN_BIL = 0x0,
+	SCALE_DOWN_AVG = 0x1
+};
+
+#define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
+#define SCL_FT_DEFAULT_FIXPOINT_SHIFT	12
+#define SCL_MAX_VSKIPLINES		4
+#define MIN_SCL_FT_AFTER_VSKIP		1
+
+static inline uint16_t scl_cal_scale(int src, int dst, int shift)
+{
+	return ((src * 2 - 3) << (shift - 1)) / (dst - 1);
+}
+
+#define GET_SCL_FT_BILI_DN(src, dst)	scl_cal_scale(src, dst, 12)
+#define GET_SCL_FT_BILI_UP(src, dst)	scl_cal_scale(src, dst, 16)
+#define GET_SCL_FT_BIC(src, dst)	scl_cal_scale(src, dst, 16)
+
+static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
+					     int vskiplines)
+{
+	int act_height;
+
+	act_height = (src_h + vskiplines - 1) / vskiplines;
+
+	return GET_SCL_FT_BILI_DN(act_height, dst_h);
+}
+
+static inline enum scale_mode scl_get_scl_mode(int src, int dst)
+{
+	if (src < dst)
+		return SCALE_UP;
+	else if (src > dst)
+		return SCALE_DOWN;
+
+	return SCALE_NONE;
+}
+
+static inline int scl_get_vskiplines(uint32_t srch, uint32_t dsth)
+{
+	uint32_t vskiplines;
+
+	for (vskiplines = SCL_MAX_VSKIPLINES; vskiplines > 1; vskiplines /= 2)
+		if (srch >= vskiplines * dsth * MIN_SCL_FT_AFTER_VSKIP)
+			break;
+
+	return vskiplines;
+}
+
+static inline int scl_vop_cal_lb_mode(int width, bool is_yuv)
+{
+	int lb_mode;
+
+	if (width > 2560)
+		lb_mode = LB_RGB_3840X2;
+	else if (width > 1920)
+		lb_mode = LB_RGB_2560X4;
+	else if (!is_yuv)
+		lb_mode = LB_RGB_1920X5;
+	else if (width > 1280)
+		lb_mode = LB_YUV_3840X5;
+	else
+		lb_mode = LB_YUV_2560X8;
+
+	return lb_mode;
+}
+
 #endif /* _ROCKCHIP_DRM_VOP_H */
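The scaling helpers above lean on 16.16 fixed point and a power-of-two vertical line skip. A small standalone sketch of both, mirroring FRAC_16_16() and scl_get_vskiplines() with MIN_SCL_FT_AFTER_VSKIP folded in as 1; compile-and-run userspace code, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define FRAC_16_16(mult, div)	(((mult) << 16) / (div))

/* Halve from 4 down while the source still covers vskip destination lines. */
static uint32_t pick_vskiplines(uint32_t srch, uint32_t dsth)
{
	uint32_t vskip;

	for (vskip = 4; vskip > 1; vskip /= 2)
		if (srch >= vskip * dsth)
			break;
	return vskip;
}

int main(void)
{
	printf("min scale 1/8 = 0x%x\n", FRAC_16_16(1, 8));            /* 0x2000 */
	printf("max scale 8/1 = 0x%x\n", FRAC_16_16(8, 1));            /* 0x80000 */
	printf("1080 -> 240: vskip %u\n", pick_vskiplines(1080, 240)); /* 4 */
	return 0;
}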
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 859ccb6..e9272b0 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -248,7 +248,7 @@
 	lcdc_write(sdev, LDDDSR, value);
 
 	/* Setup planes. */
-	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
+	drm_for_each_legacy_plane(plane, dev) {
 		if (plane->crtc == crtc)
 			shmob_drm_plane_setup(plane);
 	}
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index f0f1e4e..e27490b 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -1,12 +1,11 @@
 sticompositor-y := \
-	sti_layer.o \
 	sti_mixer.o \
 	sti_gdp.o \
 	sti_vid.o \
 	sti_cursor.o \
 	sti_compositor.o \
-	sti_drm_crtc.o \
-	sti_drm_plane.o
+	sti_crtc.o \
+	sti_plane.o
 
 stihdmi-y := sti_hdmi.o \
 	sti_hdmi_tx3g0c55phy.o \
@@ -24,4 +23,4 @@
 	sticompositor.o \
 	sti_hqvdp.o \
 	stidvo.o \
-	sti_drm_drv.o
+	sti_drv.o
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 43215d3..c652627 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -14,10 +14,12 @@
 #include <drm/drmP.h>
 
 #include "sti_compositor.h"
-#include "sti_drm_crtc.h"
-#include "sti_drm_drv.h"
-#include "sti_drm_plane.h"
+#include "sti_crtc.h"
+#include "sti_cursor.h"
+#include "sti_drv.h"
 #include "sti_gdp.h"
+#include "sti_plane.h"
+#include "sti_vid.h"
 #include "sti_vtg.h"
 
 /*
@@ -31,7 +33,7 @@
 			{STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
 			{STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
 			{STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
-			{STI_VID_SUBDEV, (int)STI_VID_0, 0x700},
+			{STI_VID_SUBDEV, (int)STI_HQVDP_0, 0x700},
 			{STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00},
 			{STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00},
 	},
@@ -53,14 +55,29 @@
 	},
 };
 
-static int sti_compositor_init_subdev(struct sti_compositor *compo,
-		struct sti_compositor_subdev_descriptor *desc,
-		unsigned int array_size)
+static int sti_compositor_bind(struct device *dev,
+			       struct device *master,
+			       void *data)
 {
-	unsigned int i, mixer_id = 0, layer_id = 0;
+	struct sti_compositor *compo = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+	unsigned int i, mixer_id = 0, vid_id = 0, crtc_id = 0;
+	struct sti_private *dev_priv = drm_dev->dev_private;
+	struct drm_plane *cursor = NULL;
+	struct drm_plane *primary = NULL;
+	struct sti_compositor_subdev_descriptor *desc = compo->data.subdev_desc;
+	unsigned int array_size = compo->data.nb_subdev;
 
+	dev_priv->compo = compo;
+
+	/* Register mixer subdev and video subdev first */
 	for (i = 0; i < array_size; i++) {
 		switch (desc[i].type) {
+		case STI_VID_SUBDEV:
+			compo->vid[vid_id++] =
+			    sti_vid_create(compo->dev, desc[i].id,
+					   compo->regs + desc[i].offset);
+			break;
 		case STI_MIXER_MAIN_SUBDEV:
 		case STI_MIXER_AUX_SUBDEV:
 			compo->mixer[mixer_id++] =
@@ -68,83 +85,68 @@
 					     compo->regs + desc[i].offset);
 			break;
 		case STI_GPD_SUBDEV:
-		case STI_VID_SUBDEV:
 		case STI_CURSOR_SUBDEV:
-			compo->layer[layer_id++] =
-			    sti_layer_create(compo->dev, desc[i].id,
-					     compo->regs + desc[i].offset);
+			/* Nothing to do, wait for the second round */
 			break;
 		default:
 			DRM_ERROR("Unknow subdev compoment type\n");
 			return 1;
 		}
-
 	}
-	compo->nb_mixers = mixer_id;
-	compo->nb_layers = layer_id;
 
-	return 0;
-}
+	/* Register the other subdevs, create crtc and planes */
+	for (i = 0; i < array_size; i++) {
+		enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
 
-static int sti_compositor_bind(struct device *dev, struct device *master,
-	void *data)
-{
-	struct sti_compositor *compo = dev_get_drvdata(dev);
-	struct drm_device *drm_dev = data;
-	unsigned int i, crtc = 0, plane = 0;
-	struct sti_drm_private *dev_priv = drm_dev->dev_private;
-	struct drm_plane *cursor = NULL;
-	struct drm_plane *primary = NULL;
+		if (crtc_id < mixer_id)
+			plane_type = DRM_PLANE_TYPE_PRIMARY;
 
-	dev_priv->compo = compo;
-
-	for (i = 0; i < compo->nb_layers; i++) {
-		if (compo->layer[i]) {
-			enum sti_layer_desc desc = compo->layer[i]->desc;
-			enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK;
-			enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
-
-			if (crtc < compo->nb_mixers)
-				plane_type = DRM_PLANE_TYPE_PRIMARY;
-
-			switch (type) {
-			case STI_CUR:
-				cursor = sti_drm_plane_init(drm_dev,
-						compo->layer[i],
-						1, DRM_PLANE_TYPE_CURSOR);
-				break;
-			case STI_GDP:
-			case STI_VID:
-				primary = sti_drm_plane_init(drm_dev,
-						compo->layer[i],
-						(1 << compo->nb_mixers) - 1,
-						plane_type);
-				plane++;
-				break;
-			case STI_BCK:
-			case STI_VDP:
+		switch (desc[i].type) {
+		case STI_MIXER_MAIN_SUBDEV:
+		case STI_MIXER_AUX_SUBDEV:
+		case STI_VID_SUBDEV:
+			/* Nothing to do, already done in the first round */
+			break;
+		case STI_CURSOR_SUBDEV:
+			cursor = sti_cursor_create(drm_dev, compo->dev,
+						   desc[i].id,
+						   compo->regs + desc[i].offset,
+						   1);
+			if (!cursor) {
+				DRM_ERROR("Can't create CURSOR plane\n");
 				break;
 			}
-
-			/* The first planes are reserved for primary planes*/
-			if (crtc < compo->nb_mixers && primary) {
-				sti_drm_crtc_init(drm_dev, compo->mixer[crtc],
-						primary, cursor);
-				crtc++;
-				cursor = NULL;
-				primary = NULL;
+			break;
+		case STI_GPD_SUBDEV:
+			primary = sti_gdp_create(drm_dev, compo->dev,
+						 desc[i].id,
+						 compo->regs + desc[i].offset,
+						 (1 << mixer_id) - 1,
+						 plane_type);
+			if (!primary) {
+				DRM_ERROR("Can't create GDP plane\n");
+				break;
 			}
+			break;
+		default:
+			DRM_ERROR("Unknown subdev compoment type\n");
+			return 1;
+		}
+
+		/* The first planes are reserved for primary planes */
+		if (crtc_id < mixer_id && primary) {
+			sti_crtc_init(drm_dev, compo->mixer[crtc_id],
+				      primary, cursor);
+			crtc_id++;
+			cursor = NULL;
+			primary = NULL;
 		}
 	}
 
-	drm_vblank_init(drm_dev, crtc);
+	drm_vblank_init(drm_dev, crtc_id);
 	/* Allow usage of vblank without having to call drm_irq_install */
 	drm_dev->irq_enabled = 1;
 
-	DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n",
-			 crtc, plane);
-	DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not created yet\n");
-
 	return 0;
 }
 
@@ -179,7 +181,6 @@
 	struct device_node *vtg_np;
 	struct sti_compositor *compo;
 	struct resource *res;
-	int err;
 
 	compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
 	if (!compo) {
@@ -187,7 +188,7 @@
 		return -ENOMEM;
 	}
 	compo->dev = dev;
-	compo->vtg_vblank_nb.notifier_call = sti_drm_crtc_vblank_cb;
+	compo->vtg_vblank_nb.notifier_call = sti_crtc_vblank_cb;
 
 	/* populate data structure depending on compatibility */
 	BUG_ON(!of_match_node(compositor_of_match, np)->data);
@@ -251,12 +252,6 @@
 	if (vtg_np)
 		compo->vtg_aux = of_vtg_find(vtg_np);
 
-	/* Initialize compositor subdevices */
-	err = sti_compositor_init_subdev(compo, compo->data.subdev_desc,
-					 compo->data.nb_subdev);
-	if (err)
-		return err;
-
 	platform_set_drvdata(pdev, compo);
 
 	return component_add(&pdev->dev, &sti_compositor_ops);
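The rework above folds subdev init into bind() as two passes: mixers and VIDs are created first, so the mixer count is final before any plane derives its possible_crtcs mask in the second pass. A toy model of that ordering dependency (hypothetical enum and names, not the driver's types):

#include <stdio.h>

enum kind { MIXER, VID, GDP, CURSOR };

int main(void)
{
	enum kind desc[] = { GDP, MIXER, VID, MIXER, CURSOR };
	unsigned int i, n = sizeof(desc) / sizeof(desc[0]);
	unsigned int mixer_id = 0;

	/* Pass 1: only mixers (and vids) are registered */
	for (i = 0; i < n; i++)
		if (desc[i] == MIXER)
			mixer_id++;

	/* Pass 2: planes can now span every CRTC that will exist */
	for (i = 0; i < n; i++)
		if (desc[i] == GDP || desc[i] == CURSOR)
			printf("plane %u: possible_crtcs = 0x%x\n",
			       i, (1u << mixer_id) - 1);
	return 0;
}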
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 019eb44..1a4a73d 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -12,13 +12,13 @@
 #include <linux/clk.h>
 #include <linux/kernel.h>
 
-#include "sti_layer.h"
 #include "sti_mixer.h"
+#include "sti_plane.h"
 
 #define WAIT_NEXT_VSYNC_MS      50 /*ms*/
 
-#define STI_MAX_LAYER 8
 #define STI_MAX_MIXER 2
+#define STI_MAX_VID   1
 
 enum sti_compositor_subdev_type {
 	STI_MIXER_MAIN_SUBDEV,
@@ -59,11 +59,9 @@
  * @rst_main: reset control of the main path
  * @rst_aux: reset control of the aux path
  * @mixer: array of mixers
+ * @vid: array of vids
  * @vtg_main: vtg for main data path
  * @vtg_aux: vtg for auxiliary data path
- * @layer: array of layers
- * @nb_mixers: number of mixers for this compositor
- * @nb_layers: number of layers (GDP,VID,...) for this compositor
  * @vtg_vblank_nb: callback for VTG VSYNC notification
  */
 struct sti_compositor {
@@ -77,11 +75,9 @@
 	struct reset_control *rst_main;
 	struct reset_control *rst_aux;
 	struct sti_mixer *mixer[STI_MAX_MIXER];
+	struct sti_vid *vid[STI_MAX_VID];
 	struct sti_vtg *vtg_main;
 	struct sti_vtg *vtg_aux;
-	struct sti_layer *layer[STI_MAX_LAYER];
-	int nb_mixers;
-	int nb_layers;
 	struct notifier_block vtg_vblank_nb;
 };
 
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
new file mode 100644
index 0000000..018ffc9
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ *          Fabien Dessenne <fabien.dessenne@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "sti_compositor.h"
+#include "sti_crtc.h"
+#include "sti_drv.h"
+#include "sti_vid.h"
+#include "sti_vtg.h"
+
+static void sti_crtc_enable(struct drm_crtc *crtc)
+{
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+	struct device *dev = mixer->dev;
+	struct sti_compositor *compo = dev_get_drvdata(dev);
+
+	DRM_DEBUG_DRIVER("\n");
+
+	mixer->status = STI_MIXER_READY;
+
+	/* Prepare and enable the compo IP clock */
+	if (mixer->id == STI_MIXER_MAIN) {
+		if (clk_prepare_enable(compo->clk_compo_main))
+			DRM_INFO("Failed to prepare/enable compo_main clk\n");
+	} else {
+		if (clk_prepare_enable(compo->clk_compo_aux))
+			DRM_INFO("Failed to prepare/enable compo_aux clk\n");
+	}
+
+	drm_crtc_vblank_on(crtc);
+}
+
+static void sti_crtc_disabling(struct drm_crtc *crtc)
+{
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+
+	DRM_DEBUG_DRIVER("\n");
+
+	mixer->status = STI_MIXER_DISABLING;
+}
+
+static bool sti_crtc_mode_fixup(struct drm_crtc *crtc,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	/* accept the provided drm_display_mode, do not fix it up */
+	return true;
+}
+
+static int
+sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+	struct device *dev = mixer->dev;
+	struct sti_compositor *compo = dev_get_drvdata(dev);
+	struct clk *clk;
+	int rate = mode->clock * 1000;
+	int res;
+
+	DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n",
+		      crtc->base.id, sti_mixer_to_str(mixer),
+		      mode->base.id, mode->name);
+
+	DRM_DEBUG_KMS("%d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
+		      mode->vrefresh, mode->clock,
+		      mode->hdisplay,
+		      mode->hsync_start, mode->hsync_end,
+		      mode->htotal,
+		      mode->vdisplay,
+		      mode->vsync_start, mode->vsync_end,
+		      mode->vtotal, mode->type, mode->flags);
+
+	/* Set rate and prepare/enable pixel clock */
+	if (mixer->id == STI_MIXER_MAIN)
+		clk = compo->clk_pix_main;
+	else
+		clk = compo->clk_pix_aux;
+
+	res = clk_set_rate(clk, rate);
+	if (res < 0) {
+		DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate);
+		return -EINVAL;
+	}
+	if (clk_prepare_enable(clk)) {
+		DRM_ERROR("Failed to prepare/enable pix clk\n");
+		return -EINVAL;
+	}
+
+	sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
+			compo->vtg_main : compo->vtg_aux, &crtc->mode);
+
+	res = sti_mixer_active_video_area(mixer, &crtc->mode);
+	if (res) {
+		DRM_ERROR("Can't set active video area\n");
+		return -EINVAL;
+	}
+
+	return res;
+}
+
+static void sti_crtc_disable(struct drm_crtc *crtc)
+{
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+	struct device *dev = mixer->dev;
+	struct sti_compositor *compo = dev_get_drvdata(dev);
+
+	DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
+
+	/* Disable Background */
+	sti_mixer_set_background_status(mixer, false);
+
+	drm_crtc_vblank_off(crtc);
+
+	/* Disable pixel clock and compo IP clocks */
+	if (mixer->id == STI_MIXER_MAIN) {
+		clk_disable_unprepare(compo->clk_pix_main);
+		clk_disable_unprepare(compo->clk_compo_main);
+	} else {
+		clk_disable_unprepare(compo->clk_pix_aux);
+		clk_disable_unprepare(compo->clk_compo_aux);
+	}
+
+	mixer->status = STI_MIXER_DISABLED;
+}
+
+static void
+sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+	sti_crtc_enable(crtc);
+	sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
+}
+
+static void sti_crtc_atomic_begin(struct drm_crtc *crtc,
+				  struct drm_crtc_state *old_crtc_state)
+{
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+
+	if (crtc->state->event) {
+		crtc->state->event->pipe = drm_crtc_index(crtc);
+
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+		mixer->pending_event = crtc->state->event;
+		crtc->state->event = NULL;
+	}
+}
+
+static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
+				  struct drm_crtc_state *old_crtc_state)
+{
+	struct drm_device *drm_dev = crtc->dev;
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+	struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
+	struct drm_plane *p;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	/* perform plane actions */
+	list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
+		struct sti_plane *plane = to_sti_plane(p);
+
+		switch (plane->status) {
+		case STI_PLANE_UPDATED:
+			/* update planes tagged as updated */
+			DRM_DEBUG_DRIVER("update plane %s\n",
+					 sti_plane_to_str(plane));
+
+			if (sti_mixer_set_plane_depth(mixer, plane)) {
+				DRM_ERROR("Cannot set plane %s depth\n",
+					  sti_plane_to_str(plane));
+				break;
+			}
+
+			if (sti_mixer_set_plane_status(mixer, plane, true)) {
+				DRM_ERROR("Cannot enable plane %s at mixer\n",
+					  sti_plane_to_str(plane));
+				break;
+			}
+
+			/* if plane is HQVDP_0 then commit the vid[0] */
+			if (plane->desc == STI_HQVDP_0)
+				sti_vid_commit(compo->vid[0], p->state);
+
+			plane->status = STI_PLANE_READY;
+
+			break;
+		case STI_PLANE_DISABLING:
+			/* disabling sequence for planes tagged as disabling */
+			DRM_DEBUG_DRIVER("disable plane %s from mixer\n",
+					 sti_plane_to_str(plane));
+
+			if (sti_mixer_set_plane_status(mixer, plane, false)) {
+				DRM_ERROR("Cannot disable plane %s at mixer\n",
+					  sti_plane_to_str(plane));
+				continue;
+			}
+
+			if (plane->desc == STI_CURSOR)
+				/* tag plane status as disabled */
+				plane->status = STI_PLANE_DISABLED;
+			else
+				/* tag plane status as flushing */
+				plane->status = STI_PLANE_FLUSHING;
+
+			/* if plane is HQVDP_0 then disable the vid[0] */
+			if (plane->desc == STI_HQVDP_0)
+				sti_vid_disable(compo->vid[0]);
+
+			break;
+		default:
+			/* Other status cases are not handled */
+			break;
+		}
+	}
+}
+
+static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
+	.enable = sti_crtc_enable,
+	.disable = sti_crtc_disabling,
+	.mode_fixup = sti_crtc_mode_fixup,
+	.mode_set = drm_helper_crtc_mode_set,
+	.mode_set_nofb = sti_crtc_mode_set_nofb,
+	.mode_set_base = drm_helper_crtc_mode_set_base,
+	.atomic_begin = sti_crtc_atomic_begin,
+	.atomic_flush = sti_crtc_atomic_flush,
+};
+
+static void sti_crtc_destroy(struct drm_crtc *crtc)
+{
+	DRM_DEBUG_KMS("\n");
+	drm_crtc_cleanup(crtc);
+}
+
+static int sti_crtc_set_property(struct drm_crtc *crtc,
+				 struct drm_property *property,
+				 uint64_t val)
+{
+	DRM_DEBUG_KMS("\n");
+	return 0;
+}
+
+int sti_crtc_vblank_cb(struct notifier_block *nb,
+		       unsigned long event, void *data)
+{
+	struct drm_device *drm_dev;
+	struct sti_compositor *compo =
+		container_of(nb, struct sti_compositor, vtg_vblank_nb);
+	int *crtc = data;
+	unsigned long flags;
+	struct sti_private *priv;
+
+	drm_dev = compo->mixer[*crtc]->drm_crtc.dev;
+	priv = drm_dev->dev_private;
+
+	if ((event != VTG_TOP_FIELD_EVENT) &&
+	    (event != VTG_BOTTOM_FIELD_EVENT)) {
+		DRM_ERROR("unknown event: %lu\n", event);
+		return -EINVAL;
+	}
+
+	drm_handle_vblank(drm_dev, *crtc);
+
+	spin_lock_irqsave(&drm_dev->event_lock, flags);
+	if (compo->mixer[*crtc]->pending_event) {
+		drm_send_vblank_event(drm_dev, -1,
+				      compo->mixer[*crtc]->pending_event);
+		drm_vblank_put(drm_dev, *crtc);
+		compo->mixer[*crtc]->pending_event = NULL;
+	}
+	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+	if (compo->mixer[*crtc]->status == STI_MIXER_DISABLING) {
+		struct drm_plane *p;
+
+		/* Disable mixer only if all overlay planes (GDP and VDP)
+		 * are disabled */
+		list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
+			struct sti_plane *plane = to_sti_plane(p);
+
+			if ((plane->desc & STI_PLANE_TYPE_MASK) <= STI_VDP)
+				if (plane->status != STI_PLANE_DISABLED)
+					return 0;
+		}
+		sti_crtc_disable(&compo->mixer[*crtc]->drm_crtc);
+	}
+
+	return 0;
+}
+
+int sti_crtc_enable_vblank(struct drm_device *dev, int crtc)
+{
+	struct sti_private *dev_priv = dev->dev_private;
+	struct sti_compositor *compo = dev_priv->compo;
+	struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ?
+			compo->vtg_main : compo->vtg_aux,
+			vtg_vblank_nb, crtc)) {
+		DRM_ERROR("Cannot register VTG notifier\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(sti_crtc_enable_vblank);
+
+void sti_crtc_disable_vblank(struct drm_device *drm_dev, int crtc)
+{
+	struct sti_private *priv = drm_dev->dev_private;
+	struct sti_compositor *compo = priv->compo;
+	struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	if (sti_vtg_unregister_client(crtc == STI_MIXER_MAIN ?
+			compo->vtg_main : compo->vtg_aux, vtg_vblank_nb))
+		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
+
+	/* free the resources of the pending requests */
+	if (compo->mixer[crtc]->pending_event) {
+		drm_vblank_put(drm_dev, crtc);
+		compo->mixer[crtc]->pending_event = NULL;
+	}
+}
+EXPORT_SYMBOL(sti_crtc_disable_vblank);
+
+static struct drm_crtc_funcs sti_crtc_funcs = {
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
+	.destroy = sti_crtc_destroy,
+	.set_property = sti_crtc_set_property,
+	.reset = drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+bool sti_crtc_is_main(struct drm_crtc *crtc)
+{
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+
+	if (mixer->id == STI_MIXER_MAIN)
+		return true;
+
+	return false;
+}
+EXPORT_SYMBOL(sti_crtc_is_main);
+
+int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
+		  struct drm_plane *primary, struct drm_plane *cursor)
+{
+	struct drm_crtc *crtc = &mixer->drm_crtc;
+	int res;
+
+	res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
+					&sti_crtc_funcs);
+	if (res) {
+		DRM_ERROR("Can't initialze CRTC\n");
+		return -EINVAL;
+	}
+
+	drm_crtc_helper_add(crtc, &sti_crtc_helper_funcs);
+
+	DRM_DEBUG_DRIVER("drm CRTC:%d mapped to %s\n",
+			 crtc->base.id, sti_mixer_to_str(mixer));
+
+	return 0;
+}
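sti_crtc_vblank_cb() above recovers its sti_compositor from the embedded notifier_block via container_of(). A minimal userspace illustration of that embedding idiom, with hypothetical types standing in for the kernel's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block { int priority; };

struct compositor {
	int id;
	struct notifier_block vblank_nb;   /* embedded, as in sti_compositor */
};

static int vblank_cb(struct notifier_block *nb)
{
	/* Walk back from the member to the containing structure. */
	struct compositor *compo = container_of(nb, struct compositor, vblank_nb);

	printf("callback for compositor %d\n", compo->id);
	return 0;
}

int main(void)
{
	struct compositor c = { .id = 7 };

	return vblank_cb(&c.vblank_nb);
}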
diff --git a/drivers/gpu/drm/sti/sti_crtc.h b/drivers/gpu/drm/sti/sti_crtc.h
new file mode 100644
index 0000000..51963e6
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_crtc.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_CRTC_H_
+#define _STI_CRTC_H_
+
+#include <drm/drmP.h>
+
+struct sti_mixer;
+
+int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
+		  struct drm_plane *primary, struct drm_plane *cursor);
+int sti_crtc_enable_vblank(struct drm_device *dev, int crtc);
+void sti_crtc_disable_vblank(struct drm_device *dev, int crtc);
+int sti_crtc_vblank_cb(struct notifier_block *nb,
+		       unsigned long event, void *data);
+bool sti_crtc_is_main(struct drm_crtc *drm_crtc);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 010eaee..dd10321 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -7,8 +7,14 @@
  */
 #include <drm/drmP.h>
 
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "sti_compositor.h"
 #include "sti_cursor.h"
-#include "sti_layer.h"
+#include "sti_plane.h"
 #include "sti_vtg.h"
 
 /* Registers */
@@ -42,15 +48,19 @@
 /**
  * STI Cursor structure
  *
- * @layer:      layer structure
- * @width:      cursor width
- * @height:     cursor height
- * @clut:       color look up table
- * @clut_paddr: color look up table physical address
- * @pixmap:     pixmap dma buffer (clut8-format cursor)
+ * @sti_plane:    sti_plane structure
+ * @dev:          driver device
+ * @regs:         cursor registers
+ * @width:        cursor width
+ * @height:       cursor height
+ * @clut:         color look up table
+ * @clut_paddr:   color look up table physical address
+ * @pixmap:       pixmap dma buffer (clut8-format cursor)
  */
 struct sti_cursor {
-	struct sti_layer layer;
+	struct sti_plane plane;
+	struct device *dev;
+	void __iomem *regs;
 	unsigned int width;
 	unsigned int height;
 	unsigned short *clut;
@@ -62,22 +72,10 @@
 	DRM_FORMAT_ARGB8888,
 };
 
-#define to_sti_cursor(x) container_of(x, struct sti_cursor, layer)
+#define to_sti_cursor(x) container_of(x, struct sti_cursor, plane)
 
-static const uint32_t *sti_cursor_get_formats(struct sti_layer *layer)
+static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
 {
-	return cursor_supported_formats;
-}
-
-static unsigned int sti_cursor_get_nb_formats(struct sti_layer *layer)
-{
-	return ARRAY_SIZE(cursor_supported_formats);
-}
-
-static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer)
-{
-	struct sti_cursor *cursor = to_sti_cursor(layer);
-	u32 *src = layer->vaddr;
 	u8  *dst = cursor->pixmap.base;
 	unsigned int i, j;
 	u32 a, r, g, b;
@@ -96,101 +94,8 @@
 	}
 }
 
-static int sti_cursor_prepare_layer(struct sti_layer *layer, bool first_prepare)
+static void sti_cursor_init(struct sti_cursor *cursor)
 {
-	struct sti_cursor *cursor = to_sti_cursor(layer);
-	struct drm_display_mode *mode = layer->mode;
-	u32 y, x;
-	u32 val;
-
-	DRM_DEBUG_DRIVER("\n");
-
-	dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
-
-	if (layer->src_w < STI_CURS_MIN_SIZE ||
-	    layer->src_h < STI_CURS_MIN_SIZE ||
-	    layer->src_w > STI_CURS_MAX_SIZE ||
-	    layer->src_h > STI_CURS_MAX_SIZE) {
-		DRM_ERROR("Invalid cursor size (%dx%d)\n",
-				layer->src_w, layer->src_h);
-		return -EINVAL;
-	}
-
-	/* If the cursor size has changed, re-allocated the pixmap */
-	if (!cursor->pixmap.base ||
-	    (cursor->width != layer->src_w) ||
-	    (cursor->height != layer->src_h)) {
-		cursor->width = layer->src_w;
-		cursor->height = layer->src_h;
-
-		if (cursor->pixmap.base)
-			dma_free_writecombine(layer->dev,
-					      cursor->pixmap.size,
-					      cursor->pixmap.base,
-					      cursor->pixmap.paddr);
-
-		cursor->pixmap.size = cursor->width * cursor->height;
-
-		cursor->pixmap.base = dma_alloc_writecombine(layer->dev,
-							cursor->pixmap.size,
-							&cursor->pixmap.paddr,
-							GFP_KERNEL | GFP_DMA);
-		if (!cursor->pixmap.base) {
-			DRM_ERROR("Failed to allocate memory for pixmap\n");
-			return -ENOMEM;
-		}
-	}
-
-	/* Convert ARGB8888 to CLUT8 */
-	sti_cursor_argb8888_to_clut8(layer);
-
-	/* AWS and AWE depend on the mode */
-	y = sti_vtg_get_line_number(*mode, 0);
-	x = sti_vtg_get_pixel_number(*mode, 0);
-	val = y << 16 | x;
-	writel(val, layer->regs + CUR_AWS);
-	y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
-	x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
-	val = y << 16 | x;
-	writel(val, layer->regs + CUR_AWE);
-
-	if (first_prepare) {
-		/* Set and fetch CLUT */
-		writel(cursor->clut_paddr, layer->regs + CUR_CML);
-		writel(CUR_CTL_CLUT_UPDATE, layer->regs + CUR_CTL);
-	}
-
-	return 0;
-}
-
-static int sti_cursor_commit_layer(struct sti_layer *layer)
-{
-	struct sti_cursor *cursor = to_sti_cursor(layer);
-	struct drm_display_mode *mode = layer->mode;
-	u32 ydo, xdo;
-
-	dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
-
-	/* Set memory location, size, and position */
-	writel(cursor->pixmap.paddr, layer->regs + CUR_PML);
-	writel(cursor->width, layer->regs + CUR_PMP);
-	writel(cursor->height << 16 | cursor->width, layer->regs + CUR_SIZE);
-
-	ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
-	xdo = sti_vtg_get_pixel_number(*mode, layer->dst_y);
-	writel((ydo << 16) | xdo, layer->regs + CUR_VPO);
-
-	return 0;
-}
-
-static int sti_cursor_disable_layer(struct sti_layer *layer)
-{
-	return 0;
-}
-
-static void sti_cursor_init(struct sti_layer *layer)
-{
-	struct sti_cursor *cursor = to_sti_cursor(layer);
 	unsigned short *base = cursor->clut;
 	unsigned int a, r, g, b;
 
@@ -205,18 +110,139 @@
 						  (b * 5);
 }
 
-static const struct sti_layer_funcs cursor_ops = {
-	.get_formats = sti_cursor_get_formats,
-	.get_nb_formats = sti_cursor_get_nb_formats,
-	.init = sti_cursor_init,
-	.prepare = sti_cursor_prepare_layer,
-	.commit = sti_cursor_commit_layer,
-	.disable = sti_cursor_disable_layer,
+static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
+				     struct drm_plane_state *oldstate)
+{
+	struct drm_plane_state *state = drm_plane->state;
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_cursor *cursor = to_sti_cursor(plane);
+	struct drm_crtc *crtc = state->crtc;
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_display_mode *mode = &crtc->mode;
+	int dst_x = state->crtc_x;
+	int dst_y = state->crtc_y;
+	int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+	int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	/* src_x are in 16.16 format */
+	int src_w = state->src_w >> 16;
+	int src_h = state->src_h >> 16;
+	bool first_prepare = plane->status == STI_PLANE_DISABLED;
+	struct drm_gem_cma_object *cma_obj;
+	u32 y, x;
+	u32 val;
+
+	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+		      crtc->base.id, sti_mixer_to_str(mixer),
+		      drm_plane->base.id, sti_plane_to_str(plane));
+	DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
+
+	dev_dbg(cursor->dev, "%s %s\n", __func__,
+		sti_plane_to_str(plane));
+
+	if (src_w < STI_CURS_MIN_SIZE ||
+	    src_h < STI_CURS_MIN_SIZE ||
+	    src_w > STI_CURS_MAX_SIZE ||
+	    src_h > STI_CURS_MAX_SIZE) {
+		DRM_ERROR("Invalid cursor size (%dx%d)\n",
+				src_w, src_h);
+		return;
+	}
+
+	/* If the cursor size has changed, re-allocate the pixmap */
+	if (!cursor->pixmap.base ||
+	    (cursor->width != src_w) ||
+	    (cursor->height != src_h)) {
+		cursor->width = src_w;
+		cursor->height = src_h;
+
+		if (cursor->pixmap.base)
+			dma_free_writecombine(cursor->dev,
+					      cursor->pixmap.size,
+					      cursor->pixmap.base,
+					      cursor->pixmap.paddr);
+
+		cursor->pixmap.size = cursor->width * cursor->height;
+
+		cursor->pixmap.base = dma_alloc_writecombine(cursor->dev,
+							cursor->pixmap.size,
+							&cursor->pixmap.paddr,
+							GFP_KERNEL | GFP_DMA);
+		if (!cursor->pixmap.base) {
+			DRM_ERROR("Failed to allocate memory for pixmap\n");
+			return;
+		}
+	}
+
+	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+	if (!cma_obj) {
+		DRM_ERROR("Can't get CMA GEM object for fb\n");
+		return;
+	}
+
+	/* Convert ARGB8888 to CLUT8 */
+	sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr);
+
+	/* AWS and AWE depend on the mode */
+	y = sti_vtg_get_line_number(*mode, 0);
+	x = sti_vtg_get_pixel_number(*mode, 0);
+	val = y << 16 | x;
+	writel(val, cursor->regs + CUR_AWS);
+	y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
+	x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
+	val = y << 16 | x;
+	writel(val, cursor->regs + CUR_AWE);
+
+	if (first_prepare) {
+		/* Set and fetch CLUT */
+		writel(cursor->clut_paddr, cursor->regs + CUR_CML);
+		writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
+	}
+
+	/* Set memory location, size, and position */
+	writel(cursor->pixmap.paddr, cursor->regs + CUR_PML);
+	writel(cursor->width, cursor->regs + CUR_PMP);
+	writel(cursor->height << 16 | cursor->width, cursor->regs + CUR_SIZE);
+
+	y = sti_vtg_get_line_number(*mode, dst_y);
+	x = sti_vtg_get_pixel_number(*mode, dst_x);
+	writel((y << 16) | x, cursor->regs + CUR_VPO);
+
+	plane->status = STI_PLANE_UPDATED;
+}
+
+static void sti_cursor_atomic_disable(struct drm_plane *drm_plane,
+				      struct drm_plane_state *oldstate)
+{
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
+
+	if (!drm_plane->crtc) {
+		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
+				 drm_plane->base.id);
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+			 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+			 drm_plane->base.id, sti_plane_to_str(plane));
+
+	plane->status = STI_PLANE_DISABLING;
+}
+
+static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
+	.atomic_update = sti_cursor_atomic_update,
+	.atomic_disable = sti_cursor_atomic_disable,
 };
 
-struct sti_layer *sti_cursor_create(struct device *dev)
+struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
+				    struct device *dev, int desc,
+				    void __iomem *baseaddr,
+				    unsigned int possible_crtcs)
 {
 	struct sti_cursor *cursor;
+	size_t size;
+	int res;
 
 	cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL);
 	if (!cursor) {
@@ -225,18 +251,43 @@
 	}
 
 	/* Allocate clut buffer */
-	cursor->clut = dma_alloc_writecombine(dev,
-			0x100 * sizeof(unsigned short),
-			&cursor->clut_paddr,
-			GFP_KERNEL | GFP_DMA);
+	size = 0x100 * sizeof(unsigned short);
+	cursor->clut = dma_alloc_writecombine(dev, size, &cursor->clut_paddr,
+					      GFP_KERNEL | GFP_DMA);
 
 	if (!cursor->clut) {
 		DRM_ERROR("Failed to allocate memory for cursor clut\n");
-		devm_kfree(dev, cursor);
-		return NULL;
+		goto err_clut;
 	}
 
-	cursor->layer.ops = &cursor_ops;
+	cursor->dev = dev;
+	cursor->regs = baseaddr;
+	cursor->plane.desc = desc;
+	cursor->plane.status = STI_PLANE_DISABLED;
 
-	return (struct sti_layer *)cursor;
+	sti_cursor_init(cursor);
+
+	res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane,
+				       possible_crtcs,
+				       &sti_plane_helpers_funcs,
+				       cursor_supported_formats,
+				       ARRAY_SIZE(cursor_supported_formats),
+				       DRM_PLANE_TYPE_CURSOR);
+	if (res) {
+		DRM_ERROR("Failed to initialize universal plane\n");
+		goto err_plane;
+	}
+
+	drm_plane_helper_add(&cursor->plane.drm_plane,
+			     &sti_cursor_helpers_funcs);
+
+	sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
+
+	return &cursor->plane.drm_plane;
+
+err_plane:
+	dma_free_writecombine(dev, size, cursor->clut, cursor->clut_paddr);
+err_clut:
+	devm_kfree(dev, cursor);
+	return NULL;
 }
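The atomic_update path above frees and reallocates the CLUT8 pixmap only when the cursor dimensions actually change. The same reallocate-on-resize guard, sketched standalone with malloc() standing in for dma_alloc_writecombine():

#include <stdlib.h>

struct pixmap {
	void *base;
	size_t size;
	unsigned int width, height;
};

/* Returns 0 on success, -1 on allocation failure. */
static int pixmap_resize(struct pixmap *p, unsigned int w, unsigned int h)
{
	if (p->base && p->width == w && p->height == h)
		return 0;                    /* same size: keep the buffer */

	free(p->base);
	p->base = malloc((size_t)w * h);     /* 1 byte per CLUT8 pixel */
	if (!p->base)
		return -1;
	p->size = (size_t)w * h;
	p->width = w;
	p->height = h;
	return 0;
}

int main(void)
{
	struct pixmap p = { 0 };

	return pixmap_resize(&p, 128, 128);
}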
diff --git a/drivers/gpu/drm/sti/sti_cursor.h b/drivers/gpu/drm/sti/sti_cursor.h
index 3c98274..2ee5c10 100644
--- a/drivers/gpu/drm/sti/sti_cursor.h
+++ b/drivers/gpu/drm/sti/sti_cursor.h
@@ -7,6 +7,9 @@
 #ifndef _STI_CURSOR_H_
 #define _STI_CURSOR_H_
 
-struct sti_layer *sti_cursor_create(struct device *dev);
+struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
+				    struct device *dev, int desc,
+				    void __iomem *baseaddr,
+				    unsigned int possible_crtcs);
 
 #endif
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c
deleted file mode 100644
index 6b641c5..0000000
--- a/drivers/gpu/drm/sti/sti_drm_crtc.c
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
- *          Fabien Dessenne <fabien.dessenne@st.com>
- *          for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#include <linux/clk.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
-
-#include "sti_compositor.h"
-#include "sti_drm_drv.h"
-#include "sti_drm_crtc.h"
-#include "sti_vtg.h"
-
-static void sti_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-	DRM_DEBUG_KMS("\n");
-}
-
-static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
-{
-	struct sti_mixer *mixer = to_sti_mixer(crtc);
-	struct device *dev = mixer->dev;
-	struct sti_compositor *compo = dev_get_drvdata(dev);
-
-	mixer->enabled = true;
-
-	/* Prepare and enable the compo IP clock */
-	if (mixer->id == STI_MIXER_MAIN) {
-		if (clk_prepare_enable(compo->clk_compo_main))
-			DRM_INFO("Failed to prepare/enable compo_main clk\n");
-	} else {
-		if (clk_prepare_enable(compo->clk_compo_aux))
-			DRM_INFO("Failed to prepare/enable compo_aux clk\n");
-	}
-
-	sti_mixer_clear_all_layers(mixer);
-}
-
-static void sti_drm_crtc_commit(struct drm_crtc *crtc)
-{
-	struct sti_mixer *mixer = to_sti_mixer(crtc);
-	struct device *dev = mixer->dev;
-	struct sti_compositor *compo = dev_get_drvdata(dev);
-	struct sti_layer *layer;
-
-	if ((!mixer || !compo)) {
-		DRM_ERROR("Can not find mixer or compositor)\n");
-		return;
-	}
-
-	/* get GDP which is reserved to the CRTC FB */
-	layer = to_sti_layer(crtc->primary);
-	if (layer)
-		sti_layer_commit(layer);
-	else
-		DRM_ERROR("Can not find CRTC dedicated plane (GDP0)\n");
-
-	/* Enable layer on mixer */
-	if (sti_mixer_set_layer_status(mixer, layer, true))
-		DRM_ERROR("Can not enable layer at mixer\n");
-
-	drm_crtc_vblank_on(crtc);
-}
-
-static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc,
-				    const struct drm_display_mode *mode,
-				    struct drm_display_mode *adjusted_mode)
-{
-	/* accept the provided drm_display_mode, do not fix it up */
-	return true;
-}
-
-static int
-sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
-{
-	struct sti_mixer *mixer = to_sti_mixer(crtc);
-	struct device *dev = mixer->dev;
-	struct sti_compositor *compo = dev_get_drvdata(dev);
-	struct clk *clk;
-	int rate = mode->clock * 1000;
-	int res;
-
-	DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n",
-		      crtc->base.id, sti_mixer_to_str(mixer),
-		      mode->base.id, mode->name);
-
-	DRM_DEBUG_KMS("%d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
-		      mode->vrefresh, mode->clock,
-		      mode->hdisplay,
-		      mode->hsync_start, mode->hsync_end,
-		      mode->htotal,
-		      mode->vdisplay,
-		      mode->vsync_start, mode->vsync_end,
-		      mode->vtotal, mode->type, mode->flags);
-
-	/* Set rate and prepare/enable pixel clock */
-	if (mixer->id == STI_MIXER_MAIN)
-		clk = compo->clk_pix_main;
-	else
-		clk = compo->clk_pix_aux;
-
-	res = clk_set_rate(clk, rate);
-	if (res < 0) {
-		DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate);
-		return -EINVAL;
-	}
-	if (clk_prepare_enable(clk)) {
-		DRM_ERROR("Failed to prepare/enable pix clk\n");
-		return -EINVAL;
-	}
-
-	sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
-			compo->vtg_main : compo->vtg_aux, &crtc->mode);
-
-	res = sti_mixer_active_video_area(mixer, &crtc->mode);
-	if (res) {
-		DRM_ERROR("Can not set active video area\n");
-		return -EINVAL;
-	}
-
-	return res;
-}
-
-static void sti_drm_crtc_disable(struct drm_crtc *crtc)
-{
-	struct sti_mixer *mixer = to_sti_mixer(crtc);
-	struct device *dev = mixer->dev;
-	struct sti_compositor *compo = dev_get_drvdata(dev);
-
-	if (!mixer->enabled)
-		return;
-
-	DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
-
-	/* Disable Background */
-	sti_mixer_set_background_status(mixer, false);
-
-	drm_crtc_vblank_off(crtc);
-
-	/* Disable pixel clock and compo IP clocks */
-	if (mixer->id == STI_MIXER_MAIN) {
-		clk_disable_unprepare(compo->clk_pix_main);
-		clk_disable_unprepare(compo->clk_compo_main);
-	} else {
-		clk_disable_unprepare(compo->clk_pix_aux);
-		clk_disable_unprepare(compo->clk_compo_aux);
-	}
-
-	mixer->enabled = false;
-}
-
-static void
-sti_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
-{
-	sti_drm_crtc_prepare(crtc);
-	sti_drm_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
-}
-
-static void sti_drm_atomic_begin(struct drm_crtc *crtc)
-{
-	struct sti_mixer *mixer = to_sti_mixer(crtc);
-
-	if (crtc->state->event) {
-		crtc->state->event->pipe = drm_crtc_index(crtc);
-
-		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-
-		mixer->pending_event = crtc->state->event;
-		crtc->state->event = NULL;
-	}
-}
-
-static void sti_drm_atomic_flush(struct drm_crtc *crtc)
-{
-}
-
-static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
-	.dpms = sti_drm_crtc_dpms,
-	.prepare = sti_drm_crtc_prepare,
-	.commit = sti_drm_crtc_commit,
-	.mode_fixup = sti_drm_crtc_mode_fixup,
-	.mode_set = drm_helper_crtc_mode_set,
-	.mode_set_nofb = sti_drm_crtc_mode_set_nofb,
-	.mode_set_base = drm_helper_crtc_mode_set_base,
-	.disable = sti_drm_crtc_disable,
-	.atomic_begin = sti_drm_atomic_begin,
-	.atomic_flush = sti_drm_atomic_flush,
-};
-
-static void sti_drm_crtc_destroy(struct drm_crtc *crtc)
-{
-	DRM_DEBUG_KMS("\n");
-	drm_crtc_cleanup(crtc);
-}
-
-static int sti_drm_crtc_set_property(struct drm_crtc *crtc,
-				     struct drm_property *property,
-				     uint64_t val)
-{
-	DRM_DEBUG_KMS("\n");
-	return 0;
-}
-
-int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
-			   unsigned long event, void *data)
-{
-	struct drm_device *drm_dev;
-	struct sti_compositor *compo =
-		container_of(nb, struct sti_compositor, vtg_vblank_nb);
-	int *crtc = data;
-	unsigned long flags;
-	struct sti_drm_private *priv;
-
-	drm_dev = compo->mixer[*crtc]->drm_crtc.dev;
-	priv = drm_dev->dev_private;
-
-	if ((event != VTG_TOP_FIELD_EVENT) &&
-	    (event != VTG_BOTTOM_FIELD_EVENT)) {
-		DRM_ERROR("unknown event: %lu\n", event);
-		return -EINVAL;
-	}
-
-	drm_handle_vblank(drm_dev, *crtc);
-
-	spin_lock_irqsave(&drm_dev->event_lock, flags);
-	if (compo->mixer[*crtc]->pending_event) {
-		drm_send_vblank_event(drm_dev, -1,
-				compo->mixer[*crtc]->pending_event);
-		drm_vblank_put(drm_dev, *crtc);
-		compo->mixer[*crtc]->pending_event = NULL;
-	}
-	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-
-	return 0;
-}
-
-int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
-{
-	struct sti_drm_private *dev_priv = dev->dev_private;
-	struct sti_compositor *compo = dev_priv->compo;
-	struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
-
-	if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ?
-			compo->vtg_main : compo->vtg_aux,
-			vtg_vblank_nb, crtc)) {
-		DRM_ERROR("Cannot register VTG notifier\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(sti_drm_crtc_enable_vblank);
-
-void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
-{
-	struct sti_drm_private *priv = dev->dev_private;
-	struct sti_compositor *compo = priv->compo;
-	struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
-
-	DRM_DEBUG_DRIVER("\n");
-
-	if (sti_vtg_unregister_client(crtc == STI_MIXER_MAIN ?
-			compo->vtg_main : compo->vtg_aux, vtg_vblank_nb))
-		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
-
-	/* free the resources of the pending requests */
-	if (compo->mixer[crtc]->pending_event) {
-		drm_vblank_put(dev, crtc);
-		compo->mixer[crtc]->pending_event = NULL;
-	}
-}
-EXPORT_SYMBOL(sti_drm_crtc_disable_vblank);
-
-static struct drm_crtc_funcs sti_crtc_funcs = {
-	.set_config = drm_atomic_helper_set_config,
-	.page_flip = drm_atomic_helper_page_flip,
-	.destroy = sti_drm_crtc_destroy,
-	.set_property = sti_drm_crtc_set_property,
-	.reset = drm_atomic_helper_crtc_reset,
-	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
-};
-
-bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
-{
-	struct sti_mixer *mixer = to_sti_mixer(crtc);
-
-	if (mixer->id == STI_MIXER_MAIN)
-		return true;
-
-	return false;
-}
-EXPORT_SYMBOL(sti_drm_crtc_is_main);
-
-int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
-		struct drm_plane *primary, struct drm_plane *cursor)
-{
-	struct drm_crtc *crtc = &mixer->drm_crtc;
-	int res;
-
-	res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
-			&sti_crtc_funcs);
-	if (res) {
-		DRM_ERROR("Can not initialze CRTC\n");
-		return -EINVAL;
-	}
-
-	drm_crtc_helper_add(crtc, &sti_crtc_helper_funcs);
-
-	DRM_DEBUG_DRIVER("drm CRTC:%d mapped to %s\n",
-			 crtc->base.id, sti_mixer_to_str(mixer));
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.h b/drivers/gpu/drm/sti/sti_drm_crtc.h
deleted file mode 100644
index caca8b1..0000000
--- a/drivers/gpu/drm/sti/sti_drm_crtc.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_DRM_CRTC_H_
-#define _STI_DRM_CRTC_H_
-
-#include <drm/drmP.h>
-
-struct sti_mixer;
-
-int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
-		struct drm_plane *primary, struct drm_plane *cursor);
-int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
-void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
-int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
-		unsigned long event, void *data);
-bool sti_drm_crtc_is_main(struct drm_crtc *drm_crtc);
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drm_drv.c
deleted file mode 100644
index 59d558b..0000000
--- a/drivers/gpu/drm/sti/sti_drm_drv.c
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#include <drm/drmP.h>
-
-#include <linux/component.h>
-#include <linux/debugfs.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-
-#include "sti_drm_drv.h"
-#include "sti_drm_crtc.h"
-
-#define DRIVER_NAME	"sti"
-#define DRIVER_DESC	"STMicroelectronics SoC DRM"
-#define DRIVER_DATE	"20140601"
-#define DRIVER_MAJOR	1
-#define DRIVER_MINOR	0
-
-#define STI_MAX_FB_HEIGHT	4096
-#define STI_MAX_FB_WIDTH	4096
-
-static void sti_drm_atomic_schedule(struct sti_drm_private *private,
-				  struct drm_atomic_state *state)
-{
-	private->commit.state = state;
-	schedule_work(&private->commit.work);
-}
-
-static void sti_drm_atomic_complete(struct sti_drm_private *private,
-				  struct drm_atomic_state *state)
-{
-	struct drm_device *drm = private->drm_dev;
-
-	/*
-	 * Everything below can be run asynchronously without the need to grab
-	 * any modeset locks at all under one condition: It must be guaranteed
-	 * that the asynchronous work has either been cancelled (if the driver
-	 * supports it, which at least requires that the framebuffers get
-	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
-	 * before the new state gets committed on the software side with
-	 * drm_atomic_helper_swap_state().
-	 *
-	 * This scheme allows new atomic state updates to be prepared and
-	 * checked in parallel to the asynchronous completion of the previous
-	 * update. Which is important since compositors need to figure out the
-	 * composition of the next frame right after having submitted the
-	 * current layout.
-	 */
-
-	drm_atomic_helper_commit_modeset_disables(drm, state);
-	drm_atomic_helper_commit_planes(drm, state);
-	drm_atomic_helper_commit_modeset_enables(drm, state);
-
-	drm_atomic_helper_wait_for_vblanks(drm, state);
-
-	drm_atomic_helper_cleanup_planes(drm, state);
-	drm_atomic_state_free(state);
-}
-
-static void sti_drm_atomic_work(struct work_struct *work)
-{
-	struct sti_drm_private *private = container_of(work,
-			struct sti_drm_private, commit.work);
-
-	sti_drm_atomic_complete(private, private->commit.state);
-}
-
-static int sti_drm_atomic_commit(struct drm_device *drm,
-			       struct drm_atomic_state *state, bool async)
-{
-	struct sti_drm_private *private = drm->dev_private;
-	int err;
-
-	err = drm_atomic_helper_prepare_planes(drm, state);
-	if (err)
-		return err;
-
-	/* serialize outstanding asynchronous commits */
-	mutex_lock(&private->commit.lock);
-	flush_work(&private->commit.work);
-
-	/*
-	 * This is the point of no return - everything below never fails except
-	 * when the hw goes bonghits. Which means we can commit the new state on
-	 * the software side now.
-	 */
-
-	drm_atomic_helper_swap_state(drm, state);
-
-	if (async)
-		sti_drm_atomic_schedule(private, state);
-	else
-		sti_drm_atomic_complete(private, state);
-
-	mutex_unlock(&private->commit.lock);
-	return 0;
-}
-
-static struct drm_mode_config_funcs sti_drm_mode_config_funcs = {
-	.fb_create = drm_fb_cma_create,
-	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = sti_drm_atomic_commit,
-};
-
-static void sti_drm_mode_config_init(struct drm_device *dev)
-{
-	dev->mode_config.min_width = 0;
-	dev->mode_config.min_height = 0;
-
-	/*
-	 * set max width and height as default value.
-	 * this value would be used to check framebuffer size limitation
-	 * at drm_mode_addfb().
-	 */
-	dev->mode_config.max_width = STI_MAX_FB_HEIGHT;
-	dev->mode_config.max_height = STI_MAX_FB_WIDTH;
-
-	dev->mode_config.funcs = &sti_drm_mode_config_funcs;
-}
-
-static int sti_drm_load(struct drm_device *dev, unsigned long flags)
-{
-	struct sti_drm_private *private;
-	int ret;
-
-	private = kzalloc(sizeof(struct sti_drm_private), GFP_KERNEL);
-	if (!private) {
-		DRM_ERROR("Failed to allocate private\n");
-		return -ENOMEM;
-	}
-	dev->dev_private = (void *)private;
-	private->drm_dev = dev;
-
-	mutex_init(&private->commit.lock);
-	INIT_WORK(&private->commit.work, sti_drm_atomic_work);
-
-	drm_mode_config_init(dev);
-	drm_kms_helper_poll_init(dev);
-
-	sti_drm_mode_config_init(dev);
-
-	ret = component_bind_all(dev->dev, dev);
-	if (ret) {
-		drm_kms_helper_poll_fini(dev);
-		drm_mode_config_cleanup(dev);
-		kfree(private);
-		return ret;
-	}
-
-	drm_mode_config_reset(dev);
-
-#ifdef CONFIG_DRM_STI_FBDEV
-	drm_fbdev_cma_init(dev, 32,
-		   dev->mode_config.num_crtc,
-		   dev->mode_config.num_connector);
-#endif
-	return 0;
-}
-
-static const struct file_operations sti_drm_driver_fops = {
-	.owner = THIS_MODULE,
-	.open = drm_open,
-	.mmap = drm_gem_cma_mmap,
-	.poll = drm_poll,
-	.read = drm_read,
-	.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl = drm_compat_ioctl,
-#endif
-	.release = drm_release,
-};
-
-static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev,
-						struct drm_gem_object *obj,
-						int flags)
-{
-	/* we want to be able to write in mmapped buffer */
-	flags |= O_RDWR;
-	return drm_gem_prime_export(dev, obj, flags);
-}
-
-static struct drm_driver sti_drm_driver = {
-	.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
-	    DRIVER_GEM | DRIVER_PRIME,
-	.load = sti_drm_load,
-	.gem_free_object = drm_gem_cma_free_object,
-	.gem_vm_ops = &drm_gem_cma_vm_ops,
-	.dumb_create = drm_gem_cma_dumb_create,
-	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
-	.dumb_destroy = drm_gem_dumb_destroy,
-	.fops = &sti_drm_driver_fops,
-
-	.get_vblank_counter = drm_vblank_count,
-	.enable_vblank = sti_drm_crtc_enable_vblank,
-	.disable_vblank = sti_drm_crtc_disable_vblank,
-
-	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-	.gem_prime_export = sti_drm_gem_prime_export,
-	.gem_prime_import = drm_gem_prime_import,
-	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
-	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
-	.gem_prime_vmap = drm_gem_cma_prime_vmap,
-	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
-	.gem_prime_mmap = drm_gem_cma_prime_mmap,
-
-	.name = DRIVER_NAME,
-	.desc = DRIVER_DESC,
-	.date = DRIVER_DATE,
-	.major = DRIVER_MAJOR,
-	.minor = DRIVER_MINOR,
-};
-
-static int compare_of(struct device *dev, void *data)
-{
-	return dev->of_node == data;
-}
-
-static int sti_drm_bind(struct device *dev)
-{
-	return drm_platform_init(&sti_drm_driver, to_platform_device(dev));
-}
-
-static void sti_drm_unbind(struct device *dev)
-{
-	drm_put_dev(dev_get_drvdata(dev));
-}
-
-static const struct component_master_ops sti_drm_ops = {
-	.bind = sti_drm_bind,
-	.unbind = sti_drm_unbind,
-};
-
-static int sti_drm_master_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct device_node *node = dev->parent->of_node;
-	struct device_node *child_np;
-	struct component_match *match = NULL;
-
-	dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
-
-	child_np = of_get_next_available_child(node, NULL);
-
-	while (child_np) {
-		component_match_add(dev, &match, compare_of, child_np);
-		of_node_put(child_np);
-		child_np = of_get_next_available_child(node, child_np);
-	}
-
-	return component_master_add_with_match(dev, &sti_drm_ops, match);
-}
-
-static int sti_drm_master_remove(struct platform_device *pdev)
-{
-	component_master_del(&pdev->dev, &sti_drm_ops);
-	return 0;
-}
-
-static struct platform_driver sti_drm_master_driver = {
-	.probe = sti_drm_master_probe,
-	.remove = sti_drm_master_remove,
-	.driver = {
-		.name = DRIVER_NAME "__master",
-	},
-};
-
-static int sti_drm_platform_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct device_node *node = dev->of_node;
-	struct platform_device *master;
-
-	of_platform_populate(node, NULL, NULL, dev);
-
-	platform_driver_register(&sti_drm_master_driver);
-	master = platform_device_register_resndata(dev,
-			DRIVER_NAME "__master", -1,
-			NULL, 0, NULL, 0);
-	if (IS_ERR(master))
-               return PTR_ERR(master);
-
-	platform_set_drvdata(pdev, master);
-	return 0;
-}
-
-static int sti_drm_platform_remove(struct platform_device *pdev)
-{
-	struct platform_device *master = platform_get_drvdata(pdev);
-
-	of_platform_depopulate(&pdev->dev);
-	platform_device_unregister(master);
-	platform_driver_unregister(&sti_drm_master_driver);
-	return 0;
-}
-
-static const struct of_device_id sti_drm_dt_ids[] = {
-	{ .compatible = "st,sti-display-subsystem", },
-	{ /* end node */ },
-};
-MODULE_DEVICE_TABLE(of, sti_drm_dt_ids);
-
-static struct platform_driver sti_drm_platform_driver = {
-	.probe = sti_drm_platform_probe,
-	.remove = sti_drm_platform_remove,
-	.driver = {
-		.name = DRIVER_NAME,
-		.of_match_table = sti_drm_dt_ids,
-	},
-};
-
-module_platform_driver(sti_drm_platform_driver);
-
-MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
-MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
-MODULE_LICENSE("GPL");
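The deleted sti_drm_atomic_commit() above (which lives on in sti_drv.c) serializes commits with a mutex plus a flush of the previous worker before the state swap, the "point of no return". A compressed standalone sketch of that ordering, using pthreads and stub helpers in place of the DRM atomic helpers:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t commit_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stubs standing in for the DRM helpers named in the deleted code. */
static int  prepare(void *s)  { (void)s; return 0; }            /* prepare_planes */
static void flush_prev(void)  { }                               /* flush_work */
static void publish(void *s)  { (void)s; }                      /* swap_state */
static void complete(void *s) { (void)s; printf("committed\n"); }

static int commit(void *state)
{
	if (prepare(state))             /* may fail; nothing published yet */
		return -1;

	pthread_mutex_lock(&commit_lock);
	flush_prev();                   /* wait out the previous async commit */
	publish(state);                 /* point of no return */
	complete(state);                /* or schedule_work() when async */
	pthread_mutex_unlock(&commit_lock);
	return 0;
}

int main(void)
{
	int state;

	return commit(&state);
}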
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.h b/drivers/gpu/drm/sti/sti_drm_drv.h
deleted file mode 100644
index c413aa3..0000000
--- a/drivers/gpu/drm/sti/sti_drm_drv.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_DRM_DRV_H_
-#define _STI_DRM_DRV_H_
-
-#include <drm/drmP.h>
-
-struct sti_compositor;
-struct sti_tvout;
-
-/**
- * STI drm private structure
- * This structure is stored as private in the drm_device
- *
- * @compo:                 compositor
- * @plane_zorder_property: z-order property for CRTC planes
- * @drm_dev:               drm device
- */
-struct sti_drm_private {
-	struct sti_compositor *compo;
-	struct drm_property *plane_zorder_property;
-	struct drm_device *drm_dev;
-
-	struct {
-		struct drm_atomic_state *state;
-		struct work_struct work;
-		struct mutex lock;
-	} commit;
-};
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
deleted file mode 100644
index 64d4ed4..0000000
--- a/drivers/gpu/drm/sti/sti_drm_plane.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
- *          Fabien Dessenne <fabien.dessenne@st.com>
- *          for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
-
-#include "sti_compositor.h"
-#include "sti_drm_drv.h"
-#include "sti_drm_plane.h"
-#include "sti_vtg.h"
-
-enum sti_layer_desc sti_layer_default_zorder[] = {
-	STI_GDP_0,
-	STI_VID_0,
-	STI_GDP_1,
-	STI_VID_1,
-	STI_GDP_2,
-	STI_GDP_3,
-};
-
-/* (Background) < GDP0 < VID0 < GDP1 < VID1 < GDP2 < GDP3 < (ForeGround) */
-
-static int
-sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
-		     struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-		     unsigned int crtc_w, unsigned int crtc_h,
-		     uint32_t src_x, uint32_t src_y,
-		     uint32_t src_w, uint32_t src_h)
-{
-	struct sti_layer *layer = to_sti_layer(plane);
-	struct sti_mixer *mixer = to_sti_mixer(crtc);
-	int res;
-
-	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
-		      crtc->base.id, sti_mixer_to_str(mixer),
-		      plane->base.id, sti_layer_to_str(layer));
-	DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y);
-
-	res = sti_mixer_set_layer_depth(mixer, layer);
-	if (res) {
-		DRM_ERROR("Can not set layer depth\n");
-		return res;
-	}
-
-	/* src_x are in 16.16 format. */
-	res = sti_layer_prepare(layer, crtc, fb,
-			&crtc->mode, mixer->id,
-			crtc_x, crtc_y, crtc_w, crtc_h,
-			src_x >> 16, src_y >> 16,
-			src_w >> 16, src_h >> 16);
-	if (res) {
-		DRM_ERROR("Layer prepare failed\n");
-		return res;
-	}
-
-	res = sti_layer_commit(layer);
-	if (res) {
-		DRM_ERROR("Layer commit failed\n");
-		return res;
-	}
-
-	res = sti_mixer_set_layer_status(mixer, layer, true);
-	if (res) {
-		DRM_ERROR("Can not enable layer at mixer\n");
-		return res;
-	}
-
-	return 0;
-}
-
-static int sti_drm_disable_plane(struct drm_plane *plane)
-{
-	struct sti_layer *layer;
-	struct sti_mixer *mixer;
-	int lay_res, mix_res;
-
-	if (!plane->crtc) {
-		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", plane->base.id);
-		return 0;
-	}
-	layer = to_sti_layer(plane);
-	mixer = to_sti_mixer(plane->crtc);
-
-	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
-			plane->crtc->base.id, sti_mixer_to_str(mixer),
-			plane->base.id, sti_layer_to_str(layer));
-
-	/* Disable layer at mixer level */
-	mix_res = sti_mixer_set_layer_status(mixer, layer, false);
-	if (mix_res)
-		DRM_ERROR("Can not disable layer at mixer\n");
-
-	/* Wait a while to be sure that a Vsync event is received */
-	msleep(WAIT_NEXT_VSYNC_MS);
-
-	/* Then disable layer itself */
-	lay_res = sti_layer_disable(layer);
-	if (lay_res)
-		DRM_ERROR("Layer disable failed\n");
-
-	if (lay_res || mix_res)
-		return -EINVAL;
-
-	return 0;
-}
-
-static void sti_drm_plane_destroy(struct drm_plane *plane)
-{
-	DRM_DEBUG_DRIVER("\n");
-
-	drm_plane_helper_disable(plane);
-	drm_plane_cleanup(plane);
-}
-
-static int sti_drm_plane_set_property(struct drm_plane *plane,
-				      struct drm_property *property,
-				      uint64_t val)
-{
-	struct drm_device *dev = plane->dev;
-	struct sti_drm_private *private = dev->dev_private;
-	struct sti_layer *layer = to_sti_layer(plane);
-
-	DRM_DEBUG_DRIVER("\n");
-
-	if (property == private->plane_zorder_property) {
-		layer->zorder = val;
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-static struct drm_plane_funcs sti_drm_plane_funcs = {
-	.update_plane = drm_atomic_helper_update_plane,
-	.disable_plane = drm_atomic_helper_disable_plane,
-	.destroy = sti_drm_plane_destroy,
-	.set_property = sti_drm_plane_set_property,
-	.reset = drm_atomic_helper_plane_reset,
-	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
-	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
-
-static int sti_drm_plane_prepare_fb(struct drm_plane *plane,
-				  struct drm_framebuffer *fb,
-				  const struct drm_plane_state *new_state)
-{
-	return 0;
-}
-
-static void sti_drm_plane_cleanup_fb(struct drm_plane *plane,
-				   struct drm_framebuffer *fb,
-				   const struct drm_plane_state *old_fb)
-{
-}
-
-static int sti_drm_plane_atomic_check(struct drm_plane *plane,
-				      struct drm_plane_state *state)
-{
-	return 0;
-}
-
-static void sti_drm_plane_atomic_update(struct drm_plane *plane,
-					struct drm_plane_state *oldstate)
-{
-	struct drm_plane_state *state = plane->state;
-
-	sti_drm_update_plane(plane, state->crtc, state->fb,
-			    state->crtc_x, state->crtc_y,
-			    state->crtc_w, state->crtc_h,
-			    state->src_x, state->src_y,
-			    state->src_w, state->src_h);
-}
-
-static void sti_drm_plane_atomic_disable(struct drm_plane *plane,
-					 struct drm_plane_state *oldstate)
-{
-	sti_drm_disable_plane(plane);
-}
-
-static const struct drm_plane_helper_funcs sti_drm_plane_helpers_funcs = {
-	.prepare_fb = sti_drm_plane_prepare_fb,
-	.cleanup_fb = sti_drm_plane_cleanup_fb,
-	.atomic_check = sti_drm_plane_atomic_check,
-	.atomic_update = sti_drm_plane_atomic_update,
-	.atomic_disable = sti_drm_plane_atomic_disable,
-};
-
-static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane,
-						 uint64_t default_val)
-{
-	struct drm_device *dev = plane->dev;
-	struct sti_drm_private *private = dev->dev_private;
-	struct drm_property *prop;
-	struct sti_layer *layer = to_sti_layer(plane);
-
-	prop = private->plane_zorder_property;
-	if (!prop) {
-		prop = drm_property_create_range(dev, 0, "zpos", 0,
-						 GAM_MIXER_NB_DEPTH_LEVEL - 1);
-		if (!prop)
-			return;
-
-		private->plane_zorder_property = prop;
-	}
-
-	drm_object_attach_property(&plane->base, prop, default_val);
-	layer->zorder = default_val;
-}
-
-struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
-				     struct sti_layer *layer,
-				     unsigned int possible_crtcs,
-				     enum drm_plane_type type)
-{
-	int err, i;
-	uint64_t default_zorder = 0;
-
-	err = drm_universal_plane_init(dev, &layer->plane, possible_crtcs,
-			     &sti_drm_plane_funcs,
-			     sti_layer_get_formats(layer),
-			     sti_layer_get_nb_formats(layer), type);
-	if (err) {
-		DRM_ERROR("Failed to initialize plane\n");
-		return NULL;
-	}
-
-	drm_plane_helper_add(&layer->plane, &sti_drm_plane_helpers_funcs);
-
-	for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++)
-		if (sti_layer_default_zorder[i] == layer->desc)
-			break;
-
-	default_zorder = i + 1;
-
-	if (type == DRM_PLANE_TYPE_OVERLAY)
-		sti_drm_plane_attach_zorder_property(&layer->plane,
-				default_zorder);
-
-	DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%llu\n",
-			 layer->plane.base.id,
-			 sti_layer_to_str(layer), default_zorder);
-
-	return &layer->plane;
-}
-EXPORT_SYMBOL(sti_drm_plane_init);
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.h b/drivers/gpu/drm/sti/sti_drm_plane.h
deleted file mode 100644
index 4f19183..0000000
--- a/drivers/gpu/drm/sti/sti_drm_plane.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_DRM_PLANE_H_
-#define _STI_DRM_PLANE_H_
-
-#include <drm/drmP.h>
-
-struct sti_layer;
-
-struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
-		struct sti_layer *layer,
-		unsigned int possible_crtcs,
-		enum drm_plane_type type);
-#endif
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
new file mode 100644
index 0000000..6f4af6a
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/component.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "sti_crtc.h"
+#include "sti_drv.h"
+
+#define DRIVER_NAME	"sti"
+#define DRIVER_DESC	"STMicroelectronics SoC DRM"
+#define DRIVER_DATE	"20140601"
+#define DRIVER_MAJOR	1
+#define DRIVER_MINOR	0
+
+#define STI_MAX_FB_HEIGHT	4096
+#define STI_MAX_FB_WIDTH	4096
+
+static void sti_atomic_schedule(struct sti_private *private,
+				struct drm_atomic_state *state)
+{
+	private->commit.state = state;
+	schedule_work(&private->commit.work);
+}
+
+static void sti_atomic_complete(struct sti_private *private,
+				struct drm_atomic_state *state)
+{
+	struct drm_device *drm = private->drm_dev;
+
+	/*
+	 * Everything below can be run asynchronously without the need to grab
+	 * any modeset locks at all under one condition: It must be guaranteed
+	 * that the asynchronous work has either been cancelled (if the driver
+	 * supports it, which at least requires that the framebuffers get
+	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+	 * before the new state gets committed on the software side with
+	 * drm_atomic_helper_swap_state().
+	 *
+	 * This scheme allows new atomic state updates to be prepared and
+	 * checked in parallel to the asynchronous completion of the previous
+	 * update. Which is important since compositors need to figure out the
+	 * composition of the next frame right after having submitted the
+	 * current layout.
+	 */
+
+	drm_atomic_helper_commit_modeset_disables(drm, state);
+	drm_atomic_helper_commit_planes(drm, state);
+	drm_atomic_helper_commit_modeset_enables(drm, state);
+
+	drm_atomic_helper_wait_for_vblanks(drm, state);
+
+	drm_atomic_helper_cleanup_planes(drm, state);
+	drm_atomic_state_free(state);
+}
+
+static void sti_atomic_work(struct work_struct *work)
+{
+	struct sti_private *private = container_of(work,
+			struct sti_private, commit.work);
+
+	sti_atomic_complete(private, private->commit.state);
+}
+
+static int sti_atomic_commit(struct drm_device *drm,
+			     struct drm_atomic_state *state, bool async)
+{
+	struct sti_private *private = drm->dev_private;
+	int err;
+
+	err = drm_atomic_helper_prepare_planes(drm, state);
+	if (err)
+		return err;
+
+	/* serialize outstanding asynchronous commits */
+	mutex_lock(&private->commit.lock);
+	flush_work(&private->commit.work);
+
+	/*
+	 * This is the point of no return - everything below never fails except
+	 * when the hw misbehaves. Which means we can commit the new state on
+	 * the software side now.
+	 */
+
+	drm_atomic_helper_swap_state(drm, state);
+
+	if (async)
+		sti_atomic_schedule(private, state);
+	else
+		sti_atomic_complete(private, state);
+
+	mutex_unlock(&private->commit.lock);
+	return 0;
+}
+
+static struct drm_mode_config_funcs sti_mode_config_funcs = {
+	.fb_create = drm_fb_cma_create,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = sti_atomic_commit,
+};
+
+static void sti_mode_config_init(struct drm_device *dev)
+{
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	/*
+	 * set max width and height as default values.
+	 * these values are used to check the framebuffer size limits
+	 * in drm_mode_addfb().
+	 */
+	dev->mode_config.max_width = STI_MAX_FB_WIDTH;
+	dev->mode_config.max_height = STI_MAX_FB_HEIGHT;
+
+	dev->mode_config.funcs = &sti_mode_config_funcs;
+}
+
+static int sti_load(struct drm_device *dev, unsigned long flags)
+{
+	struct sti_private *private;
+	int ret;
+
+	private = kzalloc(sizeof(*private), GFP_KERNEL);
+	if (!private) {
+		DRM_ERROR("Failed to allocate private\n");
+		return -ENOMEM;
+	}
+	dev->dev_private = (void *)private;
+	private->drm_dev = dev;
+
+	mutex_init(&private->commit.lock);
+	INIT_WORK(&private->commit.work, sti_atomic_work);
+
+	drm_mode_config_init(dev);
+	drm_kms_helper_poll_init(dev);
+
+	sti_mode_config_init(dev);
+
+	ret = component_bind_all(dev->dev, dev);
+	if (ret) {
+		drm_kms_helper_poll_fini(dev);
+		drm_mode_config_cleanup(dev);
+		kfree(private);
+		return ret;
+	}
+
+	drm_mode_config_reset(dev);
+
+#ifdef CONFIG_DRM_STI_FBDEV
+	drm_fbdev_cma_init(dev, 32,
+			   dev->mode_config.num_crtc,
+			   dev->mode_config.num_connector);
+#endif
+	return 0;
+}
+
+static const struct file_operations sti_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.mmap = drm_gem_cma_mmap,
+	.poll = drm_poll,
+	.read = drm_read,
+	.unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.release = drm_release,
+};
+
+static struct dma_buf *sti_gem_prime_export(struct drm_device *dev,
+					    struct drm_gem_object *obj,
+					    int flags)
+{
+	/* we want to be able to write in the mmapped buffer */
+	flags |= O_RDWR;
+	return drm_gem_prime_export(dev, obj, flags);
+}
+
+static struct drm_driver sti_driver = {
+	.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
+	    DRIVER_GEM | DRIVER_PRIME,
+	.load = sti_load,
+	.gem_free_object = drm_gem_cma_free_object,
+	.gem_vm_ops = &drm_gem_cma_vm_ops,
+	.dumb_create = drm_gem_cma_dumb_create,
+	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
+	.dumb_destroy = drm_gem_dumb_destroy,
+	.fops = &sti_driver_fops,
+
+	.get_vblank_counter = drm_vblank_count,
+	.enable_vblank = sti_crtc_enable_vblank,
+	.disable_vblank = sti_crtc_disable_vblank,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = sti_gem_prime_export,
+	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+	.gem_prime_vmap = drm_gem_cma_prime_vmap,
+	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+	.gem_prime_mmap = drm_gem_cma_prime_mmap,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+	return dev->of_node == data;
+}
+
+static int sti_bind(struct device *dev)
+{
+	return drm_platform_init(&sti_driver, to_platform_device(dev));
+}
+
+static void sti_unbind(struct device *dev)
+{
+	drm_put_dev(dev_get_drvdata(dev));
+}
+
+static const struct component_master_ops sti_ops = {
+	.bind = sti_bind,
+	.unbind = sti_unbind,
+};
+
+static int sti_platform_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct device_node *child_np;
+	struct component_match *match = NULL;
+
+	dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+
+	of_platform_populate(node, NULL, NULL, dev);
+
+	child_np = of_get_next_available_child(node, NULL);
+
+	while (child_np) {
+		component_match_add(dev, &match, compare_of, child_np);
+		of_node_put(child_np);
+		child_np = of_get_next_available_child(node, child_np);
+	}
+
+	return component_master_add_with_match(dev, &sti_ops, match);
+}
+
+static int sti_platform_remove(struct platform_device *pdev)
+{
+	component_master_del(&pdev->dev, &sti_ops);
+	of_platform_depopulate(&pdev->dev);
+
+	return 0;
+}
+
+static const struct of_device_id sti_dt_ids[] = {
+	{ .compatible = "st,sti-display-subsystem", },
+	{ /* end node */ },
+};
+MODULE_DEVICE_TABLE(of, sti_dt_ids);
+
+static struct platform_driver sti_platform_driver = {
+	.probe = sti_platform_probe,
+	.remove = sti_platform_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = sti_dt_ids,
+	},
+};
+
+module_platform_driver(sti_platform_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
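
For reference, a minimal userspace sketch of the commit-serialization pattern sti_atomic_commit() implements above; pthreads stand in for the kernel workqueue and all names are illustrative, not driver code. Each new commit flushes the outstanding asynchronous completion under a lock before handing off its own state, so completions never overlap:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct state { int id; };

static pthread_mutex_t commit_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t worker;
static bool worker_busy;

static void complete(struct state *s)
{
	/* stands in for the commit_modeset/commit_planes/cleanup sequence */
	printf("committed state %d\n", s->id);
}

static void *work_fn(void *arg)
{
	complete(arg);
	return NULL;
}

static int commit(struct state *s, bool async)
{
	pthread_mutex_lock(&commit_lock);
	if (worker_busy) {			/* flush_work() equivalent */
		pthread_join(worker, NULL);
		worker_busy = false;
	}
	/* point of no return: the state now belongs to the commit machinery */
	if (async) {
		pthread_create(&worker, NULL, work_fn, s);
		worker_busy = true;
	} else {
		complete(s);
	}
	pthread_mutex_unlock(&commit_lock);
	return 0;
}

int main(void)
{
	struct state a = { 1 }, b = { 2 };

	commit(&a, true);
	commit(&b, false);	/* waits for the async commit of 'a' first */
	commit(&a, true);
	pthread_mutex_lock(&commit_lock);	/* drain before exit */
	if (worker_busy)
		pthread_join(worker, NULL);
	pthread_mutex_unlock(&commit_lock);
	return 0;
}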
diff --git a/drivers/gpu/drm/sti/sti_drv.h b/drivers/gpu/drm/sti/sti_drv.h
new file mode 100644
index 0000000..9372f69
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drv.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_DRV_H_
+#define _STI_DRV_H_
+
+#include <drm/drmP.h>
+
+struct sti_compositor;
+struct sti_tvout;
+
+/**
+ * STI drm private structure
+ * This structure is stored as private in the drm_device
+ *
+ * @compo:                 compositor
+ * @plane_zorder_property: z-order property for CRTC planes
+ * @drm_dev:               drm device
+ */
+struct sti_private {
+	struct sti_compositor *compo;
+	struct drm_property *plane_zorder_property;
+	struct drm_device *drm_dev;
+
+	struct {
+		struct drm_atomic_state *state;
+		struct work_struct work;
+		struct mutex lock;
+	} commit;
+};
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 087906f..9365670 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -9,9 +9,12 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
 #include "sti_compositor.h"
 #include "sti_gdp.h"
-#include "sti_layer.h"
+#include "sti_plane.h"
 #include "sti_vtg.h"
 
 #define ALPHASWITCH     BIT(6)
@@ -26,7 +29,7 @@
 #define GDP_XBGR8888    (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH)
 #define GDP_ARGB8565    0x04
 #define GDP_ARGB8888    0x05
-#define GDP_ABGR8888	(GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
+#define GDP_ABGR8888    (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
 #define GDP_ARGB1555    0x06
 #define GDP_ARGB4444    0x07
 #define GDP_CLUT8       0x0B
@@ -53,8 +56,8 @@
 #define GAM_GDP_PPT_IGNORE      (BIT(1) | BIT(0))
 #define GAM_GDP_SIZE_MAX        0x7FF
 
-#define GDP_NODE_NB_BANK	2
-#define GDP_NODE_PER_FIELD	2
+#define GDP_NODE_NB_BANK        2
+#define GDP_NODE_PER_FIELD      2
 
 struct sti_gdp_node {
 	u32 gam_gdp_ctl;
@@ -85,16 +88,20 @@
 /**
  * STI GDP structure
  *
- * @layer:		layer structure
+ * @sti_plane:          sti_plane structure
+ * @dev:                driver device
+ * @regs:               gdp registers
  * @clk_pix:            pixel clock for the current gdp
  * @clk_main_parent:    gdp parent clock if main path used
  * @clk_aux_parent:     gdp parent clock if aux path used
  * @vtg_field_nb:       callback for VTG FIELD (top or bottom) notification
  * @is_curr_top:        true if the current node processed is the top field
- * @node_list:		array of node list
+ * @node_list:          array of node list
  */
 struct sti_gdp {
-	struct sti_layer layer;
+	struct sti_plane plane;
+	struct device *dev;
+	void __iomem *regs;
 	struct clk *clk_pix;
 	struct clk *clk_main_parent;
 	struct clk *clk_aux_parent;
@@ -103,7 +110,7 @@
 	struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
 };
 
-#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer)
+#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
 
 static const uint32_t gdp_supported_formats[] = {
 	DRM_FORMAT_XRGB8888,
@@ -120,16 +127,6 @@
 	DRM_FORMAT_C8,
 };
 
-static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer)
-{
-	return gdp_supported_formats;
-}
-
-static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer)
-{
-	return ARRAY_SIZE(gdp_supported_formats);
-}
-
 static int sti_gdp_fourcc2format(int fourcc)
 {
 	switch (fourcc) {
@@ -175,20 +172,19 @@
 
 /**
  * sti_gdp_get_free_nodes
- * @layer: gdp layer
+ * @gdp: gdp pointer
  *
  * Look for a GDP node list that is not currently read by the HW.
  *
  * RETURNS:
  * Pointer to the free GDP node list
  */
-static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
+static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp)
 {
 	int hw_nvn;
-	struct sti_gdp *gdp = to_sti_gdp(layer);
 	unsigned int i;
 
-	hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
+	hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
 	if (!hw_nvn)
 		goto end;
 
@@ -199,7 +195,7 @@
 
 	/* in hazardous cases restart with the first node */
 	DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
-			sti_layer_to_str(layer), hw_nvn);
+			sti_plane_to_str(&gdp->plane), hw_nvn);
 
 end:
 	return &gdp->node_list[0];
@@ -207,7 +203,7 @@
 
 /**
  * sti_gdp_get_current_nodes
- * @layer: GDP layer
+ * @gdp: gdp pointer
  *
  * Look for GDP nodes that are currently read by the HW.
  *
@@ -215,13 +211,12 @@
  * Pointer to the current GDP node list
  */
 static
-struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
+struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp)
 {
 	int hw_nvn;
-	struct sti_gdp *gdp = to_sti_gdp(layer);
 	unsigned int i;
 
-	hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
+	hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
 	if (!hw_nvn)
 		goto end;
 
@@ -232,205 +227,25 @@
 
 end:
 	DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
-				hw_nvn, sti_layer_to_str(layer));
+				hw_nvn, sti_plane_to_str(&gdp->plane));
 
 	return NULL;
 }
 
 /**
- * sti_gdp_prepare_layer
- * @lay: gdp layer
- * @first_prepare: true if it is the first time this function is called
- *
- * Update the free GDP node list according to the layer properties.
- *
- * RETURNS:
- * 0 on success.
- */
-static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
-{
-	struct sti_gdp_node_list *list;
-	struct sti_gdp_node *top_field, *btm_field;
-	struct drm_display_mode *mode = layer->mode;
-	struct device *dev = layer->dev;
-	struct sti_gdp *gdp = to_sti_gdp(layer);
-	struct sti_compositor *compo = dev_get_drvdata(dev);
-	int format;
-	unsigned int depth, bpp;
-	int rate = mode->clock * 1000;
-	int res;
-	u32 ydo, xdo, yds, xds;
-
-	list = sti_gdp_get_free_nodes(layer);
-	top_field = list->top_field;
-	btm_field = list->btm_field;
-
-	dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
-			sti_layer_to_str(layer), top_field, btm_field);
-
-	/* Build the top field from layer params */
-	top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
-	top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
-	format = sti_gdp_fourcc2format(layer->format);
-	if (format == -1) {
-		DRM_ERROR("Format not supported by GDP %.4s\n",
-			  (char *)&layer->format);
-		return 1;
-	}
-	top_field->gam_gdp_ctl |= format;
-	top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
-	top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
-
-	/* pixel memory location */
-	drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
-	top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
-	top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
-	top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];
-
-	/* input parameters */
-	top_field->gam_gdp_pmp = layer->pitches[0];
-	top_field->gam_gdp_size =
-	    clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
-	    clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);
-
-	/* output parameters */
-	ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
-	yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
-	xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
-	xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
-	top_field->gam_gdp_vpo = (ydo << 16) | xdo;
-	top_field->gam_gdp_vps = (yds << 16) | xds;
-
-	/* Same content and chained together */
-	memcpy(btm_field, top_field, sizeof(*btm_field));
-	top_field->gam_gdp_nvn = list->btm_field_paddr;
-	btm_field->gam_gdp_nvn = list->top_field_paddr;
-
-	/* Interlaced mode */
-	if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
-		btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
-		    layer->pitches[0];
-
-	if (first_prepare) {
-		/* Register gdp callback */
-		if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
-				compo->vtg_main : compo->vtg_aux,
-				&gdp->vtg_field_nb, layer->mixer_id)) {
-			DRM_ERROR("Cannot register VTG notifier\n");
-			return 1;
-		}
-
-		/* Set and enable gdp clock */
-		if (gdp->clk_pix) {
-			struct clk *clkp;
-			/* According to the mixer used, the gdp pixel clock
-			 * should have a different parent clock. */
-			if (layer->mixer_id == STI_MIXER_MAIN)
-				clkp = gdp->clk_main_parent;
-			else
-				clkp = gdp->clk_aux_parent;
-
-			if (clkp)
-				clk_set_parent(gdp->clk_pix, clkp);
-
-			res = clk_set_rate(gdp->clk_pix, rate);
-			if (res < 0) {
-				DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
-						rate);
-				return 1;
-			}
-
-			if (clk_prepare_enable(gdp->clk_pix)) {
-				DRM_ERROR("Failed to prepare/enable gdp\n");
-				return 1;
-			}
-		}
-	}
-
-	return 0;
-}
-
-/**
- * sti_gdp_commit_layer
- * @lay: gdp layer
- *
- * Update the NVN field of the 'right' field of the current GDP node (being
- * used by the HW) with the address of the updated ('free') top field GDP node.
- * - In interlaced mode the 'right' field is the bottom field as we update
- *   frames starting from their top field
- * - In progressive mode, we update both bottom and top fields which are
- *   equal nodes.
- * At the next VSYNC, the updated node list will be used by the HW.
- *
- * RETURNS:
- * 0 on success.
- */
-static int sti_gdp_commit_layer(struct sti_layer *layer)
-{
-	struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer);
-	struct sti_gdp_node *updated_top_node = updated_list->top_field;
-	struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
-	struct sti_gdp *gdp = to_sti_gdp(layer);
-	u32 dma_updated_top = updated_list->top_field_paddr;
-	u32 dma_updated_btm = updated_list->btm_field_paddr;
-	struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
-
-	dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
-			sti_layer_to_str(layer),
-			updated_top_node, updated_btm_node);
-	dev_dbg(layer->dev, "Current NVN:0x%X\n",
-		readl(layer->regs + GAM_GDP_NVN_OFFSET));
-	dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n",
-		(unsigned long)layer->paddr,
-		readl(layer->regs + GAM_GDP_PML_OFFSET));
-
-	if (curr_list == NULL) {
-		/* First update or invalid node should directly write in the
-		 * hw register */
-		DRM_DEBUG_DRIVER("%s first update (or invalid node)",
-				sti_layer_to_str(layer));
-
-		writel(gdp->is_curr_top == true ?
-				dma_updated_btm : dma_updated_top,
-				layer->regs + GAM_GDP_NVN_OFFSET);
-		return 0;
-	}
-
-	if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) {
-		if (gdp->is_curr_top == true) {
-			/* Do not update in the middle of the frame, but
-			 * postpone the update after the bottom field has
-			 * been displayed */
-			curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
-		} else {
-			/* Direct update to avoid one frame delay */
-			writel(dma_updated_top,
-				layer->regs + GAM_GDP_NVN_OFFSET);
-		}
-	} else {
-		/* Direct update for progressive to avoid one frame delay */
-		writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET);
-	}
-
-	return 0;
-}
-
-/**
- * sti_gdp_disable_layer
- * @lay: gdp layer
+ * sti_gdp_disable
+ * @gdp: gdp pointer
  *
  * Disable a GDP.
- *
- * RETURNS:
- * 0 on success.
  */
-static int sti_gdp_disable_layer(struct sti_layer *layer)
+static void sti_gdp_disable(struct sti_gdp *gdp)
 {
+	struct drm_plane *drm_plane = &gdp->plane.drm_plane;
+	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
+	struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
 	unsigned int i;
-	struct sti_gdp *gdp = to_sti_gdp(layer);
-	struct sti_compositor *compo = dev_get_drvdata(layer->dev);
 
-	DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+	DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));
 
 	/* Set the nodes as 'to be ignored on mixer' */
 	for (i = 0; i < GDP_NODE_NB_BANK; i++) {
@@ -438,14 +253,14 @@
 		gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
 	}
 
-	if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ?
+	if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ?
 			compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
 		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
 
 	if (gdp->clk_pix)
 		clk_disable_unprepare(gdp->clk_pix);
 
-	return 0;
+	gdp->plane.status = STI_PLANE_DISABLED;
 }
 
 /**
@@ -464,6 +279,14 @@
 {
 	struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
 
+	if (gdp->plane.status == STI_PLANE_FLUSHING) {
+		/* the disable must be synchronized with the vsync event */
+		DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
+				 sti_plane_to_str(&gdp->plane));
+
+		sti_gdp_disable(gdp);
+	}
+
 	switch (event) {
 	case VTG_TOP_FIELD_EVENT:
 		gdp->is_curr_top = true;
@@ -479,10 +302,9 @@
 	return 0;
 }
 
-static void sti_gdp_init(struct sti_layer *layer)
+static void sti_gdp_init(struct sti_gdp *gdp)
 {
-	struct sti_gdp *gdp = to_sti_gdp(layer);
-	struct device_node *np = layer->dev->of_node;
+	struct device_node *np = gdp->dev->of_node;
 	dma_addr_t dma_addr;
 	void *base;
 	unsigned int i, size;
@@ -490,8 +312,8 @@
 	/* Allocate all the nodes within a single memory page */
 	size = sizeof(struct sti_gdp_node) *
 	    GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
-	base = dma_alloc_writecombine(layer->dev,
-			size, &dma_addr, GFP_KERNEL | GFP_DMA);
+	base = dma_alloc_writecombine(gdp->dev,
+				      size, &dma_addr, GFP_KERNEL | GFP_DMA);
 
 	if (!base) {
 		DRM_ERROR("Failed to allocate memory for GDP node\n");
@@ -526,7 +348,7 @@
 		/* GDP of STiH407 chip have its own pixel clock */
 		char *clk_name;
 
-		switch (layer->desc) {
+		switch (gdp->plane.desc) {
 		case STI_GDP_0:
 			clk_name = "pix_gdp1";
 			break;
@@ -544,32 +366,249 @@
 			return;
 		}
 
-		gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
+		gdp->clk_pix = devm_clk_get(gdp->dev, clk_name);
 		if (IS_ERR(gdp->clk_pix))
 			DRM_ERROR("Cannot get %s clock\n", clk_name);
 
-		gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent");
+		gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent");
 		if (IS_ERR(gdp->clk_main_parent))
 			DRM_ERROR("Cannot get main_parent clock\n");
 
-		gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent");
+		gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent");
 		if (IS_ERR(gdp->clk_aux_parent))
 			DRM_ERROR("Cannot get aux_parent clock\n");
 	}
 }
 
-static const struct sti_layer_funcs gdp_ops = {
-	.get_formats = sti_gdp_get_formats,
-	.get_nb_formats = sti_gdp_get_nb_formats,
-	.init = sti_gdp_init,
-	.prepare = sti_gdp_prepare_layer,
-	.commit = sti_gdp_commit_layer,
-	.disable = sti_gdp_disable_layer,
+static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
+				  struct drm_plane_state *oldstate)
+{
+	struct drm_plane_state *state = drm_plane->state;
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_gdp *gdp = to_sti_gdp(plane);
+	struct drm_crtc *crtc = state->crtc;
+	struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
+	struct drm_framebuffer *fb =  state->fb;
+	bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
+	struct sti_mixer *mixer;
+	struct drm_display_mode *mode;
+	int dst_x, dst_y, dst_w, dst_h;
+	int src_x, src_y, src_w, src_h;
+	struct drm_gem_cma_object *cma_obj;
+	struct sti_gdp_node_list *list;
+	struct sti_gdp_node_list *curr_list;
+	struct sti_gdp_node *top_field, *btm_field;
+	u32 dma_updated_top;
+	u32 dma_updated_btm;
+	int format;
+	unsigned int depth, bpp;
+	u32 ydo, xdo, yds, xds;
+	int res;
+
+	/* Handle the case where the crtc is NULL (plane disabled) */
+	if (!crtc)
+		return;
+
+	mixer = to_sti_mixer(crtc);
+	mode = &crtc->mode;
+	dst_x = state->crtc_x;
+	dst_y = state->crtc_y;
+	dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+	dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	/* src coordinates are in 16.16 fixed-point format */
+	src_x = state->src_x >> 16;
+	src_y = state->src_y >> 16;
+	src_w = state->src_w >> 16;
+	src_h = state->src_h >> 16;
+
+	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+		      crtc->base.id, sti_mixer_to_str(mixer),
+		      drm_plane->base.id, sti_plane_to_str(plane));
+	DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+		      sti_plane_to_str(plane),
+		      dst_w, dst_h, dst_x, dst_y,
+		      src_w, src_h, src_x, src_y);
+
+	list = sti_gdp_get_free_nodes(gdp);
+	top_field = list->top_field;
+	btm_field = list->btm_field;
+
+	dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
+		sti_plane_to_str(plane), top_field, btm_field);
+
+	/* build the top field */
+	top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
+	top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
+	format = sti_gdp_fourcc2format(fb->pixel_format);
+	if (format == -1) {
+		DRM_ERROR("Format not supported by GDP %.4s\n",
+			  (char *)&fb->pixel_format);
+		return;
+	}
+	top_field->gam_gdp_ctl |= format;
+	top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
+	top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
+
+	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+	if (!cma_obj) {
+		DRM_ERROR("Can't get CMA GEM object for fb\n");
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
+			 (char *)&fb->pixel_format,
+			 (unsigned long)cma_obj->paddr);
+
+	/* pixel memory location */
+	drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
+	top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
+	top_field->gam_gdp_pml += src_x * (bpp >> 3);
+	top_field->gam_gdp_pml += src_y * fb->pitches[0];
+
+	/* input parameters */
+	top_field->gam_gdp_pmp = fb->pitches[0];
+	top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
+				  clamp_val(src_w, 0, GAM_GDP_SIZE_MAX);
+
+	/* output parameters */
+	ydo = sti_vtg_get_line_number(*mode, dst_y);
+	yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
+	xdo = sti_vtg_get_pixel_number(*mode, dst_x);
+	xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
+	top_field->gam_gdp_vpo = (ydo << 16) | xdo;
+	top_field->gam_gdp_vps = (yds << 16) | xds;
+
+	/* Same content and chained together */
+	memcpy(btm_field, top_field, sizeof(*btm_field));
+	top_field->gam_gdp_nvn = list->btm_field_paddr;
+	btm_field->gam_gdp_nvn = list->top_field_paddr;
+
+	/* Interlaced mode */
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
+					 fb->pitches[0];
+
+	if (first_prepare) {
+		/* Register gdp callback */
+		if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ?
+				compo->vtg_main : compo->vtg_aux,
+				&gdp->vtg_field_nb, mixer->id)) {
+			DRM_ERROR("Cannot register VTG notifier\n");
+			return;
+		}
+
+		/* Set and enable gdp clock */
+		if (gdp->clk_pix) {
+			struct clk *clkp;
+			int rate = mode->clock * 1000;
+
+			/* Depending on the mixer used, the gdp pixel clock
+			 * needs a different parent clock. */
+			if (mixer->id == STI_MIXER_MAIN)
+				clkp = gdp->clk_main_parent;
+			else
+				clkp = gdp->clk_aux_parent;
+
+			if (clkp)
+				clk_set_parent(gdp->clk_pix, clkp);
+
+			res = clk_set_rate(gdp->clk_pix, rate);
+			if (res < 0) {
+				DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
+					  rate);
+				return;
+			}
+
+			if (clk_prepare_enable(gdp->clk_pix)) {
+				DRM_ERROR("Failed to prepare/enable gdp\n");
+				return;
+			}
+		}
+	}
+
+	/* Update the NVN field of the 'right' field of the current GDP node
+	 * (being used by the HW) with the address of the updated ('free') top
+	 * field GDP node.
+	 * - In interlaced mode the 'right' field is the bottom field as we
+	 *   update frames starting from their top field
+	 * - In progressive mode, we update both bottom and top fields which
+	 *   are equal nodes.
+	 * At the next VSYNC, the updated node list will be used by the HW.
+	 */
+	curr_list = sti_gdp_get_current_nodes(gdp);
+	dma_updated_top = list->top_field_paddr;
+	dma_updated_btm = list->btm_field_paddr;
+
+	dev_dbg(gdp->dev, "Current NVN:0x%X\n",
+		readl(gdp->regs + GAM_GDP_NVN_OFFSET));
+	dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
+		(unsigned long)cma_obj->paddr,
+		readl(gdp->regs + GAM_GDP_PML_OFFSET));
+
+	if (!curr_list) {
+		/* On the first update, or with an invalid node, write
+		 * directly to the hw register */
+		DRM_DEBUG_DRIVER("%s first update (or invalid node)",
+				 sti_plane_to_str(plane));
+
+		writel(gdp->is_curr_top ?
+				dma_updated_btm : dma_updated_top,
+				gdp->regs + GAM_GDP_NVN_OFFSET);
+		goto end;
+	}
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		if (gdp->is_curr_top) {
+			/* Do not update in the middle of the frame, but
+			 * postpone the update until after the bottom field
+			 * has been displayed */
+			curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
+		} else {
+			/* Direct update to avoid one frame delay */
+			writel(dma_updated_top,
+			       gdp->regs + GAM_GDP_NVN_OFFSET);
+		}
+	} else {
+		/* Direct update for progressive to avoid one frame delay */
+		writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET);
+	}
+
+end:
+	plane->status = STI_PLANE_UPDATED;
+}
+
+static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
+				   struct drm_plane_state *oldstate)
+{
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
+
+	if (!drm_plane->crtc) {
+		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
+				 drm_plane->base.id);
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+			 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+			 drm_plane->base.id, sti_plane_to_str(plane));
+
+	plane->status = STI_PLANE_DISABLING;
+}
+
+static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
+	.atomic_update = sti_gdp_atomic_update,
+	.atomic_disable = sti_gdp_atomic_disable,
 };
 
-struct sti_layer *sti_gdp_create(struct device *dev, int id)
+struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
+				 struct device *dev, int desc,
+				 void __iomem *baseaddr,
+				 unsigned int possible_crtcs,
+				 enum drm_plane_type type)
 {
 	struct sti_gdp *gdp;
+	int res;
 
 	gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
 	if (!gdp) {
@@ -577,8 +616,33 @@
 		return NULL;
 	}
 
-	gdp->layer.ops = &gdp_ops;
+	gdp->dev = dev;
+	gdp->regs = baseaddr;
+	gdp->plane.desc = desc;
+	gdp->plane.status = STI_PLANE_DISABLED;
+
 	gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;
 
-	return (struct sti_layer *)gdp;
+	sti_gdp_init(gdp);
+
+	res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
+				       possible_crtcs,
+				       &sti_plane_helpers_funcs,
+				       gdp_supported_formats,
+				       ARRAY_SIZE(gdp_supported_formats),
+				       type);
+	if (res) {
+		DRM_ERROR("Failed to initialize universal plane\n");
+		goto err;
+	}
+
+	drm_plane_helper_add(&gdp->plane.drm_plane, &sti_gdp_helpers_funcs);
+
+	sti_plane_init_property(&gdp->plane, type);
+
+	return &gdp->plane.drm_plane;
+
+err:
+	devm_kfree(dev, gdp);
+	return NULL;
 }
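
For reference, a compilable userspace sketch of the double-banked node scheme sti_gdp_atomic_update() relies on; the types and the NVN register are modeled, not real driver code. The hardware walks GDP nodes through their NVN ("next viewport node") pointer, so software fills whichever bank the hardware is not reading and then re-links NVN so the new nodes are fetched at the next vsync:

#include <stdio.h>

#define NB_BANK 2

struct node { unsigned int pml; struct node *nvn; };

static struct node bank[NB_BANK];
static struct node *hw_nvn = &bank[0];	/* models the GAM_GDP_NVN register */

static struct node *get_free_node(void)
{
	/* pick the bank the hardware is not currently pointing at */
	return hw_nvn == &bank[0] ? &bank[1] : &bank[0];
}

static void update(unsigned int new_buffer)
{
	struct node *next = get_free_node();

	next->pml = new_buffer;	/* new pixel memory location */
	next->nvn = next;	/* progressive: node chains to itself */
	hw_nvn = next;		/* direct update, no frame of delay */
}

int main(void)
{
	update(0x1000);
	update(0x2000);
	printf("hw now scans the node with pml=0x%x\n", hw_nvn->pml);
	return 0;
}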
diff --git a/drivers/gpu/drm/sti/sti_gdp.h b/drivers/gpu/drm/sti/sti_gdp.h
index 1dab682..73947a4 100644
--- a/drivers/gpu/drm/sti/sti_gdp.h
+++ b/drivers/gpu/drm/sti/sti_gdp.h
@@ -11,6 +11,9 @@
 
 #include <linux/types.h>
 
-struct sti_layer *sti_gdp_create(struct device *dev, int id);
-
+struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
+				 struct device *dev, int desc,
+				 void __iomem *baseaddr,
+				 unsigned int possible_crtcs,
+				 enum drm_plane_type type);
 #endif
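
The GDP and HQVDP hunks above both defer the actual hardware disable to the vsync notifier. A plain-C sketch of that state machine follows; the DISABLING -> FLUSHING step happens in CRTC code outside this patch, so that transition is an assumption here, and the names are illustrative:

#include <stdio.h>

enum plane_status {
	PLANE_DISABLED,
	PLANE_UPDATED,
	PLANE_DISABLING,
	PLANE_FLUSHING,
};

static enum plane_status status = PLANE_UPDATED;

static void atomic_disable(void)
{
	status = PLANE_DISABLING;	/* nothing touches the HW yet */
}

static void crtc_flush(void)
{
	if (status == PLANE_DISABLING)
		status = PLANE_FLUSHING;	/* assumed CRTC-side step */
}

static void vsync_notifier(void)
{
	if (status == PLANE_FLUSHING) {
		/* safe point: HW no longer scans out this plane's nodes */
		printf("disabling plane at vsync\n");
		status = PLANE_DISABLED;
	}
}

int main(void)
{
	atomic_disable();
	crtc_flush();
	vsync_notifier();	/* the disable lands here, not earlier */
	return 0;
}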
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index f28a4d5..09e29e4 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -588,7 +588,7 @@
 	return count;
 
 fail:
-	DRM_ERROR("Can not read HDMI EDID\n");
+	DRM_ERROR("Can't read HDMI EDID\n");
 	return 0;
 }
 
@@ -693,21 +693,8 @@
 	struct sti_hdmi_connector *connector;
 	struct drm_connector *drm_connector;
 	struct drm_bridge *bridge;
-	struct device_node *ddc;
 	int err;
 
-	ddc = of_parse_phandle(dev->of_node, "ddc", 0);
-	if (ddc) {
-		hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
-		if (!hdmi->ddc_adapt) {
-			err = -EPROBE_DEFER;
-			of_node_put(ddc);
-			return err;
-		}
-
-		of_node_put(ddc);
-	}
-
 	/* Set the drm device handle */
 	hdmi->drm_dev = drm_dev;
 
@@ -796,6 +783,7 @@
 	struct sti_hdmi *hdmi;
 	struct device_node *np = dev->of_node;
 	struct resource *res;
+	struct device_node *ddc;
 	int ret;
 
 	DRM_INFO("%s\n", __func__);
@@ -804,6 +792,17 @@
 	if (!hdmi)
 		return -ENOMEM;
 
+	ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0);
+	if (ddc) {
+		hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
+		if (!hdmi->ddc_adapt) {
+			of_node_put(ddc);
+			return -EPROBE_DEFER;
+		}
+
+		of_node_put(ddc);
+	}
+
 	hdmi->dev = pdev->dev;
 
 	/* Get resources */
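
The sti_hdmi.c hunk above moves the DDC adapter lookup from the component bind callback into probe(), so that -EPROBE_DEFER is returned from a context the driver core will retry. A minimal userspace model of that retry contract; the EPROBE_DEFER value is kernel-internal and the stub lookup is hypothetical:

#include <stdio.h>

#define EPROBE_DEFER	517	/* kernel-internal errno, not in userspace errno.h */

struct i2c_adapter { int id; };

static struct i2c_adapter the_adapter = { 1 };
static int ready;

/* stub: the adapter only appears once its own driver has probed */
static struct i2c_adapter *find_ddc_adapter(void)
{
	return ready ? &the_adapter : NULL;
}

static int hdmi_probe(void)
{
	if (!find_ddc_adapter())
		return -EPROBE_DEFER;	/* the core re-runs probe later */
	return 0;
}

int main(void)
{
	printf("first probe: %d\n", hdmi_probe());	/* -EPROBE_DEFER */
	ready = 1;					/* i2c driver probed */
	printf("second probe: %d\n", hdmi_probe());	/* 0 */
	return 0;
}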
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index b0eb62d..7c8f9b8 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -12,11 +12,12 @@
 #include <linux/reset.h>
 
 #include <drm/drmP.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
 
-#include "sti_drm_plane.h"
-#include "sti_hqvdp.h"
+#include "sti_compositor.h"
 #include "sti_hqvdp_lut.h"
-#include "sti_layer.h"
+#include "sti_plane.h"
 #include "sti_vtg.h"
 
 /* Firmware name */
@@ -322,8 +323,7 @@
  * @dev:               driver device
  * @drm_dev:           the drm device
  * @regs:              registers
- * @layer:             layer structure for hqvdp it self
- * @vid_plane:         VID plug used as link with compositor IP
+ * @plane:             plane structure for the hqvdp itself
  * @clk:               IP clock
  * @clk_pix_main:      pix main clock
  * @reset:             reset control
@@ -334,13 +334,13 @@
  * @hqvdp_cmd:         buffer of commands
  * @hqvdp_cmd_paddr:   physical address of hqvdp_cmd
  * @vtg:               vtg for main data path
+ * @xp70_initialized:  true if xp70 is already initialized
  */
 struct sti_hqvdp {
 	struct device *dev;
 	struct drm_device *drm_dev;
 	void __iomem *regs;
-	struct sti_layer layer;
-	struct drm_plane *vid_plane;
+	struct sti_plane plane;
 	struct clk *clk;
 	struct clk *clk_pix_main;
 	struct reset_control *reset;
@@ -351,24 +351,15 @@
 	void *hqvdp_cmd;
 	dma_addr_t hqvdp_cmd_paddr;
 	struct sti_vtg *vtg;
+	bool xp70_initialized;
 };
 
-#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, layer)
+#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, plane)
 
 static const uint32_t hqvdp_supported_formats[] = {
 	DRM_FORMAT_NV12,
 };
 
-static const uint32_t *sti_hqvdp_get_formats(struct sti_layer *layer)
-{
-	return hqvdp_supported_formats;
-}
-
-static unsigned int sti_hqvdp_get_nb_formats(struct sti_layer *layer)
-{
-	return ARRAY_SIZE(hqvdp_supported_formats);
-}
-
 /**
  * sti_hqvdp_get_free_cmd
  * @hqvdp: hqvdp structure
@@ -484,7 +475,12 @@
 
 /**
  * sti_hqvdp_check_hw_scaling
- * @layer: hqvdp layer
+ * @hqvdp: hqvdp pointer
+ * @mode: display mode with timing constraints
+ * @src_w: source width
+ * @src_h: source height
+ * @dst_w: destination width
+ * @dst_h: destination height
  *
  * Check if the HW is able to perform the scaling request
  * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
@@ -498,184 +494,36 @@
  * RETURNS:
  * True if the HW can scale.
  */
-static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer)
+static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp,
+				       struct drm_display_mode *mode,
+				       int src_w, int src_h,
+				       int dst_w, int dst_h)
 {
-	struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
 	unsigned long lfw;
 	unsigned int inv_zy;
 
-	lfw = layer->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
-	lfw /= max(layer->src_w, layer->dst_w) * layer->mode->clock / 1000;
+	lfw = mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
+	lfw /= max(src_w, dst_w) * mode->clock / 1000;
 
-	inv_zy = DIV_ROUND_UP(layer->src_h, layer->dst_h);
+	inv_zy = DIV_ROUND_UP(src_h, dst_h);
 
 	return (inv_zy <= lfw) ? true : false;
 }
 
 /**
- * sti_hqvdp_prepare_layer
- * @layer: hqvdp layer
- * @first_prepare: true if it is the first time this function is called
+ * sti_hqvdp_disable
+ * @hqvdp: hqvdp pointer
  *
- * Prepares a command for the firmware
- *
- * RETURNS:
- * 0 on success.
+ * Disables the HQVDP plane
  */
-static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
+static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
 {
-	struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
-	struct sti_hqvdp_cmd *cmd;
-	int scale_h, scale_v;
-	int cmd_offset;
-
-	dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
-
-	/* prepare and commit VID plane */
-	hqvdp->vid_plane->funcs->update_plane(hqvdp->vid_plane,
-					layer->crtc, layer->fb,
-					layer->dst_x, layer->dst_y,
-					layer->dst_w, layer->dst_h,
-					layer->src_x, layer->src_y,
-					layer->src_w, layer->src_h);
-
-	cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
-	if (cmd_offset == -1) {
-		DRM_ERROR("No available hqvdp_cmd now\n");
-		return -EBUSY;
-	}
-	cmd = hqvdp->hqvdp_cmd + cmd_offset;
-
-	if (!sti_hqvdp_check_hw_scaling(layer)) {
-		DRM_ERROR("Scaling beyond HW capabilities\n");
-		return -EINVAL;
-	}
-
-	/* Static parameters, defaulting to progressive mode */
-	cmd->top.config = TOP_CONFIG_PROGRESSIVE;
-	cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
-	cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
-	cmd->csdi.config = CSDI_CONFIG_PROG;
-
-	/* VC1RE, FMD bypassed : keep everything set to 0
-	 * IQI/P2I bypassed */
-	cmd->iqi.config = IQI_CONFIG_DFLT;
-	cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
-	cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
-	cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
-
-	/* Buffer planes address */
-	cmd->top.current_luma = (u32) layer->paddr + layer->offsets[0];
-	cmd->top.current_chroma = (u32) layer->paddr + layer->offsets[1];
-
-	/* Pitches */
-	cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch =
-			layer->pitches[0];
-	cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch =
-			layer->pitches[1];
-
-	/* Input / output size
-	 * Align to upper even value */
-	layer->dst_w = ALIGN(layer->dst_w, 2);
-	layer->dst_h = ALIGN(layer->dst_h, 2);
-
-	if ((layer->src_w > MAX_WIDTH) || (layer->src_w < MIN_WIDTH) ||
-	    (layer->src_h > MAX_HEIGHT) || (layer->src_h < MIN_HEIGHT) ||
-	    (layer->dst_w > MAX_WIDTH) || (layer->dst_w < MIN_WIDTH) ||
-	    (layer->dst_h > MAX_HEIGHT) || (layer->dst_h < MIN_HEIGHT)) {
-		DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
-				layer->src_w, layer->src_h,
-				layer->dst_w, layer->dst_h);
-		return -EINVAL;
-	}
-	cmd->top.input_viewport_size = cmd->top.input_frame_size =
-			layer->src_h << 16 | layer->src_w;
-	cmd->hvsrc.output_picture_size = layer->dst_h << 16 | layer->dst_w;
-	cmd->top.input_viewport_ori = layer->src_y << 16 | layer->src_x;
-
-	/* Handle interlaced */
-	if (layer->fb->flags & DRM_MODE_FB_INTERLACED) {
-		/* Top field to display */
-		cmd->top.config = TOP_CONFIG_INTER_TOP;
-
-		/* Update pitches and vert size */
-		cmd->top.input_frame_size = (layer->src_h / 2) << 16 |
-					     layer->src_w;
-		cmd->top.luma_processed_pitch *= 2;
-		cmd->top.luma_src_pitch *= 2;
-		cmd->top.chroma_processed_pitch *= 2;
-		cmd->top.chroma_src_pitch *= 2;
-
-		/* Enable directional deinterlacing processing */
-		cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
-		cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
-		cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
-	}
-
-	/* Update hvsrc lut coef */
-	scale_h = SCALE_FACTOR * layer->dst_w / layer->src_w;
-	sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
-
-	scale_v = SCALE_FACTOR * layer->dst_h / layer->src_h;
-	sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
-
-	if (first_prepare) {
-		/* Prevent VTG shutdown */
-		if (clk_prepare_enable(hqvdp->clk_pix_main)) {
-			DRM_ERROR("Failed to prepare/enable pix main clk\n");
-			return -ENXIO;
-		}
-
-		/* Register VTG Vsync callback to handle bottom fields */
-		if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
-				sti_vtg_register_client(hqvdp->vtg,
-					&hqvdp->vtg_nb, layer->mixer_id)) {
-			DRM_ERROR("Cannot register VTG notifier\n");
-			return -ENXIO;
-		}
-	}
-
-	return 0;
-}
-
-static int sti_hqvdp_commit_layer(struct sti_layer *layer)
-{
-	struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
-	int cmd_offset;
-
-	dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
-
-	cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
-	if (cmd_offset == -1) {
-		DRM_ERROR("No available hqvdp_cmd now\n");
-		return -EBUSY;
-	}
-
-	writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
-			hqvdp->regs + HQVDP_MBX_NEXT_CMD);
-
-	hqvdp->curr_field_count++;
-
-	/* Interlaced : get ready to display the bottom field at next Vsync */
-	if (layer->fb->flags & DRM_MODE_FB_INTERLACED)
-		hqvdp->btm_field_pending = true;
-
-	dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
-			__func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
-
-	return 0;
-}
-
-static int sti_hqvdp_disable_layer(struct sti_layer *layer)
-{
-	struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
 	int i;
 
-	DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+	DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&hqvdp->plane));
 
 	/* Unregister VTG Vsync callback */
-	if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
-		sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
+	if (sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
 		DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
 
 	/* Set next cmd to NULL */
@@ -691,15 +539,10 @@
 	/* VTG can stop now */
 	clk_disable_unprepare(hqvdp->clk_pix_main);
 
-	if (i == POLL_MAX_ATTEMPT) {
+	if (i == POLL_MAX_ATTEMPT)
 		DRM_ERROR("XP70 could not revert to idle\n");
-		return -ENXIO;
-	}
 
-	/* disable VID plane */
-	hqvdp->vid_plane->funcs->disable_plane(hqvdp->vid_plane);
-
-	return 0;
+	hqvdp->plane.status = STI_PLANE_DISABLED;
 }
 
 /**
@@ -724,6 +567,14 @@
 		return 0;
 	}
 
+	if (hqvdp->plane.status == STI_PLANE_FLUSHING) {
+		/* the disable must be synchronized with the vsync event */
+		DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
+				 sti_plane_to_str(&hqvdp->plane));
+
+		sti_hqvdp_disable(hqvdp);
+	}
+
 	if (hqvdp->btm_field_pending) {
 		/* Create the btm field command from the current one */
 		btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
@@ -758,32 +609,10 @@
 	return 0;
 }
 
-static struct drm_plane *sti_hqvdp_find_vid(struct drm_device *dev, int id)
+static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
 {
-	struct drm_plane *plane;
-
-	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
-		struct sti_layer *layer = to_sti_layer(plane);
-
-		if (layer->desc == id)
-			return plane;
-	}
-
-	return NULL;
-}
-
-static void sti_hqvd_init(struct sti_layer *layer)
-{
-	struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
 	int size;
 
-	/* find the plane macthing with vid 0 */
-	hqvdp->vid_plane = sti_hqvdp_find_vid(hqvdp->drm_dev, STI_VID_0);
-	if (!hqvdp->vid_plane) {
-		DRM_ERROR("Cannot find Main video layer\n");
-		return;
-	}
-
 	hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
 
 	/* Allocate memory for the VDP commands */
@@ -799,24 +628,213 @@
 	memset(hqvdp->hqvdp_cmd, 0, size);
 }
 
-static const struct sti_layer_funcs hqvdp_ops = {
-	.get_formats = sti_hqvdp_get_formats,
-	.get_nb_formats = sti_hqvdp_get_nb_formats,
-	.init = sti_hqvd_init,
-	.prepare = sti_hqvdp_prepare_layer,
-	.commit = sti_hqvdp_commit_layer,
-	.disable = sti_hqvdp_disable_layer,
+static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
+				    struct drm_plane_state *oldstate)
+{
+	struct drm_plane_state *state = drm_plane->state;
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
+	struct drm_crtc *crtc = state->crtc;
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_display_mode *mode = &crtc->mode;
+	int dst_x = state->crtc_x;
+	int dst_y = state->crtc_y;
+	int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+	int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	/* src coordinates are in 16.16 fixed-point format */
+	int src_x = state->src_x >> 16;
+	int src_y = state->src_y >> 16;
+	int src_w = state->src_w >> 16;
+	int src_h = state->src_h >> 16;
+	bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
+	struct drm_gem_cma_object *cma_obj;
+	struct sti_hqvdp_cmd *cmd;
+	int scale_h, scale_v;
+	int cmd_offset;
+
+	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+		      crtc->base.id, sti_mixer_to_str(mixer),
+		      drm_plane->base.id, sti_plane_to_str(plane));
+	DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+		      sti_plane_to_str(plane),
+		      dst_w, dst_h, dst_x, dst_y,
+		      src_w, src_h, src_x, src_y);
+
+	cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
+	if (cmd_offset == -1) {
+		DRM_ERROR("No available hqvdp_cmd now\n");
+		return;
+	}
+	cmd = hqvdp->hqvdp_cmd + cmd_offset;
+
+	if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
+					src_w, src_h,
+					dst_w, dst_h)) {
+		DRM_ERROR("Scaling beyond HW capabilities\n");
+		return;
+	}
+
+	/* Static parameters, defaulting to progressive mode */
+	cmd->top.config = TOP_CONFIG_PROGRESSIVE;
+	cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
+	cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
+	cmd->csdi.config = CSDI_CONFIG_PROG;
+
+	/* VC1RE, FMD bypassed: keep everything set to 0
+	 * IQI/P2I bypassed */
+	cmd->iqi.config = IQI_CONFIG_DFLT;
+	cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
+	cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
+	cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
+
+	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+	if (!cma_obj) {
+		DRM_ERROR("Can't get CMA GEM object for fb\n");
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
+			 (char *)&fb->pixel_format,
+			 (unsigned long)cma_obj->paddr);
+
+	/* Buffer planes address */
+	cmd->top.current_luma = (u32)cma_obj->paddr + fb->offsets[0];
+	cmd->top.current_chroma = (u32)cma_obj->paddr + fb->offsets[1];
+
+	/* Pitches */
+	cmd->top.luma_processed_pitch = fb->pitches[0];
+	cmd->top.luma_src_pitch = fb->pitches[0];
+	cmd->top.chroma_processed_pitch = fb->pitches[1];
+	cmd->top.chroma_src_pitch = fb->pitches[1];
+
+	/* Input / output size
+	 * Align to upper even value */
+	dst_w = ALIGN(dst_w, 2);
+	dst_h = ALIGN(dst_h, 2);
+
+	if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
+	    (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
+	    (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
+	    (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
+		DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
+			  src_w, src_h,
+			  dst_w, dst_h);
+		return;
+	}
+
+	cmd->top.input_viewport_size = src_h << 16 | src_w;
+	cmd->top.input_frame_size = src_h << 16 | src_w;
+	cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w;
+	cmd->top.input_viewport_ori = src_y << 16 | src_x;
+
+	/* Handle interlaced */
+	if (fb->flags & DRM_MODE_FB_INTERLACED) {
+		/* Top field to display */
+		cmd->top.config = TOP_CONFIG_INTER_TOP;
+
+		/* Update pitches and vert size */
+		cmd->top.input_frame_size = (src_h / 2) << 16 | src_w;
+		cmd->top.luma_processed_pitch *= 2;
+		cmd->top.luma_src_pitch *= 2;
+		cmd->top.chroma_processed_pitch *= 2;
+		cmd->top.chroma_src_pitch *= 2;
+
+		/* Enable directional deinterlacing processing */
+		cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
+		cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
+		cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
+	}
+
+	/* Update hvsrc lut coef */
+	scale_h = SCALE_FACTOR * dst_w / src_w;
+	sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
+
+	scale_v = SCALE_FACTOR * dst_h / src_h;
+	sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
+
+	if (first_prepare) {
+		/* Prevent VTG shutdown */
+		if (clk_prepare_enable(hqvdp->clk_pix_main)) {
+			DRM_ERROR("Failed to prepare/enable pix main clk\n");
+			return;
+		}
+
+		/* Register VTG Vsync callback to handle bottom fields */
+		if (sti_vtg_register_client(hqvdp->vtg,
+					    &hqvdp->vtg_nb,
+					    mixer->id)) {
+			DRM_ERROR("Cannot register VTG notifier\n");
+			return;
+		}
+	}
+
+	writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
+	       hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+	hqvdp->curr_field_count++;
+
+	/* Interlaced: get ready to display the bottom field at the next Vsync */
+	if (fb->flags & DRM_MODE_FB_INTERLACED)
+		hqvdp->btm_field_pending = true;
+
+	dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
+		__func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
+
+	plane->status = STI_PLANE_UPDATED;
+}
+
+static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
+				     struct drm_plane_state *oldstate)
+{
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
+
+	if (!drm_plane->crtc) {
+		DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
+				 drm_plane->base.id);
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+			 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+			 drm_plane->base.id, sti_plane_to_str(plane));
+
+	plane->status = STI_PLANE_DISABLING;
+}
+
+static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
+	.atomic_update = sti_hqvdp_atomic_update,
+	.atomic_disable = sti_hqvdp_atomic_disable,
 };
 
-struct sti_layer *sti_hqvdp_create(struct device *dev)
+static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
+					  struct device *dev, int desc)
 {
 	struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
+	int res;
 
-	hqvdp->layer.ops = &hqvdp_ops;
+	hqvdp->plane.desc = desc;
+	hqvdp->plane.status = STI_PLANE_DISABLED;
 
-	return &hqvdp->layer;
+	sti_hqvdp_init(hqvdp);
+
+	res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1,
+				       &sti_plane_helpers_funcs,
+				       hqvdp_supported_formats,
+				       ARRAY_SIZE(hqvdp_supported_formats),
+				       DRM_PLANE_TYPE_OVERLAY);
+	if (res) {
+		DRM_ERROR("Failed to initialize universal plane\n");
+		return NULL;
+	}
+
+	drm_plane_helper_add(&hqvdp->plane.drm_plane, &sti_hqvdp_helpers_funcs);
+
+	sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
+
+	return &hqvdp->plane.drm_plane;
 }
-EXPORT_SYMBOL(sti_hqvdp_create);
 
 static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
 {
@@ -859,6 +877,12 @@
 	} *header;
 
 	DRM_DEBUG_DRIVER("\n");
+
+	if (hqvdp->xp70_initialized) {
+		DRM_INFO("HQVDP XP70 already initialized\n");
+		return;
+	}
+
 	/* Check firmware parts */
 	if (!firmware) {
 		DRM_ERROR("Firmware not available\n");
@@ -946,7 +970,10 @@
 	/* Launch Vsync */
 	writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
 
-	DRM_INFO("HQVDP XP70 started\n");
+	DRM_INFO("HQVDP XP70 initialized\n");
+
+	hqvdp->xp70_initialized = true;
+
 out:
 	release_firmware(firmware);
 }
@@ -955,7 +982,7 @@
 {
 	struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
 	struct drm_device *drm_dev = data;
-	struct sti_layer *layer;
+	struct drm_plane *plane;
 	int err;
 
 	DRM_DEBUG_DRIVER("\n");
@@ -971,13 +998,10 @@
 		return err;
 	}
 
-	layer = sti_layer_create(hqvdp->dev, STI_HQVDP_0, hqvdp->regs);
-	if (!layer) {
+	/* Create HQVDP plane once xp70 is initialized */
+	plane = sti_hqvdp_create(drm_dev, hqvdp->dev, STI_HQVDP_0);
+	if (!plane)
 		DRM_ERROR("Can't create HQVDP plane\n");
-		return -ENOMEM;
-	}
-
-	sti_drm_plane_init(drm_dev, layer, 1, DRM_PLANE_TYPE_OVERLAY);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.h b/drivers/gpu/drm/sti/sti_hqvdp.h
deleted file mode 100644
index cd5ecd0..0000000
--- a/drivers/gpu/drm/sti/sti_hqvdp.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_HQVDP_H_
-#define _STI_HQVDP_H_
-
-struct sti_layer *sti_hqvdp_create(struct device *dev);
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c
deleted file mode 100644
index 899104f..0000000
--- a/drivers/gpu/drm/sti/sti_layer.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
- *          Fabien Dessenne <fabien.dessenne@st.com>
- *          for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-
-#include "sti_compositor.h"
-#include "sti_cursor.h"
-#include "sti_gdp.h"
-#include "sti_hqvdp.h"
-#include "sti_layer.h"
-#include "sti_vid.h"
-
-const char *sti_layer_to_str(struct sti_layer *layer)
-{
-	switch (layer->desc) {
-	case STI_GDP_0:
-		return "GDP0";
-	case STI_GDP_1:
-		return "GDP1";
-	case STI_GDP_2:
-		return "GDP2";
-	case STI_GDP_3:
-		return "GDP3";
-	case STI_VID_0:
-		return "VID0";
-	case STI_VID_1:
-		return "VID1";
-	case STI_CURSOR:
-		return "CURSOR";
-	case STI_HQVDP_0:
-		return "HQVDP0";
-	default:
-		return "<UNKNOWN LAYER>";
-	}
-}
-EXPORT_SYMBOL(sti_layer_to_str);
-
-struct sti_layer *sti_layer_create(struct device *dev, int desc,
-				   void __iomem *baseaddr)
-{
-
-	struct sti_layer *layer = NULL;
-
-	switch (desc & STI_LAYER_TYPE_MASK) {
-	case STI_GDP:
-		layer = sti_gdp_create(dev, desc);
-		break;
-	case STI_VID:
-		layer = sti_vid_create(dev);
-		break;
-	case STI_CUR:
-		layer = sti_cursor_create(dev);
-		break;
-	case STI_VDP:
-		layer = sti_hqvdp_create(dev);
-		break;
-	}
-
-	if (!layer) {
-		DRM_ERROR("Failed to create layer\n");
-		return NULL;
-	}
-
-	layer->desc = desc;
-	layer->dev = dev;
-	layer->regs = baseaddr;
-
-	layer->ops->init(layer);
-
-	DRM_DEBUG_DRIVER("%s created\n", sti_layer_to_str(layer));
-
-	return layer;
-}
-EXPORT_SYMBOL(sti_layer_create);
-
-int sti_layer_prepare(struct sti_layer *layer,
-		      struct drm_crtc *crtc,
-		      struct drm_framebuffer *fb,
-		      struct drm_display_mode *mode, int mixer_id,
-		      int dest_x, int dest_y, int dest_w, int dest_h,
-		      int src_x, int src_y, int src_w, int src_h)
-{
-	int ret;
-	unsigned int i;
-	struct drm_gem_cma_object *cma_obj;
-
-	if (!layer || !fb || !mode) {
-		DRM_ERROR("Null fb, layer or mode\n");
-		return 1;
-	}
-
-	cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-	if (!cma_obj) {
-		DRM_ERROR("Can't get CMA GEM object for fb\n");
-		return 1;
-	}
-
-	layer->crtc = crtc;
-	layer->fb = fb;
-	layer->mode = mode;
-	layer->mixer_id = mixer_id;
-	layer->dst_x = dest_x;
-	layer->dst_y = dest_y;
-	layer->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x);
-	layer->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y);
-	layer->src_x = src_x;
-	layer->src_y = src_y;
-	layer->src_w = src_w;
-	layer->src_h = src_h;
-	layer->format = fb->pixel_format;
-	layer->vaddr = cma_obj->vaddr;
-	layer->paddr = cma_obj->paddr;
-	for (i = 0; i < 4; i++) {
-		layer->pitches[i] = fb->pitches[i];
-		layer->offsets[i] = fb->offsets[i];
-	}
-
-	DRM_DEBUG_DRIVER("%s is associated with mixer_id %d\n",
-			 sti_layer_to_str(layer),
-			 layer->mixer_id);
-	DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
-			 sti_layer_to_str(layer),
-			 layer->dst_w, layer->dst_h, layer->dst_x, layer->dst_y,
-			 layer->src_w, layer->src_h, layer->src_x,
-			 layer->src_y);
-
-	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
-			 (char *)&layer->format, (unsigned long)layer->paddr);
-
-	if (!layer->ops->prepare)
-		goto err_no_prepare;
-
-	ret = layer->ops->prepare(layer, !layer->enabled);
-	if (!ret)
-		layer->enabled = true;
-
-	return ret;
-
-err_no_prepare:
-	DRM_ERROR("Cannot prepare\n");
-	return 1;
-}
-
-int sti_layer_commit(struct sti_layer *layer)
-{
-	if (!layer)
-		return 1;
-
-	if (!layer->ops->commit)
-		goto err_no_commit;
-
-	return layer->ops->commit(layer);
-
-err_no_commit:
-	DRM_ERROR("Cannot commit\n");
-	return 1;
-}
-
-int sti_layer_disable(struct sti_layer *layer)
-{
-	int ret;
-
-	DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
-	if (!layer)
-		return 1;
-
-	if (!layer->enabled)
-		return 0;
-
-	if (!layer->ops->disable)
-		goto err_no_disable;
-
-	ret = layer->ops->disable(layer);
-	if (!ret)
-		layer->enabled = false;
-	else
-		DRM_ERROR("Disable failed\n");
-
-	return ret;
-
-err_no_disable:
-	DRM_ERROR("Cannot disable\n");
-	return 1;
-}
-
-const uint32_t *sti_layer_get_formats(struct sti_layer *layer)
-{
-	if (!layer)
-		return NULL;
-
-	if (!layer->ops->get_formats)
-		return NULL;
-
-	return layer->ops->get_formats(layer);
-}
-
-unsigned int sti_layer_get_nb_formats(struct sti_layer *layer)
-{
-	if (!layer)
-		return 0;
-
-	if (!layer->ops->get_nb_formats)
-		return 0;
-
-	return layer->ops->get_nb_formats(layer);
-}
diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h
deleted file mode 100644
index ceff497..0000000
--- a/drivers/gpu/drm/sti/sti_layer.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
- *          Fabien Dessenne <fabien.dessenne@st.com>
- *          for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#ifndef _STI_LAYER_H_
-#define _STI_LAYER_H_
-
-#include <drm/drmP.h>
-
-#define to_sti_layer(x) container_of(x, struct sti_layer, plane)
-
-#define STI_LAYER_TYPE_SHIFT 8
-#define STI_LAYER_TYPE_MASK (~((1<<STI_LAYER_TYPE_SHIFT)-1))
-
-struct sti_layer;
-
-enum sti_layer_type {
-	STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
-	STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
-	STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
-	STI_BCK = 4 << STI_LAYER_TYPE_SHIFT,
-	STI_VDP = 5 << STI_LAYER_TYPE_SHIFT
-};
-
-enum sti_layer_id_of_type {
-	STI_ID_0 = 0,
-	STI_ID_1 = 1,
-	STI_ID_2 = 2,
-	STI_ID_3 = 3
-};
-
-enum sti_layer_desc {
-	STI_GDP_0       = STI_GDP | STI_ID_0,
-	STI_GDP_1       = STI_GDP | STI_ID_1,
-	STI_GDP_2       = STI_GDP | STI_ID_2,
-	STI_GDP_3       = STI_GDP | STI_ID_3,
-	STI_VID_0       = STI_VID | STI_ID_0,
-	STI_VID_1       = STI_VID | STI_ID_1,
-	STI_HQVDP_0     = STI_VDP | STI_ID_0,
-	STI_CURSOR      = STI_CUR,
-	STI_BACK        = STI_BCK
-};
-
-/**
- * STI layer functions structure
- *
- * @get_formats:	get layer supported formats
- * @get_nb_formats:	get number of format supported
- * @init:               initialize the layer
- * @prepare:		prepare layer before rendering
- * @commit:		set layer for rendering
- * @disable:		disable layer
- */
-struct sti_layer_funcs {
-	const uint32_t* (*get_formats)(struct sti_layer *layer);
-	unsigned int (*get_nb_formats)(struct sti_layer *layer);
-	void (*init)(struct sti_layer *layer);
-	int (*prepare)(struct sti_layer *layer, bool first_prepare);
-	int (*commit)(struct sti_layer *layer);
-	int (*disable)(struct sti_layer *layer);
-};
-
-/**
- * STI layer structure
- *
- * @plane:              drm plane it is bound to (if any)
- * @fb:                 drm fb it is bound to
- * @crtc:               crtc it is bound to
- * @mode:               display mode
- * @desc:               layer type & id
- * @device:		driver device
- * @regs:		layer registers
- * @ops:                layer functions
- * @zorder:             layer z-order
- * @mixer_id:           id of the mixer used to display the layer
- * @enabled:            to know if the layer is active or not
- * @src_x src_y:        coordinates of the input (fb) area
- * @src_w src_h:        size of the input (fb) area
- * @dst_x dst_y:        coordinates of the output (crtc) area
- * @dst_w dst_h:        size of the output (crtc) area
- * @format:             format
- * @pitches:            pitch of 'planes' (eg: Y, U, V)
- * @offsets:            offset of 'planes'
- * @vaddr:              virtual address of the input buffer
- * @paddr:              physical address of the input buffer
- */
-struct sti_layer {
-	struct drm_plane plane;
-	struct drm_framebuffer *fb;
-	struct drm_crtc *crtc;
-	struct drm_display_mode *mode;
-	enum sti_layer_desc desc;
-	struct device *dev;
-	void __iomem *regs;
-	const struct sti_layer_funcs *ops;
-	int zorder;
-	int mixer_id;
-	bool enabled;
-	int src_x, src_y;
-	int src_w, src_h;
-	int dst_x, dst_y;
-	int dst_w, dst_h;
-	uint32_t format;
-	unsigned int pitches[4];
-	unsigned int offsets[4];
-	void *vaddr;
-	dma_addr_t paddr;
-};
-
-struct sti_layer *sti_layer_create(struct device *dev, int desc,
-			void __iomem *baseaddr);
-int sti_layer_prepare(struct sti_layer *layer,
-			struct drm_crtc *crtc,
-			struct drm_framebuffer *fb,
-			struct drm_display_mode *mode,
-			int mixer_id,
-			int dest_x, int dest_y,
-			int dest_w, int dest_h,
-			int src_x, int src_y,
-			int src_w, int src_h);
-int sti_layer_commit(struct sti_layer *layer);
-int sti_layer_disable(struct sti_layer *layer);
-const uint32_t *sti_layer_get_formats(struct sti_layer *layer);
-unsigned int sti_layer_get_nb_formats(struct sti_layer *layer);
-const char *sti_layer_to_str(struct sti_layer *layer);
-
-#endif
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 13a4b84..0182e93 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -58,6 +58,7 @@
 		return "<UNKNOWN MIXER>";
 	}
 }
+EXPORT_SYMBOL(sti_mixer_to_str);
 
 static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id)
 {
@@ -101,52 +102,57 @@
 	sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds);
 }
 
-int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer)
+int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane)
 {
-	int layer_id = 0, depth = layer->zorder;
+	int plane_id, depth = plane->zorder;
+	unsigned int i;
 	u32 mask, val;
 
-	if (depth >= GAM_MIXER_NB_DEPTH_LEVEL)
+	if ((depth < 1) || (depth > GAM_MIXER_NB_DEPTH_LEVEL))
 		return 1;
 
-	switch (layer->desc) {
+	switch (plane->desc) {
 	case STI_GDP_0:
-		layer_id = GAM_DEPTH_GDP0_ID;
+		plane_id = GAM_DEPTH_GDP0_ID;
 		break;
 	case STI_GDP_1:
-		layer_id = GAM_DEPTH_GDP1_ID;
+		plane_id = GAM_DEPTH_GDP1_ID;
 		break;
 	case STI_GDP_2:
-		layer_id = GAM_DEPTH_GDP2_ID;
+		plane_id = GAM_DEPTH_GDP2_ID;
 		break;
 	case STI_GDP_3:
-		layer_id = GAM_DEPTH_GDP3_ID;
+		plane_id = GAM_DEPTH_GDP3_ID;
 		break;
-	case STI_VID_0:
 	case STI_HQVDP_0:
-		layer_id = GAM_DEPTH_VID0_ID;
-		break;
-	case STI_VID_1:
-		layer_id = GAM_DEPTH_VID1_ID;
+		plane_id = GAM_DEPTH_VID0_ID;
 		break;
 	case STI_CURSOR:
 		/* no need to set depth for cursor */
 		return 0;
 	default:
-		DRM_ERROR("Unknown layer %d\n", layer->desc);
+		DRM_ERROR("Unknown plane %d\n", plane->desc);
 		return 1;
 	}
-	mask = GAM_DEPTH_MASK_ID << (3 * depth);
-	layer_id = layer_id << (3 * depth);
+
+	/* Find the depth level, if any, that was previously assigned to this plane */
+	val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
+	for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++) {
+		mask = GAM_DEPTH_MASK_ID << (3 * i);
+		if ((val & mask) == plane_id << (3 * i))
+			break;
+	}
+
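+	/* clear the previous slot (if found) together with the new one */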
+	mask |= GAM_DEPTH_MASK_ID << (3 * (depth - 1));
+	plane_id = plane_id << (3 * (depth - 1));
 
 	DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer),
-			 sti_layer_to_str(layer), depth);
+			 sti_plane_to_str(plane), depth);
 	dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n",
-		layer_id, mask);
+		plane_id, mask);
 
-	val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
 	val &= ~mask;
-	val |= layer_id;
+	val |= plane_id;
 	sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val);
 
 	dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n",
@@ -176,9 +182,9 @@
 	return 0;
 }
 
-static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
+static u32 sti_mixer_get_plane_mask(struct sti_plane *plane)
 {
-	switch (layer->desc) {
+	switch (plane->desc) {
 	case STI_BACK:
 		return GAM_CTL_BACK_MASK;
 	case STI_GDP_0:
@@ -189,11 +195,8 @@
 		return GAM_CTL_GDP2_MASK;
 	case STI_GDP_3:
 		return GAM_CTL_GDP3_MASK;
-	case STI_VID_0:
 	case STI_HQVDP_0:
 		return GAM_CTL_VID0_MASK;
-	case STI_VID_1:
-		return GAM_CTL_VID1_MASK;
 	case STI_CURSOR:
 		return GAM_CTL_CURSOR_MASK;
 	default:
@@ -201,17 +204,17 @@
 	}
 }
 
-int sti_mixer_set_layer_status(struct sti_mixer *mixer,
-			       struct sti_layer *layer, bool status)
+int sti_mixer_set_plane_status(struct sti_mixer *mixer,
+			       struct sti_plane *plane, bool status)
 {
 	u32 mask, val;
 
 	DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable",
-			 sti_mixer_to_str(mixer), sti_layer_to_str(layer));
+			 sti_mixer_to_str(mixer), sti_plane_to_str(plane));
 
-	mask = sti_mixer_get_layer_mask(layer);
+	mask = sti_mixer_get_plane_mask(plane);
 	if (!mask) {
-		DRM_ERROR("Can not find layer mask\n");
+		DRM_ERROR("Can't find layer mask\n");
 		return -EINVAL;
 	}
 
@@ -223,15 +226,6 @@
 	return 0;
 }
 
-void sti_mixer_clear_all_layers(struct sti_mixer *mixer)
-{
-	u32 val;
-
-	DRM_DEBUG_DRIVER("%s clear all layer\n", sti_mixer_to_str(mixer));
-	val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000;
-	sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
-}
-
 void sti_mixer_set_matrix(struct sti_mixer *mixer)
 {
 	unsigned int i;
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index b972821..efb1a9a 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -11,10 +11,16 @@
 
 #include <drm/drmP.h>
 
-#include "sti_layer.h"
+#include "sti_plane.h"
 
 #define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc)
 
+enum sti_mixer_status {
+	STI_MIXER_READY,
+	STI_MIXER_DISABLING,
+	STI_MIXER_DISABLED,
+};
+
 /**
  * STI Mixer subdevice structure
  *
@@ -23,33 +29,32 @@
  * @id: id of the mixer
  * @drm_crtc: crtc object link to the mixer
  * @pending_event: set if a flip event is pending on crtc
- * @enabled: to know if the mixer is active or not
+ * @status: current status of the mixer
  */
 struct sti_mixer {
 	struct device *dev;
 	void __iomem *regs;
 	int id;
-	struct drm_crtc	drm_crtc;
+	struct drm_crtc drm_crtc;
 	struct drm_pending_vblank_event *pending_event;
-	bool enabled;
+	enum sti_mixer_status status;
 };
 
 const char *sti_mixer_to_str(struct sti_mixer *mixer);
 
 struct sti_mixer *sti_mixer_create(struct device *dev, int id,
-		void __iomem *baseaddr);
+				   void __iomem *baseaddr);
 
-int sti_mixer_set_layer_status(struct sti_mixer *mixer,
-		struct sti_layer *layer, bool status);
-void sti_mixer_clear_all_layers(struct sti_mixer *mixer);
-int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer);
+int sti_mixer_set_plane_status(struct sti_mixer *mixer,
+			       struct sti_plane *plane, bool status);
+int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane);
 int sti_mixer_active_video_area(struct sti_mixer *mixer,
-		struct drm_display_mode *mode);
+				struct drm_display_mode *mode);
 
 void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
 
 /* depth in Cross-bar control = z order */
-#define GAM_MIXER_NB_DEPTH_LEVEL 7
+#define GAM_MIXER_NB_DEPTH_LEVEL 6
 
 #define STI_MIXER_MAIN 0
 #define STI_MIXER_AUX  1
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
new file mode 100644
index 0000000..d5c5e91
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ *          Fabien Dessenne <fabien.dessenne@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "sti_compositor.h"
+#include "sti_drv.h"
+#include "sti_plane.h"
+
+/* (Background) < GDP0 < GDP1 < HQVDP0 < GDP2 < GDP3 < (Foreground) */
+enum sti_plane_desc sti_plane_default_zorder[] = {
+	STI_GDP_0,
+	STI_GDP_1,
+	STI_HQVDP_0,
+	STI_GDP_2,
+	STI_GDP_3,
+};
+
+const char *sti_plane_to_str(struct sti_plane *plane)
+{
+	switch (plane->desc) {
+	case STI_GDP_0:
+		return "GDP0";
+	case STI_GDP_1:
+		return "GDP1";
+	case STI_GDP_2:
+		return "GDP2";
+	case STI_GDP_3:
+		return "GDP3";
+	case STI_HQVDP_0:
+		return "HQVDP0";
+	case STI_CURSOR:
+		return "CURSOR";
+	default:
+		return "<UNKNOWN PLANE>";
+	}
+}
+EXPORT_SYMBOL(sti_plane_to_str);
+
+static void sti_plane_destroy(struct drm_plane *drm_plane)
+{
+	DRM_DEBUG_DRIVER("\n");
+
+	drm_plane_helper_disable(drm_plane);
+	drm_plane_cleanup(drm_plane);
+}
+
+static int sti_plane_set_property(struct drm_plane *drm_plane,
+				  struct drm_property *property,
+				  uint64_t val)
+{
+	struct drm_device *dev = drm_plane->dev;
+	struct sti_private *private = dev->dev_private;
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+
+	DRM_DEBUG_DRIVER("\n");
+
+	if (property == private->plane_zorder_property) {
+		plane->zorder = val;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static void sti_plane_attach_zorder_property(struct drm_plane *drm_plane)
+{
+	struct drm_device *dev = drm_plane->dev;
+	struct sti_private *private = dev->dev_private;
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct drm_property *prop;
+
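+	/* the zpos property is created once and shared by all planes */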
+	prop = private->plane_zorder_property;
+	if (!prop) {
+		prop = drm_property_create_range(dev, 0, "zpos", 1,
+						 GAM_MIXER_NB_DEPTH_LEVEL);
+		if (!prop)
+			return;
+
+		private->plane_zorder_property = prop;
+	}
+
+	drm_object_attach_property(&drm_plane->base, prop, plane->zorder);
+}
+
+void sti_plane_init_property(struct sti_plane *plane,
+			     enum drm_plane_type type)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(sti_plane_default_zorder); i++)
+		if (sti_plane_default_zorder[i] == plane->desc)
+			break;
+
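+	/* zpos is 1-based: default zorder is the default-ordering index + 1 */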
+	plane->zorder = i + 1;
+
+	if (type == DRM_PLANE_TYPE_OVERLAY)
+		sti_plane_attach_zorder_property(&plane->drm_plane);
+
+	DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%d\n",
+			 plane->drm_plane.base.id,
+			 sti_plane_to_str(plane), plane->zorder);
+}
+EXPORT_SYMBOL(sti_plane_init_property);
+
+struct drm_plane_funcs sti_plane_helpers_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = sti_plane_destroy,
+	.set_property = sti_plane_set_property,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+EXPORT_SYMBOL(sti_plane_helpers_funcs);
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
new file mode 100644
index 0000000..86f1e6f
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_PLANE_H_
+#define _STI_PLANE_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
+extern struct drm_plane_funcs sti_plane_helpers_funcs;
+
+#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)
+
+#define STI_PLANE_TYPE_SHIFT 8
+#define STI_PLANE_TYPE_MASK (~((1 << STI_PLANE_TYPE_SHIFT) - 1))
+
+enum sti_plane_type {
+	STI_GDP = 1 << STI_PLANE_TYPE_SHIFT,
+	STI_VDP = 2 << STI_PLANE_TYPE_SHIFT,
+	STI_CUR = 3 << STI_PLANE_TYPE_SHIFT,
+	STI_BCK = 4 << STI_PLANE_TYPE_SHIFT
+};
+
+enum sti_plane_id_of_type {
+	STI_ID_0 = 0,
+	STI_ID_1 = 1,
+	STI_ID_2 = 2,
+	STI_ID_3 = 3
+};
+
+enum sti_plane_desc {
+	STI_GDP_0       = STI_GDP | STI_ID_0,
+	STI_GDP_1       = STI_GDP | STI_ID_1,
+	STI_GDP_2       = STI_GDP | STI_ID_2,
+	STI_GDP_3       = STI_GDP | STI_ID_3,
+	STI_HQVDP_0     = STI_VDP | STI_ID_0,
+	STI_CURSOR      = STI_CUR,
+	STI_BACK        = STI_BCK
+};
+
+enum sti_plane_status {
+	STI_PLANE_READY,
+	STI_PLANE_UPDATED,
+	STI_PLANE_DISABLING,
+	STI_PLANE_FLUSHING,
+	STI_PLANE_DISABLED,
+};
+
+/**
+ * STI plane structure
+ *
+ * @drm_plane:          drm plane it is bound to
+ * @desc:               plane type & id
+ * @status:             current status of the plane
+ * @zorder:             plane z-order
+ */
+struct sti_plane {
+	struct drm_plane drm_plane;
+	enum sti_plane_desc desc;
+	enum sti_plane_status status;
+	int zorder;
+};
+
+const char *sti_plane_to_str(struct sti_plane *plane);
+void sti_plane_init_property(struct sti_plane *plane,
+			     enum drm_plane_type type);
+#endif
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 5cc5311..c1aac8e 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -16,7 +16,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#include "sti_drm_crtc.h"
+#include "sti_crtc.h"
 
 /* glue registers */
 #define TVO_CSC_MAIN_M0                  0x000
@@ -473,7 +473,7 @@
 {
 	struct sti_tvout *tvout = to_sti_tvout(encoder);
 
-	tvout_dvo_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
+	tvout_dvo_start(tvout, sti_crtc_is_main(encoder->crtc));
 }
 
 static void sti_dvo_encoder_disable(struct drm_encoder *encoder)
@@ -523,7 +523,7 @@
 {
 	struct sti_tvout *tvout = to_sti_tvout(encoder);
 
-	tvout_hda_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
+	tvout_hda_start(tvout, sti_crtc_is_main(encoder->crtc));
 }
 
 static void sti_hda_encoder_disable(struct drm_encoder *encoder)
@@ -575,7 +575,7 @@
 {
 	struct sti_tvout *tvout = to_sti_tvout(encoder);
 
-	tvout_hdmi_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
+	tvout_hdmi_start(tvout, sti_crtc_is_main(encoder->crtc));
 }
 
 static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
@@ -644,7 +644,6 @@
 	struct sti_tvout *tvout = dev_get_drvdata(dev);
 	struct drm_device *drm_dev = data;
 	unsigned int i;
-	int ret;
 
 	tvout->drm_dev = drm_dev;
 
@@ -658,17 +657,15 @@
 
 	sti_tvout_create_encoders(drm_dev, tvout);
 
-	ret = component_bind_all(dev, drm_dev);
-	if (ret)
-		sti_tvout_destroy_encoders(tvout);
-
-	return ret;
+	return 0;
 }
 
 static void sti_tvout_unbind(struct device *dev, struct device *master,
 	void *data)
 {
-	/* do nothing */
+	struct sti_tvout *tvout = dev_get_drvdata(dev);
+
+	sti_tvout_destroy_encoders(tvout);
 }
 
 static const struct component_ops sti_tvout_ops = {
@@ -676,34 +673,12 @@
 	.unbind	= sti_tvout_unbind,
 };
 
-static int compare_of(struct device *dev, void *data)
-{
-	return dev->of_node == data;
-}
-
-static int sti_tvout_master_bind(struct device *dev)
-{
-	return 0;
-}
-
-static void sti_tvout_master_unbind(struct device *dev)
-{
-	/* do nothing */
-}
-
-static const struct component_master_ops sti_tvout_master_ops = {
-	.bind = sti_tvout_master_bind,
-	.unbind = sti_tvout_master_unbind,
-};
-
 static int sti_tvout_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *node = dev->of_node;
 	struct sti_tvout *tvout;
 	struct resource *res;
-	struct device_node *child_np;
-	struct component_match *match = NULL;
 
 	DRM_INFO("%s\n", __func__);
 
@@ -734,24 +709,11 @@
 
 	platform_set_drvdata(pdev, tvout);
 
-	of_platform_populate(node, NULL, NULL, dev);
-
-	child_np = of_get_next_available_child(node, NULL);
-
-	while (child_np) {
-		component_match_add(dev, &match, compare_of, child_np);
-		of_node_put(child_np);
-		child_np = of_get_next_available_child(node, child_np);
-	}
-
-	component_master_add_with_match(dev, &sti_tvout_master_ops, match);
-
 	return component_add(dev, &sti_tvout_ops);
 }
 
 static int sti_tvout_remove(struct platform_device *pdev)
 {
-	component_master_del(&pdev->dev, &sti_tvout_master_ops);
 	component_del(&pdev->dev, &sti_tvout_ops);
 	return 0;
 }
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 10ced6a..a8254cc 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -6,7 +6,7 @@
 
 #include <drm/drmP.h>
 
-#include "sti_layer.h"
+#include "sti_plane.h"
 #include "sti_vid.h"
 #include "sti_vtg.h"
 
@@ -43,35 +43,37 @@
 #define VID_MPR2_BT709          0x07150545
 #define VID_MPR3_BT709          0x00000AE8
 
-static int sti_vid_prepare_layer(struct sti_layer *vid, bool first_prepare)
+void sti_vid_commit(struct sti_vid *vid,
+		    struct drm_plane_state *state)
 {
-	u32 val;
+	struct drm_crtc *crtc = state->crtc;
+	struct drm_display_mode *mode = &crtc->mode;
+	int dst_x = state->crtc_x;
+	int dst_y = state->crtc_y;
+	int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+	int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+	u32 val, ydo, xdo, yds, xds;
+
+	/* Input / output size
+	 * Align to upper even value */
+	dst_w = ALIGN(dst_w, 2);
+	dst_h = ALIGN(dst_h, 2);
 
 	/* Unmask */
 	val = readl(vid->regs + VID_CTL);
 	val &= ~VID_CTL_IGNORE;
 	writel(val, vid->regs + VID_CTL);
 
-	return 0;
-}
-
-static int sti_vid_commit_layer(struct sti_layer *vid)
-{
-	struct drm_display_mode *mode = vid->mode;
-	u32 ydo, xdo, yds, xds;
-
-	ydo = sti_vtg_get_line_number(*mode, vid->dst_y);
-	yds = sti_vtg_get_line_number(*mode, vid->dst_y + vid->dst_h - 1);
-	xdo = sti_vtg_get_pixel_number(*mode, vid->dst_x);
-	xds = sti_vtg_get_pixel_number(*mode, vid->dst_x + vid->dst_w - 1);
+	ydo = sti_vtg_get_line_number(*mode, dst_y);
+	yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
+	xdo = sti_vtg_get_pixel_number(*mode, dst_x);
+	xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
 
 	writel((ydo << 16) | xdo, vid->regs + VID_VPO);
 	writel((yds << 16) | xds, vid->regs + VID_VPS);
-
-	return 0;
 }
 
-static int sti_vid_disable_layer(struct sti_layer *vid)
+void sti_vid_disable(struct sti_vid *vid)
 {
 	u32 val;
 
@@ -79,21 +81,9 @@
 	val = readl(vid->regs + VID_CTL);
 	val |= VID_CTL_IGNORE;
 	writel(val, vid->regs + VID_CTL);
-
-	return 0;
 }
 
-static const uint32_t *sti_vid_get_formats(struct sti_layer *layer)
-{
-	return NULL;
-}
-
-static unsigned int sti_vid_get_nb_formats(struct sti_layer *layer)
-{
-	return 0;
-}
-
-static void sti_vid_init(struct sti_layer *vid)
+static void sti_vid_init(struct sti_vid *vid)
 {
 	/* Enable PSI, Mask layer */
 	writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL);
@@ -113,18 +103,10 @@
 	writel(VID_CSAT_DFLT, vid->regs + VID_CSAT);
 }
 
-static const struct sti_layer_funcs vid_ops = {
-	.get_formats = sti_vid_get_formats,
-	.get_nb_formats = sti_vid_get_nb_formats,
-	.init = sti_vid_init,
-	.prepare = sti_vid_prepare_layer,
-	.commit = sti_vid_commit_layer,
-	.disable = sti_vid_disable_layer,
-};
-
-struct sti_layer *sti_vid_create(struct device *dev)
+struct sti_vid *sti_vid_create(struct device *dev, int id,
+			       void __iomem *baseaddr)
 {
-	struct sti_layer *vid;
+	struct sti_vid *vid;
 
 	vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL);
 	if (!vid) {
@@ -132,7 +114,11 @@
 		return NULL;
 	}
 
-	vid->ops = &vid_ops;
+	vid->dev = dev;
+	vid->regs = baseaddr;
+	vid->id = id;
+
+	sti_vid_init(vid);
 
 	return vid;
 }
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 2c0aecd..5dea479 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -7,6 +7,23 @@
 #ifndef _STI_VID_H_
 #define _STI_VID_H_
 
-struct sti_layer *sti_vid_create(struct device *dev);
+/**
+ * STI VID structure
+ *
+ * @dev:   driver device
+ * @regs:  vid registers
+ * @id:    id of the vid
+ */
+struct sti_vid {
+	struct device *dev;
+	void __iomem *regs;
+	int id;
+};
+
+void sti_vid_commit(struct sti_vid *vid,
+		    struct drm_plane_state *state);
+void sti_vid_disable(struct sti_vid *vid);
+struct sti_vid *sti_vid_create(struct device *dev, int id,
+			       void __iomem *baseaddr);
 
 #endif
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index a287e4f..ddefb85 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -76,6 +76,14 @@
 	return NULL;
 }
 
+static void tegra_dc_stats_reset(struct tegra_dc_stats *stats)
+{
+	stats->frames = 0;
+	stats->vblank = 0;
+	stats->underflow = 0;
+	stats->overflow = 0;
+}
+
 /*
  * Reads the active copy of a register. This takes the dc->lock spinlock to
  * prevent races with the VBLANK processing which also needs access to the
@@ -759,7 +767,6 @@
 	/* position the cursor */
 	value = (state->crtc_y & 0x3fff) << 16 | (state->crtc_x & 0x3fff);
 	tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
-
 }
 
 static void tegra_cursor_atomic_disable(struct drm_plane *plane,
@@ -809,9 +816,11 @@
 		return ERR_PTR(-ENOMEM);
 
 	/*
-	 * We'll treat the cursor as an overlay plane with index 6 here so
-	 * that the update and activation request bits in DC_CMD_STATE_CONTROL
-	 * match up.
+	 * This index is kind of fake. The cursor isn't a regular plane, but
+	 * its update and activation request bits in DC_CMD_STATE_CONTROL do
+	 * use the same programming. Setting this fake index here allows the
+	 * code in tegra_add_plane_state() to do the right thing without the
+	 * need to special-case the cursor plane.
 	 */
 	plane->index = 6;
 
@@ -1015,6 +1024,8 @@
 		crtc->state = &state->base;
 		crtc->state->crtc = crtc;
 	}
+
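+	/* vblank stays disabled until tegra_crtc_enable() turns it back on */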
+	drm_crtc_vblank_reset(crtc);
 }
 
 static struct drm_crtc_state *
@@ -1052,90 +1063,6 @@
 	.atomic_destroy_state = tegra_crtc_atomic_destroy_state,
 };
 
-static void tegra_dc_stop(struct tegra_dc *dc)
-{
-	u32 value;
-
-	/* stop the display controller */
-	value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
-	value &= ~DISP_CTRL_MODE_MASK;
-	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
-
-	tegra_dc_commit(dc);
-}
-
-static bool tegra_dc_idle(struct tegra_dc *dc)
-{
-	u32 value;
-
-	value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND);
-
-	return (value & DISP_CTRL_MODE_MASK) == 0;
-}
-
-static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
-{
-	timeout = jiffies + msecs_to_jiffies(timeout);
-
-	while (time_before(jiffies, timeout)) {
-		if (tegra_dc_idle(dc))
-			return 0;
-
-		usleep_range(1000, 2000);
-	}
-
-	dev_dbg(dc->dev, "timeout waiting for DC to become idle\n");
-	return -ETIMEDOUT;
-}
-
-static void tegra_crtc_disable(struct drm_crtc *crtc)
-{
-	struct tegra_dc *dc = to_tegra_dc(crtc);
-	u32 value;
-
-	if (!tegra_dc_idle(dc)) {
-		tegra_dc_stop(dc);
-
-		/*
-		 * Ignore the return value, there isn't anything useful to do
-		 * in case this fails.
-		 */
-		tegra_dc_wait_idle(dc, 100);
-	}
-
-	/*
-	 * This should really be part of the RGB encoder driver, but clearing
-	 * these bits has the side-effect of stopping the display controller.
-	 * When that happens no VBLANK interrupts will be raised. At the same
-	 * time the encoder is disabled before the display controller, so the
-	 * above code is always going to timeout waiting for the controller
-	 * to go idle.
-	 *
-	 * Given the close coupling between the RGB encoder and the display
-	 * controller doing it here is still kind of okay. None of the other
-	 * encoder drivers require these bits to be cleared.
-	 *
-	 * XXX: Perhaps given that the display controller is switched off at
-	 * this point anyway maybe clearing these bits isn't even useful for
-	 * the RGB encoder?
-	 */
-	if (dc->rgb) {
-		value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
-		value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
-			   PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
-		tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
-	}
-
-	drm_crtc_vblank_off(crtc);
-}
-
-static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted)
-{
-	return true;
-}
-
 static int tegra_dc_set_timings(struct tegra_dc *dc,
 				struct drm_display_mode *mode)
 {
@@ -1229,7 +1156,85 @@
 	tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
 }
 
-static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc)
+static void tegra_dc_stop(struct tegra_dc *dc)
+{
+	u32 value;
+
+	/* stop the display controller */
+	value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+	value &= ~DISP_CTRL_MODE_MASK;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+	tegra_dc_commit(dc);
+}
+
+static bool tegra_dc_idle(struct tegra_dc *dc)
+{
+	u32 value;
+
+	value = tegra_dc_readl_active(dc, DC_CMD_DISPLAY_COMMAND);
+
+	return (value & DISP_CTRL_MODE_MASK) == 0;
+}
+
+static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
+{
+	timeout = jiffies + msecs_to_jiffies(timeout);
+
+	while (time_before(jiffies, timeout)) {
+		if (tegra_dc_idle(dc))
+			return 0;
+
+		usleep_range(1000, 2000);
+	}
+
+	dev_dbg(dc->dev, "timeout waiting for DC to become idle\n");
+	return -ETIMEDOUT;
+}
+
+static void tegra_crtc_disable(struct drm_crtc *crtc)
+{
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+	u32 value;
+
+	if (!tegra_dc_idle(dc)) {
+		tegra_dc_stop(dc);
+
+		/*
+		 * Ignore the return value; there isn't anything useful to do
+		 * if this fails.
+		 */
+		tegra_dc_wait_idle(dc, 100);
+	}
+
+	/*
+	 * This should really be part of the RGB encoder driver, but clearing
+	 * these bits has the side-effect of stopping the display controller.
+	 * When that happens no VBLANK interrupts will be raised. At the same
+	 * time the encoder is disabled before the display controller, so the
+	 * above code is always going to time out waiting for the controller
+	 * to go idle.
+	 *
+	 * Given the close coupling between the RGB encoder and the display
+	 * controller doing it here is still kind of okay. None of the other
+	 * encoder drivers require these bits to be cleared.
+	 *
+	 * XXX: Perhaps given that the display controller is switched off at
+	 * this point anyway maybe clearing these bits isn't even useful for
+	 * the RGB encoder?
+	 */
+	if (dc->rgb) {
+		value = tegra_dc_readl(dc, DC_CMD_DISPLAY_POWER_CONTROL);
+		value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+			   PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
+		tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+	}
+
+	tegra_dc_stats_reset(&dc->stats);
+	drm_crtc_vblank_off(crtc);
+}
+
+static void tegra_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
 	struct tegra_dc_state *state = to_dc_state(crtc->state);
@@ -1259,15 +1264,7 @@
 	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
 
 	tegra_dc_commit(dc);
-}
 
-static void tegra_crtc_prepare(struct drm_crtc *crtc)
-{
-	drm_crtc_vblank_off(crtc);
-}
-
-static void tegra_crtc_commit(struct drm_crtc *crtc)
-{
 	drm_crtc_vblank_on(crtc);
 }
 
@@ -1277,7 +1274,8 @@
 	return 0;
 }
 
-static void tegra_crtc_atomic_begin(struct drm_crtc *crtc)
+static void tegra_crtc_atomic_begin(struct drm_crtc *crtc,
+				    struct drm_crtc_state *old_crtc_state)
 {
 	struct tegra_dc *dc = to_tegra_dc(crtc);
 
@@ -1291,7 +1289,8 @@
 	}
 }
 
-static void tegra_crtc_atomic_flush(struct drm_crtc *crtc)
+static void tegra_crtc_atomic_flush(struct drm_crtc *crtc,
+				    struct drm_crtc_state *old_crtc_state)
 {
 	struct tegra_dc_state *state = to_dc_state(crtc->state);
 	struct tegra_dc *dc = to_tegra_dc(crtc);
@@ -1302,10 +1301,7 @@
 
 static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
 	.disable = tegra_crtc_disable,
-	.mode_fixup = tegra_crtc_mode_fixup,
-	.mode_set_nofb = tegra_crtc_mode_set_nofb,
-	.prepare = tegra_crtc_prepare,
-	.commit = tegra_crtc_commit,
+	.enable = tegra_crtc_enable,
 	.atomic_check = tegra_crtc_atomic_check,
 	.atomic_begin = tegra_crtc_atomic_begin,
 	.atomic_flush = tegra_crtc_atomic_flush,
@@ -1323,6 +1319,7 @@
 		/*
 		dev_dbg(dc->dev, "%s(): frame end\n", __func__);
 		*/
+		dc->stats.frames++;
 	}
 
 	if (status & VBLANK_INT) {
@@ -1331,12 +1328,21 @@
 		*/
 		drm_crtc_handle_vblank(&dc->base);
 		tegra_dc_finish_page_flip(dc);
+		dc->stats.vblank++;
 	}
 
 	if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
 		/*
 		dev_dbg(dc->dev, "%s(): underflow\n", __func__);
 		*/
+		dc->stats.underflow++;
+	}
+
+	if (status & (WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT)) {
+		/*
+		dev_dbg(dc->dev, "%s(): overflow\n", __func__);
+		*/
+		dc->stats.overflow++;
 	}
 
 	return IRQ_HANDLED;
@@ -1346,6 +1352,14 @@
 {
 	struct drm_info_node *node = s->private;
 	struct tegra_dc *dc = node->info_ent->data;
+	int err = 0;
+
+	drm_modeset_lock_crtc(&dc->base, NULL);
+
+	if (!dc->base.state->active) {
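+	/* registers can only be read while the controller is active */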
+		err = -EBUSY;
+		goto unlock;
+	}
 
 #define DUMP_REG(name)						\
 	seq_printf(s, "%-40s %#05x %08x\n", #name, name,	\
@@ -1566,11 +1580,59 @@
 
 #undef DUMP_REG
 
+unlock:
+	drm_modeset_unlock_crtc(&dc->base);
+	return err;
+}
+
+static int tegra_dc_show_crc(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct tegra_dc *dc = node->info_ent->data;
+	int err = 0;
+	u32 value;
+
+	drm_modeset_lock_crtc(&dc->base, NULL);
+
+	if (!dc->base.state->active) {
+		err = -EBUSY;
+		goto unlock;
+	}
+
+	value = DC_COM_CRC_CONTROL_ACTIVE_DATA | DC_COM_CRC_CONTROL_ENABLE;
+	tegra_dc_writel(dc, value, DC_COM_CRC_CONTROL);
+	tegra_dc_commit(dc);
+
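+	/* wait two frames so the CRC covers at least one full frame */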
+	drm_crtc_wait_one_vblank(&dc->base);
+	drm_crtc_wait_one_vblank(&dc->base);
+
+	value = tegra_dc_readl(dc, DC_COM_CRC_CHECKSUM);
+	seq_printf(s, "%08x\n", value);
+
+	tegra_dc_writel(dc, 0, DC_COM_CRC_CONTROL);
+
+unlock:
+	drm_modeset_unlock_crtc(&dc->base);
+	return err;
+}
+
+static int tegra_dc_show_stats(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct tegra_dc *dc = node->info_ent->data;
+
+	seq_printf(s, "frames: %lu\n", dc->stats.frames);
+	seq_printf(s, "vblank: %lu\n", dc->stats.vblank);
+	seq_printf(s, "underflow: %lu\n", dc->stats.underflow);
+	seq_printf(s, "overflow: %lu\n", dc->stats.overflow);
+
 	return 0;
 }
 
 static struct drm_info_list debugfs_files[] = {
 	{ "regs", tegra_dc_show_regs, 0, NULL },
+	{ "crc", tegra_dc_show_crc, 0, NULL },
+	{ "stats", tegra_dc_show_stats, 0, NULL },
 };
 
 static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
@@ -1716,7 +1778,8 @@
 		tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
 	}
 
-	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
+	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+		WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
 	tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
 
 	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
@@ -1732,15 +1795,19 @@
 		WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
 	tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
 
-	value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+	value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+		WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
 	tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
 
-	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+		WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
 	tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
 
 	if (dc->soc->supports_border_color)
 		tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
 
+	tegra_dc_stats_reset(&dc->stats);
+
 	return 0;
 
 cleanup:
@@ -1826,8 +1893,20 @@
 	.has_powergate = true,
 };
 
+static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
+	.supports_border_color = false,
+	.supports_interlacing = true,
+	.supports_cursor = true,
+	.supports_block_linear = true,
+	.pitch_align = 64,
+	.has_powergate = true,
+};
+
 static const struct of_device_id tegra_dc_of_match[] = {
 	{
+		.compatible = "nvidia,tegra210-dc",
+		.data = &tegra210_dc_soc_info,
+	}, {
 		.compatible = "nvidia,tegra124-dc",
 		.data = &tegra124_dc_soc_info,
 	}, {
@@ -1957,6 +2036,10 @@
 		return -ENXIO;
 	}
 
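+	/*
+	 * Request the syncpoint early so that it is available by the time
+	 * the host1x client below is registered.
+	 */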
+	dc->syncpt = host1x_syncpt_request(&pdev->dev, flags);
+	if (!dc->syncpt)
+		dev_warn(&pdev->dev, "failed to allocate syncpoint\n");
+
 	INIT_LIST_HEAD(&dc->client.list);
 	dc->client.ops = &dc_client_ops;
 	dc->client.dev = &pdev->dev;
@@ -1974,10 +2057,6 @@
 		return err;
 	}
 
-	dc->syncpt = host1x_syncpt_request(&pdev->dev, flags);
-	if (!dc->syncpt)
-		dev_warn(&pdev->dev, "failed to allocate syncpoint\n");
-
 	platform_set_drvdata(pdev, dc);
 
 	return 0;
@@ -2016,7 +2095,6 @@
 struct platform_driver tegra_dc_driver = {
 	.driver = {
 		.name = "tegra-dc",
-		.owner = THIS_MODULE,
 		.of_match_table = tegra_dc_of_match,
 	},
 	.probe = tegra_dc_probe,
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 55792da..4a26863 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -86,6 +86,11 @@
 #define DC_CMD_REG_ACT_CONTROL			0x043
 
 #define DC_COM_CRC_CONTROL			0x300
+#define  DC_COM_CRC_CONTROL_ALWAYS      (1 << 3)
+#define  DC_COM_CRC_CONTROL_FULL_FRAME  (0 << 2)
+#define  DC_COM_CRC_CONTROL_ACTIVE_DATA (1 << 2)
+#define  DC_COM_CRC_CONTROL_WAIT        (1 << 1)
+#define  DC_COM_CRC_CONTROL_ENABLE      (1 << 0)
 #define DC_COM_CRC_CHECKSUM			0x301
 #define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x))
 #define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x))
@@ -114,15 +119,17 @@
 #define DC_COM_CRC_CHECKSUM_LATCHED		0x329
 
 #define DC_DISP_DISP_SIGNAL_OPTIONS0		0x400
-#define H_PULSE_0_ENABLE (1 <<  8)
-#define H_PULSE_1_ENABLE (1 << 10)
-#define H_PULSE_2_ENABLE (1 << 12)
+#define H_PULSE0_ENABLE (1 <<  8)
+#define H_PULSE1_ENABLE (1 << 10)
+#define H_PULSE2_ENABLE (1 << 12)
 
 #define DC_DISP_DISP_SIGNAL_OPTIONS1		0x401
 
 #define DC_DISP_DISP_WIN_OPTIONS		0x402
 #define HDMI_ENABLE	(1 << 30)
 #define DSI_ENABLE	(1 << 29)
+#define SOR1_TIMING_CYA	(1 << 27)
+#define SOR1_ENABLE	(1 << 26)
 #define SOR_ENABLE	(1 << 25)
 #define CURSOR_ENABLE	(1 << 16)
 
@@ -242,9 +249,20 @@
 #define BASE_COLOR_SIZE565     (6 << 0)
 #define BASE_COLOR_SIZE332     (7 << 0)
 #define BASE_COLOR_SIZE888     (8 << 0)
+#define DITHER_CONTROL_MASK    (3 << 8)
 #define DITHER_CONTROL_DISABLE (0 << 8)
 #define DITHER_CONTROL_ORDERED (2 << 8)
 #define DITHER_CONTROL_ERRDIFF (3 << 8)
+#define BASE_COLOR_SIZE_MASK   (0xf << 0)
+#define BASE_COLOR_SIZE_666    (0 << 0)
+#define BASE_COLOR_SIZE_111    (1 << 0)
+#define BASE_COLOR_SIZE_222    (2 << 0)
+#define BASE_COLOR_SIZE_333    (3 << 0)
+#define BASE_COLOR_SIZE_444    (4 << 0)
+#define BASE_COLOR_SIZE_555    (5 << 0)
+#define BASE_COLOR_SIZE_565    (6 << 0)
+#define BASE_COLOR_SIZE_332    (7 << 0)
+#define BASE_COLOR_SIZE_888    (8 << 0)
 
 #define DC_DISP_SHIFT_CLOCK_OPTIONS		0x431
 #define  SC1_H_QUALIFIER_NONE	(1 << 16)
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 07b2697..224a7dc 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -294,26 +294,41 @@
 	}
 
 	dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
-	if (IS_ERR(dpaux->rst))
+	if (IS_ERR(dpaux->rst)) {
+		dev_err(&pdev->dev, "failed to get reset control: %ld\n",
+			PTR_ERR(dpaux->rst));
 		return PTR_ERR(dpaux->rst);
+	}
 
 	dpaux->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(dpaux->clk))
+	if (IS_ERR(dpaux->clk)) {
+		dev_err(&pdev->dev, "failed to get module clock: %ld\n",
+			PTR_ERR(dpaux->clk));
 		return PTR_ERR(dpaux->clk);
+	}
 
 	err = clk_prepare_enable(dpaux->clk);
-	if (err < 0)
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to enable module clock: %d\n",
+			err);
 		return err;
+	}
 
 	reset_control_deassert(dpaux->rst);
 
 	dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
-	if (IS_ERR(dpaux->clk_parent))
+	if (IS_ERR(dpaux->clk_parent)) {
+		dev_err(&pdev->dev, "failed to get parent clock: %ld\n",
+			PTR_ERR(dpaux->clk_parent));
 		return PTR_ERR(dpaux->clk_parent);
+	}
 
 	err = clk_prepare_enable(dpaux->clk_parent);
-	if (err < 0)
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to enable parent clock: %d\n",
+			err);
 		return err;
+	}
 
 	err = clk_set_rate(dpaux->clk_parent, 270000000);
 	if (err < 0) {
@@ -323,8 +338,11 @@
 	}
 
 	dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd");
-	if (IS_ERR(dpaux->vdd))
+	if (IS_ERR(dpaux->vdd)) {
+		dev_err(&pdev->dev, "failed to get VDD supply: %ld\n",
+			PTR_ERR(dpaux->vdd));
 		return PTR_ERR(dpaux->vdd);
+	}
 
 	err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
 			       dev_name(dpaux->dev), dpaux);
@@ -334,6 +352,8 @@
 		return err;
 	}
 
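+	/* keep the interrupt masked until the output is attached */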
+	disable_irq(dpaux->irq);
+
 	dpaux->aux.transfer = tegra_dpaux_transfer;
 	dpaux->aux.dev = &pdev->dev;
 
@@ -341,6 +361,24 @@
 	if (err < 0)
 		return err;
 
+	/*
+	 * Assume that by default the DPAUX/I2C pads will be used for HDMI,
+	 * so power them up and configure them in I2C mode.
+	 *
+	 * The DPAUX code paths reconfigure the pads in AUX mode, but there
+	 * is no possibility to perform the I2C mode configuration in the
+	 * HDMI path.
+	 */
+	value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+	value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+
+	value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_PADCTL);
+	value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
+		DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+		DPAUX_HYBRID_PADCTL_MODE_I2C;
+	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+
 	/* enable and clear all interrupts */
 	value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT |
 		DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT;
@@ -359,6 +397,12 @@
 static int tegra_dpaux_remove(struct platform_device *pdev)
 {
 	struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
+	u32 value;
+
+	/* make sure pads are powered down when not in use */
+	value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+	value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+	tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
 
 	drm_dp_aux_unregister(&dpaux->aux);
 
@@ -376,6 +420,7 @@
 }
 
 static const struct of_device_id tegra_dpaux_of_match[] = {
+	{ .compatible = "nvidia,tegra210-dpaux", },
 	{ .compatible = "nvidia,tegra124-dpaux", },
 	{ },
 };
@@ -425,8 +470,10 @@
 		enum drm_connector_status status;
 
 		status = tegra_dpaux_detect(dpaux);
-		if (status == connector_status_connected)
+		if (status == connector_status_connected) {
+			enable_irq(dpaux->irq);
 			return 0;
+		}
 
 		usleep_range(1000, 2000);
 	}
@@ -439,6 +486,8 @@
 	unsigned long timeout;
 	int err;
 
+	disable_irq(dpaux->irq);
+
 	err = regulator_disable(dpaux->vdd);
 	if (err < 0)
 		return err;
diff --git a/drivers/gpu/drm/tegra/dpaux.h b/drivers/gpu/drm/tegra/dpaux.h
index 806e245..20783d9 100644
--- a/drivers/gpu/drm/tegra/dpaux.h
+++ b/drivers/gpu/drm/tegra/dpaux.h
@@ -57,6 +57,8 @@
 #define DPAUX_DP_AUX_CONFIG 0x45
 
 #define DPAUX_HYBRID_PADCTL 0x49
+#define DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV (1 << 15)
+#define DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV (1 << 14)
 #define DPAUX_HYBRID_PADCTL_AUX_CMH(x) (((x) & 0x3) << 12)
 #define DPAUX_HYBRID_PADCTL_AUX_DRVZ(x) (((x) & 0x7) << 8)
 #define DPAUX_HYBRID_PADCTL_AUX_DRVI(x) (((x) & 0x3f) << 2)
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 427f50c..6d88cf1 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -171,8 +171,6 @@
 	if (err < 0)
 		goto fbdev;
 
-	drm_mode_config_reset(drm);
-
 	/*
 	 * We don't use the drm_irq_install() helpers provided by the DRM
 	 * core, so we need to set this manually in order to allow the
@@ -182,11 +180,14 @@
 
 	/* syncpoints are used for full 32-bit hardware VBLANK counters */
 	drm->max_vblank_count = 0xffffffff;
+	drm->vblank_disable_allowed = true;
 
 	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
 	if (err < 0)
 		goto device;
 
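+	/*
+	 * Reset the mode configuration only after vblank support is set
+	 * up: the CRTC reset hook calls drm_crtc_vblank_reset().
+	 */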
+	drm_mode_config_reset(drm);
+
 	err = tegra_drm_fb_init(drm);
 	if (err < 0)
 		goto vblank;
@@ -1037,9 +1038,8 @@
 }
 #endif
 
-static const struct dev_pm_ops host1x_drm_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(host1x_drm_suspend, host1x_drm_resume)
-};
+static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
+			 host1x_drm_resume);
 
 static const struct of_device_id host1x_drm_subdevs[] = {
 	{ .compatible = "nvidia,tegra20-dc", },
@@ -1056,6 +1056,12 @@
 	{ .compatible = "nvidia,tegra124-dc", },
 	{ .compatible = "nvidia,tegra124-sor", },
 	{ .compatible = "nvidia,tegra124-hdmi", },
+	{ .compatible = "nvidia,tegra124-dsi", },
+	{ .compatible = "nvidia,tegra132-dsi", },
+	{ .compatible = "nvidia,tegra210-dc", },
+	{ .compatible = "nvidia,tegra210-dsi", },
+	{ .compatible = "nvidia,tegra210-sor", },
+	{ .compatible = "nvidia,tegra210-sor1", },
 	{ /* sentinel */ }
 };
 
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 659b2fc..ec49275 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -12,6 +12,7 @@
 
 #include <uapi/drm/tegra_drm.h>
 #include <linux/host1x.h>
+#include <linux/of_gpio.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
@@ -104,6 +105,13 @@
 struct tegra_dc_soc_info;
 struct tegra_output;
 
+struct tegra_dc_stats {
+	unsigned long frames;
+	unsigned long vblank;
+	unsigned long underflow;
+	unsigned long overflow;
+};
+
 struct tegra_dc {
 	struct host1x_client client;
 	struct host1x_syncpt *syncpt;
@@ -121,6 +129,7 @@
 
 	struct tegra_output *rgb;
 
+	struct tegra_dc_stats stats;
 	struct list_head list;
 
 	struct drm_info_list *debugfs_files;
@@ -200,6 +209,7 @@
 	const struct edid *edid;
 	unsigned int hpd_irq;
 	int hpd_gpio;
+	enum of_gpio_flags hpd_gpio_flags;
 
 	struct drm_encoder encoder;
 	struct drm_connector connector;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index ed970f6..f0a138e 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -119,6 +119,16 @@
 {
 	struct drm_info_node *node = s->private;
 	struct tegra_dsi *dsi = node->info_ent->data;
+	struct drm_crtc *crtc = dsi->output.encoder.crtc;
+	struct drm_device *drm = node->minor->dev;
+	int err = 0;
+
+	drm_modeset_lock_all(drm);
+
+	if (!crtc || !crtc->state->active) {
+		err = -EBUSY;
+		goto unlock;
+	}
 
 #define DUMP_REG(name)						\
 	seq_printf(s, "%-32s %#05x %08x\n", #name, name,	\
@@ -208,7 +218,9 @@
 
 #undef DUMP_REG
 
-	return 0;
+unlock:
+	drm_modeset_unlock_all(drm);
+	return err;
 }
 
 static struct drm_info_list debugfs_files[] = {
@@ -548,14 +560,19 @@
 
 		/* horizontal sync width */
 		hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
-		hsw -= 10;
 
 		/* horizontal back porch */
 		hbp = (mode->htotal - mode->hsync_end) * mul / div;
-		hbp -= 14;
+
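+		/* with no sync pulses, the sync width goes into the back porch */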
+		if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0)
+			hbp += hsw;
 
 		/* horizontal front porch */
 		hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
+
+		/* subtract packet overhead */
+		hsw -= 10;
+		hbp -= 14;
 		hfp -= 8;
 
 		tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
@@ -726,10 +743,6 @@
 		tegra_dsi_soft_reset(dsi->slave);
 }
 
-static void tegra_dsi_connector_dpms(struct drm_connector *connector, int mode)
-{
-}
-
 static void tegra_dsi_connector_reset(struct drm_connector *connector)
 {
 	struct tegra_dsi_state *state;
@@ -756,7 +769,7 @@
 }
 
 static const struct drm_connector_funcs tegra_dsi_connector_funcs = {
-	.dpms = tegra_dsi_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.reset = tegra_dsi_connector_reset,
 	.detect = tegra_output_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -782,59 +795,6 @@
 	.destroy = tegra_output_encoder_destroy,
 };
 
-static void tegra_dsi_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-}
-
-static void tegra_dsi_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void tegra_dsi_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static void tegra_dsi_encoder_mode_set(struct drm_encoder *encoder,
-				       struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted)
-{
-	struct tegra_output *output = encoder_to_output(encoder);
-	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
-	struct tegra_dsi *dsi = to_dsi(output);
-	struct tegra_dsi_state *state;
-	u32 value;
-
-	state = tegra_dsi_get_state(dsi);
-
-	tegra_dsi_set_timeout(dsi, state->bclk, state->vrefresh);
-
-	/*
-	 * The D-PHY timing fields are expressed in byte-clock cycles, so
-	 * multiply the period by 8.
-	 */
-	tegra_dsi_set_phy_timing(dsi, state->period * 8, &state->timing);
-
-	if (output->panel)
-		drm_panel_prepare(output->panel);
-
-	tegra_dsi_configure(dsi, dc->pipe, mode);
-
-	/* enable display controller */
-	value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
-	value |= DSI_ENABLE;
-	tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
-	tegra_dc_commit(dc);
-
-	/* enable DSI controller */
-	tegra_dsi_enable(dsi);
-
-	if (output->panel)
-		drm_panel_enable(output->panel);
-
-	return;
-}
-
 static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
 {
 	struct tegra_output *output = encoder_to_output(encoder);
@@ -874,6 +834,46 @@
 	return;
 }
 
+static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+	struct tegra_output *output = encoder_to_output(encoder);
+	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+	struct tegra_dsi *dsi = to_dsi(output);
+	struct tegra_dsi_state *state;
+	u32 value;
+
+	state = tegra_dsi_get_state(dsi);
+
+	tegra_dsi_set_timeout(dsi, state->bclk, state->vrefresh);
+
+	/*
+	 * The D-PHY timing fields are expressed in byte-clock cycles, so
+	 * multiply the period by 8.
+	 */
+	tegra_dsi_set_phy_timing(dsi, state->period * 8, &state->timing);
+
+	if (output->panel)
+		drm_panel_prepare(output->panel);
+
+	tegra_dsi_configure(dsi, dc->pipe, mode);
+
+	/* enable display controller */
+	value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+	value |= DSI_ENABLE;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+	tegra_dc_commit(dc);
+
+	/* enable DSI controller */
+	tegra_dsi_enable(dsi);
+
+	if (output->panel)
+		drm_panel_enable(output->panel);
+
+	return;
+}
+
 static int
 tegra_dsi_encoder_atomic_check(struct drm_encoder *encoder,
 			       struct drm_crtc_state *crtc_state,
@@ -956,11 +956,8 @@
 }
 
 static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
-	.dpms = tegra_dsi_encoder_dpms,
-	.prepare = tegra_dsi_encoder_prepare,
-	.commit = tegra_dsi_encoder_commit,
-	.mode_set = tegra_dsi_encoder_mode_set,
 	.disable = tegra_dsi_encoder_disable,
+	.enable = tegra_dsi_encoder_enable,
 	.atomic_check = tegra_dsi_encoder_atomic_check,
 };
 
@@ -992,6 +989,10 @@
 		DSI_PAD_OUT_CLK(0x0);
 	tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
 
+	value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
+		DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
+	tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
+
 	return tegra_mipi_calibrate(dsi->mipi);
 }
 
@@ -1621,6 +1622,9 @@
 }
 
 static const struct of_device_id tegra_dsi_of_match[] = {
+	{ .compatible = "nvidia,tegra210-dsi", },
+	{ .compatible = "nvidia,tegra132-dsi", },
+	{ .compatible = "nvidia,tegra124-dsi", },
 	{ .compatible = "nvidia,tegra114-dsi", },
 	{ },
 };
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
index bad1006..2192636 100644
--- a/drivers/gpu/drm/tegra/dsi.h
+++ b/drivers/gpu/drm/tegra/dsi.h
@@ -113,6 +113,10 @@
 #define DSI_PAD_SLEW_DN(x)		(((x) & 0x7) << 12)
 #define DSI_PAD_SLEW_UP(x)		(((x) & 0x7) << 16)
 #define DSI_PAD_CONTROL_3		0x51
+#define  DSI_PAD_PREEMP_PD_CLK(x)	(((x) & 0x3) << 12)
+#define  DSI_PAD_PREEMP_PU_CLK(x)	(((x) & 0x3) << 8)
+#define  DSI_PAD_PREEMP_PD(x)		(((x) & 0x3) << 4)
+#define  DSI_PAD_PREEMP_PU(x)		(((x) & 0x3) << 0)
 #define DSI_PAD_CONTROL_4		0x52
 #define DSI_GANGED_MODE_CONTROL		0x53
 #define DSI_GANGED_MODE_CONTROL_ENABLE	(1 << 0)
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 397fb34..07c844b 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -184,9 +184,9 @@
 #ifdef CONFIG_DRM_TEGRA_FBDEV
 static struct fb_ops tegra_fb_ops = {
 	.owner = THIS_MODULE,
-	.fb_fillrect = sys_fillrect,
-	.fb_copyarea = sys_copyarea,
-	.fb_imageblit = sys_imageblit,
+	.fb_fillrect = drm_fb_helper_sys_fillrect,
+	.fb_copyarea = drm_fb_helper_sys_copyarea,
+	.fb_imageblit = drm_fb_helper_sys_imageblit,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_blank = drm_fb_helper_blank,
@@ -224,11 +224,11 @@
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
-	info = framebuffer_alloc(0, drm->dev);
-	if (!info) {
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
 		dev_err(drm->dev, "failed to allocate framebuffer info\n");
 		drm_gem_object_unreference_unlocked(&bo->gem);
-		return -ENOMEM;
+		return PTR_ERR(info);
 	}
 
 	fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
@@ -248,12 +248,6 @@
 	info->flags = FBINFO_FLAG_DEFAULT;
 	info->fbops = &tegra_fb_ops;
 
-	err = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (err < 0) {
-		dev_err(drm->dev, "failed to allocate color map: %d\n", err);
-		goto destroy;
-	}
-
 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
 
@@ -282,7 +276,7 @@
 	drm_framebuffer_unregister_private(fb);
 	tegra_fb_destroy(fb);
 release:
-	framebuffer_release(info);
+	drm_fb_helper_release_fbi(helper);
 	return err;
 }
 
@@ -347,20 +341,9 @@
 
 static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
 {
-	struct fb_info *info = fbdev->base.fbdev;
 
-	if (info) {
-		int err;
-
-		err = unregister_framebuffer(info);
-		if (err < 0)
-			DRM_DEBUG_KMS("failed to unregister framebuffer\n");
-
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&fbdev->base);
+	drm_fb_helper_release_fbi(&fbdev->base);
 
 	if (fbdev->fb) {
 		drm_framebuffer_unregister_private(&fbdev->fb->base);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 06ab178..52b32cb 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -772,13 +772,8 @@
 	return drm_detect_hdmi_monitor(edid);
 }
 
-static void tegra_hdmi_connector_dpms(struct drm_connector *connector,
-				      int mode)
-{
-}
-
 static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
-	.dpms = tegra_hdmi_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.reset = drm_atomic_helper_connector_reset,
 	.detect = tegra_output_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -818,22 +813,27 @@
 	.destroy = tegra_output_encoder_destroy,
 };
 
-static void tegra_hdmi_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
 {
+	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+	u32 value;
+
+	/*
+	 * The following accesses registers of the display controller, so make
+	 * sure it's only executed when the output is attached to one.
+	 */
+	if (dc) {
+		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+		value &= ~HDMI_ENABLE;
+		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+		tegra_dc_commit(dc);
+	}
 }
 
-static void tegra_hdmi_encoder_prepare(struct drm_encoder *encoder)
+static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
 {
-}
-
-static void tegra_hdmi_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder,
-					struct drm_display_mode *mode,
-					struct drm_display_mode *adjusted)
-{
+	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
 	unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
 	struct tegra_output *output = encoder_to_output(encoder);
 	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
@@ -872,13 +872,13 @@
 
 	tegra_dc_writel(dc, VSYNC_H_POSITION(1),
 			DC_DISP_DISP_TIMING_OPTIONS);
-	tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
+	tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE_888,
 			DC_DISP_DISP_COLOR_CONTROL);
 
 	/* video_preamble uses h_pulse2 */
 	pulse_start = 1 + h_sync_width + h_back_porch - 10;
 
-	tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
+	tegra_dc_writel(dc, H_PULSE2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
 
 	value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE |
 		PULSE_LAST_END_A;
@@ -1035,24 +1035,6 @@
 	/* TODO: add HDCP support */
 }
 
-static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
-{
-	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
-	u32 value;
-
-	/*
-	 * The following accesses registers of the display controller, so make
-	 * sure it's only executed when the output is attached to one.
-	 */
-	if (dc) {
-		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
-		value &= ~HDMI_ENABLE;
-		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
-		tegra_dc_commit(dc);
-	}
-}
-
 static int
 tegra_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
 				struct drm_crtc_state *crtc_state,
@@ -1075,11 +1057,8 @@
 }
 
 static const struct drm_encoder_helper_funcs tegra_hdmi_encoder_helper_funcs = {
-	.dpms = tegra_hdmi_encoder_dpms,
-	.prepare = tegra_hdmi_encoder_prepare,
-	.commit = tegra_hdmi_encoder_commit,
-	.mode_set = tegra_hdmi_encoder_mode_set,
 	.disable = tegra_hdmi_encoder_disable,
+	.enable = tegra_hdmi_encoder_enable,
 	.atomic_check = tegra_hdmi_encoder_atomic_check,
 };
 
@@ -1087,11 +1066,16 @@
 {
 	struct drm_info_node *node = s->private;
 	struct tegra_hdmi *hdmi = node->info_ent->data;
-	int err;
+	struct drm_crtc *crtc = hdmi->output.encoder.crtc;
+	struct drm_device *drm = node->minor->dev;
+	int err = 0;
 
-	err = clk_prepare_enable(hdmi->clk);
-	if (err)
-		return err;
+	drm_modeset_lock_all(drm);
+
+	if (!crtc || !crtc->state->active) {
+		err = -EBUSY;
+		goto unlock;
+	}
 
 #define DUMP_REG(name)						\
 	seq_printf(s, "%-56s %#05x %08x\n", #name, name,	\
@@ -1258,9 +1242,9 @@
 
 #undef DUMP_REG
 
-	clk_disable_unprepare(hdmi->clk);
-
-	return 0;
+unlock:
+	drm_modeset_unlock_all(drm);
+	return err;
 }
 
 static struct drm_info_list debugfs_files[] = {
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 37db479..46664b6 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -7,8 +7,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/of_gpio.h>
-
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_panel.h>
 #include "drm.h"
@@ -59,10 +57,17 @@
 	enum drm_connector_status status = connector_status_unknown;
 
 	if (gpio_is_valid(output->hpd_gpio)) {
-		if (gpio_get_value(output->hpd_gpio) == 0)
-			status = connector_status_disconnected;
-		else
-			status = connector_status_connected;
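+		/* honour the active-low flag parsed from the device tree */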
+		if (output->hpd_gpio_flags & OF_GPIO_ACTIVE_LOW) {
+			if (gpio_get_value(output->hpd_gpio) != 0)
+				status = connector_status_disconnected;
+			else
+				status = connector_status_connected;
+		} else {
+			if (gpio_get_value(output->hpd_gpio) == 0)
+				status = connector_status_disconnected;
+			else
+				status = connector_status_connected;
+		}
 	} else {
 		if (!output->panel)
 			status = connector_status_disconnected;
@@ -97,7 +102,6 @@
 int tegra_output_probe(struct tegra_output *output)
 {
 	struct device_node *ddc, *panel;
-	enum of_gpio_flags flags;
 	int err, size;
 
 	if (!output->of_node)
@@ -128,7 +132,7 @@
 
 	output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
 						   "nvidia,hpd-gpio", 0,
-						   &flags);
+						   &output->hpd_gpio_flags);
 	if (gpio_is_valid(output->hpd_gpio)) {
 		unsigned long flags;
 
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 7cd833f..bc9735b 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -18,7 +18,6 @@
 struct tegra_rgb {
 	struct tegra_output output;
 	struct tegra_dc *dc;
-	bool enabled;
 
 	struct clk *clk_parent;
 	struct clk *clk;
@@ -88,13 +87,8 @@
 		tegra_dc_writel(dc, table[i].value, table[i].offset);
 }
 
-static void tegra_rgb_connector_dpms(struct drm_connector *connector,
-				     int mode)
-{
-}
-
 static const struct drm_connector_funcs tegra_rgb_connector_funcs = {
-	.dpms = tegra_rgb_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.reset = drm_atomic_helper_connector_reset,
 	.detect = tegra_output_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -125,21 +119,22 @@
 	.destroy = tegra_output_encoder_destroy,
 };
 
-static void tegra_rgb_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
 {
+	struct tegra_output *output = encoder_to_output(encoder);
+	struct tegra_rgb *rgb = to_rgb(output);
+
+	if (output->panel)
+		drm_panel_disable(output->panel);
+
+	tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+	tegra_dc_commit(rgb->dc);
+
+	if (output->panel)
+		drm_panel_unprepare(output->panel);
 }
 
-static void tegra_rgb_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void tegra_rgb_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static void tegra_rgb_encoder_mode_set(struct drm_encoder *encoder,
-				       struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted)
+static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
 {
 	struct tegra_output *output = encoder_to_output(encoder);
 	struct tegra_rgb *rgb = to_rgb(output);
@@ -174,21 +169,6 @@
 		drm_panel_enable(output->panel);
 }
 
-static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
-{
-	struct tegra_output *output = encoder_to_output(encoder);
-	struct tegra_rgb *rgb = to_rgb(output);
-
-	if (output->panel)
-		drm_panel_disable(output->panel);
-
-	tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
-	tegra_dc_commit(rgb->dc);
-
-	if (output->panel)
-		drm_panel_unprepare(output->panel);
-}
-
 static int
 tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder,
 			       struct drm_crtc_state *crtc_state,
@@ -231,11 +211,8 @@
 }
 
 static const struct drm_encoder_helper_funcs tegra_rgb_encoder_helper_funcs = {
-	.dpms = tegra_rgb_encoder_dpms,
-	.prepare = tegra_rgb_encoder_prepare,
-	.commit = tegra_rgb_encoder_commit,
-	.mode_set = tegra_rgb_encoder_mode_set,
 	.disable = tegra_rgb_encoder_disable,
+	.enable = tegra_rgb_encoder_enable,
 	.atomic_check = tegra_rgb_encoder_atomic_check,
 };
 
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 7591d89..da1715e 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -10,7 +10,9 @@
 #include <linux/debugfs.h>
 #include <linux/gpio.h>
 #include <linux/io.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
 #include <linux/reset.h>
 
 #include <soc/tegra/pmc.h>
@@ -23,11 +25,146 @@
 #include "drm.h"
 #include "sor.h"
 
+#define SOR_REKEY 0x38
+
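+/* pad and PLL tuning values that apply up to the given TMDS frequency */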
+struct tegra_sor_hdmi_settings {
+	unsigned long frequency;
+
+	u8 vcocap;
+	u8 ichpmp;
+	u8 loadadj;
+	u8 termadj;
+	u8 tx_pu;
+	u8 bg_vref;
+
+	u8 drive_current[4];
+	u8 preemphasis[4];
+};
+
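+/*
+ * Two candidate sets of default settings: the first table below is the
+ * one currently compiled in, the alternative in the #else branch is kept
+ * for reference.
+ */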
+#if 1
+static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
+	{
+		.frequency = 54000000,
+		.vcocap = 0x0,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x10,
+		.bg_vref = 0x8,
+		.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	}, {
+		.frequency = 75000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x40,
+		.bg_vref = 0x8,
+		.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	}, {
+		.frequency = 150000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0x8,
+		.drive_current = { 0x33, 0x3a, 0x3a, 0x3a },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	}, {
+		.frequency = 300000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0xa,
+		.drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
+		.preemphasis = { 0x00, 0x17, 0x17, 0x17 },
+	}, {
+		.frequency = 600000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0x8,
+		.drive_current = { 0x33, 0x3f, 0x3f, 0x3f },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	},
+};
+#else
+static const struct tegra_sor_hdmi_settings tegra210_sor_hdmi_defaults[] = {
+	{
+		.frequency = 75000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x40,
+		.bg_vref = 0x8,
+		.drive_current = { 0x29, 0x29, 0x29, 0x29 },
+		.preemphasis = { 0x00, 0x00, 0x00, 0x00 },
+	}, {
+		.frequency = 150000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x1,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0x8,
+		.drive_current = { 0x30, 0x37, 0x37, 0x37 },
+		.preemphasis = { 0x01, 0x02, 0x02, 0x02 },
+	}, {
+		.frequency = 300000000,
+		.vcocap = 0x3,
+		.ichpmp = 0x6,
+		.loadadj = 0x3,
+		.termadj = 0x9,
+		.tx_pu = 0x66,
+		.bg_vref = 0xf,
+		.drive_current = { 0x30, 0x37, 0x37, 0x37 },
+		.preemphasis = { 0x10, 0x3e, 0x3e, 0x3e },
+	}, {
+		.frequency = 600000000,
+		.vcocap = 0x3,
+		.ichpmp = 0xa,
+		.loadadj = 0x3,
+		.termadj = 0xb,
+		.tx_pu = 0x66,
+		.bg_vref = 0xe,
+		.drive_current = { 0x35, 0x3e, 0x3e, 0x3e },
+		.preemphasis = { 0x02, 0x3f, 0x3f, 0x3f },
+	},
+};
+#endif
+
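+/* per-SoC capability flags and default HDMI pad settings */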
+struct tegra_sor_soc {
+	bool supports_edp;
+	bool supports_lvds;
+	bool supports_hdmi;
+	bool supports_dp;
+
+	const struct tegra_sor_hdmi_settings *settings;
+	unsigned int num_settings;
+};
+
+struct tegra_sor;
+
+struct tegra_sor_ops {
+	const char *name;
+	int (*probe)(struct tegra_sor *sor);
+	int (*remove)(struct tegra_sor *sor);
+};
+
 struct tegra_sor {
 	struct host1x_client client;
 	struct tegra_output output;
 	struct device *dev;
 
+	const struct tegra_sor_soc *soc;
 	void __iomem *regs;
 
 	struct reset_control *rst;
@@ -38,12 +175,19 @@
 
 	struct tegra_dpaux *dpaux;
 
-	struct mutex lock;
-	bool enabled;
-
 	struct drm_info_list *debugfs_files;
 	struct drm_minor *minor;
 	struct dentry *debugfs;
+
+	const struct tegra_sor_ops *ops;
+
+	/* for HDMI 2.0 */
+	struct tegra_sor_hdmi_settings *settings;
+	unsigned int num_settings;
+
+	struct regulator *avdd_io_supply;
+	struct regulator *vdd_pll_supply;
+	struct regulator *hdmi_supply;
 };
 
 struct tegra_sor_config {
@@ -94,40 +238,40 @@
 		SOR_LANE_DRIVE_CURRENT_LANE2(0x40) |
 		SOR_LANE_DRIVE_CURRENT_LANE1(0x40) |
 		SOR_LANE_DRIVE_CURRENT_LANE0(0x40);
-	tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT_0);
+	tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
 
 	value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) |
 		SOR_LANE_PREEMPHASIS_LANE2(0x0f) |
 		SOR_LANE_PREEMPHASIS_LANE1(0x0f) |
 		SOR_LANE_PREEMPHASIS_LANE0(0x0f);
-	tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS_0);
+	tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
 
-	value = SOR_LANE_POST_CURSOR_LANE3(0x00) |
-		SOR_LANE_POST_CURSOR_LANE2(0x00) |
-		SOR_LANE_POST_CURSOR_LANE1(0x00) |
-		SOR_LANE_POST_CURSOR_LANE0(0x00);
-	tegra_sor_writel(sor, value, SOR_LANE_POST_CURSOR_0);
+	value = SOR_LANE_POSTCURSOR_LANE3(0x00) |
+		SOR_LANE_POSTCURSOR_LANE2(0x00) |
+		SOR_LANE_POSTCURSOR_LANE1(0x00) |
+		SOR_LANE_POSTCURSOR_LANE0(0x00);
+	tegra_sor_writel(sor, value, SOR_LANE_POSTCURSOR0);
 
 	/* disable LVDS mode */
 	tegra_sor_writel(sor, 0, SOR_LVDS);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value |= SOR_DP_PADCTL_TX_PU_ENABLE;
 	value &= ~SOR_DP_PADCTL_TX_PU_MASK;
 	value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? */
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
 		 SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0;
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	usleep_range(10, 100);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
 		   SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	err = tegra_dpaux_prepare(sor->dpaux, DP_SET_ANSI_8B10B);
 	if (err < 0)
@@ -148,11 +292,11 @@
 	if (err < 0)
 		return err;
 
-	value = tegra_sor_readl(sor, SOR_DP_SPARE_0);
+	value = tegra_sor_readl(sor, SOR_DP_SPARE0);
 	value |= SOR_DP_SPARE_SEQ_ENABLE;
 	value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
 	value |= SOR_DP_SPARE_MACRO_SOR_CLK;
-	tegra_sor_writel(sor, value, SOR_DP_SPARE_0);
+	tegra_sor_writel(sor, value, SOR_DP_SPARE0);
 
 	for (i = 0, value = 0; i < link->num_lanes; i++) {
 		unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
@@ -187,18 +331,59 @@
 	return 0;
 }
 
+static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor)
+{
+	u32 mask = 0x08, adj = 0, value;
+
+	/* enable pad calibration logic */
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+	value = tegra_sor_readl(sor, SOR_PLL1);
+	value |= SOR_PLL1_TMDS_TERM;
+	tegra_sor_writel(sor, value, SOR_PLL1);
+
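+	/*
+	 * Successive approximation: try each TMDS_TERMADJ bit from the
+	 * most-significant bit down and keep it only if the termination
+	 * comparator (TERM_COMPOUT) does not trip.
+	 */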
+	while (mask) {
+		adj |= mask;
+
+		value = tegra_sor_readl(sor, SOR_PLL1);
+		value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+		value |= SOR_PLL1_TMDS_TERMADJ(adj);
+		tegra_sor_writel(sor, value, SOR_PLL1);
+
+		usleep_range(100, 200);
+
+		value = tegra_sor_readl(sor, SOR_PLL1);
+		if (value & SOR_PLL1_TERM_COMPOUT)
+			adj &= ~mask;
+
+		mask >>= 1;
+	}
+
+	value = tegra_sor_readl(sor, SOR_PLL1);
+	value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+	value |= SOR_PLL1_TMDS_TERMADJ(adj);
+	tegra_sor_writel(sor, value, SOR_PLL1);
+
+	/* disable pad calibration logic */
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value |= SOR_DP_PADCTL_PAD_CAL_PD;
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+}
+
 static void tegra_sor_super_update(struct tegra_sor *sor)
 {
-	tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0);
-	tegra_sor_writel(sor, 1, SOR_SUPER_STATE_0);
-	tegra_sor_writel(sor, 0, SOR_SUPER_STATE_0);
+	tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
+	tegra_sor_writel(sor, 1, SOR_SUPER_STATE0);
+	tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
 }
 
 static void tegra_sor_update(struct tegra_sor *sor)
 {
-	tegra_sor_writel(sor, 0, SOR_STATE_0);
-	tegra_sor_writel(sor, 1, SOR_STATE_0);
-	tegra_sor_writel(sor, 0, SOR_STATE_0);
+	tegra_sor_writel(sor, 0, SOR_STATE0);
+	tegra_sor_writel(sor, 1, SOR_STATE0);
+	tegra_sor_writel(sor, 0, SOR_STATE0);
 }
 
 static int tegra_sor_setup_pwm(struct tegra_sor *sor, unsigned long timeout)
@@ -235,16 +420,16 @@
 	unsigned long value, timeout;
 
 	/* wake up in normal mode */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value |= SOR_SUPER_STATE_HEAD_MODE_AWAKE;
 	value |= SOR_SUPER_STATE_MODE_NORMAL;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	/* attach */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value |= SOR_SUPER_STATE_ATTACHED;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	timeout = jiffies + msecs_to_jiffies(250);
@@ -385,7 +570,7 @@
 }
 
 static int tegra_sor_calc_config(struct tegra_sor *sor,
-				 struct drm_display_mode *mode,
+				 const struct drm_display_mode *mode,
 				 struct tegra_sor_config *config,
 				 struct drm_dp_link *link)
 {
@@ -481,9 +666,9 @@
 	unsigned long value, timeout;
 
 	/* switch to safe mode */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value &= ~SOR_SUPER_STATE_MODE_NORMAL;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	timeout = jiffies + msecs_to_jiffies(250);
@@ -498,15 +683,15 @@
 		return -ETIMEDOUT;
 
 	/* go to sleep */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value &= ~SOR_SUPER_STATE_HEAD_MODE_MASK;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	/* detach */
-	value = tegra_sor_readl(sor, SOR_SUPER_STATE_1);
+	value = tegra_sor_readl(sor, SOR_SUPER_STATE1);
 	value &= ~SOR_SUPER_STATE_ATTACHED;
-	tegra_sor_writel(sor, value, SOR_SUPER_STATE_1);
+	tegra_sor_writel(sor, value, SOR_SUPER_STATE1);
 	tegra_sor_super_update(sor);
 
 	timeout = jiffies + msecs_to_jiffies(250);
@@ -552,10 +737,10 @@
 	if (err < 0)
 		dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
 		   SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	/* stop lane sequencer */
 	value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
@@ -575,39 +760,26 @@
 	if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
 		return -ETIMEDOUT;
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value |= SOR_PLL_2_PORT_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value |= SOR_PLL2_PORT_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	usleep_range(20, 100);
 
-	value = tegra_sor_readl(sor, SOR_PLL_0);
-	value |= SOR_PLL_0_POWER_OFF;
-	value |= SOR_PLL_0_VCOPD;
-	tegra_sor_writel(sor, value, SOR_PLL_0);
+	value = tegra_sor_readl(sor, SOR_PLL0);
+	value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
+	tegra_sor_writel(sor, value, SOR_PLL0);
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value |= SOR_PLL_2_SEQ_PLLCAPPD;
-	value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value |= SOR_PLL2_SEQ_PLLCAPPD;
+	value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	usleep_range(20, 100);
 
 	return 0;
 }
 
-static int tegra_sor_crc_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-
-	return 0;
-}
-
-static int tegra_sor_crc_release(struct inode *inode, struct file *file)
-{
-	return 0;
-}
-
 static int tegra_sor_crc_wait(struct tegra_sor *sor, unsigned long timeout)
 {
 	u32 value;
@@ -615,8 +787,8 @@
 	timeout = jiffies + msecs_to_jiffies(timeout);
 
 	while (time_before(jiffies, timeout)) {
-		value = tegra_sor_readl(sor, SOR_CRC_A);
-		if (value & SOR_CRC_A_VALID)
+		value = tegra_sor_readl(sor, SOR_CRCA);
+		if (value & SOR_CRCA_VALID)
 			return 0;
 
 		usleep_range(100, 200);
@@ -625,24 +797,25 @@
 	return -ETIMEDOUT;
 }
 
-static ssize_t tegra_sor_crc_read(struct file *file, char __user *buffer,
-				  size_t size, loff_t *ppos)
+static int tegra_sor_show_crc(struct seq_file *s, void *data)
 {
-	struct tegra_sor *sor = file->private_data;
-	ssize_t num, err;
-	char buf[10];
+	struct drm_info_node *node = s->private;
+	struct tegra_sor *sor = node->info_ent->data;
+	struct drm_crtc *crtc = sor->output.encoder.crtc;
+	struct drm_device *drm = node->minor->dev;
+	int err = 0;
 	u32 value;
 
-	mutex_lock(&sor->lock);
+	drm_modeset_lock_all(drm);
 
-	if (!sor->enabled) {
-		err = -EAGAIN;
+	if (!crtc || !crtc->state->active) {
+		err = -EBUSY;
 		goto unlock;
 	}
 
-	value = tegra_sor_readl(sor, SOR_STATE_1);
+	value = tegra_sor_readl(sor, SOR_STATE1);
 	value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
-	tegra_sor_writel(sor, value, SOR_STATE_1);
+	tegra_sor_writel(sor, value, SOR_STATE1);
 
 	value = tegra_sor_readl(sor, SOR_CRC_CNTRL);
 	value |= SOR_CRC_CNTRL_ENABLE;
@@ -656,65 +829,66 @@
 	if (err < 0)
 		goto unlock;
 
-	tegra_sor_writel(sor, SOR_CRC_A_RESET, SOR_CRC_A);
-	value = tegra_sor_readl(sor, SOR_CRC_B);
+	tegra_sor_writel(sor, SOR_CRCA_RESET, SOR_CRCA);
+	value = tegra_sor_readl(sor, SOR_CRCB);
 
-	num = scnprintf(buf, sizeof(buf), "%08x\n", value);
-
-	err = simple_read_from_buffer(buffer, size, ppos, buf, num);
+	seq_printf(s, "%08x\n", value);
 
 unlock:
-	mutex_unlock(&sor->lock);
+	drm_modeset_unlock_all(drm);
 	return err;
 }
 
-static const struct file_operations tegra_sor_crc_fops = {
-	.owner = THIS_MODULE,
-	.open = tegra_sor_crc_open,
-	.read = tegra_sor_crc_read,
-	.release = tegra_sor_crc_release,
-};
-
 static int tegra_sor_show_regs(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct tegra_sor *sor = node->info_ent->data;
+	struct drm_crtc *crtc = sor->output.encoder.crtc;
+	struct drm_device *drm = node->minor->dev;
+	int err = 0;
+
+	drm_modeset_lock_all(drm);
+
+	if (!crtc || !crtc->state->active) {
+		err = -EBUSY;
+		goto unlock;
+	}
 
 #define DUMP_REG(name)						\
 	seq_printf(s, "%-38s %#05x %08x\n", #name, name,	\
 		   tegra_sor_readl(sor, name))
 
 	DUMP_REG(SOR_CTXSW);
-	DUMP_REG(SOR_SUPER_STATE_0);
-	DUMP_REG(SOR_SUPER_STATE_1);
-	DUMP_REG(SOR_STATE_0);
-	DUMP_REG(SOR_STATE_1);
-	DUMP_REG(SOR_HEAD_STATE_0(0));
-	DUMP_REG(SOR_HEAD_STATE_0(1));
-	DUMP_REG(SOR_HEAD_STATE_1(0));
-	DUMP_REG(SOR_HEAD_STATE_1(1));
-	DUMP_REG(SOR_HEAD_STATE_2(0));
-	DUMP_REG(SOR_HEAD_STATE_2(1));
-	DUMP_REG(SOR_HEAD_STATE_3(0));
-	DUMP_REG(SOR_HEAD_STATE_3(1));
-	DUMP_REG(SOR_HEAD_STATE_4(0));
-	DUMP_REG(SOR_HEAD_STATE_4(1));
-	DUMP_REG(SOR_HEAD_STATE_5(0));
-	DUMP_REG(SOR_HEAD_STATE_5(1));
+	DUMP_REG(SOR_SUPER_STATE0);
+	DUMP_REG(SOR_SUPER_STATE1);
+	DUMP_REG(SOR_STATE0);
+	DUMP_REG(SOR_STATE1);
+	DUMP_REG(SOR_HEAD_STATE0(0));
+	DUMP_REG(SOR_HEAD_STATE0(1));
+	DUMP_REG(SOR_HEAD_STATE1(0));
+	DUMP_REG(SOR_HEAD_STATE1(1));
+	DUMP_REG(SOR_HEAD_STATE2(0));
+	DUMP_REG(SOR_HEAD_STATE2(1));
+	DUMP_REG(SOR_HEAD_STATE3(0));
+	DUMP_REG(SOR_HEAD_STATE3(1));
+	DUMP_REG(SOR_HEAD_STATE4(0));
+	DUMP_REG(SOR_HEAD_STATE4(1));
+	DUMP_REG(SOR_HEAD_STATE5(0));
+	DUMP_REG(SOR_HEAD_STATE5(1));
 	DUMP_REG(SOR_CRC_CNTRL);
 	DUMP_REG(SOR_DP_DEBUG_MVID);
 	DUMP_REG(SOR_CLK_CNTRL);
 	DUMP_REG(SOR_CAP);
 	DUMP_REG(SOR_PWR);
 	DUMP_REG(SOR_TEST);
-	DUMP_REG(SOR_PLL_0);
-	DUMP_REG(SOR_PLL_1);
-	DUMP_REG(SOR_PLL_2);
-	DUMP_REG(SOR_PLL_3);
+	DUMP_REG(SOR_PLL0);
+	DUMP_REG(SOR_PLL1);
+	DUMP_REG(SOR_PLL2);
+	DUMP_REG(SOR_PLL3);
 	DUMP_REG(SOR_CSTM);
 	DUMP_REG(SOR_LVDS);
-	DUMP_REG(SOR_CRC_A);
-	DUMP_REG(SOR_CRC_B);
+	DUMP_REG(SOR_CRCA);
+	DUMP_REG(SOR_CRCB);
 	DUMP_REG(SOR_BLANK);
 	DUMP_REG(SOR_SEQ_CTL);
 	DUMP_REG(SOR_LANE_SEQ_CTL);
@@ -736,86 +910,89 @@
 	DUMP_REG(SOR_SEQ_INST(15));
 	DUMP_REG(SOR_PWM_DIV);
 	DUMP_REG(SOR_PWM_CTL);
-	DUMP_REG(SOR_VCRC_A_0);
-	DUMP_REG(SOR_VCRC_A_1);
-	DUMP_REG(SOR_VCRC_B_0);
-	DUMP_REG(SOR_VCRC_B_1);
-	DUMP_REG(SOR_CCRC_A_0);
-	DUMP_REG(SOR_CCRC_A_1);
-	DUMP_REG(SOR_CCRC_B_0);
-	DUMP_REG(SOR_CCRC_B_1);
-	DUMP_REG(SOR_EDATA_A_0);
-	DUMP_REG(SOR_EDATA_A_1);
-	DUMP_REG(SOR_EDATA_B_0);
-	DUMP_REG(SOR_EDATA_B_1);
-	DUMP_REG(SOR_COUNT_A_0);
-	DUMP_REG(SOR_COUNT_A_1);
-	DUMP_REG(SOR_COUNT_B_0);
-	DUMP_REG(SOR_COUNT_B_1);
-	DUMP_REG(SOR_DEBUG_A_0);
-	DUMP_REG(SOR_DEBUG_A_1);
-	DUMP_REG(SOR_DEBUG_B_0);
-	DUMP_REG(SOR_DEBUG_B_1);
+	DUMP_REG(SOR_VCRC_A0);
+	DUMP_REG(SOR_VCRC_A1);
+	DUMP_REG(SOR_VCRC_B0);
+	DUMP_REG(SOR_VCRC_B1);
+	DUMP_REG(SOR_CCRC_A0);
+	DUMP_REG(SOR_CCRC_A1);
+	DUMP_REG(SOR_CCRC_B0);
+	DUMP_REG(SOR_CCRC_B1);
+	DUMP_REG(SOR_EDATA_A0);
+	DUMP_REG(SOR_EDATA_A1);
+	DUMP_REG(SOR_EDATA_B0);
+	DUMP_REG(SOR_EDATA_B1);
+	DUMP_REG(SOR_COUNT_A0);
+	DUMP_REG(SOR_COUNT_A1);
+	DUMP_REG(SOR_COUNT_B0);
+	DUMP_REG(SOR_COUNT_B1);
+	DUMP_REG(SOR_DEBUG_A0);
+	DUMP_REG(SOR_DEBUG_A1);
+	DUMP_REG(SOR_DEBUG_B0);
+	DUMP_REG(SOR_DEBUG_B1);
 	DUMP_REG(SOR_TRIG);
 	DUMP_REG(SOR_MSCHECK);
 	DUMP_REG(SOR_XBAR_CTRL);
 	DUMP_REG(SOR_XBAR_POL);
-	DUMP_REG(SOR_DP_LINKCTL_0);
-	DUMP_REG(SOR_DP_LINKCTL_1);
-	DUMP_REG(SOR_LANE_DRIVE_CURRENT_0);
-	DUMP_REG(SOR_LANE_DRIVE_CURRENT_1);
-	DUMP_REG(SOR_LANE4_DRIVE_CURRENT_0);
-	DUMP_REG(SOR_LANE4_DRIVE_CURRENT_1);
-	DUMP_REG(SOR_LANE_PREEMPHASIS_0);
-	DUMP_REG(SOR_LANE_PREEMPHASIS_1);
-	DUMP_REG(SOR_LANE4_PREEMPHASIS_0);
-	DUMP_REG(SOR_LANE4_PREEMPHASIS_1);
-	DUMP_REG(SOR_LANE_POST_CURSOR_0);
-	DUMP_REG(SOR_LANE_POST_CURSOR_1);
-	DUMP_REG(SOR_DP_CONFIG_0);
-	DUMP_REG(SOR_DP_CONFIG_1);
-	DUMP_REG(SOR_DP_MN_0);
-	DUMP_REG(SOR_DP_MN_1);
-	DUMP_REG(SOR_DP_PADCTL_0);
-	DUMP_REG(SOR_DP_PADCTL_1);
-	DUMP_REG(SOR_DP_DEBUG_0);
-	DUMP_REG(SOR_DP_DEBUG_1);
-	DUMP_REG(SOR_DP_SPARE_0);
-	DUMP_REG(SOR_DP_SPARE_1);
+	DUMP_REG(SOR_DP_LINKCTL0);
+	DUMP_REG(SOR_DP_LINKCTL1);
+	DUMP_REG(SOR_LANE_DRIVE_CURRENT0);
+	DUMP_REG(SOR_LANE_DRIVE_CURRENT1);
+	DUMP_REG(SOR_LANE4_DRIVE_CURRENT0);
+	DUMP_REG(SOR_LANE4_DRIVE_CURRENT1);
+	DUMP_REG(SOR_LANE_PREEMPHASIS0);
+	DUMP_REG(SOR_LANE_PREEMPHASIS1);
+	DUMP_REG(SOR_LANE4_PREEMPHASIS0);
+	DUMP_REG(SOR_LANE4_PREEMPHASIS1);
+	DUMP_REG(SOR_LANE_POSTCURSOR0);
+	DUMP_REG(SOR_LANE_POSTCURSOR1);
+	DUMP_REG(SOR_DP_CONFIG0);
+	DUMP_REG(SOR_DP_CONFIG1);
+	DUMP_REG(SOR_DP_MN0);
+	DUMP_REG(SOR_DP_MN1);
+	DUMP_REG(SOR_DP_PADCTL0);
+	DUMP_REG(SOR_DP_PADCTL1);
+	DUMP_REG(SOR_DP_DEBUG0);
+	DUMP_REG(SOR_DP_DEBUG1);
+	DUMP_REG(SOR_DP_SPARE0);
+	DUMP_REG(SOR_DP_SPARE1);
 	DUMP_REG(SOR_DP_AUDIO_CTRL);
 	DUMP_REG(SOR_DP_AUDIO_HBLANK_SYMBOLS);
 	DUMP_REG(SOR_DP_AUDIO_VBLANK_SYMBOLS);
 	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_HEADER);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_0);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_1);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_2);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_3);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_4);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_5);
-	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK_6);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK0);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK1);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK2);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK3);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK4);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK5);
+	DUMP_REG(SOR_DP_GENERIC_INFOFRAME_SUBPACK6);
 	DUMP_REG(SOR_DP_TPG);
 	DUMP_REG(SOR_DP_TPG_CONFIG);
-	DUMP_REG(SOR_DP_LQ_CSTM_0);
-	DUMP_REG(SOR_DP_LQ_CSTM_1);
-	DUMP_REG(SOR_DP_LQ_CSTM_2);
+	DUMP_REG(SOR_DP_LQ_CSTM0);
+	DUMP_REG(SOR_DP_LQ_CSTM1);
+	DUMP_REG(SOR_DP_LQ_CSTM2);
 
 #undef DUMP_REG
 
-	return 0;
+unlock:
+	drm_modeset_unlock_all(drm);
+	return err;
 }
 
 static const struct drm_info_list debugfs_files[] = {
+	{ "crc", tegra_sor_show_crc, 0, NULL },
 	{ "regs", tegra_sor_show_regs, 0, NULL },
 };
 
 static int tegra_sor_debugfs_init(struct tegra_sor *sor,
 				  struct drm_minor *minor)
 {
-	struct dentry *entry;
+	const char *name = sor->soc->supports_dp ? "sor1" : "sor";
 	unsigned int i;
-	int err = 0;
+	int err;
 
-	sor->debugfs = debugfs_create_dir("sor", minor->debugfs_root);
+	sor->debugfs = debugfs_create_dir(name, minor->debugfs_root);
 	if (!sor->debugfs)
 		return -ENOMEM;
 
@@ -835,14 +1012,9 @@
 	if (err < 0)
 		goto free;
 
-	entry = debugfs_create_file("crc", 0644, sor->debugfs, sor,
-				    &tegra_sor_crc_fops);
-	if (!entry) {
-		err = -ENOMEM;
-		goto free;
-	}
+	sor->minor = minor;
 
-	return err;
+	return 0;
 
 free:
 	kfree(sor->debugfs_files);
@@ -860,14 +1032,10 @@
 	sor->minor = NULL;
 
 	kfree(sor->debugfs_files);
-	sor->debugfs = NULL;
+	sor->debugfs_files = NULL;
 
 	debugfs_remove_recursive(sor->debugfs);
-	sor->debugfs_files = NULL;
-}
-
-static void tegra_sor_connector_dpms(struct drm_connector *connector, int mode)
-{
+	sor->debugfs = NULL;
 }
 
 static enum drm_connector_status
@@ -879,11 +1047,11 @@
 	if (sor->dpaux)
 		return tegra_dpaux_detect(sor->dpaux);
 
-	return connector_status_unknown;
+	return tegra_output_connector_detect(connector, force);
 }
 
 static const struct drm_connector_funcs tegra_sor_connector_funcs = {
-	.dpms = tegra_sor_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.reset = drm_atomic_helper_connector_reset,
 	.detect = tegra_sor_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -926,22 +1094,102 @@
 	.destroy = tegra_output_encoder_destroy,
 };
 
-static void tegra_sor_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tegra_sor_edp_disable(struct drm_encoder *encoder)
 {
+	struct tegra_output *output = encoder_to_output(encoder);
+	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+	struct tegra_sor *sor = to_sor(output);
+	u32 value;
+	int err;
+
+	if (output->panel)
+		drm_panel_disable(output->panel);
+
+	err = tegra_sor_detach(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+
+	tegra_sor_writel(sor, 0, SOR_STATE1);
+	tegra_sor_update(sor);
+
+	/*
+	 * The following accesses registers of the display controller, so make
+	 * sure it's only executed when the output is attached to one.
+	 */
+	if (dc) {
+		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+		value &= ~SOR_ENABLE;
+		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+		tegra_dc_commit(dc);
+	}
+
+	err = tegra_sor_power_down(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+
+	if (sor->dpaux) {
+		err = tegra_dpaux_disable(sor->dpaux);
+		if (err < 0)
+			dev_err(sor->dev, "failed to disable DP: %d\n", err);
+	}
+
+	err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power off I/O rail: %d\n", err);
+
+	if (output->panel)
+		drm_panel_unprepare(output->panel);
+
+	reset_control_assert(sor->rst);
+	clk_disable_unprepare(sor->clk);
 }
 
-static void tegra_sor_encoder_prepare(struct drm_encoder *encoder)
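+/*
+ * Currently unused: a sketch for deriving the H_REF_TO_SYNC timing
+ * value, kept around for reference.
+ */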
+#if 0
+static int calc_h_ref_to_sync(const struct drm_display_mode *mode,
+			      unsigned int *value)
 {
-}
+	unsigned int hfp, hsw, hbp, a = 0, b;
 
-static void tegra_sor_encoder_commit(struct drm_encoder *encoder)
-{
-}
+	hfp = mode->hsync_start - mode->hdisplay;
+	hsw = mode->hsync_end - mode->hsync_start;
+	hbp = mode->htotal - mode->hsync_end;
 
-static void tegra_sor_encoder_mode_set(struct drm_encoder *encoder,
-				       struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted)
+	pr_info("hfp: %u, hsw: %u, hbp: %u\n", hfp, hsw, hbp);
+
+	b = hfp - 1;
+
+	pr_info("a: %u, b: %u\n", a, b);
+	pr_info("a + hsw + hbp = %u\n", a + hsw + hbp);
+
+	if (a + hsw + hbp <= 11) {
+		a = 1 + 11 - hsw - hbp;
+		pr_info("a: %u\n", a);
+	}
+
+	if (a > b)
+		return -EINVAL;
+
+	if (hsw < 1)
+		return -EINVAL;
+
+	if (mode->hdisplay < 16)
+		return -EINVAL;
+
+	if (value) {
+		if (b > a && a % 2)
+			*value = a + 1;
+		else
+			*value = a;
+	}
+
+	return 0;
+}
+#endif
+
+static void tegra_sor_edp_enable(struct drm_encoder *encoder)
 {
+	struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
 	struct tegra_output *output = encoder_to_output(encoder);
 	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
 	unsigned int vbe, vse, hbe, hse, vbs, hbs, i;
@@ -952,14 +1200,9 @@
 	int err = 0;
 	u32 value;
 
-	mutex_lock(&sor->lock);
-
-	if (sor->enabled)
-		goto unlock;
-
 	err = clk_prepare_enable(sor->clk);
 	if (err < 0)
-		goto unlock;
+		dev_err(sor->dev, "failed to enable clock: %d\n", err);
 
 	reset_control_deassert(sor->rst);
 
@@ -978,7 +1221,7 @@
 		if (err < 0) {
 			dev_err(sor->dev, "failed to probe eDP link: %d\n",
 				err);
-			goto unlock;
+			return;
 		}
 	}
 
@@ -999,40 +1242,40 @@
 	value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
 	tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value &= ~SOR_PLL_2_BANDGAP_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 	usleep_range(20, 100);
 
-	value = tegra_sor_readl(sor, SOR_PLL_3);
-	value |= SOR_PLL_3_PLL_VDD_MODE_V3_3;
-	tegra_sor_writel(sor, value, SOR_PLL_3);
+	value = tegra_sor_readl(sor, SOR_PLL3);
+	value |= SOR_PLL3_PLL_VDD_MODE_3V3;
+	tegra_sor_writel(sor, value, SOR_PLL3);
 
-	value = SOR_PLL_0_ICHPMP(0xf) | SOR_PLL_0_VCOCAP_RST |
-		SOR_PLL_0_PLLREG_LEVEL_V45 | SOR_PLL_0_RESISTOR_EXT;
-	tegra_sor_writel(sor, value, SOR_PLL_0);
+	value = SOR_PLL0_ICHPMP(0xf) | SOR_PLL0_VCOCAP_RST |
+		SOR_PLL0_PLLREG_LEVEL_V45 | SOR_PLL0_RESISTOR_EXT;
+	tegra_sor_writel(sor, value, SOR_PLL0);
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value |= SOR_PLL_2_SEQ_PLLCAPPD;
-	value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
-	value |= SOR_PLL_2_LVDS_ENABLE;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value |= SOR_PLL2_SEQ_PLLCAPPD;
+	value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+	value |= SOR_PLL2_LVDS_ENABLE;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
-	value = SOR_PLL_1_TERM_COMPOUT | SOR_PLL_1_TMDS_TERM;
-	tegra_sor_writel(sor, value, SOR_PLL_1);
+	value = SOR_PLL1_TERM_COMPOUT | SOR_PLL1_TMDS_TERM;
+	tegra_sor_writel(sor, value, SOR_PLL1);
 
 	while (true) {
-		value = tegra_sor_readl(sor, SOR_PLL_2);
-		if ((value & SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE) == 0)
+		value = tegra_sor_readl(sor, SOR_PLL2);
+		if ((value & SOR_PLL2_SEQ_PLLCAPPD_ENFORCE) == 0)
 			break;
 
 		usleep_range(250, 1000);
 	}
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value &= ~SOR_PLL_2_POWERDOWN_OVERRIDE;
-	value &= ~SOR_PLL_2_PORT_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
+	value &= ~SOR_PLL2_PORT_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	/*
 	 * power up
@@ -1045,51 +1288,49 @@
 	tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
 
 	/* step 1 */
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value |= SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL_2_PORT_POWERDOWN |
-		 SOR_PLL_2_BANDGAP_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL2_PORT_POWERDOWN |
+		 SOR_PLL2_BANDGAP_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
-	value = tegra_sor_readl(sor, SOR_PLL_0);
-	value |= SOR_PLL_0_VCOPD | SOR_PLL_0_POWER_OFF;
-	tegra_sor_writel(sor, value, SOR_PLL_0);
+	value = tegra_sor_readl(sor, SOR_PLL0);
+	value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
+	tegra_sor_writel(sor, value, SOR_PLL0);
 
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	/* step 2 */
 	err = tegra_io_rail_power_on(TEGRA_IO_RAIL_LVDS);
-	if (err < 0) {
+	if (err < 0)
 		dev_err(sor->dev, "failed to power on I/O rail: %d\n", err);
-		goto unlock;
-	}
 
 	usleep_range(5, 100);
 
 	/* step 3 */
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value &= ~SOR_PLL_2_BANDGAP_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	usleep_range(20, 100);
 
 	/* step 4 */
-	value = tegra_sor_readl(sor, SOR_PLL_0);
-	value &= ~SOR_PLL_0_POWER_OFF;
-	value &= ~SOR_PLL_0_VCOPD;
-	tegra_sor_writel(sor, value, SOR_PLL_0);
+	value = tegra_sor_readl(sor, SOR_PLL0);
+	value &= ~SOR_PLL0_VCOPD;
+	value &= ~SOR_PLL0_PWR;
+	tegra_sor_writel(sor, value, SOR_PLL0);
 
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value &= ~SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	usleep_range(200, 1000);
 
 	/* step 5 */
-	value = tegra_sor_readl(sor, SOR_PLL_2);
-	value &= ~SOR_PLL_2_PORT_POWERDOWN;
-	tegra_sor_writel(sor, value, SOR_PLL_2);
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_PORT_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
 
 	/* switch to DP clock */
 	err = clk_set_parent(sor->clk, sor->clk_dp);
@@ -1097,7 +1338,7 @@
 		dev_err(sor->dev, "failed to set DP parent clock: %d\n", err);
 
 	/* power DP lanes */
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 
 	if (link.num_lanes <= 2)
 		value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2);
@@ -1114,12 +1355,12 @@
 	else
 		value |= SOR_DP_PADCTL_PD_TXD_0;
 
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
-	value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
 	value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
 	value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes);
-	tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
 
 	/* start lane sequencer */
 	value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
@@ -1141,14 +1382,14 @@
 	tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
 
 	/* set linkctl */
-	value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
 	value |= SOR_DP_LINKCTL_ENABLE;
 
 	value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
 	value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size);
 
 	value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
-	tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
 
 	for (i = 0, value = 0; i < 4; i++) {
 		unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
@@ -1159,7 +1400,7 @@
 
 	tegra_sor_writel(sor, value, SOR_DP_TPG);
 
-	value = tegra_sor_readl(sor, SOR_DP_CONFIG_0);
+	value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
 	value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
 	value |= SOR_DP_CONFIG_WATERMARK(config.watermark);
 
@@ -1176,7 +1417,7 @@
 
 	value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
 	value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
-	tegra_sor_writel(sor, value, SOR_DP_CONFIG_0);
+	tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
 
 	value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
 	value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
@@ -1189,33 +1430,27 @@
 	tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
 
 	/* enable pad calibration logic */
-	value = tegra_sor_readl(sor, SOR_DP_PADCTL_0);
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
 	value |= SOR_DP_PADCTL_PAD_CAL_PD;
-	tegra_sor_writel(sor, value, SOR_DP_PADCTL_0);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
 
 	if (sor->dpaux) {
 		u8 rate, lanes;
 
 		err = drm_dp_link_probe(aux, &link);
-		if (err < 0) {
+		if (err < 0)
 			dev_err(sor->dev, "failed to probe eDP link: %d\n",
 				err);
-			goto unlock;
-		}
 
 		err = drm_dp_link_power_up(aux, &link);
-		if (err < 0) {
+		if (err < 0)
 			dev_err(sor->dev, "failed to power up eDP link: %d\n",
 				err);
-			goto unlock;
-		}
 
 		err = drm_dp_link_configure(aux, &link);
-		if (err < 0) {
+		if (err < 0)
 			dev_err(sor->dev, "failed to configure eDP link: %d\n",
 				err);
-			goto unlock;
-		}
 
 		rate = drm_dp_link_rate_to_bw_code(link.rate);
 		lanes = link.num_lanes;
@@ -1225,14 +1460,14 @@
 		value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
 		tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
 
-		value = tegra_sor_readl(sor, SOR_DP_LINKCTL_0);
+		value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
 		value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
 		value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
 
 		if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
 			value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
 
-		tegra_sor_writel(sor, value, SOR_DP_LINKCTL_0);
+		tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
 
 		/* disable training pattern generator */
 
@@ -1249,17 +1484,14 @@
 		if (err < 0) {
 			dev_err(sor->dev, "DP fast link training failed: %d\n",
 				err);
-			goto unlock;
 		}
 
 		dev_dbg(sor->dev, "fast link training succeeded\n");
 	}
 
 	err = tegra_sor_power_up(sor, 250);
-	if (err < 0) {
+	if (err < 0)
 		dev_err(sor->dev, "failed to power up SOR: %d\n", err);
-		goto unlock;
-	}
 
 	/*
 	 * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
@@ -1295,7 +1527,7 @@
 		break;
 	}
 
-	tegra_sor_writel(sor, value, SOR_STATE_1);
+	tegra_sor_writel(sor, value, SOR_STATE1);
 
 	/*
 	 * TODO: The video timing programming below doesn't seem to match the
@@ -1303,25 +1535,27 @@
 	 */
 
 	value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
-	tegra_sor_writel(sor, value, SOR_HEAD_STATE_1(0));
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
 
 	vse = mode->vsync_end - mode->vsync_start - 1;
 	hse = mode->hsync_end - mode->hsync_start - 1;
 
 	value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
-	tegra_sor_writel(sor, value, SOR_HEAD_STATE_2(0));
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
 
 	vbe = vse + (mode->vsync_start - mode->vdisplay);
 	hbe = hse + (mode->hsync_start - mode->hdisplay);
 
 	value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
-	tegra_sor_writel(sor, value, SOR_HEAD_STATE_3(0));
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
 
 	vbs = vbe + mode->vdisplay;
 	hbs = hbe + mode->hdisplay;
 
 	value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
-	tegra_sor_writel(sor, value, SOR_HEAD_STATE_4(0));
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
+
+	tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
 
 	/* CSTM (LVDS, link A/B, upper) */
 	value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
@@ -1330,10 +1564,8 @@
 
 	/* PWM setup */
 	err = tegra_sor_setup_pwm(sor, 250);
-	if (err < 0) {
+	if (err < 0)
 		dev_err(sor->dev, "failed to setup PWM: %d\n", err);
-		goto unlock;
-	}
 
 	tegra_sor_update(sor);
 
@@ -1344,93 +1576,15 @@
 	tegra_dc_commit(dc);
 
 	err = tegra_sor_attach(sor);
-	if (err < 0) {
+	if (err < 0)
 		dev_err(sor->dev, "failed to attach SOR: %d\n", err);
-		goto unlock;
-	}
 
 	err = tegra_sor_wakeup(sor);
-	if (err < 0) {
+	if (err < 0)
 		dev_err(sor->dev, "failed to enable DC: %d\n", err);
-		goto unlock;
-	}
 
 	if (output->panel)
 		drm_panel_enable(output->panel);
-
-	sor->enabled = true;
-
-unlock:
-	mutex_unlock(&sor->lock);
-}
-
-static void tegra_sor_encoder_disable(struct drm_encoder *encoder)
-{
-	struct tegra_output *output = encoder_to_output(encoder);
-	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
-	struct tegra_sor *sor = to_sor(output);
-	u32 value;
-	int err;
-
-	mutex_lock(&sor->lock);
-
-	if (!sor->enabled)
-		goto unlock;
-
-	if (output->panel)
-		drm_panel_disable(output->panel);
-
-	err = tegra_sor_detach(sor);
-	if (err < 0) {
-		dev_err(sor->dev, "failed to detach SOR: %d\n", err);
-		goto unlock;
-	}
-
-	tegra_sor_writel(sor, 0, SOR_STATE_1);
-	tegra_sor_update(sor);
-
-	/*
-	 * The following accesses registers of the display controller, so make
-	 * sure it's only executed when the output is attached to one.
-	 */
-	if (dc) {
-		value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
-		value &= ~SOR_ENABLE;
-		tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
-		tegra_dc_commit(dc);
-	}
-
-	err = tegra_sor_power_down(sor);
-	if (err < 0) {
-		dev_err(sor->dev, "failed to power down SOR: %d\n", err);
-		goto unlock;
-	}
-
-	if (sor->dpaux) {
-		err = tegra_dpaux_disable(sor->dpaux);
-		if (err < 0) {
-			dev_err(sor->dev, "failed to disable DP: %d\n", err);
-			goto unlock;
-		}
-	}
-
-	err = tegra_io_rail_power_off(TEGRA_IO_RAIL_LVDS);
-	if (err < 0) {
-		dev_err(sor->dev, "failed to power off I/O rail: %d\n", err);
-		goto unlock;
-	}
-
-	if (output->panel)
-		drm_panel_unprepare(output->panel);
-
-	clk_disable_unprepare(sor->clk);
-	reset_control_assert(sor->rst);
-
-	sor->enabled = false;
-
-unlock:
-	mutex_unlock(&sor->lock);
 }
 
 static int
@@ -1454,37 +1608,578 @@
 	return 0;
 }
 
-static const struct drm_encoder_helper_funcs tegra_sor_encoder_helper_funcs = {
-	.dpms = tegra_sor_encoder_dpms,
-	.prepare = tegra_sor_encoder_prepare,
-	.commit = tegra_sor_encoder_commit,
-	.mode_set = tegra_sor_encoder_mode_set,
-	.disable = tegra_sor_encoder_disable,
+static const struct drm_encoder_helper_funcs tegra_sor_edp_helpers = {
+	.disable = tegra_sor_edp_disable,
+	.enable = tegra_sor_edp_enable,
+	.atomic_check = tegra_sor_encoder_atomic_check,
+};
+
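+/* pack up to four bytes, LSB first, into a single subpack word */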
+static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size)
+{
+	u32 value = 0;
+	size_t i;
+
+	for (i = size; i > 0; i--)
+		value = (value << 8) | ptr[i - 1];
+
+	return value;
+}
+
+static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor,
+					  const void *data, size_t size)
+{
+	const u8 *ptr = data;
+	unsigned long offset;
+	size_t i;
+	u32 value;
+
+	switch (ptr[0]) {
+	case HDMI_INFOFRAME_TYPE_AVI:
+		offset = SOR_HDMI_AVI_INFOFRAME_HEADER;
+		break;
+
+	case HDMI_INFOFRAME_TYPE_AUDIO:
+		offset = SOR_HDMI_AUDIO_INFOFRAME_HEADER;
+		break;
+
+	case HDMI_INFOFRAME_TYPE_VENDOR:
+		offset = SOR_HDMI_VSI_INFOFRAME_HEADER;
+		break;
+
+	default:
+		dev_err(sor->dev, "unsupported infoframe type: %02x\n",
+			ptr[0]);
+		return;
+	}
+
+	value = INFOFRAME_HEADER_TYPE(ptr[0]) |
+		INFOFRAME_HEADER_VERSION(ptr[1]) |
+		INFOFRAME_HEADER_LEN(ptr[2]);
+	tegra_sor_writel(sor, value, offset);
+	offset++;
+
+	/*
+	 * Each subpack contains 7 bytes, divided into:
+	 * - subpack_low: bytes 0 - 3
+	 * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
+	 */
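+	/*
+	 * For example, the first iteration (i = 3) packs payload bytes
+	 * 3-6 into subpack_low and bytes 7-9 into subpack_high.
+	 */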
+	for (i = 3; i < size; i += 7) {
+		size_t rem = size - i, num = min_t(size_t, rem, 4);
+
+		value = tegra_sor_hdmi_subpack(&ptr[i], num);
+		tegra_sor_writel(sor, value, offset++);
+
+		num = min_t(size_t, rem - num, 3);
+
+		value = tegra_sor_hdmi_subpack(&ptr[i + 4], num);
+		tegra_sor_writel(sor, value, offset++);
+	}
+}
+
+static int
+tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor,
+				   const struct drm_display_mode *mode)
+{
+	u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+	struct hdmi_avi_infoframe frame;
+	u32 value;
+	int err;
+
+	/* disable AVI infoframe */
+	value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
+	value &= ~INFOFRAME_CTRL_SINGLE;
+	value &= ~INFOFRAME_CTRL_OTHER;
+	value &= ~INFOFRAME_CTRL_ENABLE;
+	tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
+
+	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+	if (err < 0) {
+		dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
+		return err;
+	}
+
+	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+	if (err < 0) {
+		dev_err(sor->dev, "failed to pack AVI infoframe: %d\n", err);
+		return err;
+	}
+
+	tegra_sor_hdmi_write_infopack(sor, buffer, err);
+
+	/* enable AVI infoframe */
+	value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
+	value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
+	value |= INFOFRAME_CTRL_ENABLE;
+	tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
+
+	return 0;
+}
+
+static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
+{
+	u32 value;
+
+	value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+	value &= ~INFOFRAME_CTRL_ENABLE;
+	tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static struct tegra_sor_hdmi_settings *
+tegra_sor_hdmi_find_settings(struct tegra_sor *sor, unsigned long frequency)
+{
+	unsigned int i;
+
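+	/*
+	 * The settings tables are sorted by ascending frequency, so the
+	 * first entry at or above the requested frequency applies.
+	 */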
+	for (i = 0; i < sor->num_settings; i++)
+		if (frequency <= sor->settings[i].frequency)
+			return &sor->settings[i];
+
+	return NULL;
+}
+
+static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
+{
+	struct tegra_output *output = encoder_to_output(encoder);
+	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+	struct tegra_sor *sor = to_sor(output);
+	u32 value;
+	int err;
+
+	err = tegra_sor_detach(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+
+	tegra_sor_writel(sor, 0, SOR_STATE1);
+	tegra_sor_update(sor);
+
+	/* disable display to SOR clock */
+	value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+	value &= ~SOR1_TIMING_CYA;
+	value &= ~SOR1_ENABLE;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+	tegra_dc_commit(dc);
+
+	err = tegra_sor_power_down(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+
+	err = tegra_io_rail_power_off(TEGRA_IO_RAIL_HDMI);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power off HDMI rail: %d\n", err);
+
+	reset_control_assert(sor->rst);
+	usleep_range(1000, 2000);
+	clk_disable_unprepare(sor->clk);
+}
+
+static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
+{
+	struct tegra_output *output = encoder_to_output(encoder);
+	unsigned int h_ref_to_sync = 1, pulse_start, max_ac;
+	struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+	unsigned int vbe, vse, hbe, hse, vbs, hbs, div;
+	struct tegra_sor_hdmi_settings *settings;
+	struct tegra_sor *sor = to_sor(output);
+	struct drm_display_mode *mode;
+	struct drm_display_info *info;
+	u32 value;
+	int err;
+
+	mode = &encoder->crtc->state->adjusted_mode;
+	info = &output->connector.display_info;
+
+	err = clk_prepare_enable(sor->clk);
+	if (err < 0)
+		dev_err(sor->dev, "failed to enable clock: %d\n", err);
+
+	usleep_range(1000, 2000);
+
+	reset_control_deassert(sor->rst);
+
+	err = clk_set_parent(sor->clk, sor->clk_safe);
+	if (err < 0)
+		dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+
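+	/* the divider is presumably in 0.25 MHz steps, hence the factor of 4 */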
+	div = clk_get_rate(sor->clk) / 1000000 * 4;
+
+	err = tegra_io_rail_power_on(TEGRA_IO_RAIL_HDMI);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power on HDMI rail: %d\n", err);
+
+	usleep_range(20, 100);
+
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
+
+	usleep_range(20, 100);
+
+	value = tegra_sor_readl(sor, SOR_PLL3);
+	value &= ~SOR_PLL3_PLL_VDD_MODE_3V3;
+	tegra_sor_writel(sor, value, SOR_PLL3);
+
+	value = tegra_sor_readl(sor, SOR_PLL0);
+	value &= ~SOR_PLL0_VCOPD;
+	value &= ~SOR_PLL0_PWR;
+	tegra_sor_writel(sor, value, SOR_PLL0);
+
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+	tegra_sor_writel(sor, value, SOR_PLL2);
+
+	usleep_range(200, 400);
+
+	value = tegra_sor_readl(sor, SOR_PLL2);
+	value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
+	value &= ~SOR_PLL2_PORT_POWERDOWN;
+	tegra_sor_writel(sor, value, SOR_PLL2);
+
+	usleep_range(20, 100);
+
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
+		 SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2;
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+	while (true) {
+		value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+		if ((value & SOR_LANE_SEQ_CTL_STATE_BUSY) == 0)
+			break;
+
+		usleep_range(250, 1000);
+	}
+
+	value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
+		SOR_LANE_SEQ_CTL_POWER_STATE_UP | SOR_LANE_SEQ_CTL_DELAY(5);
+	tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+	while (true) {
+		value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+		if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+			break;
+
+		usleep_range(250, 1000);
+	}
+
+	value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+	value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
+	value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
+	if (mode->clock < 340000)
+		value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70;
+	else
+		value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G5_40;
+
+	value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
+	tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+	value = tegra_sor_readl(sor, SOR_DP_SPARE0);
+	value |= SOR_DP_SPARE_DISP_VIDEO_PREAMBLE;
+	value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+	value |= SOR_DP_SPARE_SEQ_ENABLE;
+	tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+	value = SOR_SEQ_CTL_PU_PC(0) | SOR_SEQ_CTL_PU_PC_ALT(0) |
+		SOR_SEQ_CTL_PD_PC(8) | SOR_SEQ_CTL_PD_PC_ALT(8);
+	tegra_sor_writel(sor, value, SOR_SEQ_CTL);
+
+	value = SOR_SEQ_INST_DRIVE_PWM_OUT_LO | SOR_SEQ_INST_HALT |
+		SOR_SEQ_INST_WAIT_VSYNC | SOR_SEQ_INST_WAIT(1);
+	tegra_sor_writel(sor, value, SOR_SEQ_INST(0));
+	tegra_sor_writel(sor, value, SOR_SEQ_INST(8));
+
+	/* program the reference clock */
+	value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
+	tegra_sor_writel(sor, value, SOR_REFCLK);
+
+	/* XXX don't hardcode */
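+	/* route SOR lanes to pads; this mapping swaps lanes 0 and 2 on link 0 */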
+	value = SOR_XBAR_CTRL_LINK1_XSEL(4, 4) |
+		SOR_XBAR_CTRL_LINK1_XSEL(3, 3) |
+		SOR_XBAR_CTRL_LINK1_XSEL(2, 2) |
+		SOR_XBAR_CTRL_LINK1_XSEL(1, 1) |
+		SOR_XBAR_CTRL_LINK1_XSEL(0, 0) |
+		SOR_XBAR_CTRL_LINK0_XSEL(4, 4) |
+		SOR_XBAR_CTRL_LINK0_XSEL(3, 3) |
+		SOR_XBAR_CTRL_LINK0_XSEL(2, 0) |
+		SOR_XBAR_CTRL_LINK0_XSEL(1, 1) |
+		SOR_XBAR_CTRL_LINK0_XSEL(0, 2);
+	tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+
+	tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+
+	err = clk_set_parent(sor->clk, sor->clk_parent);
+	if (err < 0)
+		dev_err(sor->dev, "failed to set parent clock: %d\n", err);
+
+	value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
+
+	/* XXX is this the proper check? */
+	if (mode->clock < 75000)
+		value |= SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED;
+
+	tegra_sor_writel(sor, value, SOR_INPUT_CONTROL);
+
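+	/* maximum audio/control packets that fit into the horizontal blanking */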
+	max_ac = ((mode->htotal - mode->hdisplay) - SOR_REKEY - 18) / 32;
+
+	value = SOR_HDMI_CTRL_ENABLE | SOR_HDMI_CTRL_MAX_AC_PACKET(max_ac) |
+		SOR_HDMI_CTRL_AUDIO_LAYOUT | SOR_HDMI_CTRL_REKEY(SOR_REKEY);
+	tegra_sor_writel(sor, value, SOR_HDMI_CTRL);
+
+	/* H_PULSE2 setup */
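+	/* the video preamble is sent during h_pulse2 */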
+	pulse_start = h_ref_to_sync + (mode->hsync_end - mode->hsync_start) +
+		      (mode->htotal - mode->hsync_end) - 10;
+
+	value = PULSE_LAST_END_A | PULSE_QUAL_VACTIVE |
+		PULSE_POLARITY_HIGH | PULSE_MODE_NORMAL;
+	tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
+
+	value = PULSE_END(pulse_start + 8) | PULSE_START(pulse_start);
+	tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
+
+	value = tegra_dc_readl(dc, DC_DISP_DISP_SIGNAL_OPTIONS0);
+	value |= H_PULSE2_ENABLE;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_SIGNAL_OPTIONS0);
+
+	/* infoframe setup */
+	err = tegra_sor_hdmi_setup_avi_infoframe(sor, mode);
+	if (err < 0)
+		dev_err(sor->dev, "failed to set up AVI infoframe: %d\n", err);
+
+	/* XXX HDMI audio support not implemented yet */
+	tegra_sor_hdmi_disable_audio_infoframe(sor);
+
+	/* use single TMDS protocol */
+	value = tegra_sor_readl(sor, SOR_STATE1);
+	value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+	value |= SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A;
+	tegra_sor_writel(sor, value, SOR_STATE1);
+
+	/* power up pad calibration */
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+	/* production settings */
+	settings = tegra_sor_hdmi_find_settings(sor, mode->clock * 1000);
+	if (IS_ERR(settings)) {
+		dev_err(sor->dev, "no settings for pixel clock %d Hz: %ld\n",
+			mode->clock * 1000, PTR_ERR(settings));
+		return;
+	}
+
+	value = tegra_sor_readl(sor, SOR_PLL0);
+	value &= ~SOR_PLL0_ICHPMP_MASK;
+	value &= ~SOR_PLL0_VCOCAP_MASK;
+	value |= SOR_PLL0_ICHPMP(settings->ichpmp);
+	value |= SOR_PLL0_VCOCAP(settings->vcocap);
+	tegra_sor_writel(sor, value, SOR_PLL0);
+
+	tegra_sor_dp_term_calibrate(sor);
+
+	value = tegra_sor_readl(sor, SOR_PLL1);
+	value &= ~SOR_PLL1_LOADADJ_MASK;
+	value |= SOR_PLL1_LOADADJ(settings->loadadj);
+	tegra_sor_writel(sor, value, SOR_PLL1);
+
+	value = tegra_sor_readl(sor, SOR_PLL3);
+	value &= ~SOR_PLL3_BG_VREF_LEVEL_MASK;
+	value |= SOR_PLL3_BG_VREF_LEVEL(settings->bg_vref);
+	tegra_sor_writel(sor, value, SOR_PLL3);
+
+	value = settings->drive_current[0] << 24 |
+		settings->drive_current[1] << 16 |
+		settings->drive_current[2] <<  8 |
+		settings->drive_current[3] <<  0;
+	tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
+
+	value = settings->preemphasis[0] << 24 |
+		settings->preemphasis[1] << 16 |
+		settings->preemphasis[2] <<  8 |
+		settings->preemphasis[3] <<  0;
+	tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
+
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value &= ~SOR_DP_PADCTL_TX_PU_MASK;
+	value |= SOR_DP_PADCTL_TX_PU_ENABLE;
+	value |= SOR_DP_PADCTL_TX_PU(settings->tx_pu);
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+	/* power down pad calibration */
+	value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
+	value |= SOR_DP_PADCTL_PAD_CAL_PD;
+	tegra_sor_writel(sor, value, SOR_DP_PADCTL0);
+
+	/* miscellaneous display controller settings */
+	value = VSYNC_H_POSITION(1);
+	tegra_dc_writel(dc, value, DC_DISP_DISP_TIMING_OPTIONS);
+
+	value = tegra_dc_readl(dc, DC_DISP_DISP_COLOR_CONTROL);
+	value &= ~DITHER_CONTROL_MASK;
+	value &= ~BASE_COLOR_SIZE_MASK;
+
+	switch (info->bpc) {
+	case 6:
+		value |= BASE_COLOR_SIZE_666;
+		break;
+
+	case 8:
+		value |= BASE_COLOR_SIZE_888;
+		break;
+
+	default:
+		WARN(1, "%u bits-per-color not supported\n", info->bpc);
+		break;
+	}
+
+	tegra_dc_writel(dc, value, DC_DISP_DISP_COLOR_CONTROL);
+
+	err = tegra_sor_power_up(sor, 250);
+	if (err < 0)
+		dev_err(sor->dev, "failed to power up SOR: %d\n", err);
+
+	/* configure mode */
+	value = tegra_sor_readl(sor, SOR_STATE1);
+	value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
+	value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
+	value &= ~SOR_STATE_ASY_OWNER_MASK;
+
+	value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
+		 SOR_STATE_ASY_OWNER(dc->pipe + 1);
+
+	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+		value &= ~SOR_STATE_ASY_HSYNCPOL;
+
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		value |= SOR_STATE_ASY_HSYNCPOL;
+
+	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+		value &= ~SOR_STATE_ASY_VSYNCPOL;
+
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		value |= SOR_STATE_ASY_VSYNCPOL;
+
+	switch (info->bpc) {
+	case 8:
+		value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+		break;
+
+	case 6:
+		value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
+		break;
+
+	default:
+		BUG();
+		break;
+	}
+
+	tegra_sor_writel(sor, value, SOR_STATE1);
+
+	value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
+	value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK;
+	value &= ~SOR_HEAD_STATE_DYNRANGE_MASK;
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
+
+	value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
+	value &= ~SOR_HEAD_STATE_COLORSPACE_MASK;
+	value |= SOR_HEAD_STATE_COLORSPACE_RGB;
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
+
+	/*
+	 * TODO: The video timing programming below doesn't seem to match the
+	 * register definitions.
+	 */
+
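+	/*
+	 * Worked example: a standard 1920x1080 mode has hdisplay = 1920,
+	 * hsync_start = 2008, hsync_end = 2052 and htotal = 2200, so the
+	 * code below yields hse = 2052 - 2008 - 1 = 43, hbe = 43 +
+	 * (2200 - 2052) = 191 and hbs = 191 + 1920 = 2111 (the vertical
+	 * side works the same way); all values fit the 15-bit fields.
+	 */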
+	value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
+
+	/* sync end = sync width - 1 */
+	vse = mode->vsync_end - mode->vsync_start - 1;
+	hse = mode->hsync_end - mode->hsync_start - 1;
+
+	value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
+
+	/* blank end = sync end + back porch */
+	vbe = vse + (mode->vtotal - mode->vsync_end);
+	hbe = hse + (mode->htotal - mode->hsync_end);
+
+	value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
+
+	/* blank start = blank end + active */
+	vbs = vbe + mode->vdisplay;
+	hbs = hbe + mode->hdisplay;
+
+	value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
+	tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
+
+	tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
+
+	tegra_sor_update(sor);
+
+	err = tegra_sor_attach(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to attach SOR: %d\n", err);
+
+	/* enable display to SOR clock and generate HDMI preamble */
+	value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+	value |= SOR1_ENABLE | SOR1_TIMING_CYA;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+	tegra_dc_commit(dc);
+
+	err = tegra_sor_wakeup(sor);
+	if (err < 0)
+		dev_err(sor->dev, "failed to wake up SOR: %d\n", err);
+}
+
+static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
+	.disable = tegra_sor_hdmi_disable,
+	.enable = tegra_sor_hdmi_enable,
 	.atomic_check = tegra_sor_encoder_atomic_check,
 };
 
 static int tegra_sor_init(struct host1x_client *client)
 {
 	struct drm_device *drm = dev_get_drvdata(client->parent);
+	const struct drm_encoder_helper_funcs *helpers = NULL;
 	struct tegra_sor *sor = host1x_client_to_sor(client);
+	int connector = DRM_MODE_CONNECTOR_Unknown;
+	int encoder = DRM_MODE_ENCODER_NONE;
 	int err;
 
-	if (!sor->dpaux)
-		return -ENODEV;
+	if (!sor->dpaux) {
+		if (sor->soc->supports_hdmi) {
+			connector = DRM_MODE_CONNECTOR_HDMIA;
+			encoder = DRM_MODE_ENCODER_TMDS;
+			helpers = &tegra_sor_hdmi_helpers;
+		} else if (sor->soc->supports_lvds) {
+			connector = DRM_MODE_CONNECTOR_LVDS;
+			encoder = DRM_MODE_ENCODER_LVDS;
+		}
+	} else {
+		if (sor->soc->supports_edp) {
+			connector = DRM_MODE_CONNECTOR_eDP;
+			encoder = DRM_MODE_ENCODER_TMDS;
+			helpers = &tegra_sor_edp_helpers;
+		} else if (sor->soc->supports_dp) {
+			connector = DRM_MODE_CONNECTOR_DisplayPort;
+			encoder = DRM_MODE_ENCODER_TMDS;
+		}
+	}
 
 	sor->output.dev = sor->dev;
 
 	drm_connector_init(drm, &sor->output.connector,
 			   &tegra_sor_connector_funcs,
-			   DRM_MODE_CONNECTOR_eDP);
+			   connector);
 	drm_connector_helper_add(&sor->output.connector,
 				 &tegra_sor_connector_helper_funcs);
 	sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
 
 	drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs,
-			 DRM_MODE_ENCODER_TMDS);
-	drm_encoder_helper_add(&sor->output.encoder,
-			       &tegra_sor_encoder_helper_funcs);
+			 encoder);
+	drm_encoder_helper_add(&sor->output.encoder, helpers);
 
 	drm_mode_connector_attach_encoder(&sor->output.connector,
 					  &sor->output.encoder);
@@ -1577,18 +2272,130 @@
 	.exit = tegra_sor_exit,
 };
 
+static const struct tegra_sor_ops tegra_sor_edp_ops = {
+	.name = "eDP",
+};
+
+static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
+{
+	int err;
+
+	sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
+	if (IS_ERR(sor->avdd_io_supply)) {
+		dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
+			PTR_ERR(sor->avdd_io_supply));
+		return PTR_ERR(sor->avdd_io_supply);
+	}
+
+	err = regulator_enable(sor->avdd_io_supply);
+	if (err < 0) {
+		dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
+			err);
+		return err;
+	}
+
+	sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
+	if (IS_ERR(sor->vdd_pll_supply)) {
+		dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
+			PTR_ERR(sor->vdd_pll_supply));
+		return PTR_ERR(sor->vdd_pll_supply);
+	}
+
+	err = regulator_enable(sor->vdd_pll_supply);
+	if (err < 0) {
+		dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
+			err);
+		return err;
+	}
+
+	sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
+	if (IS_ERR(sor->hdmi_supply)) {
+		dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
+			PTR_ERR(sor->hdmi_supply));
+		return PTR_ERR(sor->hdmi_supply);
+	}
+
+	err = regulator_enable(sor->hdmi_supply);
+	if (err < 0) {
+		dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
+{
+	regulator_disable(sor->hdmi_supply);
+	regulator_disable(sor->vdd_pll_supply);
+	regulator_disable(sor->avdd_io_supply);
+
+	return 0;
+}
+
+static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
+	.name = "HDMI",
+	.probe = tegra_sor_hdmi_probe,
+	.remove = tegra_sor_hdmi_remove,
+};
+
+static const struct tegra_sor_soc tegra124_sor = {
+	.supports_edp = true,
+	.supports_lvds = true,
+	.supports_hdmi = false,
+	.supports_dp = false,
+};
+
+static const struct tegra_sor_soc tegra210_sor = {
+	.supports_edp = true,
+	.supports_lvds = false,
+	.supports_hdmi = false,
+	.supports_dp = false,
+};
+
+static const struct tegra_sor_soc tegra210_sor1 = {
+	.supports_edp = false,
+	.supports_lvds = false,
+	.supports_hdmi = true,
+	.supports_dp = true,
+
+	.num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
+	.settings = tegra210_sor_hdmi_defaults,
+};
+
+static const struct of_device_id tegra_sor_of_match[] = {
+	{ .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 },
+	{ .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor },
+	{ .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
+
 static int tegra_sor_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *match;
 	struct device_node *np;
 	struct tegra_sor *sor;
 	struct resource *regs;
 	int err;
 
+	match = of_match_device(tegra_sor_of_match, &pdev->dev);
+
 	sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL);
 	if (!sor)
 		return -ENOMEM;
 
 	sor->output.dev = sor->dev = &pdev->dev;
+	sor->soc = match->data;
+
+	sor->settings = devm_kmemdup(&pdev->dev, sor->soc->settings,
+				     sor->soc->num_settings *
+					sizeof(*sor->settings),
+				     GFP_KERNEL);
+	if (!sor->settings)
+		return -ENOMEM;
+
+	sor->num_settings = sor->soc->num_settings;
 
 	np = of_parse_phandle(pdev->dev.of_node, "nvidia,dpaux", 0);
 	if (np) {
@@ -1599,51 +2406,106 @@
 			return -EPROBE_DEFER;
 	}
 
+	if (!sor->dpaux) {
+		if (sor->soc->supports_hdmi) {
+			sor->ops = &tegra_sor_hdmi_ops;
+		} else if (sor->soc->supports_lvds) {
+			dev_err(&pdev->dev, "LVDS not supported yet\n");
+			return -ENODEV;
+		} else {
+			dev_err(&pdev->dev, "unsupported non-DP output\n");
+			return -ENODEV;
+		}
+	} else {
+		if (sor->soc->supports_edp) {
+			sor->ops = &tegra_sor_edp_ops;
+		} else if (sor->soc->supports_dp) {
+			dev_err(&pdev->dev, "DisplayPort not supported yet\n");
+			return -ENODEV;
+		} else {
+			dev_err(&pdev->dev, "unsupported DP output\n");
+			return -ENODEV;
+		}
+	}
+
 	err = tegra_output_probe(&sor->output);
-	if (err < 0)
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to probe output: %d\n", err);
 		return err;
+	}
+
+	if (sor->ops && sor->ops->probe) {
+		err = sor->ops->probe(sor);
+		if (err < 0) {
+			dev_err(&pdev->dev, "failed to probe %s: %d\n",
+				sor->ops->name, err);
+			goto output;
+		}
+	}
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	sor->regs = devm_ioremap_resource(&pdev->dev, regs);
-	if (IS_ERR(sor->regs))
-		return PTR_ERR(sor->regs);
+	if (IS_ERR(sor->regs)) {
+		err = PTR_ERR(sor->regs);
+		goto remove;
+	}
 
 	sor->rst = devm_reset_control_get(&pdev->dev, "sor");
-	if (IS_ERR(sor->rst))
-		return PTR_ERR(sor->rst);
+	if (IS_ERR(sor->rst)) {
+		err = PTR_ERR(sor->rst);
+		dev_err(&pdev->dev, "failed to get reset control: %d\n", err);
+		goto remove;
+	}
 
 	sor->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(sor->clk))
-		return PTR_ERR(sor->clk);
+	if (IS_ERR(sor->clk)) {
+		err = PTR_ERR(sor->clk);
+		dev_err(&pdev->dev, "failed to get module clock: %d\n", err);
+		goto remove;
+	}
 
 	sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
-	if (IS_ERR(sor->clk_parent))
-		return PTR_ERR(sor->clk_parent);
+	if (IS_ERR(sor->clk_parent)) {
+		err = PTR_ERR(sor->clk_parent);
+		dev_err(&pdev->dev, "failed to get parent clock: %d\n", err);
+		goto remove;
+	}
 
 	sor->clk_safe = devm_clk_get(&pdev->dev, "safe");
-	if (IS_ERR(sor->clk_safe))
-		return PTR_ERR(sor->clk_safe);
+	if (IS_ERR(sor->clk_safe)) {
+		err = PTR_ERR(sor->clk_safe);
+		dev_err(&pdev->dev, "failed to get safe clock: %d\n", err);
+		goto remove;
+	}
 
 	sor->clk_dp = devm_clk_get(&pdev->dev, "dp");
-	if (IS_ERR(sor->clk_dp))
-		return PTR_ERR(sor->clk_dp);
+	if (IS_ERR(sor->clk_dp)) {
+		err = PTR_ERR(sor->clk_dp);
+		dev_err(&pdev->dev, "failed to get DP clock: %d\n", err);
+		goto remove;
+	}
 
 	INIT_LIST_HEAD(&sor->client.list);
 	sor->client.ops = &sor_client_ops;
 	sor->client.dev = &pdev->dev;
 
-	mutex_init(&sor->lock);
-
 	err = host1x_client_register(&sor->client);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
 			err);
-		return err;
+		goto remove;
 	}
 
 	platform_set_drvdata(pdev, sor);
 
 	return 0;
+
+remove:
+	if (sor->ops && sor->ops->remove)
+		sor->ops->remove(sor);
+output:
+	tegra_output_remove(&sor->output);
+	return err;
 }
 
 static int tegra_sor_remove(struct platform_device *pdev)
@@ -1658,17 +2520,17 @@
 		return err;
 	}
 
+	if (sor->ops && sor->ops->remove) {
+		err = sor->ops->remove(sor);
+		if (err < 0)
+			dev_err(&pdev->dev, "failed to remove SOR: %d\n", err);
+	}
+
 	tegra_output_remove(&sor->output);
 
 	return 0;
 }
 
-static const struct of_device_id tegra_sor_of_match[] = {
-	{ .compatible = "nvidia,tegra124-sor", },
-	{ },
-};
-MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
-
 struct platform_driver tegra_sor_driver = {
 	.driver = {
 		.name = "tegra-sor",
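
One caveat worth noting in tegra_sor_hdmi_probe() earlier in this file: if a later devm_regulator_get() or regulator_enable() call fails, the supplies that were already enabled are left on. A minimal unwind sketch in the usual goto style (illustrative only, dev_err() reporting elided, not part of this patch):

	static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
	{
		int err;

		sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
		if (IS_ERR(sor->avdd_io_supply))
			return PTR_ERR(sor->avdd_io_supply);

		err = regulator_enable(sor->avdd_io_supply);
		if (err < 0)
			return err;

		sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
		if (IS_ERR(sor->vdd_pll_supply)) {
			err = PTR_ERR(sor->vdd_pll_supply);
			goto disable_avdd_io;
		}

		err = regulator_enable(sor->vdd_pll_supply);
		if (err < 0)
			goto disable_avdd_io;

		sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
		if (IS_ERR(sor->hdmi_supply)) {
			err = PTR_ERR(sor->hdmi_supply);
			goto disable_vdd_pll;
		}

		err = regulator_enable(sor->hdmi_supply);
		if (err < 0)
			goto disable_vdd_pll;

		return 0;

	disable_vdd_pll:
		regulator_disable(sor->vdd_pll_supply);
	disable_avdd_io:
		regulator_disable(sor->avdd_io_supply);
		return err;
	}
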
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index a5f8853..2d31d02 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -11,9 +11,9 @@
 
 #define SOR_CTXSW 0x00
 
-#define SOR_SUPER_STATE_0 0x01
+#define SOR_SUPER_STATE0 0x01
 
-#define SOR_SUPER_STATE_1 0x02
+#define SOR_SUPER_STATE1 0x02
 #define  SOR_SUPER_STATE_ATTACHED		(1 << 3)
 #define  SOR_SUPER_STATE_MODE_NORMAL		(1 << 2)
 #define  SOR_SUPER_STATE_HEAD_MODE_MASK		(3 << 0)
@@ -21,9 +21,9 @@
 #define  SOR_SUPER_STATE_HEAD_MODE_SNOOZE	(1 << 0)
 #define  SOR_SUPER_STATE_HEAD_MODE_SLEEP	(0 << 0)
 
-#define SOR_STATE_0 0x03
+#define SOR_STATE0 0x03
 
-#define SOR_STATE_1 0x04
+#define SOR_STATE1 0x04
 #define  SOR_STATE_ASY_PIXELDEPTH_MASK		(0xf << 17)
 #define  SOR_STATE_ASY_PIXELDEPTH_BPP_18_444	(0x2 << 17)
 #define  SOR_STATE_ASY_PIXELDEPTH_BPP_24_444	(0x5 << 17)
@@ -33,19 +33,27 @@
 #define  SOR_STATE_ASY_PROTOCOL_CUSTOM		(0xf << 8)
 #define  SOR_STATE_ASY_PROTOCOL_DP_A		(0x8 << 8)
 #define  SOR_STATE_ASY_PROTOCOL_DP_B		(0x9 << 8)
+#define  SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A	(0x1 << 8)
 #define  SOR_STATE_ASY_PROTOCOL_LVDS		(0x0 << 8)
 #define  SOR_STATE_ASY_CRC_MODE_MASK		(0x3 << 6)
 #define  SOR_STATE_ASY_CRC_MODE_NON_ACTIVE	(0x2 << 6)
 #define  SOR_STATE_ASY_CRC_MODE_COMPLETE	(0x1 << 6)
 #define  SOR_STATE_ASY_CRC_MODE_ACTIVE		(0x0 << 6)
+#define  SOR_STATE_ASY_OWNER_MASK		0xf
 #define  SOR_STATE_ASY_OWNER(x)			(((x) & 0xf) << 0)
 
-#define SOR_HEAD_STATE_0(x) (0x05 + (x))
-#define SOR_HEAD_STATE_1(x) (0x07 + (x))
-#define SOR_HEAD_STATE_2(x) (0x09 + (x))
-#define SOR_HEAD_STATE_3(x) (0x0b + (x))
-#define SOR_HEAD_STATE_4(x) (0x0d + (x))
-#define SOR_HEAD_STATE_5(x) (0x0f + (x))
+#define SOR_HEAD_STATE0(x) (0x05 + (x))
+#define  SOR_HEAD_STATE_RANGECOMPRESS_MASK (0x1 << 3)
+#define  SOR_HEAD_STATE_DYNRANGE_MASK (0x1 << 2)
+#define  SOR_HEAD_STATE_DYNRANGE_VESA (0 << 2)
+#define  SOR_HEAD_STATE_DYNRANGE_CEA (1 << 2)
+#define  SOR_HEAD_STATE_COLORSPACE_MASK (0x3 << 0)
+#define  SOR_HEAD_STATE_COLORSPACE_RGB (0 << 0)
+#define SOR_HEAD_STATE1(x) (0x07 + (x))
+#define SOR_HEAD_STATE2(x) (0x09 + (x))
+#define SOR_HEAD_STATE3(x) (0x0b + (x))
+#define SOR_HEAD_STATE4(x) (0x0d + (x))
+#define SOR_HEAD_STATE5(x) (0x0f + (x))
 #define SOR_CRC_CNTRL 0x11
 #define  SOR_CRC_CNTRL_ENABLE			(1 << 0)
 #define SOR_DP_DEBUG_MVID 0x12
@@ -75,62 +83,101 @@
 #define  SOR_TEST_HEAD_MODE_MASK		(3 << 8)
 #define  SOR_TEST_HEAD_MODE_AWAKE		(2 << 8)
 
-#define SOR_PLL_0 0x17
-#define  SOR_PLL_0_ICHPMP_MASK			(0xf << 24)
-#define  SOR_PLL_0_ICHPMP(x)			(((x) & 0xf) << 24)
-#define  SOR_PLL_0_VCOCAP_MASK			(0xf << 8)
-#define  SOR_PLL_0_VCOCAP(x)			(((x) & 0xf) << 8)
-#define  SOR_PLL_0_VCOCAP_RST			SOR_PLL_0_VCOCAP(3)
-#define  SOR_PLL_0_PLLREG_MASK			(0x3 << 6)
-#define  SOR_PLL_0_PLLREG_LEVEL(x)		(((x) & 0x3) << 6)
-#define  SOR_PLL_0_PLLREG_LEVEL_V25		SOR_PLL_0_PLLREG_LEVEL(0)
-#define  SOR_PLL_0_PLLREG_LEVEL_V15		SOR_PLL_0_PLLREG_LEVEL(1)
-#define  SOR_PLL_0_PLLREG_LEVEL_V35		SOR_PLL_0_PLLREG_LEVEL(2)
-#define  SOR_PLL_0_PLLREG_LEVEL_V45		SOR_PLL_0_PLLREG_LEVEL(3)
-#define  SOR_PLL_0_PULLDOWN			(1 << 5)
-#define  SOR_PLL_0_RESISTOR_EXT			(1 << 4)
-#define  SOR_PLL_0_VCOPD			(1 << 2)
-#define  SOR_PLL_0_POWER_OFF			(1 << 0)
+#define SOR_PLL0 0x17
+#define  SOR_PLL0_ICHPMP_MASK			(0xf << 24)
+#define  SOR_PLL0_ICHPMP(x)			(((x) & 0xf) << 24)
+#define  SOR_PLL0_VCOCAP_MASK			(0xf << 8)
+#define  SOR_PLL0_VCOCAP(x)			(((x) & 0xf) << 8)
+#define  SOR_PLL0_VCOCAP_RST			SOR_PLL0_VCOCAP(3)
+#define  SOR_PLL0_PLLREG_MASK			(0x3 << 6)
+#define  SOR_PLL0_PLLREG_LEVEL(x)		(((x) & 0x3) << 6)
+#define  SOR_PLL0_PLLREG_LEVEL_V25		SOR_PLL0_PLLREG_LEVEL(0)
+#define  SOR_PLL0_PLLREG_LEVEL_V15		SOR_PLL0_PLLREG_LEVEL(1)
+#define  SOR_PLL0_PLLREG_LEVEL_V35		SOR_PLL0_PLLREG_LEVEL(2)
+#define  SOR_PLL0_PLLREG_LEVEL_V45		SOR_PLL0_PLLREG_LEVEL(3)
+#define  SOR_PLL0_PULLDOWN			(1 << 5)
+#define  SOR_PLL0_RESISTOR_EXT			(1 << 4)
+#define  SOR_PLL0_VCOPD				(1 << 2)
+#define  SOR_PLL0_PWR				(1 << 0)
 
-#define SOR_PLL_1 0x18
+#define SOR_PLL1 0x18
 /* XXX: read-only bit? */
-#define  SOR_PLL_1_TERM_COMPOUT			(1 << 15)
-#define  SOR_PLL_1_TMDS_TERM			(1 << 8)
+#define  SOR_PLL1_LOADADJ_MASK			(0xf << 20)
+#define  SOR_PLL1_LOADADJ(x)			(((x) & 0xf) << 20)
+#define  SOR_PLL1_TERM_COMPOUT			(1 << 15)
+#define  SOR_PLL1_TMDS_TERMADJ_MASK		(0xf << 9)
+#define  SOR_PLL1_TMDS_TERMADJ(x)		(((x) & 0xf) << 9)
+#define  SOR_PLL1_TMDS_TERM			(1 << 8)
 
-#define SOR_PLL_2 0x19
-#define  SOR_PLL_2_LVDS_ENABLE			(1 << 25)
-#define  SOR_PLL_2_SEQ_PLLCAPPD_ENFORCE		(1 << 24)
-#define  SOR_PLL_2_PORT_POWERDOWN		(1 << 23)
-#define  SOR_PLL_2_BANDGAP_POWERDOWN		(1 << 22)
-#define  SOR_PLL_2_POWERDOWN_OVERRIDE		(1 << 18)
-#define  SOR_PLL_2_SEQ_PLLCAPPD			(1 << 17)
+#define SOR_PLL2 0x19
+#define  SOR_PLL2_LVDS_ENABLE			(1 << 25)
+#define  SOR_PLL2_SEQ_PLLCAPPD_ENFORCE		(1 << 24)
+#define  SOR_PLL2_PORT_POWERDOWN		(1 << 23)
+#define  SOR_PLL2_BANDGAP_POWERDOWN		(1 << 22)
+#define  SOR_PLL2_POWERDOWN_OVERRIDE		(1 << 18)
+#define  SOR_PLL2_SEQ_PLLCAPPD			(1 << 17)
+#define  SOR_PLL2_SEQ_PLL_PULLDOWN		(1 << 16)
 
-#define SOR_PLL_3 0x1a
-#define  SOR_PLL_3_PLL_VDD_MODE_V1_8 (0 << 13)
-#define  SOR_PLL_3_PLL_VDD_MODE_V3_3 (1 << 13)
+#define SOR_PLL3 0x1a
+#define  SOR_PLL3_BG_VREF_LEVEL_MASK		(0xf << 24)
+#define  SOR_PLL3_BG_VREF_LEVEL(x)		(((x) & 0xf) << 24)
+#define  SOR_PLL3_PLL_VDD_MODE_1V8		(0 << 13)
+#define  SOR_PLL3_PLL_VDD_MODE_3V3		(1 << 13)
 
 #define SOR_CSTM 0x1b
+#define  SOR_CSTM_ROTCLK_MASK			(0xf << 24)
+#define  SOR_CSTM_ROTCLK(x)			(((x) & 0xf) << 24)
 #define  SOR_CSTM_LVDS				(1 << 16)
 #define  SOR_CSTM_LINK_ACT_B			(1 << 15)
 #define  SOR_CSTM_LINK_ACT_A			(1 << 14)
 #define  SOR_CSTM_UPPER				(1 << 11)
 
 #define SOR_LVDS 0x1c
-#define SOR_CRC_A 0x1d
-#define  SOR_CRC_A_VALID			(1 << 0)
-#define  SOR_CRC_A_RESET			(1 << 0)
-#define SOR_CRC_B 0x1e
+#define SOR_CRCA 0x1d
+#define  SOR_CRCA_VALID			(1 << 0)
+#define  SOR_CRCA_RESET			(1 << 0)
+#define SOR_CRCB 0x1e
 #define SOR_BLANK 0x1f
 #define SOR_SEQ_CTL 0x20
+#define  SOR_SEQ_CTL_PD_PC_ALT(x)	(((x) & 0xf) << 12)
+#define  SOR_SEQ_CTL_PD_PC(x)		(((x) & 0xf) <<  8)
+#define  SOR_SEQ_CTL_PU_PC_ALT(x)	(((x) & 0xf) <<  4)
+#define  SOR_SEQ_CTL_PU_PC(x)		(((x) & 0xf) <<  0)
 
 #define SOR_LANE_SEQ_CTL 0x21
 #define  SOR_LANE_SEQ_CTL_TRIGGER		(1 << 31)
+#define  SOR_LANE_SEQ_CTL_STATE_BUSY		(1 << 28)
 #define  SOR_LANE_SEQ_CTL_SEQUENCE_UP		(0 << 20)
 #define  SOR_LANE_SEQ_CTL_SEQUENCE_DOWN		(1 << 20)
 #define  SOR_LANE_SEQ_CTL_POWER_STATE_UP	(0 << 16)
 #define  SOR_LANE_SEQ_CTL_POWER_STATE_DOWN	(1 << 16)
+#define  SOR_LANE_SEQ_CTL_DELAY(x)		(((x) & 0xf) << 12)
 
 #define SOR_SEQ_INST(x) (0x22 + (x))
+#define  SOR_SEQ_INST_PLL_PULLDOWN (1 << 31)
+#define  SOR_SEQ_INST_POWERDOWN_MACRO (1 << 30)
+#define  SOR_SEQ_INST_ASSERT_PLL_RESET (1 << 29)
+#define  SOR_SEQ_INST_BLANK_V (1 << 28)
+#define  SOR_SEQ_INST_BLANK_H (1 << 27)
+#define  SOR_SEQ_INST_BLANK_DE (1 << 26)
+#define  SOR_SEQ_INST_BLACK_DATA (1 << 25)
+#define  SOR_SEQ_INST_TRISTATE_IOS (1 << 24)
+#define  SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
+#define  SOR_SEQ_INST_PIN_B_LOW (0 << 22)
+#define  SOR_SEQ_INST_PIN_B_HIGH (1 << 22)
+#define  SOR_SEQ_INST_PIN_A_LOW (0 << 21)
+#define  SOR_SEQ_INST_PIN_A_HIGH (1 << 21)
+#define  SOR_SEQ_INST_SEQUENCE_UP (0 << 19)
+#define  SOR_SEQ_INST_SEQUENCE_DOWN (1 << 19)
+#define  SOR_SEQ_INST_LANE_SEQ_STOP (0 << 18)
+#define  SOR_SEQ_INST_LANE_SEQ_RUN (1 << 18)
+#define  SOR_SEQ_INST_PORT_POWERDOWN (1 << 17)
+#define  SOR_SEQ_INST_PLL_POWERDOWN (1 << 16)
+#define  SOR_SEQ_INST_HALT (1 << 15)
+#define  SOR_SEQ_INST_WAIT_US (0 << 12)
+#define  SOR_SEQ_INST_WAIT_MS (1 << 12)
+#define  SOR_SEQ_INST_WAIT_VSYNC (2 << 12)
+#define  SOR_SEQ_INST_WAIT(x) (((x) & 0x3ff) << 0)
 
 #define SOR_PWM_DIV 0x32
 #define  SOR_PWM_DIV_MASK			0xffffff
@@ -140,32 +187,36 @@
 #define  SOR_PWM_CTL_CLK_SEL			(1 << 30)
 #define  SOR_PWM_CTL_DUTY_CYCLE_MASK		0xffffff
 
-#define SOR_VCRC_A_0 0x34
-#define SOR_VCRC_A_1 0x35
-#define SOR_VCRC_B_0 0x36
-#define SOR_VCRC_B_1 0x37
-#define SOR_CCRC_A_0 0x38
-#define SOR_CCRC_A_1 0x39
-#define SOR_CCRC_B_0 0x3a
-#define SOR_CCRC_B_1 0x3b
-#define SOR_EDATA_A_0 0x3c
-#define SOR_EDATA_A_1 0x3d
-#define SOR_EDATA_B_0 0x3e
-#define SOR_EDATA_B_1 0x3f
-#define SOR_COUNT_A_0 0x40
-#define SOR_COUNT_A_1 0x41
-#define SOR_COUNT_B_0 0x42
-#define SOR_COUNT_B_1 0x43
-#define SOR_DEBUG_A_0 0x44
-#define SOR_DEBUG_A_1 0x45
-#define SOR_DEBUG_B_0 0x46
-#define SOR_DEBUG_B_1 0x47
+#define SOR_VCRC_A0 0x34
+#define SOR_VCRC_A1 0x35
+#define SOR_VCRC_B0 0x36
+#define SOR_VCRC_B1 0x37
+#define SOR_CCRC_A0 0x38
+#define SOR_CCRC_A1 0x39
+#define SOR_CCRC_B0 0x3a
+#define SOR_CCRC_B1 0x3b
+#define SOR_EDATA_A0 0x3c
+#define SOR_EDATA_A1 0x3d
+#define SOR_EDATA_B0 0x3e
+#define SOR_EDATA_B1 0x3f
+#define SOR_COUNT_A0 0x40
+#define SOR_COUNT_A1 0x41
+#define SOR_COUNT_B0 0x42
+#define SOR_COUNT_B1 0x43
+#define SOR_DEBUG_A0 0x44
+#define SOR_DEBUG_A1 0x45
+#define SOR_DEBUG_B0 0x46
+#define SOR_DEBUG_B1 0x47
 #define SOR_TRIG 0x48
 #define SOR_MSCHECK 0x49
 #define SOR_XBAR_CTRL 0x4a
+#define  SOR_XBAR_CTRL_LINK1_XSEL(channel, value) ((((value) & 0x7) << ((channel) * 3)) << 17)
+#define  SOR_XBAR_CTRL_LINK0_XSEL(channel, value) ((((value) & 0x7) << ((channel) * 3)) <<  2)
+#define  SOR_XBAR_CTRL_LINK_SWAP (1 << 1)
+#define  SOR_XBAR_CTRL_BYPASS (1 << 0)
 #define SOR_XBAR_POL 0x4b
 
-#define SOR_DP_LINKCTL_0 0x4c
+#define SOR_DP_LINKCTL0 0x4c
 #define  SOR_DP_LINKCTL_LANE_COUNT_MASK		(0x1f << 16)
 #define  SOR_DP_LINKCTL_LANE_COUNT(x)		(((1 << (x)) - 1) << 16)
 #define  SOR_DP_LINKCTL_ENHANCED_FRAME		(1 << 14)
@@ -173,34 +224,34 @@
 #define  SOR_DP_LINKCTL_TU_SIZE(x)		(((x) & 0x7f) << 2)
 #define  SOR_DP_LINKCTL_ENABLE			(1 << 0)
 
-#define SOR_DP_LINKCTL_1 0x4d
+#define SOR_DP_LINKCTL1 0x4d
 
-#define SOR_LANE_DRIVE_CURRENT_0 0x4e
-#define SOR_LANE_DRIVE_CURRENT_1 0x4f
-#define SOR_LANE4_DRIVE_CURRENT_0 0x50
-#define SOR_LANE4_DRIVE_CURRENT_1 0x51
+#define SOR_LANE_DRIVE_CURRENT0 0x4e
+#define SOR_LANE_DRIVE_CURRENT1 0x4f
+#define SOR_LANE4_DRIVE_CURRENT0 0x50
+#define SOR_LANE4_DRIVE_CURRENT1 0x51
 #define  SOR_LANE_DRIVE_CURRENT_LANE3(x) (((x) & 0xff) << 24)
 #define  SOR_LANE_DRIVE_CURRENT_LANE2(x) (((x) & 0xff) << 16)
 #define  SOR_LANE_DRIVE_CURRENT_LANE1(x) (((x) & 0xff) << 8)
 #define  SOR_LANE_DRIVE_CURRENT_LANE0(x) (((x) & 0xff) << 0)
 
-#define SOR_LANE_PREEMPHASIS_0 0x52
-#define SOR_LANE_PREEMPHASIS_1 0x53
-#define SOR_LANE4_PREEMPHASIS_0 0x54
-#define SOR_LANE4_PREEMPHASIS_1 0x55
+#define SOR_LANE_PREEMPHASIS0 0x52
+#define SOR_LANE_PREEMPHASIS1 0x53
+#define SOR_LANE4_PREEMPHASIS0 0x54
+#define SOR_LANE4_PREEMPHASIS1 0x55
 #define  SOR_LANE_PREEMPHASIS_LANE3(x) (((x) & 0xff) << 24)
 #define  SOR_LANE_PREEMPHASIS_LANE2(x) (((x) & 0xff) << 16)
 #define  SOR_LANE_PREEMPHASIS_LANE1(x) (((x) & 0xff) << 8)
 #define  SOR_LANE_PREEMPHASIS_LANE0(x) (((x) & 0xff) << 0)
 
-#define SOR_LANE_POST_CURSOR_0 0x56
-#define SOR_LANE_POST_CURSOR_1 0x57
-#define  SOR_LANE_POST_CURSOR_LANE3(x) (((x) & 0xff) << 24)
-#define  SOR_LANE_POST_CURSOR_LANE2(x) (((x) & 0xff) << 16)
-#define  SOR_LANE_POST_CURSOR_LANE1(x) (((x) & 0xff) << 8)
-#define  SOR_LANE_POST_CURSOR_LANE0(x) (((x) & 0xff) << 0)
+#define SOR_LANE_POSTCURSOR0 0x56
+#define SOR_LANE_POSTCURSOR1 0x57
+#define  SOR_LANE_POSTCURSOR_LANE3(x) (((x) & 0xff) << 24)
+#define  SOR_LANE_POSTCURSOR_LANE2(x) (((x) & 0xff) << 16)
+#define  SOR_LANE_POSTCURSOR_LANE1(x) (((x) & 0xff) << 8)
+#define  SOR_LANE_POSTCURSOR_LANE0(x) (((x) & 0xff) << 0)
 
-#define SOR_DP_CONFIG_0 0x58
+#define SOR_DP_CONFIG0 0x58
 #define SOR_DP_CONFIG_DISPARITY_NEGATIVE	(1 << 31)
 #define SOR_DP_CONFIG_ACTIVE_SYM_ENABLE		(1 << 26)
 #define SOR_DP_CONFIG_ACTIVE_SYM_POLARITY	(1 << 24)
@@ -211,11 +262,11 @@
 #define SOR_DP_CONFIG_WATERMARK_MASK	(0x3f << 0)
 #define SOR_DP_CONFIG_WATERMARK(x)	(((x) & 0x3f) << 0)
 
-#define SOR_DP_CONFIG_1 0x59
-#define SOR_DP_MN_0 0x5a
-#define SOR_DP_MN_1 0x5b
+#define SOR_DP_CONFIG1 0x59
+#define SOR_DP_MN0 0x5a
+#define SOR_DP_MN1 0x5b
 
-#define SOR_DP_PADCTL_0 0x5c
+#define SOR_DP_PADCTL0 0x5c
 #define  SOR_DP_PADCTL_PAD_CAL_PD	(1 << 23)
 #define  SOR_DP_PADCTL_TX_PU_ENABLE	(1 << 22)
 #define  SOR_DP_PADCTL_TX_PU_MASK	(0xff << 8)
@@ -229,17 +280,18 @@
 #define  SOR_DP_PADCTL_PD_TXD_1		(1 << 1)
 #define  SOR_DP_PADCTL_PD_TXD_2		(1 << 0)
 
-#define SOR_DP_PADCTL_1 0x5d
+#define SOR_DP_PADCTL1 0x5d
 
-#define SOR_DP_DEBUG_0 0x5e
-#define SOR_DP_DEBUG_1 0x5f
+#define SOR_DP_DEBUG0 0x5e
+#define SOR_DP_DEBUG1 0x5f
 
-#define SOR_DP_SPARE_0 0x60
-#define  SOR_DP_SPARE_MACRO_SOR_CLK	(1 << 2)
-#define  SOR_DP_SPARE_PANEL_INTERNAL	(1 << 1)
-#define  SOR_DP_SPARE_SEQ_ENABLE	(1 << 0)
+#define SOR_DP_SPARE0 0x60
+#define  SOR_DP_SPARE_DISP_VIDEO_PREAMBLE	(1 << 3)
+#define  SOR_DP_SPARE_MACRO_SOR_CLK		(1 << 2)
+#define  SOR_DP_SPARE_PANEL_INTERNAL		(1 << 1)
+#define  SOR_DP_SPARE_SEQ_ENABLE		(1 << 0)
 
-#define SOR_DP_SPARE_1 0x61
+#define SOR_DP_SPARE1 0x61
 #define SOR_DP_AUDIO_CTRL 0x62
 
 #define SOR_DP_AUDIO_HBLANK_SYMBOLS 0x63
@@ -249,13 +301,13 @@
 #define SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK (0x1fffff << 0)
 
 #define SOR_DP_GENERIC_INFOFRAME_HEADER 0x65
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_0 0x66
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_1 0x67
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_2 0x68
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_3 0x69
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_4 0x6a
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_5 0x6b
-#define SOR_DP_GENERIC_INFOFRAME_SUBPACK_6 0x6c
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK0 0x66
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK1 0x67
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK2 0x68
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK3 0x69
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK4 0x6a
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK5 0x6b
+#define SOR_DP_GENERIC_INFOFRAME_SUBPACK6 0x6c
 
 #define SOR_DP_TPG 0x6d
 #define  SOR_DP_TPG_CHANNEL_CODING	(1 << 6)
@@ -275,8 +327,44 @@
 #define  SOR_DP_TPG_PATTERN_NONE	(0x0 << 0)
 
 #define SOR_DP_TPG_CONFIG 0x6e
-#define SOR_DP_LQ_CSTM_0 0x6f
-#define SOR_DP_LQ_CSTM_1 0x70
-#define SOR_DP_LQ_CSTM_2 0x71
+#define SOR_DP_LQ_CSTM0 0x6f
+#define SOR_DP_LQ_CSTM1 0x70
+#define SOR_DP_LQ_CSTM2 0x71
+
+#define SOR_HDMI_AUDIO_INFOFRAME_CTRL 0x9a
+#define SOR_HDMI_AUDIO_INFOFRAME_STATUS 0x9b
+#define SOR_HDMI_AUDIO_INFOFRAME_HEADER 0x9c
+
+#define SOR_HDMI_AVI_INFOFRAME_CTRL 0x9f
+#define  INFOFRAME_CTRL_CHECKSUM_ENABLE	(1 << 9)
+#define  INFOFRAME_CTRL_SINGLE		(1 << 8)
+#define  INFOFRAME_CTRL_OTHER		(1 << 4)
+#define  INFOFRAME_CTRL_ENABLE		(1 << 0)
+
+#define SOR_HDMI_AVI_INFOFRAME_STATUS 0xa0
+#define  INFOFRAME_STATUS_DONE		(1 << 0)
+
+#define SOR_HDMI_AVI_INFOFRAME_HEADER 0xa1
+#define  INFOFRAME_HEADER_LEN(x) (((x) & 0xff) << 16)
+#define  INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
+#define  INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
+
+#define SOR_HDMI_CTRL 0xc0
+#define  SOR_HDMI_CTRL_ENABLE (1 << 30)
+#define  SOR_HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
+#define  SOR_HDMI_CTRL_AUDIO_LAYOUT (1 << 10)
+#define  SOR_HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
+
+#define SOR_REFCLK 0xe6
+#define  SOR_REFCLK_DIV_INT(x) ((((x) >> 2) & 0xff) << 8)
+#define  SOR_REFCLK_DIV_FRAC(x) (((x) & 0x3) << 6)
+
+#define SOR_INPUT_CONTROL 0xe8
+#define  SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED (1 << 1)
+#define  SOR_INPUT_CONTROL_HDMI_SRC_SELECT(x) (((x) & 0x1) << 0)
+
+#define SOR_HDMI_VSI_INFOFRAME_CTRL 0x123
+#define SOR_HDMI_VSI_INFOFRAME_STATUS 0x124
+#define SOR_HDMI_VSI_INFOFRAME_HEADER 0x125
 
 #endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 7a03158..0af8bed 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -375,25 +375,17 @@
 		dev_info(&pdev->dev, "found backlight\n");
 	}
 
-	panel_mod->enable_gpio = devm_gpiod_get(&pdev->dev, "enable");
+	panel_mod->enable_gpio = devm_gpiod_get_optional(&pdev->dev, "enable",
+							 GPIOD_OUT_LOW);
 	if (IS_ERR(panel_mod->enable_gpio)) {
 		ret = PTR_ERR(panel_mod->enable_gpio);
-		if (ret != -ENOENT) {
-			dev_err(&pdev->dev, "failed to request enable GPIO\n");
-			goto fail_backlight;
-		}
-
-		/* Optional GPIO is not here, continue silently. */
-		panel_mod->enable_gpio = NULL;
-	} else {
-		ret = gpiod_direction_output(panel_mod->enable_gpio, 0);
-		if (ret < 0) {
-			dev_err(&pdev->dev, "failed to setup GPIO\n");
-			goto fail_backlight;
-		}
-		dev_info(&pdev->dev, "found enable GPIO\n");
+		dev_err(&pdev->dev, "failed to request enable GPIO\n");
+		goto fail_backlight;
 	}
 
+	if (panel_mod->enable_gpio)
+		dev_info(&pdev->dev, "found enable GPIO\n");
+
 	mod = &panel_mod->base;
 	pdev->dev.platform_data = mod;
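
The tilcdc conversion works because devm_gpiod_get_optional() returns NULL, not an error, when the device tree simply omits the "enable" GPIO, so only real failures (such as -EPROBE_DEFER) reach the error path, and GPIOD_OUT_LOW replaces the manual gpiod_direction_output() call. A minimal sketch of the idiom (hypothetical example function, not from this patch):

	#include <linux/device.h>
	#include <linux/gpio/consumer.h>

	static int example_enable_panel(struct device *dev)
	{
		struct gpio_desc *gpio;

		/* NULL (not an error) when no "enable" GPIO is described */
		gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
		if (IS_ERR(gpio))
			return PTR_ERR(gpio); /* real failure, e.g. -EPROBE_DEFER */

		if (gpio)
			gpiod_set_value_cansleep(gpio, 1);

		return 0;
	}
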
 
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index bf080ab..4e19d0f 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -340,7 +340,7 @@
 		swap_storage = shmem_file_setup("ttm swap",
 						ttm->num_pages << PAGE_SHIFT,
 						0);
-		if (unlikely(IS_ERR(swap_storage))) {
+		if (IS_ERR(swap_storage)) {
 			pr_err("Failed allocating swap storage\n");
 			return PTR_ERR(swap_storage);
 		}
@@ -354,7 +354,7 @@
 		if (unlikely(from_page == NULL))
 			continue;
 		to_page = shmem_read_mapping_page(swap_space, i);
-		if (unlikely(IS_ERR(to_page))) {
+		if (IS_ERR(to_page)) {
 			ret = PTR_ERR(to_page);
 			goto out_err;
 		}
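
The ttm_tt change drops the unlikely() around IS_ERR() because the hint is already baked into the macro; include/linux/err.h roughly reads:

	#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

	static inline bool __must_check IS_ERR(const void *ptr)
	{
		return IS_ERR_VALUE((unsigned long)ptr);
	}

so the extra wrapper was redundant rather than wrong.
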
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 5fc16ce..62c7b1d 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -288,7 +288,7 @@
 {
 	struct udl_fbdev *ufbdev = info->par;
 
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 
 	udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
 			  rect->height);
@@ -298,7 +298,7 @@
 {
 	struct udl_fbdev *ufbdev = info->par;
 
-	sys_copyarea(info, region);
+	drm_fb_helper_sys_copyarea(info, region);
 
 	udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
 			  region->height);
@@ -308,7 +308,7 @@
 {
 	struct udl_fbdev *ufbdev = info->par;
 
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 
 	udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
 			  image->height);
@@ -476,7 +476,6 @@
 		container_of(helper, struct udl_fbdev, helper);
 	struct drm_device *dev = ufbdev->helper.dev;
 	struct fb_info *info;
-	struct device *device = dev->dev;
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct udl_gem_object *obj;
@@ -506,21 +505,20 @@
 		goto out_gfree;
 	}
 
-	info = framebuffer_alloc(0, device);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto out_gfree;
 	}
 	info->par = ufbdev;
 
 	ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
 	if (ret)
-		goto out_gfree;
+		goto out_destroy_fbi;
 
 	fb = &ufbdev->ufb.base;
 
 	ufbdev->helper.fb = fb;
-	ufbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "udldrmfb");
 
@@ -533,18 +531,13 @@
 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_gfree;
-	}
-
-
 	DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
 		      fb->width, fb->height,
 		      ufbdev->ufb.obj->vmapping);
 
 	return ret;
+out_destroy_fbi:
+	drm_fb_helper_release_fbi(helper);
 out_gfree:
 	drm_gem_object_unreference(&ufbdev->ufb.obj->base);
 out:
@@ -558,14 +551,8 @@
 static void udl_fbdev_destroy(struct drm_device *dev,
 			      struct udl_fbdev *ufbdev)
 {
-	struct fb_info *info;
-	if (ufbdev->helper.fbdev) {
-		info = ufbdev->helper.fbdev;
-		unregister_framebuffer(info);
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-		framebuffer_release(info);
-	}
+	drm_fb_helper_unregister_fbi(&ufbdev->helper);
+	drm_fb_helper_release_fbi(&ufbdev->helper);
 	drm_fb_helper_fini(&ufbdev->helper);
 	drm_framebuffer_unregister_private(&ufbdev->ufb.base);
 	drm_framebuffer_cleanup(&ufbdev->ufb.base);
@@ -631,11 +618,7 @@
 		return;
 
 	ufbdev = udl->fbdev;
-	if (ufbdev->helper.fbdev) {
-		struct fb_info *info;
-		info = ufbdev->helper.fbdev;
-		unlink_framebuffer(info);
-	}
+	drm_fb_helper_unlink_fbi(&ufbdev->helper);
 }
 
 struct drm_framebuffer *
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index df198d9..6a81e08 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -173,7 +173,7 @@
 				   const struct fb_fillrect *rect)
 {
 	struct virtio_gpu_fbdev *vfbdev = info->par;
-	sys_fillrect(info, rect);
+	drm_fb_helper_sys_fillrect(info, rect);
 	virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
 			     rect->width, rect->height);
 	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -183,7 +183,7 @@
 				   const struct fb_copyarea *area)
 {
 	struct virtio_gpu_fbdev *vfbdev = info->par;
-	sys_copyarea(info, area);
+	drm_fb_helper_sys_copyarea(info, area);
 	virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
 			   area->width, area->height);
 	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -193,7 +193,7 @@
 				    const struct fb_image *image)
 {
 	struct virtio_gpu_fbdev *vfbdev = info->par;
-	sys_imageblit(info, image);
+	drm_fb_helper_sys_imageblit(info, image);
 	virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
 			     image->width, image->height);
 	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
@@ -230,7 +230,6 @@
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd2 mode_cmd = {};
 	struct virtio_gpu_object *obj;
-	struct device *device = vgdev->dev;
 	uint32_t resid, format, size;
 	int ret;
 
@@ -317,18 +316,12 @@
 	if (ret)
 		goto err_obj_attach;
 
-	info = framebuffer_alloc(0, device);
-	if (!info) {
-		ret = -ENOMEM;
+	info = drm_fb_helper_alloc_fbi(helper);
+	if (IS_ERR(info)) {
+		ret = PTR_ERR(info);
 		goto err_fb_alloc;
 	}
 
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto err_fb_alloc_cmap;
-	}
-
 	info->par = helper;
 
 	ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
@@ -339,7 +332,6 @@
 	fb = &vfbdev->vgfb.base;
 
 	vfbdev->helper.fb = fb;
-	vfbdev->helper.fbdev = info;
 
 	strcpy(info->fix.id, "virtiodrmfb");
 	info->flags = FBINFO_DEFAULT;
@@ -357,9 +349,7 @@
 	return 0;
 
 err_fb_init:
-	fb_dealloc_cmap(&info->cmap);
-err_fb_alloc_cmap:
-	framebuffer_release(info);
+	drm_fb_helper_release_fbi(helper);
 err_fb_alloc:
 	virtio_gpu_cmd_resource_inval_backing(vgdev, resid);
 err_obj_attach:
@@ -371,15 +361,11 @@
 static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
 				    struct virtio_gpu_fbdev *vgfbdev)
 {
-	struct fb_info *info;
 	struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;
 
-	if (vgfbdev->helper.fbdev) {
-		info = vgfbdev->helper.fbdev;
+	drm_fb_helper_unregister_fbi(&vgfbdev->helper);
+	drm_fb_helper_release_fbi(&vgfbdev->helper);
 
-		unregister_framebuffer(info);
-		framebuffer_release(info);
-	}
 	if (vgfb->obj)
 		vgfb->obj = NULL;
 	drm_fb_helper_fini(&vgfbdev->helper);
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index ce0ab95..d281575 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -7,6 +7,7 @@
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
 	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
 	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
-	    vmwgfx_cmdbuf_res.o \
+	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
+	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h b/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h
new file mode 100644
index 0000000..8cce7f1
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/includeCheck.h
@@ -0,0 +1,3 @@
+/*
+ * Intentionally empty file.
+ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
new file mode 100644
index 0000000..9ce2466
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
@@ -0,0 +1,110 @@
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_caps.h --
+ *
+ *       Definitions for SVGA3D hardware capabilities.  Capabilities
+ *       are used to query for optional rendering features during
+ *       driver initialization. The capability data is stored as a very
+ *       basic key/value dictionary within the "FIFO register" memory
+ *       area at the beginning of BAR2.
+ *
+ *       Note that these definitions are only for 3D capabilities.
+ *       The SVGA device also has "device capabilities" and "FIFO
+ *       capabilities", which are non-3D-specific and are stored as
+ *       bitfields rather than key/value pairs.
+ */
+
+#ifndef _SVGA3D_CAPS_H_
+#define _SVGA3D_CAPS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+
+#include "includeCheck.h"
+
+#include "svga_reg.h"
+
+#define SVGA_FIFO_3D_CAPS_SIZE   (SVGA_FIFO_3D_CAPS_LAST - \
+                                  SVGA_FIFO_3D_CAPS + 1)
+
+
+/*
+ * SVGA3dCapsRecordType
+ *
+ *    Record types that can be found in the caps block.
+ *    Related record types are grouped together numerically so that
+ *    SVGA3dCaps_FindRecord() can be applied on a range of record
+ *    types.
+ */
+
+typedef enum {
+   SVGA3DCAPS_RECORD_UNKNOWN        = 0,
+   SVGA3DCAPS_RECORD_DEVCAPS_MIN    = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS        = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS_MAX    = 0x1ff,
+} SVGA3dCapsRecordType;
+
+
+/*
+ * SVGA3dCapsRecordHeader
+ *
+ *    Header field leading each caps block record. Contains the offset (in
+ *    register words, NOT bytes) to the next caps block record (or the end
+ *    of caps block records, which will be a zero word) and the record type
+ *    as defined above.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCapsRecordHeader {
+   uint32 length;
+   SVGA3dCapsRecordType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCapsRecordHeader;
+
+
+/*
+ * SVGA3dCapsRecord
+ *
+ *    Caps block record; "data" is a placeholder for the actual data structure
+ *    contained within the record.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCapsRecord {
+   SVGA3dCapsRecordHeader header;
+   uint32 data[1];
+}
+#include "vmware_pack_end.h"
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
+#endif
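
For reference, a sketch of how a consumer might walk this caps block with the definitions above (svga3d_caps_find_record() is an illustrative helper, not part of this patch; it assumes "caps" points at the SVGA_FIFO_3D_CAPS area and, per the header comment, that header.length is the offset in 32-bit words to the next record, with a zero word terminating the list):

	static const SVGA3dCapsRecord *
	svga3d_caps_find_record(const uint32 *caps, SVGA3dCapsRecordType type)
	{
		const uint32 *cur = caps;

		while (*cur != 0) {
			const SVGA3dCapsRecord *record =
				(const SVGA3dCapsRecord *)cur;

			if (record->header.type == type)
				return record;

			/* length is in register words, not bytes */
			cur += record->header.length;
		}

		return NULL;
	}

A DEVCAPS record found this way exposes its key/value pairs through data[], read as SVGA3dCapPair entries.
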
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
new file mode 100644
index 0000000..2dfd57c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
@@ -0,0 +1,2071 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_cmd.h --
+ *
+ *       SVGA 3d hardware cmd definitions
+ */
+
+#ifndef _SVGA3D_CMD_H_
+#define _SVGA3D_CMD_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+#include "svga3d_types.h"
+
+/*
+ * Identifiers for commands in the command FIFO.
+ *
+ * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
+ * the SVGA3D protocol and remain reserved; they should not be used in the
+ * future.
+ *
+ * IDs between 1040 and 1999 (inclusive) are available for use by the
+ * current SVGA3D protocol.
+ *
+ * FIFO clients other than SVGA3D should stay below 1000, or at 2000
+ * and up.
+ */
+
+typedef enum {
+   SVGA_3D_CMD_LEGACY_BASE                                = 1000,
+   SVGA_3D_CMD_BASE                                       = 1040,
+
+   SVGA_3D_CMD_SURFACE_DEFINE                             = 1040,
+   SVGA_3D_CMD_SURFACE_DESTROY                            = 1041,
+   SVGA_3D_CMD_SURFACE_COPY                               = 1042,
+   SVGA_3D_CMD_SURFACE_STRETCHBLT                         = 1043,
+   SVGA_3D_CMD_SURFACE_DMA                                = 1044,
+   SVGA_3D_CMD_CONTEXT_DEFINE                             = 1045,
+   SVGA_3D_CMD_CONTEXT_DESTROY                            = 1046,
+   SVGA_3D_CMD_SETTRANSFORM                               = 1047,
+   SVGA_3D_CMD_SETZRANGE                                  = 1048,
+   SVGA_3D_CMD_SETRENDERSTATE                             = 1049,
+   SVGA_3D_CMD_SETRENDERTARGET                            = 1050,
+   SVGA_3D_CMD_SETTEXTURESTATE                            = 1051,
+   SVGA_3D_CMD_SETMATERIAL                                = 1052,
+   SVGA_3D_CMD_SETLIGHTDATA                               = 1053,
+   SVGA_3D_CMD_SETLIGHTENABLED                            = 1054,
+   SVGA_3D_CMD_SETVIEWPORT                                = 1055,
+   SVGA_3D_CMD_SETCLIPPLANE                               = 1056,
+   SVGA_3D_CMD_CLEAR                                      = 1057,
+   SVGA_3D_CMD_PRESENT                                    = 1058,
+   SVGA_3D_CMD_SHADER_DEFINE                              = 1059,
+   SVGA_3D_CMD_SHADER_DESTROY                             = 1060,
+   SVGA_3D_CMD_SET_SHADER                                 = 1061,
+   SVGA_3D_CMD_SET_SHADER_CONST                           = 1062,
+   SVGA_3D_CMD_DRAW_PRIMITIVES                            = 1063,
+   SVGA_3D_CMD_SETSCISSORRECT                             = 1064,
+   SVGA_3D_CMD_BEGIN_QUERY                                = 1065,
+   SVGA_3D_CMD_END_QUERY                                  = 1066,
+   SVGA_3D_CMD_WAIT_FOR_QUERY                             = 1067,
+   SVGA_3D_CMD_PRESENT_READBACK                           = 1068,
+   SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN                     = 1069,
+   SVGA_3D_CMD_SURFACE_DEFINE_V2                          = 1070,
+   SVGA_3D_CMD_GENERATE_MIPMAPS                           = 1071,
+   SVGA_3D_CMD_VIDEO_CREATE_DECODER                       = 1072,
+   SVGA_3D_CMD_VIDEO_DESTROY_DECODER                      = 1073,
+   SVGA_3D_CMD_VIDEO_CREATE_PROCESSOR                     = 1074,
+   SVGA_3D_CMD_VIDEO_DESTROY_PROCESSOR                    = 1075,
+   SVGA_3D_CMD_VIDEO_DECODE_START_FRAME                   = 1076,
+   SVGA_3D_CMD_VIDEO_DECODE_RENDER                        = 1077,
+   SVGA_3D_CMD_VIDEO_DECODE_END_FRAME                     = 1078,
+   SVGA_3D_CMD_VIDEO_PROCESS_FRAME                        = 1079,
+   SVGA_3D_CMD_ACTIVATE_SURFACE                           = 1080,
+   SVGA_3D_CMD_DEACTIVATE_SURFACE                         = 1081,
+   SVGA_3D_CMD_SCREEN_DMA                                 = 1082,
+   SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE                   = 1083,
+   SVGA_3D_CMD_OPEN_CONTEXT_SURFACE                       = 1084,
+
+   SVGA_3D_CMD_LOGICOPS_BITBLT                            = 1085,
+   SVGA_3D_CMD_LOGICOPS_TRANSBLT                          = 1086,
+   SVGA_3D_CMD_LOGICOPS_STRETCHBLT                        = 1087,
+   SVGA_3D_CMD_LOGICOPS_COLORFILL                         = 1088,
+   SVGA_3D_CMD_LOGICOPS_ALPHABLEND                        = 1089,
+   SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND                    = 1090,
+
+   SVGA_3D_CMD_SET_OTABLE_BASE                            = 1091,
+   SVGA_3D_CMD_READBACK_OTABLE                            = 1092,
+
+   SVGA_3D_CMD_DEFINE_GB_MOB                              = 1093,
+   SVGA_3D_CMD_DESTROY_GB_MOB                             = 1094,
+   SVGA_3D_CMD_DEAD3                                      = 1095,
+   SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING                      = 1096,
+
+   SVGA_3D_CMD_DEFINE_GB_SURFACE                          = 1097,
+   SVGA_3D_CMD_DESTROY_GB_SURFACE                         = 1098,
+   SVGA_3D_CMD_BIND_GB_SURFACE                            = 1099,
+   SVGA_3D_CMD_COND_BIND_GB_SURFACE                       = 1100,
+   SVGA_3D_CMD_UPDATE_GB_IMAGE                            = 1101,
+   SVGA_3D_CMD_UPDATE_GB_SURFACE                          = 1102,
+   SVGA_3D_CMD_READBACK_GB_IMAGE                          = 1103,
+   SVGA_3D_CMD_READBACK_GB_SURFACE                        = 1104,
+   SVGA_3D_CMD_INVALIDATE_GB_IMAGE                        = 1105,
+   SVGA_3D_CMD_INVALIDATE_GB_SURFACE                      = 1106,
+
+   SVGA_3D_CMD_DEFINE_GB_CONTEXT                          = 1107,
+   SVGA_3D_CMD_DESTROY_GB_CONTEXT                         = 1108,
+   SVGA_3D_CMD_BIND_GB_CONTEXT                            = 1109,
+   SVGA_3D_CMD_READBACK_GB_CONTEXT                        = 1110,
+   SVGA_3D_CMD_INVALIDATE_GB_CONTEXT                      = 1111,
+
+   SVGA_3D_CMD_DEFINE_GB_SHADER                           = 1112,
+   SVGA_3D_CMD_DESTROY_GB_SHADER                          = 1113,
+   SVGA_3D_CMD_BIND_GB_SHADER                             = 1114,
+
+   SVGA_3D_CMD_SET_OTABLE_BASE64                          = 1115,
+
+   SVGA_3D_CMD_BEGIN_GB_QUERY                             = 1116,
+   SVGA_3D_CMD_END_GB_QUERY                               = 1117,
+   SVGA_3D_CMD_WAIT_FOR_GB_QUERY                          = 1118,
+
+   SVGA_3D_CMD_NOP                                        = 1119,
+
+   SVGA_3D_CMD_ENABLE_GART                                = 1120,
+   SVGA_3D_CMD_DISABLE_GART                               = 1121,
+   SVGA_3D_CMD_MAP_MOB_INTO_GART                          = 1122,
+   SVGA_3D_CMD_UNMAP_GART_RANGE                           = 1123,
+
+   SVGA_3D_CMD_DEFINE_GB_SCREENTARGET                     = 1124,
+   SVGA_3D_CMD_DESTROY_GB_SCREENTARGET                    = 1125,
+   SVGA_3D_CMD_BIND_GB_SCREENTARGET                       = 1126,
+   SVGA_3D_CMD_UPDATE_GB_SCREENTARGET                     = 1127,
+
+   SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL                  = 1128,
+   SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL                = 1129,
+
+   SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE                 = 1130,
+
+   SVGA_3D_CMD_GB_SCREEN_DMA                              = 1131,
+   SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH                 = 1132,
+   SVGA_3D_CMD_GB_MOB_FENCE                               = 1133,
+   SVGA_3D_CMD_DEFINE_GB_SURFACE_V2                       = 1134,
+   SVGA_3D_CMD_DEFINE_GB_MOB64                            = 1135,
+   SVGA_3D_CMD_REDEFINE_GB_MOB64                          = 1136,
+   SVGA_3D_CMD_NOP_ERROR                                  = 1137,
+
+   SVGA_3D_CMD_SET_VERTEX_STREAMS                         = 1138,
+   SVGA_3D_CMD_SET_VERTEX_DECLS                           = 1139,
+   SVGA_3D_CMD_SET_VERTEX_DIVISORS                        = 1140,
+   SVGA_3D_CMD_DRAW                                       = 1141,
+   SVGA_3D_CMD_DRAW_INDEXED                               = 1142,
+
+   /*
+    * DX10 Commands
+    */
+   SVGA_3D_CMD_DX_MIN                                     = 1143,
+   SVGA_3D_CMD_DX_DEFINE_CONTEXT                          = 1143,
+   SVGA_3D_CMD_DX_DESTROY_CONTEXT                         = 1144,
+   SVGA_3D_CMD_DX_BIND_CONTEXT                            = 1145,
+   SVGA_3D_CMD_DX_READBACK_CONTEXT                        = 1146,
+   SVGA_3D_CMD_DX_INVALIDATE_CONTEXT                      = 1147,
+   SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER              = 1148,
+   SVGA_3D_CMD_DX_SET_SHADER_RESOURCES                    = 1149,
+   SVGA_3D_CMD_DX_SET_SHADER                              = 1150,
+   SVGA_3D_CMD_DX_SET_SAMPLERS                            = 1151,
+   SVGA_3D_CMD_DX_DRAW                                    = 1152,
+   SVGA_3D_CMD_DX_DRAW_INDEXED                            = 1153,
+   SVGA_3D_CMD_DX_DRAW_INSTANCED                          = 1154,
+   SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED                  = 1155,
+   SVGA_3D_CMD_DX_DRAW_AUTO                               = 1156,
+   SVGA_3D_CMD_DX_SET_INPUT_LAYOUT                        = 1157,
+   SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS                      = 1158,
+   SVGA_3D_CMD_DX_SET_INDEX_BUFFER                        = 1159,
+   SVGA_3D_CMD_DX_SET_TOPOLOGY                            = 1160,
+   SVGA_3D_CMD_DX_SET_RENDERTARGETS                       = 1161,
+   SVGA_3D_CMD_DX_SET_BLEND_STATE                         = 1162,
+   SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE                  = 1163,
+   SVGA_3D_CMD_DX_SET_RASTERIZER_STATE                    = 1164,
+   SVGA_3D_CMD_DX_DEFINE_QUERY                            = 1165,
+   SVGA_3D_CMD_DX_DESTROY_QUERY                           = 1166,
+   SVGA_3D_CMD_DX_BIND_QUERY                              = 1167,
+   SVGA_3D_CMD_DX_SET_QUERY_OFFSET                        = 1168,
+   SVGA_3D_CMD_DX_BEGIN_QUERY                             = 1169,
+   SVGA_3D_CMD_DX_END_QUERY                               = 1170,
+   SVGA_3D_CMD_DX_READBACK_QUERY                          = 1171,
+   SVGA_3D_CMD_DX_SET_PREDICATION                         = 1172,
+   SVGA_3D_CMD_DX_SET_SOTARGETS                           = 1173,
+   SVGA_3D_CMD_DX_SET_VIEWPORTS                           = 1174,
+   SVGA_3D_CMD_DX_SET_SCISSORRECTS                        = 1175,
+   SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW                 = 1176,
+   SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW                 = 1177,
+   SVGA_3D_CMD_DX_PRED_COPY_REGION                        = 1178,
+   SVGA_3D_CMD_DX_PRED_COPY                               = 1179,
+   SVGA_3D_CMD_DX_STRETCHBLT                              = 1180,
+   SVGA_3D_CMD_DX_GENMIPS                                 = 1181,
+   SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE                      = 1182,
+   SVGA_3D_CMD_DX_READBACK_SUBRESOURCE                    = 1183,
+   SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE                  = 1184,
+   SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW              = 1185,
+   SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW             = 1186,
+   SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW                = 1187,
+   SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW               = 1188,
+   SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW                = 1189,
+   SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW               = 1190,
+   SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT                    = 1191,
+   SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT                   = 1192,
+   SVGA_3D_CMD_DX_DEFINE_BLEND_STATE                      = 1193,
+   SVGA_3D_CMD_DX_DESTROY_BLEND_STATE                     = 1194,
+   SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE               = 1195,
+   SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE              = 1196,
+   SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE                 = 1197,
+   SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE                = 1198,
+   SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE                    = 1199,
+   SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE                   = 1200,
+   SVGA_3D_CMD_DX_DEFINE_SHADER                           = 1201,
+   SVGA_3D_CMD_DX_DESTROY_SHADER                          = 1202,
+   SVGA_3D_CMD_DX_BIND_SHADER                             = 1203,
+   SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT                     = 1204,
+   SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT                    = 1205,
+   SVGA_3D_CMD_DX_SET_STREAMOUTPUT                        = 1206,
+   SVGA_3D_CMD_DX_SET_COTABLE                             = 1207,
+   SVGA_3D_CMD_DX_READBACK_COTABLE                        = 1208,
+   SVGA_3D_CMD_DX_BUFFER_COPY                             = 1209,
+   SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER                    = 1210,
+   SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK               = 1211,
+   SVGA_3D_CMD_DX_MOVE_QUERY                              = 1212,
+   SVGA_3D_CMD_DX_BIND_ALL_QUERY                          = 1213,
+   SVGA_3D_CMD_DX_READBACK_ALL_QUERY                      = 1214,
+   SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER               = 1215,
+   SVGA_3D_CMD_DX_MOB_FENCE_64                            = 1216,
+   SVGA_3D_CMD_DX_BIND_SHADER_ON_CONTEXT                  = 1217,
+   SVGA_3D_CMD_DX_HINT                                    = 1218,
+   SVGA_3D_CMD_DX_BUFFER_UPDATE                           = 1219,
+   SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET           = 1220,
+   SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET           = 1221,
+   SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET           = 1222,
+
+   /*
+    * Reserve some IDs to be used for the DX11 shader types.
+    */
+   SVGA_3D_CMD_DX_RESERVED1                               = 1223,
+   SVGA_3D_CMD_DX_RESERVED2                               = 1224,
+   SVGA_3D_CMD_DX_RESERVED3                               = 1225,
+
+   SVGA_3D_CMD_DX_MAX                                     = 1226,
+   SVGA_3D_CMD_MAX                                        = 1226,
+   SVGA_3D_CMD_FUTURE_MAX                                 = 3000
+} SVGAFifo3dCmdId;
+
+/*
+ * FIFO command format definitions:
+ */
+
+/*
+ * The data size header following cmdNum for every 3d command
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               id;
+   uint32               size;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdHeader;
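+
+/*
+ * A minimal sketch (illustrative only, not part of the device API) of
+ * how a 3d command is laid out in the FIFO: the SVGA3dCmdHeader is
+ * written first, followed immediately by the command body.  The
+ * fifo_reserve()/fifo_commit() helpers below are hypothetical
+ * placeholders for whatever FIFO accessors the driver provides.
+ */
+#if 0
+static void svga3dEmitExample(uint32 cmdId, const void *body, uint32 bodySize)
+{
+   SVGA3dCmdHeader *header;
+
+   header = fifo_reserve(sizeof *header + bodySize);   /* hypothetical */
+   header->id   = cmdId;           /* e.g. SVGA_3D_CMD_SURFACE_DESTROY */
+   header->size = bodySize;        /* size of the body, not the header */
+   memcpy(header + 1, body, bodySize);                 /* body follows */
+   fifo_commit(sizeof *header + bodySize);             /* hypothetical */
+}
+#endif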
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               numMipLevels;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceFace;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                      sid;
+   SVGA3dSurfaceFlags          surfaceFlags;
+   SVGA3dSurfaceFormat         format;
+   /*
+    * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all
+    * SVGA3dSurfaceFace structures must have the same value in the
+    * numMipLevels field.  Otherwise, all but the first SVGA3dSurfaceFace
+    * structure must have numMipLevels set to 0.
+    */
+   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
+   /*
+    * Followed by an SVGA3dSize structure for each mip level in each face.
+    *
+    * A note on surface sizes: Sizes are always specified in pixels,
+    * even if the true surface size is not a multiple of the minimum
+    * block size of the surface's format. For example, a 3x3x1 DXT1
+    * compressed texture would actually be stored as a 4x4x1 image in
+    * memory.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineSurface;       /* SVGA_3D_CMD_SURFACE_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                      sid;
+   SVGA3dSurfaceFlags          surfaceFlags;
+   SVGA3dSurfaceFormat         format;
+   /*
+    * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all
+    * SVGA3dSurfaceFace structures must have the same value in the
+    * numMipLevels field.  Otherwise, all but the first SVGA3dSurfaceFace
+    * structure must have numMipLevels set to 0.
+    */
+   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
+   uint32                      multisampleCount;
+   SVGA3dTextureFilter         autogenFilter;
+   /*
+    * Followed by an SVGA3dSize structure for each mip level in each face.
+    *
+    * A note on surface sizes: Sizes are always specified in pixels,
+    * even if the true surface size is not a multiple of the minimum
+    * block size of the surface's format. For example, a 3x3x1 DXT1
+    * compressed texture would actually be stored as a 4x4x1 image in
+    * memory.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineSurface_v2;     /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroySurface;      /* SVGA_3D_CMD_SURFACE_DESTROY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineContext;       /* SVGA_3D_CMD_CONTEXT_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyContext;      /* SVGA_3D_CMD_CONTEXT_DESTROY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dClearFlag      clearFlag;
+   uint32               color;
+   float                depth;
+   uint32               stencil;
+   /* Followed by variable number of SVGA3dRect structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdClear;               /* SVGA_3D_CMD_CLEAR */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dLightType      type;
+   SVGA3dBool           inWorldSpace;
+   float                diffuse[4];
+   float                specular[4];
+   float                ambient[4];
+   float                position[4];
+   float                direction[4];
+   float                range;
+   float                falloff;
+   float                attenuation0;
+   float                attenuation1;
+   float                attenuation2;
+   float                theta;
+   float                phi;
+}
+#include "vmware_pack_end.h"
+SVGA3dLightData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               sid;
+   /* Followed by variable number of SVGA3dCopyRect structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdPresent;             /* SVGA_3D_CMD_PRESENT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dRenderStateName   state;
+   union {
+      uint32               uintValue;
+      float                floatValue;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dRenderState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   /* Followed by variable number of SVGA3dRenderState structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetRenderState;      /* SVGA_3D_CMD_SETRENDERSTATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                 cid;
+   SVGA3dRenderTargetType type;
+   SVGA3dSurfaceImageId   target;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetRenderTarget;     /* SVGA_3D_CMD_SETRENDERTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId  src;
+   SVGA3dSurfaceImageId  dest;
+   /* Followed by variable number of SVGA3dCopyBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceCopy;               /* SVGA_3D_CMD_SURFACE_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId  src;
+   SVGA3dSurfaceImageId  dest;
+   SVGA3dBox             boxSrc;
+   SVGA3dBox             boxDest;
+   SVGA3dStretchBltMode  mode;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceStretchBlt;         /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * If the discard flag is present in a surface DMA operation, the host may
+    * discard the contents of the current mipmap level and face of the target
+    * surface before applying the surface DMA contents.
+    */
+   uint32 discard : 1;
+
+   /*
+    * If the unsynchronized flag is present, the host may perform this upload
+    * without syncing to pending reads on this surface.
+    */
+   uint32 unsynchronized : 1;
+
+   /*
+    * Guests *MUST* set the reserved bits to 0 before submitting the command
+    * suffix as future flags may occupy these bits.
+    */
+   uint32 reserved : 30;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceDMAFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAGuestImage guest;
+   SVGA3dSurfaceImageId host;
+   SVGA3dTransferType transfer;
+   /*
+    * Followed by variable number of SVGA3dCopyBox structures. For consistency
+    * in all clipping logic and coordinate translation, we define the
+    * "source" in each copyBox as the guest image and the
+    * "destination" as the host image, regardless of transfer
+    * direction.
+    *
+    * For efficiency, the SVGA3D device is free to copy more data than
+    * specified. For example, it may round copy boxes outwards such
+    * that they lie on particular alignment boundaries.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceDMA;                /* SVGA_3D_CMD_SURFACE_DMA */
+
+/*
+ * SVGA3dCmdSurfaceDMASuffix --
+ *
+ *    This is a command suffix that will appear after a SurfaceDMA command in
+ *    the FIFO.  It contains some extra information that hosts may use to
+ *    optimize performance or protect the guest.  This suffix exists to preserve
+ *    backwards compatibility while also allowing for new functionality to be
+ *    implemented.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 suffixSize;
+
+   /*
+    * The maximum offset is used to determine the maximum offset from the
+    * guestPtr base address that will be accessed or written to during this
+    * surfaceDMA.  If the suffix is supported, the host will respect this
+    * boundary while performing surface DMAs.
+    *
+    * Defaults to MAX_UINT32
+    */
+   uint32 maximumOffset;
+
+   /*
+    * A set of flags that describes optimizations that the host may perform
+    * while executing this surface DMA operation.  For correctness, the guest
+    * should never rely on behaviour that differs when these flags are set.
+    *
+    * Defaults to 0
+    */
+   SVGA3dSurfaceDMAFlags flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSurfaceDMASuffix;
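+
+/*
+ * A sketch of "no-op" suffix defaults, assuming the suffix is appended
+ * directly after the SurfaceDMA body and its copy boxes; a guest that
+ * wants no extra bounding or optimization hints fills it like this:
+ */
+#if 0
+   SVGA3dCmdSurfaceDMASuffix suffix;
+
+   suffix.suffixSize           = sizeof(suffix);
+   suffix.maximumOffset        = 0xFFFFFFFF;  /* MAX_UINT32: no extra bound */
+   suffix.flags.discard        = 0;           /* keep current contents */
+   suffix.flags.unsynchronized = 0;           /* sync to pending reads */
+   suffix.flags.reserved       = 0;           /* must be zero */
+#endif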
+
+/*
+ * SVGA_3D_CMD_DRAW_PRIMITIVES --
+ *
+ *   This command is the SVGA3D device's generic drawing entry point.
+ *   It can draw multiple ranges of primitives, optionally using an
+ *   index buffer, using an arbitrary collection of vertex buffers.
+ *
+ *   Each SVGA3dVertexDecl defines a distinct vertex array to bind
+ *   during this draw call. The declarations specify which surface
+ *   the vertex data lives in, what that vertex data is used for,
+ *   and how to interpret it.
+ *
+ *   Each SVGA3dPrimitiveRange defines a collection of primitives
+ *   to render using the same vertex arrays. An index buffer is
+ *   optional.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * A range hint is an optional specification for the range of indices
+    * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
+    * that the entire array will be used.
+    *
+    * These are only hints. The SVGA3D device may use them for
+    * performance optimization if possible, but it's also allowed to
+    * ignore these values.
+    */
+   uint32               first;
+   uint32               last;
+}
+#include "vmware_pack_end.h"
+SVGA3dArrayRangeHint;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * Define the origin and shape of a vertex or index array. Both
+    * 'offset' and 'stride' are in bytes. The provided surface will be
+    * reinterpreted as a flat array of bytes in the same format used
+    * by surface DMA operations. To avoid unnecessary conversions, the
+    * surface should be created with the SVGA3D_BUFFER format.
+    *
+    * Index 0 in the array starts 'offset' bytes into the surface.
+    * Index 1 begins at byte 'offset + stride', etc. Array indices may
+    * not be negative.
+    */
+   uint32               surfaceId;
+   uint32               offset;
+   uint32               stride;
+}
+#include "vmware_pack_end.h"
+SVGA3dArray;
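+
+/*
+ * The addressing rule above reduces to: element 'i' begins at byte
+ * offset + i * stride within the surface.  A helper shown purely for
+ * illustration:
+ */
+#if 0
+static inline uint32 svga3dArrayByteOffset(const SVGA3dArray *a, uint32 i)
+{
+   return a->offset + i * a->stride;   /* index 0 at 'offset', stride apart */
+}
+#endif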
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * Describe a vertex array's data type, and define how it is to be
+    * used by the fixed function pipeline or the vertex shader. It
+    * isn't useful to have two VertexDecls with the same
+    * VertexArrayIdentity in one draw call.
+    */
+   SVGA3dDeclType       type;
+   SVGA3dDeclMethod     method;
+   SVGA3dDeclUsage      usage;
+   uint32               usageIndex;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexArrayIdentity;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dVertexDecl {
+   SVGA3dVertexArrayIdentity  identity;
+   SVGA3dArray                array;
+   SVGA3dArrayRangeHint       rangeHint;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexDecl;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dPrimitiveRange {
+   /*
+    * Define a group of primitives to render, from sequential indices.
+    *
+    * The values of 'primType' and 'primitiveCount' together imply the
+    * total number of vertices that will be rendered.
+    */
+   SVGA3dPrimitiveType  primType;
+   uint32               primitiveCount;
+
+   /*
+    * Optional index buffer. If indexArray.surfaceId is
+    * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
+    * without an index buffer is identical to rendering with an index
+    * buffer containing the sequence [0, 1, 2, 3, ...].
+    *
+    * If an index buffer is in use, indexWidth specifies the width in
+    * bytes of each index value. It must be less than or equal to
+    * indexArray.stride.
+    *
+    * (Currently, the SVGA3D device requires index buffers to be tightly
+    * packed. In other words, indexWidth == indexArray.stride)
+    */
+   SVGA3dArray          indexArray;
+   uint32               indexWidth;
+
+   /*
+    * Optional index bias. This number is added to all indices from
+    * indexArray before they are used as vertex array indices. This
+    * can be used in multiple ways:
+    *
+    *  - When not using an indexArray, this bias can be used to
+    *    specify where in the vertex arrays to begin rendering.
+    *
+    *  - A positive number here is equivalent to increasing the
+    *    offset in each vertex array.
+    *
+    *  - A negative number can be used to render using a small
+    *    vertex array and an index buffer that contains large
+    *    values. This may be used by some applications that
+    *    crop a vertex buffer without modifying their index
+    *    buffer.
+    *
+    * Note that rendering with a negative bias value may be slower and
+    * use more memory than rendering with a positive or zero bias.
+    */
+   int32                indexBias;
+}
+#include "vmware_pack_end.h"
+SVGA3dPrimitiveRange;
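+
+/*
+ * Example (illustrative values): draw 10 triangles without an index
+ * buffer, using indexBias to start at vertex 30 of the bound arrays.
+ */
+#if 0
+   SVGA3dPrimitiveRange range;
+
+   range.primType             = SVGA3D_PRIMITIVE_TRIANGLELIST;
+   range.primitiveCount       = 10;                 /* 30 vertices total */
+   range.indexArray.surfaceId = SVGA3D_INVALID_ID;  /* no index buffer */
+   range.indexArray.offset    = 0;
+   range.indexArray.stride    = 0;
+   range.indexWidth           = 0;
+   range.indexBias            = 30;                 /* begin at vertex 30 */
+#endif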
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               numVertexDecls;
+   uint32               numRanges;
+
+   /*
+    * There are two variable size arrays after the
+    * SVGA3dCmdDrawPrimitives structure. In order,
+    * they are:
+    *
+    * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
+    *    SVGA3D_MAX_VERTEX_ARRAYS;
+    * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
+    *    SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
+    * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
+    *    the frequency divisor for the corresponding vertex decl).
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDrawPrimitives;      /* SVGA_3D_CMD_DRAWPRIMITIVES */
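+
+/*
+ * Sketch of the resulting FIFO layout for one decl and one range with
+ * no divisors (illustrative, not an additional API):
+ *
+ *   SVGA3dCmdHeader         header;  id = SVGA_3D_CMD_DRAWPRIMITIVES
+ *   SVGA3dCmdDrawPrimitives body;    numVertexDecls = 1, numRanges = 1
+ *   SVGA3dVertexDecl        decl;
+ *   SVGA3dPrimitiveRange    range;
+ *
+ * where header.size = sizeof(body) + sizeof(decl) + sizeof(range).
+ */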
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+
+   uint32 primitiveCount;        /* How many primitives to render */
+   uint32 startVertexLocation;   /* Which vertex to start rendering at. */
+
+   uint8 primitiveType;          /* SVGA3dPrimitiveType */
+   uint8 padding[3];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDraw;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+
+   uint8 primitiveType;       /* SVGA3dPrimitiveType */
+
+   uint32 indexBufferSid;     /* Valid index buffer sid. */
+   uint32 indexBufferOffset;  /* Byte offset into the index buffer, almost */
+                              /* always 0 for DX9 guests, non-zero for OpenGL */
+                              /* guests.  We can't represent non-multiple-of- */
+                              /* stride offsets in D3D9Renderer... */
+   uint8 indexBufferStride;   /* Allowable values = 1, 2, or 4 */
+
+   int32 baseVertexLocation;  /* Bias applied to the index when selecting a */
+                              /* vertex from the streams, may be negative */
+
+   uint32 primitiveCount;     /* How many primitives to render */
+   uint32 pad0;
+   uint16 pad1;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDrawIndexed;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * Describe a vertex array's data type, and define how it is to be
+    * used by the fixed function pipeline or the vertex shader. It
+    * isn't useful to have two VertexDecls with the same
+    * VertexArrayIdentity in one draw call.
+    */
+   uint16 streamOffset;
+   uint8 stream;
+   uint8 type;          /* SVGA3dDeclType */
+   uint8 method;        /* SVGA3dDeclMethod */
+   uint8 usage;         /* SVGA3dDeclUsage */
+   uint8 usageIndex;
+   uint8 padding;
+
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexElement;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+
+   uint32 numElements;
+
+   /*
+    * Followed by numElements SVGA3dVertexElement structures.
+    *
+    * If numElements < SVGA3D_MAX_VERTEX_ARRAYS, the remaining elements
+    * are cleared and will not be used by following draws.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexDecls;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+   uint32 stride;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexStream;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+
+   uint32 numStreams;
+   /*
+    * Followed by numStreams SVGA3dVertexStream structures.
+    *
+    * If numStreams < SVGA3D_MAX_VERTEX_ARRAYS, the remaining streams
+    * are cleared and will not be used by following draws.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexStreams;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+   uint32 numDivisors;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetVertexDivisors;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                   stage;
+   SVGA3dTextureStateName   name;
+   union {
+      uint32                value;
+      float                 floatValue;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dTextureState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   /* Followed by variable number of SVGA3dTextureState structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetTextureState;      /* SVGA_3D_CMD_SETTEXTURESTATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                   cid;
+   SVGA3dTransformType      type;
+   float                    matrix[16];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetTransform;          /* SVGA_3D_CMD_SETTRANSFORM */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   float                min;
+   float                max;
+}
+#include "vmware_pack_end.h"
+SVGA3dZRange;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dZRange         zRange;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetZRange;             /* SVGA_3D_CMD_SETZRANGE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   float                diffuse[4];
+   float                ambient[4];
+   float                specular[4];
+   float                emissive[4];
+   float                shininess;
+}
+#include "vmware_pack_end.h"
+SVGA3dMaterial;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dFace           face;
+   SVGA3dMaterial       material;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetMaterial;           /* SVGA_3D_CMD_SETMATERIAL */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               index;
+   SVGA3dLightData      data;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetLightData;           /* SVGA_3D_CMD_SETLIGHTDATA */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               index;
+   uint32               enabled;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetLightEnabled;      /* SVGA_3D_CMD_SETLIGHTENABLED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dRect           rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetViewport;           /* SVGA_3D_CMD_SETVIEWPORT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dRect           rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetScissorRect;         /* SVGA_3D_CMD_SETSCISSORRECT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               index;
+   float                plane[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetClipPlane;           /* SVGA_3D_CMD_SETCLIPPLANE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               shid;
+   SVGA3dShaderType     type;
+   /* Followed by variable number of DWORDs for shader bytecode */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineShader;           /* SVGA_3D_CMD_SHADER_DEFINE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   uint32               shid;
+   SVGA3dShaderType     type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyShader;         /* SVGA_3D_CMD_SHADER_DESTROY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                  cid;
+   uint32                  reg;     /* register number */
+   SVGA3dShaderType        type;
+   SVGA3dShaderConstType   ctype;
+   uint32                  values[4];
+
+   /*
+    * Followed by a variable number of additional values.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetShaderConst;        /* SVGA_3D_CMD_SET_SHADER_CONST */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dShaderType     type;
+   uint32               shid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetShader;       /* SVGA_3D_CMD_SET_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBeginQuery;           /* SVGA_3D_CMD_BEGIN_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+   SVGAGuestPtr         guestResult;   /* Points to an SVGA3dQueryResult structure */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEndQuery;                  /* SVGA_3D_CMD_END_QUERY */
+
+
+/*
+ * SVGA_3D_CMD_WAIT_FOR_QUERY --
+ *
+ *    Will read the SVGA3dQueryResult structure pointed to by guestResult,
+ *    and if the state member is set to anything other than
+ *    SVGA3D_QUERYSTATE_PENDING, this command will always be a no-op.
+ *
+ *    Otherwise, in addition to the query explicitly waited for, all
+ *    queries of the same type issued with the same cid, for which
+ *    an SVGA_3D_CMD_END_QUERY command has previously been sent, will
+ *    be finished after execution of this command.
+ *
+ *    A query is identified by the gmrId and offset of the guestResult
+ *    member. If the device can't find an SVGA_3D_CMD_END_QUERY that was
+ *    sent previously with an identical gmrId and offset, it will
+ *    effectively end all queries of an identical type issued with the
+ *    same cid, and the SVGA3dQueryResult structure pointed to by
+ *    guestResult will not be written to. This property can be used to
+ *    implement a query barrier for a given cid and query type.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;        /* Same parameters passed to END_QUERY */
+   SVGA3dQueryType      type;
+   SVGAGuestPtr         guestResult;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWaitForQuery;              /* SVGA_3D_CMD_WAIT_FOR_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               totalSize;    /* Set by guest before query is ended. */
+   SVGA3dQueryState     state;        /* Set by host or guest. See SVGA3dQueryState. */
+   union {                            /* Set by host on exit from PENDING state */
+      uint32            result32;
+      uint32            queryCookie; /* May be used to identify which QueryGetData this
+                                        result corresponds to. */
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dQueryResult;
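+
+/*
+ * Sketch of the guest-side query lifecycle; the emit_* and
+ * map_guest_result() helpers are hypothetical stand-ins for the
+ * driver's command submission.  The guest sets the state to
+ * SVGA3D_QUERYSTATE_PENDING before ending the query; the host updates
+ * it once the result is available.
+ */
+#if 0
+   SVGA3dQueryResult *result = map_guest_result(guestResult); /* hypothetical */
+
+   result->totalSize = sizeof(*result);
+   result->state     = SVGA3D_QUERYSTATE_PENDING;
+   emit_begin_query(cid, SVGA3D_QUERYTYPE_OCCLUSION);
+   /* ... draw calls being measured ... */
+   emit_end_query(cid, SVGA3D_QUERYTYPE_OCCLUSION, guestResult);
+   emit_wait_for_query(cid, SVGA3D_QUERYTYPE_OCCLUSION, guestResult);
+   /* Once the device processes WAIT_FOR_QUERY, result->state != PENDING. */
+#endif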
+
+
+/*
+ * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
+ *
+ *    This is a blit from an SVGA3D surface to a Screen Object.
+ *    This blit must be directed at a specific screen.
+ *
+ *    The blit copies from a rectangular region of an SVGA3D surface
+ *    image to a rectangular region of a screen.
+ *
+ *    This command takes an optional variable-length list of clipping
+ *    rectangles after the body of the command. If no rectangles are
+ *    specified, there is no clipping region. The entire destRect is
+ *    drawn to. If one or more rectangles are included, they describe
+ *    a clipping region. The clip rectangle coordinates are measured
+ *    relative to the top-left corner of destRect.
+ *
+ *    The srcImage must be from mip=0 face=0.
+ *
+ *    This supports scaling if the src and dest are of different sizes.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId srcImage;
+   SVGASignedRect       srcRect;
+   uint32               destScreenId; /* Screen Object ID */
+   SVGASignedRect       destRect;
+   /* Clipping: zero or more SVGASignedRects follow */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBlitSurfaceToScreen;         /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
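+
+/*
+ * Example (illustrative values): one clip rectangle appended after the
+ * command body, restricting the blit to a 320x240 region; the clip
+ * coordinates are measured from destRect's top-left corner.
+ */
+#if 0
+   SVGASignedRect clip;   /* follows SVGA3dCmdBlitSurfaceToScreen */
+
+   clip.left   = 0;
+   clip.top    = 0;
+   clip.right  = 320;
+   clip.bottom = 240;
+#endif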
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               sid;
+   SVGA3dTextureFilter  filter;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGenerateMipmaps;             /* SVGA_3D_CMD_GENERATE_MIPMAPS */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdActivateSurface;               /* SVGA_3D_CMD_ACTIVATE_SURFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDeactivateSurface;             /* SVGA_3D_CMD_DEACTIVATE_SURFACE */
+
+/*
+ * Screen DMA command
+ *
+ * Available with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  The SVGA_CAP_3D device
+ * cap bit is not required.
+ *
+ * - refBuffer and destBuffer are 32bit BGRX; refBuffer and destBuffer may
+ *   be different, but the guest must ensure that refBuffer holds exactly
+ *   the contents that were in place when the host received the previous
+ *   screen DMA command.
+ *
+ * - changeMap is generated by lib/blit, and it contains at least the
+ *   changes since the last received screen DMA.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdScreenDMA {
+   uint32 screenId;
+   SVGAGuestImage refBuffer;
+   SVGAGuestImage destBuffer;
+   SVGAGuestImage changeMap;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdScreenDMA;        /* SVGA_3D_CMD_SCREEN_DMA */
+
+/*
+ * Set Unity Surface Cookie
+ *
+ * Associates the supplied cookie with the surface id for use with
+ * Unity.  This cookie is a hint from guest to host; there is no way
+ * for the guest to read back the cookie, and the host is free to drop
+ * the cookie association at will.  The default value for the cookie
+ * on all surfaces is 0.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdSetUnitySurfaceCookie {
+   uint32 sid;
+   uint64 cookie;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetUnitySurfaceCookie;   /* SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE */
+
+/*
+ * Open a context-specific surface in a non-context-specific manner.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdOpenContextSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdOpenContextSurface;   /* SVGA_3D_CMD_OPEN_CONTEXT_SURFACE */
+
+
+/*
+ * Logic ops
+ */
+
+#define SVGA3D_LOTRANSBLT_HONORALPHA     (0x01)
+#define SVGA3D_LOSTRETCHBLT_MIRRORX      (0x01)
+#define SVGA3D_LOSTRETCHBLT_MIRRORY      (0x02)
+#define SVGA3D_LOALPHABLEND_SRCHASALPHA  (0x01)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsBitBlt {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId src;
+   SVGA3dSurfaceImageId dst;
+   SVGA3dLogicOp logicOp;
+   /* Followed by variable number of SVGA3dCopyBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsBitBlt;   /* SVGA_3D_CMD_LOGICOPS_BITBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsTransBlt {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId src;
+   SVGA3dSurfaceImageId dst;
+   uint32 color;
+   uint32 flags;
+   SVGA3dBox srcBox;
+   SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsTransBlt;   /* SVGA_3D_CMD_LOGICOPS_TRANSBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsStretchBlt {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId src;
+   SVGA3dSurfaceImageId dst;
+   uint16 mode;
+   uint16 flags;
+   SVGA3dBox srcBox;
+   SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsStretchBlt;   /* SVGA_3D_CMD_LOGICOPS_STRETCHBLT */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsColorFill {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId dst;
+   uint32 color;
+   SVGA3dLogicOp logicOp;
+   /* Followed by variable number of SVGA3dRect structures. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsColorFill;   /* SVGA_3D_CMD_LOGICOPS_COLORFILL */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsAlphaBlend {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId src;
+   SVGA3dSurfaceImageId dst;
+   uint32 alphaVal;
+   uint32 flags;
+   SVGA3dBox srcBox;
+   SVGA3dBox dstBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsAlphaBlend;   /* SVGA_3D_CMD_LOGICOPS_ALPHABLEND */
+
+#define SVGA3D_CLEARTYPE_INVALID_GAMMA_INDEX 0xFFFFFFFF
+
+#define SVGA3D_CLEARTYPE_GAMMA_WIDTH  512
+#define SVGA3D_CLEARTYPE_GAMMA_HEIGHT 16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdLogicOpsClearTypeBlend {
+   /*
+    * All LogicOps surfaces are one-level
+    * surfaces so mipmap & face should always
+    * be zero.
+    */
+   SVGA3dSurfaceImageId tmp;
+   SVGA3dSurfaceImageId dst;
+   SVGA3dSurfaceImageId gammaSurf;
+   SVGA3dSurfaceImageId alphaSurf;
+   uint32 gamma;
+   uint32 color;
+   uint32 color2;
+   int32 alphaOffsetX;
+   int32 alphaOffsetY;
+   /* Followed by variable number of SVGA3dBox structures */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdLogicOpsClearTypeBlend;   /* SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND */
+
+
+/*
+ * Guest-backed object definitions.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAMobFormat ptDepth;
+   uint32 sizeInBytes;
+   PPN64 base;
+}
+#include "vmware_pack_end.h"
+SVGAOTableMobEntry;
+#define SVGA3D_OTABLE_MOB_ENTRY_SIZE (sizeof(SVGAOTableMobEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceFormat format;
+   SVGA3dSurfaceFlags surfaceFlags;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+   SVGAMobId mobid;
+   uint32 arraySize;
+   uint32 mobPitch;
+   uint32 pad[5];
+}
+#include "vmware_pack_end.h"
+SVGAOTableSurfaceEntry;
+#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE (sizeof(SVGAOTableSurfaceEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableContextEntry;
+#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE (sizeof(SVGAOTableContextEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dShaderType type;
+   uint32 sizeInBytes;
+   uint32 offsetInBytes;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableShaderEntry;
+#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE (sizeof(SVGAOTableShaderEntry))
+
+#define SVGA_STFLAG_PRIMARY (1 << 0)
+typedef uint32 SVGAScreenTargetFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId image;
+   uint32 width;
+   uint32 height;
+   int32 xRoot;
+   int32 yRoot;
+   SVGAScreenTargetFlags flags;
+   uint32 dpi;
+   uint32 pad[7];
+}
+#include "vmware_pack_end.h"
+SVGAOTableScreenTargetEntry;
+#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE \
+	(sizeof(SVGAOTableScreenTargetEntry))
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   float value[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstFloat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   int32 value[4];
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstInt;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 value;
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderConstBool;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint16 streamOffset;
+   uint8 stream;
+   uint8 type;
+   uint8 methodUsage;
+   uint8 usageIndex;
+}
+#include "vmware_pack_end.h"
+SVGAGBVertexElement;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+   uint16 stride;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGAGBVertexStream;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dRect viewport;
+   SVGA3dRect scissorRect;
+   SVGA3dZRange zRange;
+
+   SVGA3dSurfaceImageId renderTargets[SVGA3D_RT_MAX];
+   SVGAGBVertexElement decl1[4];
+
+   uint32 renderStates[SVGA3D_RS_MAX];
+   SVGAGBVertexElement decl2[18];
+   uint32 pad0[2];
+
+   struct {
+      SVGA3dFace face;
+      SVGA3dMaterial material;
+   } material;
+
+   float clipPlanes[SVGA3D_NUM_CLIPPLANES][4];
+   float matrices[SVGA3D_TRANSFORM_MAX][16];
+
+   SVGA3dBool lightEnabled[SVGA3D_NUM_LIGHTS];
+   SVGA3dLightData lightData[SVGA3D_NUM_LIGHTS];
+
+   /*
+    * Shaders currently bound
+    */
+   uint32 shaders[SVGA3D_NUM_SHADERTYPE_PREDX];
+   SVGAGBVertexElement decl3[10];
+   uint32 pad1[3];
+
+   uint32 occQueryActive;
+   uint32 occQueryValue;
+
+   /*
+    * Int/Bool Shader constants
+    */
+   SVGA3dShaderConstInt pShaderIValues[SVGA3D_CONSTINTREG_MAX];
+   SVGA3dShaderConstInt vShaderIValues[SVGA3D_CONSTINTREG_MAX];
+   uint16 pShaderBValues;
+   uint16 vShaderBValues;
+
+   SVGAGBVertexStream streams[SVGA3D_MAX_VERTEX_ARRAYS];
+   SVGA3dVertexDivisor divisors[SVGA3D_MAX_VERTEX_ARRAYS];
+   uint32 numVertexDecls;
+   uint32 numVertexStreams;
+   uint32 numVertexDivisors;
+   uint32 pad2[30];
+
+   /*
+    * Texture Stages
+    *
+    * SVGA3D_TS_INVALID through SVGA3D_TS_CONSTANT are in the
+    * textureStages array.
+    * SVGA3D_TS_COLOR_KEY is in tsColorKey.
+    */
+   uint32 tsColorKey[SVGA3D_NUM_TEXTURE_UNITS];
+   uint32 textureStages[SVGA3D_NUM_TEXTURE_UNITS][SVGA3D_TS_CONSTANT + 1];
+   uint32 tsColorKeyEnable[SVGA3D_NUM_TEXTURE_UNITS];
+
+   /*
+    * Float Shader constants.
+    */
+   SVGA3dShaderConstFloat pShaderFValues[SVGA3D_CONSTREG_MAX];
+   SVGA3dShaderConstFloat vShaderFValues[SVGA3D_CONSTREG_MAX];
+}
+#include "vmware_pack_end.h"
+SVGAGBContextData;
+#define SVGA3D_CONTEXT_DATA_SIZE (sizeof(SVGAGBContextData))
+
+/*
+ * SVGA3dCmdSetOTableBase --
+ *
+ * This command allows the guest to set the base PPN of the
+ * specified object table.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAOTableType type;
+   PPN baseAddress;
+   uint32 sizeInBytes;
+   uint32 validSizeInBytes;
+   SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetOTableBase;  /* SVGA_3D_CMD_SET_OTABLE_BASE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAOTableType type;
+   PPN64 baseAddress;
+   uint32 sizeInBytes;
+   uint32 validSizeInBytes;
+   SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetOTableBase64;  /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAOTableType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackOTable;  /* SVGA_3D_CMD_READBACK_OTABLE */
+
+/*
+ * Define a memory object (Mob) in the OTable.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBMob {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN base;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBMob;   /* SVGA_3D_CMD_DEFINE_GB_MOB */
+
+
+/*
+ * Destroy a memory object (Mob) in the OTable.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBMob {
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBMob;   /* SVGA_3D_CMD_DESTROY_GB_MOB */
+
+
+/*
+ * Define a memory object (Mob) in the OTable with a PPN64 base.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBMob64 {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN64 base;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBMob64;   /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
+
+/*
+ * Redefine an object in the OTable with PPN64 base.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdRedefineGBMob64 {
+   SVGAMobId mobid;
+   SVGAMobFormat ptDepth;
+   PPN64 base;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdRedefineGBMob64;   /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
+
+/*
+ * Notification that the page tables have been modified.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBMobMapping {
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBMobMapping;   /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
+
+/*
+ * Define a guest-backed surface.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface {
+   uint32 sid;
+   SVGA3dSurfaceFlags surfaceFlags;
+   SVGA3dSurfaceFormat format;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
+
+/*
+ * Destroy a guest-backed surface.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBSurface;   /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
+
+/*
+ * Bind a guest-backed surface to a mob.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBSurface {
+   uint32 sid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBSurface;   /* SVGA_3D_CMD_BIND_GB_SURFACE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBSurfaceWithPitch {
+   uint32 sid;
+   SVGAMobId mobid;
+   uint32 baseLevelPitch;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBSurfaceWithPitch;   /* SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH */
+
+/*
+ * Conditionally bind a mob to a guest-backed surface if testMobid
+ * matches the currently bound mob.  If this command changes the mobid,
+ * optionally issue a readback/update on the surface while it is still
+ * bound to the old mobid.
+ */
+
+#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
+#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_UPDATE   (1 << 1)
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 sid;
+   SVGAMobId testMobid;
+   SVGAMobId mobid;
+   uint32 flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdCondBindGBSurface;          /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
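+
+/*
+ * Example (illustrative; the sid and mob id values are hypothetical):
+ * switch a surface to a new mob only if the old mob is still bound,
+ * flushing the surface contents through the old mob first.
+ */
+#if 0
+   SVGA3dCmdCondBindGBSurface cmd;
+
+   cmd.sid       = sid;
+   cmd.testMobid = oldMobid;   /* bind only if this mob is currently bound */
+   cmd.mobid     = newMobid;
+   cmd.flags     = SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK;
+#endif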
+
+/*
+ * Update an image in a guest-backed surface.
+ * (Inform the device that the guest contents have been updated.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBImage {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBImage;   /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
+
+/*
+ * Update an entire guest-backed surface.
+ * (Inform the device that the guest contents have been updated.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdUpdateGBSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBSurface;   /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
+
+/*
+ * Readback an image in a guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBImage {
+   SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBImage;   /* SVGA_3D_CMD_READBACK_GB_IMAGE */
+
+/*
+ * Readback an entire guest-backed surface.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBSurface;   /* SVGA_3D_CMD_READBACK_GB_SURFACE */
+
+/*
+ * Readback a sub rect of an image in a guest-backed surface.  After
+ * issuing this command the driver is required to issue an update call
+ * on the same region before issuing any other commands that reference
+ * this surface; otherwise rendering is not guaranteed.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBImagePartial {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+   uint32 invertBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
+
+
+/*
+ * Invalidate an image in a guest-backed surface.
+ * (Notify the device that the contents can be lost.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBImage {
+   SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBImage;   /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
+
+/*
+ * Invalidate an entire guest-backed surface.
+ * (Notify the device that the contents of all images can be lost.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBSurface {
+   uint32 sid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
+
+/*
+ * Invalidate a sub rect of an image in a guest-backed surface.  After
+ * issuing this command the driver is required to issue an update call
+ * on the same region before issuing any other commands that reference
+ * this surface; otherwise rendering is not guaranteed.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBImagePartial {
+   SVGA3dSurfaceImageId image;
+   SVGA3dBox box;
+   uint32 invertBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
+
+
+/*
+ * Define a guest-backed context.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBContext;   /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
+
+/*
+ * Destroy a guest-backed context.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBContext;   /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
+
+/*
+ * Bind a guest-backed context.
+ *
+ * validContents should be set to 0 for new contexts,
+ * and 1 if this is an old context which is being paged
+ * back onto the device.
+ *
+ * For new contexts, it is recommended that the driver
+ * issue commands to initialize all interesting state
+ * prior to rendering.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBContext {
+   uint32 cid;
+   SVGAMobId mobid;
+   uint32 validContents;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBContext;   /* SVGA_3D_CMD_BIND_GB_CONTEXT */
+
+/*
+ * Readback a guest-backed context.
+ * (Request that the device flush the contents back into guest memory.)
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdReadbackGBContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdReadbackGBContext;   /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
+
+/*
+ * Invalidate a guest-backed context.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdInvalidateGBContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdInvalidateGBContext;   /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
+
+/*
+ * Define a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBShader {
+   uint32 shid;
+   SVGA3dShaderType type;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBShader;   /* SVGA_3D_CMD_DEFINE_GB_SHADER */
+
+/*
+ * Bind a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdBindGBShader {
+   uint32 shid;
+   SVGAMobId mobid;
+   uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBShader;   /* SVGA_3D_CMD_BIND_GB_SHADER */
+
+/*
+ * Destroy a guest-backed shader.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDestroyGBShader {
+   uint32 shid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBShader;   /* SVGA_3D_CMD_DESTROY_GB_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32                  cid;
+   uint32                  regStart;
+   SVGA3dShaderType        shaderType;
+   SVGA3dShaderConstType   constType;
+
+   /*
+    * Followed by a variable number of shader constants.
+    *
+    * Note that FLOAT and INT constants are 4-dwords in length, while
+    * BOOL constants are 1-dword in length.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdSetGBShaderConstInline;   /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBeginGBQuery;           /* SVGA_3D_CMD_BEGIN_GB_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+   SVGAMobId mobid;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEndGBQuery;                  /* SVGA_3D_CMD_END_GB_QUERY */
+
+
+/*
+ * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
+ *
+ *    The semantics of this command are identical to the
+ *    SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
+ *    to a Mob instead of a GMR.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+   SVGAMobId mobid;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWaitForGBQuery;          /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAMobId mobid;
+   uint32 mustBeZero;
+   uint32 initialized;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdEnableGart;              /* SVGA_3D_CMD_ENABLE_GART */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAMobId mobid;
+   uint32 gartOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdMapMobIntoGart;          /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
+
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 gartOffset;
+   uint32 numPages;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUnmapGartRange;          /* SVGA_3D_CMD_UNMAP_GART_RANGE */
+
+
+/*
+ * Screen Targets
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+   uint32 width;
+   uint32 height;
+   int32 xRoot;
+   int32 yRoot;
+   SVGAScreenTargetFlags flags;
+
+   /*
+    * The physical DPI that the guest expects this screen displayed at.
+    *
+    * Guests which are not DPI-aware should set this to zero.
+    */
+   uint32 dpi;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBScreenTarget;    /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDestroyGBScreenTarget;  /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+   SVGA3dSurfaceImageId image;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdBindGBScreenTarget;  /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+   SVGA3dRect rect;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdUpdateGBScreenTarget;  /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdGBScreenDMA {
+   uint32 screenId;
+   uint32 dead;
+   SVGAMobId destMobID;
+   uint32 destPitch;
+   SVGAMobId changeMapMobID;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGBScreenDMA;        /* SVGA_3D_CMD_GB_SCREEN_DMA */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 value;
+   uint32 mobId;
+   uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGBMobFence;  /* SVGA_3D_CMD_GB_MOB_FENCE */
+
+#endif /* _SVGA3D_CMD_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
new file mode 100644
index 0000000..c18b663
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
@@ -0,0 +1,457 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_devcaps.h --
+ *
+ *       SVGA 3d caps definitions
+ */
+
+#ifndef _SVGA3D_DEVCAPS_H_
+#define _SVGA3D_DEVCAPS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+/*
+ * 3D Hardware Version
+ *
+ *   The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
+ *   register.  It is set by the host and read by the guest.  This lets
+ *   us make new guest drivers which are backwards-compatible with old
+ *   SVGA hardware revisions.  It does not let us support old guest
+ *   drivers.  Good enough for now.
+ *
+ */
+
+#define SVGA3D_MAKE_HWVERSION(major, minor)      (((major) << 16) | ((minor) & 0xFF))
+#define SVGA3D_MAJOR_HWVERSION(version)          ((version) >> 16)
+#define SVGA3D_MINOR_HWVERSION(version)          ((version) & 0xFF)
+
+typedef enum {
+   SVGA3D_HWVERSION_WS5_RC1   = SVGA3D_MAKE_HWVERSION(0, 1),
+   SVGA3D_HWVERSION_WS5_RC2   = SVGA3D_MAKE_HWVERSION(0, 2),
+   SVGA3D_HWVERSION_WS51_RC1  = SVGA3D_MAKE_HWVERSION(0, 3),
+   SVGA3D_HWVERSION_WS6_B1    = SVGA3D_MAKE_HWVERSION(1, 1),
+   SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
+   SVGA3D_HWVERSION_WS65_B1   = SVGA3D_MAKE_HWVERSION(2, 0),
+   SVGA3D_HWVERSION_WS8_B1    = SVGA3D_MAKE_HWVERSION(2, 1),
+   SVGA3D_HWVERSION_CURRENT   = SVGA3D_HWVERSION_WS8_B1,
+} SVGA3dHardwareVersion;
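+
+/*
+ * Because the major version sits in the high 16 bits, packed versions
+ * compare correctly as plain integers.  Sketch of a guest-side check
+ * (the register read and fallback are hypothetical placeholders):
+ */
+#if 0
+   uint32 hwversion = read_fifo_3d_hwversion();   /* hypothetical */
+
+   /* SVGA3D_HWVERSION_WS8_B1 packs to 0x20001, i.e. major 2, minor 1. */
+   if (hwversion < SVGA3D_HWVERSION_WS8_B1)
+      fall_back_to_older_feature_set();           /* hypothetical */
+#endif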
+
+/*
+ * DevCap indexes.
+ */
+
+typedef enum {
+   SVGA3D_DEVCAP_INVALID                           = ((uint32)-1),
+   SVGA3D_DEVCAP_3D                                = 0,
+   SVGA3D_DEVCAP_MAX_LIGHTS                        = 1,
+
+   /*
+    * SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
+    * fixed-function texture units available. Each of these units
+    * works in both FFP and Shader modes, and supports texture
+    * transforms and texture coordinates. The host may have additional
+    * texture image units that are only usable with shaders.
+    */
+   SVGA3D_DEVCAP_MAX_TEXTURES                      = 2,
+   SVGA3D_DEVCAP_MAX_CLIP_PLANES                   = 3,
+   SVGA3D_DEVCAP_VERTEX_SHADER_VERSION             = 4,
+   SVGA3D_DEVCAP_VERTEX_SHADER                     = 5,
+   SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION           = 6,
+   SVGA3D_DEVCAP_FRAGMENT_SHADER                   = 7,
+   SVGA3D_DEVCAP_MAX_RENDER_TARGETS                = 8,
+   SVGA3D_DEVCAP_S23E8_TEXTURES                    = 9,
+   SVGA3D_DEVCAP_S10E5_TEXTURES                    = 10,
+   SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND             = 11,
+   SVGA3D_DEVCAP_D16_BUFFER_FORMAT                 = 12,
+   SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT               = 13,
+   SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT               = 14,
+   SVGA3D_DEVCAP_QUERY_TYPES                       = 15,
+   SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING         = 16,
+   SVGA3D_DEVCAP_MAX_POINT_SIZE                    = 17,
+   SVGA3D_DEVCAP_MAX_SHADER_TEXTURES               = 18,
+   SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH                 = 19,
+   SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT                = 20,
+   SVGA3D_DEVCAP_MAX_VOLUME_EXTENT                 = 21,
+   SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT                = 22,
+   SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO          = 23,
+   SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY            = 24,
+   SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT               = 25,
+   SVGA3D_DEVCAP_MAX_VERTEX_INDEX                  = 26,
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS    = 27,
+   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS  = 28,
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS           = 29,
+   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS         = 30,
+   SVGA3D_DEVCAP_TEXTURE_OPS                       = 31,
+   SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8               = 32,
+   SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8               = 33,
+   SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10            = 34,
+   SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5               = 35,
+   SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5               = 36,
+   SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4               = 37,
+   SVGA3D_DEVCAP_SURFACEFMT_R5G6B5                 = 38,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16            = 39,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8      = 40,
+   SVGA3D_DEVCAP_SURFACEFMT_ALPHA8                 = 41,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8             = 42,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D16                  = 43,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8                = 44,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8                = 45,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT1                   = 46,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT2                   = 47,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT3                   = 48,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT4                   = 49,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT5                   = 50,
+   SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8           = 51,
+   SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10            = 52,
+   SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8               = 53,
+   SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8               = 54,
+   SVGA3D_DEVCAP_SURFACEFMT_CxV8U8                 = 55,
+   SVGA3D_DEVCAP_SURFACEFMT_R_S10E5                = 56,
+   SVGA3D_DEVCAP_SURFACEFMT_R_S23E8                = 57,
+   SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5               = 58,
+   SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8               = 59,
+   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5             = 60,
+   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8             = 61,
+
+   /*
+    * There is a hole in our devcap definitions for
+    * historical reasons.
+    *
+    * Define a constant just for completeness.
+    */
+   SVGA3D_DEVCAP_MISSING62                         = 62,
+
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES        = 63,
+
+   /*
+    * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
+    * render targets.  This does not include the depth or stencil targets.
+    */
+   SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS   = 64,
+
+   SVGA3D_DEVCAP_SURFACEFMT_V16U16                 = 65,
+   SVGA3D_DEVCAP_SURFACEFMT_G16R16                 = 66,
+   SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16           = 67,
+   SVGA3D_DEVCAP_SURFACEFMT_UYVY                   = 68,
+   SVGA3D_DEVCAP_SURFACEFMT_YUY2                   = 69,
+   SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES    = 70,
+   SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES       = 71,
+   SVGA3D_DEVCAP_ALPHATOCOVERAGE                   = 72,
+   SVGA3D_DEVCAP_SUPERSAMPLE                       = 73,
+   SVGA3D_DEVCAP_AUTOGENMIPMAPS                    = 74,
+   SVGA3D_DEVCAP_SURFACEFMT_NV12                   = 75,
+   SVGA3D_DEVCAP_SURFACEFMT_AYUV                   = 76,
+
+   /*
+    * This is the maximum number of SVGA context IDs that the guest
+    * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
+    */
+   SVGA3D_DEVCAP_MAX_CONTEXT_IDS                   = 77,
+
+   /*
+    * This is the maximum number of SVGA surface IDs that the guest
+    * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
+    */
+   SVGA3D_DEVCAP_MAX_SURFACE_IDS                   = 78,
+
+   SVGA3D_DEVCAP_SURFACEFMT_Z_DF16                 = 79,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_DF24                 = 80,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT            = 81,
+
+   SVGA3D_DEVCAP_SURFACEFMT_ATI1                   = 82,
+   SVGA3D_DEVCAP_SURFACEFMT_ATI2                   = 83,
+
+   /*
+    * Deprecated.
+    */
+   SVGA3D_DEVCAP_DEAD1                             = 84,
+
+   /*
+    * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
+    * ORed together, one for every type of video decoding supported.
+    */
+   SVGA3D_DEVCAP_VIDEO_DECODE                      = 85,
+
+   /*
+    * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
+    * ORed together, one for every type of video processing supported.
+    */
+   SVGA3D_DEVCAP_VIDEO_PROCESS                     = 86,
+
+   SVGA3D_DEVCAP_LINE_AA                           = 87,  /* boolean */
+   SVGA3D_DEVCAP_LINE_STIPPLE                      = 88,  /* boolean */
+   SVGA3D_DEVCAP_MAX_LINE_WIDTH                    = 89,  /* float */
+   SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH                 = 90,  /* float */
+
+   SVGA3D_DEVCAP_SURFACEFMT_YV12                   = 91,
+
+   /*
+    * Does the host support the SVGA logic ops commands?
+    */
+   SVGA3D_DEVCAP_LOGICOPS                          = 92,
+
+   /*
+    * Are TS_CONSTANT, TS_COLOR_KEY, and TS_COLOR_KEY_ENABLE supported?
+    */
+   SVGA3D_DEVCAP_TS_COLOR_KEY                      = 93, /* boolean */
+
+   /*
+    * Deprecated.
+    */
+   SVGA3D_DEVCAP_DEAD2                             = 94,
+
+   /*
+    * Does the device support the DX commands?
+    */
+   SVGA3D_DEVCAP_DX                                = 95,
+
+   /*
+    * What is the maximum size of a texture array?
+    *
+    * (Even if this cap is zero, cubemaps are still allowed.)
+    */
+   SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE            = 96,
+
+   /*
+    * What is the maximum number of vertex buffers that can
+    * be used in the DXContext inputAssembly?
+    */
+   SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS              = 97,
+
+   /*
+    * What is the maximum number of constant buffers
+    * that can be expected to work correctly with a
+    * DX context?
+    */
+   SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS           = 98,
+
+   /*
+    * Does the device support provoking vertex control?
+    * If zero, the first vertex will always be the provoking vertex.
+    */
+   SVGA3D_DEVCAP_DX_PROVOKING_VERTEX               = 99,
+
+   SVGA3D_DEVCAP_DXFMT_X8R8G8B8                    = 100,
+   SVGA3D_DEVCAP_DXFMT_A8R8G8B8                    = 101,
+   SVGA3D_DEVCAP_DXFMT_R5G6B5                      = 102,
+   SVGA3D_DEVCAP_DXFMT_X1R5G5B5                    = 103,
+   SVGA3D_DEVCAP_DXFMT_A1R5G5B5                    = 104,
+   SVGA3D_DEVCAP_DXFMT_A4R4G4B4                    = 105,
+   SVGA3D_DEVCAP_DXFMT_Z_D32                       = 106,
+   SVGA3D_DEVCAP_DXFMT_Z_D16                       = 107,
+   SVGA3D_DEVCAP_DXFMT_Z_D24S8                     = 108,
+   SVGA3D_DEVCAP_DXFMT_Z_D15S1                     = 109,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE8                  = 110,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE4_ALPHA4           = 111,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE16                 = 112,
+   SVGA3D_DEVCAP_DXFMT_LUMINANCE8_ALPHA8           = 113,
+   SVGA3D_DEVCAP_DXFMT_DXT1                        = 114,
+   SVGA3D_DEVCAP_DXFMT_DXT2                        = 115,
+   SVGA3D_DEVCAP_DXFMT_DXT3                        = 116,
+   SVGA3D_DEVCAP_DXFMT_DXT4                        = 117,
+   SVGA3D_DEVCAP_DXFMT_DXT5                        = 118,
+   SVGA3D_DEVCAP_DXFMT_BUMPU8V8                    = 119,
+   SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5                  = 120,
+   SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8                = 121,
+   SVGA3D_DEVCAP_DXFMT_BUMPL8V8U8                  = 122,
+   SVGA3D_DEVCAP_DXFMT_ARGB_S10E5                  = 123,
+   SVGA3D_DEVCAP_DXFMT_ARGB_S23E8                  = 124,
+   SVGA3D_DEVCAP_DXFMT_A2R10G10B10                 = 125,
+   SVGA3D_DEVCAP_DXFMT_V8U8                        = 126,
+   SVGA3D_DEVCAP_DXFMT_Q8W8V8U8                    = 127,
+   SVGA3D_DEVCAP_DXFMT_CxV8U8                      = 128,
+   SVGA3D_DEVCAP_DXFMT_X8L8V8U8                    = 129,
+   SVGA3D_DEVCAP_DXFMT_A2W10V10U10                 = 130,
+   SVGA3D_DEVCAP_DXFMT_ALPHA8                      = 131,
+   SVGA3D_DEVCAP_DXFMT_R_S10E5                     = 132,
+   SVGA3D_DEVCAP_DXFMT_R_S23E8                     = 133,
+   SVGA3D_DEVCAP_DXFMT_RG_S10E5                    = 134,
+   SVGA3D_DEVCAP_DXFMT_RG_S23E8                    = 135,
+   SVGA3D_DEVCAP_DXFMT_BUFFER                      = 136,
+   SVGA3D_DEVCAP_DXFMT_Z_D24X8                     = 137,
+   SVGA3D_DEVCAP_DXFMT_V16U16                      = 138,
+   SVGA3D_DEVCAP_DXFMT_G16R16                      = 139,
+   SVGA3D_DEVCAP_DXFMT_A16B16G16R16                = 140,
+   SVGA3D_DEVCAP_DXFMT_UYVY                        = 141,
+   SVGA3D_DEVCAP_DXFMT_YUY2                        = 142,
+   SVGA3D_DEVCAP_DXFMT_NV12                        = 143,
+   SVGA3D_DEVCAP_DXFMT_AYUV                        = 144,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_TYPELESS       = 145,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_UINT           = 146,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_SINT           = 147,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_TYPELESS          = 148,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_FLOAT             = 149,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_UINT              = 150,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32_SINT              = 151,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_TYPELESS       = 152,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UINT           = 153,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SNORM          = 154,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_SINT           = 155,
+   SVGA3D_DEVCAP_DXFMT_R32G32_TYPELESS             = 156,
+   SVGA3D_DEVCAP_DXFMT_R32G32_UINT                 = 157,
+   SVGA3D_DEVCAP_DXFMT_R32G32_SINT                 = 158,
+   SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS           = 159,
+   SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT        = 160,
+   SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24_TYPELESS    = 161,
+   SVGA3D_DEVCAP_DXFMT_X32_TYPELESS_G8X24_UINT     = 162,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS        = 163,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT            = 164,
+   SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT             = 165,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_TYPELESS           = 166,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM              = 167,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UNORM_SRGB         = 168,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_UINT               = 169,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SINT               = 170,
+   SVGA3D_DEVCAP_DXFMT_R16G16_TYPELESS             = 171,
+   SVGA3D_DEVCAP_DXFMT_R16G16_UINT                 = 172,
+   SVGA3D_DEVCAP_DXFMT_R16G16_SINT                 = 173,
+   SVGA3D_DEVCAP_DXFMT_R32_TYPELESS                = 174,
+   SVGA3D_DEVCAP_DXFMT_D32_FLOAT                   = 175,
+   SVGA3D_DEVCAP_DXFMT_R32_UINT                    = 176,
+   SVGA3D_DEVCAP_DXFMT_R32_SINT                    = 177,
+   SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS              = 178,
+   SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT           = 179,
+   SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8_TYPELESS       = 180,
+   SVGA3D_DEVCAP_DXFMT_X24_TYPELESS_G8_UINT        = 181,
+   SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS               = 182,
+   SVGA3D_DEVCAP_DXFMT_R8G8_UNORM                  = 183,
+   SVGA3D_DEVCAP_DXFMT_R8G8_UINT                   = 184,
+   SVGA3D_DEVCAP_DXFMT_R8G8_SINT                   = 185,
+   SVGA3D_DEVCAP_DXFMT_R16_TYPELESS                = 186,
+   SVGA3D_DEVCAP_DXFMT_R16_UNORM                   = 187,
+   SVGA3D_DEVCAP_DXFMT_R16_UINT                    = 188,
+   SVGA3D_DEVCAP_DXFMT_R16_SNORM                   = 189,
+   SVGA3D_DEVCAP_DXFMT_R16_SINT                    = 190,
+   SVGA3D_DEVCAP_DXFMT_R8_TYPELESS                 = 191,
+   SVGA3D_DEVCAP_DXFMT_R8_UNORM                    = 192,
+   SVGA3D_DEVCAP_DXFMT_R8_UINT                     = 193,
+   SVGA3D_DEVCAP_DXFMT_R8_SNORM                    = 194,
+   SVGA3D_DEVCAP_DXFMT_R8_SINT                     = 195,
+   SVGA3D_DEVCAP_DXFMT_P8                          = 196,
+   SVGA3D_DEVCAP_DXFMT_R9G9B9E5_SHAREDEXP          = 197,
+   SVGA3D_DEVCAP_DXFMT_R8G8_B8G8_UNORM             = 198,
+   SVGA3D_DEVCAP_DXFMT_G8R8_G8B8_UNORM             = 199,
+   SVGA3D_DEVCAP_DXFMT_BC1_TYPELESS                = 200,
+   SVGA3D_DEVCAP_DXFMT_BC1_UNORM_SRGB              = 201,
+   SVGA3D_DEVCAP_DXFMT_BC2_TYPELESS                = 202,
+   SVGA3D_DEVCAP_DXFMT_BC2_UNORM_SRGB              = 203,
+   SVGA3D_DEVCAP_DXFMT_BC3_TYPELESS                = 204,
+   SVGA3D_DEVCAP_DXFMT_BC3_UNORM_SRGB              = 205,
+   SVGA3D_DEVCAP_DXFMT_BC4_TYPELESS                = 206,
+   SVGA3D_DEVCAP_DXFMT_ATI1                        = 207,
+   SVGA3D_DEVCAP_DXFMT_BC4_SNORM                   = 208,
+   SVGA3D_DEVCAP_DXFMT_BC5_TYPELESS                = 209,
+   SVGA3D_DEVCAP_DXFMT_ATI2                        = 210,
+   SVGA3D_DEVCAP_DXFMT_BC5_SNORM                   = 211,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10_XR_BIAS_A2_UNORM  = 212,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_TYPELESS           = 213,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM_SRGB         = 214,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_TYPELESS           = 215,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM_SRGB         = 216,
+   SVGA3D_DEVCAP_DXFMT_Z_DF16                      = 217,
+   SVGA3D_DEVCAP_DXFMT_Z_DF24                      = 218,
+   SVGA3D_DEVCAP_DXFMT_Z_D24S8_INT                 = 219,
+   SVGA3D_DEVCAP_DXFMT_YV12                        = 220,
+   SVGA3D_DEVCAP_DXFMT_R32G32B32A32_FLOAT          = 221,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_FLOAT          = 222,
+   SVGA3D_DEVCAP_DXFMT_R16G16B16A16_UNORM          = 223,
+   SVGA3D_DEVCAP_DXFMT_R32G32_FLOAT                = 224,
+   SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UNORM           = 225,
+   SVGA3D_DEVCAP_DXFMT_R8G8B8A8_SNORM              = 226,
+   SVGA3D_DEVCAP_DXFMT_R16G16_FLOAT                = 227,
+   SVGA3D_DEVCAP_DXFMT_R16G16_UNORM                = 228,
+   SVGA3D_DEVCAP_DXFMT_R16G16_SNORM                = 229,
+   SVGA3D_DEVCAP_DXFMT_R32_FLOAT                   = 230,
+   SVGA3D_DEVCAP_DXFMT_R8G8_SNORM                  = 231,
+   SVGA3D_DEVCAP_DXFMT_R16_FLOAT                   = 232,
+   SVGA3D_DEVCAP_DXFMT_D16_UNORM                   = 233,
+   SVGA3D_DEVCAP_DXFMT_A8_UNORM                    = 234,
+   SVGA3D_DEVCAP_DXFMT_BC1_UNORM                   = 235,
+   SVGA3D_DEVCAP_DXFMT_BC2_UNORM                   = 236,
+   SVGA3D_DEVCAP_DXFMT_BC3_UNORM                   = 237,
+   SVGA3D_DEVCAP_DXFMT_B5G6R5_UNORM                = 238,
+   SVGA3D_DEVCAP_DXFMT_B5G5R5A1_UNORM              = 239,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8A8_UNORM              = 240,
+   SVGA3D_DEVCAP_DXFMT_B8G8R8X8_UNORM              = 241,
+   SVGA3D_DEVCAP_DXFMT_BC4_UNORM                   = 242,
+   SVGA3D_DEVCAP_DXFMT_BC5_UNORM                   = 243,
+
+   SVGA3D_DEVCAP_MAX                       /* This must be the last index. */
+} SVGA3dDevCapIndex;
+
+/*
+ * Bit definitions for DXFMT devcaps
+ *
+ * SUPPORTED: Can the format be defined?
+ * SHADER_SAMPLE: Can the format be sampled from a shader?
+ * COLOR_RENDERTARGET: Can the format be a color render target?
+ * DEPTH_RENDERTARGET: Can the format be a depth render target?
+ * BLENDABLE: Is the format blendable?
+ * MIPS: Does the format support mip levels?
+ * ARRAY: Does the format support texture arrays?
+ * VOLUME: Does the format support volume (3D) textures?
+ * DX_VERTEX_BUFFER: Can the format be used as a DX vertex buffer?
+ * MULTISAMPLE_2: Does the format support 2x multisample?
+ * MULTISAMPLE_4: Does the format support 4x multisample?
+ * MULTISAMPLE_8: Does the format support 8x multisample?
+ */
+#define SVGA3D_DXFMT_SUPPORTED                (1 <<  0)
+#define SVGA3D_DXFMT_SHADER_SAMPLE            (1 <<  1)
+#define SVGA3D_DXFMT_COLOR_RENDERTARGET       (1 <<  2)
+#define SVGA3D_DXFMT_DEPTH_RENDERTARGET       (1 <<  3)
+#define SVGA3D_DXFMT_BLENDABLE                (1 <<  4)
+#define SVGA3D_DXFMT_MIPS                     (1 <<  5)
+#define SVGA3D_DXFMT_ARRAY                    (1 <<  6)
+#define SVGA3D_DXFMT_VOLUME                   (1 <<  7)
+#define SVGA3D_DXFMT_DX_VERTEX_BUFFER         (1 <<  8)
+#define SVGADX_DXFMT_MULTISAMPLE_2            (1 <<  9)
+#define SVGADX_DXFMT_MULTISAMPLE_4            (1 << 10)
+#define SVGADX_DXFMT_MULTISAMPLE_8            (1 << 11)
+#define SVGADX_DXFMT_MAX                      (1 << 12)
+
+/*
+ * Convenience mask for any multisample capability.
+ *
+ * The multisample bits imply both load and render capability.
+ */
+#define SVGA3D_DXFMT_MULTISAMPLE ( \
+           SVGADX_DXFMT_MULTISAMPLE_2 | \
+           SVGADX_DXFMT_MULTISAMPLE_4 | \
+           SVGADX_DXFMT_MULTISAMPLE_8 )
+
+typedef union {
+   Bool   b;
+   uint32 u;
+   int32  i;
+   float  f;
+} SVGA3dDevCapResult;
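+
+/*
+ * Example (an illustrative sketch, not part of the device interface):
+ * testing a DXFMT devcap value against the bit masks above.  How the
+ * raw devcap value is read from the device is host-specific and is
+ * assumed to have been done already.
+ */
+static inline Bool
+SVGA3dDXFmtIsBlendableRenderTarget(SVGA3dDevCapResult result)
+{
+   const uint32 required = SVGA3D_DXFMT_SUPPORTED |
+                           SVGA3D_DXFMT_COLOR_RENDERTARGET |
+                           SVGA3D_DXFMT_BLENDABLE;
+
+   /* All three capabilities must be present for the format to qualify. */
+   return (result.u & required) == required;
+}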
+
+#endif /* _SVGA3D_DEVCAPS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
new file mode 100644
index 0000000..8c5ae60
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
@@ -0,0 +1,1487 @@
+/**********************************************************
+ * Copyright 2012-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_dx.h --
+ *
+ *       SVGA 3d hardware definitions for DX10 support.
+ */
+
+#ifndef _SVGA3D_DX_H_
+#define _SVGA3D_DX_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+#include "includeCheck.h"
+
+#include "svga3d_limits.h"
+
+#define SVGA3D_INPUT_MIN               0
+#define SVGA3D_INPUT_PER_VERTEX_DATA   0
+#define SVGA3D_INPUT_PER_INSTANCE_DATA 1
+#define SVGA3D_INPUT_MAX               2
+typedef uint32 SVGA3dInputClassification;
+
+#define SVGA3D_RESOURCE_TYPE_MIN      1
+#define SVGA3D_RESOURCE_BUFFER        1
+#define SVGA3D_RESOURCE_TEXTURE1D     2
+#define SVGA3D_RESOURCE_TEXTURE2D     3
+#define SVGA3D_RESOURCE_TEXTURE3D     4
+#define SVGA3D_RESOURCE_TEXTURECUBE   5
+#define SVGA3D_RESOURCE_TYPE_DX10_MAX 6
+#define SVGA3D_RESOURCE_BUFFEREX      6
+#define SVGA3D_RESOURCE_TYPE_MAX      7
+typedef uint32 SVGA3dResourceType;
+
+#define SVGA3D_DEPTH_WRITE_MASK_ZERO   0
+#define SVGA3D_DEPTH_WRITE_MASK_ALL    1
+typedef uint8 SVGA3dDepthWriteMask;
+
+#define SVGA3D_FILTER_MIP_LINEAR  (1 << 0)
+#define SVGA3D_FILTER_MAG_LINEAR  (1 << 2)
+#define SVGA3D_FILTER_MIN_LINEAR  (1 << 4)
+#define SVGA3D_FILTER_ANISOTROPIC (1 << 6)
+#define SVGA3D_FILTER_COMPARE     (1 << 7)
+typedef uint32 SVGA3dFilter;
+
+#define SVGA3D_CULL_INVALID 0
+#define SVGA3D_CULL_MIN     1
+#define SVGA3D_CULL_NONE    1
+#define SVGA3D_CULL_FRONT   2
+#define SVGA3D_CULL_BACK    3
+#define SVGA3D_CULL_MAX     4
+typedef uint8 SVGA3dCullMode;
+
+#define SVGA3D_COMPARISON_INVALID         0
+#define SVGA3D_COMPARISON_MIN             1
+#define SVGA3D_COMPARISON_NEVER           1
+#define SVGA3D_COMPARISON_LESS            2
+#define SVGA3D_COMPARISON_EQUAL           3
+#define SVGA3D_COMPARISON_LESS_EQUAL      4
+#define SVGA3D_COMPARISON_GREATER         5
+#define SVGA3D_COMPARISON_NOT_EQUAL       6
+#define SVGA3D_COMPARISON_GREATER_EQUAL   7
+#define SVGA3D_COMPARISON_ALWAYS          8
+#define SVGA3D_COMPARISON_MAX             9
+typedef uint8 SVGA3dComparisonFunc;
+
+#define SVGA3D_DX_MAX_VERTEXBUFFERS 32
+#define SVGA3D_DX_MAX_SOTARGETS 4
+#define SVGA3D_DX_MAX_SRVIEWS 128
+#define SVGA3D_DX_MAX_CONSTBUFFERS 16
+#define SVGA3D_DX_MAX_SAMPLERS 16
+
+/* Id limits */
+static const uint32 SVGA3dBlendObjectCountPerContext = 4096;
+static const uint32 SVGA3dDepthStencilObjectCountPerContext = 4096;
+
+typedef uint32 SVGA3dSurfaceId;
+typedef uint32 SVGA3dShaderResourceViewId;
+typedef uint32 SVGA3dRenderTargetViewId;
+typedef uint32 SVGA3dDepthStencilViewId;
+
+typedef uint32 SVGA3dShaderId;
+typedef uint32 SVGA3dElementLayoutId;
+typedef uint32 SVGA3dSamplerId;
+typedef uint32 SVGA3dBlendStateId;
+typedef uint32 SVGA3dDepthStencilStateId;
+typedef uint32 SVGA3dRasterizerStateId;
+typedef uint32 SVGA3dQueryId;
+typedef uint32 SVGA3dStreamOutputId;
+
+typedef union {
+   struct {
+      float r;
+      float g;
+      float b;
+      float a;
+   };
+
+   float value[4];
+} SVGA3dRGBAFloat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 cid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGAOTableDXContextEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineContext;   /* SVGA_3D_CMD_DX_DEFINE_CONTEXT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyContext;   /* SVGA_3D_CMD_DX_DESTROY_CONTEXT */
+
+/*
+ * Bind a DX context.
+ *
+ * validContents should be set to 0 for new contexts,
+ * and 1 if this is an old context which is getting paged
+ * back on to the device.
+ *
+ * For new contexts, it is recommended that the driver
+ * issue commands to initialize all interesting state
+ * prior to rendering.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindContext {
+   uint32 cid;
+   SVGAMobId mobid;
+   uint32 validContents;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindContext;   /* SVGA_3D_CMD_DX_BIND_CONTEXT */
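+
+/*
+ * Illustrative sketch, not part of the device interface: populate a
+ * bind-context command body.  Per the comment above, validContents is
+ * 0 for a brand-new context and 1 when an existing context is paged
+ * back onto the device.  Queuing the command is outside this header.
+ */
+static inline void
+SVGA3dCmdDXBindContextInit(SVGA3dCmdDXBindContext *cmd, /* out */
+                           uint32 cid,
+                           SVGAMobId mobid,
+                           uint32 validContents)
+{
+   cmd->cid = cid;
+   cmd->mobid = mobid;
+   cmd->validContents = validContents;
+}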
+
+/*
+ * Readback a DX context.
+ * (Request that the device flush the contents back into guest memory.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackContext;   /* SVGA_3D_CMD_DX_READBACK_CONTEXT */
+
+/*
+ * Invalidate a guest-backed context.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXInvalidateContext {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXInvalidateContext;   /* SVGA_3D_CMD_DX_INVALIDATE_CONTEXT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dReplyFormatData {
+   uint32 formatSupport;
+   uint32 msaa2xQualityLevels:5;
+   uint32 msaa4xQualityLevels:5;
+   uint32 msaa8xQualityLevels:5;
+   uint32 msaa16xQualityLevels:5;
+   uint32 msaa32xQualityLevels:5;
+   uint32 pad:7;
+}
+#include "vmware_pack_end.h"
+SVGA3dReplyFormatData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSingleConstantBuffer {
+   uint32 slot;
+   SVGA3dShaderType type;
+   SVGA3dSurfaceId sid;
+   uint32 offsetInBytes;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSingleConstantBuffer;
+/* SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetShaderResources {
+   uint32 startView;
+   SVGA3dShaderType type;
+
+   /*
+    * Followed by a variable number of SVGA3dShaderResourceViewId's.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetShaderResources; /* SVGA_3D_CMD_DX_SET_SHADER_RESOURCES */
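+
+/*
+ * Illustrative sketch, not part of the device interface: a variable-
+ * length command body is followed directly by its trailing array, so
+ * its total size is the fixed part plus one element per entry.  The
+ * same layout applies to the other "followed by a variable number
+ * of ..." commands in this file.
+ */
+static inline uint32
+SVGA3dCmdDXSetShaderResourcesSize(uint32 numViews)
+{
+   return (uint32) (sizeof(SVGA3dCmdDXSetShaderResources) +
+                    numViews * sizeof(SVGA3dShaderResourceViewId));
+}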
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetShader {
+   SVGA3dShaderId shaderId;
+   SVGA3dShaderType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetShader; /* SVGA_3D_CMD_DX_SET_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSamplers {
+   uint32 startSampler;
+   SVGA3dShaderType type;
+
+   /*
+    * Followed by a variable number of SVGA3dSamplerId's.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSamplers; /* SVGA_3D_CMD_DX_SET_SAMPLERS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDraw {
+   uint32 vertexCount;
+   uint32 startVertexLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDraw; /* SVGA_3D_CMD_DX_DRAW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawIndexed {
+   uint32 indexCount;
+   uint32 startIndexLocation;
+   int32  baseVertexLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawIndexed; /* SVGA_3D_CMD_DX_DRAW_INDEXED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawInstanced {
+   uint32 vertexCountPerInstance;
+   uint32 instanceCount;
+   uint32 startVertexLocation;
+   uint32 startInstanceLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawInstanced; /* SVGA_3D_CMD_DX_DRAW_INSTANCED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawIndexedInstanced {
+   uint32 indexCountPerInstance;
+   uint32 instanceCount;
+   uint32 startIndexLocation;
+   int32  baseVertexLocation;
+   uint32 startInstanceLocation;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawIndexedInstanced; /* SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDrawAuto {
+   uint32 pad0;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDrawAuto; /* SVGA_3D_CMD_DX_DRAW_AUTO */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetInputLayout {
+   SVGA3dElementLayoutId elementLayoutId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetInputLayout; /* SVGA_3D_CMD_DX_SET_INPUT_LAYOUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dVertexBuffer {
+   SVGA3dSurfaceId sid;
+   uint32 stride;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dVertexBuffer;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetVertexBuffers {
+   uint32 startBuffer;
+   /* Followed by a variable number of SVGA3dVertexBuffer's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetVertexBuffers; /* SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetIndexBuffer {
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetIndexBuffer; /* SVGA_3D_CMD_DX_SET_INDEX_BUFFER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetTopology {
+   SVGA3dPrimitiveType topology;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetTopology; /* SVGA_3D_CMD_DX_SET_TOPOLOGY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetRenderTargets {
+   SVGA3dDepthStencilViewId depthStencilViewId;
+   /* Followed by a variable number of SVGA3dRenderTargetViewId's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetRenderTargets; /* SVGA_3D_CMD_DX_SET_RENDERTARGETS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetBlendState {
+   SVGA3dBlendStateId blendId;
+   float blendFactor[4];
+   uint32 sampleMask;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetBlendState; /* SVGA_3D_CMD_DX_SET_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetDepthStencilState {
+   SVGA3dDepthStencilStateId depthStencilId;
+   uint32 stencilRef;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetDepthStencilState; /* SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetRasterizerState {
+   SVGA3dRasterizerStateId rasterizerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetRasterizerState; /* SVGA_3D_CMD_DX_SET_RASTERIZER_STATE */
+
+#define SVGA3D_DXQUERY_FLAG_PREDICATEHINT (1 << 0)
+typedef uint32 SVGA3dDXQueryFlags;
+
+/*
+ * The SVGADXQueryDeviceState and SVGADXQueryDeviceBits are used by the device
+ * to track query state transitions, but are not intended to be used by the
+ * driver.
+ */
+#define SVGADX_QDSTATE_INVALID   ((uint8)-1) /* Query has no state */
+#define SVGADX_QDSTATE_MIN       0
+#define SVGADX_QDSTATE_IDLE      0   /* Query hasn't started yet */
+#define SVGADX_QDSTATE_ACTIVE    1   /* Query is actively gathering data */
+#define SVGADX_QDSTATE_PENDING   2   /* Query is waiting for results */
+#define SVGADX_QDSTATE_FINISHED  3   /* Query has completed */
+#define SVGADX_QDSTATE_MAX       4
+typedef uint8 SVGADXQueryDeviceState;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dQueryTypeUint8 type;
+   uint16 pad0;
+   SVGADXQueryDeviceState state;
+   SVGA3dDXQueryFlags flags;
+   SVGAMobId mobid;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXQueryEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineQuery {
+   SVGA3dQueryId queryId;
+   SVGA3dQueryType type;
+   SVGA3dDXQueryFlags flags;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineQuery; /* SVGA_3D_CMD_DX_DEFINE_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyQuery {
+   SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyQuery; /* SVGA_3D_CMD_DX_DESTROY_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindQuery {
+   SVGA3dQueryId queryId;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindQuery; /* SVGA_3D_CMD_DX_BIND_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetQueryOffset {
+   SVGA3dQueryId queryId;
+   uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetQueryOffset; /* SVGA_3D_CMD_DX_SET_QUERY_OFFSET */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBeginQuery {
+   SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBeginQuery; /* SVGA_3D_CMD_DX_QUERY_BEGIN */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXEndQuery {
+   SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXEndQuery; /* SVGA_3D_CMD_DX_QUERY_END */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackQuery {
+   SVGA3dQueryId queryId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackQuery; /* SVGA_3D_CMD_DX_READBACK_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXMoveQuery {
+   SVGA3dQueryId queryId;
+   SVGAMobId mobid;
+   uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXMoveQuery; /* SVGA_3D_CMD_DX_MOVE_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindAllQuery {
+   uint32 cid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindAllQuery; /* SVGA_3D_CMD_DX_BIND_ALL_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackAllQuery {
+   uint32 cid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackAllQuery; /* SVGA_3D_CMD_DX_READBACK_ALL_QUERY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetPredication {
+   SVGA3dQueryId queryId;
+   uint32 predicateValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetPredication; /* SVGA_3D_CMD_DX_SET_PREDICATION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dDXSOState {
+   uint32 offset;       /* Starting offset */
+   uint32 intOffset;    /* Internal offset */
+   uint32 vertexCount;  /* vertices written */
+   uint32 sizeInBytes;  /* max bytes to write */
+}
+#include "vmware_pack_end.h"
+SVGA3dDXSOState;
+
+/* Set the offset field to this value to append SO values to the buffer */
+#define SVGA3D_DX_SO_OFFSET_APPEND ((uint32) ~0u)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSoTarget {
+   SVGA3dSurfaceId sid;
+   uint32 offset;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dSoTarget;
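+
+/*
+ * Illustrative sketch, not part of the device interface: describe a
+ * stream-output target that appends to wherever the previous SO pass
+ * stopped, using the SVGA3D_DX_SO_OFFSET_APPEND sentinel above.
+ */
+static inline void
+SVGA3dSoTargetInitAppend(SVGA3dSoTarget *target, /* out */
+                         SVGA3dSurfaceId sid,
+                         uint32 sizeInBytes)
+{
+   target->sid = sid;
+   target->offset = SVGA3D_DX_SO_OFFSET_APPEND;
+   target->sizeInBytes = sizeInBytes;
+}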
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetSOTargets {
+   uint32 pad0;
+   /* Followed by a variable number of SVGA3dSOTarget's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetSOTargets; /* SVGA_3D_CMD_DX_SET_SOTARGETS */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dViewport {
+   float x;
+   float y;
+   float width;
+   float height;
+   float minDepth;
+   float maxDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dViewport;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetViewports {
+   uint32 pad0;
+   /* Followed by a variable number of SVGA3dViewport's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetViewports; /* SVGA_3D_CMD_DX_SET_VIEWPORTS */
+
+#define SVGA3D_DX_MAX_VIEWPORTS  16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetScissorRects {
+   uint32 pad0;
+   /* Followed by a variable number of SVGASignedRect's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetScissorRects; /* SVGA_3D_CMD_DX_SET_SCISSORRECTS */
+
+#define SVGA3D_DX_MAX_SCISSORRECTS  16
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearRenderTargetView {
+   SVGA3dRenderTargetViewId renderTargetViewId;
+   SVGA3dRGBAFloat rgba;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearRenderTargetView; /* SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXClearDepthStencilView {
+   uint16 flags;
+   uint16 stencil;
+   SVGA3dDepthStencilViewId depthStencilViewId;
+   float depth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXClearDepthStencilView; /* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredCopyRegion {
+   SVGA3dSurfaceId dstSid;
+   uint32 dstSubResource;
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredCopyRegion;
+/* SVGA_3D_CMD_DX_PRED_COPY_REGION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredCopy {
+   SVGA3dSurfaceId dstSid;
+   SVGA3dSurfaceId srcSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredCopy; /* SVGA_3D_CMD_DX_PRED_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBufferCopy {
+   SVGA3dSurfaceId dest;
+   SVGA3dSurfaceId src;
+   uint32 destX;
+   uint32 srcX;
+   uint32 width;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBufferCopy;
+/* SVGA_3D_CMD_DX_BUFFER_COPY */
+
+typedef uint32 SVGA3dDXStretchBltMode;
+#define SVGADX_STRETCHBLT_LINEAR         (1 << 0)
+#define SVGADX_STRETCHBLT_FORCE_SRC_SRGB (1 << 1)
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXStretchBlt {
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dSurfaceId dstSid;
+   uint32 destSubResource;
+   SVGA3dBox boxSrc;
+   SVGA3dBox boxDest;
+   SVGA3dDXStretchBltMode mode;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXStretchBlt; /* SVGA_3D_CMD_DX_STRETCHBLT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXGenMips {
+   SVGA3dShaderResourceViewId shaderResourceViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXGenMips; /* SVGA_3D_CMD_DX_GENMIPS */
+
+/*
+ * Defines a resource/DX surface.  Resources share the surfaceId namespace.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v2 {
+   uint32 sid;
+   SVGA3dSurfaceFlags surfaceFlags;
+   SVGA3dSurfaceFormat format;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+   uint32 arraySize;
+   uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v2;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
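+
+/*
+ * Illustrative sketch, not part of the device interface: the v2 define
+ * differs from the original define-GB-surface command only by the
+ * trailing arraySize, so a 2D texture array is the base description
+ * plus the slice count.  SVGA3D_TEX_FILTER_NONE is assumed to come
+ * from svga3d_types.h; the multisample count of 0 assumes a
+ * single-sampled surface.
+ */
+static inline void
+SVGA3dCmdDefineGBSurfaceV2Init(SVGA3dCmdDefineGBSurface_v2 *cmd, /* out */
+                               uint32 sid,
+                               SVGA3dSurfaceFlags surfaceFlags,
+                               SVGA3dSurfaceFormat format,
+                               uint32 numMipLevels,
+                               SVGA3dSize size,
+                               uint32 arraySize)
+{
+   cmd->sid = sid;
+   cmd->surfaceFlags = surfaceFlags;
+   cmd->format = format;
+   cmd->numMipLevels = numMipLevels;
+   cmd->multisampleCount = 0;                     /* assumed: not MSAA */
+   cmd->autogenFilter = SVGA3D_TEX_FILTER_NONE;   /* assumed: no autogen */
+   cmd->size = size;
+   cmd->arraySize = arraySize;
+   cmd->pad = 0;
+}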
+
+/*
+ * Update a sub-resource in a guest-backed resource.
+ * (Inform the device that the guest-contents have been updated.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXUpdateSubResource {
+   SVGA3dSurfaceId sid;
+   uint32 subResource;
+   SVGA3dBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXUpdateSubResource;   /* SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE */
+
+/*
+ * Readback a subresource in a guest-backed resource.
+ * (Request the device to flush the dirty contents into the guest.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackSubResource {
+   SVGA3dSurfaceId sid;
+   uint32 subResource;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackSubResource;   /* SVGA_3D_CMD_DX_READBACK_SUBRESOURCE */
+
+/*
+ * Invalidate an image in a guest-backed surface.
+ * (Notify the device that the contents can be lost.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXInvalidateSubResource {
+   SVGA3dSurfaceId sid;
+   uint32 subResource;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXInvalidateSubResource;   /* SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE */
+
+/*
+ * Raw byte wise transfer from a buffer surface into another surface
+ * of the requested box.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXTransferFromBuffer {
+   SVGA3dSurfaceId srcSid;
+   uint32 srcOffset;
+   uint32 srcPitch;
+   uint32 srcSlicePitch;
+   SVGA3dSurfaceId destSid;
+   uint32 destSubResource;
+   SVGA3dBox destBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXTransferFromBuffer;   /* SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER */
+
+/*
+ * Raw byte wise transfer from a buffer surface into another surface
+ * of the requested box.  Supported if SVGA3D_DEVCAP_DX is set.
+ * The context is implied from the command buffer header.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredTransferFromBuffer {
+   SVGA3dSurfaceId srcSid;
+   uint32 srcOffset;
+   uint32 srcPitch;
+   uint32 srcSlicePitch;
+   SVGA3dSurfaceId destSid;
+   uint32 destSubResource;
+   SVGA3dBox destBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredTransferFromBuffer;
+/* SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSurfaceCopyAndReadback {
+   SVGA3dSurfaceId srcSid;
+   SVGA3dSurfaceId destSid;
+   SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSurfaceCopyAndReadback;
+/* SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   union {
+      struct {
+         uint32 firstElement;
+         uint32 numElements;
+         uint32 pad0;
+         uint32 pad1;
+      } buffer;
+      struct {
+         uint32 mostDetailedMip;
+         uint32 firstArraySlice;
+         uint32 mipLevels;
+         uint32 arraySize;
+      } tex;
+      struct {
+         uint32 firstElement;
+         uint32 numElements;
+         uint32 flags;
+         uint32 pad0;
+      } bufferex;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dShaderResourceViewDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   SVGA3dShaderResourceViewDesc desc;
+   uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXSRViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineShaderResourceView {
+   SVGA3dShaderResourceViewId shaderResourceViewId;
+
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+
+   SVGA3dShaderResourceViewDesc desc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineShaderResourceView;
+/* SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyShaderResourceView {
+   SVGA3dShaderResourceViewId shaderResourceViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyShaderResourceView;
+/* SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dRenderTargetViewDesc {
+   union {
+      struct {
+         uint32 firstElement;
+         uint32 numElements;
+      } buffer;
+      struct {
+         uint32 mipSlice;
+         uint32 firstArraySlice;
+         uint32 arraySize;
+      } tex;                    /* 1d, 2d, cube */
+      struct {
+         uint32 mipSlice;
+         uint32 firstW;
+         uint32 wSize;
+      } tex3D;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dRenderTargetViewDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   SVGA3dRenderTargetViewDesc desc;
+   uint32 pad[2];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXRTViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineRenderTargetView {
+   SVGA3dRenderTargetViewId renderTargetViewId;
+
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+
+   SVGA3dRenderTargetViewDesc desc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineRenderTargetView;
+/* SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyRenderTargetView {
+   SVGA3dRenderTargetViewId renderTargetViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyRenderTargetView;
+/* SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW */
+
+/*
+ * Flags controlling how a depth/stencil view is created.
+ */
+#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_DEPTH   0x01
+#define SVGA3D_DXDSVIEW_CREATE_READ_ONLY_STENCIL 0x02
+#define SVGA3D_DXDSVIEW_CREATE_FLAG_MASK         0x03
+typedef uint8 SVGA3DCreateDSViewFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   uint32 mipSlice;
+   uint32 firstArraySlice;
+   uint32 arraySize;
+   SVGA3DCreateDSViewFlags flags;
+   uint8 pad0;
+   uint16 pad1;
+   uint32 pad2;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXDSViewEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineDepthStencilView {
+   SVGA3dDepthStencilViewId depthStencilViewId;
+
+   SVGA3dSurfaceId sid;
+   SVGA3dSurfaceFormat format;
+   SVGA3dResourceType resourceDimension;
+   uint32 mipSlice;
+   uint32 firstArraySlice;
+   uint32 arraySize;
+   SVGA3DCreateDSViewFlags flags;
+   uint8 pad0;
+   uint16 pad1;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineDepthStencilView;
+/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyDepthStencilView {
+   SVGA3dDepthStencilViewId depthStencilViewId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyDepthStencilView;
+/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dInputElementDesc {
+   uint32 inputSlot;
+   uint32 alignedByteOffset;
+   SVGA3dSurfaceFormat format;
+   SVGA3dInputClassification inputSlotClass;
+   uint32 instanceDataStepRate;
+   uint32 inputRegister;
+}
+#include "vmware_pack_end.h"
+SVGA3dInputElementDesc;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   /*
+    * XXX: How many of these can there be?
+    */
+   uint32 elid;
+   uint32 numDescs;
+   SVGA3dInputElementDesc desc[32];
+   uint32 pad[62];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXElementLayoutEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineElementLayout {
+   SVGA3dElementLayoutId elementLayoutId;
+   /* Followed by a variable number of SVGA3dInputElementDesc's. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineElementLayout;
+/* SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyElementLayout {
+   SVGA3dElementLayoutId elementLayoutId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyElementLayout;
+/* SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT */
+
+#define SVGA3D_DX_MAX_RENDER_TARGETS 8
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dDXBlendStatePerRT {
+      uint8 blendEnable;
+      uint8 srcBlend;
+      uint8 destBlend;
+      uint8 blendOp;
+      uint8 srcBlendAlpha;
+      uint8 destBlendAlpha;
+      uint8 blendOpAlpha;
+      uint8 renderTargetWriteMask;
+      uint8 logicOpEnable;
+      uint8 logicOp;
+      uint16 pad0;
+}
+#include "vmware_pack_end.h"
+SVGA3dDXBlendStatePerRT;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint8 alphaToCoverageEnable;
+   uint8 independentBlendEnable;
+   uint16 pad0;
+   SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
+   uint32 pad1[7];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXBlendStateEntry;
+
+/*
+ * Define a blend state object.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineBlendState {
+   SVGA3dBlendStateId blendId;
+   uint8 alphaToCoverageEnable;
+   uint8 independentBlendEnable;
+   uint16 pad0;
+   SVGA3dDXBlendStatePerRT perRT[SVGA3D_MAX_RENDER_TARGETS];
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineBlendState; /* SVGA_3D_CMD_DX_DEFINE_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyBlendState {
+   SVGA3dBlendStateId blendId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyBlendState; /* SVGA_3D_CMD_DX_DESTROY_BLEND_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint8 depthEnable;
+   SVGA3dDepthWriteMask depthWriteMask;
+   SVGA3dComparisonFunc depthFunc;
+   uint8 stencilEnable;
+   uint8 frontEnable;
+   uint8 backEnable;
+   uint8 stencilReadMask;
+   uint8 stencilWriteMask;
+
+   uint8 frontStencilFailOp;
+   uint8 frontStencilDepthFailOp;
+   uint8 frontStencilPassOp;
+   SVGA3dComparisonFunc frontStencilFunc;
+
+   uint8 backStencilFailOp;
+   uint8 backStencilDepthFailOp;
+   uint8 backStencilPassOp;
+   SVGA3dComparisonFunc backStencilFunc;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXDepthStencilEntry;
+
+/*
+ * Define a depth/stencil state object.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineDepthStencilState {
+   SVGA3dDepthStencilStateId depthStencilId;
+
+   uint8 depthEnable;
+   SVGA3dDepthWriteMask depthWriteMask;
+   SVGA3dComparisonFunc depthFunc;
+   uint8 stencilEnable;
+   uint8 frontEnable;
+   uint8 backEnable;
+   uint8 stencilReadMask;
+   uint8 stencilWriteMask;
+
+   uint8 frontStencilFailOp;
+   uint8 frontStencilDepthFailOp;
+   uint8 frontStencilPassOp;
+   SVGA3dComparisonFunc frontStencilFunc;
+
+   uint8 backStencilFailOp;
+   uint8 backStencilDepthFailOp;
+   uint8 backStencilPassOp;
+   SVGA3dComparisonFunc backStencilFunc;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineDepthStencilState;
+/* SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyDepthStencilState {
+   SVGA3dDepthStencilStateId depthStencilId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyDepthStencilState;
+/* SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint8 fillMode;
+   SVGA3dCullMode cullMode;
+   uint8 frontCounterClockwise;
+   uint8 provokingVertexLast;
+   int32 depthBias;
+   float depthBiasClamp;
+   float slopeScaledDepthBias;
+   uint8 depthClipEnable;
+   uint8 scissorEnable;
+   uint8 multisampleEnable;
+   uint8 antialiasedLineEnable;
+   float lineWidth;
+   uint8 lineStippleEnable;
+   uint8 lineStippleFactor;
+   uint16 lineStipplePattern;
+   uint32 forcedSampleCount;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXRasterizerStateEntry;
+
+/*
+ * Define a rasterizer state object.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineRasterizerState {
+   SVGA3dRasterizerStateId rasterizerId;
+
+   uint8 fillMode;
+   SVGA3dCullMode cullMode;
+   uint8 frontCounterClockwise;
+   uint8 provokingVertexLast;
+   int32 depthBias;
+   float depthBiasClamp;
+   float slopeScaledDepthBias;
+   uint8 depthClipEnable;
+   uint8 scissorEnable;
+   uint8 multisampleEnable;
+   uint8 antialiasedLineEnable;
+   float lineWidth;
+   uint8 lineStippleEnable;
+   uint8 lineStippleFactor;
+   uint16 lineStipplePattern;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineRasterizerState;
+/* SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyRasterizerState {
+   SVGA3dRasterizerStateId rasterizerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyRasterizerState;
+/* SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dFilter filter;
+   uint8 addressU;
+   uint8 addressV;
+   uint8 addressW;
+   uint8 pad0;
+   float mipLODBias;
+   uint8 maxAnisotropy;
+   SVGA3dComparisonFunc comparisonFunc;
+   uint16 pad1;
+   SVGA3dRGBAFloat borderColor;
+   float minLOD;
+   float maxLOD;
+   uint32 pad2[6];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXSamplerEntry;
+
+/*
+ * Define a sampler state object.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineSamplerState {
+   SVGA3dSamplerId samplerId;
+   SVGA3dFilter filter;
+   uint8 addressU;
+   uint8 addressV;
+   uint8 addressW;
+   uint8 pad0;
+   float mipLODBias;
+   uint8 maxAnisotropy;
+   SVGA3dComparisonFunc comparisonFunc;
+   uint16 pad1;
+   SVGA3dRGBAFloat borderColor;
+   float minLOD;
+   float maxLOD;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineSamplerState; /* SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroySamplerState {
+   SVGA3dSamplerId samplerId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */
+
+/*
+ * A single entry in a shader input/output signature.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSignatureEntry {
+   uint8 systemValue;
+   uint8 reg;                 /* register is a reserved word */
+   uint16 mask;
+   uint8 registerComponentType;
+   uint8 minPrecision;
+   uint16 pad0;
+}
+#include "vmware_pack_end.h"
+SVGA3dSignatureEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineShader {
+   SVGA3dShaderId shaderId;
+   SVGA3dShaderType type;
+   uint32 sizeInBytes; /* Number of bytes of shader text. */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineShader; /* SVGA_3D_CMD_DX_DEFINE_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGACOTableDXShaderEntry {
+   SVGA3dShaderType type;
+   uint32 sizeInBytes;
+   uint32 offsetInBytes;
+   SVGAMobId mobid;
+   uint32 numInputSignatureEntries;
+   uint32 numOutputSignatureEntries;
+
+   uint32 numPatchConstantSignatureEntries;
+
+   uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXShaderEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyShader {
+   SVGA3dShaderId shaderId;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyShader; /* SVGA_3D_CMD_DX_DESTROY_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindShader {
+   uint32 cid;
+   uint32 shid;
+   SVGAMobId mobid;
+   uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindShader;   /* SVGA_3D_CMD_DX_BIND_SHADER */
+
+/*
+ * The maximum number of streamout decl's in each streamout entry.
+ */
+#define SVGA3D_MAX_STREAMOUT_DECLS 64
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dStreamOutputDeclarationEntry {
+   uint32 outputSlot;
+   uint32 registerIndex;
+   uint8  registerMask;
+   uint8  pad0;
+   uint16 pad1;
+   uint32 stream;
+}
+#include "vmware_pack_end.h"
+SVGA3dStreamOutputDeclarationEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGACOTableDXStreamOutputEntry {
+   uint32 numOutputStreamEntries;
+   SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+   uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
+   uint32 rasterizedStream;
+   uint32 pad[250];
+}
+#include "vmware_pack_end.h"
+SVGACOTableDXStreamOutputEntry;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDefineStreamOutput {
+   SVGA3dStreamOutputId soid;
+   uint32 numOutputStreamEntries;
+   SVGA3dStreamOutputDeclarationEntry decl[SVGA3D_MAX_STREAMOUT_DECLS];
+   uint32 streamOutputStrideInBytes[SVGA3D_DX_MAX_SOTARGETS];
+   uint32 rasterizedStream;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDefineStreamOutput; /* SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXDestroyStreamOutput {
+   SVGA3dStreamOutputId soid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXDestroyStreamOutput; /* SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetStreamOutput {
+   SVGA3dStreamOutputId soid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetStreamOutput; /* SVGA_3D_CMD_DX_SET_STREAMOUTPUT */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 value;
+   uint32 mobId;
+   uint32 mobOffset;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXMobFence64;  /* SVGA_3D_CMD_DX_MOB_FENCE_64 */
+
+/*
+ * SVGA3dCmdDXSetCOTable --
+ *
+ * This command allows the guest to bind a mob to a context-object table.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetCOTable {
+   uint32 cid;
+   uint32 mobid;
+   SVGACOTableType type;
+   uint32 validSizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetCOTable; /* SVGA_3D_CMD_DX_SET_COTABLE */
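+
+/*
+ * Illustrative sketch, not part of the device interface: fill in a
+ * set-COTable command body.  Passing 0 for validSizeInBytes is assumed
+ * here to mean that no existing table contents carry over.
+ */
+static inline void
+SVGA3dCmdDXSetCOTableInit(SVGA3dCmdDXSetCOTable *cmd, /* out */
+                          uint32 cid,
+                          uint32 mobid,
+                          SVGACOTableType type,
+                          uint32 validSizeInBytes)
+{
+   cmd->cid = cid;
+   cmd->mobid = mobid;
+   cmd->type = type;
+   cmd->validSizeInBytes = validSizeInBytes;
+}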
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXReadbackCOTable {
+   uint32 cid;
+   SVGACOTableType type;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXReadbackCOTable; /* SVGA_3D_CMD_DX_READBACK_COTABLE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCOTableData {
+   uint32 mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCOTableData;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dBufferBinding {
+   uint32 bufferId;
+   uint32 stride;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGA3dBufferBinding;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dConstantBufferBinding {
+   uint32 sid;
+   uint32 offsetInBytes;
+   uint32 sizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dConstantBufferBinding;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGADXInputAssemblyMobFormat {
+   uint32 layoutId;
+   SVGA3dBufferBinding vertexBuffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
+   uint32 indexBufferSid;
+   uint32 pad;
+   uint32 indexBufferOffset;
+   uint32 indexBufferFormat;
+   uint32 topology;
+}
+#include "vmware_pack_end.h"
+SVGADXInputAssemblyMobFormat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGADXContextMobFormat {
+   SVGADXInputAssemblyMobFormat inputAssembly;
+
+   struct {
+      uint32 blendStateId;
+      uint32 blendFactor[4];
+      uint32 sampleMask;
+      uint32 depthStencilStateId;
+      uint32 stencilRef;
+      uint32 rasterizerStateId;
+      uint32 depthStencilViewId;
+      uint32 renderTargetViewIds[SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS];
+      uint32 unorderedAccessViewIds[SVGA3D_MAX_UAVIEWS];
+   } renderState;
+
+   struct {
+      uint32 targets[SVGA3D_DX_MAX_SOTARGETS];
+      uint32 soid;
+   } streamOut;
+   uint32 pad0[11];
+
+   uint8 numViewports;
+   uint8 numScissorRects;
+   uint16 pad1[1];
+
+   uint32 pad2[3];
+
+   SVGA3dViewport viewports[SVGA3D_DX_MAX_VIEWPORTS];
+   uint32 pad3[32];
+
+   SVGASignedRect scissorRects[SVGA3D_DX_MAX_SCISSORRECTS];
+   uint32 pad4[64];
+
+   struct {
+      uint32 queryID;
+      uint32 value;
+   } predication;
+   uint32 pad5[2];
+
+   struct {
+      uint32 shaderId;
+      SVGA3dConstantBufferBinding constantBuffers[SVGA3D_DX_MAX_CONSTBUFFERS];
+      uint32 shaderResources[SVGA3D_DX_MAX_SRVIEWS];
+      uint32 samplers[SVGA3D_DX_MAX_SAMPLERS];
+   } shaderState[SVGA3D_NUM_SHADERTYPE];
+   uint32 pad6[26];
+
+   SVGA3dQueryId queryID[SVGA3D_MAX_QUERY];
+
+   SVGA3dCOTableData cotables[SVGA_COTABLE_MAX];
+   uint32 pad7[381];
+}
+#include "vmware_pack_end.h"
+SVGADXContextMobFormat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXTempSetContext {
+   uint32 dxcid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXTempSetContext; /* SVGA_3D_CMD_DX_TEMP_SET_CONTEXT */
+
+#endif /* _SVGA3D_DX_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
new file mode 100644
index 0000000..a1c3687
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
@@ -0,0 +1,99 @@
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_limits.h --
+ *
+ *       SVGA 3d hardware limits
+ */
+
+#ifndef _SVGA3D_LIMITS_H_
+#define _SVGA3D_LIMITS_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+#define SVGA3D_NUM_CLIPPLANES                   6
+#define SVGA3D_MAX_RENDER_TARGETS               8
+#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS  (SVGA3D_MAX_RENDER_TARGETS)
+#define SVGA3D_MAX_UAVIEWS                      8
+#define SVGA3D_MAX_CONTEXT_IDS                  256
+#define SVGA3D_MAX_SURFACE_IDS                  (32 * 1024)
+
+/*
+ * Maximum ID a shader can be assigned on a given context.
+ */
+#define SVGA3D_MAX_SHADERIDS                    5000
+/*
+ * Maximum number of shaders of a given type that can be defined
+ * (including all contexts).
+ */
+#define SVGA3D_MAX_SIMULTANEOUS_SHADERS         20000
+
+#define SVGA3D_NUM_TEXTURE_UNITS                32
+#define SVGA3D_NUM_LIGHTS                       8
+
+/*
+ * Maximum size in dwords of shader text the SVGA device will allow.
+ * Currently 8 MB.
+ */
+#define SVGA3D_MAX_SHADER_MEMORY  (8 * 1024 * 1024 / sizeof(uint32))
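+
+/*
+ * For reference (illustrative arithmetic): with a 4-byte uint32 the
+ * limit above works out to 8 * 1024 * 1024 / 4 = 2097152 dwords.
+ */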
+
+#define SVGA3D_MAX_CLIP_PLANES    6
+
+/*
+ * This is the limit to the number of fixed-function texture
+ * transforms and texture coordinates we can support. It does *not*
+ * correspond to the number of texture image units (samplers) we
+ * support!
+ */
+#define SVGA3D_MAX_TEXTURE_COORDS 8
+
+/*
+ * Number of faces in a cubemap.
+ */
+#define SVGA3D_MAX_SURFACE_FACES 6
+
+/*
+ * Maximum number of array indexes in a GB surface (with DX enabled).
+ */
+#define SVGA3D_MAX_SURFACE_ARRAYSIZE 512
+
+/*
+ * The maximum number of vertex arrays we're guaranteed to support in
+ * SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_VERTEX_ARRAYS   32
+
+/*
+ * The maximum number of primitive ranges we're guaranteed to support
+ * in SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
+
+#endif /* _SVGA3D_LIMITS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
new file mode 100644
index 0000000..b44ce64
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
@@ -0,0 +1,50 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_reg.h --
+ *
+ *       SVGA 3d hardware definitions
+ */
+
+#ifndef _SVGA3D_REG_H_
+#define _SVGA3D_REG_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+#include "svga_reg.h"
+
+#include "svga3d_types.h"
+#include "svga3d_limits.h"
+#include "svga3d_cmd.h"
+#include "svga3d_dx.h"
+#include "svga3d_devcaps.h"
+
+
+#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
new file mode 100644
index 0000000..58704f0
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -0,0 +1,1204 @@
+/**************************************************************************
+ *
+ * Copyright © 2008-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifdef __KERNEL__
+
+#include <drm/vmwgfx_drm.h>
+#define surf_size_struct struct drm_vmw_size
+
+#else /* __KERNEL__ */
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
+#endif /* ARRAY_SIZE */
+
+#define DIV_ROUND_UP(x, y)  (((x) + (y) - 1) / (y))
+#define max_t(type, x, y)  ((x) > (y) ? (x) : (y))
+#define surf_size_struct SVGA3dSize
+#define u32 uint32
+
+#endif /* __KERNEL__ */
+
+#include "svga3d_reg.h"
+
+/*
+ * enum svga3d_block_desc describes the active data channels in a block.
+ *
+ * There can be at most four active channels in a block:
+ *    1. Red, bump W, luminance and depth are stored in the first channel.
+ *    2. Green, bump V and stencil are stored in the second channel.
+ *    3. Blue and bump U are stored in the third channel.
+ *    4. Alpha and bump Q are stored in the fourth channel.
+ *
+ * Block channels can be used to store compressed and buffer data:
+ *    1. For compressed formats, only the data channel is used and its size
+ *       is equal to that of a singular block in the compression scheme.
+ *    2. For buffer formats, only the data channel is used and its size is
+ *       exactly one byte in length.
+ *    3. In each case the bit depth represents the size of a singular block.
+ *
+ * Note: Compressed and IEEE formats do not use the bitMask structure.
+ */
+
+enum svga3d_block_desc {
+	SVGA3DBLOCKDESC_NONE        = 0,         /* No channels are active */
+	SVGA3DBLOCKDESC_BLUE        = 1 << 0,    /* Block with blue channel
+						    data */
+	SVGA3DBLOCKDESC_U           = 1 << 0,    /* Block with bump U channel
+						    data */
+	SVGA3DBLOCKDESC_UV_VIDEO    = 1 << 7,    /* Block with alternating video
+						    U and V */
+	SVGA3DBLOCKDESC_GREEN       = 1 << 1,    /* Block with green channel
+						    data */
+	SVGA3DBLOCKDESC_V           = 1 << 1,    /* Block with bump V channel
+						    data */
+	SVGA3DBLOCKDESC_STENCIL     = 1 << 1,    /* Block with a stencil
+						    channel */
+	SVGA3DBLOCKDESC_RED         = 1 << 2,    /* Block with red channel
+						    data */
+	SVGA3DBLOCKDESC_W           = 1 << 2,    /* Block with bump W channel
+						    data */
+	SVGA3DBLOCKDESC_LUMINANCE   = 1 << 2,    /* Block with luminance channel
+						    data */
+	SVGA3DBLOCKDESC_Y           = 1 << 2,    /* Block with video luminance
+						    data */
+	SVGA3DBLOCKDESC_DEPTH       = 1 << 2,    /* Block with depth channel */
+	SVGA3DBLOCKDESC_ALPHA       = 1 << 3,    /* Block with an alpha
+						    channel */
+	SVGA3DBLOCKDESC_Q           = 1 << 3,    /* Block with bump Q channel
+						    data */
+	SVGA3DBLOCKDESC_BUFFER      = 1 << 4,    /* Block stores 1 byte of
+						    data */
+	SVGA3DBLOCKDESC_COMPRESSED  = 1 << 5,    /* Block stores n bytes of
+						    data depending on the
+						    compression method used */
+	SVGA3DBLOCKDESC_IEEE_FP     = 1 << 6,    /* Block stores data in an IEEE
+						    floating point
+						    representation in
+						    all channels */
+	SVGA3DBLOCKDESC_PLANAR_YUV  = 1 << 8,    /* Three separate blocks store
+						    data. */
+	SVGA3DBLOCKDESC_U_VIDEO     = 1 << 9,    /* Block with U video data */
+	SVGA3DBLOCKDESC_V_VIDEO     = 1 << 10,   /* Block with V video data */
+	SVGA3DBLOCKDESC_EXP         = 1 << 11,   /* Shared exponent */
+	SVGA3DBLOCKDESC_SRGB        = 1 << 12,   /* Data is in sRGB format */
+	SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13,   /* 2 planes of Y, UV,
+						    e.g., NV12. */
+	SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14,   /* 3 planes of separate
+						    Y, U, V, e.g., YV12. */
+
+	SVGA3DBLOCKDESC_RG         = SVGA3DBLOCKDESC_RED |
+	SVGA3DBLOCKDESC_GREEN,
+	SVGA3DBLOCKDESC_RGB        = SVGA3DBLOCKDESC_RG |
+	SVGA3DBLOCKDESC_BLUE,
+	SVGA3DBLOCKDESC_RGB_SRGB   = SVGA3DBLOCKDESC_RGB |
+	SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_RGBA       = SVGA3DBLOCKDESC_RGB |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_RGBA_SRGB  = SVGA3DBLOCKDESC_RGBA |
+	SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_UV         = SVGA3DBLOCKDESC_U |
+	SVGA3DBLOCKDESC_V,
+	SVGA3DBLOCKDESC_UVL        = SVGA3DBLOCKDESC_UV |
+	SVGA3DBLOCKDESC_LUMINANCE,
+	SVGA3DBLOCKDESC_UVW        = SVGA3DBLOCKDESC_UV |
+	SVGA3DBLOCKDESC_W,
+	SVGA3DBLOCKDESC_UVWA       = SVGA3DBLOCKDESC_UVW |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_UVWQ       = SVGA3DBLOCKDESC_U |
+	SVGA3DBLOCKDESC_V |
+	SVGA3DBLOCKDESC_W |
+	SVGA3DBLOCKDESC_Q,
+	SVGA3DBLOCKDESC_LA         = SVGA3DBLOCKDESC_LUMINANCE |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_R_FP       = SVGA3DBLOCKDESC_RED |
+	SVGA3DBLOCKDESC_IEEE_FP,
+	SVGA3DBLOCKDESC_RG_FP      = SVGA3DBLOCKDESC_R_FP |
+	SVGA3DBLOCKDESC_GREEN,
+	SVGA3DBLOCKDESC_RGB_FP     = SVGA3DBLOCKDESC_RG_FP |
+	SVGA3DBLOCKDESC_BLUE,
+	SVGA3DBLOCKDESC_RGBA_FP    = SVGA3DBLOCKDESC_RGB_FP |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_DS         = SVGA3DBLOCKDESC_DEPTH |
+	SVGA3DBLOCKDESC_STENCIL,
+	SVGA3DBLOCKDESC_YUV        = SVGA3DBLOCKDESC_UV_VIDEO |
+	SVGA3DBLOCKDESC_Y,
+	SVGA3DBLOCKDESC_AYUV       = SVGA3DBLOCKDESC_ALPHA |
+	SVGA3DBLOCKDESC_Y |
+	SVGA3DBLOCKDESC_U_VIDEO |
+	SVGA3DBLOCKDESC_V_VIDEO,
+	SVGA3DBLOCKDESC_RGBE       = SVGA3DBLOCKDESC_RGB |
+	SVGA3DBLOCKDESC_EXP,
+	SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
+	SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_NV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
+	SVGA3DBLOCKDESC_2PLANAR_YUV,
+	SVGA3DBLOCKDESC_YV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
+	SVGA3DBLOCKDESC_3PLANAR_YUV,
+};
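+
+/*
+ * Illustrative example (editor's note): because the descriptors are
+ * bitmasks, composite values decompose into their channel bits, e.g.:
+ *
+ *   SVGA3DBLOCKDESC_NV12 & SVGA3DBLOCKDESC_PLANAR_YUV   is non-zero
+ *   SVGA3DBLOCKDESC_RGBA & SVGA3DBLOCKDESC_ALPHA        is non-zero
+ *   SVGA3DBLOCKDESC_RGB  & SVGA3DBLOCKDESC_ALPHA        is zero
+ */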
+
+/*
+ * SVGA3dSurfaceDesc describes the actual pixel data.
+ *
+ * This structure provides the following information:
+ *    1. Block description.
+ *    2. Dimensions of a block in the surface.
+ *    3. Size of block in bytes.
+ *    4. Bit depth of the pixel data.
+ *    5. Channel bit depths and masks (if applicable).
+ */
+struct svga3d_channel_def {
+	union {
+		u8 blue;
+		u8 u;
+		u8 uv_video;
+		u8 u_video;
+	};
+	union {
+		u8 green;
+		u8 v;
+		u8 stencil;
+		u8 v_video;
+	};
+	union {
+		u8 red;
+		u8 w;
+		u8 luminance;
+		u8 y;
+		u8 depth;
+		u8 data;
+	};
+	union {
+		u8 alpha;
+		u8 q;
+		u8 exp;
+	};
+};
+
+struct svga3d_surface_desc {
+	SVGA3dSurfaceFormat format;
+	enum svga3d_block_desc block_desc;
+	surf_size_struct block_size;
+	u32 bytes_per_block;
+	u32 pitch_bytes_per_block;
+
+	u32 total_bit_depth;
+	struct svga3d_channel_def bit_depth;
+	struct svga3d_channel_def bit_offset;
+};
+
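+/*
+ * Reading one entry of the table below (editor's note): for
+ * SVGA3D_A8R8G8B8 the block is 1x1x1 pixels and 4 bytes, 32 bits in
+ * total, with 8-bit blue/green/red/alpha channels at bit offsets
+ * 0/8/16/24 respectively.
+ */
+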
+static const struct svga3d_surface_desc svga3d_surface_descs[] = {
+   {SVGA3D_FORMAT_INVALID, SVGA3DBLOCKDESC_NONE,
+      {1, 1, 1},  0, 0,
+      0, {{0}, {0}, {0}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  4, 4,
+      24, {{8}, {8}, {8}, {0}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  2, 2,
+      16, {{5}, {6}, {5}, {0}},
+      {{0}, {5}, {11}, {0}}},
+
+   {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  2, 2,
+      15, {{5}, {5}, {5}, {0}},
+      {{0}, {5}, {10}, {0}}},
+
+   {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  2, 2,
+      16, {{5}, {5}, {5}, {1}},
+      {{0}, {5}, {10}, {15}}},
+
+   {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  2, 2,
+      16, {{4}, {4}, {4}, {4}},
+      {{0}, {4}, {8}, {12}}},
+
+   {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {1}, {15}, {0}},
+      {{0}, {15}, {0}, {0}}},
+
+   {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_LUMINANCE,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {4}, {4}},
+      {{0}, {0}, {0}, {4}}},
+
+   {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_LUMINANCE,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {8}, {8}},
+      {{0}, {0}, {0}, {8}}},
+
+   {SVGA3D_DXT1, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_DXT2, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_DXT3, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_DXT4, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_DXT5, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {8}, {8}},
+      {{0}, {0}, {0}, {8}}},
+
+   {SVGA3D_BUMPL6V5U5, SVGA3DBLOCKDESC_UVL,
+      {1, 1, 1},  2, 2,
+      16, {{5}, {5}, {6}, {0}},
+      {{11}, {6}, {0}, {0}}},
+
+   {SVGA3D_BUMPX8L8V8U8, SVGA3DBLOCKDESC_UVL,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {0}},
+      {{16}, {8}, {0}, {0}}},
+
+   {SVGA3D_BUMPL8V8U8, SVGA3DBLOCKDESC_UVL,
+      {1, 1, 1},  3, 3,
+      24, {{8}, {8}, {8}, {0}},
+      {{16}, {8}, {0}, {0}}},
+
+   {SVGA3D_ARGB_S10E5, SVGA3DBLOCKDESC_RGBA_FP,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_ARGB_S23E8, SVGA3DBLOCKDESC_RGBA_FP,
+      {1, 1, 1},  16, 16,
+      128, {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  2, 2,
+      16, {{8}, {8}, {0}, {0}},
+      {{8}, {0}, {0}, {0}}},
+
+   {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{24}, {16}, {8}, {0}}},
+
+   {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  2, 2,
+      16, {{8}, {8}, {0}, {0}},
+      {{8}, {0}, {0}, {0}}},
+
+   {SVGA3D_X8L8V8U8, SVGA3DBLOCKDESC_UVL,
+      {1, 1, 1},  4, 4,
+      24, {{8}, {8}, {8}, {0}},
+      {{16}, {8}, {0}, {0}}},
+
+   {SVGA3D_A2W10V10U10, SVGA3DBLOCKDESC_UVWA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_ALPHA,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {0}, {8}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R_S10E5, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R_S23E8, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_RG_S10E5, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_RG_S23E8, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_BUFFER, SVGA3DBLOCKDESC_BUFFER,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  4, 4,
+      32, {{16}, {16}, {0}, {0}},
+      {{16}, {0}, {0}, {0}}},
+
+   {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {0}, {16}, {0}}},
+
+   {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_UYVY, SVGA3DBLOCKDESC_YUV,
+      {1, 1, 1},  2, 2,
+      16, {{8}, {0}, {8}, {0}},
+      {{0}, {0}, {8}, {0}}},
+
+   {SVGA3D_YUY2, SVGA3DBLOCKDESC_YUV,
+      {1, 1, 1},  2, 2,
+      16, {{8}, {0}, {8}, {0}},
+      {{8}, {0}, {0}, {0}}},
+
+   {SVGA3D_NV12, SVGA3DBLOCKDESC_NV12,
+      {2, 2, 1},  6, 2,
+      48, {{0}, {0}, {48}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  16, 16,
+      128, {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  16, 16,
+      128, {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_UVWQ,
+      {1, 1, 1},  16, 16,
+      128, {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  12, 12,
+      96, {{32}, {32}, {32}, {0}},
+      {{64}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32B32_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
+      {1, 1, 1},  12, 12,
+      96, {{32}, {32}, {32}, {0}},
+      {{64}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  12, 12,
+      96, {{32}, {32}, {32}, {0}},
+      {{64}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_UVW,
+      {1, 1, 1},  12, 12,
+      96, {{32}, {32}, {32}, {0}},
+      {{64}, {32}, {0}, {0}}},
+
+   {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_UVWQ,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_UVWQ,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {8}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_D32_FLOAT_S8X24_UINT, SVGA3DBLOCKDESC_DS,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {8}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R32_FLOAT_X8X24_TYPELESS, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_X32_TYPELESS_G8X24_UINT, SVGA3DBLOCKDESC_GREEN,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {8}, {0}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_R11G11B10_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {11}, {11}, {0}},
+      {{0}, {10}, {21}, {0}}},
+
+   {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
+
+   {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_R24_UNORM_X8_TYPELESS, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {24}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_X24_TYPELESS_G8_UINT, SVGA3DBLOCKDESC_GREEN,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {0}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_UV,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_U,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_U,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_U,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_U,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_P8, SVGA3DBLOCKDESC_RED,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGBE,
+      {1, 1, 1},  4, 4,
+      32, {{9}, {9}, {9}, {5}},
+      {{18}, {9}, {0}, {27}}},
+
+   {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
+
+   {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_ATI1, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_ATI2, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  4, 4,
+      24, {{8}, {8}, {8}, {0}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_SRGB,
+      {1, 1, 1},  4, 4,
+      24, {{8}, {8}, {8}, {0}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {8}, {24}, {0}},
+      {{0}, {24}, {0}, {0}}},
+
+   {SVGA3D_YV12, SVGA3DBLOCKDESC_YV12,
+      {2, 2, 1},  6, 2,
+      48, {{0}, {0}, {48}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R32G32B32A32_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
+      {1, 1, 1},  16, 16,
+      128, {{32}, {32}, {32}, {32}},
+      {{64}, {32}, {0}, {96}}},
+
+   {SVGA3D_R16G16B16A16_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  8, 8,
+      64, {{16}, {16}, {16}, {16}},
+      {{32}, {16}, {0}, {48}}},
+
+   {SVGA3D_R32G32_FLOAT, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  8, 8,
+      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {0}, {0}}},
+
+   {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{10}, {10}, {10}, {2}},
+      {{0}, {10}, {20}, {30}}},
+
+   {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{24}, {16}, {8}, {0}}},
+
+   {SVGA3D_R16G16_FLOAT, SVGA3DBLOCKDESC_RG_FP,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
+
+   {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {16}, {16}, {0}},
+      {{0}, {0}, {16}, {0}}},
+
+   {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  4, 4,
+      32, {{16}, {16}, {0}, {0}},
+      {{16}, {0}, {0}, {0}}},
+
+   {SVGA3D_R32_FLOAT, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  4, 4,
+      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG,
+      {1, 1, 1},  2, 2,
+      16, {{8}, {8}, {0}, {0}},
+      {{8}, {0}, {0}, {0}}},
+
+   {SVGA3D_R16_FLOAT, SVGA3DBLOCKDESC_R_FP,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH,
+      {1, 1, 1},  2, 2,
+      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_ALPHA,
+      {1, 1, 1},  1, 1,
+      8, {{0}, {0}, {0}, {8}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  2, 2,
+      16, {{5}, {6}, {5}, {0}},
+      {{0}, {5}, {11}, {0}}},
+
+   {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  2, 2,
+      16, {{5}, {5}, {5}, {1}},
+      {{0}, {5}, {10}, {15}}},
+
+   {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+      {1, 1, 1},  4, 4,
+      32, {{8}, {8}, {8}, {8}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB,
+      {1, 1, 1},  4, 4,
+      24, {{8}, {8}, {8}, {0}},
+      {{0}, {8}, {16}, {24}}},
+
+   {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  8, 8,
+      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+   {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+      {4, 4, 1},  16, 16,
+      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {0}, {0}}},
+
+};
+
+static inline u32 clamped_umul32(u32 a, u32 b)
+{
+	uint64_t tmp = (uint64_t) a*b;
+	return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+}
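+
+/*
+ * Example (editor's note): clamped_umul32(0x10000, 0x10000) would
+ * overflow 32 bits (the product is 2^32), so the result saturates to
+ * 0xffffffff rather than wrapping to 0.
+ */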
+
+static inline const struct svga3d_surface_desc *
+svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
+{
+	if (format < ARRAY_SIZE(svga3d_surface_descs))
+		return &svga3d_surface_descs[format];
+
+	return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * svga3dsurface_get_mip_size --
+ *
+ *      Given a base level size and the mip level, compute the size of
+ *      the mip level.
+ *
+ * Results:
+ *      See above.
+ *
+ * Side effects:
+ *      None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline surf_size_struct
+svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
+{
+	surf_size_struct size;
+
+	size.width = max_t(u32, base_level.width >> mip_level, 1);
+	size.height = max_t(u32, base_level.height >> mip_level, 1);
+	size.depth = max_t(u32, base_level.depth >> mip_level, 1);
+	return size;
+}
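+
+/*
+ * Worked example (editor's note): for a 256x128x1 base level, mip 2 is
+ * (256 >> 2) x (128 >> 2) x max(1 >> 2, 1) = 64x32x1; each dimension
+ * is clamped so it never shrinks below 1.
+ */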
+
+static inline void
+svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
+				 const surf_size_struct *pixel_size,
+				 surf_size_struct *block_size)
+{
+	block_size->width = DIV_ROUND_UP(pixel_size->width,
+					 desc->block_size.width);
+	block_size->height = DIV_ROUND_UP(pixel_size->height,
+					  desc->block_size.height);
+	block_size->depth = DIV_ROUND_UP(pixel_size->depth,
+					 desc->block_size.depth);
+}
+
+static inline bool
+svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
+{
+	return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
+}
+
+static inline u32
+svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
+			      const surf_size_struct *size)
+{
+	u32 pitch;
+	surf_size_struct blocks;
+
+	svga3dsurface_get_size_in_blocks(desc, size, &blocks);
+
+	pitch = blocks.width * desc->pitch_bytes_per_block;
+
+	return pitch;
+}
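+
+/*
+ * Worked example (editor's note): an 800-pixel-wide SVGA3D_A8R8G8B8
+ * surface spans 800 one-pixel blocks of 4 pitch bytes each, i.e. a
+ * 3200-byte pitch; SVGA3D_DXT1 at the same width spans
+ * DIV_ROUND_UP(800, 4) = 200 blocks of 8 bytes, i.e. 1600 bytes.
+ */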
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * svga3dsurface_get_image_buffer_size --
+ *
+ *      Return the number of bytes of buffer space required to store
+ *      one image of a surface, optionally using the specified pitch.
+ *
+ *      If pitch is zero, it is assumed that rows are tightly packed.
+ *
 *      This function is overflow-safe. If the result would have
 *      overflowed, MAX_UINT32 is returned instead.
+ *
+ * Results:
+ *      Byte count.
+ *
+ * Side effects:
+ *      None.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static inline u32
+svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
+				    const surf_size_struct *size,
+				    u32 pitch)
+{
+	surf_size_struct image_blocks;
+	u32 slice_size, total_size;
+
+	svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
+
+	if (svga3dsurface_is_planar_surface(desc)) {
+		total_size = clamped_umul32(image_blocks.width,
+					    image_blocks.height);
+		total_size = clamped_umul32(total_size, image_blocks.depth);
+		total_size = clamped_umul32(total_size, desc->bytes_per_block);
+		return total_size;
+	}
+
+	if (pitch == 0)
+		pitch = svga3dsurface_calculate_pitch(desc, size);
+
+	slice_size = clamped_umul32(image_blocks.height, pitch);
+	total_size = clamped_umul32(slice_size, image_blocks.depth);
+
+	return total_size;
+}
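+
+/*
+ * Worked example (editor's note): a 64x64 SVGA3D_NV12 image uses 2x2x1
+ * blocks of 6 bytes, so the planar path yields 32 * 32 * 1 * 6 = 6144
+ * bytes, matching the expected 1.5 bytes per pixel for NV12.
+ */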
+
+static inline u32
+svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
+				  surf_size_struct base_level_size,
+				  u32 num_mip_levels,
+				  u32 num_layers)
+{
+	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+	u32 total_size = 0;
+	u32 mip;
+
+	for (mip = 0; mip < num_mip_levels; mip++) {
+		surf_size_struct size =
+			svga3dsurface_get_mip_size(base_level_size, mip);
+		total_size += svga3dsurface_get_image_buffer_size(desc,
+								  &size, 0);
+	}
+
+	return total_size * num_layers;
+}
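+
+/*
+ * Worked example (editor's note): a 256x256 SVGA3D_R8G8B8A8_UNORM
+ * surface with two mip levels and one layer serializes to
+ * 256*256*4 + 128*128*4 = 262144 + 65536 = 327680 bytes.
+ */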
+
+
+/**
+ * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
+ * in an image (or volume).
+ *
+ * @format: The surface format.
+ * @width: The image width in pixels.
+ * @height: The image height in pixels.
+ * @x: X coordinate of the pixel.
+ * @y: Y coordinate of the pixel.
+ * @z: Z coordinate of the pixel.
+ */
+static inline u32
+svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
+			       u32 width, u32 height,
+			       u32 x, u32 y, u32 z)
+{
+	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+	const u32 bw = desc->block_size.width, bh = desc->block_size.height;
+	const u32 bd = desc->block_size.depth;
+	const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
+	const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
+	const u32 offset = (z / bd * imgstride +
+			    y / bh * rowstride +
+			    x / bw * desc->bytes_per_block);
+	return offset;
+}
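+
+/*
+ * Worked example (editor's note): for SVGA3D_DXT1 (4x4 blocks, 8 bytes
+ * per block) in a 16x16 image, pixel (x=5, y=9, z=0) falls in block
+ * column 1, row 2; rowstride = DIV_ROUND_UP(16, 4) * 8 = 32, so the
+ * offset is 2 * 32 + 1 * 8 = 72 bytes.
+ */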
+
+
+static inline u32
+svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
+			       surf_size_struct baseLevelSize,
+			       u32 numMipLevels,
+			       u32 face,
+			       u32 mip)
+{
+	u32 offset;
+	u32 mipChainBytes;
+	u32 mipChainBytesToLevel;
+	u32 i;
+	const struct svga3d_surface_desc *desc;
+	surf_size_struct mipSize;
+	u32 bytes;
+
+	desc = svga3dsurface_get_desc(format);
+
+	mipChainBytes = 0;
+	mipChainBytesToLevel = 0;
+	for (i = 0; i < numMipLevels; i++) {
+		mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
+		bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
+		mipChainBytes += bytes;
+		if (i < mip)
+			mipChainBytesToLevel += bytes;
+	}
+
+	offset = mipChainBytes * face + mipChainBytesToLevel;
+
+	return offset;
+}
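+
+/*
+ * Layout note (editor's addition): images are stored face-major, each
+ * face holding its complete mip chain, so the offset is
+ * face * mip_chain_bytes plus the size of the mip levels that precede
+ * the requested level within that face.
+ */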
+
+
+/**
+ * svga3dsurface_is_gb_screen_target_format - Is the specified format usable as
+ *                                            a ScreenTarget?
+ *                                            (with just the GBObjects cap-bit
+ *                                             set)
+ * @format: format to be queried
+ *
+ * RETURNS:
+ * true if the queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_gb_screen_target_format(SVGA3dSurfaceFormat format)
+{
+	return (format == SVGA3D_X8R8G8B8 ||
+		format == SVGA3D_A8R8G8B8 ||
+		format == SVGA3D_R5G6B5   ||
+		format == SVGA3D_X1R5G5B5 ||
+		format == SVGA3D_A1R5G5B5 ||
+		format == SVGA3D_P8);
+}
+
+
+/**
+ * svga3dsurface_is_dx_screen_target_format - Is the specified format usable as
+ *                                            a ScreenTarget?
+ *                                            (with DX10 enabled)
+ *
+ * @format: format to be queried
+ *
+ * Results:
+ * true if the queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_dx_screen_target_format(SVGA3dSurfaceFormat format)
+{
+	return (format == SVGA3D_R8G8B8A8_UNORM ||
+		format == SVGA3D_B8G8R8A8_UNORM ||
+		format == SVGA3D_B8G8R8X8_UNORM);
+}
+
+
+/**
+ * svga3dsurface_is_screen_target_format - Is the specified format usable as a
+ *                                         ScreenTarget?
+ *                                         (for some combination of caps)
+ *
+ * @format: format to be queried
+ *
+ * Results:
+ * true if the queried format is valid for screen targets
+ */
+static inline bool
+svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
+{
+	if (svga3dsurface_is_gb_screen_target_format(format))
+		return true;
+	return svga3dsurface_is_dx_screen_target_format(format);
+}
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
new file mode 100644
index 0000000..27b33ba
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
@@ -0,0 +1,1633 @@
+/**********************************************************
+ * Copyright 2012-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_types.h --
+ *
+ *       SVGA 3d hardware definitions for basic types
+ */
+
+#ifndef _SVGA3D_TYPES_H_
+#define _SVGA3D_TYPES_H_
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_VMCORE
+
+#include "includeCheck.h"
+
+/*
+ * Generic Types
+ */
+
+#define SVGA3D_INVALID_ID         ((uint32)-1)
+
+typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
+typedef uint32 SVGA3dColor; /* a, r, g, b */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCopyRect {
+   uint32               x;
+   uint32               y;
+   uint32               w;
+   uint32               h;
+   uint32               srcx;
+   uint32               srcy;
+}
+#include "vmware_pack_end.h"
+SVGA3dCopyRect;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCopyBox {
+   uint32               x;
+   uint32               y;
+   uint32               z;
+   uint32               w;
+   uint32               h;
+   uint32               d;
+   uint32               srcx;
+   uint32               srcy;
+   uint32               srcz;
+}
+#include "vmware_pack_end.h"
+SVGA3dCopyBox;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dRect {
+   uint32               x;
+   uint32               y;
+   uint32               w;
+   uint32               h;
+}
+#include "vmware_pack_end.h"
+SVGA3dRect;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               x;
+   uint32               y;
+   uint32               z;
+   uint32               w;
+   uint32               h;
+   uint32               d;
+}
+#include "vmware_pack_end.h"
+SVGA3dBox;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               x;
+   uint32               y;
+   uint32               z;
+}
+#include "vmware_pack_end.h"
+SVGA3dPoint;
+
+/*
+ * Surface formats.
+ */
+typedef enum SVGA3dSurfaceFormat {
+   SVGA3D_FORMAT_INVALID               = 0,
+
+   SVGA3D_X8R8G8B8                     = 1,
+   SVGA3D_FORMAT_MIN                   = 1,
+
+   SVGA3D_A8R8G8B8                     = 2,
+
+   SVGA3D_R5G6B5                       = 3,
+   SVGA3D_X1R5G5B5                     = 4,
+   SVGA3D_A1R5G5B5                     = 5,
+   SVGA3D_A4R4G4B4                     = 6,
+
+   SVGA3D_Z_D32                        = 7,
+   SVGA3D_Z_D16                        = 8,
+   SVGA3D_Z_D24S8                      = 9,
+   SVGA3D_Z_D15S1                      = 10,
+
+   SVGA3D_LUMINANCE8                   = 11,
+   SVGA3D_LUMINANCE4_ALPHA4            = 12,
+   SVGA3D_LUMINANCE16                  = 13,
+   SVGA3D_LUMINANCE8_ALPHA8            = 14,
+
+   SVGA3D_DXT1                         = 15,
+   SVGA3D_DXT2                         = 16,
+   SVGA3D_DXT3                         = 17,
+   SVGA3D_DXT4                         = 18,
+   SVGA3D_DXT5                         = 19,
+
+   SVGA3D_BUMPU8V8                     = 20,
+   SVGA3D_BUMPL6V5U5                   = 21,
+   SVGA3D_BUMPX8L8V8U8                 = 22,
+   SVGA3D_BUMPL8V8U8                   = 23,
+
+   SVGA3D_ARGB_S10E5                   = 24,   /* 16-bit floating-point ARGB */
+   SVGA3D_ARGB_S23E8                   = 25,   /* 32-bit floating-point ARGB */
+
+   SVGA3D_A2R10G10B10                  = 26,
+
+   /* signed formats */
+   SVGA3D_V8U8                         = 27,
+   SVGA3D_Q8W8V8U8                     = 28,
+   SVGA3D_CxV8U8                       = 29,
+
+   /* mixed formats */
+   SVGA3D_X8L8V8U8                     = 30,
+   SVGA3D_A2W10V10U10                  = 31,
+
+   SVGA3D_ALPHA8                       = 32,
+
+   /* Single- and dual-component floating point formats */
+   SVGA3D_R_S10E5                      = 33,
+   SVGA3D_R_S23E8                      = 34,
+   SVGA3D_RG_S10E5                     = 35,
+   SVGA3D_RG_S23E8                     = 36,
+
+   SVGA3D_BUFFER                       = 37,
+
+   SVGA3D_Z_D24X8                      = 38,
+
+   SVGA3D_V16U16                       = 39,
+
+   SVGA3D_G16R16                       = 40,
+   SVGA3D_A16B16G16R16                 = 41,
+
+   /* Packed Video formats */
+   SVGA3D_UYVY                         = 42,
+   SVGA3D_YUY2                         = 43,
+
+   /* Planar video formats */
+   SVGA3D_NV12                         = 44,
+
+   /* Video format with alpha */
+   SVGA3D_AYUV                         = 45,
+
+   SVGA3D_R32G32B32A32_TYPELESS        = 46,
+   SVGA3D_R32G32B32A32_UINT            = 47,
+   SVGA3D_R32G32B32A32_SINT            = 48,
+   SVGA3D_R32G32B32_TYPELESS           = 49,
+   SVGA3D_R32G32B32_FLOAT              = 50,
+   SVGA3D_R32G32B32_UINT               = 51,
+   SVGA3D_R32G32B32_SINT               = 52,
+   SVGA3D_R16G16B16A16_TYPELESS        = 53,
+   SVGA3D_R16G16B16A16_UINT            = 54,
+   SVGA3D_R16G16B16A16_SNORM           = 55,
+   SVGA3D_R16G16B16A16_SINT            = 56,
+   SVGA3D_R32G32_TYPELESS              = 57,
+   SVGA3D_R32G32_UINT                  = 58,
+   SVGA3D_R32G32_SINT                  = 59,
+   SVGA3D_R32G8X24_TYPELESS            = 60,
+   SVGA3D_D32_FLOAT_S8X24_UINT         = 61,
+   SVGA3D_R32_FLOAT_X8X24_TYPELESS     = 62,
+   SVGA3D_X32_TYPELESS_G8X24_UINT      = 63,
+   SVGA3D_R10G10B10A2_TYPELESS         = 64,
+   SVGA3D_R10G10B10A2_UINT             = 65,
+   SVGA3D_R11G11B10_FLOAT              = 66,
+   SVGA3D_R8G8B8A8_TYPELESS            = 67,
+   SVGA3D_R8G8B8A8_UNORM               = 68,
+   SVGA3D_R8G8B8A8_UNORM_SRGB          = 69,
+   SVGA3D_R8G8B8A8_UINT                = 70,
+   SVGA3D_R8G8B8A8_SINT                = 71,
+   SVGA3D_R16G16_TYPELESS              = 72,
+   SVGA3D_R16G16_UINT                  = 73,
+   SVGA3D_R16G16_SINT                  = 74,
+   SVGA3D_R32_TYPELESS                 = 75,
+   SVGA3D_D32_FLOAT                    = 76,
+   SVGA3D_R32_UINT                     = 77,
+   SVGA3D_R32_SINT                     = 78,
+   SVGA3D_R24G8_TYPELESS               = 79,
+   SVGA3D_D24_UNORM_S8_UINT            = 80,
+   SVGA3D_R24_UNORM_X8_TYPELESS        = 81,
+   SVGA3D_X24_TYPELESS_G8_UINT         = 82,
+   SVGA3D_R8G8_TYPELESS                = 83,
+   SVGA3D_R8G8_UNORM                   = 84,
+   SVGA3D_R8G8_UINT                    = 85,
+   SVGA3D_R8G8_SINT                    = 86,
+   SVGA3D_R16_TYPELESS                 = 87,
+   SVGA3D_R16_UNORM                    = 88,
+   SVGA3D_R16_UINT                     = 89,
+   SVGA3D_R16_SNORM                    = 90,
+   SVGA3D_R16_SINT                     = 91,
+   SVGA3D_R8_TYPELESS                  = 92,
+   SVGA3D_R8_UNORM                     = 93,
+   SVGA3D_R8_UINT                      = 94,
+   SVGA3D_R8_SNORM                     = 95,
+   SVGA3D_R8_SINT                      = 96,
+   SVGA3D_P8                           = 97,
+   SVGA3D_R9G9B9E5_SHAREDEXP           = 98,
+   SVGA3D_R8G8_B8G8_UNORM              = 99,
+   SVGA3D_G8R8_G8B8_UNORM              = 100,
+   SVGA3D_BC1_TYPELESS                 = 101,
+   SVGA3D_BC1_UNORM_SRGB               = 102,
+   SVGA3D_BC2_TYPELESS                 = 103,
+   SVGA3D_BC2_UNORM_SRGB               = 104,
+   SVGA3D_BC3_TYPELESS                 = 105,
+   SVGA3D_BC3_UNORM_SRGB               = 106,
+   SVGA3D_BC4_TYPELESS                 = 107,
+   SVGA3D_ATI1                         = 108,   /* DX9-specific BC4_UNORM */
+   SVGA3D_BC4_SNORM                    = 109,
+   SVGA3D_BC5_TYPELESS                 = 110,
+   SVGA3D_ATI2                         = 111,   /* DX9-specific BC5_UNORM */
+   SVGA3D_BC5_SNORM                    = 112,
+   SVGA3D_R10G10B10_XR_BIAS_A2_UNORM   = 113,
+   SVGA3D_B8G8R8A8_TYPELESS            = 114,
+   SVGA3D_B8G8R8A8_UNORM_SRGB          = 115,
+   SVGA3D_B8G8R8X8_TYPELESS            = 116,
+   SVGA3D_B8G8R8X8_UNORM_SRGB          = 117,
+
+   /* Advanced depth formats. */
+   SVGA3D_Z_DF16                       = 118,
+   SVGA3D_Z_DF24                       = 119,
+   SVGA3D_Z_D24S8_INT                  = 120,
+
+   /* Planar video formats. */
+   SVGA3D_YV12                         = 121,
+
+   SVGA3D_R32G32B32A32_FLOAT           = 122,
+   SVGA3D_R16G16B16A16_FLOAT           = 123,
+   SVGA3D_R16G16B16A16_UNORM           = 124,
+   SVGA3D_R32G32_FLOAT                 = 125,
+   SVGA3D_R10G10B10A2_UNORM            = 126,
+   SVGA3D_R8G8B8A8_SNORM               = 127,
+   SVGA3D_R16G16_FLOAT                 = 128,
+   SVGA3D_R16G16_UNORM                 = 129,
+   SVGA3D_R16G16_SNORM                 = 130,
+   SVGA3D_R32_FLOAT                    = 131,
+   SVGA3D_R8G8_SNORM                   = 132,
+   SVGA3D_R16_FLOAT                    = 133,
+   SVGA3D_D16_UNORM                    = 134,
+   SVGA3D_A8_UNORM                     = 135,
+   SVGA3D_BC1_UNORM                    = 136,
+   SVGA3D_BC2_UNORM                    = 137,
+   SVGA3D_BC3_UNORM                    = 138,
+   SVGA3D_B5G6R5_UNORM                 = 139,
+   SVGA3D_B5G5R5A1_UNORM               = 140,
+   SVGA3D_B8G8R8A8_UNORM               = 141,
+   SVGA3D_B8G8R8X8_UNORM               = 142,
+   SVGA3D_BC4_UNORM                    = 143,
+   SVGA3D_BC5_UNORM                    = 144,
+
+   SVGA3D_FORMAT_MAX
+} SVGA3dSurfaceFormat;
+
+typedef enum SVGA3dSurfaceFlags {
+   SVGA3D_SURFACE_CUBEMAP               = (1 << 0),
+
+   /*
+    * HINT flags are not enforced by the device but are useful for
+    * performance.
+    */
+   SVGA3D_SURFACE_HINT_STATIC           = (1 << 1),
+   SVGA3D_SURFACE_HINT_DYNAMIC          = (1 << 2),
+   SVGA3D_SURFACE_HINT_INDEXBUFFER      = (1 << 3),
+   SVGA3D_SURFACE_HINT_VERTEXBUFFER     = (1 << 4),
+   SVGA3D_SURFACE_HINT_TEXTURE          = (1 << 5),
+   SVGA3D_SURFACE_HINT_RENDERTARGET     = (1 << 6),
+   SVGA3D_SURFACE_HINT_DEPTHSTENCIL     = (1 << 7),
+   SVGA3D_SURFACE_HINT_WRITEONLY        = (1 << 8),
+   SVGA3D_SURFACE_MASKABLE_ANTIALIAS    = (1 << 9),
+   SVGA3D_SURFACE_AUTOGENMIPMAPS        = (1 << 10),
+   SVGA3D_SURFACE_DECODE_RENDERTARGET   = (1 << 11),
+
+   /*
+    * Is this surface using a base-level pitch for its mob backing?
+    *
+    * This flag is not intended to be set by guest drivers, but is instead
+    * set by the device when the surface is bound to a mob with a specified
+    * pitch.
+    */
+   SVGA3D_SURFACE_MOB_PITCH             = (1 << 12),
+
+   SVGA3D_SURFACE_INACTIVE              = (1 << 13),
+   SVGA3D_SURFACE_HINT_RT_LOCKABLE      = (1 << 14),
+   SVGA3D_SURFACE_VOLUME                = (1 << 15),
+
+   /*
+    * Required to be set on a surface to bind it to a screen target.
+    */
+   SVGA3D_SURFACE_SCREENTARGET          = (1 << 16),
+
+   /*
+    * Align images in the guest-backing mob to 16 bytes.
+    */
+   SVGA3D_SURFACE_ALIGN16               = (1 << 17),
+
+   SVGA3D_SURFACE_1D                    = (1 << 18),
+   SVGA3D_SURFACE_ARRAY                 = (1 << 19),
+
+   /*
+    * Bind flags.
+    * These are enforced for any surface defined with DefineGBSurface_v2.
+    */
+   SVGA3D_SURFACE_BIND_VERTEX_BUFFER    = (1 << 20),
+   SVGA3D_SURFACE_BIND_INDEX_BUFFER     = (1 << 21),
+   SVGA3D_SURFACE_BIND_CONSTANT_BUFFER  = (1 << 22),
+   SVGA3D_SURFACE_BIND_SHADER_RESOURCE  = (1 << 23),
+   SVGA3D_SURFACE_BIND_RENDER_TARGET    = (1 << 24),
+   SVGA3D_SURFACE_BIND_DEPTH_STENCIL    = (1 << 25),
+   SVGA3D_SURFACE_BIND_STREAM_OUTPUT    = (1 << 26),
+
+   /*
+    * A note on staging flags:
+    *
+    * The STAGING flags note that the surface will not be used directly by the
+    * drawing pipeline, i.e. that it will not be bound to any bind point.
+    * Staging surfaces may be used by copy operations to move data in and out
+    * of other surfaces.
+    *
+    * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
+    * updates indirectly, i.e. the surface will not be updated directly, but
+    * will receive copies from staging surfaces.
+    */
+   SVGA3D_SURFACE_STAGING_UPLOAD        = (1 << 27),
+   SVGA3D_SURFACE_STAGING_DOWNLOAD      = (1 << 28),
+   SVGA3D_SURFACE_HINT_INDIRECT_UPDATE  = (1 << 29),
+
+   /*
+    * Setting this flag allows this surface to be used with the
+    * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command.  It is only valid for
+    * buffer surfaces, and no bind flags are allowed to be set on surfaces
+    * with this flag.
+    */
+   SVGA3D_SURFACE_TRANSFER_FROM_BUFFER  = (1 << 30),
+
+   /*
+    * Marker for the last defined bit.
+    */
+   SVGA3D_SURFACE_FLAG_MAX              = (1 << 31),
+} SVGA3dSurfaceFlags;
+
+#define SVGA3D_SURFACE_HB_DISALLOWED_MASK        \
+        (  SVGA3D_SURFACE_MOB_PITCH    |         \
+           SVGA3D_SURFACE_SCREENTARGET |         \
+           SVGA3D_SURFACE_ALIGN16 |              \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER | \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT |   \
+           SVGA3D_SURFACE_STAGING_UPLOAD |       \
+           SVGA3D_SURFACE_STAGING_DOWNLOAD |     \
+           SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER   \
+        )
+
+#define SVGA3D_SURFACE_2D_DISALLOWED_MASK           \
+        (  SVGA3D_SURFACE_CUBEMAP |                 \
+           SVGA3D_SURFACE_MASKABLE_ANTIALIAS |      \
+           SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
+           SVGA3D_SURFACE_DECODE_RENDERTARGET |     \
+           SVGA3D_SURFACE_VOLUME |                  \
+           SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_ARRAY |                   \
+           SVGA3D_SURFACE_BIND_VERTEX_BUFFER |      \
+           SVGA3D_SURFACE_BIND_INDEX_BUFFER |       \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |    \
+           SVGA3D_SURFACE_BIND_DEPTH_STENCIL |      \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT |      \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER      \
+        )
+
+#define SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK \
+        (  SVGA3D_SURFACE_CUBEMAP |                 \
+           SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
+           SVGA3D_SURFACE_DECODE_RENDERTARGET |     \
+           SVGA3D_SURFACE_VOLUME |                  \
+           SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_BIND_VERTEX_BUFFER |      \
+           SVGA3D_SURFACE_BIND_INDEX_BUFFER |       \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |    \
+           SVGA3D_SURFACE_BIND_DEPTH_STENCIL |      \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT |      \
+           SVGA3D_SURFACE_INACTIVE |                \
+           SVGA3D_SURFACE_STAGING_UPLOAD |          \
+           SVGA3D_SURFACE_STAGING_DOWNLOAD |        \
+           SVGA3D_SURFACE_HINT_INDIRECT_UPDATE |    \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER      \
+        )
+
+#define SVGA3D_SURFACE_DX_ONLY_MASK             \
+        (  SVGA3D_SURFACE_BIND_STREAM_OUTPUT |  \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER  \
+        )
+
+#define SVGA3D_SURFACE_STAGING_MASK             \
+        (  SVGA3D_SURFACE_STAGING_UPLOAD |      \
+           SVGA3D_SURFACE_STAGING_DOWNLOAD      \
+        )
+
+#define SVGA3D_SURFACE_BIND_MASK                  \
+        (  SVGA3D_SURFACE_BIND_VERTEX_BUFFER   |  \
+           SVGA3D_SURFACE_BIND_INDEX_BUFFER    |  \
+           SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |  \
+           SVGA3D_SURFACE_BIND_SHADER_RESOURCE |  \
+           SVGA3D_SURFACE_BIND_RENDER_TARGET   |  \
+           SVGA3D_SURFACE_BIND_DEPTH_STENCIL   |  \
+           SVGA3D_SURFACE_BIND_STREAM_OUTPUT      \
+        )
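+
+/*
+ * Illustrative check (editor's sketch, not part of the device headers):
+ * a staging surface is never bound to the pipeline, so a definition
+ * carrying both kinds of flags can be rejected up front:
+ *
+ *   if ((flags & SVGA3D_SURFACE_STAGING_MASK) &&
+ *       (flags & SVGA3D_SURFACE_BIND_MASK))
+ *           return -EINVAL;
+ */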
+
+typedef enum {
+   SVGA3DFORMAT_OP_TEXTURE                               = 0x00000001,
+   SVGA3DFORMAT_OP_VOLUMETEXTURE                         = 0x00000002,
+   SVGA3DFORMAT_OP_CUBETEXTURE                           = 0x00000004,
+   SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET                = 0x00000008,
+   SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET              = 0x00000010,
+   SVGA3DFORMAT_OP_ZSTENCIL                              = 0x00000040,
+   SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH   = 0x00000080,
+
+/*
+ * This format can be used as a render target if the current display mode
+ * has the same depth when the alpha channel is ignored, e.g. if the device
+ * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
+ * format op list entry for A8R8G8B8 should have this cap.
+ */
+   SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET  = 0x00000100,
+
+/*
+ * This format contains DirectDraw support (including Flip).  This flag
+ * should not be set on alpha formats.
+ */
+   SVGA3DFORMAT_OP_DISPLAYMODE                           = 0x00000400,
+
+/*
+ * The rasterizer can provide some level of Direct3D support for this format,
+ * which implies that the driver can create a Context in this mode (for some
+ * render target format).  When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
+ * flag must also be set.
+ */
+   SVGA3DFORMAT_OP_3DACCELERATION                        = 0x00000800,
+
+/*
+ * This is set for a private format when the driver has put the bpp in
+ * the structure.
+ */
+   SVGA3DFORMAT_OP_PIXELSIZE                             = 0x00001000,
+
+/*
+ * Indicates that this format can be converted to any RGB format for which
+ * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
+ */
+   SVGA3DFORMAT_OP_CONVERT_TO_ARGB                       = 0x00002000,
+
+/*
+ * Indicates that this format can be used to create offscreen plain surfaces.
+ */
+   SVGA3DFORMAT_OP_OFFSCREENPLAIN                        = 0x00004000,
+
+/*
+ * Indicates that this format can be read as an SRGB texture (meaning that the
+ * sampler will linearize the looked-up data)
+ */
+   SVGA3DFORMAT_OP_SRGBREAD                              = 0x00008000,
+
+/*
+ * Indicates that this format can be used in the bumpmap instructions
+ */
+   SVGA3DFORMAT_OP_BUMPMAP                               = 0x00010000,
+
+/*
+ * Indicates that this format can be sampled by the displacement map sampler
+ */
+   SVGA3DFORMAT_OP_DMAP                                  = 0x00020000,
+
+/*
+ * Indicates that this format cannot be used with texture filtering
+ */
+   SVGA3DFORMAT_OP_NOFILTER                              = 0x00040000,
+
+/*
+ * Indicates that format conversions are supported to this RGB format if
+ * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
+ */
+   SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB                    = 0x00080000,
+
+/*
+ * Indicates that this format can be written as an SRGB target
+ * (meaning that the pixel pipe will de-linearize data on output to the format)
+ */
+   SVGA3DFORMAT_OP_SRGBWRITE                             = 0x00100000,
+
+/*
+ * Indicates that this format cannot be used with alpha blending
+ */
+   SVGA3DFORMAT_OP_NOALPHABLEND                          = 0x00200000,
+
+/*
+ * Indicates that the device can auto-generate sublevels for resources
+ * of this format
+ */
+   SVGA3DFORMAT_OP_AUTOGENMIPMAP                         = 0x00400000,
+
+/*
+ * Indicates that this format can be used by vertex texture sampler
+ */
+   SVGA3DFORMAT_OP_VERTEXTEXTURE                         = 0x00800000,
+
+/*
+ * Indicates that this format supports neither texture coordinate
+ * wrap modes nor mipmapping.
+ */
+   SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP                  = 0x01000000
+} SVGA3dFormatOp;
+
+#define SVGA3D_FORMAT_POSITIVE                             \
+   (SVGA3DFORMAT_OP_TEXTURE                              | \
+    SVGA3DFORMAT_OP_VOLUMETEXTURE                        | \
+    SVGA3DFORMAT_OP_CUBETEXTURE                          | \
+    SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET               | \
+    SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET             | \
+    SVGA3DFORMAT_OP_ZSTENCIL                             | \
+    SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH  | \
+    SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET | \
+    SVGA3DFORMAT_OP_DISPLAYMODE                          | \
+    SVGA3DFORMAT_OP_3DACCELERATION                       | \
+    SVGA3DFORMAT_OP_PIXELSIZE                            | \
+    SVGA3DFORMAT_OP_CONVERT_TO_ARGB                      | \
+    SVGA3DFORMAT_OP_OFFSCREENPLAIN                       | \
+    SVGA3DFORMAT_OP_SRGBREAD                             | \
+    SVGA3DFORMAT_OP_BUMPMAP                              | \
+    SVGA3DFORMAT_OP_DMAP                                 | \
+    SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB                   | \
+    SVGA3DFORMAT_OP_SRGBWRITE                            | \
+    SVGA3DFORMAT_OP_AUTOGENMIPMAP                        | \
+    SVGA3DFORMAT_OP_VERTEXTEXTURE)
+
+#define SVGA3D_FORMAT_NEGATIVE               \
+   (SVGA3DFORMAT_OP_NOFILTER               | \
+    SVGA3DFORMAT_OP_NOALPHABLEND           | \
+    SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP)
+
+/*
+ * This structure is a conversion of SVGA3DFORMAT_OP_*
+ * Entries must be located at the same position.
+ */
+typedef union {
+   uint32 value;
+   struct {
+      uint32 texture : 1;
+      uint32 volumeTexture : 1;
+      uint32 cubeTexture : 1;
+      uint32 offscreenRenderTarget : 1;
+      uint32 sameFormatRenderTarget : 1;
+      uint32 unknown1 : 1;
+      uint32 zStencil : 1;
+      uint32 zStencilArbitraryDepth : 1;
+      uint32 sameFormatUpToAlpha : 1;
+      uint32 unknown2 : 1;
+      uint32 displayMode : 1;
+      uint32 acceleration3d : 1;
+      uint32 pixelSize : 1;
+      uint32 convertToARGB : 1;
+      uint32 offscreenPlain : 1;
+      uint32 sRGBRead : 1;
+      uint32 bumpMap : 1;
+      uint32 dmap : 1;
+      uint32 noFilter : 1;
+      uint32 memberOfGroupARGB : 1;
+      uint32 sRGBWrite : 1;
+      uint32 noAlphaBlend : 1;
+      uint32 autoGenMipMap : 1;
+      uint32 vertexTexture : 1;
+      uint32 noTexCoordWrapNorMip : 1;
+   };
+} SVGA3dSurfaceFormatCaps;
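+
+/*
+ * Usage sketch (illustrative only, not part of the device protocol):
+ * because the bitfield above mirrors the SVGA3DFORMAT_OP_* bit
+ * positions,
+ *
+ *    SVGA3dSurfaceFormatCaps caps;
+ *    caps.value = ops;              (a raw SVGA3DFORMAT_OP_* mask)
+ *
+ * leaves caps.texture nonzero exactly when (ops & SVGA3DFORMAT_OP_TEXTURE)
+ * is.
+ */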
+
+/*
+ * SVGA_3D_CMD_SETRENDERSTATE Types.  All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+   SVGA3D_RS_INVALID                   = 0,
+   SVGA3D_RS_MIN                       = 1,
+   SVGA3D_RS_ZENABLE                   = 1,     /* SVGA3dBool */
+   SVGA3D_RS_ZWRITEENABLE              = 2,     /* SVGA3dBool */
+   SVGA3D_RS_ALPHATESTENABLE           = 3,     /* SVGA3dBool */
+   SVGA3D_RS_DITHERENABLE              = 4,     /* SVGA3dBool */
+   SVGA3D_RS_BLENDENABLE               = 5,     /* SVGA3dBool */
+   SVGA3D_RS_FOGENABLE                 = 6,     /* SVGA3dBool */
+   SVGA3D_RS_SPECULARENABLE            = 7,     /* SVGA3dBool */
+   SVGA3D_RS_STENCILENABLE             = 8,     /* SVGA3dBool */
+   SVGA3D_RS_LIGHTINGENABLE            = 9,     /* SVGA3dBool */
+   SVGA3D_RS_NORMALIZENORMALS          = 10,    /* SVGA3dBool */
+   SVGA3D_RS_POINTSPRITEENABLE         = 11,    /* SVGA3dBool */
+   SVGA3D_RS_POINTSCALEENABLE          = 12,    /* SVGA3dBool */
+   SVGA3D_RS_STENCILREF                = 13,    /* uint32 */
+   SVGA3D_RS_STENCILMASK               = 14,    /* uint32 */
+   SVGA3D_RS_STENCILWRITEMASK          = 15,    /* uint32 */
+   SVGA3D_RS_FOGSTART                  = 16,    /* float */
+   SVGA3D_RS_FOGEND                    = 17,    /* float */
+   SVGA3D_RS_FOGDENSITY                = 18,    /* float */
+   SVGA3D_RS_POINTSIZE                 = 19,    /* float */
+   SVGA3D_RS_POINTSIZEMIN              = 20,    /* float */
+   SVGA3D_RS_POINTSIZEMAX              = 21,    /* float */
+   SVGA3D_RS_POINTSCALE_A              = 22,    /* float */
+   SVGA3D_RS_POINTSCALE_B              = 23,    /* float */
+   SVGA3D_RS_POINTSCALE_C              = 24,    /* float */
+   SVGA3D_RS_FOGCOLOR                  = 25,    /* SVGA3dColor */
+   SVGA3D_RS_AMBIENT                   = 26,    /* SVGA3dColor */
+   SVGA3D_RS_CLIPPLANEENABLE           = 27,    /* SVGA3dClipPlanes */
+   SVGA3D_RS_FOGMODE                   = 28,    /* SVGA3dFogMode */
+   SVGA3D_RS_FILLMODE                  = 29,    /* SVGA3dFillMode */
+   SVGA3D_RS_SHADEMODE                 = 30,    /* SVGA3dShadeMode */
+   SVGA3D_RS_LINEPATTERN               = 31,    /* SVGA3dLinePattern */
+   SVGA3D_RS_SRCBLEND                  = 32,    /* SVGA3dBlendOp */
+   SVGA3D_RS_DSTBLEND                  = 33,    /* SVGA3dBlendOp */
+   SVGA3D_RS_BLENDEQUATION             = 34,    /* SVGA3dBlendEquation */
+   SVGA3D_RS_CULLMODE                  = 35,    /* SVGA3dFace */
+   SVGA3D_RS_ZFUNC                     = 36,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_ALPHAFUNC                 = 37,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_STENCILFUNC               = 38,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_STENCILFAIL               = 39,    /* SVGA3dStencilOp */
+   SVGA3D_RS_STENCILZFAIL              = 40,    /* SVGA3dStencilOp */
+   SVGA3D_RS_STENCILPASS               = 41,    /* SVGA3dStencilOp */
+   SVGA3D_RS_ALPHAREF                  = 42,    /* float (0.0 .. 1.0) */
+   SVGA3D_RS_FRONTWINDING              = 43,    /* SVGA3dFrontWinding */
+   SVGA3D_RS_COORDINATETYPE            = 44,    /* SVGA3dCoordinateType */
+   SVGA3D_RS_ZBIAS                     = 45,    /* float */
+   SVGA3D_RS_RANGEFOGENABLE            = 46,    /* SVGA3dBool */
+   SVGA3D_RS_COLORWRITEENABLE          = 47,    /* SVGA3dColorMask */
+   SVGA3D_RS_VERTEXMATERIALENABLE      = 48,    /* SVGA3dBool */
+   SVGA3D_RS_DIFFUSEMATERIALSOURCE     = 49,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_SPECULARMATERIALSOURCE    = 50,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_AMBIENTMATERIALSOURCE     = 51,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_EMISSIVEMATERIALSOURCE    = 52,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_TEXTUREFACTOR             = 53,    /* SVGA3dColor */
+   SVGA3D_RS_LOCALVIEWER               = 54,    /* SVGA3dBool */
+   SVGA3D_RS_SCISSORTESTENABLE         = 55,    /* SVGA3dBool */
+   SVGA3D_RS_BLENDCOLOR                = 56,    /* SVGA3dColor */
+   SVGA3D_RS_STENCILENABLE2SIDED       = 57,    /* SVGA3dBool */
+   SVGA3D_RS_CCWSTENCILFUNC            = 58,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_CCWSTENCILFAIL            = 59,    /* SVGA3dStencilOp */
+   SVGA3D_RS_CCWSTENCILZFAIL           = 60,    /* SVGA3dStencilOp */
+   SVGA3D_RS_CCWSTENCILPASS            = 61,    /* SVGA3dStencilOp */
+   SVGA3D_RS_VERTEXBLEND               = 62,    /* SVGA3dVertexBlendFlags */
+   SVGA3D_RS_SLOPESCALEDEPTHBIAS       = 63,    /* float */
+   SVGA3D_RS_DEPTHBIAS                 = 64,    /* float */
+
+
+   /*
+    * Output Gamma Level
+    *
+    * Output gamma affects the gamma curve of colors that are output from
+    * the rendering pipeline.  A value of 1.0 specifies a linear color
+    * space.  If the value is <= 0.0, gamma correction is ignored and
+    * linear color space is used.
+    */
+
+   SVGA3D_RS_OUTPUTGAMMA               = 65,    /* float */
+   SVGA3D_RS_ZVISIBLE                  = 66,    /* SVGA3dBool */
+   SVGA3D_RS_LASTPIXEL                 = 67,    /* SVGA3dBool */
+   SVGA3D_RS_CLIPPING                  = 68,    /* SVGA3dBool */
+   SVGA3D_RS_WRAP0                     = 69,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP1                     = 70,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP2                     = 71,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP3                     = 72,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP4                     = 73,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP5                     = 74,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP6                     = 75,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP7                     = 76,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP8                     = 77,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP9                     = 78,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP10                    = 79,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP11                    = 80,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP12                    = 81,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP13                    = 82,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP14                    = 83,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP15                    = 84,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_MULTISAMPLEANTIALIAS      = 85,    /* SVGA3dBool */
+   SVGA3D_RS_MULTISAMPLEMASK           = 86,    /* uint32 */
+   SVGA3D_RS_INDEXEDVERTEXBLENDENABLE  = 87,    /* SVGA3dBool */
+   SVGA3D_RS_TWEENFACTOR               = 88,    /* float */
+   SVGA3D_RS_ANTIALIASEDLINEENABLE     = 89,    /* SVGA3dBool */
+   SVGA3D_RS_COLORWRITEENABLE1         = 90,    /* SVGA3dColorMask */
+   SVGA3D_RS_COLORWRITEENABLE2         = 91,    /* SVGA3dColorMask */
+   SVGA3D_RS_COLORWRITEENABLE3         = 92,    /* SVGA3dColorMask */
+   SVGA3D_RS_SEPARATEALPHABLENDENABLE  = 93,    /* SVGA3dBool */
+   SVGA3D_RS_SRCBLENDALPHA             = 94,    /* SVGA3dBlendOp */
+   SVGA3D_RS_DSTBLENDALPHA             = 95,    /* SVGA3dBlendOp */
+   SVGA3D_RS_BLENDEQUATIONALPHA        = 96,    /* SVGA3dBlendEquation */
+   SVGA3D_RS_TRANSPARENCYANTIALIAS     = 97,    /* SVGA3dTransparencyAntialiasType */
+   SVGA3D_RS_LINEWIDTH                 = 98,    /* float */
+   SVGA3D_RS_MAX
+} SVGA3dRenderStateName;
+
+typedef enum {
+   SVGA3D_TRANSPARENCYANTIALIAS_NORMAL            = 0,
+   SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE   = 1,
+   SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE       = 2,
+   SVGA3D_TRANSPARENCYANTIALIAS_MAX
+} SVGA3dTransparencyAntialiasType;
+
+typedef enum {
+   SVGA3D_VERTEXMATERIAL_NONE     = 0,    /* Use the value in the current material */
+   SVGA3D_VERTEXMATERIAL_DIFFUSE  = 1,    /* Use the value in the diffuse component */
+   SVGA3D_VERTEXMATERIAL_SPECULAR = 2,    /* Use the value in the specular component */
+   SVGA3D_VERTEXMATERIAL_MAX      = 3,
+} SVGA3dVertexMaterial;
+
+typedef enum {
+   SVGA3D_FILLMODE_INVALID = 0,
+   SVGA3D_FILLMODE_MIN     = 1,
+   SVGA3D_FILLMODE_POINT   = 1,
+   SVGA3D_FILLMODE_LINE    = 2,
+   SVGA3D_FILLMODE_FILL    = 3,
+   SVGA3D_FILLMODE_MAX
+} SVGA3dFillModeType;
+
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+   struct {
+      uint16   mode;       /* SVGA3dFillModeType */
+      uint16   face;       /* SVGA3dFace */
+   };
+   uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dFillMode;
+
+typedef enum {
+   SVGA3D_SHADEMODE_INVALID = 0,
+   SVGA3D_SHADEMODE_FLAT    = 1,
+   SVGA3D_SHADEMODE_SMOOTH  = 2,
+   SVGA3D_SHADEMODE_PHONG   = 3,     /* Not supported */
+   SVGA3D_SHADEMODE_MAX
+} SVGA3dShadeMode;
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+   struct {
+      uint16 repeat;
+      uint16 pattern;
+   };
+   uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dLinePattern;
+
+typedef enum {
+   SVGA3D_BLENDOP_INVALID             = 0,
+   SVGA3D_BLENDOP_MIN                 = 1,
+   SVGA3D_BLENDOP_ZERO                = 1,
+   SVGA3D_BLENDOP_ONE                 = 2,
+   SVGA3D_BLENDOP_SRCCOLOR            = 3,
+   SVGA3D_BLENDOP_INVSRCCOLOR         = 4,
+   SVGA3D_BLENDOP_SRCALPHA            = 5,
+   SVGA3D_BLENDOP_INVSRCALPHA         = 6,
+   SVGA3D_BLENDOP_DESTALPHA           = 7,
+   SVGA3D_BLENDOP_INVDESTALPHA        = 8,
+   SVGA3D_BLENDOP_DESTCOLOR           = 9,
+   SVGA3D_BLENDOP_INVDESTCOLOR        = 10,
+   SVGA3D_BLENDOP_SRCALPHASAT         = 11,
+   SVGA3D_BLENDOP_BLENDFACTOR         = 12,
+   SVGA3D_BLENDOP_INVBLENDFACTOR      = 13,
+   SVGA3D_BLENDOP_SRC1COLOR           = 14,
+   SVGA3D_BLENDOP_INVSRC1COLOR        = 15,
+   SVGA3D_BLENDOP_SRC1ALPHA           = 16,
+   SVGA3D_BLENDOP_INVSRC1ALPHA        = 17,
+   SVGA3D_BLENDOP_BLENDFACTORALPHA    = 18,
+   SVGA3D_BLENDOP_INVBLENDFACTORALPHA = 19,
+   SVGA3D_BLENDOP_MAX
+} SVGA3dBlendOp;
+
+typedef enum {
+   SVGA3D_BLENDEQ_INVALID            = 0,
+   SVGA3D_BLENDEQ_MIN                = 1,
+   SVGA3D_BLENDEQ_ADD                = 1,
+   SVGA3D_BLENDEQ_SUBTRACT           = 2,
+   SVGA3D_BLENDEQ_REVSUBTRACT        = 3,
+   SVGA3D_BLENDEQ_MINIMUM            = 4,
+   SVGA3D_BLENDEQ_MAXIMUM            = 5,
+   SVGA3D_BLENDEQ_MAX
+} SVGA3dBlendEquation;
+
+typedef enum {
+   SVGA3D_DX11_LOGICOP_MIN           = 0,
+   SVGA3D_DX11_LOGICOP_CLEAR         = 0,
+   SVGA3D_DX11_LOGICOP_SET           = 1,
+   SVGA3D_DX11_LOGICOP_COPY          = 2,
+   SVGA3D_DX11_LOGICOP_COPY_INVERTED = 3,
+   SVGA3D_DX11_LOGICOP_NOOP          = 4,
+   SVGA3D_DX11_LOGICOP_INVERT        = 5,
+   SVGA3D_DX11_LOGICOP_AND           = 6,
+   SVGA3D_DX11_LOGICOP_NAND          = 7,
+   SVGA3D_DX11_LOGICOP_OR            = 8,
+   SVGA3D_DX11_LOGICOP_NOR           = 9,
+   SVGA3D_DX11_LOGICOP_XOR           = 10,
+   SVGA3D_DX11_LOGICOP_EQUIV         = 11,
+   SVGA3D_DX11_LOGICOP_AND_REVERSE   = 12,
+   SVGA3D_DX11_LOGICOP_AND_INVERTED  = 13,
+   SVGA3D_DX11_LOGICOP_OR_REVERSE    = 14,
+   SVGA3D_DX11_LOGICOP_OR_INVERTED   = 15,
+   SVGA3D_DX11_LOGICOP_MAX
+} SVGA3dDX11LogicOp;
+
+typedef enum {
+   SVGA3D_FRONTWINDING_INVALID = 0,
+   SVGA3D_FRONTWINDING_CW      = 1,
+   SVGA3D_FRONTWINDING_CCW     = 2,
+   SVGA3D_FRONTWINDING_MAX
+} SVGA3dFrontWinding;
+
+typedef enum {
+   SVGA3D_FACE_INVALID  = 0,
+   SVGA3D_FACE_NONE     = 1,
+   SVGA3D_FACE_MIN      = 1,
+   SVGA3D_FACE_FRONT    = 2,
+   SVGA3D_FACE_BACK     = 3,
+   SVGA3D_FACE_FRONT_BACK = 4,
+   SVGA3D_FACE_MAX
+} SVGA3dFace;
+
+/*
+ * The order and the values should not be changed
+ */
+
+typedef enum {
+   SVGA3D_CMP_INVALID              = 0,
+   SVGA3D_CMP_NEVER                = 1,
+   SVGA3D_CMP_LESS                 = 2,
+   SVGA3D_CMP_EQUAL                = 3,
+   SVGA3D_CMP_LESSEQUAL            = 4,
+   SVGA3D_CMP_GREATER              = 5,
+   SVGA3D_CMP_NOTEQUAL             = 6,
+   SVGA3D_CMP_GREATEREQUAL         = 7,
+   SVGA3D_CMP_ALWAYS               = 8,
+   SVGA3D_CMP_MAX
+} SVGA3dCmpFunc;
+
+/*
+ * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
+ * the fog factor to be specified in the alpha component of the specular
+ * (a.k.a. secondary) vertex color.
+ */
+typedef enum {
+   SVGA3D_FOGFUNC_INVALID          = 0,
+   SVGA3D_FOGFUNC_EXP              = 1,
+   SVGA3D_FOGFUNC_EXP2             = 2,
+   SVGA3D_FOGFUNC_LINEAR           = 3,
+   SVGA3D_FOGFUNC_PER_VERTEX       = 4
+} SVGA3dFogFunction;
+
+/*
+ * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
+ * or per-pixel basis.
+ */
+typedef enum {
+   SVGA3D_FOGTYPE_INVALID          = 0,
+   SVGA3D_FOGTYPE_VERTEX           = 1,
+   SVGA3D_FOGTYPE_PIXEL            = 2,
+   SVGA3D_FOGTYPE_MAX              = 3
+} SVGA3dFogType;
+
+/*
+ * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
+ * computed using the eye Z value of each pixel (or vertex), whereas range-
+ * based fog is computed using the actual distance (range) to the eye.
+ */
+typedef enum {
+   SVGA3D_FOGBASE_INVALID          = 0,
+   SVGA3D_FOGBASE_DEPTHBASED       = 1,
+   SVGA3D_FOGBASE_RANGEBASED       = 2,
+   SVGA3D_FOGBASE_MAX              = 3
+} SVGA3dFogBase;
+
+typedef enum {
+   SVGA3D_STENCILOP_INVALID        = 0,
+   SVGA3D_STENCILOP_MIN            = 1,
+   SVGA3D_STENCILOP_KEEP           = 1,
+   SVGA3D_STENCILOP_ZERO           = 2,
+   SVGA3D_STENCILOP_REPLACE        = 3,
+   SVGA3D_STENCILOP_INCRSAT        = 4,
+   SVGA3D_STENCILOP_DECRSAT        = 5,
+   SVGA3D_STENCILOP_INVERT         = 6,
+   SVGA3D_STENCILOP_INCR           = 7,
+   SVGA3D_STENCILOP_DECR           = 8,
+   SVGA3D_STENCILOP_MAX
+} SVGA3dStencilOp;
+
+typedef enum {
+   SVGA3D_CLIPPLANE_0              = (1 << 0),
+   SVGA3D_CLIPPLANE_1              = (1 << 1),
+   SVGA3D_CLIPPLANE_2              = (1 << 2),
+   SVGA3D_CLIPPLANE_3              = (1 << 3),
+   SVGA3D_CLIPPLANE_4              = (1 << 4),
+   SVGA3D_CLIPPLANE_5              = (1 << 5),
+} SVGA3dClipPlanes;
+
+typedef enum {
+   SVGA3D_CLEAR_COLOR              = 0x1,
+   SVGA3D_CLEAR_DEPTH              = 0x2,
+   SVGA3D_CLEAR_STENCIL            = 0x4,
+
+   /*
+    * Hint only, must be used together with SVGA3D_CLEAR_COLOR. If
+    * SVGA3D_CLEAR_DEPTH or SVGA3D_CLEAR_STENCIL bit is set, this
+    * bit will be ignored.
+    */
+   SVGA3D_CLEAR_COLORFILL          = 0x8
+} SVGA3dClearFlag;
+
+typedef enum {
+   SVGA3D_RT_DEPTH                 = 0,
+   SVGA3D_RT_MIN                   = 0,
+   SVGA3D_RT_STENCIL               = 1,
+   SVGA3D_RT_COLOR0                = 2,
+   SVGA3D_RT_COLOR1                = 3,
+   SVGA3D_RT_COLOR2                = 4,
+   SVGA3D_RT_COLOR3                = 5,
+   SVGA3D_RT_COLOR4                = 6,
+   SVGA3D_RT_COLOR5                = 7,
+   SVGA3D_RT_COLOR6                = 8,
+   SVGA3D_RT_COLOR7                = 9,
+   SVGA3D_RT_MAX,
+   SVGA3D_RT_INVALID               = ((uint32)-1),
+} SVGA3dRenderTargetType;
+
+#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
+
+typedef
+#include "vmware_pack_begin.h"
+union {
+   struct {
+      uint32  red   : 1;
+      uint32  green : 1;
+      uint32  blue  : 1;
+      uint32  alpha : 1;
+   };
+   uint32 uintValue;
+}
+#include "vmware_pack_end.h"
+SVGA3dColorMask;
+
+typedef enum {
+   SVGA3D_VBLEND_DISABLE            = 0,
+   SVGA3D_VBLEND_1WEIGHT            = 1,
+   SVGA3D_VBLEND_2WEIGHT            = 2,
+   SVGA3D_VBLEND_3WEIGHT            = 3,
+   SVGA3D_VBLEND_MAX                = 4,
+} SVGA3dVertexBlendFlags;
+
+typedef enum {
+   SVGA3D_WRAPCOORD_0   = 1 << 0,
+   SVGA3D_WRAPCOORD_1   = 1 << 1,
+   SVGA3D_WRAPCOORD_2   = 1 << 2,
+   SVGA3D_WRAPCOORD_3   = 1 << 3,
+   SVGA3D_WRAPCOORD_ALL = 0xF,
+} SVGA3dWrapFlags;
+
+/*
+ * SVGA_3D_CMD_TEXTURESTATE Types.  All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+   SVGA3D_TS_INVALID                    = 0,
+   SVGA3D_TS_MIN                        = 1,
+   SVGA3D_TS_BIND_TEXTURE               = 1,    /* SVGA3dSurfaceId */
+   SVGA3D_TS_COLOROP                    = 2,    /* SVGA3dTextureCombiner */
+   SVGA3D_TS_COLORARG1                  = 3,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_COLORARG2                  = 4,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_ALPHAOP                    = 5,    /* SVGA3dTextureCombiner */
+   SVGA3D_TS_ALPHAARG1                  = 6,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_ALPHAARG2                  = 7,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_ADDRESSU                   = 8,    /* SVGA3dTextureAddress */
+   SVGA3D_TS_ADDRESSV                   = 9,    /* SVGA3dTextureAddress */
+   SVGA3D_TS_MIPFILTER                  = 10,   /* SVGA3dTextureFilter */
+   SVGA3D_TS_MAGFILTER                  = 11,   /* SVGA3dTextureFilter */
+   SVGA3D_TS_MINFILTER                  = 12,   /* SVGA3dTextureFilter */
+   SVGA3D_TS_BORDERCOLOR                = 13,   /* SVGA3dColor */
+   SVGA3D_TS_TEXCOORDINDEX              = 14,   /* uint32 */
+   SVGA3D_TS_TEXTURETRANSFORMFLAGS      = 15,   /* SVGA3dTexTransformFlags */
+   SVGA3D_TS_TEXCOORDGEN                = 16,   /* SVGA3dTextureCoordGen */
+   SVGA3D_TS_BUMPENVMAT00               = 17,   /* float */
+   SVGA3D_TS_BUMPENVMAT01               = 18,   /* float */
+   SVGA3D_TS_BUMPENVMAT10               = 19,   /* float */
+   SVGA3D_TS_BUMPENVMAT11               = 20,   /* float */
+   SVGA3D_TS_TEXTURE_MIPMAP_LEVEL       = 21,   /* uint32 */
+   SVGA3D_TS_TEXTURE_LOD_BIAS           = 22,   /* float */
+   SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL  = 23,   /* uint32 */
+   SVGA3D_TS_ADDRESSW                   = 24,   /* SVGA3dTextureAddress */
+
+
+   /*
+    * Sampler Gamma Level
+    *
+    * Sampler gamma affects the color of samples taken from the sampler.
+    * A value of 1.0 will produce linear samples.  If the value is <= 0.0
+    * the gamma value is ignored and a linear space is used.
+    */
+
+   SVGA3D_TS_GAMMA                      = 25,   /* float */
+   SVGA3D_TS_BUMPENVLSCALE              = 26,   /* float */
+   SVGA3D_TS_BUMPENVLOFFSET             = 27,   /* float */
+   SVGA3D_TS_COLORARG0                  = 28,   /* SVGA3dTextureArgData */
+   SVGA3D_TS_ALPHAARG0                  = 29,   /* SVGA3dTextureArgData */
+   SVGA3D_TS_PREGB_MAX                  = 30,   /* Max value before GBObjects */
+   SVGA3D_TS_CONSTANT                   = 30,   /* SVGA3dColor */
+   SVGA3D_TS_COLOR_KEY_ENABLE           = 31,   /* SVGA3dBool */
+   SVGA3D_TS_COLOR_KEY                  = 32,   /* SVGA3dColor */
+   SVGA3D_TS_MAX
+} SVGA3dTextureStateName;
+
+typedef enum {
+   SVGA3D_TC_INVALID                   = 0,
+   SVGA3D_TC_DISABLE                   = 1,
+   SVGA3D_TC_SELECTARG1                = 2,
+   SVGA3D_TC_SELECTARG2                = 3,
+   SVGA3D_TC_MODULATE                  = 4,
+   SVGA3D_TC_ADD                       = 5,
+   SVGA3D_TC_ADDSIGNED                 = 6,
+   SVGA3D_TC_SUBTRACT                  = 7,
+   SVGA3D_TC_BLENDTEXTUREALPHA         = 8,
+   SVGA3D_TC_BLENDDIFFUSEALPHA         = 9,
+   SVGA3D_TC_BLENDCURRENTALPHA         = 10,
+   SVGA3D_TC_BLENDFACTORALPHA          = 11,
+   SVGA3D_TC_MODULATE2X                = 12,
+   SVGA3D_TC_MODULATE4X                = 13,
+   SVGA3D_TC_DSDT                      = 14,
+   SVGA3D_TC_DOTPRODUCT3               = 15,
+   SVGA3D_TC_BLENDTEXTUREALPHAPM       = 16,
+   SVGA3D_TC_ADDSIGNED2X               = 17,
+   SVGA3D_TC_ADDSMOOTH                 = 18,
+   SVGA3D_TC_PREMODULATE               = 19,
+   SVGA3D_TC_MODULATEALPHA_ADDCOLOR    = 20,
+   SVGA3D_TC_MODULATECOLOR_ADDALPHA    = 21,
+   SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
+   SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
+   SVGA3D_TC_BUMPENVMAPLUMINANCE       = 24,
+   SVGA3D_TC_MULTIPLYADD               = 25,
+   SVGA3D_TC_LERP                      = 26,
+   SVGA3D_TC_MAX
+} SVGA3dTextureCombiner;
+
+#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
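+
+/*
+ * For example (illustrative): SVGA3D_TC_MODULATE (4) maps to capability
+ * bit (1 << 3), while SVGA3D_TC_INVALID (0) contributes no bit at all.
+ */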
+
+typedef enum {
+   SVGA3D_TEX_ADDRESS_INVALID    = 0,
+   SVGA3D_TEX_ADDRESS_MIN        = 1,
+   SVGA3D_TEX_ADDRESS_WRAP       = 1,
+   SVGA3D_TEX_ADDRESS_MIRROR     = 2,
+   SVGA3D_TEX_ADDRESS_CLAMP      = 3,
+   SVGA3D_TEX_ADDRESS_BORDER     = 4,
+   SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
+   SVGA3D_TEX_ADDRESS_EDGE       = 6,
+   SVGA3D_TEX_ADDRESS_MAX
+} SVGA3dTextureAddress;
+
+/*
+ * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
+ * disabled, and the rasterizer should use the magnification filter instead.
+ */
+typedef enum {
+   SVGA3D_TEX_FILTER_NONE           = 0,
+   SVGA3D_TEX_FILTER_MIN            = 0,
+   SVGA3D_TEX_FILTER_NEAREST        = 1,
+   SVGA3D_TEX_FILTER_LINEAR         = 2,
+   SVGA3D_TEX_FILTER_ANISOTROPIC    = 3,
+   SVGA3D_TEX_FILTER_FLATCUBIC      = 4, /* Deprecated, not implemented */
+   SVGA3D_TEX_FILTER_GAUSSIANCUBIC  = 5, /* Deprecated, not implemented */
+   SVGA3D_TEX_FILTER_PYRAMIDALQUAD  = 6, /* Not currently implemented */
+   SVGA3D_TEX_FILTER_GAUSSIANQUAD   = 7, /* Not currently implemented */
+   SVGA3D_TEX_FILTER_MAX
+} SVGA3dTextureFilter;
+
+typedef enum {
+   SVGA3D_TEX_TRANSFORM_OFF    = 0,
+   SVGA3D_TEX_TRANSFORM_S      = (1 << 0),
+   SVGA3D_TEX_TRANSFORM_T      = (1 << 1),
+   SVGA3D_TEX_TRANSFORM_R      = (1 << 2),
+   SVGA3D_TEX_TRANSFORM_Q      = (1 << 3),
+   SVGA3D_TEX_PROJECTED        = (1 << 15),
+} SVGA3dTexTransformFlags;
+
+typedef enum {
+   SVGA3D_TEXCOORD_GEN_OFF              = 0,
+   SVGA3D_TEXCOORD_GEN_EYE_POSITION     = 1,
+   SVGA3D_TEXCOORD_GEN_EYE_NORMAL       = 2,
+   SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
+   SVGA3D_TEXCOORD_GEN_SPHERE           = 4,
+   SVGA3D_TEXCOORD_GEN_MAX
+} SVGA3dTextureCoordGen;
+
+/*
+ * Texture argument constants for texture combiner
+ */
+typedef enum {
+   SVGA3D_TA_INVALID    = 0,
+   SVGA3D_TA_TFACTOR    = 1,
+   SVGA3D_TA_PREVIOUS   = 2,
+   SVGA3D_TA_DIFFUSE    = 3,
+   SVGA3D_TA_TEXTURE    = 4,
+   SVGA3D_TA_SPECULAR   = 5,
+   SVGA3D_TA_CONSTANT   = 6,
+   SVGA3D_TA_MAX
+} SVGA3dTextureArgData;
+
+#define SVGA3D_TM_MASK_LEN 4
+
+/* Modifiers for texture argument constants defined above. */
+typedef enum {
+   SVGA3D_TM_NONE       = 0,
+   SVGA3D_TM_ALPHA      = (1 << SVGA3D_TM_MASK_LEN),
+   SVGA3D_TM_ONE_MINUS  = (2 << SVGA3D_TM_MASK_LEN),
+} SVGA3dTextureArgModifier;
+
+/*
+ * Vertex declarations
+ *
+ * Notes:
+ *
+ * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
+ * draw with any POSITIONT vertex arrays, the programmable vertex
+ * pipeline will be implicitly disabled. Drawing will take place as if
+ * no vertex shader was bound.
+ */
+
+typedef enum {
+   SVGA3D_DECLUSAGE_POSITION     = 0,
+   SVGA3D_DECLUSAGE_BLENDWEIGHT,
+   SVGA3D_DECLUSAGE_BLENDINDICES,
+   SVGA3D_DECLUSAGE_NORMAL,
+   SVGA3D_DECLUSAGE_PSIZE,
+   SVGA3D_DECLUSAGE_TEXCOORD,
+   SVGA3D_DECLUSAGE_TANGENT,
+   SVGA3D_DECLUSAGE_BINORMAL,
+   SVGA3D_DECLUSAGE_TESSFACTOR,
+   SVGA3D_DECLUSAGE_POSITIONT,
+   SVGA3D_DECLUSAGE_COLOR,
+   SVGA3D_DECLUSAGE_FOG,
+   SVGA3D_DECLUSAGE_DEPTH,
+   SVGA3D_DECLUSAGE_SAMPLE,
+   SVGA3D_DECLUSAGE_MAX
+} SVGA3dDeclUsage;
+
+typedef enum {
+   SVGA3D_DECLMETHOD_DEFAULT     = 0,
+   SVGA3D_DECLMETHOD_PARTIALU,
+   SVGA3D_DECLMETHOD_PARTIALV,
+   SVGA3D_DECLMETHOD_CROSSUV,          /* Normal */
+   SVGA3D_DECLMETHOD_UV,
+   SVGA3D_DECLMETHOD_LOOKUP,           /* Lookup a displacement map */
+   SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement */
+                                       /* map */
+} SVGA3dDeclMethod;
+
+typedef enum {
+   SVGA3D_DECLTYPE_FLOAT1        =  0,
+   SVGA3D_DECLTYPE_FLOAT2        =  1,
+   SVGA3D_DECLTYPE_FLOAT3        =  2,
+   SVGA3D_DECLTYPE_FLOAT4        =  3,
+   SVGA3D_DECLTYPE_D3DCOLOR      =  4,
+   SVGA3D_DECLTYPE_UBYTE4        =  5,
+   SVGA3D_DECLTYPE_SHORT2        =  6,
+   SVGA3D_DECLTYPE_SHORT4        =  7,
+   SVGA3D_DECLTYPE_UBYTE4N       =  8,
+   SVGA3D_DECLTYPE_SHORT2N       =  9,
+   SVGA3D_DECLTYPE_SHORT4N       = 10,
+   SVGA3D_DECLTYPE_USHORT2N      = 11,
+   SVGA3D_DECLTYPE_USHORT4N      = 12,
+   SVGA3D_DECLTYPE_UDEC3         = 13,
+   SVGA3D_DECLTYPE_DEC3N         = 14,
+   SVGA3D_DECLTYPE_FLOAT16_2     = 15,
+   SVGA3D_DECLTYPE_FLOAT16_4     = 16,
+   SVGA3D_DECLTYPE_MAX,
+} SVGA3dDeclType;
+
+/*
+ * This structure is used for the divisor for geometry instancing;
+ * it's a direct translation of the Direct3D equivalent.
+ */
+typedef union {
+   struct {
+      /*
+       * For index data, this number represents the number of instances to draw.
+       * For instance data, this number represents the number of
+       * instances/vertex in this stream
+       */
+      uint32 count : 30;
+
+      /*
+       * This is 1 if this is supposed to be the data that is repeated for
+       * every instance.
+       */
+      uint32 indexedData : 1;
+
+      /*
+       * This is 1 if this is supposed to be the per-instance data.
+       */
+      uint32 instanceData : 1;
+   };
+
+   uint32 value;
+} SVGA3dVertexDivisor;
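+
+/*
+ * Setup sketch (illustrative only): one possible encoding of a stream
+ * that carries per-instance data is
+ *
+ *    SVGA3dVertexDivisor div;
+ *    div.value        = 0;
+ *    div.count        = 1;      (instances/vertex in this stream)
+ *    div.instanceData = 1;      (this stream holds per-instance data)
+ */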
+
+typedef enum {
+   /*
+    * SVGA3D_PRIMITIVE_INVALID is a valid value of this enum type; it
+    * shares the value 0 with SVGA3D_PRIMITIVE_MIN.
+    *
+    * INVALID is listed first (and MIN second) so debuggers will report
+    * INVALID as the name for value 0.
+    */
+   SVGA3D_PRIMITIVE_INVALID                     = 0,
+   SVGA3D_PRIMITIVE_MIN                         = 0,
+   SVGA3D_PRIMITIVE_TRIANGLELIST                = 1,
+   SVGA3D_PRIMITIVE_POINTLIST                   = 2,
+   SVGA3D_PRIMITIVE_LINELIST                    = 3,
+   SVGA3D_PRIMITIVE_LINESTRIP                   = 4,
+   SVGA3D_PRIMITIVE_TRIANGLESTRIP               = 5,
+   SVGA3D_PRIMITIVE_TRIANGLEFAN                 = 6,
+   SVGA3D_PRIMITIVE_LINELIST_ADJ                = 7,
+   SVGA3D_PRIMITIVE_PREDX_MAX                   = 7,
+   SVGA3D_PRIMITIVE_LINESTRIP_ADJ               = 8,
+   SVGA3D_PRIMITIVE_TRIANGLELIST_ADJ            = 9,
+   SVGA3D_PRIMITIVE_TRIANGLESTRIP_ADJ           = 10,
+   SVGA3D_PRIMITIVE_MAX
+} SVGA3dPrimitiveType;
+
+typedef enum {
+   SVGA3D_COORDINATE_INVALID                   = 0,
+   SVGA3D_COORDINATE_LEFTHANDED                = 1,
+   SVGA3D_COORDINATE_RIGHTHANDED               = 2,
+   SVGA3D_COORDINATE_MAX
+} SVGA3dCoordinateType;
+
+typedef enum {
+   SVGA3D_TRANSFORM_INVALID                     = 0,
+   SVGA3D_TRANSFORM_WORLD                       = 1,
+   SVGA3D_TRANSFORM_MIN                         = 1,
+   SVGA3D_TRANSFORM_VIEW                        = 2,
+   SVGA3D_TRANSFORM_PROJECTION                  = 3,
+   SVGA3D_TRANSFORM_TEXTURE0                    = 4,
+   SVGA3D_TRANSFORM_TEXTURE1                    = 5,
+   SVGA3D_TRANSFORM_TEXTURE2                    = 6,
+   SVGA3D_TRANSFORM_TEXTURE3                    = 7,
+   SVGA3D_TRANSFORM_TEXTURE4                    = 8,
+   SVGA3D_TRANSFORM_TEXTURE5                    = 9,
+   SVGA3D_TRANSFORM_TEXTURE6                    = 10,
+   SVGA3D_TRANSFORM_TEXTURE7                    = 11,
+   SVGA3D_TRANSFORM_WORLD1                      = 12,
+   SVGA3D_TRANSFORM_WORLD2                      = 13,
+   SVGA3D_TRANSFORM_WORLD3                      = 14,
+   SVGA3D_TRANSFORM_MAX
+} SVGA3dTransformType;
+
+typedef enum {
+   SVGA3D_LIGHTTYPE_INVALID                     = 0,
+   SVGA3D_LIGHTTYPE_MIN                         = 1,
+   SVGA3D_LIGHTTYPE_POINT                       = 1,
+   SVGA3D_LIGHTTYPE_SPOT1                       = 2, /* 1-cone, in degrees */
+   SVGA3D_LIGHTTYPE_SPOT2                       = 3, /* 2-cone, in radians */
+   SVGA3D_LIGHTTYPE_DIRECTIONAL                 = 4,
+   SVGA3D_LIGHTTYPE_MAX
+} SVGA3dLightType;
+
+typedef enum {
+   SVGA3D_CUBEFACE_POSX                         = 0,
+   SVGA3D_CUBEFACE_NEGX                         = 1,
+   SVGA3D_CUBEFACE_POSY                         = 2,
+   SVGA3D_CUBEFACE_NEGY                         = 3,
+   SVGA3D_CUBEFACE_POSZ                         = 4,
+   SVGA3D_CUBEFACE_NEGZ                         = 5,
+} SVGA3dCubeFace;
+
+typedef enum {
+   SVGA3D_SHADERTYPE_INVALID                    = 0,
+   SVGA3D_SHADERTYPE_MIN                        = 1,
+   SVGA3D_SHADERTYPE_VS                         = 1,
+   SVGA3D_SHADERTYPE_PS                         = 2,
+   SVGA3D_SHADERTYPE_PREDX_MAX                  = 3,
+   SVGA3D_SHADERTYPE_GS                         = 3,
+   SVGA3D_SHADERTYPE_DX10_MAX                   = 4,
+   SVGA3D_SHADERTYPE_HS                         = 4,
+   SVGA3D_SHADERTYPE_DS                         = 5,
+   SVGA3D_SHADERTYPE_CS                         = 6,
+   SVGA3D_SHADERTYPE_MAX                        = 7
+} SVGA3dShaderType;
+
+#define SVGA3D_NUM_SHADERTYPE_PREDX \
+   (SVGA3D_SHADERTYPE_PREDX_MAX - SVGA3D_SHADERTYPE_MIN)
+
+#define SVGA3D_NUM_SHADERTYPE_DX10 \
+   (SVGA3D_SHADERTYPE_DX10_MAX - SVGA3D_SHADERTYPE_MIN)
+
+#define SVGA3D_NUM_SHADERTYPE \
+   (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
+
+typedef enum {
+   SVGA3D_CONST_TYPE_MIN                        = 0,
+   SVGA3D_CONST_TYPE_FLOAT                      = 0,
+   SVGA3D_CONST_TYPE_INT                        = 1,
+   SVGA3D_CONST_TYPE_BOOL                       = 2,
+   SVGA3D_CONST_TYPE_MAX                        = 3,
+} SVGA3dShaderConstType;
+
+/*
+ * Register limits for shader consts.
+ */
+#define SVGA3D_CONSTREG_MAX            256
+#define SVGA3D_CONSTINTREG_MAX         16
+#define SVGA3D_CONSTBOOLREG_MAX        16
+
+typedef enum {
+   SVGA3D_STRETCH_BLT_POINT                     = 0,
+   SVGA3D_STRETCH_BLT_LINEAR                    = 1,
+   SVGA3D_STRETCH_BLT_MAX
+} SVGA3dStretchBltMode;
+
+typedef enum {
+   SVGA3D_QUERYTYPE_INVALID                     = ((uint8)-1),
+   SVGA3D_QUERYTYPE_MIN                         = 0,
+   SVGA3D_QUERYTYPE_OCCLUSION                   = 0,
+   SVGA3D_QUERYTYPE_TIMESTAMP                   = 1,
+   SVGA3D_QUERYTYPE_TIMESTAMPDISJOINT           = 2,
+   SVGA3D_QUERYTYPE_PIPELINESTATS               = 3,
+   SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE          = 4,
+   SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS           = 5,
+   SVGA3D_QUERYTYPE_STREAMOVERFLOWPREDICATE     = 6,
+   SVGA3D_QUERYTYPE_OCCLUSION64                 = 7,
+   SVGA3D_QUERYTYPE_EVENT                       = 8,
+   SVGA3D_QUERYTYPE_DX10_MAX                    = 9,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM0             = 9,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM1             = 10,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM2             = 11,
+   SVGA3D_QUERYTYPE_SOSTATS_STREAM3             = 12,
+   SVGA3D_QUERYTYPE_SOP_STREAM0                 = 13,
+   SVGA3D_QUERYTYPE_SOP_STREAM1                 = 14,
+   SVGA3D_QUERYTYPE_SOP_STREAM2                 = 15,
+   SVGA3D_QUERYTYPE_SOP_STREAM3                 = 16,
+   SVGA3D_QUERYTYPE_MAX
+} SVGA3dQueryType;
+
+typedef uint8 SVGA3dQueryTypeUint8;
+
+#define SVGA3D_NUM_QUERYTYPE  (SVGA3D_QUERYTYPE_MAX - SVGA3D_QUERYTYPE_MIN)
+
+/*
+ * This is the maximum number of queries per context that can be active
+ * simultaneously between a beginQuery and endQuery.
+ */
+#define SVGA3D_MAX_QUERY 64
+
+/*
+ * Query result buffer formats
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 samplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusionQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 passed;
+}
+#include "vmware_pack_end.h"
+SVGADXEventQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 timestamp;
+}
+#include "vmware_pack_end.h"
+SVGADXTimestampQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 realFrequency;
+   uint32 disjoint;
+}
+#include "vmware_pack_end.h"
+SVGADXTimestampDisjointQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 inputAssemblyVertices;
+   uint64 inputAssemblyPrimitives;
+   uint64 vertexShaderInvocations;
+   uint64 geometryShaderInvocations;
+   uint64 geometryShaderPrimitives;
+   uint64 clipperInvocations;
+   uint64 clipperPrimitives;
+   uint64 pixelShaderInvocations;
+   uint64 hullShaderInvocations;
+   uint64 domainShaderInvocations;
+   uint64 computeShaderInvocations;
+}
+#include "vmware_pack_end.h"
+SVGADXPipelineStatisticsQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 anySamplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusionPredicateQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 numPrimitivesWritten;
+   uint64 numPrimitivesRequired;
+}
+#include "vmware_pack_end.h"
+SVGADXStreamOutStatisticsQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 overflowed;
+}
+#include "vmware_pack_end.h"
+SVGADXStreamOutPredicateQueryResult;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint64 samplesRendered;
+}
+#include "vmware_pack_end.h"
+SVGADXOcclusion64QueryResult;
+
+/*
+ * SVGADXQueryResultUnion is not intended for use in the protocol, but is
+ * very helpful when working with queries generically.
+ */
+typedef
+#include "vmware_pack_begin.h"
+union SVGADXQueryResultUnion {
+   SVGADXOcclusionQueryResult occ;
+   SVGADXEventQueryResult event;
+   SVGADXTimestampQueryResult ts;
+   SVGADXTimestampDisjointQueryResult tsDisjoint;
+   SVGADXPipelineStatisticsQueryResult pipelineStats;
+   SVGADXOcclusionPredicateQueryResult occPred;
+   SVGADXStreamOutStatisticsQueryResult soStats;
+   SVGADXStreamOutPredicateQueryResult soPred;
+   SVGADXOcclusion64QueryResult occ64;
+}
+#include "vmware_pack_end.h"
+SVGADXQueryResultUnion;
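+
+/*
+ * Usage sketch (illustrative; the read-back helper is hypothetical, not
+ * part of this header): one buffer can hold any result type, selected
+ * by the query type:
+ *
+ *    SVGADXQueryResultUnion res;
+ *    readBackQueryResult(&res);
+ *    if (type == SVGA3D_QUERYTYPE_OCCLUSION64)
+ *       samples = res.occ64.samplesRendered;
+ *    else if (type == SVGA3D_QUERYTYPE_OCCLUSION)
+ *       samples = res.occ.samplesRendered;
+ */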
+
+
+typedef enum {
+   SVGA3D_QUERYSTATE_PENDING     = 0,      /* Query is not finished yet */
+   SVGA3D_QUERYSTATE_SUCCEEDED   = 1,      /* Completed successfully */
+   SVGA3D_QUERYSTATE_FAILED      = 2,      /* Completed unsuccessfully */
+   SVGA3D_QUERYSTATE_NEW         = 3,      /* Never submitted (guest only) */
+} SVGA3dQueryState;
+
+typedef enum {
+   SVGA3D_WRITE_HOST_VRAM        = 1,
+   SVGA3D_READ_HOST_VRAM         = 2,
+} SVGA3dTransferType;
+
+typedef enum {
+   SVGA3D_LOGICOP_INVALID   = 0,
+   SVGA3D_LOGICOP_MIN       = 1,
+   SVGA3D_LOGICOP_COPY      = 1,
+   SVGA3D_LOGICOP_NOT       = 2,
+   SVGA3D_LOGICOP_AND       = 3,
+   SVGA3D_LOGICOP_OR        = 4,
+   SVGA3D_LOGICOP_XOR       = 5,
+   SVGA3D_LOGICOP_NXOR      = 6,
+   SVGA3D_LOGICOP_ROP3MIN   = 30,   /* 7-29 are reserved for future logic ops. */
+   SVGA3D_LOGICOP_ROP3MAX   = (SVGA3D_LOGICOP_ROP3MIN + 255),
+   SVGA3D_LOGICOP_MAX       = (SVGA3D_LOGICOP_ROP3MAX + 1),
+} SVGA3dLogicOp;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   union {
+      struct {
+	 uint16  function;       /* SVGA3dFogFunction */
+	 uint8   type;           /* SVGA3dFogType */
+	 uint8   base;           /* SVGA3dFogBase */
+      };
+      uint32     uintValue;
+   };
+}
+#include "vmware_pack_end.h"
+SVGA3dFogMode;
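+
+/*
+ * Packing sketch (illustrative only): linear, per-pixel, depth-based fog
+ * could be encoded as
+ *
+ *    SVGA3dFogMode fog;
+ *    fog.function = SVGA3D_FOGFUNC_LINEAR;
+ *    fog.type     = SVGA3D_FOGTYPE_PIXEL;
+ *    fog.base     = SVGA3D_FOGBASE_DEPTHBASED;
+ *
+ * with fog.uintValue then holding the packed render state value.
+ */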
+
+/*
+ * Uniquely identifies one image (a 1D/2D/3D array) within a surface.
+ * This is a surface ID together with face/mipmap indices.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSurfaceImageId {
+   uint32               sid;
+   uint32               face;
+   uint32               mipmap;
+}
+#include "vmware_pack_end.h"
+SVGA3dSurfaceImageId;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32               width;
+   uint32               height;
+   uint32               depth;
+}
+#include "vmware_pack_end.h"
+SVGA3dSize;
+
+/*
+ * Guest-backed objects definitions.
+ */
+typedef enum {
+   SVGA_OTABLE_MOB             = 0,
+   SVGA_OTABLE_MIN             = 0,
+   SVGA_OTABLE_SURFACE         = 1,
+   SVGA_OTABLE_CONTEXT         = 2,
+   SVGA_OTABLE_SHADER          = 3,
+   SVGA_OTABLE_SCREENTARGET    = 4,
+
+   SVGA_OTABLE_DX9_MAX         = 5,
+
+   SVGA_OTABLE_DXCONTEXT       = 5,
+   SVGA_OTABLE_MAX             = 6
+} SVGAOTableType;
+
+/*
+ * Deprecated.
+ */
+#define SVGA_OTABLE_COUNT 4
+
+typedef enum {
+   SVGA_COTABLE_MIN             = 0,
+   SVGA_COTABLE_RTVIEW          = 0,
+   SVGA_COTABLE_DSVIEW          = 1,
+   SVGA_COTABLE_SRVIEW          = 2,
+   SVGA_COTABLE_ELEMENTLAYOUT   = 3,
+   SVGA_COTABLE_BLENDSTATE      = 4,
+   SVGA_COTABLE_DEPTHSTENCIL    = 5,
+   SVGA_COTABLE_RASTERIZERSTATE = 6,
+   SVGA_COTABLE_SAMPLER         = 7,
+   SVGA_COTABLE_STREAMOUTPUT    = 8,
+   SVGA_COTABLE_DXQUERY         = 9,
+   SVGA_COTABLE_DXSHADER        = 10,
+   SVGA_COTABLE_DX10_MAX        = 11,
+   SVGA_COTABLE_UAVIEW          = 11,
+   SVGA_COTABLE_MAX
+} SVGACOTableType;
+
+/*
+ * The largest size (number of entries) allowed in a COTable.
+ */
+#define SVGA_COTABLE_MAX_IDS (MAX_UINT16 - 2)
+
+typedef enum SVGAMobFormat {
+   SVGA3D_MOBFMT_INVALID     = SVGA3D_INVALID_ID,
+   SVGA3D_MOBFMT_PTDEPTH_0   = 0,
+   SVGA3D_MOBFMT_MIN         = 0,
+   SVGA3D_MOBFMT_PTDEPTH_1   = 1,
+   SVGA3D_MOBFMT_PTDEPTH_2   = 2,
+   SVGA3D_MOBFMT_RANGE       = 3,
+   SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
+   SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
+   SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
+   SVGA3D_MOBFMT_PREDX_MAX   = 7,
+   SVGA3D_MOBFMT_EMPTY       = 7,
+   SVGA3D_MOBFMT_MAX,
+} SVGAMobFormat;
+
+#define SVGA3D_MOB_EMPTY_BASE 1
+
+#endif /* _SVGA3D_TYPES_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
new file mode 100644
index 0000000..884b1d1
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
@@ -0,0 +1,89 @@
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_escape.h --
+ *
+ *    Definitions for our own (vendor-specific) SVGA Escape commands.
+ */
+
+#ifndef _SVGA_ESCAPE_H_
+#define _SVGA_ESCAPE_H_
+
+
+/*
+ * Namespace IDs for the escape command
+ */
+
+#define SVGA_ESCAPE_NSID_VMWARE 0x00000000
+#define SVGA_ESCAPE_NSID_DEVEL  0xFFFFFFFF
+
+
+/*
+ * Within SVGA_ESCAPE_NSID_VMWARE, we multiplex commands according to
+ * the first DWORD of escape data (after the nsID and size). As a
+ * guideline we're using the high word and low word as a major and
+ * minor command number, respectively.
+ *
+ * Major command number allocation:
+ *
+ *   0000: Reserved
+ *   0001: SVGA_ESCAPE_VMWARE_LOG (svga_binary_logger.h)
+ *   0002: SVGA_ESCAPE_VMWARE_VIDEO (svga_overlay.h)
+ *   0003: SVGA_ESCAPE_VMWARE_HINT (svga_escape.h)
+ */
+
+#define SVGA_ESCAPE_VMWARE_MAJOR_MASK  0xFFFF0000
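+
+/*
+ * For example (illustrative): given the first DWORD of escape data,
+ *
+ *    uint32 major = (cmd & SVGA_ESCAPE_VMWARE_MAJOR_MASK) >> 16;
+ *
+ * recovers the major command number, so 0x00030001 decodes as major
+ * command 3 (SVGA_ESCAPE_VMWARE_HINT), minor command 1.
+ */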
+
+
+/*
+ * SVGA Hint commands.
+ *
+ * These escapes let the SVGA driver provide optional information to
+ * the host about the state of the guest or guest applications. The
+ * host can use these hints to make user interface or performance
+ * decisions.
+ *
+ * Notes:
+ *
+ *   - SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN is deprecated for guests
+ *     that use the SVGA Screen Object extension. Instead of sending
+ *     this escape, use the SVGA_SCREEN_FULLSCREEN_HINT flag on your
+ *     Screen Object.
+ */
+
+#define SVGA_ESCAPE_VMWARE_HINT               0x00030000
+#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN    0x00030001  /* Deprecated */
+
+typedef
+struct {
+   uint32 command;
+   uint32 fullscreen;
+   struct {
+      int32 x, y;
+   } monitorPosition;
+} SVGAEscapeHintFullscreen;
+
+#endif /* _SVGA_ESCAPE_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
new file mode 100644
index 0000000..faf6d9b
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
@@ -0,0 +1,199 @@
+/**********************************************************
+ * Copyright 2007-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_overlay.h --
+ *
+ *    Definitions for video-overlay support.
+ */
+
+#ifndef _SVGA_OVERLAY_H_
+#define _SVGA_OVERLAY_H_
+
+#include "svga_reg.h"
+
+/*
+ * Video formats we support
+ */
+
+#define VMWARE_FOURCC_YV12 0x32315659 /* 'Y' 'V' '1' '2' */
+#define VMWARE_FOURCC_YUY2 0x32595559 /* 'Y' 'U' 'Y' '2' */
+#define VMWARE_FOURCC_UYVY 0x59565955 /* 'U' 'Y' 'V' 'Y' */
+
+typedef enum {
+   SVGA_OVERLAY_FORMAT_INVALID = 0,
+   SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12,
+   SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2,
+   SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY,
+} SVGAOverlayFormat;
+
+#define SVGA_VIDEO_COLORKEY_MASK             0x00ffffff
+
+#define SVGA_ESCAPE_VMWARE_VIDEO             0x00020000
+
+#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS    0x00020001
+        /* FIFO escape layout:
+         * Type, Stream Id, (Register Id, Value) pairs */
+
+#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH       0x00020002
+        /* FIFO escape layout:
+         * Type, Stream Id */
+
+typedef
+struct SVGAEscapeVideoSetRegs {
+   struct {
+      uint32 cmdType;
+      uint32 streamId;
+   } header;
+
+   /* May include zero or more items. */
+   struct {
+      uint32 registerId;
+      uint32 value;
+   } items[1];
+} SVGAEscapeVideoSetRegs;
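+
+/*
+ * Layout sketch (illustrative; SVGA_VIDEO_ENABLED is assumed to be one
+ * of the SVGA_VIDEO_* register IDs defined elsewhere): a SET_REGS escape
+ * carrying a single register write could be filled in as
+ *
+ *    SVGAEscapeVideoSetRegs regs;
+ *    regs.header.cmdType      = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
+ *    regs.header.streamId     = streamId;
+ *    regs.items[0].registerId = SVGA_VIDEO_ENABLED;
+ *    regs.items[0].value      = 1;
+ */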
+
+typedef
+struct SVGAEscapeVideoFlush {
+   uint32 cmdType;
+   uint32 streamId;
+} SVGAEscapeVideoFlush;
+
+
+/*
+ * Struct definitions for the video overlay commands built on
+ * SVGAFifoCmdEscape.
+ */
+typedef
+struct {
+   uint32 command;
+   uint32 overlay;
+} SVGAFifoEscapeCmdVideoBase;
+
+typedef
+struct {
+   SVGAFifoEscapeCmdVideoBase videoCmd;
+} SVGAFifoEscapeCmdVideoFlush;
+
+typedef
+struct {
+   SVGAFifoEscapeCmdVideoBase videoCmd;
+   struct {
+      uint32 regId;
+      uint32 value;
+   } items[1];
+} SVGAFifoEscapeCmdVideoSetRegs;
+
+typedef
+struct {
+   SVGAFifoEscapeCmdVideoBase videoCmd;
+   struct {
+      uint32 regId;
+      uint32 value;
+   } items[SVGA_VIDEO_NUM_REGS];
+} SVGAFifoEscapeCmdVideoSetAllRegs;
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * VMwareVideoGetAttributes --
+ *
+ *      Computes the size, pitches and offsets for YUV frames.
+ *
+ * Results:
+ *      TRUE on success; FALSE on failure.
+ *
+ * Side effects:
+ *      Pitches and offsets for the given YUV frame are put in 'pitches'
+ *      and 'offsets' respectively.  Both are optional.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline bool
+VMwareVideoGetAttributes(const SVGAOverlayFormat format,    /* IN */
+                         uint32 *width,                     /* IN / OUT */
+                         uint32 *height,                    /* IN / OUT */
+                         uint32 *size,                      /* OUT */
+                         uint32 *pitches,                   /* OUT (optional) */
+                         uint32 *offsets)                   /* OUT (optional) */
+{
+    int tmp;
+
+    *width = (*width + 1) & ~1;
+
+    if (offsets) {
+        offsets[0] = 0;
+    }
+
+    switch (format) {
+    case VMWARE_FOURCC_YV12:
+       *height = (*height + 1) & ~1;
+       *size = (*width) * (*height);
+
+       if (pitches) {
+          pitches[0] = *width;
+       }
+
+       if (offsets) {
+          offsets[1] = *size;
+       }
+
+       tmp = *width >> 1;
+
+       if (pitches) {
+          pitches[1] = pitches[2] = tmp;
+       }
+
+       tmp *= (*height >> 1);
+       *size += tmp;
+
+       if (offsets) {
+          offsets[2] = *size;
+       }
+
+       *size += tmp;
+       break;
+
+    case VMWARE_FOURCC_YUY2:
+    case VMWARE_FOURCC_UYVY:
+       *size = *width * 2;
+
+       if (pitches) {
+          pitches[0] = *size;
+       }
+
+       *size *= *height;
+       break;
+
+    default:
+       return false;
+    }
+
+    return true;
+}
+
+#endif /* _SVGA_OVERLAY_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
new file mode 100644
index 0000000..6e0ccb7
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -0,0 +1,1936 @@
+/**********************************************************
+ * Copyright 1998-2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_reg.h --
+ *
+ *    Virtual hardware definitions for the VMware SVGA II device.
+ */
+
+#ifndef _SVGA_REG_H_
+#define _SVGA_REG_H_
+#include <linux/pci_ids.h>
+
+#define INCLUDE_ALLOW_MODULE
+#define INCLUDE_ALLOW_USERLEVEL
+
+#define INCLUDE_ALLOW_VMCORE
+#include "includeCheck.h"
+
+#include "svga_types.h"
+
+/*
+ * SVGA_REG_ENABLE bit definitions.
+ */
+typedef enum {
+   SVGA_REG_ENABLE_DISABLE = 0,
+   SVGA_REG_ENABLE_ENABLE = (1 << 0),
+   SVGA_REG_ENABLE_HIDE = (1 << 1),
+} SvgaRegEnable;
+
+typedef uint32 SVGAMobId;
+
+/*
+ * Arbitrary and meaningless limits. Please ignore these when writing
+ * new drivers.
+ */
+#define SVGA_MAX_WIDTH                  2560
+#define SVGA_MAX_HEIGHT                 1600
+
+
+#define SVGA_MAX_BITS_PER_PIXEL         32
+#define SVGA_MAX_DEPTH                  24
+#define SVGA_MAX_DISPLAYS               10
+
+/*
+ * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
+ * cursor bypass mode. This is still supported, but no new guest
+ * drivers should use it.
+ */
+#define SVGA_CURSOR_ON_HIDE            0x0   /* Must be 0 to maintain backward compatibility */
+#define SVGA_CURSOR_ON_SHOW            0x1   /* Must be 1 to maintain backward compatibility */
+#define SVGA_CURSOR_ON_REMOVE_FROM_FB  0x2   /* Remove the cursor from the framebuffer because we need to see what's under it */
+#define SVGA_CURSOR_ON_RESTORE_TO_FB   0x3   /* Put the cursor back in the framebuffer so the user can see it */
+
+/*
+ * The maximum framebuffer size that can be traced for guests unless the
+ * SVGA_CAP_GBOBJECTS capability is set in SVGA_REG_CAPABILITIES.  In that
+ * case the full framebuffer can be traced independently of this limit.
+ */
+#define SVGA_FB_MAX_TRACEABLE_SIZE      0x1000000
+
+#define SVGA_MAX_PSEUDOCOLOR_DEPTH      8
+#define SVGA_MAX_PSEUDOCOLORS           (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH)
+#define SVGA_NUM_PALETTE_REGS           (3 * SVGA_MAX_PSEUDOCOLORS)
+
+#define SVGA_MAGIC         0x900000UL
+#define SVGA_MAKE_ID(ver)  (SVGA_MAGIC << 8 | (ver))
+
+/* Version 2 let the address of the frame buffer be unsigned on Win32 */
+#define SVGA_VERSION_2     2
+#define SVGA_ID_2          SVGA_MAKE_ID(SVGA_VERSION_2)
+
+/* Version 1 has new registers starting with SVGA_REG_CAPABILITIES so
+   PALETTE_BASE has moved */
+#define SVGA_VERSION_1     1
+#define SVGA_ID_1          SVGA_MAKE_ID(SVGA_VERSION_1)
+
+/* Version 0 is the initial version */
+#define SVGA_VERSION_0     0
+#define SVGA_ID_0          SVGA_MAKE_ID(SVGA_VERSION_0)
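+
+/*
+ * For example (illustrative): SVGA_ID_2 == SVGA_MAKE_ID(2) ==
+ * (0x900000UL << 8) | 2 == 0x90000002.
+ */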
+
+/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
+#define SVGA_ID_INVALID    0xFFFFFFFF
+
+/* Port offsets, relative to BAR0 */
+#define SVGA_INDEX_PORT         0x0
+#define SVGA_VALUE_PORT         0x1
+#define SVGA_BIOS_PORT          0x2
+#define SVGA_IRQSTATUS_PORT     0x8
+
+/*
+ * Interrupt source flags for IRQSTATUS_PORT and IRQMASK.
+ *
+ * Interrupts are only supported when the
+ * SVGA_CAP_IRQMASK capability is present.
+ */
+#define SVGA_IRQFLAG_ANY_FENCE            0x1    /* Any fence was passed */
+#define SVGA_IRQFLAG_FIFO_PROGRESS        0x2    /* Made forward progress in the FIFO */
+#define SVGA_IRQFLAG_FENCE_GOAL           0x4    /* SVGA_FIFO_FENCE_GOAL reached */
+#define SVGA_IRQFLAG_COMMAND_BUFFER       0x8    /* Command buffer completed */
+#define SVGA_IRQFLAG_ERROR                0x10   /* Error while processing commands */
+
+/*
+ * Registers
+ */
+
+enum {
+   SVGA_REG_ID = 0,
+   SVGA_REG_ENABLE = 1,
+   SVGA_REG_WIDTH = 2,
+   SVGA_REG_HEIGHT = 3,
+   SVGA_REG_MAX_WIDTH = 4,
+   SVGA_REG_MAX_HEIGHT = 5,
+   SVGA_REG_DEPTH = 6,
+   SVGA_REG_BITS_PER_PIXEL = 7,       /* Current bpp in the guest */
+   SVGA_REG_PSEUDOCOLOR = 8,
+   SVGA_REG_RED_MASK = 9,
+   SVGA_REG_GREEN_MASK = 10,
+   SVGA_REG_BLUE_MASK = 11,
+   SVGA_REG_BYTES_PER_LINE = 12,
+   SVGA_REG_FB_START = 13,            /* (Deprecated) */
+   SVGA_REG_FB_OFFSET = 14,
+   SVGA_REG_VRAM_SIZE = 15,
+   SVGA_REG_FB_SIZE = 16,
+
+   /* ID 0 implementation only had the above registers, then the palette */
+   SVGA_REG_ID_0_TOP = 17,
+
+   SVGA_REG_CAPABILITIES = 17,
+   SVGA_REG_MEM_START = 18,           /* (Deprecated) */
+   SVGA_REG_MEM_SIZE = 19,
+   SVGA_REG_CONFIG_DONE = 20,         /* Set when memory area configured */
+   SVGA_REG_SYNC = 21,                /* See "FIFO Synchronization Registers" */
+   SVGA_REG_BUSY = 22,                /* See "FIFO Synchronization Registers" */
+   SVGA_REG_GUEST_ID = 23,            /* Set guest OS identifier */
+   SVGA_REG_CURSOR_ID = 24,           /* (Deprecated) */
+   SVGA_REG_CURSOR_X = 25,            /* (Deprecated) */
+   SVGA_REG_CURSOR_Y = 26,            /* (Deprecated) */
+   SVGA_REG_CURSOR_ON = 27,           /* (Deprecated) */
+   SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* (Deprecated) */
+   SVGA_REG_SCRATCH_SIZE = 29,        /* Number of scratch registers */
+   SVGA_REG_MEM_REGS = 30,            /* Number of FIFO registers */
+   SVGA_REG_NUM_DISPLAYS = 31,        /* (Deprecated) */
+   SVGA_REG_PITCHLOCK = 32,           /* Fixed pitch for all modes */
+   SVGA_REG_IRQMASK = 33,             /* Interrupt mask */
+
+   /* Legacy multi-monitor support */
+   SVGA_REG_NUM_GUEST_DISPLAYS = 34,/* Number of guest displays in X/Y direction */
+   SVGA_REG_DISPLAY_ID = 35,        /* Display ID for the following display attributes */
+   SVGA_REG_DISPLAY_IS_PRIMARY = 36,/* Whether this is a primary display */
+   SVGA_REG_DISPLAY_POSITION_X = 37,/* The display position x */
+   SVGA_REG_DISPLAY_POSITION_Y = 38,/* The display position y */
+   SVGA_REG_DISPLAY_WIDTH = 39,     /* The display's width */
+   SVGA_REG_DISPLAY_HEIGHT = 40,    /* The display's height */
+
+   /* See "Guest memory regions" below. */
+   SVGA_REG_GMR_ID = 41,
+   SVGA_REG_GMR_DESCRIPTOR = 42,
+   SVGA_REG_GMR_MAX_IDS = 43,
+   SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
+
+   SVGA_REG_TRACES = 45,            /* Enable trace-based updates even when FIFO is on */
+   SVGA_REG_GMRS_MAX_PAGES = 46,    /* Maximum number of 4KB pages for all GMRs */
+   SVGA_REG_MEMORY_SIZE = 47,       /* Total dedicated device memory excluding FIFO */
+   SVGA_REG_COMMAND_LOW = 48,       /* Lower 32 bits of command buf PA; write submits */
+   SVGA_REG_COMMAND_HIGH = 49,      /* Upper 32 bits of command buffer PA */
+   SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,   /* Max primary memory */
+   SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
+   SVGA_REG_DEV_CAP = 52,           /* Write dev cap index, read value */
+   SVGA_REG_CMD_PREPEND_LOW = 53,
+   SVGA_REG_CMD_PREPEND_HIGH = 54,
+   SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
+   SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
+   SVGA_REG_MOB_MAX_SIZE = 57,
+   SVGA_REG_TOP = 58,               /* Must be 1 more than the last register */
+
+   SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
+   /* Next 768 (== 256*3) registers exist for colormap */
+   SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
+                                    /* Base of scratch registers */
+   /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
+      First 4 are reserved for VESA BIOS Extension; any remaining are for
+      the use of the current SVGA driver. */
+};
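+
+/*
+ * For example (illustrative): with SVGA_NUM_PALETTE_REGS == 3 * 256 ==
+ * 768, the scratch registers start at SVGA_SCRATCH_BASE == 1024 + 768 ==
+ * 1792.
+ */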
+
+/*
+ * Guest memory regions (GMRs):
+ *
+ * This is a new memory mapping feature available in SVGA devices
+ * which have the SVGA_CAP_GMR bit set. Previously, there were two
+ * fixed memory regions available with which to share data between the
+ * device and the driver: the FIFO ('MEM') and the framebuffer. GMRs
+ * are our name for an extensible way of providing arbitrary DMA
+ * buffers for use between the driver and the SVGA device. They are a
+ * new alternative to framebuffer memory, usable for both 2D and 3D
+ * graphics operations.
+ *
+ * Since GMR mapping must be done synchronously with guest CPU
+ * execution, we use a new pair of SVGA registers:
+ *
+ *   SVGA_REG_GMR_ID --
+ *
+ *     Read/write.
+ *     This register holds the 32-bit ID (a small positive integer)
+ *     of a GMR to create, delete, or redefine. Writing this register
+ *     has no side-effects.
+ *
+ *   SVGA_REG_GMR_DESCRIPTOR --
+ *
+ *     Write-only.
+ *     Writing this register will create, delete, or redefine the GMR
+ *     specified by the above ID register. If this register is zero,
+ *     the GMR is deleted. Any pointers into this GMR (including those
+ *     currently being processed by FIFO commands) will be
+ *     synchronously invalidated.
+ *
+ *     If this register is nonzero, it must be the physical page
+ *     number (PPN) of a data structure which describes the physical
+ *     layout of the memory region this GMR should describe. The
+ *     descriptor structure will be read synchronously by the SVGA
+ *     device when this register is written. The descriptor need not
+ *     remain allocated for the lifetime of the GMR.
+ *
+ *     The guest driver should write SVGA_REG_GMR_ID first, then
+ *     SVGA_REG_GMR_DESCRIPTOR.
+ *
+ *   SVGA_REG_GMR_MAX_IDS --
+ *
+ *     Read-only.
+ *     The SVGA device may choose to support a maximum number of
+ *     user-defined GMR IDs. This register holds the number of supported
+ *     IDs. (The maximum supported ID plus 1)
+ *
+ *   SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH --
+ *
+ *     Read-only.
+ *     The SVGA device may choose to put a limit on the total number
+ *     of SVGAGuestMemDescriptor structures it will read when defining
+ *     a single GMR.
+ *
+ * The descriptor structure is an array of SVGAGuestMemDescriptor
+ * structures. Each structure may do one of three things:
+ *
+ *   - Terminate the GMR descriptor list.
+ *     (ppn==0, numPages==0)
+ *
+ *   - Add a PPN or range of PPNs to the GMR's virtual address space.
+ *     (ppn != 0, numPages != 0)
+ *
+ *   - Provide the PPN of the next SVGAGuestMemDescriptor, in order to
+ *     support multi-page GMR descriptor tables without forcing the
+ *     driver to allocate physically contiguous memory.
+ *     (ppn != 0, numPages == 0)
+ *
+ * Note that each physical page of SVGAGuestMemDescriptor structures
+ * can describe at least 2MB of guest memory. If the driver needs to
+ * use more than one page of descriptor structures, it must use one of
+ * its SVGAGuestMemDescriptors to point to an additional page.  The
+ * device will never automatically cross a page boundary.
+ *
+ * Once the driver has described a GMR, it is immediately available
+ * for use via any FIFO command that uses an SVGAGuestPtr structure.
+ * These pointers include a GMR identifier plus an offset into that
+ * GMR.
+ *
+ * The driver must check the SVGA_CAP_GMR bit before using the GMR
+ * registers.
+ */
+
+/*
+ * Special GMR IDs, allowing SVGAGuestPtrs to point to framebuffer
+ * memory as well.  In the future, these IDs could even be used to
+ * allow legacy memory regions to be redefined by the guest as GMRs.
+ *
+ * Using the guest framebuffer (GFB) at BAR1 for general purpose DMA
+ * is being phased out. Please try to use user-defined GMRs whenever
+ * possible.
+ */
+#define SVGA_GMR_NULL         ((uint32) -1)
+#define SVGA_GMR_FRAMEBUFFER  ((uint32) -2)  /* Guest Framebuffer (GFB) */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAGuestMemDescriptor {
+   uint32 ppn;
+   uint32 numPages;
+}
+#include "vmware_pack_end.h"
+SVGAGuestMemDescriptor;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAGuestPtr {
+   uint32 gmrId;
+   uint32 offset;
+}
+#include "vmware_pack_end.h"
+SVGAGuestPtr;
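+
+/*
+ * Illustrative sketch (not part of the device interface): one way a
+ * driver might define a one-page GMR using the registers described
+ * above.  svga_write_reg() is a hypothetical register accessor, and
+ * the descriptor page and data page are assumed to be pinned guest
+ * pages whose PPNs the caller already knows.
+ */
+extern void svga_write_reg(uint32 index, uint32 value); /* hypothetical */
+
+static inline void
+example_define_gmr(uint32 gmrId, SVGAGuestMemDescriptor *desc,
+                   uint32 descPpn, uint32 dataPpn)
+{
+   desc[0].ppn = dataPpn;        /* Map one page of guest memory */
+   desc[0].numPages = 1;
+   desc[1].ppn = 0;              /* Terminate the descriptor list */
+   desc[1].numPages = 0;
+
+   svga_write_reg(SVGA_REG_GMR_ID, gmrId);            /* Select the GMR */
+   svga_write_reg(SVGA_REG_GMR_DESCRIPTOR, descPpn);  /* Define it */
+}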
+
+/*
+ * Register based command buffers --
+ *
+ * Provide an SVGA device interface that allows the guest to submit
+ * command buffers to the SVGA device through an SVGA device register.
+ * The metadata for each command buffer is contained in the
+ * SVGACBHeader structure along with the return status codes.
+ *
+ * The SVGA device supports command buffers if
+ * SVGA_CAP_COMMAND_BUFFERS is set in the device caps register.  The
+ * FIFO must be enabled for command buffers to be submitted.
+ *
+ * Command buffers are submitted by the guest writing a 64 byte
+ * aligned physical address into the SVGA_REG_COMMAND_LOW and
+ * SVGA_REG_COMMAND_HIGH registers.  SVGA_REG_COMMAND_HIGH contains
+ * the upper 32 bits of the physical address.  SVGA_REG_COMMAND_LOW
+ * contains the lower 32 bits of the physical address; since the
+ * command buffer headers are required to be 64 byte aligned, the
+ * lower 6 bits are used for the SVGACBContext value.  Writing to
+ * SVGA_REG_COMMAND_LOW
+ * submits the command buffer to the device and queues it for
+ * execution.  The SVGA device supports at least
+ * SVGA_CB_MAX_QUEUED_PER_CONTEXT command buffers that can be queued
+ * per context and if that limit is reached the device will write the
+ * status SVGA_CB_STATUS_QUEUE_FULL to the status value of the command
+ * buffer header synchronously and not raise any IRQs.
+ *
+ * It is invalid to submit a command buffer without a valid physical
+ * address, and results are undefined.
+ *
+ * The device guarantees that command buffers of size SVGA_CB_MAX_SIZE
+ * will be supported.  If a larger command buffer is submitted results
+ * are unspecified and the device will either complete the command
+ * buffer or return an error.
+ *
+ * The device guarantees that any individual command in a command
+ * buffer can be up to SVGA_CB_MAX_COMMAND_SIZE in size which is
+ * enough to fit a 64x64 color-cursor definition.  If the command is
+ * too large the device is allowed to process the command or return an
+ * error.
+ *
+ * The device context is a special SVGACBContext that allows for
+ * synchronous register-like accesses with the flexibility of
+ * commands.  There is a different command set defined by
+ * SVGADeviceContextCmdId.  The commands in each command buffer are
+ * not allowed to straddle physical pages.
+ *
+ * The offset field which is available starting with the
+ * SVGA_CAP_CMD_BUFFERS_2 cap bit can be set by the guest to bias the
+ * start of command processing into the buffer.  If an error is
+ * encountered the errorOffset will still be relative to the specific
+ * PA, not biased by the offset.  When the command buffer is finished,
+ * the guest should not read the offset field, as there is no
+ * guarantee what it will be set to.
+ */
+
+#define SVGA_CB_MAX_SIZE (512 * 1024)  /* 512 KB */
+#define SVGA_CB_MAX_QUEUED_PER_CONTEXT 32
+#define SVGA_CB_MAX_COMMAND_SIZE (32 * 1024) /* 32 KB */
+
+#define SVGA_CB_CONTEXT_MASK 0x3f
+typedef enum {
+   SVGA_CB_CONTEXT_DEVICE = 0x3f,
+   SVGA_CB_CONTEXT_0      = 0x0,
+   SVGA_CB_CONTEXT_MAX    = 0x1,
+} SVGACBContext;
+
+
+typedef enum {
+   /*
+    * The guest is supposed to write SVGA_CB_STATUS_NONE to the status
+    * field before submitting the command buffer header; the host will
+    * change the value when it is done with the command buffer.
+    */
+   SVGA_CB_STATUS_NONE             = 0,
+
+   /*
+    * Written by the host when a command buffer completes successfully.
+    * The device raises an IRQ with SVGA_IRQFLAG_COMMAND_BUFFER unless
+    * the SVGA_CB_FLAG_NO_IRQ flag is set.
+    */
+   SVGA_CB_STATUS_COMPLETED        = 1,
+
+   /*
+    * Written by the host synchronously with the command buffer
+    * submission to indicate the command buffer was not submitted.  No
+    * IRQ is raised.
+    */
+   SVGA_CB_STATUS_QUEUE_FULL       = 2,
+
+   /*
+    * Written by the host when an error was detected parsing a command
+    * in the command buffer, errorOffset is written to contain the
+    * offset to the first byte of the failing command.  The device
+    * raises the IRQ with both SVGA_IRQFLAG_ERROR and
+    * SVGA_IRQFLAG_COMMAND_BUFFER.  Some of the commands may have been
+    * processed.
+    */
+   SVGA_CB_STATUS_COMMAND_ERROR    = 3,
+
+   /*
+    * Written by the host if there is an error parsing the command
+    * buffer header.  The device raises the IRQ with both
+    * SVGA_IRQFLAG_ERROR and SVGA_IRQFLAG_COMMAND_BUFFER.  The device
+    * did not process any of the command buffer.
+    */
+   SVGA_CB_STATUS_CB_HEADER_ERROR  = 4,
+
+   /*
+    * Written by the host if the guest requested the host to preempt
+    * the command buffer.  The device will not raise any IRQs and the
+    * command buffer was not processed.
+    */
+   SVGA_CB_STATUS_PREEMPTED        = 5,
+
+   /*
+    * Written by the host synchronously with the command buffer
+    * submission to indicate that the command buffer was not submitted
+    * due to an error.  No IRQ is raised.
+    */
+   SVGA_CB_STATUS_SUBMISSION_ERROR = 6,
+} SVGACBStatus;
+
+typedef enum {
+   SVGA_CB_FLAG_NONE       = 0,
+   SVGA_CB_FLAG_NO_IRQ     = 1 << 0,
+   SVGA_CB_FLAG_DX_CONTEXT = 1 << 1,
+   SVGA_CB_FLAG_MOB        = 1 << 2,
+} SVGACBFlags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   volatile SVGACBStatus status;
+   volatile uint32 errorOffset;
+   uint64 id;
+   SVGACBFlags flags;
+   uint32 length;
+   union {
+      PA pa;
+      struct {
+         SVGAMobId mobid;
+         uint32 mobOffset;
+      } mob;
+   } ptr;
+   uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise */
+   uint32 dxContext; /* Valid if DX_CONTEXT flag set, must be zero otherwise */
+   uint32 mustBeZero[6];
+}
+#include "vmware_pack_end.h"
+SVGACBHeader;
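+
+/*
+ * Illustrative sketch: submitting one command buffer, following the
+ * register protocol described above.  'headerPA' is assumed to be the
+ * 64 byte aligned physical address of the header, with ptr and length
+ * already filled in; svga_write_reg() is a hypothetical accessor.
+ */
+static inline void
+example_submit_cb(SVGACBHeader *header, PA headerPA, SVGACBContext cbCtx)
+{
+   header->status = SVGA_CB_STATUS_NONE;  /* Host overwrites when done */
+
+   svga_write_reg(SVGA_REG_COMMAND_HIGH, (uint32)(headerPA >> 32));
+
+   /*
+    * The low 6 bits of the (aligned) address carry the context ID;
+    * this second write both queues and submits the command buffer.
+    */
+   svga_write_reg(SVGA_REG_COMMAND_LOW,
+                  (uint32)headerPA | (uint32)cbCtx);
+}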
+
+typedef enum {
+   SVGA_DC_CMD_NOP                   = 0,
+   SVGA_DC_CMD_START_STOP_CONTEXT    = 1,
+   SVGA_DC_CMD_PREEMPT               = 2,
+   SVGA_DC_CMD_MAX                   = 3,
+   SVGA_DC_CMD_FORCE_UINT            = MAX_UINT32,
+} SVGADeviceContextCmdId;
+
+typedef struct {
+   uint32 enable;
+   SVGACBContext context;
+} SVGADCCmdStartStop;
+
+/*
+ * SVGADCCmdPreempt --
+ *
+ * This command allows the guest to request that all command buffers
+ * on the specified context that can be preempted are preempted.
+ * After execution of this command all command buffers that were
+ * preempted will already have SVGA_CB_STATUS_PREEMPTED written into
+ * the status field.  The device might still be processing a command
+ * buffer, assuming its execution started before the preemption
+ * request was received.  Setting the ignoreIDZero flag to TRUE will
+ * cause the device not to preempt command buffers with the id field
+ * in the command buffer header set to zero.
+ */
+
+typedef struct {
+   SVGACBContext context;
+   uint32 ignoreIDZero;
+} SVGADCCmdPreempt;
+
+/*
+ * SVGAGMRImageFormat --
+ *
+ *    This is a packed representation of the source 2D image format
+ *    for a GMR-to-screen blit. Currently it is defined as an encoding
+ *    of the screen's color depth and bits-per-pixel; however, 16 bits
+ *    are reserved for future use to identify other encodings (such as
+ *    RGBA or higher-precision images).
+ *
+ *    Currently supported formats:
+ *
+ *       bpp depth  Format Name
+ *       --- -----  -----------
+ *        32    24  32-bit BGRX
+ *        24    24  24-bit BGR
+ *        16    16  RGB 5-6-5
+ *        16    15  RGB 5-5-5
+ *
+ */
+
+typedef struct SVGAGMRImageFormat {
+   union {
+      struct {
+         uint32 bitsPerPixel : 8;
+         uint32 colorDepth   : 8;
+         uint32 reserved     : 16;  /* Must be zero */
+      };
+
+      uint32 value;
+   };
+} SVGAGMRImageFormat;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAGuestImage {
+   SVGAGuestPtr         ptr;
+
+   /*
+    * A note on interpretation of pitch: This value of pitch is the
+    * number of bytes between vertically adjacent image
+    * blocks. Normally this is the number of bytes between the first
+    * pixel of two adjacent scanlines. With compressed textures,
+    * however, this may represent the number of bytes between
+    * compression blocks rather than between rows of pixels.
+    *
+    * XXX: Compressed textures currently must be tightly packed in guest memory.
+    *
+    * If the image is 1-dimensional, pitch is ignored.
+    *
+    * If 'pitch' is zero, the SVGA3D device calculates a pitch value
+    * assuming each row of blocks is tightly packed.
+    */
+   uint32 pitch;
+}
+#include "vmware_pack_end.h"
+SVGAGuestImage;
+
+/*
+ * SVGAColorBGRX --
+ *
+ *    A 24-bit color format (BGRX), which does not depend on the
+ *    format of the legacy guest framebuffer (GFB) or the current
+ *    GMRFB state.
+ */
+
+typedef struct SVGAColorBGRX {
+   union {
+      struct {
+         uint32 b : 8;
+         uint32 g : 8;
+         uint32 r : 8;
+         uint32 x : 8;  /* Unused */
+      };
+
+      uint32 value;
+   };
+} SVGAColorBGRX;
+
+
+/*
+ * SVGASignedRect --
+ * SVGASignedPoint --
+ *
+ *    Signed rectangle and point primitives. These are used by the new
+ *    2D primitives for drawing to Screen Objects, which can occupy a
+ *    signed virtual coordinate space.
+ *
+ *    SVGASignedRect specifies a half-open interval: the (left, top)
+ *    pixel is part of the rectangle, but the (right, bottom) pixel is
+ *    not.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   int32  left;
+   int32  top;
+   int32  right;
+   int32  bottom;
+}
+#include "vmware_pack_end.h"
+SVGASignedRect;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   int32  x;
+   int32  y;
+}
+#include "vmware_pack_end.h"
+SVGASignedPoint;
+
+
+/*
+ * SVGA Device Capabilities
+ *
+ * Note the holes in the bitfield. Missing bits have been deprecated,
+ * and must not be reused. Those capabilities will never be reported
+ * by new versions of the SVGA device.
+ *
+ * XXX: Add longer descriptions for each capability, including a list
+ *      of the new features that each capability provides.
+ *
+ * SVGA_CAP_IRQMASK --
+ *    Provides device interrupts.  Adds device register SVGA_REG_IRQMASK
+ *    to set interrupt mask and direct I/O port SVGA_IRQSTATUS_PORT to
+ *    set/clear pending interrupts.
+ *
+ * SVGA_CAP_GMR --
+ *    Provides synchronous mapping of guest memory regions (GMR).
+ *    Adds device registers SVGA_REG_GMR_ID, SVGA_REG_GMR_DESCRIPTOR,
+ *    SVGA_REG_GMR_MAX_IDS, and SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH.
+ *
+ * SVGA_CAP_TRACES --
+ *    Allows framebuffer trace-based updates even when FIFO is enabled.
+ *    Adds device register SVGA_REG_TRACES.
+ *
+ * SVGA_CAP_GMR2 --
+ *    Provides asynchronous commands to define and remap guest memory
+ *    regions.  Adds device registers SVGA_REG_GMRS_MAX_PAGES and
+ *    SVGA_REG_MEMORY_SIZE.
+ *
+ * SVGA_CAP_SCREEN_OBJECT_2 --
+ *    Allows screen object support and requires backing stores from
+ *    the guest for each screen object.
+ *
+ * SVGA_CAP_COMMAND_BUFFERS --
+ *    Enables register-based command buffer submission.
+ *
+ * SVGA_CAP_DEAD1 --
+ *    This cap was incorrectly used by old drivers and should not be
+ *    reused.
+ *
+ * SVGA_CAP_CMD_BUFFERS_2 --
+ *    Enables support for the prepend command buffer submission
+ *    registers, SVGA_REG_CMD_PREPEND_LOW and
+ *    SVGA_REG_CMD_PREPEND_HIGH.
+ *
+ * SVGA_CAP_GBOBJECTS --
+ *    Enables guest-backed objects and surfaces.
+ *
+ * SVGA_CAP_CMD_BUFFERS_3 --
+ *    Enables support for command buffers in a mob.
+ */
+
+#define SVGA_CAP_NONE               0x00000000
+#define SVGA_CAP_RECT_COPY          0x00000002
+#define SVGA_CAP_CURSOR             0x00000020
+#define SVGA_CAP_CURSOR_BYPASS      0x00000040
+#define SVGA_CAP_CURSOR_BYPASS_2    0x00000080
+#define SVGA_CAP_8BIT_EMULATION     0x00000100
+#define SVGA_CAP_ALPHA_CURSOR       0x00000200
+#define SVGA_CAP_3D                 0x00004000
+#define SVGA_CAP_EXTENDED_FIFO      0x00008000
+#define SVGA_CAP_MULTIMON           0x00010000
+#define SVGA_CAP_PITCHLOCK          0x00020000
+#define SVGA_CAP_IRQMASK            0x00040000
+#define SVGA_CAP_DISPLAY_TOPOLOGY   0x00080000
+#define SVGA_CAP_GMR                0x00100000
+#define SVGA_CAP_TRACES             0x00200000
+#define SVGA_CAP_GMR2               0x00400000
+#define SVGA_CAP_SCREEN_OBJECT_2    0x00800000
+#define SVGA_CAP_COMMAND_BUFFERS    0x01000000
+#define SVGA_CAP_DEAD1              0x02000000
+#define SVGA_CAP_CMD_BUFFERS_2      0x04000000
+#define SVGA_CAP_GBOBJECTS          0x08000000
+#define SVGA_CAP_DX                 0x10000000
+
+#define SVGA_CAP_CMD_RESERVED       0x80000000
+
+
+/*
+ * The Guest can optionally read some SVGA device capabilities through
+ * the backdoor with command BDOOR_CMD_GET_SVGA_CAPABILITIES before
+ * the SVGA device is initialized.  The type of capability the guest
+ * is requesting from the SVGABackdoorCapType enum should be placed in
+ * the upper 16 bits of the backdoor command id (ECX).  On success the
+ * the value of EBX will be set to BDOOR_MAGIC and EAX will be set to
+ * the requested capability.  If the command is not supported then EBX
+ * will be left unchanged and EAX will be set to -1.  Because it is
+ * possible that -1 is the value of the requested cap the correct way
+ * to check if the command was successful is to check if EBX was changed
+ * to BDOOR_MAGIC making sure to initialize the register to something
+ * else first.
+ */
+
+typedef enum {
+   SVGABackdoorCapDeviceCaps = 0,
+   SVGABackdoorCapFifoCaps = 1,
+   SVGABackdoorCap3dHWVersion = 2,
+   SVGABackdoorCapMax = 3,
+} SVGABackdoorCapType;
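+
+/*
+ * Illustrative sketch of the check described above.  backdoor_call()
+ * is a hypothetical wrapper around the guest backdoor mechanism that
+ * takes ECX in and returns EAX/EBX; BDOOR_MAGIC and
+ * BDOOR_CMD_GET_SVGA_CAPABILITIES are assumed to come from the
+ * backdoor headers.
+ */
+extern void backdoor_call(uint32 ecxIn, uint32 *eaxOut, uint32 *ebxOut);
+
+static inline Bool
+example_get_backdoor_cap(SVGABackdoorCapType type, uint32 *cap)
+{
+   uint32 eax = 0;
+   uint32 ebx = 0;    /* Anything other than BDOOR_MAGIC */
+
+   backdoor_call(BDOOR_CMD_GET_SVGA_CAPABILITIES | ((uint32)type << 16),
+                 &eax, &ebx);
+
+   if (ebx != BDOOR_MAGIC)
+      return false;   /* EBX unchanged: command not supported */
+
+   *cap = eax;        /* May legitimately be (uint32)-1 */
+   return true;
+}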
+
+
+/*
+ * FIFO register indices.
+ *
+ * The FIFO is a chunk of device memory mapped into guest physmem.  It
+ * is always treated as 32-bit words.
+ *
+ * The guest driver gets to decide how to partition it between
+ * - FIFO registers (there are always at least 4, specifying where the
+ *   following data area is and how much data it contains; there may be
+ *   more registers following these, depending on the FIFO protocol
+ *   version in use)
+ * - FIFO data, written by the guest and slurped out by the VMX.
+ * These indices are 32-bit word offsets into the FIFO.
+ */
+
+enum {
+   /*
+    * Block 1 (basic registers): The originally defined FIFO registers.
+    * These exist and are valid for all versions of the FIFO protocol.
+    */
+
+   SVGA_FIFO_MIN = 0,
+   SVGA_FIFO_MAX,       /* The distance from MIN to MAX must be at least 10K */
+   SVGA_FIFO_NEXT_CMD,
+   SVGA_FIFO_STOP,
+
+   /*
+    * Block 2 (extended registers): Mandatory registers for the extended
+    * FIFO.  These exist if the SVGA caps register includes
+    * SVGA_CAP_EXTENDED_FIFO; some of them are valid only if their
+    * associated capability bit is enabled.
+    *
+    * Note that when originally defined, SVGA_CAP_EXTENDED_FIFO implied
+    * support only for (FIFO registers) CAPABILITIES, FLAGS, and FENCE.
+    * This means that the guest has to test individually (in most cases
+    * using FIFO caps) for the presence of registers after this; the VMX
+    * can define "extended FIFO" to mean whatever it wants, and currently
+    * won't enable it unless there's room for that set and much more.
+    */
+
+   SVGA_FIFO_CAPABILITIES = 4,
+   SVGA_FIFO_FLAGS,
+   /* Valid with SVGA_FIFO_CAP_FENCE: */
+   SVGA_FIFO_FENCE,
+
+   /*
+    * Block 3a (optional extended registers): Additional registers for the
+    * extended FIFO, whose presence isn't actually implied by
+    * SVGA_CAP_EXTENDED_FIFO; these exist if SVGA_FIFO_MIN is high enough to
+    * leave room for them.
+    *
+ *    The VMX currently considers the registers in this block 3a
+ *    mandatory for the extended FIFO.
+    */
+
+   /* Valid if exists (i.e. if extended FIFO enabled): */
+   SVGA_FIFO_3D_HWVERSION,       /* See SVGA3dHardwareVersion in svga3d_reg.h */
+   /* Valid with SVGA_FIFO_CAP_PITCHLOCK: */
+   SVGA_FIFO_PITCHLOCK,
+
+   /* Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3: */
+   SVGA_FIFO_CURSOR_ON,          /* Cursor bypass 3 show/hide register */
+   SVGA_FIFO_CURSOR_X,           /* Cursor bypass 3 x register */
+   SVGA_FIFO_CURSOR_Y,           /* Cursor bypass 3 y register */
+   SVGA_FIFO_CURSOR_COUNT,       /* Incremented when any of the other 3 change */
+   SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
+
+   /* Valid with SVGA_FIFO_CAP_RESERVE: */
+   SVGA_FIFO_RESERVED,           /* Bytes past NEXT_CMD with real contents */
+
+   /*
+    * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2:
+    *
+    * By default this is SVGA_ID_INVALID, to indicate that the cursor
+    * coordinates are specified relative to the virtual root. If this
+    * is set to a specific screen ID, cursor position is reinterpreted
+    * as a signed offset relative to that screen's origin.
+    */
+   SVGA_FIFO_CURSOR_SCREEN_ID,
+
+   /*
+    * Valid with SVGA_FIFO_CAP_DEAD
+    *
+    * An arbitrary value written by the host, drivers should not use it.
+    */
+   SVGA_FIFO_DEAD,
+
+   /*
+    * Valid with SVGA_FIFO_CAP_3D_HWVERSION_REVISED:
+    *
+    * Contains 3D HWVERSION (see SVGA3dHardwareVersion in svga3d_reg.h)
+    * on platforms that can enforce graphics resource limits.
+    */
+   SVGA_FIFO_3D_HWVERSION_REVISED,
+
+   /*
+    * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
+    * registers, but this must be done carefully and with judicious use of
+    * capability bits, since comparisons based on SVGA_FIFO_MIN aren't
+    * enough to tell you whether the register exists: we've shipped drivers
+    * and products that used SVGA_FIFO_3D_CAPS but didn't know about some of
+    * the earlier ones.  The actual order of introduction was:
+    * - PITCHLOCK
+    * - 3D_CAPS
+    * - CURSOR_* (cursor bypass 3)
+    * - RESERVED
+    * So, code that wants to know whether it can use any of the
+    * aforementioned registers, or anything else added after PITCHLOCK and
+    * before 3D_CAPS, needs to reason about something other than
+    * SVGA_FIFO_MIN.
+    */
+
+   /*
+    * 3D caps block space; valid with 3D hardware version >=
+    * SVGA3D_HWVERSION_WS6_B1.
+    */
+   SVGA_FIFO_3D_CAPS      = 32,
+   SVGA_FIFO_3D_CAPS_LAST = 32 + 255,
+
+   /*
+    * End of VMX's current definition of "extended-FIFO registers".
+    * Registers before here are always enabled/disabled as a block; either
+    * the extended FIFO is enabled and includes all preceding registers, or
+    * it's disabled entirely.
+    *
+    * Block 3b (truly optional extended registers): Additional registers for
+    * the extended FIFO, which the VMX already knows how to enable and
+    * disable with correct granularity.
+    *
+    * Registers after here exist if and only if the guest SVGA driver
+    * sets SVGA_FIFO_MIN high enough to leave room for them.
+    */
+
+   /* Valid if register exists: */
+   SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
+   SVGA_FIFO_FENCE_GOAL,         /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
+   SVGA_FIFO_BUSY,               /* See "FIFO Synchronization Registers" */
+
+   /*
+    * Always keep this last.  This defines the maximum number of
+    * registers we know about.  At power-on, this value is placed in
+    * the SVGA_REG_MEM_REGS register, and we expect the guest driver
+    * to allocate this much space in FIFO memory for registers.
+    */
+    SVGA_FIFO_NUM_REGS
+};
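+
+/*
+ * Illustrative sketch: testing whether an optional FIFO register
+ * exists.  SVGA_FIFO_MIN holds a byte offset while register indices
+ * are 32-bit word offsets, so a register exists only if the register
+ * area extends past it.  Note that, per the comment above, this test
+ * alone is not sufficient for the registers between PITCHLOCK and
+ * 3D_CAPS; 'fifo' is assumed to point at the mapped FIFO memory.
+ */
+static inline Bool
+example_fifo_has_reg(const volatile uint32 *fifo, uint32 reg)
+{
+   return fifo[SVGA_FIFO_MIN] > reg * sizeof(uint32);
+}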
+
+
+/*
+ * Definition of registers included in extended FIFO support.
+ *
+ * The guest SVGA driver gets to allocate the FIFO between registers
+ * and data.  It must always allocate at least 4 registers, but old
+ * drivers stopped there.
+ *
+ * The VMX will enable extended FIFO support if and only if the guest
+ * left enough room for all registers defined as part of the mandatory
+ * set for the extended FIFO.
+ *
+ * Note that the guest drivers typically allocate the FIFO only at
+ * initialization time, not at mode switches, so it's likely that the
+ * number of FIFO registers won't change without a reboot.
+ *
+ * All registers less than this value are guaranteed to be present if
+ * svgaUser->fifo.extended is set. Any later registers must be tested
+ * individually for compatibility at each use (in the VMX).
+ *
+ * This value is used only by the VMX, so it can change without
+ * affecting driver compatibility; keep it that way?
+ */
+#define SVGA_FIFO_EXTENDED_MANDATORY_REGS  (SVGA_FIFO_3D_CAPS_LAST + 1)
+
+
+/*
+ * FIFO Synchronization Registers
+ *
+ *  This explains the relationship between the various FIFO
+ *  sync-related registers in IOSpace and in FIFO space.
+ *
+ *  SVGA_REG_SYNC --
+ *
+ *       The SYNC register can be used in two different ways by the guest:
+ *
+ *         1. If the guest wishes to fully sync (drain) the FIFO,
+ *            it will write once to SYNC then poll on the BUSY
+ *            register. The FIFO is sync'ed once BUSY is zero.
+ *
+ *         2. If the guest wants to asynchronously wake up the host,
+ *            it will write once to SYNC without polling on BUSY.
+ *            Ideally it will do this after some new commands have
+ *            been placed in the FIFO, and after reading a zero
+ *            from SVGA_FIFO_BUSY.
+ *
+ *       (1) is the original behaviour that SYNC was designed to
+ *       support.  Originally, a write to SYNC would implicitly
+ *       trigger a read from BUSY. This causes us to synchronously
+ *       process the FIFO.
+ *
+ *       This behaviour has since been changed so that writing SYNC
+ *       will *not* implicitly cause a read from BUSY. Instead, it
+ *       makes a channel call which asynchronously wakes up the MKS
+ *       thread.
+ *
+ *       New guests can use this new behaviour to implement (2)
+ *       efficiently. This lets guests get the host's attention
+ *       without waiting for the MKS to poll, which gives us much
+ *       better CPU utilization on SMP hosts and on UP hosts while
+ *       we're blocked on the host GPU.
+ *
+ *       Old guests shouldn't notice the behaviour change. SYNC was
+ *       never guaranteed to process the entire FIFO, since it was
+ *       bounded to a particular number of CPU cycles. Old guests will
+ *       still loop on the BUSY register until the FIFO is empty.
+ *
+ *       Writing to SYNC currently has the following side-effects:
+ *
+ *         - Sets SVGA_REG_BUSY to TRUE (in the monitor)
+ *         - Asynchronously wakes up the MKS thread for FIFO processing
+ *         - The value written to SYNC is recorded as a "reason", for
+ *           stats purposes.
+ *
+ *       If SVGA_FIFO_BUSY is available, drivers are advised to only
+ *       write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
+ *       SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
+ *       eventually set SVGA_FIFO_BUSY on its own, but this approach
+ *       lets the driver avoid sending multiple asynchronous wakeup
+ *       messages to the MKS thread.
+ *
+ *  SVGA_REG_BUSY --
+ *
+ *       This register is set to TRUE when SVGA_REG_SYNC is written,
+ *       and it reads as FALSE when the FIFO has been completely
+ *       drained.
+ *
+ *       Every read from this register causes us to synchronously
+ *       process FIFO commands. There is no guarantee as to how many
+ *       commands each read will process.
+ *
+ *       CPU time spent processing FIFO commands will be billed to
+ *       the guest.
+ *
+ *       New drivers should avoid using this register unless they
+ *       need to guarantee that the FIFO is completely drained. It
+ *       is overkill for performing a sync-to-fence. Older drivers
+ *       will use this register for any type of synchronization.
+ *
+ *  SVGA_FIFO_BUSY --
+ *
+ *       This register is a fast way for the guest driver to check
+ *       whether the FIFO is already being processed. It reads and
+ *       writes at normal RAM speeds, with no monitor intervention.
+ *
+ *       If this register reads as TRUE, the host is guaranteeing that
+ *       any new commands written into the FIFO will be noticed before
+ *       the MKS goes back to sleep.
+ *
+ *       If this register reads as FALSE, no such guarantee can be
+ *       made.
+ *
+ *       The guest should use this register to quickly determine
+ *       whether or not it needs to wake up the host. If the guest
+ *       just wrote a command or group of commands that it would like
+ *       the host to begin processing, it should:
+ *
+ *         1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
+ *            action is necessary.
+ *
+ *         2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
+ *            code that we've already sent a SYNC to the host and we
+ *            don't need to send a duplicate.
+ *
+ *         3. Write a reason to SVGA_REG_SYNC. This will send an
+ *            asynchronous wakeup to the MKS thread.
+ */
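+
+/*
+ * Illustrative sketch of the two synchronization idioms described
+ * above; svga_write_reg()/svga_read_reg() are hypothetical register
+ * accessors and 'fifo' is the mapped FIFO memory.
+ */
+extern uint32 svga_read_reg(uint32 index); /* hypothetical */
+
+static inline void
+example_wake_host(volatile uint32 *fifo)
+{
+   if (fifo[SVGA_FIFO_BUSY])
+      return;                   /* Host already awake; nothing to do */
+
+   fifo[SVGA_FIFO_BUSY] = 1;    /* Suppress duplicate wakeups */
+   svga_write_reg(SVGA_REG_SYNC, 1 /* reason, recorded for stats */);
+}
+
+static inline void
+example_drain_fifo(void)
+{
+   svga_write_reg(SVGA_REG_SYNC, 1);
+   while (svga_read_reg(SVGA_REG_BUSY))
+      ;                         /* Each read processes FIFO commands */
+}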
+
+
+/*
+ * FIFO Capabilities
+ *
+ *      Fence -- Fence register and command are supported
+ *      Accel Front -- Front buffer only commands are supported
+ *      Pitch Lock -- Pitch lock register is supported
+ *      Video -- SVGA Video overlay units are supported
+ *      Escape -- Escape command is supported
+ *
+ * XXX: Add longer descriptions for each capability, including a list
+ *      of the new features that each capability provides.
+ *
+ * SVGA_FIFO_CAP_SCREEN_OBJECT --
+ *
+ *    Provides dynamic multi-screen rendering, for improved Unity and
+ *    multi-monitor modes. With Screen Object, the guest can
+ *    dynamically create and destroy 'screens', which can represent
+ *    Unity windows or virtual monitors. Screen Object also provides
+ *    strong guarantees that DMA operations happen only when
+ *    guest-initiated. Screen Object deprecates the BAR1 guest
+ *    framebuffer (GFB) and all commands that work only with the GFB.
+ *
+ *    New registers:
+ *       FIFO_CURSOR_SCREEN_ID, VIDEO_DATA_GMRID, VIDEO_DST_SCREEN_ID
+ *
+ *    New 2D commands:
+ *       DEFINE_SCREEN, DESTROY_SCREEN, DEFINE_GMRFB, BLIT_GMRFB_TO_SCREEN,
+ *       BLIT_SCREEN_TO_GMRFB, ANNOTATION_FILL, ANNOTATION_COPY
+ *
+ *    New 3D commands:
+ *       BLIT_SURFACE_TO_SCREEN
+ *
+ *    New guarantees:
+ *
+ *       - The host will not read or write guest memory, including the GFB,
+ *         except when explicitly initiated by a DMA command.
+ *
+ *       - All DMA, including legacy DMA like UPDATE and PRESENT_READBACK,
+ *         is guaranteed to complete before any subsequent FENCEs.
+ *
+ *       - All legacy commands which affect a Screen (UPDATE, PRESENT,
+ *         PRESENT_READBACK) as well as new Screen blit commands will
+ *         all behave consistently as blits, and memory will be read
+ *         or written in FIFO order.
+ *
+ *         For example, if you PRESENT from one SVGA3D surface to multiple
+ *         places on the screen, the data copied will always be from the
+ *         SVGA3D surface at the time the PRESENT was issued in the FIFO.
+ *         This was not necessarily true on devices without Screen Object.
+ *
+ *         This means that on devices that support Screen Object, the
+ *         PRESENT_READBACK command should not be necessary unless you
+ *         actually want to read back the results of 3D rendering into
+ *         system memory. (And for that, the BLIT_SCREEN_TO_GMRFB
+ *         command provides a strict superset of functionality.)
+ *
+ *       - When a screen is resized, either using Screen Object commands or
+ *         legacy multimon registers, its contents are preserved.
+ *
+ * SVGA_FIFO_CAP_GMR2 --
+ *
+ *    Provides new commands to define and remap guest memory regions (GMR).
+ *
+ *    New 2D commands:
+ *       DEFINE_GMR2, REMAP_GMR2.
+ *
+ * SVGA_FIFO_CAP_3D_HWVERSION_REVISED --
+ *
+ *    Indicates new register SVGA_FIFO_3D_HWVERSION_REVISED exists.
+ *    This register may replace SVGA_FIFO_3D_HWVERSION on platforms
+ *    that enforce graphics resource limits.  This allows the platform
+ *    to clear SVGA_FIFO_3D_HWVERSION and disable 3D in legacy guest
+ *    drivers that do not limit their resources.
+ *
+ *    Note this is an alias to SVGA_FIFO_CAP_GMR2 because these indicators
+ *    are codependent (and thus we use a single capability bit).
+ *
+ * SVGA_FIFO_CAP_SCREEN_OBJECT_2 --
+ *
+ *    Modifies the DEFINE_SCREEN command to include a guest provided
+ *    backing store in GMR memory and the bytesPerLine for the backing
+ *    store.  This capability requires the use of a backing store when
+ *    creating screen objects.  However, if SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    is present, then backing stores are optional.
+ *
+ * SVGA_FIFO_CAP_DEAD --
+ *
+ *    Drivers should not use this cap bit.  This cap bit cannot be
+ *    reused since some hosts already expose it.
+ */
+
+#define SVGA_FIFO_CAP_NONE                  0
+#define SVGA_FIFO_CAP_FENCE             (1<<0)
+#define SVGA_FIFO_CAP_ACCELFRONT        (1<<1)
+#define SVGA_FIFO_CAP_PITCHLOCK         (1<<2)
+#define SVGA_FIFO_CAP_VIDEO             (1<<3)
+#define SVGA_FIFO_CAP_CURSOR_BYPASS_3   (1<<4)
+#define SVGA_FIFO_CAP_ESCAPE            (1<<5)
+#define SVGA_FIFO_CAP_RESERVE           (1<<6)
+#define SVGA_FIFO_CAP_SCREEN_OBJECT     (1<<7)
+#define SVGA_FIFO_CAP_GMR2              (1<<8)
+#define SVGA_FIFO_CAP_3D_HWVERSION_REVISED  SVGA_FIFO_CAP_GMR2
+#define SVGA_FIFO_CAP_SCREEN_OBJECT_2   (1<<9)
+#define SVGA_FIFO_CAP_DEAD              (1<<10)
+
+
+/*
+ * FIFO Flags
+ *
+ *      Accel Front -- Driver should use front buffer only commands
+ */
+
+#define SVGA_FIFO_FLAG_NONE                 0
+#define SVGA_FIFO_FLAG_ACCELFRONT       (1<<0)
+#define SVGA_FIFO_FLAG_RESERVED        (1<<31) /* Internal use only */
+
+/*
+ * FIFO reservation sentinel value
+ */
+
+#define SVGA_FIFO_RESERVED_UNKNOWN      0xffffffff
+
+
+/*
+ * Video overlay support
+ */
+
+#define SVGA_NUM_OVERLAY_UNITS 32
+
+
+/*
+ * Video capabilities that the guest is currently using
+ */
+
+#define SVGA_VIDEO_FLAG_COLORKEY        0x0001
+
+
+/*
+ * Offsets for the video overlay registers
+ */
+
+enum {
+   SVGA_VIDEO_ENABLED = 0,
+   SVGA_VIDEO_FLAGS,
+   SVGA_VIDEO_DATA_OFFSET,
+   SVGA_VIDEO_FORMAT,
+   SVGA_VIDEO_COLORKEY,
+   SVGA_VIDEO_SIZE,          /* Deprecated */
+   SVGA_VIDEO_WIDTH,
+   SVGA_VIDEO_HEIGHT,
+   SVGA_VIDEO_SRC_X,
+   SVGA_VIDEO_SRC_Y,
+   SVGA_VIDEO_SRC_WIDTH,
+   SVGA_VIDEO_SRC_HEIGHT,
+   SVGA_VIDEO_DST_X,         /* Signed int32 */
+   SVGA_VIDEO_DST_Y,         /* Signed int32 */
+   SVGA_VIDEO_DST_WIDTH,
+   SVGA_VIDEO_DST_HEIGHT,
+   SVGA_VIDEO_PITCH_1,
+   SVGA_VIDEO_PITCH_2,
+   SVGA_VIDEO_PITCH_3,
+   SVGA_VIDEO_DATA_GMRID,    /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
+   SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords */
+                             /* (SVGA_ID_INVALID) */
+   SVGA_VIDEO_NUM_REGS
+};
+
+
+/*
+ * SVGA Overlay Units
+ *
+ *      width and height relate to the entire source video frame.
+ *      srcX, srcY, srcWidth and srcHeight represent subset of the source
+ *      video frame to be displayed.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAOverlayUnit {
+   uint32 enabled;
+   uint32 flags;
+   uint32 dataOffset;
+   uint32 format;
+   uint32 colorKey;
+   uint32 size;
+   uint32 width;
+   uint32 height;
+   uint32 srcX;
+   uint32 srcY;
+   uint32 srcWidth;
+   uint32 srcHeight;
+   int32  dstX;
+   int32  dstY;
+   uint32 dstWidth;
+   uint32 dstHeight;
+   uint32 pitches[3];
+   uint32 dataGMRId;
+   uint32 dstScreenId;
+}
+#include "vmware_pack_end.h"
+SVGAOverlayUnit;
+
+
+/*
+ * Guest display topology
+ *
+ * XXX: This structure is not part of the SVGA device's interface, and
+ * doesn't really belong here.
+ */
+#define SVGA_INVALID_DISPLAY_ID ((uint32)-1)
+
+typedef struct SVGADisplayTopology {
+   uint16 displayId;
+   uint16 isPrimary;
+   uint32 width;
+   uint32 height;
+   uint32 positionX;
+   uint32 positionY;
+} SVGADisplayTopology;
+
+
+/*
+ * SVGAScreenObject --
+ *
+ *    This is a new way to represent a guest's multi-monitor screen or
+ *    Unity window. Screen objects are only supported if the
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT capability bit is set.
+ *
+ *    If Screen Objects are supported, they can be used to fully
+ *    replace the functionality provided by the framebuffer registers
+ *    (SVGA_REG_WIDTH, HEIGHT, etc.) and by SVGA_CAP_DISPLAY_TOPOLOGY.
+ *
+ *    The screen object is a struct with guaranteed binary
+ *    compatibility. New flags can be added, and the struct may grow,
+ *    but existing fields must retain their meaning.
+ *
+ *    Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2 are required fields of
+ *    a SVGAGuestPtr that is used to back the screen contents.  This
+ *    memory must come from the GFB.  The guest is not allowed to
+ *    access the memory and doing so will have undefined results.  The
+ *    backing store is required to be page aligned and the size is
+ *    padded to the next page boundary.  The number of pages is:
+ *       (bytesPerLine * size.width * 4 + PAGE_SIZE - 1) / PAGE_SIZE
+ *
+ *    The pitch in the backingStore is required to be at least large
+ *    enough to hold a 32bbp scanline.  It is recommended that the
+ *    driver pad bytesPerLine for a potential performance win.
+ *
+ *    The cloneCount field is treated as a hint from the guest that
+ *    the user wants this display to be cloned, cloneCount times.  A
+ *    value of zero means no cloning should happen.
+ */
+
+#define SVGA_SCREEN_MUST_BE_SET     (1 << 0)
+#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
+#define SVGA_SCREEN_IS_PRIMARY      (1 << 1)
+#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2)
+
+/*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  When the screen is
+ * deactivated the base layer is defined to lose all contents and
+ * become black.  When a screen is deactivated the backing store is
+ * optional.  When this flag is set, backingPtr and bytesPerLine will
+ * be ignored.
+ */
+#define SVGA_SCREEN_DEACTIVATE  (1 << 3)
+
+/*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  When this flag is set
+ * the screen contents will be output as all black to the user,
+ * though the base layer contents are preserved.  The screen base
+ * layer can still be read and written as normal, though no visible
+ * effect will be seen by the user.  When the flag is changed the
+ * screen will be blanked or redrawn to the current contents as needed
+ * without any extra commands from the driver.  This flag only has an
+ * effect when the screen is not deactivated.
+ */
+#define SVGA_SCREEN_BLANKING (1 << 4)
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 structSize;   /* sizeof(SVGAScreenObject) */
+   uint32 id;
+   uint32 flags;
+   struct {
+      uint32 width;
+      uint32 height;
+   } size;
+   struct {
+      int32 x;
+      int32 y;
+   } root;
+
+   /*
+    * Added and required by SVGA_FIFO_CAP_SCREEN_OBJECT_2, optional
+    * with SVGA_FIFO_CAP_SCREEN_OBJECT.
+    */
+   SVGAGuestImage backingStore;
+
+   /*
+    * The cloneCount field is treated as a hint from the guest that
+    * the user wants this display to be cloned, cloneCount times.
+    *
+    * A value of zero means no cloning should happen.
+    */
+   uint32 cloneCount;
+}
+#include "vmware_pack_end.h"
+SVGAScreenObject;
+
+
+/*
+ *  Commands in the command FIFO:
+ *
+ *  Command IDs defined below are used for the traditional 2D FIFO
+ *  communication (not all commands are available for all versions of the
+ *  SVGA FIFO protocol).
+ *
+ *  Note the holes in the command ID numbers: These commands have been
+ *  deprecated, and the old IDs must not be reused.
+ *
+ *  Command IDs from 1000 to 2999 are reserved for use by the SVGA3D
+ *  protocol.
+ *
+ *  Each command's parameters are described by the comments and
+ *  structs below.
+ */
+
+typedef enum {
+   SVGA_CMD_INVALID_CMD           = 0,
+   SVGA_CMD_UPDATE                = 1,
+   SVGA_CMD_RECT_COPY             = 3,
+   SVGA_CMD_RECT_ROP_COPY         = 14,
+   SVGA_CMD_DEFINE_CURSOR         = 19,
+   SVGA_CMD_DEFINE_ALPHA_CURSOR   = 22,
+   SVGA_CMD_UPDATE_VERBOSE        = 25,
+   SVGA_CMD_FRONT_ROP_FILL        = 29,
+   SVGA_CMD_FENCE                 = 30,
+   SVGA_CMD_ESCAPE                = 33,
+   SVGA_CMD_DEFINE_SCREEN         = 34,
+   SVGA_CMD_DESTROY_SCREEN        = 35,
+   SVGA_CMD_DEFINE_GMRFB          = 36,
+   SVGA_CMD_BLIT_GMRFB_TO_SCREEN  = 37,
+   SVGA_CMD_BLIT_SCREEN_TO_GMRFB  = 38,
+   SVGA_CMD_ANNOTATION_FILL       = 39,
+   SVGA_CMD_ANNOTATION_COPY       = 40,
+   SVGA_CMD_DEFINE_GMR2           = 41,
+   SVGA_CMD_REMAP_GMR2            = 42,
+   SVGA_CMD_DEAD                  = 43,
+   SVGA_CMD_DEAD_2                = 44,
+   SVGA_CMD_NOP                   = 45,
+   SVGA_CMD_NOP_ERROR             = 46,
+   SVGA_CMD_MAX
+} SVGAFifoCmdId;
+
+#define SVGA_CMD_MAX_DATASIZE       (256 * 1024)
+#define SVGA_CMD_MAX_ARGS           64
+
+
+/*
+ * SVGA_CMD_UPDATE --
+ *
+ *    This is a DMA transfer which copies from the Guest Framebuffer
+ *    (GFB) at BAR1 + SVGA_REG_FB_OFFSET to any screens which
+ *    intersect with the provided virtual rectangle.
+ *
+ *    This command does not support using arbitrary guest memory as a
+ *    data source; it only works with the pre-defined GFB memory.
+ *    This command also does not support signed virtual coordinates.
+ *    If you have defined screens (using SVGA_CMD_DEFINE_SCREEN) with
+ *    negative root x/y coordinates, the negative portion of those
+ *    screens will not be reachable by this command.
+ *
+ *    This command is not necessary when using framebuffer
+ *    traces. Traces are automatically enabled if the SVGA FIFO is
+ *    disabled, and you may explicitly enable/disable traces using
+ *    SVGA_REG_TRACES. With traces enabled, any write to the GFB will
+ *    automatically act as if a subsequent SVGA_CMD_UPDATE was issued.
+ *
+ *    Traces and SVGA_CMD_UPDATE are the only supported ways to render
+ *    pseudocolor screen updates. The newer Screen Object commands
+ *    only support true color formats.
+ *
+ * Availability:
+ *    Always available.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 x;
+   uint32 y;
+   uint32 width;
+   uint32 height;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdUpdate;
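+
+/*
+ * Illustrative sketch: pushing an UPDATE into the FIFO data area.
+ * This naive version ignores wrap-around at SVGA_FIFO_MAX, the
+ * RESERVED protocol, and full-FIFO handling; a real driver must
+ * handle all three.
+ */
+static inline void
+example_fifo_update(volatile uint32 *fifo,
+                    uint32 x, uint32 y, uint32 width, uint32 height)
+{
+   uint32 next = fifo[SVGA_FIFO_NEXT_CMD] / sizeof(uint32);
+
+   fifo[next++] = SVGA_CMD_UPDATE;
+   fifo[next++] = x;
+   fifo[next++] = y;
+   fifo[next++] = width;
+   fifo[next++] = height;
+
+   /* Publish the command, then wake the host (see SVGA_REG_SYNC). */
+   fifo[SVGA_FIFO_NEXT_CMD] = next * sizeof(uint32);
+   example_wake_host(fifo);
+}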
+
+
+/*
+ * SVGA_CMD_RECT_COPY --
+ *
+ *    Perform a rectangular DMA transfer from one area of the GFB to
+ *    another, and copy the result to any screens which intersect it.
+ *
+ * Availability:
+ *    SVGA_CAP_RECT_COPY
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 srcX;
+   uint32 srcY;
+   uint32 destX;
+   uint32 destY;
+   uint32 width;
+   uint32 height;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRectCopy;
+
+
+/*
+ * SVGA_CMD_RECT_ROP_COPY --
+ *
+ *    Perform a rectangular DMA transfer from one area of the GFB to
+ *    another, and copy the result to any screens which intersect it.
+ *    The value of ROP may only be SVGA_ROP_COPY, and this command is
+ *    only supported for backwards compatibility reasons.
+ *
+ * Availability:
+ *    SVGA_CAP_RECT_COPY
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 srcX;
+   uint32 srcY;
+   uint32 destX;
+   uint32 destY;
+   uint32 width;
+   uint32 height;
+   uint32 rop;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRectRopCopy;
+
+
+/*
+ * SVGA_CMD_DEFINE_CURSOR --
+ *
+ *    Provide a new cursor image, as an AND/XOR mask.
+ *
+ *    The recommended way to position the cursor overlay is by using
+ *    the SVGA_FIFO_CURSOR_* registers, supported by the
+ *    SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
+ *
+ * Availability:
+ *    SVGA_CAP_CURSOR
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 id;             /* Reserved, must be zero. */
+   uint32 hotspotX;
+   uint32 hotspotY;
+   uint32 width;
+   uint32 height;
+   uint32 andMaskDepth;   /* Value must be 1 or equal to BITS_PER_PIXEL */
+   uint32 xorMaskDepth;   /* Value must be 1 or equal to BITS_PER_PIXEL */
+   /*
+    * Followed by scanline data for AND mask, then XOR mask.
+    * Each scanline is padded to a 32-bit boundary.
+    */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineCursor;
+
+
+/*
+ * SVGA_CMD_DEFINE_ALPHA_CURSOR --
+ *
+ *    Provide a new cursor image, in 32-bit BGRA format.
+ *
+ *    The recommended way to position the cursor overlay is by using
+ *    the SVGA_FIFO_CURSOR_* registers, supported by the
+ *    SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
+ *
+ * Availability:
+ *    SVGA_CAP_ALPHA_CURSOR
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 id;             /* Reserved, must be zero. */
+   uint32 hotspotX;
+   uint32 hotspotY;
+   uint32 width;
+   uint32 height;
+   /* Followed by scanline data */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineAlphaCursor;
+
+
+/*
+ * SVGA_CMD_UPDATE_VERBOSE --
+ *
+ *    Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
+ *    'reason' value, an opaque cookie which is used by internal
+ *    debugging tools. Third party drivers should not use this
+ *    command.
+ *
+ * Availability:
+ *    SVGA_CAP_EXTENDED_FIFO
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 x;
+   uint32 y;
+   uint32 width;
+   uint32 height;
+   uint32 reason;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdUpdateVerbose;
+
+
+/*
+ * SVGA_CMD_FRONT_ROP_FILL --
+ *
+ *    This is a hint which tells the SVGA device that the driver has
+ *    just filled a rectangular region of the GFB with a solid
+ *    color. Instead of reading these pixels from the GFB, the device
+ *    can assume that they all equal 'color'. This is primarily used
+ *    for remote desktop protocols.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_ACCELFRONT
+ */
+
+#define  SVGA_ROP_COPY                    0x03
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 color;     /* In the same format as the GFB */
+   uint32 x;
+   uint32 y;
+   uint32 width;
+   uint32 height;
+   uint32 rop;       /* Must be SVGA_ROP_COPY */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdFrontRopFill;
+
+
+/*
+ * SVGA_CMD_FENCE --
+ *
+ *    Insert a synchronization fence.  When the SVGA device reaches
+ *    this command, it will copy the 'fence' value into the
+ *    SVGA_FIFO_FENCE register. It will also compare the fence against
+ *    SVGA_FIFO_FENCE_GOAL. If the fence matches the goal and the
+ *    SVGA_IRQFLAG_FENCE_GOAL interrupt is enabled, the device will
+ *    raise this interrupt.
+ *
+ * Availability:
+ *    SVGA_FIFO_FENCE for this command,
+ *    SVGA_CAP_IRQMASK for SVGA_FIFO_FENCE_GOAL.
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 fence;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdFence;
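+
+/*
+ * Illustrative sketch: inserting a fence and spinning until the
+ * device has passed it.  Reuses the naive FIFO push from the UPDATE
+ * example and ignores 32-bit fence wrap-around; an IRQ-driven driver
+ * would program SVGA_FIFO_FENCE_GOAL and sleep instead of polling.
+ */
+static inline void
+example_sync_to_fence(volatile uint32 *fifo, uint32 fence)
+{
+   uint32 next = fifo[SVGA_FIFO_NEXT_CMD] / sizeof(uint32);
+
+   fifo[next++] = SVGA_CMD_FENCE;
+   fifo[next++] = fence;
+   fifo[SVGA_FIFO_NEXT_CMD] = next * sizeof(uint32);
+
+   example_wake_host(fifo);
+   while (fifo[SVGA_FIFO_FENCE] != fence)
+      ;
+}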
+
+
+/*
+ * SVGA_CMD_ESCAPE --
+ *
+ *    Send an extended or vendor-specific variable length command.
+ *    This is used for video overlay, third party plugins, and
+ *    internal debugging tools. See svga_escape.h
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_ESCAPE
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 nsid;
+   uint32 size;
+   /* followed by 'size' bytes of data */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdEscape;
+
+
+/*
+ * SVGA_CMD_DEFINE_SCREEN --
+ *
+ *    Define or redefine an SVGAScreenObject. See the description of
+ *    SVGAScreenObject above.  The video driver is responsible for
+ *    generating new screen IDs. They should be small positive
+ *    integers. The virtual device will have an implementation-specific
+ *    upper limit on the number of screen IDs
+ *    supported. Drivers are responsible for recycling IDs. The first
+ *    valid ID is zero.
+ *
+ *    - Interaction with other registers:
+ *
+ *    For backwards compatibility, when the GFB mode registers (WIDTH,
+ *    HEIGHT, PITCHLOCK, BITS_PER_PIXEL) are modified, the SVGA device
+ *    deletes all screens other than screen #0, and redefines screen
+ *    #0 according to the specified mode. Drivers that use
+ *    SVGA_CMD_DEFINE_SCREEN should destroy or redefine screen #0.
+ *
+ *    If you use screen objects, do not use the legacy multi-mon
+ *    registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAScreenObject screen;   /* Variable-length according to version */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineScreen;
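+
+/*
+ * Illustrative sketch: filling in a screen object for
+ * SVGA_CMD_DEFINE_SCREEN under SVGA_FIFO_CAP_SCREEN_OBJECT_2, where a
+ * backing store is required.  The GMR backing the store is assumed to
+ * have been defined already (see "Guest memory regions" above).
+ */
+static inline void
+example_init_screen(SVGAScreenObject *screen, uint32 id, uint32 width,
+                    uint32 height, SVGAGuestPtr backing)
+{
+   screen->structSize  = sizeof(SVGAScreenObject);
+   screen->id          = id;
+   screen->flags       = SVGA_SCREEN_MUST_BE_SET | SVGA_SCREEN_IS_PRIMARY;
+   screen->size.width  = width;
+   screen->size.height = height;
+   screen->root.x      = 0;
+   screen->root.y      = 0;
+   screen->backingStore.ptr   = backing;
+   screen->backingStore.pitch = width * 4;  /* At least one 32bpp line */
+   screen->cloneCount  = 0;                 /* No cloning */
+}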
+
+
+/*
+ * SVGA_CMD_DESTROY_SCREEN --
+ *
+ *    Destroy an SVGAScreenObject. Its ID is immediately available for
+ *    re-use.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 screenId;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDestroyScreen;
+
+
+/*
+ * SVGA_CMD_DEFINE_GMRFB --
+ *
+ *    This command sets a piece of SVGA device state called the
+ *    Guest Memory Region Framebuffer, or GMRFB. The GMRFB is a
+ *    piece of light-weight state which identifies the location and
+ *    format of an image in guest memory or in BAR1. The GMRFB has
+ *    an arbitrary size, and it doesn't need to match the geometry
+ *    of the GFB or any screen object.
+ *
+ *    The GMRFB can be redefined as often as you like. You could
+ *    always use the same GMRFB, you could redefine it before
+ *    rendering from a different guest screen, or you could even
+ *    redefine it before every blit.
+ *
+ *    There are multiple ways to use this command. The simplest way is
+ *    to use it to move the framebuffer either to elsewhere in the GFB
+ *    (BAR1) memory region, or to a user-defined GMR. This lets a
+ *    driver use a framebuffer allocated entirely out of normal system
+ *    memory, which we encourage.
+ *
+ *    Another way to use this command is to set up a ring buffer of
+ *    updates in GFB memory. If a driver wants to ensure that no
+ *    frames are skipped by the SVGA device, it is important that the
+ *    driver not modify the source data for a blit until the device is
+ *    done processing the command. One efficient way to accomplish
+ *    this is to use a ring of small DMA buffers. Each buffer is used
+ *    for one blit, then we move on to the next buffer in the
+ *    ring. The FENCE mechanism is used to protect each buffer from
+ *    re-use until the device is finished with that buffer's
+ *    corresponding blit.
+ *
+ *    This command does not affect the meaning of SVGA_CMD_UPDATE.
+ *    UPDATEs always occur from the legacy GFB memory area. This
+ *    command has no support for pseudocolor GMRFBs. Currently only
+ *    true-color 15, 16, and 24-bit depths are supported. Future
+ *    devices may expose capabilities for additional framebuffer
+ *    formats.
+ *
+ *    The default GMRFB value is undefined. Drivers must always send
+ *    this command at least once before performing any blit from the
+ *    GMRFB.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAGuestPtr        ptr;
+   uint32              bytesPerLine;
+   SVGAGMRImageFormat  format;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineGMRFB;
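+
+/*
+ * Illustrative sketch: the payload for a 32bpp GMRFB backed by a
+ * user-defined GMR, per the format table in SVGAGMRImageFormat above.
+ * Enqueuing SVGA_CMD_DEFINE_GMRFB followed by this struct is left to
+ * the usual FIFO push machinery.
+ */
+static inline void
+example_init_gmrfb_cmd(SVGAFifoCmdDefineGMRFB *cmd, uint32 gmrId,
+                       uint32 width)
+{
+   cmd->ptr.gmrId  = gmrId;
+   cmd->ptr.offset = 0;
+   cmd->bytesPerLine = width * 4;   /* Tightly packed 32bpp scanlines */
+   cmd->format.bitsPerPixel = 32;
+   cmd->format.colorDepth   = 24;   /* 32-bit BGRX, per the table above */
+   cmd->format.reserved     = 0;
+}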
+
+
+/*
+ * SVGA_CMD_BLIT_GMRFB_TO_SCREEN --
+ *
+ *    This is a guest-to-host blit. It performs a DMA operation to
+ *    copy a rectangular region of pixels from the current GMRFB to
+ *    a ScreenObject.
+ *
+ *    The destination coordinate may be specified relative to a
+ *    screen's origin.  The provided screen ID must be valid.
+ *
+ *    The SVGA device is guaranteed to finish reading from the GMRFB
+ *    by the time any subsequent FENCE commands are reached.
+ *
+ *    This command consumes an annotation. See the
+ *    SVGA_CMD_ANNOTATION_* commands for details.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGASignedPoint  srcOrigin;
+   SVGASignedRect   destRect;
+   uint32           destScreenId;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdBlitGMRFBToScreen;
+
+
+/*
+ * SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
+ *
+ *    This is a host-to-guest blit. It performs a DMA operation to
+ *    copy a rectangular region of pixels from a single ScreenObject
+ *    back to the current GMRFB.
+ *
+ *    The source coordinate is specified relative to a screen's
+ *    origin.  The provided screen ID must be valid. If any parameters
+ *    are invalid, the resulting pixel values are undefined.
+ *
+ *    The SVGA device is guaranteed to finish writing to the GMRFB by
+ *    the time any subsequent FENCE commands are reached.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGASignedPoint  destOrigin;
+   SVGASignedRect   srcRect;
+   uint32           srcScreenId;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdBlitScreenToGMRFB;
+
+
+/*
+ * SVGA_CMD_ANNOTATION_FILL --
+ *
+ *    The annotation commands have been deprecated and should not be used
+ *    by new drivers.  They used to provide performance hints to the SVGA
+ *    device about the content of screen updates, but newer SVGA devices
+ *    ignore these.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAColorBGRX  color;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdAnnotationFill;
+
+
+/*
+ * SVGA_CMD_ANNOTATION_COPY --
+ *
+ *    The annotation commands have been deprecated and should not be used
+ *    by new drivers.  They used to provide performance hints to the SVGA
+ *    device about the content of screen updates, but newer SVGA devices
+ *    ignore these.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGASignedPoint  srcOrigin;
+   uint32           srcScreenId;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdAnnotationCopy;
+
+
+/*
+ * SVGA_CMD_DEFINE_GMR2 --
+ *
+ *    Define guest memory region v2.  See the description of GMRs above.
+ *
+ * Availability:
+ *    SVGA_CAP_GMR2
+ */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 gmrId;
+   uint32 numPages;
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdDefineGMR2;
+
+
+/*
+ * SVGA_CMD_REMAP_GMR2 --
+ *
+ *    Remap guest memory region v2.  See the description of GMRs above.
+ *
+ *    This command allows the guest to modify a portion of an existing GMR by
+ *    invalidating it or reassigning it to different guest physical pages.
+ *    The pages are identified by physical page number (PPN).  The pages
+ *    are assumed to be pinned and valid for DMA operations.
+ *
+ *    Description of command flags:
+ *
+ *    SVGA_REMAP_GMR2_VIA_GMR: If enabled, references a PPN list in a GMR.
+ *       The PPN list must not overlap with the remap region (this can be
+ *       handled trivially by referencing a separate GMR).  If flag is
+ *       disabled, PPN list is appended to SVGARemapGMR command.
+ *
+ *    SVGA_REMAP_GMR2_PPN64: If set, PPN list is in PPN64 format, otherwise
+ *       it is in PPN32 format.
+ *
+ *    SVGA_REMAP_GMR2_SINGLE_PPN: If set, PPN list contains a single entry.
+ *       A single PPN can be used to invalidate a portion of a GMR or
+ *       map it to a single guest scratch page.
+ *
+ * Availability:
+ *    SVGA_CAP_GMR2
+ */
+
+typedef enum {
+   SVGA_REMAP_GMR2_PPN32         = 0,
+   SVGA_REMAP_GMR2_VIA_GMR       = (1 << 0),
+   SVGA_REMAP_GMR2_PPN64         = (1 << 1),
+   SVGA_REMAP_GMR2_SINGLE_PPN    = (1 << 2),
+} SVGARemapGMR2Flags;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 gmrId;
+   SVGARemapGMR2Flags flags;
+   uint32 offsetPages; /* offset in pages to begin remap */
+   uint32 numPages; /* number of pages to remap */
+   /*
+    * Followed by additional data depending on SVGARemapGMR2Flags.
+    *
+    * If flag SVGA_REMAP_GMR2_VIA_GMR is set, single SVGAGuestPtr follows.
+    * Otherwise an array of page descriptors in PPN32 or PPN64 format
+    * (according to flag SVGA_REMAP_GMR2_PPN64) follows.  If flag
+    * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
+    */
+}
+#include "vmware_pack_end.h"
+SVGAFifoCmdRemapGMR2;
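+
+/*
+ * Illustrative sketch: building a REMAP_GMR2 that points a range of
+ * an existing GMR at one scratch page via SVGA_REMAP_GMR2_SINGLE_PPN.
+ * 'buf' is assumed to have room for the fixed part plus the single
+ * PPN32 entry that follows it.
+ */
+static inline void
+example_remap_to_scratch(SVGAFifoCmdRemapGMR2 *buf, uint32 gmrId,
+                         uint32 numPages, uint32 scratchPpn)
+{
+   buf->gmrId = gmrId;
+   buf->flags = SVGA_REMAP_GMR2_SINGLE_PPN;  /* Inline PPN32, one entry */
+   buf->offsetPages = 0;
+   buf->numPages = numPages;
+
+   /* The single PPN32 entry immediately follows the fixed part. */
+   ((uint32 *)(buf + 1))[0] = scratchPpn;
+}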
+
+
+/*
+ * Size of SVGA device memory such as frame buffer and FIFO.
+ */
+#define SVGA_VRAM_MIN_SIZE             (4 * 640 * 480) /* bytes */
+#define SVGA_VRAM_MIN_SIZE_3D       (16 * 1024 * 1024)
+#define SVGA_VRAM_MAX_SIZE         (128 * 1024 * 1024)
+#define SVGA_MEMORY_SIZE_MAX      (1024 * 1024 * 1024)
+#define SVGA_FIFO_SIZE_MAX           (2 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MIN       (32 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_MAX       (2 * 1024 * 1024)
+#define SVGA_GRAPHICS_MEMORY_KB_DEFAULT   (256 * 1024)
+
+#define SVGA_VRAM_SIZE_W2K          (64 * 1024 * 1024) /* 64 MB */
+
+/*
+ * To simplify autoDetect display configuration, support a minimum of
+ * two 1920x1200 monitors, 32bpp, side-by-side, optionally rotated:
+ *   numDisplays = 2
+ *   maxWidth = numDisplays * 1920 = 3840
+ *   maxHeight = rotated width of single monitor = 1920
+ *   vramSize = maxWidth * maxHeight * 4 = 29491200
+ */
+#define SVGA_VRAM_SIZE_AUTODETECT   (32 * 1024 * 1024)
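+/* 29491200 bytes is 28.125 MiB; the define above rounds that up to 32 MiB. */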
+
+#if defined(VMX86_SERVER)
+#define SVGA_VRAM_SIZE               (4 * 1024 * 1024)
+#define SVGA_VRAM_SIZE_3D           (64 * 1024 * 1024)
+#define SVGA_FIFO_SIZE                    (256 * 1024)
+#define SVGA_FIFO_SIZE_3D                 (516 * 1024)
+#define SVGA_MEMORY_SIZE_DEFAULT   (160 * 1024 * 1024)
+#define SVGA_AUTODETECT_DEFAULT                  FALSE
+#else
+#define SVGA_VRAM_SIZE              (16 * 1024 * 1024)
+#define SVGA_VRAM_SIZE_3D           SVGA_VRAM_MAX_SIZE
+#define SVGA_FIFO_SIZE               (2 * 1024 * 1024)
+#define SVGA_FIFO_SIZE_3D               SVGA_FIFO_SIZE
+#define SVGA_MEMORY_SIZE_DEFAULT   (768 * 1024 * 1024)
+#define SVGA_AUTODETECT_DEFAULT                   TRUE
+#endif
+
+#define SVGA_FIFO_SIZE_GBOBJECTS          (256 * 1024)
+#define SVGA_VRAM_SIZE_GBOBJECTS     (4 * 1024 * 1024)
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
new file mode 100644
index 0000000..2e8ba4d
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_types.h
@@ -0,0 +1,46 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+#ifndef _VM_BASIC_TYPES_H_
+#define _VM_BASIC_TYPES_H_
+#include <linux/kernel.h>
+
+typedef u32 uint32;
+typedef s32 int32;
+typedef u64 uint64;
+typedef u16 uint16;
+typedef s16 int16;
+typedef u8  uint8;
+typedef s8  int8;
+
+typedef uint64 PA;
+typedef uint32 PPN;
+typedef uint64 PPN64;
+
+typedef bool Bool;
+
+#define MAX_UINT32 U32_MAX
+#define MAX_UINT16 U16_MAX
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
new file mode 100644
index 0000000..120eab8
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
@@ -0,0 +1,21 @@
+#ifndef _VM_BASIC_TYPES_H_
+#define _VM_BASIC_TYPES_H_
+#include <linux/kernel.h>
+
+typedef u32 uint32;
+typedef s32 int32;
+typedef u64 uint64;
+typedef u16 uint16;
+typedef s16 int16;
+typedef u8  uint8;
+typedef s8  int8;
+
+typedef uint64 PA;
+typedef uint32 PPN;
+typedef uint64 PPN64;
+
+typedef bool Bool;
+
+#define MAX_UINT32 U32_MAX
+
+#endif
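+
+/*
+ * Note: this header and device_include/svga_types.h share the
+ * _VM_BASIC_TYPES_H_ include guard, so when both are pulled in only
+ * the first one seen is compiled and the otherwise-duplicate typedefs
+ * cannot collide.
+ */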
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
new file mode 100644
index 0000000..7e7b0ce
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
@@ -0,0 +1,25 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+#include <linux/compiler.h>
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
new file mode 100644
index 0000000..e2e440e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
@@ -0,0 +1,25 @@
+/**********************************************************
+ * Copyright 2015 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+__packed
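+
+/*
+ * Usage sketch (illustrative, not part of the original header): the
+ * begin/end pair brackets a struct definition, so that
+ *
+ *    typedef
+ *    #include "vmware_pack_begin.h"
+ *    struct { uint32 a; uint16 b; }
+ *    #include "vmware_pack_end.h"
+ *    Example;
+ *
+ * preprocesses to "typedef struct { uint32 a; uint16 b; } __packed
+ * Example;", i.e. a typedef of a structure with no padding inserted
+ * between members.
+ */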
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
deleted file mode 100644
index f58dc7d..0000000
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ /dev/null
@@ -1,2627 +0,0 @@
-/**********************************************************
- * Copyright 1998-2009 VMware, Inc.  All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
-
-/*
- * svga3d_reg.h --
- *
- *       SVGA 3D hardware definitions
- */
-
-#ifndef _SVGA3D_REG_H_
-#define _SVGA3D_REG_H_
-
-#include "svga_reg.h"
-
-typedef uint32 PPN;
-typedef __le64 PPN64;
-
-/*
- * 3D Hardware Version
- *
- *   The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
- *   register.  It is set by the host and read by the guest.  This lets
- *   us make new guest drivers which are backwards-compatible with old
- *   SVGA hardware revisions.  It does not let us support old guest
- *   drivers.  Good enough for now.
- *
- */
-
-#define SVGA3D_MAKE_HWVERSION(major, minor)      (((major) << 16) | ((minor) & 0xFF))
-#define SVGA3D_MAJOR_HWVERSION(version)          ((version) >> 16)
-#define SVGA3D_MINOR_HWVERSION(version)          ((version) & 0xFF)
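-
-/*
- * For example, SVGA3D_MAKE_HWVERSION(2, 1) evaluates to
- * (2 << 16) | (1 & 0xFF) == 0x20001, from which the MAJOR/MINOR
- * macros recover 2 and 1 respectively.
- */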
-
-typedef enum {
-   SVGA3D_HWVERSION_WS5_RC1   = SVGA3D_MAKE_HWVERSION(0, 1),
-   SVGA3D_HWVERSION_WS5_RC2   = SVGA3D_MAKE_HWVERSION(0, 2),
-   SVGA3D_HWVERSION_WS51_RC1  = SVGA3D_MAKE_HWVERSION(0, 3),
-   SVGA3D_HWVERSION_WS6_B1    = SVGA3D_MAKE_HWVERSION(1, 1),
-   SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
-   SVGA3D_HWVERSION_WS65_B1   = SVGA3D_MAKE_HWVERSION(2, 0),
-   SVGA3D_HWVERSION_WS8_B1    = SVGA3D_MAKE_HWVERSION(2, 1),
-   SVGA3D_HWVERSION_CURRENT   = SVGA3D_HWVERSION_WS8_B1,
-} SVGA3dHardwareVersion;
-
-/*
- * Generic Types
- */
-
-typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
-#define SVGA3D_NUM_CLIPPLANES                   6
-#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS  8
-#define SVGA3D_MAX_CONTEXT_IDS                  256
-#define SVGA3D_MAX_SURFACE_IDS                  (32 * 1024)
-
-#define SVGA3D_NUM_TEXTURE_UNITS                32
-#define SVGA3D_NUM_LIGHTS                       8
-
-/*
- * Surface formats.
- *
- * If you modify this list, be sure to keep GLUtil.c in sync. It
- * includes the internal format definition of each surface in
- * GLUtil_ConvertSurfaceFormat, and it contains a table of
- * human-readable names in GLUtil_GetFormatName.
- */
-
-typedef enum SVGA3dSurfaceFormat {
-   SVGA3D_FORMAT_MIN                   = 0,
-   SVGA3D_FORMAT_INVALID               = 0,
-
-   SVGA3D_X8R8G8B8                     = 1,
-   SVGA3D_A8R8G8B8                     = 2,
-
-   SVGA3D_R5G6B5                       = 3,
-   SVGA3D_X1R5G5B5                     = 4,
-   SVGA3D_A1R5G5B5                     = 5,
-   SVGA3D_A4R4G4B4                     = 6,
-
-   SVGA3D_Z_D32                        = 7,
-   SVGA3D_Z_D16                        = 8,
-   SVGA3D_Z_D24S8                      = 9,
-   SVGA3D_Z_D15S1                      = 10,
-
-   SVGA3D_LUMINANCE8                   = 11,
-   SVGA3D_LUMINANCE4_ALPHA4            = 12,
-   SVGA3D_LUMINANCE16                  = 13,
-   SVGA3D_LUMINANCE8_ALPHA8            = 14,
-
-   SVGA3D_DXT1                         = 15,
-   SVGA3D_DXT2                         = 16,
-   SVGA3D_DXT3                         = 17,
-   SVGA3D_DXT4                         = 18,
-   SVGA3D_DXT5                         = 19,
-
-   SVGA3D_BUMPU8V8                     = 20,
-   SVGA3D_BUMPL6V5U5                   = 21,
-   SVGA3D_BUMPX8L8V8U8                 = 22,
-   SVGA3D_BUMPL8V8U8                   = 23,
-
-   SVGA3D_ARGB_S10E5                   = 24,   /* 16-bit floating-point ARGB */
-   SVGA3D_ARGB_S23E8                   = 25,   /* 32-bit floating-point ARGB */
-
-   SVGA3D_A2R10G10B10                  = 26,
-
-   /* signed formats */
-   SVGA3D_V8U8                         = 27,
-   SVGA3D_Q8W8V8U8                     = 28,
-   SVGA3D_CxV8U8                       = 29,
-
-   /* mixed formats */
-   SVGA3D_X8L8V8U8                     = 30,
-   SVGA3D_A2W10V10U10                  = 31,
-
-   SVGA3D_ALPHA8                       = 32,
-
-   /* Single- and dual-component floating point formats */
-   SVGA3D_R_S10E5                      = 33,
-   SVGA3D_R_S23E8                      = 34,
-   SVGA3D_RG_S10E5                     = 35,
-   SVGA3D_RG_S23E8                     = 36,
-
-   SVGA3D_BUFFER                       = 37,
-
-   SVGA3D_Z_D24X8                      = 38,
-
-   SVGA3D_V16U16                       = 39,
-
-   SVGA3D_G16R16                       = 40,
-   SVGA3D_A16B16G16R16                 = 41,
-
-   /* Packed Video formats */
-   SVGA3D_UYVY                         = 42,
-   SVGA3D_YUY2                         = 43,
-
-   /* Planar video formats */
-   SVGA3D_NV12                         = 44,
-
-   /* Video format with alpha */
-   SVGA3D_AYUV                         = 45,
-
-   SVGA3D_R32G32B32A32_TYPELESS        = 46,
-   SVGA3D_R32G32B32A32_FLOAT           = 25,
-   SVGA3D_R32G32B32A32_UINT            = 47,
-   SVGA3D_R32G32B32A32_SINT            = 48,
-   SVGA3D_R32G32B32_TYPELESS           = 49,
-   SVGA3D_R32G32B32_FLOAT              = 50,
-   SVGA3D_R32G32B32_UINT               = 51,
-   SVGA3D_R32G32B32_SINT               = 52,
-   SVGA3D_R16G16B16A16_TYPELESS        = 53,
-   SVGA3D_R16G16B16A16_FLOAT           = 24,
-   SVGA3D_R16G16B16A16_UNORM           = 41,
-   SVGA3D_R16G16B16A16_UINT            = 54,
-   SVGA3D_R16G16B16A16_SNORM           = 55,
-   SVGA3D_R16G16B16A16_SINT            = 56,
-   SVGA3D_R32G32_TYPELESS              = 57,
-   SVGA3D_R32G32_FLOAT                 = 36,
-   SVGA3D_R32G32_UINT                  = 58,
-   SVGA3D_R32G32_SINT                  = 59,
-   SVGA3D_R32G8X24_TYPELESS            = 60,
-   SVGA3D_D32_FLOAT_S8X24_UINT         = 61,
-   SVGA3D_R32_FLOAT_X8X24_TYPELESS     = 62,
-   SVGA3D_X32_TYPELESS_G8X24_UINT      = 63,
-   SVGA3D_R10G10B10A2_TYPELESS         = 64,
-   SVGA3D_R10G10B10A2_UNORM            = 26,
-   SVGA3D_R10G10B10A2_UINT             = 65,
-   SVGA3D_R11G11B10_FLOAT              = 66,
-   SVGA3D_R8G8B8A8_TYPELESS            = 67,
-   SVGA3D_R8G8B8A8_UNORM               = 68,
-   SVGA3D_R8G8B8A8_UNORM_SRGB          = 69,
-   SVGA3D_R8G8B8A8_UINT                = 70,
-   SVGA3D_R8G8B8A8_SNORM               = 28,
-   SVGA3D_R8G8B8A8_SINT                = 71,
-   SVGA3D_R16G16_TYPELESS              = 72,
-   SVGA3D_R16G16_FLOAT                 = 35,
-   SVGA3D_R16G16_UNORM                 = 40,
-   SVGA3D_R16G16_UINT                  = 73,
-   SVGA3D_R16G16_SNORM                 = 39,
-   SVGA3D_R16G16_SINT                  = 74,
-   SVGA3D_R32_TYPELESS                 = 75,
-   SVGA3D_D32_FLOAT                    = 76,
-   SVGA3D_R32_FLOAT                    = 34,
-   SVGA3D_R32_UINT                     = 77,
-   SVGA3D_R32_SINT                     = 78,
-   SVGA3D_R24G8_TYPELESS               = 79,
-   SVGA3D_D24_UNORM_S8_UINT            = 80,
-   SVGA3D_R24_UNORM_X8_TYPELESS        = 81,
-   SVGA3D_X24_TYPELESS_G8_UINT         = 82,
-   SVGA3D_R8G8_TYPELESS                = 83,
-   SVGA3D_R8G8_UNORM                   = 84,
-   SVGA3D_R8G8_UINT                    = 85,
-   SVGA3D_R8G8_SNORM                   = 27,
-   SVGA3D_R8G8_SINT                    = 86,
-   SVGA3D_R16_TYPELESS                 = 87,
-   SVGA3D_R16_FLOAT                    = 33,
-   SVGA3D_D16_UNORM                    = 8,
-   SVGA3D_R16_UNORM                    = 88,
-   SVGA3D_R16_UINT                     = 89,
-   SVGA3D_R16_SNORM                    = 90,
-   SVGA3D_R16_SINT                     = 91,
-   SVGA3D_R8_TYPELESS                  = 92,
-   SVGA3D_R8_UNORM                     = 93,
-   SVGA3D_R8_UINT                      = 94,
-   SVGA3D_R8_SNORM                     = 95,
-   SVGA3D_R8_SINT                      = 96,
-   SVGA3D_A8_UNORM                     = 32,
-   SVGA3D_R1_UNORM                     = 97,
-   SVGA3D_R9G9B9E5_SHAREDEXP           = 98,
-   SVGA3D_R8G8_B8G8_UNORM              = 99,
-   SVGA3D_G8R8_G8B8_UNORM              = 100,
-   SVGA3D_BC1_TYPELESS                 = 101,
-   SVGA3D_BC1_UNORM                    = 15,
-   SVGA3D_BC1_UNORM_SRGB               = 102,
-   SVGA3D_BC2_TYPELESS                 = 103,
-   SVGA3D_BC2_UNORM                    = 17,
-   SVGA3D_BC2_UNORM_SRGB               = 104,
-   SVGA3D_BC3_TYPELESS                 = 105,
-   SVGA3D_BC3_UNORM                    = 19,
-   SVGA3D_BC3_UNORM_SRGB               = 106,
-   SVGA3D_BC4_TYPELESS                 = 107,
-   SVGA3D_BC4_UNORM                    = 108,
-   SVGA3D_BC4_SNORM                    = 109,
-   SVGA3D_BC5_TYPELESS                 = 110,
-   SVGA3D_BC5_UNORM                    = 111,
-   SVGA3D_BC5_SNORM                    = 112,
-   SVGA3D_B5G6R5_UNORM                 = 3,
-   SVGA3D_B5G5R5A1_UNORM               = 5,
-   SVGA3D_B8G8R8A8_UNORM               = 2,
-   SVGA3D_B8G8R8X8_UNORM               = 1,
-   SVGA3D_R10G10B10_XR_BIAS_A2_UNORM   = 113,
-   SVGA3D_B8G8R8A8_TYPELESS            = 114,
-   SVGA3D_B8G8R8A8_UNORM_SRGB          = 115,
-   SVGA3D_B8G8R8X8_TYPELESS            = 116,
-   SVGA3D_B8G8R8X8_UNORM_SRGB          = 117,
-
-   /* Advanced D3D9 depth formats. */
-   SVGA3D_Z_DF16                       = 118,
-   SVGA3D_Z_DF24                       = 119,
-   SVGA3D_Z_D24S8_INT                  = 120,
-
-   /* Planar video formats. */
-   SVGA3D_YV12                         = 121,
-
-   SVGA3D_FORMAT_MAX                   = 122,
-} SVGA3dSurfaceFormat;
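-
-/*
- * Note that several of the DXGI-style names above deliberately alias
- * earlier legacy values (e.g. SVGA3D_R16G16B16A16_FLOAT == 24 ==
- * SVGA3D_ARGB_S10E5), presumably because both names describe the same
- * underlying memory layout.
- */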
-
-typedef uint32 SVGA3dColor; /* a, r, g, b */
-
-/*
- * These match the D3DFORMAT_OP definitions used by Direct3D. We need
- * them so that we can query the host for what the supported surface
- * operations are (when we're using the D3D backend, in particular),
- * and so we can send those operations to the guest.
- */
-typedef enum {
-   SVGA3DFORMAT_OP_TEXTURE                               = 0x00000001,
-   SVGA3DFORMAT_OP_VOLUMETEXTURE                         = 0x00000002,
-   SVGA3DFORMAT_OP_CUBETEXTURE                           = 0x00000004,
-   SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET                = 0x00000008,
-   SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET              = 0x00000010,
-   SVGA3DFORMAT_OP_ZSTENCIL                              = 0x00000040,
-   SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH   = 0x00000080,
-
-/*
- * This format can be used as a render target if the current display mode
- * is the same depth if the alpha channel is ignored. e.g. if the device
- * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
- * format op list entry for A8R8G8B8 should have this cap.
- */
-   SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET  = 0x00000100,
-
-/*
- * This format contains DirectDraw support (including Flip).  This flag
- * should not be set on alpha formats.
- */
-   SVGA3DFORMAT_OP_DISPLAYMODE                           = 0x00000400,
-
-/*
- * The rasterizer can provide some level of Direct3D support for this format,
- * and this implies that the driver can create a context in this mode (for some
- * render target format).  When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
- * flag must also be set.
- */
-   SVGA3DFORMAT_OP_3DACCELERATION                        = 0x00000800,
-
-/*
- * This is set for a private format when the driver has put the bpp in
- * the structure.
- */
-   SVGA3DFORMAT_OP_PIXELSIZE                             = 0x00001000,
-
-/*
- * Indicates that this format can be converted to any RGB format for which
- * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
- */
-   SVGA3DFORMAT_OP_CONVERT_TO_ARGB                       = 0x00002000,
-
-/*
- * Indicates that this format can be used to create offscreen plain surfaces.
- */
-   SVGA3DFORMAT_OP_OFFSCREENPLAIN                        = 0x00004000,
-
-/*
- * Indicates that this format can be read as an SRGB texture (meaning that the
- * sampler will linearize the looked up data)
- */
-   SVGA3DFORMAT_OP_SRGBREAD                              = 0x00008000,
-
-/*
- * Indicates that this format can be used in the bumpmap instructions
- */
-   SVGA3DFORMAT_OP_BUMPMAP                               = 0x00010000,
-
-/*
- * Indicates that this format can be sampled by the displacement map sampler
- */
-   SVGA3DFORMAT_OP_DMAP                                  = 0x00020000,
-
-/*
- * Indicates that this format cannot be used with texture filtering
- */
-   SVGA3DFORMAT_OP_NOFILTER                              = 0x00040000,
-
-/*
- * Indicates that format conversions are supported to this RGB format if
- * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
- */
-   SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB                    = 0x00080000,
-
-/*
- * Indicates that this format can be written as an SRGB target (meaning that the
- * pixel pipe will de-linearize data on output to the format)
- */
-   SVGA3DFORMAT_OP_SRGBWRITE                             = 0x00100000,
-
-/*
- * Indicates that this format cannot be used with alpha blending
- */
-   SVGA3DFORMAT_OP_NOALPHABLEND                          = 0x00200000,
-
-/*
- * Indicates that the device can auto-generate sublevels for resources
- * of this format
- */
-   SVGA3DFORMAT_OP_AUTOGENMIPMAP                         = 0x00400000,
-
-/*
- * Indicates that this format can be used by vertex texture sampler
- */
-   SVGA3DFORMAT_OP_VERTEXTEXTURE                         = 0x00800000,
-
-/*
- * Indicates that this format supports neither texture coordinate wrap
- * modes, nor mipmapping
- */
-   SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP                  = 0x01000000
-} SVGA3dFormatOp;
-
-/*
- * This structure is a conversion of SVGA3DFORMAT_OP_*.
- * Entries must be located at the same bit positions.
- */
-typedef union {
-   uint32 value;
-   struct {
-      uint32 texture : 1;
-      uint32 volumeTexture : 1;
-      uint32 cubeTexture : 1;
-      uint32 offscreenRenderTarget : 1;
-      uint32 sameFormatRenderTarget : 1;
-      uint32 unknown1 : 1;
-      uint32 zStencil : 1;
-      uint32 zStencilArbitraryDepth : 1;
-      uint32 sameFormatUpToAlpha : 1;
-      uint32 unknown2 : 1;
-      uint32 displayMode : 1;
-      uint32 acceleration3d : 1;
-      uint32 pixelSize : 1;
-      uint32 convertToARGB : 1;
-      uint32 offscreenPlain : 1;
-      uint32 sRGBRead : 1;
-      uint32 bumpMap : 1;
-      uint32 dmap : 1;
-      uint32 noFilter : 1;
-      uint32 memberOfGroupARGB : 1;
-      uint32 sRGBWrite : 1;
-      uint32 noAlphaBlend : 1;
-      uint32 autoGenMipMap : 1;
-      uint32 vertexTexture : 1;
-      uint32 noTexCoordWrapNorMip : 1;
-   };
-} SVGA3dSurfaceFormatCaps;
-
-/*
- * SVGA_3D_CMD_SETRENDERSTATE Types.  All value types
- * must fit in a uint32.
- */
-
-typedef enum {
-   SVGA3D_RS_INVALID                   = 0,
-   SVGA3D_RS_ZENABLE                   = 1,     /* SVGA3dBool */
-   SVGA3D_RS_ZWRITEENABLE              = 2,     /* SVGA3dBool */
-   SVGA3D_RS_ALPHATESTENABLE           = 3,     /* SVGA3dBool */
-   SVGA3D_RS_DITHERENABLE              = 4,     /* SVGA3dBool */
-   SVGA3D_RS_BLENDENABLE               = 5,     /* SVGA3dBool */
-   SVGA3D_RS_FOGENABLE                 = 6,     /* SVGA3dBool */
-   SVGA3D_RS_SPECULARENABLE            = 7,     /* SVGA3dBool */
-   SVGA3D_RS_STENCILENABLE             = 8,     /* SVGA3dBool */
-   SVGA3D_RS_LIGHTINGENABLE            = 9,     /* SVGA3dBool */
-   SVGA3D_RS_NORMALIZENORMALS          = 10,    /* SVGA3dBool */
-   SVGA3D_RS_POINTSPRITEENABLE         = 11,    /* SVGA3dBool */
-   SVGA3D_RS_POINTSCALEENABLE          = 12,    /* SVGA3dBool */
-   SVGA3D_RS_STENCILREF                = 13,    /* uint32 */
-   SVGA3D_RS_STENCILMASK               = 14,    /* uint32 */
-   SVGA3D_RS_STENCILWRITEMASK          = 15,    /* uint32 */
-   SVGA3D_RS_FOGSTART                  = 16,    /* float */
-   SVGA3D_RS_FOGEND                    = 17,    /* float */
-   SVGA3D_RS_FOGDENSITY                = 18,    /* float */
-   SVGA3D_RS_POINTSIZE                 = 19,    /* float */
-   SVGA3D_RS_POINTSIZEMIN              = 20,    /* float */
-   SVGA3D_RS_POINTSIZEMAX              = 21,    /* float */
-   SVGA3D_RS_POINTSCALE_A              = 22,    /* float */
-   SVGA3D_RS_POINTSCALE_B              = 23,    /* float */
-   SVGA3D_RS_POINTSCALE_C              = 24,    /* float */
-   SVGA3D_RS_FOGCOLOR                  = 25,    /* SVGA3dColor */
-   SVGA3D_RS_AMBIENT                   = 26,    /* SVGA3dColor */
-   SVGA3D_RS_CLIPPLANEENABLE           = 27,    /* SVGA3dClipPlanes */
-   SVGA3D_RS_FOGMODE                   = 28,    /* SVGA3dFogMode */
-   SVGA3D_RS_FILLMODE                  = 29,    /* SVGA3dFillMode */
-   SVGA3D_RS_SHADEMODE                 = 30,    /* SVGA3dShadeMode */
-   SVGA3D_RS_LINEPATTERN               = 31,    /* SVGA3dLinePattern */
-   SVGA3D_RS_SRCBLEND                  = 32,    /* SVGA3dBlendOp */
-   SVGA3D_RS_DSTBLEND                  = 33,    /* SVGA3dBlendOp */
-   SVGA3D_RS_BLENDEQUATION             = 34,    /* SVGA3dBlendEquation */
-   SVGA3D_RS_CULLMODE                  = 35,    /* SVGA3dFace */
-   SVGA3D_RS_ZFUNC                     = 36,    /* SVGA3dCmpFunc */
-   SVGA3D_RS_ALPHAFUNC                 = 37,    /* SVGA3dCmpFunc */
-   SVGA3D_RS_STENCILFUNC               = 38,    /* SVGA3dCmpFunc */
-   SVGA3D_RS_STENCILFAIL               = 39,    /* SVGA3dStencilOp */
-   SVGA3D_RS_STENCILZFAIL              = 40,    /* SVGA3dStencilOp */
-   SVGA3D_RS_STENCILPASS               = 41,    /* SVGA3dStencilOp */
-   SVGA3D_RS_ALPHAREF                  = 42,    /* float (0.0 .. 1.0) */
-   SVGA3D_RS_FRONTWINDING              = 43,    /* SVGA3dFrontWinding */
-   SVGA3D_RS_COORDINATETYPE            = 44,    /* SVGA3dCoordinateType */
-   SVGA3D_RS_ZBIAS                     = 45,    /* float */
-   SVGA3D_RS_RANGEFOGENABLE            = 46,    /* SVGA3dBool */
-   SVGA3D_RS_COLORWRITEENABLE          = 47,    /* SVGA3dColorMask */
-   SVGA3D_RS_VERTEXMATERIALENABLE      = 48,    /* SVGA3dBool */
-   SVGA3D_RS_DIFFUSEMATERIALSOURCE     = 49,    /* SVGA3dVertexMaterial */
-   SVGA3D_RS_SPECULARMATERIALSOURCE    = 50,    /* SVGA3dVertexMaterial */
-   SVGA3D_RS_AMBIENTMATERIALSOURCE     = 51,    /* SVGA3dVertexMaterial */
-   SVGA3D_RS_EMISSIVEMATERIALSOURCE    = 52,    /* SVGA3dVertexMaterial */
-   SVGA3D_RS_TEXTUREFACTOR             = 53,    /* SVGA3dColor */
-   SVGA3D_RS_LOCALVIEWER               = 54,    /* SVGA3dBool */
-   SVGA3D_RS_SCISSORTESTENABLE         = 55,    /* SVGA3dBool */
-   SVGA3D_RS_BLENDCOLOR                = 56,    /* SVGA3dColor */
-   SVGA3D_RS_STENCILENABLE2SIDED       = 57,    /* SVGA3dBool */
-   SVGA3D_RS_CCWSTENCILFUNC            = 58,    /* SVGA3dCmpFunc */
-   SVGA3D_RS_CCWSTENCILFAIL            = 59,    /* SVGA3dStencilOp */
-   SVGA3D_RS_CCWSTENCILZFAIL           = 60,    /* SVGA3dStencilOp */
-   SVGA3D_RS_CCWSTENCILPASS            = 61,    /* SVGA3dStencilOp */
-   SVGA3D_RS_VERTEXBLEND               = 62,    /* SVGA3dVertexBlendFlags */
-   SVGA3D_RS_SLOPESCALEDEPTHBIAS       = 63,    /* float */
-   SVGA3D_RS_DEPTHBIAS                 = 64,    /* float */
-
-
-   /*
-    * Output Gamma Level
-    *
- *    Output gamma affects the gamma curve of colors that are output from the
-    * rendering pipeline.  A value of 1.0 specifies a linear color space. If the
-    * value is <= 0.0, gamma correction is ignored and linear color space is
-    * used.
-    */
-
-   SVGA3D_RS_OUTPUTGAMMA               = 65,    /* float */
-   SVGA3D_RS_ZVISIBLE                  = 66,    /* SVGA3dBool */
-   SVGA3D_RS_LASTPIXEL                 = 67,    /* SVGA3dBool */
-   SVGA3D_RS_CLIPPING                  = 68,    /* SVGA3dBool */
-   SVGA3D_RS_WRAP0                     = 69,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP1                     = 70,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP2                     = 71,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP3                     = 72,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP4                     = 73,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP5                     = 74,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP6                     = 75,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP7                     = 76,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP8                     = 77,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP9                     = 78,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP10                    = 79,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP11                    = 80,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP12                    = 81,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP13                    = 82,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP14                    = 83,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_WRAP15                    = 84,    /* SVGA3dWrapFlags */
-   SVGA3D_RS_MULTISAMPLEANTIALIAS      = 85,    /* SVGA3dBool */
-   SVGA3D_RS_MULTISAMPLEMASK           = 86,    /* uint32 */
-   SVGA3D_RS_INDEXEDVERTEXBLENDENABLE  = 87,    /* SVGA3dBool */
-   SVGA3D_RS_TWEENFACTOR               = 88,    /* float */
-   SVGA3D_RS_ANTIALIASEDLINEENABLE     = 89,    /* SVGA3dBool */
-   SVGA3D_RS_COLORWRITEENABLE1         = 90,    /* SVGA3dColorMask */
-   SVGA3D_RS_COLORWRITEENABLE2         = 91,    /* SVGA3dColorMask */
-   SVGA3D_RS_COLORWRITEENABLE3         = 92,    /* SVGA3dColorMask */
-   SVGA3D_RS_SEPARATEALPHABLENDENABLE  = 93,    /* SVGA3dBool */
-   SVGA3D_RS_SRCBLENDALPHA             = 94,    /* SVGA3dBlendOp */
-   SVGA3D_RS_DSTBLENDALPHA             = 95,    /* SVGA3dBlendOp */
-   SVGA3D_RS_BLENDEQUATIONALPHA        = 96,    /* SVGA3dBlendEquation */
-   SVGA3D_RS_TRANSPARENCYANTIALIAS     = 97,    /* SVGA3dTransparencyAntialiasType */
-   SVGA3D_RS_LINEAA                    = 98,    /* SVGA3dBool */
-   SVGA3D_RS_LINEWIDTH                 = 99,    /* float */
-   SVGA3D_RS_MAX
-} SVGA3dRenderStateName;
-
-typedef enum {
-   SVGA3D_TRANSPARENCYANTIALIAS_NORMAL            = 0,
-   SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE   = 1,
-   SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE       = 2,
-   SVGA3D_TRANSPARENCYANTIALIAS_MAX
-} SVGA3dTransparencyAntialiasType;
-
-typedef enum {
-   SVGA3D_VERTEXMATERIAL_NONE     = 0,    /* Use the value in the current material */
-   SVGA3D_VERTEXMATERIAL_DIFFUSE  = 1,    /* Use the value in the diffuse component */
-   SVGA3D_VERTEXMATERIAL_SPECULAR = 2,    /* Use the value in the specular component */
-} SVGA3dVertexMaterial;
-
-typedef enum {
-   SVGA3D_FILLMODE_INVALID = 0,
-   SVGA3D_FILLMODE_POINT   = 1,
-   SVGA3D_FILLMODE_LINE    = 2,
-   SVGA3D_FILLMODE_FILL    = 3,
-   SVGA3D_FILLMODE_MAX
-} SVGA3dFillModeType;
-
-
-typedef
-union {
-   struct {
-      uint16   mode;       /* SVGA3dFillModeType */
-      uint16   face;       /* SVGA3dFace */
-   };
-   uint32 uintValue;
-} SVGA3dFillMode;
-
-typedef enum {
-   SVGA3D_SHADEMODE_INVALID = 0,
-   SVGA3D_SHADEMODE_FLAT    = 1,
-   SVGA3D_SHADEMODE_SMOOTH  = 2,
-   SVGA3D_SHADEMODE_PHONG   = 3,     /* Not supported */
-   SVGA3D_SHADEMODE_MAX
-} SVGA3dShadeMode;
-
-typedef
-union {
-   struct {
-      uint16 repeat;
-      uint16 pattern;
-   };
-   uint32 uintValue;
-} SVGA3dLinePattern;
-
-typedef enum {
-   SVGA3D_BLENDOP_INVALID            = 0,
-   SVGA3D_BLENDOP_ZERO               = 1,
-   SVGA3D_BLENDOP_ONE                = 2,
-   SVGA3D_BLENDOP_SRCCOLOR           = 3,
-   SVGA3D_BLENDOP_INVSRCCOLOR        = 4,
-   SVGA3D_BLENDOP_SRCALPHA           = 5,
-   SVGA3D_BLENDOP_INVSRCALPHA        = 6,
-   SVGA3D_BLENDOP_DESTALPHA          = 7,
-   SVGA3D_BLENDOP_INVDESTALPHA       = 8,
-   SVGA3D_BLENDOP_DESTCOLOR          = 9,
-   SVGA3D_BLENDOP_INVDESTCOLOR       = 10,
-   SVGA3D_BLENDOP_SRCALPHASAT        = 11,
-   SVGA3D_BLENDOP_BLENDFACTOR        = 12,
-   SVGA3D_BLENDOP_INVBLENDFACTOR     = 13,
-   SVGA3D_BLENDOP_MAX
-} SVGA3dBlendOp;
-
-typedef enum {
-   SVGA3D_BLENDEQ_INVALID            = 0,
-   SVGA3D_BLENDEQ_ADD                = 1,
-   SVGA3D_BLENDEQ_SUBTRACT           = 2,
-   SVGA3D_BLENDEQ_REVSUBTRACT        = 3,
-   SVGA3D_BLENDEQ_MINIMUM            = 4,
-   SVGA3D_BLENDEQ_MAXIMUM            = 5,
-   SVGA3D_BLENDEQ_MAX
-} SVGA3dBlendEquation;
-
-typedef enum {
-   SVGA3D_FRONTWINDING_INVALID = 0,
-   SVGA3D_FRONTWINDING_CW      = 1,
-   SVGA3D_FRONTWINDING_CCW     = 2,
-   SVGA3D_FRONTWINDING_MAX
-} SVGA3dFrontWinding;
-
-typedef enum {
-   SVGA3D_FACE_INVALID  = 0,
-   SVGA3D_FACE_NONE     = 1,
-   SVGA3D_FACE_FRONT    = 2,
-   SVGA3D_FACE_BACK     = 3,
-   SVGA3D_FACE_FRONT_BACK = 4,
-   SVGA3D_FACE_MAX
-} SVGA3dFace;
-
-/*
- * The order and the values should not be changed
- */
-
-typedef enum {
-   SVGA3D_CMP_INVALID              = 0,
-   SVGA3D_CMP_NEVER                = 1,
-   SVGA3D_CMP_LESS                 = 2,
-   SVGA3D_CMP_EQUAL                = 3,
-   SVGA3D_CMP_LESSEQUAL            = 4,
-   SVGA3D_CMP_GREATER              = 5,
-   SVGA3D_CMP_NOTEQUAL             = 6,
-   SVGA3D_CMP_GREATEREQUAL         = 7,
-   SVGA3D_CMP_ALWAYS               = 8,
-   SVGA3D_CMP_MAX
-} SVGA3dCmpFunc;
-
-/*
- * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
- * the fog factor to be specified in the alpha component of the specular
- * (a.k.a. secondary) vertex color.
- */
-typedef enum {
-   SVGA3D_FOGFUNC_INVALID          = 0,
-   SVGA3D_FOGFUNC_EXP              = 1,
-   SVGA3D_FOGFUNC_EXP2             = 2,
-   SVGA3D_FOGFUNC_LINEAR           = 3,
-   SVGA3D_FOGFUNC_PER_VERTEX       = 4
-} SVGA3dFogFunction;
-
-/*
- * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
- * or per-pixel basis.
- */
-typedef enum {
-   SVGA3D_FOGTYPE_INVALID          = 0,
-   SVGA3D_FOGTYPE_VERTEX           = 1,
-   SVGA3D_FOGTYPE_PIXEL            = 2,
-   SVGA3D_FOGTYPE_MAX              = 3
-} SVGA3dFogType;
-
-/*
- * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
- * computed using the eye Z value of each pixel (or vertex), whereas range-
- * based fog is computed using the actual distance (range) to the eye.
- */
-typedef enum {
-   SVGA3D_FOGBASE_INVALID          = 0,
-   SVGA3D_FOGBASE_DEPTHBASED       = 1,
-   SVGA3D_FOGBASE_RANGEBASED       = 2,
-   SVGA3D_FOGBASE_MAX              = 3
-} SVGA3dFogBase;
-
-typedef enum {
-   SVGA3D_STENCILOP_INVALID        = 0,
-   SVGA3D_STENCILOP_KEEP           = 1,
-   SVGA3D_STENCILOP_ZERO           = 2,
-   SVGA3D_STENCILOP_REPLACE        = 3,
-   SVGA3D_STENCILOP_INCRSAT        = 4,
-   SVGA3D_STENCILOP_DECRSAT        = 5,
-   SVGA3D_STENCILOP_INVERT         = 6,
-   SVGA3D_STENCILOP_INCR           = 7,
-   SVGA3D_STENCILOP_DECR           = 8,
-   SVGA3D_STENCILOP_MAX
-} SVGA3dStencilOp;
-
-typedef enum {
-   SVGA3D_CLIPPLANE_0              = (1 << 0),
-   SVGA3D_CLIPPLANE_1              = (1 << 1),
-   SVGA3D_CLIPPLANE_2              = (1 << 2),
-   SVGA3D_CLIPPLANE_3              = (1 << 3),
-   SVGA3D_CLIPPLANE_4              = (1 << 4),
-   SVGA3D_CLIPPLANE_5              = (1 << 5),
-} SVGA3dClipPlanes;
-
-typedef enum {
-   SVGA3D_CLEAR_COLOR              = 0x1,
-   SVGA3D_CLEAR_DEPTH              = 0x2,
-   SVGA3D_CLEAR_STENCIL            = 0x4
-} SVGA3dClearFlag;
-
-typedef enum {
-   SVGA3D_RT_DEPTH                 = 0,
-   SVGA3D_RT_STENCIL               = 1,
-   SVGA3D_RT_COLOR0                = 2,
-   SVGA3D_RT_COLOR1                = 3,
-   SVGA3D_RT_COLOR2                = 4,
-   SVGA3D_RT_COLOR3                = 5,
-   SVGA3D_RT_COLOR4                = 6,
-   SVGA3D_RT_COLOR5                = 7,
-   SVGA3D_RT_COLOR6                = 8,
-   SVGA3D_RT_COLOR7                = 9,
-   SVGA3D_RT_MAX,
-   SVGA3D_RT_INVALID               = ((uint32)-1),
-} SVGA3dRenderTargetType;
-
-#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
-
-typedef
-union {
-   struct {
-      uint32  red   : 1;
-      uint32  green : 1;
-      uint32  blue  : 1;
-      uint32  alpha : 1;
-   };
-   uint32 uintValue;
-} SVGA3dColorMask;
-
-typedef enum {
-   SVGA3D_VBLEND_DISABLE            = 0,
-   SVGA3D_VBLEND_1WEIGHT            = 1,
-   SVGA3D_VBLEND_2WEIGHT            = 2,
-   SVGA3D_VBLEND_3WEIGHT            = 3,
-} SVGA3dVertexBlendFlags;
-
-typedef enum {
-   SVGA3D_WRAPCOORD_0   = 1 << 0,
-   SVGA3D_WRAPCOORD_1   = 1 << 1,
-   SVGA3D_WRAPCOORD_2   = 1 << 2,
-   SVGA3D_WRAPCOORD_3   = 1 << 3,
-   SVGA3D_WRAPCOORD_ALL = 0xF,
-} SVGA3dWrapFlags;
-
-/*
- * SVGA_3D_CMD_TEXTURESTATE Types.  All value types
- * must fit in a uint32.
- */
-
-typedef enum {
-   SVGA3D_TS_INVALID                    = 0,
-   SVGA3D_TS_BIND_TEXTURE               = 1,    /* SVGA3dSurfaceId */
-   SVGA3D_TS_COLOROP                    = 2,    /* SVGA3dTextureCombiner */
-   SVGA3D_TS_COLORARG1                  = 3,    /* SVGA3dTextureArgData */
-   SVGA3D_TS_COLORARG2                  = 4,    /* SVGA3dTextureArgData */
-   SVGA3D_TS_ALPHAOP                    = 5,    /* SVGA3dTextureCombiner */
-   SVGA3D_TS_ALPHAARG1                  = 6,    /* SVGA3dTextureArgData */
-   SVGA3D_TS_ALPHAARG2                  = 7,    /* SVGA3dTextureArgData */
-   SVGA3D_TS_ADDRESSU                   = 8,    /* SVGA3dTextureAddress */
-   SVGA3D_TS_ADDRESSV                   = 9,    /* SVGA3dTextureAddress */
-   SVGA3D_TS_MIPFILTER                  = 10,   /* SVGA3dTextureFilter */
-   SVGA3D_TS_MAGFILTER                  = 11,   /* SVGA3dTextureFilter */
-   SVGA3D_TS_MINFILTER                  = 12,   /* SVGA3dTextureFilter */
-   SVGA3D_TS_BORDERCOLOR                = 13,   /* SVGA3dColor */
-   SVGA3D_TS_TEXCOORDINDEX              = 14,   /* uint32 */
-   SVGA3D_TS_TEXTURETRANSFORMFLAGS      = 15,   /* SVGA3dTexTransformFlags */
-   SVGA3D_TS_TEXCOORDGEN                = 16,   /* SVGA3dTextureCoordGen */
-   SVGA3D_TS_BUMPENVMAT00               = 17,   /* float */
-   SVGA3D_TS_BUMPENVMAT01               = 18,   /* float */
-   SVGA3D_TS_BUMPENVMAT10               = 19,   /* float */
-   SVGA3D_TS_BUMPENVMAT11               = 20,   /* float */
-   SVGA3D_TS_TEXTURE_MIPMAP_LEVEL       = 21,   /* uint32 */
-   SVGA3D_TS_TEXTURE_LOD_BIAS           = 22,   /* float */
-   SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL  = 23,   /* uint32 */
-   SVGA3D_TS_ADDRESSW                   = 24,   /* SVGA3dTextureAddress */
-
-
-   /*
-    * Sampler Gamma Level
-    *
- *    Sampler gamma affects the color of samples taken from the sampler.  A
-    * value of 1.0 will produce linear samples.  If the value is <= 0.0 the
-    * gamma value is ignored and a linear space is used.
-    */
-
-   SVGA3D_TS_GAMMA                      = 25,   /* float */
-   SVGA3D_TS_BUMPENVLSCALE              = 26,   /* float */
-   SVGA3D_TS_BUMPENVLOFFSET             = 27,   /* float */
-   SVGA3D_TS_COLORARG0                  = 28,   /* SVGA3dTextureArgData */
-   SVGA3D_TS_ALPHAARG0                  = 29,   /* SVGA3dTextureArgData */
-   SVGA3D_TS_MAX
-} SVGA3dTextureStateName;
-
-typedef enum {
-   SVGA3D_TC_INVALID                   = 0,
-   SVGA3D_TC_DISABLE                   = 1,
-   SVGA3D_TC_SELECTARG1                = 2,
-   SVGA3D_TC_SELECTARG2                = 3,
-   SVGA3D_TC_MODULATE                  = 4,
-   SVGA3D_TC_ADD                       = 5,
-   SVGA3D_TC_ADDSIGNED                 = 6,
-   SVGA3D_TC_SUBTRACT                  = 7,
-   SVGA3D_TC_BLENDTEXTUREALPHA         = 8,
-   SVGA3D_TC_BLENDDIFFUSEALPHA         = 9,
-   SVGA3D_TC_BLENDCURRENTALPHA         = 10,
-   SVGA3D_TC_BLENDFACTORALPHA          = 11,
-   SVGA3D_TC_MODULATE2X                = 12,
-   SVGA3D_TC_MODULATE4X                = 13,
-   SVGA3D_TC_DSDT                      = 14,
-   SVGA3D_TC_DOTPRODUCT3               = 15,
-   SVGA3D_TC_BLENDTEXTUREALPHAPM       = 16,
-   SVGA3D_TC_ADDSIGNED2X               = 17,
-   SVGA3D_TC_ADDSMOOTH                 = 18,
-   SVGA3D_TC_PREMODULATE               = 19,
-   SVGA3D_TC_MODULATEALPHA_ADDCOLOR    = 20,
-   SVGA3D_TC_MODULATECOLOR_ADDALPHA    = 21,
-   SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
-   SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
-   SVGA3D_TC_BUMPENVMAPLUMINANCE       = 24,
-   SVGA3D_TC_MULTIPLYADD               = 25,
-   SVGA3D_TC_LERP                      = 26,
-   SVGA3D_TC_MAX
-} SVGA3dTextureCombiner;
-
-#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
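-
-/*
- * For example, SVGA3D_TC_CAP_BIT(SVGA3D_TC_MODULATE) is
- * 1 << (4 - 1) == 8, and SVGA3D_TC_CAP_BIT(SVGA3D_TC_INVALID) is 0.
- */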
-
-typedef enum {
-   SVGA3D_TEX_ADDRESS_INVALID    = 0,
-   SVGA3D_TEX_ADDRESS_WRAP       = 1,
-   SVGA3D_TEX_ADDRESS_MIRROR     = 2,
-   SVGA3D_TEX_ADDRESS_CLAMP      = 3,
-   SVGA3D_TEX_ADDRESS_BORDER     = 4,
-   SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
-   SVGA3D_TEX_ADDRESS_EDGE       = 6,
-   SVGA3D_TEX_ADDRESS_MAX
-} SVGA3dTextureAddress;
-
-/*
- * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
- * disabled, and the rasterizer should use the magnification filter instead.
- */
-typedef enum {
-   SVGA3D_TEX_FILTER_NONE           = 0,
-   SVGA3D_TEX_FILTER_NEAREST        = 1,
-   SVGA3D_TEX_FILTER_LINEAR         = 2,
-   SVGA3D_TEX_FILTER_ANISOTROPIC    = 3,
-   SVGA3D_TEX_FILTER_FLATCUBIC      = 4, /* Deprecated, not implemented */
-   SVGA3D_TEX_FILTER_GAUSSIANCUBIC  = 5, /* Deprecated, not implemented */
-   SVGA3D_TEX_FILTER_PYRAMIDALQUAD  = 6, /* Not currently implemented */
-   SVGA3D_TEX_FILTER_GAUSSIANQUAD   = 7, /* Not currently implemented */
-   SVGA3D_TEX_FILTER_MAX
-} SVGA3dTextureFilter;
-
-typedef enum {
-   SVGA3D_TEX_TRANSFORM_OFF    = 0,
-   SVGA3D_TEX_TRANSFORM_S      = (1 << 0),
-   SVGA3D_TEX_TRANSFORM_T      = (1 << 1),
-   SVGA3D_TEX_TRANSFORM_R      = (1 << 2),
-   SVGA3D_TEX_TRANSFORM_Q      = (1 << 3),
-   SVGA3D_TEX_PROJECTED        = (1 << 15),
-} SVGA3dTexTransformFlags;
-
-typedef enum {
-   SVGA3D_TEXCOORD_GEN_OFF              = 0,
-   SVGA3D_TEXCOORD_GEN_EYE_POSITION     = 1,
-   SVGA3D_TEXCOORD_GEN_EYE_NORMAL       = 2,
-   SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
-   SVGA3D_TEXCOORD_GEN_SPHERE           = 4,
-   SVGA3D_TEXCOORD_GEN_MAX
-} SVGA3dTextureCoordGen;
-
-/*
- * Texture argument constants for texture combiner
- */
-typedef enum {
-   SVGA3D_TA_INVALID    = 0,
-   SVGA3D_TA_CONSTANT   = 1,
-   SVGA3D_TA_PREVIOUS   = 2,
-   SVGA3D_TA_DIFFUSE    = 3,
-   SVGA3D_TA_TEXTURE    = 4,
-   SVGA3D_TA_SPECULAR   = 5,
-   SVGA3D_TA_MAX
-} SVGA3dTextureArgData;
-
-#define SVGA3D_TM_MASK_LEN 4
-
-/* Modifiers for texture argument constants defined above. */
-typedef enum {
-   SVGA3D_TM_NONE       = 0,
-   SVGA3D_TM_ALPHA      = (1 << SVGA3D_TM_MASK_LEN),
-   SVGA3D_TM_ONE_MINUS  = (2 << SVGA3D_TM_MASK_LEN),
-} SVGA3dTextureArgModifier;
-
-#define SVGA3D_INVALID_ID         ((uint32)-1)
-#define SVGA3D_MAX_CLIP_PLANES    6
-
-/*
- * This is the limit to the number of fixed-function texture
- * transforms and texture coordinates we can support. It does *not*
- * correspond to the number of texture image units (samplers) we
- * support!
- */
-#define SVGA3D_MAX_TEXTURE_COORDS 8
-
-/*
- * Vertex declarations
- *
- * Notes:
- *
- * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
- * draw with any POSITIONT vertex arrays, the programmable vertex
- * pipeline will be implicitly disabled. Drawing will take place as if
- * no vertex shader was bound.
- */
-
-typedef enum {
-   SVGA3D_DECLUSAGE_POSITION     = 0,
-   SVGA3D_DECLUSAGE_BLENDWEIGHT,       /*  1 */
-   SVGA3D_DECLUSAGE_BLENDINDICES,      /*  2 */
-   SVGA3D_DECLUSAGE_NORMAL,            /*  3 */
-   SVGA3D_DECLUSAGE_PSIZE,             /*  4 */
-   SVGA3D_DECLUSAGE_TEXCOORD,          /*  5 */
-   SVGA3D_DECLUSAGE_TANGENT,           /*  6 */
-   SVGA3D_DECLUSAGE_BINORMAL,          /*  7 */
-   SVGA3D_DECLUSAGE_TESSFACTOR,        /*  8 */
-   SVGA3D_DECLUSAGE_POSITIONT,         /*  9 */
-   SVGA3D_DECLUSAGE_COLOR,             /* 10 */
-   SVGA3D_DECLUSAGE_FOG,               /* 11 */
-   SVGA3D_DECLUSAGE_DEPTH,             /* 12 */
-   SVGA3D_DECLUSAGE_SAMPLE,            /* 13 */
-   SVGA3D_DECLUSAGE_MAX
-} SVGA3dDeclUsage;
-
-typedef enum {
-   SVGA3D_DECLMETHOD_DEFAULT     = 0,
-   SVGA3D_DECLMETHOD_PARTIALU,
-   SVGA3D_DECLMETHOD_PARTIALV,
-   SVGA3D_DECLMETHOD_CROSSUV,          /* Normal */
-   SVGA3D_DECLMETHOD_UV,
-   SVGA3D_DECLMETHOD_LOOKUP,           /* Lookup a displacement map */
-   SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement map */
-} SVGA3dDeclMethod;
-
-typedef enum {
-   SVGA3D_DECLTYPE_FLOAT1        =  0,
-   SVGA3D_DECLTYPE_FLOAT2        =  1,
-   SVGA3D_DECLTYPE_FLOAT3        =  2,
-   SVGA3D_DECLTYPE_FLOAT4        =  3,
-   SVGA3D_DECLTYPE_D3DCOLOR      =  4,
-   SVGA3D_DECLTYPE_UBYTE4        =  5,
-   SVGA3D_DECLTYPE_SHORT2        =  6,
-   SVGA3D_DECLTYPE_SHORT4        =  7,
-   SVGA3D_DECLTYPE_UBYTE4N       =  8,
-   SVGA3D_DECLTYPE_SHORT2N       =  9,
-   SVGA3D_DECLTYPE_SHORT4N       = 10,
-   SVGA3D_DECLTYPE_USHORT2N      = 11,
-   SVGA3D_DECLTYPE_USHORT4N      = 12,
-   SVGA3D_DECLTYPE_UDEC3         = 13,
-   SVGA3D_DECLTYPE_DEC3N         = 14,
-   SVGA3D_DECLTYPE_FLOAT16_2     = 15,
-   SVGA3D_DECLTYPE_FLOAT16_4     = 16,
-   SVGA3D_DECLTYPE_MAX,
-} SVGA3dDeclType;
-
-/*
- * This structure is used for the divisor for geometry instancing;
- * it's a direct translation of the Direct3D equivalent.
- */
-typedef union {
-   struct {
-      /*
-       * For index data, this number represents the number of instances to draw.
-       * For instance data, this number represents the number of
-       * instances/vertex in this stream
-       */
-      uint32 count : 30;
-
-      /*
-       * This is 1 if this is supposed to be the data that is repeated for
-       * every instance.
-       */
-      uint32 indexedData : 1;
-
-      /*
-       * This is 1 if this is supposed to be the per-instance data.
-       */
-      uint32 instanceData : 1;
-   };
-
-   uint32 value;
-} SVGA3dVertexDivisor;
-
-typedef enum {
-   SVGA3D_PRIMITIVE_INVALID                     = 0,
-   SVGA3D_PRIMITIVE_TRIANGLELIST                = 1,
-   SVGA3D_PRIMITIVE_POINTLIST                   = 2,
-   SVGA3D_PRIMITIVE_LINELIST                    = 3,
-   SVGA3D_PRIMITIVE_LINESTRIP                   = 4,
-   SVGA3D_PRIMITIVE_TRIANGLESTRIP               = 5,
-   SVGA3D_PRIMITIVE_TRIANGLEFAN                 = 6,
-   SVGA3D_PRIMITIVE_MAX
-} SVGA3dPrimitiveType;
-
-typedef enum {
-   SVGA3D_COORDINATE_INVALID                   = 0,
-   SVGA3D_COORDINATE_LEFTHANDED                = 1,
-   SVGA3D_COORDINATE_RIGHTHANDED               = 2,
-   SVGA3D_COORDINATE_MAX
-} SVGA3dCoordinateType;
-
-typedef enum {
-   SVGA3D_TRANSFORM_INVALID                     = 0,
-   SVGA3D_TRANSFORM_WORLD                       = 1,
-   SVGA3D_TRANSFORM_VIEW                        = 2,
-   SVGA3D_TRANSFORM_PROJECTION                  = 3,
-   SVGA3D_TRANSFORM_TEXTURE0                    = 4,
-   SVGA3D_TRANSFORM_TEXTURE1                    = 5,
-   SVGA3D_TRANSFORM_TEXTURE2                    = 6,
-   SVGA3D_TRANSFORM_TEXTURE3                    = 7,
-   SVGA3D_TRANSFORM_TEXTURE4                    = 8,
-   SVGA3D_TRANSFORM_TEXTURE5                    = 9,
-   SVGA3D_TRANSFORM_TEXTURE6                    = 10,
-   SVGA3D_TRANSFORM_TEXTURE7                    = 11,
-   SVGA3D_TRANSFORM_WORLD1                      = 12,
-   SVGA3D_TRANSFORM_WORLD2                      = 13,
-   SVGA3D_TRANSFORM_WORLD3                      = 14,
-   SVGA3D_TRANSFORM_MAX
-} SVGA3dTransformType;
-
-typedef enum {
-   SVGA3D_LIGHTTYPE_INVALID                     = 0,
-   SVGA3D_LIGHTTYPE_POINT                       = 1,
-   SVGA3D_LIGHTTYPE_SPOT1                       = 2, /* 1-cone, in degrees */
-   SVGA3D_LIGHTTYPE_SPOT2                       = 3, /* 2-cone, in radians */
-   SVGA3D_LIGHTTYPE_DIRECTIONAL                 = 4,
-   SVGA3D_LIGHTTYPE_MAX
-} SVGA3dLightType;
-
-typedef enum {
-   SVGA3D_CUBEFACE_POSX                         = 0,
-   SVGA3D_CUBEFACE_NEGX                         = 1,
-   SVGA3D_CUBEFACE_POSY                         = 2,
-   SVGA3D_CUBEFACE_NEGY                         = 3,
-   SVGA3D_CUBEFACE_POSZ                         = 4,
-   SVGA3D_CUBEFACE_NEGZ                         = 5,
-} SVGA3dCubeFace;
-
-typedef enum {
-   SVGA3D_SHADERTYPE_INVALID                    = 0,
-   SVGA3D_SHADERTYPE_MIN                        = 1,
-   SVGA3D_SHADERTYPE_VS                         = 1,
-   SVGA3D_SHADERTYPE_PS                         = 2,
-   SVGA3D_SHADERTYPE_MAX                        = 3,
-   SVGA3D_SHADERTYPE_GS                         = 3,
-} SVGA3dShaderType;
-
-#define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN)
-
-typedef enum {
-   SVGA3D_CONST_TYPE_FLOAT                      = 0,
-   SVGA3D_CONST_TYPE_INT                        = 1,
-   SVGA3D_CONST_TYPE_BOOL                       = 2,
-   SVGA3D_CONST_TYPE_MAX
-} SVGA3dShaderConstType;
-
-#define SVGA3D_MAX_SURFACE_FACES                6
-
-typedef enum {
-   SVGA3D_STRETCH_BLT_POINT                     = 0,
-   SVGA3D_STRETCH_BLT_LINEAR                    = 1,
-   SVGA3D_STRETCH_BLT_MAX
-} SVGA3dStretchBltMode;
-
-typedef enum {
-   SVGA3D_QUERYTYPE_OCCLUSION                   = 0,
-   SVGA3D_QUERYTYPE_MAX
-} SVGA3dQueryType;
-
-typedef enum {
-   SVGA3D_QUERYSTATE_PENDING     = 0,      /* Waiting on the host (set by guest) */
-   SVGA3D_QUERYSTATE_SUCCEEDED   = 1,      /* Completed successfully (set by host) */
-   SVGA3D_QUERYSTATE_FAILED      = 2,      /* Completed unsuccessfully (set by host) */
-   SVGA3D_QUERYSTATE_NEW         = 3,      /* Never submitted (For guest use only) */
-} SVGA3dQueryState;
-
-typedef enum {
-   SVGA3D_WRITE_HOST_VRAM        = 1,
-   SVGA3D_READ_HOST_VRAM         = 2,
-} SVGA3dTransferType;
-
-/*
- * The maximum number of vertex arrays we're guaranteed to support in
- * SVGA_3D_CMD_DRAWPRIMITIVES.
- */
-#define SVGA3D_MAX_VERTEX_ARRAYS   32
-
-/*
- * The maximum number of primitive ranges we're guaranteed to support
- * in SVGA_3D_CMD_DRAWPRIMITIVES.
- */
-#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
-
-/*
- * Identifiers for commands in the command FIFO.
- *
- * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
- * the SVGA3D protocol and remain reserved; they should not be used in the
- * future.
- *
- * IDs between 1040 and 1999 (inclusive) are available for use by the
- * current SVGA3D protocol.
- *
- * FIFO clients other than SVGA3D should stay below 1000, or at 2000
- * and up.
- */
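-
-/*
- * For example, with SVGA_3D_CMD_BASE == 1040 below,
- * SVGA_3D_CMD_SURFACE_DMA expands to 1040 + 4 == 1044.
- */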
-
-#define SVGA_3D_CMD_LEGACY_BASE            1000
-#define SVGA_3D_CMD_BASE                   1040
-
-#define SVGA_3D_CMD_SURFACE_DEFINE         SVGA_3D_CMD_BASE + 0     /* Deprecated */
-#define SVGA_3D_CMD_SURFACE_DESTROY        SVGA_3D_CMD_BASE + 1
-#define SVGA_3D_CMD_SURFACE_COPY           SVGA_3D_CMD_BASE + 2
-#define SVGA_3D_CMD_SURFACE_STRETCHBLT     SVGA_3D_CMD_BASE + 3
-#define SVGA_3D_CMD_SURFACE_DMA            SVGA_3D_CMD_BASE + 4
-#define SVGA_3D_CMD_CONTEXT_DEFINE         SVGA_3D_CMD_BASE + 5
-#define SVGA_3D_CMD_CONTEXT_DESTROY        SVGA_3D_CMD_BASE + 6
-#define SVGA_3D_CMD_SETTRANSFORM           SVGA_3D_CMD_BASE + 7
-#define SVGA_3D_CMD_SETZRANGE              SVGA_3D_CMD_BASE + 8
-#define SVGA_3D_CMD_SETRENDERSTATE         SVGA_3D_CMD_BASE + 9
-#define SVGA_3D_CMD_SETRENDERTARGET        SVGA_3D_CMD_BASE + 10
-#define SVGA_3D_CMD_SETTEXTURESTATE        SVGA_3D_CMD_BASE + 11
-#define SVGA_3D_CMD_SETMATERIAL            SVGA_3D_CMD_BASE + 12
-#define SVGA_3D_CMD_SETLIGHTDATA           SVGA_3D_CMD_BASE + 13
-#define SVGA_3D_CMD_SETLIGHTENABLED        SVGA_3D_CMD_BASE + 14
-#define SVGA_3D_CMD_SETVIEWPORT            SVGA_3D_CMD_BASE + 15
-#define SVGA_3D_CMD_SETCLIPPLANE           SVGA_3D_CMD_BASE + 16
-#define SVGA_3D_CMD_CLEAR                  SVGA_3D_CMD_BASE + 17
-#define SVGA_3D_CMD_PRESENT                SVGA_3D_CMD_BASE + 18    /* Deprecated */
-#define SVGA_3D_CMD_SHADER_DEFINE          SVGA_3D_CMD_BASE + 19
-#define SVGA_3D_CMD_SHADER_DESTROY         SVGA_3D_CMD_BASE + 20
-#define SVGA_3D_CMD_SET_SHADER             SVGA_3D_CMD_BASE + 21
-#define SVGA_3D_CMD_SET_SHADER_CONST       SVGA_3D_CMD_BASE + 22
-#define SVGA_3D_CMD_DRAW_PRIMITIVES        SVGA_3D_CMD_BASE + 23
-#define SVGA_3D_CMD_SETSCISSORRECT         SVGA_3D_CMD_BASE + 24
-#define SVGA_3D_CMD_BEGIN_QUERY            SVGA_3D_CMD_BASE + 25
-#define SVGA_3D_CMD_END_QUERY              SVGA_3D_CMD_BASE + 26
-#define SVGA_3D_CMD_WAIT_FOR_QUERY         SVGA_3D_CMD_BASE + 27
-#define SVGA_3D_CMD_PRESENT_READBACK       SVGA_3D_CMD_BASE + 28    /* Deprecated */
-#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
-#define SVGA_3D_CMD_SURFACE_DEFINE_V2      SVGA_3D_CMD_BASE + 30
-#define SVGA_3D_CMD_GENERATE_MIPMAPS       SVGA_3D_CMD_BASE + 31
-#define SVGA_3D_CMD_ACTIVATE_SURFACE       SVGA_3D_CMD_BASE + 40
-#define SVGA_3D_CMD_DEACTIVATE_SURFACE     SVGA_3D_CMD_BASE + 41
-#define SVGA_3D_CMD_SCREEN_DMA               1082
-#define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083
-#define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE     1084
-
-#define SVGA_3D_CMD_LOGICOPS_BITBLT          1085
-#define SVGA_3D_CMD_LOGICOPS_TRANSBLT        1086
-#define SVGA_3D_CMD_LOGICOPS_STRETCHBLT      1087
-#define SVGA_3D_CMD_LOGICOPS_COLORFILL       1088
-#define SVGA_3D_CMD_LOGICOPS_ALPHABLEND      1089
-#define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND  1090
-
-#define SVGA_3D_CMD_SET_OTABLE_BASE          1091
-#define SVGA_3D_CMD_READBACK_OTABLE          1092
-
-#define SVGA_3D_CMD_DEFINE_GB_MOB            1093
-#define SVGA_3D_CMD_DESTROY_GB_MOB           1094
-#define SVGA_3D_CMD_REDEFINE_GB_MOB          1095
-#define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING    1096
-
-#define SVGA_3D_CMD_DEFINE_GB_SURFACE        1097
-#define SVGA_3D_CMD_DESTROY_GB_SURFACE       1098
-#define SVGA_3D_CMD_BIND_GB_SURFACE          1099
-#define SVGA_3D_CMD_COND_BIND_GB_SURFACE     1100
-#define SVGA_3D_CMD_UPDATE_GB_IMAGE          1101
-#define SVGA_3D_CMD_UPDATE_GB_SURFACE        1102
-#define SVGA_3D_CMD_READBACK_GB_IMAGE        1103
-#define SVGA_3D_CMD_READBACK_GB_SURFACE      1104
-#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE      1105
-#define SVGA_3D_CMD_INVALIDATE_GB_SURFACE    1106
-
-#define SVGA_3D_CMD_DEFINE_GB_CONTEXT        1107
-#define SVGA_3D_CMD_DESTROY_GB_CONTEXT       1108
-#define SVGA_3D_CMD_BIND_GB_CONTEXT          1109
-#define SVGA_3D_CMD_READBACK_GB_CONTEXT      1110
-#define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT    1111
-
-#define SVGA_3D_CMD_DEFINE_GB_SHADER         1112
-#define SVGA_3D_CMD_DESTROY_GB_SHADER        1113
-#define SVGA_3D_CMD_BIND_GB_SHADER           1114
-
-#define SVGA_3D_CMD_SET_OTABLE_BASE64        1115
-
-#define SVGA_3D_CMD_BEGIN_GB_QUERY           1116
-#define SVGA_3D_CMD_END_GB_QUERY             1117
-#define SVGA_3D_CMD_WAIT_FOR_GB_QUERY        1118
-
-#define SVGA_3D_CMD_NOP                      1119
-
-#define SVGA_3D_CMD_ENABLE_GART              1120
-#define SVGA_3D_CMD_DISABLE_GART             1121
-#define SVGA_3D_CMD_MAP_MOB_INTO_GART        1122
-#define SVGA_3D_CMD_UNMAP_GART_RANGE         1123
-
-#define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET   1124
-#define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET  1125
-#define SVGA_3D_CMD_BIND_GB_SCREENTARGET     1126
-#define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET   1127
-
-#define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL   1128
-#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
-
-#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE  1130
-#define SVGA_3D_CMD_GB_SCREEN_DMA               1131
-#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH  1132
-#define SVGA_3D_CMD_GB_MOB_FENCE                1133
-#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2        1134
-#define SVGA_3D_CMD_DEFINE_GB_MOB64          1135
-#define SVGA_3D_CMD_REDEFINE_GB_MOB64        1136
-#define SVGA_3D_CMD_NOP_ERROR                1137
-
-#define SVGA_3D_CMD_RESERVED1                1138
-#define SVGA_3D_CMD_RESERVED2                1139
-#define SVGA_3D_CMD_RESERVED3                1140
-#define SVGA_3D_CMD_RESERVED4                1141
-#define SVGA_3D_CMD_RESERVED5                1142
-
-#define SVGA_3D_CMD_MAX                      1142
-#define SVGA_3D_CMD_FUTURE_MAX               3000
-
-/*
- * Common substructures used in multiple FIFO commands:
- */
-
-typedef struct {
-   union {
-      struct {
-         uint16  function;       /* SVGA3dFogFunction */
-         uint8   type;           /* SVGA3dFogType */
-         uint8   base;           /* SVGA3dFogBase */
-      };
-      uint32     uintValue;
-   };
-} SVGA3dFogMode;
-
-/*
- * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
- * is a surface ID as well as face/mipmap indices.
- */
-
-typedef
-struct SVGA3dSurfaceImageId {
-   uint32               sid;
-   uint32               face;
-   uint32               mipmap;
-} SVGA3dSurfaceImageId;
-
-typedef
-struct SVGA3dGuestImage {
-   SVGAGuestPtr         ptr;
-
-   /*
-    * A note on interpretation of pitch: This value of pitch is the
-    * number of bytes between vertically adjacent image
-    * blocks. Normally this is the number of bytes between the first
-    * pixel of two adjacent scanlines. With compressed textures,
-    * however, this may represent the number of bytes between
-    * compression blocks rather than between rows of pixels.
-    *
-    * XXX: Compressed textures currently must be tightly packed in guest memory.
-    *
-    * If the image is 1-dimensional, pitch is ignored.
-    *
-    * If 'pitch' is zero, the SVGA3D device calculates a pitch value
-    * assuming each row of blocks is tightly packed.
-    */
-   uint32 pitch;
-} SVGA3dGuestImage;
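As a concrete illustration of the pitch rules above, a minimal sketch of filling in SVGA3dGuestImage for an uncompressed format; guest_image_init() and its bytes_per_pixel parameter are hypothetical, not part of this header:

    /* Sketch: pitch is the byte distance between vertically adjacent
     * rows; passing 0 instead lets the device assume tight packing. */
    static void guest_image_init(SVGA3dGuestImage *img, SVGAGuestPtr ptr,
                                 uint32 width, uint32 bytes_per_pixel)
    {
       img->ptr = ptr;
       img->pitch = width * bytes_per_pixel;   /* or 0 for device-computed */
    }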
-
-
-/*
- * FIFO command format definitions:
- */
-
-/*
- * The data size header following cmdNum for every 3d command
- */
-typedef
-struct {
-   uint32               id;
-   uint32               size;
-} SVGA3dCmdHeader;
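Every 3D command in the FIFO is framed this way: the header, then 'size' bytes of command body. A minimal sketch of the framing, assuming a hypothetical fifo_reserve() that returns writable FIFO space; here 'size' counts only the body, not the header:

    /* Sketch: reserve and frame one 3D FIFO command. */
    static void *cmd_begin(uint32 cmd_id, uint32 body_size)
    {
       SVGA3dCmdHeader *header = fifo_reserve(sizeof(*header) + body_size);

       header->id = cmd_id;
       header->size = body_size;          /* body bytes, header excluded */
       return header + 1;                 /* body follows the header */
    }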
-
-/*
- * A surface is a hierarchy of host VRAM surfaces: 1D, 2D, or 3D, with
- * optional mipmaps and cube faces.
- */
-
-typedef
-struct {
-   uint32               width;
-   uint32               height;
-   uint32               depth;
-} SVGA3dSize;
-
-typedef enum {
-   SVGA3D_SURFACE_CUBEMAP              = (1 << 0),
-   SVGA3D_SURFACE_HINT_STATIC          = (1 << 1),
-   SVGA3D_SURFACE_HINT_DYNAMIC         = (1 << 2),
-   SVGA3D_SURFACE_HINT_INDEXBUFFER     = (1 << 3),
-   SVGA3D_SURFACE_HINT_VERTEXBUFFER    = (1 << 4),
-   SVGA3D_SURFACE_HINT_TEXTURE         = (1 << 5),
-   SVGA3D_SURFACE_HINT_RENDERTARGET    = (1 << 6),
-   SVGA3D_SURFACE_HINT_DEPTHSTENCIL    = (1 << 7),
-   SVGA3D_SURFACE_HINT_WRITEONLY       = (1 << 8),
-   SVGA3D_SURFACE_MASKABLE_ANTIALIAS   = (1 << 9),
-   SVGA3D_SURFACE_AUTOGENMIPMAPS       = (1 << 10),
-} SVGA3dSurfaceFlags;
-
-typedef
-struct {
-   uint32               numMipLevels;
-} SVGA3dSurfaceFace;
-
-typedef
-struct {
-   uint32                      sid;
-   SVGA3dSurfaceFlags          surfaceFlags;
-   SVGA3dSurfaceFormat         format;
-   /*
-    * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all
-    * SVGA3dSurfaceFace structures must have the same numMipLevels value.
-    * Otherwise, all but the first SVGA3dSurfaceFace structure must have
-    * numMipLevels set to 0.
-    */
-   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
-   /*
-    * Followed by an SVGA3dSize structure for each mip level in each face.
-    *
-    * A note on surface sizes: Sizes are always specified in pixels,
-    * even if the true surface size is not a multiple of the minimum
-    * block size of the surface's format. For example, a 3x3x1 DXT1
-    * compressed texture would actually be stored as a 4x4x1 image in
-    * memory.
-    */
-} SVGA3dCmdDefineSurface;       /* SVGA_3D_CMD_SURFACE_DEFINE */
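Since one SVGA3dSize trails the body per mip level of each face, the length of the tail follows directly from the face array; a sketch (define_surface_num_sizes() is hypothetical):

    /* Sketch: number of SVGA3dSize entries following the command body.
     * For non-cubemap surfaces only face[0].numMipLevels is non-zero. */
    static uint32 define_surface_num_sizes(const SVGA3dCmdDefineSurface *cmd)
    {
       uint32 i, total = 0;

       for (i = 0; i < SVGA3D_MAX_SURFACE_FACES; i++)
          total += cmd->face[i].numMipLevels;
       return total;
    }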
-
-typedef
-struct {
-   uint32                      sid;
-   SVGA3dSurfaceFlags          surfaceFlags;
-   SVGA3dSurfaceFormat         format;
-   /*
-    * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all
-    * SVGA3dSurfaceFace structures must have the same numMipLevels value.
-    * Otherwise, all but the first SVGA3dSurfaceFace structure must have
-    * numMipLevels set to 0.
-    */
-   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
-   uint32                      multisampleCount;
-   SVGA3dTextureFilter         autogenFilter;
-   /*
-    * Followed by an SVGA3dSize structure for each mip level in each face.
-    *
-    * A note on surface sizes: Sizes are always specified in pixels,
-    * even if the true surface size is not a multiple of the minimum
-    * block size of the surface's format. For example, a 3x3x1 DXT1
-    * compressed texture would actually be stored as a 4x4x1 image in
-    * memory.
-    */
-} SVGA3dCmdDefineSurface_v2;     /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
-
-typedef
-struct {
-   uint32               sid;
-} SVGA3dCmdDestroySurface;      /* SVGA_3D_CMD_SURFACE_DESTROY */
-
-typedef
-struct {
-   uint32               cid;
-} SVGA3dCmdDefineContext;       /* SVGA_3D_CMD_CONTEXT_DEFINE */
-
-typedef
-struct {
-   uint32               cid;
-} SVGA3dCmdDestroyContext;      /* SVGA_3D_CMD_CONTEXT_DESTROY */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dClearFlag      clearFlag;
-   uint32               color;
-   float                depth;
-   uint32               stencil;
-   /* Followed by variable number of SVGA3dRect structures */
-} SVGA3dCmdClear;               /* SVGA_3D_CMD_CLEAR */
-
-typedef
-struct SVGA3dCopyRect {
-   uint32               x;
-   uint32               y;
-   uint32               w;
-   uint32               h;
-   uint32               srcx;
-   uint32               srcy;
-} SVGA3dCopyRect;
-
-typedef
-struct SVGA3dCopyBox {
-   uint32               x;
-   uint32               y;
-   uint32               z;
-   uint32               w;
-   uint32               h;
-   uint32               d;
-   uint32               srcx;
-   uint32               srcy;
-   uint32               srcz;
-} SVGA3dCopyBox;
-
-typedef
-struct {
-   uint32               x;
-   uint32               y;
-   uint32               w;
-   uint32               h;
-} SVGA3dRect;
-
-typedef
-struct {
-   uint32               x;
-   uint32               y;
-   uint32               z;
-   uint32               w;
-   uint32               h;
-   uint32               d;
-} SVGA3dBox;
-
-typedef
-struct {
-   uint32               x;
-   uint32               y;
-   uint32               z;
-} SVGA3dPoint;
-
-typedef
-struct {
-   SVGA3dLightType      type;
-   SVGA3dBool           inWorldSpace;
-   float                diffuse[4];
-   float                specular[4];
-   float                ambient[4];
-   float                position[4];
-   float                direction[4];
-   float                range;
-   float                falloff;
-   float                attenuation0;
-   float                attenuation1;
-   float                attenuation2;
-   float                theta;
-   float                phi;
-} SVGA3dLightData;
-
-typedef
-struct {
-   uint32               sid;
-   /* Followed by variable number of SVGA3dCopyRect structures */
-} SVGA3dCmdPresent;             /* SVGA_3D_CMD_PRESENT */
-
-typedef
-struct {
-   SVGA3dRenderStateName   state;
-   union {
-      uint32               uintValue;
-      float                floatValue;
-   };
-} SVGA3dRenderState;
-
-typedef
-struct {
-   uint32               cid;
-   /* Followed by variable number of SVGA3dRenderState structures */
-} SVGA3dCmdSetRenderState;      /* SVGA_3D_CMD_SETRENDERSTATE */
-
-typedef
-struct {
-   uint32                 cid;
-   SVGA3dRenderTargetType type;
-   SVGA3dSurfaceImageId   target;
-} SVGA3dCmdSetRenderTarget;     /* SVGA_3D_CMD_SETRENDERTARGET */
-
-typedef
-struct {
-   SVGA3dSurfaceImageId  src;
-   SVGA3dSurfaceImageId  dest;
-   /* Followed by variable number of SVGA3dCopyBox structures */
-} SVGA3dCmdSurfaceCopy;               /* SVGA_3D_CMD_SURFACE_COPY */
-
-typedef
-struct {
-   SVGA3dSurfaceImageId  src;
-   SVGA3dSurfaceImageId  dest;
-   SVGA3dBox             boxSrc;
-   SVGA3dBox             boxDest;
-   SVGA3dStretchBltMode  mode;
-} SVGA3dCmdSurfaceStretchBlt;         /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
-
-typedef
-struct {
-   /*
-    * If the discard flag is present in a surface DMA operation, the host may
-    * discard the contents of the current mipmap level and face of the target
-    * surface before applying the surface DMA contents.
-    */
-   uint32 discard : 1;
-
-   /*
-    * If the unsynchronized flag is present, the host may perform this upload
-    * without syncing to pending reads on this surface.
-    */
-   uint32 unsynchronized : 1;
-
-   /*
-    * Guests *MUST* set the reserved bits to 0 before submitting the command
-    * suffix as future flags may occupy these bits.
-    */
-   uint32 reserved : 30;
-} SVGA3dSurfaceDMAFlags;
-
-typedef
-struct {
-   SVGA3dGuestImage      guest;
-   SVGA3dSurfaceImageId  host;
-   SVGA3dTransferType    transfer;
-   /*
-    * Followed by variable number of SVGA3dCopyBox structures. For consistency
-    * in all clipping logic and coordinate translation, we define the
-    * "source" in each copyBox as the guest image and the
-    * "destination" as the host image, regardless of transfer
-    * direction.
-    *
-    * For efficiency, the SVGA3D device is free to copy more data than
-    * specified. For example, it may round copy boxes outwards such
-    * that they lie on particular alignment boundaries.
-    */
-} SVGA3dCmdSurfaceDMA;                /* SVGA_3D_CMD_SURFACE_DMA */
-
-/*
- * SVGA3dCmdSurfaceDMASuffix --
- *
- *    This is a command suffix that will appear after a SurfaceDMA command in
- *    the FIFO.  It contains some extra information that hosts may use to
- *    optimize performance or protect the guest.  This suffix exists to preserve
- *    backwards compatibility while also allowing for new functionality to be
- *    implemented.
- */
-
-typedef
-struct {
-   uint32 suffixSize;
-
-   /*
-    * The maximum offset is used to determine the maximum offset from the
-    * guestPtr base address that will be accessed or written to during this
-    * surfaceDMA.  If the suffix is supported, the host will respect this
-    * boundary while performing surface DMAs.
-    *
-    * Defaults to MAX_UINT32
-    */
-   uint32 maximumOffset;
-
-   /*
-    * A set of flags that describes optimizations that the host may perform
-    * while performing this surface DMA operation.  The guest should never rely
-    * on behaviour that is different when these flags are set for correctness.
-    *
-    * Defaults to 0
-    */
-   SVGA3dSurfaceDMAFlags flags;
-} SVGA3dCmdSurfaceDMASuffix;
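A sketch of initializing the suffix with the documented defaults; zeroing the whole structure also satisfies the rule above that flags.reserved must be 0 (dma_suffix_init() is hypothetical):

    /* Sketch: suffix with default behaviour: no optimization flags and
     * an unrestricted maximumOffset. */
    static void dma_suffix_init(SVGA3dCmdSurfaceDMASuffix *suffix)
    {
       memset(suffix, 0, sizeof(*suffix));    /* clears flags.reserved too */
       suffix->suffixSize = sizeof(*suffix);
       suffix->maximumOffset = (uint32)~0U;   /* the MAX_UINT32 default */
    }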
-
-/*
- * SVGA_3D_CMD_DRAW_PRIMITIVES --
- *
- *   This command is the SVGA3D device's generic drawing entry point.
- *   It can draw multiple ranges of primitives, optionally using an
- *   index buffer, using an arbitrary collection of vertex buffers.
- *
- *   Each SVGA3dVertexDecl defines a distinct vertex array to bind
- *   during this draw call. The declarations specify which surface
- *   the vertex data lives in, what that vertex data is used for,
- *   and how to interpret it.
- *
- *   Each SVGA3dPrimitiveRange defines a collection of primitives
- *   to render using the same vertex arrays. An index buffer is
- *   optional.
- */
-
-typedef
-struct {
-   /*
-    * A range hint is an optional specification for the range of indices
-    * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
-    * that the entire array will be used.
-    *
-    * These are only hints. The SVGA3D device may use them for
-    * performance optimization if possible, but it's also allowed to
-    * ignore these values.
-    */
-   uint32               first;
-   uint32               last;
-} SVGA3dArrayRangeHint;
-
-typedef
-struct {
-   /*
-    * Define the origin and shape of a vertex or index array. Both
-    * 'offset' and 'stride' are in bytes. The provided surface will be
-    * reinterpreted as a flat array of bytes in the same format used
-    * by surface DMA operations. To avoid unnecessary conversions, the
-    * surface should be created with the SVGA3D_BUFFER format.
-    *
-    * Index 0 in the array starts 'offset' bytes into the surface.
-    * Index 1 begins at byte 'offset + stride', etc. Array indices may
-    * not be negative.
-    */
-   uint32               surfaceId;
-   uint32               offset;
-   uint32               stride;
-} SVGA3dArray;
-
-typedef
-struct {
-   /*
-    * Describe a vertex array's data type, and define how it is to be
-    * used by the fixed function pipeline or the vertex shader. It
-    * isn't useful to have two VertexDecls with the same
-    * VertexArrayIdentity in one draw call.
-    */
-   SVGA3dDeclType       type;
-   SVGA3dDeclMethod     method;
-   SVGA3dDeclUsage      usage;
-   uint32               usageIndex;
-} SVGA3dVertexArrayIdentity;
-
-typedef
-struct {
-   SVGA3dVertexArrayIdentity  identity;
-   SVGA3dArray                array;
-   SVGA3dArrayRangeHint       rangeHint;
-} SVGA3dVertexDecl;
-
-typedef
-struct {
-   /*
-    * Define a group of primitives to render, from sequential indices.
-    *
-    * The values of 'primType' and 'primitiveCount' determine the
-    * total number of vertices that will be rendered.
-    */
-   SVGA3dPrimitiveType  primType;
-   uint32               primitiveCount;
-
-   /*
-    * Optional index buffer. If indexArray.surfaceId is
-    * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
-    * without an index buffer is identical to rendering with an index
-    * buffer containing the sequence [0, 1, 2, 3, ...].
-    *
-    * If an index buffer is in use, indexWidth specifies the width in
-    * bytes of each index value. It must be less than or equal to
-    * indexArray.stride.
-    *
-    * (Currently, the SVGA3D device requires index buffers to be tightly
-    * packed. In other words, indexWidth == indexArray.stride)
-    */
-   SVGA3dArray          indexArray;
-   uint32               indexWidth;
-
-   /*
-    * Optional index bias. This number is added to all indices from
-    * indexArray before they are used as vertex array indices. This
-    * can be used in multiple ways:
-    *
-    *  - When not using an indexArray, this bias can be used to
-    *    specify where in the vertex arrays to begin rendering.
-    *
-    *  - A positive number here is equivalent to increasing the
-    *    offset in each vertex array.
-    *
-    *  - A negative number can be used to render using a small
-    *    vertex array and an index buffer that contains large
-    *    values. This may be used by some applications that
-    *    crop a vertex buffer without modifying their index
-    *    buffer.
-    *
-    * Note that rendering with a negative bias value may be slower and
-    * use more memory than rendering with a positive or zero bias.
-    */
-   int32                indexBias;
-} SVGA3dPrimitiveRange;
-
-typedef
-struct {
-   uint32               cid;
-   uint32               numVertexDecls;
-   uint32               numRanges;
-
-   /*
-    * There are up to three variable size arrays after the
-    * SVGA3dCmdDrawPrimitives structure. In order,
-    * they are:
-    *
-    * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
-    *    SVGA3D_MAX_VERTEX_ARRAYS;
-    * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
-    *    SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
-    * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
-    *    the frequency divisor for the corresponding vertex decl).
-    */
-} SVGA3dCmdDrawPrimitives;      /* SVGA_3D_CMD_DRAWPRIMITIVES */
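The fixed layout makes the body size computable up front; a sketch that ignores the optional SVGA3dVertexDivisor tail (draw_body_size() is hypothetical):

    /* Sketch: body size of a DRAW_PRIMITIVES command, excluding the
     * optional per-decl SVGA3dVertexDivisor array. */
    static uint32 draw_body_size(uint32 numVertexDecls, uint32 numRanges)
    {
       return sizeof(SVGA3dCmdDrawPrimitives) +
              numVertexDecls * sizeof(SVGA3dVertexDecl) +
              numRanges * sizeof(SVGA3dPrimitiveRange);
    }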
-
-typedef
-struct {
-   uint32                   stage;
-   SVGA3dTextureStateName   name;
-   union {
-      uint32                value;
-      float                 floatValue;
-   };
-} SVGA3dTextureState;
-
-typedef
-struct {
-   uint32               cid;
-   /* Followed by variable number of SVGA3dTextureState structures */
-} SVGA3dCmdSetTextureState;      /* SVGA_3D_CMD_SETTEXTURESTATE */
-
-typedef
-struct {
-   uint32                   cid;
-   SVGA3dTransformType      type;
-   float                    matrix[16];
-} SVGA3dCmdSetTransform;          /* SVGA_3D_CMD_SETTRANSFORM */
-
-typedef
-struct {
-   float                min;
-   float                max;
-} SVGA3dZRange;
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dZRange         zRange;
-} SVGA3dCmdSetZRange;             /* SVGA_3D_CMD_SETZRANGE */
-
-typedef
-struct {
-   float                diffuse[4];
-   float                ambient[4];
-   float                specular[4];
-   float                emissive[4];
-   float                shininess;
-} SVGA3dMaterial;
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dFace           face;
-   SVGA3dMaterial       material;
-} SVGA3dCmdSetMaterial;           /* SVGA_3D_CMD_SETMATERIAL */
-
-typedef
-struct {
-   uint32               cid;
-   uint32               index;
-   SVGA3dLightData      data;
-} SVGA3dCmdSetLightData;           /* SVGA_3D_CMD_SETLIGHTDATA */
-
-typedef
-struct {
-   uint32               cid;
-   uint32               index;
-   uint32               enabled;
-} SVGA3dCmdSetLightEnabled;      /* SVGA_3D_CMD_SETLIGHTENABLED */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dRect           rect;
-} SVGA3dCmdSetViewport;           /* SVGA_3D_CMD_SETVIEWPORT */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dRect           rect;
-} SVGA3dCmdSetScissorRect;         /* SVGA_3D_CMD_SETSCISSORRECT */
-
-typedef
-struct {
-   uint32               cid;
-   uint32               index;
-   float                plane[4];
-} SVGA3dCmdSetClipPlane;           /* SVGA_3D_CMD_SETCLIPPLANE */
-
-typedef
-struct {
-   uint32               cid;
-   uint32               shid;
-   SVGA3dShaderType     type;
-   /* Followed by variable number of DWORDs for shader bytecode */
-} SVGA3dCmdDefineShader;           /* SVGA_3D_CMD_SHADER_DEFINE */
-
-typedef
-struct {
-   uint32               cid;
-   uint32               shid;
-   SVGA3dShaderType     type;
-} SVGA3dCmdDestroyShader;         /* SVGA_3D_CMD_SHADER_DESTROY */
-
-typedef
-struct {
-   uint32                  cid;
-   uint32                  reg;     /* register number */
-   SVGA3dShaderType        type;
-   SVGA3dShaderConstType   ctype;
-   uint32                  values[4];
-} SVGA3dCmdSetShaderConst;        /* SVGA_3D_CMD_SET_SHADER_CONST */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dShaderType     type;
-   uint32               shid;
-} SVGA3dCmdSetShader;             /* SVGA_3D_CMD_SET_SHADER */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dQueryType      type;
-} SVGA3dCmdBeginQuery;           /* SVGA_3D_CMD_BEGIN_QUERY */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dQueryType      type;
-   SVGAGuestPtr         guestResult;  /* Points to an SVGA3dQueryResult structure */
-} SVGA3dCmdEndQuery;                  /* SVGA_3D_CMD_END_QUERY */
-
-typedef
-struct {
-   uint32               cid;          /* Same parameters passed to END_QUERY */
-   SVGA3dQueryType      type;
-   SVGAGuestPtr         guestResult;
-} SVGA3dCmdWaitForQuery;              /* SVGA_3D_CMD_WAIT_FOR_QUERY */
-
-typedef
-struct {
-   uint32               totalSize;    /* Set by guest before query is ended. */
-   SVGA3dQueryState     state;        /* Set by host or guest. See SVGA3dQueryState. */
-   union {                            /* Set by host on exit from PENDING state */
-      uint32            result32;
-   };
-} SVGA3dQueryResult;
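Together with BEGIN_QUERY and END_QUERY this gives the usual polling flow. A sketch of the guest side, assuming the SVGA3dQueryState enumerants defined earlier in this header (PENDING set by the guest, SUCCEEDED/FAILED by the host) and a hypothetical map_guest_ptr() helper:

    /* Sketch: mark the result pending, end the query, poll the state. */
    volatile SVGA3dQueryResult *result = map_guest_ptr(guestResult);

    result->totalSize = sizeof(*result);
    result->state = SVGA3D_QUERYSTATE_PENDING;
    /* ... submit SVGA_3D_CMD_END_QUERY with the same guestResult ... */
    while (result->state == SVGA3D_QUERYSTATE_PENDING)
       ;   /* or submit SVGA_3D_CMD_WAIT_FOR_QUERY and sync the FIFO */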
-
-/*
- * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
- *
- *    This is a blit from an SVGA3D surface to a Screen Object. Just
- *    like GMR-to-screen blits, this blit may be directed at a
- *    specific screen or to the virtual coordinate space.
- *
- *    The blit copies from a rectangular region of an SVGA3D surface
- *    image to a rectangular region of a screen or screens.
- *
- *    This command takes an optional variable-length list of clipping
- *    rectangles after the body of the command. If no rectangles are
- *    specified, there is no clipping region. The entire destRect is
- *    drawn to. If one or more rectangles are included, they describe
- *    a clipping region. The clip rectangle coordinates are measured
- *    relative to the top-left corner of destRect.
- *
- *    This clipping region serves multiple purposes:
- *
- *      - It can be used to perform an irregularly shaped blit more
- *        efficiently than by issuing many separate blit commands.
- *
- *      - It is equivalent to allowing blits with non-integer
- *        source coordinates. You could blit just one half-pixel
- *        of a source, for example, by specifying a larger
- *        destination rectangle than you need, then removing
- *        part of it using a clip rectangle.
- *
- * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT
- *
- * Limitations:
- *
- *    - Currently, no backend supports blits from a mipmap or face
- *      other than the first one.
- */
-
-typedef
-struct {
-   SVGA3dSurfaceImageId srcImage;
-   SVGASignedRect       srcRect;
-   uint32               destScreenId; /* Screen ID or SVGA_ID_INVALID for virt. coords */
-   SVGASignedRect       destRect;     /* Supports scaling if src/dest different size */
-   /* Clipping: zero or more SVGASignedRects follow */
-} SVGA3dCmdBlitSurfaceToScreen;         /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
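Because the optional clip list is carried only in the command length, a consumer recovers its size from the header; a sketch (blit_num_clip_rects() is hypothetical):

    /* Sketch: count of SVGASignedRect clip entries trailing the body. */
    static uint32 blit_num_clip_rects(const SVGA3dCmdHeader *header)
    {
       return (header->size - sizeof(SVGA3dCmdBlitSurfaceToScreen)) /
              sizeof(SVGASignedRect);
    }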
-
-typedef
-struct {
-   uint32               sid;
-   SVGA3dTextureFilter  filter;
-} SVGA3dCmdGenerateMipmaps;             /* SVGA_3D_CMD_GENERATE_MIPMAPS */
-
-
-/*
- * Guest-backed surface definitions.
- */
-
-typedef uint32 SVGAMobId;
-
-typedef enum SVGAMobFormat {
-   SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID,
-   SVGA3D_MOBFMT_PTDEPTH_0 = 0,
-   SVGA3D_MOBFMT_PTDEPTH_1 = 1,
-   SVGA3D_MOBFMT_PTDEPTH_2 = 2,
-   SVGA3D_MOBFMT_RANGE     = 3,
-   SVGA3D_MOBFMT_PTDEPTH64_0 = 4,
-   SVGA3D_MOBFMT_PTDEPTH64_1 = 5,
-   SVGA3D_MOBFMT_PTDEPTH64_2 = 6,
-   SVGA3D_MOBFMT_MAX,
-} SVGAMobFormat;
-
-/*
- * Sizes of opaque types.
- */
-
-#define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16
-#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8
-#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64
-#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16
-#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64
-#define SVGA3D_CONTEXT_DATA_SIZE 16384
-
-/*
- * SVGA3dCmdSetOTableBase --
- *
- * This command allows the guest to specify the base PPN of the
- * specified object table.
- */
-
-typedef enum {
-   SVGA_OTABLE_MOB           = 0,
-   SVGA_OTABLE_MIN           = 0,
-   SVGA_OTABLE_SURFACE       = 1,
-   SVGA_OTABLE_CONTEXT       = 2,
-   SVGA_OTABLE_SHADER        = 3,
-   SVGA_OTABLE_SCREEN_TARGET = 4,
-   SVGA_OTABLE_DX9_MAX       = 5,
-   SVGA_OTABLE_MAX           = 8
-} SVGAOTableType;
-
-typedef
-struct {
-   SVGAOTableType type;
-   PPN baseAddress;
-   uint32 sizeInBytes;
-   uint32 validSizeInBytes;
-   SVGAMobFormat ptDepth;
-} __packed
-SVGA3dCmdSetOTableBase;  /* SVGA_3D_CMD_SET_OTABLE_BASE */
-
-typedef
-struct {
-   SVGAOTableType type;
-   PPN64 baseAddress;
-   uint32 sizeInBytes;
-   uint32 validSizeInBytes;
-   SVGAMobFormat ptDepth;
-} __packed
-SVGA3dCmdSetOTableBase64;  /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
-
-typedef
-struct {
-   SVGAOTableType type;
-} __packed
-SVGA3dCmdReadbackOTable;  /* SVGA_3D_CMD_READBACK_OTABLE */
-
-/*
- * Define a memory object (Mob) in the OTable.
- */
-
-typedef
-struct SVGA3dCmdDefineGBMob {
-   SVGAMobId mobid;
-   SVGAMobFormat ptDepth;
-   PPN base;
-   uint32 sizeInBytes;
-} __packed
-SVGA3dCmdDefineGBMob;   /* SVGA_3D_CMD_DEFINE_GB_MOB */
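ptDepth selects how 'base' is interpreted. A sketch of one plausible selection policy, assuming (as the vmwgfx MOB code does) that depth 0 means base addresses the single data page directly, depth 1 a page of PPNs, and depth 2 two page-table levels; pick_ptdepth() and the 4 KiB page size are assumptions:

    #define PPNS_PER_PAGE (4096 / sizeof(PPN))   /* assumes 4 KiB pages */

    static SVGAMobFormat pick_ptdepth(uint32 num_data_pages)
    {
       if (num_data_pages == 1)
          return SVGA3D_MOBFMT_PTDEPTH_0;   /* base is the data page */
       if (num_data_pages <= PPNS_PER_PAGE)
          return SVGA3D_MOBFMT_PTDEPTH_1;   /* base points at a PPN page */
       return SVGA3D_MOBFMT_PTDEPTH_2;      /* two-level page table */
    }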
-
-
-/*
- * Destroy an object in the OTable.
- */
-
-typedef
-struct SVGA3dCmdDestroyGBMob {
-   SVGAMobId mobid;
-} __packed
-SVGA3dCmdDestroyGBMob;   /* SVGA_3D_CMD_DESTROY_GB_MOB */
-
-/*
- * Redefine an object in the OTable.
- */
-
-typedef
-struct SVGA3dCmdRedefineGBMob {
-   SVGAMobId mobid;
-   SVGAMobFormat ptDepth;
-   PPN base;
-   uint32 sizeInBytes;
-} __packed
-SVGA3dCmdRedefineGBMob;   /* SVGA_3D_CMD_REDEFINE_GB_MOB */
-
-/*
- * Define a memory object (Mob) in the OTable with a PPN64 base.
- */
-
-typedef
-struct SVGA3dCmdDefineGBMob64 {
-   SVGAMobId mobid;
-   SVGAMobFormat ptDepth;
-   PPN64 base;
-   uint32 sizeInBytes;
-} __packed
-SVGA3dCmdDefineGBMob64;   /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
-
-/*
- * Redefine an object in the OTable with PPN64 base.
- */
-
-typedef
-struct SVGA3dCmdRedefineGBMob64 {
-   SVGAMobId mobid;
-   SVGAMobFormat ptDepth;
-   PPN64 base;
-   uint32 sizeInBytes;
-} __packed
-SVGA3dCmdRedefineGBMob64;   /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
-
-/*
- * Notification that the page tables have been modified.
- */
-
-typedef
-struct SVGA3dCmdUpdateGBMobMapping {
-   SVGAMobId mobid;
-} __packed
-SVGA3dCmdUpdateGBMobMapping;   /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
-
-/*
- * Define a guest-backed surface.
- */
-
-typedef
-struct SVGA3dCmdDefineGBSurface {
-   uint32 sid;
-   SVGA3dSurfaceFlags surfaceFlags;
-   SVGA3dSurfaceFormat format;
-   uint32 numMipLevels;
-   uint32 multisampleCount;
-   SVGA3dTextureFilter autogenFilter;
-   SVGA3dSize size;
-} __packed
-SVGA3dCmdDefineGBSurface;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
-
-/*
- * Destroy a guest-backed surface.
- */
-
-typedef
-struct SVGA3dCmdDestroyGBSurface {
-   uint32 sid;
-} __packed
-SVGA3dCmdDestroyGBSurface;   /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
-
-/*
- * Bind a guest-backed surface to an object.
- */
-
-typedef
-struct SVGA3dCmdBindGBSurface {
-   uint32 sid;
-   SVGAMobId mobid;
-} __packed
-SVGA3dCmdBindGBSurface;   /* SVGA_3D_CMD_BIND_GB_SURFACE */
-
-/*
- * Conditionally bind a mob to a guest-backed surface if testMobid
- * matches the currently bound mob.  Optionally issue a readback on
- * the surface while it is still bound to the old mobid if the mobid
- * is changed by this command.
- */
-
-#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0)
-
-typedef
-struct {
-   uint32 sid;
-   SVGAMobId testMobid;
-   SVGAMobId mobid;
-   uint32 flags;
-} __packed
-SVGA3dCmdCondBindGBSurface;          /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
-
-/*
- * Update an image in a guest-backed surface.
- * (Inform the device that the guest contents have been updated.)
- */
-
-typedef
-struct SVGA3dCmdUpdateGBImage {
-   SVGA3dSurfaceImageId image;
-   SVGA3dBox box;
-} __packed
-SVGA3dCmdUpdateGBImage;   /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
-
-/*
- * Update an entire guest-backed surface.
- * (Inform the device that the guest contents have been updated.)
- */
-
-typedef
-struct SVGA3dCmdUpdateGBSurface {
-   uint32 sid;
-} __packed
-SVGA3dCmdUpdateGBSurface;   /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
-
-/*
- * Readback an image in a guest-backed surface.
- * (Request the device to flush the dirty contents into the guest.)
- */
-
-typedef
-struct SVGA3dCmdReadbackGBImage {
-   SVGA3dSurfaceImageId image;
-} __packed
-SVGA3dCmdReadbackGBImage;   /* SVGA_3D_CMD_READBACK_GB_IMAGE */
-
-/*
- * Readback an entire guest-backed surface.
- * (Request the device to flush the dirty contents into the guest.)
- */
-
-typedef
-struct SVGA3dCmdReadbackGBSurface {
-   uint32 sid;
-} __packed
-SVGA3dCmdReadbackGBSurface;   /* SVGA_3D_CMD_READBACK_GB_SURFACE */
-
-/*
- * Readback a sub rect of an image in a guest-backed surface.  After
- * issuing this command, the driver is required to issue an update call
- * for the same region before issuing any other commands that reference
- * this surface, or rendering is not guaranteed.
- */
-
-typedef
-struct SVGA3dCmdReadbackGBImagePartial {
-   SVGA3dSurfaceImageId image;
-   SVGA3dBox box;
-   uint32 invertBox;
-} __packed
-SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
-
-/*
- * Invalidate an image in a guest-backed surface.
- * (Notify the device that the contents can be lost.)
- */
-
-typedef
-struct SVGA3dCmdInvalidateGBImage {
-   SVGA3dSurfaceImageId image;
-} __packed
-SVGA3dCmdInvalidateGBImage;   /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
-
-/*
- * Invalidate an entire guest-backed surface.
- * (Notify the device that the contents of all images can be lost.)
- */
-
-typedef
-struct SVGA3dCmdInvalidateGBSurface {
-   uint32 sid;
-} __packed
-SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
-
-/*
- * Invalidate a sub rect of an image in a guest-backed surface.  After
- * issuing this command, the driver is required to issue an update call
- * for the same region before issuing any other commands that reference
- * this surface, or rendering is not guaranteed.
- */
-
-typedef
-struct SVGA3dCmdInvalidateGBImagePartial {
-   SVGA3dSurfaceImageId image;
-   SVGA3dBox box;
-   uint32 invertBox;
-} __packed
-SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
-
-/*
- * Define a guest-backed context.
- */
-
-typedef
-struct SVGA3dCmdDefineGBContext {
-   uint32 cid;
-} __packed
-SVGA3dCmdDefineGBContext;   /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
-
-/*
- * Destroy a guest-backed context.
- */
-
-typedef
-struct SVGA3dCmdDestroyGBContext {
-   uint32 cid;
-} __packed
-SVGA3dCmdDestroyGBContext;   /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
-
-/*
- * Bind a guest-backed context.
- *
- * validContents should be set to 0 for new contexts,
- * and 1 if this is an old context which is getting paged
- * back onto the device.
- *
- * For new contexts, it is recommended that the driver
- * issue commands to initialize all interesting state
- * prior to rendering.
- */
-
-typedef
-struct SVGA3dCmdBindGBContext {
-   uint32 cid;
-   SVGAMobId mobid;
-   uint32 validContents;
-} __packed
-SVGA3dCmdBindGBContext;   /* SVGA_3D_CMD_BIND_GB_CONTEXT */
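A sketch of binding a freshly defined context per the rule above:

    /* Sketch: validContents = 0 tells the device the backing mob holds
     * no meaningful state yet; use 1 only when paging a context back in. */
    SVGA3dCmdBindGBContext body;

    body.cid = cid;              /* cid/mobid: previously defined ids */
    body.mobid = mobid;
    body.validContents = 0;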
-
-/*
- * Readback a guest-backed context.
- * (Request that the device flush the contents back into guest memory.)
- */
-
-typedef
-struct SVGA3dCmdReadbackGBContext {
-   uint32 cid;
-} __packed
-SVGA3dCmdReadbackGBContext;   /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
-
-/*
- * Invalidate a guest-backed context.
- */
-typedef
-struct SVGA3dCmdInvalidateGBContext {
-   uint32 cid;
-} __packed
-SVGA3dCmdInvalidateGBContext;   /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
-
-/*
- * Define a guest-backed shader.
- */
-
-typedef
-struct SVGA3dCmdDefineGBShader {
-   uint32 shid;
-   SVGA3dShaderType type;
-   uint32 sizeInBytes;
-} __packed
-SVGA3dCmdDefineGBShader;   /* SVGA_3D_CMD_DEFINE_GB_SHADER */
-
-/*
- * Bind a guest-backed shader.
- */
-
-typedef struct SVGA3dCmdBindGBShader {
-   uint32 shid;
-   SVGAMobId mobid;
-   uint32 offsetInBytes;
-} __packed
-SVGA3dCmdBindGBShader;   /* SVGA_3D_CMD_BIND_GB_SHADER */
-
-/*
- * Destroy a guest-backed shader.
- */
-
-typedef struct SVGA3dCmdDestroyGBShader {
-   uint32 shid;
-} __packed
-SVGA3dCmdDestroyGBShader;   /* SVGA_3D_CMD_DESTROY_GB_SHADER */
-
-typedef
-struct {
-   uint32                  cid;
-   uint32                  regStart;
-   SVGA3dShaderType        shaderType;
-   SVGA3dShaderConstType   constType;
-
-   /*
-    * Followed by a variable number of shader constants.
-    *
-    * Note that FLOAT and INT constants are 4-dwords in length, while
-    * BOOL constants are 1-dword in length.
-    */
-} __packed
-SVGA3dCmdSetGBShaderConstInline;
-/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
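The dword sizes above determine the command's variable-length tail; a sketch using the SVGA3dShaderConstType enumerant SVGA3D_CONST_TYPE_BOOL defined earlier in this header (gb_const_payload_dwords() is hypothetical):

    /* Sketch: payload size in dwords for 'count' consecutive constants. */
    static uint32 gb_const_payload_dwords(SVGA3dShaderConstType ctype,
                                          uint32 count)
    {
       return ctype == SVGA3D_CONST_TYPE_BOOL ? count : count * 4;
    }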
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dQueryType      type;
-} __packed
-SVGA3dCmdBeginGBQuery;           /* SVGA_3D_CMD_BEGIN_GB_QUERY */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dQueryType      type;
-   SVGAMobId mobid;
-   uint32 offset;
-} __packed
-SVGA3dCmdEndGBQuery;                  /* SVGA_3D_CMD_END_GB_QUERY */
-
-
-/*
- * SVGA_3D_CMD_WAIT_FOR_GB_QUERY --
- *
- *    The semantics of this command are identical to the
- *    SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written
- *    to a Mob instead of a GMR.
- */
-
-typedef
-struct {
-   uint32               cid;
-   SVGA3dQueryType      type;
-   SVGAMobId mobid;
-   uint32 offset;
-} __packed
-SVGA3dCmdWaitForGBQuery;          /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
-
-typedef
-struct {
-   SVGAMobId mobid;
-   uint32 fbOffset;
-   uint32 initialized;
-} __packed
-SVGA3dCmdEnableGart;              /* SVGA_3D_CMD_ENABLE_GART */
-
-typedef
-struct {
-   SVGAMobId mobid;
-   uint32 gartOffset;
-} __packed
-SVGA3dCmdMapMobIntoGart;          /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
-
-
-typedef
-struct {
-   uint32 gartOffset;
-   uint32 numPages;
-} __packed
-SVGA3dCmdUnmapGartRange;          /* SVGA_3D_CMD_UNMAP_GART_RANGE */
-
-
-/*
- * Screen Targets
- */
-#define SVGA_STFLAG_PRIMARY (1 << 0)
-
-typedef
-struct {
-   uint32 stid;
-   uint32 width;
-   uint32 height;
-   int32 xRoot;
-   int32 yRoot;
-   uint32 flags;
-} __packed
-SVGA3dCmdDefineGBScreenTarget;    /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
-
-typedef
-struct {
-   uint32 stid;
-} __packed
-SVGA3dCmdDestroyGBScreenTarget;  /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
-
-typedef
-struct {
-   uint32 stid;
-   SVGA3dSurfaceImageId image;
-} __packed
-SVGA3dCmdBindGBScreenTarget;  /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
-
-typedef
-struct {
-   uint32 stid;
-   SVGA3dBox box;
-} __packed
-SVGA3dCmdUpdateGBScreenTarget;  /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
-
-/*
- * Capability query index.
- *
- * Notes:
- *
- *   1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
- *      fixed-function texture units available. Each of these units
- *      work in both FFP and Shader modes, and they support texture
- *      transforms and texture coordinates. The host may have additional
- *      texture image units that are only usable with shaders.
- *
- *   2. The BUFFER_FORMAT capabilities are deprecated, and they always
- *      return TRUE. Even on physical hardware that does not support
- *      these formats natively, the SVGA3D device will provide an emulation
- *      which should be invisible to the guest OS.
- *
- *      In general, the SVGA3D device should support any operation on
-    *      any surface format; it just may perform some of these
- *      operations in software depending on the capabilities of the
- *      available physical hardware.
- *
- *      XXX: In the future, we will add capabilities that describe in
- *      detail what formats are supported in hardware for what kinds
- *      of operations.
- */
-
-typedef enum {
-   SVGA3D_DEVCAP_3D                                = 0,
-   SVGA3D_DEVCAP_MAX_LIGHTS                        = 1,
-   SVGA3D_DEVCAP_MAX_TEXTURES                      = 2,  /* See note (1) */
-   SVGA3D_DEVCAP_MAX_CLIP_PLANES                   = 3,
-   SVGA3D_DEVCAP_VERTEX_SHADER_VERSION             = 4,
-   SVGA3D_DEVCAP_VERTEX_SHADER                     = 5,
-   SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION           = 6,
-   SVGA3D_DEVCAP_FRAGMENT_SHADER                   = 7,
-   SVGA3D_DEVCAP_MAX_RENDER_TARGETS                = 8,
-   SVGA3D_DEVCAP_S23E8_TEXTURES                    = 9,
-   SVGA3D_DEVCAP_S10E5_TEXTURES                    = 10,
-   SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND             = 11,
-   SVGA3D_DEVCAP_D16_BUFFER_FORMAT                 = 12, /* See note (2) */
-   SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT               = 13, /* See note (2) */
-   SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT               = 14, /* See note (2) */
-   SVGA3D_DEVCAP_QUERY_TYPES                       = 15,
-   SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING         = 16,
-   SVGA3D_DEVCAP_MAX_POINT_SIZE                    = 17,
-   SVGA3D_DEVCAP_MAX_SHADER_TEXTURES               = 18,
-   SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH                 = 19,
-   SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT                = 20,
-   SVGA3D_DEVCAP_MAX_VOLUME_EXTENT                 = 21,
-   SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT                = 22,
-   SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO          = 23,
-   SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY            = 24,
-   SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT               = 25,
-   SVGA3D_DEVCAP_MAX_VERTEX_INDEX                  = 26,
-   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS    = 27,
-   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS  = 28,
-   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS           = 29,
-   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS         = 30,
-   SVGA3D_DEVCAP_TEXTURE_OPS                       = 31,
-   SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8               = 32,
-   SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8               = 33,
-   SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10            = 34,
-   SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5               = 35,
-   SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5               = 36,
-   SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4               = 37,
-   SVGA3D_DEVCAP_SURFACEFMT_R5G6B5                 = 38,
-   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16            = 39,
-   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8      = 40,
-   SVGA3D_DEVCAP_SURFACEFMT_ALPHA8                 = 41,
-   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8             = 42,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_D16                  = 43,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8                = 44,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8                = 45,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT1                   = 46,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT2                   = 47,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT3                   = 48,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT4                   = 49,
-   SVGA3D_DEVCAP_SURFACEFMT_DXT5                   = 50,
-   SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8           = 51,
-   SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10            = 52,
-   SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8               = 53,
-   SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8               = 54,
-   SVGA3D_DEVCAP_SURFACEFMT_CxV8U8                 = 55,
-   SVGA3D_DEVCAP_SURFACEFMT_R_S10E5                = 56,
-   SVGA3D_DEVCAP_SURFACEFMT_R_S23E8                = 57,
-   SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5               = 58,
-   SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8               = 59,
-   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5             = 60,
-   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8             = 61,
-   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES        = 63,
-
-   /*
-    * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
-    * render targets.  This does not include the depth or stencil targets.
-    */
-   SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS   = 64,
-
-   SVGA3D_DEVCAP_SURFACEFMT_V16U16                 = 65,
-   SVGA3D_DEVCAP_SURFACEFMT_G16R16                 = 66,
-   SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16           = 67,
-   SVGA3D_DEVCAP_SURFACEFMT_UYVY                   = 68,
-   SVGA3D_DEVCAP_SURFACEFMT_YUY2                   = 69,
-   SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES    = 70,
-   SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES       = 71,
-   SVGA3D_DEVCAP_ALPHATOCOVERAGE                   = 72,
-   SVGA3D_DEVCAP_SUPERSAMPLE                       = 73,
-   SVGA3D_DEVCAP_AUTOGENMIPMAPS                    = 74,
-   SVGA3D_DEVCAP_SURFACEFMT_NV12                   = 75,
-   SVGA3D_DEVCAP_SURFACEFMT_AYUV                   = 76,
-
-   /*
-    * This is the maximum number of SVGA context IDs that the guest
-    * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
-    */
-   SVGA3D_DEVCAP_MAX_CONTEXT_IDS                   = 77,
-
-   /*
-    * This is the maximum number of SVGA surface IDs that the guest
-    * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
-    */
-   SVGA3D_DEVCAP_MAX_SURFACE_IDS                   = 78,
-
-   SVGA3D_DEVCAP_SURFACEFMT_Z_DF16                 = 79,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_DF24                 = 80,
-   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT            = 81,
-
-   SVGA3D_DEVCAP_SURFACEFMT_BC4_UNORM              = 82,
-   SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM              = 83,
-
-   /*
-    * Deprecated.
-    */
-   SVGA3D_DEVCAP_VGPU10                            = 84,
-
-   /*
-    * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements
-    * ORed together, one for every type of video decoding supported.
-    */
-   SVGA3D_DEVCAP_VIDEO_DECODE                      = 85,
-
-   /*
-    * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements
-    * ORed together, one for every type of video processing supported.
-    */
-   SVGA3D_DEVCAP_VIDEO_PROCESS                     = 86,
-
-   SVGA3D_DEVCAP_LINE_AA                           = 87,  /* boolean */
-   SVGA3D_DEVCAP_LINE_STIPPLE                      = 88,  /* boolean */
-   SVGA3D_DEVCAP_MAX_LINE_WIDTH                    = 89,  /* float */
-   SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH                 = 90,  /* float */
-
-   SVGA3D_DEVCAP_SURFACEFMT_YV12                   = 91,
-
-   /*
-    * Does the host support the SVGA logic ops commands?
-    */
-   SVGA3D_DEVCAP_LOGICOPS                          = 92,
-
-   /*
-    * What support does the host have for screen targets?
-    *
-    * See the SVGA3D_SCREENTARGET_CAP bits below.
-    */
-   SVGA3D_DEVCAP_SCREENTARGETS                     = 93,
-
-   SVGA3D_DEVCAP_MAX                                  /* This must be the last index. */
-} SVGA3dDevCapIndex;
-
-typedef union {
-   Bool   b;
-   uint32 u;
-   int32  i;
-   float  f;
-} SVGA3dDevCapResult;
-
-typedef enum {
-   SVGA3DCAPS_RECORD_UNKNOWN        = 0,
-   SVGA3DCAPS_RECORD_DEVCAPS_MIN    = 0x100,
-   SVGA3DCAPS_RECORD_DEVCAPS        = 0x100,
-   SVGA3DCAPS_RECORD_DEVCAPS_MAX    = 0x1ff,
-} SVGA3dCapsRecordType;
-
-typedef
-struct SVGA3dCapsRecordHeader {
-   uint32 length;
-   SVGA3dCapsRecordType type;
-}
-SVGA3dCapsRecordHeader;
-
-typedef
-struct SVGA3dCapsRecord {
-   SVGA3dCapsRecordHeader header;
-   uint32 data[1];
-}
-SVGA3dCapsRecord;
-
-
-typedef uint32 SVGA3dCapPair[2];
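The records form a self-describing list. A sketch of walking it, assuming (as the vmwgfx driver does) that header.length is in 32-bit units, includes the header itself, and that a zero length terminates the list; 'caps' is a hypothetical buffer:

    const SVGA3dCapsRecord *rec = (const SVGA3dCapsRecord *)caps;

    while (rec->header.length != 0) {
       if (rec->header.type >= SVGA3DCAPS_RECORD_DEVCAPS_MIN &&
           rec->header.type <= SVGA3DCAPS_RECORD_DEVCAPS_MAX) {
          const SVGA3dCapPair *pair = (const SVGA3dCapPair *)rec->data;
          /* pair[i][0]: SVGA3dDevCapIndex, pair[i][1]: raw cap value */
       }
       rec = (const SVGA3dCapsRecord *)((const uint32 *)rec +
                                        rec->header.length);
    }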
-
-#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
deleted file mode 100644
index ef33850..0000000
--- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
+++ /dev/null
@@ -1,912 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifdef __KERNEL__
-
-#include <drm/vmwgfx_drm.h>
-#define surf_size_struct struct drm_vmw_size
-
-#else /* __KERNEL__ */
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
-#endif /* ARRAY_SIZE */
-
-#define DIV_ROUND_UP(x, y)  (((x) + (y) - 1) / (y))
-#define max_t(type, x, y)  ((x) > (y) ? (x) : (y))
-#define min_t(type, x, y)  ((x) < (y) ? (x) : (y))
-#define surf_size_struct SVGA3dSize
-#define u32 uint32
-#define u64 uint64_t
-#define U32_MAX ((u32)~0U)
-
-#endif /* __KERNEL__ */
-
-#include "svga3d_reg.h"
-
-/*
- * enum svga3d_block_desc describes the active data channels in a block.
- *
- * There can be at most four active channels in a block:
- *    1. Red, bump W, luminance and depth are stored in the first channel.
- *    2. Green, bump V and stencil are stored in the second channel.
- *    3. Blue and bump U are stored in the third channel.
- *    4. Alpha and bump Q are stored in the fourth channel.
- *
- * Block channels can be used to store compressed and buffer data:
- *    1. For compressed formats, only the data channel is used and its size
- *       is equal to that of a singular block in the compression scheme.
- *    2. For buffer formats, only the data channel is used and its size is
- *       exactly one byte in length.
- *    3. In each case the bit depth represents the size of a singular block.
- *
- * Note: Compressed and IEEE formats do not use the bitMask structure.
- */
-
-enum svga3d_block_desc {
-	SVGA3DBLOCKDESC_NONE        = 0,         /* No channels are active */
-	SVGA3DBLOCKDESC_BLUE        = 1 << 0,    /* Block with blue channel
-						    data */
-	SVGA3DBLOCKDESC_U           = 1 << 0,    /* Block with bump U channel
-						    data */
-	SVGA3DBLOCKDESC_UV_VIDEO    = 1 << 7,    /* Block with alternating video
-						    U and V */
-	SVGA3DBLOCKDESC_GREEN       = 1 << 1,    /* Block with green channel
-						    data */
-	SVGA3DBLOCKDESC_V           = 1 << 1,    /* Block with bump V channel
-						    data */
-	SVGA3DBLOCKDESC_STENCIL     = 1 << 1,    /* Block with a stencil
-						    channel */
-	SVGA3DBLOCKDESC_RED         = 1 << 2,    /* Block with red channel
-						    data */
-	SVGA3DBLOCKDESC_W           = 1 << 2,    /* Block with bump W channel
-						    data */
-	SVGA3DBLOCKDESC_LUMINANCE   = 1 << 2,    /* Block with luminance channel
-						    data */
-	SVGA3DBLOCKDESC_Y           = 1 << 2,    /* Block with video luminance
-						    data */
-	SVGA3DBLOCKDESC_DEPTH       = 1 << 2,    /* Block with depth channel */
-	SVGA3DBLOCKDESC_ALPHA       = 1 << 3,    /* Block with an alpha
-						    channel */
-	SVGA3DBLOCKDESC_Q           = 1 << 3,    /* Block with bump Q channel
-						    data */
-	SVGA3DBLOCKDESC_BUFFER      = 1 << 4,    /* Block stores 1 byte of
-						    data */
-	SVGA3DBLOCKDESC_COMPRESSED  = 1 << 5,    /* Block stores n bytes of
-						    data depending on the
-						    compression method used */
-	SVGA3DBLOCKDESC_IEEE_FP     = 1 << 6,    /* Block stores data in an IEEE
-						    floating point
-						    representation in
-						    all channels */
-	SVGA3DBLOCKDESC_PLANAR_YUV  = 1 << 8,    /* Three separate blocks store
-						    data. */
-	SVGA3DBLOCKDESC_U_VIDEO     = 1 << 9,    /* Block with U video data */
-	SVGA3DBLOCKDESC_V_VIDEO     = 1 << 10,   /* Block with V video data */
-	SVGA3DBLOCKDESC_EXP         = 1 << 11,   /* Shared exponent */
-	SVGA3DBLOCKDESC_SRGB        = 1 << 12,   /* Data is in sRGB format */
-	SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13,   /* 2 planes of Y, UV,
-						    e.g., NV12. */
-	SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14,   /* 3 planes of separate
-						    Y, U, V, e.g., YV12. */
-
-	SVGA3DBLOCKDESC_RG         = SVGA3DBLOCKDESC_RED |
-	SVGA3DBLOCKDESC_GREEN,
-	SVGA3DBLOCKDESC_RGB        = SVGA3DBLOCKDESC_RG |
-	SVGA3DBLOCKDESC_BLUE,
-	SVGA3DBLOCKDESC_RGB_SRGB   = SVGA3DBLOCKDESC_RGB |
-	SVGA3DBLOCKDESC_SRGB,
-	SVGA3DBLOCKDESC_RGBA       = SVGA3DBLOCKDESC_RGB |
-	SVGA3DBLOCKDESC_ALPHA,
-	SVGA3DBLOCKDESC_RGBA_SRGB  = SVGA3DBLOCKDESC_RGBA |
-	SVGA3DBLOCKDESC_SRGB,
-	SVGA3DBLOCKDESC_UV         = SVGA3DBLOCKDESC_U |
-	SVGA3DBLOCKDESC_V,
-	SVGA3DBLOCKDESC_UVL        = SVGA3DBLOCKDESC_UV |
-	SVGA3DBLOCKDESC_LUMINANCE,
-	SVGA3DBLOCKDESC_UVW        = SVGA3DBLOCKDESC_UV |
-	SVGA3DBLOCKDESC_W,
-	SVGA3DBLOCKDESC_UVWA       = SVGA3DBLOCKDESC_UVW |
-	SVGA3DBLOCKDESC_ALPHA,
-	SVGA3DBLOCKDESC_UVWQ       = SVGA3DBLOCKDESC_U |
-	SVGA3DBLOCKDESC_V |
-	SVGA3DBLOCKDESC_W |
-	SVGA3DBLOCKDESC_Q,
-	SVGA3DBLOCKDESC_LA         = SVGA3DBLOCKDESC_LUMINANCE |
-	SVGA3DBLOCKDESC_ALPHA,
-	SVGA3DBLOCKDESC_R_FP       = SVGA3DBLOCKDESC_RED |
-	SVGA3DBLOCKDESC_IEEE_FP,
-	SVGA3DBLOCKDESC_RG_FP      = SVGA3DBLOCKDESC_R_FP |
-	SVGA3DBLOCKDESC_GREEN,
-	SVGA3DBLOCKDESC_RGB_FP     = SVGA3DBLOCKDESC_RG_FP |
-	SVGA3DBLOCKDESC_BLUE,
-	SVGA3DBLOCKDESC_RGBA_FP    = SVGA3DBLOCKDESC_RGB_FP |
-	SVGA3DBLOCKDESC_ALPHA,
-	SVGA3DBLOCKDESC_DS         = SVGA3DBLOCKDESC_DEPTH |
-	SVGA3DBLOCKDESC_STENCIL,
-	SVGA3DBLOCKDESC_YUV        = SVGA3DBLOCKDESC_UV_VIDEO |
-	SVGA3DBLOCKDESC_Y,
-	SVGA3DBLOCKDESC_AYUV       = SVGA3DBLOCKDESC_ALPHA |
-	SVGA3DBLOCKDESC_Y |
-	SVGA3DBLOCKDESC_U_VIDEO |
-	SVGA3DBLOCKDESC_V_VIDEO,
-	SVGA3DBLOCKDESC_RGBE       = SVGA3DBLOCKDESC_RGB |
-	SVGA3DBLOCKDESC_EXP,
-	SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
-	SVGA3DBLOCKDESC_SRGB,
-	SVGA3DBLOCKDESC_NV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
-	SVGA3DBLOCKDESC_2PLANAR_YUV,
-	SVGA3DBLOCKDESC_YV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
-	SVGA3DBLOCKDESC_3PLANAR_YUV,
-};
-
-/*
- * SVGA3dSurfaceDesc describes the actual pixel data.
- *
- * This structure provides the following information:
- *    1. Block description.
- *    2. Dimensions of a block in the surface.
- *    3. Size of block in bytes.
- *    4. Bit depth of the pixel data.
- *    5. Channel bit depths and masks (if applicable).
- */
-#define SVGA3D_CHANNEL_DEF(type)		\
-	struct {				\
-		union {				\
-			type blue;              \
-			type u;                 \
-			type uv_video;          \
-			type u_video;           \
-		};				\
-		union {				\
-			type green;             \
-			type v;                 \
-			type stencil;           \
-			type v_video;           \
-		};				\
-		union {				\
-			type red;               \
-			type w;                 \
-			type luminance;         \
-			type y;                 \
-			type depth;             \
-			type data;              \
-		};				\
-		union {				\
-			type alpha;             \
-			type q;                 \
-			type exp;               \
-		};				\
-	}
-
-struct svga3d_surface_desc {
-	enum svga3d_block_desc block_desc;
-	surf_size_struct block_size;
-	u32 bytes_per_block;
-	u32 pitch_bytes_per_block;
-
-	struct {
-		u32 total;
-		SVGA3D_CHANNEL_DEF(uint8);
-	} bit_depth;
-
-	struct {
-		SVGA3D_CHANNEL_DEF(uint8);
-	} bit_offset;
-};
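The descriptor is enough to size any mip level: round each dimension up to whole blocks, then multiply by bytes_per_block. A sketch using the DIV_ROUND_UP defined above (surface_mip_bytes() is hypothetical); for example, a 3x3x1 DXT1 level rounds up to a single 4x4 block of 8 bytes:

    /* Sketch: byte size of one mip level described by 'desc'. */
    static u32 surface_mip_bytes(const struct svga3d_surface_desc *desc,
                                 surf_size_struct size)
    {
       u32 bw = DIV_ROUND_UP(size.width, desc->block_size.width);
       u32 bh = DIV_ROUND_UP(size.height, desc->block_size.height);
       u32 bd = DIV_ROUND_UP(size.depth, desc->block_size.depth);

       return bw * bh * bd * desc->bytes_per_block;
    }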
-
-static const struct svga3d_surface_desc svga3d_surface_descs[] = {
-	{SVGA3DBLOCKDESC_NONE,
-	 {1, 1, 1},  0, 0, {0, {{0}, {0}, {0}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_FORMAT_INVALID */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_X8R8G8B8 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_A8R8G8B8 */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  2, 2, {16, {{5}, {6}, {5}, {0} } },
-	 {{{0}, {5}, {11}, {0} } } },    /* SVGA3D_R5G6B5 */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  2, 2, {15, {{5}, {5}, {5}, {0} } },
-	 {{{0}, {5}, {10}, {0} } } },    /* SVGA3D_X1R5G5B5 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  2, 2, {16, {{5}, {5}, {5}, {1} } },
-	 {{{0}, {5}, {10}, {15} } } },   /* SVGA3D_A1R5G5B5 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  2, 2, {16, {{4}, {4}, {4}, {4} } },
-	 {{{0}, {4}, {8}, {12} } } },    /* SVGA3D_A4R4G4B4 */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_D32 */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_D16 */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24S8 */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  2, 2, {16, {{0}, {1}, {15}, {0} } },
-	 {{{0}, {15}, {0}, {0} } } },    /* SVGA3D_Z_D15S1 */
-
-	{SVGA3DBLOCKDESC_LUMINANCE,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_LUMINANCE8 */
-
-	{SVGA3DBLOCKDESC_LA,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {4}, {4} } },
-	 {{{0}, {0}, {0}, {4} } } },     /* SVGA3D_LUMINANCE4_ALPHA4 */
-
-	{SVGA3DBLOCKDESC_LUMINANCE,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_LUMINANCE16 */
-
-	{SVGA3DBLOCKDESC_LA,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {8}, {8} } },
-	 {{{0}, {0}, {0}, {8} } } },     /* SVGA3D_LUMINANCE8_ALPHA8 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT1 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT2 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT3 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT4 */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT5 */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {8}, {8} } },
-	 {{{0}, {0}, {0}, {8} } } },     /* SVGA3D_BUMPU8V8 */
-
-	{SVGA3DBLOCKDESC_UVL,
-	 {1, 1, 1},  2, 2, {16, {{5}, {5}, {6}, {0} } },
-	 {{{11}, {6}, {0}, {0} } } },    /* SVGA3D_BUMPL6V5U5 */
-
-	{SVGA3DBLOCKDESC_UVL,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {0} } },
-	 {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_BUMPX8L8V8U8 */
-
-	{SVGA3DBLOCKDESC_UVL,
-	 {1, 1, 1},  3, 3, {24, {{8}, {8}, {8}, {0} } },
-	 {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_BUMPL8V8U8 */
-
-	{SVGA3DBLOCKDESC_RGBA_FP,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_ARGB_S10E5 */
-
-	{SVGA3DBLOCKDESC_RGBA_FP,
-	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
-	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_ARGB_S23E8 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_A2R10G10B10 */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  2, 2, {16, {{8}, {8}, {0}, {0} } },
-	 {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_V8U8 */
-
-	{SVGA3DBLOCKDESC_UVWQ,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{24}, {16}, {8}, {0} } } },   /* SVGA3D_Q8W8V8U8 */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  2, 2, {16, {{8}, {8}, {0}, {0} } },
-	 {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_CxV8U8 */
-
-	{SVGA3DBLOCKDESC_UVL,
-	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
-	 {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_X8L8V8U8 */
-
-	{SVGA3DBLOCKDESC_UVWA,
-	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_A2W10V10U10 */
-
-	{SVGA3DBLOCKDESC_ALPHA,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {0}, {8} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_ALPHA8 */
-
-	{SVGA3DBLOCKDESC_R_FP,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R_S10E5 */
-
-	{SVGA3DBLOCKDESC_R_FP,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R_S23E8 */
-
-	{SVGA3DBLOCKDESC_RG_FP,
-	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_RG_S10E5 */
-
-	{SVGA3DBLOCKDESC_RG_FP,
-	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_RG_S23E8 */
-
-	{SVGA3DBLOCKDESC_BUFFER,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BUFFER */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24X8 */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  4, 4, {32, {{16}, {16}, {0}, {0} } },
-	 {{{16}, {0}, {0}, {0} } } },    /* SVGA3D_V16U16 */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {0}, {16}, {0} } } },    /* SVGA3D_G16R16 */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_A16B16G16R16 */
-
-	{SVGA3DBLOCKDESC_YUV,
-	 {1, 1, 1},  2, 2, {16, {{8}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {8}, {0} } } },     /* SVGA3D_UYVY */
-
-	{SVGA3DBLOCKDESC_YUV,
-	 {1, 1, 1},  2, 2, {16, {{8}, {0}, {8}, {0} } },
-	 {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_YUY2 */
-
-	{SVGA3DBLOCKDESC_NV12,
-	 {2, 2, 1},  6, 2, {48, {{0}, {0}, {48}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_NV12 */
-
-	{SVGA3DBLOCKDESC_AYUV,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_AYUV */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
-	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
-	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_UINT */
-
-	{SVGA3DBLOCKDESC_UVWQ,
-	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
-	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_SINT */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
-	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGB_FP,
-	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
-	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_FLOAT */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
-	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_UINT */
-
-	{SVGA3DBLOCKDESC_UVW,
-	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
-	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_SINT */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_UINT */
-
-	{SVGA3DBLOCKDESC_UVWQ,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_SNORM */
-
-	{SVGA3DBLOCKDESC_UVWQ,
-	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
-	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_SINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_UINT */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_SINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  8, 8, {64, {{0}, {8}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G8X24_TYPELESS */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  8, 8, {64, {{0}, {8}, {32}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_D32_FLOAT_S8X24_UINT */
-
-	{SVGA3DBLOCKDESC_R_FP,
-	 {1, 1, 1},  8, 8, {64, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },    /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
-
-	{SVGA3DBLOCKDESC_GREEN,
-	 {1, 1, 1},  8, 8, {64, {{0}, {8}, {0}, {0} } },
-	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_X32_TYPELESS_G8X24_UINT */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10A2_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10A2_UINT */
-
-	{SVGA3DBLOCKDESC_RGB_FP,
-	 {1, 1, 1},  4, 4, {32, {{10}, {11}, {11}, {0} } },
-	 {{{0}, {10}, {21}, {0} } } },  /* SVGA3D_R11G11B10_FLOAT */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM */
-
-	{SVGA3DBLOCKDESC_RGBA_SRGB,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UINT */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_SINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RG_FP,
-	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_UINT */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
-	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_SINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_TYPELESS */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_D32_FLOAT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_UINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_SINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_R24G8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_D24_UNORM_S8_UINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {24}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R24_UNORM_X8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_GREEN,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {0}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_X24_TYPELESS_G8_UINT */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_UNORM */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_UINT */
-
-	{SVGA3DBLOCKDESC_UV,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_SINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_UNORM */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_UINT */
-
-	{SVGA3DBLOCKDESC_U,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_SNORM */
-
-	{SVGA3DBLOCKDESC_U,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_SINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_UNORM */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_UINT */
-
-	{SVGA3DBLOCKDESC_U,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_SNORM */
-
-	{SVGA3DBLOCKDESC_U,
-	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_SINT */
-
-	{SVGA3DBLOCKDESC_RED,
-	 {8, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R1_UNORM */
-
-	{SVGA3DBLOCKDESC_RGBE,
-	 {1, 1, 1},  4, 4, {32, {{9}, {9}, {9}, {5} } },
-	 {{{18}, {9}, {0}, {27} } } },   /* SVGA3D_R9G9B9E5_SHAREDEXP */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_B8G8_UNORM */
-
-	{SVGA3DBLOCKDESC_RG,
-	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_G8R8_G8B8_UNORM */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC1_TYPELESS */
-
-	{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC1_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC2_TYPELESS */
-
-	{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC2_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC3_TYPELESS */
-
-	{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC3_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_TYPELESS */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_UNORM */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_SNORM */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_TYPELESS */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_UNORM */
-
-	{SVGA3DBLOCKDESC_COMPRESSED,
-	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_SNORM */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
-	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
-
-	{SVGA3DBLOCKDESC_RGBA,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8A8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGBA_SRGB,
-	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8A8_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_RGB,
-	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8X8_TYPELESS */
-
-	{SVGA3DBLOCKDESC_RGB_SRGB,
-	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
-	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8X8_UNORM_SRGB */
-
-	{SVGA3DBLOCKDESC_DEPTH,
-	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
-	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_DF16 */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_DF24 */
-
-	{SVGA3DBLOCKDESC_DS,
-	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
-	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24S8_INT */
-};
-
-static inline u32 clamped_umul32(u32 a, u32 b)
-{
-	u64 tmp = (u64) a*b;
-	return (tmp > (u64) U32_MAX) ? U32_MAX : tmp;
-}
-
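The saturating multiply above underpins the overflow-safe size computations that follow. A minimal standalone sketch of its behaviour (hypothetical harness, not part of the driver):

#include <stdint.h>
#include <stdio.h>

#define U32_MAX 0xffffffffu

/* Same logic as clamped_umul32 above: widen to 64 bits, then saturate. */
static uint32_t clamped_umul32(uint32_t a, uint32_t b)
{
	uint64_t tmp = (uint64_t)a * b;

	return (tmp > (uint64_t)U32_MAX) ? U32_MAX : (uint32_t)tmp;
}

int main(void)
{
	/* 0x10000 * 0x10000 == 2^32 exceeds u32, so the result saturates. */
	printf("%#x\n", clamped_umul32(0x10000, 0x10000)); /* 0xffffffff */
	printf("%#x\n", clamped_umul32(1000, 1000));       /* 0xf4240 */
	return 0;
}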
-static inline const struct svga3d_surface_desc *
-svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
-{
-	if (format < ARRAY_SIZE(svga3d_surface_descs))
-		return &svga3d_surface_descs[format];
-
-	return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
-}
-
-/*
- *----------------------------------------------------------------------
- *
- * svga3dsurface_get_mip_size --
- *
- *      Given a base level size and the mip level, compute the size of
- *      the mip level.
- *
- * Results:
- *      See above.
- *
- * Side effects:
- *      None.
- *
- *----------------------------------------------------------------------
- */
-
-static inline surf_size_struct
-svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
-{
-	surf_size_struct size;
-
-	size.width = max_t(u32, base_level.width >> mip_level, 1);
-	size.height = max_t(u32, base_level.height >> mip_level, 1);
-	size.depth = max_t(u32, base_level.depth >> mip_level, 1);
-	return size;
-}
-
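For example, a 256x64x1 base level is 32x8x1 at mip 3 and 2x1x1 at mip 7: each dimension is shifted right once per level and clamped at 1, so height and depth bottom out while width keeps halving.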
-static inline void
-svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
-				 const surf_size_struct *pixel_size,
-				 surf_size_struct *block_size)
-{
-	block_size->width = DIV_ROUND_UP(pixel_size->width,
-					 desc->block_size.width);
-	block_size->height = DIV_ROUND_UP(pixel_size->height,
-					  desc->block_size.height);
-	block_size->depth = DIV_ROUND_UP(pixel_size->depth,
-					 desc->block_size.depth);
-}
-
-static inline bool
-svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
-{
-	return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
-}
-
-static inline u32
-svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
-			      const surf_size_struct *size)
-{
-	u32 pitch;
-	surf_size_struct blocks;
-
-	svga3dsurface_get_size_in_blocks(desc, size, &blocks);
-
-	pitch = blocks.width * desc->pitch_bytes_per_block;
-
-	return pitch;
-}
-
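Worked example against the format table above: a BC1-style compressed format uses 4x4 blocks of 8 bytes, so a 256-pixel-wide image spans DIV_ROUND_UP(256, 4) = 64 blocks and gets a pitch of 64 * 8 = 512 bytes.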
-/*
- *-----------------------------------------------------------------------------
- *
- * svga3dsurface_get_image_buffer_size --
- *
- *      Return the number of bytes of buffer space required to store
- *      one image of a surface, optionally using the specified pitch.
- *
- *      If pitch is zero, it is assumed that rows are tightly packed.
- *
- *      This function is overflow-safe: if the result would have
- *      overflowed, MAX_UINT32 is returned instead.
- *
- * Results:
- *      Byte count.
- *
- * Side effects:
- *      None.
- *
- *-----------------------------------------------------------------------------
- */
-
-static inline u32
-svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
-				    const surf_size_struct *size,
-				    u32 pitch)
-{
-	surf_size_struct image_blocks;
-	u32 slice_size, total_size;
-
-	svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
-
-	if (svga3dsurface_is_planar_surface(desc)) {
-		total_size = clamped_umul32(image_blocks.width,
-					    image_blocks.height);
-		total_size = clamped_umul32(total_size, image_blocks.depth);
-		total_size = clamped_umul32(total_size, desc->bytes_per_block);
-		return total_size;
-	}
-
-	if (pitch == 0)
-		pitch = svga3dsurface_calculate_pitch(desc, size);
-
-	slice_size = clamped_umul32(image_blocks.height, pitch);
-	total_size = clamped_umul32(slice_size, image_blocks.depth);
-
-	return total_size;
-}
-
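Worked example for the planar branch, using the SVGA3D_NV12 row of the table above (2x2x1 blocks, 6 bytes per block): a 64x64 image is 32 * 32 * 1 = 1024 blocks, so its buffer size is 1024 * 6 = 6144 bytes — the familiar 1.5 bytes per pixel of 4:2:0 YUV.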
-static inline u32
-svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
-				  surf_size_struct base_level_size,
-				  u32 num_mip_levels,
-				  bool cubemap)
-{
-	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
-	u64 total_size = 0;
-	u32 mip;
-
-	for (mip = 0; mip < num_mip_levels; mip++) {
-		surf_size_struct size =
-			svga3dsurface_get_mip_size(base_level_size, mip);
-		total_size += svga3dsurface_get_image_buffer_size(desc,
-								  &size, 0);
-	}
-
-	if (cubemap)
-		total_size *= SVGA3D_MAX_SURFACE_FACES;
-
-	return (u32) min_t(u64, total_size, (u64) U32_MAX);
-}
-
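Worked example: a 256x256 SVGA3D_R8G8B8A8_UNORM surface (4 bytes per pixel) with a full 9-level mip chain serializes to 4 * (65536 + 16384 + 4096 + 1024 + 256 + 64 + 16 + 4 + 1) = 349524 bytes; for a cubemap this is multiplied by SVGA3D_MAX_SURFACE_FACES (six faces), giving 2097144 bytes.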
-
-/**
- * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
- * in an image (or volume).
- *
- * @format: The surface format.
- * @width: The image width in pixels.
- * @height: The image height in pixels.
- * @x: X coordinate of the pixel.
- * @y: Y coordinate of the pixel.
- * @z: Z coordinate (depth slice) of the pixel.
- */
-static inline u32
-svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
-			       u32 width, u32 height,
-			       u32 x, u32 y, u32 z)
-{
-	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
-	const u32 bw = desc->block_size.width, bh = desc->block_size.height;
-	const u32 bd = desc->block_size.depth;
-	const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
-	const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
-	const u32 offset = (z / bd * imgstride +
-			    y / bh * rowstride +
-			    x / bw * desc->bytes_per_block);
-	return offset;
-}
-
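Worked example for an uncompressed format (1x1x1 blocks, 4 bytes per block) at width 640: rowstride = 640 * 4 = 2560 bytes, so pixel (x=3, y=2, z=0) sits at 2 * 2560 + 3 * 4 = 5132 bytes from the start of the image.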
-
-static inline u32
-svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
-			       surf_size_struct baseLevelSize,
-			       u32 numMipLevels,
-			       u32 face,
-			       u32 mip)
-
-{
-	u32 offset;
-	u32 mipChainBytes;
-	u32 mipChainBytesToLevel;
-	u32 i;
-	const struct svga3d_surface_desc *desc;
-	surf_size_struct mipSize;
-	u32 bytes;
-
-	desc = svga3dsurface_get_desc(format);
-
-	mipChainBytes = 0;
-	mipChainBytesToLevel = 0;
-	for (i = 0; i < numMipLevels; i++) {
-		mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
-		bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
-		mipChainBytes += bytes;
-		if (i < mip)
-			mipChainBytesToLevel += bytes;
-	}
-
-	offset = mipChainBytes * face + mipChainBytesToLevel;
-
-	return offset;
-}
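Taken together, the helpers above can locate a single texel: faces are laid out as complete mip chains one after another, so the image offset plus the in-image pixel offset gives the texel's byte position. A hypothetical composition (texel_offset is not a function from this header):

/* Hypothetical: byte offset of texel (x, y, z) in the given face/mip. */
static inline u32
texel_offset(SVGA3dSurfaceFormat format, surf_size_struct base_size,
	     u32 num_mips, u32 face, u32 mip, u32 x, u32 y, u32 z)
{
	surf_size_struct ms = svga3dsurface_get_mip_size(base_size, mip);

	return svga3dsurface_get_image_offset(format, base_size, num_mips,
					      face, mip) +
	       svga3dsurface_get_pixel_offset(format, ms.width, ms.height,
					      x, y, z);
}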
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/svga_escape.h
deleted file mode 100644
index 8e8d968..0000000
--- a/drivers/gpu/drm/vmwgfx/svga_escape.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/**********************************************************
- * Copyright 2007-2009 VMware, Inc.  All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
-
-/*
- * svga_escape.h --
- *
- *    Definitions for our own (vendor-specific) SVGA Escape commands.
- */
-
-#ifndef _SVGA_ESCAPE_H_
-#define _SVGA_ESCAPE_H_
-
-
-/*
- * Namespace IDs for the escape command
- */
-
-#define SVGA_ESCAPE_NSID_VMWARE 0x00000000
-#define SVGA_ESCAPE_NSID_DEVEL  0xFFFFFFFF
-
-
-/*
- * Within SVGA_ESCAPE_NSID_VMWARE, we multiplex commands according to
- * the first DWORD of escape data (after the nsID and size). As a
- * guideline we're using the high word and low word as a major and
- * minor command number, respectively.
- *
- * Major command number allocation:
- *
- *   0000: Reserved
- *   0001: SVGA_ESCAPE_VMWARE_LOG (svga_binary_logger.h)
- *   0002: SVGA_ESCAPE_VMWARE_VIDEO (svga_overlay.h)
- *   0003: SVGA_ESCAPE_VMWARE_HINT (svga_escape.h)
- */
-
-#define SVGA_ESCAPE_VMWARE_MAJOR_MASK  0xFFFF0000
-
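As a sketch of the convention just described, the first DWORD of escape data splits into a major and a minor command number (standalone illustration, not driver code):

#include <stdint.h>

#define SVGA_ESCAPE_VMWARE_MAJOR_MASK  0xFFFF0000

/* E.g. 0x00020001 (an SVGA_ESCAPE_VMWARE_VIDEO command) decodes to
 * major 0x0002, minor 0x0001. */
static uint32_t escape_major(uint32_t cmd)
{
	return (cmd & SVGA_ESCAPE_VMWARE_MAJOR_MASK) >> 16;
}

static uint32_t escape_minor(uint32_t cmd)
{
	return cmd & 0xffff;
}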
-
-/*
- * SVGA Hint commands.
- *
- * These escapes let the SVGA driver provide optional information to
- * the host about the state of the guest or guest applications. The
- * host can use these hints to make user interface or performance
- * decisions.
- *
- * Notes:
- *
- *   - SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN is deprecated for guests
- *     that use the SVGA Screen Object extension. Instead of sending
- *     this escape, use the SVGA_SCREEN_FULLSCREEN_HINT flag on your
- *     Screen Object.
- */
-
-#define SVGA_ESCAPE_VMWARE_HINT               0x00030000
-#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN    0x00030001  /* Deprecated */
-
-typedef
-struct {
-   uint32 command;
-   uint32 fullscreen;
-   struct {
-      int32 x, y;
-   } monitorPosition;
-} SVGAEscapeHintFullscreen;
-
-#endif /* _SVGA_ESCAPE_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/svga_overlay.h
deleted file mode 100644
index f38416f..0000000
--- a/drivers/gpu/drm/vmwgfx/svga_overlay.h
+++ /dev/null
@@ -1,201 +0,0 @@
-/**********************************************************
- * Copyright 2007-2009 VMware, Inc.  All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
-
-/*
- * svga_overlay.h --
- *
- *    Definitions for video-overlay support.
- */
-
-#ifndef _SVGA_OVERLAY_H_
-#define _SVGA_OVERLAY_H_
-
-#include "svga_reg.h"
-
-/*
- * Video formats we support
- */
-
-#define VMWARE_FOURCC_YV12 0x32315659 /* 'Y' 'V' '1' '2' */
-#define VMWARE_FOURCC_YUY2 0x32595559 /* 'Y' 'U' 'Y' '2' */
-#define VMWARE_FOURCC_UYVY 0x59565955 /* 'U' 'Y' 'V' 'Y' */
-
-typedef enum {
-   SVGA_OVERLAY_FORMAT_INVALID = 0,
-   SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12,
-   SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2,
-   SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY,
-} SVGAOverlayFormat;
-
-#define SVGA_VIDEO_COLORKEY_MASK             0x00ffffff
-
-#define SVGA_ESCAPE_VMWARE_VIDEO             0x00020000
-
-#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS    0x00020001
-        /* FIFO escape layout:
-         * Type, Stream Id, (Register Id, Value) pairs */
-
-#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH       0x00020002
-        /* FIFO escape layout:
-         * Type, Stream Id */
-
-typedef
-struct SVGAEscapeVideoSetRegs {
-   struct {
-      uint32 cmdType;
-      uint32 streamId;
-   } header;
-
-   /* May include zero or more items. */
-   struct {
-      uint32 registerId;
-      uint32 value;
-   } items[1];
-} SVGAEscapeVideoSetRegs;
-
-typedef
-struct SVGAEscapeVideoFlush {
-   uint32 cmdType;
-   uint32 streamId;
-} SVGAEscapeVideoFlush;
-
-
-/*
- * Struct definitions for the video overlay commands built on
- * SVGAFifoCmdEscape.
- */
-typedef
-struct {
-   uint32 command;
-   uint32 overlay;
-} SVGAFifoEscapeCmdVideoBase;
-
-typedef
-struct {
-   SVGAFifoEscapeCmdVideoBase videoCmd;
-} SVGAFifoEscapeCmdVideoFlush;
-
-typedef
-struct {
-   SVGAFifoEscapeCmdVideoBase videoCmd;
-   struct {
-      uint32 regId;
-      uint32 value;
-   } items[1];
-} SVGAFifoEscapeCmdVideoSetRegs;
-
-typedef
-struct {
-   SVGAFifoEscapeCmdVideoBase videoCmd;
-   struct {
-      uint32 regId;
-      uint32 value;
-   } items[SVGA_VIDEO_NUM_REGS];
-} SVGAFifoEscapeCmdVideoSetAllRegs;
-
-
-/*
- *----------------------------------------------------------------------
- *
- * VMwareVideoGetAttributes --
- *
- *      Computes the size, pitches and offsets for YUV frames.
- *
- * Results:
- *      TRUE on success; FALSE on failure.
- *
- * Side effects:
- *      Pitches and offsets for the given YUV frame are put in 'pitches'
- *      and 'offsets' respectively. They are both optional though.
- *
- *----------------------------------------------------------------------
- */
-
-static inline bool
-VMwareVideoGetAttributes(const SVGAOverlayFormat format,    /* IN */
-                         uint32 *width,                     /* IN / OUT */
-                         uint32 *height,                    /* IN / OUT */
-                         uint32 *size,                      /* OUT */
-                         uint32 *pitches,                   /* OUT (optional) */
-                         uint32 *offsets)                   /* OUT (optional) */
-{
-    int tmp;
-
-    *width = (*width + 1) & ~1;
-
-    if (offsets) {
-        offsets[0] = 0;
-    }
-
-    switch (format) {
-    case VMWARE_FOURCC_YV12:
-       *height = (*height + 1) & ~1;
-       *size = (*width + 3) & ~3;
-
-       if (pitches) {
-          pitches[0] = *size;
-       }
-
-       *size *= *height;
-
-       if (offsets) {
-          offsets[1] = *size;
-       }
-
-       tmp = ((*width >> 1) + 3) & ~3;
-
-       if (pitches) {
-          pitches[1] = pitches[2] = tmp;
-       }
-
-       tmp *= (*height >> 1);
-       *size += tmp;
-
-       if (offsets) {
-          offsets[2] = *size;
-       }
-
-       *size += tmp;
-       break;
-
-    case VMWARE_FOURCC_YUY2:
-    case VMWARE_FOURCC_UYVY:
-       *size = *width * 2;
-
-       if (pitches) {
-          pitches[0] = *size;
-       }
-
-       *size *= *height;
-       break;
-
-    default:
-       return false;
-    }
-
-    return true;
-}
-
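Working the YV12 branch above through a 320x240 frame: pitches[0] = 320 and the luma plane ends at offsets[1] = 76800; the two chroma planes use pitches[1] = pitches[2] = 160, with offsets[2] = 96000 and a total *size of 115200 bytes — 1.5 bytes per pixel, as expected for 4:2:0.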
-#endif /* _SVGA_OVERLAY_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
deleted file mode 100644
index e4259c2..0000000
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ /dev/null
@@ -1,1564 +0,0 @@
-/**********************************************************
- * Copyright 1998-2009 VMware, Inc.  All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
-
-/*
- * svga_reg.h --
- *
- *    Virtual hardware definitions for the VMware SVGA II device.
- */
-
-#ifndef _SVGA_REG_H_
-#define _SVGA_REG_H_
-
-/*
- * PCI device IDs.
- */
-#define PCI_DEVICE_ID_VMWARE_SVGA2      0x0405
-
-/*
- * SVGA_REG_ENABLE bit definitions.
- */
-#define SVGA_REG_ENABLE_DISABLE     0
-#define SVGA_REG_ENABLE_ENABLE      1
-#define SVGA_REG_ENABLE_HIDE        2
-#define SVGA_REG_ENABLE_ENABLE_HIDE (SVGA_REG_ENABLE_ENABLE |\
-				     SVGA_REG_ENABLE_HIDE)
-
-/*
- * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
- * cursor bypass mode. This is still supported, but no new guest
- * drivers should use it.
- */
-#define SVGA_CURSOR_ON_HIDE            0x0   /* Must be 0 to maintain backward compatibility */
-#define SVGA_CURSOR_ON_SHOW            0x1   /* Must be 1 to maintain backward compatibility */
-#define SVGA_CURSOR_ON_REMOVE_FROM_FB  0x2   /* Remove the cursor from the framebuffer because we need to see what's under it */
-#define SVGA_CURSOR_ON_RESTORE_TO_FB   0x3   /* Put the cursor back in the framebuffer so the user can see it */
-
-/*
- * The maximum framebuffer size that can be traced, e.g. for guests in VESA mode.
- * The changeMap in the monitor is proportional to this number. Therefore, we'd
- * like to keep it as small as possible to reduce monitor overhead (using
- * SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over
- * 4k!).
- *
- * NB: For compatibility reasons, this value must be greater than 0xff0000.
- *     See bug 335072.
- */
-#define SVGA_FB_MAX_TRACEABLE_SIZE      0x1000000
-
-#define SVGA_MAX_PSEUDOCOLOR_DEPTH      8
-#define SVGA_MAX_PSEUDOCOLORS           (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH)
-#define SVGA_NUM_PALETTE_REGS           (3 * SVGA_MAX_PSEUDOCOLORS)
-
-#define SVGA_MAGIC         0x900000UL
-#define SVGA_MAKE_ID(ver)  (SVGA_MAGIC << 8 | (ver))
-
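So SVGA_MAGIC << 8 is 0x90000000, and the IDs below work out to 0x90000002, 0x90000001 and 0x90000000 for versions 2, 1 and 0 respectively.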
-/* Version 2 let the address of the frame buffer be unsigned on Win32 */
-#define SVGA_VERSION_2     2
-#define SVGA_ID_2          SVGA_MAKE_ID(SVGA_VERSION_2)
-
-/* Version 1 has new registers starting with SVGA_REG_CAPABILITIES so
-   PALETTE_BASE has moved */
-#define SVGA_VERSION_1     1
-#define SVGA_ID_1          SVGA_MAKE_ID(SVGA_VERSION_1)
-
-/* Version 0 is the initial version */
-#define SVGA_VERSION_0     0
-#define SVGA_ID_0          SVGA_MAKE_ID(SVGA_VERSION_0)
-
-/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
-#define SVGA_ID_INVALID    0xFFFFFFFF
-
-/* Port offsets, relative to BAR0 */
-#define SVGA_INDEX_PORT         0x0
-#define SVGA_VALUE_PORT         0x1
-#define SVGA_BIOS_PORT          0x2
-#define SVGA_IRQSTATUS_PORT     0x8
-
-/*
- * Interrupt source flags for IRQSTATUS_PORT and IRQMASK.
- *
- * Interrupts are only supported when the
- * SVGA_CAP_IRQMASK capability is present.
- */
-#define SVGA_IRQFLAG_ANY_FENCE            0x1    /* Any fence was passed */
-#define SVGA_IRQFLAG_FIFO_PROGRESS        0x2    /* Made forward progress in the FIFO */
-#define SVGA_IRQFLAG_FENCE_GOAL           0x4    /* SVGA_FIFO_FENCE_GOAL reached */
-
-/*
- * Registers
- */
-
-enum {
-   SVGA_REG_ID = 0,
-   SVGA_REG_ENABLE = 1,
-   SVGA_REG_WIDTH = 2,
-   SVGA_REG_HEIGHT = 3,
-   SVGA_REG_MAX_WIDTH = 4,
-   SVGA_REG_MAX_HEIGHT = 5,
-   SVGA_REG_DEPTH = 6,
-   SVGA_REG_BITS_PER_PIXEL = 7,       /* Current bpp in the guest */
-   SVGA_REG_PSEUDOCOLOR = 8,
-   SVGA_REG_RED_MASK = 9,
-   SVGA_REG_GREEN_MASK = 10,
-   SVGA_REG_BLUE_MASK = 11,
-   SVGA_REG_BYTES_PER_LINE = 12,
-   SVGA_REG_FB_START = 13,            /* (Deprecated) */
-   SVGA_REG_FB_OFFSET = 14,
-   SVGA_REG_VRAM_SIZE = 15,
-   SVGA_REG_FB_SIZE = 16,
-
-   /* ID 0 implementation only had the above registers, then the palette */
-
-   SVGA_REG_CAPABILITIES = 17,
-   SVGA_REG_MEM_START = 18,           /* (Deprecated) */
-   SVGA_REG_MEM_SIZE = 19,
-   SVGA_REG_CONFIG_DONE = 20,         /* Set when memory area configured */
-   SVGA_REG_SYNC = 21,                /* See "FIFO Synchronization Registers" */
-   SVGA_REG_BUSY = 22,                /* See "FIFO Synchronization Registers" */
-   SVGA_REG_GUEST_ID = 23,            /* Set guest OS identifier */
-   SVGA_REG_CURSOR_ID = 24,           /* (Deprecated) */
-   SVGA_REG_CURSOR_X = 25,            /* (Deprecated) */
-   SVGA_REG_CURSOR_Y = 26,            /* (Deprecated) */
-   SVGA_REG_CURSOR_ON = 27,           /* (Deprecated) */
-   SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* (Deprecated) */
-   SVGA_REG_SCRATCH_SIZE = 29,        /* Number of scratch registers */
-   SVGA_REG_MEM_REGS = 30,            /* Number of FIFO registers */
-   SVGA_REG_NUM_DISPLAYS = 31,        /* (Deprecated) */
-   SVGA_REG_PITCHLOCK = 32,           /* Fixed pitch for all modes */
-   SVGA_REG_IRQMASK = 33,             /* Interrupt mask */
-
-   /* Legacy multi-monitor support */
-   SVGA_REG_NUM_GUEST_DISPLAYS = 34,/* Number of guest displays in X/Y direction */
-   SVGA_REG_DISPLAY_ID = 35,        /* Display ID for the following display attributes */
-   SVGA_REG_DISPLAY_IS_PRIMARY = 36,/* Whether this is a primary display */
-   SVGA_REG_DISPLAY_POSITION_X = 37,/* The display position x */
-   SVGA_REG_DISPLAY_POSITION_Y = 38,/* The display position y */
-   SVGA_REG_DISPLAY_WIDTH = 39,     /* The display's width */
-   SVGA_REG_DISPLAY_HEIGHT = 40,    /* The display's height */
-
-   /* See "Guest memory regions" below. */
-   SVGA_REG_GMR_ID = 41,
-   SVGA_REG_GMR_DESCRIPTOR = 42,
-   SVGA_REG_GMR_MAX_IDS = 43,
-   SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
-
-   SVGA_REG_TRACES = 45,            /* Enable trace-based updates even when FIFO is on */
-   SVGA_REG_GMRS_MAX_PAGES = 46,    /* Maximum number of 4KB pages for all GMRs */
-   SVGA_REG_MEMORY_SIZE = 47,       /* Total dedicated device memory excluding FIFO */
-   SVGA_REG_COMMAND_LOW = 48,       /* Lower 32 bits of command buffer PA; write submits commands */
-   SVGA_REG_COMMAND_HIGH = 49,      /* Upper 32 bits of command buffer PA */
-   SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,   /* Max primary memory */
-   SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
-   SVGA_REG_DEV_CAP = 52,           /* Write dev cap index, read value */
-   SVGA_REG_CMD_PREPEND_LOW = 53,
-   SVGA_REG_CMD_PREPEND_HIGH = 54,
-   SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
-   SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
-   SVGA_REG_MOB_MAX_SIZE = 57,
-   SVGA_REG_TOP = 58,               /* Must be 1 more than the last register */
-
-   SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
-   /* Next 768 (== 256*3) registers exist for colormap */
-
-   SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
-                                    /* Base of scratch registers */
-   /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
-      First 4 are reserved for VESA BIOS Extension; any remaining are for
-      the use of the current SVGA driver. */
-};
-
-
-/*
- * Guest memory regions (GMRs):
- *
- * This is a new memory mapping feature available in SVGA devices
- * which have the SVGA_CAP_GMR bit set. Previously, there were two
- * fixed memory regions available with which to share data between the
- * device and the driver: the FIFO ('MEM') and the framebuffer. GMRs
- * are our name for an extensible way of providing arbitrary DMA
- * buffers for use between the driver and the SVGA device. They are a
- * new alternative to framebuffer memory, usable for both 2D and 3D
- * graphics operations.
- *
- * Since GMR mapping must be done synchronously with guest CPU
- * execution, we use a new pair of SVGA registers:
- *
- *   SVGA_REG_GMR_ID --
- *
- *     Read/write.
- *     This register holds the 32-bit ID (a small positive integer)
- *     of a GMR to create, delete, or redefine. Writing this register
- *     has no side-effects.
- *
- *   SVGA_REG_GMR_DESCRIPTOR --
- *
- *     Write-only.
- *     Writing this register will create, delete, or redefine the GMR
- *     specified by the above ID register. If this register is zero,
- *     the GMR is deleted. Any pointers into this GMR (including those
- *     currently being processed by FIFO commands) will be
- *     synchronously invalidated.
- *
- *     If this register is nonzero, it must be the physical page
- *     number (PPN) of a data structure which describes the physical
- *     layout of the memory region this GMR should describe. The
- *     descriptor structure will be read synchronously by the SVGA
- *     device when this register is written. The descriptor need not
- *     remain allocated for the lifetime of the GMR.
- *
- *     The guest driver should write SVGA_REG_GMR_ID first, then
- *     SVGA_REG_GMR_DESCRIPTOR.
- *
- *   SVGA_REG_GMR_MAX_IDS --
- *
- *     Read-only.
- *     The SVGA device may choose to support a maximum number of
- *     user-defined GMR IDs. This register holds the number of supported
- *     IDs. (The maximum supported ID plus 1)
- *
- *   SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH --
- *
- *     Read-only.
- *     The SVGA device may choose to put a limit on the total number
- *     of SVGAGuestMemDescriptor structures it will read when defining
- *     a single GMR.
- *
- * The descriptor structure is an array of SVGAGuestMemDescriptor
- * structures. Each structure may do one of three things:
- *
- *   - Terminate the GMR descriptor list.
- *     (ppn==0, numPages==0)
- *
- *   - Add a PPN or range of PPNs to the GMR's virtual address space.
- *     (ppn != 0, numPages != 0)
- *
- *   - Provide the PPN of the next SVGAGuestMemDescriptor, in order to
- *     support multi-page GMR descriptor tables without forcing the
- *     driver to allocate physically contiguous memory.
- *     (ppn != 0, numPages == 0)
- *
- * Note that each physical page of SVGAGuestMemDescriptor structures
- * can describe at least 2MB of guest memory. If the driver needs to
- * use more than one page of descriptor structures, it must use one of
- * its SVGAGuestMemDescriptors to point to an additional page.  The
- * device will never automatically cross a page boundary.
- *
- * Once the driver has described a GMR, it is immediately available
- * for use via any FIFO command that uses an SVGAGuestPtr structure.
- * These pointers include a GMR identifier plus an offset into that
- * GMR.
- *
- * The driver must check the SVGA_CAP_GMR bit before using the GMR
- * registers.
- */
-
-/*
- * Special GMR IDs, allowing SVGAGuestPtrs to point to framebuffer
- * memory as well.  In the future, these IDs could even be used to
- * allow legacy memory regions to be redefined by the guest as GMRs.
- *
- * Using the guest framebuffer (GFB) at BAR1 for general purpose DMA
- * is being phased out. Please try to use user-defined GMRs whenever
- * possible.
- */
-#define SVGA_GMR_NULL         ((uint32) -1)
-#define SVGA_GMR_FRAMEBUFFER  ((uint32) -2)  /* Guest Framebuffer (GFB) */
-
-typedef
-struct SVGAGuestMemDescriptor {
-   uint32 ppn;
-   uint32 numPages;
-} SVGAGuestMemDescriptor;
-
-typedef
-struct SVGAGuestPtr {
-   uint32 gmrId;
-   uint32 offset;
-} SVGAGuestPtr;
-
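A descriptor list per the rules above might look like this minimal sketch (hypothetical PPNs; the array must fit in one physical page unless chained via a ppn != 0, numPages == 0 entry):

/* Two PPN ranges plus the mandatory terminator. */
static SVGAGuestMemDescriptor desc[] = {
	{ .ppn = 0x1000, .numPages = 16 }, /* 16 pages starting at PPN 0x1000 */
	{ .ppn = 0x5000, .numPages = 4 },  /* 4 more pages at PPN 0x5000      */
	{ .ppn = 0,      .numPages = 0 },  /* end of list                     */
};

/* The driver would then write the GMR ID to SVGA_REG_GMR_ID, followed by
 * the PPN of the page holding 'desc' to SVGA_REG_GMR_DESCRIPTOR. */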
-
-/*
- * SVGAGMRImageFormat --
- *
- *    This is a packed representation of the source 2D image format
- *    for a GMR-to-screen blit. Currently it is defined as an encoding
- *    of the screen's color depth and bits-per-pixel, however, 16 bits
- *    are reserved for future use to identify other encodings (such as
- *    RGBA or higher-precision images).
- *
- *    Currently supported formats:
- *
- *       bpp depth  Format Name
- *       --- -----  -----------
- *        32    24  32-bit BGRX
- *        24    24  24-bit BGR
- *        16    16  RGB 5-6-5
- *        16    15  RGB 5-5-5
- *
- */
-
-typedef
-struct SVGAGMRImageFormat {
-   union {
-      struct {
-         uint32 bitsPerPixel : 8;
-         uint32 colorDepth   : 8;
-         uint32 reserved     : 16;  /* Must be zero */
-      };
-
-      uint32 value;
-   };
-} SVGAGMRImageFormat;
-
-typedef
-struct SVGAGuestImage {
-   SVGAGuestPtr         ptr;
-
-   /*
-    * A note on interpretation of pitch: This value of pitch is the
-    * number of bytes between vertically adjacent image
-    * blocks. Normally this is the number of bytes between the first
-    * pixel of two adjacent scanlines. With compressed textures,
-    * however, this may represent the number of bytes between
-    * compression blocks rather than between rows of pixels.
-    *
-    * XXX: Compressed textures currently must be tightly packed in guest memory.
-    *
-    * If the image is 1-dimensional, pitch is ignored.
-    *
-    * If 'pitch' is zero, the SVGA3D device calculates a pitch value
-    * assuming each row of blocks is tightly packed.
-    */
-   uint32 pitch;
-} SVGAGuestImage;
-
-/*
- * SVGAColorBGRX --
- *
- *    A 24-bit color format (BGRX), which does not depend on the
- *    format of the legacy guest framebuffer (GFB) or the current
- *    GMRFB state.
- */
-
-typedef
-struct SVGAColorBGRX {
-   union {
-      struct {
-         uint32 b : 8;
-         uint32 g : 8;
-         uint32 r : 8;
-         uint32 x : 8;  /* Unused */
-      };
-
-      uint32 value;
-   };
-} SVGAColorBGRX;
-
-
-/*
- * SVGASignedRect --
- * SVGASignedPoint --
- *
- *    Signed rectangle and point primitives. These are used by the new
- *    2D primitives for drawing to Screen Objects, which can occupy a
- *    signed virtual coordinate space.
- *
- *    SVGASignedRect specifies a half-open interval: the (left, top)
- *    pixel is part of the rectangle, but the (right, bottom) pixel is
- *    not.
- */
-
-typedef
-struct SVGASignedRect {
-   int32  left;
-   int32  top;
-   int32  right;
-   int32  bottom;
-} SVGASignedRect;
-
-typedef
-struct SVGASignedPoint {
-   int32  x;
-   int32  y;
-} SVGASignedPoint;
-
-
-/*
- *  Capabilities
- *
- *  Note the holes in the bitfield. Missing bits have been deprecated,
- *  and must not be reused. Those capabilities will never be reported
- *  by new versions of the SVGA device.
- *
- * SVGA_CAP_GMR2 --
- *    Provides asynchronous commands to define and remap guest memory
- *    regions.  Adds device registers SVGA_REG_GMRS_MAX_PAGES and
- *    SVGA_REG_MEMORY_SIZE.
- *
- * SVGA_CAP_SCREEN_OBJECT_2 --
- *    Allow screen object support, and require backing stores from the
- *    guest for each screen object.
- */
-
-#define SVGA_CAP_NONE               0x00000000
-#define SVGA_CAP_RECT_COPY          0x00000002
-#define SVGA_CAP_CURSOR             0x00000020
-#define SVGA_CAP_CURSOR_BYPASS      0x00000040   /* Legacy (Use Cursor Bypass 3 instead) */
-#define SVGA_CAP_CURSOR_BYPASS_2    0x00000080   /* Legacy (Use Cursor Bypass 3 instead) */
-#define SVGA_CAP_8BIT_EMULATION     0x00000100
-#define SVGA_CAP_ALPHA_CURSOR       0x00000200
-#define SVGA_CAP_3D                 0x00004000
-#define SVGA_CAP_EXTENDED_FIFO      0x00008000
-#define SVGA_CAP_MULTIMON           0x00010000   /* Legacy multi-monitor support */
-#define SVGA_CAP_PITCHLOCK          0x00020000
-#define SVGA_CAP_IRQMASK            0x00040000
-#define SVGA_CAP_DISPLAY_TOPOLOGY   0x00080000   /* Legacy multi-monitor support */
-#define SVGA_CAP_GMR                0x00100000
-#define SVGA_CAP_TRACES             0x00200000
-#define SVGA_CAP_GMR2               0x00400000
-#define SVGA_CAP_SCREEN_OBJECT_2    0x00800000
-#define SVGA_CAP_COMMAND_BUFFERS    0x01000000
-#define SVGA_CAP_DEAD1              0x02000000
-#define SVGA_CAP_CMD_BUFFERS_2      0x04000000
-#define SVGA_CAP_GBOBJECTS          0x08000000
-
-/*
- * FIFO register indices.
- *
- * The FIFO is a chunk of device memory mapped into guest physmem.  It
- * is always treated as 32-bit words.
- *
- * The guest driver gets to decide how to partition it between
- * - FIFO registers (there are always at least 4, specifying where the
- *   following data area is and how much data it contains; there may be
- *   more registers following these, depending on the FIFO protocol
- *   version in use)
- * - FIFO data, written by the guest and slurped out by the VMX.
- * These indices are 32-bit word offsets into the FIFO.
- */
-
-enum {
-   /*
-    * Block 1 (basic registers): The originally defined FIFO registers.
-    * These exist and are valid for all versions of the FIFO protocol.
-    */
-
-   SVGA_FIFO_MIN = 0,
-   SVGA_FIFO_MAX,       /* The distance from MIN to MAX must be at least 10K */
-   SVGA_FIFO_NEXT_CMD,
-   SVGA_FIFO_STOP,
-
-   /*
-    * Block 2 (extended registers): Mandatory registers for the extended
-    * FIFO.  These exist if the SVGA caps register includes
-    * SVGA_CAP_EXTENDED_FIFO; some of them are valid only if their
-    * associated capability bit is enabled.
-    *
-    * Note that when originally defined, SVGA_CAP_EXTENDED_FIFO implied
-    * support only for (FIFO registers) CAPABILITIES, FLAGS, and FENCE.
-    * This means that the guest has to test individually (in most cases
-    * using FIFO caps) for the presence of registers after this; the VMX
-    * can define "extended FIFO" to mean whatever it wants, and currently
-    * won't enable it unless there's room for that set and much more.
-    */
-
-   SVGA_FIFO_CAPABILITIES = 4,
-   SVGA_FIFO_FLAGS,
-   /* Valid with SVGA_FIFO_CAP_FENCE: */
-   SVGA_FIFO_FENCE,
-
-   /*
-    * Block 3a (optional extended registers): Additional registers for the
-    * extended FIFO, whose presence isn't actually implied by
-    * SVGA_CAP_EXTENDED_FIFO; these exist if SVGA_FIFO_MIN is high enough to
-    * leave room for them.
-    *
-    * The VMX currently considers the registers in block 3a mandatory for
-    * the extended FIFO.
-    */
-
-   /* Valid if exists (i.e. if extended FIFO enabled): */
-   SVGA_FIFO_3D_HWVERSION,       /* See SVGA3dHardwareVersion in svga3d_reg.h */
-   /* Valid with SVGA_FIFO_CAP_PITCHLOCK: */
-   SVGA_FIFO_PITCHLOCK,
-
-   /* Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3: */
-   SVGA_FIFO_CURSOR_ON,          /* Cursor bypass 3 show/hide register */
-   SVGA_FIFO_CURSOR_X,           /* Cursor bypass 3 x register */
-   SVGA_FIFO_CURSOR_Y,           /* Cursor bypass 3 y register */
-   SVGA_FIFO_CURSOR_COUNT,       /* Incremented when any of the other 3 change */
-   SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
-
-   /* Valid with SVGA_FIFO_CAP_RESERVE: */
-   SVGA_FIFO_RESERVED,           /* Bytes past NEXT_CMD with real contents */
-
-   /*
-    * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2:
-    *
-    * By default this is SVGA_ID_INVALID, to indicate that the cursor
-    * coordinates are specified relative to the virtual root. If this
-    * is set to a specific screen ID, cursor position is reinterpreted
-    * as a signed offset relative to that screen's origin.
-    */
-   SVGA_FIFO_CURSOR_SCREEN_ID,
-
-   /*
-    * Valid with SVGA_FIFO_CAP_DEAD
-    *
-    * An arbitrary value written by the host; drivers should not use it.
-    */
-   SVGA_FIFO_DEAD,
-
-   /*
-    * Valid with SVGA_FIFO_CAP_3D_HWVERSION_REVISED:
-    *
-    * Contains 3D HWVERSION (see SVGA3dHardwareVersion in svga3d_reg.h)
-    * on platforms that can enforce graphics resource limits.
-    */
-   SVGA_FIFO_3D_HWVERSION_REVISED,
-
-   /*
-    * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
-    * registers, but this must be done carefully and with judicious use of
-    * capability bits, since comparisons based on SVGA_FIFO_MIN aren't
-    * enough to tell you whether the register exists: we've shipped drivers
-    * and products that used SVGA_FIFO_3D_CAPS but didn't know about some of
-    * the earlier ones.  The actual order of introduction was:
-    * - PITCHLOCK
-    * - 3D_CAPS
-    * - CURSOR_* (cursor bypass 3)
-    * - RESERVED
-    * So, code that wants to know whether it can use any of the
-    * aforementioned registers, or anything else added after PITCHLOCK and
-    * before 3D_CAPS, needs to reason about something other than
-    * SVGA_FIFO_MIN.
-    */
-
-   /*
-    * 3D caps block space; valid with 3D hardware version >=
-    * SVGA3D_HWVERSION_WS6_B1.
-    */
-   SVGA_FIFO_3D_CAPS      = 32,
-   SVGA_FIFO_3D_CAPS_LAST = 32 + 255,
-
-   /*
-    * End of VMX's current definition of "extended-FIFO registers".
-    * Registers before here are always enabled/disabled as a block; either
-    * the extended FIFO is enabled and includes all preceding registers, or
-    * it's disabled entirely.
-    *
-    * Block 3b (truly optional extended registers): Additional registers for
-    * the extended FIFO, which the VMX already knows how to enable and
-    * disable with correct granularity.
-    *
-    * Registers after here exist if and only if the guest SVGA driver
-    * sets SVGA_FIFO_MIN high enough to leave room for them.
-    */
-
-   /* Valid if register exists: */
-   SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
-   SVGA_FIFO_FENCE_GOAL,         /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
-   SVGA_FIFO_BUSY,               /* See "FIFO Synchronization Registers" */
-
-   /*
-    * Always keep this last.  This defines the maximum number of
-    * registers we know about.  At power-on, this value is placed in
-    * the SVGA_REG_MEM_REGS register, and we expect the guest driver
-    * to allocate this much space in FIFO memory for registers.
-    */
-    SVGA_FIFO_NUM_REGS
-};
-
-
-/*
- * Definition of registers included in extended FIFO support.
- *
- * The guest SVGA driver gets to allocate the FIFO between registers
- * and data.  It must always allocate at least 4 registers, but old
- * drivers stopped there.
- *
- * The VMX will enable extended FIFO support if and only if the guest
- * left enough room for all registers defined as part of the mandatory
- * set for the extended FIFO.
- *
- * Note that the guest drivers typically allocate the FIFO only at
- * initialization time, not at mode switches, so it's likely that the
- * number of FIFO registers won't change without a reboot.
- *
- * All registers less than this value are guaranteed to be present if
- * svgaUser->fifo.extended is set. Any later registers must be tested
- * individually for compatibility at each use (in the VMX).
- *
- * This value is used only by the VMX, so it can change without
- * affecting driver compatibility; keep it that way?
- */
-#define SVGA_FIFO_EXTENDED_MANDATORY_REGS  (SVGA_FIFO_3D_CAPS_LAST + 1)
-
-
-/*
- * FIFO Synchronization Registers
- *
- *  This explains the relationship between the various FIFO
- *  sync-related registers in IOSpace and in FIFO space.
- *
- *  SVGA_REG_SYNC --
- *
- *       The SYNC register can be used in two different ways by the guest:
- *
- *         1. If the guest wishes to fully sync (drain) the FIFO,
- *            it will write once to SYNC then poll on the BUSY
- *            register. The FIFO is sync'ed once BUSY is zero.
- *
- *         2. If the guest wants to asynchronously wake up the host,
- *            it will write once to SYNC without polling on BUSY.
- *            Ideally it will do this after some new commands have
- *            been placed in the FIFO, and after reading a zero
- *            from SVGA_FIFO_BUSY.
- *
- *       (1) is the original behaviour that SYNC was designed to
- *       support.  Originally, a write to SYNC would implicitly
- *       trigger a read from BUSY. This causes us to synchronously
- *       process the FIFO.
- *
- *       This behaviour has since been changed so that writing SYNC
- *       will *not* implicitly cause a read from BUSY. Instead, it
- *       makes a channel call which asynchronously wakes up the MKS
- *       thread.
- *
- *       New guests can use this new behaviour to implement (2)
- *       efficiently. This lets guests get the host's attention
- *       without waiting for the MKS to poll, which gives us much
- *       better CPU utilization on SMP hosts and on UP hosts while
- *       we're blocked on the host GPU.
- *
- *       Old guests shouldn't notice the behaviour change. SYNC was
- *       never guaranteed to process the entire FIFO, since it was
- *       bounded to a particular number of CPU cycles. Old guests will
- *       still loop on the BUSY register until the FIFO is empty.
- *
- *       Writing to SYNC currently has the following side-effects:
- *
- *         - Sets SVGA_REG_BUSY to TRUE (in the monitor)
- *         - Asynchronously wakes up the MKS thread for FIFO processing
- *         - The value written to SYNC is recorded as a "reason", for
- *           stats purposes.
- *
- *       If SVGA_FIFO_BUSY is available, drivers are advised to only
- *       write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
- *       SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
- *       eventually set SVGA_FIFO_BUSY on its own, but this approach
- *       lets the driver avoid sending multiple asynchronous wakeup
- *       messages to the MKS thread.
- *
- *  SVGA_REG_BUSY --
- *
- *       This register is set to TRUE when SVGA_REG_SYNC is written,
- *       and it reads as FALSE when the FIFO has been completely
- *       drained.
- *
- *       Every read from this register causes us to synchronously
- *       process FIFO commands. There is no guarantee as to how many
- *       commands each read will process.
- *
- *       CPU time spent processing FIFO commands will be billed to
- *       the guest.
- *
- *       New drivers should avoid using this register unless they
- *       need to guarantee that the FIFO is completely drained. It
- *       is overkill for performing a sync-to-fence. Older drivers
- *       will use this register for any type of synchronization.
- *
- *  SVGA_FIFO_BUSY --
- *
- *       This register is a fast way for the guest driver to check
- *       whether the FIFO is already being processed. It reads and
- *       writes at normal RAM speeds, with no monitor intervention.
- *
- *       If this register reads as TRUE, the host is guaranteeing that
- *       any new commands written into the FIFO will be noticed before
- *       the MKS goes back to sleep.
- *
- *       If this register reads as FALSE, no such guarantee can be
- *       made.
- *
- *       The guest should use this register to quickly determine
- *       whether or not it needs to wake up the host. If the guest
- *       just wrote a command or group of commands that it would like
- *       the host to begin processing, it should:
- *
- *         1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
- *            action is necessary.
- *
- *         2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
- *            code that we've already sent a SYNC to the host and we
- *            don't need to send a duplicate.
- *
- *         3. Write a reason to SVGA_REG_SYNC. This will send an
- *            asynchronous wakeup to the MKS thread.
- */
-
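A sketch of the three-step doorbell described above; svga_fifo_read, svga_fifo_write and svga_reg_write are hypothetical stand-ins for the driver's FIFO-memory and register accessors:

static void svga_ring_host(void)
{
	if (svga_fifo_read(SVGA_FIFO_BUSY))   /* 1: host is already awake */
		return;
	svga_fifo_write(SVGA_FIFO_BUSY, 1);   /* 2: suppress duplicate syncs */
	svga_reg_write(SVGA_REG_SYNC, 1);     /* 3: async wakeup, reason 1 */
}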
-
-/*
- * FIFO Capabilities
- *
- *      Fence -- Fence register and command are supported
- *      Accel Front -- Front buffer only commands are supported
- *      Pitch Lock -- Pitch lock register is supported
- *      Video -- SVGA Video overlay units are supported
- *      Escape -- Escape command is supported
- *
- * XXX: Add longer descriptions for each capability, including a list
- *      of the new features that each capability provides.
- *
- * SVGA_FIFO_CAP_SCREEN_OBJECT --
- *
- *    Provides dynamic multi-screen rendering, for improved Unity and
- *    multi-monitor modes. With Screen Object, the guest can
- *    dynamically create and destroy 'screens', which can represent
- *    Unity windows or virtual monitors. Screen Object also provides
- *    strong guarantees that DMA operations happen only when
- *    guest-initiated. Screen Object deprecates the BAR1 guest
- *    framebuffer (GFB) and all commands that work only with the GFB.
- *
- *    New registers:
- *       FIFO_CURSOR_SCREEN_ID, VIDEO_DATA_GMRID, VIDEO_DST_SCREEN_ID
- *
- *    New 2D commands:
- *       DEFINE_SCREEN, DESTROY_SCREEN, DEFINE_GMRFB, BLIT_GMRFB_TO_SCREEN,
- *       BLIT_SCREEN_TO_GMRFB, ANNOTATION_FILL, ANNOTATION_COPY
- *
- *    New 3D commands:
- *       BLIT_SURFACE_TO_SCREEN
- *
- *    New guarantees:
- *
- *       - The host will not read or write guest memory, including the GFB,
- *         except when explicitly initiated by a DMA command.
- *
- *       - All DMA, including legacy DMA like UPDATE and PRESENT_READBACK,
- *         is guaranteed to complete before any subsequent FENCEs.
- *
- *       - All legacy commands which affect a Screen (UPDATE, PRESENT,
- *         PRESENT_READBACK) as well as new Screen blit commands will
- *         all behave consistently as blits, and memory will be read
- *         or written in FIFO order.
- *
- *         For example, if you PRESENT from one SVGA3D surface to multiple
- *         places on the screen, the data copied will always be from the
- *         SVGA3D surface at the time the PRESENT was issued in the FIFO.
- *         This was not necessarily true on devices without Screen Object.
- *
- *         This means that on devices that support Screen Object, the
- *         PRESENT_READBACK command should not be necessary unless you
- *         actually want to read back the results of 3D rendering into
- *         system memory. (And for that, the BLIT_SCREEN_TO_GMRFB
- *         command provides a strict superset of functionality.)
- *
- *       - When a screen is resized, either using Screen Object commands or
- *         legacy multimon registers, its contents are preserved.
- *
- * SVGA_FIFO_CAP_GMR2 --
- *
- *    Provides new commands to define and remap guest memory regions (GMR).
- *
- *    New 2D commands:
- *       DEFINE_GMR2, REMAP_GMR2.
- *
- * SVGA_FIFO_CAP_3D_HWVERSION_REVISED --
- *
- *    Indicates new register SVGA_FIFO_3D_HWVERSION_REVISED exists.
- *    This register may replace SVGA_FIFO_3D_HWVERSION on platforms
- *    that enforce graphics resource limits.  This allows the platform
- *    to clear SVGA_FIFO_3D_HWVERSION and disable 3D in legacy guest
- *    drivers that do not limit their resources.
- *
- *    Note this is an alias to SVGA_FIFO_CAP_GMR2 because these indicators
- *    are codependent (and thus we use a single capability bit).
- *
- * SVGA_FIFO_CAP_SCREEN_OBJECT_2 --
- *
- *    Modifies the DEFINE_SCREEN command to include a guest provided
- *    backing store in GMR memory and the bytesPerLine for the backing
- *    store.  This capability requires the use of a backing store when
- *    creating screen objects.  However, if SVGA_FIFO_CAP_SCREEN_OBJECT
- *    is present, backing stores are optional.
- *
- * SVGA_FIFO_CAP_DEAD --
- *
- *    Drivers should not use this cap bit.  This cap bit cannot be
- *    reused since some hosts already expose it.
- */
-
-#define SVGA_FIFO_CAP_NONE                  0
-#define SVGA_FIFO_CAP_FENCE             (1<<0)
-#define SVGA_FIFO_CAP_ACCELFRONT        (1<<1)
-#define SVGA_FIFO_CAP_PITCHLOCK         (1<<2)
-#define SVGA_FIFO_CAP_VIDEO             (1<<3)
-#define SVGA_FIFO_CAP_CURSOR_BYPASS_3   (1<<4)
-#define SVGA_FIFO_CAP_ESCAPE            (1<<5)
-#define SVGA_FIFO_CAP_RESERVE           (1<<6)
-#define SVGA_FIFO_CAP_SCREEN_OBJECT     (1<<7)
-#define SVGA_FIFO_CAP_GMR2              (1<<8)
-#define SVGA_FIFO_CAP_3D_HWVERSION_REVISED  SVGA_FIFO_CAP_GMR2
-#define SVGA_FIFO_CAP_SCREEN_OBJECT_2   (1<<9)
-#define SVGA_FIFO_CAP_DEAD              (1<<10)
-
-
-/*
- * FIFO Flags
- *
- *      Accel Front -- Driver should use front buffer only commands
- */
-
-#define SVGA_FIFO_FLAG_NONE                 0
-#define SVGA_FIFO_FLAG_ACCELFRONT       (1<<0)
-#define SVGA_FIFO_FLAG_RESERVED        (1<<31) /* Internal use only */
-
-/*
- * FIFO reservation sentinel value
- */
-
-#define SVGA_FIFO_RESERVED_UNKNOWN      0xffffffff
-
-
-/*
- * Video overlay support
- */
-
-#define SVGA_NUM_OVERLAY_UNITS 32
-
-
-/*
- * Video capabilities that the guest is currently using
- */
-
-#define SVGA_VIDEO_FLAG_COLORKEY        0x0001
-
-
-/*
- * Offsets for the video overlay registers
- */
-
-enum {
-   SVGA_VIDEO_ENABLED = 0,
-   SVGA_VIDEO_FLAGS,
-   SVGA_VIDEO_DATA_OFFSET,
-   SVGA_VIDEO_FORMAT,
-   SVGA_VIDEO_COLORKEY,
-   SVGA_VIDEO_SIZE,          /* Deprecated */
-   SVGA_VIDEO_WIDTH,
-   SVGA_VIDEO_HEIGHT,
-   SVGA_VIDEO_SRC_X,
-   SVGA_VIDEO_SRC_Y,
-   SVGA_VIDEO_SRC_WIDTH,
-   SVGA_VIDEO_SRC_HEIGHT,
-   SVGA_VIDEO_DST_X,         /* Signed int32 */
-   SVGA_VIDEO_DST_Y,         /* Signed int32 */
-   SVGA_VIDEO_DST_WIDTH,
-   SVGA_VIDEO_DST_HEIGHT,
-   SVGA_VIDEO_PITCH_1,
-   SVGA_VIDEO_PITCH_2,
-   SVGA_VIDEO_PITCH_3,
-   SVGA_VIDEO_DATA_GMRID,    /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
-   SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords (SVGA_ID_INVALID) */
-   SVGA_VIDEO_NUM_REGS
-};
-
-
-/*
- * SVGA Overlay Units
- *
- *      width and height relate to the entire source video frame.
- *      srcX, srcY, srcWidth and srcHeight represent subset of the source
- *      video frame to be displayed.
- */
-
-typedef struct SVGAOverlayUnit {
-   uint32 enabled;
-   uint32 flags;
-   uint32 dataOffset;
-   uint32 format;
-   uint32 colorKey;
-   uint32 size;
-   uint32 width;
-   uint32 height;
-   uint32 srcX;
-   uint32 srcY;
-   uint32 srcWidth;
-   uint32 srcHeight;
-   int32  dstX;
-   int32  dstY;
-   uint32 dstWidth;
-   uint32 dstHeight;
-   uint32 pitches[3];
-   uint32 dataGMRId;
-   uint32 dstScreenId;
-} SVGAOverlayUnit;
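-
-/*
- * Illustrative sketch (editor addition, not part of the original
- * header): the fields of SVGAOverlayUnit line up one-to-one with the
- * SVGA_VIDEO_* register offsets above, so a unit can be viewed as an
- * array of 32-bit overlay registers.
- */
-#if 0
-static inline uint32
-svgaOverlayReg(const SVGAOverlayUnit *unit, uint32 reg)
-{
-   /* reg must be < SVGA_VIDEO_NUM_REGS. */
-   return ((const uint32 *) unit)[reg];
-}
-#endif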
-
-
-/*
- * SVGAScreenObject --
- *
- *    This is a new way to represent a guest's multi-monitor screen or
- *    Unity window. Screen objects are only supported if the
- *    SVGA_FIFO_CAP_SCREEN_OBJECT capability bit is set.
- *
- *    If Screen Objects are supported, they can be used to fully
- *    replace the functionality provided by the framebuffer registers
- *    (SVGA_REG_WIDTH, HEIGHT, etc.) and by SVGA_CAP_DISPLAY_TOPOLOGY.
- *
- *    The screen object is a struct with guaranteed binary
- *    compatibility. New flags can be added, and the struct may grow,
- *    but existing fields must retain their meaning.
- *
- *    Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2 are required fields of
- *    a SVGAGuestPtr that is used to back the screen contents.  This
- *    memory must come from the GFB.  The guest is not allowed to
- *    access the memory and doing so will have undefined results.  The
- *    backing store is required to be page aligned and the size is
- *    padded to the next page boundary.  The number of pages is:
- *       (bytesPerLine * size.width * 4 + PAGE_SIZE - 1) / PAGE_SIZE
- *
- *    The pitch in the backingStore is required to be at least large
- *    enough to hold a 32bpp scanline.  It is recommended that the
- *    driver pad bytesPerLine for a potential performance win.
- *
- *    The cloneCount field is treated as a hint from the guest that
- *    the user wants this display to be cloned, cloneCount times.  A
- *    value of zero means no cloning should happen.
- */
-
-#define SVGA_SCREEN_MUST_BE_SET     (1 << 0) /* Must be set or results undefined */
-#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
-#define SVGA_SCREEN_IS_PRIMARY      (1 << 1) /* Guest considers this screen to be 'primary' */
-#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) /* Guest is running a fullscreen app here */
-
-/*
- * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  When a screen is
- * deactivated, its base layer is defined to lose all contents and
- * become black, and its backing store becomes optional.  When this
- * flag is set, backingPtr and bytesPerLine will be ignored.
- */
-#define SVGA_SCREEN_DEACTIVATE  (1 << 3)
-
-/*
- * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  When this flag is set
- * the screen contents will be output as all black to the user,
- * though the base layer contents are preserved.  The screen base layer
- * can still be read and written to as normal, though no visible
- * effect will be seen by the user.  When the flag is changed the
- * screen will be blanked or redrawn to the current contents as needed
- * without any extra commands from the driver.  This flag only has an
- * effect when the screen is not deactivated.
- */
-#define SVGA_SCREEN_BLANKING (1 << 4)
-
-typedef
-struct SVGAScreenObject {
-   uint32 structSize;   /* sizeof(SVGAScreenObject) */
-   uint32 id;
-   uint32 flags;
-   struct {
-      uint32 width;
-      uint32 height;
-   } size;
-   struct {
-      int32 x;
-      int32 y;
-   } root;
-
-   /*
-    * Added and required by SVGA_FIFO_CAP_SCREEN_OBJECT_2, optional
-    * with SVGA_FIFO_CAP_SCREEN_OBJECT.
-    */
-   SVGAGuestImage backingStore;
-   uint32 cloneCount;
-} SVGAScreenObject;
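-
-/*
- * Illustrative sketch (editor addition, not part of the original
- * header): minimal initialization of a screen object for
- * SVGA_CMD_DEFINE_SCREEN on a device with SVGA_FIFO_CAP_SCREEN_OBJECT,
- * where the backingStore fields may be left zeroed.  Assumes memset()
- * is available.
- */
-#if 0
-static void
-svgaInitScreen(SVGAScreenObject *screen, uint32 id,
-               uint32 width, uint32 height)
-{
-   memset(screen, 0, sizeof *screen);
-   screen->structSize = sizeof *screen;
-   screen->id = id;
-   screen->flags = SVGA_SCREEN_MUST_BE_SET | SVGA_SCREEN_IS_PRIMARY;
-   screen->size.width = width;
-   screen->size.height = height;
-}
-#endif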
-
-
-/*
- *  Commands in the command FIFO:
- *
- *  Command IDs defined below are used for the traditional 2D FIFO
- *  communication (not all commands are available for all versions of the
- *  SVGA FIFO protocol).
- *
- *  Note the holes in the command ID numbers: These commands have been
- *  deprecated, and the old IDs must not be reused.
- *
- *  Command IDs from 1000 to 1999 are reserved for use by the SVGA3D
- *  protocol.
- *
- *  Each command's parameters are described by the comments and
- *  structs below.
- */
-
-typedef enum {
-   SVGA_CMD_INVALID_CMD           = 0,
-   SVGA_CMD_UPDATE                = 1,
-   SVGA_CMD_RECT_COPY             = 3,
-   SVGA_CMD_DEFINE_CURSOR         = 19,
-   SVGA_CMD_DEFINE_ALPHA_CURSOR   = 22,
-   SVGA_CMD_UPDATE_VERBOSE        = 25,
-   SVGA_CMD_FRONT_ROP_FILL        = 29,
-   SVGA_CMD_FENCE                 = 30,
-   SVGA_CMD_ESCAPE                = 33,
-   SVGA_CMD_DEFINE_SCREEN         = 34,
-   SVGA_CMD_DESTROY_SCREEN        = 35,
-   SVGA_CMD_DEFINE_GMRFB          = 36,
-   SVGA_CMD_BLIT_GMRFB_TO_SCREEN  = 37,
-   SVGA_CMD_BLIT_SCREEN_TO_GMRFB  = 38,
-   SVGA_CMD_ANNOTATION_FILL       = 39,
-   SVGA_CMD_ANNOTATION_COPY       = 40,
-   SVGA_CMD_DEFINE_GMR2           = 41,
-   SVGA_CMD_REMAP_GMR2            = 42,
-   SVGA_CMD_MAX
-} SVGAFifoCmdId;
-
-#define SVGA_CMD_MAX_ARGS           64
-
-
-/*
- * SVGA_CMD_UPDATE --
- *
- *    This is a DMA transfer which copies from the Guest Framebuffer
- *    (GFB) at BAR1 + SVGA_REG_FB_OFFSET to any screens which
- *    intersect with the provided virtual rectangle.
- *
- *    This command does not support using arbitrary guest memory as a
- *    data source; it only works with the pre-defined GFB memory.
- *    This command also does not support signed virtual coordinates.
- *    If you have defined screens (using SVGA_CMD_DEFINE_SCREEN) with
- *    negative root x/y coordinates, the negative portion of those
- *    screens will not be reachable by this command.
- *
- *    This command is not necessary when using framebuffer
- *    traces. Traces are automatically enabled if the SVGA FIFO is
- *    disabled, and you may explicitly enable/disable traces using
- *    SVGA_REG_TRACES. With traces enabled, any write to the GFB will
- *    automatically act as if a subsequent SVGA_CMD_UPDATE was issued.
- *
- *    Traces and SVGA_CMD_UPDATE are the only supported ways to render
- *    pseudocolor screen updates. The newer Screen Object commands
- *    only support true color formats.
- *
- * Availability:
- *    Always available.
- */
-
-typedef
-struct SVGAFifoCmdUpdate {
-   uint32 x;
-   uint32 y;
-   uint32 width;
-   uint32 height;
-} SVGAFifoCmdUpdate;
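-
-/*
- * Illustrative sketch (editor addition, not part of the original
- * header): queuing an UPDATE.  svgaFifoReserve() and svgaFifoCommit()
- * are hypothetical command-submission helpers, not defined by this
- * header.
- */
-#if 0
-static void
-svgaUpdate(uint32 x, uint32 y, uint32 width, uint32 height)
-{
-   SVGAFifoCmdUpdate *cmd = svgaFifoReserve(SVGA_CMD_UPDATE, sizeof *cmd);
-
-   cmd->x = x;
-   cmd->y = y;
-   cmd->width = width;
-   cmd->height = height;
-   svgaFifoCommit(sizeof *cmd);
-}
-#endif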
-
-
-/*
- * SVGA_CMD_RECT_COPY --
- *
- *    Perform a rectangular DMA transfer from one area of the GFB to
- *    another, and copy the result to any screens which intersect it.
- *
- * Availability:
- *    SVGA_CAP_RECT_COPY
- */
-
-typedef
-struct SVGAFifoCmdRectCopy {
-   uint32 srcX;
-   uint32 srcY;
-   uint32 destX;
-   uint32 destY;
-   uint32 width;
-   uint32 height;
-} SVGAFifoCmdRectCopy;
-
-
-/*
- * SVGA_CMD_DEFINE_CURSOR --
- *
- *    Provide a new cursor image, as an AND/XOR mask.
- *
- *    The recommended way to position the cursor overlay is by using
- *    the SVGA_FIFO_CURSOR_* registers, supported by the
- *    SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
- *
- * Availability:
- *    SVGA_CAP_CURSOR
- */
-
-typedef
-struct SVGAFifoCmdDefineCursor {
-   uint32 id;             /* Reserved, must be zero. */
-   uint32 hotspotX;
-   uint32 hotspotY;
-   uint32 width;
-   uint32 height;
-   uint32 andMaskDepth;   /* Value must be 1 or equal to BITS_PER_PIXEL */
-   uint32 xorMaskDepth;   /* Value must be 1 or equal to BITS_PER_PIXEL */
-   /*
-    * Followed by scanline data for AND mask, then XOR mask.
-    * Each scanline is padded to a 32-bit boundary.
-   */
-} SVGAFifoCmdDefineCursor;
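-
-/*
- * Illustrative sketch (editor addition, not part of the original
- * header): number of bytes of mask data that follow
- * SVGAFifoCmdDefineCursor, given the 32-bit scanline padding
- * described above.
- */
-#if 0
-static inline uint32
-svgaCursorMaskBytes(uint32 width, uint32 height, uint32 depth)
-{
-   uint32 scanlineWords = (width * depth + 31) / 32;
-
-   return scanlineWords * sizeof(uint32) * height;
-}
-#endif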
-
-
-/*
- * SVGA_CMD_DEFINE_ALPHA_CURSOR --
- *
- *    Provide a new cursor image, in 32-bit BGRA format.
- *
- *    The recommended way to position the cursor overlay is by using
- *    the SVGA_FIFO_CURSOR_* registers, supported by the
- *    SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
- *
- * Availability:
- *    SVGA_CAP_ALPHA_CURSOR
- */
-
-typedef
-struct SVGAFifoCmdDefineAlphaCursor {
-   uint32 id;             /* Reserved, must be zero. */
-   uint32 hotspotX;
-   uint32 hotspotY;
-   uint32 width;
-   uint32 height;
-   /* Followed by scanline data */
-} SVGAFifoCmdDefineAlphaCursor;
-
-
-/*
- * SVGA_CMD_UPDATE_VERBOSE --
- *
- *    Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
- *    'reason' value, an opaque cookie which is used by internal
- *    debugging tools. Third party drivers should not use this
- *    command.
- *
- * Availability:
- *    SVGA_CAP_EXTENDED_FIFO
- */
-
-typedef
-struct SVGAFifoCmdUpdateVerbose {
-   uint32 x;
-   uint32 y;
-   uint32 width;
-   uint32 height;
-   uint32 reason;
-} SVGAFifoCmdUpdateVerbose;
-
-
-/*
- * SVGA_CMD_FRONT_ROP_FILL --
- *
- *    This is a hint which tells the SVGA device that the driver has
- *    just filled a rectangular region of the GFB with a solid
- *    color. Instead of reading these pixels from the GFB, the device
- *    can assume that they all equal 'color'. This is primarily used
- *    for remote desktop protocols.
- *
- * Availability:
- *    SVGA_FIFO_CAP_ACCELFRONT
- */
-
-#define  SVGA_ROP_COPY                    0x03
-
-typedef
-struct SVGAFifoCmdFrontRopFill {
-   uint32 color;     /* In the same format as the GFB */
-   uint32 x;
-   uint32 y;
-   uint32 width;
-   uint32 height;
-   uint32 rop;       /* Must be SVGA_ROP_COPY */
-} SVGAFifoCmdFrontRopFill;
-
-
-/*
- * SVGA_CMD_FENCE --
- *
- *    Insert a synchronization fence.  When the SVGA device reaches
- *    this command, it will copy the 'fence' value into the
- *    SVGA_FIFO_FENCE register. It will also compare the fence against
- *    SVGA_FIFO_FENCE_GOAL. If the fence matches the goal and the
- *    SVGA_IRQFLAG_FENCE_GOAL interrupt is enabled, the device will
- *    raise this interrupt.
- *
- * Availability:
- *    SVGA_FIFO_FENCE for this command,
- *    SVGA_CAP_IRQMASK for SVGA_FIFO_FENCE_GOAL.
- */
-
-typedef
-struct {
-   uint32 fence;
-} SVGAFifoCmdFence;
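-
-/*
- * Illustrative sketch (editor addition, not part of the original
- * header): insert a fence and poll SVGA_FIFO_FENCE until the device
- * has passed it.  svgaFifoReserve()/svgaFifoCommit() are hypothetical
- * helpers as above; production drivers would use SVGA_FIFO_FENCE_GOAL
- * and the fence interrupt rather than polling.
- */
-#if 0
-static void
-svgaSyncToFence(volatile uint32 *fifoMem, uint32 fence)
-{
-   SVGAFifoCmdFence *cmd = svgaFifoReserve(SVGA_CMD_FENCE, sizeof *cmd);
-
-   cmd->fence = fence;
-   svgaFifoCommit(sizeof *cmd);
-
-   /* Signed comparison tolerates 32-bit fence wrap-around. */
-   while ((int32)(fifoMem[SVGA_FIFO_FENCE] - fence) < 0)
-      ;  /* Busy-wait until the device reaches the fence. */
-}
-#endif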
-
-
-/*
- * SVGA_CMD_ESCAPE --
- *
- *    Send an extended or vendor-specific variable length command.
- *    This is used for video overlay, third party plugins, and
- *    internal debugging tools. See svga_escape.h
- *
- * Availability:
- *    SVGA_FIFO_CAP_ESCAPE
- */
-
-typedef
-struct SVGAFifoCmdEscape {
-   uint32 nsid;
-   uint32 size;
-   /* followed by 'size' bytes of data */
-} SVGAFifoCmdEscape;
-
-
-/*
- * SVGA_CMD_DEFINE_SCREEN --
- *
- *    Define or redefine an SVGAScreenObject. See the description of
- *    SVGAScreenObject above.  The video driver is responsible for
- *    generating new screen IDs. They should be small positive
- *    integers. The virtual device will have an implementation-specific
- *    upper limit on the number of screen IDs
- *    supported. Drivers are responsible for recycling IDs. The first
- *    valid ID is zero.
- *
- *    - Interaction with other registers:
- *
- *    For backwards compatibility, when the GFB mode registers (WIDTH,
- *    HEIGHT, PITCHLOCK, BITS_PER_PIXEL) are modified, the SVGA device
- *    deletes all screens other than screen #0, and redefines screen
- *    #0 according to the specified mode. Drivers that use
- *    SVGA_CMD_DEFINE_SCREEN should destroy or redefine screen #0.
- *
- *    If you use screen objects, do not use the legacy multi-mon
- *    registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
- *
- * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
- */
-
-typedef
-struct {
-   SVGAScreenObject screen;   /* Variable-length according to version */
-} SVGAFifoCmdDefineScreen;
-
-
-/*
- * SVGA_CMD_DESTROY_SCREEN --
- *
- *    Destroy an SVGAScreenObject. Its ID is immediately available for
- *    re-use.
- *
- * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
- */
-
-typedef
-struct {
-   uint32 screenId;
-} SVGAFifoCmdDestroyScreen;
-
-
-/*
- * SVGA_CMD_DEFINE_GMRFB --
- *
- *    This command sets a piece of SVGA device state called the
- *    Guest Memory Region Framebuffer, or GMRFB. The GMRFB is a
- *    piece of light-weight state which identifies the location and
- *    format of an image in guest memory or in BAR1. The GMRFB has
- *    an arbitrary size, and it doesn't need to match the geometry
- *    of the GFB or any screen object.
- *
- *    The GMRFB can be redefined as often as you like. You could
- *    always use the same GMRFB, you could redefine it before
- *    rendering from a different guest screen, or you could even
- *    redefine it before every blit.
- *
- *    There are multiple ways to use this command. The simplest way is
- *    to use it to move the framebuffer either to elsewhere in the GFB
- *    (BAR1) memory region, or to a user-defined GMR. This lets a
- *    driver use a framebuffer allocated entirely out of normal system
- *    memory, which we encourage.
- *
- *    Another way to use this command is to set up a ring buffer of
- *    updates in GFB memory. If a driver wants to ensure that no
- *    frames are skipped by the SVGA device, it is important that the
- *    driver not modify the source data for a blit until the device is
- *    done processing the command. One efficient way to accomplish
- *    this is to use a ring of small DMA buffers. Each buffer is used
- *    for one blit, then we move on to the next buffer in the
- *    ring. The FENCE mechanism is used to protect each buffer from
- *    re-use until the device is finished with that buffer's
- *    corresponding blit.
- *
- *    This command does not affect the meaning of SVGA_CMD_UPDATE.
- *    UPDATEs always occur from the legacy GFB memory area. This
- *    command has no support for pseudocolor GMRFBs. Currently only
- *    true-color 15, 16, and 24-bit depths are supported. Future
- *    devices may expose capabilities for additional framebuffer
- *    formats.
- *
- *    The default GMRFB value is undefined. Drivers must always send
- *    this command at least once before performing any blit from the
- *    GMRFB.
- *
- * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
- */
-
-typedef
-struct {
-   SVGAGuestPtr        ptr;
-   uint32              bytesPerLine;
-   SVGAGMRImageFormat  format;
-} SVGAFifoCmdDefineGMRFB;
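-
-/*
- * Illustrative sketch (editor addition, not part of the original
- * header): pointing the GMRFB at a driver-allocated buffer before
- * blitting from it.  Submission helpers are hypothetical as in the
- * earlier sketches, and the bitsPerPixel/colorDepth field layout of
- * SVGAGMRImageFormat is assumed from its definition elsewhere in
- * this header.
- */
-#if 0
-static void
-svgaSetGMRFB(SVGAGuestPtr ptr, uint32 bytesPerLine)
-{
-   SVGAFifoCmdDefineGMRFB *cmd =
-      svgaFifoReserve(SVGA_CMD_DEFINE_GMRFB, sizeof *cmd);
-
-   cmd->ptr = ptr;
-   cmd->bytesPerLine = bytesPerLine;
-   cmd->format.bitsPerPixel = 32;
-   cmd->format.colorDepth = 24;
-   cmd->format.reserved = 0;
-   svgaFifoCommit(sizeof *cmd);
-}
-#endif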
-
-
-/*
- * SVGA_CMD_BLIT_GMRFB_TO_SCREEN --
- *
- *    This is a guest-to-host blit. It performs a DMA operation to
- *    copy a rectangular region of pixels from the current GMRFB to
- *    one or more Screen Objects.
- *
- *    The destination coordinate may be specified relative to a
- *    screen's origin (if a screen ID is specified) or relative to the
- *    virtual coordinate system's origin (if the screen ID is
- *    SVGA_ID_INVALID). The actual destination may span zero or more
- *    screens, in the case of a virtual destination rect or a rect
- *    which extends off the edge of the specified screen.
- *
- *    This command writes to the screen's "base layer": the underlying
- *    framebuffer which exists below any cursor or video overlays. No
- *    action is necessary to explicitly hide or update any overlays
- *    which exist on top of the updated region.
- *
- *    The SVGA device is guaranteed to finish reading from the GMRFB
- *    by the time any subsequent FENCE commands are reached.
- *
- *    This command consumes an annotation. See the
- *    SVGA_CMD_ANNOTATION_* commands for details.
- *
- * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
- */
-
-typedef
-struct {
-   SVGASignedPoint  srcOrigin;
-   SVGASignedRect   destRect;
-   uint32           destScreenId;
-} SVGAFifoCmdBlitGMRFBToScreen;
-
-
-/*
- * SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
- *
- *    This is a host-to-guest blit. It performs a DMA operation to
- *    copy a rectangular region of pixels from a single Screen Object
- *    back to the current GMRFB.
- *
- *    Usage note: This command should be used rarely. It will
- *    typically be inefficient, but it is necessary for some types of
- *    synchronization between 3D (GPU) and 2D (CPU) rendering into
- *    overlapping areas of a screen.
- *
- *    The source coordinate is specified relative to a screen's
- *    origin. The provided screen ID must be valid. If any parameters
- *    are invalid, the resulting pixel values are undefined.
- *
- *    This command reads the screen's "base layer". Overlays like
- *    video and cursor are not included, but any data which was sent
- *    using a blit-to-screen primitive will be available, no matter
- *    whether the data's original source was the GMRFB or the 3D
- *    acceleration hardware.
- *
- *    Note that our guest-to-host blits and host-to-guest blits aren't
- *    symmetric in their current implementation. While the parameters
- *    are identical, host-to-guest blits are a lot less featureful.
- *    They do not support clipping: If the source parameters don't
- *    fully fit within a screen, the blit fails. They must originate
- *    from exactly one screen. Virtual coordinates are not directly
- *    supported.
- *
- *    Host-to-guest blits do support the same set of GMRFB formats
- *    offered by guest-to-host blits.
- *
- *    The SVGA device is guaranteed to finish writing to the GMRFB by
- *    the time any subsequent FENCE commands are reached.
- *
- * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
- */
-
-typedef
-struct {
-   SVGASignedPoint  destOrigin;
-   SVGASignedRect   srcRect;
-   uint32           srcScreenId;
-} SVGAFifoCmdBlitScreenToGMRFB;
-
-
-/*
- * SVGA_CMD_ANNOTATION_FILL --
- *
- *    This is a blit annotation. This command stores a small piece of
- *    device state which is consumed by the next blit-to-screen
- *    command. The state is only cleared by commands which are
- *    specifically documented as consuming an annotation. Other
- *    commands (such as ESCAPEs for debugging) may intervene between
- *    the annotation and its associated blit.
- *
- *    This annotation is a promise about the contents of the next
- *    blit: The video driver is guaranteeing that all pixels in that
- *    blit will have the same value, specified here as a color in
- *    SVGAColorBGRX format.
- *
- *    The SVGA device can still render the blit correctly even if it
- *    ignores this annotation, but the annotation may allow it to
- *    perform the blit more efficiently, for example by ignoring the
- *    source data and performing a fill in hardware.
- *
- *    This annotation is most important for performance when the
- *    user's display is being remoted over a network connection.
- *
- * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
- */
-
-typedef
-struct {
-   SVGAColorBGRX  color;
-} SVGAFifoCmdAnnotationFill;
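-
-/*
- * Illustrative sketch (editor addition, not part of the original
- * header): an annotation is queued immediately before the
- * blit-to-screen that consumes it, e.g.:
- *
- *    svgaAnnotationFill(color);                    (hypothetical helper)
- *    svgaBlitGMRFBToScreen(&src, &dest, screenId); (hypothetical helper)
- */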
-
-
-/*
- * SVGA_CMD_ANNOTATION_COPY --
- *
- *    This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more
- *    information about annotations.
- *
- *    This annotation is a promise about the contents of the next
- *    blit: The video driver is guaranteeing that all pixels in that
- *    blit will have the same value as those which already exist at an
- *    identically-sized region on the same or a different screen.
- *
- *    Note that the source pixels for the COPY in this annotation are
- *    sampled before applying the annotation's associated blit. They
- *    are allowed to overlap with the blit's destination pixels.
- *
- *    The copy source rectangle is specified the same way as the blit
- *    destination: it can be a rectangle which spans zero or more
- *    screens, specified relative to either a screen or to the virtual
- *    coordinate system's origin. If the source rectangle includes
- *    pixels which are not from exactly one screen, the results are
- *    undefined.
- *
- * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
- */
-
-typedef
-struct {
-   SVGASignedPoint  srcOrigin;
-   uint32           srcScreenId;
-} SVGAFifoCmdAnnotationCopy;
-
-
-/*
- * SVGA_CMD_DEFINE_GMR2 --
- *
- *    Define guest memory region v2.  See the description of GMRs above.
- *
- * Availability:
- *    SVGA_CAP_GMR2
- */
-
-typedef
-struct {
-   uint32 gmrId;
-   uint32 numPages;
-} SVGAFifoCmdDefineGMR2;
-
-
-/*
- * SVGA_CMD_REMAP_GMR2 --
- *
- *    Remap guest memory region v2.  See the description of GMRs above.
- *
- *    This command allows the guest to modify a portion of an existing GMR by
- *    invalidating it or reassigning it to different guest physical pages.
- *    The pages are identified by physical page number (PPN).  The pages
- *    are assumed to be pinned and valid for DMA operations.
- *
- *    Description of command flags:
- *
- *    SVGA_REMAP_GMR2_VIA_GMR: If enabled, references a PPN list in a GMR.
- *       The PPN list must not overlap with the remap region (this can be
- *       handled trivially by referencing a separate GMR).  If the flag is
- *       disabled, the PPN list is appended to the REMAP_GMR2 command.
- *
- *    SVGA_REMAP_GMR2_PPN64: If set, PPN list is in PPN64 format, otherwise
- *       it is in PPN32 format.
- *
- *    SVGA_REMAP_GMR2_SINGLE_PPN: If set, PPN list contains a single entry.
- *       A single PPN can be used to invalidate a portion of a GMR or
- *       map it to a single guest scratch page.
- *
- * Availability:
- *    SVGA_CAP_GMR2
- */
-
-typedef enum {
-   SVGA_REMAP_GMR2_PPN32         = 0,
-   SVGA_REMAP_GMR2_VIA_GMR       = (1 << 0),
-   SVGA_REMAP_GMR2_PPN64         = (1 << 1),
-   SVGA_REMAP_GMR2_SINGLE_PPN    = (1 << 2),
-} SVGARemapGMR2Flags;
-
-typedef
-struct {
-   uint32 gmrId;
-   SVGARemapGMR2Flags flags;
-   uint32 offsetPages; /* offset in pages to begin remap */
-   uint32 numPages; /* number of pages to remap */
-   /*
-    * Followed by additional data depending on SVGARemapGMR2Flags.
-    *
-    * If flag SVGA_REMAP_GMR2_VIA_GMR is set, single SVGAGuestPtr follows.
-    * Otherwise an array of page descriptors in PPN32 or PPN64 format
-    * (according to flag SVGA_REMAP_GMR2_PPN64) follows.  If flag
-    * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
-    */
-} SVGAFifoCmdRemapGMR2;
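-
-/*
- * Illustrative sketch (editor addition, not part of the original
- * header): number of bytes of data that follow SVGAFifoCmdRemapGMR2
- * for each flag combination described above.
- */
-#if 0
-static inline uint32
-svgaRemapGMR2TailBytes(SVGARemapGMR2Flags flags, uint32 numPages)
-{
-   uint32 entries, entrySize;
-
-   if (flags & SVGA_REMAP_GMR2_VIA_GMR)
-      return sizeof(SVGAGuestPtr);
-
-   entries = (flags & SVGA_REMAP_GMR2_SINGLE_PPN) ? 1 : numPages;
-   entrySize = (flags & SVGA_REMAP_GMR2_PPN64) ? 8 : 4;
-   return entries * entrySize;
-}
-#endif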
-
-#endif
diff --git a/drivers/gpu/drm/vmwgfx/svga_types.h b/drivers/gpu/drm/vmwgfx/svga_types.h
deleted file mode 100644
index 55836de..0000000
--- a/drivers/gpu/drm/vmwgfx/svga_types.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/**
- * Silly typedefs for the svga headers. Currently the headers are shared
- * between all components that talk to svga. And as such the headers are
- * are in a completely different style and use weird defines.
- *
- * This file lets all the ugly be prefixed with svga*.
- */
-
-#ifndef _SVGA_TYPES_H_
-#define _SVGA_TYPES_H_
-
-typedef uint16_t uint16;
-typedef uint32_t uint32;
-typedef uint8_t uint8;
-typedef int32_t int32;
-typedef bool Bool;
-
-#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
new file mode 100644
index 0000000..9c42e96
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -0,0 +1,1294 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * This file implements the vmwgfx context binding manager.
+ * The sole reason for having to use this code is that VMware guest-backed
+ * contexts can be swapped out to their backing mobs by the device
+ * at any time, and swapped back in at any time. At swapin time, the device
+ * validates the context bindings to make sure they point to valid resources.
+ * It is this outside-of-drawcall validation, which can happen at any time,
+ * that makes this code necessary.
+ *
+ * We therefore need to kill any context bindings pointing to a resource
+ * when the resource is swapped out. Furthermore, if the vmwgfx driver has
+ * swapped out a context, we can't swap it in again just to kill bindings,
+ * because that would violate lockdep rules for backing mob reservation.
+ * So, as part of context swapout, we also kill all bindings of the context,
+ * so that they are already gone if a resource to which a binding points
+ * later needs to be swapped out.
+ *
+ * Note that a resource can be pointed to by bindings from multiple
+ * contexts. Therefore we can't easily protect this data with a per-context
+ * mutex (unless we use deadlock-safe WW mutexes). Instead we use a global
+ * binding_mutex to protect all binding manager data.
+ *
+ * Finally, any association between a context and a global resource
+ * (surface, shader or even DX query) is conceptually a context binding that
+ * needs to be tracked by this code.
+ */
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_binding.h"
+#include "device_include/svga3d_reg.h"
+
+#define VMW_BINDING_RT_BIT     0
+#define VMW_BINDING_PS_BIT     1
+#define VMW_BINDING_SO_BIT     2
+#define VMW_BINDING_VB_BIT     3
+#define VMW_BINDING_NUM_BITS   4
+
+#define VMW_BINDING_PS_SR_BIT  0
+
+/**
+ * struct vmw_ctx_binding_state - per context binding state
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @list: linked list of individual active bindings.
+ * @render_targets: Render target bindings.
+ * @texture_units: Texture units bindings.
+ * @ds_view: Depth-stencil view binding.
+ * @so_targets: StreamOutput target bindings.
+ * @vertex_buffers: Vertex buffer bindings.
+ * @index_buffer: Index buffer binding.
+ * @per_shader: Per shader-type bindings.
+ * @dirty: Bitmap tracking per binding-type changes that have not yet
+ * been emitted to the device.
+ * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
+ * have not yet been emitted to the device.
+ * @bind_cmd_buffer: Scratch space used to construct binding commands.
+ * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer
+ * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
+ * device binding slot of the first command data entry in @bind_cmd_buffer.
+ *
+ * Note that this structure also provides storage space for the individual
+ * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
+ * for individual bindings.
+ *
+ */
+struct vmw_ctx_binding_state {
+	struct vmw_private *dev_priv;
+	struct list_head list;
+	struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
+	struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
+	struct vmw_ctx_bindinfo_view ds_view;
+	struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS];
+	struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
+	struct vmw_ctx_bindinfo_ib index_buffer;
+	struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10];
+
+	unsigned long dirty;
+	DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
+
+	u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
+	u32 bind_cmd_count;
+	u32 bind_first_slot;
+};
+
+static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind);
+static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
+static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
+				       bool rebind);
+static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_binding_build_asserts(void) __attribute__ ((unused));
+
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
+
+/**
+ * struct vmw_binding_info - Per binding type information for the binding
+ * manager
+ *
+ * @size: The size of the binding struct, which is derived from struct vmw_ctx_bindinfo.
+ * @offsets: array[shader_slot] of offsets to the array[slot]
+ * of struct bindings for the binding type.
+ * @scrub_func: Pointer to the scrub function for this binding type.
+ *
+ * Holds static information to help optimize the binding manager and avoid
+ * an excessive amount of switch statements.
+ */
+struct vmw_binding_info {
+	size_t size;
+	const size_t *offsets;
+	vmw_scrub_func scrub_func;
+};
+
+/*
+ * A number of static variables that help determine the scrub func and the
+ * location of the struct vmw_ctx_bindinfo slots for each binding type.
+ */
+static const size_t vmw_binding_shader_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
+	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
+	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
+};
+static const size_t vmw_binding_rt_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, render_targets),
+};
+static const size_t vmw_binding_tex_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, texture_units),
+};
+static const size_t vmw_binding_cb_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
+	offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
+	offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
+};
+static const size_t vmw_binding_dx_ds_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, ds_view),
+};
+static const size_t vmw_binding_sr_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
+	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
+	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
+};
+static const size_t vmw_binding_so_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, so_targets),
+};
+static const size_t vmw_binding_vb_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, vertex_buffers),
+};
+static const size_t vmw_binding_ib_offsets[] = {
+	offsetof(struct vmw_ctx_binding_state, index_buffer),
+};
+
+static const struct vmw_binding_info vmw_binding_infos[] = {
+	[vmw_ctx_binding_shader] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_shader),
+		.offsets = vmw_binding_shader_offsets,
+		.scrub_func = vmw_binding_scrub_shader},
+	[vmw_ctx_binding_rt] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_rt_offsets,
+		.scrub_func = vmw_binding_scrub_render_target},
+	[vmw_ctx_binding_tex] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_tex),
+		.offsets = vmw_binding_tex_offsets,
+		.scrub_func = vmw_binding_scrub_texture},
+	[vmw_ctx_binding_cb] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_cb),
+		.offsets = vmw_binding_cb_offsets,
+		.scrub_func = vmw_binding_scrub_cb},
+	[vmw_ctx_binding_dx_shader] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_shader),
+		.offsets = vmw_binding_shader_offsets,
+		.scrub_func = vmw_binding_scrub_dx_shader},
+	[vmw_ctx_binding_dx_rt] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_rt_offsets,
+		.scrub_func = vmw_binding_scrub_dx_rt},
+	[vmw_ctx_binding_sr] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_sr_offsets,
+		.scrub_func = vmw_binding_scrub_sr},
+	[vmw_ctx_binding_ds] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_view),
+		.offsets = vmw_binding_dx_ds_offsets,
+		.scrub_func = vmw_binding_scrub_dx_rt},
+	[vmw_ctx_binding_so] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_so),
+		.offsets = vmw_binding_so_offsets,
+		.scrub_func = vmw_binding_scrub_so},
+	[vmw_ctx_binding_vb] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_vb),
+		.offsets = vmw_binding_vb_offsets,
+		.scrub_func = vmw_binding_scrub_vb},
+	[vmw_ctx_binding_ib] = {
+		.size = sizeof(struct vmw_ctx_bindinfo_ib),
+		.offsets = vmw_binding_ib_offsets,
+		.scrub_func = vmw_binding_scrub_ib},
+};
+
+/**
+ * vmw_cbs_context - Return a pointer to the context resource of a
+ * context binding state tracker.
+ *
+ * @cbs: The context binding state tracker.
+ *
+ * Provided there are any active bindings, this function will return an
+ * unreferenced pointer to the context resource that owns the context
+ * binding state tracker. If there are no active bindings, this function
+ * will return NULL. Note that the caller must somehow ensure that a reference
+ * is held on the context resource prior to calling this function.
+ */
+static const struct vmw_resource *
+vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
+{
+	if (list_empty(&cbs->list))
+		return NULL;
+
+	return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
+				ctx_list)->ctx;
+}
+
+/**
+ * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
+ *
+ * @cbs: Pointer to a struct vmw_ctx_binding state which holds the slot.
+ * @bt: The binding type.
+ * @shader_slot: The shader slot of the binding. If none, then set to 0.
+ * @slot: The slot of the binding.
+ */
+static struct vmw_ctx_bindinfo *
+vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
+		enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
+{
+	const struct vmw_binding_info *b = &vmw_binding_infos[bt];
+	size_t offset = b->offsets[shader_slot] + b->size*slot;
+
+	return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
+}
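+
+/*
+ * Example (editor illustration, not from the original author): with
+ * bt == vmw_ctx_binding_cb, shader_slot == 1 and slot == 3, the offset
+ * table selects per_shader[1].const_buffers and size * slot indexes
+ * entry 3, so the returned pointer points at
+ * cbs->per_shader[1].const_buffers[3].
+ */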
+
+/**
+ * vmw_binding_drop: Stop tracking a context binding
+ *
+ * @bi: Pointer to binding tracker storage.
+ *
+ * Stops tracking a context binding, and re-initializes its storage.
+ * Typically used when the context binding is replaced with a binding to
+ * another (or the same, for that matter) resource.
+ */
+static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
+{
+	list_del(&bi->ctx_list);
+	if (!list_empty(&bi->res_list))
+		list_del(&bi->res_list);
+	bi->ctx = NULL;
+}
+
+/**
+ * vmw_binding_add: Start tracking a context binding
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ * @bi: Information about the binding to track.
+ * @shader_slot: The shader slot of the binding, if applicable; otherwise 0.
+ * @slot: The slot of the binding.
+ *
+ * Starts tracking the binding in the context binding
+ * state structure @cbs.
+ */
+void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
+		    const struct vmw_ctx_bindinfo *bi,
+		    u32 shader_slot, u32 slot)
+{
+	struct vmw_ctx_bindinfo *loc =
+		vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
+	const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];
+
+	if (loc->ctx != NULL)
+		vmw_binding_drop(loc);
+
+	memcpy(loc, bi, b->size);
+	loc->scrubbed = false;
+	list_add(&loc->ctx_list, &cbs->list);
+	INIT_LIST_HEAD(&loc->res_list);
+}
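+
+/*
+ * Usage sketch (editor illustration, not from the original author):
+ * during command buffer validation one would fill in, e.g., a struct
+ * vmw_ctx_bindinfo_view for a render target and call
+ * vmw_binding_add(cbs, &binding.bi, 0, rt_slot) to stage it; the
+ * staged state is later moved to the context with
+ * vmw_binding_state_commit().
+ */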
+
+/**
+ * vmw_binding_transfer: Transfer a context binding tracking entry.
+ *
+ * @cbs: Pointer to the persistent context binding state tracker.
+ * @from: Staged binding state the binding info entry belongs to.
+ * @bi: Information about the binding to track.
+ *
+ */
+static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
+				 const struct vmw_ctx_binding_state *from,
+				 const struct vmw_ctx_bindinfo *bi)
+{
+	size_t offset = (unsigned long)bi - (unsigned long)from;
+	struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
+		((unsigned long) cbs + offset);
+
+	if (loc->ctx != NULL) {
+		WARN_ON(bi->scrubbed);
+
+		vmw_binding_drop(loc);
+	}
+
+	if (bi->res != NULL) {
+		memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
+		list_add_tail(&loc->ctx_list, &cbs->list);
+		list_add_tail(&loc->res_list, &loc->res->binding_head);
+	}
+}
+
+/**
+ * vmw_binding_state_kill - Kill all bindings associated with a
+ * struct vmw_ctx_binding_state structure, and re-initialize the structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker. Then re-initializes the whole structure.
+ */
+void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_bindinfo *entry, *next;
+
+	vmw_binding_state_scrub(cbs);
+	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+		vmw_binding_drop(entry);
+}
+
+/**
+ * vmw_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding_state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_bindinfo *entry;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (!entry->scrubbed) {
+			(void) vmw_binding_infos[entry->bt].scrub_func
+				(entry, false);
+			entry->scrubbed = true;
+		}
+	}
+
+	(void) vmw_binding_emit_dirty(cbs);
+}
+
+/**
+ * vmw_binding_res_list_kill - Kill all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Kills all bindings associated with a specific resource. Typically
+ * called before the resource is destroyed.
+ */
+void vmw_binding_res_list_kill(struct list_head *head)
+{
+	struct vmw_ctx_bindinfo *entry, *next;
+
+	vmw_binding_res_list_scrub(head);
+	list_for_each_entry_safe(entry, next, head, res_list)
+		vmw_binding_drop(entry);
+}
+
+/**
+ * vmw_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_binding_res_list_scrub(struct list_head *head)
+{
+	struct vmw_ctx_bindinfo *entry;
+
+	list_for_each_entry(entry, head, res_list) {
+		if (!entry->scrubbed) {
+			(void) vmw_binding_infos[entry->bt].scrub_func
+				(entry, false);
+			entry->scrubbed = true;
+		}
+	}
+
+	list_for_each_entry(entry, head, res_list) {
+		struct vmw_ctx_binding_state *cbs =
+			vmw_context_binding_state(entry->ctx);
+
+		(void) vmw_binding_emit_dirty(cbs);
+	}
+}
+
+
+/**
+ * vmw_binding_state_commit - Commit staged binding info
+ *
+ * @to: Pointer to the persistent binding state to commit to.
+ * @from: Staged binding info built during execbuf.
+ *
+ * Transfers binding info from a temporary structure
+ * (typically used by execbuf) to the persistent
+ * structure in the context. This can be done once commands have been
+ * submitted to hardware.
+ */
+void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
+			      struct vmw_ctx_binding_state *from)
+{
+	struct vmw_ctx_bindinfo *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
+		vmw_binding_transfer(to, from, entry);
+		vmw_binding_drop(entry);
+	}
+}
+
+/**
+ * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_bindinfo *entry;
+	int ret;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (likely(!entry->scrubbed))
+			continue;
+
+		if (entry->res == NULL || entry->res->id == SVGA3D_INVALID_ID)
+			continue;
+
+		ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
+		if (unlikely(ret != 0))
+			return ret;
+
+		entry->scrubbed = false;
+	}
+
+	return vmw_binding_emit_dirty(cbs);
+}
+
+/**
+ * vmw_binding_scrub_shader - scrub a shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_shader *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetShader body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = bi->ctx->id;
+	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_render_target - scrub a render target binding
+ * from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind)
+{
+	struct vmw_ctx_bindinfo_view *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetRenderTarget body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for render target "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = bi->ctx->id;
+	cmd->body.type = binding->slot;
+	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	cmd->body.target.face = 0;
+	cmd->body.target.mipmap = 0;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_texture - scrub a texture binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ *
+ * TODO: Possibly complement this function with a function that takes
+ * a list of texture bindings and combines them into a single command.
+ */
+static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
+				     bool rebind)
+{
+	struct vmw_ctx_bindinfo_tex *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		struct {
+			SVGA3dCmdSetTextureState c;
+			SVGA3dTextureState s1;
+		} body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for texture "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.c.cid = bi->ctx->id;
+	cmd->body.s1.stage = binding->texture_stage;
+	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
+	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_shader *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetShader body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX shader "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+	cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+	cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_cb *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetSingleConstantBuffer body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX shader "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.slot = binding->slot;
+	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
+	if (rebind) {
+		cmd->body.offsetInBytes = binding->offset;
+		cmd->body.sizeInBytes = binding->size;
+		cmd->body.sid = bi->res->id;
+	} else {
+		cmd->body.offsetInBytes = 0;
+		cmd->body.sizeInBytes = 0;
+		cmd->body.sid = SVGA3D_INVALID_ID;
+	}
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_collect_view_ids - Build view id data for a view binding command
+ * without checking which bindings actually need to be emitted
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings and builds a buffer of view id data.
+ * Stops at the first non-existing binding in the @bi array.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
+ * contains the command data.
+ */
+static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
+				 const struct vmw_ctx_bindinfo *bi,
+				 u32 max_num)
+{
+	const struct vmw_ctx_bindinfo_view *biv =
+		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+	unsigned long i;
+
+	cbs->bind_cmd_count = 0;
+	cbs->bind_first_slot = 0;
+
+	for (i = 0; i < max_num; ++i, ++biv) {
+		if (!biv->bi.ctx)
+			break;
+
+		cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
+			((biv->bi.scrubbed) ?
+			 SVGA3D_INVALID_ID : biv->bi.res->id);
+	}
+}
+
+/**
+ * vmw_collect_dirty_view_ids - Build view id data for a view binding command
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @dirty: Bitmap indicating which bindings need to be emitted.
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings that need to be emitted and
+ * builds a buffer of view id data.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot indicates the index of the first emitted
+ * binding, and @cbs->bind_cmd_buffer contains the command data.
+ */
+static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
+				       const struct vmw_ctx_bindinfo *bi,
+				       unsigned long *dirty,
+				       u32 max_num)
+{
+	const struct vmw_ctx_bindinfo_view *biv =
+		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+	unsigned long i, next_bit;
+
+	cbs->bind_cmd_count = 0;
+	i = find_first_bit(dirty, max_num);
+	next_bit = i;
+	cbs->bind_first_slot = i;
+
+	biv += i;
+	for (; i < max_num; ++i, ++biv) {
+		cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
+			((!biv->bi.ctx || biv->bi.scrubbed) ?
+			 SVGA3D_INVALID_ID : biv->bi.res->id);
+
+		if (next_bit == i) {
+			next_bit = find_next_bit(dirty, max_num, i + 1);
+			if (next_bit >= max_num)
+				break;
+		}
+	}
+}
+
+/**
+ * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @shader_slot: The shader slot whose dirty shader resources are emitted.
+ */
+static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
+			   int shader_slot)
+{
+	const struct vmw_ctx_bindinfo *loc =
+		&cbs->per_shader[shader_slot].shader_res[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetShaderResources body;
+	} *cmd;
+	size_t cmd_size, view_id_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_dirty_view_ids(cbs, loc,
+				   cbs->per_shader[shader_slot].dirty_sr,
+				   SVGA3D_DX_MAX_SRVIEWS);
+	if (cbs->bind_cmd_count == 0)
+		return 0;
+
+	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+	cmd_size = sizeof(*cmd) + view_id_size;
+	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX shader"
+			  " resource binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
+	cmd->header.size = sizeof(cmd->body) + view_id_size;
+	cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
+	cmd->body.startView = cbs->bind_first_slot;
+
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+	bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
+		     cbs->bind_first_slot, cbs->bind_cmd_count);
+
+	return 0;
+}
+
+/**
+ * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
+{
+	const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetRenderTargets body;
+	} *cmd;
+	size_t cmd_size, view_id_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
+	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
+	cmd_size = sizeof(*cmd) + view_id_size;
+	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX render-target"
+			  " binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
+	cmd->header.size = sizeof(cmd->body) + view_id_size;
+
+	if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
+		cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
+	else
+		cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;
+
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+	return 0;
+
+
+/**
+ * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
+ * without checking which bindings actually need to be emitted
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
+ * Stops at the first non-existing binding in the @bi array.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
+ * contains the command data.
+ */
+static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
+				   const struct vmw_ctx_bindinfo *bi,
+				   u32 max_num)
+{
+	const struct vmw_ctx_bindinfo_so *biso =
+		container_of(bi, struct vmw_ctx_bindinfo_so, bi);
+	unsigned long i;
+	SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
+
+	cbs->bind_cmd_count = 0;
+	cbs->bind_first_slot = 0;
+
+	for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
+		    ++cbs->bind_cmd_count) {
+		if (!biso->bi.ctx)
+			break;
+
+		if (!biso->bi.scrubbed) {
+			so_buffer->sid = biso->bi.res->id;
+			so_buffer->offset = biso->offset;
+			so_buffer->sizeInBytes = biso->size;
+		} else {
+			so_buffer->sid = SVGA3D_INVALID_ID;
+			so_buffer->offset = 0;
+			so_buffer->sizeInBytes = 0;
+		}
+	}
+}
+
+/**
+ * vmw_emit_set_so - Issue delayed streamout binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ */
+static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
+{
+	const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetSOTargets body;
+	} *cmd;
+	size_t cmd_size, so_target_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
+	if (cbs->bind_cmd_count == 0)
+		return 0;
+
+	so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
+	cmd_size = sizeof(*cmd) + so_target_size;
+	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX SO target"
+			  " binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
+	cmd->header.size = sizeof(cmd->body) + so_target_size;
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ */
+static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
+	u32 i;
+	int ret;
+
+	for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
+		if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
+			continue;
+
+		ret = vmw_emit_set_sr(cbs, i);
+		if (ret)
+			return ret;
+
+		__clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
+ * SVGA3dCmdDXSetVertexBuffers command
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @bi: Pointer to where the binding info array is stored in @cbs
+ * @dirty: Bitmap indicating which bindings need to be emitted.
+ * @max_num: Maximum number of entries in the @bi array.
+ *
+ * Scans the @bi array for bindings that need to be emitted and
+ * builds a buffer of SVGA3dVertexBuffer data.
+ * On output, @cbs->bind_cmd_count contains the number of bindings to be
+ * emitted, @cbs->bind_first_slot indicates the index of the first emitted
+ * binding, and @cbs->bind_cmd_buffer contains the command data.
+ */
+static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
+				  const struct vmw_ctx_bindinfo *bi,
+				  unsigned long *dirty,
+				  u32 max_num)
+{
+	const struct vmw_ctx_bindinfo_vb *biv =
+		container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
+	unsigned long i, next_bit;
+	SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;
+
+	cbs->bind_cmd_count = 0;
+	i = find_first_bit(dirty, max_num);
+	next_bit = i;
+	cbs->bind_first_slot = i;
+
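+	/*
+	 * The emitted range is contiguous: it runs from the first dirty
+	 * slot through the last dirty slot, re-emitting the current state
+	 * of any clean slots in between, so the device sees a single
+	 * startBuffer/count range.
+	 */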
+	biv += i;
+	for (; i < max_num; ++i, ++biv, ++vbs) {
+		if (!biv->bi.ctx || biv->bi.scrubbed) {
+			vbs->sid = SVGA3D_INVALID_ID;
+			vbs->stride = 0;
+			vbs->offset = 0;
+		} else {
+			vbs->sid = biv->bi.res->id;
+			vbs->stride = biv->stride;
+			vbs->offset = biv->offset;
+		}
+		cbs->bind_cmd_count++;
+		if (next_bit == i) {
+			next_bit = find_next_bit(dirty, max_num, i + 1);
+			if (next_bit >= max_num)
+				break;
+		}
+	}
+}
+
+/**
+ * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ */
+static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
+{
+	const struct vmw_ctx_bindinfo *loc =
+		&cbs->vertex_buffers[0].bi;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetVertexBuffers body;
+	} *cmd;
+	size_t cmd_size, set_vb_size;
+	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
+
+	vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
+			     SVGA3D_DX_MAX_VERTEXBUFFERS);
+	if (cbs->bind_cmd_count == 0)
+		return 0;
+
+	set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
+	cmd_size = sizeof(*cmd) + set_vb_size;
+	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX vertex buffer"
+			  " binding.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
+	cmd->header.size = sizeof(cmd->body) + set_vb_size;
+	cmd->body.startBuffer = cbs->bind_first_slot;
+
+	memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
+
+	vmw_fifo_commit(ctx->dev_priv, cmd_size);
+	bitmap_clear(cbs->dirty_vb,
+		     cbs->bind_first_slot, cbs->bind_cmd_count);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_emit_dirty - Issue delayed binding commands
+ *
+ * @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ *
+ * This function issues the delayed binding commands that arise from
+ * previous scrub / unscrub calls. These binding commands are typically
+ * commands that batch a number of bindings and therefore it makes sense
+ * to delay them.
+ */
+static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
+{
+	int ret = 0;
+	unsigned long hit = 0;
+
+	while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
+	      < VMW_BINDING_NUM_BITS) {
+
+		switch (hit) {
+		case VMW_BINDING_RT_BIT:
+			ret = vmw_emit_set_rt(cbs);
+			break;
+		case VMW_BINDING_PS_BIT:
+			ret = vmw_binding_emit_dirty_ps(cbs);
+			break;
+		case VMW_BINDING_SO_BIT:
+			ret = vmw_emit_set_so(cbs);
+			break;
+		case VMW_BINDING_VB_BIT:
+			ret = vmw_emit_set_vb(cbs);
+			break;
+		default:
+			BUG();
+		}
+		if (ret)
+			return ret;
+
+		__clear_bit(hit, &cbs->dirty);
+		hit++;
+	}
+
+	return 0;
+}
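+
+/*
+ * Relationship to the scrub helpers below (illustrative sketch, not a
+ * complete call chain): most of the vmw_binding_scrub_*() functions only
+ * mark state dirty (vmw_binding_scrub_ib() is the exception and emits
+ * directly), for example
+ *
+ *	vmw_binding_scrub_vb(bi, rebind);
+ *		sets bivb->slot in cbs->dirty_vb and VMW_BINDING_VB_BIT
+ *		in cbs->dirty
+ *	vmw_binding_emit_dirty(cbs);
+ *		later consumes the dirty bits and batches the marked
+ *		bindings into single device commands
+ */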
+
+/**
+ * vmw_binding_scrub_sr - Schedule a dx shader resource binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_view *biv =
+		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
+	struct vmw_ctx_binding_state *cbs =
+		vmw_context_binding_state(bi->ctx);
+
+	__set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
+	__set_bit(VMW_BINDING_PS_SR_BIT,
+		  &cbs->per_shader[biv->shader_slot].dirty);
+	__set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_binding_state *cbs =
+		vmw_context_binding_state(bi->ctx);
+
+	__set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_binding_state *cbs =
+		vmw_context_binding_state(bi->ctx);
+
+	__set_bit(VMW_BINDING_SO_BIT, &cbs->dirty);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding
+ * scrub from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_vb *bivb =
+		container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
+	struct vmw_ctx_binding_state *cbs =
+		vmw_context_binding_state(bi->ctx);
+
+	__set_bit(bivb->slot, cbs->dirty_vb);
+	__set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);
+
+	return 0;
+}
+
+/**
+ * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context
+ *
+ * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
+ */
+static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
+{
+	struct vmw_ctx_bindinfo_ib *binding =
+		container_of(bi, typeof(*binding), bi);
+	struct vmw_private *dev_priv = bi->ctx->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetIndexBuffer body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for DX index buffer "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+	cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
+	cmd->header.size = sizeof(cmd->body);
+	if (rebind) {
+		cmd->body.sid = bi->res->id;
+		cmd->body.format = binding->format;
+		cmd->body.offset = binding->offset;
+	} else {
+		cmd->body.sid = SVGA3D_INVALID_ID;
+		cmd->body.format = 0;
+		cmd->body.offset = 0;
+	}
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
+ * memory accounting.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Returns a pointer to a newly allocated struct or an error pointer on error.
+ */
+struct vmw_ctx_binding_state *
+vmw_binding_state_alloc(struct vmw_private *dev_priv)
+{
+	struct vmw_ctx_binding_state *cbs;
+	int ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
+				   false, false);
+	if (ret)
+		return ERR_PTR(ret);
+
+	cbs = vzalloc(sizeof(*cbs));
+	if (!cbs) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
+		return ERR_PTR(-ENOMEM);
+	}
+
+	cbs->dev_priv = dev_priv;
+	INIT_LIST_HEAD(&cbs->list);
+
+	return cbs;
+}
+
+/**
+ * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
+ * memory accounting info.
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
+ */
+void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_private *dev_priv = cbs->dev_priv;
+
+	vfree(cbs);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
+}
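+
+/*
+ * Usage sketch for the allocation pair above:
+ *
+ *	struct vmw_ctx_binding_state *cbs = vmw_binding_state_alloc(dev_priv);
+ *
+ *	if (IS_ERR(cbs))
+ *		return PTR_ERR(cbs);
+ *	...
+ *	vmw_binding_state_free(cbs);
+ */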
+
+/**
+ * vmw_binding_state_list - Get the binding list of a
+ * struct vmw_ctx_binding_state
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state
+ *
+ * Returns the binding list which can be used to traverse through the bindings
+ * and access the resource information of all bindings.
+ */
+struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
+{
+	return &cbs->list;
+}
+
+/**
+ * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
+ *
+ * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
+ *
+ * Drops all bindings registered in @cbs. No device binding actions are
+ * performed.
+ */
+void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_bindinfo *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
+		vmw_binding_drop(entry);
+}
+
+/*
+ * This function is unused at run-time, and only used to hold various build
+ * asserts important for code optimization assumptions.
+ */
+static void vmw_binding_build_asserts(void)
+{
+	BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
+	BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX);
+	BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));
+
+	/*
+	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
+	 * view id arrays.
+	 */
+	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
+	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
+	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);
+
+	/*
+	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for
+	 * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
+	 */
+	BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
+		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
+	BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
+		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
new file mode 100644
index 0000000..bf2e77a
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -0,0 +1,209 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef _VMWGFX_BINDING_H_
+#define _VMWGFX_BINDING_H_
+
+#include "device_include/svga3d_reg.h"
+#include <linux/list.h>
+
+#define VMW_MAX_VIEW_BINDINGS 128
+
+struct vmw_private;
+struct vmw_ctx_binding_state;
+
+/*
+ * enum vmw_ctx_binding_type - abstract resource to context binding types
+ */
+enum vmw_ctx_binding_type {
+	vmw_ctx_binding_shader,
+	vmw_ctx_binding_rt,
+	vmw_ctx_binding_tex,
+	vmw_ctx_binding_cb,
+	vmw_ctx_binding_dx_shader,
+	vmw_ctx_binding_dx_rt,
+	vmw_ctx_binding_sr,
+	vmw_ctx_binding_ds,
+	vmw_ctx_binding_so,
+	vmw_ctx_binding_vb,
+	vmw_ctx_binding_ib,
+	vmw_ctx_binding_max
+};
+
+/**
+ * struct vmw_ctx_bindinfo - single binding metadata
+ *
+ * @ctx_list: List head for the context's list of bindings.
+ * @res_list: List head for a resource's list of bindings.
+ * @ctx: Non-refcounted pointer to the context that owns the binding. NULL
+ * indicates no binding present.
+ * @res: Non-refcounted pointer to the resource the binding points to. This
+ * is typically a surface or a view.
+ * @bt: Binding type.
+ * @scrubbed: Whether the binding has been scrubbed from the context.
+ */
+struct vmw_ctx_bindinfo {
+	struct list_head ctx_list;
+	struct list_head res_list;
+	struct vmw_resource *ctx;
+	struct vmw_resource *res;
+	enum vmw_ctx_binding_type bt;
+	bool scrubbed;
+};
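+
+/*
+ * The specialized binding types below all embed struct vmw_ctx_bindinfo as
+ * their first member, and implementations recover the containing type from
+ * a generic @bi pointer with container_of(), e.g.:
+ *
+ *	struct vmw_ctx_bindinfo_vb *biv =
+ *		container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
+ */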
+
+/**
+ * struct vmw_ctx_bindinfo_tex - texture stage binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @texture_stage: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_tex {
+	struct vmw_ctx_bindinfo bi;
+	uint32 texture_stage;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_shader - Shader binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_shader {
+	struct vmw_ctx_bindinfo bi;
+	SVGA3dShaderType shader_slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_cb - Constant buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ * @offset: Device data used to reconstruct binding command.
+ * @size: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_cb {
+	struct vmw_ctx_bindinfo bi;
+	SVGA3dShaderType shader_slot;
+	uint32 offset;
+	uint32 size;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_view - View binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @shader_slot: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_view {
+	struct vmw_ctx_bindinfo bi;
+	SVGA3dShaderType shader_slot;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_so - StreamOutput binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @size: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_so {
+	struct vmw_ctx_bindinfo bi;
+	uint32 offset;
+	uint32 size;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_vb - Vertex buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @stride: Device data used to reconstruct binding command.
+ * @slot: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_vb {
+	struct vmw_ctx_bindinfo bi;
+	uint32 offset;
+	uint32 stride;
+	uint32 slot;
+};
+
+/**
+ * struct vmw_ctx_bindinfo_ib - Index buffer binding metadata
+ *
+ * @bi: struct vmw_ctx_bindinfo we derive from.
+ * @offset: Device data used to reconstruct binding command.
+ * @format: Device data used to reconstruct binding command.
+ */
+struct vmw_ctx_bindinfo_ib {
+	struct vmw_ctx_bindinfo bi;
+	uint32 offset;
+	uint32 format;
+};
+
+/**
+ * struct vmw_dx_shader_bindings - per shader type context binding state
+ *
+ * @shader: The shader binding for this shader type
+ * @const_buffers: Const buffer bindings for this shader type.
+ * @shader_res: Shader resource view bindings for this shader type.
+ * @dirty_sr: Bitmap tracking individual shader resource bindings changes
+ * that have not yet been emitted to the device.
+ * @dirty: Bitmap tracking per-binding type binding changes that have not
+ * yet been emitted to the device.
+ */
+struct vmw_dx_shader_bindings {
+	struct vmw_ctx_bindinfo_shader shader;
+	struct vmw_ctx_bindinfo_cb const_buffers[SVGA3D_DX_MAX_CONSTBUFFERS];
+	struct vmw_ctx_bindinfo_view shader_res[SVGA3D_DX_MAX_SRVIEWS];
+	DECLARE_BITMAP(dirty_sr, SVGA3D_DX_MAX_SRVIEWS);
+	unsigned long dirty;
+};
+
+extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
+			    const struct vmw_ctx_bindinfo *ci,
+			    u32 shader_slot, u32 slot);
+extern void
+vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
+			 struct vmw_ctx_binding_state *from);
+extern void vmw_binding_res_list_kill(struct list_head *head);
+extern void vmw_binding_res_list_scrub(struct list_head *head);
+extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
+extern struct vmw_ctx_binding_state *
+vmw_binding_state_alloc(struct vmw_private *dev_priv);
+extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
+extern struct list_head *
+vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
+extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index cff2bf9..3329f62 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -72,6 +72,12 @@
 	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
 };
 
+static struct ttm_place mob_ne_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
 struct ttm_placement vmw_vram_placement = {
 	.num_placement = 1,
 	.placement = &vram_placement_flags,
@@ -200,6 +206,13 @@
 	.busy_placement = &mob_placement_flags
 };
 
+struct ttm_placement vmw_mob_ne_placement = {
+	.num_placement = 1,
+	.num_busy_placement = 1,
+	.placement = &mob_ne_placement_flags,
+	.busy_placement = &mob_ne_placement_flags
+};
+
 struct vmw_ttm_tt {
 	struct ttm_dma_tt dma_ttm;
 	struct vmw_private *dev_priv;
@@ -804,9 +817,9 @@
 /**
  * vmw_move_notify - TTM move_notify_callback
  *
- * @bo:             The TTM buffer object about to move.
- * @mem:            The truct ttm_mem_reg indicating to what memory
- *                  region the move is taking place.
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ *       region the move is taking place.
  *
  * Calls move_notify for all subsystems needing it.
  * (currently only resources).
@@ -815,13 +828,14 @@
 			    struct ttm_mem_reg *mem)
 {
 	vmw_resource_move_notify(bo, mem);
+	vmw_query_move_notify(bo, mem);
 }
 
 
 /**
  * vmw_swap_notify - TTM move_notify_callback
  *
- * @bo:             The TTM buffer object about to be swapped out.
+ * @bo: The TTM buffer object about to be swapped out.
  */
 static void vmw_swap_notify(struct ttm_buffer_object *bo)
 {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
new file mode 100644
index 0000000..5ae8f92
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -0,0 +1,1303 @@
+/**************************************************************************
+ *
+ * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_bo_api.h"
+
+/*
+ * Size of inline command buffers. Chosen so that the page size is a
+ * multiple of the DMA pool allocation size.
+ */
+#define VMW_CMDBUF_INLINE_ALIGN 64
+#define VMW_CMDBUF_INLINE_SIZE \
+	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
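+
+/*
+ * With the values above, sizeof(struct vmw_cmdbuf_dheader) works out to
+ * exactly 1024 bytes: ALIGN(sizeof(SVGACBHeader), 64) for the header plus
+ * the remaining inline command space. Assuming 4 KiB pages, four inline
+ * headers therefore pack into each page without waste.
+ */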
+
+/**
+ * struct vmw_cmdbuf_context - Command buffer context queues
+ *
+ * @submitted: List of command buffers that have been submitted to the
+ * manager but not yet submitted to hardware.
+ * @hw_submitted: List of command buffers submitted to hardware.
+ * @preempted: List of preempted command buffers.
+ * @num_hw_submitted: Number of buffers currently being processed by hardware
+ */
+struct vmw_cmdbuf_context {
+	struct list_head submitted;
+	struct list_head hw_submitted;
+	struct list_head preempted;
+	unsigned num_hw_submitted;
+};
+
+/**
+ * struct vmw_cmdbuf_man: - Command buffer manager
+ *
+ * @cur_mutex: Mutex protecting the command buffer used for incremental small
+ * kernel command submissions, @cur.
+ * @space_mutex: Mutex to protect against starvation when we allocate
+ * main pool buffer space.
+ * @work: A struct work_struct implementing command buffer error handling.
+ * Immutable.
+ * @dev_priv: Pointer to the device private struct. Immutable.
+ * @ctx: Array of command buffer context queues. The queues and the context
+ * data is protected by @lock.
+ * @error: List of command buffers that have caused device errors.
+ * Protected by @lock.
+ * @mm: Range manager for the command buffer space. Manager allocations and
+ * frees are protected by @lock.
+ * @cmd_space: Buffer object for the command buffer space, unless we were
+ * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
+ * @map_obj: Mapping state for @cmd_space. Immutable.
+ * @map: Pointer to command buffer space. May be a mapped buffer object or
+ * a contiguous coherent DMA memory allocation. Immutable.
+ * @cur: Command buffer for small kernel command submissions. Protected by
+ * the @cur_mutex.
+ * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
+ * @default_size: Default size for the @cur command buffer. Immutable.
+ * @max_hw_submitted: Max number of in-flight command buffers the device can
+ * handle. Immutable.
+ * @lock: Spinlock protecting command submission queues.
+ * @header: Pool of DMA memory for device command buffer headers.
+ * Internal protection.
+ * @dheaders: Pool of DMA memory for device command buffer headers with trailing
+ * space for inline data. Internal protection.
+ * @tasklet: Tasklet struct for irq processing. Immutable.
+ * @alloc_queue: Wait queue for processes waiting to allocate command buffer
+ * space.
+ * @idle_queue: Wait queue for processes waiting for command buffer idle.
+ * @irq_on: Whether the process function has requested irq to be turned on.
+ * Protected by @lock.
+ * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
+ * allocation. Immutable.
+ * @has_pool: Has a large pool of DMA memory which allows larger allocations.
+ * Typically this is false only during bootstrap.
+ * @handle: DMA address handle for the command buffer space if @using_mob is
+ * false. Immutable.
+ * @size: The size of the command buffer space. Immutable.
+ */
+struct vmw_cmdbuf_man {
+	struct mutex cur_mutex;
+	struct mutex space_mutex;
+	struct work_struct work;
+	struct vmw_private *dev_priv;
+	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
+	struct list_head error;
+	struct drm_mm mm;
+	struct ttm_buffer_object *cmd_space;
+	struct ttm_bo_kmap_obj map_obj;
+	u8 *map;
+	struct vmw_cmdbuf_header *cur;
+	size_t cur_pos;
+	size_t default_size;
+	unsigned max_hw_submitted;
+	spinlock_t lock;
+	struct dma_pool *headers;
+	struct dma_pool *dheaders;
+	struct tasklet_struct tasklet;
+	wait_queue_head_t alloc_queue;
+	wait_queue_head_t idle_queue;
+	bool irq_on;
+	bool using_mob;
+	bool has_pool;
+	dma_addr_t handle;
+	size_t size;
+};
+
+/**
+ * struct vmw_cmdbuf_header - Command buffer metadata
+ *
+ * @man: The command buffer manager.
+ * @cb_header: Device command buffer header, allocated from a DMA pool.
+ * @cb_context: The device command buffer context.
+ * @list: List head for attaching to the manager lists.
+ * @node: The range manager node.
+ * @handle: The DMA address of @cb_header. Handed to the device on command
+ * buffer submission.
+ * @cmd: Pointer to the command buffer space of this buffer.
+ * @size: Size of the command buffer space of this buffer.
+ * @reserved: Reserved space of this buffer.
+ * @inline_space: Whether inline command buffer space is used.
+ */
+struct vmw_cmdbuf_header {
+	struct vmw_cmdbuf_man *man;
+	SVGACBHeader *cb_header;
+	SVGACBContext cb_context;
+	struct list_head list;
+	struct drm_mm_node node;
+	dma_addr_t handle;
+	u8 *cmd;
+	size_t size;
+	size_t reserved;
+	bool inline_space;
+};
+
+/**
+ * struct vmw_cmdbuf_dheader - Device command buffer header with inline
+ * command buffer space.
+ *
+ * @cb_header: Device command buffer header.
+ * @cmd: Inline command buffer space.
+ */
+struct vmw_cmdbuf_dheader {
+	SVGACBHeader cb_header;
+	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
+};
+
+/**
+ * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
+ *
+ * @page_size: Size of requested command buffer space in pages.
+ * @node: Pointer to the range manager node.
+ * @done: True if this allocation has succeeded.
+ */
+struct vmw_cmdbuf_alloc_info {
+	size_t page_size;
+	struct drm_mm_node *node;
+	bool done;
+};
+
+/* Loop over each context in the command buffer manager. */
+#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
+	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
+	     ++(_i), ++(_ctx))
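+
+/*
+ * Example use (as in vmw_cmdbuf_man_create() below):
+ *
+ *	for_each_cmdbuf_ctx(man, i, ctx)
+ *		vmw_cmdbuf_ctx_init(ctx);
+ */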
+
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
+
+
+/**
+ * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
+ *
+ * @man: The range manager.
+ * @interruptible: Whether to wait interruptible when locking.
+ */
+static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
+{
+	if (interruptible) {
+		if (mutex_lock_interruptible(&man->cur_mutex))
+			return -ERESTARTSYS;
+	} else {
+		mutex_lock(&man->cur_mutex);
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
+ *
+ * @man: The range manager.
+ */
+static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
+{
+	mutex_unlock(&man->cur_mutex);
+}
+
+/**
+ * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
+ * been used for the device context with inline command buffers.
+ * Need not be called locked.
+ *
+ * @header: Pointer to the header to free.
+ */
+static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
+{
+	struct vmw_cmdbuf_dheader *dheader;
+
+	if (WARN_ON_ONCE(!header->inline_space))
+		return;
+
+	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
+			       cb_header);
+	dma_pool_free(header->man->dheaders, dheader, header->handle);
+	kfree(header);
+}
+
+/**
+ * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
+ * associated structures.
+ *
+ * @header: Pointer to the header to free.
+ *
+ * For internal use. Must be called with man::lock held.
+ */
+static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
+{
+	struct vmw_cmdbuf_man *man = header->man;
+
+	BUG_ON(!spin_is_locked(&man->lock));
+
+	if (header->inline_space) {
+		vmw_cmdbuf_header_inline_free(header);
+		return;
+	}
+
+	drm_mm_remove_node(&header->node);
+	wake_up_all(&man->alloc_queue);
+	if (header->cb_header)
+		dma_pool_free(man->headers, header->cb_header,
+			      header->handle);
+	kfree(header);
+}
+
+/**
+ * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
+ * associated structures.
+ *
+ * @header: Pointer to the header to free.
+ */
+void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
+{
+	struct vmw_cmdbuf_man *man = header->man;
+
+	/* Avoid locking if inline_space */
+	if (header->inline_space) {
+		vmw_cmdbuf_header_inline_free(header);
+		return;
+	}
+	spin_lock_bh(&man->lock);
+	__vmw_cmdbuf_header_free(header);
+	spin_unlock_bh(&man->lock);
+}
+
+
+/**
+ * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
+ *
+ * @header: The header of the buffer to submit.
+ */
+static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
+{
+	struct vmw_cmdbuf_man *man = header->man;
+	u32 val;
+
+	if (sizeof(header->handle) > 4)
+		val = (header->handle >> 32);
+	else
+		val = 0;
+	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
+
+	val = (header->handle & 0xFFFFFFFFULL);
+	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
+	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
+
+	return header->cb_header->status;
+}
+
+/**
+ * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
+ *
+ * @ctx: The command buffer context to initialize
+ */
+static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
+{
+	INIT_LIST_HEAD(&ctx->hw_submitted);
+	INIT_LIST_HEAD(&ctx->submitted);
+	INIT_LIST_HEAD(&ctx->preempted);
+	ctx->num_hw_submitted = 0;
+}
+
+/**
+ * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
+ * context.
+ *
+ * @man: The command buffer manager.
+ * @ctx: The command buffer context.
+ *
+ * Submits command buffers to hardware until there are no more command
+ * buffers to submit or the hardware can't handle more command buffers.
+ */
+static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
+				  struct vmw_cmdbuf_context *ctx)
+{
+	while (ctx->num_hw_submitted < man->max_hw_submitted &&
+	      !list_empty(&ctx->submitted)) {
+		struct vmw_cmdbuf_header *entry;
+		SVGACBStatus status;
+
+		entry = list_first_entry(&ctx->submitted,
+					 struct vmw_cmdbuf_header,
+					 list);
+
+		status = vmw_cmdbuf_header_submit(entry);
+
+		/* This should never happen */
+		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
+			entry->cb_header->status = SVGA_CB_STATUS_NONE;
+			break;
+		}
+
+		list_del(&entry->list);
+		list_add_tail(&entry->list, &ctx->hw_submitted);
+		ctx->num_hw_submitted++;
+	}
+}
+
+/**
+ * vmw_cmdbuf_ctx_process: Process a command buffer context.
+ *
+ * @man: The command buffer manager.
+ * @ctx: The command buffer context.
+ * @notempty: Pointer to a counter that is incremented if the context
+ * still has command buffers queued after processing.
+ *
+ * Submit command buffers to hardware if possible, and process finished
+ * buffers. Typically freeing them, but on preemption or error take
+ * appropriate action. Wake up waiters if appropriate.
+ */
+static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
+				   struct vmw_cmdbuf_context *ctx,
+				   int *notempty)
+{
+	struct vmw_cmdbuf_header *entry, *next;
+
+	vmw_cmdbuf_ctx_submit(man, ctx);
+
+	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
+		SVGACBStatus status = entry->cb_header->status;
+
+		if (status == SVGA_CB_STATUS_NONE)
+			break;
+
+		list_del(&entry->list);
+		wake_up_all(&man->idle_queue);
+		ctx->num_hw_submitted--;
+		switch (status) {
+		case SVGA_CB_STATUS_COMPLETED:
+			__vmw_cmdbuf_header_free(entry);
+			break;
+		case SVGA_CB_STATUS_COMMAND_ERROR:
+		case SVGA_CB_STATUS_CB_HEADER_ERROR:
+			list_add_tail(&entry->list, &man->error);
+			schedule_work(&man->work);
+			break;
+		case SVGA_CB_STATUS_PREEMPTED:
+			list_add(&entry->list, &ctx->preempted);
+			break;
+		default:
+			WARN_ONCE(true, "Undefined command buffer status.\n");
+			__vmw_cmdbuf_header_free(entry);
+			break;
+		}
+	}
+
+	vmw_cmdbuf_ctx_submit(man, ctx);
+	if (!list_empty(&ctx->submitted))
+		(*notempty)++;
+}
+
+/**
+ * vmw_cmdbuf_man_process - Process all command buffer contexts and
+ * switch on and off irqs as appropriate.
+ *
+ * @man: The command buffer manager.
+ *
+ * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
+ * command buffers left that are not submitted to hardware, make sure
+ * IRQ handling is turned on. Otherwise, make sure it's turned off. This
+ * function may return -EAGAIN to indicate that it should be rerun due to
+ * possibly missed IRQs if IRQ handling has just been turned on.
+ */
+static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
+{
+	int notempty = 0;
+	struct vmw_cmdbuf_context *ctx;
+	int i;
+
+	for_each_cmdbuf_ctx(man, i, ctx)
+		vmw_cmdbuf_ctx_process(man, ctx, &notempty);
+
+	if (man->irq_on && !notempty) {
+		vmw_generic_waiter_remove(man->dev_priv,
+					  SVGA_IRQFLAG_COMMAND_BUFFER,
+					  &man->dev_priv->cmdbuf_waiters);
+		man->irq_on = false;
+	} else if (!man->irq_on && notempty) {
+		vmw_generic_waiter_add(man->dev_priv,
+				       SVGA_IRQFLAG_COMMAND_BUFFER,
+				       &man->dev_priv->cmdbuf_waiters);
+		man->irq_on = true;
+
+		/* Rerun in case we just missed an irq. */
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
+ * command buffer context
+ *
+ * @man: The command buffer manager.
+ * @header: The header of the buffer to submit.
+ * @cb_context: The command buffer context to use.
+ *
+ * This function adds @header to the "submitted" queue of the command
+ * buffer context identified by @cb_context. It then calls the command buffer
+ * manager processing to potentially submit the buffer to hardware.
+ * @man->lock needs to be held when calling this function.
+ */
+static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
+			       struct vmw_cmdbuf_header *header,
+			       SVGACBContext cb_context)
+{
+	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
+		header->cb_header->dxContext = 0;
+	header->cb_context = cb_context;
+	list_add_tail(&header->list, &man->ctx[cb_context].submitted);
+
+	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
+		vmw_cmdbuf_man_process(man);
+}
+
+/**
+ * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
+ * handler implemented as a tasklet.
+ *
+ * @data: Tasklet closure. A pointer to the command buffer manager cast to
+ * an unsigned long.
+ *
+ * The bottom half (tasklet) of the interrupt handler simply calls into the
+ * command buffer processor to free finished buffers and submit any
+ * queued buffers to hardware.
+ */
+static void vmw_cmdbuf_man_tasklet(unsigned long data)
+{
+	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
+
+	spin_lock(&man->lock);
+	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
+		(void) vmw_cmdbuf_man_process(man);
+	spin_unlock(&man->lock);
+}
+
+/**
+ * vmw_cmdbuf_work_func - The deferred work function that handles
+ * command buffer errors.
+ *
+ * @work: The work func closure argument.
+ *
+ * Restarting the command buffer context after an error requires process
+ * context, so it is deferred to this work function.
+ */
+static void vmw_cmdbuf_work_func(struct work_struct *work)
+{
+	struct vmw_cmdbuf_man *man =
+		container_of(work, struct vmw_cmdbuf_man, work);
+	struct vmw_cmdbuf_header *entry, *next;
+	bool restart = false;
+
+	spin_lock_bh(&man->lock);
+	list_for_each_entry_safe(entry, next, &man->error, list) {
+		restart = true;
+		DRM_ERROR("Command buffer error.\n");
+
+		list_del(&entry->list);
+		__vmw_cmdbuf_header_free(entry);
+		wake_up_all(&man->idle_queue);
+	}
+	spin_unlock_bh(&man->lock);
+
+	if (restart && vmw_cmdbuf_startstop(man, true))
+		DRM_ERROR("Failed restarting command buffer context 0.\n");
+
+}
+
+/**
+ * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
+ *
+ * @man: The command buffer manager.
+ * @check_preempted: Check also the preempted queue for pending command buffers.
+ *
+ */
+static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
+				bool check_preempted)
+{
+	struct vmw_cmdbuf_context *ctx;
+	bool idle = false;
+	int i;
+
+	spin_lock_bh(&man->lock);
+	vmw_cmdbuf_man_process(man);
+	for_each_cmdbuf_ctx(man, i, ctx) {
+		if (!list_empty(&ctx->submitted) ||
+		    !list_empty(&ctx->hw_submitted) ||
+		    (check_preempted && !list_empty(&ctx->preempted)))
+			goto out_unlock;
+	}
+
+	idle = list_empty(&man->error);
+
+out_unlock:
+	spin_unlock_bh(&man->lock);
+
+	return idle;
+}
+
+/**
+ * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
+ * command submissions
+ *
+ * @man: The command buffer manager.
+ *
+ * Flushes the current command buffer without allocating a new one. A new one
+ * is automatically allocated when needed. Call with @man->cur_mutex held.
+ */
+static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
+{
+	struct vmw_cmdbuf_header *cur = man->cur;
+
+	WARN_ON(!mutex_is_locked(&man->cur_mutex));
+
+	if (!cur)
+		return;
+
+	spin_lock_bh(&man->lock);
+	if (man->cur_pos == 0) {
+		__vmw_cmdbuf_header_free(cur);
+		goto out_unlock;
+	}
+
+	man->cur->cb_header->length = man->cur_pos;
+	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
+out_unlock:
+	spin_unlock_bh(&man->lock);
+	man->cur = NULL;
+	man->cur_pos = 0;
+}
+
+/**
+ * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
+ * command submissions
+ *
+ * @man: The command buffer manager.
+ * @interruptible: Whether to sleep interruptible when sleeping.
+ *
+ * Flushes the current command buffer without allocating a new one. A new one
+ * is automatically allocated when needed.
+ */
+int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
+			 bool interruptible)
+{
+	int ret = vmw_cmdbuf_cur_lock(man, interruptible);
+
+	if (ret)
+		return ret;
+
+	__vmw_cmdbuf_cur_flush(man);
+	vmw_cmdbuf_cur_unlock(man);
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_idle - Wait for command buffer manager idle.
+ *
+ * @man: The command buffer manager.
+ * @interruptible: Sleep interruptible while waiting.
+ * @timeout: Time out after this many ticks.
+ *
+ * Wait until the command buffer manager has processed all command buffers,
+ * or until a timeout occurs. If a timeout occurs, the function will return
+ * -EBUSY.
+ */
+int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
+		    unsigned long timeout)
+{
+	int ret;
+
+	ret = vmw_cmdbuf_cur_flush(man, interruptible);
+	vmw_generic_waiter_add(man->dev_priv,
+			       SVGA_IRQFLAG_COMMAND_BUFFER,
+			       &man->dev_priv->cmdbuf_waiters);
+
+	if (interruptible) {
+		ret = wait_event_interruptible_timeout
+			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
+			 timeout);
+	} else {
+		ret = wait_event_timeout
+			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
+			 timeout);
+	}
+	vmw_generic_waiter_remove(man->dev_priv,
+				  SVGA_IRQFLAG_COMMAND_BUFFER,
+				  &man->dev_priv->cmdbuf_waiters);
+	if (ret == 0) {
+		if (!vmw_cmdbuf_man_idle(man, true))
+			ret = -EBUSY;
+		else
+			ret = 0;
+	}
+	if (ret > 0)
+		ret = 0;
+
+	return ret;
+}
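+
+/*
+ * Caller-side sketch (mirrors vmw_cmdbuf_remove_pool() and
+ * vmw_cmdbuf_man_destroy() below), waiting up to ten seconds for all
+ * queued command buffers to drain:
+ *
+ *	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
+ */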
+
+/**
+ * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @info: Allocation info. Will hold the size on entry and allocated mm node
+ * on successful return.
+ *
+ * Try to allocate buffer space from the main pool. Returns true if the
+ * allocation succeeded, in which case @info->done is also set.
+ */
+static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
+				 struct vmw_cmdbuf_alloc_info *info)
+{
+	int ret;
+
+	if (info->done)
+		return true;
+
+	memset(info->node, 0, sizeof(*info->node));
+	spin_lock_bh(&man->lock);
+	ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
+					 0, 0,
+					 DRM_MM_SEARCH_DEFAULT,
+					 DRM_MM_CREATE_DEFAULT);
+	spin_unlock_bh(&man->lock);
+	info->done = !ret;
+
+	return info->done;
+}
+
+/**
+ * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @node: Pointer to pre-allocated range-manager node.
+ * @size: The size of the allocation.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ *
+ * This function allocates buffer space from the main pool, and if there is
+ * no space available ATM, it turns on IRQ handling and sleeps waiting for it to
+ * become available.
+ */
+static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
+				  struct drm_mm_node *node,
+				  size_t size,
+				  bool interruptible)
+{
+	struct vmw_cmdbuf_alloc_info info;
+
+	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	info.node = node;
+	info.done = false;
+
+	/*
+	 * To prevent starvation of large requests, only one allocating call
+	 * at a time waiting for space.
+	 */
+	if (interruptible) {
+		if (mutex_lock_interruptible(&man->space_mutex))
+			return -ERESTARTSYS;
+	} else {
+		mutex_lock(&man->space_mutex);
+	}
+
+	/* Try to allocate space without waiting. */
+	if (vmw_cmdbuf_try_alloc(man, &info))
+		goto out_unlock;
+
+	vmw_generic_waiter_add(man->dev_priv,
+			       SVGA_IRQFLAG_COMMAND_BUFFER,
+			       &man->dev_priv->cmdbuf_waiters);
+
+	if (interruptible) {
+		int ret;
+
+		ret = wait_event_interruptible
+			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
+		if (ret) {
+			vmw_generic_waiter_remove
+				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
+				 &man->dev_priv->cmdbuf_waiters);
+			mutex_unlock(&man->space_mutex);
+			return ret;
+		}
+	} else {
+		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
+	}
+	vmw_generic_waiter_remove(man->dev_priv,
+				  SVGA_IRQFLAG_COMMAND_BUFFER,
+				  &man->dev_priv->cmdbuf_waiters);
+
+out_unlock:
+	mutex_unlock(&man->space_mutex);
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
+ * space from the main pool.
+ *
+ * @man: The command buffer manager.
+ * @header: Pointer to the header to set up.
+ * @size: The requested size of the buffer space.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ */
+static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
+				 struct vmw_cmdbuf_header *header,
+				 size_t size,
+				 bool interruptible)
+{
+	SVGACBHeader *cb_hdr;
+	size_t offset;
+	int ret;
+
+	if (!man->has_pool)
+		return -ENOMEM;
+
+	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
+
+	if (ret)
+		return ret;
+
+	header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
+					   &header->handle);
+	if (!header->cb_header) {
+		ret = -ENOMEM;
+		goto out_no_cb_header;
+	}
+
+	header->size = header->node.size << PAGE_SHIFT;
+	cb_hdr = header->cb_header;
+	offset = header->node.start << PAGE_SHIFT;
+	header->cmd = man->map + offset;
+	memset(cb_hdr, 0, sizeof(*cb_hdr));
+	if (man->using_mob) {
+		cb_hdr->flags = SVGA_CB_FLAG_MOB;
+		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
+		cb_hdr->ptr.mob.mobOffset = offset;
+	} else {
+		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
+	}
+
+	return 0;
+
+out_no_cb_header:
+	spin_lock_bh(&man->lock);
+	drm_mm_remove_node(&header->node);
+	spin_unlock_bh(&man->lock);
+
+	return ret;
+}
+
+/**
+ * vmw_cmdbuf_space_inline - Set up a command buffer header with
+ * inline command buffer space.
+ *
+ * @man: The command buffer manager.
+ * @header: Pointer to the header to set up.
+ * @size: The requested size of the buffer space.
+ */
+static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
+				   struct vmw_cmdbuf_header *header,
+				   int size)
+{
+	struct vmw_cmdbuf_dheader *dheader;
+	SVGACBHeader *cb_hdr;
+
+	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
+		return -ENOMEM;
+
+	dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
+				 &header->handle);
+	if (!dheader)
+		return -ENOMEM;
+
+	header->inline_space = true;
+	header->size = VMW_CMDBUF_INLINE_SIZE;
+	cb_hdr = &dheader->cb_header;
+	header->cb_header = cb_hdr;
+	header->cmd = dheader->cmd;
+	memset(dheader, 0, sizeof(*dheader));
+	cb_hdr->status = SVGA_CB_STATUS_NONE;
+	cb_hdr->flags = SVGA_CB_FLAG_NONE;
+	cb_hdr->ptr.pa = (u64)header->handle +
+		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
+ * command buffer space.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the buffer space.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ * @p_header: Points to a header pointer to populate on successful return.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer. The header pointer returned in @p_header should
+ * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
+ */
+void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
+		       size_t size, bool interruptible,
+		       struct vmw_cmdbuf_header **p_header)
+{
+	struct vmw_cmdbuf_header *header;
+	int ret = 0;
+
+	*p_header = NULL;
+
+	header = kzalloc(sizeof(*header), GFP_KERNEL);
+	if (!header)
+		return ERR_PTR(-ENOMEM);
+
+	if (size <= VMW_CMDBUF_INLINE_SIZE)
+		ret = vmw_cmdbuf_space_inline(man, header, size);
+	else
+		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
+
+	if (ret) {
+		kfree(header);
+		return ERR_PTR(ret);
+	}
+
+	header->man = man;
+	INIT_LIST_HEAD(&header->list);
+	header->cb_header->status = SVGA_CB_STATUS_NONE;
+	*p_header = header;
+
+	return header->cmd;
+}
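+
+/*
+ * Minimal usage sketch for the function above; @size, @ctx_id and the
+ * command contents are illustrative only:
+ *
+ *	struct vmw_cmdbuf_header *header;
+ *	void *cmd = vmw_cmdbuf_alloc(man, size, true, &header);
+ *
+ *	if (IS_ERR(cmd))
+ *		return PTR_ERR(cmd);
+ *	cmd = vmw_cmdbuf_reserve(man, size, ctx_id, true, header);
+ *	... write size bytes of commands at cmd ...
+ *	vmw_cmdbuf_commit(man, size, header, true);
+ */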
+
+/**
+ * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
+ * command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the commands.
+ * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer.
+ */
+static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
+				    size_t size,
+				    int ctx_id,
+				    bool interruptible)
+{
+	struct vmw_cmdbuf_header *cur;
+	void *ret;
+
+	if (vmw_cmdbuf_cur_lock(man, interruptible))
+		return ERR_PTR(-ERESTARTSYS);
+
+	cur = man->cur;
+	if (cur && (size + man->cur_pos > cur->size ||
+		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
+		     ctx_id != cur->cb_header->dxContext)))
+		__vmw_cmdbuf_cur_flush(man);
+
+	if (!man->cur) {
+		ret = vmw_cmdbuf_alloc(man,
+				       max_t(size_t, size, man->default_size),
+				       interruptible, &man->cur);
+		if (IS_ERR(ret)) {
+			vmw_cmdbuf_cur_unlock(man);
+			return ret;
+		}
+
+		cur = man->cur;
+	}
+
+	if (ctx_id != SVGA3D_INVALID_ID) {
+		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
+		cur->cb_header->dxContext = ctx_id;
+	}
+
+	cur->reserved = size;
+
+	return (void *) (man->cur->cmd + man->cur_pos);
+}
+
+/**
+ * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the commands actually written.
+ * @flush: Whether to flush the command buffer immediately.
+ */
+static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
+				  size_t size, bool flush)
+{
+	struct vmw_cmdbuf_header *cur = man->cur;
+
+	WARN_ON(!mutex_is_locked(&man->cur_mutex));
+
+	WARN_ON(size > cur->reserved);
+	man->cur_pos += size;
+	if (!size)
+		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
+	if (flush)
+		__vmw_cmdbuf_cur_flush(man);
+	vmw_cmdbuf_cur_unlock(man);
+}
+
+/**
+ * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The requested size of the commands.
+ * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
+ * @interruptible: Whether to sleep interruptible while waiting for space.
+ * @header: Header of the command buffer. NULL if the current command buffer
+ * should be used.
+ *
+ * Returns a pointer to command buffer space if successful. Otherwise
+ * returns an error pointer.
+ */
+void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
+			 int ctx_id, bool interruptible,
+			 struct vmw_cmdbuf_header *header)
+{
+	if (!header)
+		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
+
+	if (size > header->size)
+		return ERR_PTR(-EINVAL);
+
+	if (ctx_id != SVGA3D_INVALID_ID) {
+		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
+		header->cb_header->dxContext = ctx_id;
+	}
+
+	header->reserved = size;
+	return header->cmd;
+}
+
+/**
+ * vmw_cmdbuf_commit - Commit commands in a command buffer.
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the commands actually written.
+ * @header: Header of the command buffer. NULL if the current command buffer
+ * should be used.
+ * @flush: Whether to flush the command buffer immediately.
+ */
+void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
+		       struct vmw_cmdbuf_header *header, bool flush)
+{
+	if (!header) {
+		vmw_cmdbuf_commit_cur(man, size, flush);
+		return;
+	}
+
+	(void) vmw_cmdbuf_cur_lock(man, false);
+	__vmw_cmdbuf_cur_flush(man);
+	WARN_ON(size > header->reserved);
+	man->cur = header;
+	man->cur_pos = size;
+	if (!size)
+		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
+	if (flush)
+		__vmw_cmdbuf_cur_flush(man);
+	vmw_cmdbuf_cur_unlock(man);
+}
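+
+/*
+ * For small kernel submissions the current-buffer path is used instead,
+ * by passing a NULL header (sketch; size and context id are illustrative):
+ *
+ *	cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID, true, NULL);
+ *	if (IS_ERR(cmd))
+ *		return PTR_ERR(cmd);
+ *	... write commands ...
+ *	vmw_cmdbuf_commit(man, size, NULL, false);
+ */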
+
+/**
+ * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
+ *
+ * @man: The command buffer manager.
+ */
+void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
+{
+	if (!man)
+		return;
+
+	tasklet_schedule(&man->tasklet);
+}
+
+/**
+ * vmw_cmdbuf_send_device_command - Send a command through the device context.
+ *
+ * @man: The command buffer manager.
+ * @command: Pointer to the command to send.
+ * @size: Size of the command.
+ *
+ * Synchronously sends a device context command.
+ */
+static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
+					  const void *command,
+					  size_t size)
+{
+	struct vmw_cmdbuf_header *header;
+	int status;
+	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
+
+	if (IS_ERR(cmd))
+		return PTR_ERR(cmd);
+
+	memcpy(cmd, command, size);
+	header->cb_header->length = size;
+	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
+	spin_lock_bh(&man->lock);
+	status = vmw_cmdbuf_header_submit(header);
+	spin_unlock_bh(&man->lock);
+	vmw_cmdbuf_header_free(header);
+
+	if (status != SVGA_CB_STATUS_COMPLETED) {
+		DRM_ERROR("Device context command failed with status %d\n",
+			  status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmdbuf_startstop - Send a start / stop command through the device
+ * context.
+ *
+ * @man: The command buffer manager.
+ * @enable: Whether to enable or disable the context.
+ *
+ * Synchronously sends a device start / stop context command.
+ */
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
+				bool enable)
+{
+	struct {
+		uint32 id;
+		SVGADCCmdStartStop body;
+	} __packed cmd;
+
+	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
+	cmd.body.enable = (enable) ? 1 : 0;
+	cmd.body.context = SVGA_CB_CONTEXT_0;
+
+	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
+}
+
+/**
+ * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
+ *
+ * @man: The command buffer manager.
+ * @size: The size of the main space pool.
+ * @default_size: The default size of the command buffer for small kernel
+ * submissions.
+ *
+ * Set the size and allocate the main command buffer space pool,
+ * as well as the default size of the command buffer for
+ * small kernel submissions. If successful, this enables large command
+ * submissions. Note that this function requires that rudimentary command
+ * submission is already available and that the MOB memory manager is alive.
+ * Returns 0 on success. Negative error code on failure.
+ */
+int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
+			     size_t size, size_t default_size)
+{
+	struct vmw_private *dev_priv = man->dev_priv;
+	bool dummy;
+	int ret;
+
+	if (man->has_pool)
+		return -EINVAL;
+
+	/* First, try to allocate a huge chunk of DMA memory */
+	size = PAGE_ALIGN(size);
+	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
+				      &man->handle, GFP_KERNEL);
+	if (man->map) {
+		man->using_mob = false;
+	} else {
+		/*
+		 * DMA memory failed. If we can have command buffers in a
+		 * MOB, try to use that instead. Note that this will
+		 * actually call into the already enabled manager, when
+		 * binding the MOB.
+		 */
+		if (!(dev_priv->capabilities & SVGA_CAP_DX))
+			return -ENOMEM;
+
+		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
+				    &vmw_mob_ne_placement, 0, false, NULL,
+				    &man->cmd_space);
+		if (ret)
+			return ret;
+
+		man->using_mob = true;
+		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
+				  &man->map_obj);
+		if (ret)
+			goto out_no_map;
+
+		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
+	}
+
+	man->size = size;
+	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
+
+	man->has_pool = true;
+	man->default_size = default_size;
+	DRM_INFO("Using command buffers with %s pool.\n",
+		 (man->using_mob) ? "MOB" : "DMA");
+
+	return 0;
+
+out_no_map:
+	if (man->using_mob)
+		ttm_bo_unref(&man->cmd_space);
+
+	return ret;
+}
+
+/**
+ * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
+ * inline command buffer submissions only.
+ *
+ * @dev_priv: Pointer to device private structure.
+ *
+ * Returns a pointer to a command buffer manager on success or an error pointer
+ * on failure. The command buffer manager will be enabled for submissions of
+ * size VMW_CMDBUF_INLINE_SIZE only.
+ */
+struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
+{
+	struct vmw_cmdbuf_man *man;
+	struct vmw_cmdbuf_context *ctx;
+	int i;
+	int ret;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
+		return ERR_PTR(-ENOSYS);
+
+	man = kzalloc(sizeof(*man), GFP_KERNEL);
+	if (!man)
+		return ERR_PTR(-ENOMEM);
+
+	man->headers = dma_pool_create("vmwgfx cmdbuf",
+				       &dev_priv->dev->pdev->dev,
+				       sizeof(SVGACBHeader),
+				       64, PAGE_SIZE);
+	if (!man->headers) {
+		ret = -ENOMEM;
+		goto out_no_pool;
+	}
+
+	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
+					&dev_priv->dev->pdev->dev,
+					sizeof(struct vmw_cmdbuf_dheader),
+					64, PAGE_SIZE);
+	if (!man->dheaders) {
+		ret = -ENOMEM;
+		goto out_no_dpool;
+	}
+
+	for_each_cmdbuf_ctx(man, i, ctx)
+		vmw_cmdbuf_ctx_init(ctx);
+
+	INIT_LIST_HEAD(&man->error);
+	spin_lock_init(&man->lock);
+	mutex_init(&man->cur_mutex);
+	mutex_init(&man->space_mutex);
+	tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
+		     (unsigned long) man);
+	man->default_size = VMW_CMDBUF_INLINE_SIZE;
+	init_waitqueue_head(&man->alloc_queue);
+	init_waitqueue_head(&man->idle_queue);
+	man->dev_priv = dev_priv;
+	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
+	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
+	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
+			       &dev_priv->error_waiters);
+	ret = vmw_cmdbuf_startstop(man, true);
+	if (ret) {
+		DRM_ERROR("Failed starting command buffer context 0.\n");
+		vmw_cmdbuf_man_destroy(man);
+		return ERR_PTR(ret);
+	}
+
+	return man;
+
+out_no_dpool:
+	dma_pool_destroy(man->headers);
+out_no_pool:
+	kfree(man);
+
+	return ERR_PTR(ret);
+}
+
+/**
+ * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
+ *
+ * @man: Pointer to a command buffer manager.
+ *
+ * This function removes the main buffer space pool, and should be called
+ * before MOB memory management is removed. When this function has been called,
+ * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
+ * less are allowed, and the default size of the command buffer for small kernel
+ * submissions is also set to this size.
+ */
+void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
+{
+	if (!man->has_pool)
+		return;
+
+	man->has_pool = false;
+	man->default_size = VMW_CMDBUF_INLINE_SIZE;
+	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
+	if (man->using_mob) {
+		(void) ttm_bo_kunmap(&man->map_obj);
+		ttm_bo_unref(&man->cmd_space);
+	} else {
+		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
+				  man->size, man->map, man->handle);
+	}
+}
+
+/**
+ * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
+ *
+ * @man: Pointer to a command buffer manager.
+ *
+ * This function idles and then destroys a command buffer manager.
+ */
+void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
+{
+	WARN_ON_ONCE(man->has_pool);
+	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
+	if (vmw_cmdbuf_startstop(man, false))
+		DRM_ERROR("Failed stopping command buffer context 0.\n");
+
+	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
+				  &man->dev_priv->error_waiters);
+	tasklet_kill(&man->tasklet);
+	(void) cancel_work_sync(&man->work);
+	dma_pool_destroy(man->dheaders);
+	dma_pool_destroy(man->headers);
+	mutex_destroy(&man->cur_mutex);
+	mutex_destroy(&man->space_mutex);
+	kfree(man);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 21e9b7f..13db8a2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,15 +26,10 @@
  **************************************************************************/
 
 #include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
 
 #define VMW_CMDBUF_RES_MAN_HT_ORDER 12
 
-enum vmw_cmdbuf_res_state {
-	VMW_CMDBUF_RES_COMMITED,
-	VMW_CMDBUF_RES_ADD,
-	VMW_CMDBUF_RES_DEL
-};
-
 /**
  * struct vmw_cmdbuf_res - Command buffer managed resource entry.
  *
@@ -132,9 +127,12 @@
 
 	list_for_each_entry_safe(entry, next, list, head) {
 		list_del(&entry->head);
+		if (entry->res->func->commit_notify)
+			entry->res->func->commit_notify(entry->res,
+							entry->state);
 		switch (entry->state) {
 		case VMW_CMDBUF_RES_ADD:
-			entry->state = VMW_CMDBUF_RES_COMMITED;
+			entry->state = VMW_CMDBUF_RES_COMMITTED;
 			list_add_tail(&entry->head, &entry->man->list);
 			break;
 		case VMW_CMDBUF_RES_DEL:
@@ -175,7 +173,7 @@
 						 &entry->hash);
 			list_del(&entry->head);
 			list_add_tail(&entry->head, &entry->man->list);
-			entry->state = VMW_CMDBUF_RES_COMMITED;
+			entry->state = VMW_CMDBUF_RES_COMMITTED;
 			break;
 		default:
 			BUG();
@@ -231,6 +229,9 @@
  * @res_type: The resource type.
  * @user_key: The user-space id of the resource.
  * @list: The staging list.
+ * @res_p: If the resource is in an already committed state, points to the
+ * struct vmw_resource on successful return. The returned pointer is
+ * non-refcounted.
  *
  * This function looks up the struct vmw_cmdbuf_res entry from the manager
  * hash table and, if it exists, removes it. Depending on its current staging
@@ -240,7 +241,8 @@
 int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
 			  enum vmw_cmdbuf_res_type res_type,
 			  u32 user_key,
-			  struct list_head *list)
+			  struct list_head *list,
+			  struct vmw_resource **res_p)
 {
 	struct vmw_cmdbuf_res *entry;
 	struct drm_hash_item *hash;
@@ -256,12 +258,14 @@
 	switch (entry->state) {
 	case VMW_CMDBUF_RES_ADD:
 		vmw_cmdbuf_res_free(man, entry);
+		*res_p = NULL;
 		break;
-	case VMW_CMDBUF_RES_COMMITED:
+	case VMW_CMDBUF_RES_COMMITTED:
 		(void) drm_ht_remove_item(&man->resources, &entry->hash);
 		list_del(&entry->head);
 		entry->state = VMW_CMDBUF_RES_DEL;
 		list_add_tail(&entry->head, list);
+		*res_p = entry->res;
 		break;
 	default:
 		BUG();
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 5ac9287..443d1ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,19 +27,19 @@
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 #include "ttm/ttm_placement.h"
 
 struct vmw_user_context {
 	struct ttm_base_object base;
 	struct vmw_resource res;
-	struct vmw_ctx_binding_state cbs;
+	struct vmw_ctx_binding_state *cbs;
 	struct vmw_cmdbuf_res_manager *man;
+	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
+	spinlock_t cotable_lock;
+	struct vmw_dma_buffer *dx_query_mob;
 };
 
-
-
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
-
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_context_base_to_res(struct ttm_base_object *base);
@@ -51,12 +51,14 @@
 				 bool readback,
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
-					   bool rebind);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+static int vmw_dx_context_create(struct vmw_resource *res);
+static int vmw_dx_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_dx_context_destroy(struct vmw_resource *res);
+
 static uint64_t vmw_user_context_size;
 
 static const struct vmw_user_resource_conv user_context_conv = {
@@ -93,15 +95,38 @@
 	.unbind = vmw_gb_context_unbind
 };
 
-static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
-	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
-	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
-	[vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+static const struct vmw_res_func vmw_dx_context_func = {
+	.res_type = vmw_res_dx_context,
+	.needs_backup = true,
+	.may_evict = true,
+	.type_name = "dx contexts",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_dx_context_create,
+	.destroy = vmw_dx_context_destroy,
+	.bind = vmw_dx_context_bind,
+	.unbind = vmw_dx_context_unbind
+};
 
 /**
  * Context management:
  */
 
+static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
+{
+	struct vmw_resource *res;
+	int i;
+
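+	/* Detach each cotable under the lock; drop the reference outside it. */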
+	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+		spin_lock(&uctx->cotable_lock);
+		res = uctx->cotables[i];
+		uctx->cotables[i] = NULL;
+		spin_unlock(&uctx->cotable_lock);
+
+		if (res)
+			vmw_resource_unreference(&res);
+	}
+}
+
 static void vmw_hw_context_destroy(struct vmw_resource *res)
 {
 	struct vmw_user_context *uctx =
@@ -113,17 +138,19 @@
 	} *cmd;
 
 
-	if (res->func->destroy == vmw_gb_context_destroy) {
+	if (res->func->destroy == vmw_gb_context_destroy ||
+	    res->func->destroy == vmw_dx_context_destroy) {
 		mutex_lock(&dev_priv->cmdbuf_mutex);
 		vmw_cmdbuf_res_man_destroy(uctx->man);
 		mutex_lock(&dev_priv->binding_mutex);
-		(void) vmw_context_binding_state_kill(&uctx->cbs);
-		(void) vmw_gb_context_destroy(res);
+		vmw_binding_state_kill(uctx->cbs);
+		(void) res->func->destroy(res);
 		mutex_unlock(&dev_priv->binding_mutex);
 		if (dev_priv->pinned_bo != NULL &&
 		    !dev_priv->query_cid_valid)
 			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
+		vmw_context_cotables_unref(uctx);
 		return;
 	}
 
@@ -135,43 +162,67 @@
 		return;
 	}
 
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
-	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-	cmd->body.cid = cpu_to_le32(res->id);
+	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 }
 
 static int vmw_gb_context_init(struct vmw_private *dev_priv,
+			       bool dx,
 			       struct vmw_resource *res,
-			       void (*res_free) (struct vmw_resource *res))
+			       void (*res_free)(struct vmw_resource *res))
 {
-	int ret;
+	int ret, i;
 	struct vmw_user_context *uctx =
 		container_of(res, struct vmw_user_context, res);
 
+	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
+			    SVGA3D_CONTEXT_DATA_SIZE);
 	ret = vmw_resource_init(dev_priv, res, true,
-				res_free, &vmw_gb_context_func);
-	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+				res_free,
+				dx ? &vmw_dx_context_func :
+				&vmw_gb_context_func);
 	if (unlikely(ret != 0))
 		goto out_err;
 
 	if (dev_priv->has_mob) {
 		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
-		if (unlikely(IS_ERR(uctx->man))) {
+		if (IS_ERR(uctx->man)) {
 			ret = PTR_ERR(uctx->man);
 			uctx->man = NULL;
 			goto out_err;
 		}
 	}
 
-	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
-	INIT_LIST_HEAD(&uctx->cbs.list);
+	uctx->cbs = vmw_binding_state_alloc(dev_priv);
+	if (IS_ERR(uctx->cbs)) {
+		ret = PTR_ERR(uctx->cbs);
+		goto out_err;
+	}
+
+	spin_lock_init(&uctx->cotable_lock);
+
+	if (dx) {
+		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
+							      &uctx->res, i);
+			if (unlikely(uctx->cotables[i] == NULL)) {
+				ret = -ENOMEM;
+				goto out_cotables;
+			}
+		}
+	}
 
 	vmw_resource_activate(res, vmw_hw_context_destroy);
 	return 0;
 
+out_cotables:
+	vmw_context_cotables_unref(uctx);
 out_err:
 	if (res_free)
 		res_free(res);
@@ -182,7 +233,8 @@
 
 static int vmw_context_init(struct vmw_private *dev_priv,
 			    struct vmw_resource *res,
-			    void (*res_free) (struct vmw_resource *res))
+			    void (*res_free)(struct vmw_resource *res),
+			    bool dx)
 {
 	int ret;
 
@@ -192,7 +244,7 @@
 	} *cmd;
 
 	if (dev_priv->has_mob)
-		return vmw_gb_context_init(dev_priv, res, res_free);
+		return vmw_gb_context_init(dev_priv, dx, res, res_free);
 
 	ret = vmw_resource_init(dev_priv, res, false,
 				res_free, &vmw_legacy_context_func);
@@ -215,12 +267,12 @@
 		return -ENOMEM;
 	}
 
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
-	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-	cmd->body.cid = cpu_to_le32(res->id);
+	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 	vmw_resource_activate(res, vmw_hw_context_destroy);
 	return 0;
 
@@ -232,19 +284,10 @@
 	return ret;
 }
 
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
-{
-	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
-	int ret;
 
-	if (unlikely(res == NULL))
-		return NULL;
-
-	ret = vmw_context_init(dev_priv, res, NULL);
-
-	return (ret == 0) ? res : NULL;
-}
-
+/*
+ * GB context.
+ */
 
 static int vmw_gb_context_create(struct vmw_resource *res)
 {
@@ -281,7 +324,7 @@
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 
 	return 0;
 
@@ -309,7 +352,6 @@
 			  "binding.\n");
 		return -ENOMEM;
 	}
-
 	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = res->id;
@@ -346,7 +388,7 @@
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_state_scrub(&uctx->cbs);
+	vmw_binding_state_scrub(uctx->cbs);
 
 	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
@@ -414,7 +456,231 @@
 	if (dev_priv->query_cid == res->id)
 		dev_priv->query_cid_valid = false;
 	vmw_resource_release_id(res);
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
+
+	return 0;
+}
+
+/*
+ * DX context.
+ */
+
+static int vmw_dx_context_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDefineContext body;
+	} *cmd;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a context id.\n");
+		goto out_no_id;
+	}
+
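+	/* Only VMWGFX_NUM_DXCONTEXT DX context ids are available. */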
+	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "creation.\n");
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_fifo_resource_inc(dev_priv);
+
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	return ret;
+}
+
+static int vmw_dx_context_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindContext body;
+	} *cmd;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+
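+	/*
+	 * Bind the context to its backing MOB. validContents tells the
+	 * device whether the backup buffer already holds saved context
+	 * state that should be loaded.
+	 */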
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.validContents = res->backup_dirty;
+	res->backup_dirty = false;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_dx_context_scrub_cotables - Scrub all bindings and
+ * cotables from a context
+ *
+ * @ctx: Pointer to the context resource
+ * @readback: Whether to save the cotable contents on scrubbing.
+ *
+ * COtables must be unbound before their context, but unbinding requires
+ * the backup buffer to be reserved, whereas scrubbing does not.
+ * This function scrubs all cotables of a context, potentially reading back
+ * the contents into their backup buffers. However, scrubbing cotables
+ * also invalidates the device context, so all bindings are scrubbed first;
+ * that way it doesn't have to be done later on an invalid context.
+ */
+void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+				   bool readback)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+	int i;
+
+	vmw_binding_state_scrub(uctx->cbs);
+	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+		struct vmw_resource *res;
+
+		/* Avoid racing with ongoing cotable destruction. */
+		spin_lock(&uctx->cotable_lock);
+		res = uctx->cotables[vmw_cotable_scrub_order[i]];
+		if (res)
+			res = vmw_resource_reference_unless_doomed(res);
+		spin_unlock(&uctx->cotable_lock);
+		if (!res)
+			continue;
+
+		WARN_ON(vmw_cotable_scrub(res, readback));
+		vmw_resource_unreference(&res);
+	}
+}
+
+static int vmw_dx_context_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackContext body;
+	} *cmd1;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindContext body;
+	} *cmd2;
+	uint32_t submit_size;
+	uint8_t *cmd;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_dx_context_scrub_cotables(res, readback);
+
+	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
+	    readback) {
+		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
+		if (vmw_query_readback_all(uctx->dx_query_mob))
+			DRM_ERROR("Failed to read back query states\n");
+	}
+
+	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "unbinding.\n");
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+
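+	/*
+	 * Optionally read back the context state, then bind the context
+	 * to an invalid MOB id to detach it.
+	 */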
+	cmd2 = (void *) cmd;
+	if (readback) {
+		cmd1 = (void *) cmd;
+		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
+		cmd1->header.size = sizeof(cmd1->body);
+		cmd1->body.cid = res->id;
+		cmd2 = (void *) (&cmd1[1]);
+	}
+	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+	cmd2->header.size = sizeof(cmd2->body);
+	cmd2->body.cid = res->id;
+	cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+	vmw_fifo_commit(dev_priv, submit_size);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_fence_single_bo(bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+static int vmw_dx_context_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDestroyContext body;
+	} *cmd;
+
+	if (likely(res->id == -1))
+		return 0;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for context "
+			  "destruction.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = res->id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	if (dev_priv->query_cid == res->id)
+		dev_priv->query_cid_valid = false;
+	vmw_resource_release_id(res);
+	vmw_fifo_resource_dec(dev_priv);
 
 	return 0;
 }
@@ -435,6 +701,11 @@
 	    container_of(res, struct vmw_user_context, res);
 	struct vmw_private *dev_priv = res->dev_priv;
 
+	if (ctx->cbs)
+		vmw_binding_state_free(ctx->cbs);
+
+	(void) vmw_context_bind_dx_query(res, NULL);
+
 	ttm_base_object_kfree(ctx, base);
 	ttm_mem_global_free(vmw_mem_glob(dev_priv),
 			    vmw_user_context_size);
@@ -465,8 +736,8 @@
 	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
 }
 
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv)
+static int vmw_context_define(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv, bool dx)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct vmw_user_context *ctx;
@@ -476,6 +747,10 @@
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	int ret;
 
+	if (!dev_priv->has_dx && dx) {
+		DRM_ERROR("DX contexts not supported by device.\n");
+		return -EINVAL;
+	}
 
 	/*
 	 * Approximate idr memory usage with 128 bytes. It will be limited
@@ -516,7 +791,7 @@
 	 * From here on, the destructor takes over resource freeing.
 	 */
 
-	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
 	if (unlikely(ret != 0))
 		goto out_unlock;
 
@@ -535,371 +810,29 @@
 out_unlock:
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
-
 }
 
-/**
- * vmw_context_scrub_shader - scrub a shader binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
 {
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdSetShader body;
-	} *cmd;
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for shader "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.cid = bi->ctx->id;
-	cmd->body.type = bi->i1.shader_type;
-	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
+	return vmw_context_define(dev, data, file_priv, false);
 }
 
-/**
- * vmw_context_scrub_render_target - scrub a render target binding
- * from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
-					   bool rebind)
+int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv)
 {
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdSetRenderTarget body;
-	} *cmd;
+	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
+	struct drm_vmw_context_arg *rep = &arg->rep;
 
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for render target "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.cid = bi->ctx->id;
-	cmd->body.type = bi->i1.rt_type;
-	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	cmd->body.target.face = 0;
-	cmd->body.target.mipmap = 0;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
-}
-
-/**
- * vmw_context_scrub_texture - scrub a texture binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- *
- * TODO: Possibly complement this function with a function that takes
- * a list of texture bindings and combines them to a single command.
- */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
-				     bool rebind)
-{
-	struct vmw_private *dev_priv = bi->ctx->dev_priv;
-	struct {
-		SVGA3dCmdHeader header;
-		struct {
-			SVGA3dCmdSetTextureState c;
-			SVGA3dTextureState s1;
-		} body;
-	} *cmd;
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving FIFO space for texture "
-			  "unbinding.\n");
-		return -ENOMEM;
-	}
-
-
-	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
-	cmd->header.size = sizeof(cmd->body);
-	cmd->body.c.cid = bi->ctx->id;
-	cmd->body.s1.stage = bi->i1.texture_stage;
-	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
-	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
-	return 0;
-}
-
-/**
- * vmw_context_binding_drop: Stop tracking a context binding
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Stops tracking a context binding, and re-initializes its storage.
- * Typically used when the context binding is replaced with a binding to
- * another (or the same, for that matter) resource.
- */
-static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
-{
-	list_del(&cb->ctx_list);
-	if (!list_empty(&cb->res_list))
-		list_del(&cb->res_list);
-	cb->bi.ctx = NULL;
-}
-
-/**
- * vmw_context_binding_add: Start tracking a context binding
- *
- * @cbs: Pointer to the context binding state tracker.
- * @bi: Information about the binding to track.
- *
- * Performs basic checks on the binding to make sure arguments are within
- * bounds and then starts tracking the binding in the context binding
- * state structure @cbs.
- */
-int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
-			    const struct vmw_ctx_bindinfo *bi)
-{
-	struct vmw_ctx_binding *loc;
-
-	switch (bi->bt) {
-	case vmw_ctx_binding_rt:
-		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
-			DRM_ERROR("Illegal render target type %u.\n",
-				  (unsigned) bi->i1.rt_type);
-			return -EINVAL;
-		}
-		loc = &cbs->render_targets[bi->i1.rt_type];
-		break;
-	case vmw_ctx_binding_tex:
-		if (unlikely((unsigned)bi->i1.texture_stage >=
-			     SVGA3D_NUM_TEXTURE_UNITS)) {
-			DRM_ERROR("Illegal texture/sampler unit %u.\n",
-				  (unsigned) bi->i1.texture_stage);
-			return -EINVAL;
-		}
-		loc = &cbs->texture_units[bi->i1.texture_stage];
-		break;
-	case vmw_ctx_binding_shader:
-		if (unlikely((unsigned)bi->i1.shader_type >=
-			     SVGA3D_SHADERTYPE_MAX)) {
-			DRM_ERROR("Illegal shader type %u.\n",
-				  (unsigned) bi->i1.shader_type);
-			return -EINVAL;
-		}
-		loc = &cbs->shaders[bi->i1.shader_type];
-		break;
+	switch (arg->req) {
+	case drm_vmw_context_legacy:
+		return vmw_context_define(dev, rep, file_priv, false);
+	case drm_vmw_context_dx:
+		return vmw_context_define(dev, rep, file_priv, true);
 	default:
-		BUG();
-	}
-
-	if (loc->bi.ctx != NULL)
-		vmw_context_binding_drop(loc);
-
-	loc->bi = *bi;
-	loc->bi.scrubbed = false;
-	list_add_tail(&loc->ctx_list, &cbs->list);
-	INIT_LIST_HEAD(&loc->res_list);
-
-	return 0;
-}
-
-/**
- * vmw_context_binding_transfer: Transfer a context binding tracking entry.
- *
- * @cbs: Pointer to the persistent context binding state tracker.
- * @bi: Information about the binding to track.
- *
- */
-static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
-					 const struct vmw_ctx_bindinfo *bi)
-{
-	struct vmw_ctx_binding *loc;
-
-	switch (bi->bt) {
-	case vmw_ctx_binding_rt:
-		loc = &cbs->render_targets[bi->i1.rt_type];
 		break;
-	case vmw_ctx_binding_tex:
-		loc = &cbs->texture_units[bi->i1.texture_stage];
-		break;
-	case vmw_ctx_binding_shader:
-		loc = &cbs->shaders[bi->i1.shader_type];
-		break;
-	default:
-		BUG();
 	}
-
-	if (loc->bi.ctx != NULL)
-		vmw_context_binding_drop(loc);
-
-	if (bi->res != NULL) {
-		loc->bi = *bi;
-		list_add_tail(&loc->ctx_list, &cbs->list);
-		list_add_tail(&loc->res_list, &bi->res->binding_head);
-	}
-}
-
-/**
- * vmw_context_binding_kill - Kill a binding on the device
- * and stop tracking it.
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Emits FIFO commands to scrub a binding represented by @cb.
- * Then stops tracking the binding and re-initializes its storage.
- */
-static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
-{
-	if (!cb->bi.scrubbed) {
-		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
-		cb->bi.scrubbed = true;
-	}
-	vmw_context_binding_drop(cb);
-}
-
-/**
- * vmw_context_binding_state_kill - Kill all bindings associated with a
- * struct vmw_ctx_binding state structure, and re-initialize the structure.
- *
- * @cbs: Pointer to the context binding state tracker.
- *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker. Then re-initializes the whole structure.
- */
-static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
-{
-	struct vmw_ctx_binding *entry, *next;
-
-	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
-		vmw_context_binding_kill(entry);
-}
-
-/**
- * vmw_context_binding_state_scrub - Scrub all bindings associated with a
- * struct vmw_ctx_binding state structure.
- *
- * @cbs: Pointer to the context binding state tracker.
- *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker.
- */
-static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
-{
-	struct vmw_ctx_binding *entry;
-
-	list_for_each_entry(entry, &cbs->list, ctx_list) {
-		if (!entry->bi.scrubbed) {
-			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
-			entry->bi.scrubbed = true;
-		}
-	}
-}
-
-/**
- * vmw_context_binding_res_list_kill - Kill all bindings on a
- * resource binding list
- *
- * @head: list head of resource binding list
- *
- * Kills all bindings associated with a specific resource. Typically
- * called before the resource is destroyed.
- */
-void vmw_context_binding_res_list_kill(struct list_head *head)
-{
-	struct vmw_ctx_binding *entry, *next;
-
-	list_for_each_entry_safe(entry, next, head, res_list)
-		vmw_context_binding_kill(entry);
-}
-
-/**
- * vmw_context_binding_res_list_scrub - Scrub all bindings on a
- * resource binding list
- *
- * @head: list head of resource binding list
- *
- * Scrub all bindings associated with a specific resource. Typically
- * called before the resource is evicted.
- */
-void vmw_context_binding_res_list_scrub(struct list_head *head)
-{
-	struct vmw_ctx_binding *entry;
-
-	list_for_each_entry(entry, head, res_list) {
-		if (!entry->bi.scrubbed) {
-			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
-			entry->bi.scrubbed = true;
-		}
-	}
-}
-
-/**
- * vmw_context_binding_state_transfer - Commit staged binding info
- *
- * @ctx: Pointer to context to commit the staged binding info to.
- * @from: Staged binding info built during execbuf.
- *
- * Transfers binding info from a temporary structure to the persistent
- * structure in the context. This can be done once commands
- */
-void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
-					struct vmw_ctx_binding_state *from)
-{
-	struct vmw_user_context *uctx =
-		container_of(ctx, struct vmw_user_context, res);
-	struct vmw_ctx_binding *entry, *next;
-
-	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
-		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
-}
-
-/**
- * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
- *
- * @ctx: The context resource
- *
- * Walks through the context binding list and rebinds all scrubbed
- * resources.
- */
-int vmw_context_rebind_all(struct vmw_resource *ctx)
-{
-	struct vmw_ctx_binding *entry;
-	struct vmw_user_context *uctx =
-		container_of(ctx, struct vmw_user_context, res);
-	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
-	int ret;
-
-	list_for_each_entry(entry, &cbs->list, ctx_list) {
-		if (likely(!entry->bi.scrubbed))
-			continue;
-
-		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
-			    SVGA3D_INVALID_ID))
-			continue;
-
-		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
-		if (unlikely(ret != 0))
-			return ret;
-
-		entry->bi.scrubbed = false;
-	}
-
-	return 0;
+	return -EINVAL;
 }
 
 /**
@@ -912,10 +845,93 @@
  */
 struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
 {
-	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+
+	return vmw_binding_state_list(uctx->cbs);
 }
 
 struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
 {
 	return container_of(ctx, struct vmw_user_context, res)->man;
 }
+
+struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+					 SVGACOTableType cotable_type)
+{
+	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
+		return ERR_PTR(-EINVAL);
+
+	return vmw_resource_reference
+		(container_of(ctx, struct vmw_user_context, res)->
+		 cotables[cotable_type]);
+}
+
+/**
+ * vmw_context_binding_state - Return a pointer to a context binding state
+ * structure
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current state of bindings of the given context. Note that
+ * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx)
+{
+	return container_of(ctx, struct vmw_user_context, res)->cbs;
+}
+
+/**
+ * vmw_context_bind_dx_query - Set the query MOB for the context. If @mob
+ * is NULL, this function removes the association between the MOB and the
+ * context. This function assumes the binding_mutex is held.
+ *
+ * @ctx_res: The context resource
+ * @mob: a reference to the query MOB
+ *
+ * Returns -EINVAL if a MOB has already been set and does not match the one
+ * specified in the parameter.  0 otherwise.
+ */
+int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+			      struct vmw_dma_buffer *mob)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx_res, struct vmw_user_context, res);
+
+	if (mob == NULL) {
+		if (uctx->dx_query_mob) {
+			uctx->dx_query_mob->dx_query_ctx = NULL;
+			vmw_dmabuf_unreference(&uctx->dx_query_mob);
+			uctx->dx_query_mob = NULL;
+		}
+
+		return 0;
+	}
+
+	/* Can only have one MOB per context for queries */
+	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
+		return -EINVAL;
+
+	mob->dx_query_ctx  = ctx_res;
+
+	if (!uctx->dx_query_mob)
+		uctx->dx_query_mob = vmw_dmabuf_reference(mob);
+
+	return 0;
+}
+
+/**
+ * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
+ *
+ * @ctx_res: The context resource
+ */
+struct vmw_dma_buffer *
+vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
+{
+	struct vmw_user_context *uctx =
+		container_of(ctx_res, struct vmw_user_context, res);
+
+	return uctx->dx_query_mob;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
new file mode 100644
index 0000000..ce659a1
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -0,0 +1,662 @@
+/**************************************************************************
+ *
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Treat context object tables (COTables) as resources to make use of the
+ * resource backing MOB eviction mechanism, which is used to read back the
+ * COTable contents whenever the backing MOB is evicted.
+ */
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include <ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+
+/**
+ * struct vmw_cotable - Context Object Table resource
+ *
+ * @res: struct vmw_resource we are deriving from.
+ * @ctx: non-refcounted pointer to the owning context.
+ * @size_read_back: Size of data read back during eviction.
+ * @seen_entries: Seen entries in command stream for this cotable.
+ * @type: The cotable type.
+ * @scrubbed: Whether the cotable has been scrubbed.
+ * @resource_list: List of resources in the cotable.
+ */
+struct vmw_cotable {
+	struct vmw_resource res;
+	struct vmw_resource *ctx;
+	size_t size_read_back;
+	int seen_entries;
+	u32 type;
+	bool scrubbed;
+	struct list_head resource_list;
+};
+
+/**
+ * struct vmw_cotable_info - Static info about cotable types
+ *
+ * @min_initial_entries: Min number of initial entries at cotable allocation
+ * for this cotable type.
+ * @size: Size of each entry.
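+ * @unbind_func: Optional callback to scrub or destroy the entries tracked
+ * by a cotable of this type before it is unbound.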
+ */
+struct vmw_cotable_info {
+	u32 min_initial_entries;
+	u32 size;
+	void (*unbind_func)(struct vmw_private *, struct list_head *,
+			    bool);
+};
+
+static const struct vmw_cotable_info co_info[] = {
+	{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
+	{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
+	{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
+	{1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
+	{1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
+	{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
+	{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
+	{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
+	{1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
+	{1, sizeof(SVGACOTableDXQueryEntry), NULL},
+	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
+};
+
+/*
+ * Cotables with bindings that we remove must be scrubbed first;
+ * otherwise the device will swap in an invalid context if bindings are
+ * removed before the cotable is scrubbed.
+ */
+const SVGACOTableType vmw_cotable_scrub_order[] = {
+	SVGA_COTABLE_RTVIEW,
+	SVGA_COTABLE_DSVIEW,
+	SVGA_COTABLE_SRVIEW,
+	SVGA_COTABLE_DXSHADER,
+	SVGA_COTABLE_ELEMENTLAYOUT,
+	SVGA_COTABLE_BLENDSTATE,
+	SVGA_COTABLE_DEPTHSTENCIL,
+	SVGA_COTABLE_RASTERIZERSTATE,
+	SVGA_COTABLE_SAMPLER,
+	SVGA_COTABLE_STREAMOUTPUT,
+	SVGA_COTABLE_DXQUERY,
+};
+
+static int vmw_cotable_bind(struct vmw_resource *res,
+			    struct ttm_validate_buffer *val_buf);
+static int vmw_cotable_unbind(struct vmw_resource *res,
+			      bool readback,
+			      struct ttm_validate_buffer *val_buf);
+static int vmw_cotable_create(struct vmw_resource *res);
+static int vmw_cotable_destroy(struct vmw_resource *res);
+
+static const struct vmw_res_func vmw_cotable_func = {
+	.res_type = vmw_res_cotable,
+	.needs_backup = true,
+	.may_evict = true,
+	.type_name = "context guest backed object tables",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_cotable_create,
+	.destroy = vmw_cotable_destroy,
+	.bind = vmw_cotable_bind,
+	.unbind = vmw_cotable_unbind,
+};
+
+/**
+ * vmw_cotable - Convert a struct vmw_resource pointer to a struct
+ * vmw_cotable pointer
+ *
+ * @res: Pointer to the resource.
+ */
+static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_cotable, res);
+}
+
+/**
+ * vmw_cotable_destroy - Cotable resource destroy callback
+ *
+ * @res: Pointer to the cotable resource.
+ *
+ * There is no device cotable destroy command, so this function only
+ * makes sure that the resource id is set to invalid.
+ */
+static int vmw_cotable_destroy(struct vmw_resource *res)
+{
+	res->id = -1;
+	return 0;
+}
+
+/**
+ * vmw_cotable_unscrub - Undo a cotable scrub operation
+ *
+ * @res: Pointer to the cotable resource
+ *
+ * This function issues commands to (re)bind the cotable to
+ * its backing mob, which needs to be validated and reserved at this point.
+ * This is identical to bind() except the function interface looks different.
+ */
+static int vmw_cotable_unscrub(struct vmw_resource *res)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = &res->backup->base;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetCOTable body;
+	} *cmd;
+
+	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+	lockdep_assert_held(&bo->resv->lock.base);
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
+	if (!cmd) {
+		DRM_ERROR("Failed reserving FIFO space for cotable "
+			  "binding.\n");
+		return -ENOMEM;
+	}
+
+	WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
+	WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
+	cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = vcotbl->ctx->id;
+	cmd->body.type = vcotbl->type;
+	cmd->body.mobid = bo->mem.start;
+	cmd->body.validSizeInBytes = vcotbl->size_read_back;
+
+	vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
+	vcotbl->scrubbed = false;
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_bind - Cotable resource bind callback
+ *
+ * @res: Pointer to the cotable resource
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * for convenience / fencing.
+ *
+ * This function issues commands to (re)bind the cotable to
+ * its backing mob, which needs to be validated and reserved at this point.
+ */
+static int vmw_cotable_bind(struct vmw_resource *res,
+			    struct ttm_validate_buffer *val_buf)
+{
+	/*
+	 * The create() callback may have changed @res->backup without
+	 * the caller noticing, and with val_buf->bo still pointing to
+	 * the old backup buffer. Although hackish, and not used currently,
+	 * take the opportunity to correct the value here so that it's not
+	 * misused in the future.
+	 */
+	val_buf->bo = &res->backup->base;
+
+	return vmw_cotable_unscrub(res);
+}
+
+/**
+ * vmw_cotable_scrub - Scrub the cotable from the device.
+ *
+ * @res: Pointer to the cotable resource.
+ * @readback: Whether initiate a readback of the cotable data to the backup
+ * buffer.
+ *
+ * In some situations (context swapouts) it might be desirable to make the
+ * device forget about the cotable without performing a full unbind. A full
+ * unbind requires reserved backup buffers and it might not be possible to
+ * reserve them due to locking order violation issues. The vmw_cotable_scrub
+ * function implements a partial unbind() without that requirement but with the
+ * following restrictions.
+ * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
+ *    be called.
+ * 2) Before the cotable backing buffer is used by the CPU, or during the
+ *    resource destruction, vmw_cotable_unbind() must be called.
+ */
+int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	size_t submit_size;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackCOTable body;
+	} *cmd0;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetCOTable body;
+	} *cmd1;
+
+	if (vcotbl->scrubbed)
+		return 0;
+
+	if (co_info[vcotbl->type].unbind_func)
+		co_info[vcotbl->type].unbind_func(dev_priv,
+						  &vcotbl->resource_list,
+						  readback);
+	submit_size = sizeof(*cmd1);
+	if (readback)
+		submit_size += sizeof(*cmd0);
+
+	cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID);
+	if (!cmd1) {
+		DRM_ERROR("Failed reserving FIFO space for cotable "
+			  "unbinding.\n");
+		return -ENOMEM;
+	}
+
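+	/*
+	 * Emit an optional readback followed by a SET_COTABLE with an
+	 * invalid MOB id, detaching the cotable from the device.
+	 */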
+	vcotbl->size_read_back = 0;
+	if (readback) {
+		cmd0 = (void *) cmd1;
+		cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
+		cmd0->header.size = sizeof(cmd0->body);
+		cmd0->body.cid = vcotbl->ctx->id;
+		cmd0->body.type = vcotbl->type;
+		cmd1 = (void *) &cmd0[1];
+		vcotbl->size_read_back = res->backup_size;
+	}
+	cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
+	cmd1->header.size = sizeof(cmd1->body);
+	cmd1->body.cid = vcotbl->ctx->id;
+	cmd1->body.type = vcotbl->type;
+	cmd1->body.mobid = SVGA3D_INVALID_ID;
+	cmd1->body.validSizeInBytes = 0;
+	vmw_fifo_commit_flush(dev_priv, submit_size);
+	vcotbl->scrubbed = true;
+
+	/* Trigger a create() on next validate. */
+	res->id = -1;
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_unbind - Cotable resource unbind callback
+ *
+ * @res: Pointer to the cotable resource.
+ * @readback: Whether to read back cotable data to the backup buffer.
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * for convenience / fencing.
+ *
+ * Unbinds the cotable from the device and fences the backup buffer.
+ */
+static int vmw_cotable_unbind(struct vmw_resource *res,
+			      bool readback,
+			      struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+	struct vmw_fence_obj *fence;
+
+	if (list_empty(&res->mob_head))
+		return 0;
+
+	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
+	lockdep_assert_held(&bo->resv->lock.base);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	if (!vcotbl->scrubbed)
+		vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
+	mutex_unlock(&dev_priv->binding_mutex);
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+	vmw_fence_single_bo(bo, fence);
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_readback - Read back a cotable without unbinding.
+ *
+ * @res: The cotable resource.
+ *
+ * Reads back a cotable to its backing mob without scrubbing the MOB from
+ * the cotable. The MOB is fenced for subsequent CPU access.
+ */
+static int vmw_cotable_readback(struct vmw_resource *res)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackCOTable body;
+	} *cmd;
+	struct vmw_fence_obj *fence;
+
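+	/* A scrubbed cotable has no device-side contents to read back. */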
+	if (!vcotbl->scrubbed) {
+		cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
+					  SVGA3D_INVALID_ID);
+		if (!cmd) {
+			DRM_ERROR("Failed reserving FIFO space for cotable "
+				  "readback.\n");
+			return -ENOMEM;
+		}
+		cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
+		cmd->header.size = sizeof(cmd->body);
+		cmd->body.cid = vcotbl->ctx->id;
+		cmd->body.type = vcotbl->type;
+		vcotbl->size_read_back = res->backup_size;
+		vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	}
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+	vmw_fence_single_bo(&res->backup->base, fence);
+	vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_resize - Resize a cotable.
+ *
+ * @res: The cotable resource.
+ * @new_size: The new size.
+ *
+ * Resizes a cotable and binds the new backup buffer.
+ * On failure the cotable is left intact.
+ * Important! This function may not fail once the MOB switch has been
+ * committed to hardware. That would put the device context in an
+ * invalid state which we can't currently recover from.
+ */
+static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	struct vmw_dma_buffer *buf, *old_buf = res->backup;
+	struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
+	size_t old_size = res->backup_size;
+	size_t old_size_read_back = vcotbl->size_read_back;
+	size_t cur_size_read_back;
+	struct ttm_bo_kmap_obj old_map, new_map;
+	int ret;
+	size_t i;
+
+	ret = vmw_cotable_readback(res);
+	if (ret)
+		return ret;
+
+	cur_size_read_back = vcotbl->size_read_back;
+	vcotbl->size_read_back = old_size_read_back;
+
+	/*
+	 * While the device is processing, allocate and reserve a buffer object
+	 * for the new COTable. Initially pin the buffer object to make sure
+	 * we can use tryreserve without failure.
+	 */
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+			      true, vmw_dmabuf_bo_free);
+	if (ret) {
+		DRM_ERROR("Failed initializing new cotable MOB.\n");
+		return ret;
+	}
+
+	bo = &buf->base;
+	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL));
+
+	ret = ttm_bo_wait(old_bo, false, false, false);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed waiting for cotable unbind.\n");
+		goto out_wait;
+	}
+
+	/*
+	 * Do a page by page copy of COTables. This eliminates slow vmap()s.
+	 * This should really be a TTM utility.
+	 */
+	for (i = 0; i < old_bo->num_pages; ++i) {
+		bool dummy;
+
+		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed mapping old COTable on resize.\n");
+			goto out_wait;
+		}
+		ret = ttm_bo_kmap(bo, i, 1, &new_map);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed mapping new COTable on resize.\n");
+			goto out_map_new;
+		}
+		memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
+		       ttm_kmap_obj_virtual(&old_map, &dummy),
+		       PAGE_SIZE);
+		ttm_bo_kunmap(&new_map);
+		ttm_bo_kunmap(&old_map);
+	}
+
+	/* Unpin new buffer, and switch backup buffers. */
+	ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed validating new COTable backup buffer.\n");
+		goto out_wait;
+	}
+
+	res->backup = buf;
+	res->backup_size = new_size;
+	vcotbl->size_read_back = cur_size_read_back;
+
+	/*
+	 * Now tell the device to switch. If this fails, then we need to
+	 * revert the full resize.
+	 */
+	ret = vmw_cotable_unscrub(res);
+	if (ret) {
+		DRM_ERROR("Failed switching COTable backup buffer.\n");
+		res->backup = old_buf;
+		res->backup_size = old_size;
+		vcotbl->size_read_back = old_size_read_back;
+		goto out_wait;
+	}
+
+	/* Let go of the old mob. */
+	list_del(&res->mob_head);
+	list_add_tail(&res->mob_head, &buf->res_list);
+	vmw_dmabuf_unreference(&old_buf);
+	res->id = vcotbl->type;
+
+	return 0;
+
+out_map_new:
+	ttm_bo_kunmap(&old_map);
+out_wait:
+	ttm_bo_unreserve(bo);
+	vmw_dmabuf_unreference(&buf);
+
+	return ret;
+}
+
+/**
+ * vmw_cotable_create - Cotable resource create callback
+ *
+ * @res: Pointer to a cotable resource.
+ *
+ * There is no separate create command for cotables, so this callback, which
+ * is called before bind() in the validation sequence is instead used for two
+ * things.
+ * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
+ *    buffer, that is, if @res->mob_head is non-empty.
+ * 2) Resize the cotable if needed.
+ */
+static int vmw_cotable_create(struct vmw_resource *res)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+	size_t new_size = res->backup_size;
+	size_t needed_size;
+	int ret;
+
+	/* Check whether we need to resize the cotable */
+	needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
+	while (needed_size > new_size)
+		new_size *= 2;
+
+	if (likely(new_size <= res->backup_size)) {
+		if (vcotbl->scrubbed && !list_empty(&res->mob_head)) {
+			ret = vmw_cotable_unscrub(res);
+			if (ret)
+				return ret;
+		}
+		res->id = vcotbl->type;
+		return 0;
+	}
+
+	return vmw_cotable_resize(res, new_size);
+}
+
+/**
+ * vmw_hw_cotable_destroy - Cotable hw_destroy callback
+ *
+ * @res: Pointer to a cotable resource.
+ *
+ * The final (part of resource destruction) destroy callback.
+ */
+static void vmw_hw_cotable_destroy(struct vmw_resource *res)
+{
+	(void) vmw_cotable_destroy(res);
+}
+
+static size_t cotable_acc_size;
+
+/**
+ * vmw_cotable_free - Cotable resource destructor
+ *
+ * @res: Pointer to a cotable resource.
+ */
+static void vmw_cotable_free(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	kfree(res);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
+}
+
+/**
+ * vmw_cotable_alloc - Create a cotable resource
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @ctx: Pointer to the context resource; the cotable resource will not
+ * add a refcount to it.
+ * @type: The cotable type.
+ */
+struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
+				       struct vmw_resource *ctx,
+				       u32 type)
+{
+	struct vmw_cotable *vcotbl;
+	int ret;
+	u32 num_entries;
+
+	if (unlikely(cotable_acc_size == 0))
+		cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   cotable_acc_size, false, true);
+	if (unlikely(ret))
+		return ERR_PTR(ret);
+
+	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
+	if (unlikely(vcotbl == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_alloc;
+	}
+
+	ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
+				vmw_cotable_free, &vmw_cotable_func);
+	if (unlikely(ret != 0))
+		goto out_no_init;
+
+	INIT_LIST_HEAD(&vcotbl->resource_list);
+	vcotbl->res.id = type;
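+	/*
+	 * Start with one page of backing store; if that can't hold the
+	 * minimum number of initial entries, round up to the next page
+	 * boundary that can.
+	 */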
+	vcotbl->res.backup_size = PAGE_SIZE;
+	num_entries = PAGE_SIZE / co_info[type].size;
+	if (num_entries < co_info[type].min_initial_entries) {
+		vcotbl->res.backup_size = co_info[type].min_initial_entries *
+			co_info[type].size;
+		vcotbl->res.backup_size =
+			(vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+	}
+
+	vcotbl->scrubbed = true;
+	vcotbl->seen_entries = -1;
+	vcotbl->type = type;
+	vcotbl->ctx = ctx;
+
+	vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy);
+
+	return &vcotbl->res;
+
+out_no_init:
+	kfree(vcotbl);
+out_no_alloc:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
+	return ERR_PTR(ret);
+}
+
+/**
+ * vmw_cotable_notify - Notify the cotable about an item creation
+ *
+ * @res: Pointer to a cotable resource.
+ * @id: Item id.
+ */
+int vmw_cotable_notify(struct vmw_resource *res, int id)
+{
+	struct vmw_cotable *vcotbl = vmw_cotable(res);
+
+	if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
+		DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
+			  (unsigned) vcotbl->type, id);
+		return -EINVAL;
+	}
+
+	if (vcotbl->seen_entries < id) {
+		/* Trigger a call to create() on next validate */
+		res->id = -1;
+		vcotbl->seen_entries = id;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cotable_add_resource - add a resource to the cotable's list of
+ * active resources.
+ *
+ * @res: pointer to a struct vmw_resource representing the cotable.
+ * @head: pointer to the struct list_head member of the resource, dedicated
+ * to the cotable active resource list.
+ */
+void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
+{
+	struct vmw_cotable *vcotbl =
+		container_of(res, struct vmw_cotable, res);
+
+	list_add_tail(head, &vcotbl->resource_list);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 914b375..299925a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,25 +32,20 @@
 
 
 /**
- * vmw_dmabuf_to_placement - Validate a buffer to placement.
+ * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
  *
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to move.
- * @pin:  Pin buffer if true.
+ * @placement:  The placement to pin it.
  * @interruptible:  Use interruptible wait.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
  * Returns
  *  -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
-			    struct vmw_dma_buffer *buf,
-			    struct ttm_placement *placement,
-			    bool interruptible)
+int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
+				struct vmw_dma_buffer *buf,
+				struct ttm_placement *placement,
+				bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
 	int ret;
@@ -66,6 +61,8 @@
 		goto err;
 
 	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
 
 	ttm_bo_unreserve(bo);
 
@@ -75,12 +72,10 @@
 }
 
 /**
- * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
+ * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo if @pin == true to avoid failures.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
  *
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to move.
@@ -90,55 +85,34 @@
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
-			      struct vmw_dma_buffer *buf,
-			      bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+				  struct vmw_dma_buffer *buf,
+				  bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
-	struct ttm_placement *placement;
 	int ret;
 
 	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 	if (unlikely(ret != 0))
 		return ret;
 
-	if (pin)
-		vmw_execbuf_release_pinned_bo(dev_priv);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 
 	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
 	if (unlikely(ret != 0))
 		goto err;
 
-	/**
-	 * Put BO in VRAM if there is space, otherwise as a GMR.
-	 * If there is no space in VRAM and GMR ids are all used up,
-	 * start evicting GMRs to make room. If the DMA buffer can't be
-	 * used as a GMR, this will return -ENOMEM.
-	 */
-
-	if (pin)
-		placement = &vmw_vram_gmr_ne_placement;
-	else
-		placement = &vmw_vram_gmr_placement;
-
-	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
+			      false);
 	if (likely(ret == 0) || ret == -ERESTARTSYS)
-		goto err_unreserve;
+		goto out_unreserve;
 
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
 
-	/**
-	 * If that failed, try VRAM again, this time evicting
-	 * previous contents.
-	 */
+out_unreserve:
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
 
-	if (pin)
-		placement = &vmw_vram_ne_placement;
-	else
-		placement = &vmw_vram_placement;
-
-	ret = ttm_bo_validate(bo, placement, interruptible, false);
-
-err_unreserve:
 	ttm_bo_unreserve(bo);
 err:
 	ttm_write_unlock(&dev_priv->reservation_sem);
@@ -146,67 +120,50 @@
 }
 
 /**
- * vmw_dmabuf_to_vram - Move a buffer to vram.
+ * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
  *
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to move.
- * @pin:  Pin buffer in vram if true.
  * @interruptible:  Use interruptible wait.
  *
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
-		       struct vmw_dma_buffer *buf,
-		       bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+			   struct vmw_dma_buffer *buf,
+			   bool interruptible)
 {
-	struct ttm_placement *placement;
-
-	if (pin)
-		placement = &vmw_vram_ne_placement;
-	else
-		placement = &vmw_vram_placement;
-
-	return vmw_dmabuf_to_placement(dev_priv, buf,
-				       placement,
-				       interruptible);
+	return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
+					   interruptible);
 }
 
 /**
- * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
+ * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
- * Flushes and unpins the query bo if @pin == true to avoid failures.
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
  *
  * @dev_priv:  Driver private.
- * @buf:  DMA buffer to move.
- * @pin:  Pin buffer in vram if true.
+ * @buf:  DMA buffer to pin.
  * @interruptible:  Use interruptible wait.
  *
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
-				struct vmw_dma_buffer *buf,
-				bool pin, bool interruptible)
+int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
+				    struct vmw_dma_buffer *buf,
+				    bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
 	struct ttm_placement placement;
 	struct ttm_place place;
 	int ret = 0;
 
-	if (pin)
-		place = vmw_vram_ne_placement.placement[0];
-	else
-		place = vmw_vram_placement.placement[0];
+	place = vmw_vram_placement.placement[0];
 	place.lpfn = bo->num_pages;
-
 	placement.num_placement = 1;
 	placement.placement = &place;
 	placement.num_busy_placement = 1;
@@ -216,13 +173,16 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	if (pin)
-		vmw_execbuf_release_pinned_bo(dev_priv);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
 	if (unlikely(ret != 0))
 		goto err_unlock;
 
-	/* Is this buffer already in vram but not at the start of it? */
+	/*
+	 * Is this buffer already in vram but not at the start of it?
+	 * In that case, evict it first because TTM isn't good at handling
+	 * that situation.
+	 */
 	if (bo->mem.mem_type == TTM_PL_VRAM &&
 	    bo->mem.start < bo->num_pages &&
 	    bo->mem.start > 0)
@@ -230,8 +190,10 @@
 
 	ret = ttm_bo_validate(bo, &placement, interruptible, false);
 
-	/* For some reason we didn't up at the start of vram */
+	/* For some reason we didn't end up at the start of vram */
 	WARN_ON(ret == 0 && bo->offset != 0);
+	if (!ret)
+		vmw_bo_pin_reserved(buf, true);
 
 	ttm_bo_unreserve(bo);
 err_unlock:
@@ -240,13 +202,10 @@
 	return ret;
 }
 
-
 /**
- * vmw_dmabuf_upin - Unpin the buffer given buffer, does not move the buffer.
+ * vmw_dmabuf_unpin - Unpin the given buffer without moving it.
  *
- * May only be called by the current master since it assumes that the
- * master lock is the current master's lock.
- * This function takes the master's lock in write mode.
+ * This function takes the reservation_sem in write mode.
  *
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to unpin.
@@ -259,16 +218,25 @@
 		     struct vmw_dma_buffer *buf,
 		     bool interruptible)
 {
-	/*
-	 * We could in theory early out if the buffer is
-	 * unpinned but we need to lock and reserve the buffer
-	 * anyways so we don't gain much by that.
-	 */
-	return vmw_dmabuf_to_placement(dev_priv, buf,
-				       &vmw_evictable_placement,
-				       interruptible);
-}
+	struct ttm_buffer_object *bo = &buf->base;
+	int ret;
 
+	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+	if (unlikely(ret != 0))
+		goto err;
+
+	vmw_bo_pin_reserved(buf, false);
+
+	ttm_bo_unreserve(bo);
+
+err:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
 
 /**
  * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
@@ -291,21 +259,31 @@
 
 
 /**
- * vmw_bo_pin - Pin or unpin a buffer object without moving it.
+ * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
  *
- * @bo: The buffer object. Must be reserved.
+ * @vbo: The buffer object. Must be reserved.
  * @pin: Whether to pin or unpin.
  *
  */
-void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
+void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
 {
 	struct ttm_place pl;
 	struct ttm_placement placement;
+	struct ttm_buffer_object *bo = &vbo->base;
 	uint32_t old_mem_type = bo->mem.mem_type;
 	int ret;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 
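+	/*
+	 * Pinning is refcounted: only the first pin and the matching last
+	 * unpin actually change the TTM placement below.
+	 */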
+	if (pin) {
+		if (vbo->pin_count++ > 0)
+			return;
+	} else {
+		WARN_ON(vbo->pin_count <= 0);
+		if (--vbo->pin_count > 0)
+			return;
+	}
+
 	pl.fpfn = 0;
 	pl.lpfn = 0;
 	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 620bb5c..e13b20b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,6 +28,7 @@
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
+#include "vmwgfx_binding.h"
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_object.h>
@@ -127,6 +128,9 @@
 #define DRM_IOCTL_VMW_SYNCCPU					\
 	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
 		 struct drm_vmw_synccpu_arg)
+#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
+		struct drm_vmw_context_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
@@ -168,8 +172,8 @@
 		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
-		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED |
+		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
 		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
@@ -206,6 +210,9 @@
 	VMW_IOCTL_DEF(VMW_SYNCCPU,
 		      vmw_user_dmabuf_synccpu_ioctl,
 		      DRM_UNLOCKED | DRM_RENDER_ALLOW),
+	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
+		      vmw_extended_context_define_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
 };
 
 static struct pci_device_id vmw_pci_id_list[] = {
@@ -278,6 +285,8 @@
 		DRM_INFO("  Command Buffers 2.\n");
 	if (capabilities & SVGA_CAP_GBOBJECTS)
 		DRM_INFO("  Guest Backed Resources.\n");
+	if (capabilities & SVGA_CAP_DX)
+		DRM_INFO("  DX Features.\n");
 }
 
 /**
@@ -296,30 +305,31 @@
 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 {
 	int ret;
-	struct ttm_buffer_object *bo;
+	struct vmw_dma_buffer *vbo;
 	struct ttm_bo_kmap_obj map;
 	volatile SVGA3dQueryResult *result;
 	bool dummy;
 
 	/*
-	 * Create the bo as pinned, so that a tryreserve will
+	 * Create the vbo as pinned, so that a tryreserve will
 	 * immediately succeed. This is because we're the only
 	 * user of the bo currently.
 	 */
-	ret = ttm_bo_create(&dev_priv->bdev,
-			    PAGE_SIZE,
-			    ttm_bo_type_device,
-			    &vmw_sys_ne_placement,
-			    0, false, NULL,
-			    &bo);
+	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
+	if (!vbo)
+		return -ENOMEM;
 
+	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
+			      &vmw_sys_ne_placement, false,
+			      &vmw_dmabuf_bo_free);
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_bo_reserve(bo, false, true, false, NULL);
+	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
 	BUG_ON(ret != 0);
+	vmw_bo_pin_reserved(vbo, true);
 
-	ret = ttm_bo_kmap(bo, 0, 1, &map);
+	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
 	if (likely(ret == 0)) {
 		result = ttm_kmap_obj_virtual(&map, &dummy);
 		result->totalSize = sizeof(*result);
@@ -327,18 +337,55 @@
 		result->result32 = 0xff;
 		ttm_bo_kunmap(&map);
 	}
-	vmw_bo_pin(bo, false);
-	ttm_bo_unreserve(bo);
+	vmw_bo_pin_reserved(vbo, false);
+	ttm_bo_unreserve(&vbo->base);
 
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Dummy query buffer map failed.\n");
-		ttm_bo_unref(&bo);
+		vmw_dmabuf_unreference(&vbo);
 	} else
-		dev_priv->dummy_query_bo = bo;
+		dev_priv->dummy_query_bo = vbo;
 
 	return ret;
 }
 
+/**
+ * vmw_request_device_late - Perform late device setup
+ *
+ * @dev_priv: Pointer to device private.
+ *
+ * This function performs setup of otables and enables large command
+ * buffer submission. This setup is split out into a separate function
+ * because it reverts vmw_release_device_early and is intended to be
+ * used by an error path in the hibernation code.
+ */
+static int vmw_request_device_late(struct vmw_private *dev_priv)
+{
+	int ret;
+
+	if (dev_priv->has_mob) {
+		ret = vmw_otables_setup(dev_priv);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Unable to initialize "
+				  "guest Memory OBjects.\n");
+			return ret;
+		}
+	}
+
+	if (dev_priv->cman) {
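+		/* 1 MiB main pool; 8 KiB default space for small buffers. */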
+		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
+					       256*4096, 2*4096);
+		if (ret) {
+			struct vmw_cmdbuf_man *man = dev_priv->cman;
+
+			dev_priv->cman = NULL;
+			vmw_cmdbuf_man_destroy(man);
+		}
+	}
+
+	return 0;
+}
+
 static int vmw_request_device(struct vmw_private *dev_priv)
 {
 	int ret;
@@ -349,14 +396,16 @@
 		return ret;
 	}
 	vmw_fence_fifo_up(dev_priv->fman);
-	if (dev_priv->has_mob) {
-		ret = vmw_otables_setup(dev_priv);
-		if (unlikely(ret != 0)) {
-			DRM_ERROR("Unable to initialize "
-				  "guest Memory OBjects.\n");
-			goto out_no_mob;
-		}
+	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
+	if (IS_ERR(dev_priv->cman)) {
+		dev_priv->cman = NULL;
+		dev_priv->has_dx = false;
 	}
+
+	ret = vmw_request_device_late(dev_priv);
+	if (ret)
+		goto out_no_mob;
+
 	ret = vmw_dummy_query_bo_create(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_no_query_bo;
@@ -364,15 +413,29 @@
 	return 0;
 
 out_no_query_bo:
-	if (dev_priv->has_mob)
+	if (dev_priv->cman)
+		vmw_cmdbuf_remove_pool(dev_priv->cman);
+	if (dev_priv->has_mob) {
+		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
 		vmw_otables_takedown(dev_priv);
+	}
+	if (dev_priv->cman)
+		vmw_cmdbuf_man_destroy(dev_priv->cman);
 out_no_mob:
 	vmw_fence_fifo_down(dev_priv->fman);
 	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 	return ret;
 }
 
-static void vmw_release_device(struct vmw_private *dev_priv)
+/**
+ * vmw_release_device_early - Early part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the first part of command submission takedown, to be called before
+ * buffer management is taken down.
+ */
+static void vmw_release_device_early(struct vmw_private *dev_priv)
 {
 	/*
 	 * Previous destructions should've released
@@ -381,65 +444,31 @@
 
 	BUG_ON(dev_priv->pinned_bo != NULL);
 
-	ttm_bo_unref(&dev_priv->dummy_query_bo);
-	if (dev_priv->has_mob)
+	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+	if (dev_priv->cman)
+		vmw_cmdbuf_remove_pool(dev_priv->cman);
+
+	if (dev_priv->has_mob) {
+		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
 		vmw_otables_takedown(dev_priv);
-	vmw_fence_fifo_down(dev_priv->fman);
-	vmw_fifo_release(dev_priv, &dev_priv->fifo);
-}
-
-
-/**
- * Increase the 3d resource refcount.
- * If the count was prevously zero, initialize the fifo, switching to svga
- * mode. Note that the master holds a ref as well, and may request an
- * explicit switch to svga mode if fb is not running, using @unhide_svga.
- */
-int vmw_3d_resource_inc(struct vmw_private *dev_priv,
-			bool unhide_svga)
-{
-	int ret = 0;
-
-	mutex_lock(&dev_priv->release_mutex);
-	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
-		ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			--dev_priv->num_3d_resources;
-	} else if (unhide_svga) {
-		vmw_write(dev_priv, SVGA_REG_ENABLE,
-			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
-			  ~SVGA_REG_ENABLE_HIDE);
 	}
-
-	mutex_unlock(&dev_priv->release_mutex);
-	return ret;
 }
 
 /**
- * Decrease the 3d resource refcount.
- * If the count reaches zero, disable the fifo, switching to vga mode.
- * Note that the master holds a refcount as well, and may request an
- * explicit switch to vga mode when it releases its refcount to account
- * for the situation of an X server vt switch to VGA with 3d resources
- * active.
+ * vmw_release_device_late - Late part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the last part of the command submission takedown, to be called when
+ * command submission is no longer needed. It may wait on pending fences.
  */
-void vmw_3d_resource_dec(struct vmw_private *dev_priv,
-			 bool hide_svga)
+static void vmw_release_device_late(struct vmw_private *dev_priv)
 {
-	int32_t n3d;
+	vmw_fence_fifo_down(dev_priv->fman);
+	if (dev_priv->cman)
+		vmw_cmdbuf_man_destroy(dev_priv->cman);
 
-	mutex_lock(&dev_priv->release_mutex);
-	if (unlikely(--dev_priv->num_3d_resources == 0))
-		vmw_release_device(dev_priv);
-	else if (hide_svga)
-		vmw_write(dev_priv, SVGA_REG_ENABLE,
-			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
-			  SVGA_REG_ENABLE_HIDE);
-
-	n3d = (int32_t) dev_priv->num_3d_resources;
-	mutex_unlock(&dev_priv->release_mutex);
-
-	BUG_ON(n3d < 0);
+	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 }
 
 /**
@@ -603,6 +632,7 @@
 	spin_lock_init(&dev_priv->hw_lock);
 	spin_lock_init(&dev_priv->waiter_lock);
 	spin_lock_init(&dev_priv->cap_lock);
+	spin_lock_init(&dev_priv->svga_lock);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i) {
 		idr_init(&dev_priv->res_idr[i]);
@@ -673,22 +703,31 @@
 				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
 		dev_priv->max_mob_size =
 			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
-	} else
+		dev_priv->stdu_max_width =
+			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
+		dev_priv->stdu_max_height =
+			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
+
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
+		dev_priv->texture_max_width = vmw_read(dev_priv,
+						       SVGA_REG_DEV_CAP);
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
+		dev_priv->texture_max_height = vmw_read(dev_priv,
+							SVGA_REG_DEV_CAP);
+	} else {
+		dev_priv->texture_max_width = 8192;
+		dev_priv->texture_max_height = 8192;
 		dev_priv->prim_bb_mem = dev_priv->vram_size;
+	}
+
+	vmw_print_capabilities(dev_priv->capabilities);
 
 	ret = vmw_dma_masks(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_err0;
 
-	/*
-	 * Limit back buffer size to VRAM size.  Remove this once
-	 * screen targets are implemented.
-	 */
-	if (dev_priv->prim_bb_mem > dev_priv->vram_size)
-		dev_priv->prim_bb_mem = dev_priv->vram_size;
-
-	vmw_print_capabilities(dev_priv->capabilities);
-
 	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 		DRM_INFO("Max GMR ids is %u\n",
 			 (unsigned)dev_priv->max_gmr_ids);
@@ -714,17 +753,6 @@
 	dev_priv->active_master = &dev_priv->fbdev_master;
 
 
-	ret = ttm_bo_device_init(&dev_priv->bdev,
-				 dev_priv->bo_global_ref.ref.object,
-				 &vmw_bo_driver,
-				 dev->anon_inode->i_mapping,
-				 VMWGFX_FILE_PAGE_OFFSET,
-				 false);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
-		goto out_err1;
-	}
-
 	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
 					       dev_priv->mmio_size);
 
@@ -787,13 +815,28 @@
 		goto out_no_fman;
 	}
 
+	ret = ttm_bo_device_init(&dev_priv->bdev,
+				 dev_priv->bo_global_ref.ref.object,
+				 &vmw_bo_driver,
+				 dev->anon_inode->i_mapping,
+				 VMWGFX_FILE_PAGE_OFFSET,
+				 false);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
+		goto out_no_bdev;
+	}
 
+	/*
+	 * Enable VRAM, but initially don't use it until SVGA is enabled and
+	 * unhidden.
+	 */
 	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
 			     (dev_priv->vram_size >> PAGE_SHIFT));
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
 		goto out_no_vram;
 	}
+	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
 
 	dev_priv->has_gmr = true;
 	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
@@ -814,18 +857,28 @@
 		}
 	}
 
-	vmw_kms_save_vga(dev_priv);
+	if (dev_priv->has_mob) {
+		spin_lock(&dev_priv->cap_lock);
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
+		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+		spin_unlock(&dev_priv->cap_lock);
+	}
 
-	/* Start kms and overlay systems, needs fifo. */
+
 	ret = vmw_kms_init(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_no_kms;
 	vmw_overlay_init(dev_priv);
 
+	ret = vmw_request_device(dev_priv);
+	if (ret)
+		goto out_no_fifo;
+
+	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
+
 	if (dev_priv->enable_fb) {
-		ret = vmw_3d_resource_inc(dev_priv, true);
-		if (unlikely(ret != 0))
-			goto out_no_fifo;
+		vmw_fifo_resource_inc(dev_priv);
+		vmw_svga_enable(dev_priv);
 		vmw_fb_init(dev_priv);
 	}
 
@@ -838,13 +891,14 @@
 	vmw_overlay_close(dev_priv);
 	vmw_kms_close(dev_priv);
 out_no_kms:
-	vmw_kms_restore_vga(dev_priv);
 	if (dev_priv->has_mob)
 		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
 	if (dev_priv->has_gmr)
 		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 out_no_vram:
+	(void)ttm_bo_device_release(&dev_priv->bdev);
+out_no_bdev:
 	vmw_fence_manager_takedown(dev_priv->fman);
 out_no_fman:
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -860,13 +914,13 @@
 	iounmap(dev_priv->mmio_virt);
 out_err3:
 	arch_phys_wc_del(dev_priv->mmio_mtrr);
-	(void)ttm_bo_device_release(&dev_priv->bdev);
-out_err1:
 	vmw_ttm_global_release(dev_priv);
 out_err0:
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
 		idr_destroy(&dev_priv->res_idr[i]);
 
+	if (dev_priv->ctx.staged_bindings)
+		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 	kfree(dev_priv);
 	return ret;
 }
@@ -882,19 +936,24 @@
 		drm_ht_remove(&dev_priv->ctx.res_ht);
 	vfree(dev_priv->ctx.cmd_bounce);
 	if (dev_priv->enable_fb) {
+		vmw_fb_off(dev_priv);
 		vmw_fb_close(dev_priv);
-		vmw_kms_restore_vga(dev_priv);
-		vmw_3d_resource_dec(dev_priv, false);
+		vmw_fifo_resource_dec(dev_priv);
+		vmw_svga_disable(dev_priv);
 	}
+
 	vmw_kms_close(dev_priv);
 	vmw_overlay_close(dev_priv);
 
-	if (dev_priv->has_mob)
-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
 	if (dev_priv->has_gmr)
 		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 
+	vmw_release_device_early(dev_priv);
+	if (dev_priv->has_mob)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+	(void) ttm_bo_device_release(&dev_priv->bdev);
+	vmw_release_device_late(dev_priv);
 	vmw_fence_manager_takedown(dev_priv->fman);
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 		drm_irq_uninstall(dev_priv->dev);
@@ -906,7 +965,8 @@
 	ttm_object_device_release(&dev_priv->tdev);
 	iounmap(dev_priv->mmio_virt);
 	arch_phys_wc_del(dev_priv->mmio_mtrr);
-	(void)ttm_bo_device_release(&dev_priv->bdev);
+	if (dev_priv->ctx.staged_bindings)
+		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 	vmw_ttm_global_release(dev_priv);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -992,10 +1052,15 @@
 	}
 
 	/*
-	 * Check if we were previously master, but now dropped.
+	 * Check if we were previously master, but now dropped. In that
+	 * case, allow at least render node functionality.
 	 */
 	if (vmw_fp->locked_master) {
 		mutex_unlock(&dev->master_mutex);
+
+		if (flags & DRM_RENDER_ALLOW)
+			return NULL;
+
 		DRM_ERROR("Dropped master trying to access ioctl that "
 			  "requires authentication.\n");
 		return ERR_PTR(-EACCES);
@@ -1044,17 +1109,27 @@
 		const struct drm_ioctl_desc *ioctl =
 			&vmw_ioctls[nr - DRM_COMMAND_BASE];
 
-		if (unlikely(ioctl->cmd != cmd)) {
-			DRM_ERROR("Invalid command format, ioctl %d\n",
-				  nr - DRM_COMMAND_BASE);
-			return -EINVAL;
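+		/*
+		 * Execbuf is dispatched by hand since its argument struct
+		 * has grown over driver versions; it is size-checked against
+		 * the user-supplied ioctl size rather than a fixed encoding.
+		 */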
+		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
+			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
+			if (unlikely(ret != 0))
+				return ret;
+
+			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
+				goto out_io_encoding;
+
+			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
+							_IOC_SIZE(cmd));
 		}
+
+		if (unlikely(ioctl->cmd != cmd))
+			goto out_io_encoding;
+
 		flags = ioctl->flags;
 	} else if (!drm_ioctl_flags(nr, &flags))
 		return -EINVAL;
 
 	vmaster = vmw_master_check(dev, file_priv, flags);
-	if (unlikely(IS_ERR(vmaster))) {
+	if (IS_ERR(vmaster)) {
 		ret = PTR_ERR(vmaster);
 
 		if (ret != -ERESTARTSYS)
@@ -1068,6 +1143,12 @@
 		ttm_read_unlock(&vmaster->lock);
 
 	return ret;
+
+out_io_encoding:
+	DRM_ERROR("Invalid command format, ioctl %d\n",
+		  nr - DRM_COMMAND_BASE);
+
+	return -EINVAL;
 }
 
 static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
@@ -1086,30 +1167,11 @@
 
 static void vmw_lastclose(struct drm_device *dev)
 {
-	struct drm_crtc *crtc;
-	struct drm_mode_set set;
-	int ret;
-
-	set.x = 0;
-	set.y = 0;
-	set.fb = NULL;
-	set.mode = NULL;
-	set.connectors = NULL;
-	set.num_connectors = 0;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		set.crtc = crtc;
-		ret = drm_mode_set_config_internal(&set);
-		WARN_ON(ret != 0);
-	}
-
 }
 
 static void vmw_master_init(struct vmw_master *vmaster)
 {
 	ttm_lock_init(&vmaster->lock);
-	INIT_LIST_HEAD(&vmaster->fb_surf);
-	mutex_init(&vmaster->fb_surf_mutex);
 }
 
 static int vmw_master_create(struct drm_device *dev,
@@ -1137,7 +1199,6 @@
 	kfree(vmaster);
 }
 
-
 static int vmw_master_set(struct drm_device *dev,
 			  struct drm_file *file_priv,
 			  bool from_open)
@@ -1148,27 +1209,13 @@
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret = 0;
 
-	if (!dev_priv->enable_fb) {
-		ret = vmw_3d_resource_inc(dev_priv, true);
-		if (unlikely(ret != 0))
-			return ret;
-		vmw_kms_save_vga(dev_priv);
-		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
-	}
-
 	if (active) {
 		BUG_ON(active != &dev_priv->fbdev_master);
 		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
 		if (unlikely(ret != 0))
-			goto out_no_active_lock;
+			return ret;
 
 		ttm_lock_set_kill(&active->lock, true, SIGTERM);
-		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-		if (unlikely(ret != 0)) {
-			DRM_ERROR("Unable to clean VRAM on "
-				  "master drop.\n");
-		}
-
 		dev_priv->active_master = NULL;
 	}
 
@@ -1182,14 +1229,6 @@
 	dev_priv->active_master = vmaster;
 
 	return 0;
-
-out_no_active_lock:
-	if (!dev_priv->enable_fb) {
-		vmw_kms_restore_vga(dev_priv);
-		vmw_3d_resource_dec(dev_priv, true);
-		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-	}
-	return ret;
 }
 
 static void vmw_master_drop(struct drm_device *dev,
@@ -1214,16 +1253,9 @@
 	}
 
 	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
-	vmw_execbuf_release_pinned_bo(dev_priv);
 
-	if (!dev_priv->enable_fb) {
-		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-		if (unlikely(ret != 0))
-			DRM_ERROR("Unable to clean VRAM on master drop.\n");
-		vmw_kms_restore_vga(dev_priv);
-		vmw_3d_resource_dec(dev_priv, true);
-		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-	}
+	if (!dev_priv->enable_fb)
+		vmw_svga_disable(dev_priv);
 
 	dev_priv->active_master = &dev_priv->fbdev_master;
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
@@ -1233,6 +1265,76 @@
 		vmw_fb_on(dev_priv);
 }
 
+/**
+ * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Needs the reservation sem to be held in non-exclusive mode.
+ */
+static void __vmw_svga_enable(struct vmw_private *dev_priv)
+{
+	spin_lock(&dev_priv->svga_lock);
+	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
+		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
+	}
+	spin_unlock(&dev_priv->svga_lock);
+}
+
+/**
+ * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ */
+void vmw_svga_enable(struct vmw_private *dev_priv)
+{
+	ttm_read_lock(&dev_priv->reservation_sem, false);
+	__vmw_svga_enable(dev_priv);
+	ttm_read_unlock(&dev_priv->reservation_sem);
+}
+
+/**
+ * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Needs the reservation sem to be held in exclusive mode.
+ * Will not empty VRAM. VRAM must be emptied by caller.
+ */
+static void __vmw_svga_disable(struct vmw_private *dev_priv)
+{
+	spin_lock(&dev_priv->svga_lock);
+	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+		vmw_write(dev_priv, SVGA_REG_ENABLE,
+			  SVGA_REG_ENABLE_HIDE |
+			  SVGA_REG_ENABLE_ENABLE);
+	}
+	spin_unlock(&dev_priv->svga_lock);
+}
+
+/**
+ * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
+ * running.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Will empty VRAM.
+ */
+void vmw_svga_disable(struct vmw_private *dev_priv)
+{
+	ttm_write_lock(&dev_priv->reservation_sem, false);
+	spin_lock(&dev_priv->svga_lock);
+	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+		spin_unlock(&dev_priv->svga_lock);
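+		/* Eviction may sleep, so it must run outside the spinlock. */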
+		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
+			DRM_ERROR("Failed evicting VRAM buffers.\n");
+		vmw_write(dev_priv, SVGA_REG_ENABLE,
+			  SVGA_REG_ENABLE_HIDE |
+			  SVGA_REG_ENABLE_ENABLE);
+	} else
+		spin_unlock(&dev_priv->svga_lock);
+	ttm_write_unlock(&dev_priv->reservation_sem);
+}
 
 static void vmw_remove(struct pci_dev *pdev)
 {
@@ -1250,23 +1352,26 @@
 
 	switch (val) {
 	case PM_HIBERNATION_PREPARE:
-	case PM_SUSPEND_PREPARE:
+		if (dev_priv->enable_fb)
+			vmw_fb_off(dev_priv);
 		ttm_suspend_lock(&dev_priv->reservation_sem);
 
-		/**
+		/*
 		 * This empties VRAM and unbinds all GMR bindings.
 		 * Buffer contents is moved to swappable memory.
 		 */
 		vmw_execbuf_release_pinned_bo(dev_priv);
 		vmw_resource_evict_all(dev_priv);
+		vmw_release_device_early(dev_priv);
 		ttm_bo_swapout_all(&dev_priv->bdev);
-
+		vmw_fence_fifo_down(dev_priv->fman);
 		break;
 	case PM_POST_HIBERNATION:
-	case PM_POST_SUSPEND:
 	case PM_POST_RESTORE:
+		vmw_fence_fifo_up(dev_priv->fman);
 		ttm_suspend_unlock(&dev_priv->reservation_sem);
-
+		if (dev_priv->enable_fb)
+			vmw_fb_on(dev_priv);
 		break;
 	case PM_RESTORE_PREPARE:
 		break;
@@ -1276,20 +1381,13 @@
 	return 0;
 }
 
-/**
- * These might not be needed with the virtual SVGA device.
- */
-
 static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct vmw_private *dev_priv = vmw_priv(dev);
 
-	if (dev_priv->num_3d_resources != 0) {
-		DRM_INFO("Can't suspend or hibernate "
-			 "while 3D resources are active.\n");
+	if (dev_priv->refuse_hibernation)
 		return -EBUSY;
-	}
 
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
@@ -1321,56 +1419,62 @@
 	return vmw_pci_resume(pdev);
 }
 
-static int vmw_pm_prepare(struct device *kdev)
+static int vmw_pm_freeze(struct device *kdev)
 {
 	struct pci_dev *pdev = to_pci_dev(kdev);
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct vmw_private *dev_priv = vmw_priv(dev);
 
-	/**
-	 * Release 3d reference held by fbdev and potentially
-	 * stop fifo.
-	 */
 	dev_priv->suspended = true;
 	if (dev_priv->enable_fb)
-			vmw_3d_resource_dec(dev_priv, true);
+		vmw_fifo_resource_dec(dev_priv);
 
-	if (dev_priv->num_3d_resources != 0) {
-
-		DRM_INFO("Can't suspend or hibernate "
-			 "while 3D resources are active.\n");
-
+	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
+		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
 		if (dev_priv->enable_fb)
-			vmw_3d_resource_inc(dev_priv, true);
+			vmw_fifo_resource_inc(dev_priv);
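+		/* The PM notifier already did the early release; revert it. */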
+		WARN_ON(vmw_request_device_late(dev_priv));
 		dev_priv->suspended = false;
 		return -EBUSY;
 	}
 
+	if (dev_priv->enable_fb)
+		__vmw_svga_disable(dev_priv);
+
+	vmw_release_device_late(dev_priv);
+
 	return 0;
 }
 
-static void vmw_pm_complete(struct device *kdev)
+static int vmw_pm_restore(struct device *kdev)
 {
 	struct pci_dev *pdev = to_pci_dev(kdev);
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct vmw_private *dev_priv = vmw_priv(dev);
+	int ret;
 
 	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
 	(void) vmw_read(dev_priv, SVGA_REG_ID);
 
-	/**
-	 * Reclaim 3d reference held by fbdev and potentially
-	 * start fifo.
-	 */
 	if (dev_priv->enable_fb)
-			vmw_3d_resource_inc(dev_priv, false);
+		vmw_fifo_resource_inc(dev_priv);
+
+	ret = vmw_request_device(dev_priv);
+	if (ret)
+		return ret;
+
+	if (dev_priv->enable_fb)
+		__vmw_svga_enable(dev_priv);
 
 	dev_priv->suspended = false;
+
+	return 0;
 }
 
 static const struct dev_pm_ops vmw_pm_ops = {
-	.prepare = vmw_pm_prepare,
-	.complete = vmw_pm_complete,
+	.freeze = vmw_pm_freeze,
+	.thaw = vmw_pm_restore,
+	.restore = vmw_pm_restore,
 	.suspend = vmw_pm_suspend,
 	.resume = vmw_pm_resume,
 };
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 0336d49..6d02de6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -40,17 +40,17 @@
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20140704"
+#define VMWGFX_DRIVER_DATE "20150810"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 6
-#define VMWGFX_DRIVER_PATCHLEVEL 1
+#define VMWGFX_DRIVER_MINOR 9
+#define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
 #define VMWGFX_MAX_VALIDATIONS 2048
 #define VMWGFX_MAX_DISPLAYS 16
 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
-#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0
+#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
 
 /*
  * Perhaps we should have sysfs entries for these.
@@ -59,6 +59,8 @@
 #define VMWGFX_NUM_GB_SHADER 20000
 #define VMWGFX_NUM_GB_SURFACE 32768
 #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_DXCONTEXT 256
+#define VMWGFX_NUM_DXQUERY 512
 #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
 			VMWGFX_NUM_GB_SHADER +\
 			VMWGFX_NUM_GB_SURFACE +\
@@ -85,6 +87,9 @@
 struct vmw_dma_buffer {
 	struct ttm_buffer_object base;
 	struct list_head res_list;
+	s32 pin_count;
+	/* Not ref-counted.  Protected by binding_mutex */
+	struct vmw_resource *dx_query_ctx;
 };
 
 /**
@@ -113,6 +118,7 @@
 	bool backup_dirty; /* Protected by backup buffer reserved */
 	struct vmw_dma_buffer *backup;
 	unsigned long backup_offset;
+	unsigned long pin_count; /* Protected by resource reserved */
 	const struct vmw_res_func *func;
 	struct list_head lru_head; /* Protected by the resource lock */
 	struct list_head mob_head; /* Protected by @backup reserved */
@@ -130,6 +136,9 @@
 	vmw_res_surface,
 	vmw_res_stream,
 	vmw_res_shader,
+	vmw_res_dx_context,
+	vmw_res_cotable,
+	vmw_res_view,
 	vmw_res_max
 };
 
@@ -137,7 +146,8 @@
  * Resources that are managed using command streams.
  */
 enum vmw_cmdbuf_res_type {
-	vmw_cmdbuf_res_compat_shader
+	vmw_cmdbuf_res_shader,
+	vmw_cmdbuf_res_view
 };
 
 struct vmw_cmdbuf_res_manager;
@@ -160,11 +170,13 @@
 	struct drm_vmw_size *sizes;
 	uint32_t num_sizes;
 	bool scanout;
+	uint32_t array_size;
 	/* TODO so far just an extra pointer */
 	struct vmw_cursor_snooper snooper;
 	struct vmw_surface_offset *offsets;
 	SVGA3dTextureFilter autogen_filter;
 	uint32_t multisample_count;
+	struct list_head view_list;
 };
 
 struct vmw_marker_queue {
@@ -176,14 +188,15 @@
 
 struct vmw_fifo_state {
 	unsigned long reserved_size;
-	__le32 *dynamic_buffer;
-	__le32 *static_buffer;
+	u32 *dynamic_buffer;
+	u32 *static_buffer;
 	unsigned long static_buffer_size;
 	bool using_bounce_buffer;
 	uint32_t capabilities;
 	struct mutex fifo_mutex;
 	struct rw_semaphore rwsem;
 	struct vmw_marker_queue marker_queue;
+	bool dx;
 };
 
 struct vmw_relocation {
@@ -264,70 +277,15 @@
 };
 
 /*
- * enum vmw_ctx_binding_type - abstract resource to context binding types
+ * enum vmw_display_unit_type - Describes the display unit
  */
-enum vmw_ctx_binding_type {
-	vmw_ctx_binding_shader,
-	vmw_ctx_binding_rt,
-	vmw_ctx_binding_tex,
-	vmw_ctx_binding_max
+enum vmw_display_unit_type {
+	vmw_du_invalid = 0,
+	vmw_du_legacy,
+	vmw_du_screen_object,
+	vmw_du_screen_target
 };
 
-/**
- * struct vmw_ctx_bindinfo - structure representing a single context binding
- *
- * @ctx: Pointer to the context structure. NULL means the binding is not
- * active.
- * @res: Non ref-counted pointer to the bound resource.
- * @bt: The binding type.
- * @i1: Union of information needed to unbind.
- */
-struct vmw_ctx_bindinfo {
-	struct vmw_resource *ctx;
-	struct vmw_resource *res;
-	enum vmw_ctx_binding_type bt;
-	bool scrubbed;
-	union {
-		SVGA3dShaderType shader_type;
-		SVGA3dRenderTargetType rt_type;
-		uint32 texture_stage;
-	} i1;
-};
-
-/**
- * struct vmw_ctx_binding - structure representing a single context binding
- *                        - suitable for tracking in a context
- *
- * @ctx_list: List head for context.
- * @res_list: List head for bound resource.
- * @bi: Binding info
- */
-struct vmw_ctx_binding {
-	struct list_head ctx_list;
-	struct list_head res_list;
-	struct vmw_ctx_bindinfo bi;
-};
-
-
-/**
- * struct vmw_ctx_binding_state - context binding state
- *
- * @list: linked list of individual bindings.
- * @render_targets: Render target bindings.
- * @texture_units: Texture units/samplers bindings.
- * @shaders: Shader bindings.
- *
- * Note that this structure also provides storage space for the individual
- * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
- * for individual bindings.
- *
- */
-struct vmw_ctx_binding_state {
-	struct list_head list;
-	struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
-	struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
-	struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
-};
 
 struct vmw_sw_context{
 	struct drm_open_hash res_ht;
@@ -342,15 +300,21 @@
 	uint32_t *cmd_bounce;
 	uint32_t cmd_bounce_size;
 	struct list_head resource_list;
-	struct ttm_buffer_object *cur_query_bo;
+	struct list_head ctx_resource_list; /* For contexts and cotables */
+	struct vmw_dma_buffer *cur_query_bo;
 	struct list_head res_relocations;
 	uint32_t *buf_start;
 	struct vmw_res_cache_entry res_cache[vmw_res_max];
 	struct vmw_resource *last_query_ctx;
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
-	struct vmw_ctx_binding_state staged_bindings;
+	struct vmw_ctx_binding_state *staged_bindings;
+	bool staged_bindings_inuse;
 	struct list_head staged_cmd_res;
+	struct vmw_resource_val_node *dx_ctx_node;
+	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_resource *dx_query_ctx;
+	struct vmw_cmdbuf_res_manager *man;
 };
 
 struct vmw_legacy_display;
@@ -358,8 +322,6 @@
 
 struct vmw_master {
 	struct ttm_lock lock;
-	struct mutex fb_surf_mutex;
-	struct list_head fb_surf;
 };
 
 struct vmw_vga_topology_state {
@@ -370,6 +332,26 @@
 	uint32_t pos_y;
 };
 
+
+/*
+ * struct vmw_otable - Guest Memory OBject table metadata
+ *
+ * @size:           Size of the table (page-aligned).
+ * @page_table:     Pointer to a struct vmw_mob holding the page table.
+ */
+struct vmw_otable {
+	unsigned long size;
+	struct vmw_mob *page_table;
+	bool enabled;
+};
+
+struct vmw_otable_batch {
+	unsigned num_otables;
+	struct vmw_otable *otables;
+	struct vmw_resource *context;
+	struct ttm_buffer_object *otable_bo;
+};
+
 struct vmw_private {
 	struct ttm_bo_device bdev;
 	struct ttm_bo_global_ref bo_global_ref;
@@ -387,9 +369,13 @@
 	uint32_t mmio_size;
 	uint32_t fb_max_width;
 	uint32_t fb_max_height;
+	uint32_t texture_max_width;
+	uint32_t texture_max_height;
+	uint32_t stdu_max_width;
+	uint32_t stdu_max_height;
 	uint32_t initial_width;
 	uint32_t initial_height;
-	__le32 __iomem *mmio_virt;
+	u32 __iomem *mmio_virt;
 	int mmio_mtrr;
 	uint32_t capabilities;
 	uint32_t max_gmr_ids;
@@ -401,6 +387,7 @@
 	bool has_mob;
 	spinlock_t hw_lock;
 	spinlock_t cap_lock;
+	bool has_dx;
 
 	/*
 	 * VGA registers.
@@ -420,6 +407,7 @@
 	 */
 
 	void *fb_info;
+	enum vmw_display_unit_type active_display_unit;
 	struct vmw_legacy_display *ldu_priv;
 	struct vmw_screen_object_display *sou_priv;
 	struct vmw_overlay *overlay_priv;
@@ -453,6 +441,8 @@
 	spinlock_t waiter_lock;
 	int fence_queue_waiters; /* Protected by waiter_lock */
 	int goal_queue_waiters; /* Protected by waiter_lock */
+	int cmdbuf_waiters; /* Protected by irq_lock */
+	int error_waiters; /* Protected by irq_lock */
 	atomic_t fifo_queue_waiters;
 	uint32_t last_read_seqno;
 	spinlock_t irq_lock;
@@ -484,6 +474,7 @@
 
 	bool stealth;
 	bool enable_fb;
+	spinlock_t svga_lock;
 
 	/**
 	 * Master management.
@@ -493,9 +484,10 @@
 	struct vmw_master fbdev_master;
 	struct notifier_block pm_nb;
 	bool suspended;
+	bool refuse_hibernation;
 
 	struct mutex release_mutex;
-	uint32_t num_3d_resources;
+	atomic_t num_fifo_resources;
 
 	/*
 	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
@@ -507,8 +499,8 @@
 	 * are protected by the cmdbuf mutex.
 	 */
 
-	struct ttm_buffer_object *dummy_query_bo;
-	struct ttm_buffer_object *pinned_bo;
+	struct vmw_dma_buffer *dummy_query_bo;
+	struct vmw_dma_buffer *pinned_bo;
 	uint32_t query_cid;
 	uint32_t query_cid_valid;
 	bool dummy_query_bo_pinned;
@@ -531,8 +523,9 @@
 	/*
 	 * Guest Backed stuff
 	 */
-	struct ttm_buffer_object *otable_bo;
-	struct vmw_otable *otables;
+	struct vmw_otable_batch otable_batch;
+
+	struct vmw_cmdbuf_man *cman;
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -587,8 +580,9 @@
 	return val;
 }
 
-int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
-void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
+extern void vmw_svga_enable(struct vmw_private *dev_priv);
+extern void vmw_svga_disable(struct vmw_private *dev_priv);
+
 
 /**
  * GMR utilities - vmwgfx_gmr.c
@@ -610,7 +604,8 @@
 extern struct vmw_resource *
 vmw_resource_reference_unless_doomed(struct vmw_resource *res);
 extern int vmw_resource_validate(struct vmw_resource *res);
-extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+				bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
 extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 				  struct ttm_object_file *tfile,
@@ -660,10 +655,14 @@
 				  uint32_t *inout_id,
 				  struct vmw_resource **out);
 extern void vmw_resource_unreserve(struct vmw_resource *res,
+				   bool switch_backup,
 				   struct vmw_dma_buffer *new_backup,
 				   unsigned long new_backup_offset);
 extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 				     struct ttm_mem_reg *mem);
+extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
+				  struct ttm_mem_reg *mem);
+extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
 extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 				struct vmw_fence_obj *fence);
 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
@@ -671,25 +670,25 @@
 /**
  * DMA buffer helper routines - vmwgfx_dmabuf.c
  */
-extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
-				   struct vmw_dma_buffer *bo,
-				   struct ttm_placement *placement,
-				   bool interruptible);
-extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
-			      struct vmw_dma_buffer *buf,
-			      bool pin, bool interruptible);
-extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
-				     struct vmw_dma_buffer *buf,
-				     bool pin, bool interruptible);
-extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
+extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
 				       struct vmw_dma_buffer *bo,
-				       bool pin, bool interruptible);
+				       struct ttm_placement *placement,
+				       bool interruptible);
+extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+				  struct vmw_dma_buffer *buf,
+				  bool interruptible);
+extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+					 struct vmw_dma_buffer *buf,
+					 bool interruptible);
+extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+					   struct vmw_dma_buffer *bo,
+					   bool interruptible);
 extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
 			    struct vmw_dma_buffer *bo,
 			    bool interruptible);
 extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
 				 SVGAGuestPtr *ptr);
-extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
+extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
 
 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -717,7 +716,10 @@
 extern void vmw_fifo_release(struct vmw_private *dev_priv,
 			     struct vmw_fifo_state *fifo);
 extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
+extern void *
+vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
 extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
 extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
 			       uint32_t *seqno);
 extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
@@ -726,6 +728,8 @@
 extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
 extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
 				     uint32_t cid);
+extern int vmw_fifo_flush(struct vmw_private *dev_priv,
+			  bool interruptible);
 
 /**
  * TTM glue - vmwgfx_ttm_glue.c
@@ -750,6 +754,7 @@
 extern struct ttm_placement vmw_evictable_placement;
 extern struct ttm_placement vmw_srf_placement;
 extern struct ttm_placement vmw_mob_placement;
+extern struct ttm_placement vmw_mob_ne_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
 extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
@@ -800,14 +805,15 @@
  * Command submission - vmwgfx_execbuf.c
  */
 
-extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv);
+extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
+			     struct drm_file *file_priv, size_t size);
 extern int vmw_execbuf_process(struct drm_file *file_priv,
 			       struct vmw_private *dev_priv,
 			       void __user *user_commands,
 			       void *kernel_commands,
 			       uint32_t command_size,
 			       uint64_t throttle_us,
+			       uint32_t dx_context_handle,
 			       struct drm_vmw_fence_rep __user
 			       *user_fence_rep,
 			       struct vmw_fence_obj **out_fence);
@@ -826,6 +832,11 @@
 					*user_fence_rep,
 					struct vmw_fence_obj *fence,
 					uint32_t fence_handle);
+extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+				      struct ttm_buffer_object *bo,
+				      bool interruptible,
+				      bool validate_as_mob);
+
 
 /**
  * IRQs and waiting - vmwgfx_irq.c
@@ -833,8 +844,8 @@
 
 extern irqreturn_t vmw_irq_handler(int irq, void *arg);
 extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
-			     uint32_t seqno, bool interruptible,
-			     unsigned long timeout);
+			  uint32_t seqno, bool interruptible,
+			  unsigned long timeout);
 extern void vmw_irq_preinstall(struct drm_device *dev);
 extern int vmw_irq_postinstall(struct drm_device *dev);
 extern void vmw_irq_uninstall(struct drm_device *dev);
@@ -852,6 +863,10 @@
 extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
 extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
 extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
+extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
+				   int *waiter_count);
+extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+				      u32 flag, int *waiter_count);
 
 /**
  * Rudimentary fence-like objects currently used only for throttling -
@@ -861,9 +876,9 @@
 extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
 extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
 extern int vmw_marker_push(struct vmw_marker_queue *queue,
-			  uint32_t seqno);
+			   uint32_t seqno);
 extern int vmw_marker_pull(struct vmw_marker_queue *queue,
-			  uint32_t signaled_seqno);
+			   uint32_t signaled_seqno);
 extern int vmw_wait_lag(struct vmw_private *dev_priv,
 			struct vmw_marker_queue *queue, uint32_t us);
 
@@ -908,12 +923,6 @@
 		    uint32_t sid, int32_t destX, int32_t destY,
 		    struct drm_vmw_rect *clips,
 		    uint32_t num_clips);
-int vmw_kms_readback(struct vmw_private *dev_priv,
-		     struct drm_file *file_priv,
-		     struct vmw_framebuffer *vfb,
-		     struct drm_vmw_fence_rep __user *user_fence_rep,
-		     struct drm_vmw_rect *clips,
-		     uint32_t num_clips);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 
@@ -927,6 +936,10 @@
 int vmw_dumb_destroy(struct drm_file *file_priv,
 		     struct drm_device *dev,
 		     uint32_t handle);
+extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
+extern void vmw_resource_unpin(struct vmw_resource *res);
+extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
+
 /**
  * Overlay control - vmwgfx_overlay.c
  */
@@ -982,27 +995,33 @@
 
 extern const struct vmw_user_resource_conv *user_context_converter;
 
-extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
-
 extern int vmw_context_check(struct vmw_private *dev_priv,
 			     struct ttm_object_file *tfile,
 			     int id,
 			     struct vmw_resource **p_res);
 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
+extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+					     struct drm_file *file_priv);
 extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
-extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
-				   const struct vmw_ctx_bindinfo *ci);
-extern void
-vmw_context_binding_state_transfer(struct vmw_resource *res,
-				   struct vmw_ctx_binding_state *cbs);
-extern void vmw_context_binding_res_list_kill(struct list_head *head);
-extern void vmw_context_binding_res_list_scrub(struct list_head *head);
-extern int vmw_context_rebind_all(struct vmw_resource *ctx);
 extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
 extern struct vmw_cmdbuf_res_manager *
 vmw_context_res_man(struct vmw_resource *ctx);
+extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+						SVGACOTableType cotable_type);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
+struct vmw_ctx_binding_state;
+extern struct vmw_ctx_binding_state *
+vmw_context_binding_state(struct vmw_resource *ctx);
+extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+					  bool readback);
+extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+				     struct vmw_dma_buffer *mob);
+extern struct vmw_dma_buffer *
+vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
+
+
 /*
  * Surface management - vmwgfx_surface.c
  */
@@ -1025,6 +1044,16 @@
 			     uint32_t handle, int *id);
 extern int vmw_surface_validate(struct vmw_private *dev_priv,
 				struct vmw_surface *srf);
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+			       uint32_t user_accounting_size,
+			       uint32_t svga3d_flags,
+			       SVGA3dSurfaceFormat format,
+			       bool for_scanout,
+			       uint32_t num_mip_levels,
+			       uint32_t multisample_count,
+			       uint32_t array_size,
+			       struct drm_vmw_size size,
+			       struct vmw_surface **srf_out);
 
 /*
  * Shader management - vmwgfx_shader.c
@@ -1042,12 +1071,21 @@
 				 SVGA3dShaderType shader_type,
 				 size_t size,
 				 struct list_head *list);
-extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
-				    u32 user_key, SVGA3dShaderType shader_type,
-				    struct list_head *list);
+extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
+			     u32 user_key, SVGA3dShaderType shader_type,
+			     struct list_head *list);
+extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
+			     struct vmw_resource *ctx,
+			     u32 user_key,
+			     SVGA3dShaderType shader_type,
+			     struct list_head *list);
+extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
+					     struct list_head *list,
+					     bool readback);
+
 extern struct vmw_resource *
-vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
-			 u32 user_key, SVGA3dShaderType shader_type);
+vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+		  u32 user_key, SVGA3dShaderType shader_type);
 
 /*
  * Command buffer managed resources - vmwgfx_cmdbuf_res.c
@@ -1071,7 +1109,48 @@
 extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
 				 enum vmw_cmdbuf_res_type res_type,
 				 u32 user_key,
-				 struct list_head *list);
+				 struct list_head *list,
+				 struct vmw_resource **res);
+
+/*
+ * COTable management - vmwgfx_cotable.c
+ */
+extern const SVGACOTableType vmw_cotable_scrub_order[];
+extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
+					      struct vmw_resource *ctx,
+					      u32 type);
+extern int vmw_cotable_notify(struct vmw_resource *res, int id);
+extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
+extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
+				     struct list_head *head);
+
+/*
+ * Command buffer management - vmwgfx_cmdbuf.c
+ */
+struct vmw_cmdbuf_man;
+struct vmw_cmdbuf_header;
+
+extern struct vmw_cmdbuf_man *
+vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
+extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
+				    size_t size, size_t default_size);
+extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
+extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
+extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
+			   unsigned long timeout);
+extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
+				int ctx_id, bool interruptible,
+				struct vmw_cmdbuf_header *header);
+extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
+			      struct vmw_cmdbuf_header *header,
+			      bool flush);
+extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
+extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
+			      size_t size, bool interruptible,
+			      struct vmw_cmdbuf_header **p_header);
+extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
+extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
+				bool interruptible);
 
 
 /**
@@ -1116,4 +1195,14 @@
 {
 	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
 }
+
+static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
+{
+	atomic_inc(&dev_priv->num_fifo_resources);
+}
+
+static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
+{
+	atomic_dec(&dev_priv->num_fifo_resources);
+}
 #endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 97ad3bc..b565654 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,6 +29,8 @@
 #include "vmwgfx_reg.h"
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
 
 #define VMW_RES_HT_ORDER 12
 
@@ -59,8 +61,11 @@
  * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
  * @first_usage: Set to true the first time the resource is referenced in
  * the command stream.
- * @no_buffer_needed: Resources do not need to allocate buffer backup on
- * reservation. The command stream will provide one.
+ * @switching_backup: The command stream provides a new backup buffer for a
+ * resource.
+ * @no_buffer_needed: @switching_backup is true on first buffer reference,
+ * so resource reservation does not need to allocate a backup buffer for
+ * the resource.
  */
 struct vmw_resource_val_node {
 	struct list_head head;
@@ -69,8 +74,9 @@
 	struct vmw_dma_buffer *new_backup;
 	struct vmw_ctx_binding_state *staged_bindings;
 	unsigned long new_backup_offset;
-	bool first_usage;
-	bool no_buffer_needed;
+	u32 first_usage : 1;
+	u32 switching_backup : 1;
+	u32 no_buffer_needed : 1;
 };
 
 /**
@@ -92,22 +98,40 @@
 	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
 				       (_gb_disable), (_gb_enable)}
 
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					struct vmw_resource *ctx);
+static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGAMobId *id,
+				 struct vmw_dma_buffer **vmw_bo_p);
+static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
+				   struct vmw_dma_buffer *vbo,
+				   bool validate_as_mob,
+				   uint32_t *p_val_node);
+
+
 /**
- * vmw_resource_unreserve - unreserve resources previously reserved for
+ * vmw_resources_unreserve - unreserve resources previously reserved for
  * command submission.
  *
- * @list_head: list of resources to unreserve.
+ * @sw_context: Pointer to the software context.
  * @backoff: Whether command submission failed.
  */
-static void vmw_resource_list_unreserve(struct list_head *list,
-					bool backoff)
+static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
+				    bool backoff)
 {
 	struct vmw_resource_val_node *val;
+	struct list_head *list = &sw_context->resource_list;
+
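+	/* On successful submission, commit the staged DX query MOB binding. */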
+	if (sw_context->dx_query_mob && !backoff)
+		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
+					  sw_context->dx_query_mob);
 
 	list_for_each_entry(val, list, head) {
 		struct vmw_resource *res = val->res;
-		struct vmw_dma_buffer *new_backup =
-			backoff ? NULL : val->new_backup;
+		bool switch_backup = backoff ? false : val->switching_backup;
 
 		/*
 		 * Transfer staged context bindings to the
@@ -115,18 +139,71 @@
 		 */
 		if (unlikely(val->staged_bindings)) {
 			if (!backoff) {
-				vmw_context_binding_state_transfer
-					(val->res, val->staged_bindings);
+				vmw_binding_state_commit
+					(vmw_context_binding_state(val->res),
+					 val->staged_bindings);
 			}
-			kfree(val->staged_bindings);
+
+			if (val->staged_bindings != sw_context->staged_bindings)
+				vmw_binding_state_free(val->staged_bindings);
+			else
+				sw_context->staged_bindings_inuse = false;
 			val->staged_bindings = NULL;
 		}
-		vmw_resource_unreserve(res, new_backup,
-			val->new_backup_offset);
+		vmw_resource_unreserve(res, switch_backup, val->new_backup,
+				       val->new_backup_offset);
 		vmw_dmabuf_unreference(&val->new_backup);
 	}
 }
 
+/**
+ * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
+ * added to the validate list.
+ *
+ * @dev_priv: Pointer to the device private.
+ * @sw_context: The validation context.
+ * @node: The validation node holding this context.
+ */
+static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   struct vmw_resource_val_node *node)
+{
+	int ret;
+
+	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	if (!sw_context->staged_bindings) {
+		sw_context->staged_bindings =
+			vmw_binding_state_alloc(dev_priv);
+		if (IS_ERR(sw_context->staged_bindings)) {
+			DRM_ERROR("Failed to allocate context binding "
+				  "information.\n");
+			ret = PTR_ERR(sw_context->staged_bindings);
+			sw_context->staged_bindings = NULL;
+			goto out_err;
+		}
+	}
+
+	if (sw_context->staged_bindings_inuse) {
+		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
+		if (IS_ERR(node->staged_bindings)) {
+			DRM_ERROR("Failed to allocate context binding "
+				  "information.\n");
+			ret = PTR_ERR(node->staged_bindings);
+			node->staged_bindings = NULL;
+			goto out_err;
+		}
+	} else {
+		node->staged_bindings = sw_context->staged_bindings;
+		sw_context->staged_bindings_inuse = true;
+	}
+
+	return 0;
+out_err:
+	return ret;
+}
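
vmw_cmd_ctx_first_setup lets the first context referenced in a batch borrow the binding-state object cached in the software context, and only allocates fresh state once that cached object is in use. A hypothetical userspace sketch of this borrow-or-allocate pattern (all names invented):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct binding_state { int dummy; };

    struct batch {
        struct binding_state *cached;  /* pre-allocated once per batch */
        bool cached_inuse;
    };

    /* First caller borrows the cached state; later callers allocate. */
    static struct binding_state *get_state(struct batch *b)
    {
        if (!b->cached_inuse) {
            b->cached_inuse = true;
            return b->cached;
        }
        return calloc(1, sizeof(struct binding_state));
    }

    int main(void)
    {
        struct binding_state shared = { 0 };
        struct batch b = { .cached = &shared, .cached_inuse = false };

        struct binding_state *first = get_state(&b);   /* borrows &shared */
        struct binding_state *second = get_state(&b);  /* freshly allocated */

        printf("first borrowed: %d\n", first == &shared);
        free(second);
        return 0;
    }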
 
 /**
  * vmw_resource_val_add - Add a resource to the software context's
@@ -141,6 +218,7 @@
 				struct vmw_resource *res,
 				struct vmw_resource_val_node **p_node)
 {
+	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_resource_val_node *node;
 	struct drm_hash_item *hash;
 	int ret;
@@ -169,14 +247,90 @@
 		kfree(node);
 		return ret;
 	}
-	list_add_tail(&node->head, &sw_context->resource_list);
 	node->res = vmw_resource_reference(res);
 	node->first_usage = true;
-
 	if (unlikely(p_node != NULL))
 		*p_node = node;
 
-	return 0;
+	if (!dev_priv->has_mob) {
+		list_add_tail(&node->head, &sw_context->resource_list);
+		return 0;
+	}
+
+	switch (vmw_res_type(res)) {
+	case vmw_res_context:
+	case vmw_res_dx_context:
+		list_add(&node->head, &sw_context->ctx_resource_list);
+		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
+		break;
+	case vmw_res_cotable:
+		list_add_tail(&node->head, &sw_context->ctx_resource_list);
+		break;
+	default:
+		list_add_tail(&node->head, &sw_context->resource_list);
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * vmw_view_res_val_add - Add a view, and the surface it points to, to the
+ * validation list
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view: Pointer to the view resource.
+ *
+ * Returns 0 if success, negative error code otherwise.
+ */
+static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
+				struct vmw_resource *view)
+{
+	int ret;
+
+	/*
+	 * First add the resource the view is pointing to, otherwise
+	 * it may be swapped out when the view is validated.
+	 */
+	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
+	if (ret)
+		return ret;
+
+	return vmw_resource_val_add(sw_context, view, NULL);
+}
+
+/**
+ * vmw_view_id_val_add - Look up a view and add it, and the surface it points
+ * to, to the validation list.
+ *
+ * @sw_context: The software context holding the validation list.
+ * @view_type: The view type to look up.
+ * @id: view id of the view.
+ *
+ * The view is represented by a view id and the DX context it's created on,
+ * or scheduled for creation on. If no DX context is set, the function
+ * returns -EINVAL. Otherwise it returns 0 on success or a negative error
+ * code on failure.
+ */
+static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
+			       enum vmw_view_type view_type, u32 id)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *view;
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	view = vmw_view_lookup(sw_context->man, view_type, id);
+	if (IS_ERR(view))
+		return PTR_ERR(view);
+
+	ret = vmw_view_res_val_add(sw_context, view);
+	vmw_resource_unreference(&view);
+
+	return ret;
 }
 
 /**
@@ -195,24 +349,56 @@
 					struct vmw_resource *ctx)
 {
 	struct list_head *binding_list;
-	struct vmw_ctx_binding *entry;
+	struct vmw_ctx_bindinfo *entry;
 	int ret = 0;
 	struct vmw_resource *res;
+	u32 i;
 
+	/* Add all cotables to the validation list. */
+	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+			res = vmw_context_cotable(ctx, i);
+			if (IS_ERR(res))
+				continue;
+
+			ret = vmw_resource_val_add(sw_context, res, NULL);
+			vmw_resource_unreference(&res);
+			if (unlikely(ret != 0))
+				return ret;
+		}
+	}
+
+	/* Add all resources bound to the context to the validation list */
 	mutex_lock(&dev_priv->binding_mutex);
 	binding_list = vmw_context_binding_list(ctx);
 
 	list_for_each_entry(entry, binding_list, ctx_list) {
-		res = vmw_resource_reference_unless_doomed(entry->bi.res);
+		/* entry->res is not refcounted */
+		res = vmw_resource_reference_unless_doomed(entry->res);
 		if (unlikely(res == NULL))
 			continue;
 
-		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+		if (vmw_res_type(entry->res) == vmw_res_view)
+			ret = vmw_view_res_val_add(sw_context, entry->res);
+		else
+			ret = vmw_resource_val_add(sw_context, entry->res,
+						   NULL);
 		vmw_resource_unreference(&res);
 		if (unlikely(ret != 0))
 			break;
 	}
 
+	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
+		struct vmw_dma_buffer *dx_query_mob;
+
+		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
+		if (dx_query_mob)
+			ret = vmw_bo_to_validate_list(sw_context,
+						      dx_query_mob,
+						      true, NULL);
+	}
+
 	mutex_unlock(&dev_priv->binding_mutex);
 	return ret;
 }
@@ -308,7 +494,7 @@
  * submission is reached.
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct ttm_buffer_object *bo,
+				   struct vmw_dma_buffer *vbo,
 				   bool validate_as_mob,
 				   uint32_t *p_val_node)
 {
@@ -318,7 +504,7 @@
 	struct drm_hash_item *hash;
 	int ret;
 
-	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
 				    &hash) == 0)) {
 		vval_buf = container_of(hash, struct vmw_validate_buffer,
 					hash);
@@ -336,7 +522,7 @@
 			return -EINVAL;
 		}
 		vval_buf = &sw_context->val_bufs[val_node];
-		vval_buf->hash.key = (unsigned long) bo;
+		vval_buf->hash.key = (unsigned long) vbo;
 		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Failed to initialize a buffer validation "
@@ -345,7 +531,7 @@
 		}
 		++sw_context->cur_val_buf;
 		val_buf = &vval_buf->base;
-		val_buf->bo = ttm_bo_reference(bo);
+		val_buf->bo = ttm_bo_reference(&vbo->base);
 		val_buf->shared = false;
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		vval_buf->validate_as_mob = validate_as_mob;
@@ -370,27 +556,39 @@
 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 {
 	struct vmw_resource_val_node *val;
-	int ret;
+	int ret = 0;
 
 	list_for_each_entry(val, &sw_context->resource_list, head) {
 		struct vmw_resource *res = val->res;
 
-		ret = vmw_resource_reserve(res, val->no_buffer_needed);
+		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
 		if (unlikely(ret != 0))
 			return ret;
 
 		if (res->backup) {
-			struct ttm_buffer_object *bo = &res->backup->base;
+			struct vmw_dma_buffer *vbo = res->backup;
 
 			ret = vmw_bo_to_validate_list
-				(sw_context, bo,
+				(sw_context, vbo,
 				 vmw_resource_needs_backup(res), NULL);
 
 			if (unlikely(ret != 0))
 				return ret;
 		}
 	}
-	return 0;
+
+	if (sw_context->dx_query_mob) {
+		struct vmw_dma_buffer *expected_dx_query_mob;
+
+		expected_dx_query_mob =
+			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
+		if (expected_dx_query_mob &&
+		    expected_dx_query_mob != sw_context->dx_query_mob) {
+			ret = -EINVAL;
+		}
+	}
+
+	return ret;
 }
 
 /**
@@ -409,6 +607,7 @@
 
 	list_for_each_entry(val, &sw_context->resource_list, head) {
 		struct vmw_resource *res = val->res;
+		struct vmw_dma_buffer *backup = res->backup;
 
 		ret = vmw_resource_validate(res);
 		if (unlikely(ret != 0)) {
@@ -416,18 +615,29 @@
 				DRM_ERROR("Failed to validate resource.\n");
 			return ret;
 		}
+
+		/* Check if the resource switched backup buffer */
+		if (backup && res->backup && (backup != res->backup)) {
+			struct vmw_dma_buffer *vbo = res->backup;
+
+			ret = vmw_bo_to_validate_list
+				(sw_context, vbo,
+				 vmw_resource_needs_backup(res), NULL);
+			if (ret) {
+				ttm_bo_unreserve(&vbo->base);
+				return ret;
+			}
+		}
 	}
 	return 0;
 }
 
-
 /**
  * vmw_cmd_res_reloc_add - Add a resource to a software context's
  * relocation- and validation lists.
  *
  * @dev_priv: Pointer to a struct vmw_private identifying the device.
  * @sw_context: Pointer to the software context.
- * @res_type: Resource type.
  * @id_loc: Pointer to where the id that needs translation is located.
  * @res: Valid pointer to a struct vmw_resource.
  * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
@@ -435,7 +645,6 @@
  */
 static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
-				 enum vmw_res_type res_type,
 				 uint32_t *id_loc,
 				 struct vmw_resource *res,
 				 struct vmw_resource_val_node **p_val)
@@ -454,29 +663,6 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	if (res_type == vmw_res_context && dev_priv->has_mob &&
-	    node->first_usage) {
-
-		/*
-		 * Put contexts first on the list to be able to exit
-		 * list traversal for contexts early.
-		 */
-		list_del(&node->head);
-		list_add(&node->head, &sw_context->resource_list);
-
-		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
-		if (unlikely(ret != 0))
-			return ret;
-		node->staged_bindings =
-			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
-		if (node->staged_bindings == NULL) {
-			DRM_ERROR("Failed to allocate context binding "
-				  "information.\n");
-			return -ENOMEM;
-		}
-		INIT_LIST_HEAD(&node->staged_bindings->list);
-	}
-
 	if (p_val)
 		*p_val = node;
 
@@ -554,7 +740,7 @@
 	rcache->res = res;
 	rcache->handle = *id_loc;
 
-	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
+	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
 				    res, &node);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
@@ -573,6 +759,46 @@
 }
 
 /**
+ * vmw_rebind_all_dx_query - Rebind DX queries associated with the context
+ *
+ * @ctx_res: The context the queries belong to.
+ *
+ * This function assumes binding_mutex is held.
+ */
+static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
+{
+	struct vmw_private *dev_priv = ctx_res->dev_priv;
+	struct vmw_dma_buffer *dx_query_mob;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindAllQuery body;
+	} *cmd;
+
+	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
+
+	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
+		return 0;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
+
+	if (cmd == NULL) {
+		DRM_ERROR("Failed to rebind queries.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = ctx_res->id;
+	cmd->body.mobid = dx_query_mob->base.mem.start;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
+
+	return 0;
+}
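
vmw_rebind_all_dx_query follows the driver's usual reserve/fill/commit FIFO pattern: reserve space for a fixed-size command, fill in header and body, then commit exactly the reserved size. A simplified sketch of that pattern (plain C; the buffer, ids and sizes are invented, this is not the vmwgfx FIFO API):

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t fifo[4096];
    static size_t fifo_head;

    /* Hand out @size bytes at the current head; NULL if it won't fit. */
    static void *fifo_reserve(size_t size)
    {
        if (fifo_head + size > sizeof(fifo))
            return NULL;
        return &fifo[fifo_head];
    }

    /* Make the reserved bytes visible by advancing the head. */
    static void fifo_commit(size_t size)
    {
        fifo_head += size;
    }

    struct cmd_header { uint32_t id, size; };
    struct cmd_bind_all_query { uint32_t cid, mobid; };

    int main(void)
    {
        struct {
            struct cmd_header header;
            struct cmd_bind_all_query body;
        } *cmd;

        cmd = fifo_reserve(sizeof(*cmd));
        if (!cmd)
            return 1;                     /* mirrors the -ENOMEM path */

        cmd->header.id = 0x1234;          /* invented command id */
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = 7;                /* invented context id */
        cmd->body.mobid = 42;             /* invented MOB id */
        fifo_commit(sizeof(*cmd));

        printf("committed %zu bytes\n", sizeof(*cmd));
        return 0;
    }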
+
+/**
  * vmw_rebind_contexts - Rebind all resources previously bound to
  * referenced contexts.
  *
@@ -589,12 +815,80 @@
 		if (unlikely(!val->staged_bindings))
 			break;
 
-		ret = vmw_context_rebind_all(val->res);
+		ret = vmw_binding_rebind_all
+			(vmw_context_binding_state(val->res));
 		if (unlikely(ret != 0)) {
 			if (ret != -ERESTARTSYS)
 				DRM_ERROR("Failed to rebind context.\n");
 			return ret;
 		}
+
+		ret = vmw_rebind_all_dx_query(val->res);
+		if (ret != 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_view_bindings_add - Add an array of view bindings to a context
+ * binding state tracker.
+ *
+ * @sw_context: The execbuf state used for this command.
+ * @view_type: View type for the bindings.
+ * @binding_type: Binding type for the bindings.
+ * @shader_slot: The shader slot to use for the bindings.
+ * @view_ids: Array of view ids to be bound.
+ * @num_views: Number of view ids in @view_ids.
+ * @first_slot: The binding slot to be used for the first view id in @view_ids.
+ */
+static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
+				 enum vmw_view_type view_type,
+				 enum vmw_ctx_binding_type binding_type,
+				 uint32 shader_slot,
+				 uint32 view_ids[], u32 num_views,
+				 u32 first_slot)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_cmdbuf_res_manager *man;
+	u32 i;
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	man = sw_context->man;
+	for (i = 0; i < num_views; ++i) {
+		struct vmw_ctx_bindinfo_view binding;
+		struct vmw_resource *view = NULL;
+
+		if (view_ids[i] != SVGA3D_INVALID_ID) {
+			view = vmw_view_lookup(man, view_type, view_ids[i]);
+			if (IS_ERR(view)) {
+				DRM_ERROR("View not found.\n");
+				return PTR_ERR(view);
+			}
+
+			ret = vmw_view_res_val_add(sw_context, view);
+			if (ret) {
+				DRM_ERROR("Could not add view to "
+					  "validation list.\n");
+				vmw_resource_unreference(&view);
+				return ret;
+			}
+		}
+		binding.bi.ctx = ctx_node->res;
+		binding.bi.res = view;
+		binding.bi.bt = binding_type;
+		binding.shader_slot = shader_slot;
+		binding.slot = first_slot + i;
+		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+				shader_slot, binding.slot);
+		if (view)
+			vmw_resource_unreference(&view);
 	}
 
 	return 0;
@@ -638,6 +932,12 @@
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
 
+	if (cmd->body.type >= SVGA3D_RT_MAX) {
+		DRM_ERROR("Illegal render target type %u.\n",
+			  (unsigned) cmd->body.type);
+		return -EINVAL;
+	}
+
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->body.cid,
 				&ctx_node);
@@ -651,13 +951,14 @@
 		return ret;
 
 	if (dev_priv->has_mob) {
-		struct vmw_ctx_bindinfo bi;
+		struct vmw_ctx_bindinfo_view binding;
 
-		bi.ctx = ctx_node->res;
-		bi.res = res_node ? res_node->res : NULL;
-		bi.bt = vmw_ctx_binding_rt;
-		bi.i1.rt_type = cmd->body.type;
-		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+		binding.bi.ctx = ctx_node->res;
+		binding.bi.res = res_node ? res_node->res : NULL;
+		binding.bi.bt = vmw_ctx_binding_rt;
+		binding.slot = cmd->body.type;
+		vmw_binding_add(ctx_node->staged_bindings,
+				&binding.bi, 0, binding.slot);
 	}
 
 	return 0;
@@ -674,16 +975,62 @@
 	int ret;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
+
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				user_surface_converter,
 				&cmd->body.src.sid, NULL);
-	if (unlikely(ret != 0))
+	if (ret)
 		return ret;
+
 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 				 user_surface_converter,
 				 &cmd->body.dest.sid, NULL);
 }
 
+static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
+				      struct vmw_sw_context *sw_context,
+				      SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBufferCopy body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.src, NULL);
+	if (ret != 0)
+		return ret;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.dest, NULL);
+}
+
+static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXPredCopyRegion body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.srcSid, NULL);
+	if (ret != 0)
+		return ret;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.dstSid, NULL);
+}
+
 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
 				     struct vmw_sw_context *sw_context,
 				     SVGA3dCmdHeader *header)
@@ -752,7 +1099,7 @@
  * command batch.
  */
 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
-				       struct ttm_buffer_object *new_query_bo,
+				       struct vmw_dma_buffer *new_query_bo,
 				       struct vmw_sw_context *sw_context)
 {
 	struct vmw_res_cache_entry *ctx_entry =
@@ -764,7 +1111,7 @@
 
 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
-		if (unlikely(new_query_bo->num_pages > 4)) {
+		if (unlikely(new_query_bo->base.num_pages > 4)) {
 			DRM_ERROR("Query buffer too large.\n");
 			return -EINVAL;
 		}
@@ -833,12 +1180,12 @@
 
 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
 		if (dev_priv->pinned_bo) {
-			vmw_bo_pin(dev_priv->pinned_bo, false);
-			ttm_bo_unref(&dev_priv->pinned_bo);
+			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
 		}
 
 		if (!sw_context->needs_post_query_barrier) {
-			vmw_bo_pin(sw_context->cur_query_bo, true);
+			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
 
 			/*
 			 * We pin also the dummy_query_bo buffer so that we
@@ -846,14 +1193,17 @@
 			 * dummy queries in context destroy paths.
 			 */
 
-			vmw_bo_pin(dev_priv->dummy_query_bo, true);
-			dev_priv->dummy_query_bo_pinned = true;
+			if (!dev_priv->dummy_query_bo_pinned) {
+				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
+						    true);
+				dev_priv->dummy_query_bo_pinned = true;
+			}
 
 			BUG_ON(sw_context->last_query_ctx == NULL);
 			dev_priv->query_cid = sw_context->last_query_ctx->id;
 			dev_priv->query_cid_valid = true;
 			dev_priv->pinned_bo =
-				ttm_bo_reference(sw_context->cur_query_bo);
+				vmw_dmabuf_reference(sw_context->cur_query_bo);
 		}
 	}
 }
@@ -882,7 +1232,6 @@
 				 struct vmw_dma_buffer **vmw_bo_p)
 {
 	struct vmw_dma_buffer *vmw_bo = NULL;
-	struct ttm_buffer_object *bo;
 	uint32_t handle = *id;
 	struct vmw_relocation *reloc;
 	int ret;
@@ -893,7 +1242,6 @@
 		ret = -EINVAL;
 		goto out_no_reloc;
 	}
-	bo = &vmw_bo->base;
 
 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 		DRM_ERROR("Max number relocations per submission"
@@ -906,7 +1254,7 @@
 	reloc->mob_loc = id;
 	reloc->location = NULL;
 
-	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
+	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -944,7 +1292,6 @@
 				   struct vmw_dma_buffer **vmw_bo_p)
 {
 	struct vmw_dma_buffer *vmw_bo = NULL;
-	struct ttm_buffer_object *bo;
 	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
 	int ret;
@@ -955,7 +1302,6 @@
 		ret = -EINVAL;
 		goto out_no_reloc;
 	}
-	bo = &vmw_bo->base;
 
 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 		DRM_ERROR("Max number relocations per submission"
@@ -967,7 +1313,7 @@
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 	reloc->location = ptr;
 
-	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
+	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -980,6 +1326,98 @@
 	return ret;
 }
 
+/**
+ * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * This function adds the new query into the query COTABLE.
+ */
+static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGA3dCmdHeader *header)
+{
+	struct vmw_dx_define_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDefineQuery q;
+	} *cmd;
+
+	int ret;
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *cotable_res;
+
+	if (ctx_node == NULL) {
+		DRM_ERROR("DX Context not set for query.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
+
+	if (cmd->q.type <  SVGA3D_QUERYTYPE_MIN ||
+	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
+		return -EINVAL;
+
+	cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
+	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
+	vmw_resource_unreference(&cotable_res);
+
+	return ret;
+}
+
+
+/**
+ * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * The query bind operation will eventually associate the query ID
+ * with its backing MOB.  In this function, we take the user mode
+ * MOB ID and use vmw_translate_mob_ptr() to translate it to its
+ * kernel mode equivalent.
+ */
+static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct vmw_dx_bind_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindQuery q;
+	} *cmd;
+
+	struct vmw_dma_buffer *vmw_bo;
+	int ret;
+
+	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
+
+	/*
+	 * Look up the buffer pointed to by q.mobid, put it on the relocation
+	 * list so its kernel mode MOB ID can be filled in later
+	 */
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
+				    &vmw_bo);
+
+	if (ret != 0)
+		return ret;
+
+	sw_context->dx_query_mob = vmw_bo;
+	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+
+	return ret;
+}
+
 /**
  * vmw_cmd_begin_gb_query - validate a  SVGA_3D_CMD_BEGIN_GB_QUERY command.
  *
@@ -1074,7 +1512,7 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
 	vmw_dmabuf_unreference(&vmw_bo);
 	return ret;
@@ -1128,7 +1566,7 @@
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
 	vmw_dmabuf_unreference(&vmw_bo);
 	return ret;
@@ -1363,6 +1801,12 @@
 		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
 			continue;
 
+		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
+			DRM_ERROR("Illegal texture/sampler unit %u.\n",
+				  (unsigned) cur_state->stage);
+			return -EINVAL;
+		}
+
 		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 					user_surface_converter,
 					&cur_state->value, &res_node);
@@ -1370,14 +1814,14 @@
 			return ret;
 
 		if (dev_priv->has_mob) {
-			struct vmw_ctx_bindinfo bi;
+			struct vmw_ctx_bindinfo_tex binding;
 
-			bi.ctx = ctx_node->res;
-			bi.res = res_node ? res_node->res : NULL;
-			bi.bt = vmw_ctx_binding_tex;
-			bi.i1.texture_stage = cur_state->stage;
-			vmw_context_binding_add(ctx_node->staged_bindings,
-						&bi);
+			binding.bi.ctx = ctx_node->res;
+			binding.bi.res = res_node ? res_node->res : NULL;
+			binding.bi.bt = vmw_ctx_binding_tex;
+			binding.texture_stage = cur_state->stage;
+			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+					0, binding.texture_stage);
 		}
 	}
 
@@ -1407,6 +1851,47 @@
 	return ret;
 }
 
+
+/**
+ * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
+ * switching
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @val_node: The validation node representing the resource.
+ * @buf_id: Pointer to the user-space backup buffer handle in the command
+ * stream.
+ * @backup_offset: Offset of backup into MOB.
+ *
+ * This function prepares for registering a switch of backup buffers
+ * in the resource metadata just prior to unreserving.
+ */
+static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     struct vmw_resource_val_node *val_node,
+				     uint32_t *buf_id,
+				     unsigned long backup_offset)
+{
+	struct vmw_dma_buffer *dma_buf;
+	int ret;
+
+	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
+	if (ret)
+		return ret;
+
+	val_node->switching_backup = true;
+	if (val_node->first_usage)
+		val_node->no_buffer_needed = true;
+
+	vmw_dmabuf_unreference(&val_node->new_backup);
+	val_node->new_backup = dma_buf;
+	val_node->new_backup_offset = backup_offset;
+
+	return 0;
+}
+
+
 /**
  * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
  *
@@ -1420,7 +1905,8 @@
  * @backup_offset: Offset of backup into MOB.
  *
  * This function prepares for registering a switch of backup buffers
- * in the resource metadata just prior to unreserving.
+ * in the resource metadata just prior to unreserving. It's basically a wrapper
+ * around vmw_cmd_res_switch_backup with a different interface.
  */
 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
@@ -1431,27 +1917,16 @@
 				 uint32_t *buf_id,
 				 unsigned long backup_offset)
 {
-	int ret;
-	struct vmw_dma_buffer *dma_buf;
 	struct vmw_resource_val_node *val_node;
+	int ret;
 
 	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
 				converter, res_id, &val_node);
-	if (unlikely(ret != 0))
+	if (ret)
 		return ret;
 
-	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
-	if (unlikely(ret != 0))
-		return ret;
-
-	if (val_node->first_usage)
-		val_node->no_buffer_needed = true;
-
-	vmw_dmabuf_unreference(&val_node->new_backup);
-	val_node->new_backup = dma_buf;
-	val_node->new_backup_offset = backup_offset;
-
-	return 0;
+	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
+					 buf_id, backup_offset);
 }
 
 /**
@@ -1703,10 +2178,10 @@
 	if (unlikely(!dev_priv->has_mob))
 		return 0;
 
-	ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
-				       cmd->body.shid,
-				       cmd->body.type,
-				       &sw_context->staged_cmd_res);
+	ret = vmw_shader_remove(vmw_context_res_man(val->res),
+				cmd->body.shid,
+				cmd->body.type,
+				&sw_context->staged_cmd_res);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1734,13 +2209,19 @@
 		SVGA3dCmdSetShader body;
 	} *cmd;
 	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
-	struct vmw_ctx_bindinfo bi;
+	struct vmw_ctx_bindinfo_shader binding;
 	struct vmw_resource *res = NULL;
 	int ret;
 
 	cmd = container_of(header, struct vmw_set_shader_cmd,
 			   header);
 
+	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
+		DRM_ERROR("Illegal shader type %u.\n",
+			  (unsigned) cmd->body.type);
+		return -EINVAL;
+	}
+
 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 				user_context_converter, &cmd->body.cid,
 				&ctx_node);
@@ -1751,14 +2232,12 @@
 		return 0;
 
 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
-		res = vmw_compat_shader_lookup
-			(vmw_context_res_man(ctx_node->res),
-			 cmd->body.shid,
-			 cmd->body.type);
+		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+					cmd->body.shid,
+					cmd->body.type);
 
 		if (!IS_ERR(res)) {
 			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
-						    vmw_res_shader,
 						    &cmd->body.shid, res,
 						    &res_node);
 			vmw_resource_unreference(&res);
@@ -1776,11 +2255,13 @@
 			return ret;
 	}
 
-	bi.ctx = ctx_node->res;
-	bi.res = res_node ? res_node->res : NULL;
-	bi.bt = vmw_ctx_binding_shader;
-	bi.i1.shader_type = cmd->body.type;
-	return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
+	binding.bi.ctx = ctx_node->res;
+	binding.bi.res = res_node ? res_node->res : NULL;
+	binding.bi.bt = vmw_ctx_binding_shader;
+	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+			binding.shader_slot, 0);
+	return 0;
 }
 
 /**
@@ -1842,6 +2323,690 @@
 				     cmd->body.offsetInBytes);
 }
 
+/**
+ * vmw_cmd_dx_set_single_constant_buffer - Validate an
+ * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int
+vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
+				      struct vmw_sw_context *sw_context,
+				      SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetSingleConstantBuffer body;
+	} *cmd;
+	struct vmw_resource_val_node *res_node = NULL;
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_bindinfo_cb binding;
+	int ret;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.sid, &res_node);
+	if (unlikely(ret != 0))
+		return ret;
+
+	binding.bi.ctx = ctx_node->res;
+	binding.bi.res = res_node ? res_node->res : NULL;
+	binding.bi.bt = vmw_ctx_binding_cb;
+	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+	binding.offset = cmd->body.offsetInBytes;
+	binding.size = cmd->body.sizeInBytes;
+	binding.slot = cmd->body.slot;
+
+	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
+	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
+			  (unsigned) cmd->body.type,
+			  (unsigned) binding.slot);
+		return -EINVAL;
+	}
+
+	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+			binding.shader_slot, binding.slot);
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_shader_res - Validate an
+ * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetShaderResources body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dShaderResourceViewId);
+
+	if ((u64) cmd->body.startView + (u64) num_sr_view >
+	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
+	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+		DRM_ERROR("Invalid shader binding.\n");
+		return -EINVAL;
+	}
+
+	return vmw_view_bindings_add(sw_context, vmw_view_sr,
+				     vmw_ctx_binding_sr,
+				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
+				     (void *) &cmd[1], num_sr_view,
+				     cmd->body.startView);
+}
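
The bounds check above promotes both operands to u64 before adding, so a huge startView cannot wrap the 32-bit sum and slip past the comparison; the same trailing-array length derivation (header size minus body size, divided by element size) recurs in several handlers below. A standalone sketch of the overflow-safe check (the limit value is invented):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_SRVIEWS 128u  /* stand-in for SVGA3D_DX_MAX_SRVIEWS */

    /*
     * Promote to u64 before adding: a naive 32-bit sum of
     * 0xffffffff + 2 wraps to 1 and would pass the range check.
     */
    static int views_in_range(uint32_t start, uint32_t count)
    {
        return (uint64_t)start + (uint64_t)count <= (uint64_t)MAX_SRVIEWS;
    }

    int main(void)
    {
        printf("%d\n", views_in_range(0xffffffffu, 2u));  /* 0: rejected */
        printf("%d\n", views_in_range(4u, 8u));           /* 1: accepted */
        return 0;
    }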
+
+/**
+ * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetShader body;
+	} *cmd;
+	struct vmw_resource *res = NULL;
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_bindinfo_shader binding;
+	int ret = 0;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, typeof(*cmd), header);
+
+	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+		DRM_ERROR("Illegal shader type %u.\n",
+			  (unsigned) cmd->body.type);
+		return -EINVAL;
+	}
+
+	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
+		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
+		if (IS_ERR(res)) {
+			DRM_ERROR("Could not find shader for binding.\n");
+			return PTR_ERR(res);
+		}
+
+		ret = vmw_resource_val_add(sw_context, res, NULL);
+		if (ret)
+			goto out_unref;
+	}
+
+	binding.bi.ctx = ctx_node->res;
+	binding.bi.res = res;
+	binding.bi.bt = vmw_ctx_binding_dx_shader;
+	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
+
+	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+			binding.shader_slot, 0);
+out_unref:
+	if (res)
+		vmw_resource_unreference(&res);
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_dx_set_vertex_buffers - Validates an
+ * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
+					 struct vmw_sw_context *sw_context,
+					 SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_bindinfo_vb binding;
+	struct vmw_resource_val_node *res_node;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetVertexBuffers body;
+		SVGA3dVertexBuffer buf[];
+	} *cmd;
+	int i, ret, num;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, typeof(*cmd), header);
+	num = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dVertexBuffer);
+	if ((u64)num + (u64)cmd->body.startBuffer >
+	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
+		DRM_ERROR("Invalid number of vertex buffers.\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num; i++) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					user_surface_converter,
+					&cmd->buf[i].sid, &res_node);
+		if (unlikely(ret != 0))
+			return ret;
+
+		binding.bi.ctx = ctx_node->res;
+		binding.bi.bt = vmw_ctx_binding_vb;
+		binding.bi.res = ((res_node) ? res_node->res : NULL);
+		binding.offset = cmd->buf[i].offset;
+		binding.stride = cmd->buf[i].stride;
+		binding.slot = i + cmd->body.startBuffer;
+
+		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+				0, binding.slot);
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_index_buffer - Validate an
+ * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
+				       struct vmw_sw_context *sw_context,
+				       SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_bindinfo_ib binding;
+	struct vmw_resource_val_node *res_node;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetIndexBuffer body;
+	} *cmd;
+	int ret;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.sid, &res_node);
+	if (unlikely(ret != 0))
+		return ret;
+
+	binding.bi.ctx = ctx_node->res;
+	binding.bi.res = ((res_node) ? res_node->res : NULL);
+	binding.bi.bt = vmw_ctx_binding_ib;
+	binding.offset = cmd->body.offset;
+	binding.format = cmd->body.format;
+
+	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_set_rendertargets - Validate an
+ * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetRenderTargets body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dRenderTargetViewId);
+
+	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
+		DRM_ERROR("Invalid DX Rendertarget binding.\n");
+		return -EINVAL;
+	}
+
+	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
+				    vmw_ctx_binding_ds, 0,
+				    &cmd->body.depthStencilViewId, 1, 0);
+	if (ret)
+		return ret;
+
+	return vmw_view_bindings_add(sw_context, vmw_view_rt,
+				     vmw_ctx_binding_dx_rt, 0,
+				     (void *)&cmd[1], num_rt_view, 0);
+}
+
+/**
+ * vmw_cmd_dx_clear_rendertarget_view - Validate an
+ * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
+					      struct vmw_sw_context *sw_context,
+					      SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXClearRenderTargetView body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+
+	return vmw_view_id_val_add(sw_context, vmw_view_rt,
+				   cmd->body.renderTargetViewId);
+}
+
+/**
+ * vmw_cmd_dx_clear_depthstencil_view - Validate an
+ * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
+					      struct vmw_sw_context *sw_context,
+					      SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXClearDepthStencilView body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+
+	return vmw_view_id_val_add(sw_context, vmw_view_ds,
+				   cmd->body.depthStencilViewId);
+}
+
+static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource_val_node *srf_node;
+	struct vmw_resource *res;
+	enum vmw_view_type view_type;
+	int ret;
+	/*
+	 * This is based on the fact that all affected define commands have
+	 * the same initial command body layout.
+	 */
+	struct {
+		SVGA3dCmdHeader header;
+		uint32 defined_id;
+		uint32 sid;
+	} *cmd;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	view_type = vmw_view_cmd_to_type(header->id);
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->sid, &srf_node);
+	if (unlikely(ret != 0))
+		return ret;
+
+	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
+	ret = vmw_cotable_notify(res, cmd->defined_id);
+	vmw_resource_unreference(&res);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_view_add(sw_context->man,
+			    ctx_node->res,
+			    srf_node->res,
+			    view_type,
+			    cmd->defined_id,
+			    header,
+			    header->size + sizeof(*header),
+			    &sw_context->staged_cmd_res);
+}
+
+/**
+ * vmw_cmd_dx_set_so_targets - Validate an
+ * SVGA_3D_CMD_DX_SET_SOTARGETS command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_ctx_bindinfo_so binding;
+	struct vmw_resource_val_node *res_node;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXSetSOTargets body;
+		SVGA3dSoTarget targets[];
+	} *cmd;
+	int i, ret, num;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	cmd = container_of(header, typeof(*cmd), header);
+	num = (cmd->header.size - sizeof(cmd->body)) /
+		sizeof(SVGA3dSoTarget);
+
+	if (num > SVGA3D_DX_MAX_SOTARGETS) {
+		DRM_ERROR("Invalid DX SO binding.\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num; i++) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					user_surface_converter,
+					&cmd->targets[i].sid, &res_node);
+		if (unlikely(ret != 0))
+			return ret;
+
+		binding.bi.ctx = ctx_node->res;
+		binding.bi.res = ((res_node) ? res_node->res : NULL);
+		binding.bi.bt = vmw_ctx_binding_so;
+		binding.offset = cmd->targets[i].offset;
+		binding.size = cmd->targets[i].sizeInBytes;
+		binding.slot = i;
+
+		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
+				0, binding.slot);
+	}
+
+	return 0;
+}
+
+static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context,
+				SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *res;
+	/*
+	 * This is based on the fact that all affected define commands have
+	 * the same initial command body layout.
+	 */
+	struct {
+		SVGA3dCmdHeader header;
+		uint32 defined_id;
+	} *cmd;
+	enum vmw_so_type so_type;
+	int ret;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	so_type = vmw_so_cmd_to_type(header->id);
+	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
+	cmd = container_of(header, typeof(*cmd), header);
+	ret = vmw_cotable_notify(res, cmd->defined_id);
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_dx_check_subresource - Validate an
+ * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		union {
+			SVGA3dCmdDXReadbackSubResource r_body;
+			SVGA3dCmdDXInvalidateSubResource i_body;
+			SVGA3dCmdDXUpdateSubResource u_body;
+			SVGA3dSurfaceId sid;
+		};
+	} *cmd;
+
+	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
+		     offsetof(typeof(*cmd), sid));
+	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
+		     offsetof(typeof(*cmd), sid));
+	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
+		     offsetof(typeof(*cmd), sid));
+
+	cmd = container_of(header, typeof(*cmd), header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->sid, NULL);
+}
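
The BUILD_BUG_ON lines turn the layout assumption, that every command variant places the surface id at the same offset, into a compile-time failure rather than a runtime surprise. A minimal userspace analogue using C11 _Static_assert (struct layouts invented for illustration):

    #include <stddef.h>
    #include <stdint.h>

    /* Invented stand-ins for the SVGA body layouts. */
    struct readback   { uint32_t sid; uint32_t extra; };
    struct invalidate { uint32_t sid; };

    struct any_cmd {
        union {
            struct readback   r_body;
            struct invalidate i_body;
            uint32_t          sid;
        };
    };

    /* Compilation fails if a layout change ever moves the shared sid. */
    _Static_assert(offsetof(struct any_cmd, r_body.sid) ==
                   offsetof(struct any_cmd, sid), "sid moved in readback");
    _Static_assert(offsetof(struct any_cmd, i_body.sid) ==
                   offsetof(struct any_cmd, sid), "sid moved in invalidate");

    int main(void) { return 0; }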
+
+static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context,
+				SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+
+	if (unlikely(ctx_node == NULL)) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_dx_view_remove - Validate a view remove command and
+ * schedule the view resource for removal.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ *
+ * Check that the view exists, and if it was not created using this
+ * command batch, make sure it's validated (present in the device) so that
+ * the remove command will not confuse the device.
+ */
+static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct {
+		SVGA3dCmdHeader header;
+		union vmw_view_destroy body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
+	struct vmw_resource *view;
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	ret = vmw_view_remove(sw_context->man,
+			      cmd->body.view_id, view_type,
+			      &sw_context->staged_cmd_res,
+			      &view);
+	if (ret || !view)
+		return ret;
+
+	/*
+	 * Add view to the validate list iff it was not created using this
+	 * command batch.
+	 */
+	return vmw_view_res_val_add(sw_context, view);
+}
+
+/**
+ * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
+				    struct vmw_sw_context *sw_context,
+				    SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct vmw_resource *res;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDefineShader body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
+	ret = vmw_cotable_notify(res, cmd->body.shaderId);
+	vmw_resource_unreference(&res);
+	if (ret)
+		return ret;
+
+	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
+				 cmd->body.shaderId, cmd->body.type,
+				 &sw_context->staged_cmd_res);
+}
+
+/**
+ * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXDestroyShader body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+
+	if (!ctx_node) {
+		DRM_ERROR("DX Context not set.\n");
+		return -EINVAL;
+	}
+
+	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
+				&sw_context->staged_cmd_res);
+	if (ret)
+		DRM_ERROR("Could not find shader to remove.\n");
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_resource_val_node *ctx_node;
+	struct vmw_resource_val_node *res_node;
+	struct vmw_resource *res;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindShader body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+
+	if (cmd->body.cid != SVGA3D_INVALID_ID) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+					user_context_converter,
+					&cmd->body.cid, &ctx_node);
+		if (ret)
+			return ret;
+	} else {
+		ctx_node = sw_context->dx_ctx_node;
+		if (!ctx_node) {
+			DRM_ERROR("DX Context not set.\n");
+			return -EINVAL;
+		}
+	}
+
+	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
+				cmd->body.shid, 0);
+	if (IS_ERR(res)) {
+		DRM_ERROR("Could not find shader to bind.\n");
+		return PTR_ERR(res);
+	}
+
+	ret = vmw_resource_val_add(sw_context, res, &res_node);
+	if (ret) {
+		DRM_ERROR("Error creating resource validation node.\n");
+		goto out_unref;
+	}
+
+	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
+					&cmd->body.mobid,
+					cmd->body.offsetInBytes);
+out_unref:
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				void *buf, uint32_t *size)
@@ -1849,7 +3014,7 @@
 	uint32_t size_remaining = *size;
 	uint32_t cmd_id;
 
-	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+	cmd_id = ((uint32_t *)buf)[0];
 	switch (cmd_id) {
 	case SVGA_CMD_UPDATE:
 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
@@ -1980,7 +3145,7 @@
 		    false, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
 		    false, false, true),
-	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
+	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
 		    false, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
 		    false, false, true),
@@ -2051,7 +3216,147 @@
 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
 		    false, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
-		    true, false, true)
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
+		    false, false, true),
+
+	/*
+	 * DX commands
+	 */
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
+		    false, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
+		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
+		    &vmw_cmd_dx_set_shader_res, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
+		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
+		    &vmw_cmd_dx_set_index_buffer, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
+		    &vmw_cmd_dx_set_rendertargets, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
+		    &vmw_cmd_ok, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
+		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
+		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
+		    &vmw_cmd_dx_check_subresource, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
+		    &vmw_cmd_dx_check_subresource, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
+		    &vmw_cmd_dx_check_subresource, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
+		    &vmw_cmd_dx_view_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
+		    &vmw_cmd_dx_view_remove, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
+		    &vmw_cmd_dx_view_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
+		    &vmw_cmd_dx_view_remove, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
+		    &vmw_cmd_dx_view_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+		    &vmw_cmd_dx_view_remove, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
+		    &vmw_cmd_dx_define_shader, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
+		    &vmw_cmd_dx_destroy_shader, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
+		    &vmw_cmd_dx_bind_shader, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
+		    &vmw_cmd_dx_so_define, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
+		    true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
+		    &vmw_cmd_dx_set_so_targets, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
+		    &vmw_cmd_dx_cid_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
+		    &vmw_cmd_buffer_copy_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
+		    &vmw_cmd_pred_copy_check, true, false, true),
 };
 
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -2065,14 +3370,14 @@
 	const struct vmw_cmd_entry *entry;
 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
 
-	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+	cmd_id = ((uint32_t *)buf)[0];
 	/* Handle any none 3D commands */
 	if (unlikely(cmd_id < SVGA_CMD_MAX))
 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
 
 
-	cmd_id = le32_to_cpu(header->id);
-	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
+	cmd_id = header->id;
+	*size = header->size + sizeof(SVGA3dCmdHeader);
 
 	cmd_id -= SVGA_3D_CMD_BASE;
 	if (unlikely(*size > size_remaining))
@@ -2184,7 +3489,8 @@
  *
  * @list: The resource list.
  */
-static void vmw_resource_list_unreference(struct list_head *list)
+static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
+					  struct list_head *list)
 {
 	struct vmw_resource_val_node *val, *val_next;
 
@@ -2195,8 +3501,15 @@
 	list_for_each_entry_safe(val, val_next, list, head) {
 		list_del_init(&val->head);
 		vmw_resource_unreference(&val->res);
-		if (unlikely(val->staged_bindings))
-			kfree(val->staged_bindings);
+
+		if (val->staged_bindings) {
+			if (val->staged_bindings != sw_context->staged_bindings)
+				vmw_binding_state_free(val->staged_bindings);
+			else
+				sw_context->staged_bindings_inuse = false;
+			val->staged_bindings = NULL;
+		}
+
 		kfree(val);
 	}
 }
@@ -2222,24 +3535,21 @@
 		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
 }
 
-static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
-				      struct ttm_buffer_object *bo,
-				      bool validate_as_mob)
+int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+			       struct ttm_buffer_object *bo,
+			       bool interruptible,
+			       bool validate_as_mob)
 {
+	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
+						  base);
 	int ret;
 
-
-	/*
-	 * Don't validate pinned buffers.
-	 */
-
-	if (bo == dev_priv->pinned_bo ||
-	    (bo == dev_priv->dummy_query_bo &&
-	     dev_priv->dummy_query_bo_pinned))
+	if (vbo->pin_count > 0)
 		return 0;
 
 	if (validate_as_mob)
-		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
+		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
+				       false);
 
 	/**
 	 * Put BO in VRAM if there is space, otherwise as a GMR.
@@ -2248,7 +3558,8 @@
 	 * used as a GMR, this will return -ENOMEM.
 	 */
 
-	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
+			      false);
 	if (likely(ret == 0 || ret == -ERESTARTSYS))
 		return ret;
 
@@ -2257,8 +3568,7 @@
 	 * previous contents.
 	 */
 
-	DRM_INFO("Falling through to VRAM.\n");
-	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
 	return ret;
 }
 
@@ -2270,6 +3580,7 @@
 
 	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
 		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
+						 true,
 						 entry->validate_as_mob);
 		if (unlikely(ret != 0))
 			return ret;
@@ -2417,7 +3728,164 @@
 	}
 }
 
+/**
+ * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
+ * the fifo.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @kernel_commands: Pointer to the unpatched command batch.
+ * @command_size: Size of the unpatched command batch.
+ * @sw_context: Structure holding the relocation lists.
+ *
+ * Side effects: If this function returns 0, then the command batch
+ * pointed to by @kernel_commands will have been modified.
+ */
+static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
+				   void *kernel_commands,
+				   u32 command_size,
+				   struct vmw_sw_context *sw_context)
+{
+	void *cmd;
 
+	if (sw_context->dx_ctx_node)
+		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
+					  sw_context->dx_ctx_node->res->id);
+	else
+		cmd = vmw_fifo_reserve(dev_priv, command_size);
+	if (!cmd) {
+		DRM_ERROR("Failed reserving fifo space for commands.\n");
+		return -ENOMEM;
+	}
+
+	vmw_apply_relocations(sw_context);
+	memcpy(cmd, kernel_commands, command_size);
+	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+	vmw_fifo_commit(dev_priv, command_size);
+
+	return 0;
+}
+
+/**
+ * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
+ * the command buffer manager.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @header: Opaque handle to the command buffer allocation.
+ * @command_size: Size of the unpatched command batch.
+ * @sw_context: Structure holding the relocation lists.
+ *
+ * Side effects: If this function returns 0, then the command buffer
+ * represented by @header will have been modified.
+ */
+static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
+				     struct vmw_cmdbuf_header *header,
+				     u32 command_size,
+				     struct vmw_sw_context *sw_context)
+{
+	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
+		  SVGA3D_INVALID_ID);
+	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
+				       id, false, header);
+
+	vmw_apply_relocations(sw_context);
+	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
+
+	return 0;
+}
+
+/**
+ * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
+ * submission using a command buffer.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @user_commands: User-space pointer to the commands to be submitted.
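+ * @kernel_commands: Kernel-space pointer to the command batch, or NULL.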
+ * @command_size: Size of the unpatched command batch.
+ * @header: Out parameter returning the opaque pointer to the command buffer.
+ *
+ * This function checks whether we can use the command buffer manager for
+ * submission and if so, creates a command buffer of suitable size and
+ * copies the user data into that buffer.
+ *
+ * On successful return, the function returns a pointer to the data in the
+ * command buffer and *@header is set to non-NULL.
+ * If command buffers could not be used, the function returns the value of
+ * @kernel_commands as passed in (which may be NULL), and *@header is set
+ * to NULL.
+ * If an error is encountered, the function returns a pointer error value.
+ * If the function is interrupted by a signal while sleeping, it returns
+ * -ERESTARTSYS cast to a pointer error value.
+ */
+static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
+				void __user *user_commands,
+				void *kernel_commands,
+				u32 command_size,
+				struct vmw_cmdbuf_header **header)
+{
+	size_t cmdbuf_size;
+	int ret;
+
+	*header = NULL;
+	if (!dev_priv->cman || kernel_commands)
+		return kernel_commands;
+
+	if (command_size > SVGA_CB_MAX_SIZE) {
+		DRM_ERROR("Command buffer is too large.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* If possible, add a little space for fencing. */
+	cmdbuf_size = command_size + 512;
+	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
+	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
+					   true, header);
+	if (IS_ERR(kernel_commands))
+		return kernel_commands;
+
+	ret = copy_from_user(kernel_commands, user_commands,
+			     command_size);
+	if (ret) {
+		DRM_ERROR("Failed copying commands.\n");
+		vmw_cmdbuf_header_free(*header);
+		*header = NULL;
+		return ERR_PTR(-EFAULT);
+	}
+
+	return kernel_commands;
+}
+
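A caller-side sketch of the three-way return contract documented above; it
mirrors the call site in vmw_execbuf_process() further down, with names and
error handling abbreviated (not compilable in isolation):

	struct vmw_cmdbuf_header *header;
	void *cmds;

	cmds = vmw_execbuf_cmdbuf(dev_priv, user_commands, kernel_commands,
				  command_size, &header);
	if (IS_ERR(cmds))
		return PTR_ERR(cmds);	/* -EINVAL, -EFAULT or -ERESTARTSYS */

	if (header) {
		/* cmds points into a device command buffer; submit via the
		 * command buffer manager (vmw_execbuf_submit_cmdbuf). */
	} else {
		/* cmds == kernel_commands as passed in, possibly NULL;
		 * fall back to the bounce buffer + fifo path. */
	}
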
+static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   uint32_t handle)
+{
+	struct vmw_resource_val_node *ctx_node;
+	struct vmw_resource *res;
+	int ret;
+
+	if (handle == SVGA3D_INVALID_ID)
+		return 0;
+
+	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
+					      handle, user_context_converter,
+					      &res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
+			  (unsigned) handle);
+		return ret;
+	}
+
+	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	sw_context->dx_ctx_node = ctx_node;
+	sw_context->man = vmw_context_res_man(res);
+out_err:
+	vmw_resource_unreference(&res);
+	return ret;
+}
 
 int vmw_execbuf_process(struct drm_file *file_priv,
 			struct vmw_private *dev_priv,
@@ -2425,6 +3893,7 @@
 			void *kernel_commands,
 			uint32_t command_size,
 			uint64_t throttle_us,
+			uint32_t dx_context_handle,
 			struct drm_vmw_fence_rep __user *user_fence_rep,
 			struct vmw_fence_obj **out_fence)
 {
@@ -2432,18 +3901,33 @@
 	struct vmw_fence_obj *fence = NULL;
 	struct vmw_resource *error_resource;
 	struct list_head resource_list;
+	struct vmw_cmdbuf_header *header;
 	struct ww_acquire_ctx ticket;
 	uint32_t handle;
-	void *cmd;
 	int ret;
 
+	if (throttle_us) {
+		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
+				   throttle_us);
+
+		if (ret)
+			return ret;
+	}
+
+	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
+					     kernel_commands, command_size,
+					     &header);
+	if (IS_ERR(kernel_commands))
+		return PTR_ERR(kernel_commands);
+
 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
-	if (unlikely(ret != 0))
-		return -ERESTARTSYS;
+	if (ret) {
+		ret = -ERESTARTSYS;
+		goto out_free_header;
+	}
 
+	sw_context->kernel = false;
 	if (kernel_commands == NULL) {
-		sw_context->kernel = false;
-
 		ret = vmw_resize_cmd_bounce(sw_context, command_size);
 		if (unlikely(ret != 0))
 			goto out_unlock;
@@ -2458,19 +3942,26 @@
 			goto out_unlock;
 		}
 		kernel_commands = sw_context->cmd_bounce;
-	} else
+	} else if (!header)
 		sw_context->kernel = true;
 
 	sw_context->fp = vmw_fpriv(file_priv);
 	sw_context->cur_reloc = 0;
 	sw_context->cur_val_buf = 0;
 	INIT_LIST_HEAD(&sw_context->resource_list);
+	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
 	sw_context->cur_query_bo = dev_priv->pinned_bo;
 	sw_context->last_query_ctx = NULL;
 	sw_context->needs_post_query_barrier = false;
+	sw_context->dx_ctx_node = NULL;
+	sw_context->dx_query_mob = NULL;
+	sw_context->dx_query_ctx = NULL;
 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
 	INIT_LIST_HEAD(&sw_context->validate_nodes);
 	INIT_LIST_HEAD(&sw_context->res_relocations);
+	if (sw_context->staged_bindings)
+		vmw_binding_state_reset(sw_context->staged_bindings);
+
 	if (!sw_context->res_ht_initialized) {
 		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
 		if (unlikely(ret != 0))
@@ -2478,10 +3969,24 @@
 		sw_context->res_ht_initialized = true;
 	}
 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
-
 	INIT_LIST_HEAD(&resource_list);
+	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
+	if (unlikely(ret != 0)) {
+		list_splice_init(&sw_context->ctx_resource_list,
+				 &sw_context->resource_list);
+		goto out_err_nores;
+	}
+
 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
 				command_size);
+	/*
+	 * Merge the resource lists before checking the return status
+	 * from vmw_cmd_check_all so that all the open hashtabs will
+	 * be handled properly even if vmw_cmd_check_all fails.
+	 */
+	list_splice_init(&sw_context->ctx_resource_list,
+			 &sw_context->resource_list);
+
 	if (unlikely(ret != 0))
 		goto out_err_nores;
 
@@ -2502,14 +4007,6 @@
 	if (unlikely(ret != 0))
 		goto out_err;
 
-	if (throttle_us) {
-		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
-				   throttle_us);
-
-		if (unlikely(ret != 0))
-			goto out_err;
-	}
-
 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
 	if (unlikely(ret != 0)) {
 		ret = -ERESTARTSYS;
@@ -2522,21 +4019,17 @@
 			goto out_unlock_binding;
 	}
 
-	cmd = vmw_fifo_reserve(dev_priv, command_size);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving fifo space for commands.\n");
-		ret = -ENOMEM;
-		goto out_unlock_binding;
+	if (!header) {
+		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
+					      command_size, sw_context);
+	} else {
+		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
+						sw_context);
+		header = NULL;
 	}
-
-	vmw_apply_relocations(sw_context);
-	memcpy(cmd, kernel_commands, command_size);
-
-	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
-	vmw_resource_relocations_free(&sw_context->res_relocations);
-
-	vmw_fifo_commit(dev_priv, command_size);
 	mutex_unlock(&dev_priv->binding_mutex);
+	if (ret)
+		goto out_err;
 
 	vmw_query_bo_switch_commit(dev_priv, sw_context);
 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +4044,7 @@
 	if (ret != 0)
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
-	vmw_resource_list_unreserve(&sw_context->resource_list, false);
+	vmw_resources_unreserve(sw_context, false);
 
 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
 				    (void *) fence);
@@ -2580,7 +4073,7 @@
 	 * Unreference resources outside of the cmdbuf_mutex to
 	 * avoid deadlocks in resource destruction paths.
 	 */
-	vmw_resource_list_unreference(&resource_list);
+	vmw_resource_list_unreference(sw_context, &resource_list);
 
 	return 0;
 
@@ -2589,7 +4082,7 @@
 out_err:
 	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
 out_err_nores:
-	vmw_resource_list_unreserve(&sw_context->resource_list, true);
+	vmw_resources_unreserve(sw_context, true);
 	vmw_resource_relocations_free(&sw_context->res_relocations);
 	vmw_free_relocations(sw_context);
 	vmw_clear_validations(sw_context);
@@ -2607,9 +4100,12 @@
 	 * Unreference resources outside of the cmdbuf_mutex to
 	 * avoid deadlocks in resource destruction paths.
 	 */
-	vmw_resource_list_unreference(&resource_list);
+	vmw_resource_list_unreference(sw_context, &resource_list);
 	if (unlikely(error_resource != NULL))
 		vmw_resource_unreference(&error_resource);
+out_free_header:
+	if (header)
+		vmw_cmdbuf_header_free(header);
 
 	return ret;
 }
@@ -2628,9 +4124,11 @@
 	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
 
 	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
-	vmw_bo_pin(dev_priv->pinned_bo, false);
-	vmw_bo_pin(dev_priv->dummy_query_bo, false);
-	dev_priv->dummy_query_bo_pinned = false;
+	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+	if (dev_priv->dummy_query_bo_pinned) {
+		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
+		dev_priv->dummy_query_bo_pinned = false;
+	}
 }
 
 
@@ -2672,11 +4170,11 @@
 
 	INIT_LIST_HEAD(&validate_list);
 
-	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
 	pinned_val.shared = false;
 	list_add_tail(&pinned_val.head, &validate_list);
 
-	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
 	query_val.shared = false;
 	list_add_tail(&query_val.head, &validate_list);
 
@@ -2697,10 +4195,11 @@
 		dev_priv->query_cid_valid = false;
 	}
 
-	vmw_bo_pin(dev_priv->pinned_bo, false);
-	vmw_bo_pin(dev_priv->dummy_query_bo, false);
-	dev_priv->dummy_query_bo_pinned = false;
-
+	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
+	if (dev_priv->dummy_query_bo_pinned) {
+		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
+		dev_priv->dummy_query_bo_pinned = false;
+	}
 	if (fence == NULL) {
 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
 						  NULL);
@@ -2712,7 +4211,9 @@
 
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	ttm_bo_unref(&dev_priv->pinned_bo);
+	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+	DRM_INFO("Dummy query bo pin count: %d\n",
+		 dev_priv->dummy_query_bo->pin_count);
 
 out_unlock:
 	return;
@@ -2722,7 +4223,7 @@
 out_no_reserve:
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	ttm_bo_unref(&dev_priv->pinned_bo);
+	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
 }
 
 /**
@@ -2751,36 +4252,68 @@
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 }
 
-
-int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
+int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
+		      struct drm_file *file_priv, size_t size)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+	struct drm_vmw_execbuf_arg arg;
 	int ret;
+	static const size_t copy_offset[] = {
+		offsetof(struct drm_vmw_execbuf_arg, context_handle),
+		sizeof(struct drm_vmw_execbuf_arg)};
+
+	if (unlikely(size < copy_offset[0])) {
+		DRM_ERROR("Invalid command size, ioctl %d\n",
+			  DRM_VMW_EXECBUF);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
+		return -EFAULT;
 
 	/*
-	 * This will allow us to extend the ioctl argument while
+	 * Extend the ioctl argument while
 	 * maintaining backwards compatibility:
 	 * We take different code paths depending on the value of
-	 * arg->version.
+	 * arg.version.
 	 */
 
-	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
+	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
+		     arg.version == 0)) {
 		DRM_ERROR("Incorrect execbuf version.\n");
-		DRM_ERROR("You're running outdated experimental "
-			  "vmwgfx user-space drivers.");
 		return -EINVAL;
 	}
 
+	if (arg.version > 1 &&
+	    copy_from_user(&arg.context_handle,
+			   (void __user *) (data + copy_offset[0]),
+			   copy_offset[arg.version - 1] -
+			   copy_offset[0]) != 0)
+		return -EFAULT;
+
+	switch (arg.version) {
+	case 1:
+		arg.context_handle = (uint32_t) -1;
+		break;
+	case 2:
+		if (arg.pad64 != 0) {
+			DRM_ERROR("Unused IOCTL data not set to zero.\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		break;
+	}
+
 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
 	ret = vmw_execbuf_process(file_priv, dev_priv,
-				  (void __user *)(unsigned long)arg->commands,
-				  NULL, arg->command_size, arg->throttle_us,
-				  (void __user *)(unsigned long)arg->fence_rep,
+				  (void __user *)(unsigned long)arg.commands,
+				  NULL, arg.command_size, arg.throttle_us,
+				  arg.context_handle,
+				  (void __user *)(unsigned long)arg.fence_rep,
 				  NULL);
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	if (unlikely(ret != 0))
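The staged copy in vmw_execbuf_ioctl() above is the standard way to grow an
ioctl argument without breaking old binaries: always copy the version-1
prefix, look at arg.version, then copy only the tail that this version
defines. A stand-alone model of the offset arithmetic (plain memcpy stands
in for copy_from_user(), and the struct layout is illustrative):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Illustrative stand-in for struct drm_vmw_execbuf_arg: version 1
	 * ends just before context_handle; version 2 appends it. */
	struct execbuf_arg {
		uint64_t commands;
		uint32_t command_size;
		uint32_t throttle_us;
		uint64_t fence_rep;
		uint32_t version;
		uint32_t flags;
		uint32_t context_handle;	/* new in version 2 */
		uint32_t pad64;
	};

	static const size_t copy_offset[] = {
		offsetof(struct execbuf_arg, context_handle), /* v1 size */
		sizeof(struct execbuf_arg),                   /* v2 size */
	};

	int main(void)
	{
		struct execbuf_arg user = { .version = 2,
					    .context_handle = 0x1234 };
		const unsigned char *src = (const unsigned char *)&user;
		struct execbuf_arg arg;

		/* Stage 1: the version-1 prefix is always safe to copy. */
		memcpy(&arg, src, copy_offset[0]);

		/* Stage 2: copy only the extension this version defines. */
		if (arg.version > 1)
			memcpy(&arg.context_handle, src + copy_offset[0],
			       copy_offset[arg.version - 1] - copy_offset[0]);

		printf("version %u, context handle 0x%x\n",
		       (unsigned)arg.version, (unsigned)arg.context_handle);
		return 0;
	}
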
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 0a474f3..3b1faf7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -1,7 +1,7 @@
 /**************************************************************************
  *
  * Copyright © 2007 David Airlie
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -30,6 +30,7 @@
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
+#include "vmwgfx_kms.h"
 
 #include <drm/ttm/ttm_placement.h>
 
@@ -40,21 +41,22 @@
 
 	void *vmalloc;
 
+	struct mutex bo_mutex;
 	struct vmw_dma_buffer *vmw_bo;
 	struct ttm_bo_kmap_obj map;
+	void *bo_ptr;
+	unsigned bo_size;
+	struct drm_framebuffer *set_fb;
+	struct drm_display_mode *set_mode;
+	u32 fb_x;
+	u32 fb_y;
+	bool bo_iowrite;
 
 	u32 pseudo_palette[17];
 
-	unsigned depth;
-	unsigned bpp;
-
 	unsigned max_width;
 	unsigned max_height;
 
-	void *bo_ptr;
-	unsigned bo_size;
-	bool bo_iowrite;
-
 	struct {
 		spinlock_t lock;
 		bool active;
@@ -63,6 +65,10 @@
 		unsigned x2;
 		unsigned y2;
 	} dirty;
+
+	struct drm_crtc *crtc;
+	struct drm_connector *con;
+	struct delayed_work local_work;
 };
 
 static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -77,7 +83,7 @@
 		return 1;
 	}
 
-	switch (par->depth) {
+	switch (par->set_fb->depth) {
 	case 24:
 	case 32:
 		pal[regno] = ((red & 0xff00) << 8) |
@@ -85,7 +91,8 @@
 			     ((blue  & 0xff00) >> 8);
 		break;
 	default:
-		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
+		DRM_ERROR("Bad depth %u, bpp %u.\n", par->set_fb->depth,
+			  par->set_fb->bits_per_pixel);
 		return 1;
 	}
 
@@ -134,12 +141,6 @@
 		return -EINVAL;
 	}
 
-	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
-	    (var->xoffset != 0 || var->yoffset != 0)) {
-		DRM_ERROR("Can not handle panning without display topology\n");
-		return -EINVAL;
-	}
-
 	if ((var->xoffset + var->xres) > par->max_width ||
 	    (var->yoffset + var->yres) > par->max_height) {
 		DRM_ERROR("Requested geom can not fit in framebuffer\n");
@@ -156,46 +157,6 @@
 	return 0;
 }
 
-static int vmw_fb_set_par(struct fb_info *info)
-{
-	struct vmw_fb_par *par = info->par;
-	struct vmw_private *vmw_priv = par->vmw_priv;
-	int ret;
-
-	info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;
-
-	ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
-				 info->fix.line_length,
-				 par->bpp, par->depth);
-	if (ret)
-		return ret;
-
-	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
-		/* TODO check if pitch and offset changes */
-		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-	}
-
-	/* This is really helpful since if this fails the user
-	 * can probably not see anything on the screen.
-	 */
-	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
-
-	return 0;
-}
-
-static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
-			      struct fb_info *info)
-{
-	return 0;
-}
-
 static int vmw_fb_blank(int blank, struct fb_info *info)
 {
 	return 0;
@@ -205,65 +166,89 @@
  * Dirty code
  */
 
-static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
+static void vmw_fb_dirty_flush(struct work_struct *work)
 {
+	struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
+					      local_work.work);
 	struct vmw_private *vmw_priv = par->vmw_priv;
 	struct fb_info *info = vmw_priv->fb_info;
-	int stride = (info->fix.line_length / 4);
-	int *src = (int *)info->screen_base;
-	__le32 __iomem *vram_mem = par->bo_ptr;
-	unsigned long flags;
-	unsigned x, y, w, h;
-	int i, k;
-	struct {
-		uint32_t header;
-		SVGAFifoCmdUpdate body;
-	} *cmd;
+	unsigned long irq_flags;
+	s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
+	u32 cpp, max_x, max_y;
+	struct drm_clip_rect clip;
+	struct drm_framebuffer *cur_fb;
+	u8 *src_ptr, *dst_ptr;
 
 	if (vmw_priv->suspended)
 		return;
 
-	spin_lock_irqsave(&par->dirty.lock, flags);
+	mutex_lock(&par->bo_mutex);
+	cur_fb = par->set_fb;
+	if (!cur_fb)
+		goto out_unlock;
+
+	spin_lock_irqsave(&par->dirty.lock, irq_flags);
 	if (!par->dirty.active) {
-		spin_unlock_irqrestore(&par->dirty.lock, flags);
-		return;
+		spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
+		goto out_unlock;
 	}
-	x = par->dirty.x1;
-	y = par->dirty.y1;
-	w = min(par->dirty.x2, info->var.xres) - x;
-	h = min(par->dirty.y2, info->var.yres) - y;
+
+	/*
+	 * Handle panning when copying from vmalloc to framebuffer.
+	 * Clip dirty area to framebuffer.
+	 */
+	cpp = (cur_fb->bits_per_pixel + 7) / 8;
+	max_x = par->fb_x + cur_fb->width;
+	max_y = par->fb_y + cur_fb->height;
+
+	dst_x1 = par->dirty.x1 - par->fb_x;
+	dst_y1 = par->dirty.y1 - par->fb_y;
+	dst_x1 = max_t(s32, dst_x1, 0);
+	dst_y1 = max_t(s32, dst_y1, 0);
+
+	dst_x2 = par->dirty.x2 - par->fb_x;
+	dst_y2 = par->dirty.y2 - par->fb_y;
+	dst_x2 = min_t(s32, dst_x2, max_x);
+	dst_y2 = min_t(s32, dst_y2, max_y);
+	w = dst_x2 - dst_x1;
+	h = dst_y2 - dst_y1;
+	w = max_t(s32, 0, w);
+	h = max_t(s32, 0, h);
+
 	par->dirty.x1 = par->dirty.x2 = 0;
 	par->dirty.y1 = par->dirty.y2 = 0;
-	spin_unlock_irqrestore(&par->dirty.lock, flags);
+	spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
 
-	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
-		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
-			iowrite32(src[k], vram_mem + k);
+	if (w && h) {
+		dst_ptr = (u8 *)par->bo_ptr  +
+			(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
+		src_ptr = (u8 *)par->vmalloc +
+			((dst_y1 + par->fb_y) * info->fix.line_length +
+			 (dst_x1 + par->fb_x) * cpp);
+
+		while (h-- > 0) {
+			memcpy(dst_ptr, src_ptr, w*cpp);
+			dst_ptr += par->set_fb->pitches[0];
+			src_ptr += info->fix.line_length;
+		}
+
+		clip.x1 = dst_x1;
+		clip.x2 = dst_x2;
+		clip.y1 = dst_y1;
+		clip.y2 = dst_y2;
+
+		WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
+						       &clip, 1));
+		vmw_fifo_flush(vmw_priv, false);
 	}
-
-#if 0
-	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
-#endif
-
-	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Fifo reserve failed.\n");
-		return;
-	}
-
-	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
-	cmd->body.x = cpu_to_le32(x);
-	cmd->body.y = cpu_to_le32(y);
-	cmd->body.width = cpu_to_le32(w);
-	cmd->body.height = cpu_to_le32(h);
-	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
+out_unlock:
+	mutex_unlock(&par->bo_mutex);
 }
 
 static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
 			      unsigned x1, unsigned y1,
 			      unsigned width, unsigned height)
 {
-	struct fb_info *info = par->vmw_priv->fb_info;
 	unsigned long flags;
 	unsigned x2 = x1 + width;
 	unsigned y2 = y1 + height;
@@ -277,7 +262,8 @@
 		/* if we are active start the dirty work
 		 * we share the work with the defio system */
 		if (par->dirty.active)
-			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
+			schedule_delayed_work(&par->local_work,
+					      VMW_DIRTY_DELAY);
 	} else {
 		if (x1 < par->dirty.x1)
 			par->dirty.x1 = x1;
@@ -291,6 +277,28 @@
 	spin_unlock_irqrestore(&par->dirty.lock, flags);
 }
 
+static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
+			      struct fb_info *info)
+{
+	struct vmw_fb_par *par = info->par;
+
+	if ((var->xoffset + var->xres) > var->xres_virtual ||
+	    (var->yoffset + var->yres) > var->yres_virtual) {
+		DRM_ERROR("Requested panning cannot fit in framebuffer\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&par->bo_mutex);
+	par->fb_x = var->xoffset;
+	par->fb_y = var->yoffset;
+	if (par->set_fb)
+		vmw_fb_dirty_mark(par, par->fb_x, par->fb_y, par->set_fb->width,
+				  par->set_fb->height);
+	mutex_unlock(&par->bo_mutex);
+
+	return 0;
+}
+
 static void vmw_deferred_io(struct fb_info *info,
 			    struct list_head *pagelist)
 {
@@ -319,12 +327,17 @@
 		par->dirty.x2 = info->var.xres;
 		par->dirty.y2 = y2;
 		spin_unlock_irqrestore(&par->dirty.lock, flags);
-	}
 
-	vmw_fb_dirty_flush(par);
+		/*
+		 * Since we've already waited on this work once, try to
+		 * execute asap.
+		 */
+		cancel_delayed_work(&par->local_work);
+		schedule_delayed_work(&par->local_work, 0);
+	}
 };
 
-struct fb_deferred_io vmw_defio = {
+static struct fb_deferred_io vmw_defio = {
 	.delay		= VMW_DIRTY_DELAY,
 	.deferred_io	= vmw_deferred_io,
 };
@@ -358,6 +371,256 @@
  * Bring up code
  */
 
+static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
+			    size_t size, struct vmw_dma_buffer **out)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	int ret;
+
+	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
+
+	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
+	if (!vmw_bo) {
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
+
+	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
+			      &vmw_sys_placement,
+			      false,
+			      &vmw_dmabuf_bo_free);
+	if (unlikely(ret != 0))
+		goto err_unlock; /* init frees the buffer on failure */
+
+	*out = vmw_bo;
+	ttm_write_unlock(&vmw_priv->reservation_sem);
+
+	return 0;
+
+err_unlock:
+	ttm_write_unlock(&vmw_priv->reservation_sem);
+	return ret;
+}
+
+static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
+				int *depth)
+{
+	switch (var->bits_per_pixel) {
+	case 32:
+		*depth = (var->transp.length > 0) ? 32 : 24;
+		break;
+	default:
+		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vmw_fb_kms_detach(struct vmw_fb_par *par,
+			     bool detach_bo,
+			     bool unref_bo)
+{
+	struct drm_framebuffer *cur_fb = par->set_fb;
+	int ret;
+
+	/* Detach the KMS framebuffer from crtcs */
+	if (par->set_mode) {
+		struct drm_mode_set set;
+
+		set.crtc = par->crtc;
+		set.x = 0;
+		set.y = 0;
+		set.mode = NULL;
+		set.fb = NULL;
+		set.num_connectors = 1;
+		set.connectors = &par->con;
+		ret = drm_mode_set_config_internal(&set);
+		if (ret) {
+			DRM_ERROR("Could not unset a mode.\n");
+			return ret;
+		}
+		drm_mode_destroy(par->vmw_priv->dev, par->set_mode);
+		par->set_mode = NULL;
+	}
+
+	if (cur_fb) {
+		drm_framebuffer_unreference(cur_fb);
+		par->set_fb = NULL;
+	}
+
+	if (par->vmw_bo && detach_bo) {
+		if (par->bo_ptr) {
+			ttm_bo_kunmap(&par->map);
+			par->bo_ptr = NULL;
+		}
+		if (unref_bo)
+			vmw_dmabuf_unreference(&par->vmw_bo);
+		else
+			vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
+	}
+
+	return 0;
+}
+
+static int vmw_fb_kms_framebuffer(struct fb_info *info)
+{
+	struct drm_mode_fb_cmd mode_cmd;
+	struct vmw_fb_par *par = info->par;
+	struct fb_var_screeninfo *var = &info->var;
+	struct drm_framebuffer *cur_fb;
+	struct vmw_framebuffer *vfb;
+	int ret = 0;
+	size_t new_bo_size;
+
+	ret = vmw_fb_compute_depth(var, &mode_cmd.depth);
+	if (ret)
+		return ret;
+
+	mode_cmd.width = var->xres;
+	mode_cmd.height = var->yres;
+	mode_cmd.bpp = var->bits_per_pixel;
+	mode_cmd.pitch = ((mode_cmd.bpp + 7) / 8) * mode_cmd.width;
+
+	cur_fb = par->set_fb;
+	if (cur_fb && cur_fb->width == mode_cmd.width &&
+	    cur_fb->height == mode_cmd.height &&
+	    cur_fb->bits_per_pixel == mode_cmd.bpp &&
+	    cur_fb->depth == mode_cmd.depth &&
+	    cur_fb->pitches[0] == mode_cmd.pitch)
+		return 0;
+
+	/* Need new buffer object ? */
+	new_bo_size = (size_t) mode_cmd.pitch * (size_t) mode_cmd.height;
+	ret = vmw_fb_kms_detach(par,
+				par->bo_size < new_bo_size ||
+				par->bo_size > 2*new_bo_size,
+				true);
+	if (ret)
+		return ret;
+
+	if (!par->vmw_bo) {
+		ret = vmw_fb_create_bo(par->vmw_priv, new_bo_size,
+				       &par->vmw_bo);
+		if (ret) {
+			DRM_ERROR("Failed creating a buffer object for "
+				  "fbdev.\n");
+			return ret;
+		}
+		par->bo_size = new_bo_size;
+	}
+
+	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
+				      true, &mode_cmd);
+	if (IS_ERR(vfb))
+		return PTR_ERR(vfb);
+
+	par->set_fb = &vfb->base;
+
+	if (!par->bo_ptr) {
+		/*
+		 * Pin before mapping. Since we don't know in what placement
+		 * to pin, call into KMS to do it for us.
+		 */
+		ret = vfb->pin(vfb);
+		if (ret) {
+			DRM_ERROR("Could not pin the fbdev framebuffer.\n");
+			return ret;
+		}
+
+		ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
+				  par->vmw_bo->base.num_pages, &par->map);
+		if (ret) {
+			vfb->unpin(vfb);
+			DRM_ERROR("Could not map the fbdev framebuffer.\n");
+			return ret;
+		}
+
+		par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+	}
+
+	return 0;
+}
+
+static int vmw_fb_set_par(struct fb_info *info)
+{
+	struct vmw_fb_par *par = info->par;
+	struct vmw_private *vmw_priv = par->vmw_priv;
+	struct drm_mode_set set;
+	struct fb_var_screeninfo *var = &info->var;
+	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
+		DRM_MODE_TYPE_DRIVER,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+	};
+	struct drm_display_mode *old_mode;
+	struct drm_display_mode *mode;
+	int ret;
+
+	old_mode = par->set_mode;
+	mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
+	if (!mode) {
+		DRM_ERROR("Could not create new fb mode.\n");
+		return -ENOMEM;
+	}
+
+	mode->hdisplay = var->xres;
+	mode->vdisplay = var->yres;
+	vmw_guess_mode_timing(mode);
+
+	if (old_mode && drm_mode_equal(old_mode, mode)) {
+		drm_mode_destroy(vmw_priv->dev, mode);
+		mode = old_mode;
+		old_mode = NULL;
+	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
+					       mode->hdisplay *
+					       (var->bits_per_pixel + 7) / 8,
+					       mode->vdisplay)) {
+		drm_mode_destroy(vmw_priv->dev, mode);
+		return -EINVAL;
+	}
+
+	mutex_lock(&par->bo_mutex);
+	drm_modeset_lock_all(vmw_priv->dev);
+	ret = vmw_fb_kms_framebuffer(info);
+	if (ret)
+		goto out_unlock;
+
+	par->fb_x = var->xoffset;
+	par->fb_y = var->yoffset;
+
+	set.crtc = par->crtc;
+	set.x = 0;
+	set.y = 0;
+	set.mode = mode;
+	set.fb = par->set_fb;
+	set.num_connectors = 1;
+	set.connectors = &par->con;
+
+	ret = drm_mode_set_config_internal(&set);
+	if (ret)
+		goto out_unlock;
+
+	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
+			  par->set_fb->width, par->set_fb->height);
+
+	/* If there already was stuff dirty we won't
+	 * schedule new work, so let's do it now. */
+
+	schedule_delayed_work(&par->local_work, 0);
+
+out_unlock:
+	if (old_mode)
+		drm_mode_destroy(vmw_priv->dev, old_mode);
+	par->set_mode = mode;
+
+	drm_modeset_unlock_all(vmw_priv->dev);
+	mutex_unlock(&par->bo_mutex);
+
+	return ret;
+}
+
+
 static struct fb_ops vmw_fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = vmw_fb_check_var,
@@ -370,55 +633,14 @@
 	.fb_blank = vmw_fb_blank,
 };
 
-static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
-			    size_t size, struct vmw_dma_buffer **out)
-{
-	struct vmw_dma_buffer *vmw_bo;
-	struct ttm_place ne_place = vmw_vram_ne_placement.placement[0];
-	struct ttm_placement ne_placement;
-	int ret;
-
-	ne_placement.num_placement = 1;
-	ne_placement.placement = &ne_place;
-	ne_placement.num_busy_placement = 1;
-	ne_placement.busy_placement = &ne_place;
-
-	ne_place.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
-	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
-
-	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
-	if (!vmw_bo) {
-		ret = -ENOMEM;
-		goto err_unlock;
-	}
-
-	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
-			      &ne_placement,
-			      false,
-			      &vmw_dmabuf_bo_free);
-	if (unlikely(ret != 0))
-		goto err_unlock; /* init frees the buffer on failure */
-
-	*out = vmw_bo;
-
-	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
-
-	return 0;
-
-err_unlock:
-	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
-	return ret;
-}
-
 int vmw_fb_init(struct vmw_private *vmw_priv)
 {
 	struct device *device = &vmw_priv->dev->pdev->dev;
 	struct vmw_fb_par *par;
 	struct fb_info *info;
-	unsigned initial_width, initial_height;
 	unsigned fb_width, fb_height;
 	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
+	struct drm_display_mode *init_mode;
 	int ret;
 
 	fb_bpp = 32;
@@ -428,9 +650,6 @@
 	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
 	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
 
-	initial_width = min(vmw_priv->initial_width, fb_width);
-	initial_height = min(vmw_priv->initial_height, fb_height);
-
 	fb_pitch = fb_width * fb_bpp / 8;
 	fb_size = fb_pitch * fb_height;
 	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
@@ -444,35 +663,35 @@
 	 */
 	vmw_priv->fb_info = info;
 	par = info->par;
+	memset(par, 0, sizeof(*par));
+	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
 	par->vmw_priv = vmw_priv;
-	par->depth = fb_depth;
-	par->bpp = fb_bpp;
 	par->vmalloc = NULL;
 	par->max_width = fb_width;
 	par->max_height = fb_height;
 
+	drm_modeset_lock_all(vmw_priv->dev);
+	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
+				      par->max_height, &par->con,
+				      &par->crtc, &init_mode);
+	if (ret) {
+		drm_modeset_unlock_all(vmw_priv->dev);
+		goto err_kms;
+	}
+
+	info->var.xres = init_mode->hdisplay;
+	info->var.yres = init_mode->vdisplay;
+	drm_modeset_unlock_all(vmw_priv->dev);
+
 	/*
 	 * Create buffers and alloc memory
 	 */
-	par->vmalloc = vmalloc(fb_size);
+	par->vmalloc = vzalloc(fb_size);
 	if (unlikely(par->vmalloc == NULL)) {
 		ret = -ENOMEM;
 		goto err_free;
 	}
 
-	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
-	if (unlikely(ret != 0))
-		goto err_free;
-
-	ret = ttm_bo_kmap(&par->vmw_bo->base,
-			  0,
-			  par->vmw_bo->base.num_pages,
-			  &par->map);
-	if (unlikely(ret != 0))
-		goto err_unref;
-	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
-	par->bo_size = fb_size;
-
 	/*
 	 * Fixed and var
 	 */
@@ -490,7 +709,7 @@
 	info->fix.smem_len = fb_size;
 
 	info->pseudo_palette = par->pseudo_palette;
-	info->screen_base = par->vmalloc;
+	info->screen_base = (char __iomem *)par->vmalloc;
 	info->screen_size = fb_size;
 
 	info->flags = FBINFO_DEFAULT;
@@ -508,18 +727,14 @@
 
 	info->var.xres_virtual = fb_width;
 	info->var.yres_virtual = fb_height;
-	info->var.bits_per_pixel = par->bpp;
+	info->var.bits_per_pixel = fb_bpp;
 	info->var.xoffset = 0;
 	info->var.yoffset = 0;
 	info->var.activate = FB_ACTIVATE_NOW;
 	info->var.height = -1;
 	info->var.width = -1;
 
-	info->var.xres = initial_width;
-	info->var.yres = initial_height;
-
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
-
 	info->apertures = alloc_apertures(1);
 	if (!info->apertures) {
 		ret = -ENOMEM;
@@ -535,6 +750,7 @@
 	par->dirty.y1 = par->dirty.y2 = 0;
 	par->dirty.active = true;
 	spin_lock_init(&par->dirty.lock);
+	mutex_init(&par->bo_mutex);
 	info->fbdefio = &vmw_defio;
 	fb_deferred_io_init(info);
 
@@ -542,16 +758,16 @@
 	if (unlikely(ret != 0))
 		goto err_defio;
 
+	vmw_fb_set_par(info);
+
 	return 0;
 
 err_defio:
 	fb_deferred_io_cleanup(info);
 err_aper:
-	ttm_bo_kunmap(&par->map);
-err_unref:
-	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
 err_free:
 	vfree(par->vmalloc);
+err_kms:
 	framebuffer_release(info);
 	vmw_priv->fb_info = NULL;
 
@@ -562,22 +778,19 @@
 {
 	struct fb_info *info;
 	struct vmw_fb_par *par;
-	struct ttm_buffer_object *bo;
 
 	if (!vmw_priv->fb_info)
 		return 0;
 
 	info = vmw_priv->fb_info;
 	par = info->par;
-	bo = &par->vmw_bo->base;
-	par->vmw_bo = NULL;
 
 	/* ??? order */
 	fb_deferred_io_cleanup(info);
+	cancel_delayed_work_sync(&par->local_work);
 	unregister_framebuffer(info);
 
-	ttm_bo_kunmap(&par->map);
-	ttm_bo_unref(&bo);
+	(void) vmw_fb_kms_detach(par, true, true);
 
 	vfree(par->vmalloc);
 	framebuffer_release(info);
@@ -602,11 +815,11 @@
 	spin_unlock_irqrestore(&par->dirty.lock, flags);
 
 	flush_delayed_work(&info->deferred_work);
+	flush_delayed_work(&par->local_work);
 
-	par->bo_ptr = NULL;
-	ttm_bo_kunmap(&par->map);
-
-	vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);
+	mutex_lock(&par->bo_mutex);
+	(void) vmw_fb_kms_detach(par, true, false);
+	mutex_unlock(&par->bo_mutex);
 
 	return 0;
 }
@@ -616,8 +829,6 @@
 	struct fb_info *info;
 	struct vmw_fb_par *par;
 	unsigned long flags;
-	bool dummy;
-	int ret;
 
 	if (!vmw_priv->fb_info)
 		return -EINVAL;
@@ -625,38 +836,10 @@
 	info = vmw_priv->fb_info;
 	par = info->par;
 
-	/* we are already active */
-	if (par->bo_ptr != NULL)
-		return 0;
-
-	/* Make sure that all overlays are stoped when we take over */
-	vmw_overlay_stop_all(vmw_priv);
-
-	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("could not move buffer to start of VRAM\n");
-		goto err_no_buffer;
-	}
-
-	ret = ttm_bo_kmap(&par->vmw_bo->base,
-			  0,
-			  par->vmw_bo->base.num_pages,
-			  &par->map);
-	BUG_ON(ret != 0);
-	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
-
+	vmw_fb_set_par(info);
 	spin_lock_irqsave(&par->dirty.lock, flags);
 	par->dirty.active = true;
 	spin_unlock_irqrestore(&par->dirty.lock, flags);
-
-err_no_buffer:
-	vmw_fb_set_par(info);
-
-	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
-
-	/* If there already was stuff dirty we wont
-	 * schedule a new work, so lets do it now */
-	schedule_delayed_work(&info->deferred_work, 0);
-
+
 	return 0;
 }
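The clamp sequence in vmw_fb_dirty_flush() above translates the dirty
rectangle by the pan offset, clips it to the framebuffer, and treats a
degenerate result as empty; the switch to signed s32 arithmetic matters
because, with unsigned types, a rectangle left of or above the pan origin
would wrap rather than clamp to zero. A stand-alone model of the
translate-then-clamp step (coordinate names are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	struct rect { int32_t x1, y1, x2, y2; };

	/* Clip a dirty rect against an fb_w x fb_h framebuffer panned to
	 * (fb_x, fb_y). Returns 0 if nothing is left to copy. */
	static int clip_dirty(struct rect d, uint32_t fb_x, uint32_t fb_y,
			      uint32_t fb_w, uint32_t fb_h, struct rect *out)
	{
		int32_t x1 = d.x1 - (int32_t)fb_x;
		int32_t y1 = d.y1 - (int32_t)fb_y;
		int32_t x2 = d.x2 - (int32_t)fb_x;
		int32_t y2 = d.y2 - (int32_t)fb_y;

		out->x1 = x1 > 0 ? x1 : 0;
		out->y1 = y1 > 0 ? y1 : 0;
		out->x2 = x2 < (int32_t)fb_w ? x2 : (int32_t)fb_w;
		out->y2 = y2 < (int32_t)fb_h ? y2 : (int32_t)fb_h;

		return out->x2 > out->x1 && out->y2 > out->y1;
	}

	int main(void)
	{
		struct rect dirty = { 10, 10, 200, 120 }, c;

		/* Dirty rect partially left of a 100x100 fb panned to x=50. */
		if (clip_dirty(dirty, 50, 0, 100, 100, &c))
			printf("copy (%d,%d)-(%d,%d)\n",
			       c.x1, c.y1, c.x2, c.y2);
		return 0;
	}
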
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 945f1e0..567dded 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -142,7 +142,7 @@
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 	struct vmw_private *dev_priv = fman->dev_priv;
 
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
 		return false;
@@ -386,7 +386,7 @@
 				      u32 passed_seqno)
 {
 	u32 goal_seqno;
-	__le32 __iomem *fifo_mem;
+	u32 __iomem *fifo_mem;
 	struct vmw_fence_obj *fence;
 
 	if (likely(!fman->seqno_valid))
@@ -430,7 +430,7 @@
 {
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 	u32 goal_seqno;
-	__le32 __iomem *fifo_mem;
+	u32 __iomem *fifo_mem;
 
 	if (fence_is_signaled_locked(&fence->base))
 		return false;
@@ -453,7 +453,7 @@
 	struct list_head action_list;
 	bool needs_rerun;
 	uint32_t seqno, new_seqno;
-	__le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
 
 	seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 rerun:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 26a4add..8be6c29 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2012 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 39f2b03..80c40c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -29,9 +29,14 @@
 #include <drm/drmP.h>
 #include <drm/ttm/ttm_placement.h>
 
+struct vmw_temp_set_context {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdDXTempSetContext body;
+};
+
 bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t fifo_min, hwversion;
 	const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
@@ -71,8 +76,8 @@
 	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
 		return false;
 
-	/* Non-Screen Object path does not support surfaces */
-	if (!dev_priv->sou_priv)
+	/* Legacy Display Unit does not support surfaces */
+	if (dev_priv->active_display_unit == vmw_du_legacy)
 		return false;
 
 	return true;
@@ -80,7 +85,7 @@
 
 bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t caps;
 
 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
@@ -95,11 +100,11 @@
 
 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max;
 	uint32_t min;
-	uint32_t dummy;
 
+	fifo->dx = false;
 	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
 	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
 	if (unlikely(fifo->static_buffer == NULL))
@@ -112,10 +117,6 @@
 	mutex_init(&fifo->fifo_mutex);
 	init_rwsem(&fifo->rwsem);
 
-	/*
-	 * Allow mapping the first page read-only to user-space.
-	 */
-
 	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
 	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
 	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
@@ -123,7 +124,10 @@
 	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
 	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
 	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
-	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
+
+	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
+		  SVGA_REG_ENABLE_HIDE);
+	vmw_write(dev_priv, SVGA_REG_TRACES, 0);
 
 	min = 4;
 	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
@@ -155,12 +159,13 @@
 	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
 	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
 	vmw_marker_queue_init(&fifo->marker_queue);
-	return vmw_fifo_send_fence(dev_priv, &dummy);
+
+	return 0;
 }
 
 void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	static DEFINE_SPINLOCK(ping_lock);
 	unsigned long irq_flags;
 
@@ -178,7 +183,7 @@
 
 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 
 	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
 	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
@@ -208,7 +213,7 @@
 
 static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
 	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
 	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -312,10 +317,11 @@
  * Returns:
  *   Pointer to the fifo, or null on error (possible hardware hang).
  */
-void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
+				    uint32_t bytes)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max;
 	uint32_t min;
 	uint32_t next_cmd;
@@ -372,7 +378,8 @@
 				if (reserveable)
 					iowrite32(bytes, fifo_mem +
 						  SVGA_FIFO_RESERVED);
-				return fifo_mem + (next_cmd >> 2);
+				return (void __force *) (fifo_mem +
+							 (next_cmd >> 2));
 			} else {
 				need_bounce = true;
 			}
@@ -391,11 +398,36 @@
 out_err:
 	fifo_state->reserved_size = 0;
 	mutex_unlock(&fifo_state->fifo_mutex);
+
 	return NULL;
 }
 
+void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
+			  int ctx_id)
+{
+	void *ret;
+
+	if (dev_priv->cman)
+		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
+					 ctx_id, false, NULL);
+	else if (ctx_id == SVGA3D_INVALID_ID)
+		ret = vmw_local_fifo_reserve(dev_priv, bytes);
+	else {
+		WARN(1, "Command buffer has not been allocated.\n");
+		ret = NULL;
+	}
+	if (IS_ERR_OR_NULL(ret)) {
+		DRM_ERROR("Fifo reserve failure of %u bytes.\n",
+			  (unsigned) bytes);
+		dump_stack();
+		return NULL;
+	}
+
+	return ret;
+}
+
 static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
-			      __le32 __iomem *fifo_mem,
+			      u32 __iomem *fifo_mem,
 			      uint32_t next_cmd,
 			      uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -417,7 +449,7 @@
 }
 
 static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
-			       __le32 __iomem *fifo_mem,
+			       u32 __iomem *fifo_mem,
 			       uint32_t next_cmd,
 			       uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -436,15 +468,19 @@
 	}
 }
 
-void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
 	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
 	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
 	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
 
+	if (fifo_state->dx)
+		bytes += sizeof(struct vmw_temp_set_context);
+
+	fifo_state->dx = false;
 	BUG_ON((bytes & 3) != 0);
 	BUG_ON(bytes > fifo_state->reserved_size);
 
@@ -482,13 +518,53 @@
 	mutex_unlock(&fifo_state->fifo_mutex);
 }
 
+void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	if (dev_priv->cman)
+		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
+	else
+		vmw_local_fifo_commit(dev_priv, bytes);
+}
+
+
+/**
+ * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @bytes: Number of bytes to commit.
+ */
+void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	if (dev_priv->cman)
+		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
+	else
+		vmw_local_fifo_commit(dev_priv, bytes);
+}
+
+/**
+ * vmw_fifo_flush - Flush any buffered commands and make sure command processing
+ * starts.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @interruptible: Whether to wait interruptible if function needs to sleep.
+ */
+int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
+{
+	might_sleep();
+
+	if (dev_priv->cman)
+		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
+	else
+		return 0;
+}
+
 int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
 	struct svga_fifo_cmd_fence *cmd_fence;
-	void *fm;
+	u32 *fm;
 	int ret = 0;
-	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
+	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
 
 	fm = vmw_fifo_reserve(dev_priv, bytes);
 	if (unlikely(fm == NULL)) {
@@ -514,12 +590,10 @@
 		return 0;
 	}
 
-	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
-	cmd_fence = (struct svga_fifo_cmd_fence *)
-	    ((unsigned long)fm + sizeof(__le32));
-
-	iowrite32(*seqno, &cmd_fence->fence);
-	vmw_fifo_commit(dev_priv, bytes);
+	*fm++ = SVGA_CMD_FENCE;
+	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
+	cmd_fence->fence = *seqno;
+	vmw_fifo_commit_flush(dev_priv, bytes);
 	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
 	vmw_update_seqno(dev_priv, fifo_state);
 
@@ -545,7 +619,7 @@
 	 * without writing to the query result structure.
 	 */
 
-	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForQuery body;
@@ -594,7 +668,7 @@
 	 * without writing to the query result structure.
 	 */
 
-	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForGBQuery body;
@@ -647,3 +721,8 @@
 
 	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
 }
+
+void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
+}
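With vmw_fifo_reserve() now a thin wrapper around vmw_fifo_reserve_dx(),
every emitter follows the same reserve / fill / commit shape whether the
command buffer manager or the legacy fifo backs it. A minimal,
non-compilable sketch of that shape, modelled on vmw_fifo_send_fence()
above (the DX command and its field names are illustrative):

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXTempSetContext body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;	/* reserve has already logged the error */

	cmd->header.id = SVGA_3D_CMD_DX_TEMP_SET_CONTEXT; /* illustrative */
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;

	/* vmw_fifo_commit() may batch; vmw_fifo_commit_flush() additionally
	 * kicks the device, as vmw_fifo_send_fence() does for fences. */
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
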
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 61d8d80..66ffa1d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 69c8ce23..0a970af 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -28,6 +28,7 @@
 #include "vmwgfx_drv.h"
 #include <drm/vmwgfx_drm.h>
 #include "vmwgfx_kms.h"
+#include "device_include/svga3d_caps.h"
 
 struct svga_3d_compat_cap {
 	SVGA3dCapsRecordHeader header;
@@ -63,7 +64,7 @@
 		break;
 	case DRM_VMW_PARAM_FIFO_HW_VERSION:
 	{
-		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+		u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 		const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
 		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
@@ -105,6 +106,13 @@
 	case DRM_VMW_PARAM_MAX_MOB_SIZE:
 		param->value = dev_priv->max_mob_size;
 		break;
+	case DRM_VMW_PARAM_SCREEN_TARGET:
+		param->value =
+			(dev_priv->active_display_unit == vmw_du_screen_target);
+		break;
+	case DRM_VMW_PARAM_DX:
+		param->value = dev_priv->has_dx;
+		break;
 	default:
 		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
 			  param->param);
@@ -154,7 +162,7 @@
 		(struct drm_vmw_get_3d_cap_arg *) data;
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	uint32_t size;
-	__le32 __iomem *fifo_mem;
+	u32 __iomem *fifo_mem;
 	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
 	void *bounce;
 	int ret;
@@ -235,7 +243,7 @@
 	int ret;
 
 	num_clips = arg->num_clips;
-	clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
 
 	if (unlikely(num_clips == 0))
 		return 0;
@@ -318,7 +326,7 @@
 	int ret;
 
 	num_clips = arg->num_clips;
-	clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
 
 	if (unlikely(num_clips == 0))
 		return 0;
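The new DRM_VMW_PARAM_SCREEN_TARGET and DRM_VMW_PARAM_DX parameters let
user-space probe for the screen-target display unit and DX capabilities
before relying on them. A hedged user-space sketch using libdrm's
drmCommandWriteRead() (assumes the updated vmwgfx_drm.h header is
installed; error handling abbreviated):

	#include <stdint.h>
	#include <stdio.h>
	#include <xf86drm.h>
	#include <drm/vmwgfx_drm.h>

	static int vmw_get_param(int fd, uint32_t param, uint64_t *value)
	{
		struct drm_vmw_getparam_arg arg = { .param = param };
		int ret = drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
					      &arg, sizeof(arg));

		if (ret == 0)
			*value = arg.value;
		return ret;
	}

	int main(void)
	{
		int fd = drmOpen("vmwgfx", NULL);
		uint64_t has_dx = 0, has_stdu = 0;

		if (fd < 0)
			return 1;

		if (vmw_get_param(fd, DRM_VMW_PARAM_DX, &has_dx) == 0)
			printf("DX contexts: %s\n", has_dx ? "yes" : "no");
		if (vmw_get_param(fd, DRM_VMW_PARAM_SCREEN_TARGET,
				  &has_stdu) == 0)
			printf("screen targets: %s\n",
			       has_stdu ? "yes" : "no");

		drmClose(fd);
		return 0;
	}
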
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 9fe9827..9498a5e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -56,6 +56,9 @@
 	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
 		wake_up_all(&dev_priv->fifo_queue);
 
+	if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
+			     SVGA_IRQFLAG_ERROR))
+		vmw_cmdbuf_tasklet_schedule(dev_priv->cman);
 
 	return IRQ_HANDLED;
 }
@@ -69,7 +72,7 @@
 void vmw_update_seqno(struct vmw_private *dev_priv,
 			 struct vmw_fifo_state *fifo_state)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
 	if (dev_priv->last_read_seqno != seqno) {
@@ -131,8 +134,16 @@
 	 * Block command submission while waiting for idle.
 	 */
 
-	if (fifo_idle)
+	if (fifo_idle) {
 		down_read(&fifo_state->rwsem);
+		if (dev_priv->cman) {
+			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
+					      10*HZ);
+			if (ret)
+				goto out_err;
+		}
+	}
+
 	signal_seq = atomic_read(&dev_priv->marker_seq);
 	ret = 0;
 
@@ -167,10 +178,11 @@
 	}
 	finish_wait(&dev_priv->fence_queue, &__wait);
 	if (ret == 0 && fifo_idle) {
-		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+		u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
 	}
 	wake_up_all(&dev_priv->fence_queue);
+out_err:
 	if (fifo_idle)
 		up_read(&fifo_state->rwsem);
 
@@ -315,3 +327,30 @@
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 }
+
+void vmw_generic_waiter_add(struct vmw_private *dev_priv,
+			    u32 flag, int *waiter_count)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+	if ((*waiter_count)++ == 0) {
+		outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+		dev_priv->irq_mask |= flag;
+		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+}
+
+void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+			       u32 flag, int *waiter_count)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+	if (--(*waiter_count) == 0) {
+		dev_priv->irq_mask &= ~flag;
+		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+}
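vmw_generic_waiter_add()/remove() above generalize the
enable-on-first-waiter / disable-on-last-waiter idiom the driver already
used for fence and fifo waiters: the IRQ is unmasked only while the shared
count is non-zero, so nested users pay for at most one register write in
each direction. A stand-alone model of the idiom, with a pthread mutex
standing in for dev_priv->irq_lock and a plain variable for the
SVGA_REG_IRQMASK register:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned int irq_mask;	/* models SVGA_REG_IRQMASK */

	static void waiter_add(unsigned int flag, int *count)
	{
		pthread_mutex_lock(&irq_lock);
		if ((*count)++ == 0) {
			irq_mask |= flag;	/* first waiter: unmask */
			printf("mask now 0x%x\n", irq_mask);
		}
		pthread_mutex_unlock(&irq_lock);
	}

	static void waiter_remove(unsigned int flag, int *count)
	{
		pthread_mutex_lock(&irq_lock);
		if (--(*count) == 0) {
			irq_mask &= ~flag;	/* last waiter: mask again */
			printf("mask now 0x%x\n", irq_mask);
		}
		pthread_mutex_unlock(&irq_lock);
	}

	int main(void)
	{
		int waiters = 0;

		/* 0x20 stands in for some SVGA_IRQFLAG_* bit. */
		waiter_add(0x20, &waiters);
		waiter_add(0x20, &waiters);	/* no register write */
		waiter_remove(0x20, &waiters);
		waiter_remove(0x20, &waiters);	/* mask restored */
		return 0;
	}
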
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 07cda8c..61fb7f3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,45 +31,7 @@
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
-
-struct vmw_clip_rect {
-	int x1, x2, y1, y2;
-};
-
-/**
- * Clip @num_rects number of @rects against @clip storing the
- * results in @out_rects and the number of passed rects in @out_num.
- */
-static void vmw_clip_cliprects(struct drm_clip_rect *rects,
-			int num_rects,
-			struct vmw_clip_rect clip,
-			SVGASignedRect *out_rects,
-			int *out_num)
-{
-	int i, k;
-
-	for (i = 0, k = 0; i < num_rects; i++) {
-		int x1 = max_t(int, clip.x1, rects[i].x1);
-		int y1 = max_t(int, clip.y1, rects[i].y1);
-		int x2 = min_t(int, clip.x2, rects[i].x2);
-		int y2 = min_t(int, clip.y2, rects[i].y2);
-
-		if (x1 >= x2)
-			continue;
-		if (y1 >= y2)
-			continue;
-
-		out_rects[k].left   = x1;
-		out_rects[k].top    = y1;
-		out_rects[k].right  = x2;
-		out_rects[k].bottom = y2;
-		k++;
-	}
-
-	*out_num = k;
-}
-
-void vmw_display_unit_cleanup(struct vmw_display_unit *du)
+void vmw_du_cleanup(struct vmw_display_unit *du)
 {
 	if (du->cursor_surface)
 		vmw_surface_unreference(&du->cursor_surface);
@@ -109,12 +71,12 @@
 
 	memcpy(&cmd[1], image, image_size);
 
-	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
-	cmd->cursor.id = cpu_to_le32(0);
-	cmd->cursor.width = cpu_to_le32(width);
-	cmd->cursor.height = cpu_to_le32(height);
-	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
-	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
+	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
+	cmd->cursor.id = 0;
+	cmd->cursor.width = width;
+	cmd->cursor.height = height;
+	cmd->cursor.hotspotX = hotspotX;
+	cmd->cursor.hotspotY = hotspotY;
 
 	vmw_fifo_commit(dev_priv, cmd_size);
 
@@ -161,7 +123,7 @@
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
 				bool show, int x, int y)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t count;
 
 	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
@@ -367,15 +329,6 @@
 
 	srf->snooper.age++;
 
-	/* we can't call this function from this function since execbuf has
-	 * reserved fifo space.
-	 *
-	 * if (srf->snooper.crtc)
-	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
-	 *					 srf->snooper.image, 64, 64,
-	 *					 du->hotspot_x, du->hotspot_y);
-	 */
-
 	ttm_bo_kunmap(&map);
 err_unreserve:
 	ttm_bo_unreserve(bo);
@@ -412,183 +365,19 @@
  * Surface framebuffer code
  */
 
-#define vmw_framebuffer_to_vfbs(x) \
-	container_of(x, struct vmw_framebuffer_surface, base.base)
-
-struct vmw_framebuffer_surface {
-	struct vmw_framebuffer base;
-	struct vmw_surface *surface;
-	struct vmw_dma_buffer *buffer;
-	struct list_head head;
-	struct drm_master *master;
-};
-
 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 {
 	struct vmw_framebuffer_surface *vfbs =
 		vmw_framebuffer_to_vfbs(framebuffer);
-	struct vmw_master *vmaster = vmw_master(vfbs->master);
 
-
-	mutex_lock(&vmaster->fb_surf_mutex);
-	list_del(&vfbs->head);
-	mutex_unlock(&vmaster->fb_surf_mutex);
-
-	drm_master_put(&vfbs->master);
 	drm_framebuffer_cleanup(framebuffer);
 	vmw_surface_unreference(&vfbs->surface);
-	ttm_base_object_unref(&vfbs->base.user_obj);
+	if (vfbs->base.user_obj)
+		ttm_base_object_unref(&vfbs->base.user_obj);
 
 	kfree(vfbs);
 }
 
-static int do_surface_dirty_sou(struct vmw_private *dev_priv,
-				struct drm_file *file_priv,
-				struct vmw_framebuffer *framebuffer,
-				unsigned flags, unsigned color,
-				struct drm_clip_rect *clips,
-				unsigned num_clips, int inc,
-				struct vmw_fence_obj **out_fence)
-{
-	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
-	struct drm_clip_rect *clips_ptr;
-	struct drm_clip_rect *tmp;
-	struct drm_crtc *crtc;
-	size_t fifo_size;
-	int i, num_units;
-	int ret = 0; /* silence warning */
-	int left, right, top, bottom;
-
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdBlitSurfaceToScreen body;
-	} *cmd;
-	SVGASignedRect *blits;
-
-	num_units = 0;
-	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
-			    head) {
-		if (crtc->primary->fb != &framebuffer->base)
-			continue;
-		units[num_units++] = vmw_crtc_to_du(crtc);
-	}
-
-	BUG_ON(!clips || !num_clips);
-
-	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
-	if (unlikely(tmp == NULL)) {
-		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
-		return -ENOMEM;
-	}
-
-	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
-	cmd = kzalloc(fifo_size, GFP_KERNEL);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Temporary fifo memory alloc failed.\n");
-		ret = -ENOMEM;
-		goto out_free_tmp;
-	}
-
-	/* setup blits pointer */
-	blits = (SVGASignedRect *)&cmd[1];
-
-	/* initial clip region */
-	left = clips->x1;
-	right = clips->x2;
-	top = clips->y1;
-	bottom = clips->y2;
-
-	/* skip the first clip rect */
-	for (i = 1, clips_ptr = clips + inc;
-	     i < num_clips; i++, clips_ptr += inc) {
-		left = min_t(int, left, (int)clips_ptr->x1);
-		right = max_t(int, right, (int)clips_ptr->x2);
-		top = min_t(int, top, (int)clips_ptr->y1);
-		bottom = max_t(int, bottom, (int)clips_ptr->y2);
-	}
-
-	/* only need to do this once */
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
-	cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
-	cmd->body.srcRect.left = left;
-	cmd->body.srcRect.right = right;
-	cmd->body.srcRect.top = top;
-	cmd->body.srcRect.bottom = bottom;
-
-	clips_ptr = clips;
-	for (i = 0; i < num_clips; i++, clips_ptr += inc) {
-		tmp[i].x1 = clips_ptr->x1 - left;
-		tmp[i].x2 = clips_ptr->x2 - left;
-		tmp[i].y1 = clips_ptr->y1 - top;
-		tmp[i].y2 = clips_ptr->y2 - top;
-	}
-
-	/* do per unit writing, reuse fifo for each */
-	for (i = 0; i < num_units; i++) {
-		struct vmw_display_unit *unit = units[i];
-		struct vmw_clip_rect clip;
-		int num;
-
-		clip.x1 = left - unit->crtc.x;
-		clip.y1 = top - unit->crtc.y;
-		clip.x2 = right - unit->crtc.x;
-		clip.y2 = bottom - unit->crtc.y;
-
-		/* skip any crtcs that misses the clip region */
-		if (clip.x1 >= unit->crtc.mode.hdisplay ||
-		    clip.y1 >= unit->crtc.mode.vdisplay ||
-		    clip.x2 <= 0 || clip.y2 <= 0)
-			continue;
-
-		/*
-		 * In order for the clip rects to be correctly scaled
-		 * the src and dest rects needs to be the same size.
-		 */
-		cmd->body.destRect.left = clip.x1;
-		cmd->body.destRect.right = clip.x2;
-		cmd->body.destRect.top = clip.y1;
-		cmd->body.destRect.bottom = clip.y2;
-
-		/* create a clip rect of the crtc in dest coords */
-		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
-		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
-		clip.x1 = 0 - clip.x1;
-		clip.y1 = 0 - clip.y1;
-
-		/* need to reset sid as it is changed by execbuf */
-		cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
-		cmd->body.destScreenId = unit->unit;
-
-		/* clip and write blits to cmd stream */
-		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
-
-		/* if no cliprects hit skip this */
-		if (num == 0)
-			continue;
-
-		/* only return the last fence */
-		if (out_fence && *out_fence)
-			vmw_fence_obj_unreference(out_fence);
-
-		/* recalculate package length */
-		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
-		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
-					  fifo_size, 0, NULL, out_fence);
-
-		if (unlikely(ret != 0))
-			break;
-	}
-
-
-	kfree(cmd);
-out_free_tmp:
-	kfree(tmp);
-
-	return ret;
-}
-
 static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 				  struct drm_file *file_priv,
 				  unsigned flags, unsigned color,
@@ -601,11 +390,8 @@
 	struct drm_clip_rect norect;
 	int ret, inc = 1;
 
-	if (unlikely(vfbs->master != file_priv->master))
-		return -EINVAL;
-
-	/* Require ScreenObject support for 3D */
-	if (!dev_priv->sou_priv)
+	/* Legacy Display Unit does not support 3D */
+	if (dev_priv->active_display_unit == vmw_du_legacy)
 		return -EINVAL;
 
 	drm_modeset_lock_all(dev_priv->dev);
@@ -627,10 +413,16 @@
 		inc = 2; /* skip source rects */
 	}
 
-	ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
-				   flags, color,
-				   clips, num_clips, inc, NULL);
+	if (dev_priv->active_display_unit == vmw_du_screen_object)
+		ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
+						   clips, NULL, NULL, 0, 0,
+						   num_clips, inc, NULL);
+	else
+		ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
+						 clips, NULL, NULL, 0, 0,
+						 num_clips, inc, NULL);
 
+	vmw_fifo_flush(dev_priv, false);
 	ttm_read_unlock(&dev_priv->reservation_sem);
 
 	drm_modeset_unlock_all(dev_priv->dev);
@@ -638,27 +430,66 @@
 	return 0;
 }
 
+/**
+ * vmw_kms_readback - Perform a readback from the screen system to
+ * a dma-buffer backed framebuffer.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * Must be set to NULL if @user_fence_rep is NULL.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * Must be set to non-NULL if @file_priv is non-NULL.
+ * @vclips: Array of clip rects.
+ * @num_clips: Number of clip rects in @vclips.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_readback(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_vmw_rect *vclips,
+		     uint32_t num_clips)
+{
+	switch (dev_priv->active_display_unit) {
+	case vmw_du_screen_object:
+		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
+					    user_fence_rep, vclips, num_clips);
+	case vmw_du_screen_target:
+		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
+					user_fence_rep, NULL, vclips, num_clips,
+					1, false, true);
+	default:
+		WARN_ONCE(true,
+			  "Readback called with invalid display system.\n");
+	}
+
+	return -ENOSYS;
+}
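
A sketch of how a kernel-internal caller might use the dispatcher above, honouring the NULL-pairing rule stated in the kerneldoc; example_readback_region is invented for illustration and is not part of the patch.

static int example_readback_region(struct vmw_private *dev_priv,
				   struct vmw_framebuffer *vfb)
{
	struct drm_vmw_rect vclip = { .x = 0, .y = 0, .w = 64, .h = 64 };

	/* No user-space fence report, so file_priv must be NULL too. */
	return vmw_kms_readback(dev_priv, NULL, vfb, NULL, &vclip, 1);
}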
+
+
 static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
 	.destroy = vmw_framebuffer_surface_destroy,
 	.dirty = vmw_framebuffer_surface_dirty,
 };
 
 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
-					   struct drm_file *file_priv,
 					   struct vmw_surface *surface,
 					   struct vmw_framebuffer **out,
 					   const struct drm_mode_fb_cmd
-					   *mode_cmd)
+					   *mode_cmd,
+					   bool is_dmabuf_proxy)
 
 {
 	struct drm_device *dev = dev_priv->dev;
 	struct vmw_framebuffer_surface *vfbs;
 	enum SVGA3dSurfaceFormat format;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
-	/* 3D is only supported on HWv8 hosts which supports screen objects */
-	if (!dev_priv->sou_priv)
+	/* 3D is only supported on HWv8 and newer hosts */
+	if (dev_priv->active_display_unit == vmw_du_legacy)
 		return -ENOSYS;
 
 	/*
@@ -692,15 +523,16 @@
 	case 15:
 		format = SVGA3D_A1R5G5B5;
 		break;
-	case 8:
-		format = SVGA3D_LUMINANCE8;
-		break;
 	default:
 		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
 		return -EINVAL;
 	}
 
-	if (unlikely(format != surface->format)) {
+	/*
+	 * For DX, surface format validation is done when surface->scanout
+	 * is set.
+	 */
+	if (!dev_priv->has_dx && format != surface->format) {
 		DRM_ERROR("Invalid surface format for requested mode.\n");
 		return -EINVAL;
 	}
@@ -711,38 +543,27 @@
 		goto out_err1;
 	}
 
-	if (!vmw_surface_reference(surface)) {
-		DRM_ERROR("failed to reference surface %p\n", surface);
-		ret = -EINVAL;
-		goto out_err2;
-	}
-
 	/* XXX get the first 3 from the surface info */
 	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
 	vfbs->base.base.pitches[0] = mode_cmd->pitch;
 	vfbs->base.base.depth = mode_cmd->depth;
 	vfbs->base.base.width = mode_cmd->width;
 	vfbs->base.base.height = mode_cmd->height;
-	vfbs->surface = surface;
+	vfbs->surface = vmw_surface_reference(surface);
 	vfbs->base.user_handle = mode_cmd->handle;
-	vfbs->master = drm_master_get(file_priv->master);
-
-	mutex_lock(&vmaster->fb_surf_mutex);
-	list_add_tail(&vfbs->head, &vmaster->fb_surf);
-	mutex_unlock(&vmaster->fb_surf_mutex);
+	vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
 
 	*out = &vfbs->base;
 
 	ret = drm_framebuffer_init(dev, &vfbs->base.base,
 				   &vmw_framebuffer_surface_funcs);
 	if (ret)
-		goto out_err3;
+		goto out_err2;
 
 	return 0;
 
-out_err3:
-	vmw_surface_unreference(&surface);
 out_err2:
+	vmw_surface_unreference(&surface);
 	kfree(vfbs);
 out_err1:
 	return ret;
@@ -752,14 +573,6 @@
  * Dmabuf framebuffer code
  */
 
-#define vmw_framebuffer_to_vfbd(x) \
-	container_of(x, struct vmw_framebuffer_dmabuf, base.base)
-
-struct vmw_framebuffer_dmabuf {
-	struct vmw_framebuffer base;
-	struct vmw_dma_buffer *buffer;
-};
-
 static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
 {
 	struct vmw_framebuffer_dmabuf *vfbd =
@@ -767,185 +580,12 @@
 
 	drm_framebuffer_cleanup(framebuffer);
 	vmw_dmabuf_unreference(&vfbd->buffer);
-	ttm_base_object_unref(&vfbd->base.user_obj);
+	if (vfbd->base.user_obj)
+		ttm_base_object_unref(&vfbd->base.user_obj);
 
 	kfree(vfbd);
 }
 
-static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
-			       struct vmw_framebuffer *framebuffer,
-			       unsigned flags, unsigned color,
-			       struct drm_clip_rect *clips,
-			       unsigned num_clips, int increment)
-{
-	size_t fifo_size;
-	int i;
-
-	struct {
-		uint32_t header;
-		SVGAFifoCmdUpdate body;
-	} *cmd;
-
-	fifo_size = sizeof(*cmd) * num_clips;
-	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Fifo reserve failed.\n");
-		return -ENOMEM;
-	}
-
-	memset(cmd, 0, fifo_size);
-	for (i = 0; i < num_clips; i++, clips += increment) {
-		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
-		cmd[i].body.x = cpu_to_le32(clips->x1);
-		cmd[i].body.y = cpu_to_le32(clips->y1);
-		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
-		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
-	}
-
-	vmw_fifo_commit(dev_priv, fifo_size);
-	return 0;
-}
-
-static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
-				  struct vmw_private *dev_priv,
-				  struct vmw_framebuffer *framebuffer)
-{
-	int depth = framebuffer->base.depth;
-	size_t fifo_size;
-	int ret;
-
-	struct {
-		uint32_t header;
-		SVGAFifoCmdDefineGMRFB body;
-	} *cmd;
-
-	/* Emulate RGBA support, contrary to svga_reg.h this is not
-	 * supported by hosts. This is only a problem if we are reading
-	 * this value later and expecting what we uploaded back.
-	 */
-	if (depth == 32)
-		depth = 24;
-
-	fifo_size = sizeof(*cmd);
-	cmd = kmalloc(fifo_size, GFP_KERNEL);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
-		return -ENOMEM;
-	}
-
-	memset(cmd, 0, fifo_size);
-	cmd->header = SVGA_CMD_DEFINE_GMRFB;
-	cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
-	cmd->body.format.colorDepth = depth;
-	cmd->body.format.reserved = 0;
-	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
-	cmd->body.ptr.gmrId = framebuffer->user_handle;
-	cmd->body.ptr.offset = 0;
-
-	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
-				  fifo_size, 0, NULL, NULL);
-
-	kfree(cmd);
-
-	return ret;
-}
-
-static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
-			       struct vmw_private *dev_priv,
-			       struct vmw_framebuffer *framebuffer,
-			       unsigned flags, unsigned color,
-			       struct drm_clip_rect *clips,
-			       unsigned num_clips, int increment,
-			       struct vmw_fence_obj **out_fence)
-{
-	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
-	struct drm_clip_rect *clips_ptr;
-	int i, k, num_units, ret;
-	struct drm_crtc *crtc;
-	size_t fifo_size;
-
-	struct {
-		uint32_t header;
-		SVGAFifoCmdBlitGMRFBToScreen body;
-	} *blits;
-
-	ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
-	if (unlikely(ret != 0))
-		return ret; /* define_gmrfb prints warnings */
-
-	fifo_size = sizeof(*blits) * num_clips;
-	blits = kmalloc(fifo_size, GFP_KERNEL);
-	if (unlikely(blits == NULL)) {
-		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
-		return -ENOMEM;
-	}
-
-	num_units = 0;
-	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
-		if (crtc->primary->fb != &framebuffer->base)
-			continue;
-		units[num_units++] = vmw_crtc_to_du(crtc);
-	}
-
-	for (k = 0; k < num_units; k++) {
-		struct vmw_display_unit *unit = units[k];
-		int hit_num = 0;
-
-		clips_ptr = clips;
-		for (i = 0; i < num_clips; i++, clips_ptr += increment) {
-			int clip_x1 = clips_ptr->x1 - unit->crtc.x;
-			int clip_y1 = clips_ptr->y1 - unit->crtc.y;
-			int clip_x2 = clips_ptr->x2 - unit->crtc.x;
-			int clip_y2 = clips_ptr->y2 - unit->crtc.y;
-			int move_x, move_y;
-
-			/* skip any crtcs that misses the clip region */
-			if (clip_x1 >= unit->crtc.mode.hdisplay ||
-			    clip_y1 >= unit->crtc.mode.vdisplay ||
-			    clip_x2 <= 0 || clip_y2 <= 0)
-				continue;
-
-			/* clip size to crtc size */
-			clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
-			clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
-
-			/* translate both src and dest to bring clip into screen */
-			move_x = min_t(int, clip_x1, 0);
-			move_y = min_t(int, clip_y1, 0);
-
-			/* actual translate done here */
-			blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
-			blits[hit_num].body.destScreenId = unit->unit;
-			blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
-			blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
-			blits[hit_num].body.destRect.left = clip_x1 - move_x;
-			blits[hit_num].body.destRect.top = clip_y1 - move_y;
-			blits[hit_num].body.destRect.right = clip_x2;
-			blits[hit_num].body.destRect.bottom = clip_y2;
-			hit_num++;
-		}
-
-		/* no clips hit the crtc */
-		if (hit_num == 0)
-			continue;
-
-		/* only return the last fence */
-		if (out_fence && *out_fence)
-			vmw_fence_obj_unreference(out_fence);
-
-		fifo_size = sizeof(*blits) * hit_num;
-		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
-					  fifo_size, 0, NULL, out_fence);
-
-		if (unlikely(ret != 0))
-			break;
-	}
-
-	kfree(blits);
-
-	return ret;
-}
-
 static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 				 struct drm_file *file_priv,
 				 unsigned flags, unsigned color,
@@ -977,16 +617,29 @@
 		increment = 2;
 	}
 
-	if (dev_priv->ldu_priv) {
-		ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base,
-					  flags, color,
-					  clips, num_clips, increment);
-	} else {
-		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
-					  flags, color,
-					  clips, num_clips, increment, NULL);
+	switch (dev_priv->active_display_unit) {
+	case vmw_du_screen_target:
+		ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
+				       clips, NULL, num_clips, increment,
+				       true, true);
+		break;
+	case vmw_du_screen_object:
+		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
+						  clips, num_clips, increment,
+						  true,
+						  NULL);
+		break;
+	case vmw_du_legacy:
+		ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
+						  clips, num_clips, increment);
+		break;
+	default:
+		ret = -EINVAL;
+		WARN_ONCE(true, "Dirty called with invalid display system.\n");
+		break;
 	}
 
+	vmw_fifo_flush(dev_priv, false);
 	ttm_read_unlock(&dev_priv->reservation_sem);
 
 	drm_modeset_unlock_all(dev_priv->dev);
@@ -1002,40 +655,132 @@
 /**
  * Pin the dmabuffer to the start of vram.
  */
-static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
+static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
 {
 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
-	struct vmw_framebuffer_dmabuf *vfbd =
-		vmw_framebuffer_to_vfbd(&vfb->base);
+	struct vmw_dma_buffer *buf;
 	int ret;
 
-	/* This code should not be used with screen objects */
-	BUG_ON(dev_priv->sou_priv);
+	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
 
-	vmw_overlay_pause_all(dev_priv);
+	if (!buf)
+		return 0;
 
-	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);
+	switch (dev_priv->active_display_unit) {
+	case vmw_du_legacy:
+		vmw_overlay_pause_all(dev_priv);
+		ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
+		vmw_overlay_resume_all(dev_priv);
+		break;
+	case vmw_du_screen_object:
+	case vmw_du_screen_target:
+		if (vfb->dmabuf)
+			return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
+							     false);
 
-	vmw_overlay_resume_all(dev_priv);
+		return vmw_dmabuf_pin_in_placement(dev_priv, buf,
+						   &vmw_mob_placement, false);
+	default:
+		return -EINVAL;
+	}
 
-	WARN_ON(ret != 0);
+	return ret;
+}
+
+static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
+{
+	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+	struct vmw_dma_buffer *buf;
+
+	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
+
+	if (WARN_ON(!buf))
+		return 0;
+
+	return vmw_dmabuf_unpin(dev_priv, buf, false);
+}
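
The pin/unpin pair above is installed as vfb->pin and vfb->unpin by vmw_kms_new_framebuffer() further down; a caller toggling scanout would use the hooks roughly as sketched below (illustrative only, example_set_scanout is not in the patch).

static int example_set_scanout(struct vmw_framebuffer *vfb, bool on)
{
	if (on)
		return vfb->pin ? vfb->pin(vfb) : 0;

	return vfb->unpin ? vfb->unpin(vfb) : 0;
}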
+
+/**
+ * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
+ *
+ * @dev: DRM device
+ * @mode_cmd: parameters for the new surface
+ * @dmabuf_mob: MOB backing the DMA buf
+ * @srf_out: newly created surface
+ *
+ * When the content FB is a DMA buf, we create a surface as a proxy to the
+ * same buffer.  This way we can do a surface copy rather than a surface DMA.
+ * This is a more efficient approach.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+static int vmw_create_dmabuf_proxy(struct drm_device *dev,
+				   const struct drm_mode_fb_cmd *mode_cmd,
+				   struct vmw_dma_buffer *dmabuf_mob,
+				   struct vmw_surface **srf_out)
+{
+	uint32_t format;
+	struct drm_vmw_size content_base_size;
+	struct vmw_resource *res;
+	int ret;
+
+	switch (mode_cmd->depth) {
+	case 32:
+	case 24:
+		format = SVGA3D_X8R8G8B8;
+		break;
+
+	case 16:
+	case 15:
+		format = SVGA3D_R5G6B5;
+		break;
+
+	case 8:
+		format = SVGA3D_P8;
+		break;
+
+	default:
+		DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth);
+		return -EINVAL;
+	}
+
+	content_base_size.width  = mode_cmd->width;
+	content_base_size.height = mode_cmd->height;
+	content_base_size.depth  = 1;
+
+	ret = vmw_surface_gb_priv_define(dev,
+			0, /* kernel visible only */
+			0, /* flags */
+			format,
+			true, /* can be a scanout buffer */
+			1, /* num of mip levels */
+			0,
+			0,
+			content_base_size,
+			srf_out);
+	if (ret) {
+		DRM_ERROR("Failed to allocate proxy content buffer\n");
+		return ret;
+	}
+
+	res = &(*srf_out)->res;
+
+	/* Reserve and switch the backing mob. */
+	mutex_lock(&res->dev_priv->cmdbuf_mutex);
+	(void) vmw_resource_reserve(res, false, true);
+	vmw_dmabuf_unreference(&res->backup);
+	res->backup = vmw_dmabuf_reference(dmabuf_mob);
+	res->backup_offset = 0;
+	vmw_resource_unreserve(res, false, NULL, 0);
+	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 
 	return 0;
 }
 
-static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
-{
-	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
-	struct vmw_framebuffer_dmabuf *vfbd =
-		vmw_framebuffer_to_vfbd(&vfb->base);
 
-	if (!vfbd->buffer) {
-		WARN_ON(!vfbd->buffer);
-		return 0;
-	}
-
-	return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
-}
 
 static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
 					  struct vmw_dma_buffer *dmabuf,
@@ -1057,7 +802,7 @@
 	}
 
 	/* Limited framebuffer color depth support for screen objects */
-	if (dev_priv->sou_priv) {
+	if (dev_priv->active_display_unit == vmw_du_screen_object) {
 		switch (mode_cmd->depth) {
 		case 32:
 		case 24:
@@ -1089,41 +834,96 @@
 		goto out_err1;
 	}
 
-	if (!vmw_dmabuf_reference(dmabuf)) {
-		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
-		ret = -EINVAL;
-		goto out_err2;
-	}
-
 	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
 	vfbd->base.base.pitches[0] = mode_cmd->pitch;
 	vfbd->base.base.depth = mode_cmd->depth;
 	vfbd->base.base.width = mode_cmd->width;
 	vfbd->base.base.height = mode_cmd->height;
-	if (!dev_priv->sou_priv) {
-		vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
-		vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
-	}
 	vfbd->base.dmabuf = true;
-	vfbd->buffer = dmabuf;
+	vfbd->buffer = vmw_dmabuf_reference(dmabuf);
 	vfbd->base.user_handle = mode_cmd->handle;
 	*out = &vfbd->base;
 
 	ret = drm_framebuffer_init(dev, &vfbd->base.base,
 				   &vmw_framebuffer_dmabuf_funcs);
 	if (ret)
-		goto out_err3;
+		goto out_err2;
 
 	return 0;
 
-out_err3:
-	vmw_dmabuf_unreference(&dmabuf);
 out_err2:
+	vmw_dmabuf_unreference(&dmabuf);
 	kfree(vfbd);
 out_err1:
 	return ret;
 }
 
+/**
+ * vmw_kms_new_framebuffer - Create a new framebuffer.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
+ * Either @dmabuf or @surface must be NULL.
+ * @surface: Pointer to a surface to wrap the kms framebuffer around.
+ * Either @dmabuf or @surface must be NULL.
+ * @only_2d: No presents will occur to this dma-buffer based framebuffer. This
+ * helps the code to do some important optimizations.
+ * @mode_cmd: Frame-buffer metadata.
+ */
+struct vmw_framebuffer *
+vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+			struct vmw_dma_buffer *dmabuf,
+			struct vmw_surface *surface,
+			bool only_2d,
+			const struct drm_mode_fb_cmd *mode_cmd)
+{
+	struct vmw_framebuffer *vfb = NULL;
+	bool is_dmabuf_proxy = false;
+	int ret;
+
+	/*
+	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
+	 * therefore, wrap the DMA buf in a surface so we can use the
+	 * SurfaceCopy command.
+	 */
+	if (dmabuf && only_2d &&
+	    dev_priv->active_display_unit == vmw_du_screen_target) {
+		ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
+					      dmabuf, &surface);
+		if (ret)
+			return ERR_PTR(ret);
+
+		is_dmabuf_proxy = true;
+	}
+
+	/* Create the new framebuffer depending on what we have */
+	if (surface) {
+		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
+						      mode_cmd,
+						      is_dmabuf_proxy);
+
+		/*
+		 * vmw_create_dmabuf_proxy() adds a reference that is no longer
+		 * needed
+		 */
+		if (is_dmabuf_proxy)
+			vmw_surface_unreference(&surface);
+	} else if (dmabuf) {
+		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
+						     mode_cmd);
+	} else {
+		BUG();
+	}
+
+	if (ret)
+		return ERR_PTR(ret);
+
+	vfb->pin = vmw_framebuffer_pin;
+	vfb->unpin = vmw_framebuffer_unpin;
+
+	return vfb;
+}
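
A sketch of calling the new constructor for a dma-buf backed framebuffer; note the IS_ERR()/PTR_ERR() convention rather than a NULL check. Illustrative only; example_new_dmabuf_fb is not part of the patch.

static struct vmw_framebuffer *
example_new_dmabuf_fb(struct vmw_private *dev_priv,
		      struct vmw_dma_buffer *dmabuf,
		      const struct drm_mode_fb_cmd *mode_cmd)
{
	/* surface == NULL selects the dma-buf path inside the helper. */
	struct vmw_framebuffer *vfb =
		vmw_kms_new_framebuffer(dev_priv, dmabuf, NULL,
					true /* only_2d */, mode_cmd);

	if (IS_ERR(vfb))
		DRM_ERROR("Framebuffer creation failed: %ld\n", PTR_ERR(vfb));

	return vfb;
}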
+
 /*
  * Generic Kernel modesetting functions
  */
@@ -1157,7 +957,7 @@
 	if (!vmw_kms_validate_mode_vram(dev_priv,
 					mode_cmd.pitch,
 					mode_cmd.height)) {
-		DRM_ERROR("VRAM size is too small for requested mode.\n");
+		DRM_ERROR("Requested mode exceed bounding box limit.\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1187,15 +987,13 @@
 	if (ret)
 		goto err_out;
 
-	/* Create the new framebuffer depending one what we got back */
-	if (bo)
-		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
-						     &mode_cmd);
-	else if (surface)
-		ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
-						      surface, &vfb, &mode_cmd);
-	else
-		BUG();
+	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
+				      !(dev_priv->capabilities & SVGA_CAP_3D),
+				      &mode_cmd);
+	if (IS_ERR(vfb)) {
+		ret = PTR_ERR(vfb);
+		goto err_out;
+	}
 
 err_out:
 	/* vmw_user_lookup_handle takes one ref so does new_fb */
@@ -1218,6 +1016,21 @@
 	.fb_create = vmw_kms_fb_create,
 };
 
+static int vmw_kms_generic_present(struct vmw_private *dev_priv,
+				   struct drm_file *file_priv,
+				   struct vmw_framebuffer *vfb,
+				   struct vmw_surface *surface,
+				   uint32_t sid,
+				   int32_t destX, int32_t destY,
+				   struct drm_vmw_rect *clips,
+				   uint32_t num_clips)
+{
+	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
+					    &surface->res, destX, destY,
+					    num_clips, 1, NULL);
+}
+
+
 int vmw_kms_present(struct vmw_private *dev_priv,
 		    struct drm_file *file_priv,
 		    struct vmw_framebuffer *vfb,
@@ -1227,238 +1040,31 @@
 		    struct drm_vmw_rect *clips,
 		    uint32_t num_clips)
 {
-	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
-	struct drm_clip_rect *tmp;
-	struct drm_crtc *crtc;
-	size_t fifo_size;
-	int i, k, num_units;
-	int ret = 0; /* silence warning */
-	int left, right, top, bottom;
+	int ret;
 
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdBlitSurfaceToScreen body;
-	} *cmd;
-	SVGASignedRect *blits;
-
-	num_units = 0;
-	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
-		if (crtc->primary->fb != &vfb->base)
-			continue;
-		units[num_units++] = vmw_crtc_to_du(crtc);
+	switch (dev_priv->active_display_unit) {
+	case vmw_du_screen_target:
+		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
+						 &surface->res, destX, destY,
+						 num_clips, 1, NULL);
+		break;
+	case vmw_du_screen_object:
+		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
+					      sid, destX, destY, clips,
+					      num_clips);
+		break;
+	default:
+		WARN_ONCE(true,
+			  "Present called with invalid display system.\n");
+		ret = -ENOSYS;
+		break;
 	}
+	if (ret)
+		return ret;
 
-	BUG_ON(surface == NULL);
-	BUG_ON(!clips || !num_clips);
+	vmw_fifo_flush(dev_priv, false);
 
-	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
-	if (unlikely(tmp == NULL)) {
-		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
-		return -ENOMEM;
-	}
-
-	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
-	cmd = kmalloc(fifo_size, GFP_KERNEL);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
-		ret = -ENOMEM;
-		goto out_free_tmp;
-	}
-
-	left = clips->x;
-	right = clips->x + clips->w;
-	top = clips->y;
-	bottom = clips->y + clips->h;
-
-	for (i = 1; i < num_clips; i++) {
-		left = min_t(int, left, (int)clips[i].x);
-		right = max_t(int, right, (int)clips[i].x + clips[i].w);
-		top = min_t(int, top, (int)clips[i].y);
-		bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
-	}
-
-	/* only need to do this once */
-	memset(cmd, 0, fifo_size);
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
-
-	blits = (SVGASignedRect *)&cmd[1];
-
-	cmd->body.srcRect.left = left;
-	cmd->body.srcRect.right = right;
-	cmd->body.srcRect.top = top;
-	cmd->body.srcRect.bottom = bottom;
-
-	for (i = 0; i < num_clips; i++) {
-		tmp[i].x1 = clips[i].x - left;
-		tmp[i].x2 = clips[i].x + clips[i].w - left;
-		tmp[i].y1 = clips[i].y - top;
-		tmp[i].y2 = clips[i].y + clips[i].h - top;
-	}
-
-	for (k = 0; k < num_units; k++) {
-		struct vmw_display_unit *unit = units[k];
-		struct vmw_clip_rect clip;
-		int num;
-
-		clip.x1 = left + destX - unit->crtc.x;
-		clip.y1 = top + destY - unit->crtc.y;
-		clip.x2 = right + destX - unit->crtc.x;
-		clip.y2 = bottom + destY - unit->crtc.y;
-
-		/* skip any crtcs that misses the clip region */
-		if (clip.x1 >= unit->crtc.mode.hdisplay ||
-		    clip.y1 >= unit->crtc.mode.vdisplay ||
-		    clip.x2 <= 0 || clip.y2 <= 0)
-			continue;
-
-		/*
-		 * In order for the clip rects to be correctly scaled
-		 * the src and dest rects needs to be the same size.
-		 */
-		cmd->body.destRect.left = clip.x1;
-		cmd->body.destRect.right = clip.x2;
-		cmd->body.destRect.top = clip.y1;
-		cmd->body.destRect.bottom = clip.y2;
-
-		/* create a clip rect of the crtc in dest coords */
-		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
-		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
-		clip.x1 = 0 - clip.x1;
-		clip.y1 = 0 - clip.y1;
-
-		/* need to reset sid as it is changed by execbuf */
-		cmd->body.srcImage.sid = sid;
-		cmd->body.destScreenId = unit->unit;
-
-		/* clip and write blits to cmd stream */
-		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
-
-		/* if no cliprects hit skip this */
-		if (num == 0)
-			continue;
-
-		/* recalculate package length */
-		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
-		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
-					  fifo_size, 0, NULL, NULL);
-
-		if (unlikely(ret != 0))
-			break;
-	}
-
-	kfree(cmd);
-out_free_tmp:
-	kfree(tmp);
-
-	return ret;
-}
-
-int vmw_kms_readback(struct vmw_private *dev_priv,
-		     struct drm_file *file_priv,
-		     struct vmw_framebuffer *vfb,
-		     struct drm_vmw_fence_rep __user *user_fence_rep,
-		     struct drm_vmw_rect *clips,
-		     uint32_t num_clips)
-{
-	struct vmw_framebuffer_dmabuf *vfbd =
-		vmw_framebuffer_to_vfbd(&vfb->base);
-	struct vmw_dma_buffer *dmabuf = vfbd->buffer;
-	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
-	struct drm_crtc *crtc;
-	size_t fifo_size;
-	int i, k, ret, num_units, blits_pos;
-
-	struct {
-		uint32_t header;
-		SVGAFifoCmdDefineGMRFB body;
-	} *cmd;
-	struct {
-		uint32_t header;
-		SVGAFifoCmdBlitScreenToGMRFB body;
-	} *blits;
-
-	num_units = 0;
-	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
-		if (crtc->primary->fb != &vfb->base)
-			continue;
-		units[num_units++] = vmw_crtc_to_du(crtc);
-	}
-
-	BUG_ON(dmabuf == NULL);
-	BUG_ON(!clips || !num_clips);
-
-	/* take a safe guess at fifo size */
-	fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
-	cmd = kmalloc(fifo_size, GFP_KERNEL);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
-		return -ENOMEM;
-	}
-
-	memset(cmd, 0, fifo_size);
-	cmd->header = SVGA_CMD_DEFINE_GMRFB;
-	cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
-	cmd->body.format.colorDepth = vfb->base.depth;
-	cmd->body.format.reserved = 0;
-	cmd->body.bytesPerLine = vfb->base.pitches[0];
-	cmd->body.ptr.gmrId = vfb->user_handle;
-	cmd->body.ptr.offset = 0;
-
-	blits = (void *)&cmd[1];
-	blits_pos = 0;
-	for (i = 0; i < num_units; i++) {
-		struct drm_vmw_rect *c = clips;
-		for (k = 0; k < num_clips; k++, c++) {
-			/* transform clip coords to crtc origin based coords */
-			int clip_x1 = c->x - units[i]->crtc.x;
-			int clip_x2 = c->x - units[i]->crtc.x + c->w;
-			int clip_y1 = c->y - units[i]->crtc.y;
-			int clip_y2 = c->y - units[i]->crtc.y + c->h;
-			int dest_x = c->x;
-			int dest_y = c->y;
-
-			/* compensate for clipping, we negate
-			 * a negative number and add that.
-			 */
-			if (clip_x1 < 0)
-				dest_x += -clip_x1;
-			if (clip_y1 < 0)
-				dest_y += -clip_y1;
-
-			/* clip */
-			clip_x1 = max(clip_x1, 0);
-			clip_y1 = max(clip_y1, 0);
-			clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
-			clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);
-
-			/* and cull any rects that misses the crtc */
-			if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
-			    clip_y1 >= units[i]->crtc.mode.vdisplay ||
-			    clip_x2 <= 0 || clip_y2 <= 0)
-				continue;
-
-			blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
-			blits[blits_pos].body.srcScreenId = units[i]->unit;
-			blits[blits_pos].body.destOrigin.x = dest_x;
-			blits[blits_pos].body.destOrigin.y = dest_y;
-
-			blits[blits_pos].body.srcRect.left = clip_x1;
-			blits[blits_pos].body.srcRect.top = clip_y1;
-			blits[blits_pos].body.srcRect.right = clip_x2;
-			blits[blits_pos].body.srcRect.bottom = clip_y2;
-			blits_pos++;
-		}
-	}
-	/* reset size here and use calculated exact size from loops */
-	fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
-
-	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
-				  0, user_fence_rep, NULL);
-
-	kfree(cmd);
-
-	return ret;
+	return 0;
 }
 
 int vmw_kms_init(struct vmw_private *dev_priv)
@@ -1470,30 +1076,37 @@
 	dev->mode_config.funcs = &vmw_kms_funcs;
 	dev->mode_config.min_width = 1;
 	dev->mode_config.min_height = 1;
-	/* assumed largest fb size */
-	dev->mode_config.max_width = 8192;
-	dev->mode_config.max_height = 8192;
+	dev->mode_config.max_width = dev_priv->texture_max_width;
+	dev->mode_config.max_height = dev_priv->texture_max_height;
 
-	ret = vmw_kms_init_screen_object_display(dev_priv);
-	if (ret) /* Fallback */
-		(void)vmw_kms_init_legacy_display_system(dev_priv);
+	ret = vmw_kms_stdu_init_display(dev_priv);
+	if (ret) {
+		ret = vmw_kms_sou_init_display(dev_priv);
+		if (ret) /* Fallback */
+			ret = vmw_kms_ldu_init_display(dev_priv);
+	}
 
-	return 0;
+	return ret;
 }
 
 int vmw_kms_close(struct vmw_private *dev_priv)
 {
+	int ret;
+
 	/*
 	 * Docs says we should take the lock before calling this function
 	 * but since it destroys encoders and our destructor calls
 	 * drm_encoder_cleanup which takes the lock we deadlock.
 	 */
 	drm_mode_config_cleanup(dev_priv->dev);
-	if (dev_priv->sou_priv)
-		vmw_kms_close_screen_object_display(dev_priv);
+	if (dev_priv->active_display_unit == vmw_du_screen_object)
+		ret = vmw_kms_sou_close_display(dev_priv);
+	else if (dev_priv->active_display_unit == vmw_du_screen_target)
+		ret = vmw_kms_stdu_close_display(dev_priv);
 	else
-		vmw_kms_close_legacy_display_system(dev_priv);
-	return 0;
+		ret = vmw_kms_ldu_close_display(dev_priv);
+
+	return ret;
 }
 
 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
@@ -1569,7 +1182,7 @@
 		  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
 	else if (vmw_fifo_have_pitchlock(vmw_priv))
 		vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
-						       SVGA_FIFO_PITCHLOCK);
+						   SVGA_FIFO_PITCHLOCK);
 
 	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
 		return 0;
@@ -1641,7 +1254,9 @@
 				uint32_t pitch,
 				uint32_t height)
 {
-	return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem;
+	return ((u64) pitch * (u64) height) < (u64)
+		((dev_priv->active_display_unit == vmw_du_screen_target) ?
+		 dev_priv->prim_bb_mem : dev_priv->vram_size);
 }
 
 
@@ -1715,75 +1330,6 @@
 	return 0;
 }
 
-int vmw_du_page_flip(struct drm_crtc *crtc,
-		     struct drm_framebuffer *fb,
-		     struct drm_pending_vblank_event *event,
-		     uint32_t page_flip_flags)
-{
-	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-	struct drm_framebuffer *old_fb = crtc->primary->fb;
-	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
-	struct drm_file *file_priv ;
-	struct vmw_fence_obj *fence = NULL;
-	struct drm_clip_rect clips;
-	int ret;
-
-	if (event == NULL)
-		return -EINVAL;
-
-	/* require ScreenObject support for page flipping */
-	if (!dev_priv->sou_priv)
-		return -ENOSYS;
-
-	file_priv = event->base.file_priv;
-	if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
-		return -EINVAL;
-
-	crtc->primary->fb = fb;
-
-	/* do a full screen dirty update */
-	clips.x1 = clips.y1 = 0;
-	clips.x2 = fb->width;
-	clips.y2 = fb->height;
-
-	if (vfb->dmabuf)
-		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
-					  0, 0, &clips, 1, 1, &fence);
-	else
-		ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
-					   0, 0, &clips, 1, 1, &fence);
-
-
-	if (ret != 0)
-		goto out_no_fence;
-	if (!fence) {
-		ret = -EINVAL;
-		goto out_no_fence;
-	}
-
-	ret = vmw_event_fence_action_queue(file_priv, fence,
-					   &event->base,
-					   &event->event.tv_sec,
-					   &event->event.tv_usec,
-					   true);
-
-	/*
-	 * No need to hold on to this now. The only cleanup
-	 * we need to do if we fail is unref the fence.
-	 */
-	vmw_fence_obj_unreference(&fence);
-
-	if (vmw_crtc_to_du(crtc)->is_implicit)
-		vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);
-
-	return ret;
-
-out_no_fence:
-	crtc->primary->fb = old_fb;
-	return ret;
-}
-
-
 void vmw_du_crtc_save(struct drm_crtc *crtc)
 {
 }
@@ -1808,8 +1354,9 @@
 	}
 }
 
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
 {
+	return 0;
 }
 
 void vmw_du_connector_save(struct drm_connector *connector)
@@ -1919,7 +1466,7 @@
  * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
  * members filled in.
  */
-static void vmw_guess_mode_timing(struct drm_display_mode *mode)
+void vmw_guess_mode_timing(struct drm_display_mode *mode)
 {
 	mode->hsync_start = mode->hdisplay + 50;
 	mode->hsync_end = mode->hsync_start + 50;
@@ -1954,36 +1501,39 @@
 	 * If using screen objects, then assume 32-bpp because that's what the
 	 * SVGA device is assuming
 	 */
-	if (dev_priv->sou_priv)
+	if (dev_priv->active_display_unit == vmw_du_screen_object)
 		assumed_bpp = 4;
 
-	/* Add preferred mode */
-	{
-		mode = drm_mode_duplicate(dev, &prefmode);
-		if (!mode)
-			return 0;
-		mode->hdisplay = du->pref_width;
-		mode->vdisplay = du->pref_height;
-		vmw_guess_mode_timing(mode);
-
-		if (vmw_kms_validate_mode_vram(dev_priv,
-						mode->hdisplay * assumed_bpp,
-						mode->vdisplay)) {
-			drm_mode_probed_add(connector, mode);
-		} else {
-			drm_mode_destroy(dev, mode);
-			mode = NULL;
-		}
-
-		if (du->pref_mode) {
-			list_del_init(&du->pref_mode->head);
-			drm_mode_destroy(dev, du->pref_mode);
-		}
-
-		/* mode might be null here, this is intended */
-		du->pref_mode = mode;
+	if (dev_priv->active_display_unit == vmw_du_screen_target) {
+		max_width  = min(max_width,  dev_priv->stdu_max_width);
+		max_height = min(max_height, dev_priv->stdu_max_height);
 	}
 
+	/* Add preferred mode */
+	mode = drm_mode_duplicate(dev, &prefmode);
+	if (!mode)
+		return 0;
+	mode->hdisplay = du->pref_width;
+	mode->vdisplay = du->pref_height;
+	vmw_guess_mode_timing(mode);
+
+	if (vmw_kms_validate_mode_vram(dev_priv,
+					mode->hdisplay * assumed_bpp,
+					mode->vdisplay)) {
+		drm_mode_probed_add(connector, mode);
+	} else {
+		drm_mode_destroy(dev, mode);
+		mode = NULL;
+	}
+
+	if (du->pref_mode) {
+		list_del_init(&du->pref_mode->head);
+		drm_mode_destroy(dev, du->pref_mode);
+	}
+
+	/* mode might be null here, this is intended */
+	du->pref_mode = mode;
+
 	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
 		bmode = &vmw_kms_connector_builtin[i];
 		if (bmode->hdisplay > max_width ||
@@ -2003,11 +1553,9 @@
 		drm_mode_probed_add(connector, mode);
 	}
 
-	/* Move the prefered mode first, help apps pick the right mode. */
-	if (du->pref_mode)
-		list_move(&du->pref_mode->head, &connector->probed_modes);
-
 	drm_mode_connector_list_update(connector, true);
+	/* Move the preferred mode first, help apps pick the right mode. */
+	drm_mode_sort(&connector->modes);
 
 	return 1;
 }
@@ -2031,7 +1579,9 @@
 	unsigned rects_size;
 	int ret;
 	int i;
+	u64 total_pixels = 0;
 	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_vmw_rect bounding_box = {0};
 
 	if (!arg->num_outputs) {
 		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
@@ -2062,6 +1612,40 @@
 			ret = -EINVAL;
 			goto out_free;
 		}
+
+		/*
+		 * bounding_box.w and bounding_box.h are used as
+		 * lower-right coordinates
+		 */
+		if (rects[i].x + rects[i].w > bounding_box.w)
+			bounding_box.w = rects[i].x + rects[i].w;
+
+		if (rects[i].y + rects[i].h > bounding_box.h)
+			bounding_box.h = rects[i].y + rects[i].h;
+
+		total_pixels += (u64) rects[i].w * (u64) rects[i].h;
+	}
+
+	if (dev_priv->active_display_unit == vmw_du_screen_target) {
+		/*
+		 * For Screen Targets, the limits for a topology are:
+		 *	1. Bounding box (assuming 32bpp) must be < prim_bb_mem
+		 *      2. Total pixels (assuming 32bpp) must be < prim_bb_mem
+		 */
+		u64 bb_mem    = (u64) bounding_box.w * bounding_box.h * 4;
+		u64 pixel_mem = total_pixels * 4;
+
+		if (bb_mem > dev_priv->prim_bb_mem) {
+			DRM_ERROR("Topology is beyond supported limits.\n");
+			ret = -EINVAL;
+			goto out_free;
+		}
+
+		if (pixel_mem > dev_priv->prim_bb_mem) {
+			DRM_ERROR("Combined output size too large\n");
+			ret = -EINVAL;
+			goto out_free;
+		}
 	}
 
 	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
@@ -2070,3 +1654,419 @@
 	kfree(rects);
 	return ret;
 }
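
For the Screen Target limits above, a worked example: two 1920x1080 outputs side by side give a 3840x1080 bounding box, so both bb_mem and pixel_mem come to 3840 * 1080 * 4 = 16,588,800 bytes (~15.8 MiB), and each must fit in prim_bb_mem. The same two checks as a standalone predicate, for illustration only (example_stdu_topology_fits is not in the patch):

static bool example_stdu_topology_fits(const struct drm_vmw_rect *bounding_box,
				       u64 total_pixels, u64 prim_bb_mem)
{
	/* Both quantities assume 32bpp, as the ioctl check does. */
	u64 bb_mem = (u64) bounding_box->w * bounding_box->h * 4;
	u64 pixel_mem = total_pixels * 4;

	return bb_mem <= prim_bb_mem && pixel_mem <= prim_bb_mem;
}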
+
+/**
+ * vmw_kms_helper_dirty - Helper to build commands and perform actions based
+ * on a set of cliprects and a set of display units.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @framebuffer: Pointer to the framebuffer on which to perform the actions.
+ * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
+ * Cliprects are given in framebuffer coordinates.
+ * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
+ * be NULL. Cliprects are given in source coordinates.
+ * @dest_x: X coordinate offset for the crtc / destination clip rects.
+ * @dest_y: Y coordinate offset for the crtc / destination clip rects.
+ * @num_clips: Number of cliprects in the @clips or @vclips array.
+ * @increment: Integer with which to increment the clip counter when looping.
+ * Used to skip a predetermined number of clip rects.
+ * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
+ */
+int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+			 struct vmw_framebuffer *framebuffer,
+			 const struct drm_clip_rect *clips,
+			 const struct drm_vmw_rect *vclips,
+			 s32 dest_x, s32 dest_y,
+			 int num_clips,
+			 int increment,
+			 struct vmw_kms_dirty *dirty)
+{
+	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+	struct drm_crtc *crtc;
+	u32 num_units = 0;
+	u32 i, k;
+
+	dirty->dev_priv = dev_priv;
+
+	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
+		if (crtc->primary->fb != &framebuffer->base)
+			continue;
+		units[num_units++] = vmw_crtc_to_du(crtc);
+	}
+
+	for (k = 0; k < num_units; k++) {
+		struct vmw_display_unit *unit = units[k];
+		s32 crtc_x = unit->crtc.x;
+		s32 crtc_y = unit->crtc.y;
+		s32 crtc_width = unit->crtc.mode.hdisplay;
+		s32 crtc_height = unit->crtc.mode.vdisplay;
+		const struct drm_clip_rect *clips_ptr = clips;
+		const struct drm_vmw_rect *vclips_ptr = vclips;
+
+		dirty->unit = unit;
+		if (dirty->fifo_reserve_size > 0) {
+			dirty->cmd = vmw_fifo_reserve(dev_priv,
+						      dirty->fifo_reserve_size);
+			if (!dirty->cmd) {
+				DRM_ERROR("Couldn't reserve fifo space "
+					  "for dirty blits.\n");
+				return -ENOMEM;
+			}
+			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
+		}
+		dirty->num_hits = 0;
+		for (i = 0; i < num_clips; i++, clips_ptr += increment,
+		       vclips_ptr += increment) {
+			s32 clip_left;
+			s32 clip_top;
+
+			/*
+			 * Select clip array type. Note that integer type
+			 * in @clips is unsigned short, whereas in @vclips
+			 * it's 32-bit.
+			 */
+			if (clips) {
+				dirty->fb_x = (s32) clips_ptr->x1;
+				dirty->fb_y = (s32) clips_ptr->y1;
+				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
+					crtc_x;
+				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
+					crtc_y;
+			} else {
+				dirty->fb_x = vclips_ptr->x;
+				dirty->fb_y = vclips_ptr->y;
+				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
+					dest_x - crtc_x;
+				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
+					dest_y - crtc_y;
+			}
+
+			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
+			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
+
+			/* Skip this clip if it's outside the crtc region */
+			if (dirty->unit_x1 >= crtc_width ||
+			    dirty->unit_y1 >= crtc_height ||
+			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
+				continue;
+
+			/* Clip right and bottom to crtc limits */
+			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
+					       crtc_width);
+			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
+					       crtc_height);
+
+			/* Clip left and top to crtc limits */
+			clip_left = min_t(s32, dirty->unit_x1, 0);
+			clip_top = min_t(s32, dirty->unit_y1, 0);
+			dirty->unit_x1 -= clip_left;
+			dirty->unit_y1 -= clip_top;
+			dirty->fb_x -= clip_left;
+			dirty->fb_y -= clip_top;
+
+			dirty->clip(dirty);
+		}
+
+		dirty->fifo_commit(dirty);
+	}
+
+	return 0;
+}
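
The helper above is a closure driver: callers fill in struct vmw_kms_dirty (see vmwgfx_kms.h below) with a per-clip callback and a per-unit commit callback. A minimal sketch of such a closure, emitting one SVGA_CMD_UPDATE per clip, follows; the example_* names are invented for illustration, and a real caller would set fifo_reserve_size to sizeof(struct example_blit) * num_clips before invoking vmw_kms_helper_dirty().

struct example_blit {
	u32 header;
	SVGAFifoCmdUpdate body;
};

/* Called once per clip rect that survives crtc clipping. */
static void example_clip(struct vmw_kms_dirty *dirty)
{
	struct example_blit *blit = (struct example_blit *) dirty->cmd +
		dirty->num_hits;

	blit->header = SVGA_CMD_UPDATE;
	blit->body.x = dirty->unit_x1;
	blit->body.y = dirty->unit_y1;
	blit->body.width = dirty->unit_x2 - dirty->unit_x1;
	blit->body.height = dirty->unit_y2 - dirty->unit_y1;
	dirty->num_hits++;
}

/* Called once per display unit; commits what the clip callback built. */
static void example_fifo_commit(struct vmw_kms_dirty *dirty)
{
	vmw_fifo_commit(dirty->dev_priv,
			sizeof(struct example_blit) * dirty->num_hits);
}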
+
+/**
+ * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
+ * command submission.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @buf: The buffer object
+ * @interruptible: Whether to perform waits as interruptible.
+ * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
+ * the buffer will be validated as a GMR. Already pinned buffers will not be
+ * validated.
+ *
+ * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
+ * interrupted by a signal.
+ */
+int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
+				  struct vmw_dma_buffer *buf,
+				  bool interruptible,
+				  bool validate_as_mob)
+{
+	struct ttm_buffer_object *bo = &buf->base;
+	int ret;
+
+	ttm_bo_reserve(bo, false, false, interruptible, NULL);
+	ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
+					 validate_as_mob);
+	if (ret)
+		ttm_bo_unreserve(bo);
+
+	return ret;
+}
+
+/**
+ * vmw_kms_helper_buffer_revert - Undo the actions of
+ * vmw_kms_helper_buffer_prepare.
+ *
+ * @buf: Pointer to the buffer object.
+ *
+ * Helper to be used if an error forces the caller to undo the actions of
+ * vmw_kms_helper_buffer_prepare.
+ */
+void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
+{
+	if (buf)
+		ttm_bo_unreserve(&buf->base);
+}
+
+/**
+ * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
+ * kms command submission.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @file_priv: Pointer to a struct drm_file representing the caller's
+ * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
+ * if non-NULL, @user_fence_rep must be non-NULL.
+ * @buf: The buffer object.
+ * @out_fence:  Optional pointer to a fence pointer. If non-NULL, a
+ * ref-counted fence pointer is returned here.
+ * @user_fence_rep: Optional pointer to a user-space provided struct
+ * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
+ * function copies fence data to user-space in a fail-safe manner.
+ */
+void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
+				  struct drm_file *file_priv,
+				  struct vmw_dma_buffer *buf,
+				  struct vmw_fence_obj **out_fence,
+				  struct drm_vmw_fence_rep __user *
+				  user_fence_rep)
+{
+	struct vmw_fence_obj *fence;
+	uint32_t handle;
+	int ret;
+
+	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
+					 file_priv ? &handle : NULL);
+	if (buf)
+		vmw_fence_single_bo(&buf->base, fence);
+	if (file_priv)
+		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
+					    ret, user_fence_rep, fence,
+					    handle);
+	if (out_fence)
+		*out_fence = fence;
+	else
+		vmw_fence_obj_unreference(&fence);
+
+	vmw_kms_helper_buffer_revert(buf);
+}
+
+
+/**
+ * vmw_kms_helper_resource_revert - Undo the actions of
+ * vmw_kms_helper_resource_prepare.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ *
+ * Helper to be used if an error forces the caller to undo the actions of
+ * vmw_kms_helper_resource_prepare.
+ */
+void vmw_kms_helper_resource_revert(struct vmw_resource *res)
+{
+	vmw_kms_helper_buffer_revert(res->backup);
+	vmw_resource_unreserve(res, false, NULL, 0);
+	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+}
+
+/**
+ * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
+ * command submission.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ * @interruptible: Whether to perform waits as interruptible.
+ *
+ * Reserves and validates also the backup buffer if a guest-backed resource.
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted by a signal.
+ */
+int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
+				    bool interruptible)
+{
+	int ret = 0;
+
+	if (interruptible)
+		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
+	else
+		mutex_lock(&res->dev_priv->cmdbuf_mutex);
+
+	if (unlikely(ret != 0))
+		return -ERESTARTSYS;
+
+	ret = vmw_resource_reserve(res, interruptible, false);
+	if (ret)
+		goto out_unlock;
+
+	if (res->backup) {
+		ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
+						    interruptible,
+						    res->dev_priv->has_mob);
+		if (ret)
+			goto out_unreserve;
+	}
+	ret = vmw_resource_validate(res);
+	if (ret)
+		goto out_revert;
+	return 0;
+
+out_revert:
+	vmw_kms_helper_buffer_revert(res->backup);
+out_unreserve:
+	vmw_resource_unreserve(res, false, NULL, 0);
+out_unlock:
+	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+	return ret;
+}
+
+/**
+ * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
+ * kms command submission.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
+ * ref-counted fence pointer is returned here.
+ */
+void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+			     struct vmw_fence_obj **out_fence)
+{
+	if (res->backup || out_fence)
+		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
+					     out_fence, NULL);
+
+	vmw_resource_unreserve(res, false, NULL, 0);
+	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+}
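
The prepare/finish pair above brackets command submission; on an error path before submission, vmw_kms_helper_resource_revert() undoes prepare instead. A sketch of the intended sequence (illustrative only; the submission step is elided and example_surface_update is not in the patch):

static int example_surface_update(struct vmw_resource *res)
{
	struct vmw_fence_obj *fence = NULL;
	int ret;

	ret = vmw_kms_helper_resource_prepare(res, true);
	if (ret)
		return ret;

	/* ... reserve, build and commit fifo commands using res here ... */

	vmw_kms_helper_resource_finish(res, &fence);
	if (fence)
		vmw_fence_obj_unreference(&fence);

	return 0;
}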
+
+/**
+ * vmw_kms_update_proxy - Helper function to update a proxy surface from
+ * its backing MOB.
+ *
+ * @res: Pointer to the surface resource
+ * @clips: Clip rects in framebuffer (surface) space.
+ * @num_clips: Number of clips in @clips.
+ * @increment: Integer with which to increment the clip counter when looping.
+ * Used to skip a predetermined number of clip rects.
+ *
+ * This function makes sure the proxy surface is updated from its backing MOB
+ * using the region given by @clips. The surface resource @res and its backing
+ * MOB needs to be reserved and validated on call.
+ */
+int vmw_kms_update_proxy(struct vmw_resource *res,
+			 const struct drm_clip_rect *clips,
+			 unsigned num_clips,
+			 int increment)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdUpdateGBImage body;
+	} *cmd;
+	SVGA3dBox *box;
+	size_t copy_size = 0;
+	int i;
+
+	if (!clips)
+		return 0;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
+	if (!cmd) {
+		DRM_ERROR("Couldn't reserve fifo space for proxy surface "
+			  "update.\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
+		box = &cmd->body.box;
+
+		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+		cmd->header.size = sizeof(cmd->body);
+		cmd->body.image.sid = res->id;
+		cmd->body.image.face = 0;
+		cmd->body.image.mipmap = 0;
+
+		if (clips->x1 > size->width || clips->x2 > size->width ||
+		    clips->y1 > size->height || clips->y2 > size->height) {
+			DRM_ERROR("Invalid clips outsize of framebuffer.\n");
+			return -EINVAL;
+		}
+
+		box->x = clips->x1;
+		box->y = clips->y1;
+		box->z = 0;
+		box->w = clips->x2 - clips->x1;
+		box->h = clips->y2 - clips->y1;
+		box->d = 1;
+
+		copy_size += sizeof(*cmd);
+	}
+
+	vmw_fifo_commit(dev_priv, copy_size);
+
+	return 0;
+}
+
+int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+			    unsigned unit,
+			    u32 max_width,
+			    u32 max_height,
+			    struct drm_connector **p_con,
+			    struct drm_crtc **p_crtc,
+			    struct drm_display_mode **p_mode)
+{
+	struct drm_connector *con;
+	struct vmw_display_unit *du;
+	struct drm_display_mode *mode;
+	int i = 0;
+
+	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
+			    head) {
+		if (i == unit)
+			break;
+
+		++i;
+	}
+
+	if (i != unit) {
+		DRM_ERROR("Could not find initial display unit.\n");
+		return -EINVAL;
+	}
+
+	if (list_empty(&con->modes))
+		(void) vmw_du_connector_fill_modes(con, max_width, max_height);
+
+	if (list_empty(&con->modes)) {
+		DRM_ERROR("Could not find initial display mode.\n");
+		return -EINVAL;
+	}
+
+	du = vmw_connector_to_du(con);
+	*p_con = con;
+	*p_crtc = &du->crtc;
+
+	list_for_each_entry(mode, &con->modes, head) {
+		if (mode->type & DRM_MODE_TYPE_PREFERRED)
+			break;
+	}
+
+	if (mode->type & DRM_MODE_TYPE_PREFERRED)
+		*p_mode = mode;
+	else {
+		WARN_ONCE(true, "Could not find initial preferred mode.\n");
+		*p_mode = list_first_entry(&con->modes,
+					   struct drm_display_mode,
+					   head);
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 8d038c3..782df7c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -32,11 +32,60 @@
 #include <drm/drm_crtc_helper.h>
 #include "vmwgfx_drv.h"
 
+/**
+ * struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
+ * function.
+ *
+ * @fifo_commit: Callback that is called once for each display unit after
+ * all clip rects. This function must commit the fifo space reserved by the
+ * helper. Set up by the caller.
+ * @clip: Callback that is called for each cliprect on each display unit.
+ * Set up by the caller.
+ * @fifo_reserve_size: Fifo size that the helper should try to allocate for
+ * each display unit. Set up by the caller.
+ * @dev_priv: Pointer to the device private. Set up by the helper.
+ * @unit: The current display unit. Set up by the helper before a call to @clip.
+ * @cmd: The allocated fifo space. Set up by the helper before the first @clip
+ * call.
+ * @num_hits: Number of clip rect commands for this display unit.
+ * Cleared by the helper before the first @clip call. Updated by the @clip
+ * callback.
+ * @fb_x: Clip rect left side in framebuffer coordinates.
+ * @fb_y: Clip rect top side in framebuffer coordinates.
+ * @unit_x1: Clip rect left side in crtc coordinates.
+ * @unit_y1: Clip rect top side in crtc coordinates.
+ * @unit_x2: Clip rect right side in crtc coordinates.
+ * @unit_y2: Clip rect bottom side in crtc coordinates.
+ *
+ * The clip rect coordinates are updated by the helper for each @clip call.
+ * Note that this struct may be derived from (embedded in a larger struct)
+ * if more info needs to be passed between the helper caller and the helper
+ * callbacks.
+ */
+struct vmw_kms_dirty {
+	void (*fifo_commit)(struct vmw_kms_dirty *);
+	void (*clip)(struct vmw_kms_dirty *);
+	size_t fifo_reserve_size;
+	struct vmw_private *dev_priv;
+	struct vmw_display_unit *unit;
+	void *cmd;
+	u32 num_hits;
+	s32 fb_x;
+	s32 fb_y;
+	s32 unit_x1;
+	s32 unit_y1;
+	s32 unit_x2;
+	s32 unit_y2;
+};
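The struct acts as a closure: a caller embeds it in a larger struct, fills in
the two callbacks and @fifo_reserve_size, and passes it to the
vmw_kms_helper_dirty() helper declared further down in this header. A minimal,
hypothetical derived closure (mirroring the real vmw_kms_sou_surface_dirty in
vmwgfx_scrn.c later in this patch) might look like:

	/* Hypothetical closure: track a per-unit bounding box of all clips. */
	struct example_dirty {
		struct vmw_kms_dirty base;	/* base member for container_of */
		s32 x1, y1, x2, y2;
	};

	static void example_clip(struct vmw_kms_dirty *dirty)
	{
		struct example_dirty *ed =
			container_of(dirty, struct example_dirty, base);

		/* unit_x1/y1/x2/y2 were set up by the helper before this call. */
		ed->x1 = min_t(s32, ed->x1, dirty->unit_x1);
		ed->y1 = min_t(s32, ed->y1, dirty->unit_y1);
		ed->x2 = max_t(s32, ed->x2, dirty->unit_x2);
		ed->y2 = max_t(s32, ed->y2, dirty->unit_y2);
		dirty->num_hits++;
	}

The matching @fifo_commit callback is then responsible for writing @num_hits
commands into @cmd and committing the fifo space that the helper reserved
according to @fifo_reserve_size.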
+
 #define VMWGFX_NUM_DISPLAY_UNITS 8
 
 
 #define vmw_framebuffer_to_vfb(x) \
 	container_of(x, struct vmw_framebuffer, base)
+#define vmw_framebuffer_to_vfbs(x) \
+	container_of(x, struct vmw_framebuffer_surface, base.base)
+#define vmw_framebuffer_to_vfbd(x) \
+	container_of(x, struct vmw_framebuffer_dmabuf, base.base)
 
 /**
  * Base class for framebuffers
@@ -53,9 +102,27 @@
 	uint32_t user_handle;
 };
 
+/*
+ * Clip rectangle
+ */
+struct vmw_clip_rect {
+	int x1, x2, y1, y2;
+};
 
-#define vmw_crtc_to_du(x) \
-	container_of(x, struct vmw_display_unit, crtc)
+struct vmw_framebuffer_surface {
+	struct vmw_framebuffer base;
+	struct vmw_surface *surface;
+	struct vmw_dma_buffer *buffer;
+	struct list_head head;
+	bool is_dmabuf_proxy;  /* true if this is proxy surface for DMA buf */
+};
+
+
+struct vmw_framebuffer_dmabuf {
+	struct vmw_framebuffer base;
+	struct vmw_dma_buffer *buffer;
+};
+
 
 /*
  * Basic cursor manipulation
@@ -120,11 +187,7 @@
 /*
  * Shared display unit functions - vmwgfx_kms.c
  */
-void vmw_display_unit_cleanup(struct vmw_display_unit *du);
-int vmw_du_page_flip(struct drm_crtc *crtc,
-		     struct drm_framebuffer *fb,
-		     struct drm_pending_vblank_event *event,
-		     uint32_t page_flip_flags);
+void vmw_du_cleanup(struct vmw_display_unit *du);
 void vmw_du_crtc_save(struct drm_crtc *crtc);
 void vmw_du_crtc_restore(struct drm_crtc *crtc);
 void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
@@ -133,7 +196,7 @@
 int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 			   uint32_t handle, uint32_t width, uint32_t height);
 int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode);
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode);
 void vmw_du_connector_save(struct drm_connector *connector);
 void vmw_du_connector_restore(struct drm_connector *connector);
 enum drm_connector_status
@@ -143,25 +206,118 @@
 int vmw_du_connector_set_property(struct drm_connector *connector,
 				  struct drm_property *property,
 				  uint64_t val);
+int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+			 struct vmw_framebuffer *framebuffer,
+			 const struct drm_clip_rect *clips,
+			 const struct drm_vmw_rect *vclips,
+			 s32 dest_x, s32 dest_y,
+			 int num_clips,
+			 int increment,
+			 struct vmw_kms_dirty *dirty);
 
+int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
+				  struct vmw_dma_buffer *buf,
+				  bool interruptible,
+				  bool validate_as_mob);
+void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
+void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
+				  struct drm_file *file_priv,
+				  struct vmw_dma_buffer *buf,
+				  struct vmw_fence_obj **out_fence,
+				  struct drm_vmw_fence_rep __user *
+				  user_fence_rep);
+int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
+				    bool interruptible);
+void vmw_kms_helper_resource_revert(struct vmw_resource *res);
+void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+				    struct vmw_fence_obj **out_fence);
+int vmw_kms_readback(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_vmw_rect *vclips,
+		     uint32_t num_clips);
+struct vmw_framebuffer *
+vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+			struct vmw_dma_buffer *dmabuf,
+			struct vmw_surface *surface,
+			bool only_2d,
+			const struct drm_mode_fb_cmd *mode_cmd);
+int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+			    unsigned unit,
+			    u32 max_width,
+			    u32 max_height,
+			    struct drm_connector **p_con,
+			    struct drm_crtc **p_crtc,
+			    struct drm_display_mode **p_mode);
+void vmw_guess_mode_timing(struct drm_display_mode *mode);
 
 /*
  * Legacy display unit functions - vmwgfx_ldu.c
  */
-int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
-int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
+int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
+int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
+int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
+				struct vmw_framebuffer *framebuffer,
+				unsigned flags, unsigned color,
+				struct drm_clip_rect *clips,
+				unsigned num_clips, int increment);
+int vmw_kms_update_proxy(struct vmw_resource *res,
+			 const struct drm_clip_rect *clips,
+			 unsigned num_clips,
+			 int increment);
 
 /*
  * Screen Objects display functions - vmwgfx_scrn.c
  */
-int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
-int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
-int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
-			      struct drm_vmw_rect *rects);
-bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
-				     struct drm_crtc *crtc);
-void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
-					      struct drm_crtc *crtc);
+int vmw_kms_sou_init_display(struct vmw_private *dev_priv);
+int vmw_kms_sou_close_display(struct vmw_private *dev_priv);
+int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
+				 struct vmw_framebuffer *framebuffer,
+				 struct drm_clip_rect *clips,
+				 struct drm_vmw_rect *vclips,
+				 struct vmw_resource *srf,
+				 s32 dest_x,
+				 s32 dest_y,
+				 unsigned num_clips, int inc,
+				 struct vmw_fence_obj **out_fence);
+int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+				struct vmw_framebuffer *framebuffer,
+				struct drm_clip_rect *clips,
+				unsigned num_clips, int increment,
+				bool interruptible,
+				struct vmw_fence_obj **out_fence);
+int vmw_kms_sou_readback(struct vmw_private *dev_priv,
+			 struct drm_file *file_priv,
+			 struct vmw_framebuffer *vfb,
+			 struct drm_vmw_fence_rep __user *user_fence_rep,
+			 struct drm_vmw_rect *vclips,
+			 uint32_t num_clips);
+
+/*
+ * Screen Target Display Unit functions - vmwgfx_stdu.c
+ */
+int vmw_kms_stdu_init_display(struct vmw_private *dev_priv);
+int vmw_kms_stdu_close_display(struct vmw_private *dev_priv);
+int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
+			       struct vmw_framebuffer *framebuffer,
+			       struct drm_clip_rect *clips,
+			       struct drm_vmw_rect *vclips,
+			       struct vmw_resource *srf,
+			       s32 dest_x,
+			       s32 dest_y,
+			       unsigned num_clips, int inc,
+			       struct vmw_fence_obj **out_fence);
+int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_clip_rect *clips,
+		     struct drm_vmw_rect *vclips,
+		     uint32_t num_clips,
+		     int increment,
+		     bool to_surface,
+		     bool interruptible);
 
 
 #endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 5c289f7..bb63e4d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -57,7 +57,7 @@
 static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
 {
 	list_del_init(&ldu->active);
-	vmw_display_unit_cleanup(&ldu->base);
+	vmw_du_cleanup(&ldu->base);
 	kfree(ldu);
 }
 
@@ -279,7 +279,7 @@
 		return -EINVAL;
 	}
 
-	vmw_fb_off(dev_priv);
+	vmw_svga_enable(dev_priv);
 
 	crtc->primary->fb = fb;
 	encoder->crtc = crtc;
@@ -385,7 +385,7 @@
 	return 0;
 }
 
-int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
+int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 	int i, ret;
@@ -422,6 +422,10 @@
 	else
 		vmw_ldu_init(dev_priv, 0);
 
+	dev_priv->active_display_unit = vmw_du_legacy;
+
+	DRM_INFO("Legacy Display Unit initialized\n");
+
 	return 0;
 
 err_vblank_cleanup:
@@ -432,7 +436,7 @@
 	return ret;
 }
 
-int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
+int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 
@@ -447,3 +451,38 @@
 
 	return 0;
 }
+
+
+int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
+				struct vmw_framebuffer *framebuffer,
+				unsigned flags, unsigned color,
+				struct drm_clip_rect *clips,
+				unsigned num_clips, int increment)
+{
+	size_t fifo_size;
+	int i;
+
+	struct {
+		uint32_t header;
+		SVGAFifoCmdUpdate body;
+	} *cmd;
+
+	fifo_size = sizeof(*cmd) * num_clips;
+	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Fifo reserve failed.\n");
+		return -ENOMEM;
+	}
+
+	memset(cmd, 0, fifo_size);
+	for (i = 0; i < num_clips; i++, clips += increment) {
+		cmd[i].header = SVGA_CMD_UPDATE;
+		cmd[i].body.x = clips->x1;
+		cmd[i].body.y = clips->y1;
+		cmd[i].body.width = clips->x2 - clips->x1;
+		cmd[i].body.height = clips->y2 - clips->y1;
+	}
+
+	vmw_fifo_commit(dev_priv, fifo_size);
+	return 0;
+}
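A hypothetical caller (not part of this patch) marking the whole framebuffer
dirty through the legacy path; note that the @flags and @color arguments are
accepted but unused by the body above:

	/* Sketch: one full-framebuffer clip, increment 1. */
	struct drm_clip_rect clip = {
		.x1 = 0,
		.y1 = 0,
		.x2 = framebuffer->base.width,
		.y2 = framebuffer->base.height,
	};

	ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, framebuffer,
					  0, 0, &clip, 1, 1);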
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 04a64b8..23db160 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2012-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,7 +31,7 @@
  * If we set up the screen target otable, screen objects stop working.
  */
 
-#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1)
+#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1))
 
 #ifdef CONFIG_64BIT
 #define VMW_PPN_SIZE 8
@@ -67,9 +67,23 @@
  * @size:           Size of the table (page-aligned).
  * @page_table:     Pointer to a struct vmw_mob holding the page table.
  */
-struct vmw_otable {
-	unsigned long size;
-	struct vmw_mob *page_table;
+static const struct vmw_otable pre_dx_tables[] = {
+	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
+	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
+};
+
+static const struct vmw_otable dx_tables[] = {
+	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
+	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
+	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
+	{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
 };
 
 static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
@@ -92,6 +106,7 @@
  */
 static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 				 SVGAOTableType type,
+				 struct ttm_buffer_object *otable_bo,
 				 unsigned long offset,
 				 struct vmw_otable *otable)
 {
@@ -106,7 +121,7 @@
 
 	BUG_ON(otable->page_table != NULL);
 
-	vsgt = vmw_bo_sg_table(dev_priv->otable_bo);
+	vsgt = vmw_bo_sg_table(otable_bo);
 	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
 	WARN_ON(!vmw_piter_next(&iter));
 
@@ -142,7 +157,7 @@
 	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.type = type;
-	cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
 	cmd->body.sizeInBytes = otable->size;
 	cmd->body.validSizeInBytes = 0;
 	cmd->body.ptDepth = mob->pt_level;
@@ -191,18 +206,19 @@
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for OTable "
 			  "takedown.\n");
-	} else {
-		memset(cmd, 0, sizeof(*cmd));
-		cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
-		cmd->header.size = sizeof(cmd->body);
-		cmd->body.type = type;
-		cmd->body.baseAddress = 0;
-		cmd->body.sizeInBytes = 0;
-		cmd->body.validSizeInBytes = 0;
-		cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
-		vmw_fifo_commit(dev_priv, sizeof(*cmd));
+		return;
 	}
 
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.type = type;
+	cmd->body.baseAddress = 0;
+	cmd->body.sizeInBytes = 0;
+	cmd->body.validSizeInBytes = 0;
+	cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
 	if (bo) {
 		int ret;
 
@@ -217,47 +233,21 @@
 	otable->page_table = NULL;
 }
 
-/*
- * vmw_otables_setup - Set up guest backed memory object tables
- *
- * @dev_priv:       Pointer to a device private structure
- *
- * Takes care of the device guest backed surface
- * initialization, by setting up the guest backed memory object tables.
- * Returns 0 on success and various error codes on failure. A succesful return
- * means the object tables can be taken down using the vmw_otables_takedown
- * function.
- */
-int vmw_otables_setup(struct vmw_private *dev_priv)
+
+static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
+				  struct vmw_otable_batch *batch)
 {
 	unsigned long offset;
 	unsigned long bo_size;
-	struct vmw_otable *otables;
+	struct vmw_otable *otables = batch->otables;
 	SVGAOTableType i;
 	int ret;
 
-	otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables),
-			  GFP_KERNEL);
-	if (unlikely(otables == NULL)) {
-		DRM_ERROR("Failed to allocate space for otable "
-			  "metadata.\n");
-		return -ENOMEM;
-	}
-
-	otables[SVGA_OTABLE_MOB].size =
-		VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE;
-	otables[SVGA_OTABLE_SURFACE].size =
-		VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE;
-	otables[SVGA_OTABLE_CONTEXT].size =
-		VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE;
-	otables[SVGA_OTABLE_SHADER].size =
-		VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE;
-	otables[SVGA_OTABLE_SCREEN_TARGET].size =
-		VMWGFX_NUM_GB_SCREEN_TARGET *
-		SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE;
-
 	bo_size = 0;
-	for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) {
+	for (i = 0; i < batch->num_otables; ++i) {
+		if (!otables[i].enabled)
+			continue;
+
 		otables[i].size =
 			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
 		bo_size += otables[i].size;
@@ -267,46 +257,114 @@
 			    ttm_bo_type_device,
 			    &vmw_sys_ne_placement,
 			    0, false, NULL,
-			    &dev_priv->otable_bo);
+			    &batch->otable_bo);
 
 	if (unlikely(ret != 0))
 		goto out_no_bo;
 
-	ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL);
+	ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);
 	BUG_ON(ret != 0);
-	ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm);
+	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
 	if (unlikely(ret != 0))
 		goto out_unreserve;
-	ret = vmw_bo_map_dma(dev_priv->otable_bo);
+	ret = vmw_bo_map_dma(batch->otable_bo);
 	if (unlikely(ret != 0))
 		goto out_unreserve;
 
-	ttm_bo_unreserve(dev_priv->otable_bo);
+	ttm_bo_unreserve(batch->otable_bo);
 
 	offset = 0;
-	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) {
-		ret = vmw_setup_otable_base(dev_priv, i, offset,
+	for (i = 0; i < batch->num_otables; ++i) {
+		if (!batch->otables[i].enabled)
+			continue;
+
+		ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
+					    offset,
 					    &otables[i]);
 		if (unlikely(ret != 0))
 			goto out_no_setup;
 		offset += otables[i].size;
 	}
 
-	dev_priv->otables = otables;
 	return 0;
 
 out_unreserve:
-	ttm_bo_unreserve(dev_priv->otable_bo);
+	ttm_bo_unreserve(batch->otable_bo);
 out_no_setup:
-	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
-		vmw_takedown_otable_base(dev_priv, i, &otables[i]);
+	for (i = 0; i < batch->num_otables; ++i) {
+		if (batch->otables[i].enabled)
+			vmw_takedown_otable_base(dev_priv, i,
+						 &batch->otables[i]);
+	}
 
-	ttm_bo_unref(&dev_priv->otable_bo);
+	ttm_bo_unref(&batch->otable_bo);
 out_no_bo:
-	kfree(otables);
 	return ret;
 }
 
+/*
+ * vmw_otables_setup - Set up guest backed memory object tables
+ *
+ * @dev_priv:       Pointer to a device private structure
+ *
+ * Takes care of the device guest backed surface
+ * initialization, by setting up the guest backed memory object tables.
+ * Returns 0 on success and various error codes on failure. A successful return
+ * means the object tables can be taken down using the vmw_otables_takedown
+ * function.
+ */
+int vmw_otables_setup(struct vmw_private *dev_priv)
+{
+	struct vmw_otable **otables = &dev_priv->otable_batch.otables;
+	int ret;
+
+	if (dev_priv->has_dx) {
+		*otables = kmalloc(sizeof(dx_tables), GFP_KERNEL);
+		if (*otables == NULL)
+			return -ENOMEM;
+
+		memcpy(*otables, dx_tables, sizeof(dx_tables));
+		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
+	} else {
+		*otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL);
+		if (*otables == NULL)
+			return -ENOMEM;
+
+		memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables));
+		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
+	}
+
+	ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
+	if (unlikely(ret != 0))
+		goto out_setup;
+
+	return 0;
+
+out_setup:
+	kfree(*otables);
+	return ret;
+}
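A sketch of the intended pairing, with details elided: the setup above picks
dx_tables or pre_dx_tables, the tables stay up for the lifetime of the device,
and they are torn down by vmw_otables_takedown() below.

	/* Sketch: object tables bracket all guest-backed resource use. */
	ret = vmw_otables_setup(dev_priv);
	if (unlikely(ret != 0))
		return ret;

	/* ... create and use guest-backed MOBs, surfaces, contexts ... */

	vmw_otables_takedown(dev_priv);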
+
+static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
+			       struct vmw_otable_batch *batch)
+{
+	SVGAOTableType i;
+	struct ttm_buffer_object *bo = batch->otable_bo;
+	int ret;
+
+	for (i = 0; i < batch->num_otables; ++i)
+		if (batch->otables[i].enabled)
+			vmw_takedown_otable_base(dev_priv, i,
+						 &batch->otables[i]);
+
+	ret = ttm_bo_reserve(bo, false, true, false, NULL);
+	BUG_ON(ret != 0);
+
+	vmw_fence_single_bo(bo, NULL);
+	ttm_bo_unreserve(bo);
+
+	ttm_bo_unref(&batch->otable_bo);
+}
 
 /*
  * vmw_otables_takedown - Take down guest backed memory object tables
@@ -317,26 +375,10 @@
  */
 void vmw_otables_takedown(struct vmw_private *dev_priv)
 {
-	SVGAOTableType i;
-	struct ttm_buffer_object *bo = dev_priv->otable_bo;
-	int ret;
-
-	for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i)
-		vmw_takedown_otable_base(dev_priv, i,
-					 &dev_priv->otables[i]);
-
-	ret = ttm_bo_reserve(bo, false, true, false, NULL);
-	BUG_ON(ret != 0);
-
-	vmw_fence_single_bo(bo, NULL);
-	ttm_bo_unreserve(bo);
-
-	ttm_bo_unref(&dev_priv->otable_bo);
-	kfree(dev_priv->otables);
-	dev_priv->otables = NULL;
+	vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
+	kfree(dev_priv->otable_batch.otables);
 }
 
-
 /*
  * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
  * needed for a guest backed memory object.
@@ -409,7 +451,7 @@
 		goto out_unreserve;
 
 	ttm_bo_unreserve(mob->pt_bo);
-	
+
 	return 0;
 
 out_unreserve:
@@ -429,15 +471,15 @@
  * *@addr according to the page table entry size.
  */
 #if (VMW_PPN_SIZE == 8)
-static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
 {
-	*((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
+	*((u64 *) *addr) = val >> PAGE_SHIFT;
 	*addr += 2;
 }
 #else
-static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
 {
-	*(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT);
+	*(*addr)++ = val >> PAGE_SHIFT;
 }
 #endif
 
@@ -459,7 +501,7 @@
 	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
 	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
 	unsigned long pt_page;
-	__le32 *addr, *save_addr;
+	u32 *addr, *save_addr;
 	unsigned long i;
 	struct page *page;
 
@@ -574,7 +616,7 @@
 		vmw_fence_single_bo(bo, NULL);
 		ttm_bo_unreserve(bo);
 	}
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 }
 
 /*
@@ -627,7 +669,7 @@
 		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
 	}
 
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
@@ -640,7 +682,7 @@
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.mobid = mob_id;
 	cmd->body.ptDepth = mob->pt_level;
-	cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
 	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -648,7 +690,7 @@
 	return 0;
 
 out_no_cmd_space:
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 	if (pt_set_up)
 		ttm_bo_unref(&mob->pt_bo);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 87e39f6..76069f0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,8 +31,8 @@
 
 #include <drm/ttm/ttm_placement.h>
 
-#include "svga_overlay.h"
-#include "svga_escape.h"
+#include "device_include/svga_overlay.h"
+#include "device_include/svga_escape.h"
 
 #define VMW_MAX_NUM_STREAMS 1
 #define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
@@ -100,7 +100,7 @@
 {
 	struct vmw_escape_video_flush *flush;
 	size_t fifo_size;
-	bool have_so = dev_priv->sou_priv ? true : false;
+	bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
 	int i, num_items;
 	SVGAGuestPtr ptr;
 
@@ -231,10 +231,10 @@
 	if (!pin)
 		return vmw_dmabuf_unpin(dev_priv, buf, inter);
 
-	if (!dev_priv->sou_priv)
-		return vmw_dmabuf_to_vram(dev_priv, buf, true, inter);
+	if (dev_priv->active_display_unit == vmw_du_legacy)
+		return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);
 
-	return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter);
+	return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
 }
 
 /**
@@ -453,7 +453,7 @@
 
 static bool vmw_overlay_available(const struct vmw_private *dev_priv)
 {
-	return (dev_priv->overlay_priv != NULL && 
+	return (dev_priv->overlay_priv != NULL &&
 		((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
 		 VMW_OVERLAY_CAP_MASK));
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
index 9d0dd3a..dce7980 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,19 +39,17 @@
 #define VMWGFX_IRQSTATUS_PORT 0x8
 
 struct svga_guest_mem_descriptor {
-	__le32 ppn;
-	__le32 num_pages;
+	u32 ppn;
+	u32 num_pages;
 };
 
 struct svga_fifo_cmd_fence {
-	__le32 fence;
+	u32 fence;
 };
 
 #define SVGA_SYNC_GENERIC         1
 #define SVGA_SYNC_FIFOFULL        2
 
-#include "svga_types.h"
-
-#include "svga3d_reg.h"
+#include "device_include/svga3d_reg.h"
 
 #endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 210ef15..c1912f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,6 +31,7 @@
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drmP.h>
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 
 #define VMW_RES_EVICT_ERR_COUNT 10
 
@@ -121,6 +122,7 @@
 	int id;
 	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
+	write_lock(&dev_priv->resource_lock);
 	res->avail = false;
 	list_del_init(&res->lru_head);
 	write_unlock(&dev_priv->resource_lock);
@@ -143,10 +145,10 @@
 	}
 
 	if (likely(res->hw_destroy != NULL)) {
-		res->hw_destroy(res);
 		mutex_lock(&dev_priv->binding_mutex);
-		vmw_context_binding_res_list_kill(&res->binding_head);
+		vmw_binding_res_list_kill(&res->binding_head);
 		mutex_unlock(&dev_priv->binding_mutex);
+		res->hw_destroy(res);
 	}
 
 	id = res->id;
@@ -156,20 +158,17 @@
 		kfree(res);
 
 	write_lock(&dev_priv->resource_lock);
-
 	if (id != -1)
 		idr_remove(idr, id);
+	write_unlock(&dev_priv->resource_lock);
 }
 
 void vmw_resource_unreference(struct vmw_resource **p_res)
 {
 	struct vmw_resource *res = *p_res;
-	struct vmw_private *dev_priv = res->dev_priv;
 
 	*p_res = NULL;
-	write_lock(&dev_priv->resource_lock);
 	kref_put(&res->kref, vmw_resource_release);
-	write_unlock(&dev_priv->resource_lock);
 }
 
 
@@ -260,17 +259,16 @@
 	write_unlock(&dev_priv->resource_lock);
 }
 
-struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
-					 struct idr *idr, int id)
+static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
+						struct idr *idr, int id)
 {
 	struct vmw_resource *res;
 
 	read_lock(&dev_priv->resource_lock);
 	res = idr_find(idr, id);
-	if (res && res->avail)
-		kref_get(&res->kref);
-	else
+	if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
 		res = NULL;
+
 	read_unlock(&dev_priv->resource_lock);
 
 	if (unlikely(res == NULL))
@@ -900,20 +898,21 @@
 	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
 				   vmw_user_stream_size,
 				   false, true);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS)
 			DRM_ERROR("Out of graphics memory for stream"
 				  " creation.\n");
-		goto out_unlock;
-	}
 
+		goto out_ret;
+	}
 
 	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
 	if (unlikely(stream == NULL)) {
 		ttm_mem_global_free(vmw_mem_glob(dev_priv),
 				    vmw_user_stream_size);
 		ret = -ENOMEM;
-		goto out_unlock;
+		goto out_ret;
 	}
 
 	res = &stream->stream.res;
@@ -926,7 +925,7 @@
 
 	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
 	if (unlikely(ret != 0))
-		goto out_unlock;
+		goto out_ret;
 
 	tmp = vmw_resource_reference(res);
 	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
@@ -940,8 +939,7 @@
 	arg->stream_id = res->id;
 out_err:
 	vmw_resource_unreference(&res);
-out_unlock:
-	ttm_read_unlock(&dev_priv->reservation_sem);
+out_ret:
 	return ret;
 }
 
@@ -1152,14 +1150,16 @@
  * command submission.
  *
  * @res:               Pointer to the struct vmw_resource to unreserve.
+ * @switch_backup:     Backup buffer has been switched.
  * @new_backup:        Pointer to new backup buffer if command submission
- *                     switched.
- * @new_backup_offset: New backup offset if @new_backup is !NULL.
+ *                     switched. May be NULL.
+ * @new_backup_offset: New backup offset if @switch_backup is true.
  *
  * Currently unreserving a resource means putting it back on the device's
  * resource lru list, so that it can be evicted if necessary.
  */
 void vmw_resource_unreserve(struct vmw_resource *res,
+			    bool switch_backup,
 			    struct vmw_dma_buffer *new_backup,
 			    unsigned long new_backup_offset)
 {
@@ -1168,22 +1168,25 @@
 	if (!list_empty(&res->lru_head))
 		return;
 
-	if (new_backup && new_backup != res->backup) {
-
+	if (switch_backup && new_backup != res->backup) {
 		if (res->backup) {
 			lockdep_assert_held(&res->backup->base.resv->lock.base);
 			list_del_init(&res->mob_head);
 			vmw_dmabuf_unreference(&res->backup);
 		}
 
-		res->backup = vmw_dmabuf_reference(new_backup);
-		lockdep_assert_held(&new_backup->base.resv->lock.base);
-		list_add_tail(&res->mob_head, &new_backup->res_list);
+		if (new_backup) {
+			res->backup = vmw_dmabuf_reference(new_backup);
+			lockdep_assert_held(&new_backup->base.resv->lock.base);
+			list_add_tail(&res->mob_head, &new_backup->res_list);
+		} else {
+			res->backup = NULL;
+		}
 	}
-	if (new_backup)
+	if (switch_backup)
 		res->backup_offset = new_backup_offset;
 
-	if (!res->func->may_evict || res->id == -1)
+	if (!res->func->may_evict || res->id == -1 || res->pin_count)
 		return;
 
 	write_lock(&dev_priv->resource_lock);
@@ -1259,7 +1262,8 @@
  * the buffer may not be bound to the resource at this point.
  *
  */
-int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
+int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
+			 bool no_backup)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
 	int ret;
@@ -1270,9 +1274,13 @@
 
 	if (res->func->needs_backup && res->backup == NULL &&
 	    !no_backup) {
-		ret = vmw_resource_buf_alloc(res, true);
-		if (unlikely(ret != 0))
+		ret = vmw_resource_buf_alloc(res, interruptible);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed to allocate a backup buffer "
+				  "of size %lu bytes.\n",
+				  (unsigned long) res->backup_size);
 			return ret;
+		}
 	}
 
 	return 0;
@@ -1305,7 +1313,7 @@
  * @res:            The resource to evict.
  * @interruptible:  Whether to wait interruptible.
  */
-int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
+static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 {
 	struct ttm_validate_buffer val_buf;
 	const struct vmw_res_func *func = res->func;
@@ -1356,7 +1364,7 @@
 	struct ttm_validate_buffer val_buf;
 	unsigned err_count = 0;
 
-	if (likely(!res->func->may_evict))
+	if (!res->func->create)
 		return 0;
 
 	val_buf.bo = NULL;
@@ -1443,9 +1451,9 @@
 /**
  * vmw_resource_move_notify - TTM move_notify_callback
  *
- * @bo:             The TTM buffer object about to move.
- * @mem:            The truct ttm_mem_reg indicating to what memory
- *                  region the move is taking place.
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ *       region the move is taking place.
  *
  * Evicts the Guest Backed hardware resource if the backup
  * buffer is being moved out of MOB memory.
@@ -1495,6 +1503,101 @@
 	}
 }
 
+
+
+/**
+ * vmw_query_readback_all - Read back cached query states
+ *
+ * @dx_query_mob: Buffer containing the DX query MOB
+ *
+ * Read back cached states from the device if they exist.  This function
+ * assumes binding_mutex is held.
+ */
+int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
+{
+	struct vmw_resource *dx_query_ctx;
+	struct vmw_private *dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXReadbackAllQuery body;
+	} *cmd;
+
+
+	/* No query bound, so do nothing */
+	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
+		return 0;
+
+	dx_query_ctx = dx_query_mob->dx_query_ctx;
+	dev_priv     = dx_query_ctx->dev_priv;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for "
+			  "query MOB read back.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid    = dx_query_ctx->id;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	/* Triggers a rebind the next time the affected context is bound */
+	dx_query_mob->dx_query_ctx = NULL;
+
+	return 0;
+}
+
+
+
+/**
+ * vmw_query_move_notify - Read back cached query states
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The memory region @bo is moving to.
+ *
+ * Called before the query MOB is swapped out to read back cached query
+ * states from the device.
+ */
+void vmw_query_move_notify(struct ttm_buffer_object *bo,
+			   struct ttm_mem_reg *mem)
+{
+	struct vmw_dma_buffer *dx_query_mob;
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct vmw_private *dev_priv;
+
+
+	dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+	mutex_lock(&dev_priv->binding_mutex);
+
+	dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
+	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
+		mutex_unlock(&dev_priv->binding_mutex);
+		return;
+	}
+
+	/* If BO is being moved from MOB to system memory */
+	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
+		struct vmw_fence_obj *fence;
+
+		(void) vmw_query_readback_all(dx_query_mob);
+		mutex_unlock(&dev_priv->binding_mutex);
+
+		/* Create a fence and attach the BO to it */
+		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+		vmw_fence_single_bo(bo, fence);
+
+		if (fence != NULL)
+			vmw_fence_obj_unreference(&fence);
+
+		(void) ttm_bo_wait(bo, false, false, false);
+	} else
+		mutex_unlock(&dev_priv->binding_mutex);
+
+}
+
 /**
  * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
  *
@@ -1573,3 +1676,107 @@
 
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 }
+
+/**
+ * vmw_resource_pin - Add a pin reference on a resource
+ *
+ * @res: The resource to add a pin reference on
+ *
+ * This function adds a pin reference, and if needed validates the resource.
+ * Having a pin reference means that the resource can never be evicted, and
+ * its id will never change as long as there is a pin reference.
+ * This function returns 0 on success and a negative error code on failure.
+ */
+int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+
+	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+	ret = vmw_resource_reserve(res, interruptible, false);
+	if (ret)
+		goto out_no_reserve;
+
+	if (res->pin_count == 0) {
+		struct vmw_dma_buffer *vbo = NULL;
+
+		if (res->backup) {
+			vbo = res->backup;
+
+			ttm_bo_reserve(&vbo->base, interruptible, false, false,
+				       NULL);
+			if (!vbo->pin_count) {
+				ret = ttm_bo_validate
+					(&vbo->base,
+					 res->func->backup_placement,
+					 interruptible, false);
+				if (ret) {
+					ttm_bo_unreserve(&vbo->base);
+					goto out_no_validate;
+				}
+			}
+
+			/* Do we really need to pin the MOB as well? */
+			vmw_bo_pin_reserved(vbo, true);
+		}
+		ret = vmw_resource_validate(res);
+		if (vbo)
+			ttm_bo_unreserve(&vbo->base);
+		if (ret)
+			goto out_no_validate;
+	}
+	res->pin_count++;
+
+out_no_validate:
+	vmw_resource_unreserve(res, false, NULL, 0UL);
+out_no_reserve:
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+	ttm_write_unlock(&dev_priv->reservation_sem);
+
+	return ret;
+}
+
+/**
+ * vmw_resource_unpin - Remove a pin reference from a resource
+ *
+ * @res: The resource to remove a pin reference from
+ *
+ * Having a pin reference means that the resource can never be evicted, and
+ * its id will never change as long as there is a pin reference.
+ */
+void vmw_resource_unpin(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+
+	ttm_read_lock(&dev_priv->reservation_sem, false);
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+
+	ret = vmw_resource_reserve(res, false, true);
+	WARN_ON(ret);
+
+	WARN_ON(res->pin_count == 0);
+	if (--res->pin_count == 0 && res->backup) {
+		struct vmw_dma_buffer *vbo = res->backup;
+
+		ttm_bo_reserve(&vbo->base, false, false, false, NULL);
+		vmw_bo_pin_reserved(vbo, false);
+		ttm_bo_unreserve(&vbo->base);
+	}
+
+	vmw_resource_unreserve(res, false, NULL, 0UL);
+
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+	ttm_read_unlock(&dev_priv->reservation_sem);
+}
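A usage sketch (hypothetical caller): pin while the resource id and backing
placement must stay fixed, then drop the pin reference.

	/* Sketch: keep the resource resident across a critical window. */
	ret = vmw_resource_pin(res, true);
	if (ret)
		return ret;

	/* res->id and the backup buffer placement are stable here. */

	vmw_resource_unpin(res);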
+
+/**
+ * vmw_res_type - Return the resource type
+ *
+ * @res: Pointer to the resource
+ */
+enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
+{
+	return res->func->res_type;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index f3adeed..5994ef6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2012-2014 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -30,6 +30,12 @@
 
 #include "vmwgfx_drv.h"
 
+enum vmw_cmdbuf_res_state {
+	VMW_CMDBUF_RES_COMMITTED,
+	VMW_CMDBUF_RES_ADD,
+	VMW_CMDBUF_RES_DEL
+};
+
 /**
  * struct vmw_user_resource_conv - Identify a derived user-exported resource
  * type and provide a function to convert its ttm_base_object pointer to
@@ -55,8 +61,10 @@
  * @bind:              Bind a hardware resource to persistent buffer storage.
  * @unbind:            Unbind a hardware resource from persistent
  *                     buffer storage.
+ * @commit_notify:     If the resource is a command buffer managed resource,
+ *                     callback to notify that a define or remove command
+ *                     has been committed to the device.
  */
-
 struct vmw_res_func {
 	enum vmw_res_type res_type;
 	bool needs_backup;
@@ -71,6 +79,8 @@
 	int (*unbind) (struct vmw_resource *res,
 		       bool readback,
 		       struct ttm_validate_buffer *val_buf);
+	void (*commit_notify)(struct vmw_resource *res,
+			      enum vmw_cmdbuf_res_state state);
 };
 
 int vmw_resource_alloc_id(struct vmw_resource *res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 7dc591d..b96d1ab 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -36,10 +36,55 @@
 #define vmw_connector_to_sou(x) \
 	container_of(x, struct vmw_screen_object_unit, base.connector)
 
+/**
+ * struct vmw_kms_sou_surface_dirty - Closure structure for
+ * blit surface to screen command.
+ * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ * @dst_x: Difference between source clip rects and framebuffer coordinates,
+ * x direction.
+ * @dst_y: Difference between source clip rects and framebuffer coordinates,
+ * y direction.
+ * @sid: Surface id of surface to copy from.
+ */
+struct vmw_kms_sou_surface_dirty {
+	struct vmw_kms_dirty base;
+	s32 left, right, top, bottom;
+	s32 dst_x, dst_y;
+	u32 sid;
+};
+
+/*
+ * SVGA commands that are used by this code. Please see the device headers
+ * for explanation.
+ */
+struct vmw_kms_sou_readback_blit {
+	uint32 header;
+	SVGAFifoCmdBlitScreenToGMRFB body;
+};
+
+struct vmw_kms_sou_dmabuf_blit {
+	uint32 header;
+	SVGAFifoCmdBlitGMRFBToScreen body;
+};
+
+struct vmw_kms_sou_dirty_cmd {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdBlitSurfaceToScreen body;
+};
+
+
+/*
+ * Other structs.
+ */
+
 struct vmw_screen_object_display {
 	unsigned num_implicit;
 
 	struct vmw_framebuffer *implicit_fb;
+	SVGAFifoCmdDefineGMRFB cur;
+	struct vmw_dma_buffer *pinned_gmrfb;
 };
 
 /**
@@ -57,7 +102,7 @@
 
 static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
 {
-	vmw_display_unit_cleanup(&sou->base);
+	vmw_du_cleanup(&sou->base);
 	kfree(sou);
 }
 
@@ -72,7 +117,7 @@
 }
 
 static void vmw_sou_del_active(struct vmw_private *vmw_priv,
-			      struct vmw_screen_object_unit *sou)
+			       struct vmw_screen_object_unit *sou)
 {
 	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
 
@@ -84,8 +129,8 @@
 }
 
 static void vmw_sou_add_active(struct vmw_private *vmw_priv,
-			      struct vmw_screen_object_unit *sou,
-			      struct vmw_framebuffer *vfb)
+			       struct vmw_screen_object_unit *sou,
+			       struct vmw_framebuffer *vfb)
 {
 	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
 
@@ -202,14 +247,7 @@
 static void vmw_sou_backing_free(struct vmw_private *dev_priv,
 				 struct vmw_screen_object_unit *sou)
 {
-	struct ttm_buffer_object *bo;
-
-	if (unlikely(sou->buffer == NULL))
-		return;
-
-	bo = &sou->buffer->base;
-	ttm_bo_unref(&bo);
-	sou->buffer = NULL;
+	vmw_dmabuf_unreference(&sou->buffer);
 	sou->buffer_size = 0;
 }
 
@@ -274,13 +312,13 @@
 	dev_priv = vmw_priv(crtc->dev);
 
 	if (set->num_connectors > 1) {
-		DRM_ERROR("to many connectors\n");
+		DRM_ERROR("Too many connectors\n");
 		return -EINVAL;
 	}
 
 	if (set->num_connectors == 1 &&
 	    set->connectors[0] != &sou->base.connector) {
-		DRM_ERROR("connector doesn't match %p %p\n",
+		DRM_ERROR("Connector doesn't match %p %p\n",
 			set->connectors[0], &sou->base.connector);
 		return -EINVAL;
 	}
@@ -331,7 +369,7 @@
 		return -EINVAL;
 	}
 
-	vmw_fb_off(dev_priv);
+	vmw_svga_enable(dev_priv);
 
 	if (mode->hdisplay != crtc->mode.hdisplay ||
 	    mode->vdisplay != crtc->mode.vdisplay) {
@@ -390,6 +428,108 @@
 	return 0;
 }
 
+/**
+ * Returns whether this unit can be page flipped.
+ * Must be called with the mode_config mutex held.
+ */
+static bool vmw_sou_screen_object_flippable(struct vmw_private *dev_priv,
+					    struct drm_crtc *crtc)
+{
+	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+	if (!sou->base.is_implicit)
+		return true;
+
+	if (dev_priv->sou_priv->num_implicit != 1)
+		return false;
+
+	return true;
+}
+
+/**
+ * Update the implicit fb to the current fb of this crtc.
+ * Must be called with the mode_config mutex held.
+ */
+static void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv,
+				       struct drm_crtc *crtc)
+{
+	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+	BUG_ON(!sou->base.is_implicit);
+
+	dev_priv->sou_priv->implicit_fb =
+		vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
+}
+
+static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
+				  struct drm_framebuffer *fb,
+				  struct drm_pending_vblank_event *event,
+				  uint32_t flags)
+{
+	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+	struct drm_framebuffer *old_fb = crtc->primary->fb;
+	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
+	struct vmw_fence_obj *fence = NULL;
+	struct drm_clip_rect clips;
+	int ret;
+
+	/* require ScreenObject support for page flipping */
+	if (!dev_priv->sou_priv)
+		return -ENOSYS;
+
+	if (!vmw_sou_screen_object_flippable(dev_priv, crtc))
+		return -EINVAL;
+
+	crtc->primary->fb = fb;
+
+	/* do a full screen dirty update */
+	clips.x1 = clips.y1 = 0;
+	clips.x2 = fb->width;
+	clips.y2 = fb->height;
+
+	if (vfb->dmabuf)
+		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
+						  &clips, 1, 1,
+						  true, &fence);
+	else
+		ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
+						   &clips, NULL, NULL,
+						   0, 0, 1, 1, &fence);
+
+
+	if (ret != 0)
+		goto out_no_fence;
+	if (!fence) {
+		ret = -EINVAL;
+		goto out_no_fence;
+	}
+
+	if (event) {
+		struct drm_file *file_priv = event->base.file_priv;
+
+		ret = vmw_event_fence_action_queue(file_priv, fence,
+						   &event->base,
+						   &event->event.tv_sec,
+						   &event->event.tv_usec,
+						   true);
+	}
+
+	/*
+	 * No need to hold on to this now. The only cleanup
+	 * we need to do if we fail is unref the fence.
+	 */
+	vmw_fence_obj_unreference(&fence);
+
+	if (vmw_crtc_to_du(crtc)->is_implicit)
+		vmw_sou_update_implicit_fb(dev_priv, crtc);
+
+	return ret;
+
+out_no_fence:
+	crtc->primary->fb = old_fb;
+	return ret;
+}
+
 static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
 	.save = vmw_du_crtc_save,
 	.restore = vmw_du_crtc_restore,
@@ -398,7 +538,7 @@
 	.gamma_set = vmw_du_crtc_gamma_set,
 	.destroy = vmw_sou_crtc_destroy,
 	.set_config = vmw_sou_crtc_set_config,
-	.page_flip = vmw_du_page_flip,
+	.page_flip = vmw_sou_crtc_page_flip,
 };
 
 /*
@@ -423,7 +563,7 @@
 	vmw_sou_destroy(vmw_connector_to_sou(connector));
 }
 
-static struct drm_connector_funcs vmw_legacy_connector_funcs = {
+static struct drm_connector_funcs vmw_sou_connector_funcs = {
 	.dpms = vmw_du_connector_dpms,
 	.save = vmw_du_connector_save,
 	.restore = vmw_du_connector_restore,
@@ -458,7 +598,7 @@
 	sou->base.pref_mode = NULL;
 	sou->base.is_implicit = true;
 
-	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
+	drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
 			   DRM_MODE_CONNECTOR_VIRTUAL);
 	connector->status = vmw_du_connector_detect(connector, true);
 
@@ -481,7 +621,7 @@
 	return 0;
 }
 
-int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
+int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 	int i, ret;
@@ -516,7 +656,9 @@
 	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
 		vmw_sou_init(dev_priv, i);
 
-	DRM_INFO("Screen objects system initialized\n");
+	dev_priv->active_display_unit = vmw_du_screen_object;
+
+	DRM_INFO("Screen Objects Display Unit initialized\n");
 
 	return 0;
 
@@ -529,7 +671,7 @@
 	return ret;
 }
 
-int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
+int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 
@@ -543,35 +685,369 @@
 	return 0;
 }
 
-/**
- * Returns if this unit can be page flipped.
- * Must be called with the mode_config mutex held.
- */
-bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
-				     struct drm_crtc *crtc)
+static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
+				  struct vmw_framebuffer *framebuffer)
 {
-	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+	struct vmw_dma_buffer *buf =
+		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+			     base)->buffer;
+	int depth = framebuffer->base.depth;
+	struct {
+		uint32_t header;
+		SVGAFifoCmdDefineGMRFB body;
+	} *cmd;
 
-	if (!sou->base.is_implicit)
-		return true;
+	/* Emulate RGBA support: contrary to svga_reg.h, this is not
+	 * supported by hosts. This is only a problem if we later read
+	 * this value back and expect exactly what we uploaded.
+	 */
+	if (depth == 32)
+		depth = 24;
 
-	if (dev_priv->sou_priv->num_implicit != 1)
-		return false;
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (!cmd) {
+		DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
+		return -ENOMEM;
+	}
 
-	return true;
+	cmd->header = SVGA_CMD_DEFINE_GMRFB;
+	cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
+	cmd->body.format.colorDepth = depth;
+	cmd->body.format.reserved = 0;
+	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
+	/* Buffer is reserved in vram or GMR */
+	vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
 }
 
 /**
- * Update the implicit fb to the current fb of this crtc.
- * Must be called with the mode_config mutex held.
+ * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
+ * blit surface to screen command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in the command, and translates the cliprects
+ * to match the encoded destination bounding box.
  */
-void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
-					      struct drm_crtc *crtc)
+static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
 {
-	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+	struct vmw_kms_sou_surface_dirty *sdirty =
+		container_of(dirty, typeof(*sdirty), base);
+	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
+	s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
+	s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
+	size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
+	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
+	int i;
 
-	BUG_ON(!sou->base.is_implicit);
+	cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
+	cmd->header.size = sizeof(cmd->body) + region_size;
 
-	dev_priv->sou_priv->implicit_fb =
-		vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
+	/*
+	 * Use the destination bounding box to specify both the destination
+	 * and the source bounding regions.
+	 */
+	cmd->body.destRect.left = sdirty->left;
+	cmd->body.destRect.right = sdirty->right;
+	cmd->body.destRect.top = sdirty->top;
+	cmd->body.destRect.bottom = sdirty->bottom;
+
+	cmd->body.srcRect.left = sdirty->left + trans_x;
+	cmd->body.srcRect.right = sdirty->right + trans_x;
+	cmd->body.srcRect.top = sdirty->top + trans_y;
+	cmd->body.srcRect.bottom = sdirty->bottom + trans_y;
+
+	cmd->body.srcImage.sid = sdirty->sid;
+	cmd->body.destScreenId = dirty->unit->unit;
+
+	/* Blits are relative to the destination rect. Translate. */
+	for (i = 0; i < dirty->num_hits; ++i, ++blit) {
+		blit->left -= sdirty->left;
+		blit->right -= sdirty->left;
+		blit->top -= sdirty->top;
+		blit->bottom -= sdirty->top;
+	}
+
+	vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));
+
+	sdirty->left = sdirty->top = S32_MAX;
+	sdirty->right = sdirty->bottom = S32_MIN;
+}
+
+/**
+ * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a SVGASignedRect cliprect and updates the bounding box of the
+ * BLIT_SURFACE_TO_SCREEN command.
+ */
+static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_kms_sou_surface_dirty *sdirty =
+		container_of(dirty, typeof(*sdirty), base);
+	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
+	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
+
+	/* Destination rect. */
+	blit += dirty->num_hits;
+	blit->left = dirty->unit_x1;
+	blit->top = dirty->unit_y1;
+	blit->right = dirty->unit_x2;
+	blit->bottom = dirty->unit_y2;
+
+	/* Destination bounding box */
+	sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
+	sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
+	sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
+	sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
+
+	dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the surface-buffer backed framebuffer.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @srf: Pointer to surface to blit from. If NULL, the surface attached
+ * to @framebuffer will be used.
+ * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
+ * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
+ * @num_clips: Number of clip rects in @clips.
+ * @inc: Increment to use when looping over @clips.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL in which
+ * case the device has already synchronized.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
+				 struct vmw_framebuffer *framebuffer,
+				 struct drm_clip_rect *clips,
+				 struct drm_vmw_rect *vclips,
+				 struct vmw_resource *srf,
+				 s32 dest_x,
+				 s32 dest_y,
+				 unsigned num_clips, int inc,
+				 struct vmw_fence_obj **out_fence)
+{
+	struct vmw_framebuffer_surface *vfbs =
+		container_of(framebuffer, typeof(*vfbs), base);
+	struct vmw_kms_sou_surface_dirty sdirty;
+	int ret;
+
+	if (!srf)
+		srf = &vfbs->surface->res;
+
+	ret = vmw_kms_helper_resource_prepare(srf, true);
+	if (ret)
+		return ret;
+
+	sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
+	sdirty.base.clip = vmw_sou_surface_clip;
+	sdirty.base.dev_priv = dev_priv;
+	sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
+	  sizeof(SVGASignedRect) * num_clips;
+
+	sdirty.sid = srf->id;
+	sdirty.left = sdirty.top = S32_MAX;
+	sdirty.right = sdirty.bottom = S32_MIN;
+	sdirty.dst_x = dest_x;
+	sdirty.dst_y = dest_y;
+
+	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
+				   dest_x, dest_y, num_clips, inc,
+				   &sdirty.base);
+	vmw_kms_helper_resource_finish(srf, out_fence);
+
+	return ret;
+}
+
+/**
+ * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of dma-buffer dirty blits.
+ *
+ * @dirty: The closure structure.
+ *
+ * Commits a previously built command buffer of dma-buffer dirty blits.
+ */
+static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	vmw_fifo_commit(dirty->dev_priv,
+			sizeof(struct vmw_kms_sou_dmabuf_blit) *
+			dirty->num_hits);
+}
+
+/**
+ * vmw_sou_dmabuf_clip - Callback to encode a dma-buffer dirty cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
+ */
+static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;
+
+	blit += dirty->num_hits;
+	blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+	blit->body.destScreenId = dirty->unit->unit;
+	blit->body.srcOrigin.x = dirty->fb_x;
+	blit->body.srcOrigin.y = dirty->fb_y;
+	blit->body.destRect.left = dirty->unit_x1;
+	blit->body.destRect.top = dirty->unit_y1;
+	blit->body.destRect.right = dirty->unit_x2;
+	blit->body.destRect.bottom = dirty->unit_y2;
+	dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_sou_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the dma-buffer backed framebuffer.
+ * @clips: Array of clip rects.
+ * @num_clips: Number of clip rects in @clips.
+ * @increment: Increment to use when looping over @clips.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL in which
+ * case the device has already synchronized.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+				struct vmw_framebuffer *framebuffer,
+				struct drm_clip_rect *clips,
+				unsigned num_clips, int increment,
+				bool interruptible,
+				struct vmw_fence_obj **out_fence)
+{
+	struct vmw_dma_buffer *buf =
+		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+			     base)->buffer;
+	struct vmw_kms_dirty dirty;
+	int ret;
+
+	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
+					    false);
+	if (ret)
+		return ret;
+
+	ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
+	if (unlikely(ret != 0))
+		goto out_revert;
+
+	dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
+	dirty.clip = vmw_sou_dmabuf_clip;
+	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
+		num_clips;
+	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
+				   0, 0, num_clips, increment, &dirty);
+	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
+
+	return ret;
+
+out_revert:
+	vmw_kms_helper_buffer_revert(buf);
+
+	return ret;
+}
+
+/**
+ * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
+ *
+ * @dirty: The closure structure.
+ *
+ * Commits a previously built command buffer of readback clips.
+ */
+static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	vmw_fifo_commit(dirty->dev_priv,
+			sizeof(struct vmw_kms_sou_readback_blit) *
+			dirty->num_hits);
+}
+
+/**
+ * vmw_sou_readback_clip - Callback to encode a readback cliprect.
+ *
+ * @dirty: The closure structure
+ *
+ * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
+ */
+static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_kms_sou_readback_blit *blit = dirty->cmd;
+
+	blit += dirty->num_hits;
+	blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
+	blit->body.srcScreenId = dirty->unit->unit;
+	blit->body.destOrigin.x = dirty->fb_x;
+	blit->body.destOrigin.y = dirty->fb_y;
+	blit->body.srcRect.left = dirty->unit_x1;
+	blit->body.srcRect.top = dirty->unit_y1;
+	blit->body.srcRect.right = dirty->unit_x2;
+	blit->body.srcRect.bottom = dirty->unit_y2;
+	dirty->num_hits++;
+}
+
+/**
+ * vmw_kms_sou_readback - Perform a readback from the screen object system to
+ * a dma-buffer backed framebuffer.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * Must be set to NULL if @user_fence_rep is NULL.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * Must be set to non-NULL if @file_priv is non-NULL.
+ * @vclips: Array of clip rects.
+ * @num_clips: Number of clip rects in @vclips.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_sou_readback(struct vmw_private *dev_priv,
+			 struct drm_file *file_priv,
+			 struct vmw_framebuffer *vfb,
+			 struct drm_vmw_fence_rep __user *user_fence_rep,
+			 struct drm_vmw_rect *vclips,
+			 uint32_t num_clips)
+{
+	struct vmw_dma_buffer *buf =
+		container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+	struct vmw_kms_dirty dirty;
+	int ret;
+
+	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
+	if (ret)
+		return ret;
+
+	ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
+	if (unlikely(ret != 0))
+		goto out_revert;
+
+	dirty.fifo_commit = vmw_sou_readback_fifo_commit;
+	dirty.clip = vmw_sou_readback_clip;
+	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
+		num_clips;
+	ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
+				   0, 0, num_clips, 1, &dirty);
+	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
+				     user_fence_rep);
+
+	return ret;
+
+out_revert:
+	vmw_kms_helper_buffer_revert(buf);
+
+	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 6a4584a..bba1ee3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,12 +27,15 @@
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_binding.h"
 #include "ttm/ttm_placement.h"
 
 struct vmw_shader {
 	struct vmw_resource res;
 	SVGA3dShaderType type;
 	uint32_t size;
+	uint8_t num_input_sig;
+	uint8_t num_output_sig;
 };
 
 struct vmw_user_shader {
@@ -40,8 +43,18 @@
 	struct vmw_shader shader;
 };
 
+struct vmw_dx_shader {
+	struct vmw_resource res;
+	struct vmw_resource *ctx;
+	struct vmw_resource *cotable;
+	u32 id;
+	bool committed;
+	struct list_head cotable_head;
+};
+
 static uint64_t vmw_user_shader_size;
 static uint64_t vmw_shader_size;
+static size_t vmw_shader_dx_size;
 
 static void vmw_user_shader_free(struct vmw_resource *res);
 static struct vmw_resource *
@@ -55,6 +68,18 @@
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_shader_destroy(struct vmw_resource *res);
 
+static int vmw_dx_shader_create(struct vmw_resource *res);
+static int vmw_dx_shader_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_dx_shader_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
+					enum vmw_cmdbuf_res_state state);
+static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
+static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
+
 static const struct vmw_user_resource_conv user_shader_conv = {
 	.object_type = VMW_RES_SHADER,
 	.base_obj_to_res = vmw_user_shader_base_to_res,
@@ -77,6 +102,24 @@
 	.unbind = vmw_gb_shader_unbind
 };
 
+static const struct vmw_res_func vmw_dx_shader_func = {
+	.res_type = vmw_res_shader,
+	.needs_backup = true,
+	.may_evict = false,
+	.type_name = "dx shaders",
+	.backup_placement = &vmw_mob_placement,
+	.create = vmw_dx_shader_create,
+	/*
+	 * The destroy callback is only called with a committed resource on
+	 * context destroy, in which case we destroy the cotable anyway,
+	 * so there's no need to destroy DX shaders separately.
+	 */
+	.destroy = NULL,
+	.bind = vmw_dx_shader_bind,
+	.unbind = vmw_dx_shader_unbind,
+	.commit_notify = vmw_dx_shader_commit_notify,
+};
+
 /**
  * Shader management:
  */
@@ -87,25 +130,42 @@
 	return container_of(res, struct vmw_shader, res);
 }
 
+/**
+ * vmw_res_to_dx_shader - typecast a struct vmw_resource to a
+ * struct vmw_dx_shader
+ *
+ * @res: Pointer to the struct vmw_resource.
+ */
+static inline struct vmw_dx_shader *
+vmw_res_to_dx_shader(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_dx_shader, res);
+}
+
 static void vmw_hw_shader_destroy(struct vmw_resource *res)
 {
-	(void) vmw_gb_shader_destroy(res);
+	if (likely(res->func->destroy))
+		(void) res->func->destroy(res);
+	else
+		res->id = -1;
 }
 
 static int vmw_gb_shader_init(struct vmw_private *dev_priv,
 			      struct vmw_resource *res,
 			      uint32_t size,
 			      uint64_t offset,
 			      SVGA3dShaderType type,
+			      uint8_t num_input_sig,
+			      uint8_t num_output_sig,
 			      struct vmw_dma_buffer *byte_code,
 			      void (*res_free) (struct vmw_resource *res))
 {
 	struct vmw_shader *shader = vmw_res_to_shader(res);
 	int ret;
 
-	ret = vmw_resource_init(dev_priv, res, true,
-				res_free, &vmw_gb_shader_func);
-
+	ret = vmw_resource_init(dev_priv, res, true, res_free,
+				&vmw_gb_shader_func);
 
 	if (unlikely(ret != 0)) {
 		if (res_free)
@@ -122,11 +182,17 @@
 	}
 	shader->size = size;
 	shader->type = type;
+	shader->num_input_sig = num_input_sig;
+	shader->num_output_sig = num_output_sig;
 
 	vmw_resource_activate(res, vmw_hw_shader_destroy);
 	return 0;
 }
 
+/*
+ * GB shader code:
+ */
+
 static int vmw_gb_shader_create(struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
@@ -165,7 +231,7 @@
 	cmd->body.type = shader->type;
 	cmd->body.sizeInBytes = shader->size;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 
 	return 0;
 
@@ -259,7 +325,7 @@
 		return 0;
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_res_list_scrub(&res->binding_head);
+	vmw_binding_res_list_scrub(&res->binding_head);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
@@ -275,11 +341,326 @@
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 	mutex_unlock(&dev_priv->binding_mutex);
 	vmw_resource_release_id(res);
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 
 	return 0;
 }
 
+/*
+ * DX shader code:
+ */
+
+/**
+ * vmw_dx_shader_commit_notify - Notify that a shader operation has been
+ * committed to hardware from a user-supplied command stream.
+ *
+ * @res: Pointer to the shader resource.
+ * @state: Indicating whether a creation or removal has been committed.
+ *
+ */
+static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
+					enum vmw_cmdbuf_res_state state)
+{
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	if (state == VMW_CMDBUF_RES_ADD) {
+		mutex_lock(&dev_priv->binding_mutex);
+		vmw_cotable_add_resource(shader->cotable,
+					 &shader->cotable_head);
+		shader->committed = true;
+		res->id = shader->id;
+		mutex_unlock(&dev_priv->binding_mutex);
+	} else {
+		mutex_lock(&dev_priv->binding_mutex);
+		list_del_init(&shader->cotable_head);
+		shader->committed = false;
+		res->id = -1;
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
+}
+
+/**
+ * vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader.
+ *
+ * @res: The shader resource
+ *
+ * This function reverts a scrub operation.
+ */
+static int vmw_dx_shader_unscrub(struct vmw_resource *res)
+{
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindShader body;
+	} *cmd;
+
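+	/*
+	 * Nothing to do if the shader is still on the cotable list
+	 * (not scrubbed) or was never committed.
+	 */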
+	if (!list_empty(&shader->cotable_head) || !shader->committed)
+		return 0;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
+				  shader->ctx->id);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "scrubbing.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = shader->ctx->id;
+	cmd->body.shid = shader->id;
+	cmd->body.mobid = res->backup->base.mem.start;
+	cmd->body.offsetInBytes = res->backup_offset;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_create - The DX shader create callback
+ *
+ * @res: The DX shader resource
+ *
+ * The create callback is called as part of resource validation and
+ * makes sure that we unscrub the shader if it's previously been scrubbed.
+ */
+static int vmw_dx_shader_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	int ret = 0;
+
+	WARN_ON_ONCE(!shader->committed);
+
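+	/* If a MOB is bound, reattach it in case it has been scrubbed. */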
+	if (!list_empty(&res->mob_head)) {
+		mutex_lock(&dev_priv->binding_mutex);
+		ret = vmw_dx_shader_unscrub(res);
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
+
+	res->id = shader->id;
+	return ret;
+}
+
+/**
+ * vmw_dx_shader_bind - The DX shader bind callback
+ *
+ * @res: The DX shader resource
+ * @val_buf: Pointer to the validate buffer.
+ *
+ */
+static int vmw_dx_shader_bind(struct vmw_resource *res,
+			      struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct ttm_buffer_object *bo = val_buf->bo;
+
+	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+	mutex_lock(&dev_priv->binding_mutex);
+	vmw_dx_shader_unscrub(res);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader.
+ *
+ * @res: The shader resource
+ *
+ * This function unbinds a MOB from the DX shader without requiring the
+ * MOB dma-buffer to be reserved. The driver still considers the MOB bound,
+ * but since the device binding is already gone, a subsequent unbind or
+ * destroy operation won't need to access the context.
+ */
+static int vmw_dx_shader_scrub(struct vmw_resource *res)
+{
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXBindShader body;
+	} *cmd;
+
+	if (list_empty(&shader->cotable_head))
+		return 0;
+
+	WARN_ON_ONCE(!shader->committed);
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for shader "
+			  "scrubbing.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = shader->ctx->id;
+	cmd->body.shid = res->id;
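+	/* Binding an invalid MOB id detaches the MOB from the shader. */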
+	cmd->body.mobid = SVGA3D_INVALID_ID;
+	cmd->body.offsetInBytes = 0;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	res->id = -1;
+	list_del_init(&shader->cotable_head);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_unbind - The dx shader unbind callback.
+ *
+ * @res: The shader resource
+ * @readback: Whether this is a readback unbind. Currently unused.
+ * @val_buf: MOB buffer information.
+ */
+static int vmw_dx_shader_unbind(struct vmw_resource *res,
+				bool readback,
+				struct ttm_validate_buffer *val_buf)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_fence_obj *fence;
+	int ret;
+
+	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
+
+	mutex_lock(&dev_priv->binding_mutex);
+	ret = vmw_dx_shader_scrub(res);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	if (ret)
+		return ret;
+
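+	/*
+	 * Fence the MOB buffer so it isn't reused before the scrub
+	 * command has been processed by the device.
+	 */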
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+	vmw_fence_single_bo(val_buf->bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for
+ * DX shaders.
+ *
+ * @dev_priv: Pointer to device private structure.
+ * @list: The list of cotable resources.
+ * @readback: Whether the call was part of a readback unbind.
+ *
+ * Scrubs all shader MOBs so that any subsequent shader unbind or shader
+ * destroy operation won't need to swap in the context.
+ */
+void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
+				      struct list_head *list,
+				      bool readback)
+{
+	struct vmw_dx_shader *entry, *next;
+
+	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+	list_for_each_entry_safe(entry, next, list, cotable_head) {
+		WARN_ON(vmw_dx_shader_scrub(&entry->res));
+		if (!readback)
+			entry->committed = false;
+	}
+}
+
+/**
+ * vmw_dx_shader_res_free - The DX shader free callback
+ *
+ * @res: The shader resource
+ *
+ * Frees the DX shader resource and updates memory accounting.
+ */
+static void vmw_dx_shader_res_free(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
+
+	vmw_resource_unreference(&shader->cotable);
+	kfree(shader);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
+}
+
+/**
+ * vmw_dx_shader_add - Add a shader resource as a command buffer managed
+ * resource.
+ *
+ * @man: The command buffer resource manager.
+ * @ctx: Pointer to the context resource.
+ * @user_key: The id used for this shader.
+ * @shader_type: The shader type.
+ * @list: The list of staged command buffer managed resources.
+ */
+int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
+		      struct vmw_resource *ctx,
+		      u32 user_key,
+		      SVGA3dShaderType shader_type,
+		      struct list_head *list)
+{
+	struct vmw_dx_shader *shader;
+	struct vmw_resource *res;
+	struct vmw_private *dev_priv = ctx->dev_priv;
+	int ret;
+
+	if (!vmw_shader_dx_size)
+		vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));
+
+	if (!vmw_shader_id_ok(user_key, shader_type))
+		return -EINVAL;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
+				   false, true);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for shader "
+				  "creation.\n");
+		return ret;
+	}
+
+	shader = kmalloc(sizeof(*shader), GFP_KERNEL);
+	if (!shader) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
+		return -ENOMEM;
+	}
+
+	res = &shader->res;
+	shader->ctx = ctx;
+	shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER);
+	shader->id = user_key;
+	shader->committed = false;
+	INIT_LIST_HEAD(&shader->cotable_head);
+	ret = vmw_resource_init(dev_priv, res, true,
+				vmw_dx_shader_res_free, &vmw_dx_shader_func);
+	if (ret)
+		goto out_resource_init;
+
+	/*
+	 * The user_key name-space is not per shader type for DX shaders,
+	 * so when hashing, use a single zero shader type.
+	 */
+	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
+				 vmw_shader_key(user_key, 0),
+				 res, list);
+	if (ret)
+		goto out_resource_init;
+
+	res->id = shader->id;
+	vmw_resource_activate(res, vmw_hw_shader_destroy);
+
+out_resource_init:
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
 /**
  * User-space shader management:
  */
@@ -341,6 +722,8 @@
 				 size_t shader_size,
 				 size_t offset,
 				 SVGA3dShaderType shader_type,
+				 uint8_t num_input_sig,
+				 uint8_t num_output_sig,
 				 struct ttm_object_file *tfile,
 				 u32 *handle)
 {
@@ -383,7 +766,8 @@
 	 */
 
 	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
-				 offset, shader_type, buffer,
+				 offset, shader_type, num_input_sig,
+				 num_output_sig, buffer,
 				 vmw_user_shader_free);
 	if (unlikely(ret != 0))
 		goto out;
@@ -407,11 +791,11 @@
 }
 
 
-struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
-				      struct vmw_dma_buffer *buffer,
-				      size_t shader_size,
-				      size_t offset,
-				      SVGA3dShaderType shader_type)
+static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
+					     struct vmw_dma_buffer *buffer,
+					     size_t shader_size,
+					     size_t offset,
+					     SVGA3dShaderType shader_type)
 {
 	struct vmw_shader *shader;
 	struct vmw_resource *res;
@@ -449,7 +833,7 @@
 	 * From here on, the destructor takes over resource freeing.
 	 */
 	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
-				 offset, shader_type, buffer,
+				 offset, shader_type, 0, 0, buffer,
 				 vmw_shader_free);
 
 out_err:
@@ -457,19 +841,20 @@
 }
 
 
-int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv)
+static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
+			     enum drm_vmw_shader_type shader_type_drm,
+			     u32 buffer_handle, size_t size, size_t offset,
+			     uint8_t num_input_sig, uint8_t num_output_sig,
+			     uint32_t *shader_handle)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct drm_vmw_shader_create_arg *arg =
-		(struct drm_vmw_shader_create_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	struct vmw_dma_buffer *buffer = NULL;
 	SVGA3dShaderType shader_type;
 	int ret;
 
-	if (arg->buffer_handle != SVGA3D_INVALID_ID) {
-		ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
+	if (buffer_handle != SVGA3D_INVALID_ID) {
+		ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
 					     &buffer);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Could not find buffer for shader "
@@ -478,23 +863,20 @@
 		}
 
 		if ((u64)buffer->base.num_pages * PAGE_SIZE <
-		    (u64)arg->size + (u64)arg->offset) {
+		    (u64)size + (u64)offset) {
 			DRM_ERROR("Illegal buffer- or shader size.\n");
 			ret = -EINVAL;
 			goto out_bad_arg;
 		}
 	}
 
-	switch (arg->shader_type) {
+	switch (shader_type_drm) {
 	case drm_vmw_shader_type_vs:
 		shader_type = SVGA3D_SHADERTYPE_VS;
 		break;
 	case drm_vmw_shader_type_ps:
 		shader_type = SVGA3D_SHADERTYPE_PS;
 		break;
-	case drm_vmw_shader_type_gs:
-		shader_type = SVGA3D_SHADERTYPE_GS;
-		break;
 	default:
 		DRM_ERROR("Illegal shader type.\n");
 		ret = -EINVAL;
@@ -505,8 +887,9 @@
 	if (unlikely(ret != 0))
 		goto out_bad_arg;
 
-	ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
-				    shader_type, tfile, &arg->shader_handle);
+	ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
+				    shader_type, num_input_sig,
+				    num_output_sig, tfile, shader_handle);
 
 	ttm_read_unlock(&dev_priv->reservation_sem);
 out_bad_arg:
@@ -515,7 +898,7 @@
 }
 
 /**
- * vmw_compat_shader_id_ok - Check whether a compat shader user key and
+ * vmw_shader_id_ok - Check whether a compat shader user key and
  * shader type are within valid bounds.
  *
  * @user_key: User space id of the shader.
@@ -523,13 +906,13 @@
  *
  * Returns true if valid, false if not.
  */
-static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
+static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
 {
 	return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
 }
 
 /**
- * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
+ * vmw_shader_key - Compute a hash key suitable for a compat shader.
  *
  * @user_key: User space id of the shader.
  * @shader_type: Shader type.
@@ -537,13 +920,13 @@
  * Returns a hash key suitable for a command buffer managed resource
  * manager hash table.
  */
-static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
+static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type)
 {
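+	/*
+	 * vmw_shader_id_ok() limits user_key to 20 bits, so the shader
+	 * type can be folded into the bits above without collisions.
+	 */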
 	return user_key | (shader_type << 20);
 }
 
 /**
- * vmw_compat_shader_remove - Stage a compat shader for removal.
+ * vmw_shader_remove - Stage a compat shader for removal.
  *
  * @man: Pointer to the compat shader manager identifying the shader namespace.
  * @user_key: The key that is used to identify the shader. The key is
@@ -551,17 +934,18 @@
  * @shader_type: Shader type.
  * @list: Caller's list of staged command buffer resource actions.
  */
-int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
-			     u32 user_key, SVGA3dShaderType shader_type,
-			     struct list_head *list)
+int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
+		      u32 user_key, SVGA3dShaderType shader_type,
+		      struct list_head *list)
 {
-	if (!vmw_compat_shader_id_ok(user_key, shader_type))
+	struct vmw_resource *dummy;
+
+	if (!vmw_shader_id_ok(user_key, shader_type))
 		return -EINVAL;
 
-	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
-				     vmw_compat_shader_key(user_key,
-							   shader_type),
-				     list);
+	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader,
+				     vmw_shader_key(user_key, shader_type),
+				     list, &dummy);
 }
 
 /**
@@ -591,7 +975,7 @@
 	int ret;
 	struct vmw_resource *res;
 
-	if (!vmw_compat_shader_id_ok(user_key, shader_type))
+	if (!vmw_shader_id_ok(user_key, shader_type))
 		return -EINVAL;
 
 	/* Allocate and pin a DMA buffer */
@@ -628,8 +1012,8 @@
 	if (unlikely(ret != 0))
 		goto no_reserve;
 
-	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
-				 vmw_compat_shader_key(user_key, shader_type),
+	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
+				 vmw_shader_key(user_key, shader_type),
 				 res, list);
 	vmw_resource_unreference(&res);
 no_reserve:
@@ -639,7 +1023,7 @@
 }
 
 /**
- * vmw_compat_shader_lookup - Look up a compat shader
+ * vmw_shader_lookup - Look up a compat shader
  *
  * @man: Pointer to the command buffer managed resource manager identifying
  * the shader namespace.
@@ -650,14 +1034,26 @@
  * found. An error pointer otherwise.
  */
 struct vmw_resource *
-vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
-			 u32 user_key,
-			 SVGA3dShaderType shader_type)
+vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+		  u32 user_key,
+		  SVGA3dShaderType shader_type)
 {
-	if (!vmw_compat_shader_id_ok(user_key, shader_type))
+	if (!vmw_shader_id_ok(user_key, shader_type))
 		return ERR_PTR(-EINVAL);
 
-	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
-				     vmw_compat_shader_key(user_key,
-							   shader_type));
+	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader,
+				     vmw_shader_key(user_key, shader_type));
+}
+
+int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct drm_vmw_shader_create_arg *arg =
+		(struct drm_vmw_shader_create_arg *)data;
+
+	return vmw_shader_define(dev, file_priv, arg->shader_type,
+				 arg->buffer_handle,
+				 arg->size, arg->offset,
+				 0, 0,
+				 &arg->shader_handle);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
new file mode 100644
index 0000000..5a73eeb
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -0,0 +1,555 @@
+/**************************************************************************
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
+
+/*
+ * Currently, the only reason we need to keep track of views is that if we
+ * destroy a hardware surface, all views pointing to it must also be
+ * destroyed, otherwise the device will error.
+ * So in particular, if a surface is evicted, we must destroy all views
+ * pointing to it, and all context bindings of those views. Similarly we
+ * must restore the view bindings, views and surfaces pointed to by the
+ * views when a context is referenced in the command stream.
+ */
+
+/**
+ * struct vmw_view - view metadata
+ *
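+ * @rcu: RCU head used for delayed freeing of the view (see kfree_rcu()).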
+ * @res: The struct vmw_resource we derive from
+ * @ctx: Non-refcounted pointer to the context this view belongs to.
+ * @srf: Refcounted pointer to the surface pointed to by this view.
+ * @cotable: Refcounted pointer to the cotable holding this view.
+ * @srf_head: List head for the surface-to-view list.
+ * @cotable_head: List head for the cotable-to-view list.
+ * @view_type: View type.
+ * @view_id: User-space per context view id. Currently used also as per
+ * context device view id.
+ * @cmd_size: Size of the SVGA3D define view command that we've copied from the
+ * command stream.
+ * @committed: Whether the view is actually created or pending creation at the
+ * device level.
+ * @cmd: The SVGA3D define view command copied from the command stream.
+ */
+struct vmw_view {
+	struct rcu_head rcu;
+	struct vmw_resource res;
+	struct vmw_resource *ctx;      /* Immutable */
+	struct vmw_resource *srf;      /* Immutable */
+	struct vmw_resource *cotable;  /* Immutable */
+	struct list_head srf_head;     /* Protected by binding_mutex */
+	struct list_head cotable_head; /* Protected by binding_mutex */
+	unsigned view_type;            /* Immutable */
+	unsigned view_id;              /* Immutable */
+	u32 cmd_size;                  /* Immutable */
+	bool committed;                /* Protected by binding_mutex */
+	u32 cmd[1];                    /* Immutable */
+};
+
+static int vmw_view_create(struct vmw_resource *res);
+static int vmw_view_destroy(struct vmw_resource *res);
+static void vmw_hw_view_destroy(struct vmw_resource *res);
+static void vmw_view_commit_notify(struct vmw_resource *res,
+				   enum vmw_cmdbuf_res_state state);
+
+static const struct vmw_res_func vmw_view_func = {
+	.res_type = vmw_res_view,
+	.needs_backup = false,
+	.may_evict = false,
+	.type_name = "DX view",
+	.backup_placement = NULL,
+	.create = vmw_view_create,
+	.commit_notify = vmw_view_commit_notify,
+};
+
+/**
+ * struct vmw_view_define - view define command body stub
+ *
+ * @view_id: The device id of the view being defined
+ * @sid: The surface id of the view being defined
+ *
+ * This generic struct is used by the code to change @view_id and @sid of a
+ * saved view define command.
+ */
+struct vmw_view_define {
+	uint32 view_id;
+	uint32 sid;
+};
+
+/**
+ * vmw_view - Convert a struct vmw_resource to a struct vmw_view
+ *
+ * @res: Pointer to the resource to convert.
+ *
+ * Returns a pointer to a struct vmw_view.
+ */
+static struct vmw_view *vmw_view(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_view, res);
+}
+
+/**
+ * vmw_view_commit_notify - Notify that a view operation has been committed to
+ * hardware from a user-supplied command stream.
+ *
+ * @res: Pointer to the view resource.
+ * @state: Indicating whether a creation or removal has been committed.
+ *
+ */
+static void vmw_view_commit_notify(struct vmw_resource *res,
+				   enum vmw_cmdbuf_res_state state)
+{
+	struct vmw_view *view = vmw_view(res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	if (state == VMW_CMDBUF_RES_ADD) {
+		struct vmw_surface *srf = vmw_res_to_srf(view->srf);
+
+		list_add_tail(&view->srf_head, &srf->view_list);
+		vmw_cotable_add_resource(view->cotable, &view->cotable_head);
+		view->committed = true;
+		res->id = view->view_id;
+
+	} else {
+		list_del_init(&view->cotable_head);
+		list_del_init(&view->srf_head);
+		view->committed = false;
+		res->id = -1;
+	}
+	mutex_unlock(&dev_priv->binding_mutex);
+}
+
+/**
+ * vmw_view_create - Create a hardware view.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Create a hardware view. Typically used if that view has previously been
+ * destroyed by an eviction operation.
+ */
+static int vmw_view_create(struct vmw_resource *res)
+{
+	struct vmw_view *view = vmw_view(res);
+	struct vmw_surface *srf = vmw_res_to_srf(view->srf);
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		struct vmw_view_define body;
+	} *cmd;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	if (!view->committed) {
+		mutex_unlock(&dev_priv->binding_mutex);
+		return 0;
+	}
+
+	cmd = vmw_fifo_reserve_dx(res->dev_priv, view->cmd_size,
+				  view->ctx->id);
+	if (!cmd) {
+		DRM_ERROR("Failed reserving FIFO space for view creation.\n");
+		mutex_unlock(&dev_priv->binding_mutex);
+		return -ENOMEM;
+	}
+	memcpy(cmd, &view->cmd, view->cmd_size);
+	WARN_ON(cmd->body.view_id != view->view_id);
+	/* Sid may have changed due to surface eviction. */
+	WARN_ON(view->srf->id == SVGA3D_INVALID_ID);
+	cmd->body.sid = view->srf->id;
+	vmw_fifo_commit(res->dev_priv, view->cmd_size);
+	res->id = view->view_id;
+	list_add_tail(&view->srf_head, &srf->view_list);
+	vmw_cotable_add_resource(view->cotable, &view->cotable_head);
+	mutex_unlock(&dev_priv->binding_mutex);
+
+	return 0;
+}
+
+/**
+ * vmw_view_destroy - Destroy a hardware view.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Destroy a hardware view. Typically used on unexpected termination of the
+ * owning process or if the surface the view is pointing to is destroyed.
+ */
+static int vmw_view_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_view *view = vmw_view(res);
+	struct {
+		SVGA3dCmdHeader header;
+		union vmw_view_destroy body;
+	} *cmd;
+
+	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+	vmw_binding_res_list_scrub(&res->binding_head);
+
+	if (!view->committed || res->id == -1)
+		return 0;
+
+	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), view->ctx->id);
+	if (!cmd) {
+		DRM_ERROR("Failed reserving FIFO space for view "
+			  "destruction.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = vmw_view_destroy_cmds[view->view_type];
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.view_id = view->view_id;
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	res->id = -1;
+	list_del_init(&view->cotable_head);
+	list_del_init(&view->srf_head);
+
+	return 0;
+}
+
+/**
+ * vmw_hw_view_destroy - Destroy a hardware view as part of resource cleanup.
+ *
+ * @res: Pointer to the view resource.
+ *
+ * Destroy a hardware view if it's still present.
+ */
+static void vmw_hw_view_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	WARN_ON(vmw_view_destroy(res));
+	res->id = -1;
+	mutex_unlock(&dev_priv->binding_mutex);
+}
+
+/**
+ * vmw_view_key - Compute a view key suitable for the cmdbuf resource manager
+ *
+ * @user_key: The user-space id used for the view.
+ * @view_type: The view type.
+ *
+ * Returns a view key suitable for the cmdbuf resource manager hash table.
+ */
+static u32 vmw_view_key(u32 user_key, enum vmw_view_type view_type)
+{
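+	/*
+	 * user_key fits in 20 bits (checked by vmw_view_id_ok() and the
+	 * build asserts), so the view type can be folded into the bits
+	 * above without collisions.
+	 */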
+	return user_key | (view_type << 20);
+}
+
+/**
+ * vmw_view_id_ok - Basic view id and type range checks.
+ *
+ * @user_key: The user-space id used for the view.
+ * @view_type: The view type.
+ *
+ * Checks that the view id and type (typically provided by user-space) is
+ * valid.
+ */
+static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type)
+{
+	return (user_key < SVGA_COTABLE_MAX_IDS &&
+		view_type < vmw_view_max);
+}
+
+/**
+ * vmw_view_res_free - resource res_free callback for view resources
+ *
+ * @res: Pointer to a struct vmw_resource
+ *
+ * Frees memory and memory accounting held by a struct vmw_view.
+ */
+static void vmw_view_res_free(struct vmw_resource *res)
+{
+	struct vmw_view *view = vmw_view(res);
+	size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size;
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	vmw_resource_unreference(&view->cotable);
+	vmw_resource_unreference(&view->srf);
+	kfree_rcu(view, rcu);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_view_add - Create a view resource and stage it for addition
+ * as a command buffer managed resource.
+ *
+ * @man: Pointer to the compat shader manager identifying the shader namespace.
+ * @ctx: Pointer to a struct vmw_resource identifying the active context.
+ * @srf: Pointer to a struct vmw_resource identifying the surface the view
+ * points to.
+ * @view_type: The view type deduced from the view create command.
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the view type and to the context.
+ * @cmd: Pointer to the view create command in the command stream.
+ * @cmd_size: Size of the view create command in the command stream.
+ * @list: Caller's list of staged command buffer resource actions.
+ */
+int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
+		 struct vmw_resource *ctx,
+		 struct vmw_resource *srf,
+		 enum vmw_view_type view_type,
+		 u32 user_key,
+		 const void *cmd,
+		 size_t cmd_size,
+		 struct list_head *list)
+{
+	static const size_t vmw_view_define_sizes[] = {
+		[vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView),
+		[vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView),
+		[vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView)
+	};
+
+	struct vmw_private *dev_priv = ctx->dev_priv;
+	struct vmw_resource *res;
+	struct vmw_view *view;
+	size_t size;
+	int ret;
+
+	if (cmd_size != vmw_view_define_sizes[view_type] +
+	    sizeof(SVGA3dCmdHeader)) {
+		DRM_ERROR("Illegal view create command size.\n");
+		return -EINVAL;
+	}
+
+	if (!vmw_view_id_ok(user_key, view_type)) {
+		DRM_ERROR("Illegal view add view id.\n");
+		return -EINVAL;
+	}
+
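+	/* The define command is stored inline at the tail of the struct. */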
+	size = offsetof(struct vmw_view, cmd) + cmd_size;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, false, true);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for view"
+				  " creation.\n");
+		return ret;
+	}
+
+	view = kmalloc(size, GFP_KERNEL);
+	if (!view) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+		return -ENOMEM;
+	}
+
+	res = &view->res;
+	view->ctx = ctx;
+	view->srf = vmw_resource_reference(srf);
+	view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]);
+	view->view_type = view_type;
+	view->view_id = user_key;
+	view->cmd_size = cmd_size;
+	view->committed = false;
+	INIT_LIST_HEAD(&view->srf_head);
+	INIT_LIST_HEAD(&view->cotable_head);
+	memcpy(&view->cmd, cmd, cmd_size);
+	ret = vmw_resource_init(dev_priv, res, true,
+				vmw_view_res_free, &vmw_view_func);
+	if (ret)
+		goto out_resource_init;
+
+	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_view,
+				 vmw_view_key(user_key, view_type),
+				 res, list);
+	if (ret)
+		goto out_resource_init;
+
+	res->id = view->view_id;
+	vmw_resource_activate(res, vmw_hw_view_destroy);
+
+out_resource_init:
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
+/**
+ * vmw_view_remove - Stage a view for removal.
+ *
+ * @man: Pointer to the view manager identifying the shader namespace.
+ * @user_key: The key that is used to identify the view. The key is
+ * unique to the view type.
+ * @view_type: View type
+ * @list: Caller's list of staged command buffer resource actions.
+ * @res_p: If the resource is in an already committed state, points to the
+ * struct vmw_resource on successful return. The pointer will be
+ * non ref-counted.
+ */
+int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
+		    u32 user_key, enum vmw_view_type view_type,
+		    struct list_head *list,
+		    struct vmw_resource **res_p)
+{
+	if (!vmw_view_id_ok(user_key, view_type)) {
+		DRM_ERROR("Illegal view remove view id.\n");
+		return -EINVAL;
+	}
+
+	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_view,
+				     vmw_view_key(user_key, view_type),
+				     list, res_p);
+}
+
+/**
+ * vmw_view_cotable_list_destroy - Evict all views belonging to a cotable.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @list: List of views belonging to a cotable.
+ * @readback: Unused. Needed for function interface only.
+ *
+ * This function evicts all views belonging to a cotable.
+ * It must be called with the binding_mutex held, and the caller must hold
+ * a reference to the view resource. This is typically called before the
+ * cotable is paged out.
+ */
+void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
+				   struct list_head *list,
+				   bool readback)
+{
+	struct vmw_view *entry, *next;
+
+	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+	list_for_each_entry_safe(entry, next, list, cotable_head)
+		WARN_ON(vmw_view_destroy(&entry->res));
+}
+
+/**
+ * vmw_view_surface_list_destroy - Evict all views pointing to a surface
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @list: List of views pointing to a surface.
+ *
+ * This function evicts all views pointing to a surface. This is typically
+ * called before the surface is evicted.
+ */
+void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
+				   struct list_head *list)
+{
+	struct vmw_view *entry, *next;
+
+	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));
+
+	list_for_each_entry_safe(entry, next, list, srf_head)
+		WARN_ON(vmw_view_destroy(&entry->res));
+}
+
+/**
+ * vmw_view_srf - Return a non-refcounted pointer to the surface a view is
+ * pointing to.
+ *
+ * @res: pointer to a view resource.
+ *
+ * Note that the view itself is holding a reference, so as long as the
+ * view resource is alive, the surface resource will be.
+ */
+struct vmw_resource *vmw_view_srf(struct vmw_resource *res)
+{
+	return vmw_view(res)->srf;
+}
+
+/**
+ * vmw_view_lookup - Look up a view.
+ *
+ * @man: The context's cmdbuf ref manager.
+ * @view_type: The view type.
+ * @user_key: The view user id.
+ *
+ * returns a refcounted pointer to a view or an error pointer if not found.
+ */
+struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
+				     enum vmw_view_type view_type,
+				     u32 user_key)
+{
+	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_view,
+				     vmw_view_key(user_key, view_type));
+}
+
+const u32 vmw_view_destroy_cmds[] = {
+	[vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
+	[vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
+	[vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
+};
+
+const SVGACOTableType vmw_view_cotables[] = {
+	[vmw_view_sr] = SVGA_COTABLE_SRVIEW,
+	[vmw_view_rt] = SVGA_COTABLE_RTVIEW,
+	[vmw_view_ds] = SVGA_COTABLE_DSVIEW,
+};
+
+const SVGACOTableType vmw_so_cotables[] = {
+	[vmw_so_el] = SVGA_COTABLE_ELEMENTLAYOUT,
+	[vmw_so_bs] = SVGA_COTABLE_BLENDSTATE,
+	[vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL,
+	[vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE,
+	[vmw_so_ss] = SVGA_COTABLE_SAMPLER,
+	[vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT
+};
+
+
+/* To remove unused function warning */
+static void vmw_so_build_asserts(void) __attribute__((used));
+
+
+/*
+ * This function is unused at run-time, and only used to hold various build
+ * asserts important for code optimization assumptions.
+ */
+static void vmw_so_build_asserts(void)
+{
+	/* Assert that our vmw_view_cmd_to_type() function is correct. */
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 1);
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2);
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3);
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 4);
+	BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW !=
+		     SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 5);
+
+	/* Assert that our "one body fits all" assumption is valid */
+	BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32));
+
+	/* Assert that the view key space can hold all view ids. */
+	BUILD_BUG_ON(SVGA_COTABLE_MAX_IDS >= ((1 << 20) - 1));
+
+	/*
+	 * Assert that the offset of sid in all view define commands
+	 * is what we assume it to be.
+	 */
+	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+		     offsetof(SVGA3dCmdDXDefineShaderResourceView, sid));
+	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+		     offsetof(SVGA3dCmdDXDefineRenderTargetView, sid));
+	BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) !=
+		     offsetof(SVGA3dCmdDXDefineDepthStencilView, sid));
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
new file mode 100644
index 0000000..2687383
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -0,0 +1,160 @@
+/**************************************************************************
+ * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef VMW_SO_H
+#define VMW_SO_H
+
+enum vmw_view_type {
+	vmw_view_sr,
+	vmw_view_rt,
+	vmw_view_ds,
+	vmw_view_max,
+};
+
+enum vmw_so_type {
+	vmw_so_el,
+	vmw_so_bs,
+	vmw_so_ds,
+	vmw_so_rs,
+	vmw_so_ss,
+	vmw_so_so,
+	vmw_so_max,
+};
+
+/**
+ * union vmw_view_destroy - view destruction command body
+ *
+ * @rtv: RenderTarget view destruction command body
+ * @srv: ShaderResource view destruction command body
+ * @dsv: DepthStencil view destruction command body
+ * @view_id: A single u32 view id.
+ *
+ * The assumption here is that all union members are really represented by a
+ * single u32 in the command stream. If that's not the case,
+ * the size of this union will not equal the size of an u32, and the
+ * assumption is invalid, and we detect that at compile time in the
+ * vmw_so_build_asserts() function.
+ */
+union vmw_view_destroy {
+	struct SVGA3dCmdDXDestroyRenderTargetView rtv;
+	struct SVGA3dCmdDXDestroyShaderResourceView srv;
+	struct SVGA3dCmdDXDestroyDepthStencilView dsv;
+	u32 view_id;
+};
+
+/* Map enum vmw_view_type to view destroy command ids */
+extern const u32 vmw_view_destroy_cmds[];
+
+/* Map enum vmw_view_type to SVGACOTableType */
+extern const SVGACOTableType vmw_view_cotables[];
+
+/* Map enum vmw_so_type to SVGACOTableType */
+extern const SVGACOTableType vmw_so_cotables[];
+
+/*
+ * vmw_view_cmd_to_type - Return the view type for a create or destroy command
+ *
+ * @id: The SVGA3D command id.
+ *
+ * For a given view create or destroy command id, return the corresponding
+ * enum vmw_view_type. If the command is unknown, return vmw_view_max.
+ * The validity of the simplified calculation is verified in the
+ * vmw_so_build_asserts() function.
+ */
+static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id)
+{
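+	/*
+	 * Define/destroy ids for each view type are adjacent, so dividing
+	 * the offset from the first define command by two yields the view
+	 * type. vmw_so_build_asserts() verifies this layout.
+	 */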
+	u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2;
+
+	if (tmp > (u32)vmw_view_max)
+		return vmw_view_max;
+
+	return (enum vmw_view_type) tmp;
+}
+
+/*
+ * vmw_so_cmd_to_type - Return the state object type for a
+ * create or destroy command
+ *
+ * @id: The SVGA3D command id.
+ *
+ * For a given state object create or destroy command id,
+ * return the corresponding enum vmw_so_type. If the command is unknown,
+ * return vmw_so_max. We should perhaps optimize this function using
+ * a strategy similar to vmw_view_cmd_to_type().
+ */
+static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id)
+{
+	switch (id) {
+	case SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT:
+	case SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT:
+		return vmw_so_el;
+	case SVGA_3D_CMD_DX_DEFINE_BLEND_STATE:
+	case SVGA_3D_CMD_DX_DESTROY_BLEND_STATE:
+		return vmw_so_bs;
+	case SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE:
+	case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE:
+		return vmw_so_ds;
+	case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE:
+	case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE:
+		return vmw_so_rs;
+	case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE:
+	case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE:
+		return vmw_so_ss;
+	case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT:
+	case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT:
+		return vmw_so_so;
+	default:
+		break;
+	}
+	return vmw_so_max;
+}
+
+/*
+ * View management - vmwgfx_so.c
+ */
+extern int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
+			struct vmw_resource *ctx,
+			struct vmw_resource *srf,
+			enum vmw_view_type view_type,
+			u32 user_key,
+			const void *cmd,
+			size_t cmd_size,
+			struct list_head *list);
+
+extern int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
+			   u32 user_key, enum vmw_view_type view_type,
+			   struct list_head *list,
+			   struct vmw_resource **res_p);
+
+extern void vmw_view_surface_list_destroy(struct vmw_private *dev_priv,
+					  struct list_head *view_list);
+extern void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv,
+					  struct list_head *list,
+					  bool readback);
+extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res);
+extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
+					    enum vmw_view_type view_type,
+					    u32 user_key);
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
new file mode 100644
index 0000000..c22e2df
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -0,0 +1,1266 @@
+/******************************************************************************
+ *
+ * COPYRIGHT © 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ ******************************************************************************/
+
+#include "vmwgfx_kms.h"
+#include "device_include/svga3d_surfacedefs.h"
+#include <drm/drm_plane_helper.h>
+
+#define vmw_crtc_to_stdu(x) \
+	container_of(x, struct vmw_screen_target_display_unit, base.crtc)
+#define vmw_encoder_to_stdu(x) \
+	container_of(x, struct vmw_screen_target_display_unit, base.encoder)
+#define vmw_connector_to_stdu(x) \
+	container_of(x, struct vmw_screen_target_display_unit, base.connector)
+
+
+
+enum stdu_content_type {
+	SAME_AS_DISPLAY = 0,
+	SEPARATE_SURFACE,
+	SEPARATE_DMA
+};
+
+/**
+ * struct vmw_stdu_dirty - closure structure for the update functions
+ *
+ * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
+ * @transfer: Transfer direction for DMA command.
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
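+ * @pitch: Buffer pitch when DMA-ing between buffer and screen targets.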
+ * @buf: DMA buffer when DMA-ing between buffer and screen targets.
+ * @sid: Surface ID when copying between surface and screen targets.
+ */
+struct vmw_stdu_dirty {
+	struct vmw_kms_dirty base;
+	SVGA3dTransferType  transfer;
+	s32 left, right, top, bottom;
+	u32 pitch;
+	union {
+		struct vmw_dma_buffer *buf;
+		u32 sid;
+	};
+};
+
+/*
+ * SVGA commands that are used by this code. Please see the device headers
+ * for explanation.
+ */
+struct vmw_stdu_update {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdUpdateGBScreenTarget body;
+};
+
+struct vmw_stdu_dma {
+	SVGA3dCmdHeader     header;
+	SVGA3dCmdSurfaceDMA body;
+};
+
+struct vmw_stdu_surface_copy {
+	SVGA3dCmdHeader      header;
+	SVGA3dCmdSurfaceCopy body;
+};
+
+
+/**
+ * struct vmw_screen_target_display_unit - Screen Target display unit
+ *
+ * @base: VMW specific DU structure
+ * @display_srf: surface to be displayed.  The dimension of this will always
+ *               match the display mode.  If the display mode matches
+ *               content_vfbs dimensions, then this is a pointer into the
+ *               corresponding field in content_vfbs.  If not, then this
+ *               is a separate buffer to which content_vfbs will blit.
+ * @content_fb: holds the rendered content, can be a surface or DMA buffer
+ * @content_fb_type:  content_fb type
+ * @defined:  true if the current display unit has been initialized
+ */
+struct vmw_screen_target_display_unit {
+	struct vmw_display_unit base;
+
+	struct vmw_surface     *display_srf;
+	struct drm_framebuffer *content_fb;
+
+	enum stdu_content_type content_fb_type;
+
+	bool defined;
+};
+
+
+
+static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu);
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit helper Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_pin_display - pins the resource associated with the display surface
+ *
+ * @stdu: contains the display surface
+ *
+ * Since the display surface can either be a private surface allocated by us,
+ * or it can point to the content surface, we use this function to avoid
+ * pinning the same resource twice.
+ */
+static int vmw_stdu_pin_display(struct vmw_screen_target_display_unit *stdu)
+{
+	return vmw_resource_pin(&stdu->display_srf->res, false);
+}
+
+
+
+/**
+ * vmw_stdu_unpin_display - unpins the resource associated with display surface
+ *
+ * @stdu: contains the display surface
+ *
+ * If the display surface was privately allocated by
+ * vmw_surface_gb_priv_define() and not registered as a framebuffer, then it
+ * won't be automatically cleaned up when all the framebuffers are freed.  As
+ * such, we have to explicitly call vmw_resource_unreference() to get it freed.
+ */
+static void vmw_stdu_unpin_display(struct vmw_screen_target_display_unit *stdu)
+{
+	if (stdu->display_srf) {
+		struct vmw_resource *res = &stdu->display_srf->res;
+
+		vmw_resource_unpin(res);
+
+		if (stdu->content_fb_type != SAME_AS_DISPLAY) {
+			vmw_resource_unreference(&res);
+			stdu->content_fb_type = SAME_AS_DISPLAY;
+		}
+
+		stdu->display_srf = NULL;
+	}
+}
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit CRTC Functions
+ *****************************************************************************/
+
+
+/**
+ * vmw_stdu_crtc_destroy - cleans up the STDU
+ *
+ * @crtc: used to get a reference to the containing STDU
+ */
+static void vmw_stdu_crtc_destroy(struct drm_crtc *crtc)
+{
+	vmw_stdu_destroy(vmw_crtc_to_stdu(crtc));
+}
+
+/**
+ * vmw_stdu_define_st - Defines a Screen Target
+ *
+ * @dev_priv:  VMW DRM device
+ * @stdu: display unit to create a Screen Target for
+ *
+ * Creates a screen target that we can use later.  This function is called
+ * whenever the framebuffer size changes.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_define_st(struct vmw_private *dev_priv,
+			      struct vmw_screen_target_display_unit *stdu)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBScreenTarget body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of FIFO space defining Screen Target\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id   = SVGA_3D_CMD_DEFINE_GB_SCREENTARGET;
+	cmd->header.size = sizeof(cmd->body);
+
+	cmd->body.stid   = stdu->base.unit;
+	cmd->body.width  = stdu->display_srf->base_size.width;
+	cmd->body.height = stdu->display_srf->base_size.height;
+	cmd->body.flags  = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0;
+	cmd->body.dpi    = 0;
+	cmd->body.xRoot  = stdu->base.crtc.x;
+	cmd->body.yRoot  = stdu->base.crtc.y;
+
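+	/* Explicitly placed units use their fixed GUI coordinates instead. */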
+	if (!stdu->base.is_implicit) {
+		cmd->body.xRoot  = stdu->base.gui_x;
+		cmd->body.yRoot  = stdu->base.gui_y;
+	}
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	stdu->defined = true;
+
+	return 0;
+}
+
+
+
+/**
+ * vmw_stdu_bind_st - Binds a surface to a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit affected
+ * @res: Buffer to bind to the screen target.  Set to NULL to blank screen.
+ *
+ * Binding a surface to a Screen Target is the same as flipping
+ */
+static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
+			    struct vmw_screen_target_display_unit *stdu,
+			    struct vmw_resource *res)
+{
+	SVGA3dSurfaceImageId image;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBindGBScreenTarget body;
+	} *cmd;
+
+
+	if (!stdu->defined) {
+		DRM_ERROR("No screen target defined\n");
+		return -EINVAL;
+	}
+
+	/* Set up image using information in vfb */
+	memset(&image, 0, sizeof(image));
+	image.sid = res ? res->id : SVGA3D_INVALID_ID;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of FIFO space binding a screen target\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id   = SVGA_3D_CMD_BIND_GB_SCREENTARGET;
+	cmd->header.size = sizeof(cmd->body);
+
+	cmd->body.stid   = stdu->base.unit;
+	cmd->body.image  = image;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+/**
+ * vmw_stdu_populate_update - populate an UPDATE_GB_SCREENTARGET command with a
+ * bounding box.
+ *
+ * @cmd: Pointer to command stream.
+ * @unit: Screen target unit.
+ * @left: Left side of bounding box.
+ * @right: Right side of bounding box.
+ * @top: Top side of bounding box.
+ * @bottom: Bottom side of bounding box.
+ */
+static void vmw_stdu_populate_update(void *cmd, int unit,
+				     s32 left, s32 right, s32 top, s32 bottom)
+{
+	struct vmw_stdu_update *update = cmd;
+
+	update->header.id   = SVGA_3D_CMD_UPDATE_GB_SCREENTARGET;
+	update->header.size = sizeof(update->body);
+
+	update->body.stid   = unit;
+	update->body.rect.x = left;
+	update->body.rect.y = top;
+	update->body.rect.w = right - left;
+	update->body.rect.h = bottom - top;
+}
+
+/**
+ * vmw_stdu_update_st - Full update of a Screen Target
+ *
+ * @dev_priv: VMW DRM device
+ * @stdu: display unit affected
+ *
+ * This function needs to be called whenever the content of a screen
+ * target has changed completely, typically as a result of a backing
+ * surface change.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_update_st(struct vmw_private *dev_priv,
+			      struct vmw_screen_target_display_unit *stdu)
+{
+	struct vmw_stdu_update *cmd;
+	struct drm_crtc *crtc = &stdu->base.crtc;
+
+	if (!stdu->defined) {
+		DRM_ERROR("No screen target defined");
+		return -EINVAL;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of FIFO space updating a Screen Target\n");
+		return -ENOMEM;
+	}
+
+	vmw_stdu_populate_update(cmd, stdu->base.unit, 0, crtc->mode.hdisplay,
+				 0, crtc->mode.vdisplay);
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
+
+
+
+/**
+ * vmw_stdu_destroy_st - Destroy a Screen Target
+ *
+ * @dev_priv:  VMW DRM device
+ * @stdu: display unit to destroy
+ */
+static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
+			       struct vmw_screen_target_display_unit *stdu)
+{
+	int    ret;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyGBScreenTarget body;
+	} *cmd;
+
+
+	/* Nothing to do if not successfully defined */
+	if (unlikely(!stdu->defined))
+		return 0;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of FIFO space, screen target not destroyed\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id   = SVGA_3D_CMD_DESTROY_GB_SCREENTARGET;
+	cmd->header.size = sizeof(cmd->body);
+
+	cmd->body.stid   = stdu->base.unit;
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	/* Force sync */
+	ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
+	if (unlikely(ret != 0))
+		DRM_ERROR("Failed to sync with HW");
+
+	stdu->defined = false;
+
+	return ret;
+}
+
+
+
+/**
+ * vmw_stdu_crtc_set_config - Sets a mode
+ *
+ * @set:  mode parameters
+ *
+ * This function is the device-specific portion of the DRM CRTC mode set.
+ * For the SVGA device, we do this by defining a Screen Target, binding a
+ * GB Surface to that target, and finally updating the screen target.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
+{
+	struct vmw_private *dev_priv;
+	struct vmw_screen_target_display_unit *stdu;
+	struct vmw_framebuffer *vfb;
+	struct vmw_framebuffer_surface *new_vfbs;
+	struct drm_display_mode *mode;
+	struct drm_framebuffer  *new_fb;
+	struct drm_crtc      *crtc;
+	struct drm_encoder   *encoder;
+	struct drm_connector *connector;
+	int    ret;
+
+
+	if (!set || !set->crtc)
+		return -EINVAL;
+
+	crtc     = set->crtc;
+	crtc->x  = set->x;
+	crtc->y  = set->y;
+	stdu     = vmw_crtc_to_stdu(crtc);
+	mode     = set->mode;
+	new_fb   = set->fb;
+	dev_priv = vmw_priv(crtc->dev);
+
+
+	if (set->num_connectors > 1) {
+		DRM_ERROR("Too many connectors\n");
+		return -EINVAL;
+	}
+
+	if (set->num_connectors == 1 &&
+	    set->connectors[0] != &stdu->base.connector) {
+		DRM_ERROR("Connectors don't match %p %p\n",
+			set->connectors[0], &stdu->base.connector);
+		return -EINVAL;
+	}
+
+
+	/* Since they always map one to one, these are safe */
+	connector = &stdu->base.connector;
+	encoder   = &stdu->base.encoder;
+
+
+	/*
+	 * After this point the CRTC will be considered off unless a new fb
+	 * is bound
+	 */
+	if (stdu->defined) {
+		/* Unbind current surface by binding an invalid one */
+		ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+		if (unlikely(ret != 0))
+			return ret;
+
+		/* Update Screen Target, display will now be blank */
+		if (crtc->primary->fb) {
+			ret = vmw_stdu_update_st(dev_priv, stdu);
+			if (unlikely(ret != 0))
+				return ret;
+		}
+
+		crtc->primary->fb  = NULL;
+		crtc->enabled      = false;
+		encoder->crtc      = NULL;
+		connector->encoder = NULL;
+
+		vmw_stdu_unpin_display(stdu);
+		stdu->content_fb      = NULL;
+		stdu->content_fb_type = SAME_AS_DISPLAY;
+
+		ret = vmw_stdu_destroy_st(dev_priv, stdu);
+		/* The hardware is hung, give up */
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+
+	/* Any of these conditions means the caller wants CRTC off */
+	if (set->num_connectors == 0 || !mode || !new_fb)
+		return 0;
+
+
+	if (set->x + mode->hdisplay > new_fb->width ||
+	    set->y + mode->vdisplay > new_fb->height) {
+		DRM_ERROR("Set outside of framebuffer\n");
+		return -EINVAL;
+	}
+
+	stdu->content_fb = new_fb;
+	vfb = vmw_framebuffer_to_vfb(stdu->content_fb);
+
+	if (vfb->dmabuf)
+		stdu->content_fb_type = SEPARATE_DMA;
+
+	/*
+	 * If the requested mode is different from the width and height
+	 * of the FB, or if the content buffer is a DMA buf, then allocate
+	 * a display FB that matches the dimensions of the mode
+	 */
+	if (mode->hdisplay != new_fb->width  ||
+	    mode->vdisplay != new_fb->height ||
+	    stdu->content_fb_type != SAME_AS_DISPLAY) {
+		struct vmw_surface content_srf;
+		struct drm_vmw_size display_base_size = {0};
+		struct vmw_surface *display_srf;
+
+
+		display_base_size.width  = mode->hdisplay;
+		display_base_size.height = mode->vdisplay;
+		display_base_size.depth  = 1;
+
+		/*
+		 * If content buffer is a DMA buf, then we have to construct
+		 * surface info
+		 */
+		if (stdu->content_fb_type == SEPARATE_DMA) {
+
+			switch (new_fb->bits_per_pixel) {
+			case 32:
+				content_srf.format = SVGA3D_X8R8G8B8;
+				break;
+
+			case 16:
+				content_srf.format = SVGA3D_R5G6B5;
+				break;
+
+			case 8:
+				content_srf.format = SVGA3D_P8;
+				break;
+
+			default:
+				DRM_ERROR("Invalid format\n");
+				ret = -EINVAL;
+				goto err_unref_content;
+			}
+
+			content_srf.flags             = 0;
+			content_srf.mip_levels[0]     = 1;
+			content_srf.multisample_count = 0;
+		} else {
+
+			stdu->content_fb_type = SEPARATE_SURFACE;
+
+			new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
+			content_srf = *new_vfbs->surface;
+		}
+
+
+		ret = vmw_surface_gb_priv_define(crtc->dev,
+				0, /* because kernel visible only */
+				content_srf.flags,
+				content_srf.format,
+				true, /* a scanout buffer */
+				content_srf.mip_levels[0],
+				content_srf.multisample_count,
+				0,
+				display_base_size,
+				&display_srf);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Cannot allocate a display FB.\n");
+			goto err_unref_content;
+		}
+
+		stdu->display_srf = display_srf;
+	} else {
+		new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
+		stdu->display_srf = new_vfbs->surface;
+	}
+
+
+	ret = vmw_stdu_pin_display(stdu);
+	if (unlikely(ret != 0)) {
+		stdu->display_srf = NULL;
+		goto err_unref_content;
+	}
+
+	vmw_svga_enable(dev_priv);
+
+	/*
+	 * Steps to displaying a surface, assume surface is already
+	 * bound:
+	 *   1.  define a screen target
+	 *   2.  bind a fb to the screen target
+	 *   3.  update that screen target (this is done later by
+	 *       vmw_kms_stdu_do_surface_dirty_or_present)
+	 */
+	ret = vmw_stdu_define_st(dev_priv, stdu);
+	if (unlikely(ret != 0))
+		goto err_unpin_display_and_content;
+
+	ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
+	if (unlikely(ret != 0))
+		goto err_unpin_destroy_st;
+
+
+	connector->encoder = encoder;
+	encoder->crtc      = crtc;
+
+	crtc->mode    = *mode;
+	crtc->primary->fb = new_fb;
+	crtc->enabled = true;
+
+	return ret;
+
+err_unpin_destroy_st:
+	vmw_stdu_destroy_st(dev_priv, stdu);
+err_unpin_display_and_content:
+	vmw_stdu_unpin_display(stdu);
+err_unref_content:
+	stdu->content_fb = NULL;
+	return ret;
+}
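
Worth noting: the teardown half of this modeset is the mirror image of the setup half. The surface is unbound by binding an invalid one, the target is updated so the screen blanks, the display surface is unpinned, and only then is the screen target destroyed. A condensed sketch using the helpers above (stdu_teardown is a hypothetical wrapper, error handling simplified):

/* Sketch: tear down an active screen target in the order used above. */
static int stdu_teardown(struct vmw_private *dev_priv,
			 struct vmw_screen_target_display_unit *stdu)
{
	int ret;

	/* Unbind the current surface by binding an invalid one */
	ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
	if (ret)
		return ret;

	/* Flush the unbind; the display goes blank */
	ret = vmw_stdu_update_st(dev_priv, stdu);
	if (ret)
		return ret;

	vmw_stdu_unpin_display(stdu);

	return vmw_stdu_destroy_st(dev_priv, stdu);
}
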
+
+
+
+/**
+ * vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
+ *
+ * @crtc: CRTC to attach FB to
+ * @new_fb: FB to attach
+ * @event: Event to be posted. This event should've been allocated
+ *         using k[mz]alloc, and should've been completely initialized.
+ * @flags: Input flags.
+ *
+ * If the STDU uses the same display and content buffers, i.e. a true flip,
+ * this function will replace the existing display buffer with the new content
+ * buffer.
+ *
+ * If the STDU uses different display and content buffers, i.e. a blit, then
+ * only the content buffer will be updated.
+ *
+ * RETURNS:
+ * 0 on success, error code on failure
+ */
+static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
+				   struct drm_framebuffer *new_fb,
+				   struct drm_pending_vblank_event *event,
+				   uint32_t flags)
+
+{
+	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+	struct vmw_screen_target_display_unit *stdu;
+	int ret;
+
+	if (crtc == NULL)
+		return -EINVAL;
+
+	dev_priv          = vmw_priv(crtc->dev);
+	stdu              = vmw_crtc_to_stdu(crtc);
+	crtc->primary->fb = new_fb;
+	stdu->content_fb  = new_fb;
+
+	if (stdu->display_srf) {
+		/*
+		 * If the display surface is the same as the content surface
+		 * then remove the reference
+		 */
+		if (stdu->content_fb_type == SAME_AS_DISPLAY) {
+			if (stdu->defined) {
+				/* Unbind the current surface */
+				ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+				if (unlikely(ret != 0))
+					goto err_out;
+			}
+			vmw_stdu_unpin_display(stdu);
+			stdu->display_srf = NULL;
+		}
+	}
+
+
+	if (!new_fb) {
+		/* Blanks the display */
+		(void) vmw_stdu_update_st(dev_priv, stdu);
+
+		return 0;
+	}
+
+
+	if (stdu->content_fb_type == SAME_AS_DISPLAY) {
+		stdu->display_srf = vmw_framebuffer_to_vfbs(new_fb)->surface;
+		ret = vmw_stdu_pin_display(stdu);
+		if (ret) {
+			stdu->display_srf = NULL;
+			goto err_out;
+		}
+
+		/* Bind display surface */
+		ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
+		if (unlikely(ret != 0))
+			goto err_unpin_display_and_content;
+	}
+
+	/* Update display surface: after this point everything is bound */
+	ret = vmw_stdu_update_st(dev_priv, stdu);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (event) {
+		struct vmw_fence_obj *fence = NULL;
+		struct drm_file *file_priv = event->base.file_priv;
+
+		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+		if (!fence)
+			return -ENOMEM;
+
+		ret = vmw_event_fence_action_queue(file_priv, fence,
+						   &event->base,
+						   &event->event.tv_sec,
+						   &event->event.tv_usec,
+						   true);
+		vmw_fence_obj_unreference(&fence);
+	}
+
+	return ret;
+
+err_unpin_display_and_content:
+	vmw_stdu_unpin_display(stdu);
+err_out:
+	crtc->primary->fb = NULL;
+	stdu->content_fb = NULL;
+	return ret;
+}
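
The tail of the flip shows the standard vmwgfx completion pattern: create a fence covering everything submitted so far, then queue the caller's vblank event to fire when the fence signals. The same calls in isolation (queue_flip_event is a hypothetical wrapper):

/* Sketch: deliver @event once all previously submitted commands finish. */
static int queue_flip_event(struct vmw_private *dev_priv,
			    struct drm_pending_vblank_event *event)
{
	struct vmw_fence_obj *fence = NULL;
	int ret;

	vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	if (!fence)
		return -ENOMEM;

	ret = vmw_event_fence_action_queue(event->base.file_priv, fence,
					   &event->base,
					   &event->event.tv_sec,
					   &event->event.tv_usec,
					   true);

	/* The queued action holds its own reference */
	vmw_fence_obj_unreference(&fence);

	return ret;
}
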
+
+
+/**
+ * vmw_stdu_dmabuf_clip - Callback to encode a surface DMA command cliprect
+ *
+ * @dirty: The closure structure.
+ *
+ * Encodes a surface DMA command cliprect and updates the bounding box
+ * for the DMA.
+ */
+static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *ddirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+	struct vmw_stdu_dma *cmd = dirty->cmd;
+	struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+	blit += dirty->num_hits;
+	blit->srcx = dirty->fb_x;
+	blit->srcy = dirty->fb_y;
+	blit->x = dirty->unit_x1;
+	blit->y = dirty->unit_y1;
+	blit->d = 1;
+	blit->w = dirty->unit_x2 - dirty->unit_x1;
+	blit->h = dirty->unit_y2 - dirty->unit_y1;
+	dirty->num_hits++;
+
+	if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM)
+		return;
+
+	/* Destination bounding box */
+	ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
+	ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
+	ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
+	ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
+}
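
The dirty closures accumulate a destination bounding box with the usual sentinel trick: left/top start at S32_MAX and right/bottom at S32_MIN, so the first min_t()/max_t() pass snaps the box to the first rectangle. The idiom in isolation (bbox and its helpers are hypothetical):

/* Sketch: sentinel-initialized bounding-box accumulation. */
struct bbox {
	s32 left, top, right, bottom;
};

static void bbox_reset(struct bbox *b)
{
	b->left = b->top = S32_MAX;	/* any rect shrinks these */
	b->right = b->bottom = S32_MIN;	/* any rect grows these */
}

static void bbox_include(struct bbox *b, s32 x1, s32 y1, s32 x2, s32 y2)
{
	b->left   = min_t(s32, b->left, x1);
	b->top    = min_t(s32, b->top, y1);
	b->right  = max_t(s32, b->right, x2);
	b->bottom = max_t(s32, b->bottom, y2);
}
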
+
+/**
+ * vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in a DMA command, and optionally encodes
+ * a screen target update command, depending on transfer direction.
+ */
+static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *ddirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+	struct vmw_screen_target_display_unit *stdu =
+		container_of(dirty->unit, typeof(*stdu), base);
+	struct vmw_stdu_dma *cmd = dirty->cmd;
+	struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+	SVGA3dCmdSurfaceDMASuffix *suffix =
+		(SVGA3dCmdSurfaceDMASuffix *) &blit[dirty->num_hits];
+	size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix);
+
+	if (!dirty->num_hits) {
+		vmw_fifo_commit(dirty->dev_priv, 0);
+		return;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_SURFACE_DMA;
+	cmd->header.size = sizeof(cmd->body) + blit_size;
+	vmw_bo_get_guest_ptr(&ddirty->buf->base, &cmd->body.guest.ptr);
+	cmd->body.guest.pitch = ddirty->pitch;
+	cmd->body.host.sid = stdu->display_srf->res.id;
+	cmd->body.host.face = 0;
+	cmd->body.host.mipmap = 0;
+	cmd->body.transfer = ddirty->transfer;
+	suffix->suffixSize = sizeof(*suffix);
+	suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
+
+	if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
+		blit_size += sizeof(struct vmw_stdu_update);
+
+		vmw_stdu_populate_update(&suffix[1], stdu->base.unit,
+					 ddirty->left, ddirty->right,
+					 ddirty->top, ddirty->bottom);
+	}
+
+	vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
+
+	ddirty->left = ddirty->top = S32_MAX;
+	ddirty->right = ddirty->bottom = S32_MIN;
+}
+
+/**
+ * vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed
+ * framebuffer and the screen target system.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller. May be
+ * set to NULL, but then @user_fence_rep must also be set to NULL.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @num_clips: Number of clip rects in @clips or @vclips.
+ * @increment: Increment to use when looping over @clips or @vclips.
+ * @to_surface: Whether to DMA to the screen target system as opposed to
+ * from the screen target system.
+ * @interruptible: Whether to perform waits interruptible if possible.
+ *
+ * If DMA-ing to the screen target system, the function will also notify
+ * the screen target system that a bounding box of the cliprects has been
+ * updated.
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_clip_rect *clips,
+		     struct drm_vmw_rect *vclips,
+		     uint32_t num_clips,
+		     int increment,
+		     bool to_surface,
+		     bool interruptible)
+{
+	struct vmw_dma_buffer *buf =
+		container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+	struct vmw_stdu_dirty ddirty;
+	int ret;
+
+	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
+					    false);
+	if (ret)
+		return ret;
+
+	ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
+		SVGA3D_READ_HOST_VRAM;
+	ddirty.left = ddirty.top = S32_MAX;
+	ddirty.right = ddirty.bottom = S32_MIN;
+	ddirty.pitch = vfb->base.pitches[0];
+	ddirty.buf = buf;
+	ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
+	ddirty.base.clip = vmw_stdu_dmabuf_clip;
+	ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
+		num_clips * sizeof(SVGA3dCopyBox) +
+		sizeof(SVGA3dCmdSurfaceDMASuffix);
+	if (to_surface)
+		ddirty.base.fifo_reserve_size += sizeof(struct vmw_stdu_update);
+
+	ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
+				   0, 0, num_clips, increment, &ddirty.base);
+	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
+				     user_fence_rep);
+
+	return ret;
+}
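
Both this DMA path and the surface-copy path below drive the same vmw_kms_helper_dirty() engine: embed a struct vmw_kms_dirty in a private closure, supply clip and fifo_commit callbacks plus a worst-case fifo_reserve_size, and let the helper iterate the cliprects. The skeleton of such a caller (my_dirty, my_clip and my_commit are hypothetical names):

/* Sketch: skeleton of a vmw_kms_helper_dirty() closure. */
struct my_dirty {
	struct vmw_kms_dirty base;	/* must be embedded */
	/* private accumulation state goes here */
};

static void my_clip(struct vmw_kms_dirty *dirty)
{
	struct my_dirty *md = container_of(dirty, struct my_dirty, base);

	/* encode one cliprect into the space at dirty->cmd, using
	 * dirty->num_hits as the running index, then count it */
	dirty->num_hits++;
	(void)md;
}

static void my_commit(struct vmw_kms_dirty *dirty)
{
	if (!dirty->num_hits) {
		vmw_fifo_commit(dirty->dev_priv, 0);
		return;
	}
	/* fill in the command header for dirty->num_hits rects, then
	 * vmw_fifo_commit() exactly the bytes used */
}

A caller then sets base.clip, base.fifo_commit and a worst-case base.fifo_reserve_size before handing &base to vmw_kms_helper_dirty(), exactly as vmw_kms_stdu_dma() does above.
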
+
+/**
+ * vmw_kms_stdu_surface_clip - Callback to encode a surface copy command cliprect
+ *
+ * @dirty: The closure structure.
+ *
+ * Encodes a surface copy command cliprect and updates the bounding box
+ * for the copy.
+ */
+static void vmw_kms_stdu_surface_clip(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *sdirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+	struct vmw_stdu_surface_copy *cmd = dirty->cmd;
+	struct vmw_screen_target_display_unit *stdu =
+		container_of(dirty->unit, typeof(*stdu), base);
+
+	if (sdirty->sid != stdu->display_srf->res.id) {
+		struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+		blit += dirty->num_hits;
+		blit->srcx = dirty->fb_x;
+		blit->srcy = dirty->fb_y;
+		blit->x = dirty->unit_x1;
+		blit->y = dirty->unit_y1;
+		blit->d = 1;
+		blit->w = dirty->unit_x2 - dirty->unit_x1;
+		blit->h = dirty->unit_y2 - dirty->unit_y1;
+	}
+
+	dirty->num_hits++;
+
+	/* Destination bounding box */
+	sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
+	sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
+	sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
+	sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);
+}
+
+/**
+ * vmw_kms_stdu_surface_fifo_commit - Callback to fill in and submit a surface
+ * copy command.
+ *
+ * @dirty: The closure structure.
+ *
+ * Fills in the missing fields in a surface copy command, and encodes a screen
+ * target update command.
+ */
+static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
+{
+	struct vmw_stdu_dirty *sdirty =
+		container_of(dirty, struct vmw_stdu_dirty, base);
+	struct vmw_screen_target_display_unit *stdu =
+		container_of(dirty->unit, typeof(*stdu), base);
+	struct vmw_stdu_surface_copy *cmd = dirty->cmd;
+	struct vmw_stdu_update *update;
+	size_t blit_size = sizeof(SVGA3dCopyBox) * dirty->num_hits;
+	size_t commit_size;
+
+	if (!dirty->num_hits) {
+		vmw_fifo_commit(dirty->dev_priv, 0);
+		return;
+	}
+
+	if (sdirty->sid != stdu->display_srf->res.id) {
+		struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
+
+		cmd->header.id = SVGA_3D_CMD_SURFACE_COPY;
+		cmd->header.size = sizeof(cmd->body) + blit_size;
+		cmd->body.src.sid = sdirty->sid;
+		cmd->body.dest.sid = stdu->display_srf->res.id;
+		update = (struct vmw_stdu_update *) &blit[dirty->num_hits];
+		commit_size = sizeof(*cmd) + blit_size + sizeof(*update);
+	} else {
+		update = dirty->cmd;
+		commit_size = sizeof(*update);
+	}
+
+	vmw_stdu_populate_update(update, stdu->base.unit, sdirty->left,
+				 sdirty->right, sdirty->top, sdirty->bottom);
+
+	vmw_fifo_commit(dirty->dev_priv, commit_size);
+
+	sdirty->left = sdirty->top = S32_MAX;
+	sdirty->right = sdirty->bottom = S32_MIN;
+}
+
+/**
+ * vmw_kms_stdu_surface_dirty - Dirty part of a surface backed framebuffer
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @framebuffer: Pointer to the surface-buffer backed framebuffer.
+ * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
+ * @srf: Pointer to surface to blit from. If NULL, the surface attached
+ * to @framebuffer will be used.
+ * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
+ * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
+ * @num_clips: Number of clip rects in @clips or @vclips.
+ * @inc: Increment to use when looping over @clips.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to a
+ * struct vmw_fence_obj. The returned fence pointer may be NULL in which
+ * case the device has already synchronized.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
+			       struct vmw_framebuffer *framebuffer,
+			       struct drm_clip_rect *clips,
+			       struct drm_vmw_rect *vclips,
+			       struct vmw_resource *srf,
+			       s32 dest_x,
+			       s32 dest_y,
+			       unsigned num_clips, int inc,
+			       struct vmw_fence_obj **out_fence)
+{
+	struct vmw_framebuffer_surface *vfbs =
+		container_of(framebuffer, typeof(*vfbs), base);
+	struct vmw_stdu_dirty sdirty;
+	int ret;
+
+	if (!srf)
+		srf = &vfbs->surface->res;
+
+	ret = vmw_kms_helper_resource_prepare(srf, true);
+	if (ret)
+		return ret;
+
+	if (vfbs->is_dmabuf_proxy) {
+		ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
+		if (ret)
+			goto out_finish;
+	}
+
+	sdirty.base.fifo_commit = vmw_kms_stdu_surface_fifo_commit;
+	sdirty.base.clip = vmw_kms_stdu_surface_clip;
+	sdirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_surface_copy) +
+		sizeof(SVGA3dCopyBox) * num_clips +
+		sizeof(struct vmw_stdu_update);
+	sdirty.sid = srf->id;
+	sdirty.left = sdirty.top = S32_MAX;
+	sdirty.right = sdirty.bottom = S32_MIN;
+
+	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
+				   dest_x, dest_y, num_clips, inc,
+				   &sdirty.base);
+out_finish:
+	vmw_kms_helper_resource_finish(srf, out_fence);
+
+	return ret;
+}
+
+
+/*
+ *  Screen Target CRTC dispatch table
+ */
+static struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
+	.save = vmw_du_crtc_save,
+	.restore = vmw_du_crtc_restore,
+	.cursor_set = vmw_du_crtc_cursor_set,
+	.cursor_move = vmw_du_crtc_cursor_move,
+	.gamma_set = vmw_du_crtc_gamma_set,
+	.destroy = vmw_stdu_crtc_destroy,
+	.set_config = vmw_stdu_crtc_set_config,
+	.page_flip = vmw_stdu_crtc_page_flip,
+};
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit Encoder Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_encoder_destroy - cleans up the STDU
+ *
+ * @encoder: used to get the containing STDU
+ *
+ * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
+ * this can be a no-op.  Nevertheless, it doesn't hurt to have this in case
+ * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
+ * get called.
+ */
+static void vmw_stdu_encoder_destroy(struct drm_encoder *encoder)
+{
+	vmw_stdu_destroy(vmw_encoder_to_stdu(encoder));
+}
+
+static struct drm_encoder_funcs vmw_stdu_encoder_funcs = {
+	.destroy = vmw_stdu_encoder_destroy,
+};
+
+
+
+/******************************************************************************
+ * Screen Target Display Unit Connector Functions
+ *****************************************************************************/
+
+/**
+ * vmw_stdu_connector_destroy - cleans up the STDU
+ *
+ * @connector: used to get the containing STDU
+ *
+ * vmwgfx cleans up crtc/encoder/connector all at the same time so technically
+ * this can be a no-op.  Nevertheless, it doesn't hurt to have this in case
+ * the common KMS code changes and somehow vmw_stdu_crtc_destroy() doesn't
+ * get called.
+ */
+static void vmw_stdu_connector_destroy(struct drm_connector *connector)
+{
+	vmw_stdu_destroy(vmw_connector_to_stdu(connector));
+}
+
+
+
+static struct drm_connector_funcs vmw_stdu_connector_funcs = {
+	.dpms = vmw_du_connector_dpms,
+	.save = vmw_du_connector_save,
+	.restore = vmw_du_connector_restore,
+	.detect = vmw_du_connector_detect,
+	.fill_modes = vmw_du_connector_fill_modes,
+	.set_property = vmw_du_connector_set_property,
+	.destroy = vmw_stdu_connector_destroy,
+};
+
+
+
+/**
+ * vmw_stdu_init - Sets up a Screen Target Display Unit
+ *
+ * @dev_priv: VMW DRM device
+ * @unit: unit number, in the range 0 to VMWGFX_NUM_DISPLAY_UNITS - 1
+ *
+ * This function is called once per CRTC, and allocates one Screen Target
+ * display unit to represent that CRTC.  Since the SVGA device does not separate
+ * out encoder and connector, they are represented as part of the STDU as well.
+ */
+static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
+{
+	struct vmw_screen_target_display_unit *stdu;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+
+
+	stdu = kzalloc(sizeof(*stdu), GFP_KERNEL);
+	if (!stdu)
+		return -ENOMEM;
+
+	stdu->base.unit = unit;
+	crtc = &stdu->base.crtc;
+	encoder = &stdu->base.encoder;
+	connector = &stdu->base.connector;
+
+	stdu->base.pref_active = (unit == 0);
+	stdu->base.pref_width  = dev_priv->initial_width;
+	stdu->base.pref_height = dev_priv->initial_height;
+	stdu->base.is_implicit = true;
+
+	drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
+			   DRM_MODE_CONNECTOR_VIRTUAL);
+	connector->status = vmw_du_connector_detect(connector, false);
+
+	drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
+			 DRM_MODE_ENCODER_VIRTUAL);
+	drm_mode_connector_attach_encoder(connector, encoder);
+	encoder->possible_crtcs = (1 << unit);
+	encoder->possible_clones = 0;
+
+	(void) drm_connector_register(connector);
+
+	drm_crtc_init(dev, crtc, &vmw_stdu_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(crtc, 256);
+
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.dirty_info_property,
+				   1);
+
+	return 0;
+}
+
+
+
+/**
+ *  vmw_stdu_destroy - Cleans up a vmw_screen_target_display_unit
+ *
+ *  @stdu:  Screen Target Display Unit to be destroyed
+ *
+ *  Clean up after vmw_stdu_init
+ */
+static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu)
+{
+	vmw_stdu_unpin_display(stdu);
+
+	vmw_du_cleanup(&stdu->base);
+	kfree(stdu);
+}
+
+
+
+/******************************************************************************
+ * Screen Target Display KMS Functions
+ *
+ * These functions are called by the common KMS code in vmwgfx_kms.c
+ *****************************************************************************/
+
+/**
+ * vmw_kms_stdu_init_display - Initializes a Screen Target based display
+ *
+ * @dev_priv: VMW DRM device
+ *
+ * This function initializes a Screen Target based display device.  It checks
+ * the capability bits to make sure the underlying hardware can support
+ * screen targets, and then creates the maximum number of CRTCs, a.k.a. Display
+ * Units, as supported by the display hardware.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int i, ret;
+
+
+	/* Do nothing if Screen Target support is turned off */
+	if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE)
+		return -ENOSYS;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
+		return -ENOSYS;
+
+	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = drm_mode_create_dirty_info_property(dev);
+	if (unlikely(ret != 0))
+		goto err_vblank_cleanup;
+
+	dev_priv->active_display_unit = vmw_du_screen_target;
+
+	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
+		ret = vmw_stdu_init(dev_priv, i);
+
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed to initialize STDU %d", i);
+			goto err_vblank_cleanup;
+		}
+	}
+
+	DRM_INFO("Screen Target Display device initialized\n");
+
+	return 0;
+
+err_vblank_cleanup:
+	drm_vblank_cleanup(dev);
+	return ret;
+}
+
+
+
+/**
+ * vmw_kms_stdu_close_display - Cleans up after vmw_kms_stdu_init_display
+ *
+ * @dev_priv: VMW DRM device
+ *
+ * Frees up any resources allocated by vmw_kms_stdu_init_display
+ *
+ * RETURNS:
+ * 0 on success
+ */
+int vmw_kms_stdu_close_display(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	drm_vblank_cleanup(dev);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 4ecdbf3..3361769 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -27,8 +27,11 @@
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_resource_priv.h"
+#include "vmwgfx_so.h"
+#include "vmwgfx_binding.h"
 #include <ttm/ttm_placement.h>
-#include "svga3d_surfacedefs.h"
+#include "device_include/svga3d_surfacedefs.h"
+
 
 /**
  * struct vmw_user_surface - User-space visible surface resource
@@ -36,7 +39,7 @@
  * @base:           The TTM base object handling user-space visibility.
  * @srf:            The surface metadata.
  * @size:           TTM accounting size for the surface.
- * @master:         master of the creating client. Used for security check.
+ * @master: master of the creating client. Used for security check.
  */
 struct vmw_user_surface {
 	struct ttm_prime_object prime;
@@ -220,7 +223,7 @@
 	cmd->header.size = cmd_len;
 	cmd->body.sid = srf->res.id;
 	cmd->body.surfaceFlags = srf->flags;
-	cmd->body.format = cpu_to_le32(srf->format);
+	cmd->body.format = srf->format;
 	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
 		cmd->body.face[i].numMipLevels = srf->mip_levels[i];
 
@@ -340,7 +343,7 @@
 		dev_priv->used_memory_size -= res->backup_size;
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
 	}
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 }
 
 /**
@@ -576,14 +579,14 @@
 
 	BUG_ON(res_free == NULL);
 	if (!dev_priv->has_mob)
-		(void) vmw_3d_resource_inc(dev_priv, false);
+		vmw_fifo_resource_inc(dev_priv);
 	ret = vmw_resource_init(dev_priv, res, true, res_free,
 				(dev_priv->has_mob) ? &vmw_gb_surface_func :
 				&vmw_legacy_surface_func);
 
 	if (unlikely(ret != 0)) {
 		if (!dev_priv->has_mob)
-			vmw_3d_resource_dec(dev_priv, false);
+			vmw_fifo_resource_dec(dev_priv);
 		res_free(res);
 		return ret;
 	}
@@ -593,6 +596,7 @@
 	 * surface validate.
 	 */
 
+	INIT_LIST_HEAD(&srf->view_list);
 	vmw_resource_activate(res, vmw_hw_surface_destroy);
 	return ret;
 }
@@ -723,6 +727,7 @@
 	desc = svga3dsurface_get_desc(req->format);
 	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
 		DRM_ERROR("Invalid surface format for surface creation.\n");
+		DRM_ERROR("Format requested is: %d\n", req->format);
 		return -EINVAL;
 	}
 
@@ -906,6 +911,12 @@
 				  "surface reference.\n");
 			return -EACCES;
 		}
+		if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+			DRM_ERROR("Locked master refused legacy "
+				  "surface reference.\n");
+			return -EACCES;
+		}
+
 		handle = u_handle;
 	}
 
@@ -1018,17 +1029,21 @@
 {
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_surface *srf = vmw_res_to_srf(res);
-	uint32_t cmd_len, submit_len;
+	uint32_t cmd_len, cmd_id, submit_len;
 	int ret;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDefineGBSurface body;
 	} *cmd;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineGBSurface_v2 body;
+	} *cmd2;
 
 	if (likely(res->id != -1))
 		return 0;
 
-	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_fifo_resource_inc(dev_priv);
 	ret = vmw_resource_alloc_id(res);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed to allocate a surface id.\n");
@@ -1040,9 +1055,19 @@
 		goto out_no_fifo;
 	}
 
-	cmd_len = sizeof(cmd->body);
-	submit_len = sizeof(*cmd);
+	if (srf->array_size > 0) {
+		/* has_dx checked on creation time. */
+		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
+		cmd_len = sizeof(cmd2->body);
+		submit_len = sizeof(*cmd2);
+	} else {
+		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
+		cmd_len = sizeof(cmd->body);
+		submit_len = sizeof(*cmd);
+	}
+
 	cmd = vmw_fifo_reserve(dev_priv, submit_len);
+	cmd2 = (typeof(cmd2))cmd;
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "creation.\n");
@@ -1050,17 +1075,33 @@
 		goto out_no_fifo;
 	}
 
-	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
-	cmd->header.size = cmd_len;
-	cmd->body.sid = srf->res.id;
-	cmd->body.surfaceFlags = srf->flags;
-	cmd->body.format = cpu_to_le32(srf->format);
-	cmd->body.numMipLevels = srf->mip_levels[0];
-	cmd->body.multisampleCount = srf->multisample_count;
-	cmd->body.autogenFilter = srf->autogen_filter;
-	cmd->body.size.width = srf->base_size.width;
-	cmd->body.size.height = srf->base_size.height;
-	cmd->body.size.depth = srf->base_size.depth;
+	if (srf->array_size > 0) {
+		cmd2->header.id = cmd_id;
+		cmd2->header.size = cmd_len;
+		cmd2->body.sid = srf->res.id;
+		cmd2->body.surfaceFlags = srf->flags;
+		cmd2->body.format = cpu_to_le32(srf->format);
+		cmd2->body.numMipLevels = srf->mip_levels[0];
+		cmd2->body.multisampleCount = srf->multisample_count;
+		cmd2->body.autogenFilter = srf->autogen_filter;
+		cmd2->body.size.width = srf->base_size.width;
+		cmd2->body.size.height = srf->base_size.height;
+		cmd2->body.size.depth = srf->base_size.depth;
+		cmd2->body.arraySize = srf->array_size;
+	} else {
+		cmd->header.id = cmd_id;
+		cmd->header.size = cmd_len;
+		cmd->body.sid = srf->res.id;
+		cmd->body.surfaceFlags = srf->flags;
+		cmd->body.format = cpu_to_le32(srf->format);
+		cmd->body.numMipLevels = srf->mip_levels[0];
+		cmd->body.multisampleCount = srf->multisample_count;
+		cmd->body.autogenFilter = srf->autogen_filter;
+		cmd->body.size.width = srf->base_size.width;
+		cmd->body.size.height = srf->base_size.height;
+		cmd->body.size.depth = srf->base_size.depth;
+	}
+
 	vmw_fifo_commit(dev_priv, submit_len);
 
 	return 0;
@@ -1068,7 +1109,7 @@
 out_no_fifo:
 	vmw_resource_release_id(res);
 out_no_id:
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 	return ret;
 }
 
@@ -1188,6 +1229,7 @@
 static int vmw_gb_surface_destroy(struct vmw_resource *res)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf = vmw_res_to_srf(res);
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDestroyGBSurface body;
@@ -1197,7 +1239,8 @@
 		return 0;
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_res_list_scrub(&res->binding_head);
+	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
+	vmw_binding_res_list_scrub(&res->binding_head);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
@@ -1213,11 +1256,12 @@
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 	mutex_unlock(&dev_priv->binding_mutex);
 	vmw_resource_release_id(res);
-	vmw_3d_resource_dec(dev_priv, false);
+	vmw_fifo_resource_dec(dev_priv);
 
 	return 0;
 }
 
+
 /**
  * vmw_gb_surface_define_ioctl - Ioctl function implementing
  *                               the user surface define functionality.
@@ -1241,77 +1285,51 @@
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	int ret;
 	uint32_t size;
-	const struct svga3d_surface_desc *desc;
 	uint32_t backup_handle;
 
+
 	if (unlikely(vmw_user_surface_size == 0))
 		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
 			128;
 
 	size = vmw_user_surface_size + 128;
 
-	desc = svga3dsurface_get_desc(req->format);
-	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
-		DRM_ERROR("Invalid surface format for surface creation.\n");
-		return -EINVAL;
-	}
+	/* Define a surface based on the parameters. */
+	ret = vmw_surface_gb_priv_define(dev,
+			size,
+			req->svga3d_flags,
+			req->format,
+			req->drm_surface_flags & drm_vmw_surface_flag_scanout,
+			req->mip_levels,
+			req->multisample_count,
+			req->array_size,
+			req->base_size,
+			&srf);
+	if (unlikely(ret != 0))
+		return ret;
+
+	user_srf = container_of(srf, struct vmw_user_surface, srf);
+	if (drm_is_primary_client(file_priv))
+		user_srf->master = drm_master_get(file_priv->master);
 
 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-				   size, false, true);
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Out of graphics memory for surface"
-				  " creation.\n");
-		goto out_unlock;
-	}
+	res = &user_srf->srf.res;
 
-	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
-	if (unlikely(user_srf == NULL)) {
-		ret = -ENOMEM;
-		goto out_no_user_srf;
-	}
-
-	srf = &user_srf->srf;
-	res = &srf->res;
-
-	srf->flags = req->svga3d_flags;
-	srf->format = req->format;
-	srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout;
-	srf->mip_levels[0] = req->mip_levels;
-	srf->num_sizes = 1;
-	srf->sizes = NULL;
-	srf->offsets = NULL;
-	user_srf->size = size;
-	srf->base_size = req->base_size;
-	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
-	srf->multisample_count = req->multisample_count;
-	res->backup_size = svga3dsurface_get_serialized_size
-	  (srf->format, srf->base_size, srf->mip_levels[0],
-	   srf->flags & SVGA3D_SURFACE_CUBEMAP);
-
-	user_srf->prime.base.shareable = false;
-	user_srf->prime.base.tfile = NULL;
-	if (drm_is_primary_client(file_priv))
-		user_srf->master = drm_master_get(file_priv->master);
-
-	/**
-	 * From this point, the generic resource management functions
-	 * destroy the object on failure.
-	 */
-
-	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
-	if (unlikely(ret != 0))
-		goto out_unlock;
 
 	if (req->buffer_handle != SVGA3D_INVALID_ID) {
 		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
 					     &res->backup);
-	} else if (req->drm_surface_flags &
-		   drm_vmw_surface_flag_create_buffer)
+		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
+		    res->backup_size) {
+			DRM_ERROR("Surface backup buffer is too small.\n");
+			vmw_dmabuf_unreference(&res->backup);
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+	} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
 		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
 					    res->backup_size,
 					    req->drm_surface_flags &
@@ -1324,7 +1342,7 @@
 		goto out_unlock;
 	}
 
-	tmp = vmw_resource_reference(&srf->res);
+	tmp = vmw_resource_reference(res);
 	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
 				    req->drm_surface_flags &
 				    drm_vmw_surface_flag_shareable,
@@ -1337,7 +1355,7 @@
 		goto out_unlock;
 	}
 
-	rep->handle = user_srf->prime.base.hash.key;
+	rep->handle      = user_srf->prime.base.hash.key;
 	rep->backup_size = res->backup_size;
 	if (res->backup) {
 		rep->buffer_map_handle =
@@ -1352,10 +1370,6 @@
 
 	vmw_resource_unreference(&res);
 
-	ttm_read_unlock(&dev_priv->reservation_sem);
-	return 0;
-out_no_user_srf:
-	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 out_unlock:
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
@@ -1415,6 +1429,7 @@
 	rep->creq.drm_surface_flags = 0;
 	rep->creq.multisample_count = srf->multisample_count;
 	rep->creq.autogen_filter = srf->autogen_filter;
+	rep->creq.array_size = srf->array_size;
 	rep->creq.buffer_handle = backup_handle;
 	rep->creq.base_size = srf->base_size;
 	rep->crep.handle = user_srf->prime.base.hash.key;
@@ -1429,3 +1444,137 @@
 
 	return ret;
 }
+
+/**
+ * vmw_surface_gb_priv_define - Define a private GB surface
+ *
+ * @dev:  Pointer to a struct drm_device
+ * @user_accounting_size:  Used to track user-space memory usage, set
+ *                         to 0 for kernel mode only memory
+ * @svga3d_flags: SVGA3d surface flags for the device
+ * @format: requested surface format
+ * @for_scanout: true if intended to be used for scanout buffer
+ * @num_mip_levels:  number of MIP levels
+ * @multisample_count: multisample count for the surface
+ * @array_size: Surface array size.
+ * @size: width, height, depth of the surface requested
+ * @srf_out: allocated surface.  Set to NULL on failure.
+ *
+ * GB surfaces allocated by this function will not have a user mode handle, and
+ * thus will only be visible to vmwgfx.  For optimization reasons the
+ * surface may later be given a user mode handle by another function to make
+ * it available to user mode drivers.
+ */
+int vmw_surface_gb_priv_define(struct drm_device *dev,
+			       uint32_t user_accounting_size,
+			       uint32_t svga3d_flags,
+			       SVGA3dSurfaceFormat format,
+			       bool for_scanout,
+			       uint32_t num_mip_levels,
+			       uint32_t multisample_count,
+			       uint32_t array_size,
+			       struct drm_vmw_size size,
+			       struct vmw_surface **srf_out)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_surface *user_srf;
+	struct vmw_surface *srf;
+	int ret;
+	u32 num_layers;
+
+	*srf_out = NULL;
+
+	if (for_scanout) {
+		if (!svga3dsurface_is_screen_target_format(format)) {
+			DRM_ERROR("Invalid Screen Target surface format.");
+			return -EINVAL;
+		}
+	} else {
+		const struct svga3d_surface_desc *desc;
+
+		desc = svga3dsurface_get_desc(format);
+		if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+			DRM_ERROR("Invalid surface format.\n");
+			return -EINVAL;
+		}
+	}
+
+	/* array_size must be zero for a non-GL3 host. */
+	if (array_size > 0 && !dev_priv->has_dx) {
+		DRM_ERROR("Tried to create DX surface on non-DX host.\n");
+		return -EINVAL;
+	}
+
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   user_accounting_size, false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for surface"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+	if (unlikely(user_srf == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_user_srf;
+	}
+
+	*srf_out  = &user_srf->srf;
+	user_srf->size = user_accounting_size;
+	user_srf->prime.base.shareable = false;
+	user_srf->prime.base.tfile     = NULL;
+
+	srf = &user_srf->srf;
+	srf->flags             = svga3d_flags;
+	srf->format            = format;
+	srf->scanout           = for_scanout;
+	srf->mip_levels[0]     = num_mip_levels;
+	srf->num_sizes         = 1;
+	srf->sizes             = NULL;
+	srf->offsets           = NULL;
+	srf->base_size         = size;
+	srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE;
+	srf->array_size        = array_size;
+	srf->multisample_count = multisample_count;
+
+	if (array_size)
+		num_layers = array_size;
+	else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
+		num_layers = SVGA3D_MAX_SURFACE_FACES;
+	else
+		num_layers = 1;
+
+	srf->res.backup_size   =
+		svga3dsurface_get_serialized_size(srf->format,
+						  srf->base_size,
+						  srf->mip_levels[0],
+						  num_layers);
+
+	if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
+		srf->res.backup_size += sizeof(SVGA3dDXSOState);
+
+	if (dev_priv->active_display_unit == vmw_du_screen_target &&
+	    for_scanout)
+		srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
+
+	/*
+	 * From this point, the generic resource management functions
+	 * destroy the object on failure.
+	 */
+	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+
+out_no_user_srf:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
+
+out_unlock:
+	ttm_read_unlock(&dev_priv->reservation_sem);
+	return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 98d6bfb..e771091 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
index fbc6ee6..52a6fd2 100644
--- a/drivers/gpu/host1x/mipi.c
+++ b/drivers/gpu/host1x/mipi.c
@@ -31,6 +31,9 @@
 #include "dev.h"
 
 #define MIPI_CAL_CTRL			0x00
+#define MIPI_CAL_CTRL_NOISE_FILTER(x)	(((x) & 0xf) << 26)
+#define MIPI_CAL_CTRL_PRESCALE(x)	(((x) & 0x3) << 24)
+#define MIPI_CAL_CTRL_CLKEN_OVR		(1 << 4)
 #define MIPI_CAL_CTRL_START		(1 << 0)
 
 #define MIPI_CAL_AUTOCAL_CTRL		0x01
@@ -44,15 +47,18 @@
 #define MIPI_CAL_CONFIG_CSIC		0x07
 #define MIPI_CAL_CONFIG_CSID		0x08
 #define MIPI_CAL_CONFIG_CSIE		0x09
+#define MIPI_CAL_CONFIG_CSIF		0x0a
 #define MIPI_CAL_CONFIG_DSIA		0x0e
 #define MIPI_CAL_CONFIG_DSIB		0x0f
 #define MIPI_CAL_CONFIG_DSIC		0x10
 #define MIPI_CAL_CONFIG_DSID		0x11
 
-#define MIPI_CAL_CONFIG_DSIAB_CLK	0x19
-#define MIPI_CAL_CONFIG_DSICD_CLK	0x1a
+#define MIPI_CAL_CONFIG_DSIA_CLK	0x19
+#define MIPI_CAL_CONFIG_DSIB_CLK	0x1a
 #define MIPI_CAL_CONFIG_CSIAB_CLK	0x1b
+#define MIPI_CAL_CONFIG_DSIC_CLK	0x1c
 #define MIPI_CAL_CONFIG_CSICD_CLK	0x1c
+#define MIPI_CAL_CONFIG_DSID_CLK	0x1d
 #define MIPI_CAL_CONFIG_CSIE_CLK	0x1d
 
 /* for data and clock lanes */
@@ -73,8 +79,11 @@
 
 #define MIPI_CAL_BIAS_PAD_CFG1		0x17
 #define MIPI_CAL_BIAS_PAD_DRV_DN_REF(x) (((x) & 0x7) << 16)
+#define MIPI_CAL_BIAS_PAD_DRV_UP_REF(x) (((x) & 0x7) << 8)
 
 #define MIPI_CAL_BIAS_PAD_CFG2		0x18
+#define MIPI_CAL_BIAS_PAD_VCLAMP(x)	(((x) & 0x7) << 16)
+#define MIPI_CAL_BIAS_PAD_VAUXP(x)	(((x) & 0x7) << 4)
 #define MIPI_CAL_BIAS_PAD_PDVREG	(1 << 1)
 
 struct tegra_mipi_pad {
@@ -86,13 +95,35 @@
 	bool has_clk_lane;
 	const struct tegra_mipi_pad *pads;
 	unsigned int num_pads;
+
+	bool clock_enable_override;
+	bool needs_vclamp_ref;
+
+	/* bias pad configuration settings */
+	u8 pad_drive_down_ref;
+	u8 pad_drive_up_ref;
+
+	u8 pad_vclamp_level;
+	u8 pad_vauxp_level;
+
+	/* calibration settings for data lanes */
+	u8 hspdos;
+	u8 hspuos;
+	u8 termos;
+
+	/* calibration settings for clock lanes */
+	u8 hsclkpdos;
+	u8 hsclkpuos;
 };
 
 struct tegra_mipi {
 	const struct tegra_mipi_soc *soc;
+	struct device *dev;
 	void __iomem *regs;
 	struct mutex lock;
 	struct clk *clk;
+
+	unsigned long usage_count;
 };
 
 struct tegra_mipi_device {
@@ -114,6 +145,67 @@
 	writel(value, mipi->regs + (offset << 2));
 }
 
+static int tegra_mipi_power_up(struct tegra_mipi *mipi)
+{
+	u32 value;
+	int err;
+
+	err = clk_enable(mipi->clk);
+	if (err < 0)
+		return err;
+
+	value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG0);
+	value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
+
+	if (mipi->soc->needs_vclamp_ref)
+		value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
+
+	tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
+
+	value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG2);
+	value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
+	tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
+
+	clk_disable(mipi->clk);
+
+	return 0;
+}
+
+static int tegra_mipi_power_down(struct tegra_mipi *mipi)
+{
+	u32 value;
+	int err;
+
+	err = clk_enable(mipi->clk);
+	if (err < 0)
+		return err;
+
+	/*
+	 * The MIPI_CAL_BIAS_PAD_PDVREG controls a voltage regulator that
+	 * supplies the DSI pads. This must be kept enabled until none of the
+	 * DSI lanes are used anymore.
+	 */
+	value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG2);
+	value |= MIPI_CAL_BIAS_PAD_PDVREG;
+	tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
+
+	/*
+	 * MIPI_CAL_BIAS_PAD_PDVCLAMP and MIPI_CAL_BIAS_PAD_E_VCLAMP_REF
+	 * control a regulator that supplies current to the pre-driver logic.
+	 * Powering down this regulator causes DSI to fail, so it must remain
+	 * powered on until none of the DSI lanes are used anymore.
+	 */
+	value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG0);
+
+	if (mipi->soc->needs_vclamp_ref)
+		value &= ~MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
+
+	value |= MIPI_CAL_BIAS_PAD_PDVCLAMP;
+	tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
+
+	clk_disable(mipi->clk);
+
+	return 0;
+}
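
Every register access in these helpers follows the same shape: enable the calibration clock, read-modify-write the register, and drop the clock again. Factored out as a sketch (tegra_mipi_rmw is a hypothetical helper built on the accessors above):

/* Sketch: clocked read-modify-write of one MIPI CAL register. */
static int tegra_mipi_rmw(struct tegra_mipi *mipi, unsigned long offset,
			  u32 clear, u32 set)
{
	u32 value;
	int err;

	err = clk_enable(mipi->clk);
	if (err < 0)
		return err;

	value = tegra_mipi_readl(mipi, offset);
	value &= ~clear;
	value |= set;
	tegra_mipi_writel(mipi, value, offset);

	clk_disable(mipi->clk);
	return 0;
}
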
+
 struct tegra_mipi_device *tegra_mipi_request(struct device *device)
 {
 	struct device_node *np = device->of_node;
@@ -150,6 +242,20 @@
 	dev->pads = args.args[0];
 	dev->device = device;
 
+	mutex_lock(&dev->mipi->lock);
+
+	if (dev->mipi->usage_count++ == 0) {
+		err = tegra_mipi_power_up(dev->mipi);
+		if (err < 0) {
+			dev_err(dev->mipi->dev,
+				"failed to power up MIPI bricks: %d\n",
+				err);
+			mutex_unlock(&dev->mipi->lock);
+			return ERR_PTR(err);
+		}
+	}
+
+	mutex_unlock(&dev->mipi->lock);
+
 	return dev;
 
 put:
@@ -164,6 +270,25 @@
 
 void tegra_mipi_free(struct tegra_mipi_device *device)
 {
+	int err;
+
+	mutex_lock(&device->mipi->lock);
+
+	if (--device->mipi->usage_count == 0) {
+		err = tegra_mipi_power_down(device->mipi);
+		if (err < 0) {
+			/*
+			 * Not much that can be done here, so an error message
+			 * will have to do.
+			 */
+			dev_err(device->mipi->dev,
+				"failed to power down MIPI bricks: %d\n",
+				err);
+		}
+	}
+
+	mutex_unlock(&device->mipi->lock);
+
 	platform_device_put(device->pdev);
 	kfree(device);
 }
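
tegra_mipi_request() and tegra_mipi_free() pair up as a mutex-protected usage count: the first requester powers the bricks up, the last one to free powers them down. The idiom reduced to its essentials (shared_res, power_up and power_down are hypothetical):

/* Sketch: first-user-on / last-user-off under a mutex. */
struct shared_res {
	struct mutex lock;
	unsigned long usage_count;
};

static int shared_res_get(struct shared_res *r)
{
	int err = 0;

	mutex_lock(&r->lock);
	if (r->usage_count++ == 0)
		err = power_up(r);	/* hypothetical */
	if (err < 0)
		r->usage_count--;	/* undo on failure */
	mutex_unlock(&r->lock);

	return err;
}

static void shared_res_put(struct shared_res *r)
{
	mutex_lock(&r->lock);
	if (--r->usage_count == 0)
		power_down(r);		/* hypothetical */
	mutex_unlock(&r->lock);
}
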
@@ -199,16 +324,15 @@
 
 	mutex_lock(&device->mipi->lock);
 
-	value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG0);
-	value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
-	value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
-	tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
-
-	tegra_mipi_writel(device->mipi, MIPI_CAL_BIAS_PAD_DRV_DN_REF(2),
-			  MIPI_CAL_BIAS_PAD_CFG1);
+	value = MIPI_CAL_BIAS_PAD_DRV_DN_REF(soc->pad_drive_down_ref) |
+		MIPI_CAL_BIAS_PAD_DRV_UP_REF(soc->pad_drive_up_ref);
+	tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG1);
 
 	value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG2);
-	value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
+	value &= ~MIPI_CAL_BIAS_PAD_VCLAMP(0x7);
+	value &= ~MIPI_CAL_BIAS_PAD_VAUXP(0x7);
+	value |= MIPI_CAL_BIAS_PAD_VCLAMP(soc->pad_vclamp_level);
+	value |= MIPI_CAL_BIAS_PAD_VAUXP(soc->pad_vauxp_level);
 	tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
 
 	for (i = 0; i < soc->num_pads; i++) {
@@ -216,21 +340,38 @@
 
 		if (device->pads & BIT(i)) {
 			data = MIPI_CAL_CONFIG_SELECT |
-			       MIPI_CAL_CONFIG_HSPDOS(0) |
-			       MIPI_CAL_CONFIG_HSPUOS(4) |
-			       MIPI_CAL_CONFIG_TERMOS(5);
+			       MIPI_CAL_CONFIG_HSPDOS(soc->hspdos) |
+			       MIPI_CAL_CONFIG_HSPUOS(soc->hspuos) |
+			       MIPI_CAL_CONFIG_TERMOS(soc->termos);
 			clk = MIPI_CAL_CONFIG_SELECT |
-			      MIPI_CAL_CONFIG_HSCLKPDOSD(0) |
-			      MIPI_CAL_CONFIG_HSCLKPUOSD(4);
+			      MIPI_CAL_CONFIG_HSCLKPDOSD(soc->hsclkpdos) |
+			      MIPI_CAL_CONFIG_HSCLKPUOSD(soc->hsclkpuos);
 		}
 
 		tegra_mipi_writel(device->mipi, data, soc->pads[i].data);
 
-		if (soc->has_clk_lane)
+		if (soc->has_clk_lane && soc->pads[i].clk != 0)
 			tegra_mipi_writel(device->mipi, clk, soc->pads[i].clk);
 	}
 
 	value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
+	value &= ~MIPI_CAL_CTRL_NOISE_FILTER(0xf);
+	value &= ~MIPI_CAL_CTRL_PRESCALE(0x3);
+	value |= MIPI_CAL_CTRL_NOISE_FILTER(0xa);
+	value |= MIPI_CAL_CTRL_PRESCALE(0x2);
+
+	if (!soc->clock_enable_override)
+		value &= ~MIPI_CAL_CTRL_CLKEN_OVR;
+	else
+		value |= MIPI_CAL_CTRL_CLKEN_OVR;
+
+	tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
+
+	/* clear any pending status bits */
+	value = tegra_mipi_readl(device->mipi, MIPI_CAL_STATUS);
+	tegra_mipi_writel(device->mipi, value, MIPI_CAL_STATUS);
+
+	value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
 	value |= MIPI_CAL_CTRL_START;
 	tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
 
@@ -259,6 +400,17 @@
 	.has_clk_lane = false,
 	.pads = tegra114_mipi_pads,
 	.num_pads = ARRAY_SIZE(tegra114_mipi_pads),
+	.clock_enable_override = true,
+	.needs_vclamp_ref = true,
+	.pad_drive_down_ref = 0x2,
+	.pad_drive_up_ref = 0x0,
+	.pad_vclamp_level = 0x0,
+	.pad_vauxp_level = 0x0,
+	.hspdos = 0x0,
+	.hspuos = 0x4,
+	.termos = 0x5,
+	.hsclkpdos = 0x0,
+	.hsclkpuos = 0x4,
 };
 
 static const struct tegra_mipi_pad tegra124_mipi_pads[] = {
@@ -266,20 +418,80 @@
 	{ .data = MIPI_CAL_CONFIG_CSIB, .clk = MIPI_CAL_CONFIG_CSIAB_CLK },
 	{ .data = MIPI_CAL_CONFIG_CSIC, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
 	{ .data = MIPI_CAL_CONFIG_CSID, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
-	{ .data = MIPI_CAL_CONFIG_CSIE, .clk = MIPI_CAL_CONFIG_CSIE_CLK },
-	{ .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
-	{ .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
+	{ .data = MIPI_CAL_CONFIG_CSIE, .clk = MIPI_CAL_CONFIG_CSIE_CLK  },
+	{ .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIA_CLK  },
+	{ .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIB_CLK  },
 };
 
 static const struct tegra_mipi_soc tegra124_mipi_soc = {
 	.has_clk_lane = true,
 	.pads = tegra124_mipi_pads,
 	.num_pads = ARRAY_SIZE(tegra124_mipi_pads),
+	.clock_enable_override = true,
+	.needs_vclamp_ref = true,
+	.pad_drive_down_ref = 0x2,
+	.pad_drive_up_ref = 0x0,
+	.pad_vclamp_level = 0x0,
+	.pad_vauxp_level = 0x0,
+	.hspdos = 0x0,
+	.hspuos = 0x0,
+	.termos = 0x0,
+	.hsclkpdos = 0x1,
+	.hsclkpuos = 0x2,
 };
 
-static struct of_device_id tegra_mipi_of_match[] = {
+static const struct tegra_mipi_soc tegra132_mipi_soc = {
+	.has_clk_lane = true,
+	.pads = tegra124_mipi_pads,
+	.num_pads = ARRAY_SIZE(tegra124_mipi_pads),
+	.clock_enable_override = false,
+	.needs_vclamp_ref = false,
+	.pad_drive_down_ref = 0x0,
+	.pad_drive_up_ref = 0x3,
+	.pad_vclamp_level = 0x0,
+	.pad_vauxp_level = 0x0,
+	.hspdos = 0x0,
+	.hspuos = 0x0,
+	.termos = 0x0,
+	.hsclkpdos = 0x3,
+	.hsclkpuos = 0x2,
+};
+
+static const struct tegra_mipi_pad tegra210_mipi_pads[] = {
+	{ .data = MIPI_CAL_CONFIG_CSIA, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_CSIB, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_CSIC, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_CSID, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_CSIE, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_CSIF, .clk = 0 },
+	{ .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIA_CLK },
+	{ .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIB_CLK },
+	{ .data = MIPI_CAL_CONFIG_DSIC, .clk = MIPI_CAL_CONFIG_DSIC_CLK },
+	{ .data = MIPI_CAL_CONFIG_DSID, .clk = MIPI_CAL_CONFIG_DSID_CLK },
+};
+
+static const struct tegra_mipi_soc tegra210_mipi_soc = {
+	.has_clk_lane = true,
+	.pads = tegra210_mipi_pads,
+	.num_pads = ARRAY_SIZE(tegra210_mipi_pads),
+	.clock_enable_override = true,
+	.needs_vclamp_ref = false,
+	.pad_drive_down_ref = 0x0,
+	.pad_drive_up_ref = 0x3,
+	.pad_vclamp_level = 0x1,
+	.pad_vauxp_level = 0x1,
+	.hspdos = 0x0,
+	.hspuos = 0x2,
+	.termos = 0x0,
+	.hsclkpdos = 0x0,
+	.hsclkpuos = 0x2,
+};
+
+static const struct of_device_id tegra_mipi_of_match[] = {
 	{ .compatible = "nvidia,tegra114-mipi", .data = &tegra114_mipi_soc },
 	{ .compatible = "nvidia,tegra124-mipi", .data = &tegra124_mipi_soc },
+	{ .compatible = "nvidia,tegra132-mipi", .data = &tegra132_mipi_soc },
+	{ .compatible = "nvidia,tegra210-mipi", .data = &tegra210_mipi_soc },
 	{ },
 };
 
@@ -299,6 +511,7 @@
 		return -ENOMEM;
 
 	mipi->soc = match->data;
+	mipi->dev = &pdev->dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	mipi->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 00f2058..243f99a 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -915,8 +915,8 @@
 static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
 	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
-	struct irq_chip *chip = irq_get_chip(irq);
 
 	chained_irq_enter(chip, desc);
 
@@ -928,8 +928,8 @@
 static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
 	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	const int int_reg[] = { 4, 5, 8, 9};
-	struct irq_chip *chip = irq_get_chip(irq);
 
 	chained_irq_enter(chip, desc);
 
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 37ac7b5..2106066 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -6,17 +6,19 @@
  * Licensed under GPLv2
  *
  * vga_switcheroo.c - Support for laptop with dual GPU using one set of outputs
-
- Switcher interface - methods require for ATPX and DCM
- - switchto - this throws the output MUX switch
- - discrete_set_power - sets the power state for the discrete card
-
- GPU driver interface
- - set_gpu_state - this should do the equiv of s/r for the card
-		  - this should *not* set the discrete power state
- - switch_check  - check if the device is in a position to switch now
+ *
+ * Switcher interface - methods required for ATPX and DCM
+ * - switchto - this throws the output MUX switch
+ * - discrete_set_power - sets the power state for the discrete card
+ *
+ * GPU driver interface
+ * - set_gpu_state - this should do the equiv of s/r for the card
+ *                 - this should *not* set the discrete power state
+ * - switch_check  - check if the device is in a position to switch now
  */
 
+#define pr_fmt(fmt) "vga_switcheroo: " fmt
+
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
@@ -111,7 +113,7 @@
 
 	vgasr_priv.handler = handler;
 	if (vga_switcheroo_ready()) {
-		printk(KERN_INFO "vga_switcheroo: enabled\n");
+		pr_info("enabled\n");
 		vga_switcheroo_enable();
 	}
 	mutex_unlock(&vgasr_mutex);
@@ -124,7 +126,7 @@
 	mutex_lock(&vgasr_mutex);
 	vgasr_priv.handler = NULL;
 	if (vgasr_priv.active) {
-		pr_info("vga_switcheroo: disabled\n");
+		pr_info("disabled\n");
 		vga_switcheroo_debugfs_fini(&vgasr_priv);
 		vgasr_priv.active = false;
 	}
@@ -155,7 +157,7 @@
 		vgasr_priv.registered_clients++;
 
 	if (vga_switcheroo_ready()) {
-		printk(KERN_INFO "vga_switcheroo: enabled\n");
+		pr_info("enabled\n");
 		vga_switcheroo_enable();
 	}
 	mutex_unlock(&vgasr_mutex);
@@ -167,7 +169,8 @@
 				   bool driver_power_control)
 {
 	return register_client(pdev, ops, -1,
-			       pdev == vga_default_device(), driver_power_control);
+			       pdev == vga_default_device(),
+			       driver_power_control);
 }
 EXPORT_SYMBOL(vga_switcheroo_register_client);
 
@@ -183,6 +186,7 @@
 find_client_from_pci(struct list_head *head, struct pci_dev *pdev)
 {
 	struct vga_switcheroo_client *client;
+
 	list_for_each_entry(client, head, list)
 		if (client->pdev == pdev)
 			return client;
@@ -193,6 +197,7 @@
 find_client_from_id(struct list_head *head, int client_id)
 {
 	struct vga_switcheroo_client *client;
+
 	list_for_each_entry(client, head, list)
 		if (client->id == client_id)
 			return client;
@@ -203,6 +208,7 @@
 find_active_client(struct list_head *head)
 {
 	struct vga_switcheroo_client *client;
+
 	list_for_each_entry(client, head, list)
 		if (client->active && client_is_vga(client))
 			return client;
@@ -235,7 +241,7 @@
 		kfree(client);
 	}
 	if (vgasr_priv.active && vgasr_priv.registered_clients < 2) {
-		printk(KERN_INFO "vga_switcheroo: disabled\n");
+		pr_info("disabled\n");
 		vga_switcheroo_debugfs_fini(&vgasr_priv);
 		vgasr_priv.active = false;
 	}
@@ -260,10 +266,12 @@
 {
 	struct vga_switcheroo_client *client;
 	int i = 0;
+
 	mutex_lock(&vgasr_mutex);
 	list_for_each_entry(client, &vgasr_priv.clients, list) {
 		seq_printf(m, "%d:%s%s:%c:%s%s:%s\n", i,
-			   client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
+			   client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" :
+								     "IGD",
 			   client_is_vga(client) ? "" : "-Audio",
 			   client->active ? '+' : ' ',
 			   client->driver_power_control ? "Dyn" : "",
@@ -347,6 +355,7 @@
 
 	if (new_client->fb_info) {
 		struct fb_event event;
+
 		console_lock();
 		event.info = new_client->fb_info;
 		fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
@@ -375,7 +384,7 @@
 
 	list_for_each_entry(client, &vgasr_priv.clients, list) {
 		if (!client->ops->can_switch(client->pdev)) {
-			printk(KERN_ERR "vga_switcheroo: client %x refused switch\n", client->id);
+			pr_err("client %x refused switch\n", client->id);
 			return false;
 		}
 	}
@@ -484,20 +493,20 @@
 	if (can_switch) {
 		ret = vga_switchto_stage1(client);
 		if (ret)
-			printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
+			pr_err("switching failed stage 1 %d\n", ret);
 
 		ret = vga_switchto_stage2(client);
 		if (ret)
-			printk(KERN_ERR "vga_switcheroo: switching failed stage 2 %d\n", ret);
+			pr_err("switching failed stage 2 %d\n", ret);
 
 	} else {
-		printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id);
+		pr_info("setting delayed switch to client %d\n", client->id);
 		vgasr_priv.delayed_switch_active = true;
 		vgasr_priv.delayed_client_id = client_id;
 
 		ret = vga_switchto_stage1(client);
 		if (ret)
-			printk(KERN_ERR "vga_switcheroo: delayed switching stage 1 failed %d\n", ret);
+			pr_err("delayed switching stage 1 failed %d\n", ret);
 	}
 
 out:
@@ -516,32 +525,32 @@
 
 static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv)
 {
-	if (priv->switch_file) {
-		debugfs_remove(priv->switch_file);
-		priv->switch_file = NULL;
-	}
-	if (priv->debugfs_root) {
-		debugfs_remove(priv->debugfs_root);
-		priv->debugfs_root = NULL;
-	}
+	debugfs_remove(priv->switch_file);
+	priv->switch_file = NULL;
+
+	debugfs_remove(priv->debugfs_root);
+	priv->debugfs_root = NULL;
 }
 
 static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv)
 {
+	static const char mp[] = "/sys/kernel/debug";
+
 	/* already initialised */
 	if (priv->debugfs_root)
 		return 0;
 	priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL);
 
 	if (!priv->debugfs_root) {
-		printk(KERN_ERR "vga_switcheroo: Cannot create /sys/kernel/debug/vgaswitcheroo\n");
+		pr_err("Cannot create %s/vgaswitcheroo\n", mp);
 		goto fail;
 	}
 
 	priv->switch_file = debugfs_create_file("switch", 0644,
-						priv->debugfs_root, NULL, &vga_switcheroo_debugfs_fops);
+						priv->debugfs_root, NULL,
+						&vga_switcheroo_debugfs_fops);
 	if (!priv->switch_file) {
-		printk(KERN_ERR "vga_switcheroo: cannot create /sys/kernel/debug/vgaswitcheroo/switch\n");
+		pr_err("cannot create %s/vgaswitcheroo/switch\n", mp);
 		goto fail;
 	}
 	return 0;
@@ -560,7 +569,8 @@
 	if (!vgasr_priv.delayed_switch_active)
 		goto err;
 
-	printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id);
+	pr_info("processing delayed switch to %d\n",
+		vgasr_priv.delayed_client_id);
 
 	client = find_client_from_id(&vgasr_priv.clients,
 				     vgasr_priv.delayed_client_id);
@@ -569,7 +579,7 @@
 
 	ret = vga_switchto_stage2(client);
 	if (ret)
-		printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
+		pr_err("delayed switching failed stage 2 %d\n", ret);
 
 	vgasr_priv.delayed_switch_active = false;
 	err = 0;
@@ -579,7 +589,8 @@
 }
 EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
 
-static void vga_switcheroo_power_switch(struct pci_dev *pdev, enum vga_switcheroo_state state)
+static void vga_switcheroo_power_switch(struct pci_dev *pdev,
+					enum vga_switcheroo_state state)
 {
 	struct vga_switcheroo_client *client;
 
@@ -598,7 +609,8 @@
 
 /* force a PCI device to a certain state - mainly to turn off audio clients */
 
-void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic)
+void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev,
+				       enum vga_switcheroo_state dynamic)
 {
 	struct vga_switcheroo_client *client;
 
@@ -644,7 +656,8 @@
 
 /* this version is for the case where the power switch is separate
    to the device being powered down. */
-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
+int vga_switcheroo_init_domain_pm_ops(struct device *dev,
+				      struct dev_pm_domain *domain)
 {
 	/* copy over all the bus versions */
 	if (dev->bus && dev->bus->pm) {
@@ -675,7 +688,8 @@
 	/* we need to check if we have to switch back on the video
 	   device so the audio device can come back */
 	list_for_each_entry(client, &vgasr_priv.clients, list) {
-		if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) && client_is_vga(client)) {
+		if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) &&
+		    client_is_vga(client)) {
 			found = client;
 			ret = pm_runtime_get_sync(&client->pdev->dev);
 			if (ret) {
@@ -695,12 +709,15 @@
 	return ret;
 }
 
-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
+int
+vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev,
+						 struct dev_pm_domain *domain)
 {
 	/* copy over all the bus versions */
 	if (dev->bus && dev->bus->pm) {
 		domain->ops = *dev->bus->pm;
-		domain->ops.runtime_resume = vga_switcheroo_runtime_resume_hdmi_audio;
+		domain->ops.runtime_resume =
+			vga_switcheroo_runtime_resume_hdmi_audio;
 
 		dev->pm_domain = domain;
 		return 0;
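
The printk conversion above hinges on the standard pr_fmt() hook: when a translation unit defines pr_fmt(fmt) before any include that drags in printk.h, every pr_info()/pr_err()/pr_debug() in the file gets the prefix automatically, so the hand-written "vga_switcheroo: " strings can be dropped from each call site. A minimal sketch:

/* must come before the first include that pulls in <linux/printk.h> */
#define pr_fmt(fmt) "mydrv: " fmt

#include <linux/kernel.h>

static void example(void)
{
	pr_info("enabled\n");	/* emits "mydrv: enabled" */
}
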
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 7bcbf86..a0b4334 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -29,6 +29,8 @@
  *
  */
 
+#define pr_fmt(fmt) "vgaarb: " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
@@ -134,7 +136,6 @@
 {
 	return vga_default;
 }
-
 EXPORT_SYMBOL_GPL(vga_default_device);
 
 void vga_set_default_device(struct pci_dev *pdev)
@@ -298,9 +299,9 @@
 
 	pci_set_vga_state(vgadev->pdev, true, pci_bits, flags);
 
-	if (!vgadev->bridge_has_one_vga) {
+	if (!vgadev->bridge_has_one_vga)
 		vga_irq_set_state(vgadev, true);
-	}
+
 	vgadev->owns |= wants;
 lock_them:
 	vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
@@ -452,15 +453,15 @@
 }
 EXPORT_SYMBOL(vga_put);
 
-/* Rules for using a bridge to control a VGA descendant decoding:
-   if a bridge has only one VGA descendant then it can be used
-   to control the VGA routing for that device.
-   It should always use the bridge closest to the device to control it.
-   If a bridge has a direct VGA descendant, but also have a sub-bridge
-   VGA descendant then we cannot use that bridge to control the direct VGA descendant.
-   So for every device we register, we need to iterate all its parent bridges
-   so we can invalidate any devices using them properly.
-*/
+/*
+ * Rules for using a bridge to control a VGA descendant decoding: if a bridge
+ * has only one VGA descendant then it can be used to control the VGA routing
+ * for that device. It should always use the bridge closest to the device to
+ * control it. If a bridge has a direct VGA descendant, but also has a sub-
+ * bridge VGA descendant then we cannot use that bridge to control the direct
+ * VGA descendant. So for every device we register, we need to iterate all
+ * its parent bridges so we can invalidate any devices using them properly.
+ */
 static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
 {
 	struct vga_device *same_bridge_vgadev;
@@ -484,21 +485,26 @@
 
 			/* see if they share a bridge with this device */
 			if (new_bridge == bridge) {
-				/* if their direct parent bridge is the same
-				   as any bridge of this device then it can't be used
-				   for that device */
+				/*
+				 * If their direct parent bridge is the same
+				 * as any bridge of this device then it can't
+				 * be used for that device.
+				 */
 				same_bridge_vgadev->bridge_has_one_vga = false;
 			}
 
-			/* now iterate the previous devices bridge hierarchy */
-			/* if the new devices parent bridge is in the other devices
-			   hierarchy then we can't use it to control this device */
+			/*
+			 * Now iterate the previous device's bridge hierarchy.
+			 * If the new device's parent bridge is in the other
+			 * device's hierarchy then we can't use it to control
+			 * this device.
+			 */
 			while (bus) {
 				bridge = bus->self;
-				if (bridge) {
-					if (bridge == vgadev->pdev->bus->self)
-						vgadev->bridge_has_one_vga = false;
-				}
+
+				if (bridge && bridge == vgadev->pdev->bus->self)
+					vgadev->bridge_has_one_vga = false;
+
 				bus = bus->parent;
 			}
 		}
@@ -527,10 +533,10 @@
 	/* Allocate structure */
 	vgadev = kmalloc(sizeof(struct vga_device), GFP_KERNEL);
 	if (vgadev == NULL) {
-		pr_err("vgaarb: failed to allocate pci device\n");
-		/* What to do on allocation failure ? For now, let's
-		 * just do nothing, I'm not sure there is anything saner
-		 * to be done
+		pr_err("failed to allocate pci device\n");
+		/*
+	 * What to do on allocation failure? For now, let's just do
+		 * nothing, I'm not sure there is anything saner to be done.
 		 */
 		return false;
 	}
@@ -566,8 +572,8 @@
 		bridge = bus->self;
 		if (bridge) {
 			u16 l;
-			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
-					     &l);
+
+			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &l);
 			if (!(l & PCI_BRIDGE_CTL_VGA)) {
 				vgadev->owns = 0;
 				break;
@@ -581,8 +587,7 @@
 	 */
 	if (vga_default == NULL &&
 	    ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) {
-		pr_info("vgaarb: setting as boot device: PCI:%s\n",
-			pci_name(pdev));
+		pr_info("setting as boot device: PCI:%s\n", pci_name(pdev));
 		vga_set_default_device(pdev);
 	}
 
@@ -591,7 +596,7 @@
 	/* Add to the list */
 	list_add(&vgadev->list, &vga_list);
 	vga_count++;
-	pr_info("vgaarb: device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
+	pr_info("device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
 		pci_name(pdev),
 		vga_iostate_to_str(vgadev->decodes),
 		vga_iostate_to_str(vgadev->owns),
@@ -651,7 +656,7 @@
 	decodes_unlocked = vgadev->locks & decodes_removed;
 	vgadev->decodes = new_decodes;
 
-	pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
+	pr_info("device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
 		pci_name(vgadev->pdev),
 		vga_iostate_to_str(old_decodes),
 		vga_iostate_to_str(vgadev->decodes),
@@ -673,10 +678,12 @@
 	if (!(old_decodes & VGA_RSRC_LEGACY_MASK) &&
 	    new_decodes & VGA_RSRC_LEGACY_MASK)
 		vga_decode_count++;
-	pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
+	pr_debug("decoding count now is: %d\n", vga_decode_count);
 }
 
-static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
+static void __vga_set_legacy_decoding(struct pci_dev *pdev,
+				      unsigned int decodes,
+				      bool userspace)
 {
 	struct vga_device *vgadev;
 	unsigned long flags;
@@ -712,7 +719,8 @@
 /* call with NULL to unregister */
 int vga_client_register(struct pci_dev *pdev, void *cookie,
 			void (*irq_set_state)(void *cookie, bool state),
-			unsigned int (*set_vga_decode)(void *cookie, bool decode))
+			unsigned int (*set_vga_decode)(void *cookie,
+						       bool decode))
 {
 	int ret = -ENODEV;
 	struct vga_device *vgadev;
@@ -832,7 +840,7 @@
 	return 1;
 }
 
-static ssize_t vga_arb_read(struct file *file, char __user * buf,
+static ssize_t vga_arb_read(struct file *file, char __user *buf,
 			    size_t count, loff_t *ppos)
 {
 	struct vga_arb_private *priv = file->private_data;
@@ -899,7 +907,7 @@
  * TODO: To avoid parsing inside kernel and to improve the speed we may
  * consider use ioctl here
  */
-static ssize_t vga_arb_write(struct file *file, const char __user * buf,
+static ssize_t vga_arb_write(struct file *file, const char __user *buf,
 			     size_t count, loff_t *ppos)
 {
 	struct vga_arb_private *priv = file->private_data;
@@ -1075,13 +1083,13 @@
 				ret_val = -EPROTO;
 				goto done;
 			}
-			pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
+			pr_debug("%s ==> %x:%x:%x.%x\n", curr_pos,
 				domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 
 			pdev = pci_get_domain_bus_and_slot(domain, bus, devfn);
-			pr_debug("vgaarb: pdev %p\n", pdev);
+			pr_debug("pdev %p\n", pdev);
 			if (!pdev) {
-				pr_err("vgaarb: invalid PCI address %x:%x:%x\n",
+				pr_err("invalid PCI address %x:%x:%x\n",
 					domain, bus, devfn);
 				ret_val = -ENODEV;
 				goto done;
@@ -1089,10 +1097,13 @@
 		}
 
 		vgadev = vgadev_find(pdev);
-		pr_debug("vgaarb: vgadev %p\n", vgadev);
+		pr_debug("vgadev %p\n", vgadev);
 		if (vgadev == NULL) {
-			pr_err("vgaarb: this pci device is not a vga device\n");
-			pci_dev_put(pdev);
+			if (pdev) {
+				pr_err("this pci device is not a vga device\n");
+				pci_dev_put(pdev);
+			}
+
 			ret_val = -ENODEV;
 			goto done;
 		}
@@ -1109,7 +1120,7 @@
 			}
 		}
 		if (i == MAX_USER_CARDS) {
-			pr_err("vgaarb: maximum user cards (%d) number reached!\n",
+			pr_err("maximum user cards (%d) number reached!\n",
 				MAX_USER_CARDS);
 			pci_dev_put(pdev);
 			/* XXX: which value to return? */
@@ -1125,7 +1136,7 @@
 	} else if (strncmp(curr_pos, "decodes ", 8) == 0) {
 		curr_pos += 8;
 		remaining -= 8;
-		pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv);
+		pr_debug("client 0x%p called 'decodes'\n", priv);
 
 		if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
 			ret_val = -EPROTO;
@@ -1150,7 +1161,7 @@
 	return ret_val;
 }
 
-static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait)
+static unsigned int vga_arb_fpoll(struct file *file, poll_table *wait)
 {
 	struct vga_arb_private *priv = file->private_data;
 
@@ -1246,7 +1257,8 @@
 		else
 			new_state = true;
 		if (vgadev->set_vga_decode) {
-			new_decodes = vgadev->set_vga_decode(vgadev->cookie, new_state);
+			new_decodes = vgadev->set_vga_decode(vgadev->cookie,
+							     new_state);
 			vga_update_device_decodes(vgadev, new_decodes);
 		}
 	}
@@ -1300,7 +1312,7 @@
 
 	rc = misc_register(&vga_arb_device);
 	if (rc < 0)
-		pr_err("vgaarb: error %d registering device\n", rc);
+		pr_err("error %d registering device\n", rc);
 
 	bus_register_notifier(&pci_bus_type, &pci_notifier);
 
@@ -1312,21 +1324,29 @@
 			       PCI_ANY_ID, pdev)) != NULL)
 		vga_arbiter_add_pci_device(pdev);
 
-	pr_info("vgaarb: loaded\n");
+	pr_info("loaded\n");
 
 	list_for_each_entry(vgadev, &vga_list, list) {
 #if defined(CONFIG_X86) || defined(CONFIG_IA64)
-		/* Override I/O based detection done by vga_arbiter_add_pci_device()
-		 * as it may take the wrong device (e.g. on Apple system under EFI).
+		/*
+		 * Override vga_arbiter_add_pci_device()'s I/O based detection
+		 * as it may take the wrong device (e.g. on Apple system under
+		 * EFI).
 		 *
-		 * Select the device owning the boot framebuffer if there is one.
+		 * Select the device owning the boot framebuffer if there is
+		 * one.
 		 */
-		resource_size_t start, end;
+		resource_size_t start, end, limit;
+		unsigned long flags;
 		int i;
 
+		limit = screen_info.lfb_base + screen_info.lfb_size;
+
 		/* Does firmware framebuffer belong to us? */
 		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
-			if (!(pci_resource_flags(vgadev->pdev, i) & IORESOURCE_MEM))
+			flags = pci_resource_flags(vgadev->pdev, i);
+
+			if ((flags & IORESOURCE_MEM) == 0)
 				continue;
 
 			start = pci_resource_start(vgadev->pdev, i);
@@ -1335,22 +1355,24 @@
 			if (!start || !end)
 				continue;
 
-			if (screen_info.lfb_base < start ||
-			    (screen_info.lfb_base + screen_info.lfb_size) >= end)
+			if (screen_info.lfb_base < start || limit >= end)
 				continue;
+
 			if (!vga_default_device())
-				pr_info("vgaarb: setting as boot device: PCI:%s\n",
+				pr_info("setting as boot device: PCI:%s\n",
 					pci_name(vgadev->pdev));
 			else if (vgadev->pdev != vga_default_device())
-				pr_info("vgaarb: overriding boot device: PCI:%s\n",
+				pr_info("overriding boot device: PCI:%s\n",
 					pci_name(vgadev->pdev));
 			vga_set_default_device(vgadev->pdev);
 		}
 #endif
 		if (vgadev->bridge_has_one_vga)
-			pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev));
+			pr_info("bridge control possible %s\n",
+				pci_name(vgadev->pdev));
 		else
-			pr_info("vgaarb: no bridge control possible %s\n", pci_name(vgadev->pdev));
+			pr_info("no bridge control possible %s\n",
+				pci_name(vgadev->pdev));
 	}
 	return rc;
 }
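
The boot-device override in the init loop above reduces to a containment test: the firmware framebuffer [lfb_base, lfb_base + lfb_size) must lie inside one of the device's memory BARs [start, end]. As a standalone sketch (hypothetical helper name, same comparison as the patch):

#include <linux/types.h>

static bool fb_within_bar(resource_size_t fb_base, resource_size_t fb_limit,
			  resource_size_t start, resource_size_t end)
{
	/* mirrors the loop: skip unless lfb_base >= start && limit < end */
	return fb_base >= start && fb_limit < end;
}
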
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index bcd914a..70a11ac 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2293,6 +2293,8 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) },
+	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
+	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 5ecbb3f..eaef9bc 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -88,12 +88,13 @@
 #include <linux/slab.h>
 #include <linux/wait.h>
 #include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/itco_wdt.h>
 
 #if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
 		defined CONFIG_DMI
 #include <linux/gpio.h>
 #include <linux/i2c-mux-gpio.h>
-#include <linux/platform_device.h>
 #endif
 
 /* I801 SMBus address offsets */
@@ -113,6 +114,16 @@
 #define SMBPCICTL	0x004
 #define SMBPCISTS	0x006
 #define SMBHSTCFG	0x040
+#define TCOBASE		0x050
+#define TCOCTL		0x054
+
+#define ACPIBASE		0x040
+#define ACPIBASE_SMI_OFF	0x030
+#define ACPICTRL		0x044
+#define ACPICTRL_EN		0x080
+
+#define SBREG_BAR		0x10
+#define SBREG_SMBCTRL		0xc6000c
 
 /* Host status bits for SMBPCISTS */
 #define SMBPCISTS_INTS		0x08
@@ -125,6 +136,9 @@
 #define SMBHSTCFG_SMB_SMI_EN	2
 #define SMBHSTCFG_I2C_EN	4
 
+/* TCO configuration bits for TCOCTL */
+#define TCOCTL_EN		0x0100
+
 /* Auxiliary control register bits, ICH4+ only */
 #define SMBAUXCTL_CRC		1
 #define SMBAUXCTL_E32B		2
@@ -221,6 +235,7 @@
 	const struct i801_mux_config *mux_drvdata;
 	struct platform_device *mux_pdev;
 #endif
+	struct platform_device *tco_pdev;
 };
 
 #define FEATURE_SMBUS_PEC	(1 << 0)
@@ -230,6 +245,7 @@
 #define FEATURE_IRQ		(1 << 4)
 /* Not really a feature, but it's convenient to handle it as such */
 #define FEATURE_IDF		(1 << 15)
+#define FEATURE_TCO		(1 << 16)
 
 static const char *i801_feature_names[] = {
 	"SMBus PEC",
@@ -1132,6 +1148,95 @@
 }
 #endif
 
+static const struct itco_wdt_platform_data tco_platform_data = {
+	.name = "Intel PCH",
+	.version = 4,
+};
+
+static DEFINE_SPINLOCK(p2sb_spinlock);
+
+static void i801_add_tco(struct i801_priv *priv)
+{
+	struct pci_dev *pci_dev = priv->pci_dev;
+	struct resource tco_res[3], *res;
+	struct platform_device *pdev;
+	unsigned int devfn;
+	u32 tco_base, tco_ctl;
+	u32 base_addr, ctrl_val;
+	u64 base64_addr;
+
+	if (!(priv->features & FEATURE_TCO))
+		return;
+
+	pci_read_config_dword(pci_dev, TCOBASE, &tco_base);
+	pci_read_config_dword(pci_dev, TCOCTL, &tco_ctl);
+	if (!(tco_ctl & TCOCTL_EN))
+		return;
+
+	memset(tco_res, 0, sizeof(tco_res));
+
+	res = &tco_res[ICH_RES_IO_TCO];
+	res->start = tco_base & ~1;
+	res->end = res->start + 32 - 1;
+	res->flags = IORESOURCE_IO;
+
+	/*
+	 * Power Management registers.
+	 */
+	devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 2);
+	pci_bus_read_config_dword(pci_dev->bus, devfn, ACPIBASE, &base_addr);
+
+	res = &tco_res[ICH_RES_IO_SMI];
+	res->start = (base_addr & ~1) + ACPIBASE_SMI_OFF;
+	res->end = res->start + 3;
+	res->flags = IORESOURCE_IO;
+
+	/*
+	 * Enable the ACPI I/O space.
+	 */
+	pci_bus_read_config_dword(pci_dev->bus, devfn, ACPICTRL, &ctrl_val);
+	ctrl_val |= ACPICTRL_EN;
+	pci_bus_write_config_dword(pci_dev->bus, devfn, ACPICTRL, ctrl_val);
+
+	/*
+	 * We must access the NO_REBOOT bit over the Primary to Sideband
+	 * bridge (P2SB). The BIOS prevents the P2SB device from being
+	 * enumerated by the PCI subsystem, so we need to unhide/hide it
+	 * to look up the P2SB BAR.
+	 */
+	spin_lock(&p2sb_spinlock);
+
+	devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 1);
+
+	/* Unhide the P2SB device */
+	pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, 0x0);
+
+	pci_bus_read_config_dword(pci_dev->bus, devfn, SBREG_BAR, &base_addr);
+	base64_addr = base_addr & 0xfffffff0;
+
+	pci_bus_read_config_dword(pci_dev->bus, devfn, SBREG_BAR + 0x4, &base_addr);
+	base64_addr |= (u64)base_addr << 32;
+
+	/* Hide the P2SB device */
+	pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, 0x1);
+	spin_unlock(&p2sb_spinlock);
+
+	res = &tco_res[ICH_RES_MEM_OFF];
+	res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+	res->end = res->start + 3;
+	res->flags = IORESOURCE_MEM;
+
+	pdev = platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
+						 tco_res, 3, &tco_platform_data,
+						 sizeof(tco_platform_data));
+	if (IS_ERR(pdev)) {
+		dev_warn(&pci_dev->dev, "failed to create iTCO device\n");
+		return;
+	}
+
+	priv->tco_pdev = pdev;
+}
+
 static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	unsigned char temp;
@@ -1149,6 +1254,15 @@
 
 	priv->pci_dev = dev;
 	switch (dev->device) {
+	case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
+	case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
+		priv->features |= FEATURE_I2C_BLOCK_READ;
+		priv->features |= FEATURE_IRQ;
+		priv->features |= FEATURE_SMBUS_PEC;
+		priv->features |= FEATURE_BLOCK_BUFFER;
+		priv->features |= FEATURE_TCO;
+		break;
+
 	case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0:
 	case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1:
 	case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2:
@@ -1265,6 +1379,8 @@
 	dev_info(&dev->dev, "SMBus using %s\n",
 		 priv->features & FEATURE_IRQ ? "PCI interrupt" : "polling");
 
+	i801_add_tco(priv);
+
 	/* set up the sysfs linkage to our parent device */
 	priv->adapter.dev.parent = &dev->dev;
 
@@ -1296,6 +1412,8 @@
 	i2c_del_adapter(&priv->adapter);
 	pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
 
+	platform_device_unregister(priv->tco_pdev);
+
 	/*
 	 * do not call pci_disable_device(dev) since it can cause hard hangs on
 	 * some systems during power-off (eg. Fujitsu-Siemens Lifebook E8010)
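
The new i801_add_tco() delegates the watchdog to the existing iTCO_wdt driver by registering a child platform device that carries the three resources plus platform data. A reduced sketch of that final registration step (assuming the same "iTCO_wdt" name and itco_wdt_platform_data layout as above):

#include <linux/platform_device.h>
#include <linux/platform_data/itco_wdt.h>

static struct platform_device *example_register_tco(struct device *parent,
						    const struct resource *res)
{
	static const struct itco_wdt_platform_data pdata = {
		.name = "Intel PCH",
		.version = 4,
	};

	/* registers "iTCO_wdt" as a child device with 3 resources attached */
	return platform_device_register_resndata(parent, "iTCO_wdt", -1,
						 res, 3, &pdata,
						 sizeof(pdata));
}
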
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index f101bb5..993eb20 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -469,16 +469,12 @@
 	dev = &client->dev;
 
 	/* gpio interrupt pin */
-	gpio = devm_gpiod_get_index(dev, STK3310_GPIO, 0);
+	gpio = devm_gpiod_get_index(dev, STK3310_GPIO, 0, GPIOD_IN);
 	if (IS_ERR(gpio)) {
 		dev_err(dev, "acpi gpio get index failed\n");
 		return PTR_ERR(gpio);
 	}
 
-	ret = gpiod_direction_input(gpio);
-	if (ret)
-		return ret;
-
 	ret = gpiod_to_irq(gpio);
 	dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
 
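This fix (and the bmc150_magn one below) follows from a gpiod API change: devm_gpiod_get_index() now takes a flags argument, so the line direction is configured at request time and the separate gpiod_direction_input() call becomes redundant. The resulting idiom, sketched with an illustrative con_id:

#include <linux/gpio/consumer.h>

static int example_request_irq_gpio(struct device *dev)
{
	struct gpio_desc *gpio;

	/* GPIOD_IN configures the line as an input during the request */
	gpio = devm_gpiod_get_index(dev, "irq", 0, GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);

	return gpiod_to_irq(gpio);	/* map the descriptor to an IRQ */
}
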
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index e330205..d8e614c 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -845,16 +845,12 @@
 	dev = &client->dev;
 
 	/* data ready GPIO interrupt pin */
-	gpio = devm_gpiod_get_index(dev, BMC150_MAGN_GPIO_INT, 0);
+	gpio = devm_gpiod_get_index(dev, BMC150_MAGN_GPIO_INT, 0, GPIOD_IN);
 	if (IS_ERR(gpio)) {
 		dev_err(dev, "ACPI GPIO get index failed\n");
 		return PTR_ERR(gpio);
 	}
 
-	ret = gpiod_direction_input(gpio);
-	if (ret)
-		return ret;
-
 	ret = gpiod_to_irq(gpio);
 
 	dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 36eb3d0..180a8f7 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -871,7 +871,7 @@
 		if (is_eth) {
 			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
 			if (be32_to_cpu(cqe->vlan_my_qpn) &
-					MLX4_CQE_VLAN_PRESENT_MASK) {
+					MLX4_CQE_CVLAN_PRESENT_MASK) {
 				wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
 					MLX4_CQE_VID_MASK;
 			} else {
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index 8f4a30fc..c642082 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -343,9 +343,8 @@
 	__set_bit(EV_FF, dev->evbit);
 
 	/* Copy "true" bits into ff device bitmap */
-	for (i = 0; i <= FF_MAX; i++)
-		if (test_bit(i, dev->ffbit))
-			__set_bit(i, ff->ffbit);
+	for_each_set_bit(i, dev->ffbit, FF_CNT)
+		__set_bit(i, ff->ffbit);
 
 	/* we can emulate RUMBLE with periodic effects */
 	if (test_bit(FF_PERIODIC, ff->ffbit))
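
for_each_set_bit() iterates only over the bits that are actually set, replacing the open-coded loop that tested every index up to FF_MAX. A small self-contained illustration:

#include <linux/bitops.h>

static void example_copy_set_bits(const unsigned long *src,
				  unsigned long *dst, unsigned int nbits)
{
	unsigned int i;

	/* visits only indices where src has a 1 bit */
	for_each_set_bit(i, src, nbits)
		__set_bit(i, dst);
}
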
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 78d2499..5391abd 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -674,13 +674,19 @@
  */
 static void input_dev_release_keys(struct input_dev *dev)
 {
+	bool need_sync = false;
 	int code;
 
 	if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
-		for_each_set_bit(code, dev->key, KEY_CNT)
+		for_each_set_bit(code, dev->key, KEY_CNT) {
 			input_pass_event(dev, EV_KEY, code, 0);
+			need_sync = true;
+		}
+
+		if (need_sync)
+			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+
 		memset(dev->key, 0, sizeof(dev->key));
-		input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
 	}
 }
 
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 1d247bc..6cb5a3e 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -859,12 +859,11 @@
 	joydev->handle.handler = handler;
 	joydev->handle.private = joydev;
 
-	for (i = 0; i < ABS_CNT; i++)
-		if (test_bit(i, dev->absbit)) {
-			joydev->absmap[i] = joydev->nabs;
-			joydev->abspam[joydev->nabs] = i;
-			joydev->nabs++;
-		}
+	for_each_set_bit(i, dev->absbit, ABS_CNT) {
+		joydev->absmap[i] = joydev->nabs;
+		joydev->abspam[joydev->nabs] = i;
+		joydev->nabs++;
+	}
 
 	for (i = BTN_JOYSTICK - BTN_MISC; i < KEY_MAX - BTN_MISC + 1; i++)
 		if (test_bit(i + BTN_MISC, dev->keybit)) {
diff --git a/drivers/input/joystick/zhenhua.c b/drivers/input/joystick/zhenhua.c
index 30af2e8..4a8258b 100644
--- a/drivers/input/joystick/zhenhua.c
+++ b/drivers/input/joystick/zhenhua.c
@@ -47,6 +47,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/bitrev.h>
 #include <linux/input.h>
 #include <linux/serio.h>
 
@@ -72,16 +73,6 @@
 	char phys[32];
 };
 
-
-/* bits in all incoming bytes needs to be "reversed" */
-static int zhenhua_bitreverse(int x)
-{
-	x = ((x & 0xaa) >> 1) | ((x & 0x55) << 1);
-	x = ((x & 0xcc) >> 2) | ((x & 0x33) << 2);
-	x = ((x & 0xf0) >> 4) | ((x & 0x0f) << 4);
-	return x;
-}
-
 /*
  * zhenhua_process_packet() decodes packets the driver receives from the
  * RC transmitter. It updates the data accordingly.
@@ -120,7 +111,7 @@
 		return IRQ_HANDLED;	/* wrong MSB -- ignore this byte */
 
 	if (zhenhua->idx < ZHENHUA_MAX_LENGTH)
-		zhenhua->data[zhenhua->idx++] = zhenhua_bitreverse(data);
+		zhenhua->data[zhenhua->idx++] = bitrev8(data);
 
 	if (zhenhua->idx == ZHENHUA_MAX_LENGTH) {
 		zhenhua_process_packet(zhenhua);
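
bitrev8() from <linux/bitrev.h> performs exactly the byte-wide reversal the driver used to open-code: swap adjacent bits, then bit pairs, then nibbles. For reference, the removed helper next to its replacement:

#include <linux/bitrev.h>
#include <linux/types.h>

/* the deleted open-coded version, shown only for the equivalence */
static u8 open_coded_bitreverse(u8 x)
{
	x = ((x & 0xaa) >> 1) | ((x & 0x55) << 1);
	x = ((x & 0xcc) >> 2) | ((x & 0x33) << 2);
	x = ((x & 0xf0) >> 4) | ((x & 0x0f) << 4);
	return x;
}

/* for every byte b: open_coded_bitreverse(b) == bitrev8(b) */
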
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 82a8fb5..2e80107 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -187,7 +187,7 @@
 
 config KEYBOARD_GPIO
 	tristate "GPIO Buttons"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  This driver implements support for buttons connected
 	  to GPIO pins of various CPUs (and some other chips).
@@ -253,7 +253,7 @@
 
 config KEYBOARD_MATRIX
 	tristate "GPIO driven matrix keypad support"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	select INPUT_MATRIXKMAP
 	help
 	  Enable support for GPIO driven matrix keypad.
@@ -689,7 +689,7 @@
 config KEYBOARD_CROS_EC
 	tristate "ChromeOS EC keyboard"
 	select INPUT_MATRIXKMAP
-	depends on CROS_EC_PROTO
+	depends on MFD_CROS_EC
 	help
 	  Say Y here to enable the matrix keyboard used by ChromeOS devices
 	  and implemented on the ChromeOS EC. You must enable one bus option
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 6ed83cf..4d446d5 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -1097,7 +1097,6 @@
 static struct i2c_driver adp5589_driver = {
 	.driver = {
 		.name = KBUILD_MODNAME,
-		.owner = THIS_MODULE,
 		.pm = &adp5589_dev_pm_ops,
 	},
 	.probe = adp5589_probe,
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index f07461a..378db10 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/input.h>
+#include <linux/leds.h>
 #include <linux/of_irq.h>
 #include <linux/regmap.h>
 #include <linux/i2c.h>
@@ -47,6 +48,20 @@
 #define CAP11XX_REG_CONFIG2		0x44
 #define CAP11XX_REG_CONFIG2_ALT_POL	BIT(6)
 #define CAP11XX_REG_SENSOR_BASE_CNT(X)	(0x50 + (X))
+#define CAP11XX_REG_LED_POLARITY	0x73
+#define CAP11XX_REG_LED_OUTPUT_CONTROL	0x74
+
+#define CAP11XX_REG_LED_DUTY_CYCLE_1	0x90
+#define CAP11XX_REG_LED_DUTY_CYCLE_2	0x91
+#define CAP11XX_REG_LED_DUTY_CYCLE_3	0x92
+#define CAP11XX_REG_LED_DUTY_CYCLE_4	0x93
+
+#define CAP11XX_REG_LED_DUTY_MIN_MASK	(0x0f)
+#define CAP11XX_REG_LED_DUTY_MIN_MASK_SHIFT	(0)
+#define CAP11XX_REG_LED_DUTY_MAX_MASK	(0xf0)
+#define CAP11XX_REG_LED_DUTY_MAX_MASK_SHIFT	(4)
+#define CAP11XX_REG_LED_DUTY_MAX_VALUE	(15)
+
 #define CAP11XX_REG_SENSOR_CALIB	(0xb1 + (X))
 #define CAP11XX_REG_SENSOR_CALIB_LSB1	0xb9
 #define CAP11XX_REG_SENSOR_CALIB_LSB2	0xba
@@ -56,10 +71,23 @@
 
 #define CAP11XX_MANUFACTURER_ID	0x5d
 
+#ifdef CONFIG_LEDS_CLASS
+struct cap11xx_led {
+	struct cap11xx_priv *priv;
+	struct led_classdev cdev;
+	struct work_struct work;
+	u32 reg;
+	enum led_brightness new_brightness;
+};
+#endif
+
 struct cap11xx_priv {
 	struct regmap *regmap;
 	struct input_dev *idev;
 
+	struct cap11xx_led *leds;
+	int num_leds;
+
 	/* config */
 	u32 keycodes[];
 };
@@ -67,6 +95,7 @@
 struct cap11xx_hw_model {
 	u8 product_id;
 	unsigned int num_channels;
+	unsigned int num_leds;
 };
 
 enum {
@@ -76,9 +105,9 @@
 };
 
 static const struct cap11xx_hw_model cap11xx_devices[] = {
-	[CAP1106] = { .product_id = 0x55, .num_channels = 6 },
-	[CAP1126] = { .product_id = 0x53, .num_channels = 6 },
-	[CAP1188] = { .product_id = 0x50, .num_channels = 8 },
+	[CAP1106] = { .product_id = 0x55, .num_channels = 6, .num_leds = 0 },
+	[CAP1126] = { .product_id = 0x53, .num_channels = 6, .num_leds = 2 },
+	[CAP1188] = { .product_id = 0x50, .num_channels = 8, .num_leds = 8 },
 };
 
 static const struct reg_default cap11xx_reg_defaults[] = {
@@ -111,6 +140,7 @@
 	{ CAP11XX_REG_STANDBY_SENSITIVITY,	0x02 },
 	{ CAP11XX_REG_STANDBY_THRESH,		0x40 },
 	{ CAP11XX_REG_CONFIG2,			0x40 },
+	{ CAP11XX_REG_LED_POLARITY,		0x00 },
 	{ CAP11XX_REG_SENSOR_CALIB_LSB1,	0x00 },
 	{ CAP11XX_REG_SENSOR_CALIB_LSB2,	0x00 },
 };
@@ -177,6 +207,12 @@
 
 static int cap11xx_set_sleep(struct cap11xx_priv *priv, bool sleep)
 {
+	/*
+	 * DLSEEP mode will turn off all LEDs, so prevent it
+	 */
+	if (IS_ENABLED(CONFIG_LEDS_CLASS) && priv->num_leds)
+		return 0;
+
 	return regmap_update_bits(priv->regmap, CAP11XX_REG_MAIN_CONTROL,
 				  CAP11XX_REG_MAIN_CONTROL_DLSEEP,
 				  sleep ? CAP11XX_REG_MAIN_CONTROL_DLSEEP : 0);
@@ -196,6 +232,104 @@
 	cap11xx_set_sleep(priv, true);
 }
 
+#ifdef CONFIG_LEDS_CLASS
+static void cap11xx_led_work(struct work_struct *work)
+{
+	struct cap11xx_led *led = container_of(work, struct cap11xx_led, work);
+	struct cap11xx_priv *priv = led->priv;
+	int value = led->new_brightness;
+
+	/*
+	 * All LEDs share the same duty cycle as this is a HW limitation.
+	 * Brightness levels per LED are either 0 (OFF) or 1 (ON).
+	 */
+	regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL,
+				BIT(led->reg), value ? BIT(led->reg) : 0);
+}
+
+static void cap11xx_led_set(struct led_classdev *cdev,
+			   enum led_brightness value)
+{
+	struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
+
+	if (led->new_brightness == value)
+		return;
+
+	led->new_brightness = value;
+	schedule_work(&led->work);
+}
+
+static int cap11xx_init_leds(struct device *dev,
+			     struct cap11xx_priv *priv, int num_leds)
+{
+	struct device_node *node = dev->of_node, *child;
+	struct cap11xx_led *led;
+	int cnt = of_get_child_count(node);
+	int error;
+
+	if (!num_leds || !cnt)
+		return 0;
+
+	if (cnt > num_leds)
+		return -EINVAL;
+
+	led = devm_kcalloc(dev, cnt, sizeof(struct cap11xx_led), GFP_KERNEL);
+	if (!led)
+		return -ENOMEM;
+
+	priv->leds = led;
+
+	error = regmap_update_bits(priv->regmap,
+				CAP11XX_REG_LED_OUTPUT_CONTROL, 0xff, 0);
+	if (error)
+		return error;
+
+	error = regmap_update_bits(priv->regmap, CAP11XX_REG_LED_DUTY_CYCLE_4,
+				CAP11XX_REG_LED_DUTY_MAX_MASK,
+				CAP11XX_REG_LED_DUTY_MAX_VALUE <<
+				CAP11XX_REG_LED_DUTY_MAX_MASK_SHIFT);
+	if (error)
+		return error;
+
+	for_each_child_of_node(node, child) {
+		u32 reg;
+
+		led->cdev.name =
+			of_get_property(child, "label", NULL) ? : child->name;
+		led->cdev.default_trigger =
+			of_get_property(child, "linux,default-trigger", NULL);
+		led->cdev.flags = 0;
+		led->cdev.brightness_set = cap11xx_led_set;
+		led->cdev.max_brightness = 1;
+		led->cdev.brightness = LED_OFF;
+
+		error = of_property_read_u32(child, "reg", &reg);
+		if (error != 0 || reg >= num_leds)
+			return -EINVAL;
+
+		led->reg = reg;
+		led->priv = priv;
+
+		INIT_WORK(&led->work, cap11xx_led_work);
+
+		error = devm_led_classdev_register(dev, &led->cdev);
+		if (error)
+			return error;
+
+		priv->num_leds++;
+		led++;
+	}
+
+	return 0;
+}
+#else
+static int cap11xx_init_leds(struct device *dev,
+			     struct cap11xx_priv *priv, int num_leds)
+{
+	return 0;
+}
+#endif
+
 static int cap11xx_i2c_probe(struct i2c_client *i2c_client,
 			     const struct i2c_device_id *id)
 {
@@ -316,6 +450,10 @@
 	priv->idev->open = cap11xx_input_open;
 	priv->idev->close = cap11xx_input_close;
 
+	error = cap11xx_init_leds(dev, priv, cap->num_leds);
+	if (error)
+		return error;
+
 	input_set_drvdata(priv->idev, priv);
 
 	/*
@@ -361,7 +499,6 @@
 static struct i2c_driver cap11xx_i2c_driver = {
 	.driver = {
 		.name	= "cap11xx",
-		.owner	= THIS_MODULE,
 		.of_match_table = cap11xx_dt_ids,
 	},
 	.id_table	= cap11xx_i2c_ids,
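
A note on the LED plumbing above: led_classdev's brightness_set() may be called from atomic context, while a regmap write over I2C can sleep, so the driver defers the register access to a workqueue. The general shape of that pattern, sketched with illustrative names:

#include <linux/leds.h>
#include <linux/workqueue.h>

struct example_led {
	struct led_classdev cdev;
	struct work_struct work;
	enum led_brightness new_brightness;
};

static void example_led_work(struct work_struct *work)
{
	struct example_led *led = container_of(work, struct example_led, work);

	/*
	 * Process context: safe to do the sleeping I2C/regmap write here,
	 * using led->new_brightness.
	 */
}

static void example_led_set(struct led_classdev *cdev,
			    enum led_brightness value)
{
	struct example_led *led = container_of(cdev, struct example_led, cdev);

	led->new_brightness = value;
	schedule_work(&led->work);	/* defer the sleeping bus access */
}
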
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index ddf4045..9d517ca 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -239,6 +239,11 @@
 		}
 	}
 
+	if (i == ddata->pdata->nbuttons) {
+		error = -EINVAL;
+		goto out;
+	}
+
 	mutex_lock(&ddata->disable_lock);
 
 	for (i = 0; i < ddata->pdata->nbuttons; i++) {
@@ -655,7 +660,9 @@
 		if (of_property_read_u32(pp, "linux,input-type", &button->type))
 			button->type = EV_KEY;
 
-		button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
+		button->wakeup = of_property_read_bool(pp, "wakeup-source") ||
+				 /* legacy name */
+				 of_property_read_bool(pp, "gpio-key,wakeup");
 
 		button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL);
 
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index c6dc644..870cfa6 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -152,7 +152,10 @@
 					     &button->type))
 			button->type = EV_KEY;
 
-		button->wakeup = fwnode_property_present(child, "gpio-key,wakeup");
+		button->wakeup =
+			fwnode_property_read_bool(child, "wakeup-source") ||
+			/* legacy name */
+			fwnode_property_read_bool(child, "gpio-key,wakeup");
 
 		if (fwnode_property_read_u32(child, "debounce-interval",
 					     &button->debounce_interval))
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
index 0ad422b..c717e8f 100644
--- a/drivers/input/keyboard/lm8333.c
+++ b/drivers/input/keyboard/lm8333.c
@@ -223,7 +223,6 @@
 static struct i2c_driver lm8333_driver = {
 	.driver = {
 		.name		= "lm8333",
-		.owner		= THIS_MODULE,
 	},
 	.probe		= lm8333_probe,
 	.remove		= lm8333_remove,
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index b370a59..7f12b65 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -425,8 +425,10 @@
 
 	if (of_get_property(np, "linux,no-autorepeat", NULL))
 		pdata->no_autorepeat = true;
-	if (of_get_property(np, "linux,wakeup", NULL))
-		pdata->wakeup = true;
+
+	pdata->wakeup = of_property_read_bool(np, "wakeup-source") ||
+			of_property_read_bool(np, "linux,wakeup"); /* legacy */
+
 	if (of_get_property(np, "gpio-activelow", NULL))
 		pdata->active_low = true;
 
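Several keyboard patches in this pull adopt the same DT convention: prefer the generic "wakeup-source" property while still honouring the driver-specific legacy name. The shared idiom, as a sketch:

#include <linux/of.h>

static bool example_read_wakeup(const struct device_node *np)
{
	/* generic property first, legacy per-driver name as fallback */
	return of_property_read_bool(np, "wakeup-source") ||
	       of_property_read_bool(np, "linux,wakeup");
}
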
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 375b05c..31090d7 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -265,7 +265,6 @@
 static struct i2c_driver mcs_touchkey_driver = {
 	.driver = {
 		.name	= "mcs_touchkey",
-		.owner	= THIS_MODULE,
 		.pm	= &mcs_touchkey_pm_ops,
 	},
 	.probe		= mcs_touchkey_probe,
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index 3aa2ec4..0fd612d 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -305,7 +305,6 @@
 static struct i2c_driver mpr_touchkey_driver = {
 	.driver = {
 		.name	= "mpr121",
-		.owner	= THIS_MODULE,
 		.pm	= &mpr121_touchkey_pm_ops,
 	},
 	.id_table	= mpr121_id,
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 32580af..5c68e3f 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -507,6 +507,7 @@
  */
 static int pmic8xxx_kp_probe(struct platform_device *pdev)
 {
+	struct device_node *np = pdev->dev.of_node;
 	unsigned int rows, cols;
 	bool repeat;
 	bool wakeup;
@@ -524,10 +525,11 @@
 		return -EINVAL;
 	}
 
-	repeat = !of_property_read_bool(pdev->dev.of_node,
-					"linux,input-no-autorepeat");
-	wakeup = of_property_read_bool(pdev->dev.of_node,
-					"linux,keypad-wakeup");
+	repeat = !of_property_read_bool(np, "linux,input-no-autorepeat");
+
+	wakeup = of_property_read_bool(np, "wakeup-source") ||
+		 /* legacy name */
+		 of_property_read_bool(np, "linux,keypad-wakeup");
 
 	kp = devm_kzalloc(&pdev->dev, sizeof(*kp), GFP_KERNEL);
 	if (!kp)
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index 52cd6e8..5a57787 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -277,7 +277,6 @@
 static struct i2c_driver qt1070_driver = {
 	.driver	= {
 		.name	= "qt1070",
-		.owner	= THIS_MODULE,
 		.pm	= &qt1070_pm_ops,
 	},
 	.id_table	= qt1070_id,
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 819b228..43b8648 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -497,7 +497,6 @@
 static struct i2c_driver qt2160_driver = {
 	.driver = {
 		.name	= "qt2160",
-		.owner  = THIS_MODULE,
 	},
 
 	.id_table	= qt2160_idtable,
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index 43e48dac..4e319eb 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -299,8 +299,10 @@
 	if (of_get_property(np, "linux,input-no-autorepeat", NULL))
 		pdata->no_autorepeat = true;
 
-	if (of_get_property(np, "linux,input-wakeup", NULL))
-		pdata->wakeup = true;
+	pdata->wakeup = of_property_read_bool(np, "wakeup-source") ||
+			/* legacy name */
+			of_property_read_bool(np, "linux,input-wakeup");
+
 
 	return pdata;
 }
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 31c606a..e92dfd8 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -17,6 +17,7 @@
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/mfd/tc3589x.h>
+#include <linux/device.h>
 
 /* Maximum supported keypad matrix row/columns size */
 #define TC3589x_MAX_KPROW               8
@@ -352,7 +353,10 @@
 	}
 
 	plat->no_autorepeat = of_property_read_bool(np, "linux,no-autorepeat");
-	plat->enable_wakeup = of_property_read_bool(np, "linux,wakeup");
+
+	plat->enable_wakeup = of_property_read_bool(np, "wakeup-source") ||
+			      /* legacy name */
+			      of_property_read_bool(np, "linux,wakeup");
 
 	/* The custom delay format is ms/16 */
 	of_property_read_u32(np, "debounce-delay-ms", &debounce_ms);
@@ -386,12 +390,15 @@
 	if (irq < 0)
 		return irq;
 
-	keypad = kzalloc(sizeof(struct tc_keypad), GFP_KERNEL);
-	input = input_allocate_device();
-	if (!keypad || !input) {
-		dev_err(&pdev->dev, "failed to allocate keypad memory\n");
-		error = -ENOMEM;
-		goto err_free_mem;
+	keypad = devm_kzalloc(&pdev->dev, sizeof(struct tc_keypad),
+			      GFP_KERNEL);
+	if (!keypad)
+		return -ENOMEM;
+
+	input = devm_input_allocate_device(&pdev->dev);
+	if (!input) {
+		dev_err(&pdev->dev, "failed to allocate input device\n");
+		return -ENOMEM;
 	}
 
 	keypad->board = plat;
@@ -410,7 +417,7 @@
 					   NULL, input);
 	if (error) {
 		dev_err(&pdev->dev, "Failed to build keymap\n");
-		goto err_free_mem;
+		return error;
 	}
 
 	keypad->keymap = input->keycode;
@@ -421,20 +428,23 @@
 
 	input_set_drvdata(input, keypad);
 
-	error = request_threaded_irq(irq, NULL, tc3589x_keypad_irq,
-				     plat->irqtype | IRQF_ONESHOT,
-				     "tc3589x-keypad", keypad);
-	if (error < 0) {
+	tc3589x_keypad_disable(keypad);
+
+	error = devm_request_threaded_irq(&pdev->dev, irq,
+					  NULL, tc3589x_keypad_irq,
+					  plat->irqtype | IRQF_ONESHOT,
+					  "tc3589x-keypad", keypad);
+	if (error) {
 		dev_err(&pdev->dev,
 				"Could not allocate irq %d,error %d\n",
 				irq, error);
-		goto err_free_mem;
+		return error;
 	}
 
 	error = input_register_device(input);
 	if (error) {
 		dev_err(&pdev->dev, "Could not register input device\n");
-		goto err_free_irq;
+		return error;
 	}
 
 	/* let platform decide if keypad is a wakeup source or not */
@@ -444,30 +454,6 @@
 	platform_set_drvdata(pdev, keypad);
 
 	return 0;
-
-err_free_irq:
-	free_irq(irq, keypad);
-err_free_mem:
-	input_free_device(input);
-	kfree(keypad);
-	return error;
-}
-
-static int tc3589x_keypad_remove(struct platform_device *pdev)
-{
-	struct tc_keypad *keypad = platform_get_drvdata(pdev);
-	int irq = platform_get_irq(pdev, 0);
-
-	if (!keypad->keypad_stopped)
-		tc3589x_keypad_disable(keypad);
-
-	free_irq(irq, keypad);
-
-	input_unregister_device(keypad->input);
-
-	kfree(keypad);
-
-	return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -518,7 +504,6 @@
 		.pm	= &tc3589x_keypad_dev_pm_ops,
 	},
 	.probe	= tc3589x_keypad_probe,
-	.remove	= tc3589x_keypad_remove,
 };
 module_platform_driver(tc3589x_keypad_driver);
 
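The tc3589x conversion is the standard devm_* refactor: the allocation, input device, and IRQ become device-managed, so the error-unwinding labels and the whole remove() callback go away. The resulting probe shape, sketched with illustrative names:

#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq(int irq, void *data)
{
	/* ... scan the keypad ... */
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct input_dev *input;
	int irq, error;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* freed automatically when the device is unbound */
	input = devm_input_allocate_device(&pdev->dev);
	if (!input)
		return -ENOMEM;

	error = devm_request_threaded_irq(&pdev->dev, irq, NULL, example_irq,
					  IRQF_ONESHOT, "example", input);
	if (error)
		return error;

	/* a devm-allocated input device is auto-unregistered as well */
	return input_register_device(input);
}
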
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 4e491c1..9002298 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -404,7 +404,6 @@
 static struct i2c_driver tca8418_keypad_driver = {
 	.driver = {
 		.name	= TCA8418_NAME,
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(tca8418_dt_ids),
 	},
 	.probe		= tca8418_keypad_probe,
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index c41dec8..906dd1b 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -247,7 +247,7 @@
 config INPUT_GP2A
 	tristate "Sharp GP2AP002A00F I2C Proximity/Opto sensor driver"
 	depends on I2C
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here if you have a Sharp GP2AP002A00F proximity/als combo-chip
 	  hooked to an I2C bus.
@@ -257,7 +257,7 @@
 
 config INPUT_GPIO_BEEPER
 	tristate "Generic GPIO Beeper support"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here if you have a beeper connected to a GPIO pin.
 
@@ -266,7 +266,7 @@
 
 config INPUT_GPIO_TILT_POLLED
 	tristate "Polled GPIO tilt switch"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	select INPUT_POLLDEV
 	help
 	  This driver implements support for tilt switches connected
@@ -557,7 +557,7 @@
 
 config INPUT_GPIO_ROTARY_ENCODER
 	tristate "Rotary encoders connected to GPIO pins"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here to add support for rotary encoders connected to GPIO lines.
 	  Check file:Documentation/input/rotary-encoder.txt for more
@@ -764,7 +764,8 @@
 
 config INPUT_DRV260X_HAPTICS
 	tristate "TI DRV260X haptics support"
-	depends on INPUT && I2C && GPIOLIB
+	depends on INPUT && I2C
+	depends on GPIOLIB || COMPILE_TEST
 	select INPUT_FF_MEMLESS
 	select REGMAP_I2C
 	help
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index bdb5d03..a8b0a2e 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -158,7 +158,6 @@
 static struct i2c_driver adxl34x_driver = {
 	.driver = {
 		.name = "adxl34x",
-		.owner = THIS_MODULE,
 		.pm = &adxl34x_i2c_pm,
 		.of_match_table = of_match_ptr(adxl34x_of_id),
 	},
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 4dbbed7..4bf6785 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -170,8 +170,8 @@
 
 	INIT_WORK(&haptics->work, arizona_haptics_work);
 
-	haptics->input_dev = input_allocate_device();
-	if (haptics->input_dev == NULL) {
+	haptics->input_dev = devm_input_allocate_device(&pdev->dev);
+	if (!haptics->input_dev) {
 		dev_err(arizona->dev, "Failed to allocate input device\n");
 		return -ENOMEM;
 	}
@@ -188,41 +188,23 @@
 	if (ret < 0) {
 		dev_err(arizona->dev, "input_ff_create_memless() failed: %d\n",
 			ret);
-		goto err_ialloc;
+		return ret;
 	}
 
 	ret = input_register_device(haptics->input_dev);
 	if (ret < 0) {
 		dev_err(arizona->dev, "couldn't register input device: %d\n",
 			ret);
-		goto err_iff;
+		return ret;
 	}
 
 	platform_set_drvdata(pdev, haptics);
 
 	return 0;
-
-err_iff:
-	if (haptics->input_dev)
-		input_ff_destroy(haptics->input_dev);
-err_ialloc:
-	input_free_device(haptics->input_dev);
-
-	return ret;
-}
-
-static int arizona_haptics_remove(struct platform_device *pdev)
-{
-	struct arizona_haptics *haptics = platform_get_drvdata(pdev);
-
-	input_unregister_device(haptics->input_dev);
-
-	return 0;
 }
 
 static struct platform_driver arizona_haptics_driver = {
 	.probe		= arizona_haptics_probe,
-	.remove		= arizona_haptics_remove,
 	.driver		= {
 		.name	= "arizona-haptics",
 	},
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index b36831c..1d0e61d 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -333,10 +333,9 @@
 	y = ((0xc0 & data[2]) >> 6) | (data[3] << 2);
 	z = ((0xc0 & data[4]) >> 6) | (data[5] << 2);
 
-	/* sign extension */
-	x = (s16) (x << 6) >> 6;
-	y = (s16) (y << 6) >> 6;
-	z = (s16) (z << 6) >> 6;
+	x = sign_extend32(x, 9);
+	y = sign_extend32(y, 9);
+	z = sign_extend32(z, 9);
 
 	input_report_abs(bma150->input, ABS_X, x);
 	input_report_abs(bma150->input, ABS_Y, y);
@@ -654,7 +653,6 @@
 
 static struct i2c_driver bma150_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= BMA150_DRIVER,
 		.pm	= &bma150_pm,
 	},
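
sign_extend32(value, index) treats bit 'index' as the sign bit, replacing the cast-and-shift trick for these 10-bit samples. A sketch:

#include <linux/bitops.h>
#include <linux/types.h>

/* extend a 10-bit two's-complement sample held in the low bits of x */
static s32 example_extend10(u32 x)
{
	return sign_extend32(x, 9);	/* bit 9 is the sign bit */
}
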
diff --git a/drivers/input/misc/cma3000_d0x_i2c.c b/drivers/input/misc/cma3000_d0x_i2c.c
index 4fdef98..c702191 100644
--- a/drivers/input/misc/cma3000_d0x_i2c.c
+++ b/drivers/input/misc/cma3000_d0x_i2c.c
@@ -118,7 +118,6 @@
 	.id_table	= cma3000_i2c_id,
 	.driver = {
 		.name	= "cma3000_i2c_accl",
-		.owner	= THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm	= &cma3000_i2c_pm_ops,
 #endif
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index e5d60ec..2adfd86c 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -204,7 +204,7 @@
 	int overdrive_voltage;
 };
 
-static struct reg_default drv260x_reg_defs[] = {
+static const struct reg_default drv260x_reg_defs[] = {
 	{ DRV260X_STATUS, 0xe0 },
 	{ DRV260X_MODE, 0x40 },
 	{ DRV260X_RT_PB_IN, 0x00 },
@@ -313,14 +313,14 @@
 	gpiod_set_value(haptics->enable_gpio, 0);
 }
 
-static const struct reg_default drv260x_lra_cal_regs[] = {
+static const struct reg_sequence drv260x_lra_cal_regs[] = {
 	{ DRV260X_MODE, DRV260X_AUTO_CAL },
 	{ DRV260X_CTRL3, DRV260X_NG_THRESH_2 },
 	{ DRV260X_FEEDBACK_CTRL, DRV260X_FB_REG_LRA_MODE |
 		DRV260X_BRAKE_FACTOR_4X | DRV260X_LOOP_GAIN_HIGH },
 };
 
-static const struct reg_default drv260x_lra_init_regs[] = {
+static const struct reg_sequence drv260x_lra_init_regs[] = {
 	{ DRV260X_MODE, DRV260X_RT_PLAYBACK },
 	{ DRV260X_A_TO_V_CTRL, DRV260X_AUDIO_HAPTICS_PEAK_20MS |
 		DRV260X_AUDIO_HAPTICS_FILTER_125HZ },
@@ -337,7 +337,7 @@
 	{ DRV260X_CTRL4, DRV260X_AUTOCAL_TIME_500MS },
 };
 
-static const struct reg_default drv260x_erm_cal_regs[] = {
+static const struct reg_sequence drv260x_erm_cal_regs[] = {
 	{ DRV260X_MODE, DRV260X_AUTO_CAL },
 	{ DRV260X_A_TO_V_MIN_INPUT, DRV260X_AUDIO_HAPTICS_MIN_IN_VOLT },
 	{ DRV260X_A_TO_V_MAX_INPUT, DRV260X_AUDIO_HAPTICS_MAX_IN_VOLT },
@@ -720,7 +720,6 @@
 	.probe		= drv260x_probe,
 	.driver		= {
 		.name	= "drv260x-haptics",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(drv260x_of_match),
 		.pm	= &drv260x_pm_ops,
 	},
diff --git a/drivers/input/misc/drv2665.c b/drivers/input/misc/drv2665.c
index 0afaa33..ef9bc12 100644
--- a/drivers/input/misc/drv2665.c
+++ b/drivers/input/misc/drv2665.c
@@ -74,7 +74,7 @@
 	0x9b, 0x9f, 0xa5, 0xad, 0xb8, 0xc4, 0xd2, 0xe0, 0xf0, 0x00,
 };
 
-static struct reg_default drv2665_reg_defs[] = {
+static const struct reg_default drv2665_reg_defs[] = {
 	{ DRV2665_STATUS, 0x02 },
 	{ DRV2665_CTRL_1, 0x28 },
 	{ DRV2665_CTRL_2, 0x40 },
@@ -132,7 +132,7 @@
 			"Failed to enter standby mode: %d\n", error);
 }
 
-static const struct reg_default drv2665_init_regs[] = {
+static const struct reg_sequence drv2665_init_regs[] = {
 	{ DRV2665_CTRL_2, 0 | DRV2665_10_MS_IDLE_TOUT },
 	{ DRV2665_CTRL_1, DRV2665_25_VPP_GAIN },
 };
@@ -309,7 +309,6 @@
 	.probe		= drv2665_probe,
 	.driver		= {
 		.name	= "drv2665-haptics",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(drv2665_of_match),
 		.pm	= &drv2665_pm_ops,
 	},
diff --git a/drivers/input/misc/drv2667.c b/drivers/input/misc/drv2667.c
index fc0fddf..d5ba748 100644
--- a/drivers/input/misc/drv2667.c
+++ b/drivers/input/misc/drv2667.c
@@ -116,7 +116,7 @@
 	u32 frequency;
 };
 
-static struct reg_default drv2667_reg_defs[] = {
+static const struct reg_default drv2667_reg_defs[] = {
 	{ DRV2667_STATUS, 0x02 },
 	{ DRV2667_CTRL_1, 0x28 },
 	{ DRV2667_CTRL_2, 0x40 },
@@ -262,14 +262,14 @@
 			"Failed to enter standby mode: %d\n", error);
 }
 
-static const struct reg_default drv2667_init_regs[] = {
+static const struct reg_sequence drv2667_init_regs[] = {
 	{ DRV2667_CTRL_2, 0 },
 	{ DRV2667_CTRL_1, DRV2667_25_VPP_GAIN },
 	{ DRV2667_WV_SEQ_0, 1 },
 	{ DRV2667_WV_SEQ_1, 0 }
 };
 
-static const struct reg_default drv2667_page1_init[] = {
+static const struct reg_sequence drv2667_page1_init[] = {
 	{ DRV2667_RAM_HDR_SZ, 0x05 },
 	{ DRV2667_RAM_START_HI, 0x80 },
 	{ DRV2667_RAM_START_LO, 0x06 },
@@ -484,7 +484,6 @@
 	.probe		= drv2667_probe,
 	.driver		= {
 		.name	= "drv2667-haptics",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(drv2667_of_match),
 		.pm	= &drv2667_pm_ops,
 	},
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
index 0ac176d..3bfdfcc 100644
--- a/drivers/input/misc/gp2ap002a00f.c
+++ b/drivers/input/misc/gp2ap002a00f.c
@@ -267,11 +267,11 @@
 	{ GP2A_I2C_NAME, 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, gp2a_i2c_id);
 
 static struct i2c_driver gp2a_i2c_driver = {
 	.driver = {
 		.name	= GP2A_I2C_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &gp2a_pm,
 	},
 	.probe		= gp2a_probe,
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index 6e29349..e058d71 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -658,7 +658,6 @@
 static struct i2c_driver kxtj9_driver = {
 	.driver = {
 		.name	= NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &kxtj9_pm_ops,
 	},
 	.probe		= kxtj9_probe,
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index d0f6872..a806ba3 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -394,7 +394,7 @@
 	{ "max8997-haptic", 0 },
 	{ },
 };
-MODULE_DEVICE_TABLE(i2c, max8997_haptic_id);
+MODULE_DEVICE_TABLE(platform, max8997_haptic_id);
 
 static struct platform_driver max8997_haptic_driver = {
 	.driver	= {
@@ -407,7 +407,6 @@
 };
 module_platform_driver(max8997_haptic_driver);
 
-MODULE_ALIAS("platform:max8997-haptic");
 MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
 MODULE_DESCRIPTION("max8997_haptic driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index 5e50513..f088db3 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -466,7 +466,6 @@
 static struct i2c_driver mpu3050_i2c_driver = {
 	.driver	= {
 		.name	= "mpu3050",
-		.owner	= THIS_MODULE,
 		.pm	= &mpu3050_pm,
 		.of_match_table = mpu3050_of_match,
 	},
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index 97f711a..4abdf1e 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -208,7 +208,6 @@
 static struct i2c_driver pcf8574_kp_driver = {
 	.driver = {
 		.name  = DRV_NAME,
-		.owner = THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm = &pcf8574_kp_pm_ops,
 #endif
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index c4ca20e..3f02e0e 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -20,17 +20,72 @@
 #include <linux/regmap.h>
 #include <linux/log2.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 
 #define PON_CNTL_1 0x1C
 #define PON_CNTL_PULL_UP BIT(7)
 #define PON_CNTL_TRIG_DELAY_MASK (0x7)
+#define PON_CNTL_1_PULL_UP_EN			0xe0
+#define PON_CNTL_1_USB_PWR_EN			0x10
+#define PON_CNTL_1_WD_EN_RESET			0x08
+
+#define PM8058_SLEEP_CTRL			0x02b
+#define PM8921_SLEEP_CTRL			0x10a
+
+#define SLEEP_CTRL_SMPL_EN_RESET		0x04
+
+/* Regulator master enable addresses */
+#define REG_PM8058_VREG_EN_MSM			0x018
+#define REG_PM8058_VREG_EN_GRP_5_4		0x1c8
+
+/* Regulator control registers for shutdown/reset */
+#define PM8058_S0_CTRL				0x004
+#define PM8058_S1_CTRL				0x005
+#define PM8058_S3_CTRL				0x111
+#define PM8058_L21_CTRL				0x120
+#define PM8058_L22_CTRL				0x121
+
+#define PM8058_REGULATOR_ENABLE_MASK		0x80
+#define PM8058_REGULATOR_ENABLE			0x80
+#define PM8058_REGULATOR_DISABLE		0x00
+#define PM8058_REGULATOR_PULL_DOWN_MASK		0x40
+#define PM8058_REGULATOR_PULL_DOWN_EN		0x40
+
+/* Buck CTRL register */
+#define PM8058_SMPS_LEGACY_VREF_SEL		0x20
+#define PM8058_SMPS_LEGACY_VPROG_MASK		0x1f
+#define PM8058_SMPS_ADVANCED_BAND_MASK		0xC0
+#define PM8058_SMPS_ADVANCED_BAND_SHIFT		6
+#define PM8058_SMPS_ADVANCED_VPROG_MASK		0x3f
+
+/* Buck TEST2 registers for shutdown/reset */
+#define PM8058_S0_TEST2				0x084
+#define PM8058_S1_TEST2				0x085
+#define PM8058_S3_TEST2				0x11a
+
+#define PM8058_REGULATOR_BANK_WRITE		0x80
+#define PM8058_REGULATOR_BANK_MASK		0x70
+#define PM8058_REGULATOR_BANK_SHIFT		4
+#define PM8058_REGULATOR_BANK_SEL(n)	((n) << PM8058_REGULATOR_BANK_SHIFT)
+
+/* Buck TEST2 register bank 1 */
+#define PM8058_SMPS_LEGACY_VLOW_SEL		0x01
+
+/* Buck TEST2 register bank 7 */
+#define PM8058_SMPS_ADVANCED_MODE_MASK		0x02
+#define PM8058_SMPS_ADVANCED_MODE		0x02
+#define PM8058_SMPS_LEGACY_MODE			0x00
 
 /**
  * struct pmic8xxx_pwrkey - pmic8xxx pwrkey information
  * @key_press_irq: key press irq number
+ * @regmap: device regmap
+ * @shutdown_fn: shutdown configuration function
  */
 struct pmic8xxx_pwrkey {
 	int key_press_irq;
+	struct regmap *regmap;
+	int (*shutdown_fn)(struct pmic8xxx_pwrkey *, bool);
 };
 
 static irqreturn_t pwrkey_press_irq(int irq, void *_pwr)
@@ -76,6 +131,212 @@
 static SIMPLE_DEV_PM_OPS(pm8xxx_pwr_key_pm_ops,
 		pmic8xxx_pwrkey_suspend, pmic8xxx_pwrkey_resume);
 
+static void pmic8xxx_pwrkey_shutdown(struct platform_device *pdev)
+{
+	struct pmic8xxx_pwrkey *pwrkey = platform_get_drvdata(pdev);
+	int error;
+	u8 mask, val;
+	bool reset = system_state == SYSTEM_RESTART;
+
+	if (pwrkey->shutdown_fn) {
+		error = pwrkey->shutdown_fn(pwrkey, reset);
+		if (error)
+			return;
+	}
+
+	/*
+	 * Select action to perform (reset or shutdown) when PS_HOLD goes low.
+	 * Also ensure that KPD, CBL0, and CBL1 pull ups are enabled and that
+	 * USB charging is enabled.
+	 */
+	mask = PON_CNTL_1_PULL_UP_EN | PON_CNTL_1_USB_PWR_EN;
+	mask |= PON_CNTL_1_WD_EN_RESET;
+	val = mask;
+	if (!reset)
+		val &= ~PON_CNTL_1_WD_EN_RESET;
+
+	regmap_update_bits(pwrkey->regmap, PON_CNTL_1, mask, val);
+}
+
+/*
+ * Set an SMPS regulator to be disabled in its CTRL register, but enabled
+ * in the master enable register.  Also set its pull down enable bit.
+ * Take care that the output voltage doesn't change when switching
+ * from advanced mode to legacy mode.
+ */
+static int pm8058_disable_smps_locally_set_pull_down(struct regmap *regmap,
+	u16 ctrl_addr, u16 test2_addr, u16 master_enable_addr,
+	u8 master_enable_bit)
+{
+	int error;
+	u8 vref_sel, vlow_sel, band, vprog, bank;
+	unsigned int reg;
+
+	bank = PM8058_REGULATOR_BANK_SEL(7);
+	error = regmap_write(regmap, test2_addr, bank);
+	if (error)
+		return error;
+
+	error = regmap_read(regmap, test2_addr, &reg);
+	if (error)
+		return error;
+
+	reg &= PM8058_SMPS_ADVANCED_MODE_MASK;
+	/* Check if in advanced mode. */
+	if (reg == PM8058_SMPS_ADVANCED_MODE) {
+		/* Determine current output voltage. */
+		error = regmap_read(regmap, ctrl_addr, &reg);
+		if (error)
+			return error;
+
+		band = reg & PM8058_SMPS_ADVANCED_BAND_MASK;
+		band >>= PM8058_SMPS_ADVANCED_BAND_SHIFT;
+		switch (band) {
+		case 3:
+			vref_sel = 0;
+			vlow_sel = 0;
+			break;
+		case 2:
+			vref_sel = PM8058_SMPS_LEGACY_VREF_SEL;
+			vlow_sel = 0;
+			break;
+		case 1:
+			vref_sel = PM8058_SMPS_LEGACY_VREF_SEL;
+			vlow_sel = PM8058_SMPS_LEGACY_VLOW_SEL;
+			break;
+		default:
+			pr_err("%s: regulator already disabled\n", __func__);
+			return -EPERM;
+		}
+		vprog = reg & PM8058_SMPS_ADVANCED_VPROG_MASK;
+		/* Round up if fine step is in use. */
+		vprog = (vprog + 1) >> 1;
+		if (vprog > PM8058_SMPS_LEGACY_VPROG_MASK)
+			vprog = PM8058_SMPS_LEGACY_VPROG_MASK;
+
+		/* Set VLOW_SEL bit. */
+		bank = PM8058_REGULATOR_BANK_SEL(1);
+		error = regmap_write(regmap, test2_addr, bank);
+		if (error)
+			return error;
+
+		error = regmap_update_bits(regmap, test2_addr,
+			PM8058_REGULATOR_BANK_WRITE | PM8058_REGULATOR_BANK_MASK
+				| PM8058_SMPS_LEGACY_VLOW_SEL,
+			PM8058_REGULATOR_BANK_WRITE |
+			PM8058_REGULATOR_BANK_SEL(1) | vlow_sel);
+		if (error)
+			return error;
+
+		/* Switch to legacy mode */
+		bank = PM8058_REGULATOR_BANK_SEL(7);
+		error = regmap_write(regmap, test2_addr, bank);
+		if (error)
+			return error;
+
+		error = regmap_update_bits(regmap, test2_addr,
+				PM8058_REGULATOR_BANK_WRITE |
+				PM8058_REGULATOR_BANK_MASK |
+				PM8058_SMPS_ADVANCED_MODE_MASK,
+				PM8058_REGULATOR_BANK_WRITE |
+				PM8058_REGULATOR_BANK_SEL(7) |
+				PM8058_SMPS_LEGACY_MODE);
+		if (error)
+			return error;
+
+		/* Enable locally, enable pull down, keep voltage the same. */
+		error = regmap_update_bits(regmap, ctrl_addr,
+			PM8058_REGULATOR_ENABLE_MASK |
+			PM8058_REGULATOR_PULL_DOWN_MASK |
+			PM8058_SMPS_LEGACY_VREF_SEL |
+			PM8058_SMPS_LEGACY_VPROG_MASK,
+			PM8058_REGULATOR_ENABLE | PM8058_REGULATOR_PULL_DOWN_EN
+				| vref_sel | vprog);
+		if (error)
+			return error;
+	}
+
+	/* Enable in master control register. */
+	error = regmap_update_bits(regmap, master_enable_addr,
+			master_enable_bit, master_enable_bit);
+	if (error)
+		return error;
+
+	/* Disable locally and enable pull down. */
+	return regmap_update_bits(regmap, ctrl_addr,
+		PM8058_REGULATOR_ENABLE_MASK | PM8058_REGULATOR_PULL_DOWN_MASK,
+		PM8058_REGULATOR_DISABLE | PM8058_REGULATOR_PULL_DOWN_EN);
+}
+
+static int pm8058_disable_ldo_locally_set_pull_down(struct regmap *regmap,
+		u16 ctrl_addr, u16 master_enable_addr, u8 master_enable_bit)
+{
+	int error;
+
+	/* Enable LDO in master control register. */
+	error = regmap_update_bits(regmap, master_enable_addr,
+			master_enable_bit, master_enable_bit);
+	if (error)
+		return error;
+
+	/* Disable LDO in CTRL register and set pull down */
+	return regmap_update_bits(regmap, ctrl_addr,
+		PM8058_REGULATOR_ENABLE_MASK | PM8058_REGULATOR_PULL_DOWN_MASK,
+		PM8058_REGULATOR_DISABLE | PM8058_REGULATOR_PULL_DOWN_EN);
+}
+
+static int pm8058_pwrkey_shutdown(struct pmic8xxx_pwrkey *pwrkey, bool reset)
+{
+	int error;
+	struct regmap *regmap = pwrkey->regmap;
+	u8 mask, val;
+
+	/* When shutting down, enable active pulldowns on important rails. */
+	if (!reset) {
+		/* Disable SMPS's 0,1,3 locally and set pulldown enable bits. */
+		pm8058_disable_smps_locally_set_pull_down(regmap,
+			PM8058_S0_CTRL, PM8058_S0_TEST2,
+			REG_PM8058_VREG_EN_MSM, BIT(7));
+		pm8058_disable_smps_locally_set_pull_down(regmap,
+			PM8058_S1_CTRL, PM8058_S1_TEST2,
+			REG_PM8058_VREG_EN_MSM, BIT(6));
+		pm8058_disable_smps_locally_set_pull_down(regmap,
+			PM8058_S3_CTRL, PM8058_S3_TEST2,
+			REG_PM8058_VREG_EN_GRP_5_4, BIT(7) | BIT(4));
+		/* Disable LDO 21 locally and set pulldown enable bit. */
+		pm8058_disable_ldo_locally_set_pull_down(regmap,
+			PM8058_L21_CTRL, REG_PM8058_VREG_EN_GRP_5_4,
+			BIT(1));
+	}
+
+	/*
+	 * Fix-up: Set regulator LDO22 to 1.225 V in high power mode. Leave its
+	 * pull-down state intact. This ensures a safe shutdown.
+	 */
+	error = regmap_update_bits(regmap, PM8058_L22_CTRL, 0xbf, 0x93);
+	if (error)
+		return error;
+
+	/* Enable SMPL if resetting is desired */
+	mask = SLEEP_CTRL_SMPL_EN_RESET;
+	val = 0;
+	if (reset)
+		val = mask;
+	return regmap_update_bits(regmap, PM8058_SLEEP_CTRL, mask, val);
+}
+
+static int pm8921_pwrkey_shutdown(struct pmic8xxx_pwrkey *pwrkey, bool reset)
+{
+	struct regmap *regmap = pwrkey->regmap;
+	u8 mask = SLEEP_CTRL_SMPL_EN_RESET;
+	u8 val = 0;
+
+	/* Enable SMPL if resetting is desired */
+	if (reset)
+		val = mask;
+	return regmap_update_bits(regmap, PM8921_SLEEP_CTRL, mask, val);
+}
+
 static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
 {
 	struct input_dev *pwr;
@@ -109,6 +370,8 @@
 	if (!pwrkey)
 		return -ENOMEM;
 
+	pwrkey->shutdown_fn = of_device_get_match_data(&pdev->dev);
+	pwrkey->regmap = regmap;
 	pwrkey->key_press_irq = key_press_irq;
 
 	pwr = devm_input_allocate_device(&pdev->dev);
@@ -182,8 +445,8 @@
 }
 
 static const struct of_device_id pm8xxx_pwr_key_id_table[] = {
-	{ .compatible = "qcom,pm8058-pwrkey" },
-	{ .compatible = "qcom,pm8921-pwrkey" },
+	{ .compatible = "qcom,pm8058-pwrkey", .data = &pm8058_pwrkey_shutdown },
+	{ .compatible = "qcom,pm8921-pwrkey", .data = &pm8921_pwrkey_shutdown },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, pm8xxx_pwr_key_id_table);
@@ -191,6 +454,7 @@
 static struct platform_driver pmic8xxx_pwrkey_driver = {
 	.probe		= pmic8xxx_pwrkey_probe,
 	.remove		= pmic8xxx_pwrkey_remove,
+	.shutdown	= pmic8xxx_pwrkey_shutdown,
 	.driver		= {
 		.name	= "pm8xxx-pwrkey",
 		.pm	= &pm8xxx_pwr_key_pm_ops,
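
The pwrkey rework above stores a per-variant shutdown callback in the .data
field of the OF match table and fetches it in probe. A minimal sketch of the
pattern, assuming hypothetical demo_* and "vendor,demo" names:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Hypothetical per-variant callback, standing in for
 * pm8058_pwrkey_shutdown()/pm8921_pwrkey_shutdown(). */
static int demo_variant_shutdown(void *priv, bool reset)
{
	return 0;	/* variant-specific register writes go here */
}

static const struct of_device_id demo_id_table[] = {
	{ .compatible = "vendor,demo", .data = &demo_variant_shutdown },
	{ }
};
MODULE_DEVICE_TABLE(of, demo_id_table);

static int demo_probe(struct platform_device *pdev)
{
	/* Returns the .data of the matched table entry, or NULL. */
	int (*shutdown_fn)(void *, bool) =
		of_device_get_match_data(&pdev->dev);

	return shutdown_fn ? 0 : -ENODEV;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.driver	= {
		.name		= "demo-pwrkey",
		.of_match_table	= demo_id_table,
	},
};
module_platform_driver(demo_driver);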
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
index e956e81..62c5814 100644
--- a/drivers/input/misc/rb532_button.c
+++ b/drivers/input/misc/rb532_button.c
@@ -7,6 +7,7 @@
 #include <linux/input-polldev.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/gpio.h>
 
 #include <asm/mach-rc32434/gpio.h>
 #include <asm/mach-rc32434/rb.h>
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 421e29e..345df9b 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -320,10 +320,8 @@
 	 * Check if absmin/absmax/absfuzz/absflat are sane.
 	 */
 
-	for (cnt = 0; cnt < ABS_CNT; cnt++) {
+	for_each_set_bit(cnt, dev->absbit, ABS_CNT) {
 		int min, max;
-		if (!test_bit(cnt, dev->absbit))
-			continue;
 
 		min = input_abs_get_min(dev, cnt);
 		max = input_abs_get_max(dev, cnt);
@@ -416,7 +414,7 @@
 	dev->id.product	= user_dev->id.product;
 	dev->id.version	= user_dev->id.version;
 
-	for (i = 0; i < ABS_CNT; i++) {
+	for_each_set_bit(i, dev->absbit, ABS_CNT) {
 		input_abs_set_max(dev, i, user_dev->absmax[i]);
 		input_abs_set_min(dev, i, user_dev->absmin[i]);
 		input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]);
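
The uinput hunks above replace open-coded test_bit() loops with
for_each_set_bit(), which visits only the bits that are set in a bitmap. A
hedged equivalence sketch (demo_walk is hypothetical):

#include <linux/bitops.h>

static void demo_walk(unsigned long *absbit, unsigned int nbits)
{
	unsigned int i;

	/* Open-coded form, as uinput had before this change: */
	for (i = 0; i < nbits; i++) {
		if (!test_bit(i, absbit))
			continue;
		/* ... handle axis i ... */
	}

	/* Idiomatic form adopted above; visits the same indices: */
	for_each_set_bit(i, absbit, nbits) {
		/* ... handle axis i ... */
	}
}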
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index d7820d1..17f97e5 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -341,7 +341,7 @@
 
 config MOUSE_GPIO
 	tristate "GPIO mouse"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	select INPUT_POLLDEV
 	help
 	  This driver simulates a mouse on GPIO lines of various CPUs (and some
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 793300b..ee6a6e9 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -24,7 +24,7 @@
 obj-$(CONFIG_MOUSE_SYNAPTICS_USB)	+= synaptics_usb.o
 obj-$(CONFIG_MOUSE_VSXXXAA)		+= vsxxxaa.o
 
-cyapatp-objs := cyapa.o cyapa_gen3.o cyapa_gen5.o
+cyapatp-objs := cyapa.o cyapa_gen3.o cyapa_gen5.o cyapa_gen6.o
 psmouse-objs := psmouse-base.o synaptics.o focaltech.o
 
 psmouse-$(CONFIG_MOUSE_PS2_ALPS)	+= alps.o
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
index efe1484..eb76b61 100644
--- a/drivers/input/mouse/cyapa.c
+++ b/drivers/input/mouse/cyapa.c
@@ -6,7 +6,7 @@
  *   Daniel Kurtz <djkurtz@chromium.org>
  *   Benson Leung <bleung@chromium.org>
  *
- * Copyright (C) 2011-2014 Cypress Semiconductor, Inc.
+ * Copyright (C) 2011-2015 Cypress Semiconductor, Inc.
  * Copyright (C) 2011-2012 Google, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -21,10 +21,12 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/pm_runtime.h>
 #include <linux/acpi.h>
+#include <linux/of.h>
 #include "cyapa.h"
 
 
@@ -39,11 +41,33 @@
 
 static int cyapa_reinitialize(struct cyapa *cyapa);
 
-static inline bool cyapa_is_bootloader_mode(struct cyapa *cyapa)
+bool cyapa_is_pip_bl_mode(struct cyapa *cyapa)
 {
+	if (cyapa->gen == CYAPA_GEN6 && cyapa->state == CYAPA_STATE_GEN6_BL)
+		return true;
+
 	if (cyapa->gen == CYAPA_GEN5 && cyapa->state == CYAPA_STATE_GEN5_BL)
 		return true;
 
+	return false;
+}
+
+bool cyapa_is_pip_app_mode(struct cyapa *cyapa)
+{
+	if (cyapa->gen == CYAPA_GEN6 && cyapa->state == CYAPA_STATE_GEN6_APP)
+		return true;
+
+	if (cyapa->gen == CYAPA_GEN5 && cyapa->state == CYAPA_STATE_GEN5_APP)
+		return true;
+
+	return false;
+}
+
+static bool cyapa_is_bootloader_mode(struct cyapa *cyapa)
+{
+	if (cyapa_is_pip_bl_mode(cyapa))
+		return true;
+
 	if (cyapa->gen == CYAPA_GEN3 &&
 		cyapa->state >= CYAPA_STATE_BL_BUSY &&
 		cyapa->state <= CYAPA_STATE_BL_ACTIVE)
@@ -54,7 +78,7 @@
 
 static inline bool cyapa_is_operational_mode(struct cyapa *cyapa)
 {
-	if (cyapa->gen == CYAPA_GEN5 && cyapa->state == CYAPA_STATE_GEN5_APP)
+	if (cyapa_is_pip_app_mode(cyapa))
 		return true;
 
 	if (cyapa->gen == CYAPA_GEN3 && cyapa->state == CYAPA_STATE_OP)
@@ -188,6 +212,15 @@
 			if (!error)
 				goto out_detected;
 		}
+		if (cyapa->gen == CYAPA_GEN_UNKNOWN ||
+				cyapa->gen == CYAPA_GEN6 ||
+				cyapa->gen == CYAPA_GEN5) {
+			error = cyapa_pip_state_parse(cyapa,
+					status, BL_STATUS_SIZE);
+			if (!error)
+				goto out_detected;
+		}
+		/* For detecting old Gen5 trackpads. */
 		if ((cyapa->gen == CYAPA_GEN_UNKNOWN ||
 				cyapa->gen == CYAPA_GEN5) &&
 			!smbus && even_addr) {
@@ -284,6 +317,9 @@
 		return error;
 
 	switch (cyapa->gen) {
+	case CYAPA_GEN6:
+		cyapa->ops = &cyapa_gen6_ops;
+		break;
 	case CYAPA_GEN5:
 		cyapa->ops = &cyapa_gen5_ops;
 		break;
@@ -306,7 +342,7 @@
 
 /*
  * Returns 0 on device detected, negative errno on no device detected.
- * And when the device is detected and opertaional, it will be reset to
+ * And when the device is detected and operational, it will be reset to
  * full power active mode automatically.
  */
 static int cyapa_detect(struct cyapa *cyapa)
@@ -333,6 +369,7 @@
 {
 	struct cyapa *cyapa = input_get_drvdata(input);
 	struct i2c_client *client = cyapa->client;
+	struct device *dev = &client->dev;
 	int error;
 
 	error = mutex_lock_interruptible(&cyapa->state_sync_lock);
@@ -346,10 +383,9 @@
 		 * when in operational mode.
 		 */
 		error = cyapa->ops->set_power_mode(cyapa,
-				PWR_MODE_FULL_ACTIVE, 0);
+				PWR_MODE_FULL_ACTIVE, 0, false);
 		if (error) {
-			dev_warn(&client->dev,
-				"set active power failed: %d\n", error);
+			dev_warn(dev, "set active power failed: %d\n", error);
 			goto out;
 		}
 	} else {
@@ -361,10 +397,14 @@
 	}
 
 	enable_irq(client->irq);
-	if (!pm_runtime_enabled(&client->dev)) {
-		pm_runtime_set_active(&client->dev);
-		pm_runtime_enable(&client->dev);
+	if (!pm_runtime_enabled(dev)) {
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
 	}
+
+	pm_runtime_get_sync(dev);
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_sync_autosuspend(dev);
 out:
 	mutex_unlock(&cyapa->state_sync_lock);
 	return error;
@@ -374,16 +414,17 @@
 {
 	struct cyapa *cyapa = input_get_drvdata(input);
 	struct i2c_client *client = cyapa->client;
+	struct device *dev = &cyapa->client->dev;
 
 	mutex_lock(&cyapa->state_sync_lock);
 
 	disable_irq(client->irq);
-	if (pm_runtime_enabled(&client->dev))
-		pm_runtime_disable(&client->dev);
-	pm_runtime_set_suspended(&client->dev);
+	if (pm_runtime_enabled(dev))
+		pm_runtime_disable(dev);
+	pm_runtime_set_suspended(dev);
 
 	if (cyapa->operational)
-		cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+		cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0, false);
 
 	mutex_unlock(&cyapa->state_sync_lock);
 }
@@ -443,6 +484,7 @@
 	if (cyapa->gen >= CYAPA_GEN5) {
 		input_set_abs_params(input, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0);
 		input_set_abs_params(input, ABS_MT_WIDTH_MINOR, 0, 255, 0, 0);
+		input_set_abs_params(input, ABS_DISTANCE, 0, 1, 0, 0);
 	}
 
 	input_abs_set_res(input, ABS_MT_POSITION_X,
@@ -492,7 +534,7 @@
 		 */
 		if (!input || cyapa->operational)
 			cyapa->ops->set_power_mode(cyapa,
-				PWR_MODE_FULL_ACTIVE, 0);
+				PWR_MODE_FULL_ACTIVE, 0, false);
 		/* Gen3 always using polling mode for command. */
 		if (cyapa->gen >= CYAPA_GEN5)
 			enable_irq(cyapa->client->irq);
@@ -507,7 +549,8 @@
 		if (cyapa->gen >= CYAPA_GEN5)
 			disable_irq(cyapa->client->irq);
 		if (!input || cyapa->operational)
-			cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+			cyapa->ops->set_power_mode(cyapa,
+						   PWR_MODE_OFF, 0, false);
 	}
 }
 
@@ -563,6 +606,8 @@
 	error = cyapa_gen3_ops.initialize(cyapa);
 	if (!error)
 		error = cyapa_gen5_ops.initialize(cyapa);
+	if (!error)
+		error = cyapa_gen6_ops.initialize(cyapa);
 	if (error)
 		return error;
 
@@ -572,7 +617,7 @@
 
 	/* Power down the device until we need it. */
 	if (cyapa->operational)
-		cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+		cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0, false);
 
 	return 0;
 }
@@ -588,7 +633,8 @@
 
 	/* Avoid command failures when TP was in OFF state. */
 	if (cyapa->operational)
-		cyapa->ops->set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE, 0);
+		cyapa->ops->set_power_mode(cyapa,
+					   PWR_MODE_FULL_ACTIVE, 0, false);
 
 	error = cyapa_detect(cyapa);
 	if (error)
@@ -607,7 +653,8 @@
 	if (!input || !input->users) {
 		/* Reset to power OFF state to save power when no user open. */
 		if (cyapa->operational)
-			cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+			cyapa->ops->set_power_mode(cyapa,
+						   PWR_MODE_OFF, 0, false);
 	} else if (!error && cyapa->operational) {
 		/*
 		 * Make sure only enable runtime PM when device is
@@ -615,6 +662,10 @@
 		 */
 		pm_runtime_set_active(dev);
 		pm_runtime_enable(dev);
+
+		pm_runtime_get_sync(dev);
+		pm_runtime_mark_last_busy(dev);
+		pm_runtime_put_sync_autosuspend(dev);
 	}
 
 	return error;
@@ -624,27 +675,44 @@
 {
 	struct cyapa *cyapa = dev_id;
 	struct device *dev = &cyapa->client->dev;
+	int error;
 
-	pm_runtime_get_sync(dev);
 	if (device_may_wakeup(dev))
 		pm_wakeup_event(dev, 0);
 
-	/* Interrupt event maybe cuased by host command to trackpad device. */
+	/* Interrupt event can be caused by host command to trackpad device. */
 	if (cyapa->ops->irq_cmd_handler(cyapa)) {
 		/*
 		 * Interrupt event may be from trackpad device input reporting.
 		 */
 		if (!cyapa->input) {
 			/*
-			 * Still in probling or in firware image
-			 * udpating or reading.
+			 * Still in probing or in firmware image
+			 * updating or reading.
 			 */
 			cyapa->ops->sort_empty_output_data(cyapa,
 					NULL, NULL, NULL);
 			goto out;
 		}
 
-		if (!cyapa->operational || cyapa->ops->irq_handler(cyapa)) {
+		if (cyapa->operational) {
+			error = cyapa->ops->irq_handler(cyapa);
+
+			/*
+			 * Apply runtime power management to touch report
+			 * events, but not to the events caused by command
+			 * responses.
+			 * Note:
+			 * This adds about 20~40 ms of extra delay before the
+			 * first valid touch report is received; that time is
+			 * spent executing the device runtime resume process.
+			 */
+			pm_runtime_get_sync(dev);
+			pm_runtime_mark_last_busy(dev);
+			pm_runtime_put_sync_autosuspend(dev);
+		}
+
+		if (!cyapa->operational || error) {
 			if (!mutex_trylock(&cyapa->state_sync_lock)) {
 				cyapa->ops->sort_empty_output_data(cyapa,
 					NULL, NULL, NULL);
@@ -656,8 +724,6 @@
 	}
 
 out:
-	pm_runtime_mark_last_busy(dev);
-	pm_runtime_put_sync_autosuspend(dev);
 	return IRQ_HANDLED;
 }
 
@@ -1051,12 +1117,12 @@
 		dev_dbg(dev, "firmware update successfully done.\n");
 
 	/*
-	 * Redetect trackpad device states because firmware update process
+	 * Re-detect trackpad device states because firmware update process
 	 * will reset trackpad device into bootloader mode.
 	 */
 	ret = cyapa_reinitialize(cyapa);
 	if (ret) {
-		dev_err(dev, "failed to redetect after updated: %d\n", ret);
+		dev_err(dev, "failed to re-detect after update: %d\n", ret);
 		error = error ? error : ret;
 	}
 
@@ -1120,9 +1186,11 @@
 	case CYAPA_STATE_BL_ACTIVE:
 		return "bootloader active";
 	case CYAPA_STATE_GEN5_BL:
+	case CYAPA_STATE_GEN6_BL:
 		return "bootloader";
 	case CYAPA_STATE_OP:
 	case CYAPA_STATE_GEN5_APP:
+	case CYAPA_STATE_GEN6_APP:
 		return "operational";  /* Normal valid state. */
 	default:
 		return "invalid mode";
@@ -1175,6 +1243,13 @@
 	sysfs_remove_group(&cyapa->client->dev.kobj, &cyapa_sysfs_group);
 }
 
+static void cyapa_disable_regulator(void *data)
+{
+	struct cyapa *cyapa = data;
+
+	regulator_disable(cyapa->vcc);
+}
+
 static int cyapa_probe(struct i2c_client *client,
 		       const struct i2c_device_id *dev_id)
 {
@@ -1208,6 +1283,27 @@
 	sprintf(cyapa->phys, "i2c-%d-%04x/input0", client->adapter->nr,
 		client->addr);
 
+	cyapa->vcc = devm_regulator_get(dev, "vcc");
+	if (IS_ERR(cyapa->vcc)) {
+		error = PTR_ERR(cyapa->vcc);
+		dev_err(dev, "failed to get vcc regulator: %d\n", error);
+		return error;
+	}
+
+	error = regulator_enable(cyapa->vcc);
+	if (error) {
+		dev_err(dev, "failed to enable regulator: %d\n", error);
+		return error;
+	}
+
+	error = devm_add_action(dev, cyapa_disable_regulator, cyapa);
+	if (error) {
+		cyapa_disable_regulator(cyapa);
+		dev_err(dev, "failed to add disable regulator action: %d\n",
+			error);
+		return error;
+	}
+
 	error = cyapa_initialize(cyapa);
 	if (error) {
 		dev_err(dev, "failed to detect and initialize tp device.\n");
@@ -1296,12 +1392,19 @@
 		power_mode = device_may_wakeup(dev) ? cyapa->suspend_power_mode
 						    : PWR_MODE_OFF;
 		error = cyapa->ops->set_power_mode(cyapa, power_mode,
-				cyapa->suspend_sleep_time);
+				cyapa->suspend_sleep_time, true);
 		if (error)
 			dev_err(dev, "suspend set power mode failed: %d\n",
 					error);
 	}
 
+	/*
+	 * Disable the proximity interrupt while the system is idle; only a
+	 * real touch should wake the system.
+	 */
+	if (cyapa->dev_pwr_mode != PWR_MODE_OFF)
+		cyapa->ops->set_proximity(cyapa, false);
+
 	if (device_may_wakeup(dev))
 		cyapa->irq_wake = (enable_irq_wake(client->irq) == 0);
 
@@ -1322,7 +1425,10 @@
 		cyapa->irq_wake = false;
 	}
 
-	/* Update device states and runtime PM states. */
+	/*
+	 * Update device states and runtime PM states.
+	 * Re-enable the proximity interrupt after entering operational mode.
+	 */
 	error = cyapa_reinitialize(cyapa);
 	if (error)
 		dev_warn(dev, "failed to reinitialize TP device: %d\n", error);
@@ -1340,7 +1446,8 @@
 
 	error = cyapa->ops->set_power_mode(cyapa,
 			cyapa->runtime_suspend_power_mode,
-			cyapa->runtime_suspend_sleep_time);
+			cyapa->runtime_suspend_sleep_time,
+			false);
 	if (error)
 		dev_warn(dev, "runtime suspend failed: %d\n", error);
 
@@ -1352,7 +1459,8 @@
 	struct cyapa *cyapa = dev_get_drvdata(dev);
 	int error;
 
-	error = cyapa->ops->set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE, 0);
+	error = cyapa->ops->set_power_mode(cyapa,
+					   PWR_MODE_FULL_ACTIVE, 0, false);
 	if (error)
 		dev_warn(dev, "runtime resume failed: %d\n", error);
 
@@ -1374,17 +1482,26 @@
 static const struct acpi_device_id cyapa_acpi_id[] = {
 	{ "CYAP0000", 0 },  /* Gen3 trackpad with 0x67 I2C address. */
 	{ "CYAP0001", 0 },  /* Gen5 trackpad with 0x24 I2C address. */
+	{ "CYAP0002", 0 },  /* Gen6 trackpad with 0x24 I2C address. */
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, cyapa_acpi_id);
 #endif
 
+#ifdef CONFIG_OF
+static const struct of_device_id cyapa_of_match[] = {
+	{ .compatible = "cypress,cyapa" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cyapa_of_match);
+#endif
+
 static struct i2c_driver cyapa_driver = {
 	.driver = {
 		.name = "cyapa",
-		.owner = THIS_MODULE,
 		.pm = &cyapa_pm_ops,
 		.acpi_match_table = ACPI_PTR(cyapa_acpi_id),
+		.of_match_table = of_match_ptr(cyapa_of_match),
 	},
 
 	.probe = cyapa_probe,
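
Several cyapa hunks above adopt the runtime-PM autosuspend idiom: take a
reference, kick the autosuspend timer, then drop the reference so the device
only suspends after the configured delay. A minimal sketch of that idiom
(demo_touch_event is hypothetical):

#include <linux/pm_runtime.h>

static void demo_touch_event(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* resume device, take a reference */
	/* ... process the touch report ... */
	pm_runtime_mark_last_busy(dev);	/* restart the autosuspend timer */
	pm_runtime_put_sync_autosuspend(dev);	/* drop the reference; the
						 * device suspends once the
						 * autosuspend delay expires */
}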
diff --git a/drivers/input/mouse/cyapa.h b/drivers/input/mouse/cyapa.h
index adc9ed5..b812bba 100644
--- a/drivers/input/mouse/cyapa.h
+++ b/drivers/input/mouse/cyapa.h
@@ -3,7 +3,7 @@
  *
  * Author: Dudley Du <dudl@cypress.com>
  *
- * Copyright (C) 2014 Cypress Semiconductor, Inc.
+ * Copyright (C) 2014-2015 Cypress Semiconductor, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file COPYING in the main directory of this archive for
@@ -19,13 +19,14 @@
 #define CYAPA_GEN_UNKNOWN   0x00   /* unknown protocol. */
 #define CYAPA_GEN3   0x03   /* support MT-protocol B with tracking ID. */
 #define CYAPA_GEN5   0x05   /* support TrueTouch GEN5 trackpad device. */
+#define CYAPA_GEN6   0x06   /* support TrueTouch GEN6 trackpad device. */
 
 #define CYAPA_NAME   "Cypress APA Trackpad (cyapa)"
 
 /*
  * Macros for SMBus communication
  */
-#define SMBUS_READ   0x01
+#define SMBUS_READ  0x01
 #define SMBUS_WRITE 0x00
 #define SMBUS_ENCODE_IDX(cmd, idx) ((cmd) | (((idx) & 0x03) << 1))
 #define SMBUS_ENCODE_RW(cmd, rw) ((cmd) | ((rw) & 0x01))
@@ -159,12 +160,89 @@
 
 #define AUTOSUSPEND_DELAY   2000 /* unit : ms */
 
-#define UNINIT_SLEEP_TIME 0xFFFF
-#define UNINIT_PWR_MODE   0xFF
-
 #define BTN_ONLY_MODE_NAME   "buttononly"
 #define OFF_MODE_NAME        "off"
 
+/* Common macros for PIP interface. */
+#define PIP_HID_DESCRIPTOR_ADDR		0x0001
+#define PIP_REPORT_DESCRIPTOR_ADDR	0x0002
+#define PIP_INPUT_REPORT_ADDR		0x0003
+#define PIP_OUTPUT_REPORT_ADDR		0x0004
+#define PIP_CMD_DATA_ADDR		0x0006
+
+#define PIP_RETRIEVE_DATA_STRUCTURE	0x24
+#define PIP_CMD_CALIBRATE		0x28
+#define PIP_BL_CMD_VERIFY_APP_INTEGRITY	0x31
+#define PIP_BL_CMD_GET_BL_INFO		0x38
+#define PIP_BL_CMD_PROGRAM_VERIFY_ROW	0x39
+#define PIP_BL_CMD_LAUNCH_APP		0x3b
+#define PIP_BL_CMD_INITIATE_BL		0x48
+#define PIP_INVALID_CMD			0xff
+
+#define PIP_HID_DESCRIPTOR_SIZE		32
+#define PIP_HID_APP_REPORT_ID		0xf7
+#define PIP_HID_BL_REPORT_ID		0xff
+
+#define PIP_BL_CMD_REPORT_ID		0x40
+#define PIP_BL_RESP_REPORT_ID		0x30
+#define PIP_APP_CMD_REPORT_ID		0x2f
+#define PIP_APP_RESP_REPORT_ID		0x1f
+
+#define PIP_READ_SYS_INFO_CMD_LENGTH	7
+#define PIP_BL_READ_APP_INFO_CMD_LENGTH	13
+#define PIP_MIN_BL_CMD_LENGTH		13
+#define PIP_MIN_BL_RESP_LENGTH		11
+#define PIP_MIN_APP_CMD_LENGTH		7
+#define PIP_MIN_APP_RESP_LENGTH		5
+#define PIP_UNSUPPORTED_CMD_RESP_LENGTH	6
+#define PIP_READ_SYS_INFO_RESP_LENGTH	71
+#define PIP_BL_APP_INFO_RESP_LENGTH	30
+#define PIP_BL_GET_INFO_RESP_LENGTH	19
+
+#define PIP_BL_PLATFORM_VER_SHIFT	4
+#define PIP_BL_PLATFORM_VER_MASK	0x0f
+
+#define PIP_PRODUCT_FAMILY_MASK		0xf000
+#define PIP_PRODUCT_FAMILY_TRACKPAD	0x1000
+
+#define PIP_DEEP_SLEEP_STATE_ON		0x00
+#define PIP_DEEP_SLEEP_STATE_OFF	0x01
+#define PIP_DEEP_SLEEP_STATE_MASK	0x03
+#define PIP_APP_DEEP_SLEEP_REPORT_ID	0xf0
+#define PIP_DEEP_SLEEP_RESP_LENGTH	5
+#define PIP_DEEP_SLEEP_OPCODE		0x08
+#define PIP_DEEP_SLEEP_OPCODE_MASK	0x0f
+
+#define PIP_RESP_LENGTH_OFFSET		0
+#define	    PIP_RESP_LENGTH_SIZE	2
+#define PIP_RESP_REPORT_ID_OFFSET	2
+#define PIP_RESP_RSVD_OFFSET		3
+#define     PIP_RESP_RSVD_KEY		0x00
+#define PIP_RESP_BL_SOP_OFFSET		4
+#define     PIP_SOP_KEY			0x01  /* Start of Packet */
+#define     PIP_EOP_KEY			0x17  /* End of Packet */
+#define PIP_RESP_APP_CMD_OFFSET		4
+#define     GET_PIP_CMD_CODE(reg)	((reg) & 0x7f)
+#define PIP_RESP_STATUS_OFFSET		5
+
+#define VALID_CMD_RESP_HEADER(resp, cmd)				  \
+	(((resp)[PIP_RESP_REPORT_ID_OFFSET] == PIP_APP_RESP_REPORT_ID) && \
+	((resp)[PIP_RESP_RSVD_OFFSET] == PIP_RESP_RSVD_KEY) &&		  \
+	(GET_PIP_CMD_CODE((resp)[PIP_RESP_APP_CMD_OFFSET]) == (cmd)))
+
+#define PIP_CMD_COMPLETE_SUCCESS(resp_data) \
+	((resp_data)[PIP_RESP_STATUS_OFFSET] == 0x00)
+
+/* Variables to record latest gen5 trackpad power states. */
+#define UNINIT_SLEEP_TIME	0xffff
+#define UNINIT_PWR_MODE		0xff
+#define PIP_DEV_SET_PWR_STATE(cyapa, s)		((cyapa)->dev_pwr_mode = (s))
+#define PIP_DEV_GET_PWR_STATE(cyapa)		((cyapa)->dev_pwr_mode)
+#define PIP_DEV_SET_SLEEP_TIME(cyapa, t)	((cyapa)->dev_sleep_time = (t))
+#define PIP_DEV_GET_SLEEP_TIME(cyapa)		((cyapa)->dev_sleep_time)
+#define PIP_DEV_UNINIT_SLEEP_TIME(cyapa)	\
+		(((cyapa)->dev_sleep_time) == UNINIT_SLEEP_TIME)
+
 /* The touch.id is used as the MT slot id, thus max MT slot is 15 */
 #define CYAPA_MAX_MT_SLOTS  15
 
@@ -195,10 +273,12 @@
 	int (*sort_empty_output_data)(struct cyapa *,
 			u8 *, int *, cb_sort);
 
-	int (*set_power_mode)(struct cyapa *, u8, u16);
+	int (*set_power_mode)(struct cyapa *, u8, u16, bool);
+
+	int (*set_proximity)(struct cyapa *, bool);
 };
 
-struct cyapa_gen5_cmd_states {
+struct cyapa_pip_cmd_states {
 	struct mutex cmd_lock;
 	struct completion cmd_ready;
 	atomic_t cmd_issued;
@@ -214,7 +294,7 @@
 };
 
 union cyapa_cmd_states {
-	struct cyapa_gen5_cmd_states gen5;
+	struct cyapa_pip_cmd_states pip;
 };
 
 enum cyapa_state {
@@ -225,6 +305,14 @@
 	CYAPA_STATE_OP,
 	CYAPA_STATE_GEN5_BL,
 	CYAPA_STATE_GEN5_APP,
+	CYAPA_STATE_GEN6_BL,
+	CYAPA_STATE_GEN6_APP,
+};
+
+struct gen6_interval_setting {
+	u16 active_interval;
+	u16 lp1_interval;
+	u16 lp2_interval;
 };
 
 /* The main device structure */
@@ -233,6 +321,7 @@
 	u8 status[BL_STATUS_SIZE];
 	bool operational; /* true: ready for data reporting; false: not. */
 
+	struct regulator *vcc;
 	struct i2c_client *client;
 	struct input_dev *input;
 	char phys[32];	/* Device physical location */
@@ -246,9 +335,11 @@
 	u16 runtime_suspend_sleep_time;
 	u8 dev_pwr_mode;
 	u16 dev_sleep_time;
+	struct gen6_interval_setting gen6_interval_setting;
 
 	/* Read from query data region. */
 	char product_id[16];
+	u8 platform_ver;  /* Platform version. */
 	u8 fw_maj_ver;  /* Firmware major version. */
 	u8 fw_min_ver;  /* Firmware minor version. */
 	u8 btn_capability;
@@ -259,7 +350,7 @@
 	int physical_size_y;
 
 	/* Used in ttsp and truetouch based trackpad devices. */
-	u8 x_origin;  /* X Axis Origin: 0 = left side; 1 = rigth side. */
+	u8 x_origin;  /* X Axis Origin: 0 = left side; 1 = right side. */
 	u8 y_origin;  /* Y Axis Origin: 0 = top; 1 = bottom. */
 	int electrodes_x;  /* Number of electrodes on the X Axis*/
 	int electrodes_y;  /* Number of electrodes on the Y Axis*/
@@ -282,9 +373,9 @@
 
 
 ssize_t cyapa_i2c_reg_read_block(struct cyapa *cyapa, u8 reg, size_t len,
-				u8 *values);
+				 u8 *values);
 ssize_t cyapa_smbus_read_block(struct cyapa *cyapa, u8 cmd, size_t len,
-				u8 *values);
+			       u8 *values);
 
 ssize_t cyapa_read_block(struct cyapa *cyapa, u8 cmd_idx, u8 *values);
 
@@ -293,9 +384,51 @@
 u8 cyapa_sleep_time_to_pwr_cmd(u16 sleep_time);
 u16 cyapa_pwr_cmd_to_sleep_time(u8 pwr_mode);
 
+ssize_t cyapa_i2c_pip_read(struct cyapa *cyapa, u8 *buf, size_t size);
+ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size);
+int cyapa_empty_pip_output_data(struct cyapa *cyapa,
+				u8 *buf, int *len, cb_sort func);
+int cyapa_i2c_pip_cmd_irq_sync(struct cyapa *cyapa,
+			       u8 *cmd, int cmd_len,
+			       u8 *resp_data, int *resp_len,
+			       unsigned long timeout,
+			       cb_sort func,
+			       bool irq_mode);
+int cyapa_pip_state_parse(struct cyapa *cyapa, u8 *reg_data, int len);
+bool cyapa_pip_sort_system_info_data(struct cyapa *cyapa, u8 *buf, int len);
+bool cyapa_sort_tsg_pip_bl_resp_data(struct cyapa *cyapa, u8 *data, int len);
+int cyapa_pip_deep_sleep(struct cyapa *cyapa, u8 state);
+bool cyapa_sort_tsg_pip_app_resp_data(struct cyapa *cyapa, u8 *data, int len);
+int cyapa_pip_bl_exit(struct cyapa *cyapa);
+int cyapa_pip_bl_enter(struct cyapa *cyapa);
 
+
+bool cyapa_is_pip_bl_mode(struct cyapa *cyapa);
+bool cyapa_is_pip_app_mode(struct cyapa *cyapa);
+int cyapa_pip_cmd_state_initialize(struct cyapa *cyapa);
+
+int cyapa_pip_resume_scanning(struct cyapa *cyapa);
+int cyapa_pip_suspend_scanning(struct cyapa *cyapa);
+
+int cyapa_pip_check_fw(struct cyapa *cyapa, const struct firmware *fw);
+int cyapa_pip_bl_initiate(struct cyapa *cyapa, const struct firmware *fw);
+int cyapa_pip_do_fw_update(struct cyapa *cyapa, const struct firmware *fw);
+int cyapa_pip_bl_activate(struct cyapa *cyapa);
+int cyapa_pip_bl_deactivate(struct cyapa *cyapa);
+ssize_t cyapa_pip_do_calibrate(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count);
+int cyapa_pip_set_proximity(struct cyapa *cyapa, bool enable);
+
+bool cyapa_pip_irq_cmd_handler(struct cyapa *cyapa);
+int cyapa_pip_irq_handler(struct cyapa *cyapa);
+
+
+extern u8 pip_read_sys_info[];
+extern u8 pip_bl_read_app_info[];
 extern const char product_id[];
 extern const struct cyapa_dev_ops cyapa_gen3_ops;
 extern const struct cyapa_dev_ops cyapa_gen5_ops;
+extern const struct cyapa_dev_ops cyapa_gen6_ops;
 
 #endif
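
The PIP_RESP_* offsets and helper macros added above encode the response
layout shared by Gen5 and Gen6: a little-endian 2-byte length, the report id,
a reserved zero byte, then the echoed command code with bit 7 masked off, and
a status byte. A hedged usage sketch built from the macros in this header
(demo_calibrate_resp_ok is hypothetical):

#include <linux/types.h>
/* assumes the PIP_* macros from cyapa.h above */

static bool demo_calibrate_resp_ok(const u8 *resp, int len)
{
	if (len < PIP_MIN_APP_RESP_LENGTH)
		return false;

	/* Report id, reserved byte and echoed command code must match. */
	if (!VALID_CMD_RESP_HEADER(resp, PIP_CMD_CALIBRATE))
		return false;

	/* A status byte of 0x00 means the command completed successfully. */
	return PIP_CMD_COMPLETE_SUCCESS(resp);
}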
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c
index 3faf01c..1a9d12a 100644
--- a/drivers/input/mouse/cyapa_gen3.c
+++ b/drivers/input/mouse/cyapa_gen3.c
@@ -6,7 +6,7 @@
  *   Daniel Kurtz <djkurtz@chromium.org>
  *   Benson Leung <bleung@chromium.org>
  *
- * Copyright (C) 2011-2014 Cypress Semiconductor, Inc.
+ * Copyright (C) 2011-2015 Cypress Semiconductor, Inc.
  * Copyright (C) 2011-2012 Google, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -950,7 +950,7 @@
  * Device power mode can only be set when device is in operational mode.
  */
 static int cyapa_gen3_set_power_mode(struct cyapa *cyapa, u8 power_mode,
-				     u16 always_unused)
+		u16 always_unused, bool is_suspend_unused)
 {
 	int ret;
 	u8 power;
@@ -999,6 +999,11 @@
 	return ret;
 }
 
+static int cyapa_gen3_set_proximity(struct cyapa *cyapa, bool enable)
+{
+	return -EOPNOTSUPP;
+}
+
 static int cyapa_gen3_get_query_data(struct cyapa *cyapa)
 {
 	u8 query_data[QUERY_DATA_SIZE];
@@ -1107,7 +1112,7 @@
 		 * may cause problems, so we set the power mode first here.
 		 */
 		error = cyapa_gen3_set_power_mode(cyapa,
-				PWR_MODE_FULL_ACTIVE, 0);
+				PWR_MODE_FULL_ACTIVE, 0, false);
 		if (error)
 			dev_err(dev, "%s: set full power mode failed: %d\n",
 				__func__, error);
@@ -1156,7 +1161,7 @@
 	 * so, stop cyapa_gen3_irq_handler from continuing, to
 	 * avoid unwanted error detecting and processing.
 	 *
-	 * And also, avoid the periodicly accerted interrupts to be processed
+	 * Also, prevent the periodically asserted interrupts from being processed
 	 * as touch inputs when gen3 failed to launch into application mode,
 	 * which will cause gen3 stays in bootloader mode.
 	 */
@@ -1243,4 +1248,6 @@
 	.irq_cmd_handler = cyapa_gen3_irq_cmd_handler,
 	.sort_empty_output_data = cyapa_gen3_empty_output_data,
 	.set_power_mode = cyapa_gen3_set_power_mode,
+
+	.set_proximity = cyapa_gen3_set_proximity,
 };
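
Gen3 gains a set_proximity() stub returning -EOPNOTSUPP so callers can use
the cyapa_dev_ops vtable uniformly across generations. A hedged sketch of how
a caller might dispatch through it (demo_set_proximity is hypothetical; the
actual driver simply ignores the return value on the suspend path):

/* assumes struct cyapa and its ops vtable from cyapa.h above */
static int demo_set_proximity(struct cyapa *cyapa, bool enable)
{
	int error = cyapa->ops->set_proximity(cyapa, enable);

	/* Gen3 reports -EOPNOTSUPP; treat that as "feature absent"
	 * rather than a hard failure. */
	return (error == -EOPNOTSUPP) ? 0 : error;
}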
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index afc39e7..118ba97 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -3,7 +3,7 @@
  *
  * Author: Dudley Du <dudl@cypress.com>
  *
- * Copyright (C) 2014 Cypress Semiconductor, Inc.
+ * Copyright (C) 2014-2015 Cypress Semiconductor, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file COPYING in the main directory of this archive for
@@ -19,15 +19,11 @@
 #include <linux/slab.h>
 #include <asm/unaligned.h>
 #include <linux/crc-itu-t.h>
+#include <linux/pm_runtime.h>
 #include "cyapa.h"
 
 
-/* Macro of Gen5 */
-#define RECORD_EVENT_NONE        0
-#define RECORD_EVENT_TOUCHDOWN	 1
-#define RECORD_EVENT_DISPLACE    2
-#define RECORD_EVENT_LIFTOFF     3
-
+/* Macros for the TSG firmware image */
 #define CYAPA_TSG_FLASH_MAP_BLOCK_SIZE      0x80
 #define CYAPA_TSG_IMG_FW_HDR_SIZE           13
 #define CYAPA_TSG_FW_ROW_SIZE               (CYAPA_TSG_FLASH_MAP_BLOCK_SIZE)
@@ -44,32 +40,65 @@
 
 #define CYAPA_TSG_MAX_CMD_SIZE              256
 
-#define GEN5_BL_CMD_VERIFY_APP_INTEGRITY    0x31
-#define GEN5_BL_CMD_GET_BL_INFO		    0x38
-#define GEN5_BL_CMD_PROGRAM_VERIFY_ROW      0x39
-#define GEN5_BL_CMD_LAUNCH_APP		    0x3b
-#define GEN5_BL_CMD_INITIATE_BL		    0x48
+/* Macros for the PIP interface */
+#define PIP_BL_INITIATE_RESP_LEN            11
+#define PIP_BL_FAIL_EXIT_RESP_LEN           11
+#define PIP_BL_FAIL_EXIT_STATUS_CODE        0x0c
+#define PIP_BL_VERIFY_INTEGRITY_RESP_LEN    12
+#define PIP_BL_INTEGRITY_CHEKC_PASS         0x00
+#define PIP_BL_BLOCK_WRITE_RESP_LEN         11
 
-#define GEN5_HID_DESCRIPTOR_ADDR	0x0001
-#define GEN5_REPORT_DESCRIPTOR_ADDR	0x0002
-#define GEN5_INPUT_REPORT_ADDR		0x0003
-#define GEN5_OUTPUT_REPORT_ADDR		0x0004
-#define GEN5_CMD_DATA_ADDR		0x0006
+#define PIP_TOUCH_REPORT_ID         0x01
+#define PIP_BTN_REPORT_ID           0x03
+#define PIP_WAKEUP_EVENT_REPORT_ID  0x04
+#define PIP_PUSH_BTN_REPORT_ID      0x06
+#define GEN5_OLD_PUSH_BTN_REPORT_ID 0x05  /* Special for old Gen5 TP. */
+#define PIP_PROXIMITY_REPORT_ID     0x07
 
-#define GEN5_TOUCH_REPORT_HEAD_SIZE     7
-#define GEN5_TOUCH_REPORT_MAX_SIZE      127
-#define GEN5_BTN_REPORT_HEAD_SIZE       6
-#define GEN5_BTN_REPORT_MAX_SIZE        14
-#define GEN5_WAKEUP_EVENT_SIZE          4
-#define GEN5_RAW_DATA_HEAD_SIZE         24
+#define PIP_PROXIMITY_REPORT_SIZE	6
+#define PIP_PROXIMITY_DISTANCE_OFFSET	0x05
+#define PIP_PROXIMITY_DISTANCE_MASK	0x01
 
-#define GEN5_BL_CMD_REPORT_ID           0x40
-#define GEN5_BL_RESP_REPORT_ID          0x30
-#define GEN5_APP_CMD_REPORT_ID          0x2f
-#define GEN5_APP_RESP_REPORT_ID         0x1f
+#define PIP_TOUCH_REPORT_HEAD_SIZE     7
+#define PIP_TOUCH_REPORT_MAX_SIZE      127
+#define PIP_BTN_REPORT_HEAD_SIZE       6
+#define PIP_BTN_REPORT_MAX_SIZE        14
+#define PIP_WAKEUP_EVENT_SIZE          4
 
-#define GEN5_APP_DEEP_SLEEP_REPORT_ID   0xf0
-#define GEN5_DEEP_SLEEP_RESP_LENGTH     5
+#define PIP_NUMBER_OF_TOUCH_OFFSET  5
+#define PIP_NUMBER_OF_TOUCH_MASK    0x1f
+#define PIP_BUTTONS_OFFSET          5
+#define PIP_BUTTONS_MASK            0x0f
+#define PIP_GET_EVENT_ID(reg)       (((reg) >> 5) & 0x03)
+#define PIP_GET_TOUCH_ID(reg)       ((reg) & 0x1f)
+#define PIP_TOUCH_TYPE_FINGER	    0x00
+#define PIP_TOUCH_TYPE_PROXIMITY    0x01
+#define PIP_TOUCH_TYPE_HOVER	    0x02
+#define PIP_GET_TOUCH_TYPE(reg)     ((reg) & 0x07)
+
+#define RECORD_EVENT_NONE        0
+#define RECORD_EVENT_TOUCHDOWN	 1
+#define RECORD_EVENT_DISPLACE    2
+#define RECORD_EVENT_LIFTOFF     3
+
+#define PIP_SENSING_MODE_MUTUAL_CAP_FINE   0x00
+#define PIP_SENSING_MODE_SELF_CAP          0x02
+
+#define PIP_SET_PROXIMITY	0x49
+
+/* Macros for Gen5 */
+#define GEN5_BL_MAX_OUTPUT_LENGTH     0x0100
+#define GEN5_APP_MAX_OUTPUT_LENGTH    0x00fe
+
+#define GEN5_POWER_STATE_ACTIVE              0x01
+#define GEN5_POWER_STATE_LOOK_FOR_TOUCH      0x02
+#define GEN5_POWER_STATE_READY               0x03
+#define GEN5_POWER_STATE_IDLE                0x04
+#define GEN5_POWER_STATE_BTN_ONLY            0x05
+#define GEN5_POWER_STATE_OFF                 0x06
+
+#define GEN5_POWER_READY_MAX_INTRVL_TIME  50   /* Unit: ms */
+#define GEN5_POWER_IDLE_MAX_INTRVL_TIME   250  /* Unit: ms */
 
 #define GEN5_CMD_GET_PARAMETER		     0x05
 #define GEN5_CMD_SET_PARAMETER		     0x06
@@ -82,80 +111,12 @@
 
 #define GEN5_PARAMETER_DISABLE_PIP_REPORT    0x08
 
-#define GEN5_POWER_STATE_ACTIVE              0x01
-#define GEN5_POWER_STATE_LOOK_FOR_TOUCH      0x02
-#define GEN5_POWER_STATE_READY               0x03
-#define GEN5_POWER_STATE_IDLE                0x04
-#define GEN5_POWER_STATE_BTN_ONLY            0x05
-#define GEN5_POWER_STATE_OFF                 0x06
-
-#define GEN5_DEEP_SLEEP_STATE_MASK  0x03
-#define GEN5_DEEP_SLEEP_STATE_ON    0x00
-#define GEN5_DEEP_SLEEP_STATE_OFF   0x01
-
-#define GEN5_DEEP_SLEEP_OPCODE      0x08
-#define GEN5_DEEP_SLEEP_OPCODE_MASK 0x0f
-
-#define GEN5_POWER_READY_MAX_INTRVL_TIME  50   /* Unit: ms */
-#define GEN5_POWER_IDLE_MAX_INTRVL_TIME   250  /* Unit: ms */
-
-#define GEN5_CMD_REPORT_ID_OFFSET       4
-
-#define GEN5_RESP_REPORT_ID_OFFSET      2
-#define GEN5_RESP_RSVD_OFFSET           3
-#define     GEN5_RESP_RSVD_KEY          0x00
-#define GEN5_RESP_BL_SOP_OFFSET         4
-#define     GEN5_SOP_KEY                0x01  /* Start of Packet */
-#define     GEN5_EOP_KEY                0x17  /* End of Packet */
-#define GEN5_RESP_APP_CMD_OFFSET        4
-#define     GET_GEN5_CMD_CODE(reg)      ((reg) & 0x7f)
-
-#define VALID_CMD_RESP_HEADER(resp, cmd)				    \
-	(((resp)[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_APP_RESP_REPORT_ID) && \
-	((resp)[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY) &&	    \
-	(GET_GEN5_CMD_CODE((resp)[GEN5_RESP_APP_CMD_OFFSET]) == (cmd)))
-
-#define GEN5_MIN_BL_CMD_LENGTH           13
-#define GEN5_MIN_BL_RESP_LENGTH          11
-#define GEN5_MIN_APP_CMD_LENGTH          7
-#define GEN5_MIN_APP_RESP_LENGTH         5
-#define GEN5_UNSUPPORTED_CMD_RESP_LENGTH 6
-
-#define GEN5_RESP_LENGTH_OFFSET  0x00
-#define GEN5_RESP_LENGTH_SIZE    2
-
-#define GEN5_HID_DESCRIPTOR_SIZE      32
-#define GEN5_BL_HID_REPORT_ID         0xff
-#define GEN5_APP_HID_REPORT_ID        0xf7
-#define GEN5_BL_MAX_OUTPUT_LENGTH     0x0100
-#define GEN5_APP_MAX_OUTPUT_LENGTH    0x00fe
-
 #define GEN5_BL_REPORT_DESCRIPTOR_SIZE            0x1d
 #define GEN5_BL_REPORT_DESCRIPTOR_ID              0xfe
 #define GEN5_APP_REPORT_DESCRIPTOR_SIZE           0xee
 #define GEN5_APP_CONTRACT_REPORT_DESCRIPTOR_SIZE  0xfa
 #define GEN5_APP_REPORT_DESCRIPTOR_ID             0xf6
 
-#define GEN5_TOUCH_REPORT_ID         0x01
-#define GEN5_BTN_REPORT_ID           0x03
-#define GEN5_WAKEUP_EVENT_REPORT_ID  0x04
-#define GEN5_OLD_PUSH_BTN_REPORT_ID  0x05
-#define GEN5_PUSH_BTN_REPORT_ID      0x06
-
-#define GEN5_CMD_COMPLETE_SUCCESS(status) ((status) == 0x00)
-
-#define GEN5_BL_INITIATE_RESP_LEN            11
-#define GEN5_BL_FAIL_EXIT_RESP_LEN           11
-#define GEN5_BL_FAIL_EXIT_STATUS_CODE        0x0c
-#define GEN5_BL_VERIFY_INTEGRITY_RESP_LEN    12
-#define GEN5_BL_INTEGRITY_CHEKC_PASS         0x00
-#define GEN5_BL_BLOCK_WRITE_RESP_LEN         11
-#define GEN5_BL_READ_APP_INFO_RESP_LEN       31
-#define GEN5_CMD_CALIBRATE                   0x28
-#define CYAPA_SENSING_MODE_MUTUAL_CAP_FINE   0x00
-#define CYAPA_SENSING_MODE_SELF_CAP          0x02
-
-#define GEN5_CMD_RETRIEVE_DATA_STRUCTURE     0x24
 #define GEN5_RETRIEVE_MUTUAL_PWC_DATA        0x00
 #define GEN5_RETRIEVE_SELF_CAP_PWC_DATA      0x01
 
@@ -170,28 +131,19 @@
 #define GEN5_PANEL_SCAN_SELF_BASELINE        0x04
 #define GEN5_PANEL_SCAN_SELF_DIFFCOUNT       0x05
 
-/* The offset only valid for reterive PWC and panel scan commands */
+/* The offset is only valid for the retrieve PWC and panel scan commands */
 #define GEN5_RESP_DATA_STRUCTURE_OFFSET      10
 #define GEN5_PWC_DATA_ELEMENT_SIZE_MASK      0x07
 
-#define	GEN5_NUMBER_OF_TOUCH_OFFSET  5
-#define GEN5_NUMBER_OF_TOUCH_MASK    0x1f
-#define GEN5_BUTTONS_OFFSET          5
-#define GEN5_BUTTONS_MASK            0x0f
-#define GEN5_GET_EVENT_ID(reg)       (((reg) >> 5) & 0x03)
-#define GEN5_GET_TOUCH_ID(reg)       ((reg) & 0x1f)
 
-#define GEN5_PRODUCT_FAMILY_MASK        0xf000
-#define GEN5_PRODUCT_FAMILY_TRACKPAD    0x1000
-
-#define TSG_INVALID_CMD   0xff
-
-struct cyapa_gen5_touch_record {
+struct cyapa_pip_touch_record {
 	/*
 	 * Bit 7 - 3: reserved
 	 * Bit 2 - 0: touch type;
 	 *            0 : standard finger;
-	 *            1 - 15 : reserved.
+	 *            1 : proximity (supported starting with Gen5 TP).
+	 *            2 : finger hover (defined, but not used yet).
+	 *            3 - 15 : reserved.
 	 */
 	u8 touch_type;
 
@@ -221,7 +173,14 @@
 	/* Bit 15 - 8 of Y-axis coordinate of the touch in pixel. */
 	u8 y_hi;
 
-	/* Touch intensity in counts, pressure value. */
+	/*
+	 * The meaning of this value depends on the touch_type.
+	 * For the standard finger type:
+	 *	Touch intensity in counts, i.e. the pressure value.
+	 * For the proximity type (supported starting with Gen5 TP):
+	 *	The distance, in surface units, between the contact and
+	 *	the surface.
+	 */
 	u8 z;
 
 	/*
@@ -260,9 +219,9 @@
 	u8 orientation;
 } __packed;
 
-struct cyapa_gen5_report_data {
-	u8 report_head[GEN5_TOUCH_REPORT_HEAD_SIZE];
-	struct cyapa_gen5_touch_record touch_records[10];
+struct cyapa_pip_report_data {
+	u8 report_head[PIP_TOUCH_REPORT_HEAD_SIZE];
+	struct cyapa_pip_touch_record touch_records[10];
 } __packed;
 
 struct cyapa_tsg_bin_image_head {
@@ -272,6 +231,12 @@
 	u8 fw_major_version;
 	u8 fw_minor_version;
 	u8 fw_revision_control_number[8];
+	u8 silicon_id_hi;
+	u8 silicon_id_lo;
+	u8 chip_revision;
+	u8 family_id;
+	u8 bl_ver_maj;
+	u8 bl_ver_min;
 } __packed;
 
 struct cyapa_tsg_bin_image_data_record {
@@ -288,36 +253,36 @@
 	struct cyapa_tsg_bin_image_data_record records[0];
 } __packed;
 
-struct gen5_bl_packet_start {
+struct pip_bl_packet_start {
 	u8 sop;  /* Start of packet, must be 01h */
 	u8 cmd_code;
 	__le16 data_length;  /* Size of data parameter start from data[0] */
 } __packed;
 
-struct gen5_bl_packet_end {
+struct pip_bl_packet_end {
 	__le16 crc;
 	u8 eop;  /* End of packet, must be 17h */
 } __packed;
 
-struct gen5_bl_cmd_head {
+struct pip_bl_cmd_head {
 	__le16 addr;   /* Output report register address, must be 0004h */
 	/* Size of packet not including output report register address */
 	__le16 length;
 	u8 report_id;  /* Bootloader output report id, must be 40h */
 	u8 rsvd;  /* Reserved, must be 0 */
-	struct gen5_bl_packet_start packet_start;
+	struct pip_bl_packet_start packet_start;
 	u8 data[0];  /* Command data variable based on commands */
 } __packed;
 
 /* Initiate bootload command data structure. */
-struct gen5_bl_initiate_cmd_data {
+struct pip_bl_initiate_cmd_data {
 	/* Key must be "A5h 01h 02h 03h FFh FEh FDh 5Ah" */
 	u8 key[CYAPA_TSG_BL_KEY_SIZE];
 	u8 metadata_raw_parameter[CYAPA_TSG_FLASH_MAP_METADATA_SIZE];
 	__le16 metadata_crc;
 } __packed;
 
-struct gen5_bl_metadata_row_params {
+struct tsg_bl_metadata_row_params {
 	__le16 size;
 	__le16 maximum_size;
 	__le32 app_start;
@@ -332,13 +297,13 @@
 } __packed;
 
 /* Bootload program and verify row command data structure */
-struct gen5_bl_flash_row_head {
+struct tsg_bl_flash_row_head {
 	u8 flash_array_id;
 	__le16 flash_row_id;
 	u8 flash_data[0];
 } __packed;
 
-struct gen5_app_cmd_head {
+struct pip_app_cmd_head {
 	__le16 addr;   /* Output report register address, must be 0004h */
 	/* Size of packet not including output report register address */
 	__le16 length;
@@ -369,30 +334,26 @@
 	u8 data_id;
 } __packed;
 
-/* Variables to record latest gen5 trackpad power states. */
-#define GEN5_DEV_SET_PWR_STATE(cyapa, s)	((cyapa)->dev_pwr_mode = (s))
-#define GEN5_DEV_GET_PWR_STATE(cyapa)		((cyapa)->dev_pwr_mode)
-#define GEN5_DEV_SET_SLEEP_TIME(cyapa, t)	((cyapa)->dev_sleep_time = (t))
-#define GEN5_DEV_GET_SLEEP_TIME(cyapa)		((cyapa)->dev_sleep_time)
-#define GEN5_DEV_UNINIT_SLEEP_TIME(cyapa)	\
-		(((cyapa)->dev_sleep_time) == UNINIT_SLEEP_TIME)
+u8 pip_read_sys_info[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x02 };
+u8 pip_bl_read_app_info[] = { 0x04, 0x00, 0x0b, 0x00, 0x40, 0x00,
+		0x01, 0x3c, 0x00, 0x00, 0xb0, 0x42, 0x17
+	};
 
-
-static u8 cyapa_gen5_bl_cmd_key[] = { 0xa5, 0x01, 0x02, 0x03,
+static u8 cyapa_pip_bl_cmd_key[] = { 0xa5, 0x01, 0x02, 0x03,
 	0xff, 0xfe, 0xfd, 0x5a };
 
-static int cyapa_gen5_initialize(struct cyapa *cyapa)
+int cyapa_pip_cmd_state_initialize(struct cyapa *cyapa)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 
-	init_completion(&gen5_pip->cmd_ready);
-	atomic_set(&gen5_pip->cmd_issued, 0);
-	mutex_init(&gen5_pip->cmd_lock);
+	init_completion(&pip->cmd_ready);
+	atomic_set(&pip->cmd_issued, 0);
+	mutex_init(&pip->cmd_lock);
 
-	gen5_pip->resp_sort_func = NULL;
-	gen5_pip->in_progress_cmd = TSG_INVALID_CMD;
-	gen5_pip->resp_data = NULL;
-	gen5_pip->resp_len = NULL;
+	pip->resp_sort_func = NULL;
+	pip->in_progress_cmd = PIP_INVALID_CMD;
+	pip->resp_data = NULL;
+	pip->resp_len = NULL;
 
 	cyapa->dev_pwr_mode = UNINIT_PWR_MODE;
 	cyapa->dev_sleep_time = UNINIT_SLEEP_TIME;
@@ -401,7 +362,7 @@
 }
 
 /* Return negative errno, or else the number of bytes read. */
-static ssize_t cyapa_i2c_pip_read(struct cyapa *cyapa, u8 *buf, size_t size)
+ssize_t cyapa_i2c_pip_read(struct cyapa *cyapa, u8 *buf, size_t size)
 {
 	int ret;
 
@@ -415,14 +376,13 @@
 
 	if (ret != size)
 		return (ret < 0) ? ret : -EIO;
-
 	return size;
 }
 
 /**
  * Return a negative errno code, or zero on success.
  */
-static ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size)
+ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size)
 {
 	int ret;
 
@@ -441,10 +401,10 @@
  * This function is intended to drain all unread data from the Gen5 trackpad
  * before sending any command; otherwise, the interrupt line will be blocked.
  */
-static int cyapa_empty_pip_output_data(struct cyapa *cyapa,
+int cyapa_empty_pip_output_data(struct cyapa *cyapa,
 		u8 *buf, int *len, cb_sort func)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int length;
 	int report_count;
 	int empty_count;
@@ -476,13 +436,13 @@
 		if (empty_count > 5)
 			return 0;
 
-		error = cyapa_i2c_pip_read(cyapa, gen5_pip->empty_buf,
-				GEN5_RESP_LENGTH_SIZE);
+		error = cyapa_i2c_pip_read(cyapa, pip->empty_buf,
+				PIP_RESP_LENGTH_SIZE);
 		if (error < 0)
 			return error;
 
-		length = get_unaligned_le16(gen5_pip->empty_buf);
-		if (length == GEN5_RESP_LENGTH_SIZE) {
+		length = get_unaligned_le16(pip->empty_buf);
+		if (length == PIP_RESP_LENGTH_SIZE) {
 			empty_count++;
 			continue;
 		} else if (length > CYAPA_REG_MAP_SIZE) {
@@ -490,11 +450,11 @@
 			return -EINVAL;
 		} else if (length == 0) {
 			/* Application or bootloader launch data polled out. */
-			length = GEN5_RESP_LENGTH_SIZE;
+			length = PIP_RESP_LENGTH_SIZE;
 			if (buf && buf_len && func &&
-				func(cyapa, gen5_pip->empty_buf, length)) {
+				func(cyapa, pip->empty_buf, length)) {
 				length = min(buf_len, length);
-				memcpy(buf, gen5_pip->empty_buf, length);
+				memcpy(buf, pip->empty_buf, length);
 				*len = length;
 				/* Response found, success. */
 				return 0;
@@ -502,19 +462,19 @@
 			continue;
 		}
 
-		error = cyapa_i2c_pip_read(cyapa, gen5_pip->empty_buf, length);
+		error = cyapa_i2c_pip_read(cyapa, pip->empty_buf, length);
 		if (error < 0)
 			return error;
 
 		report_count--;
 		empty_count = 0;
-		length = get_unaligned_le16(gen5_pip->empty_buf);
-		if (length <= GEN5_RESP_LENGTH_SIZE) {
+		length = get_unaligned_le16(pip->empty_buf);
+		if (length <= PIP_RESP_LENGTH_SIZE) {
 			empty_count++;
 		} else if (buf && buf_len && func &&
-			func(cyapa, gen5_pip->empty_buf, length)) {
+			func(cyapa, pip->empty_buf, length)) {
 			length = min(buf_len, length);
-			memcpy(buf, gen5_pip->empty_buf, length);
+			memcpy(buf, pip->empty_buf, length);
 			*len = length;
 			/* Response found, success. */
 			return 0;
@@ -531,24 +491,24 @@
 		u8 *cmd, size_t cmd_len,
 		unsigned long timeout)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int error;
 
 	/* Wait for interrupt to set ready completion */
-	init_completion(&gen5_pip->cmd_ready);
+	init_completion(&pip->cmd_ready);
 
-	atomic_inc(&gen5_pip->cmd_issued);
+	atomic_inc(&pip->cmd_issued);
 	error = cyapa_i2c_pip_write(cyapa, cmd, cmd_len);
 	if (error) {
-		atomic_dec(&gen5_pip->cmd_issued);
+		atomic_dec(&pip->cmd_issued);
 		return (error < 0) ? error : -EIO;
 	}
 
 	/* Wait for interrupt to indicate command is completed. */
-	timeout = wait_for_completion_timeout(&gen5_pip->cmd_ready,
+	timeout = wait_for_completion_timeout(&pip->cmd_ready,
 				msecs_to_jiffies(timeout));
 	if (timeout == 0) {
-		atomic_dec(&gen5_pip->cmd_issued);
+		atomic_dec(&pip->cmd_issued);
 		return -ETIMEDOUT;
 	}
 
@@ -562,15 +522,15 @@
 		unsigned long timeout,
 		cb_sort func)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int tries;
 	int length;
 	int error;
 
-	atomic_inc(&gen5_pip->cmd_issued);
+	atomic_inc(&pip->cmd_issued);
 	error = cyapa_i2c_pip_write(cyapa, cmd, cmd_len);
 	if (error) {
-		atomic_dec(&gen5_pip->cmd_issued);
+		atomic_dec(&pip->cmd_issued);
 		return error < 0 ? error : -EIO;
 	}
 
@@ -591,11 +551,11 @@
 			error = error ? error : -ETIMEDOUT;
 	}
 
-	atomic_dec(&gen5_pip->cmd_issued);
+	atomic_dec(&pip->cmd_issued);
 	return error;
 }
 
-static int cyapa_i2c_pip_cmd_irq_sync(
+int cyapa_i2c_pip_cmd_irq_sync(
 		struct cyapa *cyapa,
 		u8 *cmd, int cmd_len,
 		u8 *resp_data, int *resp_len,
@@ -603,34 +563,34 @@
 		cb_sort func,
 		bool irq_mode)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int error;
 
 	if (!cmd || !cmd_len)
 		return -EINVAL;
 
 	/* Commands must be serialized. */
-	error = mutex_lock_interruptible(&gen5_pip->cmd_lock);
+	error = mutex_lock_interruptible(&pip->cmd_lock);
 	if (error)
 		return error;
 
-	gen5_pip->resp_sort_func = func;
-	gen5_pip->resp_data = resp_data;
-	gen5_pip->resp_len = resp_len;
+	pip->resp_sort_func = func;
+	pip->resp_data = resp_data;
+	pip->resp_len = resp_len;
 
-	if (cmd_len >= GEN5_MIN_APP_CMD_LENGTH &&
-			cmd[4] == GEN5_APP_CMD_REPORT_ID) {
+	if (cmd_len >= PIP_MIN_APP_CMD_LENGTH &&
+			cmd[4] == PIP_APP_CMD_REPORT_ID) {
 		/* Application command */
-		gen5_pip->in_progress_cmd = cmd[6] & 0x7f;
-	} else if (cmd_len >= GEN5_MIN_BL_CMD_LENGTH &&
-			cmd[4] == GEN5_BL_CMD_REPORT_ID) {
+		pip->in_progress_cmd = cmd[6] & 0x7f;
+	} else if (cmd_len >= PIP_MIN_BL_CMD_LENGTH &&
+			cmd[4] == PIP_BL_CMD_REPORT_ID) {
 		/* Bootloader command */
-		gen5_pip->in_progress_cmd = cmd[7];
+		pip->in_progress_cmd = cmd[7];
 	}
 
 	/* Send command data, wait and read output response data's length. */
 	if (irq_mode) {
-		gen5_pip->is_irq_mode = true;
+		pip->is_irq_mode = true;
 		error = cyapa_do_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
 							timeout);
 		if (error == -ETIMEDOUT && resp_data &&
@@ -646,54 +606,54 @@
 				error = error ? error : -ETIMEDOUT;
 		}
 	} else {
-		gen5_pip->is_irq_mode = false;
+		pip->is_irq_mode = false;
 		error = cyapa_do_i2c_pip_cmd_polling(cyapa, cmd, cmd_len,
 				resp_data, resp_len, timeout, func);
 	}
 
-	gen5_pip->resp_sort_func = NULL;
-	gen5_pip->resp_data = NULL;
-	gen5_pip->resp_len = NULL;
-	gen5_pip->in_progress_cmd = TSG_INVALID_CMD;
+	pip->resp_sort_func = NULL;
+	pip->resp_data = NULL;
+	pip->resp_len = NULL;
+	pip->in_progress_cmd = PIP_INVALID_CMD;
 
-	mutex_unlock(&gen5_pip->cmd_lock);
+	mutex_unlock(&pip->cmd_lock);
 	return error;
 }
 
-static bool cyapa_gen5_sort_tsg_pip_bl_resp_data(struct cyapa *cyapa,
+bool cyapa_sort_tsg_pip_bl_resp_data(struct cyapa *cyapa,
 		u8 *data, int len)
 {
-	if (!data || len < GEN5_MIN_BL_RESP_LENGTH)
+	if (!data || len < PIP_MIN_BL_RESP_LENGTH)
 		return false;
 
 	/* Bootloader input report id 30h */
-	if (data[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_BL_RESP_REPORT_ID &&
-			data[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY &&
-			data[GEN5_RESP_BL_SOP_OFFSET] == GEN5_SOP_KEY)
+	if (data[PIP_RESP_REPORT_ID_OFFSET] == PIP_BL_RESP_REPORT_ID &&
+			data[PIP_RESP_RSVD_OFFSET] == PIP_RESP_RSVD_KEY &&
+			data[PIP_RESP_BL_SOP_OFFSET] == PIP_SOP_KEY)
 		return true;
 
 	return false;
 }
 
-static bool cyapa_gen5_sort_tsg_pip_app_resp_data(struct cyapa *cyapa,
+bool cyapa_sort_tsg_pip_app_resp_data(struct cyapa *cyapa,
 		u8 *data, int len)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int resp_len;
 
-	if (!data || len < GEN5_MIN_APP_RESP_LENGTH)
+	if (!data || len < PIP_MIN_APP_RESP_LENGTH)
 		return false;
 
-	if (data[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_APP_RESP_REPORT_ID &&
-			data[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY) {
-		resp_len = get_unaligned_le16(&data[GEN5_RESP_LENGTH_OFFSET]);
-		if (GET_GEN5_CMD_CODE(data[GEN5_RESP_APP_CMD_OFFSET]) == 0x00 &&
-			resp_len == GEN5_UNSUPPORTED_CMD_RESP_LENGTH &&
-			data[5] == gen5_pip->in_progress_cmd) {
+	if (data[PIP_RESP_REPORT_ID_OFFSET] == PIP_APP_RESP_REPORT_ID &&
+			data[PIP_RESP_RSVD_OFFSET] == PIP_RESP_RSVD_KEY) {
+		resp_len = get_unaligned_le16(&data[PIP_RESP_LENGTH_OFFSET]);
+		if (GET_PIP_CMD_CODE(data[PIP_RESP_APP_CMD_OFFSET]) == 0x00 &&
+			resp_len == PIP_UNSUPPORTED_CMD_RESP_LENGTH &&
+			data[5] == pip->in_progress_cmd) {
 			/* Unsupported command code */
 			return false;
-		} else if (GET_GEN5_CMD_CODE(data[GEN5_RESP_APP_CMD_OFFSET]) ==
-				gen5_pip->in_progress_cmd) {
+		} else if (GET_PIP_CMD_CODE(data[PIP_RESP_APP_CMD_OFFSET]) ==
+				pip->in_progress_cmd) {
 			/* Correct command response received */
 			return true;
 		}
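The two sorting callbacks above are the response-matching half of the command engine: every buffered report is fed through the cb_sort predicate, and only a report whose report ID and command code match the command in flight is accepted as its response. A minimal sketch of such a predicate, in the same style; the offsets and the 0x1f report-ID value are illustrative assumptions standing in for the PIP_RESP_* constants:

static bool example_sort_app_resp(struct cyapa *cyapa, u8 *data, int len)
{
	/* Offset 2 / value 0x1f mimic the APP response report ID check. */
	if (!data || len < 5)
		return false;		/* too short to be a response */
	if (data[2] != 0x1f)
		return false;		/* some other buffered report */
	/* Low 7 bits of the command byte must match the command in flight. */
	return (data[4] & 0x7f) == cyapa->cmd_states.pip.in_progress_cmd;
}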
@@ -702,10 +662,10 @@
 	return false;
 }
 
-static bool cyapa_gen5_sort_application_launch_data(struct cyapa *cyapa,
+static bool cyapa_sort_pip_application_launch_data(struct cyapa *cyapa,
 		u8 *buf, int len)
 {
-	if (buf == NULL || len < GEN5_RESP_LENGTH_SIZE)
+	if (buf == NULL || len < PIP_RESP_LENGTH_SIZE)
 		return false;
 
 	/*
@@ -718,25 +678,25 @@
 	return false;
 }
 
-static bool cyapa_gen5_sort_hid_descriptor_data(struct cyapa *cyapa,
+static bool cyapa_sort_gen5_hid_descriptor_data(struct cyapa *cyapa,
 		u8 *buf, int len)
 {
 	int resp_len;
 	int max_output_len;
 
 	/* Check hid descriptor. */
-	if (len != GEN5_HID_DESCRIPTOR_SIZE)
+	if (len != PIP_HID_DESCRIPTOR_SIZE)
 		return false;
 
-	resp_len = get_unaligned_le16(&buf[GEN5_RESP_LENGTH_OFFSET]);
+	resp_len = get_unaligned_le16(&buf[PIP_RESP_LENGTH_OFFSET]);
 	max_output_len = get_unaligned_le16(&buf[16]);
-	if (resp_len == GEN5_HID_DESCRIPTOR_SIZE) {
-		if (buf[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_BL_HID_REPORT_ID &&
+	if (resp_len == PIP_HID_DESCRIPTOR_SIZE) {
+		if (buf[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_BL_REPORT_ID &&
 				max_output_len == GEN5_BL_MAX_OUTPUT_LENGTH) {
 			/* BL mode HID Descriptor */
 			return true;
-		} else if ((buf[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_APP_HID_REPORT_ID) &&
+		} else if ((buf[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_HID_APP_REPORT_ID) &&
 				max_output_len == GEN5_APP_MAX_OUTPUT_LENGTH) {
 			/* APP mode HID Descriptor */
 			return true;
@@ -746,21 +706,21 @@
 	return false;
 }
 
-static bool cyapa_gen5_sort_deep_sleep_data(struct cyapa *cyapa,
+static bool cyapa_sort_pip_deep_sleep_data(struct cyapa *cyapa,
 		u8 *buf, int len)
 {
-	if (len == GEN5_DEEP_SLEEP_RESP_LENGTH &&
-		buf[GEN5_RESP_REPORT_ID_OFFSET] ==
-			GEN5_APP_DEEP_SLEEP_REPORT_ID &&
-		(buf[4] & GEN5_DEEP_SLEEP_OPCODE_MASK) ==
-			GEN5_DEEP_SLEEP_OPCODE)
+	if (len == PIP_DEEP_SLEEP_RESP_LENGTH &&
+		buf[PIP_RESP_REPORT_ID_OFFSET] ==
+			PIP_APP_DEEP_SLEEP_REPORT_ID &&
+		(buf[4] & PIP_DEEP_SLEEP_OPCODE_MASK) ==
+			PIP_DEEP_SLEEP_OPCODE)
 		return true;
 	return false;
 }
 
 static int gen5_idle_state_parse(struct cyapa *cyapa)
 {
-	u8 resp_data[GEN5_HID_DESCRIPTOR_SIZE];
+	u8 resp_data[PIP_HID_DESCRIPTOR_SIZE];
 	int max_output_len;
 	int length;
 	u8 cmd[2];
@@ -778,9 +738,9 @@
 	if (ret != 3)
 		return ret < 0 ? ret : -EIO;
 
-	length = get_unaligned_le16(&resp_data[GEN5_RESP_LENGTH_OFFSET]);
-	if (length == GEN5_RESP_LENGTH_SIZE) {
-		/* Normal state of Gen5 with no data to respose */
+	length = get_unaligned_le16(&resp_data[PIP_RESP_LENGTH_OFFSET]);
+	if (length == PIP_RESP_LENGTH_SIZE) {
+		/* Normal state of Gen5 with no response data pending */
 		cyapa->gen = CYAPA_GEN5;
 
 		cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
@@ -788,30 +748,30 @@
 		/* Read description from trackpad device */
 		cmd[0] = 0x01;
 		cmd[1] = 0x00;
-		length = GEN5_HID_DESCRIPTOR_SIZE;
+		length = PIP_HID_DESCRIPTOR_SIZE;
 		error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
-				cmd, GEN5_RESP_LENGTH_SIZE,
+				cmd, PIP_RESP_LENGTH_SIZE,
 				resp_data, &length,
 				300,
-				cyapa_gen5_sort_hid_descriptor_data,
+				cyapa_sort_gen5_hid_descriptor_data,
 				false);
 		if (error)
 			return error;
 
 		length = get_unaligned_le16(
-				&resp_data[GEN5_RESP_LENGTH_OFFSET]);
+				&resp_data[PIP_RESP_LENGTH_OFFSET]);
 		max_output_len = get_unaligned_le16(&resp_data[16]);
-		if ((length == GEN5_HID_DESCRIPTOR_SIZE ||
-				length == GEN5_RESP_LENGTH_SIZE) &&
-			(resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_HID_REPORT_ID) &&
+		if ((length == PIP_HID_DESCRIPTOR_SIZE ||
+				length == PIP_RESP_LENGTH_SIZE) &&
+			(resp_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_HID_BL_REPORT_ID) &&
 			max_output_len == GEN5_BL_MAX_OUTPUT_LENGTH) {
 			/* BL mode HID Description read */
 			cyapa->state = CYAPA_STATE_GEN5_BL;
-		} else if ((length == GEN5_HID_DESCRIPTOR_SIZE ||
-				length == GEN5_RESP_LENGTH_SIZE) &&
-			(resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_APP_HID_REPORT_ID) &&
+		} else if ((length == PIP_HID_DESCRIPTOR_SIZE ||
+				length == PIP_RESP_LENGTH_SIZE) &&
+			(resp_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_HID_APP_REPORT_ID) &&
 			max_output_len == GEN5_APP_MAX_OUTPUT_LENGTH) {
 			/* APP mode HID Description read */
 			cyapa->state = CYAPA_STATE_GEN5_APP;
@@ -839,14 +799,14 @@
 	 * or report any touch or button data.
 	 */
 	ret = cyapa_i2c_pip_read(cyapa, resp_data,
-			GEN5_HID_DESCRIPTOR_SIZE);
-	if (ret != GEN5_HID_DESCRIPTOR_SIZE)
+			PIP_HID_DESCRIPTOR_SIZE);
+	if (ret != PIP_HID_DESCRIPTOR_SIZE)
 		return ret < 0 ? ret : -EIO;
-	length = get_unaligned_le16(&resp_data[GEN5_RESP_LENGTH_OFFSET]);
+	length = get_unaligned_le16(&resp_data[PIP_RESP_LENGTH_OFFSET]);
 	max_output_len = get_unaligned_le16(&resp_data[16]);
-	if (length == GEN5_RESP_LENGTH_SIZE) {
-		if (reg_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_HID_REPORT_ID) {
+	if (length == PIP_RESP_LENGTH_SIZE) {
+		if (reg_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_HID_BL_REPORT_ID) {
 			/*
 			 * BL mode HID Description has been previously
 			 * read out.
@@ -861,15 +821,15 @@
 			cyapa->gen = CYAPA_GEN5;
 			cyapa->state = CYAPA_STATE_GEN5_APP;
 		}
-	} else if (length == GEN5_HID_DESCRIPTOR_SIZE &&
-			resp_data[2] == GEN5_BL_HID_REPORT_ID &&
+	} else if (length == PIP_HID_DESCRIPTOR_SIZE &&
+			resp_data[2] == PIP_HID_BL_REPORT_ID &&
 			max_output_len == GEN5_BL_MAX_OUTPUT_LENGTH) {
 		/* BL mode HID Description read. */
 		cyapa->gen = CYAPA_GEN5;
 		cyapa->state = CYAPA_STATE_GEN5_BL;
-	} else if (length == GEN5_HID_DESCRIPTOR_SIZE &&
-			(resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_APP_HID_REPORT_ID) &&
+	} else if (length == PIP_HID_DESCRIPTOR_SIZE &&
+			(resp_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_HID_APP_REPORT_ID) &&
 			max_output_len == GEN5_APP_MAX_OUTPUT_LENGTH) {
 		/* APP mode HID Description read. */
 		cyapa->gen = CYAPA_GEN5;
@@ -886,22 +846,22 @@
 {
 	int length;
 
-	length = get_unaligned_le16(&reg_data[GEN5_RESP_LENGTH_OFFSET]);
-	switch (reg_data[GEN5_RESP_REPORT_ID_OFFSET]) {
-	case GEN5_TOUCH_REPORT_ID:
-		if (length < GEN5_TOUCH_REPORT_HEAD_SIZE ||
-			length > GEN5_TOUCH_REPORT_MAX_SIZE)
+	length = get_unaligned_le16(&reg_data[PIP_RESP_LENGTH_OFFSET]);
+	switch (reg_data[PIP_RESP_REPORT_ID_OFFSET]) {
+	case PIP_TOUCH_REPORT_ID:
+		if (length < PIP_TOUCH_REPORT_HEAD_SIZE ||
+			length > PIP_TOUCH_REPORT_MAX_SIZE)
 			return -EINVAL;
 		break;
-	case GEN5_BTN_REPORT_ID:
+	case PIP_BTN_REPORT_ID:
 	case GEN5_OLD_PUSH_BTN_REPORT_ID:
-	case GEN5_PUSH_BTN_REPORT_ID:
-		if (length < GEN5_BTN_REPORT_HEAD_SIZE ||
-			length > GEN5_BTN_REPORT_MAX_SIZE)
+	case PIP_PUSH_BTN_REPORT_ID:
+		if (length < PIP_BTN_REPORT_HEAD_SIZE ||
+			length > PIP_BTN_REPORT_MAX_SIZE)
 			return -EINVAL;
 		break;
-	case GEN5_WAKEUP_EVENT_REPORT_ID:
-		if (length != GEN5_WAKEUP_EVENT_SIZE)
+	case PIP_WAKEUP_EVENT_REPORT_ID:
+		if (length != PIP_WAKEUP_EVENT_SIZE)
 			return -EINVAL;
 		break;
 	default:
@@ -915,7 +875,7 @@
 
 static int gen5_cmd_resp_header_parse(struct cyapa *cyapa, u8 *reg_data)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int length;
 	int ret;
 
@@ -924,15 +884,15 @@
 	 * otherwise the Gen5 trackpad cannot respond to the next command
 	 * or report any touch or button data.
 	 */
-	length = get_unaligned_le16(&reg_data[GEN5_RESP_LENGTH_OFFSET]);
-	ret = cyapa_i2c_pip_read(cyapa, gen5_pip->empty_buf, length);
+	length = get_unaligned_le16(&reg_data[PIP_RESP_LENGTH_OFFSET]);
+	ret = cyapa_i2c_pip_read(cyapa, pip->empty_buf, length);
 	if (ret != length)
 		return ret < 0 ? ret : -EIO;
 
-	if (length == GEN5_RESP_LENGTH_SIZE) {
+	if (length == PIP_RESP_LENGTH_SIZE) {
 		/* The previous command has already read all the data out. */
-		if (reg_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_RESP_REPORT_ID) {
+		if (reg_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_BL_RESP_REPORT_ID) {
 			/* Gen5 BL command response data detected */
 			cyapa->gen = CYAPA_GEN5;
 			cyapa->state = CYAPA_STATE_GEN5_BL;
@@ -941,21 +901,21 @@
 			cyapa->gen = CYAPA_GEN5;
 			cyapa->state = CYAPA_STATE_GEN5_APP;
 		}
-	} else if ((gen5_pip->empty_buf[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_RESP_REPORT_ID) &&
-			(gen5_pip->empty_buf[GEN5_RESP_RSVD_OFFSET] ==
-				GEN5_RESP_RSVD_KEY) &&
-			(gen5_pip->empty_buf[GEN5_RESP_BL_SOP_OFFSET] ==
-				GEN5_SOP_KEY) &&
-			(gen5_pip->empty_buf[length - 1] ==
-				GEN5_EOP_KEY)) {
+	} else if ((pip->empty_buf[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_BL_RESP_REPORT_ID) &&
+			(pip->empty_buf[PIP_RESP_RSVD_OFFSET] ==
+				PIP_RESP_RSVD_KEY) &&
+			(pip->empty_buf[PIP_RESP_BL_SOP_OFFSET] ==
+				PIP_SOP_KEY) &&
+			(pip->empty_buf[length - 1] ==
+				PIP_EOP_KEY)) {
 		/* Gen5 BL command response data detected */
 		cyapa->gen = CYAPA_GEN5;
 		cyapa->state = CYAPA_STATE_GEN5_BL;
-	} else if (gen5_pip->empty_buf[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_APP_RESP_REPORT_ID &&
-			gen5_pip->empty_buf[GEN5_RESP_RSVD_OFFSET] ==
-				GEN5_RESP_RSVD_KEY) {
+	} else if (pip->empty_buf[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_APP_RESP_REPORT_ID &&
+			pip->empty_buf[PIP_RESP_RSVD_OFFSET] ==
+				PIP_RESP_RSVD_KEY) {
 		/* Gen5 APP command response data detected */
 		cyapa->gen = CYAPA_GEN5;
 		cyapa->state = CYAPA_STATE_GEN5_APP;
@@ -977,12 +937,12 @@
 	cyapa->state = CYAPA_STATE_NO_DEVICE;
 
 	/* Parse based on Gen5 characteristic registers and bits */
-	length = get_unaligned_le16(&reg_data[GEN5_RESP_LENGTH_OFFSET]);
-	if (length == 0 || length == GEN5_RESP_LENGTH_SIZE) {
+	length = get_unaligned_le16(&reg_data[PIP_RESP_LENGTH_OFFSET]);
+	if (length == 0 || length == PIP_RESP_LENGTH_SIZE) {
 		gen5_idle_state_parse(cyapa);
-	} else if (length == GEN5_HID_DESCRIPTOR_SIZE &&
-			(reg_data[2] == GEN5_BL_HID_REPORT_ID ||
-				reg_data[2] == GEN5_APP_HID_REPORT_ID)) {
+	} else if (length == PIP_HID_DESCRIPTOR_SIZE &&
+			(reg_data[2] == PIP_HID_BL_REPORT_ID ||
+				reg_data[2] == PIP_HID_APP_REPORT_ID)) {
 		gen5_hid_description_header_parse(cyapa, reg_data);
 	} else if ((length == GEN5_APP_REPORT_DESCRIPTOR_SIZE ||
 			length == GEN5_APP_CONTRACT_REPORT_DESCRIPTOR_SIZE) &&
@@ -992,17 +952,17 @@
 		cyapa->state = CYAPA_STATE_GEN5_APP;
 	} else if (length == GEN5_BL_REPORT_DESCRIPTOR_SIZE &&
 			reg_data[2] == GEN5_BL_REPORT_DESCRIPTOR_ID) {
-		/* 0x1D 0x00 0xFE is Gen5 BL report descriptior header. */
+		/* 0x1D 0x00 0xFE is Gen5 BL report descriptor header. */
 		cyapa->gen = CYAPA_GEN5;
 		cyapa->state = CYAPA_STATE_GEN5_BL;
-	} else if (reg_data[2] == GEN5_TOUCH_REPORT_ID ||
-			reg_data[2] == GEN5_BTN_REPORT_ID ||
+	} else if (reg_data[2] == PIP_TOUCH_REPORT_ID ||
+			reg_data[2] == PIP_BTN_REPORT_ID ||
 			reg_data[2] == GEN5_OLD_PUSH_BTN_REPORT_ID ||
-			reg_data[2] == GEN5_PUSH_BTN_REPORT_ID ||
-			reg_data[2] == GEN5_WAKEUP_EVENT_REPORT_ID) {
+			reg_data[2] == PIP_PUSH_BTN_REPORT_ID ||
+			reg_data[2] == PIP_WAKEUP_EVENT_REPORT_ID) {
 		gen5_report_data_header_parse(cyapa, reg_data);
-	} else if (reg_data[2] == GEN5_BL_RESP_REPORT_ID ||
-			reg_data[2] == GEN5_APP_RESP_REPORT_ID) {
+	} else if (reg_data[2] == PIP_BL_RESP_REPORT_ID ||
+			reg_data[2] == PIP_APP_RESP_REPORT_ID) {
 		gen5_cmd_resp_header_parse(cyapa, reg_data);
 	}
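Every branch above dispatches on the same on-wire header: a two-byte little-endian length followed by a report ID byte. A standalone sketch of that header decode (struct and function names are hypothetical; the offsets follow the PIP_RESP_LENGTH_OFFSET / PIP_RESP_REPORT_ID_OFFSET usage above):

#include <stdbool.h>
#include <stdint.h>

struct example_pip_header {
	uint16_t len;		/* total length, LE16 at offset 0 */
	uint8_t report_id;	/* report ID at offset 2 */
};

static bool example_parse_pip_header(const uint8_t *buf, int n,
				     struct example_pip_header *h)
{
	if (!buf || n < 3)
		return false;
	h->len = (uint16_t)buf[0] | ((uint16_t)buf[1] << 8);
	h->report_id = buf[2];
	return true;	/* len == 2 means idle: only the length field itself */
}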
 
@@ -1023,14 +983,25 @@
 	return -EAGAIN;
 }
 
-static int cyapa_gen5_bl_initiate(struct cyapa *cyapa,
-		const struct firmware *fw)
+static struct cyapa_tsg_bin_image_data_record *
+cyapa_get_image_record_data_num(const struct firmware *fw,
+		int *record_num)
 {
-	struct cyapa_tsg_bin_image *image;
-	struct gen5_bl_cmd_head *bl_cmd_head;
-	struct gen5_bl_packet_start *bl_packet_start;
-	struct gen5_bl_initiate_cmd_data *cmd_data;
-	struct gen5_bl_packet_end *bl_packet_end;
+	int head_size;
+
+	head_size = fw->data[0] + 1;
+	*record_num = (fw->size - head_size) /
+			sizeof(struct cyapa_tsg_bin_image_data_record);
+	return (struct cyapa_tsg_bin_image_data_record *)&fw->data[head_size];
+}
+
+int cyapa_pip_bl_initiate(struct cyapa *cyapa, const struct firmware *fw)
+{
+	struct cyapa_tsg_bin_image_data_record *image_records;
+	struct pip_bl_cmd_head *bl_cmd_head;
+	struct pip_bl_packet_start *bl_packet_start;
+	struct pip_bl_initiate_cmd_data *cmd_data;
+	struct pip_bl_packet_end *bl_packet_end;
 	u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
 	int cmd_len;
 	u16 cmd_data_len;
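The new cyapa_get_image_record_data_num() helper above captures the TSG .bin image layout this file relies on: byte 0 holds the header size minus one, fixed-size flash-row records follow, and the APP_INTEGRITY row is always the last record. A user-space sketch of the same arithmetic; the 133-byte record size is an assumption for illustration (1 + 2 + 2 + 128 bytes of record fields):

#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_RECORD_SIZE 133		/* assumed record size */

static const uint8_t *example_image_records(const uint8_t *fw, size_t size,
					    int *nrecords)
{
	size_t head_size = (size_t)fw[0] + 1;	/* header is self-sized */

	*nrecords = (int)((size - head_size) / EXAMPLE_RECORD_SIZE);
	return fw + head_size;			/* first flash-row record */
}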
@@ -1046,30 +1017,28 @@
 	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
 
 	memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
-	bl_cmd_head = (struct gen5_bl_cmd_head *)cmd;
+	bl_cmd_head = (struct pip_bl_cmd_head *)cmd;
 	cmd_data_len = CYAPA_TSG_BL_KEY_SIZE + CYAPA_TSG_FLASH_MAP_BLOCK_SIZE;
-	cmd_len = sizeof(struct gen5_bl_cmd_head) + cmd_data_len +
-		  sizeof(struct gen5_bl_packet_end);
+	cmd_len = sizeof(struct pip_bl_cmd_head) + cmd_data_len +
+		  sizeof(struct pip_bl_packet_end);
 
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
 	put_unaligned_le16(cmd_len - 2, &bl_cmd_head->length);
-	bl_cmd_head->report_id = GEN5_BL_CMD_REPORT_ID;
+	bl_cmd_head->report_id = PIP_BL_CMD_REPORT_ID;
 
 	bl_packet_start = &bl_cmd_head->packet_start;
-	bl_packet_start->sop = GEN5_SOP_KEY;
-	bl_packet_start->cmd_code = GEN5_BL_CMD_INITIATE_BL;
+	bl_packet_start->sop = PIP_SOP_KEY;
+	bl_packet_start->cmd_code = PIP_BL_CMD_INITIATE_BL;
 	/* 8 key bytes and 128 bytes block size */
 	put_unaligned_le16(cmd_data_len, &bl_packet_start->data_length);
 
-	cmd_data = (struct gen5_bl_initiate_cmd_data *)bl_cmd_head->data;
-	memcpy(cmd_data->key, cyapa_gen5_bl_cmd_key, CYAPA_TSG_BL_KEY_SIZE);
+	cmd_data = (struct pip_bl_initiate_cmd_data *)bl_cmd_head->data;
+	memcpy(cmd_data->key, cyapa_pip_bl_cmd_key, CYAPA_TSG_BL_KEY_SIZE);
 
-	/* Copy 60 bytes Meta Data Row Parameters */
-	image = (struct cyapa_tsg_bin_image *)fw->data;
-	records_num = (fw->size - sizeof(struct cyapa_tsg_bin_image_head)) /
-				sizeof(struct cyapa_tsg_bin_image_data_record);
+	image_records = cyapa_get_image_record_data_num(fw, &records_num);
+
 	/* APP_INTEGRITY row is always the last row block */
-	data = image->records[records_num - 1].record_data;
+	data = image_records[records_num - 1].record_data;
 	memcpy(cmd_data->metadata_raw_parameter, data,
 		CYAPA_TSG_FLASH_MAP_METADATA_SIZE);
 
@@ -1077,47 +1046,47 @@
 				CYAPA_TSG_FLASH_MAP_METADATA_SIZE);
 	put_unaligned_le16(meta_data_crc, &cmd_data->metadata_crc);
 
-	bl_packet_end = (struct gen5_bl_packet_end *)(bl_cmd_head->data +
+	bl_packet_end = (struct pip_bl_packet_end *)(bl_cmd_head->data +
 				cmd_data_len);
 	cmd_crc = crc_itu_t(0xffff, (u8 *)bl_packet_start,
-		sizeof(struct gen5_bl_packet_start) + cmd_data_len);
+		sizeof(struct pip_bl_packet_start) + cmd_data_len);
 	put_unaligned_le16(cmd_crc, &bl_packet_end->crc);
-	bl_packet_end->eop = GEN5_EOP_KEY;
+	bl_packet_end->eop = PIP_EOP_KEY;
 
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, cmd_len,
 			resp_data, &resp_len, 12000,
-			cyapa_gen5_sort_tsg_pip_bl_resp_data, true);
-	if (error || resp_len != GEN5_BL_INITIATE_RESP_LEN ||
-			resp_data[2] != GEN5_BL_RESP_REPORT_ID ||
-			!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			cyapa_sort_tsg_pip_bl_resp_data, true);
+	if (error || resp_len != PIP_BL_INITIATE_RESP_LEN ||
+			resp_data[2] != PIP_BL_RESP_REPORT_ID ||
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error ? error : -EAGAIN;
 
 	return 0;
 }
 
-static bool cyapa_gen5_sort_bl_exit_data(struct cyapa *cyapa, u8 *buf, int len)
+static bool cyapa_sort_pip_bl_exit_data(struct cyapa *cyapa, u8 *buf, int len)
 {
-	if (buf == NULL || len < GEN5_RESP_LENGTH_SIZE)
+	if (buf == NULL || len < PIP_RESP_LENGTH_SIZE)
 		return false;
 
 	if (buf[0] == 0 && buf[1] == 0)
 		return true;
 
 	/* Exit bootloader failed for some reason. */
-	if (len == GEN5_BL_FAIL_EXIT_RESP_LEN &&
-			buf[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_RESP_REPORT_ID &&
-			buf[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY &&
-			buf[GEN5_RESP_BL_SOP_OFFSET] == GEN5_SOP_KEY &&
-			buf[10] == GEN5_EOP_KEY)
+	if (len == PIP_BL_FAIL_EXIT_RESP_LEN &&
+			buf[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_BL_RESP_REPORT_ID &&
+			buf[PIP_RESP_RSVD_OFFSET] == PIP_RESP_RSVD_KEY &&
+			buf[PIP_RESP_BL_SOP_OFFSET] == PIP_SOP_KEY &&
+			buf[10] == PIP_EOP_KEY)
 		return true;
 
 	return false;
 }
 
-static int cyapa_gen5_bl_exit(struct cyapa *cyapa)
+int cyapa_pip_bl_exit(struct cyapa *cyapa)
 {
 
 	u8 bl_gen5_bl_exit[] = { 0x04, 0x00,
@@ -1132,13 +1101,13 @@
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			bl_gen5_bl_exit, sizeof(bl_gen5_bl_exit),
 			resp_data, &resp_len,
-			5000, cyapa_gen5_sort_bl_exit_data, false);
+			5000, cyapa_sort_pip_bl_exit_data, false);
 	if (error)
 		return error;
 
-	if (resp_len == GEN5_BL_FAIL_EXIT_RESP_LEN ||
-			resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_RESP_REPORT_ID)
+	if (resp_len == PIP_BL_FAIL_EXIT_RESP_LEN ||
+			resp_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_BL_RESP_REPORT_ID)
 		return -EAGAIN;
 
 	if (resp_data[0] == 0x00 && resp_data[1] == 0x00)
@@ -1147,7 +1116,7 @@
 	return -ENODEV;
 }
 
-static int cyapa_gen5_bl_enter(struct cyapa *cyapa)
+int cyapa_pip_bl_enter(struct cyapa *cyapa)
 {
 	u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2F, 0x00, 0x01 };
 	u8 resp_data[2];
@@ -1157,15 +1126,12 @@
 	error = cyapa_poll_state(cyapa, 500);
 	if (error < 0)
 		return error;
-	if (cyapa->gen != CYAPA_GEN5)
-		return -EINVAL;
 
-	/* Already in Gen5 BL. Skipping exit. */
-	if (cyapa->state == CYAPA_STATE_GEN5_BL)
+	/* Already in bootloader mode; skip exiting the application. */
+	if (cyapa_is_pip_bl_mode(cyapa))
 		return 0;
-
-	if (cyapa->state != CYAPA_STATE_GEN5_APP)
-		return -EAGAIN;
+	else if (!cyapa_is_pip_app_mode(cyapa))
+		return -EINVAL;
 
 	/* Try to dump all buffered report data before sending any command. */
 	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
@@ -1179,39 +1145,79 @@
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			5000, cyapa_gen5_sort_application_launch_data,
+			5000, cyapa_sort_pip_application_launch_data,
 			true);
 	if (error || resp_data[0] != 0x00 || resp_data[1] != 0x00)
 		return error < 0 ? error : -EAGAIN;
 
 	cyapa->operational = false;
-	cyapa->state = CYAPA_STATE_GEN5_BL;
+	if (cyapa->gen == CYAPA_GEN5)
+		cyapa->state = CYAPA_STATE_GEN5_BL;
+	else if (cyapa->gen == CYAPA_GEN6)
+		cyapa->state = CYAPA_STATE_GEN6_BL;
 	return 0;
 }
 
-static int cyapa_gen5_check_fw(struct cyapa *cyapa, const struct firmware *fw)
+static int cyapa_pip_fw_head_check(struct cyapa *cyapa,
+		struct cyapa_tsg_bin_image_head *image_head)
+{
+	if (image_head->head_size != 0x0C && image_head->head_size != 0x12)
+		return -EINVAL;
+
+	switch (cyapa->gen) {
+	case CYAPA_GEN6:
+		if (image_head->family_id != 0x9B ||
+		    image_head->silicon_id_hi != 0x0B)
+			return -EINVAL;
+		break;
+	case CYAPA_GEN5:
+		/* Gen5 without proximity support. */
+		if (cyapa->platform_ver < 2) {
+			if (image_head->head_size == 0x0C)
+				break;
+			return -EINVAL;
+		}
+
+		if (image_head->family_id != 0x91 ||
+		    image_head->silicon_id_hi != 0x02)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cyapa_pip_check_fw(struct cyapa *cyapa, const struct firmware *fw)
 {
 	struct device *dev = &cyapa->client->dev;
-	const struct cyapa_tsg_bin_image *image = (const void *)fw->data;
+	struct cyapa_tsg_bin_image_data_record *image_records;
 	const struct cyapa_tsg_bin_image_data_record *app_integrity;
-	const struct gen5_bl_metadata_row_params *metadata;
-	size_t flash_records_count;
+	const struct tsg_bl_metadata_row_params *metadata;
+	int flash_records_count;
 	u32 fw_app_start, fw_upgrade_start;
 	u16 fw_app_len, fw_upgrade_len;
 	u16 app_crc;
 	u16 app_integrity_crc;
-	int record_index;
 	int i;
 
-	flash_records_count = (fw->size -
-			sizeof(struct cyapa_tsg_bin_image_head)) /
-			sizeof(struct cyapa_tsg_bin_image_data_record);
+	/* Verify the firmware image is not misused between Gen5 and Gen6. */
+	if (cyapa_pip_fw_head_check(cyapa,
+		(struct cyapa_tsg_bin_image_head *)fw->data)) {
+		dev_err(dev, "%s: firmware image does not match TP device.\n",
+			     __func__);
+		return -EINVAL;
+	}
+
+	image_records =
+		cyapa_get_image_record_data_num(fw, &flash_records_count);
 
 	/*
 	 * APP_INTEGRITY row is always the last row block,
 	 * and the row id must be 0x01ff.
 	 */
-	app_integrity = &image->records[flash_records_count - 1];
+	app_integrity = &image_records[flash_records_count - 1];
 
 	if (app_integrity->flash_array_id != 0x00 ||
 	    get_unaligned_be16(&app_integrity->row_number) != 0x01ff) {
@@ -1242,14 +1248,11 @@
 		return -EINVAL;
 	}
 
-	/*
-	 * Verify application image CRC
-	 */
-	record_index = fw_app_start / CYAPA_TSG_FW_ROW_SIZE -
-				CYAPA_TSG_IMG_START_ROW_NUM;
+	/* Verify application image CRC. */
 	app_crc = 0xffffU;
 	for (i = 0; i < fw_app_len / CYAPA_TSG_FW_ROW_SIZE; i++) {
-		const u8 *data = image->records[record_index + i].record_data;
+		const u8 *data = image_records[i].record_data;
+
 		app_crc = crc_itu_t(app_crc, data, CYAPA_TSG_FW_ROW_SIZE);
 	}
 
@@ -1261,13 +1264,13 @@
 	return 0;
 }
 
-static int cyapa_gen5_write_fw_block(struct cyapa *cyapa,
+static int cyapa_pip_write_fw_block(struct cyapa *cyapa,
 		struct cyapa_tsg_bin_image_data_record *flash_record)
 {
-	struct gen5_bl_cmd_head *bl_cmd_head;
-	struct gen5_bl_packet_start *bl_packet_start;
-	struct gen5_bl_flash_row_head *flash_row_head;
-	struct gen5_bl_packet_end *bl_packet_end;
+	struct pip_bl_cmd_head *bl_cmd_head;
+	struct pip_bl_packet_start *bl_packet_start;
+	struct tsg_bl_flash_row_head *flash_row_head;
+	struct pip_bl_packet_end *bl_packet_end;
 	u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
 	u16 cmd_len;
 	u8 flash_array_id;
@@ -1286,71 +1289,68 @@
 	record_data = flash_record->record_data;
 
 	memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
-	bl_cmd_head = (struct gen5_bl_cmd_head *)cmd;
+	bl_cmd_head = (struct pip_bl_cmd_head *)cmd;
 	bl_packet_start = &bl_cmd_head->packet_start;
-	cmd_len = sizeof(struct gen5_bl_cmd_head) +
-		  sizeof(struct gen5_bl_flash_row_head) +
+	cmd_len = sizeof(struct pip_bl_cmd_head) +
+		  sizeof(struct tsg_bl_flash_row_head) +
 		  CYAPA_TSG_FLASH_MAP_BLOCK_SIZE +
-		  sizeof(struct gen5_bl_packet_end);
+		  sizeof(struct pip_bl_packet_end);
 
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
 	/* Don't include the 2-byte register address */
 	put_unaligned_le16(cmd_len - 2, &bl_cmd_head->length);
-	bl_cmd_head->report_id = GEN5_BL_CMD_REPORT_ID;
-	bl_packet_start->sop = GEN5_SOP_KEY;
-	bl_packet_start->cmd_code = GEN5_BL_CMD_PROGRAM_VERIFY_ROW;
+	bl_cmd_head->report_id = PIP_BL_CMD_REPORT_ID;
+	bl_packet_start->sop = PIP_SOP_KEY;
+	bl_packet_start->cmd_code = PIP_BL_CMD_PROGRAM_VERIFY_ROW;
 
 	/* 1 (Flash Array ID) + 2 (Flash Row ID) + 128 (flash data) */
-	data_len = sizeof(struct gen5_bl_flash_row_head) + record_len;
+	data_len = sizeof(struct tsg_bl_flash_row_head) + record_len;
 	put_unaligned_le16(data_len, &bl_packet_start->data_length);
 
-	flash_row_head = (struct gen5_bl_flash_row_head *)bl_cmd_head->data;
+	flash_row_head = (struct tsg_bl_flash_row_head *)bl_cmd_head->data;
 	flash_row_head->flash_array_id = flash_array_id;
 	put_unaligned_le16(flash_row_id, &flash_row_head->flash_row_id);
 	memcpy(flash_row_head->flash_data, record_data, record_len);
 
-	bl_packet_end = (struct gen5_bl_packet_end *)(bl_cmd_head->data +
+	bl_packet_end = (struct pip_bl_packet_end *)(bl_cmd_head->data +
 						      data_len);
 	crc = crc_itu_t(0xffff, (u8 *)bl_packet_start,
-		sizeof(struct gen5_bl_packet_start) + data_len);
+		sizeof(struct pip_bl_packet_start) + data_len);
 	put_unaligned_le16(crc, &bl_packet_end->crc);
-	bl_packet_end->eop = GEN5_EOP_KEY;
+	bl_packet_end->eop = PIP_EOP_KEY;
 
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_bl_resp_data, true);
-	if (error || resp_len != GEN5_BL_BLOCK_WRITE_RESP_LEN ||
-			resp_data[2] != GEN5_BL_RESP_REPORT_ID ||
-			!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			500, cyapa_sort_tsg_pip_bl_resp_data, true);
+	if (error || resp_len != PIP_BL_BLOCK_WRITE_RESP_LEN ||
+			resp_data[2] != PIP_BL_RESP_REPORT_ID ||
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error < 0 ? error : -EAGAIN;
 
 	return 0;
 }
 
-static int cyapa_gen5_do_fw_update(struct cyapa *cyapa,
+int cyapa_pip_do_fw_update(struct cyapa *cyapa,
 		const struct firmware *fw)
 {
 	struct device *dev = &cyapa->client->dev;
-	struct cyapa_tsg_bin_image_data_record *flash_record;
-	struct cyapa_tsg_bin_image *image =
-		(struct cyapa_tsg_bin_image *)fw->data;
+	struct cyapa_tsg_bin_image_data_record *image_records;
 	int flash_records_count;
 	int i;
 	int error;
 
 	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
 
-	flash_records_count =
-		(fw->size - sizeof(struct cyapa_tsg_bin_image_head)) /
-			sizeof(struct cyapa_tsg_bin_image_data_record);
+	image_records =
+		cyapa_get_image_record_data_num(fw, &flash_records_count);
+
 	/*
 	 * The last flash row 0x01ff has been written through bl_initiate
 	 * command, so DO NOT write flash 0x01ff to trackpad device.
 	 */
 	for (i = 0; i < (flash_records_count - 1); i++) {
-		flash_record = &image->records[i];
-		error = cyapa_gen5_write_fw_block(cyapa, flash_record);
+		error = cyapa_pip_write_fw_block(cyapa, &image_records[i]);
 		if (error) {
 			dev_err(dev, "%s: Gen5 FW update aborted: %d\n",
 				__func__, error);
@@ -1372,9 +1372,9 @@
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
 	if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x08) ||
-			!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error < 0 ? error : -EINVAL;
 
 	return 0;
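Note the pattern repeated across this patch: GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]) becomes PIP_CMD_COMPLETE_SUCCESS(resp_data), i.e. the macro now receives the whole response buffer and indexes the status byte itself. The real definition lives in the driver's header; a purely hypothetical reconstruction, assuming the status byte sits at offset 5 and an error flag bit within it, would be:

/* Hypothetical sketch only; the actual macro and bit mask may differ. */
#define EXAMPLE_CMD_COMPLETE_SUCCESS(resp)	(((resp)[5] & 0x02) == 0)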
@@ -1383,7 +1383,7 @@
 static int cyapa_gen5_set_interval_time(struct cyapa *cyapa,
 		u8 parameter_id, u16 interval_time)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	struct gen5_app_set_parameter_data *parameter_data;
 	u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
 	int cmd_len;
@@ -1393,10 +1393,10 @@
 	int error;
 
 	memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
 	parameter_data = (struct gen5_app_set_parameter_data *)
 			 app_cmd_head->parameter_data;
-	cmd_len = sizeof(struct gen5_app_cmd_head) +
+	cmd_len = sizeof(struct pip_app_cmd_head) +
 		  sizeof(struct gen5_app_set_parameter_data);
 
 	switch (parameter_id) {
@@ -1413,14 +1413,14 @@
 		return -EINVAL;
 	}
 
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	/*
 	 * Don't include unused parameter value bytes or the
 	 * 2-byte register address.
 	 */
 	put_unaligned_le16(cmd_len - (4 - parameter_size) - 2,
 			   &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	app_cmd_head->cmd_code = GEN5_CMD_SET_PARAMETER;
 	parameter_data->parameter_id = parameter_id;
 	parameter_data->parameter_size = parameter_size;
@@ -1428,7 +1428,7 @@
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
 	if (error || resp_data[5] != parameter_id ||
 		resp_data[6] != parameter_size ||
 		!VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_SET_PARAMETER))
@@ -1440,7 +1440,7 @@
 static int cyapa_gen5_get_interval_time(struct cyapa *cyapa,
 		u8 parameter_id, u16 *interval_time)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	struct gen5_app_get_parameter_data *parameter_data;
 	u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
 	int cmd_len;
@@ -1451,10 +1451,10 @@
 	int error;
 
 	memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
 	parameter_data = (struct gen5_app_get_parameter_data *)
 			 app_cmd_head->parameter_data;
-	cmd_len = sizeof(struct gen5_app_cmd_head) +
+	cmd_len = sizeof(struct pip_app_cmd_head) +
 		  sizeof(struct gen5_app_get_parameter_data);
 
 	*interval_time = 0;
@@ -1472,17 +1472,17 @@
 		return -EINVAL;
 	}
 
-	put_unaligned_le16(GEN5_HID_DESCRIPTOR_ADDR, &app_cmd_head->addr);
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	/* Don't include the 2-byte register address */
 	put_unaligned_le16(cmd_len - 2, &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	app_cmd_head->cmd_code = GEN5_CMD_GET_PARAMETER;
 	parameter_data->parameter_id = parameter_id;
 
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
 	if (error || resp_data[5] != parameter_id || resp_data[6] == 0 ||
 		!VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_GET_PARAMETER))
 		return error < 0 ? error : -EINVAL;
@@ -1497,18 +1497,18 @@
 
 static int cyapa_gen5_disable_pip_report(struct cyapa *cyapa)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	u8 cmd[10];
 	u8 resp_data[7];
 	int resp_len;
 	int error;
 
 	memset(cmd, 0, sizeof(cmd));
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
 
-	put_unaligned_le16(GEN5_HID_DESCRIPTOR_ADDR, &app_cmd_head->addr);
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	app_cmd_head->cmd_code = GEN5_CMD_SET_PARAMETER;
 	app_cmd_head->parameter_data[0] = GEN5_PARAMETER_DISABLE_PIP_REPORT;
 	app_cmd_head->parameter_data[1] = 0x01;
@@ -1516,7 +1516,7 @@
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
 	if (error || resp_data[5] != GEN5_PARAMETER_DISABLE_PIP_REPORT ||
 		!VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_SET_PARAMETER) ||
 		resp_data[6] != 0x01)
@@ -1525,26 +1525,48 @@
 	return 0;
 }
 
-static int cyapa_gen5_deep_sleep(struct cyapa *cyapa, u8 state)
+int cyapa_pip_set_proximity(struct cyapa *cyapa, bool enable)
+{
+	u8 cmd[] = { 0x04, 0x00, 0x06, 0x00, 0x2f, 0x00, PIP_SET_PROXIMITY,
+		     (u8)!!enable
+	};
+	u8 resp_data[6];
+	int resp_len;
+	int error;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
+	if (error || !VALID_CMD_RESP_HEADER(resp_data, PIP_SET_PROXIMITY) ||
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data)) {
+		error = (error == -ETIMEDOUT) ? -EOPNOTSUPP : error;
+		return error < 0 ? error : -EINVAL;
+	}
+
+	return 0;
+}
+
+int cyapa_pip_deep_sleep(struct cyapa *cyapa, u8 state)
 {
 	u8 cmd[] = { 0x05, 0x00, 0x00, 0x08};
 	u8 resp_data[5];
 	int resp_len;
 	int error;
 
-	cmd[2] = state & GEN5_DEEP_SLEEP_STATE_MASK;
+	cmd[2] = state & PIP_DEEP_SLEEP_STATE_MASK;
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_deep_sleep_data, false);
-	if (error || ((resp_data[3] & GEN5_DEEP_SLEEP_STATE_MASK) != state))
+			500, cyapa_sort_pip_deep_sleep_data, false);
+	if (error || ((resp_data[3] & PIP_DEEP_SLEEP_STATE_MASK) != state))
 		return -EINVAL;
 
 	return 0;
 }
 
 static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
-		u8 power_mode, u16 sleep_time)
+		u8 power_mode, u16 sleep_time, bool is_suspend)
 {
 	struct device *dev = &cyapa->client->dev;
 	u8 power_state;
@@ -1553,43 +1575,40 @@
 	if (cyapa->state != CYAPA_STATE_GEN5_APP)
 		return 0;
 
-	/* Dump all the report data before do power mode commmands. */
-	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
-
-	if (GEN5_DEV_GET_PWR_STATE(cyapa) == UNINIT_PWR_MODE) {
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == UNINIT_PWR_MODE) {
 		/*
 		 * Assume the TP is in deep sleep mode when the driver is
 		 * loaded, to avoid command IO issues across driver unload and
 		 * reload caused by the TP having been left in deep sleep mode.
 		 */
-		GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
 	}
 
-	if (GEN5_DEV_UNINIT_SLEEP_TIME(cyapa) &&
-			GEN5_DEV_GET_PWR_STATE(cyapa) != PWR_MODE_OFF)
+	if (PIP_DEV_UNINIT_SLEEP_TIME(cyapa) &&
+			PIP_DEV_GET_PWR_STATE(cyapa) != PWR_MODE_OFF)
 		if (cyapa_gen5_get_interval_time(cyapa,
 				GEN5_PARAMETER_LP_INTRVL_ID,
 				&cyapa->dev_sleep_time) != 0)
-			GEN5_DEV_SET_SLEEP_TIME(cyapa, UNINIT_SLEEP_TIME);
+			PIP_DEV_SET_SLEEP_TIME(cyapa, UNINIT_SLEEP_TIME);
 
-	if (GEN5_DEV_GET_PWR_STATE(cyapa) == power_mode) {
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == power_mode) {
 		if (power_mode == PWR_MODE_OFF ||
 			power_mode == PWR_MODE_FULL_ACTIVE ||
 			power_mode == PWR_MODE_BTN_ONLY ||
-			GEN5_DEV_GET_SLEEP_TIME(cyapa) == sleep_time) {
+			PIP_DEV_GET_SLEEP_TIME(cyapa) == sleep_time) {
 			/* Already in the correct power mode state; return early. */
 			return 0;
 		}
 	}
 
 	if (power_mode == PWR_MODE_OFF) {
-		error = cyapa_gen5_deep_sleep(cyapa, GEN5_DEEP_SLEEP_STATE_OFF);
+		error = cyapa_pip_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_OFF);
 		if (error) {
 			dev_err(dev, "enter deep sleep fail: %d\n", error);
 			return error;
 		}
 
-		GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
 		return 0;
 	}
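The PIP_DEV_*_PWR_STATE and *_SLEEP_TIME accessors used throughout this function cache the last power mode and sleep interval pushed to the device; that cache is what enables the early return above when the requested mode is already in effect. A hedged sketch of what such accessors typically reduce to (the field names here are assumptions, though dev_sleep_time does appear elsewhere in this patch):

#define EXAMPLE_GET_PWR_STATE(c)	((c)->dev_pwr_mode)
#define EXAMPLE_SET_PWR_STATE(c, m)	((c)->dev_pwr_mode = (m))
#define EXAMPLE_GET_SLEEP_TIME(c)	((c)->dev_sleep_time)
#define EXAMPLE_SET_SLEEP_TIME(c, t)	((c)->dev_sleep_time = (t))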
 
@@ -1598,8 +1617,8 @@
 	 * state directly; it must first be woken from sleep, and only
 	 * then can the next power state change proceed.
 	 */
-	if (GEN5_DEV_GET_PWR_STATE(cyapa) == PWR_MODE_OFF) {
-		error = cyapa_gen5_deep_sleep(cyapa, GEN5_DEEP_SLEEP_STATE_ON);
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == PWR_MODE_OFF) {
+		error = cyapa_pip_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_ON);
 		if (error) {
 			dev_err(dev, "deep sleep wake fail: %d\n", error);
 			return error;
@@ -1614,7 +1633,7 @@
 			return error;
 		}
 
-		GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_FULL_ACTIVE);
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_FULL_ACTIVE);
 	} else if (power_mode == PWR_MODE_BTN_ONLY) {
 		error = cyapa_gen5_change_power_state(cyapa,
 				GEN5_POWER_STATE_BTN_ONLY);
@@ -1623,19 +1642,19 @@
 			return error;
 		}
 
-		GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_BTN_ONLY);
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_BTN_ONLY);
 	} else {
 		/*
 		 * Continue to change the power mode even if setting the
 		 * interval time failed; it doesn't affect the power mode
 		 * change itself, only the sleep interval may be incorrect.
 		 */
-		if (GEN5_DEV_UNINIT_SLEEP_TIME(cyapa) ||
-				sleep_time != GEN5_DEV_GET_SLEEP_TIME(cyapa))
+		if (PIP_DEV_UNINIT_SLEEP_TIME(cyapa) ||
+				sleep_time != PIP_DEV_GET_SLEEP_TIME(cyapa))
 			if (cyapa_gen5_set_interval_time(cyapa,
 					GEN5_PARAMETER_LP_INTRVL_ID,
 					sleep_time) == 0)
-				GEN5_DEV_SET_SLEEP_TIME(cyapa, sleep_time);
+				PIP_DEV_SET_SLEEP_TIME(cyapa, sleep_time);
 
 		if (sleep_time <= GEN5_POWER_READY_MAX_INTRVL_TIME)
 			power_state = GEN5_POWER_STATE_READY;
@@ -1658,17 +1677,17 @@
 		 * is suspending which may cause interrupt line unable to be
 		 * asserted again.
 		 */
-		cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
-		cyapa_gen5_disable_pip_report(cyapa);
+		if (is_suspend)
+			cyapa_gen5_disable_pip_report(cyapa);
 
-		GEN5_DEV_SET_PWR_STATE(cyapa,
+		PIP_DEV_SET_PWR_STATE(cyapa,
 			cyapa_sleep_time_to_pwr_cmd(sleep_time));
 	}
 
 	return 0;
 }
 
-static int cyapa_gen5_resume_scanning(struct cyapa *cyapa)
+int cyapa_pip_resume_scanning(struct cyapa *cyapa)
 {
 	u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x04 };
 	u8 resp_data[6];
@@ -1682,7 +1701,7 @@
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+			500, cyapa_sort_tsg_pip_app_resp_data, true);
 	if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x04))
 		return -EINVAL;
 
@@ -1692,7 +1711,7 @@
 	return 0;
 }
 
-static int cyapa_gen5_suspend_scanning(struct cyapa *cyapa)
+int cyapa_pip_suspend_scanning(struct cyapa *cyapa)
 {
 	u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x03 };
 	u8 resp_data[6];
@@ -1706,7 +1725,7 @@
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+			500, cyapa_sort_tsg_pip_app_resp_data, true);
 	if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x03))
 		return -EINVAL;
 
@@ -1716,10 +1735,10 @@
 	return 0;
 }
 
-static int cyapa_gen5_calibrate_pwcs(struct cyapa *cyapa,
+static int cyapa_pip_calibrate_pwcs(struct cyapa *cyapa,
 		u8 calibrate_sensing_mode_type)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	u8 cmd[8];
 	u8 resp_data[6];
 	int resp_len;
@@ -1729,25 +1748,25 @@
 	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
 
 	memset(cmd, 0, sizeof(cmd));
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
-	app_cmd_head->cmd_code = GEN5_CMD_CALIBRATE;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
+	app_cmd_head->cmd_code = PIP_CMD_CALIBRATE;
 	app_cmd_head->parameter_data[0] = calibrate_sensing_mode_type;
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			5000, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
-	if (error || !VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_CALIBRATE) ||
-			!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			5000, cyapa_sort_tsg_pip_app_resp_data, true);
+	if (error || !VALID_CMD_RESP_HEADER(resp_data, PIP_CMD_CALIBRATE) ||
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error < 0 ? error : -EAGAIN;
 
 	return 0;
 }
 
-static ssize_t cyapa_gen5_do_calibrate(struct device *dev,
+ssize_t cyapa_pip_do_calibrate(struct device *dev,
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
@@ -1755,25 +1774,25 @@
 	int error, calibrate_error;
 
 	/* 1. Suspend Scanning*/
-	error = cyapa_gen5_suspend_scanning(cyapa);
+	error = cyapa_pip_suspend_scanning(cyapa);
 	if (error)
 		return error;
 
 	/* 2. Do mutual capacitance fine calibrate. */
-	calibrate_error = cyapa_gen5_calibrate_pwcs(cyapa,
-				CYAPA_SENSING_MODE_MUTUAL_CAP_FINE);
+	calibrate_error = cyapa_pip_calibrate_pwcs(cyapa,
+				PIP_SENSING_MODE_MUTUAL_CAP_FINE);
 	if (calibrate_error)
 		goto resume_scanning;
 
 	/* 3. Do self capacitance calibrate. */
-	calibrate_error = cyapa_gen5_calibrate_pwcs(cyapa,
-				CYAPA_SENSING_MODE_SELF_CAP);
+	calibrate_error = cyapa_pip_calibrate_pwcs(cyapa,
+				PIP_SENSING_MODE_SELF_CAP);
 	if (calibrate_error)
 		goto resume_scanning;
 
 resume_scanning:
 	/* 4. Resume Scanning*/
-	error = cyapa_gen5_resume_scanning(cyapa);
+	error = cyapa_pip_resume_scanning(cyapa);
 	if (error || calibrate_error)
 		return error ? error : calibrate_error;
 
@@ -1856,7 +1875,7 @@
  * If the input value of @data_size is not 0, that means reading the mutual or
  * self local PWC data. The @idac_max, @idac_min and @idac_ave are used to
  * return the max, min and average value of the mutual or self local PWC data.
- * Note, in order to raed mutual local PWC data, must read invoke this function
+ * Note, in order to read mutual local PWC data, this function must first be
  * invoked to read the mutual global idac data, so that the correct Rx number
  * value is set; otherwise the mutual idac and PWC data read may be incorrect.
  */
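The ordering rule in the note above shows up concretely in the callers later in this hunk series: the function is invoked twice with identical arguments, where the first call (with *data_size == 0) fetches the global idac and fixes the Rx count, and the second call then reads the local PWC data. A sketch of the pattern, with error handling trimmed and variable names illustrative:

int data_size = 0;	/* 0 selects the global idac read */
int max, min, ave;

cyapa_gen5_read_idac_data(cyapa, PIP_RETRIEVE_DATA_STRUCTURE,
		GEN5_RETRIEVE_MUTUAL_PWC_DATA, &data_size, &max, &min, &ave);
/* data_size is now non-zero, so the second call reads local PWC data. */
cyapa_gen5_read_idac_data(cyapa, PIP_RETRIEVE_DATA_STRUCTURE,
		GEN5_RETRIEVE_MUTUAL_PWC_DATA, &data_size, &max, &min, &ave);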
@@ -1864,7 +1883,7 @@
 		u8 cmd_code, u8 idac_data_type, int *data_size,
 		int *idac_max, int *idac_min, int *idac_ave)
 {
-	struct gen5_app_cmd_head *cmd_head;
+	struct pip_app_cmd_head *cmd_head;
 	u8 cmd[12];
 	u8 resp_data[256];
 	int resp_len;
@@ -1879,7 +1898,7 @@
 	int i;
 	int error;
 
-	if (cmd_code != GEN5_CMD_RETRIEVE_DATA_STRUCTURE ||
+	if (cmd_code != PIP_RETRIEVE_DATA_STRUCTURE ||
 		(idac_data_type != GEN5_RETRIEVE_MUTUAL_PWC_DATA &&
 		idac_data_type != GEN5_RETRIEVE_SELF_CAP_PWC_DATA) ||
 		!data_size || !idac_max || !idac_min || !idac_ave)
@@ -1935,10 +1954,10 @@
 	}
 
 	memset(cmd, 0, sizeof(cmd));
-	cmd_head = (struct gen5_app_cmd_head *)cmd;
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &cmd_head->addr);
+	cmd_head = (struct pip_app_cmd_head *)cmd;
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &cmd_head->addr);
 	put_unaligned_le16(sizeof(cmd) - 2, &cmd_head->length);
-	cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	cmd_head->cmd_code = cmd_code;
 	do {
 		read_elements = (256 - GEN5_RESP_DATA_STRUCTURE_OFFSET) /
@@ -1953,11 +1972,11 @@
 		error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 				cmd, sizeof(cmd),
 				resp_data, &resp_len,
-				500, cyapa_gen5_sort_tsg_pip_app_resp_data,
+				500, cyapa_sort_tsg_pip_app_resp_data,
 				true);
 		if (error || resp_len < GEN5_RESP_DATA_STRUCTURE_OFFSET ||
 				!VALID_CMD_RESP_HEADER(resp_data, cmd_code) ||
-				!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]) ||
+				!PIP_CMD_COMPLETE_SUCCESS(resp_data) ||
 				resp_data[6] != idac_data_type)
 			return (error < 0) ? error : -EAGAIN;
 		read_len = get_unaligned_le16(&resp_data[7]);
@@ -1997,7 +2016,7 @@
 				tmp_count < cyapa->aligned_electrodes_rx &&
 				read_global_idac) {
 				/*
-				 * The value gap betwen global and local mutual
+				 * The value gap between global and local mutual
 				 * idac data must be bigger than 50%.
 				 * Normally the global value is bigger than 50
 				 * and local values are less than 10.
@@ -2061,7 +2080,7 @@
 
 	data_size = 0;
 	error = cyapa_gen5_read_idac_data(cyapa,
-		GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+		PIP_RETRIEVE_DATA_STRUCTURE,
 		GEN5_RETRIEVE_MUTUAL_PWC_DATA,
 		&data_size,
 		gidac_mutual_max, gidac_mutual_min, gidac_mutual_ave);
@@ -2069,7 +2088,7 @@
 		return error;
 
 	error = cyapa_gen5_read_idac_data(cyapa,
-		GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+		PIP_RETRIEVE_DATA_STRUCTURE,
 		GEN5_RETRIEVE_MUTUAL_PWC_DATA,
 		&data_size,
 		lidac_mutual_max, lidac_mutual_min, lidac_mutual_ave);
@@ -2088,7 +2107,7 @@
 
 	data_size = 0;
 	error = cyapa_gen5_read_idac_data(cyapa,
-		GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+		PIP_RETRIEVE_DATA_STRUCTURE,
 		GEN5_RETRIEVE_SELF_CAP_PWC_DATA,
 		&data_size,
 		lidac_self_max, lidac_self_min, lidac_self_ave);
@@ -2098,7 +2117,7 @@
 	*gidac_self_tx = *lidac_self_min;
 
 	error = cyapa_gen5_read_idac_data(cyapa,
-		GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+		PIP_RETRIEVE_DATA_STRUCTURE,
 		GEN5_RETRIEVE_SELF_CAP_PWC_DATA,
 		&data_size,
 		lidac_self_max, lidac_self_min, lidac_self_ave);
@@ -2107,27 +2126,27 @@
 
 static ssize_t cyapa_gen5_execute_panel_scan(struct cyapa *cyapa)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	u8 cmd[7];
 	u8 resp_data[6];
 	int resp_len;
 	int error;
 
 	memset(cmd, 0, sizeof(cmd));
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	app_cmd_head->cmd_code = GEN5_CMD_EXECUTE_PANEL_SCAN;
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+			500, cyapa_sort_tsg_pip_app_resp_data, true);
 	if (error || resp_len != sizeof(resp_data) ||
 			!VALID_CMD_RESP_HEADER(resp_data,
 				GEN5_CMD_EXECUTE_PANEL_SCAN) ||
-			!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error ? error : -EAGAIN;
 
 	return 0;
@@ -2138,7 +2157,7 @@
 		int *raw_data_max, int *raw_data_min, int *raw_data_ave,
 		u8 *buffer)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	struct gen5_retrieve_panel_scan_data *panel_sacn_data;
 	u8 cmd[12];
 	u8 resp_data[256];  /* Max bytes that can be transferred at one time. */
@@ -2166,10 +2185,10 @@
 	/* Assume max element size is 4 currently. */
 	read_elements = (256 - GEN5_RESP_DATA_STRUCTURE_OFFSET) / 4;
 	read_len = read_elements * 4;
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	app_cmd_head->cmd_code = cmd_code;
 	panel_sacn_data = (struct gen5_retrieve_panel_scan_data *)
 			app_cmd_head->parameter_data;
@@ -2183,10 +2202,10 @@
 		error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+			500, cyapa_sort_tsg_pip_app_resp_data, true);
 		if (error || resp_len < GEN5_RESP_DATA_STRUCTURE_OFFSET ||
 				!VALID_CMD_RESP_HEADER(resp_data, cmd_code) ||
-				!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]) ||
+				!PIP_CMD_COMPLETE_SUCCESS(resp_data) ||
 				resp_data[6] != raw_data_type)
 			return error ? error : -EAGAIN;
 
@@ -2245,11 +2264,11 @@
 	int error, resume_error;
 	int size;
 
-	if (cyapa->state != CYAPA_STATE_GEN5_APP)
+	if (!cyapa_is_pip_app_mode(cyapa))
 		return -EBUSY;
 
 	/* 1. Suspend Scanning*/
-	error = cyapa_gen5_suspend_scanning(cyapa);
+	error = cyapa_pip_suspend_scanning(cyapa);
 	if (error)
 		return error;
 
@@ -2270,7 +2289,7 @@
 	if (error)
 		goto resume_scanning;
 
-	/* 4. Execuate panel scan. It must be executed before read data. */
+	/* 4. Execute panel scan. It must be executed before reading data. */
 	error = cyapa_gen5_execute_panel_scan(cyapa);
 	if (error)
 		goto resume_scanning;
@@ -2343,7 +2362,7 @@
 
 resume_scanning:
 	/* 11. Resume Scanning*/
-	resume_error = cyapa_gen5_resume_scanning(cyapa);
+	resume_error = cyapa_pip_resume_scanning(cyapa);
 	if (resume_error || error)
 		return resume_error ? resume_error : error;
 
@@ -2364,7 +2383,7 @@
 	return size;
 }
 
-static bool cyapa_gen5_sort_system_info_data(struct cyapa *cyapa,
+bool cyapa_pip_sort_system_info_data(struct cyapa *cyapa,
 		u8 *buf, int len)
 {
 	/* Check the report id and command code */
@@ -2376,20 +2395,17 @@
 
 static int cyapa_gen5_bl_query_data(struct cyapa *cyapa)
 {
-	u8 bl_query_data_cmd[] = { 0x04, 0x00, 0x0b, 0x00, 0x40, 0x00,
-		0x01, 0x3c, 0x00, 0x00, 0xb0, 0x42, 0x17
-	};
-	u8 resp_data[GEN5_BL_READ_APP_INFO_RESP_LEN];
+	u8 resp_data[PIP_BL_APP_INFO_RESP_LENGTH];
 	int resp_len;
 	int error;
 
-	resp_len = GEN5_BL_READ_APP_INFO_RESP_LEN;
+	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
-			bl_query_data_cmd, sizeof(bl_query_data_cmd),
+			pip_bl_read_app_info, PIP_BL_READ_APP_INFO_CMD_LENGTH,
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_bl_resp_data, false);
-	if (error || resp_len != GEN5_BL_READ_APP_INFO_RESP_LEN ||
-		!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			500, cyapa_sort_tsg_pip_bl_resp_data, false);
+	if (error || resp_len < PIP_BL_APP_INFO_RESP_LENGTH ||
+		!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error ? error : -EIO;
 
 	memcpy(&cyapa->product_id[0], &resp_data[8], 5);
@@ -2402,34 +2418,42 @@
 	cyapa->fw_maj_ver = resp_data[22];
 	cyapa->fw_min_ver = resp_data[23];
 
+	cyapa->platform_ver = (resp_data[26] >> PIP_BL_PLATFORM_VER_SHIFT) &
+			      PIP_BL_PLATFORM_VER_MASK;
+
 	return 0;
 }
 
 static int cyapa_gen5_get_query_data(struct cyapa *cyapa)
 {
-	u8 get_system_information[] = {
-		0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x02
-	};
-	u8 resp_data[71];
+	u8 resp_data[PIP_READ_SYS_INFO_RESP_LENGTH];
 	int resp_len;
 	u16 product_family;
 	int error;
 
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
-			get_system_information, sizeof(get_system_information),
+			pip_read_sys_info, PIP_READ_SYS_INFO_CMD_LENGTH,
 			resp_data, &resp_len,
-			2000, cyapa_gen5_sort_system_info_data, false);
+			2000, cyapa_pip_sort_system_info_data, false);
 	if (error || resp_len < sizeof(resp_data))
 		return error ? error : -EIO;
 
 	product_family = get_unaligned_le16(&resp_data[7]);
-	if ((product_family & GEN5_PRODUCT_FAMILY_MASK) !=
-		GEN5_PRODUCT_FAMILY_TRACKPAD)
+	if ((product_family & PIP_PRODUCT_FAMILY_MASK) !=
+		PIP_PRODUCT_FAMILY_TRACKPAD)
 		return -EINVAL;
 
-	cyapa->fw_maj_ver = resp_data[15];
-	cyapa->fw_min_ver = resp_data[16];
+	cyapa->platform_ver = (resp_data[49] >> PIP_BL_PLATFORM_VER_SHIFT) &
+			      PIP_BL_PLATFORM_VER_MASK;
+	if (cyapa->gen == CYAPA_GEN5 && cyapa->platform_ver < 2) {
+		/* Gen5 firmware that does not support proximity. */
+		cyapa->fw_maj_ver = resp_data[15];
+		cyapa->fw_min_ver = resp_data[16];
+	} else {
+		cyapa->fw_maj_ver = resp_data[9];
+		cyapa->fw_min_ver = resp_data[10];
+	}
 
 	cyapa->electrodes_x = resp_data[52];
 	cyapa->electrodes_y = resp_data[53];
@@ -2472,9 +2496,9 @@
 
 	switch (cyapa->state) {
 	case CYAPA_STATE_GEN5_BL:
-		error = cyapa_gen5_bl_exit(cyapa);
+		error = cyapa_pip_bl_exit(cyapa);
 		if (error) {
-			/* Rry to update trackpad product information. */
+			/* Try to update trackpad product information. */
 			cyapa_gen5_bl_query_data(cyapa);
 			goto out;
 		}
@@ -2486,14 +2510,23 @@
 		 * If the trackpad device is in deep sleep mode,
 		 * the app command will fail.
 		 * So always try to reset the trackpad device to full active when
-		 * the device state is requeried.
+		 * the device state is required.
 		 */
 		error = cyapa_gen5_set_power_mode(cyapa,
-				PWR_MODE_FULL_ACTIVE, 0);
+				PWR_MODE_FULL_ACTIVE, 0, false);
 		if (error)
 			dev_warn(dev, "%s: failed to set power active mode.\n",
 				__func__);
 
+		/* By default, the trackpad proximity function is enabled. */
+		if (cyapa->platform_ver >= 2) {
+			error = cyapa_pip_set_proximity(cyapa, true);
+			if (error)
+				dev_warn(dev,
+					"%s: failed to enable proximity.\n",
+					__func__);
+		}
+
 		/* Get trackpad product information. */
 		error = cyapa_gen5_get_query_data(cyapa);
 		if (error)
@@ -2518,14 +2551,14 @@
  * Return false, do not continue process
  * Return true, continue process.
  */
-static bool cyapa_gen5_irq_cmd_handler(struct cyapa *cyapa)
+bool cyapa_pip_irq_cmd_handler(struct cyapa *cyapa)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int length;
 
-	if (atomic_read(&gen5_pip->cmd_issued)) {
+	if (atomic_read(&pip->cmd_issued)) {
 		/* Polling command response data. */
-		if (gen5_pip->is_irq_mode == false)
+		if (pip->is_irq_mode == false)
 			return false;
 
 		/*
@@ -2533,59 +2566,64 @@
 		 * these output data may be caused by the user putting a finger
 		 * on the trackpad while the host is waiting for the response.
 		 */
-		cyapa_i2c_pip_read(cyapa, gen5_pip->irq_cmd_buf,
-			GEN5_RESP_LENGTH_SIZE);
-		length = get_unaligned_le16(gen5_pip->irq_cmd_buf);
-		length = (length <= GEN5_RESP_LENGTH_SIZE) ?
-				GEN5_RESP_LENGTH_SIZE : length;
-		if (length > GEN5_RESP_LENGTH_SIZE)
+		cyapa_i2c_pip_read(cyapa, pip->irq_cmd_buf,
+			PIP_RESP_LENGTH_SIZE);
+		length = get_unaligned_le16(pip->irq_cmd_buf);
+		length = (length <= PIP_RESP_LENGTH_SIZE) ?
+				PIP_RESP_LENGTH_SIZE : length;
+		if (length > PIP_RESP_LENGTH_SIZE)
 			cyapa_i2c_pip_read(cyapa,
-				gen5_pip->irq_cmd_buf, length);
-
-		if (!(gen5_pip->resp_sort_func &&
-			gen5_pip->resp_sort_func(cyapa,
-				gen5_pip->irq_cmd_buf, length))) {
+				pip->irq_cmd_buf, length);
+		if (!(pip->resp_sort_func &&
+			pip->resp_sort_func(cyapa,
+				pip->irq_cmd_buf, length))) {
 			/*
-			 * Work around the Gen5 V1 firmware
-			 * that does not assert interrupt signalling
-			 * that command response is ready if user
-			 * keeps touching the trackpad while command
-			 * is sent to the device.
+			 * Work around a Gen5 V1 firmware issue:
+			 * no interrupt is asserted from the trackpad
+			 * device to the host when the command response
+			 * is ready. When a finger is touching the
+			 * trackpad, the firmware output queue is never
+			 * empty (it always holds touch report data), so
+			 * the interrupt signal is not asserted again
+			 * until the output queue has been emptied.
+			 * This happens in the scenario where the user
+			 * keeps fingers on the trackpad device while the
+			 * system is booting or rebooting.
 			 */
 			length = 0;
-			if (gen5_pip->resp_len)
-				length = *gen5_pip->resp_len;
+			if (pip->resp_len)
+				length = *pip->resp_len;
 			cyapa_empty_pip_output_data(cyapa,
-					gen5_pip->resp_data,
+					pip->resp_data,
 					&length,
-					gen5_pip->resp_sort_func);
-			if (gen5_pip->resp_len && length != 0) {
-				*gen5_pip->resp_len = length;
-				atomic_dec(&gen5_pip->cmd_issued);
-				complete(&gen5_pip->cmd_ready);
+					pip->resp_sort_func);
+			if (pip->resp_len && length != 0) {
+				*pip->resp_len = length;
+				atomic_dec(&pip->cmd_issued);
+				complete(&pip->cmd_ready);
 			}
 			return false;
 		}
 
-		if (gen5_pip->resp_data && gen5_pip->resp_len) {
-			*gen5_pip->resp_len = (*gen5_pip->resp_len < length) ?
-				*gen5_pip->resp_len : length;
-			memcpy(gen5_pip->resp_data, gen5_pip->irq_cmd_buf,
-				*gen5_pip->resp_len);
+		if (pip->resp_data && pip->resp_len) {
+			*pip->resp_len = (*pip->resp_len < length) ?
+				*pip->resp_len : length;
+			memcpy(pip->resp_data, pip->irq_cmd_buf,
+				*pip->resp_len);
 		}
-		atomic_dec(&gen5_pip->cmd_issued);
-		complete(&gen5_pip->cmd_ready);
+		atomic_dec(&pip->cmd_issued);
+		complete(&pip->cmd_ready);
 		return false;
 	}
 
 	return true;
 }
 
-static void cyapa_gen5_report_buttons(struct cyapa *cyapa,
-		const struct cyapa_gen5_report_data *report_data)
+static void cyapa_pip_report_buttons(struct cyapa *cyapa,
+		const struct cyapa_pip_report_data *report_data)
 {
 	struct input_dev *input = cyapa->input;
-	u8 buttons = report_data->report_head[GEN5_BUTTONS_OFFSET];
+	u8 buttons = report_data->report_head[PIP_BUTTONS_OFFSET];
 
 	buttons = (buttons << CAPABILITY_BTN_SHIFT) & CAPABILITY_BTN_MASK;
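After the shift-and-mask above, each set bit in buttons corresponds to one physical button, and the lines that follow (outside this hunk) report them as input key events. An illustrative one-liner of that mapping, assuming the conventional left-button capability mask name:

/* CAPABILITY_LEFT_BTN_MASK is an assumed name for illustration. */
input_report_key(input, BTN_LEFT, !!(buttons & CAPABILITY_LEFT_BTN_MASK));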
 
@@ -2605,12 +2643,23 @@
 	input_sync(input);
 }
 
-static void cyapa_gen5_report_slot_data(struct cyapa *cyapa,
-		const struct cyapa_gen5_touch_record *touch)
+static void cyapa_pip_report_proximity(struct cyapa *cyapa,
+		const struct cyapa_pip_report_data *report_data)
 {
 	struct input_dev *input = cyapa->input;
-	u8 event_id = GEN5_GET_EVENT_ID(touch->touch_tip_event_id);
-	int slot = GEN5_GET_TOUCH_ID(touch->touch_tip_event_id);
+	u8 distance = report_data->report_head[PIP_PROXIMITY_DISTANCE_OFFSET] &
+			PIP_PROXIMITY_DISTANCE_MASK;
+
+	input_report_abs(input, ABS_DISTANCE, distance);
+	input_sync(input);
+}
+
+static void cyapa_pip_report_slot_data(struct cyapa *cyapa,
+		const struct cyapa_pip_touch_record *touch)
+{
+	struct input_dev *input = cyapa->input;
+	u8 event_id = PIP_GET_EVENT_ID(touch->touch_tip_event_id);
+	int slot = PIP_GET_TOUCH_ID(touch->touch_tip_event_id);
 	int x, y;
 
 	if (event_id == RECORD_EVENT_LIFTOFF)
@@ -2621,11 +2670,12 @@
 	x = (touch->x_hi << 8) | touch->x_lo;
 	if (cyapa->x_origin)
 		x = cyapa->max_abs_x - x;
-	input_report_abs(input, ABS_MT_POSITION_X, x);
 	y = (touch->y_hi << 8) | touch->y_lo;
 	if (cyapa->y_origin)
 		y = cyapa->max_abs_y - y;
+	input_report_abs(input, ABS_MT_POSITION_X, x);
 	input_report_abs(input, ABS_MT_POSITION_Y, y);
+	input_report_abs(input, ABS_DISTANCE, 0);
 	input_report_abs(input, ABS_MT_PRESSURE,
 		touch->z);
 	input_report_abs(input, ABS_MT_TOUCH_MAJOR,
@@ -2642,50 +2692,49 @@
 		touch->orientation);
 }
 
-static void cyapa_gen5_report_touches(struct cyapa *cyapa,
-		const struct cyapa_gen5_report_data *report_data)
+static void cyapa_pip_report_touches(struct cyapa *cyapa,
+		const struct cyapa_pip_report_data *report_data)
 {
 	struct input_dev *input = cyapa->input;
 	unsigned int touch_num;
 	int i;
 
-	touch_num = report_data->report_head[GEN5_NUMBER_OF_TOUCH_OFFSET] &
-			GEN5_NUMBER_OF_TOUCH_MASK;
+	touch_num = report_data->report_head[PIP_NUMBER_OF_TOUCH_OFFSET] &
+			PIP_NUMBER_OF_TOUCH_MASK;
 
 	for (i = 0; i < touch_num; i++)
-		cyapa_gen5_report_slot_data(cyapa,
+		cyapa_pip_report_slot_data(cyapa,
 			&report_data->touch_records[i]);
 
 	input_mt_sync_frame(input);
 	input_sync(input);
 }
 
-static int cyapa_gen5_irq_handler(struct cyapa *cyapa)
+int cyapa_pip_irq_handler(struct cyapa *cyapa)
 {
 	struct device *dev = &cyapa->client->dev;
-	struct cyapa_gen5_report_data report_data;
-	int ret;
-	u8 report_id;
+	struct cyapa_pip_report_data report_data;
 	unsigned int report_len;
+	u8 report_id;
+	int ret;
 
-	if (cyapa->gen != CYAPA_GEN5 ||
-		cyapa->state != CYAPA_STATE_GEN5_APP) {
+	if (!cyapa_is_pip_app_mode(cyapa)) {
 		dev_err(dev, "invalid device state, gen=%d, state=0x%02x\n",
 			cyapa->gen, cyapa->state);
 		return -EINVAL;
 	}
 
 	ret = cyapa_i2c_pip_read(cyapa, (u8 *)&report_data,
-			GEN5_RESP_LENGTH_SIZE);
-	if (ret != GEN5_RESP_LENGTH_SIZE) {
+			PIP_RESP_LENGTH_SIZE);
+	if (ret != PIP_RESP_LENGTH_SIZE) {
 		dev_err(dev, "failed to read length bytes, (%d)\n", ret);
 		return -EINVAL;
 	}
 
 	report_len = get_unaligned_le16(
-			&report_data.report_head[GEN5_RESP_LENGTH_OFFSET]);
-	if (report_len < GEN5_RESP_LENGTH_SIZE) {
-		/* Invliad length or internal reset happened. */
+			&report_data.report_head[PIP_RESP_LENGTH_OFFSET]);
+	if (report_len < PIP_RESP_LENGTH_SIZE) {
+		/* Invalid length or internal reset happened. */
 		dev_err(dev, "invalid report_len=%d. bytes: %02x %02x\n",
 			report_len, report_data.report_head[0],
 			report_data.report_head[1]);
@@ -2693,7 +2742,7 @@
 	}
 
 	/* Idle, no data for report. */
-	if (report_len == GEN5_RESP_LENGTH_SIZE)
+	if (report_len == PIP_RESP_LENGTH_SIZE)
 		return 0;
 
 	ret = cyapa_i2c_pip_read(cyapa, (u8 *)&report_data, report_len);
@@ -2703,70 +2752,92 @@
 		return -EINVAL;
 	}
 
-	report_id = report_data.report_head[GEN5_RESP_REPORT_ID_OFFSET];
-	if (report_id == GEN5_WAKEUP_EVENT_REPORT_ID &&
-			report_len == GEN5_WAKEUP_EVENT_SIZE) {
+	report_id = report_data.report_head[PIP_RESP_REPORT_ID_OFFSET];
+	if (report_id == PIP_WAKEUP_EVENT_REPORT_ID &&
+			report_len == PIP_WAKEUP_EVENT_SIZE) {
 		/*
 		 * Device wake event from deep sleep mode for touch.
 		 * This interrupt event is used to wake system up.
+		 *
+		 * Note:
+		 * It introduces about 20~40 ms of additional delay
+		 * before the first valid touch report is received;
+		 * that time is spent executing the device runtime
+		 * resume process.
 		 */
+		pm_runtime_get_sync(dev);
+		pm_runtime_mark_last_busy(dev);
+		pm_runtime_put_sync_autosuspend(dev);
 		return 0;
-	} else if (report_id != GEN5_TOUCH_REPORT_ID &&
-			report_id != GEN5_BTN_REPORT_ID &&
+	} else if (report_id != PIP_TOUCH_REPORT_ID &&
+			report_id != PIP_BTN_REPORT_ID &&
 			report_id != GEN5_OLD_PUSH_BTN_REPORT_ID &&
-			report_id != GEN5_PUSH_BTN_REPORT_ID) {
+			report_id != PIP_PUSH_BTN_REPORT_ID &&
+			report_id != PIP_PROXIMITY_REPORT_ID) {
 		/* Running in BL mode or unknown response data read. */
 		dev_err(dev, "invalid report_id=0x%02x\n", report_id);
 		return -EINVAL;
 	}
 
-	if (report_id == GEN5_TOUCH_REPORT_ID &&
-		(report_len < GEN5_TOUCH_REPORT_HEAD_SIZE ||
-			report_len > GEN5_TOUCH_REPORT_MAX_SIZE)) {
+	if (report_id == PIP_TOUCH_REPORT_ID &&
+		(report_len < PIP_TOUCH_REPORT_HEAD_SIZE ||
+			report_len > PIP_TOUCH_REPORT_MAX_SIZE)) {
 		/* Invalid report data length for finger packet. */
 		dev_err(dev, "invalid touch packet length=%d\n", report_len);
 		return 0;
 	}
 
-	if ((report_id == GEN5_BTN_REPORT_ID ||
+	if ((report_id == PIP_BTN_REPORT_ID ||
 			report_id == GEN5_OLD_PUSH_BTN_REPORT_ID ||
-			report_id == GEN5_PUSH_BTN_REPORT_ID) &&
-		(report_len < GEN5_BTN_REPORT_HEAD_SIZE ||
-			report_len > GEN5_BTN_REPORT_MAX_SIZE)) {
+			report_id == PIP_PUSH_BTN_REPORT_ID) &&
+		(report_len < PIP_BTN_REPORT_HEAD_SIZE ||
+			report_len > PIP_BTN_REPORT_MAX_SIZE)) {
 		/* Invalid report data length of button packet. */
 		dev_err(dev, "invalid button packet length=%d\n", report_len);
 		return 0;
 	}
 
-	if (report_id == GEN5_TOUCH_REPORT_ID)
-		cyapa_gen5_report_touches(cyapa, &report_data);
+	if (report_id == PIP_PROXIMITY_REPORT_ID &&
+			report_len != PIP_PROXIMITY_REPORT_SIZE) {
+		/* Invalid report data length of proximity packet. */
+		dev_err(dev, "invalid proximity data, length=%d\n", report_len);
+		return 0;
+	}
+
+	if (report_id == PIP_TOUCH_REPORT_ID)
+		cyapa_pip_report_touches(cyapa, &report_data);
+	else if (report_id == PIP_PROXIMITY_REPORT_ID)
+		cyapa_pip_report_proximity(cyapa, &report_data);
 	else
-		cyapa_gen5_report_buttons(cyapa, &report_data);
+		cyapa_pip_report_buttons(cyapa, &report_data);
 
 	return 0;
 }
 
-static int cyapa_gen5_bl_activate(struct cyapa *cyapa) { return 0; }
-static int cyapa_gen5_bl_deactivate(struct cyapa *cyapa) { return 0; }
+int cyapa_pip_bl_activate(struct cyapa *cyapa) { return 0; }
+int cyapa_pip_bl_deactivate(struct cyapa *cyapa) { return 0; }
+
 const struct cyapa_dev_ops cyapa_gen5_ops = {
-	.check_fw = cyapa_gen5_check_fw,
-	.bl_enter = cyapa_gen5_bl_enter,
-	.bl_initiate = cyapa_gen5_bl_initiate,
-	.update_fw = cyapa_gen5_do_fw_update,
-	.bl_activate = cyapa_gen5_bl_activate,
-	.bl_deactivate = cyapa_gen5_bl_deactivate,
+	.check_fw = cyapa_pip_check_fw,
+	.bl_enter = cyapa_pip_bl_enter,
+	.bl_initiate = cyapa_pip_bl_initiate,
+	.update_fw = cyapa_pip_do_fw_update,
+	.bl_activate = cyapa_pip_bl_activate,
+	.bl_deactivate = cyapa_pip_bl_deactivate,
 
 	.show_baseline = cyapa_gen5_show_baseline,
-	.calibrate_store = cyapa_gen5_do_calibrate,
+	.calibrate_store = cyapa_pip_do_calibrate,
 
-	.initialize = cyapa_gen5_initialize,
+	.initialize = cyapa_pip_cmd_state_initialize,
 
 	.state_parse = cyapa_gen5_state_parse,
 	.operational_check = cyapa_gen5_do_operational_check,
 
-	.irq_handler = cyapa_gen5_irq_handler,
-	.irq_cmd_handler = cyapa_gen5_irq_cmd_handler,
+	.irq_handler = cyapa_pip_irq_handler,
+	.irq_cmd_handler = cyapa_pip_irq_cmd_handler,
 	.sort_empty_output_data = cyapa_empty_pip_output_data,
 	.set_power_mode = cyapa_gen5_set_power_mode,
+
+	.set_proximity = cyapa_pip_set_proximity,
 };
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
new file mode 100644
index 0000000..5f19107
--- /dev/null
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -0,0 +1,749 @@
+/*
+ * Cypress APA trackpad with I2C interface
+ *
+ * Author: Dudley Du <dudl@cypress.com>
+ *
+ * Copyright (C) 2015 Cypress Semiconductor, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+#include <linux/crc-itu-t.h>
+#include "cyapa.h"
+
+#define GEN6_ENABLE_CMD_IRQ	0x41
+#define GEN6_DISABLE_CMD_IRQ	0x42
+#define GEN6_ENABLE_DEV_IRQ	0x43
+#define GEN6_DISABLE_DEV_IRQ	0x44
+
+#define GEN6_POWER_MODE_ACTIVE		0x01
+#define GEN6_POWER_MODE_LP_MODE1	0x02
+#define GEN6_POWER_MODE_LP_MODE2	0x03
+#define GEN6_POWER_MODE_BTN_ONLY	0x04
+
+#define GEN6_SET_POWER_MODE_INTERVAL	0x47
+#define GEN6_GET_POWER_MODE_INTERVAL	0x48
+
+#define GEN6_MAX_RX_NUM 14
+#define GEN6_RETRIEVE_DATA_ID_RX_ATTENURATOR_IDAC	0x00
+#define GEN6_RETRIEVE_DATA_ID_ATTENURATOR_TRIM		0x12
+
+struct pip_app_cmd_head {
+	__le16 addr;
+	__le16 length;
+	u8 report_id;
+	u8 resv;  /* Reserved, must be 0 */
+	u8 cmd_code;  /* bit7: resv, set to 0; bit6~0: command code. */
+} __packed;
+
+struct pip_app_resp_head {
+	__le16 length;
+	u8 report_id;
+	u8 resv;  /* Reserved, must be 0 */
+	u8 cmd_code;  /* bit7: TGL; bit6~0: command code. */
+	/*
+	 * The value of data_status can be the first byte of data or
+	 * the command status or the unsupported command code depending on the
+	 * requested command code.
+	 */
+	u8 data_status;
+} __packed;
+
+struct pip_fixed_info {
+	u8 silicon_id_high;
+	u8 silicon_id_low;
+	u8 family_id;
+};
+
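+/* PIP bootloader "Get Bootloader Information" command packet. */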
+static u8 pip_get_bl_info[] = {
+	0x04, 0x00, 0x0B, 0x00, 0x40, 0x00, 0x01, 0x38,
+	0x00, 0x00, 0x70, 0x9E, 0x17
+};
+
+static bool cyapa_sort_pip_hid_descriptor_data(struct cyapa *cyapa,
+		u8 *buf, int len)
+{
+	if (len != PIP_HID_DESCRIPTOR_SIZE)
+		return false;
+
+	if (buf[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_APP_REPORT_ID ||
+		buf[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_BL_REPORT_ID)
+		return true;
+
+	return false;
+}
+
+static int cyapa_get_pip_fixed_info(struct cyapa *cyapa,
+		struct pip_fixed_info *pip_info, bool is_bootloader)
+{
+	u8 resp_data[PIP_READ_SYS_INFO_RESP_LENGTH];
+	int resp_len;
+	u16 product_family;
+	int error;
+
+	if (is_bootloader) {
+		/* Read Bootloader Information to determine Gen5 or Gen6. */
+		resp_len = sizeof(resp_data);
+		error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+				pip_get_bl_info, sizeof(pip_get_bl_info),
+				resp_data, &resp_len,
+				2000, cyapa_sort_tsg_pip_bl_resp_data,
+				false);
+		if (error || resp_len < PIP_BL_GET_INFO_RESP_LENGTH)
+			return error ? error : -EIO;
+
+		pip_info->family_id = resp_data[8];
+		pip_info->silicon_id_low = resp_data[10];
+		pip_info->silicon_id_high = resp_data[11];
+
+		return 0;
+	}
+
+	/* Get App System Information to determine Gen5 or Gen6. */
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+			pip_read_sys_info, PIP_READ_SYS_INFO_CMD_LENGTH,
+			resp_data, &resp_len,
+			2000, cyapa_pip_sort_system_info_data, false);
+	if (error || resp_len < PIP_READ_SYS_INFO_RESP_LENGTH)
+		return error ? error : -EIO;
+
+	product_family = get_unaligned_le16(&resp_data[7]);
+	if ((product_family & PIP_PRODUCT_FAMILY_MASK) !=
+		PIP_PRODUCT_FAMILY_TRACKPAD)
+		return -EINVAL;
+
+	pip_info->family_id = resp_data[19];
+	pip_info->silicon_id_low = resp_data[21];
+	pip_info->silicon_id_high = resp_data[22];
+
+	return 0;
+}
+
+int cyapa_pip_state_parse(struct cyapa *cyapa, u8 *reg_data, int len)
+{
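+	/* HID descriptor register address (0x0001), little-endian. */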
+	u8 cmd[] = { 0x01, 0x00 };
+	struct pip_fixed_info pip_info;
+	u8 resp_data[PIP_HID_DESCRIPTOR_SIZE];
+	int resp_len;
+	bool is_bootloader;
+	int error;
+
+	cyapa->state = CYAPA_STATE_NO_DEVICE;
+
+	/* Try to wake the device from deep sleep if it is sleeping. */
+	cyapa_pip_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_ON);
+
+	/* Empty the buffer queue to get fresh data with later commands. */
+	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
+
+	/*
+	 * Read the HID descriptor from the trackpad device to determine
+	 * whether it is running in APP mode or Bootloader mode.
+	 */
+	resp_len = PIP_HID_DESCRIPTOR_SIZE;
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+			cmd, sizeof(cmd),
+			resp_data, &resp_len,
+			300,
+			cyapa_sort_pip_hid_descriptor_data,
+			false);
+	if (error)
+		return error;
+
+	if (resp_data[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_BL_REPORT_ID)
+		is_bootloader = true;
+	else if (resp_data[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_APP_REPORT_ID)
+		is_bootloader = false;
+	else
+		return -EAGAIN;
+
+	/* Get PIP fixed information to determine Gen5 or Gen6. */
+	memset(&pip_info, 0, sizeof(struct pip_fixed_info));
+	error = cyapa_get_pip_fixed_info(cyapa, &pip_info, is_bootloader);
+	if (error)
+		return error;
+
+	if (pip_info.family_id == 0x9B && pip_info.silicon_id_high == 0x0B) {
+		cyapa->gen = CYAPA_GEN6;
+		cyapa->state = is_bootloader ? CYAPA_STATE_GEN6_BL
+					     : CYAPA_STATE_GEN6_APP;
+	} else if (pip_info.family_id == 0x91 &&
+		   pip_info.silicon_id_high == 0x02) {
+		cyapa->gen = CYAPA_GEN5;
+		cyapa->state = is_bootloader ? CYAPA_STATE_GEN5_BL
+					     : CYAPA_STATE_GEN5_APP;
+	}
+
+	return 0;
+}
+
+static int cyapa_gen6_read_sys_info(struct cyapa *cyapa)
+{
+	u8 resp_data[PIP_READ_SYS_INFO_RESP_LENGTH];
+	int resp_len;
+	u16 product_family;
+	u8 rotat_align;
+	int error;
+
+	/* Read App System Information to get the device capabilities. */
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+			pip_read_sys_info, PIP_READ_SYS_INFO_CMD_LENGTH,
+			resp_data, &resp_len,
+			2000, cyapa_pip_sort_system_info_data, false);
+	if (error || resp_len < sizeof(resp_data))
+		return error ? error : -EIO;
+
+	product_family = get_unaligned_le16(&resp_data[7]);
+	if ((product_family & PIP_PRODUCT_FAMILY_MASK) !=
+		PIP_PRODUCT_FAMILY_TRACKPAD)
+		return -EINVAL;
+
+	cyapa->platform_ver = (resp_data[67] >> PIP_BL_PLATFORM_VER_SHIFT) &
+			      PIP_BL_PLATFORM_VER_MASK;
+	cyapa->fw_maj_ver = resp_data[9];
+	cyapa->fw_min_ver = resp_data[10];
+
+	cyapa->electrodes_x = resp_data[33];
+	cyapa->electrodes_y = resp_data[34];
+
+	cyapa->physical_size_x = get_unaligned_le16(&resp_data[35]) / 100;
+	cyapa->physical_size_y = get_unaligned_le16(&resp_data[37]) / 100;
+
+	cyapa->max_abs_x = get_unaligned_le16(&resp_data[39]);
+	cyapa->max_abs_y = get_unaligned_le16(&resp_data[41]);
+
+	cyapa->max_z = get_unaligned_le16(&resp_data[43]);
+
+	cyapa->x_origin = resp_data[45] & 0x01;
+	cyapa->y_origin = resp_data[46] & 0x01;
+
+	cyapa->btn_capability = (resp_data[70] << 3) & CAPABILITY_BTN_MASK;
+
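+	/* Assemble the product ID in "XXXXX-XXXXXX-XX" form. */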
+	memcpy(&cyapa->product_id[0], &resp_data[51], 5);
+	cyapa->product_id[5] = '-';
+	memcpy(&cyapa->product_id[6], &resp_data[56], 6);
+	cyapa->product_id[12] = '-';
+	memcpy(&cyapa->product_id[13], &resp_data[62], 2);
+	cyapa->product_id[15] = '\0';
+
+	rotat_align = resp_data[68];
+	if (rotat_align)
+		cyapa->electrodes_rx = cyapa->electrodes_y;
+	else
+		cyapa->electrodes_rx = cyapa->electrodes_x;
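+	/* Round the RX electrode count up to a multiple of 4. */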
+	cyapa->aligned_electrodes_rx = (cyapa->electrodes_rx + 3) & ~3u;
+
+	if (!cyapa->electrodes_x || !cyapa->electrodes_y ||
+		!cyapa->physical_size_x || !cyapa->physical_size_y ||
+		!cyapa->max_abs_x || !cyapa->max_abs_y || !cyapa->max_z)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int cyapa_gen6_bl_read_app_info(struct cyapa *cyapa)
+{
+	u8 resp_data[PIP_BL_APP_INFO_RESP_LENGTH];
+	int resp_len;
+	int error;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+			pip_bl_read_app_info, PIP_BL_READ_APP_INFO_CMD_LENGTH,
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_bl_resp_data, false);
+	if (error || resp_len < PIP_BL_APP_INFO_RESP_LENGTH ||
+		!PIP_CMD_COMPLETE_SUCCESS(resp_data))
+		return error ? error : -EIO;
+
+	cyapa->fw_maj_ver = resp_data[8];
+	cyapa->fw_min_ver = resp_data[9];
+
+	cyapa->platform_ver = (resp_data[12] >> PIP_BL_PLATFORM_VER_SHIFT) &
+			      PIP_BL_PLATFORM_VER_MASK;
+
+	memcpy(&cyapa->product_id[0], &resp_data[13], 5);
+	cyapa->product_id[5] = '-';
+	memcpy(&cyapa->product_id[6], &resp_data[18], 6);
+	cyapa->product_id[12] = '-';
+	memcpy(&cyapa->product_id[13], &resp_data[24], 2);
+	cyapa->product_id[15] = '\0';
+
+	return 0;
+}
+
+static int cyapa_gen6_config_dev_irq(struct cyapa *cyapa, u8 cmd_code)
+{
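+	/*
+	 * PIP application command (struct pip_app_cmd_head layout):
+	 * output register address, length, report ID, reserved byte,
+	 * followed by the IRQ configuration command code.
+	 */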
+	u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, cmd_code };
+	u8 resp_data[6];
+	int resp_len;
+	int error;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
+	if (error || !VALID_CMD_RESP_HEADER(resp_data, cmd_code) ||
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
+		return error < 0 ? error : -EINVAL;
+
+	return 0;
+}
+
+static int cyapa_gen6_set_proximity(struct cyapa *cyapa, bool enable)
+{
+	int error;
+
+	cyapa_gen6_config_dev_irq(cyapa, GEN6_DISABLE_CMD_IRQ);
+	error = cyapa_pip_set_proximity(cyapa, enable);
+	cyapa_gen6_config_dev_irq(cyapa, GEN6_ENABLE_CMD_IRQ);
+
+	return error;
+}
+
+static int cyapa_gen6_change_power_state(struct cyapa *cyapa, u8 power_mode)
+{
+	u8 cmd[] = { 0x04, 0x00, 0x06, 0x00, 0x2f, 0x00, 0x46, power_mode };
+	u8 resp_data[6];
+	int resp_len;
+	int error;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
+	if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x46))
+		return error < 0 ? error : -EINVAL;
+
+	/* The power state applied by the device does not match the request. */
+	if (resp_data[5] != power_mode)
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int cyapa_gen6_set_interval_setting(struct cyapa *cyapa,
+		struct gen6_interval_setting *interval_setting)
+{
+	struct gen6_set_interval_cmd {
+		__le16 addr;
+		__le16 length;
+		u8 report_id;
+		u8 rsvd;  /* Reserved, must be 0 */
+		u8 cmd_code;
+		__le16 active_interval;
+		__le16 lp1_interval;
+		__le16 lp2_interval;
+	} __packed set_interval_cmd;
+	u8 resp_data[11];
+	int resp_len;
+	int error;
+
+	memset(&set_interval_cmd, 0, sizeof(set_interval_cmd));
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &set_interval_cmd.addr);
+	put_unaligned_le16(sizeof(set_interval_cmd) - 2,
+			   &set_interval_cmd.length);
+	set_interval_cmd.report_id = PIP_APP_CMD_REPORT_ID;
+	set_interval_cmd.cmd_code = GEN6_SET_POWER_MODE_INTERVAL;
+	put_unaligned_le16(interval_setting->active_interval,
+			   &set_interval_cmd.active_interval);
+	put_unaligned_le16(interval_setting->lp1_interval,
+			   &set_interval_cmd.lp1_interval);
+	put_unaligned_le16(interval_setting->lp2_interval,
+			   &set_interval_cmd.lp2_interval);
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+			(u8 *)&set_interval_cmd, sizeof(set_interval_cmd),
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
+	if (error ||
+		!VALID_CMD_RESP_HEADER(resp_data, GEN6_SET_POWER_MODE_INTERVAL))
+		return error < 0 ? error : -EINVAL;
+
+	/* Read back the intervals actually applied from the response. */
+	interval_setting->active_interval = get_unaligned_le16(&resp_data[5]);
+	interval_setting->lp1_interval = get_unaligned_le16(&resp_data[7]);
+	interval_setting->lp2_interval = get_unaligned_le16(&resp_data[9]);
+
+	return 0;
+}
+
+static int cyapa_gen6_get_interval_setting(struct cyapa *cyapa,
+		struct gen6_interval_setting *interval_setting)
+{
+	u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00,
+		     GEN6_GET_POWER_MODE_INTERVAL };
+	u8 resp_data[11];
+	int resp_len;
+	int error;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
+	if (error ||
+		!VALID_CMD_RESP_HEADER(resp_data, GEN6_GET_POWER_MODE_INTERVAL))
+		return error < 0 ? error : -EINVAL;
+
+	interval_setting->active_interval = get_unaligned_le16(&resp_data[5]);
+	interval_setting->lp1_interval = get_unaligned_le16(&resp_data[7]);
+	interval_setting->lp2_interval = get_unaligned_le16(&resp_data[9]);
+
+	return 0;
+}
+
+static int cyapa_gen6_deep_sleep(struct cyapa *cyapa, u8 state)
+{
+	u8 ping[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x00 };
+
+	if (state == PIP_DEEP_SLEEP_STATE_ON)
+		/*
+		 * Send a ping command to tell the device to prepare for
+		 * wake-up while it is in deep sleep mode. At this point
+		 * the device responds with nothing but an I2C NAK.
+		 */
+		cyapa_i2c_pip_write(cyapa, ping, sizeof(ping));
+
+	return cyapa_pip_deep_sleep(cyapa, state);
+}
+
+static int cyapa_gen6_set_power_mode(struct cyapa *cyapa,
+		u8 power_mode, u16 sleep_time, bool is_suspend)
+{
+	struct device *dev = &cyapa->client->dev;
+	struct gen6_interval_setting *interval_setting =
+			&cyapa->gen6_interval_setting;
+	u8 lp_mode;
+	int error;
+
+	if (cyapa->state != CYAPA_STATE_GEN6_APP)
+		return 0;
+
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == UNINIT_PWR_MODE) {
+		/*
+		 * Assume the trackpad is in deep sleep mode when the driver
+		 * is loaded, to avoid command I/O issues after an unload
+		 * and reload cycle that left the device in deep sleep.
+		 */
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+	}
+
+	if (PIP_DEV_UNINIT_SLEEP_TIME(cyapa) &&
+		PIP_DEV_GET_PWR_STATE(cyapa) != PWR_MODE_OFF)
+		PIP_DEV_SET_SLEEP_TIME(cyapa, UNINIT_SLEEP_TIME);
+
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == power_mode) {
+		if (power_mode == PWR_MODE_OFF ||
+			power_mode == PWR_MODE_FULL_ACTIVE ||
+			power_mode == PWR_MODE_BTN_ONLY ||
+			PIP_DEV_GET_SLEEP_TIME(cyapa) == sleep_time) {
+			/* Already in the requested power mode; return early. */
+			return 0;
+		}
+	}
+
+	if (power_mode == PWR_MODE_OFF) {
+		cyapa_gen6_config_dev_irq(cyapa, GEN6_DISABLE_CMD_IRQ);
+
+		error = cyapa_gen6_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_OFF);
+		if (error) {
+			dev_err(dev, "failed to enter deep sleep: %d\n", error);
+			return error;
+		}
+
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+		return 0;
+	}
+
+	/*
+	 * When the trackpad is in power off mode, it cannot change to
+	 * another power state directly; it must first be woken up from
+	 * deep sleep before the next power state change can proceed.
+	 */
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == PWR_MODE_OFF) {
+		error = cyapa_gen6_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_ON);
+		if (error) {
+			dev_err(dev, "failed to wake from deep sleep: %d\n", error);
+			return error;
+		}
+	}
+
+	/*
+	 * Disable the device's command response interrupts so they do not
+	 * disturb the system suspend or hibernation process.
+	 */
+	cyapa_gen6_config_dev_irq(cyapa, GEN6_DISABLE_CMD_IRQ);
+
+	if (power_mode == PWR_MODE_FULL_ACTIVE) {
+		error = cyapa_gen6_change_power_state(cyapa,
+				GEN6_POWER_MODE_ACTIVE);
+		if (error) {
+			dev_err(dev, "failed to change to active: %d\n", error);
+			goto out;
+		}
+
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_FULL_ACTIVE);
+
+		/* Sync the interval setting from device. */
+		cyapa_gen6_get_interval_setting(cyapa, interval_setting);
+
+	} else if (power_mode == PWR_MODE_BTN_ONLY) {
+		error = cyapa_gen6_change_power_state(cyapa,
+				GEN6_POWER_MODE_BTN_ONLY);
+		if (error) {
+			dev_err(dev, "failed to enter button only mode: %d\n", error);
+			goto out;
+		}
+
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_BTN_ONLY);
+	} else {
+		/*
+		 * Gen6 internally supports two low power scan intervals,
+		 * which allows switching power modes quickly, e.g. between
+		 * runtime suspend and system suspend.
+		 */
+		if (interval_setting->lp1_interval == sleep_time) {
+			lp_mode = GEN6_POWER_MODE_LP_MODE1;
+		} else if (interval_setting->lp2_interval == sleep_time) {
+			lp_mode = GEN6_POWER_MODE_LP_MODE2;
+		} else {
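+			/* Claim an unused low power interval slot. */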
+			if (interval_setting->lp1_interval == 0) {
+				interval_setting->lp1_interval = sleep_time;
+				lp_mode = GEN6_POWER_MODE_LP_MODE1;
+			} else {
+				interval_setting->lp2_interval = sleep_time;
+				lp_mode = GEN6_POWER_MODE_LP_MODE2;
+			}
+			cyapa_gen6_set_interval_setting(cyapa,
+							interval_setting);
+		}
+
+		error = cyapa_gen6_change_power_state(cyapa, lp_mode);
+		if (error) {
+			dev_err(dev, "set power state to 0x%02x failed: %d\n",
+				lp_mode, error);
+			goto out;
+		}
+
+		PIP_DEV_SET_SLEEP_TIME(cyapa, sleep_time);
+		PIP_DEV_SET_PWR_STATE(cyapa,
+			cyapa_sleep_time_to_pwr_cmd(sleep_time));
+	}
+
+out:
+	cyapa_gen6_config_dev_irq(cyapa, GEN6_ENABLE_CMD_IRQ);
+	return error;
+}
+
+static int cyapa_gen6_initialize(struct cyapa *cyapa)
+{
+	return 0;
+}
+
+static int cyapa_pip_retrieve_data_structure(struct cyapa *cyapa,
+		u16 read_offset, u16 read_len, u8 data_id,
+		u8 *data, int *data_buf_lens)
+{
+	struct retrieve_data_struct_cmd {
+		struct pip_app_cmd_head head;
+		__le16 read_offset;
+		__le16 read_length;
+		u8 data_id;
+	} __packed cmd;
+	u8 resp_data[GEN6_MAX_RX_NUM + 10];
+	int resp_len;
+	int error;
+
+	memset(&cmd, 0, sizeof(cmd));
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &cmd.head.addr);
+	put_unaligned_le16(sizeof(cmd) - 2, &cmd.head.length);
+	cmd.head.report_id = PIP_APP_CMD_REPORT_ID;
+	cmd.head.cmd_code = PIP_RETRIEVE_DATA_STRUCTURE;
+	put_unaligned_le16(read_offset, &cmd.read_offset);
+	put_unaligned_le16(read_len, &cmd.read_length);
+	cmd.data_id = data_id;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+				(u8 *)&cmd, sizeof(cmd),
+				resp_data, &resp_len,
+				500, cyapa_sort_tsg_pip_app_resp_data,
+				true);
+	if (error || !PIP_CMD_COMPLETE_SUCCESS(resp_data) ||
+		resp_data[6] != data_id ||
+		!VALID_CMD_RESP_HEADER(resp_data, PIP_RETRIEVE_DATA_STRUCTURE))
+		return (error < 0) ? error : -EAGAIN;
+
+	read_len = get_unaligned_le16(&resp_data[7]);
+	if (*data_buf_lens < read_len) {
+		*data_buf_lens = read_len;
+		return -ENOBUFS;
+	}
+
+	memcpy(data, &resp_data[10], read_len);
+	*data_buf_lens = read_len;
+	return 0;
+}
+
+static ssize_t cyapa_gen6_show_baseline(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyapa *cyapa = dev_get_drvdata(dev);
+	u8 data[GEN6_MAX_RX_NUM];
+	int data_len;
+	int size = 0;
+	int i;
+	int error;
+	int resume_error;
+
+	if (!cyapa_is_pip_app_mode(cyapa))
+		return -EBUSY;
+
+	/* 1. Suspend scanning. */
+	error = cyapa_pip_suspend_scanning(cyapa);
+	if (error)
+		return error;
+
+	/* 2. IDAC and RX Attenuator Calibration Data (Center Frequency). */
+	data_len = sizeof(data);
+	error = cyapa_pip_retrieve_data_structure(cyapa, 0, data_len,
+			GEN6_RETRIEVE_DATA_ID_RX_ATTENURATOR_IDAC,
+			data, &data_len);
+	if (error)
+		goto resume_scanning;
+
+	size = scnprintf(buf, PAGE_SIZE, "%d %d %d %d %d %d ",
+			data[0],  /* RX Attenuator Mutual */
+			data[1],  /* IDAC Mutual */
+			data[2],  /* RX Attenuator Self RX */
+			data[3],  /* IDAC Self RX */
+			data[4],  /* RX Attenuator Self TX */
+			data[5]	  /* IDAC Self TX */
+			);
+
+	/* 3. Read Attenuator Trim. */
+	data_len = sizeof(data);
+	error = cyapa_pip_retrieve_data_structure(cyapa, 0, data_len,
+			GEN6_RETRIEVE_DATA_ID_ATTENURATOR_TRIM,
+			data, &data_len);
+	if (error)
+		goto resume_scanning;
+
+	/* Print the attenuator trim values. */
+	for (i = 0; i < data_len; i++)
+		size += scnprintf(buf + size, PAGE_SIZE - size, "%d ", data[i]);
+	size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+
+resume_scanning:
+	/* 4. Resume scanning. */
+	resume_error = cyapa_pip_resume_scanning(cyapa);
+	if (resume_error || error) {
+		memset(buf, 0, PAGE_SIZE);
+		return resume_error ? resume_error : error;
+	}
+
+	return size;
+}
+
+static int cyapa_gen6_operational_check(struct cyapa *cyapa)
+{
+	struct device *dev = &cyapa->client->dev;
+	int error;
+
+	if (cyapa->gen != CYAPA_GEN6)
+		return -ENODEV;
+
+	switch (cyapa->state) {
+	case CYAPA_STATE_GEN6_BL:
+		error = cyapa_pip_bl_exit(cyapa);
+		if (error) {
+			/* Try to update trackpad product information. */
+			cyapa_gen6_bl_read_app_info(cyapa);
+			goto out;
+		}
+
+		cyapa->state = CYAPA_STATE_GEN6_APP;
+
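+		/* Fall through to the application mode checks. */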
+	case CYAPA_STATE_GEN6_APP:
+		/*
+		 * If the trackpad device is in deep sleep mode, application
+		 * commands will fail, so always try to reset the device to
+		 * full active mode when its state is required.
+		 */
+		error = cyapa_gen6_set_power_mode(cyapa,
+				PWR_MODE_FULL_ACTIVE, 0, false);
+		if (error)
+			dev_warn(dev, "%s: failed to set power active mode.\n",
+				__func__);
+
+		/* By default, the trackpad proximity function is enabled. */
+		error = cyapa_pip_set_proximity(cyapa, true);
+		if (error)
+			dev_warn(dev, "%s: failed to enable proximity.\n",
+				__func__);
+
+		/* Get trackpad product information. */
+		error = cyapa_gen6_read_sys_info(cyapa);
+		if (error)
+			goto out;
+		/* Only support product ID starting with CYTRA */
+		if (memcmp(cyapa->product_id, product_id,
+				strlen(product_id)) != 0) {
+			dev_err(dev, "%s: unknown product ID (%s)\n",
+				__func__, cyapa->product_id);
+			error = -EINVAL;
+		}
+		break;
+	default:
+		error = -EINVAL;
+	}
+
+out:
+	return error;
+}
+
+const struct cyapa_dev_ops cyapa_gen6_ops = {
+	.check_fw = cyapa_pip_check_fw,
+	.bl_enter = cyapa_pip_bl_enter,
+	.bl_initiate = cyapa_pip_bl_initiate,
+	.update_fw = cyapa_pip_do_fw_update,
+	.bl_activate = cyapa_pip_bl_activate,
+	.bl_deactivate = cyapa_pip_bl_deactivate,
+
+	.show_baseline = cyapa_gen6_show_baseline,
+	.calibrate_store = cyapa_pip_do_calibrate,
+
+	.initialize = cyapa_gen6_initialize,
+
+	.state_parse = cyapa_pip_state_parse,
+	.operational_check = cyapa_gen6_operational_check,
+
+	.irq_handler = cyapa_pip_irq_handler,
+	.irq_cmd_handler = cyapa_pip_irq_cmd_handler,
+	.sort_empty_output_data = cyapa_empty_pip_output_data,
+	.set_power_mode = cyapa_gen6_set_power_mode,
+
+	.set_proximity = cyapa_gen6_set_proximity,
+};
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 5b5f403..e2b7420 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -4,7 +4,7 @@
  * Copyright (c) 2013 ELAN Microelectronics Corp.
  *
  * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
- * Version: 1.5.9
+ * Version: 1.6.0
  *
  * Based on cyapa driver:
  * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
@@ -40,7 +40,7 @@
 #include "elan_i2c.h"
 
 #define DRIVER_NAME		"elan_i2c"
-#define ELAN_DRIVER_VERSION	"1.5.9"
+#define ELAN_DRIVER_VERSION	"1.6.0"
 #define ETP_MAX_PRESSURE	255
 #define ETP_FWIDTH_REDUCE	90
 #define ETP_FINGER_WIDTH	15
@@ -84,7 +84,7 @@
 	int			pressure_adjustment;
 	u8			mode;
 	u8			ic_type;
-	u16			fw_vaildpage_count;
+	u16			fw_validpage_count;
 	u16			fw_signature_address;
 
 	bool			irq_wake;
@@ -94,25 +94,28 @@
 	bool			baseline_ready;
 };
 
-static int elan_get_fwinfo(u8 ic_type, u16 *vaildpage_count,
+static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count,
 			   u16 *signature_address)
 {
-	switch(ic_type) {
+	switch (iap_version) {
+	case 0x08:
+		*validpage_count = 512;
+		break;
 	case 0x09:
-		*vaildpage_count = 768;
+		*validpage_count = 768;
 		break;
 	case 0x0D:
-		*vaildpage_count = 896;
+		*validpage_count = 896;
 		break;
 	default:
 		/* unknown iap version; clear values */
-		*vaildpage_count = 0;
+		*validpage_count = 0;
 		*signature_address = 0;
 		return -ENXIO;
 	}
 
 	*signature_address =
-		(*vaildpage_count * ETP_FW_PAGE_SIZE) - ETP_FW_SIGNATURE_SIZE;
+		(*validpage_count * ETP_FW_PAGE_SIZE) - ETP_FW_SIGNATURE_SIZE;
 
 	return 0;
 }
@@ -261,11 +264,11 @@
 	if (error)
 		return error;
 
-	error = elan_get_fwinfo(data->ic_type, &data->fw_vaildpage_count,
+	error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count,
 				&data->fw_signature_address);
 	if (error) {
 		dev_err(&data->client->dev,
-			"unknown ic type %d\n", data->ic_type);
+			"unknown iap version %d\n", data->iap_version);
 		return error;
 	}
 
@@ -353,7 +356,7 @@
 	iap_start_addr = get_unaligned_le16(&fw->data[ETP_IAP_START_ADDR * 2]);
 
 	boot_page_count = (iap_start_addr * 2) / ETP_FW_PAGE_SIZE;
-	for (i = boot_page_count; i < data->fw_vaildpage_count; i++) {
+	for (i = boot_page_count; i < data->fw_validpage_count; i++) {
 		u16 checksum = 0;
 		const u8 *page = &fw->data[i * ETP_FW_PAGE_SIZE];
 
@@ -1165,6 +1168,8 @@
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id elan_acpi_id[] = {
 	{ "ELAN0000", 0 },
+	{ "ELAN0100", 0 },
+	{ "ELAN0600", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, elan_acpi_id);
@@ -1181,10 +1186,10 @@
 static struct i2c_driver elan_driver = {
 	.driver = {
 		.name	= DRIVER_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &elan_pm_ops,
 		.acpi_match_table = ACPI_PTR(elan_acpi_id),
 		.of_match_table = of_match_ptr(elan_of_match),
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 	},
 	.probe		= elan_probe,
 	.id_table	= elan_id,
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index ec347703..ad18dab 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1540,6 +1540,10 @@
 	if (error)
 		goto err_clear_drvdata;
 
+	/* give PT device some time to settle down before probing */
+	if (serio->id.type == SERIO_PS_PSTHRU)
+		usleep_range(10000, 15000);
+
 	if (psmouse_probe(psmouse) < 0) {
 		error = -ENODEV;
 		goto err_close_serio;
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index cc7e0d4..11c32ac 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -432,7 +432,7 @@
 static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
 				   const char *buf, size_t count)
 {
-	int reg, val;
+	unsigned int reg, val;
 	char *rest;
 	ssize_t retval;
 
@@ -440,7 +440,7 @@
 	if (rest == buf || *rest != ' ' || reg > 0xff)
 		return -EINVAL;
 
-	retval = kstrtoint(rest + 1, 16, &val);
+	retval = kstrtouint(rest + 1, 16, &val);
 	if (retval)
 		return retval;
 
@@ -476,9 +476,10 @@
 					const char *buf, size_t count)
 {
 	struct fsp_data *pad = psmouse->private;
-	int reg, val, err;
+	unsigned int reg, val;
+	int err;
 
-	err = kstrtoint(buf, 16, &reg);
+	err = kstrtouint(buf, 16, &reg);
 	if (err)
 		return err;
 
@@ -511,9 +512,10 @@
 static ssize_t fsp_attr_set_pagereg(struct psmouse *psmouse, void *data,
 					const char *buf, size_t count)
 {
-	int val, err;
+	unsigned int val;
+	int err;
 
-	err = kstrtoint(buf, 16, &val);
+	err = kstrtouint(buf, 16, &val);
 	if (err)
 		return err;
 
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 6025eb4..994ae78 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -519,14 +519,18 @@
 	struct synaptics_data *priv = psmouse->private;
 
 	priv->mode = 0;
-	if (priv->absolute_mode)
+
+	if (priv->absolute_mode) {
 		priv->mode |= SYN_BIT_ABSOLUTE_MODE;
-	if (priv->disable_gesture)
+		if (SYN_CAP_EXTENDED(priv->capabilities))
+			priv->mode |= SYN_BIT_W_MODE;
+	}
+
+	if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture)
 		priv->mode |= SYN_BIT_DISABLE_GESTURE;
+
 	if (psmouse->rate >= 80)
 		priv->mode |= SYN_BIT_HIGH_RATE;
-	if (SYN_CAP_EXTENDED(priv->capabilities))
-		priv->mode |= SYN_BIT_W_MODE;
 
 	if (synaptics_mode_cmd(psmouse, priv->mode))
 		return -1;
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index ffceedc..aa7c5da 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -655,7 +655,6 @@
 static struct i2c_driver synaptics_i2c_driver = {
 	.driver = {
 		.name	= DRIVER_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &synaptics_i2c_pm,
 	},
 
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 8b748d9..c6606ca 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -175,9 +175,9 @@
 	return 0;
 }
 
-static int amba_kmi_resume(struct amba_device *dev)
+static int __maybe_unused amba_kmi_resume(struct device *dev)
 {
-	struct amba_kmi_port *kmi = amba_get_drvdata(dev);
+	struct amba_kmi_port *kmi = dev_get_drvdata(dev);
 
 	/* kick the serio layer to rescan this port */
 	serio_reconnect(kmi->io);
@@ -185,6 +185,8 @@
 	return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(amba_kmi_dev_pm_ops, NULL, amba_kmi_resume);
+
 static struct amba_id amba_kmi_idtable[] = {
 	{
 		.id	= 0x00041050,
@@ -199,11 +201,11 @@
 	.drv		= {
 		.name	= "kmi-pl050",
 		.owner	= THIS_MODULE,
+		.pm	= &amba_kmi_dev_pm_ops,
 	},
 	.id_table	= amba_kmi_idtable,
 	.probe		= amba_kmi_probe,
 	.remove		= amba_kmi_remove,
-	.resume		= amba_kmi_resume,
 };
 
 module_amba_driver(ambakmi_driver);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index cb5ece7..c9c98f0a 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -88,6 +88,10 @@
 static bool i8042_debug;
 module_param_named(debug, i8042_debug, bool, 0600);
 MODULE_PARM_DESC(debug, "Turn i8042 debugging mode on and off");
+
+static bool i8042_unmask_kbd_data;
+module_param_named(unmask_kbd_data, i8042_unmask_kbd_data, bool, 0600);
+MODULE_PARM_DESC(unmask_kbd_data, "Unconditionally enable logging of keyboard data traffic that is normally filtered from the debug log (may reveal sensitive data; requires i8042.debug=1)");
 #endif
 
 static bool i8042_bypass_aux_irq_test;
@@ -116,6 +120,7 @@
 	struct serio *serio;
 	int irq;
 	bool exists;
+	bool driver_bound;
 	signed char mux;
 };
 
@@ -133,6 +138,7 @@
 static bool i8042_aux_irq_registered;
 static unsigned char i8042_suppress_kbd_ack;
 static struct platform_device *i8042_platform_device;
+static struct notifier_block i8042_kbd_bind_notifier_block;
 
 static irqreturn_t i8042_interrupt(int irq, void *dev_id);
 static bool (*i8042_platform_filter)(unsigned char data, unsigned char str,
@@ -528,10 +534,10 @@
 	port = &i8042_ports[port_no];
 	serio = port->exists ? port->serio : NULL;
 
-	dbg("%02x <- i8042 (interrupt, %d, %d%s%s)\n",
-	    data, port_no, irq,
-	    dfl & SERIO_PARITY ? ", bad parity" : "",
-	    dfl & SERIO_TIMEOUT ? ", timeout" : "");
+	filter_dbg(port->driver_bound, data, "<- i8042 (interrupt, %d, %d%s%s)\n",
+		   port_no, irq,
+		   dfl & SERIO_PARITY ? ", bad parity" : "",
+		   dfl & SERIO_TIMEOUT ? ", timeout" : "");
 
 	filtered = i8042_filter(data, str, serio);
 
@@ -1438,6 +1444,29 @@
 	return error;
 }
 
+static int i8042_kbd_bind_notifier(struct notifier_block *nb,
+				   unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct serio *serio = to_serio_port(dev);
+	struct i8042_port *port = serio->port_data;
+
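+	/* Only the KBD port's driver bind state is of interest here. */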
+	if (serio != i8042_ports[I8042_KBD_PORT_NO].serio)
+		return 0;
+
+	switch (action) {
+	case BUS_NOTIFY_BOUND_DRIVER:
+		port->driver_bound = true;
+		break;
+
+	case BUS_NOTIFY_UNBIND_DRIVER:
+		port->driver_bound = false;
+		break;
+	}
+
+	return 0;
+}
+
 static int __init i8042_probe(struct platform_device *dev)
 {
 	int error;
@@ -1507,6 +1536,10 @@
 	.shutdown	= i8042_shutdown,
 };
 
+static struct notifier_block i8042_kbd_bind_notifier_block = {
+	.notifier_call = i8042_kbd_bind_notifier,
+};
+
 static int __init i8042_init(void)
 {
 	struct platform_device *pdev;
@@ -1528,6 +1561,7 @@
 		goto err_platform_exit;
 	}
 
+	bus_register_notifier(&serio_bus, &i8042_kbd_bind_notifier_block);
 	panic_blink = i8042_panic_blink;
 
 	return 0;
@@ -1543,6 +1577,7 @@
 	platform_driver_unregister(&i8042_driver);
 	i8042_platform_exit();
 
+	bus_unregister_notifier(&serio_bus, &i8042_kbd_bind_notifier_block);
 	panic_blink = NULL;
 }
 
diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h
index fc080be..1db0a40 100644
--- a/drivers/input/serio/i8042.h
+++ b/drivers/input/serio/i8042.h
@@ -73,6 +73,17 @@
 			printk(KERN_DEBUG KBUILD_MODNAME ": [%d] " format,	\
 			       (int) (jiffies - i8042_start_time), ##arg);	\
 	} while (0)
+
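+/*
+ * Print the data byte only when no filter applies or unmasking was
+ * requested; otherwise mask it as "**" in the debug output.
+ */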
+#define filter_dbg(filter, data, format, args...)		\
+	do {							\
+		if (!i8042_debug)				\
+			break;					\
+								\
+		if (!filter || i8042_unmask_kbd_data)		\
+			dbg("%02x " format, data, ##args);	\
+		else						\
+			dbg("** " format, ##args);		\
+	} while (0)
 #else
 #define dbg_init() do { } while (0)
 #define dbg(format, arg...)							\
@@ -80,6 +91,8 @@
 		if (0)								\
 			printk(KERN_DEBUG pr_fmt(format), ##arg);		\
 	} while (0)
+
+#define filter_dbg(filter, data, format, args...) do { } while (0)
 #endif
 
 #endif /* _I8042_H */
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index a05a517..8f82897 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -49,8 +49,6 @@
 
 static LIST_HEAD(serio_list);
 
-static struct bus_type serio_bus;
-
 static void serio_add_port(struct serio *serio);
 static int serio_reconnect_port(struct serio *serio);
 static void serio_disconnect_port(struct serio *serio);
@@ -1017,7 +1015,7 @@
 }
 EXPORT_SYMBOL(serio_interrupt);
 
-static struct bus_type serio_bus = {
+struct bus_type serio_bus = {
 	.name		= "serio",
 	.drv_groups	= serio_driver_groups,
 	.match		= serio_bus_match,
@@ -1029,6 +1027,7 @@
 	.pm		= &serio_pm_ops,
 #endif
 };
+EXPORT_SYMBOL(serio_bus);
 
 static int __init serio_init(void)
 {
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index a854c6e..059edeb 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -11,9 +11,9 @@
 
 if INPUT_TOUCHSCREEN
 
-config OF_TOUCHSCREEN
+config TOUCHSCREEN_PROPERTIES
 	def_tristate INPUT
-	depends on INPUT && OF
+	depends on INPUT
 
 config TOUCHSCREEN_88PM860X
 	tristate "Marvell 88PM860x touchscreen"
@@ -118,7 +118,7 @@
 config TOUCHSCREEN_AUO_PIXCIR
 	tristate "AUO in-cell touchscreen using Pixcir ICs"
 	depends on I2C
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here if you have a AUO display with in-cell touchscreen
 	  using Pixcir ICs.
@@ -142,7 +142,7 @@
 
 config TOUCHSCREEN_CHIPONE_ICN8318
 	tristate "chipone icn8318 touchscreen controller"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	depends on I2C
 	depends on OF
 	help
@@ -156,7 +156,7 @@
 config TOUCHSCREEN_CY8CTMG110
 	tristate "cy8ctmg110 touchscreen"
 	depends on I2C
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here if you have a cy8ctmg110 capacitive touchscreen on
 	  an AAVA device.
@@ -915,10 +915,11 @@
 	  module will be called tsc40.
 
 config TOUCHSCREEN_TSC2005
-        tristate "TSC2005 based touchscreens"
-        depends on SPI_MASTER
-        help
-          Say Y here if you have a TSC2005 based touchscreen.
+	tristate "TSC2005 based touchscreens"
+	depends on SPI_MASTER
+	select REGMAP_SPI
+	help
+	  Say Y here if you have a TSC2005 based touchscreen.
 
 	  If unsure, say N.
 
@@ -1029,7 +1030,7 @@
 config TOUCHSCREEN_ZFORCE
 	tristate "Neonode zForce infrared touchscreens"
 	depends on I2C
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here if you have a touchscreen using the zforce
 	  infrared technology from Neonode.
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index fa3d33b..c85aae2 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -6,7 +6,7 @@
 
 wm97xx-ts-y := wm97xx-core.o
 
-obj-$(CONFIG_OF_TOUCHSCREEN)		+= of_touchscreen.o
+obj-$(CONFIG_TOUCHSCREEN_PROPERTIES)	+= of_touchscreen.o
 obj-$(CONFIG_TOUCHSCREEN_88PM860X)	+= 88pm860x-ts.o
 obj-$(CONFIG_TOUCHSCREEN_AD7877)	+= ad7877.o
 obj-$(CONFIG_TOUCHSCREEN_AD7879)	+= ad7879.o
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index dcf3907..d66962c 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -94,7 +94,6 @@
 static struct i2c_driver ad7879_i2c_driver = {
 	.driver = {
 		.name	= "ad7879",
-		.owner	= THIS_MODULE,
 		.pm	= &ad7879_pm_ops,
 	},
 	.probe		= ad7879_i2c_probe,
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index e4eb8a6..0f5f968 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1234,7 +1234,8 @@
 	of_property_read_u32(node, "ti,pendown-gpio-debounce",
 			     &pdata->gpio_pendown_debounce);
 
-	pdata->wakeup = of_property_read_bool(node, "linux,wakeup");
+	pdata->wakeup = of_property_read_bool(node, "wakeup-source") ||
+			of_property_read_bool(node, "linux,wakeup");
 
 	pdata->gpio_pendown = of_get_named_gpio(dev->of_node, "pendown-gpio", 0);
 
diff --git a/drivers/input/touchscreen/ar1021_i2c.c b/drivers/input/touchscreen/ar1021_i2c.c
index f0b954d..71b5a63 100644
--- a/drivers/input/touchscreen/ar1021_i2c.c
+++ b/drivers/input/touchscreen/ar1021_i2c.c
@@ -166,7 +166,6 @@
 static struct i2c_driver ar1021_i2c_driver = {
 	.driver	= {
 		.name	= "ar1021_i2c",
-		.owner	= THIS_MODULE,
 		.pm	= &ar1021_i2c_pm,
 		.of_match_table = ar1021_i2c_of_match,
 	},
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index dfc7309..c562205 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -22,34 +22,20 @@
 #include <linux/delay.h>
 #include <linux/firmware.h>
 #include <linux/i2c.h>
-#include <linux/i2c/atmel_mxt_ts.h>
+#include <linux/platform_data/atmel_mxt_ts.h>
 #include <linux/input/mt.h>
 #include <linux/interrupt.h>
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <asm/unaligned.h>
 
-/* Version */
-#define MXT_VER_20		20
-#define MXT_VER_21		21
-#define MXT_VER_22		22
-
 /* Firmware files */
 #define MXT_FW_NAME		"maxtouch.fw"
 #define MXT_CFG_NAME		"maxtouch.cfg"
 #define MXT_CFG_MAGIC		"OBP_RAW V1"
 
 /* Registers */
-#define MXT_INFO		0x00
-#define MXT_FAMILY_ID		0x00
-#define MXT_VARIANT_ID		0x01
-#define MXT_VERSION		0x02
-#define MXT_BUILD		0x03
-#define MXT_MATRIX_X_SIZE	0x04
-#define MXT_MATRIX_Y_SIZE	0x05
-#define MXT_OBJECT_NUM		0x06
 #define MXT_OBJECT_START	0x07
-
 #define MXT_OBJECT_SIZE		6
 #define MXT_INFO_CHECKSUM_SIZE	3
 #define MXT_MAX_BLOCK_WRITE	256
@@ -103,21 +89,16 @@
 #define MXT_T6_STATUS_COMSERR	(1 << 2)
 
 /* MXT_GEN_POWER_T7 field */
-#define MXT_POWER_IDLEACQINT	0
-#define MXT_POWER_ACTVACQINT	1
-#define MXT_POWER_ACTV2IDLETO	2
+struct t7_config {
+	u8 idle;
+	u8 active;
+} __packed;
 
-/* MXT_GEN_ACQUIRE_T8 field */
-#define MXT_ACQUIRE_CHRGTIME	0
-#define MXT_ACQUIRE_TCHDRIFT	2
-#define MXT_ACQUIRE_DRIFTST	3
-#define MXT_ACQUIRE_TCHAUTOCAL	4
-#define MXT_ACQUIRE_SYNC	5
-#define MXT_ACQUIRE_ATCHCALST	6
-#define MXT_ACQUIRE_ATCHCALSTHR	7
+#define MXT_POWER_CFG_RUN		0
+#define MXT_POWER_CFG_DEEPSLEEP		1
 
 /* MXT_TOUCH_MULTI_T9 field */
-#define MXT_TOUCH_CTRL		0
+#define MXT_T9_CTRL		0
 #define MXT_T9_ORIENT		9
 #define MXT_T9_RANGE		18
 
@@ -139,51 +120,10 @@
 /* MXT_TOUCH_MULTI_T9 orient */
 #define MXT_T9_ORIENT_SWITCH	(1 << 0)
 
-/* MXT_PROCI_GRIPFACE_T20 field */
-#define MXT_GRIPFACE_CTRL	0
-#define MXT_GRIPFACE_XLOGRIP	1
-#define MXT_GRIPFACE_XHIGRIP	2
-#define MXT_GRIPFACE_YLOGRIP	3
-#define MXT_GRIPFACE_YHIGRIP	4
-#define MXT_GRIPFACE_MAXTCHS	5
-#define MXT_GRIPFACE_SZTHR1	7
-#define MXT_GRIPFACE_SZTHR2	8
-#define MXT_GRIPFACE_SHPTHR1	9
-#define MXT_GRIPFACE_SHPTHR2	10
-#define MXT_GRIPFACE_SUPEXTTO	11
-
-/* MXT_PROCI_NOISE field */
-#define MXT_NOISE_CTRL		0
-#define MXT_NOISE_OUTFLEN	1
-#define MXT_NOISE_GCAFUL_LSB	3
-#define MXT_NOISE_GCAFUL_MSB	4
-#define MXT_NOISE_GCAFLL_LSB	5
-#define MXT_NOISE_GCAFLL_MSB	6
-#define MXT_NOISE_ACTVGCAFVALID	7
-#define MXT_NOISE_NOISETHR	8
-#define MXT_NOISE_FREQHOPSCALE	10
-#define MXT_NOISE_FREQ0		11
-#define MXT_NOISE_FREQ1		12
-#define MXT_NOISE_FREQ2		13
-#define MXT_NOISE_FREQ3		14
-#define MXT_NOISE_FREQ4		15
-#define MXT_NOISE_IDLEGCAFVALID	16
-
 /* MXT_SPT_COMMSCONFIG_T18 */
 #define MXT_COMMS_CTRL		0
 #define MXT_COMMS_CMD		1
 
-/* MXT_SPT_CTECONFIG_T28 field */
-#define MXT_CTE_CTRL		0
-#define MXT_CTE_CMD		1
-#define MXT_CTE_MODE		2
-#define MXT_CTE_IDLEGCAFDEPTH	3
-#define MXT_CTE_ACTVGCAFDEPTH	4
-#define MXT_CTE_VOLTAGE		5
-
-#define MXT_VOLTAGE_DEFAULT	2700000
-#define MXT_VOLTAGE_STEP	10000
-
 /* Define for MXT_GEN_COMMAND_T6 */
 #define MXT_BOOT_VALUE		0xa5
 #define MXT_RESET_VALUE		0x01
@@ -291,6 +231,7 @@
 	u8 last_message_count;
 	u8 num_touchids;
 	u8 multitouch;
+	struct t7_config t7_cfg;
 
 	/* Cached parameters from object table */
 	u16 T5_address;
@@ -997,16 +938,15 @@
 
 	count = data->msg_buf[0];
 
-	if (count == 0) {
-		/*
-		 * This condition is caused by the CHG line being configured
-		 * in Mode 0. It results in unnecessary I2C operations but it
-		 * is benign.
-		 */
-		dev_dbg(dev, "Interrupt triggered but zero messages\n");
+	/*
+	 * This condition may be caused by the CHG line being configured in
+	 * Mode 0. It results in unnecessary I2C operations but it is benign.
+	 */
+	if (count == 0)
 		return IRQ_NONE;
-	} else if (count > data->max_reportid) {
-		dev_err(dev, "T44 count %d exceeded max report id\n", count);
+
+	if (count > data->max_reportid) {
+		dev_warn(dev, "T44 count %d exceeded max report id\n", count);
 		count = data->max_reportid;
 	}
 
@@ -1157,7 +1097,9 @@
 	struct device *dev = &data->client->dev;
 	int ret = 0;
 
-	dev_info(dev, "Resetting chip\n");
+	dev_info(dev, "Resetting device\n");
+
+	disable_irq(data->irq);
 
 	reinit_completion(&data->reset_completion);
 
@@ -1165,6 +1107,11 @@
 	if (ret)
 		return ret;
 
+	/* Ignore CHG line for 100ms after reset */
+	msleep(100);
+
+	enable_irq(data->irq);
+
 	ret = mxt_wait_for_completion(data, &data->reset_completion,
 				      MXT_RESET_TIMEOUT);
 	if (ret)
@@ -1361,6 +1308,8 @@
 	return 0;
 }
 
+static int mxt_init_t7_power_cfg(struct mxt_data *data);
+
 /*
  * mxt_update_cfg - download configuration to chip
  *
@@ -1508,6 +1457,9 @@
 
 	dev_info(dev, "Config successfully updated\n");
 
+	/* T7 config may have changed */
+	mxt_init_t7_power_cfg(data);
+
 release_mem:
 	kfree(config_mem);
 	return ret;
@@ -1533,7 +1485,7 @@
 	int error;
 
 	/* Read 7-byte info block starting at address 0 */
-	error = __mxt_read_reg(client, MXT_INFO, sizeof(*info), info);
+	error = __mxt_read_reg(client, 0, sizeof(*info), info);
 	if (error)
 		return error;
 
@@ -1905,6 +1857,8 @@
 	if (pdata->t19_num_keys) {
 		mxt_set_up_as_touchpad(input_dev, data);
 		mt_flags |= INPUT_MT_POINTER;
+	} else {
+		mt_flags |= INPUT_MT_DIRECT;
 	}
 
 	/* For multi touch */
@@ -2051,6 +2005,60 @@
 	return error;
 }
 
+static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
+{
+	struct device *dev = &data->client->dev;
+	int error;
+	struct t7_config *new_config;
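+	/* Zeroed T7 acquisition intervals put the chip into deep sleep. */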
+	struct t7_config deepsleep = { .active = 0, .idle = 0 };
+
+	if (sleep == MXT_POWER_CFG_DEEPSLEEP)
+		new_config = &deepsleep;
+	else
+		new_config = &data->t7_cfg;
+
+	error = __mxt_write_reg(data->client, data->T7_address,
+				sizeof(data->t7_cfg), new_config);
+	if (error)
+		return error;
+
+	dev_dbg(dev, "Set T7 ACTV:%d IDLE:%d\n",
+		new_config->active, new_config->idle);
+
+	return 0;
+}
+
+static int mxt_init_t7_power_cfg(struct mxt_data *data)
+{
+	struct device *dev = &data->client->dev;
+	int error;
+	bool retry = false;
+
+recheck:
+	error = __mxt_read_reg(data->client, data->T7_address,
+				sizeof(data->t7_cfg), &data->t7_cfg);
+	if (error)
+		return error;
+
+	if (data->t7_cfg.active == 0 || data->t7_cfg.idle == 0) {
+		if (!retry) {
+			dev_dbg(dev, "T7 cfg zero, resetting\n");
+			mxt_soft_reset(data);
+			retry = true;
+			goto recheck;
+		} else {
+			dev_dbg(dev, "T7 cfg zero after reset, overriding\n");
+			data->t7_cfg.active = 20;
+			data->t7_cfg.idle = 100;
+			return mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
+		}
+	}
+
+	dev_dbg(dev, "Initialized power cfg: ACTV %d, IDLE %d\n",
+		data->t7_cfg.active, data->t7_cfg.idle);
+	return 0;
+}
+
 static int mxt_configure_objects(struct mxt_data *data,
 				 const struct firmware *cfg)
 {
@@ -2058,6 +2066,12 @@
 	struct mxt_info *info = &data->info;
 	int error;
 
+	error = mxt_init_t7_power_cfg(data);
+	if (error) {
+		dev_err(dev, "Failed to initialize power cfg\n");
+		return error;
+	}
+
 	if (cfg) {
 		error = mxt_update_cfg(data, cfg);
 		if (error)
@@ -2346,14 +2360,41 @@
 
 static void mxt_start(struct mxt_data *data)
 {
-	/* Touch enable */
-	mxt_write_object(data, data->multitouch, MXT_TOUCH_CTRL, 0x83);
+	switch (data->pdata->suspend_mode) {
+	case MXT_SUSPEND_T9_CTRL:
+		mxt_soft_reset(data);
+
+		/* Touch enable */
+		/* 0x83 = SCANEN | RPTEN | ENABLE */
+		mxt_write_object(data,
+				MXT_TOUCH_MULTI_T9, MXT_T9_CTRL, 0x83);
+		break;
+
+	case MXT_SUSPEND_DEEP_SLEEP:
+	default:
+		mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
+
+		/* Recalibrate since chip has been in deep sleep */
+		mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false);
+		break;
+	}
 }
 
 static void mxt_stop(struct mxt_data *data)
 {
-	/* Touch disable */
-	mxt_write_object(data, data->multitouch, MXT_TOUCH_CTRL, 0);
+	switch (data->pdata->suspend_mode) {
+	case MXT_SUSPEND_T9_CTRL:
+		/* Touch disable */
+		mxt_write_object(data,
+				MXT_TOUCH_MULTI_T9, MXT_T9_CTRL, 0);
+		break;
+
+	case MXT_SUSPEND_DEEP_SLEEP:
+	default:
+		mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP);
+		break;
+	}
 }
 
 static int mxt_input_open(struct input_dev *dev)
@@ -2376,19 +2417,18 @@
 static const struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client)
 {
 	struct mxt_platform_data *pdata;
+	struct device_node *np = client->dev.of_node;
 	u32 *keymap;
-	u32 keycode;
-	int proplen, i, ret;
+	int proplen, ret;
 
-	if (!client->dev.of_node)
+	if (!np)
 		return ERR_PTR(-ENOENT);
 
 	pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return ERR_PTR(-ENOMEM);
 
-	if (of_find_property(client->dev.of_node, "linux,gpio-keymap",
-			     &proplen)) {
+	if (of_find_property(np, "linux,gpio-keymap", &proplen)) {
 		pdata->t19_num_keys = proplen / sizeof(u32);
 
 		keymap = devm_kzalloc(&client->dev,
@@ -2397,18 +2437,17 @@
 		if (!keymap)
 			return ERR_PTR(-ENOMEM);
 
-		for (i = 0; i < pdata->t19_num_keys; i++) {
-			ret = of_property_read_u32_index(client->dev.of_node,
-					"linux,gpio-keymap", i, &keycode);
-			if (ret)
-				keycode = KEY_RESERVED;
-
-			keymap[i] = keycode;
-		}
+		ret = of_property_read_u32_array(np, "linux,gpio-keymap",
+						 keymap, pdata->t19_num_keys);
+		if (ret)
+			dev_warn(&client->dev,
+				 "Couldn't read linux,gpio-keymap: %d\n", ret);
 
 		pdata->t19_keymap = keymap;
 	}
 
+	pdata->suspend_mode = MXT_SUSPEND_DEEP_SLEEP;
+
 	return pdata;
 }
 #else
@@ -2609,6 +2648,9 @@
 	struct mxt_data *data = i2c_get_clientdata(client);
 	struct input_dev *input_dev = data->input_dev;
 
+	if (!input_dev)
+		return 0;
+
 	mutex_lock(&input_dev->mutex);
 
 	if (input_dev->users)
@@ -2625,7 +2667,8 @@
 	struct mxt_data *data = i2c_get_clientdata(client);
 	struct input_dev *input_dev = data->input_dev;
 
-	mxt_soft_reset(data);
+	if (!input_dev)
+		return 0;
 
 	mutex_lock(&input_dev->mutex);
 
@@ -2666,7 +2709,6 @@
 static struct i2c_driver mxt_driver = {
 	.driver = {
 		.name	= "atmel_mxt_ts",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(mxt_of_match),
 		.acpi_match_table = ACPI_PTR(mxt_acpi_id),
 		.pm	= &mxt_pm_ops,
diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c
index 40e02dd..38c06f7 100644
--- a/drivers/input/touchscreen/auo-pixcir-ts.c
+++ b/drivers/input/touchscreen/auo-pixcir-ts.c
@@ -686,7 +686,6 @@
 
 static struct i2c_driver auo_pixcir_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "auo_pixcir_ts",
 		.pm	= &auo_pixcir_pm_ops,
 		.of_match_table	= of_match_ptr(auo_pixcir_ts_dt_idtable),
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index b9b5dda..931417e 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -716,7 +716,6 @@
 static struct i2c_driver bu21013_driver = {
 	.driver	= {
 		.name	=	DRIVER_TP,
-		.owner	=	THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm	=	&bu21013_dev_pm_ops,
 #endif
diff --git a/drivers/input/touchscreen/chipone_icn8318.c b/drivers/input/touchscreen/chipone_icn8318.c
index 32e9db0..22a6fea 100644
--- a/drivers/input/touchscreen/chipone_icn8318.c
+++ b/drivers/input/touchscreen/chipone_icn8318.c
@@ -300,7 +300,6 @@
 
 static struct i2c_driver icn8318_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "chipone_icn8318",
 		.pm	= &icn8318_pm_ops,
 		.of_match_table = icn8318_of_match,
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index f2119ee..cc1d135 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -347,7 +347,6 @@
 
 static struct i2c_driver cy8ctmg110_driver = {
 	.driver		= {
-		.owner	= THIS_MODULE,
 		.name	= CY8CTMG110_DRIVER_NAME,
 		.pm	= &cy8ctmg110_pm,
 	},
diff --git a/drivers/input/touchscreen/cyttsp4_i2c.c b/drivers/input/touchscreen/cyttsp4_i2c.c
index 8e2012c..9a323dd 100644
--- a/drivers/input/touchscreen/cyttsp4_i2c.c
+++ b/drivers/input/touchscreen/cyttsp4_i2c.c
@@ -74,7 +74,6 @@
 static struct i2c_driver cyttsp4_i2c_driver = {
 	.driver = {
 		.name	= CYTTSP4_I2C_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &cyttsp4_pm_ops,
 	},
 	.probe		= cyttsp4_i2c_probe,
diff --git a/drivers/input/touchscreen/cyttsp_i2c.c b/drivers/input/touchscreen/cyttsp_i2c.c
index 63104a8..519e2de 100644
--- a/drivers/input/touchscreen/cyttsp_i2c.c
+++ b/drivers/input/touchscreen/cyttsp_i2c.c
@@ -74,7 +74,6 @@
 static struct i2c_driver cyttsp_i2c_driver = {
 	.driver = {
 		.name	= CY_I2C_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &cyttsp_pm_ops,
 	},
 	.probe		= cyttsp_i2c_probe,
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 394b1de..48de1e8 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -1041,7 +1041,7 @@
 			     0, tsdata->num_y * 64 - 1, 0, 0);
 
 	if (!pdata)
-		touchscreen_parse_of_params(input, true);
+		touchscreen_parse_properties(input, true);
 
 	error = input_mt_init_slots(input, MAX_SUPPORT_POINTS, INPUT_MT_DIRECT);
 	if (error) {
@@ -1134,7 +1134,6 @@
 
 static struct i2c_driver edt_ft5x06_ts_driver = {
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "edt_ft5x06",
 		.of_match_table = of_match_ptr(edt_ft5x06_of_match),
 		.pm = &edt_ft5x06_ts_pm_ops,
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 4c56299..1afc08b 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -264,11 +264,11 @@
 	{ .compatible = "eeti,egalax_ts" },
 	{ /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, egalax_ts_dt_ids);
 
 static struct i2c_driver egalax_ts_driver = {
 	.driver = {
 		.name	= "egalax_ts",
-		.owner	= THIS_MODULE,
 		.pm	= &egalax_ts_pm_ops,
 		.of_match_table	= egalax_ts_dt_ids,
 	},
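
Several hunks in this series (egalax here, goodix and mms114 further down)
do nothing but add a MODULE_DEVICE_TABLE() line. The macro exports the ID
table into the module's alias section so that udev/modprobe can autoload
the driver when a matching device turns up; without it, a modular build
has to be loaded by hand. Minimal shape, with a hypothetical compatible
string:

#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "vendor,example-ts" },	/* hypothetical */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_dt_ids);
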
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 0efd766..ddac134 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -38,6 +38,8 @@
 #include <linux/input/mt.h>
 #include <linux/acpi.h>
 #include <linux/of.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
 #include <asm/unaligned.h>
 
 /* Device, Driver information */
@@ -102,6 +104,9 @@
 /* calibration timeout definition */
 #define ELAN_CALI_TIMEOUT_MSEC	10000
 
+#define ELAN_POWERON_DELAY_USEC	500
+#define ELAN_RESET_DELAY_MSEC	20
+
 enum elants_state {
 	ELAN_STATE_NORMAL,
 	ELAN_WAIT_QUEUE_HEADER,
@@ -118,6 +123,10 @@
 	struct i2c_client *client;
 	struct input_dev *input;
 
+	struct regulator *vcc33;
+	struct regulator *vccio;
+	struct gpio_desc *reset_gpio;
+
 	u16 fw_version;
 	u8 test_version;
 	u8 solution_version;
@@ -141,6 +150,7 @@
 	u8 buf[MAX_PACKET_SIZE];
 
 	bool wake_irq_enabled;
+	bool keep_power_in_suspend;
 };
 
 static int elants_i2c_send(struct i2c_client *client,
@@ -605,6 +615,7 @@
 	const u8 enter_iap[] = { 0x45, 0x49, 0x41, 0x50 };
 	const u8 enter_iap2[] = { 0x54, 0x00, 0x12, 0x34 };
 	const u8 iap_ack[] = { 0x55, 0xaa, 0x33, 0xcc };
+	const u8 close_idle[] = { 0x54, 0x2c, 0x01, 0x01 };
 	u8 buf[HEADER_SIZE];
 	u16 send_id;
 	int page, n_fw_pages;
@@ -617,8 +628,13 @@
 	} else {
 		/* Start IAP Procedure */
 		dev_dbg(&client->dev, "Normal IAP procedure\n");
+		/* Close idle mode */
+		error = elants_i2c_send(client, close_idle, sizeof(close_idle));
+		if (error)
+			dev_err(&client->dev, "Failed close idle: %d\n", error);
+		msleep(60);
 		elants_i2c_sw_reset(client);
-
+		msleep(20);
 		error = elants_i2c_send(client, enter_iap, sizeof(enter_iap));
 	}
 
@@ -1052,6 +1068,67 @@
 	sysfs_remove_group(&ts->client->dev.kobj, &elants_attribute_group);
 }
 
+static int elants_i2c_power_on(struct elants_data *ts)
+{
+	int error;
+
+	/*
+	 * If we do not have a reset GPIO, assume that the platform
+	 * firmware controls the regulators and powers them on for us.
+	 */
+	if (IS_ERR_OR_NULL(ts->reset_gpio))
+		return 0;
+
+	gpiod_set_value_cansleep(ts->reset_gpio, 1);
+
+	error = regulator_enable(ts->vcc33);
+	if (error) {
+		dev_err(&ts->client->dev,
+			"failed to enable vcc33 regulator: %d\n",
+			error);
+		goto release_reset_gpio;
+	}
+
+	error = regulator_enable(ts->vccio);
+	if (error) {
+		dev_err(&ts->client->dev,
+			"failed to enable vccio regulator: %d\n",
+			error);
+		regulator_disable(ts->vcc33);
+		goto release_reset_gpio;
+	}
+
+	/*
+	 * We need to wait a bit after powering on the controller
+	 * before we are allowed to release the reset GPIO.
+	 */
+	udelay(ELAN_POWERON_DELAY_USEC);
+
+release_reset_gpio:
+	gpiod_set_value_cansleep(ts->reset_gpio, 0);
+	if (error)
+		return error;
+
+	msleep(ELAN_RESET_DELAY_MSEC);
+
+	return 0;
+}
+
+static void elants_i2c_power_off(void *_data)
+{
+	struct elants_data *ts = _data;
+
+	if (!IS_ERR_OR_NULL(ts->reset_gpio)) {
+		/*
+		 * Activate reset gpio to prevent leakage through the
+		 * pin once we shut off power to the controller.
+		 */
+		gpiod_set_value_cansleep(ts->reset_gpio, 1);
+		regulator_disable(ts->vccio);
+		regulator_disable(ts->vcc33);
+	}
+}
+
 static int elants_i2c_probe(struct i2c_client *client,
 			    const struct i2c_device_id *id)
 {
@@ -1066,13 +1143,6 @@
 		return -ENXIO;
 	}
 
-	/* Make sure there is something at this address */
-	if (i2c_smbus_xfer(client->adapter, client->addr, 0,
-			I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &dummy) < 0) {
-		dev_err(&client->dev, "nothing at this address\n");
-		return -ENXIO;
-	}
-
 	ts = devm_kzalloc(&client->dev, sizeof(struct elants_data), GFP_KERNEL);
 	if (!ts)
 		return -ENOMEM;
@@ -1083,6 +1153,62 @@
 	ts->client = client;
 	i2c_set_clientdata(client, ts);
 
+	ts->vcc33 = devm_regulator_get(&client->dev, "vcc33");
+	if (IS_ERR(ts->vcc33)) {
+		error = PTR_ERR(ts->vcc33);
+		if (error != -EPROBE_DEFER)
+			dev_err(&client->dev,
+				"Failed to get 'vcc33' regulator: %d\n",
+				error);
+		return error;
+	}
+
+	ts->vccio = devm_regulator_get(&client->dev, "vccio");
+	if (IS_ERR(ts->vccio)) {
+		error = PTR_ERR(ts->vccio);
+		if (error != -EPROBE_DEFER)
+			dev_err(&client->dev,
+				"Failed to get 'vccio' regulator: %d\n",
+				error);
+		return error;
+	}
+
+	ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(ts->reset_gpio)) {
+		error = PTR_ERR(ts->reset_gpio);
+
+		if (error == -EPROBE_DEFER)
+			return error;
+
+		if (error != -ENOENT && error != -ENOSYS) {
+			dev_err(&client->dev,
+				"failed to get reset gpio: %d\n",
+				error);
+			return error;
+		}
+
+		ts->keep_power_in_suspend = true;
+	}
+
+	error = elants_i2c_power_on(ts);
+	if (error)
+		return error;
+
+	error = devm_add_action(&client->dev, elants_i2c_power_off, ts);
+	if (error) {
+		dev_err(&client->dev,
+			"failed to install power off action: %d\n", error);
+		elants_i2c_power_off(ts);
+		return error;
+	}
+
+	/* Make sure there is something at this address */
+	if (i2c_smbus_xfer(client->adapter, client->addr, 0,
+			   I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &dummy) < 0) {
+		dev_err(&client->dev, "nothing at this address\n");
+		return -ENXIO;
+	}
+
 	error = elants_i2c_initialize(ts);
 	if (error) {
 		dev_err(&client->dev, "failed to initialize: %d\n", error);
@@ -1190,18 +1316,24 @@
 
 	disable_irq(client->irq);
 
-	for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
-		error = elants_i2c_send(client, set_sleep_cmd,
-					sizeof(set_sleep_cmd));
-		if (!error)
-			break;
+	if (device_may_wakeup(dev) || ts->keep_power_in_suspend) {
+		for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+			error = elants_i2c_send(client, set_sleep_cmd,
+						sizeof(set_sleep_cmd));
+			if (!error)
+				break;
 
-		dev_err(&client->dev, "suspend command failed: %d\n", error);
+			dev_err(&client->dev,
+				"suspend command failed: %d\n", error);
+		}
+
+		if (device_may_wakeup(dev))
+			ts->wake_irq_enabled =
+					(enable_irq_wake(client->irq) == 0);
+	} else {
+		elants_i2c_power_off(ts);
 	}
 
-	if (device_may_wakeup(dev))
-		ts->wake_irq_enabled = (enable_irq_wake(client->irq) == 0);
-
 	return 0;
 }
 
@@ -1216,13 +1348,19 @@
 	if (device_may_wakeup(dev) && ts->wake_irq_enabled)
 		disable_irq_wake(client->irq);
 
-	for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
-		error = elants_i2c_send(client, set_active_cmd,
-					sizeof(set_active_cmd));
-		if (!error)
-			break;
+	if (ts->keep_power_in_suspend) {
+		for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+			error = elants_i2c_send(client, set_active_cmd,
+						sizeof(set_active_cmd));
+			if (!error)
+				break;
 
-		dev_err(&client->dev, "resume command failed: %d\n", error);
+			dev_err(&client->dev,
+				"resume command failed: %d\n", error);
+		}
+	} else {
+		elants_i2c_power_on(ts);
+		elants_i2c_initialize(ts);
 	}
 
 	ts->state = ELAN_STATE_NORMAL;
@@ -1261,10 +1399,10 @@
 	.id_table = elants_i2c_id,
 	.driver = {
 		.name = DEVICE_NAME,
-		.owner = THIS_MODULE,
 		.pm = &elants_i2c_pm_ops,
 		.acpi_match_table = ACPI_PTR(elants_acpi_id),
 		.of_match_table = of_match_ptr(elants_of_match),
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 	},
 };
 module_i2c_driver(elants_i2c_driver);
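
Worth noting in the elants changes: elants_i2c_power_off() is reused as a
devm cleanup action, so the regulators are switched off automatically on
unbind and on any later probe failure. A sketch of the devm_add_action()
idiom under hypothetical names; the one subtlety is that the action must
be invoked manually if registering it fails:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

struct example_priv {
	struct regulator *supply;
};

static void example_power_off(void *_priv)
{
	struct example_priv *priv = _priv;

	regulator_disable(priv->supply);
}

static int example_power_on(struct device *dev, struct example_priv *priv)
{
	int error;

	error = regulator_enable(priv->supply);
	if (error)
		return error;

	error = devm_add_action(dev, example_power_off, priv);
	if (error) {
		example_power_off(priv);	/* registration failed: undo by hand */
		return error;
	}

	return 0;	/* power-off now runs automatically on unbind */
}
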
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index e36162b..4d113c9 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -420,6 +420,7 @@
 	{ "GDIX1001:00", 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
 
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id goodix_acpi_match[] = {
@@ -448,7 +449,6 @@
 	.id_table = goodix_ts_id,
 	.driver = {
 		.name = "Goodix-TS",
-		.owner = THIS_MODULE,
 		.acpi_match_table = ACPI_PTR(goodix_acpi_match),
 		.of_match_table = of_match_ptr(goodix_of_match),
 	},
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 586bee4..ddf694b 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -343,7 +343,6 @@
 static struct i2c_driver ili210x_ts_driver = {
 	.driver = {
 		.name = "ili210x_i2c",
-		.owner = THIS_MODULE,
 		.pm = &ili210x_i2c_pm,
 	},
 	.id_table = ili210x_i2c_id,
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c
index a68ec14..82079cd 100644
--- a/drivers/input/touchscreen/max11801_ts.c
+++ b/drivers/input/touchscreen/max11801_ts.c
@@ -229,7 +229,6 @@
 static struct i2c_driver max11801_ts_driver = {
 	.driver = {
 		.name	= "max11801_ts",
-		.owner	= THIS_MODULE,
 	},
 	.id_table	= max11801_ts_id,
 	.probe		= max11801_ts_probe,
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 67c0d31..7cce876 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -572,12 +572,12 @@
 	{ .compatible = "melfas,mms114" },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, mms114_dt_match);
 #endif
 
 static struct i2c_driver mms114_driver = {
 	.driver = {
 		.name	= "mms114",
-		.owner	= THIS_MODULE,
 		.pm	= &mms114_pm_ops,
 		.of_match_table = of_match_ptr(mms114_dt_match),
 	},
diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c
index 806cd0a..bb6f2fe 100644
--- a/drivers/input/touchscreen/of_touchscreen.c
+++ b/drivers/input/touchscreen/of_touchscreen.c
@@ -9,12 +9,12 @@
  *
  */
 
-#include <linux/of.h>
+#include <linux/property.h>
 #include <linux/input.h>
 #include <linux/input/mt.h>
 #include <linux/input/touchscreen.h>
 
-static bool touchscreen_get_prop_u32(struct device_node *np,
+static bool touchscreen_get_prop_u32(struct device *dev,
 				     const char *property,
 				     unsigned int default_value,
 				     unsigned int *value)
@@ -22,7 +22,7 @@
 	u32 val;
 	int error;
 
-	error = of_property_read_u32(np, property, &val);
+	error = device_property_read_u32(dev, property, &val);
 	if (error) {
 		*value = default_value;
 		return false;
@@ -39,13 +39,9 @@
 	struct input_absinfo *absinfo;
 
 	if (!test_bit(axis, dev->absbit)) {
-		/*
-		 * Emit a warning only if the axis is not a multitouch
-		 * axis, which might not be set by the driver.
-		 */
-		if (!input_is_mt_axis(axis))
-			dev_warn(&dev->dev,
-				 "DT specifies parameters but the axis is not set up\n");
+		dev_warn(&dev->dev,
+			 "DT specifies parameters but the axis %lu is not set up\n",
+			 axis);
 		return;
 	}
 
@@ -55,52 +51,58 @@
 }
 
 /**
- * touchscreen_parse_of_params - parse common touchscreen DT properties
- * @dev: device that should be parsed
+ * touchscreen_parse_properties - parse common touchscreen DT properties
+ * @input: input device that should be parsed
+ * @multitouch: specifies whether parsed properties should be applied to
+ *	single-touch or multi-touch axes
  *
  * This function parses common DT properties for touchscreens and setups the
- * input device accordingly. The function keeps previously setuped default
+ * input device accordingly. The function keeps previously set up default
  * values if no value is specified via DT.
  */
-void touchscreen_parse_of_params(struct input_dev *dev, bool multitouch)
+void touchscreen_parse_properties(struct input_dev *input, bool multitouch)
 {
-	struct device_node *np = dev->dev.parent->of_node;
+	struct device *dev = input->dev.parent;
 	unsigned int axis;
 	unsigned int maximum, fuzz;
 	bool data_present;
 
-	input_alloc_absinfo(dev);
-	if (!dev->absinfo)
+	input_alloc_absinfo(input);
+	if (!input->absinfo)
 		return;
 
 	axis = multitouch ? ABS_MT_POSITION_X : ABS_X;
-	data_present = touchscreen_get_prop_u32(np, "touchscreen-size-x",
-						input_abs_get_max(dev, axis),
+	data_present = touchscreen_get_prop_u32(dev, "touchscreen-size-x",
+						input_abs_get_max(input,
+								  axis) + 1,
 						&maximum) |
-		       touchscreen_get_prop_u32(np, "touchscreen-fuzz-x",
-						input_abs_get_fuzz(dev, axis),
+		       touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
+						input_abs_get_fuzz(input, axis),
 						&fuzz);
 	if (data_present)
-		touchscreen_set_params(dev, axis, maximum, fuzz);
+		touchscreen_set_params(input, axis, maximum - 1, fuzz);
 
 	axis = multitouch ? ABS_MT_POSITION_Y : ABS_Y;
-	data_present = touchscreen_get_prop_u32(np, "touchscreen-size-y",
-						input_abs_get_max(dev, axis),
+	data_present = touchscreen_get_prop_u32(dev, "touchscreen-size-y",
+						input_abs_get_max(input,
+								  axis) + 1,
 						&maximum) |
-		       touchscreen_get_prop_u32(np, "touchscreen-fuzz-y",
-						input_abs_get_fuzz(dev, axis),
+		       touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
+						input_abs_get_fuzz(input, axis),
 						&fuzz);
 	if (data_present)
-		touchscreen_set_params(dev, axis, maximum, fuzz);
+		touchscreen_set_params(input, axis, maximum - 1, fuzz);
 
 	axis = multitouch ? ABS_MT_PRESSURE : ABS_PRESSURE;
-	data_present = touchscreen_get_prop_u32(np, "touchscreen-max-pressure",
-						input_abs_get_max(dev, axis),
+	data_present = touchscreen_get_prop_u32(dev,
+						"touchscreen-max-pressure",
+						input_abs_get_max(input, axis),
 						&maximum) |
-		       touchscreen_get_prop_u32(np, "touchscreen-fuzz-pressure",
-						input_abs_get_fuzz(dev, axis),
+		       touchscreen_get_prop_u32(dev,
+						"touchscreen-fuzz-pressure",
+						input_abs_get_fuzz(input, axis),
 						&fuzz);
 	if (data_present)
-		touchscreen_set_params(dev, axis, maximum, fuzz);
+		touchscreen_set_params(input, axis, maximum, fuzz);
 }
-EXPORT_SYMBOL(touchscreen_parse_of_params);
+EXPORT_SYMBOL(touchscreen_parse_properties);
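
The rename from touchscreen_parse_of_params() to
touchscreen_parse_properties() tracks the real change here: switching from
of_property_read_u32() to device_property_read_u32() lets the same keys
come from either a DT node or ACPI _DSD, so the helper no longer needs an
of_node. Note also the new +1/-1 handling: the touchscreen-size-*
properties encode a pixel count, while input_abs_get_max() stores the
largest valid coordinate, hence maximum - 1. A caller-side sketch (names
hypothetical):

#include <linux/property.h>

static u32 example_size_x(struct device *dev, u32 fallback)
{
	u32 val;

	if (device_property_read_u32(dev, "touchscreen-size-x", &val))
		return fallback;	/* property missing or unreadable */

	return val;
}
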
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 8f3e243..9162172 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -24,20 +24,23 @@
 #include <linux/i2c.h>
 #include <linux/input.h>
 #include <linux/input/mt.h>
-#include <linux/input/pixcir_ts.h>
+#include <linux/input/touchscreen.h>
 #include <linux/gpio.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/platform_data/pixcir_i2c_ts.h>
 
 #define PIXCIR_MAX_SLOTS       5 /* Max fingers supported by driver */
 
 struct pixcir_i2c_ts_data {
 	struct i2c_client *client;
 	struct input_dev *input;
-	const struct pixcir_ts_platform_data *pdata;
-	bool running;
+	struct gpio_desc *gpio_attb;
+	struct gpio_desc *gpio_reset;
+	const struct pixcir_i2c_chip_data *chip;
 	int max_fingers;	/* Max fingers supported in this instance */
+	bool running;
 };
 
 struct pixcir_touch {
@@ -60,7 +63,7 @@
 	u8 touch;
 	int ret, i;
 	int readsize;
-	const struct pixcir_i2c_chip_data *chip = &tsdata->pdata->chip;
+	const struct pixcir_i2c_chip_data *chip = tsdata->chip;
 
 	memset(report, 0, sizeof(struct pixcir_report_data));
 
@@ -113,13 +116,13 @@
 	struct pixcir_touch *touch;
 	int n, i, slot;
 	struct device *dev = &ts->client->dev;
-	const struct pixcir_i2c_chip_data *chip = &ts->pdata->chip;
+	const struct pixcir_i2c_chip_data *chip = ts->chip;
 
 	n = report->num_touches;
 	if (n > PIXCIR_MAX_SLOTS)
 		n = PIXCIR_MAX_SLOTS;
 
-	if (!chip->has_hw_ids) {
+	if (!ts->chip->has_hw_ids) {
 		for (i = 0; i < n; i++) {
 			touch = &report->touches[i];
 			pos[i].x = touch->x;
@@ -161,7 +164,6 @@
 static irqreturn_t pixcir_ts_isr(int irq, void *dev_id)
 {
 	struct pixcir_i2c_ts_data *tsdata = dev_id;
-	const struct pixcir_ts_platform_data *pdata = tsdata->pdata;
 	struct pixcir_report_data report;
 
 	while (tsdata->running) {
@@ -171,7 +173,7 @@
 		/* report it */
 		pixcir_ts_report(tsdata, &report);
 
-		if (gpio_get_value(pdata->gpio_attb)) {
+		if (gpiod_get_value_cansleep(tsdata->gpio_attb)) {
 			if (report.num_touches) {
 				/*
 				 * Last report with no finger up?
@@ -189,6 +191,17 @@
 	return IRQ_HANDLED;
 }
 
+static void pixcir_reset(struct pixcir_i2c_ts_data *tsdata)
+{
+	if (!IS_ERR_OR_NULL(tsdata->gpio_reset)) {
+		gpiod_set_value_cansleep(tsdata->gpio_reset, 1);
+		ndelay(100);	/* datasheet section 1.2.3 says 80ns min. */
+		gpiod_set_value_cansleep(tsdata->gpio_reset, 0);
+		/* wait for controller ready. 100ms guess. */
+		msleep(100);
+	}
+}
+
 static int pixcir_set_power_mode(struct pixcir_i2c_ts_data *ts,
 				 enum pixcir_power_mode mode)
 {
@@ -411,85 +424,59 @@
 #ifdef CONFIG_OF
 static const struct of_device_id pixcir_of_match[];
 
-static struct pixcir_ts_platform_data *pixcir_parse_dt(struct device *dev)
+static int pixcir_parse_dt(struct device *dev,
+			   struct pixcir_i2c_ts_data *tsdata)
 {
-	struct pixcir_ts_platform_data *pdata;
-	struct device_node *np = dev->of_node;
 	const struct of_device_id *match;
 
 	match = of_match_device(of_match_ptr(pixcir_of_match), dev);
 	if (!match)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
-	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
-	if (!pdata)
-		return ERR_PTR(-ENOMEM);
+	tsdata->chip = (const struct pixcir_i2c_chip_data *)match->data;
+	if (!tsdata->chip)
+		return -EINVAL;
 
-	pdata->chip = *(const struct pixcir_i2c_chip_data *)match->data;
-
-	pdata->gpio_attb = of_get_named_gpio(np, "attb-gpio", 0);
-	/* gpio_attb validity is checked in probe */
-
-	if (of_property_read_u32(np, "touchscreen-size-x", &pdata->x_max)) {
-		dev_err(dev, "Failed to get touchscreen-size-x property\n");
-		return ERR_PTR(-EINVAL);
-	}
-	pdata->x_max -= 1;
-
-	if (of_property_read_u32(np, "touchscreen-size-y", &pdata->y_max)) {
-		dev_err(dev, "Failed to get touchscreen-size-y property\n");
-		return ERR_PTR(-EINVAL);
-	}
-	pdata->y_max -= 1;
-
-	dev_dbg(dev, "%s: x %d, y %d, gpio %d\n", __func__,
-		pdata->x_max + 1, pdata->y_max + 1, pdata->gpio_attb);
-
-	return pdata;
+	return 0;
 }
 #else
-static struct pixcir_ts_platform_data *pixcir_parse_dt(struct device *dev)
+static int pixcir_parse_dt(struct device *dev,
+			   struct pixcir_i2c_ts_data *tsdata)
 {
-	return ERR_PTR(-EINVAL);
+	return -EINVAL;
 }
 #endif
 
 static int pixcir_i2c_ts_probe(struct i2c_client *client,
-					 const struct i2c_device_id *id)
+			       const struct i2c_device_id *id)
 {
 	const struct pixcir_ts_platform_data *pdata =
 			dev_get_platdata(&client->dev);
 	struct device *dev = &client->dev;
-	struct device_node *np = dev->of_node;
 	struct pixcir_i2c_ts_data *tsdata;
 	struct input_dev *input;
 	int error;
 
-	if (np && !pdata) {
-		pdata = pixcir_parse_dt(dev);
-		if (IS_ERR(pdata))
-			return PTR_ERR(pdata);
-	}
+	tsdata = devm_kzalloc(dev, sizeof(*tsdata), GFP_KERNEL);
+	if (!tsdata)
+		return -ENOMEM;
 
-	if (!pdata) {
+	if (pdata) {
+		tsdata->chip = &pdata->chip;
+	} else if (dev->of_node) {
+		error = pixcir_parse_dt(dev, tsdata);
+		if (error)
+			return error;
+	} else {
 		dev_err(&client->dev, "platform data not defined\n");
 		return -EINVAL;
 	}
 
-	if (!gpio_is_valid(pdata->gpio_attb)) {
-		dev_err(dev, "Invalid gpio_attb in pdata\n");
+	if (!tsdata->chip->max_fingers) {
+		dev_err(dev, "Invalid max_fingers in chip data\n");
 		return -EINVAL;
 	}
 
-	if (!pdata->chip.max_fingers) {
-		dev_err(dev, "Invalid max_fingers in pdata\n");
-		return -EINVAL;
-	}
-
-	tsdata = devm_kzalloc(dev, sizeof(*tsdata), GFP_KERNEL);
-	if (!tsdata)
-		return -ENOMEM;
-
 	input = devm_input_allocate_device(dev);
 	if (!input) {
 		dev_err(dev, "Failed to allocate input device\n");
@@ -498,7 +485,6 @@
 
 	tsdata->client = client;
 	tsdata->input = input;
-	tsdata->pdata = pdata;
 
 	input->name = client->name;
 	input->id.bustype = BUS_I2C;
@@ -506,15 +492,21 @@
 	input->close = pixcir_input_close;
 	input->dev.parent = &client->dev;
 
-	__set_bit(EV_KEY, input->evbit);
-	__set_bit(EV_ABS, input->evbit);
-	__set_bit(BTN_TOUCH, input->keybit);
-	input_set_abs_params(input, ABS_X, 0, pdata->x_max, 0, 0);
-	input_set_abs_params(input, ABS_Y, 0, pdata->y_max, 0, 0);
-	input_set_abs_params(input, ABS_MT_POSITION_X, 0, pdata->x_max, 0, 0);
-	input_set_abs_params(input, ABS_MT_POSITION_Y, 0, pdata->y_max, 0, 0);
+	if (pdata) {
+		input_set_abs_params(input, ABS_MT_POSITION_X, 0, pdata->x_max, 0, 0);
+		input_set_abs_params(input, ABS_MT_POSITION_Y, 0, pdata->y_max, 0, 0);
+	} else {
+		input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
+		input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);
+		touchscreen_parse_properties(input, true);
+		if (!input_abs_get_max(input, ABS_MT_POSITION_X) ||
+		    !input_abs_get_max(input, ABS_MT_POSITION_Y)) {
+			dev_err(dev, "Touchscreen size is not specified\n");
+			return -EINVAL;
+		}
+	}
 
-	tsdata->max_fingers = tsdata->pdata->chip.max_fingers;
+	tsdata->max_fingers = tsdata->chip->max_fingers;
 	if (tsdata->max_fingers > PIXCIR_MAX_SLOTS) {
 		tsdata->max_fingers = PIXCIR_MAX_SLOTS;
 		dev_info(dev, "Limiting maximum fingers to %d\n",
@@ -530,10 +522,18 @@
 
 	input_set_drvdata(input, tsdata);
 
-	error = devm_gpio_request_one(dev, pdata->gpio_attb,
-				      GPIOF_DIR_IN, "pixcir_i2c_attb");
-	if (error) {
-		dev_err(dev, "Failed to request ATTB gpio\n");
+	tsdata->gpio_attb = devm_gpiod_get(dev, "attb", GPIOD_IN);
+	if (IS_ERR(tsdata->gpio_attb)) {
+		error = PTR_ERR(tsdata->gpio_attb);
+		dev_err(dev, "Failed to request ATTB gpio: %d\n", error);
+		return error;
+	}
+
+	tsdata->gpio_reset = devm_gpiod_get_optional(dev, "reset",
+						     GPIOD_OUT_LOW);
+	if (IS_ERR(tsdata->gpio_reset)) {
+		error = PTR_ERR(tsdata->gpio_reset);
+		dev_err(dev, "Failed to request RESET gpio: %d\n", error);
 		return error;
 	}
 
@@ -545,6 +545,8 @@
 		return error;
 	}
 
+	pixcir_reset(tsdata);
+
 	/* Always be in IDLE mode to save power, device supports auto wake */
 	error = pixcir_set_power_mode(tsdata, PIXCIR_POWER_IDLE);
 	if (error) {
@@ -602,7 +604,6 @@
 
 static struct i2c_driver pixcir_i2c_ts_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "pixcir_ts",
 		.pm	= &pixcir_dev_pm_ops,
 		.of_match_table = of_match_ptr(pixcir_of_match),
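
The pixcir conversion leans on two gpiod conventions:
devm_gpiod_get_optional() returns NULL rather than an error when no line
is wired up, and gpiod_set_value_cansleep() silently accepts a NULL
descriptor, which is why pixcir_reset() stays simple. A sketch of the
optional-GPIO idiom, assuming a "reset" consumer name:

#include <linux/gpio/consumer.h>

static int example_get_reset(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);	/* hard error, e.g. -EPROBE_DEFER */

	*out = gpio;	/* may be NULL: this board has no reset line */
	return 0;
}
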
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 697e26e..e943678 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -296,7 +296,6 @@
 	.id_table	= st1232_ts_id,
 	.driver = {
 		.name	= ST1232_TS_NAME,
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(st1232_ts_dt_ids),
 		.pm	= &st1232_ts_pm_ops,
 	},
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 8be7b9b..3f11763 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -581,6 +581,7 @@
 	sur40->alloc_ctx = vb2_dma_sg_init_ctx(sur40->dev);
 	if (IS_ERR(sur40->alloc_ctx)) {
 		dev_err(sur40->dev, "Can't allocate buffer context");
+		error = PTR_ERR(sur40->alloc_ctx);
 		goto err_unreg_v4l2;
 	}
 
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index d8c025b..0f65d02 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -30,10 +30,11 @@
 #include <linux/delay.h>
 #include <linux/pm.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/tsc2005.h>
 #include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
 
 /*
  * The touchscreen interface operates as follows:
@@ -61,16 +62,24 @@
 #define TSC2005_CMD_12BIT		0x04
 
 /* control byte 0 */
-#define TSC2005_REG_READ		0x0001
-#define TSC2005_REG_PND0		0x0002
-#define TSC2005_REG_X			0x0000
-#define TSC2005_REG_Y			0x0008
-#define TSC2005_REG_Z1			0x0010
-#define TSC2005_REG_Z2			0x0018
-#define TSC2005_REG_TEMP_HIGH		0x0050
-#define TSC2005_REG_CFR0		0x0060
-#define TSC2005_REG_CFR1		0x0068
-#define TSC2005_REG_CFR2		0x0070
+#define TSC2005_REG_READ		0x01 /* R/W access */
+#define TSC2005_REG_PND0		0x02 /* Power Not Down Control */
+#define TSC2005_REG_X			(0x0 << 3)
+#define TSC2005_REG_Y			(0x1 << 3)
+#define TSC2005_REG_Z1			(0x2 << 3)
+#define TSC2005_REG_Z2			(0x3 << 3)
+#define TSC2005_REG_AUX			(0x4 << 3)
+#define TSC2005_REG_TEMP1		(0x5 << 3)
+#define TSC2005_REG_TEMP2		(0x6 << 3)
+#define TSC2005_REG_STATUS		(0x7 << 3)
+#define TSC2005_REG_AUX_HIGH		(0x8 << 3)
+#define TSC2005_REG_AUX_LOW		(0x9 << 3)
+#define TSC2005_REG_TEMP_HIGH		(0xA << 3)
+#define TSC2005_REG_TEMP_LOW		(0xB << 3)
+#define TSC2005_REG_CFR0		(0xC << 3)
+#define TSC2005_REG_CFR1		(0xD << 3)
+#define TSC2005_REG_CFR2		(0xE << 3)
+#define TSC2005_REG_CONV_FUNC		(0xF << 3)
 
 /* configuration register 0 */
 #define TSC2005_CFR0_PRECHARGE_276US	0x0040
@@ -112,20 +121,37 @@
 #define TSC2005_SPI_MAX_SPEED_HZ	10000000
 #define TSC2005_PENUP_TIME_MS		40
 
-struct tsc2005_spi_rd {
-	struct spi_transfer	spi_xfer;
-	u32			spi_tx;
-	u32			spi_rx;
+static const struct regmap_range tsc2005_writable_ranges[] = {
+	regmap_reg_range(TSC2005_REG_AUX_HIGH, TSC2005_REG_CFR2),
 };
 
+static const struct regmap_access_table tsc2005_writable_table = {
+	.yes_ranges = tsc2005_writable_ranges,
+	.n_yes_ranges = ARRAY_SIZE(tsc2005_writable_ranges),
+};
+
+static const struct regmap_config tsc2005_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 16,
+	.reg_stride = 0x08,
+	.max_register = 0x78,
+	.read_flag_mask = TSC2005_REG_READ,
+	.write_flag_mask = TSC2005_REG_PND0,
+	.wr_table = &tsc2005_writable_table,
+	.use_single_rw = true,
+};
+
+struct tsc2005_data {
+	u16 x;
+	u16 y;
+	u16 z1;
+	u16 z2;
+} __packed;
+#define TSC2005_DATA_REGS 4
+
 struct tsc2005 {
 	struct spi_device	*spi;
-
-	struct spi_message      spi_read_msg;
-	struct tsc2005_spi_rd	spi_x;
-	struct tsc2005_spi_rd	spi_y;
-	struct tsc2005_spi_rd	spi_z1;
-	struct tsc2005_spi_rd	spi_z2;
+	struct regmap		*regmap;
 
 	struct input_dev	*idev;
 	char			phys[32];
@@ -154,7 +180,7 @@
 
 	struct regulator	*vio;
 
-	int			reset_gpio;
+	struct gpio_desc	*reset_gpio;
 	void			(*set_reset)(bool enable);
 };
 
@@ -182,62 +208,6 @@
 	return 0;
 }
 
-static int tsc2005_write(struct tsc2005 *ts, u8 reg, u16 value)
-{
-	u32 tx = ((reg | TSC2005_REG_PND0) << 16) | value;
-	struct spi_transfer xfer = {
-		.tx_buf		= &tx,
-		.len		= 4,
-		.bits_per_word	= 24,
-	};
-	struct spi_message msg;
-	int error;
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&xfer, &msg);
-
-	error = spi_sync(ts->spi, &msg);
-	if (error) {
-		dev_err(&ts->spi->dev,
-			"%s: failed, register: %x, value: %x, error: %d\n",
-			__func__, reg, value, error);
-		return error;
-	}
-
-	return 0;
-}
-
-static void tsc2005_setup_read(struct tsc2005_spi_rd *rd, u8 reg, bool last)
-{
-	memset(rd, 0, sizeof(*rd));
-
-	rd->spi_tx		   = (reg | TSC2005_REG_READ) << 16;
-	rd->spi_xfer.tx_buf	   = &rd->spi_tx;
-	rd->spi_xfer.rx_buf	   = &rd->spi_rx;
-	rd->spi_xfer.len	   = 4;
-	rd->spi_xfer.bits_per_word = 24;
-	rd->spi_xfer.cs_change	   = !last;
-}
-
-static int tsc2005_read(struct tsc2005 *ts, u8 reg, u16 *value)
-{
-	struct tsc2005_spi_rd spi_rd;
-	struct spi_message msg;
-	int error;
-
-	tsc2005_setup_read(&spi_rd, reg, true);
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&spi_rd.spi_xfer, &msg);
-
-	error = spi_sync(ts->spi, &msg);
-	if (error)
-		return error;
-
-	*value = spi_rd.spi_rx;
-	return 0;
-}
-
 static void tsc2005_update_pen_state(struct tsc2005 *ts,
 				     int x, int y, int pressure)
 {
@@ -266,26 +236,23 @@
 	struct tsc2005 *ts = _ts;
 	unsigned long flags;
 	unsigned int pressure;
-	u32 x, y;
-	u32 z1, z2;
+	struct tsc2005_data tsdata;
 	int error;
 
 	/* read the coordinates */
-	error = spi_sync(ts->spi, &ts->spi_read_msg);
+	error = regmap_bulk_read(ts->regmap, TSC2005_REG_X, &tsdata,
+				 TSC2005_DATA_REGS);
 	if (unlikely(error))
 		goto out;
 
-	x = ts->spi_x.spi_rx;
-	y = ts->spi_y.spi_rx;
-	z1 = ts->spi_z1.spi_rx;
-	z2 = ts->spi_z2.spi_rx;
-
 	/* validate position */
-	if (unlikely(x > MAX_12BIT || y > MAX_12BIT))
+	if (unlikely(tsdata.x > MAX_12BIT || tsdata.y > MAX_12BIT))
 		goto out;
 
 	/* Skip reading if the pressure components are out of range */
-	if (unlikely(z1 == 0 || z2 > MAX_12BIT || z1 >= z2))
+	if (unlikely(tsdata.z1 == 0 || tsdata.z2 > MAX_12BIT))
+		goto out;
+	if (unlikely(tsdata.z1 >= tsdata.z2))
 		goto out;
 
        /*
@@ -293,8 +260,8 @@
 	* the value before pen-up - that implies SPI fed us stale data
 	*/
 	if (!ts->pen_down &&
-	    ts->in_x == x && ts->in_y == y &&
-	    ts->in_z1 == z1 && ts->in_z2 == z2) {
+	    ts->in_x == tsdata.x && ts->in_y == tsdata.y &&
+	    ts->in_z1 == tsdata.z1 && ts->in_z2 == tsdata.z2) {
 		goto out;
 	}
 
@@ -302,20 +269,20 @@
 	 * At this point we are happy we have a valid and useful reading.
 	 * Remember it for later comparisons. We may now begin downsampling.
 	 */
-	ts->in_x = x;
-	ts->in_y = y;
-	ts->in_z1 = z1;
-	ts->in_z2 = z2;
+	ts->in_x = tsdata.x;
+	ts->in_y = tsdata.y;
+	ts->in_z1 = tsdata.z1;
+	ts->in_z2 = tsdata.z2;
 
 	/* Compute touch pressure resistance using equation #1 */
-	pressure = x * (z2 - z1) / z1;
+	pressure = tsdata.x * (tsdata.z2 - tsdata.z1) / tsdata.z1;
 	pressure = pressure * ts->x_plate_ohm / 4096;
 	if (unlikely(pressure > MAX_12BIT))
 		goto out;
 
 	spin_lock_irqsave(&ts->lock, flags);
 
-	tsc2005_update_pen_state(ts, x, y, pressure);
+	tsc2005_update_pen_state(ts, tsdata.x, tsdata.y, pressure);
 	mod_timer(&ts->penup_timer,
 		  jiffies + msecs_to_jiffies(TSC2005_PENUP_TIME_MS));
 
@@ -338,9 +305,9 @@
 
 static void tsc2005_start_scan(struct tsc2005 *ts)
 {
-	tsc2005_write(ts, TSC2005_REG_CFR0, TSC2005_CFR0_INITVALUE);
-	tsc2005_write(ts, TSC2005_REG_CFR1, TSC2005_CFR1_INITVALUE);
-	tsc2005_write(ts, TSC2005_REG_CFR2, TSC2005_CFR2_INITVALUE);
+	regmap_write(ts->regmap, TSC2005_REG_CFR0, TSC2005_CFR0_INITVALUE);
+	regmap_write(ts->regmap, TSC2005_REG_CFR1, TSC2005_CFR1_INITVALUE);
+	regmap_write(ts->regmap, TSC2005_REG_CFR2, TSC2005_CFR2_INITVALUE);
 	tsc2005_cmd(ts, TSC2005_CMD_NORMAL);
 }
 
@@ -351,8 +318,8 @@
 
 static void tsc2005_set_reset(struct tsc2005 *ts, bool enable)
 {
-	if (ts->reset_gpio >= 0)
-		gpio_set_value(ts->reset_gpio, enable);
+	if (ts->reset_gpio)
+		gpiod_set_value_cansleep(ts->reset_gpio, enable);
 	else if (ts->set_reset)
 		ts->set_reset(enable);
 }
@@ -388,11 +355,10 @@
 				     struct device_attribute *attr,
 				     char *buf)
 {
-	struct spi_device *spi = to_spi_device(dev);
-	struct tsc2005 *ts = spi_get_drvdata(spi);
-	u16 temp_high;
-	u16 temp_high_orig;
-	u16 temp_high_test;
+	struct tsc2005 *ts = dev_get_drvdata(dev);
+	unsigned int temp_high;
+	unsigned int temp_high_orig;
+	unsigned int temp_high_test;
 	bool success = true;
 	int error;
 
@@ -403,7 +369,7 @@
 	 */
 	__tsc2005_disable(ts);
 
-	error = tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high_orig);
+	error = regmap_read(ts->regmap, TSC2005_REG_TEMP_HIGH, &temp_high_orig);
 	if (error) {
 		dev_warn(dev, "selftest failed: read error %d\n", error);
 		success = false;
@@ -412,14 +378,14 @@
 
 	temp_high_test = (temp_high_orig - 1) & MAX_12BIT;
 
-	error = tsc2005_write(ts, TSC2005_REG_TEMP_HIGH, temp_high_test);
+	error = regmap_write(ts->regmap, TSC2005_REG_TEMP_HIGH, temp_high_test);
 	if (error) {
 		dev_warn(dev, "selftest failed: write error %d\n", error);
 		success = false;
 		goto out;
 	}
 
-	error = tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high);
+	error = regmap_read(ts->regmap, TSC2005_REG_TEMP_HIGH, &temp_high);
 	if (error) {
 		dev_warn(dev, "selftest failed: read error %d after write\n",
 			 error);
@@ -442,7 +408,7 @@
 		goto out;
 
 	/* test that the reset really happened */
-	error = tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high);
+	error = regmap_read(ts->regmap, TSC2005_REG_TEMP_HIGH, &temp_high);
 	if (error) {
 		dev_warn(dev, "selftest failed: read error %d after reset\n",
 			 error);
@@ -474,8 +440,7 @@
 				      struct attribute *attr, int n)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
-	struct spi_device *spi = to_spi_device(dev);
-	struct tsc2005 *ts = spi_get_drvdata(spi);
+	struct tsc2005 *ts = dev_get_drvdata(dev);
 	umode_t mode = attr->mode;
 
 	if (attr == &dev_attr_selftest.attr) {
@@ -495,7 +460,7 @@
 {
 	struct tsc2005 *ts = container_of(work, struct tsc2005, esd_work.work);
 	int error;
-	u16 r;
+	unsigned int r;
 
 	if (!mutex_trylock(&ts->mutex)) {
 		/*
@@ -511,7 +476,7 @@
 		goto out;
 
 	/* We should be able to read register without disabling interrupts. */
-	error = tsc2005_read(ts, TSC2005_REG_CFR0, &r);
+	error = regmap_read(ts->regmap, TSC2005_REG_CFR0, &r);
 	if (!error &&
 	    !((r ^ TSC2005_CFR0_INITVALUE) & TSC2005_CFR0_RW_MASK)) {
 		goto out;
@@ -575,20 +540,6 @@
 	mutex_unlock(&ts->mutex);
 }
 
-static void tsc2005_setup_spi_xfer(struct tsc2005 *ts)
-{
-	tsc2005_setup_read(&ts->spi_x, TSC2005_REG_X, false);
-	tsc2005_setup_read(&ts->spi_y, TSC2005_REG_Y, false);
-	tsc2005_setup_read(&ts->spi_z1, TSC2005_REG_Z1, false);
-	tsc2005_setup_read(&ts->spi_z2, TSC2005_REG_Z2, true);
-
-	spi_message_init(&ts->spi_read_msg);
-	spi_message_add_tail(&ts->spi_x.spi_xfer, &ts->spi_read_msg);
-	spi_message_add_tail(&ts->spi_y.spi_xfer, &ts->spi_read_msg);
-	spi_message_add_tail(&ts->spi_z1.spi_xfer, &ts->spi_read_msg);
-	spi_message_add_tail(&ts->spi_z2.spi_xfer, &ts->spi_read_msg);
-}
-
 static int tsc2005_probe(struct spi_device *spi)
 {
 	const struct tsc2005_platform_data *pdata = dev_get_platdata(&spi->dev);
@@ -653,38 +604,31 @@
 	ts->spi = spi;
 	ts->idev = input_dev;
 
+	ts->regmap = devm_regmap_init_spi(spi, &tsc2005_regmap_config);
+	if (IS_ERR(ts->regmap))
+		return PTR_ERR(ts->regmap);
+
 	ts->x_plate_ohm = x_plate_ohm;
 	ts->esd_timeout = esd_timeout;
 
-	if (np) {
-		ts->reset_gpio = of_get_named_gpio(np, "reset-gpios", 0);
-		if (ts->reset_gpio == -EPROBE_DEFER)
-			return ts->reset_gpio;
-		if (ts->reset_gpio < 0) {
-			dev_err(&spi->dev, "error acquiring reset gpio: %d\n",
-				ts->reset_gpio);
-			return ts->reset_gpio;
-		}
-
-		error = devm_gpio_request_one(&spi->dev, ts->reset_gpio, 0,
-					      "reset-gpios");
-		if (error) {
-			dev_err(&spi->dev, "error requesting reset gpio: %d\n",
-				error);
-			return error;
-		}
-
-		ts->vio = devm_regulator_get(&spi->dev, "vio");
-		if (IS_ERR(ts->vio)) {
-			error = PTR_ERR(ts->vio);
-			dev_err(&spi->dev, "vio regulator missing (%d)", error);
-			return error;
-		}
-	} else {
-		ts->reset_gpio = -1;
-		ts->set_reset = pdata->set_reset;
+	ts->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+						 GPIOD_OUT_HIGH);
+	if (IS_ERR(ts->reset_gpio)) {
+		error = PTR_ERR(ts->reset_gpio);
+		dev_err(&spi->dev, "error acquiring reset gpio: %d\n", error);
+		return error;
 	}
 
+	ts->vio = devm_regulator_get_optional(&spi->dev, "vio");
+	if (IS_ERR(ts->vio)) {
+		error = PTR_ERR(ts->vio);
+		dev_err(&spi->dev, "vio regulator missing (%d)", error);
+		return error;
+	}
+
+	if (!ts->reset_gpio && pdata)
+		ts->set_reset = pdata->set_reset;
+
 	mutex_init(&ts->mutex);
 
 	spin_lock_init(&ts->lock);
@@ -692,8 +636,6 @@
 
 	INIT_DELAYED_WORK(&ts->esd_work, tsc2005_esd_work);
 
-	tsc2005_setup_spi_xfer(ts);
-
 	snprintf(ts->phys, sizeof(ts->phys),
 		 "%s/input-ts", dev_name(&spi->dev));
 
@@ -709,7 +651,7 @@
 	input_set_abs_params(input_dev, ABS_PRESSURE, 0, max_p, fudge_p, 0);
 
 	if (np)
-		touchscreen_parse_of_params(input_dev, false);
+		touchscreen_parse_properties(input_dev, false);
 
 	input_dev->open = tsc2005_open;
 	input_dev->close = tsc2005_close;
@@ -735,7 +677,7 @@
 			return error;
 	}
 
-	spi_set_drvdata(spi, ts);
+	dev_set_drvdata(&spi->dev, ts);
 	error = sysfs_create_group(&spi->dev.kobj, &tsc2005_attr_group);
 	if (error) {
 		dev_err(&spi->dev,
@@ -763,7 +705,7 @@
 
 static int tsc2005_remove(struct spi_device *spi)
 {
-	struct tsc2005 *ts = spi_get_drvdata(spi);
+	struct tsc2005 *ts = dev_get_drvdata(&spi->dev);
 
 	sysfs_remove_group(&spi->dev.kobj, &tsc2005_attr_group);
 
@@ -775,8 +717,7 @@
 
 static int __maybe_unused tsc2005_suspend(struct device *dev)
 {
-	struct spi_device *spi = to_spi_device(dev);
-	struct tsc2005 *ts = spi_get_drvdata(spi);
+	struct tsc2005 *ts = dev_get_drvdata(dev);
 
 	mutex_lock(&ts->mutex);
 
@@ -792,8 +733,7 @@
 
 static int __maybe_unused tsc2005_resume(struct device *dev)
 {
-	struct spi_device *spi = to_spi_device(dev);
-	struct tsc2005 *ts = spi_get_drvdata(spi);
+	struct tsc2005 *ts = dev_get_drvdata(dev);
 
 	mutex_lock(&ts->mutex);
 
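
The tsc2005 rework replaces the hand-built spi_message plumbing with a
regmap: the read/write flag masks synthesize the control byte (PND0 for
writes, the read bit for reads), reg_stride matches the 8-byte register
spacing, and a single regmap_bulk_read() covers the X/Y/Z1/Z2 burst that
previously took a four-transfer message. A trimmed sketch of the same
configuration; the values mirror the hunk above, but the snippet itself is
illustrative:

#include <linux/regmap.h>

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,		/* control byte addresses the register */
	.val_bits = 16,		/* 12-bit conversions in 16-bit words */
	.reg_stride = 0x08,	/* register addresses step by 8 */
	.read_flag_mask = 0x01,	/* TSC2005_REG_READ */
	.write_flag_mask = 0x02,	/* TSC2005_REG_PND0 */
	.use_single_rw = true,	/* no bulk transfers on the wire */
};

struct example_sample {
	u16 x, y, z1, z2;
} __packed;

static int example_read_sample(struct regmap *map, struct example_sample *s)
{
	/* one call instead of a four-transfer spi_message */
	return regmap_bulk_read(map, 0x0 /* REG_X */, s, 4);
}
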
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index ccc8aa6..5d0cd51 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -482,7 +482,6 @@
 
 static struct i2c_driver tsc2007_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "tsc2007",
 		.of_match_table = of_match_ptr(tsc2007_of_match),
 	},
diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c
index 32f8ac0..8d7a285 100644
--- a/drivers/input/touchscreen/wacom_i2c.c
+++ b/drivers/input/touchscreen/wacom_i2c.c
@@ -271,7 +271,6 @@
 static struct i2c_driver wacom_i2c_driver = {
 	.driver	= {
 		.name	= "wacom_i2c",
-		.owner	= THIS_MODULE,
 		.pm	= &wacom_i2c_pm,
 	},
 
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index fb92ae1..515c20a 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -23,7 +23,7 @@
 #include <asm/unaligned.h>
 
 #define WDT87XX_NAME		"wdt87xx_i2c"
-#define WDT87XX_DRV_VER		"0.9.6"
+#define WDT87XX_DRV_VER		"0.9.7"
 #define WDT87XX_FW_NAME		"wdt87xx_fw.bin"
 #define WDT87XX_CFG_NAME	"wdt87xx_cfg.bin"
 
@@ -85,6 +85,11 @@
 #define CTL_PARAM_OFFSET_PHY_H		24
 #define CTL_PARAM_OFFSET_FACTOR		32
 
+/* The definition of the device descriptor */
+#define WDT_GD_DEVICE			1
+#define DEV_DESC_OFFSET_VID		8
+#define DEV_DESC_OFFSET_PID		10
+
 /* Communication commands */
 #define PACKET_SIZE			56
 #define VND_REQ_READ			0x06
@@ -152,6 +157,7 @@
 /* Controller requires minimum 300us between commands */
 #define WDT_COMMAND_DELAY_MS		2
 #define WDT_FLASH_WRITE_DELAY_MS	4
+#define WDT_FW_RESET_TIME		2500
 
 struct wdt87xx_sys_param {
 	u16	fw_id;
@@ -165,6 +171,8 @@
 	u16	scaling_factor;
 	u32	max_x;
 	u32	max_y;
+	u16	vendor_id;
+	u16	product_id;
 };
 
 struct wdt87xx_data {
@@ -208,6 +216,32 @@
 	return 0;
 }
 
+static int wdt87xx_get_desc(struct i2c_client *client, u8 desc_idx,
+			    u8 *buf, size_t len)
+{
+	u8 tx_buf[] = { 0x22, 0x00, 0x10, 0x0E, 0x23, 0x00 };
+	int error;
+
+	tx_buf[2] |= desc_idx & 0xF;
+
+	error = wdt87xx_i2c_xfer(client, tx_buf, sizeof(tx_buf),
+				 buf, len);
+	if (error) {
+		dev_err(&client->dev, "get desc failed: %d\n", error);
+		return error;
+	}
+
+	if (buf[0] != len) {
+		dev_err(&client->dev, "unexpected response to get desc: %d\n",
+			buf[0]);
+		return -EINVAL;
+	}
+
+	mdelay(WDT_COMMAND_DELAY_MS);
+
+	return 0;
+}
+
 static int wdt87xx_get_string(struct i2c_client *client, u8 str_idx,
 			      u8 *buf, size_t len)
 {
@@ -373,7 +407,7 @@
 	}
 
 	/* Wait the device to be ready */
-	msleep(200);
+	msleep(WDT_FW_RESET_TIME);
 
 	return 0;
 }
@@ -403,6 +437,15 @@
 	u8 buf[PKT_READ_SIZE];
 	int error;
 
+	error = wdt87xx_get_desc(client, WDT_GD_DEVICE, buf, 18);
+	if (error) {
+		dev_err(&client->dev, "failed to get device desc\n");
+		return error;
+	}
+
+	param->vendor_id = get_unaligned_le16(buf + DEV_DESC_OFFSET_VID);
+	param->product_id = get_unaligned_le16(buf + DEV_DESC_OFFSET_PID);
+
 	error = wdt87xx_get_string(client, STRIDX_PARAMETERS, buf, 34);
 	if (error) {
 		dev_err(&client->dev, "failed to get parameters\n");
@@ -994,6 +1037,8 @@
 
 	input->name = "WDT87xx Touchscreen";
 	input->id.bustype = BUS_I2C;
+	input->id.vendor = wdt->param.vendor_id;
+	input->id.product = wdt->param.product_id;
 	input->phys = wdt->phys;
 
 	input_set_abs_params(input, ABS_MT_POSITION_X, 0,
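
The new wdt87xx descriptor read exists to populate input->id: the 18-byte
device descriptor is a little-endian blob, so the vendor and product IDs
come out via get_unaligned_le16() at the offsets named by the new defines.
Sketch (offsets copied from above, names hypothetical):

#include <asm/unaligned.h>
#include <linux/types.h>

static void example_parse_ids(const u8 *desc, u16 *vid, u16 *pid)
{
	*vid = get_unaligned_le16(desc + 8);	/* DEV_DESC_OFFSET_VID */
	*pid = get_unaligned_le16(desc + 10);	/* DEV_DESC_OFFSET_PID */
}
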
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index b1ae779..1534e9b 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -732,8 +732,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int wm97xx_suspend(struct device *dev, pm_message_t state)
+static int __maybe_unused wm97xx_suspend(struct device *dev)
 {
 	struct wm97xx *wm = dev_get_drvdata(dev);
 	u16 reg;
@@ -765,7 +764,7 @@
 	return 0;
 }
 
-static int wm97xx_resume(struct device *dev)
+static int __maybe_unused wm97xx_resume(struct device *dev)
 {
 	struct wm97xx *wm = dev_get_drvdata(dev);
 
@@ -799,10 +798,7 @@
 	return 0;
 }
 
-#else
-#define wm97xx_suspend		NULL
-#define wm97xx_resume		NULL
-#endif
+static SIMPLE_DEV_PM_OPS(wm97xx_pm_ops, wm97xx_suspend, wm97xx_resume);
 
 /*
  * Machine specific operations
@@ -836,8 +832,7 @@
 	.owner =	THIS_MODULE,
 	.probe =	wm97xx_probe,
 	.remove =	wm97xx_remove,
-	.suspend =	wm97xx_suspend,
-	.resume =	wm97xx_resume,
+	.pm =		&wm97xx_pm_ops,
 };
 
 static int __init wm97xx_init(void)
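
The wm97xx hunk is the standard modernization away from the legacy
driver-model hooks (note the old suspend callback even took a
pm_message_t): the callbacks become __maybe_unused so they always compile,
and SIMPLE_DEV_PM_OPS() emits a dev_pm_ops that references them only when
sleep support is configured, replacing the #ifdef CONFIG_PM block and the
NULL fallbacks. In miniature:

#include <linux/pm.h>

static int __maybe_unused example_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
	/* bring the hardware back up */
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
/* wired up via .driver.pm = &example_pm_ops */
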
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index f58a196..781d0f8 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -24,14 +24,13 @@
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/device.h>
 #include <linux/sysfs.h>
 #include <linux/input/mt.h>
 #include <linux/platform_data/zforce_ts.h>
 #include <linux/regulator/consumer.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 
 #define WAIT_TIMEOUT		msecs_to_jiffies(1000)
 
@@ -120,6 +119,9 @@
 
 	struct regulator	*reg_vdd;
 
+	struct gpio_desc	*gpio_int;
+	struct gpio_desc	*gpio_rst;
+
 	bool			suspending;
 	bool			suspended;
 	bool			boot_complete;
@@ -161,6 +163,16 @@
 	return 0;
 }
 
+static void zforce_reset_assert(struct zforce_ts *ts)
+{
+	gpiod_set_value_cansleep(ts->gpio_rst, 1);
+}
+
+static void zforce_reset_deassert(struct zforce_ts *ts)
+{
+	gpiod_set_value_cansleep(ts->gpio_rst, 0);
+}
+
 static int zforce_send_wait(struct zforce_ts *ts, const char *buf, int len)
 {
 	struct i2c_client *client = ts->client;
@@ -479,7 +491,6 @@
 {
 	struct zforce_ts *ts = dev_id;
 	struct i2c_client *client = ts->client;
-	const struct zforce_ts_platdata *pdata = ts->pdata;
 	int ret;
 	u8 payload_buffer[FRAME_MAXSIZE];
 	u8 *payload;
@@ -499,7 +510,16 @@
 	if (!ts->suspending && device_may_wakeup(&client->dev))
 		pm_stay_awake(&client->dev);
 
-	while (!gpio_get_value(pdata->gpio_int)) {
+	/*
+	 * Run the loop at least once, and exit it when either
+	 * - the optional interrupt GPIO isn't specified
+	 *   (in that case only one packet is read per ISR invocation)
+	 * or
+	 * - the GPIO is no longer active
+	 *   (packets are read until the level-triggered GPIO indicates
+	 *    that no IRQ is pending any more)
+	 */
+	do {
 		ret = zforce_read_packet(ts, payload_buffer);
 		if (ret < 0) {
 			dev_err(&client->dev,
@@ -566,7 +586,7 @@
 				payload[RESPONSE_ID]);
 			break;
 		}
-	}
+	} while (gpiod_get_value_cansleep(ts->gpio_int));
 
 	if (!ts->suspending && device_may_wakeup(&client->dev))
 		pm_relax(&client->dev);
@@ -690,7 +710,7 @@
 {
 	struct zforce_ts *ts = data;
 
-	gpio_set_value(ts->pdata->gpio_rst, 0);
+	zforce_reset_assert(ts);
 
 	udelay(10);
 
@@ -712,18 +732,6 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	pdata->gpio_int = of_get_gpio(np, 0);
-	if (!gpio_is_valid(pdata->gpio_int)) {
-		dev_err(dev, "failed to get interrupt gpio\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	pdata->gpio_rst = of_get_gpio(np, 1);
-	if (!gpio_is_valid(pdata->gpio_rst)) {
-		dev_err(dev, "failed to get reset gpio\n");
-		return ERR_PTR(-EINVAL);
-	}
-
 	if (of_property_read_u32(np, "x-size", &pdata->x_max)) {
 		dev_err(dev, "failed to get x-size property\n");
 		return ERR_PTR(-EINVAL);
@@ -755,20 +763,49 @@
 	if (!ts)
 		return -ENOMEM;
 
-	ret = devm_gpio_request_one(&client->dev, pdata->gpio_int, GPIOF_IN,
-				    "zforce_ts_int");
-	if (ret) {
-		dev_err(&client->dev, "request of gpio %d failed, %d\n",
-			pdata->gpio_int, ret);
+	ts->gpio_rst = devm_gpiod_get_optional(&client->dev, "reset",
+					       GPIOD_OUT_HIGH);
+	if (IS_ERR(ts->gpio_rst)) {
+		ret = PTR_ERR(ts->gpio_rst);
+		dev_err(&client->dev,
+			"failed to request reset GPIO: %d\n", ret);
 		return ret;
 	}
 
-	ret = devm_gpio_request_one(&client->dev, pdata->gpio_rst,
-				    GPIOF_OUT_INIT_LOW, "zforce_ts_rst");
-	if (ret) {
-		dev_err(&client->dev, "request of gpio %d failed, %d\n",
-			pdata->gpio_rst, ret);
-		return ret;
+	if (ts->gpio_rst) {
+		ts->gpio_int = devm_gpiod_get_optional(&client->dev, "irq",
+						       GPIOD_IN);
+		if (IS_ERR(ts->gpio_int)) {
+			ret = PTR_ERR(ts->gpio_int);
+			dev_err(&client->dev,
+				"failed to request interrupt GPIO: %d\n", ret);
+			return ret;
+		}
+	} else {
+		/*
+		 * Deprecated GPIO handling for compatibility
+		 * with legacy binding.
+		 */
+
+		/* INT GPIO */
+		ts->gpio_int = devm_gpiod_get_index(&client->dev, NULL, 0,
+						    GPIOD_IN);
+		if (IS_ERR(ts->gpio_int)) {
+			ret = PTR_ERR(ts->gpio_int);
+			dev_err(&client->dev,
+				"failed to request interrupt GPIO: %d\n", ret);
+			return ret;
+		}
+
+		/* RST GPIO */
+		ts->gpio_rst = devm_gpiod_get_index(&client->dev, NULL, 1,
+					    GPIOD_OUT_HIGH);
+		if (IS_ERR(ts->gpio_rst)) {
+			ret = PTR_ERR(ts->gpio_rst);
+			dev_err(&client->dev,
+				"failed to request reset GPIO: %d\n", ret);
+			return ret;
+		}
 	}
 
 	ts->reg_vdd = devm_regulator_get_optional(&client->dev, "vdd");
@@ -863,7 +900,7 @@
 	i2c_set_clientdata(client, ts);
 
 	/* let the controller boot */
-	gpio_set_value(pdata->gpio_rst, 1);
+	zforce_reset_deassert(ts);
 
 	ts->command_waiting = NOTIFICATION_BOOTCOMPLETE;
 	if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0)
@@ -917,7 +954,6 @@
 
 static struct i2c_driver zforce_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "zforce-ts",
 		.pm	= &zforce_pm_ops,
 		.of_match_table	= of_match_ptr(zforce_dt_idtable),
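
zforce now prefers a named "reset" GPIO but keeps the old anonymous
binding alive: when devm_gpiod_get_optional() finds nothing, the driver
falls back to devm_gpiod_get_index() with a NULL con_id, which resolves
the bare "gpios" property purely by position (interrupt first, reset
second). The ladder in sketch form, names illustrative:

#include <linux/gpio/consumer.h>

static struct gpio_desc *example_get_reset(struct device *dev)
{
	struct gpio_desc *rst;

	rst = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (rst)	/* found via named binding, or a hard ERR_PTR */
		return rst;

	/* legacy binding: reset is the second anonymous GPIO */
	return devm_gpiod_get_index(dev, NULL, 1, GPIOD_OUT_HIGH);
}
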
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 697291a..c82ebee 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2103,7 +2103,7 @@
 			sg_res = aligned_nrpages(sg->offset, sg->length);
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
 			sg->dma_length = sg->length;
-			pteval = page_to_phys(sg_page(sg)) | prot;
+			pteval = (sg_phys(sg) & PAGE_MASK) | prot;
 			phys_pfn = pteval >> VTD_PAGE_SHIFT;
 		}
 
@@ -3631,7 +3631,7 @@
 
 	for_each_sg(sglist, sg, nelems, i) {
 		BUG_ON(!sg_page(sg));
-		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
+		sg->dma_address = sg_phys(sg);
 		sg->dma_length = sg->length;
 	}
 	return nelems;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f286090..049df49 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1408,7 +1408,7 @@
 	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
 
 	for_each_sg(sg, s, nents, i) {
-		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+		phys_addr_t phys = sg_phys(s);
 
 		/*
 		 * We are mapping on IOMMU page boundaries, so offset within
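
Both IOMMU hunks substitute sg_phys() for its open-coded expansion; the
helper in <linux/scatterlist.h> is defined as exactly the expression being
replaced, so this is a pure cleanup. The VT-d mapping path additionally
masks with PAGE_MASK because the PTE value must be page-aligned, while the
intra-page offset is already carried in dma_address. For reference:

#include <linux/scatterlist.h>

/* equivalent to the kernel's sg_phys() helper */
static inline phys_addr_t example_sg_phys(struct scatterlist *sg)
{
	return page_to_phys(sg_page(sg)) + sg->offset;
}
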
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index dae5914..1764bcf 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -41,20 +41,46 @@
 
 static void __gic_irq_dispatch(void);
 
-static inline unsigned int gic_read(unsigned int reg)
+static inline u32 gic_read32(unsigned int reg)
 {
 	return __raw_readl(gic_base + reg);
 }
 
-static inline void gic_write(unsigned int reg, unsigned int val)
+static inline u64 gic_read64(unsigned int reg)
 {
-	__raw_writel(val, gic_base + reg);
+	return __raw_readq(gic_base + reg);
 }
 
-static inline void gic_update_bits(unsigned int reg, unsigned int mask,
-				   unsigned int val)
+static inline unsigned long gic_read(unsigned int reg)
 {
-	unsigned int regval;
+	if (!mips_cm_is64)
+		return gic_read32(reg);
+	else
+		return gic_read64(reg);
+}
+
+static inline void gic_write32(unsigned int reg, u32 val)
+{
+	return __raw_writel(val, gic_base + reg);
+}
+
+static inline void gic_write64(unsigned int reg, u64 val)
+{
+	return __raw_writeq(val, gic_base + reg);
+}
+
+static inline void gic_write(unsigned int reg, unsigned long val)
+{
+	if (!mips_cm_is64)
+		return gic_write32(reg, (u32)val);
+	else
+		return gic_write64(reg, (u64)val);
+}
+
+static inline void gic_update_bits(unsigned int reg, unsigned long mask,
+				   unsigned long val)
+{
+	unsigned long regval;
 
 	regval = gic_read(reg);
 	regval &= ~mask;
@@ -65,40 +91,40 @@
 static inline void gic_reset_mask(unsigned int intr)
 {
 	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
-		  1 << GIC_INTR_BIT(intr));
+		  1ul << GIC_INTR_BIT(intr));
 }
 
 static inline void gic_set_mask(unsigned int intr)
 {
 	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
-		  1 << GIC_INTR_BIT(intr));
+		  1ul << GIC_INTR_BIT(intr));
 }
 
 static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
 {
 	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
-			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
-			pol << GIC_INTR_BIT(intr));
+			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
+			(unsigned long)pol << GIC_INTR_BIT(intr));
 }
 
 static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
 {
 	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
-			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
-			trig << GIC_INTR_BIT(intr));
+			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
+			(unsigned long)trig << GIC_INTR_BIT(intr));
 }
 
 static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
 {
 	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
-			1 << GIC_INTR_BIT(intr),
-			dual << GIC_INTR_BIT(intr));
+			1ul << GIC_INTR_BIT(intr),
+			(unsigned long)dual << GIC_INTR_BIT(intr));
 }
 
 static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
 {
-	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
-		  GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
+	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
+		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
 }
 
 static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
@@ -113,10 +139,13 @@
 {
 	unsigned int hi, hi2, lo;
 
+	if (mips_cm_is64)
+		return (cycle_t)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));
+
 	do {
-		hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
-		lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
-		hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
+		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
+		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
+		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
 	} while (hi2 != hi);
 
 	return (((cycle_t) hi) << 32) + lo;
@@ -135,10 +164,14 @@
 
 void gic_write_compare(cycle_t cnt)
 {
-	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
-				(int)(cnt >> 32));
-	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
-				(int)(cnt & 0xffffffff));
+	if (mips_cm_is64) {
+		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
+	} else {
+		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
+					(int)(cnt >> 32));
+		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
+					(int)(cnt & 0xffffffff));
+	}
 }
 
 void gic_write_cpu_compare(cycle_t cnt, int cpu)
@@ -148,10 +181,15 @@
 	local_irq_save(flags);
 
 	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
-	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
-				(int)(cnt >> 32));
-	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
-				(int)(cnt & 0xffffffff));
+
+	if (mips_cm_is64) {
+		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
+	} else {
+		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
+					(int)(cnt >> 32));
+		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
+					(int)(cnt & 0xffffffff));
+	}
 
 	local_irq_restore(flags);
 }
@@ -160,8 +198,11 @@
 {
 	unsigned int hi, lo;
 
-	hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
-	lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
+	if (mips_cm_is64)
+		return (cycle_t)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));
+
+	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
+	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
 
 	return (((cycle_t) hi) << 32) + lo;
 }
@@ -196,7 +237,7 @@
 	if (cpu_has_veic)
 		return true;
 
-	vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
+	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
 	switch (intr) {
 	case GIC_LOCAL_INT_TIMER:
 		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
@@ -262,7 +303,7 @@
 
 static void gic_handle_shared_int(bool chained)
 {
-	unsigned int i, intr, virq;
+	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
 	unsigned long *pcpu_mask;
 	unsigned long pending_reg, intrmask_reg;
 	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
@@ -277,8 +318,8 @@
 	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
 		pending[i] = gic_read(pending_reg);
 		intrmask[i] = gic_read(intrmask_reg);
-		pending_reg += 0x4;
-		intrmask_reg += 0x4;
+		pending_reg += gic_reg_step;
+		intrmask_reg += gic_reg_step;
 	}
 
 	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
@@ -425,8 +466,8 @@
 	unsigned long pending, masked;
 	unsigned int intr, virq;
 
-	pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
-	masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));
+	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
+	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));
 
 	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);
 
@@ -449,14 +490,14 @@
 {
 	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
 
-	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
+	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
 }
 
 static void gic_unmask_local_irq(struct irq_data *d)
 {
 	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
 
-	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
+	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
 }
 
 static struct irq_chip gic_local_irq_controller = {
@@ -474,7 +515,7 @@
 	spin_lock_irqsave(&gic_lock, flags);
 	for (i = 0; i < gic_vpes; i++) {
 		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
-		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
+		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
 	}
 	spin_unlock_irqrestore(&gic_lock, flags);
 }
@@ -488,7 +529,7 @@
 	spin_lock_irqsave(&gic_lock, flags);
 	for (i = 0; i < gic_vpes; i++) {
 		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
-		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
+		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
 	}
 	spin_unlock_irqrestore(&gic_lock, flags);
 }
@@ -608,7 +649,7 @@
 		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
 			if (!gic_local_irq_is_routable(j))
 				continue;
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
 		}
 	}
 }
@@ -653,27 +694,32 @@
 
 		switch (intr) {
 		case GIC_LOCAL_INT_WD:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
 			break;
 		case GIC_LOCAL_INT_COMPARE:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
+				    val);
 			break;
 		case GIC_LOCAL_INT_TIMER:
 			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
 			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
+				    val);
 			break;
 		case GIC_LOCAL_INT_PERFCTR:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
+				    val);
 			break;
 		case GIC_LOCAL_INT_SWINT0:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
+				    val);
 			break;
 		case GIC_LOCAL_INT_SWINT1:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
+				    val);
 			break;
 		case GIC_LOCAL_INT_FDC:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
 			break;
 		default:
 			pr_err("Invalid local IRQ %d\n", intr);
@@ -778,7 +824,7 @@
 		 */
 		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
 		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
-			timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL,
+			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
 							 GIC_VPE_TIMER_MAP)) &
 					GIC_MAP_MSK;
 			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
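The hunks above split the GIC accessors by width: gic_read()/gic_write() perform a native-width access (64-bit when the Coherence Manager is 64-bit), while registers that are architecturally 32 bits wide move to gic_read32()/gic_write32(). A minimal sketch of the split, with hypothetical helper names rather than the driver's actual implementation:

    #include <stdbool.h>
    #include <stdint.h>

    static bool cm_is64;	/* stands in for mips_cm_is64 */

    /* Native-width read: 64-bit on a 64-bit CM, 32-bit otherwise. */
    static uint64_t gic_read_sketch(const volatile void *addr)
    {
    	if (cm_is64)
    		return *(const volatile uint64_t *)addr;
    	return *(const volatile uint32_t *)addr;
    }

    /* Fixed 32-bit read, for registers that stay 32 bits wide. */
    static uint32_t gic_read32_sketch(const volatile void *addr)
    {
    	return *(const volatile uint32_t *)addr;
    }

The gic_reg_step of 8 versus 4 bytes in gic_handle_shared_int() follows the same rule: on a 64-bit GIC each pending/mask register occupies 8 bytes instead of 4.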
diff --git a/drivers/isdn/mISDN/dsp_audio.c b/drivers/isdn/mISDN/dsp_audio.c
index 0602295..bbef98e 100644
--- a/drivers/isdn/mISDN/dsp_audio.c
+++ b/drivers/isdn/mISDN/dsp_audio.c
@@ -13,6 +13,7 @@
 #include <linux/mISDNif.h>
 #include <linux/mISDNdsp.h>
 #include <linux/export.h>
+#include <linux/bitrev.h>
 #include "core.h"
 #include "dsp.h"
 
@@ -137,27 +138,14 @@
 	return ulawbyte;
 }
 
-static int reverse_bits(int i)
-{
-	int z, j;
-	z = 0;
-
-	for (j = 0; j < 8; j++) {
-		if ((i & (1 << j)) != 0)
-			z |= 1 << (7 - j);
-	}
-	return z;
-}
-
-
 void dsp_audio_generate_law_tables(void)
 {
 	int i;
 	for (i = 0; i < 256; i++)
-		dsp_audio_alaw_to_s32[i] = alaw2linear(reverse_bits(i));
+		dsp_audio_alaw_to_s32[i] = alaw2linear(bitrev8((u8)i));
 
 	for (i = 0; i < 256; i++)
-		dsp_audio_ulaw_to_s32[i] = ulaw2linear(reverse_bits(i));
+		dsp_audio_ulaw_to_s32[i] = ulaw2linear(bitrev8((u8)i));
 
 	for (i = 0; i < 256; i++) {
 		dsp_audio_alaw_to_ulaw[i] =
@@ -176,13 +164,13 @@
 		/* generating ulaw-table */
 		for (i = -32768; i < 32768; i++) {
 			dsp_audio_s16_to_law[i & 0xffff] =
-				reverse_bits(linear2ulaw(i));
+				bitrev8(linear2ulaw(i));
 		}
 	} else {
 		/* generating alaw-table */
 		for (i = -32768; i < 32768; i++) {
 			dsp_audio_s16_to_law[i & 0xffff] =
-				reverse_bits(linear2alaw(i));
+				bitrev8(linear2alaw(i));
 		}
 	}
 }
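The hand-rolled reverse_bits() helper is replaced by bitrev8() from <linux/bitrev.h>, which reverses the bit order of a byte. A small userspace sketch (illustration only, not kernel code) showing that a bit reversal of this kind matches the removed loop:

    #include <assert.h>
    #include <stdint.h>

    /* The removed open-coded loop, kept here for comparison. */
    static int reverse_bits(int i)
    {
    	int z = 0, j;

    	for (j = 0; j < 8; j++)
    		if (i & (1 << j))
    			z |= 1 << (7 - j);
    	return z;
    }

    /* A table-free stand-in for the kernel's bitrev8(). */
    static uint8_t bitrev8_sketch(uint8_t b)
    {
    	b = (b & 0xF0) >> 4 | (b & 0x0F) << 4;	/* swap nibbles */
    	b = (b & 0xCC) >> 2 | (b & 0x33) << 2;	/* swap bit pairs */
    	b = (b & 0xAA) >> 1 | (b & 0x55) << 1;	/* swap adjacent bits */
    	return b;
    }

    int main(void)
    {
    	int i;

    	for (i = 0; i < 256; i++)
    		assert(bitrev8_sketch((uint8_t)i) == reverse_bits(i));
    	return 0;
    }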
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 23408bd..70f4255 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -565,6 +565,17 @@
 	  This option enables support for the BlinkM RGB LED connected
 	  through I2C. Say Y to enable support for the BlinkM LED.
 
+config LEDS_POWERNV
+	tristate "LED support for PowerNV Platform"
+	depends on LEDS_CLASS
+	depends on PPC_POWERNV
+	depends on OF
+	help
+	  This option enables support for the system LEDs present on
+	  PowerNV platforms. Say 'y' to enable this support in the kernel.
+	  To compile this driver as a module, choose 'm' here: the module
+	  will be called leds-powernv.
+
 config LEDS_SYSCON
 	bool "LED support for LEDs on system controllers"
 	depends on LEDS_CLASS=y
@@ -583,14 +594,6 @@
 	  This option enables support for the LEDs on the ARM Versatile
 	  and RealView boards. Say Y to enable these.
 
-config LEDS_PM8941_WLED
-	tristate "LED support for the Qualcomm PM8941 WLED block"
-	depends on LEDS_CLASS
-	select REGMAP
-	help
-	  This option enables support for the 'White' LED block
-	  on Qualcomm PM8941 PMICs.
-
 comment "LED Triggers"
 source "drivers/leds/trigger/Kconfig"
 
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 8d6a24a..b503f92 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -63,8 +63,8 @@
 obj-$(CONFIG_LEDS_SYSCON)		+= leds-syscon.o
 obj-$(CONFIG_LEDS_VERSATILE)		+= leds-versatile.o
 obj-$(CONFIG_LEDS_MENF21BMC)		+= leds-menf21bmc.o
-obj-$(CONFIG_LEDS_PM8941_WLED)		+= leds-pm8941-wled.o
 obj-$(CONFIG_LEDS_KTD2692)		+= leds-ktd2692.o
+obj-$(CONFIG_LEDS_POWERNV)		+= leds-powernv.o
 
 # LED SPI Drivers
 obj-$(CONFIG_LEDS_DAC124S085)		+= leds-dac124s085.o
diff --git a/drivers/leds/leds-pm8941-wled.c b/drivers/leds/leds-pm8941-wled.c
deleted file mode 100644
index bf64a59..0000000
--- a/drivers/leds/leds-pm8941-wled.c
+++ /dev/null
@@ -1,435 +0,0 @@
-/* Copyright (c) 2015, Sony Mobile Communications, AB.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/leds.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/regmap.h>
-
-#define PM8941_WLED_REG_VAL_BASE		0x40
-#define  PM8941_WLED_REG_VAL_MAX		0xFFF
-
-#define PM8941_WLED_REG_MOD_EN			0x46
-#define  PM8941_WLED_REG_MOD_EN_BIT		BIT(7)
-#define  PM8941_WLED_REG_MOD_EN_MASK		BIT(7)
-
-#define PM8941_WLED_REG_SYNC			0x47
-#define  PM8941_WLED_REG_SYNC_MASK		0x07
-#define  PM8941_WLED_REG_SYNC_LED1		BIT(0)
-#define  PM8941_WLED_REG_SYNC_LED2		BIT(1)
-#define  PM8941_WLED_REG_SYNC_LED3		BIT(2)
-#define  PM8941_WLED_REG_SYNC_ALL		0x07
-#define  PM8941_WLED_REG_SYNC_CLEAR		0x00
-
-#define PM8941_WLED_REG_FREQ			0x4c
-#define  PM8941_WLED_REG_FREQ_MASK		0x0f
-
-#define PM8941_WLED_REG_OVP			0x4d
-#define  PM8941_WLED_REG_OVP_MASK		0x03
-
-#define PM8941_WLED_REG_BOOST			0x4e
-#define  PM8941_WLED_REG_BOOST_MASK		0x07
-
-#define PM8941_WLED_REG_SINK			0x4f
-#define  PM8941_WLED_REG_SINK_MASK		0xe0
-#define  PM8941_WLED_REG_SINK_SHFT		0x05
-
-/* Per-'string' registers below */
-#define PM8941_WLED_REG_STR_OFFSET		0x10
-
-#define PM8941_WLED_REG_STR_MOD_EN_BASE		0x60
-#define  PM8941_WLED_REG_STR_MOD_MASK		BIT(7)
-#define  PM8941_WLED_REG_STR_MOD_EN		BIT(7)
-
-#define PM8941_WLED_REG_STR_SCALE_BASE		0x62
-#define  PM8941_WLED_REG_STR_SCALE_MASK		0x1f
-
-#define PM8941_WLED_REG_STR_MOD_SRC_BASE	0x63
-#define  PM8941_WLED_REG_STR_MOD_SRC_MASK	0x01
-#define  PM8941_WLED_REG_STR_MOD_SRC_INT	0x00
-#define  PM8941_WLED_REG_STR_MOD_SRC_EXT	0x01
-
-#define PM8941_WLED_REG_STR_CABC_BASE		0x66
-#define  PM8941_WLED_REG_STR_CABC_MASK		BIT(7)
-#define  PM8941_WLED_REG_STR_CABC_EN		BIT(7)
-
-struct pm8941_wled_config {
-	u32 i_boost_limit;
-	u32 ovp;
-	u32 switch_freq;
-	u32 num_strings;
-	u32 i_limit;
-	bool cs_out_en;
-	bool ext_gen;
-	bool cabc_en;
-};
-
-struct pm8941_wled {
-	struct regmap *regmap;
-	u16 addr;
-
-	struct led_classdev cdev;
-
-	struct pm8941_wled_config cfg;
-};
-
-static int pm8941_wled_set(struct led_classdev *cdev,
-			   enum led_brightness value)
-{
-	struct pm8941_wled *wled;
-	u8 ctrl = 0;
-	u16 val;
-	int rc;
-	int i;
-
-	wled = container_of(cdev, struct pm8941_wled, cdev);
-
-	if (value != 0)
-		ctrl = PM8941_WLED_REG_MOD_EN_BIT;
-
-	val = value * PM8941_WLED_REG_VAL_MAX / LED_FULL;
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_MOD_EN,
-			PM8941_WLED_REG_MOD_EN_MASK, ctrl);
-	if (rc)
-		return rc;
-
-	for (i = 0; i < wled->cfg.num_strings; ++i) {
-		u8 v[2] = { val & 0xff, (val >> 8) & 0xf };
-
-		rc = regmap_bulk_write(wled->regmap,
-				wled->addr + PM8941_WLED_REG_VAL_BASE + 2 * i,
-				v, 2);
-		if (rc)
-			return rc;
-	}
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_SYNC,
-			PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_ALL);
-	if (rc)
-		return rc;
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_SYNC,
-			PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_CLEAR);
-	return rc;
-}
-
-static void pm8941_wled_set_brightness(struct led_classdev *cdev,
-				       enum led_brightness value)
-{
-	if (pm8941_wled_set(cdev, value)) {
-		dev_err(cdev->dev, "Unable to set brightness\n");
-		return;
-	}
-	cdev->brightness = value;
-}
-
-static int pm8941_wled_setup(struct pm8941_wled *wled)
-{
-	int rc;
-	int i;
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_OVP,
-			PM8941_WLED_REG_OVP_MASK, wled->cfg.ovp);
-	if (rc)
-		return rc;
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_BOOST,
-			PM8941_WLED_REG_BOOST_MASK, wled->cfg.i_boost_limit);
-	if (rc)
-		return rc;
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_FREQ,
-			PM8941_WLED_REG_FREQ_MASK, wled->cfg.switch_freq);
-	if (rc)
-		return rc;
-
-	if (wled->cfg.cs_out_en) {
-		u8 all = (BIT(wled->cfg.num_strings) - 1)
-				<< PM8941_WLED_REG_SINK_SHFT;
-
-		rc = regmap_update_bits(wled->regmap,
-				wled->addr + PM8941_WLED_REG_SINK,
-				PM8941_WLED_REG_SINK_MASK, all);
-		if (rc)
-			return rc;
-	}
-
-	for (i = 0; i < wled->cfg.num_strings; ++i) {
-		u16 addr = wled->addr + PM8941_WLED_REG_STR_OFFSET * i;
-
-		rc = regmap_update_bits(wled->regmap,
-				addr + PM8941_WLED_REG_STR_MOD_EN_BASE,
-				PM8941_WLED_REG_STR_MOD_MASK,
-				PM8941_WLED_REG_STR_MOD_EN);
-		if (rc)
-			return rc;
-
-		if (wled->cfg.ext_gen) {
-			rc = regmap_update_bits(wled->regmap,
-					addr + PM8941_WLED_REG_STR_MOD_SRC_BASE,
-					PM8941_WLED_REG_STR_MOD_SRC_MASK,
-					PM8941_WLED_REG_STR_MOD_SRC_EXT);
-			if (rc)
-				return rc;
-		}
-
-		rc = regmap_update_bits(wled->regmap,
-				addr + PM8941_WLED_REG_STR_SCALE_BASE,
-				PM8941_WLED_REG_STR_SCALE_MASK,
-				wled->cfg.i_limit);
-		if (rc)
-			return rc;
-
-		rc = regmap_update_bits(wled->regmap,
-				addr + PM8941_WLED_REG_STR_CABC_BASE,
-				PM8941_WLED_REG_STR_CABC_MASK,
-				wled->cfg.cabc_en ?
-					PM8941_WLED_REG_STR_CABC_EN : 0);
-		if (rc)
-			return rc;
-	}
-
-	return 0;
-}
-
-static const struct pm8941_wled_config pm8941_wled_config_defaults = {
-	.i_boost_limit = 3,
-	.i_limit = 20,
-	.ovp = 2,
-	.switch_freq = 5,
-	.num_strings = 0,
-	.cs_out_en = false,
-	.ext_gen = false,
-	.cabc_en = false,
-};
-
-struct pm8941_wled_var_cfg {
-	const u32 *values;
-	u32 (*fn)(u32);
-	int size;
-};
-
-static const u32 pm8941_wled_i_boost_limit_values[] = {
-	105, 385, 525, 805, 980, 1260, 1400, 1680,
-};
-
-static const struct pm8941_wled_var_cfg pm8941_wled_i_boost_limit_cfg = {
-	.values = pm8941_wled_i_boost_limit_values,
-	.size = ARRAY_SIZE(pm8941_wled_i_boost_limit_values),
-};
-
-static const u32 pm8941_wled_ovp_values[] = {
-	35, 32, 29, 27,
-};
-
-static const struct pm8941_wled_var_cfg pm8941_wled_ovp_cfg = {
-	.values = pm8941_wled_ovp_values,
-	.size = ARRAY_SIZE(pm8941_wled_ovp_values),
-};
-
-static u32 pm8941_wled_num_strings_values_fn(u32 idx)
-{
-	return idx + 1;
-}
-
-static const struct pm8941_wled_var_cfg pm8941_wled_num_strings_cfg = {
-	.fn = pm8941_wled_num_strings_values_fn,
-	.size = 3,
-};
-
-static u32 pm8941_wled_switch_freq_values_fn(u32 idx)
-{
-	return 19200 / (2 * (1 + idx));
-}
-
-static const struct pm8941_wled_var_cfg pm8941_wled_switch_freq_cfg = {
-	.fn = pm8941_wled_switch_freq_values_fn,
-	.size = 16,
-};
-
-static const struct pm8941_wled_var_cfg pm8941_wled_i_limit_cfg = {
-	.size = 26,
-};
-
-static u32 pm8941_wled_values(const struct pm8941_wled_var_cfg *cfg, u32 idx)
-{
-	if (idx >= cfg->size)
-		return UINT_MAX;
-	if (cfg->fn)
-		return cfg->fn(idx);
-	if (cfg->values)
-		return cfg->values[idx];
-	return idx;
-}
-
-static int pm8941_wled_configure(struct pm8941_wled *wled, struct device *dev)
-{
-	struct pm8941_wled_config *cfg = &wled->cfg;
-	u32 val;
-	int rc;
-	u32 c;
-	int i;
-	int j;
-
-	const struct {
-		const char *name;
-		u32 *val_ptr;
-		const struct pm8941_wled_var_cfg *cfg;
-	} u32_opts[] = {
-		{
-			"qcom,current-boost-limit",
-			&cfg->i_boost_limit,
-			.cfg = &pm8941_wled_i_boost_limit_cfg,
-		},
-		{
-			"qcom,current-limit",
-			&cfg->i_limit,
-			.cfg = &pm8941_wled_i_limit_cfg,
-		},
-		{
-			"qcom,ovp",
-			&cfg->ovp,
-			.cfg = &pm8941_wled_ovp_cfg,
-		},
-		{
-			"qcom,switching-freq",
-			&cfg->switch_freq,
-			.cfg = &pm8941_wled_switch_freq_cfg,
-		},
-		{
-			"qcom,num-strings",
-			&cfg->num_strings,
-			.cfg = &pm8941_wled_num_strings_cfg,
-		},
-	};
-	const struct {
-		const char *name;
-		bool *val_ptr;
-	} bool_opts[] = {
-		{ "qcom,cs-out", &cfg->cs_out_en, },
-		{ "qcom,ext-gen", &cfg->ext_gen, },
-		{ "qcom,cabc", &cfg->cabc_en, },
-	};
-
-	rc = of_property_read_u32(dev->of_node, "reg", &val);
-	if (rc || val > 0xffff) {
-		dev_err(dev, "invalid IO resources\n");
-		return rc ? rc : -EINVAL;
-	}
-	wled->addr = val;
-
-	rc = of_property_read_string(dev->of_node, "label", &wled->cdev.name);
-	if (rc)
-		wled->cdev.name = dev->of_node->name;
-
-	wled->cdev.default_trigger = of_get_property(dev->of_node,
-			"linux,default-trigger", NULL);
-
-	*cfg = pm8941_wled_config_defaults;
-	for (i = 0; i < ARRAY_SIZE(u32_opts); ++i) {
-		rc = of_property_read_u32(dev->of_node, u32_opts[i].name, &val);
-		if (rc == -EINVAL) {
-			continue;
-		} else if (rc) {
-			dev_err(dev, "error reading '%s'\n", u32_opts[i].name);
-			return rc;
-		}
-
-		c = UINT_MAX;
-		for (j = 0; c != val; j++) {
-			c = pm8941_wled_values(u32_opts[i].cfg, j);
-			if (c == UINT_MAX) {
-				dev_err(dev, "invalid value for '%s'\n",
-					u32_opts[i].name);
-				return -EINVAL;
-			}
-		}
-
-		dev_dbg(dev, "'%s' = %u\n", u32_opts[i].name, c);
-		*u32_opts[i].val_ptr = j;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(bool_opts); ++i) {
-		if (of_property_read_bool(dev->of_node, bool_opts[i].name))
-			*bool_opts[i].val_ptr = true;
-	}
-
-	cfg->num_strings = cfg->num_strings + 1;
-
-	return 0;
-}
-
-static int pm8941_wled_probe(struct platform_device *pdev)
-{
-	struct pm8941_wled *wled;
-	struct regmap *regmap;
-	int rc;
-
-	regmap = dev_get_regmap(pdev->dev.parent, NULL);
-	if (!regmap) {
-		dev_err(&pdev->dev, "Unable to get regmap\n");
-		return -EINVAL;
-	}
-
-	wled = devm_kzalloc(&pdev->dev, sizeof(*wled), GFP_KERNEL);
-	if (!wled)
-		return -ENOMEM;
-
-	wled->regmap = regmap;
-
-	rc = pm8941_wled_configure(wled, &pdev->dev);
-	if (rc)
-		return rc;
-
-	rc = pm8941_wled_setup(wled);
-	if (rc)
-		return rc;
-
-	wled->cdev.brightness_set = pm8941_wled_set_brightness;
-
-	rc = devm_led_classdev_register(&pdev->dev, &wled->cdev);
-	if (rc)
-		return rc;
-
-	platform_set_drvdata(pdev, wled);
-
-	return 0;
-};
-
-static const struct of_device_id pm8941_wled_match_table[] = {
-	{ .compatible = "qcom,pm8941-wled" },
-	{}
-};
-MODULE_DEVICE_TABLE(of, pm8941_wled_match_table);
-
-static struct platform_driver pm8941_wled_driver = {
-	.probe = pm8941_wled_probe,
-	.driver	= {
-		.name = "pm8941-wled",
-		.of_match_table	= pm8941_wled_match_table,
-	},
-};
-
-module_platform_driver(pm8941_wled_driver);
-
-MODULE_DESCRIPTION("pm8941 wled driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:pm8941-wled");
diff --git a/drivers/leds/leds-powernv.c b/drivers/leds/leds-powernv.c
new file mode 100644
index 0000000..2c5c5b1
--- /dev/null
+++ b/drivers/leds/leds-powernv.c
@@ -0,0 +1,345 @@
+/*
+ * PowerNV LED Driver
+ *
+ * Copyright IBM Corp. 2015
+ *
+ * Author: Vasant Hegde <hegdevasant@linux.vnet.ibm.com>
+ * Author: Anshuman Khandual <khandual@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/opal.h>
+
+/* Map LED type to description. */
+struct led_type_map {
+	const int	type;
+	const char	*desc;
+};
+static const struct led_type_map led_type_map[] = {
+	{OPAL_SLOT_LED_TYPE_ID,		"identify"},
+	{OPAL_SLOT_LED_TYPE_FAULT,	"fault"},
+	{OPAL_SLOT_LED_TYPE_ATTN,	"attention"},
+	{-1,				NULL},
+};
+
+struct powernv_led_common {
+	/*
+	 * By default the unload path resets all the LEDs. But on the
+	 * PowerNV platform we want to retain the LED state across reboots,
+	 * as these LEDs are controlled by firmware. Also, the service
+	 * processor can modify the LEDs independently of the OS. Hence
+	 * avoid resetting the LEDs in the unload path.
+	 */
+	bool		led_disabled;
+
+	/* Max supported LED type */
+	__be64		max_led_type;
+
+	/* global lock */
+	struct mutex	lock;
+};
+
+/* PowerNV LED data */
+struct powernv_led_data {
+	struct led_classdev	cdev;
+	char			*loc_code;	/* LED location code */
+	int			led_type;	/* OPAL_SLOT_LED_TYPE_* */
+
+	struct powernv_led_common *common;
+};
+
+
+/* Returns OPAL_SLOT_LED_TYPE_* for a given LED type string */
+static int powernv_get_led_type(const char *led_type_desc)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(led_type_map); i++)
+		if (!strcmp(led_type_map[i].desc, led_type_desc))
+			return led_type_map[i].type;
+
+	return -1;
+}
+
+/*
+ * This commits the state change of the requested LED through an OPAL call.
+ * This function is called from work queue task context whenever it gets
+ * scheduled. It can sleep in the opal_async_wait_response() call.
+ */
+static void powernv_led_set(struct powernv_led_data *powernv_led,
+			    enum led_brightness value)
+{
+	int rc, token;
+	u64 led_mask, led_value = 0;
+	__be64 max_type;
+	struct opal_msg msg;
+	struct device *dev = powernv_led->cdev.dev;
+	struct powernv_led_common *powernv_led_common = powernv_led->common;
+
+	/* Prepare for the OPAL call */
+	max_type = powernv_led_common->max_led_type;
+	led_mask = OPAL_SLOT_LED_STATE_ON << powernv_led->led_type;
+	if (value)
+		led_value = led_mask;
+
+	/* OPAL async call */
+	token = opal_async_get_token_interruptible();
+	if (token < 0) {
+		if (token != -ERESTARTSYS)
+			dev_err(dev, "%s: Couldn't get OPAL async token\n",
+				__func__);
+		return;
+	}
+
+	rc = opal_leds_set_ind(token, powernv_led->loc_code,
+			       led_mask, led_value, &max_type);
+	if (rc != OPAL_ASYNC_COMPLETION) {
+		dev_err(dev, "%s: OPAL set LED call failed for %s [rc=%d]\n",
+			__func__, powernv_led->loc_code, rc);
+		goto out_token;
+	}
+
+	rc = opal_async_wait_response(token, &msg);
+	if (rc) {
+		dev_err(dev,
+			"%s: Failed to wait for the async response [rc=%d]\n",
+			__func__, rc);
+		goto out_token;
+	}
+
+	rc = be64_to_cpu(msg.params[1]);
+	if (rc != OPAL_SUCCESS)
+		dev_err(dev, "%s : OAPL async call returned failed [rc=%d]\n",
+			__func__, rc);
+
+out_token:
+	opal_async_release_token(token);
+}
+
+/*
+ * This function fetches the LED state for a given LED type for
+ * mentioned LED classdev structure.
+ */
+static enum led_brightness powernv_led_get(struct powernv_led_data *powernv_led)
+{
+	int rc;
+	__be64 mask, value, max_type;
+	u64 led_mask, led_value;
+	struct device *dev = powernv_led->cdev.dev;
+	struct powernv_led_common *powernv_led_common = powernv_led->common;
+
+	/* Fetch all LED status */
+	mask = cpu_to_be64(0);
+	value = cpu_to_be64(0);
+	max_type = powernv_led_common->max_led_type;
+
+	rc = opal_leds_get_ind(powernv_led->loc_code,
+			       &mask, &value, &max_type);
+	if (rc != OPAL_SUCCESS && rc != OPAL_PARTIAL) {
+		dev_err(dev, "%s: OPAL get led call failed [rc=%d]\n",
+			__func__, rc);
+		return LED_OFF;
+	}
+
+	led_mask = be64_to_cpu(mask);
+	led_value = be64_to_cpu(value);
+
+	/* Check whether the LED status is available */
+	if (!((led_mask >> powernv_led->led_type) & OPAL_SLOT_LED_STATE_ON)) {
+		dev_err(dev, "%s: LED status not available for %s\n",
+			__func__, powernv_led->cdev.name);
+		return LED_OFF;
+	}
+
+	/* LED status value */
+	if ((led_value >> powernv_led->led_type) & OPAL_SLOT_LED_STATE_ON)
+		return LED_FULL;
+
+	return LED_OFF;
+}
+
+/*
+ * LED classdev 'brightness_set' function. This updates the LED state
+ * through an OPAL call, under the global lock.
+ */
+static void powernv_brightness_set(struct led_classdev *led_cdev,
+				   enum led_brightness value)
+{
+	struct powernv_led_data *powernv_led =
+		container_of(led_cdev, struct powernv_led_data, cdev);
+	struct powernv_led_common *powernv_led_common = powernv_led->common;
+
+	/* Do not modify LED in unload path */
+	if (powernv_led_common->led_disabled)
+		return;
+
+	mutex_lock(&powernv_led_common->lock);
+	powernv_led_set(powernv_led, value);
+	mutex_unlock(&powernv_led_common->lock);
+}
+
+/* LED classdev 'brightness_get' function */
+static enum led_brightness powernv_brightness_get(struct led_classdev *led_cdev)
+{
+	struct powernv_led_data *powernv_led =
+		container_of(led_cdev, struct powernv_led_data, cdev);
+
+	return powernv_led_get(powernv_led);
+}
+
+/*
+ * This function registers a classdev structure for any given LED type on
+ * a given child LED device node.
+ */
+static int powernv_led_create(struct device *dev,
+			      struct powernv_led_data *powernv_led,
+			      const char *led_type_desc)
+{
+	int rc;
+
+	/* Make sure LED type is supported */
+	powernv_led->led_type = powernv_get_led_type(led_type_desc);
+	if (powernv_led->led_type == -1) {
+		dev_warn(dev, "%s: No support for led type : %s\n",
+			 __func__, led_type_desc);
+		return -EINVAL;
+	}
+
+	/* Create the name for classdev */
+	powernv_led->cdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s",
+						powernv_led->loc_code,
+						led_type_desc);
+	if (!powernv_led->cdev.name) {
+		dev_err(dev,
+			"%s: Memory allocation failed for classdev name\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	powernv_led->cdev.brightness_set = powernv_brightness_set;
+	powernv_led->cdev.brightness_get = powernv_brightness_get;
+	powernv_led->cdev.brightness = LED_OFF;
+	powernv_led->cdev.max_brightness = LED_FULL;
+
+	/* Register the classdev */
+	rc = devm_led_classdev_register(dev, &powernv_led->cdev);
+	if (rc) {
+		dev_err(dev, "%s: Classdev registration failed for %s\n",
+			__func__, powernv_led->cdev.name);
+	}
+
+	return rc;
+}
+
+/* Go through LED device tree node and register LED classdev structure */
+static int powernv_led_classdev(struct platform_device *pdev,
+				struct device_node *led_node,
+				struct powernv_led_common *powernv_led_common)
+{
+	const char *cur = NULL;
+	int rc = -1;
+	struct property *p;
+	struct device_node *np;
+	struct powernv_led_data *powernv_led;
+	struct device *dev = &pdev->dev;
+
+	for_each_child_of_node(led_node, np) {
+		p = of_find_property(np, "led-types", NULL);
+		if (!p)
+			continue;
+
+		while ((cur = of_prop_next_string(p, cur)) != NULL) {
+			powernv_led = devm_kzalloc(dev, sizeof(*powernv_led),
+						   GFP_KERNEL);
+			if (!powernv_led)
+				return -ENOMEM;
+
+			powernv_led->common = powernv_led_common;
+			powernv_led->loc_code = (char *)np->name;
+
+			rc = powernv_led_create(dev, powernv_led, cur);
+			if (rc)
+				return rc;
+		} /* while end */
+	}
+
+	return rc;
+}
+
+/* Platform driver probe */
+static int powernv_led_probe(struct platform_device *pdev)
+{
+	struct device_node *led_node;
+	struct powernv_led_common *powernv_led_common;
+	struct device *dev = &pdev->dev;
+
+	led_node = of_find_node_by_path("/ibm,opal/leds");
+	if (!led_node) {
+		dev_err(dev, "%s: LED parent device node not found\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	powernv_led_common = devm_kzalloc(dev, sizeof(*powernv_led_common),
+					  GFP_KERNEL);
+	if (!powernv_led_common)
+		return -ENOMEM;
+
+	mutex_init(&powernv_led_common->lock);
+	powernv_led_common->max_led_type = cpu_to_be64(OPAL_SLOT_LED_TYPE_MAX);
+
+	platform_set_drvdata(pdev, powernv_led_common);
+
+	return powernv_led_classdev(pdev, led_node, powernv_led_common);
+}
+
+/* Platform driver remove */
+static int powernv_led_remove(struct platform_device *pdev)
+{
+	struct powernv_led_common *powernv_led_common;
+
+	/* Disable LED operation */
+	powernv_led_common = platform_get_drvdata(pdev);
+	powernv_led_common->led_disabled = true;
+
+	/* Destroy lock */
+	mutex_destroy(&powernv_led_common->lock);
+
+	dev_info(&pdev->dev, "PowerNV led module unregistered\n");
+	return 0;
+}
+
+/* Platform driver property match */
+static const struct of_device_id powernv_led_match[] = {
+	{
+		.compatible	= "ibm,opal-v3-led",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, powernv_led_match);
+
+static struct platform_driver powernv_led_driver = {
+	.probe	= powernv_led_probe,
+	.remove = powernv_led_remove,
+	.driver = {
+		.name = "powernv-led-driver",
+		.of_match_table = powernv_led_match,
+	},
+};
+
+module_platform_driver(powernv_led_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PowerNV LED driver");
+MODULE_AUTHOR("Vasant Hegde <hegdevasant@linux.vnet.ibm.com>");
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 109dcaa..68dcbcb 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -408,6 +408,7 @@
 	{ "therm_adm1030", adm1030 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, therm_windtunnel_id);
 
 static int
 do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
@@ -459,6 +460,7 @@
 	.compatible	= "adm1030"
     }, {}
 };
+MODULE_DEVICE_TABLE(of, therm_of_match);
 
 static struct platform_driver therm_of_driver = {
 	.driver = {
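The two MODULE_DEVICE_TABLE() additions export these match tables into the module's alias information, so modprobe can autoload therm_windtunnel when a matching i2c or OF device appears. The general shape, sketched for a hypothetical driver:

    #include <linux/i2c.h>
    #include <linux/module.h>

    /* Hypothetical ID table; real drivers list their supported parts. */
    static const struct i2c_device_id example_id[] = {
    	{ "example_sensor", 0 },
    	{ }
    };
    /* Emits MODALIAS data so the module loads automatically on a match. */
    MODULE_DEVICE_TABLE(i2c, example_id);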
diff --git a/drivers/macintosh/windfarm.h b/drivers/macintosh/windfarm.h
index 028cdac..901c42f 100644
--- a/drivers/macintosh/windfarm.h
+++ b/drivers/macintosh/windfarm.h
@@ -53,11 +53,9 @@
  * the kref and wf_unregister_control will decrement it, thus the
  * object creating/disposing a given control shouldn't assume it
  * still exists after wf_unregister_control has been called.
- * wf_find_control will inc the refcount for you
  */
 extern int wf_register_control(struct wf_control *ct);
 extern void wf_unregister_control(struct wf_control *ct);
-extern struct wf_control * wf_find_control(const char *name);
 extern int wf_get_control(struct wf_control *ct);
 extern void wf_put_control(struct wf_control *ct);
 
@@ -117,7 +115,6 @@
 /* Same lifetime rules as controls */
 extern int wf_register_sensor(struct wf_sensor *sr);
 extern void wf_unregister_sensor(struct wf_sensor *sr);
-extern struct wf_sensor * wf_find_sensor(const char *name);
 extern int wf_get_sensor(struct wf_sensor *sr);
 extern void wf_put_sensor(struct wf_sensor *sr);
 
@@ -144,7 +141,6 @@
 /* Overtemp conditions. Those are refcounted */
 extern void wf_set_overtemp(void);
 extern void wf_clear_overtemp(void);
-extern int wf_is_overtemp(void);
 
 #define WF_EVENT_NEW_CONTROL	0 /* param is wf_control * */
 #define WF_EVENT_NEW_SENSOR	1 /* param is wf_sensor * */
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index 3ee198b6..465d770 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -72,7 +72,7 @@
 	blocking_notifier_call_chain(&wf_client_list, event, param);
 }
 
-int wf_critical_overtemp(void)
+static int wf_critical_overtemp(void)
 {
 	static char * critical_overtemp_path = "/sbin/critical_overtemp";
 	char *argv[] = { critical_overtemp_path, NULL };
@@ -84,7 +84,6 @@
 	return call_usermodehelper(critical_overtemp_path,
 				   argv, envp, UMH_WAIT_EXEC);
 }
-EXPORT_SYMBOL_GPL(wf_critical_overtemp);
 
 static int wf_thread_func(void *data)
 {
@@ -255,24 +254,6 @@
 }
 EXPORT_SYMBOL_GPL(wf_unregister_control);
 
-struct wf_control * wf_find_control(const char *name)
-{
-	struct wf_control *ct;
-
-	mutex_lock(&wf_lock);
-	list_for_each_entry(ct, &wf_controls, link) {
-		if (!strcmp(ct->name, name)) {
-			if (wf_get_control(ct))
-				ct = NULL;
-			mutex_unlock(&wf_lock);
-			return ct;
-		}
-	}
-	mutex_unlock(&wf_lock);
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(wf_find_control);
-
 int wf_get_control(struct wf_control *ct)
 {
 	if (!try_module_get(ct->ops->owner))
@@ -368,24 +349,6 @@
 }
 EXPORT_SYMBOL_GPL(wf_unregister_sensor);
 
-struct wf_sensor * wf_find_sensor(const char *name)
-{
-	struct wf_sensor *sr;
-
-	mutex_lock(&wf_lock);
-	list_for_each_entry(sr, &wf_sensors, link) {
-		if (!strcmp(sr->name, name)) {
-			if (wf_get_sensor(sr))
-				sr = NULL;
-			mutex_unlock(&wf_lock);
-			return sr;
-		}
-	}
-	mutex_unlock(&wf_lock);
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(wf_find_sensor);
-
 int wf_get_sensor(struct wf_sensor *sr)
 {
 	if (!try_module_get(sr->ops->owner))
@@ -435,7 +398,7 @@
 {
 	mutex_lock(&wf_lock);
 	blocking_notifier_chain_unregister(&wf_client_list, nb);
-	wf_client_count++;
+	wf_client_count--;
 	if (wf_client_count == 0)
 		wf_stop_thread();
 	mutex_unlock(&wf_lock);
@@ -474,12 +437,6 @@
 }
 EXPORT_SYMBOL_GPL(wf_clear_overtemp);
 
-int wf_is_overtemp(void)
-{
-	return (wf_overtemp != 0);
-}
-EXPORT_SYMBOL_GPL(wf_is_overtemp);
-
 static int __init windfarm_core_init(void)
 {
 	DBG("wf: core loaded\n");
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 200366c..1ffbeb1 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -772,7 +772,7 @@
 	struct dm_cache_policy policy;
 
 	/* protects everything */
-	struct mutex lock;
+	spinlock_t lock;
 	dm_cblock_t cache_size;
 	sector_t cache_block_size;
 
@@ -807,13 +807,7 @@
 	/*
 	 * Keeps track of time, incremented by the core.  We use this to
 	 * avoid attributing multiple hits within the same tick.
-	 *
-	 * Access to tick_protected should be done with the spin lock held.
-	 * It's copied to tick at the start of the map function (within the
-	 * mutex).
 	 */
-	spinlock_t tick_lock;
-	unsigned tick_protected;
 	unsigned tick;
 
 	/*
@@ -1296,46 +1290,20 @@
 	kfree(mq);
 }
 
-static void copy_tick(struct smq_policy *mq)
-{
-	unsigned long flags, tick;
-
-	spin_lock_irqsave(&mq->tick_lock, flags);
-	tick = mq->tick_protected;
-	if (tick != mq->tick) {
-		update_sentinels(mq);
-		end_hotspot_period(mq);
-		end_cache_period(mq);
-		mq->tick = tick;
-	}
-	spin_unlock_irqrestore(&mq->tick_lock, flags);
-}
-
-static bool maybe_lock(struct smq_policy *mq, bool can_block)
-{
-	if (can_block) {
-		mutex_lock(&mq->lock);
-		return true;
-	} else
-		return mutex_trylock(&mq->lock);
-}
-
 static int smq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
 		   bool can_block, bool can_migrate, bool fast_promote,
 		   struct bio *bio, struct policy_locker *locker,
 		   struct policy_result *result)
 {
 	int r;
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
 	result->op = POLICY_MISS;
 
-	if (!maybe_lock(mq, can_block))
-		return -EWOULDBLOCK;
-
-	copy_tick(mq);
+	spin_lock_irqsave(&mq->lock, flags);
 	r = map(mq, bio, oblock, can_migrate, fast_promote, locker, result);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return r;
 }
@@ -1343,20 +1311,18 @@
 static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
 {
 	int r;
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 	struct entry *e;
 
-	if (!mutex_trylock(&mq->lock))
-		return -EWOULDBLOCK;
-
+	spin_lock_irqsave(&mq->lock, flags);
 	e = h_lookup(&mq->table, oblock);
 	if (e) {
 		*cblock = infer_cblock(mq, e);
 		r = 0;
 	} else
 		r = -ENOENT;
-
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return r;
 }
@@ -1375,20 +1341,22 @@
 
 static void smq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
 {
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	__smq_set_clear_dirty(mq, oblock, true);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 }
 
 static void smq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
 {
 	struct smq_policy *mq = to_smq_policy(p);
+	unsigned long flags;
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	__smq_set_clear_dirty(mq, oblock, false);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 }
 
 static int smq_load_mapping(struct dm_cache_policy *p,
@@ -1433,14 +1401,14 @@
 	struct smq_policy *mq = to_smq_policy(p);
 	int r = 0;
 
-	mutex_lock(&mq->lock);
-
+	/*
+	 * We don't need to lock here since this method is only called once
+	 * the IO has stopped.
+	 */
 	r = smq_save_hints(mq, &mq->clean, fn, context);
 	if (!r)
 		r = smq_save_hints(mq, &mq->dirty, fn, context);
 
-	mutex_unlock(&mq->lock);
-
 	return r;
 }
 
@@ -1458,10 +1426,11 @@
 static void smq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
 {
 	struct smq_policy *mq = to_smq_policy(p);
+	unsigned long flags;
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	__remove_mapping(mq, oblock);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 }
 
 static int __remove_cblock(struct smq_policy *mq, dm_cblock_t cblock)
@@ -1480,11 +1449,12 @@
 static int smq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
 {
 	int r;
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	r = __remove_cblock(mq, cblock);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return r;
 }
@@ -1537,11 +1507,12 @@
 			      dm_cblock_t *cblock, bool critical_only)
 {
 	int r;
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	r = __smq_writeback_work(mq, oblock, cblock, critical_only);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return r;
 }
@@ -1562,21 +1533,23 @@
 static void smq_force_mapping(struct dm_cache_policy *p,
 			      dm_oblock_t current_oblock, dm_oblock_t new_oblock)
 {
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	__force_mapping(mq, current_oblock, new_oblock);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 }
 
 static dm_cblock_t smq_residency(struct dm_cache_policy *p)
 {
 	dm_cblock_t r;
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	r = to_cblock(mq->cache_alloc.nr_allocated);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return r;
 }
@@ -1586,15 +1559,12 @@
 	struct smq_policy *mq = to_smq_policy(p);
 	unsigned long flags;
 
-	spin_lock_irqsave(&mq->tick_lock, flags);
-	mq->tick_protected++;
-	spin_unlock_irqrestore(&mq->tick_lock, flags);
-
-	if (can_block) {
-		mutex_lock(&mq->lock);
-		copy_tick(mq);
-		mutex_unlock(&mq->lock);
-	}
+	spin_lock_irqsave(&mq->lock, flags);
+	mq->tick++;
+	update_sentinels(mq);
+	end_hotspot_period(mq);
+	end_cache_period(mq);
+	spin_unlock_irqrestore(&mq->lock, flags);
 }
 
 /* Init the policy plugin interface function pointers. */
@@ -1694,10 +1664,8 @@
 	} else
 		mq->cache_hit_bits = NULL;
 
-	mq->tick_protected = 0;
 	mq->tick = 0;
-	mutex_init(&mq->lock);
-	spin_lock_init(&mq->tick_lock);
+	spin_lock_init(&mq->lock);
 
 	q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
 	mq->hotspot.nr_top_levels = 8;
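The conversion above replaces the policy's mutex (plus the separate tick_lock) with a single IRQ-safe spinlock: the critical sections are short and non-sleeping, so they can now run from the I/O path without the mutex_trylock()/-EWOULDBLOCK fallback, and the tick bookkeeping happens directly under the same lock. The resulting pattern, as a self-contained sketch with hypothetical names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);	/* stands in for mq->lock */
    static unsigned int example_tick;

    /* Short, non-sleeping critical section, safe from interrupt context. */
    static void example_tick_handler(void)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&example_lock, flags);
    	example_tick++;			/* was deferred via tick_protected */
    	spin_unlock_irqrestore(&example_lock, flags);
    }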
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 7245071..dd90d12 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -424,7 +424,6 @@
 		wake_up(&cache->migration_wait);
 
 	mempool_free(mg, cache->migration_pool);
-	wake_worker(cache);
 }
 
 static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
@@ -1064,14 +1063,6 @@
 	atomic_dec(&cache->nr_io_migrations);
 }
 
-static void __cell_release(struct cache *cache, struct dm_bio_prison_cell *cell,
-			   bool holder, struct bio_list *bios)
-{
-	(holder ? dm_cell_release : dm_cell_release_no_holder)
-		(cache->prison, cell, bios);
-	free_prison_cell(cache, cell);
-}
-
 static bool discard_or_flush(struct bio *bio)
 {
 	return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD);
@@ -1079,14 +1070,13 @@
 
 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
 {
-	if (discard_or_flush(cell->holder))
+	if (discard_or_flush(cell->holder)) {
 		/*
-		 * We have to handle these bios
-		 * individually.
+		 * We have to handle these bios individually.
 		 */
-		__cell_release(cache, cell, true, &cache->deferred_bios);
-
-	else
+		dm_cell_release(cache->prison, cell, &cache->deferred_bios);
+		free_prison_cell(cache, cell);
+	} else
 		list_add_tail(&cell->user_list, &cache->deferred_cells);
 }
 
@@ -1113,7 +1103,7 @@
 static void cell_error_with_code(struct cache *cache, struct dm_bio_prison_cell *cell, int err)
 {
 	dm_cell_error(cache->prison, cell, err);
-	dm_bio_prison_free_cell(cache->prison, cell);
+	free_prison_cell(cache, cell);
 }
 
 static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -1123,8 +1113,11 @@
 
 static void free_io_migration(struct dm_cache_migration *mg)
 {
-	dec_io_migrations(mg->cache);
+	struct cache *cache = mg->cache;
+
+	dec_io_migrations(cache);
 	free_migration(mg);
+	wake_worker(cache);
 }
 
 static void migration_failure(struct dm_cache_migration *mg)
@@ -1351,16 +1344,18 @@
 {
 	dm_dblock_t b, e;
 	struct bio *bio = mg->new_ocell->holder;
+	struct cache *cache = mg->cache;
 
-	calc_discard_block_range(mg->cache, bio, &b, &e);
+	calc_discard_block_range(cache, bio, &b, &e);
 	while (b != e) {
-		set_discard(mg->cache, b);
+		set_discard(cache, b);
 		b = to_dblock(from_dblock(b) + 1);
 	}
 
 	bio_endio(bio);
-	cell_defer(mg->cache, mg->new_ocell, false);
+	cell_defer(cache, mg->new_ocell, false);
 	free_migration(mg);
+	wake_worker(cache);
 }
 
 static void issue_copy_or_discard(struct dm_cache_migration *mg)
@@ -1729,6 +1724,8 @@
 		remap_to_origin(cache, bio);
 		issue(cache, bio);
 	}
+
+	free_prison_cell(cache, cell);
 }
 
 static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_cell *cell,
@@ -1763,6 +1760,8 @@
 		remap_to_cache(cache, bio, cblock);
 		issue(cache, bio);
 	}
+
+	free_prison_cell(cache, cell);
 }
 
 /*----------------------------------------------------------------*/
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ba5c210..d60c88d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1811,11 +1811,13 @@
 	}
 	cc->iv_offset = tmpll;
 
-	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
+	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
+	if (ret) {
 		ti->error = "Device lookup failed";
 		goto bad;
 	}
 
+	ret = -EINVAL;
 	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
 		ti->error = "Invalid device sector";
 		goto bad;
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 57b6a19..b34f6e2 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -129,6 +129,7 @@
 	struct delay_c *dc;
 	unsigned long long tmpll;
 	char dummy;
+	int ret;
 
 	if (argc != 3 && argc != 6) {
 		ti->error = "requires exactly 3 or 6 arguments";
@@ -143,6 +144,7 @@
 
 	dc->reads = dc->writes = 0;
 
+	ret = -EINVAL;
 	if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
 		ti->error = "Invalid device sector";
 		goto bad;
@@ -154,12 +156,14 @@
 		goto bad;
 	}
 
-	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
-			  &dc->dev_read)) {
+	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+			    &dc->dev_read);
+	if (ret) {
 		ti->error = "Device lookup failed";
 		goto bad;
 	}
 
+	ret = -EINVAL;
 	dc->dev_write = NULL;
 	if (argc == 3)
 		goto out;
@@ -175,13 +179,15 @@
 		goto bad_dev_read;
 	}
 
-	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
-			  &dc->dev_write)) {
+	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
+			    &dc->dev_write);
+	if (ret) {
 		ti->error = "Write device lookup failed";
 		goto bad_dev_read;
 	}
 
 out:
+	ret = -EINVAL;
 	dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
 	if (!dc->kdelayd_wq) {
 		DMERR("Couldn't start kdelayd");
@@ -208,7 +214,7 @@
 	dm_put_device(ti, dc->dev_read);
 bad:
 	kfree(dc);
-	return -EINVAL;
+	return ret;
 }
 
 static void delay_dtr(struct dm_target *ti)
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index afab13b..645e8b4 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -183,6 +183,7 @@
 
 	devname = dm_shift_arg(&as);
 
+	r = -EINVAL;
 	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
 		ti->error = "Invalid device sector";
 		goto bad;
@@ -211,7 +212,8 @@
 	if (r)
 		goto bad;
 
-	if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev)) {
+	r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
+	if (r) {
 		ti->error = "Device lookup failed";
 		goto bad;
 	}
@@ -224,7 +226,7 @@
 
 bad:
 	kfree(fc);
-	return -EINVAL;
+	return r;
 }
 
 static void flakey_dtr(struct dm_target *ti)
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 7dd5fc8..436f5c9 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -30,6 +30,7 @@
 	struct linear_c *lc;
 	unsigned long long tmp;
 	char dummy;
+	int ret;
 
 	if (argc != 2) {
 		ti->error = "Invalid argument count";
@@ -42,13 +43,15 @@
 		return -ENOMEM;
 	}
 
+	ret = -EINVAL;
 	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
 		ti->error = "dm-linear: Invalid device sector";
 		goto bad;
 	}
 	lc->start = tmp;
 
-	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev)) {
+	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
+	if (ret) {
 		ti->error = "dm-linear: Device lookup failed";
 		goto bad;
 	}
@@ -61,7 +64,7 @@
 
       bad:
 	kfree(lc);
-	return -EINVAL;
+	return ret;
 }
 
 static void linear_dtr(struct dm_target *ti)
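This dm-linear change and the neighboring dm-crypt, dm-delay, dm-flakey, dm-log-writes, dm-raid1 and dm-stripe fixes all apply one idiom: rather than collapsing every constructor failure into -EINVAL (or -ENXIO), the real error from dm_get_device() is stored in a local and returned through the common bad: label, with the local reset to -EINVAL before each parsing step. A generic sketch with hypothetical helpers:

    #include <errno.h>

    static int parse_args(void)      { return 1; }	/* 1 = ok (stub) */
    static int acquire_device(void)  { return 0; }	/* 0 or -errno (stub) */
    static void cleanup(void)        { }

    static int example_ctr(void)
    {
    	int ret;

    	ret = -EINVAL;			/* default for parse failures */
    	if (!parse_args())
    		goto bad;

    	ret = acquire_device();		/* propagate the real error code */
    	if (ret)
    		goto bad;

    	return 0;
    bad:
    	cleanup();
    	return ret;
    }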
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 316cc3f..b2912db 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -417,6 +417,7 @@
 	struct log_writes_c *lc;
 	struct dm_arg_set as;
 	const char *devname, *logdevname;
+	int ret;
 
 	as.argc = argc;
 	as.argv = argv;
@@ -440,18 +441,22 @@
 	atomic_set(&lc->pending_blocks, 0);
 
 	devname = dm_shift_arg(&as);
-	if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev)) {
+	ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev);
+	if (ret) {
 		ti->error = "Device lookup failed";
 		goto bad;
 	}
 
 	logdevname = dm_shift_arg(&as);
-	if (dm_get_device(ti, logdevname, dm_table_get_mode(ti->table), &lc->logdev)) {
+	ret = dm_get_device(ti, logdevname, dm_table_get_mode(ti->table),
+			    &lc->logdev);
+	if (ret) {
 		ti->error = "Log device lookup failed";
 		dm_put_device(ti, lc->dev);
 		goto bad;
 	}
 
+	ret = -EINVAL;
 	lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
 	if (!lc->log_kthread) {
 		ti->error = "Couldn't alloc kthread";
@@ -476,7 +481,7 @@
 
 bad:
 	kfree(lc);
-	return -EINVAL;
+	return ret;
 }
 
 static int log_mark(struct log_writes_c *lc, char *data)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index e1eabfb..f2a363a 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -945,16 +945,18 @@
 {
 	unsigned long long offset;
 	char dummy;
+	int ret;
 
 	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
 		ti->error = "Invalid offset";
 		return -EINVAL;
 	}
 
-	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
-			  &ms->mirror[mirror].dev)) {
+	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+			    &ms->mirror[mirror].dev);
+	if (ret) {
 		ti->error = "Device lookup failure";
-		return -ENXIO;
+		return ret;
 	}
 
 	ms->mirror[mirror].ms = ms;
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 808b841..bf71583 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -533,7 +533,7 @@
 		chunk = area_location(ps, ps->current_area);
 
 		area = dm_bufio_read(client, chunk, &bp);
-		if (unlikely(IS_ERR(area))) {
+		if (IS_ERR(area)) {
 			r = PTR_ERR(area);
 			goto ret_destroy_bufio;
 		}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index d10b687..c0bcd65 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -63,6 +63,13 @@
 	 */
 	int valid;
 
+	/*
+	 * The snapshot overflowed because of a write to the snapshot device.
+	 * We don't have to invalidate the snapshot in this case, but we need
+	 * to prevent further writes.
+	 */
+	int snapshot_overflowed;
+
 	/* Origin writes don't trigger exceptions until this is set */
 	int active;
 
@@ -1152,6 +1159,7 @@
 
 	s->ti = ti;
 	s->valid = 1;
+	s->snapshot_overflowed = 0;
 	s->active = 0;
 	atomic_set(&s->pending_exceptions_count, 0);
 	s->exception_start_sequence = 0;
@@ -1301,6 +1309,7 @@
 
 	snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
 	snap_dest->valid = snap_src->valid;
+	snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;
 
 	/*
 	 * Set source invalid to ensure it receives no further I/O.
@@ -1691,7 +1700,7 @@
 	 * to copy an exception */
 	down_write(&s->lock);
 
-	if (!s->valid) {
+	if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) {
 		r = -EIO;
 		goto out_unlock;
 	}
@@ -1715,7 +1724,7 @@
 			pe = alloc_pending_exception(s);
 			down_write(&s->lock);
 
-			if (!s->valid) {
+			if (!s->valid || s->snapshot_overflowed) {
 				free_pending_exception(pe);
 				r = -EIO;
 				goto out_unlock;
@@ -1730,7 +1739,8 @@
 
 			pe = __find_pending_exception(s, pe, chunk);
 			if (!pe) {
-				__invalidate_snapshot(s, -ENOMEM);
+				s->snapshot_overflowed = 1;
+				DMERR("Snapshot overflowed: Unable to allocate exception.");
 				r = -EIO;
 				goto out_unlock;
 			}
@@ -1990,6 +2000,8 @@
 			DMEMIT("Invalid");
 		else if (snap->merge_failed)
 			DMEMIT("Merge failed");
+		else if (snap->snapshot_overflowed)
+			DMEMIT("Overflow");
 		else {
 			if (snap->store->type->usage) {
 				sector_t total_sectors, sectors_allocated,
@@ -2353,7 +2365,7 @@
 
 static struct target_type snapshot_target = {
 	.name    = "snapshot",
-	.version = {1, 13, 0},
+	.version = {1, 14, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 8a8b48f..8289804 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -457,12 +457,24 @@
 	list_for_each_entry(s, &stats->list, list_entry) {
 		if (!program || !strcmp(program, s->program_id)) {
 			len = s->end - s->start;
-			DMEMIT("%d: %llu+%llu %llu %s %s\n", s->id,
+			DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
 				(unsigned long long)s->start,
 				(unsigned long long)len,
 				(unsigned long long)s->step,
 				s->program_id,
 				s->aux_data);
+			if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
+				DMEMIT(" precise_timestamps");
+			if (s->n_histogram_entries) {
+				unsigned i;
+				DMEMIT(" histogram:");
+				for (i = 0; i < s->n_histogram_entries; i++) {
+					if (i)
+						DMEMIT(",");
+					DMEMIT("%llu", s->histogram_boundaries[i]);
+				}
+			}
+			DMEMIT("\n");
 		}
 	}
 	mutex_unlock(&stats->mutex);
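With this change each listed statistics line gains optional trailing tokens for precise timestamps and histogram boundaries instead of ending after aux_data. An illustrative line (all values hypothetical), following the "%d: %llu+%llu %llu %s %s" format plus the new suffixes:

    1: 0+2097152 512 dmstats - precise_timestamps histogram:100000,200000,500000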
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 484029d..797ddb9 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -75,13 +75,15 @@
 {
 	unsigned long long start;
 	char dummy;
+	int ret;
 
 	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1)
 		return -EINVAL;
 
-	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
-			  &sc->stripe[stripe].dev))
-		return -ENXIO;
+	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+			    &sc->stripe[stripe].dev);
+	if (ret)
+		return ret;
 
 	sc->stripe[stripe].physical_start = start;
 
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 271a662..6578b7b 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -332,9 +332,6 @@
  *
  * Description:
  *    Asynchronously issue a discard request for the sectors in question.
- *    NOTE: this variant of blk-core's blkdev_issue_discard() is a stop-gap
- *    that is being kept local to DM thinp until the block changes to allow
- *    late bio splitting land upstream.
  */
 static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector,
 					sector_t nr_sects, gfp_t gfp_mask, unsigned long flags,
@@ -342,91 +339,36 @@
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
-	unsigned int max_discard_sectors, granularity;
-	int alignment;
 	struct bio *bio;
-	int ret = 0;
-	struct blk_plug plug;
 
-	if (!q)
+	if (!q || !nr_sects)
 		return -ENXIO;
 
 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
 
-	/* Zero-sector (unknown) and one-sector granularities are the same.  */
-	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
-	/*
-	 * Ensure that max_discard_sectors is of the proper
-	 * granularity, so that requests stay aligned after a split.
-	 */
-	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-	max_discard_sectors -= max_discard_sectors % granularity;
-	if (unlikely(!max_discard_sectors)) {
-		/* Avoid infinite loop below. Being cautious never hurts. */
-		return -EOPNOTSUPP;
-	}
-
 	if (flags & BLKDEV_DISCARD_SECURE) {
 		if (!blk_queue_secdiscard(q))
 			return -EOPNOTSUPP;
 		type |= REQ_SECURE;
 	}
 
-	blk_start_plug(&plug);
-	while (nr_sects) {
-		unsigned int req_sects;
-		sector_t end_sect, tmp;
+	/*
+	 * Required bio_put occurs in bio_endio thanks to bio_chain below
+	 */
+	bio = bio_alloc(gfp_mask, 1);
+	if (!bio)
+		return -ENOMEM;
 
-		/*
-		 * Required bio_put occurs in bio_endio thanks to bio_chain below
-		 */
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
+	bio_chain(bio, parent_bio);
 
-		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
+	bio->bi_iter.bi_sector = sector;
+	bio->bi_bdev = bdev;
+	bio->bi_iter.bi_size = nr_sects << 9;
 
-		/*
-		 * If splitting a request, and the next starting sector would be
-		 * misaligned, stop the discard at the previous aligned sector.
-		 */
-		end_sect = sector + req_sects;
-		tmp = end_sect;
-		if (req_sects < nr_sects &&
-		    sector_div(tmp, granularity) != alignment) {
-			end_sect = end_sect - alignment;
-			sector_div(end_sect, granularity);
-			end_sect = end_sect * granularity + alignment;
-			req_sects = end_sect - sector;
-		}
+	submit_bio(type, bio);
 
-		bio_chain(bio, parent_bio);
-
-		bio->bi_iter.bi_sector = sector;
-		bio->bi_bdev = bdev;
-
-		bio->bi_iter.bi_size = req_sects << 9;
-		nr_sects -= req_sects;
-		sector = end_sect;
-
-		submit_bio(type, bio);
-
-		/*
-		 * We can loop for a long time in here, if someone does
-		 * full device discards (like mkfs). Be nice and allow
-		 * us to schedule out to avoid softlocking if preempt
-		 * is disabled.
-		 */
-		cond_resched();
-	}
-	blk_finish_plug(&plug);
-
-	return ret;
+	return 0;
 }
 
 static bool block_size_is_power_of_two(struct pool *pool)
@@ -1543,9 +1485,8 @@
 }
 
 /*
- * FIXME: DM local hack to defer parent bios's end_io until we
- * _know_ all chained sub range discard bios have completed.
- * Will go away once late bio splitting lands upstream!
+ * __bio_inc_remaining() is used to defer parent bios's end_io until
+ * we _know_ all chained sub range discard bios have completed.
  */
 static inline void __bio_inc_remaining(struct bio *bio)
 {
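The simplified __blkdev_issue_discard_async() above relies on bio_chain(): one child bio covers the whole range, its completion is tied to the parent so the parent's end_io only runs once the child finishes, and the child's bio_put() happens in bio_endio(). The core of that pattern, sketched as a hypothetical helper using the same kernel APIs as the hunk:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static int issue_chained_discard_sketch(struct block_device *bdev,
    					sector_t sector, sector_t nr_sects,
    					struct bio *parent)
    {
    	struct bio *bio = bio_alloc(GFP_NOIO, 1);

    	if (!bio)
    		return -ENOMEM;

    	bio_chain(bio, parent);		/* parent completes after the child */
    	bio->bi_iter.bi_sector = sector;
    	bio->bi_bdev = bdev;
    	bio->bi_iter.bi_size = nr_sects << 9;
    	submit_bio(REQ_WRITE | REQ_DISCARD, bio);
    	return 0;
    }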
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index c137dcb..edc624b 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -26,8 +26,6 @@
 #define DM_VERITY_ENV_LENGTH		42
 #define DM_VERITY_ENV_VAR_NAME		"DM_VERITY_ERR_BLOCK_NR"
 
-#define DM_VERITY_IO_VEC_INLINE		16
-#define DM_VERITY_MEMPOOL_SIZE		4
 #define DM_VERITY_DEFAULT_PREFETCH_SIZE	262144
 
 #define DM_VERITY_MAX_LEVELS		63
@@ -76,8 +74,6 @@
 	enum verity_mode mode;	/* mode for handling verification errors */
 	unsigned corrupted_errs;/* Number of errors for corrupted blocks */
 
-	mempool_t *vec_mempool;	/* mempool of bio vector */
-
 	struct workqueue_struct *verify_wq;
 
 	/* starting blocks for each tree level. 0 is the lowest level. */
@@ -271,7 +267,7 @@
 	verity_hash_at_level(v, block, level, &hash_block, &offset);
 
 	data = dm_bufio_read(v->bufio, hash_block, &buf);
-	if (unlikely(IS_ERR(data)))
+	if (IS_ERR(data))
 		return PTR_ERR(data);
 
 	aux = dm_bufio_get_aux_data(buf);
@@ -677,9 +673,6 @@
 	if (v->verify_wq)
 		destroy_workqueue(v->verify_wq);
 
-	if (v->vec_mempool)
-		mempool_destroy(v->vec_mempool);
-
 	if (v->bufio)
 		dm_bufio_client_destroy(v->bufio);
 
@@ -948,14 +941,6 @@
 
 	ti->per_bio_data_size = roundup(sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2, __alignof__(struct dm_verity_io));
 
-	v->vec_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
-					BIO_MAX_PAGES * sizeof(struct bio_vec));
-	if (!v->vec_mempool) {
-		ti->error = "Cannot allocate vector mempool";
-		r = -ENOMEM;
-		goto bad;
-	}
-
 	/* WQ_UNBOUND greatly improves performance when running on ramdisk */
 	v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
 	if (!v->verify_wq) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6ffc01b..6264781 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1464,7 +1464,7 @@
 		md = tio->io->md;
 		dec_pending(tio->io, r);
 		free_tio(md, tio);
-	} else if (r) {
+	} else if (r != DM_MAPIO_SUBMITTED) {
 		DMWARN("unimplemented target map return value: %d", r);
 		BUG();
 	}
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 4d6c9b6..88dbe7b 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -454,7 +454,7 @@
 	int r;
 
 	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
-	if (unlikely(IS_ERR(p)))
+	if (IS_ERR(p))
 		return PTR_ERR(p);
 
 	aux = dm_bufio_get_aux_data(to_buffer(*result));
@@ -490,7 +490,7 @@
 		return -EPERM;
 
 	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
-	if (unlikely(IS_ERR(p)))
+	if (IS_ERR(p))
 		return PTR_ERR(p);
 
 	aux = dm_bufio_get_aux_data(to_buffer(*result));
@@ -523,7 +523,7 @@
 	int r;
 
 	p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result);
-	if (unlikely(IS_ERR(p)))
+	if (IS_ERR(p))
 		return PTR_ERR(p);
 	if (unlikely(!p))
 		return -EWOULDBLOCK;
@@ -559,7 +559,7 @@
 		return -EPERM;
 
 	p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
-	if (unlikely(IS_ERR(p)))
+	if (IS_ERR(p))
 		return PTR_ERR(p);
 
 	memset(p, 0, dm_bm_block_size(bm));
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 4222f77..421a36c 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -409,29 +409,11 @@
 	return 0;
 }
 
-static int get_nr_entries(struct dm_transaction_manager *tm,
-			  dm_block_t b, uint32_t *result)
-{
-	int r;
-	struct dm_block *block;
-	struct btree_node *n;
-
-	r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
-	if (r)
-		return r;
-
-	n = dm_block_data(block);
-	*result = le32_to_cpu(n->header.nr_entries);
-
-	return dm_tm_unlock(tm, block);
-}
-
 static int rebalance_children(struct shadow_spine *s,
 			      struct dm_btree_info *info,
 			      struct dm_btree_value_type *vt, uint64_t key)
 {
 	int i, r, has_left_sibling, has_right_sibling;
-	uint32_t child_entries;
 	struct btree_node *n;
 
 	n = dm_block_data(shadow_current(s));
@@ -458,10 +440,6 @@
 	if (i < 0)
 		return -ENODATA;
 
-	r = get_nr_entries(info->tm, value64(n, i), &child_entries);
-	if (r)
-		return r;
-
 	has_left_sibling = i > 0;
 	has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);
 
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index c7726ce..b6cec25 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -420,8 +420,8 @@
  *
  * Where A* is a shadow of A.
  */
-static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
-			       unsigned parent_index, uint64_t key)
+static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
+			       uint64_t key)
 {
 	int r;
 	size_t size;
@@ -625,7 +625,7 @@
 			if (top)
 				r = btree_split_beneath(s, key);
 			else
-				r = btree_split_sibling(s, root, i, key);
+				r = btree_split_sibling(s, i, key);
 
 			if (r < 0)
 				return r;
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index c70abab..5dd3977 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -465,7 +465,7 @@
 
 	of_node_put(child);
 
-	pd->enable_gpio = devm_gpiod_get(&client->dev, "enable");
+	pd->enable_gpio = devm_gpiod_get(&client->dev, "enable", GPIOD_OUT_LOW);
 	if (!pd->enable_gpio) {
 		dev_err(&client->dev, "Error getting GPIO\n");
 		return -EINVAL;
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 58f6548..284ac4c 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -2157,7 +2157,7 @@
 	/* Get IRAM pool from device tree or platform data */
 	pool = of_gen_pool_get(np, "iram", 0);
 	if (!pool && pdata)
-		pool = gen_pool_get(pdata->iram_dev);
+		pool = gen_pool_get(pdata->iram_dev, NULL);
 	if (!pool) {
 		dev_err(&pdev->dev, "iram pool not available\n");
 		return -ENOMEM;
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 410c397..e87459f 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -62,7 +62,7 @@
 		return -ENODEV;
 
 	for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) {
-		u32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
+		u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
 		if (cspr & CSPR_V && (cspr & CSPR_BA) ==
 				convert_ifc_address(addr_base))
 			return i;
@@ -79,16 +79,16 @@
 	/*
 	 * Clear all the common status and event registers
 	 */
-	if (in_be32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER)
-		out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
+	if (ifc_in32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER)
+		ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat);
 
 	/* enable all error and events */
-	out_be32(&ifc->cm_evter_en, IFC_CM_EVTER_EN_CSEREN);
+	ifc_out32(IFC_CM_EVTER_EN_CSEREN, &ifc->cm_evter_en);
 
 	/* enable all error and event interrupts */
-	out_be32(&ifc->cm_evter_intr_en, IFC_CM_EVTER_INTR_EN_CSERIREN);
-	out_be32(&ifc->cm_erattr0, 0x0);
-	out_be32(&ifc->cm_erattr1, 0x0);
+	ifc_out32(IFC_CM_EVTER_INTR_EN_CSERIREN, &ifc->cm_evter_intr_en);
+	ifc_out32(0x0, &ifc->cm_erattr0);
+	ifc_out32(0x0, &ifc->cm_erattr1);
 
 	return 0;
 }
@@ -127,9 +127,9 @@
 
 	spin_lock_irqsave(&nand_irq_lock, flags);
 
-	stat = in_be32(&ifc->ifc_nand.nand_evter_stat);
+	stat = ifc_in32(&ifc->ifc_nand.nand_evter_stat);
 	if (stat) {
-		out_be32(&ifc->ifc_nand.nand_evter_stat, stat);
+		ifc_out32(stat, &ifc->ifc_nand.nand_evter_stat);
 		ctrl->nand_stat = stat;
 		wake_up(&ctrl->nand_wait);
 	}
@@ -161,16 +161,16 @@
 	irqreturn_t ret = IRQ_NONE;
 
 	/* read for chip select error */
-	cs_err = in_be32(&ifc->cm_evter_stat);
+	cs_err = ifc_in32(&ifc->cm_evter_stat);
 	if (cs_err) {
 		dev_err(ctrl->dev, "transaction sent to IFC is not mapped to"
 				"any memory bank 0x%08X\n", cs_err);
 		/* clear the chip select error */
-		out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
+		ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat);
 
 		/* read error attribute registers print the error information */
-		status = in_be32(&ifc->cm_erattr0);
-		err_addr = in_be32(&ifc->cm_erattr1);
+		status = ifc_in32(&ifc->cm_erattr0);
+		err_addr = ifc_in32(&ifc->cm_erattr1);
 
 		if (status & IFC_CM_ERATTR0_ERTYP_READ)
 			dev_err(ctrl->dev, "Read transaction error"
@@ -231,6 +231,23 @@
 		goto err;
 	}
 
+	version = ifc_in32(&fsl_ifc_ctrl_dev->regs->ifc_rev) &
+			FSL_IFC_VERSION_MASK;
+	banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
+	dev_info(&dev->dev, "IFC version %d.%d, %d banks\n",
+		version >> 24, (version >> 16) & 0xf, banks);
+
+	fsl_ifc_ctrl_dev->version = version;
+	fsl_ifc_ctrl_dev->banks = banks;
+
+	if (of_property_read_bool(dev->dev.of_node, "little-endian")) {
+		fsl_ifc_ctrl_dev->little_endian = true;
+		dev_dbg(&dev->dev, "IFC REGISTERS are LITTLE endian\n");
+	} else {
+		fsl_ifc_ctrl_dev->little_endian = false;
+		dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n");
+	}
+
 	version = ioread32be(&fsl_ifc_ctrl_dev->regs->ifc_rev) &
 			FSL_IFC_VERSION_MASK;
 	banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
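
The in_be32()/out_be32() to ifc_in32()/ifc_out32() conversion lets one
driver serve IFC register blocks wired either big- or little-endian. A
simplified sketch of how such accessors dispatch (the real helpers live in
include/linux/fsl_ifc.h and key off the little_endian flag probed above):

	static u32 ifc_in32_sketch(void __iomem *addr)
	{
		if (fsl_ifc_ctrl_dev->little_endian)
			return ioread32(addr);
		return ioread32be(addr);
	}

	static void ifc_out32_sketch(u32 val, void __iomem *addr)
	{
		if (fsl_ifc_ctrl_dev->little_endian)
			iowrite32(val, addr);
		else
			iowrite32be(val, addr);
	}
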
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index 841717a..f2d9fb4 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -609,7 +609,6 @@
 static struct i2c_driver pm800_driver = {
 	.driver = {
 		.name = "88PM800",
-		.owner = THIS_MODULE,
 		.pm = &pm80x_pm_ops,
 		},
 	.probe = pm800_probe,
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
index e9d5064..39f2302 100644
--- a/drivers/mfd/88pm805.c
+++ b/drivers/mfd/88pm805.c
@@ -267,7 +267,6 @@
 static struct i2c_driver pm805_driver = {
 	.driver = {
 		.name = "88PM805",
-		.owner = THIS_MODULE,
 		.pm = &pm80x_pm_ops,
 		},
 	.probe = pm805_probe,
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index e03b7f4..3269a99 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -558,11 +558,7 @@
 	irq_set_chip_data(virq, d->host_data);
 	irq_set_chip_and_handler(virq, &pm860x_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 	return 0;
 }
 
@@ -1258,7 +1254,6 @@
 static struct i2c_driver pm860x_driver = {
 	.driver	= {
 		.name	= "88PM860x",
-		.owner	= THIS_MODULE,
 		.pm     = &pm860x_pm_ops,
 		.of_match_table	= pm860x_dt_ids,
 	},
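
The deleted #ifdef CONFIG_ARM block here, and in several drivers below, is
part of a tree-wide removal of the ARM-only set_irq_flags() in favour of
the generic status-flag helpers. (Likewise, the many .owner = THIS_MODULE
deletions in this pull rely on the I2C core filling in driver.owner
itself.) The rough correspondence, as a sketch:

	/* old ARM-only form */
	set_irq_flags(virq, IRQF_VALID);		/* request-able */
	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);	/* plus auto-probe */
	set_irq_flags(virq, 0);				/* invalid again */

	/* generic equivalents */
	irq_clear_status_flags(virq, IRQ_NOREQUEST);
	irq_clear_status_flags(virq, IRQ_NOREQUEST | IRQ_NOPROBE);
	irq_set_status_flags(virq, IRQ_NOREQUEST | IRQ_NOPROBE);
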
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 076f593..99d6367 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -97,6 +97,7 @@
 	select MFD_CORE
 	select CHROME_PLATFORMS
 	select CROS_EC_PROTO
+	depends on X86 || ARM || COMPILE_TEST
 	help
 	  If you say Y here you get support for the ChromeOS Embedded
 	  Controller (EC) providing keyboard, battery and power services.
@@ -105,7 +106,7 @@
 
 config MFD_CROS_EC_I2C
 	tristate "ChromeOS Embedded Controller (I2C)"
-	depends on MFD_CROS_EC && CROS_EC_PROTO && I2C
+	depends on MFD_CROS_EC && I2C
 
 	help
 	  If you say Y here, you get support for talking to the ChromeOS
@@ -115,7 +116,7 @@
 
 config MFD_CROS_EC_SPI
 	tristate "ChromeOS Embedded Controller (SPI)"
-	depends on MFD_CROS_EC && CROS_EC_PROTO && SPI
+	depends on MFD_CROS_EC && SPI
 
 	---help---
 	  If you say Y here, you get support for talking to the ChromeOS EC
@@ -186,6 +187,18 @@
 	  This driver can be built as a module. If built as a module it will be
 	  called "da9055"
 
+config MFD_DA9062
+	tristate "Dialog Semiconductor DA9062 PMIC Support"
+	select MFD_CORE
+	select REGMAP_I2C
+	select REGMAP_IRQ
+	depends on I2C=y
+	help
+	  Say yes here for support for the Dialog Semiconductor DA9062 PMIC.
+	  This includes the I2C driver and core APIs.
+	  Additional drivers must be enabled in order to use the functionality
+	  of the device.
+
 config MFD_DA9063
 	bool "Dialog Semiconductor DA9063 PMIC Support"
 	select MFD_CORE
@@ -318,6 +331,7 @@
 
 config INTEL_SOC_PMIC
 	bool "Support for Intel Atom SoC PMIC"
+	depends on GPIOLIB
 	depends on I2C=y
 	select MFD_CORE
 	select REGMAP_I2C
@@ -398,12 +412,14 @@
 	  device may provide functions like watchdog, GPIO, UART and I2C bus.
 
 	  The following modules are supported:
+		* COMe-bBL6
 		* COMe-bHL6
 		* COMe-bIP#
 		* COMe-bPC2 (ETXexpress-PC)
 		* COMe-bSC# (ETXexpress-SC T#)
 		* COMe-cBL6
 		* COMe-cBT6
+		* COMe-cBW6
 		* COMe-cCT6
 		* COMe-cDC2 (microETXexpress-DC)
 		* COMe-cHL6
@@ -1379,6 +1395,12 @@
 	help
 	  Support for Wolfson Microelectronics WM8997 low power audio SoC
 
+config MFD_WM8998
+	bool "Wolfson Microelectronics WM8998"
+	depends on MFD_ARIZONA
+	help
+	  Support for Wolfson Microelectronics WM8998 low power audio SoC
+
 config MFD_WM8400
 	bool "Wolfson Microelectronics WM8400"
 	select MFD_CORE
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 9d730a2..a59e3fc 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -48,6 +48,9 @@
 ifeq ($(CONFIG_MFD_WM8997),y)
 obj-$(CONFIG_MFD_ARIZONA)	+= wm8997-tables.o
 endif
+ifeq ($(CONFIG_MFD_WM8998),y)
+obj-$(CONFIG_MFD_ARIZONA)	+= wm8998-tables.o
+endif
 obj-$(CONFIG_MFD_WM8400)	+= wm8400-core.o
 wm831x-objs			:= wm831x-core.o wm831x-irq.o wm831x-otp.o
 wm831x-objs			+= wm831x-auxadc.o
@@ -110,10 +113,11 @@
 
 da9055-objs			:= da9055-core.o da9055-i2c.o
 obj-$(CONFIG_MFD_DA9055)	+= da9055.o
-
+obj-$(CONFIG_MFD_DA9062)	+= da9062-core.o
 da9063-objs			:= da9063-core.o da9063-irq.o da9063-i2c.o
 obj-$(CONFIG_MFD_DA9063)	+= da9063.o
 obj-$(CONFIG_MFD_DA9150)	+= da9150-core.o
+
 obj-$(CONFIG_MFD_MAX14577)	+= max14577.o
 obj-$(CONFIG_MFD_MAX77686)	+= max77686.o
 obj-$(CONFIG_MFD_MAX77693)	+= max77693.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 4e6e03d..29b6a2d 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -500,7 +500,6 @@
 static struct i2c_driver aat2870_i2c_driver = {
 	.driver = {
 		.name	= "aat2870",
-		.owner	= THIS_MODULE,
 		.pm	= &aat2870_pm_ops,
 	},
 	.probe		= aat2870_i2c_probe,
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 4659ac1..f0afb44 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -972,7 +972,6 @@
 static struct i2c_driver ab3100_driver = {
 	.driver = {
 		.name	= "ab3100",
-		.owner	= THIS_MODULE,
 	},
 	.id_table	= ab3100_id,
 	.probe		= ab3100_probe,
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 000da72..fefbe4c 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -565,11 +565,7 @@
 	irq_set_chip_and_handler(virq, &ab8500_irq_chip,
 				handle_simple_irq);
 	irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index f495b8b..ae88654 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -351,7 +351,6 @@
 static struct i2c_driver adp5520_driver = {
 	.driver = {
 		.name	= "adp5520",
-		.owner	= THIS_MODULE,
 		.pm	= &adp5520_pm,
 	},
 	.probe		= adp5520_probe,
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index a72ddb29..44cfdbb 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -30,7 +30,7 @@
 
 #include "arizona.h"
 
-static const char *wm5102_core_supplies[] = {
+static const char * const wm5102_core_supplies[] = {
 	"AVDD",
 	"DBVDD1",
 };
@@ -146,17 +146,31 @@
 static irqreturn_t arizona_overclocked(int irq, void *data)
 {
 	struct arizona *arizona = data;
-	unsigned int val[2];
+	unsigned int val[3];
 	int ret;
-	
+
 	ret = regmap_bulk_read(arizona->regmap, ARIZONA_INTERRUPT_RAW_STATUS_6,
-			       &val[0], 2);
+			       &val[0], 3);
 	if (ret != 0) {
 		dev_err(arizona->dev, "Failed to read overclock status: %d\n",
 			ret);
 		return IRQ_NONE;
 	}
 
+	switch (arizona->type) {
+	case WM8998:
+	case WM1814:
+		/* Some bits are shifted on WM8998,
+		 * rearrange to match the standard bit layout
+		 */
+		val[0] = ((val[0] & 0x60e0) >> 1) |
+			 ((val[0] & 0x1e00) >> 2) |
+			 (val[0] & 0x000f);
+		break;
+	default:
+		break;
+	}
+
 	if (val[0] & ARIZONA_PWM_OVERCLOCKED_STS)
 		dev_err(arizona->dev, "PWM overclocked\n");
 	if (val[0] & ARIZONA_FX_CORE_OVERCLOCKED_STS)
@@ -201,6 +215,9 @@
 	if (val[1] & ARIZONA_ISRC1_OVERCLOCKED_STS)
 		dev_err(arizona->dev, "ISRC1 overclocked\n");
 
+	if (val[2] & ARIZONA_SPDIF_OVERCLOCKED_STS)
+		dev_err(arizona->dev, "SPDIF overclocked\n");
+
 	return IRQ_HANDLED;
 }
 
@@ -392,7 +409,7 @@
  * Register patch to some of the CODECs internal write sequences
  * to ensure a clean exit from the low power sleep state.
  */
-static const struct reg_default wm5110_sleep_patch[] = {
+static const struct reg_sequence wm5110_sleep_patch[] = {
 	{ 0x337A, 0xC100 },
 	{ 0x337B, 0x0041 },
 	{ 0x3300, 0xA210 },
@@ -550,9 +567,8 @@
 		break;
 	default:
 		ret = arizona_wait_for_boot(arizona);
-		if (ret != 0) {
+		if (ret != 0)
 			goto err;
-		}
 
 		if (arizona->external_dcvdd) {
 			ret = regmap_update_bits(arizona->regmap,
@@ -759,8 +775,8 @@
 
 	ret = of_property_read_u32_array(arizona->dev->of_node,
 					 "wlf,gpio-defaults",
-					 arizona->pdata.gpio_defaults,
-					 ARRAY_SIZE(arizona->pdata.gpio_defaults));
+					 pdata->gpio_defaults,
+					 ARRAY_SIZE(pdata->gpio_defaults));
 	if (ret >= 0) {
 		/*
 		 * All values are literal except out of range values
@@ -768,11 +784,11 @@
 		 * data which uses 0 as chip default and out of range
 		 * as zero.
 		 */
-		for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) {
-			if (arizona->pdata.gpio_defaults[i] > 0xffff)
-				arizona->pdata.gpio_defaults[i] = 0;
-			else if (arizona->pdata.gpio_defaults[i] == 0)
-				arizona->pdata.gpio_defaults[i] = 0x10000;
+		for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) {
+			if (pdata->gpio_defaults[i] > 0xffff)
+				pdata->gpio_defaults[i] = 0;
+			else if (pdata->gpio_defaults[i] == 0)
+				pdata->gpio_defaults[i] = 0x10000;
 		}
 	} else {
 		dev_err(arizona->dev, "Failed to parse GPIO defaults: %d\n",
@@ -781,20 +797,20 @@
 
 	of_property_for_each_u32(arizona->dev->of_node, "wlf,inmode", prop,
 				 cur, val) {
-		if (count == ARRAY_SIZE(arizona->pdata.inmode))
+		if (count == ARRAY_SIZE(pdata->inmode))
 			break;
 
-		arizona->pdata.inmode[count] = val;
+		pdata->inmode[count] = val;
 		count++;
 	}
 
 	count = 0;
 	of_property_for_each_u32(arizona->dev->of_node, "wlf,dmic-ref", prop,
 				 cur, val) {
-		if (count == ARRAY_SIZE(arizona->pdata.dmic_ref))
+		if (count == ARRAY_SIZE(pdata->dmic_ref))
 			break;
 
-		arizona->pdata.dmic_ref[count] = val;
+		pdata->dmic_ref[count] = val;
 		count++;
 	}
 
@@ -806,6 +822,8 @@
 	{ .compatible = "wlf,wm5110", .data = (void *)WM5110 },
 	{ .compatible = "wlf,wm8280", .data = (void *)WM8280 },
 	{ .compatible = "wlf,wm8997", .data = (void *)WM8997 },
+	{ .compatible = "wlf,wm8998", .data = (void *)WM8998 },
+	{ .compatible = "wlf,wm1814", .data = (void *)WM1814 },
 	{},
 };
 EXPORT_SYMBOL_GPL(arizona_of_match);
@@ -820,7 +838,7 @@
 	{ .name = "arizona-ldo1" },
 };
 
-static const char *wm5102_supplies[] = {
+static const char * const wm5102_supplies[] = {
 	"MICVDD",
 	"DBVDD2",
 	"DBVDD3",
@@ -863,7 +881,7 @@
 	},
 };
 
-static const char *wm8997_supplies[] = {
+static const char * const wm8997_supplies[] = {
 	"MICVDD",
 	"DBVDD2",
 	"CPVDD",
@@ -887,11 +905,28 @@
 	},
 };
 
+static const struct mfd_cell wm8998_devs[] = {
+	{
+		.name = "arizona-extcon",
+		.parent_supplies = wm5102_supplies,
+		.num_parent_supplies = 1, /* We only need MICVDD */
+	},
+	{ .name = "arizona-gpio" },
+	{ .name = "arizona-haptics" },
+	{ .name = "arizona-pwm" },
+	{
+		.name = "wm8998-codec",
+		.parent_supplies = wm5102_supplies,
+		.num_parent_supplies = ARRAY_SIZE(wm5102_supplies),
+	},
+	{ .name = "arizona-micsupp" },
+};
+
 int arizona_dev_init(struct arizona *arizona)
 {
 	struct device *dev = arizona->dev;
 	const char *type_name;
-	unsigned int reg, val;
+	unsigned int reg, val, mask;
 	int (*apply_patch)(struct arizona *) = NULL;
 	int ret, i;
 
@@ -911,6 +946,8 @@
 	case WM5110:
 	case WM8280:
 	case WM8997:
+	case WM8998:
+	case WM1814:
 		for (i = 0; i < ARRAY_SIZE(wm5102_core_supplies); i++)
 			arizona->core_supplies[i].supply
 				= wm5102_core_supplies[i];
@@ -992,6 +1029,7 @@
 	switch (reg) {
 	case 0x5102:
 	case 0x5110:
+	case 0x6349:
 	case 0x8997:
 		break;
 	default:
@@ -1093,6 +1131,27 @@
 		apply_patch = wm8997_patch;
 		break;
 #endif
+#ifdef CONFIG_MFD_WM8998
+	case 0x6349:
+		switch (arizona->type) {
+		case WM8998:
+			type_name = "WM8998";
+			break;
+
+		case WM1814:
+			type_name = "WM1814";
+			break;
+
+		default:
+			type_name = "WM8998";
+			dev_err(arizona->dev, "WM8998 registered as %d\n",
+				arizona->type);
+			arizona->type = WM8998;
+		}
+
+		apply_patch = wm8998_patch;
+		break;
+#endif
 	default:
 		dev_err(arizona->dev, "Unknown device ID %x\n", reg);
 		goto err_reset;
@@ -1204,14 +1263,38 @@
 			<< ARIZONA_IN1_DMIC_SUP_SHIFT;
 		if (arizona->pdata.inmode[i] & ARIZONA_INMODE_DMIC)
 			val |= 1 << ARIZONA_IN1_MODE_SHIFT;
-		if (arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
-			val |= 1 << ARIZONA_IN1_SINGLE_ENDED_SHIFT;
+
+		switch (arizona->type) {
+		case WM8998:
+		case WM1814:
+			regmap_update_bits(arizona->regmap,
+				ARIZONA_ADC_DIGITAL_VOLUME_1L + (i * 8),
+				ARIZONA_IN1L_SRC_SE_MASK,
+				(arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
+					<< ARIZONA_IN1L_SRC_SE_SHIFT);
+
+			regmap_update_bits(arizona->regmap,
+				ARIZONA_ADC_DIGITAL_VOLUME_1R + (i * 8),
+				ARIZONA_IN1R_SRC_SE_MASK,
+				(arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
+					<< ARIZONA_IN1R_SRC_SE_SHIFT);
+
+			mask = ARIZONA_IN1_DMIC_SUP_MASK |
+				ARIZONA_IN1_MODE_MASK;
+			break;
+		default:
+			if (arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
+				val |= 1 << ARIZONA_IN1_SINGLE_ENDED_SHIFT;
+
+			mask = ARIZONA_IN1_DMIC_SUP_MASK |
+				ARIZONA_IN1_MODE_MASK |
+				ARIZONA_IN1_SINGLE_ENDED_MASK;
+			break;
+		}
 
 		regmap_update_bits(arizona->regmap,
 				   ARIZONA_IN1L_CONTROL + (i * 8),
-				   ARIZONA_IN1_DMIC_SUP_MASK |
-				   ARIZONA_IN1_MODE_MASK |
-				   ARIZONA_IN1_SINGLE_ENDED_MASK, val);
+				   mask, val);
 	}
 
 	for (i = 0; i < ARIZONA_MAX_OUTPUT; i++) {
@@ -1273,6 +1356,11 @@
 		ret = mfd_add_devices(arizona->dev, -1, wm8997_devs,
 				      ARRAY_SIZE(wm8997_devs), NULL, 0, NULL);
 		break;
+	case WM8998:
+	case WM1814:
+		ret = mfd_add_devices(arizona->dev, -1, wm8998_devs,
+				      ARRAY_SIZE(wm8998_devs), NULL, 0, NULL);
+		break;
 	}
 
 	if (ret != 0) {
diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c
index ff782a5..cea1b40 100644
--- a/drivers/mfd/arizona-i2c.c
+++ b/drivers/mfd/arizona-i2c.c
@@ -53,6 +53,12 @@
 		regmap_config = &wm8997_i2c_regmap;
 		break;
 #endif
+#ifdef CONFIG_MFD_WM8998
+	case WM8998:
+	case WM1814:
+		regmap_config = &wm8998_i2c_regmap;
+		break;
+#endif
 	default:
 		dev_err(&i2c->dev, "Unknown device type %ld\n",
 			id->driver_data);
@@ -90,6 +96,8 @@
 	{ "wm5110", WM5110 },
 	{ "wm8280", WM8280 },
 	{ "wm8997", WM8997 },
+	{ "wm8998", WM8998 },
+	{ "wm1814", WM1814 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, arizona_i2c_id);
@@ -97,7 +105,6 @@
 static struct i2c_driver arizona_i2c_driver = {
 	.driver = {
 		.name	= "arizona",
-		.owner	= THIS_MODULE,
 		.pm	= &arizona_pm_ops,
 		.of_match_table	= of_match_ptr(arizona_of_match),
 	},
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 2b9965d5..2cac4f4 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -174,14 +174,7 @@
 	irq_set_chip_data(virq, data);
 	irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq);
 	irq_set_nested_thread(virq, 1);
-
-	/* ARM needs us to explicitly flag the IRQ as valid
-	 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
@@ -234,6 +227,15 @@
 		arizona->ctrlif_error = false;
 		break;
 #endif
+#ifdef CONFIG_MFD_WM8998
+	case WM8998:
+	case WM1814:
+		aod = &wm8998_aod;
+		irq = &wm8998_irq;
+
+		arizona->ctrlif_error = false;
+		break;
+#endif
 	default:
 		BUG_ON("Unknown Arizona class device" == NULL);
 		return -EINVAL;
diff --git a/drivers/mfd/arizona.h b/drivers/mfd/arizona.h
index fbe2843..3af12e9 100644
--- a/drivers/mfd/arizona.h
+++ b/drivers/mfd/arizona.h
@@ -27,6 +27,8 @@
 
 extern const struct regmap_config wm8997_i2c_regmap;
 
+extern const struct regmap_config wm8998_i2c_regmap;
+
 extern const struct dev_pm_ops arizona_pm_ops;
 
 extern const struct of_device_id arizona_of_match[];
@@ -41,6 +43,9 @@
 extern const struct regmap_irq_chip wm8997_aod;
 extern const struct regmap_irq_chip wm8997_irq;
 
+extern struct regmap_irq_chip wm8998_aod;
+extern struct regmap_irq_chip wm8998_irq;
+
 int arizona_dev_init(struct arizona *arizona);
 int arizona_dev_exit(struct arizona *arizona);
 int arizona_irq_init(struct arizona *arizona);
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
index d9706ed..d001f7e2 100644
--- a/drivers/mfd/as3711.c
+++ b/drivers/mfd/as3711.c
@@ -211,7 +211,6 @@
 static struct i2c_driver as3711_i2c_driver = {
 	.driver = {
 		   .name = "as3711",
-		   .owner = THIS_MODULE,
 		   .of_match_table = of_match_ptr(as3711_of_match),
 	},
 	.probe = as3711_i2c_probe,
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
index 39fa554..924ea90 100644
--- a/drivers/mfd/as3722.c
+++ b/drivers/mfd/as3722.c
@@ -437,7 +437,6 @@
 static struct i2c_driver as3722_i2c_driver = {
 	.driver = {
 		.name = "as3722",
-		.owner = THIS_MODULE,
 		.of_match_table = as3722_of_match,
 	},
 	.probe = as3722_i2c_probe,
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 120df5c..4b54128 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -411,7 +411,7 @@
 
 		irq_set_chip_data(irq, asic);
 		irq_set_handler(irq, handle_level_irq);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	asic3_write_register(asic, ASIC3_OFFSET(INTR, INT_MASK),
@@ -431,7 +431,7 @@
 	irq_base = asic->irq_base;
 
 	for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) {
-		set_irq_flags(irq, 0);
+		irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		irq_set_chip_and_handler(irq, NULL, NULL);
 		irq_set_chip_data(irq, NULL);
 	}
diff --git a/drivers/mfd/atmel-hlcdc.c b/drivers/mfd/atmel-hlcdc.c
index cfd58f4..3fff6b5 100644
--- a/drivers/mfd/atmel-hlcdc.c
+++ b/drivers/mfd/atmel-hlcdc.c
@@ -18,6 +18,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/iopoll.h>
 #include <linux/mfd/atmel-hlcdc.h>
 #include <linux/mfd/core.h>
 #include <linux/module.h>
@@ -26,6 +27,10 @@
 
 #define ATMEL_HLCDC_REG_MAX		(0x4000 - 0x4)
 
+struct atmel_hlcdc_regmap {
+	void __iomem *regs;
+};
+
 static const struct mfd_cell atmel_hlcdc_cells[] = {
 	{
 		.name = "atmel-hlcdc-pwm",
@@ -37,28 +42,62 @@
 	},
 };
 
+static int regmap_atmel_hlcdc_reg_write(void *context, unsigned int reg,
+					unsigned int val)
+{
+	struct atmel_hlcdc_regmap *hregmap = context;
+
+	if (reg <= ATMEL_HLCDC_DIS) {
+		u32 status;
+
+		readl_poll_timeout(hregmap->regs + ATMEL_HLCDC_SR, status,
+				   !(status & ATMEL_HLCDC_SIP), 1, 100);
+	}
+
+	writel(val, hregmap->regs + reg);
+
+	return 0;
+}
+
+static int regmap_atmel_hlcdc_reg_read(void *context, unsigned int reg,
+				       unsigned int *val)
+{
+	struct atmel_hlcdc_regmap *hregmap = context;
+
+	*val = readl(hregmap->regs + reg);
+
+	return 0;
+}
+
 static const struct regmap_config atmel_hlcdc_regmap_config = {
 	.reg_bits = 32,
 	.val_bits = 32,
 	.reg_stride = 4,
 	.max_register = ATMEL_HLCDC_REG_MAX,
+	.reg_write = regmap_atmel_hlcdc_reg_write,
+	.reg_read = regmap_atmel_hlcdc_reg_read,
+	.fast_io = true,
 };
 
 static int atmel_hlcdc_probe(struct platform_device *pdev)
 {
+	struct atmel_hlcdc_regmap *hregmap;
 	struct device *dev = &pdev->dev;
 	struct atmel_hlcdc *hlcdc;
 	struct resource *res;
-	void __iomem *regs;
+
+	hregmap = devm_kzalloc(dev, sizeof(*hregmap), GFP_KERNEL);
+	if (!hregmap)
+		return -ENOMEM;
 
 	hlcdc = devm_kzalloc(dev, sizeof(*hlcdc), GFP_KERNEL);
 	if (!hlcdc)
 		return -ENOMEM;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(regs))
-		return PTR_ERR(regs);
+	hregmap->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(hregmap->regs))
+		return PTR_ERR(hregmap->regs);
 
 	hlcdc->irq = platform_get_irq(pdev, 0);
 	if (hlcdc->irq < 0)
@@ -82,8 +121,8 @@
 		return PTR_ERR(hlcdc->slow_clk);
 	}
 
-	hlcdc->regmap = devm_regmap_init_mmio(dev, regs,
-					      &atmel_hlcdc_regmap_config);
+	hlcdc->regmap = devm_regmap_init(dev, NULL, hregmap,
+					 &atmel_hlcdc_regmap_config);
 	if (IS_ERR(hlcdc->regmap))
 		return PTR_ERR(hlcdc->regmap);
 
@@ -102,7 +141,11 @@
 }
 
 static const struct of_device_id atmel_hlcdc_match[] = {
+	{ .compatible = "atmel,at91sam9n12-hlcdc" },
+	{ .compatible = "atmel,at91sam9x5-hlcdc" },
+	{ .compatible = "atmel,sama5d2-hlcdc" },
 	{ .compatible = "atmel,sama5d3-hlcdc" },
+	{ .compatible = "atmel,sama5d4-hlcdc" },
 	{ /* sentinel */ },
 };
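
Replacing devm_regmap_init_mmio() with devm_regmap_init() plus reg_read and
reg_write callbacks is what lets the write path wait for the LCD controller
to leave its synchronization-in-progress state. readl_poll_timeout(addr,
val, cond, sleep_us, timeout_us) from <linux/iopoll.h> re-reads *addr into
val until cond holds or timeout_us microseconds pass, returning 0 on
success and -ETIMEDOUT otherwise; the driver above intentionally drops the
return value and writes regardless. A sketch that propagates it instead
(hypothetical helper):

	static int hlcdc_wait_idle_sketch(void __iomem *regs)
	{
		u32 status;

		/* poll every 1us, give up after 100us */
		return readl_poll_timeout(regs + ATMEL_HLCDC_SR, status,
					  !(status & ATMEL_HLCDC_SIP),
					  1, 100);
	}
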
 
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 6df9155..3f576b7 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -30,19 +30,47 @@
 #define AXP20X_OFF	0x80
 
 static const char * const axp20x_model_names[] = {
+	"AXP152",
 	"AXP202",
 	"AXP209",
 	"AXP221",
 	"AXP288",
 };
 
+static const struct regmap_range axp152_writeable_ranges[] = {
+	regmap_reg_range(AXP152_LDO3456_DC1234_CTRL, AXP152_IRQ3_STATE),
+	regmap_reg_range(AXP152_DCDC_MODE, AXP152_PWM1_DUTY_CYCLE),
+};
+
+static const struct regmap_range axp152_volatile_ranges[] = {
+	regmap_reg_range(AXP152_PWR_OP_MODE, AXP152_PWR_OP_MODE),
+	regmap_reg_range(AXP152_IRQ1_EN, AXP152_IRQ3_STATE),
+	regmap_reg_range(AXP152_GPIO_INPUT, AXP152_GPIO_INPUT),
+};
+
+static const struct regmap_access_table axp152_writeable_table = {
+	.yes_ranges	= axp152_writeable_ranges,
+	.n_yes_ranges	= ARRAY_SIZE(axp152_writeable_ranges),
+};
+
+static const struct regmap_access_table axp152_volatile_table = {
+	.yes_ranges	= axp152_volatile_ranges,
+	.n_yes_ranges	= ARRAY_SIZE(axp152_volatile_ranges),
+};
+
 static const struct regmap_range axp20x_writeable_ranges[] = {
 	regmap_reg_range(AXP20X_DATACACHE(0), AXP20X_IRQ5_STATE),
 	regmap_reg_range(AXP20X_DCDC_MODE, AXP20X_FG_RES),
+	regmap_reg_range(AXP20X_RDC_H, AXP20X_OCV(AXP20X_OCV_MAX)),
 };
 
 static const struct regmap_range axp20x_volatile_ranges[] = {
+	regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP20X_USB_OTG_STATUS),
+	regmap_reg_range(AXP20X_CHRG_CTRL1, AXP20X_CHRG_CTRL2),
 	regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IRQ5_STATE),
+	regmap_reg_range(AXP20X_ACIN_V_ADC_H, AXP20X_IPSOUT_V_HIGH_L),
+	regmap_reg_range(AXP20X_GPIO20_SS, AXP20X_GPIO3_CTRL),
+	regmap_reg_range(AXP20X_FG_RES, AXP20X_RDC_L),
 };
 
 static const struct regmap_access_table axp20x_writeable_table = {
@@ -93,6 +121,11 @@
 	.n_yes_ranges	= ARRAY_SIZE(axp288_volatile_ranges),
 };
 
+static struct resource axp152_pek_resources[] = {
+	DEFINE_RES_IRQ_NAMED(AXP152_IRQ_PEK_RIS_EDGE, "PEK_DBR"),
+	DEFINE_RES_IRQ_NAMED(AXP152_IRQ_PEK_FAL_EDGE, "PEK_DBF"),
+};
+
 static struct resource axp20x_pek_resources[] = {
 	{
 		.name	= "PEK_DBR",
@@ -107,6 +140,13 @@
 	},
 };
 
+static struct resource axp20x_usb_power_supply_resources[] = {
+	DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_PLUGIN, "VBUS_PLUGIN"),
+	DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_REMOVAL, "VBUS_REMOVAL"),
+	DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_VALID, "VBUS_VALID"),
+	DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_NOT_VALID, "VBUS_NOT_VALID"),
+};
+
 static struct resource axp22x_pek_resources[] = {
 	{
 		.name   = "PEK_DBR",
@@ -154,12 +194,21 @@
 	},
 };
 
+static const struct regmap_config axp152_regmap_config = {
+	.reg_bits	= 8,
+	.val_bits	= 8,
+	.wr_table	= &axp152_writeable_table,
+	.volatile_table	= &axp152_volatile_table,
+	.max_register	= AXP152_PWM1_DUTY_CYCLE,
+	.cache_type	= REGCACHE_RBTREE,
+};
+
 static const struct regmap_config axp20x_regmap_config = {
 	.reg_bits	= 8,
 	.val_bits	= 8,
 	.wr_table	= &axp20x_writeable_table,
 	.volatile_table	= &axp20x_volatile_table,
-	.max_register	= AXP20X_FG_RES,
+	.max_register	= AXP20X_OCV(AXP20X_OCV_MAX),
 	.cache_type	= REGCACHE_RBTREE,
 };
 
@@ -184,6 +233,26 @@
 #define INIT_REGMAP_IRQ(_variant, _irq, _off, _mask)			\
 	[_variant##_IRQ_##_irq] = { .reg_offset = (_off), .mask = BIT(_mask) }
 
+static const struct regmap_irq axp152_regmap_irqs[] = {
+	INIT_REGMAP_IRQ(AXP152, LDO0IN_CONNECT,		0, 6),
+	INIT_REGMAP_IRQ(AXP152, LDO0IN_REMOVAL,		0, 5),
+	INIT_REGMAP_IRQ(AXP152, ALDO0IN_CONNECT,	0, 3),
+	INIT_REGMAP_IRQ(AXP152, ALDO0IN_REMOVAL,	0, 2),
+	INIT_REGMAP_IRQ(AXP152, DCDC1_V_LOW,		1, 5),
+	INIT_REGMAP_IRQ(AXP152, DCDC2_V_LOW,		1, 4),
+	INIT_REGMAP_IRQ(AXP152, DCDC3_V_LOW,		1, 3),
+	INIT_REGMAP_IRQ(AXP152, DCDC4_V_LOW,		1, 2),
+	INIT_REGMAP_IRQ(AXP152, PEK_SHORT,		1, 1),
+	INIT_REGMAP_IRQ(AXP152, PEK_LONG,		1, 0),
+	INIT_REGMAP_IRQ(AXP152, TIMER,			2, 7),
+	INIT_REGMAP_IRQ(AXP152, PEK_RIS_EDGE,		2, 6),
+	INIT_REGMAP_IRQ(AXP152, PEK_FAL_EDGE,		2, 5),
+	INIT_REGMAP_IRQ(AXP152, GPIO3_INPUT,		2, 3),
+	INIT_REGMAP_IRQ(AXP152, GPIO2_INPUT,		2, 2),
+	INIT_REGMAP_IRQ(AXP152, GPIO1_INPUT,		2, 1),
+	INIT_REGMAP_IRQ(AXP152, GPIO0_INPUT,		2, 0),
+};
+
 static const struct regmap_irq axp20x_regmap_irqs[] = {
 	INIT_REGMAP_IRQ(AXP20X, ACIN_OVER_V,		0, 7),
 	INIT_REGMAP_IRQ(AXP20X, ACIN_PLUGIN,		0, 6),
@@ -293,6 +362,7 @@
 };
 
 static const struct of_device_id axp20x_of_match[] = {
+	{ .compatible = "x-powers,axp152", .data = (void *) AXP152_ID },
 	{ .compatible = "x-powers,axp202", .data = (void *) AXP202_ID },
 	{ .compatible = "x-powers,axp209", .data = (void *) AXP209_ID },
 	{ .compatible = "x-powers,axp221", .data = (void *) AXP221_ID },
@@ -317,6 +387,18 @@
 };
 MODULE_DEVICE_TABLE(acpi, axp20x_acpi_match);
 
+static const struct regmap_irq_chip axp152_regmap_irq_chip = {
+	.name			= "axp152_irq_chip",
+	.status_base		= AXP152_IRQ1_STATE,
+	.ack_base		= AXP152_IRQ1_STATE,
+	.mask_base		= AXP152_IRQ1_EN,
+	.mask_invert		= true,
+	.init_ack_masked	= true,
+	.irqs			= axp152_regmap_irqs,
+	.num_irqs		= ARRAY_SIZE(axp152_regmap_irqs),
+	.num_regs		= 3,
+};
+
 static const struct regmap_irq_chip axp20x_regmap_irq_chip = {
 	.name			= "axp20x_irq_chip",
 	.status_base		= AXP20X_IRQ1_STATE,
@@ -357,11 +439,16 @@
 
 static struct mfd_cell axp20x_cells[] = {
 	{
-		.name			= "axp20x-pek",
-		.num_resources		= ARRAY_SIZE(axp20x_pek_resources),
-		.resources		= axp20x_pek_resources,
+		.name		= "axp20x-pek",
+		.num_resources	= ARRAY_SIZE(axp20x_pek_resources),
+		.resources	= axp20x_pek_resources,
 	}, {
-		.name			= "axp20x-regulator",
+		.name		= "axp20x-regulator",
+	}, {
+		.name		= "axp20x-usb-power-supply",
+		.of_compatible	= "x-powers,axp202-usb-power-supply",
+		.num_resources	= ARRAY_SIZE(axp20x_usb_power_supply_resources),
+		.resources	= axp20x_usb_power_supply_resources,
 	},
 };
 
@@ -375,6 +462,14 @@
 	},
 };
 
+static struct mfd_cell axp152_cells[] = {
+	{
+		.name			= "axp20x-pek",
+		.num_resources		= ARRAY_SIZE(axp152_pek_resources),
+		.resources		= axp152_pek_resources,
+	},
+};
+
 static struct resource axp288_adc_resources[] = {
 	{
 		.name  = "GPADC",
@@ -513,6 +608,12 @@
 	}
 
 	switch (axp20x->variant) {
+	case AXP152_ID:
+		axp20x->nr_cells = ARRAY_SIZE(axp152_cells);
+		axp20x->cells = axp152_cells;
+		axp20x->regmap_cfg = &axp152_regmap_config;
+		axp20x->regmap_irq_chip = &axp152_regmap_irq_chip;
+		break;
 	case AXP202_ID:
 	case AXP209_ID:
 		axp20x->nr_cells = ARRAY_SIZE(axp20x_cells);
@@ -613,7 +714,6 @@
 static struct i2c_driver axp20x_i2c_driver = {
 	.driver = {
 		.name	= "axp20x",
-		.owner	= THIS_MODULE,
 		.of_match_table	= of_match_ptr(axp20x_of_match),
 		.acpi_match_table = ACPI_PTR(axp20x_acpi_match),
 	},
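
The new axp152 tables use DEFINE_RES_IRQ_NAMED() where the older axp20x
tables spell out struct resource by hand; the two forms are equivalent, as
this sketch shows:

	/* verbose form */
	static struct resource pek_rise = {
		.name	= "PEK_DBR",
		.start	= AXP152_IRQ_PEK_RIS_EDGE,
		.end	= AXP152_IRQ_PEK_RIS_EDGE,
		.flags	= IORESOURCE_IRQ,
	};

	/* the same resource via the helper from <linux/ioport.h> */
	static struct resource pek_rise2 =
		DEFINE_RES_IRQ_NAMED(AXP152_IRQ_PEK_RIS_EDGE, "PEK_DBR");
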
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
index e334de0..da2af5b 100644
--- a/drivers/mfd/bcm590xx.c
+++ b/drivers/mfd/bcm590xx.c
@@ -117,7 +117,6 @@
 static struct i2c_driver bcm590xx_i2c_driver = {
 	.driver = {
 		   .name = "bcm590xx",
-		   .owner = THIS_MODULE,
 		   .of_match_table = of_match_ptr(bcm590xx_of_match),
 	},
 	.probe = bcm590xx_i2c_probe,
diff --git a/drivers/mfd/cros_ec_i2c.c b/drivers/mfd/cros_ec_i2c.c
index b9a0963..d06e4b4 100644
--- a/drivers/mfd/cros_ec_i2c.c
+++ b/drivers/mfd/cros_ec_i2c.c
@@ -353,7 +353,6 @@
 static struct i2c_driver cros_ec_driver = {
 	.driver	= {
 		.name	= "cros-ec-i2c",
-		.owner	= THIS_MODULE,
 		.pm	= &cros_ec_i2c_pm_ops,
 	},
 	.probe		= cros_ec_i2c_probe,
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index 16f228d..30a296b 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -701,6 +701,12 @@
 static SIMPLE_DEV_PM_OPS(cros_ec_spi_pm_ops, cros_ec_spi_suspend,
 			 cros_ec_spi_resume);
 
+static const struct of_device_id cros_ec_spi_of_match[] = {
+	{ .compatible = "google,cros-ec-spi", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cros_ec_spi_of_match);
+
 static const struct spi_device_id cros_ec_spi_id[] = {
 	{ "cros-ec-spi", 0 },
 	{ }
@@ -710,6 +716,7 @@
 static struct spi_driver cros_ec_driver_spi = {
 	.driver	= {
 		.name	= "cros-ec-spi",
+		.of_match_table = of_match_ptr(cros_ec_spi_of_match),
 		.owner	= THIS_MODULE,
 		.pm	= &cros_ec_spi_pm_ops,
 	},
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index e0a2e0e..ef7fe2a 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -550,7 +550,6 @@
 static struct i2c_driver da903x_driver = {
 	.driver	= {
 		.name	= "da903x",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= da903x_probe,
 	.remove		= da903x_remove,
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index ec39287..0288700 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -195,7 +195,6 @@
 	.id_table = da9052_i2c_id,
 	.driver = {
 		.name = "da9052",
-		.owner = THIS_MODULE,
 #ifdef CONFIG_OF
 		.of_match_table = dialog_dt_ids,
 #endif
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index d4d4c16..b53e100 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -79,7 +79,6 @@
 	.id_table = da9055_i2c_id,
 	.driver = {
 		.name = "da9055-pmic",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(da9055_of_match),
 	},
 };
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
new file mode 100644
index 0000000..f80d947
--- /dev/null
+++ b/drivers/mfd/da9062-core.c
@@ -0,0 +1,533 @@
+/*
+ * Core, IRQ and I2C device driver for DA9062 PMIC
+ * Copyright (C) 2015  Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/i2c.h>
+#include <linux/mfd/da9062/core.h>
+#include <linux/mfd/da9062/registers.h>
+#include <linux/regulator/of_regulator.h>
+
+#define	DA9062_REG_EVENT_A_OFFSET	0
+#define	DA9062_REG_EVENT_B_OFFSET	1
+#define	DA9062_REG_EVENT_C_OFFSET	2
+
+static struct regmap_irq da9062_irqs[] = {
+	/* EVENT A */
+	[DA9062_IRQ_ONKEY] = {
+		.reg_offset = DA9062_REG_EVENT_A_OFFSET,
+		.mask = DA9062AA_M_NONKEY_MASK,
+	},
+	[DA9062_IRQ_ALARM] = {
+		.reg_offset = DA9062_REG_EVENT_A_OFFSET,
+		.mask = DA9062AA_M_ALARM_MASK,
+	},
+	[DA9062_IRQ_TICK] = {
+		.reg_offset = DA9062_REG_EVENT_A_OFFSET,
+		.mask = DA9062AA_M_TICK_MASK,
+	},
+	[DA9062_IRQ_WDG_WARN] = {
+		.reg_offset = DA9062_REG_EVENT_A_OFFSET,
+		.mask = DA9062AA_M_WDG_WARN_MASK,
+	},
+	[DA9062_IRQ_SEQ_RDY] = {
+		.reg_offset = DA9062_REG_EVENT_A_OFFSET,
+		.mask = DA9062AA_M_SEQ_RDY_MASK,
+	},
+	/* EVENT B */
+	[DA9062_IRQ_TEMP] = {
+		.reg_offset = DA9062_REG_EVENT_B_OFFSET,
+		.mask = DA9062AA_M_TEMP_MASK,
+	},
+	[DA9062_IRQ_LDO_LIM] = {
+		.reg_offset = DA9062_REG_EVENT_B_OFFSET,
+		.mask = DA9062AA_M_LDO_LIM_MASK,
+	},
+	[DA9062_IRQ_DVC_RDY] = {
+		.reg_offset = DA9062_REG_EVENT_B_OFFSET,
+		.mask = DA9062AA_M_DVC_RDY_MASK,
+	},
+	[DA9062_IRQ_VDD_WARN] = {
+		.reg_offset = DA9062_REG_EVENT_B_OFFSET,
+		.mask = DA9062AA_M_VDD_WARN_MASK,
+	},
+	/* EVENT C */
+	[DA9062_IRQ_GPI0] = {
+		.reg_offset = DA9062_REG_EVENT_C_OFFSET,
+		.mask = DA9062AA_M_GPI0_MASK,
+	},
+	[DA9062_IRQ_GPI1] = {
+		.reg_offset = DA9062_REG_EVENT_C_OFFSET,
+		.mask = DA9062AA_M_GPI1_MASK,
+	},
+	[DA9062_IRQ_GPI2] = {
+		.reg_offset = DA9062_REG_EVENT_C_OFFSET,
+		.mask = DA9062AA_M_GPI2_MASK,
+	},
+	[DA9062_IRQ_GPI3] = {
+		.reg_offset = DA9062_REG_EVENT_C_OFFSET,
+		.mask = DA9062AA_M_GPI3_MASK,
+	},
+	[DA9062_IRQ_GPI4] = {
+		.reg_offset = DA9062_REG_EVENT_C_OFFSET,
+		.mask = DA9062AA_M_GPI4_MASK,
+	},
+};
+
+static struct regmap_irq_chip da9062_irq_chip = {
+	.name = "da9062-irq",
+	.irqs = da9062_irqs,
+	.num_irqs = DA9062_NUM_IRQ,
+	.num_regs = 3,
+	.status_base = DA9062AA_EVENT_A,
+	.mask_base = DA9062AA_IRQ_MASK_A,
+	.ack_base = DA9062AA_EVENT_A,
+};
+
+static struct resource da9062_core_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_VDD_WARN, 1, "VDD_WARN", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_regulators_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_LDO_LIM, 1, "LDO_LIM", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_thermal_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_TEMP, 1, "THERMAL", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_wdt_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_WDG_WARN, 1, "WD_WARN", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_rtc_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_ALARM, 1, "ALARM", IORESOURCE_IRQ),
+	DEFINE_RES_NAMED(DA9062_IRQ_TICK, 1, "TICK", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_onkey_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_ONKEY, 1, "ONKEY", IORESOURCE_IRQ),
+};
+
+static const struct mfd_cell da9062_devs[] = {
+	{
+		.name		= "da9062-core",
+		.num_resources	= ARRAY_SIZE(da9062_core_resources),
+		.resources	= da9062_core_resources,
+	},
+	{
+		.name		= "da9062-regulators",
+		.num_resources	= ARRAY_SIZE(da9062_regulators_resources),
+		.resources	= da9062_regulators_resources,
+	},
+	{
+		.name		= "da9062-watchdog",
+		.num_resources	= ARRAY_SIZE(da9062_wdt_resources),
+		.resources	= da9062_wdt_resources,
+		.of_compatible  = "dlg,da9062-wdt",
+	},
+	{
+		.name		= "da9062-thermal",
+		.num_resources	= ARRAY_SIZE(da9062_thermal_resources),
+		.resources	= da9062_thermal_resources,
+		.of_compatible  = "dlg,da9062-thermal",
+	},
+	{
+		.name		= "da9062-rtc",
+		.num_resources	= ARRAY_SIZE(da9062_rtc_resources),
+		.resources	= da9062_rtc_resources,
+		.of_compatible  = "dlg,da9062-rtc",
+	},
+	{
+		.name		= "da9062-onkey",
+		.num_resources	= ARRAY_SIZE(da9062_onkey_resources),
+		.resources	= da9062_onkey_resources,
+		.of_compatible = "dlg,da9062-onkey",
+	},
+};
+
+static int da9062_clear_fault_log(struct da9062 *chip)
+{
+	int ret;
+	int fault_log;
+
+	ret = regmap_read(chip->regmap, DA9062AA_FAULT_LOG, &fault_log);
+	if (ret < 0)
+		return ret;
+
+	if (fault_log) {
+		if (fault_log & DA9062AA_TWD_ERROR_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: TWD_ERROR\n");
+		if (fault_log & DA9062AA_POR_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: POR\n");
+		if (fault_log & DA9062AA_VDD_FAULT_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: VDD_FAULT\n");
+		if (fault_log & DA9062AA_VDD_START_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: VDD_START\n");
+		if (fault_log & DA9062AA_TEMP_CRIT_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: TEMP_CRIT\n");
+		if (fault_log & DA9062AA_KEY_RESET_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: KEY_RESET\n");
+		if (fault_log & DA9062AA_NSHUTDOWN_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: NSHUTDOWN\n");
+		if (fault_log & DA9062AA_WAIT_SHUT_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: WAIT_SHUT\n");
+
+		ret = regmap_write(chip->regmap, DA9062AA_FAULT_LOG,
+				   fault_log);
+	}
+
+	return ret;
+}
+
+static int get_device_type(struct da9062 *chip)
+{
+	int device_id, variant_id, variant_mrc;
+	int ret;
+
+	ret = regmap_read(chip->regmap, DA9062AA_DEVICE_ID, &device_id);
+	if (ret < 0) {
+		dev_err(chip->dev, "Cannot read chip ID.\n");
+		return -EIO;
+	}
+	if (device_id != DA9062_PMIC_DEVICE_ID) {
+		dev_err(chip->dev, "Invalid device ID: 0x%02x\n", device_id);
+		return -ENODEV;
+	}
+
+	ret = regmap_read(chip->regmap, DA9062AA_VARIANT_ID, &variant_id);
+	if (ret < 0) {
+		dev_err(chip->dev, "Cannot read chip variant id.\n");
+		return -EIO;
+	}
+
+	dev_info(chip->dev,
+		 "Device detected (device-ID: 0x%02X, var-ID: 0x%02X)\n",
+		 device_id, variant_id);
+
+	variant_mrc = (variant_id & DA9062AA_MRC_MASK) >> DA9062AA_MRC_SHIFT;
+
+	if (variant_mrc < DA9062_PMIC_VARIANT_MRC_AA) {
+		dev_err(chip->dev,
+			"Cannot support variant MRC: 0x%02X\n", variant_mrc);
+		return -ENODEV;
+	}
+
+	return ret;
+}
+
+static const struct regmap_range da9062_aa_readable_ranges[] = {
+	{
+		.range_min = DA9062AA_PAGE_CON,
+		.range_max = DA9062AA_STATUS_B,
+	}, {
+		.range_min = DA9062AA_STATUS_D,
+		.range_max = DA9062AA_EVENT_C,
+	}, {
+		.range_min = DA9062AA_IRQ_MASK_A,
+		.range_max = DA9062AA_IRQ_MASK_C,
+	}, {
+		.range_min = DA9062AA_CONTROL_A,
+		.range_max = DA9062AA_GPIO_4,
+	}, {
+		.range_min = DA9062AA_GPIO_WKUP_MODE,
+		.range_max = DA9062AA_BUCK4_CONT,
+	}, {
+		.range_min = DA9062AA_BUCK3_CONT,
+		.range_max = DA9062AA_BUCK3_CONT,
+	}, {
+		.range_min = DA9062AA_LDO1_CONT,
+		.range_max = DA9062AA_LDO4_CONT,
+	}, {
+		.range_min = DA9062AA_DVC_1,
+		.range_max = DA9062AA_DVC_1,
+	}, {
+		.range_min = DA9062AA_COUNT_S,
+		.range_max = DA9062AA_SECOND_D,
+	}, {
+		.range_min = DA9062AA_SEQ,
+		.range_max = DA9062AA_ID_4_3,
+	}, {
+		.range_min = DA9062AA_ID_12_11,
+		.range_max = DA9062AA_ID_16_15,
+	}, {
+		.range_min = DA9062AA_ID_22_21,
+		.range_max = DA9062AA_ID_32_31,
+	}, {
+		.range_min = DA9062AA_SEQ_A,
+		.range_max = DA9062AA_BUCK3_CFG,
+	}, {
+		.range_min = DA9062AA_VBUCK2_A,
+		.range_max = DA9062AA_VBUCK4_A,
+	}, {
+		.range_min = DA9062AA_VBUCK3_A,
+		.range_max = DA9062AA_VBUCK3_A,
+	}, {
+		.range_min = DA9062AA_VLDO1_A,
+		.range_max = DA9062AA_VLDO4_A,
+	}, {
+		.range_min = DA9062AA_VBUCK2_B,
+		.range_max = DA9062AA_VBUCK4_B,
+	}, {
+		.range_min = DA9062AA_VBUCK3_B,
+		.range_max = DA9062AA_VBUCK3_B,
+	}, {
+		.range_min = DA9062AA_VLDO1_B,
+		.range_max = DA9062AA_VLDO4_B,
+	}, {
+		.range_min = DA9062AA_BBAT_CONT,
+		.range_max = DA9062AA_BBAT_CONT,
+	}, {
+		.range_min = DA9062AA_INTERFACE,
+		.range_max = DA9062AA_CONFIG_E,
+	}, {
+		.range_min = DA9062AA_CONFIG_G,
+		.range_max = DA9062AA_CONFIG_K,
+	}, {
+		.range_min = DA9062AA_CONFIG_M,
+		.range_max = DA9062AA_CONFIG_M,
+	}, {
+		.range_min = DA9062AA_TRIM_CLDR,
+		.range_max = DA9062AA_GP_ID_19,
+	}, {
+		.range_min = DA9062AA_DEVICE_ID,
+		.range_max = DA9062AA_CONFIG_ID,
+	},
+};
+
+static const struct regmap_range da9062_aa_writeable_ranges[] = {
+	{
+		.range_min = DA9062AA_PAGE_CON,
+		.range_max = DA9062AA_PAGE_CON,
+	}, {
+		.range_min = DA9062AA_FAULT_LOG,
+		.range_max = DA9062AA_EVENT_C,
+	}, {
+		.range_min = DA9062AA_IRQ_MASK_A,
+		.range_max = DA9062AA_IRQ_MASK_C,
+	}, {
+		.range_min = DA9062AA_CONTROL_A,
+		.range_max = DA9062AA_GPIO_4,
+	}, {
+		.range_min = DA9062AA_GPIO_WKUP_MODE,
+		.range_max = DA9062AA_BUCK4_CONT,
+	}, {
+		.range_min = DA9062AA_BUCK3_CONT,
+		.range_max = DA9062AA_BUCK3_CONT,
+	}, {
+		.range_min = DA9062AA_LDO1_CONT,
+		.range_max = DA9062AA_LDO4_CONT,
+	}, {
+		.range_min = DA9062AA_DVC_1,
+		.range_max = DA9062AA_DVC_1,
+	}, {
+		.range_min = DA9062AA_COUNT_S,
+		.range_max = DA9062AA_ALARM_Y,
+	}, {
+		.range_min = DA9062AA_SEQ,
+		.range_max = DA9062AA_ID_4_3,
+	}, {
+		.range_min = DA9062AA_ID_12_11,
+		.range_max = DA9062AA_ID_16_15,
+	}, {
+		.range_min = DA9062AA_ID_22_21,
+		.range_max = DA9062AA_ID_32_31,
+	}, {
+		.range_min = DA9062AA_SEQ_A,
+		.range_max = DA9062AA_BUCK3_CFG,
+	}, {
+		.range_min = DA9062AA_VBUCK2_A,
+		.range_max = DA9062AA_VBUCK4_A,
+	}, {
+		.range_min = DA9062AA_VBUCK3_A,
+		.range_max = DA9062AA_VBUCK3_A,
+	}, {
+		.range_min = DA9062AA_VLDO1_A,
+		.range_max = DA9062AA_VLDO4_A,
+	}, {
+		.range_min = DA9062AA_VBUCK2_B,
+		.range_max = DA9062AA_VBUCK4_B,
+	}, {
+		.range_min = DA9062AA_VBUCK3_B,
+		.range_max = DA9062AA_VBUCK3_B,
+	}, {
+		.range_min = DA9062AA_VLDO1_B,
+		.range_max = DA9062AA_VLDO4_B,
+	}, {
+		.range_min = DA9062AA_BBAT_CONT,
+		.range_max = DA9062AA_BBAT_CONT,
+	}, {
+		.range_min = DA9062AA_GP_ID_0,
+		.range_max = DA9062AA_GP_ID_19,
+	},
+};
+
+static const struct regmap_range da9062_aa_volatile_ranges[] = {
+	{
+		.range_min = DA9062AA_PAGE_CON,
+		.range_max = DA9062AA_STATUS_B,
+	}, {
+		.range_min = DA9062AA_STATUS_D,
+		.range_max = DA9062AA_EVENT_C,
+	}, {
+		.range_min = DA9062AA_CONTROL_F,
+		.range_max = DA9062AA_CONTROL_F,
+	}, {
+		.range_min = DA9062AA_COUNT_S,
+		.range_max = DA9062AA_SECOND_D,
+	},
+};
+
+static const struct regmap_access_table da9062_aa_readable_table = {
+	.yes_ranges = da9062_aa_readable_ranges,
+	.n_yes_ranges = ARRAY_SIZE(da9062_aa_readable_ranges),
+};
+
+static const struct regmap_access_table da9062_aa_writeable_table = {
+	.yes_ranges = da9062_aa_writeable_ranges,
+	.n_yes_ranges = ARRAY_SIZE(da9062_aa_writeable_ranges),
+};
+
+static const struct regmap_access_table da9062_aa_volatile_table = {
+	.yes_ranges = da9062_aa_volatile_ranges,
+	.n_yes_ranges = ARRAY_SIZE(da9062_aa_volatile_ranges),
+};
+
+static const struct regmap_range_cfg da9062_range_cfg[] = {
+	{
+		.range_min = DA9062AA_PAGE_CON,
+		.range_max = DA9062AA_CONFIG_ID,
+		.selector_reg = DA9062AA_PAGE_CON,
+		.selector_mask = 1 << DA9062_I2C_PAGE_SEL_SHIFT,
+		.selector_shift = DA9062_I2C_PAGE_SEL_SHIFT,
+		.window_start = 0,
+		.window_len = 256,
+	}
+};
+
+static struct regmap_config da9062_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.ranges = da9062_range_cfg,
+	.num_ranges = ARRAY_SIZE(da9062_range_cfg),
+	.max_register = DA9062AA_CONFIG_ID,
+	.cache_type = REGCACHE_RBTREE,
+	.rd_table = &da9062_aa_readable_table,
+	.wr_table = &da9062_aa_writeable_table,
+	.volatile_table = &da9062_aa_volatile_table,
+};
+
+static int da9062_i2c_probe(struct i2c_client *i2c,
+	const struct i2c_device_id *id)
+{
+	struct da9062 *chip;
+	unsigned int irq_base;
+	int ret;
+
+	chip = devm_kzalloc(&i2c->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	i2c_set_clientdata(i2c, chip);
+	chip->dev = &i2c->dev;
+
+	if (!i2c->irq) {
+		dev_err(chip->dev, "No IRQ configured\n");
+		return -EINVAL;
+	}
+
+	chip->regmap = devm_regmap_init_i2c(i2c, &da9062_regmap_config);
+	if (IS_ERR(chip->regmap)) {
+		ret = PTR_ERR(chip->regmap);
+		dev_err(chip->dev, "Failed to allocate register map: %d\n",
+			ret);
+		return ret;
+	}
+
+	ret = da9062_clear_fault_log(chip);
+	if (ret < 0)
+		dev_warn(chip->dev, "Cannot clear fault log\n");
+
+	ret = get_device_type(chip);
+	if (ret)
+		return ret;
+
+	ret = regmap_add_irq_chip(chip->regmap, i2c->irq,
+			IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED,
+			-1, &da9062_irq_chip,
+			&chip->regmap_irq);
+	if (ret) {
+		dev_err(chip->dev, "Failed to request IRQ %d: %d\n",
+			i2c->irq, ret);
+		return ret;
+	}
+
+	irq_base = regmap_irq_chip_get_base(chip->regmap_irq);
+
+	ret = mfd_add_devices(chip->dev, PLATFORM_DEVID_NONE, da9062_devs,
+			      ARRAY_SIZE(da9062_devs), NULL, irq_base,
+			      NULL);
+	if (ret) {
+		dev_err(chip->dev, "Cannot register child devices\n");
+		regmap_del_irq_chip(i2c->irq, chip->regmap_irq);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int da9062_i2c_remove(struct i2c_client *i2c)
+{
+	struct da9062 *chip = i2c_get_clientdata(i2c);
+
+	mfd_remove_devices(chip->dev);
+	regmap_del_irq_chip(i2c->irq, chip->regmap_irq);
+
+	return 0;
+}
+
+static const struct i2c_device_id da9062_i2c_id[] = {
+	{ "da9062", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, da9062_i2c_id);
+
+static const struct of_device_id da9062_dt_ids[] = {
+	{ .compatible = "dlg,da9062", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, da9062_dt_ids);
+
+static struct i2c_driver da9062_i2c_driver = {
+	.driver = {
+		.name = "da9062",
+		.of_match_table = of_match_ptr(da9062_dt_ids),
+	},
+	.probe    = da9062_i2c_probe,
+	.remove   = da9062_i2c_remove,
+	.id_table = da9062_i2c_id,
+};
+
+module_i2c_driver(da9062_i2c_driver);
+
+MODULE_DESCRIPTION("Core device driver for Dialog DA9062");
+MODULE_AUTHOR("Steve Twiss <stwiss.opensource@diasemi.com>");
+MODULE_LICENSE("GPL");
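
da9062_range_cfg above describes the chip's paged register map: regmap
writes the page-select bit in DA9062AA_PAGE_CON before touching a register
that lives on the other page, so callers address the whole window linearly
and never see the paging. A minimal consumer sketch (hypothetical helper):

	static int da9062_read_variant_sketch(struct da9062 *chip,
					      unsigned int *val)
	{
		/* regmap updates DA9062AA_PAGE_CON first if needed */
		return regmap_read(chip->regmap, DA9062AA_VARIANT_ID, val);
	}
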
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index 6f3a7c0..2d4e3e0 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -264,7 +264,6 @@
 static struct i2c_driver da9063_i2c_driver = {
 	.driver = {
 		.name = "da9063",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(da9063_dt_ids),
 	},
 	.probe    = da9063_i2c_probe,
diff --git a/drivers/mfd/da9063-irq.c b/drivers/mfd/da9063-irq.c
index eaf1ec9..2630263 100644
--- a/drivers/mfd/da9063-irq.c
+++ b/drivers/mfd/da9063-irq.c
@@ -77,6 +77,10 @@
 		.reg_offset = DA9063_REG_EVENT_B_OFFSET,
 		.mask = DA9063_M_UVOV,
 	},
+	[DA9063_IRQ_DVC_RDY] = {
+		.reg_offset = DA9063_REG_EVENT_B_OFFSET,
+		.mask = DA9063_M_DVC_RDY,
+	},
 	[DA9063_IRQ_VDD_MON] = {
 		.reg_offset = DA9063_REG_EVENT_B_OFFSET,
 		.mask = DA9063_M_VDD_MON,
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 8b14740..e6e4bac 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2654,7 +2654,6 @@
 {
 	irq_set_chip_and_handler(virq, &prcmu_irq_chip,
 				handle_simple_irq);
-	set_irq_flags(virq, IRQF_VALID);
 
 	return 0;
 }
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 5991fad..a76eb6e 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -207,7 +207,7 @@
 
 static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
-	struct pcap_chip *pcap = irq_get_handler_data(irq);
+	struct pcap_chip *pcap = irq_desc_get_handler_data(desc);
 
 	desc->irq_data.chip->irq_ack(&desc->irq_data);
 	queue_work(pcap->workqueue, &pcap->isr_work);
@@ -463,11 +463,7 @@
 	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
 		irq_set_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq);
 		irq_set_chip_data(i, pcap);
-#ifdef CONFIG_ARM
-		set_irq_flags(i, IRQF_VALID);
-#else
-		irq_set_noprobe(i);
-#endif
+		irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	/* mask/ack all PCAP interrupts */
@@ -476,8 +472,7 @@
 	pcap->msr = PCAP_MASK_ALL_INTERRUPT;
 
 	irq_set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
-	irq_set_handler_data(spi->irq, pcap);
-	irq_set_chained_handler(spi->irq, pcap_irq_handler);
+	irq_set_chained_handler_and_data(spi->irq, pcap_irq_handler, pcap);
 	irq_set_irq_wake(spi->irq, 1);
 
 	/* ADC */
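
irq_set_chained_handler_and_data() folds the old two-call sequence into one
operation performed under the IRQ descriptor lock, closing the window where
the chained handler could fire before its handler data was in place. The
conversion pattern used here and in the drivers below, as a sketch:

	/* before: the handler could run between these two calls */
	irq_set_handler_data(spi->irq, pcap);
	irq_set_chained_handler(spi->irq, pcap_irq_handler);

	/* after: both installed atomically */
	irq_set_chained_handler_and_data(spi->irq, pcap_irq_handler, pcap);
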
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index 49f39fe..9131cdc 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -350,11 +350,11 @@
 			irq_set_chip_and_handler(irq, &egpio_muxed_chip,
 						 handle_simple_irq);
 			irq_set_chip_data(irq, ei);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+			irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 		irq_set_irq_type(ei->chained_irq, IRQ_TYPE_EDGE_RISING);
-		irq_set_handler_data(ei->chained_irq, ei);
-		irq_set_chained_handler(ei->chained_irq, egpio_handler);
+		irq_set_chained_handler_and_data(ei->chained_irq,
+						 egpio_handler, ei);
 		ack_irqs(ei);
 
 		device_init_wakeup(&pdev->dev, 1);
@@ -376,7 +376,7 @@
 		irq_end = ei->irq_start + ei->nirqs;
 		for (irq = ei->irq_start; irq < irq_end; irq++) {
 			irq_set_chip_and_handler(irq, NULL, NULL);
-			set_irq_flags(irq, 0);
+			irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 		irq_set_chained_handler(ei->chained_irq, NULL);
 		device_init_wakeup(&pdev->dev, 0);
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index b54baad..1bd5b04 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -330,11 +330,7 @@
 		irq_set_chip_and_handler(irq, &htcpld_muxed_chip,
 					 handle_simple_irq);
 		irq_set_chip_data(irq, chip);
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-#else
-		irq_set_probe(irq);
-#endif
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	return ret;
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index 7b50b6b..d9e15cf 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -24,8 +24,25 @@
 #include <linux/acpi.h>
 #include <linux/regmap.h>
 #include <linux/mfd/intel_soc_pmic.h>
+#include <linux/gpio/machine.h>
+#include <linux/pwm.h>
 #include "intel_soc_pmic_core.h"
 
+/* Lookup table for the Panel Enable/Disable line as GPIO signals */
+static struct gpiod_lookup_table panel_gpio_table = {
+	/* Intel GFX is consumer */
+	.dev_id = "0000:00:02.0",
+	.table = {
+		/* Panel EN/DISABLE */
+		GPIO_LOOKUP("gpio_crystalcove", 94, "panel", GPIO_ACTIVE_HIGH),
+	},
+};
+
+/* PWM consumed by the Intel GFX */
+static struct pwm_lookup crc_pwm_lookup[] = {
+	PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_backlight", 0, PWM_POLARITY_NORMAL),
+};
+
 static int intel_soc_pmic_find_gpio_irq(struct device *dev)
 {
 	struct gpio_desc *desc;
@@ -85,6 +102,12 @@
 	if (ret)
 		dev_warn(dev, "Can't enable IRQ as wake source: %d\n", ret);
 
+	/* Add lookup table binding for Panel Control to the GPIO Chip */
+	gpiod_add_lookup_table(&panel_gpio_table);
+
+	/* Add lookup table for crc-pwm */
+	pwm_add_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
+
 	ret = mfd_add_devices(dev, -1, config->cell_dev,
 			      config->n_cell_devs, NULL, 0,
 			      regmap_irq_get_domain(pmic->irq_chip_data));
@@ -104,6 +127,12 @@
 
 	regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
 
+	/* Remove lookup table for Panel Control from the GPIO Chip */
+	gpiod_remove_lookup_table(&panel_gpio_table);
+
+	/* remove crc-pwm lookup table */
+	pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
+
 	mfd_remove_devices(&i2c->dev);
 
 	return 0;
@@ -147,7 +176,7 @@
 MODULE_DEVICE_TABLE(i2c, intel_soc_pmic_i2c_id);
 
 #if defined(CONFIG_ACPI)
-static struct acpi_device_id intel_soc_pmic_acpi_match[] = {
+static const struct acpi_device_id intel_soc_pmic_acpi_match[] = {
 	{"INT33FD", (kernel_ulong_t)&intel_soc_pmic_config_crc},
 	{ },
 };
@@ -157,7 +186,6 @@
 static struct i2c_driver intel_soc_pmic_i2c_driver = {
 	.driver = {
 		.name = "intel_soc_pmic_i2c",
-		.owner = THIS_MODULE,
 		.pm = &intel_soc_pmic_pm_ops,
 		.acpi_match_table = ACPI_PTR(intel_soc_pmic_acpi_match),
 	},
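
gpiod_add_lookup_table() lets this PMIC driver hand a GPIO line to an
unrelated consumer without device-tree support: .dev_id names the consumer
("0000:00:02.0" is the Intel graphics PCI device) and "panel" is the
connection id it will request by. Consumer-side sketch (gfx_dev is a
hypothetical pointer to that device):

	struct gpio_desc *panel;

	/* resolves via GPIO_LOOKUP("gpio_crystalcove", 94, "panel", ...) */
	panel = devm_gpiod_get(gfx_dev, "panel", GPIOD_OUT_HIGH);
	if (IS_ERR(panel))
		return PTR_ERR(panel);
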
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index 7436075..4a74948 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -109,6 +109,9 @@
 	{
 		.name = "crystal_cove_pmic",
 	},
+	{
+		.name = "crystal_cove_pwm",
+	},
 };
 
 static const struct regmap_config crystal_cove_regmap_config = {
diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c
index 8df3266..a41859c 100644
--- a/drivers/mfd/ipaq-micro.c
+++ b/drivers/mfd/ipaq-micro.c
@@ -53,8 +53,8 @@
 	tx->buf[bp++] = checksum;
 	tx->len = bp;
 	tx->index = 0;
-	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
-		       tx->buf, tx->len, true);
+	print_hex_dump_debug("data: ", DUMP_PREFIX_OFFSET, 16, 1,
+			     tx->buf, tx->len, true);
 
 	/* Enable interrupt */
 	val = readl(micro->base + UTCR3);
@@ -242,7 +242,7 @@
 	return data[1] << 8 | data[0];
 }
 
-static void ipaq_micro_eeprom_dump(struct ipaq_micro *micro)
+static void __init ipaq_micro_eeprom_dump(struct ipaq_micro *micro)
 {
 	u8 dump[256];
 	char *str;
@@ -250,7 +250,7 @@
 	ipaq_micro_eeprom_read(micro, 0, 128, dump);
 	str = ipaq_micro_str(dump, 10);
 	if (str) {
-		dev_info(micro->dev, "HM version %s\n", str);
+		dev_info(micro->dev, "HW version %s\n", str);
 		kfree(str);
 	}
 	str = ipaq_micro_str(dump+10, 40);
@@ -281,8 +281,8 @@
 	dev_info(micro->dev, "RAM size: %u KiB\n", ipaq_micro_to_u16(dump+92));
 	dev_info(micro->dev, "screen: %u x %u\n",
 		 ipaq_micro_to_u16(dump+94), ipaq_micro_to_u16(dump+96));
-	print_hex_dump(KERN_DEBUG, "eeprom: ", DUMP_PREFIX_OFFSET, 16, 1,
-		       dump, 256, true);
+	print_hex_dump_debug("eeprom: ", DUMP_PREFIX_OFFSET, 16, 1,
+			     dump, 256, true);
 
 }
 
@@ -386,7 +386,7 @@
 	return 0;
 }
 
-static int micro_probe(struct platform_device *pdev)
+static int __init micro_probe(struct platform_device *pdev)
 {
 	struct ipaq_micro *micro;
 	struct resource *res;
@@ -448,21 +448,6 @@
 	return 0;
 }
 
-static int micro_remove(struct platform_device *pdev)
-{
-	struct ipaq_micro *micro = platform_get_drvdata(pdev);
-	u32 val;
-
-	mfd_remove_devices(&pdev->dev);
-
-	val = readl(micro->base + UTCR3);
-	val &= ~(UTCR3_RXE | UTCR3_RIE); /* disable receive interrupt */
-	val &= ~(UTCR3_TXE | UTCR3_TIE); /* disable transmit interrupt */
-	writel(val, micro->base + UTCR3);
-
-	return 0;
-}
-
 static const struct dev_pm_ops micro_dev_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(NULL, micro_resume)
 };
@@ -471,12 +456,7 @@
 	.driver   = {
 		.name	= "ipaq-h3xxx-micro",
 		.pm	= &micro_dev_pm_ops,
+		.suppress_bind_attrs = true,
 	},
-	.probe    = micro_probe,
-	.remove   = micro_remove,
-	/* .shutdown = micro_suspend, // FIXME */
 };
-module_platform_driver(micro_device_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("driver for iPAQ Atmel micro core and backlight");
+builtin_platform_driver_probe(micro_device_driver, micro_probe);
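
builtin_platform_driver_probe() is the new boilerplate-killing helper for
platform drivers that are always built in (hence the dropped MODULE_* tags,
the removed remove() path and the added suppress_bind_attrs). It expands to
roughly this sketch:

	static int __init micro_device_driver_init(void)
	{
		return platform_driver_probe(&micro_device_driver,
					     micro_probe);
	}
	device_initcall(micro_device_driver_init);
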
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index b31c54e..5bb49f0 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -273,12 +273,12 @@
 	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
 	ct->chip.irq_ack = irq_gc_ack_set_bit;
 
-	irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
+	irq_setup_generic_chip(gc, IRQ_MSK(5), IRQ_GC_INIT_MASK_CACHE, 0,
+				IRQ_NOPROBE | IRQ_LEVEL);
 
 	adc->gc = gc;
 
-	irq_set_handler_data(adc->irq, gc);
-	irq_set_chained_handler(adc->irq, jz4740_adc_irq_demux);
+	irq_set_chained_handler_and_data(adc->irq, jz4740_adc_irq_demux, gc);
 
 	writeb(0x00, adc->base + JZ_REG_ADC_ENABLE);
 	writeb(0xff, adc->base + JZ_REG_ADC_CTRL);
@@ -308,8 +308,7 @@
 
 	irq_remove_generic_chip(adc->gc, IRQ_MSK(5), IRQ_NOPROBE | IRQ_LEVEL, 0);
 	kfree(adc->gc);
-	irq_set_handler_data(adc->irq, NULL);
-	irq_set_chained_handler(adc->irq, NULL);
+	irq_set_chained_handler_and_data(adc->irq, NULL, NULL);
 
 	iounmap(adc->base);
 	release_mem_region(adc->mem->start, resource_size(adc->mem));
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 8057849..463f4ea 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -501,6 +501,14 @@
 
 static struct dmi_system_id kempld_dmi_table[] __initdata = {
 	{
+		.ident = "BBL6",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+			DMI_MATCH(DMI_BOARD_NAME, "COMe-bBL6"),
+		},
+		.driver_data = (void *)&kempld_platform_data_generic,
+		.callback = kempld_create_platform_device,
+	}, {
 		.ident = "BHL6",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
@@ -517,6 +525,14 @@
 		.driver_data = (void *)&kempld_platform_data_generic,
 		.callback = kempld_create_platform_device,
 	}, {
+		.ident = "CBW6",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+			DMI_MATCH(DMI_BOARD_NAME, "COMe-cBW6"),
+		},
+		.driver_data = (void *)&kempld_platform_data_generic,
+		.callback = kempld_create_platform_device,
+	}, {
 		.ident = "CCR2",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
index d42fbb6..643f375 100644
--- a/drivers/mfd/lm3533-core.c
+++ b/drivers/mfd/lm3533-core.c
@@ -640,7 +640,6 @@
 static struct i2c_driver lm3533_i2c_driver = {
 	.driver = {
 		   .name = "lm3533",
-		   .owner = THIS_MODULE,
 	},
 	.id_table	= lm3533_i2c_ids,
 	.probe		= lm3533_i2c_probe,
diff --git a/drivers/mfd/lp3943.c b/drivers/mfd/lp3943.c
index 335b930..eecbb13 100644
--- a/drivers/mfd/lp3943.c
+++ b/drivers/mfd/lp3943.c
@@ -154,7 +154,6 @@
 	.remove = lp3943_remove,
 	.driver = {
 		.name = "lp3943",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(lp3943_of_match),
 	},
 	.id_table = lp3943_ids,
diff --git a/drivers/mfd/lp8788-irq.c b/drivers/mfd/lp8788-irq.c
index a87f2b5..c7a9825 100644
--- a/drivers/mfd/lp8788-irq.c
+++ b/drivers/mfd/lp8788-irq.c
@@ -141,12 +141,7 @@
 	irq_set_chip_data(virq, irqd);
 	irq_set_chip_and_handler(virq, chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
diff --git a/drivers/mfd/lp8788.c b/drivers/mfd/lp8788.c
index a30bc15..acf6165 100644
--- a/drivers/mfd/lp8788.c
+++ b/drivers/mfd/lp8788.c
@@ -221,7 +221,6 @@
 static struct i2c_driver lp8788_driver = {
 	.driver = {
 		.name = "lp8788",
-		.owner = THIS_MODULE,
 	},
 	.probe = lp8788_probe,
 	.remove = lp8788_remove,
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 8de3439..c5a9a08 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -66,6 +66,7 @@
 #include <linux/pci.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/lpc_ich.h>
+#include <linux/platform_data/itco_wdt.h>
 
 #define ACPIBASE		0x40
 #define ACPIBASE_GPE_OFF	0x28
@@ -835,9 +836,31 @@
 	priv->actrl_pbase_save = reg_save;
 }
 
-static void lpc_ich_finalize_cell(struct pci_dev *dev, struct mfd_cell *cell)
+static int lpc_ich_finalize_wdt_cell(struct pci_dev *dev)
+{
+	struct itco_wdt_platform_data *pdata;
+	struct lpc_ich_priv *priv = pci_get_drvdata(dev);
+	struct lpc_ich_info *info;
+	struct mfd_cell *cell = &lpc_ich_cells[LPC_WDT];
+
+	pdata = devm_kzalloc(&dev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	info = &lpc_chipset_info[priv->chipset];
+
+	pdata->version = info->iTCO_version;
+	strlcpy(pdata->name, info->name, sizeof(pdata->name));
+
+	cell->platform_data = pdata;
+	cell->pdata_size = sizeof(*pdata);
+	return 0;
+}
+
+static void lpc_ich_finalize_gpio_cell(struct pci_dev *dev)
 {
 	struct lpc_ich_priv *priv = pci_get_drvdata(dev);
+	struct mfd_cell *cell = &lpc_ich_cells[LPC_GPIO];
 
 	cell->platform_data = &lpc_chipset_info[priv->chipset];
 	cell->pdata_size = sizeof(struct lpc_ich_info);
@@ -933,7 +956,7 @@
 	lpc_chipset_info[priv->chipset].use_gpio = ret;
 	lpc_ich_enable_gpio_space(dev);
 
-	lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_GPIO]);
+	lpc_ich_finalize_gpio_cell(dev);
 	ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO,
 			      &lpc_ich_cells[LPC_GPIO], 1, NULL, 0, NULL);
 
@@ -1007,7 +1030,10 @@
 		res->end = base_addr + ACPIBASE_PMC_END;
 	}
 
-	lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_WDT]);
+	ret = lpc_ich_finalize_wdt_cell(dev);
+	if (ret)
+		goto wdt_done;
+
 	ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO,
 			      &lpc_ich_cells[LPC_WDT], 1, NULL, 0, NULL);
 
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 3bf8def..56e216d 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -532,7 +532,6 @@
 static struct i2c_driver max14577_i2c_driver = {
 	.driver = {
 		.name = "max14577",
-		.owner = THIS_MODULE,
 		.pm = &max14577_pm,
 		.of_match_table = max14577_dt_match,
 	},
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index 760d08d..d19be64 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -391,7 +391,6 @@
 static struct i2c_driver max77686_i2c_driver = {
 	.driver = {
 		   .name = "max77686",
-		   .owner = THIS_MODULE,
 		   .pm = &max77686_pm,
 		   .of_match_table = of_match_ptr(max77686_pmic_dt_match),
 	},
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index 67bc53f..007f729 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -373,7 +373,6 @@
 static struct i2c_driver max77693_i2c_driver = {
 	.driver = {
 		   .name = "max77693",
-		   .owner = THIS_MODULE,
 		   .pm = &max77693_pm,
 		   .of_match_table = of_match_ptr(max77693_dt_match),
 	},
diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c
index 232749c..2974c8b1 100644
--- a/drivers/mfd/max8907.c
+++ b/drivers/mfd/max8907.c
@@ -321,7 +321,6 @@
 static struct i2c_driver max8907_i2c_driver = {
 	.driver = {
 		.name = "max8907",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(max8907_of_match),
 	},
 	.probe = max8907_i2c_probe,
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 8520bd6..fd8b15c 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -650,11 +650,8 @@
 	irq_set_chip_data(virq, d->host_data);
 	irq_set_chip_and_handler(virq, &max8925_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
+
 	return 0;
 }
 
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index c880c89..b0fe810 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -245,7 +245,6 @@
 static struct i2c_driver max8925_driver = {
 	.driver	= {
 		.name	= "max8925",
-		.owner	= THIS_MODULE,
 		.pm     = &max8925_pm_ops,
 		.of_match_table = max8925_dt_ids,
 	},
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c
index d3025be..b95a46d 100644
--- a/drivers/mfd/max8997-irq.c
+++ b/drivers/mfd/max8997-irq.c
@@ -113,14 +113,14 @@
 
 static void max8997_irq_lock(struct irq_data *data)
 {
-	struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+	struct max8997_dev *max8997 = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&max8997->irqlock);
 }
 
 static void max8997_irq_sync_unlock(struct irq_data *data)
 {
-	struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+	struct max8997_dev *max8997 = irq_data_get_irq_chip_data(data);
 	int i;
 
 	for (i = 0; i < MAX8997_IRQ_GROUP_NR; i++) {
@@ -140,26 +140,25 @@
 }
 
 static const inline struct max8997_irq_data *
-irq_to_max8997_irq(struct max8997_dev *max8997, int irq)
+irq_to_max8997_irq(struct max8997_dev *max8997, struct irq_data *data)
 {
-	struct irq_data *data = irq_get_irq_data(irq);
 	return &max8997_irqs[data->hwirq];
 }
 
 static void max8997_irq_mask(struct irq_data *data)
 {
-	struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+	struct max8997_dev *max8997 = irq_data_get_irq_chip_data(data);
 	const struct max8997_irq_data *irq_data = irq_to_max8997_irq(max8997,
-								data->irq);
+								     data);
 
 	max8997->irq_masks_cur[irq_data->group] |= irq_data->mask;
 }
 
 static void max8997_irq_unmask(struct irq_data *data)
 {
-	struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+	struct max8997_dev *max8997 = irq_data_get_irq_chip_data(data);
 	const struct max8997_irq_data *irq_data = irq_to_max8997_irq(max8997,
-								data->irq);
+								     data);
 
 	max8997->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
 }
@@ -295,11 +294,8 @@
 	irq_set_chip_data(irq, max8997);
 	irq_set_chip_and_handler(irq, &max8997_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
+
 	return 0;
 }
 
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 595364e..d3cfa9cf 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -508,7 +508,6 @@
 static struct i2c_driver max8997_i2c_driver = {
 	.driver = {
 		   .name = "max8997",
-		   .owner = THIS_MODULE,
 		   .pm = &max8997_pm,
 		   .of_match_table = of_match_ptr(max8997_pmic_dt_match),
 	},
diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c
index 3702056..90bad9f 100644
--- a/drivers/mfd/max8998-irq.c
+++ b/drivers/mfd/max8998-irq.c
@@ -98,9 +98,8 @@
 };
 
 static inline struct max8998_irq_data *
-irq_to_max8998_irq(struct max8998_dev *max8998, int irq)
+irq_to_max8998_irq(struct max8998_dev *max8998, struct irq_data *data)
 {
-	struct irq_data *data = irq_get_irq_data(irq);
 	return &max8998_irqs[data->hwirq];
 }
 
@@ -134,8 +133,7 @@
 static void max8998_irq_unmask(struct irq_data *data)
 {
 	struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
-	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998,
-							       data->irq);
+	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, data);
 
 	max8998->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
 }
@@ -143,8 +141,7 @@
 static void max8998_irq_mask(struct irq_data *data)
 {
 	struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
-	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998,
-							       data->irq);
+	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, data);
 
 	max8998->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
 }
@@ -206,11 +203,8 @@
 	irq_set_chip_data(irq, max8998);
 	irq_set_chip_and_handler(irq, &max8998_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
+
 	return 0;
 }
 
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index a37cb74..a7afe3b 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -377,7 +377,6 @@
 static struct i2c_driver max8998_i2c_driver = {
 	.driver = {
 		   .name = "max8998",
-		   .owner = THIS_MODULE,
 		   .pm = &max8998_pm,
 		   .of_match_table = of_match_ptr(max8998_dt_match),
 	},
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index 68b8448..67e4c9a 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -96,7 +96,6 @@
 static struct i2c_driver mc13xxx_i2c_driver = {
 	.id_table = mc13xxx_i2c_device_id,
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "mc13xxx",
 		.of_match_table = mc13xxx_dt_ids,
 	},
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 03929a6..1749c1c 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -60,14 +60,14 @@
 
 static void mt6397_irq_lock(struct irq_data *data)
 {
-	struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&mt6397->irqlock);
 }
 
 static void mt6397_irq_sync_unlock(struct irq_data *data)
 {
-	struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
 
 	regmap_write(mt6397->regmap, MT6397_INT_CON0, mt6397->irq_masks_cur[0]);
 	regmap_write(mt6397->regmap, MT6397_INT_CON1, mt6397->irq_masks_cur[1]);
@@ -77,7 +77,7 @@
 
 static void mt6397_irq_disable(struct irq_data *data)
 {
-	struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
 	int shift = data->hwirq & 0xf;
 	int reg = data->hwirq >> 4;
 
@@ -86,19 +86,38 @@
 
 static void mt6397_irq_enable(struct irq_data *data)
 {
-	struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
 	int shift = data->hwirq & 0xf;
 	int reg = data->hwirq >> 4;
 
 	mt6397->irq_masks_cur[reg] |= BIT(shift);
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int mt6397_irq_set_wake(struct irq_data *irq_data, unsigned int on)
+{
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(irq_data);
+	int shift = irq_data->hwirq & 0xf;
+	int reg = irq_data->hwirq >> 4;
+
+	if (on)
+		mt6397->wake_mask[reg] |= BIT(shift);
+	else
+		mt6397->wake_mask[reg] &= ~BIT(shift);
+
+	return 0;
+}
+#else
+#define mt6397_irq_set_wake NULL
+#endif
+
 static struct irq_chip mt6397_irq_chip = {
 	.name = "mt6397-irq",
 	.irq_bus_lock = mt6397_irq_lock,
 	.irq_bus_sync_unlock = mt6397_irq_sync_unlock,
 	.irq_enable = mt6397_irq_enable,
 	.irq_disable = mt6397_irq_disable,
+	.irq_set_wake = mt6397_irq_set_wake,
 };
 
 static void mt6397_irq_handle_reg(struct mt6397_chip *mt6397, int reg,
@@ -142,11 +161,7 @@
 	irq_set_chip_data(irq, mt6397);
 	irq_set_chip_and_handler(irq, &mt6397_irq_chip, handle_level_irq);
 	irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
 
 	return 0;
 }
@@ -183,6 +198,35 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int mt6397_irq_suspend(struct device *dev)
+{
+	struct mt6397_chip *chip = dev_get_drvdata(dev);
+
+	regmap_write(chip->regmap, MT6397_INT_CON0, chip->wake_mask[0]);
+	regmap_write(chip->regmap, MT6397_INT_CON1, chip->wake_mask[1]);
+
+	enable_irq_wake(chip->irq);
+
+	return 0;
+}
+
+static int mt6397_irq_resume(struct device *dev)
+{
+	struct mt6397_chip *chip = dev_get_drvdata(dev);
+
+	regmap_write(chip->regmap, MT6397_INT_CON0, chip->irq_masks_cur[0]);
+	regmap_write(chip->regmap, MT6397_INT_CON1, chip->irq_masks_cur[1]);
+
+	disable_irq_wake(chip->irq);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_irq_suspend,
+			mt6397_irq_resume);
+
 static int mt6397_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -237,6 +281,7 @@
 	.driver = {
 		.name = "mt6397",
 		.of_match_table = of_match_ptr(mt6397_of_match),
+		.pm = &mt6397_pm_ops,
 	},
 };
 
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 28cb048..8f8bacb 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -719,7 +719,6 @@
 	.driver = {
 		   .name = "palmas",
 		   .of_match_table = of_palmas_match_tbl,
-		   .owner = THIS_MODULE,
 	},
 	.probe = palmas_i2c_probe,
 	.remove = palmas_i2c_remove,
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
index 5a92646..59502d0 100644
--- a/drivers/mfd/pm8921-core.c
+++ b/drivers/mfd/pm8921-core.c
@@ -236,11 +236,49 @@
 	return pm8xxx_config_irq(chip, block, config);
 }
 
+static int pm8xxx_irq_get_irqchip_state(struct irq_data *d,
+					enum irqchip_irq_state which,
+					bool *state)
+{
+	struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+	unsigned int pmirq = irqd_to_hwirq(d);
+	unsigned int bits;
+	int irq_bit;
+	u8 block;
+	int rc;
+
+	if (which != IRQCHIP_STATE_LINE_LEVEL)
+		return -EINVAL;
+
+	block = pmirq / 8;
+	irq_bit = pmirq % 8;
+
+	spin_lock(&chip->pm_irq_lock);
+	rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_BLK_SEL, block);
+	if (rc) {
+		pr_err("Failed Selecting Block %d rc=%d\n", block, rc);
+		goto bail;
+	}
+
+	rc = regmap_read(chip->regmap, SSBI_REG_ADDR_IRQ_RT_STATUS, &bits);
+	if (rc) {
+		pr_err("Failed Reading Status rc=%d\n", rc);
+		goto bail;
+	}
+
+	*state = !!(bits & BIT(irq_bit));
+bail:
+	spin_unlock(&chip->pm_irq_lock);
+
+	return rc;
+}
+
 static struct irq_chip pm8xxx_irq_chip = {
 	.name		= "pm8xxx",
 	.irq_mask_ack	= pm8xxx_irq_mask_ack,
 	.irq_unmask	= pm8xxx_irq_unmask,
 	.irq_set_type	= pm8xxx_irq_set_type,
+	.irq_get_irqchip_state = pm8xxx_irq_get_irqchip_state,
 	.flags		= IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -251,11 +289,8 @@
 
 	irq_set_chip_and_handler(irq, &pm8xxx_irq_chip, handle_level_irq);
 	irq_set_chip_data(irq, chip);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
+
 	return 0;
 }
 
@@ -336,14 +371,12 @@
 	if (!chip->irqdomain)
 		return -ENODEV;
 
-	irq_set_handler_data(irq, chip);
-	irq_set_chained_handler(irq, pm8xxx_irq_handler);
+	irq_set_chained_handler_and_data(irq, pm8xxx_irq_handler, chip);
 	irq_set_irq_wake(irq, 1);
 
 	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
 	if (rc) {
-		irq_set_chained_handler(irq, NULL);
-		irq_set_handler_data(irq, NULL);
+		irq_set_chained_handler_and_data(irq, NULL, NULL);
 		irq_domain_remove(chip->irqdomain);
 	}
 
@@ -362,8 +395,7 @@
 	struct pm_irq_chip *chip = platform_get_drvdata(pdev);
 
 	device_for_each_child(&pdev->dev, NULL, pm8921_remove_child);
-	irq_set_chained_handler(irq, NULL);
-	irq_set_handler_data(irq, NULL);
+	irq_set_chained_handler_and_data(irq, NULL, NULL);
 	irq_domain_remove(chip->irqdomain);
 
 	return 0;
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 12e3243..6afc9fa 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -149,6 +149,7 @@
 	[QCOM_RPM_USB_OTG_SWITCH] =		{ 210, 125, 82, 1 },
 	[QCOM_RPM_HDMI_SWITCH] =		{ 211, 126, 83, 1 },
 	[QCOM_RPM_DDR_DMM] =			{ 212, 127, 84, 2 },
+	[QCOM_RPM_QDSS_CLK] =			{ 214, ~0, 7, 1 },
 	[QCOM_RPM_VDDMIN_GPIO] =		{ 215, 131, 89, 1 },
 };
 
diff --git a/drivers/mfd/rc5t583-irq.c b/drivers/mfd/rc5t583-irq.c
index bb85020..3f8812d 100644
--- a/drivers/mfd/rc5t583-irq.c
+++ b/drivers/mfd/rc5t583-irq.c
@@ -386,9 +386,7 @@
 		irq_set_chip_and_handler(__irq, &rc5t583_irq_chip,
 					 handle_simple_irq);
 		irq_set_nested_thread(__irq, 1);
-#ifdef CONFIG_ARM
-		set_irq_flags(__irq, IRQF_VALID);
-#endif
+		irq_clear_status_flags(__irq, IRQ_NOREQUEST);
 	}
 
 	ret = request_threaded_irq(irq, NULL, rc5t583_irq, IRQF_ONESHOT,
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index df276ad..e10f02f 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -322,7 +322,6 @@
 static struct i2c_driver rc5t583_i2c_driver = {
 	.driver = {
 		   .name = "rc5t583",
-		   .owner = THIS_MODULE,
 		   },
 	.probe = rc5t583_i2c_probe,
 	.remove = rc5t583_i2c_remove,
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
index 2d64430..d4c114a 100644
--- a/drivers/mfd/retu-mfd.c
+++ b/drivers/mfd/retu-mfd.c
@@ -311,7 +311,6 @@
 static struct i2c_driver retu_driver = {
 	.driver		= {
 		.name = "retu-mfd",
-		.owner = THIS_MODULE,
 	},
 	.probe		= retu_probe,
 	.remove		= retu_remove,
diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
index db395a6..d60f916 100644
--- a/drivers/mfd/rt5033.c
+++ b/drivers/mfd/rt5033.c
@@ -124,6 +124,7 @@
 	{ .compatible = "richtek,rt5033", },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, rt5033_dt_match);
 
 static struct i2c_driver rt5033_driver = {
 	.driver = {
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 4a69afb..d206a3e 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -486,7 +486,6 @@
 static struct i2c_driver sec_pmic_driver = {
 	.driver = {
 		   .name = "sec_pmic",
-		   .owner = THIS_MODULE,
 		   .pm = &sec_pmic_pm_ops,
 		   .of_match_table = of_match_ptr(sec_dt_match),
 	},
diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c
index e3deb46..fb4ce6d0 100644
--- a/drivers/mfd/si476x-i2c.c
+++ b/drivers/mfd/si476x-i2c.c
@@ -873,7 +873,6 @@
 static struct i2c_driver si476x_core_driver = {
 	.driver		= {
 		.name	= "si476x-core",
-		.owner  = THIS_MODULE,
 	},
 	.probe		= si476x_core_probe,
 	.remove         = si476x_core_remove,
diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c
index 0324688..a4c0df7 100644
--- a/drivers/mfd/smsc-ece1099.c
+++ b/drivers/mfd/smsc-ece1099.c
@@ -98,7 +98,6 @@
 static struct i2c_driver smsc_i2c_driver = {
 	.driver = {
 		   .name = "smsc",
-		   .owner = THIS_MODULE,
 	},
 	.probe = smsc_i2c_probe,
 	.remove = smsc_i2c_remove,
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index e14c8c9..c3f4aab 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -112,7 +112,6 @@
 static struct i2c_driver stmpe_i2c_driver = {
 	.driver = {
 		.name = "stmpe-i2c",
-		.owner = THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm = &stmpe_dev_pm_ops,
 #endif
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index 6fdb30e..618ba24 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -11,6 +11,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/types.h>
 #include "stmpe.h"
 
@@ -108,6 +109,17 @@
 	return stmpe_remove(stmpe);
 }
 
+static const struct of_device_id stmpe_spi_of_match[] = {
+	{ .compatible = "st,stmpe610", },
+	{ .compatible = "st,stmpe801", },
+	{ .compatible = "st,stmpe811", },
+	{ .compatible = "st,stmpe1601", },
+	{ .compatible = "st,stmpe2401", },
+	{ .compatible = "st,stmpe2403", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, stmpe_spi_of_match);
+
 static const struct spi_device_id stmpe_spi_id[] = {
 	{ "stmpe610", STMPE610 },
 	{ "stmpe801", STMPE801 },
@@ -122,6 +134,7 @@
 static struct spi_driver stmpe_spi_driver = {
 	.driver = {
 		.name	= "stmpe-spi",
+		.of_match_table = of_match_ptr(stmpe_spi_of_match),
 		.owner	= THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm	= &stmpe_dev_pm_ops,
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 18c4d72..e971af8 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -971,20 +971,13 @@
 	irq_set_chip_data(virq, stmpe);
 	irq_set_chip_and_handler(virq, chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
 
 static void stmpe_irq_unmap(struct irq_domain *d, unsigned int virq)
 {
-#ifdef CONFIG_ARM
-		set_irq_flags(virq, 0);
-#endif
 		irq_set_chip_and_handler(virq, NULL, NULL);
 		irq_set_chip_data(virq, NULL);
 }
diff --git a/drivers/mfd/stw481x.c b/drivers/mfd/stw481x.c
index 7ceb3df..ca613df 100644
--- a/drivers/mfd/stw481x.c
+++ b/drivers/mfd/stw481x.c
@@ -231,6 +231,7 @@
 	{ "stw481x", 0 },
 	{ },
 };
+MODULE_DEVICE_TABLE(i2c, stw481x_id);
 
 static const struct of_device_id stw481x_match[] = {
 	{ .compatible = "st,stw4810", },
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index c09fb5d..16fc1ad 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -187,7 +187,7 @@
 /* Handle the T7L66XB interrupt mux */
 static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct t7l66xb *t7l66xb = irq_get_handler_data(irq);
+	struct t7l66xb *t7l66xb = irq_desc_get_handler_data(desc);
 	unsigned int isr;
 	unsigned int i, irq_base;
 
@@ -246,14 +246,10 @@
 	for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) {
 		irq_set_chip_and_handler(irq, &t7l66xb_chip, handle_level_irq);
 		irq_set_chip_data(irq, t7l66xb);
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-#endif
 	}
 
 	irq_set_irq_type(t7l66xb->irq, IRQ_TYPE_EDGE_FALLING);
-	irq_set_handler_data(t7l66xb->irq, t7l66xb);
-	irq_set_chained_handler(t7l66xb->irq, t7l66xb_irq);
+	irq_set_chained_handler_and_data(t7l66xb->irq, t7l66xb_irq, t7l66xb);
 }
 
 static void t7l66xb_detach_irq(struct platform_device *dev)
@@ -263,13 +259,9 @@
 
 	irq_base = t7l66xb->irq_base;
 
-	irq_set_chained_handler(t7l66xb->irq, NULL);
-	irq_set_handler_data(t7l66xb->irq, NULL);
+	irq_set_chained_handler_and_data(t7l66xb->irq, NULL, NULL);
 
 	for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) {
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, 0);
-#endif
 		irq_set_chip(irq, NULL);
 		irq_set_chip_data(irq, NULL);
 	}
@@ -318,7 +310,7 @@
 	struct resource *iomem, *rscr;
 	int ret;
 
-	if (pdata == NULL)
+	if (!pdata)
 		return -EINVAL;
 
 	iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
@@ -371,7 +363,7 @@
 
 	clk_prepare_enable(t7l66xb->clk48m);
 
-	if (pdata && pdata->enable)
+	if (pdata->enable)
 		pdata->enable(dev);
 
 	/* Mask all interrupts */
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 96d420d..274bf39 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -215,20 +215,13 @@
 	irq_set_chip_and_handler(virq, &dummy_irq_chip,
 				handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
 
 static void tc3589x_irq_unmap(struct irq_domain *d, unsigned int virq)
 {
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, 0);
-#endif
 	irq_set_chip_and_handler(virq, NULL, NULL);
 	irq_set_chip_data(virq, NULL);
 }
@@ -492,7 +485,6 @@
 static struct i2c_driver tc3589x_driver = {
 	.driver = {
 		.name	= "tc3589x",
-		.owner	= THIS_MODULE,
 		.pm	= &tc3589x_dev_pm_ops,
 		.of_match_table = of_match_ptr(tc3589x_match),
 	},
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 63458b3..775b9ac 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -525,7 +525,7 @@
 static void
 tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct tc6393xb *tc6393xb = irq_get_handler_data(irq);
+	struct tc6393xb *tc6393xb = irq_desc_get_handler_data(desc);
 	unsigned int isr;
 	unsigned int i, irq_base;
 
@@ -586,12 +586,12 @@
 	for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
 		irq_set_chip_and_handler(irq, &tc6393xb_chip, handle_edge_irq);
 		irq_set_chip_data(irq, tc6393xb);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	irq_set_irq_type(tc6393xb->irq, IRQ_TYPE_EDGE_FALLING);
-	irq_set_handler_data(tc6393xb->irq, tc6393xb);
-	irq_set_chained_handler(tc6393xb->irq, tc6393xb_irq);
+	irq_set_chained_handler_and_data(tc6393xb->irq, tc6393xb_irq,
+					 tc6393xb);
 }
 
 static void tc6393xb_detach_irq(struct platform_device *dev)
@@ -599,13 +599,12 @@
 	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
 	unsigned int irq, irq_base;
 
-	irq_set_chained_handler(tc6393xb->irq, NULL);
-	irq_set_handler_data(tc6393xb->irq, NULL);
+	irq_set_chained_handler_and_data(tc6393xb->irq, NULL, NULL);
 
 	irq_base = tc6393xb->irq_base;
 
 	for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
-		set_irq_flags(irq, 0);
+		irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		irq_set_chip(irq, NULL);
 		irq_set_chip_data(irq, NULL);
 	}
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index a2e1990..1ab3dd6 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -129,7 +129,6 @@
 static struct i2c_driver tps6507x_i2c_driver = {
 	.driver = {
 		   .name = "tps6507x",
-		   .owner = THIS_MODULE,
 		   .of_match_table = of_match_ptr(tps6507x_of_match),
 	},
 	.probe = tps6507x_i2c_probe,
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 14b62e11a..f88085a 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -259,7 +259,6 @@
 static struct i2c_driver tps65090_driver = {
 	.driver	= {
 		.name	= "tps65090",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(tps65090_of_match),
 	},
 	.probe		= tps65090_i2c_probe,
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 7d1cfc1..55add04 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -156,6 +156,7 @@
 	{ .compatible = "ti,tps65217", .data = (void *)TPS65217 },
 	{ /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, tps65217_of_match);
 
 static int tps65217_probe(struct i2c_client *client,
 				const struct i2c_device_id *ids)
@@ -248,7 +249,6 @@
 static struct i2c_driver tps65217_driver = {
 	.driver		= {
 		.name	= "tps65217",
-		.owner	= THIS_MODULE,
 		.of_match_table = tps65217_of_match,
 	},
 	.id_table	= tps65217_id_table,
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 7af11a8..80b9dc3 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -211,6 +211,7 @@
 	{ .compatible = "ti,tps65218", },
 	{}
 };
+MODULE_DEVICE_TABLE(of, of_tps65218_match_table);
 
 static int tps65218_probe(struct i2c_client *client,
 				const struct i2c_device_id *ids)
@@ -280,7 +281,6 @@
 static struct i2c_driver tps65218_driver = {
 	.driver		= {
 		.name	= "tps65218",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_tps65218_match_table,
 	},
 	.probe		= tps65218_probe,
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index e0a2583..5628a6b 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -52,7 +52,7 @@
 #define TPS6586X_VERSIONCRC	0xcd
 
 /* Maximum register */
-#define TPS6586X_MAX_REGISTER	(TPS6586X_VERSIONCRC + 1)
+#define TPS6586X_MAX_REGISTER	TPS6586X_VERSIONCRC
 
 struct tps6586x_irq_data {
 	u8	mask_reg;
@@ -299,14 +299,7 @@
 	irq_set_chip_data(virq, tps6586x);
 	irq_set_chip_and_handler(virq, &tps6586x_irq_chip, handle_simple_irq);
 	irq_set_nested_thread(virq, 1);
-
-	/* ARM needs us to explicitly flag the IRQ as valid
-	 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
@@ -467,7 +460,7 @@
 static const struct regmap_config tps6586x_regmap_config = {
 	.reg_bits = 8,
 	.val_bits = 8,
-	.max_register = TPS6586X_MAX_REGISTER - 1,
+	.max_register = TPS6586X_MAX_REGISTER,
 	.volatile_reg = is_volatile_reg,
 	.cache_type = REGCACHE_RBTREE,
 };
@@ -610,7 +603,6 @@
 static struct i2c_driver tps6586x_driver = {
 	.driver	= {
 		.name	= "tps6586x",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(tps6586x_of_match),
 	},
 	.probe		= tps6586x_i2c_probe,
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 7612d89..f7ab115 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -544,7 +544,6 @@
 static struct i2c_driver tps65910_i2c_driver = {
 	.driver = {
 		   .name = "tps65910",
-		   .owner = THIS_MODULE,
 		   .of_match_table = of_match_ptr(tps65910_of_match),
 	},
 	.probe = tps65910_i2c_probe,
diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c
index 6a6343e..7e55640 100644
--- a/drivers/mfd/tps65912-i2c.c
+++ b/drivers/mfd/tps65912-i2c.c
@@ -109,7 +109,6 @@
 static struct i2c_driver tps65912_i2c_driver = {
 	.driver = {
 		   .name = "tps65912",
-		   .owner = THIS_MODULE,
 	},
 	.probe = tps65912_i2c_probe,
 	.remove = tps65912_i2c_remove,
diff --git a/drivers/mfd/tps65912-irq.c b/drivers/mfd/tps65912-irq.c
index fbecec7..db2c29c 100644
--- a/drivers/mfd/tps65912-irq.c
+++ b/drivers/mfd/tps65912-irq.c
@@ -197,13 +197,7 @@
 		irq_set_chip_and_handler(cur_irq, &tps65912_irq_chip,
 					 handle_edge_irq);
 		irq_set_nested_thread(cur_irq, 1);
-		/* ARM needs us to explicitly flag the IRQ as valid
-		 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-		set_irq_flags(cur_irq, IRQF_VALID);
-#else
-		irq_set_noprobe(cur_irq);
-#endif
+		irq_clear_status_flags(cur_irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	ret = request_threaded_irq(irq, NULL, tps65912_irq, flags,
diff --git a/drivers/mfd/tps80031.c b/drivers/mfd/tps80031.c
index ed6c5b0..0812df3 100644
--- a/drivers/mfd/tps80031.c
+++ b/drivers/mfd/tps80031.c
@@ -549,7 +549,6 @@
 static struct i2c_driver tps80031_driver = {
 	.driver	= {
 		.name	= "tps80031",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= tps80031_probe,
 	.remove		= tps80031_remove,
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index a3fa7f4..40e51b0 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -419,16 +419,7 @@
 
 static inline void activate_irq(int irq)
 {
-#ifdef CONFIG_ARM
-	/*
-	 * ARM requires an extra step to clear IRQ_NOREQUEST, which it
-	 * sets on behalf of every irq_chip.  Also sets IRQ_NOPROBE.
-	 */
-	set_irq_flags(irq, IRQF_VALID);
-#else
-	/* same effect on other architectures */
-	irq_set_noprobe(irq);
-#endif
+	irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 }
 
 /*----------------------------------------------------------------------*/
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 20fb581..5357450 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -231,7 +231,7 @@
 
 static int twl6030_irq_set_wake(struct irq_data *d, unsigned int on)
 {
-	struct twl6030_irq *pdata = irq_get_chip_data(d->irq);
+	struct twl6030_irq *pdata = irq_data_get_irq_chip_data(d);
 
 	if (on)
 		atomic_inc(&pdata->wakeirqs);
@@ -352,26 +352,13 @@
 	irq_set_chip_and_handler(virq,  &pdata->irq_chip, handle_simple_irq);
 	irq_set_nested_thread(virq, true);
 	irq_set_parent(virq, pdata->twl_irq);
-
-#ifdef CONFIG_ARM
-	/*
-	 * ARM requires an extra step to clear IRQ_NOREQUEST, which it
-	 * sets on behalf of every irq_chip.  Also sets IRQ_NOPROBE.
-	 */
-	set_irq_flags(virq, IRQF_VALID);
-#else
-	/* same effect on other architectures */
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
 
 static void twl6030_irq_unmap(struct irq_domain *d, unsigned int virq)
 {
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, 0);
-#endif
 	irq_set_chip_and_handler(virq, NULL, NULL);
 	irq_set_chip_data(virq, NULL);
 }
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index c5265c1..a151ee2 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -86,7 +86,7 @@
 	{ 0x2E, 0x00 }, /* REG_STATUS	(ro) */
 };
 
-static struct reg_default twl6040_patch[] = {
+static struct reg_sequence twl6040_patch[] = {
 	/*
 	 * Select I2C bus access to dual access registers
 	 * Interrupt register is cleared on read
@@ -801,7 +801,6 @@
 static struct i2c_driver twl6040_driver = {
 	.driver = {
 		.name = "twl6040",
-		.owner = THIS_MODULE,
 	},
 	.probe		= twl6040_probe,
 	.remove		= twl6040_remove,
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index 3591550..9a23021 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -282,7 +282,7 @@
  * SIBCLK to talk to the chip.  We leave the clock running until
  * we have finished processing all interrupts from the chip.
  */
-static void ucb1x00_irq(unsigned int irq, struct irq_desc *desc)
+static void ucb1x00_irq(unsigned int __irq, struct irq_desc *desc)
 {
 	struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
 	unsigned int isr, i;
@@ -292,7 +292,7 @@
 	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr);
 	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);
 
-	for (i = 0; i < 16 && isr; i++, isr >>= 1, irq++)
+	for (i = 0; i < 16 && isr; i++, isr >>= 1)
 		if (isr & 1)
 			generic_handle_irq(ucb->irq_base + i);
 	ucb1x00_disable(ucb);
@@ -562,7 +562,7 @@
 
 		irq_set_chip_and_handler(irq, &ucb1x00_irqchip, handle_edge_irq);
 		irq_set_chip_data(irq, ucb);
-		set_irq_flags(irq, IRQF_VALID | IRQ_NOREQUEST);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST);
 	}
 
 	irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING);
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index aeae6ec..0386eaf 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -21,7 +21,7 @@
 #define WM5102_NUM_AOD_ISR 2
 #define WM5102_NUM_ISR 5
 
-static const struct reg_default wm5102_reva_patch[] = {
+static const struct reg_sequence wm5102_reva_patch[] = {
 	{ 0x80, 0x0003 },
 	{ 0x221, 0x0090 },
 	{ 0x211, 0x0014 },
@@ -57,7 +57,7 @@
 	{ 0x80, 0x0000 },
 };
 
-static const struct reg_default wm5102_revb_patch[] = {
+static const struct reg_sequence wm5102_revb_patch[] = {
 	{ 0x19, 0x0001 },
 	{ 0x80, 0x0003 },
 	{ 0x081, 0xE022 },
@@ -80,7 +80,7 @@
 /* We use a function so we can use ARRAY_SIZE() */
 int wm5102_patch(struct arizona *arizona)
 {
-	const struct reg_default *wm5102_patch;
+	const struct reg_sequence *wm5102_patch;
 	int patch_size;
 
 	switch (arizona->rev) {
@@ -266,8 +266,6 @@
 	{ 0x00000069, 0x01FF },   /* R105   - Always On Triggers Sequence Select 4 */
 	{ 0x0000006A, 0x01FF },   /* R106   - Always On Triggers Sequence Select 5 */
 	{ 0x0000006B, 0x01FF },   /* R107   - Always On Triggers Sequence Select 6 */
-	{ 0x0000006E, 0x01FF },   /* R110   - Trigger Sequence Select 32 */
-	{ 0x0000006F, 0x01FF },   /* R111   - Trigger Sequence Select 33 */
 	{ 0x00000070, 0x0000 },   /* R112   - Comfort Noise Generator */ 
 	{ 0x00000090, 0x0000 },   /* R144   - Haptics Control 1 */ 
 	{ 0x00000091, 0x7FFF },   /* R145   - Haptics Control 2 */ 
@@ -300,7 +298,6 @@
 	{ 0x00000175, 0x0004 },   /* R373   - FLL1 Control 5 */ 
 	{ 0x00000176, 0x0000 },   /* R374   - FLL1 Control 6 */ 
 	{ 0x00000177, 0x0181 },   /* R375   - FLL1 Loop Filter Test 1 */ 
-	{ 0x00000178, 0x0000 },   /* R376   - FLL1 NCO Test 0 */
 	{ 0x00000179, 0x0000 },   /* R377   - FLL1 Control 7 */
 	{ 0x00000181, 0x0000 },   /* R385   - FLL1 Synchroniser 1 */ 
 	{ 0x00000182, 0x0000 },   /* R386   - FLL1 Synchroniser 2 */ 
@@ -318,7 +315,6 @@
 	{ 0x00000195, 0x0004 },   /* R405   - FLL2 Control 5 */ 
 	{ 0x00000196, 0x0000 },   /* R406   - FLL2 Control 6 */ 
 	{ 0x00000197, 0x0000 },   /* R407   - FLL2 Loop Filter Test 1 */ 
-	{ 0x00000198, 0x0000 },   /* R408   - FLL2 NCO Test 0 */
 	{ 0x00000199, 0x0000 },   /* R409   - FLL2 Control 7 */
 	{ 0x000001A1, 0x0000 },   /* R417   - FLL2 Synchroniser 1 */ 
 	{ 0x000001A2, 0x0000 },   /* R418   - FLL2 Synchroniser 2 */ 
@@ -338,12 +334,9 @@
 	{ 0x0000021A, 0x01A6 },   /* R538   - Mic Bias Ctrl 3 */ 
 	{ 0x00000293, 0x0000 },   /* R659   - Accessory Detect Mode 1 */ 
 	{ 0x0000029B, 0x0020 },   /* R667   - Headphone Detect 1 */ 
-	{ 0x0000029C, 0x0000 },   /* R668   - Headphone Detect 2 */
-	{ 0x0000029F, 0x0000 },   /* R671   - Headphone Detect Test */
 	{ 0x000002A2, 0x0000 },   /* R674   - Micd clamp control */
 	{ 0x000002A3, 0x1102 },   /* R675   - Mic Detect 1 */ 
 	{ 0x000002A4, 0x009F },   /* R676   - Mic Detect 2 */ 
-	{ 0x000002A5, 0x0000 },   /* R677   - Mic Detect 3 */ 
 	{ 0x000002A6, 0x3737 },   /* R678   - Mic Detect Level 1 */
 	{ 0x000002A7, 0x372C },   /* R679   - Mic Detect Level 2 */
 	{ 0x000002A8, 0x1422 },   /* R680   - Mic Detect Level 3 */
@@ -887,11 +880,11 @@
 	{ 0x00000D1B, 0xFFFF },   /* R3355  - IRQ2 Status 4 Mask */ 
 	{ 0x00000D1C, 0xFFFF },   /* R3356  - IRQ2 Status 5 Mask */ 
 	{ 0x00000D1F, 0x0000 },   /* R3359  - IRQ2 Control */ 
+	{ 0x00000D41, 0x0000 },   /* R3393  - ADSP2 IRQ0 */
 	{ 0x00000D53, 0xFFFF },   /* R3411  - AOD IRQ Mask IRQ1 */ 
 	{ 0x00000D54, 0xFFFF },   /* R3412  - AOD IRQ Mask IRQ2 */ 
 	{ 0x00000D56, 0x0000 },   /* R3414  - Jack detect debounce */ 
 	{ 0x00000E00, 0x0000 },   /* R3584  - FX_Ctrl1 */ 
-	{ 0x00000E01, 0x0000 },   /* R3585  - FX_Ctrl2 */ 
 	{ 0x00000E10, 0x6318 },   /* R3600  - EQ1_1 */ 
 	{ 0x00000E11, 0x6300 },   /* R3601  - EQ1_2 */ 
 	{ 0x00000E12, 0x0FC8 },   /* R3602  - EQ1_3 */ 
@@ -991,6 +984,7 @@
 	{ 0x00000ECD, 0x0000 },   /* R3789  - HPLPF4_2 */ 
 	{ 0x00000EE0, 0x0000 },   /* R3808  - ASRC_ENABLE */ 
 	{ 0x00000EE2, 0x0000 },   /* R3810  - ASRC_RATE1 */ 
+	{ 0x00000EE3, 0x0400 },   /* R3811  - ASRC_RATE2 */
 	{ 0x00000EF0, 0x0000 },   /* R3824  - ISRC 1 CTRL 1 */ 
 	{ 0x00000EF1, 0x0000 },   /* R3825  - ISRC 1 CTRL 2 */ 
 	{ 0x00000EF2, 0x0000 },   /* R3826  - ISRC 1 CTRL 3 */ 
@@ -998,7 +992,6 @@
 	{ 0x00000EF4, 0x0000 },   /* R3828  - ISRC 2 CTRL 2 */ 
 	{ 0x00000EF5, 0x0000 },   /* R3829  - ISRC 2 CTRL 3 */ 
 	{ 0x00001100, 0x0010 },   /* R4352  - DSP1 Control 1 */ 
-	{ 0x00001101, 0x0000 },   /* R4353  - DSP1 Clocking 1 */ 
 };
 
 static bool wm5102_readable_register(struct device *dev, unsigned int reg)
@@ -1008,12 +1001,10 @@
 	case ARIZONA_DEVICE_REVISION:
 	case ARIZONA_CTRL_IF_SPI_CFG_1:
 	case ARIZONA_CTRL_IF_I2C1_CFG_1:
-	case ARIZONA_CTRL_IF_STATUS_1:
 	case ARIZONA_WRITE_SEQUENCER_CTRL_0:
 	case ARIZONA_WRITE_SEQUENCER_CTRL_1:
 	case ARIZONA_WRITE_SEQUENCER_CTRL_2:
 	case ARIZONA_WRITE_SEQUENCER_CTRL_3:
-	case ARIZONA_WRITE_SEQUENCER_PROM:
 	case ARIZONA_TONE_GENERATOR_1:
 	case ARIZONA_TONE_GENERATOR_2:
 	case ARIZONA_TONE_GENERATOR_3:
@@ -1034,8 +1025,6 @@
 	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4:
 	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5:
 	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6:
-	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_7:
-	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_8:
 	case ARIZONA_COMFORT_NOISE_GENERATOR:
 	case ARIZONA_HAPTICS_CONTROL_1:
 	case ARIZONA_HAPTICS_CONTROL_2:
@@ -1176,7 +1165,6 @@
 	case ARIZONA_DAC_DIGITAL_VOLUME_4L:
 	case ARIZONA_OUT_VOLUME_4L:
 	case ARIZONA_NOISE_GATE_SELECT_4L:
-	case ARIZONA_OUTPUT_PATH_CONFIG_4R:
 	case ARIZONA_DAC_DIGITAL_VOLUME_4R:
 	case ARIZONA_OUT_VOLUME_4R:
 	case ARIZONA_NOISE_GATE_SELECT_4R:
@@ -1184,7 +1172,6 @@
 	case ARIZONA_DAC_DIGITAL_VOLUME_5L:
 	case ARIZONA_DAC_VOLUME_LIMIT_5L:
 	case ARIZONA_NOISE_GATE_SELECT_5L:
-	case ARIZONA_OUTPUT_PATH_CONFIG_5R:
 	case ARIZONA_DAC_DIGITAL_VOLUME_5R:
 	case ARIZONA_DAC_VOLUME_LIMIT_5R:
 	case ARIZONA_NOISE_GATE_SELECT_5R:
@@ -1195,8 +1182,6 @@
 	case ARIZONA_NOISE_GATE_CONTROL:
 	case ARIZONA_PDM_SPK1_CTRL_1:
 	case ARIZONA_PDM_SPK1_CTRL_2:
-	case ARIZONA_SPK_CTRL_2:
-	case ARIZONA_SPK_CTRL_3:
 	case ARIZONA_DAC_COMP_1:
 	case ARIZONA_DAC_COMP_2:
 	case ARIZONA_DAC_COMP_3:
@@ -1228,7 +1213,6 @@
 	case ARIZONA_AIF1_FRAME_CTRL_18:
 	case ARIZONA_AIF1_TX_ENABLES:
 	case ARIZONA_AIF1_RX_ENABLES:
-	case ARIZONA_AIF1_FORCE_WRITE:
 	case ARIZONA_AIF2_BCLK_CTRL:
 	case ARIZONA_AIF2_TX_PIN_CTRL:
 	case ARIZONA_AIF2_RX_PIN_CTRL:
@@ -1244,7 +1228,6 @@
 	case ARIZONA_AIF2_FRAME_CTRL_12:
 	case ARIZONA_AIF2_TX_ENABLES:
 	case ARIZONA_AIF2_RX_ENABLES:
-	case ARIZONA_AIF2_FORCE_WRITE:
 	case ARIZONA_AIF3_BCLK_CTRL:
 	case ARIZONA_AIF3_TX_PIN_CTRL:
 	case ARIZONA_AIF3_RX_PIN_CTRL:
@@ -1260,7 +1243,6 @@
 	case ARIZONA_AIF3_FRAME_CTRL_12:
 	case ARIZONA_AIF3_TX_ENABLES:
 	case ARIZONA_AIF3_RX_ENABLES:
-	case ARIZONA_AIF3_FORCE_WRITE:
 	case ARIZONA_SLIMBUS_FRAMER_REF_GEAR:
 	case ARIZONA_SLIMBUS_RATES_1:
 	case ARIZONA_SLIMBUS_RATES_2:
@@ -1586,22 +1568,6 @@
 	case ARIZONA_DRC1RMIX_INPUT_3_VOLUME:
 	case ARIZONA_DRC1RMIX_INPUT_4_SOURCE:
 	case ARIZONA_DRC1RMIX_INPUT_4_VOLUME:
-	case ARIZONA_DRC2LMIX_INPUT_1_SOURCE:
-	case ARIZONA_DRC2LMIX_INPUT_1_VOLUME:
-	case ARIZONA_DRC2LMIX_INPUT_2_SOURCE:
-	case ARIZONA_DRC2LMIX_INPUT_2_VOLUME:
-	case ARIZONA_DRC2LMIX_INPUT_3_SOURCE:
-	case ARIZONA_DRC2LMIX_INPUT_3_VOLUME:
-	case ARIZONA_DRC2LMIX_INPUT_4_SOURCE:
-	case ARIZONA_DRC2LMIX_INPUT_4_VOLUME:
-	case ARIZONA_DRC2RMIX_INPUT_1_SOURCE:
-	case ARIZONA_DRC2RMIX_INPUT_1_VOLUME:
-	case ARIZONA_DRC2RMIX_INPUT_2_SOURCE:
-	case ARIZONA_DRC2RMIX_INPUT_2_VOLUME:
-	case ARIZONA_DRC2RMIX_INPUT_3_SOURCE:
-	case ARIZONA_DRC2RMIX_INPUT_3_VOLUME:
-	case ARIZONA_DRC2RMIX_INPUT_4_SOURCE:
-	case ARIZONA_DRC2RMIX_INPUT_4_VOLUME:
 	case ARIZONA_HPLP1MIX_INPUT_1_SOURCE:
 	case ARIZONA_HPLP1MIX_INPUT_1_VOLUME:
 	case ARIZONA_HPLP1MIX_INPUT_2_SOURCE:
@@ -1810,11 +1776,6 @@
 	case ARIZONA_DRC1_CTRL3:
 	case ARIZONA_DRC1_CTRL4:
 	case ARIZONA_DRC1_CTRL5:
-	case ARIZONA_DRC2_CTRL1:
-	case ARIZONA_DRC2_CTRL2:
-	case ARIZONA_DRC2_CTRL3:
-	case ARIZONA_DRC2_CTRL4:
-	case ARIZONA_DRC2_CTRL5:
 	case ARIZONA_HPLPF1_1:
 	case ARIZONA_HPLPF1_2:
 	case ARIZONA_HPLPF2_1:
@@ -1832,9 +1793,6 @@
 	case ARIZONA_ISRC_2_CTRL_1:
 	case ARIZONA_ISRC_2_CTRL_2:
 	case ARIZONA_ISRC_2_CTRL_3:
-	case ARIZONA_ISRC_3_CTRL_1:
-	case ARIZONA_ISRC_3_CTRL_2:
-	case ARIZONA_ISRC_3_CTRL_3:
 	case ARIZONA_DSP1_CONTROL_1:
 	case ARIZONA_DSP1_CLOCKING_1:
 	case ARIZONA_DSP1_STATUS_1:
@@ -1883,7 +1841,6 @@
 	case ARIZONA_WRITE_SEQUENCER_CTRL_2:
 	case ARIZONA_WRITE_SEQUENCER_CTRL_3:
 	case ARIZONA_OUTPUT_STATUS_1:
-	case ARIZONA_RAW_OUTPUT_STATUS_1:
 	case ARIZONA_SLIMBUS_RX_PORT_STATUS:
 	case ARIZONA_SLIMBUS_TX_PORT_STATUS:
 	case ARIZONA_SAMPLE_RATE_1_STATUS:
@@ -1969,6 +1926,8 @@
 	.reg_bits = 32,
 	.pad_bits = 16,
 	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
 
 	.max_register = WM5102_MAX_REGISTER,
 	.readable_reg = wm5102_readable_register,
@@ -1983,6 +1942,8 @@
 const struct regmap_config wm5102_i2c_regmap = {
 	.reg_bits = 32,
 	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
 
 	.max_register = WM5102_MAX_REGISTER,
 	.readable_reg = wm5102_readable_register,
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 12cad94..c4b9374 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -21,7 +21,7 @@
 #define WM5110_NUM_AOD_ISR 2
 #define WM5110_NUM_ISR 5
 
-static const struct reg_default wm5110_reva_patch[] = {
+static const struct reg_sequence wm5110_reva_patch[] = {
 	{ 0x80, 0x3 },
 	{ 0x44, 0x20 },
 	{ 0x45, 0x40 },
@@ -134,7 +134,7 @@
 	{ 0x209, 0x002A },
 };
 
-static const struct reg_default wm5110_revb_patch[] = {
+static const struct reg_sequence wm5110_revb_patch[] = {
 	{ 0x80, 0x3 },
 	{ 0x36e, 0x0210 },
 	{ 0x370, 0x0210 },
@@ -224,7 +224,7 @@
 	{ 0x80, 0x0 },
 };
 
-static const struct reg_default wm5110_revd_patch[] = {
+static const struct reg_sequence wm5110_revd_patch[] = {
 	{ 0x80, 0x3 },
 	{ 0x80, 0x3 },
 	{ 0x393, 0x27 },
@@ -249,6 +249,16 @@
 	{ 0x80, 0x0 },
 };
 
+/* Add extra headphone write sequence locations */
+static const struct reg_default wm5110_reve_patch[] = {
+	{ 0x80, 0x3 },
+	{ 0x80, 0x3 },
+	{ 0x4b, 0x138 },
+	{ 0x4c, 0x13d },
+	{ 0x80, 0x0 },
+	{ 0x80, 0x0 },
+};
+
 /* We use a function so we can use ARRAY_SIZE() */
 int wm5110_patch(struct arizona *arizona)
 {
@@ -266,7 +276,9 @@
 					     wm5110_revd_patch,
 					     ARRAY_SIZE(wm5110_revd_patch));
 	default:
-		return 0;
+		return regmap_register_patch(arizona->regmap,
+					     wm5110_reve_patch,
+					     ARRAY_SIZE(wm5110_reve_patch));
 	}
 }
 EXPORT_SYMBOL_GPL(wm5110_patch);
@@ -676,6 +688,7 @@
 	{ 0x00000032, 0x0100 },    /* R50    - PWM Drive 3 */
 	{ 0x00000040, 0x0000 },    /* R64    - Wake control */
 	{ 0x00000041, 0x0000 },    /* R65    - Sequence control */
+	{ 0x00000042, 0x0000 },    /* R66    - Spare Triggers */
 	{ 0x00000061, 0x01FF },    /* R97    - Sample Rate Sequence Select 1 */
 	{ 0x00000062, 0x01FF },    /* R98    - Sample Rate Sequence Select 2 */
 	{ 0x00000063, 0x01FF },    /* R99    - Sample Rate Sequence Select 3 */
@@ -754,11 +767,9 @@
 	{ 0x0000021A, 0x01A6 },    /* R538   - Mic Bias Ctrl 3 */
 	{ 0x00000293, 0x0000 },    /* R659   - Accessory Detect Mode 1 */
 	{ 0x0000029B, 0x0028 },    /* R667   - Headphone Detect 1 */
-	{ 0x0000029C, 0x0000 },    /* R668   - Headphone Detect 2 */
 	{ 0x000002A2, 0x0000 },    /* R674   - Micd clamp control */
 	{ 0x000002A3, 0x1102 },    /* R675   - Mic Detect 1 */
 	{ 0x000002A4, 0x009F },    /* R676   - Mic Detect 2 */
-	{ 0x000002A5, 0x0000 },    /* R677   - Mic Detect 3 */
 	{ 0x000002A6, 0x3737 },    /* R678   - Mic Detect Level 1 */
 	{ 0x000002A7, 0x372C },    /* R679   - Mic Detect Level 2 */
 	{ 0x000002A8, 0x1422 },    /* R680   - Mic Detect Level 3 */
@@ -848,8 +859,6 @@
 	{ 0x00000440, 0x8FFF },    /* R1088  - DRE Enable */
 	{ 0x00000450, 0x0000 },    /* R1104  - DAC AEC Control 1 */
 	{ 0x00000458, 0x0000 },    /* R1112  - Noise Gate Control */
-	{ 0x00000480, 0x0040 },    /* R1152  - Class W ANC Threshold 1 */
-	{ 0x00000481, 0x0040 },    /* R1153  - Class W ANC Threshold 2 */
 	{ 0x00000490, 0x0069 },    /* R1168  - PDM SPK1 CTRL 1 */
 	{ 0x00000491, 0x0000 },    /* R1169  - PDM SPK1 CTRL 2 */
 	{ 0x00000492, 0x0069 },    /* R1170  - PDM SPK2 CTRL 1 */
@@ -1508,7 +1517,6 @@
 	{ 0x00000D54, 0xFFFF },    /* R3412  - AOD IRQ Mask IRQ2 */
 	{ 0x00000D56, 0x0000 },    /* R3414  - Jack detect debounce */
 	{ 0x00000E00, 0x0000 },    /* R3584  - FX_Ctrl1 */
-	{ 0x00000E01, 0x0000 },    /* R3585  - FX_Ctrl2 */
 	{ 0x00000E10, 0x6318 },    /* R3600  - EQ1_1 */
 	{ 0x00000E11, 0x6300 },    /* R3601  - EQ1_2 */
 	{ 0x00000E12, 0x0FC8 },    /* R3602  - EQ1_3 */
@@ -1625,14 +1633,9 @@
 	{ 0x00000F00, 0x0000 },    /* R3840  - Clock Control */
 	{ 0x00000F01, 0x0000 },    /* R3841  - ANC_SRC */
 	{ 0x00001100, 0x0010 },    /* R4352  - DSP1 Control 1 */
-	{ 0x00001101, 0x0000 },    /* R4353  - DSP1 Clocking 1 */
 	{ 0x00001200, 0x0010 },    /* R4608  - DSP2 Control 1 */
-	{ 0x00001201, 0x0000 },    /* R4609  - DSP2 Clocking 1 */
 	{ 0x00001300, 0x0010 },    /* R4864  - DSP3 Control 1 */
-	{ 0x00001301, 0x0000 },    /* R4865  - DSP3 Clocking 1 */
 	{ 0x00001400, 0x0010 },    /* R5120  - DSP4 Control 1 */
-	{ 0x00001401, 0x0000 },    /* R5121  - DSP4 Clocking 1 */
-	{ 0x00001404, 0x0000 },    /* R5124  - DSP4 Status 1 */
 };
 
 static bool wm5110_is_rev_b_adsp_memory(unsigned int reg)
@@ -1716,6 +1719,7 @@
 	case ARIZONA_PWM_DRIVE_3:
 	case ARIZONA_WAKE_CONTROL:
 	case ARIZONA_SEQUENCE_CONTROL:
+	case ARIZONA_SPARE_TRIGGERS:
 	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1:
 	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2:
 	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3:
@@ -3007,6 +3011,8 @@
 	.reg_bits = 32,
 	.pad_bits = 16,
 	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
 
 	.max_register = WM5110_MAX_REGISTER,
 	.readable_reg = wm5110_readable_register,
@@ -3021,6 +3027,8 @@
 const struct regmap_config wm5110_i2c_regmap = {
 	.reg_bits = 32,
 	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
 
 	.max_register = WM5110_MAX_REGISTER,
 	.readable_reg = wm5110_readable_register,
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index a4cbefe..824bcba 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -93,7 +93,6 @@
 static struct i2c_driver wm831x_i2c_driver = {
 	.driver = {
 		.name = "wm831x",
-		.owner = THIS_MODULE,
 		.pm = &wm831x_pm_ops,
 	},
 	.probe = wm831x_i2c_probe,
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 3da8126..dfea8b9 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -552,14 +552,7 @@
 	irq_set_chip_data(virq, h->host_data);
 	irq_set_chip_and_handler(virq, &wm831x_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-
-	/* ARM needs us to explicitly flag the IRQ as valid
-	 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index 6a16a8a..9358f03 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -69,7 +69,6 @@
 static struct i2c_driver wm8350_i2c_driver = {
 	.driver = {
 		   .name = "wm8350",
-		   .owner = THIS_MODULE,
 	},
 	.probe = wm8350_i2c_probe,
 	.remove = wm8350_i2c_remove,
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index 813ff50..27054f3 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -526,13 +526,7 @@
 					 handle_edge_irq);
 		irq_set_nested_thread(cur_irq, 1);
 
-		/* ARM needs us to explicitly flag the IRQ as valid
-		 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-		set_irq_flags(cur_irq, IRQF_VALID);
-#else
-		irq_set_noprobe(cur_irq);
-#endif
+		irq_clear_status_flags(cur_irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	ret = request_threaded_irq(irq, NULL, wm8350_irq, flags,
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index c6fb5d1..3bd44a4 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -194,7 +194,6 @@
 static struct i2c_driver wm8400_i2c_driver = {
 	.driver = {
 		.name = "WM8400",
-		.owner = THIS_MODULE,
 	},
 	.probe    = wm8400_i2c_probe,
 	.remove   = wm8400_i2c_remove,
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 53ae5af..7eec619 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -243,21 +243,21 @@
 }
 #endif
 
-static const struct reg_default wm8994_revc_patch[] = {
+static const struct reg_sequence wm8994_revc_patch[] = {
 	{ 0x102, 0x3 },
 	{ 0x56, 0x3 },
 	{ 0x817, 0x0 },
 	{ 0x102, 0x0 },
 };
 
-static const struct reg_default wm8958_reva_patch[] = {
+static const struct reg_sequence wm8958_reva_patch[] = {
 	{ 0x102, 0x3 },
 	{ 0xcb, 0x81 },
 	{ 0x817, 0x0 },
 	{ 0x102, 0x0 },
 };
 
-static const struct reg_default wm1811_reva_patch[] = {
+static const struct reg_sequence wm1811_reva_patch[] = {
 	{ 0x102, 0x3 },
 	{ 0x56, 0xc07 },
 	{ 0x5d, 0x7e },
@@ -326,7 +326,7 @@
 {
 	struct wm8994_pdata *pdata;
 	struct regmap_config *regmap_config;
-	const struct reg_default *regmap_patch = NULL;
+	const struct reg_sequence *regmap_patch = NULL;
 	const char *devname;
 	int ret, i, patch_regs = 0;
 	int pulls = 0;
@@ -677,7 +677,6 @@
 static struct i2c_driver wm8994_i2c_driver = {
 	.driver = {
 		.name = "wm8994",
-		.owner = THIS_MODULE,
 		.pm = &wm8994_pm_ops,
 		.of_match_table = of_match_ptr(wm8994_of_match),
 	},
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 55c380a..18710f3 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -172,14 +172,7 @@
 	irq_set_chip_data(virq, wm8994);
 	irq_set_chip_and_handler(virq, &wm8994_edge_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-
-	/* ARM needs us to explicitly flag the IRQ as valid
-	 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
@@ -193,7 +186,7 @@
 {
 	int ret;
 	unsigned long irqflags;
-	struct wm8994_pdata *pdata = dev_get_platdata(wm8994->dev);
+	struct wm8994_pdata *pdata = &wm8994->pdata;
 
 	if (!wm8994->irq) {
 		dev_warn(wm8994->dev,
diff --git a/drivers/mfd/wm8994-regmap.c b/drivers/mfd/wm8994-regmap.c
index 300e9b6..c56b160 100644
--- a/drivers/mfd/wm8994-regmap.c
+++ b/drivers/mfd/wm8994-regmap.c
@@ -19,7 +19,7 @@
 
 #include "wm8994.h"
 
-static struct reg_default wm1811_defaults[] = {
+static const struct reg_default wm1811_defaults[] = {
 	{ 0x0001, 0x0000 },    /* R1    - Power Management (1) */
 	{ 0x0002, 0x6000 },    /* R2    - Power Management (2) */
 	{ 0x0003, 0x0000 },    /* R3    - Power Management (3) */
@@ -251,7 +251,7 @@
 	{ 0x0748, 0x003F },    /* R1864 - IRQ Debounce */
 };
 
-static struct reg_default wm8994_defaults[] = {
+static const struct reg_default wm8994_defaults[] = {
 	{ 0x0001, 0x0000 },    /* R1     - Power Management (1) */ 
 	{ 0x0002, 0x6000 },    /* R2     - Power Management (2) */ 
 	{ 0x0003, 0x0000 },    /* R3     - Power Management (3) */ 
@@ -470,7 +470,7 @@
 	{ 0x0748, 0x003F },    /* R1864  - IRQ Debounce */ 
 };
 
-static struct reg_default wm8958_defaults[] = {
+static const struct reg_default wm8958_defaults[] = {
 	{ 0x0001, 0x0000 },    /* R1     - Power Management (1) */
 	{ 0x0002, 0x6000 },    /* R2     - Power Management (2) */
 	{ 0x0003, 0x0000 },    /* R3     - Power Management (3) */
diff --git a/drivers/mfd/wm8997-tables.c b/drivers/mfd/wm8997-tables.c
index c0c25d75..ca41a56 100644
--- a/drivers/mfd/wm8997-tables.c
+++ b/drivers/mfd/wm8997-tables.c
@@ -17,7 +17,7 @@
 
 #include "arizona.h"
 
-static const struct reg_default wm8997_reva_patch[] = {
+static const struct reg_sequence wm8997_reva_patch[] = {
 	{ 0x80, 0x0003 },
 	{ 0x214, 0x0008 },
 	{ 0x458, 0x0000 },
@@ -243,7 +243,6 @@
 	{ 0x0000029B, 0x0020 },    /* R667   - Headphone Detect 1 */
 	{ 0x000002A3, 0x1102 },    /* R675   - Mic Detect 1 */
 	{ 0x000002A4, 0x009F },    /* R676   - Mic Detect 2 */
-	{ 0x000002A5, 0x0000 },    /* R677   - Mic Detect 3 */
 	{ 0x000002C3, 0x0000 },    /* R707   - Mic noise mix control 1 */
 	{ 0x000002CB, 0x0000 },    /* R715   - Isolation control */
 	{ 0x000002D3, 0x0000 },    /* R723   - Jack detect analogue */
@@ -684,7 +683,6 @@
 	{ 0x00000D54, 0xFFFF },    /* R3412  - AOD IRQ Mask IRQ2 */
 	{ 0x00000D56, 0x0000 },    /* R3414  - Jack detect debounce */
 	{ 0x00000E00, 0x0000 },    /* R3584  - FX_Ctrl1 */
-	{ 0x00000E01, 0x0000 },    /* R3585  - FX_Ctrl2 */
 	{ 0x00000E10, 0x6318 },    /* R3600  - EQ1_1 */
 	{ 0x00000E11, 0x6300 },    /* R3601  - EQ1_2 */
 	{ 0x00000E12, 0x0FC8 },    /* R3602  - EQ1_3 */
@@ -788,8 +786,6 @@
 	{ 0x00000EF3, 0x0000 },    /* R3827  - ISRC 2 CTRL 1 */
 	{ 0x00000EF4, 0x0000 },    /* R3828  - ISRC 2 CTRL 2 */
 	{ 0x00000EF5, 0x0000 },    /* R3829  - ISRC 2 CTRL 3 */
-	{ 0x00001100, 0x0010 },    /* R4352  - DSP1 Control 1 */
-	{ 0x00001101, 0x0000 },    /* R4353  - DSP1 Clocking 1 */
 };
 
 static bool wm8997_readable_register(struct device *dev, unsigned int reg)
@@ -1480,6 +1476,8 @@
 	case ARIZONA_SAMPLE_RATE_2_STATUS:
 	case ARIZONA_SAMPLE_RATE_3_STATUS:
 	case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+	case ARIZONA_FLL1_NCO_TEST_0:
+	case ARIZONA_FLL2_NCO_TEST_0:
 	case ARIZONA_MIC_DETECT_3:
 	case ARIZONA_HP_CTRL_1L:
 	case ARIZONA_HP_CTRL_1R:
@@ -1521,6 +1519,8 @@
 const struct regmap_config wm8997_i2c_regmap = {
 	.reg_bits = 32,
 	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
 
 	.max_register = WM8997_MAX_REGISTER,
 	.readable_reg = wm8997_readable_register,
diff --git a/drivers/mfd/wm8998-tables.c b/drivers/mfd/wm8998-tables.c
new file mode 100644
index 0000000..e6de3cd
--- /dev/null
+++ b/drivers/mfd/wm8998-tables.c
@@ -0,0 +1,1594 @@
+/*
+ * wm8998-tables.c  --  data tables for wm8998-class codecs
+ *
+ * Copyright 2014 Wolfson Microelectronics plc
+ *
+ * Author: Richard Fitzgerald <rf@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/registers.h>
+#include <linux/device.h>
+
+#include "arizona.h"
+
+#define WM8998_NUM_AOD_ISR 2
+#define WM8998_NUM_ISR 5
+
+static const struct reg_default wm8998_rev_a_patch[] = {
+	{ 0x0212, 0x0000 },
+	{ 0x0211, 0x0014 },
+	{ 0x04E4, 0x0E0D },
+	{ 0x04E5, 0x0E0D },
+	{ 0x04E6, 0x0E0D },
+	{ 0x04EB, 0x060E },
+	{ 0x0441, 0xC759 },
+	{ 0x0442, 0x2A08 },
+	{ 0x0443, 0x5CFA },
+	{ 0x026E, 0x0064 },
+	{ 0x026F, 0x00EA },
+	{ 0x0270, 0x1F16 },
+	{ 0x0410, 0x2080 },
+	{ 0x0418, 0x2080 },
+	{ 0x0420, 0x2080 },
+	{ 0x04B8, 0x1120 },
+	{ 0x047E, 0x080E },
+	{ 0x0448, 0x03EF },
+};
+
+/* We use a function so we can use ARRAY_SIZE() */
+int wm8998_patch(struct arizona *arizona)
+{
+	return regmap_register_patch(arizona->regmap,
+				     wm8998_rev_a_patch,
+				     ARRAY_SIZE(wm8998_rev_a_patch));
+}
+
+static const struct regmap_irq wm8998_aod_irqs[ARIZONA_NUM_IRQ] = {
+	[ARIZONA_IRQ_MICD_CLAMP_FALL] = {
+		.mask = ARIZONA_MICD_CLAMP_FALL_EINT1
+	},
+	[ARIZONA_IRQ_MICD_CLAMP_RISE] = {
+		.mask = ARIZONA_MICD_CLAMP_RISE_EINT1
+	},
+	[ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 },
+	[ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 },
+	[ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 },
+	[ARIZONA_IRQ_JD_RISE] = { .mask = ARIZONA_JD1_RISE_EINT1 },
+};
+
+struct regmap_irq_chip wm8998_aod = {
+	.name = "wm8998 AOD",
+	.status_base = ARIZONA_AOD_IRQ1,
+	.mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1,
+	.ack_base = ARIZONA_AOD_IRQ1,
+	.wake_base = ARIZONA_WAKE_CONTROL,
+	.wake_invert = 1,
+	.num_regs = 1,
+	.irqs = wm8998_aod_irqs,
+	.num_irqs = ARRAY_SIZE(wm8998_aod_irqs),
+};
+
+static const struct regmap_irq wm8998_irqs[ARIZONA_NUM_IRQ] = {
+	[ARIZONA_IRQ_GP4] = { .reg_offset = 0, .mask = ARIZONA_GP4_EINT1 },
+	[ARIZONA_IRQ_GP3] = { .reg_offset = 0, .mask = ARIZONA_GP3_EINT1 },
+	[ARIZONA_IRQ_GP2] = { .reg_offset = 0, .mask = ARIZONA_GP2_EINT1 },
+	[ARIZONA_IRQ_GP1] = { .reg_offset = 0, .mask = ARIZONA_GP1_EINT1 },
+
+	[ARIZONA_IRQ_SPK_OVERHEAT_WARN] = {
+		.reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_WARN_EINT1
+	},
+	[ARIZONA_IRQ_SPK_OVERHEAT] = {
+		.reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_EINT1
+	},
+	[ARIZONA_IRQ_HPDET] = {
+		.reg_offset = 2, .mask = ARIZONA_HPDET_EINT1
+	},
+	[ARIZONA_IRQ_MICDET] = {
+		.reg_offset = 2, .mask = ARIZONA_MICDET_EINT1
+	},
+	[ARIZONA_IRQ_WSEQ_DONE] = {
+		.reg_offset = 2, .mask = ARIZONA_WSEQ_DONE_EINT1
+	},
+	[ARIZONA_IRQ_DRC1_SIG_DET] = {
+		.reg_offset = 2, .mask = ARIZONA_DRC1_SIG_DET_EINT1
+	},
+	[ARIZONA_IRQ_ASRC2_LOCK] = {
+		.reg_offset = 2, .mask = ARIZONA_ASRC2_LOCK_EINT1
+	},
+	[ARIZONA_IRQ_ASRC1_LOCK] = {
+		.reg_offset = 2, .mask = ARIZONA_ASRC1_LOCK_EINT1
+	},
+	[ARIZONA_IRQ_UNDERCLOCKED] = {
+		.reg_offset = 2, .mask = ARIZONA_UNDERCLOCKED_EINT1
+	},
+	[ARIZONA_IRQ_OVERCLOCKED] = {
+		.reg_offset = 2, .mask = ARIZONA_OVERCLOCKED_EINT1
+	},
+	[ARIZONA_IRQ_FLL2_LOCK] = {
+		.reg_offset = 2, .mask = ARIZONA_FLL2_LOCK_EINT1
+	},
+	[ARIZONA_IRQ_FLL1_LOCK] = {
+		.reg_offset = 2, .mask = ARIZONA_FLL1_LOCK_EINT1
+	},
+	[ARIZONA_IRQ_CLKGEN_ERR] = {
+		.reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_EINT1
+	},
+	[ARIZONA_IRQ_CLKGEN_ERR_ASYNC] = {
+		.reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_ASYNC_EINT1
+	},
+
+	[ARIZONA_IRQ_ASRC_CFG_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_ASRC_CFG_ERR_EINT1
+	},
+	[ARIZONA_IRQ_AIF3_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_AIF3_ERR_EINT1
+	},
+	[ARIZONA_IRQ_AIF2_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_AIF2_ERR_EINT1
+	},
+	[ARIZONA_IRQ_AIF1_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_AIF1_ERR_EINT1
+	},
+	[ARIZONA_IRQ_CTRLIF_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_CTRLIF_ERR_EINT1
+	},
+	[ARIZONA_IRQ_MIXER_DROPPED_SAMPLES] = {
+		.reg_offset = 3, .mask = ARIZONA_MIXER_DROPPED_SAMPLE_EINT1
+	},
+	[ARIZONA_IRQ_ASYNC_CLK_ENA_LOW] = {
+		.reg_offset = 3, .mask = ARIZONA_ASYNC_CLK_ENA_LOW_EINT1
+	},
+	[ARIZONA_IRQ_SYSCLK_ENA_LOW] = {
+		.reg_offset = 3, .mask = ARIZONA_SYSCLK_ENA_LOW_EINT1
+	},
+	[ARIZONA_IRQ_ISRC1_CFG_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_ISRC1_CFG_ERR_EINT1
+	},
+	[ARIZONA_IRQ_ISRC2_CFG_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_ISRC2_CFG_ERR_EINT1
+	},
+
+	[ARIZONA_IRQ_BOOT_DONE] = {
+		.reg_offset = 4, .mask = ARIZONA_BOOT_DONE_EINT1
+	},
+	[ARIZONA_IRQ_FLL2_CLOCK_OK] = {
+		.reg_offset = 4, .mask = ARIZONA_FLL2_CLOCK_OK_EINT1
+	},
+	[ARIZONA_IRQ_FLL1_CLOCK_OK] = {
+		.reg_offset = 4, .mask = ARIZONA_FLL1_CLOCK_OK_EINT1
+	},
+};
+
+struct regmap_irq_chip wm8998_irq = {
+	.name = "wm8998 IRQ",
+	.status_base = ARIZONA_INTERRUPT_STATUS_1,
+	.mask_base = ARIZONA_INTERRUPT_STATUS_1_MASK,
+	.ack_base = ARIZONA_INTERRUPT_STATUS_1,
+	.num_regs = 5,
+	.irqs = wm8998_irqs,
+	.num_irqs = ARRAY_SIZE(wm8998_irqs),
+};
+
+static const struct reg_default wm8998_reg_default[] = {
+	{ 0x00000009, 0x0001 },    /* R9     - Ctrl IF I2C1 CFG 1 */
+	{ 0x0000000B, 0x001A },    /* R11    - Ctrl IF I2C1 CFG 2 */
+	{ 0x00000020, 0x0000 },    /* R32    - Tone Generator 1 */
+	{ 0x00000021, 0x1000 },    /* R33    - Tone Generator 2 */
+	{ 0x00000022, 0x0000 },    /* R34    - Tone Generator 3 */
+	{ 0x00000023, 0x1000 },    /* R35    - Tone Generator 4 */
+	{ 0x00000024, 0x0000 },    /* R36    - Tone Generator 5 */
+	{ 0x00000030, 0x0000 },    /* R48    - PWM Drive 1 */
+	{ 0x00000031, 0x0100 },    /* R49    - PWM Drive 2 */
+	{ 0x00000032, 0x0100 },    /* R50    - PWM Drive 3 */
+	{ 0x00000040, 0x0000 },    /* R64    - Wake control */
+	{ 0x00000041, 0x0000 },    /* R65    - Sequence control */
+	{ 0x00000061, 0x01FF },    /* R97    - Sample Rate Sequence Select 1 */
+	{ 0x00000062, 0x01FF },    /* R98    - Sample Rate Sequence Select 2 */
+	{ 0x00000063, 0x01FF },    /* R99    - Sample Rate Sequence Select 3 */
+	{ 0x00000064, 0x01FF },    /* R100   - Sample Rate Sequence Select 4 */
+	{ 0x00000066, 0x01FF },    /* R102   - Always On Triggers Sequence Select 1 */
+	{ 0x00000067, 0x01FF },    /* R103   - Always On Triggers Sequence Select 2 */
+	{ 0x00000068, 0x01FF },    /* R104   - Always On Triggers Sequence Select 3 */
+	{ 0x00000069, 0x01FF },    /* R105   - Always On Triggers Sequence Select 4 */
+	{ 0x0000006A, 0x01FF },    /* R106   - Always On Triggers Sequence Select 5 */
+	{ 0x0000006B, 0x01FF },    /* R107   - Always On Triggers Sequence Select 6 */
+	{ 0x0000006E, 0x01FF },    /* R110   - Trigger Sequence Select 32 */
+	{ 0x0000006F, 0x01FF },    /* R111   - Trigger Sequence Select 33 */
+	{ 0x00000090, 0x0000 },    /* R144   - Haptics Control 1 */
+	{ 0x00000091, 0x7FFF },    /* R145   - Haptics Control 2 */
+	{ 0x00000092, 0x0000 },    /* R146   - Haptics phase 1 intensity */
+	{ 0x00000093, 0x0000 },    /* R147   - Haptics phase 1 duration */
+	{ 0x00000094, 0x0000 },    /* R148   - Haptics phase 2 intensity */
+	{ 0x00000095, 0x0000 },    /* R149   - Haptics phase 2 duration */
+	{ 0x00000096, 0x0000 },    /* R150   - Haptics phase 3 intensity */
+	{ 0x00000097, 0x0000 },    /* R151   - Haptics phase 3 duration */
+	{ 0x00000100, 0x0002 },    /* R256   - Clock 32k 1 */
+	{ 0x00000101, 0x0304 },    /* R257   - System Clock 1 */
+	{ 0x00000102, 0x0011 },    /* R258   - Sample rate 1 */
+	{ 0x00000103, 0x0011 },    /* R259   - Sample rate 2 */
+	{ 0x00000104, 0x0011 },    /* R260   - Sample rate 3 */
+	{ 0x00000112, 0x0305 },    /* R274   - Async clock 1 */
+	{ 0x00000113, 0x0011 },    /* R275   - Async sample rate 1 */
+	{ 0x00000114, 0x0011 },    /* R276   - Async sample rate 2 */
+	{ 0x00000149, 0x0000 },    /* R329   - Output system clock */
+	{ 0x0000014A, 0x0000 },    /* R330   - Output async clock */
+	{ 0x00000152, 0x0000 },    /* R338   - Rate Estimator 1 */
+	{ 0x00000153, 0x0000 },    /* R339   - Rate Estimator 2 */
+	{ 0x00000154, 0x0000 },    /* R340   - Rate Estimator 3 */
+	{ 0x00000155, 0x0000 },    /* R341   - Rate Estimator 4 */
+	{ 0x00000156, 0x0000 },    /* R342   - Rate Estimator 5 */
+	{ 0x00000161, 0x0000 },    /* R353   - Dynamic Frequency Scaling 1 */
+	{ 0x00000171, 0x0002 },    /* R369   - FLL1 Control 1 */
+	{ 0x00000172, 0x0008 },    /* R370   - FLL1 Control 2 */
+	{ 0x00000173, 0x0018 },    /* R371   - FLL1 Control 3 */
+	{ 0x00000174, 0x007D },    /* R372   - FLL1 Control 4 */
+	{ 0x00000175, 0x0004 },    /* R373   - FLL1 Control 5 */
+	{ 0x00000176, 0x0000 },    /* R374   - FLL1 Control 6 */
+	{ 0x00000177, 0x0181 },    /* R375   - FLL1 Loop Filter Test 1 */
+	{ 0x00000178, 0x0000 },    /* R376   - FLL1 NCO Test 0 */
+	{ 0x00000179, 0x0000 },    /* R377   - FLL1 Control 7 */
+	{ 0x00000181, 0x0000 },    /* R385   - FLL1 Synchroniser 1 */
+	{ 0x00000182, 0x0000 },    /* R386   - FLL1 Synchroniser 2 */
+	{ 0x00000183, 0x0000 },    /* R387   - FLL1 Synchroniser 3 */
+	{ 0x00000184, 0x0000 },    /* R388   - FLL1 Synchroniser 4 */
+	{ 0x00000185, 0x0000 },    /* R389   - FLL1 Synchroniser 5 */
+	{ 0x00000186, 0x0000 },    /* R390   - FLL1 Synchroniser 6 */
+	{ 0x00000187, 0x0001 },    /* R391   - FLL1 Synchroniser 7 */
+	{ 0x00000189, 0x0000 },    /* R393   - FLL1 Spread Spectrum */
+	{ 0x0000018A, 0x0004 },    /* R394   - FLL1 GPIO Clock */
+	{ 0x00000191, 0x0000 },    /* R401   - FLL2 Control 1 */
+	{ 0x00000192, 0x0008 },    /* R402   - FLL2 Control 2 */
+	{ 0x00000193, 0x0018 },    /* R403   - FLL2 Control 3 */
+	{ 0x00000194, 0x007D },    /* R404   - FLL2 Control 4 */
+	{ 0x00000195, 0x0004 },    /* R405   - FLL2 Control 5 */
+	{ 0x00000196, 0x0000 },    /* R406   - FLL2 Control 6 */
+	{ 0x00000197, 0x0000 },    /* R407   - FLL2 Loop Filter Test 1 */
+	{ 0x00000198, 0x0000 },    /* R408   - FLL2 NCO Test 0 */
+	{ 0x00000199, 0x0000 },    /* R409   - FLL2 Control 7 */
+	{ 0x000001A1, 0x0000 },    /* R417   - FLL2 Synchroniser 1 */
+	{ 0x000001A2, 0x0000 },    /* R418   - FLL2 Synchroniser 2 */
+	{ 0x000001A3, 0x0000 },    /* R419   - FLL2 Synchroniser 3 */
+	{ 0x000001A4, 0x0000 },    /* R420   - FLL2 Synchroniser 4 */
+	{ 0x000001A5, 0x0000 },    /* R421   - FLL2 Synchroniser 5 */
+	{ 0x000001A6, 0x0000 },    /* R422   - FLL2 Synchroniser 6 */
+	{ 0x000001A7, 0x0001 },    /* R423   - FLL2 Synchroniser 7 */
+	{ 0x000001A9, 0x0000 },    /* R425   - FLL2 Spread Spectrum */
+	{ 0x000001AA, 0x0004 },    /* R426   - FLL2 GPIO Clock */
+	{ 0x00000200, 0x0006 },    /* R512   - Mic Charge Pump 1 */
+	{ 0x00000210, 0x00D4 },    /* R528   - LDO1 Control 1 */
+	{ 0x00000212, 0x0000 },    /* R530   - LDO1 Control 2 */
+	{ 0x00000213, 0x0344 },    /* R531   - LDO2 Control 1 */
+	{ 0x00000218, 0x01A6 },    /* R536   - Mic Bias Ctrl 1 */
+	{ 0x00000219, 0x01A6 },    /* R537   - Mic Bias Ctrl 2 */
+	{ 0x0000021A, 0x01A6 },    /* R538   - Mic Bias Ctrl 3 */
+	{ 0x00000293, 0x0080 },    /* R659   - Accessory Detect Mode 1 */
+	{ 0x0000029B, 0x0000 },    /* R667   - Headphone Detect 1 */
+	{ 0x0000029C, 0x0000 },    /* R668   - Headphone Detect 2 */
+	{ 0x000002A2, 0x0000 },    /* R674   - Micd Clamp control */
+	{ 0x000002A3, 0x1102 },    /* R675   - Mic Detect 1 */
+	{ 0x000002A4, 0x009F },    /* R676   - Mic Detect 2 */
+	{ 0x000002A5, 0x0000 },    /* R677   - Mic Detect 3 */
+	{ 0x000002A6, 0x3737 },    /* R678   - Mic Detect Level 1 */
+	{ 0x000002A7, 0x2C37 },    /* R679   - Mic Detect Level 2 */
+	{ 0x000002A8, 0x1422 },    /* R680   - Mic Detect Level 3 */
+	{ 0x000002A9, 0x030A },    /* R681   - Mic Detect Level 4 */
+	{ 0x000002AB, 0x0000 },    /* R683   - Mic Detect 4 */
+	{ 0x000002CB, 0x0000 },    /* R715   - Isolation control */
+	{ 0x000002D3, 0x0000 },    /* R723   - Jack detect analogue */
+	{ 0x00000300, 0x0000 },    /* R768   - Input Enables */
+	{ 0x00000308, 0x0000 },    /* R776   - Input Rate */
+	{ 0x00000309, 0x0022 },    /* R777   - Input Volume Ramp */
+	{ 0x0000030C, 0x0002 },    /* R780   - HPF Control */
+	{ 0x00000310, 0x2080 },    /* R784   - IN1L Control */
+	{ 0x00000311, 0x0180 },    /* R785   - ADC Digital Volume 1L */
+	{ 0x00000312, 0x0000 },    /* R786   - DMIC1L Control */
+	{ 0x00000314, 0x0080 },    /* R788   - IN1R Control */
+	{ 0x00000315, 0x0180 },    /* R789   - ADC Digital Volume 1R */
+	{ 0x00000316, 0x0000 },    /* R790   - DMIC1R Control */
+	{ 0x00000318, 0x2080 },    /* R792   - IN2L Control */
+	{ 0x00000319, 0x0180 },    /* R793   - ADC Digital Volume 2L */
+	{ 0x0000031A, 0x0000 },    /* R794   - DMIC2L Control */
+	{ 0x00000400, 0x0000 },    /* R1024  - Output Enables 1 */
+	{ 0x00000408, 0x0000 },    /* R1032  - Output Rate 1 */
+	{ 0x00000409, 0x0022 },    /* R1033  - Output Volume Ramp */
+	{ 0x00000410, 0x2080 },    /* R1040  - Output Path Config 1L */
+	{ 0x00000411, 0x0180 },    /* R1041  - DAC Digital Volume 1L */
+	{ 0x00000413, 0x0001 },    /* R1043  - Noise Gate Select 1L */
+	{ 0x00000414, 0x0080 },    /* R1044  - Output Path Config 1R */
+	{ 0x00000415, 0x0180 },    /* R1045  - DAC Digital Volume 1R */
+	{ 0x00000417, 0x0002 },    /* R1047  - Noise Gate Select 1R */
+	{ 0x00000418, 0x2080 },    /* R1048  - Output Path Config 2L */
+	{ 0x00000419, 0x0180 },    /* R1049  - DAC Digital Volume 2L */
+	{ 0x0000041B, 0x0004 },    /* R1051  - Noise Gate Select 2L */
+	{ 0x0000041C, 0x0080 },    /* R1052  - Output Path Config 2R */
+	{ 0x0000041D, 0x0180 },    /* R1053  - DAC Digital Volume 2R */
+	{ 0x0000041F, 0x0008 },    /* R1055  - Noise Gate Select 2R */
+	{ 0x00000420, 0x2080 },    /* R1056  - Output Path Config 3L */
+	{ 0x00000421, 0x0180 },    /* R1057  - DAC Digital Volume 3L */
+	{ 0x00000423, 0x0010 },    /* R1059  - Noise Gate Select 3L */
+	{ 0x00000428, 0x0000 },    /* R1064  - Output Path Config 4L */
+	{ 0x00000429, 0x0180 },    /* R1065  - DAC Digital Volume 4L */
+	{ 0x0000042B, 0x0040 },    /* R1067  - Noise Gate Select 4L */
+	{ 0x0000042C, 0x0000 },    /* R1068  - Output Path Config 4R */
+	{ 0x0000042D, 0x0180 },    /* R1069  - DAC Digital Volume 4R */
+	{ 0x0000042F, 0x0080 },    /* R1071  - Noise Gate Select 4R */
+	{ 0x00000430, 0x0000 },    /* R1072  - Output Path Config 5L */
+	{ 0x00000431, 0x0180 },    /* R1073  - DAC Digital Volume 5L */
+	{ 0x00000433, 0x0100 },    /* R1075  - Noise Gate Select 5L */
+	{ 0x00000434, 0x0000 },    /* R1076  - Output Path Config 5R */
+	{ 0x00000435, 0x0180 },    /* R1077  - DAC Digital Volume 5R */
+	{ 0x00000437, 0x0200 },    /* R1079  - Noise Gate Select 5R */
+	{ 0x00000440, 0x8FFF },    /* R1088  - DRE Enable */
+	{ 0x00000441, 0xC759 },    /* R1089  - DRE Control 1 */
+	{ 0x00000442, 0x2A08 },    /* R1090  - DRE Control 2 */
+	{ 0x00000443, 0x5CFA },    /* R1091  - DRE Control 3 */
+	{ 0x00000448, 0x03EF },    /* R1096  - EDRE Enable */
+	{ 0x00000450, 0x0000 },    /* R1104  - DAC AEC Control 1 */
+	{ 0x00000451, 0x0000 },    /* R1105  - DAC AEC Control 2 */
+	{ 0x00000458, 0x0000 },    /* R1112  - Noise Gate Control */
+	{ 0x00000490, 0x0069 },    /* R1168  - PDM SPK1 CTRL 1 */
+	{ 0x00000491, 0x0000 },    /* R1169  - PDM SPK1 CTRL 2 */
+	{ 0x0000049A, 0x0000 },    /* R1178  - HP_TEST_CTRL_13 */
+	{ 0x00000500, 0x000C },    /* R1280  - AIF1 BCLK Ctrl */
+	{ 0x00000501, 0x0008 },    /* R1281  - AIF1 Tx Pin Ctrl */
+	{ 0x00000502, 0x0000 },    /* R1282  - AIF1 Rx Pin Ctrl */
+	{ 0x00000503, 0x0000 },    /* R1283  - AIF1 Rate Ctrl */
+	{ 0x00000504, 0x0000 },    /* R1284  - AIF1 Format */
+	{ 0x00000506, 0x0040 },    /* R1286  - AIF1 Rx BCLK Rate */
+	{ 0x00000507, 0x1818 },    /* R1287  - AIF1 Frame Ctrl 1 */
+	{ 0x00000508, 0x1818 },    /* R1288  - AIF1 Frame Ctrl 2 */
+	{ 0x00000509, 0x0000 },    /* R1289  - AIF1 Frame Ctrl 3 */
+	{ 0x0000050A, 0x0001 },    /* R1290  - AIF1 Frame Ctrl 4 */
+	{ 0x0000050B, 0x0002 },    /* R1291  - AIF1 Frame Ctrl 5 */
+	{ 0x0000050C, 0x0003 },    /* R1292  - AIF1 Frame Ctrl 6 */
+	{ 0x0000050D, 0x0004 },    /* R1293  - AIF1 Frame Ctrl 7 */
+	{ 0x0000050E, 0x0005 },    /* R1294  - AIF1 Frame Ctrl 8 */
+	{ 0x00000511, 0x0000 },    /* R1297  - AIF1 Frame Ctrl 11 */
+	{ 0x00000512, 0x0001 },    /* R1298  - AIF1 Frame Ctrl 12 */
+	{ 0x00000513, 0x0002 },    /* R1299  - AIF1 Frame Ctrl 13 */
+	{ 0x00000514, 0x0003 },    /* R1300  - AIF1 Frame Ctrl 14 */
+	{ 0x00000515, 0x0004 },    /* R1301  - AIF1 Frame Ctrl 15 */
+	{ 0x00000516, 0x0005 },    /* R1302  - AIF1 Frame Ctrl 16 */
+	{ 0x00000519, 0x0000 },    /* R1305  - AIF1 Tx Enables */
+	{ 0x0000051A, 0x0000 },    /* R1306  - AIF1 Rx Enables */
+	{ 0x00000540, 0x000C },    /* R1344  - AIF2 BCLK Ctrl */
+	{ 0x00000541, 0x0008 },    /* R1345  - AIF2 Tx Pin Ctrl */
+	{ 0x00000542, 0x0000 },    /* R1346  - AIF2 Rx Pin Ctrl */
+	{ 0x00000543, 0x0000 },    /* R1347  - AIF2 Rate Ctrl */
+	{ 0x00000544, 0x0000 },    /* R1348  - AIF2 Format */
+	{ 0x00000546, 0x0040 },    /* R1350  - AIF2 Rx BCLK Rate */
+	{ 0x00000547, 0x1818 },    /* R1351  - AIF2 Frame Ctrl 1 */
+	{ 0x00000548, 0x1818 },    /* R1352  - AIF2 Frame Ctrl 2 */
+	{ 0x00000549, 0x0000 },    /* R1353  - AIF2 Frame Ctrl 3 */
+	{ 0x0000054A, 0x0001 },    /* R1354  - AIF2 Frame Ctrl 4 */
+	{ 0x0000054B, 0x0002 },    /* R1355  - AIF2 Frame Ctrl 5 */
+	{ 0x0000054C, 0x0003 },    /* R1356  - AIF2 Frame Ctrl 6 */
+	{ 0x0000054D, 0x0004 },    /* R1357  - AIF2 Frame Ctrl 7 */
+	{ 0x0000054E, 0x0005 },    /* R1358  - AIF2 Frame Ctrl 8 */
+	{ 0x00000551, 0x0000 },    /* R1361  - AIF2 Frame Ctrl 11 */
+	{ 0x00000552, 0x0001 },    /* R1362  - AIF2 Frame Ctrl 12 */
+	{ 0x00000553, 0x0002 },    /* R1363  - AIF2 Frame Ctrl 13 */
+	{ 0x00000554, 0x0003 },    /* R1364  - AIF2 Frame Ctrl 14 */
+	{ 0x00000555, 0x0004 },    /* R1365  - AIF2 Frame Ctrl 15 */
+	{ 0x00000556, 0x0005 },    /* R1366  - AIF2 Frame Ctrl 16 */
+	{ 0x00000559, 0x0000 },    /* R1369  - AIF2 Tx Enables */
+	{ 0x0000055A, 0x0000 },    /* R1370  - AIF2 Rx Enables */
+	{ 0x00000580, 0x000C },    /* R1408  - AIF3 BCLK Ctrl */
+	{ 0x00000581, 0x0008 },    /* R1409  - AIF3 Tx Pin Ctrl */
+	{ 0x00000582, 0x0000 },    /* R1410  - AIF3 Rx Pin Ctrl */
+	{ 0x00000583, 0x0000 },    /* R1411  - AIF3 Rate Ctrl */
+	{ 0x00000584, 0x0000 },    /* R1412  - AIF3 Format */
+	{ 0x00000586, 0x0040 },    /* R1414  - AIF3 Rx BCLK Rate */
+	{ 0x00000587, 0x1818 },    /* R1415  - AIF3 Frame Ctrl 1 */
+	{ 0x00000588, 0x1818 },    /* R1416  - AIF3 Frame Ctrl 2 */
+	{ 0x00000589, 0x0000 },    /* R1417  - AIF3 Frame Ctrl 3 */
+	{ 0x0000058A, 0x0001 },    /* R1418  - AIF3 Frame Ctrl 4 */
+	{ 0x00000591, 0x0000 },    /* R1425  - AIF3 Frame Ctrl 11 */
+	{ 0x00000592, 0x0001 },    /* R1426  - AIF3 Frame Ctrl 12 */
+	{ 0x00000599, 0x0000 },    /* R1433  - AIF3 Tx Enables */
+	{ 0x0000059A, 0x0000 },    /* R1434  - AIF3 Rx Enables */
+	{ 0x000005C2, 0x0000 },    /* R1474  - SPD1 TX Control */
+	{ 0x000005C3, 0x0000 },    /* R1475  - SPD1 TX Channel Status 1 */
+	{ 0x000005C4, 0x0B01 },    /* R1476  - SPD1 TX Channel Status 2 */
+	{ 0x000005C5, 0x0000 },    /* R1477  - SPD1 TX Channel Status 3 */
+	{ 0x000005E3, 0x0004 },    /* R1507  - SLIMbus Framer Ref Gear */
+	{ 0x000005E5, 0x0000 },    /* R1509  - SLIMbus Rates 1 */
+	{ 0x000005E6, 0x0000 },    /* R1510  - SLIMbus Rates 2 */
+	{ 0x000005E9, 0x0000 },    /* R1513  - SLIMbus Rates 5 */
+	{ 0x000005EA, 0x0000 },    /* R1514  - SLIMbus Rates 6 */
+	{ 0x000005EB, 0x0000 },    /* R1515  - SLIMbus Rates 7 */
+	{ 0x000005F5, 0x0000 },    /* R1525  - SLIMbus RX Channel Enable */
+	{ 0x000005F6, 0x0000 },    /* R1526  - SLIMbus TX Channel Enable */
+	{ 0x00000640, 0x0000 },    /* R1600  - PWM1MIX Input 1 Source */
+	{ 0x00000641, 0x0080 },    /* R1601  - PWM1MIX Input 1 Volume */
+	{ 0x00000642, 0x0000 },    /* R1602  - PWM1MIX Input 2 Source */
+	{ 0x00000643, 0x0080 },    /* R1603  - PWM1MIX Input 2 Volume */
+	{ 0x00000644, 0x0000 },    /* R1604  - PWM1MIX Input 3 Source */
+	{ 0x00000645, 0x0080 },    /* R1605  - PWM1MIX Input 3 Volume */
+	{ 0x00000646, 0x0000 },    /* R1606  - PWM1MIX Input 4 Source */
+	{ 0x00000647, 0x0080 },    /* R1607  - PWM1MIX Input 4 Volume */
+	{ 0x00000648, 0x0000 },    /* R1608  - PWM2MIX Input 1 Source */
+	{ 0x00000649, 0x0080 },    /* R1609  - PWM2MIX Input 1 Volume */
+	{ 0x0000064A, 0x0000 },    /* R1610  - PWM2MIX Input 2 Source */
+	{ 0x0000064B, 0x0080 },    /* R1611  - PWM2MIX Input 2 Volume */
+	{ 0x0000064C, 0x0000 },    /* R1612  - PWM2MIX Input 3 Source */
+	{ 0x0000064D, 0x0080 },    /* R1613  - PWM2MIX Input 3 Volume */
+	{ 0x0000064E, 0x0000 },    /* R1614  - PWM2MIX Input 4 Source */
+	{ 0x0000064F, 0x0080 },    /* R1615  - PWM2MIX Input 4 Volume */
+	{ 0x00000680, 0x0000 },    /* R1664  - OUT1LMIX Input 1 Source */
+	{ 0x00000681, 0x0080 },    /* R1665  - OUT1LMIX Input 1 Volume */
+	{ 0x00000682, 0x0000 },    /* R1666  - OUT1LMIX Input 2 Source */
+	{ 0x00000683, 0x0080 },    /* R1667  - OUT1LMIX Input 2 Volume */
+	{ 0x00000684, 0x0000 },    /* R1668  - OUT1LMIX Input 3 Source */
+	{ 0x00000685, 0x0080 },    /* R1669  - OUT1LMIX Input 3 Volume */
+	{ 0x00000686, 0x0000 },    /* R1670  - OUT1LMIX Input 4 Source */
+	{ 0x00000687, 0x0080 },    /* R1671  - OUT1LMIX Input 4 Volume */
+	{ 0x00000688, 0x0000 },    /* R1672  - OUT1RMIX Input 1 Source */
+	{ 0x00000689, 0x0080 },    /* R1673  - OUT1RMIX Input 1 Volume */
+	{ 0x0000068A, 0x0000 },    /* R1674  - OUT1RMIX Input 2 Source */
+	{ 0x0000068B, 0x0080 },    /* R1675  - OUT1RMIX Input 2 Volume */
+	{ 0x0000068C, 0x0000 },    /* R1676  - OUT1RMIX Input 3 Source */
+	{ 0x0000068D, 0x0080 },    /* R1677  - OUT1RMIX Input 3 Volume */
+	{ 0x0000068E, 0x0000 },    /* R1678  - OUT1RMIX Input 4 Source */
+	{ 0x0000068F, 0x0080 },    /* R1679  - OUT1RMIX Input 4 Volume */
+	{ 0x00000690, 0x0000 },    /* R1680  - OUT2LMIX Input 1 Source */
+	{ 0x00000691, 0x0080 },    /* R1681  - OUT2LMIX Input 1 Volume */
+	{ 0x00000692, 0x0000 },    /* R1682  - OUT2LMIX Input 2 Source */
+	{ 0x00000693, 0x0080 },    /* R1683  - OUT2LMIX Input 2 Volume */
+	{ 0x00000694, 0x0000 },    /* R1684  - OUT2LMIX Input 3 Source */
+	{ 0x00000695, 0x0080 },    /* R1685  - OUT2LMIX Input 3 Volume */
+	{ 0x00000696, 0x0000 },    /* R1686  - OUT2LMIX Input 4 Source */
+	{ 0x00000697, 0x0080 },    /* R1687  - OUT2LMIX Input 4 Volume */
+	{ 0x00000698, 0x0000 },    /* R1688  - OUT2RMIX Input 1 Source */
+	{ 0x00000699, 0x0080 },    /* R1689  - OUT2RMIX Input 1 Volume */
+	{ 0x0000069A, 0x0000 },    /* R1690  - OUT2RMIX Input 2 Source */
+	{ 0x0000069B, 0x0080 },    /* R1691  - OUT2RMIX Input 2 Volume */
+	{ 0x0000069C, 0x0000 },    /* R1692  - OUT2RMIX Input 3 Source */
+	{ 0x0000069D, 0x0080 },    /* R1693  - OUT2RMIX Input 3 Volume */
+	{ 0x0000069E, 0x0000 },    /* R1694  - OUT2RMIX Input 4 Source */
+	{ 0x0000069F, 0x0080 },    /* R1695  - OUT2RMIX Input 4 Volume */
+	{ 0x000006A0, 0x0000 },    /* R1696  - OUT3LMIX Input 1 Source */
+	{ 0x000006A1, 0x0080 },    /* R1697  - OUT3LMIX Input 1 Volume */
+	{ 0x000006A2, 0x0000 },    /* R1698  - OUT3LMIX Input 2 Source */
+	{ 0x000006A3, 0x0080 },    /* R1699  - OUT3LMIX Input 2 Volume */
+	{ 0x000006A4, 0x0000 },    /* R1700  - OUT3LMIX Input 3 Source */
+	{ 0x000006A5, 0x0080 },    /* R1701  - OUT3LMIX Input 3 Volume */
+	{ 0x000006A6, 0x0000 },    /* R1702  - OUT3LMIX Input 4 Source */
+	{ 0x000006A7, 0x0080 },    /* R1703  - OUT3LMIX Input 4 Volume */
+	{ 0x000006B0, 0x0000 },    /* R1712  - OUT4LMIX Input 1 Source */
+	{ 0x000006B1, 0x0080 },    /* R1713  - OUT4LMIX Input 1 Volume */
+	{ 0x000006B2, 0x0000 },    /* R1714  - OUT4LMIX Input 2 Source */
+	{ 0x000006B3, 0x0080 },    /* R1715  - OUT4LMIX Input 2 Volume */
+	{ 0x000006B4, 0x0000 },    /* R1716  - OUT4LMIX Input 3 Source */
+	{ 0x000006B5, 0x0080 },    /* R1717  - OUT4LMIX Input 3 Volume */
+	{ 0x000006B6, 0x0000 },    /* R1718  - OUT4LMIX Input 4 Source */
+	{ 0x000006B7, 0x0080 },    /* R1719  - OUT4LMIX Input 4 Volume */
+	{ 0x000006B8, 0x0000 },    /* R1720  - OUT4RMIX Input 1 Source */
+	{ 0x000006B9, 0x0080 },    /* R1721  - OUT4RMIX Input 1 Volume */
+	{ 0x000006BA, 0x0000 },    /* R1722  - OUT4RMIX Input 2 Source */
+	{ 0x000006BB, 0x0080 },    /* R1723  - OUT4RMIX Input 2 Volume */
+	{ 0x000006BC, 0x0000 },    /* R1724  - OUT4RMIX Input 3 Source */
+	{ 0x000006BD, 0x0080 },    /* R1725  - OUT4RMIX Input 3 Volume */
+	{ 0x000006BE, 0x0000 },    /* R1726  - OUT4RMIX Input 4 Source */
+	{ 0x000006BF, 0x0080 },    /* R1727  - OUT4RMIX Input 4 Volume */
+	{ 0x000006C0, 0x0000 },    /* R1728  - OUT5LMIX Input 1 Source */
+	{ 0x000006C1, 0x0080 },    /* R1729  - OUT5LMIX Input 1 Volume */
+	{ 0x000006C2, 0x0000 },    /* R1730  - OUT5LMIX Input 2 Source */
+	{ 0x000006C3, 0x0080 },    /* R1731  - OUT5LMIX Input 2 Volume */
+	{ 0x000006C4, 0x0000 },    /* R1732  - OUT5LMIX Input 3 Source */
+	{ 0x000006C5, 0x0080 },    /* R1733  - OUT5LMIX Input 3 Volume */
+	{ 0x000006C6, 0x0000 },    /* R1734  - OUT5LMIX Input 4 Source */
+	{ 0x000006C7, 0x0080 },    /* R1735  - OUT5LMIX Input 4 Volume */
+	{ 0x000006C8, 0x0000 },    /* R1736  - OUT5RMIX Input 1 Source */
+	{ 0x000006C9, 0x0080 },    /* R1737  - OUT5RMIX Input 1 Volume */
+	{ 0x000006CA, 0x0000 },    /* R1738  - OUT5RMIX Input 2 Source */
+	{ 0x000006CB, 0x0080 },    /* R1739  - OUT5RMIX Input 2 Volume */
+	{ 0x000006CC, 0x0000 },    /* R1740  - OUT5RMIX Input 3 Source */
+	{ 0x000006CD, 0x0080 },    /* R1741  - OUT5RMIX Input 3 Volume */
+	{ 0x000006CE, 0x0000 },    /* R1742  - OUT5RMIX Input 4 Source */
+	{ 0x000006CF, 0x0080 },    /* R1743  - OUT5RMIX Input 4 Volume */
+	{ 0x00000700, 0x0000 },    /* R1792  - AIF1TX1MIX Input 1 Source */
+	{ 0x00000701, 0x0080 },    /* R1793  - AIF1TX1MIX Input 1 Volume */
+	{ 0x00000702, 0x0000 },    /* R1794  - AIF1TX1MIX Input 2 Source */
+	{ 0x00000703, 0x0080 },    /* R1795  - AIF1TX1MIX Input 2 Volume */
+	{ 0x00000704, 0x0000 },    /* R1796  - AIF1TX1MIX Input 3 Source */
+	{ 0x00000705, 0x0080 },    /* R1797  - AIF1TX1MIX Input 3 Volume */
+	{ 0x00000706, 0x0000 },    /* R1798  - AIF1TX1MIX Input 4 Source */
+	{ 0x00000707, 0x0080 },    /* R1799  - AIF1TX1MIX Input 4 Volume */
+	{ 0x00000708, 0x0000 },    /* R1800  - AIF1TX2MIX Input 1 Source */
+	{ 0x00000709, 0x0080 },    /* R1801  - AIF1TX2MIX Input 1 Volume */
+	{ 0x0000070A, 0x0000 },    /* R1802  - AIF1TX2MIX Input 2 Source */
+	{ 0x0000070B, 0x0080 },    /* R1803  - AIF1TX2MIX Input 2 Volume */
+	{ 0x0000070C, 0x0000 },    /* R1804  - AIF1TX2MIX Input 3 Source */
+	{ 0x0000070D, 0x0080 },    /* R1805  - AIF1TX2MIX Input 3 Volume */
+	{ 0x0000070E, 0x0000 },    /* R1806  - AIF1TX2MIX Input 4 Source */
+	{ 0x0000070F, 0x0080 },    /* R1807  - AIF1TX2MIX Input 4 Volume */
+	{ 0x00000710, 0x0000 },    /* R1808  - AIF1TX3MIX Input 1 Source */
+	{ 0x00000711, 0x0080 },    /* R1809  - AIF1TX3MIX Input 1 Volume */
+	{ 0x00000712, 0x0000 },    /* R1810  - AIF1TX3MIX Input 2 Source */
+	{ 0x00000713, 0x0080 },    /* R1811  - AIF1TX3MIX Input 2 Volume */
+	{ 0x00000714, 0x0000 },    /* R1812  - AIF1TX3MIX Input 3 Source */
+	{ 0x00000715, 0x0080 },    /* R1813  - AIF1TX3MIX Input 3 Volume */
+	{ 0x00000716, 0x0000 },    /* R1814  - AIF1TX3MIX Input 4 Source */
+	{ 0x00000717, 0x0080 },    /* R1815  - AIF1TX3MIX Input 4 Volume */
+	{ 0x00000718, 0x0000 },    /* R1816  - AIF1TX4MIX Input 1 Source */
+	{ 0x00000719, 0x0080 },    /* R1817  - AIF1TX4MIX Input 1 Volume */
+	{ 0x0000071A, 0x0000 },    /* R1818  - AIF1TX4MIX Input 2 Source */
+	{ 0x0000071B, 0x0080 },    /* R1819  - AIF1TX4MIX Input 2 Volume */
+	{ 0x0000071C, 0x0000 },    /* R1820  - AIF1TX4MIX Input 3 Source */
+	{ 0x0000071D, 0x0080 },    /* R1821  - AIF1TX4MIX Input 3 Volume */
+	{ 0x0000071E, 0x0000 },    /* R1822  - AIF1TX4MIX Input 4 Source */
+	{ 0x0000071F, 0x0080 },    /* R1823  - AIF1TX4MIX Input 4 Volume */
+	{ 0x00000720, 0x0000 },    /* R1824  - AIF1TX5MIX Input 1 Source */
+	{ 0x00000721, 0x0080 },    /* R1825  - AIF1TX5MIX Input 1 Volume */
+	{ 0x00000722, 0x0000 },    /* R1826  - AIF1TX5MIX Input 2 Source */
+	{ 0x00000723, 0x0080 },    /* R1827  - AIF1TX5MIX Input 2 Volume */
+	{ 0x00000724, 0x0000 },    /* R1828  - AIF1TX5MIX Input 3 Source */
+	{ 0x00000725, 0x0080 },    /* R1829  - AIF1TX5MIX Input 3 Volume */
+	{ 0x00000726, 0x0000 },    /* R1830  - AIF1TX5MIX Input 4 Source */
+	{ 0x00000727, 0x0080 },    /* R1831  - AIF1TX5MIX Input 4 Volume */
+	{ 0x00000728, 0x0000 },    /* R1832  - AIF1TX6MIX Input 1 Source */
+	{ 0x00000729, 0x0080 },    /* R1833  - AIF1TX6MIX Input 1 Volume */
+	{ 0x0000072A, 0x0000 },    /* R1834  - AIF1TX6MIX Input 2 Source */
+	{ 0x0000072B, 0x0080 },    /* R1835  - AIF1TX6MIX Input 2 Volume */
+	{ 0x0000072C, 0x0000 },    /* R1836  - AIF1TX6MIX Input 3 Source */
+	{ 0x0000072D, 0x0080 },    /* R1837  - AIF1TX6MIX Input 3 Volume */
+	{ 0x0000072E, 0x0000 },    /* R1838  - AIF1TX6MIX Input 4 Source */
+	{ 0x0000072F, 0x0080 },    /* R1839  - AIF1TX6MIX Input 4 Volume */
+	{ 0x00000740, 0x0000 },    /* R1856  - AIF2TX1MIX Input 1 Source */
+	{ 0x00000741, 0x0080 },    /* R1857  - AIF2TX1MIX Input 1 Volume */
+	{ 0x00000742, 0x0000 },    /* R1858  - AIF2TX1MIX Input 2 Source */
+	{ 0x00000743, 0x0080 },    /* R1859  - AIF2TX1MIX Input 2 Volume */
+	{ 0x00000744, 0x0000 },    /* R1860  - AIF2TX1MIX Input 3 Source */
+	{ 0x00000745, 0x0080 },    /* R1861  - AIF2TX1MIX Input 3 Volume */
+	{ 0x00000746, 0x0000 },    /* R1862  - AIF2TX1MIX Input 4 Source */
+	{ 0x00000747, 0x0080 },    /* R1863  - AIF2TX1MIX Input 4 Volume */
+	{ 0x00000748, 0x0000 },    /* R1864  - AIF2TX2MIX Input 1 Source */
+	{ 0x00000749, 0x0080 },    /* R1865  - AIF2TX2MIX Input 1 Volume */
+	{ 0x0000074A, 0x0000 },    /* R1866  - AIF2TX2MIX Input 2 Source */
+	{ 0x0000074B, 0x0080 },    /* R1867  - AIF2TX2MIX Input 2 Volume */
+	{ 0x0000074C, 0x0000 },    /* R1868  - AIF2TX2MIX Input 3 Source */
+	{ 0x0000074D, 0x0080 },    /* R1869  - AIF2TX2MIX Input 3 Volume */
+	{ 0x0000074E, 0x0000 },    /* R1870  - AIF2TX2MIX Input 4 Source */
+	{ 0x0000074F, 0x0080 },    /* R1871  - AIF2TX2MIX Input 4 Volume */
+	{ 0x00000750, 0x0000 },    /* R1872  - AIF2TX3MIX Input 1 Source */
+	{ 0x00000751, 0x0080 },    /* R1873  - AIF2TX3MIX Input 1 Volume */
+	{ 0x00000752, 0x0000 },    /* R1874  - AIF2TX3MIX Input 2 Source */
+	{ 0x00000753, 0x0080 },    /* R1875  - AIF2TX3MIX Input 2 Volume */
+	{ 0x00000754, 0x0000 },    /* R1876  - AIF2TX3MIX Input 3 Source */
+	{ 0x00000755, 0x0080 },    /* R1877  - AIF2TX3MIX Input 3 Volume */
+	{ 0x00000756, 0x0000 },    /* R1878  - AIF2TX3MIX Input 4 Source */
+	{ 0x00000757, 0x0080 },    /* R1879  - AIF2TX3MIX Input 4 Volume */
+	{ 0x00000758, 0x0000 },    /* R1880  - AIF2TX4MIX Input 1 Source */
+	{ 0x00000759, 0x0080 },    /* R1881  - AIF2TX4MIX Input 1 Volume */
+	{ 0x0000075A, 0x0000 },    /* R1882  - AIF2TX4MIX Input 2 Source */
+	{ 0x0000075B, 0x0080 },    /* R1883  - AIF2TX4MIX Input 2 Volume */
+	{ 0x0000075C, 0x0000 },    /* R1884  - AIF2TX4MIX Input 3 Source */
+	{ 0x0000075D, 0x0080 },    /* R1885  - AIF2TX4MIX Input 3 Volume */
+	{ 0x0000075E, 0x0000 },    /* R1886  - AIF2TX4MIX Input 4 Source */
+	{ 0x0000075F, 0x0080 },    /* R1887  - AIF2TX4MIX Input 4 Volume */
+	{ 0x00000760, 0x0000 },    /* R1888  - AIF2TX5MIX Input 1 Source */
+	{ 0x00000761, 0x0080 },    /* R1889  - AIF2TX5MIX Input 1 Volume */
+	{ 0x00000762, 0x0000 },    /* R1890  - AIF2TX5MIX Input 2 Source */
+	{ 0x00000763, 0x0080 },    /* R1891  - AIF2TX5MIX Input 2 Volume */
+	{ 0x00000764, 0x0000 },    /* R1892  - AIF2TX5MIX Input 3 Source */
+	{ 0x00000765, 0x0080 },    /* R1893  - AIF2TX5MIX Input 3 Volume */
+	{ 0x00000766, 0x0000 },    /* R1894  - AIF2TX5MIX Input 4 Source */
+	{ 0x00000767, 0x0080 },    /* R1895  - AIF2TX5MIX Input 4 Volume */
+	{ 0x00000768, 0x0000 },    /* R1896  - AIF2TX6MIX Input 1 Source */
+	{ 0x00000769, 0x0080 },    /* R1897  - AIF2TX6MIX Input 1 Volume */
+	{ 0x0000076A, 0x0000 },    /* R1898  - AIF2TX6MIX Input 2 Source */
+	{ 0x0000076B, 0x0080 },    /* R1899  - AIF2TX6MIX Input 2 Volume */
+	{ 0x0000076C, 0x0000 },    /* R1900  - AIF2TX6MIX Input 3 Source */
+	{ 0x0000076D, 0x0080 },    /* R1901  - AIF2TX6MIX Input 3 Volume */
+	{ 0x0000076E, 0x0000 },    /* R1902  - AIF2TX6MIX Input 4 Source */
+	{ 0x0000076F, 0x0080 },    /* R1903  - AIF2TX6MIX Input 4 Volume */
+	{ 0x00000780, 0x0000 },    /* R1920  - AIF3TX1MIX Input 1 Source */
+	{ 0x00000781, 0x0080 },    /* R1921  - AIF3TX1MIX Input 1 Volume */
+	{ 0x00000782, 0x0000 },    /* R1922  - AIF3TX1MIX Input 2 Source */
+	{ 0x00000783, 0x0080 },    /* R1923  - AIF3TX1MIX Input 2 Volume */
+	{ 0x00000784, 0x0000 },    /* R1924  - AIF3TX1MIX Input 3 Source */
+	{ 0x00000785, 0x0080 },    /* R1925  - AIF3TX1MIX Input 3 Volume */
+	{ 0x00000786, 0x0000 },    /* R1926  - AIF3TX1MIX Input 4 Source */
+	{ 0x00000787, 0x0080 },    /* R1927  - AIF3TX1MIX Input 4 Volume */
+	{ 0x00000788, 0x0000 },    /* R1928  - AIF3TX2MIX Input 1 Source */
+	{ 0x00000789, 0x0080 },    /* R1929  - AIF3TX2MIX Input 1 Volume */
+	{ 0x0000078A, 0x0000 },    /* R1930  - AIF3TX2MIX Input 2 Source */
+	{ 0x0000078B, 0x0080 },    /* R1931  - AIF3TX2MIX Input 2 Volume */
+	{ 0x0000078C, 0x0000 },    /* R1932  - AIF3TX2MIX Input 3 Source */
+	{ 0x0000078D, 0x0080 },    /* R1933  - AIF3TX2MIX Input 3 Volume */
+	{ 0x0000078E, 0x0000 },    /* R1934  - AIF3TX2MIX Input 4 Source */
+	{ 0x0000078F, 0x0080 },    /* R1935  - AIF3TX2MIX Input 4 Volume */
+	{ 0x000007C0, 0x0000 },    /* R1984  - SLIMTX1MIX Input 1 Source */
+	{ 0x000007C1, 0x0080 },    /* R1985  - SLIMTX1MIX Input 1 Volume */
+	{ 0x000007C8, 0x0000 },    /* R1992  - SLIMTX2MIX Input 1 Source */
+	{ 0x000007C9, 0x0080 },    /* R1993  - SLIMTX2MIX Input 1 Volume */
+	{ 0x000007D0, 0x0000 },    /* R2000  - SLIMTX3MIX Input 1 Source */
+	{ 0x000007D1, 0x0080 },    /* R2001  - SLIMTX3MIX Input 1 Volume */
+	{ 0x000007D8, 0x0000 },    /* R2008  - SLIMTX4MIX Input 1 Source */
+	{ 0x000007D9, 0x0080 },    /* R2009  - SLIMTX4MIX Input 1 Volume */
+	{ 0x000007E0, 0x0000 },    /* R2016  - SLIMTX5MIX Input 1 Source */
+	{ 0x000007E1, 0x0080 },    /* R2017  - SLIMTX5MIX Input 1 Volume */
+	{ 0x000007E8, 0x0000 },    /* R2024  - SLIMTX6MIX Input 1 Source */
+	{ 0x000007E9, 0x0080 },    /* R2025  - SLIMTX6MIX Input 1 Volume */
+	{ 0x00000800, 0x0000 },    /* R2048  - SPDIF1TX1MIX Input 1 Source */
+	{ 0x00000801, 0x0080 },    /* R2049  - SPDIF1TX1MIX Input 1 Volume */
+	{ 0x00000808, 0x0000 },    /* R2056  - SPDIF1TX2MIX Input 1 Source */
+	{ 0x00000809, 0x0080 },    /* R2057  - SPDIF1TX2MIX Input 1 Volume */
+	{ 0x00000880, 0x0000 },    /* R2176  - EQ1MIX Input 1 Source */
+	{ 0x00000881, 0x0080 },    /* R2177  - EQ1MIX Input 1 Volume */
+	{ 0x00000888, 0x0000 },    /* R2184  - EQ2MIX Input 1 Source */
+	{ 0x00000889, 0x0080 },    /* R2185  - EQ2MIX Input 1 Volume */
+	{ 0x00000890, 0x0000 },    /* R2192  - EQ3MIX Input 1 Source */
+	{ 0x00000891, 0x0080 },    /* R2193  - EQ3MIX Input 1 Volume */
+	{ 0x00000898, 0x0000 },    /* R2200  - EQ4MIX Input 1 Source */
+	{ 0x00000899, 0x0080 },    /* R2201  - EQ4MIX Input 1 Volume */
+	{ 0x000008C0, 0x0000 },    /* R2240  - DRC1LMIX Input 1 Source */
+	{ 0x000008C1, 0x0080 },    /* R2241  - DRC1LMIX Input 1 Volume */
+	{ 0x000008C8, 0x0000 },    /* R2248  - DRC1RMIX Input 1 Source */
+	{ 0x000008C9, 0x0080 },    /* R2249  - DRC1RMIX Input 1 Volume */
+	{ 0x00000900, 0x0000 },    /* R2304  - HPLP1MIX Input 1 Source */
+	{ 0x00000901, 0x0080 },    /* R2305  - HPLP1MIX Input 1 Volume */
+	{ 0x00000902, 0x0000 },    /* R2306  - HPLP1MIX Input 2 Source */
+	{ 0x00000903, 0x0080 },    /* R2307  - HPLP1MIX Input 2 Volume */
+	{ 0x00000904, 0x0000 },    /* R2308  - HPLP1MIX Input 3 Source */
+	{ 0x00000905, 0x0080 },    /* R2309  - HPLP1MIX Input 3 Volume */
+	{ 0x00000906, 0x0000 },    /* R2310  - HPLP1MIX Input 4 Source */
+	{ 0x00000907, 0x0080 },    /* R2311  - HPLP1MIX Input 4 Volume */
+	{ 0x00000908, 0x0000 },    /* R2312  - HPLP2MIX Input 1 Source */
+	{ 0x00000909, 0x0080 },    /* R2313  - HPLP2MIX Input 1 Volume */
+	{ 0x0000090A, 0x0000 },    /* R2314  - HPLP2MIX Input 2 Source */
+	{ 0x0000090B, 0x0080 },    /* R2315  - HPLP2MIX Input 2 Volume */
+	{ 0x0000090C, 0x0000 },    /* R2316  - HPLP2MIX Input 3 Source */
+	{ 0x0000090D, 0x0080 },    /* R2317  - HPLP2MIX Input 3 Volume */
+	{ 0x0000090E, 0x0000 },    /* R2318  - HPLP2MIX Input 4 Source */
+	{ 0x0000090F, 0x0080 },    /* R2319  - HPLP2MIX Input 4 Volume */
+	{ 0x00000910, 0x0000 },    /* R2320  - HPLP3MIX Input 1 Source */
+	{ 0x00000911, 0x0080 },    /* R2321  - HPLP3MIX Input 1 Volume */
+	{ 0x00000912, 0x0000 },    /* R2322  - HPLP3MIX Input 2 Source */
+	{ 0x00000913, 0x0080 },    /* R2323  - HPLP3MIX Input 2 Volume */
+	{ 0x00000914, 0x0000 },    /* R2324  - HPLP3MIX Input 3 Source */
+	{ 0x00000915, 0x0080 },    /* R2325  - HPLP3MIX Input 3 Volume */
+	{ 0x00000916, 0x0000 },    /* R2326  - HPLP3MIX Input 4 Source */
+	{ 0x00000917, 0x0080 },    /* R2327  - HPLP3MIX Input 4 Volume */
+	{ 0x00000918, 0x0000 },    /* R2328  - HPLP4MIX Input 1 Source */
+	{ 0x00000919, 0x0080 },    /* R2329  - HPLP4MIX Input 1 Volume */
+	{ 0x0000091A, 0x0000 },    /* R2330  - HPLP4MIX Input 2 Source */
+	{ 0x0000091B, 0x0080 },    /* R2331  - HPLP4MIX Input 2 Volume */
+	{ 0x0000091C, 0x0000 },    /* R2332  - HPLP4MIX Input 3 Source */
+	{ 0x0000091D, 0x0080 },    /* R2333  - HPLP4MIX Input 3 Volume */
+	{ 0x0000091E, 0x0000 },    /* R2334  - HPLP4MIX Input 4 Source */
+	{ 0x0000091F, 0x0080 },    /* R2335  - HPLP4MIX Input 4 Volume */
+	{ 0x00000A80, 0x0000 },    /* R2688  - ASRC1LMIX Input 1 Source */
+	{ 0x00000A88, 0x0000 },    /* R2696  - ASRC1RMIX Input 1 Source */
+	{ 0x00000A90, 0x0000 },    /* R2704  - ASRC2LMIX Input 1 Source */
+	{ 0x00000A98, 0x0000 },    /* R2712  - ASRC2RMIX Input 1 Source */
+	{ 0x00000B00, 0x0000 },    /* R2816  - ISRC1DEC1MIX Input 1 Source */
+	{ 0x00000B08, 0x0000 },    /* R2824  - ISRC1DEC2MIX Input 1 Source */
+	{ 0x00000B10, 0x0000 },    /* R2832  - ISRC1DEC3MIX Input 1 Source */
+	{ 0x00000B18, 0x0000 },    /* R2840  - ISRC1DEC4MIX Input 1 Source */
+	{ 0x00000B20, 0x0000 },    /* R2848  - ISRC1INT1MIX Input 1 Source */
+	{ 0x00000B28, 0x0000 },    /* R2856  - ISRC1INT2MIX Input 1 Source */
+	{ 0x00000B30, 0x0000 },    /* R2864  - ISRC1INT3MIX Input 1 Source */
+	{ 0x00000B38, 0x0000 },    /* R2872  - ISRC1INT4MIX Input 1 Source */
+	{ 0x00000B40, 0x0000 },    /* R2880  - ISRC2DEC1MIX Input 1 Source */
+	{ 0x00000B48, 0x0000 },    /* R2888  - ISRC2DEC2MIX Input 1 Source */
+	{ 0x00000B60, 0x0000 },    /* R2912  - ISRC2INT1MIX Input 1 Source */
+	{ 0x00000B68, 0x0000 },    /* R2920  - ISRC2INT2MIX Input 1 Source */
+	{ 0x00000C00, 0xA101 },    /* R3072  - GPIO1 CTRL */
+	{ 0x00000C01, 0xA101 },    /* R3073  - GPIO2 CTRL */
+	{ 0x00000C02, 0xA101 },    /* R3074  - GPIO3 CTRL */
+	{ 0x00000C03, 0xA101 },    /* R3075  - GPIO4 CTRL */
+	{ 0x00000C04, 0xA101 },    /* R3076  - GPIO5 CTRL */
+	{ 0x00000C0F, 0x0400 },    /* R3087  - IRQ CTRL 1 */
+	{ 0x00000C10, 0x1000 },    /* R3088  - GPIO Debounce Config */
+	{ 0x00000C18, 0x0000 },    /* R3096  - GP Switch 1 */
+	{ 0x00000C20, 0x8002 },    /* R3104  - Misc Pad Ctrl 1 */
+	{ 0x00000C21, 0x8001 },    /* R3105  - Misc Pad Ctrl 2 */
+	{ 0x00000C22, 0x0000 },    /* R3106  - Misc Pad Ctrl 3 */
+	{ 0x00000C23, 0x0000 },    /* R3107  - Misc Pad Ctrl 4 */
+	{ 0x00000C24, 0x0000 },    /* R3108  - Misc Pad Ctrl 5 */
+	{ 0x00000C25, 0x0000 },    /* R3109  - Misc Pad Ctrl 6 */
+	{ 0x00000D08, 0xFFFF },    /* R3336  - Interrupt Status 1 Mask */
+	{ 0x00000D09, 0xFFFF },    /* R3337  - Interrupt Status 2 Mask */
+	{ 0x00000D0A, 0xFFFF },    /* R3338  - Interrupt Status 3 Mask */
+	{ 0x00000D0B, 0xFFFF },    /* R3339  - Interrupt Status 4 Mask */
+	{ 0x00000D0C, 0xFEFF },    /* R3340  - Interrupt Status 5 Mask */
+	{ 0x00000D0F, 0x0000 },    /* R3343  - Interrupt Control */
+	{ 0x00000D18, 0xFFFF },    /* R3352  - IRQ2 Status 1 Mask */
+	{ 0x00000D19, 0xFFFF },    /* R3353  - IRQ2 Status 2 Mask */
+	{ 0x00000D1A, 0xFFFF },    /* R3354  - IRQ2 Status 3 Mask */
+	{ 0x00000D1B, 0xFFFF },    /* R3355  - IRQ2 Status 4 Mask */
+	{ 0x00000D1C, 0xFEFF },    /* R3356  - IRQ2 Status 5 Mask */
+	{ 0x00000D1D, 0xFFFF },    /* R3357  - IRQ2 Status 6 Mask */
+	{ 0x00000D1F, 0x0000 },    /* R3359  - IRQ2 Control */
+	{ 0x00000D53, 0xFFFF },    /* R3411  - AOD IRQ Mask IRQ1 */
+	{ 0x00000D54, 0xFFFF },    /* R3412  - AOD IRQ Mask IRQ2 */
+	{ 0x00000D56, 0x0000 },    /* R3414  - Jack detect debounce */
+	{ 0x00000E00, 0x0000 },    /* R3584  - FX_Ctrl1 */
+	{ 0x00000E01, 0x0000 },    /* R3585  - FX_Ctrl2 */
+	{ 0x00000E10, 0x6318 },    /* R3600  - EQ1_1 */
+	{ 0x00000E11, 0x6300 },    /* R3601  - EQ1_2 */
+	{ 0x00000E12, 0x0FC8 },    /* R3602  - EQ1_3 */
+	{ 0x00000E13, 0x03FE },    /* R3603  - EQ1_4 */
+	{ 0x00000E14, 0x00E0 },    /* R3604  - EQ1_5 */
+	{ 0x00000E15, 0x1EC4 },    /* R3605  - EQ1_6 */
+	{ 0x00000E16, 0xF136 },    /* R3606  - EQ1_7 */
+	{ 0x00000E17, 0x0409 },    /* R3607  - EQ1_8 */
+	{ 0x00000E18, 0x04CC },    /* R3608  - EQ1_9 */
+	{ 0x00000E19, 0x1C9B },    /* R3609  - EQ1_10 */
+	{ 0x00000E1A, 0xF337 },    /* R3610  - EQ1_11 */
+	{ 0x00000E1B, 0x040B },    /* R3611  - EQ1_12 */
+	{ 0x00000E1C, 0x0CBB },    /* R3612  - EQ1_13 */
+	{ 0x00000E1D, 0x16F8 },    /* R3613  - EQ1_14 */
+	{ 0x00000E1E, 0xF7D9 },    /* R3614  - EQ1_15 */
+	{ 0x00000E1F, 0x040A },    /* R3615  - EQ1_16 */
+	{ 0x00000E20, 0x1F14 },    /* R3616  - EQ1_17 */
+	{ 0x00000E21, 0x058C },    /* R3617  - EQ1_18 */
+	{ 0x00000E22, 0x0563 },    /* R3618  - EQ1_19 */
+	{ 0x00000E23, 0x4000 },    /* R3619  - EQ1_20 */
+	{ 0x00000E24, 0x0B75 },    /* R3620  - EQ1_21 */
+	{ 0x00000E26, 0x6318 },    /* R3622  - EQ2_1 */
+	{ 0x00000E27, 0x6300 },    /* R3623  - EQ2_2 */
+	{ 0x00000E28, 0x0FC8 },    /* R3624  - EQ2_3 */
+	{ 0x00000E29, 0x03FE },    /* R3625  - EQ2_4 */
+	{ 0x00000E2A, 0x00E0 },    /* R3626  - EQ2_5 */
+	{ 0x00000E2B, 0x1EC4 },    /* R3627  - EQ2_6 */
+	{ 0x00000E2C, 0xF136 },    /* R3628  - EQ2_7 */
+	{ 0x00000E2D, 0x0409 },    /* R3629  - EQ2_8 */
+	{ 0x00000E2E, 0x04CC },    /* R3630  - EQ2_9 */
+	{ 0x00000E2F, 0x1C9B },    /* R3631  - EQ2_10 */
+	{ 0x00000E30, 0xF337 },    /* R3632  - EQ2_11 */
+	{ 0x00000E31, 0x040B },    /* R3633  - EQ2_12 */
+	{ 0x00000E32, 0x0CBB },    /* R3634  - EQ2_13 */
+	{ 0x00000E33, 0x16F8 },    /* R3635  - EQ2_14 */
+	{ 0x00000E34, 0xF7D9 },    /* R3636  - EQ2_15 */
+	{ 0x00000E35, 0x040A },    /* R3637  - EQ2_16 */
+	{ 0x00000E36, 0x1F14 },    /* R3638  - EQ2_17 */
+	{ 0x00000E37, 0x058C },    /* R3639  - EQ2_18 */
+	{ 0x00000E38, 0x0563 },    /* R3640  - EQ2_19 */
+	{ 0x00000E39, 0x4000 },    /* R3641  - EQ2_20 */
+	{ 0x00000E3A, 0x0B75 },    /* R3642  - EQ2_21 */
+	{ 0x00000E3C, 0x6318 },    /* R3644  - EQ3_1 */
+	{ 0x00000E3D, 0x6300 },    /* R3645  - EQ3_2 */
+	{ 0x00000E3E, 0x0FC8 },    /* R3646  - EQ3_3 */
+	{ 0x00000E3F, 0x03FE },    /* R3647  - EQ3_4 */
+	{ 0x00000E40, 0x00E0 },    /* R3648  - EQ3_5 */
+	{ 0x00000E41, 0x1EC4 },    /* R3649  - EQ3_6 */
+	{ 0x00000E42, 0xF136 },    /* R3650  - EQ3_7 */
+	{ 0x00000E43, 0x0409 },    /* R3651  - EQ3_8 */
+	{ 0x00000E44, 0x04CC },    /* R3652  - EQ3_9 */
+	{ 0x00000E45, 0x1C9B },    /* R3653  - EQ3_10 */
+	{ 0x00000E46, 0xF337 },    /* R3654  - EQ3_11 */
+	{ 0x00000E47, 0x040B },    /* R3655  - EQ3_12 */
+	{ 0x00000E48, 0x0CBB },    /* R3656  - EQ3_13 */
+	{ 0x00000E49, 0x16F8 },    /* R3657  - EQ3_14 */
+	{ 0x00000E4A, 0xF7D9 },    /* R3658  - EQ3_15 */
+	{ 0x00000E4B, 0x040A },    /* R3659  - EQ3_16 */
+	{ 0x00000E4C, 0x1F14 },    /* R3660  - EQ3_17 */
+	{ 0x00000E4D, 0x058C },    /* R3661  - EQ3_18 */
+	{ 0x00000E4E, 0x0563 },    /* R3662  - EQ3_19 */
+	{ 0x00000E4F, 0x4000 },    /* R3663  - EQ3_20 */
+	{ 0x00000E50, 0x0B75 },    /* R3664  - EQ3_21 */
+	{ 0x00000E52, 0x6318 },    /* R3666  - EQ4_1 */
+	{ 0x00000E53, 0x6300 },    /* R3667  - EQ4_2 */
+	{ 0x00000E54, 0x0FC8 },    /* R3668  - EQ4_3 */
+	{ 0x00000E55, 0x03FE },    /* R3669  - EQ4_4 */
+	{ 0x00000E56, 0x00E0 },    /* R3670  - EQ4_5 */
+	{ 0x00000E57, 0x1EC4 },    /* R3671  - EQ4_6 */
+	{ 0x00000E58, 0xF136 },    /* R3672  - EQ4_7 */
+	{ 0x00000E59, 0x0409 },    /* R3673  - EQ4_8 */
+	{ 0x00000E5A, 0x04CC },    /* R3674  - EQ4_9 */
+	{ 0x00000E5B, 0x1C9B },    /* R3675  - EQ4_10 */
+	{ 0x00000E5C, 0xF337 },    /* R3676  - EQ4_11 */
+	{ 0x00000E5D, 0x040B },    /* R3677  - EQ4_12 */
+	{ 0x00000E5E, 0x0CBB },    /* R3678  - EQ4_13 */
+	{ 0x00000E5F, 0x16F8 },    /* R3679  - EQ4_14 */
+	{ 0x00000E60, 0xF7D9 },    /* R3680  - EQ4_15 */
+	{ 0x00000E61, 0x040A },    /* R3681  - EQ4_16 */
+	{ 0x00000E62, 0x1F14 },    /* R3682  - EQ4_17 */
+	{ 0x00000E63, 0x058C },    /* R3683  - EQ4_18 */
+	{ 0x00000E64, 0x0563 },    /* R3684  - EQ4_19 */
+	{ 0x00000E65, 0x4000 },    /* R3685  - EQ4_20 */
+	{ 0x00000E66, 0x0B75 },    /* R3686  - EQ4_21 */
+	{ 0x00000E80, 0x0018 },    /* R3712  - DRC1 ctrl1 */
+	{ 0x00000E81, 0x0933 },    /* R3713  - DRC1 ctrl2 */
+	{ 0x00000E82, 0x0018 },    /* R3714  - DRC1 ctrl3 */
+	{ 0x00000E83, 0x0000 },    /* R3715  - DRC1 ctrl4 */
+	{ 0x00000E84, 0x0000 },    /* R3716  - DRC1 ctrl5 */
+	{ 0x00000EC0, 0x0000 },    /* R3776  - HPLPF1_1 */
+	{ 0x00000EC1, 0x0000 },    /* R3777  - HPLPF1_2 */
+	{ 0x00000EC4, 0x0000 },    /* R3780  - HPLPF2_1 */
+	{ 0x00000EC5, 0x0000 },    /* R3781  - HPLPF2_2 */
+	{ 0x00000EC8, 0x0000 },    /* R3784  - HPLPF3_1 */
+	{ 0x00000EC9, 0x0000 },    /* R3785  - HPLPF3_2 */
+	{ 0x00000ECC, 0x0000 },    /* R3788  - HPLPF4_1 */
+	{ 0x00000ECD, 0x0000 },    /* R3789  - HPLPF4_2 */
+	{ 0x00000EE0, 0x0000 },    /* R3808  - ASRC_ENABLE */
+	{ 0x00000EE2, 0x0000 },    /* R3810  - ASRC_RATE1 */
+	{ 0x00000EE3, 0x4000 },    /* R3811  - ASRC_RATE2 */
+	{ 0x00000EF0, 0x0000 },    /* R3824  - ISRC 1 CTRL 1 */
+	{ 0x00000EF1, 0x0001 },    /* R3825  - ISRC 1 CTRL 2 */
+	{ 0x00000EF2, 0x0000 },    /* R3826  - ISRC 1 CTRL 3 */
+	{ 0x00000EF3, 0x0000 },    /* R3827  - ISRC 2 CTRL 1 */
+	{ 0x00000EF4, 0x0001 },    /* R3828  - ISRC 2 CTRL 2 */
+	{ 0x00000EF5, 0x0000 },    /* R3829  - ISRC 2 CTRL 3 */
+	{ 0x00001700, 0x0000 },    /* R5888  - FRF_COEFF_1 */
+	{ 0x00001701, 0x0000 },    /* R5889  - FRF_COEFF_2 */
+	{ 0x00001702, 0x0000 },    /* R5890  - FRF_COEFF_3 */
+	{ 0x00001703, 0x0000 },    /* R5891  - FRF_COEFF_4 */
+	{ 0x00001704, 0x0000 },    /* R5892  - DAC_COMP_1 */
+	{ 0x00001705, 0x0000 },    /* R5893  - DAC_COMP_2 */
+};
+
+static bool wm8998_readable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case ARIZONA_SOFTWARE_RESET:
+	case ARIZONA_DEVICE_REVISION:
+	case ARIZONA_CTRL_IF_SPI_CFG_1:
+	case ARIZONA_CTRL_IF_I2C1_CFG_1:
+	case ARIZONA_CTRL_IF_I2C1_CFG_2:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_0:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_1:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_2:
+	case ARIZONA_TONE_GENERATOR_1:
+	case ARIZONA_TONE_GENERATOR_2:
+	case ARIZONA_TONE_GENERATOR_3:
+	case ARIZONA_TONE_GENERATOR_4:
+	case ARIZONA_TONE_GENERATOR_5:
+	case ARIZONA_PWM_DRIVE_1:
+	case ARIZONA_PWM_DRIVE_2:
+	case ARIZONA_PWM_DRIVE_3:
+	case ARIZONA_WAKE_CONTROL:
+	case ARIZONA_SEQUENCE_CONTROL:
+	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1:
+	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2:
+	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3:
+	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_4:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_3:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6:
+	case ARIZONA_HAPTICS_CONTROL_1:
+	case ARIZONA_HAPTICS_CONTROL_2:
+	case ARIZONA_HAPTICS_PHASE_1_INTENSITY:
+	case ARIZONA_HAPTICS_PHASE_1_DURATION:
+	case ARIZONA_HAPTICS_PHASE_2_INTENSITY:
+	case ARIZONA_HAPTICS_PHASE_2_DURATION:
+	case ARIZONA_HAPTICS_PHASE_3_INTENSITY:
+	case ARIZONA_HAPTICS_PHASE_3_DURATION:
+	case ARIZONA_HAPTICS_STATUS:
+	case ARIZONA_CLOCK_32K_1:
+	case ARIZONA_SYSTEM_CLOCK_1:
+	case ARIZONA_SAMPLE_RATE_1:
+	case ARIZONA_SAMPLE_RATE_2:
+	case ARIZONA_SAMPLE_RATE_3:
+	case ARIZONA_SAMPLE_RATE_1_STATUS:
+	case ARIZONA_SAMPLE_RATE_2_STATUS:
+	case ARIZONA_SAMPLE_RATE_3_STATUS:
+	case ARIZONA_ASYNC_CLOCK_1:
+	case ARIZONA_ASYNC_SAMPLE_RATE_1:
+	case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+	case ARIZONA_ASYNC_SAMPLE_RATE_2:
+	case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS:
+	case ARIZONA_OUTPUT_SYSTEM_CLOCK:
+	case ARIZONA_OUTPUT_ASYNC_CLOCK:
+	case ARIZONA_RATE_ESTIMATOR_1:
+	case ARIZONA_RATE_ESTIMATOR_2:
+	case ARIZONA_RATE_ESTIMATOR_3:
+	case ARIZONA_RATE_ESTIMATOR_4:
+	case ARIZONA_RATE_ESTIMATOR_5:
+	case ARIZONA_DYNAMIC_FREQUENCY_SCALING_1:
+	case ARIZONA_FLL1_CONTROL_1:
+	case ARIZONA_FLL1_CONTROL_2:
+	case ARIZONA_FLL1_CONTROL_3:
+	case ARIZONA_FLL1_CONTROL_4:
+	case ARIZONA_FLL1_CONTROL_5:
+	case ARIZONA_FLL1_CONTROL_6:
+	case ARIZONA_FLL1_CONTROL_7:
+	case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
+	case ARIZONA_FLL1_NCO_TEST_0:
+	case ARIZONA_FLL1_SYNCHRONISER_1:
+	case ARIZONA_FLL1_SYNCHRONISER_2:
+	case ARIZONA_FLL1_SYNCHRONISER_3:
+	case ARIZONA_FLL1_SYNCHRONISER_4:
+	case ARIZONA_FLL1_SYNCHRONISER_5:
+	case ARIZONA_FLL1_SYNCHRONISER_6:
+	case ARIZONA_FLL1_SYNCHRONISER_7:
+	case ARIZONA_FLL1_SPREAD_SPECTRUM:
+	case ARIZONA_FLL1_GPIO_CLOCK:
+	case ARIZONA_FLL2_CONTROL_1:
+	case ARIZONA_FLL2_CONTROL_2:
+	case ARIZONA_FLL2_CONTROL_3:
+	case ARIZONA_FLL2_CONTROL_4:
+	case ARIZONA_FLL2_CONTROL_5:
+	case ARIZONA_FLL2_CONTROL_6:
+	case ARIZONA_FLL2_CONTROL_7:
+	case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
+	case ARIZONA_FLL2_NCO_TEST_0:
+	case ARIZONA_FLL2_SYNCHRONISER_1:
+	case ARIZONA_FLL2_SYNCHRONISER_2:
+	case ARIZONA_FLL2_SYNCHRONISER_3:
+	case ARIZONA_FLL2_SYNCHRONISER_4:
+	case ARIZONA_FLL2_SYNCHRONISER_5:
+	case ARIZONA_FLL2_SYNCHRONISER_6:
+	case ARIZONA_FLL2_SYNCHRONISER_7:
+	case ARIZONA_FLL2_SPREAD_SPECTRUM:
+	case ARIZONA_FLL2_GPIO_CLOCK:
+	case ARIZONA_MIC_CHARGE_PUMP_1:
+	case ARIZONA_LDO1_CONTROL_1:
+	case ARIZONA_LDO1_CONTROL_2:
+	case ARIZONA_LDO2_CONTROL_1:
+	case ARIZONA_MIC_BIAS_CTRL_1:
+	case ARIZONA_MIC_BIAS_CTRL_2:
+	case ARIZONA_MIC_BIAS_CTRL_3:
+	case ARIZONA_ACCESSORY_DETECT_MODE_1:
+	case ARIZONA_HEADPHONE_DETECT_1:
+	case ARIZONA_HEADPHONE_DETECT_2:
+	case ARIZONA_MICD_CLAMP_CONTROL:
+	case ARIZONA_MIC_DETECT_1:
+	case ARIZONA_MIC_DETECT_2:
+	case ARIZONA_MIC_DETECT_3:
+	case ARIZONA_MIC_DETECT_4:
+	case ARIZONA_MIC_DETECT_LEVEL_1:
+	case ARIZONA_MIC_DETECT_LEVEL_2:
+	case ARIZONA_MIC_DETECT_LEVEL_3:
+	case ARIZONA_MIC_DETECT_LEVEL_4:
+	case ARIZONA_ISOLATION_CONTROL:
+	case ARIZONA_JACK_DETECT_ANALOGUE:
+	case ARIZONA_INPUT_ENABLES:
+	case ARIZONA_INPUT_ENABLES_STATUS:
+	case ARIZONA_INPUT_RATE:
+	case ARIZONA_INPUT_VOLUME_RAMP:
+	case ARIZONA_HPF_CONTROL:
+	case ARIZONA_IN1L_CONTROL:
+	case ARIZONA_ADC_DIGITAL_VOLUME_1L:
+	case ARIZONA_DMIC1L_CONTROL:
+	case ARIZONA_IN1R_CONTROL:
+	case ARIZONA_ADC_DIGITAL_VOLUME_1R:
+	case ARIZONA_DMIC1R_CONTROL:
+	case ARIZONA_IN2L_CONTROL:
+	case ARIZONA_ADC_DIGITAL_VOLUME_2L:
+	case ARIZONA_DMIC2L_CONTROL:
+	case ARIZONA_OUTPUT_ENABLES_1:
+	case ARIZONA_OUTPUT_STATUS_1:
+	case ARIZONA_RAW_OUTPUT_STATUS_1:
+	case ARIZONA_OUTPUT_RATE_1:
+	case ARIZONA_OUTPUT_VOLUME_RAMP:
+	case ARIZONA_OUTPUT_PATH_CONFIG_1L:
+	case ARIZONA_DAC_DIGITAL_VOLUME_1L:
+	case ARIZONA_NOISE_GATE_SELECT_1L:
+	case ARIZONA_OUTPUT_PATH_CONFIG_1R:
+	case ARIZONA_DAC_DIGITAL_VOLUME_1R:
+	case ARIZONA_NOISE_GATE_SELECT_1R:
+	case ARIZONA_OUTPUT_PATH_CONFIG_2L:
+	case ARIZONA_DAC_DIGITAL_VOLUME_2L:
+	case ARIZONA_NOISE_GATE_SELECT_2L:
+	case ARIZONA_OUTPUT_PATH_CONFIG_2R:
+	case ARIZONA_DAC_DIGITAL_VOLUME_2R:
+	case ARIZONA_NOISE_GATE_SELECT_2R:
+	case ARIZONA_OUTPUT_PATH_CONFIG_3L:
+	case ARIZONA_DAC_DIGITAL_VOLUME_3L:
+	case ARIZONA_NOISE_GATE_SELECT_3L:
+	case ARIZONA_OUTPUT_PATH_CONFIG_4L:
+	case ARIZONA_DAC_DIGITAL_VOLUME_4L:
+	case ARIZONA_NOISE_GATE_SELECT_4L:
+	case ARIZONA_OUTPUT_PATH_CONFIG_4R:
+	case ARIZONA_DAC_DIGITAL_VOLUME_4R:
+	case ARIZONA_NOISE_GATE_SELECT_4R:
+	case ARIZONA_OUTPUT_PATH_CONFIG_5L:
+	case ARIZONA_DAC_DIGITAL_VOLUME_5L:
+	case ARIZONA_NOISE_GATE_SELECT_5L:
+	case ARIZONA_OUTPUT_PATH_CONFIG_5R:
+	case ARIZONA_DAC_DIGITAL_VOLUME_5R:
+	case ARIZONA_NOISE_GATE_SELECT_5R:
+	case ARIZONA_DRE_ENABLE:
+	case ARIZONA_DRE_CONTROL_1:
+	case ARIZONA_DRE_CONTROL_2:
+	case ARIZONA_DRE_CONTROL_3:
+	case ARIZONA_EDRE_ENABLE:
+	case ARIZONA_DAC_AEC_CONTROL_1:
+	case ARIZONA_DAC_AEC_CONTROL_2:
+	case ARIZONA_NOISE_GATE_CONTROL:
+	case ARIZONA_PDM_SPK1_CTRL_1:
+	case ARIZONA_PDM_SPK1_CTRL_2:
+	case ARIZONA_HP_TEST_CTRL_13:
+	case ARIZONA_AIF1_BCLK_CTRL:
+	case ARIZONA_AIF1_TX_PIN_CTRL:
+	case ARIZONA_AIF1_RX_PIN_CTRL:
+	case ARIZONA_AIF1_RATE_CTRL:
+	case ARIZONA_AIF1_FORMAT:
+	case ARIZONA_AIF1_RX_BCLK_RATE:
+	case ARIZONA_AIF1_FRAME_CTRL_1:
+	case ARIZONA_AIF1_FRAME_CTRL_2:
+	case ARIZONA_AIF1_FRAME_CTRL_3:
+	case ARIZONA_AIF1_FRAME_CTRL_4:
+	case ARIZONA_AIF1_FRAME_CTRL_5:
+	case ARIZONA_AIF1_FRAME_CTRL_6:
+	case ARIZONA_AIF1_FRAME_CTRL_7:
+	case ARIZONA_AIF1_FRAME_CTRL_8:
+	case ARIZONA_AIF1_FRAME_CTRL_11:
+	case ARIZONA_AIF1_FRAME_CTRL_12:
+	case ARIZONA_AIF1_FRAME_CTRL_13:
+	case ARIZONA_AIF1_FRAME_CTRL_14:
+	case ARIZONA_AIF1_FRAME_CTRL_15:
+	case ARIZONA_AIF1_FRAME_CTRL_16:
+	case ARIZONA_AIF1_TX_ENABLES:
+	case ARIZONA_AIF1_RX_ENABLES:
+	case ARIZONA_AIF2_BCLK_CTRL:
+	case ARIZONA_AIF2_TX_PIN_CTRL:
+	case ARIZONA_AIF2_RX_PIN_CTRL:
+	case ARIZONA_AIF2_RATE_CTRL:
+	case ARIZONA_AIF2_FORMAT:
+	case ARIZONA_AIF2_RX_BCLK_RATE:
+	case ARIZONA_AIF2_FRAME_CTRL_1:
+	case ARIZONA_AIF2_FRAME_CTRL_2:
+	case ARIZONA_AIF2_FRAME_CTRL_3:
+	case ARIZONA_AIF2_FRAME_CTRL_4:
+	case ARIZONA_AIF2_FRAME_CTRL_5:
+	case ARIZONA_AIF2_FRAME_CTRL_6:
+	case ARIZONA_AIF2_FRAME_CTRL_7:
+	case ARIZONA_AIF2_FRAME_CTRL_8:
+	case ARIZONA_AIF2_FRAME_CTRL_11:
+	case ARIZONA_AIF2_FRAME_CTRL_12:
+	case ARIZONA_AIF2_FRAME_CTRL_13:
+	case ARIZONA_AIF2_FRAME_CTRL_14:
+	case ARIZONA_AIF2_FRAME_CTRL_15:
+	case ARIZONA_AIF2_FRAME_CTRL_16:
+	case ARIZONA_AIF2_TX_ENABLES:
+	case ARIZONA_AIF2_RX_ENABLES:
+	case ARIZONA_AIF3_BCLK_CTRL:
+	case ARIZONA_AIF3_TX_PIN_CTRL:
+	case ARIZONA_AIF3_RX_PIN_CTRL:
+	case ARIZONA_AIF3_RATE_CTRL:
+	case ARIZONA_AIF3_FORMAT:
+	case ARIZONA_AIF3_RX_BCLK_RATE:
+	case ARIZONA_AIF3_FRAME_CTRL_1:
+	case ARIZONA_AIF3_FRAME_CTRL_2:
+	case ARIZONA_AIF3_FRAME_CTRL_3:
+	case ARIZONA_AIF3_FRAME_CTRL_4:
+	case ARIZONA_AIF3_FRAME_CTRL_11:
+	case ARIZONA_AIF3_FRAME_CTRL_12:
+	case ARIZONA_AIF3_TX_ENABLES:
+	case ARIZONA_AIF3_RX_ENABLES:
+	case ARIZONA_SPD1_TX_CONTROL:
+	case ARIZONA_SPD1_TX_CHANNEL_STATUS_1:
+	case ARIZONA_SPD1_TX_CHANNEL_STATUS_2:
+	case ARIZONA_SPD1_TX_CHANNEL_STATUS_3:
+	case ARIZONA_SLIMBUS_FRAMER_REF_GEAR:
+	case ARIZONA_SLIMBUS_RATES_1:
+	case ARIZONA_SLIMBUS_RATES_2:
+	case ARIZONA_SLIMBUS_RATES_5:
+	case ARIZONA_SLIMBUS_RATES_6:
+	case ARIZONA_SLIMBUS_RATES_7:
+	case ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE:
+	case ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE:
+	case ARIZONA_SLIMBUS_RX_PORT_STATUS:
+	case ARIZONA_SLIMBUS_TX_PORT_STATUS:
+	case ARIZONA_PWM1MIX_INPUT_1_SOURCE:
+	case ARIZONA_PWM1MIX_INPUT_1_VOLUME:
+	case ARIZONA_PWM1MIX_INPUT_2_SOURCE:
+	case ARIZONA_PWM1MIX_INPUT_2_VOLUME:
+	case ARIZONA_PWM1MIX_INPUT_3_SOURCE:
+	case ARIZONA_PWM1MIX_INPUT_3_VOLUME:
+	case ARIZONA_PWM1MIX_INPUT_4_SOURCE:
+	case ARIZONA_PWM1MIX_INPUT_4_VOLUME:
+	case ARIZONA_PWM2MIX_INPUT_1_SOURCE:
+	case ARIZONA_PWM2MIX_INPUT_1_VOLUME:
+	case ARIZONA_PWM2MIX_INPUT_2_SOURCE:
+	case ARIZONA_PWM2MIX_INPUT_2_VOLUME:
+	case ARIZONA_PWM2MIX_INPUT_3_SOURCE:
+	case ARIZONA_PWM2MIX_INPUT_3_VOLUME:
+	case ARIZONA_PWM2MIX_INPUT_4_SOURCE:
+	case ARIZONA_PWM2MIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT1LMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT1LMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT1LMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT1LMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT1LMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT1LMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT1LMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT1LMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT1RMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT1RMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT1RMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT1RMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT1RMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT1RMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT1RMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT1RMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT2LMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT2LMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT2LMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT2LMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT2LMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT2LMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT2LMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT2LMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT2RMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT2RMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT2RMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT2RMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT2RMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT2RMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT2RMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT2RMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT3LMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT3LMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT3LMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT3LMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT3LMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT3LMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT3LMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT3LMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT4LMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT4LMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT4LMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT4LMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT4LMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT4LMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT4LMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT4LMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT4RMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT4RMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT4RMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT4RMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT4RMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT4RMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT4RMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT4RMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT5LMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT5LMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT5LMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT5LMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT5LMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT5LMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT5LMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT5LMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT5RMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT5RMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT5RMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT5RMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT5RMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT5RMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT5RMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT5RMIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX1MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX1MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX1MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX1MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX1MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX1MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX1MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX2MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX2MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX2MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX2MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX2MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX2MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX2MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX3MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX3MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX3MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX3MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX3MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX3MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX3MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX4MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX4MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX4MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX4MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX4MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX4MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX4MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX5MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX5MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX5MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX5MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX5MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX5MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX5MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX6MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX6MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX6MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX6MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX6MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX6MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX6MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX1MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX1MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX1MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX1MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX1MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX1MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX1MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX2MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX2MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX2MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX2MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX2MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX2MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX2MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX3MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX3MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX3MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX3MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX3MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX3MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX3MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX3MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX4MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX4MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX4MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX4MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX4MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX4MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX4MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX4MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX5MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX5MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX5MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX5MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX5MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX5MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX5MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX5MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX6MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX6MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX6MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX6MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX6MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX6MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX6MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX6MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF3TX1MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF3TX1MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF3TX1MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF3TX1MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF3TX1MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF3TX1MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF3TX1MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF3TX2MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF3TX2MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF3TX2MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF3TX2MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF3TX2MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF3TX2MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF3TX2MIX_INPUT_4_VOLUME:
+	case ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX1MIX_INPUT_1_VOLUME:
+	case ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX2MIX_INPUT_1_VOLUME:
+	case ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX3MIX_INPUT_1_VOLUME:
+	case ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX4MIX_INPUT_1_VOLUME:
+	case ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX5MIX_INPUT_1_VOLUME:
+	case ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX6MIX_INPUT_1_VOLUME:
+	case ARIZONA_SPDIFTX1MIX_INPUT_1_SOURCE:
+	case ARIZONA_SPDIFTX1MIX_INPUT_1_VOLUME:
+	case ARIZONA_SPDIFTX2MIX_INPUT_1_SOURCE:
+	case ARIZONA_SPDIFTX2MIX_INPUT_1_VOLUME:
+	case ARIZONA_EQ1MIX_INPUT_1_SOURCE:
+	case ARIZONA_EQ1MIX_INPUT_1_VOLUME:
+	case ARIZONA_EQ2MIX_INPUT_1_SOURCE:
+	case ARIZONA_EQ2MIX_INPUT_1_VOLUME:
+	case ARIZONA_EQ3MIX_INPUT_1_SOURCE:
+	case ARIZONA_EQ3MIX_INPUT_1_VOLUME:
+	case ARIZONA_EQ4MIX_INPUT_1_SOURCE:
+	case ARIZONA_EQ4MIX_INPUT_1_VOLUME:
+	case ARIZONA_DRC1LMIX_INPUT_1_SOURCE:
+	case ARIZONA_DRC1LMIX_INPUT_1_VOLUME:
+	case ARIZONA_DRC1RMIX_INPUT_1_SOURCE:
+	case ARIZONA_DRC1RMIX_INPUT_1_VOLUME:
+	case ARIZONA_HPLP1MIX_INPUT_1_SOURCE:
+	case ARIZONA_HPLP1MIX_INPUT_1_VOLUME:
+	case ARIZONA_HPLP1MIX_INPUT_2_SOURCE:
+	case ARIZONA_HPLP1MIX_INPUT_2_VOLUME:
+	case ARIZONA_HPLP1MIX_INPUT_3_SOURCE:
+	case ARIZONA_HPLP1MIX_INPUT_3_VOLUME:
+	case ARIZONA_HPLP1MIX_INPUT_4_SOURCE:
+	case ARIZONA_HPLP1MIX_INPUT_4_VOLUME:
+	case ARIZONA_HPLP2MIX_INPUT_1_SOURCE:
+	case ARIZONA_HPLP2MIX_INPUT_1_VOLUME:
+	case ARIZONA_HPLP2MIX_INPUT_2_SOURCE:
+	case ARIZONA_HPLP2MIX_INPUT_2_VOLUME:
+	case ARIZONA_HPLP2MIX_INPUT_3_SOURCE:
+	case ARIZONA_HPLP2MIX_INPUT_3_VOLUME:
+	case ARIZONA_HPLP2MIX_INPUT_4_SOURCE:
+	case ARIZONA_HPLP2MIX_INPUT_4_VOLUME:
+	case ARIZONA_HPLP3MIX_INPUT_1_SOURCE:
+	case ARIZONA_HPLP3MIX_INPUT_1_VOLUME:
+	case ARIZONA_HPLP3MIX_INPUT_2_SOURCE:
+	case ARIZONA_HPLP3MIX_INPUT_2_VOLUME:
+	case ARIZONA_HPLP3MIX_INPUT_3_SOURCE:
+	case ARIZONA_HPLP3MIX_INPUT_3_VOLUME:
+	case ARIZONA_HPLP3MIX_INPUT_4_SOURCE:
+	case ARIZONA_HPLP3MIX_INPUT_4_VOLUME:
+	case ARIZONA_HPLP4MIX_INPUT_1_SOURCE:
+	case ARIZONA_HPLP4MIX_INPUT_1_VOLUME:
+	case ARIZONA_HPLP4MIX_INPUT_2_SOURCE:
+	case ARIZONA_HPLP4MIX_INPUT_2_VOLUME:
+	case ARIZONA_HPLP4MIX_INPUT_3_SOURCE:
+	case ARIZONA_HPLP4MIX_INPUT_3_VOLUME:
+	case ARIZONA_HPLP4MIX_INPUT_4_SOURCE:
+	case ARIZONA_HPLP4MIX_INPUT_4_VOLUME:
+	case ARIZONA_ASRC1LMIX_INPUT_1_SOURCE:
+	case ARIZONA_ASRC1RMIX_INPUT_1_SOURCE:
+	case ARIZONA_ASRC2LMIX_INPUT_1_SOURCE:
+	case ARIZONA_ASRC2RMIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1DEC1MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1DEC2MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1DEC3MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1DEC4MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1INT1MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1INT2MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1INT3MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1INT4MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE:
+	case ARIZONA_GPIO1_CTRL:
+	case ARIZONA_GPIO2_CTRL:
+	case ARIZONA_GPIO3_CTRL:
+	case ARIZONA_GPIO4_CTRL:
+	case ARIZONA_GPIO5_CTRL:
+	case ARIZONA_IRQ_CTRL_1:
+	case ARIZONA_GPIO_DEBOUNCE_CONFIG:
+	case ARIZONA_GP_SWITCH_1:
+	case ARIZONA_MISC_PAD_CTRL_1:
+	case ARIZONA_MISC_PAD_CTRL_2:
+	case ARIZONA_MISC_PAD_CTRL_3:
+	case ARIZONA_MISC_PAD_CTRL_4:
+	case ARIZONA_MISC_PAD_CTRL_5:
+	case ARIZONA_MISC_PAD_CTRL_6:
+	case ARIZONA_INTERRUPT_STATUS_1:
+	case ARIZONA_INTERRUPT_STATUS_2:
+	case ARIZONA_INTERRUPT_STATUS_3:
+	case ARIZONA_INTERRUPT_STATUS_4:
+	case ARIZONA_INTERRUPT_STATUS_5:
+	case ARIZONA_INTERRUPT_STATUS_1_MASK:
+	case ARIZONA_INTERRUPT_STATUS_2_MASK:
+	case ARIZONA_INTERRUPT_STATUS_3_MASK:
+	case ARIZONA_INTERRUPT_STATUS_4_MASK:
+	case ARIZONA_INTERRUPT_STATUS_5_MASK:
+	case ARIZONA_INTERRUPT_CONTROL:
+	case ARIZONA_IRQ2_STATUS_1:
+	case ARIZONA_IRQ2_STATUS_2:
+	case ARIZONA_IRQ2_STATUS_3:
+	case ARIZONA_IRQ2_STATUS_4:
+	case ARIZONA_IRQ2_STATUS_5:
+	case ARIZONA_IRQ2_STATUS_1_MASK:
+	case ARIZONA_IRQ2_STATUS_2_MASK:
+	case ARIZONA_IRQ2_STATUS_3_MASK:
+	case ARIZONA_IRQ2_STATUS_4_MASK:
+	case ARIZONA_IRQ2_STATUS_5_MASK:
+	case ARIZONA_IRQ2_CONTROL:
+	case ARIZONA_INTERRUPT_RAW_STATUS_2:
+	case ARIZONA_INTERRUPT_RAW_STATUS_3:
+	case ARIZONA_INTERRUPT_RAW_STATUS_4:
+	case ARIZONA_INTERRUPT_RAW_STATUS_5:
+	case ARIZONA_INTERRUPT_RAW_STATUS_6:
+	case ARIZONA_INTERRUPT_RAW_STATUS_7:
+	case ARIZONA_INTERRUPT_RAW_STATUS_8:
+	case ARIZONA_IRQ_PIN_STATUS:
+	case ARIZONA_AOD_WKUP_AND_TRIG:
+	case ARIZONA_AOD_IRQ1:
+	case ARIZONA_AOD_IRQ2:
+	case ARIZONA_AOD_IRQ_MASK_IRQ1:
+	case ARIZONA_AOD_IRQ_MASK_IRQ2:
+	case ARIZONA_AOD_IRQ_RAW_STATUS:
+	case ARIZONA_JACK_DETECT_DEBOUNCE:
+	case ARIZONA_FX_CTRL1:
+	case ARIZONA_FX_CTRL2:
+	case ARIZONA_EQ1_1:
+	case ARIZONA_EQ1_2:
+	case ARIZONA_EQ1_3:
+	case ARIZONA_EQ1_4:
+	case ARIZONA_EQ1_5:
+	case ARIZONA_EQ1_6:
+	case ARIZONA_EQ1_7:
+	case ARIZONA_EQ1_8:
+	case ARIZONA_EQ1_9:
+	case ARIZONA_EQ1_10:
+	case ARIZONA_EQ1_11:
+	case ARIZONA_EQ1_12:
+	case ARIZONA_EQ1_13:
+	case ARIZONA_EQ1_14:
+	case ARIZONA_EQ1_15:
+	case ARIZONA_EQ1_16:
+	case ARIZONA_EQ1_17:
+	case ARIZONA_EQ1_18:
+	case ARIZONA_EQ1_19:
+	case ARIZONA_EQ1_20:
+	case ARIZONA_EQ1_21:
+	case ARIZONA_EQ2_1:
+	case ARIZONA_EQ2_2:
+	case ARIZONA_EQ2_3:
+	case ARIZONA_EQ2_4:
+	case ARIZONA_EQ2_5:
+	case ARIZONA_EQ2_6:
+	case ARIZONA_EQ2_7:
+	case ARIZONA_EQ2_8:
+	case ARIZONA_EQ2_9:
+	case ARIZONA_EQ2_10:
+	case ARIZONA_EQ2_11:
+	case ARIZONA_EQ2_12:
+	case ARIZONA_EQ2_13:
+	case ARIZONA_EQ2_14:
+	case ARIZONA_EQ2_15:
+	case ARIZONA_EQ2_16:
+	case ARIZONA_EQ2_17:
+	case ARIZONA_EQ2_18:
+	case ARIZONA_EQ2_19:
+	case ARIZONA_EQ2_20:
+	case ARIZONA_EQ2_21:
+	case ARIZONA_EQ3_1:
+	case ARIZONA_EQ3_2:
+	case ARIZONA_EQ3_3:
+	case ARIZONA_EQ3_4:
+	case ARIZONA_EQ3_5:
+	case ARIZONA_EQ3_6:
+	case ARIZONA_EQ3_7:
+	case ARIZONA_EQ3_8:
+	case ARIZONA_EQ3_9:
+	case ARIZONA_EQ3_10:
+	case ARIZONA_EQ3_11:
+	case ARIZONA_EQ3_12:
+	case ARIZONA_EQ3_13:
+	case ARIZONA_EQ3_14:
+	case ARIZONA_EQ3_15:
+	case ARIZONA_EQ3_16:
+	case ARIZONA_EQ3_17:
+	case ARIZONA_EQ3_18:
+	case ARIZONA_EQ3_19:
+	case ARIZONA_EQ3_20:
+	case ARIZONA_EQ3_21:
+	case ARIZONA_EQ4_1:
+	case ARIZONA_EQ4_2:
+	case ARIZONA_EQ4_3:
+	case ARIZONA_EQ4_4:
+	case ARIZONA_EQ4_5:
+	case ARIZONA_EQ4_6:
+	case ARIZONA_EQ4_7:
+	case ARIZONA_EQ4_8:
+	case ARIZONA_EQ4_9:
+	case ARIZONA_EQ4_10:
+	case ARIZONA_EQ4_11:
+	case ARIZONA_EQ4_12:
+	case ARIZONA_EQ4_13:
+	case ARIZONA_EQ4_14:
+	case ARIZONA_EQ4_15:
+	case ARIZONA_EQ4_16:
+	case ARIZONA_EQ4_17:
+	case ARIZONA_EQ4_18:
+	case ARIZONA_EQ4_19:
+	case ARIZONA_EQ4_20:
+	case ARIZONA_EQ4_21:
+	case ARIZONA_DRC1_CTRL1:
+	case ARIZONA_DRC1_CTRL2:
+	case ARIZONA_DRC1_CTRL3:
+	case ARIZONA_DRC1_CTRL4:
+	case ARIZONA_DRC1_CTRL5:
+	case ARIZONA_HPLPF1_1:
+	case ARIZONA_HPLPF1_2:
+	case ARIZONA_HPLPF2_1:
+	case ARIZONA_HPLPF2_2:
+	case ARIZONA_HPLPF3_1:
+	case ARIZONA_HPLPF3_2:
+	case ARIZONA_HPLPF4_1:
+	case ARIZONA_HPLPF4_2:
+	case ARIZONA_ASRC_ENABLE:
+	case ARIZONA_ASRC_STATUS:
+	case ARIZONA_ASRC_RATE1:
+	case ARIZONA_ASRC_RATE2:
+	case ARIZONA_ISRC_1_CTRL_1:
+	case ARIZONA_ISRC_1_CTRL_2:
+	case ARIZONA_ISRC_1_CTRL_3:
+	case ARIZONA_ISRC_2_CTRL_1:
+	case ARIZONA_ISRC_2_CTRL_2:
+	case ARIZONA_ISRC_2_CTRL_3:
+	case ARIZONA_FRF_COEFF_1:
+	case ARIZONA_FRF_COEFF_2:
+	case ARIZONA_FRF_COEFF_3:
+	case ARIZONA_FRF_COEFF_4:
+	case ARIZONA_V2_DAC_COMP_1:
+	case ARIZONA_V2_DAC_COMP_2:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool wm8998_volatile_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case ARIZONA_SOFTWARE_RESET:
+	case ARIZONA_DEVICE_REVISION:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_0:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_1:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_2:
+	case ARIZONA_HAPTICS_STATUS:
+	case ARIZONA_SAMPLE_RATE_1_STATUS:
+	case ARIZONA_SAMPLE_RATE_2_STATUS:
+	case ARIZONA_SAMPLE_RATE_3_STATUS:
+	case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+	case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS:
+	case ARIZONA_MIC_DETECT_3:
+	case ARIZONA_MIC_DETECT_4:
+	case ARIZONA_HEADPHONE_DETECT_2:
+	case ARIZONA_INPUT_ENABLES_STATUS:
+	case ARIZONA_OUTPUT_STATUS_1:
+	case ARIZONA_RAW_OUTPUT_STATUS_1:
+	case ARIZONA_SLIMBUS_RX_PORT_STATUS:
+	case ARIZONA_SLIMBUS_TX_PORT_STATUS:
+	case ARIZONA_INTERRUPT_STATUS_1:
+	case ARIZONA_INTERRUPT_STATUS_2:
+	case ARIZONA_INTERRUPT_STATUS_3:
+	case ARIZONA_INTERRUPT_STATUS_4:
+	case ARIZONA_INTERRUPT_STATUS_5:
+	case ARIZONA_IRQ2_STATUS_1:
+	case ARIZONA_IRQ2_STATUS_2:
+	case ARIZONA_IRQ2_STATUS_3:
+	case ARIZONA_IRQ2_STATUS_4:
+	case ARIZONA_IRQ2_STATUS_5:
+	case ARIZONA_INTERRUPT_RAW_STATUS_2:
+	case ARIZONA_INTERRUPT_RAW_STATUS_3:
+	case ARIZONA_INTERRUPT_RAW_STATUS_4:
+	case ARIZONA_INTERRUPT_RAW_STATUS_5:
+	case ARIZONA_INTERRUPT_RAW_STATUS_6:
+	case ARIZONA_INTERRUPT_RAW_STATUS_7:
+	case ARIZONA_INTERRUPT_RAW_STATUS_8:
+	case ARIZONA_IRQ_PIN_STATUS:
+	case ARIZONA_AOD_WKUP_AND_TRIG:
+	case ARIZONA_AOD_IRQ1:
+	case ARIZONA_AOD_IRQ2:
+	case ARIZONA_AOD_IRQ_RAW_STATUS:
+	case ARIZONA_FX_CTRL2:
+	case ARIZONA_ASRC_STATUS:
+		return true;
+	default:
+		return false;
+	}
+}
+
+#define WM8998_MAX_REGISTER 0x31ff
+
+const struct regmap_config wm8998_i2c_regmap = {
+	.reg_bits = 32,
+	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
+
+	.max_register = WM8998_MAX_REGISTER,
+	.readable_reg = wm8998_readable_register,
+	.volatile_reg = wm8998_volatile_register,
+
+	.cache_type = REGCACHE_RBTREE,
+	.reg_defaults = wm8998_reg_default,
+	.num_reg_defaults = ARRAY_SIZE(wm8998_reg_default),
+};
+EXPORT_SYMBOL_GPL(wm8998_i2c_regmap);
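
Editor's note: the tables above feed regmap's register cache. Anything readable but not
volatile is served from the REGCACHE_RBTREE cache; volatile registers (status, IRQ,
write-sequencer state) always go to the bus. A minimal sketch of the same pattern,
using hypothetical FOO_* registers rather than the real wm8998 map:

    #include <linux/device.h>
    #include <linux/regmap.h>

    #define FOO_CTRL         0x01    /* cached read/write control register */
    #define FOO_STATUS       0x02    /* volatile: updated by hardware */
    #define FOO_MAX_REGISTER 0xff

    static bool foo_readable_register(struct device *dev, unsigned int reg)
    {
        switch (reg) {
        case FOO_CTRL:
        case FOO_STATUS:
            return true;
        default:
            return false;    /* reads of anything else fail with -EIO */
        }
    }

    static bool foo_volatile_register(struct device *dev, unsigned int reg)
    {
        return reg == FOO_STATUS;    /* never answered from the cache */
    }

    static const struct regmap_config foo_regmap = {
        .reg_bits      = 8,
        .val_bits      = 8,
        .max_register  = FOO_MAX_REGISTER,
        .readable_reg  = foo_readable_register,
        .volatile_reg  = foo_volatile_register,
        .cache_type    = REGCACHE_RBTREE,
    };
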
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index b6db9eb..8756d06 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -11,11 +11,16 @@
 	bool
 	default n
 
+config CXL_EEH
+	bool
+	default n
+
 config CXL
 	tristate "Support for IBM Coherent Accelerators (CXL)"
-	depends on PPC_POWERNV && PCI_MSI
+	depends on PPC_POWERNV && PCI_MSI && EEH
 	select CXL_BASE
 	select CXL_KERNEL_API
+	select CXL_EEH
 	default m
 	help
 	  Select this option to enable driver support for IBM Coherent
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index 14e3f82..6f484df 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -1,3 +1,5 @@
+ccflags-y := -Werror
+
 cxl-y				+= main.o file.o irq.o fault.o native.o
 cxl-y				+= context.o sysfs.o debugfs.o pci.o trace.o
 cxl-y				+= vphb.o api.o
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 729e085..8af12c8 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -12,11 +12,13 @@
 #include <linux/anon_inodes.h>
 #include <linux/file.h>
 #include <misc/cxl.h>
+#include <linux/fs.h>
 
 #include "cxl.h"
 
 struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
 {
+	struct address_space *mapping;
 	struct cxl_afu *afu;
 	struct cxl_context  *ctx;
 	int rc;
@@ -25,19 +27,42 @@
 
 	get_device(&afu->dev);
 	ctx = cxl_context_alloc();
-	if (IS_ERR(ctx))
-		return ctx;
+	if (IS_ERR(ctx)) {
+		rc = PTR_ERR(ctx);
+		goto err_dev;
+	}
+
+	ctx->kernelapi = true;
+
+	/*
+	 * Make our own address space since we won't have one from the
+	 * filesystem like the user api has, and even if we do associate a file
+	 * with this context we don't want to use the global anonymous inode's
+	 * address space as that can invalidate unrelated users:
+	 */
+	mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
+	if (!mapping) {
+		rc = -ENOMEM;
+		goto err_ctx;
+	}
+	address_space_init_once(mapping);
 
 	/* Make it a slave context.  We can promote it later? */
-	rc = cxl_context_init(ctx, afu, false, NULL);
-	if (rc) {
-		kfree(ctx);
-		put_device(&afu->dev);
-		return ERR_PTR(-ENOMEM);
-	}
+	rc = cxl_context_init(ctx, afu, false, mapping);
+	if (rc)
+		goto err_mapping;
+
 	cxl_assign_psn_space(ctx);
 
 	return ctx;
+
+err_mapping:
+	kfree(mapping);
+err_ctx:
+	kfree(ctx);
+err_dev:
+	put_device(&afu->dev);
+	return ERR_PTR(rc);
 }
 EXPORT_SYMBOL_GPL(cxl_dev_context_init);
 
@@ -59,7 +84,7 @@
 
 int cxl_release_context(struct cxl_context *ctx)
 {
-	if (ctx->status != CLOSED)
+	if (ctx->status >= STARTED)
 		return -EBUSY;
 
 	put_device(&ctx->afu->dev);
@@ -255,9 +280,16 @@
 
 	file = anon_inode_getfile("cxl", fops, ctx, flags);
 	if (IS_ERR(file))
-		put_unused_fd(fdtmp);
+		goto err_fd;
+
+	file->f_mapping = ctx->mapping;
+
 	*fd = fdtmp;
 	return file;
+
+err_fd:
+	put_unused_fd(fdtmp);
+	return NULL;
 }
 EXPORT_SYMBOL_GPL(cxl_get_fd);
 
@@ -327,3 +359,10 @@
 	return cxl_afu_check_and_enable(afu);
 }
 EXPORT_SYMBOL_GPL(cxl_afu_reset);
+
+void cxl_perst_reloads_same_image(struct cxl_afu *afu,
+				  bool perst_reloads_same_image)
+{
+	afu->adapter->perst_same_image = perst_reloads_same_image;
+}
+EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);
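
Editor's note: the reworked cxl_dev_context_init above is the kernel's standard
goto-unwind idiom: each acquisition gets a matching error label, and a failure jumps to
the label that releases everything acquired so far, in reverse order, funnelling every
exit through a single ERR_PTR return. A self-contained sketch of the shape (struct foo
and foo_init are hypothetical):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    struct foo { void *buf; };

    static struct foo *foo_init(struct device *dev)
    {
        struct foo *foo;
        int rc;

        get_device(dev);                          /* acquire A */

        foo = kzalloc(sizeof(*foo), GFP_KERNEL);  /* acquire B */
        if (!foo) {
            rc = -ENOMEM;
            goto err_dev;
        }

        foo->buf = kmalloc(PAGE_SIZE, GFP_KERNEL); /* acquire C */
        if (!foo->buf) {
            rc = -ENOMEM;
            goto err_foo;
        }

        return foo;

    err_foo:                 /* unwind B */
        kfree(foo);
    err_dev:                 /* unwind A */
        put_device(dev);
        return ERR_PTR(rc);
    }
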
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 1287148..e762f85 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -126,6 +126,18 @@
 	if (ctx->status != STARTED) {
 		mutex_unlock(&ctx->status_mutex);
 		pr_devel("%s: Context not started, failing problem state access\n", __func__);
+		if (ctx->mmio_err_ff) {
+			if (!ctx->ff_page) {
+				ctx->ff_page = alloc_page(GFP_USER);
+				if (!ctx->ff_page)
+					return VM_FAULT_OOM;
+				memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
+			}
+			get_page(ctx->ff_page);
+			vmf->page = ctx->ff_page;
+			vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
+			return 0;
+		}
 		return VM_FAULT_SIGBUS;
 	}
 
@@ -193,7 +205,11 @@
 	if (status != STARTED)
 		return -EBUSY;
 
-	WARN_ON(cxl_detach_process(ctx));
+	/* Only warn if we detached while the link was OK.
+	 * If detach fails when hw is down, we don't care.
+	 */
+	WARN_ON(cxl_detach_process(ctx) &&
+		cxl_adapter_link_ok(ctx->afu->adapter));
 	flush_work(&ctx->fault_work); /* Only needed for dedicated process */
 	put_pid(ctx->pid);
 	cxl_ctx_put();
@@ -253,7 +269,11 @@
 	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);
 
 	free_page((u64)ctx->sstp);
+	if (ctx->ff_page)
+		__free_page(ctx->ff_page);
 	ctx->sstp = NULL;
+	if (ctx->kernelapi)
+		kfree(ctx->mapping);
 
 	kfree(ctx);
 }
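
Editor's note: the new mmio_err_ff path in the fault handler above fakes dead-device
semantics: instead of SIGBUS when the context is not started, userspace reads a lazily
allocated page of 0xff bytes, which is what a fenced PCIe device returns on the bus.
The core of the trick, reduced to a generic vm_ops fault handler (names are
illustrative; the fault callback signature matches this kernel generation):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    static struct page *ff_page;    /* one shared all-ones page */

    static int foo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
        if (!ff_page) {
            ff_page = alloc_page(GFP_USER);
            if (!ff_page)
                return VM_FAULT_OOM;
            memset(page_address(ff_page), 0xff, PAGE_SIZE);
        }
        get_page(ff_page);      /* the fault path hands out a reference */
        vmf->page = ff_page;
        return 0;               /* map the 0xff page at the faulting address */
    }
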
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 4fd66ca..1c30ef7 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -34,7 +34,7 @@
  * Bump version each time a user API change is made, whether it is
  * backwards compatible or not.
  */
-#define CXL_API_VERSION 1
+#define CXL_API_VERSION 2
 #define CXL_API_VERSION_COMPATIBLE 1
 
 /*
@@ -83,8 +83,10 @@
 /* 0x00C0:7EFF Implementation dependent area */
 static const cxl_p1_reg_t CXL_PSL_FIR1      = {0x0100};
 static const cxl_p1_reg_t CXL_PSL_FIR2      = {0x0108};
+static const cxl_p1_reg_t CXL_PSL_Timebase  = {0x0110};
 static const cxl_p1_reg_t CXL_PSL_VERSION   = {0x0118};
 static const cxl_p1_reg_t CXL_PSL_RESLCKTO  = {0x0128};
+static const cxl_p1_reg_t CXL_PSL_TB_CTLSTAT = {0x0140};
 static const cxl_p1_reg_t CXL_PSL_FIR_CNTL  = {0x0148};
 static const cxl_p1_reg_t CXL_PSL_DSNDCTL   = {0x0150};
 static const cxl_p1_reg_t CXL_PSL_SNWRALLOC = {0x0158};
@@ -152,6 +154,9 @@
 #define CXL_PSL_SPAP_Size_Shift 4
 #define CXL_PSL_SPAP_V    0x0000000000000001ULL
 
+/****** CXL_PSL_Control ****************************************************/
+#define CXL_PSL_Control_tb 0x0000000000000001ULL
+
 /****** CXL_PSL_DLCNTL *****************************************************/
 #define CXL_PSL_DLCNTL_D (0x1ull << (63-28))
 #define CXL_PSL_DLCNTL_C (0x1ull << (63-29))
@@ -418,6 +423,9 @@
 	/* Used to unmap any mmaps when force detaching */
 	struct address_space *mapping;
 	struct mutex mapping_lock;
+	struct page *ff_page;
+	bool mmio_err_ff;
+	bool kernelapi;
 
 	spinlock_t sste_lock; /* Protects segment table entries */
 	struct cxl_sste *sstp;
@@ -493,6 +501,7 @@
 	bool user_image_loaded;
 	bool perst_loads_image;
 	bool perst_select_user;
+	bool perst_same_image;
 };
 
 int cxl_alloc_one_irq(struct cxl *adapter);
@@ -531,16 +540,33 @@
 	__be32 software_state;
 } __packed;
 
+static inline bool cxl_adapter_link_ok(struct cxl *cxl)
+{
+	struct pci_dev *pdev;
+
+	pdev = to_pci_dev(cxl->dev.parent);
+	return !pci_channel_offline(pdev);
+}
+
 static inline void __iomem *_cxl_p1_addr(struct cxl *cxl, cxl_p1_reg_t reg)
 {
 	WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
 	return cxl->p1_mmio + cxl_reg_off(reg);
 }
 
-#define cxl_p1_write(cxl, reg, val) \
-	out_be64(_cxl_p1_addr(cxl, reg), val)
-#define cxl_p1_read(cxl, reg) \
-	in_be64(_cxl_p1_addr(cxl, reg))
+static inline void cxl_p1_write(struct cxl *cxl, cxl_p1_reg_t reg, u64 val)
+{
+	if (likely(cxl_adapter_link_ok(cxl)))
+		out_be64(_cxl_p1_addr(cxl, reg), val);
+}
+
+static inline u64 cxl_p1_read(struct cxl *cxl, cxl_p1_reg_t reg)
+{
+	if (likely(cxl_adapter_link_ok(cxl)))
+		return in_be64(_cxl_p1_addr(cxl, reg));
+	else
+		return ~0ULL;
+}
 
 static inline void __iomem *_cxl_p1n_addr(struct cxl_afu *afu, cxl_p1n_reg_t reg)
 {
@@ -548,26 +574,56 @@
 	return afu->p1n_mmio + cxl_reg_off(reg);
 }
 
-#define cxl_p1n_write(afu, reg, val) \
-	out_be64(_cxl_p1n_addr(afu, reg), val)
-#define cxl_p1n_read(afu, reg) \
-	in_be64(_cxl_p1n_addr(afu, reg))
+static inline void cxl_p1n_write(struct cxl_afu *afu, cxl_p1n_reg_t reg, u64 val)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		out_be64(_cxl_p1n_addr(afu, reg), val);
+}
+
+static inline u64 cxl_p1n_read(struct cxl_afu *afu, cxl_p1n_reg_t reg)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		return in_be64(_cxl_p1n_addr(afu, reg));
+	else
+		return ~0ULL;
+}
 
 static inline void __iomem *_cxl_p2n_addr(struct cxl_afu *afu, cxl_p2n_reg_t reg)
 {
 	return afu->p2n_mmio + cxl_reg_off(reg);
 }
 
-#define cxl_p2n_write(afu, reg, val) \
-	out_be64(_cxl_p2n_addr(afu, reg), val)
-#define cxl_p2n_read(afu, reg) \
-	in_be64(_cxl_p2n_addr(afu, reg))
+static inline void cxl_p2n_write(struct cxl_afu *afu, cxl_p2n_reg_t reg, u64 val)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		out_be64(_cxl_p2n_addr(afu, reg), val);
+}
 
+static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		return in_be64(_cxl_p2n_addr(afu, reg));
+	else
+		return ~0ULL;
+}
 
-#define cxl_afu_cr_read64(afu, cr, off) \
-	in_le64((afu)->afu_desc_mmio + (afu)->crs_offset + ((cr) * (afu)->crs_len) + (off))
-#define cxl_afu_cr_read32(afu, cr, off) \
-	in_le32((afu)->afu_desc_mmio + (afu)->crs_offset + ((cr) * (afu)->crs_len) + (off))
+static inline u64 cxl_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		return in_le64((afu)->afu_desc_mmio + (afu)->crs_offset +
+			       ((cr) * (afu)->crs_len) + (off));
+	else
+		return ~0ULL;
+}
+
+static inline u32 cxl_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		return in_le32((afu)->afu_desc_mmio + (afu)->crs_offset +
+			       ((cr) * (afu)->crs_len) + (off));
+	else
+		return 0xffffffff;
+}
 u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off);
 u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off);
 
@@ -585,6 +641,9 @@
 int cxl_alloc_adapter_nr(struct cxl *adapter);
 void cxl_remove_adapter_nr(struct cxl *adapter);
 
+int cxl_alloc_spa(struct cxl_afu *afu);
+void cxl_release_spa(struct cxl_afu *afu);
+
 int cxl_file_init(void);
 void cxl_file_exit(void);
 int cxl_register_adapter(struct cxl *adapter);
@@ -675,6 +734,7 @@
 
 void cxl_stop_trace(struct cxl *cxl);
 int cxl_pci_vphb_add(struct cxl_afu *afu);
+void cxl_pci_vphb_reconfigure(struct cxl_afu *afu);
 void cxl_pci_vphb_remove(struct cxl_afu *afu);
 
 extern struct pci_driver cxl_pci_driver;
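
Editor's note: converting the cxl_p1*/p1n*/p2n* macros into inline functions makes
every MMIO access EEH-aware: if the PCI channel is offline, writes are silently dropped
and reads return all-ones, matching what a fenced PHB produces. Callers can then treat
~0ULL as a link-down hint; a sketch using the accessors introduced above (check_fir
itself is hypothetical):

    static int check_fir(struct cxl *adapter)
    {
        u64 fir = cxl_p1_read(adapter, CXL_PSL_FIR1);

        if (fir == ~0ULL && !cxl_adapter_link_ok(adapter))
            return -EIO;    /* value is fabricated; start recovery instead */

        return 0;
    }
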
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index 825c412..18df6f4 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -48,7 +48,7 @@
 static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode,
 					    struct dentry *parent, u64 __iomem *value)
 {
-	return debugfs_create_file(name, mode, parent, (void *)value, &fops_io_x64);
+	return debugfs_create_file(name, mode, parent, (void __force *)value, &fops_io_x64);
 }
 
 int cxl_debugfs_adapter_add(struct cxl *adapter)
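
Editor's note: the (void __force *) cast above is for sparse, not the compiler.
__iomem pointers live in a separate address space annotation, and casting one to a
plain void * normally warns; __force records that dropping the annotation is
deliberate, here because debugfs only carries the cookie back to fops_io_x64 untouched.
In miniature (stash_io_cookie and its arguments are hypothetical):

    #include <linux/io.h>
    #include <linux/types.h>

    static void *stash_io_cookie(phys_addr_t phys_base, size_t size)
    {
        void __iomem *regs = ioremap(phys_base, size);

        /* a plain cast would trip sparse; __force marks it intentional */
        return (void __force *)regs;
    }
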
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index e3f4b69..a30bf28 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -73,6 +73,11 @@
 	if (!afu->current_mode)
 		goto err_put_afu;
 
+	if (!cxl_adapter_link_ok(adapter)) {
+		rc = -EIO;
+		goto err_put_afu;
+	}
+
 	if (!(ctx = cxl_context_alloc())) {
 		rc = -ENOMEM;
 		goto err_put_afu;
@@ -179,6 +184,8 @@
 	if (work.flags & CXL_START_WORK_AMR)
 		amr = work.amr & mfspr(SPRN_UAMOR);
 
+	ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
+
 	/*
 	 * We grab the PID here and not in the file open to allow for the case
 	 * where a process (master, some daemon, etc) has opened the chardev on
@@ -238,6 +245,9 @@
 	if (ctx->status == CLOSED)
 		return -EIO;
 
+	if (!cxl_adapter_link_ok(ctx->afu->adapter))
+		return -EIO;
+
 	pr_devel("afu_ioctl\n");
 	switch (cmd) {
 	case CXL_IOCTL_START_WORK:
@@ -251,7 +261,7 @@
 	return -EINVAL;
 }
 
-long afu_compat_ioctl(struct file *file, unsigned int cmd,
+static long afu_compat_ioctl(struct file *file, unsigned int cmd,
 			     unsigned long arg)
 {
 	return afu_ioctl(file, cmd, arg);
@@ -265,6 +275,9 @@
 	if (ctx->status != STARTED)
 		return -EIO;
 
+	if (!cxl_adapter_link_ok(ctx->afu->adapter))
+		return -EIO;
+
 	return cxl_context_iomap(ctx, vm);
 }
 
@@ -309,6 +322,9 @@
 	int rc;
 	DEFINE_WAIT(wait);
 
+	if (!cxl_adapter_link_ok(ctx->afu->adapter))
+		return -EIO;
+
 	if (count < CXL_READ_MIN_SIZE)
 		return -EINVAL;
 
@@ -319,6 +335,11 @@
 		if (ctx_event_pending(ctx))
 			break;
 
+		if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+			rc = -EIO;
+			goto out;
+		}
+
 		if (file->f_flags & O_NONBLOCK) {
 			rc = -EAGAIN;
 			goto out;
@@ -396,7 +417,7 @@
 	.mmap           = afu_mmap,
 };
 
-const struct file_operations afu_master_fops = {
+static const struct file_operations afu_master_fops = {
 	.owner		= THIS_MODULE,
 	.open           = afu_master_open,
 	.poll		= afu_poll,
@@ -519,7 +540,7 @@
 	 * If these change we really need to update API.  Either change some
 	 * flags or update API version number CXL_API_VERSION.
 	 */
-	BUILD_BUG_ON(CXL_API_VERSION != 1);
+	BUILD_BUG_ON(CXL_API_VERSION != 2);
 	BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
 	BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
 	BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
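
Editor's note: bumping CXL_API_VERSION deliberately trips the BUILD_BUG_ON above,
forcing whoever changes the user-visible API to update the version consciously. The
same compile-time assertion can pin any ABI struct layout; a sketch with a hypothetical
ioctl argument struct:

    #include <linux/bug.h>
    #include <linux/types.h>

    struct foo_start_work {      /* part of the user ABI: must stay 64 bytes */
        __u64 flags;
        __u64 reserved[7];
    };

    static inline void foo_abi_checks(void)
    {
        /* the build fails if anyone grows or shrinks the struct */
        BUILD_BUG_ON(sizeof(struct foo_start_work) != 64);
    }
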
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 680cd26..583b42a 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -30,12 +30,12 @@
 	serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
 	afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
 
-	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);
-	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%.16llx\n", fir1);
-	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%.16llx\n", fir2);
-	dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
-	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
-	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);
+	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
+	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
+	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
+	dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
+	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
 
 	dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
 	cxl_stop_trace(ctx->afu->adapter);
@@ -54,10 +54,10 @@
 	fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
 	errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
 	afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
-	dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
-	dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
-	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%.16llx\n", errstat);
-	dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);
+	dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+	dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
+	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
+	dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
 
 	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
 
@@ -72,7 +72,7 @@
 	WARN(1, "CXL ERROR interrupt %i\n", irq);
 
 	err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
-	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%.16llx\n", err_ivte);
+	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
 
 	dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
 	cxl_stop_trace(adapter);
@@ -80,7 +80,7 @@
 	fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
 	fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
 
-	dev_crit(&adapter->dev, "PSL_FIR1: 0x%.16llx\nPSL_FIR2: 0x%.16llx\n", fir1, fir2);
+	dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
 
 	return IRQ_HANDLED;
 }
@@ -147,7 +147,7 @@
 	if (dsisr & CXL_PSL_DSISR_An_PE)
 		return handle_psl_slice_error(ctx, dsisr, irq_info->errstat);
 	if (dsisr & CXL_PSL_DSISR_An_AE) {
-		pr_devel("CXL interrupt: AFU Error %.llx\n", irq_info->afu_err);
+		pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);
 
 		if (ctx->pending_afu_err) {
 			/*
@@ -158,7 +158,7 @@
 			 * probably best that we log them somewhere:
 			 */
 			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
-					    "undelivered to pe %i: %.llx\n",
+					    "undelivered to pe %i: 0x%016llx\n",
 					    ctx->pe, irq_info->afu_err);
 		} else {
 			spin_lock(&ctx->lock);
@@ -211,8 +211,8 @@
 	}
 	rcu_read_unlock();
 
-	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %.16llx DAR"
-		" %.16llx\n(Possible AFU HW issue - was a term/remove acked"
+	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
+		" %016llx\n(Possible AFU HW issue - was a term/remove acked"
 		" with outstanding transactions?)\n", ph, irq_info.dsisr,
 		irq_info.dar);
 	return fail_psl_irq(afu, &irq_info);
@@ -341,6 +341,9 @@
 
 void cxl_release_psl_err_irq(struct cxl *adapter)
 {
+	if (adapter->err_virq != irq_find_mapping(NULL, adapter->err_hwirq))
+		return;
+
 	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
 	cxl_unmap_irq(adapter->err_virq, adapter);
 	cxl_release_one_irq(adapter, adapter->err_hwirq);
@@ -374,6 +377,9 @@
 
 void cxl_release_serr_irq(struct cxl_afu *afu)
 {
+	if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
+		return;
+
 	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
 	cxl_unmap_irq(afu->serr_virq, afu);
 	cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
@@ -400,12 +406,15 @@
 
 void cxl_release_psl_irq(struct cxl_afu *afu)
 {
+	if (afu->psl_virq != irq_find_mapping(NULL, afu->psl_hwirq))
+		return;
+
 	cxl_unmap_irq(afu->psl_virq, afu);
 	cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
 	kfree(afu->psl_irq_name);
 }
 
-void afu_irq_name_free(struct cxl_context *ctx)
+static void afu_irq_name_free(struct cxl_context *ctx)
 {
 	struct cxl_irq_name *irq_name, *tmp;
 
@@ -421,6 +430,9 @@
 	int rc, r, i, j = 1;
 	struct cxl_irq_name *irq_name;
 
+	/* Initialize the list head to hold irq names */
+	INIT_LIST_HEAD(&ctx->irq_names);
+
 	if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
 		return rc;
 
@@ -432,13 +444,12 @@
 	ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
 				  sizeof(*ctx->irq_bitmap), GFP_KERNEL);
 	if (!ctx->irq_bitmap)
-		return -ENOMEM;
+		goto out;
 
 	/*
 	 * Allocate names first.  If any fail, bail out before allocating
 	 * actual hardware IRQs.
 	 */
-	INIT_LIST_HEAD(&ctx->irq_names);
 	for (r = 1; r < CXL_IRQ_RANGES; r++) {
 		for (i = 0; i < ctx->irqs.range[r]; i++) {
 			irq_name = kmalloc(sizeof(struct cxl_irq_name),
@@ -460,11 +471,12 @@
 	return 0;
 
 out:
+	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
 	afu_irq_name_free(ctx);
 	return -ENOMEM;
 }
 
-void afu_register_hwirqs(struct cxl_context *ctx)
+static void afu_register_hwirqs(struct cxl_context *ctx)
 {
 	irq_hw_number_t hwirq;
 	struct cxl_irq_name *irq_name;
@@ -511,4 +523,8 @@
 
 	afu_irq_name_free(ctx);
 	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
+
+	kfree(ctx->irq_bitmap);
+	ctx->irq_bitmap = NULL;
+	ctx->irq_count = 0;
 }
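
Editor's note: two of the irq.c fixes above follow one rule: initialise containers
before the first failure can reach cleanup code that walks them. INIT_LIST_HEAD moves
ahead of the first goto so the out: path's afu_irq_name_free() never iterates an
uninitialised list_head. The general shape, with hypothetical types and a stand-in
first allocation:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct name { struct list_head list; };
    struct ctx  { struct list_head names; };

    static void free_names(struct ctx *ctx)
    {
        struct name *n, *tmp;

        /* only safe because ctx->names was initialised up front */
        list_for_each_entry_safe(n, tmp, &ctx->names, list) {
            list_del(&n->list);
            kfree(n);
        }
    }

    static int ctx_setup(struct ctx *ctx)
    {
        void *buf;

        INIT_LIST_HEAD(&ctx->names);   /* before any failure is possible */

        buf = kmalloc(16, GFP_KERNEL); /* stand-in for the first allocation */
        if (!buf)
            goto out;
        kfree(buf);
        return 0;

    out:
        free_names(ctx);               /* walks an empty, but valid, list */
        return -ENOMEM;
    }
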
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 4a164ab..9fde75e 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -222,6 +222,7 @@
 	cxl_debugfs_exit();
 	cxl_file_exit();
 	unregister_cxl_calls(&cxl_calls);
+	idr_destroy(&cxl_adapter_idr);
 }
 
 module_init(init_cxl);
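
Editor's note: the one-line main.c change plugs a small leak: idr_destroy() releases
the IDR's internal layer caches, which removing the individual entries does not. The
matching lifecycle in module exit, sketched with a hypothetical foo_idr:

    #include <linux/idr.h>
    #include <linux/module.h>

    static DEFINE_IDR(foo_idr);

    static void __exit foo_exit(void)
    {
        /* entries are already removed; this frees the idr's own caches */
        idr_destroy(&foo_idr);
    }
    module_exit(foo_exit);
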
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 10567f2..b37f2e8 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -41,7 +41,14 @@
 			rc = -EBUSY;
 			goto out;
 		}
-		pr_devel_ratelimited("AFU control... (0x%.16llx)\n",
+
+		if (!cxl_adapter_link_ok(afu->adapter)) {
+			afu->enabled = enabled;
+			rc = -EIO;
+			goto out;
+		}
+
+		pr_devel_ratelimited("AFU control... (0x%016llx)\n",
 				     AFU_Cntl | command);
 		cpu_relax();
 		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
@@ -85,6 +92,10 @@
 
 int cxl_afu_check_and_enable(struct cxl_afu *afu)
 {
+	if (!cxl_adapter_link_ok(afu->adapter)) {
+		WARN(1, "Refusing to enable afu while link down!\n");
+		return -EIO;
+	}
 	if (afu->enabled)
 		return 0;
 	return afu_enable(afu);
@@ -103,6 +114,12 @@
 
 	pr_devel("PSL purge request\n");
 
+	if (!cxl_adapter_link_ok(afu->adapter)) {
+		dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
+		rc = -EIO;
+		goto out;
+	}
+
 	if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
 		WARN(1, "psl_purge request while AFU not disabled!\n");
 		cxl_afu_disable(afu);
@@ -119,14 +136,19 @@
 			rc = -EBUSY;
 			goto out;
 		}
+		if (!cxl_adapter_link_ok(afu->adapter)) {
+			rc = -EIO;
+			goto out;
+		}
+
 		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
-		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%.16llx  PSL_DSISR: 0x%.16llx\n", PSL_CNTL, dsisr);
+		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx  PSL_DSISR: 0x%016llx\n", PSL_CNTL, dsisr);
 		if (dsisr & CXL_PSL_DSISR_TRANS) {
 			dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
-			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%.16llx, DAR: 0x%.16llx\n", dsisr, dar);
+			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", dsisr, dar);
 			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
 		} else if (dsisr) {
-			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%.16llx\n", dsisr);
+			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n", dsisr);
 			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
 		} else {
 			cpu_relax();
@@ -161,10 +183,8 @@
 	return ((spa_size / 8) - 96) / 17;
 }
 
-static int alloc_spa(struct cxl_afu *afu)
+int cxl_alloc_spa(struct cxl_afu *afu)
 {
-	u64 spap;
-
 	/* Work out how many pages to allocate */
 	afu->spa_order = 0;
 	do {
@@ -183,6 +203,13 @@
 	pr_devel("spa pages: %i afu->spa_max_procs: %i   afu->num_procs: %i\n",
 		 1<<afu->spa_order, afu->spa_max_procs, afu->num_procs);
 
+	return 0;
+}
+
+static void attach_spa(struct cxl_afu *afu)
+{
+	u64 spap;
+
 	afu->sw_command_status = (__be64 *)((char *)afu->spa +
 					    ((afu->spa_max_procs + 3) * 128));
 
@@ -191,14 +218,19 @@
 	spap |= CXL_PSL_SPAP_V;
 	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap);
 	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
-
-	return 0;
 }
 
-static void release_spa(struct cxl_afu *afu)
+static inline void detach_spa(struct cxl_afu *afu)
 {
 	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
-	free_pages((unsigned long) afu->spa, afu->spa_order);
+}
+
+void cxl_release_spa(struct cxl_afu *afu)
+{
+	if (afu->spa) {
+		free_pages((unsigned long) afu->spa, afu->spa_order);
+		afu->spa = NULL;
+	}
 }
 
 int cxl_tlb_slb_invalidate(struct cxl *adapter)
@@ -215,6 +247,8 @@
 			dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
 			return -EBUSY;
 		}
+		if (!cxl_adapter_link_ok(adapter))
+			return -EIO;
 		cpu_relax();
 	}
 
@@ -224,6 +258,8 @@
 			dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
 			return -EBUSY;
 		}
+		if (!cxl_adapter_link_ok(adapter))
+			return -EIO;
 		cpu_relax();
 	}
 	return 0;
@@ -240,6 +276,11 @@
 			dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
 			return -EBUSY;
 		}
+		/* If the adapter has gone down, we can assume that we
+		 * will PERST it and that will invalidate everything.
+		 */
+		if (!cxl_adapter_link_ok(afu->adapter))
+			return -EIO;
 		cpu_relax();
 	}
 	return 0;
@@ -279,6 +320,8 @@
 	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);
 
 	while (1) {
+		if (!cxl_adapter_link_ok(adapter))
+			break;
 		slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
 		if (!(slbia & CXL_TLB_SLB_P))
 			break;
@@ -308,6 +351,11 @@
 			rc = -EBUSY;
 			goto out;
 		}
+		if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+			dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
+			rc = -EIO;
+			goto out;
+		}
 		state = be64_to_cpup(ctx->afu->sw_command_status);
 		if (state == ~0ULL) {
 			pr_err("cxl: Error adding process element to AFU\n");
@@ -355,8 +403,13 @@
 
 	mutex_lock(&ctx->afu->spa_mutex);
 	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
-	rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
-				    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
+	/* We could be asked to terminate when the hw is down. That
+	 * should always succeed: it's not running if the hw has gone
+	 * away and is being reset.
+	 */
+	if (cxl_adapter_link_ok(ctx->afu->adapter))
+		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
+					    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
 	ctx->elem->software_state = 0;	/* Remove Valid bit */
 	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
 	mutex_unlock(&ctx->afu->spa_mutex);
@@ -369,7 +422,14 @@
 
 	mutex_lock(&ctx->afu->spa_mutex);
 	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);
-	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0)))
+
+	/* We could be asked to remove when the hw is down. Again, if
+	 * the hw is down, the PE is gone, so we succeed.
+	 */
+	if (cxl_adapter_link_ok(ctx->afu->adapter))
+		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);
+
+	if (!rc)
 		ctx->pe_inserted = false;
 	slb_invalid(ctx);
 	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
@@ -397,8 +457,11 @@
 
 	dev_info(&afu->dev, "Activating AFU directed mode\n");
 
-	if (alloc_spa(afu))
-		return -ENOMEM;
+	if (afu->spa == NULL) {
+		if (cxl_alloc_spa(afu))
+			return -ENOMEM;
+	}
+	attach_spa(afu);
 
 	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
 	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
@@ -492,9 +555,7 @@
 	if ((result = cxl_afu_check_and_enable(ctx->afu)))
 		return result;
 
-	add_process_element(ctx);
-
-	return 0;
+	return add_process_element(ctx);
 }
 
 static int deactivate_afu_directed(struct cxl_afu *afu)
@@ -511,8 +572,6 @@
 	cxl_afu_disable(afu);
 	cxl_psl_purge(afu);
 
-	release_spa(afu);
-
 	return 0;
 }
 
@@ -614,6 +673,11 @@
 	if (!(mode & afu->modes_supported))
 		return -EINVAL;
 
+	if (!cxl_adapter_link_ok(afu->adapter)) {
+		WARN(1, "Device link is down, refusing to activate!\n");
+		return -EIO;
+	}
+
 	if (mode == CXL_MODE_DIRECTED)
 		return activate_afu_directed(afu);
 	if (mode == CXL_MODE_DEDICATED)
@@ -624,6 +688,11 @@
 
 int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
 {
+	if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+		WARN(1, "Device link is down, refusing to attach process!\n");
+		return -EIO;
+	}
+
 	ctx->kernel = kernel;
 	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
 		return attach_afu_directed(ctx, wed, amr);
@@ -668,6 +737,12 @@
 {
 	u64 pidtid;
 
+	/* If the adapter has gone away, we can't get any meaningful
+	 * information.
+	 */
+	if (!cxl_adapter_link_ok(afu->adapter))
+		return -EIO;
+
 	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
 	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
 	info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
@@ -684,7 +759,7 @@
 {
 	u64 dsisr;
 
-	pr_devel("RECOVERING FROM PSL ERROR... (0x%.16llx)\n", errstat);
+	pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);
 
 	/* Clear PSL_DSISR[PE] */
 	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
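
Editor's note: most of the native.c hunks above add the same guard to hardware polling
loops. Without it, a fenced PHB reads as all-ones forever, the busy bit never clears,
and the loop can only exit via its timeout. The resulting skeleton looks like this
(reg_read(), STATUS, BUSY and the CXL_TIMEOUT constant are placeholders):

    static int wait_not_busy(struct cxl_afu *afu)
    {
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

        while (reg_read(afu, STATUS) & BUSY) {
            if (time_after_eq(jiffies, timeout))
                return -EBUSY;   /* hardware never settled */
            if (!cxl_adapter_link_ok(afu->adapter))
                return -EIO;     /* link down: reads are all-ones now */
            cpu_relax();
        }
        return 0;
    }
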
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 32ad097..02c8516 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -24,6 +24,7 @@
 #include <asm/io.h>
 
 #include "cxl.h"
+#include <misc/cxl.h>
 
 
 #define CXL_PCI_VSEC_ID	0x1280
@@ -133,7 +134,7 @@
 	return (val >> ((off & 0x3) * 8)) & 0xff;
 }
 
-static DEFINE_PCI_DEVICE_TABLE(cxl_pci_tbl) = {
+static const struct pci_device_id cxl_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
@@ -369,6 +370,55 @@
 	return 0;
 }
 
+#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
+#define _2048_250MHZ_CYCLES 1
+
+static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
+{
+	u64 psl_tb;
+	int delta;
+	unsigned int retry = 0;
+	struct device_node *np;
+
+	if (!(np = pnv_pci_get_phb_node(dev)))
+		return -ENODEV;
+
+	/* Do not fail when CAPP timebase sync is not supported by OPAL */
+	of_node_get(np);
+	if (!of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
+		of_node_put(np);
+		pr_err("PSL: Timebase sync: OPAL support missing\n");
+		return 0;
+	}
+	of_node_put(np);
+
+	/*
+	 * Setup PSL Timebase Control and Status register
+	 * with the recommended Timebase Sync Count value
+	 */
+	cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
+		     TBSYNC_CNT(2 * _2048_250MHZ_CYCLES));
+
+	/* Enable PSL Timebase */
+	cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
+	cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb);
+
+	/* Wait until CORE TB and PSL TB difference <= 16usecs */
+	do {
+		msleep(1);
+		if (retry++ > 5) {
+			pr_err("PSL: Timebase sync: giving up!\n");
+			return -EIO;
+		}
+		psl_tb = cxl_p1_read(adapter, CXL_PSL_Timebase);
+		delta = mftb() - psl_tb;
+		if (delta < 0)
+			delta = -delta;
+	} while (cputime_to_usecs(delta) > 16);
+
+	return 0;
+}
+
 static int init_implementation_afu_regs(struct cxl_afu *afu)
 {
 	/* read/write masks for this slice */
@@ -539,10 +589,18 @@
 
 static void cxl_unmap_slice_regs(struct cxl_afu *afu)
 {
-	if (afu->p2n_mmio)
+	if (afu->p2n_mmio) {
 		iounmap(afu->p2n_mmio);
-	if (afu->p1n_mmio)
+		afu->p2n_mmio = NULL;
+	}
+	if (afu->p1n_mmio) {
 		iounmap(afu->p1n_mmio);
+		afu->p1n_mmio = NULL;
+	}
+	if (afu->afu_desc_mmio) {
+		iounmap(afu->afu_desc_mmio);
+		afu->afu_desc_mmio = NULL;
+	}
 }
 
 static void cxl_release_afu(struct device *dev)
@@ -551,6 +609,9 @@
 
 	pr_devel("cxl_release_afu\n");
 
+	idr_destroy(&afu->contexts_idr);
+	cxl_release_spa(afu);
+
 	kfree(afu);
 }
 
@@ -656,7 +717,7 @@
 	 */
 	reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
 	if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
-		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#.16llx\n", reg);
+		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
 		if (__cxl_afu_reset(afu))
 			return -EIO;
 		if (cxl_afu_disable(afu))
@@ -677,7 +738,7 @@
 	cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);
 	reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
 	if (reg) {
-		dev_warn(&afu->dev, "AFU had pending DSISR: %#.16llx\n", reg);
+		dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
 		if (reg & CXL_PSL_DSISR_TRANS)
 			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
 		else
@@ -686,12 +747,12 @@
 	reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
 	if (reg) {
 		if (reg & ~0xffff)
-			dev_warn(&afu->dev, "AFU had pending SERR: %#.16llx\n", reg);
+			dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
 		cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
 	}
 	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
 	if (reg) {
-		dev_warn(&afu->dev, "AFU had pending error status: %#.16llx\n", reg);
+		dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
 		cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
 	}
 
@@ -742,45 +803,70 @@
 	return count;
 }
 
-static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
+static int cxl_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
 {
-	struct cxl_afu *afu;
-	bool free = true;
 	int rc;
 
-	if (!(afu = cxl_alloc_afu(adapter, slice)))
-		return -ENOMEM;
-
-	if ((rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice)))
-		goto err1;
-
 	if ((rc = cxl_map_slice_regs(afu, adapter, dev)))
-		goto err1;
+		return rc;
 
 	if ((rc = sanitise_afu_regs(afu)))
-		goto err2;
+		goto err1;
 
 	/* We need to reset the AFU before we can read the AFU descriptor */
 	if ((rc = __cxl_afu_reset(afu)))
-		goto err2;
+		goto err1;
 
 	if (cxl_verbose)
 		dump_afu_descriptor(afu);
 
 	if ((rc = cxl_read_afu_descriptor(afu)))
-		goto err2;
+		goto err1;
 
 	if ((rc = cxl_afu_descriptor_looks_ok(afu)))
-		goto err2;
+		goto err1;
 
 	if ((rc = init_implementation_afu_regs(afu)))
-		goto err2;
+		goto err1;
 
 	if ((rc = cxl_register_serr_irq(afu)))
-		goto err2;
+		goto err1;
 
 	if ((rc = cxl_register_psl_irq(afu)))
-		goto err3;
+		goto err2;
+
+	return 0;
+
+err2:
+	cxl_release_serr_irq(afu);
+err1:
+	cxl_unmap_slice_regs(afu);
+	return rc;
+}
+
+static void cxl_deconfigure_afu(struct cxl_afu *afu)
+{
+	cxl_release_psl_irq(afu);
+	cxl_release_serr_irq(afu);
+	cxl_unmap_slice_regs(afu);
+}
+
+static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
+{
+	struct cxl_afu *afu;
+	int rc;
+
+	afu = cxl_alloc_afu(adapter, slice);
+	if (!afu)
+		return -ENOMEM;
+
+	rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
+	if (rc)
+		goto err_free;
+
+	rc = cxl_configure_afu(afu, adapter, dev);
+	if (rc)
+		goto err_free;
 
 	/* Don't care if this fails */
 	cxl_debugfs_afu_add(afu);
@@ -795,10 +881,6 @@
 	if ((rc = cxl_sysfs_afu_add(afu)))
 		goto err_put1;
 
-
-	if ((rc = cxl_afu_select_best_mode(afu)))
-		goto err_put2;
-
 	adapter->afu[afu->slice] = afu;
 
 	if ((rc = cxl_pci_vphb_add(afu)))
@@ -806,21 +888,16 @@
 
 	return 0;
 
-err_put2:
-	cxl_sysfs_afu_remove(afu);
 err_put1:
-	device_unregister(&afu->dev);
-	free = false;
+	cxl_deconfigure_afu(afu);
 	cxl_debugfs_afu_remove(afu);
-	cxl_release_psl_irq(afu);
-err3:
-	cxl_release_serr_irq(afu);
-err2:
-	cxl_unmap_slice_regs(afu);
-err1:
-	if (free)
-		kfree(afu);
+	device_unregister(&afu->dev);
 	return rc;
+
+err_free:
+	kfree(afu);
+	return rc;
 }
 
 static void cxl_remove_afu(struct cxl_afu *afu)
@@ -840,10 +917,7 @@
 	cxl_context_detach_all(afu);
 	cxl_afu_deactivate_mode(afu);
 
-	cxl_release_psl_irq(afu);
-	cxl_release_serr_irq(afu);
-	cxl_unmap_slice_regs(afu);
-
+	cxl_deconfigure_afu(afu);
 	device_unregister(&afu->dev);
 }
 
@@ -851,16 +925,15 @@
 {
 	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
 	int rc;
-	int i;
-	u32 val;
+
+	if (adapter->perst_same_image) {
+		dev_warn(&dev->dev,
+			 "cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n");
+		return -EINVAL;
+	}
 
 	dev_info(&dev->dev, "CXL reset\n");
 
-	for (i = 0; i < adapter->slices; i++) {
-		cxl_pci_vphb_remove(adapter->afu[i]);
-		cxl_remove_afu(adapter->afu[i]);
-	}
-
 	/* pcie_warm_reset requests a fundamental pci reset which includes a
 	 * PERST assert/deassert.  PERST triggers a loading of the image
 	 * if "user" or "factory" is selected in sysfs */
@@ -869,20 +942,6 @@
 		return rc;
 	}
 
-	/* the PERST done above fences the PHB.  So, reset depends on EEH
-	 * to unbind the driver, tell Sapphire to reinit the PHB, and rebind
-	 * the driver.  Do an mmio read explictly to ensure EEH notices the
-	 * fenced PHB.  Retry for a few seconds before giving up. */
-	i = 0;
-	while (((val = mmio_read32be(adapter->p1_mmio)) != 0xffffffff) &&
-		(i < 5)) {
-		msleep(500);
-		i++;
-	}
-
-	if (val != 0xffffffff)
-		dev_err(&dev->dev, "cxl: PERST failed to trigger EEH\n");
-
 	return rc;
 }
 
@@ -893,7 +952,7 @@
 	if (pci_request_region(dev, 0, "priv 1 regs"))
 		goto err2;
 
-	pr_devel("cxl_map_adapter_regs: p1: %#.16llx %#llx, p2: %#.16llx %#llx",
+	pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
 			p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));
 
 	if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
@@ -917,10 +976,16 @@
 
 static void cxl_unmap_adapter_regs(struct cxl *adapter)
 {
-	if (adapter->p1_mmio)
+	if (adapter->p1_mmio) {
 		iounmap(adapter->p1_mmio);
-	if (adapter->p2_mmio)
+		adapter->p1_mmio = NULL;
+		pci_release_region(to_pci_dev(adapter->dev.parent), 2);
+	}
+	if (adapter->p2_mmio) {
 		iounmap(adapter->p2_mmio);
+		adapter->p2_mmio = NULL;
+		pci_release_region(to_pci_dev(adapter->dev.parent), 0);
+	}
 }
 
 static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
@@ -949,7 +1014,6 @@
 	CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
 	CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
 	adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
-	adapter->perst_loads_image = true;
 	adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
 
 	CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
@@ -1009,82 +1073,139 @@
 
 	pr_devel("cxl_release_adapter\n");
 
+	cxl_remove_adapter_nr(adapter);
+
 	kfree(adapter);
 }
 
-static struct cxl *cxl_alloc_adapter(struct pci_dev *dev)
+static struct cxl *cxl_alloc_adapter(void)
 {
 	struct cxl *adapter;
 
 	if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
 		return NULL;
 
-	adapter->dev.parent = &dev->dev;
-	adapter->dev.release = cxl_release_adapter;
-	pci_set_drvdata(dev, adapter);
 	spin_lock_init(&adapter->afu_list_lock);
 
+	if (cxl_alloc_adapter_nr(adapter))
+		goto err1;
+
+	if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
+		goto err2;
+
 	return adapter;
+
+err2:
+	cxl_remove_adapter_nr(adapter);
+err1:
+	kfree(adapter);
+	return NULL;
 }
 
+#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))
+
 static int sanitise_adapter_regs(struct cxl *adapter)
 {
-	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
+	/* Clear PSL tberror bit by writing 1 to it */
+	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror);
 	return cxl_tlb_slb_invalidate(adapter);
 }
 
+/* This should contain *only* operations that can safely be done in
+ * both creation and recovery.
+ */
+static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
+{
+	int rc;
+
+	adapter->dev.parent = &dev->dev;
+	adapter->dev.release = cxl_release_adapter;
+	pci_set_drvdata(dev, adapter);
+
+	rc = pci_enable_device(dev);
+	if (rc) {
+		dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
+		return rc;
+	}
+
+	if ((rc = cxl_read_vsec(adapter, dev)))
+		return rc;
+
+	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
+		return rc;
+
+	if ((rc = setup_cxl_bars(dev)))
+		return rc;
+
+	if ((rc = switch_card_to_cxl(dev)))
+		return rc;
+
+	if ((rc = cxl_update_image_control(adapter)))
+		return rc;
+
+	if ((rc = cxl_map_adapter_regs(adapter, dev)))
+		return rc;
+
+	if ((rc = sanitise_adapter_regs(adapter)))
+		goto err;
+
+	if ((rc = init_implementation_adapter_regs(adapter, dev)))
+		goto err;
+
+	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
+		goto err;
+
+	/* If recovery happened, the last step is to turn on snooping.
+	 * In the non-recovery case this has no effect */
+	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
+		goto err;
+
+	if ((rc = cxl_setup_psl_timebase(adapter, dev)))
+		goto err;
+
+	if ((rc = cxl_register_psl_err_irq(adapter)))
+		goto err;
+
+	return 0;
+
+err:
+	cxl_unmap_adapter_regs(adapter);
+	return rc;
+}
+
+static void cxl_deconfigure_adapter(struct cxl *adapter)
+{
+	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
+
+	cxl_release_psl_err_irq(adapter);
+	cxl_unmap_adapter_regs(adapter);
+
+	pci_disable_device(pdev);
+}
+
 static struct cxl *cxl_init_adapter(struct pci_dev *dev)
 {
 	struct cxl *adapter;
-	bool free = true;
 	int rc;
 
-
-	if (!(adapter = cxl_alloc_adapter(dev)))
+	adapter = cxl_alloc_adapter();
+	if (!adapter)
 		return ERR_PTR(-ENOMEM);
 
-	if ((rc = cxl_read_vsec(adapter, dev)))
-		goto err1;
+	/* Set defaults for parameters which need to persist over
+	 * configure/reconfigure
+	 */
+	adapter->perst_loads_image = true;
+	adapter->perst_same_image = false;
 
-	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
-		goto err1;
-
-	if ((rc = setup_cxl_bars(dev)))
-		goto err1;
-
-	if ((rc = switch_card_to_cxl(dev)))
-		goto err1;
-
-	if ((rc = cxl_alloc_adapter_nr(adapter)))
-		goto err1;
-
-	if ((rc = dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)))
-		goto err2;
-
-	if ((rc = cxl_update_image_control(adapter)))
-		goto err2;
-
-	if ((rc = cxl_map_adapter_regs(adapter, dev)))
-		goto err2;
-
-	if ((rc = sanitise_adapter_regs(adapter)))
-		goto err2;
-
-	if ((rc = init_implementation_adapter_regs(adapter, dev)))
-		goto err3;
-
-	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
-		goto err3;
-
-	/* If recovery happened, the last step is to turn on snooping.
-	 * In the non-recovery case this has no effect */
-	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON))) {
-		goto err3;
+	rc = cxl_configure_adapter(adapter, dev);
+	if (rc) {
+		pci_disable_device(dev);
+		cxl_release_adapter(&adapter->dev);
+		return ERR_PTR(rc);
 	}
 
-	if ((rc = cxl_register_psl_err_irq(adapter)))
-		goto err3;
-
 	/* Don't care if this one fails: */
 	cxl_debugfs_adapter_add(adapter);
 
@@ -1101,37 +1222,25 @@
 	return adapter;
 
 err_put1:
-	device_unregister(&adapter->dev);
-	free = false;
+	/* This should mirror cxl_remove_adapter, except without the
+	 * sysfs parts
+	 */
 	cxl_debugfs_adapter_remove(adapter);
-	cxl_release_psl_err_irq(adapter);
-err3:
-	cxl_unmap_adapter_regs(adapter);
-err2:
-	cxl_remove_adapter_nr(adapter);
-err1:
-	if (free)
-		kfree(adapter);
+	cxl_deconfigure_adapter(adapter);
+	device_unregister(&adapter->dev);
 	return ERR_PTR(rc);
 }
 
 static void cxl_remove_adapter(struct cxl *adapter)
 {
-	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
-
-	pr_devel("cxl_release_adapter\n");
+	pr_devel("cxl_remove_adapter\n");
 
 	cxl_sysfs_adapter_remove(adapter);
 	cxl_debugfs_adapter_remove(adapter);
-	cxl_release_psl_err_irq(adapter);
-	cxl_unmap_adapter_regs(adapter);
-	cxl_remove_adapter_nr(adapter);
+
+	cxl_deconfigure_adapter(adapter);
 
 	device_unregister(&adapter->dev);
-
-	pci_release_region(pdev, 0);
-	pci_release_region(pdev, 2);
-	pci_disable_device(pdev);
 }
 
 static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -1145,21 +1254,21 @@
 	if (cxl_verbose)
 		dump_cxl_config_space(dev);
 
-	if ((rc = pci_enable_device(dev))) {
-		dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
-		return rc;
-	}
-
 	adapter = cxl_init_adapter(dev);
 	if (IS_ERR(adapter)) {
 		dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
-		pci_disable_device(dev);
 		return PTR_ERR(adapter);
 	}
 
 	for (slice = 0; slice < adapter->slices; slice++) {
-		if ((rc = cxl_init_afu(adapter, slice, dev)))
+		if ((rc = cxl_init_afu(adapter, slice, dev))) {
 			dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
+			continue;
+		}
+
+		rc = cxl_afu_select_best_mode(adapter->afu[slice]);
+		if (rc)
+			dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
 	}
 
 	return 0;
@@ -1183,10 +1292,262 @@
 	cxl_remove_adapter(adapter);
 }
 
+static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
+						pci_channel_state_t state)
+{
+	struct pci_dev *afu_dev;
+	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
+	pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
+
+	/* There should only be one entry, but go through the list
+	 * anyway
+	 */
+	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
+		if (!afu_dev->driver)
+			continue;
+
+		afu_dev->error_state = state;
+
+		if (afu_dev->driver->err_handler)
+			afu_result = afu_dev->driver->err_handler->error_detected(afu_dev,
+										  state);
+		/* Disconnect trumps all, NONE trumps NEED_RESET */
+		if (afu_result == PCI_ERS_RESULT_DISCONNECT)
+			result = PCI_ERS_RESULT_DISCONNECT;
+		else if ((afu_result == PCI_ERS_RESULT_NONE) &&
+			 (result == PCI_ERS_RESULT_NEED_RESET))
+			result = PCI_ERS_RESULT_NONE;
+	}
+	return result;
+}
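
The merging rule in the loop above ("Disconnect trumps all, NONE trumps NEED_RESET") can be restated as a pure helper. A sketch for illustration only, not code from this patch:

/* Sketch: combine a per-device result into the accumulated one. */
static pci_ers_result_t merge_afu_result(pci_ers_result_t cur,
					 pci_ers_result_t new)
{
	if (new == PCI_ERS_RESULT_DISCONNECT)
		return PCI_ERS_RESULT_DISCONNECT;
	if (new == PCI_ERS_RESULT_NONE && cur == PCI_ERS_RESULT_NEED_RESET)
		return PCI_ERS_RESULT_NONE;
	return cur;
}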
+
+static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
+					       pci_channel_state_t state)
+{
+	struct cxl *adapter = pci_get_drvdata(pdev);
+	struct cxl_afu *afu;
+	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
+	int i;
+
+	/* At this point, we could still have interrupts pending.
+	 * Let's try to get them out of the way before they do
+	 * anything we don't like.
+	 */
+	schedule();
+
+	/* If we're permanently dead, give up. */
+	if (state == pci_channel_io_perm_failure) {
+		/* Tell the AFU drivers; but we don't care what they
+		 * say, we're going away.
+		 */
+		for (i = 0; i < adapter->slices; i++) {
+			afu = adapter->afu[i];
+			cxl_vphb_error_detected(afu, state);
+		}
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	/* Are we reflashing?
+	 *
+	 * If we reflash, we could come back as something entirely
+	 * different, including a non-CAPI card. As such, by default
+	 * we don't participate in the process. We'll be unbound and
+	 * the slot re-probed. (TODO: check EEH doesn't blindly rebind
+	 * us!)
+	 *
+	 * However, this isn't the entire story: for reliability
+	 * reasons, we usually want to reflash the FPGA on PERST in
+	 * order to get back to a more reliable known-good state.
+	 *
+	 * This causes us a bit of a problem: if we reflash we can't
+	 * trust that we'll come back the same - we could have a new
+	 * image and have been PERSTed in order to load that
+	 * image. However, most of the time we actually *will* come
+	 * back the same - for example a regular EEH event.
+	 *
+	 * Therefore, we allow the user to assert that the image is
+	 * indeed the same and that we should continue on into EEH
+	 * anyway.
+	 */
+	if (adapter->perst_loads_image && !adapter->perst_same_image) {
+		/* TODO take the PHB out of CXL mode */
+		dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n");
+		return PCI_ERS_RESULT_NONE;
+	}
+
+	/*
+	 * At this point, we want to try to recover.  We'll always
+	 * need a complete slot reset: we don't trust any other reset.
+	 *
+	 * Now, we go through each AFU:
+	 *  - We send the driver, if bound, an error_detected callback.
+	 *    We expect it to clean up, but it can also tell us to give
+	 *    up and permanently detach the card. To simplify things, if
+	 *    any bound AFU driver doesn't support EEH, we give up on EEH.
+	 *
+	 *  - We detach all contexts associated with the AFU. This
+	 *    does not free them, but puts them into a CLOSED state
+	 *    which causes the associated files to return useful
+	 *    errors to userland. It also unmaps, but does not free,
+	 *    any IRQs.
+	 *
+	 *  - We clean up our side: releasing and unmapping resources we hold
+	 *    so we can wire them up again when the hardware comes back up.
+	 *
+	 * Driver authors should note:
+	 *
+	 *  - Any contexts you create in your kernel driver (except
+	 *    those associated with anonymous file descriptors) are
+	 *    your responsibility to free and recreate. Likewise with
+	 *    any attached resources.
+	 *
+	 *  - We will take responsibility for re-initialising the
+	 *    device context (the one set up for you in
+	 *    cxl_pci_enable_device_hook and accessed through
+	 *    cxl_get_context). If you've attached IRQs or other
+	 *    resources to it, they remain yours to free.
+	 *
+	 * You can call the same functions to release resources as you
+	 * normally would: we make sure that these functions continue
+	 * to work when the hardware is down.
+	 *
+	 * Two examples:
+	 *
+	 * 1) If you normally free all your resources at the end of
+	 *    each request, or if you use anonymous FDs, your
+	 *    error_detected callback can simply set a flag to tell
+	 *    your driver not to start any new calls. You can then
+	 *    clear the flag in the resume callback.
+	 *
+	 * 2) If you normally allocate your resources on startup:
+	 *     * Set a flag in error_detected as above.
+	 *     * Let CXL detach your contexts.
+	 *     * In slot_reset, free the old resources and allocate new ones.
+	 *     * In resume, clear the flag to allow things to start.
+	 */
+	for (i = 0; i < adapter->slices; i++) {
+		afu = adapter->afu[i];
+
+		result = cxl_vphb_error_detected(afu, state);
+
+		/* Only continue if everyone agrees on NEED_RESET */
+		if (result != PCI_ERS_RESULT_NEED_RESET)
+			return result;
+
+		cxl_context_detach_all(afu);
+		cxl_afu_deactivate_mode(afu);
+		cxl_deconfigure_afu(afu);
+	}
+	cxl_deconfigure_adapter(adapter);
+
+	return result;
+}
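
The driver-side guidance in the comment above is easier to see in code. Below is a minimal sketch of pattern (1) for a hypothetical AFU driver; every name here (my_afu_*, the frozen flag) is illustrative, not part of this patch.

#include <linux/pci.h>

struct my_afu_priv {
	bool frozen;		/* set in error_detected, cleared in resume */
};

static pci_ers_result_t my_afu_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct my_afu_priv *priv = pci_get_drvdata(pdev);

	/* Stop issuing new work; cxl will detach our contexts and the
	 * associated files start returning errors to userland.
	 */
	priv->frozen = true;
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t my_afu_slot_reset(struct pci_dev *pdev)
{
	/* cxl has re-initialised the default device context by now;
	 * check that the hardware came back, but don't start work yet.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

static void my_afu_resume(struct pci_dev *pdev)
{
	struct my_afu_priv *priv = pci_get_drvdata(pdev);

	priv->frozen = false;	/* new calls may proceed again */
}

static const struct pci_error_handlers my_afu_err_handler = {
	.error_detected	= my_afu_error_detected,
	.slot_reset	= my_afu_slot_reset,
	.resume		= my_afu_resume,
};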
+
+static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
+{
+	struct cxl *adapter = pci_get_drvdata(pdev);
+	struct cxl_afu *afu;
+	struct cxl_context *ctx;
+	struct pci_dev *afu_dev;
+	pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED;
+	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
+	int i;
+
+	if (cxl_configure_adapter(adapter, pdev))
+		goto err;
+
+	for (i = 0; i < adapter->slices; i++) {
+		afu = adapter->afu[i];
+
+		if (cxl_configure_afu(afu, adapter, pdev))
+			goto err;
+
+		if (cxl_afu_select_best_mode(afu))
+			goto err;
+
+		cxl_pci_vphb_reconfigure(afu);
+
+		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
+			/* Reset the device context.
+			 * TODO: make this less disruptive
+			 */
+			ctx = cxl_get_context(afu_dev);
+
+			if (ctx && cxl_release_context(ctx))
+				goto err;
+
+			ctx = cxl_dev_context_init(afu_dev);
+			if (!ctx)
+				goto err;
+
+			afu_dev->dev.archdata.cxl_ctx = ctx;
+
+			if (cxl_afu_check_and_enable(afu))
+				goto err;
+
+			afu_dev->error_state = pci_channel_io_normal;
+
+			/* If there's a driver attached, allow it to
+			 * chime in on recovery. Drivers should check
+			 * if everything has come back OK, but
+			 * shouldn't start new work until we call
+			 * their resume function.
+			 */
+			if (!afu_dev->driver)
+				continue;
+
+			if (afu_dev->driver->err_handler &&
+			    afu_dev->driver->err_handler->slot_reset)
+				afu_result = afu_dev->driver->err_handler->slot_reset(afu_dev);
+
+			if (afu_result == PCI_ERS_RESULT_DISCONNECT)
+				result = PCI_ERS_RESULT_DISCONNECT;
+		}
+	}
+	return result;
+
+err:
+	/* All the bits that happen in both error_detected and cxl_remove
+	 * should be idempotent, so we don't need to worry about leaving a mix
+	 * of unconfigured and reconfigured resources.
+	 */
+	dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n");
+	return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static void cxl_pci_resume(struct pci_dev *pdev)
+{
+	struct cxl *adapter = pci_get_drvdata(pdev);
+	struct cxl_afu *afu;
+	struct pci_dev *afu_dev;
+	int i;
+
+	/* Everything is back up now, so drivers should restart work.
+	 * This is not the place to check whether everything came back up
+	 * properly, because there's no return value: do that in slot_reset.
+	 */
+	for (i = 0; i < adapter->slices; i++) {
+		afu = adapter->afu[i];
+
+		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
+			if (afu_dev->driver && afu_dev->driver->err_handler &&
+			    afu_dev->driver->err_handler->resume)
+				afu_dev->driver->err_handler->resume(afu_dev);
+		}
+	}
+}
+
+static const struct pci_error_handlers cxl_err_handler = {
+	.error_detected = cxl_pci_error_detected,
+	.slot_reset = cxl_pci_slot_reset,
+	.resume = cxl_pci_resume,
+};
+
 struct pci_driver cxl_pci_driver = {
 	.name = "cxl-pci",
 	.id_table = cxl_pci_tbl,
 	.probe = cxl_probe,
 	.remove = cxl_remove,
 	.shutdown = cxl_remove,
+	.err_handler = &cxl_err_handler,
 };
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 87cd747..25868c2 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -112,12 +112,38 @@
 	return count;
 }
 
+static ssize_t perst_reloads_same_image_show(struct device *device,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct cxl *adapter = to_cxl_adapter(device);
+
+	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image);
+}
+
+static ssize_t perst_reloads_same_image_store(struct device *device,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct cxl *adapter = to_cxl_adapter(device);
+	int rc;
+	int val;
+
+	rc = sscanf(buf, "%i", &val);
+	if ((rc != 1) || !(val == 1 || val == 0))
+		return -EINVAL;
+
+	adapter->perst_same_image = (val == 1 ? true : false);
+	return count;
+}
+
 static struct device_attribute adapter_attrs[] = {
 	__ATTR_RO(caia_version),
 	__ATTR_RO(psl_revision),
 	__ATTR_RO(base_image),
 	__ATTR_RO(image_loaded),
 	__ATTR_RW(load_image_on_perst),
+	__ATTR_RW(perst_reloads_same_image),
 	__ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
 };
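
With the attribute wired up, a user who trusts that the FPGA image reloaded on PERST is unchanged can opt back into EEH recovery. A minimal userspace sketch; the card0 path is an assumption, substitute the card in question:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assert that PERST reloads the same image (hypothetical card0). */
	int fd = open("/sys/class/cxl/card0/perst_reloads_same_image",
		      O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}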
 
diff --git a/drivers/misc/cxl/trace.h b/drivers/misc/cxl/trace.h
index ae434d8..6e1e2ad 100644
--- a/drivers/misc/cxl/trace.h
+++ b/drivers/misc/cxl/trace.h
@@ -105,7 +105,7 @@
 		__entry->num_interrupts = num_interrupts;
 	),
 
-	TP_printk("afu%i.%i pid=%i pe=%i wed=0x%.16llx irqs=%i amr=0x%llx",
+	TP_printk("afu%i.%i pid=%i pe=%i wed=0x%016llx irqs=%i amr=0x%llx",
 		__entry->card,
 		__entry->afu,
 		__entry->pid,
@@ -177,7 +177,7 @@
 		__entry->dar = dar;
 	),
 
-	TP_printk("afu%i.%i pe=%i irq=%i dsisr=%s dar=0x%.16llx",
+	TP_printk("afu%i.%i pe=%i irq=%i dsisr=%s dar=0x%016llx",
 		__entry->card,
 		__entry->afu,
 		__entry->pe,
@@ -233,7 +233,7 @@
 		__entry->dar = dar;
 	),
 
-	TP_printk("afu%i.%i pe=%i dar=0x%.16llx",
+	TP_printk("afu%i.%i pe=%i dar=0x%016llx",
 		__entry->card,
 		__entry->afu,
 		__entry->pe,
@@ -264,7 +264,7 @@
 		__entry->v = v;
 	),
 
-	TP_printk("afu%i.%i pe=%i SSTE[%i] E=0x%.16llx V=0x%.16llx",
+	TP_printk("afu%i.%i pe=%i SSTE[%i] E=0x%016llx V=0x%016llx",
 		__entry->card,
 		__entry->afu,
 		__entry->pe,
@@ -295,7 +295,7 @@
 		__entry->dar = dar;
 	),
 
-	TP_printk("afu%i.%i pe=%i dsisr=%s dar=0x%.16llx",
+	TP_printk("afu%i.%i pe=%i dsisr=%s dar=0x%016llx",
 		__entry->card,
 		__entry->afu,
 		__entry->pe,
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index 2eba002..6dd16a6 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -138,6 +138,26 @@
 	return 0;
 }
 
+
+static inline bool cxl_config_link_ok(struct pci_bus *bus)
+{
+	struct pci_controller *phb;
+	struct cxl_afu *afu;
+
+	/* Config space IO is based on phb->cfg_addr, which is based on
+	 * afu_desc_mmio. This isn't safe to read/write when the link
+	 * goes down, as EEH tears down MMIO space.
+	 *
+	 * Check if the link is OK before proceeding.
+	 */
+
+	phb = pci_bus_to_host(bus);
+	if (phb == NULL)
+		return false;
+	afu = (struct cxl_afu *)phb->private_data;
+	return cxl_adapter_link_ok(afu->adapter);
+}
+
 static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
 				int offset, int len, u32 *val)
 {
@@ -150,6 +170,9 @@
 	if (rc)
 		return rc;
 
+	if (!cxl_config_link_ok(bus))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
 	/* Can only read 32 bits */
 	*val = (in_le32(ioaddr) >> shift) & mask;
 	return PCIBIOS_SUCCESSFUL;
@@ -167,6 +190,9 @@
 	if (rc)
 		return rc;
 
+	if (!cxl_config_link_ok(bus))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
 	/* Can only write 32 bits so do read-modify-write */
 	mask <<= shift;
 	val <<= shift;
@@ -240,6 +266,14 @@
 	return 0;
 }
 
+void cxl_pci_vphb_reconfigure(struct cxl_afu *afu)
+{
+	/* When we are reconfigured, the AFU's MMIO space is unmapped
+	 * and remapped. We need to reflect this in the PHB's view of
+	 * the world.
+	 */
+	afu->phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
+}
 
 void cxl_pci_vphb_remove(struct cxl_afu *afu)
 {
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 15c33cc..431e1dd 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -186,10 +186,10 @@
 	if (IS_ERR(sram->virt_base))
 		return PTR_ERR(sram->virt_base);
 
-	sram->pool = devm_gen_pool_create(sram->dev,
-					  ilog2(SRAM_GRANULARITY), -1);
-	if (!sram->pool)
-		return -ENOMEM;
+	sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
+					  NUMA_NO_NODE, NULL);
+	if (IS_ERR(sram->pool))
+		return PTR_ERR(sram->pool);
 
 	ret = sram_reserve_regions(sram, res);
 	if (ret)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 5daf302..6f4323c 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -467,7 +467,7 @@
 			sg_set_buf(__sg, buf + offset, len);
 			offset += len;
 			remain -= len;
-			(__sg++)->page_link &= ~0x02;
+			sg_unmark_end(__sg++);
 			sg_len++;
 		} while (remain);
 	}
@@ -475,7 +475,7 @@
 	list_for_each_entry(req, &packed->list, queuelist) {
 		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
 		__sg = sg + (sg_len - 1);
-		(__sg++)->page_link &= ~0x02;
+		sg_unmark_end(__sg++);
 	}
 	sg_mark_end(sg + (sg_len - 1));
 	return sg_len;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 51394e5..a4e27e8 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -238,8 +238,8 @@
 
 	ifc_nand_ctrl->page = page_addr;
 	/* Program ROW0/COL0 */
-	iowrite32be(page_addr, &ifc->ifc_nand.row0);
-	iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
+	ifc_out32(page_addr, &ifc->ifc_nand.row0);
+	ifc_out32((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
 
 	buf_num = page_addr & priv->bufnum_mask;
 
@@ -301,19 +301,19 @@
 	int i;
 
 	/* set the chip select for NAND Transaction */
-	iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT,
-		    &ifc->ifc_nand.nand_csel);
+	ifc_out32(priv->bank << IFC_NAND_CSEL_SHIFT,
+		  &ifc->ifc_nand.nand_csel);
 
 	dev_vdbg(priv->dev,
 			"%s: fir0=%08x fcr0=%08x\n",
 			__func__,
-			ioread32be(&ifc->ifc_nand.nand_fir0),
-			ioread32be(&ifc->ifc_nand.nand_fcr0));
+			ifc_in32(&ifc->ifc_nand.nand_fir0),
+			ifc_in32(&ifc->ifc_nand.nand_fcr0));
 
 	ctrl->nand_stat = 0;
 
 	/* start read/write seq */
-	iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+	ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
 
 	/* wait for command complete flag or timeout */
 	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -336,7 +336,7 @@
 		int sector_end = sector + chip->ecc.steps - 1;
 
 		for (i = sector / 4; i <= sector_end / 4; i++)
-			eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]);
+			eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]);
 
 		for (i = sector; i <= sector_end; i++) {
 			errors = check_read_ecc(mtd, ctrl, eccstat, i);
@@ -376,33 +376,33 @@
 
 	/* Program FIR/IFC_NAND_FCR0 for Small/Large page */
 	if (mtd->writesize > 512) {
-		iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			    (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-			    (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
-			    (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
-			    (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
-			    &ifc->ifc_nand.nand_fir0);
-		iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+			  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+			  (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
 
-		iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
-			    (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
-			    &ifc->ifc_nand.nand_fcr0);
+		ifc_out32((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+			  (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
+			  &ifc->ifc_nand.nand_fcr0);
 	} else {
-		iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			    (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-			    (IFC_FIR_OP_RA0  << IFC_NAND_FIR0_OP2_SHIFT) |
-			    (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
-			    &ifc->ifc_nand.nand_fir0);
-		iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+			  (IFC_FIR_OP_RA0  << IFC_NAND_FIR0_OP2_SHIFT) |
+			  (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
 
 		if (oob)
-			iowrite32be(NAND_CMD_READOOB <<
-				    IFC_NAND_FCR0_CMD0_SHIFT,
-				    &ifc->ifc_nand.nand_fcr0);
+			ifc_out32(NAND_CMD_READOOB <<
+				  IFC_NAND_FCR0_CMD0_SHIFT,
+				  &ifc->ifc_nand.nand_fcr0);
 		else
-			iowrite32be(NAND_CMD_READ0 <<
-				    IFC_NAND_FCR0_CMD0_SHIFT,
-				    &ifc->ifc_nand.nand_fcr0);
+			ifc_out32(NAND_CMD_READ0 <<
+				  IFC_NAND_FCR0_CMD0_SHIFT,
+				  &ifc->ifc_nand.nand_fcr0);
 	}
 }
 
@@ -422,7 +422,7 @@
 	switch (command) {
 	/* READ0 read the entire buffer to use hardware ECC. */
 	case NAND_CMD_READ0:
-		iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+		ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
 		set_addr(mtd, 0, page_addr, 0);
 
 		ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -437,7 +437,7 @@
 
 	/* READOOB reads only the OOB because no ECC is performed. */
 	case NAND_CMD_READOOB:
-		iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
+		ifc_out32(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
 		set_addr(mtd, column, page_addr, 1);
 
 		ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -453,19 +453,19 @@
 		if (command == NAND_CMD_PARAM)
 			timing = IFC_FIR_OP_RBCD;
 
-		iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			    (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
-			    (timing << IFC_NAND_FIR0_OP2_SHIFT),
-			    &ifc->ifc_nand.nand_fir0);
-		iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT,
-			    &ifc->ifc_nand.nand_fcr0);
-		iowrite32be(column, &ifc->ifc_nand.row3);
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
+			  (timing << IFC_NAND_FIR0_OP2_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(command << IFC_NAND_FCR0_CMD0_SHIFT,
+			  &ifc->ifc_nand.nand_fcr0);
+		ifc_out32(column, &ifc->ifc_nand.row3);
 
 		/*
 		 * although currently it's 8 bytes for READID, we always read
 		 * the maximum 256 bytes (for PARAM)
 		 */
-		iowrite32be(256, &ifc->ifc_nand.nand_fbcr);
+		ifc_out32(256, &ifc->ifc_nand.nand_fbcr);
 		ifc_nand_ctrl->read_bytes = 256;
 
 		set_addr(mtd, 0, 0, 0);
@@ -480,16 +480,16 @@
 
 	/* ERASE2 uses the block and page address from ERASE1 */
 	case NAND_CMD_ERASE2:
-		iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			    (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-			    (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
-			    &ifc->ifc_nand.nand_fir0);
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+			  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
 
-		iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
-			    (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
-			    &ifc->ifc_nand.nand_fcr0);
+		ifc_out32((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+			  (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
+			  &ifc->ifc_nand.nand_fcr0);
 
-		iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+		ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
 		ifc_nand_ctrl->read_bytes = 0;
 		fsl_ifc_run_command(mtd);
 		return;
@@ -506,19 +506,18 @@
 				(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
 				(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
 
-			iowrite32be(
-				 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-				 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-				 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
-				 (IFC_FIR_OP_WBCD  << IFC_NAND_FIR0_OP3_SHIFT) |
-				 (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
-				 &ifc->ifc_nand.nand_fir0);
-			iowrite32be(
-				 (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
-				 (IFC_FIR_OP_RDSTAT <<
-					IFC_NAND_FIR1_OP6_SHIFT) |
-				 (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
-				 &ifc->ifc_nand.nand_fir1);
+			ifc_out32(
+				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+				(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+				(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
+				&ifc->ifc_nand.nand_fir0);
+			ifc_out32(
+				(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
+				(IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP6_SHIFT) |
+				(IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
+				&ifc->ifc_nand.nand_fir1);
 		} else {
 			nand_fcr0 = ((NAND_CMD_PAGEPROG <<
 					IFC_NAND_FCR0_CMD1_SHIFT) |
@@ -527,20 +526,19 @@
 				    (NAND_CMD_STATUS <<
 					IFC_NAND_FCR0_CMD3_SHIFT));
 
-			iowrite32be(
+			ifc_out32(
 				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
 				(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
 				(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
 				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
 				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
 				&ifc->ifc_nand.nand_fir0);
-			iowrite32be(
-				 (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
-				 (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
-				 (IFC_FIR_OP_RDSTAT <<
-					IFC_NAND_FIR1_OP7_SHIFT) |
-				 (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
-				  &ifc->ifc_nand.nand_fir1);
+			ifc_out32(
+				(IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
+				(IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
+				(IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP7_SHIFT) |
+				(IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
+				&ifc->ifc_nand.nand_fir1);
 
 			if (column >= mtd->writesize)
 				nand_fcr0 |=
@@ -555,7 +553,7 @@
 			column -= mtd->writesize;
 			ifc_nand_ctrl->oob = 1;
 		}
-		iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
+		ifc_out32(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
 		set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
 		return;
 	}
@@ -563,24 +561,26 @@
 	/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
 	case NAND_CMD_PAGEPROG: {
 		if (ifc_nand_ctrl->oob) {
-			iowrite32be(ifc_nand_ctrl->index -
-				    ifc_nand_ctrl->column,
-				    &ifc->ifc_nand.nand_fbcr);
+			ifc_out32(ifc_nand_ctrl->index -
+				  ifc_nand_ctrl->column,
+				  &ifc->ifc_nand.nand_fbcr);
 		} else {
-			iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+			ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
 		}
 
 		fsl_ifc_run_command(mtd);
 		return;
 	}
 
-	case NAND_CMD_STATUS:
-		iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			    (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
-			    &ifc->ifc_nand.nand_fir0);
-		iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
-			    &ifc->ifc_nand.nand_fcr0);
-		iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
+	case NAND_CMD_STATUS: {
+		void __iomem *addr;
+
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+			  &ifc->ifc_nand.nand_fcr0);
+		ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
 		set_addr(mtd, 0, 0, 0);
 		ifc_nand_ctrl->read_bytes = 1;
 
@@ -590,17 +590,19 @@
 		 * The chip always seems to report that it is
 		 * write-protected, even when it is not.
 		 */
+		addr = ifc_nand_ctrl->addr;
 		if (chip->options & NAND_BUSWIDTH_16)
-			setbits16(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+			ifc_out16(ifc_in16(addr) | (NAND_STATUS_WP), addr);
 		else
-			setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+			ifc_out8(ifc_in8(addr) | (NAND_STATUS_WP), addr);
 		return;
+	}
 
 	case NAND_CMD_RESET:
-		iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
-			    &ifc->ifc_nand.nand_fir0);
-		iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
-			    &ifc->ifc_nand.nand_fcr0);
+		ifc_out32(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
+			  &ifc->ifc_nand.nand_fcr0);
 		fsl_ifc_run_command(mtd);
 		return;
 
@@ -658,7 +660,7 @@
 	 */
 	if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
 		offset = ifc_nand_ctrl->index++;
-		return in_8(ifc_nand_ctrl->addr + offset);
+		return ifc_in8(ifc_nand_ctrl->addr + offset);
 	}
 
 	dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
@@ -680,7 +682,7 @@
 	 * next byte.
 	 */
 	if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
-		data = in_be16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
+		data = ifc_in16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
 		ifc_nand_ctrl->index += 2;
 		return (uint8_t) data;
 	}
@@ -726,18 +728,18 @@
 	u32 nand_fsr;
 
 	/* Use READ_STATUS command, but wait for the device to be ready */
-	iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-		    (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
-		    &ifc->ifc_nand.nand_fir0);
-	iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
-		    &ifc->ifc_nand.nand_fcr0);
-	iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
+	ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+		  (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
+		  &ifc->ifc_nand.nand_fir0);
+	ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+		  &ifc->ifc_nand.nand_fcr0);
+	ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
 	set_addr(mtd, 0, 0, 0);
 	ifc_nand_ctrl->read_bytes = 1;
 
 	fsl_ifc_run_command(mtd);
 
-	nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr);
+	nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
 
 	/*
 	 * The chip always seems to report that it is
@@ -829,34 +831,34 @@
 	uint32_t cs = priv->bank;
 
 	/* Save CSOR and CSOR_ext */
-	csor = ioread32be(&ifc->csor_cs[cs].csor);
-	csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext);
+	csor = ifc_in32(&ifc->csor_cs[cs].csor);
+	csor_ext = ifc_in32(&ifc->csor_cs[cs].csor_ext);
 
 	/* change PageSize to 8K and SpareSize to 1K */
 	csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
-	iowrite32be(csor_8k, &ifc->csor_cs[cs].csor);
-	iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext);
+	ifc_out32(csor_8k, &ifc->csor_cs[cs].csor);
+	ifc_out32(0x0000400, &ifc->csor_cs[cs].csor_ext);
 
 	/* READID */
-	iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-		    (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
-		    (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
-		    &ifc->ifc_nand.nand_fir0);
-	iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
-		    &ifc->ifc_nand.nand_fcr0);
-	iowrite32be(0x0, &ifc->ifc_nand.row3);
+	ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+		  (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
+		  (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
+		  &ifc->ifc_nand.nand_fir0);
+	ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
+		  &ifc->ifc_nand.nand_fcr0);
+	ifc_out32(0x0, &ifc->ifc_nand.row3);
 
-	iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr);
+	ifc_out32(0x0, &ifc->ifc_nand.nand_fbcr);
 
 	/* Program ROW0/COL0 */
-	iowrite32be(0x0, &ifc->ifc_nand.row0);
-	iowrite32be(0x0, &ifc->ifc_nand.col0);
+	ifc_out32(0x0, &ifc->ifc_nand.row0);
+	ifc_out32(0x0, &ifc->ifc_nand.col0);
 
 	/* set the chip select for NAND Transaction */
-	iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
+	ifc_out32(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
 
 	/* start read seq */
-	iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+	ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
 
 	/* wait for command complete flag or timeout */
 	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -866,8 +868,8 @@
 		printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
 
 	/* Restore CSOR and CSOR_ext */
-	iowrite32be(csor, &ifc->csor_cs[cs].csor);
-	iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext);
+	ifc_out32(csor, &ifc->csor_cs[cs].csor);
+	ifc_out32(csor_ext, &ifc->csor_cs[cs].csor_ext);
 }
 
 static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
@@ -884,7 +886,7 @@
 
 	/* fill in nand_chip structure */
 	/* set up function call table */
-	if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
+	if ((ifc_in32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
 		chip->read_byte = fsl_ifc_read_byte16;
 	else
 		chip->read_byte = fsl_ifc_read_byte;
@@ -898,13 +900,13 @@
 	chip->bbt_td = &bbt_main_descr;
 	chip->bbt_md = &bbt_mirror_descr;
 
-	iowrite32be(0x0, &ifc->ifc_nand.ncfgr);
+	ifc_out32(0x0, &ifc->ifc_nand.ncfgr);
 
 	/* set up nand options */
 	chip->bbt_options = NAND_BBT_USE_FLASH;
 	chip->options = NAND_NO_SUBPAGE_WRITE;
 
-	if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
+	if (ifc_in32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
 		chip->read_byte = fsl_ifc_read_byte16;
 		chip->options |= NAND_BUSWIDTH_16;
 	} else {
@@ -917,7 +919,7 @@
 	chip->ecc.read_page = fsl_ifc_read_page;
 	chip->ecc.write_page = fsl_ifc_write_page;
 
-	csor = ioread32be(&ifc->csor_cs[priv->bank].csor);
+	csor = ifc_in32(&ifc->csor_cs[priv->bank].csor);
 
 	/* Hardware generates ECC per 512 Bytes */
 	chip->ecc.size = 512;
@@ -1006,7 +1008,7 @@
 static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
 		      phys_addr_t addr)
 {
-	u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr);
+	u32 cspr = ifc_in32(&ifc->cspr_cs[bank].cspr);
 
 	if (!(cspr & CSPR_V))
 		return 0;
@@ -1092,16 +1094,16 @@
 
 	dev_set_drvdata(priv->dev, priv);
 
-	iowrite32be(IFC_NAND_EVTER_EN_OPC_EN |
-		    IFC_NAND_EVTER_EN_FTOER_EN |
-		    IFC_NAND_EVTER_EN_WPER_EN,
-		    &ifc->ifc_nand.nand_evter_en);
+	ifc_out32(IFC_NAND_EVTER_EN_OPC_EN |
+		  IFC_NAND_EVTER_EN_FTOER_EN |
+		  IFC_NAND_EVTER_EN_WPER_EN,
+		  &ifc->ifc_nand.nand_evter_en);
 
 	/* enable NAND Machine Interrupts */
-	iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN |
-		    IFC_NAND_EVTER_INTR_FTOERIR_EN |
-		    IFC_NAND_EVTER_INTR_WPERIR_EN,
-		    &ifc->ifc_nand.nand_evter_intr_en);
+	ifc_out32(IFC_NAND_EVTER_INTR_OPCIR_EN |
+		  IFC_NAND_EVTER_INTR_FTOERIR_EN |
+		  IFC_NAND_EVTER_INTR_WPERIR_EN,
+		  &ifc->ifc_nand.nand_evter_intr_en);
 	priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
 	if (!priv->mtd.name) {
 		ret = -ENOMEM;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c18f9e6..d18eb60 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -180,8 +180,8 @@
 	  will be called vxlan.
 
 config GENEVE
-       tristate "Generic Network Virtualization Encapsulation netdev"
-       depends on INET && GENEVE_CORE
+       tristate "Generic Network Virtualization Encapsulation"
+       depends on INET && NET_UDP_TUNNEL
        select NET_IP_TUNNEL
        ---help---
 	  This allows one to create geneve virtual interfaces that provide
@@ -282,7 +282,6 @@
 config VIRTIO_NET
 	tristate "Virtio network driver"
 	depends on VIRTIO
-	select AVERAGE
 	---help---
 	  This is the virtual network driver for virtio.  It can be used with
 	  lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
@@ -297,6 +296,13 @@
 	  diagnostics, etc. This is mostly intended for developers or support
 	  to debug netlink issues. If unsure, say N.
 
+config NET_VRF
+	tristate "Virtual Routing and Forwarding (Lite)"
+	depends on IP_MULTIPLE_TABLES && IPV6_MULTIPLE_TABLES
+	---help---
+	  This option enables support for mapping interfaces into VRFs,
+	  which in turn enables VRF devices.
+
 endif # NET_CORE
 
 config SUNGEM_PHY
@@ -407,6 +413,13 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called vmxnet3.
 
+config FUJITSU_ES
+	tristate "FUJITSU Extended Socket Network Device driver"
+	depends on ACPI
+	help
+	  This driver provides support for the Extended Socket network device
+	  on Extended Partitioning of the FUJITSU PRIMEQUEST 2000 E2 series.
+
 source "drivers/net/hyperv/Kconfig"
 
 endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index c12cb22..900b0c5 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -25,6 +25,7 @@
 obj-$(CONFIG_VXLAN) += vxlan.o
 obj-$(CONFIG_GENEVE) += geneve.o
 obj-$(CONFIG_NLMON) += nlmon.o
+obj-$(CONFIG_NET_VRF) += vrf.o
 
 #
 # Networking Drivers
@@ -67,3 +68,5 @@
 
 obj-$(CONFIG_HYPERV_NET) += hyperv/
 obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
+
+obj-$(CONFIG_FUJITSU_ES) += fjes/
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 7fde4d5..3c45358 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1870,8 +1870,6 @@
 static void ad_marker_response_received(struct bond_marker *marker,
 					struct port *port)
 {
-	marker = NULL;
-	port = NULL;
 	/* DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW */
 }
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a98dd4f..771a449 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -979,7 +979,6 @@
 		if (bond_3ad_get_active_agg_info(bond, &ad_info))
 			return;
 
-	rcu_read_lock_bh();
 	bond_for_each_slave_rcu(bond, slave, iter) {
 		ops = slave->dev->netdev_ops;
 		if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
@@ -1000,7 +999,6 @@
 		ops->ndo_poll_controller(slave->dev);
 		up(&ni->dev_lock);
 	}
-	rcu_read_unlock_bh();
 }
 
 static void bond_netpoll_cleanup(struct net_device *bond_dev)
@@ -3097,7 +3095,7 @@
 	int noff, proto = -1;
 
 	if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
-		return skb_flow_dissect_flow_keys(skb, fk);
+		return skb_flow_dissect_flow_keys(skb, fk, 0);
 
 	fk->ports.ports = 0;
 	noff = skb_network_offset(skb);
@@ -3780,7 +3778,6 @@
 	struct slave *slave;
 	struct list_head *iter;
 	struct bond_up_slave *new_arr, *old_arr;
-	int slaves_in_agg;
 	int agg_id = 0;
 	int ret = 0;
 
@@ -3811,7 +3808,6 @@
 			}
 			goto out;
 		}
-		slaves_in_agg = ad_info.ports;
 		agg_id = ad_info.aggregator_id;
 	}
 	bond_for_each_slave(bond, slave, iter) {
@@ -4122,9 +4118,8 @@
 	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
 
 	/* Initialize the device options */
-	bond_dev->tx_queue_len = 0;
 	bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
-	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT;
+	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
 	bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
 
 	/* don't acquire bond device's netif_tx_lock when transmitting */
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 1bda292..db760e8 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -111,6 +111,7 @@
 	[IFLA_BOND_AD_USER_PORT_KEY]	= { .type = NLA_U16 },
 	[IFLA_BOND_AD_ACTOR_SYSTEM]	= { .type = NLA_BINARY,
 					    .len  = ETH_ALEN },
+	[IFLA_BOND_TLB_DYNAMIC_LB]	= { .type = NLA_U8 },
 };
 
 static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -405,7 +406,6 @@
 		if (err)
 			return err;
 	}
-
 	if (data[IFLA_BOND_AD_USER_PORT_KEY]) {
 		int port_key =
 			nla_get_u16(data[IFLA_BOND_AD_USER_PORT_KEY]);
@@ -415,7 +415,6 @@
 		if (err)
 			return err;
 	}
-
 	if (data[IFLA_BOND_AD_ACTOR_SYSTEM]) {
 		if (nla_len(data[IFLA_BOND_AD_ACTOR_SYSTEM]) != ETH_ALEN)
 			return -EINVAL;
@@ -426,6 +425,15 @@
 		if (err)
 			return err;
 	}
+	if (data[IFLA_BOND_TLB_DYNAMIC_LB]) {
+		int dynamic_lb = nla_get_u8(data[IFLA_BOND_TLB_DYNAMIC_LB]);
+
+		bond_opt_initval(&newval, dynamic_lb);
+		err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval);
+		if (err)
+			return err;
+	}
+
 	return 0;
 }
 
@@ -476,6 +484,7 @@
 		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_ACTOR_SYS_PRIO */
 		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_USER_PORT_KEY */
 		nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */
+		nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */
 		0;
 }
 
@@ -598,6 +607,10 @@
 		       bond->params.ad_select))
 		goto nla_put_failure;
 
+	if (nla_put_u8(skb, IFLA_BOND_TLB_DYNAMIC_LB,
+		       bond->params.tlb_dynamic_lb))
+		goto nla_put_failure;
+
 	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
 		struct ad_info info;
 
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index e9c624d..6dda57e 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -420,6 +420,13 @@
 		.flags = BOND_OPTFLAG_IFDOWN,
 		.values = bond_ad_user_port_key_tbl,
 		.set = bond_option_ad_user_port_key_set,
+	},
+	[BOND_OPT_NUM_PEER_NOTIF_ALIAS] = {
+		.id = BOND_OPT_NUM_PEER_NOTIF_ALIAS,
+		.name = "num_grat_arp",
+		.desc = "Number of peer notifications to send on failover event",
+		.values = bond_num_peer_notif_tbl,
+		.set = bond_option_num_peer_notif_set
 	}
 };
 
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 31835a4..f4ae720 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -380,7 +380,7 @@
 static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
 		   bonding_show_ad_select, bonding_sysfs_store_option);
 
-/* Show and set the number of peer notifications to send after a failover event. */
+/* Show the number of peer notifications to send after a failover event. */
 static ssize_t bonding_show_num_peer_notif(struct device *d,
 					   struct device_attribute *attr,
 					   char *buf)
@@ -388,24 +388,10 @@
 	struct bonding *bond = to_bond(d);
 	return sprintf(buf, "%d\n", bond->params.num_peer_notif);
 }
-
-static ssize_t bonding_store_num_peer_notif(struct device *d,
-					    struct device_attribute *attr,
-					    const char *buf, size_t count)
-{
-	struct bonding *bond = to_bond(d);
-	int ret;
-
-	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_NUM_PEER_NOTIF, (char *)buf);
-	if (!ret)
-		ret = count;
-
-	return ret;
-}
 static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
-		   bonding_show_num_peer_notif, bonding_store_num_peer_notif);
+		   bonding_show_num_peer_notif, bonding_sysfs_store_option);
 static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
-		   bonding_show_num_peer_notif, bonding_store_num_peer_notif);
+		   bonding_show_num_peer_notif, bonding_sysfs_store_option);
 
 /* Show the MII monitor interval. */
 static ssize_t bonding_show_miimon(struct device *d,
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index b3b922a..615c65d 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1120,7 +1120,7 @@
 	dev->type = ARPHRD_CAIF;
 	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
 	dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->destructor = free_netdev;
 	dev->netdev_ops = &cfhsi_netdevops;
 	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 9da0653..c2dea49 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -427,7 +427,7 @@
 	dev->type = ARPHRD_CAIF;
 	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
 	dev->mtu = CAIF_MAX_MTU;
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->destructor = free_netdev;
 	skb_queue_head_init(&serdev->head);
 	serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 72ea9ff..de39620 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -710,7 +710,7 @@
 	dev->netdev_ops = &cfspi_ops;
 	dev->type = ARPHRD_CAIF;
 	dev->flags = IFF_NOARP | IFF_POINTOPOINT;
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->mtu = SPI_MAX_PAYLOAD_SIZE;
 	dev->destructor = free_netdev;
 	skb_queue_head_init(&cfspi->qhead);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index b1e8d72..c83f0f03 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -805,7 +805,7 @@
 	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
 		reg |= FLEXCAN_CTRL_SMP;
 
-	netdev_info(dev, "writing ctrl=0x%08x\n", reg);
+	netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
 	flexcan_write(reg, &regs->ctrl);
 
 	/* print chip status */
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 8b4d3e6..5eee62b 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -162,7 +162,7 @@
 	struct can_bittiming_const bt_const;
 	unsigned int channel;	/* channel number */
 
-	/* This lock prevents a race condition between xmit and recieve. */
+	/* This lock prevents a race condition between xmit and receive. */
 	spinlock_t tx_ctx_lock;
 	struct gs_tx_context tx_context[GS_MAX_TX_URBS];
 
@@ -274,7 +274,7 @@
 	}
 }
 
-static void gs_usb_recieve_bulk_callback(struct urb *urb)
+static void gs_usb_receive_bulk_callback(struct urb *urb)
 {
 	struct gs_usb *usbcan = urb->context;
 	struct gs_can *dev;
@@ -376,7 +376,7 @@
 			  usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
 			  hf,
 			  sizeof(struct gs_host_frame),
-			  gs_usb_recieve_bulk_callback,
+			  gs_usb_receive_bulk_callback,
 			  usbcan
 			  );
 
@@ -605,7 +605,7 @@
 							  GSUSB_ENDPOINT_IN),
 					  buf,
 					  sizeof(struct gs_host_frame),
-					  gs_usb_recieve_bulk_callback,
+					  gs_usb_receive_bulk_callback,
 					  parent);
 			urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 7ad0a4d..4c483d9 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -46,13 +46,13 @@
 	  ethernet switches chips.
 
 config NET_DSA_MV88E6352
-	tristate "Marvell 88E6172/88E6176/88E6352 ethernet switch chip support"
+	tristate "Marvell 88E6172/6176/6320/6321/6352 ethernet switch chip support"
 	depends on NET_DSA
 	select NET_DSA_MV88E6XXX
 	select NET_DSA_TAG_EDSA
 	---help---
-	  This enables support for the Marvell 88E6172, 88E6176 and 88E6352
-	  ethernet switch chips.
+	  This enables support for the Marvell 88E6172, 88E6176, 88E6320,
+	  88E6321 and 88E6352 ethernet switch chips.
 
 config NET_DSA_BCM_SF2
 	tristate "Broadcom Starfighter 2 Ethernet switch support"
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 079897b..289e2044 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -901,15 +901,11 @@
 					 struct fixed_phy_status *status)
 {
 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
-	u32 duplex, pause, speed;
+	u32 duplex, pause;
 	u32 reg;
 
 	duplex = core_readl(priv, CORE_DUPSTS);
 	pause = core_readl(priv, CORE_PAUSESTS);
-	speed = core_readl(priv, CORE_SPDSTS);
-
-	speed >>= (port * SPDSTS_SHIFT);
-	speed &= SPDSTS_MASK;
 
 	status->link = 0;
 
@@ -944,18 +940,6 @@
 		reg &= ~LINK_STS;
 	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
 
-	switch (speed) {
-	case SPDSTS_10:
-		status->speed = SPEED_10;
-		break;
-	case SPDSTS_100:
-		status->speed = SPEED_100;
-		break;
-	case SPDSTS_1000:
-		status->speed = SPEED_1000;
-		break;
-	}
-
 	if ((pause & (1 << port)) &&
 	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
 		status->asym_pause = 1;
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index 71a29a7..3de2a6d 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -129,6 +129,7 @@
 	.get_strings		= mv88e6xxx_get_strings,
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
+	.adjust_link		= mv88e6xxx_adjust_link,
 #ifdef CONFIG_NET_DSA_HWMON
 	.get_temp		= mv88e6xxx_get_temp,
 #endif
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index 32f4a08..3e83865 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -182,6 +182,7 @@
 	.get_strings		= mv88e6xxx_get_strings,
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
+	.adjust_link		= mv88e6xxx_adjust_link,
 };
 
 MODULE_ALIAS("platform:mv88e6085");
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index 1c78084..d54b740 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -108,6 +108,7 @@
 	.get_strings		= mv88e6xxx_get_strings,
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
+	.adjust_link		= mv88e6xxx_adjust_link,
 #ifdef CONFIG_NET_DSA_HWMON
 	.get_temp               = mv88e6xxx_get_temp,
 #endif
@@ -116,9 +117,9 @@
 	.port_join_bridge       = mv88e6xxx_join_bridge,
 	.port_leave_bridge      = mv88e6xxx_leave_bridge,
 	.port_stp_update        = mv88e6xxx_port_stp_update,
-	.fdb_add		= mv88e6xxx_port_fdb_add,
-	.fdb_del		= mv88e6xxx_port_fdb_del,
-	.fdb_getnext		= mv88e6xxx_port_fdb_getnext,
+	.port_fdb_add		= mv88e6xxx_port_fdb_add,
+	.port_fdb_del		= mv88e6xxx_port_fdb_del,
+	.port_fdb_getnext	= mv88e6xxx_port_fdb_getnext,
 };
 
 MODULE_ALIAS("platform:mv88e6171");
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index 632815c..1f5129c 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -36,6 +36,18 @@
 			return "Marvell 88E6172";
 		if ((ret & 0xfff0) == PORT_SWITCH_ID_6176)
 			return "Marvell 88E6176";
+		if (ret == PORT_SWITCH_ID_6320_A1)
+			return "Marvell 88E6320 (A1)";
+		if (ret == PORT_SWITCH_ID_6320_A2)
+			return "Marvell 88E6320 (A2)";
+		if ((ret & 0xfff0) == PORT_SWITCH_ID_6320)
+			return "Marvell 88E6320";
+		if (ret == PORT_SWITCH_ID_6321_A1)
+			return "Marvell 88E6321 (A1)";
+		if (ret == PORT_SWITCH_ID_6321_A2)
+			return "Marvell 88E6321 (A2)";
+		if ((ret & 0xfff0) == PORT_SWITCH_ID_6321)
+			return "Marvell 88E6321";
 		if (ret == PORT_SWITCH_ID_6352_A0)
 			return "Marvell 88E6352 (A0)";
 		if (ret == PORT_SWITCH_ID_6352_A1)
@@ -80,66 +92,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_NET_DSA_HWMON
-
-static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
-{
-	int ret;
-
-	*temp = 0;
-
-	ret = mv88e6xxx_phy_page_read(ds, 0, 6, 27);
-	if (ret < 0)
-		return ret;
-
-	*temp = (ret & 0xff) - 25;
-
-	return 0;
-}
-
-static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp)
-{
-	int ret;
-
-	*temp = 0;
-
-	ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
-	if (ret < 0)
-		return ret;
-
-	*temp = (((ret >> 8) & 0x1f) * 5) - 25;
-
-	return 0;
-}
-
-static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp)
-{
-	int ret;
-
-	ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
-	if (ret < 0)
-		return ret;
-	temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
-	return mv88e6xxx_phy_page_write(ds, 0, 6, 26,
-					(ret & 0xe0ff) | (temp << 8));
-}
-
-static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
-{
-	int ret;
-
-	*alarm = false;
-
-	ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
-	if (ret < 0)
-		return ret;
-
-	*alarm = !!(ret & 0x40);
-
-	return 0;
-}
-#endif /* CONFIG_NET_DSA_HWMON */
-
 static int mv88e6352_setup(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -171,8 +123,9 @@
 
 	mutex_lock(&ps->eeprom_mutex);
 
-	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
-				  0xc000 | (addr & 0xff));
+	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+				  GLOBAL2_EEPROM_OP_READ |
+				  (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
 	if (ret < 0)
 		goto error;
 
@@ -180,7 +133,7 @@
 	if (ret < 0)
 		goto error;
 
-	ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x15);
+	ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
 error:
 	mutex_unlock(&ps->eeprom_mutex);
 	return ret;
@@ -253,11 +206,11 @@
 {
 	int ret;
 
-	ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x14);
+	ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
 	if (ret < 0)
 		return ret;
 
-	if (!(ret & 0x0400))
+	if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
 		return -EROFS;
 
 	return 0;
@@ -271,12 +224,13 @@
 
 	mutex_lock(&ps->eeprom_mutex);
 
-	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x15, data);
+	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
 	if (ret < 0)
 		goto error;
 
-	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
-				  0xb000 | (addr & 0xff));
+	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+				  GLOBAL2_EEPROM_OP_WRITE |
+				  (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
 	if (ret < 0)
 		goto error;
 
@@ -374,13 +328,14 @@
 	.get_strings		= mv88e6xxx_get_strings,
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
+	.adjust_link		= mv88e6xxx_adjust_link,
 	.set_eee		= mv88e6xxx_set_eee,
 	.get_eee		= mv88e6xxx_get_eee,
 #ifdef CONFIG_NET_DSA_HWMON
-	.get_temp		= mv88e6352_get_temp,
-	.get_temp_limit		= mv88e6352_get_temp_limit,
-	.set_temp_limit		= mv88e6352_set_temp_limit,
-	.get_temp_alarm		= mv88e6352_get_temp_alarm,
+	.get_temp		= mv88e6xxx_get_temp,
+	.get_temp_limit		= mv88e6xxx_get_temp_limit,
+	.set_temp_limit		= mv88e6xxx_set_temp_limit,
+	.get_temp_alarm		= mv88e6xxx_get_temp_alarm,
 #endif
 	.get_eeprom		= mv88e6352_get_eeprom,
 	.set_eeprom		= mv88e6352_set_eeprom,
@@ -389,10 +344,18 @@
 	.port_join_bridge	= mv88e6xxx_join_bridge,
 	.port_leave_bridge	= mv88e6xxx_leave_bridge,
 	.port_stp_update	= mv88e6xxx_port_stp_update,
-	.fdb_add		= mv88e6xxx_port_fdb_add,
-	.fdb_del		= mv88e6xxx_port_fdb_del,
-	.fdb_getnext		= mv88e6xxx_port_fdb_getnext,
+	.port_pvid_get		= mv88e6xxx_port_pvid_get,
+	.port_pvid_set		= mv88e6xxx_port_pvid_set,
+	.port_vlan_add		= mv88e6xxx_port_vlan_add,
+	.port_vlan_del		= mv88e6xxx_port_vlan_del,
+	.vlan_getnext		= mv88e6xxx_vlan_getnext,
+	.port_fdb_add		= mv88e6xxx_port_fdb_add,
+	.port_fdb_del		= mv88e6xxx_port_fdb_del,
+	.port_fdb_getnext	= mv88e6xxx_port_fdb_getnext,
 };
 
-MODULE_ALIAS("platform:mv88e6352");
 MODULE_ALIAS("platform:mv88e6172");
+MODULE_ALIAS("platform:mv88e6176");
+MODULE_ALIAS("platform:mv88e6320");
+MODULE_ALIAS("platform:mv88e6321");
+MODULE_ALIAS("platform:mv88e6352");
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 5613424..6f13f72 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2,6 +2,9 @@
  * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
  * Copyright (c) 2008 Marvell Semiconductor
  *
+ * Copyright (c) 2015 CMC Electronics, Inc.
+ *	Added support for VLAN Table Unit operations
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -11,6 +14,7 @@
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
+#include <linux/ethtool.h>
 #include <linux/if_bridge.h>
 #include <linux/jiffies.h>
 #include <linux/list.h>
@@ -391,6 +395,7 @@
 	for (i = 0; i < DSA_MAX_PORTS; i++) {
 		struct net_device *dev;
 		int uninitialized_var(port_status);
+		int pcs_ctrl;
 		int link;
 		int speed;
 		int duplex;
@@ -400,6 +405,10 @@
 		if (dev == NULL)
 			continue;
 
+		pcs_ctrl = mv88e6xxx_reg_read(ds, REG_PORT(i), PORT_PCS_CTRL);
+		if (pcs_ctrl < 0 || pcs_ctrl & PORT_PCS_CTRL_FORCE_LINK)
+			continue;
+
 		link = 0;
 		if (dev->flags & IFF_UP) {
 			port_status = mv88e6xxx_reg_read(ds, REG_PORT(i),
@@ -517,6 +526,18 @@
 	return false;
 }
 
+static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+	switch (ps->id) {
+	case PORT_SWITCH_ID_6320:
+	case PORT_SWITCH_ID_6321:
+		return true;
+	}
+	return false;
+}
+
 static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -545,6 +566,73 @@
 	return false;
 }
 
+/* We expect the switch to perform auto negotiation if there is a real
+ * phy. However, in the case of a fixed link phy, we force the port
+ * settings from the fixed link settings.
+ */
+void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
+			   struct phy_device *phydev)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	u32 ret, reg;
+
+	if (!phy_is_pseudo_fixed_link(phydev))
+		return;
+
+	mutex_lock(&ps->smi_mutex);
+
+	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
+	if (ret < 0)
+		goto out;
+
+	reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
+		      PORT_PCS_CTRL_FORCE_LINK |
+		      PORT_PCS_CTRL_DUPLEX_FULL |
+		      PORT_PCS_CTRL_FORCE_DUPLEX |
+		      PORT_PCS_CTRL_UNFORCED);
+
+	reg |= PORT_PCS_CTRL_FORCE_LINK;
+	if (phydev->link)
+		reg |= PORT_PCS_CTRL_LINK_UP;
+
+	if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
+		goto out;
+
+	switch (phydev->speed) {
+	case SPEED_1000:
+		reg |= PORT_PCS_CTRL_1000;
+		break;
+	case SPEED_100:
+		reg |= PORT_PCS_CTRL_100;
+		break;
+	case SPEED_10:
+		reg |= PORT_PCS_CTRL_10;
+		break;
+	default:
+		pr_info("Unknown speed\n");
+		goto out;
+	}
+
+	reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
+	if (phydev->duplex == DUPLEX_FULL)
+		reg |= PORT_PCS_CTRL_DUPLEX_FULL;
+
+	if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
+	    (port >= ps->num_ports - 2)) {
+		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+			reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
+		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+			reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
+		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+			reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
+				PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
+	}
+	_mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);
+
+out:
+	mutex_unlock(&ps->smi_mutex);
+}
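
The speed handling above can also be read as a small pure mapping. A sketch for clarity only; note the function in this patch logs and bails out on unknown speeds rather than falling back to unforced:

/* Sketch: fixed-link speed to the PORT_PCS_CTRL speed field, mirroring
 * the switch statement in mv88e6xxx_adjust_link (constants as used in
 * this patch).
 */
static u32 pcs_ctrl_speed_bits(int speed)
{
	switch (speed) {
	case SPEED_1000:
		return PORT_PCS_CTRL_1000;
	case SPEED_100:
		return PORT_PCS_CTRL_100;
	case SPEED_10:
		return PORT_PCS_CTRL_10;
	default:
		return PORT_PCS_CTRL_UNFORCED;	/* leave the speed unforced */
	}
}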
+
 /* Must be called with SMI mutex held */
 static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
 {
@@ -565,7 +653,7 @@
 {
 	int ret;
 
-	if (mv88e6xxx_6352_family(ds))
+	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
 		port = (port + 1) << 5;
 
 	/* Snapshot the hardware statistics counters for this port. */
@@ -796,54 +884,6 @@
 	}
 }
 
-#ifdef CONFIG_NET_DSA_HWMON
-
-int  mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int ret;
-	int val;
-
-	*temp = 0;
-
-	mutex_lock(&ps->smi_mutex);
-
-	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
-	if (ret < 0)
-		goto error;
-
-	/* Enable temperature sensor */
-	ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
-	if (ret < 0)
-		goto error;
-
-	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
-	if (ret < 0)
-		goto error;
-
-	/* Wait for temperature to stabilize */
-	usleep_range(10000, 12000);
-
-	val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
-	if (val < 0) {
-		ret = val;
-		goto error;
-	}
-
-	/* Disable temperature sensor */
-	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
-	if (ret < 0)
-		goto error;
-
-	*temp = ((val & 0x1f) - 5) * 5;
-
-error:
-	_mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
-	mutex_unlock(&ps->smi_mutex);
-	return ret;
-}
-#endif /* CONFIG_NET_DSA_HWMON */
-
 /* Must be called with SMI lock held */
 static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
 			   u16 mask)
@@ -1000,7 +1040,7 @@
 {
 	int ret;
 
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x01, fid);
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
 	if (ret < 0)
 		return ret;
 
@@ -1127,7 +1167,7 @@
 	ps->bridge_mask[fid] = br_port_mask;
 
 	if (fid != ps->fid[port]) {
-		ps->fid_mask |= 1 << ps->fid[port];
+		clear_bit(ps->fid[port], ps->fid_bitmap);
 		ps->fid[port] = fid;
 		ret = _mv88e6xxx_update_bridge_config(ds, fid);
 	}
@@ -1161,9 +1201,16 @@
 
 	mutex_lock(&ps->smi_mutex);
 
-	newfid = __ffs(ps->fid_mask);
+	newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1);
+	if (unlikely(newfid > ps->num_ports)) {
+		netdev_err(ds->ports[port], "all %d port FIDs are in use\n",
+			   ps->num_ports);
+		ret = -ENOSPC;
+		goto unlock;
+	}
+
 	ps->fid[port] = newfid;
-	ps->fid_mask &= ~(1 << newfid);
+	set_bit(newfid, ps->fid_bitmap);
 	ps->bridge_mask[fid] &= ~(1 << port);
 	ps->bridge_mask[newfid] = 1 << port;
 
@@ -1171,6 +1218,7 @@
 	if (!ret)
 		ret = _mv88e6xxx_update_bridge_config(ds, newfid);
 
+unlock:
 	mutex_unlock(&ps->smi_mutex);
 
 	return ret;
@@ -1210,8 +1258,476 @@
 	return 0;
 }
 
-static int __mv88e6xxx_write_addr(struct dsa_switch *ds,
-				  const unsigned char *addr)
+int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
+{
+	int ret;
+
+	ret = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
+	if (ret < 0)
+		return ret;
+
+	*pvid = ret & PORT_DEFAULT_VLAN_MASK;
+
+	return 0;
+}
+
+int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
+{
+	return mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
+				   pvid & PORT_DEFAULT_VLAN_MASK);
+}
+
+static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
+{
+	return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
+			       GLOBAL_VTU_OP_BUSY);
+}
+
+static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
+{
+	int ret;
+
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
+	if (ret < 0)
+		return ret;
+
+	return _mv88e6xxx_vtu_wait(ds);
+}
+
+static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
+{
+	int ret;
+
+	ret = _mv88e6xxx_vtu_wait(ds);
+	if (ret < 0)
+		return ret;
+
+	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
+}
+
+static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
+					struct mv88e6xxx_vtu_stu_entry *entry,
+					unsigned int nibble_offset)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	u16 regs[3];
+	int i;
+	int ret;
+
+	for (i = 0; i < 3; ++i) {
+		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+					  GLOBAL_VTU_DATA_0_3 + i);
+		if (ret < 0)
+			return ret;
+
+		regs[i] = ret;
+	}
+
+	for (i = 0; i < ps->num_ports; ++i) {
+		unsigned int shift = (i % 4) * 4 + nibble_offset;
+		u16 reg = regs[i / 4];
+
+		entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
+	}
+
+	return 0;
+}
+
+static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
+					 struct mv88e6xxx_vtu_stu_entry *entry,
+					 unsigned int nibble_offset)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	u16 regs[3] = { 0 };
+	int i;
+	int ret;
+
+	for (i = 0; i < ps->num_ports; ++i) {
+		unsigned int shift = (i % 4) * 4 + nibble_offset;
+		u8 data = entry->data[i];
+
+		regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
+	}
+
+	for (i = 0; i < 3; ++i) {
+		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
+					   GLOBAL_VTU_DATA_0_3 + i, regs[i]);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
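The two helpers above pack one value per port, four ports per 16-bit register, with nibble_offset selecting the VTU member-tag bits (0) or the STU port-state bits (2) inside each port's nibble. A worked example for port 6:

	/* sketch: port 6 lands in regs[1], i.e. GLOBAL_VTU_DATA_4_7 */
	u8 tag   = (regs[6 / 4] >> ((6 % 4) * 4 + 0)) & GLOBAL_VTU_STU_DATA_MASK; /* bits 9:8   */
	u8 state = (regs[6 / 4] >> ((6 % 4) * 4 + 2)) & GLOBAL_VTU_STU_DATA_MASK; /* bits 11:10 */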
+static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid,
+				  struct mv88e6xxx_vtu_stu_entry *entry)
+{
+	struct mv88e6xxx_vtu_stu_entry next = { 0 };
+	int ret;
+
+	ret = _mv88e6xxx_vtu_wait(ds);
+	if (ret < 0)
+		return ret;
+
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
+				   vid & GLOBAL_VTU_VID_MASK);
+	if (ret < 0)
+		return ret;
+
+	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
+	if (ret < 0)
+		return ret;
+
+	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
+	if (ret < 0)
+		return ret;
+
+	next.vid = ret & GLOBAL_VTU_VID_MASK;
+	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
+
+	if (next.valid) {
+		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
+		if (ret < 0)
+			return ret;
+
+		if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
+		    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+						  GLOBAL_VTU_FID);
+			if (ret < 0)
+				return ret;
+
+			next.fid = ret & GLOBAL_VTU_FID_MASK;
+
+			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+						  GLOBAL_VTU_SID);
+			if (ret < 0)
+				return ret;
+
+			next.sid = ret & GLOBAL_VTU_SID_MASK;
+		}
+	}
+
+	*entry = next;
+	return 0;
+}
+
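GetNext returns the first valid entry with a VID strictly greater than the one written, wrapping past 0xfff (the same wrap-around the STU lookup in _mv88e6xxx_vlan_init below relies on). A hedged sketch of a full VTU walk built on this helper:

	/* sketch: iterate every valid VTU entry */
	struct mv88e6xxx_vtu_stu_entry e;
	u16 vid = GLOBAL_VTU_VID_MASK;	/* 0xfff wraps to the lowest VID */

	do {
		if (_mv88e6xxx_vtu_getnext(ds, vid, &e) || !e.valid)
			break;
		vid = e.vid;
		/* ... inspect e.fid, e.sid, e.data[port] ... */
	} while (vid < GLOBAL_VTU_VID_MASK);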
+static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
+				    struct mv88e6xxx_vtu_stu_entry *entry)
+{
+	u16 reg = 0;
+	int ret;
+
+	ret = _mv88e6xxx_vtu_wait(ds);
+	if (ret < 0)
+		return ret;
+
+	if (!entry->valid)
+		goto loadpurge;
+
+	/* Write port member tags */
+	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
+	if (ret < 0)
+		return ret;
+
+	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
+	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+		reg = entry->sid & GLOBAL_VTU_SID_MASK;
+		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+		if (ret < 0)
+			return ret;
+
+		reg = entry->fid & GLOBAL_VTU_FID_MASK;
+		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
+		if (ret < 0)
+			return ret;
+	}
+
+	reg = GLOBAL_VTU_VID_VALID;
+loadpurge:
+	reg |= entry->vid & GLOBAL_VTU_VID_MASK;
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+	if (ret < 0)
+		return ret;
+
+	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
+}
+
+static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
+				  struct mv88e6xxx_vtu_stu_entry *entry)
+{
+	struct mv88e6xxx_vtu_stu_entry next = { 0 };
+	int ret;
+
+	ret = _mv88e6xxx_vtu_wait(ds);
+	if (ret < 0)
+		return ret;
+
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
+				   sid & GLOBAL_VTU_SID_MASK);
+	if (ret < 0)
+		return ret;
+
+	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
+	if (ret < 0)
+		return ret;
+
+	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
+	if (ret < 0)
+		return ret;
+
+	next.sid = ret & GLOBAL_VTU_SID_MASK;
+
+	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
+	if (ret < 0)
+		return ret;
+
+	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
+
+	if (next.valid) {
+		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
+		if (ret < 0)
+			return ret;
+	}
+
+	*entry = next;
+	return 0;
+}
+
+static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
+				    struct mv88e6xxx_vtu_stu_entry *entry)
+{
+	u16 reg = 0;
+	int ret;
+
+	ret = _mv88e6xxx_vtu_wait(ds);
+	if (ret < 0)
+		return ret;
+
+	if (!entry->valid)
+		goto loadpurge;
+
+	/* Write port states */
+	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
+	if (ret < 0)
+		return ret;
+
+	reg = GLOBAL_VTU_VID_VALID;
+loadpurge:
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+	if (ret < 0)
+		return ret;
+
+	reg = entry->sid & GLOBAL_VTU_SID_MASK;
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+	if (ret < 0)
+		return ret;
+
+	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
+}
+
+static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
+				struct mv88e6xxx_vtu_stu_entry *entry)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	struct mv88e6xxx_vtu_stu_entry vlan = {
+		.valid = true,
+		.vid = vid,
+	};
+	int i;
+
+	/* exclude all ports except the CPU */
+	for (i = 0; i < ps->num_ports; ++i)
+		vlan.data[i] = dsa_is_cpu_port(ds, i) ?
+			GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED :
+			GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
+
+	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
+	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+		struct mv88e6xxx_vtu_stu_entry vstp;
+		int err;
+
+		/* Adding a VTU entry requires a valid STU entry. As VSTP is not
+		 * implemented, only one STU entry is needed to cover all VTU
+		 * entries. Thus, make sure SID 0 is valid.
+		 */
+		vlan.sid = 0;
+		err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
+		if (err)
+			return err;
+
+		if (vstp.sid != vlan.sid || !vstp.valid) {
+			memset(&vstp, 0, sizeof(vstp));
+			vstp.valid = true;
+			vstp.sid = vlan.sid;
+
+			err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
+			if (err)
+				return err;
+		}
+
+		/* Non-bridged ports and bridge groups use FIDs from 1 to
+		 * num_ports; VLANs use FIDs from num_ports+1 to 4095.
+		 */
+		vlan.fid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID,
+					      ps->num_ports + 1);
+		if (unlikely(vlan.fid == VLAN_N_VID)) {
+			pr_err("no more FIDs available for VLAN %d\n", vid);
+			return -ENOSPC;
+		}
+
+		err = _mv88e6xxx_flush_fid(ds, vlan.fid);
+		if (err)
+			return err;
+
+		set_bit(vlan.fid, ps->fid_bitmap);
+	}
+
+	*entry = vlan;
+	return 0;
+}
+
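The FID space this function draws from is statically partitioned, as the comment above describes; on a 7-port switch (num_ports == 7) the layout works out to:

	/* sketch: FID allocation on a 7-port switch
	 *   FID 0        -- never allocated
	 *   FIDs 1..7    -- one per non-bridged port or bridge group
	 *   FIDs 8..4095 -- handed out on demand to 802.1Q VLANs
	 */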
+int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+			    bool untagged)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	struct mv88e6xxx_vtu_stu_entry vlan;
+	int err;
+
+	mutex_lock(&ps->smi_mutex);
+	err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
+	if (err)
+		goto unlock;
+
+	if (vlan.vid != vid || !vlan.valid) {
+		err = _mv88e6xxx_vlan_init(ds, vid, &vlan);
+		if (err)
+			goto unlock;
+	}
+
+	vlan.data[port] = untagged ?
+		GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
+		GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
+
+	err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
+unlock:
+	mutex_unlock(&ps->smi_mutex);
+
+	return err;
+}
+
+int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	struct mv88e6xxx_vtu_stu_entry vlan;
+	bool keep = false;
+	int i, err;
+
+	mutex_lock(&ps->smi_mutex);
+
+	err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
+	if (err)
+		goto unlock;
+
+	if (vlan.vid != vid || !vlan.valid ||
+	    vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
+		err = -ENOENT;
+		goto unlock;
+	}
+
+	vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
+
+	/* keep the VLAN unless all ports are excluded */
+	for (i = 0; i < ps->num_ports; ++i) {
+		if (dsa_is_cpu_port(ds, i))
+			continue;
+
+		if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
+			keep = true;
+			break;
+		}
+	}
+
+	vlan.valid = keep;
+	err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
+	if (err)
+		goto unlock;
+
+	if (!keep)
+		clear_bit(vlan.fid, ps->fid_bitmap);
+
+unlock:
+	mutex_unlock(&ps->smi_mutex);
+
+	return err;
+}
+
+static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch *ds, int port, u16 vid,
+				       struct mv88e6xxx_vtu_stu_entry *entry)
+{
+	int err;
+
+	do {
+		if (vid == 4095)
+			return -ENOENT;
+
+		err = _mv88e6xxx_vtu_getnext(ds, vid, entry);
+		if (err)
+			return err;
+
+		if (!entry->valid)
+			return -ENOENT;
+
+		vid = entry->vid;
+	} while (entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED &&
+		 entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED);
+
+	return 0;
+}
+
+int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
+			   unsigned long *ports, unsigned long *untagged)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	struct mv88e6xxx_vtu_stu_entry next;
+	int port;
+	int err;
+
+	if (*vid == 4095)
+		return -ENOENT;
+
+	mutex_lock(&ps->smi_mutex);
+	err = _mv88e6xxx_vtu_getnext(ds, *vid, &next);
+	mutex_unlock(&ps->smi_mutex);
+
+	if (err)
+		return err;
+
+	if (!next.valid)
+		return -ENOENT;
+
+	*vid = next.vid;
+
+	for (port = 0; port < ps->num_ports; ++port) {
+		clear_bit(port, ports);
+		clear_bit(port, untagged);
+
+		if (dsa_is_cpu_port(ds, port))
+			continue;
+
+		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED ||
+		    next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
+			set_bit(port, ports);
+
+		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
+			set_bit(port, untagged);
+	}
+
+	return 0;
+}
+
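A hedged sketch of a dump loop driving this helper: start below the first VID and stop on -ENOENT, with ports/untagged sized as one bit per switch port:

	/* sketch: enumerate configured VLANs on switch ds */
	DECLARE_BITMAP(ports, DSA_MAX_PORTS);
	DECLARE_BITMAP(untagged, DSA_MAX_PORTS);
	u16 vid = 0;

	while (!mv88e6xxx_vlan_getnext(ds, &vid, ports, untagged))
		pr_debug("vid %u ports %*pb\n", vid, DSA_MAX_PORTS, ports);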
+static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
+				    const unsigned char *addr)
 {
 	int i, ret;
 
@@ -1226,7 +1742,7 @@
 	return 0;
 }
 
-static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr)
+static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
 {
 	int i, ret;
 
@@ -1242,29 +1758,83 @@
 	return 0;
 }
 
-static int __mv88e6xxx_port_fdb_cmd(struct dsa_switch *ds, int port,
-				    const unsigned char *addr, int state)
+static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
+			       struct mv88e6xxx_atu_entry *entry)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	u8 fid = ps->fid[port];
+	u16 reg = 0;
 	int ret;
 
 	ret = _mv88e6xxx_atu_wait(ds);
 	if (ret < 0)
 		return ret;
 
-	ret = __mv88e6xxx_write_addr(ds, addr);
+	ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA,
-				   (0x10 << port) | state);
-	if (ret)
+	if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+		unsigned int mask, shift;
+
+		if (entry->trunk) {
+			reg |= GLOBAL_ATU_DATA_TRUNK;
+			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
+			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
+		} else {
+			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
+			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
+		}
+
+		reg |= (entry->portv_trunkid << shift) & mask;
+	}
+
+	reg |= entry->state & GLOBAL_ATU_DATA_STATE_MASK;
+
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, reg);
+	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_LOAD_DB);
+	return _mv88e6xxx_atu_cmd(ds, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
+}
 
-	return ret;
+static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	struct mv88e6xxx_vtu_stu_entry vlan;
+	int err;
+
+	if (vid == 0)
+		return ps->fid[port];
+
+	err = _mv88e6xxx_port_vtu_getnext(ds, port, vid - 1, &vlan);
+	if (err)
+		return err;
+
+	if (vlan.vid == vid)
+		return vlan.fid;
+
+	return -ENOENT;
+}
+
+static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
+				    const unsigned char *addr, u16 vid,
+				    u8 state)
+{
+	struct mv88e6xxx_atu_entry entry = { 0 };
+	int ret;
+
+	ret = _mv88e6xxx_port_vid_to_fid(ds, port, vid);
+	if (ret < 0)
+		return ret;
+
+	entry.fid = ret;
+	entry.state = state;
+	ether_addr_copy(entry.mac, addr);
+	if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+		entry.trunk = false;
+		entry.portv_trunkid = BIT(port);
+	}
+
+	return _mv88e6xxx_atu_load(ds, &entry);
 }
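A worked example of what _mv88e6xxx_atu_load() ends up writing to GLOBAL_ATU_DATA for the non-trunk case (assuming GLOBAL_ATU_DATA_STATE_UC_STATIC is 0x0e, per mv88e6xxx.h):

	/* sketch: static unicast entry pointing at port 2, FID 1 */
	struct mv88e6xxx_atu_entry e = {
		.fid		= 1,
		.state		= GLOBAL_ATU_DATA_STATE_UC_STATIC,
		.trunk		= false,
		.portv_trunkid	= BIT(2),
	};
	/* GLOBAL_ATU_DATA = (BIT(2) << GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT)
	 *		   | GLOBAL_ATU_DATA_STATE_UC_STATIC = 0x004e */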
 
 int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
@@ -1277,7 +1847,7 @@
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr, state);
+	ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid, state);
 	mutex_unlock(&ps->smi_mutex);
 
 	return ret;
@@ -1290,61 +1860,105 @@
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr,
+	ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid,
 				       GLOBAL_ATU_DATA_STATE_UNUSED);
 	mutex_unlock(&ps->smi_mutex);
 
 	return ret;
 }
 
-static int __mv88e6xxx_port_getnext(struct dsa_switch *ds, int port,
-				    unsigned char *addr, bool *is_static)
+static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
+				  const unsigned char *addr,
+				  struct mv88e6xxx_atu_entry *entry)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	u8 fid = ps->fid[port];
-	int ret, state;
+	struct mv88e6xxx_atu_entry next = { 0 };
+	int ret;
+
+	next.fid = fid;
 
 	ret = _mv88e6xxx_atu_wait(ds);
 	if (ret < 0)
 		return ret;
 
-	ret = __mv88e6xxx_write_addr(ds, addr);
+	ret = _mv88e6xxx_atu_mac_write(ds, addr);
 	if (ret < 0)
 		return ret;
 
-	do {
-		ret = _mv88e6xxx_atu_cmd(ds, fid,  GLOBAL_ATU_OP_GET_NEXT_DB);
-		if (ret < 0)
-			return ret;
-
-		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
-		if (ret < 0)
-			return ret;
-		state = ret & GLOBAL_ATU_DATA_STATE_MASK;
-		if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
-			return -ENOENT;
-	} while (!(((ret >> 4) & 0xff) & (1 << port)));
-
-	ret = __mv88e6xxx_read_addr(ds, addr);
+	ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
 	if (ret < 0)
 		return ret;
 
-	*is_static = state == (is_multicast_ether_addr(addr) ?
-			       GLOBAL_ATU_DATA_STATE_MC_STATIC :
-			       GLOBAL_ATU_DATA_STATE_UC_STATIC);
+	ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
+	if (ret < 0)
+		return ret;
 
+	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
+	if (ret < 0)
+		return ret;
+
+	next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
+	if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+		unsigned int mask, shift;
+
+		if (ret & GLOBAL_ATU_DATA_TRUNK) {
+			next.trunk = true;
+			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
+			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
+		} else {
+			next.trunk = false;
+			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
+			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
+		}
+
+		next.portv_trunkid = (ret & mask) >> shift;
+	}
+
+	*entry = next;
 	return 0;
 }
 
 /* get next entry for port */
 int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
-			       unsigned char *addr, bool *is_static)
+			       unsigned char *addr, u16 *vid, bool *is_static)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	struct mv88e6xxx_atu_entry next;
+	u16 fid;
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = __mv88e6xxx_port_getnext(ds, port, addr, is_static);
+
+	ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid);
+	if (ret < 0)
+		goto unlock;
+	fid = ret;
+
+	do {
+		if (is_broadcast_ether_addr(addr)) {
+			struct mv88e6xxx_vtu_stu_entry vtu;
+
+			ret = _mv88e6xxx_port_vtu_getnext(ds, port, *vid, &vtu);
+			if (ret < 0)
+				goto unlock;
+
+			*vid = vtu.vid;
+			fid = vtu.fid;
+		}
+
+		ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next);
+		if (ret < 0)
+			goto unlock;
+
+		ether_addr_copy(addr, next.mac);
+
+		if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
+			continue;
+	} while (next.trunk || (next.portv_trunkid & BIT(port)) == 0);
+
+	*is_static = next.state == (is_multicast_ether_addr(addr) ?
+				    GLOBAL_ATU_DATA_STATE_MC_STATIC :
+				    GLOBAL_ATU_DATA_STATE_UC_STATIC);
+unlock:
 	mutex_unlock(&ps->smi_mutex);
 
 	return ret;
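A hedged sketch of the calling convention: seeding the walk with the broadcast address both wraps the ATU GetNext to the first entry and, through the check above, moves the walk on to the next VLAN's FID:

	/* sketch: dump the FDB as seen from one port of switch ds */
	unsigned char addr[ETH_ALEN];
	u16 vid = 0;
	bool is_static;

	eth_broadcast_addr(addr);
	while (!mv88e6xxx_port_fdb_getnext(ds, port, addr, &vid, &is_static))
		pr_debug("%pM vid %u %s\n", addr, vid,
			 is_static ? "static" : "dynamic");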
@@ -1377,7 +1991,7 @@
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
 	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
 	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
-	    mv88e6xxx_6065_family(ds)) {
+	    mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
 		/* MAC Forcing register: don't force link, speed,
 		 * duplex or flow control state to any particular
 		 * values on physical ports, but force the CPU port
@@ -1385,8 +1999,7 @@
 		 * full duplex.
 		 */
 		reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
-		if (dsa_is_cpu_port(ds, port) ||
-		    ds->dsa_port_mask & (1 << port)) {
+		if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
 			reg |= PORT_PCS_CTRL_FORCE_LINK |
 				PORT_PCS_CTRL_LINK_UP |
 				PORT_PCS_CTRL_DUPLEX_FULL |
@@ -1423,7 +2036,7 @@
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
 	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
 	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
-	    mv88e6xxx_6185_family(ds))
+	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
 		reg = PORT_CONTROL_IGMP_MLD_SNOOP |
 		PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
 		PORT_CONTROL_STATE_FORWARDING;
@@ -1431,7 +2044,8 @@
 		if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
 			reg |= PORT_CONTROL_DSA_TAG;
 		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+		    mv88e6xxx_6320_family(ds)) {
 			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
 				reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
 			else
@@ -1441,16 +2055,20 @@
 		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
 		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
 		    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
-		    mv88e6xxx_6185_family(ds)) {
+		    mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
 			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
 				reg |= PORT_CONTROL_EGRESS_ADD_TAG;
 		}
 	}
-	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds)) {
-		if (ds->dsa_port_mask & (1 << port))
+	if (dsa_is_dsa_port(ds, port)) {
+		if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
+			reg |= PORT_CONTROL_DSA_TAG;
+		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+		    mv88e6xxx_6320_family(ds)) {
 			reg |= PORT_CONTROL_FRAME_MODE_DSA;
+		}
+
 		if (port == dsa_upstream_port(ds))
 			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
 				PORT_CONTROL_FORWARD_UNKNOWN_MC;
@@ -1462,22 +2080,20 @@
 			goto abort;
 	}
 
-	/* Port Control 2: don't force a good FCS, set the maximum
-	 * frame size to 10240 bytes, don't let the switch add or
-	 * strip 802.1q tags, don't discard tagged or untagged frames
-	 * on this port, do a destination address lookup on all
-	 * received packets as usual, disable ARP mirroring and don't
-	 * send a copy of all transmitted/received frames on this port
-	 * to the CPU.
+	/* Port Control 2: don't force a good FCS, set the maximum frame size to
+	 * 10240 bytes, enable 802.1Q in Fallback mode, don't discard tagged or
+	 * untagged frames on this port, do a destination address lookup on all
+	 * received packets as usual, disable ARP mirroring and don't send a
+	 * copy of all transmitted/received frames on this port to the CPU.
 	 */
 	reg = 0;
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
 	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-	    mv88e6xxx_6095_family(ds))
+	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
 		reg = PORT_CONTROL_2_MAP_DA;
 
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-	    mv88e6xxx_6165_family(ds))
+	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
 		reg |= PORT_CONTROL_2_JUMBO_10240;
 
 	if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
@@ -1490,6 +2106,8 @@
 			reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
 	}
 
+	reg |= PORT_CONTROL_2_8021Q_FALLBACK;
+
 	if (reg) {
 		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
 					   PORT_CONTROL_2, reg);
@@ -1514,7 +2132,8 @@
 		goto abort;
 
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+	    mv88e6xxx_6320_family(ds)) {
 		/* Do not limit the period of time that this port can
 		 * be paused for by the remote end or the period of
 		 * time that this port can pause the remote end.
@@ -1564,7 +2183,8 @@
 
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
 	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
+	    mv88e6xxx_6320_family(ds)) {
 		/* Rate Control: disable ingress rate limiting. */
 		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
 					   PORT_RATE_CONTROL, 0x0001);
@@ -1584,9 +2204,9 @@
 	 * ports, and allow each of the 'real' ports to only talk to
 	 * the upstream port.
 	 */
-	fid = __ffs(ps->fid_mask);
+	fid = port + 1;
 	ps->fid[port] = fid;
-	ps->fid_mask &= ~(1 << fid);
+	set_bit(fid, ps->fid_bitmap);
 
 	if (!dsa_is_cpu_port(ds, port))
 		ps->bridge_mask[fid] = 1 << port;
@@ -1683,7 +2303,7 @@
 	unsigned char addr[6];
 	int ret, data, state;
 
-	ret = __mv88e6xxx_write_addr(ds, bcast);
+	ret = _mv88e6xxx_atu_mac_write(ds, bcast);
 	if (ret < 0)
 		return ret;
 
@@ -1698,7 +2318,7 @@
 		state = data & GLOBAL_ATU_DATA_STATE_MASK;
 		if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
 			break;
-		ret = __mv88e6xxx_read_addr(ds, addr);
+		ret = _mv88e6xxx_atu_mac_read(ds, addr);
 		if (ret < 0)
 			return ret;
 		mv88e6xxx_atu_show_entry(s, dbnum, addr, data);
@@ -1885,8 +2505,6 @@
 
 	ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
 
-	ps->fid_mask = (1 << DSA_MAX_PORTS) - 1;
-
 	INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
 
 	name = kasprintf(GFP_KERNEL, "dsa%d", ds->index);
@@ -1913,6 +2531,7 @@
 int mv88e6xxx_setup_global(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int ret;
 	int i;
 
 	/* Set the default address aging time to 5 minutes, and
@@ -1976,7 +2595,8 @@
 			  (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
 
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+	    mv88e6xxx_6320_family(ds)) {
 		/* Send all frames with destination addresses matching
 		 * 01:80:c2:00:00:2x to the CPU port.
 		 */
@@ -1995,7 +2615,8 @@
 
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
 	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
+	    mv88e6xxx_6320_family(ds)) {
 		/* Disable ingress rate limiting by resetting all
 		 * ingress rate limit registers to their initial
 		 * state.
@@ -2009,9 +2630,17 @@
 	REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
 
 	/* Wait for the flush to complete. */
-	_mv88e6xxx_stats_wait(ds);
+	mutex_lock(&ps->smi_mutex);
+	ret = _mv88e6xxx_stats_wait(ds);
+	if (ret < 0)
+		goto unlock;
 
-	return 0;
+	/* Clear all the VTU and STU entries */
+	ret = _mv88e6xxx_vtu_stu_flush(ds);
+unlock:
+	mutex_unlock(&ps->smi_mutex);
+
+	return ret;
 }
 
 int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
@@ -2162,6 +2791,132 @@
 	return ret;
 }
 
+#ifdef CONFIG_NET_DSA_HWMON
+
+static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int ret;
+	int val;
+
+	*temp = 0;
+
+	mutex_lock(&ps->smi_mutex);
+
+	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
+	if (ret < 0)
+		goto error;
+
+	/* Enable temperature sensor */
+	ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+	if (ret < 0)
+		goto error;
+
+	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
+	if (ret < 0)
+		goto error;
+
+	/* Wait for temperature to stabilize */
+	usleep_range(10000, 12000);
+
+	val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+	if (val < 0) {
+		ret = val;
+		goto error;
+	}
+
+	/* Disable temperature sensor */
+	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
+	if (ret < 0)
+		goto error;
+
+	*temp = ((val & 0x1f) - 5) * 5;
+
+error:
+	_mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
+	mutex_unlock(&ps->smi_mutex);
+	return ret;
+}
+
+static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
+{
+	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+	int ret;
+
+	*temp = 0;
+
+	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
+	if (ret < 0)
+		return ret;
+
+	*temp = (ret & 0xff) - 25;
+
+	return 0;
+}
+
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
+{
+	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
+		return mv88e63xx_get_temp(ds, temp);
+
+	return mv88e61xx_get_temp(ds, temp);
+}
+
+int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
+{
+	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+	int ret;
+
+	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+		return -EOPNOTSUPP;
+
+	*temp = 0;
+
+	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+	if (ret < 0)
+		return ret;
+
+	*temp = (((ret >> 8) & 0x1f) * 5) - 25;
+
+	return 0;
+}
+
+int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
+{
+	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+	int ret;
+
+	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+		return -EOPNOTSUPP;
+
+	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+	if (ret < 0)
+		return ret;
+	temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
+	return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
+					(ret & 0xe0ff) | (temp << 8));
+}
+
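A quick round-trip check of the encoding, assuming the 5 degC/LSB with -25 degC offset format that the reads above use:

	/* sketch: an 85 degC limit survives the encode/decode round trip */
	int enc = clamp_val(DIV_ROUND_CLOSEST(85, 5) + 5, 0, 0x1f);	/* 22 = 0x16 */
	int dec = (enc * 5) - 25;					/* 85 degC   */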
+int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
+{
+	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+	int ret;
+
+	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+		return -EOPNOTSUPP;
+
+	*alarm = false;
+
+	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+	if (ret < 0)
+		return ret;
+
+	*alarm = !!(ret & 0x40);
+
+	return 0;
+}
+#endif /* CONFIG_NET_DSA_HWMON */
+
 static int __init mv88e6xxx_init(void)
 {
 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index a650b26..9b6f3d9 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -11,6 +11,8 @@
 #ifndef __MV88E6XXX_H
 #define __MV88E6XXX_H
 
+#include <linux/if_vlan.h>
+
 #ifndef UINT64_MAX
 #define UINT64_MAX		(u64)(~((u64)0))
 #endif
@@ -44,6 +46,8 @@
 #define PORT_STATUS_TX_PAUSED	BIT(5)
 #define PORT_STATUS_FLOW_CTRL	BIT(4)
 #define PORT_PCS_CTRL		0x01
+#define PORT_PCS_CTRL_RGMII_DELAY_RXCLK	BIT(15)
+#define PORT_PCS_CTRL_RGMII_DELAY_TXCLK	BIT(14)
 #define PORT_PCS_CTRL_FC		BIT(7)
 #define PORT_PCS_CTRL_FORCE_FC		BIT(6)
 #define PORT_PCS_CTRL_LINK_UP		BIT(5)
@@ -89,7 +93,12 @@
 #define PORT_SWITCH_ID_6182	0x1a60
 #define PORT_SWITCH_ID_6185	0x1a70
 #define PORT_SWITCH_ID_6240	0x2400
-#define PORT_SWITCH_ID_6320	0x1250
+#define PORT_SWITCH_ID_6320	0x1150
+#define PORT_SWITCH_ID_6320_A1	0x1151
+#define PORT_SWITCH_ID_6320_A2	0x1152
+#define PORT_SWITCH_ID_6321	0x3100
+#define PORT_SWITCH_ID_6321_A1	0x3101
+#define PORT_SWITCH_ID_6321_A2	0x3102
 #define PORT_SWITCH_ID_6350	0x3710
 #define PORT_SWITCH_ID_6351	0x3750
 #define PORT_SWITCH_ID_6352	0x3520
@@ -124,6 +133,7 @@
 #define PORT_CONTROL_1		0x05
 #define PORT_BASE_VLAN		0x06
 #define PORT_DEFAULT_VLAN	0x07
+#define PORT_DEFAULT_VLAN_MASK	0xfff
 #define PORT_CONTROL_2		0x08
 #define PORT_CONTROL_2_IGNORE_FCS	BIT(15)
 #define PORT_CONTROL_2_VTU_PRI_OVERRIDE	BIT(14)
@@ -132,6 +142,11 @@
 #define PORT_CONTROL_2_JUMBO_1522	(0x00 << 12)
 #define PORT_CONTROL_2_JUMBO_2048	(0x01 << 12)
 #define PORT_CONTROL_2_JUMBO_10240	(0x02 << 12)
+#define PORT_CONTROL_2_8021Q_MASK	(0x03 << 10)
+#define PORT_CONTROL_2_8021Q_DISABLED	(0x00 << 10)
+#define PORT_CONTROL_2_8021Q_FALLBACK	(0x01 << 10)
+#define PORT_CONTROL_2_8021Q_CHECK	(0x02 << 10)
+#define PORT_CONTROL_2_8021Q_SECURE	(0x03 << 10)
 #define PORT_CONTROL_2_DISCARD_TAGGED	BIT(9)
 #define PORT_CONTROL_2_DISCARD_UNTAGGED	BIT(8)
 #define PORT_CONTROL_2_MAP_DA		BIT(7)
@@ -164,6 +179,11 @@
 #define GLOBAL_MAC_01		0x01
 #define GLOBAL_MAC_23		0x02
 #define GLOBAL_MAC_45		0x03
+#define GLOBAL_ATU_FID		0x01	/* 6097 6165 6351 6352 */
+#define GLOBAL_VTU_FID		0x02	/* 6097 6165 6351 6352 */
+#define GLOBAL_VTU_FID_MASK	0xfff
+#define GLOBAL_VTU_SID		0x03	/* 6097 6165 6351 6352 */
+#define GLOBAL_VTU_SID_MASK	0x3f
 #define GLOBAL_CONTROL		0x04
 #define GLOBAL_CONTROL_SW_RESET		BIT(15)
 #define GLOBAL_CONTROL_PPU_ENABLE	BIT(14)
@@ -180,10 +200,27 @@
 #define GLOBAL_CONTROL_TCAM_EN		BIT(1)
 #define GLOBAL_CONTROL_EEPROM_DONE_EN	BIT(0)
 #define GLOBAL_VTU_OP		0x05
+#define GLOBAL_VTU_OP_BUSY	BIT(15)
+#define GLOBAL_VTU_OP_FLUSH_ALL		((0x01 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_VTU_LOAD_PURGE	((0x03 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_VTU_GET_NEXT	((0x04 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_STU_LOAD_PURGE	((0x05 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_STU_GET_NEXT	((0x06 << 12) | GLOBAL_VTU_OP_BUSY)
 #define GLOBAL_VTU_VID		0x06
+#define GLOBAL_VTU_VID_MASK	0xfff
+#define GLOBAL_VTU_VID_VALID	BIT(12)
 #define GLOBAL_VTU_DATA_0_3	0x07
 #define GLOBAL_VTU_DATA_4_7	0x08
 #define GLOBAL_VTU_DATA_8_11	0x09
+#define GLOBAL_VTU_STU_DATA_MASK		0x03
+#define GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED	0x00
+#define GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED	0x01
+#define GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED	0x02
+#define GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER	0x03
+#define GLOBAL_STU_DATA_PORT_STATE_DISABLED	0x00
+#define GLOBAL_STU_DATA_PORT_STATE_BLOCKING	0x01
+#define GLOBAL_STU_DATA_PORT_STATE_LEARNING	0x02
+#define GLOBAL_STU_DATA_PORT_STATE_FORWARDING	0x03
 #define GLOBAL_ATU_CONTROL	0x0a
 #define GLOBAL_ATU_CONTROL_LEARN2ALL	BIT(3)
 #define GLOBAL_ATU_OP		0x0b
@@ -198,6 +235,8 @@
 #define GLOBAL_ATU_OP_GET_CLR_VIOLATION	  ((7 << 12) | GLOBAL_ATU_OP_BUSY)
 #define GLOBAL_ATU_DATA		0x0c
 #define GLOBAL_ATU_DATA_TRUNK			BIT(15)
+#define GLOBAL_ATU_DATA_TRUNK_ID_MASK		0x00f0
+#define GLOBAL_ATU_DATA_TRUNK_ID_SHIFT		4
 #define GLOBAL_ATU_DATA_PORT_VECTOR_MASK	0x3ff0
 #define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT	4
 #define GLOBAL_ATU_DATA_STATE_MASK		0x0f
@@ -280,8 +319,12 @@
 #define GLOBAL2_PRIO_OVERRIDE_FORCE_ARP		BIT(3)
 #define GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT		0
 #define GLOBAL2_EEPROM_OP	0x14
-#define GLOBAL2_EEPROM_OP_BUSY	BIT(15)
-#define GLOBAL2_EEPROM_OP_LOAD	BIT(11)
+#define GLOBAL2_EEPROM_OP_BUSY		BIT(15)
+#define GLOBAL2_EEPROM_OP_WRITE		((3 << 12) | GLOBAL2_EEPROM_OP_BUSY)
+#define GLOBAL2_EEPROM_OP_READ		((4 << 12) | GLOBAL2_EEPROM_OP_BUSY)
+#define GLOBAL2_EEPROM_OP_LOAD		BIT(11)
+#define GLOBAL2_EEPROM_OP_WRITE_EN	BIT(10)
+#define GLOBAL2_EEPROM_OP_ADDR_MASK	0xff
 #define GLOBAL2_EEPROM_DATA	0x15
 #define GLOBAL2_PTP_AVB_OP	0x16
 #define GLOBAL2_PTP_AVB_DATA	0x17
@@ -304,6 +347,25 @@
 #define GLOBAL2_QOS_WEIGHT	0x1c
 #define GLOBAL2_MISC		0x1d
 
+struct mv88e6xxx_atu_entry {
+	u16	fid;
+	u8	state;
+	bool	trunk;
+	u16	portv_trunkid;
+	u8	mac[ETH_ALEN];
+};
+
+struct mv88e6xxx_vtu_stu_entry {
+	/* VTU only */
+	u16	vid;
+	u16	fid;
+
+	/* VTU and STU */
+	u8	sid;
+	bool	valid;
+	u8	data[DSA_MAX_PORTS];
+};
+
 struct mv88e6xxx_priv_state {
 	/* When using multi-chip addressing, this mutex protects
 	 * access to the indirect access registers.  (In single-chip
@@ -342,9 +404,9 @@
 
 	/* hw bridging */
 
-	u32 fid_mask;
-	u8 fid[DSA_MAX_PORTS];
-	u16 bridge_mask[DSA_MAX_PORTS];
+	DECLARE_BITMAP(fid_bitmap, VLAN_N_VID);	/* FIDs 1 to 4095 available */
+	u16 fid[DSA_MAX_PORTS];			/* per (non-bridged) port FID */
+	u16 bridge_mask[DSA_MAX_PORTS];		/* br groups (indexed by FID) */
 
 	unsigned long port_state_update_mask;
 	u8 port_state[DSA_MAX_PORTS];
@@ -386,10 +448,15 @@
 				 uint64_t *data);
 int mv88e6xxx_get_sset_count(struct dsa_switch *ds);
 int mv88e6xxx_get_sset_count_basic(struct dsa_switch *ds);
+void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
+			   struct phy_device *phydev);
 int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
 void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
 			struct ethtool_regs *regs, void *_p);
-int  mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp);
+int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm);
 int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds);
 int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
 int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
@@ -401,15 +468,23 @@
 int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask);
 int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask);
 int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state);
+int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *vid);
+int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 vid);
+int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+			    bool untagged);
+int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid);
+int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
+			   unsigned long *ports, unsigned long *untagged);
 int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
 			   const unsigned char *addr, u16 vid);
 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
 			   const unsigned char *addr, u16 vid);
 int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
-			       unsigned char *addr, bool *is_static);
+			       unsigned char *addr, u16 *vid, bool *is_static);
 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
 			     int reg, int val);
+
 extern struct dsa_switch_driver mv88e6131_switch_driver;
 extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
 extern struct dsa_switch_driver mv88e6352_switch_driver;
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 49adbf1..815eb94 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -144,10 +144,9 @@
 	dev->destructor = free_netdev;
 
 	/* Fill in device structure with ethernet-generic values. */
-	dev->tx_queue_len = 0;
 	dev->flags |= IFF_NOARP;
 	dev->flags &= ~IFF_MULTICAST;
-	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
 	dev->features	|= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
 	dev->features	|= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
 	eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 753887d..2839af0 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1726,6 +1726,7 @@
 	if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
 		iowrite32(0x8000, vp->cb_fn_base + 4);
 	netif_start_queue (dev);
+	netdev_reset_queue(dev);
 err_out:
 	return err;
 }
@@ -1935,16 +1936,18 @@
 		if (vp->cur_tx - vp->dirty_tx > 0  &&  ioread32(ioaddr + DownListPtr) == 0)
 			iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
 				 ioaddr + DownListPtr);
-		if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
+		if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) {
 			netif_wake_queue (dev);
+			netdev_reset_queue(dev);
+		}
 		if (vp->drv_flags & IS_BOOMERANG)
 			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
 		iowrite16(DownUnstall, ioaddr + EL3_CMD);
 	} else {
 		dev->stats.tx_dropped++;
 		netif_wake_queue(dev);
+		netdev_reset_queue(dev);
 	}
-
 	/* Issue Tx Enable */
 	iowrite16(TxEnable, ioaddr + EL3_CMD);
 	dev->trans_start = jiffies; /* prevent tx timeout */
@@ -2063,6 +2066,7 @@
 {
 	struct vortex_private *vp = netdev_priv(dev);
 	void __iomem *ioaddr = vp->ioaddr;
+	int skblen = skb->len;
 
 	/* Put out the doubleword header... */
 	iowrite32(skb->len, ioaddr + TX_FIFO);
@@ -2094,6 +2098,7 @@
 		}
 	}
 
+	netdev_sent_queue(dev, skblen);
 
 	/* Clear the Tx status stack. */
 	{
@@ -2125,6 +2130,7 @@
 	void __iomem *ioaddr = vp->ioaddr;
 	/* Calculate the next Tx descriptor entry. */
 	int entry = vp->cur_tx % TX_RING_SIZE;
+	int skblen = skb->len;
 	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
 	unsigned long flags;
 	dma_addr_t dma_addr;
@@ -2230,6 +2236,8 @@
 	}
 
 	vp->cur_tx++;
+	netdev_sent_queue(dev, skblen);
+
 	if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
 		netif_stop_queue (dev);
 	} else {					/* Clear previous interrupt enable. */
@@ -2267,6 +2275,7 @@
 	int status;
 	int work_done = max_interrupt_work;
 	int handled = 0;
+	unsigned int bytes_compl = 0, pkts_compl = 0;
 
 	ioaddr = vp->ioaddr;
 	spin_lock(&vp->lock);
@@ -2314,6 +2323,8 @@
 			if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
 				iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
 				pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
+				pkts_compl++;
+				bytes_compl += vp->tx_skb->len;
 				dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
 				if (ioread16(ioaddr + TxFree) > 1536) {
 					/*
@@ -2358,6 +2369,7 @@
 		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
 	} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
 
+	netdev_completed_queue(dev, pkts_compl, bytes_compl);
 	spin_unlock(&vp->window_lock);
 
 	if (vortex_debug > 4)
@@ -2382,6 +2394,7 @@
 	int status;
 	int work_done = max_interrupt_work;
 	int handled = 0;
+	unsigned int bytes_compl = 0, pkts_compl = 0;
 
 	ioaddr = vp->ioaddr;
 
@@ -2455,6 +2468,8 @@
 					pci_unmap_single(VORTEX_PCI(vp),
 						le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
 #endif
+					pkts_compl++;
+					bytes_compl += skb->len;
 					dev_kfree_skb_irq(skb);
 					vp->tx_skbuff[entry] = NULL;
 				} else {
@@ -2495,6 +2510,7 @@
 			iowrite32(0x8000, vp->cb_fn_base + 4);
 
 	} while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);
+	netdev_completed_queue(dev, pkts_compl, bytes_compl);
 
 	if (vortex_debug > 4)
 		pr_debug("%s: exiting interrupt, status %4.4x.\n",
@@ -2696,7 +2712,8 @@
 	struct vortex_private *vp = netdev_priv(dev);
 	void __iomem *ioaddr = vp->ioaddr;
 
-	netif_stop_queue (dev);
+	netdev_reset_queue(dev);
+	netif_stop_queue(dev);
 
 	del_timer_sync(&vp->rx_oom_timer);
 	del_timer_sync(&vp->timer);
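The 3c59x hunks above follow the usual byte queue limits pairing; as a summary sketch (not additional driver code), the three calls must stay balanced:

	/* sketch: the BQL contract implemented above */
	netdev_sent_queue(dev, skb->len);		/* on the xmit path     */
	netdev_completed_queue(dev, pkts, bytes);	/* in tx completion     */
	netdev_reset_queue(dev);			/* on tx ring (re)start */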
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index f3bb178..05aa759 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -167,6 +167,7 @@
 source "drivers/net/ethernet/smsc/Kconfig"
 source "drivers/net/ethernet/stmicro/Kconfig"
 source "drivers/net/ethernet/sun/Kconfig"
+source "drivers/net/ethernet/synopsys/Kconfig"
 source "drivers/net/ethernet/tehuti/Kconfig"
 source "drivers/net/ethernet/ti/Kconfig"
 source "drivers/net/ethernet/tile/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index b52e0f6..ddfc808 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -77,6 +77,7 @@
 obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
 obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
 obj-$(CONFIG_NET_VENDOR_SUN) += sun/
+obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
 obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
 obj-$(CONFIG_NET_VENDOR_TI) += ti/
 obj-$(CONFIG_TILE_NET) += tile/
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index bab01c84..48ce83e 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -28,6 +28,7 @@
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/phy.h>
+#include <linux/soc/sunxi/sunxi_sram.h>
 
 #include "sun4i-emac.h"
 
@@ -857,11 +858,17 @@
 
 	clk_prepare_enable(db->clk);
 
+	ret = sunxi_sram_claim(&pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Error: couldn't map SRAM to device\n");
+		goto out;
+	}
+
 	db->phy_node = of_parse_phandle(np, "phy", 0);
 	if (!db->phy_node) {
 		dev_err(&pdev->dev, "no associated PHY\n");
 		ret = -ENODEV;
-		goto out;
+		goto out_release_sram;
 	}
 
 	/* Read MAC-address from DT */
@@ -893,7 +900,7 @@
 	if (ret) {
 		dev_err(&pdev->dev, "Registering netdev failed!\n");
 		ret = -ENODEV;
-		goto out;
+		goto out_release_sram;
 	}
 
 	dev_info(&pdev->dev, "%s: at %p, IRQ %d MAC: %pM\n",
@@ -901,6 +908,8 @@
 
 	return 0;
 
+out_release_sram:
+	sunxi_sram_release(&pdev->dev);
 out:
 	dev_err(db->dev, "not found (%d).\n", ret);
 
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 580553d..88ef67a 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -71,8 +71,6 @@
 		      SGDMA_CTRLREG_INTEN |
 		      SGDMA_CTRLREG_ILASTD;
 
-	priv->sgdmadesclen = sizeof(struct sgdma_descrip);
-
 	INIT_LIST_HEAD(&priv->txlisthd);
 	INIT_LIST_HEAD(&priv->rxlisthd);
 
@@ -254,7 +252,7 @@
 		unsigned int pktstatus = 0;
 		dma_sync_single_for_cpu(priv->device,
 					priv->rxdescphys,
-					priv->sgdmadesclen,
+					SGDMA_DESC_LEN,
 					DMA_FROM_DEVICE);
 
 		pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
@@ -374,7 +372,7 @@
 
 		dma_sync_single_for_device(priv->device,
 					   priv->rxdescphys,
-					   priv->sgdmadesclen,
+					   SGDMA_DESC_LEN,
 					   DMA_TO_DEVICE);
 
 		csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
@@ -402,7 +400,7 @@
 	csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
 
 	dma_sync_single_for_device(priv->device, priv->txdescphys,
-				   priv->sgdmadesclen, DMA_TO_DEVICE);
+				   SGDMA_DESC_LEN, DMA_TO_DEVICE);
 
 	csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
 		priv->tx_dma_csr,
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
index 85bc33b..bbd52f0 100644
--- a/drivers/net/ethernet/altera/altera_sgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -50,6 +50,7 @@
 	u8	control;
 } __packed;
 
+#define SGDMA_DESC_LEN	sizeof(struct sgdma_descrip)
 
 #define SGDMA_STATUS_ERR		BIT(0)
 #define SGDMA_STATUS_LENGTH_ERR		BIT(1)
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 2adb24d..103c30d 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -458,7 +458,6 @@
 	u32 rxctrlreg;
 	dma_addr_t rxdescphys;
 	dma_addr_t txdescphys;
-	size_t sgdmadesclen;
 
 	struct list_head txlisthd;
 	struct list_head rxlisthd;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 541bed0..ff05bbc 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -193,12 +193,16 @@
 #define USERINFO_LEN			32
 #define FPQNUM_POS			32
 #define FPQNUM_LEN			12
+#define NV_POS				50
+#define NV_LEN				1
+#define LL_POS				51
+#define LL_LEN				1
 #define LERR_POS			60
 #define LERR_LEN			3
 #define STASH_POS			52
 #define STASH_LEN			2
 #define BUFDATALEN_POS			48
-#define BUFDATALEN_LEN			12
+#define BUFDATALEN_LEN			15
 #define DATAADDR_POS			0
 #define DATAADDR_LEN			42
 #define COHERENT_POS			63
@@ -215,9 +219,19 @@
 #define IPHDR_LEN			6
 #define EC_POS				22	/* Enable checksum */
 #define EC_LEN				1
+#define ET_POS				23	/* Enable TSO */
 #define IS_POS				24	/* IP protocol select */
 #define IS_LEN				1
 #define TYPE_ETH_WORK_MESSAGE_POS	44
+#define LL_BYTES_MSB_POS		56
+#define LL_BYTES_MSB_LEN		8
+#define LL_BYTES_LSB_POS		48
+#define LL_BYTES_LSB_LEN		12
+#define LL_LEN_POS			48
+#define LL_LEN_LEN			8
+#define DATALEN_MASK			GENMASK(11, 0)
+
+#define LAST_BUFFER			(0x7800ULL << BUFDATALEN_POS)
 
 struct xgene_enet_raw_desc {
 	__le64 m0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index a02ea7f8..e47298f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -147,18 +147,27 @@
 {
 	struct sk_buff *skb;
 	struct device *dev;
+	skb_frag_t *frag;
+	dma_addr_t *frag_dma_addr;
 	u16 skb_index;
 	u8 status;
-	int ret = 0;
+	int i, ret = 0;
 
 	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
 	skb = cp_ring->cp_skb[skb_index];
+	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
 
 	dev = ndev_to_dev(cp_ring->ndev);
 	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
-			 GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)),
+			 skb_headlen(skb),
 			 DMA_TO_DEVICE);
 
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		frag = &skb_shinfo(skb)->frags[i];
+		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
+			       DMA_TO_DEVICE);
+	}
+
 	/* Checking for error */
 	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
 	if (unlikely(status > 2)) {
@@ -179,12 +188,16 @@
 
 static u64 xgene_enet_work_msg(struct sk_buff *skb)
 {
+	struct net_device *ndev = skb->dev;
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 	struct iphdr *iph;
-	u8 l3hlen, l4hlen = 0;
-	u8 csum_enable = 0;
-	u8 proto = 0;
-	u8 ethhdr;
-	u64 hopinfo;
+	u8 l3hlen = 0, l4hlen = 0;
+	u8 ethhdr, proto = 0, csum_enable = 0;
+	u64 hopinfo = 0;
+	u32 hdr_len, mss = 0;
+	u32 i, len, nr_frags;
+
+	ethhdr = xgene_enet_hdr_len(skb->data);
 
 	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
 	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
@@ -201,14 +214,40 @@
 		l4hlen = tcp_hdrlen(skb) >> 2;
 		csum_enable = 1;
 		proto = TSO_IPPROTO_TCP;
+		if (ndev->features & NETIF_F_TSO) {
+			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
+			mss = skb_shinfo(skb)->gso_size;
+
+			if (skb_is_nonlinear(skb)) {
+				len = skb_headlen(skb);
+				nr_frags = skb_shinfo(skb)->nr_frags;
+
+				for (i = 0; i < 2 && i < nr_frags; i++)
+					len += skb_shinfo(skb)->frags[i].size;
+
+				/* HW requires the header to reside within the first three buffers */
+				if (unlikely(hdr_len > len)) {
+					if (skb_linearize(skb))
+						return 0;
+				}
+			}
+
+			if (!mss || ((skb->len - hdr_len) <= mss))
+				goto out;
+
+			if (mss != pdata->mss) {
+				pdata->mss = mss;
+				pdata->mac_ops->set_mss(pdata);
+			}
+			hopinfo |= SET_BIT(ET);
+		}
 	} else if (iph->protocol == IPPROTO_UDP) {
 		l4hlen = UDP_HDR_SIZE;
 		csum_enable = 1;
 	}
 out:
 	l3hlen = ip_hdrlen(skb) >> 2;
-	ethhdr = xgene_enet_hdr_len(skb->data);
-	hopinfo = SET_VAL(TCPHDR, l4hlen) |
+	hopinfo |= SET_VAL(TCPHDR, l4hlen) |
 		  SET_VAL(IPHDR, l3hlen) |
 		  SET_VAL(ETHHDR, ethhdr) |
 		  SET_VAL(EC, csum_enable) |
@@ -219,35 +258,170 @@
 	return hopinfo;
 }
 
+static u16 xgene_enet_encode_len(u16 len)
+{
+	return (len == BUFLEN_16K) ? 0 : len;
+}
+
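xgene_enet_encode_len() leans on the hardware reading a zero BUFDATALEN as a full 16 KB buffer, so only the maximum length needs special-casing (a sketch, assuming BUFLEN_16K from the driver headers):

	u16 full = xgene_enet_encode_len(BUFLEN_16K);	/* 0: hw treats 0 as 16K        */
	u16 part = xgene_enet_encode_len(1500);		/* smaller lengths pass through */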
+static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
+{
+	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
+				    SET_VAL(BUFDATALEN, len));
+}
+
+static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
+{
+	__le64 *exp_bufs;
+
+	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
+	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
+	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
+
+	return exp_bufs;
+}
+
+static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
+{
+	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
+}
+
 static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
 				    struct sk_buff *skb)
 {
 	struct device *dev = ndev_to_dev(tx_ring->ndev);
 	struct xgene_enet_raw_desc *raw_desc;
-	dma_addr_t dma_addr;
+	__le64 *exp_desc = NULL, *exp_bufs = NULL;
+	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
+	skb_frag_t *frag;
 	u16 tail = tx_ring->tail;
 	u64 hopinfo;
+	u32 len, hw_len;
+	u8 ll = 0, nv = 0, idx = 0;
+	bool split = false;
+	u32 size, offset, ell_bytes = 0;
+	u32 i, fidx, nr_frags, count = 1;
 
 	raw_desc = &tx_ring->raw_desc[tail];
+	tail = (tail + 1) & (tx_ring->slots - 1);
 	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
 
-	dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+	hopinfo = xgene_enet_work_msg(skb);
+	if (!hopinfo)
+		return -EINVAL;
+	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
+				   hopinfo);
+
+	len = skb_headlen(skb);
+	hw_len = xgene_enet_encode_len(len);
+
+	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, dma_addr)) {
 		netdev_err(tx_ring->ndev, "DMA mapping error\n");
 		return -EINVAL;
 	}
 
 	/* Hardware expects descriptor in little endian format */
-	raw_desc->m0 = cpu_to_le64(tail);
 	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
-				   SET_VAL(BUFDATALEN, skb->len) |
+				   SET_VAL(BUFDATALEN, hw_len) |
 				   SET_BIT(COHERENT));
-	hopinfo = xgene_enet_work_msg(skb);
-	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
-				   hopinfo);
-	tx_ring->cp_ring->cp_skb[tail] = skb;
 
-	return 0;
+	if (!skb_is_nonlinear(skb))
+		goto out;
+
+	/* scatter gather */
+	nv = 1;
+	exp_desc = (void *)&tx_ring->raw_desc[tail];
+	tail = (tail + 1) & (tx_ring->slots - 1);
+	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	for (i = nr_frags; i < 4; i++)
+		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);
+
+	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
+
+	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
+		if (!split) {
+			frag = &skb_shinfo(skb)->frags[fidx];
+			size = skb_frag_size(frag);
+			offset = 0;
+
+			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
+						     DMA_TO_DEVICE);
+			if (dma_mapping_error(dev, pbuf_addr))
+				return -EINVAL;
+
+			frag_dma_addr[fidx] = pbuf_addr;
+			fidx++;
+
+			if (size > BUFLEN_16K)
+				split = true;
+		}
+
+		if (size > BUFLEN_16K) {
+			len = BUFLEN_16K;
+			size -= BUFLEN_16K;
+		} else {
+			len = size;
+			split = false;
+		}
+
+		dma_addr = pbuf_addr + offset;
+		hw_len = xgene_enet_encode_len(len);
+
+		switch (i) {
+		case 0:
+		case 1:
+		case 2:
+			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
+			break;
+		case 3:
+			if (split || (fidx != nr_frags)) {
+				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
+				xgene_set_addr_len(exp_bufs, idx, dma_addr,
+						   hw_len);
+				idx++;
+				ell_bytes += len;
+			} else {
+				xgene_set_addr_len(exp_desc, i, dma_addr,
+						   hw_len);
+			}
+			break;
+		default:
+			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
+			idx++;
+			ell_bytes += len;
+			break;
+		}
+
+		if (split)
+			offset += BUFLEN_16K;
+	}
+	count++;
+
+	if (idx) {
+		ll = 1;
+		dma_addr = dma_map_single(dev, exp_bufs,
+					  sizeof(u64) * MAX_EXP_BUFFS,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_addr)) {
+			dev_kfree_skb_any(skb);
+			return -EINVAL;
+		}
+		i = ell_bytes >> LL_BYTES_LSB_LEN;
+		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
+					  SET_VAL(LL_BYTES_MSB, i) |
+					  SET_VAL(LL_LEN, idx));
+		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
+	}
+
+out:
+	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
+				   SET_VAL(USERINFO, tx_ring->tail));
+	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
+	tx_ring->tail = tail;
+
+	return count;
 }
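A worked example of the LL byte-count split performed just above, assuming LL_BYTES_LSB_LEN of 12 and LL_BYTES_MSB_LEN of 8 as defined in xgene_enet_hw.h:

	/* sketch: ell_bytes = 0x12345 bytes spread over extended buffers */
	u32 msb = 0x12345 >> LL_BYTES_LSB_LEN;	/* 0x12,  lands in exp_desc[2]  */
	u32 lsb = 0x12345 & GENMASK(11, 0);	/* 0x345, lands in raw_desc->m2 */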
 
 static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
@@ -257,6 +431,7 @@
 	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
 	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
 	u32 tx_level, cq_level;
+	int count;
 
 	tx_level = pdata->ring_ops->len(tx_ring);
 	cq_level = pdata->ring_ops->len(cp_ring);
@@ -266,14 +441,17 @@
 		return NETDEV_TX_BUSY;
 	}
 
-	if (xgene_enet_setup_tx_desc(tx_ring, skb)) {
+	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
+		return NETDEV_TX_OK;
+
+	count = xgene_enet_setup_tx_desc(tx_ring, skb);
+	if (count <= 0) {
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 
-	pdata->ring_ops->wr_cmd(tx_ring, 1);
+	pdata->ring_ops->wr_cmd(tx_ring, count);
 	skb_tx_timestamp(skb);
-	tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);
 
 	pdata->stats.tx_packets++;
 	pdata->stats.tx_bytes += skb->len;
@@ -326,7 +504,7 @@
 
 	/* strip off CRC as HW isn't doing this */
 	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
-	datalen -= 4;
+	datalen = (datalen & DATALEN_MASK) - 4;
 	prefetch(skb->data - NET_IP_ALIGN);
 	skb_put(skb, datalen);
 
@@ -358,26 +536,41 @@
 				   int budget)
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
-	struct xgene_enet_raw_desc *raw_desc;
+	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
 	u16 head = ring->head;
 	u16 slots = ring->slots - 1;
-	int ret, count = 0;
+	int ret, count = 0, processed = 0;
 
 	do {
 		raw_desc = &ring->raw_desc[head];
+		exp_desc = NULL;
 		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
 			break;
 
 		/* read fpqnum field after dataaddr field */
 		dma_rmb();
+		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
+			head = (head + 1) & slots;
+			exp_desc = &ring->raw_desc[head];
+
+			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
+				head = (head - 1) & slots;
+				break;
+			}
+			dma_rmb();
+			count++;
+		}
 		if (is_rx_desc(raw_desc))
 			ret = xgene_enet_rx_frame(ring, raw_desc);
 		else
 			ret = xgene_enet_tx_completion(ring, raw_desc);
 		xgene_enet_mark_desc_slot_empty(raw_desc);
+		if (exp_desc)
+			xgene_enet_mark_desc_slot_empty(exp_desc);
 
 		head = (head + 1) & slots;
 		count++;
+		processed++;
 
 		if (ret)
 			break;
@@ -393,7 +586,7 @@
 		}
 	}
 
-	return count;
+	return processed;
 }
 
 static int xgene_enet_napi(struct napi_struct *napi, const int budget)
@@ -738,12 +931,13 @@
 	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
 	struct xgene_enet_desc_ring *buf_pool = NULL;
 	enum xgene_ring_owner owner;
+	dma_addr_t dma_exp_bufs;
 	u8 cpu_bufnum = pdata->cpu_bufnum;
 	u8 eth_bufnum = pdata->eth_bufnum;
 	u8 bp_bufnum = pdata->bp_bufnum;
 	u16 ring_num = pdata->ring_num;
 	u16 ring_id;
-	int ret;
+	int ret, size;
 
 	/* allocate rx descriptor ring */
 	owner = xgene_derive_ring_owner(pdata);
@@ -794,6 +988,15 @@
 		ret = -ENOMEM;
 		goto err;
 	}
+
+	size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
+	tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, &dma_exp_bufs,
+						GFP_KERNEL);
+	if (!tx_ring->exp_bufs) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	pdata->tx_ring = tx_ring;
 
 	if (!pdata->cq_cnt) {
@@ -818,6 +1021,16 @@
 		ret = -ENOMEM;
 		goto err;
 	}
+
+	size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
+	cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
+					      size, GFP_KERNEL);
+	if (!cp_ring->frag_dma_addr) {
+		devm_kfree(dev, cp_ring->cp_skb);
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	pdata->tx_ring->cp_ring = cp_ring;
 	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
 
@@ -905,40 +1118,6 @@
 	return ret;
 }
 
-static int xgene_get_mac_address(struct device *dev,
-				 unsigned char *addr)
-{
-	int ret;
-
-	ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
-	if (ret)
-		ret = device_property_read_u8_array(dev, "mac-address",
-						    addr, 6);
-	if (ret)
-		return -ENODEV;
-
-	return ETH_ALEN;
-}
-
-static int xgene_get_phy_mode(struct device *dev)
-{
-	int i, ret;
-	char *modestr;
-
-	ret = device_property_read_string(dev, "phy-connection-type",
-					  (const char **)&modestr);
-	if (ret)
-		ret = device_property_read_string(dev, "phy-mode",
-						  (const char **)&modestr);
-	if (ret)
-		return -ENODEV;
-
-	for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
-		if (!strcasecmp(modestr, phy_modes(i)))
-			return i;
-	}
-	return -ENODEV;
-}
 
 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 {
@@ -998,12 +1177,12 @@
 	if (ret)
 		return ret;
 
-	if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
+	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
 		eth_hw_addr_random(ndev);
 
 	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
 
-	pdata->phy_mode = xgene_get_phy_mode(dev);
+	pdata->phy_mode = device_get_phy_mode(dev);
 	if (pdata->phy_mode < 0) {
 		dev_err(dev, "Unable to get phy-connection-type\n");
 		return pdata->phy_mode;
@@ -1207,7 +1386,8 @@
 	xgene_enet_set_ethtool_ops(ndev);
 	ndev->features |= NETIF_F_IP_CSUM |
 			  NETIF_F_GSO |
-			  NETIF_F_GRO;
+			  NETIF_F_GRO |
+			  NETIF_F_SG;
 
 	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
 	if (of_id) {
@@ -1233,6 +1413,12 @@
 
 	xgene_enet_setup_ops(pdata);
 
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+		ndev->features |= NETIF_F_TSO;
+		pdata->mss = XGENE_ENET_MSS;
+	}
+	ndev->hw_features = ndev->features;
+
 	ret = register_netdev(ndev);
 	if (ret) {
 		netdev_err(ndev, "Failed to register netdev\n");
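
The descriptor layout above addresses at most 16 KB per buffer, so
xgene_enet_setup_tx_desc() carves any fragment larger than BUFLEN_16K into
16 KB slices before programming it. A minimal sketch of that carving,
detached from the descriptor bookkeeping (xgene_emit_slice() is a
hypothetical stand-in for filling one exp_desc/exp_bufs entry):

	/* Sketch: walk one DMA-mapped fragment in <= 16 KB slices,
	 * mirroring the split/offset handling in the loop above.
	 */
	static void xgene_split_frag(dma_addr_t pbuf_addr, u32 size)
	{
		u32 offset = 0, len;

		while (size) {
			len = min_t(u32, size, BUFLEN_16K);
			xgene_emit_slice(pbuf_addr + offset, len);
			offset += len;
			size -= len;
		}
	}
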
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 1c85fc8..50f92c3 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -40,8 +40,12 @@
 #define XGENE_DRV_VERSION	"v1.0"
 #define XGENE_ENET_MAX_MTU	1536
 #define SKB_BUFFER_SIZE		(XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
+#define BUFLEN_16K	(16 * 1024)
 #define NUM_PKT_BUF	64
 #define NUM_BUFPOOL	32
+#define MAX_EXP_BUFFS	256
+#define XGENE_ENET_MSS	1448
+#define XGENE_MIN_ENET_FRAME_SIZE	60
 
 #define START_CPU_BUFNUM_0	0
 #define START_ETH_BUFNUM_0	2
@@ -79,6 +83,7 @@
 	u16 num;
 	u16 head;
 	u16 tail;
+	u16 exp_buf_tail;
 	u16 slots;
 	u16 irq;
 	char irq_name[IRQ_ID_SIZE];
@@ -93,6 +98,7 @@
 	u8 nbufpool;
 	struct sk_buff *(*rx_skb);
 	struct sk_buff *(*cp_skb);
+	dma_addr_t *frag_dma_addr;
 	enum xgene_enet_ring_cfgsize cfgsize;
 	struct xgene_enet_desc_ring *cp_ring;
 	struct xgene_enet_desc_ring *buf_pool;
@@ -102,6 +108,7 @@
 		struct xgene_enet_raw_desc *raw_desc;
 		struct xgene_enet_raw_desc16 *raw_desc16;
 	};
+	__le64 *exp_bufs;
 };
 
 struct xgene_mac_ops {
@@ -112,6 +119,7 @@
 	void (*tx_disable)(struct xgene_enet_pdata *pdata);
 	void (*rx_disable)(struct xgene_enet_pdata *pdata);
 	void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
+	void (*set_mss)(struct xgene_enet_pdata *pdata);
 	void (*link_state)(struct work_struct *work);
 };
 
@@ -170,6 +178,7 @@
 	u8 eth_bufnum;
 	u8 bp_bufnum;
 	u16 ring_num;
+	u32 mss;
 };
 
 struct xgene_indirect_ctl {
@@ -204,6 +213,9 @@
 #define GET_VAL(field, src) \
 		xgene_enet_get_field_value(field ## _POS, field ## _LEN, src)
 
+#define GET_BIT(field, src) \
+		xgene_enet_get_field_value(field ## _POS, 1, src)
+
 static inline struct device *ndev_to_dev(struct net_device *ndev)
 {
 	return ndev->dev.parent;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 05edb84..7a28a48 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -184,6 +184,11 @@
 	xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
 }
 
+static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata)
+{
+	xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR, pdata->mss);
+}
+
 static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
 {
 	u32 data;
@@ -204,8 +209,8 @@
 	data &= ~HSTLENCHK;
 	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
 
-	xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR, 0x06000600);
 	xgene_xgmac_set_mac_addr(pdata);
+	xgene_xgmac_set_mss(pdata);
 
 	xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
 	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
@@ -329,6 +334,7 @@
 	.rx_disable = xgene_xgmac_rx_disable,
 	.tx_disable = xgene_xgmac_tx_disable,
 	.set_mac_addr = xgene_xgmac_set_mac_addr,
+	.set_mss = xgene_xgmac_set_mss,
 	.link_state = xgene_enet_link_state
 };
 
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index bf0a994..f8f908d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -62,7 +62,9 @@
 #define XCLE_BYPASS_REG0_ADDR           0x0160
 #define XCLE_BYPASS_REG1_ADDR           0x0164
 #define XG_CFG_BYPASS_ADDR		0x0204
+#define XG_CFG_LINK_AGGR_RESUME_0_ADDR	0x0214
 #define XG_LINK_STATUS_ADDR		0x0228
+#define XG_TSIF_MSS_REG0_ADDR		0x02a4
 #define XG_ENET_SPARE_CFG_REG_ADDR	0x040c
 #define XG_ENET_SPARE_CFG_REG_1_ADDR	0x0410
 #define XGENET_RX_DV_GATE_REG_0_ADDR	0x0804
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 932bd18..2795d6d 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -874,6 +874,8 @@
 		atl1c_clean_buffer(pdev, buffer_info);
 	}
 
+	netdev_reset_queue(adapter->netdev);
+
 	/* Zero out Tx-buffers */
 	memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
 		ring_count);
@@ -1551,6 +1553,7 @@
 	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
 	u16 hw_next_to_clean;
 	u16 reg;
+	unsigned int total_bytes = 0, total_packets = 0;
 
 	reg = type == atl1c_trans_high ? REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX;
 
@@ -1558,12 +1561,18 @@
 
 	while (next_to_clean != hw_next_to_clean) {
 		buffer_info = &tpd_ring->buffer_info[next_to_clean];
+		if (buffer_info->skb) {
+			total_bytes += buffer_info->skb->len;
+			total_packets++;
+		}
 		atl1c_clean_buffer(pdev, buffer_info);
 		if (++next_to_clean == tpd_ring->count)
 			next_to_clean = 0;
 		atomic_set(&tpd_ring->next_to_clean, next_to_clean);
 	}
 
+	netdev_completed_queue(adapter->netdev, total_packets, total_bytes);
+
 	if (netif_queue_stopped(adapter->netdev) &&
 			netif_carrier_ok(adapter->netdev)) {
 		netif_wake_queue(adapter->netdev);
@@ -2256,6 +2265,7 @@
 		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 		dev_kfree_skb_any(skb);
 	} else {
+		netdev_sent_queue(adapter->netdev, skb->len);
 		atl1c_tx_queue(adapter, skb, tpd, type);
 		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 	}
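
The three atl1c hunks above wire the driver into Byte Queue Limits: bytes
are accounted when a packet is queued, credited back as completions are
reaped, and the counters are reset when the ring is flushed. The generic
single-queue pattern, sketched with error paths omitted:

	/* Enqueue path: account the packet before handing it to hardware. */
	netdev_sent_queue(netdev, skb->len);

	/* Completion path: credit back everything reclaimed this pass. */
	netdev_completed_queue(netdev, total_packets, total_bytes);

	/* Ring flush/teardown: both counters must agree, so reset together. */
	netdev_reset_queue(netdev);
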
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 8be9eab..e930aa9 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -139,6 +139,16 @@
 	  Virtualization support in the 578xx and 57712 products. This
 	  allows for virtual function acceleration in virtual environments.
 
+config BNX2X_VXLAN
+	bool "Virtual eXtensible Local Area Network support"
+	default n
+	depends on BNX2X && VXLAN && !(BNX2X=y && VXLAN=m)
+	---help---
+	  This enables hardware offload support for the VXLAN protocol over the
+	  NetXtremeII series adapters.
+	  Say Y here if you want to enable hardware offload support for
+	  Virtual eXtensible Local Area Network (VXLAN) in the driver.
+
 config BGMAC
 	tristate "BCMA bus GBit core support"
 	depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 4566cdf..b9a5a97 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -933,6 +933,21 @@
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bcm_sysport_poll_controller(struct net_device *dev)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+	disable_irq(priv->irq0);
+	bcm_sysport_rx_isr(priv->irq0, priv);
+	enable_irq(priv->irq0);
+
+	disable_irq(priv->irq1);
+	bcm_sysport_tx_isr(priv->irq1, priv);
+	enable_irq(priv->irq1);
+}
+#endif
+
 static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
 					      struct net_device *dev)
 {
@@ -1723,6 +1738,9 @@
 	.ndo_set_features	= bcm_sysport_set_features,
 	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
 	.ndo_set_mac_address	= bcm_sysport_change_mac,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= bcm_sysport_poll_controller,
+#endif
 };
 
 #define REV_FMT	"v%2x.%02x"
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 21e3c38..28f7610 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1447,7 +1447,7 @@
 	struct phy_device *phy_dev;
 	int err;
 
-	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
 	if (!phy_dev || IS_ERR(phy_dev)) {
 		bgmac_err(bgmac, "Failed to register fixed PHY device\n");
 		return -ENODEV;
@@ -1549,11 +1549,20 @@
 	struct net_device *net_dev;
 	struct bgmac *bgmac;
 	struct ssb_sprom *sprom = &core->bus->sprom;
-	u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
+	u8 *mac;
 	int err;
 
-	/* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */
-	if (core->core_unit > 1) {
+	switch (core->core_unit) {
+	case 0:
+		mac = sprom->et0mac;
+		break;
+	case 1:
+		mac = sprom->et1mac;
+		break;
+	case 2:
+		mac = sprom->et2mac;
+		break;
+	default:
 		pr_err("Unsupported core_unit %d\n", core->core_unit);
 		return -ENOTSUPP;
 	}
@@ -1588,8 +1597,17 @@
 	}
 	bgmac->cmn = core->bus->drv_gmac_cmn.core;
 
-	bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
-			 sprom->et0phyaddr;
+	switch (core->core_unit) {
+	case 0:
+		bgmac->phyaddr = sprom->et0phyaddr;
+		break;
+	case 1:
+		bgmac->phyaddr = sprom->et1phyaddr;
+		break;
+	case 2:
+		bgmac->phyaddr = sprom->et2phyaddr;
+		break;
+	}
 	bgmac->phyaddr &= BGMAC_PHY_MASK;
 	if (bgmac->phyaddr == BGMAC_PHY_MASK) {
 		bgmac_err(bgmac, "No PHY found\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index cd4ae76..ba93663 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1,6 +1,8 @@
-/* bnx2x.h: Broadcom Everest network driver.
+/* bnx2x.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -30,7 +32,7 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.710.51-0"
+#define DRV_MODULE_VERSION      "1.712.30-0"
 #define DRV_MODULE_RELDATE      "2014/02/10"
 #define BNX2X_BC_VER            0x040200
 
@@ -1227,6 +1229,10 @@
 	} mac_rdata;
 
 	union {
+		struct eth_classify_rules_ramrod_data	e2;
+	} vlan_rdata;
+
+	union {
 		struct tstorm_eth_mac_filter_config	e1x;
 		struct eth_filter_rules_ramrod_data	e2;
 	} rx_mode_rdata;
@@ -1386,6 +1392,8 @@
 	BNX2X_SP_RTNL_HYPERVISOR_VLAN,
 	BNX2X_SP_RTNL_TX_STOP,
 	BNX2X_SP_RTNL_GET_DRV_VERSION,
+	BNX2X_SP_RTNL_ADD_VXLAN_PORT,
+	BNX2X_SP_RTNL_DEL_VXLAN_PORT,
 };
 
 enum bnx2x_iov_flag {
@@ -1408,6 +1416,9 @@
 
 	/* Queue State object */
 	struct bnx2x_queue_sp_obj q_obj;
+
+	/* VLANs object */
+	struct bnx2x_vlan_mac_obj vlan_obj;
 };
 
 struct bnx2x_fp_stats {
@@ -1422,6 +1433,13 @@
 	SUB_MF_MODE_UNKNOWN = 0,
 	SUB_MF_MODE_UFP,
 	SUB_MF_MODE_NPAR1_DOT_5,
+	SUB_MF_MODE_BD,
+};
+
+struct bnx2x_vlan_entry {
+	struct list_head link;
+	u16 vid;
+	bool hw;
 };
 
 struct bnx2x {
@@ -1636,6 +1654,8 @@
 	u8			mf_sub_mode;
 #define IS_MF_UFP(bp)		(IS_MF_SD(bp) && \
 				 bp->mf_sub_mode == SUB_MF_MODE_UFP)
+#define IS_MF_BD(bp)		(IS_MF_SD(bp) && \
+				 bp->mf_sub_mode == SUB_MF_MODE_BD)
 
 	u8			wol;
 
@@ -1860,8 +1880,6 @@
 	int					dcb_version;
 
 	/* CAM credit pools */
-
-	/* used only in sriov */
 	struct bnx2x_credit_pool_obj		vlans_pool;
 
 	struct bnx2x_credit_pool_obj		macs_pool;
@@ -1924,6 +1942,11 @@
 	u16 rx_filter;
 
 	struct bnx2x_link_report_data		vf_link_vars;
+	struct list_head vlan_reg;
+	u16 vlan_cnt;
+	u16 vlan_credit;
+	u16 vxlan_dst_port;
+	bool accept_any_vlan;
 };
 
 /* Tx queues may be less or equal to Rx queues */
@@ -1951,23 +1974,14 @@
 #define RSS_IPV6_TCP_CAP_MASK						\
 	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
 
-/* func init flags */
-#define FUNC_FLG_RSS		0x0001
-#define FUNC_FLG_STATS		0x0002
-/* removed  FUNC_FLG_UNMATCHED	0x0004 */
-#define FUNC_FLG_TPA		0x0008
-#define FUNC_FLG_SPQ		0x0010
-#define FUNC_FLG_LEADING	0x0020	/* PF only */
-#define FUNC_FLG_LEADING_STATS	0x0040
 struct bnx2x_func_init_params {
 	/* dma */
-	dma_addr_t	fw_stat_map;	/* valid iff FUNC_FLG_STATS */
-	dma_addr_t	spq_map;	/* valid iff FUNC_FLG_SPQ */
+	bool		spq_active;
+	dma_addr_t	spq_map;
+	u16		spq_prod;
 
-	u16		func_flgs;
 	u16		func_id;	/* abs fid */
 	u16		pf_id;
-	u16		spq_prod;	/* valid iff FUNC_FLG_SPQ */
 };
 
 #define for_each_cnic_queue(bp, var) \
@@ -2077,6 +2091,11 @@
 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
 		      struct bnx2x_vlan_mac_obj *obj, bool set,
 		      int mac_type, unsigned long *ramrod_flags);
+
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+		       struct bnx2x_vlan_mac_obj *obj, bool set,
+		       unsigned long *ramrod_flags);
+
 /**
  * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
  *
@@ -2481,6 +2500,7 @@
 #define VF_ACQUIRE_THRESH		3
 #define VF_ACQUIRE_MAC_FILTERS		1
 #define VF_ACQUIRE_MC_FILTERS		10
+#define VF_ACQUIRE_VLAN_FILTERS		2 /* VLAN0 + 'real' VLAN */
 
 #define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
 			    (!((me_reg) & ME_REG_VF_ERR)))
@@ -2553,6 +2573,10 @@
 			(IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) ||	\
 			 IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp))
 
+/* Determines whether BW configuration arrives in 100Mb units or in
+ * percentages from actual physical link speed.
+ */
+#define IS_MF_PERCENT_BW(bp) (IS_MF_SI(bp) || IS_MF_UFP(bp) || IS_MF_BD(bp))
 
 #define SET_FLAG(value, mask, flag) \
 	do {\
@@ -2577,6 +2601,8 @@
 
 void bnx2x_update_mng_version(struct bnx2x *bp);
 
+void bnx2x_update_mfw_dump(struct bnx2x *bp);
+
 #define MCPR_SCRATCH_BASE(bp) \
 	(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
 
@@ -2589,4 +2615,9 @@
 #define BNX2X_MAX_PHC_DRIFT 31000000
 #define BNX2X_PTP_TX_TIMEOUT
 
+/* Re-configure all previously configured vlan filters.
+ * Meant for implicit re-load flows.
+ */
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
+
 #endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index f7fbdc9..44173be 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1,6 +1,8 @@
-/* bnx2x_cmn.c: Broadcom Everest network driver.
+/* bnx2x_cmn.c: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1188,7 +1190,7 @@
 		/* Calculate the current MAX line speed limit for the MF
 		 * devices
 		 */
-		if (IS_MF_SI(bp))
+		if (IS_MF_PERCENT_BW(bp))
 			line_speed = (line_speed * maxCfg) / 100;
 		else { /* SD mode */
 			u16 vn_max_rate = maxCfg * 100;
@@ -2103,9 +2105,14 @@
 		if (rss_obj->udp_rss_v6)
 			__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
 
-		if (!CHIP_IS_E1x(bp))
+		if (!CHIP_IS_E1x(bp)) {
+			/* valid only for TUNN_MODE_VXLAN tunnel mode */
+			__set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
+			__set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
+
 			/* valid only for TUNN_MODE_GRE tunnel mode */
-			__set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
+			__set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
+		}
 	} else {
 		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
 	}
@@ -2510,6 +2517,20 @@
 		fp->mode = TPA_MODE_DISABLED;
 }
 
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
+{
+	u32 cur;
+
+	if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
+		return;
+
+	cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
+	DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
+	   cur, state);
+
+	SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
+}
+
 int bnx2x_load_cnic(struct bnx2x *bp)
 {
 	int i, rc, port = BP_PORT(bp);
@@ -2827,6 +2848,11 @@
 
 	/* Start fast path */
 
+	/* Re-configure vlan filters */
+	rc = bnx2x_vlan_reconfigure_vid(bp);
+	if (rc)
+		LOAD_ERROR_EXIT(bp, load_error3);
+
 	/* Initialize Rx filter. */
 	bnx2x_set_rx_mode_inner(bp);
 
@@ -2873,6 +2899,8 @@
 		/* mark driver is loaded in shmem2 */
 		u32 val;
 		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+		val &= ~DRV_FLAGS_MTU_MASK;
+		val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
 		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
 			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
 			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
@@ -2885,10 +2913,17 @@
 		return -EBUSY;
 	}
 
+	/* Update driver data for On-Chip MFW dump. */
+	if (IS_PF(bp))
+		bnx2x_update_mfw_dump(bp);
+
 	/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
 	if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
 		bnx2x_dcbx_init(bp, false);
 
+	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
+
 	DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
 
 	return 0;
@@ -2956,6 +2991,9 @@
 
 	DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
 
+	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
 	/* mark driver is unloaded in shmem2 */
 	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
 		u32 val;
@@ -3677,7 +3715,7 @@
 		pbd2->fw_ip_hdr_to_payload_w =
 			hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
 		pbd_e2->data.tunnel_data.flags |=
-			ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
+			ETH_TUNNEL_DATA_IPV6_OUTER;
 	}
 
 	pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
@@ -4184,6 +4222,41 @@
 	return NETDEV_TX_OK;
 }
 
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
+{
+	int mfw_vn = BP_FW_MB_IDX(bp);
+	u32 tmp;
+
+	/* If the shmem shouldn't affect the configuration, use an identity map */
+	if (!IS_MF_BD(bp)) {
+		int i;
+
+		for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
+			c2s_map[i] = i;
+		*c2s_default = 0;
+
+		return;
+	}
+
+	tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
+	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+	c2s_map[0] = tmp & 0xff;
+	c2s_map[1] = (tmp >> 8) & 0xff;
+	c2s_map[2] = (tmp >> 16) & 0xff;
+	c2s_map[3] = (tmp >> 24) & 0xff;
+
+	tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
+	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+	c2s_map[4] = tmp & 0xff;
+	c2s_map[5] = (tmp >> 8) & 0xff;
+	c2s_map[6] = (tmp >> 16) & 0xff;
+	c2s_map[7] = (tmp >> 24) & 0xff;
+
+	tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
+	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+	*c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
+}
+
 /**
  * bnx2x_setup_tc - routine to configure net_device for multi tc
  *
@@ -4194,8 +4267,9 @@
  */
 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
 {
-	int cos, prio, count, offset;
 	struct bnx2x *bp = netdev_priv(dev);
+	u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
+	int cos, prio, count, offset;
 
 	/* setup tc must be called under rtnl lock */
 	ASSERT_RTNL();
@@ -4219,12 +4293,16 @@
 		return -EINVAL;
 	}
 
+	bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
+
 	/* configure priority to traffic class mapping */
 	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
-		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
+		int outer_prio = c2s_map[prio];
+
+		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
 		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
 		   "mapping priority %d to tc %d\n",
-		   prio, bp->prio_to_cos[prio]);
+		   outer_prio, bp->prio_to_cos[outer_prio]);
 	}
 
 	/* Use this configuration to differentiate tc0 from other COSes
@@ -4278,6 +4356,9 @@
 	if (netif_running(dev))
 		rc = bnx2x_set_eth_mac(bp, true);
 
+	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
 	return rc;
 }
 
@@ -4831,6 +4912,9 @@
 	 */
 	dev->mtu = new_mtu;
 
+	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
 	return bnx2x_reload_if_running(dev);
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 03b7404..b7d32e8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1,6 +1,8 @@
-/* bnx2x_cmn.h: Broadcom Everest network driver.
+/* bnx2x_cmn.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -620,6 +622,14 @@
  */
 void bnx2x_tx_timeout(struct net_device *dev);
 
+/**
+ * bnx2x_get_c2s_mapping - read inner-to-outer vlan configuration
+ * @bp:			driver handle
+ * @c2s_map:		should have BNX2X_MAX_PRIORITY entries for mapping
+ * @c2s_default:	entry for non-tagged configuration
+ */
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default);
+
 /*********************** Inlines **********************************/
 /*********************** Fast path ********************************/
 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
@@ -931,14 +941,35 @@
 	start_params->mf_mode = bp->mf_mode;
 	start_params->sd_vlan_tag = bp->mf_ov;
 
+	/* Configure Ethertype for BD mode */
+	if (IS_MF_BD(bp)) {
+		DP(NETIF_MSG_IFUP, "Configuring ethertype 0x88a8 for BD\n");
+		start_params->sd_vlan_eth_type = ETH_P_8021AD;
+		REG_WR(bp, PRS_REG_VLAN_TYPE_0, ETH_P_8021AD);
+		REG_WR(bp, PBF_REG_VLAN_TYPE_0, ETH_P_8021AD);
+		REG_WR(bp, NIG_REG_LLH_E1HOV_TYPE_1, ETH_P_8021AD);
+
+		bnx2x_get_c2s_mapping(bp, start_params->c2s_pri,
+				      &start_params->c2s_pri_default);
+		start_params->c2s_pri_valid = 1;
+
+		DP(NETIF_MSG_IFUP,
+		   "Inner-to-Outer priority: %02x %02x %02x %02x %02x %02x %02x %02x [Default %02x]\n",
+		   start_params->c2s_pri[0], start_params->c2s_pri[1],
+		   start_params->c2s_pri[2], start_params->c2s_pri[3],
+		   start_params->c2s_pri[4], start_params->c2s_pri[5],
+		   start_params->c2s_pri[6], start_params->c2s_pri[7],
+		   start_params->c2s_pri_default);
+	}
+
 	if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
 		start_params->network_cos_mode = STATIC_COS;
 	else /* CHIP_IS_E1X */
 		start_params->network_cos_mode = FW_WRR;
 
-	start_params->tunnel_mode	= TUNN_MODE_GRE;
-	start_params->gre_tunnel_type	= IPGRE_TUNNEL;
-	start_params->inner_gre_rss_en	= 1;
+	start_params->vxlan_dst_port = bp->vxlan_dst_port;
+
+	start_params->inner_rss = 1;
 
 	if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
 		start_params->class_fail_ethtype = ETH_P_FIP;
@@ -1037,6 +1068,15 @@
 			   BNX2X_FILTER_MAC_PENDING,
 			   &bp->sp_state, obj_type,
 			   &bp->macs_pool);
+
+	if (!CHIP_IS_E1x(bp))
+		bnx2x_init_vlan_obj(bp, &bnx2x_sp_obj(bp, fp).vlan_obj,
+				    fp->cl_id, fp->cid, BP_FUNC(bp),
+				    bnx2x_sp(bp, vlan_rdata),
+				    bnx2x_sp_mapping(bp, vlan_rdata),
+				    BNX2X_FILTER_VLAN_PENDING,
+				    &bp->sp_state, obj_type,
+				    &bp->vlans_pool);
 }
 
 /**
@@ -1096,7 +1136,7 @@
 	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
 				   bnx2x_get_path_func_num(bp));
 
-	bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1,
+	bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_FUNC(bp),
 				    bnx2x_get_path_func_num(bp));
 
 	/* RSS configuration object */
@@ -1106,6 +1146,8 @@
 				  bnx2x_sp_mapping(bp, rss_rdata),
 				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
 				  BNX2X_OBJ_TYPE_RX);
+
+	bp->vlan_credit = PF_VLAN_CREDIT_E2(bp, bnx2x_get_path_func_num(bp));
 }
 
 static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
@@ -1339,4 +1381,23 @@
 void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
 			    u32 verbose);
 
+/**
+ * bnx2x_set_os_driver_state - write driver state for management FW usage
+ *
+ * @bp:		driver handle
+ * @state:	OS_DRIVER_STATE_* value reflecting current driver state
+ */
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state);
+
+/**
+ * bnx2x_nvram_read - reads data from nvram [might sleep]
+ *
+ * @bp:		driver handle
+ * @offset:	byte offset in nvram
+ * @ret_buf:	pointer to buffer where data is to be stored
+ * @buf_size:   length of 'ret_buf' in bytes
+ */
+int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+		     int buf_size);
+
 #endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 6e4294e..7ccf668 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1,15 +1,17 @@
-/* bnx2x_dcb.c: Broadcom Everest network driver.
+/* bnx2x_dcb.c: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -1850,6 +1852,8 @@
 			if (bp->dcbx_port_params.ets.cos_params[cos].
 						pri_bitmask & pri_bit)
 					tt2cos[pri].cos = cos;
+
+		pfc_fw_cfg->dcb_outer_pri[pri] = ttp[pri];
 	}
 
 	/* we never want the FW to add a 0 vlan tag */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index c6939ec..9a9517c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -1,15 +1,17 @@
-/* bnx2x_dcb.h: Broadcom Everest network driver.
+/* bnx2x_dcb.h: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
index 741aa13..eccfa13 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -1,15 +1,17 @@
-/* bnx2x_dump.h: Broadcom Everest network driver.
+/* bnx2x_dump.h: QLogic Everest network driver.
  *
  * Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  */
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 5907c82..aeb7ce6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1,6 +1,8 @@
-/* bnx2x_ethtool.c: Broadcom Everest network driver.
+/* bnx2x_ethtool.c: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1129,6 +1131,9 @@
 	} else
 		bp->wol = 0;
 
+	if (SHMEM2_HAS(bp, curr_cfg))
+		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
 	return 0;
 }
 
@@ -1343,8 +1348,8 @@
 	return rc;
 }
 
-static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
-			    int buf_size)
+int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+		     int buf_size)
 {
 	int rc;
 	u32 cmd_flags;
@@ -3578,17 +3583,8 @@
 
 		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
 				   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
-				   (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-				   (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
 				   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-				   (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-				   (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
-				   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-				   (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-				   (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-				   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-				   (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-				   (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+				   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 
 		info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 7636e3c..226ab29 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -1,6 +1,8 @@
-/* bnx2x_fw_defs.h: Broadcom Everest network driver.
+/* bnx2x_fw_defs.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -372,7 +374,7 @@
 #define MAX_COS_NUMBER 4
 #define MAX_TRAFFIC_TYPES 8
 #define MAX_PFC_PRIORITIES 8
-
+#define MAX_VLAN_PRIORITIES 8
 	/* used by array traffic_type_to_priority[] to mark traffic type \
 	that is not mapped to priority*/
 #define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index 8aafd9b..9e3b5a1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -1,6 +1,8 @@
 /* bnx2x_fw_file_hdr.h: FW binary file header structure.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 058bc73..cafd5de 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1,6 +1,8 @@
-/* bnx2x_hsi.h: Broadcom Everest network driver.
+/* bnx2x_hsi.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -729,6 +731,7 @@
 		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722       0x00000f00
 		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616      0x00001000
 		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834      0x00001100
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84858      0x00001200
 		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE       0x0000fd00
 		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN      0x0000ff00
 
@@ -786,6 +789,7 @@
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722        0x00000f00
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616       0x00001000
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834       0x00001100
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858       0x00001200
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC      0x0000fc00
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE        0x0000fd00
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN       0x0000ff00
@@ -864,6 +868,7 @@
 		#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4          0x00000200
 		#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT  0x00000300
 		#define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE      0x00000400
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE        0x00000500
 		#define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE       0x00000600
 		#define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE  0x00000700
 
@@ -2064,6 +2069,45 @@
 	#define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET          0
 };
 
+enum curr_cfg_method_e {
+	CURR_CFG_MET_NONE = 0,  /* default config */
+	CURR_CFG_MET_OS = 1,
+	CURR_CFG_MET_VENDOR_SPEC = 2, /* e.g. Option ROM, NPAR, O/S Cfg Utils */
+};
+
+#define FC_NPIV_WWPN_SIZE 8
+#define FC_NPIV_WWNN_SIZE 8
+struct bdn_npiv_settings {
+	u8 npiv_wwpn[FC_NPIV_WWPN_SIZE];
+	u8 npiv_wwnn[FC_NPIV_WWNN_SIZE];
+};
+
+struct bdn_fc_npiv_cfg {
+	/* hdr used internally by the MFW */
+	u32 hdr;
+	u32 num_of_npiv;
+};
+
+#define MAX_NUMBER_NPIV 64
+struct bdn_fc_npiv_tbl {
+	struct bdn_fc_npiv_cfg fc_npiv_cfg;
+	struct bdn_npiv_settings settings[MAX_NUMBER_NPIV];
+};
+
+struct mdump_driver_info {
+	u32 epoc;
+	u32 drv_ver;
+	u32 fw_ver;
+
+	u32 valid_dump;
+	#define FIRST_DUMP_VALID        (1 << 0)
+	#define SECOND_DUMP_VALID       (1 << 1)
+
+	u32 flags;
+	#define ENABLE_ALL_TRIGGERS     (0x7fffffff)
+	#define TRIGGER_MDUMP_ONCE      (1 << 31)
+};
+
 struct ncsi_oem_data {
 	u32 driver_version[4];
 	struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features;
@@ -2187,6 +2231,8 @@
 #define DRV_FLAGS_CAPABILITIES_LOADED_L2        0x00000002
 #define DRV_FLAGS_CAPABILITIES_LOADED_FCOE      0x00000004
 #define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI     0x00000008
+#define DRV_FLAGS_MTU_MASK			0xffff0000
+#define DRV_FLAGS_MTU_SHIFT			16
 
 	u32 extended_dev_info_shared_cfg_size;
 
@@ -2251,6 +2297,7 @@
 	u32 reserved4;				/* Offset 0x150 */
 	u32 link_attr_sync[PORT_MAX];		/* Offset 0x154 */
 	#define LINK_ATTR_SYNC_KR2_ENABLE	0x00000001
+	#define LINK_ATTR_84858			0x00000002
 	#define LINK_SFP_EEPROM_COMP_CODE_MASK	0x0000ff00
 	#define LINK_SFP_EEPROM_COMP_CODE_SHIFT		 8
 	#define LINK_SFP_EEPROM_COMP_CODE_SR	0x00001000
@@ -2268,6 +2315,74 @@
 
 	/* We use indication for each PF (0..3) */
 #define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
+	union { /* For various OEMs */			/* Offset 0x1a0 */
+		u8 storage_boot_prog[E2_FUNC_MAX];
+	#define STORAGE_BOOT_PROG_MASK				0x000000FF
+	#define STORAGE_BOOT_PROG_NONE				0x00000000
+	#define STORAGE_BOOT_PROG_ISCSI_IP_ACQUIRED		0x00000002
+	#define STORAGE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS	0x00000002
+	#define STORAGE_BOOT_PROG_TARGET_FOUND			0x00000004
+	#define STORAGE_BOOT_PROG_ISCSI_CHAP_SUCCESS		0x00000008
+	#define STORAGE_BOOT_PROG_FCOE_LUN_FOUND		0x00000008
+	#define STORAGE_BOOT_PROG_LOGGED_INTO_TGT		0x00000010
+	#define STORAGE_BOOT_PROG_IMG_DOWNLOADED		0x00000020
+	#define STORAGE_BOOT_PROG_OS_HANDOFF			0x00000040
+	#define STORAGE_BOOT_PROG_COMPLETED			0x00000080
+
+		u32 oem_i2c_data_addr;
+	};
+
+	/* 9 entries for the C2S PCP map for each inner VLAN PCP + 1 default */
+	/* For PCP values 0-3 use the map lower */
+	/* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1,
+	 * 0x0000FF00 - PCP 2, 0x000000FF - PCP 3
+	 */
+	u32 c2s_pcp_map_lower[E2_FUNC_MAX];			/* 0x1a4 */
+
+	/* For PCP values 4-7 use the map upper */
+	/* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5,
+	 * 0x0000FF00 - PCP 6, 0x000000FF - PCP 7
+	 */
+	u32 c2s_pcp_map_upper[E2_FUNC_MAX];			/* 0x1b4 */
+
+	/* For PCP default value get the MSB byte of the map default */
+	u32 c2s_pcp_map_default[E2_FUNC_MAX];			/* 0x1c4 */
+
+	/* FC_NPIV table offset in NVRAM */
+	u32 fc_npiv_nvram_tbl_addr[PORT_MAX];			/* 0x1d4 */
+
+	/* Shows last method that changed configuration of this device */
+	enum curr_cfg_method_e curr_cfg;			/* 0x1dc */
+
+	/* Storm FW version, should be kept in the format 0xMMmmbbdd:
+	 * MM - Major, mm - Minor, bb - Build, dd - Drop
+	 */
+	u32 netproc_fw_ver;					/* 0x1e0 */
+
+	/* Option ROM SMASH CLP version */
+	u32 clp_ver;						/* 0x1e4 */
+
+	u32 pcie_bus_num;					/* 0x1e8 */
+
+	u32 sriov_switch_mode;					/* 0x1ec */
+	#define SRIOV_SWITCH_MODE_NONE		0x0
+	#define SRIOV_SWITCH_MODE_VEB		0x1
+	#define SRIOV_SWITCH_MODE_VEPA		0x2
+
+	u8  rsrv2[E2_FUNC_MAX];					/* 0x1f0 */
+
+	u32 img_inv_table_addr;	/* Address to INV_TABLE_P */	/* 0x1f4 */
+
+	u32 mtu_size[E2_FUNC_MAX];				/* 0x1f8 */
+
+	u32 os_driver_state[E2_FUNC_MAX];			/* 0x208 */
+	#define OS_DRIVER_STATE_NOT_LOADED	0 /* not installed */
+	#define OS_DRIVER_STATE_LOADING		1 /* transition state */
+	#define OS_DRIVER_STATE_DISABLED	2 /* installed but disabled */
+	#define OS_DRIVER_STATE_ACTIVE		3 /* installed and active */
+
+	/* mini dump driver info */
+	struct mdump_driver_info drv_info;			/* 0x218 */
 };
 
 
@@ -2898,8 +3013,8 @@
 };
 
 #define BCM_5710_FW_MAJOR_VERSION			7
-#define BCM_5710_FW_MINOR_VERSION			10
-#define BCM_5710_FW_REVISION_VERSION		51
+#define BCM_5710_FW_MINOR_VERSION			12
+#define BCM_5710_FW_REVISION_VERSION		30
 #define BCM_5710_FW_ENGINEERING_VERSION		0
 #define BCM_5710_FW_COMPILE_FLAGS			1
 
@@ -3901,7 +4016,11 @@
 	__le16 len_on_bd;
 	struct parsing_flags pars_flags;
 	union eth_sgl_or_raw_data sgl_or_raw_data;
-	__le32 reserved1[7];
+	u8 tunn_type;
+	u8 tunn_inner_hdrs_offset;
+	__le16 reserved1;
+	__le32 tunn_tenant_id;
+	__le32 padding[5];
 	u32 marker;
 };
 
@@ -4012,8 +4131,8 @@
 	__le16 pseudo_csum;
 	u8 ip_hdr_start_inner_w;
 	u8 flags;
-#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0)
-#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0
+#define ETH_TUNNEL_DATA_IPV6_OUTER (0x1<<0)
+#define ETH_TUNNEL_DATA_IPV6_OUTER_SHIFT 0
 #define ETH_TUNNEL_DATA_RESERVED (0x7F<<1)
 #define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
 };
@@ -4120,16 +4239,12 @@
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7)
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8)
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8
-#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9)
-#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9
-#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10)
-#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11)
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11
-#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12)
-#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY (0x1<<8)
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY_SHIFT 8
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<9)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 9
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0x3F<<10)
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 10
 	u8 rss_result_mask;
 	u8 reserved3;
 	__le16 reserved4;
@@ -4314,6 +4429,18 @@
 	MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION
 };
 
+enum eth_tunn_type {
+	TUNN_TYPE_NONE,
+	TUNN_TYPE_VXLAN,
+	TUNN_TYPE_L2_GRE,
+	TUNN_TYPE_IPV4_GRE,
+	TUNN_TYPE_IPV6_GRE,
+	TUNN_TYPE_L2_GENEVE,
+	TUNN_TYPE_IPV4_GENEVE,
+	TUNN_TYPE_IPV6_GENEVE,
+	MAX_ETH_TUNN_TYPE
+};
+
 /*
  * Tx regular BD structure
  */
@@ -4758,6 +4885,9 @@
 	__le16 reserved1;
 };
 
+struct c2s_pri_trans_table_entry {
+	u8 val[MAX_VLAN_PRIORITIES];
+};
 
 /*
  * cfc delete event data
@@ -5246,6 +5376,7 @@
 	u8 dont_add_pri_0_en;
 	u8 reserved1;
 	__le32 reserved2;
+	u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
 };
 
 
@@ -5260,18 +5391,25 @@
 	u8 path_id;
 	u8 network_cos_mode;
 	u8 dmae_cmd_id;
-	u8 tunnel_mode;
-	u8 gre_tunnel_type;
-	u8 tunn_clss_en;
-	u8 inner_gre_rss_en;
-	u8 sd_accept_mf_clss_fail;
+	u8 no_added_tags;
+	__le16 reserved0;
+	__le32 reserved1;
+	u8 inner_clss_vxlan;
+	u8 inner_clss_l2gre;
+	u8 inner_clss_l2geneve;
+	u8 inner_rss;
 	__le16 vxlan_dst_port;
+	__le16 geneve_dst_port;
+	u8 sd_accept_mf_clss_fail;
+	u8 sd_accept_mf_clss_fail_match_ethtype;
 	__le16 sd_accept_mf_clss_fail_ethtype;
 	__le16 sd_vlan_eth_type;
 	u8 sd_vlan_force_pri_flg;
 	u8 sd_vlan_force_pri_val;
-	u8 sd_accept_mf_clss_fail_match_ethtype;
-	u8 no_added_tags;
+	u8 c2s_pri_tt_valid;
+	u8 c2s_pri_default;
+	u8 reserved2[6];
+	struct c2s_pri_trans_table_entry c2s_pri_trans_table;
 };
 
 struct function_update_data {
@@ -5289,11 +5427,12 @@
 	u8 tx_switch_suspend;
 	u8 echo;
 	u8 update_tunn_cfg_flg;
-	u8 tunnel_mode;
-	u8 gre_tunnel_type;
-	u8 tunn_clss_en;
-	u8 inner_gre_rss_en;
+	u8 inner_clss_vxlan;
+	u8 inner_clss_l2gre;
+	u8 inner_clss_l2geneve;
+	u8 inner_rss;
 	__le16 vxlan_dst_port;
+	__le16 geneve_dst_port;
 	u8 sd_vlan_force_pri_change_flg;
 	u8 sd_vlan_force_pri_flg;
 	u8 sd_vlan_force_pri_val;
@@ -5302,6 +5441,8 @@
 	u8 reserved1;
 	__le16 sd_vlan_tag;
 	__le16 sd_vlan_eth_type;
+	__le16 reserved0;
+	__le32 reserved2;
 };
 
 /*
@@ -5330,15 +5471,6 @@
 #define __FW_VERSION_RESERVED_SHIFT 4
 };
 
-
-/* GRE Tunnel Mode */
-enum gre_tunnel_type {
-	NVGRE_TUNNEL,
-	L2GRE_TUNNEL,
-	IPGRE_TUNNEL,
-	MAX_GRE_TUNNEL_TYPE
-};
-
 /*
  * Dynamic Host-Coalescing - Driver(host) counters
  */
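
The new DRV_FLAGS_MTU_MASK/DRV_FLAGS_MTU_SHIFT pair lets a driver fold its
current MTU into the upper half of drv_capabilities_flag, as the
bnx2x_nic_load() hunk earlier in this patch does. Encode and decode are
plain mask-and-shift (a sketch; only the encode appears in the driver):

	/* Encode: clear the MTU field, then merge in the current MTU. */
	val &= ~DRV_FLAGS_MTU_MASK;
	val |= bp->dev->mtu << DRV_FLAGS_MTU_SHIFT;

	/* Decode: recover the MTU a driver advertised. */
	mtu = (val & DRV_FLAGS_MTU_MASK) >> DRV_FLAGS_MTU_SHIFT;
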
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index d6e1975..46ee2c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -1,7 +1,9 @@
-/* bnx2x_init.h: Broadcom Everest network driver.
+/* bnx2x_init.h: QLogic Everest network driver.
  *               Structures and macroes needed during the initialization.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 5669ed2..1835d2e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -1,8 +1,10 @@
-/* bnx2x_init_ops.h: Broadcom Everest network driver.
+/* bnx2x_init_ops.h: QLogic Everest network driver.
  *               Static functions needed during the initialization.
  *               This file is "included" in bnx2x_main.c.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index a0b03c2..d946bba 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -1,13 +1,15 @@
 /* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Written by Yaniv Rosner
@@ -9652,6 +9654,13 @@
 /******************************************************************/
 /*		BCM8481/BCM84823/BCM84833 PHY SECTION	          */
 /******************************************************************/
+static int bnx2x_is_8483x_8485x(struct bnx2x_phy *phy)
+{
+	return ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+		(phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) ||
+		(phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858));
+}
+
 static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 					    struct bnx2x *bp,
 					    u8 port)
@@ -9666,8 +9675,7 @@
 	};
 	u16 fw_ver1;
 
-	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-	    (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+	if (bnx2x_is_8483x_8485x(phy)) {
 		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
 		bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
 				phy->ver_addr);
@@ -9749,8 +9757,7 @@
 		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
 				 reg_set[i].val);
 
-	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-	    (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
+	if (bnx2x_is_8483x_8485x(phy))
 		offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
 	else
 		offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
@@ -9768,8 +9775,7 @@
 	struct bnx2x *bp = params->bp;
 	switch (action) {
 	case PHY_INIT:
-		if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
-		    (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+		if (!bnx2x_is_8483x_8485x(phy)) {
 			/* Save spirom version */
 			bnx2x_save_848xx_spirom_version(phy, bp, params->port);
 		}
@@ -9901,8 +9907,7 @@
 	/* Always write this if this is not 84833/4.
 	 * For 84833/4, write it only when it's a forced speed.
 	 */
-	if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
-	     (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) ||
+	if (!bnx2x_is_8483x_8485x(phy) ||
 	    ((autoneg_val & (1<<12)) == 0))
 		bnx2x_cl45_write(bp, phy,
 			 MDIO_AN_DEVAD,
@@ -9949,8 +9954,86 @@
 	return bnx2x_848xx_cmn_config_init(phy, params, vars);
 }
 
-#define PHY84833_CMDHDLR_WAIT 300
-#define PHY84833_CMDHDLR_MAX_ARGS 5
+#define PHY848xx_CMDHDLR_WAIT 300
+#define PHY848xx_CMDHDLR_MAX_ARGS 5
+
+static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy,
+				struct link_params *params,
+				u16 fw_cmd,
+				u16 cmd_args[], int argc)
+{
+	int idx;
+	u16 val;
+	struct bnx2x *bp = params->bp;
+
+	/* Step 1: Poll the STATUS register to see whether the previous command
+	 * is in progress or the system is busy (CMD_IN_PROGRESS or
+	 * SYSTEM_BUSY). If the previous command is in progress or the system
+	 * is busy, poll again until it finishes and the system can accept a
+	 * new command.
+	 */
+
+	for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_848xx_CMD_HDLR_STATUS, &val);
+		if ((val != PHY84858_STATUS_CMD_IN_PROGRESS) &&
+		    (val != PHY84858_STATUS_CMD_SYSTEM_BUSY))
+			break;
+		usleep_range(1000, 2000);
+	}
+	if (idx >= PHY848xx_CMDHDLR_WAIT) {
+		DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
+		return -EINVAL;
+	}
+
+	/* Step2: If any parameters are required for the function, write them
+	 * to the required DATA registers
+	 */
+
+	for (idx = 0; idx < argc; idx++) {
+		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+				 MDIO_848xx_CMD_HDLR_DATA1 + idx,
+				 cmd_args[idx]);
+	}
+
+	/* Step3: When the firmware is ready for commands, write the 'Command
+	 * code' to the CMD register
+	 */
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			 MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+
+	/* Step4: Once the command has been written, poll the STATUS register
+	 * to check whether the command has completed (CMD_COMPLETE_PASS or
+	 * CMD_COMPLETE_ERROR).
+	 */
+
+	for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_848xx_CMD_HDLR_STATUS, &val);
+		if ((val == PHY84858_STATUS_CMD_COMPLETE_PASS) ||
+		    (val == PHY84858_STATUS_CMD_COMPLETE_ERROR))
+			break;
+		usleep_range(1000, 2000);
+	}
+	if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+	    (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) {
+		DP(NETIF_MSG_LINK, "FW cmd failed.\n");
+		return -EINVAL;
+	}
+	/* Step5: Once the command has completed, read the specified DATA
+	 * registers for any results the command saved, if applicable
+	 */
+
+	/* Gather returning data */
+	for (idx = 0; idx < argc; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_848xx_CMD_HDLR_DATA1 + idx,
+				&cmd_args[idx]);
+	}
+
+	return 0;
+}
+
 static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
 				struct link_params *params, u16 fw_cmd,
 				u16 cmd_args[], int argc)
@@ -9960,16 +10043,16 @@
 	struct bnx2x *bp = params->bp;
 	/* Write CMD_OPEN_OVERRIDE to STATUS reg */
 	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-			MDIO_84833_CMD_HDLR_STATUS,
+			MDIO_848xx_CMD_HDLR_STATUS,
 			PHY84833_STATUS_CMD_OPEN_OVERRIDE);
-	for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+	for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
 		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-				MDIO_84833_CMD_HDLR_STATUS, &val);
+				MDIO_848xx_CMD_HDLR_STATUS, &val);
 		if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
 			break;
 		usleep_range(1000, 2000);
 	}
-	if (idx >= PHY84833_CMDHDLR_WAIT) {
+	if (idx >= PHY848xx_CMDHDLR_WAIT) {
 		DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
 		return -EINVAL;
 	}
@@ -9977,42 +10060,62 @@
 	/* Prepare argument(s) and issue command */
 	for (idx = 0; idx < argc; idx++) {
 		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-				MDIO_84833_CMD_HDLR_DATA1 + idx,
+				MDIO_848xx_CMD_HDLR_DATA1 + idx,
 				cmd_args[idx]);
 	}
 	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-			MDIO_84833_CMD_HDLR_COMMAND, fw_cmd);
-	for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+			MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+	for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
 		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-				MDIO_84833_CMD_HDLR_STATUS, &val);
+				MDIO_848xx_CMD_HDLR_STATUS, &val);
 		if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
-			(val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
+		    (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
 			break;
 		usleep_range(1000, 2000);
 	}
-	if ((idx >= PHY84833_CMDHDLR_WAIT) ||
-		(val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+	if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+	    (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
 		DP(NETIF_MSG_LINK, "FW cmd failed.\n");
 		return -EINVAL;
 	}
 	/* Gather returning data */
 	for (idx = 0; idx < argc; idx++) {
 		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-				MDIO_84833_CMD_HDLR_DATA1 + idx,
+				MDIO_848xx_CMD_HDLR_DATA1 + idx,
 				&cmd_args[idx]);
 	}
 	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-			MDIO_84833_CMD_HDLR_STATUS,
+			MDIO_848xx_CMD_HDLR_STATUS,
 			PHY84833_STATUS_CMD_CLEAR_COMPLETE);
 	return 0;
 }
 
-static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
-				   struct link_params *params,
-				   struct link_vars *vars)
+static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
+				struct link_params *params,
+				u16 fw_cmd,
+				u16 cmd_args[], int argc)
+{
+	struct bnx2x *bp = params->bp;
+
+	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) ||
+	    (REG_RD(bp, params->shmem2_base +
+		    offsetof(struct shmem2_region,
+			     link_attr_sync[params->port])) &
+	     LINK_ATTR_84858)) {
+		return bnx2x_84858_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+					    argc);
+	} else {
+		return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+					    argc);
+	}
+}
+
+static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy,
+				     struct link_params *params,
+				     struct link_vars *vars)
 {
 	u32 pair_swap;
-	u16 data[PHY84833_CMDHDLR_MAX_ARGS];
+	u16 data[PHY848xx_CMDHDLR_MAX_ARGS];
 	int status;
 	struct bnx2x *bp = params->bp;
 
@@ -10028,8 +10131,9 @@
 	/* Only the second argument is used for this command */
 	data[1] = (u16)pair_swap;
 
-	status = bnx2x_84833_cmd_hdlr(phy, params,
-		PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS);
+	status = bnx2x_848xx_cmd_hdlr(phy, params,
+				      PHY848xx_CMD_SET_PAIR_SWAP, data,
+				      PHY848xx_CMDHDLR_MAX_ARGS);
 	if (status == 0)
 		DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
 
@@ -10118,8 +10222,8 @@
 	DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
 
 	/* Prevent Phy from working in EEE and advertising it */
-	rc = bnx2x_84833_cmd_hdlr(phy, params,
-		PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+	rc = bnx2x_848xx_cmd_hdlr(phy, params,
+				  PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
 	if (rc) {
 		DP(NETIF_MSG_LINK, "EEE disable failed.\n");
 		return rc;
@@ -10136,8 +10240,8 @@
 	struct bnx2x *bp = params->bp;
 	u16 cmd_args = 1;
 
-	rc = bnx2x_84833_cmd_hdlr(phy, params,
-		PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+	rc = bnx2x_848xx_cmd_hdlr(phy, params,
+				  PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
 	if (rc) {
 		DP(NETIF_MSG_LINK, "EEE enable failed.\n");
 		return rc;
@@ -10155,7 +10259,7 @@
 	u8 port, initialize = 1;
 	u16 val;
 	u32 actual_phy_selection;
-	u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
+	u16 cmd_args[PHY848xx_CMDHDLR_MAX_ARGS];
 	int rc = 0;
 
 	usleep_range(1000, 2000);
@@ -10180,8 +10284,7 @@
 
 	/* Wait for GPHY to come out of reset */
 	msleep(50);
-	if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
-	    (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+	if (!bnx2x_is_8483x_8485x(phy)) {
 		/* BCM84823 requires that XGXS links up first @ 10G for normal
 		 * behavior.
 		 */
@@ -10192,7 +10295,19 @@
 		bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
 		vars->line_speed = temp;
 	}
+	/* Check if this is actually BCM84858 */
+	if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+		u16 hw_rev;
 
+		bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+				MDIO_AN_REG_848xx_ID_MSB, &hw_rev);
+		if (hw_rev == BCM84858_PHY_ID) {
+			params->link_attr_sync |= LINK_ATTR_84858;
+			bnx2x_update_link_attr(params, params->link_attr_sync);
+		}
+	}
+
+	/* Set dual-media configuration according to the multi-phy config */
 	bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
 			MDIO_CTL_REG_84823_MEDIA, &val);
 	val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
@@ -10237,18 +10352,17 @@
 	DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
 		   params->multi_phy_config, val);
 
-	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-	    (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
-		bnx2x_84833_pair_swap_cfg(phy, params, vars);
+	if (bnx2x_is_8483x_8485x(phy)) {
+		bnx2x_848xx_pair_swap_cfg(phy, params, vars);
 
 		/* Keep AutogrEEEn disabled. */
 		cmd_args[0] = 0x0;
 		cmd_args[1] = 0x0;
 		cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
 		cmd_args[3] = PHY84833_CONSTANT_LATENCY;
-		rc = bnx2x_84833_cmd_hdlr(phy, params,
-			PHY84833_CMD_SET_EEE_MODE, cmd_args,
-			PHY84833_CMDHDLR_MAX_ARGS);
+		rc = bnx2x_848xx_cmd_hdlr(phy, params,
+					  PHY848xx_CMD_SET_EEE_MODE, cmd_args,
+					  PHY848xx_CMDHDLR_MAX_ARGS);
 		if (rc)
 			DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
 	}
@@ -10302,8 +10416,7 @@
 		vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
 	}
 
-	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-	    (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+	if (bnx2x_is_8483x_8485x(phy)) {
 		/* Bring PHY out of super isolate mode as the final step. */
 		bnx2x_cl45_read_and_write(bp, phy,
 					  MDIO_CTL_DEVAD,
@@ -10435,8 +10548,7 @@
 				LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
 
 		/* Determine if EEE was negotiated */
-		if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-		    (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
+		if (bnx2x_is_8483x_8485x(phy))
 			bnx2x_eee_an_resolve(phy, params, vars);
 	}
 
@@ -11842,6 +11954,40 @@
 	.phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
 };
 
+static const struct bnx2x_phy phy_84858 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
+			    FLAGS_REARM_LATCH_SIGNAL,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_848x3_config_init,
+	.read_status	= (read_status_t)bnx2x_848xx_read_status,
+	.link_reset	= (link_reset_t)bnx2x_848x3_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_848xx_format_ver,
+	.hw_reset	= (hw_reset_t)bnx2x_84833_hw_reset_phy,
+	.set_link_led	= (set_link_led_t)bnx2x_848xx_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+};
+
 static const struct bnx2x_phy phy_54618se = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
 	.addr		= 0xff,
@@ -12128,6 +12274,9 @@
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
 		*phy = phy_84834;
 		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
+		*phy = phy_84858;
+		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
 		*phy = phy_54618se;
@@ -12184,9 +12333,7 @@
 	}
 	phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
 
-	if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-	     (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) &&
-	    (phy->ver_addr)) {
+	if (bnx2x_is_8483x_8485x(phy) && (phy->ver_addr)) {
 		/* Remove 100Mb link support for BCM84833/4 when the phy fw
 		 * version is lower than or equal to 1.39
 		 */
@@ -13281,6 +13428,7 @@
 		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
 		/* GPIO3's are linked, and so both need to be toggled
 		 * to obtain required 2us pulse.
 		 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index d9cce4c..b7d2511 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -1,13 +1,15 @@
 /* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Written by Yaniv Rosner
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c27af12..e3da2bd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1,6 +1,8 @@
-/* bnx2x_main.c: Broadcom Everest network driver.
+/* bnx2x_main.c: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -81,11 +83,11 @@
 #define TX_TIMEOUT		(5*HZ)
 
 static char version[] =
-	"Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
+	"QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
 	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("Eliezer Tamir");
-MODULE_DESCRIPTION("Broadcom NetXtreme II "
+MODULE_DESCRIPTION("QLogic "
 		   "BCM57710/57711/57711E/"
 		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
 		   "57840/57840_MF Driver");
@@ -163,27 +165,27 @@
 static struct {
 	char *name;
 } board_info[] = {
-	[BCM57710]	= { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
-	[BCM57711]	= { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
-	[BCM57711E]	= { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
-	[BCM57712]	= { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
-	[BCM57712_MF]	= { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
-	[BCM57712_VF]	= { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
-	[BCM57800]	= { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
-	[BCM57800_MF]	= { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
-	[BCM57800_VF]	= { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
-	[BCM57810]	= { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
-	[BCM57810_MF]	= { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
-	[BCM57810_VF]	= { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
-	[BCM57840_4_10]	= { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
-	[BCM57840_2_20]	= { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
-	[BCM57840_MF]	= { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
-	[BCM57840_VF]	= { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
-	[BCM57811]	= { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
-	[BCM57811_MF]	= { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
-	[BCM57840_O]	= { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
-	[BCM57840_MFO]	= { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
-	[BCM57811_VF]	= { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }
+	[BCM57710]	= { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
+	[BCM57711]	= { "QLogic BCM57711 10 Gigabit PCIe" },
+	[BCM57711E]	= { "QLogic BCM57711E 10 Gigabit PCIe" },
+	[BCM57712]	= { "QLogic BCM57712 10 Gigabit Ethernet" },
+	[BCM57712_MF]	= { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
+	[BCM57712_VF]	= { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
+	[BCM57800]	= { "QLogic BCM57800 10 Gigabit Ethernet" },
+	[BCM57800_MF]	= { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
+	[BCM57800_VF]	= { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
+	[BCM57810]	= { "QLogic BCM57810 10 Gigabit Ethernet" },
+	[BCM57810_MF]	= { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
+	[BCM57810_VF]	= { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
+	[BCM57840_4_10]	= { "QLogic BCM57840 10 Gigabit Ethernet" },
+	[BCM57840_2_20]	= { "QLogic BCM57840 20 Gigabit Ethernet" },
+	[BCM57840_MF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+	[BCM57840_VF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
+	[BCM57811]	= { "QLogic BCM57811 10 Gigabit Ethernet" },
+	[BCM57811_MF]	= { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
+	[BCM57840_O]	= { "QLogic BCM57840 10/20 Gigabit Ethernet" },
+	[BCM57840_MFO]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+	[BCM57811_VF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" }
 };
 
 #ifndef PCI_DEVICE_ID_NX2_57710
@@ -264,11 +266,14 @@
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
+	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
+	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
@@ -2492,7 +2497,7 @@
 	else {
 		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
 
-		if (IS_MF_SI(bp)) {
+		if (IS_MF_PERCENT_BW(bp)) {
 			/* maxCfg in percents of linkspeed */
 			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
 		} else /* SD modes */
@@ -2916,7 +2921,7 @@
 	func_params.f_obj = &bp->func_obj;
 	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
 
-	if (IS_MF_UFP(bp)) {
+	if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
 		int func = BP_ABS_FUNC(bp);
 		u32 val;
 
@@ -2943,16 +2948,16 @@
 			BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
 				  bp->mf_ov);
 			goto fail;
+		} else {
+			DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
+			   bp->mf_ov);
 		}
-
-		DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov);
-
-		bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
-
-		return;
+	} else {
+		goto fail;
 	}
 
-	/* not supported by SW yet */
+	bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
+	return;
 fail:
 	bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
 }
@@ -3065,7 +3070,7 @@
 	storm_memset_func_en(bp, p->func_id, 1);
 
 	/* spq */
-	if (p->func_flgs & FUNC_FLG_SPQ) {
+	if (p->spq_active) {
 		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
 		REG_WR(bp, XSEM_REG_FAST_MEMORY +
 		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
@@ -3281,7 +3286,6 @@
 {
 	struct bnx2x_func_init_params func_init = {0};
 	struct event_ring_data eq_data = { {0} };
-	u16 flags;
 
 	if (!CHIP_IS_E1x(bp)) {
 		/* reset IGU PF statistics: MSIX + ATTN */
@@ -3298,15 +3302,7 @@
 				BP_FUNC(bp) : BP_VN(bp))*4, 0);
 	}
 
-	/* function setup flags */
-	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
-
-	/* This flag is relevant for E1x only.
-	 * E2 doesn't have a TPA configuration in a function level.
-	 */
-	flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
-
-	func_init.func_flgs = flags;
+	func_init.spq_active = true;
 	func_init.pf_id = BP_FUNC(bp);
 	func_init.func_id = BP_FUNC(bp);
 	func_init.spq_map = bp->spq_mapping;
@@ -3707,6 +3703,34 @@
 	   ethver, iscsiver, fcoever);
 }
 
+void bnx2x_update_mfw_dump(struct bnx2x *bp)
+{
+	struct timeval epoc;
+	u32 drv_ver;
+	u32 valid_dump;
+
+	if (!SHMEM2_HAS(bp, drv_info))
+		return;
+
+	/* Update Driver load time */
+	do_gettimeofday(&epoc);
+	SHMEM2_WR(bp, drv_info.epoc, epoc.tv_sec);
+
+	drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+	SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
+
+	SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
+	/* Check & notify On-Chip dump. */
+	valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
+
+	if (valid_dump & FIRST_DUMP_VALID)
+		DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
+
+	if (valid_dump & SECOND_DUMP_VALID)
+		DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
+}
+
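bnx2x_update_mfw_dump() above only touches drv_info after SHMEM2_HAS() has confirmed that the shared-memory region advertised by the management firmware is large enough to contain that field; the same size-guard recurs below for netproc_fw_ver, mtu_size and fc_npiv_nvram_tbl_addr. A reduced sketch of the guard, with a hypothetical layout:

#include <linux/stddef.h>
#include <linux/types.h>

struct demo_shmem2 {
	u32 size;	/* filled in by the producer */
	u32 old_field;
	u32 new_field;	/* only present in newer layouts */
};

/* True iff the producer's shmem2 is big enough to contain @field. */
#define DEMO_SHMEM2_HAS(s, field) \
	((s)->size > offsetof(struct demo_shmem2, field))

static void demo_write_new_field(struct demo_shmem2 *s, u32 val)
{
	if (DEMO_SHMEM2_HAS(s, new_field))
		s->new_field = val;	/* field exists on the other side */
}
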
 static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
 {
 	u32 cmd_ok, cmd_fail;
@@ -5274,6 +5298,10 @@
 			vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
 
 		break;
+	case BNX2X_FILTER_VLAN_PENDING:
+		DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
+		vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
+		break;
 	case BNX2X_FILTER_MCAST_PENDING:
 		DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
 		/* This is only relevant for 57710 where multicast MACs are
@@ -5568,6 +5596,8 @@
 		      BNX2X_STATE_OPEN):
 		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
 		      BNX2X_STATE_OPENING_WAIT4_PORT):
+		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+		      BNX2X_STATE_CLOSING_WAIT4_HALT):
 			cid = elem->message.data.eth_event.echo &
 				BNX2X_SWCID_MASK;
 			DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
@@ -5585,7 +5615,7 @@
 		      BNX2X_STATE_DIAG):
 		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
 		      BNX2X_STATE_CLOSING_WAIT4_HALT):
-			DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
+			DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
 			bnx2x_handle_classification_eqe(bp, elem);
 			break;
 
@@ -6173,6 +6203,11 @@
 		__set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
 		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
 
+		if (bp->accept_any_vlan) {
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+		}
+
 		break;
 	case BNX2X_RX_MODE_ALLMULTI:
 		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
@@ -6184,6 +6219,11 @@
 		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
 		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
 
+		if (bp->accept_any_vlan) {
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+		}
+
 		break;
 	case BNX2X_RX_MODE_PROMISC:
 		/* According to definition of SI mode, iface in promisc mode
@@ -6204,18 +6244,15 @@
 		else
 			__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
 
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+
 		break;
 	default:
 		BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
 		return -EINVAL;
 	}
 
-	/* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
-	if (rx_mode != BNX2X_RX_MODE_NONE) {
-		__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
-		__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
-	}
-
 	return 0;
 }
 
@@ -7429,6 +7466,9 @@
 	} else
 		BNX2X_ERR("Bootcode is missing - can not initialize link\n");
 
+	if (SHMEM2_HAS(bp, netproc_fw_ver))
+		SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
 	return 0;
 }
 
@@ -8406,6 +8446,42 @@
 	return rc;
 }
 
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+		       struct bnx2x_vlan_mac_obj *obj, bool set,
+		       unsigned long *ramrod_flags)
+{
+	int rc;
+	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+
+	memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+	/* Fill general parameters */
+	ramrod_param.vlan_mac_obj = obj;
+	ramrod_param.ramrod_flags = *ramrod_flags;
+
+	/* Fill a user request section if needed */
+	if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+		ramrod_param.user_req.u.vlan.vlan = vlan;
+		/* Set the command: ADD or DEL */
+		if (set)
+			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+		else
+			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+	}
+
+	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+
+	if (rc == -EEXIST) {
+		/* Do not treat adding the same VLAN as an error. */
+		DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
+		rc = 0;
+	} else if (rc < 0) {
+		BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
+	}
+
+	return rc;
+}
+
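bnx2x_set_vlan_one() above deliberately folds -EEXIST into success so that re-adding an already-programmed VLAN is idempotent; the VLAN re-configuration paths added later in this patch rely on that when filters are re-programmed. The mapping, reduced to a sketch (illustrative helper, not driver code):

#include <linux/errno.h>

static int demo_filter_add_rc(int rc)
{
	if (rc == -EEXIST)
		return 0;	/* duplicate ADD is not an error */
	return rc;
}
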
 int bnx2x_del_all_macs(struct bnx2x *bp,
 		       struct bnx2x_vlan_mac_obj *mac_obj,
 		       int mac_type, bool wait_for_comp)
@@ -10002,6 +10078,81 @@
 	}
 }
 
+#ifdef CONFIG_BNX2X_VXLAN
+static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port)
+{
+	struct bnx2x_func_switch_update_params *switch_update_params;
+	struct bnx2x_func_state_params func_params = {NULL};
+	int rc;
+
+	switch_update_params = &func_params.params.switch_update;
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+	/* Function parameters */
+	__set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+		  &switch_update_params->changes);
+	switch_update_params->vxlan_dst_port = port;
+	rc = bnx2x_func_state_change(bp, &func_params);
+	if (rc)
+		BNX2X_ERR("failed to change vxlan dst port to %d (rc = 0x%x)\n",
+			  port, rc);
+	return rc;
+}
+
+static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port)
+{
+	if (!netif_running(bp->dev))
+		return;
+
+	if (bp->vxlan_dst_port || !IS_PF(bp)) {
+		DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n");
+		return;
+	}
+
+	bp->vxlan_dst_port = port;
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0);
+}
+
+static void bnx2x_add_vxlan_port(struct net_device *netdev,
+				 sa_family_t sa_family, __be16 port)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u16 t_port = ntohs(port);
+
+	__bnx2x_add_vxlan_port(bp, t_port);
+}
+
+static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
+{
+	if (!bp->vxlan_dst_port || bp->vxlan_dst_port != port || !IS_PF(bp)) {
+		DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
+		return;
+	}
+
+	if (netif_running(bp->dev)) {
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0);
+	} else {
+		bp->vxlan_dst_port = 0;
+		netdev_info(bp->dev, "Deleted vxlan dest port %d", port);
+	}
+}
+
+static void bnx2x_del_vxlan_port(struct net_device *netdev,
+				 sa_family_t sa_family, __be16 port)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u16 t_port = ntohs(port);
+
+	__bnx2x_del_vxlan_port(bp, t_port);
+}
+#endif
+
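The ndo_{add,del}_vxlan_port callbacks above do not issue the function-state ramrod themselves: they only convert the port from wire order (ntohs), record it, and raise a BNX2X_SP_RTNL_* bit for bnx2x_sp_rtnl_task() to consume under rtnl_lock. A self-contained sketch of that defer-to-worker shape; every name here is hypothetical:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define DEMO_ADD_VXLAN_PORT	0	/* bit index in sp_state */

struct demo_dev {
	unsigned long		sp_state;
	u16			vxlan_dst_port;
	struct work_struct	sp_work;
};

static void demo_add_vxlan_port(struct demo_dev *d, __be16 port)
{
	d->vxlan_dst_port = ntohs(port);	/* wire -> host order */
	set_bit(DEMO_ADD_VXLAN_PORT, &d->sp_state);
	schedule_work(&d->sp_work);		/* configure later */
}

static void demo_sp_task(struct work_struct *work)
{
	struct demo_dev *d = container_of(work, struct demo_dev, sp_work);

	if (test_and_clear_bit(DEMO_ADD_VXLAN_PORT, &d->sp_state))
		pr_info("configure vxlan dst port %u\n", d->vxlan_dst_port);
}
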
 static int bnx2x_close(struct net_device *dev);
 
 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
@@ -10010,6 +10161,9 @@
 static void bnx2x_sp_rtnl_task(struct work_struct *work)
 {
 	struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
+#ifdef CONFIG_BNX2X_VXLAN
+	u16 port;
+#endif
 
 	rtnl_lock();
 
@@ -10108,6 +10262,27 @@
 			       &bp->sp_rtnl_state))
 		bnx2x_update_mng_version(bp);
 
+#ifdef CONFIG_BNX2X_VXLAN
+	port = bp->vxlan_dst_port;
+	if (test_and_clear_bit(BNX2X_SP_RTNL_ADD_VXLAN_PORT,
+			       &bp->sp_rtnl_state)) {
+		if (!bnx2x_vxlan_port_update(bp, port))
+			netdev_info(bp->dev, "Added vxlan dest port %d", port);
+		else
+			bp->vxlan_dst_port = 0;
+	}
+
+	if (test_and_clear_bit(BNX2X_SP_RTNL_DEL_VXLAN_PORT,
+			       &bp->sp_rtnl_state)) {
+		if (!bnx2x_vxlan_port_update(bp, 0)) {
+			netdev_info(bp->dev,
+				    "Deleted vxlan dest port %d", port);
+			bp->vxlan_dst_port = 0;
+			vxlan_get_rx_port(bp->dev);
+		}
+	}
+#endif
+
 	/* work which needs rtnl lock not-taken (as it takes the lock itself and
 	 * can be called from other contexts as well)
 	 */
@@ -11678,7 +11853,7 @@
 static int bnx2x_get_hwinfo(struct bnx2x *bp)
 {
 	int /*abs*/func = BP_ABS_FUNC(bp);
-	int vn;
+	int vn, mfw_vn;
 	u32 val = 0, val2 = 0;
 	int rc = 0;
 
@@ -11768,6 +11943,7 @@
 	bp->mf_mode = 0;
 	bp->mf_sub_mode = 0;
 	vn = BP_VN(bp);
+	mfw_vn = BP_FW_MB_IDX(bp);
 
 	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
 		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -11824,6 +12000,31 @@
 				} else
 					BNX2X_DEV_INFO("illegal OV for SD\n");
 				break;
+			case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
+				bp->mf_mode = MULTI_FUNCTION_SD;
+				bp->mf_sub_mode = SUB_MF_MODE_BD;
+				bp->mf_config[vn] =
+					MF_CFG_RD(bp,
+						  func_mf_config[func].config);
+
+				if (SHMEM2_HAS(bp, mtu_size)) {
+					int mtu_idx = BP_FW_MB_IDX(bp);
+					u16 mtu_size;
+					u32 mtu;
+
+					mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
+					mtu_size = (u16)mtu;
+					DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
+					   mtu_size, mtu);
+
+					/* if valid: update device mtu */
+					if (((mtu_size + ETH_HLEN) >=
+					     ETH_MIN_PACKET_SIZE) &&
+					    (mtu_size <=
+					     ETH_MAX_JUMBO_PACKET_SIZE))
+						bp->dev->mtu = mtu_size;
+				}
+				break;
 			case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
 				bp->mf_mode = MULTI_FUNCTION_SD;
 				bp->mf_sub_mode = SUB_MF_MODE_UFP;
@@ -11871,9 +12072,10 @@
 
 				BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
 					       func, bp->mf_ov, bp->mf_ov);
-			} else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) {
+			} else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
+				   (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
 				dev_err(&bp->pdev->dev,
-					"Unexpected - no valid MF OV for func %d in UFP mode\n",
+					"Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
 					func);
 				bp->path_has_ovlan = true;
 			} else {
@@ -12078,6 +12280,7 @@
 	mutex_init(&bp->drv_info_mutex);
 	sema_init(&bp->stats_lock, 1);
 	bp->drv_info_mng_owner = false;
+	INIT_LIST_HEAD(&bp->vlan_reg);
 
 	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12278,6 +12481,12 @@
 	rc = bnx2x_nic_load(bp, LOAD_OPEN);
 	if (rc)
 		return rc;
+
+#ifdef CONFIG_BNX2X_VXLAN
+	if (IS_PF(bp))
+		vxlan_get_rx_port(dev);
+#endif
+
 	return 0;
 }
 
@@ -12596,6 +12805,169 @@
 	return vxlan_features_check(skb, features);
 }
 
+static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
+{
+	int rc;
+
+	if (IS_PF(bp)) {
+		unsigned long ramrod_flags = 0;
+
+		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+		rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
+					add, &ramrod_flags);
+	} else {
+		rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
+	}
+
+	return rc;
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+	struct bnx2x_vlan_entry *vlan;
+	int rc = 0;
+
+	if (!bp->vlan_cnt) {
+		DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
+		return 0;
+	}
+
+	list_for_each_entry(vlan, &bp->vlan_reg, link) {
+		/* Prepare for cleanup in case of errors */
+		if (rc) {
+			vlan->hw = false;
+			continue;
+		}
+
+		if (!vlan->hw)
+			continue;
+
+		DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+
+		rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+		if (rc) {
+			BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
+			vlan->hw = false;
+			rc = -EINVAL;
+			continue;
+		}
+	}
+
+	return rc;
+}
+
+static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct bnx2x_vlan_entry *vlan;
+	bool hw = false;
+	int rc = 0;
+
+	if (!netif_running(bp->dev)) {
+		DP(NETIF_MSG_IFUP,
+		   "Ignoring VLAN configuration the interface is down\n");
+		return -EFAULT;
+	}
+
+	DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
+
+	vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
+	if (!vlan)
+		return -ENOMEM;
+
+	bp->vlan_cnt++;
+	if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
+		DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
+		bp->accept_any_vlan = true;
+		if (IS_PF(bp))
+			bnx2x_set_rx_mode_inner(bp);
+		else
+			bnx2x_vfpf_storm_rx_mode(bp);
+	} else if (bp->vlan_cnt <= bp->vlan_credit) {
+		rc = __bnx2x_vlan_configure_vid(bp, vid, true);
+		hw = true;
+	}
+
+	vlan->vid = vid;
+	vlan->hw = hw;
+
+	if (!rc) {
+		list_add(&vlan->link, &bp->vlan_reg);
+	} else {
+		bp->vlan_cnt--;
+		kfree(vlan);
+	}
+
+	DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+
+	return rc;
+}
+
+static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct bnx2x_vlan_entry *vlan;
+	int rc = 0;
+
+	if (!netif_running(bp->dev)) {
+		DP(NETIF_MSG_IFUP,
+		   "Ignoring VLAN configuration the interface is down\n");
+		return -EFAULT;
+	}
+
+	DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
+
+	if (!bp->vlan_cnt) {
+		BNX2X_ERR("Unable to kill VLAN %d\n", vid);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(vlan, &bp->vlan_reg, link)
+		if (vlan->vid == vid)
+			break;
+
+	if (vlan->vid != vid) {
+		BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
+		return -EINVAL;
+	}
+
+	if (vlan->hw)
+		rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+
+	list_del(&vlan->link);
+	kfree(vlan);
+
+	bp->vlan_cnt--;
+
+	if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
+		/* Configure all non-configured entries */
+		list_for_each_entry(vlan, &bp->vlan_reg, link) {
+			if (vlan->hw)
+				continue;
+
+			rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+			if (rc) {
+				BNX2X_ERR("Unable to config VLAN %d\n",
+					  vlan->vid);
+				continue;
+			}
+			DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
+			   vlan->vid);
+			vlan->hw = true;
+		}
+		DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
+		bp->accept_any_vlan = false;
+		if (IS_PF(bp))
+			bnx2x_set_rx_mode_inner(bp);
+		else
+			bnx2x_vfpf_storm_rx_mode(bp);
+	}
+
+	DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
+
+	return rc;
+}
+
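The add/kill handlers above implement a soft-fallback policy: exact-match VLAN filters are programmed while per-function credit lasts; once vlan_cnt exceeds vlan_credit the device falls back to accepting any VLAN, and when removals bring the count back under credit the registered list is re-programmed and the catch-all is dropped. The policy decision, reduced to a sketch with illustrative names:

#include <linux/types.h>

struct demo_vlan_state {
	int	vlan_cnt;	/* VLANs requested by the stack */
	int	vlan_credit;	/* exact-match filters the HW can hold */
	bool	accept_any_vlan;
};

/* Recompute the policy after an add/remove; returns true when the
 * rx-mode must be reprogrammed (catch-all raised or dropped).
 */
static bool demo_update_vlan_policy(struct demo_vlan_state *s)
{
	bool want_any = s->vlan_cnt > s->vlan_credit;

	if (want_any == s->accept_any_vlan)
		return false;
	s->accept_any_vlan = want_any;
	return true;
}
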
 static const struct net_device_ops bnx2x_netdev_ops = {
 	.ndo_open		= bnx2x_open,
 	.ndo_stop		= bnx2x_close,
@@ -12609,6 +12981,8 @@
 	.ndo_fix_features	= bnx2x_fix_features,
 	.ndo_set_features	= bnx2x_set_features,
 	.ndo_tx_timeout		= bnx2x_tx_timeout,
+	.ndo_vlan_rx_add_vid	= bnx2x_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= bnx2x_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= poll_bnx2x,
 #endif
@@ -12628,6 +13002,10 @@
 	.ndo_get_phys_port_id	= bnx2x_get_phys_port_id,
 	.ndo_set_vf_link_state	= bnx2x_set_vf_link_state,
 	.ndo_features_check	= bnx2x_features_check,
+#ifdef CONFIG_BNX2X_VXLAN
+	.ndo_add_vxlan_port	= bnx2x_add_vxlan_port,
+	.ndo_del_vxlan_port	= bnx2x_del_vxlan_port,
+#endif
 };
 
 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
@@ -12819,6 +13197,18 @@
 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
 
+	/* A VF with an old hypervisor or an old PF does not support filtering */
+	if (IS_PF(bp)) {
+		if (CHIP_IS_E1x(bp))
+			bp->accept_any_vlan = true;
+		else
+			dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#ifdef CONFIG_BNX2X_SRIOV
+	} else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
+		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#endif
+	}
+
 	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
 	dev->features |= NETIF_F_HIGHDMA;
 
@@ -13561,6 +13951,9 @@
 
 	bnx2x_register_phc(bp);
 
+	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
 	return 0;
 
 init_one_exit:
@@ -13623,6 +14016,7 @@
 	/* Power on: we can't let PCI layer write to us while we are in D3 */
 	if (IS_PF(bp)) {
 		bnx2x_set_power_state(bp, PCI_D0);
+		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
 
 		/* Set endianness registers to reset values in case the next
 		 * driver boots in a different endianness environment.
@@ -14371,6 +14765,90 @@
 		rc = -EINVAL;
 	}
 
+	/* For storage-only interfaces, change driver state */
+	if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
+		switch (ctl->drv_state) {
+		case DRV_NOP:
+			break;
+		case DRV_ACTIVE:
+			bnx2x_set_os_driver_state(bp,
+						  OS_DRIVER_STATE_ACTIVE);
+			break;
+		case DRV_INACTIVE:
+			bnx2x_set_os_driver_state(bp,
+						  OS_DRIVER_STATE_DISABLED);
+			break;
+		case DRV_UNLOADED:
+			bnx2x_set_os_driver_state(bp,
+						  OS_DRIVER_STATE_NOT_LOADED);
+			break;
+		default:
+		BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state);
+		}
+	}
+
+	return rc;
+}
+
+static int bnx2x_get_fc_npiv(struct net_device *dev,
+			     struct cnic_fc_npiv_tbl *cnic_tbl)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct bdn_fc_npiv_tbl *tbl = NULL;
+	u32 offset, entries;
+	int rc = -EINVAL;
+	int i;
+
+	if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
+		goto out;
+
+	DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
+
+	tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+	if (!tbl) {
+		BNX2X_ERR("Failed to allocate fc_npiv table\n");
+		goto out;
+	}
+
+	offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
+	DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
+
+	/* Read the table contents from nvram */
+	if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
+		BNX2X_ERR("Failed to read FC-NPIV table\n");
+		goto out;
+	}
+
+	/* Since bnx2x_nvram_read() returns data in be32, we need to convert
+	 * the number of entries back to cpu endianness.
+	 */
+	entries = tbl->fc_npiv_cfg.num_of_npiv;
+	entries = (__force u32)be32_to_cpu((__force __be32)entries);
+	tbl->fc_npiv_cfg.num_of_npiv = entries;
+
+	if (!tbl->fc_npiv_cfg.num_of_npiv) {
+		DP(BNX2X_MSG_MCP,
+		   "No FC-NPIV table [valid, simply not present]\n");
+		goto out;
+	} else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
+		BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
+			  tbl->fc_npiv_cfg.num_of_npiv);
+		goto out;
+	} else {
+		DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
+		   tbl->fc_npiv_cfg.num_of_npiv);
+	}
+
+	/* Copy the data into cnic-provided struct */
+	cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
+	for (i = 0; i < cnic_tbl->count; i++) {
+		memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
+		memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
+	}
+
+	rc = 0;
+out:
+	kfree(tbl);
 	return rc;
 }
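As the comment in bnx2x_get_fc_npiv() notes, bnx2x_nvram_read() fills the buffer with big-endian 32-bit words, so the num_of_npiv count must be byte-swapped before use; the __force casts only keep sparse quiet about the typed conversion. The same fix-up as a one-liner (sketch):

#include <linux/types.h>
#include <asm/byteorder.h>

static u32 demo_nvram_word_to_cpu(u32 raw)
{
	return (__force u32)be32_to_cpu((__force __be32)raw);
}
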
 
@@ -14516,6 +14994,7 @@
 	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
 	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
 	cp->drv_ctl = bnx2x_drv_ctl;
+	cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
 	cp->drv_register_cnic = bnx2x_register_cnic;
 	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
 	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
index caf1aef..a91ccbf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -1,6 +1,8 @@
-/* bnx2x_mfw_req.h: Broadcom Everest network driver.
+/* bnx2x_mfw_req.h: QLogic Everest network driver.
  *
  * Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 49d5110..4dead49 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1,6 +1,8 @@
-/* bnx2x_reg.h: Broadcom Everest network driver.
+/* bnx2x_reg.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -2137,6 +2139,10 @@
 /* [RW 1] When this bit is set; the LLH will expect all packets to be with
    e1hov */
 #define NIG_REG_LLH_E1HOV_MODE					 0x160d8
+/* [RW 16] Outer VLAN type identifier for multi-function mode. In non
+ * multi-function mode; it will hold the inner VLAN type. Typically 0x8100.
+ */
+#define NIG_REG_LLH_E1HOV_TYPE_1				 0x16028
 /* [RW 1] When this bit is set; the LLH will classify the packet before
    sending it to the BRB or calculating WoL on it. */
 #define NIG_REG_LLH_MF_MODE					 0x16024
@@ -2953,7 +2959,12 @@
 #define PBF_REG_TQ_OCCUPANCY_Q0					 0x1403ac
 /* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */
 #define PBF_REG_TQ_OCCUPANCY_Q1					 0x1403b0
-#define PB_REG_CONTROL						 0
+/* [RW 16] One of 8 values that should be compared to type in Ethernet
+ * parsing. If there is a match; the field after Ethernet is the first VLAN.
+ * Reset value is 0x8100 which is the standard VLAN type. Note that when
+ * checking second VLAN; type is compared only to 0x8100.
+ */
+#define PBF_REG_VLAN_TYPE_0					 0x15c06c
 /* [RW 2] Interrupt mask register #0 read/write */
 #define PB_REG_PB_INT_MASK					 0x28
 /* [R 2] Interrupt register #0 read */
@@ -3372,6 +3383,12 @@
 #define PRS_REG_TCM_CURRENT_CREDIT				 0x40160
 /* [R 8] debug only: TSDM current credit. Transaction based. */
 #define PRS_REG_TSDM_CURRENT_CREDIT				 0x4015c
+/* [RW 16] One of 8 values that should be compared to type in Ethernet
+ * parsing. If there is a match; the field after Ethernet is the first VLAN.
+ * Reset value is 0x8100 which is the standard VLAN type. Note that when
+ * checking second VLAN; type is compared only to 0x8100.
+ */
+#define PRS_REG_VLAN_TYPE_0					 0x401a8
 #define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT			 (0x1<<19)
 #define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF			 (0x1<<20)
 #define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN			 (0x1<<22)
@@ -7240,6 +7257,9 @@
 #define MDIO_AN_REG_8481_LEGACY_MII_CTRL	0xffe0
 #define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G	0x40
 #define MDIO_AN_REG_8481_LEGACY_MII_STATUS	0xffe1
+#define MDIO_AN_REG_848xx_ID_MSB		0xffe2
+#define BCM84858_PHY_ID					0x600d
+#define MDIO_AN_REG_848xx_ID_LSB		0xffe3
 #define MDIO_AN_REG_8481_LEGACY_AN_ADV		0xffe4
 #define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION	0xffe6
 #define MDIO_AN_REG_8481_1000T_CTRL		0xffe9
@@ -7283,31 +7303,31 @@
 #define MDIO_84833_TOP_CFG_FW_NO_EEE		0x1f81
 #define MDIO_84833_TOP_CFG_XGPHY_STRAP1			0x401a
 #define MDIO_84833_SUPER_ISOLATE		0x8000
-/* These are mailbox register set used by 84833. */
-#define MDIO_84833_TOP_CFG_SCRATCH_REG0			0x4005
-#define MDIO_84833_TOP_CFG_SCRATCH_REG1			0x4006
-#define MDIO_84833_TOP_CFG_SCRATCH_REG2			0x4007
-#define MDIO_84833_TOP_CFG_SCRATCH_REG3			0x4008
-#define MDIO_84833_TOP_CFG_SCRATCH_REG4			0x4009
-#define MDIO_84833_TOP_CFG_SCRATCH_REG26		0x4037
-#define MDIO_84833_TOP_CFG_SCRATCH_REG27		0x4038
-#define MDIO_84833_TOP_CFG_SCRATCH_REG28		0x4039
-#define MDIO_84833_TOP_CFG_SCRATCH_REG29		0x403a
-#define MDIO_84833_TOP_CFG_SCRATCH_REG30		0x403b
-#define MDIO_84833_TOP_CFG_SCRATCH_REG31		0x403c
-#define MDIO_84833_CMD_HDLR_COMMAND	MDIO_84833_TOP_CFG_SCRATCH_REG0
-#define MDIO_84833_CMD_HDLR_STATUS	MDIO_84833_TOP_CFG_SCRATCH_REG26
-#define MDIO_84833_CMD_HDLR_DATA1	MDIO_84833_TOP_CFG_SCRATCH_REG27
-#define MDIO_84833_CMD_HDLR_DATA2	MDIO_84833_TOP_CFG_SCRATCH_REG28
-#define MDIO_84833_CMD_HDLR_DATA3	MDIO_84833_TOP_CFG_SCRATCH_REG29
-#define MDIO_84833_CMD_HDLR_DATA4	MDIO_84833_TOP_CFG_SCRATCH_REG30
-#define MDIO_84833_CMD_HDLR_DATA5	MDIO_84833_TOP_CFG_SCRATCH_REG31
+/* This is the mailbox register set used by the 84833/84858. */
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG0			0x4005
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG1			0x4006
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG2			0x4007
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG3			0x4008
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG4			0x4009
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG26		0x4037
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG27		0x4038
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG28		0x4039
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG29		0x403a
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG30		0x403b
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG31		0x403c
+#define MDIO_848xx_CMD_HDLR_COMMAND	(MDIO_848xx_TOP_CFG_SCRATCH_REG0)
+#define MDIO_848xx_CMD_HDLR_STATUS	(MDIO_848xx_TOP_CFG_SCRATCH_REG26)
+#define MDIO_848xx_CMD_HDLR_DATA1	(MDIO_848xx_TOP_CFG_SCRATCH_REG27)
+#define MDIO_848xx_CMD_HDLR_DATA2	(MDIO_848xx_TOP_CFG_SCRATCH_REG28)
+#define MDIO_848xx_CMD_HDLR_DATA3	(MDIO_848xx_TOP_CFG_SCRATCH_REG29)
+#define MDIO_848xx_CMD_HDLR_DATA4	(MDIO_848xx_TOP_CFG_SCRATCH_REG30)
+#define MDIO_848xx_CMD_HDLR_DATA5	(MDIO_848xx_TOP_CFG_SCRATCH_REG31)
 
-/* Mailbox command set used by 84833. */
-#define PHY84833_CMD_SET_PAIR_SWAP			0x8001
-#define PHY84833_CMD_GET_EEE_MODE			0x8008
-#define PHY84833_CMD_SET_EEE_MODE			0x8009
-/* Mailbox status set used by 84833. */
+/* Mailbox command set used by the 84833/84858. */
+#define PHY848xx_CMD_SET_PAIR_SWAP			0x8001
+#define PHY848xx_CMD_GET_EEE_MODE			0x8008
+#define PHY848xx_CMD_SET_EEE_MODE			0x8009
+/* Mailbox status set used by 84833 only */
 #define PHY84833_STATUS_CMD_RECEIVED			0x0001
 #define PHY84833_STATUS_CMD_IN_PROGRESS			0x0002
 #define PHY84833_STATUS_CMD_COMPLETE_PASS		0x0004
@@ -7318,6 +7338,13 @@
 #define PHY84833_STATUS_CMD_CLEAR_COMPLETE		0x0080
 #define PHY84833_STATUS_CMD_OPEN_OVERRIDE		0xa5a5
 
+/* Mailbox status set used by 84858 only */
+#define PHY84858_STATUS_CMD_RECEIVED			0x0001
+#define PHY84858_STATUS_CMD_IN_PROGRESS			0x0002
+#define PHY84858_STATUS_CMD_COMPLETE_PASS		0x0004
+#define PHY84858_STATUS_CMD_COMPLETE_ERROR		0x0008
+#define PHY84858_STATUS_CMD_SYSTEM_BUSY			0xbbbb
+
 
 /* Warpcore clause 45 addressing */
 #define MDIO_WC_DEVAD					0x3
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 4ad415a..c9bd7f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -1,15 +1,17 @@
-/* bnx2x_sp.c: Broadcom Everest network driver.
+/* bnx2x_sp.c: QLogic Everest network driver.
  *
- * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -355,6 +357,23 @@
 
 	return vp->get(vp, 1);
 }
+
+static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	if (!mp->get(mp, 1))
+		return false;
+
+	if (!vp->get(vp, 1)) {
+		mp->put(mp, 1);
+		return false;
+	}
+
+	return true;
+}
+
 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
 {
 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
@@ -383,6 +402,22 @@
 	return vp->put(vp, 1);
 }
 
+static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	if (!mp->put(mp, 1))
+		return false;
+
+	if (!vp->put(vp, 1)) {
+		mp->get(mp, 1);
+		return false;
+	}
+
+	return true;
+}
+
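The get/put pairs above make the combined VLAN-MAC credit all-or-nothing: a MAC credit is taken first and rolled back if the VLAN pool is exhausted, and the put path compensates symmetrically when the second put fails. The rollback shape as a self-contained sketch (demo_pool and its helpers are illustrative):

#include <linux/types.h>

struct demo_pool { int avail; };

static bool demo_pool_get(struct demo_pool *p, int n)
{
	if (p->avail < n)
		return false;
	p->avail -= n;
	return true;
}

static void demo_pool_put(struct demo_pool *p, int n)
{
	p->avail += n;
}

/* Take one credit from each pool, or neither. */
static bool demo_get_vlan_mac_credit(struct demo_pool *macs,
				     struct demo_pool *vlans)
{
	if (!demo_pool_get(macs, 1))
		return false;
	if (!demo_pool_get(vlans, 1)) {
		demo_pool_put(macs, 1);	/* roll back the MAC credit */
		return false;
	}
	return true;
}
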
 /**
  * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
  *
@@ -636,6 +671,26 @@
 	return 0;
 }
 
+static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
+				    struct bnx2x_vlan_mac_obj *o,
+				   union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
+	   data->vlan_mac.mac, data->vlan_mac.vlan);
+
+	list_for_each_entry(pos, &o->head, link)
+		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+			     ETH_ALEN)) &&
+		    (data->vlan_mac.is_inner_mac ==
+		     pos->u.vlan_mac.is_inner_mac))
+			return -EEXIST;
+
+	return 0;
+}
+
 /* check_del() callbacks */
 static struct bnx2x_vlan_mac_registry_elem *
 	bnx2x_check_mac_del(struct bnx2x *bp,
@@ -670,6 +725,27 @@
 	return NULL;
 }
 
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_vlan_mac_del(struct bnx2x *bp,
+				 struct bnx2x_vlan_mac_obj *o,
+				 union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
+	   data->vlan_mac.mac, data->vlan_mac.vlan);
+
+	list_for_each_entry(pos, &o->head, link)
+		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+			     ETH_ALEN)) &&
+		    (data->vlan_mac.is_inner_mac ==
+		     pos->u.vlan_mac.is_inner_mac))
+			return pos;
+
+	return NULL;
+}
+
 /* check_move() callback */
 static bool bnx2x_check_move(struct bnx2x *bp,
 			     struct bnx2x_vlan_mac_obj *src_o,
@@ -1036,6 +1112,96 @@
 					rule_cnt);
 }
 
+static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
+				      struct bnx2x_vlan_mac_obj *o,
+				      struct bnx2x_exeq_elem *elem,
+				      int rule_idx, int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct eth_classify_rules_ramrod_data *data =
+		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
+	int rule_cnt = rule_idx + 1;
+	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
+	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
+	u16 inner_mac;
+
+	/* Reset the ramrod data buffer for the first rule */
+	if (rule_idx == 0)
+		memset(data, 0, sizeof(*data));
+
+	/* Set a rule header */
+	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
+				      &rule_entry->pair.header);
+
+	/* Set VLAN and MAC themselves */
+	rule_entry->pair.vlan = cpu_to_le16(vlan);
+	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+			      &rule_entry->pair.mac_mid,
+			      &rule_entry->pair.mac_lsb, mac);
+	inner_mac = elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
+	rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+	/* MOVE: Add a rule that will add this MAC/VLAN to the target Queue */
+	if (cmd == BNX2X_VLAN_MAC_MOVE) {
+		struct bnx2x_vlan_mac_obj *target_obj;
+
+		rule_entry++;
+		rule_cnt++;
+
+		/* Setup ramrod data */
+		target_obj = elem->cmd_data.vlan_mac.target_obj;
+		bnx2x_vlan_mac_set_cmd_hdr_e2(bp, target_obj,
+					      true, CLASSIFY_RULE_OPCODE_PAIR,
+					      &rule_entry->pair.header);
+
+		/* Set the VLAN itself */
+		rule_entry->pair.vlan = cpu_to_le16(vlan);
+		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+				      &rule_entry->pair.mac_mid,
+				      &rule_entry->pair.mac_lsb, mac);
+		rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+	}
+
+	/* Set the ramrod data header */
+	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+					rule_cnt);
+}
+
+/**
+ * bnx2x_set_one_vlan_mac_e1h - configure one VLAN-MAC pair CAM entry on E1H
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ * @elem:	bnx2x_exeq_elem
+ * @rule_idx:	rule index
+ * @cam_offset:	CAM offset
+ */
+static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
+				       struct bnx2x_vlan_mac_obj *o,
+				       struct bnx2x_exeq_elem *elem,
+				       int rule_idx, int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct mac_configuration_cmd *config =
+		(struct mac_configuration_cmd *)(raw->rdata);
+	/* 57710 and 57711 do not support MOVE command,
+	 * so it's either ADD or DEL
+	 */
+	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+		true : false;
+
+	/* Reset the ramrod data buffer */
+	memset(config, 0, sizeof(*config));
+
+	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
+				     cam_offset, add,
+				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
+				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
+				     ETH_VLAN_FILTER_CLASSIFY, config);
+}
+
 /**
  * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
  *
@@ -1135,6 +1301,25 @@
 	return NULL;
 }
 
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
+	struct bnx2x_exe_queue_obj *o,
+	struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem *pos;
+	struct bnx2x_vlan_mac_ramrod_data *data =
+		&elem->cmd_data.vlan_mac.u.vlan_mac;
+
+	/* Check pending for execution commands */
+	list_for_each_entry(pos, &o->exe_queue, link)
+		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
+			    sizeof(*data)) &&
+		    (pos->cmd_data.vlan_mac.cmd ==
+		     elem->cmd_data.vlan_mac.cmd))
+			return pos;
+
+	return NULL;
+}
+
 /**
  * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
  *
@@ -2042,6 +2227,68 @@
 	}
 }
 
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			     dma_addr_t rdata_mapping, int state,
+			     unsigned long *pstate, bnx2x_obj_type type,
+			     struct bnx2x_credit_pool_obj *macs_pool,
+			     struct bnx2x_credit_pool_obj *vlans_pool)
+{
+	union bnx2x_qable_obj *qable_obj =
+		(union bnx2x_qable_obj *)vlan_mac_obj;
+
+	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
+				   rdata_mapping, state, pstate, type,
+				   macs_pool, vlans_pool);
+
+	/* CAM pool handling */
+	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
+	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
+	/* CAM offset is relevant for 57710 and 57711 chips only which have a
+	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
+	 * will be taken from MACs' pool object only.
+	 */
+	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+	if (CHIP_IS_E1(bp)) {
+		BNX2X_ERR("Do not support chips others than E2\n");
+		BUG();
+	} else if (CHIP_IS_E1H(bp)) {
+		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
+		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+		vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
+		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_mac_obj->exe_queue, 1, qable_obj,
+				     bnx2x_validate_vlan_mac,
+				     bnx2x_remove_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan_mac);
+	} else {
+		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
+		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+		vlan_mac_obj->check_move        = bnx2x_check_move;
+		vlan_mac_obj->ramrod_cmd        =
+			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_mac_obj->exe_queue,
+				     CLASSIFY_RULES_COUNT,
+				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_remove_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan_mac);
+	}
+}
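bnx2x_init_vlan_mac_obj() above selects the whole classification strategy once, at init time: E1H gets the legacy single-slot CAM flow (exe queue depth 1, RAMROD_CMD_ID_ETH_SET_MAC) while newer chips get the classification-rules flow. A reduced sketch of that fill-in-the-ops pattern; every name is hypothetical:

#include <linux/types.h>

struct demo_clsf_ops {
	int	(*set_one_rule)(int idx);
	int	exe_queue_len;
};

static int demo_set_rule_e1h(int idx) { return 0; }	/* CAM slot write */
static int demo_set_rule_e2(int idx)  { return 0; }	/* rules ramrod   */

static const struct demo_clsf_ops demo_e1h_ops = {
	.set_one_rule	= demo_set_rule_e1h,
	.exe_queue_len	= 1,	/* one CAM entry per ramrod */
};

static const struct demo_clsf_ops demo_e2_ops = {
	.set_one_rule	= demo_set_rule_e2,
	.exe_queue_len	= 16,	/* stand-in for CLASSIFY_RULES_COUNT */
};

static const struct demo_clsf_ops *demo_pick_ops(bool is_e1h)
{
	return is_e1h ? &demo_e1h_ops : &demo_e2_ops;
}
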
 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
 			struct tstorm_eth_mac_filter_config *mac_filters,
@@ -3854,8 +4101,8 @@
  * If credit is negative pool operations will always succeed (unlimited pool).
  *
  */
-static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
-					  int base, int credit)
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+			    int base, int credit)
 {
 	/* Zero the object first */
 	memset(p, 0, sizeof(*p));
@@ -3934,9 +4181,9 @@
 		/* CAM credit is equaly divided between all active functions
 		 * on the PATH.
 		 */
-		if ((func_num > 0)) {
+		if (func_num > 0) {
 			if (!CHIP_REV_IS_SLOW(bp))
-				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
+				cam_sz = PF_MAC_CREDIT_E2(bp, func_num);
 			else
 				cam_sz = BNX2X_CAM_SIZE_EMUL;
 
@@ -3966,8 +4213,9 @@
 		 * on the PATH.
 		 */
 		if (func_num > 0) {
-			int credit = MAX_VLAN_CREDIT_E2 / func_num;
-			bnx2x_init_credit_pool(p, func_id * credit, credit);
+			int credit = PF_VLAN_CREDIT_E2(bp, func_num);
+
+			bnx2x_init_credit_pool(p, -1/*unused for E2*/, credit);
 		} else
 			/* this should never happen! Block VLAN operations. */
 			bnx2x_init_credit_pool(p, 0, 0);
@@ -4060,8 +4308,14 @@
 	if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
 
-	if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags))
-		caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY;
+	if (test_bit(BNX2X_RSS_IPV4_VXLAN, &p->rss_flags))
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV6_VXLAN, &p->rss_flags))
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_TUNN_INNER_HDRS, &p->rss_flags))
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY;
 
 	/* RSS keys */
 	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
@@ -5669,10 +5923,14 @@
 	rdata->sd_vlan_tag	= cpu_to_le16(start_params->sd_vlan_tag);
 	rdata->path_id		= BP_PATH(bp);
 	rdata->network_cos_mode	= start_params->network_cos_mode;
-	rdata->tunnel_mode	= start_params->tunnel_mode;
-	rdata->gre_tunnel_type	= start_params->gre_tunnel_type;
-	rdata->inner_gre_rss_en = start_params->inner_gre_rss_en;
-	rdata->vxlan_dst_port	= cpu_to_le16(4789);
+
+	rdata->vxlan_dst_port	= cpu_to_le16(start_params->vxlan_dst_port);
+	rdata->geneve_dst_port	= cpu_to_le16(start_params->geneve_dst_port);
+	rdata->inner_clss_l2gre	= start_params->inner_clss_l2gre;
+	rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve;
+	rdata->inner_clss_vxlan	= start_params->inner_clss_vxlan;
+	rdata->inner_rss	= start_params->inner_rss;
+
 	rdata->sd_accept_mf_clss_fail = start_params->class_fail;
 	if (start_params->class_fail_ethtype) {
 		rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
@@ -5690,6 +5948,14 @@
 			cpu_to_le16(0x8100);
 
 	rdata->no_added_tags = start_params->no_added_tags;
+
+	rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid;
+	if (rdata->c2s_pri_tt_valid) {
+		memcpy(rdata->c2s_pri_trans_table.val,
+		       start_params->c2s_pri,
+		       MAX_VLAN_PRIORITIES);
+		rdata->c2s_pri_default = start_params->c2s_pri_default;
+	}
 	/* No need for an explicit memory barrier here as long as we would
 	 * need to ensure the ordering of writing to the SPQ element
 	 * and updating of the SPQ producer which involves a memory
@@ -5750,15 +6016,22 @@
 	if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
 		     &switch_update_params->changes)) {
 		rdata->update_tunn_cfg_flg = 1;
-		if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
+		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
 			     &switch_update_params->changes))
-			rdata->tunn_clss_en = 1;
-		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+			rdata->inner_clss_l2gre = 1;
+		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
 			     &switch_update_params->changes))
-			rdata->inner_gre_rss_en = 1;
-		rdata->tunnel_mode = switch_update_params->tunnel_mode;
-		rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type;
-		rdata->vxlan_dst_port = cpu_to_le16(4789);
+			rdata->inner_clss_vxlan = 1;
+		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
+			     &switch_update_params->changes))
+			rdata->inner_clss_l2geneve = 1;
+		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
+			     &switch_update_params->changes))
+			rdata->inner_rss = 1;
+		rdata->vxlan_dst_port =
+			cpu_to_le16(switch_update_params->vxlan_dst_port);
+		rdata->geneve_dst_port =
+			cpu_to_le16(switch_update_params->geneve_dst_port);
 	}
 
 	rdata->echo = SWITCH_UPDATE;
@@ -5885,6 +6158,8 @@
 		rdata->traffic_type_to_priority_cos[i] =
 			tx_start_params->traffic_type_to_priority_cos[i];
 
+	for (i = 0; i < MAX_TRAFFIC_TYPES; i++)
+		rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i];
 	/* No need for an explicit memory barrier here as long as we
 	 * ensure the ordering of writing to the SPQ element
 	 * and updating of the SPQ producer which involves a memory
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 86baecb..4048fc5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1,15 +1,17 @@
-/* bnx2x_sp.h: Broadcom Everest network driver.
+/* bnx2x_sp.h: QLogic Everest network driver.
  *
- * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -711,7 +713,10 @@
 	BNX2X_RSS_IPV6,
 	BNX2X_RSS_IPV6_TCP,
 	BNX2X_RSS_IPV6_UDP,
-	BNX2X_RSS_GRE_INNER_HDRS,
+
+	BNX2X_RSS_IPV4_VXLAN,
+	BNX2X_RSS_IPV6_VXLAN,
+	BNX2X_RSS_TUNN_INNER_HDRS,
 };
 
 struct bnx2x_config_rss_params {
@@ -1105,8 +1110,10 @@
 	BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
 	BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
 	BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
-	BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
-	BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+	BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
+	BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
+	BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
+	BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
 };
 
 /* Allowed Function states */
@@ -1171,19 +1178,23 @@
 	/* Function cos mode */
 	u8 network_cos_mode;
 
-	/* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */
-	u8 tunnel_mode;
+	/* UDP dest port for VXLAN */
+	u16 vxlan_dst_port;
 
-	/* tunneling classification enablement */
-	u8 tunn_clss_en;
+	/* UDP dest port for Geneve */
+	u16 geneve_dst_port;
 
-	/* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
-	u8 gre_tunnel_type;
+	/* Enable inner Rx classifications for L2GRE packets */
+	u8 inner_clss_l2gre;
 
-	/* Enables Inner GRE RSS on the function, depends on the client RSS
-	 * capailities
-	 */
-	u8 inner_gre_rss_en;
+	/* Enable inner Rx classifications for L2-Geneve packets */
+	u8 inner_clss_l2geneve;
+
+	/* Enable inner Rx classification for vxlan packets */
+	u8 inner_clss_vxlan;
+
+	/* Enable RSS according to inner header */
+	u8 inner_rss;
 
 	/* Allows accepting of packets failing MF classification, possibly
 	 * only matching a given ethertype
@@ -1200,6 +1211,11 @@
 
 	/* Prevent inner vlans from being added by FW */
 	u8 no_added_tags;
+
+	/* Inner-to-Outer vlan priority mapping */
+	u8 c2s_pri[MAX_VLAN_PRIORITIES];
+	u8 c2s_pri_default;
+	u8 c2s_pri_valid;
 };
 
 struct bnx2x_func_switch_update_params {
@@ -1207,8 +1223,8 @@
 	u16 vlan;
 	u16 vlan_eth_type;
 	u8 vlan_force_prio;
-	u8 tunnel_mode;
-	u8 gre_tunnel_type;
+	u16 vxlan_dst_port;
+	u16 geneve_dst_port;
 };
 
 struct bnx2x_func_afex_update_params {
@@ -1229,6 +1245,7 @@
 	u8 dcb_enabled;
 	u8 dcb_version;
 	u8 dont_add_pri_0_en;
+	u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
 };
 
 struct bnx2x_func_set_timesync_params {
@@ -1396,6 +1413,14 @@
 			 unsigned long *pstate, bnx2x_obj_type type,
 			 struct bnx2x_credit_pool_obj *vlans_pool);
 
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			     dma_addr_t rdata_mapping, int state,
+			     unsigned long *pstate, bnx2x_obj_type type,
+			     struct bnx2x_credit_pool_obj *macs_pool,
+			     struct bnx2x_credit_pool_obj *vlans_pool);
+
 int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
 					struct bnx2x_vlan_mac_obj *o);
 void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
@@ -1466,6 +1491,8 @@
 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
 				 struct bnx2x_credit_pool_obj *p, u8 func_id,
 				 u8 func_num);
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+			    int base, int credit);
 
 /****************** RSS CONFIGURATION ****************/
 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
@@ -1493,4 +1520,12 @@
 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
 			     u8 *ind_table);
 
+#define PF_MAC_CREDIT_E2(bp, func_num)					\
+	((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
+	 func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
+
+#define PF_VLAN_CREDIT_E2(bp, func_num)					 \
+	((MAX_VLAN_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \
+	 func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
+
 #endif /* BNX2X_SP_VERBS */
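
The new PF_*_CREDIT_E2 macros split the path-wide classification credits
between the PFs after reserving a fixed per-VF quota. Worked arithmetic with
illustrative numbers (the real MAX_MAC_CREDIT_E2 value comes from the HSI
headers, not from this diff):

	/* Assume MAX_MAC_CREDIT_E2 == 272, 64 possible VFs per path
	 * (GET_NUM_VFS_PER_PATH), VF_MAC_CREDIT_CNT == 1, two PFs sharing
	 * the path (func_num == 2), and 8 VFs instantiated under this PF:
	 *
	 *	PF_MAC_CREDIT_E2 = (272 - 64 * 1) / 2 + 8 * 1 = 112
	 *
	 * The per-VF reservations are carved out of the path-wide pool
	 * first, the remainder is divided evenly among the PFs, and each
	 * PF is credited back one unit per VF it actually owns.
	 */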
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index f67348d..9d02734 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1,15 +1,17 @@
-/* bnx2x_sriov.c: Broadcom Everest network driver.
+/* bnx2x_sriov.c: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -195,14 +197,6 @@
 	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
 	setup_p->gen_params.fp_hsi = vf->fp_hsi;
 
-	/* Setup-op pause params:
-	 * Nothing to do, the pause thresholds are set by default to 0 which
-	 * effectively turns off the feature for this queue. We don't want
-	 * one queue (VF) to interfering with another queue (another VF)
-	 */
-	if (vf->cfg_flags & VF_CFG_FW_FC)
-		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
-			  vf->abs_vfid);
 	/* Setup-op flags:
 	 * collect statistics, zero statistics, local-switching, security,
 	 * OV for Flex10, RSS and MCAST for leading
@@ -358,22 +352,24 @@
 }
 
 static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
-				   int qid, bool drv_only, bool mac)
+				   int qid, bool drv_only, int type)
 {
 	struct bnx2x_vlan_mac_ramrod_params ramrod;
 	int rc;
 
 	DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
-	   mac ? "MACs" : "VLANs");
+			  (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+			  (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
 
 	/* Prepare ramrod params */
 	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
-	if (mac) {
+	if (type == BNX2X_VF_FILTER_VLAN_MAC) {
+		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+	} else if (type == BNX2X_VF_FILTER_MAC) {
 		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
 		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 	} else {
-		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
-			&ramrod.user_req.vlan_mac_flags);
 		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 	}
 	ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
@@ -391,14 +387,11 @@
 					     &ramrod.ramrod_flags);
 	if (rc) {
 		BNX2X_ERR("Failed to delete all %s\n",
-			  mac ? "MACs" : "VLANs");
+			  (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+			  (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
 		return rc;
 	}
 
-	/* Clear the vlan counters */
-	if (!mac)
-		atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
-
 	return 0;
 }
 
@@ -412,13 +405,17 @@
 
 	DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
 	   vf->abs_vfid, filter->add ? "Adding" : "Deleting",
-	   filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
+	   (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
+	   (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");
 
 	/* Prepare ramrod params */
 	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
-	if (filter->type == BNX2X_VF_FILTER_VLAN) {
-		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
-			&ramrod.user_req.vlan_mac_flags);
+	if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
+		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+		ramrod.user_req.u.vlan.vlan = filter->vid;
+		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
+		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+	} else if (filter->type == BNX2X_VF_FILTER_VLAN) {
 		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 		ramrod.user_req.u.vlan.vlan = filter->vid;
 	} else {
@@ -429,16 +426,6 @@
 	ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
 					    BNX2X_VLAN_MAC_DEL;
 
-	/* Verify there are available vlan credits */
-	if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
-	    (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
-	     vf_vlan_rules_cnt(vf))) {
-		BNX2X_ERR("No credits for vlan [%d >= %d]\n",
-			  atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
-			  vf_vlan_rules_cnt(vf));
-		return -ENOMEM;
-	}
-
 	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
 	if (drv_only)
 		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
@@ -450,16 +437,13 @@
 	if (rc && rc != -EEXIST) {
 		BNX2X_ERR("Failed to %s %s\n",
 			  filter->add ? "add" : "delete",
-			  filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
-								"VLAN");
+			  (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
+				"VLAN-MAC" :
+			  (filter->type == BNX2X_VF_FILTER_MAC) ?
+				"MAC" : "VLAN");
 		return rc;
 	}
 
-	/* Update the vlan counters */
-	if (filter->type == BNX2X_VF_FILTER_VLAN)
-		bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
-				     &bnx2x_vfq(vf, qid, vlan_count));
-
 	return 0;
 }
 
@@ -511,21 +495,7 @@
 	if (rc)
 		goto op_err;
 
-	/* Configure vlan0 for leading queue */
-	if (!qid) {
-		struct bnx2x_vf_mac_vlan_filter filter;
-
-		memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
-		filter.type = BNX2X_VF_FILTER_VLAN;
-		filter.add = true;
-		filter.vid = 0;
-		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
-		if (rc)
-			goto op_err;
-	}
-
 	/* Schedule the configuration of any pending vlan filters */
-	vf->cfg_flags |= VF_CFG_VLAN;
 	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
 			       BNX2X_MSG_IOV);
 	return 0;
@@ -544,10 +514,16 @@
 	/* If needed, clean the filtering data base */
 	if ((qid == LEADING_IDX) &&
 	    bnx2x_validate_vf_sp_objs(bp, vf, false)) {
-		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
+		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+					     BNX2X_VF_FILTER_VLAN_MAC);
 		if (rc)
 			goto op_err;
-		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
+		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+					     BNX2X_VF_FILTER_VLAN);
+		if (rc)
+			goto op_err;
+		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+					     BNX2X_VF_FILTER_MAC);
 		if (rc)
 			goto op_err;
 	}
@@ -680,11 +656,18 @@
 		/* Remove filtering if feasible */
 		if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
 			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
-						     false, false);
+						     false,
+						     BNX2X_VF_FILTER_VLAN_MAC);
 			if (rc)
 				goto op_err;
 			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
-						     false, true);
+						     false,
+						     BNX2X_VF_FILTER_VLAN);
+			if (rc)
+				goto op_err;
+			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+						     false,
+						     BNX2X_VF_FILTER_MAC);
 			if (rc)
 				goto op_err;
 			rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
@@ -765,8 +748,6 @@
 
 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
 	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
-	if (vf->cfg_flags & VF_CFG_INT_SIMD)
-		val |= IGU_VF_CONF_SINGLE_ISR_EN;
 	val &= ~IGU_VF_CONF_PARENT_MASK;
 	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
@@ -845,29 +826,6 @@
 	return 0;
 }
 
-static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
-					  struct bnx2x_virtf *vf,
-					  int new)
-{
-	int num = vf_vlan_rules_cnt(vf);
-	int diff = new - num;
-	bool rc = true;
-
-	DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
-	   vf->abs_vfid, new, num);
-
-	if (diff > 0)
-		rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
-	else if (diff < 0)
-		rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
-
-	if (rc)
-		vf_vlan_rules_cnt(vf) = new;
-	else
-		DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
-		   vf->abs_vfid);
-}
-
 /* must be called after the number of PF queues and the number of VFs are
  * both known
  */
@@ -875,21 +833,13 @@
 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
 	struct vf_pf_resc_request *resc = &vf->alloc_resc;
-	u16 vlan_count = 0;
 
 	/* will be set only during VF-ACQUIRE */
 	resc->num_rxqs = 0;
 	resc->num_txqs = 0;
 
-	/* no credit calculations for macs (just yet) */
-	resc->num_mac_filters = 1;
-
-	/* divvy up vlan rules */
-	bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
-	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
-	vlan_count = 1 << ilog2(vlan_count);
-	bnx2x_iov_re_set_vlan_filters(bp, vf,
-				      vlan_count / BNX2X_NR_VIRTFN(bp));
+	resc->num_mac_filters = VF_MAC_CREDIT_CNT;
+	resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;
 
 	/* no real limitation */
 	resc->num_mc_filters = 0;
@@ -1338,6 +1288,9 @@
 
 	mutex_init(&bp->vfdb->bulletin_mutex);
 
+	if (SHMEM2_HAS(bp, sriov_switch_mode))
+		SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);
+
 	return 0;
 failed:
 	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
@@ -1620,6 +1573,11 @@
 		vf->filter_state = 0;
 		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
 
+		bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
+				       vf_vlan_rules_cnt(vf));
+		bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
+				       vf_mac_rules_cnt(vf));
+
 		/*  init mcast object - This object will be re-initialized
 		 *  during VF-ACQUIRE with the proper cl_id and cid.
 		 *  It needs to be initialized here so that it can be safely
@@ -2032,12 +1990,11 @@
 	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
 	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
 
-	/* Save a vlan filter for the Hypervisor */
 	return ((req_resc->num_rxqs <= rxq_cnt) &&
 		(req_resc->num_txqs <= txq_cnt) &&
 		(req_resc->num_sbs <= vf_sb_count(vf))   &&
 		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
-		(req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
+		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
 }
 
 /* CORE VF API */
@@ -2091,16 +2048,12 @@
 	vf_sb_count(vf) = resc->num_sbs;
 	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
 	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
-	if (resc->num_mac_filters)
-		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
-	/* Add an additional vlan filter credit for the hypervisor */
-	bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
 
 	DP(BNX2X_MSG_IOV,
 	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
 	   vf_sb_count(vf), vf_rxq_count(vf),
 	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
-	   vf_vlan_rules_visible_cnt(vf));
+	   vf_vlan_rules_cnt(vf));
 
 	/* Initialize the queues */
 	if (!vf->vfqs) {
@@ -2133,7 +2086,6 @@
 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
 {
 	struct bnx2x_func_init_params func_init = {0};
-	u16 flags = 0;
 	int i;
 
 	/* the sb resources are initialized at this point, do the
@@ -2160,23 +2112,9 @@
 	/* reset IGU VF statistics: MSIX */
 	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0);
 
-	/* vf init */
-	if (vf->cfg_flags & VF_CFG_STATS)
-		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
-
-	if (vf->cfg_flags & VF_CFG_TPA)
-		flags |= FUNC_FLG_TPA;
-
-	if (is_vf_multi(vf))
-		flags |= FUNC_FLG_RSS;
-
 	/* function setup */
-	func_init.func_flgs = flags;
 	func_init.pf_id = BP_FUNC(bp);
 	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
-	func_init.fw_stat_map = vf->fw_stat_map;
-	func_init.spq_map = vf->spq_map;
-	func_init.spq_prod = 0;
 	bnx2x_func_init(bp, &func_init);
 
 	/* Enable the vf */
@@ -2589,8 +2527,8 @@
 
 	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
 	for_each_vf(bp, vfidx) {
-	bulletin = BP_VF_BULLETIN(bp, vfidx);
-		if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
+		bulletin = BP_VF_BULLETIN(bp, vfidx);
+		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
 			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
 	}
 }
@@ -2808,20 +2746,58 @@
 	return rc;
 }
 
+static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
+					 struct bnx2x_virtf *vf, bool accept)
+{
+	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
+	unsigned long accept_flags;
+
+	/* need to remove/add the VF's accept_any_vlan bit */
+	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+	if (accept)
+		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+	else
+		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+			      accept_flags);
+	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+	bnx2x_config_rx_mode(bp, &rx_ramrod);
+}
+
+static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				    u16 vlan, bool add)
+{
+	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+	unsigned long ramrod_flags = 0;
+	int rc = 0;
+
+	/* configure the new vlan to device */
+	memset(&ramrod_param, 0, sizeof(ramrod_param));
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+	ramrod_param.ramrod_flags = ramrod_flags;
+	ramrod_param.user_req.u.vlan.vlan = vlan;
+	ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
+					: BNX2X_VLAN_MAC_DEL;
+	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+	if (rc) {
+		BNX2X_ERR("failed to configure vlan\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 {
-	struct bnx2x_queue_state_params q_params = {NULL};
-	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
-	struct bnx2x_queue_update_params *update_params;
 	struct pf_vf_bulletin_content *bulletin = NULL;
-	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
 	struct bnx2x *bp = netdev_priv(dev);
 	struct bnx2x_vlan_mac_obj *vlan_obj;
 	unsigned long vlan_mac_flags = 0;
 	unsigned long ramrod_flags = 0;
 	struct bnx2x_virtf *vf = NULL;
-	unsigned long accept_flags;
-	int rc;
+	int i, rc;
 
 	if (vlan > 4095) {
 		BNX2X_ERR("illegal vlan value %d\n", vlan);
@@ -2850,6 +2826,10 @@
 		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
 	bulletin->vlan = vlan;
 
+	/* Post update on VF's bulletin board */
+	rc = bnx2x_post_vf_bulletin(bp, vfidx);
+	if (rc)
+		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
 	mutex_unlock(&bp->vfdb->bulletin_mutex);
 
 	/* is vf initialized and queue set up? */
@@ -2876,84 +2856,76 @@
 		goto out;
 	}
 
-	/* need to remove/add the VF's accept_any_vlan bit */
-	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
-	if (vlan)
-		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
-	else
-		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
-
-	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
-			      accept_flags);
-	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
-	bnx2x_config_rx_mode(bp, &rx_ramrod);
-
-	/* configure the new vlan to device */
-	memset(&ramrod_param, 0, sizeof(ramrod_param));
-	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
-	ramrod_param.vlan_mac_obj = vlan_obj;
-	ramrod_param.ramrod_flags = ramrod_flags;
-	set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
-		&ramrod_param.user_req.vlan_mac_flags);
-	ramrod_param.user_req.u.vlan.vlan = vlan;
-	ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
-	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
-	if (rc) {
-		BNX2X_ERR("failed to configure vlan\n");
-		rc =  -EINVAL;
-		goto out;
-	}
-
-	/* send queue update ramrod to configure default vlan and silent
-	 * vlan removal
+	/* clear accept_any_vlan when HV forces vlan, otherwise
+	 * according to VF capabilities
 	 */
-	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
-	q_params.cmd = BNX2X_Q_CMD_UPDATE;
-	q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
-	update_params = &q_params.params.update;
-	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
-		  &update_params->update_flags);
-	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
-		  &update_params->update_flags);
-	if (vlan == 0) {
-		/* if vlan is 0 then we want to leave the VF traffic
-		 * untagged, and leave the incoming traffic untouched
-		 * (i.e. do not remove any vlan tags).
-		 */
-		__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-			    &update_params->update_flags);
-		__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-			    &update_params->update_flags);
-	} else {
-		/* configure default vlan to vf queue and set silent
-		 * vlan removal (the vf remains unaware of this vlan).
-		 */
-		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-			  &update_params->update_flags);
-		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-			  &update_params->update_flags);
-		update_params->def_vlan = vlan;
-		update_params->silent_removal_value =
-			vlan & VLAN_VID_MASK;
-		update_params->silent_removal_mask = VLAN_VID_MASK;
-	}
+	if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
+		bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);
 
-	/* Update the Queue state */
-	rc = bnx2x_queue_state_change(bp, &q_params);
-	if (rc) {
-		BNX2X_ERR("Failed to configure default VLAN\n");
+	rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
+	if (rc)
 		goto out;
-	}
 
-
-	/* clear the flag indicating that this VF needs its vlan
-	 * (will only be set if the HV configured the Vlan before vf was
-	 * up and we were called because the VF came up later
+	/* send queue update ramrods to configure default vlan and
+	 * silent vlan removal
 	 */
+	for_each_vfq(vf, i) {
+		struct bnx2x_queue_state_params q_params = {NULL};
+		struct bnx2x_queue_update_params *update_params;
+
+		q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
+
+		/* validate the Q is UP */
+		if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
+		    BNX2X_Q_LOGICAL_STATE_ACTIVE)
+			continue;
+
+		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+		q_params.cmd = BNX2X_Q_CMD_UPDATE;
+		update_params = &q_params.params.update;
+		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+			  &update_params->update_flags);
+		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+			  &update_params->update_flags);
+		if (vlan == 0) {
+			/* if vlan is 0 then we want to leave the VF traffic
+			 * untagged, and leave the incoming traffic untouched
+			 * (i.e. do not remove any vlan tags).
+			 */
+			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+				    &update_params->update_flags);
+			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+				    &update_params->update_flags);
+		} else {
+			/* configure default vlan to vf queue and set silent
+			 * vlan removal (the vf remains unaware of this vlan).
+			 */
+			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+				  &update_params->update_flags);
+			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+				  &update_params->update_flags);
+			update_params->def_vlan = vlan;
+			update_params->silent_removal_value =
+				vlan & VLAN_VID_MASK;
+			update_params->silent_removal_mask = VLAN_VID_MASK;
+		}
+
+		/* Update the Queue state */
+		rc = bnx2x_queue_state_change(bp, &q_params);
+		if (rc) {
+			BNX2X_ERR("Failed to configure default VLAN queue %d\n",
+				  i);
+			goto out;
+		}
+	}
 out:
-	vf->cfg_flags &= ~VF_CFG_VLAN;
 	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
 
+	if (!rc)
+		DP(BNX2X_MSG_IOV,
+		   "updated VF[%d] vlan configuration (vlan = %d)\n",
+		   vfidx, vlan);
+
 	return rc;
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 66ee62a..670a581 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -1,15 +1,17 @@
-/* bnx2x_sriov.h: Broadcom Everest network driver.
+/* bnx2x_sriov.h: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -75,7 +77,10 @@
 
 	/* VLANs object */
 	struct bnx2x_vlan_mac_obj	vlan_obj;
-	atomic_t vlan_count;		/* 0 means vlan-0 is set  ~ untagged */
+
+	/* VLAN-MACs object */
+	struct bnx2x_vlan_mac_obj	vlan_mac_obj;
+
 	unsigned long accept_flags;	/* last accept flags configured */
 
 	/* Queue Slow-path State object */
@@ -103,8 +108,10 @@
 
 struct bnx2x_vf_mac_vlan_filter {
 	int type;
-#define BNX2X_VF_FILTER_MAC	1
-#define BNX2X_VF_FILTER_VLAN	2
+#define BNX2X_VF_FILTER_MAC	BIT(0)
+#define BNX2X_VF_FILTER_VLAN	BIT(1)
+#define BNX2X_VF_FILTER_VLAN_MAC \
+	(BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /* shortcut */
 
 	bool add;
 	u8 *mac;
@@ -119,14 +126,9 @@
 /* vf context */
 struct bnx2x_virtf {
 	u16 cfg_flags;
-#define VF_CFG_STATS		0x0001
-#define VF_CFG_FW_FC		0x0002
-#define VF_CFG_TPA		0x0004
-#define VF_CFG_INT_SIMD		0x0008
-#define VF_CACHE_LINE		0x0010
-#define VF_CFG_VLAN		0x0020
-#define VF_CFG_STATS_COALESCE	0x0040
-#define VF_CFG_EXT_BULLETIN	0x0080
+#define VF_CFG_STATS_COALESCE	0x1
+#define VF_CFG_EXT_BULLETIN	0x2
+#define VF_CFG_VLAN_FILTER	0x4
 	u8 link_cfg;		/* IFLA_VF_LINK_STATE_AUTO
 				 * IFLA_VF_LINK_STATE_ENABLE
 				 * IFLA_VF_LINK_STATE_DISABLE
@@ -140,9 +142,8 @@
 	bool flr_clnup_stage;	/* true during flr cleanup */
 
 	/* dma */
-	dma_addr_t fw_stat_map;		/* valid iff VF_CFG_STATS */
+	dma_addr_t fw_stat_map;
 	u16 stats_stride;
-	dma_addr_t spq_map;
 	dma_addr_t bulletin_map;
 
 	/* Allocated resources counters. Before the VF is acquired, the
@@ -163,8 +164,6 @@
 #define vf_mac_rules_cnt(vf)		((vf)->alloc_resc.num_mac_filters)
 #define vf_vlan_rules_cnt(vf)		((vf)->alloc_resc.num_vlan_filters)
 #define vf_mc_rules_cnt(vf)		((vf)->alloc_resc.num_mc_filters)
-	/* Hide a single vlan filter credit for the hypervisor */
-#define vf_vlan_rules_visible_cnt(vf)	(vf_vlan_rules_cnt(vf) - 1)
 
 	u8 sb_count;	/* actual number of SBs */
 	u8 igu_base_id;	/* base igu status block id */
@@ -207,6 +206,9 @@
 	enum channel_tlvs		op_current;
 
 	u8 fp_hsi;
+
+	struct bnx2x_credit_pool_obj	vf_vlans_pool;
+	struct bnx2x_credit_pool_obj	vf_macs_pool;
 };
 
 #define BNX2X_NR_VIRTFN(bp)	((bp)->vfdb->sriov.nr_virtfn)
@@ -230,6 +232,12 @@
 #define FW_VF_HANDLE(abs_vfid)	\
 	(abs_vfid + FW_PF_MAX_HANDLE)
 
+#define GET_NUM_VFS_PER_PATH(bp)	64 /* use max possible value */
+#define GET_NUM_VFS_PER_PF(bp)		((bp)->vfdb ? (bp)->vfdb->sriov.total \
+						    : 0)
+#define VF_MAC_CREDIT_CNT		1
+#define VF_VLAN_CREDIT_CNT		2 /* VLAN0 + 'real' VLAN */
+
 /* locking and unlocking the channel mutex */
 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
 			      enum channel_tlvs tlv);
@@ -274,6 +282,10 @@
 	} vlan_rdata;
 
 	union {
+		struct eth_classify_rules_ramrod_data	e2;
+	} vlan_mac_rdata;
+
+	union {
 		struct eth_filter_rules_ramrod_data	e2;
 	} rx_mode_rdata;
 
@@ -536,8 +548,14 @@
 
 int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state);
 
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add);
 #else /* CONFIG_BNX2X_SRIOV */
 
+#define GET_NUM_VFS_PER_PATH(bp)	0
+#define GET_NUM_VFS_PER_PF(bp)		0
+#define VF_MAC_CREDIT_CNT		0
+#define VF_VLAN_CREDIT_CNT		0
+
 static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
 				struct bnx2x_queue_sp_obj **q_obj) {}
 static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
@@ -604,5 +622,7 @@
 static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
 					      bool support_long) {}
 
+static inline int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) { return 0; }
+
 #endif /* CONFIG_BNX2X_SRIOV */
 #endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 69d699f0..7e0919a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1,6 +1,8 @@
-/* bnx2x_stats.c: Broadcom Everest network driver.
+/* bnx2x_stats.c: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 965539a..b2644ed 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -1,6 +1,8 @@
-/* bnx2x_stats.h: Broadcom Everest network driver.
+/* bnx2x_stats.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 06b8c0d..1374e53 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -1,15 +1,17 @@
-/* bnx2x_vfpf.c: Broadcom Everest network driver.
+/* bnx2x_vfpf.c: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -245,6 +247,7 @@
 	req->resc_request.num_sbs = bp->igu_sb_cnt;
 	req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
 	req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
+	req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;
 
 	/* pf 2 vf bulletin board address */
 	req->bulletin_addr = bp->pf2vf_bulletin_mapping;
@@ -255,6 +258,8 @@
 
 	/* Bulletin support for bulletin board with length > legacy length */
 	req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
+	/* vlan filtering is supported */
+	req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;
 
 	/* add list termination tlv */
 	bnx2x_add_tlv(bp, req,
@@ -373,6 +378,8 @@
 		NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
 	bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
 	bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
+	bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
+
 	strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
 		sizeof(bp->fw_ver));
 
@@ -546,7 +553,7 @@
 			   BNX2X_FILTER_MAC_PENDING,
 			   &vf->filter_state,
 			   BNX2X_OBJ_TYPE_RX_TX,
-			   &bp->macs_pool);
+			   &vf->vf_macs_pool);
 	/* vlan */
 	bnx2x_init_vlan_obj(bp, &q->vlan_obj,
 			    cl_id, q->cid, func_id,
@@ -555,8 +562,17 @@
 			    BNX2X_FILTER_VLAN_PENDING,
 			    &vf->filter_state,
 			    BNX2X_OBJ_TYPE_RX_TX,
-			    &bp->vlans_pool);
-
+			    &vf->vf_vlans_pool);
+	/* vlan-mac */
+	bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
+				cl_id, q->cid, func_id,
+				bnx2x_vf_sp(bp, vf, vlan_mac_rdata),
+				bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata),
+				BNX2X_FILTER_VLAN_MAC_PENDING,
+				&vf->filter_state,
+				BNX2X_OBJ_TYPE_RX_TX,
+				&vf->vf_macs_pool,
+				&vf->vf_vlans_pool);
 	/* mcast */
 	bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
 			     q->cid, func_id, func_id,
@@ -723,7 +739,7 @@
 
 	req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
 	if (set)
-		req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;
+		req->filters[0].flags |= VFPF_Q_FILTER_SET;
 
 	/* sample bulletin board for new mac */
 	bnx2x_sample_bulletin(bp);
@@ -911,6 +927,67 @@
 	return 0;
 }
 
+/* request pf to add a vlan for the vf */
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
+{
+	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+	int rc = 0;
+
+	if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) {
+		DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n");
+		return 0;
+	}
+
+	/* clear mailbox and prep first tlv */
+	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+			sizeof(*req));
+
+	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
+	req->vf_qid = vf_qid;
+	req->n_mac_vlan_filters = 1;
+
+	req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;
+
+	if (add)
+		req->filters[0].flags |= VFPF_Q_FILTER_SET;
+
+	/* sample bulletin board for hypervisor vlan */
+	bnx2x_sample_bulletin(bp);
+
+	if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
+		BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	req->filters[0].vlan_tag = vid;
+
+	/* add list termination tlv */
+	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	/* output tlvs list */
+	bnx2x_dp_tlv_list(bp, req);
+
+	/* send message to pf */
+	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+	if (rc) {
+		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+		goto out;
+	}
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+		BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del",
+			  vid);
+		rc = -EINVAL;
+	}
+out:
+	bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+	return rc;
+}
+
 int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
 {
 	int mode = bp->rx_mode;
@@ -934,8 +1011,13 @@
 		req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
 		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
 		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+		if (mode == BNX2X_RX_MODE_PROMISC)
+			req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
 	}
 
+	if (bp->accept_any_vlan)
+		req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
+
 	req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
 	req->vf_qid = 0;
 
@@ -1188,7 +1270,8 @@
 	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
 	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
 				   PFVF_CAP_TPA |
-				   PFVF_CAP_TPA_UPDATE);
+				   PFVF_CAP_TPA_UPDATE |
+				   PFVF_CAP_VLAN_FILTER);
 	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
 			  sizeof(resp->pfdev_info.fw_ver));
 
@@ -1203,7 +1286,7 @@
 			bnx2x_vf_max_queue_cnt(bp, vf);
 		resc->num_sbs = vf_sb_count(vf);
 		resc->num_mac_filters = vf_mac_rules_cnt(vf);
-		resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
+		resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
 		resc->num_mc_filters = 0;
 
 		if (status == PFVF_STATUS_SUCCESS) {
@@ -1370,6 +1453,14 @@
 		vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
 	}
 
+	if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) {
+		DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n",
+		   vf->abs_vfid);
+		vf->cfg_flags |= VF_CFG_VLAN_FILTER;
+	} else {
+		vf->cfg_flags &= ~VF_CFG_VLAN_FILTER;
+	}
+
 out:
 	/* response */
 	bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
@@ -1382,7 +1473,6 @@
 	int rc;
 
 	/* record ghost addresses from vf message */
-	vf->spq_map = init->spq_addr;
 	vf->fw_stat_map = init->stats_addr;
 	vf->stats_stride = init->stats_stride;
 	rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
@@ -1578,17 +1668,18 @@
 
 		if ((msg_filter->flags & type_flag) != type_flag)
 			continue;
-		if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
+		memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
+		if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) {
 			fl->filters[j].mac = msg_filter->mac;
-			fl->filters[j].type = BNX2X_VF_FILTER_MAC;
-		} else {
-			fl->filters[j].vid = msg_filter->vlan_tag;
-			fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
+			fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
 		}
-		fl->filters[j].add =
-			(msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
-			true : false;
+		if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) {
+			fl->filters[j].vid = msg_filter->vlan_tag;
+			fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
+		}
+		fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
 		fl->count++;
+		j++;
 	}
 	if (!fl->count)
 		kfree(fl);
@@ -1598,6 +1689,18 @@
 	return 0;
 }
 
+static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters,
+				    u32 flags)
+{
+	int i, cnt = 0;
+
+	for (i = 0; i < filters->n_mac_vlan_filters; i++)
+		if ((filters->filters[i].flags & flags) == flags)
+			cnt++;
+
+	return cnt;
+}
+
 static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
 				       struct vfpf_q_mac_vlan_filter *filter)
 {
@@ -1629,6 +1732,7 @@
 
 #define VFPF_MAC_FILTER		VFPF_Q_FILTER_DEST_MAC_VALID
 #define VFPF_VLAN_FILTER	VFPF_Q_FILTER_VLAN_TAG_VALID
+#define VFPF_VLAN_MAC_FILTER	(VFPF_VLAN_FILTER | VFPF_MAC_FILTER)
 
 static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
@@ -1639,16 +1743,33 @@
 
 	/* check for any mac/vlan changes */
 	if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
-		/* build mac list */
 		struct bnx2x_vf_mac_vlan_filters *fl = NULL;
 
+		/* build vlan-mac list */
+		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+					       VFPF_VLAN_MAC_FILTER);
+		if (rc)
+			goto op_err;
+
+		if (fl) {
+
+			/* set vlan-mac list */
+			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+							   msg->vf_qid,
+							   false);
+			if (rc)
+				goto op_err;
+		}
+
+		/* build mac list */
+		fl = NULL;
+
 		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
 					       VFPF_MAC_FILTER);
 		if (rc)
 			goto op_err;
 
 		if (fl) {
-
 			/* set mac list */
 			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
 							   msg->vf_qid,
@@ -1657,22 +1778,6 @@
 				goto op_err;
 		}
 
-		/* build vlan list */
-		fl = NULL;
-
-		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
-					       VFPF_VLAN_FILTER);
-		if (rc)
-			goto op_err;
-
-		if (fl) {
-			/* set vlan list */
-			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
-							   msg->vf_qid,
-							   false);
-			if (rc)
-				goto op_err;
-		}
 	}
 
 	if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
@@ -1687,11 +1792,15 @@
 			__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
 		}
 
-		/* A packet arriving the vf's mac should be accepted
-		 * with any vlan, unless a vlan has already been
-		 * configured.
+		/* any_vlan is not configured if the HV is forcing a VLAN;
+		 * any_vlan is configured if
+		 *   1. VF does not support vlan filtering
+		 *   OR
+		 *   2. VF supports vlan filtering and explicitly requested it
 		 */
-		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
+		    (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
+		     msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN))
 			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
 
 		/* set rx-mode */
@@ -1727,17 +1836,31 @@
 	 * since queue was not set up.
 	 */
 	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
-		/* once a mac was set by ndo can only accept a single mac... */
-		if (filters->n_mac_vlan_filters > 1) {
-			BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
-				  vf->abs_vfid);
-			rc = -EPERM;
-			goto response;
+		struct vfpf_q_mac_vlan_filter *filter = NULL;
+		int i;
+
+		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+			if (!(filters->filters[i].flags &
+			      VFPF_Q_FILTER_DEST_MAC_VALID))
+				continue;
+
+			/* once a mac was set by ndo can only accept
+			 * a single mac...
+			 */
+			if (filter) {
+				BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n",
+					  vf->abs_vfid,
+					  filters->n_mac_vlan_filters);
+				rc = -EPERM;
+				goto response;
+			}
+
+			filter = &filters->filters[i];
 		}
 
 		/* ...and only the mac set by the ndo */
-		if (filters->n_mac_vlan_filters == 1 &&
-		    !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
+		if (filter &&
+		    !ether_addr_equal(filter->mac, bulletin->mac)) {
 			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
 				  vf->abs_vfid);
 
@@ -1759,17 +1882,14 @@
 
 	/* if vlan was set by hypervisor we don't allow guest to config vlan */
 	if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
-		int i;
-
 		/* search for vlan filters */
-		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
-			if (filters->filters[i].flags &
-			    VFPF_Q_FILTER_VLAN_TAG_VALID) {
-				BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
-					  vf->abs_vfid);
-				rc = -EPERM;
-				goto response;
-			}
+
+		if (bnx2x_vf_filters_contain(filters,
+					     VFPF_Q_FILTER_VLAN_TAG_VALID)) {
+			BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+				  vf->abs_vfid);
+			rc = -EPERM;
+			goto response;
 		}
 	}
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index b86479f..64f2b52 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -1,16 +1,22 @@
-/* bnx2x_vfpf.h: Broadcom Everest network driver.
+/* bnx2x_vfpf.h: QLogic Everest network driver.
  *
  * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.gnu.org/licenses/gpl-2.0.html, with the following
+ * added to such license:
  *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions
+ * of the license of that module.  An independent module is a module which is
+ * not derived from this software.  The special exception does not apply to any
+ * modifications of the software.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Ariel Elior <ariel.elior@qlogic.com>
@@ -64,6 +70,8 @@
 #define VFPF_RX_MASK_ACCEPT_ALL_UNICAST		0x00000004
 #define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST	0x00000008
 #define VFPF_RX_MASK_ACCEPT_BROADCAST		0x00000010
+#define VFPF_RX_MASK_ACCEPT_ANY_VLAN		0x00000020
+
 #define BULLETIN_CONTENT_SIZE		(sizeof(struct pf_vf_bulletin_content))
 #define BULLETIN_CONTENT_LEGACY_SIZE	(32)
 #define BULLETIN_ATTEMPTS	5 /* crc failures before throwing towel */
@@ -127,6 +135,7 @@
 		u8 fp_hsi_ver;
 		u8 caps;
 #define VF_CAP_SUPPORT_EXT_BULLETIN	(1 << 0)
+#define VF_CAP_SUPPORT_VLAN_FILTER	(1 << 1)
 	} vfdev_info;
 
 	struct vf_pf_resc_request resc_request;
@@ -168,10 +177,12 @@
 	struct pf_vf_pfdev_info {
 		u32 chip_num;
 		u32 pf_cap;
-#define PFVF_CAP_RSS		0x00000001
-#define PFVF_CAP_DHC		0x00000002
-#define PFVF_CAP_TPA		0x00000004
-#define PFVF_CAP_TPA_UPDATE	0x00000008
+#define PFVF_CAP_RSS          0x00000001
+#define PFVF_CAP_DHC          0x00000002
+#define PFVF_CAP_TPA          0x00000004
+#define PFVF_CAP_TPA_UPDATE   0x00000008
+#define PFVF_CAP_VLAN_FILTER  0x00000010
+
 		char fw_ver[32];
 		u16 db_size;
 		u8  indices_per_sb;
@@ -288,7 +299,7 @@
 	u32 flags;
 #define VFPF_Q_FILTER_DEST_MAC_VALID	0x01
 #define VFPF_Q_FILTER_VLAN_TAG_VALID	0x02
-#define VFPF_Q_FILTER_SET_MAC		0x100	/* set/clear */
+#define VFPF_Q_FILTER_SET		0x100	/* set/clear */
 	u8  mac[ETH_ALEN];
 	u16 vlan_tag;
 };
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 17c145f..b69dc58 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -192,6 +192,7 @@
 	struct drv_ctl_info info;
 	struct drv_ctl_io *io = &info.data.io;
 
+	memset(&info, 0, sizeof(struct drv_ctl_info));
 	info.cmd = DRV_CTL_CTX_WR_CMD;
 	io->cid_addr = cid_addr;
 	io->offset = off;
@@ -206,6 +207,7 @@
 	struct drv_ctl_info info;
 	struct drv_ctl_io *io = &info.data.io;
 
+	memset(&info, 0, sizeof(struct drv_ctl_info));
 	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
 	io->offset = off;
 	io->dma_addr = addr;
@@ -219,6 +221,7 @@
 	struct drv_ctl_info info;
 	struct drv_ctl_l2_ring *ring = &info.data.ring;
 
+	memset(&info, 0, sizeof(struct drv_ctl_info));
 	if (start)
 		info.cmd = DRV_CTL_START_L2_CMD;
 	else
@@ -236,6 +239,7 @@
 	struct drv_ctl_info info;
 	struct drv_ctl_io *io = &info.data.io;
 
+	memset(&info, 0, sizeof(struct drv_ctl_info));
 	info.cmd = DRV_CTL_IO_WR_CMD;
 	io->offset = off;
 	io->data = val;
@@ -249,13 +253,14 @@
 	struct drv_ctl_info info;
 	struct drv_ctl_io *io = &info.data.io;
 
+	memset(&info, 0, sizeof(struct drv_ctl_info));
 	info.cmd = DRV_CTL_IO_RD_CMD;
 	io->offset = off;
 	ethdev->drv_ctl(dev->netdev, &info);
 	return io->data;
 }
 
-static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
+static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
 {
 	struct cnic_local *cp = dev->cnic_priv;
 	struct cnic_eth_dev *ethdev = cp->ethdev;
@@ -263,6 +268,7 @@
 	struct fcoe_capabilities *fcoe_cap =
 		&info.data.register_data.fcoe_features;
 
+	memset(&info, 0, sizeof(struct drv_ctl_info));
 	if (reg) {
 		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
 		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
@@ -272,6 +278,7 @@
 	}
 
 	info.data.ulp_type = ulp_type;
+	info.drv_state = state;
 	ethdev->drv_ctl(dev->netdev, &info);
 }
 
@@ -286,6 +293,7 @@
 	struct cnic_eth_dev *ethdev = cp->ethdev;
 	struct drv_ctl_info info;
 
+	memset(&info, 0, sizeof(struct drv_ctl_info));
 	info.cmd = cmd;
 	info.data.credit.credit_count = count;
 	ethdev->drv_ctl(dev->netdev, &info);
@@ -591,7 +599,7 @@
 
 	mutex_unlock(&cnic_lock);
 
-	cnic_ulp_ctl(dev, ulp_type, true);
+	cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);
 
 	return 0;
 
@@ -636,7 +644,10 @@
 	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
 		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
 
-	cnic_ulp_ctl(dev, ulp_type, false);
+	if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
+		cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
+	else
+		cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);
 
 	return 0;
 }
@@ -4267,6 +4278,7 @@
 
 		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
 
+		memset(&info, 0, sizeof(struct drv_ctl_info));
 		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
 		cp->ethdev->drv_ctl(dev->netdev, &info);
 	}
@@ -5433,6 +5445,23 @@
 	kfree(dev);
 }
 
+static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
+				struct cnic_fc_npiv_tbl *npiv_tbl)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	int ret;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;     /* bnx2x is down */
+
+	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
+		return -EINVAL;
+
+	ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);
+	return ret;
+}
+
 static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
 				       struct pci_dev *pdev)
 {
@@ -5451,6 +5480,7 @@
 	cdev->register_device = cnic_register_device;
 	cdev->unregister_device = cnic_unregister_device;
 	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
+	cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
 
 	cp = cdev->cnic_priv;
 	cp->dev = cdev;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index ef6125b..789e5c7 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -15,8 +15,8 @@
 
 #include "bnx2x/bnx2x_mfw_req.h"
 
-#define CNIC_MODULE_VERSION	"2.5.21"
-#define CNIC_MODULE_RELDATE	"January 29, 2015"
+#define CNIC_MODULE_VERSION	"2.5.22"
+#define CNIC_MODULE_RELDATE	"July 20, 2015"
 
 #define CNIC_ULP_RDMA		0
 #define CNIC_ULP_ISCSI		1
@@ -151,6 +151,11 @@
 
 struct drv_ctl_info {
 	int	cmd;
+	int     drv_state;
+#define DRV_NOP		0
+#define DRV_ACTIVE	1
+#define DRV_INACTIVE	2
+#define DRV_UNLOADED	3
 	union {
 		struct drv_ctl_spq_credit credit;
 		struct drv_ctl_io io;
@@ -161,6 +166,15 @@
 	} data;
 };
 
+#define MAX_NPIV_ENTRIES 64
+#define FC_NPIV_WWN_SIZE 8
+
+struct cnic_fc_npiv_tbl {
+	u8 wwpn[MAX_NPIV_ENTRIES][FC_NPIV_WWN_SIZE];
+	u8 wwnn[MAX_NPIV_ENTRIES][FC_NPIV_WWN_SIZE];
+	u32 count;
+};
+
 struct cnic_ops {
 	struct module	*cnic_owner;
 	/* Calls to these functions are protected by RCU.  When
@@ -226,6 +240,8 @@
 	int		(*drv_submit_kwqes_16)(struct net_device *,
 					       struct kwqe_16 *[], u32);
 	int		(*drv_ctl)(struct net_device *, struct drv_ctl_info *);
+	int		(*drv_get_fc_npiv_tbl)(struct net_device *,
+					       struct cnic_fc_npiv_tbl *);
 	unsigned long	reserved1[2];
 	union drv_info_to_mcp	*addr_drv_info_to_mcp;
 };
@@ -314,6 +330,7 @@
 	struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
 	int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
 				 char *data, u16 data_size);
+	int (*get_fc_npiv_tbl)(struct cnic_dev *, struct cnic_fc_npiv_tbl *);
 	unsigned long	flags;
 #define CNIC_F_CNIC_UP		1
 #define CNIC_F_BNX2_CLASS	3
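
A hypothetical ULP-side consumer of the new NPIV hook (only get_fc_npiv_tbl()
and struct cnic_fc_npiv_tbl come from this diff; the surrounding code is an
assumption):

	struct cnic_fc_npiv_tbl *tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	u32 i;

	if (tbl && !cdev->get_fc_npiv_tbl(cdev, tbl))
		for (i = 0; i < tbl->count && i < MAX_NPIV_ENTRIES; i++)
			pr_info("npiv[%u]: wwpn %8phN wwnn %8phN\n",
				i, tbl->wwpn[i], tbl->wwnn[i]);
	kfree(tbl);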
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 09ff09f..fadbd00 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -907,9 +907,8 @@
 	}
 
 	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-
 	if (mode == GENET_POWER_PASSIVE)
-		bcmgenet_mii_reset(priv->dev);
+		bcmgenet_phy_power_set(priv->dev, true);
 }
 
 /* ioctl handle special commands that are not present in ethtool. */
@@ -1725,7 +1724,7 @@
 	int0_enable |= UMAC_IRQ_TXDMA_DONE;
 
 	/* Monitor cable plug/unplugged event for internal PHY */
-	if (phy_is_internal(priv->phydev)) {
+	if (priv->internal_phy) {
 		int0_enable |= UMAC_IRQ_LINK_EVENT;
 	} else if (priv->ext_phy) {
 		int0_enable |= UMAC_IRQ_LINK_EVENT;
@@ -2405,6 +2404,23 @@
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bcmgenet_poll_controller(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	/* Invoke the main RX/TX interrupt handler */
+	disable_irq(priv->irq0);
+	bcmgenet_isr0(priv->irq0, priv);
+	enable_irq(priv->irq0);
+
+	/* And the interrupt handler for RX/TX priority queues */
+	disable_irq(priv->irq1);
+	bcmgenet_isr1(priv->irq1, priv);
+	enable_irq(priv->irq1);
+}
+#endif
+
 static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
 {
 	u32 reg;
@@ -2642,13 +2658,12 @@
 	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
 
 	/* Turn on the clock */
-	if (!IS_ERR(priv->clk))
-		clk_prepare_enable(priv->clk);
+	clk_prepare_enable(priv->clk);
 
 	/* If this is an internal GPHY, power it back on now, before UniMAC is
 	 * brought out of reset as absolutely no UniMAC activity is allowed
 	 */
-	if (phy_is_internal(priv->phydev))
+	if (priv->internal_phy)
 		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
 
 	/* take MAC out of reset */
@@ -2667,7 +2682,7 @@
 
 	bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
-	if (phy_is_internal(priv->phydev)) {
+	if (priv->internal_phy) {
 		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
 		reg |= EXT_ENERGY_DET_MASK;
 		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
@@ -2703,23 +2718,24 @@
 		goto err_irq0;
 	}
 
-	/* Re-configure the port multiplexer towards the PHY device */
-	bcmgenet_mii_config(priv->dev, false);
-
-	phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
-			   priv->phy_interface);
+	ret = bcmgenet_mii_probe(dev);
+	if (ret) {
+		netdev_err(dev, "failed to connect to PHY\n");
+		goto err_irq1;
+	}
 
 	bcmgenet_netif_start(dev);
 
 	return 0;
 
+err_irq1:
+	free_irq(priv->irq1, priv);
 err_irq0:
-	free_irq(priv->irq0, dev);
+	free_irq(priv->irq0, priv);
 err_fini_dma:
 	bcmgenet_fini_dma(priv);
 err_clk_disable:
-	if (!IS_ERR(priv->clk))
-		clk_disable_unprepare(priv->clk);
+	clk_disable_unprepare(priv->clk);
 	return ret;
 }
 
@@ -2773,11 +2789,10 @@
 	free_irq(priv->irq0, priv);
 	free_irq(priv->irq1, priv);
 
-	if (phy_is_internal(priv->phydev))
+	if (priv->internal_phy)
 		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
 
-	if (!IS_ERR(priv->clk))
-		clk_disable_unprepare(priv->clk);
+	clk_disable_unprepare(priv->clk);
 
 	return ret;
 }
@@ -2953,6 +2968,9 @@
 	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
 	.ndo_do_ioctl		= bcmgenet_ioctl,
 	.ndo_set_features	= bcmgenet_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= bcmgenet_poll_controller,
+#endif
 };
 
 /* Array of GENET hardware parameters/characteristics */
@@ -3226,11 +3244,12 @@
 		priv->version = pd->genet_version;
 
 	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
-	if (IS_ERR(priv->clk))
+	if (IS_ERR(priv->clk)) {
 		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
+		priv->clk = NULL;
+	}
 
-	if (!IS_ERR(priv->clk))
-		clk_prepare_enable(priv->clk);
+	clk_prepare_enable(priv->clk);
 
 	bcmgenet_set_hw_params(priv);
 
@@ -3241,8 +3260,10 @@
 	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
 
 	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
-	if (IS_ERR(priv->clk_wol))
+	if (IS_ERR(priv->clk_wol)) {
 		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+		priv->clk_wol = NULL;
+	}
 
 	priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
 	if (IS_ERR(priv->clk_eee)) {
@@ -3268,8 +3289,7 @@
 	netif_carrier_off(dev);
 
 	/* Turn off the main clock, WOL clock is handled separately */
-	if (!IS_ERR(priv->clk))
-		clk_disable_unprepare(priv->clk);
+	clk_disable_unprepare(priv->clk);
 
 	err = register_netdev(dev);
 	if (err)
@@ -3278,8 +3298,7 @@
 	return err;
 
 err_clk_disable:
-	if (!IS_ERR(priv->clk))
-		clk_disable_unprepare(priv->clk);
+	clk_disable_unprepare(priv->clk);
 err:
 	free_netdev(dev);
 	return err;
@@ -3331,7 +3350,7 @@
 	if (device_may_wakeup(d) && priv->wolopts) {
 		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
 		clk_prepare_enable(priv->clk_wol);
-	} else if (phy_is_internal(priv->phydev)) {
+	} else if (priv->internal_phy) {
 		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
 	}
 
@@ -3360,7 +3379,7 @@
 	/* If this is an internal GPHY, power it back on now, before UniMAC is
 	 * brought out of reset as absolutely no UniMAC activity is allowed
 	 */
-	if (phy_is_internal(priv->phydev))
+	if (priv->internal_phy)
 		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
 
 	bcmgenet_umac_reset(priv);
@@ -3375,14 +3394,14 @@
 
 	phy_init_hw(priv->phydev);
 	/* Speed settings must be restored */
-	bcmgenet_mii_config(priv->dev, false);
+	bcmgenet_mii_config(priv->dev);
 
 	/* disable ethernet MAC while updating its registers */
 	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
 
 	bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
-	if (phy_is_internal(priv->phydev)) {
+	if (priv->internal_phy) {
 		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
 		reg |= EXT_ENERGY_DET_MASK;
 		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 6159dea..7299d10 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -593,6 +593,7 @@
 	/* MDIO bus variables */
 	wait_queue_head_t wq;
 	struct phy_device *phydev;
+	bool internal_phy;
 	struct device_node *phy_dn;
 	struct device_node *mdio_dn;
 	struct mii_bus *mii_bus;
@@ -670,9 +671,9 @@
 
 /* MDIO routines */
 int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev, bool init);
+int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_probe(struct net_device *dev);
 void bcmgenet_mii_exit(struct net_device *dev);
-void bcmgenet_mii_reset(struct net_device *dev);
 void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
 void bcmgenet_mii_setup(struct net_device *dev);
 
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index adf23d2..c8affad 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -163,14 +163,13 @@
 	phy_print_status(phydev);
 }
 
-void bcmgenet_mii_reset(struct net_device *dev)
+static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
+					  struct fixed_phy_status *status)
 {
-	struct bcmgenet_priv *priv = netdev_priv(dev);
+	if (dev && dev->phydev && status)
+		status->link = dev->phydev->link;
 
-	if (priv->phydev) {
-		phy_init_hw(priv->phydev);
-		phy_start_aneg(priv->phydev);
-	}
+	return 0;
 }
 
 void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
@@ -215,7 +214,6 @@
 	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
 	reg |= EXT_PWR_DN_EN_LD;
 	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-	bcmgenet_mii_reset(dev);
 }
 
 static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
@@ -226,9 +224,13 @@
 	reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
 	reg |= LED_ACT_SOURCE_MAC;
 	bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+
+	if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+		fixed_phy_set_link_update(priv->phydev,
+					  bcmgenet_fixed_phy_link_update);
 }
 
-int bcmgenet_mii_config(struct net_device *dev, bool init)
+int bcmgenet_mii_config(struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct phy_device *phydev = priv->phydev;
@@ -238,10 +240,10 @@
 	u32 port_ctrl;
 	u32 reg;
 
-	priv->ext_phy = !phy_is_internal(priv->phydev) &&
+	priv->ext_phy = !priv->internal_phy &&
 			(priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
 
-	if (phy_is_internal(priv->phydev))
+	if (priv->internal_phy)
 		priv->phy_interface = PHY_INTERFACE_MODE_NA;
 
 	switch (priv->phy_interface) {
@@ -259,7 +261,7 @@
 
 		bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
 
-		if (phy_is_internal(priv->phydev)) {
+		if (priv->internal_phy) {
 			phy_name = "internal PHY";
 			bcmgenet_internal_phy_setup(dev);
 		} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
@@ -321,13 +323,12 @@
 		bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
 	}
 
-	if (init)
-		dev_info(kdev, "configuring instance for %s\n", phy_name);
+	dev_info_once(kdev, "configuring instance for %s\n", phy_name);
 
 	return 0;
 }
 
-static int bcmgenet_mii_probe(struct net_device *dev)
+int bcmgenet_mii_probe(struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct device_node *dn = priv->pdev->dev.of_node;
@@ -345,22 +346,6 @@
 	priv->old_pause = -1;
 
 	if (dn) {
-		if (priv->phydev) {
-			pr_info("PHY already attached\n");
-			return 0;
-		}
-
-		/* In the case of a fixed PHY, the DT node associated
-		 * to the PHY is the Ethernet MAC DT node.
-		 */
-		if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
-			ret = of_phy_register_fixed_link(dn);
-			if (ret)
-				return ret;
-
-			priv->phy_dn = of_node_get(dn);
-		}
-
 		phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
 					phy_flags, priv->phy_interface);
 		if (!phydev) {
@@ -386,7 +371,7 @@
 	 * PHY speed which is needed for bcmgenet_mii_config() to configure
 	 * things appropriately.
 	 */
-	ret = bcmgenet_mii_config(dev, true);
+	ret = bcmgenet_mii_config(dev);
 	if (ret) {
 		phy_disconnect(priv->phydev);
 		return ret;
@@ -397,14 +382,11 @@
 	/* The internal PHY has its link interrupts routed to the
 	 * Ethernet MAC ISRs
 	 */
-	if (phy_is_internal(priv->phydev))
+	if (priv->internal_phy)
 		priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT;
 	else
 		priv->mii_bus->irq[phydev->addr] = PHY_POLL;
 
-	pr_info("attached PHY at address %d [%s]\n",
-		phydev->addr, phydev->drv->name);
-
 	return 0;
 }
 
@@ -490,7 +472,10 @@
 {
 	struct device_node *dn = priv->pdev->dev.of_node;
 	struct device *kdev = &priv->pdev->dev;
+	const char *phy_mode_str = NULL;
+	struct phy_device *phydev = NULL;
 	char *compat;
+	int phy_mode;
 	int ret;
 
 	compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
@@ -513,17 +498,43 @@
 	/* Fetch the PHY phandle */
 	priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0);
 
+	/* In the case of a fixed PHY, the DT node associated
+	 * to the PHY is the Ethernet MAC DT node.
+	 */
+	if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
+		ret = of_phy_register_fixed_link(dn);
+		if (ret)
+			return ret;
+
+		priv->phy_dn = of_node_get(dn);
+	}
+
 	/* Get the link mode */
-	priv->phy_interface = of_get_phy_mode(dn);
+	phy_mode = of_get_phy_mode(dn);
+	priv->phy_interface = phy_mode;
 
-	return 0;
-}
+	/* We need to specifically look up whether this PHY interface is internal
+	 * or not *before* we even try to probe the PHY driver over MDIO as we
+	 * may have shut down the internal PHY for power saving purposes.
+	 */
+	if (phy_mode < 0) {
+		ret = of_property_read_string(dn, "phy-mode", &phy_mode_str);
+		if (ret < 0) {
+			dev_err(kdev, "invalid PHY mode property\n");
+			return ret;
+		}
 
-static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
-					  struct fixed_phy_status *status)
-{
-	if (dev && dev->phydev && status)
-		status->link = dev->phydev->link;
+		priv->phy_interface = PHY_INTERFACE_MODE_NA;
+		if (!strcasecmp(phy_mode_str, "internal"))
+			priv->internal_phy = true;
+	}
+
+	/* Make sure we initialize MoCA PHYs with a link down */
+	if (phy_mode == PHY_INTERFACE_MODE_MOCA) {
+		phydev = of_phy_find_device(dn);
+		if (phydev)
+			phydev->link = 0;
+	}
 
 	return 0;
 }
@@ -574,18 +585,15 @@
 			.asym_pause = 0,
 		};
 
-		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+		phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
 		if (!phydev || IS_ERR(phydev)) {
 			dev_err(kdev, "failed to register fixed PHY device\n");
 			return -ENODEV;
 		}
 
-		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) {
-			ret = fixed_phy_set_link_update(
-				phydev, bcmgenet_fixed_phy_link_update);
-			if (!ret)
-				phydev->link = 0;
-		}
+		/* Make sure we initialize MoCA PHYs with a link down */
+		phydev->link = 0;
+
 	}
 
 	priv->phydev = phydev;
@@ -615,10 +623,6 @@
 
 	ret = bcmgenet_mii_bus_init(priv);
 	if (ret)
-		goto out_free;
-
-	ret = bcmgenet_mii_probe(dev);
-	if (ret)
 		goto out;
 
 	return 0;
@@ -626,7 +630,6 @@
 out:
 	of_node_put(priv->phy_dn);
 	mdiobus_unregister(priv->mii_bus);
-out_free:
 	kfree(priv->mii_bus->irq);
 	mdiobus_free(priv->mii_bus);
 	return ret;
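
The bcmmii.c change above detects the vendor-specific "internal" PHY mode before any MDIO access. A condensed sketch of that parse, assuming of_get_phy_mode() returns a negative errno for a string it does not recognize:

/* If "phy-mode" is a standard mode, of_get_phy_mode() resolves it;
 * otherwise re-read the raw string and look for "internal", so the
 * internal-PHY flag is known before the PHY is probed over MDIO.
 */
int phy_mode = of_get_phy_mode(dn);		/* dn: the MAC's DT node */

if (phy_mode < 0) {
	const char *str;

	if (of_property_read_string(dn, "phy-mode", &str) < 0)
		return -EINVAL;

	if (!strcasecmp(str, "internal"))	/* e.g. phy-mode = "internal"; */
		priv->internal_phy = true;
}
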
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 73c934c..79789d8 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -10757,7 +10757,7 @@
 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
 				sizeof(temperature));
 	spin_unlock_bh(&tp->lock);
-	return sprintf(buf, "%u\n", temperature);
+	return sprintf(buf, "%u\n", temperature * 1000);
 }
 
 
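For the tg3 hunk above: the hwmon sysfs ABI defines temp*_input in millidegrees Celsius, hence the new scaling. A minimal sketch (helper name hypothetical):

static ssize_t tg3_show_temp_example(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	u32 temperature = example_read_sensor_celsius();	/* hypothetical */

	/* hwmon expects millidegrees Celsius, so scale by 1000 */
	return sprintf(buf, "%u\n", temperature * 1000);
}
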
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index bf9eb2e..88c1e1a 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2774,8 +2774,7 @@
 
 
 static const struct macb_config zynqmp_config = {
-	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
-		MACB_CAPS_JUMBO,
+	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
 	.dma_burst_length = 16,
 	.clk_init = macb_clk_init,
 	.init = macb_init,
@@ -2783,8 +2782,7 @@
 };
 
 static const struct macb_config zynq_config = {
-	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
-		MACB_CAPS_NO_GIGABIT_HALF,
+	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
 	.dma_burst_length = 16,
 	.clk_init = macb_clk_init,
 	.init = macb_init,
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 1895b6b..6e1faea 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -399,7 +399,7 @@
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE	0x20000000
 #define MACB_CAPS_SG_DISABLED			0x40000000
 #define MACB_CAPS_MACB_IS_GEM			0x80000000
-#define MACB_CAPS_JUMBO				0x00000008
+#define MACB_CAPS_JUMBO				0x00000010
 
 /* Bit manipulation macros */
 #define MACB_BIT(name)					\
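
The MACB_CAPS_JUMBO move above presumably resolves a collision between 0x00000008 and another (unshown) capability bit; since caps form a bitmask, each value must be a distinct power of two. A hypothetical build-time guard, not part of the driver:

#include <linux/bug.h>

static inline void macb_caps_sanity(void)
{
	/* JUMBO must not share a bit with any other capability */
	BUILD_BUG_ON(MACB_CAPS_JUMBO &
		     (MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		      MACB_CAPS_SG_DISABLED |
		      MACB_CAPS_MACB_IS_GEM));
}
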
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 02e23e6..9b35d14 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -34,6 +34,8 @@
 config	THUNDER_NIC_BGX
 	tristate "Thunder MAC interface driver (BGX)"
 	depends on 64BIT
+	select PHYLIB
+	select MDIO_OCTEON
 	---help---
 	  This driver supports programming and controlling of MAC
 	  interface from NIC physical function driver.
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 8aee250..d3950b2 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -135,6 +135,7 @@
 #define	NICVF_TX_TIMEOUT		(50 * HZ)
 
 struct nicvf_cq_poll {
+	struct	nicvf *nicvf;
 	u8	cq_idx;		/* Completion queue index */
 	struct	napi_struct napi;
 };
@@ -190,10 +191,10 @@
 };
 
 struct nicvf_hw_stats {
-	u64 rx_bytes_ok;
-	u64 rx_ucast_frames_ok;
-	u64 rx_bcast_frames_ok;
-	u64 rx_mcast_frames_ok;
+	u64 rx_bytes;
+	u64 rx_ucast_frames;
+	u64 rx_bcast_frames;
+	u64 rx_mcast_frames;
 	u64 rx_fcs_errors;
 	u64 rx_l2_errors;
 	u64 rx_drop_red;
@@ -204,6 +205,31 @@
 	u64 rx_drop_mcast;
 	u64 rx_drop_l3_bcast;
 	u64 rx_drop_l3_mcast;
+	u64 rx_bgx_truncated_pkts;
+	u64 rx_jabber_errs;
+	u64 rx_fcs_errs;
+	u64 rx_bgx_errs;
+	u64 rx_prel2_errs;
+	u64 rx_l2_hdr_malformed;
+	u64 rx_oversize;
+	u64 rx_undersize;
+	u64 rx_l2_len_mismatch;
+	u64 rx_l2_pclp;
+	u64 rx_ip_ver_errs;
+	u64 rx_ip_csum_errs;
+	u64 rx_ip_hdr_malformed;
+	u64 rx_ip_payload_malformed;
+	u64 rx_ip_ttl_errs;
+	u64 rx_l3_pclp;
+	u64 rx_l4_malformed;
+	u64 rx_l4_csum_errs;
+	u64 rx_udp_len_errs;
+	u64 rx_l4_port_errs;
+	u64 rx_tcp_flag_errs;
+	u64 rx_tcp_offset_errs;
+	u64 rx_l4_pclp;
+	u64 rx_truncated_pkts;
+
 	u64 tx_bytes_ok;
 	u64 tx_ucast_frames_ok;
 	u64 tx_bcast_frames_ok;
@@ -222,6 +248,7 @@
 	u64 rx_frames_1518;
 	u64 rx_frames_jumbo;
 	u64 rx_drops;
+
 	/* Tx */
 	u64 tx_frames_ok;
 	u64 tx_drops;
@@ -231,13 +258,24 @@
 };
 
 struct nicvf {
+	struct nicvf		*pnicvf;
 	struct net_device	*netdev;
 	struct pci_dev		*pdev;
 	u8			vf_id;
 	u8			node;
-	u8			tns_mode;
+	u8			tns_mode:1;
+	u8			sqs_mode:1;
+	u8			loopback_supported:1;
 	u16			mtu;
 	struct queue_set	*qs;
+#define	MAX_SQS_PER_VF_SINGLE_NODE		5
+#define	MAX_SQS_PER_VF				11
+	u8			sqs_id;
+	u8			sqs_count; /* Secondary Qset count */
+	struct nicvf		*snicvf[MAX_SQS_PER_VF];
+	u8			rx_queues;
+	u8			tx_queues;
+	u8			max_queues;
 	void __iomem		*reg_base;
 	bool			link_up;
 	u8			duplex;
@@ -257,7 +295,7 @@
 	u32			cq_coalesce_usecs;
 
 	u32			msg_enable;
-	struct nicvf_hw_stats   stats;
+	struct nicvf_hw_stats   hw_stats;
 	struct nicvf_drv_stats  drv_stats;
 	struct bgx_stats	bgx_stats;
 	struct work_struct	reset_task;
@@ -269,10 +307,9 @@
 	char			irq_name[NIC_VF_MSIX_VECTORS][20];
 	bool			irq_allocated[NIC_VF_MSIX_VECTORS];
 
-	bool			pf_ready_to_rcv_msg;
+	/* VF <-> PF mailbox communication */
 	bool			pf_acked;
 	bool			pf_nacked;
-	bool			bgx_stats_acked;
 	bool			set_mac_pending;
 } ____cacheline_aligned_in_smp;
 
@@ -304,14 +341,21 @@
 #define	NIC_MBOX_MSG_RQ_SW_SYNC		0x0F	/* Flush inflight pkts to RQ */
 #define	NIC_MBOX_MSG_BGX_STATS		0x10	/* Get stats from BGX */
 #define	NIC_MBOX_MSG_BGX_LINK_CHANGE	0x11	/* BGX:LMAC link status */
-#define NIC_MBOX_MSG_CFG_DONE		0x12	/* VF configuration done */
-#define NIC_MBOX_MSG_SHUTDOWN		0x13	/* VF is being shutdown */
+#define	NIC_MBOX_MSG_ALLOC_SQS		0x12	/* Allocate secondary Qset */
+#define	NIC_MBOX_MSG_NICVF_PTR		0x13	/* Send nicvf ptr to PF */
+#define	NIC_MBOX_MSG_PNICVF_PTR		0x14	/* Get primary qset nicvf ptr */
+#define	NIC_MBOX_MSG_SNICVF_PTR	0x15	/* Send SQS's nicvf ptr to primary VF */
+#define	NIC_MBOX_MSG_LOOPBACK		0x16	/* Set interface in loopback */
+#define	NIC_MBOX_MSG_CFG_DONE		0xF0	/* VF configuration done */
+#define	NIC_MBOX_MSG_SHUTDOWN		0xF1	/* VF is being shutdown */
 
 struct nic_cfg_msg {
 	u8    msg;
 	u8    vf_id;
-	u8    tns_mode;
 	u8    node_id;
+	u8    tns_mode:1;
+	u8    sqs_mode:1;
+	u8    loopback_supported:1;
 	u8    mac_addr[ETH_ALEN];
 };
 
@@ -319,6 +363,7 @@
 struct qs_cfg_msg {
 	u8    msg;
 	u8    num;
+	u8    sqs_count;
 	u64   cfg;
 };
 
@@ -335,6 +380,7 @@
 	u8    msg;
 	u8    qs_num;
 	u8    sq_num;
+	bool  sqs_mode;
 	u64   cfg;
 };
 
@@ -394,6 +440,28 @@
 	u32   speed;
 };
 
+/* Get Extra Qset IDs */
+struct sqs_alloc {
+	u8    msg;
+	u8    vf_id;
+	u8    qs_count;
+};
+
+struct nicvf_ptr {
+	u8    msg;
+	u8    vf_id;
+	bool  sqs_mode;
+	u8    sqs_id;
+	u64   nicvf;
+};
+
+/* Set interface in loopback mode */
+struct set_loopback {
+	u8    msg;
+	u8    vf_id;
+	bool  enable;
+};
+
 /* 128 bit shared memory between PF and each VF */
 union nic_mbx {
 	struct { u8 msg; }	msg;
@@ -408,6 +476,9 @@
 	struct rss_cfg_msg	rss_cfg;
 	struct bgx_stats_msg    bgx_stats;
 	struct bgx_link_status  link_status;
+	struct sqs_alloc        sqs_alloc;
+	struct nicvf_ptr	nicvf;
+	struct set_loopback	lbk;
 };
 
 #define NIC_NODE_ID_MASK	0x03
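
The union above is the VF side of the 128-bit PF/VF mailbox. A sketch of how such a message goes out, mirroring the VF driver's write path (register name taken from the driver and assumed here):

static void example_send_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	u64 *msg = (u64 *)mbx;

	/* the 16-byte union maps onto two consecutive 64-bit registers */
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}
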
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 6e0c031..b3a5947 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -28,6 +28,11 @@
 	u8			num_vf_en;      /* No of VF enabled */
 	bool			vf_enabled[MAX_NUM_VFS_SUPPORTED];
 	void __iomem		*reg_base;       /* Register start address */
+	u8			num_sqs_en;	/* Secondary qsets enabled */
+	u64			nicvf[MAX_NUM_VFS_SUPPORTED];
+	u8			vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
+	u8			pqs_vf[MAX_NUM_VFS_SUPPORTED];
+	bool			sqs_used[MAX_NUM_VFS_SUPPORTED];
 	struct pkind_cfg	pkind;
 #define	NIC_SET_VF_LMAC_MAP(bgx, lmac)	(((bgx & 0xF) << 4) | (lmac & 0xF))
 #define	NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	((map >> 4) & 0xF)
@@ -139,14 +144,19 @@
 
 	mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
 
-	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+	if (vf < MAX_LMAC) {
+		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
 
-	mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
-	if (mac)
-		ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
-
+		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
+		if (mac)
+			ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
+	}
+	mbx.nic_cfg.sqs_mode = vf >= nic->num_vf_en;
 	mbx.nic_cfg.node_id = nic->node;
+
+	mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;
+
 	nic_send_msg_to_vf(nic, vf, &mbx);
 }
 
@@ -329,6 +339,10 @@
 
 	/* Timer config */
 	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
+
+	/* Enable VLAN ethertype matching and stripping */
+	nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
+		      (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
 }
 
 /* Channel parse index configuration */
@@ -429,6 +443,12 @@
 	qset = cfg->vf_id;
 
 	for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
+		u8 svf = cfg->ind_tbl[idx] >> 3;
+
+		if (svf)
+			qset = nic->vf_sqs[cfg->vf_id][svf - 1];
+		else
+			qset = cfg->vf_id;
 		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
 			      (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
 		idx++;
@@ -452,19 +472,31 @@
  * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
  * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
  */
-static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx)
+static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
+			       struct sq_cfg_msg *sq)
 {
 	u32 bgx, lmac, chan;
 	u32 tl2, tl3, tl4;
 	u32 rr_quantum;
+	u8 sq_idx = sq->sq_num;
+	u8 pqs_vnic;
 
-	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
-	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+	if (sq->sqs_mode)
+		pqs_vnic = nic->pqs_vf[vnic];
+	else
+		pqs_vnic = vnic;
+
+	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
+
 	/* 24 bytes for FCS, IPG and preamble */
 	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
 
 	tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
 	tl4 += sq_idx;
+	if (sq->sqs_mode)
+		tl4 += vnic * 8;
+
 	tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
 	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
 		      ((u64)vnic << NIC_QS_ID_SHIFT) |
@@ -485,6 +517,86 @@
 	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
 }
 
+/* Send primary nicvf pointer to secondary QS's VF */
+static void nic_send_pnicvf(struct nicpf *nic, int sqs)
+{
+	union nic_mbx mbx = {};
+
+	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
+	mbx.nicvf.nicvf = nic->nicvf[nic->pqs_vf[sqs]];
+	nic_send_msg_to_vf(nic, sqs, &mbx);
+}
+
+/* Send SQS's nicvf pointer to primary QS's VF */
+static void nic_send_snicvf(struct nicpf *nic, struct nicvf_ptr *nicvf)
+{
+	union nic_mbx mbx = {};
+	int sqs_id = nic->vf_sqs[nicvf->vf_id][nicvf->sqs_id];
+
+	mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
+	mbx.nicvf.sqs_id = nicvf->sqs_id;
+	mbx.nicvf.nicvf = nic->nicvf[sqs_id];
+	nic_send_msg_to_vf(nic, nicvf->vf_id, &mbx);
+}
+
+/* Find next available Qset that can be assigned as a
+ * secondary Qset to a VF.
+ */
+static int nic_nxt_avail_sqs(struct nicpf *nic)
+{
+	int sqs;
+
+	for (sqs = 0; sqs < nic->num_sqs_en; sqs++) {
+		if (!nic->sqs_used[sqs])
+			nic->sqs_used[sqs] = true;
+		else
+			continue;
+		return sqs + nic->num_vf_en;
+	}
+	return -1;
+}
+
+/* Allocate additional Qsets for requested VF */
+static void nic_alloc_sqs(struct nicpf *nic, struct sqs_alloc *sqs)
+{
+	union nic_mbx mbx = {};
+	int idx, alloc_qs = 0;
+	int sqs_id;
+
+	if (!nic->num_sqs_en)
+		goto send_mbox;
+
+	for (idx = 0; idx < sqs->qs_count; idx++) {
+		sqs_id = nic_nxt_avail_sqs(nic);
+		if (sqs_id < 0)
+			break;
+		nic->vf_sqs[sqs->vf_id][idx] = sqs_id;
+		nic->pqs_vf[sqs_id] = sqs->vf_id;
+		alloc_qs++;
+	}
+
+send_mbox:
+	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
+	mbx.sqs_alloc.vf_id = sqs->vf_id;
+	mbx.sqs_alloc.qs_count = alloc_qs;
+	nic_send_msg_to_vf(nic, sqs->vf_id, &mbx);
+}
+
+static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
+{
+	int bgx_idx, lmac_idx;
+
+	if (lbk->vf_id > MAX_LMAC)
+		return -1;
+
+	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
+	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
+
+	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);
+
+	return 0;
+}
+
 /* Interrupt handler to handle mailbox messages from VFs */
 static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 {
@@ -492,6 +604,7 @@
 	u64 *mbx_data;
 	u64 mbx_addr;
 	u64 reg_addr;
+	u64 cfg;
 	int bgx, lmac;
 	int i;
 	int ret = 0;
@@ -512,15 +625,24 @@
 	switch (mbx.msg.msg) {
 	case NIC_MBOX_MSG_READY:
 		nic_mbx_send_ready(nic, vf);
-		nic->link[vf] = 0;
-		nic->duplex[vf] = 0;
-		nic->speed[vf] = 0;
+		if (vf < MAX_LMAC) {
+			nic->link[vf] = 0;
+			nic->duplex[vf] = 0;
+			nic->speed[vf] = 0;
+		}
 		ret = 1;
 		break;
 	case NIC_MBOX_MSG_QS_CFG:
 		reg_addr = NIC_PF_QSET_0_127_CFG |
 			   (mbx.qs.num << NIC_QS_ID_SHIFT);
-		nic_reg_write(nic, reg_addr, mbx.qs.cfg);
+		cfg = mbx.qs.cfg;
+		/* Check if it's a secondary Qset */
+		if (vf >= nic->num_vf_en) {
+			cfg = cfg & (~0x7FULL);
+			/* Assign this Qset to primary Qset's VF */
+			cfg |= nic->pqs_vf[vf];
+		}
+		nic_reg_write(nic, reg_addr, cfg);
 		break;
 	case NIC_MBOX_MSG_RQ_CFG:
 		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
@@ -548,9 +670,11 @@
 			   (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
 			   (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
 		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
-		nic_tx_channel_cfg(nic, mbx.qs.num, mbx.sq.sq_num);
+		nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
 		break;
 	case NIC_MBOX_MSG_SET_MAC:
+		if (vf >= nic->num_vf_en)
+			break;
 		lmac = mbx.mac.vf_id;
 		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
 		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
@@ -577,10 +701,28 @@
 	case NIC_MBOX_MSG_SHUTDOWN:
 		/* First msg in VF teardown sequence */
 		nic->vf_enabled[vf] = false;
+		if (vf >= nic->num_vf_en)
+			nic->sqs_used[vf - nic->num_vf_en] = false;
+		nic->pqs_vf[vf] = 0;
 		break;
+	case NIC_MBOX_MSG_ALLOC_SQS:
+		nic_alloc_sqs(nic, &mbx.sqs_alloc);
+		goto unlock;
+	case NIC_MBOX_MSG_NICVF_PTR:
+		nic->nicvf[vf] = mbx.nicvf.nicvf;
+		break;
+	case NIC_MBOX_MSG_PNICVF_PTR:
+		nic_send_pnicvf(nic, vf);
+		goto unlock;
+	case NIC_MBOX_MSG_SNICVF_PTR:
+		nic_send_snicvf(nic, &mbx.nicvf);
+		goto unlock;
 	case NIC_MBOX_MSG_BGX_STATS:
 		nic_get_bgx_stats(nic, &mbx.bgx_stats);
 		goto unlock;
+	case NIC_MBOX_MSG_LOOPBACK:
+		ret = nic_config_loopback(nic, &mbx.lbk);
+		break;
 	default:
 		dev_err(&nic->pdev->dev,
 			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
@@ -606,8 +748,7 @@
 		if (intr & (1ULL << vf)) {
 			dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
 				vf + (mbx * vf_per_mbx_reg));
-			if ((vf + (mbx * vf_per_mbx_reg)) > nic->num_vf_en)
-				break;
+
 			nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
 			nic_clear_mbx_intr(nic, vf, mbx);
 		}
@@ -713,9 +854,24 @@
 	nic_disable_msix(nic);
 }
 
+static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
+{
+	int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE;
+	u16 total_vf;
+
+	/* Check if it's a multi-node environment */
+	if (nr_node_ids > 1)
+		sqs_per_vf = MAX_SQS_PER_VF;
+
+	pos = pci_find_ext_capability(nic->pdev, PCI_EXT_CAP_ID_SRIOV);
+	pci_read_config_word(nic->pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf);
+	return min(total_vf - vf_en, vf_en * sqs_per_vf);
+}
+
 static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
 {
 	int pos = 0;
+	int vf_en;
 	int err;
 	u16 total_vf_cnt;
 
@@ -732,16 +888,20 @@
 	if (!total_vf_cnt)
 		return 0;
 
-	err = pci_enable_sriov(pdev, nic->num_vf_en);
+	vf_en = nic->num_vf_en;
+	nic->num_sqs_en = nic_num_sqs_en(nic, nic->num_vf_en);
+	vf_en += nic->num_sqs_en;
+
+	err = pci_enable_sriov(pdev, vf_en);
 	if (err) {
 		dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
-			nic->num_vf_en);
+			vf_en);
 		nic->num_vf_en = 0;
 		return err;
 	}
 
 	dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
-		 nic->num_vf_en);
+		 vf_en);
 
 	nic->flags |= NIC_SRIOV_ENABLED;
 	return 0;
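
A worked instance of the nic_num_sqs_en() sizing above, under stated assumptions:

/* Assume a single-node system (sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE = 5),
 * 8 primary VFs enabled, and an SR-IOV TotalVFs of 128 (hypothetical):
 *
 *   num_sqs_en = min(total_vf - vf_en, vf_en * sqs_per_vf)
 *              = min(128 - 8, 8 * 5)
 *              = min(120, 40) = 40
 *
 * so nic_sriov_init() enables 8 + 40 = 48 VFs in total.
 */
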
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index a4228e6..af54c10 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -35,10 +35,10 @@
 }
 
 static const struct nicvf_stat nicvf_hw_stats[] = {
-	NICVF_HW_STAT(rx_bytes_ok),
-	NICVF_HW_STAT(rx_ucast_frames_ok),
-	NICVF_HW_STAT(rx_bcast_frames_ok),
-	NICVF_HW_STAT(rx_mcast_frames_ok),
+	NICVF_HW_STAT(rx_bytes),
+	NICVF_HW_STAT(rx_ucast_frames),
+	NICVF_HW_STAT(rx_bcast_frames),
+	NICVF_HW_STAT(rx_mcast_frames),
 	NICVF_HW_STAT(rx_fcs_errors),
 	NICVF_HW_STAT(rx_l2_errors),
 	NICVF_HW_STAT(rx_drop_red),
@@ -49,6 +49,30 @@
 	NICVF_HW_STAT(rx_drop_mcast),
 	NICVF_HW_STAT(rx_drop_l3_bcast),
 	NICVF_HW_STAT(rx_drop_l3_mcast),
+	NICVF_HW_STAT(rx_bgx_truncated_pkts),
+	NICVF_HW_STAT(rx_jabber_errs),
+	NICVF_HW_STAT(rx_fcs_errs),
+	NICVF_HW_STAT(rx_bgx_errs),
+	NICVF_HW_STAT(rx_prel2_errs),
+	NICVF_HW_STAT(rx_l2_hdr_malformed),
+	NICVF_HW_STAT(rx_oversize),
+	NICVF_HW_STAT(rx_undersize),
+	NICVF_HW_STAT(rx_l2_len_mismatch),
+	NICVF_HW_STAT(rx_l2_pclp),
+	NICVF_HW_STAT(rx_ip_ver_errs),
+	NICVF_HW_STAT(rx_ip_csum_errs),
+	NICVF_HW_STAT(rx_ip_hdr_malformed),
+	NICVF_HW_STAT(rx_ip_payload_malformed),
+	NICVF_HW_STAT(rx_ip_ttl_errs),
+	NICVF_HW_STAT(rx_l3_pclp),
+	NICVF_HW_STAT(rx_l4_malformed),
+	NICVF_HW_STAT(rx_l4_csum_errs),
+	NICVF_HW_STAT(rx_udp_len_errs),
+	NICVF_HW_STAT(rx_l4_port_errs),
+	NICVF_HW_STAT(rx_tcp_flag_errs),
+	NICVF_HW_STAT(rx_tcp_offset_errs),
+	NICVF_HW_STAT(rx_l4_pclp),
+	NICVF_HW_STAT(rx_truncated_pkts),
 	NICVF_HW_STAT(tx_bytes_ok),
 	NICVF_HW_STAT(tx_ucast_frames_ok),
 	NICVF_HW_STAT(tx_bcast_frames_ok),
@@ -125,10 +149,33 @@
 	nic->msg_enable = lvl;
 }
 
+static void nicvf_get_qset_strings(struct nicvf *nic, u8 **data, int qset)
+{
+	int stats, qidx;
+	int start_qidx = qset * MAX_RCV_QUEUES_PER_QS;
+
+	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
+		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+			sprintf(*data, "rxq%d: %s", qidx + start_qidx,
+				nicvf_queue_stats[stats].name);
+			*data += ETH_GSTRING_LEN;
+		}
+	}
+
+	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
+		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+			sprintf(*data, "txq%d: %s", qidx + start_qidx,
+				nicvf_queue_stats[stats].name);
+			*data += ETH_GSTRING_LEN;
+		}
+	}
+}
+
 static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
 {
 	struct nicvf *nic = netdev_priv(netdev);
-	int stats, qidx;
+	int stats;
+	int sqs;
 
 	if (sset != ETH_SS_STATS)
 		return;
@@ -143,20 +190,12 @@
 		data += ETH_GSTRING_LEN;
 	}
 
-	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
-		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
-			sprintf(data, "rxq%d: %s", qidx,
-				nicvf_queue_stats[stats].name);
-			data += ETH_GSTRING_LEN;
-		}
-	}
+	nicvf_get_qset_strings(nic, &data, 0);
 
-	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
-		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
-			sprintf(data, "txq%d: %s", qidx,
-				nicvf_queue_stats[stats].name);
-			data += ETH_GSTRING_LEN;
-		}
+	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+		if (!nic->snicvf[sqs])
+			continue;
+		nicvf_get_qset_strings(nic->snicvf[sqs], &data, sqs + 1);
 	}
 
 	for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
@@ -173,21 +212,58 @@
 static int nicvf_get_sset_count(struct net_device *netdev, int sset)
 {
 	struct nicvf *nic = netdev_priv(netdev);
+	int qstats_count;
+	int sqs;
 
 	if (sset != ETH_SS_STATS)
 		return -EINVAL;
 
+	qstats_count = nicvf_n_queue_stats *
+		       (nic->qs->rq_cnt + nic->qs->sq_cnt);
+	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+		struct nicvf *snic;
+
+		snic = nic->snicvf[sqs];
+		if (!snic)
+			continue;
+		qstats_count += nicvf_n_queue_stats *
+				(snic->qs->rq_cnt + snic->qs->sq_cnt);
+	}
+
 	return nicvf_n_hw_stats + nicvf_n_drv_stats +
-		(nicvf_n_queue_stats *
-		 (nic->qs->rq_cnt + nic->qs->sq_cnt)) +
+		qstats_count +
 		BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
 }
 
+static void nicvf_get_qset_stats(struct nicvf *nic,
+				 struct ethtool_stats *stats, u64 **data)
+{
+	int stat, qidx;
+
+	if (!nic)
+		return;
+
+	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
+		nicvf_update_rq_stats(nic, qidx);
+		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+			*((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)
+					[nicvf_queue_stats[stat].index];
+	}
+
+	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
+		nicvf_update_sq_stats(nic, qidx);
+		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+			*((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats)
+					[nicvf_queue_stats[stat].index];
+	}
+}
+
 static void nicvf_get_ethtool_stats(struct net_device *netdev,
 				    struct ethtool_stats *stats, u64 *data)
 {
 	struct nicvf *nic = netdev_priv(netdev);
-	int stat, qidx;
+	int stat;
+	int sqs;
 
 	nicvf_update_stats(nic);
 
@@ -195,22 +271,18 @@
 	nicvf_update_lmac_stats(nic);
 
 	for (stat = 0; stat < nicvf_n_hw_stats; stat++)
-		*(data++) = ((u64 *)&nic->stats)
+		*(data++) = ((u64 *)&nic->hw_stats)
 				[nicvf_hw_stats[stat].index];
 	for (stat = 0; stat < nicvf_n_drv_stats; stat++)
 		*(data++) = ((u64 *)&nic->drv_stats)
 				[nicvf_drv_stats[stat].index];
 
-	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
-		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
-			*(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
-					[nicvf_queue_stats[stat].index];
-	}
+	nicvf_get_qset_stats(nic, stats, &data);
 
-	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
-		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
-			*(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
-					[nicvf_queue_stats[stat].index];
+	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+		if (!nic->snicvf[sqs])
+			continue;
+		nicvf_get_qset_stats(nic->snicvf[sqs], stats, &data);
 	}
 
 	for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
@@ -369,7 +441,7 @@
 
 	switch (info->cmd) {
 	case ETHTOOL_GRXRINGS:
-		info->data = nic->qs->rq_cnt;
+		info->data = nic->rx_queues;
 		ret = 0;
 		break;
 	case ETHTOOL_GRXFH:
@@ -501,17 +573,15 @@
 	struct nicvf_rss_info *rss = &nic->rss_info;
 	int idx;
 
-	if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) {
-		rss->enable = false;
-		rss->hash_bits = 0;
-		return -EIO;
-	}
-
-	/* We do not allow change in unsupported parameters */
 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
 		return -EOPNOTSUPP;
 
-	rss->enable = true;
+	if (!rss->enable) {
+		netdev_err(nic->netdev,
+			   "RSS is disabled, cannot change settings\n");
+		return -EIO;
+	}
+
 	if (indir) {
 		for (idx = 0; idx < rss->rss_size; idx++)
 			rss->ind_tbl[idx] = indir[idx];
@@ -534,11 +604,11 @@
 
 	memset(channel, 0, sizeof(*channel));
 
-	channel->max_rx = MAX_RCV_QUEUES_PER_QS;
-	channel->max_tx = MAX_SND_QUEUES_PER_QS;
+	channel->max_rx = nic->max_queues;
+	channel->max_tx = nic->max_queues;
 
-	channel->rx_count = nic->qs->rq_cnt;
-	channel->tx_count = nic->qs->sq_cnt;
+	channel->rx_count = nic->rx_queues;
+	channel->tx_count = nic->tx_queues;
 }
 
 /* Set no of Tx, Rx queues to be used */
@@ -548,22 +618,34 @@
 	struct nicvf *nic = netdev_priv(dev);
 	int err = 0;
 	bool if_up = netif_running(dev);
+	int cqcount;
 
 	if (!channel->rx_count || !channel->tx_count)
 		return -EINVAL;
-	if (channel->rx_count > MAX_RCV_QUEUES_PER_QS)
+	if (channel->rx_count > nic->max_queues)
 		return -EINVAL;
-	if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
+	if (channel->tx_count > nic->max_queues)
 		return -EINVAL;
 
 	if (if_up)
 		nicvf_stop(dev);
 
-	nic->qs->rq_cnt = channel->rx_count;
-	nic->qs->sq_cnt = channel->tx_count;
+	cqcount = max(channel->rx_count, channel->tx_count);
+
+	if (cqcount > MAX_CMP_QUEUES_PER_QS) {
+		nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS);
+		nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
+	} else {
+		nic->sqs_count = 0;
+	}
+
+	nic->qs->rq_cnt = min_t(u32, channel->rx_count, MAX_RCV_QUEUES_PER_QS);
+	nic->qs->sq_cnt = min_t(u32, channel->tx_count, MAX_SND_QUEUES_PER_QS);
 	nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
 
-	err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt);
+	nic->rx_queues = channel->rx_count;
+	nic->tx_queues = channel->tx_count;
+	err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues);
 	if (err)
 		return err;
 
@@ -571,7 +653,7 @@
 		nicvf_open(dev);
 
 	netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
-		    nic->qs->sq_cnt, nic->qs->rq_cnt);
+		    nic->tx_queues, nic->rx_queues);
 
 	return err;
 }
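
To make the set_channels() Qset math above concrete, a worked example assuming MAX_CMP_QUEUES_PER_QS is 8 (the per-Qset completion-queue limit):

/* Request: 20 Rx queues, 12 Tx queues.
 *
 *   cqcount   = max(20, 12) = 20        > 8, so secondary Qsets are needed
 *   sqs_count = roundup(20, 8) / 8 - 1
 *             = 24 / 8 - 1 = 2
 *
 * The primary Qset keeps at most MAX_RCV_QUEUES_PER_QS /
 * MAX_SND_QUEUES_PER_QS queues; the remainder is distributed over the
 * two secondary Qsets by nicvf_request_sqs().
 */
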
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 3b90afb..b63e579 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -10,6 +10,7 @@
 #include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
+#include <linux/if_vlan.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/log2.h>
@@ -50,6 +51,14 @@
 MODULE_PARM_DESC(cpi_alg,
 		 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
 
+static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
+{
+	if (nic->sqs_mode)
+		return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
+	else
+		return qidx;
+}
+
 static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
 					  struct sk_buff *skb)
 {
@@ -105,7 +114,6 @@
 }
 
 /* VF -> PF mailbox communication */
-
 static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
 {
 	u64 *msg = (u64 *)mbx;
@@ -147,26 +155,15 @@
 */
 static int nicvf_check_pf_ready(struct nicvf *nic)
 {
-	int timeout = 5000, sleep = 20;
 	union nic_mbx mbx = {};
 
 	mbx.msg.msg = NIC_MBOX_MSG_READY;
-
-	nic->pf_ready_to_rcv_msg = false;
-
-	nicvf_write_to_mbx(nic, &mbx);
-
-	while (!nic->pf_ready_to_rcv_msg) {
-		msleep(sleep);
-		if (nic->pf_ready_to_rcv_msg)
-			break;
-		timeout -= sleep;
-		if (!timeout) {
-			netdev_err(nic->netdev,
-				   "PF didn't respond to READY msg\n");
-			return 0;
-		}
+	if (nicvf_send_msg_to_pf(nic, &mbx)) {
+		netdev_err(nic->netdev,
+			   "PF didn't respond to READY msg\n");
+		return 0;
 	}
+
 	return 1;
 }
 
@@ -197,13 +194,15 @@
 	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
 	switch (mbx.msg.msg) {
 	case NIC_MBOX_MSG_READY:
-		nic->pf_ready_to_rcv_msg = true;
+		nic->pf_acked = true;
 		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
 		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
 		nic->node = mbx.nic_cfg.node_id;
 		if (!nic->set_mac_pending)
 			ether_addr_copy(nic->netdev->dev_addr,
 					mbx.nic_cfg.mac_addr);
+		nic->sqs_mode = mbx.nic_cfg.sqs_mode;
+		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
 		nic->link_up = false;
 		nic->duplex = 0;
 		nic->speed = 0;
@@ -221,7 +220,6 @@
 	case NIC_MBOX_MSG_BGX_STATS:
 		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
 		nic->pf_acked = true;
-		nic->bgx_stats_acked = true;
 		break;
 	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
 		nic->pf_acked = true;
@@ -242,6 +240,26 @@
 			netif_tx_stop_all_queues(nic->netdev);
 		}
 		break;
+	case NIC_MBOX_MSG_ALLOC_SQS:
+		nic->sqs_count = mbx.sqs_alloc.qs_count;
+		nic->pf_acked = true;
+		break;
+	case NIC_MBOX_MSG_SNICVF_PTR:
+		/* Primary VF: make note of secondary VF's pointer
+		 * to be used while packet transmission.
+		 */
+		nic->snicvf[mbx.nicvf.sqs_id] =
+			(struct nicvf *)mbx.nicvf.nicvf;
+		nic->pf_acked = true;
+		break;
+	case NIC_MBOX_MSG_PNICVF_PTR:
+		/* Secondary VF/Qset: make note of primary VF's pointer
+		 * to be used during packet reception, to hand the packet
+		 * over to the primary VF's netdev.
+		 */
+		nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
+		nic->pf_acked = true;
+		break;
 	default:
 		netdev_err(nic->netdev,
 			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
@@ -326,7 +344,7 @@
 
 	nicvf_get_rss_size(nic);
 
-	if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) {
+	if (cpi_alg != CPI_ALG_NONE) {
 		rss->enable = false;
 		rss->hash_bits = 0;
 		return 0;
@@ -350,11 +368,100 @@
 
 	for (idx = 0; idx < rss->rss_size; idx++)
 		rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
-							       nic->qs->rq_cnt);
+							       nic->rx_queues);
 	nicvf_config_rss(nic);
 	return 1;
 }
 
+/* Request PF to allocate additional Qsets */
+static void nicvf_request_sqs(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+	int sqs;
+	int sqs_count = nic->sqs_count;
+	int rx_queues = 0, tx_queues = 0;
+
+	/* Only primary VF should request */
+	if (nic->sqs_mode || !nic->sqs_count)
+		return;
+
+	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
+	mbx.sqs_alloc.vf_id = nic->vf_id;
+	mbx.sqs_alloc.qs_count = nic->sqs_count;
+	if (nicvf_send_msg_to_pf(nic, &mbx)) {
+		/* No response from PF */
+		nic->sqs_count = 0;
+		return;
+	}
+
+	/* Return if no Secondary Qsets available */
+	if (!nic->sqs_count)
+		return;
+
+	if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
+		rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;
+	if (nic->tx_queues > MAX_SND_QUEUES_PER_QS)
+		tx_queues = nic->tx_queues - MAX_SND_QUEUES_PER_QS;
+
+	/* Set no of Rx/Tx queues in each of the SQsets */
+	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+		mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
+		mbx.nicvf.vf_id = nic->vf_id;
+		mbx.nicvf.sqs_id = sqs;
+		nicvf_send_msg_to_pf(nic, &mbx);
+
+		nic->snicvf[sqs]->sqs_id = sqs;
+		if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
+			nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
+			rx_queues -= MAX_RCV_QUEUES_PER_QS;
+		} else {
+			nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
+			rx_queues = 0;
+		}
+
+		if (tx_queues > MAX_SND_QUEUES_PER_QS) {
+			nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
+			tx_queues -= MAX_SND_QUEUES_PER_QS;
+		} else {
+			nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
+			tx_queues = 0;
+		}
+
+		nic->snicvf[sqs]->qs->cq_cnt =
+		max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);
+
+		/* Initialize secondary Qset's queues and its interrupts */
+		nicvf_open(nic->snicvf[sqs]->netdev);
+	}
+
+	/* Update stack with actual Rx/Tx queue count allocated */
+	if (sqs_count != nic->sqs_count)
+		nicvf_set_real_num_queues(nic->netdev,
+					  nic->tx_queues, nic->rx_queues);
+}
+
+/* Send this Qset's nicvf pointer to PF.
+ * PF in turn sends primary VF's nicvf struct to secondary Qsets/VFs
+ * so that packets received by these Qsets can use primary VF's netdev
+ */
+static void nicvf_send_vf_struct(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+
+	mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
+	mbx.nicvf.sqs_mode = nic->sqs_mode;
+	mbx.nicvf.nicvf = (u64)nic;
+	nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_get_primary_vf_struct(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+
+	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
+	nicvf_send_msg_to_pf(nic, &mbx);
+}
+
 int nicvf_set_real_num_queues(struct net_device *netdev,
 			      int tx_queues, int rx_queues)
 {
@@ -429,6 +536,34 @@
 	}
 }
 
+static inline void nicvf_set_rxhash(struct net_device *netdev,
+				    struct cqe_rx_t *cqe_rx,
+				    struct sk_buff *skb)
+{
+	u8 hash_type;
+	u32 hash;
+
+	if (!(netdev->features & NETIF_F_RXHASH))
+		return;
+
+	switch (cqe_rx->rss_alg) {
+	case RSS_ALG_TCP_IP:
+	case RSS_ALG_UDP_IP:
+		hash_type = PKT_HASH_TYPE_L4;
+		hash = cqe_rx->rss_tag;
+		break;
+	case RSS_ALG_IP:
+		hash_type = PKT_HASH_TYPE_L3;
+		hash = cqe_rx->rss_tag;
+		break;
+	default:
+		hash_type = PKT_HASH_TYPE_NONE;
+		hash = 0;
+	}
+
+	skb_set_hash(skb, hash, hash_type);
+}
+
 static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 				  struct napi_struct *napi,
 				  struct cmp_queue *cq,
@@ -437,6 +572,15 @@
 	struct sk_buff *skb;
 	struct nicvf *nic = netdev_priv(netdev);
 	int err = 0;
+	int rq_idx;
+
+	rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
+
+	if (nic->sqs_mode) {
+		/* Use primary VF's 'nicvf' struct */
+		nic = nic->pnicvf;
+		netdev = nic->netdev;
+	}
 
 	/* Check for errors */
 	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
@@ -456,9 +600,17 @@
 			       skb->data, skb->len, true);
 	}
 
+	/* If error packet, drop it here */
+	if (err) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
 	nicvf_set_rx_frame_cnt(nic, skb);
 
-	skb_record_rx_queue(skb, cqe_rx->rq_idx);
+	nicvf_set_rxhash(netdev, cqe_rx, skb);
+
+	skb_record_rx_queue(skb, rq_idx);
 	if (netdev->hw_features & NETIF_F_RXCSUM) {
 		/* HW by default verifies TCP/UDP/SCTP checksums */
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -468,6 +620,11 @@
 
 	skb->protocol = eth_type_trans(skb, netdev);
 
+	/* Check for stripped VLAN */
+	if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       ntohs((__force __be16)cqe_rx->vlan_tci));
+
 	if (napi && (netdev->features & NETIF_F_GRO))
 		napi_gro_receive(napi, skb);
 	else
@@ -549,8 +706,11 @@
 done:
 	/* Wakeup TXQ if its stopped earlier due to SQ full */
 	if (tx_done) {
-		txq = netdev_get_tx_queue(netdev, cq_idx);
-		if (netif_tx_queue_stopped(txq)) {
+		netdev = nic->pnicvf->netdev;
+		txq = netdev_get_tx_queue(netdev,
+					  nicvf_netdev_qidx(nic, cq_idx));
+		nic = nic->pnicvf;
+		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
 			netif_tx_start_queue(txq);
 			nic->drv_stats.txq_wake++;
 			if (netif_msg_tx_err(nic))
@@ -624,11 +784,20 @@
 	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
 }
 
+static void nicvf_dump_intr_status(struct nicvf *nic)
+{
+	if (netif_msg_intr(nic))
+		netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
+			    nic->netdev->name, nicvf_reg_read(nic, NIC_VF_INT));
+}
+
 static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
 {
 	struct nicvf *nic = (struct nicvf *)nicvf_irq;
 	u64 intr;
 
+	nicvf_dump_intr_status(nic);
+
 	intr = nicvf_reg_read(nic, NIC_VF_INT);
 	/* Check for spurious interrupt */
 	if (!(intr & NICVF_INTR_MBOX_MASK))
@@ -639,59 +808,58 @@
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq)
+static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
 {
-	u64 qidx, intr, clear_intr = 0;
-	u64 cq_intr, rbdr_intr, qs_err_intr;
+	struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
+	struct nicvf *nic = cq_poll->nicvf;
+	int qidx = cq_poll->cq_idx;
+
+	nicvf_dump_intr_status(nic);
+
+	/* Disable interrupts */
+	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+
+	/* Schedule NAPI */
+	napi_schedule(&cq_poll->napi);
+
+	/* Clear interrupt */
+	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
+{
 	struct nicvf *nic = (struct nicvf *)nicvf_irq;
-	struct queue_set *qs = nic->qs;
-	struct nicvf_cq_poll *cq_poll = NULL;
+	u8 qidx;
 
-	intr = nicvf_reg_read(nic, NIC_VF_INT);
-	if (netif_msg_intr(nic))
-		netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
-			    nic->netdev->name, intr);
 
-	qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK;
-	if (qs_err_intr) {
-		/* Disable Qset err interrupt and schedule softirq */
-		nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
-		tasklet_hi_schedule(&nic->qs_err_task);
-		clear_intr |= qs_err_intr;
-	}
+	nicvf_dump_intr_status(nic);
 
-	/* Disable interrupts and start polling */
-	cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT;
-	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
-		if (!(cq_intr & (1 << qidx)))
+	/* Disable RBDR interrupt and schedule softirq */
+	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
+		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
 			continue;
-		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx))
-			continue;
-
-		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
-		clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT);
-
-		cq_poll = nic->napi[qidx];
-		/* Schedule NAPI */
-		if (cq_poll)
-			napi_schedule(&cq_poll->napi);
+		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+		tasklet_hi_schedule(&nic->rbdr_task);
+		/* Clear interrupt */
+		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
 	}
 
-	/* Handle RBDR interrupts */
-	rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT;
-	if (rbdr_intr) {
-		/* Disable RBDR interrupt and schedule softirq */
-		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
-			if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
-				continue;
-			nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
-			tasklet_hi_schedule(&nic->rbdr_task);
-			clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT);
-		}
-	}
+	return IRQ_HANDLED;
+}
 
-	/* Clear interrupts */
-	nicvf_reg_write(nic, NIC_VF_INT, clear_intr);
+static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
+{
+	struct nicvf *nic = (struct nicvf *)nicvf_irq;
+
+	nicvf_dump_intr_status(nic);
+
+	/* Disable Qset err interrupt and schedule softirq */
+	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+	tasklet_hi_schedule(&nic->qs_err_task);
+	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
+
 	return IRQ_HANDLED;
 }
 
@@ -725,7 +893,7 @@
 
 static int nicvf_register_interrupts(struct nicvf *nic)
 {
-	int irq, free, ret = 0;
+	int irq, ret = 0;
 	int vector;
 
 	for_each_cq_irq(irq)
@@ -740,44 +908,42 @@
 		sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
 			nic->vf_id, irq - NICVF_INTR_ID_RBDR);
 
-	/* Register all interrupts except mailbox */
-	for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) {
+	/* Register CQ interrupts */
+	for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
 		vector = nic->msix_entries[irq].vector;
 		ret = request_irq(vector, nicvf_intr_handler,
-				  0, nic->irq_name[irq], nic);
+				  0, nic->irq_name[irq], nic->napi[irq]);
 		if (ret)
-			break;
+			goto err;
 		nic->irq_allocated[irq] = true;
 	}
 
-	for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) {
+	/* Register RBDR interrupt */
+	for (irq = NICVF_INTR_ID_RBDR;
+	     irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
 		vector = nic->msix_entries[irq].vector;
-		ret = request_irq(vector, nicvf_intr_handler,
+		ret = request_irq(vector, nicvf_rbdr_intr_handler,
 				  0, nic->irq_name[irq], nic);
 		if (ret)
-			break;
+			goto err;
 		nic->irq_allocated[irq] = true;
 	}
 
+	/* Register QS error interrupt */
 	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
 		"NICVF%d Qset error", nic->vf_id);
-	if (!ret) {
-		vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector;
-		irq = NICVF_INTR_ID_QS_ERR;
-		ret = request_irq(vector, nicvf_intr_handler,
-				  0, nic->irq_name[irq], nic);
-		if (!ret)
-			nic->irq_allocated[irq] = true;
-	}
+	irq = NICVF_INTR_ID_QS_ERR;
+	ret = request_irq(nic->msix_entries[irq].vector,
+			  nicvf_qs_err_intr_handler,
+			  0, nic->irq_name[irq], nic);
+	if (!ret)
+		nic->irq_allocated[irq] = true;
 
-	if (ret) {
-		netdev_err(nic->netdev, "Request irq failed\n");
-		for (free = 0; free < irq; free++)
-			free_irq(nic->msix_entries[free].vector, nic);
-		return ret;
-	}
+err:
+	if (ret)
+		netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);
 
-	return 0;
+	return ret;
 }
 
 static void nicvf_unregister_interrupts(struct nicvf *nic)
@@ -786,8 +952,14 @@
 
 	/* Free registered interrupts */
 	for (irq = 0; irq < nic->num_vec; irq++) {
-		if (nic->irq_allocated[irq])
+		if (!nic->irq_allocated[irq])
+			continue;
+
+		if (irq < NICVF_INTR_ID_SQ)
+			free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
+		else
 			free_irq(nic->msix_entries[irq].vector, nic);
+
 		nic->irq_allocated[irq] = false;
 	}
 
@@ -852,13 +1024,26 @@
 			netdev_warn(netdev,
 				    "%s: Transmit ring full, stopping SQ%d\n",
 				    netdev->name, qid);
-
 		return NETDEV_TX_BUSY;
 	}
 
 	return NETDEV_TX_OK;
 }
 
+static inline void nicvf_free_cq_poll(struct nicvf *nic)
+{
+	struct nicvf_cq_poll *cq_poll;
+	int qidx;
+
+	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
+		cq_poll = nic->napi[qidx];
+		if (!cq_poll)
+			continue;
+		nic->napi[qidx] = NULL;
+		kfree(cq_poll);
+	}
+}
+
 int nicvf_stop(struct net_device *netdev)
 {
 	int irq, qidx;
@@ -871,6 +1056,17 @@
 	nicvf_send_msg_to_pf(nic, &mbx);
 
 	netif_carrier_off(netdev);
+	netif_tx_stop_all_queues(nic->netdev);
+
+	/* Teardown secondary qsets first */
+	if (!nic->sqs_mode) {
+		for (qidx = 0; qidx < nic->sqs_count; qidx++) {
+			if (!nic->snicvf[qidx])
+				continue;
+			nicvf_stop(nic->snicvf[qidx]->netdev);
+			nic->snicvf[qidx] = NULL;
+		}
+	}
 
 	/* Disable RBDR & QS error interrupts */
 	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
@@ -893,7 +1089,6 @@
 		cq_poll = nic->napi[qidx];
 		if (!cq_poll)
 			continue;
-		nic->napi[qidx] = NULL;
 		napi_synchronize(&cq_poll->napi);
 		/* CQ intr is enabled while napi_complete,
 		 * so disable it now
@@ -902,7 +1097,6 @@
 		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
 		napi_disable(&cq_poll->napi);
 		netif_napi_del(&cq_poll->napi);
-		kfree(cq_poll);
 	}
 
 	netif_tx_disable(netdev);
@@ -918,6 +1112,12 @@
 
 	nicvf_unregister_interrupts(nic);
 
+	nicvf_free_cq_poll(nic);
+
+	/* Clear multiqset info */
+	nic->pnicvf = nic;
+	nic->sqs_count = 0;
+
 	return 0;
 }
 
@@ -944,6 +1144,7 @@
 			goto napi_del;
 		}
 		cq_poll->cq_idx = qidx;
+		cq_poll->nicvf = nic;
 		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
 			       NAPI_POLL_WEIGHT);
 		napi_enable(&cq_poll->napi);
@@ -972,10 +1173,16 @@
 
 	/* Configure CPI algorithm */
 	nic->cpi_alg = cpi_alg;
-	nicvf_config_cpi(nic);
+	if (!nic->sqs_mode)
+		nicvf_config_cpi(nic);
+
+	nicvf_request_sqs(nic);
+	if (nic->sqs_mode)
+		nicvf_get_primary_vf_struct(nic);
 
 	/* Configure receive side scaling */
-	nicvf_rss_init(nic);
+	if (!nic->sqs_mode)
+		nicvf_rss_init(nic);
 
 	err = nicvf_register_interrupts(nic);
 	if (err)
@@ -1011,6 +1218,8 @@
 cleanup:
 	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
 	nicvf_unregister_interrupts(nic);
+	tasklet_kill(&nic->qs_err_task);
+	tasklet_kill(&nic->rbdr_task);
 napi_del:
 	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
 		cq_poll = nic->napi[qidx];
@@ -1018,9 +1227,8 @@
 			continue;
 		napi_disable(&cq_poll->napi);
 		netif_napi_del(&cq_poll->napi);
-		kfree(cq_poll);
-		nic->napi[qidx] = NULL;
 	}
+	nicvf_free_cq_poll(nic);
 	return err;
 }
 
@@ -1077,7 +1285,6 @@
 {
 	int stat = 0;
 	union nic_mbx mbx = {};
-	int timeout;
 
 	if (!netif_running(nic->netdev))
 		return;
@@ -1087,14 +1294,9 @@
 	/* Rx stats */
 	mbx.bgx_stats.rx = 1;
 	while (stat < BGX_RX_STATS_COUNT) {
-		nic->bgx_stats_acked = 0;
 		mbx.bgx_stats.idx = stat;
-		nicvf_send_msg_to_pf(nic, &mbx);
-		timeout = 0;
-		while ((!nic->bgx_stats_acked) && (timeout < 10)) {
-			msleep(2);
-			timeout++;
-		}
+		if (nicvf_send_msg_to_pf(nic, &mbx))
+			return;
 		stat++;
 	}
 
@@ -1103,14 +1305,9 @@
 	/* Tx stats */
 	mbx.bgx_stats.rx = 0;
 	while (stat < BGX_TX_STATS_COUNT) {
-		nic->bgx_stats_acked = 0;
 		mbx.bgx_stats.idx = stat;
-		nicvf_send_msg_to_pf(nic, &mbx);
-		timeout = 0;
-		while ((!nic->bgx_stats_acked) && (timeout < 10)) {
-			msleep(2);
-			timeout++;
-		}
+		if (nicvf_send_msg_to_pf(nic, &mbx))
+			return;
 		stat++;
 	}
 }
@@ -1118,7 +1315,7 @@
 void nicvf_update_stats(struct nicvf *nic)
 {
 	int qidx;
-	struct nicvf_hw_stats *stats = &nic->stats;
+	struct nicvf_hw_stats *stats = &nic->hw_stats;
 	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
 	struct queue_set *qs = nic->qs;
 
@@ -1127,14 +1324,16 @@
 #define GET_TX_STATS(reg) \
 	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
 
-	stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS);
-	stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST);
-	stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST);
-	stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST);
+	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
+	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
+	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
+	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
 	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
 	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
 	stats->rx_drop_red = GET_RX_STATS(RX_RED);
+	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
 	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
+	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
 	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
 	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
 	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
@@ -1146,9 +1345,6 @@
 	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
 	stats->tx_drops = GET_TX_STATS(TX_DROP);
 
-	drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok +
-				  stats->rx_bcast_frames_ok +
-				  stats->rx_mcast_frames_ok;
 	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
 				  stats->tx_bcast_frames_ok +
 				  stats->tx_mcast_frames_ok;
@@ -1167,14 +1363,15 @@
 					    struct rtnl_link_stats64 *stats)
 {
 	struct nicvf *nic = netdev_priv(netdev);
-	struct nicvf_hw_stats *hw_stats = &nic->stats;
+	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
 	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
 
 	nicvf_update_stats(nic);
 
-	stats->rx_bytes = hw_stats->rx_bytes_ok;
+	stats->rx_bytes = hw_stats->rx_bytes;
 	stats->rx_packets = drv_stats->rx_frames_ok;
 	stats->rx_dropped = drv_stats->rx_drops;
+	stats->multicast = hw_stats->rx_mcast_frames;
 
 	stats->tx_bytes = hw_stats->tx_bytes_ok;
 	stats->tx_packets = drv_stats->tx_frames_ok;
@@ -1208,6 +1405,45 @@
 	nic->netdev->trans_start = jiffies;
 }
 
+static int nicvf_config_loopback(struct nicvf *nic,
+				 netdev_features_t features)
+{
+	union nic_mbx mbx = {};
+
+	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
+	mbx.lbk.vf_id = nic->vf_id;
+	mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;
+
+	return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static netdev_features_t nicvf_fix_features(struct net_device *netdev,
+					    netdev_features_t features)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	if ((features & NETIF_F_LOOPBACK) &&
+	    netif_running(netdev) && !nic->loopback_supported)
+		features &= ~NETIF_F_LOOPBACK;
+
+	return features;
+}
+
+static int nicvf_set_features(struct net_device *netdev,
+			      netdev_features_t features)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+	netdev_features_t changed = features ^ netdev->features;
+
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+		nicvf_config_vlan_stripping(nic, features);
+
+	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
+		return nicvf_config_loopback(nic, features);
+
+	return 0;
+}
+
 static const struct net_device_ops nicvf_netdev_ops = {
 	.ndo_open		= nicvf_open,
 	.ndo_stop		= nicvf_stop,
@@ -1216,6 +1452,8 @@
 	.ndo_set_mac_address	= nicvf_set_mac_address,
 	.ndo_get_stats64	= nicvf_get_stats64,
 	.ndo_tx_timeout         = nicvf_tx_timeout,
+	.ndo_fix_features       = nicvf_fix_features,
+	.ndo_set_features       = nicvf_set_features,
 };
 
 static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1223,8 +1461,7 @@
 	struct device *dev = &pdev->dev;
 	struct net_device *netdev;
 	struct nicvf *nic;
-	struct queue_set *qs;
-	int    err;
+	int    err, qcount;
 
 	err = pci_enable_device(pdev);
 	if (err) {
@@ -1250,9 +1487,17 @@
 		goto err_release_regions;
 	}
 
-	netdev = alloc_etherdev_mqs(sizeof(struct nicvf),
-				    MAX_RCV_QUEUES_PER_QS,
-				    MAX_SND_QUEUES_PER_QS);
+	qcount = MAX_CMP_QUEUES_PER_QS;
+
+	/* Restrict multiqset support only for host bound VFs */
+	if (pdev->is_virtfn) {
+		/* Set max number of queues per VF */
+		qcount = roundup(num_online_cpus(), MAX_CMP_QUEUES_PER_QS);
+		qcount = min(qcount,
+			     (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
+	}
+
+	netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
 	if (!netdev) {
 		err = -ENOMEM;
 		goto err_release_regions;
@@ -1265,6 +1510,8 @@
 	nic = netdev_priv(netdev);
 	nic->netdev = netdev;
 	nic->pdev = pdev;
+	nic->pnicvf = nic;
+	nic->max_queues = qcount;
 
 	/* MAP VF's configuration registers */
 	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
@@ -1278,20 +1525,31 @@
 	if (err)
 		goto err_free_netdev;
 
-	qs = nic->qs;
-
-	err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
-	if (err)
-		goto err_free_netdev;
-
 	/* Check if PF is alive and get MAC address for this VF */
 	err = nicvf_register_misc_interrupt(nic);
 	if (err)
 		goto err_free_netdev;
 
-	netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
-			     NETIF_F_TSO | NETIF_F_GRO);
-	netdev->hw_features = netdev->features;
+	nicvf_send_vf_struct(nic);
+
+	/* Check if this VF is in QS only mode */
+	if (nic->sqs_mode)
+		return 0;
+
+	err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
+	if (err)
+		goto err_unregister_interrupts;
+
+	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+			       NETIF_F_TSO | NETIF_F_GRO |
+			       NETIF_F_HW_VLAN_CTAG_RX);
+
+	netdev->hw_features |= NETIF_F_RXHASH;
+
+	netdev->features |= netdev->hw_features;
+	netdev->hw_features |= NETIF_F_LOOPBACK;
+
+	netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
 
 	netdev->netdev_ops = &nicvf_netdev_ops;
 	netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
@@ -1326,8 +1584,13 @@
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct nicvf *nic = netdev_priv(netdev);
+	struct net_device *pnetdev = nic->pnicvf->netdev;
 
-	unregister_netdev(netdev);
+	/* Check if this Qset is assigned to a different VF.
+	 * If yes, clean primary and all secondary Qsets.
+	 */
+	if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
+		unregister_netdev(pnetdev);
 	nicvf_unregister_interrupts(nic);
 	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index ca4240a..e404ea8 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -475,6 +475,27 @@
 		return;
 }
 
+void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
+{
+	u64 rq_cfg;
+	int sqs;
+
+	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);
+
+	/* Enable or disable stripping of the first (outer) VLAN tag */
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		rq_cfg |= (1ULL << 25);
+	else
+		rq_cfg &= ~(1ULL << 25);
+	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
+
+	/* Configure Secondary Qsets, if any */
+	for (sqs = 0; sqs < nic->sqs_count; sqs++)
+		if (nic->snicvf[sqs])
+			nicvf_queue_reg_write(nic->snicvf[sqs],
+					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
+}
+
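
The helper above is a read-modify-write of a single RQ_GEN_CFG bit (bit 25)
that is then mirrored to every secondary Qset.  The pattern in isolation,
with a plain variable standing in for the hardware register:

    #include <stdint.h>
    #include <stdio.h>

    #define VLAN_STRIP_BIT (1ULL << 25)  /* bit position from the patch above */

    static uint64_t fake_rq_gen_cfg;     /* stand-in for NIC_QSET_RQ_GEN_CFG */

    static void set_vlan_stripping(int enable)
    {
        uint64_t cfg = fake_rq_gen_cfg;  /* read */

        if (enable)
            cfg |= VLAN_STRIP_BIT;       /* modify: set the strip bit */
        else
            cfg &= ~VLAN_STRIP_BIT;      /* modify: clear it */
        fake_rq_gen_cfg = cfg;           /* write back */
    }

    int main(void)
    {
        set_vlan_stripping(1);
        printf("cfg = %#llx\n", (unsigned long long)fake_rq_gen_cfg);
        set_vlan_stripping(0);
        printf("cfg = %#llx\n", (unsigned long long)fake_rq_gen_cfg);
        return 0;
    }
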
 /* Configures receive queue */
 static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 				   int qidx, bool enable)
@@ -524,7 +545,9 @@
 	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
 	nicvf_send_msg_to_pf(nic, &mbx);
 
-	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00);
+	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
+	if (!nic->sqs_mode)
+		nicvf_config_vlan_stripping(nic, nic->netdev->features);
 
 	/* Enable Receive queue */
 	rq_cfg.ena = 1;
@@ -598,6 +621,7 @@
 	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
 	mbx.sq.qs_num = qs->vnic_id;
 	mbx.sq.sq_num = qidx;
+	mbx.sq.sqs_mode = nic->sqs_mode;
 	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
 	nicvf_send_msg_to_pf(nic, &mbx);
 
@@ -679,6 +703,7 @@
 	/* Send a mailbox msg to PF to config Qset */
 	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
 	mbx.qs.num = qs->vnic_id;
+	mbx.qs.sqs_count = nic->sqs_count;
 
 	mbx.qs.cfg = 0;
 	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
@@ -759,6 +784,10 @@
 	qs->rbdr_len = RCV_BUF_COUNT;
 	qs->sq_len = SND_QUEUE_LEN;
 	qs->cq_len = CMP_QUEUE_LEN;
+
+	nic->rx_queues = qs->rq_cnt;
+	nic->tx_queues = qs->sq_cnt;
+
 	return 0;
 }
 
@@ -961,9 +990,6 @@
 
 	/* Offload checksum calculation to HW */
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		if (skb->protocol != htons(ETH_P_IP))
-			return;
-
 		hdr->csum_l3 = 1; /* Enable IP csum calculation */
 		hdr->l3_offset = skb_network_offset(skb);
 		hdr->l4_offset = skb_transport_offset(skb);
@@ -1005,7 +1031,7 @@
  * them to SQ for transfer
  */
 static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
-			       int qentry, struct sk_buff *skb)
+			       int sq_num, int qentry, struct sk_buff *skb)
 {
 	struct tso_t tso;
 	int seg_subdescs = 0, desc_cnt = 0;
@@ -1065,7 +1091,7 @@
 
 	/* Inform HW to xmit all TSO segments */
 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
-			      skb_get_queue_mapping(skb), desc_cnt);
+			      sq_num, desc_cnt);
 	nic->drv_stats.tx_tso++;
 	return 1;
 }
@@ -1076,10 +1102,24 @@
 	int i, size;
 	int subdesc_cnt;
 	int sq_num, qentry;
-	struct queue_set *qs = nic->qs;
+	struct queue_set *qs;
 	struct snd_queue *sq;
 
 	sq_num = skb_get_queue_mapping(skb);
+	if (sq_num >= MAX_SND_QUEUES_PER_QS) {
+		/* Get secondary Qset's SQ structure */
+		i = sq_num / MAX_SND_QUEUES_PER_QS;
+		if (!nic->snicvf[i - 1]) {
+			netdev_warn(nic->netdev,
+				    "Secondary Qset#%d's ptr not initialized\n",
+				    i - 1);
+			return 1;
+		}
+		nic = (struct nicvf *)nic->snicvf[i - 1];
+		sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
+	}
+
+	qs = nic->qs;
 	sq = &qs->sq[sq_num];
 
 	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
@@ -1090,7 +1130,7 @@
 
 	/* Check if its a TSO packet */
 	if (skb_shinfo(skb)->gso_size)
-		return nicvf_sq_append_tso(nic, sq, qentry, skb);
+		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
 
 	/* Add SQ header subdesc */
 	nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
@@ -1126,6 +1166,8 @@
 	return 1;
 
 append_fail:
+	/* Use original PCI dev for debug log */
+	nic = nic->pnicvf;
 	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
 	return 0;
 }
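
When a VF owns several Qsets, the queue index returned by
skb_get_queue_mapping() is global: the division selects the secondary Qset
and the remainder is the queue local to that Qset, exactly as in the hunk
above.  A sketch of the mapping, assuming 8 send queues per Qset (the real
MAX_SND_QUEUES_PER_QS value is not visible in this patch):

    #include <stdio.h>

    #define MAX_SND_QUEUES_PER_QS 8  /* assumed value, illustration only */

    int main(void)
    {
        int sq_num;

        for (sq_num = 0; sq_num < 24; sq_num += 7) {
            if (sq_num >= MAX_SND_QUEUES_PER_QS) {
                int sqs = sq_num / MAX_SND_QUEUES_PER_QS;    /* which Qset */
                int local = sq_num % MAX_SND_QUEUES_PER_QS;  /* queue in it */

                /* snicvf[] indexes secondary Qsets from 0, hence sqs - 1 */
                printf("queue %2d -> secondary Qset %d, local SQ %d\n",
                       sq_num, sqs - 1, local);
            } else {
                printf("queue %2d -> primary Qset, local SQ %d\n",
                       sq_num, sq_num);
            }
        }
        return 0;
    }
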
@@ -1371,10 +1413,11 @@
 int nicvf_check_cqe_rx_errs(struct nicvf *nic,
 			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
 {
-	struct cmp_queue_stats *stats = &cq->stats;
+	struct nicvf_hw_stats *stats = &nic->hw_stats;
+	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
 
 	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
-		stats->rx.errop.good++;
+		drv_stats->rx_frames_ok++;
 		return 0;
 	}
 
@@ -1384,111 +1427,78 @@
 			   nic->netdev->name,
 			   cqe_rx->err_level, cqe_rx->err_opcode);
 
-	switch (cqe_rx->err_level) {
-	case CQ_ERRLVL_MAC:
-		stats->rx.errlvl.mac_errs++;
-		break;
-	case CQ_ERRLVL_L2:
-		stats->rx.errlvl.l2_errs++;
-		break;
-	case CQ_ERRLVL_L3:
-		stats->rx.errlvl.l3_errs++;
-		break;
-	case CQ_ERRLVL_L4:
-		stats->rx.errlvl.l4_errs++;
-		break;
-	}
-
 	switch (cqe_rx->err_opcode) {
 	case CQ_RX_ERROP_RE_PARTIAL:
-		stats->rx.errop.partial_pkts++;
+		stats->rx_bgx_truncated_pkts++;
 		break;
 	case CQ_RX_ERROP_RE_JABBER:
-		stats->rx.errop.jabber_errs++;
+		stats->rx_jabber_errs++;
 		break;
 	case CQ_RX_ERROP_RE_FCS:
-		stats->rx.errop.fcs_errs++;
-		break;
-	case CQ_RX_ERROP_RE_TERMINATE:
-		stats->rx.errop.terminate_errs++;
+		stats->rx_fcs_errs++;
 		break;
 	case CQ_RX_ERROP_RE_RX_CTL:
-		stats->rx.errop.bgx_rx_errs++;
+		stats->rx_bgx_errs++;
 		break;
 	case CQ_RX_ERROP_PREL2_ERR:
-		stats->rx.errop.prel2_errs++;
-		break;
-	case CQ_RX_ERROP_L2_FRAGMENT:
-		stats->rx.errop.l2_frags++;
-		break;
-	case CQ_RX_ERROP_L2_OVERRUN:
-		stats->rx.errop.l2_overruns++;
-		break;
-	case CQ_RX_ERROP_L2_PFCS:
-		stats->rx.errop.l2_pfcs++;
-		break;
-	case CQ_RX_ERROP_L2_PUNY:
-		stats->rx.errop.l2_puny++;
+		stats->rx_prel2_errs++;
 		break;
 	case CQ_RX_ERROP_L2_MAL:
-		stats->rx.errop.l2_hdr_malformed++;
+		stats->rx_l2_hdr_malformed++;
 		break;
 	case CQ_RX_ERROP_L2_OVERSIZE:
-		stats->rx.errop.l2_oversize++;
+		stats->rx_oversize++;
 		break;
 	case CQ_RX_ERROP_L2_UNDERSIZE:
-		stats->rx.errop.l2_undersize++;
+		stats->rx_undersize++;
 		break;
 	case CQ_RX_ERROP_L2_LENMISM:
-		stats->rx.errop.l2_len_mismatch++;
+		stats->rx_l2_len_mismatch++;
 		break;
 	case CQ_RX_ERROP_L2_PCLP:
-		stats->rx.errop.l2_pclp++;
+		stats->rx_l2_pclp++;
 		break;
 	case CQ_RX_ERROP_IP_NOT:
-		stats->rx.errop.non_ip++;
+		stats->rx_ip_ver_errs++;
 		break;
 	case CQ_RX_ERROP_IP_CSUM_ERR:
-		stats->rx.errop.ip_csum_err++;
+		stats->rx_ip_csum_errs++;
 		break;
 	case CQ_RX_ERROP_IP_MAL:
-		stats->rx.errop.ip_hdr_malformed++;
+		stats->rx_ip_hdr_malformed++;
 		break;
 	case CQ_RX_ERROP_IP_MALD:
-		stats->rx.errop.ip_payload_malformed++;
+		stats->rx_ip_payload_malformed++;
 		break;
 	case CQ_RX_ERROP_IP_HOP:
-		stats->rx.errop.ip_hop_errs++;
-		break;
-	case CQ_RX_ERROP_L3_ICRC:
-		stats->rx.errop.l3_icrc_errs++;
+		stats->rx_ip_ttl_errs++;
 		break;
 	case CQ_RX_ERROP_L3_PCLP:
-		stats->rx.errop.l3_pclp++;
+		stats->rx_l3_pclp++;
 		break;
 	case CQ_RX_ERROP_L4_MAL:
-		stats->rx.errop.l4_malformed++;
+		stats->rx_l4_malformed++;
 		break;
 	case CQ_RX_ERROP_L4_CHK:
-		stats->rx.errop.l4_csum_errs++;
+		stats->rx_l4_csum_errs++;
 		break;
 	case CQ_RX_ERROP_UDP_LEN:
-		stats->rx.errop.udp_len_err++;
+		stats->rx_udp_len_errs++;
 		break;
 	case CQ_RX_ERROP_L4_PORT:
-		stats->rx.errop.bad_l4_port++;
+		stats->rx_l4_port_errs++;
 		break;
 	case CQ_RX_ERROP_TCP_FLAG:
-		stats->rx.errop.bad_tcp_flag++;
+		stats->rx_tcp_flag_errs++;
 		break;
 	case CQ_RX_ERROP_TCP_OFFSET:
-		stats->rx.errop.tcp_offset_errs++;
+		stats->rx_tcp_offset_errs++;
 		break;
 	case CQ_RX_ERROP_L4_PCLP:
-		stats->rx.errop.l4_pclp++;
+		stats->rx_l4_pclp++;
 		break;
 	case CQ_RX_ERROP_RBDR_TRUNC:
-		stats->rx.errop.pkt_truncated++;
+		stats->rx_truncated_pkts++;
 		break;
 	}
 
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index f0937b7..fb4957d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -181,47 +181,6 @@
 };
 
 struct cmp_queue_stats {
-	struct rx_stats {
-		struct {
-			u64 mac_errs;
-			u64 l2_errs;
-			u64 l3_errs;
-			u64 l4_errs;
-		} errlvl;
-		struct {
-			u64 good;
-			u64 partial_pkts;
-			u64 jabber_errs;
-			u64 fcs_errs;
-			u64 terminate_errs;
-			u64 bgx_rx_errs;
-			u64 prel2_errs;
-			u64 l2_frags;
-			u64 l2_overruns;
-			u64 l2_pfcs;
-			u64 l2_puny;
-			u64 l2_hdr_malformed;
-			u64 l2_oversize;
-			u64 l2_undersize;
-			u64 l2_len_mismatch;
-			u64 l2_pclp;
-			u64 non_ip;
-			u64 ip_csum_err;
-			u64 ip_hdr_malformed;
-			u64 ip_payload_malformed;
-			u64 ip_hop_errs;
-			u64 l3_icrc_errs;
-			u64 l3_pclp;
-			u64 l4_malformed;
-			u64 l4_csum_errs;
-			u64 udp_len_err;
-			u64 bad_l4_port;
-			u64 bad_tcp_flag;
-			u64 tcp_offset_errs;
-			u64 l4_pclp;
-			u64 pkt_truncated;
-		} errop;
-	} rx;
 	struct tx_stats {
 		u64 good;
 		u64 desc_fault;
@@ -292,6 +251,7 @@
 	void		*desc;
 	struct q_desc_mem   dmem;
 	struct cmp_queue_stats	stats;
+	int		irq;
 } ____cacheline_aligned_in_smp;
 
 struct snd_queue {
@@ -347,6 +307,8 @@
 
 #define	CQ_ERR_MASK	(CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
 
+void nicvf_config_vlan_stripping(struct nicvf *nic,
+				 netdev_features_t features);
 int nicvf_set_qset_resources(struct nicvf *nic);
 int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
 void nicvf_qset_config(struct nicvf *nic, bool enable);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index b961a89..574c492 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -6,6 +6,7 @@
  * as published by the Free Software Foundation.
  */
 
+#include <linux/acpi.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
@@ -26,7 +27,7 @@
 struct lmac {
 	struct bgx		*bgx;
 	int			dmac;
-	unsigned char		mac[ETH_ALEN];
+	u8			mac[ETH_ALEN];
 	bool			link_up;
 	int			lmacid; /* ID within BGX */
 	int			lmacid_bd; /* ID on board */
@@ -328,6 +329,37 @@
 	}
 }
 
+/* Configure BGX LMAC in internal loopback mode */
+void bgx_lmac_internal_loopback(int node, int bgx_idx,
+				int lmac_idx, bool enable)
+{
+	struct bgx *bgx;
+	struct lmac *lmac;
+	u64    cfg;
+
+	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+	if (!bgx)
+		return;
+
+	lmac = &bgx->lmac[lmac_idx];
+	if (lmac->is_sgmii) {
+		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
+		if (enable)
+			cfg |= PCS_MRX_CTL_LOOPBACK1;
+		else
+			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
+		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
+	} else {
+		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
+		if (enable)
+			cfg |= SPU_CTL_LOOPBACK;
+		else
+			cfg &= ~SPU_CTL_LOOPBACK;
+		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
+	}
+}
+EXPORT_SYMBOL(bgx_lmac_internal_loopback);
+
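
A hypothetical in-kernel caller would toggle internal loopback per LMAC like
this; the node/BGX/LMAC indices are made up for illustration:

    /* Sketch of a caller wrapping a loopback self-test. */
    static void example_loopback_selftest(void)
    {
        int node = 0, bgx_idx = 1, lmac_idx = 2;  /* illustrative indices */

        /* Loop the LMAC back on itself while test frames are sent... */
        bgx_lmac_internal_loopback(node, bgx_idx, lmac_idx, true);

        /* ...then restore normal operation. */
        bgx_lmac_internal_loopback(node, bgx_idx, lmac_idx, false);
    }
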
 static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
 {
 	u64 cfg;
@@ -835,18 +867,108 @@
 	}
 }
 
-static void bgx_init_of(struct bgx *bgx, struct device_node *np)
+#ifdef CONFIG_ACPI
+
+static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst)
 {
+	u8 mac[ETH_ALEN];
+	int ret;
+
+	ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
+					    "mac-address", mac, ETH_ALEN);
+	if (ret)
+		goto out;
+
+	if (!is_valid_ether_addr(mac)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	memcpy(dst, mac, ETH_ALEN);
+out:
+	return ret;
+}
+
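
acpi_get_mac_address() is a thin wrapper over
fwnode_property_read_u8_array(): read six bytes from the "mac-address"
property, validate them, copy them out.  A hedged sketch of a caller (the
function name and log text here are invented):

    /* Sketch only: 'adev' would come from the namespace walk below. */
    static void example_fetch_mac(struct acpi_device *adev)
    {
        u8 mac[ETH_ALEN];

        if (acpi_get_mac_address(adev, mac))
            pr_warn("no valid mac-address property, keeping default\n");
        else
            pr_info("ACPI-provided MAC: %pM\n", mac);
    }
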
+/* Currently only sets the MAC address. */
+static acpi_status bgx_acpi_register_phy(acpi_handle handle,
+					 u32 lvl, void *context, void **rv)
+{
+	struct bgx *bgx = context;
+	struct acpi_device *adev;
+
+	if (acpi_bus_get_device(handle, &adev))
+		goto out;
+
+	acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac);
+
+	SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev);
+
+	bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
+out:
+	bgx->lmac_count++;
+	return AE_OK;
+}
+
+static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
+				     void *context, void **ret_val)
+{
+	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct bgx *bgx = context;
+	char bgx_sel[5];
+
+	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
+	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
+		pr_warn("Invalid link device\n");
+		return AE_OK;
+	}
+
+	if (strncmp(string.pointer, bgx_sel, 4))
+		return AE_OK;
+
+	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+			    bgx_acpi_register_phy, NULL, bgx, NULL);
+
+	kfree(string.pointer);
+	return AE_CTRL_TERMINATE;
+}
+
+static int bgx_init_acpi_phy(struct bgx *bgx)
+{
+	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
+	return 0;
+}
+
+#else
+
+static int bgx_init_acpi_phy(struct bgx *bgx)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_ACPI */
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+
+static int bgx_init_of_phy(struct bgx *bgx)
+{
+	struct device_node *np;
 	struct device_node *np_child;
 	u8 lmac = 0;
+	char bgx_sel[5];
+	const char *mac;
+
+	/* Get BGX node from DT */
+	snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
+	np = of_find_node_by_name(NULL, bgx_sel);
+	if (!np)
+		return -ENODEV;
 
 	for_each_child_of_node(np, np_child) {
-		struct device_node *phy_np;
-		const char *mac;
-
-		phy_np = of_parse_phandle(np_child, "phy-handle", 0);
-		if (phy_np)
-			bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
+		struct device_node *phy_np = of_parse_phandle(np_child,
+							      "phy-handle", 0);
+		if (!phy_np)
+			continue;
+		bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
 
 		mac = of_get_mac_address(np_child);
 		if (mac)
@@ -858,6 +980,24 @@
 		if (lmac == MAX_LMAC_PER_BGX)
 			break;
 	}
+	return 0;
+}
+
+#else
+
+static int bgx_init_of_phy(struct bgx *bgx)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_OF_MDIO */
+
+static int bgx_init_phy(struct bgx *bgx)
+{
+	if (!acpi_disabled)
+		return bgx_init_acpi_phy(bgx);
+
+	return bgx_init_of_phy(bgx);
 }
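
The dispatch above tries only the ACPI path when the kernel booted with ACPI
tables (acpi_disabled == 0) and falls back to device tree otherwise; since
each path compiles to a -ENODEV stub when its subsystem is configured out,
bgx_probe() can treat failure uniformly.  The same shape in a generic driver,
sketched with hypothetical per-firmware initializers:

    /* my_init_acpi()/my_init_of() are hypothetical; acpi_disabled is real. */
    static int my_init_fw(struct my_dev *dev)
    {
        if (!acpi_disabled)          /* booted with ACPI tables */
            return my_init_acpi(dev);

        return my_init_of(dev);      /* otherwise use the device tree */
    }
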
 
 static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -865,8 +1005,6 @@
 	int err;
 	struct device *dev = &pdev->dev;
 	struct bgx *bgx = NULL;
-	struct device_node *np;
-	char bgx_sel[5];
 	u8 lmac;
 
 	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
@@ -902,10 +1040,9 @@
 	bgx_vnic[bgx->bgx_id] = bgx;
 	bgx_get_qlm_mode(bgx);
 
-	snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
-	np = of_find_node_by_name(NULL, bgx_sel);
-	if (np)
-		bgx_init_of(bgx, np);
+	err = bgx_init_phy(bgx);
+	if (err)
+		goto err_enable;
 
 	bgx_init_hw(bgx);
 
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index ba4f53b..07b7ec66 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -72,6 +72,7 @@
 
 #define BGX_SPUX_CONTROL1		0x10000
 #define  SPU_CTL_LOW_POWER			BIT_ULL(11)
+#define  SPU_CTL_LOOPBACK			BIT_ULL(14)
 #define  SPU_CTL_RESET				BIT_ULL(15)
 #define BGX_SPUX_STATUS1		0x10008
 #define  SPU_STATUS1_RCV_LNK			BIT_ULL(2)
@@ -126,6 +127,7 @@
 #define	 PCS_MRX_CTL_RST_AN			BIT_ULL(9)
 #define	 PCS_MRX_CTL_PWR_DN			BIT_ULL(11)
 #define	 PCS_MRX_CTL_AN_EN			BIT_ULL(12)
+#define	 PCS_MRX_CTL_LOOPBACK1			BIT_ULL(14)
 #define	 PCS_MRX_CTL_RESET			BIT_ULL(15)
 #define BGX_GMP_PCS_MRX_STATUS		0x30008
 #define	 PCS_MRX_STATUS_AN_CPT			BIT_ULL(5)
@@ -186,6 +188,8 @@
 const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
 void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
 void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
+void bgx_lmac_internal_loopback(int node, int bgx_idx,
+				int lmac_idx, bool enable);
 u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
 u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
 #define BGX_RX_STATS_COUNT 11
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 629f75d..fa0c7b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -767,6 +767,11 @@
 	bool tid_release_task_busy;
 
 	struct dentry *debugfs_root;
+	u32 use_bd;     /* Use SGE Back Door interface for reading SGE Contexts */
+	u32 trace_rss;	/* 1 implies a different RSS flit is used per trace
+			 * filter; 0 means the default RSS flit is used for
+			 * all 4 filters.
+			 */
 
 	spinlock_t stats_lock;
 	spinlock_t win0_lock ____cacheline_aligned_in_smp;
@@ -1284,6 +1289,7 @@
 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
 		  const u8 *fw_data, unsigned int size, int force);
 unsigned int t4_flash_cfg_addr(struct adapter *adapter);
+int t4_check_fw_version(struct adapter *adap);
 int t4_get_fw_version(struct adapter *adapter, u32 *vers);
 int t4_get_tp_version(struct adapter *adapter, u32 *vers);
 int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
@@ -1440,6 +1446,10 @@
 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
 void t4_db_full(struct adapter *adapter);
 void t4_db_dropped(struct adapter *adapter);
+int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
+			int filter_index, int enable);
+void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
+			 int filter_index, int *enabled);
 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
 			 u32 addr, u32 val);
 void t4_sge_decode_idma_state(struct adapter *adapter, int state);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 6074680..052c660 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -31,6 +31,15 @@
 	"Auto Negotiated"
 };
 
+static inline bool cxgb4_dcb_state_synced(enum cxgb4_dcb_state state)
+{
+	return state == CXGB4_DCB_STATE_FW_ALLSYNCED ||
+	       state == CXGB4_DCB_STATE_HOST;
+}
+
 /* Initialize a port's Data Center Bridging state.  Typically used after a
  * Link Down event.
  */
@@ -603,7 +612,7 @@
 	struct port_info *pi = netdev2pinfo(dev);
 	struct port_dcb_info *dcb = &pi->dcb;
 
-	if (dcb->state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+	if (!cxgb4_dcb_state_synced(dcb->state) ||
 	    priority >= CXGB4_MAX_PRIORITY)
 		*pfccfg = 0;
 	else
@@ -620,7 +629,7 @@
 	struct adapter *adap = pi->adapter;
 	int err;
 
-	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+	if (!cxgb4_dcb_state_synced(pi->dcb.state) ||
 	    priority >= CXGB4_MAX_PRIORITY)
 		return;
 
@@ -732,7 +741,7 @@
 {
 	struct port_info *pi = netdev2pinfo(dev);
 
-	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
 		return false;
 
 	return pi->dcb.pfcen != 0;
@@ -756,7 +765,7 @@
 	struct adapter *adap = pi->adapter;
 	int i;
 
-	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
 		return 0;
 
 	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
@@ -794,7 +803,9 @@
  */
 static int cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id)
 {
-	return __cxgb4_getapp(dev, app_idtype, app_id, 0);
+	/* Convert app_idtype to firmware format before querying */
+	return __cxgb4_getapp(dev, app_idtype == DCB_APP_IDTYPE_ETHTYPE ?
+			      app_idtype : 3, app_id, 0);
 }
 
 /* Write a new Application User Priority Map for the specified Application ID
@@ -808,7 +819,7 @@
 	int i, err;
 
 
-	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
 		return -EINVAL;
 
 	/* DCB info gets thrown away on link up */
@@ -896,10 +907,11 @@
 	struct port_info *pi = netdev2pinfo(dev);
 	struct port_dcb_info *dcb = &pi->dcb;
 
-	if (dcb_subtype && !(dcb->msgs & dcb_subtype))
-		return 0;
+	if (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED)
+		if (dcb_subtype && !(dcb->msgs & dcb_subtype))
+			return 0;
 
-	return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED &&
+	return (cxgb4_dcb_state_synced(dcb->state) &&
 		(dcb->supported & DCB_CAP_DCBX_VER_IEEE));
 }
 
@@ -1057,7 +1069,7 @@
 
 	/* Can't enable DCB if we haven't successfully negotiated it.
 	 */
-	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
 		return 1;
 
 	/* There's currently no mechanism to allow for the firmware DCBX
@@ -1080,7 +1092,7 @@
 	struct adapter *adap = pi->adapter;
 	int i, err = 0;
 
-	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
 		return 1;
 
 	info->willing = 0;
@@ -1114,7 +1126,7 @@
 	struct adapter *adap = pi->adapter;
 	int i, err = 0;
 
-	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
 		return 1;
 
 	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
@@ -1133,7 +1145,7 @@
 		if (!pcmd.u.dcb.app_priority.protocolid)
 			break;
 
-		table[i].selector = pcmd.u.dcb.app_priority.sel_field;
+		table[i].selector = (pcmd.u.dcb.app_priority.sel_field + 1);
 		table[i].protocol =
 			be16_to_cpu(pcmd.u.dcb.app_priority.protocolid);
 		table[i].priority =
@@ -1181,6 +1193,8 @@
 	for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
 		pg->pg_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
 
+	pg->tcs_supported = pcmd.u.dcb.pgrate.num_tcs_supported;
+
 	return 0;
 }
 
@@ -1198,6 +1212,8 @@
 	 */
 	pfc->pfc_en = bitswap_1(pi->dcb.pfcen);
 
+	pfc->tcs_supported = pi->dcb.pfc_num_tcs_supported;
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index c3c7db4..0a87a32 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -151,6 +151,45 @@
 	return 0;
 }
 
+static int cim_la_show_t6(struct seq_file *seq, void *v, int idx)
+{
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "Status   Inst    Data      PC     LS0Stat  "
+			 "LS0Addr  LS0Data  LS1Stat  LS1Addr  LS1Data\n");
+	} else {
+		const u32 *p = v;
+
+		seq_printf(seq, "  %02x   %04x%04x %04x%04x %04x%04x %08x %08x %08x %08x %08x %08x\n",
+			   (p[9] >> 16) & 0xff,       /* Status */
+			   p[9] & 0xffff, p[8] >> 16, /* Inst */
+			   p[8] & 0xffff, p[7] >> 16, /* Data */
+			   p[7] & 0xffff, p[6] >> 16, /* PC */
+			   p[2], p[1], p[0],      /* LS0 Stat, Addr and Data */
+			   p[5], p[4], p[3]);     /* LS1 Stat, Addr and Data */
+	}
+	return 0;
+}
+
+static int cim_la_show_pc_t6(struct seq_file *seq, void *v, int idx)
+{
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "Status   Inst    Data      PC\n");
+	} else {
+		const u32 *p = v;
+
+		seq_printf(seq, "  %02x   %08x %08x %08x\n",
+			   p[3] & 0xff, p[2], p[1], p[0]);
+		seq_printf(seq, "  %02x   %02x%06x %02x%06x %02x%06x\n",
+			   (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
+			   p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
+		seq_printf(seq, "  %02x   %04x%04x %04x%04x %04x%04x\n",
+			   (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
+			   p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
+			   p[6] >> 16);
+	}
+	return 0;
+}
+
 static int cim_la_open(struct inode *inode, struct file *file)
 {
 	int ret;
@@ -162,9 +201,18 @@
 	if (ret)
 		return ret;
 
-	p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1,
-			 cfg & UPDBGLACAPTPCONLY_F ?
-			 cim_la_show_3in1 : cim_la_show);
+	if (is_t6(adap->params.chip)) {
+		/* +1 to account for integer division of CIMLA_SIZE/10 */
+		p = seq_open_tab(file, (adap->params.cim_la_size / 10) + 1,
+				 10 * sizeof(u32), 1,
+				 cfg & UPDBGLACAPTPCONLY_F ?
+					cim_la_show_pc_t6 : cim_la_show_t6);
+	} else {
+		p = seq_open_tab(file, adap->params.cim_la_size / 8,
+				 8 * sizeof(u32), 1,
+				 cfg & UPDBGLACAPTPCONLY_F ? cim_la_show_3in1 :
+							     cim_la_show);
+	}
 	if (!p)
 		return -ENOMEM;
 
@@ -298,11 +346,11 @@
 		if (is_t4(adap->params.chip)) {
 			i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A,
 					ARRAY_SIZE(obq_wr_t4), obq_wr_t4);
-				wr = obq_wr_t4;
+			wr = obq_wr_t4;
 		} else {
 			i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A,
 					ARRAY_SIZE(obq_wr_t5), obq_wr_t5);
-				wr = obq_wr_t5;
+			wr = obq_wr_t5;
 		}
 	}
 	if (i)
@@ -1153,6 +1201,299 @@
 	.write   = mbox_write
 };
 
+static int mps_trc_show(struct seq_file *seq, void *v)
+{
+	int enabled, i;
+	struct trace_params tp;
+	unsigned int trcidx = (uintptr_t)seq->private & 3;
+	struct adapter *adap = seq->private - trcidx;
+
+	t4_get_trace_filter(adap, &tp, trcidx, &enabled);
+	if (!enabled) {
+		seq_puts(seq, "tracer is disabled\n");
+		return 0;
+	}
+
+	if (tp.skip_ofst * 8 >= TRACE_LEN) {
+		dev_err(adap->pdev_dev, "illegal trace pattern skip offset\n");
+		return -EINVAL;
+	}
+	if (tp.port < 8) {
+		i = adap->chan_map[tp.port & 3];
+		if (i >= MAX_NPORTS) {
+			dev_err(adap->pdev_dev,
+				"tracer %u is assigned to a nonexistent port\n",
+				trcidx);
+			return -EINVAL;
+		}
+		seq_printf(seq, "tracer is capturing %s %s, ",
+			   adap->port[i]->name, tp.port < 4 ? "Rx" : "Tx");
+	} else
+		seq_printf(seq, "tracer is capturing loopback %d, ",
+			   tp.port - 8);
+	seq_printf(seq, "snap length: %u, min length: %u\n", tp.snap_len,
+		   tp.min_len);
+	seq_printf(seq, "packets captured %smatch filter\n",
+		   tp.invert ? "do not " : "");
+
+	if (tp.skip_ofst) {
+		seq_puts(seq, "filter pattern: ");
+		for (i = 0; i < tp.skip_ofst * 2; i += 2)
+			seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]);
+		seq_putc(seq, '/');
+		for (i = 0; i < tp.skip_ofst * 2; i += 2)
+			seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]);
+		seq_puts(seq, "@0\n");
+	}
+
+	seq_puts(seq, "filter pattern: ");
+	for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2)
+		seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]);
+	seq_putc(seq, '/');
+	for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2)
+		seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]);
+	seq_printf(seq, "@%u\n", (tp.skip_ofst + tp.skip_len) * 8);
+	return 0;
+}
+
+static int mps_trc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mps_trc_show, inode->i_private);
+}
+
+static unsigned int xdigit2int(unsigned char c)
+{
+	return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
+}
+
+#define TRC_PORT_NONE 0xff
+#define TRC_RSS_ENABLE 0x33
+#define TRC_RSS_DISABLE 0x13
+
+/* Set an MPS trace filter.  Syntax is:
+ *
+ * disable
+ *
+ * to disable tracing, or
+ *
+ * interface qid=<qid no> [snaplen=<val>] [minlen=<val>] [not] [<pattern>]...
+ *
+ * where interface is one of rxN, txN, or loopbackN, N = 0..3; qid can be one
+ * of the NIC's response qids obtained from sge_qinfo, and pattern has the form
+ *
+ * <pattern data>[/<pattern mask>][@<anchor>]
+ *
+ * Up to 2 filter patterns can be specified.  If 2 are supplied the first one
+ * must be anchored at 0.  An omitted mask is taken as a mask of 1s, an omitted
+ * anchor is taken as 0.
+ */
+static ssize_t mps_trc_write(struct file *file, const char __user *buf,
+			     size_t count, loff_t *pos)
+{
+	int i, enable, ret;
+	u32 *data, *mask;
+	struct trace_params tp;
+	const struct inode *ino;
+	unsigned int trcidx;
+	char *s, *p, *word, *end;
+	struct adapter *adap;
+	u32 j;
+
+	ino = file_inode(file);
+	trcidx = (uintptr_t)ino->i_private & 3;
+	adap = ino->i_private - trcidx;
+
+	/* Don't accept more than 1K of input: nothing that long can be
+	 * valid except lots of whitespace, so use less.
+	 */
+	if (count > 1024)
+		return -EFBIG;
+	p = s = kzalloc(count + 1, GFP_USER);
+	if (!s)
+		return -ENOMEM;
+	if (copy_from_user(s, buf, count)) {
+		count = -EFAULT;
+		goto out;
+	}
+
+	if (s[count - 1] == '\n')
+		s[count - 1] = '\0';
+
+	enable = strcmp("disable", s) != 0;
+	if (!enable)
+		goto apply;
+
+	/* enable or disable trace multi rss filter */
+	if (adap->trace_rss)
+		t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_ENABLE);
+	else
+		t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_DISABLE);
+
+	memset(&tp, 0, sizeof(tp));
+	tp.port = TRC_PORT_NONE;
+	i = 0;	/* counts pattern nibbles */
+
+	while (p) {
+		while (isspace(*p))
+			p++;
+		word = strsep(&p, " ");
+		if (!*word)
+			break;
+
+		if (!strncmp(word, "qid=", 4)) {
+			end = (char *)word + 4;
+			ret = kstrtouint(end, 10, &j);
+			if (ret)
+				goto out;
+			if (!adap->trace_rss) {
+				t4_write_reg(adap, MPS_T5_TRC_RSS_CONTROL_A, j);
+				continue;
+			}
+
+			switch (trcidx) {
+			case 0:
+				t4_write_reg(adap, MPS_TRC_RSS_CONTROL_A, j);
+				break;
+			case 1:
+				t4_write_reg(adap,
+					     MPS_TRC_FILTER1_RSS_CONTROL_A, j);
+				break;
+			case 2:
+				t4_write_reg(adap,
+					     MPS_TRC_FILTER2_RSS_CONTROL_A, j);
+				break;
+			case 3:
+				t4_write_reg(adap,
+					     MPS_TRC_FILTER3_RSS_CONTROL_A, j);
+				break;
+			}
+			continue;
+		}
+		if (!strncmp(word, "snaplen=", 8)) {
+			end = (char *)word + 8;
+			ret = kstrtouint(end, 10, &j);
+			if (ret || j > 9600) {
+inval:				count = -EINVAL;
+				goto out;
+			}
+			tp.snap_len = j;
+			continue;
+		}
+		if (!strncmp(word, "minlen=", 7)) {
+			end = (char *)word + 7;
+			ret = kstrtouint(end, 10, &j);
+			if (ret || j > TFMINPKTSIZE_M)
+				goto inval;
+			tp.min_len = j;
+			continue;
+		}
+		if (!strcmp(word, "not")) {
+			tp.invert = !tp.invert;
+			continue;
+		}
+		if (!strncmp(word, "loopback", 8) && tp.port == TRC_PORT_NONE) {
+			if (word[8] < '0' || word[8] > '3' || word[9])
+				goto inval;
+			tp.port = word[8] - '0' + 8;
+			continue;
+		}
+		if (!strncmp(word, "tx", 2) && tp.port == TRC_PORT_NONE) {
+			if (word[2] < '0' || word[2] > '3' || word[3])
+				goto inval;
+			tp.port = word[2] - '0' + 4;
+			if (adap->chan_map[tp.port & 3] >= MAX_NPORTS)
+				goto inval;
+			continue;
+		}
+		if (!strncmp(word, "rx", 2) && tp.port == TRC_PORT_NONE) {
+			if (word[2] < '0' || word[2] > '3' || word[3])
+				goto inval;
+			tp.port = word[2] - '0';
+			if (adap->chan_map[tp.port] >= MAX_NPORTS)
+				goto inval;
+			continue;
+		}
+		if (!isxdigit(*word))
+			goto inval;
+
+		/* we have found a trace pattern */
+		if (i) {                            /* split pattern */
+			if (tp.skip_len)            /* too many splits */
+				goto inval;
+			tp.skip_ofst = i / 16;
+		}
+
+		data = &tp.data[i / 8];
+		mask = &tp.mask[i / 8];
+		j = i;
+
+		while (isxdigit(*word)) {
+			if (i >= TRACE_LEN * 2) {
+				count = -EFBIG;
+				goto out;
+			}
+			*data = (*data << 4) + xdigit2int(*word++);
+			if (++i % 8 == 0)
+				data++;
+		}
+		if (*word == '/') {
+			word++;
+			while (isxdigit(*word)) {
+				if (j >= i)         /* mask longer than data */
+					goto inval;
+				*mask = (*mask << 4) + xdigit2int(*word++);
+				if (++j % 8 == 0)
+					mask++;
+			}
+			if (i != j)                 /* mask shorter than data */
+				goto inval;
+		} else {                            /* no mask, use all 1s */
+			for ( ; i - j >= 8; j += 8)
+				*mask++ = 0xffffffff;
+			if (i % 8)
+				*mask = (1 << (i % 8) * 4) - 1;
+		}
+		if (*word == '@') {
+			end = (char *)word + 1;
+			ret = kstrtouint(end, 10, &j);
+			if (*end && *end != '\n')
+				goto inval;
+			if (j & 7)          /* doesn't start at multiple of 8 */
+				goto inval;
+			j /= 8;
+			if (j < tp.skip_ofst)     /* overlaps earlier pattern */
+				goto inval;
+			if (j - tp.skip_ofst > 31)            /* skip too big */
+				goto inval;
+			tp.skip_len = j - tp.skip_ofst;
+		}
+		if (i % 8) {
+			*data <<= (8 - i % 8) * 4;
+			*mask <<= (8 - i % 8) * 4;
+			i = (i + 15) & ~15;         /* 8-byte align */
+		}
+	}
+
+	if (tp.port == TRC_PORT_NONE)
+		goto inval;
+
+apply:
+	i = t4_set_trace_filter(adap, &tp, trcidx, enable);
+	if (i)
+		count = i;
+out:
+	kfree(s);
+	return count;
+}
+
+static const struct file_operations mps_trc_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = mps_trc_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+	.write   = mps_trc_write
+};
+
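
Given the syntax documented above mps_trc_write(), a user-space test could
arm tracer 0 to capture Rx traffic on port 0 and match the IPv4 EtherType
0x0800.  Anchors must be multiples of 8, so the EtherType bytes at offset 12
are matched with an 8-byte pattern anchored at 8 whose mask selects only
bytes 12-13.  The debugfs path below is an assumption; the real directory is
named after the adapter's PCI address:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        /* Path is illustrative; adjust to the actual adapter directory. */
        const char *path = "/sys/kernel/debug/cxgb4/0000:01:00.4/trace0";
        /* rx0: port 0 Rx; 8-byte pattern at anchor 8, mask = bytes 12-13 */
        const char *filter =
            "rx0 snaplen=64 0000000008000000/00000000ffff0000@8\n";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, filter, strlen(filter)) < 0)
            perror("write");
        close(fd);
        return 0;
    }
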
 static ssize_t flash_read(struct file *file, char __user *buf, size_t count,
 			  loff_t *ppos)
 {
@@ -1895,13 +2236,13 @@
 {
 	struct adapter *adap = seq->private;
 	int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
-	int toe_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
+	int iscsi_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
 	int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
 	int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
 	int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
 	int i, r = (uintptr_t)v - 1;
-	int toe_idx = r - eth_entries;
-	int rdma_idx = toe_idx - toe_entries;
+	int iscsi_idx = r - eth_entries;
+	int rdma_idx = iscsi_idx - iscsi_entries;
 	int ciq_idx = rdma_idx - rdma_entries;
 	int ctrl_idx =  ciq_idx - ciq_entries;
 	int fq_idx =  ctrl_idx - ctrl_entries;
@@ -1917,8 +2258,12 @@
 		seq_putc(seq, '\n'); \
 } while (0)
 #define S(s, v) S3("s", s, v)
+#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v)
 #define T(s, v) S3("u", s, tx[i].v)
+#define TL(s, v) T3("lu", s, v)
+#define R3(fmt_spec, s, v) S3(fmt_spec, s, rx[i].v)
 #define R(s, v) S3("u", s, rx[i].v)
+#define RL(s, v) R3("lu", s, v)
 
 	if (r < eth_entries) {
 		int base_qset = r * 4;
@@ -1957,12 +2302,30 @@
 		R("FL avail:", fl.avail);
 		R("FL PIDX:", fl.pidx);
 		R("FL CIDX:", fl.cidx);
-	} else if (toe_idx < toe_entries) {
-		const struct sge_ofld_rxq *rx = &adap->sge.ofldrxq[toe_idx * 4];
-		const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[toe_idx * 4];
-		int n = min(4, adap->sge.ofldqsets - 4 * toe_idx);
+		RL("RxPackets:", stats.pkts);
+		RL("RxCSO:", stats.rx_cso);
+		RL("VLANxtract:", stats.vlan_ex);
+		RL("LROmerged:", stats.lro_merged);
+		RL("LROpackets:", stats.lro_pkts);
+		RL("RxDrops:", stats.rx_drops);
+		TL("TSO:", tso);
+		TL("TxCSO:", tx_cso);
+		TL("VLANins:", vlan_ins);
+		TL("TxQFull:", q.stops);
+		TL("TxQRestarts:", q.restarts);
+		TL("TxMapErr:", mapping_err);
+		RL("FLAllocErr:", fl.alloc_failed);
+		RL("FLLrgAlcErr:", fl.large_alloc_failed);
+		RL("FLStarving:", fl.starving);
 
-		S("QType:", "TOE");
+	} else if (iscsi_idx < iscsi_entries) {
+		const struct sge_ofld_rxq *rx =
+			&adap->sge.ofldrxq[iscsi_idx * 4];
+		const struct sge_ofld_txq *tx =
+			&adap->sge.ofldtxq[iscsi_idx * 4];
+		int n = min(4, adap->sge.ofldqsets - 4 * iscsi_idx);
+
+		S("QType:", "iSCSI");
 		T("TxQ ID:", q.cntxt_id);
 		T("TxQ size:", q.size);
 		T("TxQ inuse:", q.in_use);
@@ -1982,6 +2345,13 @@
 		R("FL avail:", fl.avail);
 		R("FL PIDX:", fl.pidx);
 		R("FL CIDX:", fl.cidx);
+		RL("RxPackets:", stats.pkts);
+		RL("RxImmPkts:", stats.imm);
+		RL("RxNoMem:", stats.nomem);
+		RL("FLAllocErr:", fl.alloc_failed);
+		RL("FLLrgAlcErr:", fl.large_alloc_failed);
+		RL("FLStarving:", fl.starving);
+
 	} else if (rdma_idx < rdma_entries) {
 		const struct sge_ofld_rxq *rx =
 				&adap->sge.rdmarxq[rdma_idx * 4];
@@ -2004,6 +2374,13 @@
 		R("FL avail:", fl.avail);
 		R("FL PIDX:", fl.pidx);
 		R("FL CIDX:", fl.cidx);
+		RL("RxPackets:", stats.pkts);
+		RL("RxImmPkts:", stats.imm);
+		RL("RxNoMem:", stats.nomem);
+		RL("FLAllocErr:", fl.alloc_failed);
+		RL("FLLrgAlcErr:", fl.large_alloc_failed);
+		RL("FLStarving:", fl.starving);
+
 	} else if (ciq_idx < ciq_entries) {
 		const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
 		int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
@@ -2019,6 +2396,9 @@
 		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
 		S3("u", "Intr pktcnt:",
 		   adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+		RL("RxAN:", stats.an);
+		RL("RxNoMem:", stats.nomem);
+
 	} else if (ctrl_idx < ctrl_entries) {
 		const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
 		int n = min(4, adap->params.nports - 4 * ctrl_idx);
@@ -2029,6 +2409,8 @@
 		T("TxQ inuse:", q.in_use);
 		T("TxQ CIDX:", q.cidx);
 		T("TxQ PIDX:", q.pidx);
+		TL("TxQFull:", q.stops);
+		TL("TxQRestarts:", q.restarts);
 	} else if (fq_idx == 0) {
 		const struct sge_rspq *evtq = &adap->sge.fw_evtq;
 
@@ -2044,10 +2426,14 @@
 			   adap->sge.counter_val[evtq->pktcnt_idx]);
 	}
 #undef R
+#undef RL
 #undef T
+#undef TL
 #undef S
+#undef R3
+#undef T3
 #undef S3
-return 0;
+	return 0;
 }
 
 static int sge_queue_entries(const struct adapter *adap)
@@ -2164,6 +2550,73 @@
 	.llseek  = default_llseek,
 };
 
+static int tid_info_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adap = seq->private;
+	const struct tid_info *t = &adap->tids;
+	enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+	if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
+		unsigned int sb;
+
+		if (chip <= CHELSIO_T5)
+			sb = t4_read_reg(adap, LE_DB_SERVER_INDEX_A) / 4;
+		else
+			sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
+
+		if (sb) {
+			seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1,
+				   adap->tids.hash_base,
+				   t->ntids - 1);
+			seq_printf(seq, ", in use: %u/%u\n",
+				   atomic_read(&t->tids_in_use),
+				   atomic_read(&t->hash_tids_in_use));
+		} else if (adap->flags & FW_OFLD_CONN) {
+			seq_printf(seq, "TID range: %u..%u/%u..%u",
+				   t->aftid_base,
+				   t->aftid_end,
+				   adap->tids.hash_base,
+				   t->ntids - 1);
+			seq_printf(seq, ", in use: %u/%u\n",
+				   atomic_read(&t->tids_in_use),
+				   atomic_read(&t->hash_tids_in_use));
+		} else {
+			seq_printf(seq, "TID range: %u..%u",
+				   adap->tids.hash_base,
+				   t->ntids - 1);
+			seq_printf(seq, ", in use: %u\n",
+				   atomic_read(&t->hash_tids_in_use));
+		}
+	} else if (t->ntids) {
+		seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
+		seq_printf(seq, ", in use: %u\n",
+			   atomic_read(&t->tids_in_use));
+	}
+
+	if (t->nstids)
+		seq_printf(seq, "STID range: %u..%u, in use: %u\n",
+			   (!t->stid_base &&
+			   (chip <= CHELSIO_T5)) ?
+			   t->stid_base + 1 : t->stid_base,
+			   t->stid_base + t->nstids - 1, t->stids_in_use);
+	if (t->natids)
+		seq_printf(seq, "ATID range: 0..%u, in use: %u\n",
+			   t->natids - 1, t->atids_in_use);
+	seq_printf(seq, "FTID range: %u..%u\n", t->ftid_base,
+		   t->ftid_base + t->nftids - 1);
+	if (t->nsftids)
+		seq_printf(seq, "SFTID range: %u..%u in use: %u\n",
+			   t->sftid_base, t->sftid_base + t->nsftids - 2,
+			   t->sftids_in_use);
+	if (t->ntids)
+		seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
+			   t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
+			   t4_read_reg(adap, LE_DB_ACT_CNT_IPV6_A));
+	return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tid_info);
+
 static void add_debugfs_mem(struct adapter *adap, const char *name,
 			    unsigned int idx, unsigned int size_mb)
 {
@@ -2227,6 +2680,290 @@
 	.llseek  = generic_file_llseek,
 };
 
+struct mem_desc {
+	unsigned int base;
+	unsigned int limit;
+	unsigned int idx;
+};
+
+static int mem_desc_cmp(const void *a, const void *b)
+{
+	return ((const struct mem_desc *)a)->base -
+	       ((const struct mem_desc *)b)->base;
+}
+
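
mem_desc_cmp() is the comparator handed to the kernel's sort() so that the
populated regions come out ordered by base address; meminfo_show() then finds
address-space holes by comparing adjacent entries.  The same pattern in user
space, with qsort() standing in for sort() (the comparison here deliberately
avoids the subtraction idiom, which can overflow for very distant bases):

    #include <stdio.h>
    #include <stdlib.h>

    struct region {
        unsigned int base;
        unsigned int limit;
    };

    static int region_cmp(const void *a, const void *b)
    {
        const struct region *ra = a, *rb = b;

        return (ra->base > rb->base) - (ra->base < rb->base);
    }

    int main(void)
    {
        struct region r[] = {
            { 0x4000, 0x4fff }, { 0x0000, 0x0fff }, { 0x2000, 0x2fff },
        };
        int i, n = sizeof(r) / sizeof(r[0]);

        qsort(r, n, sizeof(r[0]), region_cmp);
        /* A gap between sorted neighbours is a hole. */
        for (i = 0; i < n - 1; i++)
            if (r[i].limit + 1 < r[i + 1].base)
                printf("hole: %#x-%#x\n", r[i].limit + 1, r[i + 1].base - 1);
        return 0;
    }
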
+static void mem_region_show(struct seq_file *seq, const char *name,
+			    unsigned int from, unsigned int to)
+{
+	char buf[40];
+
+	string_get_size((u64)to - from + 1, 1, STRING_UNITS_2, buf,
+			sizeof(buf));
+	seq_printf(seq, "%-15s %#x-%#x [%s]\n", name, from, to, buf);
+}
+
+static int meminfo_show(struct seq_file *seq, void *v)
+{
+	static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
+					"MC0:", "MC1:"};
+	static const char * const region[] = {
+		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
+		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
+		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
+		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
+		"RQUDP region:", "PBL region:", "TXPBL region:",
+		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
+		"On-chip queues:"
+	};
+
+	int i, n;
+	u32 lo, hi, used, alloc;
+	struct mem_desc avail[4];
+	struct mem_desc mem[ARRAY_SIZE(region) + 3];      /* up to 3 holes */
+	struct mem_desc *md = mem;
+	struct adapter *adap = seq->private;
+
+	for (i = 0; i < ARRAY_SIZE(mem); i++) {
+		mem[i].limit = 0;
+		mem[i].idx = i;
+	}
+
+	/* Find and sort the populated memory ranges */
+	i = 0;
+	lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+	if (lo & EDRAM0_ENABLE_F) {
+		hi = t4_read_reg(adap, MA_EDRAM0_BAR_A);
+		avail[i].base = EDRAM0_BASE_G(hi) << 20;
+		avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20);
+		avail[i].idx = 0;
+		i++;
+	}
+	if (lo & EDRAM1_ENABLE_F) {
+		hi = t4_read_reg(adap, MA_EDRAM1_BAR_A);
+		avail[i].base = EDRAM1_BASE_G(hi) << 20;
+		avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20);
+		avail[i].idx = 1;
+		i++;
+	}
+
+	if (is_t5(adap->params.chip)) {
+		if (lo & EXT_MEM0_ENABLE_F) {
+			hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+			avail[i].base = EXT_MEM0_BASE_G(hi) << 20;
+			avail[i].limit =
+				avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20);
+			avail[i].idx = 3;
+			i++;
+		}
+		if (lo & EXT_MEM1_ENABLE_F) {
+			hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+			avail[i].base = EXT_MEM1_BASE_G(hi) << 20;
+			avail[i].limit =
+				avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20);
+			avail[i].idx = 4;
+			i++;
+		}
+	} else {
+		if (lo & EXT_MEM_ENABLE_F) {
+			hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
+			avail[i].base = EXT_MEM_BASE_G(hi) << 20;
+			avail[i].limit =
+				avail[i].base + (EXT_MEM_SIZE_G(hi) << 20);
+			avail[i].idx = 2;
+			i++;
+		}
+	}
+	if (!i)                                    /* no memory available */
+		return 0;
+	sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+
+	(md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A);
+	(md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A);
+	(md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A);
+
+	/* the next few have explicit upper bounds */
+	md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A);
+	md->limit = md->base - 1 +
+		    t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) *
+		    PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A));
+	md++;
+
+	md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A);
+	md->limit = md->base - 1 +
+		    t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) *
+		    PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A));
+	md++;
+
+	if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
+		if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
+			hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4;
+			md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
+		} else {
+			hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
+			md->base = t4_read_reg(adap,
+					       LE_DB_HASH_TBL_BASE_ADDR_A);
+		}
+		md->limit = 0;
+	} else {
+		md->base = 0;
+		md->idx = ARRAY_SIZE(region);  /* hide it */
+	}
+	md++;
+
+#define ulp_region(reg) do { \
+	md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\
+	(md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \
+} while (0)
+
+	ulp_region(RX_ISCSI);
+	ulp_region(RX_TDDP);
+	ulp_region(TX_TPT);
+	ulp_region(RX_STAG);
+	ulp_region(RX_RQ);
+	ulp_region(RX_RQUDP);
+	ulp_region(RX_PBL);
+	ulp_region(TX_PBL);
+#undef ulp_region
+	md->base = 0;
+	md->idx = ARRAY_SIZE(region);
+	if (!is_t4(adap->params.chip)) {
+		u32 size = 0;
+		u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A);
+		u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);
+
+		if (is_t5(adap->params.chip)) {
+			if (sge_ctrl & VFIFO_ENABLE_F)
+				size = DBVFIFO_SIZE_G(fifo_size);
+		} else {
+			size = T6_DBVFIFO_SIZE_G(fifo_size);
+		}
+
+		if (size) {
+			md->base = BASEADDR_G(t4_read_reg(adap,
+					SGE_DBVFIFO_BADDR_A));
+			md->limit = md->base + (size << 2) - 1;
+		}
+	}
+
+	md++;
+
+	md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A);
+	md->limit = 0;
+	md++;
+	md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A);
+	md->limit = 0;
+	md++;
+
+	md->base = adap->vres.ocq.start;
+	if (adap->vres.ocq.size)
+		md->limit = md->base + adap->vres.ocq.size - 1;
+	else
+		md->idx = ARRAY_SIZE(region);  /* hide it */
+	md++;
+
+	/* add any address-space holes, there can be up to 3 */
+	for (n = 0; n < i - 1; n++)
+		if (avail[n].limit < avail[n + 1].base)
+			(md++)->base = avail[n].limit;
+	if (avail[n].limit)
+		(md++)->base = avail[n].limit;
+
+	n = md - mem;
+	sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+
+	for (lo = 0; lo < i; lo++)
+		mem_region_show(seq, memory[avail[lo].idx], avail[lo].base,
+				avail[lo].limit - 1);
+
+	seq_putc(seq, '\n');
+	for (i = 0; i < n; i++) {
+		if (mem[i].idx >= ARRAY_SIZE(region))
+			continue;                        /* skip holes */
+		if (!mem[i].limit)
+			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
+		mem_region_show(seq, region[mem[i].idx], mem[i].base,
+				mem[i].limit);
+	}
+
+	seq_putc(seq, '\n');
+	lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A);
+	hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
+	mem_region_show(seq, "uP RAM:", lo, hi);
+
+	lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A);
+	hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
+	mem_region_show(seq, "uP Extmem2:", lo, hi);
+
+	lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A);
+	seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
+		   PMRXMAXPAGE_G(lo),
+		   t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10,
+		   (lo & PMRXNUMCHN_F) ? 2 : 1);
+
+	lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A);
+	hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A);
+	seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
+		   PMTXMAXPAGE_G(lo),
+		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
+		   hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo));
+	seq_printf(seq, "%u p-structs\n\n",
+		   t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A));
+
+	for (i = 0; i < 4; i++) {
+		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
+			lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
+		else
+			lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4);
+		if (is_t5(adap->params.chip)) {
+			used = T5_USED_G(lo);
+			alloc = T5_ALLOC_G(lo);
+		} else {
+			used = USED_G(lo);
+			alloc = ALLOC_G(lo);
+		}
+		/* For T6 these are MAC buffer groups */
+		seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
+			   i, used, alloc);
+	}
+	for (i = 0; i < adap->params.arch.nchan; i++) {
+		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
+			lo = t4_read_reg(adap,
+					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
+		else
+			lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4);
+		if (is_t5(adap->params.chip)) {
+			used = T5_USED_G(lo);
+			alloc = T5_ALLOC_G(lo);
+		} else {
+			used = USED_G(lo);
+			alloc = ALLOC_G(lo);
+		}
+		/* For T6 these are MAC buffer groups */
+		seq_printf(seq,
+			   "Loopback %d using %u pages out of %u allocated\n",
+			   i, used, alloc);
+	}
+	return 0;
+}
+
+static int meminfo_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, meminfo_show, inode->i_private);
+}
+
+static const struct file_operations meminfo_fops = {
+	.owner   = THIS_MODULE,
+	.open    = meminfo_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
 /* Add an array of Debug FS files.
  */
 void add_debugfs_files(struct adapter *adap,
@@ -2264,6 +3001,10 @@
 		{ "mbox5", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 5 },
 		{ "mbox6", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 6 },
 		{ "mbox7", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 7 },
+		{ "trace0", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
+		{ "trace1", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
+		{ "trace2", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
+		{ "trace3", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 3 },
 		{ "l2t", &t4_l2t_fops, S_IRUSR, 0},
 		{ "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 },
 		{ "rss", &rss_debugfs_fops, S_IRUSR, 0 },
@@ -2293,7 +3034,9 @@
 #if IS_ENABLED(CONFIG_IPV6)
 		{ "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
 #endif
+		{ "tids", &tid_info_debugfs_fops, S_IRUSR, 0},
 		{ "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
+		{ "meminfo", &meminfo_fops, S_IRUSR, 0 },
 	};
 
 	/* Debug FS nodes common to all T5 and later adapters.
@@ -2341,6 +3084,10 @@
 
 	de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
 				      &flash_debugfs_fops, adap->params.sf_size);
+	debugfs_create_bool("use_backdoor", S_IWUSR | S_IRUSR,
+			    adap->debugfs_root, &adap->use_bd);
+	debugfs_create_bool("trace_rss", S_IWUSR | S_IRUSR,
+			    adap->debugfs_root, &adap->trace_rss);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 687acf7..5eedb98 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -925,6 +925,20 @@
 	const struct firmware *fw;
 	struct adapter *adap = netdev2adap(netdev);
 	unsigned int mbox = PCIE_FW_MASTER_M + 1;
+	u32 pcie_fw;
+	unsigned int master;
+	u8 master_vld = 0;
+
+	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+	master = PCIE_FW_MASTER_G(pcie_fw);
+	if (pcie_fw & PCIE_FW_MASTER_VLD_F)
+		master_vld = 1;
+	/* If another function (e.g. csiostor) is the master PF, we cannot flash */
+	if (master_vld && (master != adap->pf)) {
+		dev_warn(adap->pdev_dev,
+			 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
+		return -EOPNOTSUPP;
+	}
 
 	ef->data[sizeof(ef->data) - 1] = '\0';
 	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 351f3b1..eb22d58 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1548,7 +1548,7 @@
 		t->stid_tab[stid].data = data;
 		stid -= t->nstids;
 		stid += t->sftid_base;
-		t->stids_in_use++;
+		t->sftids_in_use++;
 	}
 	spin_unlock_bh(&t->stid_lock);
 	return stid;
@@ -1573,10 +1573,14 @@
 	else
 		bitmap_release_region(t->stid_bmap, stid, 2);
 	t->stid_tab[stid].data = NULL;
-	if (family == PF_INET)
-		t->stids_in_use--;
-	else
-		t->stids_in_use -= 4;
+	if (stid < t->nstids) {
+		if (family == PF_INET)
+			t->stids_in_use--;
+		else
+			t->stids_in_use -= 4;
+	} else {
+		t->sftids_in_use--;
+	}
 	spin_unlock_bh(&t->stid_lock);
 }
 EXPORT_SYMBOL(cxgb4_free_stid);
@@ -1654,20 +1658,25 @@
  */
 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
 {
-	void *old;
 	struct sk_buff *skb;
 	struct adapter *adap = container_of(t, struct adapter, tids);
 
-	old = t->tid_tab[tid];
+	WARN_ON(tid >= t->ntids);
+
+	if (t->tid_tab[tid]) {
+		t->tid_tab[tid] = NULL;
+		if (t->hash_base && (tid >= t->hash_base))
+			atomic_dec(&t->hash_tids_in_use);
+		else
+			atomic_dec(&t->tids_in_use);
+	}
+
 	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
 	if (likely(skb)) {
-		t->tid_tab[tid] = NULL;
 		mk_tid_release(skb, chan, tid);
 		t4_ofld_send(adap, skb);
 	} else
 		cxgb4_queue_tid_release(t, chan, tid);
-	if (old)
-		atomic_dec(&t->tids_in_use);
 }
 EXPORT_SYMBOL(cxgb4_remove_tid);
 
@@ -1702,9 +1711,11 @@
 	spin_lock_init(&t->atid_lock);
 
 	t->stids_in_use = 0;
+	t->sftids_in_use = 0;
 	t->afree = NULL;
 	t->atids_in_use = 0;
 	atomic_set(&t->tids_in_use, 0);
+	atomic_set(&t->hash_tids_in_use, 0);
 
 	/* Setup the free list for atid_tab and clear the stid bitmap. */
 	if (natids) {
@@ -3657,6 +3668,10 @@
 	 */
 	t4_get_fw_version(adap, &adap->params.fw_vers);
 	t4_get_tp_version(adap, &adap->params.tp_vers);
+	ret = t4_check_fw_version(adap);
+	/* If firmware is too old (not supported by driver) force an update. */
+	if (ret == -EFAULT)
+		state = DEV_STATE_UNINIT;
 	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
 		struct fw_info *fw_info;
 		struct fw_hdr *card_fw;
@@ -4551,6 +4566,32 @@
 		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
 #define SEGMENT_SIZE 128
 
+static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
+{
+	int ver, chip = 0;	/* chip is OR-ed below, so start from 0 */
+	u16 device_id;
+
+	/* Retrieve adapter's device ID */
+	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+	ver = device_id >> 12;
+	switch (ver) {
+	case CHELSIO_T4:
+		chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
+		break;
+	case CHELSIO_T5:
+		chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+		break;
+	case CHELSIO_T6:
+		chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+		break;
+	default:
+		dev_err(&pdev->dev, "Device %d is not supported\n",
+			device_id);
+		return -EINVAL;
+	}
+	return chip;
+}
+
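
get_chip_type() relies on Chelsio encoding the chip generation in the top
nibble of the PCI device ID, so device_id >> 12 yields 4, 5 or 6 for
T4/T5/T6 parts.  A quick standalone check of that decoding (the example IDs
are assumptions following the convention, not an exhaustive list):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Example IDs in the 0xVxxx convention; top nibble = version. */
        uint16_t ids[] = { 0x4401, 0x5410, 0x6001 };
        int i;

        for (i = 0; i < 3; i++)
            printf("device %#06x -> T%d\n", ids[i], ids[i] >> 12);
        return 0;
    }
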
 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	int func, i, err, s_qpp, qpp, num_seg;
@@ -4558,6 +4599,8 @@
 	bool highdma = false;
 	struct adapter *adapter = NULL;
 	void __iomem *regs;
+	u32 whoami, pl_rev;
+	enum chip_type chip;
 
 	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
 
@@ -4586,7 +4629,11 @@
 		goto out_unmap_bar0;
 
 	/* We control everything through one PF */
-	func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
+	whoami = readl(regs + PL_WHOAMI_A);
+	pl_rev = REV_G(readl(regs + PL_REV_A));
+	chip = get_chip_type(pdev, pl_rev);
+	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
+		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 	if (func != ent->driver_data) {
 		iounmap(regs);
 		pci_disable_device(pdev);
@@ -4757,7 +4804,7 @@
 	 */
 	cfg_queues(adapter);
 
-	adapter->l2t = t4_init_l2t();
+	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
 	if (!adapter->l2t) {
 		/* We tolerate a lack of L2T, giving up some functionality */
 		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
@@ -4782,6 +4829,22 @@
 		adapter->params.offload = 0;
 	}
 
+	if (is_offload(adapter)) {
+		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
+			u32 hash_base, hash_reg;
+
+			if (chip <= CHELSIO_T5) {
+				hash_reg = LE_DB_TID_HASHBASE_A;
+				hash_base = t4_read_reg(adapter, hash_reg);
+				adapter->tids.hash_base = hash_base / 4;
+			} else {
+				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
+				hash_base = t4_read_reg(adapter, hash_reg);
+				adapter->tids.hash_base = hash_base;
+			}
+		}
+	}
+
 	/* See what interrupts we'll be using */
 	if (msi > 1 && enable_msix(adapter) == 0)
 		adapter->flags |= USING_MSIX;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index b27897d..c3a8be5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -96,6 +96,7 @@
 	unsigned long *stid_bmap;
 	unsigned int nstids;
 	unsigned int stid_base;
+	unsigned int hash_base;
 
 	union aopen_entry *atid_tab;
 	unsigned int natids;
@@ -116,8 +117,12 @@
 
 	spinlock_t stid_lock;
 	unsigned int stids_in_use;
+	unsigned int sftids_in_use;
 
+	/* TIDs in the TCAM */
 	atomic_t tids_in_use;
+	/* TIDs in the HASH */
+	atomic_t hash_tids_in_use;
 };
 
 static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
@@ -147,7 +152,10 @@
 				    unsigned int tid)
 {
 	t->tid_tab[tid] = data;
-	atomic_inc(&t->tids_in_use);
+	if (t->hash_base && (tid >= t->hash_base))
+		atomic_inc(&t->hash_tids_in_use);
+	else
+		atomic_inc(&t->tids_in_use);
 }
 
 int cxgb4_alloc_atid(struct tid_info *t, void *data);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 252efc2..ac27898 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -51,24 +51,17 @@
 #define VLAN_NONE 0xfff
 
 /* identifies sync vs async L2T_WRITE_REQs */
-#define F_SYNC_WR    (1 << 12)
-
-enum {
-	L2T_STATE_VALID,      /* entry is up to date */
-	L2T_STATE_STALE,      /* entry may be used but needs revalidation */
-	L2T_STATE_RESOLVING,  /* entry needs address resolution */
-	L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
-
-	/* when state is one of the below the entry is not hashed */
-	L2T_STATE_SWITCHING,  /* entry is being used by a switching filter */
-	L2T_STATE_UNUSED      /* entry not in use */
-};
+#define SYNC_WR_S    12
+#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
+#define SYNC_WR_F    SYNC_WR_V(1)
 
 struct l2t_data {
+	unsigned int l2t_start;     /* start index of our piece of the L2T */
+	unsigned int l2t_size;      /* number of entries in l2tab */
 	rwlock_t lock;
 	atomic_t nfree;             /* number of free entries */
 	struct l2t_entry *rover;    /* starting point for next allocation */
-	struct l2t_entry l2tab[L2T_SIZE];
+	struct l2t_entry l2tab[0];  /* MUST BE LAST */
 };
 
 static inline unsigned int vlan_prio(const struct l2t_entry *e)
@@ -85,29 +78,36 @@
 /*
  * To avoid having to check address families we do not allow v4 and v6
  * neighbors to be on the same hash chain.  We keep v4 entries in the first
- * half of available hash buckets and v6 in the second.
+ * half of available hash buckets and v6 in the second.  We need at least two
+ * entries in our L2T for this scheme to work.
  */
 enum {
-	L2T_SZ_HALF = L2T_SIZE / 2,
-	L2T_HASH_MASK = L2T_SZ_HALF - 1
+	L2T_MIN_HASH_BUCKETS = 2,
 };
 
-static inline unsigned int arp_hash(const u32 *key, int ifindex)
+static inline unsigned int arp_hash(struct l2t_data *d, const u32 *key,
+				    int ifindex)
 {
-	return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
+	unsigned int l2t_size_half = d->l2t_size / 2;
+
+	return jhash_2words(*key, ifindex, 0) % l2t_size_half;
 }
 
-static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
+static inline unsigned int ipv6_hash(struct l2t_data *d, const u32 *key,
+				     int ifindex)
 {
+	unsigned int l2t_size_half = d->l2t_size / 2;
 	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
 
-	return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
+	return (l2t_size_half +
+		(jhash_2words(xor, ifindex, 0) % l2t_size_half));
 }
 
-static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
+static unsigned int addr_hash(struct l2t_data *d, const u32 *addr,
+			      int addr_len, int ifindex)
 {
-	return addr_len == 4 ? arp_hash(addr, ifindex) :
-			       ipv6_hash(addr, ifindex);
+	return addr_len == 4 ? arp_hash(d, addr, ifindex) :
+			       ipv6_hash(d, addr, ifindex);
 }
 
 /*
@@ -139,6 +139,8 @@
  */
 static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
 {
+	struct l2t_data *d = adap->l2t;
+	unsigned int l2t_idx = e->idx + d->l2t_start;
 	struct sk_buff *skb;
 	struct cpl_l2t_write_req *req;
 
@@ -150,10 +152,10 @@
 	INIT_TP_WR(req, 0);
 
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
-					e->idx | (sync ? F_SYNC_WR : 0) |
+					l2t_idx | (sync ? SYNC_WR_F : 0) |
 					TID_QID_V(adap->sge.fw_evtq.abs_id)));
 	req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
-	req->l2t_idx = htons(e->idx);
+	req->l2t_idx = htons(l2t_idx);
 	req->vlan = htons(e->vlan);
 	if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
 		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
@@ -190,18 +192,19 @@
  */
 void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
 {
+	struct l2t_data *d = adap->l2t;
 	unsigned int tid = GET_TID(rpl);
-	unsigned int idx = tid & (L2T_SIZE - 1);
+	unsigned int l2t_idx = tid % L2T_SIZE;
 
 	if (unlikely(rpl->status != CPL_ERR_NONE)) {
 		dev_err(adap->pdev_dev,
 			"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
-			rpl->status, idx);
+			rpl->status, l2t_idx);
 		return;
 	}
 
-	if (tid & F_SYNC_WR) {
-		struct l2t_entry *e = &adap->l2t->l2tab[idx];
+	if (tid & SYNC_WR_F) {
+		struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];
 
 		spin_lock(&e->lock);
 		if (e->state != L2T_STATE_SWITCHING) {
@@ -276,7 +279,7 @@
 		return NULL;
 
 	/* there's definitely a free entry */
-	for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
+	for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
 		if (atomic_read(&e->refcnt) == 0)
 			goto found;
 
@@ -368,7 +371,7 @@
 	int addr_len = neigh->tbl->key_len;
 	u32 *addr = (u32 *)neigh->primary_key;
 	int ifidx = neigh->dev->ifindex;
-	int hash = addr_hash(addr, addr_len, ifidx);
+	int hash = addr_hash(d, addr, addr_len, ifidx);
 
 	if (neigh->dev->flags & IFF_LOOPBACK)
 		lport = netdev2pinfo(physdev)->tx_chan + 4;
@@ -481,7 +484,7 @@
 	int addr_len = neigh->tbl->key_len;
 	u32 *addr = (u32 *) neigh->primary_key;
 	int ifidx = neigh->dev->ifindex;
-	int hash = addr_hash(addr, addr_len, ifidx);
+	int hash = addr_hash(d, addr, addr_len, ifidx);
 
 	read_lock_bh(&d->lock);
 	for (e = d->l2tab[hash].first; e; e = e->next)
@@ -554,20 +557,30 @@
 	return write_l2e(adap, e, 0);
 }
 
-struct l2t_data *t4_init_l2t(void)
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
 {
+	unsigned int l2t_size;
 	int i;
 	struct l2t_data *d;
 
-	d = t4_alloc_mem(sizeof(*d));
+	if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
+		return NULL;
+	l2t_size = l2t_end - l2t_start + 1;
+	if (l2t_size < L2T_MIN_HASH_BUCKETS)
+		return NULL;
+
+	d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
 	if (!d)
 		return NULL;
 
+	d->l2t_start = l2t_start;
+	d->l2t_size = l2t_size;
+
 	d->rover = d->l2tab;
-	atomic_set(&d->nfree, L2T_SIZE);
+	atomic_set(&d->nfree, l2t_size);
 	rwlock_init(&d->lock);
 
-	for (i = 0; i < L2T_SIZE; ++i) {
+	for (i = 0; i < d->l2t_size; ++i) {
 		d->l2tab[i].idx = i;
 		d->l2tab[i].state = L2T_STATE_UNUSED;
 		spin_lock_init(&d->l2tab[i].lock);
@@ -578,9 +591,9 @@
 
 static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
 {
-	struct l2t_entry *l2tab = seq->private;
+	struct l2t_data *d = seq->private;
 
-	return pos >= L2T_SIZE ? NULL : &l2tab[pos];
+	return pos >= d->l2t_size ? NULL : &d->l2tab[pos];
 }
 
 static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
@@ -620,6 +633,7 @@
 			 "Ethernet address  VLAN/P LP State Users Port\n");
 	else {
 		char ip[60];
+		struct l2t_data *d = seq->private;
 		struct l2t_entry *e = v;
 
 		spin_lock_bh(&e->lock);
@@ -628,7 +642,7 @@
 		else
 			sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
 		seq_printf(seq, "%4u %-25s %17pM %4d %u %2u   %c   %5u %s\n",
-			   e->idx, ip, e->dmac,
+			   e->idx + d->l2t_start, ip, e->dmac,
 			   e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
 			   l2e_state(e), atomic_read(&e->refcnt),
 			   e->neigh ? e->neigh->dev->name : "");
@@ -652,7 +666,7 @@
 		struct adapter *adap = inode->i_private;
 		struct seq_file *seq = file->private_data;
 
-		seq->private = adap->l2t->l2tab;
+		seq->private = adap->l2t;
 	}
 	return rc;
 }
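
The l2t.c rework above replaces the compile-time L2T_SIZE with a runtime-sized table, so the old power-of-two hash mask becomes a modulo by half the table size; v4 entries still land in the first half of the buckets and v6 in the second. A standalone sketch of that split, with a stand-in mixer where the driver uses jhash_2words():

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for jhash_2words(); any reasonable mixer works here. */
    static uint32_t mix(uint32_t key, uint32_t ifindex)
    {
    	return key * 2654435761u ^ ifindex * 40503u;
    }

    /* v4 keys hash into [0, size/2), v6 keys into [size/2, size);
     * size must be at least L2T_MIN_HASH_BUCKETS (2). */
    static unsigned int arp_hash(unsigned int l2t_size, uint32_t key,
    			     int ifindex)
    {
    	return mix(key, ifindex) % (l2t_size / 2);
    }

    static unsigned int ipv6_hash(unsigned int l2t_size, const uint32_t *key,
    			      int ifindex)
    {
    	uint32_t x = key[0] ^ key[1] ^ key[2] ^ key[3];

    	return l2t_size / 2 + mix(x, ifindex) % (l2t_size / 2);
    }

    int main(void)
    {
    	uint32_t v6[4] = { 0x20010db8, 0, 0, 1 };
    	unsigned int size = 64; /* runtime-sized, unlike the old L2T_SIZE */

    	printf("v4 bucket: %u\n", arp_hash(size, 0xc0a80001, 2));
    	printf("v6 bucket: %u\n", ipv6_hash(size, v6, 2));
    	return 0;
    }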
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index a30126c..b38dc52 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -39,6 +39,20 @@
 #include <linux/if_ether.h>
 #include <linux/atomic.h>
 
+enum { L2T_SIZE = 4096 };     /* # of L2T entries */
+
+enum {
+	L2T_STATE_VALID,      /* entry is up to date */
+	L2T_STATE_STALE,      /* entry may be used but needs revalidation */
+	L2T_STATE_RESOLVING,  /* entry needs address resolution */
+	L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
+	L2T_STATE_NOARP,      /* Netdev down or removed */
+
+	/* when state is one of the below the entry is not hashed */
+	L2T_STATE_SWITCHING,  /* entry is being used by a switching filter */
+	L2T_STATE_UNUSED      /* entry not in use */
+};
+
 struct adapter;
 struct l2t_data;
 struct neighbour;
@@ -56,7 +70,7 @@
  */
 struct l2t_entry {
 	u16 state;                  /* entry state */
-	u16 idx;                    /* entry index */
+	u16 idx;                    /* entry index within in-memory table */
 	u32 addr[4];                /* next hop IP or IPv6 address */
 	int ifindex;                /* neighbor's net_device's ifindex */
 	struct neighbour *neigh;    /* associated neighbour */
@@ -104,7 +118,7 @@
 struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
 int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
 			 u8 port, u8 *eth_addr);
-struct l2t_data *t4_init_l2t(void);
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end);
 void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
 
 extern const struct file_operations t4_l2t_fops;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 942db07..78f446c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1137,7 +1137,7 @@
  */
 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	u32 wr_mid;
+	u32 wr_mid, ctrl0;
 	u64 cntrl, *end;
 	int qidx, credits;
 	unsigned int flits, ndesc;
@@ -1274,9 +1274,15 @@
 #endif /* CONFIG_CHELSIO_T4_FCOE */
 	}
 
-	cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
-			   TXPKT_INTF_V(pi->tx_chan) |
-			   TXPKT_PF_V(adap->pf));
+	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
+		TXPKT_PF_V(adap->pf);
+#ifdef CONFIG_CHELSIO_T4_DCB
+	if (is_t4(adap->params.chip))
+		ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
+	else
+		ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
+#endif
+	cpl->ctrl0 = htonl(ctrl0);
 	cpl->pack = htons(0);
 	cpl->len = htons(skb->len);
 	cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1418,18 +1424,17 @@
 		struct fw_wr_hdr *wr;
 		unsigned int ndesc = skb->priority;     /* previously saved */
 
-		/*
-		 * Write descriptors and free skbs outside the lock to limit
+		written += ndesc;
+		/* Write descriptors and free skbs outside the lock to limit
 		 * wait times.  q->full is still set so new skbs will be queued.
 		 */
+		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+		txq_advance(&q->q, ndesc);
 		spin_unlock(&q->sendq.lock);
 
-		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
 		inline_tx_skb(skb, &q->q, wr);
 		kfree_skb(skb);
 
-		written += ndesc;
-		txq_advance(&q->q, ndesc);
 		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
 			unsigned long old = q->q.stops;
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2b52aae..4480625 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -37,6 +37,7 @@
 #include "t4_regs.h"
 #include "t4_values.h"
 #include "t4fw_api.h"
+#include "t4fw_version.h"
 
 /**
  *	t4_wait_op_done_val - wait until an operation is completed
@@ -345,6 +346,43 @@
 				       FW_CMD_MAX_TIMEOUT);
 }
 
+static int t4_edc_err_read(struct adapter *adap, int idx)
+{
+	u32 edc_ecc_err_addr_reg;
+	u32 rdata_reg;
+
+	if (is_t4(adap->params.chip)) {
+		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
+		return 0;
+	}
+	if (idx != 0 && idx != 1) {
+		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
+		return 0;
+	}
+
+	edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
+	rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
+
+	CH_WARN(adap,
+		"edc%d err addr 0x%x: 0x%x.\n",
+		idx, edc_ecc_err_addr_reg,
+		t4_read_reg(adap, edc_ecc_err_addr_reg));
+	CH_WARN(adap,
+		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
+		rdata_reg,
+		(unsigned long long)t4_read_reg64(adap, rdata_reg),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
+
+	return 0;
+}
+
 /**
  *	t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
  *	@adap: the adapter
@@ -1322,9 +1360,10 @@
 	};
 
 	static const unsigned int t6_reg_ranges[] = {
-		0x1008, 0x114c,
+		0x1008, 0x1124,
+		0x1138, 0x114c,
 		0x1180, 0x11b4,
-		0x11fc, 0x1250,
+		0x11fc, 0x1254,
 		0x1280, 0x133c,
 		0x1800, 0x18fc,
 		0x3000, 0x302c,
@@ -1345,18 +1384,18 @@
 		0x5a80, 0x5a9c,
 		0x5b94, 0x5bfc,
 		0x5c10, 0x5ec0,
-		0x5ec8, 0x5ec8,
+		0x5ec8, 0x5ecc,
 		0x6000, 0x6040,
-		0x6058, 0x6154,
+		0x6058, 0x619c,
 		0x7700, 0x7798,
 		0x77c0, 0x7880,
 		0x78cc, 0x78fc,
 		0x7b00, 0x7c54,
 		0x7d00, 0x7efc,
-		0x8dc0, 0x8de0,
+		0x8dc0, 0x8de4,
 		0x8df8, 0x8e84,
 		0x8ea0, 0x8f88,
-		0x8fb8, 0x911c,
+		0x8fb8, 0x9124,
 		0x9400, 0x9470,
 		0x9600, 0x971c,
 		0x9800, 0x9808,
@@ -1371,20 +1410,21 @@
 		0x9f00, 0x9f6c,
 		0x9f80, 0xa020,
 		0xd004, 0xd03c,
+		0xd100, 0xd118,
+		0xd200, 0xd31c,
 		0xdfc0, 0xdfe0,
 		0xe000, 0xf008,
 		0x11000, 0x11014,
-		0x11048, 0x11110,
-		0x11118, 0x1117c,
-		0x11190, 0x11260,
+		0x11048, 0x1117c,
+		0x11190, 0x11270,
 		0x11300, 0x1130c,
-		0x12000, 0x1205c,
+		0x12000, 0x1206c,
 		0x19040, 0x1906c,
 		0x19078, 0x19080,
 		0x1908c, 0x19124,
 		0x19150, 0x191b0,
 		0x191d0, 0x191e8,
-		0x19238, 0x192b8,
+		0x19238, 0x192bc,
 		0x193f8, 0x19474,
 		0x19490, 0x194cc,
 		0x194f0, 0x194f8,
@@ -1461,12 +1501,11 @@
 		0x1ff00, 0x1ff84,
 		0x1ffc0, 0x1ffc8,
 		0x30000, 0x30070,
-		0x30100, 0x3015c,
-		0x30190, 0x301d0,
-		0x30200, 0x30318,
+		0x30100, 0x301d0,
+		0x30200, 0x30320,
 		0x30400, 0x3052c,
 		0x30540, 0x3061c,
-		0x30800, 0x3088c,
+		0x30800, 0x30890,
 		0x308c0, 0x30908,
 		0x30910, 0x309b8,
 		0x30a00, 0x30a04,
@@ -1539,12 +1578,11 @@
 		0x33c24, 0x33c50,
 		0x33cf0, 0x33cfc,
 		0x34000, 0x34070,
-		0x34100, 0x3415c,
-		0x34190, 0x341d0,
-		0x34200, 0x34318,
+		0x34100, 0x341d0,
+		0x34200, 0x34320,
 		0x34400, 0x3452c,
 		0x34540, 0x3461c,
-		0x34800, 0x3488c,
+		0x34800, 0x34890,
 		0x348c0, 0x34908,
 		0x34910, 0x349b8,
 		0x34a00, 0x34a04,
@@ -2129,6 +2167,61 @@
 	return 0;
 }
 
+/**
+ *	t4_check_fw_version - check if the FW is supported by this driver
+ *	@adap: the adapter
+ *
+ *	Checks if an adapter's FW is compatible with the driver.  Returns 0
+ *	if the firmware meets the minimum supported version, a negative
+ *	error if the version could not be read or is below that minimum.
+ */
+int t4_check_fw_version(struct adapter *adap)
+{
+	int ret, major, minor, micro;
+	int exp_major, exp_minor, exp_micro;
+	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+	ret = t4_get_fw_version(adap, &adap->params.fw_vers);
+	if (ret)
+		return ret;
+
+	major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
+	minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
+	micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
+
+	switch (chip_version) {
+	case CHELSIO_T4:
+		exp_major = T4FW_MIN_VERSION_MAJOR;
+		exp_minor = T4FW_MIN_VERSION_MINOR;
+		exp_micro = T4FW_MIN_VERSION_MICRO;
+		break;
+	case CHELSIO_T5:
+		exp_major = T5FW_MIN_VERSION_MAJOR;
+		exp_minor = T5FW_MIN_VERSION_MINOR;
+		exp_micro = T5FW_MIN_VERSION_MICRO;
+		break;
+	case CHELSIO_T6:
+		exp_major = T6FW_MIN_VERSION_MAJOR;
+		exp_minor = T6FW_MIN_VERSION_MINOR;
+		exp_micro = T6FW_MIN_VERSION_MICRO;
+		break;
+	default:
+		dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
+			adap->chip);
+		return -EINVAL;
+	}
+
+	if (major < exp_major || (major == exp_major && minor < exp_minor) ||
+	    (major == exp_major && minor == exp_minor && micro < exp_micro)) {
+		dev_err(adap->pdev_dev,
+			"Card has firmware version %u.%u.%u, minimum "
+			"supported firmware is %u.%u.%u.\n", major, minor,
+			micro, exp_major, exp_minor, exp_micro);
+		return -EFAULT;
+	}
+	return 0;
+}
+
 /* Is the given firmware API compatible with the one the driver was compiled
  * with?
  */
@@ -3281,6 +3374,8 @@
 	if (v & ECC_CE_INT_CAUSE_F) {
 		u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
 
+		t4_edc_err_read(adapter, idx);
+
 		t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
 		if (printk_ratelimit())
 			dev_warn(adapter->pdev_dev,
@@ -3488,7 +3583,9 @@
 void t4_intr_enable(struct adapter *adapter)
 {
 	u32 val = 0;
-	u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+	u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 
 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
 		val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
@@ -3513,7 +3610,9 @@
  */
 void t4_intr_disable(struct adapter *adapter)
 {
-	u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+	u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 
 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
 	t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
@@ -3687,6 +3786,11 @@
 	return 0;
 }
 
+static unsigned int t4_use_ldst(struct adapter *adap)
+{
+	return (adap->flags & FW_OK) || !adap->use_bd;
+}
+
 /**
  *	t4_fw_tp_pio_rw - Access TP PIO through LDST
  *	@adap: the adapter
@@ -3730,7 +3834,7 @@
  */
 void t4_read_rss_key(struct adapter *adap, u32 *key)
 {
-	if (adap->flags & FW_OK)
+	if (t4_use_ldst(adap))
 		t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
 	else
 		t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
@@ -3760,7 +3864,7 @@
 	    (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
 		rss_key_addr_cnt = 32;
 
-	if (adap->flags & FW_OK)
+	if (t4_use_ldst(adap))
 		t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
 	else
 		t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
@@ -3789,7 +3893,7 @@
 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
 			   u32 *valp)
 {
-	if (adapter->flags & FW_OK)
+	if (t4_use_ldst(adapter))
 		t4_fw_tp_pio_rw(adapter, valp, 1,
 				TP_RSS_PF0_CONFIG_A + index, 1);
 	else
@@ -3829,7 +3933,7 @@
 
 	/* Grab the VFL/VFH values ...
 	 */
-	if (adapter->flags & FW_OK) {
+	if (t4_use_ldst(adapter)) {
 		t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
 		t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
 	} else {
@@ -3850,7 +3954,7 @@
 {
 	u32 pfmap;
 
-	if (adapter->flags & FW_OK)
+	if (t4_use_ldst(adapter))
 		t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
 	else
 		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
@@ -3868,7 +3972,7 @@
 {
 	u32 pfmask;
 
-	if (adapter->flags & FW_OK)
+	if (t4_use_ldst(adapter))
 		t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
 	else
 		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
@@ -3924,43 +4028,25 @@
  */
 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
 {
-	/* T6 and later has 2 channels */
-	if (adap->params.arch.nchan == NCHAN) {
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tnl_cong_drops, 8,
-				 TP_MIB_TNL_CNG_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tnl_tx_drops, 4,
-				 TP_MIB_TNL_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->ofld_vlan_drops, 4,
-				 TP_MIB_OFD_VLN_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tcp6_in_errs, 4,
-				 TP_MIB_TCP_V6IN_ERR_0_A);
-	} else {
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tnl_cong_drops, 2,
-				 TP_MIB_TNL_CNG_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->ofld_chan_drops, 2,
-				 TP_MIB_OFD_CHN_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->ofld_vlan_drops, 2,
-				 TP_MIB_OFD_VLN_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
-	}
+	int nchan = adap->params.arch.nchan;
+
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->tnl_cong_drops, nchan, TP_MIB_TNL_CNG_DROP_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->ofld_chan_drops, nchan, TP_MIB_OFD_CHN_DROP_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->ofld_vlan_drops, nchan, TP_MIB_OFD_VLN_DROP_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->tcp6_in_errs, nchan, TP_MIB_TCP_V6IN_ERR_0_A);
+
 	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
 			 &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
 }
@@ -3974,16 +4060,13 @@
  */
 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
 {
-	/* T6 and later has 2 channels */
-	if (adap->params.arch.nchan == NCHAN) {
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
-				 8, TP_MIB_CPL_IN_REQ_0_A);
-	} else {
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
-				 2, TP_MIB_CPL_IN_REQ_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
-				 2, TP_MIB_CPL_OUT_RSP_0_A);
-	}
+	int nchan = adap->params.arch.nchan;
+
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+			 nchan, TP_MIB_CPL_IN_REQ_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
+			 nchan, TP_MIB_CPL_OUT_RSP_0_A);
 }
 
 /**
@@ -4238,6 +4321,119 @@
 }
 
 /**
+ *	t4_set_trace_filter - configure one of the tracing filters
+ *	@adap: the adapter
+ *	@tp: the desired trace filter parameters
+ *	@idx: which filter to configure
+ *	@enable: whether to enable or disable the filter
+ *
+ *	Configures one of the tracing filters available in HW.  If @enable
+ *	is %0, @tp is not examined and may be %NULL.  The caller is
+ *	responsible for setting single/multiple trace mode by writing to
+ *	the MPS_TRC_CFG_A register.
+ */
+int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
+			int idx, int enable)
+{
+	int i, ofst = idx * 4;
+	u32 data_reg, mask_reg, cfg;
+	u32 multitrc = TRCMULTIFILTER_F;
+
+	if (!enable) {
+		t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
+		return 0;
+	}
+
+	cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
+	if (cfg & TRCMULTIFILTER_F) {
+		/* If multiple tracers are enabled, then maximum
+		 * capture size is 2.5KB (FIFO size of a single channel)
+		 * minus 2 flits for CPL_TRACE_PKT header.
+		 */
+		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
+			return -EINVAL;
+	} else {
+		/* If multiple tracers are disabled, a maximum packet capture
+		 * size of 9600 bytes is recommended to avoid deadlocks.  In
+		 * this mode only trace0 can be enabled and running.
+		 */
+		multitrc = 0;
+		if (tp->snap_len > 9600 || idx)
+			return -EINVAL;
+	}
+
+	if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
+	    tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
+	    tp->min_len > TFMINPKTSIZE_M)
+		return -EINVAL;
+
+	/* stop the tracer we'll be changing */
+	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
+
+	idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
+	data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
+	mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
+
+	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+		t4_write_reg(adap, data_reg, tp->data[i]);
+		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
+	}
+	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
+		     TFCAPTUREMAX_V(tp->snap_len) |
+		     TFMINPKTSIZE_V(tp->min_len));
+	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
+		     TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
+		     (is_t4(adap->params.chip) ?
+		     TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
+		     T5_TFPORT_V(tp->port) | T5_TFEN_F |
+		     T5_TFINVERTMATCH_V(tp->invert)));
+
+	return 0;
+}
+
+/**
+ *	t4_get_trace_filter - query one of the tracing filters
+ *	@adap: the adapter
+ *	@tp: the current trace filter parameters
+ *	@idx: which trace filter to query
+ *	@enabled: set to non-zero if the filter is enabled
+ *
+ *	Returns the current settings of one of the HW tracing filters.
+ */
+void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
+			 int *enabled)
+{
+	u32 ctla, ctlb;
+	int i, ofst = idx * 4;
+	u32 data_reg, mask_reg;
+
+	ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
+	ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
+
+	if (is_t4(adap->params.chip)) {
+		*enabled = !!(ctla & TFEN_F);
+		tp->port =  TFPORT_G(ctla);
+		tp->invert = !!(ctla & TFINVERTMATCH_F);
+	} else {
+		*enabled = !!(ctla & T5_TFEN_F);
+		tp->port = T5_TFPORT_G(ctla);
+		tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
+	}
+	tp->snap_len = TFCAPTUREMAX_G(ctlb);
+	tp->min_len = TFMINPKTSIZE_G(ctlb);
+	tp->skip_ofst = TFOFFSET_G(ctla);
+	tp->skip_len = TFLENGTH_G(ctla);
+
+	ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
+	data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
+	mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
+
+	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
+		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
+	}
+}
+
+/**
  *	t4_pmtx_get_stats - returns the HW stats from PMTX
  *	@adap: the adapter
  *	@cnt: where to store the count statistics
@@ -6294,7 +6490,7 @@
 	/* Cache the adapter's Compressed Filter Mode and global Ingress
 	 * Configuration.
 	 */
-	if (adap->flags & FW_OK) {
+	if (t4_use_ldst(adap)) {
 		t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
 				TP_VLAN_PRI_MAP_A, 1);
 		t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index c8488f4..640369d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -47,7 +47,6 @@
 	TCB_SIZE       = 128,   /* TCB size */
 	NMTUS          = 16,    /* size of MTU table */
 	NCCTRL_WIN     = 32,    /* # of congestion control windows */
-	L2T_SIZE       = 4096,  /* # of L2T entries */
 	PM_NSTATS      = 5,     /* # of PM stats */
 	MBOX_LEN       = 64,    /* mailbox size in bytes */
 	TRACE_LEN      = 112,   /* length of trace data and mask */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 132cb8f..b99144a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -660,6 +660,9 @@
 #define TXPKT_OVLAN_IDX_S    12
 #define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S)
 
+#define TXPKT_T5_OVLAN_IDX_S	12
+#define TXPKT_T5_OVLAN_IDX_V(x)	((x) << TXPKT_T5_OVLAN_IDX_S)
+
 #define TXPKT_INTF_S    16
 #define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S)
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index d7ca106..8353a6c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -142,6 +142,8 @@
 	CH_PCI_ID_TABLE_FENTRY(0x5013),	/* T580-chr */
 	CH_PCI_ID_TABLE_FENTRY(0x5014),	/* T580-so */
 	CH_PCI_ID_TABLE_FENTRY(0x5015),	/* T502-bt */
+	CH_PCI_ID_TABLE_FENTRY(0x5016),	/* T580-OCP-SO */
+	CH_PCI_ID_TABLE_FENTRY(0x5017),	/* T520-OCP-SO */
 	CH_PCI_ID_TABLE_FENTRY(0x5080),	/* Custom T540-cr */
 	CH_PCI_ID_TABLE_FENTRY(0x5081),	/* Custom T540-LL-cr */
 	CH_PCI_ID_TABLE_FENTRY(0x5082),	/* Custom T504-cr */
@@ -155,6 +157,22 @@
 	CH_PCI_ID_TABLE_FENTRY(0x5090),	/* Custom T540-CR */
 	CH_PCI_ID_TABLE_FENTRY(0x5091),	/* Custom T522-CR */
 	CH_PCI_ID_TABLE_FENTRY(0x5092),	/* Custom T520-CR */
+
+	/* T6 adapters */
+	CH_PCI_ID_TABLE_FENTRY(0x6001),
+	CH_PCI_ID_TABLE_FENTRY(0x6002),
+	CH_PCI_ID_TABLE_FENTRY(0x6003),
+	CH_PCI_ID_TABLE_FENTRY(0x6004),
+	CH_PCI_ID_TABLE_FENTRY(0x6005),
+	CH_PCI_ID_TABLE_FENTRY(0x6006),
+	CH_PCI_ID_TABLE_FENTRY(0x6007),
+	CH_PCI_ID_TABLE_FENTRY(0x6009),
+	CH_PCI_ID_TABLE_FENTRY(0x600d),
+	CH_PCI_ID_TABLE_FENTRY(0x6010),
+	CH_PCI_ID_TABLE_FENTRY(0x6011),
+	CH_PCI_ID_TABLE_FENTRY(0x6014),
+	CH_PCI_ID_TABLE_FENTRY(0x6015),
 CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
 
 #endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 375a825..fc3044c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -136,6 +136,20 @@
 #define  INGPACKBOUNDARY_G(x)	(((x) >> INGPACKBOUNDARY_S) \
 				 & INGPACKBOUNDARY_M)
 
+#define VFIFO_ENABLE_S    10
+#define VFIFO_ENABLE_V(x) ((x) << VFIFO_ENABLE_S)
+#define VFIFO_ENABLE_F    VFIFO_ENABLE_V(1U)
+
+#define SGE_DBVFIFO_BADDR_A 0x1138
+
+#define DBVFIFO_SIZE_S    6
+#define DBVFIFO_SIZE_M    0xfffU
+#define DBVFIFO_SIZE_G(x) (((x) >> DBVFIFO_SIZE_S) & DBVFIFO_SIZE_M)
+
+#define T6_DBVFIFO_SIZE_S    0
+#define T6_DBVFIFO_SIZE_M    0x1fffU
+#define T6_DBVFIFO_SIZE_G(x) (((x) >> T6_DBVFIFO_SIZE_S) & T6_DBVFIFO_SIZE_M)
+
 #define GLOBALENABLE_S    0
 #define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S)
 #define GLOBALENABLE_F    GLOBALENABLE_V(1U)
@@ -303,6 +317,8 @@
 #define SGE_FL_BUFFER_SIZE7_A 0x1060
 #define SGE_FL_BUFFER_SIZE8_A 0x1064
 
+#define SGE_IMSG_CTXT_BADDR_A 0x1088
+#define SGE_FLM_CACHE_BADDR_A 0x108c
 #define SGE_INGRESS_RX_THRESHOLD_A 0x10a0
 
 #define THRESHOLD_0_S    24
@@ -338,6 +354,11 @@
 #define EGRTHRESHOLDPACKING_G(x) \
 	(((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M)
 
+#define T6_EGRTHRESHOLDPACKING_S    16
+#define T6_EGRTHRESHOLDPACKING_M    0xffU
+#define T6_EGRTHRESHOLDPACKING_G(x) \
+	(((x) >> T6_EGRTHRESHOLDPACKING_S) & T6_EGRTHRESHOLDPACKING_M)
+
 #define SGE_TIMESTAMP_LO_A 0x1098
 #define SGE_TIMESTAMP_HI_A 0x109c
 
@@ -352,6 +373,7 @@
 #define TSVAL_G(x) (((x) >> TSVAL_S) & TSVAL_M)
 
 #define SGE_DBFIFO_STATUS_A 0x10a4
+#define SGE_DBVFIFO_SIZE_A 0x113c
 
 #define HP_INT_THRESH_S    28
 #define HP_INT_THRESH_M    0xfU
@@ -864,6 +886,10 @@
 /* registers for module MA */
 #define MA_EDRAM0_BAR_A 0x77c0
 
+#define EDRAM0_BASE_S    16
+#define EDRAM0_BASE_M    0xfffU
+#define EDRAM0_BASE_G(x) (((x) >> EDRAM0_BASE_S) & EDRAM0_BASE_M)
+
 #define EDRAM0_SIZE_S    0
 #define EDRAM0_SIZE_M    0xfffU
 #define EDRAM0_SIZE_V(x) ((x) << EDRAM0_SIZE_S)
@@ -871,6 +897,10 @@
 
 #define MA_EDRAM1_BAR_A 0x77c4
 
+#define EDRAM1_BASE_S    16
+#define EDRAM1_BASE_M    0xfffU
+#define EDRAM1_BASE_G(x) (((x) >> EDRAM1_BASE_S) & EDRAM1_BASE_M)
+
 #define EDRAM1_SIZE_S    0
 #define EDRAM1_SIZE_M    0xfffU
 #define EDRAM1_SIZE_V(x) ((x) << EDRAM1_SIZE_S)
@@ -878,6 +908,11 @@
 
 #define MA_EXT_MEMORY_BAR_A 0x77c8
 
+#define EXT_MEM_BASE_S    16
+#define EXT_MEM_BASE_M    0xfffU
+#define EXT_MEM_BASE_V(x) ((x) << EXT_MEM_BASE_S)
+#define EXT_MEM_BASE_G(x) (((x) >> EXT_MEM_BASE_S) & EXT_MEM_BASE_M)
+
 #define EXT_MEM_SIZE_S    0
 #define EXT_MEM_SIZE_M    0xfffU
 #define EXT_MEM_SIZE_V(x) ((x) << EXT_MEM_SIZE_S)
@@ -885,6 +920,10 @@
 
 #define MA_EXT_MEMORY1_BAR_A 0x7808
 
+#define EXT_MEM1_BASE_S    16
+#define EXT_MEM1_BASE_M    0xfffU
+#define EXT_MEM1_BASE_G(x) (((x) >> EXT_MEM1_BASE_S) & EXT_MEM1_BASE_M)
+
 #define EXT_MEM1_SIZE_S    0
 #define EXT_MEM1_SIZE_M    0xfffU
 #define EXT_MEM1_SIZE_V(x) ((x) << EXT_MEM1_SIZE_S)
@@ -892,6 +931,10 @@
 
 #define MA_EXT_MEMORY0_BAR_A 0x77c8
 
+#define EXT_MEM0_BASE_S    16
+#define EXT_MEM0_BASE_M    0xfffU
+#define EXT_MEM0_BASE_G(x) (((x) >> EXT_MEM0_BASE_S) & EXT_MEM0_BASE_M)
+
 #define EXT_MEM0_SIZE_S    0
 #define EXT_MEM0_SIZE_M    0xfffU
 #define EXT_MEM0_SIZE_V(x) ((x) << EXT_MEM0_SIZE_S)
@@ -973,6 +1016,10 @@
 
 /* registers for module CIM */
 #define CIM_BOOT_CFG_A 0x7b00
+#define CIM_SDRAM_BASE_ADDR_A 0x7b14
+#define CIM_SDRAM_ADDR_SIZE_A 0x7b18
+#define CIM_EXTMEM2_BASE_ADDR_A 0x7b1c
+#define CIM_EXTMEM2_ADDR_SIZE_A 0x7b20
 #define CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A 0x290
 
 #define  BOOTADDR_M	0xffffff00U
@@ -1231,6 +1278,33 @@
 #define TP_OUT_CONFIG_A		0x7d04
 #define TP_GLOBAL_CONFIG_A	0x7d08
 
+#define TP_CMM_TCB_BASE_A 0x7d10
+#define TP_CMM_MM_BASE_A 0x7d14
+#define TP_CMM_TIMER_BASE_A 0x7d18
+#define TP_PMM_TX_BASE_A 0x7d20
+#define TP_PMM_RX_BASE_A 0x7d28
+#define TP_PMM_RX_PAGE_SIZE_A 0x7d2c
+#define TP_PMM_RX_MAX_PAGE_A 0x7d30
+#define TP_PMM_TX_PAGE_SIZE_A 0x7d34
+#define TP_PMM_TX_MAX_PAGE_A 0x7d38
+#define TP_CMM_MM_MAX_PSTRUCT_A 0x7e6c
+
+#define PMRXNUMCHN_S    31
+#define PMRXNUMCHN_V(x) ((x) << PMRXNUMCHN_S)
+#define PMRXNUMCHN_F    PMRXNUMCHN_V(1U)
+
+#define PMTXNUMCHN_S    30
+#define PMTXNUMCHN_M    0x3U
+#define PMTXNUMCHN_G(x) (((x) >> PMTXNUMCHN_S) & PMTXNUMCHN_M)
+
+#define PMTXMAXPAGE_S    0
+#define PMTXMAXPAGE_M    0x1fffffU
+#define PMTXMAXPAGE_G(x) (((x) >> PMTXMAXPAGE_S) & PMTXMAXPAGE_M)
+
+#define PMRXMAXPAGE_S    0
+#define PMRXMAXPAGE_M    0x1fffffU
+#define PMRXMAXPAGE_G(x) (((x) >> PMRXMAXPAGE_S) & PMRXMAXPAGE_M)
+
 #define DBGLAMODE_S	14
 #define DBGLAMODE_M	0x3U
 #define DBGLAMODE_G(x)	(((x) >> DBGLAMODE_S) & DBGLAMODE_M)
@@ -1338,6 +1412,9 @@
 #define MTUVALUE_G(x) (((x) >> MTUVALUE_S) & MTUVALUE_M)
 
 #define TP_RSS_LKP_TABLE_A	0x7dec
+#define TP_CMM_MM_RX_FLST_BASE_A 0x7e60
+#define TP_CMM_MM_TX_FLST_BASE_A 0x7e64
+#define TP_CMM_MM_PS_FLST_BASE_A 0x7e68
 
 #define LKPTBLROWVLD_S    31
 #define LKPTBLROWVLD_V(x) ((x) << LKPTBLROWVLD_S)
@@ -1483,6 +1560,11 @@
 #define TP_MIB_RQE_DFR_PKT_A	0x64
 
 #define ULP_TX_INT_CAUSE_A	0x8dcc
+#define ULP_TX_TPT_LLIMIT_A	0x8dd4
+#define ULP_TX_TPT_ULIMIT_A	0x8dd8
+#define ULP_TX_PBL_LLIMIT_A	0x8ddc
+#define ULP_TX_PBL_ULIMIT_A	0x8de0
+#define ULP_TX_ERR_TABLE_BASE_A 0x8e04
 
 #define PBL_BOUND_ERR_CH3_S    31
 #define PBL_BOUND_ERR_CH3_V(x) ((x) << PBL_BOUND_ERR_CH3_S)
@@ -1804,6 +1886,9 @@
 #define TRCMULTIFILTER_F    TRCMULTIFILTER_V(1U)
 
 #define MPS_TRC_RSS_CONTROL_A		0x9808
+#define MPS_TRC_FILTER1_RSS_CONTROL_A	0x9ff4
+#define MPS_TRC_FILTER2_RSS_CONTROL_A	0x9ffc
+#define MPS_TRC_FILTER3_RSS_CONTROL_A	0xa004
 #define MPS_T5_TRC_RSS_CONTROL_A	0xa00c
 
 #define RSSCONTROL_S    16
@@ -1812,6 +1897,59 @@
 #define QUEUENUMBER_S    0
 #define QUEUENUMBER_V(x) ((x) << QUEUENUMBER_S)
 
+#define TFINVERTMATCH_S    24
+#define TFINVERTMATCH_V(x) ((x) << TFINVERTMATCH_S)
+#define TFINVERTMATCH_F    TFINVERTMATCH_V(1U)
+
+#define TFEN_S    22
+#define TFEN_V(x) ((x) << TFEN_S)
+#define TFEN_F    TFEN_V(1U)
+
+#define TFPORT_S    18
+#define TFPORT_M    0xfU
+#define TFPORT_V(x) ((x) << TFPORT_S)
+#define TFPORT_G(x) (((x) >> TFPORT_S) & TFPORT_M)
+
+#define TFLENGTH_S    8
+#define TFLENGTH_M    0x1fU
+#define TFLENGTH_V(x) ((x) << TFLENGTH_S)
+#define TFLENGTH_G(x) (((x) >> TFLENGTH_S) & TFLENGTH_M)
+
+#define TFOFFSET_S    0
+#define TFOFFSET_M    0x1fU
+#define TFOFFSET_V(x) ((x) << TFOFFSET_S)
+#define TFOFFSET_G(x) (((x) >> TFOFFSET_S) & TFOFFSET_M)
+
+#define T5_TFINVERTMATCH_S    25
+#define T5_TFINVERTMATCH_V(x) ((x) << T5_TFINVERTMATCH_S)
+#define T5_TFINVERTMATCH_F    T5_TFINVERTMATCH_V(1U)
+
+#define T5_TFEN_S    23
+#define T5_TFEN_V(x) ((x) << T5_TFEN_S)
+#define T5_TFEN_F    T5_TFEN_V(1U)
+
+#define T5_TFPORT_S    18
+#define T5_TFPORT_M    0x1fU
+#define T5_TFPORT_V(x) ((x) << T5_TFPORT_S)
+#define T5_TFPORT_G(x) (((x) >> T5_TFPORT_S) & T5_TFPORT_M)
+
+#define MPS_TRC_FILTER_MATCH_CTL_A_A 0x9810
+#define MPS_TRC_FILTER_MATCH_CTL_B_A 0x9820
+
+#define TFMINPKTSIZE_S    16
+#define TFMINPKTSIZE_M    0x1ffU
+#define TFMINPKTSIZE_V(x) ((x) << TFMINPKTSIZE_S)
+#define TFMINPKTSIZE_G(x) (((x) >> TFMINPKTSIZE_S) & TFMINPKTSIZE_M)
+
+#define TFCAPTUREMAX_S    0
+#define TFCAPTUREMAX_M    0x3fffU
+#define TFCAPTUREMAX_V(x) ((x) << TFCAPTUREMAX_S)
+#define TFCAPTUREMAX_G(x) (((x) >> TFCAPTUREMAX_S) & TFCAPTUREMAX_M)
+
+#define MPS_TRC_FILTER0_MATCH_A 0x9c00
+#define MPS_TRC_FILTER0_DONT_CARE_A 0x9c80
+#define MPS_TRC_FILTER1_MATCH_A 0x9d00
+
 #define TP_RSS_CONFIG_A 0x7df0
 
 #define TNL4TUPENIPV6_S    31
@@ -2247,12 +2385,32 @@
 #define MATCHSRAM_V(x) ((x) << MATCHSRAM_S)
 #define MATCHSRAM_F    MATCHSRAM_V(1U)
 
+#define MPS_RX_PG_RSV0_A 0x11010
+#define MPS_RX_PG_RSV4_A 0x11020
 #define MPS_RX_PERR_INT_CAUSE_A 0x11074
+#define MPS_RX_MAC_BG_PG_CNT0_A 0x11208
+#define MPS_RX_LPBK_BG_PG_CNT0_A 0x11218
 
 #define MPS_CLS_TCAM_Y_L_A 0xf000
 #define MPS_CLS_TCAM_DATA0_A 0xf000
 #define MPS_CLS_TCAM_DATA1_A 0xf004
 
+#define USED_S    16
+#define USED_M    0x7ffU
+#define USED_G(x) (((x) >> USED_S) & USED_M)
+
+#define ALLOC_S    0
+#define ALLOC_M    0x7ffU
+#define ALLOC_G(x) (((x) >> ALLOC_S) & ALLOC_M)
+
+#define T5_USED_S    16
+#define T5_USED_M    0xfffU
+#define T5_USED_G(x) (((x) >> T5_USED_S) & T5_USED_M)
+
+#define T5_ALLOC_S    0
+#define T5_ALLOC_M    0xfffU
+#define T5_ALLOC_G(x) (((x) >> T5_ALLOC_S) & T5_ALLOC_M)
+
 #define DMACH_S    0
 #define DMACH_M    0xffffU
 #define DMACH_G(x) (((x) >> DMACH_S) & DMACH_M)
@@ -2410,8 +2568,21 @@
 #define SLVFIFOPARINT_F    SLVFIFOPARINT_V(1U)
 
 #define ULP_RX_INT_CAUSE_A 0x19158
+#define ULP_RX_ISCSI_LLIMIT_A 0x1915c
+#define ULP_RX_ISCSI_ULIMIT_A 0x19160
 #define ULP_RX_ISCSI_TAGMASK_A 0x19164
 #define ULP_RX_ISCSI_PSZ_A 0x19168
+#define ULP_RX_TDDP_LLIMIT_A 0x1916c
+#define ULP_RX_TDDP_ULIMIT_A 0x19170
+#define ULP_RX_STAG_LLIMIT_A 0x1917c
+#define ULP_RX_STAG_ULIMIT_A 0x19180
+#define ULP_RX_RQ_LLIMIT_A 0x19184
+#define ULP_RX_RQ_ULIMIT_A 0x19188
+#define ULP_RX_PBL_LLIMIT_A 0x1918c
+#define ULP_RX_PBL_ULIMIT_A 0x19190
+#define ULP_RX_CTX_BASE_A 0x19194
+#define ULP_RX_RQUDP_LLIMIT_A 0x191a4
+#define ULP_RX_RQUDP_ULIMIT_A 0x191a8
 #define ULP_RX_LA_CTL_A 0x1923c
 #define ULP_RX_LA_RDPTR_A 0x19240
 #define ULP_RX_LA_RDDATA_A 0x19244
@@ -2473,6 +2644,10 @@
 #define SOURCEPF_M    0x7U
 #define SOURCEPF_G(x) (((x) >> SOURCEPF_S) & SOURCEPF_M)
 
+#define T6_SOURCEPF_S    9
+#define T6_SOURCEPF_M    0x7U
+#define T6_SOURCEPF_G(x) (((x) >> T6_SOURCEPF_S) & T6_SOURCEPF_M)
+
 #define PL_INT_CAUSE_A 0x1940c
 
 #define ULP_TX_S    27
@@ -2612,7 +2787,20 @@
 #define T6_LIPMISS_V(x) ((x) << T6_LIPMISS_S)
 #define T6_LIPMISS_F    T6_LIPMISS_V(1U)
 
+#define LE_DB_CONFIG_A 0x19c04
+#define LE_DB_SERVER_INDEX_A 0x19c18
+#define LE_DB_SRVR_START_INDEX_A 0x19c18
+#define LE_DB_ACT_CNT_IPV4_A 0x19c20
+#define LE_DB_ACT_CNT_IPV6_A 0x19c24
+#define LE_DB_HASH_TID_BASE_A 0x19c30
+#define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30
 #define LE_DB_INT_CAUSE_A 0x19c3c
+#define LE_DB_TID_HASHBASE_A 0x19df8
+#define T6_LE_DB_HASH_TID_BASE_A 0x19df8
+
+#define HASHEN_S    20
+#define HASHEN_V(x) ((x) << HASHEN_S)
+#define HASHEN_F    HASHEN_V(1U)
 
 #define REQQPARERR_S    16
 #define REQQPARERR_V(x) ((x) << REQQPARERR_S)
@@ -2634,6 +2822,10 @@
 #define LIP0_V(x) ((x) << LIP0_S)
 #define LIP0_F    LIP0_V(1U)
 
+#define BASEADDR_S    3
+#define BASEADDR_M    0x1fffffffU
+#define BASEADDR_G(x) (((x) >> BASEADDR_S) & BASEADDR_M)
+
 #define TCAMINTPERR_S    13
 #define TCAMINTPERR_V(x) ((x) << TCAMINTPERR_S)
 #define TCAMINTPERR_F    TCAMINTPERR_V(1U)
@@ -2740,10 +2932,11 @@
 #define EDC_H_BIST_DATA_PATTERN_A	0x50010
 #define EDC_H_BIST_STATUS_RDATA_A	0x50028
 
+#define EDC_H_ECC_ERR_ADDR_A		0x50084
 #define EDC_T51_BASE_ADDR		0x50800
 
-#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
-#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+#define EDC_T5_STRIDE (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_T5_REG(reg, idx) (reg + EDC_T5_STRIDE * idx)
 
 #define PL_VF_REV_A 0x4
 #define PL_VF_WHOAMI_A 0x0
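
All of the register additions above follow the same four-macro pattern: FIELD_S is the bit shift, FIELD_M the mask after shifting, FIELD_V(x) places a value, and FIELD_G(x) extracts one. A round-trip sketch using the SOURCEPF field (its shift of 8 is assumed here for illustration; only the mask and getter appear in this hunk):

    #include <stdio.h>
    #include <stdint.h>

    #define SOURCEPF_S    8                /* assumed shift, for illustration */
    #define SOURCEPF_M    0x7U
    #define SOURCEPF_V(x) ((x) << SOURCEPF_S)
    #define SOURCEPF_G(x) (((x) >> SOURCEPF_S) & SOURCEPF_M)

    int main(void)
    {
    	uint32_t reg = SOURCEPF_V(5u) | 0xffu; /* unrelated bits set too */

    	printf("pf=%u\n", SOURCEPF_G(reg)); /* -> 5 */
    	return 0;
    }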
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 32b2135..92bafa7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -40,14 +40,25 @@
 #define T4FW_VERSION_MICRO 0x20
 #define T4FW_VERSION_BUILD 0x00
 
+#define T4FW_MIN_VERSION_MAJOR 0x01
+#define T4FW_MIN_VERSION_MINOR 0x04
+#define T4FW_MIN_VERSION_MICRO 0x00
+
 #define T5FW_VERSION_MAJOR 0x01
 #define T5FW_VERSION_MINOR 0x0D
 #define T5FW_VERSION_MICRO 0x20
 #define T5FW_VERSION_BUILD 0x00
 
+#define T5FW_MIN_VERSION_MAJOR 0x00
+#define T5FW_MIN_VERSION_MINOR 0x00
+#define T5FW_MIN_VERSION_MICRO 0x00
+
 #define T6FW_VERSION_MAJOR 0x01
 #define T6FW_VERSION_MINOR 0x0D
 #define T6FW_VERSION_MICRO 0x2D
 #define T6FW_VERSION_BUILD 0x00
 
+#define T6FW_MIN_VERSION_MAJOR 0x00
+#define T6FW_MIN_VERSION_MINOR 0x00
+#define T6FW_MIN_VERSION_MICRO 0x00
 #endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index ad53e5a..fa3786a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1898,7 +1898,10 @@
 		rspq->unhandled_irqs++;
 
 	val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
-	if (is_t4(rspq->adapter->params.chip)) {
+	/* If we don't have access to the new User GTS (T5+), use the old
+	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
+	 */
+	if (unlikely(!rspq->bar2_addr)) {
 		t4_write_reg(rspq->adapter,
 			     T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
 			     val | INGRESSQID_V((u32)rspq->cntxt_id));
@@ -1998,10 +2001,13 @@
 	}
 
 	val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
-	if (is_t4(adapter->params.chip))
+	/* If we don't have access to the new User GTS (T5+), use the old
+	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
+	 */
+	if (unlikely(!intrq->bar2_addr)) {
 		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
 			     val | INGRESSQID_V(intrq->cntxt_id));
-	else {
+	} else {
 		writel(val | INGRESSQID_V(intrq->bar2_qid),
 		       intrq->bar2_addr + SGE_UDB_GTS);
 		wmb();
@@ -2662,8 +2668,22 @@
 	 * give it more Free List entries.  (Note that the SGE's Egress
 	 * Congestion Threshold is in units of 2 Free List pointers.)
 	 */
-	s->fl_starve_thres
-		= EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;
+	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
+	case CHELSIO_T4:
+		s->fl_starve_thres =
+		   EGRTHRESHOLD_G(sge_params->sge_congestion_control);
+		break;
+	case CHELSIO_T5:
+		s->fl_starve_thres =
+		   EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+		break;
+	case CHELSIO_T6:
+	default:
+		s->fl_starve_thres =
+		   T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+		break;
+	}
+	s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
 
 	/*
 	 * Set up tasklet timers.
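
The free-list starvation hunk above first picks the right per-chip bitfield, then applies the same conversion in every case: the Egress Congestion Threshold is kept in units of two free-list pointers, so the usable threshold is 2*field + 1 entries. The conversion in isolation:

    #include <stdio.h>

    /* Threshold field counts units of two free-list pointers. */
    static unsigned int fl_starve_thres(unsigned int egress_threshold)
    {
    	return egress_threshold * 2 + 1;
    }

    int main(void)
    {
    	printf("%u\n", fl_starve_thres(64)); /* -> 129 entries */
    	return 0;
    }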
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 0db6dc9..63dd5fd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -619,7 +619,8 @@
 		 */
 		whoami = t4_read_reg(adapter,
 				     T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
-		pf = SOURCEPF_G(whoami);
+		pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 
 		s_hps = (HOSTPAGESIZEPF0_S +
 			 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 84b6a2b..8b53f7d 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
 
 #define DRV_NAME		"enic"
 #define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION		"2.1.1.83"
+#define DRV_VERSION		"2.3.0.12"
 #define DRV_COPYRIGHT		"Copyright 2008-2013 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX		6
@@ -191,6 +191,25 @@
 	struct vnic_gen_stats gen_stats;
 };
 
+static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
+{
+	struct enic *enic = vdev->priv;
+
+	return enic->netdev;
+}
+
+/* Wrapper macros for kernel logging.
+ * The variable vdev (a struct vnic_dev *) must be in scope wherever
+ * these macros are used.
+ */
+#define vdev_info(args...)	dev_info(&vdev->pdev->dev, args)
+#define vdev_warn(args...)	dev_warn(&vdev->pdev->dev, args)
+#define vdev_err(args...)	dev_err(&vdev->pdev->dev, args)
+
+#define vdev_netinfo(args...)	netdev_info(vnic_get_netdev(vdev), args)
+#define vdev_netwarn(args...)	netdev_warn(vnic_get_netdev(vdev), args)
+#define vdev_neterr(args...)	netdev_err(vnic_get_netdev(vdev), args)
+
 static inline struct device *enic_get_dev(struct enic *enic)
 {
 	return &(enic->pdev->dev);
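
The vdev_* wrappers above deliberately expand to dev_*()/netdev_*() calls that reference a local variable literally named vdev, which is why the comment requires it to be in scope. A toy userspace version showing the same capture-by-name trick (fprintf stands in for the kernel log helpers; the ## varargs form is the GNU extension the kernel also relies on):

    #include <stdio.h>

    struct vnic_dev { const char *name; };

    /* Expands against whatever "vdev" names in the enclosing scope. */
    #define vdev_err(fmt, ...) \
    	fprintf(stderr, "%s: " fmt, vdev->name, ##__VA_ARGS__)

    static void hook_cq(struct vnic_dev *vdev, int index)
    {
    	vdev_err("Failed to hook CQ[%d] resource\n", index);
    }

    int main(void)
    {
    	struct vnic_dev dev = { "enic0" };

    	hook_cq(&dev, 3);
    	return 0;
    }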
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index d106186..3c677ed 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -177,7 +177,7 @@
 	int res, i;
 
 	enic = netdev_priv(dev);
-	res = skb_flow_dissect_flow_keys(skb, &keys);
+	res = skb_flow_dissect_flow_keys(skb, &keys, 0);
 	if (!res || keys.basic.n_proto != htons(ETH_P_IP) ||
 	    (keys.basic.ip_proto != IPPROTO_TCP &&
 	     keys.basic.ip_proto != IPPROTO_UDP))
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index f3f1601..f44a39c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -224,7 +224,8 @@
 	struct enic *enic = netdev_priv(netdev);
 	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
 
-	ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
+	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
+		ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
 	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
 	if (rxcoal->use_adaptive_rx_coalesce)
 		ecmd->use_adaptive_rx_coalesce = 1;
@@ -234,6 +235,53 @@
 	return 0;
 }
 
+static int enic_coalesce_valid(struct enic *enic,
+			       struct ethtool_coalesce *ec)
+{
+	u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
+	u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
+					   ec->rx_coalesce_usecs_high);
+	u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
+					  ec->rx_coalesce_usecs_low);
+
+	if (ec->rx_max_coalesced_frames		||
+	    ec->rx_coalesce_usecs_irq		||
+	    ec->rx_max_coalesced_frames_irq	||
+	    ec->tx_max_coalesced_frames		||
+	    ec->tx_coalesce_usecs_irq		||
+	    ec->tx_max_coalesced_frames_irq	||
+	    ec->stats_block_coalesce_usecs	||
+	    ec->use_adaptive_tx_coalesce	||
+	    ec->pkt_rate_low			||
+	    ec->rx_max_coalesced_frames_low	||
+	    ec->tx_coalesce_usecs_low		||
+	    ec->tx_max_coalesced_frames_low	||
+	    ec->pkt_rate_high			||
+	    ec->rx_max_coalesced_frames_high	||
+	    ec->tx_coalesce_usecs_high		||
+	    ec->tx_max_coalesced_frames_high	||
+	    ec->rate_sample_interval)
+		return -EINVAL;
+
+	if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
+	    ec->tx_coalesce_usecs)
+		return -EINVAL;
+
+	if ((ec->tx_coalesce_usecs > coalesce_usecs_max)	||
+	    (ec->rx_coalesce_usecs > coalesce_usecs_max)	||
+	    (ec->rx_coalesce_usecs_low > coalesce_usecs_max)	||
+	    (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
+		netdev_info(enic->netdev, "ethtool_set_coalesce: adapter supports max coalesce value of %d. Setting max value.\n",
+			    coalesce_usecs_max);
+
+	if (ec->rx_coalesce_usecs_high &&
+	    (rx_coalesce_usecs_high <
+	     rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
+		return -EINVAL;
+
+	return 0;
+}
+
 static int enic_set_coalesce(struct net_device *netdev,
 	struct ethtool_coalesce *ecmd)
 {
@@ -244,8 +292,12 @@
 	u32 rx_coalesce_usecs_high;
 	u32 coalesce_usecs_max;
 	unsigned int i, intr;
+	int ret;
 	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
 
+	ret = enic_coalesce_valid(enic, ecmd);
+	if (ret)
+		return ret;
 	coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
 	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
 				  coalesce_usecs_max);
@@ -257,59 +309,24 @@
 	rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
 				       coalesce_usecs_max);
 
-	switch (vnic_dev_get_intr_mode(enic->vdev)) {
-	case VNIC_DEV_INTR_MODE_INTX:
-		if (tx_coalesce_usecs != rx_coalesce_usecs)
-			return -EINVAL;
-		if (ecmd->use_adaptive_rx_coalesce	||
-		    ecmd->rx_coalesce_usecs_low		||
-		    ecmd->rx_coalesce_usecs_high)
-			return -EINVAL;
-
-		intr = enic_legacy_io_intr();
-		vnic_intr_coalescing_timer_set(&enic->intr[intr],
-			tx_coalesce_usecs);
-		break;
-	case VNIC_DEV_INTR_MODE_MSI:
-		if (tx_coalesce_usecs != rx_coalesce_usecs)
-			return -EINVAL;
-		if (ecmd->use_adaptive_rx_coalesce	||
-		    ecmd->rx_coalesce_usecs_low		||
-		    ecmd->rx_coalesce_usecs_high)
-			return -EINVAL;
-
-		vnic_intr_coalescing_timer_set(&enic->intr[0],
-			tx_coalesce_usecs);
-		break;
-	case VNIC_DEV_INTR_MODE_MSIX:
-		if (ecmd->rx_coalesce_usecs_high &&
-		    (rx_coalesce_usecs_high <
-		     rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
-				return -EINVAL;
-
+	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
 		for (i = 0; i < enic->wq_count; i++) {
 			intr = enic_msix_wq_intr(enic, i);
 			vnic_intr_coalescing_timer_set(&enic->intr[intr],
-				tx_coalesce_usecs);
+						       tx_coalesce_usecs);
 		}
-
-		rxcoal->use_adaptive_rx_coalesce =
-					!!ecmd->use_adaptive_rx_coalesce;
-		if (!rxcoal->use_adaptive_rx_coalesce)
-			enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
-
-		if (ecmd->rx_coalesce_usecs_high) {
-			rxcoal->range_end = rx_coalesce_usecs_high;
-			rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
-			rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
-							ENIC_AIC_LARGE_PKT_DIFF;
-		}
-		break;
-	default:
-		break;
+		enic->tx_coalesce_usecs = tx_coalesce_usecs;
+	}
+	rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
+	if (!rxcoal->use_adaptive_rx_coalesce)
+		enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
+	if (ecmd->rx_coalesce_usecs_high) {
+		rxcoal->range_end = rx_coalesce_usecs_high;
+		rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
+		rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
+						ENIC_AIC_LARGE_PKT_DIFF;
 	}
 
-	enic->tx_coalesce_usecs = tx_coalesce_usecs;
 	enic->rx_coalesce_usecs = rx_coalesce_usecs;
 
 	return 0;
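
Note that enic_set_coalesce() clamps each requested timer to the adapter maximum with min_t() rather than rejecting it; enic_coalesce_valid() only logs when a clamp will happen. The clamping policy in isolation:

    #include <stdio.h>

    /* min_t(u32, requested, max) as used by enic_set_coalesce(). */
    static unsigned int clamp_coal(unsigned int requested, unsigned int max)
    {
    	return requested < max ? requested : max;
    }

    int main(void)
    {
    	unsigned int hw_max = 1024; /* illustrative adapter limit */

    	printf("%u\n", clamp_coal(4000, hw_max)); /* clamped -> 1024 */
    	printf("%u\n", clamp_coal(300, hw_max));  /* unchanged -> 300 */
    	return 0;
    }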
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 918a8e4..3352d02 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1149,70 +1149,6 @@
 	return 0;
 }
 
-static int enic_poll(struct napi_struct *napi, int budget)
-{
-	struct net_device *netdev = napi->dev;
-	struct enic *enic = netdev_priv(netdev);
-	unsigned int cq_rq = enic_cq_rq(enic, 0);
-	unsigned int cq_wq = enic_cq_wq(enic, 0);
-	unsigned int intr = enic_legacy_io_intr();
-	unsigned int rq_work_to_do = budget;
-	unsigned int wq_work_to_do = -1; /* no limit */
-	unsigned int  work_done, rq_work_done = 0, wq_work_done;
-	int err;
-
-	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
-				       enic_wq_service, NULL);
-
-	if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
-		if (wq_work_done > 0)
-			vnic_intr_return_credits(&enic->intr[intr],
-						 wq_work_done,
-						 0 /* dont unmask intr */,
-						 0 /* dont reset intr timer */);
-		return budget;
-	}
-
-	if (budget > 0)
-		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
-			rq_work_to_do, enic_rq_service, NULL);
-
-	/* Accumulate intr event credits for this polling
-	 * cycle.  An intr event is the completion of a
-	 * a WQ or RQ packet.
-	 */
-
-	work_done = rq_work_done + wq_work_done;
-
-	if (work_done > 0)
-		vnic_intr_return_credits(&enic->intr[intr],
-			work_done,
-			0 /* don't unmask intr */,
-			0 /* don't reset intr timer */);
-
-	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
-	enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
-
-	/* Buffer allocation failed. Stay in polling
-	 * mode so we can try to fill the ring again.
-	 */
-
-	if (err)
-		rq_work_done = rq_work_to_do;
-
-	if (rq_work_done < rq_work_to_do) {
-
-		/* Some work done, but not enough to stay in polling,
-		 * exit polling
-		 */
-
-		napi_complete(napi);
-		vnic_intr_unmask(&enic->intr[intr]);
-	}
-
-	return rq_work_done;
-}
-
 static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
 {
 	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
@@ -1271,6 +1207,77 @@
 	pkt_size_counter->small_pkt_bytes_cnt = 0;
 }
 
+static int enic_poll(struct napi_struct *napi, int budget)
+{
+	struct net_device *netdev = napi->dev;
+	struct enic *enic = netdev_priv(netdev);
+	unsigned int cq_rq = enic_cq_rq(enic, 0);
+	unsigned int cq_wq = enic_cq_wq(enic, 0);
+	unsigned int intr = enic_legacy_io_intr();
+	unsigned int rq_work_to_do = budget;
+	unsigned int wq_work_to_do = -1; /* no limit */
+	unsigned int  work_done, rq_work_done = 0, wq_work_done;
+	int err;
+
+	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
+				       enic_wq_service, NULL);
+
+	if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
+		if (wq_work_done > 0)
+			vnic_intr_return_credits(&enic->intr[intr],
+						 wq_work_done,
+						 0 /* dont unmask intr */,
+						 0 /* dont reset intr timer */);
+		return budget;
+	}
+
+	if (budget > 0)
+		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
+			rq_work_to_do, enic_rq_service, NULL);
+
+	/* Accumulate intr event credits for this polling
+	 * cycle.  An intr event is the completion of a
+	 * WQ or RQ packet.
+	 */
+
+	work_done = rq_work_done + wq_work_done;
+
+	if (work_done > 0)
+		vnic_intr_return_credits(&enic->intr[intr],
+			work_done,
+			0 /* don't unmask intr */,
+			0 /* don't reset intr timer */);
+
+	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+	enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
+
+	/* Buffer allocation failed. Stay in polling
+	 * mode so we can try to fill the ring again.
+	 */
+
+	if (err)
+		rq_work_done = rq_work_to_do;
+	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+		/* Call the function which refreshes the intr coalescing timer
+		 * value based on the traffic.
+		 */
+		enic_calc_int_moderation(enic, &enic->rq[0]);
+
+	if (rq_work_done < rq_work_to_do) {
+
+		/* Some work was done, but not enough to stay in polling
+		 * mode; exit polling.
+		 */
+
+		napi_complete(napi);
+		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+			enic_set_int_moderation(enic, &enic->rq[0]);
+		vnic_intr_unmask(&enic->intr[intr]);
+	}
+
+	return rq_work_done;
+}
+
 #ifdef CONFIG_RFS_ACCEL
 static void enic_free_rx_cpu_rmap(struct enic *enic)
 {
@@ -1407,10 +1414,8 @@
 	if (err)
 		work_done = work_to_do;
 	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
-		/* Call the function which refreshes
-		 * the intr coalescing timer value based on
-		 * the traffic.  This is supported only in
-		 * the case of MSI-x mode
+		/* Call the function which refreshes the intr coalescing timer
+		 * value based on the traffic.
 		 */
 		enic_calc_int_moderation(enic, &enic->rq[rq]);
 
@@ -1569,12 +1574,6 @@
 	int index = -1;
 	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
 
-	/* If intr mode is not MSIX, do not do adaptive coalescing */
-	if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
-		netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing");
-		return;
-	}
-
 	/* 1. Read the link speed from fw
 	 * 2. Pick the default range for the speed
 	 * 3. Update it in enic->rx_coalesce_setting
@@ -2485,6 +2484,11 @@
 		goto err_out_iounmap;
 	}
 
+	err = vnic_devcmd_init(enic->vdev);
+
+	if (err)
+		goto err_out_vnic_unregister;
+
 #ifdef CONFIG_PCI_IOV
 	/* Get number of subvnics */
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
@@ -2659,8 +2663,8 @@
 		pci_disable_sriov(pdev);
 		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
 	}
-err_out_vnic_unregister:
 #endif
+err_out_vnic_unregister:
 	vnic_dev_unregister(enic->vdev);
 err_out_iounmap:
 	enic_iounmap(enic);
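
The label move above matters because err_out_vnic_unregister is now reachable from the new vnic_devcmd_init() failure path even when CONFIG_PCI_IOV is off. A minimal sketch of the unwind-label pattern involved, with hypothetical step/undo helpers:

    #include <stdio.h>

    static int step_a(void) { return 0; }
    static int step_b(void) { return -1; } /* pretend this fails */
    static void undo_a(void) { puts("undo a"); }

    static int probe(void)
    {
    	int err;

    	err = step_a();
    	if (err)
    		goto err_out;
    	err = step_b();
    	if (err)
    		goto err_undo_a; /* unwind in reverse order */
    	return 0;

    err_undo_a:
    	undo_a();
    err_out:
    	return err;
    }

    int main(void)
    {
    	printf("probe: %d\n", probe());
    	return 0;
    }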
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c
index 0daa1c7..abeda2a 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c
@@ -24,6 +24,7 @@
 
 #include "vnic_dev.h"
 #include "vnic_cq.h"
+#include "enic.h"
 
 void vnic_cq_free(struct vnic_cq *cq)
 {
@@ -42,7 +43,7 @@
 
 	cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
 	if (!cq->ctrl) {
-		pr_err("Failed to hook CQ[%d] resource\n", index);
+		vdev_err("Failed to hook CQ[%d] resource\n", index);
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 62f7b7b..a3badef 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -27,46 +27,9 @@
 #include "vnic_resource.h"
 #include "vnic_devcmd.h"
 #include "vnic_dev.h"
+#include "vnic_wq.h"
 #include "vnic_stats.h"
-
-enum vnic_proxy_type {
-	PROXY_NONE,
-	PROXY_BY_BDF,
-	PROXY_BY_INDEX,
-};
-
-struct vnic_res {
-	void __iomem *vaddr;
-	dma_addr_t bus_addr;
-	unsigned int count;
-};
-
-struct vnic_intr_coal_timer_info {
-	u32 mul;
-	u32 div;
-	u32 max_usec;
-};
-
-struct vnic_dev {
-	void *priv;
-	struct pci_dev *pdev;
-	struct vnic_res res[RES_TYPE_MAX];
-	enum vnic_dev_intr_mode intr_mode;
-	struct vnic_devcmd __iomem *devcmd;
-	struct vnic_devcmd_notify *notify;
-	struct vnic_devcmd_notify notify_copy;
-	dma_addr_t notify_pa;
-	u32 notify_sz;
-	dma_addr_t linkstatus_pa;
-	struct vnic_stats *stats;
-	dma_addr_t stats_pa;
-	struct vnic_devcmd_fw_info *fw_info;
-	dma_addr_t fw_info_pa;
-	enum vnic_proxy_type proxy;
-	u32 proxy_index;
-	u64 args[VNIC_DEVCMD_NARGS];
-	struct vnic_intr_coal_timer_info intr_coal_timer_info;
-};
+#include "enic.h"
 
 #define VNIC_MAX_RES_HDR_SIZE \
 	(sizeof(struct vnic_resource_header) + \
@@ -90,14 +53,14 @@
 		return -EINVAL;
 
 	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
-		pr_err("vNIC BAR0 res hdr length error\n");
+		vdev_err("vNIC BAR0 res hdr length error\n");
 		return -EINVAL;
 	}
 
 	rh  = bar->vaddr;
 	mrh = bar->vaddr;
 	if (!rh) {
-		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
+		vdev_err("vNIC BAR0 res hdr not mem-mapped\n");
 		return -EINVAL;
 	}
 
@@ -106,11 +69,10 @@
 		(ioread32(&rh->version) != VNIC_RES_VERSION)) {
 		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
 			(ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
-			pr_err("vNIC BAR0 res magic/version error "
-			"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
-			VNIC_RES_MAGIC, VNIC_RES_VERSION,
-			MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
-			ioread32(&rh->magic), ioread32(&rh->version));
+			vdev_err("vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
+				 VNIC_RES_MAGIC, VNIC_RES_VERSION,
+				 MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
+				 ioread32(&rh->magic), ioread32(&rh->version));
 			return -EINVAL;
 		}
 	}
@@ -144,17 +106,15 @@
 			/* each count is stride bytes long */
 			len = count * VNIC_RES_STRIDE;
 			if (len + bar_offset > bar[bar_num].len) {
-				pr_err("vNIC BAR0 resource %d "
-					"out-of-bounds, offset 0x%x + "
-					"size 0x%x > bar len 0x%lx\n",
-					type, bar_offset,
-					len,
-					bar[bar_num].len);
+				vdev_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
+					 type, bar_offset, len,
+					 bar[bar_num].len);
 				return -EINVAL;
 			}
 			break;
 		case RES_TYPE_INTR_PBA_LEGACY:
 		case RES_TYPE_DEVCMD:
+		case RES_TYPE_DEVCMD2:
 			len = count;
 			break;
 		default:
@@ -238,8 +198,8 @@
 		&ring->base_addr_unaligned);
 
 	if (!ring->descs_unaligned) {
-		pr_err("Failed to allocate ring (size=%d), aborting\n",
-			(int)ring->size);
+		vdev_err("Failed to allocate ring (size=%d), aborting\n",
+			 (int)ring->size);
 		return -ENOMEM;
 	}
 
@@ -281,7 +241,7 @@
 		return -ENODEV;
 	}
 	if (status & STAT_BUSY) {
-		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
+		vdev_neterr("Busy devcmd %d\n", _CMD_N(cmd));
 		return -EBUSY;
 	}
 
@@ -315,8 +275,8 @@
 					return -err;
 				if (err != ERR_ECMDUNKNOWN ||
 				    cmd != CMD_CAPABILITY)
-					pr_err("Error %d devcmd %d\n",
-						err, _CMD_N(cmd));
+					vdev_neterr("Error %d devcmd %d\n",
+						    err, _CMD_N(cmd));
 				return -err;
 			}
 
@@ -330,10 +290,162 @@
 		}
 	}
 
-	pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
+	vdev_neterr("Timed out devcmd %d\n", _CMD_N(cmd));
 	return -ETIMEDOUT;
 }
 
+static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+			  int wait)
+{
+	struct devcmd2_controller *dc2c = vdev->devcmd2;
+	struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+	unsigned int i;
+	int delay, err;
+	u32 fetch_index, new_posted;
+	u32 posted = dc2c->posted;
+
+	fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);
+
+	if (fetch_index == 0xFFFFFFFF)
+		return -ENODEV;
+
+	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
+
+	if (new_posted == fetch_index) {
+		vdev_neterr("devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
+			    _CMD_N(cmd), fetch_index, posted);
+		return -EBUSY;
+	}
+	dc2c->cmd_ring[posted].cmd = cmd;
+	dc2c->cmd_ring[posted].flags = 0;
+
+	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
+		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
+	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE)
+		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+			dc2c->cmd_ring[posted].args[i] = vdev->args[i];
+
+	/* A write memory barrier prevents the compiler and/or CPU from
+	 * reordering these writes, so the descriptor cannot be posted
+	 * before it is fully initialized; otherwise hardware could read
+	 * stale descriptor fields.
+	 */
+	wmb();
+	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
+	dc2c->posted = new_posted;
+
+	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
+		return 0;
+
+	for (delay = 0; delay < wait; delay++) {
+		if (result->color == dc2c->color) {
+			dc2c->next_result++;
+			if (dc2c->next_result == dc2c->result_size) {
+				dc2c->next_result = 0;
+				dc2c->color = dc2c->color ? 0 : 1;
+			}
+			if (result->error) {
+				err = result->error;
+				if (err != ERR_ECMDUNKNOWN ||
+				    cmd != CMD_CAPABILITY)
+					vdev_neterr("Error %d devcmd %d\n",
+						    err, _CMD_N(cmd));
+				return -err;
+			}
+			if (_CMD_DIR(cmd) & _CMD_DIR_READ)
+				for (i = 0; i < VNIC_DEVCMD2_NARGS; i++)
+					vdev->args[i] = result->results[i];
+
+			return 0;
+		}
+		udelay(100);
+	}
+
+	vdev_neterr("devcmd %d timed out\n", _CMD_N(cmd));
+
+	return -ETIMEDOUT;
+}
+
+static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
+{
+	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+	if (!vdev->devcmd)
+		return -ENODEV;
+	vdev->devcmd_rtn = _vnic_dev_cmd;
+
+	return 0;
+}
+
+static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+{
+	int err;
+	unsigned int fetch_index;
+
+	if (vdev->devcmd2)
+		return 0;
+
+	vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL);
+	if (!vdev->devcmd2)
+		return -ENOMEM;
+
+	vdev->devcmd2->color = 1;
+	vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
+	err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
+				    DEVCMD2_DESC_SIZE);
+	if (err)
+		goto err_free_devcmd2;
+
+	fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
+	if (fetch_index == 0xFFFFFFFF) { /* check if hardware is gone */
+		vdev_err("Fatal error in devcmd2 init - hardware surprise removal\n");
+
+		return -ENODEV;
+	}
+
+	enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
+			   0);
+	vdev->devcmd2->posted = fetch_index;
+	vnic_wq_enable(&vdev->devcmd2->wq);
+
+	err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
+				       DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
+	if (err)
+		goto err_free_wq;
+
+	vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
+	vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
+	vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
+	vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr |
+			VNIC_PADDR_TARGET;
+	vdev->args[1] = DEVCMD2_RING_SIZE;
+
+	err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
+	if (err)
+		goto err_free_desc_ring;
+
+	vdev->devcmd_rtn = _vnic_dev_cmd2;
+
+	return 0;
+
+err_free_desc_ring:
+	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+err_free_wq:
+	vnic_wq_disable(&vdev->devcmd2->wq);
+	vnic_wq_free(&vdev->devcmd2->wq);
+err_free_devcmd2:
+	kfree(vdev->devcmd2);
+	vdev->devcmd2 = NULL;
+
+	return err;
+}
+
+static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
+{
+	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+	vnic_wq_disable(&vdev->devcmd2->wq);
+	vnic_wq_free(&vdev->devcmd2->wq);
+	kfree(vdev->devcmd2);
+}
+
 static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
 	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
 	u64 *a0, u64 *a1, int wait)
@@ -348,7 +460,7 @@
 	vdev->args[2] = *a0;
 	vdev->args[3] = *a1;
 
-	err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
+	err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
 	if (err)
 		return err;
 
@@ -357,7 +469,8 @@
 		err = (int)vdev->args[1];
 		if (err != ERR_ECMDUNKNOWN ||
 		    cmd != CMD_CAPABILITY)
-			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
+			vdev_neterr("Error %d proxy devcmd %d\n", err,
+				    _CMD_N(cmd));
 		return err;
 	}
 
@@ -375,7 +488,7 @@
 	vdev->args[0] = *a0;
 	vdev->args[1] = *a1;
 
-	err = _vnic_dev_cmd(vdev, cmd, wait);
+	err = vdev->devcmd_rtn(vdev, cmd, wait);
 
 	*a0 = vdev->args[0];
 	*a1 = vdev->args[1];
@@ -650,7 +763,7 @@
 
 	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
 	if (err)
-		pr_err("Can't set packet filter\n");
+		vdev_neterr("Can't set packet filter\n");
 
 	return err;
 }
@@ -667,7 +780,7 @@
 
 	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
 	if (err)
-		pr_err("Can't add addr [%pM], %d\n", addr, err);
+		vdev_neterr("Can't add addr [%pM], %d\n", addr, err);
 
 	return err;
 }
@@ -684,7 +797,7 @@
 
 	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
 	if (err)
-		pr_err("Can't del addr [%pM], %d\n", addr, err);
+		vdev_neterr("Can't del addr [%pM], %d\n", addr, err);
 
 	return err;
 }
@@ -728,7 +841,7 @@
 	dma_addr_t notify_pa;
 
 	if (vdev->notify || vdev->notify_pa) {
-		pr_err("notify block %p still allocated", vdev->notify);
+		vdev_neterr("notify block %p still allocated", vdev->notify);
 		return -EINVAL;
 	}
 
@@ -838,7 +951,7 @@
 	memset(vdev->args, 0, sizeof(vdev->args));
 
 	if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
-		err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait);
+		err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait);
 	else
 		err = ERR_ECMDUNKNOWN;
 
@@ -847,7 +960,7 @@
 	 */
 	if ((err == ERR_ECMDUNKNOWN) ||
 		(!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
-		pr_warn("Using default conversion factor for interrupt coalesce timer\n");
+		vdev_netwarn("Using default conversion factor for interrupt coalesce timer\n");
 		vnic_dev_intr_coal_timer_info_default(vdev);
 		return 0;
 	}
@@ -938,6 +1051,9 @@
 			pci_free_consistent(vdev->pdev,
 				sizeof(struct vnic_devcmd_fw_info),
 				vdev->fw_info, vdev->fw_info_pa);
+		if (vdev->devcmd2)
+			vnic_dev_deinit_devcmd2(vdev);
+
 		kfree(vdev);
 	}
 }
@@ -959,10 +1075,6 @@
 	if (vnic_dev_discover_res(vdev, bar, num_bars))
 		goto err_out;
 
-	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
-	if (!vdev->devcmd)
-		goto err_out;
-
 	return vdev;
 
 err_out:
@@ -977,6 +1089,29 @@
 }
 EXPORT_SYMBOL(vnic_dev_get_pdev);
 
+int vnic_devcmd_init(struct vnic_dev *vdev)
+{
+	void __iomem *res;
+	int err;
+
+	res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+	if (res) {
+		err = vnic_dev_init_devcmd2(vdev);
+		if (err)
+			vdev_warn("DEVCMD2 init failed: %d, using DEVCMD1\n",
+				  err);
+		else
+			return 0;
+	} else {
+		vdev_warn("DEVCMD2 resource not found (old firmware?), using DEVCMD1\n");
+	}
+	err = vnic_dev_init_devcmd1(vdev);
+	if (err)
+		vdev_err("DEVCMD1 initialization failed: %d\n", err);
+
+	return err;
+}
+
 int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
 {
 	u64 a0, a1 = len;
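
The devcmd2 path introduced above posts commands on a small work-queue ring and detects completion through a color bit: the expected color flips each time the result ring wraps, so a result slot is known to be freshly written exactly when its color matches. A condensed sketch of the post-and-poll protocol (the NOWAIT flag, result copy-back, and full error reporting from _vnic_dev_cmd2() are elided):

    /* Condensed devcmd2 sketch; 'dc' mirrors struct devcmd2_controller. */
    static int devcmd2_post_and_wait(struct devcmd2_controller *dc, u32 cmd)
    {
            struct devcmd2_result *res = dc->result + dc->next_result;
            u32 next = (dc->posted + 1) % DEVCMD2_RING_SIZE;
            int t;

            dc->cmd_ring[dc->posted].cmd = cmd;
            wmb();          /* descriptor contents before posted index */
            iowrite32(next, &dc->wq_ctrl->posted_index);
            dc->posted = next;

            for (t = 0; t < 1000; t++) {
                    if (res->color == dc->color) {  /* hw wrote this slot */
                            if (++dc->next_result == dc->result_size) {
                                    dc->next_result = 0;
                                    dc->color = !dc->color; /* ring wrapped */
                            }
                            return res->error ? -res->error : 0;
                    }
                    udelay(100);
            }
            return -ETIMEDOUT;
    }
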
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index 1fb214e..b013b6a 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -70,7 +70,48 @@
 	unsigned int desc_avail;
 };
 
-struct vnic_dev;
+enum vnic_proxy_type {
+	PROXY_NONE,
+	PROXY_BY_BDF,
+	PROXY_BY_INDEX,
+};
+
+struct vnic_res {
+	void __iomem *vaddr;
+	dma_addr_t bus_addr;
+	unsigned int count;
+};
+
+struct vnic_intr_coal_timer_info {
+	u32 mul;
+	u32 div;
+	u32 max_usec;
+};
+
+struct vnic_dev {
+	void *priv;
+	struct pci_dev *pdev;
+	struct vnic_res res[RES_TYPE_MAX];
+	enum vnic_dev_intr_mode intr_mode;
+	struct vnic_devcmd __iomem *devcmd;
+	struct vnic_devcmd_notify *notify;
+	struct vnic_devcmd_notify notify_copy;
+	dma_addr_t notify_pa;
+	u32 notify_sz;
+	dma_addr_t linkstatus_pa;
+	struct vnic_stats *stats;
+	dma_addr_t stats_pa;
+	struct vnic_devcmd_fw_info *fw_info;
+	dma_addr_t fw_info_pa;
+	enum vnic_proxy_type proxy;
+	u32 proxy_index;
+	u64 args[VNIC_DEVCMD_NARGS];
+	struct vnic_intr_coal_timer_info intr_coal_timer_info;
+	struct devcmd2_controller *devcmd2;
+	int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+			  int wait);
+};
+
 struct vnic_stats;
 
 void *vnic_dev_priv(struct vnic_dev *vdev);
@@ -135,5 +176,6 @@
 int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
 int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
 			struct filter *data);
+int vnic_devcmd_init(struct vnic_dev *vdev);
 
 #endif /* _VNIC_DEV_H_ */
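
With struct vnic_dev now visible to the driver, command dispatch goes through the new devcmd_rtn function pointer, so callers such as vnic_dev_cmd() stay agnostic of which transport is active. Roughly, the selection made by vnic_devcmd_init() amounts to the following (fallback-on-failure handling elided):

    /* Sketch of the backend selection: prefer devcmd2 when its
     * resource exists, otherwise fall back to the legacy devcmd1.
     */
    if (vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0))
            vdev->devcmd_rtn = _vnic_dev_cmd2;      /* new ring-based path */
    else
            vdev->devcmd_rtn = _vnic_dev_cmd;       /* legacy register path */

    /* every issuer then simply calls: */
    err = vdev->devcmd_rtn(vdev, cmd, wait);
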
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index 435d0cd..2a81288 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -365,6 +365,12 @@
 	 */
 	CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
 
+	/* Initialization for the devcmd2 interface.
+	 * in: (u64) a0 = host result buffer physical address
+	 * in: (u16) a1 = number of entries in result buffer
+	 */
+	CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57),
+
 	/* Add a filter.
 	 * in: (u64) a0= filter address
 	 *     (u32) a1= size of filter
@@ -629,4 +635,26 @@
 	u64 args[VNIC_DEVCMD_NARGS];	/* RW cmd args (little-endian) */
 };
 
+#define DEVCMD2_FNORESULT	0x1	/* Don't copy result to host */
+
+#define VNIC_DEVCMD2_NARGS	VNIC_DEVCMD_NARGS
+struct vnic_devcmd2 {
+	u16 pad;
+	u16 flags;
+	u32 cmd;
+	u64 args[VNIC_DEVCMD2_NARGS];
+};
+
+#define VNIC_DEVCMD2_NRESULTS	VNIC_DEVCMD_NARGS
+struct devcmd2_result {
+	u64 results[VNIC_DEVCMD2_NRESULTS];
+	u32 pad;
+	u16 completed_index;
+	u8  error;
+	u8  color;
+};
+
+#define DEVCMD2_RING_SIZE	32
+#define DEVCMD2_DESC_SIZE	128
+
 #endif /* _VNIC_DEVCMD_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.c b/drivers/net/ethernet/cisco/enic/vnic_intr.c
index 0ca107f..942759d 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_intr.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_intr.c
@@ -25,6 +25,7 @@
 
 #include "vnic_dev.h"
 #include "vnic_intr.h"
+#include "enic.h"
 
 void vnic_intr_free(struct vnic_intr *intr)
 {
@@ -39,7 +40,7 @@
 
 	intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
 	if (!intr->ctrl) {
-		pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
+		vdev_err("Failed to hook INTR[%d].ctrl resource\n", index);
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/ethernet/cisco/enic/vnic_resource.h b/drivers/net/ethernet/cisco/enic/vnic_resource.h
index e0a73f1..4e45f88 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_resource.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_resource.h
@@ -48,6 +48,13 @@
 	RES_TYPE_RSVD7,
 	RES_TYPE_DEVCMD,		/* Device command region */
 	RES_TYPE_PASS_THRU_PAGE,	/* Pass-thru page */
+	RES_TYPE_SUBVNIC,		/* subvnic resource type */
+	RES_TYPE_MQ_WQ,			/* MQ Work queues */
+	RES_TYPE_MQ_RQ,			/* MQ Receive queues */
+	RES_TYPE_MQ_CQ,			/* MQ Completion queues */
+	RES_TYPE_DEPRECATED1,		/* Old version of devcmd 2 */
+	RES_TYPE_DEPRECATED2,		/* Old version of devcmd 2 */
+	RES_TYPE_DEVCMD2,		/* Device control region */
 
 	RES_TYPE_MAX,			/* Count of resource types */
 };
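
Note that the new resource types are appended between the last existing entry and RES_TYPE_MAX, which keeps every firmware-visible enum value stable. A toy example of this append-only rule:

    /* Toy example: adding FOO_NEW just before the sentinel leaves
     * FOO_A..FOO_C with the values existing firmware relies on.
     */
    enum foo_type {
            FOO_A,          /* 0 */
            FOO_B,          /* 1 */
            FOO_C,          /* 2 */
            FOO_NEW,        /* 3: new entries go here... */
            FOO_MAX,        /* ...so only the sentinel moves */
    };
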
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index c4b2183..cce2777 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -26,6 +26,7 @@
 
 #include "vnic_dev.h"
 #include "vnic_rq.h"
+#include "enic.h"
 
 static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
 {
@@ -91,7 +92,7 @@
 
 	rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
 	if (!rq->ctrl) {
-		pr_err("Failed to hook RQ[%d] resource\n", index);
+		vdev_err("Failed to hook RQ[%d] resource\n", index);
 		return -EINVAL;
 	}
 
@@ -167,6 +168,7 @@
 int vnic_rq_disable(struct vnic_rq *rq)
 {
 	unsigned int wait;
+	struct vnic_dev *vdev = rq->vdev;
 
 	iowrite32(0, &rq->ctrl->enable);
 
@@ -177,7 +179,7 @@
 		udelay(10);
 	}
 
-	pr_err("Failed to disable RQ[%d]\n", rq->index);
+	vdev_neterr("Failed to disable RQ[%d]\n", rq->index);
 
 	return -ETIMEDOUT;
 }
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index b5a1c93..05ad16a 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -26,6 +26,7 @@
 
 #include "vnic_dev.h"
 #include "vnic_wq.h"
+#include "enic.h"
 
 static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
 {
@@ -94,7 +95,7 @@
 
 	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
 	if (!wq->ctrl) {
-		pr_err("Failed to hook WQ[%d] resource\n", index);
+		vdev_err("Failed to hook WQ[%d] resource\n", index);
 		return -EINVAL;
 	}
 
@@ -113,10 +114,27 @@
 	return 0;
 }
 
-static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
-	unsigned int fetch_index, unsigned int posted_index,
-	unsigned int error_interrupt_enable,
-	unsigned int error_interrupt_offset)
+int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+			  unsigned int desc_count, unsigned int desc_size)
+{
+	int err;
+
+	wq->index = 0;
+	wq->vdev = vdev;
+
+	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+	if (!wq->ctrl)
+		return -EINVAL;
+	vnic_wq_disable(wq);
+	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+
+	return err;
+}
+
+void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+			unsigned int fetch_index, unsigned int posted_index,
+			unsigned int error_interrupt_enable,
+			unsigned int error_interrupt_offset)
 {
 	u64 paddr;
 	unsigned int count = wq->ring.desc_count;
@@ -140,7 +158,7 @@
 	unsigned int error_interrupt_enable,
 	unsigned int error_interrupt_offset)
 {
-	vnic_wq_init_start(wq, cq_index, 0, 0,
+	enic_wq_init_start(wq, cq_index, 0, 0,
 		error_interrupt_enable,
 		error_interrupt_offset);
 }
@@ -158,6 +176,7 @@
 int vnic_wq_disable(struct vnic_wq *wq)
 {
 	unsigned int wait;
+	struct vnic_dev *vdev = wq->vdev;
 
 	iowrite32(0, &wq->ctrl->enable);
 
@@ -168,7 +187,7 @@
 		udelay(10);
 	}
 
-	pr_err("Failed to disable WQ[%d]\n", wq->index);
+	vdev_neterr("Failed to disable WQ[%d]\n", wq->index);
 
 	return -ETIMEDOUT;
 }
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 2961543..0120961 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -88,6 +88,18 @@
 	unsigned int pkts_outstanding;
 };
 
+struct devcmd2_controller {
+	struct vnic_wq_ctrl __iomem *wq_ctrl;
+	struct vnic_devcmd2 *cmd_ring;
+	struct devcmd2_result *result;
+	u16 next_result;
+	u16 result_size;
+	int color;
+	struct vnic_dev_ring results_ring;
+	struct vnic_wq wq;
+	u32 posted;
+};
+
 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
 {
 	/* how many does SW own? */
@@ -174,5 +186,11 @@
 int vnic_wq_disable(struct vnic_wq *wq);
 void vnic_wq_clean(struct vnic_wq *wq,
 	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
+int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+			  unsigned int desc_count, unsigned int desc_size);
+void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+			unsigned int fetch_index, unsigned int posted_index,
+			unsigned int error_interrupt_enable,
+			unsigned int error_interrupt_offset);
 
 #endif /* _VNIC_WQ_H_ */
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index d101750..f7b4248 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -604,19 +604,7 @@
 	.probe		= ec_bhf_probe,
 	.remove		= ec_bhf_remove,
 };
-
-static int __init ec_bhf_init(void)
-{
-	return pci_register_driver(&pci_driver);
-}
-
-static void __exit ec_bhf_exit(void)
-{
-	pci_unregister_driver(&pci_driver);
-}
-
-module_init(ec_bhf_init);
-module_exit(ec_bhf_exit);
+module_pci_driver(pci_driver);
 
 module_param(polling_frequency, long, S_IRUGO);
 MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
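
module_pci_driver() removes the init/exit boilerplate deleted above; for a module whose only init/exit work is registering a PCI driver, the macro is roughly equivalent to this simplified expansion:

    /* Approximate expansion of module_pci_driver(pci_driver) */
    static int __init pci_driver_init(void)
    {
            return pci_register_driver(&pci_driver);
    }
    module_init(pci_driver_init);

    static void __exit pci_driver_exit(void)
    {
            pci_unregister_driver(&pci_driver);
    }
    module_exit(pci_driver_exit);
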
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8d12b41..0a27805 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -37,7 +37,7 @@
 #include "be_hw.h"
 #include "be_roce.h"
 
-#define DRV_VER			"10.6.0.2"
+#define DRV_VER			"10.6.0.3"
 #define DRV_NAME		"be2net"
 #define BE_NAME			"Emulex BladeEngine2"
 #define BE3_NAME		"Emulex BladeEngine3"
@@ -105,6 +105,8 @@
 
 #define MAX_VFS			30 /* Max VFs supported by BE3 FW */
 #define FW_VER_LEN		32
+#define	CNTL_SERIAL_NUM_WORDS	8  /* Controller serial number words */
+#define	CNTL_SERIAL_NUM_WORD_SZ	(sizeof(u16)) /* Byte-sz of serial num word */
 
 #define	RSS_INDIR_TABLE_LEN	128
 #define RSS_HASH_KEY_LEN	40
@@ -228,6 +230,7 @@
 struct be_tx_stats {
 	u64 tx_bytes;
 	u64 tx_pkts;
+	u64 tx_vxlan_offload_pkts;
 	u64 tx_reqs;
 	u64 tx_compl;
 	ulong tx_jiffies;
@@ -275,6 +278,7 @@
 struct be_rx_stats {
 	u64 rx_bytes;
 	u64 rx_pkts;
+	u64 rx_vxlan_offload_pkts;
 	u32 rx_drops_no_skbs;	/* skb allocation errors */
 	u32 rx_drops_no_frags;	/* HW has no fetched frags */
 	u32 rx_post_fail;	/* page post alloc failures */
@@ -590,6 +594,7 @@
 	struct rss_info rss_info;
 	/* Filters for packets that need to be sent to BMC */
 	u32 bmc_filt_mask;
+	u16 serial_num[CNTL_SERIAL_NUM_WORDS];
 };
 
 #define be_physfn(adapter)		(!adapter->virtfn)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 9eac322..3be1fbd 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -88,19 +88,21 @@
 	return wrb->payload.embedded_payload;
 }
 
-static void be_mcc_notify(struct be_adapter *adapter)
+static int be_mcc_notify(struct be_adapter *adapter)
 {
 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
 	u32 val = 0;
 
 	if (be_check_error(adapter, BE_ERROR_ANY))
-		return;
+		return -EIO;
 
 	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
 	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
 
 	wmb();
 	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
+
+	return 0;
 }
 
 /* To check if valid bit is set, check the entire word as we don't know
@@ -170,6 +172,12 @@
 		return;
 	}
 
+	if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE &&
+	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
+		complete(&adapter->et_cmd_compl);
+		return;
+	}
+
 	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
 	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
 	    subsystem == CMD_SUBSYSTEM_COMMON) {
@@ -541,7 +549,9 @@
 
 	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
 
-	be_mcc_notify(adapter);
+	status = be_mcc_notify(adapter);
+	if (status)
+		goto out;
 
 	status = be_mcc_wait_compl(adapter);
 	if (status == -EIO)
@@ -1547,7 +1557,10 @@
 	else
 		hdr->version = 2;
 
-	be_mcc_notify(adapter);
+	status = be_mcc_notify(adapter);
+	if (status)
+		goto err;
+
 	adapter->stats_cmd_sent = true;
 
 err:
@@ -1583,7 +1596,10 @@
 	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
 	req->cmd_params.params.reset_stats = 0;
 
-	be_mcc_notify(adapter);
+	status = be_mcc_notify(adapter);
+	if (status)
+		goto err;
+
 	adapter->stats_cmd_sent = true;
 
 err:
@@ -1687,8 +1703,7 @@
 			       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
 			       sizeof(*req), wrb, NULL);
 
-	be_mcc_notify(adapter);
-
+	status = be_mcc_notify(adapter);
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
@@ -1860,7 +1875,7 @@
 				cpu_to_le32(set_eqd[i].delay_multiplier);
 	}
 
-	be_mcc_notify(adapter);
+	status = be_mcc_notify(adapter);
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
@@ -1953,7 +1968,7 @@
 			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
 	}
 
-	status = be_mcc_notify_wait(adapter);
+	status = be_mcc_notify(adapter);
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
@@ -2320,7 +2335,10 @@
 	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
 				sizeof(struct lancer_cmd_req_write_object)));
 
-	be_mcc_notify(adapter);
+	status = be_mcc_notify(adapter);
+	if (status)
+		goto err_unlock;
+
 	spin_unlock_bh(&adapter->mcc_lock);
 
 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
@@ -2491,7 +2509,10 @@
 	req->params.op_code = cpu_to_le32(flash_opcode);
 	req->params.data_buf_size = cpu_to_le32(buf_size);
 
-	be_mcc_notify(adapter);
+	status = be_mcc_notify(adapter);
+	if (status)
+		goto err_unlock;
+
 	spin_unlock_bh(&adapter->mcc_lock);
 
 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
@@ -2585,7 +2606,7 @@
 	wrb = wrb_from_mccq(adapter);
 	if (!wrb) {
 		status = -EBUSY;
-		goto err;
+		goto err_unlock;
 	}
 
 	req = embedded_payload(wrb);
@@ -2599,8 +2620,19 @@
 	req->loopback_type = loopback_type;
 	req->loopback_state = enable;
 
-	status = be_mcc_notify_wait(adapter);
-err:
+	status = be_mcc_notify(adapter);
+	if (status)
+		goto err_unlock;
+
+	spin_unlock_bh(&adapter->mcc_lock);
+
+	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+					 msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
+		status = -ETIMEDOUT;
+
+	return status;
+
+err_unlock:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
@@ -2636,7 +2668,9 @@
 	req->num_pkts = cpu_to_le32(num_pkts);
 	req->loopback_type = cpu_to_le32(loopback_type);
 
-	be_mcc_notify(adapter);
+	status = be_mcc_notify(adapter);
+	if (status)
+		goto err;
 
 	spin_unlock_bh(&adapter->mcc_lock);
 
@@ -2818,10 +2852,11 @@
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_cntl_attribs *req;
 	struct be_cmd_resp_cntl_attribs *resp;
-	int status;
+	int status, i;
 	int payload_len = max(sizeof(*req), sizeof(*resp));
 	struct mgmt_controller_attrib *attribs;
 	struct be_dma_mem attribs_cmd;
+	u32 *serial_num;
 
 	if (mutex_lock_interruptible(&adapter->mbox_lock))
 		return -1;
@@ -2852,6 +2887,10 @@
 	if (!status) {
 		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
 		adapter->hba_port_num = attribs->hba_attribs.phy_port;
+		serial_num = attribs->hba_attribs.controller_serial_number;
+		for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
+			adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
+				(BIT_MASK(16) - 1);
 	}
 
 err:
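
The recurring pattern in these be_cmds.c changes: be_mcc_notify() can now fail (the adapter may already be in an error state), so every caller checks its return before sleeping, and commands that complete asynchronously drop mcc_lock and wait on a completion with a timeout rather than busy-waiting on the MCC queue. Distilled:

    /* Distilled notify-then-wait pattern, as used for the loopback
     * command above; SET_LB_MODE_TIMEOUT is the new 12000 ms timeout.
     */
    status = be_mcc_notify(adapter);
    if (status)
            goto err_unlock;        /* nothing was posted */

    spin_unlock_bh(&adapter->mcc_lock);

    /* completed from the MCC event handler via et_cmd_compl */
    if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
                                     msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
            status = -ETIMEDOUT;
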
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 00e3a6b..7d178bd 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1500,6 +1500,8 @@
 #define BE_PME_D3COLD_CAP		0x80
 
 /********************** LoopBack test *********************/
+#define SET_LB_MODE_TIMEOUT		12000
+
 struct be_cmd_req_loopback_test {
 	struct be_cmd_req_hdr hdr;
 	u32 loopback_type;
@@ -1640,10 +1642,12 @@
 struct mgmt_hba_attribs {
 	u32 rsvd0[24];
 	u8 controller_model_number[32];
-	u32 rsvd1[79];
-	u8 rsvd2[3];
+	u32 rsvd1[16];
+	u32 controller_serial_number[8];
+	u32 rsvd2[55];
+	u8 rsvd3[3];
 	u8 phy_port;
-	u32 rsvd3[13];
+	u32 rsvd4[13];
 } __packed;
 
 struct mgmt_controller_attrib {
@@ -1763,6 +1767,7 @@
 /*********************** HSW Config ***********************/
 #define PORT_FWD_TYPE_VEPA		0x3
 #define PORT_FWD_TYPE_VEB		0x2
+#define PORT_FWD_TYPE_PASSTHRU		0x1
 
 #define ENABLE_MAC_SPOOFCHK		0x2
 #define DISABLE_MAC_SPOOFCHK		0x3
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index b2476db..2c9ed17 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -138,6 +138,7 @@
 static const struct be_ethtool_stat et_rx_stats[] = {
 	{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
 	{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
+	{DRVSTAT_RX_INFO(rx_vxlan_offload_pkts)},
 	{DRVSTAT_RX_INFO(rx_compl)},
 	{DRVSTAT_RX_INFO(rx_compl_err)},
 	{DRVSTAT_RX_INFO(rx_mcast_pkts)},
@@ -190,6 +191,7 @@
 	{DRVSTAT_TX_INFO(tx_internal_parity_err)},
 	{DRVSTAT_TX_INFO(tx_bytes)},
 	{DRVSTAT_TX_INFO(tx_pkts)},
+	{DRVSTAT_TX_INFO(tx_vxlan_offload_pkts)},
 	/* Number of skbs queued for transmission by the driver */
 	{DRVSTAT_TX_INFO(tx_reqs)},
 	/* Number of times the TX queue was stopped due to lack
@@ -847,10 +849,21 @@
 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
 			    u64 *status)
 {
-	be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);
+	int ret;
+
+	ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
+				  loopback_type, 1);
+	if (ret)
+		return ret;
+
 	*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
 				       loopback_type, 1500, 2, 0xabc);
-	be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);
+
+	ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
+				  BE_NO_LOOPBACK, 1);
+	if (ret)
+		return ret;
+
 	return *status;
 }
 
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6ca693b..12687bf 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -681,11 +681,14 @@
 static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
 {
 	struct be_tx_stats *stats = tx_stats(txo);
+	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
 
 	u64_stats_update_begin(&stats->sync);
 	stats->tx_reqs++;
 	stats->tx_bytes += skb->len;
-	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
+	stats->tx_pkts += tx_pkts;
+	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
+		stats->tx_vxlan_offload_pkts += tx_pkts;
 	u64_stats_update_end(&stats->sync);
 }
 
@@ -1258,7 +1261,7 @@
 	if (is_udp_pkt((*skb))) {
 		struct udphdr *udp = udp_hdr((*skb));
 
-		switch (udp->dest) {
+		switch (ntohs(udp->dest)) {
 		case DHCP_CLIENT_PORT:
 			os2bmc = is_dhcp_client_filt_enabled(adapter);
 			goto done;
@@ -1961,6 +1964,8 @@
 	stats->rx_compl++;
 	stats->rx_bytes += rxcp->pkt_size;
 	stats->rx_pkts++;
+	if (rxcp->tunneled)
+		stats->rx_vxlan_offload_pkts++;
 	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
 		stats->rx_mcast_pkts++;
 	if (rxcp->err)
@@ -3610,15 +3615,15 @@
 
 static int be_setup_wol(struct be_adapter *adapter, bool enable)
 {
+	struct device *dev = &adapter->pdev->dev;
 	struct be_dma_mem cmd;
-	int status = 0;
 	u8 mac[ETH_ALEN];
+	int status;
 
 	eth_zero_addr(mac);
 
 	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
-	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-				     GFP_KERNEL);
+	cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
 	if (!cmd.va)
 		return -ENOMEM;
 
@@ -3627,24 +3632,18 @@
 						PCICFG_PM_CONTROL_OFFSET,
 						PCICFG_PM_CONTROL_MASK);
 		if (status) {
-			dev_err(&adapter->pdev->dev,
-				"Could not enable Wake-on-lan\n");
-			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
-					  cmd.dma);
-			return status;
+			dev_err(dev, "Could not enable Wake-on-lan\n");
+			goto err;
 		}
-		status = be_cmd_enable_magic_wol(adapter,
-						 adapter->netdev->dev_addr,
-						 &cmd);
-		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
-		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
 	} else {
-		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
-		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
-		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
+		ether_addr_copy(mac, adapter->netdev->dev_addr);
 	}
 
-	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+	status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
+	pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
+	pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
+err:
+	dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
 	return status;
 }
 
@@ -4977,7 +4976,7 @@
 {
 	if (!fhdr) {
 		dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
-		return -1;
+		return false;
 	}
 
 	/* First letter of the build version is used to identify
@@ -5132,9 +5131,6 @@
 	int status = 0;
 	u8 hsw_mode;
 
-	if (!sriov_enabled(adapter))
-		return 0;
-
 	/* BE and Lancer chips support VEB mode only */
 	if (BEx_chip(adapter) || lancer_chip(adapter)) {
 		hsw_mode = PORT_FWD_TYPE_VEB;
@@ -5144,6 +5140,9 @@
 					       NULL);
 		if (status)
 			return 0;
+
+		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
+			return 0;
 	}
 
 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
@@ -5278,6 +5277,27 @@
 }
 #endif
 
+static int be_get_phys_port_id(struct net_device *dev,
+			       struct netdev_phys_item_id *ppid)
+{
+	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
+	struct be_adapter *adapter = netdev_priv(dev);
+	u8 *id;
+
+	if (MAX_PHYS_ITEM_ID_LEN < id_len)
+		return -ENOSPC;
+
+	ppid->id[0] = adapter->hba_port_num + 1;
+	id = &ppid->id[1];
+	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
+	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
+		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
+
+	ppid->id_len = id_len;
+
+	return 0;
+}
+
 static const struct net_device_ops be_netdev_ops = {
 	.ndo_open		= be_open,
 	.ndo_stop		= be_close,
@@ -5308,6 +5328,7 @@
 	.ndo_del_vxlan_port	= be_del_vxlan_port,
 	.ndo_features_check	= be_features_check,
 #endif
+	.ndo_get_phys_port_id   = be_get_phys_port_id,
 };
 
 static void be_netdev_init(struct net_device *netdev)
@@ -5866,7 +5887,6 @@
 	if (status)
 		return status;
 
-	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 
 	status = be_resume(adapter);
@@ -5946,7 +5966,6 @@
 		return PCI_ERS_RESULT_DISCONNECT;
 
 	pci_set_master(pdev);
-	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 
 	/* Check if card is ok and fw is ready */
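
One subtle fix above: udp->dest is a big-endian (__be16) field, so switching on it directly against host-order constants such as DHCP_CLIENT_PORT only matched on big-endian CPUs. The general rule, sketched:

    /* Byte-order sketch: convert wire values to host order before
     * comparing them with host-order constants.
     */
    __be16 dest = udp_hdr(skb)->dest;        /* network byte order */

    switch (ntohs(dest)) {                   /* host order from here on */
    case DHCP_CLIENT_PORT:                   /* 68, a host-order constant */
            handle_dhcp(skb);                /* hypothetical handler */
            break;
    }
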
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 24a85b2..63c2bcf 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -150,6 +150,9 @@
 	if (!priv->tx_packet_sent || tx_ctrl.ct)
 		return;
 
+	/* Ack Tx ctrl register */
+	nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0);
+
 	/* Check Tx transmit error */
 	if (unlikely(tx_ctrl.et)) {
 		ndev->stats.tx_errors++;
@@ -158,11 +161,7 @@
 		ndev->stats.tx_bytes += tx_ctrl.nt;
 	}
 
-	if (priv->tx_skb) {
-		dev_kfree_skb(priv->tx_skb);
-		priv->tx_skb = NULL;
-	}
-
+	dev_kfree_skb(priv->tx_skb);
 	priv->tx_packet_sent = false;
 
 	if (netif_queue_stopped(ndev))
@@ -180,15 +179,16 @@
 {
 	struct net_device *ndev = napi->dev;
 	struct nps_enet_priv *priv = netdev_priv(ndev);
-	struct nps_enet_buf_int_enable buf_int_enable;
 	u32 work_done;
 
-	buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
-	buf_int_enable.tx_done = NPS_ENET_ENABLE;
 	nps_enet_tx_handler(ndev);
 	work_done = nps_enet_rx_handler(ndev);
 	if (work_done < budget) {
+		struct nps_enet_buf_int_enable buf_int_enable;
+
 		napi_complete(napi);
+		buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
+		buf_int_enable.tx_done = NPS_ENET_ENABLE;
 		nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
 				 buf_int_enable.value);
 	}
@@ -211,12 +211,13 @@
 {
 	struct net_device *ndev = dev_instance;
 	struct nps_enet_priv *priv = netdev_priv(ndev);
-	struct nps_enet_buf_int_cause buf_int_cause;
+	struct nps_enet_rx_ctl rx_ctrl;
+	struct nps_enet_tx_ctl tx_ctrl;
 
-	buf_int_cause.value =
-			nps_enet_reg_get(priv, NPS_ENET_REG_BUF_INT_CAUSE);
+	rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
+	tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
 
-	if (buf_int_cause.tx_done || buf_int_cause.rx_rdy)
+	if ((!tx_ctrl.ct && priv->tx_packet_sent) || rx_ctrl.cr)
 		if (likely(napi_schedule_prep(&priv->napi))) {
 			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
 			__napi_schedule(&priv->napi);
@@ -307,11 +308,8 @@
 
 	/* Discard Packets bigger than max frame length */
 	max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN;
-	if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) {
+	if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH)
 		ge_mac_cfg_3->max_len = max_frame_length;
-		nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
-				 ge_mac_cfg_3->value);
-	}
 
 	/* Enable interrupts */
 	buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
@@ -339,11 +337,14 @@
 	ge_mac_cfg_0.tx_fc_en = NPS_ENET_ENABLE;
 	ge_mac_cfg_0.rx_fc_en = NPS_ENET_ENABLE;
 	ge_mac_cfg_0.tx_fc_retr = NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR;
+	ge_mac_cfg_3->cf_drop = NPS_ENET_ENABLE;
 
 	/* Enable Rx and Tx */
 	ge_mac_cfg_0.rx_en = NPS_ENET_ENABLE;
 	ge_mac_cfg_0.tx_en = NPS_ENET_ENABLE;
 
+	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
+			 ge_mac_cfg_3->value);
 	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0,
 			 ge_mac_cfg_0.value);
 }
@@ -527,10 +528,10 @@
 	/* This driver handles one frame at a time  */
 	netif_stop_queue(ndev);
 
-	nps_enet_send_frame(ndev, skb);
-
 	priv->tx_skb = skb;
 
+	nps_enet_send_frame(ndev, skb);
+
 	return NETDEV_TX_OK;
 }
 
diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h
index fc45c9d..6703674 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.h
+++ b/drivers/net/ethernet/ezchip/nps_enet.h
@@ -36,7 +36,6 @@
 #define NPS_ENET_REG_RX_CTL		0x810
 #define NPS_ENET_REG_RX_BUF		0x818
 #define NPS_ENET_REG_BUF_INT_ENABLE	0x8C0
-#define NPS_ENET_REG_BUF_INT_CAUSE	0x8C4
 #define NPS_ENET_REG_GE_MAC_CFG_0	0x1000
 #define NPS_ENET_REG_GE_MAC_CFG_1	0x1004
 #define NPS_ENET_REG_GE_MAC_CFG_2	0x1008
@@ -108,25 +107,6 @@
 	};
 };
 
-/* Interrupt cause for data buffer events register */
-struct nps_enet_buf_int_cause {
-	union {
-		/* tx_done: Interrupt in the case when current frame was
-		 *          read from TX buffer.
-		 * rx_rdy:  Interrupt in the case when new frame is ready
-		 *          in RX buffer.
-		 */
-		struct {
-			u32
-			__reserved:30,
-			tx_done:1,
-			rx_rdy:1;
-		};
-
-		u32 value;
-	};
-};
-
 /* Gbps Eth MAC Configuration 0 register */
 struct nps_enet_ge_mac_cfg_0 {
 	union {
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index b349e6f..91925e3 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -364,7 +364,7 @@
 	return 0;
 }
 
-static int
+static struct bufdesc *
 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
 			     struct sk_buff *skb,
 			     struct net_device *ndev)
@@ -439,10 +439,7 @@
 		bdp->cbd_sc = status;
 	}
 
-	txq->cur_tx = bdp;
-
-	return 0;
-
+	return bdp;
 dma_mapping_error:
 	bdp = txq->cur_tx;
 	for (i = 0; i < frag; i++) {
@@ -450,7 +447,7 @@
 		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
 				bdp->cbd_datlen, DMA_TO_DEVICE);
 	}
-	return NETDEV_TX_OK;
+	return ERR_PTR(-ENOMEM);
 }
 
 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
@@ -467,7 +464,6 @@
 	unsigned int estatus = 0;
 	unsigned int index;
 	int entries_free;
-	int ret;
 
 	entries_free = fec_enet_get_free_txdesc_num(fep, txq);
 	if (entries_free < MAX_SKB_FRAGS + 1) {
@@ -485,6 +481,7 @@
 
 	/* Fill in a Tx ring entry */
 	bdp = txq->cur_tx;
+	last_bdp = bdp;
 	status = bdp->cbd_sc;
 	status &= ~BD_ENET_TX_STATS;
 
@@ -513,9 +510,9 @@
 	}
 
 	if (nr_frags) {
-		ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
-		if (ret)
-			return ret;
+		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
+		if (IS_ERR(last_bdp))
+			return NETDEV_TX_OK;
 	} else {
 		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
 		if (fep->bufdesc_ex) {
@@ -544,7 +541,6 @@
 		ebdp->cbd_esc = estatus;
 	}
 
-	last_bdp = txq->cur_tx;
 	index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
 	/* Save skb pointer */
 	txq->tx_skbuff[index] = skb;
@@ -563,6 +559,10 @@
 
 	skb_tx_timestamp(skb);
 
+	/* Make sure the updates to bdp and tx_skbuff are performed
+	 * before cur_tx is advanced.
+	 */
+	wmb();
 	txq->cur_tx = bdp;
 
 	/* Trigger transmission start */
@@ -1218,10 +1218,11 @@
 	/* get next bdp of dirty_tx */
 	bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
 
-	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
-
-		/* current queue is empty */
-		if (bdp == txq->cur_tx)
+	while (bdp != READ_ONCE(txq->cur_tx)) {
+		/* Order the load of cur_tx and cbd_sc */
+		rmb();
+		status = READ_ONCE(bdp->cbd_sc);
+		if (status & BD_ENET_TX_READY)
 			break;
 
 		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
@@ -1275,6 +1276,10 @@
 		/* Free the sk buffer associated with this last transmit */
 		dev_kfree_skb_any(skb);
 
+		/* Make sure the updates to bdp and tx_skbuff are performed
+		 * before dirty_tx is advanced.
+		 */
+		wmb();
 		txq->dirty_tx = bdp;
 
 		/* Update pointer to next buffer descriptor to be transmitted */
@@ -1402,6 +1407,7 @@
 		if ((status & BD_ENET_RX_LAST) == 0)
 			netdev_err(ndev, "rcv is not +last\n");
 
+		writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
 
 		/* Check for errors. */
 		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
@@ -1774,7 +1780,7 @@
 	int ret = 0;
 
 	ret = pm_runtime_get_sync(dev);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	fep->mii_timeout = 0;
@@ -1813,7 +1819,7 @@
 	int ret = 0;
 
 	ret = pm_runtime_get_sync(dev);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	fep->mii_timeout = 0;
@@ -2865,7 +2871,7 @@
 	int ret;
 
 	ret = pm_runtime_get_sync(&fep->pdev->dev);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	pinctrl_pm_select_default_state(&fep->pdev->dev);
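
The fec barriers above form a classic producer/consumer pairing on the Tx ring: the xmit path publishes the descriptor and tx_skbuff updates with wmb() before advancing cur_tx, and the cleanup path snapshots cur_tx with READ_ONCE() and issues rmb() before reading descriptor status, so it never sees a half-written entry. In outline (fill_descriptor() is a hypothetical stand-in):

    /* Producer (xmit path) */
    fill_descriptor(bdp);           /* bdp + tx_skbuff updates */
    wmb();                          /* publish them before cur_tx */
    txq->cur_tx = bdp;

    /* Consumer (tx completion) */
    while (bdp != READ_ONCE(txq->cur_tx)) {
            rmb();                  /* order cur_tx load before cbd_sc load */
            status = READ_ONCE(bdp->cbd_sc);
            if (status & BD_ENET_TX_READY)
                    break;          /* hardware still owns this one */
            /* reclaim buffer, advance bdp ... */
    }
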
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index f457a23..1543cf0 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -506,12 +506,6 @@
 		break;
 
 	default:
-		/*
-		 * register RXMTRL must be set in order to do V1 packets,
-		 * therefore it is not possible to time stamp both V1 Sync and
-		 * Delay_Req messages and hardware does not support
-		 * timestamping all packets => return error
-		 */
 		fep->hwts_rx_en = 1;
 		config.rx_filter = HWTSTAMP_FILTER_ALL;
 		break;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 10b3bbbb..4b69d061 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -109,15 +109,15 @@
 
 #define TX_TIMEOUT      (1*HZ)
 
-const char gfar_driver_version[] = "1.3";
+const char gfar_driver_version[] = "2.0";
 
 static int gfar_enet_open(struct net_device *dev);
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void gfar_reset_task(struct work_struct *work);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
-static struct sk_buff *gfar_new_skb(struct net_device *dev,
-				    dma_addr_t *bufaddr);
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+				int alloc_cnt);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -141,8 +141,7 @@
 #endif
 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-			       int amount_pull, struct napi_struct *napi);
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
 static void gfar_halt_nodisable(struct gfar_private *priv);
 static void gfar_clear_exact_match(struct net_device *dev);
 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
@@ -169,17 +168,15 @@
 	bdp->lstatus = cpu_to_be32(lstatus);
 }
 
-static int gfar_init_bds(struct net_device *ndev)
+static void gfar_init_bds(struct net_device *ndev)
 {
 	struct gfar_private *priv = netdev_priv(ndev);
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	struct gfar_priv_tx_q *tx_queue = NULL;
 	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *txbdp;
-	struct rxbd8 *rxbdp;
 	u32 __iomem *rfbptr;
 	int i, j;
-	dma_addr_t bufaddr;
 
 	for (i = 0; i < priv->num_tx_queues; i++) {
 		tx_queue = priv->tx_queue[i];
@@ -207,40 +204,26 @@
 	rfbptr = &regs->rfbptr0;
 	for (i = 0; i < priv->num_rx_queues; i++) {
 		rx_queue = priv->rx_queue[i];
-		rx_queue->cur_rx = rx_queue->rx_bd_base;
-		rx_queue->skb_currx = 0;
-		rxbdp = rx_queue->rx_bd_base;
 
-		for (j = 0; j < rx_queue->rx_ring_size; j++) {
-			struct sk_buff *skb = rx_queue->rx_skbuff[j];
+		rx_queue->next_to_clean = 0;
+		rx_queue->next_to_use = 0;
+		rx_queue->next_to_alloc = 0;
 
-			if (skb) {
-				bufaddr = be32_to_cpu(rxbdp->bufPtr);
-			} else {
-				skb = gfar_new_skb(ndev, &bufaddr);
-				if (!skb) {
-					netdev_err(ndev, "Can't allocate RX buffers\n");
-					return -ENOMEM;
-				}
-				rx_queue->rx_skbuff[j] = skb;
-			}
-
-			gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
-			rxbdp++;
-		}
+		/* make sure next_to_clean != next_to_use after this
+		 * by leaving at least 1 unused descriptor
+		 */
+		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
 
 		rx_queue->rfbptr = rfbptr;
 		rfbptr += 2;
 	}
-
-	return 0;
 }
 
 static int gfar_alloc_skb_resources(struct net_device *ndev)
 {
 	void *vaddr;
 	dma_addr_t addr;
-	int i, j, k;
+	int i, j;
 	struct gfar_private *priv = netdev_priv(ndev);
 	struct device *dev = priv->dev;
 	struct gfar_priv_tx_q *tx_queue = NULL;
@@ -279,7 +262,8 @@
 		rx_queue = priv->rx_queue[i];
 		rx_queue->rx_bd_base = vaddr;
 		rx_queue->rx_bd_dma_base = addr;
-		rx_queue->dev = ndev;
+		rx_queue->ndev = ndev;
+		rx_queue->dev = dev;
 		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
 		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
 	}
@@ -294,25 +278,20 @@
 		if (!tx_queue->tx_skbuff)
 			goto cleanup;
 
-		for (k = 0; k < tx_queue->tx_ring_size; k++)
-			tx_queue->tx_skbuff[k] = NULL;
+		for (j = 0; j < tx_queue->tx_ring_size; j++)
+			tx_queue->tx_skbuff[j] = NULL;
 	}
 
 	for (i = 0; i < priv->num_rx_queues; i++) {
 		rx_queue = priv->rx_queue[i];
-		rx_queue->rx_skbuff =
-			kmalloc_array(rx_queue->rx_ring_size,
-				      sizeof(*rx_queue->rx_skbuff),
-				      GFP_KERNEL);
-		if (!rx_queue->rx_skbuff)
+		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
+					    sizeof(*rx_queue->rx_buff),
+					    GFP_KERNEL);
+		if (!rx_queue->rx_buff)
 			goto cleanup;
-
-		for (j = 0; j < rx_queue->rx_ring_size; j++)
-			rx_queue->rx_skbuff[j] = NULL;
 	}
 
-	if (gfar_init_bds(ndev))
-		goto cleanup;
+	gfar_init_bds(ndev);
 
 	return 0;
 
@@ -354,10 +333,8 @@
 	}
 }
 
-static void gfar_rx_buff_size_config(struct gfar_private *priv)
+static void gfar_rx_offload_en(struct gfar_private *priv)
 {
-	int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
-
 	/* set this when rx hw offload (TOE) functions are being used */
 	priv->uses_rxfcb = 0;
 
@@ -366,16 +343,6 @@
 
 	if (priv->hwts_rx_en)
 		priv->uses_rxfcb = 1;
-
-	if (priv->uses_rxfcb)
-		frame_size += GMAC_FCB_LEN;
-
-	frame_size += priv->padding;
-
-	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
-		     INCREMENTAL_BUFFER_SIZE;
-
-	priv->rx_buffer_size = frame_size;
 }
 
 static void gfar_mac_rx_config(struct gfar_private *priv)
@@ -593,9 +560,8 @@
 		if (!priv->rx_queue[i])
 			return -ENOMEM;
 
-		priv->rx_queue[i]->rx_skbuff = NULL;
 		priv->rx_queue[i]->qindex = i;
-		priv->rx_queue[i]->dev = priv->ndev;
+		priv->rx_queue[i]->ndev = priv->ndev;
 	}
 	return 0;
 }
@@ -1187,12 +1153,11 @@
 
 	udelay(3);
 
-	/* Compute rx_buff_size based on config flags */
-	gfar_rx_buff_size_config(priv);
+	gfar_rx_offload_en(priv);
 
 	/* Initialize the max receive frame/buffer lengths */
-	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
-	gfar_write(&regs->mrblr, priv->rx_buffer_size);
+	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
+	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
 
 	/* Initialize the Minimum Frame Length Register */
 	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
@@ -1200,12 +1165,11 @@
 	/* Initialize MACCFG2. */
 	tempval = MACCFG2_INIT_SETTINGS;
 
-	/* If the mtu is larger than the max size for standard
-	 * ethernet frames (ie, a jumbo frame), then set maccfg2
-	 * to allow huge frames, and to check the length
+	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
+	 * are marked as truncated.  Avoid this by MACCFG2[Huge Frame]=1,
+	 * and by checking RxBD[LG] and discarding larger than MAXFRM.
 	 */
-	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
-	    gfar_has_errata(priv, GFAR_ERRATA_74))
+	if (gfar_has_errata(priv, GFAR_ERRATA_74))
 		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
 
 	gfar_write(&regs->maccfg2, tempval);
@@ -1415,8 +1379,6 @@
 	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
 		dev->needed_headroom = GMAC_FCB_LEN;
 
-	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
-
 	/* Initializing some of the rx/tx queue level parameters */
 	for (i = 0; i < priv->num_tx_queues; i++) {
 		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
@@ -1599,10 +1561,7 @@
 		return 0;
 	}
 
-	if (gfar_init_bds(ndev)) {
-		free_skb_resources(priv);
-		return -ENOMEM;
-	}
+	gfar_init_bds(ndev);
 
 	gfar_mac_reset(priv);
 
@@ -1893,26 +1852,32 @@
 
 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 {
-	struct rxbd8 *rxbdp;
-	struct gfar_private *priv = netdev_priv(rx_queue->dev);
 	int i;
 
-	rxbdp = rx_queue->rx_bd_base;
+	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
+
+	if (rx_queue->skb)
+		dev_kfree_skb(rx_queue->skb);
 
 	for (i = 0; i < rx_queue->rx_ring_size; i++) {
-		if (rx_queue->rx_skbuff[i]) {
-			dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
-					 priv->rx_buffer_size,
-					 DMA_FROM_DEVICE);
-			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
-			rx_queue->rx_skbuff[i] = NULL;
-		}
+		struct	gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
+
 		rxbdp->lstatus = 0;
 		rxbdp->bufPtr = 0;
 		rxbdp++;
+
+		if (!rxb->page)
+			continue;
+
+		dma_unmap_single(rx_queue->dev, rxb->dma,
+				 PAGE_SIZE, DMA_FROM_DEVICE);
+		__free_page(rxb->page);
+
+		rxb->page = NULL;
 	}
-	kfree(rx_queue->rx_skbuff);
-	rx_queue->rx_skbuff = NULL;
+
+	kfree(rx_queue->rx_buff);
+	rx_queue->rx_buff = NULL;
 }
 
 /* If there are any tx skbs or rx skbs still around, free them.
@@ -1937,7 +1902,7 @@
 
 	for (i = 0; i < priv->num_rx_queues; i++) {
 		rx_queue = priv->rx_queue[i];
-		if (rx_queue->rx_skbuff)
+		if (rx_queue->rx_buff)
 			free_skb_rx_queue(rx_queue);
 	}
 
@@ -2500,7 +2465,7 @@
 	struct gfar_private *priv = netdev_priv(dev);
 	int frame_size = new_mtu + ETH_HLEN;
 
-	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
+	if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) {
 		netif_err(priv, drv, dev, "Invalid MTU setting\n");
 		return -EINVAL;
 	}
@@ -2554,15 +2519,6 @@
 	schedule_work(&priv->reset_task);
 }
 
-static void gfar_align_skb(struct sk_buff *skb)
-{
-	/* We need the data buffer to be aligned properly.  We will reserve
-	 * as many bytes as needed to align the data properly
-	 */
-	skb_reserve(skb, RXBUF_ALIGNMENT -
-		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
-}
-
 /* Interrupt Handler for Transmit complete */
 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
@@ -2620,7 +2576,8 @@
 
 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
 			struct skb_shared_hwtstamps shhwtstamps;
-			u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
+					  ~0x7UL);
 
 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
@@ -2669,49 +2626,85 @@
 	netdev_tx_completed_queue(txq, howmany, bytes_sent);
 }
 
-static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
+static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
 {
-	struct gfar_private *priv = netdev_priv(dev);
-	struct sk_buff *skb;
-
-	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
-	if (!skb)
-		return NULL;
-
-	gfar_align_skb(skb);
-
-	return skb;
-}
-
-static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	struct sk_buff *skb;
+	struct page *page;
 	dma_addr_t addr;
 
-	skb = gfar_alloc_skb(dev);
-	if (!skb)
-		return NULL;
+	page = dev_alloc_page();
+	if (unlikely(!page))
+		return false;
 
-	addr = dma_map_single(priv->dev, skb->data,
-			      priv->rx_buffer_size, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(priv->dev, addr))) {
-		dev_kfree_skb_any(skb);
-		return NULL;
+	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
+		__free_page(page);
+
+		return false;
 	}
 
-	*bufaddr = addr;
-	return skb;
+	rxb->dma = addr;
+	rxb->page = page;
+	rxb->page_offset = 0;
+
+	return true;
 }
 
-static inline void count_errors(unsigned short status, struct net_device *dev)
+static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
 {
-	struct gfar_private *priv = netdev_priv(dev);
-	struct net_device_stats *stats = &dev->stats;
+	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
+	struct gfar_extra_stats *estats = &priv->extra_stats;
+
+	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
+	atomic64_inc(&estats->rx_alloc_err);
+}
+
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+				int alloc_cnt)
+{
+	struct rxbd8 *bdp;
+	struct gfar_rx_buff *rxb;
+	int i;
+
+	i = rx_queue->next_to_use;
+	bdp = &rx_queue->rx_bd_base[i];
+	rxb = &rx_queue->rx_buff[i];
+
+	while (alloc_cnt--) {
+		/* try reuse page */
+		/* reuse the page if we have one, else allocate afresh */
+			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
+				gfar_rx_alloc_err(rx_queue);
+				break;
+			}
+		}
+
+		/* Setup the new RxBD */
+		gfar_init_rxbdp(rx_queue, bdp,
+				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
+
+		/* Update to the next pointer */
+		bdp++;
+		rxb++;
+
+		if (unlikely(++i == rx_queue->rx_ring_size)) {
+			i = 0;
+			bdp = rx_queue->rx_bd_base;
+			rxb = rx_queue->rx_buff;
+		}
+	}
+
+	rx_queue->next_to_use = i;
+	rx_queue->next_to_alloc = i;
+}
+
+static void count_errors(u32 lstatus, struct net_device *ndev)
+{
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &ndev->stats;
 	struct gfar_extra_stats *estats = &priv->extra_stats;
 
 	/* If the packet was truncated, none of the other errors matter */
-	if (status & RXBD_TRUNCATED) {
+	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
 		stats->rx_length_errors++;
 
 		atomic64_inc(&estats->rx_trunc);
@@ -2719,25 +2712,25 @@
 		return;
 	}
 	/* Count the errors, if there were any */
-	if (status & (RXBD_LARGE | RXBD_SHORT)) {
+	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
 		stats->rx_length_errors++;
 
-		if (status & RXBD_LARGE)
+		if (lstatus & BD_LFLAG(RXBD_LARGE))
 			atomic64_inc(&estats->rx_large);
 		else
 			atomic64_inc(&estats->rx_short);
 	}
-	if (status & RXBD_NONOCTET) {
+	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
 		stats->rx_frame_errors++;
 		atomic64_inc(&estats->rx_nonoctet);
 	}
-	if (status & RXBD_CRCERR) {
+	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
 		atomic64_inc(&estats->rx_crcerr);
 		stats->rx_crc_errors++;
 	}
-	if (status & RXBD_OVERRUN) {
+	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
 		atomic64_inc(&estats->rx_overrun);
-		stats->rx_crc_errors++;
+		stats->rx_over_errors++;
 	}
 }
 
@@ -2788,6 +2781,93 @@
 	return IRQ_HANDLED;
 }
 
+static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
+			     struct sk_buff *skb, bool first)
+{
+	unsigned int size = lstatus & BD_LENGTH_MASK;
+	struct page *page = rxb->page;
+
+	/* Remove the FCS from the packet length */
+	if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
+		size -= ETH_FCS_LEN;
+
+	if (likely(first))
+		skb_put(skb, size);
+	else
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+				rxb->page_offset + RXBUF_ALIGNMENT,
+				size, GFAR_RXB_TRUESIZE);
+
+	/* try to reuse the page */
+	if (unlikely(page_count(page) != 1))
+		return false;
+
+	/* change offset to the other half */
+	rxb->page_offset ^= GFAR_RXB_TRUESIZE;
+
+	atomic_inc(&page->_count);
+
+	return true;
+}
+
+static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
+			       struct gfar_rx_buff *old_rxb)
+{
+	struct gfar_rx_buff *new_rxb;
+	u16 nta = rxq->next_to_alloc;
+
+	new_rxb = &rxq->rx_buff[nta];
+
+	/* find next buf that can reuse a page */
+	nta++;
+	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
+
+	/* copy page reference */
+	*new_rxb = *old_rxb;
+
+	/* sync for use by the device */
+	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
+					 old_rxb->page_offset,
+					 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+}
+
+static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
+					    u32 lstatus, struct sk_buff *skb)
+{
+	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
+	struct page *page = rxb->page;
+	bool first = false;
+
+	if (likely(!skb)) {
+		void *buff_addr = page_address(page) + rxb->page_offset;
+
+		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
+		if (unlikely(!skb)) {
+			gfar_rx_alloc_err(rx_queue);
+			return NULL;
+		}
+		skb_reserve(skb, RXBUF_ALIGNMENT);
+		first = true;
+	}
+
+	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
+				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+
+	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
+		/* reuse the free half of the page */
+		gfar_reuse_rx_page(rx_queue, rxb);
+	} else {
+		/* page cannot be reused, unmap it */
+		dma_unmap_page(rx_queue->dev, rxb->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
+	}
+
+	/* clear rxb content */
+	rxb->page = NULL;
+
+	return skb;
+}
+
 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
 {
 	/* If valid headers were found, and valid sums
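
gfar_get_next_rxbuff() above builds the skb from half of a DMA-mapped page; whether that page can be recycled comes down to its refcount. A condensed sketch of the decision (gfar_try_recycle is a hypothetical name; it assumes GFAR_RXB_TRUESIZE is half a page, as the driver uses it):

    /* Condensed half-page recycling sketch, in the same spirit as
     * gfar_add_rx_frag() above.
     */
    static bool gfar_try_recycle(struct gfar_rx_buff *rxb)
    {
            /* the stack still holds a reference: give the page up */
            if (page_count(rxb->page) != 1)
                    return false;

            /* flip to the unused half and keep our own reference */
            rxb->page_offset ^= GFAR_RXB_TRUESIZE;
            atomic_inc(&rxb->page->_count);

            return true;
    }
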
@@ -2802,10 +2882,9 @@
 }
 
 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-			       int amount_pull, struct napi_struct *napi)
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
 {
-	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_private *priv = netdev_priv(ndev);
 	struct rxfcb *fcb = NULL;
 
 	/* fcb is at the beginning if exists */
@@ -2814,10 +2893,8 @@
 	/* Remove the FCB from the skb
 	 * Remove the padded bytes, if there are any
 	 */
-	if (amount_pull) {
-		skb_record_rx_queue(skb, fcb->rq);
-		skb_pull(skb, amount_pull);
-	}
+	if (priv->uses_rxfcb)
+		skb_pull(skb, GMAC_FCB_LEN);
 
 	/* Get receive timestamp from the skb */
 	if (priv->hwts_rx_en) {
@@ -2831,24 +2908,20 @@
 	if (priv->padding)
 		skb_pull(skb, priv->padding);
 
-	if (dev->features & NETIF_F_RXCSUM)
+	if (ndev->features & NETIF_F_RXCSUM)
 		gfar_rx_checksum(skb, fcb);
 
 	/* Tell the skb what kind of packet this is */
-	skb->protocol = eth_type_trans(skb, dev);
+	skb->protocol = eth_type_trans(skb, ndev);
 
 	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
 	 * Even if vlan rx accel is disabled, on some chips
 	 * RXFCB_VLN is pseudo randomly set.
 	 */
-	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
 	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 				       be16_to_cpu(fcb->vlctl));
-
-	/* Send the packet up the stack */
-	napi_gro_receive(napi, skb);
-
 }
 
 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2857,91 +2930,89 @@
  */
 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 {
-	struct net_device *dev = rx_queue->dev;
-	struct rxbd8 *bdp, *base;
-	struct sk_buff *skb;
-	int pkt_len;
-	int amount_pull;
-	int howmany = 0;
-	struct gfar_private *priv = netdev_priv(dev);
+	struct net_device *ndev = rx_queue->ndev;
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct rxbd8 *bdp;
+	int i, howmany = 0;
+	struct sk_buff *skb = rx_queue->skb;
+	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
+	unsigned int total_bytes = 0, total_pkts = 0;
 
 	/* Get the first full descriptor */
-	bdp = rx_queue->cur_rx;
-	base = rx_queue->rx_bd_base;
+	i = rx_queue->next_to_clean;
 
-	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
+	while (rx_work_limit--) {
+		u32 lstatus;
 
-	while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
-		struct sk_buff *newskb;
-		dma_addr_t bufaddr;
-
-		rmb();
-
-		/* Add another skb for the future */
-		newskb = gfar_new_skb(dev, &bufaddr);
-
-		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
-
-		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
-				 priv->rx_buffer_size, DMA_FROM_DEVICE);
-
-		if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
-			     be16_to_cpu(bdp->length) > priv->rx_buffer_size))
-			bdp->status = cpu_to_be16(RXBD_LARGE);
-
-		/* We drop the frame if we failed to allocate a new buffer */
-		if (unlikely(!newskb ||
-			     !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
-			     be16_to_cpu(bdp->status) & RXBD_ERR)) {
-			count_errors(be16_to_cpu(bdp->status), dev);
-
-			if (unlikely(!newskb)) {
-				newskb = skb;
-				bufaddr = be32_to_cpu(bdp->bufPtr);
-			} else if (skb)
-				dev_kfree_skb(skb);
-		} else {
-			/* Increment the number of packets */
-			rx_queue->stats.rx_packets++;
-			howmany++;
-
-			if (likely(skb)) {
-				pkt_len = be16_to_cpu(bdp->length) -
-					  ETH_FCS_LEN;
-				/* Remove the FCS from the packet length */
-				skb_put(skb, pkt_len);
-				rx_queue->stats.rx_bytes += pkt_len;
-				skb_record_rx_queue(skb, rx_queue->qindex);
-				gfar_process_frame(dev, skb, amount_pull,
-						   &rx_queue->grp->napi_rx);
-
-			} else {
-				netif_warn(priv, rx_err, dev, "Missing skb!\n");
-				rx_queue->stats.rx_dropped++;
-				atomic64_inc(&priv->extra_stats.rx_skbmissing);
-			}
-
+		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
+			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+			cleaned_cnt = 0;
 		}
 
-		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
+		bdp = &rx_queue->rx_bd_base[i];
+		lstatus = be32_to_cpu(bdp->lstatus);
+		if (lstatus & BD_LFLAG(RXBD_EMPTY))
+			break;
 
-		/* Setup the new bdp */
-		gfar_init_rxbdp(rx_queue, bdp, bufaddr);
+		/* order rx buffer descriptor reads */
+		rmb();
 
-		/* Update Last Free RxBD pointer for LFC */
-		if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
-			gfar_write(rx_queue->rfbptr, (u32)bdp);
+		/* fetch next to clean buffer from the ring */
+		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
+		if (unlikely(!skb))
+			break;
 
-		/* Update to the next pointer */
-		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
+		cleaned_cnt++;
+		howmany++;
 
-		/* update to point at the next skb */
-		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
-				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
+		if (unlikely(++i == rx_queue->rx_ring_size))
+			i = 0;
+
+		rx_queue->next_to_clean = i;
+
+		/* fetch next buffer if not the last in frame */
+		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
+			continue;
+
+		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
+			count_errors(lstatus, ndev);
+
+			/* discard faulty buffer */
+			dev_kfree_skb(skb);
+			skb = NULL;
+			rx_queue->stats.rx_dropped++;
+			continue;
+		}
+
+		/* Increment the number of packets */
+		total_pkts++;
+		total_bytes += skb->len;
+
+		skb_record_rx_queue(skb, rx_queue->qindex);
+
+		gfar_process_frame(ndev, skb);
+
+		/* Send the packet up the stack */
+		napi_gro_receive(&rx_queue->grp->napi_rx, skb);
+
+		skb = NULL;
 	}
 
-	/* Update the current rxbd pointer to be the next one */
-	rx_queue->cur_rx = bdp;
+	/* Store incomplete frames for completion */
+	rx_queue->skb = skb;
+
+	rx_queue->stats.rx_packets += total_pkts;
+	rx_queue->stats.rx_bytes += total_bytes;
+
+	if (cleaned_cnt)
+		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+
+	/* Update Last Free RxBD pointer for LFC */
+	if (unlikely(priv->tx_actual_en)) {
+		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+
+		gfar_write(rx_queue->rfbptr, bdp_dma);
+	}
 
 	return howmany;
 }
@@ -3459,7 +3530,6 @@
 	struct phy_device *phydev = priv->phydev;
 	struct gfar_priv_rx_q *rx_queue = NULL;
 	int i;
-	struct rxbd8 *bdp;
 
 	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
 		return;
@@ -3516,15 +3586,11 @@
 		/* Turn last free buffer recording on */
 		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
 			for (i = 0; i < priv->num_rx_queues; i++) {
-				rx_queue = priv->rx_queue[i];
-				bdp = rx_queue->cur_rx;
-				/* skip to previous bd */
-				bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
-					      rx_queue->rx_bd_base,
-					      rx_queue->rx_ring_size);
+				u32 bdp_dma;
 
-				if (rx_queue->rfbptr)
-					gfar_write(rx_queue->rfbptr, (u32)bdp);
+				rx_queue = priv->rx_queue[i];
+				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+				gfar_write(rx_queue->rfbptr, bdp_dma);
 			}
 
 			priv->tx_actual_en = 1;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 5545e41..8c19948 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -71,11 +71,6 @@
 /* Number of bytes to align the rx bufs to */
 #define RXBUF_ALIGNMENT 64
 
-/* The number of bytes which composes a unit for the purpose of
- * allocating data buffers.  ie-for any given MTU, the data buffer
- * will be the next highest multiple of 512 bytes. */
-#define INCREMENTAL_BUFFER_SIZE 512
-
 #define PHY_INIT_TIMEOUT 100000
 
 #define DRV_NAME "gfar-enet"
@@ -92,6 +87,8 @@
 #define DEFAULT_TX_RING_SIZE	256
 #define DEFAULT_RX_RING_SIZE	256
 
+#define GFAR_RX_BUFF_ALLOC	16
+
 #define GFAR_RX_MAX_RING_SIZE   256
 #define GFAR_TX_MAX_RING_SIZE   256
 
@@ -103,11 +100,14 @@
 #define DEFAULT_RX_LFC_THR  16
 #define DEFAULT_LFC_PTVVAL  4
 
-#define DEFAULT_RX_BUFFER_SIZE  1536
+#define GFAR_RXB_SIZE 1536
+#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
+			  + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define GFAR_RXB_TRUESIZE 2048
+
 #define TX_RING_MOD_MASK(size) (size-1)
 #define RX_RING_MOD_MASK(size) (size-1)
-#define JUMBO_BUFFER_SIZE 9728
-#define JUMBO_FRAME_SIZE 9600
+#define GFAR_JUMBO_FRAME_SIZE 9600
 
 #define DEFAULT_FIFO_TX_THR 0x100
 #define DEFAULT_FIFO_TX_STARVE 0x40
@@ -640,6 +640,7 @@
 };
 
 struct gfar_extra_stats {
+	atomic64_t rx_alloc_err;
 	atomic64_t rx_large;
 	atomic64_t rx_short;
 	atomic64_t rx_nonoctet;
@@ -651,7 +652,6 @@
 	atomic64_t eberr;
 	atomic64_t tx_babt;
 	atomic64_t tx_underrun;
-	atomic64_t rx_skbmissing;
 	atomic64_t tx_timeout;
 };
 
@@ -1012,34 +1012,42 @@
 	unsigned long rx_dropped;
 };
 
+struct gfar_rx_buff {
+	dma_addr_t dma;
+	struct page *page;
+	unsigned int page_offset;
+};
+
 /**
  *	struct gfar_priv_rx_q - per rx queue structure
- *	@rx_skbuff: skb pointers
- *	@skb_currx: currently use skb pointer
+ *	@rx_buff: Array of buffer info metadata structs
  *	@rx_bd_base: First rx buffer descriptor
- *	@cur_rx: Next free rx ring entry
+ *	@next_to_use: index of the next buffer to be alloc'd
+ *	@next_to_clean: index of the next buffer to be cleaned
  *	@qindex: index of this queue
- *	@dev: back pointer to the dev structure
+ *	@ndev: back pointer to net_device
  *	@rx_ring_size: Rx ring size
  *	@rxcoalescing: enable/disable rx-coalescing
 *	@rxic: receive interrupt coalescing value
  */
 
 struct gfar_priv_rx_q {
-	struct	sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
-	dma_addr_t rx_bd_dma_base;
+	struct	gfar_rx_buff *rx_buff __aligned(SMP_CACHE_BYTES);
 	struct	rxbd8 *rx_bd_base;
-	struct	rxbd8 *cur_rx;
-	struct	net_device *dev;
-	struct gfar_priv_grp *grp;
+	struct	net_device *ndev;
+	struct	device *dev;
+	u16 rx_ring_size;
+	u16 qindex;
+	struct	gfar_priv_grp *grp;
+	u16 next_to_clean;
+	u16 next_to_use;
+	u16 next_to_alloc;
+	struct	sk_buff *skb;
 	struct rx_q_stats stats;
-	u16	skb_currx;
-	u16	qindex;
-	unsigned int	rx_ring_size;
-	/* RX Coalescing values */
+	u32 __iomem *rfbptr;
 	unsigned char rxcoalescing;
 	unsigned long rxic;
-	u32 __iomem *rfbptr;
+	dma_addr_t rx_bd_dma_base;
 };
 
 enum gfar_irqinfo_id {
@@ -1109,7 +1117,6 @@
 	struct device *dev;
 	struct net_device *ndev;
 	enum gfar_errata errata;
-	unsigned int rx_buffer_size;
 
 	u16 uses_rxfcb;
 	u16 padding;
@@ -1292,6 +1299,28 @@
 	bdp->lstatus = cpu_to_be32(lstatus);
 }
 
+static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq)
+{
+	if (rxq->next_to_clean > rxq->next_to_use)
+		return rxq->next_to_clean - rxq->next_to_use - 1;
+
+	return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1;
+}
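gfar_rxbd_unused() counts free ring slots while deliberately keeping one descriptor unused, so a completely full ring (next_to_use one behind next_to_clean) is distinguishable from an empty one. With rx_ring_size = 256, next_to_clean = 10 and next_to_use = 200 it returns 256 + 10 - 200 - 1 = 65. The same arithmetic in isolation:

    /* Free-slot count for a circular ring; one slot stays unused so
     * that "full" and "empty" do not look identical.
     */
    static inline int ring_unused(u16 clean, u16 use, u16 size)
    {
            if (clean > use)
                    return clean - use - 1;
            return size + clean - use - 1;
    }

    /* ring_unused(10, 200, 256) == 65, ring_unused(0, 0, 256) == 255,
     * ring_unused(1, 0, 256) == 0 (full).
     */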
+
+static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq)
+{
+	struct rxbd8 *bdp;
+	u32 bdp_dma;
+	int i;
+
+	i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1;
+	bdp = &rxq->rx_bd_base[i];
+	bdp_dma = lower_32_bits(rxq->rx_bd_dma_base);
+	bdp_dma += (uintptr_t)bdp - (uintptr_t)rxq->rx_bd_base;
+
+	return bdp_dma;
+}
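gfar_rxbd_dma_lastfree() turns a ring index into the bus address the controller expects by adding the descriptor's byte offset within the array to the ring's DMA base; the pointer subtraction is simply index * sizeof(struct rxbd8). A slightly more explicit equivalent (sketch):

    /* Bus address of RX descriptor i, equivalent to the pointer
     * arithmetic used above.
     */
    static u32 rxbd_dma(struct gfar_priv_rx_q *rxq, unsigned int i)
    {
            return lower_32_bits(rxq->rx_bd_dma_base) +
                   i * sizeof(struct rxbd8);
    }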
+
 irqreturn_t gfar_receive(int irq, void *dev_id);
 int startup_gfar(struct net_device *dev);
 void stop_gfar(struct net_device *dev);
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 5b90fcf..6bdc891 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -61,6 +61,8 @@
 			  struct ethtool_drvinfo *drvinfo);
 
 static const char stat_gstrings[][ETH_GSTRING_LEN] = {
+	/* extra stats */
+	"rx-allocation-errors",
 	"rx-large-frame-errors",
 	"rx-short-frame-errors",
 	"rx-non-octet-errors",
@@ -72,8 +74,8 @@
 	"ethernet-bus-error",
 	"tx-babbling-errors",
 	"tx-underrun-errors",
-	"rx-skb-missing-errors",
 	"tx-timeout-errors",
+	/* rmon stats */
 	"tx-rx-64-frames",
 	"tx-rx-65-127-frames",
 	"tx-rx-128-255-frames",
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index d49bee3..cc2d8b4 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -965,7 +965,6 @@
 	.remove	= hip04_remove,
 	.driver	= {
 		.name		= DRV_NAME,
-		.owner		= THIS_MODULE,
 		.of_match_table	= hip04_mac_match,
 	},
 };
diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c
index b3bac25d..fca0a5b 100644
--- a/drivers/net/ethernet/hisilicon/hip04_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hip04_mdio.c
@@ -174,7 +174,6 @@
 	.remove = hip04_mdio_remove,
 	.driver = {
 		.name = "hip04-mdio",
-		.owner = THIS_MODULE,
 		.of_match_table = hip04_mdio_match,
 	},
 };
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 29bbb62..7af870a 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -79,6 +79,11 @@
 module_param(rx_flush, uint, 0644);
 MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
 
+static bool old_large_send __read_mostly;
+module_param(old_large_send, bool, S_IRUGO);
+MODULE_PARM_DESC(old_large_send,
+	"Use old large send method on firmware that supports the new method");
+
 struct ibmveth_stat {
 	char name[ETH_GSTRING_LEN];
 	int offset;
@@ -101,7 +106,8 @@
 	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
 	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
 	{ "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
-	{ "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) }
+	{ "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
+	{ "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
 };
 
 /* simple methods of getting data from the current rxq entry */
@@ -848,25 +854,91 @@
 	return rc1 ? rc1 : rc2;
 }
 
+static int ibmveth_set_tso(struct net_device *dev, u32 data)
+{
+	struct ibmveth_adapter *adapter = netdev_priv(dev);
+	unsigned long set_attr, clr_attr, ret_attr;
+	long ret1, ret2;
+	int rc1 = 0, rc2 = 0;
+	int restart = 0;
+
+	if (netif_running(dev)) {
+		restart = 1;
+		adapter->pool_config = 1;
+		ibmveth_close(dev);
+		adapter->pool_config = 0;
+	}
+
+	set_attr = 0;
+	clr_attr = 0;
+
+	if (data)
+		set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
+	else
+		clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
+
+	ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
+	if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
+	    !old_large_send) {
+		ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+					  set_attr, &ret_attr);
+
+		if (ret2 != H_SUCCESS) {
+			netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
+				   data, ret2);
+
+			h_illan_attributes(adapter->vdev->unit_address,
+					   set_attr, clr_attr, &ret_attr);
+
+			if (data == 1)
+				dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+			rc1 = -EIO;
+
+		} else {
+			adapter->fw_large_send_support = data;
+			adapter->large_send = data;
+		}
+	} else {
+		/* Older firmware versions of large send offload do not
+		 * support tcp6/ipv6
+		 */
+		if (data == 1) {
+			dev->features &= ~NETIF_F_TSO6;
+			netdev_info(dev, "TSO feature requires all partitions to have updated driver");
+		}
+		adapter->large_send = data;
+	}
+
+	if (restart)
+		rc2 = ibmveth_open(dev);
+
+	return rc1 ? rc1 : rc2;
+}
+
 static int ibmveth_set_features(struct net_device *dev,
 	netdev_features_t features)
 {
 	struct ibmveth_adapter *adapter = netdev_priv(dev);
 	int rx_csum = !!(features & NETIF_F_RXCSUM);
-	int rc;
-	netdev_features_t changed = features ^ dev->features;
+	int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
+	int rc1 = 0, rc2 = 0;
 
-	if (features & NETIF_F_TSO & changed)
-		netdev_info(dev, "TSO feature requires all partitions to have updated driver");
+	if (rx_csum != adapter->rx_csum) {
+		rc1 = ibmveth_set_csum_offload(dev, rx_csum);
+		if (rc1 && !adapter->rx_csum)
+			dev->features =
+				features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+	}
 
-	if (rx_csum == adapter->rx_csum)
-		return 0;
+	if (large_send != adapter->large_send) {
+		rc2 = ibmveth_set_tso(dev, large_send);
+		if (rc2 && !adapter->large_send)
+			dev->features =
+				features & ~(NETIF_F_TSO | NETIF_F_TSO6);
+	}
 
-	rc = ibmveth_set_csum_offload(dev, rx_csum);
-	if (rc && !adapter->rx_csum)
-		dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
-
-	return rc;
+	return rc1 ? rc1 : rc2;
 }
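The reworked ibmveth_set_features() handles each offload independently and, when the backend rejects a change that was not already active, clears the corresponding bits from dev->features again so the stack's view matches the hardware state. The shape of that pattern, as a sketch with hypothetical names:

    /* Apply one feature toggle; undo the advertised bits on failure.
     * hw_set_feature() and FEATURE_BITS are placeholders.
     */
    int rc = 0;

    if (want != have) {
            rc = hw_set_feature(dev, want);
            if (rc && !have)
                    dev->features &= ~FEATURE_BITS;
    }
    return rc;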
 
 static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -917,7 +989,7 @@
 #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
 
 static int ibmveth_send(struct ibmveth_adapter *adapter,
-			union ibmveth_buf_desc *descs)
+			union ibmveth_buf_desc *descs, unsigned long mss)
 {
 	unsigned long correlator;
 	unsigned int retry_count;
@@ -934,7 +1006,8 @@
 					     descs[0].desc, descs[1].desc,
 					     descs[2].desc, descs[3].desc,
 					     descs[4].desc, descs[5].desc,
-					     correlator, &correlator);
+					     correlator, &correlator, mss,
+					     adapter->fw_large_send_support);
 	} while ((ret == H_BUSY) && (retry_count--));
 
 	if (ret != H_SUCCESS && ret != H_DROPPED) {
@@ -955,6 +1028,7 @@
 	int last, i;
 	int force_bounce = 0;
 	dma_addr_t dma_addr;
+	unsigned long mss = 0;
 
 	/*
 	 * veth handles a maximum of 6 segments including the header, so
@@ -980,6 +1054,9 @@
 
 	desc_flags = IBMVETH_BUF_VALID;
 
+	if (skb_is_gso(skb) && adapter->fw_large_send_support)
+		desc_flags |= IBMVETH_BUF_LRG_SND;
+
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		unsigned char *buf = skb_transport_header(skb) +
 						skb->csum_offset;
@@ -1007,7 +1084,7 @@
 		descs[0].fields.flags_len = desc_flags | skb->len;
 		descs[0].fields.address = adapter->bounce_buffer_dma;
 
-		if (ibmveth_send(adapter, descs)) {
+		if (ibmveth_send(adapter, descs, 0)) {
 			adapter->tx_send_failed++;
 			netdev->stats.tx_dropped++;
 		} else {
@@ -1041,16 +1118,23 @@
 		descs[i+1].fields.address = dma_addr;
 	}
 
-	if (skb_is_gso(skb) && !skb_is_gso_v6(skb)) {
-		/* Put -1 in the IP checksum to tell phyp it
-		 *  is a largesend packet and put the mss in the TCP checksum.
-		 */
-		ip_hdr(skb)->check = 0xffff;
-		tcp_hdr(skb)->check = cpu_to_be16(skb_shinfo(skb)->gso_size);
-		adapter->tx_large_packets++;
+	if (skb_is_gso(skb)) {
+		if (adapter->fw_large_send_support) {
+			mss = (unsigned long)skb_shinfo(skb)->gso_size;
+			adapter->tx_large_packets++;
+		} else if (!skb_is_gso_v6(skb)) {
+			/* Put -1 in the IP checksum to tell phyp it
+			 * is a largesend packet. Put the mss in
+			 * the TCP checksum.
+			 */
+			ip_hdr(skb)->check = 0xffff;
+			tcp_hdr(skb)->check =
+				cpu_to_be16(skb_shinfo(skb)->gso_size);
+			adapter->tx_large_packets++;
+		}
 	}
 
-	if (ibmveth_send(adapter, descs)) {
+	if (ibmveth_send(adapter, descs, mss)) {
 		adapter->tx_send_failed++;
 		netdev->stats.tx_dropped++;
 	} else {
@@ -1401,6 +1485,8 @@
 	struct ibmveth_adapter *adapter;
 	unsigned char *mac_addr_p;
 	unsigned int *mcastFilterSize_p;
+	long ret;
+	unsigned long ret_attr;
 
 	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
 		dev->unit_address);
@@ -1449,10 +1535,19 @@
 	SET_NETDEV_DEV(netdev, &dev->dev);
 	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
 	netdev->features |= netdev->hw_features;
 
-	/* TSO is disabled by default */
-	netdev->hw_features |= NETIF_F_TSO;
+	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
+	/* If running older firmware, TSO should not be enabled by default */
+	if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
+	    !old_large_send) {
+		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+		netdev->features |= netdev->hw_features;
+	} else {
+		netdev->hw_features |= NETIF_F_TSO;
+	}
 
 	memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
 
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 41dedb1..4eade67 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -40,6 +40,8 @@
 #define IbmVethMcastRemoveFilter     0x2UL
 #define IbmVethMcastClearFilterTable 0x3UL
 
+#define IBMVETH_ILLAN_LRG_SR_ENABLED	0x0000000000010000UL
+#define IBMVETH_ILLAN_LRG_SND_SUPPORT	0x0000000000008000UL
 #define IBMVETH_ILLAN_PADDED_PKT_CSUM	0x0000000000002000UL
 #define IBMVETH_ILLAN_TRUNK_PRI_MASK	0x0000000000000F00UL
 #define IBMVETH_ILLAN_IPV6_TCP_CSUM		0x0000000000000004UL
@@ -59,13 +61,20 @@
 static inline long h_send_logical_lan(unsigned long unit_address,
 		unsigned long desc1, unsigned long desc2, unsigned long desc3,
 		unsigned long desc4, unsigned long desc5, unsigned long desc6,
-		unsigned long corellator_in, unsigned long *corellator_out)
+		unsigned long corellator_in, unsigned long *corellator_out,
+		unsigned long mss, unsigned long large_send_support)
 {
 	long rc;
 	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
 
-	rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1,
-			desc2, desc3, desc4, desc5, desc6, corellator_in);
+	if (large_send_support)
+		rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
+				  desc1, desc2, desc3, desc4, desc5, desc6,
+				  corellator_in, mss);
+	else
+		rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
+				  desc1, desc2, desc3, desc4, desc5, desc6,
+				  corellator_in);
 
 	*corellator_out = retbuf[0];
 
@@ -147,11 +156,13 @@
     struct ibmveth_rx_q rx_queue;
     int pool_config;
     int rx_csum;
+    int large_send;
     void *bounce_buffer;
     dma_addr_t bounce_buffer_dma;
 
     u64 fw_ipv6_csum_support;
     u64 fw_ipv4_csum_support;
+    u64 fw_large_send_support;
     /* adapter specific stats */
     u64 replenish_task_cycles;
     u64 replenish_no_mem;
@@ -182,6 +193,7 @@
 #endif
 #define IBMVETH_BUF_VALID	0x80000000
 #define IBMVETH_BUF_TOGGLE	0x40000000
+#define IBMVETH_BUF_LRG_SND     0x04000000
 #define IBMVETH_BUF_NO_CSUM	0x02000000
 #define IBMVETH_BUF_CSUM_GOOD	0x01000000
 #define IBMVETH_BUF_LEN_MASK	0x00FFFFFF
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index d2657a4..068789e 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1770,8 +1770,11 @@
 	dma_addr = pci_map_single(nic->pdev,
 				  skb->data, skb->len, PCI_DMA_TODEVICE);
 	/* If we can't map the skb, have the upper layer try later */
-	if (pci_dma_mapping_error(nic->pdev, dma_addr))
+	if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
+		dev_kfree_skb_any(skb);
+		skb = NULL;
 		return -ENOMEM;
+	}
 
 	/*
 	 * Use the last 4 bytes of the SKB payload packet as the CRC, used for
@@ -2967,6 +2970,11 @@
 			   nic->params.cbs.max * sizeof(struct cb),
 			   sizeof(u32),
 			   0);
+	if (!nic->cbs_pool) {
+		netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
+		err = -ENOMEM;
+		goto err_out_pool;
+	}
 	netif_info(nic, probe, nic->netdev,
 		   "addr 0x%llx, irq %d, MAC addr %pM\n",
 		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
@@ -2974,6 +2982,8 @@
 
 	return 0;
 
+err_out_pool:
+	unregister_netdev(netdev);
 err_out_free:
 	e100_free(nic);
 err_out_iounmap:
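The new cbs_pool check slots into e100's existing goto ladder, the usual kernel probe() error-unwinding idiom: each failure jumps to a label that tears down, in reverse order, everything set up before the failing step. In outline:

    /* Probe error unwinding with a goto ladder (schematic). */
    err = setup_a();
    if (err)
            goto err_out;
    err = setup_b();
    if (err)
            goto err_undo_a;
    err = setup_c();
    if (err)
            goto err_undo_b;
    return 0;

    err_undo_b:
            teardown_b();
    err_undo_a:
            teardown_a();
    err_out:
            return err;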
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 2645985..34c551e 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -106,14 +106,14 @@
 #define E1000_FEXTNVM11_DISABLE_MULR_FIX	0x00002000
 
 /* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */
-#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
+#define E1000_RXDCTL_THRESH_UNIT_DESC	0x01000000
 
 #define K1_ENTRY_LATENCY	0
 #define K1_MIN_TIME		1
 #define NVM_SIZE_MULTIPLIER 4096	/*multiplier for NVMS field */
 #define E1000_FLASH_BASE_ADDR 0xE000	/*offset of NVM access regs */
 #define E1000_CTRL_EXT_NVMVS 0x3	/*NVM valid sector */
-
+#define E1000_TARC0_CB_MULTIQ_3_REQ	(1 << 28 | 1 << 29)
 #define PCIE_ICH8_SNOOP_ALL	PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES	7
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 89d788d..faf4b3f 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -48,7 +48,7 @@
 
 #define DRV_EXTRAVERSION "-k"
 
-#define DRV_VERSION "3.2.5" DRV_EXTRAVERSION
+#define DRV_VERSION "3.2.6" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -1737,12 +1737,6 @@
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 	adapter->flags2 &= ~FLAG2_IS_DISCARDING;
-
-	writel(0, rx_ring->head);
-	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
-		e1000e_update_rdt_wa(rx_ring, 0);
-	else
-		writel(0, rx_ring->tail);
 }
 
 static void e1000e_downshift_workaround(struct work_struct *work)
@@ -2447,12 +2441,6 @@
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
-
-	writel(0, tx_ring->head);
-	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
-		e1000e_update_tdt_wa(tx_ring, 0);
-	else
-		writel(0, tx_ring->tail);
 }
 
 /**
@@ -2954,6 +2942,12 @@
 	tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
 	tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
 
+	writel(0, tx_ring->head);
+	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+		e1000e_update_tdt_wa(tx_ring, 0);
+	else
+		writel(0, tx_ring->tail);
+
 	/* Set the Tx Interrupt Delay register */
 	ew32(TIDV, adapter->tx_int_delay);
 	/* Tx irq moderation */
@@ -3275,6 +3269,12 @@
 	rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
 	rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
 
+	writel(0, rx_ring->head);
+	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+		e1000e_update_rdt_wa(rx_ring, 0);
+	else
+		writel(0, rx_ring->tail);
+
 	/* Enable Receive Checksum Offload for TCP and UDP */
 	rxcsum = er32(RXCSUM);
 	if (adapter->netdev->features & NETIF_F_RXCSUM)
@@ -4280,18 +4280,29 @@
 	struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
 						     cc);
 	struct e1000_hw *hw = &adapter->hw;
+	u32 systimel_1, systimel_2, systimeh;
 	cycle_t systim, systim_next;
-	/* SYSTIMH latching upon SYSTIML read does not work well. To fix that
-	 * we don't want to allow overflow of SYSTIML and a change to SYSTIMH
-	 * to occur between reads, so if we read a vale close to overflow, we
-	 * wait for overflow to occur and read both registers when its safe.
+	/* SYSTIMH latching upon SYSTIML read does not work well.
+	 * This means that if SYSTIML overflows after we read it but before
+	 * we read SYSTIMH, the value of SYSTIMH has been incremented and we
+	 * will see a huge non-linear increment in the systime value.
+	 * To fix that, we test for overflow and, if one occurred, re-read
+	 * systime.
 	 */
-	u32 systim_overflow_latch_fix = 0x3FFFFFFF;
-
-	do {
-		systim = (cycle_t)er32(SYSTIML);
-	} while (systim > systim_overflow_latch_fix);
-	systim |= (cycle_t)er32(SYSTIMH) << 32;
+	systimel_1 = er32(SYSTIML);
+	systimeh = er32(SYSTIMH);
+	systimel_2 = er32(SYSTIML);
+	/* Check for overflow. If there was no overflow, use the values */
+	if (systimel_1 < systimel_2) {
+		systim = (cycle_t)systimel_1;
+		systim |= (cycle_t)systimeh << 32;
+	} else {
+		/* There was an overflow; read SYSTIMH again and use
+		 * systimel_2
+		 */
+		systimeh = er32(SYSTIMH);
+		systim = (cycle_t)systimel_2;
+		systim |= (cycle_t)systimeh << 32;
+	}
 
 	if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
 		u64 incvalue, time_delta, rem, temp;
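The replacement is the classic lock-free read of a 64-bit counter exposed as two 32-bit registers where reading the low half does not latch the high half: read low, high, low again; if the second low value is not larger, the low word may have wrapped between reads, so the high word is re-read and paired with the post-wrap low value. Generic sketch (read_lo()/read_hi() are illustrative accessors):

    static u64 read_split_counter(void)
    {
            u32 lo1 = read_lo();
            u32 hi  = read_hi();
            u32 lo2 = read_lo();

            if (lo1 < lo2)                  /* no wrap between reads */
                    return ((u64)hi << 32) | lo1;

            /* lo wrapped: hi may be stale, re-read it and pair it with
             * the post-wrap low value.
             */
            hi = read_hi();
            return ((u64)hi << 32) | lo2;
    }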
@@ -4588,6 +4599,7 @@
 	return 0;
 
 err_req_irq:
+	pm_qos_remove_request(&adapter->pm_qos_req);
 	e1000e_release_hw_control(adapter);
 	e1000_power_down_phy(adapter);
 	e1000e_free_rx_resources(adapter->rx_ring);
@@ -6316,6 +6328,33 @@
 			return retval;
 	}
 
+	/* Ensure that the appropriate bits are set in LPI_CTRL
+	 * for EEE in Sx
+	 */
+	if ((hw->phy.type >= e1000_phy_i217) &&
+	    adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) {
+		u16 lpi_ctrl = 0;
+
+		retval = hw->phy.ops.acquire(hw);
+		if (!retval) {
+			retval = e1e_rphy_locked(hw, I82579_LPI_CTRL,
+						 &lpi_ctrl);
+			if (!retval) {
+				if (adapter->eee_advert &
+				    hw->dev_spec.ich8lan.eee_lp_ability &
+				    I82579_EEE_100_SUPPORTED)
+					lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
+				if (adapter->eee_advert &
+				    hw->dev_spec.ich8lan.eee_lp_ability &
+				    I82579_EEE_1000_SUPPORTED)
+					lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
+
+				retval = e1e_wphy_locked(hw, I82579_LPI_CTRL,
+							 lpi_ctrl);
+			}
+		}
+		hw->phy.ops.release(hw);
+	}
 
 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
@@ -6465,7 +6504,7 @@
 	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
 		aspm_disable_flag |= PCIE_LINK_STATE_L1;
 	if (aspm_disable_flag)
-		e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
+		e1000e_disable_aspm(pdev, aspm_disable_flag);
 
 	pci_set_master(pdev);
 
@@ -6743,7 +6782,7 @@
 	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
 		aspm_disable_flag |= PCIE_LINK_STATE_L1;
 	if (aspm_disable_flag)
-		e1000e_disable_aspm(pdev, aspm_disable_flag);
+		e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
 
 	err = pci_enable_device_mem(pdev);
 	if (err) {
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index b24e5fe..1d5e0b7 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -38,8 +38,8 @@
 #define E1000_FEXTNVM4	0x00024	/* Future Extended NVM 4 - RW */
 #define E1000_FEXTNVM6	0x00010	/* Future Extended NVM 6 - RW */
 #define E1000_FEXTNVM7	0x000E4	/* Future Extended NVM 7 - RW */
-#define E1000_FEXTNVM9	0x5BB4  /* Future Extended NVM 9 - RW */
-#define E1000_FEXTNVM11	0x5BBC  /* Future Extended NVM 11 - RW */
+#define E1000_FEXTNVM9	0x5BB4	/* Future Extended NVM 9 - RW */
+#define E1000_FEXTNVM11	0x5BBC	/* Future Extended NVM 11 - RW */
 #define E1000_PCIEANACFG	0x00F18	/* PCIE Analog Config */
 #define E1000_FCT	0x00030	/* Flow Control Type - RW */
 #define E1000_VET	0x00038	/* VLAN Ether Type - RW */
@@ -125,7 +125,6 @@
 				 (0x054E4 + ((_i - 16) * 8)))
 #define E1000_SHRAL(_i)		(0x05438 + ((_i) * 8))
 #define E1000_SHRAH(_i)		(0x0543C + ((_i) * 8))
-#define E1000_TARC0_CB_MULTIQ_3_REQ	(1 << 28 | 1 << 29)
 #define E1000_TDFH		0x03410	/* Tx Data FIFO Head - RW */
 #define E1000_TDFT		0x03418	/* Tx Data FIFO Tail - RW */
 #define E1000_TDFHS		0x03420	/* Tx Data FIFO Head Saved - RW */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index ec76c3f..e746279 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -79,10 +79,13 @@
 #define I40E_MIN_MSIX                 2
 #define I40E_DEFAULT_NUM_VMDQ_VSI     8 /* max 256 VSIs */
 #define I40E_MIN_VSI_ALLOC            51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
-#define I40E_DEFAULT_QUEUES_PER_VMDQ  2 /* max 16 qps */
+/* max 16 qps */
+#define i40e_default_queues_per_vmdq(pf) \
+		(((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1)
 #define I40E_DEFAULT_QUEUES_PER_VF    4
 #define I40E_DEFAULT_QUEUES_PER_TC    1 /* should be a power of 2 */
-#define I40E_MAX_QUEUES_PER_TC        64 /* should be a power of 2 */
+#define i40e_pf_get_max_q_per_tc(pf) \
+		(((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64)
 #define I40E_FDIR_RING                0
 #define I40E_FDIR_RING_COUNT          32
 #ifdef I40E_FCOE
@@ -98,7 +101,7 @@
 #define I40E_INT_NAME_STR_LEN        (IFNAMSIZ + 9)
 
 /* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_NPAR_FLAG	(1 << 0)
+#define I40E_PRIV_FLAGS_NPAR_FLAG	BIT(0)
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -289,35 +292,42 @@
 	struct work_struct service_task;
 
 	u64 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED              (u64)(1 << 1)
-#define I40E_FLAG_MSI_ENABLED                  (u64)(1 << 2)
-#define I40E_FLAG_MSIX_ENABLED                 (u64)(1 << 3)
-#define I40E_FLAG_RX_1BUF_ENABLED              (u64)(1 << 4)
-#define I40E_FLAG_RX_PS_ENABLED                (u64)(1 << 5)
-#define I40E_FLAG_RSS_ENABLED                  (u64)(1 << 6)
-#define I40E_FLAG_VMDQ_ENABLED                 (u64)(1 << 7)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT         (u64)(1 << 8)
-#define I40E_FLAG_NEED_LINK_UPDATE             (u64)(1 << 9)
+#define I40E_FLAG_RX_CSUM_ENABLED		BIT_ULL(1)
+#define I40E_FLAG_MSI_ENABLED			BIT_ULL(2)
+#define I40E_FLAG_MSIX_ENABLED			BIT_ULL(3)
+#define I40E_FLAG_RX_1BUF_ENABLED		BIT_ULL(4)
+#define I40E_FLAG_RX_PS_ENABLED			BIT_ULL(5)
+#define I40E_FLAG_RSS_ENABLED			BIT_ULL(6)
+#define I40E_FLAG_VMDQ_ENABLED			BIT_ULL(7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT		BIT_ULL(8)
+#define I40E_FLAG_NEED_LINK_UPDATE		BIT_ULL(9)
+#define I40E_FLAG_IWARP_ENABLED			BIT_ULL(10)
 #ifdef I40E_FCOE
-#define I40E_FLAG_FCOE_ENABLED                 (u64)(1 << 11)
+#define I40E_FLAG_FCOE_ENABLED			BIT_ULL(11)
 #endif /* I40E_FCOE */
-#define I40E_FLAG_IN_NETPOLL                   (u64)(1 << 12)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       (u64)(1 << 13)
-#define I40E_FLAG_CLEAN_ADMINQ                 (u64)(1 << 14)
-#define I40E_FLAG_FILTER_SYNC                  (u64)(1 << 15)
-#define I40E_FLAG_PROCESS_MDD_EVENT            (u64)(1 << 17)
-#define I40E_FLAG_PROCESS_VFLR_EVENT           (u64)(1 << 18)
-#define I40E_FLAG_SRIOV_ENABLED                (u64)(1 << 19)
-#define I40E_FLAG_DCB_ENABLED                  (u64)(1 << 20)
-#define I40E_FLAG_FD_SB_ENABLED                (u64)(1 << 21)
-#define I40E_FLAG_FD_ATR_ENABLED               (u64)(1 << 22)
-#define I40E_FLAG_PTP                          (u64)(1 << 25)
-#define I40E_FLAG_MFP_ENABLED                  (u64)(1 << 26)
+#define I40E_FLAG_IN_NETPOLL			BIT_ULL(12)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED	BIT_ULL(13)
+#define I40E_FLAG_CLEAN_ADMINQ			BIT_ULL(14)
+#define I40E_FLAG_FILTER_SYNC			BIT_ULL(15)
+#define I40E_FLAG_PROCESS_MDD_EVENT		BIT_ULL(17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT		BIT_ULL(18)
+#define I40E_FLAG_SRIOV_ENABLED			BIT_ULL(19)
+#define I40E_FLAG_DCB_ENABLED			BIT_ULL(20)
+#define I40E_FLAG_FD_SB_ENABLED			BIT_ULL(21)
+#define I40E_FLAG_FD_ATR_ENABLED		BIT_ULL(22)
+#define I40E_FLAG_PTP				BIT_ULL(25)
+#define I40E_FLAG_MFP_ENABLED			BIT_ULL(26)
 #ifdef CONFIG_I40E_VXLAN
-#define I40E_FLAG_VXLAN_FILTER_SYNC            (u64)(1 << 27)
+#define I40E_FLAG_VXLAN_FILTER_SYNC		BIT_ULL(27)
 #endif
-#define I40E_FLAG_PORT_ID_VALID                (u64)(1 << 28)
-#define I40E_FLAG_DCB_CAPABLE                  (u64)(1 << 29)
+#define I40E_FLAG_PORT_ID_VALID			BIT_ULL(28)
+#define I40E_FLAG_DCB_CAPABLE			BIT_ULL(29)
+#define I40E_FLAG_RSS_AQ_CAPABLE		BIT_ULL(31)
+#define I40E_FLAG_HW_ATR_EVICT_CAPABLE		BIT_ULL(32)
+#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE	BIT_ULL(33)
+#define I40E_FLAG_128_QP_RSS_CAPABLE		BIT_ULL(34)
+#define I40E_FLAG_WB_ON_ITR_CAPABLE		BIT_ULL(35)
+#define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE	BIT_ULL(38)
 #define I40E_FLAG_VEB_MODE_ENABLED		BIT_ULL(40)
 
 	/* tracks features that get auto disabled by errors */
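The switch from (u64)(1 << n) to BIT_ULL(n) is more than cosmetic: several of the new flags sit above bit 31 (I40E_FLAG_HW_ATR_EVICT_CAPABLE is bit 32, I40E_FLAG_VEB_MODE_ENABLED bit 40), and with the old spelling the shift would be performed in a 32-bit int before the cast, which is undefined behaviour for shift counts of 32 and up. BIT_ULL does the shift in unsigned long long from the start:

    /* BIT_ULL(nr) is (1ULL << (nr)) in <linux/bitops.h>. */
    u64 bad  = (u64)(1 << 32);      /* undefined: shift happens in int */
    u64 good = BIT_ULL(32);         /* 0x100000000ULL */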
@@ -362,6 +372,7 @@
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *i40e_dbg_pf;
 #endif /* CONFIG_DEBUG_FS */
+	bool cur_promisc;
 
 	u16 instance; /* A unique number per i40e_pf instance in the system */
 
@@ -432,6 +443,8 @@
 	bool stat_offsets_loaded;
 	struct i40e_eth_stats stats;
 	struct i40e_eth_stats stats_offsets;
+	struct i40e_veb_tc_stats tc_stats;
+	struct i40e_veb_tc_stats tc_stats_offsets;
 };
 
 /* struct that defines a VSI, associated with a dev */
@@ -443,8 +456,8 @@
 
 	u32 current_netdev_flags;
 	unsigned long state;
-#define I40E_VSI_FLAG_FILTER_CHANGED  (1<<0)
-#define I40E_VSI_FLAG_VEB_OWNER       (1<<1)
+#define I40E_VSI_FLAG_FILTER_CHANGED	BIT(0)
+#define I40E_VSI_FLAG_VEB_OWNER		BIT(1)
 	unsigned long flags;
 
 	struct list_head mac_filter_list;
@@ -550,6 +563,7 @@
 	cpumask_t affinity_mask;
 	struct rcu_head rcu;	/* to avoid race with update stats on free */
 	char name[I40E_INT_NAME_STR_LEN];
+	bool arm_wb_state;
 } ____cacheline_internodealigned_in_smp;
 
 /* lan device */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 929e3d7..95d23bf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -34,7 +34,7 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR	0x0001
-#define I40E_FW_API_VERSION_MINOR	0x0002
+#define I40E_FW_API_VERSION_MINOR	0x0004
 
 struct i40e_aq_desc {
 	__le16 flags;
@@ -132,12 +132,7 @@
 	i40e_aqc_opc_list_func_capabilities	= 0x000A,
 	i40e_aqc_opc_list_dev_capabilities	= 0x000B,
 
-	i40e_aqc_opc_set_cppm_configuration	= 0x0103,
-	i40e_aqc_opc_set_arp_proxy_entry	= 0x0104,
-	i40e_aqc_opc_set_ns_proxy_entry		= 0x0105,
-
 	/* LAA */
-	i40e_aqc_opc_mng_laa		= 0x0106,   /* AQ obsolete */
 	i40e_aqc_opc_mac_address_read	= 0x0107,
 	i40e_aqc_opc_mac_address_write	= 0x0108,
 
@@ -262,7 +257,10 @@
 	/* Tunnel commands */
 	i40e_aqc_opc_add_udp_tunnel	= 0x0B00,
 	i40e_aqc_opc_del_udp_tunnel	= 0x0B01,
-	i40e_aqc_opc_tunnel_key_structure	= 0x0B10,
+	i40e_aqc_opc_set_rss_key	= 0x0B02,
+	i40e_aqc_opc_set_rss_lut	= 0x0B03,
+	i40e_aqc_opc_get_rss_key	= 0x0B04,
+	i40e_aqc_opc_get_rss_lut	= 0x0B05,
 
 	/* Async Events */
 	i40e_aqc_opc_event_lan_overflow		= 0x1001,
@@ -274,8 +272,6 @@
 	i40e_aqc_opc_oem_ocbb_initialize	= 0xFE03,
 
 	/* debug commands */
-	i40e_aqc_opc_debug_get_deviceid		= 0xFF00,
-	i40e_aqc_opc_debug_set_mode		= 0xFF01,
 	i40e_aqc_opc_debug_read_reg		= 0xFF03,
 	i40e_aqc_opc_debug_write_reg		= 0xFF04,
 	i40e_aqc_opc_debug_modify_reg		= 0xFF07,
@@ -509,7 +505,8 @@
 #define I40E_AQC_SAN_ADDR_VALID		0x20
 #define I40E_AQC_PORT_ADDR_VALID	0x40
 #define I40E_AQC_WOL_ADDR_VALID		0x80
-#define I40E_AQC_ADDR_VALID_MASK	0xf0
+#define I40E_AQC_MC_MAG_EN_VALID	0x100
+#define I40E_AQC_ADDR_VALID_MASK	0x1F0
 	u8	reserved[6];
 	__le32	addr_high;
 	__le32	addr_low;
@@ -532,7 +529,9 @@
 #define I40E_AQC_WRITE_TYPE_LAA_ONLY	0x0000
 #define I40E_AQC_WRITE_TYPE_LAA_WOL	0x4000
 #define I40E_AQC_WRITE_TYPE_PORT	0x8000
-#define I40E_AQC_WRITE_TYPE_MASK	0xc000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG	0xC000
+#define I40E_AQC_WRITE_TYPE_MASK	0xC000
+
 	__le16	mac_sah;
 	__le32	mac_sal;
 	u8	reserved[8];
@@ -826,8 +825,12 @@
 					 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 	/* queueing option section */
 	u8	queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA	0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA	0x08
 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA	0x10
 #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA	0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF	0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI	0x40
 	u8	queueing_opt_reserved[3];
 	/* scheduler section */
 	u8	up_enable_bits;
@@ -1068,6 +1071,7 @@
 	__le16	seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK		0x3FF
 	__le16	vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK		0x0FFF
 #define I40E_AQC_SET_VSI_VLAN_VALID		0x8000
 	u8	reserved[8];
 };
@@ -2064,6 +2068,12 @@
 #define I40E_AQC_CEE_PFC_STATUS_MASK	(0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
 #define I40E_AQC_CEE_APP_STATUS_SHIFT	0x8
 #define I40E_AQC_CEE_APP_STATUS_MASK	(0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+#define I40E_AQC_CEE_FCOE_STATUS_SHIFT	0x8
+#define I40E_AQC_CEE_FCOE_STATUS_MASK	(0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT	0xA
+#define I40E_AQC_CEE_ISCSI_STATUS_MASK	(0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define I40E_AQC_CEE_FIP_STATUS_SHIFT	0x10
+#define I40E_AQC_CEE_FIP_STATUS_MASK	(0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
 struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
 	u8	reserved1;
 	u8	oper_num_tc;
@@ -2177,6 +2187,46 @@
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
 
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID		(0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT	0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK	(0x3FF << \
+					I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+	__le16	vsi_id;
+	u8	reserved[6];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+	u8 standard_rss_key[0x28];
+	u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct  i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID		(0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT	0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK	(0x3FF << \
+					I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+	__le16	vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT	0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK	(0x1 << \
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI	0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF	1
+	__le16	flags;
+	u8	reserved[4];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+
 /* tunnel key structure 0x0B10 */
 
 struct i40e_aqc_tunnel_key_structure {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 0bae22d..114dc64 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -54,6 +54,15 @@
 		case I40E_DEV_ID_20G_KR2:
 			hw->mac.type = I40E_MAC_XL710;
 			break;
+		case I40E_DEV_ID_SFP_X722:
+		case I40E_DEV_ID_1G_BASE_T_X722:
+		case I40E_DEV_ID_10G_BASE_T_X722:
+			hw->mac.type = I40E_MAC_X722;
+			break;
+		case I40E_DEV_ID_X722_VF:
+		case I40E_DEV_ID_X722_VF_HV:
+			hw->mac.type = I40E_MAC_X722_VF;
+			break;
 		case I40E_DEV_ID_VF:
 		case I40E_DEV_ID_VF_HV:
 			hw->mac.type = I40E_MAC_VF;
@@ -72,6 +81,212 @@
 }
 
 /**
+ * i40e_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+	switch (aq_err) {
+	case I40E_AQ_RC_OK:
+		return "OK";
+	case I40E_AQ_RC_EPERM:
+		return "I40E_AQ_RC_EPERM";
+	case I40E_AQ_RC_ENOENT:
+		return "I40E_AQ_RC_ENOENT";
+	case I40E_AQ_RC_ESRCH:
+		return "I40E_AQ_RC_ESRCH";
+	case I40E_AQ_RC_EINTR:
+		return "I40E_AQ_RC_EINTR";
+	case I40E_AQ_RC_EIO:
+		return "I40E_AQ_RC_EIO";
+	case I40E_AQ_RC_ENXIO:
+		return "I40E_AQ_RC_ENXIO";
+	case I40E_AQ_RC_E2BIG:
+		return "I40E_AQ_RC_E2BIG";
+	case I40E_AQ_RC_EAGAIN:
+		return "I40E_AQ_RC_EAGAIN";
+	case I40E_AQ_RC_ENOMEM:
+		return "I40E_AQ_RC_ENOMEM";
+	case I40E_AQ_RC_EACCES:
+		return "I40E_AQ_RC_EACCES";
+	case I40E_AQ_RC_EFAULT:
+		return "I40E_AQ_RC_EFAULT";
+	case I40E_AQ_RC_EBUSY:
+		return "I40E_AQ_RC_EBUSY";
+	case I40E_AQ_RC_EEXIST:
+		return "I40E_AQ_RC_EEXIST";
+	case I40E_AQ_RC_EINVAL:
+		return "I40E_AQ_RC_EINVAL";
+	case I40E_AQ_RC_ENOTTY:
+		return "I40E_AQ_RC_ENOTTY";
+	case I40E_AQ_RC_ENOSPC:
+		return "I40E_AQ_RC_ENOSPC";
+	case I40E_AQ_RC_ENOSYS:
+		return "I40E_AQ_RC_ENOSYS";
+	case I40E_AQ_RC_ERANGE:
+		return "I40E_AQ_RC_ERANGE";
+	case I40E_AQ_RC_EFLUSHED:
+		return "I40E_AQ_RC_EFLUSHED";
+	case I40E_AQ_RC_BAD_ADDR:
+		return "I40E_AQ_RC_BAD_ADDR";
+	case I40E_AQ_RC_EMODE:
+		return "I40E_AQ_RC_EMODE";
+	case I40E_AQ_RC_EFBIG:
+		return "I40E_AQ_RC_EFBIG";
+	}
+
+	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+	return hw->err_str;
+}
+
+/**
+ * i40e_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+	switch (stat_err) {
+	case 0:
+		return "OK";
+	case I40E_ERR_NVM:
+		return "I40E_ERR_NVM";
+	case I40E_ERR_NVM_CHECKSUM:
+		return "I40E_ERR_NVM_CHECKSUM";
+	case I40E_ERR_PHY:
+		return "I40E_ERR_PHY";
+	case I40E_ERR_CONFIG:
+		return "I40E_ERR_CONFIG";
+	case I40E_ERR_PARAM:
+		return "I40E_ERR_PARAM";
+	case I40E_ERR_MAC_TYPE:
+		return "I40E_ERR_MAC_TYPE";
+	case I40E_ERR_UNKNOWN_PHY:
+		return "I40E_ERR_UNKNOWN_PHY";
+	case I40E_ERR_LINK_SETUP:
+		return "I40E_ERR_LINK_SETUP";
+	case I40E_ERR_ADAPTER_STOPPED:
+		return "I40E_ERR_ADAPTER_STOPPED";
+	case I40E_ERR_INVALID_MAC_ADDR:
+		return "I40E_ERR_INVALID_MAC_ADDR";
+	case I40E_ERR_DEVICE_NOT_SUPPORTED:
+		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+	case I40E_ERR_MASTER_REQUESTS_PENDING:
+		return "I40E_ERR_MASTER_REQUESTS_PENDING";
+	case I40E_ERR_INVALID_LINK_SETTINGS:
+		return "I40E_ERR_INVALID_LINK_SETTINGS";
+	case I40E_ERR_AUTONEG_NOT_COMPLETE:
+		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+	case I40E_ERR_RESET_FAILED:
+		return "I40E_ERR_RESET_FAILED";
+	case I40E_ERR_SWFW_SYNC:
+		return "I40E_ERR_SWFW_SYNC";
+	case I40E_ERR_NO_AVAILABLE_VSI:
+		return "I40E_ERR_NO_AVAILABLE_VSI";
+	case I40E_ERR_NO_MEMORY:
+		return "I40E_ERR_NO_MEMORY";
+	case I40E_ERR_BAD_PTR:
+		return "I40E_ERR_BAD_PTR";
+	case I40E_ERR_RING_FULL:
+		return "I40E_ERR_RING_FULL";
+	case I40E_ERR_INVALID_PD_ID:
+		return "I40E_ERR_INVALID_PD_ID";
+	case I40E_ERR_INVALID_QP_ID:
+		return "I40E_ERR_INVALID_QP_ID";
+	case I40E_ERR_INVALID_CQ_ID:
+		return "I40E_ERR_INVALID_CQ_ID";
+	case I40E_ERR_INVALID_CEQ_ID:
+		return "I40E_ERR_INVALID_CEQ_ID";
+	case I40E_ERR_INVALID_AEQ_ID:
+		return "I40E_ERR_INVALID_AEQ_ID";
+	case I40E_ERR_INVALID_SIZE:
+		return "I40E_ERR_INVALID_SIZE";
+	case I40E_ERR_INVALID_ARP_INDEX:
+		return "I40E_ERR_INVALID_ARP_INDEX";
+	case I40E_ERR_INVALID_FPM_FUNC_ID:
+		return "I40E_ERR_INVALID_FPM_FUNC_ID";
+	case I40E_ERR_QP_INVALID_MSG_SIZE:
+		return "I40E_ERR_QP_INVALID_MSG_SIZE";
+	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+	case I40E_ERR_INVALID_FRAG_COUNT:
+		return "I40E_ERR_INVALID_FRAG_COUNT";
+	case I40E_ERR_QUEUE_EMPTY:
+		return "I40E_ERR_QUEUE_EMPTY";
+	case I40E_ERR_INVALID_ALIGNMENT:
+		return "I40E_ERR_INVALID_ALIGNMENT";
+	case I40E_ERR_FLUSHED_QUEUE:
+		return "I40E_ERR_FLUSHED_QUEUE";
+	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+	case I40E_ERR_INVALID_IMM_DATA_SIZE:
+		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+	case I40E_ERR_TIMEOUT:
+		return "I40E_ERR_TIMEOUT";
+	case I40E_ERR_OPCODE_MISMATCH:
+		return "I40E_ERR_OPCODE_MISMATCH";
+	case I40E_ERR_CQP_COMPL_ERROR:
+		return "I40E_ERR_CQP_COMPL_ERROR";
+	case I40E_ERR_INVALID_VF_ID:
+		return "I40E_ERR_INVALID_VF_ID";
+	case I40E_ERR_INVALID_HMCFN_ID:
+		return "I40E_ERR_INVALID_HMCFN_ID";
+	case I40E_ERR_BACKING_PAGE_ERROR:
+		return "I40E_ERR_BACKING_PAGE_ERROR";
+	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+	case I40E_ERR_INVALID_PBLE_INDEX:
+		return "I40E_ERR_INVALID_PBLE_INDEX";
+	case I40E_ERR_INVALID_SD_INDEX:
+		return "I40E_ERR_INVALID_SD_INDEX";
+	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+	case I40E_ERR_INVALID_SD_TYPE:
+		return "I40E_ERR_INVALID_SD_TYPE";
+	case I40E_ERR_MEMCPY_FAILED:
+		return "I40E_ERR_MEMCPY_FAILED";
+	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+	case I40E_ERR_SRQ_ENABLED:
+		return "I40E_ERR_SRQ_ENABLED";
+	case I40E_ERR_ADMIN_QUEUE_ERROR:
+		return "I40E_ERR_ADMIN_QUEUE_ERROR";
+	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+	case I40E_ERR_BUF_TOO_SHORT:
+		return "I40E_ERR_BUF_TOO_SHORT";
+	case I40E_ERR_ADMIN_QUEUE_FULL:
+		return "I40E_ERR_ADMIN_QUEUE_FULL";
+	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+	case I40E_ERR_BAD_IWARP_CQE:
+		return "I40E_ERR_BAD_IWARP_CQE";
+	case I40E_ERR_NVM_BLANK_MODE:
+		return "I40E_ERR_NVM_BLANK_MODE";
+	case I40E_ERR_NOT_IMPLEMENTED:
+		return "I40E_ERR_NOT_IMPLEMENTED";
+	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+	case I40E_ERR_DIAG_TEST_FAILED:
+		return "I40E_ERR_DIAG_TEST_FAILED";
+	case I40E_ERR_NOT_READY:
+		return "I40E_ERR_NOT_READY";
+	case I40E_NOT_SUPPORTED:
+		return "I40E_NOT_SUPPORTED";
+	case I40E_ERR_FIRMWARE_API_VERSION:
+		return "I40E_ERR_FIRMWARE_API_VERSION";
+	}
+
+	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+	return hw->err_str;
+}
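Both decoders fall back to printing the raw numeric code into hw->err_str when the switch has no match, so callers always receive something printable; note that the buffer lives in the hw struct, so the returned pointer is only stable until the next unknown code is converted. The fallback idiom in isolation (struct ctx is a stand-in):

    const char *err_to_str(struct ctx *c, int err)
    {
            switch (err) {
            case 0:
                    return "OK";
            /* ... one case per known code ... */
            }

            /* Unknown code: format it into a per-context scratch buffer
             * (not reentrant; concurrent callers may overwrite it).
             */
            snprintf(c->err_str, sizeof(c->err_str), "%d", err);
            return c->err_str;
    }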
+
+/**
  * i40e_debug_aq
  * @hw: pointer to the hw struct
  * @mask: debug mask
@@ -177,6 +392,169 @@
 	return status;
 }
 
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+					   u16 vsi_id, bool pf_lut,
+					   u8 *lut, u16 lut_size,
+					   bool set)
+{
+	i40e_status status;
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_set_rss_lut *cmd_resp =
+		   (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+	if (set)
+		i40e_fill_default_direct_cmd_desc(&desc,
+						  i40e_aqc_opc_set_rss_lut);
+	else
+		i40e_fill_default_direct_cmd_desc(&desc,
+						  i40e_aqc_opc_get_rss_lut);
+
+	/* Indirect command */
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+	cmd_resp->vsi_id =
+			cpu_to_le16((u16)((vsi_id <<
+					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+	if (pf_lut)
+		cmd_resp->flags |= cpu_to_le16((u16)
+					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+	else
+		cmd_resp->flags |= cpu_to_le16((u16)
+					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+	cmd_resp->addr_high = cpu_to_le32(upper_32_bits((u64)lut));
+	cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
+
+	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+				bool pf_lut, u8 *lut, u16 lut_size)
+{
+	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+				       false);
+}
+
+/**
+ * i40e_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+				bool pf_lut, u8 *lut, u16 lut_size)
+{
+	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * Internal function to get or set the RSS key per VSI
+ **/
+static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+				      u16 vsi_id,
+				      struct i40e_aqc_get_set_rss_key_data *key,
+				      bool set)
+{
+	i40e_status status;
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_set_rss_key *cmd_resp =
+			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+	if (set)
+		i40e_fill_default_direct_cmd_desc(&desc,
+						  i40e_aqc_opc_set_rss_key);
+	else
+		i40e_fill_default_direct_cmd_desc(&desc,
+						  i40e_aqc_opc_get_rss_key);
+
+	/* Indirect command */
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+	cmd_resp->vsi_id =
+			cpu_to_le16((u16)((vsi_id <<
+					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+	cmd_resp->addr_high = cpu_to_le32(upper_32_bits((u64)key));
+	cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
+
+	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ **/
+i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+				u16 vsi_id,
+				struct i40e_aqc_get_set_rss_key_data *key)
+{
+	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40e_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+				u16 vsi_id,
+				struct i40e_aqc_get_set_rss_key_data *key)
+{
+	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+
 /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
  * hardware to a bit-field that can be used by SW to more easily determine the
  * packet type.
@@ -563,6 +941,7 @@
 
 	switch (hw->mac.type) {
 	case I40E_MAC_XL710:
+	case I40E_MAC_X722:
 		break;
 	default:
 		return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -1187,9 +1566,9 @@
 			blink = false;
 
 		if (blink)
-			gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
 		else
-			gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
 
 		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
 		break;
@@ -2391,7 +2770,7 @@
 #define I40E_DEV_FUNC_CAP_MSIX_VF	0x44
 #define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR	0x45
 #define I40E_DEV_FUNC_CAP_IEEE_1588	0x46
-#define I40E_DEV_FUNC_CAP_MFP_MODE_1	0xF1
+#define I40E_DEV_FUNC_CAP_FLEX10	0xF1
 #define I40E_DEV_FUNC_CAP_CEM		0xF2
 #define I40E_DEV_FUNC_CAP_IWARP		0x51
 #define I40E_DEV_FUNC_CAP_LED		0x61
@@ -2416,6 +2795,7 @@
 	u32 valid_functions, num_functions;
 	u32 number, logical_id, phys_id;
 	struct i40e_hw_capabilities *p;
+	u8 major_rev;
 	u32 i = 0;
 	u16 id;
 
@@ -2433,6 +2813,7 @@
 		number = le32_to_cpu(cap->number);
 		logical_id = le32_to_cpu(cap->logical_id);
 		phys_id = le32_to_cpu(cap->phys_id);
+		major_rev = cap->major_rev;
 
 		switch (id) {
 		case I40E_DEV_FUNC_CAP_SWITCH_MODE:
@@ -2507,9 +2888,21 @@
 		case I40E_DEV_FUNC_CAP_MSIX_VF:
 			p->num_msix_vectors_vf = number;
 			break;
-		case I40E_DEV_FUNC_CAP_MFP_MODE_1:
-			if (number == 1)
-				p->mfp_mode_1 = true;
+		case I40E_DEV_FUNC_CAP_FLEX10:
+			if (major_rev == 1) {
+				if (number == 1) {
+					p->flex10_enable = true;
+					p->flex10_capable = true;
+				}
+			} else {
+				/* Capability revision >= 2 */
+				if (number & 1)
+					p->flex10_enable = true;
+				if (number & 2)
+					p->flex10_capable = true;
+			}
+			p->flex10_mode = logical_id;
+			p->flex10_status = phys_id;
 			break;
 		case I40E_DEV_FUNC_CAP_CEM:
 			if (number == 1)
@@ -2557,7 +2950,7 @@
 	/* Software override ensuring FCoE is disabled if npar or mfp
 	 * mode because it is not supported in these modes.
 	 */
-	if (p->npar_enable || p->mfp_mode_1)
+	if (p->npar_enable || p->flex10_enable)
 		p->fcoe = false;
 
 	/* count the enabled ports (aka the "not disabled" ports) */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 2547aa2..90de46a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -588,6 +588,8 @@
 		if (!ret) {
 			/* CEE mode */
 			hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+			hw->local_dcbx_config.tlv_status =
+					le16_to_cpu(cee_v1_cfg.tlv_status);
 			i40e_cee_to_dcb_v1_config(&cee_v1_cfg,
 						  &hw->local_dcbx_config);
 		}
@@ -597,6 +599,8 @@
 		if (!ret) {
 			/* CEE mode */
 			hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+			hw->local_dcbx_config.tlv_status =
+					le32_to_cpu(cee_cfg.tlv_status);
 			i40e_cee_to_dcb_config(&cee_cfg,
 					       &hw->local_dcbx_config);
 		}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index e137e3f..50fc894 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -58,9 +58,9 @@
 #define I40E_IEEE_ETS_MAXTC_SHIFT	0
 #define I40E_IEEE_ETS_MAXTC_MASK	(0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
 #define I40E_IEEE_ETS_CBS_SHIFT		6
-#define I40E_IEEE_ETS_CBS_MASK		(0x1 << I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_CBS_MASK		BIT(I40E_IEEE_ETS_CBS_SHIFT)
 #define I40E_IEEE_ETS_WILLING_SHIFT	7
-#define I40E_IEEE_ETS_WILLING_MASK	(0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_WILLING_MASK	BIT(I40E_IEEE_ETS_WILLING_SHIFT)
 #define I40E_IEEE_ETS_PRIO_0_SHIFT	0
 #define I40E_IEEE_ETS_PRIO_0_MASK	(0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
 #define I40E_IEEE_ETS_PRIO_1_SHIFT	4
@@ -79,9 +79,9 @@
 #define I40E_IEEE_PFC_CAP_SHIFT		0
 #define I40E_IEEE_PFC_CAP_MASK		(0xF << I40E_IEEE_PFC_CAP_SHIFT)
 #define I40E_IEEE_PFC_MBC_SHIFT		6
-#define I40E_IEEE_PFC_MBC_MASK		(0x1 << I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_MBC_MASK		BIT(I40E_IEEE_PFC_MBC_SHIFT)
 #define I40E_IEEE_PFC_WILLING_SHIFT	7
-#define I40E_IEEE_PFC_WILLING_MASK	(0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
+#define I40E_IEEE_PFC_WILLING_MASK	BIT(I40E_IEEE_PFC_WILLING_SHIFT)
 
 /* Defines for IEEE APP TLV */
 #define I40E_IEEE_APP_SEL_SHIFT		0
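
The substitutions in this header are purely cosmetic: BIT(n) expands to
(1UL << (n)), so the generated masks are bit-for-bit identical to the old
open-coded shifts, just easier to read. A quick equivalence check, using
local copies of the macros:

    #include <assert.h>
    #include <stdio.h>

    /* Local stand-ins for the kernel's BIT()/BIT_ULL() helpers */
    #define BIT(n)          (1UL << (n))
    #define BIT_ULL(n)      (1ULL << (n))

    int main(void)
    {
        assert(BIT(6) == (0x1 << 6));           /* CBS mask */
        assert(BIT(7) == (0x1 << 7));           /* WILLING mask */
        assert(BIT_ULL(48) == ((unsigned long long)1 << 48));
        printf("BIT()/BIT_ULL() match the open-coded shifts\n");
        return 0;
    }
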
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index bd5079d..1c51f73 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -187,7 +187,7 @@
 	/* Set up all the App TLVs if DCBx is negotiated */
 	for (i = 0; i < dcbxcfg->numapps; i++) {
 		prio = dcbxcfg->app[i].priority;
-		tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]);
+		tc_map = BIT(dcbxcfg->etscfg.prioritytable[prio]);
 
 		/* Add APP only if the TC is enabled for this VSI */
 		if (tc_map & vsi->tc_config.enabled_tc) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index da0faf4..d7c15d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -964,7 +964,7 @@
 		pf->auto_disable_flags |= flag;
 	}
 	dev_info(&pf->pdev->dev, "requesting a PF reset\n");
-	i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+	i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
 }
 
 #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
@@ -1471,19 +1471,19 @@
 		}
 	} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
 		dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
-		i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "corer", 5) == 0) {
 		dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
-		i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "globr", 5) == 0) {
 		dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
-		i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "empr", 4) == 0) {
 		dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
-		i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "read", 4) == 0) {
 		u32 address;
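
The debugfs parser above is a strncmp() chain that maps short command
strings ("pfr", "corer", "globr", "empr") onto reset-request bits before
calling i40e_do_reset_safe(). The same dispatch, table-driven, as a
userspace sketch (the bit positions are hypothetical stand-ins for the
__I40E_*_RESET_REQUESTED enum):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define BIT_ULL(n)      (1ULL << (n))

    enum { PF_RESET, CORE_RESET, GLOBAL_RESET, EMP_RESET };

    static const struct {
        const char *cmd;
        int bit;
    } reset_cmds[] = {
        { "pfr",   PF_RESET },
        { "corer", CORE_RESET },
        { "globr", GLOBAL_RESET },
        { "empr",  EMP_RESET },
    };

    /* Return the reset flag for a command, or 0 if unrecognized. */
    static uint64_t reset_flag_for(const char *cmd_buf)
    {
        size_t i;

        for (i = 0; i < sizeof(reset_cmds) / sizeof(reset_cmds[0]); i++)
            if (!strncmp(cmd_buf, reset_cmds[i].cmd,
                         strlen(reset_cmds[i].cmd)))
                return BIT_ULL(reset_cmds[i].bit);
        return 0;
    }

    int main(void)
    {
        printf("globr -> 0x%llx\n",
               (unsigned long long)reset_flag_for("globr"));
        return 0;
    }
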
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index 56438bd..f141e78 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -144,11 +144,8 @@
 	ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
 	if (!ret_code &&
 	    ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
-	     (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
-		ret_code = i40e_validate_nvm_checksum(hw, NULL);
-	} else {
-		ret_code = I40E_ERR_DIAG_TEST_FAILED;
-	}
-
-	return ret_code;
+	     BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
+		return i40e_validate_nvm_checksum(hw, NULL);
+	else
+		return I40E_ERR_DIAG_TEST_FAILED;
 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9a68c65..e972b5e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -114,7 +114,7 @@
 	I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
 	I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
 	I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
-	I40E_PF_STAT("crc_errors", stats.crc_errors),
+	I40E_PF_STAT("rx_crc_errors", stats.crc_errors),
 	I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
 	I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
 	I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
@@ -148,7 +148,9 @@
 	I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
 	I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
 	I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
+	I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status),
 	I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
+	I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status),
 
 	/* LPI stats */
 	I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
@@ -195,7 +197,14 @@
 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
 		 / sizeof(u64))
+#define I40E_VEB_TC_STATS_LEN ( \
+		(FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_packets) + \
+		 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_bytes) + \
+		 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \
+		 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \
+		 / sizeof(u64))
 #define I40E_VEB_STATS_LEN	ARRAY_SIZE(i40e_gstrings_veb_stats)
+#define I40E_VEB_STATS_TOTAL	(I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN)
 #define I40E_PF_STATS_LEN(n)	(I40E_GLOBAL_STATS_LEN + \
 				 I40E_PFC_STATS_LEN + \
 				 I40E_VSI_STATS_LEN((n)))
@@ -679,15 +688,17 @@
 		/* make the aq call */
 		status = i40e_aq_set_phy_config(hw, &config, NULL);
 		if (status) {
-			netdev_info(netdev, "Set phy config failed with error %d.\n",
-				    status);
+			netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n",
+				    i40e_stat_str(hw, status),
+				    i40e_aq_str(hw, hw->aq.asq_last_status));
 			return -EAGAIN;
 		}
 
 		status = i40e_aq_get_link_info(hw, true, NULL, NULL);
 		if (status)
-			netdev_info(netdev, "Updating link info failed with error %d\n",
-				    status);
+			netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n",
+				    i40e_stat_str(hw, status),
+				    i40e_aq_str(hw, hw->aq.asq_last_status));
 
 	} else {
 		netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -707,8 +718,9 @@
 
 	ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
 	if (ret) {
-		netdev_info(netdev, "link restart failed, aq_err=%d\n",
-			    pf->hw.aq.asq_last_status);
+		netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
+			    i40e_stat_str(hw, ret),
+			    i40e_aq_str(hw, hw->aq.asq_last_status));
 		return -EIO;
 	}
 
@@ -820,18 +832,21 @@
 	status = i40e_set_fc(hw, &aq_failures, link_up);
 
 	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
-		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n",
-			    status, hw->aq.asq_last_status);
+		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
+			    i40e_stat_str(hw, status),
+			    i40e_aq_str(hw, hw->aq.asq_last_status));
 		err = -EAGAIN;
 	}
 	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
-		netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n",
-			    status, hw->aq.asq_last_status);
+		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
+			    i40e_stat_str(hw, status),
+			    i40e_aq_str(hw, hw->aq.asq_last_status));
 		err = -EAGAIN;
 	}
 	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
-		netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n",
-			    status, hw->aq.asq_last_status);
+		netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
+			    i40e_stat_str(hw, status),
+			    i40e_aq_str(hw, hw->aq.asq_last_status));
 		err = -EAGAIN;
 	}
 
@@ -1009,7 +1024,7 @@
 		& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
 		>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
 	/* register returns value in power of 2, 64Kbyte chunks. */
-	val = (64 * 1024) * (1 << val);
+	val = (64 * 1024) * BIT(val);
 	return val;
 }
 
@@ -1249,7 +1264,7 @@
 			int len = I40E_PF_STATS_LEN(netdev);
 
 			if (pf->lan_veb != I40E_NO_VEB)
-				len += I40E_VEB_STATS_LEN;
+				len += I40E_VEB_STATS_TOTAL;
 			return len;
 		} else {
 			return I40E_VSI_STATS_LEN(netdev);
@@ -1400,6 +1415,20 @@
 					i40e_gstrings_veb_stats[i].stat_string);
 				p += ETH_GSTRING_LEN;
 			}
+			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+				snprintf(p, ETH_GSTRING_LEN,
+					 "veb.tc_%u_tx_packets", i);
+				p += ETH_GSTRING_LEN;
+				snprintf(p, ETH_GSTRING_LEN,
+					 "veb.tc_%u_tx_bytes", i);
+				p += ETH_GSTRING_LEN;
+				snprintf(p, ETH_GSTRING_LEN,
+					 "veb.tc_%u_rx_packets", i);
+				p += ETH_GSTRING_LEN;
+				snprintf(p, ETH_GSTRING_LEN,
+					 "veb.tc_%u_rx_bytes", i);
+				p += ETH_GSTRING_LEN;
+			}
 		}
 		for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
 			snprintf(p, ETH_GSTRING_LEN, "port.%s",
@@ -1462,20 +1491,11 @@
 	else
 		info->phc_index = -1;
 
-	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
 
-	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
-			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
 
 	return 0;
 }
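
The rx_filters trim above should not cost userspace anything: the hardware
is only ever programmed with the coarse V1 and V2 "event" filters, and the
driver's PTP configuration path (not shown here) upgrades finer
SYNC/DELAY_REQ requests to the matching event filter anyway, so advertising
only what is actually delivered is the honest answer. The resulting bitmap,
with enum values mirroring <linux/net_tstamp.h>:

    #include <stdio.h>

    #define BIT(n)  (1UL << (n))

    /* Values as in enum hwtstamp_rx_filters */
    enum {
        HWTSTAMP_FILTER_NONE            = 0,
        HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 3,
        HWTSTAMP_FILTER_PTP_V2_EVENT    = 12,
    };

    int main(void)
    {
        unsigned long rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
                                   BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
                                   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);

        printf("rx_filters = 0x%04lx\n", rx_filters);
        return 0;
    }
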
@@ -1560,6 +1580,21 @@
 	return false;
 }
 
+static inline bool i40e_active_vmdqs(struct i40e_pf *pf)
+{
+	struct i40e_vsi **vsi = pf->vsi;
+	int i;
+
+	for (i = 0; i < pf->num_alloc_vsi; i++) {
+		if (!vsi[i])
+			continue;
+		if (vsi[i]->type == I40E_VSI_VMDQ2)
+			return true;
+	}
+
+	return false;
+}
+
 static void i40e_diag_test(struct net_device *netdev,
 			   struct ethtool_test *eth_test, u64 *data)
 {
@@ -1573,9 +1608,9 @@
 
 		set_bit(__I40E_TESTING, &pf->state);
 
-		if (i40e_active_vfs(pf)) {
+		if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
 			dev_warn(&pf->pdev->dev,
-				 "Please take active VFS offline and restart the adapter before running NIC diagnostics\n");
+				 "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
 			data[I40E_ETH_TEST_REG]		= 1;
 			data[I40E_ETH_TEST_EEPROM]	= 1;
 			data[I40E_ETH_TEST_INTR]	= 1;
@@ -1591,11 +1626,13 @@
 			/* indicate we're in test mode */
 			dev_close(netdev);
 		else
-			i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+			/* This reset does not affect link - if it is
+			 * changed to a type of reset that does affect
+			 * link then the following link test would have
+			 * to be moved to before the reset
+			 */
+			i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
-		/* Link test performed before hardware reset
-		 * so autoneg doesn't interfere with test result
-		 */
 		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1613,7 +1650,7 @@
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		clear_bit(__I40E_TESTING, &pf->state);
-		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+		i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
 		if (if_running)
 			dev_open(netdev);
@@ -1646,7 +1683,7 @@
 
 	/* NVM bit on means WoL disabled for the port */
 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-	if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
+	if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) {
 		wol->supported = 0;
 		wol->wolopts = 0;
 	} else {
@@ -1679,7 +1716,7 @@
 
 	/* NVM bit on means WoL disabled for the port */
 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-	if (((1 << hw->port) & wol_nvm_bits))
+	if (BIT(hw->port) & wol_nvm_bits)
 		return -EOPNOTSUPP;
 
 	/* only magic packet is supported */
@@ -2025,10 +2062,10 @@
 	case TCP_V4_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 			break;
 		default:
 			return -EINVAL;
@@ -2037,10 +2074,10 @@
 	case TCP_V6_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
 			break;
 		default:
 			return -EINVAL;
@@ -2049,12 +2086,12 @@
 	case UDP_V4_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
 			break;
 		default:
 			return -EINVAL;
@@ -2063,12 +2100,12 @@
 	case UDP_V6_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
 			break;
 		default:
 			return -EINVAL;
@@ -2081,7 +2118,7 @@
 		if ((nfc->data & RXH_L4_B_0_1) ||
 		    (nfc->data & RXH_L4_B_2_3))
 			return -EINVAL;
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
 		break;
 	case AH_ESP_V6_FLOW:
 	case AH_V6_FLOW:
@@ -2090,15 +2127,15 @@
 		if ((nfc->data & RXH_L4_B_0_1) ||
 		    (nfc->data & RXH_L4_B_2_3))
 			return -EINVAL;
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
 		break;
 	case IPV4_FLOW:
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
-			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+			BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
 		break;
 	case IPV6_FLOW:
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
-			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+			BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
 		break;
 	default:
 		return -EINVAL;
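
All of these hunks edit the same object: the 64-bit "hena" hash-enable
mask, split across the two I40E_PFQF_HENA() registers, with one bit per
packet classifier type. Enabling or disabling hashing for a flow family is
a BIT_ULL() set or clear, and UDP always travels together with its
fragment classifier. A sketch with hypothetical bit positions (the real
values live in enum i40e_filter_pctype):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n)      (1ULL << (n))

    /* Hypothetical classifier bit positions, for illustration only */
    enum { PCTYPE_IPV4_TCP = 33, PCTYPE_IPV4_UDP = 31,
           PCTYPE_FRAG_IPV4 = 36 };

    int main(void)
    {
        uint64_t hena = 0;

        /* enable 4-tuple hashing for TCP/IPv4 */
        hena |= BIT_ULL(PCTYPE_IPV4_TCP);
        /* UDP/IPv4 and its fragment classifier toggle as a unit */
        hena |= BIT_ULL(PCTYPE_IPV4_UDP) | BIT_ULL(PCTYPE_FRAG_IPV4);
        /* ...and back off again */
        hena &= ~(BIT_ULL(PCTYPE_IPV4_UDP) | BIT_ULL(PCTYPE_FRAG_IPV4));

        printf("hena = 0x%016llx\n", (unsigned long long)hena);
        return 0;
    }
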
@@ -2509,7 +2546,7 @@
  * @indir: indirection table
  * @key: hash key
  *
- * Returns -EINVAL if the table specifies an inavlid queue id, otherwise
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
  * returns 0 after programming the table.
  **/
 static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index c8b621e..5ea75dd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -298,8 +298,8 @@
 
 	/* enable FCoE hash filter */
 	val = rd32(hw, I40E_PFQF_HENA(1));
-	val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32);
-	val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32);
+	val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
+	val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);
 	val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
 	wr32(hw, I40E_PFQF_HENA(1), val);
 
@@ -308,10 +308,10 @@
 	pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
 
 	/* Reserve 4K DDP contexts and 20K filter size for FCoE */
-	pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) *
-				 I40E_DMA_CNTX_BASE_SIZE;
+	pf->fcoe_hmc_cntx_num = BIT(I40E_DMA_CNTX_SIZE_4K) *
+				I40E_DMA_CNTX_BASE_SIZE;
 	pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num +
-				(1 << I40E_HASH_FILTER_SIZE_16K) *
+				BIT(I40E_HASH_FILTER_SIZE_16K) *
 				I40E_HASH_FILTER_BASE_SIZE;
 
 	/* FCoE object: max 16K filter buckets and 4K DMA contexts */
@@ -348,7 +348,7 @@
 		if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
 		    app.protocolid == ETH_P_FCOE) {
 			tc = dcbcfg->etscfg.prioritytable[app.priority];
-			enabled_tc |= (1 << tc);
+			enabled_tc |= BIT(tc);
 			break;
 		}
 	}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
index 0d49e2d..a93174d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
@@ -59,9 +59,9 @@
 	(((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1)
 
 #define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT	\
-	(1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
+	BIT(I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
 #define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT	\
-	(1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
+	BIT(I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
 
 #define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e)	\
 	I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 9b987cc..5ebe12d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -116,6 +116,7 @@
  * @hw: pointer to our HW structure
  * @hmc_info: pointer to the HMC configuration information structure
  * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use this preallocated page instead of allocating a new one.
  *
  * This function:
  *	1. Initializes the pd entry
@@ -129,12 +130,14 @@
  **/
 i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
 					      struct i40e_hmc_info *hmc_info,
-					      u32 pd_index)
+					      u32 pd_index,
+					      struct i40e_dma_mem *rsrc_pg)
 {
 	i40e_status ret_code = 0;
 	struct i40e_hmc_pd_table *pd_table;
 	struct i40e_hmc_pd_entry *pd_entry;
 	struct i40e_dma_mem mem;
+	struct i40e_dma_mem *page = &mem;
 	u32 sd_idx, rel_pd_idx;
 	u64 *pd_addr;
 	u64 page_desc;
@@ -155,18 +158,24 @@
 	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
 	pd_entry = &pd_table->pd_entry[rel_pd_idx];
 	if (!pd_entry->valid) {
-		/* allocate a 4K backing page */
-		ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
-						 I40E_HMC_PAGED_BP_SIZE,
-						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
-		if (ret_code)
-			goto exit;
+		if (rsrc_pg) {
+			pd_entry->rsrc_pg = true;
+			page = rsrc_pg;
+		} else {
+			/* allocate a 4K backing page */
+			ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+						I40E_HMC_PAGED_BP_SIZE,
+						I40E_HMC_PD_BP_BUF_ALIGNMENT);
+			if (ret_code)
+				goto exit;
+			pd_entry->rsrc_pg = false;
+		}
 
-		pd_entry->bp.addr = mem;
+		pd_entry->bp.addr = *page;
 		pd_entry->bp.sd_pd_index = pd_index;
 		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
 		/* Set page address and valid bit */
-		page_desc = mem.pa | 0x1;
+		page_desc = page->pa | 0x1;
 
 		pd_addr = (u64 *)pd_table->pd_page_addr.va;
 		pd_addr += rel_pd_idx;
@@ -240,7 +249,8 @@
 	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
 
 	/* free memory here */
-	ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+	if (!pd_entry->rsrc_pg)
+		ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr);
 	if (ret_code)
 		goto exit;
 	if (!pd_table->ref_cnt)
@@ -287,21 +297,15 @@
 					    u32 idx, bool is_pf)
 {
 	struct i40e_hmc_sd_entry *sd_entry;
-	i40e_status ret_code = 0;
+
+	if (!is_pf)
+		return I40E_NOT_SUPPORTED;
 
 	/* get the entry and decrease its ref counter */
 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
-	if (is_pf) {
-		I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
-	} else {
-		ret_code = I40E_NOT_SUPPORTED;
-		goto exit;
-	}
-	ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
-	if (ret_code)
-		goto exit;
-exit:
-	return ret_code;
+	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+	return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr);
 }
 
 /**
@@ -341,20 +345,13 @@
 					      struct i40e_hmc_info *hmc_info,
 					      u32 idx, bool is_pf)
 {
-	i40e_status ret_code = 0;
 	struct i40e_hmc_sd_entry *sd_entry;
 
+	if (!is_pf)
+		return I40E_NOT_SUPPORTED;
+
 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
-	if (is_pf) {
-		I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
-	} else {
-		ret_code = I40E_NOT_SUPPORTED;
-		goto exit;
-	}
-	/* free memory here */
-	ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
-	if (ret_code)
-		goto exit;
-exit:
-	return ret_code;
+	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+	return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr);
 }
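
Both helpers above lose their goto ladders the same way: the unsupported
VF case is rejected up front, which leaves a straight-line PF path whose
final statement can return the free routine's status directly. The shape
of the refactor, reduced to a runnable toy:

    #include <stdbool.h>
    #include <stdio.h>

    enum status { OK = 0, NOT_SUPPORTED = -1 };

    static enum status free_resource(int idx)
    {
        printf("freeing entry %d\n", idx);
        return OK;
    }

    /* Early return for the unsupported caller; the happy path then
     * needs no ret_code variable or exit label at all.
     */
    static enum status remove_entry(int idx, bool is_pf)
    {
        if (!is_pf)
            return NOT_SUPPORTED;

        /* ...clear hardware state for idx here... */
        return free_resource(idx);
    }

    int main(void)
    {
        printf("pf: %d, vf: %d\n",
               remove_entry(3, true), remove_entry(3, false));
        return 0;
    }
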
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 732a026..d906692 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -62,6 +62,7 @@
 struct i40e_hmc_pd_entry {
 	struct i40e_hmc_bp bp;
 	u32 sd_index;
+	bool rsrc_pg;
 	bool valid;
 };
 
@@ -126,8 +127,8 @@
 		 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
 		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
 		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |			\
-		(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
-	val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+		BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
+	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
 	wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);			\
 	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
 	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
@@ -146,7 +147,7 @@
 		I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
 		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
 		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);			\
-	val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
 	wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);				\
 	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
 	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
@@ -218,7 +219,8 @@
 
 i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
 					      struct i40e_hmc_info *hmc_info,
-					      u32 pd_index);
+					      u32 pd_index,
+					      struct i40e_dma_mem *rsrc_pg);
 i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
 					struct i40e_hmc_info *hmc_info,
 					u32 idx);
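
The new rsrc_pg argument to i40e_add_pd_table_entry() is an ownership
handshake: a caller may hand in a page it already holds, the entry records
that fact in the rsrc_pg flag added to struct i40e_hmc_pd_entry, and the
teardown path then skips the free for caller-owned memory. The pattern in
miniature, with plain userspace types standing in for the DMA structures:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct buf {
        void *va;
        size_t size;
    };

    struct pd_entry {
        struct buf bp;
        bool rsrc_pg;   /* true: memory belongs to the caller */
        bool valid;
    };

    /* Use the caller's page if one is supplied, otherwise allocate. */
    static int add_entry(struct pd_entry *e, struct buf *rsrc_pg)
    {
        if (rsrc_pg) {
            e->bp = *rsrc_pg;
            e->rsrc_pg = true;
        } else {
            e->bp.size = 4096;
            e->bp.va = calloc(1, e->bp.size);
            if (!e->bp.va)
                return -1;
            e->rsrc_pg = false;
        }
        e->valid = true;
        return 0;
    }

    /* Only free what add_entry() allocated itself. */
    static void remove_entry(struct pd_entry *e)
    {
        if (!e->rsrc_pg)
            free(e->bp.va);
        e->valid = false;
    }

    int main(void)
    {
        struct pd_entry e = { { NULL, 0 }, false, false };

        if (!add_entry(&e, NULL))
            remove_entry(&e);
        printf("entry torn down, valid=%d\n", e.valid);
        return 0;
    }
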
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index 0079ad7..fa371a2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -129,7 +129,7 @@
 	obj->cnt = txq_num;
 	obj->base = 0;
 	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
-	obj->size = (u64)1 << size_exp;
+	obj->size = BIT_ULL(size_exp);
 
 	/* validate values requested by driver don't exceed HMC capacity */
 	if (txq_num > obj->max_cnt) {
@@ -152,7 +152,7 @@
 		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
 	obj->base = i40e_align_l2obj_base(obj->base);
 	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
-	obj->size = (u64)1 << size_exp;
+	obj->size = BIT_ULL(size_exp);
 
 	/* validate values requested by driver don't exceed HMC capacity */
 	if (rxq_num > obj->max_cnt) {
@@ -175,7 +175,7 @@
 		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
 	obj->base = i40e_align_l2obj_base(obj->base);
 	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
-	obj->size = (u64)1 << size_exp;
+	obj->size = BIT_ULL(size_exp);
 
 	/* validate values requested by driver don't exceed HMC capacity */
 	if (fcoe_cntx_num > obj->max_cnt) {
@@ -198,7 +198,7 @@
 		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
 	obj->base = i40e_align_l2obj_base(obj->base);
 	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
-	obj->size = (u64)1 << size_exp;
+	obj->size = BIT_ULL(size_exp);
 
 	/* validate values requested by driver don't exceed HMC capacity */
 	if (fcoe_filt_num > obj->max_cnt) {
@@ -387,7 +387,7 @@
 				/* update the pd table entry */
 				ret_code = i40e_add_pd_table_entry(hw,
 								info->hmc_info,
-								i);
+								i, NULL);
 				if (ret_code) {
 					pd_error = true;
 					break;
@@ -763,7 +763,7 @@
 
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
-	mask = ((u8)1 << ce_info->width) - 1;
+	mask = BIT(ce_info->width) - 1;
 
 	src_byte = *from;
 	src_byte &= mask;
@@ -804,7 +804,7 @@
 
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
-	mask = ((u16)1 << ce_info->width) - 1;
+	mask = BIT(ce_info->width) - 1;
 
 	/* don't swizzle the bits until after the mask because the mask bits
 	 * will be in a different bit position on big endian machines
@@ -854,7 +854,7 @@
 	 * to 5 bits so the shift will do nothing
 	 */
 	if (ce_info->width < 32)
-		mask = ((u32)1 << ce_info->width) - 1;
+		mask = BIT(ce_info->width) - 1;
 	else
 		mask = ~(u32)0;
 
@@ -906,7 +906,7 @@
 	 * to 6 bits so the shift will do nothing
 	 */
 	if (ce_info->width < 64)
-		mask = ((u64)1 << ce_info->width) - 1;
+		mask = BIT_ULL(ce_info->width) - 1;
 	else
 		mask = ~(u64)0;
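
One subtlety in the mask conversions above: they stay guarded by the field
width. "BIT_ULL(width) - 1" yields the low-width-bits mask only while
width < 64; shifting a 64-bit value by 64 or more is undefined behaviour
in C, so the full-width case must fall back to ~0 explicitly, exactly as
the surrounding code does. Standalone:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n)      (1ULL << (n))

    /* Mask covering the low 'width' bits; a shift by the full type
     * width would be undefined, hence the explicit fallback.
     */
    static uint64_t field_mask(unsigned int width)
    {
        if (width < 64)
            return BIT_ULL(width) - 1;
        return ~(uint64_t)0;
    }

    int main(void)
    {
        printf("width 5:  0x%llx\n", (unsigned long long)field_mask(5));
        printf("width 48: 0x%llx\n", (unsigned long long)field_mask(48));
        printf("width 64: 0x%llx\n", (unsigned long long)field_mask(64));
        return 0;
    }
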
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 48a52b3..851c1a1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 4
+#define DRV_VERSION_BUILD 9
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -76,6 +76,9 @@
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
 	/* required last entry */
 	{0, }
 };
@@ -520,7 +523,7 @@
 	if (likely(new_data >= *offset))
 		*stat = new_data - *offset;
 	else
-		*stat = (new_data + ((u64)1 << 48)) - *offset;
+		*stat = (new_data + BIT_ULL(48)) - *offset;
 	*stat &= 0xFFFFFFFFFFFFULL;
 }
 
@@ -543,7 +546,7 @@
 	if (likely(new_data >= *offset))
 		*stat = (u32)(new_data - *offset);
 	else
-		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
+		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
 }
 
 /**
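
The helpers touched above exist because the hardware statistics counters
are free-running and narrower than 64 bits (48-bit and 32-bit variants).
When a fresh read comes back smaller than the saved offset, the counter
wrapped exactly once since the snapshot, so one full period is added back
before subtracting. The 48-bit case, worked as a standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n)      (1ULL << (n))

    /* How far a 48-bit free-running counter has advanced past the
     * 'offset' snapshot, tolerating a single wrap in between.
     */
    static void stat_update48(uint64_t new_data, uint64_t offset,
                              uint64_t *stat)
    {
        if (new_data >= offset)
            *stat = new_data - offset;
        else
            *stat = (new_data + BIT_ULL(48)) - offset;
        *stat &= 0xFFFFFFFFFFFFULL;     /* keep 48 bits */
    }

    int main(void)
    {
        uint64_t stat;

        /* snapshot near the top of the range, new value just past it */
        stat_update48(0x000000000010ULL, 0xFFFFFFFFFFF0ULL, &stat);
        printf("delta across the wrap = %llu\n",
               (unsigned long long)stat);      /* prints 32 */
        return 0;
    }
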
@@ -621,11 +624,15 @@
 	struct i40e_hw *hw = &pf->hw;
 	struct i40e_eth_stats *oes;
 	struct i40e_eth_stats *es;     /* device's eth stats */
-	int idx = 0;
+	struct i40e_veb_tc_stats *veb_oes;
+	struct i40e_veb_tc_stats *veb_es;
+	int i, idx = 0;
 
 	idx = veb->stats_idx;
 	es = &veb->stats;
 	oes = &veb->stats_offsets;
+	veb_es = &veb->tc_stats;
+	veb_oes = &veb->tc_stats_offsets;
 
 	/* Gather up the stats that the hw collects */
 	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
@@ -661,6 +668,28 @@
 	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
 			   veb->stat_offsets_loaded,
 			   &oes->tx_broadcast, &es->tx_broadcast);
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
+				   I40E_GLVEBTC_RPCL(i, idx),
+				   veb->stat_offsets_loaded,
+				   &veb_oes->tc_rx_packets[i],
+				   &veb_es->tc_rx_packets[i]);
+		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
+				   I40E_GLVEBTC_RBCL(i, idx),
+				   veb->stat_offsets_loaded,
+				   &veb_oes->tc_rx_bytes[i],
+				   &veb_es->tc_rx_bytes[i]);
+		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
+				   I40E_GLVEBTC_TPCL(i, idx),
+				   veb->stat_offsets_loaded,
+				   &veb_oes->tc_tx_packets[i],
+				   &veb_es->tc_tx_packets[i]);
+		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
+				   I40E_GLVEBTC_TBCL(i, idx),
+				   veb->stat_offsets_loaded,
+				   &veb_oes->tc_tx_bytes[i],
+				   &veb_es->tc_tx_bytes[i]);
+	}
 	veb->stat_offsets_loaded = true;
 }
 
@@ -1123,6 +1152,18 @@
 			   pf->stat_offsets_loaded,
 			   &osd->rx_lpi_count, &nsd->rx_lpi_count);
 
+	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
+	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+		nsd->fd_sb_status = true;
+	else
+		nsd->fd_sb_status = false;
+
+	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
+	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+		nsd->fd_atr_status = true;
+	else
+		nsd->fd_atr_status = false;
+
 	pf->stat_offsets_loaded = true;
 }
 
@@ -1240,6 +1281,8 @@
 	struct i40e_mac_filter *f;
 
 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
+		if (vsi->info.pvid)
+			f->vlan = le16_to_cpu(vsi->info.pvid);
 		if (!i40e_find_filter(vsi, macaddr, f->vlan,
 				      is_vf, is_netdev)) {
 			if (!i40e_add_filter(vsi, macaddr, f->vlan,
@@ -1264,7 +1307,7 @@
 {
 	struct i40e_aqc_remove_macvlan_element_data element;
 	struct i40e_pf *pf = vsi->back;
-	i40e_status aq_ret;
+	i40e_status ret;
 
 	/* Only appropriate for the PF main VSI */
 	if (vsi->type != I40E_VSI_MAIN)
@@ -1275,8 +1318,8 @@
 	element.vlan_tag = 0;
 	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
 			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
-	aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
-	if (aq_ret)
+	ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+	if (ret)
 		return -ENOENT;
 
 	return 0;
@@ -1514,7 +1557,7 @@
 	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
 		/* Find numtc from enabled TC bitmap */
 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-			if (enabled_tc & (1 << i)) /* TC is enabled */
+			if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
 				numtc++;
 		}
 		if (!numtc) {
@@ -1533,14 +1576,18 @@
 	 * vectors available and so we need to lower the used
 	 * q count.
 	 */
-	qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+		qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+	else
+		qcount = vsi->alloc_queue_pairs;
 	num_tc_qps = qcount / numtc;
-	num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
+	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
 
 	/* Setup queue offset/count for all TCs for given VSI */
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 		/* See if the given TC is enabled for the given VSI */
-		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
+		if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
+			/* TC is enabled */
 			int pow, num_qps;
 
 			switch (vsi->type) {
@@ -1566,7 +1613,7 @@
 			/* find the next higher power-of-2 of num queue pairs */
 			num_qps = qcount;
 			pow = 0;
-			while (num_qps && ((1 << pow) < qcount)) {
+			while (num_qps && (BIT_ULL(pow) < qcount)) {
 				pow++;
 				num_qps >>= 1;
 			}
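
The loop converted above computes the exponent of the smallest power of
two covering the per-TC queue count; the result presumably feeds the
power-of-two-sized queue-mapping fields, per the "find the next higher
power-of-2" comment in the surrounding code. Extracted and runnable on
its own:

    #include <stdio.h>

    #define BIT_ULL(n)      (1ULL << (n))

    /* Exponent of the smallest power of two >= qcount (qcount > 0). */
    static int qcount_pow(unsigned long long qcount)
    {
        unsigned long long num_qps = qcount;
        int pow = 0;

        while (num_qps && (BIT_ULL(pow) < qcount)) {
            pow++;
            num_qps >>= 1;
        }
        return pow;
    }

    int main(void)
    {
        printf("qcount 1 -> 2^%d\n", qcount_pow(1));    /* 2^0 */
        printf("qcount 5 -> 2^%d\n", qcount_pow(5));    /* 2^3 */
        printf("qcount 8 -> 2^%d\n", qcount_pow(8));    /* 2^3 */
        return 0;
    }
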
@@ -1596,7 +1643,7 @@
 	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
 		if (vsi->req_queue_pairs > 0)
 			vsi->num_queue_pairs = vsi->req_queue_pairs;
-		else
+		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
 			vsi->num_queue_pairs = pf->num_lan_msix;
 	}
 
@@ -1716,10 +1763,11 @@
 	bool add_happened = false;
 	int filter_list_len = 0;
 	u32 changed_flags = 0;
-	i40e_status aq_ret = 0;
+	i40e_status ret = 0;
 	struct i40e_pf *pf;
 	int num_add = 0;
 	int num_del = 0;
+	int aq_err = 0;
 	u16 cmd_flags;
 
 	/* empty array typed pointers, kcalloc later */
@@ -1771,31 +1819,31 @@
 
 			/* flush a full buffer */
 			if (num_del == filter_list_len) {
-				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
-					    vsi->seid, del_list, num_del,
-					    NULL);
+				ret = i40e_aq_remove_macvlan(&pf->hw,
+						  vsi->seid, del_list, num_del,
+						  NULL);
+				aq_err = pf->hw.aq.asq_last_status;
 				num_del = 0;
 				memset(del_list, 0, sizeof(*del_list));
 
-				if (aq_ret &&
-				    pf->hw.aq.asq_last_status !=
-							      I40E_AQ_RC_ENOENT)
+				if (ret && aq_err != I40E_AQ_RC_ENOENT)
 					dev_info(&pf->pdev->dev,
-						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
-						 aq_ret,
-						 pf->hw.aq.asq_last_status);
+						 "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
+						 i40e_stat_str(&pf->hw, ret),
+						 i40e_aq_str(&pf->hw, aq_err));
 			}
 		}
 		if (num_del) {
-			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+			ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
 						     del_list, num_del, NULL);
+			aq_err = pf->hw.aq.asq_last_status;
 			num_del = 0;
 
-			if (aq_ret &&
-			    pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
+			if (ret && aq_err != I40E_AQ_RC_ENOENT)
 				dev_info(&pf->pdev->dev,
-					 "ignoring delete macvlan error, err %d, aq_err %d\n",
-					 aq_ret, pf->hw.aq.asq_last_status);
+					 "ignoring delete macvlan error, err %s aq_err %s\n",
+					 i40e_stat_str(&pf->hw, ret),
+					 i40e_aq_str(&pf->hw, aq_err));
 		}
 
 		kfree(del_list);
@@ -1833,29 +1881,31 @@
 
 			/* flush a full buffer */
 			if (num_add == filter_list_len) {
-				aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-							     add_list, num_add,
-							     NULL);
+				ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+							  add_list, num_add,
+							  NULL);
+				aq_err = pf->hw.aq.asq_last_status;
 				num_add = 0;
 
-				if (aq_ret)
+				if (ret)
 					break;
 				memset(add_list, 0, sizeof(*add_list));
 			}
 		}
 		if (num_add) {
-			aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-						     add_list, num_add, NULL);
+			ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+						  add_list, num_add, NULL);
+			aq_err = pf->hw.aq.asq_last_status;
 			num_add = 0;
 		}
 		kfree(add_list);
 		add_list = NULL;
 
-		if (add_happened && aq_ret &&
-		    pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
+		if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
 			dev_info(&pf->pdev->dev,
-				 "add filter failed, err %d, aq_err %d\n",
-				 aq_ret, pf->hw.aq.asq_last_status);
+				 "add filter failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw, aq_err));
 			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
 			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
 				      &vsi->state)) {
@@ -1871,34 +1921,60 @@
 	if (changed_flags & IFF_ALLMULTI) {
 		bool cur_multipromisc;
 		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
-		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
-							       vsi->seid,
-							       cur_multipromisc,
-							       NULL);
-		if (aq_ret)
+		ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+							    vsi->seid,
+							    cur_multipromisc,
+							    NULL);
+		if (ret)
 			dev_info(&pf->pdev->dev,
-				 "set multi promisc failed, err %d, aq_err %d\n",
-				 aq_ret, pf->hw.aq.asq_last_status);
+				 "set multi promisc failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 	}
 	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
 		bool cur_promisc;
 		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
 			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
 					&vsi->state));
-		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
-							     vsi->seid,
-							     cur_promisc, NULL);
-		if (aq_ret)
+		if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) {
+			/* set defport ON for the Main VSI instead of true
+			 * promisc; this way we will get all unicast/multicast
+			 * and VLAN promisc behavior but will not get VF or
+			 * VMDq traffic replicated on the Main VSI.
+			 */
+			if (pf->cur_promisc != cur_promisc) {
+				pf->cur_promisc = cur_promisc;
+				i40e_do_reset_safe(pf,
+						BIT(__I40E_PF_RESET_REQUESTED));
+			}
+		} else {
+			ret = i40e_aq_set_vsi_unicast_promiscuous(
+							  &vsi->back->hw,
+							  vsi->seid,
+							  cur_promisc, NULL);
+			if (ret)
+				dev_info(&pf->pdev->dev,
+					 "set unicast promisc failed, err %d, aq_err %d\n",
+					 ret, pf->hw.aq.asq_last_status);
+			ret = i40e_aq_set_vsi_multicast_promiscuous(
+							  &vsi->back->hw,
+							  vsi->seid,
+							  cur_promisc, NULL);
+			if (ret)
+				dev_info(&pf->pdev->dev,
+					 "set multicast promisc failed, err %d, aq_err %d\n",
+					 ret, pf->hw.aq.asq_last_status);
+		}
+		ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+						vsi->seid,
+						cur_promisc, NULL);
+		if (ret)
 			dev_info(&pf->pdev->dev,
-				 "set uni promisc failed, err %d, aq_err %d\n",
-				 aq_ret, pf->hw.aq.asq_last_status);
-		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
-						   vsi->seid,
-						   cur_promisc, NULL);
-		if (aq_ret)
-			dev_info(&pf->pdev->dev,
-				 "set brdcast promisc failed, err %d, aq_err %d\n",
-				 aq_ret, pf->hw.aq.asq_last_status);
+				 "set brdcast promisc failed, err %s, aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 	}
 
 	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1994,8 +2070,10 @@
 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
 	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "%s: update vsi failed, aq_err=%d\n",
-			 __func__, vsi->back->hw.aq.asq_last_status);
+			 "update vlan stripping failed, err %s aq_err %s\n",
+			 i40e_stat_str(&vsi->back->hw, ret),
+			 i40e_aq_str(&vsi->back->hw,
+				     vsi->back->hw.aq.asq_last_status));
 	}
 }
 
@@ -2023,8 +2101,10 @@
 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
 	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "%s: update vsi failed, aq_err=%d\n",
-			 __func__, vsi->back->hw.aq.asq_last_status);
+			 "update vlan stripping failed, err %s aq_err %s\n",
+			 i40e_stat_str(&vsi->back->hw, ret),
+			 i40e_aq_str(&vsi->back->hw,
+				     vsi->back->hw.aq.asq_last_status));
 	}
 }
 
@@ -2294,7 +2374,7 @@
 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
 {
 	struct i40e_vsi_context ctxt;
-	i40e_status aq_ret;
+	i40e_status ret;
 
 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
 	vsi->info.pvid = cpu_to_le16(vid);
@@ -2304,11 +2384,13 @@
 
 	ctxt.seid = vsi->seid;
 	ctxt.info = vsi->info;
-	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "%s: update vsi failed, aq_err=%d\n",
-			 __func__, vsi->back->hw.aq.asq_last_status);
+			 "add pvid failed, err %s aq_err %s\n",
+			 i40e_stat_str(&vsi->back->hw, ret),
+			 i40e_aq_str(&vsi->back->hw,
+				     vsi->back->hw.aq.asq_last_status));
 		return -ENOENT;
 	}
 
@@ -2696,9 +2778,9 @@
 #endif /* I40E_FCOE */
 	/* round up for the chip's needs */
 	vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
-				(1 << I40E_RXQ_CTX_HBUFF_SHIFT));
+				BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
 	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
-				(1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+				BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
 
 	/* set up individual rings */
 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
@@ -2728,7 +2810,7 @@
 	}
 
 	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
-		if (!(vsi->tc_config.enabled_tc & (1 << n)))
+		if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
 			continue;
 
 		qoffset = vsi->tc_config.tc_info[n].qoffset;
@@ -2877,6 +2959,9 @@
 	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
 	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
 
+	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
+		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+
 	if (pf->flags & I40E_FLAG_PTP)
 		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
 
@@ -3167,6 +3252,13 @@
 	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
 		pf->sw_int_count++;
 
+	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
+	    (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
+		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+		icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+		dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
+	}
+
 	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
 
@@ -3373,7 +3465,7 @@
  * @v_idx: vector index
  * @qp_idx: queue pair index
  **/
-static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
+static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
 {
 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
 	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
@@ -3427,7 +3519,7 @@
 		q_vector->tx.ring = NULL;
 
 		while (num_ringpairs--) {
-			map_vector_to_qp(vsi, v_start, qp_idx);
+			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
 			qp_idx++;
 			qp_remaining--;
 		}
@@ -3929,6 +4021,7 @@
 	i40e_vsi_free_irq(vsi);
 	i40e_vsi_free_tx_resources(vsi);
 	i40e_vsi_free_rx_resources(vsi);
+	vsi->current_netdev_flags = 0;
 }
 
 /**
@@ -4073,7 +4166,7 @@
 		if (app.selector == I40E_APP_SEL_TCPIP &&
 		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
 			tc = dcbcfg->etscfg.prioritytable[app.priority];
-			enabled_tc |= (1 << tc);
+			enabled_tc |= BIT_ULL(tc);
 			break;
 		}
 	}
@@ -4122,7 +4215,7 @@
 	u8 i;
 
 	for (i = 0; i < num_tc; i++)
-		enabled_tc |= 1 << i;
+		enabled_tc |= BIT(i);
 
 	return enabled_tc;
 }
@@ -4157,7 +4250,7 @@
 	/* At least have TC0 */
 	enabled_tc = (enabled_tc ? enabled_tc : 0x1);
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		if (enabled_tc & (1 << i))
+		if (enabled_tc & BIT_ULL(i))
 			num_tc++;
 	}
 	return num_tc;
@@ -4179,11 +4272,11 @@
 
 	/* Find the first enabled TC */
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		if (enabled_tc & (1 << i))
+		if (enabled_tc & BIT_ULL(i))
 			break;
 	}
 
-	return 1 << i;
+	return BIT(i);
 }
 
 /**
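
The scan above returns a one-hot mask for the lowest-numbered enabled TC.
For nonzero input the same mask also falls out of the two's-complement
identity x & -x, without the loop; this is not what the driver does, just
a useful cross-check on the intent:

    #include <stdio.h>

    /* Lowest set bit of a nonzero mask, isolated directly. */
    static unsigned int lowest_tc_mask(unsigned int enabled_tc)
    {
        return enabled_tc & -enabled_tc;
    }

    int main(void)
    {
        printf("0x%x -> 0x%x\n", 0xcu, lowest_tc_mask(0xc));  /* 0x4 */
        printf("0x%x -> 0x%x\n", 0x5u, lowest_tc_mask(0x5));  /* 0x1 */
        return 0;
    }
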
@@ -4221,26 +4314,28 @@
 	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
-	i40e_status aq_ret;
+	i40e_status ret;
 	u32 tc_bw_max;
 	int i;
 
 	/* Get the VSI level BW configuration */
-	aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "couldn't get PF vsi bw config, err %d, aq_err %d\n",
-			 aq_ret, pf->hw.aq.asq_last_status);
+			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return -EINVAL;
 	}
 
 	/* Get the VSI level BW configuration per TC */
-	aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
-						  NULL);
-	if (aq_ret) {
+	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
+					       NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "couldn't get PF vsi ets bw config, err %d, aq_err %d\n",
-			 aq_ret, pf->hw.aq.asq_last_status);
+			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return -EINVAL;
 	}
 
@@ -4279,16 +4374,16 @@
 				       u8 *bw_share)
 {
 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
-	i40e_status aq_ret;
+	i40e_status ret;
 	int i;
 
 	bw_data.tc_valid_bits = enabled_tc;
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
 		bw_data.tc_bw_credits[i] = bw_share[i];
 
-	aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
-					  NULL);
-	if (aq_ret) {
+	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
+				       NULL);
+	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
 			 "AQ command Config VSI BW allocation per TC failed = %d\n",
 			 vsi->back->hw.aq.asq_last_status);
@@ -4337,7 +4432,7 @@
 		 * will set the numtc for netdev as 2 that will be
 		 * referenced by the netdev layer as TC 0 and 1.
 		 */
-		if (vsi->tc_config.enabled_tc & (1 << i))
+		if (vsi->tc_config.enabled_tc & BIT_ULL(i))
 			netdev_set_tc_queue(netdev,
 					vsi->tc_config.tc_info[i].netdev_tc,
 					vsi->tc_config.tc_info[i].qcount,
@@ -4399,7 +4494,7 @@
 
 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		if (enabled_tc & (1 << i))
+		if (enabled_tc & BIT_ULL(i))
 			bw_share[i] = 1;
 	}
 
@@ -4423,8 +4518,10 @@
 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
 	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "update vsi failed, aq_err=%d\n",
-			 vsi->back->hw.aq.asq_last_status);
+			 "Update vsi tc config failed, err %s aq_err %s\n",
+			 i40e_stat_str(&vsi->back->hw, ret),
+			 i40e_aq_str(&vsi->back->hw,
+				     vsi->back->hw.aq.asq_last_status));
 		goto out;
 	}
 	/* update the local VSI info with updated queue map */
@@ -4435,8 +4532,10 @@
 	ret = i40e_vsi_get_bw_info(vsi);
 	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "Failed updating vsi bw info, aq_err=%d\n",
-			 vsi->back->hw.aq.asq_last_status);
+			 "Failed updating vsi bw info, err %s aq_err %s\n",
+			 i40e_stat_str(&vsi->back->hw, ret),
+			 i40e_aq_str(&vsi->back->hw,
+				     vsi->back->hw.aq.asq_last_status));
 		goto out;
 	}
 
@@ -4469,7 +4568,7 @@
 
 	/* Enable ETS TCs with equal BW Share for now */
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		if (enabled_tc & (1 << i))
+		if (enabled_tc & BIT_ULL(i))
 			bw_data.tc_bw_share_credits[i] = 1;
 	}
 
@@ -4477,8 +4576,9 @@
 						   &bw_data, NULL);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "veb bw config failed, aq_err=%d\n",
-			 pf->hw.aq.asq_last_status);
+			 "VEB bw config failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		goto out;
 	}
 
@@ -4486,8 +4586,9 @@
 	ret = i40e_veb_get_bw_info(veb);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "Failed getting veb bw config, aq_err=%d\n",
-			 pf->hw.aq.asq_last_status);
+			 "Failed getting veb bw config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 	}
 
 out:
@@ -4574,8 +4675,9 @@
 	ret = i40e_aq_resume_port_tx(hw, NULL);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "AQ command Resume Port Tx failed = %d\n",
-			  pf->hw.aq.asq_last_status);
+			 "Resume Port Tx failed, err %s aq_err %s\n",
+			  i40e_stat_str(&pf->hw, ret),
+			  i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		/* Schedule PF reset to recover */
 		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
 		i40e_service_event_schedule(pf);
@@ -4627,8 +4729,9 @@
 		}
 	} else {
 		dev_info(&pf->pdev->dev,
-			 "AQ Querying DCB configuration failed: aq_err %d\n",
-			 pf->hw.aq.asq_last_status);
+			 "Query for DCB configuration failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, err),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 	}
 
 out:
@@ -4859,7 +4962,7 @@
 
 	/* Generate TC map for number of tc requested */
 	for (i = 0; i < tc; i++)
-		enabled_tc |= (1 << i);
+		enabled_tc |= BIT_ULL(i);
 
 	/* Requesting same TC configuration as already enabled */
 	if (enabled_tc == vsi->tc_config.enabled_tc)
@@ -4998,7 +5101,7 @@
 err_setup_tx:
 	i40e_vsi_free_tx_resources(vsi);
 	if (vsi == pf->vsi[pf->lan_vsi])
-		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 
 	return err;
 }
@@ -5066,7 +5169,7 @@
 		i40e_vc_notify_reset(pf);
 
 	/* do the biggest reset indicated */
-	if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
+	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
 
 		/* Request a Global Reset
 		 *
@@ -5081,7 +5184,7 @@
 		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
 
-	} else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
+	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
 
 		/* Request a Core Reset
 		 *
@@ -5093,7 +5196,7 @@
 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
 		i40e_flush(&pf->hw);
 
-	} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
+	} else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
 
 		/* Request a PF Reset
 		 *
@@ -5106,7 +5209,7 @@
 		dev_dbg(&pf->pdev->dev, "PFR requested\n");
 		i40e_handle_reset_warning(pf);
 
-	} else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
+	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
 		int v;
 
 		/* Find the VSI(s) that requested a re-init */
@@ -5123,7 +5226,7 @@
 
 		/* no further action needed, so return now */
 		return;
-	} else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
+	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
 		int v;
 
 		/* Find the VSI(s) that needs to be brought down */
@@ -5253,7 +5356,10 @@
 	/* Get updated DCBX data from firmware */
 	ret = i40e_get_dcb_config(&pf->hw);
 	if (ret) {
-		dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n");
+		dev_info(&pf->pdev->dev,
+			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		goto exit;
 	}
 
@@ -5761,23 +5867,23 @@
 
 	rtnl_lock();
 	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
-		reset_flags |= (1 << __I40E_REINIT_REQUESTED);
+		reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
 		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
 	}
 	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
-		reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
+		reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
 		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
 	}
 	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
-		reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
+		reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
 		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
 	}
 	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
-		reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
+		reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
 		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
 	}
 	if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
-		reset_flags |= (1 << __I40E_DOWN_REQUESTED);
+		reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
 		clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
 	}
 
@@ -5983,27 +6089,29 @@
 {
 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 	struct i40e_vsi_context ctxt;
-	int aq_ret;
+	int ret;
 
 	ctxt.seid = pf->main_vsi_seid;
 	ctxt.pf_num = pf->hw.pf_id;
 	ctxt.vf_num = 0;
-	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "%s couldn't get PF vsi config, err %d, aq_err %d\n",
-			 __func__, aq_ret, pf->hw.aq.asq_last_status);
+			 "couldn't get PF vsi config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return;
 	}
 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
 	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 
-	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "%s: update vsi switch failed, aq_err=%d\n",
-			 __func__, vsi->back->hw.aq.asq_last_status);
+			 "update vsi switch failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 	}
 }
 
@@ -6017,27 +6125,29 @@
 {
 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 	struct i40e_vsi_context ctxt;
-	int aq_ret;
+	int ret;
 
 	ctxt.seid = pf->main_vsi_seid;
 	ctxt.pf_num = pf->hw.pf_id;
 	ctxt.vf_num = 0;
-	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "%s couldn't get PF vsi config, err %d, aq_err %d\n",
-			 __func__, aq_ret, pf->hw.aq.asq_last_status);
+			 "couldn't get PF vsi config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return;
 	}
 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
 	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 
-	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "%s: update vsi switch failed, aq_err=%d\n",
-			 __func__, vsi->back->hw.aq.asq_last_status);
+			 "update vsi switch failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 	}
 }
 
@@ -6097,7 +6207,8 @@
 	ret = i40e_add_vsi(ctl_vsi);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "rebuild of owner VSI failed: %d\n", ret);
+			 "rebuild of veb_idx %d owner VSI failed: %d\n",
+			 veb->idx, ret);
 		goto end_reconstitute;
 	}
 	i40e_vsi_reset_stats(ctl_vsi);
@@ -6176,8 +6287,10 @@
 			buf_len = data_size;
 		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
 			dev_info(&pf->pdev->dev,
-				 "capability discovery failed: aq=%d\n",
-				 pf->hw.aq.asq_last_status);
+				 "capability discovery failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, err),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 			return -ENODEV;
 		}
 	} while (err);
@@ -6363,7 +6476,9 @@
 	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
 	ret = i40e_init_adminq(&pf->hw);
 	if (ret) {
-		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
+		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		goto clear_recovery;
 	}
 
@@ -6373,11 +6488,8 @@
 
 	i40e_clear_pxe_mode(hw);
 	ret = i40e_get_capabilities(pf);
-	if (ret) {
-		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
-			 ret);
+	if (ret)
 		goto end_core_reset;
-	}
 
 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
 				hw->func_caps.num_rx_qp,
@@ -6418,12 +6530,16 @@
 				       I40E_AQ_EVENT_LINK_UPDOWN |
 				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
 	if (ret)
-		dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
+		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
 	/* make sure our flow control settings are restored */
 	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
 	if (ret)
-		dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);
+		dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
 	/* Rebuild the VSIs and VEBs that existed before reset.
 	 * They are still in our local switch element arrays, so only
@@ -6484,8 +6600,10 @@
 		msleep(75);
 		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
 		if (ret)
-			dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
-				 pf->hw.aq.asq_last_status);
+			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 	}
 	/* reinit the misc interrupt */
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -6647,8 +6765,8 @@
 	pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
 
 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
-		if (pf->pending_vxlan_bitmap & (1 << i)) {
-			pf->pending_vxlan_bitmap &= ~(1 << i);
+		if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
+			pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
 			port = pf->vxlan_ports[i];
 			if (port)
 				ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
@@ -6659,10 +6777,12 @@
 
 			if (ret) {
 				dev_info(&pf->pdev->dev,
-					 "%s vxlan port %d, index %d failed, err %d, aq_err %d\n",
+					 "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
 					 port ? "add" : "delete",
-					 ntohs(port), i, ret,
-					 pf->hw.aq.asq_last_status);
+					 ntohs(port), i,
+					 i40e_stat_str(&pf->hw, ret),
+					 i40e_aq_str(&pf->hw,
+						    pf->hw.aq.asq_last_status));
 				pf->vxlan_ports[i] = 0;
 			}
 		}
@@ -7013,6 +7133,10 @@
 		tx_ring->count = vsi->num_desc;
 		tx_ring->size = 0;
 		tx_ring->dcb_tc = 0;
+		if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
+			tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+		if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
+			tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM;
 		vsi->tx_rings[i] = tx_ring;
 
 		rx_ring = &tx_ring[1];
@@ -7411,62 +7535,139 @@
 }
 
 /**
+ * i40e_config_rss_aq - Prepare for RSS using AQ commands
+ * @vsi: vsi structure
+ * @seed: RSS hash seed
+ **/
+static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
+{
+	struct i40e_aqc_get_set_rss_key_data rss_key;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	bool pf_lut = false;
+	u8 *rss_lut;
+	int ret, i;
+
+	memset(&rss_key, 0, sizeof(rss_key));
+	memcpy(&rss_key, seed, sizeof(rss_key));
+
+	rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
+	if (!rss_lut)
+		return -ENOMEM;
+
+	/* Populate the LUT with the queue indices in round-robin fashion */
+	for (i = 0; i < vsi->rss_table_size; i++)
+		rss_lut[i] = i % vsi->rss_size;
+
+	ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "Cannot set RSS key, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+		kfree(rss_lut);
+		return ret;
+	}
+
+	if (vsi->type == I40E_VSI_MAIN)
+		pf_lut = true;
+
+	ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
+				  vsi->rss_table_size);
+	if (ret)
+		dev_info(&pf->pdev->dev,
+			 "Cannot set RSS lut, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+	kfree(rss_lut);
+	return ret;
+}
+
+/**
+ * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
+ * @vsi: VSI structure
+ **/
+static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
+{
+	u8 seed[I40E_HKEY_ARRAY_SIZE];
+	struct i40e_pf *pf = vsi->back;
+
+	netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+	vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
+
+	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+		return i40e_config_rss_aq(vsi, seed);
+
+	return 0;
+}
+
+/**
+ * i40e_config_rss_reg - Prepare for RSS if used
+ * @pf: board private structure
+ * @seed: RSS hash seed
+ **/
+static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
+{
+	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+	struct i40e_hw *hw = &pf->hw;
+	u32 *seed_dw = (u32 *)seed;
+	u32 current_queue = 0;
+	u32 lut = 0;
+	int i, j;
+
+	/* Fill out hash function seed */
+	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+		wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+
+	for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+		lut = 0;
+		for (j = 0; j < 4; j++) {
+			if (current_queue == vsi->rss_size)
+				current_queue = 0;
+			lut |= ((current_queue) << (8 * j));
+			current_queue++;
+		}
+		wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
+	}
+	i40e_flush(hw);
+
+	return 0;
+}
+
+/**
  * i40e_config_rss - Prepare for RSS if used
  * @pf: board private structure
  **/
 static int i40e_config_rss(struct i40e_pf *pf)
 {
-	u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+	u8 seed[I40E_HKEY_ARRAY_SIZE];
 	struct i40e_hw *hw = &pf->hw;
-	u32 lut = 0;
-	int i, j;
-	u64 hena;
 	u32 reg_val;
+	u64 hena;
 
-	netdev_rss_key_fill(rss_key, sizeof(rss_key));
-	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-		wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]);
+	netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
 
 	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
 	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
 		((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
-	hena |= I40E_DEFAULT_RSS_HENA;
+	hena |= i40e_pf_get_default_rss_hena(pf);
+
 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
 
 	vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
 
-	/* Check capability and Set table size and register per hw expectation*/
+	/* Determine the RSS table size based on the hardware capabilities */
 	reg_val = rd32(hw, I40E_PFQF_CTL_0);
-	if (pf->rss_table_size == 512)
-		reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
-	else
-		reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
+	reg_val = (pf->rss_table_size == 512) ?
+			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
+			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
 	wr32(hw, I40E_PFQF_CTL_0, reg_val);
 
-	/* Populate the LUT with max no. of queues in round robin fashion */
-	for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {
-
-		/* The assumption is that lan qp count will be the highest
-		 * qp count for any PF VSI that needs RSS.
-		 * If multiple VSIs need RSS support, all the qp counts
-		 * for those VSIs should be a power of 2 for RSS to work.
-		 * If LAN VSI is the only consumer for RSS then this requirement
-		 * is not necessary.
-		 */
-		if (j == vsi->rss_size)
-			j = 0;
-		/* lut = 4-byte sliding window of 4 lut entries */
-		lut = (lut << 8) | (j &
-			 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
-		/* On i = 3, we have 4 entries in lut; write to the register */
-		if ((i & 3) == 3)
-			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
-	}
-	i40e_flush(hw);
-
-	return 0;
+	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+		return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed);
+	else
+		return i40e_config_rss_reg(pf, seed);
 }
 
 /**
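[Editor's note on the register-based LUT layout in the hunk above: each 32-bit
I40E_PFQF_HLUT word carries four one-byte LUT entries, filled round-robin and
wrapping at vsi->rss_size. A minimal standalone sketch of the packing, using an
example queue count rather than driver state:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t rss_size = 6;      /* example queue count, an assumption */
        uint32_t current_queue = 0;
        int i, j;

        /* first 4 HLUT words, mirroring the loop in i40e_config_rss_reg() */
        for (i = 0; i < 4; i++) {
            uint32_t lut = 0;

            for (j = 0; j < 4; j++) {
                if (current_queue == rss_size)
                    current_queue = 0;
                lut |= current_queue << (8 * j);
                current_queue++;
            }
            printf("HLUT[%d] = 0x%08x\n", i, lut);
        }
        return 0;
    }

With rss_size = 6 this prints 0x03020100 then 0x01000504, showing the queue
indices recycling through the table one byte at a time.]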
@@ -7533,7 +7734,7 @@
 	i40e_status status;
 
 	/* Set the valid bit for this PF */
-	bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id);
+	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
 	bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
 	bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
 
@@ -7567,8 +7768,9 @@
 	last_aq_status = pf->hw.aq.asq_last_status;
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "Cannot acquire NVM for read access, err %d: aq_err %d\n",
-			 ret, last_aq_status);
+			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, last_aq_status));
 		goto bw_commit_out;
 	}
 
@@ -7583,8 +7785,9 @@
 	last_aq_status = pf->hw.aq.asq_last_status;
 	i40e_release_nvm(&pf->hw);
 	if (ret) {
-		dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n",
-			 ret, last_aq_status);
+		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, last_aq_status));
 		goto bw_commit_out;
 	}
 
@@ -7596,8 +7799,9 @@
 	last_aq_status = pf->hw.aq.asq_last_status;
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "Cannot acquire NVM for write access, err %d: aq_err %d\n",
-			 ret, last_aq_status);
+			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, last_aq_status));
 		goto bw_commit_out;
 	}
 	/* Write it back out unchanged to initiate update NVM,
@@ -7615,8 +7819,9 @@
 	i40e_release_nvm(&pf->hw);
 	if (ret)
 		dev_info(&pf->pdev->dev,
-			 "BW settings NOT SAVED, err %d aq_err %d\n",
-			 ret, last_aq_status);
+			 "BW settings NOT SAVED, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, last_aq_status));
 bw_commit_out:
 
 	return ret;
@@ -7662,7 +7867,7 @@
 	/* Depending on PF configurations, it is possible that the RSS
 	 * maximum might end up larger than the available queues
 	 */
-	pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
 	pf->rss_size = 1;
 	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
 	pf->rss_size_max = min_t(int, pf->rss_size_max,
@@ -7673,7 +7878,7 @@
 	}
 
 	/* MFP mode enabled */
-	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
 		pf->flags |= I40E_FLAG_MFP_ENABLED;
 		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
 		if (i40e_get_npar_bw_setting(pf))
@@ -7703,9 +7908,8 @@
 	}
 
 	if (pf->hw.func_caps.vmdq) {
-		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
 		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
-		pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
+		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
 	}
 
 #ifdef I40E_FCOE
@@ -7723,6 +7927,14 @@
 					I40E_MAX_VF_COUNT);
 	}
 #endif /* CONFIG_PCI_IOV */
+	if (pf->hw.mac.type == I40E_MAC_X722) {
+		pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
+			     I40E_FLAG_128_QP_RSS_CAPABLE |
+			     I40E_FLAG_HW_ATR_EVICT_CAPABLE |
+			     I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
+			     I40E_FLAG_WB_ON_ITR_CAPABLE |
+			     I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE;
+	}
 	pf->eeprom_version = 0xDEAD;
 	pf->lan_veb = I40E_NO_VEB;
 	pf->lan_vsi = I40E_NO_VSI;
@@ -7812,7 +8024,7 @@
 	need_reset = i40e_set_ntuple(pf, features);
 
 	if (need_reset)
-		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 
 	return 0;
 }
@@ -7875,10 +8087,8 @@
 
 	/* New port: add it and mark its index in the bitmap */
 	pf->vxlan_ports[next_idx] = port;
-	pf->pending_vxlan_bitmap |= (1 << next_idx);
+	pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
 	pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
-
-	dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
 }
 
 /**
@@ -7906,7 +8116,7 @@
 		 * and make it pending
 		 */
 		pf->vxlan_ports[idx] = 0;
-		pf->pending_vxlan_bitmap |= (1 << idx);
+		pf->pending_vxlan_bitmap |= BIT_ULL(idx);
 		pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
 
 		dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
@@ -7981,7 +8191,6 @@
 	return err;
 }
 
-#ifdef HAVE_BRIDGE_ATTRIBS
 /**
  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
  * @dev: the netdev being configured
@@ -7995,7 +8204,8 @@
  * bridge mode enabled.
  **/
 static int i40e_ndo_bridge_setlink(struct net_device *dev,
-				   struct nlmsghdr *nlh)
+				   struct nlmsghdr *nlh,
+				   u16 flags)
 {
 	struct i40e_netdev_priv *np = netdev_priv(dev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -8066,14 +8276,9 @@
  * Return the mode in which the hardware bridge is operating in
  * i.e VEB or VEPA.
  **/
-#ifdef HAVE_BRIDGE_FILTER
 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 				   struct net_device *dev,
 				   u32 filter_mask, int nlflags)
-#else
-static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-				   struct net_device *dev, int nlflags)
-#endif /* HAVE_BRIDGE_FILTER */
 {
 	struct i40e_netdev_priv *np = netdev_priv(dev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -8097,7 +8302,25 @@
 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
 				       nlflags, 0, 0, filter_mask, NULL);
 }
-#endif /* HAVE_BRIDGE_ATTRIBS */
+
+#define I40E_MAX_TUNNEL_HDR_LEN 80
+/**
+ * i40e_features_check - Validate encapsulated packet conforms to limits
+ * @skb: socket buffer to check
+ * @dev: this physical port's netdev
+ * @features: Offload features that the stack believes apply
+ **/
+static netdev_features_t i40e_features_check(struct sk_buff *skb,
+					     struct net_device *dev,
+					     netdev_features_t features)
+{
+	if (skb->encapsulation &&
+	    (skb_inner_mac_header(skb) - skb_transport_header(skb) >
+	     I40E_MAX_TUNNEL_HDR_LEN))
+		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+
+	return features;
+}
 
 static const struct net_device_ops i40e_netdev_ops = {
 	.ndo_open		= i40e_open,
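[Editor's note on the new ndo_features_check hook above: the span between the
outer transport header and the inner MAC header is the whole tunnel
encapsulation, and the hardware can only offload checksums and GSO when that
span is at most 80 bytes. A standalone sketch of the check, with illustrative
offsets rather than real skb fields:

    #include <stdio.h>
    #include <stdbool.h>

    #define MAX_TUNNEL_HDR_LEN 80   /* mirrors I40E_MAX_TUNNEL_HDR_LEN */

    int main(void)
    {
        bool encapsulated = true;
        int transport_off = 34;  /* outer L4 header offset (example) */
        int inner_mac_off = 84;  /* inner Ethernet header offset (example) */
        int tunnel_len = inner_mac_off - transport_off;

        if (encapsulated && tunnel_len > MAX_TUNNEL_HDR_LEN)
            printf("tunnel header %d bytes: offloads masked off\n",
                   tunnel_len);
        else
            printf("tunnel header %d bytes: offloads kept\n", tunnel_len);
        return 0;
    }

When the limit is exceeded the driver returns the feature set with
NETIF_F_ALL_CSUM and NETIF_F_GSO_MASK cleared, so the stack falls back to
software checksumming and segmentation for that packet.]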
@@ -8133,10 +8356,9 @@
 #endif
 	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
 	.ndo_fdb_add		= i40e_ndo_fdb_add,
-#ifdef HAVE_BRIDGE_ATTRIBS
+	.ndo_features_check	= i40e_features_check,
 	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
 	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
-#endif /* HAVE_BRIDGE_ATTRIBS */
 };
 
 /**
@@ -8304,8 +8526,10 @@
 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 		if (ret) {
 			dev_info(&pf->pdev->dev,
-				 "couldn't get PF vsi config, err %d, aq_err %d\n",
-				 ret, pf->hw.aq.asq_last_status);
+				 "couldn't get PF vsi config, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 			return -ENOENT;
 		}
 		vsi->info = ctxt.info;
@@ -8327,8 +8551,10 @@
 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
 			if (ret) {
 				dev_info(&pf->pdev->dev,
-					 "update vsi failed, aq_err=%d\n",
-					 pf->hw.aq.asq_last_status);
+					 "update vsi failed, err %s aq_err %s\n",
+					 i40e_stat_str(&pf->hw, ret),
+					 i40e_aq_str(&pf->hw,
+						    pf->hw.aq.asq_last_status));
 				ret = -ENOENT;
 				goto err;
 			}
@@ -8345,9 +8571,11 @@
 			ret = i40e_vsi_config_tc(vsi, enabled_tc);
 			if (ret) {
 				dev_info(&pf->pdev->dev,
-					 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
-					 enabled_tc, ret,
-					 pf->hw.aq.asq_last_status);
+					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
+					 enabled_tc,
+					 i40e_stat_str(&pf->hw, ret),
+					 i40e_aq_str(&pf->hw,
+						    pf->hw.aq.asq_last_status));
 				ret = -ENOENT;
 			}
 		}
@@ -8438,8 +8666,10 @@
 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
 		if (ret) {
 			dev_info(&vsi->back->pdev->dev,
-				 "add vsi failed, aq_err=%d\n",
-				 vsi->back->hw.aq.asq_last_status);
+				 "add vsi failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 			ret = -ENOENT;
 			goto err;
 		}
@@ -8484,8 +8714,9 @@
 	ret = i40e_vsi_get_bw_info(vsi);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "couldn't get vsi bw info, err %d, aq_err %d\n",
-			 ret, pf->hw.aq.asq_last_status);
+			 "couldn't get vsi bw info, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		/* VSI is already added so not tearing that up */
 		ret = 0;
 	}
@@ -8615,6 +8846,11 @@
 		goto vector_setup_out;
 	}
 
+	/* In Legacy mode, we do not have to get any other vector since we
+	 * piggyback on the misc/ICR0 for queue interrupts.
+	 */
+	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+		return ret;
 	if (vsi->num_q_vectors)
 		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
 						 vsi->num_q_vectors, vsi->idx);
@@ -8658,7 +8894,7 @@
 	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
 	if (ret < 0) {
 		dev_info(&pf->pdev->dev,
-			 "failed to get tracking for %d queues for VSI %d err=%d\n",
+			 "failed to get tracking for %d queues for VSI %d err %d\n",
 			 vsi->alloc_queue_pairs, vsi->seid, ret);
 		goto err_vsi;
 	}
@@ -8857,6 +9093,10 @@
 		break;
 	}
 
+	if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
+	    (vsi->type == I40E_VSI_VMDQ2)) {
+		ret = i40e_vsi_config_rss(vsi);
+	}
 	return vsi;
 
 err_rings:
@@ -8896,8 +9136,9 @@
 						  &bw_data, NULL);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "query veb bw config failed, aq_err=%d\n",
-			 hw->aq.asq_last_status);
+			 "query veb bw config failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
 		goto out;
 	}
 
@@ -8905,8 +9146,9 @@
 						   &ets_data, NULL);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "query veb bw ets config failed, aq_err=%d\n",
-			 hw->aq.asq_last_status);
+			 "query veb bw ets config failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
 		goto out;
 	}
 
@@ -9090,36 +9332,40 @@
  **/
 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
 {
-	bool is_default = false;
+	struct i40e_pf *pf = veb->pf;
+	bool is_default = veb->pf->cur_promisc;
 	bool is_cloud = false;
 	int ret;
 
 	/* get a VEB from the hardware */
-	ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
+	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
 			      veb->enabled_tc, is_default,
 			      is_cloud, &veb->seid, NULL);
 	if (ret) {
-		dev_info(&veb->pf->pdev->dev,
-			 "couldn't add VEB, err %d, aq_err %d\n",
-			 ret, veb->pf->hw.aq.asq_last_status);
+		dev_info(&pf->pdev->dev,
+			 "couldn't add VEB, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return -EPERM;
 	}
 
 	/* get statistics counter */
-	ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
+	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
 					 &veb->stats_idx, NULL, NULL, NULL);
 	if (ret) {
-		dev_info(&veb->pf->pdev->dev,
-			 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
-			 ret, veb->pf->hw.aq.asq_last_status);
+		dev_info(&pf->pdev->dev,
+			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return -EPERM;
 	}
 	ret = i40e_veb_get_bw_info(veb);
 	if (ret) {
-		dev_info(&veb->pf->pdev->dev,
-			 "couldn't get VEB bw info, err %d, aq_err %d\n",
-			 ret, veb->pf->hw.aq.asq_last_status);
-		i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
+		dev_info(&pf->pdev->dev,
+			 "couldn't get VEB bw info, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
 		return -ENOENT;
 	}
 
@@ -9325,8 +9571,10 @@
 						&next_seid, NULL);
 		if (ret) {
 			dev_info(&pf->pdev->dev,
-				 "get switch config failed %d aq_err=%x\n",
-				 ret, pf->hw.aq.asq_last_status);
+				 "get switch config failed err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 			kfree(aq_buf);
 			return -ENOENT;
 		}
@@ -9367,8 +9615,9 @@
 	ret = i40e_fetch_switch_configuration(pf, false);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "couldn't fetch switch config, err %d, aq_err %d\n",
-			 ret, pf->hw.aq.asq_last_status);
+			 "couldn't fetch switch config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return ret;
 	}
 	i40e_pf_reset_stats(pf);
@@ -9743,7 +9992,8 @@
 
 	err = i40e_init_shared_code(hw);
 	if (err) {
-		dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
+		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
+			 err);
 		goto err_pf_reset;
 	}
 
@@ -9910,15 +10160,19 @@
 				       I40E_AQ_EVENT_LINK_UPDOWN |
 				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
 	if (err)
-		dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
+		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, err),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
 	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
 	    (pf->hw.aq.fw_maj_ver < 4)) {
 		msleep(75);
 		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
 		if (err)
-			dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
-				 pf->hw.aq.asq_last_status);
+			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, err),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 	}
 	/* The main driver is (mostly) up and happy. We need to set this state
 	 * before setting up the misc vector or we get a race and the vector
@@ -10006,8 +10260,10 @@
 	/* get the requested speeds from the fw */
 	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
 	if (err)
-		dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n",
-			 err);
+		dev_info(&pf->pdev->dev,
+			 "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n",
+			 i40e_stat_str(&pf->hw, err),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
 
 	/* print a string summarizing features */
@@ -10247,6 +10503,19 @@
 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
 	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
 
+	del_timer_sync(&pf->service_timer);
+	cancel_work_sync(&pf->service_task);
+	i40e_fdir_teardown(pf);
+
+	rtnl_lock();
+	i40e_prep_for_reset(pf);
+	rtnl_unlock();
+
+	wr32(hw, I40E_PFPM_APM,
+	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+	wr32(hw, I40E_PFPM_WUFC,
+	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
 	i40e_clear_interrupt_scheme(pf);
 
 	if (system_state == SYSTEM_POWER_OFF) {
@@ -10267,9 +10536,6 @@
 
 	set_bit(__I40E_SUSPENDED, &pf->state);
 	set_bit(__I40E_DOWN, &pf->state);
-	del_timer_sync(&pf->service_timer);
-	cancel_work_sync(&pf->service_task);
-	i40e_fdir_teardown(pf);
 
 	rtnl_lock();
 	i40e_prep_for_reset(pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 554e49d..9b83abc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -50,7 +50,7 @@
 	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
 			   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
 	/* Switching to words (sr_size contains power of 2KB) */
-	nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
+	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
 
 	/* Check if we are in the normal or blank NVM programming mode */
 	fla = rd32(hw, I40E_GLNVM_FLA);
@@ -189,8 +189,8 @@
 	ret_code = i40e_poll_sr_srctl_done_bit(hw);
 	if (!ret_code) {
 		/* Write the address and start reading */
-		sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
-			 (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
 		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
 
 		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
@@ -212,6 +212,74 @@
 }
 
 /**
+ * i40e_read_nvm_aq - Read Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to read
+ * @data: buffer for the words read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Reads a 16 bit word buffer from the Shadow RAM using the admin command.
+ **/
+static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+				    u32 offset, u16 words, void *data,
+				    bool last_command)
+{
+	i40e_status ret_code = I40E_ERR_NVM;
+	struct i40e_asq_cmd_details cmd_details;
+
+	memset(&cmd_details, 0, sizeof(cmd_details));
+
+	/* Here we are checking the SR limit only for the flat memory model.
+	 * We cannot do it for the module-based model, as we did not acquire
+	 * the NVM resource yet (we cannot get the module pointer value).
+	 * Firmware will check the module-based model.
+	 */
+	if ((offset + words) > hw->nvm.sr_size)
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+			   (offset + words), hw->nvm.sr_size);
+	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+		/* We can read only up to 4KB (one sector) in one AQ read */
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM read fail error: tried to read %d words, limit is %d.\n",
+			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+		/* A single read cannot spread over two sectors */
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
+			   offset, words);
+	else
+		ret_code = i40e_aq_read_nvm(hw, module_pointer,
+					    2 * offset,  /*bytes*/
+					    2 * words,   /*bytes*/
+					    data, last_command, &cmd_details);
+
+	return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the AdminQ.
+ **/
+static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+					 u16 *data)
+{
+	i40e_status ret_code = I40E_ERR_TIMEOUT;
+
+	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
+	*data = le16_to_cpu(*(__le16 *)data);
+
+	return ret_code;
+}
+
+/**
  * i40e_read_nvm_word - Reads Shadow RAM
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
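[Editor's note on the guards in i40e_read_nvm_aq() above: a single AQ access
must stay inside the Shadow RAM, fit in one 4KB sector, and must not straddle
a sector boundary. A standalone sketch of the straddling test, assuming the
driver's 2048-word sector size (I40E_SR_SECTOR_SIZE_IN_WORDS):

    #include <stdio.h>

    #define SECTOR_WORDS 2048   /* assumed sector size in 16-bit words */

    /* true when the first and last word fall in different sectors */
    static int spans_two_sectors(unsigned int offset, unsigned int words)
    {
        return (offset + (words - 1)) / SECTOR_WORDS !=
               offset / SECTOR_WORDS;
    }

    int main(void)
    {
        /* ends exactly at word 2047, still in sector 0: allowed */
        printf("offset 2032, 16 words -> %s\n",
               spans_two_sectors(2032, 16) ? "rejected" : "ok");
        /* words 2040..2055 straddle the 2048-word boundary: rejected */
        printf("offset 2040, 16 words -> %s\n",
               spans_two_sectors(2040, 16) ? "rejected" : "ok");
        return 0;
    }
]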
@@ -222,6 +290,8 @@
 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
 			       u16 *data)
 {
+	if (hw->mac.type == I40E_MAC_X722)
+		return i40e_read_nvm_word_aq(hw, offset, data);
 	return i40e_read_nvm_word_srctl(hw, offset, data);
 }
 
@@ -257,6 +327,63 @@
 }
 
 /**
+ * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+					   u16 *words, u16 *data)
+{
+	i40e_status ret_code;
+	u16 read_size = *words;
+	bool last_cmd = false;
+	u16 words_read = 0;
+	u16 i = 0;
+
+	do {
+		/* Calculate the number of words to read in this step: the
+		 * FVL AQ does not allow reading more than one sector at a
+		 * time or crossing sector boundaries.
+		 */
+		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
+			read_size = min(*words,
+					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
+				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
+		else
+			read_size = min((*words - words_read),
+					I40E_SR_SECTOR_SIZE_IN_WORDS);
+
+		/* If this is the last command, set the proper flag */
+		if ((words_read + read_size) >= *words)
+			last_cmd = true;
+
+		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
+					    data + words_read, last_cmd);
+		if (ret_code)
+			goto read_nvm_buffer_aq_exit;
+
+		/* Increment counter for words already read and move offset to
+		 * new read location
+		 */
+		words_read += read_size;
+		offset += read_size;
+	} while (words_read < *words);
+
+	for (i = 0; i < *words; i++)
+		data[i] = le16_to_cpu(((__le16 *)data)[i]);
+
+read_nvm_buffer_aq_exit:
+	*words = words_read;
+	return ret_code;
+}
+
+/**
  * i40e_read_nvm_buffer - Reads Shadow RAM buffer
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
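[Editor's note on the chunking loop in i40e_read_nvm_buffer_aq() above: when
the start offset is unaligned, the first read only goes up to the next sector
boundary; the rest is read in whole-sector chunks, so no single AQ read ever
crosses a boundary. A standalone sketch of the resulting schedule, with an
example offset and length and the assumed 2048-word sector size:

    #include <stdio.h>

    #define SECTOR_WORDS 2048   /* assumed sector size in 16-bit words */

    int main(void)
    {
        unsigned int offset = 2040, words = 5000, done = 0;

        while (done < words) {
            unsigned int chunk;

            /* align the first read to the next sector boundary */
            if (offset % SECTOR_WORDS)
                chunk = SECTOR_WORDS - (offset % SECTOR_WORDS);
            else
                chunk = SECTOR_WORDS;
            if (chunk > words - done)
                chunk = words - done;
            printf("AQ read: %u words at offset %u\n", chunk, offset);
            done += chunk;
            offset += chunk;
        }
        return 0;
    }

For these inputs it prints reads of 8, 2048, 2048 and 896 words, each staying
within one sector.]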
@@ -270,6 +397,8 @@
 i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
 				 u16 *words, u16 *data)
 {
+	if (hw->mac.type == I40E_MAC_X722)
+		return i40e_read_nvm_buffer_aq(hw, offset, words, data);
 	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
 }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 7b34f1e..dcb72a8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -58,6 +58,19 @@
 void i40e_idle_aq(struct i40e_hw *hw);
 bool i40e_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+
+i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+				bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+				bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+				u16 seid,
+				struct i40e_aqc_get_set_rss_key_data *key);
+i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+				u16 seid,
+				struct i40e_aqc_get_set_rss_key_data *key);
 
 u32 i40e_led_get(struct i40e_hw *hw);
 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index a92b772..8c40d6e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -43,9 +43,8 @@
 #define I40E_PTP_10GB_INCVAL 0x0333333333ULL
 #define I40E_PTP_1GB_INCVAL  0x2000000000ULL
 
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1  (0x1 << \
-					I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (0x2 << \
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1  BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (2 << \
 					I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
 
 /**
@@ -357,7 +356,7 @@
 
 	prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
 
-	if (!(prttsyn_stat & (1 << index)))
+	if (!(prttsyn_stat & BIT(index)))
 		return;
 
 	lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 522d6df..dc0402f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -873,6 +873,13 @@
 #define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
 #define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT)
 #define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
 #define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
 #define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
@@ -3366,4 +3373,1933 @@
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
 #define I40E_VFQF_HREGION_REGION_7_SHIFT 29
 #define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#endif
+
+#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
+#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0
+#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT)
+#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */
+#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2
+#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4
+#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8
+#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT)
+#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */
+#define I40E_MNGSB_FDS_START_BC_SHIFT 0
+#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT)
+#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16
+#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT)
+
+#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT)
+#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT)
+
+#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+
+#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT)
+#define I40E_GL_FWSTS_FWROWD_SHIFT 8
+#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT)
+#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT)
+#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT)
+#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT)
+#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT)
+#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT)
+#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */
+#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0
+#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT)
+#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT)
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QBASE_MAX_INDEX 127
+#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0
+#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11
+#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT)
+#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
+#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */
+#define I40E_GLNVM_AL_REQ_POR_SHIFT 0
+#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT)
+#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2
+#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT)
+#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3
+#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT)
+#define I40E_GLNVM_AL_REQ_PE_SHIFT 4
+#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT)
+#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+
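+/* Illustrative sketch of how these definitions are consumed: every field is
+ * described by a SHIFT/MASK pair, with the mask pre-shifted via I40E_MASK()
+ * (assumed here to expand to ((mask) << (shift))).  Extracting a field from
+ * a register value therefore masks first, then shifts down, e.g.:
+ *
+ *	u32 locked = (val & I40E_GLNVM_FLA_LOCKED_MASK) >>
+ *		     I40E_GLNVM_FLA_LOCKED_SHIFT;
+ */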
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
+#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8
+#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
+#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
+#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10
+#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
+#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2
+#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
+#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
+#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
+#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6
+#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7
+#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
+#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
+#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10
+#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
+#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
+#define I40E_MNGSB_DADD_ADDR_SHIFT 0
+#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
+#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
+#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
+#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
+#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
+#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
+#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
+#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
+#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
+#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
+#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
+#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
+#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
+#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
+#define I40E_MNGSB_RDATA_DATA_SHIFT 0
+#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
+#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
+#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
+#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
+#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
+#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
+#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
+#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
+#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
+#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
+#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
+#define I40E_MNGSB_RHDR0_EH_SHIFT 31
+#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
+#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
+#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
+#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
+#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
+#define I40E_MNGSB_WDATA_DATA_SHIFT 0
+#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
+#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
+#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
+#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
+#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
+#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
+#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
+#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
+#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
+#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
+#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
+#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
+
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
+
+#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
+
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
+#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
+#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
+#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
+#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
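+/* Illustrative sketch: the per-VF registers above are register arrays with a
+ * 4-byte stride, and each _MAX_INDEX macro gives the last valid index.
+ * Assuming the driver's wr32(hw, reg, val) write helper, arming the
+ * completion queue for a given VF might look like:
+ *
+ *	if (vf_id <= I40E_VFPE_CQARM_MAX_INDEX)
+ *		wr32(hw, I40E_VFPE_CQARM(vf_id),
+ *		     cq_id & I40E_VFPE_CQARM_PECQID_MASK);
+ */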
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
+#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
+#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
+#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */
+#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
+/* Redefined for X722 family */
+#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_X722_PFQF_HLUT_MAX_INDEX 127
+#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
+#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HKEY_MAX_INDEX 12
+#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
+#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HLUT_MAX_INDEX 15
+#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
+#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
+#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
+#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
+#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
+#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
+#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
+#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
+#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
+#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#endif /* _I40E_REGISTER_H_ */
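
All of the register fields above follow the same SHIFT/MASK convention, with
I40E_MASK(mask, shift) defined in i40e_type.h as (mask << shift).  A minimal
standalone sketch of decoding one field this way (the register value below is
made up for illustration; the macro is parenthesized here for standalone use):

	#include <stdio.h>
	#include <stdint.h>

	#define I40E_MASK(mask, shift) ((mask) << (shift))

	#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
	#define I40E_VSIQF_HKEY_KEY_1_MASK  I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)

	int main(void)
	{
		uint32_t reg = 0x00004200;	/* made-up register readout */

		/* mask the field off, then shift it down to bit 0 */
		uint32_t key1 = (reg & I40E_VSIQF_HKEY_KEY_1_MASK) >>
				I40E_VSIQF_HKEY_KEY_1_SHIFT;

		printf("KEY_1 = 0x%02x\n", (unsigned)key1);	/* prints KEY_1 = 0x42 */
		return 0;
	}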
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9a4f2bc..738aca6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -464,7 +464,7 @@
 	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
 		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
 
-	if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
 		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
 		    (I40E_DEBUG_FD & pf->hw.debug_mask))
 			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
@@ -509,8 +509,7 @@
 			dev_info(&pdev->dev,
 				"FD filter programming failed due to incorrect filter parameters\n");
 		}
-	} else if (error ==
-			  (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
 			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
 				 rx_desc->wb.qword0.hi_dword.fd_id);
@@ -854,15 +853,40 @@
  **/
 static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 {
-	u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-		  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
-		  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
-		  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
-		  /* allow 00 to be written to the index */
+	u16 flags = q_vector->tx.ring[0].flags;
 
-	wr32(&vsi->back->hw,
-	     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
-	     val);
+	if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+		u32 val;
+
+		if (q_vector->arm_wb_state)
+			return;
+
+		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;
+
+		wr32(&vsi->back->hw,
+		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+					 vsi->base_vector - 1),
+		     val);
+		q_vector->arm_wb_state = true;
+	} else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
+			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
+			  /* allow 00 to be written to the index */
+
+		wr32(&vsi->back->hw,
+		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+					 vsi->base_vector - 1), val);
+	} else {
+		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
+			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
+			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
+			/* allow 00 to be written to the index */
+
+		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
+	}
 }
 
 /**
@@ -892,7 +916,7 @@
 	 *  20-1249MB/s bulk   (8000 ints/s)
 	 */
 	bytes_per_int = rc->total_bytes / rc->itr;
-	switch (rc->itr) {
+	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
 		if (bytes_per_int > 10)
 			new_latency_range = I40E_LOW_LATENCY;
@@ -905,9 +929,14 @@
 		break;
 	case I40E_BULK_LATENCY:
 		if (bytes_per_int <= 20)
-			rc->latency_range = I40E_LOW_LATENCY;
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	default:
+		if (bytes_per_int <= 20)
+			new_latency_range = I40E_LOW_LATENCY;
 		break;
 	}
+	rc->latency_range = new_latency_range;
 
 	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
@@ -923,42 +952,14 @@
 		break;
 	}
 
-	if (new_itr != rc->itr) {
-		/* do an exponential smoothing */
-		new_itr = (10 * new_itr * rc->itr) /
-			  ((9 * new_itr) + rc->itr);
-		rc->itr = new_itr & I40E_MAX_ITR;
-	}
+	if (new_itr != rc->itr)
+		rc->itr = new_itr;
 
 	rc->total_bytes = 0;
 	rc->total_packets = 0;
 }
 
 /**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
-	u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
-	struct i40e_hw *hw = &q_vector->vsi->back->hw;
-	u32 reg_addr;
-	u16 old_itr;
-
-	reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
-	old_itr = q_vector->rx.itr;
-	i40e_set_new_dynamic_itr(&q_vector->rx);
-	if (old_itr != q_vector->rx.itr)
-		wr32(hw, reg_addr, q_vector->rx.itr);
-
-	reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
-	old_itr = q_vector->tx.itr;
-	i40e_set_new_dynamic_itr(&q_vector->tx);
-	if (old_itr != q_vector->tx.itr)
-		wr32(hw, reg_addr, q_vector->tx.itr);
-}
-
-/**
  * i40e_clean_programming_status - clean the programming status descriptor
  * @rx_ring: the rx ring that has this descriptor
  * @rx_desc: the rx descriptor written back by HW
@@ -1386,7 +1387,7 @@
 		return;
 
 	/* did the hardware decode the packet and checksum? */
-	if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
 		return;
 
 	/* both known and outer_ip must be set for the below code to work */
@@ -1401,25 +1402,25 @@
 		ipv6 = true;
 
 	if (ipv4 &&
-	    (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
-			 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
 		goto checksum_fail;
 
 	/* likely incorrect csum if alternate IP extension headers found */
 	if (ipv6 &&
-	    rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
 		/* don't increment checksum err here, non-fatal err */
 		return;
 
 	/* there was some L4 error, count error and punt packet to the stack */
-	if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
 		goto checksum_fail;
 
 	/* handle packets that were not able to be checksummed due
 	 * to arrival speed; in this case the stack can compute
 	 * the csum.
 	 */
-	if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
 		return;
 
 	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1428,7 +1429,8 @@
 	 * so the total length of IPv4 header is IHL*4 bytes
 	 * The UDP_0 bit *may* be set if the *inner* header is UDP
 	 */
-	if (ipv4_tunnel) {
+	if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
+	    (ipv4_tunnel)) {
 		skb->transport_header = skb->mac_header +
 					sizeof(struct ethhdr) +
 					(ip_hdr(skb)->ihl * 4);
@@ -1543,7 +1545,7 @@
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			I40E_RXD_QW1_STATUS_SHIFT;
 
-		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -1584,8 +1586,8 @@
 
 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
 			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
-		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 			   I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1637,7 +1639,7 @@
 		I40E_RX_INCREMENT(rx_ring, i);
 
 		if (unlikely(
-		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
 			struct i40e_rx_buffer *next_buffer;
 
 			next_buffer = &rx_ring->rx_bi[i];
@@ -1647,7 +1649,7 @@
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
 			continue;
 		}
@@ -1669,7 +1671,7 @@
 
 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
 			 : 0;
 #ifdef I40E_FCOE
@@ -1730,7 +1732,7 @@
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			I40E_RXD_QW1_STATUS_SHIFT;
 
-		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -1753,7 +1755,7 @@
 
 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
 			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 			   I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1771,13 +1773,13 @@
 		I40E_RX_INCREMENT(rx_ring, i);
 
 		if (unlikely(
-		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
 			rx_ring->rx_stats.non_eop_descs++;
 			continue;
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
 			/* TODO: shouldn't we increment a counter indicating the
 			 * drop?
@@ -1802,7 +1804,7 @@
 
 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
 			 : 0;
 #ifdef I40E_FCOE
@@ -1827,6 +1829,68 @@
 }
 
 /**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+					  struct i40e_q_vector *q_vector)
+{
+	struct i40e_hw *hw = &vsi->back->hw;
+	u16 old_itr;
+	int vector;
+	u32 val;
+
+	vector = (q_vector->v_idx + vsi->base_vector);
+	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+		old_itr = q_vector->rx.itr;
+		i40e_set_new_dynamic_itr(&q_vector->rx);
+		if (old_itr != q_vector->rx.itr) {
+			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+			I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+			(I40E_RX_ITR <<
+				I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+			(q_vector->rx.itr <<
+				I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+		} else {
+			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+			I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+			(I40E_ITR_NONE <<
+				I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+		}
+		if (!test_bit(__I40E_DOWN, &vsi->state))
+			wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+	} else {
+		i40e_irq_dynamic_enable(vsi,
+					q_vector->v_idx + vsi->base_vector);
+	}
+	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+		old_itr = q_vector->tx.itr;
+		i40e_set_new_dynamic_itr(&q_vector->tx);
+		if (old_itr != q_vector->tx.itr) {
+			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+				(I40E_TX_ITR <<
+				   I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+				(q_vector->tx.itr <<
+				   I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+		} else {
+			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+				(I40E_ITR_NONE <<
+				   I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+		}
+		if (!test_bit(__I40E_DOWN, &vsi->state))
+			wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+			      vsi->base_vector - 1), val);
+	} else {
+		i40e_irq_dynamic_enable(vsi,
+					q_vector->v_idx + vsi->base_vector);
+	}
+}
+
+/**
  * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
  * @napi: napi struct with our devices info in it
  * @budget: amount of work driver is allowed to do this pass, in packets
@@ -1880,35 +1944,29 @@
 		return budget;
 	}
 
+	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+		q_vector->arm_wb_state = false;
+
 	/* Work is done so exit the polling mode and re-enable the interrupt */
 	napi_complete(napi);
-	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
-	    ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-		i40e_update_dynamic_itr(q_vector);
+	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+		i40e_update_enable_itr(vsi, q_vector);
+	} else { /* Legacy mode */
+		struct i40e_hw *hw = &vsi->back->hw;
+		/* We re-enable the queue 0 cause, but
+		 * don't worry about dynamic_enable
+		 * because we left it on for the other
+		 * possible interrupts during napi
+		 */
+		u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
+			   I40E_QINT_RQCTL_CAUSE_ENA_MASK;
 
-	if (!test_bit(__I40E_DOWN, &vsi->state)) {
-		if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
-			i40e_irq_dynamic_enable(vsi,
-					q_vector->v_idx + vsi->base_vector);
-		} else {
-			struct i40e_hw *hw = &vsi->back->hw;
-			/* We re-enable the queue 0 cause, but
-			 * don't worry about dynamic_enable
-			 * because we left it on for the other
-			 * possible interrupts during napi
-			 */
-			u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
-			qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
-			wr32(hw, I40E_QINT_RQCTL(0), qval);
-
-			qval = rd32(hw, I40E_QINT_TQCTL(0));
-			qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
-			wr32(hw, I40E_QINT_TQCTL(0), qval);
-
-			i40e_irq_dynamic_enable_icr0(vsi->back);
-		}
+		wr32(hw, I40E_QINT_RQCTL(0), qval);
+		qval = rd32(hw, I40E_QINT_TQCTL(0)) |
+		       I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+		wr32(hw, I40E_QINT_TQCTL(0), qval);
+		i40e_irq_dynamic_enable_icr0(vsi->back);
 	}
-
 	return 0;
 }
 
@@ -1982,6 +2040,13 @@
 	/* Due to lack of space, no more new filters can be programmed */
 	if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
 		return;
+	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
+		/* HW ATR eviction will take care of removing filters on FIN
+		 * and RST packets.
+		 */
+		if (th->fin || th->rst)
+			return;
+	}
 
 	tx_ring->atr_count++;
 
@@ -2037,6 +2102,9 @@
 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 
+	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
+
 	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
 	fdir_desc->rsvd = cpu_to_le32(0);
 	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
@@ -2244,11 +2312,15 @@
 	struct iphdr *this_ip_hdr;
 	u32 network_hdr_len;
 	u8 l4_hdr = 0;
+	struct udphdr *oudph;
+	struct iphdr *oiph;
 	u32 l4_tunnel = 0;
 
 	if (skb->encapsulation) {
 		switch (ip_hdr(skb)->protocol) {
 		case IPPROTO_UDP:
+			oudph = udp_hdr(skb);
+			oiph = ip_hdr(skb);
 			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
 			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
 			break;
@@ -2285,6 +2357,15 @@
 			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
 			*tx_flags |= I40E_TX_FLAGS_IPV6;
 		}
+		if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
+		    (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING)        &&
+		    (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
+			oudph->check = ~csum_tcpudp_magic(oiph->saddr,
+					oiph->daddr,
+					(skb->len - skb_transport_offset(skb)),
+					IPPROTO_UDP, 0);
+			*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+		}
 	} else {
 		network_hdr_len = skb_network_header_len(skb);
 		this_ip_hdr = ip_hdr(skb);
@@ -2616,6 +2697,8 @@
 	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
 						   tx_ring->queue_index)))
 		writel(i, tx_ring->tail);
+	else
+		prefetchw(tx_desc + 1);
 
 	return;
 
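
Most of the mechanical churn in this file replaces open-coded (1 << n) with
BIT(n)/BIT_ULL(n) from include/linux/bitops.h.  Besides readability, the
macros pin the operand width: 1 << n is a plain int and is undefined for
n >= 31, whereas BIT() shifts 1UL and BIT_ULL() shifts 1ULL.  A standalone
sketch using two pctype values from this series:

	#include <stdio.h>

	/* same definitions as include/linux/bitops.h */
	#define BIT(nr)		(1UL << (nr))
	#define BIT_ULL(nr)	(1ULL << (nr))

	int main(void)
	{
		/* pctypes 33 (NONF_IPV4_TCP) and 43 (NONF_IPV6_TCP) sit above
		 * bit 31, so a 64-bit enable set must be built with BIT_ULL()
		 */
		unsigned long long hena = BIT_ULL(33) | BIT_ULL(43);

		printf("hena = 0x%llx\n", hena);	/* 0x80200000000 */
		return 0;
	}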
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 0dc48dc..f1385a1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -66,17 +66,29 @@
 
 /* Supported RSS offloads */
 #define I40E_DEFAULT_RSS_HENA ( \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
-	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
-	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
-	((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hena(pf) \
+	(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
+	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
 
 /* Supported Rx Buffer Sizes */
 #define I40E_RXBUFFER_512   512    /* Used for packet split */
@@ -129,17 +141,17 @@
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING	4
 
-#define I40E_TX_FLAGS_CSUM		(u32)(1)
-#define I40E_TX_FLAGS_HW_VLAN		(u32)(1 << 1)
-#define I40E_TX_FLAGS_SW_VLAN		(u32)(1 << 2)
-#define I40E_TX_FLAGS_TSO		(u32)(1 << 3)
-#define I40E_TX_FLAGS_IPV4		(u32)(1 << 4)
-#define I40E_TX_FLAGS_IPV6		(u32)(1 << 5)
-#define I40E_TX_FLAGS_FCCRC		(u32)(1 << 6)
-#define I40E_TX_FLAGS_FSO		(u32)(1 << 7)
-#define I40E_TX_FLAGS_TSYN		(u32)(1 << 8)
-#define I40E_TX_FLAGS_FD_SB		(u32)(1 << 9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL	(u32)(1 << 10)
+#define I40E_TX_FLAGS_CSUM		BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
+#define I40E_TX_FLAGS_TSO		BIT(3)
+#define I40E_TX_FLAGS_IPV4		BIT(4)
+#define I40E_TX_FLAGS_IPV6		BIT(5)
+#define I40E_TX_FLAGS_FCCRC		BIT(6)
+#define I40E_TX_FLAGS_FSO		BIT(7)
+#define I40E_TX_FLAGS_TSYN		BIT(8)
+#define I40E_TX_FLAGS_FD_SB		BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL	BIT(10)
 #define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
@@ -253,6 +265,10 @@
 	bool ring_active;		/* is ring online or not */
 	bool arm_wb;		/* do something to arm write back */
 
+	u16 flags;
+#define I40E_TXR_FLAGS_WB_ON_ITR	BIT(0)
+#define I40E_TXR_FLAGS_OUTER_UDP_CSUM	BIT(1)
+
 	/* stats structs */
 	struct i40e_queue_stats	stats;
 	struct u64_stats_sync syncp;
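
The new i40e_pf_get_default_rss_hena() selector hands back a 64-bit hash
enable set, which is why the EXPANDED variant must use BIT_ULL() for the
X722-only pctypes above bit 31.  A trimmed-down, standalone model of the
selection (the flag bit position and set contents are illustrative stand-ins,
not the driver's values):

	#include <stdio.h>
	#include <stdint.h>

	#define BIT_ULL(nr) (1ULL << (nr))

	#define DEF_HENA (BIT_ULL(33) | BIT_ULL(43))		/* base pctypes */
	#define EXP_HENA (DEF_HENA | BIT_ULL(32) | BIT_ULL(42))	/* + X722 extras */
	#define FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(0)	/* made-up bit */

	struct pf { uint64_t flags; };

	#define pf_get_default_rss_hena(pf) \
		(((pf)->flags & FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
		  EXP_HENA : DEF_HENA)

	static void program_hena(const struct pf *pf)
	{
		uint64_t hena = pf_get_default_rss_hena(pf);

		/* the 64-bit set spans two 32-bit HENA registers */
		printf("HENA(0) <- 0x%08x\n", (unsigned)hena);
		printf("HENA(1) <- 0x%08x\n", (unsigned)(hena >> 32));
	}

	int main(void)
	{
		struct pf x710 = { .flags = 0 };
		struct pf x722 = { .flags = FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE };

		program_hena(&x710);	/* base set only */
		program_hena(&x722);	/* expanded set */
		return 0;
	}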
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 9a5a75b..4842239 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -47,6 +47,11 @@
 #define I40E_DEV_ID_20G_KR2		0x1587
 #define I40E_DEV_ID_VF			0x154C
 #define I40E_DEV_ID_VF_HV		0x1571
+#define I40E_DEV_ID_SFP_X722		0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722	0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722	0x37D2
+#define I40E_DEV_ID_X722_VF		0x37CD
+#define I40E_DEV_ID_X722_VF_HV		0x37D9
 
 #define i40e_is_40G_device(d)		((d) == I40E_DEV_ID_QSFP_A  || \
 					 (d) == I40E_DEV_ID_QSFP_B  || \
@@ -120,6 +125,8 @@
 	I40E_MAC_X710,
 	I40E_MAC_XL710,
 	I40E_MAC_VF,
+	I40E_MAC_X722,
+	I40E_MAC_X722_VF,
 	I40E_MAC_GENERIC,
 };
 
@@ -213,7 +220,17 @@
 	bool dcb;
 	bool fcoe;
 	bool iscsi; /* Indicates iSCSI enabled */
-	bool mfp_mode_1;
+	bool flex10_enable;
+	bool flex10_capable;
+	u32  flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN	0x0
+#define I40E_FLEX10_MODE_DCC		0x1
+#define I40E_FLEX10_MODE_DCI		0x2
+
+	u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR	0x1
+#define I40E_FLEX10_STATUS_VC_MODE	0x2
+
 	bool mgmt_cem;
 	bool ieee_1588;
 	bool iwarp;
@@ -423,6 +440,7 @@
 #define I40E_DCBX_MODE_CEE	0x1
 #define I40E_DCBX_MODE_IEEE	0x2
 	u32 numapps;
+	u32 tlv_status; /* CEE mode TLV status */
 	struct i40e_dcb_ets_config etscfg;
 	struct i40e_dcb_ets_config etsrec;
 	struct i40e_dcb_pfc_config pfc;
@@ -487,11 +505,13 @@
 
 	/* debug mask */
 	u32 debug_mask;
+	char err_str[16];
 };
 
 static inline bool i40e_is_vf(struct i40e_hw *hw)
 {
-	return hw->mac.type == I40E_MAC_VF;
+	return (hw->mac.type == I40E_MAC_VF ||
+		hw->mac.type == I40E_MAC_X722_VF);
 }
 
 struct i40e_driver_version {
@@ -588,19 +608,23 @@
 	I40E_RX_DESC_STATUS_CRCP_SHIFT		= 4,
 	I40E_RX_DESC_STATUS_TSYNINDX_SHIFT	= 5, /* 2 BITS */
 	I40E_RX_DESC_STATUS_TSYNVALID_SHIFT	= 7,
-	I40E_RX_DESC_STATUS_PIF_SHIFT		= 8,
+	/* Note: Bit 8 is reserved in X710 and XL710 */
+	I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT	= 8,
 	I40E_RX_DESC_STATUS_UMBCAST_SHIFT	= 9, /* 2 BITS */
 	I40E_RX_DESC_STATUS_FLM_SHIFT		= 11,
 	I40E_RX_DESC_STATUS_FLTSTAT_SHIFT	= 12, /* 2 BITS */
 	I40E_RX_DESC_STATUS_LPBK_SHIFT		= 14,
 	I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT	= 15,
 	I40E_RX_DESC_STATUS_RESERVED_SHIFT	= 16, /* 2 BITS */
-	I40E_RX_DESC_STATUS_UDP_0_SHIFT		= 18,
+	/* Note: For non-tunnel packets INT_UDP_0 is the right status for
+	 * the UDP header
+	 */
+	I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT	= 18,
 	I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
 };
 
 #define I40E_RXD_QW1_STATUS_SHIFT	0
-#define I40E_RXD_QW1_STATUS_MASK	(((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+#define I40E_RXD_QW1_STATUS_MASK	((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
 					 << I40E_RXD_QW1_STATUS_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -608,8 +632,8 @@
 					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK	(0x1UL << \
-					 I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+				    BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
 
 enum i40e_rx_desc_fltstat_values {
 	I40E_RX_DESC_FLTSTAT_NO_DATA	= 0,
@@ -743,8 +767,7 @@
 					 I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
 
 #define I40E_RXD_QW1_LENGTH_SPH_SHIFT	63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK	(0x1ULL << \
-					 I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK	BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
 
 enum i40e_rx_desc_ext_status_bits {
 	/* Note: These are predefined bit offsets */
@@ -920,12 +943,12 @@
 #define I40E_TXD_CTX_QW0_NATT_SHIFT	9
 #define I40E_TXD_CTX_QW0_NATT_MASK	(0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
-#define I40E_TXD_CTX_UDP_TUNNELING	(0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING	BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
 #define I40E_TXD_CTX_GRE_TUNNELING	(0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
 #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT	11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK	(0x1ULL << \
-					 I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+				       BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
 
 #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST	I40E_TXD_CTX_QW0_EIP_NOINC_MASK
 
@@ -937,6 +960,8 @@
 #define I40E_TXD_CTX_QW0_DECTTL_MASK	(0xFULL << \
 					 I40E_TXD_CTX_QW0_DECTTL_SHIFT)
 
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT	23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK	BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
 struct i40e_filter_program_desc {
 	__le32 qindex_flex_ptype_vsi;
 	__le32 rsvd;
@@ -955,15 +980,24 @@
 
 /* Packet Classifier Types for filters */
 enum i40e_filter_pctype {
-	/* Note: Values 0-30 are reserved for future use */
+	/* Note: Values 0-28 are reserved for future use.
+	 * Values 29, 30 and 32 are not supported on XL710 and X710.
+	 */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP	= 29,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP	= 30,
 	I40E_FILTER_PCTYPE_NONF_IPV4_UDP		= 31,
-	/* Note: Value 32 is reserved for future use */
+	I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK	= 32,
 	I40E_FILTER_PCTYPE_NONF_IPV4_TCP		= 33,
 	I40E_FILTER_PCTYPE_NONF_IPV4_SCTP		= 34,
 	I40E_FILTER_PCTYPE_NONF_IPV4_OTHER		= 35,
 	I40E_FILTER_PCTYPE_FRAG_IPV4			= 36,
-	/* Note: Values 37-40 are reserved for future use */
+	/* Note: Values 37-38 are reserved for future use.
+	 * Values 39, 40 and 42 are not supported on XL710 and X710.
+	 */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP	= 39,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP	= 40,
 	I40E_FILTER_PCTYPE_NONF_IPV6_UDP		= 41,
+	I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK	= 42,
 	I40E_FILTER_PCTYPE_NONF_IPV6_TCP		= 43,
 	I40E_FILTER_PCTYPE_NONF_IPV6_SCTP		= 44,
 	I40E_FILTER_PCTYPE_NONF_IPV6_OTHER		= 45,
@@ -990,8 +1024,8 @@
 };
 
 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT	23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK	(0x1FFUL << \
-					 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
+				       BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CMD_SHIFT	4
 #define I40E_TXD_FLTR_QW1_CMD_MASK	(0xFFFFULL << \
@@ -1009,14 +1043,17 @@
 #define I40E_TXD_FLTR_QW1_DEST_MASK	(0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT	(0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK	(0x1ULL << \
-					 I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK	BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT	(0x9ULL + \
 						 I40E_TXD_FLTR_QW1_CMD_SHIFT)
 #define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
 					  I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
 
+#define I40E_TXD_FLTR_QW1_ATR_SHIFT	(0xEULL + \
+					 I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_MASK	BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
+
 #define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
 #define I40E_TXD_FLTR_QW1_CNTINDEX_MASK	(0x1FFUL << \
 					 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
@@ -1069,6 +1106,14 @@
 	u64 tx_errors;			/* tepc */
 };
 
+/* Statistics collected per VEB per TC */
+struct i40e_veb_tc_stats {
+	u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
+	u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
+	u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
+	u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
+};
+
 #ifdef I40E_FCOE
 /* Statistics collected per function for FCoE */
 struct i40e_fcoe_stats {
@@ -1134,6 +1179,8 @@
 	u64 fd_atr_match;
 	u64 fd_sb_match;
 	u64 fd_atr_tunnel_match;
+	u32 fd_atr_status;
+	u32 fd_sb_status;
 	/* EEE LPI */
 	u32 tx_lpi_status;
 	u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 2d20af2..0f8d415 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -110,7 +110,9 @@
  * error regardless of version mismatch.
  */
 #define I40E_VIRTCHNL_VERSION_MAJOR		1
-#define I40E_VIRTCHNL_VERSION_MINOR		0
+#define I40E_VIRTCHNL_VERSION_MINOR		1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0
+
 struct i40e_virtchnl_version_info {
 	u32 major;
 	u32 minor;
@@ -129,7 +131,8 @@
  */
 
 /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
  * PF responds with an indirect message containing
  * i40e_virtchnl_vf_resource and one or more
  * i40e_virtchnl_vsi_resource structures.
@@ -143,9 +146,13 @@
 	u8 default_mac_addr[ETH_ALEN];
 };
 /* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2	0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE	0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN	0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2		0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP		0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE		0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ		0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG	0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING	0x00020000
 
 struct i40e_virtchnl_vf_resource {
 	u16 num_vsis;
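
The version bump above changes the shape of the GET_VF_RESOURCES request:
a 1.1 VF sends its u32 capability bitmap, while a 1.0 VF sends nothing and
the PF assumes L2 + RSS_REG + VLAN.  A hedged, standalone sketch of the VF
side (send_to_pf() is an illustrative stand-in for the admin-queue send path,
and the literal 3 stands in for I40E_VIRTCHNL_OP_GET_VF_RESOURCES):

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	/* offload bits from the header above */
	#define I40E_VIRTCHNL_VF_OFFLOAD_L2         0x00000001
	#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ     0x00000008
	#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000

	static void send_to_pf(int opcode, const void *msg, size_t len)
	{
		printf("opcode %d, %zu payload bytes\n", opcode, len);
	}

	int main(void)
	{
		int minor = 1;	/* minor agreed in the VERSION handshake */
		uint32_t caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
				I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
				I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;

		if (minor >= 1)	/* 1.1: request carries the caps bitmap */
			send_to_pf(3, &caps, sizeof(caps));
		else		/* 1.0: request has no payload at all */
			send_to_pf(3, NULL, 0);
		return 0;
	}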
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 23f95cd..d99c116 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -160,13 +160,8 @@
  **/
 static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
 {
-	struct i40e_hw *hw = &pf->hw;
-	u32 reg;
-
-	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
-	reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
-	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
-	i40e_flush(hw);
+	i40e_vc_notify_vf_reset(vf);
+	i40e_reset_vf(vf, false);
 }
 
 /**
@@ -282,16 +277,14 @@
 	}
 	tempmap = vecmap->rxq_map;
 	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
-		linklistmap |= (1 <<
-				(I40E_VIRTCHNL_SUPPORTED_QTYPES *
-				 vsi_queue_id));
+		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+				    vsi_queue_id));
 	}
 
 	tempmap = vecmap->txq_map;
 	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
-		linklistmap |= (1 <<
-				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
-				 + 1));
+		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+				     vsi_queue_id + 1));
 	}
 
 	next_q = find_first_bit(&linklistmap,
@@ -337,11 +330,23 @@
 		reg = (vector_id) |
 		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
 		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
-		    (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
 		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
 		wr32(hw, reg_idx, reg);
 	}
 
+	/* If the VF is running in polling mode and using interrupt zero,
+	 * auto-masking must be disabled when enabling interrupt zero for VFs.
+	 */
+	if ((vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
+	    (vector_id == 0)) {
+		reg = rd32(hw, I40E_GLINT_CTL);
+		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
+			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+			wr32(hw, I40E_GLINT_CTL, reg);
+		}
+	}
+
 irq_list_done:
 	i40e_flush(hw);
 }
@@ -542,11 +547,13 @@
 		if (vf->port_vlan_id)
 			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
 		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
-				    vf->port_vlan_id, true, false);
+				    vf->port_vlan_id ? vf->port_vlan_id : -1,
+				    true, false);
 		if (!f)
 			dev_info(&pf->pdev->dev,
 				 "Could not allocate VF MAC addr\n");
-		f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
+		f = i40e_add_filter(vsi, brdcast,
+				    vf->port_vlan_id ? vf->port_vlan_id : -1,
 				    true, false);
 		if (!f)
 			dev_info(&pf->pdev->dev,
@@ -835,6 +842,7 @@
 	i40e_alloc_vf_res(vf);
 	i40e_enable_vf_mappings(vf);
 	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+	clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
 
 	/* tell the VF the reset is done */
 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -899,7 +907,7 @@
 		for (vf_id = 0; vf_id < tmp; vf_id++) {
 			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
 			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
-			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 		}
 	}
 	clear_bit(__I40E_VF_DISABLE, &pf->state);
@@ -925,8 +933,6 @@
 	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
 		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
 		if (ret) {
-			dev_err(&pf->pdev->dev,
-				"Failed to enable SR-IOV, error %d.\n", ret);
 			pf->num_alloc_vfs = 0;
 			goto err_iov;
 		}
@@ -1123,12 +1129,16 @@
  *
  * called from the VF to request the API version used by the PF
  **/
-static int i40e_vc_get_version_msg(struct i40e_vf *vf)
+static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct i40e_virtchnl_version_info info = {
 		I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
 	};
 
+	vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg;
+	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+	if (VF_IS_V10(vf))
+		info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
 	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
 				      I40E_SUCCESS, (u8 *)&info,
 				      sizeof(struct
@@ -1143,7 +1153,7 @@
  *
  * called from the VF to request its resources
  **/
-static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
+static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct i40e_virtchnl_vf_resource *vfres = NULL;
 	struct i40e_pf *pf = vf->pf;
@@ -1167,12 +1177,24 @@
 		len = 0;
 		goto err;
 	}
+	if (VF_IS_V11(vf))
+		vf->driver_caps = *(u32 *)msg;
+	else
+		vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+				  I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+				  I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
 
 	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
 	vsi = pf->vsi[vf->lan_vsi_idx];
 	if (!vsi->info.pvid)
 		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
-
+	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+		if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+			vfres->vf_offload_flags |=
+				I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+	} else {
+		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+	}
 	vfres->num_vsis = num_vsis;
 	vfres->num_queue_pairs = vf->num_queue_pairs;
 	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
@@ -1773,9 +1795,14 @@
 		valid_len = sizeof(struct i40e_virtchnl_version_info);
 		break;
 	case I40E_VIRTCHNL_OP_RESET_VF:
-	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
 		valid_len = 0;
 		break;
+	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+		if (VF_IS_V11(vf))
+			valid_len = sizeof(u32);
+		else
+			valid_len = 0;
+		break;
 	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
 		valid_len = sizeof(struct i40e_virtchnl_txq_info);
 		break;
@@ -1888,10 +1915,10 @@
 
 	switch (v_opcode) {
 	case I40E_VIRTCHNL_OP_VERSION:
-		ret = i40e_vc_get_version_msg(vf);
+		ret = i40e_vc_get_version_msg(vf, msg);
 		break;
 	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
-		ret = i40e_vc_get_vf_resources_msg(vf);
+		ret = i40e_vc_get_vf_resources_msg(vf, msg);
 		break;
 	case I40E_VIRTCHNL_OP_RESET_VF:
 		i40e_vc_reset_vf_msg(vf);
@@ -1969,9 +1996,9 @@
 		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
 		vf = &pf->vf[vf_id];
 		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
-		if (reg & (1 << bit_idx)) {
+		if (reg & BIT(bit_idx)) {
 			/* clear the bit in GLGEN_VFLRSTAT */
-			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 
 			if (!test_bit(__I40E_DOWN, &pf->state))
 				i40e_reset_vf(vf, true);
@@ -2023,7 +2050,8 @@
 	}
 
 	/* delete the temporary mac address */
-	i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
+	i40e_del_filter(vsi, vf->default_lan_addr.addr,
+			vf->port_vlan_id ? vf->port_vlan_id : -1,
 			true, false);
 
 	/* Delete all the filters for this VSI - we're going to kill it
@@ -2088,7 +2116,12 @@
 		goto error_pvid;
 	}
 
-	if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
+	if (le16_to_cpu(vsi->info.pvid) ==
+	    (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)))
+		/* duplicate request, so just return success */
+		goto error_pvid;
+
+	if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) {
 		dev_err(&pf->pdev->dev,
 			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
 			vf_id);
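
Note the repeated "vf->port_vlan_id ? vf->port_vlan_id : -1" pattern in the
filter calls above: with no port VLAN configured, the MAC filters must now
match any VLAN rather than only untagged frames.  A standalone sketch of the
sentinel, assuming -1 is the driver's I40E_VLAN_ANY value as in i40e.h:

	#include <stdio.h>

	#define I40E_VLAN_ANY (-1)	/* assumed sentinel: match any VLAN */

	/* illustrative filter add: 0 = untagged only, -1 = any VLAN */
	static void add_filter(const char *mac, int vlan)
	{
		if (vlan == I40E_VLAN_ANY)
			printf("%s: match any VLAN\n", mac);
		else
			printf("%s: match VLAN %d only\n", mac, vlan);
	}

	int main(void)
	{
		int port_vlan_id = 0;	/* no port VLAN set for this VF */

		/* the fix: fall back to -1, not 0, without a port VLAN */
		add_filter("02:00:00:00:00:01",
			   port_vlan_id ? port_vlan_id : I40E_VLAN_ANY);
		return 0;
	}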
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 09043c1..736f6f0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -42,6 +42,9 @@
 #define I40E_VLAN_MASK			0xFFF
 #define I40E_PRIORITY_MASK		0x7000
 
+#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0))
+#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1))
+
 /* Various queue ctrls */
 enum i40e_queue_ctrl {
 	I40E_QUEUE_CTRL_UNKNOWN = 0,
@@ -75,6 +78,8 @@
 	u16 vf_id;
 	/* all VF vsis connect to the same parent */
 	enum i40e_switch_element_types parent_type;
+	struct i40e_virtchnl_version_info vf_ver;
+	u32 driver_caps; /* reported by VF driver */
 
 	/* VF Port Extender (PE) stag if used */
 	u16 stag;
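
The VF_IS_V10()/VF_IS_V11() helpers added above gate both message validation
and the version the PF reports back.  A standalone sketch of how they
evaluate (the struct is reduced to the two fields the macros touch):

	#include <stdio.h>

	struct ver { int major, minor; };
	struct vf { struct ver vf_ver; };

	#define VF_IS_V10(_v) \
		(((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0))
	#define VF_IS_V11(_v) \
		(((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1))

	int main(void)
	{
		struct vf legacy = { { 1, 0 } };	/* 1.0 API VF */
		struct vf caps   = { { 1, 1 } };	/* 1.1 API VF */

		printf("legacy: v10=%d v11=%d\n",
		       VF_IS_V10(&legacy), VF_IS_V11(&legacy));
		printf("caps:   v10=%d v11=%d\n",
		       VF_IS_V10(&caps), VF_IS_V11(&caps));
		return 0;
	}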
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index c1d25f8..f08450b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -60,17 +60,6 @@
 		hw->aq.arq.len  = I40E_VF_ARQLEN1;
 		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
 		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
-	} else {
-		hw->aq.asq.tail = I40E_PF_ATQT;
-		hw->aq.asq.head = I40E_PF_ATQH;
-		hw->aq.asq.len  = I40E_PF_ATQLEN;
-		hw->aq.asq.bal  = I40E_PF_ATQBAL;
-		hw->aq.asq.bah  = I40E_PF_ATQBAH;
-		hw->aq.arq.tail = I40E_PF_ARQT;
-		hw->aq.arq.head = I40E_PF_ARQH;
-		hw->aq.arq.len  = I40E_PF_ARQLEN;
-		hw->aq.arq.bal  = I40E_PF_ARQBAL;
-		hw->aq.arq.bah  = I40E_PF_ARQBAH;
 	}
 }
 
@@ -308,7 +297,7 @@
 
 	/* set starting point */
 	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
-				  I40E_PF_ATQLEN_ATQENABLE_MASK));
+				  I40E_VF_ATQLEN1_ATQENABLE_MASK));
 	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
 	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
 
@@ -337,7 +326,7 @@
 
 	/* set starting point */
 	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
-				  I40E_PF_ARQLEN_ARQENABLE_MASK));
+				  I40E_VF_ARQLEN1_ARQENABLE_MASK));
 	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
 	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
 
@@ -899,7 +888,7 @@
 	mutex_lock(&hw->aq.arq_mutex);
 
 	/* set next_to_use to head */
-	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+	ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
 	if (ntu == ntc) {
 		/* nothing to do - shouldn't need to update ring's values */
 		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index e715bcc..c802209 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -34,8 +34,7 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR	0x0001
-#define I40E_FW_API_VERSION_MINOR	0x0002
-#define I40E_FW_API_VERSION_A0_MINOR  0x0000
+#define I40E_FW_API_VERSION_MINOR	0x0004
 
 struct i40e_aq_desc {
 	__le16 flags;
@@ -133,12 +132,7 @@
 	i40e_aqc_opc_list_func_capabilities	= 0x000A,
 	i40e_aqc_opc_list_dev_capabilities	= 0x000B,
 
-	i40e_aqc_opc_set_cppm_configuration	= 0x0103,
-	i40e_aqc_opc_set_arp_proxy_entry	= 0x0104,
-	i40e_aqc_opc_set_ns_proxy_entry		= 0x0105,
-
 	/* LAA */
-	i40e_aqc_opc_mng_laa		= 0x0106,   /* AQ obsolete */
 	i40e_aqc_opc_mac_address_read	= 0x0107,
 	i40e_aqc_opc_mac_address_write	= 0x0108,
 
@@ -260,7 +254,10 @@
 	/* Tunnel commands */
 	i40e_aqc_opc_add_udp_tunnel	= 0x0B00,
 	i40e_aqc_opc_del_udp_tunnel	= 0x0B01,
-	i40e_aqc_opc_tunnel_key_structure	= 0x0B10,
+	i40e_aqc_opc_set_rss_key	= 0x0B02,
+	i40e_aqc_opc_set_rss_lut	= 0x0B03,
+	i40e_aqc_opc_get_rss_key	= 0x0B04,
+	i40e_aqc_opc_get_rss_lut	= 0x0B05,
 
 	/* Async Events */
 	i40e_aqc_opc_event_lan_overflow		= 0x1001,
@@ -272,8 +269,6 @@
 	i40e_aqc_opc_oem_ocbb_initialize	= 0xFE03,
 
 	/* debug commands */
-	i40e_aqc_opc_debug_get_deviceid		= 0xFF00,
-	i40e_aqc_opc_debug_set_mode		= 0xFF01,
 	i40e_aqc_opc_debug_read_reg		= 0xFF03,
 	i40e_aqc_opc_debug_write_reg		= 0xFF04,
 	i40e_aqc_opc_debug_modify_reg		= 0xFF07,
@@ -507,7 +502,8 @@
 #define I40E_AQC_SAN_ADDR_VALID		0x20
 #define I40E_AQC_PORT_ADDR_VALID	0x40
 #define I40E_AQC_WOL_ADDR_VALID		0x80
-#define I40E_AQC_ADDR_VALID_MASK	0xf0
+#define I40E_AQC_MC_MAG_EN_VALID	0x100
+#define I40E_AQC_ADDR_VALID_MASK	0x1F0
 	u8	reserved[6];
 	__le32	addr_high;
 	__le32	addr_low;
@@ -530,7 +526,9 @@
 #define I40E_AQC_WRITE_TYPE_LAA_ONLY	0x0000
 #define I40E_AQC_WRITE_TYPE_LAA_WOL	0x4000
 #define I40E_AQC_WRITE_TYPE_PORT	0x8000
-#define I40E_AQC_WRITE_TYPE_MASK	0xc000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG	0xC000
+#define I40E_AQC_WRITE_TYPE_MASK	0xC000
+
 	__le16	mac_sah;
 	__le32	mac_sal;
 	u8	reserved[8];
@@ -824,8 +822,12 @@
 					 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 	/* queueing option section */
 	u8	queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA	0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA	0x08
 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA	0x10
 #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA	0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF	0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI	0x40
 	u8	queueing_opt_reserved[3];
 	/* scheduler section */
 	u8	up_enable_bits;
@@ -1066,6 +1068,7 @@
 	__le16	seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK		0x3FF
 	__le16	vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK		0x0FFF
 #define I40E_AQC_SET_VSI_VLAN_VALID		0x8000
 	u8	reserved[8];
 };
@@ -2093,6 +2096,46 @@
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
 
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID		(0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT	0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK	(0x3FF << \
+					I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+	__le16	vsi_id;
+	u8	reserved[6];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+	u8 standard_rss_key[0x28];
+	u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct  i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID		(0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT	0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK	(0x3FF << \
+					I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+	__le16	vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT	0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK	(0x1 << \
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI	0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF	1
+	__le16	flags;
+	u8	reserved[4];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+
 /* tunnel key structure 0x0B10 */
 
 struct i40e_aqc_tunnel_key_structure_A0 {
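
The buffer referenced by the new get/set RSS key commands (0x0B02/0x0B04) is
the 52-byte blob described by i40e_aqc_get_set_rss_key_data; the AQ descriptor
itself only carries its DMA address in addr_high/addr_low.  A standalone check
of the arithmetic behind the I40E_CHECK_STRUCT_LEN(0x34, ...) assertion:

	#include <stdio.h>
	#include <stdint.h>

	/* mirrors the layout above: 0x28-byte key + 0xc-byte extension */
	struct rss_key_data {
		uint8_t standard_rss_key[0x28];
		uint8_t extended_hash_key[0xc];
	};

	int main(void)
	{
		printf("key blob: %zu bytes (expected 0x34 = 52)\n",
		       sizeof(struct rss_key_data));
		return 0;
	}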
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 39fcb1d..d45d0ae 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -54,6 +54,15 @@
 		case I40E_DEV_ID_20G_KR2:
 			hw->mac.type = I40E_MAC_XL710;
 			break;
+		case I40E_DEV_ID_SFP_X722:
+		case I40E_DEV_ID_1G_BASE_T_X722:
+		case I40E_DEV_ID_10G_BASE_T_X722:
+			hw->mac.type = I40E_MAC_X722;
+			break;
+		case I40E_DEV_ID_X722_VF:
+		case I40E_DEV_ID_X722_VF_HV:
+			hw->mac.type = I40E_MAC_X722_VF;
+			break;
 		case I40E_DEV_ID_VF:
 		case I40E_DEV_ID_VF_HV:
 			hw->mac.type = I40E_MAC_VF;
@@ -72,6 +81,212 @@
 }
 
 /**
+ * i40evf_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+	switch (aq_err) {
+	case I40E_AQ_RC_OK:
+		return "OK";
+	case I40E_AQ_RC_EPERM:
+		return "I40E_AQ_RC_EPERM";
+	case I40E_AQ_RC_ENOENT:
+		return "I40E_AQ_RC_ENOENT";
+	case I40E_AQ_RC_ESRCH:
+		return "I40E_AQ_RC_ESRCH";
+	case I40E_AQ_RC_EINTR:
+		return "I40E_AQ_RC_EINTR";
+	case I40E_AQ_RC_EIO:
+		return "I40E_AQ_RC_EIO";
+	case I40E_AQ_RC_ENXIO:
+		return "I40E_AQ_RC_ENXIO";
+	case I40E_AQ_RC_E2BIG:
+		return "I40E_AQ_RC_E2BIG";
+	case I40E_AQ_RC_EAGAIN:
+		return "I40E_AQ_RC_EAGAIN";
+	case I40E_AQ_RC_ENOMEM:
+		return "I40E_AQ_RC_ENOMEM";
+	case I40E_AQ_RC_EACCES:
+		return "I40E_AQ_RC_EACCES";
+	case I40E_AQ_RC_EFAULT:
+		return "I40E_AQ_RC_EFAULT";
+	case I40E_AQ_RC_EBUSY:
+		return "I40E_AQ_RC_EBUSY";
+	case I40E_AQ_RC_EEXIST:
+		return "I40E_AQ_RC_EEXIST";
+	case I40E_AQ_RC_EINVAL:
+		return "I40E_AQ_RC_EINVAL";
+	case I40E_AQ_RC_ENOTTY:
+		return "I40E_AQ_RC_ENOTTY";
+	case I40E_AQ_RC_ENOSPC:
+		return "I40E_AQ_RC_ENOSPC";
+	case I40E_AQ_RC_ENOSYS:
+		return "I40E_AQ_RC_ENOSYS";
+	case I40E_AQ_RC_ERANGE:
+		return "I40E_AQ_RC_ERANGE";
+	case I40E_AQ_RC_EFLUSHED:
+		return "I40E_AQ_RC_EFLUSHED";
+	case I40E_AQ_RC_BAD_ADDR:
+		return "I40E_AQ_RC_BAD_ADDR";
+	case I40E_AQ_RC_EMODE:
+		return "I40E_AQ_RC_EMODE";
+	case I40E_AQ_RC_EFBIG:
+		return "I40E_AQ_RC_EFBIG";
+	}
+
+	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+	return hw->err_str;
+}
+
+/**
+ * i40evf_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+	switch (stat_err) {
+	case 0:
+		return "OK";
+	case I40E_ERR_NVM:
+		return "I40E_ERR_NVM";
+	case I40E_ERR_NVM_CHECKSUM:
+		return "I40E_ERR_NVM_CHECKSUM";
+	case I40E_ERR_PHY:
+		return "I40E_ERR_PHY";
+	case I40E_ERR_CONFIG:
+		return "I40E_ERR_CONFIG";
+	case I40E_ERR_PARAM:
+		return "I40E_ERR_PARAM";
+	case I40E_ERR_MAC_TYPE:
+		return "I40E_ERR_MAC_TYPE";
+	case I40E_ERR_UNKNOWN_PHY:
+		return "I40E_ERR_UNKNOWN_PHY";
+	case I40E_ERR_LINK_SETUP:
+		return "I40E_ERR_LINK_SETUP";
+	case I40E_ERR_ADAPTER_STOPPED:
+		return "I40E_ERR_ADAPTER_STOPPED";
+	case I40E_ERR_INVALID_MAC_ADDR:
+		return "I40E_ERR_INVALID_MAC_ADDR";
+	case I40E_ERR_DEVICE_NOT_SUPPORTED:
+		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+	case I40E_ERR_MASTER_REQUESTS_PENDING:
+		return "I40E_ERR_MASTER_REQUESTS_PENDING";
+	case I40E_ERR_INVALID_LINK_SETTINGS:
+		return "I40E_ERR_INVALID_LINK_SETTINGS";
+	case I40E_ERR_AUTONEG_NOT_COMPLETE:
+		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+	case I40E_ERR_RESET_FAILED:
+		return "I40E_ERR_RESET_FAILED";
+	case I40E_ERR_SWFW_SYNC:
+		return "I40E_ERR_SWFW_SYNC";
+	case I40E_ERR_NO_AVAILABLE_VSI:
+		return "I40E_ERR_NO_AVAILABLE_VSI";
+	case I40E_ERR_NO_MEMORY:
+		return "I40E_ERR_NO_MEMORY";
+	case I40E_ERR_BAD_PTR:
+		return "I40E_ERR_BAD_PTR";
+	case I40E_ERR_RING_FULL:
+		return "I40E_ERR_RING_FULL";
+	case I40E_ERR_INVALID_PD_ID:
+		return "I40E_ERR_INVALID_PD_ID";
+	case I40E_ERR_INVALID_QP_ID:
+		return "I40E_ERR_INVALID_QP_ID";
+	case I40E_ERR_INVALID_CQ_ID:
+		return "I40E_ERR_INVALID_CQ_ID";
+	case I40E_ERR_INVALID_CEQ_ID:
+		return "I40E_ERR_INVALID_CEQ_ID";
+	case I40E_ERR_INVALID_AEQ_ID:
+		return "I40E_ERR_INVALID_AEQ_ID";
+	case I40E_ERR_INVALID_SIZE:
+		return "I40E_ERR_INVALID_SIZE";
+	case I40E_ERR_INVALID_ARP_INDEX:
+		return "I40E_ERR_INVALID_ARP_INDEX";
+	case I40E_ERR_INVALID_FPM_FUNC_ID:
+		return "I40E_ERR_INVALID_FPM_FUNC_ID";
+	case I40E_ERR_QP_INVALID_MSG_SIZE:
+		return "I40E_ERR_QP_INVALID_MSG_SIZE";
+	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+	case I40E_ERR_INVALID_FRAG_COUNT:
+		return "I40E_ERR_INVALID_FRAG_COUNT";
+	case I40E_ERR_QUEUE_EMPTY:
+		return "I40E_ERR_QUEUE_EMPTY";
+	case I40E_ERR_INVALID_ALIGNMENT:
+		return "I40E_ERR_INVALID_ALIGNMENT";
+	case I40E_ERR_FLUSHED_QUEUE:
+		return "I40E_ERR_FLUSHED_QUEUE";
+	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+	case I40E_ERR_INVALID_IMM_DATA_SIZE:
+		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+	case I40E_ERR_TIMEOUT:
+		return "I40E_ERR_TIMEOUT";
+	case I40E_ERR_OPCODE_MISMATCH:
+		return "I40E_ERR_OPCODE_MISMATCH";
+	case I40E_ERR_CQP_COMPL_ERROR:
+		return "I40E_ERR_CQP_COMPL_ERROR";
+	case I40E_ERR_INVALID_VF_ID:
+		return "I40E_ERR_INVALID_VF_ID";
+	case I40E_ERR_INVALID_HMCFN_ID:
+		return "I40E_ERR_INVALID_HMCFN_ID";
+	case I40E_ERR_BACKING_PAGE_ERROR:
+		return "I40E_ERR_BACKING_PAGE_ERROR";
+	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+	case I40E_ERR_INVALID_PBLE_INDEX:
+		return "I40E_ERR_INVALID_PBLE_INDEX";
+	case I40E_ERR_INVALID_SD_INDEX:
+		return "I40E_ERR_INVALID_SD_INDEX";
+	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+	case I40E_ERR_INVALID_SD_TYPE:
+		return "I40E_ERR_INVALID_SD_TYPE";
+	case I40E_ERR_MEMCPY_FAILED:
+		return "I40E_ERR_MEMCPY_FAILED";
+	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+	case I40E_ERR_SRQ_ENABLED:
+		return "I40E_ERR_SRQ_ENABLED";
+	case I40E_ERR_ADMIN_QUEUE_ERROR:
+		return "I40E_ERR_ADMIN_QUEUE_ERROR";
+	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+	case I40E_ERR_BUF_TOO_SHORT:
+		return "I40E_ERR_BUF_TOO_SHORT";
+	case I40E_ERR_ADMIN_QUEUE_FULL:
+		return "I40E_ERR_ADMIN_QUEUE_FULL";
+	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+	case I40E_ERR_BAD_IWARP_CQE:
+		return "I40E_ERR_BAD_IWARP_CQE";
+	case I40E_ERR_NVM_BLANK_MODE:
+		return "I40E_ERR_NVM_BLANK_MODE";
+	case I40E_ERR_NOT_IMPLEMENTED:
+		return "I40E_ERR_NOT_IMPLEMENTED";
+	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+	case I40E_ERR_DIAG_TEST_FAILED:
+		return "I40E_ERR_DIAG_TEST_FAILED";
+	case I40E_ERR_NOT_READY:
+		return "I40E_ERR_NOT_READY";
+	case I40E_NOT_SUPPORTED:
+		return "I40E_NOT_SUPPORTED";
+	case I40E_ERR_FIRMWARE_API_VERSION:
+		return "I40E_ERR_FIRMWARE_API_VERSION";
+	}
+
+	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+	return hw->err_str;
+}
+
+/**
  * i40evf_debug_aq
 * @hw: pointer to the hw struct
  * @mask: debug mask
@@ -146,7 +361,7 @@
 {
 	if (hw->aq.asq.len)
 		return !!(rd32(hw, hw->aq.asq.len) &
-			  I40E_PF_ATQLEN_ATQENABLE_MASK);
+			  I40E_VF_ATQLEN1_ATQENABLE_MASK);
 	else
 		return false;
 }
@@ -177,6 +392,169 @@
 	return status;
 }
 
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+					   u16 vsi_id, bool pf_lut,
+					   u8 *lut, u16 lut_size,
+					   bool set)
+{
+	i40e_status status;
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_set_rss_lut *cmd_resp =
+		   (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+	if (set)
+		i40evf_fill_default_direct_cmd_desc(&desc,
+						    i40e_aqc_opc_set_rss_lut);
+	else
+		i40evf_fill_default_direct_cmd_desc(&desc,
+						    i40e_aqc_opc_get_rss_lut);
+
+	/* Indirect command */
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+	cmd_resp->vsi_id =
+			cpu_to_le16((u16)((vsi_id <<
+					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+	if (pf_lut)
+		cmd_resp->flags |= cpu_to_le16((u16)
+					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+	else
+		cmd_resp->flags |= cpu_to_le16((u16)
+					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+	cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut));
+	cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
+
+	status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+	return status;
+}
+
+/**
+ * i40evf_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+				  bool pf_lut, u8 *lut, u16 lut_size)
+{
+	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+				       false);
+}
+
+/**
+ * i40evf_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+				  bool pf_lut, u8 *lut, u16 lut_size)
+{
+	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
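+
+/* Illustrative sketch (not part of this change): a caller spreading
+ * num_queues receive queues across a VSI lookup table might do:
+ *
+ *	u8 lut[64];
+ *	u16 i;
+ *
+ *	for (i = 0; i < sizeof(lut); i++)
+ *		lut[i] = i % num_queues;
+ *	status = i40evf_aq_set_rss_lut(hw, vsi_id, false, lut, sizeof(lut));
+ *
+ * where num_queues, vsi_id and hw come from the caller's context, and the
+ * 64-entry table size is only an assumption for the example.
+ */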
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * Internal function to get or set the RSS key per VSI
+ **/
+static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+				      u16 vsi_id,
+				      struct i40e_aqc_get_set_rss_key_data *key,
+				      bool set)
+{
+	i40e_status status;
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_set_rss_key *cmd_resp =
+			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+	if (set)
+		i40evf_fill_default_direct_cmd_desc(&desc,
+						    i40e_aqc_opc_set_rss_key);
+	else
+		i40evf_fill_default_direct_cmd_desc(&desc,
+						    i40e_aqc_opc_get_rss_key);
+
+	/* Indirect command */
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+	cmd_resp->vsi_id =
+			cpu_to_le16((u16)((vsi_id <<
+					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+	cmd_resp->addr_high = cpu_to_le32(upper_32_bits((u64)key));
+	cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
+
+	status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL);
+
+	return status;
+}
+
+/**
+ * i40evf_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ **/
+i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
+				  u16 vsi_id,
+				  struct i40e_aqc_get_set_rss_key_data *key)
+{
+	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40evf_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
+				  u16 vsi_id,
+				  struct i40e_aqc_get_set_rss_key_data *key)
+{
+	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+
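+/* Illustrative sketch (not part of this change): a caller programming a
+ * random hash key could fill the key struct with netdev_rss_key_fill()
+ * before handing it to the wrapper:
+ *
+ *	struct i40e_aqc_get_set_rss_key_data key;
+ *
+ *	netdev_rss_key_fill(&key, sizeof(key));
+ *	status = i40evf_aq_set_rss_key(hw, vsi_id, &key);
+ */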
 
 /* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
  * hardware to a bit-field that can be used by SW to more easily determine the
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
index 931c880..00ed24b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -62,6 +62,7 @@
 struct i40e_hmc_pd_entry {
 	struct i40e_hmc_bp bp;
 	u32 sd_index;
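+	/* set when the backing page is supplied by the caller; see the
+	 * rsrc_pg argument added to i40e_add_pd_table_entry() below
+	 */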
+	bool rsrc_pg;
 	bool valid;
 };
 
@@ -126,8 +127,8 @@
 		 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
 		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
 		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |			\
-		(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
-	val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+		BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
+	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
 	wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);			\
 	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
 	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
@@ -146,7 +147,7 @@
 		I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
 		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
 		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);			\
-	val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
 	wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);				\
 	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
 	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
@@ -218,7 +219,8 @@
 
 i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
 					      struct i40e_hmc_info *hmc_info,
-					      u32 pd_index);
+					      u32 pd_index,
+					      struct i40e_dma_mem *rsrc_pg);
 i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
 					struct i40e_hmc_info *hmc_info,
 					u32 idx);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 58e37a4..55ae4b0 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -60,6 +60,19 @@
 void i40evf_resume_aq(struct i40e_hw *hw);
 bool i40evf_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+
+i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+				  bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+				  bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
+				  u16 vsi_id,
+				  struct i40e_aqc_get_set_rss_key_data *key);
+i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
+				  u16 vsi_id,
+				  struct i40e_aqc_get_set_rss_key_data *key);
 
 i40e_status i40e_set_mac_type(struct i40e_hw *hw);
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
index 3cc7376..10febcf 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -27,1580 +27,6 @@
 #ifndef _I40E_REGISTER_H_
 #define _I40E_REGISTER_H_
 
-#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
-#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
-#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
-#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
-#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
-#define I40E_GL_ARQH_ARQH_SHIFT 0
-#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
-#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
-#define I40E_GL_ARQT_ARQT_SHIFT 0
-#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
-#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
-#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
-#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
-#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
-#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
-#define I40E_GL_ATQH_ATQH_SHIFT 0
-#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
-#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
-#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT)
-#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT)
-#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT)
-#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */
-#define I40E_GL_ATQT_ATQT_SHIFT 0
-#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT)
-#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
-#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
-#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
-#define I40E_PF_ARQH_ARQH_SHIFT 0
-#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
-#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
-#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
-#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
-#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT)
-#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
-#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
-#define I40E_PF_ARQT_ARQT_SHIFT 0
-#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
-#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
-#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
-#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
-#define I40E_PF_ATQH_ATQH_SHIFT 0
-#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
-#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
-#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
-#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
-#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
-#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
-#define I40E_PF_ATQT_ATQT_SHIFT 0
-#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
-#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQBAH_MAX_INDEX 127
-#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQBAL_MAX_INDEX 127
-#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQH_MAX_INDEX 127
-#define I40E_VF_ARQH_ARQH_SHIFT 0
-#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
-#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQLEN_MAX_INDEX 127
-#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
-#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
-#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
-#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
-#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQT_MAX_INDEX 127
-#define I40E_VF_ARQT_ARQT_SHIFT 0
-#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
-#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQBAH_MAX_INDEX 127
-#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQBAL_MAX_INDEX 127
-#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQH_MAX_INDEX 127
-#define I40E_VF_ATQH_ATQH_SHIFT 0
-#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
-#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQLEN_MAX_INDEX 127
-#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
-#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
-#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
-#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQT_MAX_INDEX 127
-#define I40E_VF_ATQT_ATQT_SHIFT 0
-#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
-#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */
-#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
-#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
-#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
-#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
-#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
-#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
-#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
-#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
-#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */
-#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
-#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
-#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
-#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
-#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
-#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
-#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
-#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
-#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
-#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
-#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
-#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
-#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
-#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
-#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
-#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
-#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
-#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
-#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
-#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
-#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
-#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
-#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
-#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
-#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
-#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
-#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
-#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
-#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
-#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
-#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
-#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
-#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
-#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
-#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
-#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
-#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
-#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
-#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
-#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
-#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
-#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
-#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
-#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
-#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
-#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
-#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
-#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
-#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
-#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
-#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
-#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
-#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
-#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
-#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
-#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
-#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
-#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
-#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
-#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
-#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
-#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
-#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
-#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
-#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
-#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
-#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
-#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
-#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
-#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
-#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
-#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
-#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
-#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
-#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
-#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
-#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
-#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
-#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
-#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
-#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
-#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
-#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
-#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
-#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
-#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
-#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
-#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */
-#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
-#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
-#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
-#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
-#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
-#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
-#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
-#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
-#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
-#define I40E_GL_FWSTS_FWS0B_SHIFT 0
-#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
-#define I40E_GL_FWSTS_FWRI_SHIFT 9
-#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
-#define I40E_GL_FWSTS_FWS1B_SHIFT 16
-#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
-#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
-#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
-#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
-#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
-#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
-#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
-#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
-#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
-#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
-#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
-#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
-#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
-#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
-#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
-#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
-#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
-#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
-#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
-#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
-#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
-#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
-#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
-#define I40E_GLGEN_I2CCMD_R_SHIFT 29
-#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
-#define I40E_GLGEN_I2CCMD_E_SHIFT 31
-#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
-#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
-#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
-#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
-#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MSCA_MAX_INDEX 3
-#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
-#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
-#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
-#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
-#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
-#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
-#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
-#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
-#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
-#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
-#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
-#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
-#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MSRWD_MAX_INDEX 3
-#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
-#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
-#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
-#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
-#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
-#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
-#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
-#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
-#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
-#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
-#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
-#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
-#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
-#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
-#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
-#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
-#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
-#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
-#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
-#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
-#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
-#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
-#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
-#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
-#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
-#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
-#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
-#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
-#define I40E_GLGEN_STAT_VTEN_SHIFT 3
-#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
-#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
-#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
-#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
-#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
-#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
-#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
-#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
-#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
-#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
-#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
-#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
-#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
-#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
-#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
-#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
-#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */
-#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
-#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
-#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
-#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
-#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */
-#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
-#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
-#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
-#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
-#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
-#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
-#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
-#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
-#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
-#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
-#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
-#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
-#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
-#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
-#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
-#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
-#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
-#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
-#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
-#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
-#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
-#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
-#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
-#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
-#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
-#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
-#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
-#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
-#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
-#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
-#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
-#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
-#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
-#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
-#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
-#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
-#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
-#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
-#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
-#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
-#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
-#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
-#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
-#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
-#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
-#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
-#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
-#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
-#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
-#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
-#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
-#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
-#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
-#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
-#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
-#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
-#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
-#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
-#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
-#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
-#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
-#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
-#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
-#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
-#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_SDPART_MAX_INDEX 15
-#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
-#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
-#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
-#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
-#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
-#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
-#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
-#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
-#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
-#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
-#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
-#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
-#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
-#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
-#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
-#define I40E_GL_GP_FUSE_MAX_INDEX 28
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
-#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
-#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
-#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
-#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
-#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
-#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
-#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
-#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
-#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
-#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
-#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
-#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_PFINT_CEQCTL_MAX_INDEX 511
-#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
-#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
-#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
-#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
-#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
-#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
-#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
-#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
-#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
-#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
-#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
-#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
-#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
-#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
-#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
-#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
-#define I40E_PFINT_ICR0_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
-#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
-#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
-#define I40E_PFINT_ICR0_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT)
-#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT)
-#define I40E_PFINT_ICR0_SWINT_SHIFT 31
-#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT)
-#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */
-#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT)
-#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
-#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
-#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
-#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
-#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
-#define I40E_PFINT_ITR0_MAX_INDEX 2
-#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_ITRN_MAX_INDEX 2
-#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
-#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
-#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
-#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */
-#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
-#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_RATEN_MAX_INDEX 511
-#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
-#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
-#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
-#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QINT_RQCTL_MAX_INDEX 1535
-#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
-#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
-#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
-#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT)
-#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
-#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
-#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
-#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
-#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
-#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QINT_TQCTL_MAX_INDEX 1535
-#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
-#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
-#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
-#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT)
-#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
-#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
-#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
-#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
-#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
-#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
-#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
-#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
-#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_ICR0_MAX_INDEX 127
-#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT)
-#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR0_SWINT_SHIFT 31
-#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT)
-#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
-#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
-#define I40E_VFINT_ITR0_MAX_INDEX 2
-#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
-#define I40E_VFINT_ITRN_MAX_INDEX 2
-#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
-#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPINT_AEQCTL_MAX_INDEX 127
-#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
-#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_VPINT_CEQCTL_MAX_INDEX 511
-#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
-#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
-#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPINT_LNKLST0_MAX_INDEX 127
-#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
-#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
-#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
-#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
-#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPINT_RATE0_MAX_INDEX 127
-#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT)
-#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VPINT_RATEN_MAX_INDEX 511
-#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT)
-#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */
-#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
-#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
-#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
-#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT)
-#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */
-#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
-#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
-#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
-#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
-#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
-#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
-#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
-#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
-#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11
-#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
-#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16
-#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
-#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
-#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
-#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
-#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
-#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
-#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
-#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
-#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
-#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QRX_ENA_MAX_INDEX 1535
-#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
-#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
-#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT)
-#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
-#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
-#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QRX_TAIL_MAX_INDEX 1535
-#define I40E_QRX_TAIL_TAIL_SHIFT 0
-#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT)
-#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QTX_CTL_MAX_INDEX 1535
-#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
-#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
-#define I40E_QTX_CTL_PF_INDX_SHIFT 2
-#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT)
-#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
-#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
-#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QTX_ENA_MAX_INDEX 1535
-#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
-#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
-#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT)
-#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
-#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
-#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QTX_HEAD_MAX_INDEX 1535
-#define I40E_QTX_HEAD_HEAD_SHIFT 0
-#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT)
-#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
-#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT)
-#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QTX_TAIL_MAX_INDEX 1535
-#define I40E_QTX_TAIL_TAIL_SHIFT 0
-#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT)
-#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPLAN_MAPENA_MAX_INDEX 127
-#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
-#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
-#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
-#define I40E_VPLAN_QTABLE_MAX_INDEX 15
-#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
-#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
-#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
-#define I40E_VSILAN_QBASE_MAX_INDEX 383
-#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
-#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT)
-#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
-#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
-#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
-#define I40E_VSILAN_QTABLE_MAX_INDEX 7
-#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
-#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
-#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
-#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
-#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */
-#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
-#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
-#define I40E_PRTGL_SAH_MFS_SHIFT 16
-#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT)
-#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
-#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
-#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
-#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */
-#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
-#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
-#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */
-#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
-#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
-#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
-#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
-#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
-#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
-#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
-#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
-#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
-#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
-#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
-#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
-#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
-#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */
-#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
-#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
-#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
-#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
-#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
-#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
-#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
-#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
-#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */
-#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
-#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
-#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
-#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
-#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
-#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
-#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
-#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
-#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
-#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
-#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
-#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
-#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
-#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
-#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
-#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
-#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
-#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
-#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT)
-#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
-#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
-#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
-#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
-#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
-#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
-#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
-#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
-#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
-#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
-#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
-#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_METF_MAX_INDEX 3
-#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
-#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT)
-#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
-#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT)
-#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
-#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
-#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
-#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
-#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
-#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT)
-#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
-#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT)
-#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
-#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
-#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
-#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
-#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
-#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
-#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
-#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT)
-#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
-#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
-#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT)
-#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */
-#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
-#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
-#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
-#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
-#define I40E_MSIX_PBA_MAX_INDEX 5
-#define I40E_MSIX_PBA_PENBIT_SHIFT 0
-#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
-#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TADD_MAX_INDEX 128
-#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
-#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TMSG_MAX_INDEX 128
-#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TUADD_MAX_INDEX 128
-#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TVCTRL_MAX_INDEX 128
-#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
 #define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
 #define I40E_VFMSIX_PBA1_MAX_INDEX 19
 #define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
@@ -1623,1525 +49,6 @@
 #define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
 #define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
 #define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
-#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
-#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
-#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
-#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
-#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
-#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
-#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
-#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
-#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
-#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
-#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
-#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
-#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
-#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
-#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
-#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
-#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
-#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
-#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
-#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
-#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
-#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */
-#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
-#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
-#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
-#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
-#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
-#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
-#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
-#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
-#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
-#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
-#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
-#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
-#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
-#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
-#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
-#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
-#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
-#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
-#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
-#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
-#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
-#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
-#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
-#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
-#define I40E_GLNVM_SRCTL_START_SHIFT 30
-#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
-#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
-#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
-#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
-#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
-#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
-#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
-#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
-#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
-#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
-#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
-#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
-#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
-#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
-#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
-#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
-#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
-#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
-#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
-#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
-#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
-#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
-#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
-#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
-#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
-#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */
-#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
-#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
-#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
-#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
-#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
-#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
-#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
-#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
-#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
-#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
-#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
-#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
-#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
-#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
-#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
-#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
-#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
-#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
-#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
-#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
-#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
-#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
-#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
-#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
-#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
-#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
-#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
-#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
-#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
-#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
-#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
-#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
-#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
-#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
-#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
-#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
-#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
-#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
-#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
-#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
-#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
-#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
-#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
-#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
-#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
-#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
-#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
-#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
-#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
-#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
-#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
-#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
-#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
-#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
-#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
-#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
-#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
-#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
-#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
-#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
-#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
-#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
-#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
-#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
-#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
-#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
-#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
-#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
-#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
-#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
-#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
-#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
-#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
-#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
-#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
-#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
-#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
-#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
-#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
-#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
-#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
-#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
-#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
-#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
-#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
-#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
-#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
-#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
-#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
-#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
-#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
-#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
-#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
-#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
-#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
-#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
-#define I40E_PFPCI_PM_PME_EN_SHIFT 0
-#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
-#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
-#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
-#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
-#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
-#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
-#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
-#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
-#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */
-#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
-#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
-#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
-#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
-#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
-#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
-#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
-#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
-#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
-#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
-#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
-#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
-#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
-#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
-#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
-#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
-#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
-#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
-#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
-#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
-#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
-#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
-#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
-#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
-#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
-#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
-#define I40E_PRTPM_GC_RATD_SHIFT 2
-#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
-#define I40E_PRTPM_GC_LCDMP_SHIFT 3
-#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
-#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
-#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
-#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
-#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
-#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
-#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
-#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
-#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
-#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
-#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
-#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
-#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
-#define I40E_GLRPB_GHW_GHW_SHIFT 0
-#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
-#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
-#define I40E_GLRPB_GLW_GLW_SHIFT 0
-#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
-#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
-#define I40E_GLRPB_PHW_PHW_SHIFT 0
-#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
-#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
-#define I40E_GLRPB_PLW_PLW_SHIFT 0
-#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
-#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DHW_MAX_INDEX 7
-#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
-#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
-#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DLW_MAX_INDEX 7
-#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
-#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
-#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DPS_MAX_INDEX 7
-#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
-#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
-#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_SHT_MAX_INDEX 7
-#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
-#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
-#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
-#define I40E_PRTRPB_SHW_SHW_SHIFT 0
-#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
-#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_SLT_MAX_INDEX 7
-#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
-#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
-#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
-#define I40E_PRTRPB_SLW_SLW_SHIFT 0
-#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
-#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
-#define I40E_PRTRPB_SPS_SPS_SHIFT 0
-#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
-#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
-#define I40E_GLQF_CTL_HTOEP_SHIFT 1
-#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
-#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
-#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
-#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
-#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
-#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
-#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
-#define I40E_GLQF_CTL_RSVD_SHIFT 7
-#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
-#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
-#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
-#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
-#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
-#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
-#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
-#define I40E_GLQF_CTL_FDBEST_SHIFT 17
-#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
-#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
-#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
-#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
-#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
-#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
-#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
-#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
-#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
-#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
-#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
-#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
-#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
-#define I40E_GLQF_HKEY_MAX_INDEX 12
-#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
-#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
-#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
-#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
-#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
-#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
-#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
-#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
-#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_HSYM_MAX_INDEX 63
-#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
-#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
-#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_GLQF_PCNT_MAX_INDEX 511
-#define I40E_GLQF_PCNT_PCNT_SHIFT 0
-#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
-#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_SWAP_MAX_INDEX 1
-#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
-#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
-#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
-#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
-#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
-#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
-#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
-#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
-#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
-#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
-#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
-#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
-#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
-#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
-#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
-#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
-#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
-#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
-#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
-#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
-#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
-#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
-#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
-#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
-#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
-#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
-#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
-#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
-#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
-#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
-#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
-#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
-#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
-#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
-#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
-#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
-#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_PFQF_HENA_MAX_INDEX 1
-#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
-#define I40E_PFQF_HKEY_MAX_INDEX 12
-#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
-#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
-#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
-#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
-#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_PFQF_HLUT_MAX_INDEX 127
-#define I40E_PFQF_HLUT_LUT0_SHIFT 0
-#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
-#define I40E_PFQF_HLUT_LUT1_SHIFT 8
-#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
-#define I40E_PFQF_HLUT_LUT2_SHIFT 16
-#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
-#define I40E_PFQF_HLUT_LUT3_SHIFT 24
-#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
-#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
-#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
-#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
-#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
-#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
-#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
-#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
-#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
-#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
-#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
-#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
-#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
-#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
-#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
-#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
-#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
-#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
-#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
-#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HENA1_MAX_INDEX 1
-#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HKEY1_MAX_INDEX 12
-#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HLUT1_MAX_INDEX 15
-#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
-#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
-#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
-#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
-#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HREGION1_MAX_INDEX 7
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
-#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPQF_CTL_MAX_INDEX 127
-#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
-#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
-#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
-#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
-#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
-#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
-#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
-#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
-#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
-#define I40E_VSIQF_CTL_MAX_INDEX 383
-#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
-#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
-#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
-#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
-#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
-#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
-#define I40E_VSIQF_TCREGION_MAX_INDEX 3
-#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
-#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
-#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
-#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
-#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOECRC_MAX_INDEX 143
-#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
-#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
-#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDDPC_MAX_INDEX 143
-#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
-#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
-#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
-#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
-#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
-#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
-#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
-#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
-#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
-#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOELAST_MAX_INDEX 143
-#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
-#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
-#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEPRC_MAX_INDEX 143
-#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
-#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
-#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEPTC_MAX_INDEX 143
-#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
-#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
-#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOERPDC_MAX_INDEX 143
-#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
-#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
-#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR1_L_MAX_INDEX 143
-#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
-#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
-#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR2_L_MAX_INDEX 143
-#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
-#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
-#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPRCH_MAX_INDEX 3
-#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
-#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPRCL_MAX_INDEX 3
-#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
-#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPTCH_MAX_INDEX 3
-#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
-#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPTCL_MAX_INDEX 3
-#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
-#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
-#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
-#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
-#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GORCH_MAX_INDEX 3
-#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
-#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
-#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GORCL_MAX_INDEX 3
-#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
-#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
-#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GOTCH_MAX_INDEX 3
-#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
-#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GOTCL_MAX_INDEX 3
-#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
-#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
-#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
-#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
-#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LDPC_MAX_INDEX 3
-#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
-#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
-#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
-#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
-#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
-#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
-#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
-#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MLFC_MAX_INDEX 3
-#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
-#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
-#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPRCH_MAX_INDEX 3
-#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
-#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPRCL_MAX_INDEX 3
-#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
-#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPTCH_MAX_INDEX 3
-#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
-#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPTCL_MAX_INDEX 3
-#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
-#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MRFC_MAX_INDEX 3
-#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
-#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
-#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
-#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
-#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
-#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
-#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
-#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
-#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC127H_MAX_INDEX 3
-#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
-#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
-#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC127L_MAX_INDEX 3
-#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
-#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
-#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
-#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
-#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC255H_MAX_INDEX 3
-#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
-#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
-#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC255L_MAX_INDEX 3
-#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
-#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
-#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC511H_MAX_INDEX 3
-#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
-#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
-#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC511L_MAX_INDEX 3
-#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
-#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
-#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC64H_MAX_INDEX 3
-#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
-#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
-#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC64L_MAX_INDEX 3
-#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
-#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
-#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
-#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
-#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
-#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
-#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
-#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
-#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
-#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
-#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC127H_MAX_INDEX 3
-#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
-#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
-#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC127L_MAX_INDEX 3
-#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
-#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
-#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
-#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
-#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
-#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
-#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
-#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
-#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC255H_MAX_INDEX 3
-#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
-#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
-#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC255L_MAX_INDEX 3
-#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
-#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
-#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC511H_MAX_INDEX 3
-#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
-#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
-#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC511L_MAX_INDEX 3
-#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
-#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
-#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC64H_MAX_INDEX 3
-#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
-#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
-#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC64L_MAX_INDEX 3
-#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
-#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
-#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
-#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
-#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
-#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
-#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
-#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
-#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
-#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
-#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
-#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RDPC_MAX_INDEX 3
-#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
-#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
-#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RFC_MAX_INDEX 3
-#define I40E_GLPRT_RFC_RFC_SHIFT 0
-#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
-#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RJC_MAX_INDEX 3
-#define I40E_GLPRT_RJC_RJC_SHIFT 0
-#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
-#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RLEC_MAX_INDEX 3
-#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
-#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
-#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_ROC_MAX_INDEX 3
-#define I40E_GLPRT_ROC_ROC_SHIFT 0
-#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
-#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RUC_MAX_INDEX 3
-#define I40E_GLPRT_RUC_RUC_SHIFT 0
-#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
-#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RUPP_MAX_INDEX 3
-#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
-#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
-#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
-#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_TDOLD_MAX_INDEX 3
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
-#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPRCH_MAX_INDEX 3
-#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
-#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPRCL_MAX_INDEX 3
-#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
-#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPTCH_MAX_INDEX 3
-#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
-#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPTCL_MAX_INDEX 3
-#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
-#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPRCH_MAX_INDEX 15
-#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
-#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPRCL_MAX_INDEX 15
-#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
-#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPTCH_MAX_INDEX 15
-#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
-#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPTCL_MAX_INDEX 15
-#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
-#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GORCH_MAX_INDEX 15
-#define I40E_GLSW_GORCH_GORCH_SHIFT 0
-#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
-#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GORCL_MAX_INDEX 15
-#define I40E_GLSW_GORCL_GORCL_SHIFT 0
-#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
-#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GOTCH_MAX_INDEX 15
-#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
-#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GOTCL_MAX_INDEX 15
-#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
-#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPRCH_MAX_INDEX 15
-#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
-#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPRCL_MAX_INDEX 15
-#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
-#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPTCH_MAX_INDEX 15
-#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
-#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPTCL_MAX_INDEX 15
-#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
-#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_RUPP_MAX_INDEX 15
-#define I40E_GLSW_RUPP_RUPP_SHIFT 0
-#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
-#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_TDPC_MAX_INDEX 15
-#define I40E_GLSW_TDPC_TDPC_SHIFT 0
-#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
-#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPRCH_MAX_INDEX 15
-#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
-#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPRCL_MAX_INDEX 15
-#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
-#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPTCH_MAX_INDEX 15
-#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
-#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPTCL_MAX_INDEX 15
-#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
-#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPRCH_MAX_INDEX 383
-#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
-#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPRCL_MAX_INDEX 383
-#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
-#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPTCH_MAX_INDEX 383
-#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
-#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPTCL_MAX_INDEX 383
-#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
-#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GORCH_MAX_INDEX 383
-#define I40E_GLV_GORCH_GORCH_SHIFT 0
-#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
-#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GORCL_MAX_INDEX 383
-#define I40E_GLV_GORCL_GORCL_SHIFT 0
-#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
-#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GOTCH_MAX_INDEX 383
-#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
-#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GOTCL_MAX_INDEX 383
-#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
-#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPRCH_MAX_INDEX 383
-#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
-#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPRCL_MAX_INDEX 383
-#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
-#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPTCH_MAX_INDEX 383
-#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
-#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPTCL_MAX_INDEX 383
-#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
-#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_RDPC_MAX_INDEX 383
-#define I40E_GLV_RDPC_RDPC_SHIFT 0
-#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
-#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_RUPP_MAX_INDEX 383
-#define I40E_GLV_RUPP_RUPP_SHIFT 0
-#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_TEPC_MAX_INDEX 383
-#define I40E_GLV_TEPC_TEPC_SHIFT 0
-#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
-#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPRCH_MAX_INDEX 383
-#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
-#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPRCL_MAX_INDEX 383
-#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
-#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPTCH_MAX_INDEX 383
-#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
-#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
-#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPTCL_MAX_INDEX 383
-#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
-#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
-#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
-#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
-#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
-#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
-#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
-#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
-#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
-#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
-#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
-#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
-#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
-#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
-#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
-#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
-#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
-#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
-#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
-#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
-#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
-#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
-#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
-#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
-#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
-#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
-#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
-#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
-#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
-#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
-#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
-#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
-#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
-#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
-#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
-#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
-#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
-#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
-#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
-#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
-#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
-#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
-#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
-#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
-#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
-#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
-#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
-#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
-#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
-#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
-#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
-#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
-#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
-#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
-#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
-#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
-#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
-#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
-#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
-#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
-#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
-#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
-#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
-#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
-#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
-#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
-#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
-#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
-#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
-#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
-#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
-#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
-#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
-#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
-#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
-#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
-#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
-#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
-#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
-#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
-#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
-#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
-#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
-#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
-#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
-#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
-#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
-#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
-#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
-#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
-#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
-#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
-#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
-#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
-#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
-#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
-#define I40E_GL_MDET_RX_EVENT_SHIFT 8
-#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT)
-#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
-#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT)
-#define I40E_GL_MDET_RX_VALID_SHIFT 31
-#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT)
-#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */
-#define I40E_GL_MDET_TX_QUEUE_SHIFT 0
-#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
-#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
-#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
-#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
-#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
-#define I40E_GL_MDET_TX_EVENT_SHIFT 25
-#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
-#define I40E_GL_MDET_TX_VALID_SHIFT 31
-#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)
-#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */
-#define I40E_PF_MDET_RX_VALID_SHIFT 0
-#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
-#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */
-#define I40E_PF_MDET_TX_VALID_SHIFT 0
-#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
-#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */
-#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
-#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
-#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
-#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
-#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
-#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
-#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VP_MDET_RX_MAX_INDEX 127
-#define I40E_VP_MDET_RX_VALID_SHIFT 0
-#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
-#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VP_MDET_TX_MAX_INDEX 127
-#define I40E_VP_MDET_TX_VALID_SHIFT 0
-#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
-#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */
-#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
-#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
-#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
-#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
-#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
-#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
-#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
-#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
-#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
-#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
-#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
-#define I40E_PFPM_APM_APME_SHIFT 0
-#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
-#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
-#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */
-#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
-#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
-#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
-#define I40E_PFPM_WUFC_LNKC_SHIFT 0
-#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
-#define I40E_PFPM_WUFC_MAG_SHIFT 1
-#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
-#define I40E_PFPM_WUFC_MNG_SHIFT 3
-#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
-#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
-#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
-#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
-#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
-#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
-#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
-#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
-#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
-#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX0_SHIFT 16
-#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
-#define I40E_PFPM_WUFC_FLX1_SHIFT 17
-#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
-#define I40E_PFPM_WUFC_FLX2_SHIFT 18
-#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
-#define I40E_PFPM_WUFC_FLX3_SHIFT 19
-#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
-#define I40E_PFPM_WUFC_FLX4_SHIFT 20
-#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
-#define I40E_PFPM_WUFC_FLX5_SHIFT 21
-#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
-#define I40E_PFPM_WUFC_FLX6_SHIFT 22
-#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
-#define I40E_PFPM_WUFC_FLX7_SHIFT 23
-#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
-#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
-#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */
-#define I40E_PFPM_WUS_LNKC_SHIFT 0
-#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
-#define I40E_PFPM_WUS_MAG_SHIFT 1
-#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
-#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
-#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
-#define I40E_PFPM_WUS_MNG_SHIFT 3
-#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
-#define I40E_PFPM_WUS_FLX0_SHIFT 16
-#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
-#define I40E_PFPM_WUS_FLX1_SHIFT 17
-#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
-#define I40E_PFPM_WUS_FLX2_SHIFT 18
-#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
-#define I40E_PFPM_WUS_FLX3_SHIFT 19
-#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
-#define I40E_PFPM_WUS_FLX4_SHIFT 20
-#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
-#define I40E_PFPM_WUS_FLX5_SHIFT 21
-#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
-#define I40E_PFPM_WUS_FLX6_SHIFT 22
-#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
-#define I40E_PFPM_WUS_FLX7_SHIFT 23
-#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
-#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
-#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */
-#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
-#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
-#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
-#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
-#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
-#define I40E_PRTPM_SAH_MAX_INDEX 3
-#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
-#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
-#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
-#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
-#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
-#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
-#define I40E_PRTPM_SAH_AV_SHIFT 31
-#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
-#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
-#define I40E_PRTPM_SAL_MAX_INDEX 3
-#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
-#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
 #define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
 #define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
 #define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
@@ -3366,4 +273,64 @@
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
 #define I40E_VFQF_HREGION_REGION_7_SHIFT 29
 #define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#endif
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#endif /* _I40E_REGISTER_H_ */
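
Every register in the header above follows the same three-define pattern: an
address (optionally indexed by _i/_j), a per-field _SHIFT, and a per-field
_MASK produced by I40E_MASK(), which places the mask value at the field's bit
position. The deletions trim the VF driver's copy of the header down to
registers a VF can actually reach (VF_*, VFINT_*, VFQF_*, VFPE_*), dropping
the PF/port-level statistics, PTP, and power-management blocks. A minimal
sketch of the read idiom the SHIFT/MASK pairs support, using the VFPE defines
added above (the helper name is hypothetical; rd32() is the driver's
register-read accessor):

static inline u32 i40e_vfpe_cqp_head_sketch(struct i40e_hw *hw)
{
	u32 reg = rd32(hw, I40E_VFPE_CQPDB1);

	/* mask isolates WQHEAD (bits 10:0), shift drops it to bit 0 */
	return (reg & I40E_VFPE_CQPDB1_WQHEAD_MASK) >>
	       I40E_VFPE_CQPDB1_WQHEAD_SHIFT;
}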
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 395f32f..7e91d82 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -366,15 +366,32 @@
  **/
 static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 {
-	u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
-		  I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
-		  I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
-		  I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
-		  /* allow 00 to be written to the index */
+	u16 flags = q_vector->tx.ring[0].flags;
 
-	wr32(&vsi->back->hw,
-	     I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
-	     val);
+	if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+		u32 val;
+
+		if (q_vector->arm_wb_state)
+			return;
+
+		val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK;
+
+		wr32(&vsi->back->hw,
+		     I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
+					  vsi->base_vector - 1),
+		     val);
+		q_vector->arm_wb_state = true;
+	} else {
+		u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+			  I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
+			  I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
+			  I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK;
+			  /* allow 00 to be written to the index */
+
+		wr32(&vsi->back->hw,
+		     I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
+					  vsi->base_vector - 1), val);
+	}
 }
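
The rework gives i40e_force_wb() two paths. Rings flagged with
I40E_TXR_FLAGS_WB_ON_ITR only arm hardware descriptor write-back on the next
ITR expiry, and q_vector->arm_wb_state latches that so the register is not
rewritten on every pass; all other rings keep the old behaviour of raising a
software interrupt. The old block also mixed I40E_VFINT_DYN_CTLN_* and
I40E_VFINT_DYN_CTLN1_* names; the replacement uses the VF-relative CTLN1 set
throughout. A condensed, standalone restatement of the new decision (names as
in the hunk above; illustrative only):

static void i40e_force_wb_sketch(struct i40e_vsi *vsi,
				 struct i40e_q_vector *q_vector)
{
	u32 val;

	if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		if (q_vector->arm_wb_state)
			return;	/* already armed for this ITR period */
		val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK;
		q_vector->arm_wb_state = true;
	} else {
		val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
		      I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
		      I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
		      I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK;
	}
	wr32(&vsi->back->hw,
	     I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
	     val);
}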
 
 /**
@@ -404,7 +421,7 @@
 	 *  20-1249MB/s bulk   (8000 ints/s)
 	 */
 	bytes_per_int = rc->total_bytes / rc->itr;
-	switch (rc->itr) {
+	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
 		if (bytes_per_int > 10)
 			new_latency_range = I40E_LOW_LATENCY;
@@ -417,9 +434,14 @@
 		break;
 	case I40E_BULK_LATENCY:
 		if (bytes_per_int <= 20)
-			rc->latency_range = I40E_LOW_LATENCY;
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	default:
+		if (bytes_per_int <= 20)
+			new_latency_range = I40E_LOW_LATENCY;
 		break;
 	}
+	rc->latency_range = new_latency_range;
 
 	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
@@ -435,42 +457,14 @@
 		break;
 	}
 
-	if (new_itr != rc->itr) {
-		/* do an exponential smoothing */
-		new_itr = (10 * new_itr * rc->itr) /
-			  ((9 * new_itr) + rc->itr);
-		rc->itr = new_itr & I40E_MAX_ITR;
-	}
+	if (new_itr != rc->itr)
+		rc->itr = new_itr;
 
 	rc->total_bytes = 0;
 	rc->total_packets = 0;
 }
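
Two fixes land in i40e_set_new_dynamic_itr() here. The switch used to key on
rc->itr, the programmed interval, whose values sit far above the small
I40E_*_LATENCY enum constants, so no case could match (and the bulk case
wrote rc->latency_range directly instead of new_latency_range); it now keys
on new_latency_range, gains a default arm, and stores the result back into
rc->latency_range. The exponential-smoothing step is also dropped, so a newly
chosen ITR takes effect immediately. A hypothetical pure-function rendering
of the corrected classifier, covering only the transitions visible in these
hunks (the I40E_LOW_LATENCY transitions live in the unchanged middle of the
function):

static enum i40e_latency_range
i40e_classify_latency_sketch(enum i40e_latency_range prev, u32 bytes_per_int)
{
	switch (prev) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			return I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		/* transitions for this state are outside these hunks */
		break;
	case I40E_BULK_LATENCY:
	default:	/* unknown state: back off toward low */
		if (bytes_per_int <= 20)
			return I40E_LOW_LATENCY;
		break;
	}
	return prev;
}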
 
-/**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
-	u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
-	struct i40e_hw *hw = &q_vector->vsi->back->hw;
-	u32 reg_addr;
-	u16 old_itr;
-
-	reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1);
-	old_itr = q_vector->rx.itr;
-	i40e_set_new_dynamic_itr(&q_vector->rx);
-	if (old_itr != q_vector->rx.itr)
-		wr32(hw, reg_addr, q_vector->rx.itr);
-
-	reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1);
-	old_itr = q_vector->tx.itr;
-	i40e_set_new_dynamic_itr(&q_vector->tx);
-	if (old_itr != q_vector->tx.itr)
-		wr32(hw, reg_addr, q_vector->tx.itr);
-}
-
-/**
+/*
  * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
  * @tx_ring: the tx ring to set up
  *
@@ -873,7 +867,7 @@
 		return;
 
 	/* did the hardware decode the packet and checksum? */
-	if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
 		return;
 
 	/* both known and outer_ip must be set for the below code to work */
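
From this hunk on, the patch converts open-coded (1 << n) bit tests to the
kernel's BIT() macro from <linux/bitops.h>, which expands to (1UL << (n));
behaviour is unchanged, the macro is simply the current kernel idiom. For
example, the descriptor-done test touched by the later receive hunks reduces
to (helper name hypothetical):

#include <linux/bitops.h>

static inline bool i40e_rx_desc_done_sketch(u64 qword)
{
	u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;

	return rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT);
}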
@@ -888,25 +882,25 @@
 		ipv6 = true;
 
 	if (ipv4 &&
-	    (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
-			 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
 		goto checksum_fail;
 
 	/* likely incorrect csum if alternate IP extension headers found */
 	if (ipv6 &&
-	    rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
 		/* don't increment checksum err here, non-fatal err */
 		return;
 
 	/* there was some L4 error, count error and punt packet to the stack */
-	if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
 		goto checksum_fail;
 
 	/* handle packets that were not able to be checksummed due
 	 * to arrival speed, in this case the stack can compute
 	 * the csum.
 	 */
-	if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
 		return;
 
 	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1027,7 +1021,7 @@
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			I40E_RXD_QW1_STATUS_SHIFT;
 
-		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -1063,8 +1057,8 @@
 
 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
 			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
-		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 			   I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1116,7 +1110,7 @@
 		I40E_RX_INCREMENT(rx_ring, i);
 
 		if (unlikely(
-		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
 			struct i40e_rx_buffer *next_buffer;
 
 			next_buffer = &rx_ring->rx_bi[i];
@@ -1126,7 +1120,7 @@
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
 			continue;
 		}
@@ -1141,7 +1135,7 @@
 
 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
 			 : 0;
 #ifdef I40E_FCOE
@@ -1202,7 +1196,7 @@
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			I40E_RXD_QW1_STATUS_SHIFT;
 
-		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -1220,7 +1214,7 @@
 
 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
 			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 			   I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1238,13 +1232,13 @@
 		I40E_RX_INCREMENT(rx_ring, i);
 
 		if (unlikely(
-		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
 			rx_ring->rx_stats.non_eop_descs++;
 			continue;
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
 			/* TODO: shouldn't we increment a counter indicating the
 			 * drop?
@@ -1262,7 +1256,7 @@
 
 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
 			 : 0;
 		i40e_receive_skb(rx_ring, skb, vlan_tag);
@@ -1281,6 +1275,65 @@
 }
 
 /**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+					  struct i40e_q_vector *q_vector)
+{
+	struct i40e_hw *hw = &vsi->back->hw;
+	u16 old_itr;
+	int vector;
+	u32 val;
+
+	vector = (q_vector->v_idx + vsi->base_vector);
+	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+		old_itr = q_vector->rx.itr;
+		i40e_set_new_dynamic_itr(&q_vector->rx);
+		if (old_itr != q_vector->rx.itr) {
+			val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+				I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+				(I40E_RX_ITR <<
+				   I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+				(q_vector->rx.itr <<
+				   I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
+		} else {
+			val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+				I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+				(I40E_ITR_NONE <<
+				   I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);
+		}
+		if (!test_bit(__I40E_DOWN, &vsi->state))
+			wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
+	} else {
+		i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
+	}
+	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+		old_itr = q_vector->tx.itr;
+		i40e_set_new_dynamic_itr(&q_vector->tx);
+		if (old_itr != q_vector->tx.itr) {
+			val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+				I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+				(I40E_TX_ITR <<
+				   I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+				(q_vector->tx.itr <<
+				   I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
+
+		} else {
+			val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+				I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+				(I40E_ITR_NONE <<
+				   I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);
+		}
+		if (!test_bit(__I40E_DOWN, &vsi->state))
+			wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
+	} else {
+		i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
+	}
+}
+
+/**
  * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
  * @napi: napi struct with our devices info in it
  * @budget: amount of work driver is allowed to do this pass, in packets
@@ -1334,15 +1389,12 @@
 		return budget;
 	}
 
+	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+		q_vector->arm_wb_state = false;
+
 	/* Work is done so exit the polling mode and re-enable the interrupt */
 	napi_complete(napi);
-	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
-	    ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-		i40e_update_dynamic_itr(q_vector);
-
-	if (!test_bit(__I40E_DOWN, &vsi->state))
-		i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx);
-
+	i40e_update_enable_itr(vsi, q_vector);
 	return 0;
 }
 
@@ -1476,11 +1528,15 @@
 	struct iphdr *this_ip_hdr;
 	u32 network_hdr_len;
 	u8 l4_hdr = 0;
+	struct udphdr *oudph;
+	struct iphdr *oiph;
 	u32 l4_tunnel = 0;
 
 	if (skb->encapsulation) {
 		switch (ip_hdr(skb)->protocol) {
 		case IPPROTO_UDP:
+			oudph = udp_hdr(skb);
+			oiph = ip_hdr(skb);
 			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
 			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
 			break;
@@ -1519,6 +1575,15 @@
 		}
 
 
+		if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
+		    (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING)        &&
+		    (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
+			oudph->check = ~csum_tcpudp_magic(oiph->saddr,
+					oiph->daddr,
+					(skb->len - skb_transport_offset(skb)),
+					IPPROTO_UDP, 0);
+			*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+		}
 	} else {
 		network_hdr_len = skb_network_header_len(skb);
 		this_ip_hdr = ip_hdr(skb);
@@ -1841,6 +1906,8 @@
 	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
 						   tx_ring->queue_index)))
 		writel(i, tx_ring->tail);
+	else
+		prefetchw(tx_desc + 1);
 
 	return;
 
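A note on the BIT()/BIT_ULL() conversions that dominate the hunks above: in
<linux/bitops.h> these are (1UL << (nr)) and (1ULL << (nr)), so the shifted
value is unsigned and wide enough for the bit index. The open-coded (1 << n)
form shifts a signed 32-bit int, which goes wrong for n >= 31. A minimal
userspace sketch of the trap (illustrative only, not driver code):

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(nr) (1ULL << (nr))	/* as in <linux/bitops.h> */

int main(void)
{
	/* On a typical two's-complement target, 1 << 31 is INT_MIN and
	 * sign-extends when widened to 64 bits; BIT_ULL(31) does not.
	 */
	uint64_t open_coded = (uint64_t)(1 << 31);
	uint64_t with_macro = BIT_ULL(31);

	printf("(u64)(1 << 31): %#llx\n", (unsigned long long)open_coded);
	printf("BIT_ULL(31):    %#llx\n", (unsigned long long)with_macro);
	return 0;
}
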
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index e7a34f8..9a30f5d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -66,17 +66,29 @@
 
 /* Supported RSS offloads */
 #define I40E_DEFAULT_RSS_HENA ( \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
-	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
-	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
-	((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
+		BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+		BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+		BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+		BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+		BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+		BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hena(pf) \
+	(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
+		I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
 
 /* Supported Rx Buffer Sizes */
 #define I40E_RXBUFFER_512   512    /* Used for packet split */
@@ -129,16 +141,16 @@
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING	4
 
-#define I40E_TX_FLAGS_CSUM		(u32)(1)
-#define I40E_TX_FLAGS_HW_VLAN		(u32)(1 << 1)
-#define I40E_TX_FLAGS_SW_VLAN		(u32)(1 << 2)
-#define I40E_TX_FLAGS_TSO		(u32)(1 << 3)
-#define I40E_TX_FLAGS_IPV4		(u32)(1 << 4)
-#define I40E_TX_FLAGS_IPV6		(u32)(1 << 5)
-#define I40E_TX_FLAGS_FCCRC		(u32)(1 << 6)
-#define I40E_TX_FLAGS_FSO		(u32)(1 << 7)
-#define I40E_TX_FLAGS_FD_SB		(u32)(1 << 9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL	(u32)(1 << 10)
+#define I40E_TX_FLAGS_CSUM		BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
+#define I40E_TX_FLAGS_TSO		BIT(3)
+#define I40E_TX_FLAGS_IPV4		BIT(4)
+#define I40E_TX_FLAGS_IPV6		BIT(5)
+#define I40E_TX_FLAGS_FCCRC		BIT(6)
+#define I40E_TX_FLAGS_FSO		BIT(7)
+#define I40E_TX_FLAGS_FD_SB		BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL	BIT(10)
 #define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
@@ -250,6 +262,10 @@
 	bool ring_active;		/* is ring online or not */
 	bool arm_wb;		/* do something to arm write back */
 
+	u16 flags;
+#define I40E_TXR_FLAGS_WB_ON_ITR	BIT(0)
+#define I40E_TXR_FLAGS_OUTER_UDP_CSUM	BIT(1)
+
 	/* stats structs */
 	struct i40e_queue_stats	stats;
 	struct u64_stats_sync syncp;
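One caveat on I40E_DEFAULT_RSS_HENA_EXPANDED above: the X722-only pctypes are
added with BIT() rather than BIT_ULL(), which is only safe where unsigned long
is 64 bits, since pctype 42 does not fit in a 32-bit long. For reference, a
standalone sketch (pctype values taken from the enum in i40e_type.h) of how a
64-bit hash-enable bitmap is split across the two 32-bit VFQF_HENA registers,
as the RSS code later in this patch does:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(nr) (1ULL << (nr))

#define PCTYPE_NONF_IPV4_UDP	31	/* I40E_FILTER_PCTYPE_NONF_IPV4_UDP */
#define PCTYPE_NONF_IPV4_TCP	33	/* I40E_FILTER_PCTYPE_NONF_IPV4_TCP */

int main(void)
{
	uint64_t hena = BIT_ULL(PCTYPE_NONF_IPV4_UDP) |
			BIT_ULL(PCTYPE_NONF_IPV4_TCP);

	/* The register file is 32 bits wide, so the driver programs the
	 * bitmap as two halves: VFQF_HENA(0) low, VFQF_HENA(1) high.
	 */
	printf("HENA(0) <- %#010x\n", (unsigned)hena);
	printf("HENA(1) <- %#010x\n", (unsigned)(hena >> 32));
	return 0;
}
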
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index c463ec4..24a2693 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -47,6 +47,11 @@
 #define I40E_DEV_ID_20G_KR2		0x1587
 #define I40E_DEV_ID_VF			0x154C
 #define I40E_DEV_ID_VF_HV		0x1571
+#define I40E_DEV_ID_SFP_X722		0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722	0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722	0x37D2
+#define I40E_DEV_ID_X722_VF		0x37CD
+#define I40E_DEV_ID_X722_VF_HV		0x37D9
 
 #define i40e_is_40G_device(d)		((d) == I40E_DEV_ID_QSFP_A  || \
 					 (d) == I40E_DEV_ID_QSFP_B  || \
@@ -120,6 +125,8 @@
 	I40E_MAC_X710,
 	I40E_MAC_XL710,
 	I40E_MAC_VF,
+	I40E_MAC_X722,
+	I40E_MAC_X722_VF,
 	I40E_MAC_GENERIC,
 };
 
@@ -213,7 +220,17 @@
 	bool dcb;
 	bool fcoe;
 	bool iscsi; /* Indicates iSCSI enabled */
-	bool mfp_mode_1;
+	bool flex10_enable;
+	bool flex10_capable;
+	u32  flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN	0x0
+#define I40E_FLEX10_MODE_DCC		0x1
+#define I40E_FLEX10_MODE_DCI		0x2
+
+	u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR	0x1
+#define I40E_FLEX10_STATUS_VC_MODE	0x2
+
 	bool mgmt_cem;
 	bool ieee_1588;
 	bool iwarp;
@@ -417,6 +434,7 @@
 
 struct i40e_dcbx_config {
 	u32 numapps;
+	u32 tlv_status; /* CEE mode TLV status */
 	struct i40e_ieee_ets_config etscfg;
 	struct i40e_ieee_ets_recommend etsrec;
 	struct i40e_ieee_pfc_config pfc;
@@ -481,11 +499,13 @@
 
 	/* debug mask */
 	u32 debug_mask;
+	char err_str[16];
 };
 
 static inline bool i40e_is_vf(struct i40e_hw *hw)
 {
-	return hw->mac.type == I40E_MAC_VF;
+	return (hw->mac.type == I40E_MAC_VF ||
+		hw->mac.type == I40E_MAC_X722_VF);
 }
 
 struct i40e_driver_version {
@@ -582,19 +602,23 @@
 	I40E_RX_DESC_STATUS_CRCP_SHIFT		= 4,
 	I40E_RX_DESC_STATUS_TSYNINDX_SHIFT	= 5, /* 2 BITS */
 	I40E_RX_DESC_STATUS_TSYNVALID_SHIFT	= 7,
-	I40E_RX_DESC_STATUS_PIF_SHIFT		= 8,
+	/* Note: Bit 8 is reserved in X710 and XL710 */
+	I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT	= 8,
 	I40E_RX_DESC_STATUS_UMBCAST_SHIFT	= 9, /* 2 BITS */
 	I40E_RX_DESC_STATUS_FLM_SHIFT		= 11,
 	I40E_RX_DESC_STATUS_FLTSTAT_SHIFT	= 12, /* 2 BITS */
 	I40E_RX_DESC_STATUS_LPBK_SHIFT		= 14,
 	I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT	= 15,
 	I40E_RX_DESC_STATUS_RESERVED_SHIFT	= 16, /* 2 BITS */
-	I40E_RX_DESC_STATUS_UDP_0_SHIFT		= 18,
+	/* Note: for non-tunnel packets, INT_UDP_0 is the right status bit
+	 * for the UDP header.
+	 */
+	I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT	= 18,
 	I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
 };
 
 #define I40E_RXD_QW1_STATUS_SHIFT	0
-#define I40E_RXD_QW1_STATUS_MASK	(((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+#define I40E_RXD_QW1_STATUS_MASK	((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
 					 << I40E_RXD_QW1_STATUS_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -602,8 +626,8 @@
 					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK	(0x1UL << \
-					 I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+				    BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
 
 enum i40e_rx_desc_fltstat_values {
 	I40E_RX_DESC_FLTSTAT_NO_DATA	= 0,
@@ -737,8 +761,7 @@
 					 I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
 
 #define I40E_RXD_QW1_LENGTH_SPH_SHIFT	63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK	(0x1ULL << \
-					 I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK	BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
 
 enum i40e_rx_desc_ext_status_bits {
 	/* Note: These are predefined bit offsets */
@@ -914,12 +937,12 @@
 #define I40E_TXD_CTX_QW0_NATT_SHIFT	9
 #define I40E_TXD_CTX_QW0_NATT_MASK	(0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
-#define I40E_TXD_CTX_UDP_TUNNELING	(0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING	BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
 #define I40E_TXD_CTX_GRE_TUNNELING	(0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
 #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT	11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK	(0x1ULL << \
-					 I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+				       BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
 
 #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST	I40E_TXD_CTX_QW0_EIP_NOINC_MASK
 
@@ -931,6 +954,8 @@
 #define I40E_TXD_CTX_QW0_DECTTL_MASK	(0xFULL << \
 					 I40E_TXD_CTX_QW0_DECTTL_SHIFT)
 
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT	23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK	BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
 struct i40e_filter_program_desc {
 	__le32 qindex_flex_ptype_vsi;
 	__le32 rsvd;
@@ -949,15 +974,24 @@
 
 /* Packet Classifier Types for filters */
 enum i40e_filter_pctype {
-	/* Note: Values 0-30 are reserved for future use */
+	/* Note: Values 0-28 are reserved for future use.
+	 * Values 29, 30, and 32 are not supported on XL710 and X710.
+	 */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP	= 29,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP	= 30,
 	I40E_FILTER_PCTYPE_NONF_IPV4_UDP		= 31,
-	/* Note: Value 32 is reserved for future use */
+	I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK	= 32,
 	I40E_FILTER_PCTYPE_NONF_IPV4_TCP		= 33,
 	I40E_FILTER_PCTYPE_NONF_IPV4_SCTP		= 34,
 	I40E_FILTER_PCTYPE_NONF_IPV4_OTHER		= 35,
 	I40E_FILTER_PCTYPE_FRAG_IPV4			= 36,
-	/* Note: Values 37-40 are reserved for future use */
+	/* Note: Values 37-38 are reserved for future use.
+	 * Values 39, 40, and 42 are not supported on XL710 and X710.
+	 */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP	= 39,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP	= 40,
 	I40E_FILTER_PCTYPE_NONF_IPV6_UDP		= 41,
+	I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK	= 42,
 	I40E_FILTER_PCTYPE_NONF_IPV6_TCP		= 43,
 	I40E_FILTER_PCTYPE_NONF_IPV6_SCTP		= 44,
 	I40E_FILTER_PCTYPE_NONF_IPV6_OTHER		= 45,
@@ -984,8 +1018,8 @@
 };
 
 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT	23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK	(0x1FFUL << \
-					 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
+				       BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CMD_SHIFT	4
 #define I40E_TXD_FLTR_QW1_CMD_MASK	(0xFFFFULL << \
@@ -1003,8 +1037,7 @@
 #define I40E_TXD_FLTR_QW1_DEST_MASK	(0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT	(0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK	(0x1ULL << \
-					 I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK	BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT	(0x9ULL + \
 						 I40E_TXD_FLTR_QW1_CMD_SHIFT)
@@ -1063,6 +1096,14 @@
 	u64 tx_errors;			/* tepc */
 };
 
+/* Statistics collected per VEB per TC */
+struct i40e_veb_tc_stats {
+	u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
+	u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
+	u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
+	u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
+};
+
 /* Statistics collected by the MAC */
 struct i40e_hw_port_stats {
 	/* eth stats collected by the port */
@@ -1109,6 +1150,8 @@
 	u64 fd_atr_match;
 	u64 fd_sb_match;
 	u64 fd_atr_tunnel_match;
+	u32 fd_atr_status;
+	u32 fd_sb_status;
 	/* EEE LPI */
 	u32 tx_lpi_status;
 	u32 rx_lpi_status;
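The I40E_RXD_QW1_STATUS_MASK rewrite above keeps the same construction,
(BIT(LAST) - 1) << SHIFT, i.e. a mask of the low I40E_RX_DESC_STATUS_LAST
bits of descriptor qword 1. A self-contained sketch of the extract-and-test
pattern the Rx clean-up paths in this patch use (shift values match this
descriptor family, where DD and EOF are bits 0 and 1; the qword itself is
fabricated):

#include <stdio.h>
#include <stdint.h>

#define BIT(nr)		(1UL << (nr))

#define STATUS_SHIFT	0
#define STATUS_LAST	19	/* I40E_RX_DESC_STATUS_LAST after this patch */
#define STATUS_MASK	((BIT(STATUS_LAST) - 1) << STATUS_SHIFT)
#define STATUS_DD	0	/* descriptor done */
#define STATUS_EOF	1	/* end of frame */

int main(void)
{
	uint64_t qword = 0x3;	/* fabricated: DD and EOF set */
	uint32_t rx_status = (qword & STATUS_MASK) >> STATUS_SHIFT;

	printf("DD  set: %d\n", !!(rx_status & BIT(STATUS_DD)));
	printf("EOF set: %d\n", !!(rx_status & BIT(STATUS_EOF)));
	return 0;
}
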
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index 59f62f0..e6db20e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -110,7 +110,9 @@
  * error regardless of version mismatch.
  */
 #define I40E_VIRTCHNL_VERSION_MAJOR		1
-#define I40E_VIRTCHNL_VERSION_MINOR		0
+#define I40E_VIRTCHNL_VERSION_MINOR		1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0
+
 struct i40e_virtchnl_version_info {
 	u32 major;
 	u32 minor;
@@ -129,7 +131,8 @@
  */
 
 /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
  * PF responds with an indirect message containing
  * i40e_virtchnl_vf_resource and one or more
  * i40e_virtchnl_vsi_resource structures.
@@ -143,9 +146,13 @@
 	u8 default_mac_addr[ETH_ALEN];
 };
 /* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2	0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE	0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN	0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2		0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP		0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE		0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ		0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG	0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING	0x00020000
 
 struct i40e_virtchnl_vf_resource {
 	u16 num_vsis;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index fea3b75..3817cbb 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -101,6 +101,8 @@
 #define MAX_RX_QUEUES 8
 #define MAX_TX_QUEUES MAX_RX_QUEUES
 
+#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
+
 /* MAX_MSIX_Q_VECTORS of these are allocated,
  * but we only use one per queue-specific vector.
  */
@@ -115,6 +117,7 @@
 	u8 num_ringpairs;	/* total number of ring pairs in vector */
 	int v_idx;	  /* vector index in list */
 	char name[IFNAMSIZ + 9];
+	bool arm_wb_state;
 	cpumask_var_t affinity_mask;
 };
 
@@ -207,33 +210,39 @@
 	struct msix_entry *msix_entries;
 
 	u32 flags;
-#define I40EVF_FLAG_RX_CSUM_ENABLED              (u32)(1)
-#define I40EVF_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 1)
-#define I40EVF_FLAG_RX_PS_CAPABLE                (u32)(1 << 2)
-#define I40EVF_FLAG_RX_PS_ENABLED                (u32)(1 << 3)
-#define I40EVF_FLAG_IN_NETPOLL                   (u32)(1 << 4)
-#define I40EVF_FLAG_IMIR_ENABLED                 (u32)(1 << 5)
-#define I40EVF_FLAG_MQ_CAPABLE                   (u32)(1 << 6)
-#define I40EVF_FLAG_NEED_LINK_UPDATE             (u32)(1 << 7)
-#define I40EVF_FLAG_PF_COMMS_FAILED              (u32)(1 << 8)
-#define I40EVF_FLAG_RESET_PENDING                (u32)(1 << 9)
-#define I40EVF_FLAG_RESET_NEEDED                 (u32)(1 << 10)
-/* duplcates for common code */
+#define I40EVF_FLAG_RX_CSUM_ENABLED              BIT(0)
+#define I40EVF_FLAG_RX_1BUF_CAPABLE              BIT(1)
+#define I40EVF_FLAG_RX_PS_CAPABLE                BIT(2)
+#define I40EVF_FLAG_RX_PS_ENABLED                BIT(3)
+#define I40EVF_FLAG_IN_NETPOLL                   BIT(4)
+#define I40EVF_FLAG_IMIR_ENABLED                 BIT(5)
+#define I40EVF_FLAG_MQ_CAPABLE                   BIT(6)
+#define I40EVF_FLAG_NEED_LINK_UPDATE             BIT(7)
+#define I40EVF_FLAG_PF_COMMS_FAILED              BIT(8)
+#define I40EVF_FLAG_RESET_PENDING                BIT(9)
+#define I40EVF_FLAG_RESET_NEEDED                 BIT(10)
+#define I40EVF_FLAG_WB_ON_ITR_CAPABLE		BIT(11)
+#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE	BIT(12)
+/* duplicates for common code */
 #define I40E_FLAG_FDIR_ATR_ENABLED		 0
 #define I40E_FLAG_DCB_ENABLED			 0
 #define I40E_FLAG_IN_NETPOLL			 I40EVF_FLAG_IN_NETPOLL
 #define I40E_FLAG_RX_CSUM_ENABLED                I40EVF_FLAG_RX_CSUM_ENABLED
+#define I40E_FLAG_WB_ON_ITR_CAPABLE		I40EVF_FLAG_WB_ON_ITR_CAPABLE
+#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE	I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
 	/* flags for admin queue service task */
 	u32 aq_required;
-#define I40EVF_FLAG_AQ_ENABLE_QUEUES		(u32)(1)
-#define I40EVF_FLAG_AQ_DISABLE_QUEUES		(u32)(1 << 1)
-#define I40EVF_FLAG_AQ_ADD_MAC_FILTER		(u32)(1 << 2)
-#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER		(u32)(1 << 3)
-#define I40EVF_FLAG_AQ_DEL_MAC_FILTER		(u32)(1 << 4)
-#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER		(u32)(1 << 5)
-#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES		(u32)(1 << 6)
-#define I40EVF_FLAG_AQ_MAP_VECTORS		(u32)(1 << 7)
-#define I40EVF_FLAG_AQ_HANDLE_RESET		(u32)(1 << 8)
+#define I40EVF_FLAG_AQ_ENABLE_QUEUES		BIT(0)
+#define I40EVF_FLAG_AQ_DISABLE_QUEUES		BIT(1)
+#define I40EVF_FLAG_AQ_ADD_MAC_FILTER		BIT(2)
+#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER		BIT(3)
+#define I40EVF_FLAG_AQ_DEL_MAC_FILTER		BIT(4)
+#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER		BIT(5)
+#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES		BIT(6)
+#define I40EVF_FLAG_AQ_MAP_VECTORS		BIT(7)
+#define I40EVF_FLAG_AQ_HANDLE_RESET		BIT(8)
+#define I40EVF_FLAG_AQ_CONFIGURE_RSS		BIT(9)
+#define I40EVF_FLAG_AQ_GET_CONFIG		BIT(10)
 
 	/* OS defined structs */
 	struct net_device *netdev;
@@ -249,8 +258,17 @@
 	bool netdev_registered;
 	bool link_up;
 	enum i40e_virtchnl_ops current_op;
+#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \
+			    I40E_VIRTCHNL_VF_OFFLOAD_IWARP)
+#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
+		    I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
+			  I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
 	struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
 	struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+	struct i40e_virtchnl_version_info pf_version;
+#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
+		       ((_a)->pf_version.minor == 1))
 	u16 msg_enable;
 	struct i40e_eth_stats current_stats;
 	struct i40e_vsi vsi;
@@ -264,6 +282,7 @@
 
 int i40evf_up(struct i40evf_adapter *adapter);
 void i40evf_down(struct i40evf_adapter *adapter);
+int i40evf_process_config(struct i40evf_adapter *adapter);
 void i40evf_reset(struct i40evf_adapter *adapter);
 void i40evf_set_ethtool_ops(struct net_device *netdev);
 void i40evf_update_stats(struct i40evf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 2b53c87..4790437 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -381,11 +381,11 @@
 
 	switch (cmd->flow_type) {
 	case TCP_V4_FLOW:
-		if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 	case UDP_V4_FLOW:
-		if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 
@@ -397,11 +397,11 @@
 		break;
 
 	case TCP_V6_FLOW:
-		if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 	case UDP_V6_FLOW:
-		if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 
@@ -479,10 +479,10 @@
 	case TCP_V4_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 			break;
 		default:
 			return -EINVAL;
@@ -491,10 +491,10 @@
 	case TCP_V6_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
 			break;
 		default:
 			return -EINVAL;
@@ -503,12 +503,12 @@
 	case UDP_V4_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
 			break;
 		default:
 			return -EINVAL;
@@ -517,12 +517,12 @@
 	case UDP_V6_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
 			break;
 		default:
 			return -EINVAL;
@@ -535,7 +535,7 @@
 		if ((nfc->data & RXH_L4_B_0_1) ||
 		    (nfc->data & RXH_L4_B_2_3))
 			return -EINVAL;
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
 		break;
 	case AH_ESP_V6_FLOW:
 	case AH_V6_FLOW:
@@ -544,15 +544,15 @@
 		if ((nfc->data & RXH_L4_B_0_1) ||
 		    (nfc->data & RXH_L4_B_2_3))
 			return -EINVAL;
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
 		break;
 	case IPV4_FLOW:
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
-			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+		hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+			 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
 		break;
 	case IPV6_FLOW:
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
-			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+		hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+			 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
 		break;
 	default:
 		return -EINVAL;
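The rxnfc conversion above preserves the driver's rule: a flow type hashes on
L4 ports only when RXH_L4_B_0_1 and RXH_L4_B_2_3 are requested together, both
clear disables it, and any other combination is rejected. A condensed sketch
of that three-way switch for one flow type (set_tcp4_hash() is a hypothetical
helper, not a driver function):

/* Update a 64-bit hena bitmap for TCP_V4_FLOW from the ethtool RXH bits. */
static int set_tcp4_hash(u64 *hena, u32 data)
{
	switch (data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		*hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
		return 0;
	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
		*hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
		return 0;
	default:
		/* hashing on only half of the port bytes isn't supported */
		return -EINVAL;
	}
}
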
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 4ab4ebb..e85849b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -34,10 +34,10 @@
 static const char i40evf_driver_string[] =
 	"Intel(R) XL710/X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "1.2.25"
+#define DRV_VERSION "1.3.5"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
-	"Copyright (c) 2013 - 2014 Intel Corporation.";
+	"Copyright (c) 2013 - 2015 Intel Corporation.";
 
 /* i40evf_pci_tbl - PCI Device ID Table
  *
@@ -49,6 +49,7 @@
  */
 static const struct pci_device_id i40evf_pci_tbl[] = {
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
 	/* required last entry */
 	{0, }
 };
@@ -203,7 +204,7 @@
 
 	wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
 				       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
-	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
+	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
 
 	/* read flush */
 	rd32(hw, I40E_VFGEN_RSTAT);
@@ -240,11 +241,11 @@
 	int i;
 
 	for (i = 1; i < adapter->num_msix_vectors; i++) {
-		if (mask & (1 << (i - 1))) {
+		if (mask & BIT(i - 1)) {
 			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
 			     I40E_VFINT_DYN_CTLN1_INTENA_MASK |
 			     I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
-			     I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+			     I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
 		}
 	}
 }
@@ -262,17 +263,17 @@
 
 	if (mask & 1) {
 		dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
-		dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+		dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
 			   I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
-			   I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+			   I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
 		wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
 	}
 	for (i = 1; i < adapter->num_msix_vectors; i++) {
-		if (mask & (1 << i)) {
+		if (mask & BIT(i)) {
 			dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
-			dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+			dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
 				   I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
-				   I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+				   I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
 			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
 		}
 	}
@@ -312,7 +313,7 @@
 
 
 	val = rd32(hw, I40E_VFINT_DYN_CTL01);
-	val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+	val = val | I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
 	wr32(hw, I40E_VFINT_DYN_CTL01, val);
 
 	/* schedule work on the private workqueue */
@@ -377,7 +378,7 @@
 	q_vector->tx.count++;
 	q_vector->tx.latency_range = I40E_LOW_LATENCY;
 	q_vector->num_ringpairs++;
-	q_vector->ring_mask |= (1 << t_idx);
+	q_vector->ring_mask |= BIT(t_idx);
 }
 
 /**
@@ -406,7 +407,7 @@
 	/* The ideal configuration...
 	 * We have enough vectors to map one per queue.
 	 */
-	if (q_vectors == (rxr_remaining * 2)) {
+	if (q_vectors >= (rxr_remaining * 2)) {
 		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
 			i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
 
@@ -892,8 +893,10 @@
 					break;
 				}
 			}
+			if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
+				found = true;
 		}
-		if (found) {
+		if (!found) {
 			f->remove = true;
 			adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
 		}
@@ -1170,6 +1173,115 @@
 }
 
 /**
+ * i40evf_configure_rss_aq - Prepare for RSS using AQ commands
+ * @vsi: vsi structure
+ * @seed: RSS hash seed
+ **/
+static void i40evf_configure_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
+{
+	struct i40e_aqc_get_set_rss_key_data rss_key;
+	struct i40evf_adapter *adapter = vsi->back;
+	struct i40e_hw *hw = &adapter->hw;
+	int ret = 0, i;
+	u8 *rss_lut;
+
+	if (!vsi->id)
+		return;
+
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+
+	memset(&rss_key, 0, sizeof(rss_key));
+	memcpy(&rss_key, seed, sizeof(rss_key));
+
+	rss_lut = kzalloc(((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4), GFP_KERNEL);
+	if (!rss_lut)
+		return;
+
+	/* Populate the LUT with max no. of queues in round-robin fashion */
+	for (i = 0; i < (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4; i++)
+		rss_lut[i] = i % adapter->num_active_queues;
+
+	ret = i40evf_aq_set_rss_key(hw, vsi->id, &rss_key);
+	if (ret) {
+		dev_err(&adapter->pdev->dev,
+			"Cannot set RSS key, err %s aq_err %s\n",
+			i40evf_stat_str(hw, ret),
+			i40evf_aq_str(hw, hw->aq.asq_last_status));
+		kfree(rss_lut);
+		return;
+	}
+
+	ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, rss_lut,
+				    (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4);
+	if (ret)
+		dev_err(&adapter->pdev->dev,
+			"Cannot set RSS LUT, err %s aq_err %s\n",
+			i40evf_stat_str(hw, ret),
+			i40evf_aq_str(hw, hw->aq.asq_last_status));
+	kfree(rss_lut);
+}
+
+/**
+ * i40evf_configure_rss_reg - Prepare for RSS if used
+ * @adapter: board private structure
+ * @seed: RSS hash seed
+ **/
+static void i40evf_configure_rss_reg(struct i40evf_adapter *adapter,
+				     const u8 *seed)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	u32 *seed_dw = (u32 *)seed;
+	u32 cqueue = 0;
+	u32 lut = 0;
+	int i, j;
+
+	/* Fill out hash function seed */
+	for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+		wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]);
+
+	/* Populate the LUT with max no. of queues in round-robin fashion */
+	for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+		lut = 0;
+		for (j = 0; j < 4; j++) {
+			if (cqueue == adapter->num_active_queues)
+				cqueue = 0;
+			lut |= ((cqueue) << (8 * j));
+			cqueue++;
+		}
+		wr32(hw, I40E_VFQF_HLUT(i), lut);
+	}
+	i40e_flush(hw);
+}
+
+/**
+ * i40evf_configure_rss - Prepare for RSS
+ * @adapter: board private structure
+ **/
+static void i40evf_configure_rss(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	u8 seed[I40EVF_HKEY_ARRAY_SIZE];
+	u64 hena;
+
+	netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE);
+
+	/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+	hena = I40E_DEFAULT_RSS_HENA;
+	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+
+	if (RSS_AQ(adapter))
+		i40evf_configure_rss_aq(&adapter->vsi, seed);
+	else
+		i40evf_configure_rss_reg(adapter, seed);
+}
+
+/**
  * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
  * @adapter: board private structure to initialize
  *
@@ -1369,6 +1479,10 @@
 		}
 		goto watchdog_done;
 	}
+	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
+		i40evf_send_vf_config_msg(adapter);
+		goto watchdog_done;
+	}
 
 	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
 		i40evf_disable_queues(adapter);
@@ -1410,6 +1524,16 @@
 		goto watchdog_done;
 	}
 
+	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
+		/* This message goes straight to the firmware, not the
+		 * PF, so we don't have to set current_op as we will
+		 * not get a response through the ARQ.
+		 */
+		i40evf_configure_rss(adapter);
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
+		goto watchdog_done;
+	}
+
 	if (adapter->state == __I40EVF_RUNNING)
 		i40evf_request_stats(adapter);
 watchdog_done:
@@ -1432,45 +1556,6 @@
 	schedule_work(&adapter->adminq_task);
 }
 
-/**
- * i40evf_configure_rss - Prepare for RSS
- * @adapter: board private structure
- **/
-static void i40evf_configure_rss(struct i40evf_adapter *adapter)
-{
-	u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1];
-	struct i40e_hw *hw = &adapter->hw;
-	u32 cqueue = 0;
-	u32 lut = 0;
-	int i, j;
-	u64 hena;
-
-	/* Hash type is configured by the PF - we just supply the key */
-	netdev_rss_key_fill(rss_key, sizeof(rss_key));
-
-	/* Fill out hash function seed */
-	for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
-		wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]);
-
-	/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
-	hena = I40E_DEFAULT_RSS_HENA;
-	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
-	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
-
-	/* Populate the LUT with max no. of queues in round robin fashion */
-	for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
-		lut = 0;
-		for (j = 0; j < 4; j++) {
-			if (cqueue == adapter->num_active_queues)
-				cqueue = 0;
-			lut |= ((cqueue) << (8 * j));
-			cqueue++;
-		}
-		wr32(hw, I40E_VFQF_HLUT(i), lut);
-	}
-	i40e_flush(hw);
-}
-
 #define I40EVF_RESET_WAIT_MS 10
 #define I40EVF_RESET_WAIT_COUNT 500
 /**
@@ -1604,7 +1689,8 @@
 		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
 			 err);
 
-	i40evf_map_queues(adapter);
+	adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG;
+	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
 
 	/* re-add all MAC filters */
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
@@ -1614,7 +1700,7 @@
 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
 		f->add = true;
 	}
-	adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
 	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
 	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 	i40evf_misc_irq_enable(adapter);
@@ -1693,34 +1779,34 @@
 	/* check for error indications */
 	val = rd32(hw, hw->aq.arq.len);
 	oldval = val;
-	if (val & I40E_VF_ARQLEN_ARQVFE_MASK) {
+	if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
 		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
-		val &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
+		val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
 	}
-	if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) {
+	if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
 		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
-		val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
+		val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
 	}
-	if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) {
+	if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
 		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
-		val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
+		val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
 	}
 	if (oldval != val)
 		wr32(hw, hw->aq.arq.len, val);
 
 	val = rd32(hw, hw->aq.asq.len);
 	oldval = val;
-	if (val & I40E_VF_ATQLEN_ATQVFE_MASK) {
+	if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) {
 		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
-		val &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
+		val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
 	}
-	if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) {
+	if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
 		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
-		val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
+		val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
 	}
-	if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) {
+	if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
 		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
-		val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
+		val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
 	}
 	if (oldval != val)
 		wr32(hw, hw->aq.asq.len, val);
@@ -1856,6 +1942,7 @@
 	if (err)
 		goto err_req_irq;
 
+	i40evf_add_filter(adapter, adapter->hw.mac.addr);
 	i40evf_configure(adapter);
 
 	err = i40evf_up_complete(adapter);
@@ -1979,6 +2066,62 @@
 }
 
 /**
+ * i40evf_process_config - Process the config information we got from the PF
+ * @adapter: board private structure
+ *
+ * Verify that we have a valid config struct, and set up our netdev features
+ * and our VSI struct.
+ **/
+int i40evf_process_config(struct i40evf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i;
+
+	/* got VF config message back from PF, now we can parse it */
+	for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+		if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+			adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+	}
+	if (!adapter->vsi_res) {
+		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
+		return -ENODEV;
+	}
+
+	if (adapter->vf_res->vf_offload_flags
+	    & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
+		netdev->vlan_features = netdev->features;
+		netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+				    NETIF_F_HW_VLAN_CTAG_RX |
+				    NETIF_F_HW_VLAN_CTAG_FILTER;
+	}
+	netdev->features |= NETIF_F_HIGHDMA |
+			    NETIF_F_SG |
+			    NETIF_F_IP_CSUM |
+			    NETIF_F_SCTP_CSUM |
+			    NETIF_F_IPV6_CSUM |
+			    NETIF_F_TSO |
+			    NETIF_F_TSO6 |
+			    NETIF_F_RXCSUM |
+			    NETIF_F_GRO;
+
+	/* copy netdev features into list of user selectable features */
+	netdev->hw_features |= netdev->features;
+	netdev->hw_features &= ~NETIF_F_RXCSUM;
+
+	adapter->vsi.id = adapter->vsi_res->vsi_id;
+
+	adapter->vsi.back = adapter;
+	adapter->vsi.base_vector = 1;
+	adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
+	adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
+				       ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+	adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
+				       ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+	adapter->vsi.netdev = adapter->netdev;
+	return 0;
+}
+
+/**
  * i40evf_init_task - worker thread to perform delayed initialization
  * @work: pointer to work_struct containing our data
  *
@@ -1996,10 +2139,9 @@
 						      struct i40evf_adapter,
 						      init_task.work);
 	struct net_device *netdev = adapter->netdev;
-	struct i40evf_mac_filter *f;
 	struct i40e_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
-	int i, err, bufsz;
+	int err, bufsz;
 
 	switch (adapter->state) {
 	case __I40EVF_STARTUP:
@@ -2050,6 +2192,12 @@
 		if (err) {
 			if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
 				err = i40evf_send_api_ver(adapter);
+			else
+				dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
+					adapter->pf_version.major,
+					adapter->pf_version.minor,
+					I40E_VIRTCHNL_VERSION_MAJOR,
+					I40E_VIRTCHNL_VERSION_MINOR);
 			goto err;
 		}
 		err = i40evf_send_vf_config_msg(adapter);
@@ -2085,42 +2233,15 @@
 	default:
 		goto err_alloc;
 	}
-	/* got VF config message back from PF, now we can parse it */
-	for (i = 0; i < adapter->vf_res->num_vsis; i++) {
-		if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
-			adapter->vsi_res = &adapter->vf_res->vsi_res[i];
-	}
-	if (!adapter->vsi_res) {
-		dev_err(&pdev->dev, "No LAN VSI found\n");
+	if (i40evf_process_config(adapter))
 		goto err_alloc;
-	}
+	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
 
 	adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
 
 	netdev->netdev_ops = &i40evf_netdev_ops;
 	i40evf_set_ethtool_ops(netdev);
 	netdev->watchdog_timeo = 5 * HZ;
-	netdev->features |= NETIF_F_HIGHDMA |
-			    NETIF_F_SG |
-			    NETIF_F_IP_CSUM |
-			    NETIF_F_SCTP_CSUM |
-			    NETIF_F_IPV6_CSUM |
-			    NETIF_F_TSO |
-			    NETIF_F_TSO6 |
-			    NETIF_F_RXCSUM |
-			    NETIF_F_GRO;
-
-	if (adapter->vf_res->vf_offload_flags
-	    & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
-		netdev->vlan_features = netdev->features;
-		netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
-				    NETIF_F_HW_VLAN_CTAG_RX |
-				    NETIF_F_HW_VLAN_CTAG_FILTER;
-	}
-
-	/* copy netdev features into list of user selectable features */
-	netdev->hw_features |= netdev->features;
-	netdev->hw_features &= ~NETIF_F_RXCSUM;
 
 	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
 		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
@@ -2130,16 +2251,6 @@
 	ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
 	ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
 
-	f = kzalloc(sizeof(*f), GFP_ATOMIC);
-	if (!f)
-		goto err_sw_init;
-
-	ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
-	f->add = true;
-	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
-
-	list_add(&f->list, &adapter->mac_filter_list);
-
 	init_timer(&adapter->watchdog_timer);
 	adapter->watchdog_timer.function = &i40evf_watchdog_timer;
 	adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -2154,24 +2265,14 @@
 	if (err)
 		goto err_sw_init;
 	i40evf_map_rings_to_vectors(adapter);
-	i40evf_configure_rss(adapter);
+	if (!RSS_AQ(adapter))
+		i40evf_configure_rss(adapter);
 	err = i40evf_request_misc_irq(adapter);
 	if (err)
 		goto err_sw_init;
 
 	netif_carrier_off(netdev);
 
-	adapter->vsi.id = adapter->vsi_res->vsi_id;
-	adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
-	adapter->vsi.back = adapter;
-	adapter->vsi.base_vector = 1;
-	adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
-	adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
-				       ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
-	adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
-				       ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
-	adapter->vsi.netdev = adapter->netdev;
-
 	if (!adapter->netdev_registered) {
 		err = register_netdev(netdev);
 		if (err)
@@ -2190,6 +2291,13 @@
 	adapter->state = __I40EVF_DOWN;
 	set_bit(__I40E_DOWN, &adapter->vsi.state);
 	i40evf_misc_irq_enable(adapter);
+
+	if (RSS_AQ(adapter)) {
+		adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
+		mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+	} else {
+		i40evf_configure_rss(adapter);
+	}
 	return;
 restart:
 	schedule_delayed_work(&adapter->init_task,
@@ -2299,7 +2407,7 @@
 	hw = &adapter->hw;
 	hw->back = adapter;
 
-	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
 	adapter->state = __I40EVF_STARTUP;
 
 	/* Call save state here because it relies on the adapter struct. */
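On the register-based RSS path added above, i40evf_configure_rss_reg() packs
four one-byte queue indices into each 32-bit HLUT register, cycling through
the active queues. A standalone model of that packing (queue and register
counts here are illustrative; the VF exposes I40E_VFQF_HLUT_MAX_INDEX + 1
registers):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int num_queues = 3;	/* stand-in for num_active_queues */
	unsigned int num_regs = 4;	/* illustrative register count */
	unsigned int cqueue = 0, i, j;

	for (i = 0; i < num_regs; i++) {
		uint32_t lut = 0;

		/* four 8-bit LUT entries per 32-bit register */
		for (j = 0; j < 4; j++) {
			if (cqueue == num_queues)
				cqueue = 0;
			lut |= cqueue++ << (8 * j);
		}
		printf("HLUT(%u) <- %#010x\n", i, lut);
	}
	return 0;
}
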
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 61e0905..d4eb1a5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -51,8 +51,9 @@
 
 	err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
 	if (err)
-		dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
-			op, err, hw->aq.asq_last_status);
+		dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
+			op, i40evf_stat_str(hw, err),
+			i40evf_aq_str(hw, hw->aq.asq_last_status));
 	return err;
 }
 
@@ -125,8 +126,11 @@
 	}
 
 	pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
-	if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
-	    (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
+	adapter->pf_version = *pf_vvi;
+
+	if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
+	    ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
+	     (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR)))
 		err = -EIO;
 
 out_alloc:
@@ -145,8 +149,22 @@
  **/
 int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
 {
-	return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
-				  NULL, 0);
+	u32 caps;
+
+	adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
+	caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+	       I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+	       I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+	       I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+	if (PF_IS_V11(adapter))
+		return i40evf_send_pf_msg(adapter,
+					  I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+					  (u8 *)&caps, sizeof(caps));
+	else
+		return i40evf_send_pf_msg(adapter,
+					  I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+					  NULL, 0);
 }
 
 /**
@@ -274,7 +294,7 @@
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
 	vqs.vsi_id = adapter->vsi_res->vsi_id;
-	vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
+	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
 	vqs.rx_queues = vqs.tx_queues;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
@@ -299,7 +319,7 @@
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
 	vqs.vsi_id = adapter->vsi_res->vsi_id;
-	vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
+	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
 	vqs.rx_queues = vqs.tx_queues;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
@@ -708,8 +728,9 @@
 		return;
 	}
 	if (v_retval) {
-		dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
-			__func__, v_retval, v_opcode);
+		dev_err(&adapter->pdev->dev, "%s: PF returned error %d (%s) to our request %d\n",
+			__func__, v_retval,
+			i40evf_stat_str(&adapter->hw, v_retval), v_opcode);
 	}
 	switch (v_opcode) {
 	case I40E_VIRTCHNL_OP_GET_STATS: {
@@ -729,6 +750,15 @@
 		adapter->current_stats = *stats;
 		}
 		break;
+	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: {
+		u16 len = sizeof(struct i40e_virtchnl_vf_resource) +
+			  I40E_MAX_VF_VSI *
+			  sizeof(struct i40e_virtchnl_vsi_resource);
+		memcpy(adapter->vf_res, msg, min(msglen, len));
+		i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
+		i40evf_process_config(adapter);
+		}
+		break;
 	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
 		/* enable transmits */
 		i40evf_irq_enable(adapter, true);
@@ -740,7 +770,6 @@
 		i40evf_free_all_rx_resources(adapter);
 		break;
 	case I40E_VIRTCHNL_OP_VERSION:
-	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
 	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
 		/* Don't display an error if we get these out of sequence.
 		 * If the firmware needed to get kicked, we'll get these and
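The verify_api_ver change above is the behavioural heart of the 1.1 support:
the VF no longer demands an exact version match, it only rejects a PF that
advertises a newer API than its own, and it stashes pf_version so PF_IS_V11()
can branch later. The accept/reject predicate, restated as a sketch
(pf_version_ok() is a hypothetical helper, not a driver function):

/* Sketch: accept any PF whose advertised virtchnl version is <= ours. */
static bool pf_version_ok(u32 pf_major, u32 pf_minor)
{
	if (pf_major > I40E_VIRTCHNL_VERSION_MAJOR)
		return false;		/* PF speaks a newer major: reject */
	if (pf_major == I40E_VIRTCHNL_VERSION_MAJOR &&
	    pf_minor > I40E_VIRTCHNL_VERSION_MINOR)
		return false;		/* same major, newer minor: reject */
	return true;			/* equal or older: talk 1.0 or 1.1 */
}
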
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index b0182dd..7a73510 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -139,10 +139,6 @@
 	if (ret_val)
 		return ret_val;
 
-	/* reset page to 0 */
-	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
-	if (ret_val)
-		return ret_val;
 
 	if (data & E1000_M88E1112_STATUS_LINK)
 		port = E1000_MEDIA_PORT_OTHER;
@@ -151,8 +147,20 @@
 	if (port && (hw->dev_spec._82575.media_port != port)) {
 		hw->dev_spec._82575.media_port = port;
 		hw->dev_spec._82575.media_changed = true;
+	}
+
+	if (port == E1000_MEDIA_PORT_COPPER) {
+		/* reset page to 0 */
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+		if (ret_val)
+			return ret_val;
+		igb_check_for_link_82575(hw);
 	} else {
-		ret_val = igb_check_for_link_82575(hw);
+		igb_check_for_link_82575(hw);
+		/* reset page to 0 */
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+		if (ret_val)
+			return ret_val;
 	}
 
 	return 0;
@@ -223,6 +231,7 @@
 	/* Verify phy id and set remaining function pointers */
 	switch (phy->id) {
 	case M88E1543_E_PHY_ID:
+	case M88E1512_E_PHY_ID:
 	case I347AT4_E_PHY_ID:
 	case M88E1112_E_PHY_ID:
 	case M88E1111_I_PHY_ID:
@@ -235,7 +244,7 @@
 		else
 			phy->ops.get_cable_length = igb_get_cable_length_m88;
 		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
-		/* Check if this PHY is confgured for media swap. */
+		/* Check if this PHY is configured for media swap. */
 		if (phy->id == M88E1112_E_PHY_ID) {
 			u16 data;
 
@@ -258,6 +267,11 @@
 				hw->mac.ops.check_for_link =
 						igb_check_for_link_media_swap;
 		}
+		if (phy->id == M88E1512_E_PHY_ID) {
+			ret_val = igb_initialize_M88E1512_phy(hw);
+			if (ret_val)
+				goto out;
+		}
 		break;
 	case IGP03E1000_E_PHY_ID:
 		phy->type = e1000_phy_igp_3;
@@ -889,6 +903,7 @@
  **/
 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
 {
+	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
 
 	/* This isn't a true "hard" reset, but is the only reset
@@ -905,7 +920,11 @@
 		goto out;
 
 	ret_val = igb_phy_sw_reset(hw);
+	if (ret_val)
+		goto out;
 
+	if (phy->id == M88E1512_E_PHY_ID)
+		ret_val = igb_initialize_M88E1512_phy(hw);
 out:
 	return ret_val;
 }
@@ -1579,6 +1598,7 @@
 		case I347AT4_E_PHY_ID:
 		case M88E1112_E_PHY_ID:
 		case M88E1543_E_PHY_ID:
+		case M88E1512_E_PHY_ID:
 		case I210_I_PHY_ID:
 			ret_val = igb_copper_link_setup_m88_gen2(hw);
 			break;
@@ -2621,7 +2641,8 @@
 	u16 phy_data;
 
 	if ((hw->phy.media_type != e1000_media_type_copper) ||
-	    (phy->id != M88E1543_E_PHY_ID))
+	    ((phy->id != M88E1543_E_PHY_ID) &&
+	     (phy->id != M88E1512_E_PHY_ID)))
 		goto out;
 
 	if (!hw->dev_spec._82575.eee_disable) {
@@ -2701,7 +2722,8 @@
 
 	/* Check if EEE is supported on this device. */
 	if ((hw->phy.media_type != e1000_media_type_copper) ||
-	    (phy->id != M88E1543_E_PHY_ID))
+	    ((phy->id != M88E1543_E_PHY_ID) &&
+	     (phy->id != M88E1512_E_PHY_ID)))
 		goto out;
 
 	ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index f8684aa..b191504 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -604,6 +604,10 @@
 #define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT	7
 #define E1000_M88E1112_PAGE_ADDR		0x16
 #define E1000_M88E1112_STATUS			0x01
+#define E1000_M88E1512_CFG_REG_1		0x0010
+#define E1000_M88E1512_CFG_REG_2		0x0011
+#define E1000_M88E1512_CFG_REG_3		0x0007
+#define E1000_M88E1512_MODE			0x0014
 
 /* PCI Express Control */
 #define E1000_GCR_CMPL_TMOUT_MASK       0x0000F000
@@ -861,6 +865,7 @@
 #define M88_VENDOR           0x0141
 #define I210_I_PHY_ID        0x01410C00
 #define M88E1543_E_PHY_ID    0x01410EA0
+#define M88E1512_E_PHY_ID    0x01410DD0
 
 /* M88E1000 Specific Registers */
 #define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index c1bb64d..23ec28f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,5 +1,5 @@
 /* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
+ * Copyright(c) 2007-2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -36,9 +36,6 @@
 /* Cable length tables */
 static const u16 e1000_m88_cable_length_table[] = {
 	0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
-#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
-	(sizeof(e1000_m88_cable_length_table) / \
-	sizeof(e1000_m88_cable_length_table[0]))
 
 static const u16 e1000_igp_2_cable_length_table[] = {
 	0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
@@ -49,9 +46,6 @@
 	60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
 	83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
 	104, 109, 114, 118, 121, 124};
-#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
-	(sizeof(e1000_igp_2_cable_length_table) / \
-	 sizeof(e1000_igp_2_cable_length_table[0]))
 
 /**
  *  igb_check_reset_block - Check if PHY reset is blocked
@@ -1268,6 +1262,8 @@
 			switch (hw->phy.id) {
 			case I347AT4_E_PHY_ID:
 			case M88E1112_E_PHY_ID:
+			case M88E1543_E_PHY_ID:
+			case M88E1512_E_PHY_ID:
 			case I210_I_PHY_ID:
 				reset_dsp = false;
 				break;
@@ -1276,9 +1272,9 @@
 					reset_dsp = false;
 				break;
 			}
-			if (!reset_dsp)
+			if (!reset_dsp) {
 				hw_dbg("Link taking longer than expected.\n");
-			else {
+			} else {
 				/* We didn't get link.
 				 * Reset the DSP and cross our fingers.
 				 */
@@ -1303,6 +1299,8 @@
 	if (hw->phy.type != e1000_phy_m88 ||
 	    hw->phy.id == I347AT4_E_PHY_ID ||
 	    hw->phy.id == M88E1112_E_PHY_ID ||
+	    hw->phy.id == M88E1543_E_PHY_ID ||
+	    hw->phy.id == M88E1512_E_PHY_ID ||
 	    hw->phy.id == I210_I_PHY_ID)
 		goto out;
 
@@ -1700,7 +1698,7 @@
 
 	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
 		M88E1000_PSSR_CABLE_LENGTH_SHIFT;
-	if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+	if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
 		ret_val = -E1000_ERR_PHY;
 		goto out;
 	}
@@ -1743,6 +1741,7 @@
 		phy->cable_length = phy_data / (is_cm ? 100 : 1);
 		break;
 	case M88E1543_E_PHY_ID:
+	case M88E1512_E_PHY_ID:
 	case I347AT4_E_PHY_ID:
 		/* Remember the original page select and set it to 7 */
 		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
@@ -1796,7 +1795,7 @@
 
 		index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
 			M88E1000_PSSR_CABLE_LENGTH_SHIFT;
-		if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+		if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
 			ret_val = -E1000_ERR_PHY;
 			goto out;
 		}
@@ -1840,7 +1839,7 @@
 	s32 ret_val = 0;
 	u16 phy_data, i, agc_value = 0;
 	u16 cur_agc_index, max_agc_index = 0;
-	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+	u16 min_agc_index = ARRAY_SIZE(e1000_igp_2_cable_length_table) - 1;
 	static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
 		IGP02E1000_PHY_AGC_A,
 		IGP02E1000_PHY_AGC_B,
@@ -1863,7 +1862,7 @@
 				IGP02E1000_AGC_LENGTH_MASK;
 
 		/* Array index bound check. */
-		if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+		if ((cur_agc_index >= ARRAY_SIZE(e1000_igp_2_cable_length_table)) ||
 		    (cur_agc_index == 0)) {
 			ret_val = -E1000_ERR_PHY;
 			goto out;
@@ -2195,6 +2194,90 @@
 }
 
 /**
+ *  igb_initialize_M88E1512_phy - Initialize M88E1512 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize the Marvell M88E1512 PHY to work correctly with Avoton.
+ **/
+s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+
+	/* Switch to PHY page 0xFF. */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
+	if (ret_val)
+		goto out;
+
+	/* Switch to PHY page 0xFB. */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D);
+	if (ret_val)
+		goto out;
+
+	/* Switch to PHY page 0x12. */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
+	if (ret_val)
+		goto out;
+
+	/* Change mode to SGMII-to-Copper */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
+	if (ret_val)
+		goto out;
+
+	/* Return the PHY to page 0. */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_phy_sw_reset(hw);
+	if (ret_val) {
+		hw_dbg("Error committing the PHY changes\n");
+		return ret_val;
+	}
+
+	/* give the PHY time to settle after the reset */
+	usleep_range(1000, 2000);
+out:
+	return ret_val;
+}
+
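
The new igb_initialize_M88E1512_phy() above is a straight-line chain of page-select and config writes with identical error handling at every step. A minimal standalone sketch of the same idea expressed as a data table — phy_write(), the register codes, and main() are illustrative placeholders, not igb API:

#include <stdio.h>

/* placeholder for phy->ops.write_reg(); returns 0 on success */
static int phy_write(unsigned int reg, unsigned int val)
{
	printf("wr 0x%02x <- 0x%04x\n", reg, val);
	return 0;
}

struct init_step { unsigned int reg, val; };

/* register codes 0x16/0x11/0x10 are placeholders; values are from the hunk */
static const struct init_step m88e1512_seq[] = {
	{ 0x16, 0x00FF },	/* select page 0xFF */
	{ 0x11, 0x214B },	/* CFG_REG_2 */
	{ 0x10, 0x2144 },	/* CFG_REG_1 */
	/* ... remaining CFG pairs, pages 0xFB and 0x12, mode 0x8001 ... */
	{ 0x16, 0x0000 },	/* return to page 0 */
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(m88e1512_seq) / sizeof(m88e1512_seq[0]); i++)
		if (phy_write(m88e1512_seq[i].reg, m88e1512_seq[i].val))
			return 1;	/* first failure aborts, like the goto out chain */
	return 0;
}
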
+/**
  * igb_power_up_phy_copper - Restore copper link in case of PHY power down
  * @hw: pointer to the HW structure
  *
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 7af4ffa..24d55ed 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -61,6 +61,7 @@
 void igb_power_up_phy_copper(struct e1000_hw *hw);
 void igb_power_down_phy_copper(struct e1000_hw *hw);
 s32  igb_phy_init_script_igp3(struct e1000_hw *hw);
+s32  igb_initialize_M88E1512_phy(struct e1000_hw *hw);
 s32  igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
 s32  igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 s32  igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 6f0490d..4af2870 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -104,6 +104,8 @@
 #define E1000_TRGTTIMH0  0x0B648 /* Target Time Register 0 High - RW */
 #define E1000_TRGTTIML1  0x0B64C /* Target Time Register 1 Low  - RW */
 #define E1000_TRGTTIMH1  0x0B650 /* Target Time Register 1 High - RW */
+#define E1000_FREQOUT0   0x0B654 /* Frequency Out 0 Control Register - RW */
+#define E1000_FREQOUT1   0x0B658 /* Frequency Out 1 Control Register - RW */
 #define E1000_AUXSTMPL0  0x0B65C /* Auxiliary Time Stamp 0 Register Low  - RO */
 #define E1000_AUXSTMPH0  0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
 #define E1000_AUXSTMPL1  0x0B664 /* Auxiliary Time Stamp 1 Register Low  - RO */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index c2bd4f9..212d668 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -540,6 +540,7 @@
 			 struct sk_buff *skb);
 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
+			      const u32 max_rss_queues);
 #ifdef CONFIG_IGB_HWMON
 void igb_sysfs_exit(struct igb_adapter *adapter);
 int igb_sysfs_init(struct igb_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index d5673eb..7426276 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2159,6 +2159,27 @@
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	int i;
 
+	if (ec->rx_max_coalesced_frames ||
+	    ec->rx_coalesce_usecs_irq ||
+	    ec->rx_max_coalesced_frames_irq ||
+	    ec->tx_max_coalesced_frames ||
+	    ec->tx_coalesce_usecs_irq ||
+	    ec->stats_block_coalesce_usecs ||
+	    ec->use_adaptive_rx_coalesce ||
+	    ec->use_adaptive_tx_coalesce ||
+	    ec->pkt_rate_low ||
+	    ec->rx_coalesce_usecs_low ||
+	    ec->rx_max_coalesced_frames_low ||
+	    ec->tx_coalesce_usecs_low ||
+	    ec->tx_max_coalesced_frames_low ||
+	    ec->pkt_rate_high ||
+	    ec->rx_coalesce_usecs_high ||
+	    ec->rx_max_coalesced_frames_high ||
+	    ec->tx_coalesce_usecs_high ||
+	    ec->tx_max_coalesced_frames_high ||
+	    ec->rate_sample_interval)
+		return -ENOTSUPP;
+
 	if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
 	    ((ec->rx_coalesce_usecs > 3) &&
 	     (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
@@ -2396,10 +2417,6 @@
 			info->rx_filters |=
 				(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
 				(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-				(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-				(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-				(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-				(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
 				(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 
 		return 0;
@@ -2991,6 +3008,7 @@
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	unsigned int count = ch->combined_count;
+	unsigned int max_combined = 0;
 
 	/* Verify they are not requesting separate vectors */
 	if (!count || ch->rx_count || ch->tx_count)
@@ -3001,11 +3019,13 @@
 		return -EINVAL;
 
 	/* Verify the number of channels doesn't exceed hw limits */
-	if (count > igb_max_channels(adapter))
+	max_combined = igb_max_channels(adapter);
+	if (count > max_combined)
 		return -EINVAL;
 
 	if (count != adapter->rss_queues) {
 		adapter->rss_queues = count;
+		igb_set_flag_queue_pairs(adapter, max_combined);
 
 		/* Hardware has to reinitialize queues and interrupts to
 		 * match the new configuration.
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 830466c..e174fbb 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -57,8 +57,8 @@
 #include "igb.h"
 
 #define MAJ 5
-#define MIN 2
-#define BUILD 18
+#define MIN 3
+#define BUILD 0
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 __stringify(BUILD) "-k"
 char igb_driver_name[] = "igb";
@@ -179,6 +179,8 @@
 #ifdef CONFIG_PCI_IOV
 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
+static int igb_disable_sriov(struct pci_dev *dev);
+static int igb_pci_disable_sriov(struct pci_dev *dev);
 #endif
 
 #ifdef CONFIG_PM
@@ -1205,10 +1207,14 @@
 
 	/* allocate q_vector and rings */
 	q_vector = adapter->q_vector[v_idx];
-	if (!q_vector)
+	if (!q_vector) {
 		q_vector = kzalloc(size, GFP_KERNEL);
-	else
+	} else if (size > ksize(q_vector)) {
+		kfree_rcu(q_vector, rcu);
+		q_vector = kzalloc(size, GFP_KERNEL);
+	} else {
 		memset(q_vector, 0, size);
+	}
 	if (!q_vector)
 		return -ENOMEM;
 
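
The q_vector hunk above turns a plain reuse into a three-way choice: allocate fresh, free-and-reallocate when ksize() says the existing slab object is too small, or recycle in place. A kernel-style sketch of that pattern in isolation (not standalone; struct obj and obj_resize() are illustrative names, and kfree_rcu() assumes the struct embeds an rcu_head, as igb_q_vector does):

struct obj {
	struct rcu_head rcu;	/* required for kfree_rcu() below */
	/* ... payload whose required size may grow ... */
};

static struct obj *obj_resize(struct obj *old, size_t size)
{
	if (!old)
		return kzalloc(size, GFP_KERNEL);	/* nothing yet: fresh alloc */
	if (size > ksize(old)) {
		kfree_rcu(old, rcu);	/* RCU readers may still hold the old one */
		return kzalloc(size, GFP_KERNEL);
	}
	memset(old, 0, size);	/* existing object is big enough: recycle */
	return old;
}
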
@@ -2645,7 +2651,11 @@
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
 err_sw_init:
+	kfree(adapter->shadow_vfta);
 	igb_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_PCI_IOV
+	igb_disable_sriov(pdev);
+#endif
 	pci_iounmap(pdev, hw->hw_addr);
 err_ioremap:
 	free_netdev(netdev);
@@ -2805,14 +2815,14 @@
 	 */
 	igb_release_hw_control(adapter);
 
-	unregister_netdev(netdev);
-
-	igb_clear_interrupt_scheme(adapter);
-
 #ifdef CONFIG_PCI_IOV
 	igb_disable_sriov(pdev);
 #endif
 
+	unregister_netdev(netdev);
+
+	igb_clear_interrupt_scheme(adapter);
+
 	pci_iounmap(pdev, hw->hw_addr);
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
@@ -2847,7 +2857,7 @@
 		return;
 
 	pci_sriov_set_totalvfs(pdev, 7);
-	igb_pci_enable_sriov(pdev, max_vfs);
+	igb_enable_sriov(pdev, max_vfs);
 
 #endif /* CONFIG_PCI_IOV */
 }
@@ -2888,6 +2898,14 @@
 
 	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
 
+	igb_set_flag_queue_pairs(adapter, max_rss_queues);
+}
+
+void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
+			      const u32 max_rss_queues)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
 	/* Determine if we need to pair queues. */
 	switch (hw->mac.type) {
 	case e1000_82575:
@@ -2968,6 +2986,8 @@
 	}
 #endif /* CONFIG_PCI_IOV */
 
+	igb_probe_vfs(adapter);
+
 	igb_init_queue_configuration(adapter);
 
 	/* Setup and initialize a copy of the hw vlan table array */
@@ -2980,8 +3000,6 @@
 		return -ENOMEM;
 	}
 
-	igb_probe_vfs(adapter);
-
 	/* Explicitly disable IRQ since the NIC can be in any state. */
 	igb_irq_disable(adapter);
 
@@ -6621,22 +6639,25 @@
 			    struct sk_buff *skb)
 {
 	struct page *page = rx_buffer->page;
+	unsigned char *va = page_address(page) + rx_buffer->page_offset;
 	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = IGB_RX_BUFSZ;
 #else
-	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+	unsigned int truesize = SKB_DATA_ALIGN(size);
 #endif
+	unsigned int pull_len;
 
-	if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
-		unsigned char *va = page_address(page) + rx_buffer->page_offset;
+	if (unlikely(skb_is_nonlinear(skb)))
+		goto add_tail_frag;
 
-		if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-			igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-			va += IGB_TS_HDR_LEN;
-			size -= IGB_TS_HDR_LEN;
-		}
+	if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
+		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+		va += IGB_TS_HDR_LEN;
+		size -= IGB_TS_HDR_LEN;
+	}
 
+	if (likely(size <= IGB_RX_HDR_LEN)) {
 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
 		/* page is not reserved, we can reuse buffer as-is */
@@ -6648,8 +6669,21 @@
 		return false;
 	}
 
+	/* the pulled header must cover at least ETH_HLEN, and up to
+	 * 60 bytes for frames shorter than 60 so that skb_pad can work.
+	 */
+	pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	va += pull_len;
+	size -= pull_len;
+
+add_tail_frag:
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-			rx_buffer->page_offset, size, truesize);
+			(unsigned long)va & ~PAGE_MASK, size, truesize);
 
 	return igb_can_reuse_rx_page(rx_buffer, page, truesize);
 }
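
One subtlety in the tail-frag path above: after va has been advanced past the timestamp and the pulled header, the fragment offset is recomputed as (unsigned long)va & ~PAGE_MASK rather than from rx_buffer->page_offset. A standalone check of that identity (the addresses are made up):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long page_base = 0x7f3a12340000UL;	/* page-aligned example */
	unsigned long va = page_base + 192 + 64;	/* page_offset + pull_len */

	/* masking off the page bits recovers the in-page offset */
	printf("offset = %lu\n", va & ~PAGE_MASK);	/* prints 256 */
	return 0;
}
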
@@ -6791,62 +6825,6 @@
 }
 
 /**
- *  igb_pull_tail - igb specific version of skb_pull_tail
- *  @rx_ring: rx descriptor ring packet is being transacted on
- *  @rx_desc: pointer to the EOP Rx descriptor
- *  @skb: pointer to current skb being adjusted
- *
- *  This function is an igb specific version of __pskb_pull_tail.  The
- *  main difference between this version and the original function is that
- *  this function can make several assumptions about the state of things
- *  that allow for significant optimizations versus the standard function.
- *  As a result we can do things like drop a frag and maintain an accurate
- *  truesize for the skb.
- */
-static void igb_pull_tail(struct igb_ring *rx_ring,
-			  union e1000_adv_rx_desc *rx_desc,
-			  struct sk_buff *skb)
-{
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-	unsigned char *va;
-	unsigned int pull_len;
-
-	/* it is valid to use page_address instead of kmap since we are
-	 * working with pages allocated out of the lomem pool per
-	 * alloc_page(GFP_ATOMIC)
-	 */
-	va = skb_frag_address(frag);
-
-	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-		/* retrieve timestamp from buffer */
-		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-
-		/* update pointers to remove timestamp header */
-		skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
-		frag->page_offset += IGB_TS_HDR_LEN;
-		skb->data_len -= IGB_TS_HDR_LEN;
-		skb->len -= IGB_TS_HDR_LEN;
-
-		/* move va to start of packet data */
-		va += IGB_TS_HDR_LEN;
-	}
-
-	/* we need the header to contain the greater of either ETH_HLEN or
-	 * 60 bytes if the skb->len is less than 60 for skb_pad.
-	 */
-	pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
-
-	/* align pull length to size of long to optimize memcpy performance */
-	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-	/* update all of the pointers */
-	skb_frag_size_sub(frag, pull_len);
-	frag->page_offset += pull_len;
-	skb->data_len -= pull_len;
-	skb->tail += pull_len;
-}
-
-/**
  *  igb_cleanup_headers - Correct corrupted or empty headers
  *  @rx_ring: rx descriptor ring packet is being transacted on
  *  @rx_desc: pointer to the EOP Rx descriptor
@@ -6873,10 +6851,6 @@
 		}
 	}
 
-	/* place header in linear portion of buffer */
-	if (skb_is_nonlinear(skb))
-		igb_pull_tail(rx_ring, rx_desc, skb);
-
 	/* if eth_skb_pad returns an error the skb was freed */
 	if (eth_skb_pad(skb))
 		return true;
@@ -7445,6 +7419,7 @@
 
 	if (igb_init_interrupt_scheme(adapter, true)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		rtnl_unlock();
 		return -ENOMEM;
 	}
 
@@ -7538,6 +7513,7 @@
 	igb_init_queue_configuration(adapter);
 
 	if (igb_init_interrupt_scheme(adapter, true)) {
+		rtnl_unlock();
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index c3a9392c..5982f28 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -405,7 +405,7 @@
 	wr32(E1000_CTRL_EXT, ctrl_ext);
 }
 
-static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
+static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq)
 {
 	static const u32 aux0_sel_sdp[IGB_N_SDP] = {
 		AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
@@ -424,6 +424,14 @@
 		TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1,
 		TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1,
 	};
+	static const u32 ts_sdp_sel_fc0[IGB_N_SDP] = {
+		TS_SDP0_SEL_FC0, TS_SDP1_SEL_FC0,
+		TS_SDP2_SEL_FC0, TS_SDP3_SEL_FC0,
+	};
+	static const u32 ts_sdp_sel_fc1[IGB_N_SDP] = {
+		TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
+		TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
+	};
 	static const u32 ts_sdp_sel_clr[IGB_N_SDP] = {
 		TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
 		TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
@@ -445,11 +453,17 @@
 		tssdp &= ~AUX1_TS_SDP_EN;
 
 	tssdp &= ~ts_sdp_sel_clr[pin];
-	if (chan == 1)
-		tssdp |= ts_sdp_sel_tt1[pin];
-	else
-		tssdp |= ts_sdp_sel_tt0[pin];
-
+	if (freq) {
+		if (chan == 1)
+			tssdp |= ts_sdp_sel_fc1[pin];
+		else
+			tssdp |= ts_sdp_sel_fc0[pin];
+	} else {
+		if (chan == 1)
+			tssdp |= ts_sdp_sel_tt1[pin];
+		else
+			tssdp |= ts_sdp_sel_tt0[pin];
+	}
 	tssdp |= ts_sdp_en[pin];
 
 	wr32(E1000_TSSDP, tssdp);
@@ -463,10 +477,10 @@
 	struct igb_adapter *igb =
 		container_of(ptp, struct igb_adapter, ptp_caps);
 	struct e1000_hw *hw = &igb->hw;
-	u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh;
+	u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout;
 	unsigned long flags;
 	struct timespec ts;
-	int pin = -1;
+	int use_freq = 0, pin = -1;
 	s64 ns;
 
 	switch (rq->type) {
@@ -511,40 +525,58 @@
 		ts.tv_nsec = rq->perout.period.nsec;
 		ns = timespec_to_ns(&ts);
 		ns = ns >> 1;
-		if (on && ns < 500000LL) {
-			/* 2k interrupts per second is an awful lot. */
-			return -EINVAL;
+		if (on && ns <= 70000000LL) {
+			if (ns < 8LL)
+				return -EINVAL;
+			use_freq = 1;
 		}
 		ts = ns_to_timespec(ns);
 		if (rq->perout.index == 1) {
-			tsauxc_mask = TSAUXC_EN_TT1;
-			tsim_mask = TSINTR_TT1;
+			if (use_freq) {
+				tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1;
+				tsim_mask = 0;
+			} else {
+				tsauxc_mask = TSAUXC_EN_TT1;
+				tsim_mask = TSINTR_TT1;
+			}
 			trgttiml = E1000_TRGTTIML1;
 			trgttimh = E1000_TRGTTIMH1;
+			freqout = E1000_FREQOUT1;
 		} else {
-			tsauxc_mask = TSAUXC_EN_TT0;
-			tsim_mask = TSINTR_TT0;
+			if (use_freq) {
+				tsauxc_mask = TSAUXC_EN_CLK0 | TSAUXC_ST0;
+				tsim_mask = 0;
+			} else {
+				tsauxc_mask = TSAUXC_EN_TT0;
+				tsim_mask = TSINTR_TT0;
+			}
 			trgttiml = E1000_TRGTTIML0;
 			trgttimh = E1000_TRGTTIMH0;
+			freqout = E1000_FREQOUT0;
 		}
 		spin_lock_irqsave(&igb->tmreg_lock, flags);
 		tsauxc = rd32(E1000_TSAUXC);
 		tsim = rd32(E1000_TSIM);
+		if (rq->perout.index == 1) {
+			tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1);
+			tsim &= ~TSINTR_TT1;
+		} else {
+			tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0);
+			tsim &= ~TSINTR_TT0;
+		}
 		if (on) {
 			int i = rq->perout.index;
-
-			igb_pin_perout(igb, i, pin);
+			igb_pin_perout(igb, i, pin, use_freq);
 			igb->perout[i].start.tv_sec = rq->perout.start.sec;
 			igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
 			igb->perout[i].period.tv_sec = ts.tv_sec;
 			igb->perout[i].period.tv_nsec = ts.tv_nsec;
 			wr32(trgttimh, rq->perout.start.sec);
 			wr32(trgttiml, rq->perout.start.nsec);
+			if (use_freq)
+				wr32(freqout, ns);
 			tsauxc |= tsauxc_mask;
 			tsim |= tsim_mask;
-		} else {
-			tsauxc &= ~tsauxc_mask;
-			tsim &= ~tsim_mask;
 		}
 		wr32(E1000_TSAUXC, tsauxc);
 		wr32(E1000_TSIM, tsim);
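
The new perout logic above keys off half the requested period: at 70 ms and below, the hardware FREQOUT clock toggles the pin with no interrupts (half-periods under 8 ns are rejected), while longer periods fall back to rearming target-time interrupts. A standalone sketch of just that decision, with example periods:

#include <stdio.h>

/* mirrors the branch above; half_ns is half the requested period */
static const char *perout_mode(long long half_ns)
{
	if (half_ns <= 70000000LL) {
		if (half_ns < 8LL)
			return "rejected (-EINVAL)";
		return "FREQOUT clock mode (no interrupts)";
	}
	return "target-time mode (interrupt per edge)";
}

int main(void)
{
	printf("1 kHz : %s\n", perout_mode(500000));	/* 1 ms period */
	printf("1 Hz  : %s\n", perout_mode(500000000));	/* 1 s period  */
	printf("fast  : %s\n", perout_mode(5));		/* too fast    */
	return 0;
}
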
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 95af14e..686fa71 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -319,6 +319,7 @@
 			dma_unmap_single(&pdev->dev, buffer_info->dma,
 					 adapter->rx_ps_hdr_size,
 					 DMA_FROM_DEVICE);
+			buffer_info->dma = 0;
 			skb_put(skb, hlen);
 		}
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index ac3ac2a..edf1fb9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -630,6 +630,7 @@
 #define IXGBE_FLAG_FCOE_ENABLED                 (u32)(1 << 21)
 #define IXGBE_FLAG_SRIOV_CAPABLE                (u32)(1 << 22)
 #define IXGBE_FLAG_SRIOV_ENABLED                (u32)(1 << 23)
+#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE	BIT(24)
 
 	u32 flags2;
 #define IXGBE_FLAG2_RSC_CAPABLE                 (u32)(1 << 0)
@@ -644,6 +645,9 @@
 #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		(u32)(1 << 9)
 #define IXGBE_FLAG2_PTP_PPS_ENABLED		(u32)(1 << 10)
 #define IXGBE_FLAG2_PHY_INTERRUPT		(u32)(1 << 11)
+#ifdef CONFIG_IXGBE_VXLAN
+#define IXGBE_FLAG2_VXLAN_REREG_NEEDED		BIT(12)
+#endif
 
 	/* Tx fast path data */
 	int num_tx_queues;
@@ -757,7 +761,9 @@
 	u32 timer_event_accumulator;
 	u32 vferr_refcount;
 	struct ixgbe_mac_addr *mac_table;
+#ifdef CONFIG_IXGBE_VXLAN
 	u16 vxlan_port;
+#endif
 	struct kobject *info_kobj;
 #ifdef CONFIG_IXGBE_HWMON
 	struct hwmon_buff *ixgbe_hwmon_buff;
@@ -967,4 +973,5 @@
 				  struct ixgbe_adapter *adapter,
 				  struct ixgbe_ring *tx_ring);
 u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
+void ixgbe_store_reta(struct ixgbe_adapter *adapter);
 #endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 6b87d96..dd7062f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2015 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -504,16 +504,12 @@
  **/
 static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
 {
-	u32 autoc2_reg, fwsm;
+	u32 autoc2_reg;
 	u16 ee_ctrl_2 = 0;
 
 	hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
 
-	/* Check to see if MNG FW could be enabled */
-	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
-
-	if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
-	    !hw->wol_enabled &&
+	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
 	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
 		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
 		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
@@ -1246,6 +1242,25 @@
 }
 
 /**
+ * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
+ * @hw: pointer to hardware structure
+ * @fdircmd: output; last value read from the FDIRCMD register
+ */
+static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
+{
+	int i;
+
+	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
+		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+			return 0;
+		udelay(10);
+	}
+
+	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
+}
+
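
ixgbe_fdir_check_cmd_complete() centralizes a bounded poll that was previously open-coded in two places; with IXGBE_FDIRCMD_CMD_POLL at 10 (see the ixgbe_type.h hunk below) and udelay(10) per iteration, the worst case is roughly 100 us. The generic shape as a standalone sketch, with placeholder names:

#include <stdbool.h>

#define POLL_BUDGET	10	/* matches IXGBE_FDIRCMD_CMD_POLL */
#define POLL_STEP_US	10	/* matches the udelay(10) above   */

/* true if check() succeeds within ~POLL_BUDGET * POLL_STEP_US us */
static bool poll_done(bool (*check)(void))
{
	for (int i = 0; i < POLL_BUDGET; i++) {
		if (check())
			return true;
		/* the driver busy-waits here with udelay(POLL_STEP_US) */
	}
	return false;
}

static bool always_busy(void) { return false; }

int main(void)
{
	return poll_done(always_busy) ? 0 : 1;	/* exits 1 after the budget */
}
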
+/**
  *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
  *  @hw: pointer to hardware structure
  **/
@@ -1253,6 +1268,8 @@
 {
 	int i;
 	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+	u32 fdircmd;
+	s32 err;
 
 	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
 
@@ -1260,15 +1277,10 @@
 	 * Before starting reinitialization process,
 	 * FDIRCMD.CMD must be zero.
 	 */
-	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
-		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
-		      IXGBE_FDIRCMD_CMD_MASK))
-			break;
-		udelay(10);
-	}
-	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
-		hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n");
-		return IXGBE_ERR_FDIR_REINIT_FAILED;
+	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err) {
+		hw_dbg(hw, "Flow Director previous command did not complete, aborting table re-initialization.\n");
+		return err;
 	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
@@ -1394,14 +1406,12 @@
 	/*
 	 * Continue setup of fdirctrl register bits:
 	 *  Turn perfect match filtering on
-	 *  Report hash in RSS field of Rx wb descriptor
 	 *  Initialize the drop queue
 	 *  Move the flexible bytes to use the ethertype - shift 6 words
 	 *  Set the maximum length per hash bucket to 0xA filters
 	 *  Send interrupt when 64 (0x4 * 16) filters are left
 	 */
 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
-		    IXGBE_FDIRCTRL_REPORT_STATUS |
 		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
 		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
@@ -1509,20 +1519,28 @@
  *  @input: unique input dword
  *  @common: compressed common input dword
  *  @queue: queue index to direct traffic to
+ *
+ * Note that the tunnel bit in input must not be set on parts without
+ * hardware tunneling support.
  **/
 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
 					  union ixgbe_atr_hash_dword input,
 					  union ixgbe_atr_hash_dword common,
 					  u8 queue)
 {
-	u64  fdirhashcmd;
-	u32  fdircmd;
+	u64 fdirhashcmd;
+	u8 flow_type;
+	bool tunnel;
+	u32 fdircmd;
 
 	/*
 	 * Get the flow_type in order to program FDIRCMD properly
 	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
 	 */
-	switch (input.formatted.flow_type) {
+	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
+	flow_type = input.formatted.flow_type &
+		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
+	switch (flow_type) {
 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
@@ -1538,8 +1556,10 @@
 	/* configure FDIRCMD register */
 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
-	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+	if (tunnel)
+		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
 
 	/*
 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
@@ -1760,6 +1780,7 @@
 					  u16 soft_id, u8 queue)
 {
 	u32 fdirport, fdirvlan, fdirhash, fdircmd;
+	s32 err;
 
 	/* currently IPv6 is not supported, must be programmed with 0 */
 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
@@ -1808,6 +1829,11 @@
 	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err) {
+		hw_dbg(hw, "Flow Director command did not complete!\n");
+		return err;
+	}
 
 	return 0;
 }
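
The tunnel handling above packs one extra bit into the ATR flow type: IXGBE_ATR_L4TYPE_TUNNEL_MASK (0x10, defined in the ixgbe_type.h hunk below) rides above the 4-bit flow-type values, and the filter code strips it with & (MASK - 1). A standalone walk-through of the bit arithmetic, assuming TCPV4 encodes as 0x2 in keeping with the enum ordering shown below:

#include <stdio.h>

#define ATR_FLOW_TYPE_TCPV4	0x2	/* assumed encoding, see enum below */
#define ATR_L4TYPE_TUNNEL_MASK	0x10	/* from the ixgbe_type.h hunk */

int main(void)
{
	unsigned int flow = ATR_FLOW_TYPE_TCPV4 | ATR_L4TYPE_TUNNEL_MASK; /* 0x12 */
	int tunnel = !!(flow & ATR_L4TYPE_TUNNEL_MASK);
	unsigned int base = flow & (ATR_L4TYPE_TUNNEL_MASK - 1);	/* & 0x0f */

	printf("tunnel=%d base_flow=0x%x\n", tunnel, base);	/* tunnel=1 base=0x2 */
	return 0;
}
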
@@ -1817,9 +1843,8 @@
 					  u16 soft_id)
 {
 	u32 fdirhash;
-	u32 fdircmd = 0;
-	u32 retry_count;
-	s32 err = 0;
+	u32 fdircmd;
+	s32 err;
 
 	/* configure FDIRHASH register */
 	fdirhash = input->formatted.bkt_hash;
@@ -1832,18 +1857,12 @@
 	/* Query if filter is present */
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
 
-	for (retry_count = 10; retry_count; retry_count--) {
-		/* allow 10us for query to process */
-		udelay(10);
-		/* verify query completed successfully */
-		fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
-		if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
-			break;
+	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err) {
+		hw_dbg(hw, "Flow Director command did not complete!\n");
+		return err;
 	}
 
-	if (!retry_count)
-		err = IXGBE_ERR_FDIR_REINIT_FAILED;
-
 	/* if filter exists in hardware then remove it */
 	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
 		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
@@ -1852,7 +1871,7 @@
 				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
 	}
 
-	return err;
+	return 0;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 4c1c267..3f56a80 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3905,3 +3905,18 @@
 		}
 	}
 }
+
+/** ixgbe_mng_present - returns true when management capability is present
+ * @hw: pointer to hardware structure
+ **/
+bool ixgbe_mng_present(struct ixgbe_hw *hw)
+{
+	u32 fwsm;
+
+	if (hw->mac.type < ixgbe_mac_82599EB)
+		return false;
+
+	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+	fwsm &= IXGBE_FWSM_MODE_MASK;
+	return fwsm == IXGBE_FWSM_FW_MODE_PT;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index ec015fe..2f779f3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -113,6 +113,7 @@
 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
 				 u32 length, u32 timeout, bool return_data);
 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+bool ixgbe_mng_present(struct ixgbe_hw *hw);
 bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
 
 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index ec7b232..ab2edc8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -166,6 +166,8 @@
 	/* set the supported link speeds */
 	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
 		ecmd->supported |= SUPPORTED_10000baseT_Full;
+	if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL)
+		ecmd->supported |= SUPPORTED_2500baseX_Full;
 	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
 		ecmd->supported |= SUPPORTED_1000baseT_Full;
 	if (supported_link & IXGBE_LINK_SPEED_100_FULL)
@@ -177,6 +179,8 @@
 			ecmd->advertising |= ADVERTISED_100baseT_Full;
 		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
 			ecmd->advertising |= ADVERTISED_10000baseT_Full;
+		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
+			ecmd->advertising |= ADVERTISED_2500baseX_Full;
 		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
 	} else {
@@ -286,6 +290,9 @@
 		case IXGBE_LINK_SPEED_10GB_FULL:
 			ethtool_cmd_speed_set(ecmd, SPEED_10000);
 			break;
+		case IXGBE_LINK_SPEED_2_5GB_FULL:
+			ethtool_cmd_speed_set(ecmd, SPEED_2500);
+			break;
 		case IXGBE_LINK_SPEED_1GB_FULL:
 			ethtool_cmd_speed_set(ecmd, SPEED_1000);
 			break;
@@ -2868,6 +2875,14 @@
 	return ret;
 }
 
+static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
+{
+	if (adapter->hw.mac.type < ixgbe_mac_X550)
+		return 16;
+	else
+		return 64;
+}
+
 static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -2907,6 +2922,44 @@
 	return 0;
 }
 
+static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
+			  const u8 *key, const u8 hfunc)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	int i;
+	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
+
+	if (hfunc)
+		return -EINVAL;
+
+	/* Fill out the redirection table */
+	if (indir) {
+		int max_queues = min_t(int, adapter->num_rx_queues,
+				       ixgbe_rss_indir_tbl_max(adapter));
+
+		/* Allow at least 2 queues w/ SR-IOV. */
+		if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
+		    (max_queues < 2))
+			max_queues = 2;
+
+		/* Verify user input. */
+		for (i = 0; i < reta_entries; i++)
+			if (indir[i] >= max_queues)
+				return -EINVAL;
+
+		for (i = 0; i < reta_entries; i++)
+			adapter->rss_indir_tbl[i] = indir[i];
+	}
+
+	/* Fill out the rss hash key */
+	if (key)
+		memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
+
+	ixgbe_store_reta(adapter);
+
+	return 0;
+}
+
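
ixgbe_set_rxfh() above validates every redirection entry against the usable queue count before committing. For reference, a standalone sketch of the even-spread table a user would typically install via ethtool -X (128 entries and 6 queues are example figures; the real entry count comes from ixgbe_rss_indir_tbl_entries()):

#include <stdio.h>

int main(void)
{
	unsigned int indir[128];	/* example reta_entries */
	unsigned int num_rx_queues = 6;	/* example queue count  */

	/* spread queues evenly across the table */
	for (unsigned int i = 0; i < 128; i++)
		indir[i] = i % num_rx_queues;

	printf("indir[0..7] = ");
	for (unsigned int i = 0; i < 8; i++)
		printf("%u ", indir[i]);
	printf("\n");	/* 0 1 2 3 4 5 0 1 */
	return 0;
}
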
 static int ixgbe_get_ts_info(struct net_device *dev,
 			     struct ethtool_ts_info *info)
 {
@@ -2938,14 +2991,6 @@
 			(1 << HWTSTAMP_FILTER_NONE) |
 			(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
 			(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
 			(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 		break;
 	default:
@@ -3167,6 +3212,7 @@
 	.get_rxfh_indir_size	= ixgbe_rss_indir_size,
 	.get_rxfh_key_size	= ixgbe_get_rxfh_key_size,
 	.get_rxfh		= ixgbe_get_rxfh,
+	.set_rxfh		= ixgbe_set_rxfh,
 	.get_channels		= ixgbe_get_channels,
 	.set_channels		= ixgbe_set_channels,
 	.get_ts_info		= ixgbe_get_ts_info,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ae21e0b..63b2cfe 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2015 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -65,6 +65,9 @@
 #include "ixgbe_common.h"
 #include "ixgbe_dcb_82599.h"
 #include "ixgbe_sriov.h"
+#ifdef CONFIG_IXGBE_VXLAN
+#include <net/vxlan.h>
+#endif
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
@@ -79,7 +82,7 @@
 #define DRV_VERSION "4.0.1-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
-				"Copyright (c) 1999-2014 Intel Corporation.";
+				"Copyright (c) 1999-2015 Intel Corporation.";
 
 static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";
 
@@ -243,13 +246,20 @@
 static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
 				     int expected_gts)
 {
+	struct ixgbe_hw *hw = &adapter->hw;
 	int max_gts = 0;
 	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
 	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
 	struct pci_dev *pdev;
 
-	/* determine whether to use the the parent device
+	/* Some devices are not connected over PCIe and thus do not negotiate
+	 * speed. These devices do not have valid bus info, and thus any report
+	 * we generate may not be correct.
 	 */
+	if (hw->bus.type == ixgbe_bus_type_internal)
+		return;
+
+	/* determine whether to use the parent device */
 	if (ixgbe_pcie_from_parent(&adapter->hw))
 		pdev = adapter->pdev->bus->parent->self;
 	else
@@ -1360,14 +1370,31 @@
 }
 
 #endif /* CONFIG_IXGBE_DCA */
+
+#define IXGBE_RSS_L4_TYPES_MASK \
+	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
+
 static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
 				 union ixgbe_adv_rx_desc *rx_desc,
 				 struct sk_buff *skb)
 {
-	if (ring->netdev->features & NETIF_F_RXHASH)
-		skb_set_hash(skb,
-			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
-			     PKT_HASH_TYPE_L3);
+	u16 rss_type;
+
+	if (!(ring->netdev->features & NETIF_F_RXHASH))
+		return;
+
+	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
+		   IXGBE_RXDADV_RSSTYPE_MASK;
+
+	if (!rss_type)
+		return;
+
+	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
 }
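
The reworked ixgbe_rx_hash() above reports PKT_HASH_TYPE_L4 only when the descriptor's RSS-type field lands in the TCP/UDP mask, and sets nothing for type 0. A standalone sketch of the bit test, using 1 as a stand-in for the IPv4+TCP encoding (the real values come from the descriptor format):

#include <stdio.h>

/* 1 stands in for the descriptor's IPv4+TCP RSS-type encoding */
#define RSS_L4_TYPES_MASK (1ul << 1)

int main(void)
{
	for (unsigned int rss_type = 0; rss_type < 3; rss_type++) {
		if (!rss_type) {
			printf("type 0: hash not set\n");	/* early return above */
			continue;
		}
		printf("type %u: PKT_HASH_TYPE_%s\n", rss_type,
		       (RSS_L4_TYPES_MASK & (1ul << rss_type)) ? "L4" : "L3");
	}
	return 0;
}
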
 
 #ifdef IXGBE_FCOE
@@ -1414,7 +1441,6 @@
 	    (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) {
 		encap_pkt = true;
 		skb->encapsulation = 1;
-		skb->ip_summed = CHECKSUM_NONE;
 	}
 
 	/* if IP and error */
@@ -3287,7 +3313,7 @@
  *
  * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
  */
-static void ixgbe_store_reta(struct ixgbe_adapter *adapter)
+void ixgbe_store_reta(struct ixgbe_adapter *adapter)
 {
 	u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -4245,6 +4271,21 @@
 	}
 }
 
+static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
+{
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
+#ifdef CONFIG_IXGBE_VXLAN
+		adapter->vxlan_port = 0;
+#endif
+		break;
+	default:
+		break;
+	}
+}
+
 #ifdef CONFIG_IXGBE_DCB
 /**
  * ixgbe_configure_dcb - Configure DCB hardware
@@ -5286,6 +5327,9 @@
 #ifdef CONFIG_IXGBE_DCA
 		adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
 #endif
+#ifdef CONFIG_IXGBE_VXLAN
+		adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
+#endif
 		break;
 	default:
 		break;
@@ -5737,10 +5781,11 @@
 
 	ixgbe_up_complete(adapter);
 
-#if IS_ENABLED(CONFIG_IXGBE_VXLAN)
+	ixgbe_clear_vxlan_port(adapter);
+#ifdef CONFIG_IXGBE_VXLAN
 	vxlan_get_rx_port(netdev);
-
 #endif
+
 	return 0;
 
 err_set_queues:
@@ -5761,7 +5806,15 @@
 {
 	ixgbe_ptp_suspend(adapter);
 
-	ixgbe_down(adapter);
+	if (adapter->hw.phy.ops.enter_lplu) {
+		adapter->hw.phy.reset_disable = true;
+		ixgbe_down(adapter);
+		adapter->hw.phy.ops.enter_lplu(&adapter->hw);
+		adapter->hw.phy.reset_disable = false;
+	} else {
+		ixgbe_down(adapter);
+	}
+
 	ixgbe_free_irq(adapter);
 
 	ixgbe_free_all_tx_resources(adapter);
@@ -6327,6 +6380,7 @@
 	struct net_device *upper;
 	struct list_head *iter;
 	u32 link_speed = adapter->link_speed;
+	const char *speed_str;
 	bool flow_rx, flow_tx;
 
 	/* only continue if link was previously down */
@@ -6364,14 +6418,24 @@
 	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
 		ixgbe_ptp_start_cyclecounter(adapter);
 
-	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
-	       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
-	       "10 Gbps" :
-	       (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
-	       "1 Gbps" :
-	       (link_speed == IXGBE_LINK_SPEED_100_FULL ?
-	       "100 Mbps" :
-	       "unknown speed"))),
+	switch (link_speed) {
+	case IXGBE_LINK_SPEED_10GB_FULL:
+		speed_str = "10 Gbps";
+		break;
+	case IXGBE_LINK_SPEED_2_5GB_FULL:
+		speed_str = "2.5 Gbps";
+		break;
+	case IXGBE_LINK_SPEED_1GB_FULL:
+		speed_str = "1 Gbps";
+		break;
+	case IXGBE_LINK_SPEED_100_FULL:
+		speed_str = "100 Mbps";
+		break;
+	default:
+		speed_str = "unknown speed";
+		break;
+	}
+	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
 	       ((flow_rx && flow_tx) ? "RX/TX" :
 	       (flow_rx ? "RX" :
 	       (flow_tx ? "TX" : "None"))));
@@ -6800,6 +6864,12 @@
 		ixgbe_service_event_complete(adapter);
 		return;
 	}
+#ifdef CONFIG_IXGBE_VXLAN
+	if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
+		adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
+		vxlan_get_rx_port(adapter->netdev);
+	}
+#endif /* CONFIG_IXGBE_VXLAN */
 	ixgbe_reset_subtask(adapter);
 	ixgbe_phy_interrupt_subtask(adapter);
 	ixgbe_sfp_detection_subtask(adapter);
@@ -6896,31 +6966,55 @@
 		if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
 		    !(first->tx_flags & IXGBE_TX_FLAGS_CC))
 			return;
+		vlan_macip_lens = skb_network_offset(skb) <<
+				  IXGBE_ADVTXD_MACLEN_SHIFT;
 	} else {
 		u8 l4_hdr = 0;
-		switch (first->protocol) {
-		case htons(ETH_P_IP):
-			vlan_macip_lens |= skb_network_header_len(skb);
+		union {
+			struct iphdr *ipv4;
+			struct ipv6hdr *ipv6;
+			u8 *raw;
+		} network_hdr;
+		union {
+			struct tcphdr *tcphdr;
+			u8 *raw;
+		} transport_hdr;
+
+		if (skb->encapsulation) {
+			network_hdr.raw = skb_inner_network_header(skb);
+			transport_hdr.raw = skb_inner_transport_header(skb);
+			vlan_macip_lens = skb_inner_network_offset(skb) <<
+					  IXGBE_ADVTXD_MACLEN_SHIFT;
+		} else {
+			network_hdr.raw = skb_network_header(skb);
+			transport_hdr.raw = skb_transport_header(skb);
+			vlan_macip_lens = skb_network_offset(skb) <<
+					  IXGBE_ADVTXD_MACLEN_SHIFT;
+		}
+
+		/* use first 4 bits to determine IP version */
+		switch (network_hdr.ipv4->version) {
+		case IPVERSION:
+			vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
 			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
-			l4_hdr = ip_hdr(skb)->protocol;
+			l4_hdr = network_hdr.ipv4->protocol;
 			break;
-		case htons(ETH_P_IPV6):
-			vlan_macip_lens |= skb_network_header_len(skb);
-			l4_hdr = ipv6_hdr(skb)->nexthdr;
+		case 6:
+			vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
+			l4_hdr = network_hdr.ipv6->nexthdr;
 			break;
 		default:
 			if (unlikely(net_ratelimit())) {
 				dev_warn(tx_ring->dev,
-				 "partial checksum but proto=%x!\n",
-				 first->protocol);
+					 "partial checksum but version=%d\n",
+					 network_hdr.ipv4->version);
 			}
-			break;
 		}
 
 		switch (l4_hdr) {
 		case IPPROTO_TCP:
 			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-			mss_l4len_idx = tcp_hdrlen(skb) <<
+			mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
 					IXGBE_ADVTXD_L4LEN_SHIFT;
 			break;
 		case IPPROTO_SCTP:
@@ -6946,7 +7040,6 @@
 	}
 
 	/* vlan_macip_lens: MACLEN, VLAN tag */
-	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
 	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
@@ -7201,6 +7294,10 @@
 		struct ipv6hdr *ipv6;
 	} hdr;
 	struct tcphdr *th;
+	struct sk_buff *skb;
+#ifdef CONFIG_IXGBE_VXLAN
+	u8 encap = false;
+#endif /* CONFIG_IXGBE_VXLAN */
 	__be16 vlan_id;
 
 	/* if ring doesn't have a interrupt vector, cannot perform ATR */
@@ -7214,16 +7311,36 @@
 	ring->atr_count++;
 
 	/* snag network header to get L4 type and address */
-	hdr.network = skb_network_header(first->skb);
+	skb = first->skb;
+	hdr.network = skb_network_header(skb);
+	if (skb->encapsulation) {
+#ifdef CONFIG_IXGBE_VXLAN
+		struct ixgbe_adapter *adapter = q_vector->adapter;
 
-	/* Currently only IPv4/IPv6 with TCP is supported */
-	if ((first->protocol != htons(ETH_P_IPV6) ||
-	     hdr.ipv6->nexthdr != IPPROTO_TCP) &&
-	    (first->protocol != htons(ETH_P_IP) ||
-	     hdr.ipv4->protocol != IPPROTO_TCP))
+		if (!adapter->vxlan_port)
+			return;
+		if (first->protocol != htons(ETH_P_IP) ||
+		    hdr.ipv4->version != IPVERSION ||
+		    hdr.ipv4->protocol != IPPROTO_UDP) {
+			return;
+		}
+		if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
+			return;
+		encap = true;
+		hdr.network = skb_inner_network_header(skb);
+		th = inner_tcp_hdr(skb);
+#else
 		return;
-
-	th = tcp_hdr(first->skb);
+#endif /* CONFIG_IXGBE_VXLAN */
+	} else {
+		/* Currently only IPv4/IPv6 with TCP is supported */
+		if ((first->protocol != htons(ETH_P_IPV6) ||
+		     hdr.ipv6->nexthdr != IPPROTO_TCP) &&
+		    (first->protocol != htons(ETH_P_IP) ||
+		     hdr.ipv4->protocol != IPPROTO_TCP))
+			return;
+		th = tcp_hdr(skb);
+	}
 
 	/* skip this packet since it is invalid or the socket is closing */
 	if (!th || th->fin)
@@ -7272,6 +7389,11 @@
 			     hdr.ipv6->daddr.s6_addr32[3];
 	}
 
+#ifdef CONFIG_IXGBE_VXLAN
+	if (encap)
+		input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
+#endif /* CONFIG_IXGBE_VXLAN */
+
 	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
 	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
 					      input, common, ring->queue_index);
@@ -7737,9 +7859,10 @@
 	bool pools;
 
 	/* Hardware supports up to 8 traffic classes */
-	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
-	    (hw->mac.type == ixgbe_mac_82598EB &&
-	     tc < MAX_TRAFFIC_CLASS))
+	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
+		return -EINVAL;
+
+	if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
 		return -EINVAL;
 
 	pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
@@ -7898,12 +8021,23 @@
 		need_reset = true;
 
 	netdev->features = features;
+
+#ifdef CONFIG_IXGBE_VXLAN
+	if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
+		if (features & NETIF_F_RXCSUM)
+			adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
+		else
+			ixgbe_clear_vxlan_port(adapter);
+	}
+#endif /* CONFIG_IXGBE_VXLAN */
+
 	if (need_reset)
 		ixgbe_do_reset(netdev);
 
 	return 0;
 }
 
+#ifdef CONFIG_IXGBE_VXLAN
 /**
  * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
  * @dev: The port's netdev
@@ -7917,17 +8051,18 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	u16 new_port = ntohs(port);
 
+	if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+		return;
+
 	if (sa_family == AF_INET6)
 		return;
 
-	if (adapter->vxlan_port == new_port) {
-		netdev_info(dev, "Port %d already offloaded\n", new_port);
+	if (adapter->vxlan_port == new_port)
 		return;
-	}
 
 	if (adapter->vxlan_port) {
 		netdev_info(dev,
-			    "Hit Max num of UDP ports, not adding port %d\n",
+			    "Hit Max num of VXLAN ports, not adding port %d\n",
 			    new_port);
 		return;
 	}
@@ -7946,9 +8081,11 @@
 				 __be16 port)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
-	struct ixgbe_hw *hw = &adapter->hw;
 	u16 new_port = ntohs(port);
 
+	if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+		return;
+
 	if (sa_family == AF_INET6)
 		return;
 
@@ -7958,9 +8095,10 @@
 		return;
 	}
 
-	adapter->vxlan_port = 0;
-	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, 0);
+	ixgbe_clear_vxlan_port(adapter);
+	adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
 }
+#endif /* CONFIG_IXGBE_VXLAN */
 
 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 			     struct net_device *dev,
@@ -8135,7 +8273,7 @@
 	    (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
 		return ERR_PTR(-EBUSY);
 
-	fwd_adapter = kcalloc(1, sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL);
+	fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
 	if (!fwd_adapter)
 		return ERR_PTR(-ENOMEM);
 
@@ -8191,6 +8329,21 @@
 	kfree(fwd_adapter);
 }
 
+#define IXGBE_MAX_TUNNEL_HDR_LEN 80
+static netdev_features_t
+ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
+		     netdev_features_t features)
+{
+	if (!skb->encapsulation)
+		return features;
+
+	if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
+		     IXGBE_MAX_TUNNEL_HDR_LEN))
+		return features & ~NETIF_F_ALL_CSUM;
+
+	return features;
+}
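
For scale on the 80-byte cutoff in ixgbe_features_check() above: the measured span runs from the outer transport header to the inner MAC header, so plain VXLAN costs only the UDP plus VXLAN headers. A standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	int udp = 8, vxlan = 8;		/* outer UDP header + VXLAN header */
	int tunnel_hdr = udp + vxlan;	/* = 16 for plain VXLAN */

	printf("tunnel hdr %d bytes: csum offload %s\n", tunnel_hdr,
	       tunnel_hdr > 80 ? "dropped" : "kept");	/* kept */
	return 0;
}
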
+
 static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_open		= ixgbe_open,
 	.ndo_stop		= ixgbe_close,
@@ -8236,8 +8389,11 @@
 	.ndo_bridge_getlink	= ixgbe_ndo_bridge_getlink,
 	.ndo_dfwd_add_station	= ixgbe_fwd_add,
 	.ndo_dfwd_del_station	= ixgbe_fwd_del,
+#ifdef CONFIG_IXGBE_VXLAN
 	.ndo_add_vxlan_port	= ixgbe_add_vxlan_port,
 	.ndo_del_vxlan_port	= ixgbe_del_vxlan_port,
+#endif /* CONFIG_IXGBE_VXLAN */
+	.ndo_features_check	= ixgbe_features_check,
 };
 
 /**
@@ -8597,17 +8753,24 @@
 	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
 	netdev->vlan_features |= NETIF_F_SG;
 
+	netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+				   NETIF_F_IPV6_CSUM;
+
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->priv_flags |= IFF_SUPP_NOFCS;
 
+#ifdef CONFIG_IXGBE_VXLAN
 	switch (adapter->hw.mac.type) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
-		netdev->hw_enc_features |= NETIF_F_RXCSUM;
+		netdev->hw_enc_features |= NETIF_F_RXCSUM |
+					   NETIF_F_IP_CSUM |
+					   NETIF_F_IPV6_CSUM;
 		break;
 	default:
 		break;
 	}
+#endif /* CONFIG_IXGBE_VXLAN */
 
 #ifdef CONFIG_IXGBE_DCB
 	netdev->dcbnl_ops = &dcbnl_ops;
@@ -8694,9 +8857,10 @@
 	hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
 
 	/* pick up the PCI bus settings for reporting later */
-	hw->mac.ops.get_bus_info(hw);
 	if (ixgbe_pcie_from_parent(hw))
 		ixgbe_get_parent_bus_info(adapter);
+	else
+		hw->mac.ops.get_bus_info(hw);
 
 	/* calculate the expected PCIe bandwidth required for optimal
 	 * performance. Note that some older parts will never have enough
@@ -8859,12 +9023,7 @@
 		unregister_netdev(netdev);
 
 #ifdef CONFIG_PCI_IOV
-	/*
-	 * Only disable SR-IOV on unload if the user specified the now
-	 * deprecated max_vfs module parameter.
-	 */
-	if (max_vfs)
-		ixgbe_disable_sriov(adapter);
+	ixgbe_disable_sriov(adapter);
 #endif
 	ixgbe_clear_interrupt_scheme(adapter);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 526a20b..597d0b1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -243,9 +243,7 @@
 	u16 ext_ability = 0;
 
 	if (!hw->phy.phy_semaphore_mask) {
-		hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) &
-				 IXGBE_STATUS_LAN_ID_1;
-		if (hw->phy.lan_id)
+		if (hw->bus.lan_id)
 			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
 		else
 			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
@@ -608,12 +606,7 @@
 				u32 device_type, u16 phy_data)
 {
 	s32 status;
-	u32 gssr;
-
-	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
-		gssr = IXGBE_GSSR_PHY1_SM;
-	else
-		gssr = IXGBE_GSSR_PHY0_SM;
+	u32 gssr = hw->phy.phy_semaphore_mask;
 
 	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
 		status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
@@ -737,39 +730,61 @@
 }
 
 /**
+ * ixgbe_get_copper_speeds_supported - Get copper link speed from phy
+ * @hw: pointer to hardware structure
+ *
+ * Determines the supported link speeds by reading the PHY speed
+ * ability register.
+ */
+static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
+{
+	u16 speed_ability;
+	s32 status;
+
+	status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
+				      &speed_ability);
+	if (status)
+		return status;
+
+	if (speed_ability & MDIO_SPEED_10G)
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+	if (speed_ability & MDIO_PMA_SPEED_1000)
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+	if (speed_ability & MDIO_PMA_SPEED_100)
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_X550:
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+		break;
+	case ixgbe_mac_X550EM_x:
+		hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
  * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
  * @hw: pointer to hardware structure
  * @speed: pointer to link speed
  * @autoneg: boolean auto-negotiation value
- *
- * Determines the link capabilities by reading the AUTOC register.
  */
 s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
 					       ixgbe_link_speed *speed,
 					       bool *autoneg)
 {
-	s32 status;
-	u16 speed_ability;
+	s32 status = 0;
 
-	*speed = 0;
 	*autoneg = true;
+	if (!hw->phy.speeds_supported)
+		status = ixgbe_get_copper_speeds_supported(hw);
 
-	status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
-				      &speed_ability);
-
-	if (status == 0) {
-		if (speed_ability & MDIO_SPEED_10G)
-			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (speed_ability & MDIO_PMA_SPEED_1000)
-			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
-		if (speed_ability & MDIO_PMA_SPEED_100)
-			*speed |= IXGBE_LINK_SPEED_100_FULL;
-	}
-
-	/* Internal PHY does not support 100 Mbps */
-	if (hw->mac.type == ixgbe_mac_X550EM_x)
-		*speed &= ~IXGBE_LINK_SPEED_100_FULL;
-
+	*speed = hw->phy.speeds_supported;
 	return status;
 }
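
ixgbe_get_copper_link_capabilities_generic() above now probes the PHY only on the first call and serves hw->phy.speeds_supported from the cache afterwards, so repeated capability queries stop costing MDIO round trips. The bare pattern as a standalone sketch (probe_speeds() and the bit values are placeholders):

#include <stdio.h>

static unsigned int cached_speeds;	/* 0 means "not probed yet" */

static unsigned int probe_speeds(void)
{
	printf("MDIO probe\n");		/* stands in for the read_reg call */
	return 0x80 | 0x20;		/* placeholder speed bits */
}

static unsigned int get_speeds(void)
{
	if (!cached_speeds)
		cached_speeds = probe_speeds();
	return cached_speeds;
}

int main(void)
{
	get_speeds();
	get_speeds();	/* cache hit: "MDIO probe" prints only once */
	return 0;
}
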
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index b6f424f..6368919 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2015 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -848,6 +848,7 @@
 #define IXGBE_MDIO_AUTO_NEG_LINK_STATUS	0x4 /* Indicates if link is up */
 
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK	0x7 /* Speed/Duplex Mask */
+#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK	0x6 /* Speed Mask */
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */
@@ -856,6 +857,24 @@
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB	0x4 /* 1Gb/s */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB	0x6 /* 10Gb/s */
+
+#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG	0x20	/* 10G Control Reg */
+#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400	/* 1G Provisioning 1 */
+#define IXGBE_MII_AUTONEG_XNP_TX_REG		0x17	/* 1G XNP Transmit */
+#define IXGBE_MII_AUTONEG_ADVERTISE_REG		0x10	/* 100M Advertisement */
+#define IXGBE_MII_10GBASE_T_ADVERTISE		0x1000	/* full duplex, bit:12*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX	0x4000	/* full duplex, bit:14*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE		0x8000	/* full duplex, bit:15*/
+#define IXGBE_MII_2_5GBASE_T_ADVERTISE		0x0400
+#define IXGBE_MII_5GBASE_T_ADVERTISE		0x0800
+#define IXGBE_MII_100BASE_T_ADVERTISE		0x0100	/* full duplex, bit:8 */
+#define IXGBE_MII_100BASE_T_ADVERTISE_HALF	0x0080	/* half duplex, bit:7 */
+#define IXGBE_MII_RESTART			0x200
+#define IXGBE_MII_AUTONEG_COMPLETE		0x20
+#define IXGBE_MII_AUTONEG_LINK_UP		0x04
+#define IXGBE_MII_AUTONEG_REG			0x0
 
 /* Management */
 #define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -1305,6 +1324,7 @@
 #define IXGBE_MDIO_AUTO_NEG_CONTROL	0x0 /* AUTO_NEG Control Reg */
 #define IXGBE_MDIO_AUTO_NEG_STATUS	0x1 /* AUTO_NEG Status Reg */
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT	0xC800 /* AUTO_NEG Vendor Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM  0xCC00 /* AUTO_NEG Vendor TX Reg */
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */
 #define IXGBE_MDIO_AUTO_NEG_VEN_LSC	0x1 /* AUTO_NEG Vendor Tx LSC */
 #define IXGBE_MDIO_AUTO_NEG_ADVT	0x10 /* AUTO_NEG Advt Reg */
@@ -1312,7 +1332,8 @@
 #define IXGBE_MDIO_AUTO_NEG_EEE_ADVT	0x3C /* AUTO_NEG EEE Advt Reg */
 
 #define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE	 0x0800 /* Set low power mode */
-
+#define IXGBE_AUTO_NEG_LP_STATUS	0xE820 /* AUTO NEG Rx LP Status Reg */
+#define IXGBE_AUTO_NEG_LP_1000BASE_CAP	0x8000 /* AUTO NEG Rx LP 1000BaseT */
 #define IXGBE_MDIO_TX_VENDOR_ALARMS_3	0xCC02 /* Vendor Alarms 3 Reg */
 #define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
 #define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
@@ -2041,6 +2062,11 @@
 #define IXGBE_NVM_POLL_WRITE       1  /* Flag for polling for write complete */
 #define IXGBE_NVM_POLL_READ        0  /* Flag for polling for read complete */
 
+#define NVM_INIT_CTRL_3			0x38
+#define NVM_INIT_CTRL_3_LPLU		0x8
+#define NVM_INIT_CTRL_3_D10GMP_PORT0	0x40
+#define NVM_INIT_CTRL_3_D10GMP_PORT1	0x100
+
 #define IXGBE_EEPROM_PAGE_SIZE_MAX       128
 #define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
 #define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
@@ -2540,9 +2566,11 @@
 #define IXGBE_FDIRCMD_QUEUE_EN                  0x00008000
 #define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT           5
 #define IXGBE_FDIRCMD_RX_QUEUE_SHIFT            16
+#define IXGBE_FDIRCMD_RX_TUNNEL_FILTER_SHIFT	23
 #define IXGBE_FDIRCMD_VT_POOL_SHIFT             24
 #define IXGBE_FDIR_INIT_DONE_POLL               10
 #define IXGBE_FDIRCMD_CMD_POLL                  10
+#define IXGBE_FDIRCMD_TUNNEL_FILTER		0x00800000
 
 #define IXGBE_FDIR_DROP_QUEUE                   127
 
@@ -2833,12 +2861,13 @@
 #define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
 
 /* Software ATR input stream values and masks */
-#define IXGBE_ATR_HASH_MASK     0x7fff
-#define IXGBE_ATR_L4TYPE_MASK      0x3
-#define IXGBE_ATR_L4TYPE_UDP       0x1
-#define IXGBE_ATR_L4TYPE_TCP       0x2
-#define IXGBE_ATR_L4TYPE_SCTP      0x3
-#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+#define IXGBE_ATR_HASH_MASK		0x7fff
+#define IXGBE_ATR_L4TYPE_MASK		0x3
+#define IXGBE_ATR_L4TYPE_UDP		0x1
+#define IXGBE_ATR_L4TYPE_TCP		0x2
+#define IXGBE_ATR_L4TYPE_SCTP		0x3
+#define IXGBE_ATR_L4TYPE_IPV6_MASK	0x4
+#define IXGBE_ATR_L4TYPE_TUNNEL_MASK	0x10
 enum ixgbe_atr_flow_type {
 	IXGBE_ATR_FLOW_TYPE_IPV4   = 0x0,
 	IXGBE_ATR_FLOW_TYPE_UDPV4  = 0x1,
@@ -3035,9 +3064,8 @@
 /* PCI bus types */
 enum ixgbe_bus_type {
 	ixgbe_bus_type_unknown = 0,
-	ixgbe_bus_type_pci,
-	ixgbe_bus_type_pcix,
 	ixgbe_bus_type_pci_express,
+	ixgbe_bus_type_internal,
 	ixgbe_bus_type_reserved
 };
 
@@ -3298,6 +3326,7 @@
 	s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
 	s32 (*check_overtemp)(struct ixgbe_hw *);
 	s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
+	s32 (*enter_lplu)(struct ixgbe_hw *);
 	s32 (*handle_lasi)(struct ixgbe_hw *hw);
 };
 
@@ -3308,6 +3337,7 @@
 	u16                             word_size;
 	u16                             address_bits;
 	u16                             word_page_size;
+	u16				ctrl_word_3;
 };
 
 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED	0x01
@@ -3351,10 +3381,10 @@
 	bool                            sfp_setup_needed;
 	u32                             revision;
 	enum ixgbe_media_type           media_type;
-	u8				lan_id;
 	u32				phy_semaphore_mask;
 	bool                            reset_disable;
 	ixgbe_autoneg_advertised        autoneg_advertised;
+	ixgbe_link_speed		speeds_supported;
 	enum ixgbe_smart_speed          smart_speed;
 	bool                            smart_speed_active;
 	bool                            multispeed_fiber;
@@ -3460,16 +3490,21 @@
 #define IXGBE_ERR_PBA_SECTION                   -31
 #define IXGBE_ERR_INVALID_ARGUMENT              -32
 #define IXGBE_ERR_HOST_INTERFACE_COMMAND        -33
+#define IXGBE_ERR_FDIR_CMD_INCOMPLETE		-38
 #define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF
 
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P)	((P == 0) ? (0x4010) : (0x8010))
-#define IXGBE_KRM_LINK_CTRL_1(P)	((P == 0) ? (0x420C) : (0x820C))
-#define IXGBE_KRM_DSP_TXFFE_STATE_4(P)	((P == 0) ? (0x4634) : (0x8634))
-#define IXGBE_KRM_DSP_TXFFE_STATE_5(P)	((P == 0) ? (0x4638) : (0x8638))
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P)	((P == 0) ? (0x4B00) : (0x8B00))
-#define IXGBE_KRM_PMD_DFX_BURNIN(P)	((P == 0) ? (0x4E00) : (0x8E00))
-#define IXGBE_KRM_TX_COEFF_CTRL_1(P)	((P == 0) ? (0x5520) : (0x9520))
-#define IXGBE_KRM_RX_ANA_CTL(P)		((P == 0) ? (0x5A00) : (0x9A00))
+#define IXGBE_FUSES0_GROUP(_i)		(0x11158 + ((_i) * 4))
+#define IXGBE_FUSES0_300MHZ		BIT(5)
+#define IXGBE_FUSES0_REV1		BIT(6)
+
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P)	((P) ? 0x8010 : 0x4010)
+#define IXGBE_KRM_LINK_CTRL_1(P)	((P) ? 0x820C : 0x420C)
+#define IXGBE_KRM_DSP_TXFFE_STATE_4(P)	((P) ? 0x8634 : 0x4634)
+#define IXGBE_KRM_DSP_TXFFE_STATE_5(P)	((P) ? 0x8638 : 0x4638)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P)	((P) ? 0x8B00 : 0x4B00)
+#define IXGBE_KRM_PMD_DFX_BURNIN(P)	((P) ? 0x8E00 : 0x4E00)
+#define IXGBE_KRM_TX_COEFF_CTRL_1(P)	((P) ? 0x9520 : 0x5520)
+#define IXGBE_KRM_RX_ANA_CTL(P)		((P) ? 0x9A00 : 0x5A00)
 
 #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B		(1 << 9)
 #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS		(1 << 11)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 032a587..4e75843 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -54,6 +54,11 @@
 s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
 {
 	struct ixgbe_mac_info *mac = &hw->mac;
+	struct ixgbe_phy_info *phy = &hw->phy;
+
+	/* set_phy_power is NULL by default */
+	if (!ixgbe_mng_present(hw))
+		phy->ops.set_phy_power = ixgbe_set_copper_phy_power;
 
 	mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
 	mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 7581da1..9fe9445 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -26,6 +26,20 @@
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"
 
+static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
+	struct ixgbe_phy_info *phy = &hw->phy;
+
+	/* Start with X540 invariants, since they are so similar */
+	ixgbe_get_invariants_X540(hw);
+
+	if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
+		phy->ops.set_phy_power = NULL;
+
+	return 0;
+}
+
 /** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
  *  @hw: pointer to hardware structure
  **/
@@ -597,6 +611,24 @@
 	return status;
 }
 
+/**
+ * ixgbe_get_bus_info_X550em - Set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets bus link width and speed to unknown because X550em is
+ * not a PCI device.
+ **/
+static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
+{
+	hw->bus.type  = ixgbe_bus_type_internal;
+	hw->bus.width = ixgbe_bus_width_unknown;
+	hw->bus.speed = ixgbe_bus_speed_unknown;
+
+	hw->mac.ops.set_lan_id(hw);
+
+	return 0;
+}
+
 /** ixgbe_disable_rx_x550 - Disable RX unit
  *
  *  Disables the Rx DMA unit for x550
@@ -1444,6 +1476,144 @@
 	return ixgbe_enable_lasi_ext_t_x550em(hw);
 }
 
+/** ixgbe_get_lcd_t_x550em - Determine lowest common denominator
+ *  @hw: pointer to hardware structure
+ *  @lcd_speed: pointer to lowest common link speed
+ *
+ *  Determine lowest common link speed with link partner.
+ **/
+static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
+				  ixgbe_link_speed *lcd_speed)
+{
+	u16 an_lp_status;
+	s32 status;
+	u16 word = hw->eeprom.ctrl_word_3;
+
+	*lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
+
+	status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      &an_lp_status);
+	if (status)
+		return status;
+
+	/* If link partner advertised 1G, return 1G */
+	if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
+		*lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
+		return status;
+	}
+
+	/* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
+	if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
+	    (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
+		return status;
+
+	/* Link partner not capable of lower speeds, return 10G */
+	*lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
+	return status;
+}
+
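The LCD logic above boils down to a three-way decision: 1G if the link partner advertises it, no valid LCD if NVM disables 10G for this port, otherwise 10G. A hedged standalone sketch of just that decision (the bit values here are illustrative stand-ins, not the driver's real definitions):

    #include <stdio.h>

    enum speed { SPEED_UNKNOWN, SPEED_1G, SPEED_10G };

    /* illustrative stand-ins for the real bit definitions */
    #define LP_1000BASE_CAP 0x1
    #define D10GMP_DISABLED 0x2

    static enum speed get_lcd(unsigned int lp_status, unsigned int nvm_word)
    {
            if (lp_status & LP_1000BASE_CAP)
                    return SPEED_1G;      /* partner can do 1G */
            if (nvm_word & D10GMP_DISABLED)
                    return SPEED_UNKNOWN; /* 10G forbidden, no valid LCD */
            return SPEED_10G;             /* partner only does 10G */
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   get_lcd(LP_1000BASE_CAP, 0),  /* -> SPEED_1G */
                   get_lcd(0, D10GMP_DISABLED),  /* -> SPEED_UNKNOWN */
                   get_lcd(0, 0));               /* -> SPEED_10G */
            return 0;
    }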
+/** ixgbe_enter_lplu_t_x550em - Transition to low power states
+ *  @hw: pointer to hardware structure
+ *
+ *  Configures Low Power Link Up on transition to low power states
+ *  (from D0 to non-D0). Link is required to enter LPLU, so avoid resetting
+ *  the X557 PHY immediately prior to entering LPLU.
+ **/
+static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
+{
+	u16 an_10g_cntl_reg, autoneg_reg, speed;
+	s32 status;
+	ixgbe_link_speed lcd_speed;
+	u32 save_autoneg;
+	bool link_up;
+
+	/* SW LPLU not required on later HW revisions. */
+	if (IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))
+		return 0;
+
+	/* If blocked by MNG FW, then don't restart AN */
+	if (ixgbe_check_reset_blocked(hw))
+		return 0;
+
+	status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+	if (status)
+		return status;
+
+	status = hw->eeprom.ops.read(hw, NVM_INIT_CTRL_3,
+				     &hw->eeprom.ctrl_word_3);
+	if (status)
+		return status;
+
+	/* If link is down, LPLU disabled in NVM, WoL disabled, or
+	 * manageability disabled, then force link down by entering
+	 * low power mode.
+	 */
+	if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
+	    !(hw->wol_enabled || ixgbe_mng_present(hw)))
+		return ixgbe_set_copper_phy_power(hw, false);
+
+	/* Determine LCD */
+	status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
+	if (status)
+		return status;
+
+	/* If no valid LCD link speed, then force link down and exit. */
+	if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
+		return ixgbe_set_copper_phy_power(hw, false);
+
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      &speed);
+	if (status)
+		return status;
+
+	/* If no link now, speed is invalid so take link down */
+	status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+	if (status)
+		return ixgbe_set_copper_phy_power(hw, false);
+
+	/* clear everything but the speed bits */
+	speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
+
+	/* If current speed is already LCD, then exit. */
+	if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
+	     (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
+	    ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
+	     (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
+		return status;
+
+	/* Clear AN completed indication */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      &autoneg_reg);
+	if (status)
+		return status;
+
+	status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      &an_10g_cntl_reg);
+	if (status)
+		return status;
+
+	status = hw->phy.ops.read_reg(hw,
+				      IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      &autoneg_reg);
+	if (status)
+		return status;
+
+	save_autoneg = hw->phy.autoneg_advertised;
+
+	/* Set up link at the lowest common link speed */
+	status = hw->mac.ops.setup_link(hw, lcd_speed, false);
+
+	/* restore autoneg from before setting lplu speed */
+	hw->phy.autoneg_advertised = save_autoneg;
+
+	return status;
+}
+
 /** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
  *  @hw: pointer to hardware structure
  *
@@ -1514,6 +1684,11 @@
 			ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
 		}
 
+		/* setup SW LPLU only for first revision */
+		if (!(IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw,
+							IXGBE_FUSES0_GROUP(0))))
+			phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
+
 		phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
 		phy->ops.reset = ixgbe_reset_phy_t_X550em;
 		break;
@@ -1760,7 +1935,6 @@
 	.get_mac_addr			= &ixgbe_get_mac_addr_generic, \
 	.get_device_caps		= &ixgbe_get_device_caps_generic, \
 	.stop_adapter			= &ixgbe_stop_adapter_generic, \
-	.get_bus_info			= &ixgbe_get_bus_info_generic, \
 	.set_lan_id			= &ixgbe_set_lan_id_multi_port_pcie, \
 	.read_analog_reg8		= NULL, \
 	.write_analog_reg8		= NULL, \
@@ -1809,6 +1983,7 @@
 	.get_wwn_prefix		= &ixgbe_get_wwn_prefix_generic,
 	.setup_link		= &ixgbe_setup_mac_link_X540,
 	.get_link_capabilities	= &ixgbe_get_copper_link_capabilities_generic,
+	.get_bus_info		= &ixgbe_get_bus_info_generic,
 	.setup_sfp		= NULL,
 };
 
@@ -1820,6 +1995,7 @@
 	.get_wwn_prefix		= NULL,
 	.setup_link		= NULL, /* defined later */
 	.get_link_capabilities	= &ixgbe_get_link_capabilities_X550em,
+	.get_bus_info		= &ixgbe_get_bus_info_X550em,
 	.setup_sfp		= ixgbe_setup_sfp_modules_X550em,
 
 };
@@ -1855,7 +2031,7 @@
 	.read_reg		= &ixgbe_read_phy_reg_generic, \
 	.write_reg		= &ixgbe_write_phy_reg_generic, \
 	.setup_link		= &ixgbe_setup_phy_link_generic, \
-	.set_phy_power		= &ixgbe_set_copper_phy_power, \
+	.set_phy_power		= NULL, \
 	.check_overtemp		= &ixgbe_tn_check_overtemp, \
 	.get_firmware_version	= &ixgbe_get_phy_firmware_version_generic,
 
@@ -1893,7 +2069,7 @@
 
 struct ixgbe_info ixgbe_X550EM_x_info = {
 	.mac			= ixgbe_mac_X550EM_x,
-	.get_invariants		= &ixgbe_get_invariants_X540,
+	.get_invariants		= &ixgbe_get_invariants_X550_x,
 	.mac_ops		= &mac_ops_X550EM_x,
 	.eeprom_ops		= &eeprom_ops_X550EM_x,
 	.phy_ops		= &phy_ops_X550EM_x,
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 770e21a..5843458 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -161,6 +161,18 @@
 #define IXGBE_RXDADV_SPLITHEADER_EN	0x00001000
 #define IXGBE_RXDADV_SPH		0x8000
 
+/* RSS Hash results */
+#define IXGBE_RXDADV_RSSTYPE_NONE		0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP		0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4		0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP		0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX		0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6		0x00000005
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX	0x00000006
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP		0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP		0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX	0x00000009
+
 #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
 				      IXGBE_RXD_ERR_CE |  \
 				      IXGBE_RXD_ERR_LE |  \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index b2f5b16..d3e5f5b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -813,22 +813,15 @@
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
-	/* We support this operation only for 82599 and x540 at the moment */
-	if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
-		return IXGBEVF_82599_RETA_SIZE;
+	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
+		return IXGBEVF_X550_VFRETA_SIZE;
 
-	return 0;
+	return IXGBEVF_82599_RETA_SIZE;
 }
 
 static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
 {
-	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
-	/* We support this operation only for 82599 and x540 at the moment */
-	if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
-		return IXGBEVF_RSS_HASH_KEY_SIZE;
-
-	return 0;
+	return IXGBEVF_RSS_HASH_KEY_SIZE;
 }
 
 static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
@@ -840,21 +833,33 @@
 	if (hfunc)
 		*hfunc = ETH_RSS_HASH_TOP;
 
-	/* If neither indirection table nor hash key was requested - just
-	 * return a success avoiding taking any locks.
-	 */
-	if (!indir && !key)
-		return 0;
+	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
+		if (key)
+			memcpy(key, adapter->rss_key, sizeof(adapter->rss_key));
 
-	spin_lock_bh(&adapter->mbx_lock);
-	if (indir)
-		err = ixgbevf_get_reta_locked(&adapter->hw, indir,
-					      adapter->num_rx_queues);
+		if (indir) {
+			int i;
 
-	if (!err && key)
-		err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+			for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
+				indir[i] = adapter->rss_indir_tbl[i];
+		}
+	} else {
+		/* If neither indirection table nor hash key was requested,
+		 * just return success without taking any locks.
+		 */
+		if (!indir && !key)
+			return 0;
 
-	spin_unlock_bh(&adapter->mbx_lock);
+		spin_lock_bh(&adapter->mbx_lock);
+		if (indir)
+			err = ixgbevf_get_reta_locked(&adapter->hw, indir,
+						      adapter->num_rx_queues);
+
+		if (!err && key)
+			err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+
+		spin_unlock_bh(&adapter->mbx_lock);
+	}
 
 	return err;
 }
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 775d089..04c7ec84 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -144,9 +144,11 @@
 
 #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
 #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
-#define IXGBEVF_MAX_RSS_QUEUES	2
-#define IXGBEVF_82599_RETA_SIZE	128
+#define IXGBEVF_MAX_RSS_QUEUES		2
+#define IXGBEVF_82599_RETA_SIZE		128	/* 128 entries */
+#define IXGBEVF_X550_VFRETA_SIZE	64	/* 64 entries */
 #define IXGBEVF_RSS_HASH_KEY_SIZE	40
+#define IXGBEVF_VFRSSRK_REGS		10	/* 10 registers for RSS key */
 
 #define IXGBEVF_DEFAULT_TXD	1024
 #define IXGBEVF_DEFAULT_RXD	512
@@ -447,6 +449,9 @@
 
 	spinlock_t mbx_lock;
 	unsigned long last_reset;
+
+	u32 rss_key[IXGBEVF_VFRSSRK_REGS];
+	u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
 };
 
 enum ixbgevf_state_t {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 1d7b00b..149a0b4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -457,6 +457,32 @@
 	napi_gro_receive(&q_vector->napi, skb);
 }
 
+#define IXGBE_RSS_L4_TYPES_MASK \
+	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
+
+static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
+				   union ixgbe_adv_rx_desc *rx_desc,
+				   struct sk_buff *skb)
+{
+	u16 rss_type;
+
+	if (!(ring->netdev->features & NETIF_F_RXHASH))
+		return;
+
+	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
+		   IXGBE_RXDADV_RSSTYPE_MASK;
+
+	if (!rss_type)
+		return;
+
+	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+}
+
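The IXGBE_RSS_L4_TYPES_MASK trick above replaces a per-type switch with one shift-and-test: set a bit for every RSS type that is an L4 hash, then check membership with a single AND. A standalone illustration using the RSSTYPE values from the new defines:

    #include <stdio.h>

    #define RSSTYPE_IPV4_TCP  0x1   /* L4 hash */
    #define RSSTYPE_IPV4      0x2   /* L3-only hash */
    #define RSSTYPE_IPV6_TCP  0x3
    #define RSSTYPE_IPV4_UDP  0x7
    #define RSSTYPE_IPV6_UDP  0x8

    #define RSS_L4_TYPES_MASK \
            ((1ul << RSSTYPE_IPV4_TCP) | (1ul << RSSTYPE_IPV4_UDP) | \
             (1ul << RSSTYPE_IPV6_TCP) | (1ul << RSSTYPE_IPV6_UDP))

    int main(void)
    {
            unsigned int types[] = { RSSTYPE_IPV4_TCP, RSSTYPE_IPV4 };

            for (int i = 0; i < 2; i++) {
                    unsigned int t = types[i];
                    /* one bit test instead of a switch over all types */
                    printf("type 0x%x -> %s hash\n", t,
                           (RSS_L4_TYPES_MASK & (1ul << t)) ? "L4" : "L3");
            }
            return 0;
    }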
 /**
  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
  * @ring: structure containing ring specific data
@@ -506,6 +532,7 @@
 				       union ixgbe_adv_rx_desc *rx_desc,
 				       struct sk_buff *skb)
 {
+	ixgbevf_rx_hash(rx_ring, rx_desc, skb);
 	ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
 
 	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -649,46 +676,6 @@
 }
 
 /**
- * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @skb: pointer to current skb being adjusted
- *
- * This function is an ixgbevf specific version of __pskb_pull_tail.  The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- **/
-static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
-			      struct sk_buff *skb)
-{
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-	unsigned char *va;
-	unsigned int pull_len;
-
-	/* it is valid to use page_address instead of kmap since we are
-	 * working with pages allocated out of the lomem pool per
-	 * alloc_page(GFP_ATOMIC)
-	 */
-	va = skb_frag_address(frag);
-
-	/* we need the header to contain the greater of either ETH_HLEN or
-	 * 60 bytes if the skb->len is less than 60 for skb_pad.
-	 */
-	pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
-
-	/* align pull length to size of long to optimize memcpy performance */
-	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-	/* update all of the pointers */
-	skb_frag_size_sub(frag, pull_len);
-	frag->page_offset += pull_len;
-	skb->data_len -= pull_len;
-	skb->tail += pull_len;
-}
-
-/**
  * ixgbevf_cleanup_headers - Correct corrupted or empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
  * @rx_desc: pointer to the EOP Rx descriptor
@@ -721,10 +708,6 @@
 		}
 	}
 
-	/* place header in linear portion of buffer */
-	if (skb_is_nonlinear(skb))
-		ixgbevf_pull_tail(rx_ring, skb);
-
 	/* if eth_skb_pad returns an error the skb was freed */
 	if (eth_skb_pad(skb))
 		return true;
@@ -789,16 +772,19 @@
 				struct sk_buff *skb)
 {
 	struct page *page = rx_buffer->page;
+	unsigned char *va = page_address(page) + rx_buffer->page_offset;
 	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = IXGBEVF_RX_BUFSZ;
 #else
 	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
 #endif
+	unsigned int pull_len;
 
-	if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
-		unsigned char *va = page_address(page) + rx_buffer->page_offset;
+	if (unlikely(skb_is_nonlinear(skb)))
+		goto add_tail_frag;
 
+	if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
 		/* page is not reserved, we can reuse buffer as is */
@@ -810,8 +796,21 @@
 		return false;
 	}
 
+	/* we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	va += pull_len;
+	size -= pull_len;
+
+add_tail_frag:
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-			rx_buffer->page_offset, size, truesize);
+			(unsigned long)va & ~PAGE_MASK, size, truesize);
 
 	/* avoid re-using remote pages */
 	if (unlikely(ixgbevf_page_is_reserved(page)))
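The reworked add_rx_frag path folds the old pull-tail pass into the copy itself: only the parsed header is copied to the linear area, then va and size are advanced so the remainder is attached as a page fragment at offset va & ~PAGE_MASK. A minimal userspace sketch of that pointer arithmetic (a 4 KiB page and made-up lengths assumed):

    #include <stdio.h>
    #include <string.h>

    #define PG_SIZE 4096UL
    #define PG_MASK (~(PG_SIZE - 1))

    int main(void)
    {
            static char page[PG_SIZE];
            char linear[128];

            char *va = page + 256;          /* buffer starts mid-page */
            unsigned long size = 1024;      /* received bytes */
            unsigned long pull_len = 96;    /* parsed header length */

            memcpy(linear, va, pull_len);   /* header into linear part */
            va += pull_len;                 /* rest stays in the page... */
            size -= pull_len;

            /* ...and is attached as a frag at this in-page offset */
            printf("frag offset=%lu len=%lu\n",
                   (unsigned long)va & ~PG_MASK, size);
            return 0;
    }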
@@ -1697,22 +1696,25 @@
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 vfmrqc = 0, vfreta = 0;
-	u32 rss_key[10];
 	u16 rss_i = adapter->num_rx_queues;
-	int i, j;
+	u8 i, j;
 
 	/* Fill out hash function seeds */
-	netdev_rss_key_fill(rss_key, sizeof(rss_key));
-	for (i = 0; i < 10; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
+	netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
+	for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);
 
-	/* Fill out redirection table */
-	for (i = 0, j = 0; i < 64; i++, j++) {
+	for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
 		if (j == rss_i)
 			j = 0;
-		vfreta = (vfreta << 8) | (j * 0x1);
-		if ((i & 3) == 3)
+
+		adapter->rss_indir_tbl[i] = j;
+
+		vfreta |= j << (i & 0x3) * 8;
+		if ((i & 3) == 3) {
 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
+			vfreta = 0;
+		}
 	}
 
 	/* Perform hash on these packet types */
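The RETA loop above packs four one-byte table entries into each 32-bit VFRETA register and, unlike the old code, clears the accumulator after every fourth write. A standalone sketch of the packing, assuming 2 RSS queues and the 64-entry X550 VF table:

    #include <stdio.h>
    #include <stdint.h>

    #define RETA_SIZE  64   /* IXGBEVF_X550_VFRETA_SIZE */
    #define RSS_QUEUES 2

    int main(void)
    {
            uint32_t vfreta = 0;

            for (unsigned int i = 0, j = 0; i < RETA_SIZE; i++, j++) {
                    if (j == RSS_QUEUES)
                            j = 0;
                    /* entry i lands in byte (i & 3) of the current register */
                    vfreta |= (uint32_t)j << ((i & 0x3) * 8);
                    if ((i & 3) == 3) {
                            printf("VFRETA[%u] = 0x%08x\n", i >> 2, vfreta);
                            vfreta = 0;     /* start the next register clean */
                    }
            }
            return 0;
    }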
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 62e48bc..fe2299a 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3027,8 +3027,8 @@
 	const char *dt_mac_addr;
 	char hw_mac_addr[ETH_ALEN];
 	const char *mac_from;
+	const char *managed;
 	int phy_mode;
-	int fixed_phy = 0;
 	int err;
 
 	/* Our multiqueue support is not complete, so for now, only
@@ -3062,7 +3062,6 @@
 			dev_err(&pdev->dev, "cannot register fixed PHY\n");
 			goto err_free_irq;
 		}
-		fixed_phy = 1;
 
 		/* In the case of a fixed PHY, the DT node associated
 		 * to the PHY is the Ethernet MAC DT node.
@@ -3086,8 +3085,10 @@
 	pp = netdev_priv(dev);
 	pp->phy_node = phy_node;
 	pp->phy_interface = phy_mode;
-	pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) &&
-				fixed_phy;
+
+	err = of_property_read_string(dn, "managed", &managed);
+	pp->use_inband_status = (err == 0 &&
+				 strcmp(managed, "in-band-status") == 0);
 
 	pp->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(pp->clk)) {
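With this change mvneta no longer infers in-band status from the fixed-PHY setup; the board's device tree opts in explicitly. Per the generic Ethernet controller binding, that is a one-line property on the MAC node, managed = "in-band-status";, while the default value "auto" keeps the previous MDIO-managed behaviour.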
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index 52a6665..d547010 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -18,5 +18,6 @@
 
 source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
 source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
+source "drivers/net/ethernet/mellanox/mlxsw/Kconfig"
 
 endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
index 38fe32ef..2e2a5ec 100644
--- a/drivers/net/ethernet/mellanox/Makefile
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -4,3 +4,4 @@
 
 obj-$(CONFIG_MLX4_CORE) += mlx4/
 obj-$(CONFIG_MLX5_CORE) += mlx5/core/
+obj-$(CONFIG_MLXSW_CORE) += mlxsw/
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 63769df..eb8a498 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -100,7 +100,6 @@
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err = 0;
-	char name[25];
 	int timestamp_en = 0;
 	bool assigned_eq = false;
 
@@ -119,8 +118,8 @@
 			err = mlx4_assign_eq(mdev->dev, priv->port,
 					     &cq->vector);
 			if (err) {
-				mlx4_err(mdev, "Failed assigning an EQ to %s\n",
-					 name);
+				mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n",
+					 cq->vector);
 				goto free_eq;
 			}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 99ba1c5..f79d812 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -102,6 +102,7 @@
 
 static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
 	"blueflame",
+	"phv-bit"
 };
 
 static const char main_strings[][ETH_GSTRING_LEN] = {
@@ -1797,35 +1798,49 @@
 static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
 	bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
 	bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
+	bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
+	bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
 	int i;
+	int ret = 0;
 
-	if (bf_enabled_new == bf_enabled_old)
-		return 0; /* Nothing to do */
+	if (bf_enabled_new != bf_enabled_old) {
+		if (bf_enabled_new) {
+			bool bf_supported = true;
 
-	if (bf_enabled_new) {
-		bool bf_supported = true;
+			for (i = 0; i < priv->tx_ring_num; i++)
+				bf_supported &= priv->tx_ring[i]->bf_alloced;
 
-		for (i = 0; i < priv->tx_ring_num; i++)
-			bf_supported &= priv->tx_ring[i]->bf_alloced;
+			if (!bf_supported) {
+				en_err(priv, "BlueFlame is not supported\n");
+				return -EINVAL;
+			}
 
-		if (!bf_supported) {
-			en_err(priv, "BlueFlame is not supported\n");
-			return -EINVAL;
+			priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+		} else {
+			priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
 		}
 
-		priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
-	} else {
-		priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+		for (i = 0; i < priv->tx_ring_num; i++)
+			priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+
+		en_info(priv, "BlueFlame %s\n",
+			bf_enabled_new ?  "Enabled" : "Disabled");
 	}
 
-	for (i = 0; i < priv->tx_ring_num; i++)
-		priv->tx_ring[i]->bf_enabled = bf_enabled_new;
-
-	en_info(priv, "BlueFlame %s\n",
-		bf_enabled_new ?  "Enabled" : "Disabled");
-
+	if (phv_enabled_new != phv_enabled_old) {
+		ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
+		if (ret)
+			return ret;
+		else if (phv_enabled_new)
+			priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+		else
+			priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
+		en_info(priv, "PHV bit %s\n",
+			phv_enabled_new ?  "Enabled" : "Disabled");
+	}
 	return 0;
 }
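With two private flags in play, set_priv_flags now compares old and new state per flag and touches hardware only on an actual transition; userspace drives it via ethtool --set-priv-flags <dev> phv-bit on. The transition-detection pattern in isolation, as a runnable sketch:

    #include <stdbool.h>
    #include <stdio.h>

    #define FLAG_BLUEFLAME 0x1
    #define FLAG_PHV       0x2

    static void apply_flags(unsigned int *cur, unsigned int new_flags)
    {
            bool bf_new  = new_flags & FLAG_BLUEFLAME;
            bool bf_old  = *cur & FLAG_BLUEFLAME;
            bool phv_new = new_flags & FLAG_PHV;
            bool phv_old = *cur & FLAG_PHV;

            if (bf_new != bf_old)   /* touch hardware only on a change */
                    printf("blueflame -> %s\n", bf_new ? "on" : "off");
            if (phv_new != phv_old)
                    printf("phv-bit   -> %s\n", phv_new ? "on" : "off");
            *cur = new_flags;
    }

    int main(void)
    {
            unsigned int cur = FLAG_BLUEFLAME;

            apply_flags(&cur, FLAG_BLUEFLAME | FLAG_PHV); /* only phv changes */
            apply_flags(&cur, 0);                         /* both turn off */
            return 0;
    }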
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index e0de2fd..4726122 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2184,6 +2184,25 @@
 	}
 }
 
+static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
+					      netdev_features_t features)
+{
+	struct mlx4_en_priv *en_priv = netdev_priv(netdev);
+	struct mlx4_en_dev *mdev = en_priv->mdev;
+
+	/* Since there is no support for separate RX C-TAG/S-TAG vlan accel
+	 * enable/disable, make sure the S-TAG flag is always in the same
+	 * state as the C-TAG.
+	 */
+	if (features & NETIF_F_HW_VLAN_CTAG_RX &&
+	    !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+		features |= NETIF_F_HW_VLAN_STAG_RX;
+	else
+		features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+	return features;
+}
+
 static int mlx4_en_set_features(struct net_device *netdev,
 		netdev_features_t features)
 {
@@ -2218,6 +2237,10 @@
 		en_info(priv, "Turn %s TX vlan strip offload\n",
 			(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
 
+	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
+		en_info(priv, "Turn %s TX S-VLAN strip offload\n",
+			(features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
+
 	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
 		en_info(priv, "Turn %s loopback\n",
 			(features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
@@ -2460,6 +2483,7 @@
 	.ndo_poll_controller	= mlx4_en_netpoll,
 #endif
 	.ndo_set_features	= mlx4_en_set_features,
+	.ndo_fix_features	= mlx4_en_fix_features,
 	.ndo_setup_tc		= mlx4_en_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
@@ -2500,6 +2524,7 @@
 	.ndo_poll_controller	= mlx4_en_netpoll,
 #endif
 	.ndo_set_features	= mlx4_en_set_features,
+	.ndo_fix_features	= mlx4_en_fix_features,
 	.ndo_setup_tc		= mlx4_en_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
@@ -2931,6 +2956,27 @@
 	dev->hw_features |= NETIF_F_LOOPBACK |
 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 
+	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+		dev->features |= NETIF_F_HW_VLAN_STAG_RX |
+			NETIF_F_HW_VLAN_STAG_FILTER;
+		dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+	}
+
+	if (mlx4_is_slave(mdev->dev)) {
+		int phv;
+
+		err = get_phv_bit(mdev->dev, port, &phv);
+		if (!err && phv) {
+			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+			priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+		}
+	} else {
+		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+		    !(mdev->dev->caps.flags2 &
+		      MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+	}
+
 	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
 		dev->hw_features |= NETIF_F_RXFCS;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9c145dd..4402a1e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -725,7 +725,7 @@
 
 	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
 
-	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
+	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
 	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
 		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
 		hdr += sizeof(struct vlan_hdr);
@@ -906,17 +906,25 @@
 				gro_skb->csum_level = 1;
 
 			if ((cqe->vlan_my_qpn &
-			    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
+			    cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
 			    (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
 				u16 vid = be16_to_cpu(cqe->sl_vid);
 
 				__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
+			} else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+				  MLX4_CQE_SVLAN_PRESENT_MASK) &&
+				 (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
+				__vlan_hwaccel_put_tag(gro_skb,
+						       htons(ETH_P_8021AD),
+						       be16_to_cpu(cqe->sl_vid));
 			}
 
 			if (dev->features & NETIF_F_RXHASH)
 				skb_set_hash(gro_skb,
 					     be32_to_cpu(cqe->immed_rss_invalid),
-					     PKT_HASH_TYPE_L3);
+					     (ip_summed == CHECKSUM_UNNECESSARY) ?
+						PKT_HASH_TYPE_L4 :
+						PKT_HASH_TYPE_L3);
 
 			skb_record_rx_queue(gro_skb, cq->ring);
 			skb_mark_napi_id(gro_skb, &cq->napi);
@@ -962,12 +970,19 @@
 		if (dev->features & NETIF_F_RXHASH)
 			skb_set_hash(skb,
 				     be32_to_cpu(cqe->immed_rss_invalid),
-				     PKT_HASH_TYPE_L3);
+				     (ip_summed == CHECKSUM_UNNECESSARY) ?
+					PKT_HASH_TYPE_L4 :
+					PKT_HASH_TYPE_L3);
 
 		if ((be32_to_cpu(cqe->vlan_my_qpn) &
-		    MLX4_CQE_VLAN_PRESENT_MASK) &&
+		    MLX4_CQE_CVLAN_PRESENT_MASK) &&
 		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
+		else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+			  MLX4_CQE_SVLAN_PRESENT_MASK) &&
+			 (dev->features & NETIF_F_HW_VLAN_STAG_RX))
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+					       be16_to_cpu(cqe->sl_vid));
 
 		if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
 			timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -1065,7 +1080,10 @@
 void mlx4_en_calc_rx_buf(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN;
+	/* VLAN_HLEN is added twice, to support skbs VLAN-tagged with
+	 * multiple headers (for example ETH_P_8021Q and ETH_P_8021AD).
+	 */
+	int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN);
 	int buf_size = 0;
 	int i = 0;
 
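Worked through for the default 1500-byte MTU: eff_mtu = 1500 + ETH_HLEN (14) + 2 * VLAN_HLEN (2 * 4) = 1522 bytes, so a QinQ frame carrying both an 802.1Q and an 802.1ad tag still fits in one receive buffer; the old formula reserved room for only one tag.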
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c10d98f..494e776 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -718,6 +718,7 @@
 	u32 index, bf_index;
 	__be32 op_own;
 	u16 vlan_tag = 0;
+	u16 vlan_proto = 0;
 	int i_frag;
 	int lso_header_size;
 	void *fragptr = NULL;
@@ -750,9 +751,10 @@
 		goto tx_drop;
 	}
 
-	if (skb_vlan_tag_present(skb))
+	if (skb_vlan_tag_present(skb)) {
 		vlan_tag = skb_vlan_tag_get(skb);
-
+		vlan_proto = be16_to_cpu(skb->vlan_proto);
+	}
 
 	netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
 
@@ -958,8 +960,11 @@
 		ring->bf.offset ^= ring->bf.buf_size;
 	} else {
 		tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
-		tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
-			!!skb_vlan_tag_present(skb);
+		if (vlan_proto == ETH_P_8021AD)
+			tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
+		else if (vlan_proto == ETH_P_8021Q)
+			tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
+
 		tx_desc->ctrl.fence_size = real_size;
 
 		/* Ensure new descriptor hits memory
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index e30bf57..e8ec1de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -154,6 +154,7 @@
 		[26] = "Port ETS Scheduler support",
 		[27] = "Port beacon support",
 		[28] = "RX-ALL support",
+		[29] = "802.1ad offload support",
 	};
 	int i;
 
@@ -307,6 +308,7 @@
 
 #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
 #define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
+#define QUERY_FUNC_CAP_PHV_BIT			0x40
 
 	if (vhcr->op_modifier == 1) {
 		struct mlx4_active_ports actv_ports =
@@ -351,6 +353,12 @@
 		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
 			 QUERY_FUNC_CAP_PHYS_PORT_ID);
 
+		if (dev->caps.phv_bit[port]) {
+			field = QUERY_FUNC_CAP_PHV_BIT;
+			MLX4_PUT(outbox->buf, field,
+				 QUERY_FUNC_CAP_FLAGS0_OFFSET);
+		}
+
 	} else if (vhcr->op_modifier == 0) {
 		struct mlx4_active_ports actv_ports =
 			mlx4_get_active_ports(dev, slave);
@@ -600,6 +608,9 @@
 		MLX4_GET(func_cap->phys_port_id, outbox,
 			 QUERY_FUNC_CAP_PHYS_PORT_ID);
 
+	MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+	func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT);
+
 	/* All other resources are allocated by the master, but we still report
 	 * 'num' and 'reserved' capabilities as follows:
 	 * - num remains the maximum resource index
@@ -700,6 +711,7 @@
 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
 #define QUERY_DEV_CAP_CONFIG_DEV_OFFSET		0x94
+#define QUERY_DEV_CAP_PHV_EN_OFFSET		0x96
 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
 #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET		0x9c
@@ -898,6 +910,12 @@
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
 	if (field & (1 << 2))
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
+	if (field & 0x80)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
+	if (field & 0x40)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;
+
 	MLX4_GET(dev_cap->reserved_lkey, outbox,
 		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
 	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
@@ -1992,6 +2010,10 @@
 	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
 	MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
 
+	/* phv_check enable */
+	MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
+	if (byte_field & 0x2)
+		param->phv_check_en = 1;
 out:
 	mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -2758,3 +2780,63 @@
 			    0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
 			    MLX4_CMD_NATIVE);
 }
+
+static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
+{
+#define SET_PORT_GEN_PHV_VALID	0x10
+#define SET_PORT_GEN_PHV_EN	0x80
+
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_set_port_general_context *context;
+	u32 in_mod;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	context = mailbox->buf;
+
+	context->v_ignore_fcs |=  SET_PORT_GEN_PHV_VALID;
+	if (phv_bit)
+		context->phv_en |=  SET_PORT_GEN_PHV_EN;
+
+	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+		       MLX4_CMD_NATIVE);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
+{
+	int err;
+	struct mlx4_func_cap func_cap;
+
+	memset(&func_cap, 0, sizeof(func_cap));
+	err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
+	if (!err)
+		*phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT;
+	return err;
+}
+EXPORT_SYMBOL(get_phv_bit);
+
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
+{
+	int ret;
+
+	if (mlx4_is_slave(dev))
+		return -EPERM;
+
+	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+		ret = mlx4_SET_PORT_phv_bit(dev, port, new_val);
+		if (!ret)
+			dev->caps.phv_bit[port] = new_val;
+		return ret;
+	}
+
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(set_phv_bit);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 07cb7c2..08de555 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -204,6 +204,7 @@
 	u16 cqe_size; /* For use only when CQE stride feature enabled */
 	u16 eqe_size; /* For use only when EQE stride feature enabled */
 	u8 rss_ip_frags;
+	u8 phv_check_en; /* for QUERY_HCA */
 };
 
 struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 29c2a01..006757f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -405,6 +405,21 @@
 	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
 	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
 
+	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
+		struct mlx4_init_hca_param hca_param;
+
+		memset(&hca_param, 0, sizeof(hca_param));
+		err = mlx4_QUERY_HCA(dev, &hca_param);
+		/* Turn off the PHV_EN flag in case phv_check_en is set.
+		 * phv_check_en is a HW check that parses the packet and
+		 * verifies the phv bit was reported correctly in the wqe.
+		 * To allow QinQ, the PHV_EN flag should be set and
+		 * phv_check_en must be cleared; otherwise QinQ packets
+		 * will be dropped by the HW.
+		 */
+		if (err || hca_param.phv_check_en)
+			dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
+	}
+
 	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
 	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
 		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -2654,9 +2669,14 @@
 
 	if (msi_x) {
 		int nreq = dev->caps.num_ports * num_online_cpus() + 1;
+		bool shared_ports = false;
 
 		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
 			     nreq);
+		if (nreq > MAX_MSIX) {
+			nreq = MAX_MSIX;
+			shared_ports = true;
+		}
 
 		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
 		if (!entries)
@@ -2679,6 +2699,9 @@
 		bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
 			    dev->caps.num_ports);
 
+		if (MLX4_IS_LEGACY_EQ_MODE(dev->caps))
+			shared_ports = true;
+
 		for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
 			if (i == MLX4_EQ_ASYNC)
 				continue;
@@ -2686,7 +2709,7 @@
 			priv->eq_table.eq[i].irq =
 				entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
 
-			if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
+			if (shared_ports) {
 				bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
 					    dev->caps.num_ports);
 				/* We don't set affinity hint when there
@@ -2912,6 +2935,8 @@
 {
 	u64 dev_flags = dev->flags;
 	int err = 0;
+	int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
+					MLX4_MAX_NUM_VF);
 
 	if (reset_flow) {
 		dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
@@ -2937,6 +2962,12 @@
 	}
 
 	if (!(dev->flags &  MLX4_FLAG_SRIOV)) {
+		if (total_vfs > fw_enabled_sriov_vfs) {
+			mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n",
+				 total_vfs, fw_enabled_sriov_vfs);
+			err = -ENOMEM;
+			goto disable_sriov;
+		}
 		mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
 		err = pci_enable_sriov(pdev, total_vfs);
 	}
@@ -3418,20 +3449,20 @@
 			goto err_disable_pdev;
 		}
 	}
-	if (total_vfs >= MLX4_MAX_NUM_VF) {
+	if (total_vfs > MLX4_MAX_NUM_VF) {
 		dev_err(&pdev->dev,
-			"Requested more VF's (%d) than allowed (%d)\n",
-			total_vfs, MLX4_MAX_NUM_VF - 1);
+			"Requested more VF's (%d) than allowed by hw (%d)\n",
+			total_vfs, MLX4_MAX_NUM_VF);
 		err = -EINVAL;
 		goto err_disable_pdev;
 	}
 
 	for (i = 0; i < MLX4_MAX_PORTS; i++) {
-		if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
+		if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
 			dev_err(&pdev->dev,
-				"Requested more VF's (%d) for port (%d) than allowed (%d)\n",
+				"Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n",
 				nvfs[i] + nvfs[2], i + 1,
-				MLX4_MAX_NUM_VF_P_PORT - 1);
+				MLX4_MAX_NUM_VF_P_PORT);
 			err = -EINVAL;
 			goto err_disable_pdev;
 		}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index a092c5c..232b2b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -787,6 +787,9 @@
 	u8 pprx;
 	u8 pfcrx;
 	u16 reserved4;
+	u32 reserved5;
+	u8 phv_en;
+	u8 reserved6[3];
 };
 
 struct mlx4_set_port_rqp_calc_context {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 666d166..defcf8c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -95,6 +95,7 @@
  */
 
 #define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1
+#define MLX4_EN_PRIV_FLAGS_PHV	     2
 
 #define MLX4_EN_WATCHDOG_TIMEOUT	(15 * HZ)
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 0715b49..6cb3830 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -45,15 +45,34 @@
  * register it in a memory region at HCA virtual address 0.
  */
 
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
+					   size_t size, dma_addr_t *dma_handle,
+					   int node)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	int original_node;
+	void *cpu_handle;
+
+	mutex_lock(&priv->alloc_mutex);
+	original_node = dev_to_node(&dev->pdev->dev);
+	set_dev_node(&dev->pdev->dev, node);
+	cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
+					 dma_handle, GFP_KERNEL);
+	set_dev_node(&dev->pdev->dev, original_node);
+	mutex_unlock(&priv->alloc_mutex);
+	return cpu_handle;
+}
+
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+			struct mlx5_buf *buf, int node)
 {
 	dma_addr_t t;
 
 	buf->size = size;
 	buf->npages       = 1;
 	buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
-	buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev,
-						size, &t, GFP_KERNEL);
+	buf->direct.buf   = mlx5_dma_zalloc_coherent_node(dev, size,
+							  &t, node);
 	if (!buf->direct.buf)
 		return -ENOMEM;
 
@@ -66,6 +85,11 @@
 
 	return 0;
 }
+
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+{
+	return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
+}
 EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
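mlx5_dma_zalloc_coherent_node() buys NUMA locality by briefly rebinding the device's node around the coherent allocation, with alloc_mutex serializing the rebind since the node is global device state. A hedged userspace analogue of that save/override/restore pattern (calloc stands in for the real DMA allocator):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fake_dev {
            int node;               /* stands in for dev_to_node() state */
            pthread_mutex_t lock;
    };

    static void *alloc_on_node(struct fake_dev *dev, size_t size, int node)
    {
            int saved;
            void *buf;

            pthread_mutex_lock(&dev->lock); /* node field is shared state */
            saved = dev->node;
            dev->node = node;               /* allocator would honor this */
            buf = calloc(1, size);          /* stand-in for the DMA alloc */
            dev->node = saved;              /* restore before unlocking */
            pthread_mutex_unlock(&dev->lock);
            return buf;
    }

    int main(void)
    {
            static struct fake_dev dev = {
                    .node = 0,
                    .lock = PTHREAD_MUTEX_INITIALIZER,
            };

            free(alloc_on_node(&dev, 64, 1));
            printf("node restored to %d\n", dev.node);
            return 0;
    }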
 
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
@@ -75,7 +99,8 @@
 }
 EXPORT_SYMBOL_GPL(mlx5_buf_free);
 
-static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
+static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
+						 int node)
 {
 	struct mlx5_db_pgdir *pgdir;
 
@@ -84,8 +109,9 @@
 		return NULL;
 
 	bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
-	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
-					    &pgdir->db_dma, GFP_KERNEL);
+
+	pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
+						       &pgdir->db_dma, node);
 	if (!pgdir->db_page) {
 		kfree(pgdir);
 		return NULL;
@@ -118,7 +144,7 @@
 	return 0;
 }
 
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
 {
 	struct mlx5_db_pgdir *pgdir;
 	int ret = 0;
@@ -129,7 +155,7 @@
 		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
 			goto out;
 
-	pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev));
+	pgdir = mlx5_alloc_db_pgdir(dev, node);
 	if (!pgdir) {
 		ret = -ENOMEM;
 		goto out;
@@ -145,6 +171,12 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
+
+int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+	return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
 EXPORT_SYMBOL_GPL(mlx5_db_alloc);
 
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 3d23bd6..0983a20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -42,24 +42,27 @@
 
 #define MLX5E_MAX_NUM_TC	8
 
-#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x7
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
 #define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
 #define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd
 
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x7
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x1
 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd
 
-#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (16 * 1024)
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
-#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ         0x7
 
+#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
+#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
+#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
 #define MLX5E_TX_CQ_POLL_BUDGET        128
 #define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
+#define MLX5E_SQ_BF_BUDGET             16
 
 static const char vport_strings[][ETH_GSTRING_LEN] = {
 	/* vport statistics */
@@ -91,6 +94,7 @@
 	"lro_bytes",
 	"rx_csum_good",
 	"rx_csum_none",
+	"rx_csum_sw",
 	"tx_csum_offload",
 	"tx_queue_stopped",
 	"tx_queue_wake",
@@ -128,18 +132,94 @@
 	u64 lro_bytes;
 	u64 rx_csum_good;
 	u64 rx_csum_none;
+	u64 rx_csum_sw;
 	u64 tx_csum_offload;
 	u64 tx_queue_stopped;
 	u64 tx_queue_wake;
 	u64 tx_queue_dropped;
 	u64 rx_wqe_err;
 
-#define NUM_VPORT_COUNTERS     31
+#define NUM_VPORT_COUNTERS     32
+};
+
+static const char pport_strings[][ETH_GSTRING_LEN] = {
+	/* IEEE802.3 counters */
+	"frames_tx",
+	"frames_rx",
+	"check_seq_err",
+	"alignment_err",
+	"octets_tx",
+	"octets_received",
+	"multicast_xmitted",
+	"broadcast_xmitted",
+	"multicast_rx",
+	"broadcast_rx",
+	"in_range_len_errors",
+	"out_of_range_len",
+	"too_long_errors",
+	"symbol_err",
+	"mac_control_tx",
+	"mac_control_rx",
+	"unsupported_op_rx",
+	"pause_ctrl_rx",
+	"pause_ctrl_tx",
+
+	/* RFC2863 counters */
+	"in_octets",
+	"in_ucast_pkts",
+	"in_discards",
+	"in_errors",
+	"in_unknown_protos",
+	"out_octets",
+	"out_ucast_pkts",
+	"out_discards",
+	"out_errors",
+	"in_multicast_pkts",
+	"in_broadcast_pkts",
+	"out_multicast_pkts",
+	"out_broadcast_pkts",
+
+	/* RFC2819 counters */
+	"drop_events",
+	"octets",
+	"pkts",
+	"broadcast_pkts",
+	"multicast_pkts",
+	"crc_align_errors",
+	"undersize_pkts",
+	"oversize_pkts",
+	"fragments",
+	"jabbers",
+	"collisions",
+	"p64octets",
+	"p65to127octets",
+	"p128to255octets",
+	"p256to511octets",
+	"p512to1023octets",
+	"p1024to1518octets",
+	"p1519to2047octets",
+	"p2048to4095octets",
+	"p4096to8191octets",
+	"p8192to10239octets",
+};
+
+#define NUM_IEEE_802_3_COUNTERS		19
+#define NUM_RFC_2863_COUNTERS		13
+#define NUM_RFC_2819_COUNTERS		21
+#define NUM_PPORT_COUNTERS		(NUM_IEEE_802_3_COUNTERS + \
+					 NUM_RFC_2863_COUNTERS + \
+					 NUM_RFC_2819_COUNTERS)
+
+struct mlx5e_pport_stats {
+	__be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS];
+	__be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS];
+	__be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS];
 };
 
 static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
 	"packets",
 	"csum_none",
+	"csum_sw",
 	"lro_packets",
 	"lro_bytes",
 	"wqe_err"
@@ -148,10 +228,11 @@
 struct mlx5e_rq_stats {
 	u64 packets;
 	u64 csum_none;
+	u64 csum_sw;
 	u64 lro_packets;
 	u64 lro_bytes;
 	u64 wqe_err;
-#define NUM_RQ_STATS 5
+#define NUM_RQ_STATS 6
 };
 
 static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
@@ -179,6 +260,7 @@
 
 struct mlx5e_stats {
 	struct mlx5e_vport_stats   vport;
+	struct mlx5e_pport_stats   pport;
 };
 
 struct mlx5e_params {
@@ -192,9 +274,12 @@
 	u16 tx_cq_moderation_usec;
 	u16 tx_cq_moderation_pkts;
 	u16 min_rx_wqes;
-	u16 rx_hash_log_tbl_sz;
 	bool lro_en;
 	u32 lro_wqe_sz;
+	u16 tx_max_inline;
+	u8  rss_hfunc;
+	u8  toeplitz_hash_key[40];
+	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
 };
 
 enum {
@@ -214,6 +299,7 @@
 	struct napi_struct        *napi;
 	struct mlx5_core_cq        mcq;
 	struct mlx5e_channel      *channel;
+	struct mlx5e_priv         *priv;
 
 	/* control */
 	struct mlx5_wq_ctrl        wq_ctrl;
@@ -237,6 +323,7 @@
 	struct mlx5_wq_ctrl    wq_ctrl;
 	u32                    rqn;
 	struct mlx5e_channel  *channel;
+	struct mlx5e_priv     *priv;
 } ____cacheline_aligned_in_smp;
 
 struct mlx5e_tx_skb_cb {
@@ -266,7 +353,9 @@
 	/* dirtied @xmit */
 	u16                        pc ____cacheline_aligned_in_smp;
 	u32                        dma_fifo_pc;
-	u32                        bf_offset;
+	u16                        bf_offset;
+	u16                        prev_cc;
+	u8                         bf_budget;
 	struct mlx5e_sq_stats      stats;
 
 	struct mlx5e_cq            cq;
@@ -279,9 +368,10 @@
 	struct mlx5_wq_cyc         wq;
 	u32                        dma_fifo_mask;
 	void __iomem              *uar_map;
+	void __iomem              *uar_bf_map;
 	struct netdev_queue       *txq;
 	u32                        sqn;
-	u32                        bf_buf_size;
+	u16                        bf_buf_size;
 	u16                        max_inline;
 	u16                        edge;
 	struct device             *pdev;
@@ -315,7 +405,6 @@
 	__be32                     mkey_be;
 	u8                         num_tc;
 	unsigned long              flags;
-	int                        tc_to_txq_map[MLX5E_MAX_NUM_TC];
 
 	/* control */
 	struct mlx5e_priv         *priv;
@@ -324,20 +413,24 @@
 };
 
 enum mlx5e_traffic_types {
-	MLX5E_TT_IPV4_TCP = 0,
-	MLX5E_TT_IPV6_TCP = 1,
-	MLX5E_TT_IPV4_UDP = 2,
-	MLX5E_TT_IPV6_UDP = 3,
-	MLX5E_TT_IPV4     = 4,
-	MLX5E_TT_IPV6     = 5,
-	MLX5E_TT_ANY      = 6,
-	MLX5E_NUM_TT      = 7,
+	MLX5E_TT_IPV4_TCP,
+	MLX5E_TT_IPV6_TCP,
+	MLX5E_TT_IPV4_UDP,
+	MLX5E_TT_IPV6_UDP,
+	MLX5E_TT_IPV4_IPSEC_AH,
+	MLX5E_TT_IPV6_IPSEC_AH,
+	MLX5E_TT_IPV4_IPSEC_ESP,
+	MLX5E_TT_IPV6_IPSEC_ESP,
+	MLX5E_TT_IPV4,
+	MLX5E_TT_IPV6,
+	MLX5E_TT_ANY,
+	MLX5E_NUM_TT,
 };
 
-enum {
-	MLX5E_RQT_SPREADING  = 0,
-	MLX5E_RQT_DEFAULT_RQ = 1,
-	MLX5E_NUM_RQT        = 2,
+enum mlx5e_rqt_ix {
+	MLX5E_INDIRECTION_RQT,
+	MLX5E_SINGLE_RQ_RQT,
+	MLX5E_NUM_RQT,
 };
 
 struct mlx5e_eth_addr_info {
@@ -362,10 +455,10 @@
 enum {
 	MLX5E_STATE_ASYNC_EVENTS_ENABLE,
 	MLX5E_STATE_OPENED,
+	MLX5E_STATE_DESTROYING,
 };
 
 struct mlx5e_vlan_db {
-	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 	u32           active_vlans_ft_ix[VLAN_N_VID];
 	u32           untagged_rule_ft_ix;
 	u32           any_vlan_rule_ft_ix;
@@ -379,9 +472,9 @@
 
 struct mlx5e_priv {
 	/* priv data path fields - start */
-	int                        num_tc;
 	int                        default_vlan_prio;
 	struct mlx5e_sq            **txq_to_sq_map;
+	int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
 	/* priv data path fields - end */
 
 	unsigned long              state;
@@ -390,10 +483,11 @@
 	u32                        pdn;
 	u32                        tdn;
 	struct mlx5_core_mr        mr;
+	struct mlx5e_rq            drop_rq;
 
 	struct mlx5e_channel     **channel;
 	u32                        tisn[MLX5E_MAX_NUM_TC];
-	u32                        rqtn;
+	u32                        rqtn[MLX5E_NUM_RQT];
 	u32                        tirn[MLX5E_NUM_TT];
 
 	struct mlx5e_flow_table    ft;
@@ -470,10 +564,9 @@
 
 void mlx5e_update_stats(struct mlx5e_priv *priv);
 
-int mlx5e_open_flow_table(struct mlx5e_priv *priv);
-void mlx5e_close_flow_table(struct mlx5e_priv *priv);
+int mlx5e_create_flow_tables(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv);
 void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
-void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
 void mlx5e_set_rx_mode_work(struct work_struct *work);
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -482,17 +575,17 @@
 			   u16 vid);
 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
-int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
-void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
-			     struct mlx5e_params *new_params);
 
 static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
-				      struct mlx5e_tx_wqe *wqe)
+				      struct mlx5e_tx_wqe *wqe, int bf_sz)
 {
+	u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
+
 	/* ensure wqe is visible to device before updating doorbell record */
 	dma_wmb();
 
@@ -503,9 +596,15 @@
 	 */
 	wmb();
 
-	mlx5_write64((__be32 *)&wqe->ctrl,
-		     sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
-		     NULL);
+	if (bf_sz) {
+		__iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
+
+		/* flush the write-combining mapped buffer */
+		wmb();
+
+	} else {
+		mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+	}
 
 	sq->bf_offset ^= sq->bf_buf_size;
 }
@@ -519,3 +618,4 @@
 }
 
 extern const struct ethtool_ops mlx5e_ethtool_ops;
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3889384..bce9126 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -171,9 +171,9 @@
 
 	switch (sset) {
 	case ETH_SS_STATS:
-		return NUM_VPORT_COUNTERS +
+		return NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
 		       priv->params.num_channels * NUM_RQ_STATS +
-		       priv->params.num_channels * priv->num_tc *
+		       priv->params.num_channels * priv->params.num_tc *
 						   NUM_SQ_STATS;
 	/* fallthrough */
 	default:
@@ -200,6 +200,11 @@
 			strcpy(data + (idx++) * ETH_GSTRING_LEN,
 			       vport_strings[i]);
 
+		/* PPORT counters */
+		for (i = 0; i < NUM_PPORT_COUNTERS; i++)
+			strcpy(data + (idx++) * ETH_GSTRING_LEN,
+			       pport_strings[i]);
+
 		/* per channel counters */
 		for (i = 0; i < priv->params.num_channels; i++)
 			for (j = 0; j < NUM_RQ_STATS; j++)
@@ -207,7 +212,7 @@
 					"rx%d_%s", i, rq_stats_strings[j]);
 
 		for (i = 0; i < priv->params.num_channels; i++)
-			for (tc = 0; tc < priv->num_tc; tc++)
+			for (tc = 0; tc < priv->params.num_tc; tc++)
 				for (j = 0; j < NUM_SQ_STATS; j++)
 					sprintf(data +
 						(idx++) * ETH_GSTRING_LEN,
@@ -234,6 +239,9 @@
 	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
 		data[idx++] = ((u64 *)&priv->stats.vport)[i];
 
+	for (i = 0; i < NUM_PPORT_COUNTERS; i++)
+		data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]);
+
 	/* per channel counters */
 	for (i = 0; i < priv->params.num_channels; i++)
 		for (j = 0; j < NUM_RQ_STATS; j++)
@@ -242,7 +250,7 @@
 				       ((u64 *)&priv->channel[i]->rq.stats)[j];
 
 	for (i = 0; i < priv->params.num_channels; i++)
-		for (tc = 0; tc < priv->num_tc; tc++)
+		for (tc = 0; tc < priv->params.num_tc; tc++)
 			for (j = 0; j < NUM_SQ_STATS; j++)
 				data[idx++] = !test_bit(MLX5E_STATE_OPENED,
 							&priv->state) ? 0 :
@@ -264,7 +272,7 @@
 			       struct ethtool_ringparam *param)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_params new_params;
+	bool was_opened;
 	u16 min_rx_wqes;
 	u8 log_rq_size;
 	u8 log_sq_size;
@@ -316,11 +324,18 @@
 		return 0;
 
 	mutex_lock(&priv->state_lock);
-	new_params = priv->params;
-	new_params.log_rq_size = log_rq_size;
-	new_params.log_sq_size = log_sq_size;
-	new_params.min_rx_wqes = min_rx_wqes;
-	err = mlx5e_update_priv_params(priv, &new_params);
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (was_opened)
+		mlx5e_close_locked(dev);
+
+	priv->params.log_rq_size = log_rq_size;
+	priv->params.log_sq_size = log_sq_size;
+	priv->params.min_rx_wqes = min_rx_wqes;
+
+	if (was_opened)
+		err = mlx5e_open_locked(dev);
+
 	mutex_unlock(&priv->state_lock);
 
 	return err;
@@ -342,7 +357,7 @@
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
 	unsigned int count = ch->combined_count;
-	struct mlx5e_params new_params;
+	bool was_opened;
 	int err = 0;
 
 	if (!count) {
@@ -365,9 +380,16 @@
 		return 0;
 
 	mutex_lock(&priv->state_lock);
-	new_params = priv->params;
-	new_params.num_channels = count;
-	err = mlx5e_update_priv_params(priv, &new_params);
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (was_opened)
+		mlx5e_close_locked(dev);
+
+	priv->params.num_channels = count;
+
+	if (was_opened)
+		err = mlx5e_open_locked(dev);
+
 	mutex_unlock(&priv->state_lock);
 
 	return err;
@@ -606,7 +628,7 @@
 	u32 link_modes;
 	u32 speed;
 	u32 eth_proto_cap, eth_proto_admin;
-	u8 port_status;
+	enum mlx5_port_status ps;
 	int err;
 
 	speed = ethtool_cmd_speed(cmd);
@@ -640,25 +662,197 @@
 	if (link_modes == eth_proto_admin)
 		goto out;
 
-	err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
-	if (err) {
-		netdev_err(netdev, "%s: set port eth proto admin failed: %d\n",
-			   __func__, err);
-		goto out;
+	mlx5_query_port_admin_status(mdev, &ps);
+	if (ps == MLX5_PORT_UP)
+		mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
+	mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+	if (ps == MLX5_PORT_UP)
+		mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
+
+out:
+	return err;
+}
+
+static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	return sizeof(priv->params.toeplitz_hash_key);
+}
+
+static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
+{
+	return MLX5E_INDIR_RQT_SIZE;
+}
+
+static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+			  u8 *hfunc)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	if (indir)
+		memcpy(indir, priv->params.indirection_rqt,
+		       sizeof(priv->params.indirection_rqt));
+
+	if (key)
+		memcpy(key, priv->params.toeplitz_hash_key,
+		       sizeof(priv->params.toeplitz_hash_key));
+
+	if (hfunc)
+		*hfunc = priv->params.rss_hfunc;
+
+	return 0;
+}
+
+static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
+			  const u8 *key, const u8 hfunc)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	bool close_open;
+	int err = 0;
+
+	if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
+	    (hfunc != ETH_RSS_HASH_XOR) &&
+	    (hfunc != ETH_RSS_HASH_TOP))
+		return -EINVAL;
+
+	mutex_lock(&priv->state_lock);
+
+	if (indir) {
+		memcpy(priv->params.indirection_rqt, indir,
+		       sizeof(priv->params.indirection_rqt));
+		mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
 	}
 
-	err = mlx5_query_port_status(mdev, &port_status);
-	if (err)
-		goto out;
+	close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) &&
+		     test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (close_open)
+		mlx5e_close_locked(dev);
 
-	if (port_status == MLX5_PORT_DOWN)
-		return 0;
+	if (key)
+		memcpy(priv->params.toeplitz_hash_key, key,
+		       sizeof(priv->params.toeplitz_hash_key));
 
-	err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
-	if (err)
-		goto out;
-	err = mlx5_set_port_status(mdev, MLX5_PORT_UP);
-out:
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE)
+		priv->params.rss_hfunc = hfunc;
+
+	if (close_open)
+		err = mlx5e_open_locked(priv->netdev);
+
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+static int mlx5e_get_rxnfc(struct net_device *netdev,
+			   struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err = 0;
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = priv->params.num_channels;
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static int mlx5e_get_tunable(struct net_device *dev,
+			     const struct ethtool_tunable *tuna,
+			     void *data)
+{
+	const struct mlx5e_priv *priv = netdev_priv(dev);
+	int err = 0;
+
+	switch (tuna->id) {
+	case ETHTOOL_TX_COPYBREAK:
+		*(u32 *)data = priv->params.tx_max_inline;
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+static int mlx5e_set_tunable(struct net_device *dev,
+			     const struct ethtool_tunable *tuna,
+			     const void *data)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	bool was_opened;
+	u32 val;
+	int err = 0;
+
+	switch (tuna->id) {
+	case ETHTOOL_TX_COPYBREAK:
+		val = *(u32 *)data;
+		if (val > mlx5e_get_max_inline_cap(mdev)) {
+			err = -EINVAL;
+			break;
+		}
+
+		mutex_lock(&priv->state_lock);
+
+		was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+		if (was_opened)
+			mlx5e_close_locked(dev);
+
+		priv->params.tx_max_inline = val;
+
+		if (was_opened)
+			err = mlx5e_open_locked(dev);
+
+		mutex_unlock(&priv->state_lock);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+static void mlx5e_get_pauseparam(struct net_device *netdev,
+				 struct ethtool_pauseparam *pauseparam)
+{
+	struct mlx5e_priv *priv    = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int err;
+
+	err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause,
+				    &pauseparam->tx_pause);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5_query_port_pause failed:0x%x\n",
+			   __func__, err);
+	}
+}
+
+static int mlx5e_set_pauseparam(struct net_device *netdev,
+				struct ethtool_pauseparam *pauseparam)
+{
+	struct mlx5e_priv *priv    = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int err;
+
+	if (pauseparam->autoneg)
+		return -EINVAL;
+
+	err = mlx5_set_port_pause(mdev,
+				  pauseparam->rx_pause ? 1 : 0,
+				  pauseparam->tx_pause ? 1 : 0);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5_set_port_pause failed:0x%x\n",
+			   __func__, err);
+	}
+
 	return err;
 }
 
@@ -676,4 +870,13 @@
 	.set_coalesce      = mlx5e_set_coalesce,
 	.get_settings      = mlx5e_get_settings,
 	.set_settings      = mlx5e_set_settings,
+	.get_rxfh_key_size   = mlx5e_get_rxfh_key_size,
+	.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
+	.get_rxfh          = mlx5e_get_rxfh,
+	.set_rxfh          = mlx5e_set_rxfh,
+	.get_rxnfc         = mlx5e_get_rxnfc,
+	.get_tunable       = mlx5e_get_tunable,
+	.set_tunable       = mlx5e_set_tunable,
+	.get_pauseparam    = mlx5e_get_pauseparam,
+	.set_pauseparam    = mlx5e_set_pauseparam,
 };
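
For orientation, the new get/set_rxfh ops expose the Toeplitz key, the RSS indirection table, and the hash function (Toeplitz or XOR). The table semantics mirror what the driver later programs into the RQT: the packet's RSS hash picks an indirection slot, and the slot value — wrapped to the active channel count — picks the channel. A standalone illustration; the size constant is a stand-in for MLX5E_INDIR_RQT_SIZE, whose value is not visible in this diff:

#include <stdint.h>

#define INDIR_SIZE 128	/* stand-in for MLX5E_INDIR_RQT_SIZE */

/* Illustration only: resolve an RSS hash to a channel the way the
 * indirection RQT is filled — indirection lookup first, then wrap to
 * the number of configured channels.
 */
static unsigned int rss_pick_channel(uint32_t hash,
				     const uint32_t indir[INDIR_SIZE],
				     unsigned int num_channels)
{
	return indir[hash % INDIR_SIZE] % num_channels;
}
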
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
index 120db80..e71563c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -105,25 +105,41 @@
 {
 	void *ft = priv->ft.main;
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
+		mlx5_del_flow_table_entry(ft,
+					  ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);
+
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
+		mlx5_del_flow_table_entry(ft,
+					  ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);
+
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
+		mlx5_del_flow_table_entry(ft,
+					  ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);
+
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
+		mlx5_del_flow_table_entry(ft,
+					  ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);
+
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+	if (ai->tt_vec & BIT(MLX5E_TT_ANY))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
 }
 
@@ -156,33 +172,37 @@
 		switch (eth_addr_type) {
 		case MLX5E_UC:
 			ret =
-				(1 << MLX5E_TT_IPV4_TCP) |
-				(1 << MLX5E_TT_IPV6_TCP) |
-				(1 << MLX5E_TT_IPV4_UDP) |
-				(1 << MLX5E_TT_IPV6_UDP) |
-				(1 << MLX5E_TT_IPV4)     |
-				(1 << MLX5E_TT_IPV6)     |
-				(1 << MLX5E_TT_ANY)      |
+				BIT(MLX5E_TT_IPV4_TCP)       |
+				BIT(MLX5E_TT_IPV6_TCP)       |
+				BIT(MLX5E_TT_IPV4_UDP)       |
+				BIT(MLX5E_TT_IPV6_UDP)       |
+				BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
+				BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
+				BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+				BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+				BIT(MLX5E_TT_IPV4)           |
+				BIT(MLX5E_TT_IPV6)           |
+				BIT(MLX5E_TT_ANY)            |
 				0;
 			break;
 
 		case MLX5E_MC_IPV4:
 			ret =
-				(1 << MLX5E_TT_IPV4_UDP) |
-				(1 << MLX5E_TT_IPV4)     |
+				BIT(MLX5E_TT_IPV4_UDP)       |
+				BIT(MLX5E_TT_IPV4)           |
 				0;
 			break;
 
 		case MLX5E_MC_IPV6:
 			ret =
-				(1 << MLX5E_TT_IPV6_UDP) |
-				(1 << MLX5E_TT_IPV6)     |
+				BIT(MLX5E_TT_IPV6_UDP)       |
+				BIT(MLX5E_TT_IPV6)           |
 				0;
 			break;
 
 		case MLX5E_MC_OTHER:
 			ret =
-				(1 << MLX5E_TT_ANY)      |
+				BIT(MLX5E_TT_ANY)            |
 				0;
 			break;
 		}
@@ -191,23 +211,27 @@
 
 	case MLX5E_ALLMULTI:
 		ret =
-			(1 << MLX5E_TT_IPV4_UDP) |
-			(1 << MLX5E_TT_IPV6_UDP) |
-			(1 << MLX5E_TT_IPV4)     |
-			(1 << MLX5E_TT_IPV6)     |
-			(1 << MLX5E_TT_ANY)      |
+			BIT(MLX5E_TT_IPV4_UDP) |
+			BIT(MLX5E_TT_IPV6_UDP) |
+			BIT(MLX5E_TT_IPV4)     |
+			BIT(MLX5E_TT_IPV6)     |
+			BIT(MLX5E_TT_ANY)      |
 			0;
 		break;
 
 	default: /* MLX5E_PROMISC */
 		ret =
-			(1 << MLX5E_TT_IPV4_TCP) |
-			(1 << MLX5E_TT_IPV6_TCP) |
-			(1 << MLX5E_TT_IPV4_UDP) |
-			(1 << MLX5E_TT_IPV6_UDP) |
-			(1 << MLX5E_TT_IPV4)     |
-			(1 << MLX5E_TT_IPV6)     |
-			(1 << MLX5E_TT_ANY)      |
+			BIT(MLX5E_TT_IPV4_TCP)       |
+			BIT(MLX5E_TT_IPV6_TCP)       |
+			BIT(MLX5E_TT_IPV4_UDP)       |
+			BIT(MLX5E_TT_IPV6_UDP)       |
+			BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
+			BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
+			BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+			BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+			BIT(MLX5E_TT_IPV4)           |
+			BIT(MLX5E_TT_IPV6)           |
+			BIT(MLX5E_TT_ANY)            |
 			0;
 		break;
 	}
@@ -226,6 +250,7 @@
 	u8   *match_criteria_dmac;
 	void *ft   = priv->ft.main;
 	u32  *tirn = priv->tirn;
+	u32  *ft_ix;
 	u32  tt_vec;
 	int  err;
 
@@ -261,51 +286,51 @@
 
 	tt_vec = mlx5e_get_tt_vec(ai, type);
 
-	if (tt_vec & (1 << MLX5E_TT_ANY)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
+	if (tt_vec & BIT(MLX5E_TT_ANY)) {
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_ANY]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_ANY]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_ANY);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_ANY);
 	}
 
 	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
 			 outer_headers.ethertype);
 
-	if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
+	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IP);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV4]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV4]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
 	}
 
-	if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
+	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IPV6);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV6]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV6]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
 	}
 
 	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
@@ -313,70 +338,141 @@
 	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
 		 IPPROTO_UDP);
 
-	if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
+	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IP);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV4_UDP]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV4_UDP]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
 	}
 
-	if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
+	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IPV6);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV6_UDP]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV6_UDP]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
 	}
 
 	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
 		 IPPROTO_TCP);
 
-	if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
+	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IP);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV4_TCP]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV4_TCP]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
 	}
 
-	if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
+	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IPV6);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV6_TCP]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV6_TCP]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
+	}
+
+	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+		 IPPROTO_AH);
+
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
+	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IP);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV4_IPSEC_AH]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
+	}
+
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
+	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IPV6);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV6_IPSEC_AH]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
+	}
+
+	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+		 IPPROTO_ESP);
+
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
+	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IP);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
+	}
+
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
+	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IPV6);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
 	}
 
 	return 0;
+
+err_del_ai:
+	mlx5e_del_eth_addr_from_flow_table(priv, ai);
+
+	return err;
 }
 
 static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
@@ -498,44 +594,28 @@
 
 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
 {
-	WARN_ON(!mutex_is_locked(&priv->state_lock));
+	if (!priv->vlan.filter_disabled)
+		return;
 
-	if (priv->vlan.filter_disabled) {
-		priv->vlan.filter_disabled = false;
-		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
-					    0);
-	}
+	priv->vlan.filter_disabled = false;
+	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
 }
 
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
 {
-	WARN_ON(!mutex_is_locked(&priv->state_lock));
+	if (priv->vlan.filter_disabled)
+		return;
 
-	if (!priv->vlan.filter_disabled) {
-		priv->vlan.filter_disabled = true;
-		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
-					    0);
-	}
+	priv->vlan.filter_disabled = true;
+	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
 }
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
 			  u16 vid)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	int err = 0;
 
-	mutex_lock(&priv->state_lock);
-
-	set_bit(vid, priv->vlan.active_vlans);
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
-					  vid);
-
-	mutex_unlock(&priv->state_lock);
-
-	return err;
+	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
 }
 
 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -543,56 +623,11 @@
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 
-	mutex_lock(&priv->state_lock);
-
-	clear_bit(vid, priv->vlan.active_vlans);
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-
-	mutex_unlock(&priv->state_lock);
+	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
 
 	return 0;
 }
 
-int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
-{
-	u16 vid;
-	int err;
-
-	for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
-		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
-					  vid);
-		if (err)
-			return err;
-	}
-
-	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-	if (err)
-		return err;
-
-	if (priv->vlan.filter_disabled) {
-		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
-					  0);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
-{
-	u16 vid;
-
-	if (priv->vlan.filter_disabled)
-		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
-
-	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-
-	for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
-		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-}
-
 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
 	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
 		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
@@ -656,18 +691,21 @@
 	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
 		hn->action = MLX5E_ACTION_DEL;
 
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
 		mlx5e_sync_netdev_addr(priv);
 
 	mlx5e_apply_netdev_addr(priv);
 }
 
-void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
+void mlx5e_set_rx_mode_work(struct work_struct *work)
 {
+	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+					       set_rx_mode_work);
+
 	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
 	struct net_device *ndev = priv->netdev;
 
-	bool rx_mode_enable   = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
 	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
 	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
 	bool broadcast_enabled = rx_mode_enable;
@@ -700,17 +738,6 @@
 	ea->broadcast_enabled = broadcast_enabled;
 }
 
-void mlx5e_set_rx_mode_work(struct work_struct *work)
-{
-	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
-					       set_rx_mode_work);
-
-	mutex_lock(&priv->state_lock);
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-		mlx5e_set_rx_mode_core(priv);
-	mutex_unlock(&priv->state_lock);
-}
-
 void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
 {
 	ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
@@ -725,7 +752,7 @@
 	if (!g)
 		return -ENOMEM;
 
-	g[0].log_sz = 2;
+	g[0].log_sz = 3;
 	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
 			 outer_headers.ethertype);
@@ -833,7 +860,7 @@
 	mlx5_destroy_flow_table(priv->ft.vlan);
 }
 
-int mlx5e_open_flow_table(struct mlx5e_priv *priv)
+int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
 {
 	int err;
 
@@ -845,16 +872,24 @@
 	if (err)
 		goto err_destroy_main_flow_table;
 
+	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+	if (err)
+		goto err_destroy_vlan_flow_table;
+
 	return 0;
 
+err_destroy_vlan_flow_table:
+	mlx5e_destroy_vlan_flow_table(priv);
+
 err_destroy_main_flow_table:
 	mlx5e_destroy_main_flow_table(priv);
 
 	return err;
 }
 
-void mlx5e_close_flow_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
 {
+	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
 	mlx5e_destroy_vlan_flow_table(priv);
 	mlx5e_destroy_main_flow_table(priv);
 }
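
Two mechanical changes run through this file alongside the new IPsec AH/ESP traffic types: open-coded (1 << x) masks become BIT(x), and the per-rule cleanup calls collapse into a single err_del_ai unwind label. The idiom in miniature — everything below is a hypothetical self-contained example, not driver code:

#include <stdio.h>

#define BIT(nr)		(1UL << (nr))	/* as defined by the kernel's bitops */
#define NUM_RULES	8

static unsigned long installed;	/* plays the role of ai->tt_vec */

static int add_one_rule(int i)
{
	if (i == 5)
		return -1;	/* simulate a failure partway through */
	installed |= BIT(i);
	return 0;
}

static void del_installed_rules(void)
{
	installed = 0;	/* tear down only what was actually added */
}

static int add_rules(unsigned long wanted)
{
	int err;
	int i;

	for (i = 0; i < NUM_RULES; i++) {
		if (!(wanted & BIT(i)))
			continue;
		err = add_one_rule(i);
		if (err)
			goto err_unwind;	/* one cleanup path, like err_del_ai */
	}

	return 0;

err_unwind:
	del_installed_rules();
	return err;
}

int main(void)
{
	printf("%d\n", add_rules(BIT(1) | BIT(5)));	/* prints -1 */
	return 0;
}
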
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 40206da..59874d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -41,6 +41,7 @@
 struct mlx5e_sq_param {
 	u32                        sqc[MLX5_ST_SZ_DW(sqc)];
 	struct mlx5_wq_param       wq;
+	u16                        max_inline;
 };
 
 struct mlx5e_cq_param {
@@ -81,6 +82,47 @@
 	mutex_unlock(&priv->state_lock);
 }
 
+static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5e_pport_stats *s = &priv->stats.pport;
+	u32 *in;
+	u32 *out;
+	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+	in  = mlx5_vzalloc(sz);
+	out = mlx5_vzalloc(sz);
+	if (!in || !out)
+		goto free_out;
+
+	MLX5_SET(ppcnt_reg, in, local_port, 1);
+
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out,
+			     sz, MLX5_REG_PPCNT, 0, 0);
+	memcpy(s->IEEE_802_3_counters,
+	       MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+	       sizeof(s->IEEE_802_3_counters));
+
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out,
+			     sz, MLX5_REG_PPCNT, 0, 0);
+	memcpy(s->RFC_2863_counters,
+	       MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+	       sizeof(s->RFC_2863_counters));
+
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out,
+			     sz, MLX5_REG_PPCNT, 0, 0);
+	memcpy(s->RFC_2819_counters,
+	       MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+	       sizeof(s->RFC_2819_counters));
+
+free_out:
+	kvfree(in);
+	kvfree(out);
+}
+
 void mlx5e_update_stats(struct mlx5e_priv *priv)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
@@ -107,6 +149,7 @@
 	s->lro_packets		= 0;
 	s->lro_bytes		= 0;
 	s->rx_csum_none		= 0;
+	s->rx_csum_sw		= 0;
 	s->rx_wqe_err		= 0;
 	for (i = 0; i < priv->params.num_channels; i++) {
 		rq_stats = &priv->channel[i]->rq.stats;
@@ -114,9 +157,10 @@
 		s->lro_packets	+= rq_stats->lro_packets;
 		s->lro_bytes	+= rq_stats->lro_bytes;
 		s->rx_csum_none	+= rq_stats->csum_none;
+		s->rx_csum_sw	+= rq_stats->csum_sw;
 		s->rx_wqe_err   += rq_stats->wqe_err;
 
-		for (j = 0; j < priv->num_tc; j++) {
+		for (j = 0; j < priv->params.num_tc; j++) {
 			sq_stats = &priv->channel[i]->sq[j].stats;
 
 			s->tso_packets		+= sq_stats->tso_packets;
@@ -199,8 +243,10 @@
 
 	/* Update calculated offload counters */
 	s->tx_csum_offload = s->tx_packets - tx_offload_none;
-	s->rx_csum_good    = s->rx_packets - s->rx_csum_none;
+	s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
+			       s->rx_csum_sw;
 
+	mlx5e_update_pport_counters(priv);
 free_out:
 	kvfree(out);
 }
@@ -272,6 +318,8 @@
 	int err;
 	int i;
 
+	param->wq.db_numa_node = cpu_to_node(c->cpu);
+
 	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
 				&rq->wq_ctrl);
 	if (err)
@@ -304,6 +352,7 @@
 	rq->netdev  = c->netdev;
 	rq->channel = c;
 	rq->ix      = c->ix;
+	rq->priv    = c->priv;
 
 	return 0;
 
@@ -321,8 +370,7 @@
 
 static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
 {
-	struct mlx5e_channel *c = rq->channel;
-	struct mlx5e_priv *priv = c->priv;
+	struct mlx5e_priv *priv = rq->priv;
 	struct mlx5_core_dev *mdev = priv->mdev;
 
 	void *in;
@@ -342,11 +390,11 @@
 
 	memcpy(rqc, param->rqc, sizeof(param->rqc));
 
-	MLX5_SET(rqc,  rqc, cqn,		c->rq.cq.mcq.cqn);
+	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
 	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
 	MLX5_SET(rqc,  rqc, flush_in_error_en,	1);
 	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
-						PAGE_SHIFT);
+						MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);
 
 	mlx5_fill_page_array(&rq->wq_ctrl.buf,
@@ -389,11 +437,7 @@
 
 static void mlx5e_disable_rq(struct mlx5e_rq *rq)
 {
-	struct mlx5e_channel *c = rq->channel;
-	struct mlx5e_priv *priv = c->priv;
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	mlx5_core_destroy_rq(mdev, rq->rqn);
+	mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
 }
 
 static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
@@ -502,6 +546,8 @@
 	if (err)
 		return err;
 
+	param->wq.db_numa_node = cpu_to_node(c->cpu);
+
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
 				 &sq->wq_ctrl);
 	if (err)
@@ -509,7 +555,9 @@
 
 	sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
 	sq->uar_map     = sq->uar.map;
+	sq->uar_bf_map  = sq->uar.bf_map;
 	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+	sq->max_inline  = param->max_inline;
 
 	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
 	if (err)
@@ -518,11 +566,12 @@
 	txq_ix = c->ix + tc * priv->params.num_channels;
 	sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
 
-	sq->pdev    = c->pdev;
-	sq->mkey_be = c->mkey_be;
-	sq->channel = c;
-	sq->tc      = tc;
-	sq->edge    = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+	sq->pdev      = c->pdev;
+	sq->mkey_be   = c->mkey_be;
+	sq->channel   = c;
+	sq->tc        = tc;
+	sq->edge      = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+	sq->bf_budget = MLX5E_SQ_BF_BUDGET;
 	priv->txq_to_sq_map[txq_ix] = sq;
 
 	return 0;
@@ -569,7 +618,6 @@
 
 	memcpy(sqc, param->sqc, sizeof(param->sqc));
 
-	MLX5_SET(sqc,  sqc, user_index,		sq->tc);
 	MLX5_SET(sqc,  sqc, tis_num_0,		priv->tisn[sq->tc]);
 	MLX5_SET(sqc,  sqc, cqn,		c->sq[sq->tc].cq.mcq.cqn);
 	MLX5_SET(sqc,  sqc, state,		MLX5_SQC_STATE_RST);
@@ -579,7 +627,7 @@
 	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
 	MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
 	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
-					  PAGE_SHIFT);
+					  MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);
 
 	mlx5_fill_page_array(&sq->wq_ctrl.buf,
@@ -702,7 +750,8 @@
 	int err;
 	u32 i;
 
-	param->wq.numa = cpu_to_node(c->cpu);
+	param->wq.buf_numa_node = cpu_to_node(c->cpu);
+	param->wq.db_numa_node  = cpu_to_node(c->cpu);
 	param->eq_ix   = c->ix;
 
 	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
@@ -732,6 +781,7 @@
 	}
 
 	cq->channel = c;
+	cq->priv = priv;
 
 	return 0;
 }
@@ -743,8 +793,7 @@
 
 static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 {
-	struct mlx5e_channel *c = cq->channel;
-	struct mlx5e_priv *priv = c->priv;
+	struct mlx5e_priv *priv = cq->priv;
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5_core_cq *mcq = &cq->mcq;
 
@@ -773,7 +822,7 @@
 	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
 	MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
 	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
-					    PAGE_SHIFT);
+					    MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
 
 	err = mlx5_core_create_cq(mdev, mcq, in, inlen);
@@ -790,8 +839,7 @@
 
 static void mlx5e_disable_cq(struct mlx5e_cq *cq)
 {
-	struct mlx5e_channel *c = cq->channel;
-	struct mlx5e_priv *priv = c->priv;
+	struct mlx5e_priv *priv = cq->priv;
 	struct mlx5_core_dev *mdev = priv->mdev;
 
 	mlx5_core_destroy_cq(mdev, &cq->mcq);
@@ -901,13 +949,13 @@
 		mlx5e_close_sq(&c->sq[tc]);
 }
 
-static void mlx5e_build_tc_to_txq_map(struct mlx5e_channel *c,
-				      int num_channels)
+static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
 {
 	int i;
 
 	for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
-		c->tc_to_txq_map[i] = c->ix + i * num_channels;
+		priv->channeltc_to_txq_map[ix][i] =
+			ix + i * priv->params.num_channels;
 }
 
 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
@@ -929,9 +977,9 @@
 	c->pdev     = &priv->mdev->pdev->dev;
 	c->netdev   = priv->netdev;
 	c->mkey_be  = cpu_to_be32(priv->mr.key);
-	c->num_tc   = priv->num_tc;
+	c->num_tc   = priv->params.num_tc;
 
-	mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);
+	mlx5e_build_channeltc_to_txq_map(priv, ix);
 
 	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
 
@@ -1000,7 +1048,7 @@
 	MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
 	MLX5_SET(wq, wq, pd,               priv->pdn);
 
-	param->wq.numa   = dev_to_node(&priv->mdev->pdev->dev);
+	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
 	param->wq.linear = 1;
 }
 
@@ -1014,7 +1062,8 @@
 	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
 	MLX5_SET(wq, wq, pd,            priv->pdn);
 
-	param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+	param->max_inline = priv->params.tx_max_inline;
 }
 
 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1059,27 +1108,28 @@
 static int mlx5e_open_channels(struct mlx5e_priv *priv)
 {
 	struct mlx5e_channel_param cparam;
+	int nch = priv->params.num_channels;
 	int err = -ENOMEM;
 	int i;
 	int j;
 
-	priv->channel = kcalloc(priv->params.num_channels,
-				sizeof(struct mlx5e_channel *), GFP_KERNEL);
+	priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
+				GFP_KERNEL);
 
-	priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc,
+	priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
 				      sizeof(struct mlx5e_sq *), GFP_KERNEL);
 
 	if (!priv->channel || !priv->txq_to_sq_map)
 		goto err_free_txq_to_sq_map;
 
 	mlx5e_build_channel_param(priv, &cparam);
-	for (i = 0; i < priv->params.num_channels; i++) {
+	for (i = 0; i < nch; i++) {
 		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
 		if (err)
 			goto err_close_channels;
 	}
 
-	for (j = 0; j < priv->params.num_channels; j++) {
+	for (j = 0; j < nch; j++) {
 		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
 		if (err)
 			goto err_close_channels;
@@ -1109,67 +1159,73 @@
 	kfree(priv->channel);
 }
 
-static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
+static int mlx5e_rx_hash_fn(int hfunc)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
-	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
-
-	memset(in, 0, sizeof(in));
-
-	MLX5_SET(tisc, tisc, prio,  tc);
-	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
-
-	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+	return (hfunc == ETH_RSS_HASH_TOP) ?
+	       MLX5_RX_HASH_FN_TOEPLITZ :
+	       MLX5_RX_HASH_FN_INVERTED_XOR8;
 }
 
-static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
+static int mlx5e_bits_invert(unsigned long a, int size)
 {
-	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+	int inv = 0;
+	int i;
+
+	for (i = 0; i < size; i++)
+		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+
+	return inv;
 }
 
-static int mlx5e_open_tises(struct mlx5e_priv *priv)
+static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
 {
-	int num_tc = priv->num_tc;
-	int err;
-	int tc;
+	int i;
 
-	for (tc = 0; tc < num_tc; tc++) {
-		err = mlx5e_open_tis(priv, tc);
-		if (err)
-			goto err_close_tises;
+	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
+		int ix = i;
+
+		if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
+			ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
+
+		ix = priv->params.indirection_rqt[ix];
+		ix = ix % priv->params.num_channels;
+		MLX5_SET(rqtc, rqtc, rq_num[i],
+			 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+			 priv->channel[ix]->rq.rqn :
+			 priv->drop_rq.rqn);
 	}
-
-	return 0;
-
-err_close_tises:
-	for (tc--; tc >= 0; tc--)
-		mlx5e_close_tis(priv, tc);
-
-	return err;
 }
 
-static void mlx5e_close_tises(struct mlx5e_priv *priv)
+static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
+				enum mlx5e_rqt_ix rqt_ix)
 {
-	int num_tc = priv->num_tc;
-	int tc;
 
-	for (tc = 0; tc < num_tc; tc++)
-		mlx5e_close_tis(priv, tc);
+	switch (rqt_ix) {
+	case MLX5E_INDIRECTION_RQT:
+		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
+
+		break;
+
+	default: /* MLX5E_SINGLE_RQ_RQT */
+		MLX5_SET(rqtc, rqtc, rq_num[0],
+			 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+			 priv->channel[0]->rq.rqn :
+			 priv->drop_rq.rqn);
+
+		break;
+	}
 }
 
-static int mlx5e_open_rqt(struct mlx5e_priv *priv)
+static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
 	u32 *in;
-	u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
 	void *rqtc;
 	int inlen;
-	int err;
 	int sz;
-	int i;
+	int err;
 
-	sz = 1 << priv->params.rx_hash_log_tbl_sz;
+	sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
 
 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
 	in = mlx5_vzalloc(inlen);
@@ -1181,198 +1237,101 @@
 	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
 	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
 
-	for (i = 0; i < sz; i++) {
-		int ix = i % priv->params.num_channels;
+	mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
 
-		MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
-	}
-
-	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
-
-	memset(out, 0, sizeof(out));
-	err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
-	if (!err)
-		priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+	err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);
 
 	kvfree(in);
 
 	return err;
 }
 
-static void mlx5e_close_rqt(struct mlx5e_priv *priv)
-{
-	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
-	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
-
-	memset(in, 0, sizeof(in));
-
-	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
-	MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
-
-	mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
-				   sizeof(out));
-}
-
-static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
-{
-	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
-	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
-
-#define ROUGH_MAX_L2_L3_HDR_SZ 256
-
-#define MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-			  MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-			  MLX5_HASH_FIELD_SEL_DST_IP   |\
-			  MLX5_HASH_FIELD_SEL_L4_SPORT |\
-			  MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-	if (priv->params.lro_en) {
-		MLX5_SET(tirc, tirc, lro_enable_mask,
-			 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
-			 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
-		MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
-			 (priv->params.lro_wqe_sz -
-			  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
-		MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
-			 MLX5_CAP_ETH(priv->mdev,
-				      lro_timer_supported_periods[3]));
-	}
-
-	switch (tt) {
-	case MLX5E_TT_ANY:
-		MLX5_SET(tirc, tirc, disp_type,
-			 MLX5_TIRC_DISP_TYPE_DIRECT);
-		MLX5_SET(tirc, tirc, inline_rqn,
-			 priv->channel[0]->rq.rqn);
-		break;
-	default:
-		MLX5_SET(tirc, tirc, disp_type,
-			 MLX5_TIRC_DISP_TYPE_INDIRECT);
-		MLX5_SET(tirc, tirc, indirect_table,
-			 priv->rqtn);
-		MLX5_SET(tirc, tirc, rx_hash_fn,
-			 MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
-		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
-		netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
-						 rx_hash_toeplitz_key),
-				    MLX5_FLD_SZ_BYTES(tirc,
-						      rx_hash_toeplitz_key));
-		break;
-	}
-
-	switch (tt) {
-	case MLX5E_TT_IPV4_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_ALL);
-		break;
-
-	case MLX5E_TT_IPV6_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_ALL);
-		break;
-
-	case MLX5E_TT_IPV4_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_ALL);
-		break;
-
-	case MLX5E_TT_IPV6_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_ALL);
-		break;
-
-	case MLX5E_TT_IPV4:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-
-	case MLX5E_TT_IPV6:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-	}
-}
-
-static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
 	u32 *in;
-	void *tirc;
+	void *rqtc;
 	int inlen;
+	int sz;
 	int err;
 
-	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+	sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
 	in = mlx5_vzalloc(inlen);
 	if (!in)
 		return -ENOMEM;
 
-	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
 
-	mlx5e_build_tir_ctx(priv, tirc, tt);
+	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
 
-	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+	mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
+
+	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
+
+	err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);
 
 	kvfree(in);
 
 	return err;
 }
 
-static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
+static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
 {
-	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+	mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
 }
 
-static int mlx5e_open_tirs(struct mlx5e_priv *priv)
+static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
 {
+	mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
+	mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+}
+
+static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
+{
+	if (!priv->params.lro_en)
+		return;
+
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
+
+	MLX5_SET(tirc, tirc, lro_enable_mask,
+		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+		 (priv->params.lro_wqe_sz -
+		  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+	MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+		 MLX5_CAP_ETH(priv->mdev,
+			      lro_timer_supported_periods[2]));
+}
+
+static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	void *in;
+	void *tirc;
+	int inlen;
 	int err;
-	int i;
 
-	for (i = 0; i < MLX5E_NUM_TT; i++) {
-		err = mlx5e_open_tir(priv, i);
-		if (err)
-			goto err_close_tirs;
-	}
+	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
 
-	return 0;
+	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
+	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
 
-err_close_tirs:
-	for (i--; i >= 0; i--)
-		mlx5e_close_tir(priv, i);
+	mlx5e_build_tir_ctx_lro(tirc, priv);
+
+	err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+
+	kvfree(in);
 
 	return err;
 }
 
-static void mlx5e_close_tirs(struct mlx5e_priv *priv)
-{
-	int i;
-
-	for (i = 0; i < MLX5E_NUM_TT; i++)
-		mlx5e_close_tir(priv, i);
-}
-
 static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1400,6 +1359,8 @@
 	int num_txqs;
 	int err;
 
+	set_bit(MLX5E_STATE_OPENED, &priv->state);
+
 	num_txqs = priv->params.num_channels * priv->params.num_tc;
 	netif_set_real_num_tx_queues(netdev, num_txqs);
 	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
@@ -1408,74 +1369,19 @@
 	if (err)
 		return err;
 
-	err = mlx5e_open_tises(priv);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
-			   __func__, err);
-		return err;
-	}
-
 	err = mlx5e_open_channels(priv);
 	if (err) {
 		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
 			   __func__, err);
-		goto err_close_tises;
+		return err;
 	}
 
-	err = mlx5e_open_rqt(priv);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
-			   __func__, err);
-		goto err_close_channels;
-	}
-
-	err = mlx5e_open_tirs(priv);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n",
-			   __func__, err);
-		goto err_close_rqls;
-	}
-
-	err = mlx5e_open_flow_table(priv);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
-			   __func__, err);
-		goto err_close_tirs;
-	}
-
-	err = mlx5e_add_all_vlan_rules(priv);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
-			   __func__, err);
-		goto err_close_flow_table;
-	}
-
-	mlx5e_init_eth_addr(priv);
-
-	set_bit(MLX5E_STATE_OPENED, &priv->state);
-
 	mlx5e_update_carrier(priv);
-	mlx5e_set_rx_mode_core(priv);
+	mlx5e_redirect_rqts(priv);
 
 	schedule_delayed_work(&priv->update_stats_work, 0);
+
 	return 0;
-
-err_close_flow_table:
-	mlx5e_close_flow_table(priv);
-
-err_close_tirs:
-	mlx5e_close_tirs(priv);
-
-err_close_rqls:
-	mlx5e_close_rqt(priv);
-
-err_close_channels:
-	mlx5e_close_channels(priv);
-
-err_close_tises:
-	mlx5e_close_tises(priv);
-
-	return err;
 }
 
 static int mlx5e_open(struct net_device *netdev)
@@ -1496,14 +1402,9 @@
 
 	clear_bit(MLX5E_STATE_OPENED, &priv->state);
 
-	mlx5e_set_rx_mode_core(priv);
-	mlx5e_del_all_vlan_rules(priv);
+	mlx5e_redirect_rqts(priv);
 	netif_carrier_off(priv->netdev);
-	mlx5e_close_flow_table(priv);
-	mlx5e_close_tirs(priv);
-	mlx5e_close_rqt(priv);
 	mlx5e_close_channels(priv);
-	mlx5e_close_tises(priv);
 
 	return 0;
 }
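
The large hunk below is the core of the restructuring: TISes, TIRs, RQTs and flow tables become long-lived objects created at probe time, so ndo_open/ndo_stop only create and destroy channels. What makes that safe is the new drop RQ — while the netdev is closed, every RQT slot resolves to it, so the TIRs never reference a destroyed RQ. A self-contained sketch of that decision, using stand-in types rather than the driver's:

#include <stdbool.h>
#include <stdint.h>

struct toy_rq { uint32_t rqn; };

struct toy_priv {
	bool opened;			/* MLX5E_STATE_OPENED stand-in */
	struct toy_rq *channel_rq[64];
	struct toy_rq drop_rq;		/* always valid, parks traffic */
};

/* While closed, every indirection slot resolves to the drop RQ, so
 * hardware steering always has a live destination.
 */
static uint32_t rqt_slot_rqn(const struct toy_priv *priv, int ix)
{
	return priv->opened ? priv->channel_rq[ix]->rqn
			    : priv->drop_rq.rqn;
}

int main(void)
{
	struct toy_priv p = { .opened = false };

	p.drop_rq.rqn = 7;
	return rqt_slot_rqn(&p, 0) == 7 ? 0 : 1;	/* closed -> drop RQ */
}
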
@@ -1520,26 +1421,341 @@
 	return err;
 }
 
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
-			     struct mlx5e_params *new_params)
+static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
+				struct mlx5e_rq *rq,
+				struct mlx5e_rq_param *param)
 {
-	int err = 0;
-	int was_opened;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	void *rqc = param->rqc;
+	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+	int err;
 
-	WARN_ON(!mutex_is_locked(&priv->state_lock));
+	param->wq.db_numa_node = param->wq.buf_numa_node;
 
-	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-	if (was_opened)
-		mlx5e_close_locked(priv->netdev);
+	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+				&rq->wq_ctrl);
+	if (err)
+		return err;
 
-	priv->params = *new_params;
+	rq->priv = priv;
 
-	if (was_opened)
-		err = mlx5e_open_locked(priv->netdev);
+	return 0;
+}
+
+static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
+				struct mlx5e_cq *cq,
+				struct mlx5e_cq_param *param)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5_core_cq *mcq = &cq->mcq;
+	int eqn_not_used;
+	int irqn;
+	int err;
+
+	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+			       &cq->wq_ctrl);
+	if (err)
+		return err;
+
+	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+
+	mcq->cqe_sz     = 64;
+	mcq->set_ci_db  = cq->wq_ctrl.db.db;
+	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
+	*mcq->set_ci_db = 0;
+	*mcq->arm_db    = 0;
+	mcq->vector     = param->eq_ix;
+	mcq->comp       = mlx5e_completion_event;
+	mcq->event      = mlx5e_cq_error_event;
+	mcq->irqn       = irqn;
+	mcq->uar        = &priv->cq_uar;
+
+	cq->priv = priv;
+
+	return 0;
+}
+
+static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
+{
+	struct mlx5e_cq_param cq_param;
+	struct mlx5e_rq_param rq_param;
+	struct mlx5e_rq *rq = &priv->drop_rq;
+	struct mlx5e_cq *cq = &priv->drop_rq.cq;
+	int err;
+
+	memset(&cq_param, 0, sizeof(cq_param));
+	memset(&rq_param, 0, sizeof(rq_param));
+	mlx5e_build_rx_cq_param(priv, &cq_param);
+	mlx5e_build_rq_param(priv, &rq_param);
+
+	err = mlx5e_create_drop_cq(priv, cq, &cq_param);
+	if (err)
+		return err;
+
+	err = mlx5e_enable_cq(cq, &cq_param);
+	if (err)
+		goto err_destroy_cq;
+
+	err = mlx5e_create_drop_rq(priv, rq, &rq_param);
+	if (err)
+		goto err_disable_cq;
+
+	err = mlx5e_enable_rq(rq, &rq_param);
+	if (err)
+		goto err_destroy_rq;
+
+	return 0;
+
+err_destroy_rq:
+	mlx5e_destroy_rq(&priv->drop_rq);
+
+err_disable_cq:
+	mlx5e_disable_cq(&priv->drop_rq.cq);
+
+err_destroy_cq:
+	mlx5e_destroy_cq(&priv->drop_rq.cq);
 
 	return err;
 }
 
+static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
+{
+	mlx5e_disable_rq(&priv->drop_rq);
+	mlx5e_destroy_rq(&priv->drop_rq);
+	mlx5e_disable_cq(&priv->drop_rq.cq);
+	mlx5e_destroy_cq(&priv->drop_rq.cq);
+}
+
+static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(tisc, tisc, prio,  tc);
+	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+
+	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+}
+
+static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
+{
+	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+}
+
+static int mlx5e_create_tises(struct mlx5e_priv *priv)
+{
+	int err;
+	int tc;
+
+	for (tc = 0; tc < priv->params.num_tc; tc++) {
+		err = mlx5e_create_tis(priv, tc);
+		if (err)
+			goto err_close_tises;
+	}
+
+	return 0;
+
+err_close_tises:
+	for (tc--; tc >= 0; tc--)
+		mlx5e_destroy_tis(priv, tc);
+
+	return err;
+}
+
+static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
+{
+	int tc;
+
+	for (tc = 0; tc < priv->params.num_tc; tc++)
+		mlx5e_destroy_tis(priv, tc);
+}
+
+static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+{
+	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
+#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
+				 MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
+	mlx5e_build_tir_ctx_lro(tirc, priv);
+
+	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+
+	switch (tt) {
+	case MLX5E_TT_ANY:
+		MLX5_SET(tirc, tirc, indirect_table,
+			 priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
+		MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
+		break;
+	default:
+		MLX5_SET(tirc, tirc, indirect_table,
+			 priv->rqtn[MLX5E_INDIRECTION_RQT]);
+		MLX5_SET(tirc, tirc, rx_hash_fn,
+			 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
+		if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+			void *rss_key = MLX5_ADDR_OF(tirc, tirc,
+						     rx_hash_toeplitz_key);
+			size_t len = MLX5_FLD_SZ_BYTES(tirc,
+						       rx_hash_toeplitz_key);
+
+			MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+			memcpy(rss_key, priv->params.toeplitz_hash_key, len);
+		}
+		break;
+	}
+
+	switch (tt) {
+	case MLX5E_TT_IPV4_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+
+	case MLX5E_TT_IPV6:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+	}
+}
+
+static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 *in;
+	void *tirc;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+	mlx5e_build_tir_ctx(priv, tirc, tt);
+
+	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+
+	kvfree(in);
+
+	return err;
+}
+
+static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
+{
+	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+}
+
+static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+{
+	int err;
+	int i;
+
+	for (i = 0; i < MLX5E_NUM_TT; i++) {
+		err = mlx5e_create_tir(priv, i);
+		if (err)
+			goto err_destroy_tirs;
+	}
+
+	return 0;
+
+err_destroy_tirs:
+	for (i--; i >= 0; i--)
+		mlx5e_destroy_tir(priv, i);
+
+	return err;
+}
+
+static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < MLX5E_NUM_TT; i++)
+		mlx5e_destroy_tir(priv, i);
+}
+
 static struct rtnl_link_stats64 *
 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
@@ -1589,20 +1805,26 @@
 			      netdev_features_t features)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err = 0;
 	netdev_features_t changes = features ^ netdev->features;
-	struct mlx5e_params new_params;
-	bool update_params = false;
 
 	mutex_lock(&priv->state_lock);
-	new_params = priv->params;
 
 	if (changes & NETIF_F_LRO) {
-		new_params.lro_en = !!(features & NETIF_F_LRO);
-		update_params = true;
+		bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+
+		if (was_opened)
+			mlx5e_close_locked(priv->netdev);
+
+		priv->params.lro_en = !!(features & NETIF_F_LRO);
+		mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
+		mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);
+
+		if (was_opened)
+			err = mlx5e_open_locked(priv->netdev);
 	}
 
-	if (update_params)
-		mlx5e_update_priv_params(priv, &new_params);
+	mutex_unlock(&priv->state_lock);
 
 	if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
 		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -1611,8 +1833,6 @@
 			mlx5e_disable_vlan_filter(priv);
 	}
 
-	mutex_unlock(&priv->state_lock);
-
 	return 0;
 }
 
@@ -1620,8 +1840,9 @@
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
+	bool was_opened;
 	int max_mtu;
-	int err;
+	int err = 0;
 
 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
@@ -1633,8 +1854,16 @@
 	}
 
 	mutex_lock(&priv->state_lock);
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (was_opened)
+		mlx5e_close_locked(netdev);
+
 	netdev->mtu = new_mtu;
-	err = mlx5e_update_priv_params(priv, &priv->params);
+
+	if (was_opened)
+		err = mlx5e_open_locked(netdev);
+
 	mutex_unlock(&priv->state_lock);
 
 	return err;
@@ -1673,11 +1902,21 @@
 	return 0;
 }
 
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
+{
+	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+	return bf_buf_size -
+	       sizeof(struct mlx5e_tx_wqe) +
+	       2 /* sizeof(mlx5e_tx_wqe.inline_hdr_start) */;
+}
+
 static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
 				    struct net_device *netdev,
-				    int num_comp_vectors)
+				    int num_channels)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int i;
 
 	priv->params.log_sq_size           =
 		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
@@ -1691,24 +1930,25 @@
 		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
 	priv->params.tx_cq_moderation_pkts =
 		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+	priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
 	priv->params.min_rx_wqes           =
 		MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
-	priv->params.rx_hash_log_tbl_sz    =
-		(order_base_2(num_comp_vectors) >
-		 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
-		order_base_2(num_comp_vectors)           :
-		MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
 	priv->params.num_tc                = 1;
 	priv->params.default_vlan_prio     = 0;
+	priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
 
-	priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
+	netdev_rss_key_fill(priv->params.toeplitz_hash_key,
+			    sizeof(priv->params.toeplitz_hash_key));
+
+	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
+		priv->params.indirection_rqt[i] = i % num_channels;
+
 	priv->params.lro_wqe_sz            =
 		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
 
 	priv->mdev                         = mdev;
 	priv->netdev                       = netdev;
-	priv->params.num_channels          = num_comp_vectors;
-	priv->num_tc                       = priv->params.num_tc;
+	priv->params.num_channels          = num_channels;
 	priv->default_vlan_prio            = priv->params.default_vlan_prio;
 
 	spin_lock_init(&priv->async_events_spinlock);
@@ -1733,9 +1973,8 @@
 
 	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
 
-	if (priv->num_tc > 1) {
+	if (priv->params.num_tc > 1)
 		mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
-	}
 
 	netdev->netdev_ops        = &mlx5e_netdev_ops;
 	netdev->watchdog_timeo    = 15 * HZ;
@@ -1798,19 +2037,20 @@
 {
 	struct net_device *netdev;
 	struct mlx5e_priv *priv;
-	int ncv = mdev->priv.eq_table.num_comp_vectors;
+	int nch = min_t(int, mdev->priv.eq_table.num_comp_vectors,
+			MLX5E_MAX_NUM_CHANNELS);
 	int err;
 
 	if (mlx5e_check_required_hca_cap(mdev))
 		return NULL;
 
-	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
+	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
 	if (!netdev) {
 		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
 		return NULL;
 	}
 
-	mlx5e_build_netdev_priv(mdev, netdev, ncv);
+	mlx5e_build_netdev_priv(mdev, netdev, nch);
 	mlx5e_build_netdev(netdev);
 
 	netif_carrier_off(netdev);
@@ -1819,43 +2059,95 @@
 
 	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
 	if (err) {
-		netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
-			   __func__, err);
+		mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
 		goto err_free_netdev;
 	}
 
 	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
 	if (err) {
-		netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
-			   __func__, err);
+		mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
 		goto err_unmap_free_uar;
 	}
 
 	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
 	if (err) {
-		netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n",
-			   __func__, err);
+		mlx5_core_err(mdev, "alloc td failed, %d\n", err);
 		goto err_dealloc_pd;
 	}
 
 	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
 	if (err) {
-		netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
-			   __func__, err);
+		mlx5_core_err(mdev, "create mkey failed, %d\n", err);
 		goto err_dealloc_transport_domain;
 	}
 
-	err = register_netdev(netdev);
+	err = mlx5e_create_tises(priv);
 	if (err) {
-		netdev_err(netdev, "%s: register_netdev failed, %d\n",
-			   __func__, err);
+		mlx5_core_warn(mdev, "create tises failed, %d\n", err);
 		goto err_destroy_mkey;
 	}
 
+	err = mlx5e_open_drop_rq(priv);
+	if (err) {
+		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+		goto err_destroy_tises;
+	}
+
+	err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
+	if (err) {
+		mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
+		goto err_close_drop_rq;
+	}
+
+	err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+	if (err) {
+		mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
+		goto err_destroy_rqt_indir;
+	}
+
+	err = mlx5e_create_tirs(priv);
+	if (err) {
+		mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
+		goto err_destroy_rqt_single;
+	}
+
+	err = mlx5e_create_flow_tables(priv);
+	if (err) {
+		mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
+		goto err_destroy_tirs;
+	}
+
+	mlx5e_init_eth_addr(priv);
+
+	err = register_netdev(netdev);
+	if (err) {
+		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
+		goto err_destroy_flow_tables;
+	}
+
 	mlx5e_enable_async_events(priv);
+	schedule_work(&priv->set_rx_mode_work);
 
 	return priv;
 
+err_destroy_flow_tables:
+	mlx5e_destroy_flow_tables(priv);
+
+err_destroy_tirs:
+	mlx5e_destroy_tirs(priv);
+
+err_destroy_rqt_single:
+	mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+
+err_destroy_rqt_indir:
+	mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+
+err_close_drop_rq:
+	mlx5e_close_drop_rq(priv);
+
+err_destroy_tises:
+	mlx5e_destroy_tises(priv);
+
 err_destroy_mkey:
 	mlx5_core_destroy_mkey(mdev, &priv->mr);
 
@@ -1879,13 +2171,22 @@
 	struct mlx5e_priv *priv = vpriv;
 	struct net_device *netdev = priv->netdev;
 
+	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+
+	schedule_work(&priv->set_rx_mode_work);
+	mlx5e_disable_async_events(priv);
+	flush_scheduled_work();
 	unregister_netdev(netdev);
+	mlx5e_destroy_flow_tables(priv);
+	mlx5e_destroy_tirs(priv);
+	mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+	mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+	mlx5e_close_drop_rq(priv);
+	mlx5e_destroy_tises(priv);
 	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
 	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
 	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
 	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
-	mlx5e_disable_async_events(priv);
-	flush_scheduled_work();
 	free_netdev(netdev);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 9a93741..cf00985 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -111,10 +111,12 @@
 		tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
 					sizeof(struct iphdr));
 		ipv6 = NULL;
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 	} else {
 		tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
 					sizeof(struct ipv6hdr));
 		ipv4 = NULL;
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 	}
 
 	if (get_cqe_lro_tcppsh(cqe))
@@ -149,6 +151,38 @@
 	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
 }
 
+static inline bool is_first_ethertype_ip(struct sk_buff *skb)
+{
+	__be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
+
+	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
+}
+
+static inline void mlx5e_handle_csum(struct net_device *netdev,
+				     struct mlx5_cqe64 *cqe,
+				     struct mlx5e_rq *rq,
+				     struct sk_buff *skb)
+{
+	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
+		goto csum_none;
+
+	if (likely(cqe->hds_ip_ext & CQE_L4_OK)) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else if (is_first_ethertype_ip(skb)) {
+		skb->ip_summed = CHECKSUM_COMPLETE;
+		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+		rq->stats.csum_sw++;
+	} else {
+		goto csum_none;
+	}
+
+	return;
+
+csum_none:
+	skb->ip_summed = CHECKSUM_NONE;
+	rq->stats.csum_none++;
+}
+
 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 				      struct mlx5e_rq *rq,
 				      struct sk_buff *skb)
@@ -162,20 +196,12 @@
 	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
 	if (lro_num_seg > 1) {
 		mlx5e_lro_update_hdr(skb, cqe);
-		skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
 		rq->stats.lro_packets++;
 		rq->stats.lro_bytes += cqe_bcnt;
 	}
 
-	if (likely(netdev->features & NETIF_F_RXCSUM) &&
-	    (cqe->hds_ip_ext & CQE_L2_OK) &&
-	    (cqe->hds_ip_ext & CQE_L3_OK) &&
-	    (cqe->hds_ip_ext & CQE_L4_OK)) {
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	} else {
-		skb->ip_summed = CHECKSUM_NONE;
-		rq->stats.csum_none++;
-	}
+	mlx5e_handle_csum(netdev, cqe, rq, skb);
 
 	skb->protocol = eth_type_trans(skb, netdev);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 03f28f4..b73672f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -57,7 +57,7 @@
 
 	if (notify_hw) {
 		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-		mlx5e_tx_notify_hw(sq, wqe);
+		mlx5e_tx_notify_hw(sq, wqe, 0);
 	}
 }
 
@@ -106,13 +106,21 @@
 		 priv->default_vlan_prio;
 	int tc = netdev_get_prio_tc_map(dev, up);
 
-	return priv->channel[channel_ix]->tc_to_txq_map[tc];
+	return priv->channeltc_to_txq_map[channel_ix][tc];
 }
 
 static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
-					    struct sk_buff *skb)
+					    struct sk_buff *skb, bool bf)
 {
-#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+	/* Some NIC TX decisions, e.g. loopback, are based on the packet
+	 * headers and occur before the data gather.
+	 * Therefore these headers must be copied into the WQE.
+	 */
+#define MLX5E_MIN_INLINE (ETH_HLEN + 2/*vlan tag*/)
+
+	if (bf && (skb_headlen(skb) <= sq->max_inline))
+		return skb_headlen(skb);
+
 	return MLX5E_MIN_INLINE;
 }
 
@@ -129,6 +137,7 @@
 
 	u8  opcode = MLX5_OPCODE_SEND;
 	dma_addr_t dma_addr = 0;
+	bool bf = false;
 	u16 headlen;
 	u16 ds_cnt;
 	u16 ihs;
@@ -141,6 +150,11 @@
 	else
 		sq->stats.csum_offload_none++;
 
+	if (sq->cc != sq->prev_cc) {
+		sq->prev_cc = sq->cc;
+		sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
+	}
+
 	if (skb_is_gso(skb)) {
 		u32 payload_len;
 
@@ -153,7 +167,10 @@
 		sq->stats.tso_packets++;
 		sq->stats.tso_bytes += payload_len;
 	} else {
-		ihs = mlx5e_get_inline_hdr_size(sq, skb);
+		bf = sq->bf_budget &&
+		     !skb->xmit_more &&
+		     !skb_shinfo(skb)->nr_frags;
+		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
 		MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
 							ETH_ZLEN);
 	}
@@ -225,14 +242,21 @@
 	}
 
 	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
+		int bf_sz = 0;
+
+		if (bf && sq->uar_bf_map)
+			bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;
+
 		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-		mlx5e_tx_notify_hw(sq, wqe);
+		mlx5e_tx_notify_hw(sq, wqe, bf_sz);
 	}
 
 	/* fill sq edge with nops to avoid wqe wrap around */
 	while ((sq->pc & wq->sz_m1) > sq->edge)
 		mlx5e_send_nop(sq, false);
 
+	sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+
 	sq->stats.packets++;
 	return NETDEV_TX_OK;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 06e3e1e..03aabdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -457,7 +457,7 @@
 	struct mlx5_priv *priv  = &mdev->priv;
 	struct msix_entry *msix = priv->msix_arr;
 	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-	int numa_node           = dev_to_node(&mdev->pdev->dev);
+	int numa_node           = priv->numa_node;
 	int err;
 
 	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@@ -656,6 +656,22 @@
 }
 #endif
 
+static int map_bf_area(struct mlx5_core_dev *dev)
+{
+	resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
+	resource_size_t bf_len = pci_resource_len(dev->pdev, 0);
+
+	dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);
+
+	return dev->priv.bf_mapping ? 0 : -ENOMEM;
+}
+
+static void unmap_bf_area(struct mlx5_core_dev *dev)
+{
+	if (dev->priv.bf_mapping)
+		io_mapping_free(dev->priv.bf_mapping);
+}
+
 static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
 	struct mlx5_priv *priv = &dev->priv;
@@ -670,6 +686,10 @@
 	INIT_LIST_HEAD(&priv->pgdir_list);
 	spin_lock_init(&priv->mkey_lock);
 
+	mutex_init(&priv->alloc_mutex);
+
+	priv->numa_node = dev_to_node(&dev->pdev->dev);
+
 	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
 	if (!priv->dbg_root)
 		return -ENOMEM;
@@ -806,10 +826,13 @@
 		goto err_stop_eqs;
 	}
 
+	if (map_bf_area(dev))
+		dev_err(&pdev->dev, "Failed to map blue flame area\n");
+
 	err = mlx5_irq_set_affinity_hints(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
-		goto err_free_comp_eqs;
+		goto err_unmap_bf_area;
 	}
 
 	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
@@ -821,7 +844,9 @@
 
 	return 0;
 
-err_free_comp_eqs:
+err_unmap_bf_area:
+	unmap_bf_area(dev);
+
 	free_comp_eqs(dev);
 
 err_stop_eqs:
@@ -879,6 +904,7 @@
 	mlx5_cleanup_qp_table(dev);
 	mlx5_cleanup_cq_table(dev);
 	mlx5_irq_clear_affinity_hints(dev);
+	unmap_bf_area(dev);
 	free_comp_eqs(dev);
 	mlx5_stop_eqs(dev);
 	mlx5_free_uuars(dev, &priv->uuari);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index fc88eca..566a704 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -73,7 +73,12 @@
 					     int in_size, u32 *out,
 					     int out_size)
 {
-	mlx5_cmd_exec(dev, in, in_size, out, out_size);
+	int err;
+
+	err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
+	if (err)
+		return err;
+
 	return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
 }
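
This fix matters because a transport-level failure leaves the output mailbox unwritten, and parsing it anyway could surface a stale or bogus firmware status. The general shape, with hypothetical stand-ins for the two layers:

	/* Hypothetical stand-ins; the pattern, not the driver's symbols. */
	int transport_exec(void *in, void *out);
	int outbox_status_to_err(void *out);

	int exec_check_status(void *in, void *out)
	{
		int err = transport_exec(in, out);

		if (err)
			return err;	/* outbox contents are not valid here */
		return outbox_status_to_err(out);	/* FW-reported status */
	}
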
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 7014799..821caaa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -216,22 +216,25 @@
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
 
-int mlx5_set_port_status(struct mlx5_core_dev *dev,
-			 enum mlx5_port_status status)
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+			       enum mlx5_port_status status)
 {
 	u32 in[MLX5_ST_SZ_DW(paos_reg)];
 	u32 out[MLX5_ST_SZ_DW(paos_reg)];
 
 	memset(in, 0, sizeof(in));
 
+	MLX5_SET(paos_reg, in, local_port, 1);
 	MLX5_SET(paos_reg, in, admin_status, status);
 	MLX5_SET(paos_reg, in, ase, 1);
 
 	return mlx5_core_access_reg(dev, in, sizeof(in), out,
 				    sizeof(out), MLX5_REG_PAOS, 0, 1);
 }
+EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
 
-int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+				 enum mlx5_port_status *status)
 {
 	u32 in[MLX5_ST_SZ_DW(paos_reg)];
 	u32 out[MLX5_ST_SZ_DW(paos_reg)];
@@ -239,14 +242,17 @@
 
 	memset(in, 0, sizeof(in));
 
+	MLX5_SET(paos_reg, in, local_port, 1);
+
 	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
 				   sizeof(out), MLX5_REG_PAOS, 0, 0);
 	if (err)
 		return err;
 
-	*status = MLX5_GET(paos_reg, out, oper_status);
+	*status = MLX5_GET(paos_reg, out, admin_status);
 	return err;
 }
+EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
 
 static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
 				int *max_mtu, int *oper_mtu, u8 port)
@@ -328,3 +334,45 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
+
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
+{
+	u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(pfcc_reg, in, local_port, 1);
+	MLX5_SET(pfcc_reg, in, pptx, tx_pause);
+	MLX5_SET(pfcc_reg, in, pprx, rx_pause);
+
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_PFCC, 0, 1);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
+
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+			  u32 *rx_pause, u32 *tx_pause)
+{
+	u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(pfcc_reg, in, local_port, 1);
+
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_PFCC, 0, 0);
+	if (err)
+		return err;
+
+	if (rx_pause)
+		*rx_pause = MLX5_GET(pfcc_reg, out, pprx);
+
+	if (tx_pause)
+		*tx_pause = MLX5_GET(pfcc_reg, out, pptx);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
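
These exports line up with ethtool's pauseparam callbacks, which also carry u32 rx/tx flags. A hedged sketch of a consumer (the mlx5e ethtool wiring itself is not part of this hunk, and the surrounding kernel context is assumed):

	static int sketch_get_pauseparam(struct mlx5_core_dev *mdev,
					 struct ethtool_pauseparam *pause)
	{
		/* NULL-tolerant out-params let a caller query one direction */
		return mlx5_query_port_pause(mdev, &pause->rx_pause,
					     &pause->tx_pause);
	}
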
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 8d98b03..b4c87c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -163,6 +163,18 @@
 	return err;
 }
 
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+			 int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_tir_out)];
+
+	MLX5_SET(modify_tir_in, in, tirn, tirn);
+	MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
+
+	memset(out, 0, sizeof(out));
+	return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
 void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
 {
 	u32 in[MLX5_ST_SZ_DW(destroy_tir_out)];
@@ -358,3 +370,44 @@
 	return  mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
 					   sizeof(out));
 }
+
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *rqtn)
+{
+	u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+	int err;
+
+	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+	memset(out, 0, sizeof(out));
+	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	if (!err)
+		*rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+	return err;
+}
+
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+			 int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_rqt_out)];
+
+	MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
+	MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
+
+	memset(out, 0, sizeof(out));
+	return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+
+	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
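
A create_rqt inbox carries a variable-length list of RQ numbers after the fixed context, so callers size it dynamically. A hedged sketch of the fill (field names follow the mlx5_ifc layout of this period; `sz`, `rqns[]`, `dev` and `rqtn` are assumed inputs):

	u32 *in;
	void *rqtc;
	int inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	int i, err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
	for (i = 0; i < sz; i++)
		MLX5_SET(rqtc, rqtc, rq_num[i], rqns[i]);

	err = mlx5_core_create_rqt(dev, in, inlen, &rqtn);
	kvfree(in);
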
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
index f9ef244..74cae51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
@@ -45,6 +45,8 @@
 void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
 int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
 			 u32 *tirn);
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+			 int inlen);
 void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
 int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
 			 u32 *tisn);
@@ -61,4 +63,10 @@
 int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
 int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
 
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *rqtn);
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+			 int inlen);
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
+
 #endif /* __TRANSOBJ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 9ef8587..eb05c84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -32,6 +32,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/io-mapping.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
 #include "mlx5_core.h"
@@ -246,6 +247,10 @@
 		goto err_free_uar;
 	}
 
+	if (mdev->priv.bf_mapping)
+		uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
+						uar->index << PAGE_SHIFT);
+
 	return 0;
 
 err_free_uar:
@@ -257,6 +262,7 @@
 
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 {
+	io_mapping_unmap(uar->bf_map);
 	iounmap(uar->map);
 	mlx5_cmd_free_uar(mdev, uar->index);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 8388411..ce21ee5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -73,13 +73,14 @@
 	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
 	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
 
-	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
 		return err;
 	}
 
-	err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+	err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+				  &wq_ctrl->buf, param->buf_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
 		goto err_db_free;
@@ -108,13 +109,14 @@
 	wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
 	wq->sz_m1 = (1 << wq->log_sz) - 1;
 
-	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
 		return err;
 	}
 
-	err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+	err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+				  &wq_ctrl->buf, param->buf_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
 		goto err_db_free;
@@ -144,7 +146,7 @@
 	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
 	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
 
-	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
 		return err;
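
Splitting the old single `numa` field in two lets callers keep the doorbell record near the device while placing the much hotter ring buffer on the node of the CPU that services the channel. A sketch of the intended split (`channel_cpu` is an assumed parameter):

	static void sketch_fill_wq_param(struct mlx5_core_dev *mdev,
					 struct mlx5_wq_param *param,
					 int channel_cpu)
	{
		param->db_numa_node  = dev_to_node(&mdev->pdev->dev);
		param->buf_numa_node = cpu_to_node(channel_cpu);
	}
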
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index e0ddd69..6c2a8f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -37,7 +37,8 @@
 
 struct mlx5_wq_param {
 	int		linear;
-	int		numa;
+	int		buf_numa_node;
+	int		db_numa_node;
 };
 
 struct mlx5_wq_ctrl {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
new file mode 100644
index 0000000..2941d9c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -0,0 +1,32 @@
+#
+# Mellanox switch drivers configuration
+#
+
+config MLXSW_CORE
+	tristate "Mellanox Technologies Switch ASICs support"
+	---help---
+	  This driver supports the Mellanox Technologies Switch ASICs family.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mlxsw_core.
+
+config MLXSW_PCI
+	tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
+	depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE
+	default m
+	---help---
+	  This is the PCI bus implementation for Mellanox Technologies Switch ASICs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mlxsw_pci.
+
+config MLXSW_SWITCHX2
+	tristate "Mellanox Technologies SwitchX-2 support"
+	depends on MLXSW_CORE && NET_SWITCHDEV
+	default m
+	---help---
+	  This driver supports Mellanox Technologies SwitchX-2 Ethernet
+	  Switch ASICs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mlxsw_switchx2.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
new file mode 100644
index 0000000..0a05f65
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_MLXSW_CORE)	+= mlxsw_core.o
+mlxsw_core-objs			:= core.o
+obj-$(CONFIG_MLXSW_PCI)		+= mlxsw_pci.o
+mlxsw_pci-objs			:= pci.o
+obj-$(CONFIG_MLXSW_SWITCHX2)	+= mlxsw_switchx2.o
+mlxsw_switchx2-objs		:= switchx2.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
new file mode 100644
index 0000000..770db17
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -0,0 +1,1090 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/cmd.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CMD_H
+#define _MLXSW_CMD_H
+
+#include "item.h"
+
+#define MLXSW_CMD_MBOX_SIZE	4096
+
+static inline char *mlxsw_cmd_mbox_alloc(void)
+{
+	return kzalloc(MLXSW_CMD_MBOX_SIZE, GFP_KERNEL);
+}
+
+static inline void mlxsw_cmd_mbox_free(char *mbox)
+{
+	kfree(mbox);
+}
+
+static inline void mlxsw_cmd_mbox_zero(char *mbox)
+{
+	memset(mbox, 0, MLXSW_CMD_MBOX_SIZE);
+}
+
+struct mlxsw_core;
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+		   u32 in_mod, bool out_mbox_direct,
+		   char *in_mbox, size_t in_mbox_size,
+		   char *out_mbox, size_t out_mbox_size);
+
+static inline int mlxsw_cmd_exec_in(struct mlxsw_core *mlxsw_core, u16 opcode,
+				    u8 opcode_mod, u32 in_mod, char *in_mbox,
+				    size_t in_mbox_size)
+{
+	return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+			      in_mbox, in_mbox_size, NULL, 0);
+}
+
+static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode,
+				     u8 opcode_mod, u32 in_mod,
+				     bool out_mbox_direct,
+				     char *out_mbox, size_t out_mbox_size)
+{
+	return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod,
+			      out_mbox_direct, NULL, 0,
+			      out_mbox, out_mbox_size);
+}
+
+static inline int mlxsw_cmd_exec_none(struct mlxsw_core *mlxsw_core, u16 opcode,
+				      u8 opcode_mod, u32 in_mod)
+{
+	return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+			      NULL, 0, NULL, 0);
+}
+
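
Together these helpers fix the mailbox lifecycle every command follows: allocate a zeroed 4KB box, execute, decode, free. A sketch using the QUERY_FW wrapper defined further down in this header:

	char *out_mbox = mlxsw_cmd_mbox_alloc();
	int err;

	if (!out_mbox)
		return -ENOMEM;

	err = mlxsw_cmd_query_fw(mlxsw_core, out_mbox);
	if (!err) {
		/* decode fields via the generated accessors */
	}
	mlxsw_cmd_mbox_free(out_mbox);
	return err;
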
+enum mlxsw_cmd_opcode {
+	MLXSW_CMD_OPCODE_QUERY_FW		= 0x004,
+	MLXSW_CMD_OPCODE_QUERY_BOARDINFO	= 0x006,
+	MLXSW_CMD_OPCODE_QUERY_AQ_CAP		= 0x003,
+	MLXSW_CMD_OPCODE_MAP_FA			= 0xFFF,
+	MLXSW_CMD_OPCODE_UNMAP_FA		= 0xFFE,
+	MLXSW_CMD_OPCODE_CONFIG_PROFILE		= 0x100,
+	MLXSW_CMD_OPCODE_ACCESS_REG		= 0x040,
+	MLXSW_CMD_OPCODE_SW2HW_DQ		= 0x201,
+	MLXSW_CMD_OPCODE_HW2SW_DQ		= 0x202,
+	MLXSW_CMD_OPCODE_2ERR_DQ		= 0x01E,
+	MLXSW_CMD_OPCODE_QUERY_DQ		= 0x022,
+	MLXSW_CMD_OPCODE_SW2HW_CQ		= 0x016,
+	MLXSW_CMD_OPCODE_HW2SW_CQ		= 0x017,
+	MLXSW_CMD_OPCODE_QUERY_CQ		= 0x018,
+	MLXSW_CMD_OPCODE_SW2HW_EQ		= 0x013,
+	MLXSW_CMD_OPCODE_HW2SW_EQ		= 0x014,
+	MLXSW_CMD_OPCODE_QUERY_EQ		= 0x015,
+};
+
+static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
+{
+	switch (opcode) {
+	case MLXSW_CMD_OPCODE_QUERY_FW:
+		return "QUERY_FW";
+	case MLXSW_CMD_OPCODE_QUERY_BOARDINFO:
+		return "QUERY_BOARDINFO";
+	case MLXSW_CMD_OPCODE_QUERY_AQ_CAP:
+		return "QUERY_AQ_CAP";
+	case MLXSW_CMD_OPCODE_MAP_FA:
+		return "MAP_FA";
+	case MLXSW_CMD_OPCODE_UNMAP_FA:
+		return "UNMAP_FA";
+	case MLXSW_CMD_OPCODE_CONFIG_PROFILE:
+		return "CONFIG_PROFILE";
+	case MLXSW_CMD_OPCODE_ACCESS_REG:
+		return "ACCESS_REG";
+	case MLXSW_CMD_OPCODE_SW2HW_DQ:
+		return "SW2HW_DQ";
+	case MLXSW_CMD_OPCODE_HW2SW_DQ:
+		return "HW2SW_DQ";
+	case MLXSW_CMD_OPCODE_2ERR_DQ:
+		return "2ERR_DQ";
+	case MLXSW_CMD_OPCODE_QUERY_DQ:
+		return "QUERY_DQ";
+	case MLXSW_CMD_OPCODE_SW2HW_CQ:
+		return "SW2HW_CQ";
+	case MLXSW_CMD_OPCODE_HW2SW_CQ:
+		return "HW2SW_CQ";
+	case MLXSW_CMD_OPCODE_QUERY_CQ:
+		return "QUERY_CQ";
+	case MLXSW_CMD_OPCODE_SW2HW_EQ:
+		return "SW2HW_EQ";
+	case MLXSW_CMD_OPCODE_HW2SW_EQ:
+		return "HW2SW_EQ";
+	case MLXSW_CMD_OPCODE_QUERY_EQ:
+		return "QUERY_EQ";
+	default:
+		return "*UNKNOWN*";
+	}
+}
+
+enum mlxsw_cmd_status {
+	/* Command execution succeeded. */
+	MLXSW_CMD_STATUS_OK		= 0x00,
+	/* Internal error (e.g. bus error) occurred while processing command. */
+	MLXSW_CMD_STATUS_INTERNAL_ERR	= 0x01,
+	/* Operation/command not supported or opcode modifier not supported. */
+	MLXSW_CMD_STATUS_BAD_OP		= 0x02,
+	/* Parameter not supported, parameter out of range. */
+	MLXSW_CMD_STATUS_BAD_PARAM	= 0x03,
+	/* System was not enabled or bad system state. */
+	MLXSW_CMD_STATUS_BAD_SYS_STATE	= 0x04,
+	/* Attempt to access reserved or unallocated resource, or resource in
+	 * inappropriate ownership.
+	 */
+	MLXSW_CMD_STATUS_BAD_RESOURCE	= 0x05,
+	/* Requested resource is currently executing a command. */
+	MLXSW_CMD_STATUS_RESOURCE_BUSY	= 0x06,
+	/* Required capability exceeds device limits. */
+	MLXSW_CMD_STATUS_EXCEED_LIM	= 0x08,
+	/* Resource is not in the appropriate state or ownership. */
+	MLXSW_CMD_STATUS_BAD_RES_STATE	= 0x09,
+	/* Index out of range (might be beyond table size or attempt to
+	 * access a reserved resource).
+	 */
+	MLXSW_CMD_STATUS_BAD_INDEX	= 0x0A,
+	/* NVMEM checksum/CRC failed. */
+	MLXSW_CMD_STATUS_BAD_NVMEM	= 0x0B,
+	/* Bad management packet (silently discarded). */
+	MLXSW_CMD_STATUS_BAD_PKT	= 0x30,
+};
+
+static inline const char *mlxsw_cmd_status_str(u8 status)
+{
+	switch (status) {
+	case MLXSW_CMD_STATUS_OK:
+		return "OK";
+	case MLXSW_CMD_STATUS_INTERNAL_ERR:
+		return "INTERNAL_ERR";
+	case MLXSW_CMD_STATUS_BAD_OP:
+		return "BAD_OP";
+	case MLXSW_CMD_STATUS_BAD_PARAM:
+		return "BAD_PARAM";
+	case MLXSW_CMD_STATUS_BAD_SYS_STATE:
+		return "BAD_SYS_STATE";
+	case MLXSW_CMD_STATUS_BAD_RESOURCE:
+		return "BAD_RESOURCE";
+	case MLXSW_CMD_STATUS_RESOURCE_BUSY:
+		return "RESOURCE_BUSY";
+	case MLXSW_CMD_STATUS_EXCEED_LIM:
+		return "EXCEED_LIM";
+	case MLXSW_CMD_STATUS_BAD_RES_STATE:
+		return "BAD_RES_STATE";
+	case MLXSW_CMD_STATUS_BAD_INDEX:
+		return "BAD_INDEX";
+	case MLXSW_CMD_STATUS_BAD_NVMEM:
+		return "BAD_NVMEM";
+	case MLXSW_CMD_STATUS_BAD_PKT:
+		return "BAD_PKT";
+	default:
+		return "*UNKNOWN*";
+	}
+}
+
+/* QUERY_FW - Query Firmware
+ * -------------------------
+ * OpMod == 0, INMmod == 0
+ * -----------------------
+ * The QUERY_FW command retrieves information related to firmware, command
+ * interface version and the amount of resources that should be allocated to
+ * the firmware.
+ */
+
+static inline int mlxsw_cmd_query_fw(struct mlxsw_core *mlxsw_core,
+				     char *out_mbox)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_FW,
+				  0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_fw_fw_pages
+ * Amount of physical memory to be allocated for firmware usage in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_pages, 0x00, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_major
+ * Firmware Revision - Major
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_major, 0x00, 0, 16);
+
+/* cmd_mbox_query_fw_fw_rev_subminor
+ * Firmware Sub-minor version (Patch level)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_subminor, 0x04, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_minor
+ * Firmware Revision - Minor
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_minor, 0x04, 0, 16);
+
+/* cmd_mbox_query_fw_core_clk
+ * Internal Clock Frequency (in MHz)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, core_clk, 0x08, 16, 16);
+
+/* cmd_mbox_query_fw_cmd_interface_rev
+ * Command Interface Interpreter Revision ID. This number is incremented
+ * every time a non-backward-compatible change is made to the command
+ * interface. The current cmd_interface_rev is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, cmd_interface_rev, 0x08, 0, 16);
+
+/* cmd_mbox_query_fw_dt
+ * If set, Debug Trace is supported
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, dt, 0x0C, 31, 1);
+
+/* cmd_mbox_query_fw_api_version
+ * Indicates the version of the API, to enable software querying
+ * for compatibility. The current api_version is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, api_version, 0x0C, 0, 16);
+
+/* cmd_mbox_query_fw_fw_hour
+ * Firmware timestamp - hour
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_hour, 0x10, 24, 8);
+
+/* cmd_mbox_query_fw_fw_minutes
+ * Firmware timestamp - minutes
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_minutes, 0x10, 16, 8);
+
+/* cmd_mbox_query_fw_fw_seconds
+ * Firmware timestamp - seconds
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_seconds, 0x10, 8, 8);
+
+/* cmd_mbox_query_fw_fw_year
+ * Firmware timestamp - year
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_year, 0x14, 16, 16);
+
+/* cmd_mbox_query_fw_fw_month
+ * Firmware timestamp - month
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_month, 0x14, 8, 8);
+
+/* cmd_mbox_query_fw_fw_day
+ * Firmware timestamp - day
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8);
+
+/* cmd_mbox_query_fw_clr_int_base_offset
+ * Clear Interrupt register's offset from clr_int_bar register
+ * in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, clr_int_base_offset, 0x20, 0, 64);
+
+/* cmd_mbox_query_fw_clr_int_bar
+ * PCI base address register (BAR) where clr_int register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, clr_int_bar, 0x28, 30, 2);
+
+/* cmd_mbox_query_fw_error_buf_offset
+ * Read-only buffer for internal error reports (offset
+ * from the error_buf_bar register in PCI address space).
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, error_buf_offset, 0x30, 0, 64);
+
+/* cmd_mbox_query_fw_error_buf_size
+ * Internal error buffer size in DWORDs
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_buf_size, 0x38, 0, 32);
+
+/* cmd_mbox_query_fw_error_int_bar
+ * PCI base address register (BAR) where error buffer
+ * register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_int_bar, 0x3C, 30, 2);
+
+/* cmd_mbox_query_fw_doorbell_page_offset
+ * Offset of the doorbell page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, doorbell_page_offset, 0x40, 0, 64);
+
+/* cmd_mbox_query_fw_doorbell_page_bar
+ * PCI base address register (BAR) of the doorbell page
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, doorbell_page_bar, 0x48, 30, 2);
+
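
Each MLXSW_ITEM32/ITEM64 declaration above expands (in item.h) to typed accessors over the raw mailbox; assuming the usual `_get` suffix convention, a caller decodes QUERY_FW output like so:

	u32 major = mlxsw_cmd_mbox_query_fw_fw_rev_major_get(out_mbox);
	u32 minor = mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(out_mbox);
	u32 sub   = mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(out_mbox);
	u32 pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(out_mbox);
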
+/* QUERY_BOARDINFO - Query Board Information
+ * -----------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_BOARDINFO command retrieves adapter specific parameters.
+ */
+
+static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core,
+				      char *out_mbox)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_BOARDINFO,
+				  0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_boardinfo_intapin
+ * When PCIe interrupt messages are being used, this value is used for clearing
+ * an interrupt. When using MSI-X, this register is not used.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, intapin, 0x10, 24, 8);
+
+/* cmd_mbox_boardinfo_vsd_vendor_id
+ * PCISIG Vendor ID (www.pcisig.com/membership/vid_search) of the vendor
+ * specifying/formatting the VSD. The vsd_vendor_id identifies the management
+ * domain of the VSD/PSID data. Different vendors may choose different VSD/PSID
+ * format and encoding as long as they use their assigned vsd_vendor_id.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, vsd_vendor_id, 0x1C, 0, 16);
+
+/* cmd_mbox_boardinfo_vsd
+ * Vendor Specific Data. The VSD string that is burnt to the Flash
+ * with the firmware.
+ */
+#define MLXSW_CMD_BOARDINFO_VSD_LEN 208
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, vsd, 0x20, MLXSW_CMD_BOARDINFO_VSD_LEN);
+
+/* cmd_mbox_boardinfo_psid
+ * The PSID field is a 16-byte ASCII character string which acts as
+ * the board ID. The PSID format is used in conjunction with
+ * Mellanox vsd_vendor_id (15B3h).
+ */
+#define MLXSW_CMD_BOARDINFO_PSID_LEN 16
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, psid, 0xF0, MLXSW_CMD_BOARDINFO_PSID_LEN);
+
+/* QUERY_AQ_CAP - Query Asynchronous Queues Capabilities
+ * -----------------------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_AQ_CAP command returns the device asynchronous queues
+ * capabilities supported.
+ */
+
+static inline int mlxsw_cmd_query_aq_cap(struct mlxsw_core *mlxsw_core,
+					 char *out_mbox)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_AQ_CAP,
+				  0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_aq_cap_log_max_sdq_sz
+ * Log (base 2) of max WQEs allowed on SDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_sdq_sz, 0x00, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_sdqs
+ * Maximum number of SDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_sdqs, 0x00, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_rdq_sz
+ * Log (base 2) of max WQEs allowed on RDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_rdqs
+ * Maximum number of RDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_cq_sz
+ * Log (base 2) of max CQEs allowed on CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_cqs
+ * Maximum number of CQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_cqs, 0x08, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_eq_sz
+ * Log (base 2) of max EQEs allowed on EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_eq_sz, 0x0C, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_eqs
+ * Maximum number of EQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_eqs, 0x0C, 0, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_sq
+ * The maximum number of S/G list elements in an SDQ. An SDQ must not
+ * contain more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_sq, 0x10, 8, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_rq
+ * The maximum number of S/G list elements in an RDQ. An RDQ must not
+ * contain more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8);
+
+/* MAP_FA - Map Firmware Area
+ * --------------------------
+ * OpMod == 0 (N/A), INMmod == Number of VPM entries
+ * -------------------------------------------------
+ * The MAP_FA command passes physical pages to the switch. These pages
+ * are used to store the device firmware. MAP_FA can be executed multiple
+ * times until all the firmware area is mapped (the size that should be
+ * mapped is retrieved through the QUERY_FW command). All required pages
+ * must be mapped to finish the initialization phase. Physical memory
+ * passed in this command must be pinned.
+ */
+
+static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core,
+				   char *in_mbox, u32 vpm_entries_count)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_MAP_FA,
+				 0, vpm_entries_count,
+				 in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_map_fa_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, map_fa, pa, 0x00, 12, 52, 0x08, 0x00, true);
+
+/* cmd_mbox_map_fa_log2size
+ * Log (base 2) of the size in 4KB pages of the physical and contiguous memory
+ * that starts at PA_L/H.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, map_fa, log2size, 0x00, 0, 5, 0x08, 0x04, false);
+
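
A caller fills one VPM entry per chunk of firmware memory with the indexed setters above (the declared 0x08 stride advances automatically with the index), where log2size 0 means a single 4KB page. Sketch, assuming `num_pages` pages with DMA addresses in `dma_addr[]`:

	int i;

	for (i = 0; i < num_pages; i++) {
		mlxsw_cmd_mbox_map_fa_pa_set(in_mbox, i, dma_addr[i]);
		mlxsw_cmd_mbox_map_fa_log2size_set(in_mbox, i, 0);
	}
	err = mlxsw_cmd_map_fa(mlxsw_core, in_mbox, num_pages);
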
+/* UNMAP_FA - Unmap Firmware Area
+ * ------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The UNMAP_FA command unloads the firmware and unmaps all of the
+ * firmware area. After this command completes, the device will not access
+ * the pages that were mapped to the firmware area. After executing the
+ * UNMAP_FA command, a software reset must be done prior to executing the
+ * MAP_FA command again.
+ */
+
+static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0);
+}
+
+/* CONFIG_PROFILE (Set) - Configure Switch Profile
+ * ------------------------------
+ * OpMod == 1 (Set), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The CONFIG_PROFILE command sets the switch profile. The command can be
+ * executed on the device only once at startup in order to allocate and
+ * configure all switch resources and prepare it for operational mode.
+ * It is not possible to change the device profile after the chip is
+ * in operational mode.
+ * Failure of the CONFIG_PROFILE command leaves the hardware in an
+ * indeterminate state; a software reset of the device is therefore
+ * required after an unsuccessful completion of the command. A software
+ * reset is also required in order to change an existing profile.
+ */
+
+static inline int mlxsw_cmd_config_profile_set(struct mlxsw_core *mlxsw_core,
+					       char *in_mbox)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_CONFIG_PROFILE,
+				 1, 0, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_config_profile_set_max_vepa_channels
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vepa_channels, 0x0C, 0, 1);
+
+/* cmd_mbox_config_profile_set_max_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_lag, 0x0C, 1, 1);
+
+/* cmd_mbox_config_profile_set_max_port_per_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_port_per_lag, 0x0C, 2, 1);
+
+/* cmd_mbox_config_profile_set_max_mid
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_mid, 0x0C, 3, 1);
+
+/* cmd_mbox_config_profile_set_max_pgt
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pgt, 0x0C, 4, 1);
+
+/* cmd_mbox_config_profile_set_max_system_port
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_system_port, 0x0C, 5, 1);
+
+/* cmd_mbox_config_profile_set_max_vlan_groups
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1);
+
+/* cmd_mbox_config_profile_set_max_regions
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1);
+
+/* cmd_mbox_config_profile_set_flood_mode
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_mode, 0x0C, 8, 1);
+
+/* cmd_mbox_config_profile_set_flood_tables
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_tables, 0x0C, 9, 1);
+
+/* cmd_mbox_config_profile_set_max_ib_mc
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_ib_mc, 0x0C, 12, 1);
+
+/* cmd_mbox_config_profile_set_max_pkey
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pkey, 0x0C, 13, 1);
+
+/* cmd_mbox_config_profile_set_adaptive_routing_group_cap
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+	     set_adaptive_routing_group_cap, 0x0C, 14, 1);
+
+/* cmd_mbox_config_profile_set_ar_sec
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+
+/* cmd_mbox_config_profile_max_vepa_channels
+ * Maximum number of VEPA channels per port (0 through 16)
+ * 0 - multi-channel VEPA is disabled
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vepa_channels, 0x10, 0, 8);
+
+/* cmd_mbox_config_profile_max_lag
+ * Maximum number of LAG IDs requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_lag, 0x14, 0, 16);
+
+/* cmd_mbox_config_profile_max_port_per_lag
+ * Maximum number of ports per LAG requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_port_per_lag, 0x18, 0, 16);
+
+/* cmd_mbox_config_profile_max_mid
+ * Maximum Multicast IDs.
+ * Multicast IDs are allocated from 0 to max_mid-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_mid, 0x1C, 0, 16);
+
+/* cmd_mbox_config_profile_max_pgt
+ * Maximum records in the Port Group Table per Switch Partition.
+ * Port Group Table indexes are from 0 to max_pgt-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pgt, 0x20, 0, 16);
+
+/* cmd_mbox_config_profile_max_system_port
+ * The maximum number of system ports that can be allocated.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_system_port, 0x24, 0, 16);
+
+/* cmd_mbox_config_profile_max_vlan_groups
+ * Maximum number of VLAN Groups for VLAN binding.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12);
+
+/* cmd_mbox_config_profile_max_regions
+ * Maximum number of TCAM Regions.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16);
+
+/* cmd_mbox_config_profile_max_flood_tables
+ * Maximum number of Flooding Tables. Flooding Tables are associated with
+ * the different packet types for the different switch partitions.
+ * Note that the table size depends on the fid_based mode.
+ * In SwitchX silicon, tables are split equally between the switch
+ * partitions, e.g. for 2 swids and 8 tables, the first 4 are associated
+ * with swid-1 and the last 4 are associated with swid-2.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
+
+/* cmd_mbox_config_profile_max_vid_flood_tables
+ * Maximum number of per-VID flooding tables. Flooding tables are associated
+ * with the different packet types for the different switch partitions.
+ * Table size is 4K entries covering all VID space.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
+
+/* cmd_mbox_config_profile_flood_mode
+ * FID Based Flood Mode
+ * 00 Do not use FID to offset the index into the Port Group Table/Multicast ID
+ * 01 Use FID to offset the index to the Port Group Table (pgi)
+ * 10 Use FID to offset the index to the Port Group Table (pgi) and
+ * the Multicast ID
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
+
+/* cmd_mbox_config_profile_max_ib_mc
+ * Maximum number of multicast FDB records for InfiniBand
+ * FDB (in 512 chunks) per InfiniBand switch partition.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_ib_mc, 0x40, 0, 15);
+
+/* cmd_mbox_config_profile_max_pkey
+ * Maximum per port PKEY table size (for PKEY enforcement)
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pkey, 0x44, 0, 15);
+
+/* cmd_mbox_config_profile_ar_sec
+ * Primary/secondary capability
+ * Describes the number of adaptive routing sub-groups
+ * 0 - disable primary/secondary (single group)
+ * 1 - enable primary/secondary (2 sub-groups)
+ * 2 - 3 sub-groups: Not supported in SwitchX, SwitchX-2
+ * 3 - 4 sub-groups: Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, ar_sec, 0x4C, 24, 2);
+
+/* cmd_mbox_config_profile_adaptive_routing_group_cap
+ * Adaptive Routing Group Capability. Indicates the number of AR groups
+ * supported. Note that when Primary/secondary is enabled, each
+ * primary/secondary couple consumes 2 adaptive routing entries.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
+
+/* cmd_mbox_config_profile_arn
+ * Adaptive Routing Notification Enable
+ * Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
+
+/* cmd_mbox_config_profile_swid_config_mask
+ * Modify Switch Partition Configuration mask. When set, the
+ * configuration values for the Switch Partition are taken from the
+ * mailbox. When clear, the current configuration values are used.
+ * Bit 0 - set type
+ * Bit 1 - properties
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_mask,
+		     0x60, 24, 8, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_type
+ * Switch Partition type.
+ * 0000 - disabled (Switch Partition does not exist)
+ * 0001 - InfiniBand
+ * 0010 - Ethernet
+ * 1000 - router port (SwitchX-2 only)
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
+		     0x60, 20, 4, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_properties
+ * Switch Partition properties.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
+		     0x60, 0, 8, 0x08, 0x00, false);
+
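
Each profile value above pairs with a set_* capability bit; the firmware applies only the fields whose bit is 1 and keeps its defaults for the rest. A sketch of requesting a single field via the generated setters (the value 64 is illustrative):

	mlxsw_cmd_mbox_config_profile_set_max_lag_set(in_mbox, 1);
	mlxsw_cmd_mbox_config_profile_max_lag_set(in_mbox, 64);

	err = mlxsw_cmd_config_profile_set(mlxsw_core, in_mbox);
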
+/* ACCESS_REG - Access EMAD Supported Register
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -------------------------------------
+ * The ACCESS_REG command supports accessing device registers. This access
+ * is mainly used for bootstrapping.
+ */
+
+static inline int mlxsw_cmd_access_reg(struct mlxsw_core *mlxsw_core,
+				       char *in_mbox, char *out_mbox)
+{
+	return mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_ACCESS_REG,
+			      0, 0, false, in_mbox, MLXSW_CMD_MBOX_SIZE,
+			      out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_DQ - Software to Hardware DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The SW2HW_DQ command transitions a descriptor queue from software to
+ * hardware ownership. The command enables posting WQEs and ringing DoorBells
+ * on the descriptor queue.
+ */
+
+static inline int __mlxsw_cmd_sw2hw_dq(struct mlxsw_core *mlxsw_core,
+				       char *in_mbox, u32 dq_number,
+				       u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_DQ,
+				 opcode_mod, dq_number,
+				 in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+enum {
+	MLXSW_CMD_OPCODE_MOD_SDQ = 0,
+	MLXSW_CMD_OPCODE_MOD_RDQ = 1,
+};
+
+static inline int mlxsw_cmd_sw2hw_sdq(struct mlxsw_core *mlxsw_core,
+				      char *in_mbox, u32 dq_number)
+{
+	return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core,
+				      char *in_mbox, u32 dq_number)
+{
+	return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* cmd_mbox_sw2hw_dq_cq
+ * Number of the CQ that this Descriptor Queue reports completions to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8);
+
+/* cmd_mbox_sw2hw_dq_sdq_tclass
+ * SDQ: CPU Egress TClass
+ * RDQ: Reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_tclass, 0x00, 16, 6);
+
+/* cmd_mbox_sw2hw_dq_log2_dq_sz
+ * Log (base 2) of the Descriptor Queue size in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, log2_dq_sz, 0x00, 0, 6);
+
+/* cmd_mbox_sw2hw_dq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_dq, pa, 0x10, 12, 52, 0x08, 0x00, true);
+
+/* HW2SW_DQ - Hardware to Software DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The HW2SW_DQ command transitions a descriptor queue from hardware to
+ * software ownership. Incoming packets on the DQ are silently discarded;
+ * SW should not post descriptors on non-operational DQs.
+ */
+
+static inline int __mlxsw_cmd_hw2sw_dq(struct mlxsw_core *mlxsw_core,
+				       u32 dq_number, u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_DQ,
+				   opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_hw2sw_sdq(struct mlxsw_core *mlxsw_core,
+				      u32 dq_number)
+{
+	return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_hw2sw_rdq(struct mlxsw_core *mlxsw_core,
+				      u32 dq_number)
+{
+	return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* 2ERR_DQ - To Error DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The 2ERR_DQ command transitions the DQ into the error state from
+ * whatever state it was in. While the command is executed, some in-process
+ * descriptors may complete. Once the DQ transitions into the error state,
+ * if there are posted descriptors on the RDQ/SDQ, the hardware writes
+ * a completion with error (flushed) for all descriptors posted in the RDQ/SDQ.
+ * When the command is completed successfully, the DQ is already in
+ * the error state.
+ */
+
+static inline int __mlxsw_cmd_2err_dq(struct mlxsw_core *mlxsw_core,
+				      u32 dq_number, u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_2ERR_DQ,
+				   opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_2err_sdq(struct mlxsw_core *mlxsw_core,
+				     u32 dq_number)
+{
+	return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+				   MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_2err_rdq(struct mlxsw_core *mlxsw_core,
+				     u32 dq_number)
+{
+	return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+				   MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* QUERY_DQ - Query DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The QUERY_DQ command retrieves a snapshot of DQ parameters from the hardware.
+ *
+ * Note: Output mailbox has the same format as SW2HW_DQ.
+ */
+
+static inline int __mlxsw_cmd_query_dq(struct mlxsw_core *mlxsw_core,
+				       char *out_mbox, u32 dq_number,
+				       u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_DQ,
+				  opcode_mod, dq_number, false,
+				  out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+static inline int mlxsw_cmd_query_sdq(struct mlxsw_core *mlxsw_core,
+				      char *out_mbox, u32 dq_number)
+{
+	return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_query_rdq(struct mlxsw_core *mlxsw_core,
+				      char *out_mbox, u32 dq_number)
+{
+	return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* SW2HW_CQ - Software to Hardware CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The SW2HW_CQ command transfers ownership of a CQ context entry from software
+ * to hardware. The command takes the CQ context entry from the input mailbox
+ * and stores it in the CQC in the ownership of the hardware. The command fails
+ * if the requested CQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
+				     char *in_mbox, u32 cq_number)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_CQ,
+				 0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_cq_cv
+ * CQE Version.
+ * 0 - CQE Version 0, 1 - CQE Version 1
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
+
+/* cmd_mbox_sw2hw_cq_c_eqn
+ * Event Queue this CQ reports completion events to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_cq_oi
+ * When set, overrun ignore is enabled: DoorBells that update the
+ * CQ consumer counter (poll for completion) or request completion
+ * notifications (Arm CQ) should not be rung on that CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_cq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, st, 0x00, 8, 1);
+
+/* cmd_mbox_sw2hw_cq_log_cq_size
+ * Log (base 2) of the CQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, log_cq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_cq_producer_counter
+ * Producer Counter. The counter is incremented for each CQE that is
+ * written by the HW to the CQ.
+ * Maintained by HW (valid for the QUERY_CQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_cq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_cq, pa, 0x10, 11, 53, 0x08, 0x00, true);
+
+/* HW2SW_CQ - Hardware to Software CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The HW2SW_CQ command transfers ownership of a CQ context entry from hardware
+ * to software. The CQC entry is invalidated as a result of this command.
+ */
+
+static inline int mlxsw_cmd_hw2sw_cq(struct mlxsw_core *mlxsw_core,
+				     u32 cq_number)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_CQ,
+				   0, cq_number);
+}
+
+/* QUERY_CQ - Query CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The QUERY_CQ command retrieves a snapshot of the current CQ context entry.
+ * The command stores the snapshot in the output mailbox in the software format.
+ * Note that the CQ context state and values are not affected by the QUERY_CQ
+ * command. The QUERY_CQ command is for debug purposes only.
+ *
+ * Note: Output mailbox has the same format as SW2HW_CQ.
+ */
+
+static inline int mlxsw_cmd_query_cq(struct mlxsw_core *mlxsw_core,
+				     char *out_mbox, u32 cq_number)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_CQ,
+				  0, cq_number, false,
+				  out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_EQ - Software to Hardware EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ * The SW2HW_EQ command transfers ownership of an EQ context entry from software
+ * to hardware. The command takes the EQ context entry from the input mailbox
+ * and stores it in the EQC in the ownership of the hardware. The command fails
+ * if the requested EQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
+				     char *in_mbox, u32 eq_number)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_EQ,
+				 0, eq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_eq_int_msix
+ * When set, MSI-X cycles will be generated by this EQ.
+ * When cleared, an interrupt will be generated by this EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_eq_oi
+ * When set, overrun ignore is enabled.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_eq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ * 0x3 - Always ARMED
+ * other - reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2);
+
+/* cmd_mbox_sw2hw_eq_log_eq_size
+ * Log (base 2) of the EQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_eq_producer_counter
+ * Producer Counter. The counter is incremented for each EQE that is written
+ * by the HW to the EQ.
+ * Maintained by HW (valid for the QUERY_EQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_eq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true);
+
+/* HW2SW_EQ - Hardware to Software EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ */
+
+static inline int mlxsw_cmd_hw2sw_eq(struct mlxsw_core *mlxsw_core,
+				     u32 eq_number)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_EQ,
+				   0, eq_number);
+}
+
+/* QUERY_EQ - Query EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ *
+ * Note: Output mailbox has the same format as SW2HW_EQ.
+ */
+
+static inline int mlxsw_cmd_query_eq(struct mlxsw_core *mlxsw_core,
+				     char *out_mbox, u32 eq_number)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_EQ,
+				  0, eq_number, false,
+				  out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
new file mode 100644
index 0000000..dbcaf5d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -0,0 +1,1295 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/if_link.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/netdevice.h>
+#include <linux/wait.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+
+#include "core.h"
+#include "item.h"
+#include "cmd.h"
+#include "port.h"
+#include "trap.h"
+#include "emad.h"
+#include "reg.h"
+
+static LIST_HEAD(mlxsw_core_driver_list);
+static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
+
+static const char mlxsw_core_driver_name[] = "mlxsw_core";
+
+static struct dentry *mlxsw_core_dbg_root;
+
+struct mlxsw_core_pcpu_stats {
+	u64			trap_rx_packets[MLXSW_TRAP_ID_MAX];
+	u64			trap_rx_bytes[MLXSW_TRAP_ID_MAX];
+	u64			port_rx_packets[MLXSW_PORT_MAX_PORTS];
+	u64			port_rx_bytes[MLXSW_PORT_MAX_PORTS];
+	struct u64_stats_sync	syncp;
+	u32			trap_rx_dropped[MLXSW_TRAP_ID_MAX];
+	u32			port_rx_dropped[MLXSW_PORT_MAX_PORTS];
+	u32			trap_rx_invalid;
+	u32			port_rx_invalid;
+};
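
The u64_stats_sync seqcount lets a reader snapshot each packet/byte counter
pair consistently on 32-bit hosts; the 32-bit dropped/invalid counters are
read without it. A generic sketch of the writer side (the matching
fetch_begin/fetch_retry reader loop appears in mlxsw_core_rx_stats_dbg_read()
below):

/* Sketch: bumping one per-CPU counter pair under the stats seqcount. */
static void example_trap_stats_add(struct mlxsw_core_pcpu_stats *p,
				   u16 trap_id, unsigned int bytes)
{
	u64_stats_update_begin(&p->syncp);
	p->trap_rx_packets[trap_id]++;
	p->trap_rx_bytes[trap_id] += bytes;
	u64_stats_update_end(&p->syncp);
}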
+
+struct mlxsw_core {
+	struct mlxsw_driver *driver;
+	const struct mlxsw_bus *bus;
+	void *bus_priv;
+	const struct mlxsw_bus_info *bus_info;
+	struct list_head rx_listener_list;
+	struct list_head event_listener_list;
+	struct {
+		struct sk_buff *resp_skb;
+		u64 tid;
+		wait_queue_head_t wait;
+		bool trans_active;
+		struct mutex lock; /* One EMAD transaction at a time. */
+		bool use_emad;
+	} emad;
+	struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
+	struct dentry *dbg_dir;
+	struct {
+		struct debugfs_blob_wrapper vsd_blob;
+		struct debugfs_blob_wrapper psid_blob;
+	} dbg;
+	unsigned long driver_priv[0];
+	/* driver_priv must always be the last member */
+};
+
+struct mlxsw_rx_listener_item {
+	struct list_head list;
+	struct mlxsw_rx_listener rxl;
+	void *priv;
+};
+
+struct mlxsw_event_listener_item {
+	struct list_head list;
+	struct mlxsw_event_listener el;
+	void *priv;
+};
+
+/******************
+ * EMAD processing
+ ******************/
+
+/* emad_eth_hdr_dmac
+ * Destination MAC in EMAD's Ethernet header.
+ * Must be set to 01:02:c9:00:00:01
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
+
+/* emad_eth_hdr_smac
+ * Source MAC in EMAD's Ethernet header.
+ * Must be set to 00:02:c9:01:02:03
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
+
+/* emad_eth_hdr_ethertype
+ * Ethertype in EMAD's Ethernet header.
+ * Must be set to 0x8932
+ */
+MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
+
+/* emad_eth_hdr_mlx_proto
+ * Mellanox protocol.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
+
+/* emad_eth_hdr_ver
+ * Mellanox protocol version.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
+
+/* emad_op_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x1 (operation TLV).
+ */
+MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
+
+/* emad_op_tlv_len
+ * Length of the operation TLV in u32.
+ * Must be set to 0x4.
+ */
+MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
+
+/* emad_op_tlv_dr
+ * Direct route bit. Setting to 1 indicates the EMAD is a direct route
+ * EMAD. DR TLV must follow.
+ *
+ * Note: Currently not supported and must not be set.
+ */
+MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
+
+/* emad_op_tlv_status
+ * Returned status in case of EMAD response. Must be set to 0 in case
+ * of EMAD request.
+ * 0x0 - success
+ * 0x1 - device is busy. Requester should retry
+ * 0x2 - Mellanox protocol version not supported
+ * 0x3 - unknown TLV
+ * 0x4 - register not supported
+ * 0x5 - operation class not supported
+ * 0x6 - EMAD method not supported
+ * 0x7 - bad parameter (e.g. port out of range)
+ * 0x8 - resource not available
+ * 0x9 - message receipt acknowledgment. Requester should retry
+ * 0x70 - internal error
+ */
+MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
+
+/* emad_op_tlv_register_id
+ * Register ID of register within register TLV.
+ */
+MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
+
+/* emad_op_tlv_r
+ * Response bit. Setting to 1 indicates Response, otherwise request.
+ */
+MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
+
+/* emad_op_tlv_method
+ * EMAD method type.
+ * 0x1 - query
+ * 0x2 - write
+ * 0x3 - send (currently not supported)
+ * 0x4 - event
+ */
+MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
+
+/* emad_op_tlv_class
+ * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
+ */
+MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
+
+/* emad_op_tlv_tid
+ * EMAD transaction ID. Used for pairing request and response EMADs.
+ */
+MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
+
+/* emad_reg_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x3 (register TLV).
+ */
+MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
+
+/* emad_reg_tlv_len
+ * Length of the register TLV in u32.
+ */
+MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
+
+/* emad_end_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x0 (end TLV).
+ */
+MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
+
+/* emad_end_tlv_len
+ * Length of the end TLV in u32.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
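
Taken together, these items describe a frame made of an Ethernet header, an
operation TLV, a register TLV (one header word plus the register payload) and
an end TLV. The byte offsets below follow directly from the lengths in
emad.h, later in this patch; the helper is a sketch of the same arithmetic
mlxsw_emad_alloc() performs further down, minus the bus-specific TX header:

/* EMAD frame layout (sketch):
 *   0x00              Ethernet header   (MLXSW_EMAD_ETH_HDR_LEN = 0x10)
 *   0x10              operation TLV     (MLXSW_EMAD_OP_TLV_LEN = 4 u32)
 *   0x20              register TLV hdr  (1 u32)
 *   0x24              register payload  (reg->len bytes)
 *   0x24 + reg->len   end TLV           (MLXSW_EMAD_END_TLV_LEN = 1 u32)
 */
static inline size_t example_emad_frame_len(size_t reg_len)
{
	return MLXSW_EMAD_ETH_HDR_LEN +
	       MLXSW_EMAD_OP_TLV_LEN * sizeof(u32) +
	       sizeof(u32) + reg_len +
	       MLXSW_EMAD_END_TLV_LEN * sizeof(u32);
}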
+
+enum mlxsw_core_reg_access_type {
+	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
+	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
+};
+
+static inline const char *
+mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
+{
+	switch (type) {
+	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
+		return "query";
+	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
+		return "write";
+	}
+	BUG();
+}
+
+static void mlxsw_emad_pack_end_tlv(char *end_tlv)
+{
+	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
+	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
+}
+
+static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
+				    const struct mlxsw_reg_info *reg,
+				    char *payload)
+{
+	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
+	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
+	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
+}
+
+static void mlxsw_emad_pack_op_tlv(char *op_tlv,
+				   const struct mlxsw_reg_info *reg,
+				   enum mlxsw_core_reg_access_type type,
+				   struct mlxsw_core *mlxsw_core)
+{
+	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
+	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
+	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
+	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
+	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
+	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
+	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
+		mlxsw_emad_op_tlv_method_set(op_tlv,
+					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
+	else
+		mlxsw_emad_op_tlv_method_set(op_tlv,
+					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
+	mlxsw_emad_op_tlv_class_set(op_tlv,
+				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
+	mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
+}
+
+static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
+{
+	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
+
+	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
+	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
+	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
+	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
+	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
+
+	skb_reset_mac_header(skb);
+
+	return 0;
+}
+
+static void mlxsw_emad_construct(struct sk_buff *skb,
+				 const struct mlxsw_reg_info *reg,
+				 char *payload,
+				 enum mlxsw_core_reg_access_type type,
+				 struct mlxsw_core *mlxsw_core)
+{
+	char *buf;
+
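+	/* The frame is assembled back to front: skb_push() prepends,
+	 * so the end TLV goes in first and the Ethernet header last.
+	 */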
+	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
+	mlxsw_emad_pack_end_tlv(buf);
+
+	buf = skb_push(skb, reg->len + sizeof(u32));
+	mlxsw_emad_pack_reg_tlv(buf, reg, payload);
+
+	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
+	mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);
+
+	mlxsw_emad_construct_eth_hdr(skb);
+}
+
+static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
+{
+	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
+}
+
+static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
+{
+	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
+				      MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
+}
+
+static char *mlxsw_emad_reg_payload(const char *op_tlv)
+{
+	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
+}
+
+static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
+{
+	char *op_tlv;
+
+	op_tlv = mlxsw_emad_op_tlv(skb);
+	return mlxsw_emad_op_tlv_tid_get(op_tlv);
+}
+
+static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
+{
+	char *op_tlv;
+
+	op_tlv = mlxsw_emad_op_tlv(skb);
+	return mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE;
+}
+
+#define MLXSW_EMAD_TIMEOUT_MS 200
+
+static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+				 struct sk_buff *skb,
+				 const struct mlxsw_tx_info *tx_info)
+{
+	int err;
+	int ret;
+
+	err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
+	if (err) {
+		dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
+			mlxsw_core->emad.tid);
+		dev_kfree_skb(skb);
+		return err;
+	}
+
+	mlxsw_core->emad.trans_active = true;
+	ret = wait_event_timeout(mlxsw_core->emad.wait,
+				 !(mlxsw_core->emad.trans_active),
+				 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
+	if (!ret) {
+		dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
+			 mlxsw_core->emad.tid);
+		mlxsw_core->emad.trans_active = false;
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
+				     char *op_tlv)
+{
+	enum mlxsw_emad_op_tlv_status status;
+	u64 tid;
+
+	status = mlxsw_emad_op_tlv_status_get(op_tlv);
+	tid = mlxsw_emad_op_tlv_tid_get(op_tlv);
+
+	switch (status) {
+	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+		return 0;
+	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+		dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
+			 tid, status, mlxsw_emad_op_tlv_status_str(status));
+		return -EAGAIN;
+	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+	default:
+		dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
+			tid, status, mlxsw_emad_op_tlv_status_str(status));
+		return -EIO;
+	}
+}
+
+static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
+					 struct sk_buff *skb)
+{
+	return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
+}
+
+static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+			       struct sk_buff *skb,
+			       const struct mlxsw_tx_info *tx_info)
+{
+	struct sk_buff *trans_skb;
+	int n_retry;
+	int err;
+
+	n_retry = 0;
+retry:
+	/* We copy the EMAD to a new skb, since we might need
+	 * to retransmit it in case of failure.
+	 */
+	trans_skb = skb_copy(skb, GFP_KERNEL);
+	if (!trans_skb) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
+	if (!err) {
+		struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;
+
+		err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
+		if (err)
+			dev_kfree_skb(resp_skb);
+		if (err != -EAGAIN)
+			goto out;
+	}
+	if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
+		goto retry;
+
+out:
+	dev_kfree_skb(skb);
+	mlxsw_core->emad.tid++;
+	return err;
+}
+
+static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
+					void *priv)
+{
+	struct mlxsw_core *mlxsw_core = priv;
+
+	if (mlxsw_emad_is_resp(skb) &&
+	    mlxsw_core->emad.trans_active &&
+	    mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
+		mlxsw_core->emad.resp_skb = skb;
+		mlxsw_core->emad.trans_active = false;
+		wake_up(&mlxsw_core->emad.wait);
+	} else {
+		dev_kfree_skb(skb);
+	}
+}
+
+static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
+	.func = mlxsw_emad_rx_listener_func,
+	.local_port = MLXSW_PORT_DONT_CARE,
+	.trap_id = MLXSW_TRAP_ID_ETHEMAD,
+};
+
+static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
+{
+	char htgt_pl[MLXSW_REG_HTGT_LEN];
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+	int err;
+
+	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
+	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+			    MLXSW_TRAP_ID_ETHEMAD);
+	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+}
+
+static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
+{
+	int err;
+
+	/* Set the upper 32 bits of the transaction ID field to a random
+	 * number. This allows us to discard EMADs addressed to other
+	 * devices.
+	 */
+	get_random_bytes(&mlxsw_core->emad.tid, 4);
+	mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;
+
+	init_waitqueue_head(&mlxsw_core->emad.wait);
+	mlxsw_core->emad.trans_active = false;
+	mutex_init(&mlxsw_core->emad.lock);
+
+	err = mlxsw_core_rx_listener_register(mlxsw_core,
+					      &mlxsw_emad_rx_listener,
+					      mlxsw_core);
+	if (err)
+		return err;
+
+	err = mlxsw_emad_traps_set(mlxsw_core);
+	if (err)
+		goto err_emad_trap_set;
+
+	mlxsw_core->emad.use_emad = true;
+
+	return 0;
+
+err_emad_trap_set:
+	mlxsw_core_rx_listener_unregister(mlxsw_core,
+					  &mlxsw_emad_rx_listener,
+					  mlxsw_core);
+	return err;
+}
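
The transaction ID is therefore split into a random upper half that
identifies this device for its whole lifetime and a lower half that counts
transactions, incremented after each access in mlxsw_emad_transmit() above
and mlxsw_core_reg_access_cmd() below. A sketch of the initial layout:

/* tid layout (sketch):
 *   bits 63..32  random, fixed at EMAD init for this device
 *   bits 31..0   transaction counter, starts at zero
 */
static inline u64 example_emad_initial_tid(u32 random_hi)
{
	return (u64) random_hi << 32;
}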
+
+static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
+{
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+
+	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
+			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+			    MLXSW_TRAP_ID_ETHEMAD);
+	mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+
+	mlxsw_core_rx_listener_unregister(mlxsw_core,
+					  &mlxsw_emad_rx_listener,
+					  mlxsw_core);
+}
+
+static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
+					u16 reg_len)
+{
+	struct sk_buff *skb;
+	u16 emad_len;
+
+	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
+		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
+		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
+	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
+		return NULL;
+
+	skb = netdev_alloc_skb(NULL, emad_len);
+	if (!skb)
+		return NULL;
+	memset(skb->data, 0, emad_len);
+	skb_reserve(skb, emad_len);
+
+	return skb;
+}
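
Reserving the full frame length up front is what lets mlxsw_emad_construct()
assemble the frame with skb_push() alone: after the reserve, the data pointer
sits at the tail and every push moves it backwards. A generic, self-contained
sketch of the pattern:

/* Sketch: reserve the whole frame, then push pieces back to front. */
static struct sk_buff *example_backwards_build(unsigned int header_len,
					       unsigned int trailer_len)
{
	unsigned int len = header_len + trailer_len;
	struct sk_buff *skb = netdev_alloc_skb(NULL, len);

	if (!skb)
		return NULL;
	skb_reserve(skb, len);		/* data == tail after this    */
	skb_push(skb, trailer_len);	/* trailer fills the back     */
	skb_push(skb, header_len);	/* header ends up frontmost   */
	return skb;
}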
+
+/*****************
+ * Core functions
+ *****************/
+
+static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
+{
+	struct mlxsw_core *mlxsw_core = file->private;
+	struct mlxsw_core_pcpu_stats *p;
+	u64 rx_packets, rx_bytes;
+	u64 tmp_rx_packets, tmp_rx_bytes;
+	u32 rx_dropped, rx_invalid;
+	unsigned int start;
+	int i;
+	int j;
+	static const char hdr[] =
+		"     NUM   RX_PACKETS     RX_BYTES RX_DROPPED\n";
+
+	seq_puts(file, hdr);
+	for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
+		rx_packets = 0;
+		rx_bytes = 0;
+		rx_dropped = 0;
+		for_each_possible_cpu(j) {
+			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+			do {
+				start = u64_stats_fetch_begin(&p->syncp);
+				tmp_rx_packets = p->trap_rx_packets[i];
+				tmp_rx_bytes = p->trap_rx_bytes[i];
+			} while (u64_stats_fetch_retry(&p->syncp, start));
+
+			rx_packets += tmp_rx_packets;
+			rx_bytes += tmp_rx_bytes;
+			rx_dropped += p->trap_rx_dropped[i];
+		}
+		seq_printf(file, "trap %3d %12llu %12llu %10u\n",
+			   i, rx_packets, rx_bytes, rx_dropped);
+	}
+	rx_invalid = 0;
+	for_each_possible_cpu(j) {
+		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+		rx_invalid += p->trap_rx_invalid;
+	}
+	seq_printf(file, "trap INV                           %10u\n",
+		   rx_invalid);
+
+	for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
+		rx_packets = 0;
+		rx_bytes = 0;
+		rx_dropped = 0;
+		for_each_possible_cpu(j) {
+			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+			do {
+				start = u64_stats_fetch_begin(&p->syncp);
+				tmp_rx_packets = p->port_rx_packets[i];
+				tmp_rx_bytes = p->port_rx_bytes[i];
+			} while (u64_stats_fetch_retry(&p->syncp, start));
+
+			rx_packets += tmp_rx_packets;
+			rx_bytes += tmp_rx_bytes;
+			rx_dropped += p->port_rx_dropped[i];
+		}
+		seq_printf(file, "port %3d %12llu %12llu %10u\n",
+			   i, rx_packets, rx_bytes, rx_dropped);
+	}
+	rx_invalid = 0;
+	for_each_possible_cpu(j) {
+		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+		rx_invalid += p->port_rx_invalid;
+	}
+	seq_printf(file, "port INV                           %10u\n",
+		   rx_invalid);
+	return 0;
+}
+
+static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
+{
+	struct mlxsw_core *mlxsw_core = inode->i_private;
+
+	return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
+}
+
+static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
+	.owner = THIS_MODULE,
+	.open = mlxsw_core_rx_stats_dbg_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek
+};
+
+static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
+				    const char *buf, size_t size)
+{
+	__be32 *m = (__be32 *) buf;
+	int i;
+	int count = size / sizeof(__be32);
+
+	for (i = count - 1; i >= 0; i--)
+		if (m[i])
+			break;
+	i++;
+	count = i ? i : 1;
+	for (i = 0; i < count; i += 4)
+		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
+			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
+			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
+}
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
+{
+	spin_lock(&mlxsw_core_driver_list_lock);
+	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
+	spin_unlock(&mlxsw_core_driver_list_lock);
+	return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_driver_register);
+
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
+{
+	spin_lock(&mlxsw_core_driver_list_lock);
+	list_del(&mlxsw_driver->list);
+	spin_unlock(&mlxsw_core_driver_list_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_driver_unregister);
+
+static struct mlxsw_driver *__driver_find(const char *kind)
+{
+	struct mlxsw_driver *mlxsw_driver;
+
+	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
+		if (strcmp(mlxsw_driver->kind, kind) == 0)
+			return mlxsw_driver;
+	}
+	return NULL;
+}
+
+static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
+{
+	struct mlxsw_driver *mlxsw_driver;
+
+	spin_lock(&mlxsw_core_driver_list_lock);
+	mlxsw_driver = __driver_find(kind);
+	if (!mlxsw_driver) {
+		spin_unlock(&mlxsw_core_driver_list_lock);
+		request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
+		spin_lock(&mlxsw_core_driver_list_lock);
+		mlxsw_driver = __driver_find(kind);
+	}
+	if (mlxsw_driver) {
+		if (!try_module_get(mlxsw_driver->owner))
+			mlxsw_driver = NULL;
+	}
+
+	spin_unlock(&mlxsw_core_driver_list_lock);
+	return mlxsw_driver;
+}
+
+static void mlxsw_core_driver_put(const char *kind)
+{
+	struct mlxsw_driver *mlxsw_driver;
+
+	spin_lock(&mlxsw_core_driver_list_lock);
+	mlxsw_driver = __driver_find(kind);
+	spin_unlock(&mlxsw_core_driver_list_lock);
+	if (!mlxsw_driver)
+		return;
+	module_put(mlxsw_driver->owner);
+}
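
mlxsw_core_driver_get() relies on the driver module declaring a module alias
through MODULE_MLXSW_DRIVER_ALIAS() (see core.h below): request_module()
loads the module, whose init registers the driver on the list, so the second
__driver_find() succeeds. A hypothetical driver module would wire this up
roughly as follows; every example_* name is illustrative, and a real driver
also fills .txhdr_construct, .txhdr_len and .profile:

/* Hypothetical mlxsw driver registration (sketch only). */
struct example_priv {
	u8 placeholder;
};

static int example_init(void *driver_priv, struct mlxsw_core *mlxsw_core,
			const struct mlxsw_bus_info *mlxsw_bus_info)
{
	return 0;
}

static void example_fini(void *driver_priv)
{
}

static struct mlxsw_driver example_driver = {
	.kind		= MLXSW_DEVICE_KIND_SWITCHX2,
	.owner		= THIS_MODULE,
	.priv_size	= sizeof(struct example_priv),
	.init		= example_init,
	.fini		= example_fini,
	/* .txhdr_construct, .txhdr_len and .profile omitted for brevity */
};

static int __init example_module_init(void)
{
	return mlxsw_core_driver_register(&example_driver);
}
module_init(example_module_init);

MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2);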
+
+static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
+{
+	const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
+
+	mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
+						 mlxsw_core_dbg_root);
+	if (!mlxsw_core->dbg_dir)
+		return -ENOMEM;
+	debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
+			    mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
+	mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
+	mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
+	debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
+			    &mlxsw_core->dbg.vsd_blob);
+	mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
+	mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
+	debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
+			    &mlxsw_core->dbg.psid_blob);
+	return 0;
+}
+
+static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
+{
+	debugfs_remove_recursive(mlxsw_core->dbg_dir);
+}
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+				   const struct mlxsw_bus *mlxsw_bus,
+				   void *bus_priv)
+{
+	const char *device_kind = mlxsw_bus_info->device_kind;
+	struct mlxsw_core *mlxsw_core;
+	struct mlxsw_driver *mlxsw_driver;
+	size_t alloc_size;
+	int err;
+
+	mlxsw_driver = mlxsw_core_driver_get(device_kind);
+	if (!mlxsw_driver)
+		return -EINVAL;
+	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
+	mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
+	if (!mlxsw_core) {
+		err = -ENOMEM;
+		goto err_core_alloc;
+	}
+
+	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
+	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
+	mlxsw_core->driver = mlxsw_driver;
+	mlxsw_core->bus = mlxsw_bus;
+	mlxsw_core->bus_priv = bus_priv;
+	mlxsw_core->bus_info = mlxsw_bus_info;
+
+	mlxsw_core->pcpu_stats =
+		netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
+	if (!mlxsw_core->pcpu_stats) {
+		err = -ENOMEM;
+		goto err_alloc_stats;
+	}
+
+	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
+	if (err)
+		goto err_bus_init;
+
+	err = mlxsw_emad_init(mlxsw_core);
+	if (err)
+		goto err_emad_init;
+
+	err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
+				 mlxsw_bus_info);
+	if (err)
+		goto err_driver_init;
+
+	err = mlxsw_core_debugfs_init(mlxsw_core);
+	if (err)
+		goto err_debugfs_init;
+
+	return 0;
+
+err_debugfs_init:
+	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+err_driver_init:
+	mlxsw_emad_fini(mlxsw_core);
+err_emad_init:
+	mlxsw_bus->fini(bus_priv);
+err_bus_init:
+	free_percpu(mlxsw_core->pcpu_stats);
+err_alloc_stats:
+	kfree(mlxsw_core);
+err_core_alloc:
+	mlxsw_core_driver_put(device_kind);
+	return err;
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_register);
+
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
+{
+	const char *device_kind = mlxsw_core->bus_info->device_kind;
+
+	mlxsw_core_debugfs_fini(mlxsw_core);
+	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+	mlxsw_emad_fini(mlxsw_core);
+	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+	free_percpu(mlxsw_core->pcpu_stats);
+	kfree(mlxsw_core);
+	mlxsw_core_driver_put(device_kind);
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
+
+static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
+{
+	return container_of(driver_priv, struct mlxsw_core, driver_priv);
+}
+
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+				  const struct mlxsw_tx_info *tx_info)
+{
+	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
+						  tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+			    const struct mlxsw_tx_info *tx_info)
+{
+	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
+					     tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit);
+
+static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
+				   const struct mlxsw_rx_listener *rxl_b)
+{
+	return (rxl_a->func == rxl_b->func &&
+		rxl_a->local_port == rxl_b->local_port &&
+		rxl_a->trap_id == rxl_b->trap_id);
+}
+
+static struct mlxsw_rx_listener_item *
+__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
+			const struct mlxsw_rx_listener *rxl,
+			void *priv)
+{
+	struct mlxsw_rx_listener_item *rxl_item;
+
+	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
+		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
+		    rxl_item->priv == priv)
+			return rxl_item;
+	}
+	return NULL;
+}
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+				    const struct mlxsw_rx_listener *rxl,
+				    void *priv)
+{
+	struct mlxsw_rx_listener_item *rxl_item;
+
+	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+	if (rxl_item)
+		return -EEXIST;
+	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
+	if (!rxl_item)
+		return -ENOMEM;
+	rxl_item->rxl = *rxl;
+	rxl_item->priv = priv;
+
+	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
+	return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
+
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_rx_listener *rxl,
+				       void *priv)
+{
+	struct mlxsw_rx_listener_item *rxl_item;
+
+	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+	if (!rxl_item)
+		return;
+	list_del_rcu(&rxl_item->list);
+	synchronize_rcu();
+	kfree(rxl_item);
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
+
+static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
+					   void *priv)
+{
+	struct mlxsw_event_listener_item *event_listener_item = priv;
+	struct mlxsw_reg_info reg;
+	char *payload;
+	char *op_tlv = mlxsw_emad_op_tlv(skb);
+	char *reg_tlv = mlxsw_emad_reg_tlv(skb);
+
+	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
+	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
+	payload = mlxsw_emad_reg_payload(op_tlv);
+	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
+	dev_kfree_skb(skb);
+}
+
+static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
+				      const struct mlxsw_event_listener *el_b)
+{
+	return (el_a->func == el_b->func &&
+		el_a->trap_id == el_b->trap_id);
+}
+
+static struct mlxsw_event_listener_item *
+__find_event_listener_item(struct mlxsw_core *mlxsw_core,
+			   const struct mlxsw_event_listener *el,
+			   void *priv)
+{
+	struct mlxsw_event_listener_item *el_item;
+
+	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
+		if (__is_event_listener_equal(&el_item->el, el) &&
+		    el_item->priv == priv)
+			return el_item;
+	}
+	return NULL;
+}
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_event_listener *el,
+				       void *priv)
+{
+	int err;
+	struct mlxsw_event_listener_item *el_item;
+	const struct mlxsw_rx_listener rxl = {
+		.func = mlxsw_core_event_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = el->trap_id,
+	};
+
+	el_item = __find_event_listener_item(mlxsw_core, el, priv);
+	if (el_item)
+		return -EEXIST;
+	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
+	if (!el_item)
+		return -ENOMEM;
+	el_item->el = *el;
+	el_item->priv = priv;
+
+	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
+	if (err)
+		goto err_rx_listener_register;
+
+	/* No reason to save item if we did not manage to register an RX
+	 * listener for it.
+	 */
+	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
+
+	return 0;
+
+err_rx_listener_register:
+	kfree(el_item);
+	return err;
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_register);
+
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+					  const struct mlxsw_event_listener *el,
+					  void *priv)
+{
+	struct mlxsw_event_listener_item *el_item;
+	const struct mlxsw_rx_listener rxl = {
+		.func = mlxsw_core_event_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = el->trap_id,
+	};
+
+	el_item = __find_event_listener_item(mlxsw_core, el, priv);
+	if (!el_item)
+		return;
+	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
+	list_del(&el_item->list);
+	kfree(el_item);
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
+
+static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
+				      const struct mlxsw_reg_info *reg,
+				      char *payload,
+				      enum mlxsw_core_reg_access_type type)
+{
+	int err;
+	char *op_tlv;
+	struct sk_buff *skb;
+	struct mlxsw_tx_info tx_info = {
+		.local_port = MLXSW_PORT_CPU_PORT,
+		.is_emad = true,
+	};
+
+	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+	if (!skb)
+		return -ENOMEM;
+
+	mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
+	mlxsw_core->driver->txhdr_construct(skb, &tx_info);
+
+	dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
+		mlxsw_core->emad.tid);
+	mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);
+
+	err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
+	if (!err) {
+		op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
+		memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
+		       reg->len);
+
+		dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
+			mlxsw_core->emad.tid - 1);
+		mlxsw_core_buf_dump_dbg(mlxsw_core,
+					mlxsw_core->emad.resp_skb->data,
+					mlxsw_core->emad.resp_skb->len);
+
+		dev_kfree_skb(mlxsw_core->emad.resp_skb);
+	}
+
+	return err;
+}
+
+static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
+				     const struct mlxsw_reg_info *reg,
+				     char *payload,
+				     enum mlxsw_core_reg_access_type type)
+{
+	int err, n_retry;
+	char *in_mbox, *out_mbox, *tmp;
+
+	in_mbox = mlxsw_cmd_mbox_alloc();
+	if (!in_mbox)
+		return -ENOMEM;
+
+	out_mbox = mlxsw_cmd_mbox_alloc();
+	if (!out_mbox) {
+		err = -ENOMEM;
+		goto free_in_mbox;
+	}
+
+	mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
+	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
+	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
+
+	n_retry = 0;
+retry:
+	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
+	if (!err) {
+		err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
+		if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
+			goto retry;
+	}
+
+	if (!err)
+		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
+		       reg->len);
+
+	mlxsw_core->emad.tid++;
+	mlxsw_cmd_mbox_free(out_mbox);
+free_in_mbox:
+	mlxsw_cmd_mbox_free(in_mbox);
+	return err;
+}
+
+static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
+				 const struct mlxsw_reg_info *reg,
+				 char *payload,
+				 enum mlxsw_core_reg_access_type type)
+{
+	u64 cur_tid;
+	int err;
+
+	if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
+		dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
+			reg->id, mlxsw_reg_id_str(reg->id),
+			mlxsw_core_reg_access_type_str(type));
+		return -EINTR;
+	}
+
+	cur_tid = mlxsw_core->emad.tid;
+	dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
+		cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+		mlxsw_core_reg_access_type_str(type));
+
+	/* During initialization EMAD interface is not available to us,
+	 * so we default to command interface. We switch to EMAD interface
+	 * after setting the appropriate traps.
+	 */
+	if (!mlxsw_core->emad.use_emad)
+		err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
+						payload, type);
+	else
+		err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+						 payload, type);
+
+	if (err)
+		dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
+			cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+			mlxsw_core_reg_access_type_str(type));
+
+	mutex_unlock(&mlxsw_core->emad.lock);
+	return err;
+}
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_reg_info *reg, char *payload)
+{
+	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
+}
+EXPORT_SYMBOL(mlxsw_reg_query);
+
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_reg_info *reg, char *payload)
+{
+	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
+}
+EXPORT_SYMBOL(mlxsw_reg_write);
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+			    struct mlxsw_rx_info *rx_info)
+{
+	struct mlxsw_rx_listener_item *rxl_item;
+	const struct mlxsw_rx_listener *rxl;
+	struct mlxsw_core_pcpu_stats *pcpu_stats;
+	u8 local_port = rx_info->sys_port;
+	bool found = false;
+
+	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n",
+			    __func__, rx_info->sys_port, rx_info->trap_id);
+
+	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
+	    (local_port >= MLXSW_PORT_MAX_PORTS))
+		goto drop;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
+		rxl = &rxl_item->rxl;
+		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
+		     rxl->local_port == local_port) &&
+		    rxl->trap_id == rx_info->trap_id) {
+			found = true;
+			break;
+		}
+	}
+	rcu_read_unlock();
+	if (!found)
+		goto drop;
+
+	pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
+	u64_stats_update_begin(&pcpu_stats->syncp);
+	pcpu_stats->port_rx_packets[local_port]++;
+	pcpu_stats->port_rx_bytes[local_port] += skb->len;
+	pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
+	pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
+	u64_stats_update_end(&pcpu_stats->syncp);
+
+	rxl->func(skb, local_port, rxl_item->priv);
+	return;
+
+drop:
+	if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
+		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
+	else
+		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
+	if (local_port >= MLXSW_PORT_MAX_PORTS)
+		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
+	else
+		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
+	dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_receive);
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+		   u32 in_mod, bool out_mbox_direct,
+		   char *in_mbox, size_t in_mbox_size,
+		   char *out_mbox, size_t out_mbox_size)
+{
+	u8 status;
+	int err;
+
+	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
+	if (!mlxsw_core->bus->cmd_exec)
+		return -EOPNOTSUPP;
+
+	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
+	if (in_mbox) {
+		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
+		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
+	}
+
+	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
+					opcode_mod, in_mod, out_mbox_direct,
+					in_mbox, in_mbox_size,
+					out_mbox, out_mbox_size, &status);
+
+	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
+		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
+			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+			in_mod, status, mlxsw_cmd_status_str(status));
+	} else if (err == -ETIMEDOUT) {
+		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+			in_mod);
+	}
+
+	if (!err && out_mbox) {
+		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
+		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
+	}
+	return err;
+}
+EXPORT_SYMBOL(mlxsw_cmd_exec);
+
+static int __init mlxsw_core_module_init(void)
+{
+	mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
+	if (!mlxsw_core_dbg_root)
+		return -ENOMEM;
+	return 0;
+}
+
+static void __exit mlxsw_core_module_exit(void)
+{
+	debugfs_remove_recursive(mlxsw_core_dbg_root);
+}
+
+module_init(mlxsw_core_module_init);
+module_exit(mlxsw_core_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch device core driver");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
new file mode 100644
index 0000000..1658084
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -0,0 +1,207 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CORE_H
+#define _MLXSW_CORE_H
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include "trap.h"
+#include "reg.h"
+
+#include "cmd.h"
+
+#define MLXSW_MODULE_ALIAS_PREFIX "mlxsw-driver-"
+#define MODULE_MLXSW_DRIVER_ALIAS(kind)	\
+	MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind)
+
+#define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2"
+
+struct mlxsw_core;
+struct mlxsw_driver;
+struct mlxsw_bus;
+struct mlxsw_bus_info;
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+				   const struct mlxsw_bus *mlxsw_bus,
+				   void *bus_priv);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
+
+struct mlxsw_tx_info {
+	u8 local_port;
+	bool is_emad;
+};
+
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+				  const struct mlxsw_tx_info *tx_info);
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+			    const struct mlxsw_tx_info *tx_info);
+
+struct mlxsw_rx_listener {
+	void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
+	u8 local_port;
+	u16 trap_id;
+};
+
+struct mlxsw_event_listener {
+	void (*func)(const struct mlxsw_reg_info *reg,
+		     char *payload, void *priv);
+	enum mlxsw_event_trap_id trap_id;
+};
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+				    const struct mlxsw_rx_listener *rxl,
+				    void *priv);
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_rx_listener *rxl,
+				       void *priv);
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_event_listener *el,
+				       void *priv);
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+					  const struct mlxsw_event_listener *el,
+					  void *priv);
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_reg_info *reg, char *payload);
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_reg_info *reg, char *payload);
+
+struct mlxsw_rx_info {
+	u16 sys_port;
+	int trap_id;
+};
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+			    struct mlxsw_rx_info *rx_info);
+
+#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
+
+struct mlxsw_swid_config {
+	u8	used_type:1,
+		used_properties:1;
+	u8	type;
+	u8	properties;
+};
+
+struct mlxsw_config_profile {
+	u16	used_max_vepa_channels:1,
+		used_max_lag:1,
+		used_max_port_per_lag:1,
+		used_max_mid:1,
+		used_max_pgt:1,
+		used_max_system_port:1,
+		used_max_vlan_groups:1,
+		used_max_regions:1,
+		used_flood_tables:1,
+		used_flood_mode:1,
+		used_max_ib_mc:1,
+		used_max_pkey:1,
+		used_ar_sec:1,
+		used_adaptive_routing_group_cap:1;
+	u8	max_vepa_channels;
+	u16	max_lag;
+	u16	max_port_per_lag;
+	u16	max_mid;
+	u16	max_pgt;
+	u16	max_system_port;
+	u16	max_vlan_groups;
+	u16	max_regions;
+	u8	max_flood_tables;
+	u8	max_vid_flood_tables;
+	u8	flood_mode;
+	u16	max_ib_mc;
+	u16	max_pkey;
+	u8	ar_sec;
+	u16	adaptive_routing_group_cap;
+	u8	arn;
+	struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
+};
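
Each used_* bit tells the bus driver to program the corresponding field into
the device during init; fields whose bit stays clear fall back to firmware
defaults. A hypothetical profile, with illustrative values:

/* Hypothetical profile: only LAG and flood-table limits are set. */
static const struct mlxsw_config_profile example_profile = {
	.used_max_lag		= 1,
	.max_lag		= 64,
	.used_max_port_per_lag	= 1,
	.max_port_per_lag	= 16,
	.used_flood_tables	= 1,
	.max_flood_tables	= 2,
	.max_vid_flood_tables	= 1,
};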
+
+struct mlxsw_driver {
+	struct list_head list;
+	const char *kind;
+	struct module *owner;
+	size_t priv_size;
+	int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_bus_info *mlxsw_bus_info);
+	void (*fini)(void *driver_priv);
+	void (*txhdr_construct)(struct sk_buff *skb,
+				const struct mlxsw_tx_info *tx_info);
+	u8 txhdr_len;
+	const struct mlxsw_config_profile *profile;
+};
+
+struct mlxsw_bus {
+	const char *kind;
+	int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_config_profile *profile);
+	void (*fini)(void *bus_priv);
+	bool (*skb_transmit_busy)(void *bus_priv,
+				  const struct mlxsw_tx_info *tx_info);
+	int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
+			    const struct mlxsw_tx_info *tx_info);
+	int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
+			u32 in_mod, bool out_mbox_direct,
+			char *in_mbox, size_t in_mbox_size,
+			char *out_mbox, size_t out_mbox_size,
+			u8 *p_status);
+};
+
+struct mlxsw_bus_info {
+	const char *device_kind;
+	const char *device_name;
+	struct device *dev;
+	struct {
+		u16 major;
+		u16 minor;
+		u16 subminor;
+	} fw_rev;
+	u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
+	u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
+};
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
new file mode 100644
index 0000000..97b6bb5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h
@@ -0,0 +1,127 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/emad.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_EMAD_H
+#define _MLXSW_EMAD_H
+
+#define MLXSW_EMAD_MAX_FRAME_LEN 1518	/* Length in u8 */
+#define MLXSW_EMAD_MAX_RETRY 5
+
+/* EMAD Ethernet header */
+#define MLXSW_EMAD_ETH_HDR_LEN 0x10	/* Length in u8 */
+#define MLXSW_EMAD_EH_DMAC "\x01\x02\xc9\x00\x00\x01"
+#define MLXSW_EMAD_EH_SMAC "\x00\x02\xc9\x01\x02\x03"
+#define MLXSW_EMAD_EH_ETHERTYPE 0x8932
+#define MLXSW_EMAD_EH_MLX_PROTO 0
+#define MLXSW_EMAD_EH_PROTO_VERSION 0
+
+/* EMAD TLV Types */
+enum {
+	MLXSW_EMAD_TLV_TYPE_END,
+	MLXSW_EMAD_TLV_TYPE_OP,
+	MLXSW_EMAD_TLV_TYPE_DR,
+	MLXSW_EMAD_TLV_TYPE_REG,
+	MLXSW_EMAD_TLV_TYPE_USERDATA,
+	MLXSW_EMAD_TLV_TYPE_OOBETH,
+};
+
+/* OP TLV */
+#define MLXSW_EMAD_OP_TLV_LEN 4		/* Length in u32 */
+
+enum {
+	MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS = 1,
+	MLXSW_EMAD_OP_TLV_CLASS_IPC = 2,
+};
+
+enum mlxsw_emad_op_tlv_status {
+	MLXSW_EMAD_OP_TLV_STATUS_SUCCESS,
+	MLXSW_EMAD_OP_TLV_STATUS_BUSY,
+	MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED,
+	MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV,
+	MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED,
+	MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED,
+	MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED,
+	MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER,
+	MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE,
+	MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK,
+	MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR = 0x70,
+};
+
+static inline const char *mlxsw_emad_op_tlv_status_str(u8 status)
+{
+	switch (status) {
+	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+		return "operation performed";
+	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+		return "device is busy";
+	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+		return "version not supported";
+	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+		return "unknown TLV";
+	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+		return "register not supported";
+	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+		return "class not supported";
+	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+		return "method not supported";
+	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+		return "bad parameter";
+	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+		return "resource not available";
+	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+		return "acknowledged. retransmit";
+	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+		return "internal error";
+	default:
+		return "*UNKNOWN*";
+	}
+}
+
+enum {
+	MLXSW_EMAD_OP_TLV_REQUEST,
+	MLXSW_EMAD_OP_TLV_RESPONSE
+};
+
+enum {
+	MLXSW_EMAD_OP_TLV_METHOD_QUERY = 1,
+	MLXSW_EMAD_OP_TLV_METHOD_WRITE = 2,
+	MLXSW_EMAD_OP_TLV_METHOD_SEND = 3,
+	MLXSW_EMAD_OP_TLV_METHOD_EVENT = 5,
+};
+
+/* END TLV */
+#define MLXSW_EMAD_END_TLV_LEN 1	/* Length in u32 */
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
new file mode 100644
index 0000000..ffd55d0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -0,0 +1,405 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/item.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_ITEM_H
+#define _MLXSW_ITEM_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+
+struct mlxsw_item {
+	unsigned short	offset;		/* bytes in container */
+	unsigned short	step;		/* step in bytes for indexed items */
+	unsigned short	in_step_offset; /* offset within one step */
+	unsigned char	shift;		/* shift in bits */
+	unsigned char	element_size;	/* size of element in bit array */
+	bool		no_real_shift;
+	union {
+		unsigned char	bits;
+		unsigned short	bytes;
+	} size;
+	const char	*name;
+};
+
+static inline unsigned int
+__mlxsw_item_offset(struct mlxsw_item *item, unsigned short index,
+		    size_t typesize)
+{
+	BUG_ON(index && !item->step);
+	if (item->offset % typesize != 0 ||
+	    item->step % typesize != 0 ||
+	    item->in_step_offset % typesize != 0) {
+		pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
+		       item->name, item->offset, item->step,
+		       item->in_step_offset, typesize);
+		BUG();
+	}
+
+	return ((item->offset + item->step * index + item->in_step_offset) /
+		typesize);
+}
+
+static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item,
+				     unsigned short index)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
+	__be16 *b = (__be16 *) buf;
+	u16 tmp;
+
+	tmp = be16_to_cpu(b[offset]);
+	tmp >>= item->shift;
+	tmp &= GENMASK(item->size.bits - 1, 0);
+	if (item->no_real_shift)
+		tmp <<= item->shift;
+	return tmp;
+}
+
+static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item,
+				      unsigned short index, u16 val)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index,
+						  sizeof(u16));
+	__be16 *b = (__be16 *) buf;
+	u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+	u16 tmp;
+
+	if (!item->no_real_shift)
+		val <<= item->shift;
+	val &= mask;
+	tmp = be16_to_cpu(b[offset]);
+	tmp &= ~mask;
+	tmp |= val;
+	b[offset] = cpu_to_be16(tmp);
+}
+
+static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item,
+				     unsigned short index)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
+	__be32 *b = (__be32 *) buf;
+	u32 tmp;
+
+	tmp = be32_to_cpu(b[offset]);
+	tmp >>= item->shift;
+	tmp &= GENMASK(item->size.bits - 1, 0);
+	if (item->no_real_shift)
+		tmp <<= item->shift;
+	return tmp;
+}
+
+static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item,
+				      unsigned short index, u32 val)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index,
+						  sizeof(u32));
+	__be32 *b = (__be32 *) buf;
+	u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+	u32 tmp;
+
+	if (!item->no_real_shift)
+		val <<= item->shift;
+	val &= mask;
+	tmp = be32_to_cpu(b[offset]);
+	tmp &= ~mask;
+	tmp |= val;
+	b[offset] = cpu_to_be32(tmp);
+}
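
A worked round trip through the 32-bit accessors, using a hypothetical 4-bit
item at bit shift 8: setting 0x1 ORs 0x00000100 into the big-endian word,
and the getter masks and shifts it back out. A self-contained sketch:

/* Hypothetical 4-bit field at byte offset 0, bit shift 8 (sketch). */
static struct mlxsw_item example_item = {
	.offset = 0x00,
	.shift = 8,
	.size = {.bits = 4,},
	.name = "example_item",
};

static inline u32 example_item_roundtrip(void)
{
	char buf[4] = {};

	__mlxsw_item_set32(buf, &example_item, 0, 0x1);
	/* buf is now 00 00 01 00; the getter returns 0x1. */
	return __mlxsw_item_get32(buf, &example_item, 0);
}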
+
+static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item,
+				     unsigned short index)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+	__be64 *b = (__be64 *) buf;
+	u64 tmp;
+
+	tmp = be64_to_cpu(b[offset]);
+	tmp >>= item->shift;
+	tmp &= GENMASK_ULL(item->size.bits - 1, 0);
+	if (item->no_real_shift)
+		tmp <<= item->shift;
+	return tmp;
+}
+
+static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item,
+				      unsigned short index, u64 val)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+	__be64 *b = (__be64 *) buf;
+	u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
+	u64 tmp;
+
+	if (!item->no_real_shift)
+		val <<= item->shift;
+	val &= mask;
+	tmp = be64_to_cpu(b[offset]);
+	tmp &= ~mask;
+	tmp |= val;
+	b[offset] = cpu_to_be64(tmp);
+}
+
+static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
+					    struct mlxsw_item *item)
+{
+	memcpy(dst, &buf[item->offset], item->size.bytes);
+}
+
+static inline void __mlxsw_item_memcpy_to(char *buf, char *src,
+					  struct mlxsw_item *item)
+{
+	memcpy(&buf[item->offset], src, item->size.bytes);
+}
+
+static inline u16
+__mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
+{
+	u16 max_index, be_index;
+	u16 offset;		/* byte offset inside the array */
+
+	BUG_ON(index && !item->element_size);
+	if (item->offset % sizeof(u32) != 0 ||
+	    BITS_PER_BYTE % item->element_size != 0) {
+		pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
+		       item->name, item->offset, item->element_size);
+		BUG();
+	}
+
+	max_index = (item->size.bytes << 3) / item->element_size - 1;
+	be_index = max_index - index;
+	offset = be_index * item->element_size >> 3;
+	*shift = index % (BITS_PER_BYTE / item->element_size) *
+		 item->element_size;
+
+	return item->offset + offset;
+}
+
+static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item,
+					    u16 index)
+{
+	u8 shift, tmp;
+	u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+
+	tmp = buf[offset];
+	tmp >>= shift;
+	tmp &= GENMASK(item->element_size - 1, 0);
+	return tmp;
+}
+
+static inline void __mlxsw_item_bit_array_set(char *buf, struct mlxsw_item *item,
+					      u16 index, u8 val)
+{
+	u8 shift, tmp;
+	u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+	u8 mask = GENMASK(item->element_size - 1, 0) << shift;
+
+	val <<= shift;
+	val &= mask;
+	tmp = buf[offset];
+	tmp &= ~mask;
+	tmp |= val;
+	buf[offset] = tmp;
+}
+
+#define __ITEM_NAME(_type, _cname, _iname)					\
+	mlxsw_##_type##_##_cname##_##_iname##_item
+
+/* _type: cmd_mbox, reg, etc.
+ * _cname: container name (e.g. command name, register name)
+ * _iname: item name within the container
+ */
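+
+/* As an illustration (hypothetical item, not one defined in this file):
+ *
+ *	MLXSW_ITEM32(reg, spad, base_mac_h, 0x10, 0, 24);
+ *
+ * would emit mlxsw_reg_spad_base_mac_h_get()/_set() helpers operating on a
+ * 24-bit field at bit 0 of the 32-bit big-endian word at byte offset 0x10.
+ */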
+
+#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.shift = _shift,							\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)		\
+{										\
+	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
+}										\
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
+{										\
+	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
+}
+
+#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,	\
+			     _step, _instepoffset, _norealshift)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.step = _step,								\
+	.in_step_offset = _instepoffset,					\
+	.shift = _shift,							\
+	.no_real_shift = _norealshift,						\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u16								\
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)	\
+{										\
+	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname),	\
+				  index);					\
+}										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
+					  u16 val)				\
+{										\
+	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname),		\
+			   index, val);						\
+}
+
+#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.shift = _shift,							\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)		\
+{										\
+	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
+}										\
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
+{										\
+	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
+}
+
+#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,	\
+			     _step, _instepoffset, _norealshift)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.step = _step,								\
+	.in_step_offset = _instepoffset,					\
+	.shift = _shift,							\
+	.no_real_shift = _norealshift,						\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u32								\
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)	\
+{										\
+	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname),	\
+				  index);					\
+}										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
+					  u32 val)				\
+{										\
+	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname),		\
+			   index, val);						\
+}
+
+#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.shift = _shift,							\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)		\
+{										\
+	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
+}										\
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
+{										\
+	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0,	val);	\
+}
+
+#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift,		\
+			     _sizebits, _step, _instepoffset, _norealshift)	\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.step = _step,								\
+	.in_step_offset = _instepoffset,					\
+	.shift = _shift,							\
+	.no_real_shift = _norealshift,						\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u64								\
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)	\
+{										\
+	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname),	\
+				  index);					\
+}										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
+					  u64 val)				\
+{										\
+	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname),		\
+			   index, val);						\
+}
+
+#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.size = {.bytes = _sizebytes,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst)		\
+{										\
+	__mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\
+}										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src)		\
+{										\
+	__mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname));	\
+}
+
+#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes,	\
+			     _element_size)					\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.element_size = _element_size,						\
+	.size = {.bytes = _sizebytes,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u8								\
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, u16 index)			\
+{										\
+	return __mlxsw_item_bit_array_get(buf,					\
+					  &__ITEM_NAME(_type, _cname, _iname),	\
+					  index);				\
+}										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val)		\
+{										\
+	return __mlxsw_item_bit_array_set(buf,					\
+					  &__ITEM_NAME(_type, _cname, _iname),	\
+					  index, val);				\
+}										\
+
+#endif /* _MLXSW_ITEM_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
new file mode 100644
index 0000000..462cea3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -0,0 +1,1826 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/log2.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+
+#include "pci.h"
+#include "core.h"
+#include "cmd.h"
+#include "port.h"
+
+static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
+
+static const struct pci_device_id mlxsw_pci_id_table[] = {
+	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
+	{0, }
+};
+
+static struct dentry *mlxsw_pci_dbg_root;
+
+static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
+{
+	switch (id->device) {
+	case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
+		return MLXSW_DEVICE_KIND_SWITCHX2;
+	default:
+		BUG();
+	}
+}
+
+#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
+	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+#define mlxsw_pci_read32(mlxsw_pci, reg) \
+	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+
+enum mlxsw_pci_queue_type {
+	MLXSW_PCI_QUEUE_TYPE_SDQ,
+	MLXSW_PCI_QUEUE_TYPE_RDQ,
+	MLXSW_PCI_QUEUE_TYPE_CQ,
+	MLXSW_PCI_QUEUE_TYPE_EQ,
+};
+
+static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
+{
+	switch (q_type) {
+	case MLXSW_PCI_QUEUE_TYPE_SDQ:
+		return "sdq";
+	case MLXSW_PCI_QUEUE_TYPE_RDQ:
+		return "rdq";
+	case MLXSW_PCI_QUEUE_TYPE_CQ:
+		return "cq";
+	case MLXSW_PCI_QUEUE_TYPE_EQ:
+		return "eq";
+	}
+	BUG();
+}
+
+#define MLXSW_PCI_QUEUE_TYPE_COUNT	4
+
+static const u16 mlxsw_pci_doorbell_type_offset[] = {
+	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
+	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
+	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
+	0, /* unused */
+	0, /* unused */
+	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+struct mlxsw_pci_mem_item {
+	char *buf;
+	dma_addr_t mapaddr;
+	size_t size;
+};
+
+struct mlxsw_pci_queue_elem_info {
+	char *elem; /* pointer to actual dma mapped element mem chunk */
+	union {
+		struct {
+			struct sk_buff *skb;
+		} sdq;
+		struct {
+			struct sk_buff *skb;
+		} rdq;
+	} u;
+};
+
+struct mlxsw_pci_queue {
+	spinlock_t lock; /* for queue accesses */
+	struct mlxsw_pci_mem_item mem_item;
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	u16 producer_counter;
+	u16 consumer_counter;
+	u16 count; /* number of elements in queue */
+	u8 num; /* queue number */
+	u8 elem_size; /* size of one element */
+	enum mlxsw_pci_queue_type type;
+	struct tasklet_struct tasklet; /* queue processing tasklet */
+	struct mlxsw_pci *pci;
+	union {
+		struct {
+			u32 comp_sdq_count;
+			u32 comp_rdq_count;
+		} cq;
+		struct {
+			u32 ev_cmd_count;
+			u32 ev_comp_count;
+			u32 ev_other_count;
+		} eq;
+	} u;
+};
+
+struct mlxsw_pci_queue_type_group {
+	struct mlxsw_pci_queue *q;
+	u8 count; /* number of queues in group */
+};
+
+struct mlxsw_pci {
+	struct pci_dev *pdev;
+	u8 __iomem *hw_addr;
+	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
+	u32 doorbell_offset;
+	struct msix_entry msix_entry;
+	struct mlxsw_core *core;
+	struct {
+		u16 num_pages;
+		struct mlxsw_pci_mem_item *items;
+	} fw_area;
+	struct {
+		struct mlxsw_pci_mem_item out_mbox;
+		struct mlxsw_pci_mem_item in_mbox;
+		struct mutex lock; /* Lock access to command registers */
+		bool nopoll;
+		wait_queue_head_t wait;
+		bool wait_done;
+		struct {
+			u8 status;
+			u64 out_param;
+		} comp;
+	} cmd;
+	struct mlxsw_bus_info bus_info;
+	struct dentry *dbg_dir;
+};
+
+static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
+{
+	tasklet_schedule(&q->tasklet);
+}
+
+static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
+					size_t elem_size, int elem_index)
+{
+	return q->mem_item.buf + (elem_size * elem_index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+	return &q->elem_info[elem_index];
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
+{
+	int index = q->producer_counter & (q->count - 1);
+
+	if ((q->producer_counter - q->consumer_counter) == q->count)
+		return NULL;
+	return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
+{
+	int index = q->consumer_counter & (q->count - 1);
+
+	return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
+}
+
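+/* Queue element counts are powers of two, so (counter & q->count) flips on
+ * every full pass through the ring. An element is still owned by HW when
+ * its owner bit differs from that wrap parity of the consumer counter.
+ */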
+static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
+{
+	return owner_bit != !!(q->consumer_counter & q->count);
+}
+
+static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
+					 u32 (*get_elem_owner_func)(char *))
+{
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	char *elem;
+	bool owner_bit;
+
+	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+	elem = elem_info->elem;
+	owner_bit = get_elem_owner_func(elem);
+	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+		return NULL;
+	q->consumer_counter++;
+	rmb(); /* make sure we read owned bit before the rest of elem */
+	return elem;
+}
+
+static struct mlxsw_pci_queue_type_group *
+mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
+			       enum mlxsw_pci_queue_type q_type)
+{
+	return &mlxsw_pci->queues[q_type];
+}
+
+static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
+				  enum mlxsw_pci_queue_type q_type)
+{
+	struct mlxsw_pci_queue_type_group *queue_group;
+
+	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
+	return queue_group->count;
+}
+
+static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
+}
+
+static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ);
+}
+
+static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
+{
+	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
+}
+
+static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci)
+{
+	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ);
+}
+
+static struct mlxsw_pci_queue *
+__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
+		      enum mlxsw_pci_queue_type q_type, u8 q_num)
+{
+	return &mlxsw_pci->queues[q_type].q[q_num];
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
+						 u8 q_num)
+{
+	return __mlxsw_pci_queue_get(mlxsw_pci,
+				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
+						 u8 q_num)
+{
+	return __mlxsw_pci_queue_get(mlxsw_pci,
+				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
+						u8 q_num)
+{
+	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
+						u8 q_num)
+{
+	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
+}
+
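+/* Doorbell helpers. Each queue type has its own doorbell record (see
+ * mlxsw_pci_doorbell_type_offset); the producer doorbell passes freshly
+ * built elements to HW, while the consumer and arm doorbells (the latter
+ * defined for CQs and EQs only) acknowledge consumed elements and re-arm
+ * event generation for the queue.
+ */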
+static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
+					   struct mlxsw_pci_queue *q,
+					   u16 val)
+{
+	mlxsw_pci_write32(mlxsw_pci,
+			  DOORBELL(mlxsw_pci->doorbell_offset,
+				   mlxsw_pci_doorbell_type_offset[q->type],
+				   q->num), val);
+}
+
+static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
+					       struct mlxsw_pci_queue *q,
+					       u16 val)
+{
+	mlxsw_pci_write32(mlxsw_pci,
+			  DOORBELL(mlxsw_pci->doorbell_offset,
+				   mlxsw_pci_doorbell_arm_type_offset[q->type],
+				   q->num), val);
+}
+
+static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
+						   struct mlxsw_pci_queue *q)
+{
+	wmb(); /* ensure all writes are done before we ring a bell */
+	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
+}
+
+static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+						   struct mlxsw_pci_queue *q)
+{
+	wmb(); /* ensure all writes are done before we ring a bell */
+	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
+				       q->consumer_counter + q->count);
+}
+
+static void
+mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+					   struct mlxsw_pci_queue *q)
+{
+	wmb(); /* ensure all writes are done before we ring a bell */
+	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
+}
+
+static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
+					     int page_index)
+{
+	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
+}
+
+static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+			      struct mlxsw_pci_queue *q)
+{
+	int i;
+	int err;
+
+	q->producer_counter = 0;
+	q->consumer_counter = 0;
+
+	/* Set CQ of the same number as this SDQ. */
+	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
+	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7);
+	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+	}
+
+	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
+	if (err)
+		return err;
+	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+	return 0;
+}
+
+static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
+			       struct mlxsw_pci_queue *q)
+{
+	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data)
+{
+	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+	struct mlxsw_pci_queue *q;
+	int i;
+	static const char hdr[] =
+		"NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+	seq_puts(file, hdr);
+	for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) {
+		q = mlxsw_pci_sdq_get(mlxsw_pci, i);
+		spin_lock_bh(&q->lock);
+		seq_printf(file, "%3d %10d %10d %5d\n",
+			   i, q->producer_counter, q->consumer_counter,
+			   q->count);
+		spin_unlock_bh(&q->lock);
+	}
+	return 0;
+}
+
+static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
+				  int index, char *frag_data, size_t frag_len,
+				  int direction)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	dma_addr_t mapaddr;
+
+	mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
+	if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
+		if (net_ratelimit())
+			dev_err(&pdev->dev, "failed to dma map tx frag\n");
+		return -EIO;
+	}
+	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
+	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
+	return 0;
+}
+
+static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
+				     int index, int direction)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
+	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);
+
+	if (!frag_len)
+		return;
+	pci_unmap_single(pdev, mapaddr, frag_len, direction);
+}
+
+static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
+				   struct mlxsw_pci_queue_elem_info *elem_info)
+{
+	size_t buf_len = MLXSW_PORT_MAX_MTU;
+	char *wqe = elem_info->elem;
+	struct sk_buff *skb;
+	int err;
+
+	elem_info->u.rdq.skb = NULL;
+	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
+	if (!skb)
+		return -ENOMEM;
+
+	/* Assume that wqe was previously zeroed. */
+
+	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+				     buf_len, DMA_FROM_DEVICE);
+	if (err)
+		goto err_frag_map;
+
+	elem_info->u.rdq.skb = skb;
+	return 0;
+
+err_frag_map:
+	dev_kfree_skb_any(skb);
+	return err;
+}
+
+static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
+				   struct mlxsw_pci_queue_elem_info *elem_info)
+{
+	struct sk_buff *skb;
+	char *wqe;
+
+	skb = elem_info->u.rdq.skb;
+	wqe = elem_info->elem;
+
+	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+	dev_kfree_skb_any(skb);
+}
+
+static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+			      struct mlxsw_pci_queue *q)
+{
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	int i;
+	int err;
+
+	q->producer_counter = 0;
+	q->consumer_counter = 0;
+
+	/* Set CQ of the same number as this RDQ, offset by
+	 * MLXSW_PCI_SDQS_COUNT since the lower CQ numbers are
+	 * assigned to the SDQs.
+	 */
+	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT);
+	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+	}
+
+	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
+	if (err)
+		return err;
+
+	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+	for (i = 0; i < q->count; i++) {
+		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+		BUG_ON(!elem_info);
+		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+		if (err)
+			goto rollback;
+		/* Everything is set up, ring doorbell to pass elem to HW */
+		q->producer_counter++;
+		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+	}
+
+	return 0;
+
+rollback:
+	for (i--; i >= 0; i--) {
+		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+	}
+	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+
+	return err;
+}
+
+static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
+			       struct mlxsw_pci_queue *q)
+{
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	int i;
+
+	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+	for (i = 0; i < q->count; i++) {
+		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+	}
+}
+
+static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data)
+{
+	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+	struct mlxsw_pci_queue *q;
+	int i;
+	static const char hdr[] =
+		"NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+	seq_puts(file, hdr);
+	for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) {
+		q = mlxsw_pci_rdq_get(mlxsw_pci, i);
+		spin_lock_bh(&q->lock);
+		seq_printf(file, "%3d %10d %10d %5d\n",
+			   i, q->producer_counter, q->consumer_counter,
+			   q->count);
+		spin_unlock_bh(&q->lock);
+	}
+	return 0;
+}
+
+static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+			     struct mlxsw_pci_queue *q)
+{
+	int i;
+	int err;
+
+	q->consumer_counter = 0;
+
+	for (i = 0; i < q->count; i++) {
+		char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+		mlxsw_pci_cqe_owner_set(elem, 1);
+	}
+
+	mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
+	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
+	mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
+	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
+	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
+	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
+	}
+	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
+	if (err)
+		return err;
+	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+	return 0;
+}
+
+static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
+			      struct mlxsw_pci_queue *q)
+{
+	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data)
+{
+	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+
+	struct mlxsw_pci_queue *q;
+	int i;
+	static const char hdr[] =
+		"NUM CONS_INDEX  SDQ_COUNT  RDQ_COUNT COUNT\n";
+
+	seq_puts(file, hdr);
+	for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) {
+		q = mlxsw_pci_cq_get(mlxsw_pci, i);
+		spin_lock_bh(&q->lock);
+		seq_printf(file, "%3d %10d %10d %10d %5d\n",
+			   i, q->consumer_counter, q->u.cq.comp_sdq_count,
+			   q->u.cq.comp_rdq_count, q->count);
+		spin_unlock_bh(&q->lock);
+	}
+	return 0;
+}
+
+static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
+				     struct mlxsw_pci_queue *q,
+				     u16 consumer_counter_limit,
+				     char *cqe)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	char *wqe;
+	struct sk_buff *skb;
+	int i;
+
+	spin_lock(&q->lock);
+	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+	skb = elem_info->u.sdq.skb;
+	wqe = elem_info->elem;
+	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+	dev_kfree_skb_any(skb);
+	elem_info->u.sdq.skb = NULL;
+
+	if (q->consumer_counter++ != consumer_counter_limit)
+		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
+	spin_unlock(&q->lock);
+}
+
+static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+				     struct mlxsw_pci_queue *q,
+				     u16 consumer_counter_limit,
+				     char *cqe)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	char *wqe;
+	struct sk_buff *skb;
+	struct mlxsw_rx_info rx_info;
+	u16 byte_count;
+	int err;
+
+	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+	skb = elem_info->u.rdq.skb;
+	if (!skb)
+		return;
+	wqe = elem_info->elem;
+	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+
+	if (q->consumer_counter++ != consumer_counter_limit)
+		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
+
+	/* We do not support LAG yet */
+	if (mlxsw_pci_cqe_lag_get(cqe))
+		goto drop;
+
+	rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
+	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
+
+	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
+	if (mlxsw_pci_cqe_crc_get(cqe))
+		byte_count -= ETH_FCS_LEN;
+	skb_put(skb, byte_count);
+	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
+
+put_new_skb:
+	memset(wqe, 0, q->elem_size);
+	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+	if (err && net_ratelimit())
+		dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n");
+	/* Everything is set up, ring doorbell to pass elem to HW */
+	q->producer_counter++;
+	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+	return;
+
+drop:
+	dev_kfree_skb_any(skb);
+	goto put_new_skb;
+}
+
+static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
+{
+	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
+}
+
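+/* The CQ tasklet consumes up to half of the queue (its credits) per run.
+ * SDQ completions release TX skbs and their DMA mappings; RDQ completions
+ * hand RX skbs to the core and refill the descriptor. Afterwards the
+ * consumer doorbell is rung and the CQ is re-armed.
+ */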
+static void mlxsw_pci_cq_tasklet(unsigned long data)
+{
+	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+	struct mlxsw_pci *mlxsw_pci = q->pci;
+	char *cqe;
+	int items = 0;
+	int credits = q->count >> 1;
+
+	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
+		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
+		u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
+		u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);
+
+		if (sendq) {
+			struct mlxsw_pci_queue *sdq;
+
+			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
+			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
+						 wqe_counter, cqe);
+			q->u.cq.comp_sdq_count++;
+		} else {
+			struct mlxsw_pci_queue *rdq;
+
+			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
+			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+						 wqe_counter, cqe);
+			q->u.cq.comp_rdq_count++;
+		}
+		if (++items == credits)
+			break;
+	}
+	if (items) {
+		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+	}
+}
+
+static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+			     struct mlxsw_pci_queue *q)
+{
+	int i;
+	int err;
+
+	q->consumer_counter = 0;
+
+	for (i = 0; i < q->count; i++) {
+		char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+		mlxsw_pci_eqe_owner_set(elem, 1);
+	}
+
+	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
+	mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
+	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
+	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
+	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
+	}
+	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
+	if (err)
+		return err;
+	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+	return 0;
+}
+
+static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
+			      struct mlxsw_pci_queue *q)
+{
+	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data)
+{
+	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+	struct mlxsw_pci_queue *q;
+	int i;
+	static const char hdr[] =
+		"NUM CONS_COUNT     EV_CMD    EV_COMP   EV_OTHER COUNT\n";
+
+	seq_puts(file, hdr);
+	for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) {
+		q = mlxsw_pci_eq_get(mlxsw_pci, i);
+		spin_lock_bh(&q->lock);
+		seq_printf(file, "%3d %10d %10d %10d %10d %5d\n",
+			   i, q->consumer_counter, q->u.eq.ev_cmd_count,
+			   q->u.eq.ev_comp_count, q->u.eq.ev_other_count,
+			   q->count);
+		spin_unlock_bh(&q->lock);
+	}
+	return 0;
+}
+
+static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
+{
+	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
+	mlxsw_pci->cmd.comp.out_param =
+		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
+		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
+	mlxsw_pci->cmd.wait_done = true;
+	wake_up(&mlxsw_pci->cmd.wait);
+}
+
+static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
+{
+	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
+}
+
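+/* The EQ tasklet dispatches command completion events directly and collects
+ * completion events into a CQ bitmap, scheduling the tasklet of every CQ
+ * that had at least one event. Work per run is bounded by credits (half
+ * the queue).
+ */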
+static void mlxsw_pci_eq_tasklet(unsigned long data)
+{
+	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+	struct mlxsw_pci *mlxsw_pci = q->pci;
+	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)];
+	char *eqe;
+	u8 cqn;
+	bool cq_handle = false;
+	int items = 0;
+	int credits = q->count >> 1;
+
+	memset(&active_cqns, 0, sizeof(active_cqns));
+
+	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
+		u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
+
+		switch (event_type) {
+		case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
+			q->u.eq.ev_cmd_count++;
+			break;
+		case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+			cqn = mlxsw_pci_eqe_cqn_get(eqe);
+			set_bit(cqn, active_cqns);
+			cq_handle = true;
+			q->u.eq.ev_comp_count++;
+			break;
+		default:
+			q->u.eq.ev_other_count++;
+		}
+		if (++items == credits)
+			break;
+	}
+	if (items) {
+		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+	}
+
+	if (!cq_handle)
+		return;
+	for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) {
+		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
+		mlxsw_pci_queue_tasklet_schedule(q);
+	}
+}
+
+struct mlxsw_pci_queue_ops {
+	const char *name;
+	enum mlxsw_pci_queue_type type;
+	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
+		    struct mlxsw_pci_queue *q);
+	void (*fini)(struct mlxsw_pci *mlxsw_pci,
+		     struct mlxsw_pci_queue *q);
+	void (*tasklet)(unsigned long data);
+	int (*dbg_read)(struct seq_file *s, void *data);
+	u16 elem_count;
+	u8 elem_size;
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
+	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
+	.init		= mlxsw_pci_sdq_init,
+	.fini		= mlxsw_pci_sdq_fini,
+	.dbg_read	= mlxsw_pci_sdq_dbg_read,
+	.elem_count	= MLXSW_PCI_WQE_COUNT,
+	.elem_size	= MLXSW_PCI_WQE_SIZE,
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
+	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
+	.init		= mlxsw_pci_rdq_init,
+	.fini		= mlxsw_pci_rdq_fini,
+	.dbg_read	= mlxsw_pci_rdq_dbg_read,
+	.elem_count	= MLXSW_PCI_WQE_COUNT,
+	.elem_size	= MLXSW_PCI_WQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
+	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
+	.init		= mlxsw_pci_cq_init,
+	.fini		= mlxsw_pci_cq_fini,
+	.tasklet	= mlxsw_pci_cq_tasklet,
+	.dbg_read	= mlxsw_pci_cq_dbg_read,
+	.elem_count	= MLXSW_PCI_CQE_COUNT,
+	.elem_size	= MLXSW_PCI_CQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
+	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
+	.init		= mlxsw_pci_eq_init,
+	.fini		= mlxsw_pci_eq_fini,
+	.tasklet	= mlxsw_pci_eq_tasklet,
+	.dbg_read	= mlxsw_pci_eq_dbg_read,
+	.elem_count	= MLXSW_PCI_EQE_COUNT,
+	.elem_size	= MLXSW_PCI_EQE_SIZE
+};
+
+static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+				const struct mlxsw_pci_queue_ops *q_ops,
+				struct mlxsw_pci_queue *q, u8 q_num)
+{
+	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+	int i;
+	int err;
+
+	spin_lock_init(&q->lock);
+	q->num = q_num;
+	q->count = q_ops->elem_count;
+	q->elem_size = q_ops->elem_size;
+	q->type = q_ops->type;
+	q->pci = mlxsw_pci;
+
+	if (q_ops->tasklet)
+		tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);
+
+	mem_item->size = MLXSW_PCI_AQ_SIZE;
+	mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+					     mem_item->size,
+					     &mem_item->mapaddr);
+	if (!mem_item->buf)
+		return -ENOMEM;
+	memset(mem_item->buf, 0, mem_item->size);
+
+	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
+	if (!q->elem_info) {
+		err = -ENOMEM;
+		goto err_elem_info_alloc;
+	}
+
+	/* Initialize elem_info entries so that the DMA-mapped
+	 * elements can be accessed easily later.
+	 */
+	for (i = 0; i < q->count; i++) {
+		struct mlxsw_pci_queue_elem_info *elem_info;
+
+		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+		elem_info->elem =
+			__mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
+	}
+
+	mlxsw_cmd_mbox_zero(mbox);
+	err = q_ops->init(mlxsw_pci, mbox, q);
+	if (err)
+		goto err_q_ops_init;
+	return 0;
+
+err_q_ops_init:
+	kfree(q->elem_info);
+err_elem_info_alloc:
+	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+			    mem_item->buf, mem_item->mapaddr);
+	return err;
+}
+
+static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
+				 const struct mlxsw_pci_queue_ops *q_ops,
+				 struct mlxsw_pci_queue *q)
+{
+	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+
+	q_ops->fini(mlxsw_pci, q);
+	kfree(q->elem_info);
+	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+			    mem_item->buf, mem_item->mapaddr);
+}
+
+static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+				      const struct mlxsw_pci_queue_ops *q_ops,
+				      u8 num_qs)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	struct mlxsw_pci_queue_type_group *queue_group;
+	char tmp[16];
+	int i;
+	int err;
+
+	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
+	if (!queue_group->q)
+		return -ENOMEM;
+
+	for (i = 0; i < num_qs; i++) {
+		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
+					   &queue_group->q[i], i);
+		if (err)
+			goto err_queue_init;
+	}
+	queue_group->count = num_qs;
+
+	sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type));
+	debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir,
+				    q_ops->dbg_read);
+
+	return 0;
+
+err_queue_init:
+	for (i--; i >= 0; i--)
+		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+	kfree(queue_group->q);
+	return err;
+}
+
+static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
+				       const struct mlxsw_pci_queue_ops *q_ops)
+{
+	struct mlxsw_pci_queue_type_group *queue_group;
+	int i;
+
+	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+	for (i = 0; i < queue_group->count; i++)
+		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+	kfree(queue_group->q);
+}
+
+static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	u8 num_sdqs;
+	u8 sdq_log2sz;
+	u8 num_rdqs;
+	u8 rdq_log2sz;
+	u8 num_cqs;
+	u8 cq_log2sz;
+	u8 num_eqs;
+	u8 eq_log2sz;
+	int err;
+
+	mlxsw_cmd_mbox_zero(mbox);
+	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
+	if (err)
+		return err;
+
+	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
+	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
+	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
+	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
+	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
+	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
+	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
+	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
+
+	if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) ||
+	    (num_rdqs != MLXSW_PCI_RDQS_COUNT) ||
+	    (num_cqs != MLXSW_PCI_CQS_COUNT) ||
+	    (num_eqs != MLXSW_PCI_EQS_COUNT)) {
+		dev_err(&pdev->dev, "Unsupported number of queues\n");
+		return -EINVAL;
+	}
+
+	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+	    (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
+	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
+		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
+		return -EINVAL;
+	}
+
+	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
+					 num_eqs);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to initialize event queues\n");
+		return err;
+	}
+
+	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
+					 num_cqs);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
+		goto err_cqs_init;
+	}
+
+	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
+					 num_sdqs);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
+		goto err_sdqs_init;
+	}
+
+	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
+					 num_rdqs);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
+		goto err_rdqs_init;
+	}
+
+	/* Queues are initialized, so the command interface no longer has to poll */
+	mlxsw_pci->cmd.nopoll = true;
+	return 0;
+
+err_rdqs_init:
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+err_sdqs_init:
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+err_cqs_init:
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+	return err;
+}
+
+static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
+{
+	mlxsw_pci->cmd.nopoll = false;
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+}
+
+static void
+mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
+				     char *mbox, int index,
+				     const struct mlxsw_swid_config *swid)
+{
+	u8 mask = 0;
+
+	if (swid->used_type) {
+		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
+			mbox, index, swid->type);
+		mask |= 1;
+	}
+	if (swid->used_properties) {
+		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
+			mbox, index, swid->properties);
+		mask |= 2;
+	}
+	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
+}
+
+static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
+				    const struct mlxsw_config_profile *profile)
+{
+	int i;
+
+	mlxsw_cmd_mbox_zero(mbox);
+
+	if (profile->used_max_vepa_channels) {
+		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
+			mbox, profile->max_vepa_channels);
+	}
+	if (profile->used_max_lag) {
+		mlxsw_cmd_mbox_config_profile_set_max_lag_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_lag_set(
+			mbox, profile->max_lag);
+	}
+	if (profile->used_max_port_per_lag) {
+		mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
+			mbox, profile->max_port_per_lag);
+	}
+	if (profile->used_max_mid) {
+		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_mid_set(
+			mbox, profile->max_mid);
+	}
+	if (profile->used_max_pgt) {
+		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_pgt_set(
+			mbox, profile->max_pgt);
+	}
+	if (profile->used_max_system_port) {
+		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_system_port_set(
+			mbox, profile->max_system_port);
+	}
+	if (profile->used_max_vlan_groups) {
+		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
+			mbox, profile->max_vlan_groups);
+	}
+	if (profile->used_max_regions) {
+		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_regions_set(
+			mbox, profile->max_regions);
+	}
+	if (profile->used_flood_tables) {
+		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
+			mbox, profile->max_flood_tables);
+		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
+			mbox, profile->max_vid_flood_tables);
+	}
+	if (profile->used_flood_mode) {
+		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_flood_mode_set(
+			mbox, profile->flood_mode);
+	}
+	if (profile->used_max_ib_mc) {
+		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
+			mbox, profile->max_ib_mc);
+	}
+	if (profile->used_max_pkey) {
+		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_pkey_set(
+			mbox, profile->max_pkey);
+	}
+	if (profile->used_ar_sec) {
+		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_ar_sec_set(
+			mbox, profile->ar_sec);
+	}
+	if (profile->used_adaptive_routing_group_cap) {
+		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
+			mbox, profile->adaptive_routing_group_cap);
+	}
+
+	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
+		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
+						     &profile->swid_config[i]);
+
+	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
+}
+
+static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
+	int err;
+
+	mlxsw_cmd_mbox_zero(mbox);
+	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
+	if (err)
+		return err;
+	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
+	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
+	return 0;
+}
+
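+/* The firmware area is a set of host pages handed over to the device with
+ * the MAP_FA command; the page count comes from the QUERY_FW response (see
+ * mlxsw_pci_init()).
+ */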
+static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+				  u16 num_pages)
+{
+	struct mlxsw_pci_mem_item *mem_item;
+	int i;
+	int err;
+
+	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
+					   GFP_KERNEL);
+	if (!mlxsw_pci->fw_area.items)
+		return -ENOMEM;
+	mlxsw_pci->fw_area.num_pages = num_pages;
+
+	mlxsw_cmd_mbox_zero(mbox);
+	for (i = 0; i < num_pages; i++) {
+		mem_item = &mlxsw_pci->fw_area.items[i];
+
+		mem_item->size = MLXSW_PCI_PAGE_SIZE;
+		mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+						     mem_item->size,
+						     &mem_item->mapaddr);
+		if (!mem_item->buf) {
+			err = -ENOMEM;
+			goto err_alloc;
+		}
+		mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr);
+		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */
+	}
+
+	err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages);
+	if (err)
+		goto err_cmd_map_fa;
+
+	return 0;
+
+err_cmd_map_fa:
+err_alloc:
+	for (i--; i >= 0; i--) {
+		mem_item = &mlxsw_pci->fw_area.items[i];
+
+		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+				    mem_item->buf, mem_item->mapaddr);
+	}
+	kfree(mlxsw_pci->fw_area.items);
+	return err;
+}
+
+static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
+{
+	struct mlxsw_pci_mem_item *mem_item;
+	int i;
+
+	mlxsw_cmd_unmap_fa(mlxsw_pci->core);
+
+	for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) {
+		mem_item = &mlxsw_pci->fw_area.items[i];
+
+		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+				    mem_item->buf, mem_item->mapaddr);
+	}
+	kfree(mlxsw_pci->fw_area.items);
+}
+
+static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
+{
+	struct mlxsw_pci *mlxsw_pci = dev_id;
+	struct mlxsw_pci_queue *q;
+	int i;
+
+	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
+		q = mlxsw_pci_eq_get(mlxsw_pci, i);
+		mlxsw_pci_queue_tasklet_schedule(q);
+	}
+	return IRQ_HANDLED;
+}
+
+static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
+				struct mlxsw_pci_mem_item *mbox)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	int err = 0;
+
+	mbox->size = MLXSW_CMD_MBOX_SIZE;
+	mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
+					 &mbox->mapaddr);
+	if (!mbox->buf) {
+		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
+		err = -ENOMEM;
+	}
+
+	return err;
+}
+
+static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
+				struct mlxsw_pci_mem_item *mbox)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+
+	pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
+			    mbox->mapaddr);
+}
+
+static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
+			  const struct mlxsw_config_profile *profile)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	char *mbox;
+	u16 num_pages;
+	int err;
+
+	mutex_init(&mlxsw_pci->cmd.lock);
+	init_waitqueue_head(&mlxsw_pci->cmd.wait);
+
+	mlxsw_pci->core = mlxsw_core;
+
+	mbox = mlxsw_cmd_mbox_alloc();
+	if (!mbox)
+		return -ENOMEM;
+
+	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+	if (err)
+		goto mbox_put;
+
+	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+	if (err)
+		goto err_out_mbox_alloc;
+
+	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
+	if (err)
+		goto err_query_fw;
+
+	mlxsw_pci->bus_info.fw_rev.major =
+		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
+	mlxsw_pci->bus_info.fw_rev.minor =
+		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
+	mlxsw_pci->bus_info.fw_rev.subminor =
+		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
+
+	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
+		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
+		err = -EINVAL;
+		goto err_iface_rev;
+	}
+	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
+		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
+		err = -EINVAL;
+		goto err_doorbell_page_bar;
+	}
+
+	mlxsw_pci->doorbell_offset =
+		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);
+
+	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
+	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
+	if (err)
+		goto err_fw_area_init;
+
+	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
+	if (err)
+		goto err_boardinfo;
+
+	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
+	if (err)
+		goto err_config_profile;
+
+	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
+	if (err)
+		goto err_aqs_init;
+
+	err = request_irq(mlxsw_pci->msix_entry.vector,
+			  mlxsw_pci_eq_irq_handler, 0,
+			  mlxsw_pci_driver_name, mlxsw_pci);
+	if (err) {
+		dev_err(&pdev->dev, "IRQ request failed\n");
+		goto err_request_eq_irq;
+	}
+
+	goto mbox_put;
+
+err_request_eq_irq:
+	mlxsw_pci_aqs_fini(mlxsw_pci);
+err_aqs_init:
+err_config_profile:
+err_boardinfo:
+	mlxsw_pci_fw_area_fini(mlxsw_pci);
+err_fw_area_init:
+err_doorbell_page_bar:
+err_iface_rev:
+err_query_fw:
+	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+err_out_mbox_alloc:
+	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+mbox_put:
+	mlxsw_cmd_mbox_free(mbox);
+	return err;
+}
+
+static void mlxsw_pci_fini(void *bus_priv)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+	free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci);
+	mlxsw_pci_aqs_fini(mlxsw_pci);
+	mlxsw_pci_fw_area_fini(mlxsw_pci);
+	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+}
+
+static struct mlxsw_pci_queue *
+mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
+		   const struct mlxsw_tx_info *tx_info)
+{
+	u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);
+
+	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
+}
+
+static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
+					const struct mlxsw_tx_info *tx_info)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+
+	return !mlxsw_pci_queue_elem_info_producer_get(q);
+}
+
+static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
+				  const struct mlxsw_tx_info *tx_info)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+	struct mlxsw_pci_queue *q;
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	char *wqe;
+	int i;
+	int err;
+
+	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
+		err = skb_linearize(skb);
+		if (err)
+			return err;
+	}
+
+	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+	spin_lock_bh(&q->lock);
+	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+	if (!elem_info) {
+		/* queue is full */
+		err = -EAGAIN;
+		goto unlock;
+	}
+	elem_info->u.sdq.skb = skb;
+
+	wqe = elem_info->elem;
+	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
+	mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
+	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);
+
+	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+				     skb_headlen(skb), DMA_TO_DEVICE);
+	if (err)
+		goto unlock;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
+					     skb_frag_address(frag),
+					     skb_frag_size(frag),
+					     DMA_TO_DEVICE);
+		if (err)
+			goto unmap_frags;
+	}
+
+	/* Set the byte count of unused SQ entries to zero. */
+	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
+
+	/* Everything is set up, ring producer doorbell to get HW going */
+	q->producer_counter++;
+	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+	goto unlock;
+
+unmap_frags:
+	for (; i >= 0; i--)
+		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+unlock:
+	spin_unlock_bh(&q->lock);
+	return err;
+}
+
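+/* Execute a command via the command interface registers (CIR). Input and
+ * output mailboxes are pre-allocated DMA buffers. Until the event queues
+ * are up (cmd.nopoll is false), completion is detected by polling the GO
+ * bit in CIR_CTRL; afterwards the command completion event from the EQ
+ * wakes us up.
+ */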
+static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
+			      u32 in_mod, bool out_mbox_direct,
+			      char *in_mbox, size_t in_mbox_size,
+			      char *out_mbox, size_t out_mbox_size,
+			      u8 *p_status)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+	dma_addr_t in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
+	dma_addr_t out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
+	bool evreq = mlxsw_pci->cmd.nopoll;
+	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
+	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
+	int err;
+
+	*p_status = MLXSW_CMD_STATUS_OK;
+
+	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
+	if (err)
+		return err;
+
+	if (in_mbox)
+		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
+	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32);
+	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr);
+
+	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32);
+	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr);
+
+	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
+	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
+
+	*p_wait_done = false;
+
+	wmb(); /* everything must be written before we write the control register */
+	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
+			  MLXSW_PCI_CIR_CTRL_GO_BIT |
+			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
+			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
+			  opcode);
+
+	if (!evreq) {
+		unsigned long end;
+
+		end = jiffies + timeout;
+		do {
+			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
+
+			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
+				*p_wait_done = true;
+				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
+				break;
+			}
+			cond_resched();
+		} while (time_before(jiffies, end));
+	} else {
+		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
+		*p_status = mlxsw_pci->cmd.comp.status;
+	}
+
+	err = 0;
+	if (*p_wait_done) {
+		if (*p_status)
+			err = -EIO;
+	} else {
+		err = -ETIMEDOUT;
+	}
+
+	if (!err && out_mbox && out_mbox_direct) {
+		/* Some commands don't use output param as address to mailbox
+		 * but they store output directly into registers. In that case,
+		 * copy registers into mbox buffer.
+		 */
+		__be32 tmp;
+
+		if (!evreq) {
+			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+							   CIR_OUT_PARAM_HI));
+			memcpy(out_mbox, &tmp, sizeof(tmp));
+			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+							   CIR_OUT_PARAM_LO));
+			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
+		}
+	} else if (!err && out_mbox)
+		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
+
+	mutex_unlock(&mlxsw_pci->cmd.lock);
+
+	return err;
+}
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+	.kind			= "pci",
+	.init			= mlxsw_pci_init,
+	.fini			= mlxsw_pci_fini,
+	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
+	.skb_transmit		= mlxsw_pci_skb_transmit,
+	.cmd_exec		= mlxsw_pci_cmd_exec,
+};
+
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
+{
+	mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
+	/* Current firmware does not let us know when the reset is done.
+	 * So we just wait for a fixed time and hope for the best.
+	 */
+	msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+	return 0;
+}
+
+static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct mlxsw_pci *mlxsw_pci;
+	int err;
+
+	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
+	if (!mlxsw_pci)
+		return -ENOMEM;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_device failed\n");
+		goto err_pci_enable_device;
+	}
+
+	err = pci_request_regions(pdev, mlxsw_pci_driver_name);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_regions failed\n");
+		goto err_pci_request_regions;
+	}
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (!err) {
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (err) {
+			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
+			goto err_pci_set_dma_mask;
+		}
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+			goto err_pci_set_dma_mask;
+		}
+	}
+
+	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
+		dev_err(&pdev->dev, "invalid PCI region size\n");
+		err = -EINVAL;
+		goto err_pci_resource_len_check;
+	}
+
+	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
+				     pci_resource_len(pdev, 0));
+	if (!mlxsw_pci->hw_addr) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		err = -EIO;
+		goto err_ioremap;
+	}
+	pci_set_master(pdev);
+
+	mlxsw_pci->pdev = pdev;
+	pci_set_drvdata(pdev, mlxsw_pci);
+
+	err = mlxsw_pci_sw_reset(mlxsw_pci);
+	if (err) {
+		dev_err(&pdev->dev, "Software reset failed\n");
+		goto err_sw_reset;
+	}
+
+	err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
+	if (err) {
+		dev_err(&pdev->dev, "MSI-X init failed\n");
+		goto err_msix_init;
+	}
+
+	mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id);
+	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
+	mlxsw_pci->bus_info.dev = &pdev->dev;
+
+	mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
+						mlxsw_pci_dbg_root);
+	if (!mlxsw_pci->dbg_dir) {
+		dev_err(&pdev->dev, "Failed to create debugfs dir\n");
+		err = -ENOMEM;
+		goto err_dbg_create_dir;
+	}
+
+	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
+					     &mlxsw_pci_bus, mlxsw_pci);
+	if (err) {
+		dev_err(&pdev->dev, "cannot register bus device\n");
+		goto err_bus_device_register;
+	}
+
+	return 0;
+
+err_bus_device_register:
+	debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+err_dbg_create_dir:
+	pci_disable_msix(mlxsw_pci->pdev);
+err_msix_init:
+err_sw_reset:
+	iounmap(mlxsw_pci->hw_addr);
+err_ioremap:
+err_pci_resource_len_check:
+err_pci_set_dma_mask:
+	pci_release_regions(pdev);
+err_pci_request_regions:
+	pci_disable_device(pdev);
+err_pci_enable_device:
+	kfree(mlxsw_pci);
+	return err;
+}
+
+static void mlxsw_pci_remove(struct pci_dev *pdev)
+{
+	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
+
+	mlxsw_core_bus_device_unregister(mlxsw_pci->core);
+	debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+	pci_disable_msix(mlxsw_pci->pdev);
+	iounmap(mlxsw_pci->hw_addr);
+	pci_release_regions(mlxsw_pci->pdev);
+	pci_disable_device(mlxsw_pci->pdev);
+	kfree(mlxsw_pci);
+}
+
+static struct pci_driver mlxsw_pci_driver = {
+	.name		= mlxsw_pci_driver_name,
+	.id_table	= mlxsw_pci_id_table,
+	.probe		= mlxsw_pci_probe,
+	.remove		= mlxsw_pci_remove,
+};
+
+static int __init mlxsw_pci_module_init(void)
+{
+	int err;
+
+	mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
+	if (!mlxsw_pci_dbg_root)
+		return -ENOMEM;
+	err = pci_register_driver(&mlxsw_pci_driver);
+	if (err)
+		goto err_register_driver;
+	return 0;
+
+err_register_driver:
+	debugfs_remove_recursive(mlxsw_pci_dbg_root);
+	return err;
+}
+
+static void __exit mlxsw_pci_module_exit(void)
+{
+	pci_unregister_driver(&mlxsw_pci_driver);
+	debugfs_remove_recursive(mlxsw_pci_dbg_root);
+}
+
+module_init(mlxsw_pci_module_init);
+module_exit(mlxsw_pci_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
+MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
new file mode 100644
index 0000000..1ef9664
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -0,0 +1,227 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_PCI_H
+#define _MLXSW_PCI_H
+
+#include <linux/bitops.h>
+
+#include "item.h"
+
+#define PCI_DEVICE_ID_MELLANOX_SWITCHX2	0xc738
+#define MLXSW_PCI_BAR0_SIZE		(1024 * 1024) /* 1MB */
+#define MLXSW_PCI_PAGE_SIZE		4096
+
+#define MLXSW_PCI_CIR_BASE			0x71000
+#define MLXSW_PCI_CIR_IN_PARAM_HI		MLXSW_PCI_CIR_BASE
+#define MLXSW_PCI_CIR_IN_PARAM_LO		(MLXSW_PCI_CIR_BASE + 0x04)
+#define MLXSW_PCI_CIR_IN_MODIFIER		(MLXSW_PCI_CIR_BASE + 0x08)
+#define MLXSW_PCI_CIR_OUT_PARAM_HI		(MLXSW_PCI_CIR_BASE + 0x0C)
+#define MLXSW_PCI_CIR_OUT_PARAM_LO		(MLXSW_PCI_CIR_BASE + 0x10)
+#define MLXSW_PCI_CIR_TOKEN			(MLXSW_PCI_CIR_BASE + 0x14)
+#define MLXSW_PCI_CIR_CTRL			(MLXSW_PCI_CIR_BASE + 0x18)
+#define MLXSW_PCI_CIR_CTRL_GO_BIT		BIT(23)
+#define MLXSW_PCI_CIR_CTRL_EVREQ_BIT		BIT(22)
+#define MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT	12
+#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT		24
+#define MLXSW_PCI_CIR_TIMEOUT_MSECS		1000
+
+#define MLXSW_PCI_SW_RESET			0xF0010
+#define MLXSW_PCI_SW_RESET_RST_BIT		BIT(0)
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	5000
+
+#define MLXSW_PCI_DOORBELL_SDQ_OFFSET		0x000
+#define MLXSW_PCI_DOORBELL_RDQ_OFFSET		0x200
+#define MLXSW_PCI_DOORBELL_CQ_OFFSET		0x400
+#define MLXSW_PCI_DOORBELL_EQ_OFFSET		0x600
+#define MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET	0x800
+#define MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET	0xA00
+
+#define MLXSW_PCI_DOORBELL(offset, type_offset, num)	\
+	((offset) + (type_offset) + (num) * 4)
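+
+/* Example (illustrative, not part of this patch): the doorbell register for
+ * SDQ number 3 is at MLXSW_PCI_DOORBELL(off, MLXSW_PCI_DOORBELL_SDQ_OFFSET, 3),
+ * i.e. off + 0x000 + 3 * 4, where 'off' is the doorbell page offset the
+ * driver reads from the firmware at init time.
+ */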
+
+#define MLXSW_PCI_RDQS_COUNT	24
+#define MLXSW_PCI_SDQS_COUNT	24
+#define MLXSW_PCI_CQS_COUNT	(MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT)
+#define MLXSW_PCI_EQS_COUNT	2
+#define MLXSW_PCI_EQ_ASYNC_NUM	0
+#define MLXSW_PCI_EQ_COMP_NUM	1
+
+#define MLXSW_PCI_AQ_PAGES	8
+#define MLXSW_PCI_AQ_SIZE	(MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
+#define MLXSW_PCI_WQE_SIZE	32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE_SIZE	16 /* 16 bytes per element */
+#define MLXSW_PCI_EQE_SIZE	16 /* 16 bytes per element */
+#define MLXSW_PCI_WQE_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
+#define MLXSW_PCI_CQE_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE)
+#define MLXSW_PCI_EQE_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
+#define MLXSW_PCI_EQE_UPDATE_COUNT	0x80
+
+#define MLXSW_PCI_WQE_SG_ENTRIES	3
+#define MLXSW_PCI_WQE_TYPE_ETHERNET	0xA
+
+/* pci_wqe_c
+ * If set it indicates that a completion should be reported upon
+ * execution of this descriptor.
+ */
+MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1);
+
+/* pci_wqe_lp
+ * Local Processing, set if packet should be processed by the local
+ * switch hardware:
+ * For Ethernet EMAD (Direct Route and non Direct Route) -
+ * must be set if packet destination is local device
+ * For InfiniBand CTL - must be set if packet destination is local device
+ * Otherwise it must be clear
+ * Locally processed packets must not exceed 2K in size (including payload
+ * and headers).
+ */
+MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
+
+/* pci_wqe_type
+ * Packet type.
+ */
+MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+
+/* pci_wqe_byte_count
+ * Size of i-th scatter/gather entry, 0 if entry is unused.
+ */
+MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
+
+/* pci_wqe_address
+ * Physical address of i-th scatter/gather entry.
+ * Gather entries must be 2-byte aligned.
+ */
+MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
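+
+/* Illustrative sketch, not part of this patch: filling one scatter/gather
+ * entry of a WQE with the accessors generated above; 'wqe', 'mapaddr' and
+ * 'frag_len' are assumed to come from the descriptor queue code.
+ */
+static inline void example_wqe_frag_fill(char *wqe, int index,
+					 u64 mapaddr, u16 frag_len)
+{
+	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
+	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
+}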
+
+/* pci_cqe_lag
+ * Packet arrives from a port which is a LAG
+ */
+MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
+
+/* pci_cqe_system_port
+ * When lag=0: System port on which the packet was received
+ * When lag=1:
+ * bits [15:4] LAG ID on which the packet was received
+ * bits [3:0] sub_port on which the packet was received
+ */
+MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
+
+/* pci_cqe_wqe_counter
+ * WQE count of the WQEs completed on the associated dqn
+ */
+MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
+
+/* pci_cqe_byte_count
+ * Byte count of received packets, including the additional two
+ * reserved bytes that are appended to the end of the frame.
+ * Reserved for Send CQE.
+ */
+MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
+
+/* pci_cqe_trap_id
+ * Trap ID that captured the packet.
+ */
+MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8);
+
+/* pci_cqe_crc
+ * Length includes CRC. Indicates that the length field includes
+ * the packet's CRC.
+ */
+MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1);
+
+/* pci_cqe_e
+ * CQE with Error.
+ */
+MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
+
+/* pci_cqe_sr
+ * 1 - Send Queue
+ * 0 - Receive Queue
+ */
+MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
+
+/* pci_cqe_dqn
+ * Descriptor Queue (DQ) Number.
+ */
+MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
+
+/* pci_cqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
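+
+/* Illustrative sketch, not part of this patch: decoding a completion with
+ * the CQE accessors generated above.
+ */
+static inline void example_cqe_parse(char *cqe)
+{
+	u32 dqn = mlxsw_pci_cqe_dqn_get(cqe);		/* descriptor queue */
+	u32 byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
+	bool is_send = mlxsw_pci_cqe_sr_get(cqe);	/* SDQ vs. RDQ */
+	bool errored = mlxsw_pci_cqe_e_get(cqe);
+
+	pr_debug("cqe: dqn %u, %u bytes, %s%s\n", dqn, byte_count,
+		 is_send ? "send" : "recv", errored ? ", error" : "");
+}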
+
+/* pci_eqe_event_type
+ * Event type.
+ */
+MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
+#define MLXSW_PCI_EQE_EVENT_TYPE_COMP	0x00
+#define MLXSW_PCI_EQE_EVENT_TYPE_CMD	0x0A
+
+/* pci_eqe_event_sub_type
+ * Event sub-type.
+ */
+MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
+
+/* pci_eqe_cqn
+ * Completion Queue that triggered this EQE.
+ */
+MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
+
+/* pci_eqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_cmd_token
+ * Command completion event - token
+ */
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+
+/* pci_eqe_cmd_status
+ * Command completion event - status
+ */
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+
+/* pci_eqe_cmd_out_param_h
+ * Command completion event - output parameter - higher part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+
+/* pci_eqe_cmd_out_param_l
+ * Command completion event - output parameter - lower part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
new file mode 100644
index 0000000..726f543
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -0,0 +1,75 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/port.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_PORT_H
+#define _MLXSW_PORT_H
+
+#include <linux/types.h>
+
+#define MLXSW_PORT_MAX_MTU		10000
+
+#define MLXSW_PORT_DEFAULT_VID		1
+
+#define MLXSW_PORT_SWID_DISABLED_PORT	255
+#define MLXSW_PORT_SWID_ALL_SWIDS	254
+#define MLXSW_PORT_SWID_TYPE_ETH	2
+
+#define MLXSW_PORT_MID			0xd000
+
+#define MLXSW_PORT_MAX_PHY_PORTS	0x40
+#define MLXSW_PORT_MAX_PORTS		MLXSW_PORT_MAX_PHY_PORTS
+
+#define MLXSW_PORT_DEVID_BITS_OFFSET	10
+#define MLXSW_PORT_PHY_BITS_OFFSET	4
+#define MLXSW_PORT_PHY_BITS_MASK	(MLXSW_PORT_MAX_PHY_PORTS - 1)
+
+#define MLXSW_PORT_CPU_PORT		0x0
+
+#define MLXSW_PORT_DONT_CARE		(MLXSW_PORT_MAX_PORTS)
+
+enum mlxsw_port_admin_status {
+	MLXSW_PORT_ADMIN_STATUS_UP = 1,
+	MLXSW_PORT_ADMIN_STATUS_DOWN = 2,
+	MLXSW_PORT_ADMIN_STATUS_UP_ONCE = 3,
+	MLXSW_PORT_ADMIN_STATUS_DISABLED = 4,
+};
+
+enum mlxsw_reg_pude_oper_status {
+	MLXSW_PORT_OPER_STATUS_UP = 1,
+	MLXSW_PORT_OPER_STATUS_DOWN = 2,
+	MLXSW_PORT_OPER_STATUS_FAILURE = 4,	/* Can be set to up again. */
+};
+
+#endif /* _MLXSW_PORT_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
new file mode 100644
index 0000000..096e1c1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -0,0 +1,1349 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/reg.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_REG_H
+#define _MLXSW_REG_H
+
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+#include "item.h"
+#include "port.h"
+
+struct mlxsw_reg_info {
+	u16 id;
+	u16 len; /* In u8 */
+};
+
+#define MLXSW_REG(type) (&mlxsw_reg_##type)
+#define MLXSW_REG_LEN(type) MLXSW_REG(type)->len
+#define MLXSW_REG_ZERO(type, payload) memset(payload, 0, MLXSW_REG(type)->len)
+
+/* SGCR - Switch General Configuration Register
+ * --------------------------------------------
+ * This register is used for configuration of the switch capabilities.
+ */
+#define MLXSW_REG_SGCR_ID 0x2000
+#define MLXSW_REG_SGCR_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sgcr = {
+	.id = MLXSW_REG_SGCR_ID,
+	.len = MLXSW_REG_SGCR_LEN,
+};
+
+/* reg_sgcr_llb
+ * Link Local Broadcast (Default=0)
+ * When set, all Link Local packets (224.0.0.X) will be treated as broadcast
+ * packets and will ignore the IGMP snooping entries.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sgcr, llb, 0x04, 0, 1);
+
+static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb)
+{
+	MLXSW_REG_ZERO(sgcr, payload);
+	mlxsw_reg_sgcr_llb_set(payload, !!llb);
+}
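+
+/* Illustrative sketch, not part of this patch: the general pattern for using
+ * these register definitions. mlxsw_reg_write() is the core-layer helper
+ * (from core.h, assumed included by the caller) that pushes the payload to
+ * the device.
+ */
+static inline int example_sgcr_enable_llb(struct mlxsw_core *mlxsw_core)
+{
+	char sgcr_pl[MLXSW_REG_SGCR_LEN];
+
+	mlxsw_reg_sgcr_pack(sgcr_pl, true);	/* zero payload, set llb */
+	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sgcr), sgcr_pl);
+}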
+
+/* SPAD - Switch Physical Address Register
+ * ---------------------------------------
+ * The SPAD register configures the switch physical MAC address.
+ */
+#define MLXSW_REG_SPAD_ID 0x2002
+#define MLXSW_REG_SPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_spad = {
+	.id = MLXSW_REG_SPAD_ID,
+	.len = MLXSW_REG_SPAD_LEN,
+};
+
+/* reg_spad_base_mac
+ * Base MAC address for the switch partitions.
+ * Per switch partition MAC address is equal to:
+ * base_mac + swid
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
+
+/* SMID - Switch Multicast ID
+ * --------------------------
+ * In multi-chip configuration, each device should maintain mapping between
+ * Multicast ID (MID) into a list of local ports. This mapping is used in all
+ * the devices other than the ingress device, and is implemented as part of the
+ * FDB. The MID record maps from a MID, which is a unique identifier of the
+ * multicast group within the stacking domain, into a list of local ports into
+ * which the packet is replicated.
+ */
+#define MLXSW_REG_SMID_ID 0x2007
+#define MLXSW_REG_SMID_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_smid = {
+	.id = MLXSW_REG_SMID_ID,
+	.len = MLXSW_REG_SMID_LEN,
+};
+
+/* reg_smid_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
+
+/* reg_smid_mid
+ * Multicast identifier - global identifier that represents the multicast group
+ * across all devices
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
+
+/* reg_smid_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
+
+/* reg_smid_port_mask
+ * Local port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_smid_pack(char *payload, u16 mid)
+{
+	MLXSW_REG_ZERO(smid, payload);
+	mlxsw_reg_smid_swid_set(payload, 0);
+	mlxsw_reg_smid_mid_set(payload, mid);
+	mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+	mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
+
+/* SSPR - Switch System Port Record Register
+ * -----------------------------------------
+ * Configures the system port to local port mapping.
+ */
+#define MLXSW_REG_SSPR_ID 0x2008
+#define MLXSW_REG_SSPR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_sspr = {
+	.id = MLXSW_REG_SSPR_ID,
+	.len = MLXSW_REG_SSPR_LEN,
+};
+
+/* reg_sspr_m
+ * Master - if set, then the record describes the master system port.
+ * This is needed in case a local port is mapped into several system ports
+ * (for multipathing). The master system port will be reported as the source
+ * system port when packets are forwarded to the CPU. Only one master port is
+ * allowed per local port.
+ *
+ * Note: Must be set for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1);
+
+/* reg_sspr_local_port
+ * Local port number.
+ *
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, local_port, 0x00, 16, 8);
+
+/* reg_sspr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ *
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8);
+
+/* reg_sspr_system_port
+ * Unique identifier within the stacking domain that represents all the ports
+ * that are available in the system (external ports).
+ *
+ * Currently, only single-ASIC configurations are supported, so we default to
+ * 1:1 mapping between system ports and local ports.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sspr, system_port, 0x04, 0, 16);
+
+static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port)
+{
+	MLXSW_REG_ZERO(sspr, payload);
+	mlxsw_reg_sspr_m_set(payload, 1);
+	mlxsw_reg_sspr_local_port_set(payload, local_port);
+	mlxsw_reg_sspr_sub_port_set(payload, 0);
+	mlxsw_reg_sspr_system_port_set(payload, local_port);
+}
+
+/* SPMS - Switch Port MSTP/RSTP State Register
+ * -------------------------------------------
+ * Configures the spanning tree state of a physical port.
+ */
+#define MLXSW_REG_SPMS_ID 0x200d
+#define MLXSW_REG_SPMS_LEN 0x404
+
+static const struct mlxsw_reg_info mlxsw_reg_spms = {
+	.id = MLXSW_REG_SPMS_ID,
+	.len = MLXSW_REG_SPMS_LEN,
+};
+
+/* reg_spms_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_spms_state {
+	MLXSW_REG_SPMS_STATE_NO_CHANGE,
+	MLXSW_REG_SPMS_STATE_DISCARDING,
+	MLXSW_REG_SPMS_STATE_LEARNING,
+	MLXSW_REG_SPMS_STATE_FORWARDING,
+};
+
+/* reg_spms_state
+ * Spanning tree state of each VLAN ID (VID) of the local port.
+ * 0 - Do not change spanning tree state (used only when writing).
+ * 1 - Discarding. No learning or forwarding to/from this port (default).
+ * 2 - Learning. Port is learning, but not forwarding.
+ * 3 - Forwarding. Port is learning and forwarding.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
+
+static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid,
+				       enum mlxsw_reg_spms_state state)
+{
+	MLXSW_REG_ZERO(spms, payload);
+	mlxsw_reg_spms_local_port_set(payload, local_port);
+	mlxsw_reg_spms_state_set(payload, vid, state);
+}
+
+/* SFGC - Switch Flooding Group Configuration
+ * ------------------------------------------
+ * The following register controls the association of flooding tables and MIDs
+ * to packet types used for flooding.
+ */
+#define MLXSW_REG_SFGC_ID  0x2011
+#define MLXSW_REG_SFGC_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
+	.id = MLXSW_REG_SFGC_ID,
+	.len = MLXSW_REG_SFGC_LEN,
+};
+
+enum mlxsw_reg_sfgc_type {
+	MLXSW_REG_SFGC_TYPE_BROADCAST = 0,
+	MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1,
+	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2,
+	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3,
+	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5,
+	MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6,
+	MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7,
+};
+
+/* reg_sfgc_type
+ * The traffic type to reach the flooding table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
+
+enum mlxsw_reg_sfgc_bridge_type {
+	MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
+	MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
+};
+
+/* reg_sfgc_bridge_type
+ * Access: Index
+ *
+ * Note: SwitchX-2 only supports 802.1Q mode.
+ */
+MLXSW_ITEM32(reg, sfgc, bridge_type, 0x04, 24, 3);
+
+enum mlxsw_flood_table_type {
+	MLXSW_REG_SFGC_TABLE_TYPE_VID = 1,
+	MLXSW_REG_SFGC_TABLE_TYPE_SINGLE = 2,
+	MLXSW_REG_SFGC_TABLE_TYPE_ANY = 0,
+	MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST = 3,
+	MLXSW_REG_SFGC_TABLE_TYPE_FID = 4,
+};
+
+/* reg_sfgc_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ *
+ * Note: FID offset and FID types are not supported in SwitchX-2.
+ */
+MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3);
+
+/* reg_sfgc_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6);
+
+/* reg_sfgc_mid
+ * The multicast ID for the swid. Not supported for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, mid, 0x08, 0, 16);
+
+/* reg_sfgc_counter_set_type
+ * Counter Set Type for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8);
+
+/* reg_sfgc_counter_index
+ * Counter Index for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24);
+
+static inline void
+mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
+		    enum mlxsw_reg_sfgc_bridge_type bridge_type,
+		    enum mlxsw_flood_table_type table_type,
+		    unsigned int flood_table)
+{
+	MLXSW_REG_ZERO(sfgc, payload);
+	mlxsw_reg_sfgc_type_set(payload, type);
+	mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
+	mlxsw_reg_sfgc_table_type_set(payload, table_type);
+	mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
+	mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
+}
+
+/* SFTR - Switch Flooding Table Register
+ * -------------------------------------
+ * The switch flooding table is used for flooding packet replication. The table
+ * defines a bit mask of ports for packet replication.
+ */
+#define MLXSW_REG_SFTR_ID 0x2012
+#define MLXSW_REG_SFTR_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_sftr = {
+	.id = MLXSW_REG_SFTR_ID,
+	.len = MLXSW_REG_SFTR_LEN,
+};
+
+/* reg_sftr_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, swid, 0x00, 24, 8);
+
+/* reg_sftr_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, flood_table, 0x00, 16, 6);
+
+/* reg_sftr_index
+ * Index. Used as an index into the Flooding Table in case the table is
+ * configured to use VID / FID or FID Offset.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, index, 0x00, 0, 16);
+
+/* reg_sftr_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sftr, table_type, 0x04, 16, 3);
+
+/* reg_sftr_range
+ * Range of entries to update
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, range, 0x04, 0, 16);
+
+/* reg_sftr_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port, 0x20, 0x20, 1);
+
+/* reg_sftr_port_mask
+ * CPU port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_sftr_pack(char *payload,
+				       unsigned int flood_table,
+				       unsigned int index,
+				       enum mlxsw_flood_table_type table_type,
+				       unsigned int range)
+{
+	MLXSW_REG_ZERO(sftr, payload);
+	mlxsw_reg_sftr_swid_set(payload, 0);
+	mlxsw_reg_sftr_flood_table_set(payload, flood_table);
+	mlxsw_reg_sftr_index_set(payload, index);
+	mlxsw_reg_sftr_table_type_set(payload, table_type);
+	mlxsw_reg_sftr_range_set(payload, range);
+	mlxsw_reg_sftr_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+	mlxsw_reg_sftr_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
+
+/* SPMLR - Switch Port MAC Learning Register
+ * -----------------------------------------
+ * Controls the Switch MAC learning policy per port.
+ */
+#define MLXSW_REG_SPMLR_ID 0x2018
+#define MLXSW_REG_SPMLR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_spmlr = {
+	.id = MLXSW_REG_SPMLR_ID,
+	.len = MLXSW_REG_SPMLR_LEN,
+};
+
+/* reg_spmlr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8);
+
+/* reg_spmlr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, sub_port, 0x00, 8, 8);
+
+enum mlxsw_reg_spmlr_learn_mode {
+	MLXSW_REG_SPMLR_LEARN_MODE_DISABLE = 0,
+	MLXSW_REG_SPMLR_LEARN_MODE_ENABLE = 2,
+	MLXSW_REG_SPMLR_LEARN_MODE_SEC = 3,
+};
+
+/* reg_spmlr_learn_mode
+ * Learning mode on the port.
+ * 0 - Learning disabled.
+ * 2 - Learning enabled.
+ * 3 - Security mode.
+ *
+ * In security mode the switch does not learn MACs on the port, but uses the
+ * SMAC to see if it exists on another ingress port. If so, the packet is
+ * classified as a bad packet and is discarded unless the software registers
+ * to receive port security error packets using HPKT.
+ */
+MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2);
+
+static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
+					enum mlxsw_reg_spmlr_learn_mode mode)
+{
+	MLXSW_REG_ZERO(spmlr, payload);
+	mlxsw_reg_spmlr_local_port_set(payload, local_port);
+	mlxsw_reg_spmlr_sub_port_set(payload, 0);
+	mlxsw_reg_spmlr_learn_mode_set(payload, mode);
+}
+
+/* PMLP - Ports Module to Local Port Register
+ * ------------------------------------------
+ * Configures the assignment of modules to local ports.
+ */
+#define MLXSW_REG_PMLP_ID 0x5002
+#define MLXSW_REG_PMLP_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_pmlp = {
+	.id = MLXSW_REG_PMLP_ID,
+	.len = MLXSW_REG_PMLP_LEN,
+};
+
+/* reg_pmlp_rxtx
+ * 0 - Tx value is used for both Tx and Rx.
+ * 1 - Rx value is taken from a separate field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1);
+
+/* reg_pmlp_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
+
+/* reg_pmlp_width
+ * 0 - Unmap local port.
+ * 1 - Lane 0 is used.
+ * 2 - Lanes 0 and 1 are used.
+ * 4 - Lanes 0, 1, 2 and 3 are used.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
+
+/* reg_pmlp_module
+ * Module number.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false);
+
+/* reg_pmlp_tx_lane
+ * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false);
+
+/* reg_pmlp_rx_lane
+ * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
+ * equal to Tx lane.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false);
+
+static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
+{
+	MLXSW_REG_ZERO(pmlp, payload);
+	mlxsw_reg_pmlp_local_port_set(payload, local_port);
+}
+
+/* PMTU - Port MTU Register
+ * ------------------------
+ * Configures and reports the port MTU.
+ */
+#define MLXSW_REG_PMTU_ID 0x5003
+#define MLXSW_REG_PMTU_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_pmtu = {
+	.id = MLXSW_REG_PMTU_ID,
+	.len = MLXSW_REG_PMTU_LEN,
+};
+
+/* reg_pmtu_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8);
+
+/* reg_pmtu_max_mtu
+ * Maximum MTU.
+ * When a port type (e.g. Ethernet) is configured, the relevant MTU is
+ * reported; otherwise the minimum of the max_mtu values of the different
+ * types is reported.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, max_mtu, 0x04, 16, 16);
+
+/* reg_pmtu_admin_mtu
+ * MTU value to set port to. Must be smaller or equal to max_mtu.
+ * Note: If the port type is InfiniBand, the port must be disabled when its
+ * MTU is set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16);
+
+/* reg_pmtu_oper_mtu
+ * The actual MTU configured on the port. Packets exceeding this size
+ * will be dropped.
+ * Note: In Ethernet and FC, oper_mtu == admin_mtu; in InfiniBand, however,
+ * oper_mtu might be smaller than admin_mtu.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16);
+
+static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
+				       u16 new_mtu)
+{
+	MLXSW_REG_ZERO(pmtu, payload);
+	mlxsw_reg_pmtu_local_port_set(payload, local_port);
+	mlxsw_reg_pmtu_max_mtu_set(payload, 0);
+	mlxsw_reg_pmtu_admin_mtu_set(payload, new_mtu);
+	mlxsw_reg_pmtu_oper_mtu_set(payload, 0);
+}
+
+/* PTYS - Port Type and Speed Register
+ * -----------------------------------
+ * Configures and reports the port speed type.
+ *
+ * Note: When set while the link is up, the changes will not take effect
+ * until the port transitions from down to up state.
+ */
+#define MLXSW_REG_PTYS_ID 0x5004
+#define MLXSW_REG_PTYS_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_ptys = {
+	.id = MLXSW_REG_PTYS_ID,
+	.len = MLXSW_REG_PTYS_LEN,
+};
+
+/* reg_ptys_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
+
+#define MLXSW_REG_PTYS_PROTO_MASK_ETH	BIT(2)
+
+/* reg_ptys_proto_mask
+ * Protocol mask. Indicates which protocol is used.
+ * 0 - Infiniband.
+ * 1 - Fibre Channel.
+ * 2 - Ethernet.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
+
+#define MLXSW_REG_PTYS_ETH_SPEED_SGMII			BIT(0)
+#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX		BIT(1)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4		BIT(2)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4		BIT(3)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR		BIT(4)
+#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2		BIT(5)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4		BIT(6)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4		BIT(7)
+#define MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4		BIT(8)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR		BIT(12)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR		BIT(13)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR		BIT(14)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4		BIT(15)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4	BIT(16)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4		BIT(19)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4		BIT(20)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4		BIT(21)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4		BIT(22)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4	BIT(23)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX		BIT(24)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T		BIT(25)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T		BIT(26)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR		BIT(27)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR		BIT(28)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR		BIT(29)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2		BIT(30)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2		BIT(31)
+
+/* reg_ptys_eth_proto_cap
+ * Ethernet port supported speeds and protocols.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32);
+
+/* reg_ptys_eth_proto_admin
+ * Speed and protocol to set port to.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
+
+/* reg_ptys_eth_proto_oper
+ * The current speed and protocol configured for the port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
+
+static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
+				       u32 proto_admin)
+{
+	MLXSW_REG_ZERO(ptys, payload);
+	mlxsw_reg_ptys_local_port_set(payload, local_port);
+	mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
+	mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin);
+}
+
+static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap,
+					 u32 *p_eth_proto_adm,
+					 u32 *p_eth_proto_oper)
+{
+	if (p_eth_proto_cap)
+		*p_eth_proto_cap = mlxsw_reg_ptys_eth_proto_cap_get(payload);
+	if (p_eth_proto_adm)
+		*p_eth_proto_adm = mlxsw_reg_ptys_eth_proto_admin_get(payload);
+	if (p_eth_proto_oper)
+		*p_eth_proto_oper = mlxsw_reg_ptys_eth_proto_oper_get(payload);
+}
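+
+/* Illustrative sketch, not part of this patch: restricting a port to
+ * advertise only 40GBASE-CR4/KR4, using the speed bits and pack helper
+ * above. mlxsw_reg_write() is the assumed core-layer helper from core.h.
+ */
+static inline int example_port_force_40g(struct mlxsw_core *mlxsw_core,
+					 u8 local_port)
+{
+	char ptys_pl[MLXSW_REG_PTYS_LEN];
+
+	mlxsw_reg_ptys_pack(ptys_pl, local_port,
+			    MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+			    MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4);
+	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ptys), ptys_pl);
+}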
+
+/* PPAD - Port Physical Address Register
+ * -------------------------------------
+ * The PPAD register configures the per port physical MAC address.
+ */
+#define MLXSW_REG_PPAD_ID 0x5005
+#define MLXSW_REG_PPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_ppad = {
+	.id = MLXSW_REG_PPAD_ID,
+	.len = MLXSW_REG_PPAD_LEN,
+};
+
+/* reg_ppad_single_base_mac
+ * 0: base_mac - local port should be 0 and mac[7:0] is reserved;
+ *    HW will set the MAC addresses incrementally.
+ * 1: single_mac - the MAC of the local_port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1);
+
+/* reg_ppad_local_port
+ * port number, if single_base_mac = 0 then local_port is reserved
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
+
+/* reg_ppad_mac
+ * If single_base_mac = 0 - base MAC address, mac[7:0] is reserved.
+ * If single_base_mac = 1 - the per port MAC address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6);
+
+static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
+				       u8 local_port)
+{
+	MLXSW_REG_ZERO(ppad, payload);
+	mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac);
+	mlxsw_reg_ppad_local_port_set(payload, local_port);
+}
+
+/* PAOS - Ports Administrative and Operational Status Register
+ * -----------------------------------------------------------
+ * Configures and retrieves per port administrative and operational status.
+ */
+#define MLXSW_REG_PAOS_ID 0x5006
+#define MLXSW_REG_PAOS_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_paos = {
+	.id = MLXSW_REG_PAOS_ID,
+	.len = MLXSW_REG_PAOS_LEN,
+};
+
+/* reg_paos_swid
+ * Switch partition ID with which to associate the port.
+ * Note: while external ports use unique local port numbers (and thus swid is
+ * redundant), router ports use the same local port number where swid is the
+ * only indication for the relevant port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8);
+
+/* reg_paos_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8);
+
+/* reg_paos_admin_status
+ * Port administrative state (the desired state of the port):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ *     into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, admin_status, 0x00, 8, 4);
+
+/* reg_paos_oper_status
+ * Port operational state (the current state):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ *     port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, paos, oper_status, 0x00, 0, 4);
+
+/* reg_paos_ase
+ * Admin state update enabled.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ase, 0x04, 31, 1);
+
+/* reg_paos_ee
+ * Event update enable. If this bit is set, event generation will be
+ * updated based on the e field.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1);
+
+/* reg_paos_e
+ * Event generation on operational state change:
+ * 0 - Do not generate event.
+ * 1 - Generate Event.
+ * 2 - Generate Single Event.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
+				       enum mlxsw_port_admin_status status)
+{
+	MLXSW_REG_ZERO(paos, payload);
+	mlxsw_reg_paos_swid_set(payload, 0);
+	mlxsw_reg_paos_local_port_set(payload, local_port);
+	mlxsw_reg_paos_admin_status_set(payload, status);
+	mlxsw_reg_paos_oper_status_set(payload, 0);
+	mlxsw_reg_paos_ase_set(payload, 1);
+	mlxsw_reg_paos_ee_set(payload, 1);
+	mlxsw_reg_paos_e_set(payload, 1);
+}
+
+/* PPCNT - Ports Performance Counters Register
+ * -------------------------------------------
+ * The PPCNT register retrieves per port performance counters.
+ */
+#define MLXSW_REG_PPCNT_ID 0x5008
+#define MLXSW_REG_PPCNT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_ppcnt = {
+	.id = MLXSW_REG_PPCNT_ID,
+	.len = MLXSW_REG_PPCNT_LEN,
+};
+
+/* reg_ppcnt_swid
+ * For HCA: must always be 0.
+ * Switch partition ID to associate port with.
+ * Switch partitions are numbered from 0 to 7 inclusively.
+ * Switch partition 254 indicates stacking ports.
+ * Switch partition 255 indicates all switch partitions.
+ * Only valid on Set() operation with local_port=255.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8);
+
+/* reg_ppcnt_local_port
+ * Local port number.
+ * 255 indicates all ports on the device, and is only allowed
+ * for Set() operation.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
+
+/* reg_ppcnt_pnat
+ * Port number access type:
+ * 0 - Local port number
+ * 1 - IB port number
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
+
+/* reg_ppcnt_grp
+ * Performance counter group.
+ * Group 63 indicates all groups. Only valid on Set() operation with
+ * clr bit set.
+ * 0x0: IEEE 802.3 Counters
+ * 0x1: RFC 2863 Counters
+ * 0x2: RFC 2819 Counters
+ * 0x3: RFC 3635 Counters
+ * 0x5: Ethernet Extended Counters
+ * 0x8: Link Level Retransmission Counters
+ * 0x10: Per Priority Counters
+ * 0x11: Per Traffic Class Counters
+ * 0x12: Physical Layer Counters
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
+
+/* reg_ppcnt_clr
+ * Clear counters. Setting the clr bit will reset the counter value
+ * for all counters in the counter group. This bit can be set
+ * for both Set() and Get() operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
+
+/* reg_ppcnt_prio_tc
+ * Priority for counter sets that support per-priority counters,
+ * valid values: 0-7.
+ * Traffic class for counter sets that support per-traffic-class counters,
+ * valid values: 0 to cap_max_tclass-1.
+ * For HCA: cap_max_tclass is always 8.
+ * Otherwise must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
+
+/* reg_ppcnt_a_frames_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_transmitted_ok,
+	     0x08 + 0x00, 0, 64);
+
+/* reg_ppcnt_a_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_received_ok,
+	     0x08 + 0x08, 0, 64);
+
+/* reg_ppcnt_a_frame_check_sequence_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_check_sequence_errors,
+	     0x08 + 0x10, 0, 64);
+
+/* reg_ppcnt_a_alignment_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_alignment_errors,
+	     0x08 + 0x18, 0, 64);
+
+/* reg_ppcnt_a_octets_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_transmitted_ok,
+	     0x08 + 0x20, 0, 64);
+
+/* reg_ppcnt_a_octets_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_received_ok,
+	     0x08 + 0x28, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_xmitted_ok,
+	     0x08 + 0x30, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_xmitted_ok,
+	     0x08 + 0x38, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_received_ok,
+	     0x08 + 0x40, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_received_ok,
+	     0x08 + 0x48, 0, 64);
+
+/* reg_ppcnt_a_in_range_length_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_in_range_length_errors,
+	     0x08 + 0x50, 0, 64);
+
+/* reg_ppcnt_a_out_of_range_length_field
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_out_of_range_length_field,
+	     0x08 + 0x58, 0, 64);
+
+/* reg_ppcnt_a_frame_too_long_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_too_long_errors,
+	     0x08 + 0x60, 0, 64);
+
+/* reg_ppcnt_a_symbol_error_during_carrier
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_symbol_error_during_carrier,
+	     0x08 + 0x68, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_transmitted,
+	     0x08 + 0x70, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_received,
+	     0x08 + 0x78, 0, 64);
+
+/* reg_ppcnt_a_unsupported_opcodes_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_unsupported_opcodes_received,
+	     0x08 + 0x80, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
+	     0x08 + 0x88, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
+	     0x08 + 0x90, 0, 64);
+
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
+{
+	MLXSW_REG_ZERO(ppcnt, payload);
+	mlxsw_reg_ppcnt_swid_set(payload, 0);
+	mlxsw_reg_ppcnt_local_port_set(payload, local_port);
+	mlxsw_reg_ppcnt_pnat_set(payload, 0);
+	mlxsw_reg_ppcnt_grp_set(payload, 0);
+	mlxsw_reg_ppcnt_clr_set(payload, 0);
+	mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
+}
+
+/* PSPA - Port Switch Partition Allocation
+ * ---------------------------------------
+ * Controls the association of a port with a switch partition and enables
+ * configuring ports as stacking ports.
+ */
+#define MLXSW_REG_PSPA_ID 0x500d
+#define MLXSW_REG_PSPA_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_pspa = {
+	.id = MLXSW_REG_PSPA_ID,
+	.len = MLXSW_REG_PSPA_LEN,
+};
+
+/* reg_pspa_swid
+ * Switch partition ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8);
+
+/* reg_pspa_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
+
+/* reg_pspa_sub_port
+ * Virtual port within the local port. Set to 0 when virtual ports are
+ * disabled on the local port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8);
+
+static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
+{
+	MLXSW_REG_ZERO(pspa, payload);
+	mlxsw_reg_pspa_swid_set(payload, swid);
+	mlxsw_reg_pspa_local_port_set(payload, local_port);
+	mlxsw_reg_pspa_sub_port_set(payload, 0);
+}
+
+/* HTGT - Host Trap Group Table
+ * ----------------------------
+ * Configures the properties for forwarding to CPU.
+ */
+#define MLXSW_REG_HTGT_ID 0x7002
+#define MLXSW_REG_HTGT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_htgt = {
+	.id = MLXSW_REG_HTGT_ID,
+	.len = MLXSW_REG_HTGT_LEN,
+};
+
+/* reg_htgt_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8);
+
+#define MLXSW_REG_HTGT_PATH_TYPE_LOCAL 0x0	/* For locally attached CPU */
+
+/* reg_htgt_type
+ * CPU path type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
+
+#define MLXSW_REG_HTGT_TRAP_GROUP_EMAD	0x0
+#define MLXSW_REG_HTGT_TRAP_GROUP_RX	0x1
+
+/* reg_htgt_trap_group
+ * Trap group number. User defined number specifying which trap groups
+ * should be forwarded to the CPU. The mapping between trap IDs and trap
+ * groups is configured using HPKT register.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, trap_group, 0x00, 0, 8);
+
+enum {
+	MLXSW_REG_HTGT_POLICER_DISABLE,
+	MLXSW_REG_HTGT_POLICER_ENABLE,
+};
+
+/* reg_htgt_pide
+ * Enable policer ID specified using 'pid' field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pide, 0x04, 15, 1);
+
+/* reg_htgt_pid
+ * Policer ID for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pid, 0x04, 0, 8);
+
+#define MLXSW_REG_HTGT_TRAP_TO_CPU 0x0
+
+/* reg_htgt_mirror_action
+ * Mirror action to use.
+ * 0 - Trap to CPU.
+ * 1 - Trap to CPU and mirror to a mirroring agent.
+ * 2 - Mirror to a mirroring agent and do not trap to CPU.
+ * Access: RW
+ *
+ * Note: Mirroring to a mirroring agent is only supported in Spectrum.
+ */
+MLXSW_ITEM32(reg, htgt, mirror_action, 0x08, 8, 2);
+
+/* reg_htgt_mirroring_agent
+ * Mirroring agent.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3);
+
+/* reg_htgt_priority
+ * Trap group priority.
+ * In case a packet matches multiple classification rules, the packet will
+ * only be trapped once, based on the trap ID associated with the group (via
+ * register HPKT) with the highest priority.
+ * Supported values are 0-7, with 7 representing the highest priority.
+ * Access: RW
+ *
+ * Note: In SwitchX-2 this field is ignored and the priority value is replaced
+ * by the 'trap_group' field.
+ */
+MLXSW_ITEM32(reg, htgt, priority, 0x0C, 0, 4);
+
+/* reg_htgt_local_path_cpu_tclass
+ * CPU ingress traffic class for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
+
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD	0x15
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX	0x14
+
+/* reg_htgt_local_path_rdq
+ * Receive descriptor queue (RDQ) to use for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
+
+static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group)
+{
+	u8 swid, rdq;
+
+	MLXSW_REG_ZERO(htgt, payload);
+	if (MLXSW_REG_HTGT_TRAP_GROUP_EMAD == trap_group) {
+		swid = MLXSW_PORT_SWID_ALL_SWIDS;
+		rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
+	} else {
+		swid = 0;
+		rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
+	}
+	mlxsw_reg_htgt_swid_set(payload, swid);
+	mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
+	mlxsw_reg_htgt_trap_group_set(payload, trap_group);
+	mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
+	mlxsw_reg_htgt_pid_set(payload, 0);
+	mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
+	mlxsw_reg_htgt_mirroring_agent_set(payload, 0);
+	mlxsw_reg_htgt_priority_set(payload, 0);
+	mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, 7);
+	mlxsw_reg_htgt_local_path_rdq_set(payload, rdq);
+}
+
+/* HPKT - Host Packet Trap
+ * -----------------------
+ * Configures trap IDs inside trap groups.
+ */
+#define MLXSW_REG_HPKT_ID 0x7003
+#define MLXSW_REG_HPKT_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_hpkt = {
+	.id = MLXSW_REG_HPKT_ID,
+	.len = MLXSW_REG_HPKT_LEN,
+};
+
+enum {
+	MLXSW_REG_HPKT_ACK_NOT_REQUIRED,
+	MLXSW_REG_HPKT_ACK_REQUIRED,
+};
+
+/* reg_hpkt_ack
+ * Require acknowledgements from the host for events.
+ * If set, then the device will wait for the event it sent to be acknowledged
+ * by the host. This option is only relevant for event trap IDs.
+ * Access: RW
+ *
+ * Note: Currently not supported by firmware.
+ */
+MLXSW_ITEM32(reg, hpkt, ack, 0x00, 24, 1);
+
+enum mlxsw_reg_hpkt_action {
+	MLXSW_REG_HPKT_ACTION_FORWARD,
+	MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+	MLXSW_REG_HPKT_ACTION_MIRROR_TO_CPU,
+	MLXSW_REG_HPKT_ACTION_DISCARD,
+	MLXSW_REG_HPKT_ACTION_SOFT_DISCARD,
+	MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD,
+};
+
+/* reg_hpkt_action
+ * Action to perform on packet when trapped.
+ * 0 - No action. Forward to CPU based on switching rules.
+ * 1 - Trap to CPU (CPU receives sole copy).
+ * 2 - Mirror to CPU (CPU receives a replica of the packet).
+ * 3 - Discard.
+ * 4 - Soft discard (allow other traps to act on the packet).
+ * 5 - Trap and soft discard (allow other traps to overwrite this trap).
+ * Access: RW
+ *
+ * Note: Must be set to 0 (forward) for event trap IDs, as they are already
+ * addressed to the CPU.
+ */
+MLXSW_ITEM32(reg, hpkt, action, 0x00, 20, 3);
+
+/* reg_hpkt_trap_group
+ * Trap group to associate the trap with.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, trap_group, 0x00, 12, 6);
+
+/* reg_hpkt_trap_id
+ * Trap ID.
+ * Access: Index
+ *
+ * Note: A trap ID can only be associated with a single trap group. The device
+ * will associate the trap ID with the last trap group configured.
+ */
+MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 9);
+
+enum {
+	MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT,
+	MLXSW_REG_HPKT_CTRL_PACKET_NO_BUFFER,
+	MLXSW_REG_HPKT_CTRL_PACKET_USE_BUFFER,
+};
+
+/* reg_hpkt_ctrl
+ * Configure dedicated buffer resources for control packets.
+ * 0 - Keep factory defaults.
+ * 1 - Do not use control buffer for this trap ID.
+ * 2 - Use control buffer for this trap ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
+
+static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action,
+				       u8 trap_group, u16 trap_id)
+{
+	MLXSW_REG_ZERO(hpkt, payload);
+	mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
+	mlxsw_reg_hpkt_action_set(payload, action);
+	mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
+	mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
+	mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
+}
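+
+/* Illustrative sketch, not part of this patch: binding a trap ID to the RX
+ * trap group so matching packets are delivered to the CPU. The trap ID is a
+ * placeholder here; real IDs come from trap.h, and mlxsw_reg_write() is the
+ * assumed core-layer helper from core.h.
+ */
+static inline int example_trap_to_cpu(struct mlxsw_core *mlxsw_core,
+				      u16 trap_id)
+{
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+
+	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+			    MLXSW_REG_HTGT_TRAP_GROUP_RX, trap_id);
+	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+}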
+
+static inline const char *mlxsw_reg_id_str(u16 reg_id)
+{
+	switch (reg_id) {
+	case MLXSW_REG_SGCR_ID:
+		return "SGCR";
+	case MLXSW_REG_SPAD_ID:
+		return "SPAD";
+	case MLXSW_REG_SMID_ID:
+		return "SMID";
+	case MLXSW_REG_SSPR_ID:
+		return "SSPR";
+	case MLXSW_REG_SPMS_ID:
+		return "SPMS";
+	case MLXSW_REG_SFGC_ID:
+		return "SFGC";
+	case MLXSW_REG_SFTR_ID:
+		return "SFTR";
+	case MLXSW_REG_SPMLR_ID:
+		return "SPMLR";
+	case MLXSW_REG_PMLP_ID:
+		return "PMLP";
+	case MLXSW_REG_PMTU_ID:
+		return "PMTU";
+	case MLXSW_REG_PTYS_ID:
+		return "PTYS";
+	case MLXSW_REG_PPAD_ID:
+		return "PPAD";
+	case MLXSW_REG_PAOS_ID:
+		return "PAOS";
+	case MLXSW_REG_PPCNT_ID:
+		return "PPCNT";
+	case MLXSW_REG_PSPA_ID:
+		return "PSPA";
+	case MLXSW_REG_HTGT_ID:
+		return "HTGT";
+	case MLXSW_REG_HPKT_ID:
+		return "HPKT";
+	default:
+		return "*UNKNOWN*";
+	}
+}
+
+/* PUDE - Port Up / Down Event
+ * ---------------------------
+ * Reports the operational state change of a port.
+ */
+#define MLXSW_REG_PUDE_LEN 0x10
+
+/* reg_pude_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8);
+
+/* reg_pude_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8);
+
+/* reg_pude_admin_status
+ * Port administrative state (the desired state).
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ *     into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, admin_status, 0x00, 8, 4);
+
+/* reg_pude_oper_status
+ * Port operational state.
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ *     port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, oper_status, 0x00, 0, 4);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
new file mode 100644
index 0000000..3e52ee9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -0,0 +1,1568 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+#include <generated/utsrelease.h>
+
+#include "core.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+
+static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
+static const char mlxsw_sx_driver_version[] = "1.0";
+
+struct mlxsw_sx_port;
+
+#define MLXSW_SW_HW_ID_LEN 6
+
+struct mlxsw_sx {
+	struct mlxsw_sx_port **ports;
+	struct mlxsw_core *core;
+	const struct mlxsw_bus_info *bus_info;
+	u8 hw_id[MLXSW_SW_HW_ID_LEN];
+};
+
+struct mlxsw_sx_port_pcpu_stats {
+	u64			rx_packets;
+	u64			rx_bytes;
+	u64			tx_packets;
+	u64			tx_bytes;
+	struct u64_stats_sync	syncp;
+	u32			tx_dropped;
+};
+
+struct mlxsw_sx_port {
+	struct net_device *dev;
+	struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
+	struct mlxsw_sx *mlxsw_sx;
+	u8 local_port;
+};
+
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_etclass
+ * Egress TClass to be used on the egress device on the egress port.
+ * The MSB is specified in the 'ctclass3' field.
+ * Range is 0-15, where 15 is the highest priority.
+ */
+MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);
+
+/* tx_hdr_swid
+ * Switch partition ID.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_ctclass3
+ * See field 'etclass'.
+ */
+MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);
+
+/* tx_hdr_rdq
+ * RDQ for control packets sent to remote CPU.
+ * Must be set to 0x1F for EMADs, otherwise 0.
+ */
+MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);
+
+/* tx_hdr_cpu_sig
+ * Signature control for packets going to CPU. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);
+
+/* tx_hdr_sig
+ * Stacking protocol signature. Must be set to 0xE0E0.
+ */
+MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);
+
+/* tx_hdr_stclass
+ * Stacking TClass.
+ */
+MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);
+
+/* tx_hdr_emad
+ * EMAD bit. Must be set for EMADs.
+ */
+MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
+static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
+				     const struct mlxsw_tx_info *tx_info)
+{
+	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+	bool is_emad = tx_info->is_emad;
+
+	memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+	/* We currently set default values for the egress tclass (QoS). */
+	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
+	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+	mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
+						  MLXSW_TXHDR_ETCLASS_5);
+	mlxsw_tx_hdr_swid_set(txhdr, 0);
+	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
+	mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
+	mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
+					      MLXSW_TXHDR_RDQ_OTHER);
+	mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
+	mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
+	mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
+	mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
+					       MLXSW_TXHDR_NOT_EMAD);
+	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
+
+static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
+					  bool is_up)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char paos_pl[MLXSW_REG_PAOS_LEN];
+
+	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
+			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+			    MLXSW_PORT_ADMIN_STATUS_DOWN);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
+					 bool *p_is_up)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char paos_pl[MLXSW_REG_PAOS_LEN];
+	u8 oper_status;
+	int err;
+
+	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+	if (err)
+		return err;
+	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
+	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
+	return 0;
+}
+
+static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char pmtu_pl[MLXSW_REG_PMTU_LEN];
+	int max_mtu;
+	int err;
+
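+	/* The device MTU covers the whole frame, so account for the Tx
+	 * header and Ethernet header on top of the netdev MTU.
+	 */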
+	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+	if (err)
+		return err;
+	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+
+	if (mtu > max_mtu)
+		return -EINVAL;
+
+	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+}
+
+static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
+}
+
+static int
+mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char sspr_pl[MLXSW_REG_SSPR_LEN];
+
+	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
+}
+
+static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port,
+				      bool *p_usable)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char pmlp_pl[MLXSW_REG_PMLP_LEN];
+	int err;
+
+	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sx_port->local_port);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
+	if (err)
+		return err;
+	*p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) != 0;
+	return 0;
+}
+
+static int mlxsw_sx_port_open(struct net_device *dev)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	int err;
+
+	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+	if (err)
+		return err;
+	netif_start_queue(dev);
+	return 0;
+}
+
+static int mlxsw_sx_port_stop(struct net_device *dev)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+}
+
+static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
+				      struct net_device *dev)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+	const struct mlxsw_tx_info tx_info = {
+		.local_port = mlxsw_sx_port->local_port,
+		.is_emad = false,
+	};
+	u64 len;
+	int err;
+
+	if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info))
+		return NETDEV_TX_BUSY;
+
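+	/* Make room to push the Tx header, copying the skb if the
+	 * available headroom is too small.
+	 */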
+	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
+		struct sk_buff *skb_orig = skb;
+
+		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
+		if (!skb) {
+			this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+			dev_kfree_skb_any(skb_orig);
+			return NETDEV_TX_OK;
+		}
+	}
+	mlxsw_sx_txhdr_construct(skb, &tx_info);
+	len = skb->len;
+	/* Due to a race we might fail here because of a full queue. In that
+	 * unlikely case we simply drop the packet.
+	 */
+	err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);
+
+	if (!err) {
+		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+		u64_stats_update_begin(&pcpu_stats->syncp);
+		pcpu_stats->tx_packets++;
+		pcpu_stats->tx_bytes += len;
+		u64_stats_update_end(&pcpu_stats->syncp);
+	} else {
+		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+		dev_kfree_skb_any(skb);
+	}
+	return NETDEV_TX_OK;
+}
+
+static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	int err;
+
+	err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
+	if (err)
+		return err;
+	dev->mtu = mtu;
+	return 0;
+}
+
+static struct rtnl_link_stats64 *
+mlxsw_sx_port_get_stats64(struct net_device *dev,
+			  struct rtnl_link_stats64 *stats)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx_port_pcpu_stats *p;
+	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+	u32 tx_dropped = 0;
+	unsigned int start;
+	int i;
+
+	for_each_possible_cpu(i) {
+		p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
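+		/* Retry the snapshot if a writer updated the counters
+		 * meanwhile.
+		 */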
+		do {
+			start = u64_stats_fetch_begin_irq(&p->syncp);
+			rx_packets	= p->rx_packets;
+			rx_bytes	= p->rx_bytes;
+			tx_packets	= p->tx_packets;
+			tx_bytes	= p->tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+		stats->rx_packets	+= rx_packets;
+		stats->rx_bytes		+= rx_bytes;
+		stats->tx_packets	+= tx_packets;
+		stats->tx_bytes		+= tx_bytes;
+		/* tx_dropped is u32, updated without syncp protection. */
+		tx_dropped	+= p->tx_dropped;
+	}
+	stats->tx_dropped	= tx_dropped;
+	return stats;
+}
+
+static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
+	.ndo_open		= mlxsw_sx_port_open,
+	.ndo_stop		= mlxsw_sx_port_stop,
+	.ndo_start_xmit		= mlxsw_sx_port_xmit,
+	.ndo_change_mtu		= mlxsw_sx_port_change_mtu,
+	.ndo_get_stats64	= mlxsw_sx_port_get_stats64,
+};
+
+static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
+				      struct ethtool_drvinfo *drvinfo)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+	strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, mlxsw_sx_driver_version,
+		sizeof(drvinfo->version));
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		 "%d.%d.%d",
+		 mlxsw_sx->bus_info->fw_rev.major,
+		 mlxsw_sx->bus_info->fw_rev.minor,
+		 mlxsw_sx->bus_info->fw_rev.subminor);
+	strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
+		sizeof(drvinfo->bus_info));
+}
+
+struct mlxsw_sx_port_hw_stats {
+	char str[ETH_GSTRING_LEN];
+	u64 (*getter)(char *payload);
+};
+
+static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
+	{
+		.str = "a_frames_transmitted_ok",
+		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
+	},
+	{
+		.str = "a_frames_received_ok",
+		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
+	},
+	{
+		.str = "a_frame_check_sequence_errors",
+		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
+	},
+	{
+		.str = "a_alignment_errors",
+		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
+	},
+	{
+		.str = "a_octets_transmitted_ok",
+		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
+	},
+	{
+		.str = "a_octets_received_ok",
+		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
+	},
+	{
+		.str = "a_multicast_frames_xmitted_ok",
+		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
+	},
+	{
+		.str = "a_broadcast_frames_xmitted_ok",
+		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
+	},
+	{
+		.str = "a_multicast_frames_received_ok",
+		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
+	},
+	{
+		.str = "a_broadcast_frames_received_ok",
+		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
+	},
+	{
+		.str = "a_in_range_length_errors",
+		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
+	},
+	{
+		.str = "a_out_of_range_length_field",
+		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
+	},
+	{
+		.str = "a_frame_too_long_errors",
+		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
+	},
+	{
+		.str = "a_symbol_error_during_carrier",
+		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
+	},
+	{
+		.str = "a_mac_control_frames_transmitted",
+		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
+	},
+	{
+		.str = "a_mac_control_frames_received",
+		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
+	},
+	{
+		.str = "a_unsupported_opcodes_received",
+		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
+	},
+	{
+		.str = "a_pause_mac_ctrl_frames_received",
+		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
+	},
+	{
+		.str = "a_pause_mac_ctrl_frames_xmitted",
+		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
+	},
+};
+
+#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
+
+static void mlxsw_sx_port_get_strings(struct net_device *dev,
+				      u32 stringset, u8 *data)
+{
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
+			memcpy(p, mlxsw_sx_port_hw_stats[i].str,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static void mlxsw_sx_port_get_stats(struct net_device *dev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+	int i;
+	int err;
+
+	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
+	for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
+		data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
+}
+
+static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return MLXSW_SX_PORT_HW_STATS_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+struct mlxsw_sx_port_link_mode {
+	u32 mask;
+	u32 supported;
+	u32 advertised;
+	u32 speed;
+};
+
+static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+		.supported	= SUPPORTED_100baseT_Full,
+		.advertised	= ADVERTISED_100baseT_Full,
+		.speed		= 100,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
+		.speed		= 100,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
+				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
+		.supported	= SUPPORTED_1000baseKX_Full,
+		.advertised	= ADVERTISED_1000baseKX_Full,
+		.speed		= 1000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
+		.supported	= SUPPORTED_10000baseT_Full,
+		.advertised	= ADVERTISED_10000baseT_Full,
+		.speed		= 10000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
+		.supported	= SUPPORTED_10000baseKX4_Full,
+		.advertised	= ADVERTISED_10000baseKX4_Full,
+		.speed		= 10000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
+		.supported	= SUPPORTED_10000baseKR_Full,
+		.advertised	= ADVERTISED_10000baseKR_Full,
+		.speed		= 10000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
+		.supported	= SUPPORTED_20000baseKR2_Full,
+		.advertised	= ADVERTISED_20000baseKR2_Full,
+		.speed		= 20000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
+		.supported	= SUPPORTED_40000baseCR4_Full,
+		.advertised	= ADVERTISED_40000baseCR4_Full,
+		.speed		= 40000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
+		.supported	= SUPPORTED_40000baseKR4_Full,
+		.advertised	= ADVERTISED_40000baseKR4_Full,
+		.speed		= 40000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
+		.supported	= SUPPORTED_40000baseSR4_Full,
+		.advertised	= ADVERTISED_40000baseSR4_Full,
+		.speed		= 40000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
+		.supported	= SUPPORTED_40000baseLR4_Full,
+		.advertised	= ADVERTISED_40000baseLR4_Full,
+		.speed		= 40000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
+				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
+				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+		.speed		= 25000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
+				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+		.speed		= 50000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+		.supported	= SUPPORTED_56000baseKR4_Full,
+		.advertised	= ADVERTISED_56000baseKR4_Full,
+		.speed		= 56000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+		.speed		= 100000,
+	},
+};
+
+#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
+
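+/* Helpers translating PTYS Ethernet protocol bits to ethtool link mode
+ * flags.
+ */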
+static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
+{
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+		return SUPPORTED_FIBRE;
+
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
+		return SUPPORTED_Backplane;
+	return 0;
+}
+
+static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
+{
+	u32 modes = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+			modes |= mlxsw_sx_port_link_mode[i].supported;
+	}
+	return modes;
+}
+
+static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
+{
+	u32 modes = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+			modes |= mlxsw_sx_port_link_mode[i].advertised;
+	}
+	return modes;
+}
+
+static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
+					    struct ethtool_cmd *cmd)
+{
+	u32 speed = SPEED_UNKNOWN;
+	u8 duplex = DUPLEX_UNKNOWN;
+	int i;
+
+	if (!carrier_ok)
+		goto out;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
+			speed = mlxsw_sx_port_link_mode[i].speed;
+			duplex = DUPLEX_FULL;
+			break;
+		}
+	}
+out:
+	ethtool_cmd_speed_set(cmd, speed);
+	cmd->duplex = duplex;
+}
+
+static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
+{
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+		return PORT_FIBRE;
+
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
+		return PORT_DA;
+
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
+		return PORT_NONE;
+
+	return PORT_OTHER;
+}
+
+static int mlxsw_sx_port_get_settings(struct net_device *dev,
+				      struct ethtool_cmd *cmd)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char ptys_pl[MLXSW_REG_PTYS_LEN];
+	u32 eth_proto_cap;
+	u32 eth_proto_admin;
+	u32 eth_proto_oper;
+	int err;
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to get proto");
+		return err;
+	}
+	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
+			      &eth_proto_admin, &eth_proto_oper);
+
+	cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
+			 mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
+			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+	cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
+	mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
+					eth_proto_oper, cmd);
+
+	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+	cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper);
+	cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
+
+	cmd->transceiver = XCVR_INTERNAL;
+	return 0;
+}
+
+static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
+{
+	u32 ptys_proto = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (advertising & mlxsw_sx_port_link_mode[i].advertised)
+			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+	}
+	return ptys_proto;
+}
+
+static u32 mlxsw_sx_to_ptys_speed(u32 speed)
+{
+	u32 ptys_proto = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (speed == mlxsw_sx_port_link_mode[i].speed)
+			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+	}
+	return ptys_proto;
+}
+
+static int mlxsw_sx_port_set_settings(struct net_device *dev,
+				      struct ethtool_cmd *cmd)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char ptys_pl[MLXSW_REG_PTYS_LEN];
+	u32 speed;
+	u32 eth_proto_new;
+	u32 eth_proto_cap;
+	u32 eth_proto_admin;
+	bool is_up;
+	int err;
+
+	speed = ethtool_cmd_speed(cmd);
+
+	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
+		mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
+		mlxsw_sx_to_ptys_speed(speed);
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to get proto");
+		return err;
+	}
+	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+
+	eth_proto_new &= eth_proto_cap;
+	if (!eth_proto_new) {
+		netdev_err(dev, "Not supported proto admin requested");
+		return -EINVAL;
+	}
+	if (eth_proto_new == eth_proto_admin)
+		return 0;
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, eth_proto_new);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to set proto admin");
+		return err;
+	}
+
+	err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
+	if (err) {
+		netdev_err(dev, "Failed to get oper status");
+		return err;
+	}
+	if (!is_up)
+		return 0;
+
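+	/* The port is up; take it down and back up so the new protocol
+	 * settings take effect.
+	 */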
+	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+	if (err) {
+		netdev_err(dev, "Failed to set admin status");
+		return err;
+	}
+
+	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+	if (err) {
+		netdev_err(dev, "Failed to set admin status");
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
+	.get_drvinfo		= mlxsw_sx_port_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+	.get_strings		= mlxsw_sx_port_get_strings,
+	.get_ethtool_stats	= mlxsw_sx_port_get_stats,
+	.get_sset_count		= mlxsw_sx_port_get_sset_count,
+	.get_settings		= mlxsw_sx_port_get_settings,
+	.set_settings		= mlxsw_sx_port_set_settings,
+};
+
+static int mlxsw_sx_port_attr_get(struct net_device *dev,
+				  struct switchdev_attr *attr)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+	switch (attr->id) {
+	case SWITCHDEV_ATTR_PORT_PARENT_ID:
+		attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
+		memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
+	.switchdev_port_attr_get	= mlxsw_sx_port_attr_get,
+};
+
+static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
+{
+	char spad_pl[MLXSW_REG_SPAD_LEN];
+	int err;
+
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
+	if (err)
+		return err;
+	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
+	return 0;
+}
+
+static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	struct net_device *dev = mlxsw_sx_port->dev;
+	char ppad_pl[MLXSW_REG_PPAD_LEN];
+	int err;
+
+	mlxsw_reg_ppad_pack(ppad_pl, false, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
+	if (err)
+		return err;
+	mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
+	/* The last byte of the base MAC address is guaranteed not to
+	 * overflow when the local_port value is added to it.
+	 */
+	dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
+	return 0;
+}
+
+static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
+				       u16 vid, enum mlxsw_reg_spms_state state)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char *spms_pl;
+	int err;
+
+	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+	if (!spms_pl)
+		return -ENOMEM;
+	mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
+	kfree(spms_pl);
+	return err;
+}
+
+static int mlxsw_sx_port_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
+				   u32 speed)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char ptys_pl[MLXSW_REG_PTYS_LEN];
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, speed);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+}
+
+static int
+mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
+				    enum mlxsw_reg_spmlr_learn_mode mode)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char spmlr_pl[MLXSW_REG_SPMLR_LEN];
+
+	mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
+}
+
+static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port;
+	struct net_device *dev;
+	bool usable;
+	int err;
+
+	dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
+	if (!dev)
+		return -ENOMEM;
+	mlxsw_sx_port = netdev_priv(dev);
+	mlxsw_sx_port->dev = dev;
+	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
+	mlxsw_sx_port->local_port = local_port;
+
+	mlxsw_sx_port->pcpu_stats =
+		netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
+	if (!mlxsw_sx_port->pcpu_stats) {
+		err = -ENOMEM;
+		goto err_alloc_stats;
+	}
+
+	dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
+	dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
+	dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;
+
+	err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
+			mlxsw_sx_port->local_port);
+		goto err_dev_addr_get;
+	}
+
+	netif_carrier_off(dev);
+
+	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
+			 NETIF_F_VLAN_CHALLENGED;
+
+	/* Each packet needs to have a Tx header (metadata) on top of all
+	 * other headers.
+	 */
+	dev->hard_header_len += MLXSW_TXHDR_LEN;
+
+	err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to check module\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_module_check;
+	}
+
+	if (!usable) {
+		dev_dbg(mlxsw_sx->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
+			mlxsw_sx_port->local_port);
+		goto port_not_usable;
+	}
+
+	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_system_port_mapping_set;
+	}
+
+	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_swid_set;
+	}
+
+	err = mlxsw_sx_port_speed_set(mlxsw_sx_port,
+				      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_speed_set;
+	}
+
+	err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, ETH_DATA_LEN);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_mtu_set;
+	}
+
+	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+	if (err)
+		goto err_port_admin_status_set;
+
+	err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
+					  MLXSW_PORT_DEFAULT_VID,
+					  MLXSW_REG_SPMS_STATE_FORWARDING);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_stp_state_set;
+	}
+
+	err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
+						  MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_mac_learning_mode_set;
+	}
+
+	err = register_netdev(dev);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
+			mlxsw_sx_port->local_port);
+		goto err_register_netdev;
+	}
+
+	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
+	return 0;
+
+err_register_netdev:
+err_port_admin_status_set:
+err_port_mac_learning_mode_set:
+err_port_stp_state_set:
+err_port_mtu_set:
+err_port_speed_set:
+err_port_swid_set:
+err_port_system_port_mapping_set:
+port_not_usable:
+err_port_module_check:
+err_dev_addr_get:
+	free_percpu(mlxsw_sx_port->pcpu_stats);
+err_alloc_stats:
+	free_netdev(dev);
+	return err;
+}
+
+static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+
+	if (!mlxsw_sx_port)
+		return;
+	unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
+	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
+	free_percpu(mlxsw_sx_port->pcpu_stats);
+	free_netdev(mlxsw_sx_port->dev);
+}
+
+static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
+{
+	int i;
+
+	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+		mlxsw_sx_port_remove(mlxsw_sx, i);
+	kfree(mlxsw_sx->ports);
+}
+
+static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
+{
+	size_t alloc_size;
+	int i;
+	int err;
+
+	alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
+	mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
+	if (!mlxsw_sx->ports)
+		return -ENOMEM;
+
+	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+		err = mlxsw_sx_port_create(mlxsw_sx, i);
+		if (err)
+			goto err_port_create;
+	}
+	return 0;
+
+err_port_create:
+	for (i--; i >= 1; i--)
+		mlxsw_sx_port_remove(mlxsw_sx, i);
+	kfree(mlxsw_sx->ports);
+	return err;
+}
+
+static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
+				     char *pude_pl, void *priv)
+{
+	struct mlxsw_sx *mlxsw_sx = priv;
+	struct mlxsw_sx_port *mlxsw_sx_port;
+	enum mlxsw_reg_pude_oper_status status;
+	u8 local_port;
+
+	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+	mlxsw_sx_port = mlxsw_sx->ports[local_port];
+	if (!mlxsw_sx_port) {
+		dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
+			 local_port);
+		return;
+	}
+
+	status = mlxsw_reg_pude_oper_status_get(pude_pl);
+	if (status == MLXSW_PORT_OPER_STATUS_UP) {
+		netdev_info(mlxsw_sx_port->dev, "link up\n");
+		netif_carrier_on(mlxsw_sx_port->dev);
+	} else {
+		netdev_info(mlxsw_sx_port->dev, "link down\n");
+		netif_carrier_off(mlxsw_sx_port->dev);
+	}
+}
+
+static struct mlxsw_event_listener mlxsw_sx_pude_event = {
+	.func = mlxsw_sx_pude_event_func,
+	.trap_id = MLXSW_TRAP_ID_PUDE,
+};
+
+static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx,
+				   enum mlxsw_event_trap_id trap_id)
+{
+	struct mlxsw_event_listener *el;
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+	int err;
+
+	switch (trap_id) {
+	case MLXSW_TRAP_ID_PUDE:
+		el = &mlxsw_sx_pude_event;
+		break;
+	}
+	err = mlxsw_core_event_listener_register(mlxsw_sx->core, el, mlxsw_sx);
+	if (err)
+		return err;
+
+	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+	if (err)
+		goto err_event_trap_set;
+
+	return 0;
+
+err_event_trap_set:
+	mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+	return err;
+}
+
+static void mlxsw_sx_event_unregister(struct mlxsw_sx *mlxsw_sx,
+				      enum mlxsw_event_trap_id trap_id)
+{
+	struct mlxsw_event_listener *el;
+
+	switch (trap_id) {
+	case MLXSW_TRAP_ID_PUDE:
+		el = &mlxsw_sx_pude_event;
+		break;
+	}
+	mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+}
+
+static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
+				      void *priv)
+{
+	struct mlxsw_sx *mlxsw_sx = priv;
+	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+
+	if (unlikely(!mlxsw_sx_port)) {
+		if (net_ratelimit())
+			dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
+				 local_port);
+		return;
+	}
+
+	skb->dev = mlxsw_sx_port->dev;
+
+	pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+	u64_stats_update_begin(&pcpu_stats->syncp);
+	pcpu_stats->rx_packets++;
+	pcpu_stats->rx_bytes += skb->len;
+	u64_stats_update_end(&pcpu_stats->syncp);
+
+	skb->protocol = eth_type_trans(skb, skb->dev);
+	netif_receive_skb(skb);
+}
+
+static const struct mlxsw_rx_listener mlxsw_sx_rx_listener[] = {
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_FDB_MC,
+	},
+	/* Traps for specific L2 packet types, not trapped as FDB MC */
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_STP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_LACP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_EAPOL,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_LLDP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_MMRP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_MVRP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_RPVST,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_DHCP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
+	},
+};
+
+static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
+{
+	char htgt_pl[MLXSW_REG_HTGT_LEN];
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+	int i;
+	int err;
+
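+	/* Create the RX trap group before pointing traps at it. */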
+	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
+	if (err)
+		return err;
+
+	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+		err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
+						      &mlxsw_sx_rx_listener[i],
+						      mlxsw_sx);
+		if (err)
+			goto err_rx_listener_register;
+
+		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+				    MLXSW_REG_HTGT_TRAP_GROUP_RX,
+				    mlxsw_sx_rx_listener[i].trap_id);
+		err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+		if (err)
+			goto err_rx_trap_set;
+	}
+	return 0;
+
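+/* Error unwind: restore the default FORWARD action for traps already set
+ * and unregister their listeners.
+ */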
+err_rx_trap_set:
+	mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+					  &mlxsw_sx_rx_listener[i],
+					  mlxsw_sx);
+err_rx_listener_register:
+	for (i--; i >= 0; i--) {
+		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+				    MLXSW_REG_HTGT_TRAP_GROUP_RX,
+				    mlxsw_sx_rx_listener[i].trap_id);
+		mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+		mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+						  &mlxsw_sx_rx_listener[i],
+						  mlxsw_sx);
+	}
+	return err;
+}
+
+static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
+{
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+				    MLXSW_REG_HTGT_TRAP_GROUP_RX,
+				    mlxsw_sx_rx_listener[i].trap_id);
+		mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+		mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+						  &mlxsw_sx_rx_listener[i],
+						  mlxsw_sx);
+	}
+}
+
+static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
+{
+	char sfgc_pl[MLXSW_REG_SFGC_LEN];
+	char sgcr_pl[MLXSW_REG_SGCR_LEN];
+	char *smid_pl;
+	char *sftr_pl;
+	int err;
+
+	/* Due to a firmware bug, we must configure SMID. */
+	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
+	if (!smid_pl)
+		return -ENOMEM;
+	mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl);
+	kfree(smid_pl);
+	if (err)
+		return err;
+
+	/* Configure a flooding table, which includes only CPU port. */
+	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+	if (!sftr_pl)
+		return -ENOMEM;
+	mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
+	kfree(sftr_pl);
+	if (err)
+		return err;
+
+	/* Flood different packet types using the flooding table. */
+	mlxsw_reg_sfgc_pack(sfgc_pl,
+			    MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
+			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+			    0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_sfgc_pack(sfgc_pl,
+			    MLXSW_REG_SFGC_TYPE_BROADCAST,
+			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+			    0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_sfgc_pack(sfgc_pl,
+			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
+			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+			    0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_sfgc_pack(sfgc_pl,
+			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
+			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+			    0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_sfgc_pack(sfgc_pl,
+			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
+			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+			    0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_sgcr_pack(sgcr_pl, true);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
+}
+
+static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core,
+			 const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+	struct mlxsw_sx *mlxsw_sx = priv;
+	int err;
+
+	mlxsw_sx->core = mlxsw_core;
+	mlxsw_sx->bus_info = mlxsw_bus_info;
+
+	err = mlxsw_sx_hw_id_get(mlxsw_sx);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
+		return err;
+	}
+
+	err = mlxsw_sx_ports_create(mlxsw_sx);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
+		return err;
+	}
+
+	err = mlxsw_sx_event_register(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Failed to register for PUDE events\n");
+		goto err_event_register;
+	}
+
+	err = mlxsw_sx_traps_init(mlxsw_sx);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps for RX\n");
+		goto err_rx_listener_register;
+	}
+
+	err = mlxsw_sx_flood_init(mlxsw_sx);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
+		goto err_flood_init;
+	}
+
+	return 0;
+
+err_flood_init:
+	mlxsw_sx_traps_fini(mlxsw_sx);
+err_rx_listener_register:
+	mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+err_event_register:
+	mlxsw_sx_ports_remove(mlxsw_sx);
+	return err;
+}
+
+static void mlxsw_sx_fini(void *priv)
+{
+	struct mlxsw_sx *mlxsw_sx = priv;
+
+	mlxsw_sx_traps_fini(mlxsw_sx);
+	mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+	mlxsw_sx_ports_remove(mlxsw_sx);
+}
+
+static struct mlxsw_config_profile mlxsw_sx_config_profile = {
+	.used_max_vepa_channels		= 1,
+	.max_vepa_channels		= 0,
+	.used_max_lag			= 1,
+	.max_lag			= 64,
+	.used_max_port_per_lag		= 1,
+	.max_port_per_lag		= 16,
+	.used_max_mid			= 1,
+	.max_mid			= 7000,
+	.used_max_pgt			= 1,
+	.max_pgt			= 0,
+	.used_max_system_port		= 1,
+	.max_system_port		= 48000,
+	.used_max_vlan_groups		= 1,
+	.max_vlan_groups		= 127,
+	.used_max_regions		= 1,
+	.max_regions			= 400,
+	.used_flood_tables		= 1,
+	.max_flood_tables		= 2,
+	.max_vid_flood_tables		= 1,
+	.used_flood_mode		= 1,
+	.flood_mode			= 3,
+	.used_max_ib_mc			= 1,
+	.max_ib_mc			= 0,
+	.used_max_pkey			= 1,
+	.max_pkey			= 0,
+	.swid_config			= {
+		{
+			.used_type	= 1,
+			.type		= MLXSW_PORT_SWID_TYPE_ETH,
+		}
+	},
+};
+
+static struct mlxsw_driver mlxsw_sx_driver = {
+	.kind			= MLXSW_DEVICE_KIND_SWITCHX2,
+	.owner			= THIS_MODULE,
+	.priv_size		= sizeof(struct mlxsw_sx),
+	.init			= mlxsw_sx_init,
+	.fini			= mlxsw_sx_fini,
+	.txhdr_construct	= mlxsw_sx_txhdr_construct,
+	.txhdr_len		= MLXSW_TXHDR_LEN,
+	.profile		= &mlxsw_sx_config_profile,
+};
+
+static int __init mlxsw_sx_module_init(void)
+{
+	return mlxsw_core_driver_register(&mlxsw_sx_driver);
+}
+
+static void __exit mlxsw_sx_module_exit(void)
+{
+	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
+}
+
+module_init(mlxsw_sx_module_init);
+module_exit(mlxsw_sx_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
+MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
new file mode 100644
index 0000000..53a9550
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -0,0 +1,66 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/trap.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_TRAP_H
+#define _MLXSW_TRAP_H
+
+enum {
+	/* Ethernet EMAD and FDB miss */
+	MLXSW_TRAP_ID_FDB_MC = 0x01,
+	MLXSW_TRAP_ID_ETHEMAD = 0x05,
+	/* L2 traps for specific packet types */
+	MLXSW_TRAP_ID_STP = 0x10,
+	MLXSW_TRAP_ID_LACP = 0x11,
+	MLXSW_TRAP_ID_EAPOL = 0x12,
+	MLXSW_TRAP_ID_LLDP = 0x13,
+	MLXSW_TRAP_ID_MMRP = 0x14,
+	MLXSW_TRAP_ID_MVRP = 0x15,
+	MLXSW_TRAP_ID_RPVST = 0x16,
+	MLXSW_TRAP_ID_DHCP = 0x19,
+	MLXSW_TRAP_ID_IGMP_QUERY = 0x30,
+	MLXSW_TRAP_ID_IGMP_V1_REPORT = 0x31,
+	MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
+	MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
+	MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
+
+	MLXSW_TRAP_ID_MAX = 0x1FF
+};
+
+enum mlxsw_event_trap_id {
+	/* Port Up/Down event generated by hardware */
+	MLXSW_TRAP_ID_PUDE = 0x8,
+};
+
+#endif /* _MLXSW_TRAP_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
new file mode 100644
index 0000000..06fc46c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -0,0 +1,80 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/txheader.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_TXHEADER_H
+#define _MLXSW_TXHEADER_H
+
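+/* The Tx header is prepended to every frame handed to the device. */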
+#define MLXSW_TXHDR_LEN 0x10
+#define MLXSW_TXHDR_VERSION_0 0
+
+enum {
+	MLXSW_TXHDR_ETH_CTL,
+	MLXSW_TXHDR_ETH_DATA,
+};
+
+#define MLXSW_TXHDR_PROTO_ETH 1
+
+enum {
+	MLXSW_TXHDR_ETCLASS_0,
+	MLXSW_TXHDR_ETCLASS_1,
+	MLXSW_TXHDR_ETCLASS_2,
+	MLXSW_TXHDR_ETCLASS_3,
+	MLXSW_TXHDR_ETCLASS_4,
+	MLXSW_TXHDR_ETCLASS_5,
+	MLXSW_TXHDR_ETCLASS_6,
+	MLXSW_TXHDR_ETCLASS_7,
+};
+
+enum {
+	MLXSW_TXHDR_RDQ_OTHER,
+	MLXSW_TXHDR_RDQ_EMAD = 0x1f,
+};
+
+#define MLXSW_TXHDR_CTCLASS3 0
+#define MLXSW_TXHDR_CPU_SIG 0
+#define MLXSW_TXHDR_SIG 0xE0E0
+#define MLXSW_TXHDR_STCLASS_NONE 0
+
+enum {
+	MLXSW_TXHDR_NOT_EMAD,
+	MLXSW_TXHDR_EMAD,
+};
+
+enum {
+	MLXSW_TXHDR_TYPE_DATA,
+	MLXSW_TXHDR_TYPE_CONTROL = 6,
+};
+
+#endif
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index c281117..2d1b942 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8226,31 +8226,7 @@
 	pci_disable_device(pdev);
 }
 
-/**
- * s2io_starter - Entry point for the driver
- * Description: This function is the entry point for the driver. It verifies
- * the module loadable parameters and initializes PCI configuration space.
- */
-
-static int __init s2io_starter(void)
-{
-	return pci_register_driver(&s2io_driver);
-}
-
-/**
- * s2io_closer - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver. It
- * unregisters the driver.
- */
-
-static __exit void s2io_closer(void)
-{
-	pci_unregister_driver(&s2io_driver);
-	DBG_PRINT(INIT_DBG, "cleanup done\n");
-}
-
-module_init(s2io_starter);
-module_exit(s2io_closer);
+module_pci_driver(s2io_driver);
 
 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
 				struct tcphdr **tcp, struct RxD_t *rxdp,
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index d89b6ed..6c5997d 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -1085,8 +1085,6 @@
 static void tx_intr_handler(struct fifo_info *fifo_data);
 static void s2io_handle_errors(void * dev_id);
 
-static int s2io_starter(void);
-static void s2io_closer(void);
 static void s2io_tx_watchdog(struct net_device *dev);
 static void s2io_set_multicast(struct net_device *dev);
 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 055f376..06bcc73 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -24,9 +24,7 @@
 #include <linux/mii.h>
 #include <linux/timer.h>
 #include <linux/irq.h>
-
 #include <linux/vmalloc.h>
-
 #include <linux/io.h>
 #include <asm/byteorder.h>
 #include <linux/bitops.h>
@@ -39,8 +37,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 62
-#define QLCNIC_LINUX_VERSIONID  "5.3.62"
+#define _QLCNIC_LINUX_SUBVERSION 63
+#define QLCNIC_LINUX_VERSIONID  "5.3.63"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
 		 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -926,6 +924,7 @@
 #define QLCNIC_FW_CAPABILITY_SET_DRV_VER	BIT_5
 #define QLCNIC_FW_CAPABILITY_2_BEACON		BIT_7
 #define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG	BIT_9
+#define QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP	BIT_13
 
 #define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD	BIT_0
 #define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD	BIT_1
@@ -2291,8 +2290,9 @@
 
 #define PCI_DEVICE_ID_QLOGIC_QLE824X		0x8020
 #define PCI_DEVICE_ID_QLOGIC_QLE834X		0x8030
-#define PCI_DEVICE_ID_QLOGIC_QLE8830		0x8830
 #define PCI_DEVICE_ID_QLOGIC_VF_QLE834X	0x8430
+#define PCI_DEVICE_ID_QLOGIC_QLE8830		0x8830
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE8C30		0x8C30
 #define PCI_DEVICE_ID_QLOGIC_QLE844X		0x8040
 #define PCI_DEVICE_ID_QLOGIC_VF_QLE844X	0x8440
 
@@ -2319,7 +2319,8 @@
 		  (device == PCI_DEVICE_ID_QLOGIC_QLE8830) ||
 		  (device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
 		  (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
-		  (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
+		  (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+		  (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
 
 	return status;
 }
@@ -2335,7 +2336,8 @@
 	bool status;
 
 	status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
-		  (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+		  (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
+		  (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
 
 	return status;
 }
@@ -2351,7 +2353,8 @@
 {
 	unsigned short device = adapter->pdev->device;
 
-	return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+	return ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+		(device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
 }
 
 static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 840bf36..5ab3adf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -5,14 +5,15 @@
  * See LICENSE.qlcnic for copyright and licensing details.
  */
 
-#include "qlcnic.h"
-#include "qlcnic_sriov.h"
 #include <linux/if_vlan.h>
 #include <linux/ipv6.h>
 #include <linux/ethtool.h>
 #include <linux/interrupt.h>
 #include <linux/aer.h>
 
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+
 static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
 static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
 static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
@@ -118,6 +119,7 @@
 	{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
 	{QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
 	{QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1},
+	{QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP, 4, 1},
 };
 
 const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -3513,6 +3515,31 @@
 	qlcnic_free_mbx_args(&cmd);
 }
 
+#define QLCNIC_83XX_ADD_PORT0		BIT_0
+#define QLCNIC_83XX_ADD_PORT1		BIT_1
+#define QLCNIC_83XX_EXTENDED_MEM_SIZE	13 /* In MB */
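+/* Request extended iSCSI minidump memory for both ports of the adapter. */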
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_cmd_args cmd;
+	int err;
+
+	err = qlcnic_alloc_mbx_args(&cmd, adapter,
+				    QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP);
+	if (err)
+		return err;
+
+	cmd.req.arg[1] = (QLCNIC_83XX_ADD_PORT0 | QLCNIC_83XX_ADD_PORT1);
+	cmd.req.arg[2] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+	cmd.req.arg[3] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+
+	err = qlcnic_issue_cmd(adapter, &cmd);
+	if (err)
+		dev_err(&adapter->pdev->dev,
+			"failed to issue extend iSCSI minidump capability\n");
+
+	return err;
+}
+
 int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
 {
 	u32 major, minor, sub;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 69f828e..331ae2c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -10,6 +10,7 @@
 
 #include <linux/types.h>
 #include <linux/etherdevice.h>
+
 #include "qlcnic_hw.h"
 
 #define QLCNIC_83XX_BAR0_LENGTH 0x4000
@@ -626,6 +627,7 @@
 
 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
 void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *);
 int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 753ea8b..bf89216 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1384,7 +1384,7 @@
 	size_t size;
 	u64 addr;
 
-	temp = kzalloc(fw->size, GFP_KERNEL);
+	temp = vzalloc(fw->size);
 	if (!temp) {
 		release_firmware(fw);
 		fw_info->fw = NULL;
@@ -1430,7 +1430,7 @@
 exit:
 	release_firmware(fw);
 	fw_info->fw = NULL;
-	kfree(temp);
+	vfree(temp);
 
 	return ret;
 }
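
Switching this staging buffer from kzalloc() to vzalloc() matters because fw->size can run to several megabytes: kzalloc() demands physically contiguous pages and may fail under memory fragmentation, while this buffer is only ever touched by the CPU, so virtually contiguous vmalloc memory is sufficient. A minimal sketch of the pattern, assuming kernel context (stage_fw_blob() is a hypothetical helper, not a driver function):

#include <linux/vmalloc.h>
#include <linux/string.h>

/* Stage a large, CPU-only copy of a blob.  vzalloc() needs only
 * virtually contiguous pages, so a multi-megabyte image cannot fail
 * on physical fragmentation the way kzalloc() can; the buffer is
 * never handed to a DMA engine, which is what makes vmalloc memory
 * acceptable here.  It must be released with vfree(), never kfree().
 */
static void *stage_fw_blob(const void *src, size_t size)
{
	void *buf = vzalloc(size);

	if (!buf)
		return NULL;
	memcpy(buf, src, size);
	return buf;		/* caller releases with vfree(buf) */
}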
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 75ee9e4..509b596 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -5,13 +5,13 @@
  * See LICENSE.qlcnic for copyright and licensing details.
  */
 
-#include "qlcnic.h"
-#include "qlcnic_hdr.h"
-
 #include <linux/slab.h>
 #include <net/ip.h>
 #include <linux/bitops.h>
 
+#include "qlcnic.h"
+#include "qlcnic_hdr.h"
+
 #define MASK(n) ((1ULL<<(n))-1)
 #define OCM_WIN_P3P(addr) (addr & 0xffc0000)
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index cbe2399..4bb33af 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -109,6 +109,7 @@
 #define QLCNIC_CMD_GET_LED_CONFIG		0x6A
 #define QLCNIC_CMD_83XX_SET_DRV_VER		0x6F
 #define QLCNIC_CMD_ADD_RCV_RINGS		0x0B
+#define QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP	0x37
 
 #define QLCNIC_INTRPT_INTX			1
 #define QLCNIC_INTRPT_MSIX			3
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2f6cc42..8b08b20 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -7,11 +7,6 @@
 
 #include <linux/vmalloc.h>
 #include <linux/interrupt.h>
-
-#include "qlcnic.h"
-#include "qlcnic_sriov.h"
-#include "qlcnic_hw.h"
-
 #include <linux/swab.h>
 #include <linux/dma-mapping.h>
 #include <linux/if_vlan.h>
@@ -25,6 +20,10 @@
 #include <net/vxlan.h>
 #endif
 
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+#include "qlcnic_hw.h"
+
 MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
@@ -111,8 +110,9 @@
 static const struct pci_device_id qlcnic_pci_tbl[] = {
 	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
 	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
-	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
 	ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
+	ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE8C30),
 	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
 	ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
 	{0,}
@@ -1149,6 +1149,7 @@
 	case PCI_DEVICE_ID_QLOGIC_QLE844X:
 	case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
 	case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
+	case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
 		*bar = QLCNIC_83XX_BAR0_LENGTH;
 		break;
 	default:
@@ -2403,7 +2404,6 @@
 			qlcnic_free_tx_rings(adapter);
 			return -ENOMEM;
 		}
-		memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
 		tx_ring->cmd_buf_arr = cmd_buf_arr;
 		spin_lock_init(&tx_ring->tx_clean_lock);
 	}
@@ -2492,6 +2492,7 @@
 		qlcnic_83xx_register_map(ahw);
 		break;
 	case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+	case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
 	case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
 		qlcnic_sriov_vf_register_map(ahw);
 		break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 332bb8a..cda9e60 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -5,13 +5,13 @@
  * See LICENSE.qlcnic for copyright and licensing details.
  */
 
+#include <net/ip.h>
+
 #include "qlcnic.h"
 #include "qlcnic_hdr.h"
 #include "qlcnic_83xx_hw.h"
 #include "qlcnic_hw.h"
 
-#include <net/ip.h>
-
 #define QLC_83XX_MINIDUMP_FLASH		0x520000
 #define QLC_83XX_OCM_INDEX			3
 #define QLC_83XX_PCI_INDEX			0
@@ -1388,27 +1388,60 @@
 	fw_dump->clr = 1;
 	snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
 	netdev_info(adapter->netdev,
-		    "Dump data %d bytes captured, template header size %d bytes\n",
-		    fw_dump->size, fw_dump->tmpl_hdr_size);
+		    "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n",
+		    fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size,
+		    fw_dump->tmpl_hdr);
 	/* Send a udev event to notify availability of FW dump */
 	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
 
 	return 0;
 }
 
+static inline bool
+qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter)
+{
+	/* For special adapters (with 0x8830 device ID), where iSCSI firmware
+	 * dump needs to be captured as part of regular firmware dump
+	 * collection process, firmware exports its capability through
+	 * capability registers
+	 */
+	return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) &&
+		(adapter->ahw->extra_capability[0] &
+		 QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP));
+}
+
 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
 {
 	u32 prev_version, current_version;
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
 	struct pci_dev *pdev = adapter->pdev;
+	bool extended = false;
 
 	prev_version = adapter->fw_version;
 	current_version = qlcnic_83xx_get_fw_version(adapter);
 
 	if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
 		vfree(fw_dump->tmpl_hdr);
+
+		if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
+			extended = !qlcnic_83xx_extend_md_capab(adapter);
+
 		if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
 			dev_info(&pdev->dev, "Supports FW dump capability\n");
+
+		/* Once we have minidump template with extended iSCSI dump
+		 * capability, update the minidump capture mask to 0x1f as
+		 * per FW requirement
+		 */
+		if (extended) {
+			struct qlcnic_83xx_dump_template_hdr *hdr;
+
+			hdr = fw_dump->tmpl_hdr;
+			hdr->drv_cap_mask = 0x1f;
+			fw_dump->cap_mask = 0x1f;
+			dev_info(&pdev->dev,
+				 "Extended iSCSI dump capability and updated capture mask to 0x1f\n");
+		}
 	}
 }
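
The gating logic above rests on two independent conditions, the 0x8830 device ID and a firmware capability bit, and the capture mask is only widened once the extend command has actually succeeded. A standalone model of that flow (the capability bit position and the default mask are illustrative; only the 0x8830 ID and the 0x1f mask come from the patch):

#include <stdbool.h>
#include <stdio.h>

#define DEV_QLE8830		0x8830
#define CAP2_EXT_ISCSI_DUMP	(1u << 13)	/* illustrative bit position */

static bool wants_extended_dump(unsigned short device, unsigned int cap2)
{
	return device == DEV_QLE8830 && (cap2 & CAP2_EXT_ISCSI_DUMP);
}

int main(void)
{
	unsigned int cap_mask = 0x0f;	/* illustrative default mask */
	bool extended = false;

	if (wants_extended_dump(DEV_QLE8830, CAP2_EXT_ISCSI_DUMP))
		extended = true;	/* i.e. the extend command returned 0 */

	if (extended)	/* FW requires 0x1f once iSCSI dump is included */
		cap_mask = 0x1f;

	printf("capture mask 0x%x\n", cap_mask);
	return 0;
}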
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 4677b2e..017d8c2c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -8,10 +8,11 @@
 #ifndef _QLCNIC_83XX_SRIOV_H_
 #define _QLCNIC_83XX_SRIOV_H_
 
-#include "qlcnic.h"
 #include <linux/types.h>
 #include <linux/pci.h>
 
+#include "qlcnic.h"
+
 extern const u32 qlcnic_83xx_reg_tbl[];
 extern const u32 qlcnic_83xx_ext_reg_tbl[];
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index e631246..546cd5f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -5,10 +5,11 @@
  * See LICENSE.qlcnic for copyright and licensing details.
  */
 
+#include <linux/types.h>
+
 #include "qlcnic_sriov.h"
 #include "qlcnic.h"
 #include "qlcnic_83xx_hw.h"
-#include <linux/types.h>
 
 #define QLC_BC_COMMAND	0
 #define QLC_BC_RESPONSE	1
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index a29538b..afd687e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -5,9 +5,10 @@
  * See LICENSE.qlcnic for copyright and licensing details.
  */
 
+#include <linux/types.h>
+
 #include "qlcnic_sriov.h"
 #include "qlcnic.h"
-#include <linux/types.h>
 
 #define QLCNIC_SRIOV_VF_MAX_MAC 7
 #define QLC_VF_MIN_TX_RATE	100
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 05c28f2..ccbb045 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -7,10 +7,6 @@
 
 #include <linux/slab.h>
 #include <linux/interrupt.h>
-
-#include "qlcnic.h"
-#include "qlcnic_hw.h"
-
 #include <linux/swab.h>
 #include <linux/dma-mapping.h>
 #include <net/ip.h>
@@ -24,6 +20,9 @@
 #include <linux/hwmon-sysfs.h>
 #endif
 
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
 int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
 {
 	return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f790f61..24dcbe6 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -637,6 +637,9 @@
 	/* _TBICSRBit */
 	TBILinkOK	= 0x02000000,
 
+	/* ResetCounterCommand */
+	CounterReset	= 0x1,
+
 	/* DumpCounterCommand */
 	CounterDump	= 0x8,
 
@@ -747,6 +750,13 @@
 	__le16	tx_underun;
 };
 
+struct rtl8169_tc_offsets {
+	bool	inited;
+	__le64	tx_errors;
+	__le32	tx_multi_collision;
+	__le16	tx_aborted;
+};
+
 enum rtl_flag {
 	RTL_FLAG_TASK_ENABLED,
 	RTL_FLAG_TASK_SLOW_PENDING,
@@ -824,6 +834,7 @@
 
 	struct mii_if_info mii;
 	struct rtl8169_counters counters;
+	struct rtl8169_tc_offsets tc_offset;
 	u32 saved_wolopts;
 	u32 opts1_mask;
 
@@ -2179,6 +2190,73 @@
 	}
 }
 
+static struct rtl8169_counters *rtl8169_map_counters(struct net_device *dev,
+						     dma_addr_t *paddr,
+						     u32 counter_cmd)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct device *d = &tp->pci_dev->dev;
+	struct rtl8169_counters *counters;
+	u32 cmd;
+
+	counters = dma_alloc_coherent(d, sizeof(*counters), paddr, GFP_KERNEL);
+	if (counters) {
+		RTL_W32(CounterAddrHigh, (u64)*paddr >> 32);
+		cmd = (u64)*paddr & DMA_BIT_MASK(32);
+		RTL_W32(CounterAddrLow, cmd);
+		RTL_W32(CounterAddrLow, cmd | counter_cmd);
+	}
+	return counters;
+}
+
+static void rtl8169_unmap_counters(struct net_device *dev,
+				   dma_addr_t paddr,
+				   struct rtl8169_counters *counters)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct device *d = &tp->pci_dev->dev;
+
+	RTL_W32(CounterAddrLow, 0);
+	RTL_W32(CounterAddrHigh, 0);
+
+	dma_free_coherent(d, sizeof(*counters), counters, paddr);
+}
+
+DECLARE_RTL_COND(rtl_reset_counters_cond)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	return RTL_R32(CounterAddrLow) & CounterReset;
+}
+
+static bool rtl8169_reset_counters(struct net_device *dev)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+	struct rtl8169_counters *counters;
+	dma_addr_t paddr;
+	bool ret = true;
+
+	/*
+	 * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
+	 * tally counters.
+	 */
+	if (tp->mac_version < RTL_GIGA_MAC_VER_19)
+		return true;
+
+	counters = rtl8169_map_counters(dev, &paddr, CounterReset);
+	if (!counters)
+		return false;
+
+	if (!rtl_udelay_loop_wait_low(tp, &rtl_reset_counters_cond, 10, 1000))
+		ret = false;
+
+	rtl8169_unmap_counters(dev, paddr, counters);
+
+	return ret;
+}
+
 DECLARE_RTL_COND(rtl_counters_cond)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
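
Dump and reset share one shape: allocate a coherent buffer, write its DMA address, set the command bit, then poll until the hardware clears it. A self-contained user-space model of the poll loop (the simulated register stands in for RTL_R32(CounterAddrLow); the 1000-iteration budget mirrors the call sites above):

#include <stdbool.h>
#include <stdio.h>

#define CMD_RESET 0x1	/* CounterReset */

/* Simulated command register: the "hardware" clears the bit after a
 * few reads, the way the NIC does once the counters are reset.
 */
static unsigned int cmd_reg = CMD_RESET;

static unsigned int read_cmd_reg(void)
{
	static int reads;

	if (++reads >= 3)
		cmd_reg &= ~CMD_RESET;
	return cmd_reg;
}

/* Essence of rtl_udelay_loop_wait_low(): poll until @bit clears, at
 * most @n times.  The real loop sleeps 10us between reads; the callers
 * above turn a timeout into a false return from reset/update.
 */
static bool wait_bit_low(unsigned int bit, int n)
{
	while (n--)
		if (!(read_cmd_reg() & bit))
			return true;
	return false;
}

int main(void)
{
	printf("reset %s\n",
	       wait_bit_low(CMD_RESET, 1000) ? "done" : "timed out");
	return 0;
}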
@@ -2186,38 +2264,71 @@
 	return RTL_R32(CounterAddrLow) & CounterDump;
 }
 
-static void rtl8169_update_counters(struct net_device *dev)
+static bool rtl8169_update_counters(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
-	struct device *d = &tp->pci_dev->dev;
 	struct rtl8169_counters *counters;
 	dma_addr_t paddr;
-	u32 cmd;
+	bool ret = true;
 
 	/*
 	 * Some chips are unable to dump tally counters when the receiver
 	 * is disabled.
 	 */
 	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
-		return;
+		return true;
 
-	counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
+	counters = rtl8169_map_counters(dev, &paddr, CounterDump);
 	if (!counters)
-		return;
-
-	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
-	cmd = (u64)paddr & DMA_BIT_MASK(32);
-	RTL_W32(CounterAddrLow, cmd);
-	RTL_W32(CounterAddrLow, cmd | CounterDump);
+		return false;
 
 	if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
 		memcpy(&tp->counters, counters, sizeof(*counters));
+	else
+		ret = false;
 
-	RTL_W32(CounterAddrLow, 0);
-	RTL_W32(CounterAddrHigh, 0);
+	rtl8169_unmap_counters(dev, paddr, counters);
 
-	dma_free_coherent(d, sizeof(*counters), counters, paddr);
+	return ret;
+}
+
+static bool rtl8169_init_counter_offsets(struct net_device *dev)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+	bool ret = false;
+
+	/*
+	 * rtl8169_init_counter_offsets is called from rtl_open.  On chip
+	 * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only
+	 * reset by a power cycle, while the counter values collected by the
+	 * driver are reset at every driver unload/load cycle.
+	 *
+	 * To make sure the HW values returned by @get_stats64 match the SW
+	 * values, we collect the initial values at first open(*) and use them
+	 * as offsets to normalize the values returned by @get_stats64.
+	 *
+	 * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one
+	 * for the reason stated in rtl8169_update_counters; CmdRxEnb is only
+	 * set at open time by rtl_hw_start.
+	 */
+
+	if (tp->tc_offset.inited)
+		return true;
+
+	/* If both reset and update fail, propagate that to the caller. */
+	if (rtl8169_reset_counters(dev))
+		ret = true;
+
+	if (rtl8169_update_counters(dev))
+		ret = true;
+
+	tp->tc_offset.tx_errors = tp->counters.tx_errors;
+	tp->tc_offset.tx_multi_collision = tp->counters.tx_multi_collision;
+	tp->tc_offset.tx_aborted = tp->counters.tx_aborted;
+	tp->tc_offset.inited = true;
+
+	return ret;
 }
 
 static void rtl8169_get_ethtool_stats(struct net_device *dev,
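
The offset scheme is plain arithmetic: the hardware tally counters survive driver reloads, so the values sampled at first open become a baseline that is subtracted from every later read. A self-contained model (endianness conversions dropped for brevity):

#include <stdint.h>
#include <stdio.h>

struct tally { uint64_t tx_errors; };

static struct tally hw = { .tx_errors = 1200 };	/* survives reloads */
static struct tally offset;			/* sampled at first open */

static void init_offsets(void)
{
	offset = hw;
}

static uint64_t stat_tx_errors(void)
{
	return hw.tx_errors - offset.tx_errors;
}

int main(void)
{
	init_offsets();		/* first open: baseline = 1200 */
	hw.tx_errors += 7;	/* errors while this instance runs */
	printf("tx_errors since open: %llu\n",
	       (unsigned long long)stat_tx_errors());	/* prints 7 */
	return 0;
}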
@@ -7367,6 +7478,9 @@
 			tp->rx_stats.packets++;
 			tp->rx_stats.bytes += pkt_size;
 			u64_stats_update_end(&tp->rx_stats.syncp);
+
+			if (skb->pkt_type == PACKET_MULTICAST)
+				dev->stats.multicast++;
 		}
 release_descriptor:
 		desc->opts2 = 0;
@@ -7631,6 +7745,9 @@
 
 	rtl_hw_start(dev);
 
+	if (!rtl8169_init_counter_offsets(dev))
+		netif_warn(tp, hw, dev, "counter reset/update failed\n");
+
 	netif_start_queue(dev);
 
 	rtl_unlock_work(tp);
@@ -7674,7 +7791,6 @@
 		stats->rx_bytes	= tp->rx_stats.bytes;
 	} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
 
-
 	do {
 		start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
 		stats->tx_packets = tp->tx_stats.packets;
@@ -7688,6 +7804,24 @@
 	stats->rx_crc_errors	= dev->stats.rx_crc_errors;
 	stats->rx_fifo_errors	= dev->stats.rx_fifo_errors;
 	stats->rx_missed_errors = dev->stats.rx_missed_errors;
+	stats->multicast	= dev->stats.multicast;
+
+	/*
+	 * Fetch additional counter values from the hardware tally counters
+	 * that are missing from the stats collected by the driver.
+	 */
+	rtl8169_update_counters(dev);
+
+	/*
+	 * Subtract values fetched during initialization.
+	 * See rtl8169_init_counter_offsets for a description of why we do that.
+	 */
+	stats->tx_errors = le64_to_cpu(tp->counters.tx_errors) -
+		le64_to_cpu(tp->tc_offset.tx_errors);
+	stats->collisions = le32_to_cpu(tp->counters.tx_multi_collision) -
+		le32_to_cpu(tp->tc_offset.tx_multi_collision);
+	stats->tx_aborted_errors = le16_to_cpu(tp->counters.tx_aborted) -
+		le16_to_cpu(tp->tc_offset.tx_aborted);
 
 	return stats;
 }
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 8aa50ac..a157aaa 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -658,6 +658,8 @@
 	__le32 dptr;	/* Descriptor pointer */
 };
 
+#define DPTR_ALIGN	4	/* Required descriptor pointer alignment */
+
 enum DIE_DT {
 	/* Frame data */
 	DT_FMID		= 0x40,
@@ -739,6 +741,7 @@
 #define RX_QUEUE_OFFSET	4
 #define NUM_RX_QUEUE	2
 #define NUM_TX_QUEUE	2
+#define NUM_TX_DESC	2	/* TX descriptors per packet */
 
 struct ravb_tstamp_skb {
 	struct list_head list;
@@ -777,9 +780,9 @@
 	dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
 	struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
 	struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
+	void *tx_align[NUM_TX_QUEUE];
 	struct sk_buff **rx_skb[NUM_RX_QUEUE];
 	struct sk_buff **tx_skb[NUM_TX_QUEUE];
-	void **tx_buffers[NUM_TX_QUEUE];
 	u32 rx_over_errors;
 	u32 rx_fifo_errors;
 	struct net_device_stats stats[NUM_RX_QUEUE];
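
With NUM_TX_DESC descriptors consumed per packet, the ring index math changes throughout the driver: descriptor slots are counted modulo num_tx_ring[q] * NUM_TX_DESC, the skb array is indexed by entry / NUM_TX_DESC, and the skb is freed only on the last descriptor of a packet. A tiny standalone check of that mapping (the ring size is illustrative):

#include <stdio.h>

#define NUM_TX_DESC	2	/* descriptors per packet */
#define RING_ENTRIES	4	/* illustrative num_tx_ring[q] */

int main(void)
{
	unsigned int cur_tx;

	/* cur_tx advances by NUM_TX_DESC per transmitted packet */
	for (cur_tx = 0; cur_tx < 3 * NUM_TX_DESC; cur_tx += NUM_TX_DESC) {
		unsigned int entry = cur_tx % (RING_ENTRIES * NUM_TX_DESC);

		printf("desc slots %u,%u -> skb slot %u\n",
		       entry, entry + 1, entry / NUM_TX_DESC);
	}
	return 0;
}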
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 78849dd..450899e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -195,12 +195,8 @@
 	priv->tx_skb[q] = NULL;
 
 	/* Free aligned TX buffers */
-	if (priv->tx_buffers[q]) {
-		for (i = 0; i < priv->num_tx_ring[q]; i++)
-			kfree(priv->tx_buffers[q][i]);
-	}
-	kfree(priv->tx_buffers[q]);
-	priv->tx_buffers[q] = NULL;
+	kfree(priv->tx_align[q]);
+	priv->tx_align[q] = NULL;
 
 	if (priv->rx_ring[q]) {
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -212,7 +208,7 @@
 
 	if (priv->tx_ring[q]) {
 		ring_size = sizeof(struct ravb_tx_desc) *
-			    (priv->num_tx_ring[q] + 1);
+			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 		dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
@@ -223,11 +219,12 @@
 static void ravb_ring_format(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
-	struct ravb_ex_rx_desc *rx_desc = NULL;
-	struct ravb_tx_desc *tx_desc = NULL;
-	struct ravb_desc *desc = NULL;
+	struct ravb_ex_rx_desc *rx_desc;
+	struct ravb_tx_desc *tx_desc;
+	struct ravb_desc *desc;
 	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
-	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
+	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
+			   NUM_TX_DESC;
 	dma_addr_t dma_addr;
 	int i;
 
@@ -260,11 +257,12 @@
 
 	memset(priv->tx_ring[q], 0, tx_ring_size);
 	/* Build TX ring buffer */
-	for (i = 0; i < priv->num_tx_ring[q]; i++) {
-		tx_desc = &priv->tx_ring[q][i];
+	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
+	     i++, tx_desc++) {
+		tx_desc->die_dt = DT_EEMPTY;
+		tx_desc++;
 		tx_desc->die_dt = DT_EEMPTY;
 	}
-	tx_desc = &priv->tx_ring[q][i];
 	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 	tx_desc->die_dt = DT_LINKFIX; /* type */
 
@@ -285,7 +283,6 @@
 	struct ravb_private *priv = netdev_priv(ndev);
 	struct sk_buff *skb;
 	int ring_size;
-	void *buffer;
 	int i;
 
 	/* Allocate RX and TX skb rings */
@@ -305,19 +302,11 @@
 	}
 
 	/* Allocate rings for the aligned buffers */
-	priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
-				      sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
-	if (!priv->tx_buffers[q])
+	priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
+				    DPTR_ALIGN - 1, GFP_KERNEL);
+	if (!priv->tx_align[q])
 		goto error;
 
-	for (i = 0; i < priv->num_tx_ring[q]; i++) {
-		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
-		if (!buffer)
-			goto error;
-		/* Aligned TX buffer */
-		priv->tx_buffers[q][i] = buffer;
-	}
-
 	/* Allocate all RX descriptors. */
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 	priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
@@ -329,7 +318,8 @@
 	priv->dirty_rx[q] = 0;
 
 	/* Allocate all TX descriptors. */
-	ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1);
+	ring_size = sizeof(struct ravb_tx_desc) *
+		    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 	priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
 					      &priv->tx_desc_dma[q],
 					      GFP_KERNEL);
@@ -439,11 +429,12 @@
 	struct net_device_stats *stats = &priv->stats[q];
 	struct ravb_tx_desc *desc;
 	int free_num = 0;
-	int entry = 0;
+	int entry;
 	u32 size;
 
 	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-		entry = priv->dirty_tx[q] % priv->num_tx_ring[q];
+		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+					     NUM_TX_DESC);
 		desc = &priv->tx_ring[q][entry];
 		if (desc->die_dt != DT_FEMPTY)
 			break;
@@ -451,14 +442,18 @@
 		dma_rmb();
 		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
 		/* Free the original skb. */
-		if (priv->tx_skb[q][entry]) {
+		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
 			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
 					 size, DMA_TO_DEVICE);
-			dev_kfree_skb_any(priv->tx_skb[q][entry]);
-			priv->tx_skb[q][entry] = NULL;
+			/* Last packet descriptor? */
+			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+				entry /= NUM_TX_DESC;
+				dev_kfree_skb_any(priv->tx_skb[q][entry]);
+				priv->tx_skb[q][entry] = NULL;
+				stats->tx_packets++;
+			}
 			free_num++;
 		}
-		stats->tx_packets++;
 		stats->tx_bytes += size;
 		desc->die_dt = DT_EEMPTY;
 	}
@@ -512,8 +507,8 @@
 	struct sk_buff *skb;
 	dma_addr_t dma_addr;
 	struct timespec64 ts;
-	u16 pkt_len = 0;
 	u8  desc_status;
+	u16 pkt_len;
 	int limit;
 
 	boguscnt = min(boguscnt, *quota);
@@ -1277,44 +1272,60 @@
 static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
-	struct ravb_tstamp_skb *ts_skb = NULL;
 	u16 q = skb_get_queue_mapping(skb);
+	struct ravb_tstamp_skb *ts_skb;
 	struct ravb_tx_desc *desc;
 	unsigned long flags;
 	u32 dma_addr;
 	void *buffer;
 	u32 entry;
+	u32 len;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
+	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
+	    NUM_TX_DESC) {
 		netif_err(priv, tx_queued, ndev,
 			  "still transmitting with the full ring!\n");
 		netif_stop_subqueue(ndev, q);
 		spin_unlock_irqrestore(&priv->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
-	entry = priv->cur_tx[q] % priv->num_tx_ring[q];
-	priv->tx_skb[q][entry] = skb;
+	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
+	priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
 
 	if (skb_put_padto(skb, ETH_ZLEN))
 		goto drop;
 
-	buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN);
-	memcpy(buffer, skb->data, skb->len);
-	desc = &priv->tx_ring[q][entry];
-	desc->ds_tagl = cpu_to_le16(skb->len);
-	dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE);
+	buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+		 entry / NUM_TX_DESC * DPTR_ALIGN;
+	len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+	memcpy(buffer, skb->data, len);
+	dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(&ndev->dev, dma_addr))
 		goto drop;
+
+	desc = &priv->tx_ring[q][entry];
+	desc->ds_tagl = cpu_to_le16(len);
+	desc->dptr = cpu_to_le32(dma_addr);
+
+	buffer = skb->data + len;
+	len = skb->len - len;
+	dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&ndev->dev, dma_addr))
+		goto unmap;
+
+	desc++;
+	desc->ds_tagl = cpu_to_le16(len);
 	desc->dptr = cpu_to_le32(dma_addr);
 
 	/* TX timestamp required */
 	if (q == RAVB_NC) {
 		ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
 		if (!ts_skb) {
-			dma_unmap_single(&ndev->dev, dma_addr, skb->len,
+			desc--;
+			dma_unmap_single(&ndev->dev, dma_addr, len,
 					 DMA_TO_DEVICE);
-			goto drop;
+			goto unmap;
 		}
 		ts_skb->skb = skb;
 		ts_skb->tag = priv->ts_skb_tag++;
@@ -1330,13 +1341,15 @@
 
 	/* Descriptor type must be set after all the above writes */
 	dma_wmb();
-	desc->die_dt = DT_FSINGLE;
+	desc->die_dt = DT_FEND;
+	desc--;
+	desc->die_dt = DT_FSTART;
 
 	ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
 
-	priv->cur_tx[q]++;
-	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
-	    !ravb_tx_free(ndev, q))
+	priv->cur_tx[q] += NUM_TX_DESC;
+	if (priv->cur_tx[q] - priv->dirty_tx[q] >
+	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
 		netif_stop_subqueue(ndev, q);
 
 exit:
@@ -1344,9 +1357,12 @@
 	spin_unlock_irqrestore(&priv->lock, flags);
 	return NETDEV_TX_OK;
 
+unmap:
+	dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
 drop:
 	dev_kfree_skb_any(skb);
-	priv->tx_skb[q][entry] = NULL;
+	priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
 	goto exit;
 }
 
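
The two-descriptor transmit path splits each packet at the first DPTR_ALIGN boundary: the 0-3 unaligned head bytes are copied into a small per-entry aligned area and carried by the first descriptor (DT_FSTART), while the rest of the skb is DMA-mapped in place for the second (DT_FEND). The alignment arithmetic, reduced to a standalone sketch (align_up() mirrors the kernel's PTR_ALIGN()):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DPTR_ALIGN 4

/* Round @p up to the next DPTR_ALIGN boundary, like PTR_ALIGN(). */
static uintptr_t align_up(uintptr_t p)
{
	return (p + DPTR_ALIGN - 1) & ~(uintptr_t)(DPTR_ALIGN - 1);
}

int main(void)
{
	unsigned char pkt[64];
	uintptr_t data = (uintptr_t)pkt + 1;	/* deliberately misaligned */
	size_t head = align_up(data) - data;	/* bytes bounced: 0..3 */

	printf("head copied to aligned buffer: %zu bytes\n", head);
	printf("tail mapped in place:          %zu bytes\n",
	       sizeof(pkt) - 1 - head);
	return 0;
}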
@@ -1643,7 +1659,7 @@
 	ndev->dma = -1;
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
-		error = -ENODEV;
+		error = irq;
 		goto out_release;
 	}
 	ndev->irq = irq;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 7fb244f..257ea71 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3089,10 +3089,8 @@
 
 	ndev->dma = -1;
 	ret = platform_get_irq(pdev, 0);
-	if (ret < 0) {
-		ret = -ENODEV;
+	if (ret < 0)
 		goto out_release;
-	}
 	ndev->irq = ret;
 
 	SET_NETDEV_DEV(ndev, &pdev->dev);
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 2e7f9a2..34ac41a 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -202,6 +202,7 @@
 	ROCKER_CTRL_IPV4_MCAST,
 	ROCKER_CTRL_IPV6_MCAST,
 	ROCKER_CTRL_DFLT_BRIDGING,
+	ROCKER_CTRL_DFLT_OVS,
 	ROCKER_CTRL_MAX,
 };
 
@@ -323,7 +324,14 @@
 
 static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
 {
-	return !!rocker_port->bridge_dev;
+	return rocker_port->bridge_dev &&
+	       netif_is_bridge_master(rocker_port->bridge_dev);
+}
+
+static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
+{
+	return rocker_port->bridge_dev &&
+	       netif_is_ovs_master(rocker_port->bridge_dev);
 }
 
 #define ROCKER_OP_FLAG_REMOVE		BIT(0)
@@ -1818,6 +1826,30 @@
 }
 
 static int
+rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
+				      struct rocker_desc_info *desc_info,
+				      void *priv)
+{
+	int mtu = *(int *)priv;
+	struct rocker_tlv *cmd_info;
+
+	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+		return -EMSGSIZE;
+	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+	if (!cmd_info)
+		return -EMSGSIZE;
+	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+			       rocker_port->pport))
+		return -EMSGSIZE;
+	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
+			       mtu))
+		return -EMSGSIZE;
+	rocker_tlv_nest_end(desc_info, cmd_info);
+	return 0;
+}
+
+static int
 rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
 				  struct rocker_desc_info *desc_info,
 				  void *priv)
@@ -1874,6 +1906,14 @@
 			       macaddr, NULL, NULL);
 }
 
+static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
+					    int mtu)
+{
+	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+			       rocker_cmd_set_port_settings_mtu_prep,
+			       &mtu, NULL, NULL);
+}
+
 static int rocker_port_set_learning(struct rocker_port *rocker_port,
 				    enum switchdev_trans trans)
 {
@@ -3243,6 +3283,12 @@
 		.bridge = true,
 		.copy_to_cpu = true,
 	},
+	[ROCKER_CTRL_DFLT_OVS] = {
+		/* pass all pkts up to CPU */
+		.eth_dst = zero_mac,
+		.eth_dst_mask = zero_mac,
+		.acl = true,
+	},
 };
 
 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
@@ -3755,11 +3801,14 @@
 		break;
 	case BR_STATE_LEARNING:
 	case BR_STATE_FORWARDING:
-		want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
+		if (!rocker_port_is_ovsed(rocker_port))
+			want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
 		want[ROCKER_CTRL_IPV4_MCAST] = true;
 		want[ROCKER_CTRL_IPV6_MCAST] = true;
 		if (rocker_port_is_bridged(rocker_port))
 			want[ROCKER_CTRL_DFLT_BRIDGING] = true;
+		else if (rocker_port_is_ovsed(rocker_port))
+			want[ROCKER_CTRL_DFLT_OVS] = true;
 		else
 			want[ROCKER_CTRL_LOCAL_ARP] = true;
 		break;
@@ -3983,7 +4032,8 @@
 
 	napi_enable(&rocker_port->napi_tx);
 	napi_enable(&rocker_port->napi_rx);
-	rocker_port_set_enable(rocker_port, true);
+	if (!dev->proto_down)
+		rocker_port_set_enable(rocker_port, true);
 	netif_start_queue(dev);
 	return 0;
 
@@ -4102,8 +4152,11 @@
 					  skb->data, skb_headlen(skb));
 	if (err)
 		goto nest_cancel;
-	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
-		goto nest_cancel;
+	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
+		err = skb_linearize(skb);
+		if (err)
+			goto unmap_frags;
+	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -4152,6 +4205,34 @@
 	return 0;
 }
 
+static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct rocker_port *rocker_port = netdev_priv(dev);
+	int running = netif_running(dev);
+	int err;
+
+#define ROCKER_PORT_MIN_MTU	68
+#define ROCKER_PORT_MAX_MTU	9000
+
+	if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
+		return -EINVAL;
+
+	if (running)
+		rocker_port_stop(dev);
+
+	netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
+	dev->mtu = new_mtu;
+
+	err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
+	if (err)
+		return err;
+
+	if (running)
+		err = rocker_port_open(dev);
+
+	return err;
+}
+
 static int rocker_port_get_phys_port_name(struct net_device *dev,
 					  char *buf, size_t len)
 {
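
The MTU callback follows the usual quiesce/program/restart shape, and deliberately leaves the port stopped when the hardware command fails. Its decision logic as a standalone sketch (the stop/reopen steps are elided; the 68/9000 bounds come from the macros above):

#include <stdio.h>

#define ROCKER_PORT_MIN_MTU 68
#define ROCKER_PORT_MAX_MTU 9000

/* Stub for rocker_cmd_set_port_settings_mtu(): accept anything. */
static int hw_set_mtu(int mtu)
{
	(void)mtu;
	return 0;
}

/* Validate the range first, program the hardware after updating the
 * soft mtu, and let a hardware error short-circuit the restart so the
 * port stays down on failure.
 */
static int change_mtu(int *dev_mtu, int new_mtu)
{
	if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
		return -22;	/* -EINVAL */
	*dev_mtu = new_mtu;
	return hw_set_mtu(new_mtu);
}

int main(void)
{
	int mtu = 1500;

	printf("rc=%d mtu=%d\n", change_mtu(&mtu, 9000), mtu);
	printf("rc=%d mtu=%d\n", change_mtu(&mtu, 9001), mtu);
	return 0;
}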
@@ -4167,11 +4248,33 @@
 	return err ? -EOPNOTSUPP : 0;
 }
 
+static int rocker_port_change_proto_down(struct net_device *dev,
+					 bool proto_down)
+{
+	struct rocker_port *rocker_port = netdev_priv(dev);
+
+	if (rocker_port->dev->flags & IFF_UP)
+		rocker_port_set_enable(rocker_port, !proto_down);
+	rocker_port->dev->proto_down = proto_down;
+	return 0;
+}
+
+static void rocker_port_neigh_destroy(struct neighbour *n)
+{
+	struct rocker_port *rocker_port = netdev_priv(n->dev);
+	int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
+	__be32 ip_addr = *(__be32 *)n->primary_key;
+
+	rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
+			       flags, ip_addr, n->ha);
+}
+
 static const struct net_device_ops rocker_port_netdev_ops = {
 	.ndo_open			= rocker_port_open,
 	.ndo_stop			= rocker_port_stop,
 	.ndo_start_xmit			= rocker_port_xmit,
 	.ndo_set_mac_address		= rocker_port_set_mac_address,
+	.ndo_change_mtu			= rocker_port_change_mtu,
 	.ndo_bridge_getlink		= switchdev_port_bridge_getlink,
 	.ndo_bridge_setlink		= switchdev_port_bridge_setlink,
 	.ndo_bridge_dellink		= switchdev_port_bridge_dellink,
@@ -4179,6 +4282,8 @@
 	.ndo_fdb_del			= switchdev_port_fdb_del,
 	.ndo_fdb_dump			= switchdev_port_fdb_dump,
 	.ndo_get_phys_port_name		= rocker_port_get_phys_port_name,
+	.ndo_change_proto_down		= rocker_port_change_proto_down,
+	.ndo_neigh_destroy		= rocker_port_neigh_destroy,
 };
 
 /********************
@@ -4445,6 +4550,7 @@
 		if (found->key.pport != rocker_port->pport)
 			continue;
 		fdb->addr = found->key.addr;
+		fdb->ndm_state = NUD_REACHABLE;
 		fdb->vid = rocker_port_vlan_to_vid(rocker_port,
 						   found->key.vlan_id);
 		err = obj->cb(rocker_port->dev, obj);
@@ -4726,6 +4832,7 @@
 	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
 	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
 	size_t rx_len;
+	u16 rx_flags = 0;
 
 	if (!skb)
 		return -ENOENT;
@@ -4733,6 +4840,8 @@
 	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
 	if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
 		return -EINVAL;
+	if (attrs[ROCKER_TLV_RX_FLAGS])
+		rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
 
 	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
 
@@ -4740,6 +4849,9 @@
 	skb_put(skb, rx_len);
 	skb->protocol = eth_type_trans(skb, rocker_port->dev);
 
+	if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
+		skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
+
 	rocker_port->dev->stats.rx_packets++;
 	rocker_port->dev->stats.rx_bytes += skb->len;
 
@@ -4869,7 +4981,7 @@
 		       NAPI_POLL_WEIGHT);
 	rocker_carrier_init(rocker_port);
 
-	dev->features |= NETIF_F_NETNS_LOCAL;
+	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
 
 	err = register_netdev(dev);
 	if (err) {
@@ -4878,11 +4990,13 @@
 	}
 	rocker->ports[port_number] = rocker_port;
 
+	switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
+
 	rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
 
 	err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
 	if (err) {
-		dev_err(&pdev->dev, "install ig port table failed\n");
+		netdev_err(rocker_port->dev, "install ig port table failed\n");
 		goto err_port_ig_tbl;
 	}
 
@@ -4902,6 +5016,7 @@
 	rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
 			   ROCKER_OP_FLAG_REMOVE);
 err_port_ig_tbl:
+	rocker->ports[port_number] = NULL;
 	unregister_netdev(dev);
 err_register_netdev:
 	free_netdev(dev);
@@ -5074,7 +5189,8 @@
 		goto err_probe_ports;
 	}
 
-	dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id);
+	dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
+		 (int)sizeof(rocker->hw.id), &rocker->hw.id);
 
 	return 0;
 
@@ -5157,6 +5273,7 @@
 		rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
 
 	rocker_port->bridge_dev = bridge;
+	switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
 
 	return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
 				    untagged_vid, 0);
@@ -5177,6 +5294,8 @@
 		rocker_port_internal_vlan_id_get(rocker_port,
 						 rocker_port->dev->ifindex);
 
+	switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
+				    false);
 	rocker_port->bridge_dev = NULL;
 
 	err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
@@ -5191,46 +5310,77 @@
 	return err;
 }
 
-static int rocker_port_master_changed(struct net_device *dev)
+
+static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
+				   struct net_device *master)
 {
-	struct rocker_port *rocker_port = netdev_priv(dev);
-	struct net_device *master = netdev_master_upper_dev_get(dev);
+	int err;
+
+	rocker_port->bridge_dev = master;
+
+	err = rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+	if (err)
+		return err;
+	err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+
+	return err;
+}
+
+static int rocker_port_master_linked(struct rocker_port *rocker_port,
+				     struct net_device *master)
+{
 	int err = 0;
 
-	/* There are currently three cases handled here:
-	 * 1. Joining a bridge
-	 * 2. Leaving a previously joined bridge
-	 * 3. Other, e.g. being added to or removed from a bond or openvswitch,
-	 *    in which case nothing is done
-	 */
-	if (master && master->rtnl_link_ops &&
-	    !strcmp(master->rtnl_link_ops->kind, "bridge"))
+	if (netif_is_bridge_master(master))
 		err = rocker_port_bridge_join(rocker_port, master);
-	else if (rocker_port_is_bridged(rocker_port))
-		err = rocker_port_bridge_leave(rocker_port);
+	else if (netif_is_ovs_master(master))
+		err = rocker_port_ovs_changed(rocker_port, master);
+	return err;
+}
 
+static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
+{
+	int err = 0;
+
+	if (rocker_port_is_bridged(rocker_port))
+		err = rocker_port_bridge_leave(rocker_port);
+	else if (rocker_port_is_ovsed(rocker_port))
+		err = rocker_port_ovs_changed(rocker_port, NULL);
 	return err;
 }
 
 static int rocker_netdevice_event(struct notifier_block *unused,
 				  unsigned long event, void *ptr)
 {
-	struct net_device *dev;
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct netdev_notifier_changeupper_info *info;
+	struct rocker_port *rocker_port;
 	int err;
 
+	if (!rocker_port_dev_check(dev))
+		return NOTIFY_DONE;
+
 	switch (event) {
 	case NETDEV_CHANGEUPPER:
-		dev = netdev_notifier_info_to_dev(ptr);
-		if (!rocker_port_dev_check(dev))
-			return NOTIFY_DONE;
-		err = rocker_port_master_changed(dev);
-		if (err)
-			netdev_warn(dev,
-				    "failed to reflect master change (err %d)\n",
-				    err);
+		info = ptr;
+		if (!info->master)
+			goto out;
+		rocker_port = netdev_priv(dev);
+		if (info->linking) {
+			err = rocker_port_master_linked(rocker_port,
+							info->upper_dev);
+			if (err)
+				netdev_warn(dev, "failed to reflect master linked (err %d)\n",
+					    err);
+		} else {
+			err = rocker_port_master_unlinked(rocker_port);
+			if (err)
+				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
+					    err);
+		}
 		break;
 	}
-
+out:
 	return NOTIFY_DONE;
 }
 
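
The reworked notifier moves the port check up front and branches on the changeupper payload instead of re-deriving the master from the device, so bridge and OVS uppers now take the same path. Its control flow in miniature (the struct below is a simplified stand-in for netdev_notifier_changeupper_info):

#include <stdbool.h>
#include <stdio.h>

#define NOTIFY_DONE 0

struct changeupper_info { bool master; bool linking; };

/* Ignore foreign devices early, act only on master links, and branch
 * on whether the port is being linked to or unlinked from its upper.
 */
static int on_changeupper(bool own_port, const struct changeupper_info *info)
{
	if (!own_port || !info->master)
		return NOTIFY_DONE;
	printf(info->linking ? "master linked\n" : "master unlinked\n");
	return NOTIFY_DONE;
}

int main(void)
{
	struct changeupper_info link = { .master = true, .linking = true };
	struct changeupper_info unlink = { .master = true, .linking = false };

	on_changeupper(true, &link);
	on_changeupper(true, &unlink);
	return 0;
}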
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index c61fbf9..12490b2 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -159,6 +159,7 @@
 	ROCKER_TLV_CMD_PORT_SETTINGS_MODE,		/* u8 */
 	ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,		/* u8 */
 	ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME,		/* binary */
+	ROCKER_TLV_CMD_PORT_SETTINGS_MTU,		/* u16 */
 
 	__ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
 	ROCKER_TLV_CMD_PORT_SETTINGS_MAX =
@@ -245,6 +246,7 @@
 #define ROCKER_RX_FLAGS_TCP			BIT(5)
 #define ROCKER_RX_FLAGS_UDP			BIT(6)
 #define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD	BIT(7)
+#define ROCKER_RX_FLAGS_FWD_OFFLOAD		BIT(8)
 
 enum {
 	ROCKER_TLV_TX_UNSPEC,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index b1a4ea2..ff649eb 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -49,6 +49,12 @@
  */
 #define HUNT_FILTER_TBL_ROWS 8192
 
+#define EFX_EF10_FILTER_ID_INVALID 0xffff
+struct efx_ef10_dev_addr {
+	u8 addr[ETH_ALEN];
+	u16 id;
+};
+
 struct efx_ef10_filter_table {
 /* The RX match field masks supported by this fw & hw, in order of priority */
 	enum efx_filter_match_flags rx_match_flags[
@@ -69,13 +75,14 @@
 /* Shadow of net_device address lists, guarded by mac_lock */
 #define EFX_EF10_FILTER_DEV_UC_MAX	32
 #define EFX_EF10_FILTER_DEV_MC_MAX	256
-	struct {
-		u8 addr[ETH_ALEN];
-		u16 id;
-	} dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
-	  dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
-	int dev_uc_count;		/* negative for PROMISC */
-	int dev_mc_count;		/* negative for PROMISC/ALLMULTI */
+	struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
+	struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+	int dev_uc_count;
+	int dev_mc_count;
+/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */
+	u16 ucdef_id;
+	u16 bcast_id;
+	u16 mcdef_id;
 };
 
 /* An arbitrary search limit for the software hash table */
@@ -288,11 +295,11 @@
 	/* We can have one VI for each 8K region.  However, until we
 	 * use TX option descriptors we need two TX queues per channel.
 	 */
-	efx->max_channels =
-		min_t(unsigned int,
-		      EFX_MAX_CHANNELS,
-		      efx_ef10_mem_map_size(efx) /
-		      (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
+	efx->max_channels = min_t(unsigned int,
+				  EFX_MAX_CHANNELS,
+				  efx_ef10_mem_map_size(efx) /
+				  (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
+	efx->max_tx_channels = efx->max_channels;
 	if (WARN_ON(efx->max_channels == 0))
 		return -EIO;
 
@@ -387,7 +394,7 @@
 	 * First try to enable it, then if we get EPERM, just
 	 * ask if it's already enabled
 	 */
-	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
+	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL);
 	if (rc == 0) {
 		nic_data->workaround_35388 = true;
 	} else if (rc == -EPERM) {
@@ -817,11 +824,13 @@
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	unsigned int uc_mem_map_size, wc_mem_map_size;
-	unsigned int min_vis, pio_write_vi_base, max_vis;
+	unsigned int min_vis = max(EFX_TXQ_TYPES,
+				   efx_separate_tx_channels ? 2 : 1);
+	unsigned int channel_vis, pio_write_vi_base, max_vis;
 	void __iomem *membase;
 	int rc;
 
-	min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+	channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
 
 #ifdef EFX_USE_PIO
 	/* Try to allocate PIO buffers if wanted and if the full
@@ -855,11 +864,11 @@
 	 * page size is >4K).  So we may allocate some extra VIs just
 	 * for writing PIO buffers through.
 	 *
-	 * The UC mapping contains (min_vis - 1) complete VIs and the
+	 * The UC mapping contains (channel_vis - 1) complete VIs and the
 	 * first half of the next VI.  Then the WC mapping begins with
 	 * the second half of this last VI.
 	 */
-	uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
+	uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE +
 				     ER_DZ_TX_PIOBUF);
 	if (nic_data->n_piobufs) {
 		/* pio_write_vi_base rounds down to give the number of complete
@@ -874,7 +883,7 @@
 	} else {
 		pio_write_vi_base = 0;
 		wc_mem_map_size = 0;
-		max_vis = min_vis;
+		max_vis = channel_vis;
 	}
 
 	/* In case the last attached driver failed to free VIs, do it now */
@@ -886,6 +895,23 @@
 	if (rc != 0)
 		return rc;
 
+	if (nic_data->n_allocated_vis < channel_vis) {
+		netif_info(efx, drv, efx->net_dev,
+			   "Could not allocate enough VIs to satisfy RSS"
+			   " requirements. Performance may not be optimal.\n");
+		/* We didn't get the VIs to populate our channels.
+		 * We could keep what we got but then we'd have more
+		 * interrupts than we need.
+		 * Instead calculate new max_channels and restart
+		 */
+		efx->max_channels = nic_data->n_allocated_vis;
+		efx->max_tx_channels =
+			nic_data->n_allocated_vis / EFX_TXQ_TYPES;
+
+		efx_ef10_free_vis(efx);
+		return -EAGAIN;
+	}
+
 	/* If we didn't get enough VIs to map all the PIO buffers, free the
 	 * PIO buffers
 	 */
@@ -984,12 +1010,24 @@
 static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+#ifdef CONFIG_SFC_SRIOV
+	unsigned int i;
+#endif
 
 	/* All our allocations have been reset */
 	nic_data->must_realloc_vis = true;
 	nic_data->must_restore_filters = true;
 	nic_data->must_restore_piobufs = true;
 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+	/* Driver-created vswitches and vports must be re-created */
+	nic_data->must_probe_vswitching = true;
+	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+#ifdef CONFIG_SFC_SRIOV
+	if (nic_data->vf)
+		for (i = 0; i < efx->vf_count; i++)
+			nic_data->vf[i].vport_id = 0;
+#endif
 }
 
 static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
@@ -1034,6 +1072,12 @@
 {
 	int rc = efx_mcdi_reset(efx, reset_type);
 
+	/* Unprivileged functions return -EPERM, but need to return success
+	 * here so that the datapath is brought back up.
+	 */
+	if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
+		rc = 0;
+
 	/* If it was a port reset, trigger reallocation of MC resources.
 	 * Note that on an MC reset nothing needs to be done now because we'll
 	 * detect the MC reset later and handle it then.
@@ -1583,10 +1627,6 @@
 	/* All our allocations have been reset */
 	efx_ef10_reset_mc_allocations(efx);
 
-	/* Driver-created vswitches and vports must be re-created */
-	nic_data->must_probe_vswitching = true;
-	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
-
 	/* The datapath firmware might have been changed */
 	nic_data->must_check_datapath_caps = true;
 
@@ -2222,6 +2262,29 @@
 				    GFP_KERNEL);
 }
 
+static void efx_ef10_ev_fini(struct efx_channel *channel)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
+	MCDI_DECLARE_BUF_ERR(outbuf);
+	struct efx_nic *efx = channel->efx;
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
+
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+
+	if (rc && rc != -EALREADY)
+		goto fail;
+
+	return;
+
+fail:
+	efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+			       outbuf, outlen, rc);
+}
+
 static int efx_ef10_ev_init(struct efx_channel *channel)
 {
 	MCDI_DECLARE_BUF(inbuf,
@@ -2233,6 +2296,7 @@
 	struct efx_ef10_nic_data *nic_data;
 	bool supports_rx_merge;
 	size_t inlen, outlen;
+	unsigned int enabled, implemented;
 	dma_addr_t dma_addr;
 	int rc;
 	int i;
@@ -2273,30 +2337,52 @@
 	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
 			  outbuf, sizeof(outbuf), &outlen);
 	/* IRQ return is ignored */
-	return rc;
-}
+	if (channel->channel || rc)
+		return rc;
 
-static void efx_ef10_ev_fini(struct efx_channel *channel)
-{
-	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
-	MCDI_DECLARE_BUF_ERR(outbuf);
-	struct efx_nic *efx = channel->efx;
-	size_t outlen;
-	int rc;
-
-	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
-
-	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
-			  outbuf, sizeof(outbuf), &outlen);
-
-	if (rc && rc != -EALREADY)
+	/* Successfully created event queue on channel 0 */
+	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
+	if (rc == -ENOSYS) {
+		/* GET_WORKAROUNDS was implemented before the bug26807
+		 * workaround, thus the latter must be unavailable in this fw
+		 */
+		nic_data->workaround_26807 = false;
+		rc = 0;
+	} else if (rc) {
 		goto fail;
+	} else {
+		nic_data->workaround_26807 =
+			!!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
 
-	return;
+		if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
+		    !nic_data->workaround_26807) {
+			unsigned int flags;
+
+			rc = efx_mcdi_set_workaround(efx,
+						     MC_CMD_WORKAROUND_BUG26807,
+						     true, &flags);
+
+			if (!rc) {
+				if (flags &
+				    1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
+					netif_info(efx, drv, efx->net_dev,
+						   "other functions on NIC have been reset\n");
+					/* MC's boot count has incremented */
+					++nic_data->warm_boot_count;
+				}
+				nic_data->workaround_26807 = true;
+			} else if (rc == -EPERM) {
+				rc = 0;
+			}
+		}
+	}
+
+	if (!rc)
+		return 0;
 
 fail:
-	efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
-			       outbuf, outlen, rc);
+	efx_ef10_ev_fini(channel);
+	return rc;
 }
 
 static void efx_ef10_ev_remove(struct efx_channel *channel)
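
The workaround probe in ev_init is a common MCDI fallback ladder: -ENOSYS from GET_WORKAROUNDS means the query predates the bug26807 workaround, so the workaround cannot exist; -EPERM from the enable means an unprivileged function that must carry on without it. A standalone model of that ladder (the 0x2 bit stands in for MC_CMD_GET_WORKAROUNDS_OUT_BUG26807; the stubbed firmware calls always succeed):

#include <errno.h>
#include <stdio.h>

#define BUG26807 0x2	/* stand-in for the workaround bit */

/* Stubbed firmware calls; vary the return values to explore the paths. */
static int get_workarounds(unsigned int *implemented, unsigned int *enabled)
{
	*implemented = BUG26807;	/* known to this firmware... */
	*enabled = 0;			/* ...but currently off */
	return 0;
}

static int set_workaround(void)
{
	return 0;	/* -EPERM here would mean "unprivileged, skip" */
}

int main(void)
{
	unsigned int implemented, enabled;
	int workaround_26807 = 0;
	int rc = get_workarounds(&implemented, &enabled);

	if (rc == -ENOSYS) {
		workaround_26807 = 0;	/* firmware too old to know it */
	} else if (!rc) {
		workaround_26807 = !!(enabled & BUG26807);
		if ((implemented & BUG26807) && !workaround_26807)
			workaround_26807 = (set_workaround() == 0);
	}
	printf("workaround_26807=%d\n", workaround_26807);
	return 0;
}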
@@ -3250,6 +3336,19 @@
 					       filter_id, false);
 }
 
+static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
+{
+	return filter_id % HUNT_FILTER_TBL_ROWS;
+}
+
+static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
+					 enum efx_filter_priority priority,
+					 u32 filter_id)
+{
+	return efx_ef10_filter_remove_internal(efx, 1U << priority,
+					       filter_id, true);
+}
+
 static int efx_ef10_filter_get_safe(struct efx_nic *efx,
 				    enum efx_filter_priority priority,
 				    u32 filter_id, struct efx_filter_spec *spec)
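
Because a full filter ID encodes the match priority above the table row, the address lists store only filter_id % HUNT_FILTER_TBL_ROWS, which fits in a u16 alongside the 0xffff "not inserted" sentinel. A standalone sketch of that mapping (the priority encoding shown is illustrative):

#include <stdint.h>
#include <stdio.h>

#define HUNT_FILTER_TBL_ROWS	8192
#define FILTER_ID_INVALID	0xffff

/* Drop the priority bits, keeping only the table row. */
static uint16_t to_unsafe_id(uint32_t filter_id)
{
	return filter_id % HUNT_FILTER_TBL_ROWS;
}

int main(void)
{
	/* illustrative: priority 2 in the high bits, row 1234 below */
	uint32_t full = 2u * HUNT_FILTER_TBL_ROWS + 1234;

	printf("row %u, sentinel 0x%x\n", to_unsafe_id(full),
	       FILTER_ID_INVALID);
	return 0;
}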
@@ -3623,6 +3722,10 @@
 		goto fail;
 	}
 
+	table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
+	table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
+	table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+
 	efx->filter_state = table;
 	init_waitqueue_head(&table->waitq);
 	return 0;
@@ -3725,145 +3828,233 @@
 	kfree(table);
 }
 
-/* Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+#define EFX_EF10_FILTER_DO_MARK_OLD(id) \
+		if (id != EFX_EF10_FILTER_ID_INVALID) { \
+			filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
+			WARN_ON(!table->entry[filter_idx].spec); \
+			table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; \
+		}
+static void efx_ef10_filter_mark_old(struct efx_nic *efx)
 {
 	struct efx_ef10_filter_table *table = efx->filter_state;
-	struct net_device *net_dev = efx->net_dev;
-	struct efx_filter_spec spec;
-	bool remove_failed = false;
-	struct netdev_hw_addr *uc;
-	struct netdev_hw_addr *mc;
-	unsigned int filter_idx;
-	int i, n, rc;
-
-	if (!efx_dev_registered(efx))
-		return;
+	unsigned int filter_idx, i;
 
 	if (!table)
 		return;
 
 	/* Mark old filters that may need to be removed */
 	spin_lock_bh(&efx->filter_lock);
-	n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
-	for (i = 0; i < n; i++) {
-		filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
-		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
-	}
-	n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count;
-	for (i = 0; i < n; i++) {
-		filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
-		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
-	}
+	for (i = 0; i < table->dev_uc_count; i++)
+		EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id);
+	for (i = 0; i < table->dev_mc_count; i++)
+		EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id);
+	EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id);
+	EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id);
+	EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id);
 	spin_unlock_bh(&efx->filter_lock);
+}
+#undef EFX_EF10_FILTER_DO_MARK_OLD
 
-	/* Copy/convert the address lists; add the primary station
-	 * address and broadcast address
-	 */
-	netif_addr_lock_bh(net_dev);
-	if (net_dev->flags & IFF_PROMISC ||
-	    netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) {
-		table->dev_uc_count = -1;
-	} else {
-		table->dev_uc_count = 1 + netdev_uc_count(net_dev);
-		ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
-		i = 1;
-		netdev_for_each_uc_addr(uc, net_dev) {
-			ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
-			i++;
-		}
-	}
-	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
-	    netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) {
-		table->dev_mc_count = -1;
-	} else {
-		table->dev_mc_count = 1 + netdev_mc_count(net_dev);
-		eth_broadcast_addr(table->dev_mc_list[0].addr);
-		i = 1;
-		netdev_for_each_mc_addr(mc, net_dev) {
-			ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
-			i++;
-		}
-	}
-	netif_addr_unlock_bh(net_dev);
+static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc)
+{
+	struct efx_ef10_filter_table *table = efx->filter_state;
+	struct net_device *net_dev = efx->net_dev;
+	struct netdev_hw_addr *uc;
+	int addr_count;
+	unsigned int i;
 
-	/* Insert/renew unicast filters */
-	if (table->dev_uc_count >= 0) {
-		for (i = 0; i < table->dev_uc_count; i++) {
-			efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
-					   EFX_FILTER_FLAG_RX_RSS,
-					   0);
-			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
-						 table->dev_uc_list[i].addr);
-			rc = efx_ef10_filter_insert(efx, &spec, true);
-			if (rc < 0) {
-				/* Fall back to unicast-promisc */
-				while (i--)
-					efx_ef10_filter_remove_safe(
-						efx, EFX_FILTER_PRI_AUTO,
-						table->dev_uc_list[i].id);
-				table->dev_uc_count = -1;
-				break;
-			}
-			table->dev_uc_list[i].id = rc;
+	table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
+	addr_count = netdev_uc_count(net_dev);
+	if (net_dev->flags & IFF_PROMISC)
+		*promisc = true;
+	table->dev_uc_count = 1 + addr_count;
+	ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
+	i = 1;
+	netdev_for_each_uc_addr(uc, net_dev) {
+		if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
+			*promisc = true;
+			break;
 		}
+		ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
+		table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
+		i++;
 	}
-	if (table->dev_uc_count < 0) {
+}
+
+static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc)
+{
+	struct efx_ef10_filter_table *table = efx->filter_state;
+	struct net_device *net_dev = efx->net_dev;
+	struct netdev_hw_addr *mc;
+	unsigned int i, addr_count;
+
+	table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+	table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
+	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
+		*promisc = true;
+
+	addr_count = netdev_mc_count(net_dev);
+	i = 0;
+	netdev_for_each_mc_addr(mc, net_dev) {
+		if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
+			*promisc = true;
+			break;
+		}
+		ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
+		table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
+		i++;
+	}
+
+	table->dev_mc_count = i;
+}
+
+static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
+					     bool multicast, bool rollback)
+{
+	struct efx_ef10_filter_table *table = efx->filter_state;
+	struct efx_ef10_dev_addr *addr_list;
+	struct efx_filter_spec spec;
+	u8 baddr[ETH_ALEN];
+	unsigned int i, j;
+	int addr_count;
+	int rc;
+
+	if (multicast) {
+		addr_list = table->dev_mc_list;
+		addr_count = table->dev_mc_count;
+	} else {
+		addr_list = table->dev_uc_list;
+		addr_count = table->dev_uc_count;
+	}
+
+	/* Insert/renew filters */
+	for (i = 0; i < addr_count; i++) {
 		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
 				   EFX_FILTER_FLAG_RX_RSS,
 				   0);
-		efx_filter_set_uc_def(&spec);
+		efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+					 addr_list[i].addr);
 		rc = efx_ef10_filter_insert(efx, &spec, true);
 		if (rc < 0) {
-			WARN_ON(1);
-			table->dev_uc_count = 0;
-		} else {
-			table->dev_uc_list[0].id = rc;
+			if (rollback) {
+				netif_info(efx, drv, efx->net_dev,
+					   "efx_ef10_filter_insert failed rc=%d\n",
+					   rc);
+				/* Fall back to promiscuous */
+				for (j = 0; j < i; j++) {
+					if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
+						continue;
+					efx_ef10_filter_remove_unsafe(
+						efx, EFX_FILTER_PRI_AUTO,
+						addr_list[j].id);
+					addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+				}
+				return rc;
+			} else {
+				/* mark as not inserted, and carry on */
+				rc = EFX_EF10_FILTER_ID_INVALID;
+			}
 		}
+		addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc);
 	}
 
-	/* Insert/renew multicast filters */
-	if (table->dev_mc_count >= 0) {
-		for (i = 0; i < table->dev_mc_count; i++) {
-			efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
-					   EFX_FILTER_FLAG_RX_RSS,
-					   0);
-			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
-						 table->dev_mc_list[i].addr);
-			rc = efx_ef10_filter_insert(efx, &spec, true);
-			if (rc < 0) {
-				/* Fall back to multicast-promisc */
-				while (i--)
-					efx_ef10_filter_remove_safe(
-						efx, EFX_FILTER_PRI_AUTO,
-						table->dev_mc_list[i].id);
-				table->dev_mc_count = -1;
-				break;
-			}
-			table->dev_mc_list[i].id = rc;
-		}
-	}
-	if (table->dev_mc_count < 0) {
+	if (multicast && rollback) {
+		/* Also need an Ethernet broadcast filter */
 		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
 				   EFX_FILTER_FLAG_RX_RSS,
 				   0);
+		eth_broadcast_addr(baddr);
+		efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
+		rc = efx_ef10_filter_insert(efx, &spec, true);
+		if (rc < 0) {
+			netif_warn(efx, drv, efx->net_dev,
+				   "Broadcast filter insert failed rc=%d\n", rc);
+			/* Fall back to promiscuous */
+			for (j = 0; j < i; j++) {
+				if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
+					continue;
+				efx_ef10_filter_remove_unsafe(
+					efx, EFX_FILTER_PRI_AUTO,
+					addr_list[j].id);
+				addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+			}
+			return rc;
+		} else {
+			table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+		}
+	}
+
+	return 0;
+}
+
+static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
+				      bool rollback)
+{
+	struct efx_ef10_filter_table *table = efx->filter_state;
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	struct efx_filter_spec spec;
+	u8 baddr[ETH_ALEN];
+	int rc;
+
+	efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+			   EFX_FILTER_FLAG_RX_RSS,
+			   0);
+
+	if (multicast)
 		efx_filter_set_mc_def(&spec);
-		rc = efx_ef10_filter_insert(efx, &spec, true);
-		if (rc < 0) {
-			WARN_ON(1);
-			table->dev_mc_count = 0;
-		} else {
-			table->dev_mc_list[0].id = rc;
-		}
-	}
+	else
+		efx_filter_set_uc_def(&spec);
 
-	/* Remove filters that weren't renewed.  Since nothing else
-	 * changes the AUTO_OLD flag or removes these filters, we
-	 * don't need to hold the filter_lock while scanning for
-	 * these filters.
-	 */
+	rc = efx_ef10_filter_insert(efx, &spec, true);
+	if (rc < 0) {
+		netif_warn(efx, drv, efx->net_dev,
+			   "%scast mismatch filter insert failed rc=%d\n",
+			   multicast ? "Multi" : "Uni", rc);
+	} else if (multicast) {
+		table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+		if (!nic_data->workaround_26807) {
+			/* Also need an Ethernet broadcast filter */
+			efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+					   EFX_FILTER_FLAG_RX_RSS,
+					   0);
+			eth_broadcast_addr(baddr);
+			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+						 baddr);
+			rc = efx_ef10_filter_insert(efx, &spec, true);
+			if (rc < 0) {
+				netif_warn(efx, drv, efx->net_dev,
+					   "Broadcast filter insert failed rc=%d\n",
+					   rc);
+				if (rollback) {
+					/* Roll back the mc_def filter */
+					efx_ef10_filter_remove_unsafe(
+							efx, EFX_FILTER_PRI_AUTO,
+							table->mcdef_id);
+					table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+					return rc;
+				}
+			} else {
+				table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+			}
+		}
+		rc = 0;
+	} else {
+		table->ucdef_id = rc;
+		rc = 0;
+	}
+	return rc;
+}
+
+/* Remove filters that weren't renewed.  Since nothing else changes the AUTO_OLD
+ * flag or removes these filters, we don't need to hold the filter_lock while
+ * scanning for these filters.
+ */
+static void efx_ef10_filter_remove_old(struct efx_nic *efx)
+{
+	struct efx_ef10_filter_table *table = efx->filter_state;
+	bool remove_failed = false;
+	int i;
+
 	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
 		if (ACCESS_ONCE(table->entry[i].spec) &
 		    EFX_EF10_FILTER_FLAG_AUTO_OLD) {
@@ -3942,6 +4133,87 @@
 	return rc ? rc : rc2;
 }
 
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
+static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+{
+	struct efx_ef10_filter_table *table = efx->filter_state;
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	struct net_device *net_dev = efx->net_dev;
+	bool uc_promisc = false, mc_promisc = false;
+
+	if (!efx_dev_registered(efx))
+		return;
+
+	if (!table)
+		return;
+
+	efx_ef10_filter_mark_old(efx);
+
+	/* Copy/convert the address lists; add the primary station
+	 * address and broadcast address
+	 */
+	netif_addr_lock_bh(net_dev);
+	efx_ef10_filter_uc_addr_list(efx, &uc_promisc);
+	efx_ef10_filter_mc_addr_list(efx, &mc_promisc);
+	netif_addr_unlock_bh(net_dev);
+
+	/* Insert/renew unicast filters */
+	if (uc_promisc) {
+		efx_ef10_filter_insert_def(efx, false, false);
+		efx_ef10_filter_insert_addr_list(efx, false, false);
+	} else {
+		/* If any of the filters failed to insert, fall back to
+		 * promiscuous mode - add in the uc_def filter.  But keep
+		 * our individual unicast filters.
+		 */
+		if (efx_ef10_filter_insert_addr_list(efx, false, false))
+			efx_ef10_filter_insert_def(efx, false, false);
+	}
+
+	/* Insert/renew multicast filters */
+	/* If changing promiscuous state with cascaded multicast filters, remove
+	 * old filters first, so that packets are dropped rather than duplicated
+	 */
+	if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc)
+		efx_ef10_filter_remove_old(efx);
+	if (mc_promisc) {
+		if (nic_data->workaround_26807) {
+			/* If we failed to insert promiscuous filters, rollback
+			 * and fall back to individual multicast filters
+			 */
+			if (efx_ef10_filter_insert_def(efx, true, true)) {
+				/* Changing promisc state, so remove old filters */
+				efx_ef10_filter_remove_old(efx);
+				efx_ef10_filter_insert_addr_list(efx, true, false);
+			}
+		} else {
+			/* If we failed to insert promiscuous filters, don't
+			 * rollback.  Regardless, also insert the mc_list
+			 */
+			efx_ef10_filter_insert_def(efx, true, false);
+			efx_ef10_filter_insert_addr_list(efx, true, false);
+		}
+	} else {
+		/* If any filters failed to insert, rollback and fall back to
+		 * promiscuous mode - mc_def filter and maybe broadcast.  If
+		 * that fails, roll back again and insert as many of our
+		 * individual multicast filters as we can.
+		 */
+		if (efx_ef10_filter_insert_addr_list(efx, true, true)) {
+			/* Changing promisc state, so remove old filters */
+			if (nic_data->workaround_26807)
+				efx_ef10_filter_remove_old(efx);
+			if (efx_ef10_filter_insert_def(efx, true, true))
+				efx_ef10_filter_insert_addr_list(efx, true, false);
+		}
+	}
+
+	efx_ef10_filter_remove_old(efx);
+	efx->mc_promisc = mc_promisc;
+}
+
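Note: the locking comment above efx_ef10_filter_sync_rx_mode() states the whole contract, but no caller appears in this hunk. A hedged sketch of what a conforming caller would look like, assuming efx->filter_sem is the rw_semaphore guarding the filter table (the wrapper function here is invented for illustration):

	static void example_sync_rx_mode(struct efx_nic *efx)
	{
		/* Hold filter_sem for read so the table cannot be freed
		 * by efx_ef10_filter_table_remove() while we walk it.
		 */
		down_read(&efx->filter_sem);
		efx_ef10_filter_sync_rx_mode(efx);
		up_read(&efx->filter_sem);
	}
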
 static int efx_ef10_set_mac_address(struct efx_nic *efx)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
@@ -4110,6 +4382,8 @@
 	rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
 
 out:
+	if (rc == -EPERM)
+		rc = 0;
 	rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
 	return rc ? rc : rc2;
 }
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 03bc03b..974637d 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -115,9 +115,9 @@
  *
  * This is only used in MSI-X interrupt mode
  */
-static bool separate_tx_channels;
-module_param(separate_tx_channels, bool, 0444);
-MODULE_PARM_DESC(separate_tx_channels,
+bool efx_separate_tx_channels;
+module_param(efx_separate_tx_channels, bool, 0444);
+MODULE_PARM_DESC(efx_separate_tx_channels,
 		 "Use separate channels for TX and RX");
 
 /* This is the weight assigned to each of the (per-channel) virtual
@@ -1391,7 +1391,7 @@
 		unsigned int n_channels;
 
 		n_channels = efx_wanted_parallelism(efx);
-		if (separate_tx_channels)
+		if (efx_separate_tx_channels)
 			n_channels *= 2;
 		n_channels += extra_channels;
 		n_channels = min(n_channels, efx->max_channels);
@@ -1418,13 +1418,16 @@
 			efx->n_channels = n_channels;
 			if (n_channels > extra_channels)
 				n_channels -= extra_channels;
-			if (separate_tx_channels) {
-				efx->n_tx_channels = max(n_channels / 2, 1U);
+			if (efx_separate_tx_channels) {
+				efx->n_tx_channels = min(max(n_channels / 2,
+							     1U),
+							 efx->max_tx_channels);
 				efx->n_rx_channels = max(n_channels -
 							 efx->n_tx_channels,
 							 1U);
 			} else {
-				efx->n_tx_channels = n_channels;
+				efx->n_tx_channels = min(n_channels,
+							 efx->max_tx_channels);
 				efx->n_rx_channels = n_channels;
 			}
 			for (i = 0; i < efx->n_channels; i++)
@@ -1450,7 +1453,7 @@
 
 	/* Assume legacy interrupts */
 	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
-		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
+		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
 		efx->n_rx_channels = 1;
 		efx->n_tx_channels = 1;
 		efx->legacy_irq = efx->pci_dev->irq;
@@ -1624,7 +1627,8 @@
 	struct efx_tx_queue *tx_queue;
 
 	efx->tx_channel_offset =
-		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
+		efx_separate_tx_channels ?
+		efx->n_channels - efx->n_tx_channels : 0;
 
 	/* We need to mark which channels really have RX and TX
 	 * queues, and adjust the TX queue numbers if we have separate
@@ -1653,17 +1657,34 @@
 	if (rc)
 		return rc;
 
-	/* Determine the number of channels and queues by trying to hook
-	 * in MSI-X interrupts. */
-	rc = efx_probe_interrupts(efx);
-	if (rc)
-		goto fail1;
+	do {
+		if (!efx->max_channels || !efx->max_tx_channels) {
+			netif_err(efx, drv, efx->net_dev,
+				  "Insufficient resources to allocate"
+				  " any channels\n");
+			rc = -ENOSPC;
+			goto fail1;
+		}
 
-	efx_set_channels(efx);
+		/* Determine the number of channels and queues by trying
+		 * to hook in MSI-X interrupts.
+		 */
+		rc = efx_probe_interrupts(efx);
+		if (rc)
+			goto fail1;
 
-	rc = efx->type->dimension_resources(efx);
-	if (rc)
-		goto fail2;
+		efx_set_channels(efx);
+
+		/* dimension_resources can fail with EAGAIN */
+		rc = efx->type->dimension_resources(efx);
+		if (rc != 0 && rc != -EAGAIN)
+			goto fail2;
+
+		if (rc == -EAGAIN)
+			/* try again with new max_channels */
+			efx_remove_interrupts(efx);
+
+	} while (rc == -EAGAIN);
 
 	if (efx->n_channels > 1)
 		netdev_rss_key_fill(&efx->rx_hash_key,
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index acb1e07..1aaf76c 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -35,6 +35,7 @@
 int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
 unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
 extern unsigned int efx_piobuf_size;
+extern bool efx_separate_tx_channels;
 
 /* RX */
 void efx_set_default_rx_indir_table(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 80e69af..d790cb8 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2371,6 +2371,7 @@
 
 	efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 :
 			     EFX_MAX_CHANNELS);
+	efx->max_tx_channels = efx->max_channels;
 	efx->timer_quantum_ns = 4968; /* 621 cycles */
 
 	/* Initialise I2C adapter */
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 81640f8..98d172b 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1779,15 +1779,31 @@
 	return rc;
 }
 
-int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
+			    unsigned int *flags)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN);
+	size_t outlen;
+	int rc;
 
 	BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
 	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
 	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
-	return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
-			    NULL, 0, NULL);
+	rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+
+	if (!flags)
+		return 0;
+
+	if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
+		*flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS);
+	else
+		*flags = 0;
+
+	return 0;
 }
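Note: callers that pass a non-NULL flags pointer now get the extended response word back. A minimal, hedged sketch of enabling the bug 26807 workaround and testing the FLR_DONE bit (the surrounding error handling is illustrative, not part of this patch):

	unsigned int flags;
	int rc;

	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG26807,
				     true, &flags);
	if (rc == 0 &&
	    (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)))
		netif_info(efx, drv, efx->net_dev,
			   "other functions were FLRed to enable the workaround\n");
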
 
 int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
@@ -1816,7 +1832,11 @@
 	return 0;
 
 fail:
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+	/* Older firmware lacks GET_WORKAROUNDS and this isn't especially
+	 * terrifying.  The call site will have to deal with it though.
+	 */
+	netif_printk(efx, hw, rc == -ENOSYS ? KERN_DEBUG : KERN_ERR,
+		     efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 1838afe..025d504 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -346,7 +346,8 @@
 bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
 enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
 int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
-int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
+			    unsigned int *flags);
 int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
 			     unsigned int *enabled_out);
 
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 45fca9f..4cc7721 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -26,6 +26,10 @@
  * Unlike a warm boot, assume DMEM has been reloaded, so that
  * the MC persistent data must be reinitialised. */
 #define MC_FW_TEPID_BOOT_OK (16)
+/* We have entered the main firmware via recovery mode.  This
+ * means that MC persistent data must be reinitialised, but that
+ * we shouldn't touch PCIe config. */
+#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32)
 /* BIST state has been initialized */
 #define MC_FW_BIST_INIT_OK (128)
 
@@ -169,6 +173,8 @@
 #define MC_CMD_ERR_EINTR 4
 /* I/O failure */
 #define MC_CMD_ERR_EIO 5
+/* Already exists */
+#define MC_CMD_ERR_EEXIST 6
 /* Try again */
 #define MC_CMD_ERR_EAGAIN 11
 /* Out of memory */
@@ -181,6 +187,10 @@
 #define MC_CMD_ERR_ENODEV 19
 /* Invalid argument to target */
 #define MC_CMD_ERR_EINVAL 22
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 32
+/* Read-only */
+#define MC_CMD_ERR_EROFS 30
 /* Out of range */
 #define MC_CMD_ERR_ERANGE 34
 /* Non-recursive resource is already acquired */
@@ -226,6 +236,43 @@
 #define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
 /* The datapath is disabled. */
 #define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN  0x100c
+/* The requested operation might require the
+   command to be passed between MCs, and the
+   transport doesn't support that.  Should
+   only ever be seen over the UART. */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* Notifies the driver that the request has been relayed
+ * to an admin function for authorization. The driver should
+ * wait for a PROXY_RESPONSE event and then resend its request.
+ * This error code is followed by a 32-bit handle that
+ * helps match it with the respective PROXY_RESPONSE event. */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
+/* The request cannot be passed for authorization because
+ * another request from the same function is currently being
+ * authorized. The driver should try again later. */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that
+ * doesn't await an authorization. */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege.
+ * Normally it is translated to EPERM by send_cmd_err(),
+ * but it may also be used to trigger some special mechanism
+ * for handling such a case, e.g. to relay the failed request
+ * to a designated admin function for authorization. */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807. */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
 
 #define MC_CMD_ERR_CODE_OFST 0
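Note: on MC_CMD_ERR_PROXY_PENDING the 32-bit handle sits in the error body at MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST. A hedged sketch of pulling both words out of a raw v2 error frame (direct buffer access here is illustrative; normal MCDI callers never see this frame):

	const u8 *errbuf = ...;	/* assumed: raw MCDI v2 error response */
	u32 err, handle = 0;

	err = le32_to_cpu(*(const __le32 *)(errbuf + MC_CMD_ERR_CODE_OFST));
	if (err == MC_CMD_ERR_PROXY_PENDING)
		handle = le32_to_cpu(*(const __le32 *)
				(errbuf + MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST));
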
 
@@ -275,6 +322,11 @@
 	 MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST +		\
 	 (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
 
+/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default
+ * stack ID (which must be in the range 1-255) along with an EVB port ID.
+ */
+#define EVB_STACK_ID(n)  (((n) & 0xff) << 16)
+
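Note: a one-line illustration of the comment above; EVB_PORT_ID_ASSIGNED is assumed here to be the usual default-port constant defined elsewhere in this header:

	/* select stack 3 on the assigned EVB port (stack ID range 1-255) */
	u32 port_id = EVB_PORT_ID_ASSIGNED | EVB_STACK_ID(3);
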
 
 /* Version 2 adds an optional argument to error returns: the errno value
  * may be followed by the (0-based) number of the first argument that
@@ -394,6 +446,8 @@
 #define          MCDI_EVENT_AOE_BYTEBLASTER 0x9
 /* enum: DDR ECC status update */
 #define          MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
+/* enum: PTP status update */
+#define          MCDI_EVENT_AOE_PTP_STATUS 0xb
 #define        MCDI_EVENT_AOE_ERR_DATA_LBN 8
 #define        MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
 #define        MCDI_EVENT_RX_ERR_RXQ_LBN 0
@@ -408,6 +462,16 @@
 #define        MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
 #define        MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
 #define        MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
+#define        MCDI_EVENT_MUM_ERR_TYPE_LBN 0
+#define        MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8
+/* enum: MUM failed to load - no valid image? */
+#define          MCDI_EVENT_MUM_NO_LOAD 0x1
+/* enum: MUM f/w reported an exception */
+#define          MCDI_EVENT_MUM_ASSERT 0x2
+/* enum: MUM not kicking watchdog */
+#define          MCDI_EVENT_MUM_WATCHDOG 0x3
+#define        MCDI_EVENT_MUM_ERR_DATA_LBN 8
+#define        MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
 #define       MCDI_EVENT_DATA_LBN 0
 #define       MCDI_EVENT_DATA_WIDTH 32
 #define       MCDI_EVENT_SRC_LBN 36
@@ -416,6 +480,8 @@
 #define       MCDI_EVENT_EV_CODE_WIDTH 4
 #define       MCDI_EVENT_CODE_LBN 44
 #define       MCDI_EVENT_CODE_WIDTH 8
+/* enum: Event generated by host software */
+#define          MCDI_EVENT_SW_EVENT 0x0
 /* enum: Bad assert. */
 #define          MCDI_EVENT_CODE_BADSSERT 0x1
 /* enum: PM Notice. */
@@ -470,6 +536,14 @@
 #define          MCDI_EVENT_CODE_MC_BIST 0x19
 /* enum: PTP tick event providing current NIC time */
 #define          MCDI_EVENT_CODE_PTP_TIME 0x1a
+/* enum: MUM fault */
+#define          MCDI_EVENT_CODE_MUM 0x1b
+/* enum: notify the designated PF of a new authorization request */
+#define          MCDI_EVENT_CODE_PROXY_REQUEST 0x1c
+/* enum: notify a function that awaits an authorization that its request has
+ * been processed and it may now resend the command
+ */
+#define          MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
 /* enum: Artificial event generated by host and posted via MC for test
  * purposes.
  */
@@ -537,6 +611,33 @@
 /* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC clock has ever been set
+ */
+#define       MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36
+#define       MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC and System clocks are in sync
+ */
+#define       MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37
+#define       MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of
+ * the minor value of the PTP clock
+ */
+#define       MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
+#define       MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
+/* Zero means that the request has been completed or authorized, and the driver
+ * should resend it. A non-zero value means that the authorization has been
+ * denied, and gives the reason. Typically it will be EPERM.
+ */
+#define       MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
+#define       MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
 
 /* FCDI_EVENT structuredef */
 #define    FCDI_EVENT_LEN 8
@@ -581,6 +682,10 @@
 #define          FCDI_EVENT_CODE_PTP_TICK 0x7
 /* enum: ECC error counters */
 #define          FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define          FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port id config to map MC-FC port idx */
+#define          FCDI_EVENT_CODE_PORT_CONFIG 0xa
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
@@ -594,11 +699,24 @@
 #define       FCDI_EVENT_LINK_STATE_DATA_OFST 0
 #define       FCDI_EVENT_LINK_STATE_DATA_LBN 0
 #define       FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define       FCDI_EVENT_PTP_STATE_OFST 0
+#define          FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define          FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define          FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define       FCDI_EVENT_PTP_STATE_LBN 0
+#define       FCDI_EVENT_PTP_STATE_WIDTH 32
 #define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
 #define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define       FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define       FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define       FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define       FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define       FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
 
 /* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
 * to the MC. Note that this structure is overlaid over a normal FCDI event
@@ -631,6 +749,90 @@
 #define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
 #define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
 
+/* MUM_EVENT structuredef */
+#define    MUM_EVENT_LEN 8
+#define       MUM_EVENT_CONT_LBN 32
+#define       MUM_EVENT_CONT_WIDTH 1
+#define       MUM_EVENT_LEVEL_LBN 33
+#define       MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define          MUM_EVENT_LEVEL_INFO  0x0
+/* enum: Warning. */
+#define          MUM_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define          MUM_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define          MUM_EVENT_LEVEL_FATAL 0x3
+#define       MUM_EVENT_DATA_OFST 0
+#define        MUM_EVENT_SENSOR_ID_LBN 0
+#define        MUM_EVENT_SENSOR_ID_WIDTH 8
+/*             Enum values, see field(s): */
+/*                MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define        MUM_EVENT_SENSOR_STATE_LBN 8
+#define        MUM_EVENT_SENSOR_STATE_WIDTH 8
+#define        MUM_EVENT_PORT_PHY_READY_LBN 0
+#define        MUM_EVENT_PORT_PHY_READY_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
+#define        MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
+#define        MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
+#define        MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
+#define        MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
+#define        MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
+#define        MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
+#define       MUM_EVENT_DATA_LBN 0
+#define       MUM_EVENT_DATA_WIDTH 32
+#define       MUM_EVENT_SRC_LBN 36
+#define       MUM_EVENT_SRC_WIDTH 8
+#define       MUM_EVENT_EV_CODE_LBN 60
+#define       MUM_EVENT_EV_CODE_WIDTH 4
+#define       MUM_EVENT_CODE_LBN 44
+#define       MUM_EVENT_CODE_WIDTH 8
+/* enum: The MUM was rebooted. */
+#define          MUM_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define          MUM_EVENT_CODE_ASSERT 0x2
+/* enum: Sensor failure. */
+#define          MUM_EVENT_CODE_SENSOR 0x3
+/* enum: Link fault has been asserted, or has cleared. */
+#define          MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
+#define       MUM_EVENT_SENSOR_DATA_OFST 0
+#define       MUM_EVENT_SENSOR_DATA_LBN 0
+#define       MUM_EVENT_SENSOR_DATA_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define       MUM_EVENT_PORT_PHY_FLAGS_LBN 0
+#define       MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define       MUM_EVENT_PORT_PHY_CAPS_LBN 0
+#define       MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
+#define       MUM_EVENT_PORT_PHY_TECH_LBN 0
+#define       MUM_EVENT_PORT_PHY_TECH_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
+#define       MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
+#define       MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
+#define       MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
+
 
 /***********************************/
 /* MC_CMD_READ32
@@ -687,24 +889,34 @@
 
 /* MC_CMD_COPYCODE_IN msgrequest */
 #define    MC_CMD_COPYCODE_IN_LEN 16
-/* Source address */
-#define       MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
-/* enum: The main image should be entered via a copy of a single word from and
- * to this address when none of the other magic behaviours are required.
+/* Source address
+ *
+ * The main image should be entered via a copy of a single word from and to a
+ * magic address, which controls various aspects of the boot. The magic address
+ * is a bitfield, with each bit as documented below.
  */
+#define       MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
 #define          MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
-/* enum: Entering the main image via a copy of a single word from and to this
- * address indicates that it should not attempt to start the datapath CPUs.
- * This is useful for certain soft rebooting scenarios. (Huntington only)
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
  */
 #define          MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
-/* enum: Entering the main image via a copy of a single word from and to this
- * address indicates that it should not attempt to parse any configuration from
- * flash. (In addition, the datapath CPUs will not be started, as for
- * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for
- * certain soft rebooting scenarios. (Huntington only)
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
+ * below)
  */
 #define          MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
 /* Destination address */
 #define       MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
 #define       MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
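Note: per the comments above, the deprecated magic constants decompose into individual flag bits. A hedged sketch of composing a SRC_ADDR value from the new bitfield, described as equivalent to the old HUNT_IGNORE_CONFIG behaviour (not verified numerically):

	u32 src_addr =
		1 << MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN |
		1 << MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN |
		1 << MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN;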
@@ -795,6 +1007,10 @@
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+#define          MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
 /* Failing thread address */
 #define       MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
 #define       MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
@@ -802,7 +1018,8 @@
 
 /***********************************/
 /* MC_CMD_LOG_CTRL
- * Configure the output stream for various events and messages.
+ * Configure the output stream for log events such as link state changes,
+ * sensor notifications and MCDI completions
  */
 #define MC_CMD_LOG_CTRL 0x7
 
@@ -816,6 +1033,7 @@
 #define          MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
 /* enum: Event queue. */
 #define          MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
+/* Legacy argument. Must be zero. */
 #define       MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
 
 /* MC_CMD_LOG_CTRL_OUT msgresponse */
@@ -955,8 +1173,12 @@
  * input on the same NIC.
  */
 #define          MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
+/* enum: Set the PTP sync status. Status is used by firmware to report to event
+ * subscribers.
+ */
+#define          MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
 /* enum: Above this for future use. */
-#define          MC_CMD_PTP_OP_MAX 0x1b
+#define          MC_CMD_PTP_OP_MAX 0x1c
 
 /* MC_CMD_PTP_IN_ENABLE msgrequest */
 #define    MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -1191,8 +1413,12 @@
 #define    MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
-/* Event queue to send PTP time events to */
+/* Original field containing queue ID. Now extended to include flags. */
 #define       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1
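Note: since QUEUE now carries flags alongside the queue ID, a subscriber wanting sync-status reports also sets bit 31. A hedged sketch (MCDI_SET_DWORD as used elsewhere in the driver; evq_index is illustrative):

	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
		       evq_index |
		       1U << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN);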
 
 /* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
 #define    MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
@@ -1214,6 +1440,23 @@
 /* 1 to enable PPS test mode, 0 to disable and return result. */
 #define       MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
 
+/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
+#define    MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* NIC - Host System Clock Synchronization status */
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+/* enum: Host System clock and NIC clock are not in sync */
+#define          MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
+/* enum: Host System clock and NIC clock are synchronized */
+#define          MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
+/* If synchronized, number of seconds until clocks should be considered to be
+ * no longer in sync.
+ */
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+
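Note: a hedged request-building sketch for the new sub-operation; buffer handling mirrors other PTP requests, and the 10-second timeout is purely illustrative:

	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN);

	MCDI_SET_DWORD(inbuf, PTP_IN_CMD, MC_CMD_PTP_OP_SET_SYNC_STATUS);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_SET_SYNC_STATUS_STATUS,
		       MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC);
	MCDI_SET_DWORD(inbuf, PTP_IN_SET_SYNC_STATUS_TIMEOUT, 10);
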
 /* MC_CMD_PTP_OUT msgresponse */
 #define    MC_CMD_PTP_OUT_LEN 0
 
@@ -1375,7 +1618,7 @@
 #define          MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
 
 /* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
-#define    MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8
+#define    MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
 /* Time format required/used by this NIC. Applies to all PTP MCDI
  * operations that pass times between the host and firmware. If this operation
  * is not supported (older firmware) a format of seconds and nanoseconds should
@@ -1396,6 +1639,13 @@
  * end and start times minus the time that the MC waited for host end.
  */
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+/* Various PTP capabilities */
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
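Note: the capability bit gates whether REPORT_SYNC_STATUS may be set when subscribing. A hedged check, assuming outbuf holds a GET_ATTRIBUTES response of at least the new 24-byte length:

	bool can_report_sync =
		MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_CAPABILITIES) &
		(1 << MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN);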
 
 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
 #define    MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
@@ -1415,6 +1665,9 @@
 /*            Enum values, see field(s): */
 /*               MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
 
+/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
+#define    MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
+
 
 /***********************************/
 /* MC_CMD_CSR_READ32
@@ -1915,6 +2168,14 @@
 #define          MC_CMD_FW_FULL_FEATURED 0x0
 /* enum: Prefer to use firmware with fewer features but lower latency */
 #define          MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+#define          MC_CMD_FW_PACKED_STREAM 0x2
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+#define          MC_CMD_FW_HIGH_TX_RATE 0x3
+/* enum: Reserved value */
+#define          MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4
 /* enum: Only this option is allowed for non-admin functions */
 #define          MC_CMD_FW_DONT_CARE  0xffffffff
 
@@ -2481,6 +2742,12 @@
 #define          MC_CMD_LOOPBACK_SD_FES_WS  0x22
 /* enum: Near side of AOE Siena side port */
 #define          MC_CMD_LOOPBACK_AOE_INT_NEAR  0x23
+/* enum: Medford Wireside datapath loopback */
+#define          MC_CMD_LOOPBACK_DATA_WS  0x24
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+#define          MC_CMD_LOOPBACK_FORCE_EXT_LINK  0x25
 /* Supported loopbacks. */
 #define       MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
 #define       MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
@@ -2552,12 +2819,8 @@
 #define        MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
 /* This returns the negotiated flow control value. */
 #define       MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
-/* enum: Flow control is off. */
-#define          MC_CMD_FCNTL_OFF 0x0
-/* enum: Respond to flow control. */
-#define          MC_CMD_FCNTL_RESPOND 0x1
-/* enum: Respond to and Issue flow control. */
-#define          MC_CMD_FCNTL_BIDIR 0x2
+/*            Enum values, see field(s): */
+/*               MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
 #define       MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
@@ -2632,7 +2895,7 @@
 #define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK
 
 /* MC_CMD_SET_MAC_IN msgrequest */
-#define    MC_CMD_SET_MAC_IN_LEN 24
+#define    MC_CMD_SET_MAC_IN_LEN 28
 /* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
  * EtherII, VLAN, bug16011 padding).
  */
@@ -2649,13 +2912,20 @@
 #define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
 #define       MC_CMD_SET_MAC_IN_FCNTL_OFST 20
 /* enum: Flow control is off. */
-/*               MC_CMD_FCNTL_OFF 0x0 */
+#define          MC_CMD_FCNTL_OFF 0x0
 /* enum: Respond to flow control. */
-/*               MC_CMD_FCNTL_RESPOND 0x1 */
+#define          MC_CMD_FCNTL_RESPOND 0x1
 /* enum: Respond to and Issue flow control. */
-/*               MC_CMD_FCNTL_BIDIR 0x2 */
+#define          MC_CMD_FCNTL_BIDIR 0x2
 /* enum: Auto neg flow control. */
 #define          MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control (eftest builds only). */
+#define          MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define          MC_CMD_FCNTL_GENERATE 0x5
+#define       MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
 
 /* MC_CMD_SET_MAC_OUT msgresponse */
 #define    MC_CMD_SET_MAC_OUT_LEN 0
@@ -2748,7 +3018,8 @@
  * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
  * performed, and the statistics may be read from the message response. If
 * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned) location.
- * Locks required: None. Returns: 0, ETIME
+ * Locks required: None. The PERIODIC_CLEAR option is not used and now has no
+ * effect. Returns: 0, ETIME
  */
 #define MC_CMD_MAC_STATS 0x2e
 
@@ -2791,6 +3062,7 @@
 #define       MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
 #define       MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
 #define          MC_CMD_MAC_GENERATION_START  0x0 /* enum */
+#define          MC_CMD_MAC_DMABUF_START  0x1 /* enum */
 #define          MC_CMD_MAC_TX_PKTS  0x1 /* enum */
 #define          MC_CMD_MAC_TX_PAUSE_PKTS  0x2 /* enum */
 #define          MC_CMD_MAC_TX_CONTROL_PKTS  0x3 /* enum */
@@ -2890,8 +3162,8 @@
  * PM_AND_RXDP_COUNTERS capability only.
  */
 #define          MC_CMD_MAC_RXDP_STREAMING_PKTS  0x46
-/* enum: RXDP counter: Number of times an emergency descriptor fetch was
- * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+/* enum: RXDP counter: Number of times an HLB descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
  */
 #define          MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS  0x47
 /* enum: RXDP counter: Number of times the DPCPU waited for an existing
@@ -3213,6 +3485,8 @@
 #define          MC_CMD_NVRAM_TYPE_LICENSE 0x12
 /* enum: FC Log. */
 #define          MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+/* enum: Additional flash on FPGA. */
+#define          MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14
 
 
 /***********************************/
@@ -3407,6 +3681,8 @@
  */
 #define MC_CMD_SCHEDINFO 0x3e
 
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SCHEDINFO_IN msgrequest */
 #define    MC_CMD_SCHEDINFO_IN_LEN 0
 
@@ -3593,6 +3869,68 @@
 #define          MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC  0x2c
 /* enum: Hotpoint temperature: degC */
 #define          MC_CMD_SENSOR_HOTPOINT_TEMP  0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define          MC_CMD_SENSOR_PHY_POWER_PORT0  0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define          MC_CMD_SENSOR_PHY_POWER_PORT1  0x2f
+/* enum: Mop-up microcontroller reference voltage (millivolts) */
+#define          MC_CMD_SENSOR_MUM_VCC  0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define          MC_CMD_SENSOR_IN_0V9_A  0x31
+/* enum: 0.9v power phase A current: mA */
+#define          MC_CMD_SENSOR_IN_I0V9_A  0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define          MC_CMD_SENSOR_VREG_0V9_A_TEMP  0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define          MC_CMD_SENSOR_IN_0V9_B  0x34
+/* enum: 0.9v power phase B current: mA */
+#define          MC_CMD_SENSOR_IN_I0V9_B  0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define          MC_CMD_SENSOR_VREG_0V9_B_TEMP  0x36
+/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY  0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC  0x38
+/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY  0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC  0x3a
+/* enum: Not a sensor: reserved for the next page flag */
+#define          MC_CMD_SENSOR_PAGE1_NEXT  0x3f
+/* enum: controller internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT  0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP  0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC  0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC  0x43
+/* enum: controller internal temperature on slave core sensor voltage (internal
+ * ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT  0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP  0x45
+/* enum: controller internal temperature on slave core sensor voltage (external
+ * ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC  0x46
+/* enum: controller internal temperature on slave core (external ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC  0x47
+/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
+#define          MC_CMD_SENSOR_SODIMM_VOUT  0x49
+/* enum: Temperature of SODIMM 0 (if installed): degC */
+#define          MC_CMD_SENSOR_SODIMM_0_TEMP  0x4a
+/* enum: Temperature of SODIMM 1 (if installed): degC */
+#define          MC_CMD_SENSOR_SODIMM_1_TEMP  0x4b
+/* enum: Voltage supplied to the QSFP #0 from its power supply: mV */
+#define          MC_CMD_SENSOR_PHY0_VCC  0x4c
+/* enum: Voltage supplied to the QSFP #1 from its power supply: mV */
+#define          MC_CMD_SENSOR_PHY1_VCC  0x4d
 /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
 #define       MC_CMD_SENSOR_ENTRY_OFST 4
 #define       MC_CMD_SENSOR_ENTRY_LEN 8
@@ -3701,6 +4039,8 @@
 #define          MC_CMD_SENSOR_STATE_BROKEN  0x3
 /* enum: Sensor is working but does not currently have a reading. */
 #define          MC_CMD_SENSOR_STATE_NO_READING  0x4
+/* enum: Sensor initialisation failed. */
+#define          MC_CMD_SENSOR_STATE_INIT_FAILED  0x5
 #define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
 #define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
 #define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
@@ -3870,6 +4210,7 @@
 
 /* MC_CMD_WORKAROUND_IN msgrequest */
 #define    MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
 #define       MC_CMD_WORKAROUND_IN_TYPE_OFST 0
 /* enum: Bug 17230 work around. */
 #define          MC_CMD_WORKAROUND_BUG17230 0x1
@@ -3877,11 +4218,38 @@
 #define          MC_CMD_WORKAROUND_BUG35388 0x2
 /* enum: Bug35017 workaround (A64 tables must be identity map) */
 #define          MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define          MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define          MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining)
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such a case depends on the acting client's privilege
+ * level. If the client has the admin privilege, then all functions that have
+ * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise
+ * the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
+ */
+#define          MC_CMD_WORKAROUND_BUG26807 0x6
+/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
+ * the workaround
+ */
 #define       MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
 
 /* MC_CMD_WORKAROUND_OUT msgresponse */
 #define    MC_CMD_WORKAROUND_OUT_LEN 0
 
+/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used
+ * when (TYPE == MC_CMD_WORKAROUND_BUG26807)
+ */
+#define    MC_CMD_WORKAROUND_EXT_OUT_LEN 4
+#define       MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
+#define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
+
 
 /***********************************/
 /* MC_CMD_GET_PHY_MEDIA_INFO
@@ -4093,7 +4461,7 @@
 
 /***********************************/
 /* MC_CMD_GET_MAC_ADDRESSES
- * Returns the base MAC, count and stride for the requestiong function
+ * Returns the base MAC, count and stride for the requesting function
  */
 #define MC_CMD_GET_MAC_ADDRESSES 0x55
 
@@ -4115,6 +4483,527 @@
 /* Spacing of allocated MAC addresses */
 #define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
 
+
+/***********************************/
+/* MC_CMD_CLP
+ * Perform a CLP related operation
+ */
+#define MC_CMD_CLP 0x56
+
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CLP_IN msgrequest */
+#define    MC_CMD_CLP_IN_LEN 4
+/* Sub operation */
+#define       MC_CMD_CLP_IN_OP_OFST 0
+/* enum: Return to factory default settings */
+#define          MC_CMD_CLP_OP_DEFAULT 0x1
+/* enum: Set MAC address */
+#define          MC_CMD_CLP_OP_SET_MAC 0x2
+/* enum: Get MAC address */
+#define          MC_CMD_CLP_OP_GET_MAC 0x3
+/* enum: Set UEFI/GPXE boot mode */
+#define          MC_CMD_CLP_OP_SET_BOOT 0x4
+/* enum: Get UEFI/GPXE boot mode */
+#define          MC_CMD_CLP_OP_GET_BOOT 0x5
+
+/* MC_CMD_CLP_OUT msgresponse */
+#define    MC_CMD_CLP_OUT_LEN 0
+
+/* MC_CMD_CLP_IN_DEFAULT msgrequest */
+#define    MC_CMD_CLP_IN_DEFAULT_LEN 4
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
+#define    MC_CMD_CLP_OUT_DEFAULT_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC msgrequest */
+#define    MC_CMD_CLP_IN_SET_MAC_LEN 12
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+/* MAC address assigned to port */
+#define       MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
+#define       MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
+/* Padding */
+#define       MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
+#define       MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
+#define    MC_CMD_CLP_OUT_SET_MAC_LEN 0
+
+/* MC_CMD_CLP_IN_GET_MAC msgrequest */
+#define    MC_CMD_CLP_IN_GET_MAC_LEN 4
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
+#define    MC_CMD_CLP_OUT_GET_MAC_LEN 8
+/* MAC address assigned to port */
+#define       MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
+#define       MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
+/* Padding */
+#define       MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
+#define       MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
+#define    MC_CMD_CLP_IN_SET_BOOT_LEN 5
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+/* Boot flag */
+#define       MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
+#define       MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
+
+/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
+#define    MC_CMD_CLP_OUT_SET_BOOT_LEN 0
+
+/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
+#define    MC_CMD_CLP_IN_GET_BOOT_LEN 4
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
+#define    MC_CMD_CLP_OUT_GET_BOOT_LEN 4
+/* Boot flag */
+#define       MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
+#define       MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
+/* Padding */
+#define       MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
+#define       MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
+
+
+/***********************************/
+/* MC_CMD_MUM
+ * Perform a MUM operation
+ */
+#define MC_CMD_MUM 0x57
+
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MUM_IN msgrequest */
+#define    MC_CMD_MUM_IN_LEN 4
+#define       MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define        MC_CMD_MUM_IN_OP_LBN 0
+#define        MC_CMD_MUM_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to MUM */
+#define          MC_CMD_MUM_OP_NULL 0x1
+/* enum: Get MUM version */
+#define          MC_CMD_MUM_OP_GET_VERSION 0x2
+/* enum: Issue raw I2C command to MUM */
+#define          MC_CMD_MUM_OP_RAW_CMD 0x3
+/* enum: Read from registers on devices connected to MUM. */
+#define          MC_CMD_MUM_OP_READ 0x4
+/* enum: Write to registers on devices connected to MUM. */
+#define          MC_CMD_MUM_OP_WRITE 0x5
+/* enum: Control UART logging. */
+#define          MC_CMD_MUM_OP_LOG 0x6
+/* enum: Operations on MUM GPIO lines */
+#define          MC_CMD_MUM_OP_GPIO 0x7
+/* enum: Get sensor readings from MUM */
+#define          MC_CMD_MUM_OP_READ_SENSORS 0x8
+/* enum: Initiate clock programming on the MUM */
+#define          MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
+/* enum: Initiate FPGA load from flash on the MUM */
+#define          MC_CMD_MUM_OP_FPGA_LOAD 0xa
+/* enum: Request sensor reading from MUM ADC resulting from earlier request via
+ * MUM ATB
+ */
+#define          MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
+/* enum: Send commands relating to the QSFP ports via the MUM for PHY
+ * operations
+ */
+#define          MC_CMD_MUM_OP_QSFP 0xc
+
+/* MC_CMD_MUM_IN_NULL msgrequest */
+#define    MC_CMD_MUM_IN_NULL_LEN 4
+/* MUM cmd header */
+#define       MC_CMD_MUM_IN_CMD_OFST 0
+
+/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
+#define    MC_CMD_MUM_IN_GET_VERSION_LEN 4
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_READ msgrequest */
+#define    MC_CMD_MUM_IN_READ_LEN 16
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of (device connected to MUM) to read from registers of */
+#define       MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+#define          MC_CMD_MUM_DEV_HITTITE 0x1
+/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
+#define          MC_CMD_MUM_DEV_HITTITE_NIC 0x2
+/* 32-bit address to read from */
+#define       MC_CMD_MUM_IN_READ_ADDR_OFST 8
+/* Number of words to read. */
+#define       MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+
+/* MC_CMD_MUM_IN_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_WRITE_LENMIN 16
+#define    MC_CMD_MUM_IN_WRITE_LENMAX 252
+#define    MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of (device connected to MUM) to write to registers of */
+#define       MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+/*               MC_CMD_MUM_DEV_HITTITE 0x1 */
+/* 32-bit address to write to */
+#define       MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+/* Words to write */
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
+
+/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
+#define    MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
+#define    MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
+#define    MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MUM I2C cmd code */
+#define       MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+/* Number of bytes to write */
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+/* Number of bytes to read */
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+/* Bytes to write */
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
+
+/* MC_CMD_MUM_IN_LOG msgrequest */
+#define    MC_CMD_MUM_IN_LOG_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define          MC_CMD_MUM_IN_LOG_OP_UART  0x1 /* enum */
+
+/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
+#define    MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* Enable/disable debug output to UART */
+#define       MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+
+/* MC_CMD_MUM_IN_GPIO msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
+#define        MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
+#define          MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
+
+/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
+#define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
+#define        MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
+#define        MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
+#define    MC_CMD_MUM_IN_READ_SENSORS_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
+#define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
+#define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
+#define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
+
+/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
+#define    MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Bit-mask of clocks to be programmed */
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define          MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
+#define          MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
+#define          MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
+/* Control flags for clock programming */
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+
+/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
+#define    MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Enable/Disable FPGA config from flash */
+#define       MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+
+/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
+#define    MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_QSFP msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_LEN 12
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define        MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
+#define        MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
+#define          MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
+#define       MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_INIT_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+
+/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+
+/* MC_CMD_MUM_OUT msgresponse */
+#define    MC_CMD_MUM_OUT_LEN 0
+
+/* MC_CMD_MUM_OUT_NULL msgresponse */
+#define    MC_CMD_MUM_OUT_NULL_LEN 0
+
+/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
+#define    MC_CMD_MUM_OUT_GET_VERSION_LEN 12
+#define       MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
+#define    MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
+#define    MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
+#define    MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
+/* returned data */
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
+
+/* MC_CMD_MUM_OUT_READ msgresponse */
+#define    MC_CMD_MUM_OUT_READ_LENMIN 4
+#define    MC_CMD_MUM_OUT_READ_LENMAX 252
+#define    MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
+#define       MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
+#define       MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
+#define       MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
+#define       MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_MUM_OUT_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG msgresponse */
+#define    MC_CMD_MUM_OUT_LOG_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
+#define    MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
+/* The first 32-bit word read from the GPIO IN register. */
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO IN register. */
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
+/* The first 32-bit word read from the GPIO OUT register. */
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO OUT register. */
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
+#define       MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
+#define    MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
+#define    MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
+#define    MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
+#define        MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
+#define        MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
+#define        MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
+#define        MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
+#define        MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
+#define        MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
+
+/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
+#define    MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
+#define       MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+
+/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
+#define    MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
+#define    MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
+#define       MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
+
+/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
+
+/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
+#define       MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
+/* length of returned media information data, in bytes */
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
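/* Editor's illustrative sketch (not part of the patch): reading the
 * variable-length GET_MEDIA_INFO response, where DATALEN (in bytes) at
 * offset 0 describes the DATA bytes starting at offset 4, assuming
 * little-endian MCDI byte order. The helper name is hypothetical; only the
 * _OFST constants above are assumed.
 */
#include <stdint.h>
#include <string.h>

static inline uint32_t qsfp_media_info_copy(const uint8_t *resp,
					    uint8_t *out, uint32_t out_len)
{
	uint32_t datalen;

	/* DATALEN is a 32-bit count of valid DATA bytes */
	memcpy(&datalen,
	       resp + MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST,
	       sizeof(datalen));
	if (datalen > out_len)
		datalen = out_len;
	memcpy(out, resp + MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST,
	       datalen);
	return datalen;
}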
+
+/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+
+/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
+#define       MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+
 /* MC_CMD_RESOURCE_SPECIFIER enum */
 /* enum: Any */
 #define          MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
@@ -4203,6 +5092,30 @@
 #define          NVRAM_PARTITION_TYPE_PHY_MIN              0xa00
 /* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
 #define          NVRAM_PARTITION_TYPE_PHY_MAX              0xaff
+/* enum: Primary FPGA partition */
+#define          NVRAM_PARTITION_TYPE_FPGA                 0xb00
+/* enum: Secondary FPGA partition */
+#define          NVRAM_PARTITION_TYPE_FPGA_BACKUP          0xb01
+/* enum: FC firmware partition */
+#define          NVRAM_PARTITION_TYPE_FC_FIRMWARE          0xb02
+/* enum: FC License partition */
+#define          NVRAM_PARTITION_TYPE_FC_LICENSE           0xb03
+/* enum: Non-volatile log output partition for FC */
+#define          NVRAM_PARTITION_TYPE_FC_LOG               0xb04
+/* enum: MUM firmware partition */
+#define          NVRAM_PARTITION_TYPE_MUM_FIRMWARE         0xc00
+/* enum: MUM Non-volatile log output partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_LOG              0xc01
+/* enum: MUM Application table partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_APPTABLE         0xc02
+/* enum: MUM boot rom partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_BOOT_ROM         0xc03
+/* enum: MUM production signatures & calibration rom partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_PROD_ROM         0xc04
+/* enum: MUM user signatures & calibration rom partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_USER_ROM         0xc05
+/* enum: MUM fuses and lockbits partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_FUSELOCK         0xc06
 /* enum: Start of reserved value range (firmware may use for any purpose) */
 #define          NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN  0xff00
 /* enum: End of reserved value range (firmware may use for any purpose) */
@@ -4218,66 +5131,69 @@
 #define    LICENSED_APP_ID_LEN 4
 #define       LICENSED_APP_ID_ID_OFST 0
 /* enum: OpenOnload */
-#define          LICENSED_APP_ID_ONLOAD            0x1
+#define          LICENSED_APP_ID_ONLOAD                  0x1
 /* enum: PTP timestamping */
-#define          LICENSED_APP_ID_PTP               0x2
+#define          LICENSED_APP_ID_PTP                     0x2
 /* enum: SolarCapture Pro */
-#define          LICENSED_APP_ID_SOLARCAPTURE_PRO  0x4
+#define          LICENSED_APP_ID_SOLARCAPTURE_PRO        0x4
+/* enum: SolarSecure filter engine */
+#define          LICENSED_APP_ID_SOLARSECURE             0x8
+/* enum: Performance monitor */
+#define          LICENSED_APP_ID_PERF_MONITOR            0x10
+/* enum: SolarCapture Live */
+#define          LICENSED_APP_ID_SOLARCAPTURE_LIVE       0x20
+/* enum: Capture SolarSystem */
+#define          LICENSED_APP_ID_CAPTURE_SOLARSYSTEM     0x40
+/* enum: Network Access Control */
+#define          LICENSED_APP_ID_NETWORK_ACCESS_CONTROL  0x80
 #define       LICENSED_APP_ID_ID_LBN 0
 #define       LICENSED_APP_ID_ID_WIDTH 32
 
-
-/***********************************/
-/* MC_CMD_GET_WORKAROUNDS
- * Read the list of all implemented and all currently enabled workarounds. The
- * enums here must correspond with those in MC_CMD_WORKAROUND.
+/* TX_TIMESTAMP_EVENT structuredef */
+#define    TX_TIMESTAMP_EVENT_LEN 6
+/* lower 16 bits of timestamp data */
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16
+/* Type of TX event: ordinary TX completion, or low/high part of a TX timestamp
  */
-#define MC_CMD_GET_WORKAROUNDS 0x59
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
+/* enum: This is a TX completion event, not a timestamp */
+#define          TX_TIMESTAMP_EVENT_TX_EV_COMPLETION  0x0
+/* enum: This is the low part of a TX timestamp event */
+#define          TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO  0x51
+/* enum: This is the high part of a TX timestamp event */
+#define          TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI  0x52
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
+/* upper 16 bits of timestamp data */
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16
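/* Editor's illustrative sketch (not part of the patch): a timestamp is
 * delivered as two events, TX_EV_TSTAMP_LO then TX_EV_TSTAMP_HI, each
 * carrying 16 bits of data. Once both halves have been collected they
 * combine into one 32-bit value; the helper name is hypothetical.
 */
#include <stdint.h>

static inline uint32_t tx_tstamp_assemble(uint16_t lo, uint16_t hi)
{
	/* lo from the TSTAMP_LO event, hi from the following TSTAMP_HI */
	return ((uint32_t)hi << 16) | lo;
}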
 
-/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
-#define    MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
-/* Each workaround is represented by a single bit according to the enums below.
+/* RSS_MODE structuredef */
+#define    RSS_MODE_LEN 1
+/* The RSS mode for a particular packet type is a value from 0 to 15 which can
+ * be considered as 4 bits selecting which fields are included in the hash. (A
+ * value of 0 effectively disables RSS spreading for the packet type.) The YAML
+ * generation tools require this structure to be a whole number of bytes wide,
+ * but only 4 bits are relevant.
  */
-#define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
-#define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
-/* enum: Bug 17230 work around. */
-#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
-/* enum: Bug 35388 work around (unsafe EVQ writes). */
-#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
-/* enum: Bug35017 workaround (A64 tables must be identity map) */
-#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
-
-
-/***********************************/
-/* MC_CMD_LINK_STATE_MODE
- * Read/set link state mode of a VF
- */
-#define MC_CMD_LINK_STATE_MODE 0x5c
-
-#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
-#define    MC_CMD_LINK_STATE_MODE_IN_LEN 8
-/* The target function to have its link state mode read or set, must be a VF
- * e.g. VF 1,3 = 0x00030001
- */
-#define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
-#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
-#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
-#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
-#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
-/* New link state mode to be set */
-#define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
-#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO       0x0 /* enum */
-#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP         0x1 /* enum */
-#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN       0x2 /* enum */
-/* enum: Use this value to just read the existing setting without modifying it.
- */
-#define          MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE         0xffffffff
-
-/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
-#define    MC_CMD_LINK_STATE_MODE_OUT_LEN 4
-#define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define       RSS_MODE_HASH_SELECTOR_OFST 0
+#define       RSS_MODE_HASH_SELECTOR_LEN 1
+#define        RSS_MODE_HASH_SRC_ADDR_LBN 0
+#define        RSS_MODE_HASH_SRC_ADDR_WIDTH 1
+#define        RSS_MODE_HASH_DST_ADDR_LBN 1
+#define        RSS_MODE_HASH_DST_ADDR_WIDTH 1
+#define        RSS_MODE_HASH_SRC_PORT_LBN 2
+#define        RSS_MODE_HASH_SRC_PORT_WIDTH 1
+#define        RSS_MODE_HASH_DST_PORT_LBN 3
+#define        RSS_MODE_HASH_DST_PORT_WIDTH 1
+#define       RSS_MODE_HASH_SELECTOR_LBN 0
+#define       RSS_MODE_HASH_SELECTOR_WIDTH 8
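/* Editor's illustrative sketch (not part of the patch): composing the 4-bit
 * hash selector described above from its per-field flags, using only the
 * RSS_MODE_*_LBN constants defined here. The helper name is hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

static inline uint8_t rss_mode_build(bool src_addr, bool dst_addr,
				     bool src_port, bool dst_port)
{
	return (uint8_t)((src_addr << RSS_MODE_HASH_SRC_ADDR_LBN) |
			 (dst_addr << RSS_MODE_HASH_DST_ADDR_LBN) |
			 (src_port << RSS_MODE_HASH_SRC_PORT_LBN) |
			 (dst_port << RSS_MODE_HASH_DST_PORT_LBN));
}

/* rss_mode_build(true, true, true, true) == 0xf selects a full 4-tuple
 * hash; 0 disables RSS spreading for the packet type.
 */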
 
 
 /***********************************/
@@ -4413,7 +5329,9 @@
 
 #define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
-/* MC_CMD_INIT_RXQ_IN msgrequest */
+/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version
+ * in new code.
+ */
 #define    MC_CMD_INIT_RXQ_IN_LENMIN 36
 #define    MC_CMD_INIT_RXQ_IN_LENMAX 252
 #define    MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
@@ -4456,9 +5374,73 @@
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
 
+/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode
+ * flags
+ */
+#define    MC_CMD_INIT_RXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define       MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ.
+ */
+#define       MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define       MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define       MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3
+#define        MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
+#define        MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define          MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET  0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM  0x1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define        MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M  0x0 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K  0x1 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K  0x2 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K  0x3 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K  0x4 /* enum */
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define       MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define       MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of a 4k-aligned 4k host memory buffer */
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define       MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+
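/* Editor's illustrative sketch (not part of the patch): building the EXT_IN
 * FLAGS word from the LBN/WIDTH pairs above with plain shifts and masks.
 * The macro and helper names are hypothetical; the in-tree sfc driver uses
 * its own MCDI accessor macros for this.
 */
#include <stdint.h>

#define MCDI_FIELD_PREP32(name, val) \
	(((uint32_t)(val) & ((1u << name##_WIDTH) - 1)) << name##_LBN)

static inline uint32_t init_rxq_ext_flags(unsigned int timestamp,
					  unsigned int dma_mode,
					  unsigned int ps_buff_size)
{
	return MCDI_FIELD_PREP32(MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP,
				 timestamp) |
	       /* e.g. MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM */
	       MCDI_FIELD_PREP32(MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE, dma_mode) |
	       /* e.g. MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M */
	       MCDI_FIELD_PREP32(MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE,
				 ps_buff_size);
}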
 /* MC_CMD_INIT_RXQ_OUT msgresponse */
 #define    MC_CMD_INIT_RXQ_OUT_LEN 0
 
+/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
+#define    MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
+
 
 /***********************************/
 /* MC_CMD_INIT_TXQ
@@ -4467,7 +5449,9 @@
 
 #define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
-/* MC_CMD_INIT_TXQ_IN msgrequest */
+/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version
+ * in new code.
+ */
 #define    MC_CMD_INIT_TXQ_IN_LENMIN 36
 #define    MC_CMD_INIT_TXQ_IN_LENMAX 252
 #define    MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
@@ -4499,6 +5483,10 @@
 #define        MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
 #define        MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
 #define        MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -4511,6 +5499,60 @@
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
 
+/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode
+ * flags
+ */
+#define    MC_CMD_INIT_TXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define       MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define       MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4
+#define        MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define       MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of a 4k-aligned 4k host memory buffer */
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+/* Flags related to Qbb flow control mode. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
+
 /* MC_CMD_INIT_TXQ_OUT msgresponse */
 #define    MC_CMD_INIT_TXQ_OUT_LEN 0
 
@@ -4617,6 +5659,132 @@
 /* MC_CMD_PROXY_CMD_OUT msgresponse */
 #define    MC_CMD_PROXY_CMD_OUT_LEN 0
 
+/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
+ * manage proxied requests
+ */
+#define    MC_PROXY_STATUS_BUFFER_LEN 16
+/* Handle allocated by the firmware for this proxy transaction */
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+/* enum: An invalid handle. */
+#define          MC_PROXY_STATUS_BUFFER_HANDLE_INVALID  0x0
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
+/* The requesting physical function number */
+#define       MC_PROXY_STATUS_BUFFER_PF_OFST 4
+#define       MC_PROXY_STATUS_BUFFER_PF_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_PF_LBN 32
+#define       MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
+/* The requesting virtual function number. Set to VF_NULL if the target is a
+ * PF.
+ */
+#define       MC_PROXY_STATUS_BUFFER_VF_OFST 6
+#define       MC_PROXY_STATUS_BUFFER_VF_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_VF_LBN 48
+#define       MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
+/* The target function RID. */
+#define       MC_PROXY_STATUS_BUFFER_RID_OFST 8
+#define       MC_PROXY_STATUS_BUFFER_RID_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_RID_LBN 64
+#define       MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
+/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
+#define       MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
+#define       MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
+#define       MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
+/* If a request is authorized rather than carried out by the host, this is the
+ * elevated privilege mask granted to the requesting function.
+ */
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
+
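/* Editor's illustrative sketch (not part of the patch): a host-side view of
 * the 16-byte status buffer laid out above, assuming little-endian MCDI
 * byte order. Offsets match the _OFST values; the struct name is
 * hypothetical.
 */
#include <stdint.h>

struct mc_proxy_status_buffer {
	uint32_t handle;	     /* 0 == MC_PROXY_STATUS_BUFFER_HANDLE_INVALID */
	uint16_t pf;		     /* requesting physical function */
	uint16_t vf;		     /* requesting VF, or VF_NULL for a PF */
	uint16_t rid;		     /* target function RID */
	uint16_t status;	     /* as described in MC_CMD_PROXY_COMPLETE */
	uint32_t granted_privileges; /* elevated privilege mask, if any */
};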
+
+/***********************************/
+/* MC_CMD_PROXY_CONFIGURE
+ * Enable/disable authorization of MCDI requests from unprivileged functions by
+ * a designated admin function
+ */
+#define MC_CMD_PROXY_CONFIGURE 0x58
+
+#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
+#define    MC_CMD_PROXY_CONFIGURE_IN_LEN 108
+#define       MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
+#define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size STATUS_BLOCK_SIZE.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REQUEST_BLOCK_SIZE.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REPLY_BLOCK_SIZE. This buffer is only needed if the
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define       MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
+#define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
+
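/* Editor's illustrative sketch (not part of the patch): checking the buffer
 * geometry rules stated above -- block sizes must be powers of 2, the reply
 * buffer is optional, and NUM_BLOCKS applies to all three buffers. Helper
 * names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool is_pow2(uint32_t x)
{
	return x && !(x & (x - 1));
}

static bool proxy_configure_geometry_ok(uint32_t status_block_size,
					uint32_t request_block_size,
					uint32_t reply_block_size,
					uint32_t num_blocks)
{
	if (!num_blocks)
		return false;
	if (!is_pow2(status_block_size) || !is_pow2(request_block_size))
		return false;
	/* The reply buffer may be omitted (size 0) if the admin function
	 * will not complete operations via MC_CMD_PROXY_CMD.
	 */
	return reply_block_size == 0 || is_pow2(reply_block_size);
}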
+/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
+#define    MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_COMPLETE
+ * Tells FW that a requested proxy operation has either been completed (by
+ * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
+ * function that enabled proxying/authorization (by using
+ * MC_CMD_PROXY_CONFIGURE).
+ */
+#define MC_CMD_PROXY_COMPLETE 0x5f
+
+#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
+#define    MC_CMD_PROXY_COMPLETE_IN_LEN 12
+#define       MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define       MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
+ * is stored in the REPLY_BUFF.
+ */
+#define          MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
+/* enum: The operation has been authorized. The originating function may now
+ * try again.
+ */
+#define          MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
+/* enum: The operation has been declined. */
+#define          MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
+/* enum: The authorization failed because the relevant application did not
+ * respond in time.
+ */
+#define          MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
+#define       MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+
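/* Editor's illustrative sketch (not part of the patch): mapping an admin
 * function's decision onto the STATUS values above. The enum and helper
 * names are hypothetical; the constants are from this header.
 */
enum proxy_outcome { PROXY_DONE, PROXY_ALLOW, PROXY_DENY, PROXY_NO_REPLY };

static inline unsigned int proxy_complete_status(enum proxy_outcome o)
{
	switch (o) {
	case PROXY_DONE:  return MC_CMD_PROXY_COMPLETE_IN_COMPLETE;
	case PROXY_ALLOW: return MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED;
	case PROXY_DENY:  return MC_CMD_PROXY_COMPLETE_IN_DECLINED;
	default:          return MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT;
	}
}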
+/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
+#define    MC_CMD_PROXY_COMPLETE_OUT_LEN 0
+
 
 /***********************************/
 /* MC_CMD_ALLOC_BUFTBL_CHUNK
@@ -4688,6 +5856,44 @@
 /* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
 #define    MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
 
+/* PORT_CONFIG_ENTRY structuredef */
+#define    PORT_CONFIG_ENTRY_LEN 16
+/* External port number (label) */
+#define       PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
+#define       PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
+#define       PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
+#define       PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
+/* Port core location */
+#define       PORT_CONFIG_ENTRY_CORE_OFST 1
+#define       PORT_CONFIG_ENTRY_CORE_LEN 1
+#define          PORT_CONFIG_ENTRY_STANDALONE  0x0 /* enum */
+#define          PORT_CONFIG_ENTRY_MASTER  0x1 /* enum */
+#define          PORT_CONFIG_ENTRY_SLAVE  0x2 /* enum */
+#define       PORT_CONFIG_ENTRY_CORE_LBN 8
+#define       PORT_CONFIG_ENTRY_CORE_WIDTH 8
+/* Internal number (HW resource) relative to the core */
+#define       PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
+#define       PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
+#define       PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
+#define       PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
+/* Reserved */
+#define       PORT_CONFIG_ENTRY_RSVD_OFST 3
+#define       PORT_CONFIG_ENTRY_RSVD_LEN 1
+#define       PORT_CONFIG_ENTRY_RSVD_LBN 24
+#define       PORT_CONFIG_ENTRY_RSVD_WIDTH 8
+/* Bitmask of KR lanes used by the port */
+#define       PORT_CONFIG_ENTRY_LANES_OFST 4
+#define       PORT_CONFIG_ENTRY_LANES_LBN 32
+#define       PORT_CONFIG_ENTRY_LANES_WIDTH 32
+/* Port capabilities (MC_CMD_PHY_CAP_*) */
+#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
+#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
+#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
+/* Reserved (align to 16 bytes) */
+#define       PORT_CONFIG_ENTRY_RSVD2_OFST 12
+#define       PORT_CONFIG_ENTRY_RSVD2_LBN 96
+#define       PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
+
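/* Editor's illustrative sketch (not part of the patch): a host-side view of
 * one 16-byte PORT_CONFIG_ENTRY, assuming little-endian layout matching the
 * _OFST values above. The struct name is hypothetical.
 */
#include <stdint.h>

struct port_config_entry {
	uint8_t  ext_number;	 /* external port number (label) */
	uint8_t  core;		 /* STANDALONE / MASTER / SLAVE */
	uint8_t  int_number;	 /* HW resource number within the core */
	uint8_t  rsvd;
	uint32_t lanes;		 /* bitmask of KR lanes used by the port */
	uint32_t supported_caps; /* MC_CMD_PHY_CAP_* bits */
	uint32_t rsvd2;		 /* pad to 16 bytes */
};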
 
 /***********************************/
 /* MC_CMD_FILTER_OP
@@ -4759,9 +5965,9 @@
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_HOST  0x1
 /* enum: receive to MC */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_MC  0x2
-/* enum: loop back to port 0 TX MAC */
+/* enum: loop back to TXDP 0 */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_TX0  0x3
-/* enum: loop back to port 1 TX MAC */
+/* enum: loop back to TXDP 1 */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_TX1  0x4
 /* receive queue handle (for multiple queue modes, this is the base queue) */
 #define       MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
@@ -4778,9 +5984,7 @@
 #define          MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH  0x80000000
 /* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
  * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
- * MC_CMD_DOT1P_MAPPING_ALLOC. Note that these handles should be considered
- * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be
- * a valid handle.
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
  */
 #define       MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
 /* transmit domain (reserved; set to 0) */
@@ -4835,6 +6039,235 @@
 #define       MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
 #define       MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
 
+/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to
+ * include handling of VXLAN/NVGRE encapsulated frame filtering (which is
+ * supported on Medford only).
+ */
+#define    MC_CMD_FILTER_OP_EXT_IN_LEN 172
+/* identifies the type of operation requested */
+#define       MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+/*            Enum values, see field(s): */
+/*               MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define       MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP  0x0
+/* enum: receive to host */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST  0x1
+/* enum: receive to MC */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC  0x2
+/* enum: loop back to TXDP 0 */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0  0x3
+/* enum: loop back to TXDP 1 */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1  0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE  0x0
+/* enum: receive to multiple queues using RSS context */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS  0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING  0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH  0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define          MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT  0xffffffff
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60
+#define       MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62
+#define       MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64
+#define       MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define       MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66
+#define       MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define       MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define          MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN  0x0
+/* enum: Match Geneve traffic with this VNI */
+#define          MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE  0x1
+/* enum: Reserved for experimental development use */
+#define          MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL  0xfe
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define          MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE  0x0
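/* Editor's illustrative sketch (not part of the patch): packing the
 * VNI_OR_VSID word described above -- a 24-bit value plus an 8-bit type
 * selector. The helper name is hypothetical.
 */
#include <stdint.h>

static inline uint32_t filter_vni_or_vsid(uint32_t value, uint8_t type)
{
	/* type is ..._VNI_TYPE_VXLAN / _GENEVE, or ..._VSID_TYPE_NVGRE */
	return (value & 0xffffffu) |
	       ((uint32_t)type << MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN);
}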
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
+
 /* MC_CMD_FILTER_OP_OUT msgresponse */
 #define    MC_CMD_FILTER_OP_OUT_LEN 12
 /* identifies the type of operation requested */
@@ -4849,6 +6282,27 @@
 #define       MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
 #define       MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
 #define       MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+/* enum: guaranteed invalid filter handle (low 32 bits) */
+#define          MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID  0xffffffff
+/* enum: guaranteed invalid filter handle (high 32 bits) */
+#define          MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID  0xffffffff
+
+/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
+#define    MC_CMD_FILTER_OP_EXT_OUT_LEN 12
+/* identifies the type of operation requested */
+#define       MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+/*            Enum values, see field(s): */
+/*               MC_CMD_FILTER_OP_EXT_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+/*            Enum values, see field(s): */
+/*               MC_CMD_FILTER_OP_OUT/HANDLE */
 
 
 /***********************************/
@@ -4865,6 +6319,10 @@
 #define       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
 /* enum: read the list of supported RX filter matches */
 #define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES  0x1
+/* enum: read flags indicating restrictions on filter insertion for the calling
+ * client
+ */
+#define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS  0x2
 
 /* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
 #define    MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -4884,6 +6342,17 @@
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
 
+/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */
+#define    MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
+/* identifies the type of operation requested */
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* bitfield of filter insertion restrictions */
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
+#define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+
 
 /***********************************/
 /* MC_CMD_PARSER_DISP_RW
@@ -4901,8 +6370,10 @@
 #define          MC_CMD_PARSER_DISP_RW_IN_RX_DICPU  0x0
 /* enum: TX dispatcher CPU */
 #define          MC_CMD_PARSER_DISP_RW_IN_TX_DICPU  0x1
-/* enum: Lookup engine */
+/* enum: Lookup engine (with original metadata format) */
 #define          MC_CMD_PARSER_DISP_RW_IN_LUE  0x2
+/* enum: Lookup engine (with requested metadata format) */
+#define          MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA  0x3
 /* identifies the type of operation requested */
 #define       MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
 /* enum: read a word of DICPU DMEM or a LUE entry */
@@ -4919,6 +6390,8 @@
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
 /* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
+#define       MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
 /* value to write (for LUE writes) */
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
@@ -5019,7 +6492,9 @@
 /* The maximum number of VIs that would be useful */
 #define       MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
 
-/* MC_CMD_ALLOC_VIS_OUT msgresponse */
+/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
+ * Use extended version in new code.
+ */
 #define    MC_CMD_ALLOC_VIS_OUT_LEN 8
 /* The number of VIs allocated on this function */
 #define       MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
@@ -5028,6 +6503,17 @@
  */
 #define       MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
 
+/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
+#define    MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+
 
 /***********************************/
 /* MC_CMD_FREE_VIS
@@ -5114,13 +6600,15 @@
 #define    MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
 
 /* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
-#define    MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8
+#define    MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
 /* The number of VIs allocated on this function */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
 /* The base absolute VI number allocated to this function. Required to
  * correctly interpret wakeup events.
  */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
 
 
 /***********************************/
@@ -5575,6 +7063,7 @@
 #define MC_CMD_GET_CAPABILITIES 0xbe
 
 #define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_CAPABILITIES_IN msgrequest */
 #define    MC_CMD_GET_CAPABILITIES_IN_LEN 0
 
@@ -5582,6 +7071,20 @@
 #define    MC_CMD_GET_CAPABILITIES_OUT_LEN 20
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
@@ -5600,8 +7103,14 @@
 #define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
 #define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
 #define        MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1
 /* RxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
@@ -5609,6 +7118,10 @@
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP  0x0
 /* enum: Low latency RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY  0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST  0x10a
 /* enum: RXDP Test firmware image 1 */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH  0x101
 /* enum: RXDP Test firmware image 2 */
@@ -5632,6 +7145,10 @@
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP  0x0
 /* enum: Low latency TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY  0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST  0x12d
 /* enum: TXDP Test firmware image 1 */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT  0x101
 /* enum: TXDP Test firmware image 2 */
@@ -5642,22 +7159,69 @@
 #define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT  0x1 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH  0x3 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM  0x4 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY  0x5 /* enum */
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED  0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT  0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
+/* enum: Virtual switching (full feature) RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH  0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM  0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY  0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM  0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY  0xf
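/* Editor's illustrative sketch (not part of the patch): splitting the
 * 16-bit RXPD_FW_VERSION field into its 12-bit revision and 4-bit type per
 * the REV/TYPE LBN/WIDTH pairs above. The helper name is hypothetical.
 */
#include <stdint.h>

static inline void rxpd_fw_version_decode(uint16_t ver, uint16_t *rev,
					  uint8_t *type)
{
	*rev  = ver & 0x0fff;	/* REV: bits 0-11 */
	*type = ver >> 12;	/* TYPE: bits 12-15, e.g. ..._FW_TYPE_VSWITCH */
}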
 #define       MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
 #define       MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
-#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT  0x1 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH  0x3 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM  0x4 /* enum */
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED  0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT  0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
+/* enum: Virtual switching (full feature) TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH  0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM  0x4
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY  0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
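
A note on decoding these version words: TYPE and REV are sub-fields of a single
16-bit value, so a driver extracts them with the _LBN/_WIDTH defines above. A
minimal sketch, assuming the sfc MCDI helpers from mcdi.h and an outbuf holding
a completed MC_CMD_GET_CAPABILITIES response (illustrative, not in-tree code;
efx comes from the surrounding function):

    /* Unpack the RX PD firmware version word into its REV and TYPE fields */
    u16 ver = MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RXPD_FW_VERSION);
    unsigned int type = ver >> MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN;
    unsigned int rev = ver &
        ((1 << MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN) - 1);

    /* e.g. report which production RX datapath variant is running */
    if (type == MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY)
        netif_info(efx, drv, efx->net_dev,
                   "low-latency RX PD firmware, rev %u\n", rev);
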
 /* Hardware capabilities of NIC */
 #define       MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
 /* Licensed capabilities */
@@ -5735,6 +7299,15 @@
 /* the rate in mbps */
 #define       MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
 
+/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
+#define    MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
+/* the bucket id */
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+/* the desired maximum fill level */
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+
 /* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
 #define    MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
 
@@ -5753,8 +7326,14 @@
 #define       MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
 /* the static priority associated with the txq */
 #define       MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
-/* bitmask of the priority queues this txq is inserted into */
+/* bitmask of the priority queues this txq is inserted into at insertion time. */
 #define       MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
 /* the reaction point (RP) bucket */
 #define       MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
 /* an already reserved bucket (typically set to bucket associated with outer
@@ -5768,6 +7347,35 @@
 /* the min bucket (typically for ETS/minimum bandwidth) */
 #define       MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
 
+/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
+#define    MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
+/* the txq id */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+/* bitmask of the priority queues this txq is inserted into at insertion time. */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+/* the static priority associated with the txq */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+
 /* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
 #define    MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
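
The PQ_FLAGS word above is a bitfield, so the GUARANTEED/NORMAL/LOW bits are
best written with the MCDI populate helpers. A hedged sketch (the TCM commands
are not driven by the in-tree sfc driver; qid, label and rp_bucket are
placeholder variables, and the remaining bucket fields are left at zero for
brevity):

    MCDI_DECLARE_BUF(inbuf, MC_CMD_TCM_TXQ_INIT_IN_LEN);

    memset(inbuf, 0, sizeof(inbuf));	/* unused bucket fields -> 0 */
    MCDI_SET_DWORD(inbuf, TCM_TXQ_INIT_IN_QID, qid);
    MCDI_SET_DWORD(inbuf, TCM_TXQ_INIT_IN_LABEL, label);
    /* insert the txq into the guaranteed and normal priority queues */
    MCDI_POPULATE_DWORD_2(inbuf, TCM_TXQ_INIT_IN_PQ_FLAGS,
                          TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED, 1,
                          TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL, 1);
    MCDI_SET_DWORD(inbuf, TCM_TXQ_INIT_IN_RP_BKT, rp_bucket);
    rc = efx_mcdi_rpc(efx, MC_CMD_TCM_TXQ_INIT, inbuf, sizeof(inbuf),
                      NULL, 0, NULL);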
 
@@ -5826,13 +7434,23 @@
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN  0x1
 /* enum: VEB */
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB  0x2
-/* enum: VEPA */
+/* enum: VEPA (obsolete) */
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA  0x3
+/* enum: MUX */
+#define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX  0x4
+/* enum: Snapper specific; semantics TBD */
+#define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST  0x5
 /* Flags controlling v-port creation */
 #define       MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
-/* The number of VLAN tags to support. */
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greater, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero or greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if existing configuration means we can't support attached
+ * v-ports with this number of tags.
+ */
 #define       MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
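
To make the NUM_VLAN_TAGS rules concrete: a VEB created with NUM_VLAN_TAGS = 2
permits attached v-ports to use up to two tags. A minimal allocation sketch in
the style of the sfc driver's v-switch setup (UPSTREAM_PORT_ID and
EVB_PORT_ID_ASSIGNED are defined elsewhere in this header; illustrative only):

    MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_ALLOC_IN_LEN);

    MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID,
                   EVB_PORT_ID_ASSIGNED);
    MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_TYPE,
                   MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB);
    MCDI_POPULATE_DWORD_1(inbuf, VSWITCH_ALLOC_IN_FLAGS,
                          VSWITCH_ALLOC_IN_FLAG_AUTO_PORT, 1);
    MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 2);
    rc = efx_mcdi_rpc(efx, MC_CMD_VSWITCH_ALLOC, inbuf, sizeof(inbuf),
                      NULL, 0, NULL);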
 
 /* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
@@ -5892,7 +7510,10 @@
 #define       MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
-/* The number of VLAN tags to insert/remove. */
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
 #define       MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
@@ -6136,8 +7757,13 @@
 
 /* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
-/* The handle of the new RSS context */
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+/* enum: guaranteed invalid RSS context handle value */
+#define          MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID  0xffffffff
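
Since the handle is opaque, the only portable validity test is a comparison
against the guaranteed-invalid value. A one-line sketch (outbuf is assumed to
hold the allocation response; the error code is a placeholder choice):

    u32 ctx = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);

    if (ctx == MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID)
        return -ENOENT;	/* no context was allocated */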
 
 
 /***********************************/
@@ -6249,7 +7875,11 @@
 #define    MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
-/* Hash control flags */
+/* Hash control flags. The _EN bits are always supported. The _MODE bits only
+ * work when the firmware reports ADDITIONAL_RSS_MODES in
+ * MC_CMD_GET_CAPABILITIES and override the _EN bits if any of them are not 0.
+ * See the RSS_MODE structure for the meaning of the mode bits.
+ */
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
@@ -6259,6 +7889,20 @@
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4
 
 /* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
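
In the common case (no ADDITIONAL_RSS_MODES support, or all _MODE nibbles left
at zero) only the legacy _EN bits matter. A sketch in the style of the ef10
driver's RSS setup, enabling 4-tuple hashing for TCP and 2-tuple hashing for
other IP traffic (context is a handle from MC_CMD_RSS_CONTEXT_ALLOC):

    MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);

    MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, context);
    /* leaving the _MODE nibbles at zero keeps the _EN bits authoritative */
    MCDI_POPULATE_DWORD_4(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
                          RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN, 1,
                          RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN, 1,
                          RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN, 1,
                          RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN, 1);
    rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
                      NULL, 0, NULL);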
@@ -6279,7 +7923,12 @@
 
 /* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
-/* Hash control flags */
+/* Hash control flags. If any _MODE bits are non-zero (which will only be true
+ * when the firmware reports ADDITIONAL_RSS_MODES) then the _EN bits should be
+ * disregarded (but are guaranteed to be consistent with the _MODE bits if
+ * RSS_CONTEXT_SET_FLAGS has never been called for this context since it was
+ * allocated).
+ */
 #define       MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
@@ -6289,6 +7938,20 @@
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4
 
 
 /***********************************/
@@ -6311,8 +7974,13 @@
 
 /* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
 #define    MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
-/* The handle of the new .1p mapping */
+/* The handle of the new .1p mapping. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
 #define       MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+/* enum: guaranteed invalid .1p mapping handle value */
+#define          MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID  0xffffffff
 
 
 /***********************************/
@@ -6421,375 +8089,6 @@
 
 
 /***********************************/
-/* MC_CMD_RMON_RX_CLASS_STATS
- * Retrieve rmon rx class statistics
- */
-#define MC_CMD_RMON_RX_CLASS_STATS 0xc3
-
-/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_CLASS_STATS
- * Retrieve rmon tx class statistics
- */
-#define MC_CMD_RMON_TX_CLASS_STATS 0xc4
-
-/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS
- * Retrieve rmon rx super_class statistics
- */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4
-#define        MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS
- * Retrieve rmon tx super_class statistics
- */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6
-
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4
-#define        MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7
-
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define       MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define       MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define       MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8
-
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define       MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define       MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define       MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9
-
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define       MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define       MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define       MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_ALLOC_CLASS
- * Allocate an rmon class
- */
-#define MC_CMD_RMON_ALLOC_CLASS 0xca
-
-/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0
-
-/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4
-/* class */
-#define       MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_DEALLOC_CLASS
- * Deallocate an rmon class
- */
-#define MC_CMD_RMON_DEALLOC_CLASS 0xcb
-
-/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4
-/* class */
-#define       MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0
-
-/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS
- * Allocate an rmon super_class
- */
-#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc
-
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0
-
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4
-/* super_class */
-#define       MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS
- * Deallocate an rmon tx super_class
- */
-#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd
-
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4
-/* super_class */
-#define       MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0
-
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_UP_CONV_STATS
- * Retrieve up converter statistics
- */
-#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce
-
-/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPI_STATS
- * Retrieve rx ipi stats
- */
-#define MC_CMD_RMON_RX_IPI_STATS 0xcf
-
-/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0
-#define        MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5
-#define        MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5
-#define        MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS
- * Retrieve rx ipsec cntxt_ptr indexed stats
- */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0
-
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
-#define        MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
-#define        MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
-#define        MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS
- * Retrieve rx ipsec port indexed stats
- */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1
-
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS
- * Retrieve tx ipsec overflow
- */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2
-
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
 /* MC_CMD_VPORT_ADD_MAC_ADDRESS
  * Add a MAC address to a v-port
  */
@@ -6877,7 +8176,7 @@
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
 /* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
@@ -6921,354 +8220,6 @@
 
 
 /***********************************/
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS
- * Retrieve rx class drop stats
- */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3
-
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS
- * Retrieve rx super class drop stats
- */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4
-#define        MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_ERRORS_STATS
- * Retrieve rxdp errors
- */
-#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5
-
-/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0
-#define        MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11
-#define        MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11
-#define        MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_OVERFLOW_STATS
- * Retrieve rxdp overflow
- */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6
-
-/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPI_STATS
- * Retrieve tx ipi stats
- */
-#define MC_CMD_RMON_TX_IPI_STATS 0xd7
-
-/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0
-#define        MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5
-#define        MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5
-#define        MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS
- * Retrieve tx ipsec counters by cntxt_ptr
- */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8
-
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
-#define        MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
-#define        MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
-#define        MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS
- * Retrieve tx ipsec counters by port
- */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9
-
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS
- * Retrieve tx ipsec overflow
- */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda
-
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_NOWHERE_STATS
- * Retrieve tx nowhere stats
- */
-#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb
-
-/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS
- * Retrieve tx nowhere qbb stats
- */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc
-
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0
-#define        MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3
-#define        MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3
-#define        MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_ERRORS_STATS
- * Retrieve rxdp errors
- */
-#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd
-
-/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0
-#define        MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11
-#define        MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11
-#define        MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_OVERFLOW_STATS
- * Retrieve rxdp overflow
- */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde
-
-/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_COLLECT_CLASS_STATS
- * Explicitly collect class stats at the specified evb port
- */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf
-
-/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4
-/* The port id associated with the vport/pport at which to collect class stats
- */
-#define       MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0
-
-/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4
-/* class */
-#define       MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS
- * Explicitly collect class stats at the specified evb port
- */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0
-
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4
-/* The port id associated with the vport/pport at which to collect class stats
- */
-#define       MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0
-
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4
-/* super_class */
-#define       MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0
-
-
-/***********************************/
 /* MC_CMD_GET_CLOCK
  * Return the system and PDCPU clock frequencies.
  */
@@ -7296,22 +8247,66 @@
 #define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_SET_CLOCK_IN msgrequest */
-#define    MC_CMD_SET_CLOCK_IN_LEN 12
-/* Requested system frequency in MHz; 0 leaves unchanged. */
+#define    MC_CMD_SET_CLOCK_IN_LEN 28
+/* Requested frequency in MHz for system clock domain */
 #define       MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
-/* Requested inter-core frequency in MHz; 0 leaves unchanged. */
+/* enum: Leave the system clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for inter-core clock domain */
 #define       MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
-/* Request DPCPU frequency in MHz; 0 leaves unchanged. */
+/* enum: Leave the inter-core clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for DPCPU clock domain */
 #define       MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+/* enum: Leave the DPCPU clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for PCS clock domain */
+#define       MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+/* enum: Leave the PCS clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for MC clock domain */
+#define       MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+/* enum: Leave the MC clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for rmon clock domain */
+#define       MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+/* enum: Leave the rmon clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for vswitch clock domain */
+#define       MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+/* enum: Leave the vswitch clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE  0x0
 
 /* MC_CMD_SET_CLOCK_OUT msgresponse */
-#define    MC_CMD_SET_CLOCK_OUT_LEN 12
+#define    MC_CMD_SET_CLOCK_OUT_LEN 28
 /* Resulting system frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* enum: The system clock domain doesn't exist */
+#define          MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED  0x0
 /* Resulting inter-core frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* enum: The inter-core clock domain doesn't exist / isn't used */
+#define          MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED  0x0
 /* Resulting DPCPU frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+/* enum: The dpcpu clock domain doesn't exist */
+#define          MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED  0x0
+/* Resulting PCS frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+/* enum: The PCS clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED  0x0
+/* Resulting MC frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+/* enum: The MC clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED  0x0
+/* Resulting rmon frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+/* enum: The rmon clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED  0x0
+/* Resulting vswitch frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+/* enum: The vswitch clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED  0x0
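
Since 0 now means "don't change" for every domain, a caller can retune one
clock and leave the rest alone simply by zeroing the request first. A hedged
sketch (dpcpu_mhz is a placeholder; illustrative only):

    MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_CLOCK_IN_LEN);
    MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_CLOCK_OUT_LEN);
    size_t outlen;

    memset(inbuf, 0, sizeof(inbuf));	/* all domains -> DONT_CHANGE (0) */
    MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_DPCPU_FREQ, dpcpu_mhz);
    rc = efx_mcdi_rpc(efx, MC_CMD_SET_CLOCK, inbuf, sizeof(inbuf),
                      outbuf, sizeof(outbuf), &outlen);
    /* a 0 result means the domain doesn't exist / isn't controlled */
    if (!rc && MCDI_DWORD(outbuf, SET_CLOCK_OUT_DPCPU_FREQ) ==
               MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED)
        rc = -EOPNOTSUPP;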
 
 
 /***********************************/
@@ -7325,12 +8320,22 @@
 /* MC_CMD_DPCPU_RPC_IN msgrequest */
 #define    MC_CMD_DPCPU_RPC_IN_LEN 36
 #define       MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
-/* enum: RxDPCPU */
-#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX   0x0
+/* enum: RxDPCPU0 */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX0  0x0
 /* enum: TxDPCPU0 */
 #define          MC_CMD_DPCPU_RPC_IN_DPCPU_TX0  0x1
 /* enum: TxDPCPU1 */
 #define          MC_CMD_DPCPU_RPC_IN_DPCPU_TX1  0x2
+/* enum: RxDPCPU1 (Medford only) */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX1   0x3
+/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_RX0)
+ */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX   0x80
+/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_TX0)
+ */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_TX   0x81
 /* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
  * initialised to zero
  */
@@ -7418,6 +8423,25 @@
 
 
 /***********************************/
+/* MC_CMD_SHMBOOT_OP
+ * Special operations to support (for now) shmboot.
+ */
+#define MC_CMD_SHMBOOT_OP 0xe6
+
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SHMBOOT_OP_IN msgrequest */
+#define    MC_CMD_SHMBOOT_OP_IN_LEN 4
+/* Identifies the operation to perform */
+#define       MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+/* enum: Copy slave_data section to the slave core. (Greenport only) */
+#define          MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA  0x0
+
+/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
+#define    MC_CMD_SHMBOOT_OP_OUT_LEN 0
+
+
+/***********************************/
 /* MC_CMD_CAP_BLK_READ
  * Read multiple 64bit words from capture block memory
  */
@@ -7730,6 +8754,8 @@
  * more data is returned.
  */
 #define          MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT  0x6
+/* enum: Read Figure Of Merit (eye quality, higher is better). */
+#define          MC_CMD_KR_TUNE_IN_READ_FOM  0x7
 /* Align the arguments to 32 bits */
 #define       MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
 #define       MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
@@ -7762,20 +8788,32 @@
 #define       MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: Attenuation (0-15) */
+/* enum: Attenuation (0-15, TBD for Medford) */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT  0x0
-/* enum: CTLE Boost (0-15) */
+/* enum: CTLE Boost (0-15, TBD for Medford) */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST  0x1
-/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive, TBD
+ * for Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1  0x2
-/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2  0x3
-/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3  0x4
-/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4  0x5
-/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5  0x6
+/* enum: Edge DFE DLEV (TBD for Medford) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV  0x7
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0  0x0 /* enum */
@@ -7865,6 +8903,8 @@
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY  0x7
 /* enum: TX Slew Rate Fine control */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET  0x8
+/* enum: TX Termination Impedance control */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET  0x9
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0  0x0 /* enum */
@@ -7955,6 +8995,20 @@
 #define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
 #define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
 
+/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */
+#define    MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
+#define       MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+
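A hedged sketch of issuing READ_FOM for one lane (the op code is a single byte,
written through MCDI_PTR; lane and fom are placeholder variables):

    MCDI_DECLARE_BUF(inbuf, MC_CMD_KR_TUNE_READ_FOM_IN_LEN);
    MCDI_DECLARE_BUF(outbuf, MC_CMD_KR_TUNE_READ_FOM_OUT_LEN);
    size_t outlen;
    u32 fom;

    memset(inbuf, 0, sizeof(inbuf));
    *MCDI_PTR(inbuf, KR_TUNE_READ_FOM_IN_KR_TUNE_OP) =
        MC_CMD_KR_TUNE_IN_READ_FOM;
    MCDI_SET_DWORD(inbuf, KR_TUNE_READ_FOM_IN_LANE, lane);
    rc = efx_mcdi_rpc(efx, MC_CMD_KR_TUNE, inbuf, sizeof(inbuf),
                      outbuf, sizeof(outbuf), &outlen);
    if (!rc)	/* higher figure of merit = better eye */
        fom = MCDI_DWORD(outbuf, KR_TUNE_READ_FOM_OUT_FOM);
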
 
 /***********************************/
 /* MC_CMD_PCIE_TUNE
@@ -8224,6 +9278,8 @@
 #define       MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
 /* enum: validate application */
 #define          MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE  0x0
+/* enum: mask application */
+#define          MC_CMD_LICENSED_APP_OP_IN_OP_MASK  0x1
 /* arguments specific to this particular operation */
 #define       MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
 #define       MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
@@ -8258,10 +9314,22 @@
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
 
+/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
+#define    MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
+/* application ID */
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+/* flag */
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+
+/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
+#define    MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
+
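A hedged sketch of the new MASK operation (app_id and the flag value are
placeholders; illustrative only):

    MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSED_APP_OP_MASK_IN_LEN);

    MCDI_SET_DWORD(inbuf, LICENSED_APP_OP_MASK_IN_APP_ID, app_id);
    MCDI_SET_DWORD(inbuf, LICENSED_APP_OP_MASK_IN_OP,
                   MC_CMD_LICENSED_APP_OP_IN_OP_MASK);
    MCDI_SET_DWORD(inbuf, LICENSED_APP_OP_MASK_IN_FLAG, 1);
    rc = efx_mcdi_rpc(efx, MC_CMD_LICENSED_APP_OP, inbuf, sizeof(inbuf),
                      NULL, 0, NULL);
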
 
 /***********************************/
 /* MC_CMD_SET_PORT_SNIFF_CONFIG
- * Configure port sniffing for the physical port associated with the calling
+ * Configure RX port sniffing for the physical port associated with the calling
  * function. Only a privileged function may change the port sniffing
  * configuration. A copy of all traffic delivered to the host (non-promiscuous
  * mode) or all traffic arriving at the port (promiscuous mode) may be
@@ -8299,7 +9367,7 @@
 
 /***********************************/
 /* MC_CMD_GET_PORT_SNIFF_CONFIG
- * Obtain the current port sniffing configuration for the physical port
+ * Obtain the current RX port sniffing configuration for the physical port
  * associated with the calling function. Only a privileged function may read
  * the configuration.
  */
@@ -8330,4 +9398,673 @@
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
 
 
+/***********************************/
+/* MC_CMD_SET_PARSER_DISP_CONFIG
+ * Change configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9
+
+#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
+/* the type of configuration setting to change */
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
+ * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
+ */
+#define          MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN  0x0
+/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
+ * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
+ * boolean.)
+ */
+#define          MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX  0x1
+/* handle for the entity to update: queue handle, EVB port ID, etc. depending
+ * on the type of configuration setting being changed
+ */
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+/* new value: the details depend on the type of configuration setting being
+ * changed
+ */
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0
+
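A hedged sketch of changing one per-queue boolean (txq_handle is a placeholder;
VALUE here is a single dword, hence the _LEN(1) request size):

    MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(1));

    MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_TYPE,
                   MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN);
    MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_ENTITY, txq_handle);
    MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_VALUE, 1);	/* enable */
    rc = efx_mcdi_rpc(efx, MC_CMD_SET_PARSER_DISP_CONFIG, inbuf, sizeof(inbuf),
                      NULL, 0, NULL);
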
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_CONFIG
+ * Read configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
+
+#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
+/* the type of configuration setting to read */
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/*            Enum values, see field(s): */
+/*               MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
+/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
+ * the type of configuration setting being read
+ */
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
+/* current value: the details depend on the type of configuration setting being
+ * read
+ */
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
+ * Configure TX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic transmitted through the port may be
+ * delivered to a specific queue, or a set of queues with RSS. Note that these
+ * packets are delivered with transmit timestamps in the packet prefix, not
+ * receive timestamps, so it is likely that the queue(s) will need to be
+ * dedicated as TX sniff receivers.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
+
+#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define          MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE  0x0
+/* enum: receive to multiple queues using RSS context */
+#define          MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS  0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
+
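A minimal sketch of enabling TX sniffing in SIMPLE mode with the driver's standard MCDI macros; the function name is illustrative:

  static int efx_enable_tx_sniff(struct efx_nic *efx, u32 rx_queue)
  {
  	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN);

  	MCDI_POPULATE_DWORD_1(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS,
  			      SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE, 1);
  	MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE, rx_queue);
  	MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE,
  		       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
  	/* RX_CONTEXT only matters in RSS mode; 0xFFFFFFFF is never valid */
  	MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT,
  		       0xFFFFFFFF);

  	return efx_mcdi_rpc(efx, MC_CMD_SET_TX_PORT_SNIFF_CONFIG,
  			    inbuf, sizeof(inbuf), NULL, 0, NULL);
  }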
+
+/***********************************/
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
+ * Obtain the current TX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
+
+#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define    MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define    MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE  0x0
+/* enum: receiving to multiple queues using RSS context */
+#define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS  0x1
+/* RSS context (for RX_MODE_RSS) */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_RMON_STATS_RX_ERRORS
+ * Per-queue rx error stats.
+ */
+#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
+
+#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
+#define    MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
+/* The rx queue to get stats for. */
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
+#define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
+#define    MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+
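A hedged sketch of polling these counters for one queue, reusing the driver's existing MCDI helpers (the wrapper name is hypothetical):

  static int efx_read_rxq_error_stats(struct efx_nic *efx, u32 rxq, bool reset)
  {
  	MCDI_DECLARE_BUF(inbuf, MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN);
  	MCDI_DECLARE_BUF(outbuf, MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN);
  	size_t outlen;
  	int rc;

  	MCDI_SET_DWORD(inbuf, RMON_STATS_RX_ERRORS_IN_RX_QUEUE, rxq);
  	/* RST flag; assumed to reset the counters (not documented above) */
  	MCDI_POPULATE_DWORD_1(inbuf, RMON_STATS_RX_ERRORS_IN_FLAGS,
  			      RMON_STATS_RX_ERRORS_IN_RST, reset ? 1 : 0);

  	rc = efx_mcdi_rpc(efx, MC_CMD_RMON_STATS_RX_ERRORS, inbuf,
  			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
  	if (rc)
  		return rc;
  	if (outlen < MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN)
  		return -EIO;

  	netif_dbg(efx, hw, efx->net_dev,
  		  "rxq %u: crc %u trunc %u no_desc %u abort %u\n", rxq,
  		  MCDI_DWORD(outbuf, RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS),
  		  MCDI_DWORD(outbuf, RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS),
  		  MCDI_DWORD(outbuf, RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS),
  		  MCDI_DWORD(outbuf, RMON_STATS_RX_ERRORS_OUT_RX_ABORT));
  	return 0;
  }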
+
+/***********************************/
+/* MC_CMD_GET_PCIE_RESOURCE_INFO
+ * Find out about available PCIe resources
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
+#define    MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
+#define    MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
+/* The maximum number of PFs the device can expose */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+/* The maximum number of VFs the device can expose in total */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+/* The maximum number of MSI-X vectors the device can provide in total */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+/* the number of MSI-X vectors the device will allocate by default to each PF
+ */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+/* the number of MSI-X vectors the device will allocate by default to each VF
+ */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+/* the maximum number of MSI-X vectors the device can allocate to any one PF */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+/* the maximum number of MSI-X vectors the device can allocate to any one VF */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_MODES
+ * Find out about available port modes
+ */
+#define MC_CMD_GET_PORT_MODES 0xff
+
+#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_MODES_IN msgrequest */
+#define    MC_CMD_GET_PORT_MODES_IN_LEN 0
+
+/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
+#define    MC_CMD_GET_PORT_MODES_OUT_LEN 12
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
+#define       MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+/* Default (canonical) board mode */
+#define       MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+/* Current board mode */
+#define       MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+
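Since MODES is a plain bitmask, checking whether a given TLV_PORT_MODE_* value is available reduces to one bit test; a sketch (helper name hypothetical):

  static bool efx_port_mode_available(struct efx_nic *efx, unsigned int mode)
  {
  	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_MODES_OUT_LEN);
  	size_t outlen;
  	int rc;

  	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_MODES, NULL, 0,
  			  outbuf, sizeof(outbuf), &outlen);
  	if (rc || outlen < MC_CMD_GET_PORT_MODES_OUT_LEN)
  		return false;

  	/* MODES is indexed by TLV_PORT_MODE_* */
  	return MCDI_DWORD(outbuf, GET_PORT_MODES_OUT_MODES) & BIT(mode);
  }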
+
+/***********************************/
+/* MC_CMD_READ_ATB
+ * Sample voltages on the ATB
+ */
+#define MC_CMD_READ_ATB 0x100
+
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_ATB_IN msgrequest */
+#define    MC_CMD_READ_ATB_IN_LEN 16
+#define       MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define          MC_CMD_READ_ATB_IN_BUS_CCOM  0x0 /* enum */
+#define          MC_CMD_READ_ATB_IN_BUS_CKR  0x1 /* enum */
+#define          MC_CMD_READ_ATB_IN_BUS_CPCIE  0x8 /* enum */
+#define       MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define       MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define       MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+
+/* MC_CMD_READ_ATB_OUT msgresponse */
+#define    MC_CMD_READ_ATB_OUT_LEN 4
+#define       MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+
+#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define    MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 workaround. */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 workaround (unsafe EVQ writes). */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug 35017 workaround (A64 tables must be identity map) */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
+/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
+
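A sketch of reading both words and testing one workaround bit; the in-tree sfc driver grew a helper of roughly this shape, but the function below is illustrative:

  static int efx_get_workarounds(struct efx_nic *efx,
  			       unsigned int *implemented,
  			       unsigned int *enabled)
  {
  	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
  	size_t outlen;
  	int rc;

  	rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
  			  outbuf, sizeof(outbuf), &outlen);
  	if (rc)
  		return rc;
  	if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN)
  		return -EIO;

  	*implemented = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);
  	*enabled = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);
  	return 0;
  }

  /* e.g. workaround_26807 = enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807; */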
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define    MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set, e.g. PF 0 = 0xFFFF0000,
+ * VF 1,3 = 0x00030001
+ */
+#define       MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define          MC_CMD_PRIVILEGE_MASK_IN_VF_NULL  0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define       MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN             0x1 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK              0x2 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD            0x4 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP               0x8 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS  0x10 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING      0x20 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST           0x40 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST         0x80 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST         0x100 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST     0x200 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS       0x400 /* enum */
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define          MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE             0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define    MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, all privileges are always reported. */
+#define       MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+
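A sketch of a pure read of a function's privilege mask (DO_CHANGE left clear so nothing is modified); the wrapper name is hypothetical:

  static int efx_read_privilege_mask(struct efx_nic *efx, unsigned int pf,
  				   unsigned int vf, u32 *mask)
  {
  	MCDI_DECLARE_BUF(inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN);
  	MCDI_DECLARE_BUF(outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN);
  	size_t outlen;
  	int rc;

  	/* FUNCTION packs the PF in bits 0-15 and the VF in bits 16-31;
  	 * pass VF_NULL as the VF to address a PF itself.
  	 */
  	MCDI_POPULATE_DWORD_2(inbuf, PRIVILEGE_MASK_IN_FUNCTION,
  			      PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
  			      PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
  	/* DO_CHANGE (the MSB) is clear, so the mask is only read */
  	MCDI_SET_DWORD(inbuf, PRIVILEGE_MASK_IN_NEW_MASK, 0);

  	rc = efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MASK, inbuf, sizeof(inbuf),
  			  outbuf, sizeof(outbuf), &outlen);
  	if (rc)
  		return rc;
  	if (outlen < MC_CMD_PRIVILEGE_MASK_OUT_LEN)
  		return -EIO;

  	*mask = MCDI_DWORD(outbuf, PRIVILEGE_MASK_OUT_OLD_MASK);
  	return 0;
  }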
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define    MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set; it must be a
+ * VF, e.g. VF 1,3 = 0x00030001
+ */
+#define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO       0x0 /* enum */
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP         0x1 /* enum */
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN       0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define          MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE         0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define    MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+
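A sketch of setting a VF's link state mode; passing DO_NOT_CHANGE as the mode instead turns this into a read of the current setting (wrapper name hypothetical):

  static int efx_set_vf_link_state(struct efx_nic *efx, unsigned int pf,
  				 unsigned int vf, u32 mode)
  {
  	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
  	MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN);
  	size_t outlen;

  	/* same PF/VF packing as MC_CMD_PRIVILEGE_MASK; target must be a VF */
  	MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
  			      LINK_STATE_MODE_IN_FUNCTION_PF, pf,
  			      LINK_STATE_MODE_IN_FUNCTION_VF, vf);
  	MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE, mode);

  	return efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
  			    outbuf, sizeof(outbuf), &outlen);
  }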
+
+/***********************************/
+/* MC_CMD_GET_SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
+ * parameter to MC_CMD_INIT_RXQ.
+ */
+#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
+
+#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
+#define    MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
+#define    MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
+/* Minimum acceptable snapshot length. */
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+/* Maximum acceptable snapshot length. */
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+
+
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define    MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define    MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+/* Checksum of data after logical OR of pairs in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+/* Total number of mismatched bits between pairs in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+/* Checksum of data after logical OR of pairs in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+/* Total number of mismatched bits between pairs in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+/* Checksum of data after logical OR of pairs in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only affects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define    MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified. */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_NONE       0x0 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_ALL        0x1 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY   0x2 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY   0x3 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF  0x4 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_ONE        0x5 /* enum */
+/* For VFS_OF_PF specify the PF, for ONE specify the target function */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
+/* Privileges to be added to the target functions. For privilege definitions
+ * refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+/* Privileges to be removed from the target functions. For privilege
+ * definitions refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+
+/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
+#define    MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
+
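A sketch of revoking one privilege from every VF of a given PF; the ADD/REMOVE masks reuse the group definitions from MC_CMD_PRIVILEGE_MASK (function name illustrative):

  static int efx_revoke_vf_mac_spoofing(struct efx_nic *efx, unsigned int pf)
  {
  	MCDI_DECLARE_BUF(inbuf, MC_CMD_PRIVILEGE_MODIFY_IN_LEN);

  	MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_FN_GROUP,
  		       MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF);
  	/* for VFS_OF_PF only the PF part of FUNCTION is meaningful */
  	MCDI_POPULATE_DWORD_1(inbuf, PRIVILEGE_MODIFY_IN_FUNCTION,
  			      PRIVILEGE_MODIFY_IN_FUNCTION_PF, pf);
  	MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_ADD_MASK, 0);
  	MCDI_SET_DWORD(inbuf, PRIVILEGE_MODIFY_IN_REMOVE_MASK,
  		       MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING);

  	return efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MODIFY, inbuf, sizeof(inbuf),
  			    NULL, 0, NULL);
  }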
+
+/***********************************/
+/* MC_CMD_XPM_READ_BYTES
+ * Read XPM memory
+ */
+#define MC_CMD_XPM_READ_BYTES 0x103
+
+#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
+#define    MC_CMD_XPM_READ_BYTES_IN_LEN 8
+/* Start address (byte) */
+#define       MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define       MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
+#define    MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
+#define    MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
+#define    MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
+/* Data */
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_BYTES
+ * Write XPM memory
+ */
+#define MC_CMD_XPM_WRITE_BYTES 0x104
+
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
+#define    MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
+#define    MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
+#define    MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
+/* Start address (byte) */
+#define       MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define       MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+/* Data */
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
+
+/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
+#define    MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_SECTOR
+ * Read XPM sector
+ */
+#define MC_CMD_XPM_READ_SECTOR 0x105
+
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
+#define    MC_CMD_XPM_READ_SECTOR_IN_LEN 8
+/* Sector index */
+#define       MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+/* Sector size */
+#define       MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+
+/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
+#define    MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
+#define    MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
+#define    MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
+/* Sector type */
+#define       MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define          MC_CMD_XPM_READ_SECTOR_OUT_BLANK            0x0 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128   0x1 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256   0x2 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_INVALID          0xff /* enum */
+/* Sector data */
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_SECTOR
+ * Write XPM sector
+ */
+#define MC_CMD_XPM_WRITE_SECTOR 0x106
+
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
+/* If writing fails due to an uncorrectable error, try up to RETRIES following
+ * sectors (or until no more space is available). If 0, only one write attempt
+ * is made. Note that uncorrectable errors are unlikely, thanks to the XPM
+ * self-repair mechanism.
+ */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
+/* Sector type */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
+/* Sector size */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+/* Sector data */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
+
+/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
+#define    MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
+/* New sector index */
+#define       MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+
+
+/***********************************/
+/* MC_CMD_XPM_INVALIDATE_SECTOR
+ * Invalidate XPM sector
+ */
+#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
+
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
+#define    MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
+/* Sector index */
+#define       MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
+#define    MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_BLANK_CHECK
+ * Blank-check XPM memory and report bad locations
+ */
+#define MC_CMD_XPM_BLANK_CHECK 0x108
+
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
+#define    MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
+/* Start address (byte) */
+#define       MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define       MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
+/* Total number of bad (non-blank) locations */
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+/* Addresses of bad locations (there may be fewer than BAD_COUNT if they do
+ * not all fit into the MCDI response)
+ */
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
+
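Because the response is variable-length, the number of addresses actually returned follows from LEN(num) = 4 + 2*num, i.e. (outlen - 4) / 2. A hedged sketch (wrapper name hypothetical):

  static int efx_xpm_blank_check(struct efx_nic *efx, u32 addr, u32 count)
  {
  	MCDI_DECLARE_BUF(inbuf, MC_CMD_XPM_BLANK_CHECK_IN_LEN);
  	MCDI_DECLARE_BUF(outbuf, MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX);
  	unsigned int n_addrs;
  	size_t outlen;
  	int rc;

  	MCDI_SET_DWORD(inbuf, XPM_BLANK_CHECK_IN_ADDR, addr);
  	MCDI_SET_DWORD(inbuf, XPM_BLANK_CHECK_IN_COUNT, count);

  	rc = efx_mcdi_rpc(efx, MC_CMD_XPM_BLANK_CHECK, inbuf, sizeof(inbuf),
  			  outbuf, sizeof(outbuf), &outlen);
  	if (rc)
  		return rc;
  	if (outlen < MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN)
  		return -EIO;

  	/* addresses returned may be fewer than BAD_COUNT if they did not
  	 * all fit into the response
  	 */
  	n_addrs = (outlen - MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST) /
  		  MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN;

  	netif_dbg(efx, hw, efx->net_dev, "%u bad locations (%u returned)\n",
  		  MCDI_DWORD(outbuf, XPM_BLANK_CHECK_OUT_BAD_COUNT), n_addrs);
  	return 0;
  }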
+
+/***********************************/
+/* MC_CMD_XPM_REPAIR
+ * Blank-check and repair XPM memory
+ */
+#define MC_CMD_XPM_REPAIR 0x109
+
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_REPAIR_IN msgrequest */
+#define    MC_CMD_XPM_REPAIR_IN_LEN 8
+/* Start address (byte) */
+#define       MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define       MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_REPAIR_OUT msgresponse */
+#define    MC_CMD_XPM_REPAIR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_DECODER_TEST
+ * Test XPM memory address decoders for gross manufacturing defects. Can only
+ * be performed on an unprogrammed part.
+ */
+#define MC_CMD_XPM_DECODER_TEST 0x10a
+
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
+#define    MC_CMD_XPM_DECODER_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
+#define    MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_TEST
+ * XPM memory write test. Test XPM write logic for gross manufacturing defects
+ * by writing to a dedicated test row. There are 16 locations in the test row
+ * and the test can only be performed on locations that have not been
+ * previously used (i.e. can be run at most 16 times). The test will pick the
+ * first available location to use, or fail with ENOSPC if none are left.
+ */
+#define MC_CMD_XPM_WRITE_TEST 0x10b
+
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
+#define    MC_CMD_XPM_WRITE_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
+#define    MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
+
+
 #endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 47d1e3a..c530e1c 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -925,6 +925,7 @@
  * @stats_lock: Statistics update lock. Must be held when calling
  *	efx_nic_type::{update,start,stop}_stats.
  * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
+ * @mc_promisc: Whether in multicast promiscuous mode when last changed
  *
  * This is stored in the private area of the &struct net_device.
  */
@@ -971,6 +972,7 @@
 	unsigned next_buffer_table;
 
 	unsigned int max_channels;
+	unsigned int max_tx_channels;
 	unsigned n_channels;
 	unsigned n_rx_channels;
 	unsigned rss_spread;
@@ -1072,6 +1074,7 @@
 	int last_irq_cpu;
 	spinlock_t stats_lock;
 	atomic_t n_rx_noskb_drops;
+	bool mc_promisc;
 };
 
 static inline int efx_dev_registered(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 31ff908..0b536e2 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -506,6 +506,7 @@
  * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
  * @stats: Hardware statistics
  * @workaround_35388: Flag: firmware supports workaround for bug 35388
+ * @workaround_26807: Flag: firmware supports workaround for bug 26807
  * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
  *	after MC reboot
  * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
@@ -535,6 +536,7 @@
 	bool rx_rss_context_exclusive;
 	u64 stats[EF10_STAT_COUNT];
 	bool workaround_35388;
+	bool workaround_26807;
 	bool must_check_datapath_caps;
 	u32 datapath_caps;
 	unsigned int rx_dpcpu_fw_id;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index b605dfd5..9d78830 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -114,7 +114,10 @@
 
 	if (efx->type->test_nvram) {
 		rc = efx->type->test_nvram(efx);
-		tests->nvram = rc ? -1 : 1;
+		if (rc == -EPERM)
+			rc = 0;
+		else
+			tests->nvram = rc ? -1 : 1;
 	}
 
 	return rc;
@@ -253,6 +256,12 @@
 	mutex_lock(&efx->mac_lock);
 	rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
 	mutex_unlock(&efx->mac_lock);
+	if (rc == -EPERM)
+		rc = 0;
+	else
+		netif_info(efx, drv, efx->net_dev,
+			   "%s phy selftest\n", rc ? "Failed" : "Passed");
+
 	return rc;
 }
 
@@ -661,6 +670,9 @@
 	wmb();
 	kfree(state);
 
+	if (rc == -EPERM)
+		rc = 0;
+
 	return rc;
 }
 
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index b323b91..2219b54 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -262,6 +262,7 @@
 	}
 
 	efx->max_channels = EFX_MAX_CHANNELS;
+	efx->max_tx_channels = EFX_MAX_CHANNELS;
 
 	efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
 	efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
@@ -1042,9 +1043,5 @@
 	.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
 	.hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
 			     1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
-			     1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
-			     1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
-			     1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
-			     1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
-			     1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ),
+			     1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
 };
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 67d9fde..664f596 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -1031,36 +1031,8 @@
 static void print_packet( byte * buf, int length )
 {
 #if 0
-	int i;
-	int remainder;
-	int lines;
-
-	pr_dbg("Packet of length %d\n", length);
-	lines = length / 16;
-	remainder = length % 16;
-
-	for ( i = 0; i < lines ; i ++ ) {
-		int cur;
-
-		printk(KERN_DEBUG);
-		for ( cur = 0; cur < 8; cur ++ ) {
-			byte a, b;
-
-			a = *(buf ++ );
-			b = *(buf ++ );
-			pr_cont("%02x%02x ", a, b);
-		}
-		pr_cont("\n");
-	}
-	printk(KERN_DEBUG);
-	for ( i = 0; i < remainder/2 ; i++ ) {
-		byte a, b;
-
-		a = *(buf ++ );
-		b = *(buf ++ );
-		pr_cont("%02x%02x ", a, b);
-	}
-	pr_cont("\n");
+	print_hex_dump_debug(DRV_NAME, DUMP_PREFIX_OFFSET, 16, 1,
+			     buf, length, true);
 #endif
 }
 #endif
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 959aeea..3b4cd8a 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -59,7 +59,9 @@
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/of_net.h>
+#include <linux/acpi.h>
 #include <linux/pm_runtime.h>
+#include <linux/property.h>
 
 #include "smsc911x.h"
 
@@ -2362,59 +2364,50 @@
 	.tx_writefifo = smsc911x_tx_writefifo_shift,
 };
 
-#ifdef CONFIG_OF
-static int smsc911x_probe_config_dt(struct smsc911x_platform_config *config,
-				    struct device_node *np)
+static int smsc911x_probe_config(struct smsc911x_platform_config *config,
+				 struct device *dev)
 {
-	const char *mac;
+	int phy_interface;
 	u32 width = 0;
+	int err;
 
-	if (!np)
-		return -ENODEV;
+	phy_interface = device_get_phy_mode(dev);
+	if (phy_interface < 0)
+		phy_interface = PHY_INTERFACE_MODE_NA;
+	config->phy_interface = phy_interface;
 
-	config->phy_interface = of_get_phy_mode(np);
+	device_get_mac_address(dev, config->mac, ETH_ALEN);
 
-	mac = of_get_mac_address(np);
-	if (mac)
-		memcpy(config->mac, mac, ETH_ALEN);
-
-	of_property_read_u32(np, "reg-shift", &config->shift);
-
-	of_property_read_u32(np, "reg-io-width", &width);
-	if (width == 4)
+	err = device_property_read_u32(dev, "reg-io-width", &width);
+	if (err == -ENXIO)
+		return err;
+	if (!err && width == 4)
 		config->flags |= SMSC911X_USE_32BIT;
 	else
 		config->flags |= SMSC911X_USE_16BIT;
 
-	if (of_get_property(np, "smsc,irq-active-high", NULL))
+	device_property_read_u32(dev, "reg-shift", &config->shift);
+
+	if (device_property_present(dev, "smsc,irq-active-high"))
 		config->irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH;
 
-	if (of_get_property(np, "smsc,irq-push-pull", NULL))
+	if (device_property_present(dev, "smsc,irq-push-pull"))
 		config->irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL;
 
-	if (of_get_property(np, "smsc,force-internal-phy", NULL))
+	if (device_property_present(dev, "smsc,force-internal-phy"))
 		config->flags |= SMSC911X_FORCE_INTERNAL_PHY;
 
-	if (of_get_property(np, "smsc,force-external-phy", NULL))
+	if (device_property_present(dev, "smsc,force-external-phy"))
 		config->flags |= SMSC911X_FORCE_EXTERNAL_PHY;
 
-	if (of_get_property(np, "smsc,save-mac-address", NULL))
+	if (device_property_present(dev, "smsc,save-mac-address"))
 		config->flags |= SMSC911X_SAVE_MAC_ADDRESS;
 
 	return 0;
 }
-#else
-static inline int smsc911x_probe_config_dt(
-				struct smsc911x_platform_config *config,
-				struct device_node *np)
-{
-	return -ENODEV;
-}
-#endif /* CONFIG_OF */
 
 static int smsc911x_drv_probe(struct platform_device *pdev)
 {
-	struct device_node *np = pdev->dev.of_node;
 	struct net_device *dev;
 	struct smsc911x_data *pdata;
 	struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
@@ -2435,7 +2428,10 @@
 	res_size = resource_size(res);
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq <= 0) {
+	if (irq == -EPROBE_DEFER) {
+		retval = -EPROBE_DEFER;
+		goto out_0;
+	} else if (irq <= 0) {
 		pr_warn("Could not allocate irq resource\n");
 		retval = -ENODEV;
 		goto out_0;
@@ -2478,7 +2474,7 @@
 		goto out_disable_resources;
 	}
 
-	retval = smsc911x_probe_config_dt(&pdata->config, np);
+	retval = smsc911x_probe_config(&pdata->config, &pdev->dev);
 	if (retval && config) {
 		/* copy config parameters across to pdata */
 		memcpy(&pdata->config, config, sizeof(pdata->config));
@@ -2654,6 +2650,12 @@
 MODULE_DEVICE_TABLE(of, smsc911x_dt_ids);
 #endif
 
+static const struct acpi_device_id smsc911x_acpi_match[] = {
+	{ "ARMH9118", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, smsc911x_acpi_match);
+
 static struct platform_driver smsc911x_driver = {
 	.probe = smsc911x_drv_probe,
 	.remove = smsc911x_drv_remove,
@@ -2661,6 +2663,7 @@
 		.name	= SMSC_CHIPNAME,
 		.pm	= SMSC911X_PM_OPS,
 		.of_match_table = of_match_ptr(smsc911x_dt_ids),
+		.acpi_match_table = ACPI_PTR(smsc911x_acpi_match),
 	},
 };
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index e817a1a..b1e5f24 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -16,6 +16,46 @@
 #include "stmmac.h"
 #include "stmmac_platform.h"
 
+static int dwmac_generic_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
+	int ret;
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	if (pdev->dev.of_node) {
+		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+		if (IS_ERR(plat_dat)) {
+			dev_err(&pdev->dev, "dt configuration failed\n");
+			return PTR_ERR(plat_dat);
+		}
+	} else {
+		plat_dat = dev_get_platdata(&pdev->dev);
+		if (!plat_dat) {
+			dev_err(&pdev->dev, "no platform data provided\n");
+			return -EINVAL;
+		}
+
+		/* Set default value for multicast hash bins */
+		plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+		/* Set default value for unicast filter entries */
+		plat_dat->unicast_filter_entries = 1;
+	}
+
+	/* Custom initialisation (if needed) */
+	if (plat_dat->init) {
+		ret = plat_dat->init(pdev, plat_dat->bsp_priv);
+		if (ret)
+			return ret;
+	}
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
+
 static const struct of_device_id dwmac_generic_match[] = {
 	{ .compatible = "st,spear600-gmac"},
 	{ .compatible = "snps,dwmac-3.610"},
@@ -27,7 +67,7 @@
 MODULE_DEVICE_TABLE(of, dwmac_generic_match);
 
 static struct platform_driver dwmac_generic_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = dwmac_generic_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = STMMAC_RESOURCE_NAME,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index f0e4bb4..9d89bdb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -248,23 +248,40 @@
 	return NULL;
 }
 
-static void *ipq806x_gmac_setup(struct platform_device *pdev)
+static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
 {
+	struct ipq806x_gmac *gmac = priv;
+
+	ipq806x_gmac_set_speed(gmac, speed);
+}
+
+static int ipq806x_gmac_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
 	struct device *dev = &pdev->dev;
 	struct ipq806x_gmac *gmac;
 	int val;
 	void *err;
 
+	val = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (val)
+		return val;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
+
 	gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
 	if (!gmac)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	gmac->pdev = pdev;
 
 	err = ipq806x_gmac_of_parse(gmac);
-	if (err) {
+	if (IS_ERR(err)) {
 		dev_err(dev, "device tree parsing error\n");
-		return err;
+		return PTR_ERR(err);
 	}
 
 	regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
@@ -285,7 +302,7 @@
 	default:
 		dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
 			phy_modes(gmac->phy_mode));
-		return NULL;
+		return -EINVAL;
 	}
 	regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
 
@@ -304,7 +321,7 @@
 	default:
 		dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
 			phy_modes(gmac->phy_mode));
-		return NULL;
+		return -EINVAL;
 	}
 	regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
 
@@ -327,30 +344,21 @@
 			     0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
 	}
 
-	return gmac;
+	plat_dat->has_gmac = true;
+	plat_dat->bsp_priv = gmac;
+	plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
 
-static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
-{
-	struct ipq806x_gmac *gmac = priv;
-
-	ipq806x_gmac_set_speed(gmac, speed);
-}
-
-static const struct stmmac_of_data ipq806x_gmac_data = {
-	.has_gmac	= 1,
-	.setup		= ipq806x_gmac_setup,
-	.fix_mac_speed	= ipq806x_gmac_fix_mac_speed,
-};
-
 static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
-	{ .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data },
+	{ .compatible = "qcom,ipq806x-gmac" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
 
 static struct platform_driver ipq806x_gmac_dwmac_driver = {
-	.probe = stmmac_pltfr_probe,
+	.probe = ipq806x_gmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name		= "ipq806x-gmac-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index cb888d3..78e9d18 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -25,66 +25,53 @@
 # define LPC18XX_CREG_CREG6_ETHMODE_MII		0x0
 # define LPC18XX_CREG_CREG6_ETHMODE_RMII	0x4
 
-struct lpc18xx_dwmac_priv_data {
-	struct regmap *reg;
-	int interface;
-};
-
-static void *lpc18xx_dwmac_setup(struct platform_device *pdev)
+static int lpc18xx_dwmac_probe(struct platform_device *pdev)
 {
-	struct lpc18xx_dwmac_priv_data *dwmac;
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
+	struct regmap *reg;
+	u8 ethmode;
+	int ret;
 
-	dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
-	if (!dwmac)
-		return ERR_PTR(-ENOMEM);
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
 
-	dwmac->interface = of_get_phy_mode(pdev->dev.of_node);
-	if (dwmac->interface < 0)
-		return ERR_PTR(dwmac->interface);
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
 
-	dwmac->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
-	if (IS_ERR(dwmac->reg)) {
-		dev_err(&pdev->dev, "Syscon lookup failed\n");
-		return dwmac->reg;
+	plat_dat->has_gmac = true;
+
+	reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+	if (IS_ERR(reg)) {
+		dev_err(&pdev->dev, "syscon lookup failed\n");
+		return PTR_ERR(reg);
 	}
 
-	return dwmac;
-}
-
-static int lpc18xx_dwmac_init(struct platform_device *pdev, void *priv)
-{
-	struct lpc18xx_dwmac_priv_data *dwmac = priv;
-	u8 ethmode;
-
-	if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
+	if (plat_dat->interface == PHY_INTERFACE_MODE_MII) {
 		ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
-	} else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+	} else if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) {
 		ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
 	} else {
 		dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
 		return -EINVAL;
 	}
 
-	regmap_update_bits(dwmac->reg, LPC18XX_CREG_CREG6,
+	regmap_update_bits(reg, LPC18XX_CREG_CREG6,
 			   LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
 
-	return 0;
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
 
-static const struct stmmac_of_data lpc18xx_dwmac_data = {
-	.has_gmac = 1,
-	.setup = lpc18xx_dwmac_setup,
-	.init = lpc18xx_dwmac_init,
-};
-
 static const struct of_device_id lpc18xx_dwmac_match[] = {
-	{ .compatible = "nxp,lpc1850-dwmac", .data = &lpc18xx_dwmac_data },
+	{ .compatible = "nxp,lpc1850-dwmac" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match);
 
 static struct platform_driver lpc18xx_dwmac_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = lpc18xx_dwmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = "lpc18xx-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 61a324a..c1bac19 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -47,36 +47,45 @@
 	writel(val, dwmac->reg);
 }
 
-static void *meson6_dwmac_setup(struct platform_device *pdev)
+static int meson6_dwmac_probe(struct platform_device *pdev)
 {
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
 	struct meson_dwmac *dwmac;
 	struct resource *res;
+	int ret;
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
 
 	dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
 	if (!dwmac)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	dwmac->reg = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(dwmac->reg))
-		return ERR_CAST(dwmac->reg);
+		return PTR_ERR(dwmac->reg);
 
-	return dwmac;
+	plat_dat->bsp_priv = dwmac;
+	plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
 
-static const struct stmmac_of_data meson6_dwmac_data = {
-	.setup		= meson6_dwmac_setup,
-	.fix_mac_speed	= meson6_dwmac_fix_mac_speed,
-};
-
 static const struct of_device_id meson6_dwmac_match[] = {
-	{ .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
+	{ .compatible = "amlogic,meson6-dwmac" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, meson6_dwmac_match);
 
 static struct platform_driver meson6_dwmac_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = meson6_dwmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = "meson6-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 00a1e1e..11baa4b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -46,7 +46,7 @@
 	struct platform_device *pdev;
 	int phy_iface;
 	struct regulator *regulator;
-	struct rk_gmac_ops *ops;
+	const struct rk_gmac_ops *ops;
 
 	bool clk_enabled;
 	bool clock_input;
@@ -177,7 +177,7 @@
 	}
 }
 
-struct rk_gmac_ops rk3288_ops = {
+static const struct rk_gmac_ops rk3288_ops = {
 	.set_to_rgmii = rk3288_set_to_rgmii,
 	.set_to_rmii = rk3288_set_to_rmii,
 	.set_rgmii_speed = rk3288_set_rgmii_speed,
@@ -289,7 +289,7 @@
 	}
 }
 
-struct rk_gmac_ops rk3368_ops = {
+static const struct rk_gmac_ops rk3368_ops = {
 	.set_to_rgmii = rk3368_set_to_rgmii,
 	.set_to_rmii = rk3368_set_to_rmii,
 	.set_rgmii_speed = rk3368_set_rgmii_speed,
@@ -448,7 +448,7 @@
 }
 
 static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
-					  struct rk_gmac_ops *ops)
+					  const struct rk_gmac_ops *ops)
 {
 	struct rk_priv_data *bsp_priv;
 	struct device *dev = &pdev->dev;
@@ -529,16 +529,6 @@
 	return bsp_priv;
 }
 
-static void *rk3288_gmac_setup(struct platform_device *pdev)
-{
-	return rk_gmac_setup(pdev, &rk3288_ops);
-}
-
-static void *rk3368_gmac_setup(struct platform_device *pdev)
-{
-	return rk_gmac_setup(pdev, &rk3368_ops);
-}
-
 static int rk_gmac_init(struct platform_device *pdev, void *priv)
 {
 	struct rk_priv_data *bsp_priv = priv;
@@ -576,31 +566,52 @@
 		dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
 }
 
-static const struct stmmac_of_data rk3288_gmac_data = {
-	.has_gmac = 1,
-	.fix_mac_speed = rk_fix_speed,
-	.setup = rk3288_gmac_setup,
-	.init = rk_gmac_init,
-	.exit = rk_gmac_exit,
-};
+static int rk_gmac_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
+	const struct rk_gmac_ops *data;
+	int ret;
 
-static const struct stmmac_of_data rk3368_gmac_data = {
-	.has_gmac = 1,
-	.fix_mac_speed = rk_fix_speed,
-	.setup = rk3368_gmac_setup,
-	.init = rk_gmac_init,
-	.exit = rk_gmac_exit,
-};
+	data = of_device_get_match_data(&pdev->dev);
+	if (!data) {
+		dev_err(&pdev->dev, "no OF match data provided\n");
+		return -EINVAL;
+	}
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
+
+	plat_dat->has_gmac = true;
+	plat_dat->init = rk_gmac_init;
+	plat_dat->exit = rk_gmac_exit;
+	plat_dat->fix_mac_speed = rk_fix_speed;
+
+	plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
+	if (IS_ERR(plat_dat->bsp_priv))
+		return PTR_ERR(plat_dat->bsp_priv);
+
+	ret = rk_gmac_init(pdev, plat_dat->bsp_priv);
+	if (ret)
+		return ret;
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
 
 static const struct of_device_id rk_gmac_dwmac_match[] = {
-	{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
-	{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_gmac_data},
+	{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
+	{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
 
 static struct platform_driver rk_gmac_dwmac_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = rk_gmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = "rk_gmac-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 8141c5b..401383b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -175,31 +175,6 @@
 	return 0;
 }
 
-static void *socfpga_dwmac_probe(struct platform_device *pdev)
-{
-	struct device		*dev = &pdev->dev;
-	int			ret;
-	struct socfpga_dwmac	*dwmac;
-
-	dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
-	if (!dwmac)
-		return ERR_PTR(-ENOMEM);
-
-	ret = socfpga_dwmac_parse_data(dwmac, dev);
-	if (ret) {
-		dev_err(dev, "Unable to parse OF data\n");
-		return ERR_PTR(ret);
-	}
-
-	ret = socfpga_dwmac_setup(dwmac);
-	if (ret) {
-		dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
-		return ERR_PTR(ret);
-	}
-
-	return dwmac;
-}
-
 static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
 {
 	struct socfpga_dwmac	*dwmac = priv;
@@ -257,21 +232,58 @@
 	return ret;
 }
 
-static const struct stmmac_of_data socfpga_gmac_data = {
-	.setup = socfpga_dwmac_probe,
-	.init = socfpga_dwmac_init,
-	.exit = socfpga_dwmac_exit,
-	.fix_mac_speed = socfpga_dwmac_fix_mac_speed,
-};
+static int socfpga_dwmac_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
+	struct device		*dev = &pdev->dev;
+	int			ret;
+	struct socfpga_dwmac	*dwmac;
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
+
+	dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
+	if (!dwmac)
+		return -ENOMEM;
+
+	ret = socfpga_dwmac_parse_data(dwmac, dev);
+	if (ret) {
+		dev_err(dev, "Unable to parse OF data\n");
+		return ret;
+	}
+
+	ret = socfpga_dwmac_setup(dwmac);
+	if (ret) {
+		dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
+		return ret;
+	}
+
+	plat_dat->bsp_priv = dwmac;
+	plat_dat->init = socfpga_dwmac_init;
+	plat_dat->exit = socfpga_dwmac_exit;
+	plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
+
+	ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv);
+	if (ret)
+		return ret;
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
 
 static const struct of_device_id socfpga_dwmac_match[] = {
-	{ .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
+	{ .compatible = "altr,socfpga-stmmac" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
 
 static struct platform_driver socfpga_dwmac_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = socfpga_dwmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = "socfpga-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index a2e8111..7f6f4a4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -21,6 +21,7 @@
 #include <linux/regmap.h>
 #include <linux/clk.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/of_net.h>
 
 #include "stmmac_platform.h"
@@ -128,6 +129,11 @@
 	struct device *dev;
 	struct regmap *regmap;
 	u32 speed;
+	void (*fix_retime_src)(void *priv, unsigned int speed);
+};
+
+struct sti_dwmac_of_data {
+	void (*fix_retime_src)(void *priv, unsigned int speed);
 };
 
 static u32 phy_intf_sels[] = {
@@ -222,8 +228,9 @@
 	regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val);
 }
 
-static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac)
+static int sti_dwmac_init(struct platform_device *pdev, void *priv)
 {
+	struct sti_dwmac *dwmac = priv;
 	struct regmap *regmap = dwmac->regmap;
 	int iface = dwmac->interface;
 	struct device *dev = dwmac->dev;
@@ -241,28 +248,8 @@
 
 	val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
 	regmap_update_bits(regmap, reg, ENMII_MASK, val);
-}
 
-static int stix4xx_init(struct platform_device *pdev, void *priv)
-{
-	struct sti_dwmac *dwmac = priv;
-	u32 spd = dwmac->speed;
-
-	sti_dwmac_ctrl_init(dwmac);
-
-	stih4xx_fix_retime_src(priv, spd);
-
-	return 0;
-}
-
-static int stid127_init(struct platform_device *pdev, void *priv)
-{
-	struct sti_dwmac *dwmac = priv;
-	u32 spd = dwmac->speed;
-
-	sti_dwmac_ctrl_init(dwmac);
-
-	stid127_fix_retime_src(priv, spd);
+	dwmac->fix_retime_src(priv, dwmac->speed);
 
 	return 0;
 }
@@ -334,36 +321,58 @@
 	return 0;
 }
 
-static void *sti_dwmac_setup(struct platform_device *pdev)
+static int sti_dwmac_probe(struct platform_device *pdev)
 {
+	struct plat_stmmacenet_data *plat_dat;
+	const struct sti_dwmac_of_data *data;
+	struct stmmac_resources stmmac_res;
 	struct sti_dwmac *dwmac;
 	int ret;
 
+	data = of_device_get_match_data(&pdev->dev);
+	if (!data) {
+		dev_err(&pdev->dev, "No OF match data provided\n");
+		return -EINVAL;
+	}
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
+
 	dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
 	if (!dwmac)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	ret = sti_dwmac_parse_data(dwmac, pdev);
 	if (ret) {
 		dev_err(&pdev->dev, "Unable to parse OF data\n");
-		return ERR_PTR(ret);
+		return ret;
 	}
 
-	return dwmac;
+	dwmac->fix_retime_src = data->fix_retime_src;
+
+	plat_dat->bsp_priv = dwmac;
+	plat_dat->init = sti_dwmac_init;
+	plat_dat->exit = sti_dwmac_exit;
+	plat_dat->fix_mac_speed = data->fix_retime_src;
+
+	ret = sti_dwmac_init(pdev, plat_dat->bsp_priv);
+	if (ret)
+		return ret;
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
 
-static const struct stmmac_of_data stih4xx_dwmac_data = {
-	.fix_mac_speed = stih4xx_fix_retime_src,
-	.setup = sti_dwmac_setup,
-	.init = stix4xx_init,
-	.exit = sti_dwmac_exit,
+static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
+	.fix_retime_src = stih4xx_fix_retime_src,
 };
 
-static const struct stmmac_of_data stid127_dwmac_data = {
-	.fix_mac_speed = stid127_fix_retime_src,
-	.setup = sti_dwmac_setup,
-	.init = stid127_init,
-	.exit = sti_dwmac_exit,
+static const struct sti_dwmac_of_data stid127_dwmac_data = {
+	.fix_retime_src = stid127_fix_retime_src,
 };
 
 static const struct of_device_id sti_dwmac_match[] = {
@@ -376,7 +385,7 @@
 MODULE_DEVICE_TABLE(of, sti_dwmac_match);
 
 static struct platform_driver sti_dwmac_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = sti_dwmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = "sti-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 15048ca..52b8ed9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -33,35 +33,6 @@
 	struct regulator *regulator;
 };
 
-static void *sun7i_gmac_setup(struct platform_device *pdev)
-{
-	struct sunxi_priv_data *gmac;
-	struct device *dev = &pdev->dev;
-
-	gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
-	if (!gmac)
-		return ERR_PTR(-ENOMEM);
-
-	gmac->interface = of_get_phy_mode(dev->of_node);
-
-	gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
-	if (IS_ERR(gmac->tx_clk)) {
-		dev_err(dev, "could not get tx clock\n");
-		return gmac->tx_clk;
-	}
-
-	/* Optional regulator for PHY */
-	gmac->regulator = devm_regulator_get_optional(dev, "phy");
-	if (IS_ERR(gmac->regulator)) {
-		if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
-			return ERR_PTR(-EPROBE_DEFER);
-		dev_info(dev, "no regulator found\n");
-		gmac->regulator = NULL;
-	}
-
-	return gmac;
-}
-
 #define SUN7I_GMAC_GMII_RGMII_RATE	125000000
 #define SUN7I_GMAC_MII_RATE		25000000
 
@@ -132,25 +103,67 @@
 	}
 }
 
-/* of_data specifying hardware features and callbacks.
- * hardware features were copied from Allwinner drivers. */
-static const struct stmmac_of_data sun7i_gmac_data = {
-	.has_gmac = 1,
-	.tx_coe = 1,
-	.fix_mac_speed = sun7i_fix_speed,
-	.setup = sun7i_gmac_setup,
-	.init = sun7i_gmac_init,
-	.exit = sun7i_gmac_exit,
-};
+static int sun7i_gmac_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
+	struct sunxi_priv_data *gmac;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
+
+	gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+	if (!gmac)
+		return -ENOMEM;
+
+	gmac->interface = of_get_phy_mode(dev->of_node);
+
+	gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
+	if (IS_ERR(gmac->tx_clk)) {
+		dev_err(dev, "could not get tx clock\n");
+		return PTR_ERR(gmac->tx_clk);
+	}
+
+	/* Optional regulator for PHY */
+	gmac->regulator = devm_regulator_get_optional(dev, "phy");
+	if (IS_ERR(gmac->regulator)) {
+		if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		dev_info(dev, "no regulator found\n");
+		gmac->regulator = NULL;
+	}
+
+	/* Platform data specifying hardware features and callbacks.
+	 * Hardware features were copied from Allwinner drivers.
+	 */
+	plat_dat->tx_coe = 1;
+	plat_dat->has_gmac = true;
+	plat_dat->bsp_priv = gmac;
+	plat_dat->init = sun7i_gmac_init;
+	plat_dat->exit = sun7i_gmac_exit;
+	plat_dat->fix_mac_speed = sun7i_fix_speed;
+
+	ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
+	if (ret)
+		return ret;
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
 
 static const struct of_device_id sun7i_dwmac_match[] = {
-	{ .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
+	{ .compatible = "allwinner,sun7i-a20-gmac" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
 
 static struct platform_driver sun7i_dwmac_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = sun7i_gmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = "sun7i-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index bcdc895..d02691b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -104,32 +104,16 @@
  * this function is to read the driver parameters from device-tree and
  * set some private fields that will be used by the main at runtime.
  */
-static int stmmac_probe_config_dt(struct platform_device *pdev,
-				  struct plat_stmmacenet_data *plat,
-				  const char **mac)
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 {
 	struct device_node *np = pdev->dev.of_node;
+	struct plat_stmmacenet_data *plat;
 	struct stmmac_dma_cfg *dma_cfg;
-	const struct of_device_id *device;
-	struct device *dev = &pdev->dev;
 
-	device = of_match_device(dev->driver->of_match_table, dev);
-	if (device->data) {
-		const struct stmmac_of_data *data = device->data;
-		plat->has_gmac = data->has_gmac;
-		plat->enh_desc = data->enh_desc;
-		plat->tx_coe = data->tx_coe;
-		plat->rx_coe = data->rx_coe;
-		plat->bugged_jumbo = data->bugged_jumbo;
-		plat->pmt = data->pmt;
-		plat->riwt_off = data->riwt_off;
-		plat->fix_mac_speed = data->fix_mac_speed;
-		plat->bus_setup = data->bus_setup;
-		plat->setup = data->setup;
-		plat->free = data->free;
-		plat->init = data->init;
-		plat->exit = data->exit;
-	}
+	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+	if (!plat)
+		return ERR_PTR(-ENOMEM);
 
 	*mac = of_get_mac_address(np);
 	plat->interface = of_get_phy_mode(np);
@@ -151,7 +135,7 @@
 	/* If phy-handle is not specified, check if we have a fixed-phy */
 	if (!plat->phy_node && of_phy_is_fixed_link(np)) {
 		if ((of_phy_register_fixed_link(np) < 0))
-			return -ENODEV;
+			return ERR_PTR(-ENODEV);
 
 		plat->phy_node = of_node_get(np);
 	}
@@ -182,6 +166,12 @@
 	 */
 	plat->maxmtu = JUMBO_LEN;
 
+	/* Set default value for multicast hash bins */
+	plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+	/* Set default value for unicast filter entries */
+	plat->unicast_filter_entries = 1;
+
 	/*
 	 * Currently only the properties needed on SPEAr600
 	 * are provided. All other properties should be added
@@ -222,7 +212,7 @@
 				       GFP_KERNEL);
 		if (!dma_cfg) {
 			of_node_put(np);
-			return -ENOMEM;
+			return ERR_PTR(-ENOMEM);
 		}
 		plat->dma_cfg = dma_cfg;
 		of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
@@ -240,44 +230,34 @@
 		pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
 	}
 
-	return 0;
+	return plat;
 }
 #else
-static int stmmac_probe_config_dt(struct platform_device *pdev,
-				  struct plat_stmmacenet_data *plat,
-				  const char **mac)
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 {
-	return -ENOSYS;
+	return ERR_PTR(-ENOSYS);
 }
 #endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
 
-/**
- * stmmac_pltfr_probe - platform driver probe.
- * @pdev: platform device pointer
- * Description: platform_device probe function. It is to allocate
- * the necessary platform resources, invoke custom helper (if required) and
- * invoke the main probe function.
- */
-int stmmac_pltfr_probe(struct platform_device *pdev)
+int stmmac_get_platform_resources(struct platform_device *pdev,
+				  struct stmmac_resources *stmmac_res)
 {
-	struct stmmac_resources stmmac_res;
-	int ret = 0;
 	struct resource *res;
-	struct device *dev = &pdev->dev;
-	struct plat_stmmacenet_data *plat_dat = NULL;
 
-	memset(&stmmac_res, 0, sizeof(stmmac_res));
+	memset(stmmac_res, 0, sizeof(*stmmac_res));
 
 	/* Get IRQ information early so we can ask for deferred probe if
 	 * needed before we go too far with resource allocation.
 	 */
-	stmmac_res.irq = platform_get_irq_byname(pdev, "macirq");
-	if (stmmac_res.irq < 0) {
-		if (stmmac_res.irq != -EPROBE_DEFER) {
-			dev_err(dev,
+	stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
+	if (stmmac_res->irq < 0) {
+		if (stmmac_res->irq != -EPROBE_DEFER) {
+			dev_err(&pdev->dev,
 				"MAC IRQ configuration information not found\n");
 		}
-		return stmmac_res.irq;
+		return stmmac_res->irq;
 	}
 
 	/* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
@@ -287,64 +267,23 @@
 	 * If the wake-up interrupt is not passed from the platform,
 	 * the driver will continue to use the mac irq (ndev->irq).
 	 */
-	stmmac_res.wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
-	if (stmmac_res.wol_irq < 0) {
-		if (stmmac_res.wol_irq == -EPROBE_DEFER)
+	stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+	if (stmmac_res->wol_irq < 0) {
+		if (stmmac_res->wol_irq == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
-		stmmac_res.wol_irq = stmmac_res.irq;
+		stmmac_res->wol_irq = stmmac_res->irq;
 	}
 
-	stmmac_res.lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
-	if (stmmac_res.lpi_irq == -EPROBE_DEFER)
+	stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+	if (stmmac_res->lpi_irq == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	stmmac_res.addr = devm_ioremap_resource(dev, res);
-	if (IS_ERR(stmmac_res.addr))
-		return PTR_ERR(stmmac_res.addr);
+	stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);
 
-	plat_dat = dev_get_platdata(&pdev->dev);
-
-	if (!plat_dat)
-		plat_dat = devm_kzalloc(&pdev->dev,
-					sizeof(struct plat_stmmacenet_data),
-					GFP_KERNEL);
-	if (!plat_dat) {
-		pr_err("%s: ERROR: no memory", __func__);
-		return  -ENOMEM;
-	}
-
-	/* Set default value for multicast hash bins */
-	plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
-
-	/* Set default value for unicast filter entries */
-	plat_dat->unicast_filter_entries = 1;
-
-	if (pdev->dev.of_node) {
-		ret = stmmac_probe_config_dt(pdev, plat_dat, &stmmac_res.mac);
-		if (ret) {
-			pr_err("%s: main dt probe failed", __func__);
-			return ret;
-		}
-	}
-
-	/* Custom setup (if needed) */
-	if (plat_dat->setup) {
-		plat_dat->bsp_priv = plat_dat->setup(pdev);
-		if (IS_ERR(plat_dat->bsp_priv))
-			return PTR_ERR(plat_dat->bsp_priv);
-	}
-
-	/* Custom initialisation (if needed)*/
-	if (plat_dat->init) {
-		ret = plat_dat->init(pdev, plat_dat->bsp_priv);
-		if (unlikely(ret))
-			return ret;
-	}
-
-	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	return PTR_ERR_OR_ZERO(stmmac_res->addr);
 }
-EXPORT_SYMBOL_GPL(stmmac_pltfr_probe);
+EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
 
 /**
  * stmmac_pltfr_remove
@@ -361,9 +300,6 @@
 	if (priv->plat->exit)
 		priv->plat->exit(pdev, priv->plat->bsp_priv);
 
-	if (priv->plat->free)
-		priv->plat->free(pdev, priv->plat->bsp_priv);
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 71da86d..ffeb8d9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -19,7 +19,14 @@
 #ifndef __STMMAC_PLATFORM_H__
 #define __STMMAC_PLATFORM_H__
 
-int stmmac_pltfr_probe(struct platform_device *pdev);
+#include "stmmac.h"
+
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac);
+
+int stmmac_get_platform_resources(struct platform_device *pdev,
+				  struct stmmac_resources *stmmac_res);
+
 int stmmac_pltfr_remove(struct platform_device *pdev);
 extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
 
diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig
new file mode 100644
index 0000000..a8f3151
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Kconfig
@@ -0,0 +1,27 @@
+#
+# Synopsys network device configuration
+#
+
+config NET_VENDOR_SYNOPSYS
+	bool "Synopsys devices"
+	default y
+	---help---
+	  If you have a network (Ethernet) device belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Synopsys devices. If you say Y, you will be asked
+	  for your specific device in the following questions.
+
+if NET_VENDOR_SYNOPSYS
+
+config SYNOPSYS_DWC_ETH_QOS
+	tristate "Sypnopsys DWC Ethernet QOS v4.10a support"
+	select PHYLIB
+	select CRC32
+	select MII
+	depends on OF
+	---help---
+	  This driver supports the DWC Ethernet QoS from Synopsys.
+
+endif # NET_VENDOR_SYNOPSYS
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
new file mode 100644
index 0000000..7a37572
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Synopsys network device drivers.
+#
+
+obj-$(CONFIG_SYNOPSYS_DWC_ETH_QOS) += dwc_eth_qos.o
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
new file mode 100644
index 0000000..85b3326
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -0,0 +1,3019 @@
+/*  Synopsys DWC Ethernet Quality-of-Service v4.10a Linux driver
+ *
+ *  This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC).
+ *  This version introduced many changes which break backwards
+ *  compatibility with the non-QoS IP from Synopsys (used in the ST Micro
+ *  drivers). Some fields differ between version 4.00a and 4.10a, mainly the
+ *  interrupt bit fields. The driver could be made compatible with 4.00a if
+ *  all relevant HW errata were handled.
+ *
+ *  The GMAC is highly configurable at synthesis time. This driver has been
+ *  developed for a subset of the total available feature set. Currently
+ *  it supports:
+ *  - TSO
+ *  - Checksum offload for RX and TX.
+ *  - Energy-efficient Ethernet (EEE).
+ *  - GMII phy interface.
+ *  - The statistics module.
+ *  - Single RX and TX queue.
+ *
+ *  Copyright (C) 2015 Axis Communications AB.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ethtool.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+
+#include <linux/phy.h>
+#include <linux/mii.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/timer.h>
+#include <linux/tcp.h>
+
+#define DRIVER_NAME			"dwceqos"
+#define DRIVER_DESCRIPTION		"Synopsys DWC Ethernet QoS driver"
+#define DRIVER_VERSION			"0.9"
+
+#define DWCEQOS_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+	NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
+#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */
+
+#define DWCEQOS_LPI_TIMER_MIN      8
+#define DWCEQOS_LPI_TIMER_MAX      ((1 << 20) - 1)
+
+#define DWCEQOS_RX_BUF_SIZE 2048
+
+#define DWCEQOS_RX_DCNT 256
+#define DWCEQOS_TX_DCNT 256
+
+#define DWCEQOS_HASH_TABLE_SIZE 64
+
+/* Maximum buffer bytes per DMA descriptor; the size field is 14 bits */
+#define BYTES_PER_DMA_DESC 16376
+
+/* Hardware registers */
+#define START_MAC_REG_OFFSET    0x0000
+#define MAX_MAC_REG_OFFSET      0x0bd0
+#define START_MTL_REG_OFFSET    0x0c00
+#define MAX_MTL_REG_OFFSET      0x0d7c
+#define START_DMA_REG_OFFSET    0x1000
+#define MAX_DMA_REG_OFFSET      0x117C
+
+#define REG_SPACE_SIZE          0x1800
+
+/* DMA */
+#define REG_DWCEQOS_DMA_MODE             0x1000
+#define REG_DWCEQOS_DMA_SYSBUS_MODE      0x1004
+#define REG_DWCEQOS_DMA_IS               0x1008
+#define REG_DWCEQOS_DMA_DEBUG_ST0        0x100c
+
+/* DMA channel registers */
+#define REG_DWCEQOS_DMA_CH0_CTRL         0x1100
+#define REG_DWCEQOS_DMA_CH0_TX_CTRL      0x1104
+#define REG_DWCEQOS_DMA_CH0_RX_CTRL      0x1108
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST  0x1114
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST  0x111c
+#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL  0x1120
+#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL  0x1128
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN   0x112c
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN   0x1130
+#define REG_DWCEQOS_DMA_CH0_IE           0x1134
+#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC   0x1144
+#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC   0x114c
+#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF    0x1154
+#define REG_DWCEQOS_DMA_CH0_CUR_RXBUF    0x115c
+#define REG_DWCEQOS_DMA_CH0_STA          0x1160
+
+#define DWCEQOS_DMA_MODE_TXPR            BIT(11)
+#define DWCEQOS_DMA_MODE_DA              BIT(1)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI   BIT(31)
+#define DWCEQOS_DMA_SYSBUS_MODE_FB       BIT(0)
+#define DWCEQOS_DMA_SYSBUS_MODE_AAL      BIT(12)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \
+	(((x) << 16) & 0x000F0000)
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT    3
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK       GENMASK(19, 16)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \
+	(((x) << 24) & 0x0F000000)
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT    3
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK       GENMASK(27, 24)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \
+	(((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT   GENMASK(3, 1)
+
+#define DWCEQOS_DMA_CH_CTRL_PBLX8       BIT(16)
+#define DWCEQOS_DMA_CH_CTRL_DSL(x)      ((x) << 18)
+
+#define DWCEQOS_DMA_CH_CTRL_PBL(x)       ((x) << 16)
+#define DWCEQOS_DMA_CH_CTRL_START         BIT(0)
+#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x)   ((x) << 1)
+#define DWCEQOS_DMA_CH_TX_OSP            BIT(4)
+#define DWCEQOS_DMA_CH_TX_TSE            BIT(12)
+
+#define DWCEQOS_DMA_CH0_IE_NIE           BIT(15)
+#define DWCEQOS_DMA_CH0_IE_AIE           BIT(14)
+#define DWCEQOS_DMA_CH0_IE_RIE           BIT(6)
+#define DWCEQOS_DMA_CH0_IE_TIE           BIT(0)
+#define DWCEQOS_DMA_CH0_IE_FBEE          BIT(12)
+#define DWCEQOS_DMA_CH0_IE_RBUE          BIT(7)
+
+#define DWCEQOS_DMA_IS_DC0IS             BIT(0)
+#define DWCEQOS_DMA_IS_MTLIS             BIT(16)
+#define DWCEQOS_DMA_IS_MACIS             BIT(17)
+
+#define DWCEQOS_DMA_CH0_IS_TI            BIT(0)
+#define DWCEQOS_DMA_CH0_IS_RI            BIT(6)
+#define DWCEQOS_DMA_CH0_IS_RBU           BIT(7)
+#define DWCEQOS_DMA_CH0_IS_FBE           BIT(12)
+#define DWCEQOS_DMA_CH0_IS_CDE           BIT(13)
+#define DWCEQOS_DMA_CH0_IS_AIS           BIT(14)
+
+#define DWCEQOS_DMA_CH0_IS_TEB           GENMASK(18, 16)
+#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ   BIT(16)
+#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR  BIT(17)
+
+#define DWCEQOS_DMA_CH0_IS_REB           GENMASK(21, 19)
+#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ   BIT(19)
+#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR  BIT(20)
+
+/* DMA descriptor bits for RX normal descriptor (read format) */
+#define DWCEQOS_DMA_RDES3_OWN     BIT(31)
+#define DWCEQOS_DMA_RDES3_INTE    BIT(30)
+#define DWCEQOS_DMA_RDES3_BUF2V   BIT(25)
+#define DWCEQOS_DMA_RDES3_BUF1V   BIT(24)
+
+/* DMA descriptor bits for RX normal descriptor (write back format) */
+#define DWCEQOS_DMA_RDES1_IPCE    BIT(7)
+#define DWCEQOS_DMA_RDES3_ES      BIT(15)
+#define DWCEQOS_DMA_RDES3_E_JT    BIT(14)
+#define DWCEQOS_DMA_RDES3_PL(x)   ((x) & 0x7fff)
+#define DWCEQOS_DMA_RDES1_PT      0x00000007
+#define DWCEQOS_DMA_RDES1_PT_UDP  BIT(0)
+#define DWCEQOS_DMA_RDES1_PT_TCP  BIT(1)
+#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003
+
+/* DMA descriptor bits for TX normal descriptor (read format) */
+#define DWCEQOS_DMA_TDES2_IOC     BIT(31)
+#define DWCEQOS_DMA_TDES3_OWN     BIT(31)
+#define DWCEQOS_DMA_TDES3_CTXT    BIT(30)
+#define DWCEQOS_DMA_TDES3_FD      BIT(29)
+#define DWCEQOS_DMA_TDES3_LD      BIT(28)
+#define DWCEQOS_DMA_TDES3_CIPH    BIT(16)
+#define DWCEQOS_DMA_TDES3_CIPP    BIT(17)
+#define DWCEQOS_DMA_TDES3_CA      0x00030000
+#define DWCEQOS_DMA_TDES3_TSE     BIT(18)
+#define DWCEQOS_DMA_DES3_THL(x)   ((x) << 19)
+#define DWCEQOS_DMA_DES2_B2L(x)   ((x) << 16)
+
+#define DWCEQOS_DMA_TDES3_TCMSSV    BIT(26)
+
+/* DMA channel states */
+#define DMA_TX_CH_STOPPED   0
+#define DMA_TX_CH_SUSPENDED 6
+
+#define DMA_GET_TX_STATE_CH0(status0) ((status0 & 0xF000) >> 12)
+
+/* MTL */
+#define REG_DWCEQOS_MTL_OPER             0x0c00
+#define REG_DWCEQOS_MTL_DEBUG_ST         0x0c0c
+#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST    0x0d08
+#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST    0x0d38
+
+#define REG_DWCEQOS_MTL_IS               0x0c20
+#define REG_DWCEQOS_MTL_TXQ0_OPER        0x0d00
+#define REG_DWCEQOS_MTL_RXQ0_OPER        0x0d30
+#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT     0x0d34
+#define REG_DWCEQOS_MTL_RXQ0_CTRL         0x0d3c
+
+#define REG_DWCEQOS_MTL_Q0_ISCTRL         0x0d2c
+
+#define DWCEQOS_MTL_SCHALG_STRICT        0x00000060
+
+#define DWCEQOS_MTL_TXQ_TXQEN            BIT(3)
+#define DWCEQOS_MTL_TXQ_TSF              BIT(1)
+#define DWCEQOS_MTL_TXQ_FTQ              BIT(0)
+#define DWCEQOS_MTL_TXQ_TTC512           0x00000070
+
+#define DWCEQOS_MTL_TXQ_SIZE(x)          ((((x) - 256) & 0xff00) << 8)
+
+#define DWCEQOS_MTL_RXQ_SIZE(x)          ((((x) - 256) & 0xff00) << 12)
+#define DWCEQOS_MTL_RXQ_EHFC             BIT(7)
+#define DWCEQOS_MTL_RXQ_DIS_TCP_EF       BIT(6)
+#define DWCEQOS_MTL_RXQ_FEP              BIT(4)
+#define DWCEQOS_MTL_RXQ_FUP              BIT(3)
+#define DWCEQOS_MTL_RXQ_RSF              BIT(5)
+#define DWCEQOS_MTL_RXQ_RTC32            BIT(0)
+
+/* MAC */
+#define REG_DWCEQOS_MAC_CFG              0x0000
+#define REG_DWCEQOS_MAC_EXT_CFG          0x0004
+#define REG_DWCEQOS_MAC_PKT_FILT         0x0008
+#define REG_DWCEQOS_MAC_WD_TO            0x000c
+#define REG_DWCEQOS_HASTABLE_LO          0x0010
+#define REG_DWCEQOS_HASTABLE_HI          0x0014
+#define REG_DWCEQOS_MAC_IS               0x00b0
+#define REG_DWCEQOS_MAC_IE               0x00b4
+#define REG_DWCEQOS_MAC_STAT             0x00b8
+#define REG_DWCEQOS_MAC_MDIO_ADDR        0x0200
+#define REG_DWCEQOS_MAC_MDIO_DATA        0x0204
+#define REG_DWCEQOS_MAC_MAC_ADDR0_HI     0x0300
+#define REG_DWCEQOS_MAC_MAC_ADDR0_LO     0x0304
+#define REG_DWCEQOS_MAC_RXQ0_CTRL0       0x00a0
+#define REG_DWCEQOS_MAC_HW_FEATURE0      0x011c
+#define REG_DWCEQOS_MAC_HW_FEATURE1      0x0120
+#define REG_DWCEQOS_MAC_HW_FEATURE2      0x0124
+#define REG_DWCEQOS_MAC_HASHTABLE_LO     0x0010
+#define REG_DWCEQOS_MAC_HASHTABLE_HI     0x0014
+#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS  0x00d0
+#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL  0x00d4
+#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER  0x00d8
+#define REG_DWCEQOS_MAC_1US_TIC_COUNTER  0x00dc
+#define REG_DWCEQOS_MAC_RX_FLOW_CTRL     0x0090
+#define REG_DWCEQOS_MAC_Q0_TX_FLOW	 0x0070
+
+#define DWCEQOS_MAC_CFG_ACS              BIT(20)
+#define DWCEQOS_MAC_CFG_JD               BIT(17)
+#define DWCEQOS_MAC_CFG_JE               BIT(16)
+#define DWCEQOS_MAC_CFG_PS               BIT(15)
+#define DWCEQOS_MAC_CFG_FES              BIT(14)
+#define DWCEQOS_MAC_CFG_DM               BIT(13)
+#define DWCEQOS_MAC_CFG_DO               BIT(10)
+#define DWCEQOS_MAC_CFG_TE               BIT(1)
+#define DWCEQOS_MAC_CFG_IPC              BIT(27)
+#define DWCEQOS_MAC_CFG_RE               BIT(0)
+
+#define DWCEQOS_ADDR_HIGH(reg)           (0x00000300 + (reg * 8))
+#define DWCEQOS_ADDR_LOW(reg)            (0x00000304 + (reg * 8))
+
+#define DWCEQOS_MAC_IS_LPI_INT           BIT(5)
+#define DWCEQOS_MAC_IS_MMC_INT           BIT(8)
+
+#define DWCEQOS_MAC_RXQ_EN               BIT(1)
+#define DWCEQOS_MAC_MAC_ADDR_HI_EN       BIT(31)
+#define DWCEQOS_MAC_PKT_FILT_RA          BIT(31)
+#define DWCEQOS_MAC_PKT_FILT_HPF         BIT(10)
+#define DWCEQOS_MAC_PKT_FILT_SAF         BIT(9)
+#define DWCEQOS_MAC_PKT_FILT_SAIF        BIT(8)
+#define DWCEQOS_MAC_PKT_FILT_DBF         BIT(5)
+#define DWCEQOS_MAC_PKT_FILT_PM          BIT(4)
+#define DWCEQOS_MAC_PKT_FILT_DAIF        BIT(3)
+#define DWCEQOS_MAC_PKT_FILT_HMC         BIT(2)
+#define DWCEQOS_MAC_PKT_FILT_HUC         BIT(1)
+#define DWCEQOS_MAC_PKT_FILT_PR          BIT(0)
+
+#define DWCEQOS_MAC_MDIO_ADDR_CR(x)      (((x & 15)) << 8)
+#define DWCEQOS_MAC_MDIO_ADDR_CR_20      2
+#define DWCEQOS_MAC_MDIO_ADDR_CR_35      3
+#define DWCEQOS_MAC_MDIO_ADDR_CR_60      0
+#define DWCEQOS_MAC_MDIO_ADDR_CR_100     1
+#define DWCEQOS_MAC_MDIO_ADDR_CR_150     4
+#define DWCEQOS_MAC_MDIO_ADDR_CR_250     5
+#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ   0x0000000c
+#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE  BIT(2)
+#define DWCEQOS_MAC_MDIO_ADDR_GB         BIT(0)
+
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN  BIT(0)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX  BIT(1)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN  BIT(2)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX  BIT(3)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST  BIT(8)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST  BIT(9)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN   BIT(16)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS     BIT(17)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN   BIT(18)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA  BIT(19)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE   BIT(20)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21)
+
+#define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x)  ((x) & GENMASK(11, 0))
+
+#define DWCEQOS_LPI_CTRL_ENABLE_EEE      (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \
+					  DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \
+					  DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN)
+
+#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)
+
+#define DWCEQOS_MAC_Q0_TX_FLOW_TFE   BIT(1)
+#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time)	((time) << 16)
+#define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4)
+
+/* Features */
+#define DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16)
+#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14)
+#define DWCEQOS_MAC_HW_FEATURE0_HDSEL    BIT(2)
+#define DWCEQOS_MAC_HW_FEATURE0_EEESEL   BIT(13)
+#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL  BIT(1)
+#define DWCEQOS_MAC_HW_FEATURE0_MIISEL   BIT(0)
+
+#define DWCEQOS_MAC_HW_FEATURE1_TSOEN    BIT(18)
+#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) ((128 << ((x) & 0x7c0)) >> 6)
+#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x)  (128 << ((x) & 0x1f))
+
+#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \
+	(1 + (((feature1) & 0x1fc0000) >> 18))
+
+#define DWCEQOS_MDIO_PHYADDR(x)     (((x) & 0x1f) << 21)
+#define DWCEQOS_MDIO_PHYREG(x)      (((x) & 0x1f) << 16)
+
+#define DWCEQOS_DMA_MODE_SWR            BIT(0)
+
+/* Mac Management Counters */
+#define REG_DWCEQOS_MMC_CTRL             0x0700
+#define REG_DWCEQOS_MMC_RXIRQ            0x0704
+#define REG_DWCEQOS_MMC_TXIRQ            0x0708
+#define REG_DWCEQOS_MMC_RXIRQMASK        0x070c
+#define REG_DWCEQOS_MMC_TXIRQMASK        0x0710
+
+#define DWCEQOS_MMC_CTRL_CNTRST          BIT(0)
+#define DWCEQOS_MMC_CTRL_RSTONRD         BIT(2)
+
+#define DWC_MMC_TXLPITRANSCNTR           0x07F0
+#define DWC_MMC_TXLPIUSCNTR              0x07EC
+#define DWC_MMC_TXOVERSIZE_G             0x0778
+#define DWC_MMC_TXVLANPACKETS_G          0x0774
+#define DWC_MMC_TXPAUSEPACKETS           0x0770
+#define DWC_MMC_TXEXCESSDEF              0x076C
+#define DWC_MMC_TXPACKETCOUNT_G          0x0768
+#define DWC_MMC_TXOCTETCOUNT_G           0x0764
+#define DWC_MMC_TXCARRIERERROR           0x0760
+#define DWC_MMC_TXEXCESSCOL              0x075C
+#define DWC_MMC_TXLATECOL                0x0758
+#define DWC_MMC_TXDEFERRED               0x0754
+#define DWC_MMC_TXMULTICOL_G             0x0750
+#define DWC_MMC_TXSINGLECOL_G            0x074C
+#define DWC_MMC_TXUNDERFLOWERROR         0x0748
+#define DWC_MMC_TXBROADCASTPACKETS_GB    0x0744
+#define DWC_MMC_TXMULTICASTPACKETS_GB    0x0740
+#define DWC_MMC_TXUNICASTPACKETS_GB      0x073C
+#define DWC_MMC_TX1024TOMAXOCTETS_GB     0x0738
+#define DWC_MMC_TX512TO1023OCTETS_GB     0x0734
+#define DWC_MMC_TX256TO511OCTETS_GB      0x0730
+#define DWC_MMC_TX128TO255OCTETS_GB      0x072C
+#define DWC_MMC_TX65TO127OCTETS_GB       0x0728
+#define DWC_MMC_TX64OCTETS_GB            0x0724
+#define DWC_MMC_TXMULTICASTPACKETS_G     0x0720
+#define DWC_MMC_TXBROADCASTPACKETS_G     0x071C
+#define DWC_MMC_TXPACKETCOUNT_GB         0x0718
+#define DWC_MMC_TXOCTETCOUNT_GB          0x0714
+
+#define DWC_MMC_RXLPITRANSCNTR           0x07F8
+#define DWC_MMC_RXLPIUSCNTR              0x07F4
+#define DWC_MMC_RXCTRLPACKETS_G          0x07E4
+#define DWC_MMC_RXRCVERROR               0x07E0
+#define DWC_MMC_RXWATCHDOG               0x07DC
+#define DWC_MMC_RXVLANPACKETS_GB         0x07D8
+#define DWC_MMC_RXFIFOOVERFLOW           0x07D4
+#define DWC_MMC_RXPAUSEPACKETS           0x07D0
+#define DWC_MMC_RXOUTOFRANGETYPE         0x07CC
+#define DWC_MMC_RXLENGTHERROR            0x07C8
+#define DWC_MMC_RXUNICASTPACKETS_G       0x07C4
+#define DWC_MMC_RX1024TOMAXOCTETS_GB     0x07C0
+#define DWC_MMC_RX512TO1023OCTETS_GB     0x07BC
+#define DWC_MMC_RX256TO511OCTETS_GB      0x07B8
+#define DWC_MMC_RX128TO255OCTETS_GB      0x07B4
+#define DWC_MMC_RX65TO127OCTETS_GB       0x07B0
+#define DWC_MMC_RX64OCTETS_GB            0x07AC
+#define DWC_MMC_RXOVERSIZE_G             0x07A8
+#define DWC_MMC_RXUNDERSIZE_G            0x07A4
+#define DWC_MMC_RXJABBERERROR            0x07A0
+#define DWC_MMC_RXRUNTERROR              0x079C
+#define DWC_MMC_RXALIGNMENTERROR         0x0798
+#define DWC_MMC_RXCRCERROR               0x0794
+#define DWC_MMC_RXMULTICASTPACKETS_G     0x0790
+#define DWC_MMC_RXBROADCASTPACKETS_G     0x078C
+#define DWC_MMC_RXOCTETCOUNT_G           0x0788
+#define DWC_MMC_RXOCTETCOUNT_GB          0x0784
+#define DWC_MMC_RXPACKETCOUNT_GB         0x0780
+
+static int debug = 3;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
+
+/* DMA ring descriptor. These bookkeeping entries shadow the HW DMA ring. */
+struct ring_desc {
+	struct sk_buff *skb;
+	dma_addr_t mapping;
+	size_t len;
+};
+
+/* DMA hardware descriptor */
+struct dwceqos_dma_desc {
+	u32	des0;
+	u32	des1;
+	u32	des2;
+	u32	des3;
+} ____cacheline_aligned;
+
+struct dwceqos_mmc_counters {
+	__u64 txlpitranscntr;
+	__u64 txpiuscntr;
+	__u64 txoversize_g;
+	__u64 txvlanpackets_g;
+	__u64 txpausepackets;
+	__u64 txexcessdef;
+	__u64 txpacketcount_g;
+	__u64 txoctetcount_g;
+	__u64 txcarriererror;
+	__u64 txexcesscol;
+	__u64 txlatecol;
+	__u64 txdeferred;
+	__u64 txmulticol_g;
+	__u64 txsinglecol_g;
+	__u64 txunderflowerror;
+	__u64 txbroadcastpackets_gb;
+	__u64 txmulticastpackets_gb;
+	__u64 txunicastpackets_gb;
+	__u64 tx1024tomaxoctets_gb;
+	__u64 tx512to1023octets_gb;
+	__u64 tx256to511octets_gb;
+	__u64 tx128to255octets_gb;
+	__u64 tx65to127octets_gb;
+	__u64 tx64octets_gb;
+	__u64 txmulticastpackets_g;
+	__u64 txbroadcastpackets_g;
+	__u64 txpacketcount_gb;
+	__u64 txoctetcount_gb;
+
+	__u64 rxlpitranscntr;
+	__u64 rxlpiuscntr;
+	__u64 rxctrlpackets_g;
+	__u64 rxrcverror;
+	__u64 rxwatchdog;
+	__u64 rxvlanpackets_gb;
+	__u64 rxfifooverflow;
+	__u64 rxpausepackets;
+	__u64 rxoutofrangetype;
+	__u64 rxlengtherror;
+	__u64 rxunicastpackets_g;
+	__u64 rx1024tomaxoctets_gb;
+	__u64 rx512to1023octets_gb;
+	__u64 rx256to511octets_gb;
+	__u64 rx128to255octets_gb;
+	__u64 rx65to127octets_gb;
+	__u64 rx64octets_gb;
+	__u64 rxoversize_g;
+	__u64 rxundersize_g;
+	__u64 rxjabbererror;
+	__u64 rxrunterror;
+	__u64 rxalignmenterror;
+	__u64 rxcrcerror;
+	__u64 rxmulticastpackets_g;
+	__u64 rxbroadcastpackets_g;
+	__u64 rxoctetcount_g;
+	__u64 rxoctetcount_gb;
+	__u64 rxpacketcount_gb;
+};
+
+/* Ethtool statistics */
+
+struct dwceqos_stat {
+	const char stat_name[ETH_GSTRING_LEN];
+	int   offset;
+};
+
+#define STAT_ITEM(name, var) \
+	{\
+		name,\
+		offsetof(struct dwceqos_mmc_counters, var),\
+	}
+
+static const struct dwceqos_stat dwceqos_ethtool_stats[] = {
+	STAT_ITEM("tx_bytes", txoctetcount_gb),
+	STAT_ITEM("tx_packets", txpacketcount_gb),
+	STAT_ITEM("tx_unicst_packets", txunicastpackets_gb),
+	STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb),
+	STAT_ITEM("tx_multicast_packets",  txmulticastpackets_gb),
+	STAT_ITEM("tx_pause_packets", txpausepackets),
+	STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb),
+	STAT_ITEM("tx_65_to_127_byte_packets",  tx65to127octets_gb),
+	STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb),
+	STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb),
+	STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
+	STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb),
+	STAT_ITEM("tx_underflow_errors", txunderflowerror),
+	STAT_ITEM("tx_lpi_count", txlpitranscntr),
+
+	STAT_ITEM("rx_bytes", rxoctetcount_gb),
+	STAT_ITEM("rx_packets", rxpacketcount_gb),
+	STAT_ITEM("rx_unicast_packets", rxunicastpackets_g),
+	STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g),
+	STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g),
+	STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb),
+	STAT_ITEM("rx_pause_packets", rxpausepackets),
+	STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb),
+	STAT_ITEM("rx_65_to_127_byte_packets",  rx65to127octets_gb),
+	STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb),
+	STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb),
+	STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
+	STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb),
+	STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow),
+	STAT_ITEM("rx_oversize_packets", rxoversize_g),
+	STAT_ITEM("rx_undersize_packets", rxundersize_g),
+	STAT_ITEM("rx_jabbers", rxjabbererror),
+	STAT_ITEM("rx_align_errors", rxalignmenterror),
+	STAT_ITEM("rx_crc_errors", rxcrcerror),
+	STAT_ITEM("rx_lpi_count", rxlpitranscntr),
+};
+
+/* Configuration of AXI bus parameters.
+ * These values depend on the parameters set on the MAC core as well
+ * as the AXI interconnect.
+ */
+struct dwceqos_bus_cfg {
+	/* Enable AXI low-power interface. */
+	bool en_lpi;
+	/* Limit on number of outstanding AXI write requests. */
+	u32 write_requests;
+	/* Limit on number of outstanding AXI read requests. */
+	u32 read_requests;
+	/* Bitmap of allowed AXI burst lengths, 4-256 beats. */
+	u32 burst_map;
+	/* DMA programmable burst lengths. */
+	u32 tx_pbl;
+	u32 rx_pbl;
+};
+
+struct dwceqos_flowcontrol {
+	int autoneg;
+	int rx;
+	int rx_current;
+	int tx;
+	int tx_current;
+};
+
+struct net_local {
+	void __iomem *baseaddr;
+	struct clk *phy_ref_clk;
+	struct clk *apb_pclk;
+
+	struct device_node *phy_node;
+	struct net_device *ndev;
+	struct platform_device *pdev;
+
+	u32 msg_enable;
+
+	struct tasklet_struct tx_bdreclaim_tasklet;
+	struct workqueue_struct *txtimeout_handler_wq;
+	struct work_struct txtimeout_reinit;
+
+	phy_interface_t phy_interface;
+	struct phy_device *phy_dev;
+	struct mii_bus *mii_bus;
+
+	unsigned int link;
+	unsigned int speed;
+	unsigned int duplex;
+
+	struct napi_struct napi;
+
+	/* DMA Descriptor Areas */
+	struct ring_desc *rx_skb;
+	struct ring_desc *tx_skb;
+
+	struct dwceqos_dma_desc *tx_descs;
+	struct dwceqos_dma_desc *rx_descs;
+
+	/* DMA-mapped descriptor areas. */
+	dma_addr_t tx_descs_addr;
+	dma_addr_t rx_descs_addr;
+	dma_addr_t tx_descs_tail_addr;
+	dma_addr_t rx_descs_tail_addr;
+
+	size_t tx_free;
+	size_t tx_next;
+	size_t rx_cur;
+	size_t tx_cur;
+
+	/* Spinlocks for accessing DMA Descriptors */
+	spinlock_t tx_lock;
+
+	/* Spinlock for register read-modify-writes. */
+	spinlock_t hw_lock;
+
+	u32 feature0;
+	u32 feature1;
+	u32 feature2;
+
+	struct dwceqos_bus_cfg bus_cfg;
+	bool en_tx_lpi_clockgating;
+
+	int eee_enabled;
+	int eee_active;
+	int csr_val;
+	u32 gso_size;
+
+	struct dwceqos_mmc_counters mmc_counters;
+	/* Protect the mmc_counter updates. */
+	spinlock_t stats_lock;
+	u32 mmc_rx_counters_mask;
+	u32 mmc_tx_counters_mask;
+
+	struct dwceqos_flowcontrol flowcontrol;
+};
+
+static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
+				      u32 tx_mask);
+
+static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
+				  unsigned int reg_n);
+static int dwceqos_stop(struct net_device *ndev);
+static int dwceqos_open(struct net_device *ndev);
+static void dwceqos_tx_poll_demand(struct net_local *lp);
+
+static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable);
+static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable);
+
+static void dwceqos_reset_state(struct net_local *lp);
+
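+/* MMIO register accessors; the relaxed variants omit the memory barriers
+ * of plain readl/writel.
+ */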
+#define dwceqos_read(lp, reg)						\
+	readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg))
+#define dwceqos_write(lp, reg, val)					\
+	writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg))
+
+static void dwceqos_reset_state(struct net_local *lp)
+{
+	lp->link    = 0;
+	lp->speed   = 0;
+	lp->duplex  = DUPLEX_UNKNOWN;
+	lp->flowcontrol.rx_current = 0;
+	lp->flowcontrol.tx_current = 0;
+	lp->eee_active = 0;
+	lp->eee_enabled = 0;
+}
+
+static void print_descriptor(struct net_local *lp, int index, int tx)
+{
+	struct dwceqos_dma_desc *dd;
+
+	if (tx)
+		dd = (struct dwceqos_dma_desc *)&lp->tx_descs[index];
+	else
+		dd = (struct dwceqos_dma_desc *)&lp->rx_descs[index];
+
+	pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? "TX" : "RX",
+		index, dd);
+	pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2,
+		dd->des3);
+}
+
+static void print_status(struct net_local *lp)
+{
+	size_t desci, i;
+
+	pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free,
+		lp->tx_cur, lp->tx_next);
+
+	print_descriptor(lp, lp->rx_cur, 0);
+
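+	/* Dump the entire TX ring, starting ten descriptors behind tx_cur. */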
+	for (desci = (lp->tx_cur - 10) % DWCEQOS_TX_DCNT, i = 0;
+		 i < DWCEQOS_TX_DCNT;
+		 ++i) {
+		print_descriptor(lp, desci, 1);
+		desci = (desci + 1) % DWCEQOS_TX_DCNT;
+	}
+
+	pr_info("DMA_Debug_Status0:          0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0));
+	pr_info("DMA_CH0_Status:             0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_DMA_IS));
+	pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n",
+		dwceqos_read(lp, 0x1144));
+	pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n",
+		dwceqos_read(lp, 0x1154));
+	pr_info("MTL_Debug_Status:      0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST));
+	pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST));
+	pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST));
+	pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC),
+		dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC));
+}
+
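+/* Derive the MDIO clock-divider (CSR) setting from the APB clock rate. */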
+static void dwceqos_mdio_set_csr(struct net_local *lp)
+{
+	int rate = clk_get_rate(lp->apb_pclk);
+
+	if (rate <= 20000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20;
+	else if (rate <= 35000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35;
+	else if (rate <= 60000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60;
+	else if (rate <= 100000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100;
+	else if (rate <= 150000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150;
+	else if (rate <= 250000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250;
+}
+
+/* Simple MDIO functions implementing mii_bus */
+static int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg)
+{
+	struct net_local *lp = bus->priv;
+	u32 regval;
+	int i;
+	int data;
+
+	regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
+		DWCEQOS_MDIO_PHYREG(phyreg) |
+		DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
+		DWCEQOS_MAC_MDIO_ADDR_GB |
+		DWCEQOS_MAC_MDIO_ADDR_GOC_READ;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
+
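+	/* Poll for the GB (busy) bit; HW clears it when the transfer is done. */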
+	for (i = 0; i < 5; ++i) {
+		usleep_range(64, 128);
+		if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
+		      DWCEQOS_MAC_MDIO_ADDR_GB))
+			break;
+	}
+
+	data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA);
+	if (i == 5) {
+		netdev_warn(lp->ndev, "MDIO read timed out\n");
+		data = 0xffff;
+	}
+
+	return data & 0xffff;
+}
+
+static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
+			      u16 value)
+{
+	struct net_local *lp = bus->priv;
+	u32 regval;
+	int i;
+
+	dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value);
+
+	regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
+		DWCEQOS_MDIO_PHYREG(phyreg) |
+		DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
+		DWCEQOS_MAC_MDIO_ADDR_GB |
+		DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
+
+	for (i = 0; i < 5; ++i) {
+		usleep_range(64, 128);
+		if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
+		      DWCEQOS_MAC_MDIO_ADDR_GB))
+			break;
+	}
+	if (i == 5)
+		netdev_warn(lp->ndev, "MDIO write timed out\n");
+	return 0;
+}
+
+static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct phy_device *phydev = lp->phy_dev;
+
+	if (!netif_running(ndev))
+		return -EINVAL;
+
+	if (!phydev)
+		return -ENODEV;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return phy_mii_ioctl(phydev, rq, cmd);
+	default:
+		dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd);
+		return -EOPNOTSUPP;
+	}
+}
+
+static void dwceqos_link_down(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	/* Indicate link down to the LPI state machine */
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+	regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_link_up(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	/* Indicate link up to the LPI state machine */
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+	regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+
+	lp->eee_active = !phy_init_eee(lp->phy_dev, 0);
+
+	/* Check for changed EEE capability */
+	if (!lp->eee_active && lp->eee_enabled) {
+		lp->eee_enabled = 0;
+
+		spin_lock_irqsave(&lp->hw_lock, flags);
+		regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+		regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
+		dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+		spin_unlock_irqrestore(&lp->hw_lock, flags);
+	}
+}
+
+static void dwceqos_set_speed(struct net_local *lp)
+{
+	struct phy_device *phydev = lp->phy_dev;
+	u32 regval;
+
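+	/* PS and FES encode the line speed: PS alone is 10 Mbit/s, PS|FES is
+	 * 100 Mbit/s, and neither bit set selects gigabit. DM is full duplex.
+	 */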
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+	regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES |
+		    DWCEQOS_MAC_CFG_DM);
+
+	if (phydev->duplex)
+		regval |= DWCEQOS_MAC_CFG_DM;
+	if (phydev->speed == SPEED_10) {
+		regval |= DWCEQOS_MAC_CFG_PS;
+	} else if (phydev->speed == SPEED_100) {
+		regval |= DWCEQOS_MAC_CFG_PS |
+			DWCEQOS_MAC_CFG_FES;
+	} else if (phydev->speed != SPEED_1000) {
+		netdev_err(lp->ndev,
+			   "unknown PHY speed %d\n",
+			   phydev->speed);
+		return;
+	}
+
+	dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval);
+}
+
+static void dwceqos_adjust_link(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct phy_device *phydev = lp->phy_dev;
+	int status_change = 0;
+
+	if (phydev->link) {
+		if ((lp->speed != phydev->speed) ||
+		    (lp->duplex != phydev->duplex)) {
+			dwceqos_set_speed(lp);
+
+			lp->speed = phydev->speed;
+			lp->duplex = phydev->duplex;
+			status_change = 1;
+		}
+
+		if (lp->flowcontrol.autoneg) {
+			lp->flowcontrol.rx = phydev->pause ||
+					     phydev->asym_pause;
+			lp->flowcontrol.tx = phydev->pause ||
+					     phydev->asym_pause;
+		}
+
+		if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) {
+			if (netif_msg_link(lp))
+				netdev_dbg(ndev, "set rx flow to %d\n",
+					   lp->flowcontrol.rx);
+			dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx);
+			lp->flowcontrol.rx_current = lp->flowcontrol.rx;
+		}
+		if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) {
+			if (netif_msg_link(lp))
+				netdev_dbg(ndev, "set tx flow to %d\n",
+					   lp->flowcontrol.tx);
+			dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx);
+			lp->flowcontrol.tx_current = lp->flowcontrol.tx;
+		}
+	}
+
+	if (phydev->link != lp->link) {
+		lp->link = phydev->link;
+		status_change = 1;
+	}
+
+	if (status_change) {
+		if (phydev->link) {
+			lp->ndev->trans_start = jiffies;
+			dwceqos_link_up(lp);
+		} else {
+			dwceqos_link_down(lp);
+		}
+		phy_print_status(phydev);
+	}
+}
+
+static int dwceqos_mii_probe(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct phy_device *phydev = NULL;
+
+	if (lp->phy_node) {
+		phydev = of_phy_connect(lp->ndev,
+					lp->phy_node,
+					&dwceqos_adjust_link,
+					0,
+					lp->phy_interface);
+
+		if (!phydev) {
+			netdev_err(ndev, "no PHY found\n");
+			return -ENODEV;
+		}
+	} else {
+		netdev_err(ndev, "no PHY configured\n");
+		return -ENODEV;
+	}
+
+	if (netif_msg_probe(lp))
+		netdev_dbg(lp->ndev,
+			   "phydev %p, phydev->phy_id 0xa%x, phydev->addr 0x%x\n",
+			   phydev, phydev->phy_id, phydev->addr);
+
+	phydev->supported &= PHY_GBIT_FEATURES;
+
+	lp->link    = 0;
+	lp->speed   = 0;
+	lp->duplex  = DUPLEX_UNKNOWN;
+	lp->phy_dev = phydev;
+
+	if (netif_msg_probe(lp)) {
+		netdev_dbg(lp->ndev, "phy_addr 0x%x, phy_id 0x%08x\n",
+			   lp->phy_dev->addr, lp->phy_dev->phy_id);
+
+		netdev_dbg(lp->ndev, "attach [%s] phy driver\n",
+			   lp->phy_dev->drv->name);
+	}
+
+	return 0;
+}
+
+static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index)
+{
+	struct sk_buff *new_skb;
+	dma_addr_t new_skb_baddr = 0;
+
+	new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
+	if (!new_skb) {
+		netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index);
+		goto err_out;
+	}
+
+	new_skb_baddr = dma_map_single(lp->ndev->dev.parent,
+				       new_skb->data, DWCEQOS_RX_BUF_SIZE,
+				       DMA_FROM_DEVICE);
+	if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
+		netdev_err(lp->ndev, "DMA map error\n");
+		dev_kfree_skb(new_skb);
+		new_skb = NULL;
+		goto err_out;
+	}
+
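+	/* Publish the buffer to the DMA: address in des0, then mark the
+	 * descriptor buffer-valid, interrupt-enabled and DMA-owned in des3.
+	 */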
+	lp->rx_descs[index].des0 = new_skb_baddr;
+	lp->rx_descs[index].des1 = 0;
+	lp->rx_descs[index].des2 = 0;
+	lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE |
+				   DWCEQOS_DMA_RDES3_BUF1V |
+				   DWCEQOS_DMA_RDES3_OWN;
+
+	lp->rx_skb[index].mapping = new_skb_baddr;
+	lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE;
+
+err_out:
+	lp->rx_skb[index].skb = new_skb;
+}
+
+static void dwceqos_clean_rings(struct net_local *lp)
+{
+	int i;
+
+	if (lp->rx_skb) {
+		for (i = 0; i < DWCEQOS_RX_DCNT; i++) {
+			if (lp->rx_skb[i].skb) {
+				dma_unmap_single(lp->ndev->dev.parent,
+						 lp->rx_skb[i].mapping,
+						 lp->rx_skb[i].len,
+						 DMA_FROM_DEVICE);
+
+				dev_kfree_skb(lp->rx_skb[i].skb);
+				lp->rx_skb[i].skb = NULL;
+				lp->rx_skb[i].mapping = 0;
+			}
+		}
+	}
+
+	if (lp->tx_skb) {
+		for (i = 0; i < DWCEQOS_TX_DCNT; i++) {
+			if (lp->tx_skb[i].skb) {
+				dev_kfree_skb(lp->tx_skb[i].skb);
+				lp->tx_skb[i].skb = NULL;
+			}
+			if (lp->tx_skb[i].mapping) {
+				dma_unmap_single(lp->ndev->dev.parent,
+						 lp->tx_skb[i].mapping,
+						 lp->tx_skb[i].len,
+						 DMA_TO_DEVICE);
+				lp->tx_skb[i].mapping = 0;
+			}
+		}
+	}
+}
+
+static void dwceqos_descriptor_free(struct net_local *lp)
+{
+	int size;
+
+	dwceqos_clean_rings(lp);
+
+	kfree(lp->tx_skb);
+	lp->tx_skb = NULL;
+	kfree(lp->rx_skb);
+	lp->rx_skb = NULL;
+
+	size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
+	if (lp->rx_descs) {
+		dma_free_coherent(lp->ndev->dev.parent, size,
+				  (void *)(lp->rx_descs), lp->rx_descs_addr);
+		lp->rx_descs = NULL;
+	}
+
+	size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
+	if (lp->tx_descs) {
+		dma_free_coherent(lp->ndev->dev.parent, size,
+				  (void *)(lp->tx_descs), lp->tx_descs_addr);
+		lp->tx_descs = NULL;
+	}
+}
+
+static int dwceqos_descriptor_init(struct net_local *lp)
+{
+	int size;
+	u32 i;
+
+	lp->gso_size = 0;
+
+	lp->tx_skb = NULL;
+	lp->rx_skb = NULL;
+	lp->rx_descs = NULL;
+	lp->tx_descs = NULL;
+
+	/* Reset the DMA indexes */
+	lp->rx_cur = 0;
+	lp->tx_cur = 0;
+	lp->tx_next = 0;
+	lp->tx_free = DWCEQOS_TX_DCNT;
+
+	/* Allocate Ring descriptors */
+	size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc);
+	lp->rx_skb = kzalloc(size, GFP_KERNEL);
+	if (!lp->rx_skb)
+		goto err_out;
+
+	size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc);
+	lp->tx_skb = kzalloc(size, GFP_KERNEL);
+	if (!lp->tx_skb)
+		goto err_out;
+
+	/* Allocate DMA descriptors */
+	size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
+	lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
+			&lp->rx_descs_addr, 0);
+	if (!lp->rx_descs)
+		goto err_out;
+	lp->rx_descs_tail_addr = lp->rx_descs_addr +
+		sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT;
+
+	size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
+	lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
+			&lp->tx_descs_addr, 0);
+	if (!lp->tx_descs)
+		goto err_out;
+	lp->tx_descs_tail_addr = lp->tx_descs_addr +
+		sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT;
+
+	/* Initialize RX Ring Descriptors and buffers */
+	for (i = 0; i < DWCEQOS_RX_DCNT; ++i) {
+		dwceqos_alloc_rxring_desc(lp, i);
+		if (!(lp->rx_skb[lp->rx_cur].skb))
+			goto err_out;
+	}
+
+	/* Initialize TX Descriptors */
+	for (i = 0; i < DWCEQOS_TX_DCNT; ++i) {
+		lp->tx_descs[i].des0 = 0;
+		lp->tx_descs[i].des1 = 0;
+		lp->tx_descs[i].des2 = 0;
+		lp->tx_descs[i].des3 = 0;
+	}
+
+	/* Make descriptor writes visible to the DMA. */
+	wmb();
+
+	return 0;
+
+err_out:
+	dwceqos_descriptor_free(lp);
+	return -ENOMEM;
+}
+
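+/* An RX descriptor is ready for the CPU once the DMA clears the OWN bit. */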
+static int dwceqos_packet_avail(struct net_local *lp)
+{
+	return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN);
+}
+
+static void dwceqos_get_hwfeatures(struct net_local *lp)
+{
+	lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0);
+	lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1);
+	lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2);
+}
+
+static void dwceqos_dma_enable_txirq(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+	regval |= DWCEQOS_DMA_CH0_IE_TIE;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_disable_txirq(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+	regval &= ~DWCEQOS_DMA_CH0_IE_TIE;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_enable_rxirq(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+	regval |= DWCEQOS_DMA_CH0_IE_RIE;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_disable_rxirq(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+	regval &= ~DWCEQOS_DMA_CH0_IE_RIE;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_enable_mmc_interrupt(struct net_local *lp)
+{
+	dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0);
+	dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0);
+}
+
+static int dwceqos_mii_init(struct net_local *lp)
+{
+	int ret = -ENXIO, i;
+	struct resource res;
+	struct device_node *mdionode;
+
+	mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio");
+
+	if (!mdionode)
+		return 0;
+
+	lp->mii_bus = mdiobus_alloc();
+	if (!lp->mii_bus) {
+		ret = -ENOMEM;
+		goto err_out;
+	}
+
+	lp->mii_bus->name  = "DWCEQOS MII bus";
+	lp->mii_bus->read  = &dwceqos_mdio_read;
+	lp->mii_bus->write = &dwceqos_mdio_write;
+	lp->mii_bus->priv = lp;
+	lp->mii_bus->parent = &lp->ndev->dev;
+
+	lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+	if (!lp->mii_bus->irq) {
+		ret = -ENOMEM;
+		goto err_out_free_mdiobus;
+	}
+
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		lp->mii_bus->irq[i] = PHY_POLL;
+	of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
+	snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
+		 (unsigned long long)res.start);
+	if (of_mdiobus_register(lp->mii_bus, mdionode))
+		goto err_out_free_mdio_irq;
+
+	return 0;
+
+err_out_free_mdio_irq:
+	kfree(lp->mii_bus->irq);
+err_out_free_mdiobus:
+	mdiobus_free(lp->mii_bus);
+err_out:
+	of_node_put(mdionode);
+	return ret;
+}
+
+/* DMA reset. When issued also resets all MTL and MAC registers as well */
+static void dwceqos_reset_hw(struct net_local *lp)
+{
+	/* Wait (at most) 0.5 seconds for DMA reset. */
+	int i = 5000;
+	u32 reg;
+
+	/* Force gigabit to guarantee a TX clock for GMII. */
+	reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+	reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES);
+	reg |= DWCEQOS_MAC_CFG_DM;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg);
+
+	dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR);
+
+	do {
+		udelay(100);
+		i--;
+		reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE);
+	} while ((reg & DWCEQOS_DMA_MODE_SWR) && i);
+	/* We might experience a timeout if the chip clock mux is broken */
+	if (!i)
+		netdev_err(lp->ndev, "DMA reset timed out!\n");
+}
+
+static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status)
+{
+	if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) {
+		netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n",
+			   dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ?
+				"read" : "write",
+			   dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ?
+				"descr" : "data",
+			   dma_status);
+
+		print_status(lp);
+	}
+	if (dma_status & DWCEQOS_DMA_CH0_IS_REB) {
+		netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n",
+			   dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ?
+				"read" : "write",
+			   dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ?
+				"descr" : "data",
+			   dma_status);
+
+		print_status(lp);
+	}
+}
+
+static void dwceqos_mmc_interrupt(struct net_local *lp)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->stats_lock, flags);
+
+	/* A latched MMC interrupt cannot be masked, so we must read
+	 * all the counters while an interrupt is pending.
+	 */
+	dwceqos_read_mmc_counters(lp,
+				  dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ),
+				  dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ));
+
+	spin_unlock_irqrestore(&lp->stats_lock, flags);
+}
+
+static void dwceqos_mac_interrupt(struct net_local *lp)
+{
+	u32 cause;
+
+	cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS);
+
+	if (cause & DWCEQOS_MAC_IS_MMC_INT)
+		dwceqos_mmc_interrupt(lp);
+}
+
+static irqreturn_t dwceqos_interrupt(int irq, void *dev_id)
+{
+	struct net_device *ndev = dev_id;
+	struct net_local *lp = netdev_priv(ndev);
+
+	u32 cause;
+	u32 dma_status;
+	irqreturn_t ret = IRQ_NONE;
+
+	cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS);
+	/* DMA Channel 0 Interrupt */
+	if (cause & DWCEQOS_DMA_IS_DC0IS) {
+		dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA);
+
+		/* Transmit Interrupt */
+		if (dma_status & DWCEQOS_DMA_CH0_IS_TI) {
+			tasklet_schedule(&lp->tx_bdreclaim_tasklet);
+			dwceqos_dma_disable_txirq(lp);
+		}
+
+		/* Receive Interrupt */
+		if (dma_status & DWCEQOS_DMA_CH0_IS_RI) {
+			/* Disable RX IRQs */
+			dwceqos_dma_disable_rxirq(lp);
+			napi_schedule(&lp->napi);
+		}
+
+		/* Fatal Bus Error interrupt */
+		if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) {
+			dwceqos_fatal_bus_error(lp, dma_status);
+
+			/* errata 9000831707 */
+			dma_status |= DWCEQOS_DMA_CH0_IS_TEB |
+				      DWCEQOS_DMA_CH0_IS_REB;
+		}
+
+		/* Ack all DMA Channel 0 IRQs */
+		dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status);
+		ret = IRQ_HANDLED;
+	}
+
+	if (cause & DWCEQOS_DMA_IS_MTLIS) {
+		u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL);
+
+		dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val);
+		ret = IRQ_HANDLED;
+	}
+
+	if (cause & DWCEQOS_DMA_IS_MACIS) {
+		dwceqos_mac_interrupt(lp);
+		ret = IRQ_HANDLED;
+	}
+	return ret;
+}
+
+static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL);
+	if (enable)
+		regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
+	else
+		regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval);
+
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+
+	/* MTL flow control */
+	regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
+	if (enable)
+		regval |= DWCEQOS_MTL_RXQ_EHFC;
+	else
+		regval &= ~DWCEQOS_MTL_RXQ_EHFC;
+
+	dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+
+	/* MAC flow control */
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW);
+	if (enable)
+		regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE;
+	else
+		regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
+
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_configure_flow_control(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+	int RQS, RFD, RFA;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+
+	regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
+
+	/* The queue size is in units of 256 bytes. We want 512 bytes units for
+	 * the threshold fields.
+	 */
+	RQS = ((regval >> 20) & 0x3FF) + 1;
+	RQS /= 2;
+
+	/* The thresholds are relative to a full queue, with a bias
+	 * of 1 KiByte below full.
+	 */
+	RFD = RQS / 2 - 2;
+	RFA = RQS / 8 - 2;
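+	/* RFA is the threshold that asserts flow control, RFD the one that
+	 * deasserts it again (both in the 512-byte units computed above).
+	 */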
+
+	regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8);
+
+	if (RFD >= 0 && RFA >= 0) {
+		dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+	} else {
+		netdev_warn(lp->ndev,
+			    "FIFO too small for flow control.");
+	}
+
+	regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) |
+		 DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS;
+
+	dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
+
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_configure_clock(struct net_local *lp)
+{
+	unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000;
+
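+	/* Program how many APB clock cycles, minus one, make up 1us. */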
+	BUG_ON(!rate_mhz);
+
+	dwceqos_write(lp,
+		      REG_DWCEQOS_MAC_1US_TIC_COUNTER,
+		      DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1));
+}
+
+static void dwceqos_configure_bus(struct net_local *lp)
+{
+	u32 sysbus_reg;
+
+	/* N.B. We do not support the Fixed Burst mode because it
+	 * opens a race window by making HW access to DMA descriptors
+	 * non-atomic.
+	 */
+
+	sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL;
+
+	if (lp->bus_cfg.en_lpi)
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI;
+
+	if (lp->bus_cfg.burst_map)
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
+			lp->bus_cfg.burst_map);
+	else
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
+			DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT);
+
+	if (lp->bus_cfg.read_requests)
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
+			lp->bus_cfg.read_requests - 1);
+	else
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
+			DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT);
+
+	if (lp->bus_cfg.write_requests)
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
+			lp->bus_cfg.write_requests - 1);
+	else
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
+			DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT);
+
+	if (netif_msg_hw(lp))
+		netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg);
+
+	dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg);
+}
+
+static void dwceqos_init_hw(struct net_local *lp)
+{
+	u32 regval;
+	u32 buswidth;
+	u32 dma_skip;
+
+	/* Software reset */
+	dwceqos_reset_hw(lp);
+
+	dwceqos_configure_bus(lp);
+
+	/* Probe data bus width, 32/64/128 bits. */
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF);
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL);
+	buswidth = (regval ^ 0xF) + 1;
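+	/* The tail pointer ignores the low address bits that bus-width
+	 * alignment forces to zero, so the read-back reveals the width.
+	 */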
+
+	/* Cache-align dma descriptors. */
+	dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL,
+		      DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) |
+		      DWCEQOS_DMA_CH_CTRL_PBLX8);
+
+	/* Initialize DMA Channel 0 */
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST,
+		      (u32)lp->tx_descs_addr);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST,
+		      (u32)lp->rx_descs_addr);
+
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
+		      lp->tx_descs_tail_addr);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
+		      lp->rx_descs_tail_addr);
+
+	if (lp->bus_cfg.tx_pbl)
+		regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl);
+	else
+		regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
+
+	/* Enable TSO if the HW supports it. */
+	if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
+		regval |= DWCEQOS_DMA_CH_TX_TSE;
+
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval);
+
+	if (lp->bus_cfg.rx_pbl)
+		regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl);
+	else
+		regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
+
+	regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_RX_BUF_SIZE);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
+
+	regval |= DWCEQOS_DMA_CH_CTRL_START;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
+
+	/* Initialize MTL Queues */
+	regval = DWCEQOS_MTL_SCHALG_STRICT;
+	dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval);
+
+	regval = DWCEQOS_MTL_TXQ_SIZE(
+			DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) |
+		DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF |
+		DWCEQOS_MTL_TXQ_TTC512;
+	dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval);
+
+	regval = DWCEQOS_MTL_RXQ_SIZE(
+			DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) |
+		DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP | DWCEQOS_MTL_RXQ_RSF;
+	dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+
+	dwceqos_configure_flow_control(lp);
+
+	/* Initialize MAC */
+	dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+
+	lp->eee_enabled = 0;
+
+	dwceqos_configure_clock(lp);
+
+	/* MMC counters */
+
+	/* probe implemented counters */
+	dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u);
+	dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u);
+	lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK);
+	lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK);
+
+	dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST |
+		DWCEQOS_MMC_CTRL_RSTONRD);
+	dwceqos_enable_mmc_interrupt(lp);
+
+	/* Enable Interrupts */
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
+		      DWCEQOS_DMA_CH0_IE_NIE |
+		      DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
+		      DWCEQOS_DMA_CH0_IE_AIE |
+		      DWCEQOS_DMA_CH0_IE_FBEE);
+
+	dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
+
+	dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
+		DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+
+	/* Start TX DMA */
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL,
+		      regval | DWCEQOS_DMA_CH_CTRL_START);
+
+	/* Enable MAC TX/RX */
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+	dwceqos_write(lp, REG_DWCEQOS_MAC_CFG,
+		      regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+}
+
+static void dwceqos_tx_reclaim(unsigned long data)
+{
+	struct net_device *ndev = (struct net_device *)data;
+	struct net_local *lp = netdev_priv(ndev);
+	unsigned int tx_bytes = 0;
+	unsigned int tx_packets = 0;
+
+	spin_lock(&lp->tx_lock);
+
+	while (lp->tx_free < DWCEQOS_TX_DCNT) {
+		struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur];
+		struct ring_desc *rd = &lp->tx_skb[lp->tx_cur];
+
+		/* Descriptor still held by the DMA? */
+		if (dd->des3 & DWCEQOS_DMA_TDES3_OWN)
+			break;
+
+		if (rd->mapping)
+			dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len,
+					 DMA_TO_DEVICE);
+
+		if (unlikely(rd->skb)) {
+			++tx_packets;
+			tx_bytes += rd->skb->len;
+			dev_consume_skb_any(rd->skb);
+		}
+
+		rd->skb = NULL;
+		rd->mapping = 0;
+		lp->tx_free++;
+		lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT;
+
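+		/* The error-summary bit is assumed to sit at the same
+		 * position in TDES3 and RDES3, hence the RDES3_ES mask on
+		 * this TX descriptor.
+		 */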
+		if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) &&
+		    (dd->des3 & DWCEQOS_DMA_RDES3_ES)) {
+			if (netif_msg_tx_err(lp))
+				netdev_err(ndev, "TX Error, TDES3 = 0x%x\n",
+					   dd->des3);
+			if (netif_msg_hw(lp))
+				print_status(lp);
+		}
+	}
+	spin_unlock(&lp->tx_lock);
+
+	netdev_completed_queue(ndev, tx_packets, tx_bytes);
+
+	dwceqos_dma_enable_txirq(lp);
+	netif_wake_queue(ndev);
+}
+
+static int dwceqos_rx(struct net_local *lp, int budget)
+{
+	struct sk_buff *skb;
+	u32 tot_size = 0;
+	unsigned int n_packets = 0;
+	unsigned int n_descs = 0;
+	u32 len;
+
+	struct dwceqos_dma_desc *dd;
+	struct sk_buff *new_skb;
+	dma_addr_t new_skb_baddr = 0;
+
+	while (n_descs < budget) {
+		if (!dwceqos_packet_avail(lp))
+			break;
+
+		new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
+		if (!new_skb) {
+			netdev_err(lp->ndev, "no memory for new sk_buff\n");
+			break;
+		}
+
+		/* Get dma handle of skb->data */
+		new_skb_baddr = (u32)dma_map_single(lp->ndev->dev.parent,
+					new_skb->data,
+					DWCEQOS_RX_BUF_SIZE,
+					DMA_FROM_DEVICE);
+		if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
+			netdev_err(lp->ndev, "DMA map error\n");
+			dev_kfree_skb(new_skb);
+			break;
+		}
+
+		/* Read descriptor data after reading owner bit. */
+		dma_rmb();
+
+		dd = &lp->rx_descs[lp->rx_cur];
+		len = DWCEQOS_DMA_RDES3_PL(dd->des3);
+		skb = lp->rx_skb[lp->rx_cur].skb;
+
+		/* Unmap old buffer */
+		dma_unmap_single(lp->ndev->dev.parent,
+				 lp->rx_skb[lp->rx_cur].mapping,
+				 lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE);
+
+		/* Discard packet on reception error or bad checksum */
+		if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) ||
+		    (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) {
+			dev_kfree_skb(skb);
+			skb = NULL;
+		} else {
+			skb_put(skb, len);
+			skb->protocol = eth_type_trans(skb, lp->ndev);
+			switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) {
+			case DWCEQOS_DMA_RDES1_PT_UDP:
+			case DWCEQOS_DMA_RDES1_PT_TCP:
+			case DWCEQOS_DMA_RDES1_PT_ICMP:
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				break;
+			default:
+				skb->ip_summed = CHECKSUM_NONE;
+				break;
+			}
+		}
+
+		if (unlikely(!skb)) {
+			if (netif_msg_rx_err(lp))
+				netdev_dbg(lp->ndev, "rx error: des3=%X\n",
+					   lp->rx_descs[lp->rx_cur].des3);
+		} else {
+			tot_size += skb->len;
+			n_packets++;
+
+			netif_receive_skb(skb);
+		}
+
+		lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr;
+		lp->rx_descs[lp->rx_cur].des1 = 0;
+		lp->rx_descs[lp->rx_cur].des2 = 0;
+		/* The DMA must observe des0/1/2 written before des3. */
+		wmb();
+		lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE |
+						DWCEQOS_DMA_RDES3_OWN  |
+						DWCEQOS_DMA_RDES3_BUF1V;
+
+		lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr;
+		lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE;
+		lp->rx_skb[lp->rx_cur].skb = new_skb;
+
+		n_descs++;
+		lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT;
+	}
+
+	/* Make sure any ownership update is written to the descriptors before
+	 * DMA wakeup.
+	 */
+	wmb();
+
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI);
+	/* Wake up RX by writing tail pointer */
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
+		      lp->rx_descs_tail_addr);
+
+	return n_descs;
+}
+
+static int dwceqos_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct net_local *lp = container_of(napi, struct net_local, napi);
+	int work_done;
+
+	work_done = dwceqos_rx(lp, budget);
+
+	if (!dwceqos_packet_avail(lp) && work_done < budget) {
+		napi_complete(napi);
+		dwceqos_dma_enable_rxirq(lp);
+	} else {
+		work_done = budget;
+	}
+
+	return work_done;
+}
+
+/* Reinitialize the hardware if a TX timeout occurred. */
+static void dwceqos_reinit_for_txtimeout(struct work_struct *data)
+{
+	struct net_local *lp = container_of(data, struct net_local,
+		txtimeout_reinit);
+
+	netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n",
+		   DWCEQOS_TX_TIMEOUT);
+
+	if (netif_msg_hw(lp))
+		print_status(lp);
+
+	rtnl_lock();
+	dwceqos_stop(lp->ndev);
+	dwceqos_open(lp->ndev);
+	rtnl_unlock();
+}
+
+/* DT probing helper called from the main probe function. */
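+/* Example device-tree fragment (illustrative values only):
+ *
+ *	ethernet@e000b000 {
+ *		compatible = "snps,dwc-qos-ethernet-4.10";
+ *		snps,en-lpi;
+ *		snps,write-requests = <2>;
+ *		snps,read-requests = <16>;
+ *		snps,burst-map = <0x7>;
+ *		snps,txpbl = <8>;
+ *		snps,rxpbl = <2>;
+ *	};
+ */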
+static inline int dwceqos_probe_config_dt(struct platform_device *pdev)
+{
+	struct net_device *ndev;
+	struct net_local *lp;
+	const void *mac_address;
+	struct dwceqos_bus_cfg *bus_cfg;
+	struct device_node *np = pdev->dev.of_node;
+
+	ndev = platform_get_drvdata(pdev);
+	lp = netdev_priv(ndev);
+	bus_cfg = &lp->bus_cfg;
+
+	/* Set the MAC address. */
+	mac_address = of_get_mac_address(pdev->dev.of_node);
+	if (mac_address)
+		ether_addr_copy(ndev->dev_addr, mac_address);
+
+	/* These are all optional parameters */
+	lp->en_tx_lpi_clockgating =  of_property_read_bool(np,
+		"snps,en-tx-lpi-clockgating");
+	bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi");
+	of_property_read_u32(np, "snps,write-requests",
+			     &bus_cfg->write_requests);
+	of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests);
+	of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map);
+	of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl);
+	of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl);
+
+	netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%d\n",
+		   bus_cfg->en_lpi,
+		   bus_cfg->write_requests,
+		   bus_cfg->read_requests,
+		   bus_cfg->burst_map,
+		   bus_cfg->rx_pbl,
+		   bus_cfg->tx_pbl);
+
+	return 0;
+}
+
+static int dwceqos_open(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	int res;
+
+	dwceqos_reset_state(lp);
+	res = dwceqos_descriptor_init(lp);
+	if (res) {
+		netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res);
+		return res;
+	}
+	netdev_reset_queue(ndev);
+
+	napi_enable(&lp->napi);
+	phy_start(lp->phy_dev);
+	dwceqos_init_hw(lp);
+
+	netif_start_queue(ndev);
+	tasklet_enable(&lp->tx_bdreclaim_tasklet);
+
+	return 0;
+}
+
+static bool dweqos_is_tx_dma_suspended(struct net_local *lp)
+{
+	u32 reg;
+
+	reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0);
+	reg = DMA_GET_TX_STATE_CH0(reg);
+
+	return reg == DMA_TX_CH_SUSPENDED;
+}
+
+static void dwceqos_drain_dma(struct net_local *lp)
+{
+	/* Wait for all pending TX buffers to be sent. Upper limit based
+	 * on max frame size on a 10 Mbit link.
+	 */
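+	/* With 100-200 us sleeps per iteration this waits at least
+	 * DWCEQOS_TX_DCNT * 1.25 ms in total, roughly one full-size frame
+	 * time per descriptor at 10 Mbit/s.
+	 */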
+	size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100;
+
+	while (!dweqos_is_tx_dma_suspended(lp) && limit--)
+		usleep_range(100, 200);
+}
+
+static int dwceqos_stop(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+
+	phy_stop(lp->phy_dev);
+
+	tasklet_disable(&lp->tx_bdreclaim_tasklet);
+	netif_stop_queue(ndev);
+	napi_disable(&lp->napi);
+
+	dwceqos_drain_dma(lp);
+
+	netif_tx_lock(lp->ndev);
+	dwceqos_reset_hw(lp);
+	dwceqos_descriptor_free(lp);
+	netif_tx_unlock(lp->ndev);
+
+	return 0;
+}
+
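+/* Emit a TX context descriptor announcing a new MSS (gso_size) to the DMA;
+ * subsequent TSO descriptors use this MSS until another context descriptor
+ * changes it.
+ */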
+static void dwceqos_dmadesc_set_ctx(struct net_local *lp,
+				    unsigned short gso_size)
+{
+	struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next];
+
+	dd->des0 = 0;
+	dd->des1 = 0;
+	dd->des2 = gso_size;
+	dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV;
+
+	lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+}
+
+static void dwceqos_tx_poll_demand(struct net_local *lp)
+{
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
+		      lp->tx_descs_tail_addr);
+}
+
+struct dwceqos_tx {
+	size_t nr_descriptors;
+	size_t initial_descriptor;
+	size_t last_descriptor;
+	size_t prev_gso_size;
+	size_t network_header_len;
+};
+
+static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp,
+			       struct dwceqos_tx *tx)
+{
+	size_t n = 1;
+	size_t i;
+
+	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size)
+		++n;
+
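+	/* n counts one descriptor for the linear part and, when the MSS
+	 * changed above, one context descriptor; the loop below adds one
+	 * per BYTES_PER_DMA_DESC chunk of each fragment (rounded up).
+	 */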
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		n +=  (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) /
+		       BYTES_PER_DMA_DESC;
+	}
+
+	tx->nr_descriptors = n;
+	tx->initial_descriptor = lp->tx_next;
+	tx->last_descriptor = lp->tx_next;
+	tx->prev_gso_size = lp->gso_size;
+
+	tx->network_header_len = skb_transport_offset(skb);
+	if (skb_is_gso(skb))
+		tx->network_header_len += tcp_hdrlen(skb);
+}
+
+static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp,
+			     struct dwceqos_tx *tx)
+{
+	struct ring_desc *rd;
+	struct dwceqos_dma_desc *dd;
+	size_t payload_len;
+	dma_addr_t dma_handle;
+
+	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) {
+		dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size);
+		lp->gso_size = skb_shinfo(skb)->gso_size;
+	}
+
+	dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data,
+				    skb_headlen(skb), DMA_TO_DEVICE);
+
+	if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
+		netdev_err(lp->ndev, "TX DMA Mapping error\n");
+		return -ENOMEM;
+	}
+
+	rd = &lp->tx_skb[lp->tx_next];
+	dd = &lp->tx_descs[lp->tx_next];
+
+	rd->skb = NULL;
+	rd->len = skb_headlen(skb);
+	rd->mapping = dma_handle;
+
+	/* Set up DMA Descriptor */
+	dd->des0 = dma_handle;
+
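+	/* For TSO, des1 addresses the payload that follows the headers,
+	 * des2 combines the header length with the buffer payload length,
+	 * and des3 carries the TSE flag, the TCP header length and the
+	 * total TCP payload length.
+	 */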
+	if (skb_is_gso(skb)) {
+		payload_len = skb_headlen(skb) - tx->network_header_len;
+
+		if (payload_len)
+			dd->des1 = dma_handle + tx->network_header_len;
+		dd->des2 = tx->network_header_len |
+			DWCEQOS_DMA_DES2_B2L(payload_len);
+		dd->des3 = DWCEQOS_DMA_TDES3_TSE |
+			DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) |
+			(skb->len - tx->network_header_len);
+	} else {
+		dd->des1 = 0;
+		dd->des2 = skb_headlen(skb);
+		dd->des3 = skb->len;
+
+		switch (skb->ip_summed) {
+		case CHECKSUM_PARTIAL:
+			dd->des3 |= DWCEQOS_DMA_TDES3_CA;
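+			/* Fall through */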
+		case CHECKSUM_NONE:
+		case CHECKSUM_UNNECESSARY:
+		case CHECKSUM_COMPLETE:
+		default:
+			break;
+		}
+	}
+
+	dd->des3 |= DWCEQOS_DMA_TDES3_FD;
+	if (lp->tx_next  != tx->initial_descriptor)
+		dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+	tx->last_descriptor = lp->tx_next;
+	lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+
+	return 0;
+}
+
+static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp,
+			    struct dwceqos_tx *tx)
+{
+	struct ring_desc *rd = NULL;
+	struct dwceqos_dma_desc *dd;
+	dma_addr_t dma_handle;
+	size_t i;
+
+	/* Set up additional ring and DMA descriptors if the packet is fragmented */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		size_t frag_size;
+		size_t consumed_size;
+
+		/* Map DMA Area */
+		dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0,
+					      skb_frag_size(frag),
+					      DMA_TO_DEVICE);
+		if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
+			netdev_err(lp->ndev, "DMA Mapping error\n");
+			return -ENOMEM;
+		}
+
+		/* order-3 fragments span more than one descriptor. */
+		frag_size = skb_frag_size(frag);
+		consumed_size = 0;
+		while (consumed_size < frag_size) {
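+			/* 16376 is assumed to equal BYTES_PER_DMA_DESC: the
+			 * largest 8-byte-aligned length that fits the 14-bit
+			 * buffer-length field (maximum 16383).
+			 */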
+			size_t dma_size = min_t(size_t, 16376,
+						frag_size - consumed_size);
+
+			rd = &lp->tx_skb[lp->tx_next];
+			memset(rd, 0, sizeof(*rd));
+
+			dd = &lp->tx_descs[lp->tx_next];
+
+			/* Set DMA Descriptor fields */
+			dd->des0 = dma_handle;
+			dd->des1 = 0;
+			dd->des2 = dma_size;
+
+			if (skb_is_gso(skb))
+				dd->des3 = (skb->len - tx->network_header_len);
+			else
+				dd->des3 = skb->len;
+
+			dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+			tx->last_descriptor = lp->tx_next;
+			lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+			consumed_size += dma_size;
+		}
+
+		rd->len = skb_frag_size(frag);
+		rd->mapping = dma_handle;
+	}
+
+	return 0;
+}
+
+static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp,
+				struct dwceqos_tx *tx)
+{
+	lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD;
+	lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC;
+
+	lp->tx_skb[tx->last_descriptor].skb = skb;
+
+	/* Make all descriptor updates visible to the DMA before setting the
+	 * owner bit.
+	 */
+	wmb();
+
+	lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+	/* Make the owner bit visible before TX wakeup. */
+	wmb();
+
+	dwceqos_tx_poll_demand(lp);
+}
+
+static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx)
+{
+	size_t i = tx->initial_descriptor;
+
+	while (i != lp->tx_next) {
+		if (lp->tx_skb[i].mapping)
+			dma_unmap_single(lp->ndev->dev.parent,
+					 lp->tx_skb[i].mapping,
+					 lp->tx_skb[i].len,
+					 DMA_TO_DEVICE);
+
+		lp->tx_skb[i].mapping = 0;
+		lp->tx_skb[i].skb = NULL;
+
+		memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i]));
+
+		i = (i + 1) % DWCEQOS_TX_DCNT;
+	}
+
+	lp->tx_next = tx->initial_descriptor;
+	lp->gso_size = tx->prev_gso_size;
+}
+
+static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct dwceqos_tx trans;
+	int err;
+
+	dwceqos_tx_prepare(skb, lp, &trans);
+	if (lp->tx_free < trans.nr_descriptors) {
+		netif_stop_queue(ndev);
+		return NETDEV_TX_BUSY;
+	}
+
+	err = dwceqos_tx_linear(skb, lp, &trans);
+	if (err)
+		goto tx_error;
+
+	err = dwceqos_tx_frags(skb, lp, &trans);
+	if (err)
+		goto tx_error;
+
+	WARN_ON(lp->tx_next !=
+		((trans.initial_descriptor + trans.nr_descriptors) %
+		 DWCEQOS_TX_DCNT));
+
+	dwceqos_tx_finalize(skb, lp, &trans);
+
+	netdev_sent_queue(ndev, skb->len);
+
+	spin_lock_bh(&lp->tx_lock);
+	lp->tx_free -= trans.nr_descriptors;
+	spin_unlock_bh(&lp->tx_lock);
+
+	ndev->trans_start = jiffies;
+	return NETDEV_TX_OK;
+
+tx_error:
+	dwceqos_tx_rollback(lp, &trans);
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+/* Set MAC address and then update HW accordingly */
+static int dwceqos_set_mac_address(struct net_device *ndev, void *addr)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct sockaddr *hwaddr = (struct sockaddr *)addr;
+
+	if (netif_running(ndev))
+		return -EBUSY;
+
+	if (!is_valid_ether_addr(hwaddr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len);
+
+	dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+	return 0;
+}
+
+static void dwceqos_tx_timeout(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+
+	queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit);
+}
+
+static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
+				  unsigned int reg_n)
+{
+	unsigned long data;
+
+	data = (addr[5] << 8) | addr[4];
+	dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n),
+		      data | DWCEQOS_MAC_MAC_ADDR_HI_EN);
+	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+	dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data);
+}
+
+static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n)
+{
+	/* Do not disable MAC address 0 */
+	if (reg_n != 0)
+		dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0);
+}
+
+static void dwceqos_set_rx_mode(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	u32 regval = 0;
+	u32 mc_filter[2];
+	int reg = 1;
+	struct netdev_hw_addr *ha;
+	unsigned int max_mac_addr;
+
+	max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1);
+
+	if (ndev->flags & IFF_PROMISC) {
+		regval = DWCEQOS_MAC_PKT_FILT_PR;
+	} else if ((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) ||
+		   (ndev->flags & IFF_ALLMULTI)) {
+		regval = DWCEQOS_MAC_PKT_FILT_PM;
+		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff);
+		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff);
+	} else if (!netdev_mc_empty(ndev)) {
+		regval = DWCEQOS_MAC_PKT_FILT_HMC;
+		memset(mc_filter, 0, sizeof(mc_filter));
+		netdev_for_each_mc_addr(ha, ndev) {
+			/* The upper 6 bits of the calculated CRC are used to
+			 * index the contents of the hash table
+			 */
+			int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
+			/* The most significant bit determines the register
+			 * to use (H/L) while the other 5 bits determine
+			 * the bit within the register.
+			 */
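+			/* e.g. bit_nr 47 selects mc_filter[1] bit 15. */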
+			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+		}
+		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]);
+		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]);
+	}
+	if (netdev_uc_count(ndev) > max_mac_addr) {
+		regval |= DWCEQOS_MAC_PKT_FILT_PR;
+	} else {
+		netdev_for_each_uc_addr(ha, ndev) {
+			dwceqos_set_umac_addr(lp, ha->addr, reg);
+			reg++;
+		}
+		for (; reg < DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); reg++)
+			dwceqos_disable_umac_addr(lp, reg);
+	}
+	dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void dwceqos_poll_controller(struct net_device *ndev)
+{
+	disable_irq(ndev->irq);
+	dwceqos_interrupt(ndev->irq, ndev);
+	enable_irq(ndev->irq);
+}
+#endif
+
+static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
+				      u32 tx_mask)
+{
+	if (tx_mask & BIT(27))
+		lp->mmc_counters.txlpitranscntr +=
+			dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR);
+	if (tx_mask & BIT(26))
+		lp->mmc_counters.txpiuscntr +=
+			dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR);
+	if (tx_mask & BIT(25))
+		lp->mmc_counters.txoversize_g +=
+			dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G);
+	if (tx_mask & BIT(24))
+		lp->mmc_counters.txvlanpackets_g +=
+			dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G);
+	if (tx_mask & BIT(23))
+		lp->mmc_counters.txpausepackets +=
+			dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS);
+	if (tx_mask & BIT(22))
+		lp->mmc_counters.txexcessdef +=
+			dwceqos_read(lp, DWC_MMC_TXEXCESSDEF);
+	if (tx_mask & BIT(21))
+		lp->mmc_counters.txpacketcount_g +=
+			dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G);
+	if (tx_mask & BIT(20))
+		lp->mmc_counters.txoctetcount_g +=
+			dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G);
+	if (tx_mask & BIT(19))
+		lp->mmc_counters.txcarriererror +=
+			dwceqos_read(lp, DWC_MMC_TXCARRIERERROR);
+	if (tx_mask & BIT(18))
+		lp->mmc_counters.txexcesscol +=
+			dwceqos_read(lp, DWC_MMC_TXEXCESSCOL);
+	if (tx_mask & BIT(17))
+		lp->mmc_counters.txlatecol +=
+			dwceqos_read(lp, DWC_MMC_TXLATECOL);
+	if (tx_mask & BIT(16))
+		lp->mmc_counters.txdeferred +=
+			dwceqos_read(lp, DWC_MMC_TXDEFERRED);
+	if (tx_mask & BIT(15))
+		lp->mmc_counters.txmulticol_g +=
+			dwceqos_read(lp, DWC_MMC_TXMULTICOL_G);
+	if (tx_mask & BIT(14))
+		lp->mmc_counters.txsinglecol_g +=
+			dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G);
+	if (tx_mask & BIT(13))
+		lp->mmc_counters.txunderflowerror +=
+			dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR);
+	if (tx_mask & BIT(12))
+		lp->mmc_counters.txbroadcastpackets_gb +=
+			dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB);
+	if (tx_mask & BIT(11))
+		lp->mmc_counters.txmulticastpackets_gb +=
+			dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB);
+	if (tx_mask & BIT(10))
+		lp->mmc_counters.txunicastpackets_gb +=
+			dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB);
+	if (tx_mask & BIT(9))
+		lp->mmc_counters.tx1024tomaxoctets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB);
+	if (tx_mask & BIT(8))
+		lp->mmc_counters.tx512to1023octets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB);
+	if (tx_mask & BIT(7))
+		lp->mmc_counters.tx256to511octets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB);
+	if (tx_mask & BIT(6))
+		lp->mmc_counters.tx128to255octets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB);
+	if (tx_mask & BIT(5))
+		lp->mmc_counters.tx65to127octets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB);
+	if (tx_mask & BIT(4))
+		lp->mmc_counters.tx64octets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB);
+	if (tx_mask & BIT(3))
+		lp->mmc_counters.txmulticastpackets_g +=
+			dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G);
+	if (tx_mask & BIT(2))
+		lp->mmc_counters.txbroadcastpackets_g +=
+			dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G);
+	if (tx_mask & BIT(1))
+		lp->mmc_counters.txpacketcount_gb +=
+			dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB);
+	if (tx_mask & BIT(0))
+		lp->mmc_counters.txoctetcount_gb +=
+			dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB);
+
+	if (rx_mask & BIT(27))
+		lp->mmc_counters.rxlpitranscntr +=
+			dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR);
+	if (rx_mask & BIT(26))
+		lp->mmc_counters.rxlpiuscntr +=
+			dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR);
+	if (rx_mask & BIT(25))
+		lp->mmc_counters.rxctrlpackets_g +=
+			dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G);
+	if (rx_mask & BIT(24))
+		lp->mmc_counters.rxrcverror +=
+			dwceqos_read(lp, DWC_MMC_RXRCVERROR);
+	if (rx_mask & BIT(23))
+		lp->mmc_counters.rxwatchdog +=
+			dwceqos_read(lp, DWC_MMC_RXWATCHDOG);
+	if (rx_mask & BIT(22))
+		lp->mmc_counters.rxvlanpackets_gb +=
+			dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB);
+	if (rx_mask & BIT(21))
+		lp->mmc_counters.rxfifooverflow +=
+			dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW);
+	if (rx_mask & BIT(20))
+		lp->mmc_counters.rxpausepackets +=
+			dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS);
+	if (rx_mask & BIT(19))
+		lp->mmc_counters.rxoutofrangetype +=
+			dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE);
+	if (rx_mask & BIT(18))
+		lp->mmc_counters.rxlengtherror +=
+			dwceqos_read(lp, DWC_MMC_RXLENGTHERROR);
+	if (rx_mask & BIT(17))
+		lp->mmc_counters.rxunicastpackets_g +=
+			dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G);
+	if (rx_mask & BIT(16))
+		lp->mmc_counters.rx1024tomaxoctets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB);
+	if (rx_mask & BIT(15))
+		lp->mmc_counters.rx512to1023octets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB);
+	if (rx_mask & BIT(14))
+		lp->mmc_counters.rx256to511octets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB);
+	if (rx_mask & BIT(13))
+		lp->mmc_counters.rx128to255octets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB);
+	if (rx_mask & BIT(12))
+		lp->mmc_counters.rx65to127octets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB);
+	if (rx_mask & BIT(11))
+		lp->mmc_counters.rx64octets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB);
+	if (rx_mask & BIT(10))
+		lp->mmc_counters.rxoversize_g +=
+			dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G);
+	if (rx_mask & BIT(9))
+		lp->mmc_counters.rxundersize_g +=
+			dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G);
+	if (rx_mask & BIT(8))
+		lp->mmc_counters.rxjabbererror +=
+			dwceqos_read(lp, DWC_MMC_RXJABBERERROR);
+	if (rx_mask & BIT(7))
+		lp->mmc_counters.rxrunterror +=
+			dwceqos_read(lp, DWC_MMC_RXRUNTERROR);
+	if (rx_mask & BIT(6))
+		lp->mmc_counters.rxalignmenterror +=
+			dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR);
+	if (rx_mask & BIT(5))
+		lp->mmc_counters.rxcrcerror +=
+			dwceqos_read(lp, DWC_MMC_RXCRCERROR);
+	if (rx_mask & BIT(4))
+		lp->mmc_counters.rxmulticastpackets_g +=
+			dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G);
+	if (rx_mask & BIT(3))
+		lp->mmc_counters.rxbroadcastpackets_g +=
+			dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G);
+	if (rx_mask & BIT(2))
+		lp->mmc_counters.rxoctetcount_g +=
+			dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G);
+	if (rx_mask & BIT(1))
+		lp->mmc_counters.rxoctetcount_gb +=
+			dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB);
+	if (rx_mask & BIT(0))
+		lp->mmc_counters.rxpacketcount_gb +=
+			dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB);
+}
+
+static struct rtnl_link_stats64*
+dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
+{
+	unsigned long flags;
+	struct net_local *lp = netdev_priv(ndev);
+	struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters;
+
+	spin_lock_irqsave(&lp->stats_lock, flags);
+	dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
+				  lp->mmc_tx_counters_mask);
+	spin_unlock_irqrestore(&lp->stats_lock, flags);
+
+	s->rx_packets = hwstats->rxpacketcount_gb;
+	s->rx_bytes = hwstats->rxoctetcount_gb;
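+	/* The _gb counters include bad frames; subtracting the per-type
+	 * good-frame counters leaves the error count.
+	 */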
+	s->rx_errors = hwstats->rxpacketcount_gb -
+		hwstats->rxbroadcastpackets_g -
+		hwstats->rxmulticastpackets_g -
+		hwstats->rxunicastpackets_g;
+	s->multicast = hwstats->rxmulticastpackets_g;
+	s->rx_length_errors = hwstats->rxlengtherror;
+	s->rx_crc_errors = hwstats->rxcrcerror;
+	s->rx_fifo_errors = hwstats->rxfifooverflow;
+
+	s->tx_packets = hwstats->txpacketcount_gb;
+	s->tx_bytes = hwstats->txoctetcount_gb;
+
+	if (lp->mmc_tx_counters_mask & BIT(21))
+		s->tx_errors = hwstats->txpacketcount_gb -
+			hwstats->txpacketcount_g;
+	else
+		s->tx_errors = hwstats->txunderflowerror +
+			hwstats->txcarriererror;
+
+	return s;
+}
+
+static int
+dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct phy_device *phydev = lp->phy_dev;
+
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_ethtool_gset(phydev, ecmd);
+}
+
+static int
+dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct phy_device *phydev = lp->phy_dev;
+
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_ethtool_sset(phydev, ecmd);
+}
+
+static void
+dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
+{
+	const struct net_local *lp = netdev_priv(ndev);
+
+	strlcpy(ed->driver, lp->pdev->dev.driver->name, sizeof(ed->driver));
+	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
+}
+
+static void dwceqos_get_pauseparam(struct net_device *ndev,
+				   struct ethtool_pauseparam *pp)
+{
+	const struct net_local *lp = netdev_priv(ndev);
+
+	pp->autoneg = lp->flowcontrol.autoneg;
+	pp->tx_pause = lp->flowcontrol.tx;
+	pp->rx_pause = lp->flowcontrol.rx;
+}
+
+static int dwceqos_set_pauseparam(struct net_device *ndev,
+				  struct ethtool_pauseparam *pp)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	int ret = 0;
+
+	lp->flowcontrol.autoneg = pp->autoneg;
+	if (pp->autoneg) {
+		lp->phy_dev->advertising |= ADVERTISED_Pause;
+		lp->phy_dev->advertising |= ADVERTISED_Asym_Pause;
+	} else {
+		lp->phy_dev->advertising &= ~ADVERTISED_Pause;
+		lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause;
+		lp->flowcontrol.rx = pp->rx_pause;
+		lp->flowcontrol.tx = pp->tx_pause;
+	}
+
+	if (netif_running(ndev))
+		ret = phy_start_aneg(lp->phy_dev);
+
+	return ret;
+}
+
+static void dwceqos_get_strings(struct net_device *ndev, u32 stringset,
+				u8 *data)
+{
+	size_t i;
+
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
+		memcpy(data, dwceqos_ethtool_stats[i].stat_name,
+		       ETH_GSTRING_LEN);
+		data += ETH_GSTRING_LEN;
+	}
+}
+
+static void dwceqos_get_ethtool_stats(struct net_device *ndev,
+				      struct ethtool_stats *stats, u64 *data)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	unsigned long flags;
+	size_t i;
+	u8 *mmcstat = (u8 *)&lp->mmc_counters;
+
+	spin_lock_irqsave(&lp->stats_lock, flags);
+	dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
+				  lp->mmc_tx_counters_mask);
+	spin_unlock_irqrestore(&lp->stats_lock, flags);
+
+	for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
+		memcpy(data,
+		       mmcstat + dwceqos_ethtool_stats[i].offset,
+		       sizeof(u64));
+		data++;
+	}
+}
+
+static int dwceqos_get_sset_count(struct net_device *ndev, int sset)
+{
+	if (sset == ETH_SS_STATS)
+		return ARRAY_SIZE(dwceqos_ethtool_stats);
+
+	return -EOPNOTSUPP;
+}
+
+static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+			     void *space)
+{
+	const struct net_local *lp = netdev_priv(dev);
+	u32 *reg_space = (u32 *)space;
+	int reg_offset;
+	int reg_ix = 0;
+
+	/* MAC registers */
+	for (reg_offset = START_MAC_REG_OFFSET;
+		reg_offset <= MAX_MAC_REG_OFFSET; reg_offset += 4) {
+		reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+		reg_ix++;
+	}
+	/* MTL registers */
+	for (reg_offset = START_MTL_REG_OFFSET;
+		reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
+		reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+		reg_ix++;
+	}
+
+	/* DMA registers */
+	for (reg_offset = START_DMA_REG_OFFSET;
+		reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
+		reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+		reg_ix++;
+	}
+
+	BUG_ON(4 * reg_ix > REG_SPACE_SIZE);
+}
+
+static int dwceqos_get_regs_len(struct net_device *dev)
+{
+	return REG_SPACE_SIZE;
+}
+
+static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl)
+{
+	return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off";
+}
+
+static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl)
+{
+	return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off";
+}
+
+static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	u32 lpi_status;
+	u32 lpi_enabled;
+
+	if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
+		return -EOPNOTSUPP;
+
+	edata->eee_active  = lp->eee_active;
+	edata->eee_enabled = lp->eee_enabled;
+	edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER);
+	lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+	lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA);
+	edata->tx_lpi_enabled = lpi_enabled;
+
+	if (netif_msg_hw(lp)) {
+		u32 regval;
+
+		regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+
+		netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n",
+			    dwceqos_get_rx_lpi_state(regval),
+			    dwceqos_get_tx_lpi_state(regval));
+	}
+
+	return phy_ethtool_get_eee(lp->phy_dev, edata);
+}
+
+static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	u32 regval;
+	unsigned long flags;
+
+	if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
+		return -EOPNOTSUPP;
+
+	if (edata->eee_enabled && !lp->eee_active)
+		return -EOPNOTSUPP;
+
+	if (edata->tx_lpi_enabled) {
+		if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN ||
+		    edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX)
+			return -EINVAL;
+	}
+
+	lp->eee_enabled = edata->eee_enabled;
+
+	if (edata->eee_enabled && edata->tx_lpi_enabled) {
+		dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER,
+			      edata->tx_lpi_timer);
+
+		spin_lock_irqsave(&lp->hw_lock, flags);
+		regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+		regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE;
+		if (lp->en_tx_lpi_clockgating)
+			regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE;
+		dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+		spin_unlock_irqrestore(&lp->hw_lock, flags);
+	} else {
+		spin_lock_irqsave(&lp->hw_lock, flags);
+		regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+		regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
+		dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+		spin_unlock_irqrestore(&lp->hw_lock, flags);
+	}
+
+	return phy_ethtool_set_eee(lp->phy_dev, edata);
+}
+
+static u32 dwceqos_get_msglevel(struct net_device *ndev)
+{
+	const struct net_local *lp = netdev_priv(ndev);
+
+	return lp->msg_enable;
+}
+
+static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
+{
+	struct net_local *lp = netdev_priv(ndev);
+
+	lp->msg_enable = msglevel;
+}
+
+static struct ethtool_ops dwceqos_ethtool_ops = {
+	.get_settings   = dwceqos_get_settings,
+	.set_settings   = dwceqos_set_settings,
+	.get_drvinfo    = dwceqos_get_drvinfo,
+	.get_link       = ethtool_op_get_link,
+	.get_pauseparam = dwceqos_get_pauseparam,
+	.set_pauseparam = dwceqos_set_pauseparam,
+	.get_strings    = dwceqos_get_strings,
+	.get_ethtool_stats = dwceqos_get_ethtool_stats,
+	.get_sset_count = dwceqos_get_sset_count,
+	.get_regs       = dwceqos_get_regs,
+	.get_regs_len   = dwceqos_get_regs_len,
+	.get_eee        = dwceqos_get_eee,
+	.set_eee        = dwceqos_set_eee,
+	.get_msglevel   = dwceqos_get_msglevel,
+	.set_msglevel   = dwceqos_set_msglevel,
+};
+
+static struct net_device_ops netdev_ops = {
+	.ndo_open		= dwceqos_open,
+	.ndo_stop		= dwceqos_stop,
+	.ndo_start_xmit		= dwceqos_start_xmit,
+	.ndo_set_rx_mode	= dwceqos_set_rx_mode,
+	.ndo_set_mac_address	= dwceqos_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= dwceqos_poll_controller,
+#endif
+	.ndo_do_ioctl		= dwceqos_ioctl,
+	.ndo_tx_timeout		= dwceqos_tx_timeout,
+	.ndo_get_stats64	= dwceqos_get_stats64,
+};
+
+static const struct of_device_id dwceq_of_match[] = {
+	{ .compatible = "snps,dwc-qos-ethernet-4.10", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dwceq_of_match);
+
+static int dwceqos_probe(struct platform_device *pdev)
+{
+	struct resource *r_mem = NULL;
+	struct net_device *ndev;
+	struct net_local *lp;
+	int ret = -ENXIO;
+
+	r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r_mem) {
+		dev_err(&pdev->dev, "no IO resource defined.\n");
+		return -ENXIO;
+	}
+
+	ndev = alloc_etherdev(sizeof(*lp));
+	if (!ndev) {
+		dev_err(&pdev->dev, "etherdev allocation failed.\n");
+		return -ENOMEM;
+	}
+
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+
+	lp = netdev_priv(ndev);
+	lp->ndev = ndev;
+	lp->pdev = pdev;
+	lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT);
+
+	spin_lock_init(&lp->tx_lock);
+	spin_lock_init(&lp->hw_lock);
+	spin_lock_init(&lp->stats_lock);
+
+	lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
+	if (IS_ERR(lp->apb_pclk)) {
+		dev_err(&pdev->dev, "apb_pclk clock not found.\n");
+		ret = PTR_ERR(lp->apb_pclk);
+		goto err_out_free_netdev;
+	}
+
+	ret = clk_prepare_enable(lp->apb_pclk);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to enable APER clock.\n");
+		goto err_out_free_netdev;
+	}
+
+	lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem);
+	if (IS_ERR(lp->baseaddr)) {
+		dev_err(&pdev->dev, "failed to map baseaddress.\n");
+		ret = PTR_ERR(lp->baseaddr);
+		goto err_out_clk_dis_aper;
+	}
+
+	ndev->irq = platform_get_irq(pdev, 0);
+	ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ;
+	ndev->netdev_ops = &netdev_ops;
+	ndev->ethtool_ops = &dwceqos_ethtool_ops;
+	ndev->base_addr = r_mem->start;
+
+	dwceqos_get_hwfeatures(lp);
+	dwceqos_mdio_set_csr(lp);
+
+	ndev->hw_features = NETIF_F_SG;
+
+	if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
+		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+
+	if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL)
+		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+	if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL)
+		ndev->hw_features |= NETIF_F_RXCSUM;
+
+	ndev->features = ndev->hw_features;
+
+	netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
+
+	ret = register_netdev(ndev);
+	if (ret) {
+		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+		goto err_out_clk_dis_aper;
+	}
+
+	lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+	if (IS_ERR(lp->phy_ref_clk)) {
+		dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+		ret = PTR_ERR(lp->phy_ref_clk);
+		goto err_out_unregister_netdev;
+	}
+
+	ret = clk_prepare_enable(lp->phy_ref_clk);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to enable device clock.\n");
+		goto err_out_unregister_netdev;
+	}
+
+	lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
+						"phy-handle", 0);
+	if (!lp->phy_node && of_phy_is_fixed_link(lp->pdev->dev.of_node)) {
+		ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "invalid fixed-link");
+			goto err_out_unregister_netdev;
+		}
+
+		lp->phy_node = of_node_get(lp->pdev->dev.of_node);
+	}
+
+	ret = of_get_phy_mode(lp->pdev->dev.of_node);
+	if (ret < 0) {
+		dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
+		goto err_out_unregister_clk_notifier;
+	}
+
+	lp->phy_interface = ret;
+
+	ret = dwceqos_mii_init(lp);
+	if (ret) {
+		dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
+		goto err_out_unregister_clk_notifier;
+	}
+
+	ret = dwceqos_mii_probe(ndev);
+	if (ret != 0) {
+		netdev_err(ndev, "mii_probe fail.\n");
+		ret = -ENXIO;
+		goto err_out_unregister_clk_notifier;
+	}
+
+	dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+
+	tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim,
+		     (unsigned long)ndev);
+	tasklet_disable(&lp->tx_bdreclaim_tasklet);
+
+	lp->txtimeout_handler_wq = create_singlethread_workqueue(DRIVER_NAME);
+	if (!lp->txtimeout_handler_wq) {
+		ret = -ENOMEM;
+		goto err_out_unregister_clk_notifier;
+	}
+	INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout);
+
+	platform_set_drvdata(pdev, ndev);
+	ret = dwceqos_probe_config_dt(pdev);
+	if (ret) {
+		dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
+			ret);
+		goto err_out_unregister_clk_notifier;
+	}
+	dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
+		 pdev->id, ndev->base_addr, ndev->irq);
+
+	ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0,
+			       ndev->name, ndev);
+	if (ret) {
+		dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
+			ndev->irq, ret);
+		goto err_out_unregister_clk_notifier;
+	}
+
+	if (netif_msg_probe(lp))
+		netdev_dbg(ndev, "net_local@%p\n", lp);
+
+	return 0;
+
+err_out_unregister_clk_notifier:
+	clk_disable_unprepare(lp->phy_ref_clk);
+err_out_unregister_netdev:
+	unregister_netdev(ndev);
+err_out_clk_dis_aper:
+	clk_disable_unprepare(lp->apb_pclk);
+err_out_free_netdev:
+	if (lp->phy_node)
+		of_node_put(lp->phy_node);
+	free_netdev(ndev);
+	platform_set_drvdata(pdev, NULL);
+	return ret;
+}
+
+static int dwceqos_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct net_local *lp;
+
+	if (ndev) {
+		lp = netdev_priv(ndev);
+
+		if (lp->phy_dev)
+			phy_disconnect(lp->phy_dev);
+		mdiobus_unregister(lp->mii_bus);
+		kfree(lp->mii_bus->irq);
+		mdiobus_free(lp->mii_bus);
+
+		unregister_netdev(ndev);
+
+		clk_disable_unprepare(lp->phy_ref_clk);
+		clk_disable_unprepare(lp->apb_pclk);
+
+		free_netdev(ndev);
+	}
+
+	return 0;
+}
+
+static struct platform_driver dwceqos_driver = {
+	.probe   = dwceqos_probe,
+	.remove  = dwceqos_remove,
+	.driver  = {
+		.name  = DRIVER_NAME,
+		.of_match_table = dwceq_of_match,
+	},
+};
+
+module_platform_driver(dwceqos_driver);
+
+MODULE_DESCRIPTION("DWC Ethernet QoS v4.10a driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andreas Irestaal <andreas.irestal@axis.com>");
+MODULE_AUTHOR("Lars Persson <lars.persson@axis.com>");
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index dd94300..cba3d9f 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -41,6 +41,8 @@
 #include <linux/gpio.h>
 #include <linux/atomic.h>
 
+#include <asm/mach-ar7/ar7.h>
+
 MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
 MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index d155bf2..8fc90f1 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -365,7 +365,8 @@
 	spinlock_t			lock;
 	struct platform_device		*pdev;
 	struct net_device		*ndev;
-	struct napi_struct		napi;
+	struct napi_struct		napi_rx;
+	struct napi_struct		napi_tx;
 	struct device			*dev;
 	struct cpsw_platform_data	data;
 	struct cpsw_ss_regs __iomem	*regs;
@@ -386,10 +387,12 @@
 	struct cpsw_ale			*ale;
 	bool				rx_pause;
 	bool				tx_pause;
+	bool				quirk_irq;
+	bool				rx_irq_disabled;
+	bool				tx_irq_disabled;
 	/* snapshot of IRQ numbers */
 	u32 irqs_table[4];
 	u32 num_irqs;
-	bool irq_enabled;
 	struct cpts *cpts;
 	u32 emac_port;
 };
@@ -752,13 +755,15 @@
 {
 	struct cpsw_priv *priv = dev_id;
 
+	writel(0, &priv->wr_regs->tx_en);
 	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-	cpdma_chan_process(priv->txch, 128);
 
-	priv = cpsw_get_slave_priv(priv, 1);
-	if (priv)
-		cpdma_chan_process(priv->txch, 128);
+	if (priv->quirk_irq) {
+		disable_irq_nosync(priv->irqs_table[1]);
+		priv->tx_irq_disabled = true;
+	}
 
+	napi_schedule(&priv->napi_tx);
 	return IRQ_HANDLED;
 }
 
@@ -767,43 +772,49 @@
 	struct cpsw_priv *priv = dev_id;
 
 	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+	writel(0, &priv->wr_regs->rx_en);
 
-	cpsw_intr_disable(priv);
-	if (priv->irq_enabled == true) {
+	if (priv->quirk_irq) {
 		disable_irq_nosync(priv->irqs_table[0]);
-		priv->irq_enabled = false;
+		priv->rx_irq_disabled = true;
 	}
 
-	if (netif_running(priv->ndev)) {
-		napi_schedule(&priv->napi);
-		return IRQ_HANDLED;
-	}
-
-	priv = cpsw_get_slave_priv(priv, 1);
-	if (!priv)
-		return IRQ_NONE;
-
-	if (netif_running(priv->ndev)) {
-		napi_schedule(&priv->napi);
-		return IRQ_HANDLED;
-	}
-	return IRQ_NONE;
+	napi_schedule(&priv->napi_rx);
+	return IRQ_HANDLED;
 }
 
-static int cpsw_poll(struct napi_struct *napi, int budget)
+static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
 {
-	struct cpsw_priv	*priv = napi_to_priv(napi);
+	struct cpsw_priv	*priv = napi_to_priv(napi_tx);
+	int			num_tx;
+
+	num_tx = cpdma_chan_process(priv->txch, budget);
+	if (num_tx < budget) {
+		napi_complete(napi_tx);
+		writel(0xff, &priv->wr_regs->tx_en);
+		if (priv->quirk_irq && priv->tx_irq_disabled) {
+			priv->tx_irq_disabled = false;
+			enable_irq(priv->irqs_table[1]);
+		}
+	}
+
+	if (num_tx)
+		cpsw_dbg(priv, intr, "poll %d tx pkts\n", num_tx);
+
+	return num_tx;
+}
+
+static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+	struct cpsw_priv	*priv = napi_to_priv(napi_rx);
 	int			num_rx;
 
 	num_rx = cpdma_chan_process(priv->rxch, budget);
 	if (num_rx < budget) {
-		struct cpsw_priv *prim_cpsw;
-
-		napi_complete(napi);
-		cpsw_intr_enable(priv);
-		prim_cpsw = cpsw_get_slave_priv(priv, 0);
-		if (prim_cpsw->irq_enabled == false) {
-			prim_cpsw->irq_enabled = true;
+		napi_complete(napi_rx);
+		writel(0xff, &priv->wr_regs->rx_en);
+		if (priv->quirk_irq && priv->rx_irq_disabled) {
+			priv->rx_irq_disabled = false;
 			enable_irq(priv->irqs_table[0]);
 		}
 	}
@@ -1230,7 +1241,6 @@
 static int cpsw_ndo_open(struct net_device *ndev)
 {
 	struct cpsw_priv *priv = netdev_priv(ndev);
-	struct cpsw_priv *prim_cpsw;
 	int i, ret;
 	u32 reg;
 
@@ -1260,6 +1270,8 @@
 				  ALE_ALL_PORTS << priv->host_port, 0, 0);
 
 	if (!cpsw_common_res_usage_state(priv)) {
+		struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
+
 		/* setup tx dma to fixed prio and zero offset */
 		cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
 		cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
@@ -1273,6 +1285,19 @@
 		/* Enable internal fifo flow control */
 		writel(0x7, &priv->regs->flow_control);
 
+		napi_enable(&priv_sl0->napi_rx);
+		napi_enable(&priv_sl0->napi_tx);
+
+		if (priv_sl0->tx_irq_disabled) {
+			priv_sl0->tx_irq_disabled = false;
+			enable_irq(priv->irqs_table[1]);
+		}
+
+		if (priv_sl0->rx_irq_disabled) {
+			priv_sl0->rx_irq_disabled = false;
+			enable_irq(priv->irqs_table[0]);
+		}
+
 		if (WARN_ON(!priv->data.rx_descs))
 			priv->data.rx_descs = 128;
 
@@ -1311,18 +1336,9 @@
 		cpsw_set_coalesce(ndev, &coal);
 	}
 
-	napi_enable(&priv->napi);
 	cpdma_ctlr_start(priv->dma);
 	cpsw_intr_enable(priv);
 
-	prim_cpsw = cpsw_get_slave_priv(priv, 0);
-	if (prim_cpsw->irq_enabled == false) {
-		if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
-			prim_cpsw->irq_enabled = true;
-			enable_irq(prim_cpsw->irqs_table[0]);
-		}
-	}
-
 	if (priv->data.dual_emac)
 		priv->slaves[priv->emac_port].open_stat = true;
 	return 0;
@@ -1341,10 +1357,13 @@
 
 	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
 	netif_stop_queue(priv->ndev);
-	napi_disable(&priv->napi);
 	netif_carrier_off(priv->ndev);
 
 	if (cpsw_common_res_usage_state(priv) <= 1) {
+		struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
+
+		napi_disable(&priv_sl0->napi_rx);
+		napi_disable(&priv_sl0->napi_tx);
 		cpts_unregister(priv->cpts);
 		cpsw_intr_disable(priv);
 		cpdma_ctlr_stop(priv->dma);
@@ -2127,7 +2146,6 @@
 
 	ndev->netdev_ops = &cpsw_netdev_ops;
 	ndev->ethtool_ops = &cpsw_ethtool_ops;
-	netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
 
 	/* register the network device */
 	SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -2141,6 +2159,44 @@
 	return ret;
 }
 
+#define CPSW_QUIRK_IRQ		BIT(0)
+
+static struct platform_device_id cpsw_devtype[] = {
+	{
+		/* keep it for existing compatibles */
+		.name = "cpsw",
+		.driver_data = CPSW_QUIRK_IRQ,
+	}, {
+		.name = "am335x-cpsw",
+		.driver_data = CPSW_QUIRK_IRQ,
+	}, {
+		.name = "am4372-cpsw",
+		.driver_data = 0,
+	}, {
+		.name = "dra7-cpsw",
+		.driver_data = 0,
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(platform, cpsw_devtype);
+
+enum ti_cpsw_type {
+	CPSW = 0,
+	AM335X_CPSW,
+	AM4372_CPSW,
+	DRA7_CPSW,
+};
+
+static const struct of_device_id cpsw_of_mtable[] = {
+	{ .compatible = "ti,cpsw", .data = &cpsw_devtype[CPSW], },
+	{ .compatible = "ti,am335x-cpsw", .data = &cpsw_devtype[AM335X_CPSW], },
+	{ .compatible = "ti,am4372-cpsw", .data = &cpsw_devtype[AM4372_CPSW], },
+	{ .compatible = "ti,dra7-cpsw", .data = &cpsw_devtype[DRA7_CPSW], },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
+
 static int cpsw_probe(struct platform_device *pdev)
 {
 	struct cpsw_platform_data	*data;
@@ -2150,6 +2206,7 @@
 	struct cpsw_ale_params		ale_params;
 	void __iomem			*ss_regs;
 	struct resource			*res, *ss_res;
+	const struct of_device_id	*of_id;
 	u32 slave_offset, sliver_offset, slave_size;
 	int ret = 0, i;
 	int irq;
@@ -2169,7 +2226,6 @@
 	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
 	priv->rx_packet_max = max(rx_packet_max, 128);
 	priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
-	priv->irq_enabled = true;
 	if (!priv->cpts) {
 		dev_err(&pdev->dev, "error allocating cpts\n");
 		ret = -ENOMEM;
@@ -2341,6 +2397,13 @@
 		goto clean_ale_ret;
 	}
 
+	of_id = of_match_device(cpsw_of_mtable, &pdev->dev);
+	if (of_id) {
+		pdev->id_entry = of_id->data;
+		if (pdev->id_entry->driver_data)
+			priv->quirk_irq = true;
+	}
+
 	/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
 	 * MISC IRQs which are always kept disabled with this driver so
 	 * we will not request them.
@@ -2380,7 +2443,8 @@
 
 	ndev->netdev_ops = &cpsw_netdev_ops;
 	ndev->ethtool_ops = &cpsw_ethtool_ops;
-	netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
+	netif_napi_add(ndev, &priv->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
+	netif_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
 
 	/* register the network device */
 	SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -2504,12 +2568,6 @@
 
 static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
 
-static const struct of_device_id cpsw_of_mtable[] = {
-	{ .compatible = "ti,cpsw", },
-	{ /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
-
 static struct platform_driver cpsw_driver = {
 	.driver = {
 		.name	 = "cpsw",
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index aeebc0a..a21c77b 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -2004,8 +2004,10 @@
 	if (res_ctrl) {
 		priv->ctrl_base =
 			devm_ioremap_resource(&pdev->dev, res_ctrl);
-		if (IS_ERR(priv->ctrl_base))
+		if (IS_ERR(priv->ctrl_base)) {
+			rc = PTR_ERR(priv->ctrl_base);
 			goto no_pdata;
+		}
 	} else {
 		priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
 	}
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 4755838..1a5aca5 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -52,6 +52,8 @@
 		    NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	|	\
 		    NETIF_MSG_RX_STATUS)
 
+#define NETCP_EFUSE_ADDR_SWAP	2
+
 #define knav_queue_get_id(q)	knav_queue_device_control(q, \
 				KNAV_QUEUE_GET_ID, (unsigned long)NULL)
 
@@ -173,13 +175,22 @@
 }
 
 /* Read the e-fuse value as 32 bit values to be endian independent */
-static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac)
+static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
 {
 	unsigned int addr0, addr1;
 
 	addr1 = readl(efuse_mac + 4);
 	addr0 = readl(efuse_mac);
 
+	switch (swap) {
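+	/* Some e-fuse layouts store the two MAC words swapped; re-read so
+	 * addr0/addr1 end up in canonical order.
+	 */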
+	case NETCP_EFUSE_ADDR_SWAP:
+		addr0 = addr1;
+		addr1 = readl(efuse_mac);
+		break;
+	default:
+		break;
+	}
+
 	x[0] = (addr1 & 0x0000ff00) >> 8;
 	x[1] = addr1 & 0x000000ff;
 	x[2] = (addr0 & 0xff000000) >> 24;
@@ -1901,7 +1912,7 @@
 			goto quit;
 		}
 
-		emac_arch_get_mac_addr(efuse_mac_addr, efuse);
+		emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
 		if (is_valid_ether_addr(efuse_mac_addr))
 			ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
 		else
@@ -2141,7 +2152,6 @@
 static struct platform_driver netcp_driver = {
 	.driver = {
 		.name		= "netcp-1.0",
-		.owner		= THIS_MODULE,
 		.of_match_table	= of_match,
 	},
 	.probe = netcp_probe,
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 1974a8a..6f16d6a 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -295,8 +295,6 @@
 	u32	rx_dma_overruns;
 };
 
-#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
-
 struct gbenu_ss_regs {
 	u32	id_ver;
 	u32	synce_count;		/* NU */
@@ -480,7 +478,6 @@
 	u32	tx_pri7_drop_bcnt;
 };
 
-#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32))
 #define GBENU_HW_STATS_REG_MAP_SZ	0x200
 
 struct gbe_ss_regs {
@@ -615,7 +612,6 @@
 	u32	rx_dma_overruns;
 };
 
-#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
 #define GBE_MAX_HW_STAT_MODS			9
 #define GBE_HW_STATS_REG_MAP_SZ			0x100
 
@@ -646,6 +642,7 @@
 	bool				enable_ale;
 	u8				max_num_slaves;
 	u8				max_num_ports; /* max_num_slaves + 1 */
+	u8				num_stats_mods;
 	struct netcp_tx_pipe		tx_pipe;
 
 	int				host_port;
@@ -675,6 +672,7 @@
 	struct net_device		*dummy_ndev;
 
 	u64				*hw_stats;
+	u32				*hw_stats_prev;
 	const struct netcp_ethtool_stat *et_stats;
 	int				num_et_stats;
 	/*  Lock for updating the hwstats */
@@ -874,7 +872,7 @@
 };
 
 /* This is the size of entries in GBENU_STATS_HOST */
-#define GBENU_ET_STATS_HOST_SIZE	33
+#define GBENU_ET_STATS_HOST_SIZE	52
 
 #define GBENU_STATS_HOST(field)					\
 {								\
@@ -883,8 +881,8 @@
 	offsetof(struct gbenu_hw_stats, field)			\
 }
 
-/* This is the size of entries in GBENU_STATS_HOST */
-#define GBENU_ET_STATS_PORT_SIZE	46
+/* This is the size of entries in GBENU_STATS_PORT */
+#define GBENU_ET_STATS_PORT_SIZE	65
 
 #define GBENU_STATS_P1(field)					\
 {								\
@@ -976,7 +974,26 @@
 	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
 	GBENU_STATS_HOST(ale_unknown_bcast),
 	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
+	GBENU_STATS_HOST(ale_pol_match),
+	GBENU_STATS_HOST(ale_pol_match_red),
+	GBENU_STATS_HOST(ale_pol_match_yellow),
 	GBENU_STATS_HOST(tx_mem_protect_err),
+	GBENU_STATS_HOST(tx_pri0_drop),
+	GBENU_STATS_HOST(tx_pri1_drop),
+	GBENU_STATS_HOST(tx_pri2_drop),
+	GBENU_STATS_HOST(tx_pri3_drop),
+	GBENU_STATS_HOST(tx_pri4_drop),
+	GBENU_STATS_HOST(tx_pri5_drop),
+	GBENU_STATS_HOST(tx_pri6_drop),
+	GBENU_STATS_HOST(tx_pri7_drop),
+	GBENU_STATS_HOST(tx_pri0_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri1_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri2_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri3_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri4_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri5_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri6_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri7_drop_bcnt),
 	/* GBENU Module 1 */
 	GBENU_STATS_P1(rx_good_frames),
 	GBENU_STATS_P1(rx_broadcast_frames),
@@ -1023,7 +1040,26 @@
 	GBENU_STATS_P1(ale_unknown_mcast_bytes),
 	GBENU_STATS_P1(ale_unknown_bcast),
 	GBENU_STATS_P1(ale_unknown_bcast_bytes),
+	GBENU_STATS_P1(ale_pol_match),
+	GBENU_STATS_P1(ale_pol_match_red),
+	GBENU_STATS_P1(ale_pol_match_yellow),
 	GBENU_STATS_P1(tx_mem_protect_err),
+	GBENU_STATS_P1(tx_pri0_drop),
+	GBENU_STATS_P1(tx_pri1_drop),
+	GBENU_STATS_P1(tx_pri2_drop),
+	GBENU_STATS_P1(tx_pri3_drop),
+	GBENU_STATS_P1(tx_pri4_drop),
+	GBENU_STATS_P1(tx_pri5_drop),
+	GBENU_STATS_P1(tx_pri6_drop),
+	GBENU_STATS_P1(tx_pri7_drop),
+	GBENU_STATS_P1(tx_pri0_drop_bcnt),
+	GBENU_STATS_P1(tx_pri1_drop_bcnt),
+	GBENU_STATS_P1(tx_pri2_drop_bcnt),
+	GBENU_STATS_P1(tx_pri3_drop_bcnt),
+	GBENU_STATS_P1(tx_pri4_drop_bcnt),
+	GBENU_STATS_P1(tx_pri5_drop_bcnt),
+	GBENU_STATS_P1(tx_pri6_drop_bcnt),
+	GBENU_STATS_P1(tx_pri7_drop_bcnt),
 	/* GBENU Module 2 */
 	GBENU_STATS_P2(rx_good_frames),
 	GBENU_STATS_P2(rx_broadcast_frames),
@@ -1070,7 +1106,26 @@
 	GBENU_STATS_P2(ale_unknown_mcast_bytes),
 	GBENU_STATS_P2(ale_unknown_bcast),
 	GBENU_STATS_P2(ale_unknown_bcast_bytes),
+	GBENU_STATS_P2(ale_pol_match),
+	GBENU_STATS_P2(ale_pol_match_red),
+	GBENU_STATS_P2(ale_pol_match_yellow),
 	GBENU_STATS_P2(tx_mem_protect_err),
+	GBENU_STATS_P2(tx_pri0_drop),
+	GBENU_STATS_P2(tx_pri1_drop),
+	GBENU_STATS_P2(tx_pri2_drop),
+	GBENU_STATS_P2(tx_pri3_drop),
+	GBENU_STATS_P2(tx_pri4_drop),
+	GBENU_STATS_P2(tx_pri5_drop),
+	GBENU_STATS_P2(tx_pri6_drop),
+	GBENU_STATS_P2(tx_pri7_drop),
+	GBENU_STATS_P2(tx_pri0_drop_bcnt),
+	GBENU_STATS_P2(tx_pri1_drop_bcnt),
+	GBENU_STATS_P2(tx_pri2_drop_bcnt),
+	GBENU_STATS_P2(tx_pri3_drop_bcnt),
+	GBENU_STATS_P2(tx_pri4_drop_bcnt),
+	GBENU_STATS_P2(tx_pri5_drop_bcnt),
+	GBENU_STATS_P2(tx_pri6_drop_bcnt),
+	GBENU_STATS_P2(tx_pri7_drop_bcnt),
 	/* GBENU Module 3 */
 	GBENU_STATS_P3(rx_good_frames),
 	GBENU_STATS_P3(rx_broadcast_frames),
@@ -1117,7 +1172,26 @@
 	GBENU_STATS_P3(ale_unknown_mcast_bytes),
 	GBENU_STATS_P3(ale_unknown_bcast),
 	GBENU_STATS_P3(ale_unknown_bcast_bytes),
+	GBENU_STATS_P3(ale_pol_match),
+	GBENU_STATS_P3(ale_pol_match_red),
+	GBENU_STATS_P3(ale_pol_match_yellow),
 	GBENU_STATS_P3(tx_mem_protect_err),
+	GBENU_STATS_P3(tx_pri0_drop),
+	GBENU_STATS_P3(tx_pri1_drop),
+	GBENU_STATS_P3(tx_pri2_drop),
+	GBENU_STATS_P3(tx_pri3_drop),
+	GBENU_STATS_P3(tx_pri4_drop),
+	GBENU_STATS_P3(tx_pri5_drop),
+	GBENU_STATS_P3(tx_pri6_drop),
+	GBENU_STATS_P3(tx_pri7_drop),
+	GBENU_STATS_P3(tx_pri0_drop_bcnt),
+	GBENU_STATS_P3(tx_pri1_drop_bcnt),
+	GBENU_STATS_P3(tx_pri2_drop_bcnt),
+	GBENU_STATS_P3(tx_pri3_drop_bcnt),
+	GBENU_STATS_P3(tx_pri4_drop_bcnt),
+	GBENU_STATS_P3(tx_pri5_drop_bcnt),
+	GBENU_STATS_P3(tx_pri6_drop_bcnt),
+	GBENU_STATS_P3(tx_pri7_drop_bcnt),
 	/* GBENU Module 4 */
 	GBENU_STATS_P4(rx_good_frames),
 	GBENU_STATS_P4(rx_broadcast_frames),
@@ -1164,7 +1238,26 @@
 	GBENU_STATS_P4(ale_unknown_mcast_bytes),
 	GBENU_STATS_P4(ale_unknown_bcast),
 	GBENU_STATS_P4(ale_unknown_bcast_bytes),
+	GBENU_STATS_P4(ale_pol_match),
+	GBENU_STATS_P4(ale_pol_match_red),
+	GBENU_STATS_P4(ale_pol_match_yellow),
 	GBENU_STATS_P4(tx_mem_protect_err),
+	GBENU_STATS_P4(tx_pri0_drop),
+	GBENU_STATS_P4(tx_pri1_drop),
+	GBENU_STATS_P4(tx_pri2_drop),
+	GBENU_STATS_P4(tx_pri3_drop),
+	GBENU_STATS_P4(tx_pri4_drop),
+	GBENU_STATS_P4(tx_pri5_drop),
+	GBENU_STATS_P4(tx_pri6_drop),
+	GBENU_STATS_P4(tx_pri7_drop),
+	GBENU_STATS_P4(tx_pri0_drop_bcnt),
+	GBENU_STATS_P4(tx_pri1_drop_bcnt),
+	GBENU_STATS_P4(tx_pri2_drop_bcnt),
+	GBENU_STATS_P4(tx_pri3_drop_bcnt),
+	GBENU_STATS_P4(tx_pri4_drop_bcnt),
+	GBENU_STATS_P4(tx_pri5_drop_bcnt),
+	GBENU_STATS_P4(tx_pri6_drop_bcnt),
+	GBENU_STATS_P4(tx_pri7_drop_bcnt),
 	/* GBENU Module 5 */
 	GBENU_STATS_P5(rx_good_frames),
 	GBENU_STATS_P5(rx_broadcast_frames),
@@ -1211,7 +1304,26 @@
 	GBENU_STATS_P5(ale_unknown_mcast_bytes),
 	GBENU_STATS_P5(ale_unknown_bcast),
 	GBENU_STATS_P5(ale_unknown_bcast_bytes),
+	GBENU_STATS_P5(ale_pol_match),
+	GBENU_STATS_P5(ale_pol_match_red),
+	GBENU_STATS_P5(ale_pol_match_yellow),
 	GBENU_STATS_P5(tx_mem_protect_err),
+	GBENU_STATS_P5(tx_pri0_drop),
+	GBENU_STATS_P5(tx_pri1_drop),
+	GBENU_STATS_P5(tx_pri2_drop),
+	GBENU_STATS_P5(tx_pri3_drop),
+	GBENU_STATS_P5(tx_pri4_drop),
+	GBENU_STATS_P5(tx_pri5_drop),
+	GBENU_STATS_P5(tx_pri6_drop),
+	GBENU_STATS_P5(tx_pri7_drop),
+	GBENU_STATS_P5(tx_pri0_drop_bcnt),
+	GBENU_STATS_P5(tx_pri1_drop_bcnt),
+	GBENU_STATS_P5(tx_pri2_drop_bcnt),
+	GBENU_STATS_P5(tx_pri3_drop_bcnt),
+	GBENU_STATS_P5(tx_pri4_drop_bcnt),
+	GBENU_STATS_P5(tx_pri5_drop_bcnt),
+	GBENU_STATS_P5(tx_pri6_drop_bcnt),
+	GBENU_STATS_P5(tx_pri7_drop_bcnt),
 	/* GBENU Module 6 */
 	GBENU_STATS_P6(rx_good_frames),
 	GBENU_STATS_P6(rx_broadcast_frames),
@@ -1258,7 +1370,26 @@
 	GBENU_STATS_P6(ale_unknown_mcast_bytes),
 	GBENU_STATS_P6(ale_unknown_bcast),
 	GBENU_STATS_P6(ale_unknown_bcast_bytes),
+	GBENU_STATS_P6(ale_pol_match),
+	GBENU_STATS_P6(ale_pol_match_red),
+	GBENU_STATS_P6(ale_pol_match_yellow),
 	GBENU_STATS_P6(tx_mem_protect_err),
+	GBENU_STATS_P6(tx_pri0_drop),
+	GBENU_STATS_P6(tx_pri1_drop),
+	GBENU_STATS_P6(tx_pri2_drop),
+	GBENU_STATS_P6(tx_pri3_drop),
+	GBENU_STATS_P6(tx_pri4_drop),
+	GBENU_STATS_P6(tx_pri5_drop),
+	GBENU_STATS_P6(tx_pri6_drop),
+	GBENU_STATS_P6(tx_pri7_drop),
+	GBENU_STATS_P6(tx_pri0_drop_bcnt),
+	GBENU_STATS_P6(tx_pri1_drop_bcnt),
+	GBENU_STATS_P6(tx_pri2_drop_bcnt),
+	GBENU_STATS_P6(tx_pri3_drop_bcnt),
+	GBENU_STATS_P6(tx_pri4_drop_bcnt),
+	GBENU_STATS_P6(tx_pri5_drop_bcnt),
+	GBENU_STATS_P6(tx_pri6_drop_bcnt),
+	GBENU_STATS_P6(tx_pri7_drop_bcnt),
 	/* GBENU Module 7 */
 	GBENU_STATS_P7(rx_good_frames),
 	GBENU_STATS_P7(rx_broadcast_frames),
@@ -1305,7 +1436,26 @@
 	GBENU_STATS_P7(ale_unknown_mcast_bytes),
 	GBENU_STATS_P7(ale_unknown_bcast),
 	GBENU_STATS_P7(ale_unknown_bcast_bytes),
+	GBENU_STATS_P7(ale_pol_match),
+	GBENU_STATS_P7(ale_pol_match_red),
+	GBENU_STATS_P7(ale_pol_match_yellow),
 	GBENU_STATS_P7(tx_mem_protect_err),
+	GBENU_STATS_P7(tx_pri0_drop),
+	GBENU_STATS_P7(tx_pri1_drop),
+	GBENU_STATS_P7(tx_pri2_drop),
+	GBENU_STATS_P7(tx_pri3_drop),
+	GBENU_STATS_P7(tx_pri4_drop),
+	GBENU_STATS_P7(tx_pri5_drop),
+	GBENU_STATS_P7(tx_pri6_drop),
+	GBENU_STATS_P7(tx_pri7_drop),
+	GBENU_STATS_P7(tx_pri0_drop_bcnt),
+	GBENU_STATS_P7(tx_pri1_drop_bcnt),
+	GBENU_STATS_P7(tx_pri2_drop_bcnt),
+	GBENU_STATS_P7(tx_pri3_drop_bcnt),
+	GBENU_STATS_P7(tx_pri4_drop_bcnt),
+	GBENU_STATS_P7(tx_pri5_drop_bcnt),
+	GBENU_STATS_P7(tx_pri6_drop_bcnt),
+	GBENU_STATS_P7(tx_pri7_drop_bcnt),
 	/* GBENU Module 8 */
 	GBENU_STATS_P8(rx_good_frames),
 	GBENU_STATS_P8(rx_broadcast_frames),
@@ -1352,7 +1502,26 @@
 	GBENU_STATS_P8(ale_unknown_mcast_bytes),
 	GBENU_STATS_P8(ale_unknown_bcast),
 	GBENU_STATS_P8(ale_unknown_bcast_bytes),
+	GBENU_STATS_P8(ale_pol_match),
+	GBENU_STATS_P8(ale_pol_match_red),
+	GBENU_STATS_P8(ale_pol_match_yellow),
 	GBENU_STATS_P8(tx_mem_protect_err),
+	GBENU_STATS_P8(tx_pri0_drop),
+	GBENU_STATS_P8(tx_pri1_drop),
+	GBENU_STATS_P8(tx_pri2_drop),
+	GBENU_STATS_P8(tx_pri3_drop),
+	GBENU_STATS_P8(tx_pri4_drop),
+	GBENU_STATS_P8(tx_pri5_drop),
+	GBENU_STATS_P8(tx_pri6_drop),
+	GBENU_STATS_P8(tx_pri7_drop),
+	GBENU_STATS_P8(tx_pri0_drop_bcnt),
+	GBENU_STATS_P8(tx_pri1_drop_bcnt),
+	GBENU_STATS_P8(tx_pri2_drop_bcnt),
+	GBENU_STATS_P8(tx_pri3_drop_bcnt),
+	GBENU_STATS_P8(tx_pri4_drop_bcnt),
+	GBENU_STATS_P8(tx_pri5_drop_bcnt),
+	GBENU_STATS_P8(tx_pri6_drop_bcnt),
+	GBENU_STATS_P8(tx_pri7_drop_bcnt),
 };
 
 #define XGBE_STATS0_INFO(field)				\
@@ -1554,70 +1723,97 @@
 	}
 }
 
-static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
 {
-	void __iomem *base = NULL;
-	u32  __iomem *p;
-	u32 tmp = 0;
+	void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
+	u32  __iomem *p_stats_entry;
 	int i;
 
 	for (i = 0; i < gbe_dev->num_et_stats; i++) {
-		base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
-		p = base + gbe_dev->et_stats[i].offset;
-		tmp = readl(p);
-		gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
+		if (gbe_dev->et_stats[i].type == stats_mod) {
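+			/* zero the accumulator and latch the current
+			 * hw counter as the new baseline
+			 */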
+			p_stats_entry = base + gbe_dev->et_stats[i].offset;
+			gbe_dev->hw_stats[i] = 0;
+			gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
+		}
+	}
+}
+
+static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
+					     int et_stats_entry)
+{
+	void __iomem *base = NULL;
+	u32  __iomem *p_stats_entry;
+	u32 curr, delta;
+
+	/* The hw_stats_regs pointers are already
+	 * set to the proper base for each module type.
+	 */
+	base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
+	p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
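+	/* unsigned 32-bit subtraction keeps the delta correct across rollover */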
+	curr = readl(p_stats_entry);
+	delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
+	gbe_dev->hw_stats_prev[et_stats_entry] = curr;
+	gbe_dev->hw_stats[et_stats_entry] += delta;
+}
+
+static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+{
+	int i;
+
+	for (i = 0; i < gbe_dev->num_et_stats; i++) {
+		gbe_update_hw_stats_entry(gbe_dev, i);
+
 		if (data)
 			data[i] = gbe_dev->hw_stats[i];
-		/* write-to-decrement:
-		 * new register value = old register value - write value
-		 */
-		writel(tmp, p);
 	}
 }
 
+static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
+					       int stats_mod)
+{
+	u32 val;
+
+	val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+
+	switch (stats_mod) {
+	case GBE_STATSA_MODULE:
+	case GBE_STATSB_MODULE:
+		val &= ~GBE_STATS_CD_SEL;
+		break;
+	case GBE_STATSC_MODULE:
+	case GBE_STATSD_MODULE:
+		val |= GBE_STATS_CD_SEL;
+		break;
+	default:
+		return;
+	}
+
+	/* make the stat module visible */
+	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+}
+
+static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
+{
+	gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
+	gbe_reset_mod_stats(gbe_dev, stats_mod);
+}
+
 static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
 {
-	void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
-	void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
-	u64 *hw_stats = &gbe_dev->hw_stats[0];
-	void __iomem *base = NULL;
-	u32  __iomem *p;
-	u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
-	int i, j, pair;
+	u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
+	int et_entry, j, pair;
 
 	for (pair = 0; pair < 2; pair++) {
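+		/* modules A/B and C/D share one register window;
+		 * map in one pair per pass
+		 */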
-		val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+		gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
+						      GBE_STATSC_MODULE :
+						      GBE_STATSA_MODULE));
 
-		if (pair == 0)
-			val &= ~GBE_STATS_CD_SEL;
-		else
-			val |= GBE_STATS_CD_SEL;
+		for (j = 0; j < half_num_et_stats; j++) {
+			et_entry = pair * half_num_et_stats + j;
+			gbe_update_hw_stats_entry(gbe_dev, et_entry);
 
-		/* make the stat modules visible */
-		writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
-
-		for (i = 0; i < pair_size; i++) {
-			j = pair * pair_size + i;
-			switch (gbe_dev->et_stats[j].type) {
-			case GBE_STATSA_MODULE:
-			case GBE_STATSC_MODULE:
-				base = gbe_statsa;
-			break;
-			case GBE_STATSB_MODULE:
-			case GBE_STATSD_MODULE:
-				base  = gbe_statsb;
-			break;
-			}
-
-			p = base + gbe_dev->et_stats[j].offset;
-			tmp = readl(p);
-			hw_stats[j] += tmp;
 			if (data)
-				data[j] = hw_stats[j];
-			/* write-to-decrement:
-			 * new register value = old register value - write value
-			 */
-			writel(tmp, p);
+				data[et_entry] = gbe_dev->hw_stats[et_entry];
 		}
 	}
 }
@@ -2207,14 +2403,15 @@
 		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
 	}
 
-	spin_lock_bh(&gbe_dev->hw_stats_lock);
+	/* The timer runs in BH (softirq) context, so BHs need not be blocked */
+	spin_lock(&gbe_dev->hw_stats_lock);
 
 	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
 		gbe_update_stats_ver14(gbe_dev, NULL);
 	else
 		gbe_update_stats(gbe_dev, NULL);
 
-	spin_unlock_bh(&gbe_dev->hw_stats_lock);
+	spin_unlock(&gbe_dev->hw_stats_lock);
 
 	gbe_dev->timer.expires	= jiffies + GBE_TIMER_INTERVAL;
 	add_timer(&gbe_dev->timer);
@@ -2571,15 +2768,28 @@
 	}
 	gbe_dev->xgbe_serdes_regs = regs;
 
+	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+	gbe_dev->et_stats = xgbe10_et_stats;
+	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
+
 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
-				  XGBE10_NUM_STAT_ENTRIES *
-				  (gbe_dev->max_num_ports) * sizeof(u64),
-				  GFP_KERNEL);
+					 gbe_dev->num_et_stats * sizeof(u64),
+					 GFP_KERNEL);
 	if (!gbe_dev->hw_stats) {
 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
 		return -ENOMEM;
 	}
 
+	gbe_dev->hw_stats_prev =
+		devm_kzalloc(gbe_dev->dev,
+			     gbe_dev->num_et_stats * sizeof(u32),
+			     GFP_KERNEL);
+	if (!gbe_dev->hw_stats_prev) {
+		dev_err(gbe_dev->dev,
+			"hw_stats_prev memory allocation failed\n");
+		return -ENOMEM;
+	}
+
 	gbe_dev->ss_version = XGBE_SS_VERSION_10;
 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
 					XGBE10_SGMII_MODULE_OFFSET;
@@ -2593,8 +2803,6 @@
 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
 	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
 	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
-	gbe_dev->et_stats = xgbe10_et_stats;
-	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
 	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
 
 	/* Subsystem registers */
@@ -2679,30 +2887,45 @@
 	}
 	gbe_dev->switch_regs = regs;
 
+	gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
+	gbe_dev->et_stats = gbe13_et_stats;
+	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
+
 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
-					  GBE13_NUM_HW_STAT_ENTRIES *
-					  gbe_dev->max_num_slaves * sizeof(u64),
-					  GFP_KERNEL);
+					 gbe_dev->num_et_stats * sizeof(u64),
+					 GFP_KERNEL);
 	if (!gbe_dev->hw_stats) {
 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
 		return -ENOMEM;
 	}
 
+	gbe_dev->hw_stats_prev =
+		devm_kzalloc(gbe_dev->dev,
+			     gbe_dev->num_et_stats * sizeof(u32),
+			     GFP_KERNEL);
+	if (!gbe_dev->hw_stats_prev) {
+		dev_err(gbe_dev->dev,
+			"hw_stats_prev memory allocation failed\n");
+		return -ENOMEM;
+	}
+
 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
 	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
 
+	/* K2HK has only 2 hw stats modules visible at a time, so
+	 * modules 0 & 2 point to one base and
+	 * modules 1 & 3 point to the other base
+	 */
 	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
 		gbe_dev->hw_stats_regs[i] =
 			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
-			(GBE_HW_STATS_REG_MAP_SZ * i);
+			(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
 	}
 
 	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
 	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
 	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
-	gbe_dev->et_stats = gbe13_et_stats;
-	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
 	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
 
 	/* Subsystem registers */
@@ -2729,15 +2952,34 @@
 	void __iomem *regs;
 	int i, ret;
 
+	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+	gbe_dev->et_stats = gbenu_et_stats;
+
+	if (IS_SS_ID_NU(gbe_dev))
+		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
+	else
+		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+					GBENU_ET_STATS_PORT_SIZE;
+
 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
-				  GBENU_NUM_HW_STAT_ENTRIES *
-				  (gbe_dev->max_num_ports) * sizeof(u64),
-				  GFP_KERNEL);
+					 gbe_dev->num_et_stats * sizeof(u64),
+					 GFP_KERNEL);
 	if (!gbe_dev->hw_stats) {
 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
 		return -ENOMEM;
 	}
 
+	gbe_dev->hw_stats_prev =
+		devm_kzalloc(gbe_dev->dev,
+			     gbe_dev->num_et_stats * sizeof(u32),
+			     GFP_KERNEL);
+	if (!gbe_dev->hw_stats_prev) {
+		dev_err(gbe_dev->dev,
+			"hw_stats_prev memory allocation failed\n");
+		return -ENOMEM;
+	}
+
 	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
 	if (ret) {
 		dev_err(gbe_dev->dev,
@@ -2765,16 +3007,8 @@
 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
 	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
 	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
-	gbe_dev->et_stats = gbenu_et_stats;
 	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
 
-	if (IS_SS_ID_NU(gbe_dev))
-		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
-			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
-	else
-		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
-					GBENU_ET_STATS_PORT_SIZE;
-
 	/* Subsystem registers */
 	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
 
@@ -2804,7 +3038,7 @@
 	struct cpsw_ale_params ale_params;
 	struct gbe_priv *gbe_dev;
 	u32 slave_num;
-	int ret = 0;
+	int i, ret = 0;
 
 	if (!node) {
 		dev_err(dev, "device tree info unavailable\n");
@@ -2951,6 +3185,15 @@
 	/* initialize host port */
 	gbe_init_host_port(gbe_dev);
 
+	spin_lock_bh(&gbe_dev->hw_stats_lock);
+	for (i = 0; i < gbe_dev->num_stats_mods; i++) {
+		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+			gbe_reset_mod_stats_ver14(gbe_dev, i);
+		else
+			gbe_reset_mod_stats(gbe_dev, i);
+	}
+	spin_unlock_bh(&gbe_dev->hw_stats_lock);
+
 	init_timer(&gbe_dev->timer);
 	gbe_dev->timer.data	 = (unsigned long)gbe_dev;
 	gbe_dev->timer.function = netcp_ethss_timer;
diff --git a/drivers/net/fddi/skfp/h/hwmtm.h b/drivers/net/fddi/skfp/h/hwmtm.h
index 5924d42..4ca2341 100644
--- a/drivers/net/fddi/skfp/h/hwmtm.h
+++ b/drivers/net/fddi/skfp/h/hwmtm.h
@@ -74,15 +74,6 @@
 #define NULL 		0
 #endif
 
-#ifdef	LITTLE_ENDIAN
-#define HWM_REVERSE(x)	(x)
-#else
-#define	HWM_REVERSE(x)		((((x)<<24L)&0xff000000L)	+	\
-				 (((x)<< 8L)&0x00ff0000L)	+	\
-				 (((x)>> 8L)&0x0000ff00L)	+	\
-				 (((x)>>24L)&0x000000ffL))
-#endif
-
 #define C_INDIC		(1L<<25)
 #define A_INDIC		(1L<<26)
 #define	RD_FS_LOCAL	0x80
diff --git a/drivers/net/fjes/Makefile b/drivers/net/fjes/Makefile
new file mode 100644
index 0000000..523e3d7
--- /dev/null
+++ b/drivers/net/fjes/Makefile
@@ -0,0 +1,30 @@
+################################################################################
+#
+# FUJITSU Extended Socket Network Device driver
+# Copyright (c) 2015 FUJITSU LIMITED
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+################################################################################
+
+
+#
+# Makefile for the FUJITSU Extended Socket network device driver
+#
+
+obj-$(CONFIG_FUJITSU_ES) += fjes.o
+
+fjes-objs := fjes_main.o fjes_hw.o fjes_ethtool.o
diff --git a/drivers/net/fjes/fjes.h b/drivers/net/fjes/fjes.h
new file mode 100644
index 0000000..a592fe2
--- /dev/null
+++ b/drivers/net/fjes/fjes.h
@@ -0,0 +1,77 @@
+/*
+ *  FUJITSU Extended Socket Network Device driver
+ *  Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef FJES_H_
+#define FJES_H_
+
+#include <linux/acpi.h>
+
+#include "fjes_hw.h"
+
+#define FJES_ACPI_SYMBOL	"Extended Socket"
+#define FJES_MAX_QUEUES		1
+#define FJES_TX_RETRY_INTERVAL	(20 * HZ)
+#define FJES_TX_RETRY_TIMEOUT	(100)
+#define FJES_TX_TX_STALL_TIMEOUT	(FJES_TX_RETRY_INTERVAL / 2)
+#define FJES_OPEN_ZONE_UPDATE_WAIT	(300) /* msec */
+#define FJES_IRQ_WATCH_DELAY	(HZ)
+
+/* board specific private data structure */
+struct fjes_adapter {
+	struct net_device *netdev;
+	struct platform_device *plat_dev;
+
+	struct napi_struct napi;
+	struct rtnl_link_stats64 stats64;
+
+	unsigned int tx_retry_count;
+	unsigned long tx_start_jiffies;
+	unsigned long rx_last_jiffies;
+	bool unset_rx_last;
+
+	struct work_struct force_close_task;
+	bool force_reset;
+	bool open_guard;
+
+	bool irq_registered;
+
+	struct workqueue_struct *txrx_wq;
+	struct workqueue_struct *control_wq;
+
+	struct work_struct tx_stall_task;
+	struct work_struct raise_intr_rxdata_task;
+
+	struct work_struct unshare_watch_task;
+	unsigned long unshare_watch_bitmask;
+
+	struct delayed_work interrupt_watch_task;
+	bool interrupt_watch_enable;
+
+	struct fjes_hw hw;
+};
+
+extern char fjes_driver_name[];
+extern char fjes_driver_version[];
+extern const u32 fjes_support_mtu[];
+
+void fjes_set_ethtool_ops(struct net_device *);
+
+#endif /* FJES_H_ */
diff --git a/drivers/net/fjes/fjes_ethtool.c b/drivers/net/fjes/fjes_ethtool.c
new file mode 100644
index 0000000..0119dd1
--- /dev/null
+++ b/drivers/net/fjes/fjes_ethtool.c
@@ -0,0 +1,137 @@
+/*
+ *  FUJITSU Extended Socket Network Device driver
+ *  Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+/* ethtool support for fjes */
+
+#include <linux/vmalloc.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+
+#include "fjes.h"
+
+struct fjes_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define FJES_STAT(name, stat) { \
+	.stat_string = name, \
+	.sizeof_stat = FIELD_SIZEOF(struct fjes_adapter, stat), \
+	.stat_offset = offsetof(struct fjes_adapter, stat) \
+}
+
+static const struct fjes_stats fjes_gstrings_stats[] = {
+	FJES_STAT("rx_packets", stats64.rx_packets),
+	FJES_STAT("tx_packets", stats64.tx_packets),
+	FJES_STAT("rx_bytes", stats64.rx_bytes),
+	FJES_STAT("tx_bytes", stats64.rx_bytes),
+	FJES_STAT("rx_dropped", stats64.rx_dropped),
+	FJES_STAT("tx_dropped", stats64.tx_dropped),
+};
+
+static void fjes_get_ethtool_stats(struct net_device *netdev,
+				   struct ethtool_stats *stats, u64 *data)
+{
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+	char *p;
+	int i;
+
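+	/* copy each counter out of the adapter struct via its recorded offset */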
+	for (i = 0; i < ARRAY_SIZE(fjes_gstrings_stats); i++) {
+		p = (char *)adapter + fjes_gstrings_stats[i].stat_offset;
+		data[i] = (fjes_gstrings_stats[i].sizeof_stat == sizeof(u64))
+			? *(u64 *)p : *(u32 *)p;
+	}
+}
+
+static void fjes_get_strings(struct net_device *netdev,
+			     u32 stringset, u8 *data)
+{
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(fjes_gstrings_stats); i++) {
+			memcpy(p, fjes_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static int fjes_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(fjes_gstrings_stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void fjes_get_drvinfo(struct net_device *netdev,
+			     struct ethtool_drvinfo *drvinfo)
+{
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+	struct platform_device *plat_dev;
+
+	plat_dev = adapter->plat_dev;
+
+	strlcpy(drvinfo->driver, fjes_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, fjes_driver_version,
+		sizeof(drvinfo->version));
+
+	strlcpy(drvinfo->fw_version, "none", sizeof(drvinfo->fw_version));
+	snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
+		 "platform:%s", plat_dev->name);
+	drvinfo->regdump_len = 0;
+	drvinfo->eedump_len = 0;
+}
+
+static int fjes_get_settings(struct net_device *netdev,
+			     struct ethtool_cmd *ecmd)
+{
+	ecmd->supported = 0;
+	ecmd->advertising = 0;
+	ecmd->duplex = DUPLEX_FULL;
+	ecmd->autoneg = AUTONEG_DISABLE;
+	ecmd->transceiver = XCVR_DUMMY1;
+	ecmd->port = PORT_NONE;
+	ethtool_cmd_speed_set(ecmd, 20000);	/* 20Gb/s */
+
+	return 0;
+}
+
+static const struct ethtool_ops fjes_ethtool_ops = {
+	.get_settings		= fjes_get_settings,
+	.get_drvinfo		= fjes_get_drvinfo,
+	.get_ethtool_stats	= fjes_get_ethtool_stats,
+	.get_strings		= fjes_get_strings,
+	.get_sset_count		= fjes_get_sset_count,
+};
+
+void fjes_set_ethtool_ops(struct net_device *netdev)
+{
+	netdev->ethtool_ops = &fjes_ethtool_ops;
+}
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
new file mode 100644
index 0000000..b5f4a78
--- /dev/null
+++ b/drivers/net/fjes/fjes_hw.c
@@ -0,0 +1,1125 @@
+/*
+ *  FUJITSU Extended Socket Network Device driver
+ *  Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include "fjes_hw.h"
+#include "fjes.h"
+
+static void fjes_hw_update_zone_task(struct work_struct *);
+static void fjes_hw_epstop_task(struct work_struct *);
+
+/* supported MTU list */
+const u32 fjes_support_mtu[] = {
+	FJES_MTU_DEFINE(8 * 1024),
+	FJES_MTU_DEFINE(16 * 1024),
+	FJES_MTU_DEFINE(32 * 1024),
+	FJES_MTU_DEFINE(64 * 1024),
+	0
+};
+
+u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg)
+{
+	u8 *base = hw->base;
+	u32 value = 0;
+
+	value = readl(&base[reg]);
+
+	return value;
+}
+
+static u8 *fjes_hw_iomap(struct fjes_hw *hw)
+{
+	u8 *base;
+
+	if (!request_mem_region(hw->hw_res.start, hw->hw_res.size,
+				fjes_driver_name)) {
+		pr_err("request_mem_region failed\n");
+		return NULL;
+	}
+
+	base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size);
+
+	return base;
+}
+
+static void fjes_hw_iounmap(struct fjes_hw *hw)
+{
+	iounmap(hw->base);
+	release_mem_region(hw->hw_res.start, hw->hw_res.size);
+}
+
+int fjes_hw_reset(struct fjes_hw *hw)
+{
+	union REG_DCTL dctl;
+	int timeout;
+
+	dctl.reg = 0;
+	dctl.bits.reset = 1;
+	wr32(XSCT_DCTL, dctl.reg);
+
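+	/* poll until the device clears the reset bit or the timeout expires */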
+	timeout = FJES_DEVICE_RESET_TIMEOUT * 1000;
+	dctl.reg = rd32(XSCT_DCTL);
+	while ((dctl.bits.reset == 1) && (timeout > 0)) {
+		msleep(1000);
+		dctl.reg = rd32(XSCT_DCTL);
+		timeout -= 1000;
+	}
+
+	return timeout > 0 ? 0 : -EIO;
+}
+
+static int fjes_hw_get_max_epid(struct fjes_hw *hw)
+{
+	union REG_MAX_EP info;
+
+	info.reg = rd32(XSCT_MAX_EP);
+
+	return info.bits.maxep;
+}
+
+static int fjes_hw_get_my_epid(struct fjes_hw *hw)
+{
+	union REG_OWNER_EPID info;
+
+	info.reg = rd32(XSCT_OWNER_EPID);
+
+	return info.bits.epid;
+}
+
+static int fjes_hw_alloc_shared_status_region(struct fjes_hw *hw)
+{
+	size_t size;
+
+	size = sizeof(struct fjes_device_shared_info) +
+	    (sizeof(u8) * hw->max_epid);
+	hw->hw_info.share = kzalloc(size, GFP_KERNEL);
+	if (!hw->hw_info.share)
+		return -ENOMEM;
+
+	hw->hw_info.share->epnum = hw->max_epid;
+
+	return 0;
+}
+
+static void fjes_hw_free_shared_status_region(struct fjes_hw *hw)
+{
+	kfree(hw->hw_info.share);
+	hw->hw_info.share = NULL;
+}
+
+static int fjes_hw_alloc_epbuf(struct epbuf_handler *epbh)
+{
+	void *mem;
+
+	mem = vzalloc(EP_BUFFER_SIZE);
+	if (!mem)
+		return -ENOMEM;
+
+	epbh->buffer = mem;
+	epbh->size = EP_BUFFER_SIZE;
+
+	epbh->info = (union ep_buffer_info *)mem;
+	epbh->ring = (u8 *)(mem + sizeof(union ep_buffer_info));
+
+	return 0;
+}
+
+static void fjes_hw_free_epbuf(struct epbuf_handler *epbh)
+{
+	if (epbh->buffer)
+		vfree(epbh->buffer);
+
+	epbh->buffer = NULL;
+	epbh->size = 0;
+
+	epbh->info = NULL;
+	epbh->ring = NULL;
+}
+
+void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu)
+{
+	union ep_buffer_info *info = epbh->info;
+	u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
+	int i;
+
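+	/* save the configured VLAN filters so they survive the reinit below */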
+	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
+		vlan_id[i] = info->v1i.vlan_id[i];
+
+	memset(info, 0, sizeof(union ep_buffer_info));
+
+	info->v1i.version = 0;  /* version 0 */
+
+	for (i = 0; i < ETH_ALEN; i++)
+		info->v1i.mac_addr[i] = mac_addr[i];
+
+	info->v1i.head = 0;
+	info->v1i.tail = 1;
+
+	info->v1i.info_size = sizeof(union ep_buffer_info);
+	info->v1i.buffer_size = epbh->size - info->v1i.info_size;
+
+	info->v1i.frame_max = FJES_MTU_TO_FRAME_SIZE(mtu);
+	info->v1i.count_max =
+	    EP_RING_NUM(info->v1i.buffer_size, info->v1i.frame_max);
+
+	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
+		info->v1i.vlan_id[i] = vlan_id[i];
+}
+
+void
+fjes_hw_init_command_registers(struct fjes_hw *hw,
+			       struct fjes_device_command_param *param)
+{
+	/* Request Buffer length */
+	wr32(XSCT_REQBL, (__le32)(param->req_len));
+	/* Response Buffer Length */
+	wr32(XSCT_RESPBL, (__le32)(param->res_len));
+
+	/* Request Buffer Address */
+	wr32(XSCT_REQBAL,
+	     (__le32)(param->req_start & GENMASK_ULL(31, 0)));
+	wr32(XSCT_REQBAH,
+	     (__le32)((param->req_start & GENMASK_ULL(63, 32)) >> 32));
+
+	/* Response Buffer Address */
+	wr32(XSCT_RESPBAL,
+	     (__le32)(param->res_start & GENMASK_ULL(31, 0)));
+	wr32(XSCT_RESPBAH,
+	     (__le32)((param->res_start & GENMASK_ULL(63, 32)) >> 32));
+
+	/* Share status address */
+	wr32(XSCT_SHSTSAL,
+	     (__le32)(param->share_start & GENMASK_ULL(31, 0)));
+	wr32(XSCT_SHSTSAH,
+	     (__le32)((param->share_start & GENMASK_ULL(63, 32)) >> 32));
+}
+
+static int fjes_hw_setup(struct fjes_hw *hw)
+{
+	u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+	struct fjes_device_command_param param;
+	struct ep_share_mem_info *buf_pair;
+	size_t mem_size;
+	int result;
+	int epidx;
+	void *buf;
+
+	hw->hw_info.max_epid = &hw->max_epid;
+	hw->hw_info.my_epid = &hw->my_epid;
+
+	buf = kcalloc(hw->max_epid, sizeof(struct ep_share_mem_info),
+		      GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	hw->ep_shm_info = (struct ep_share_mem_info *)buf;
+
+	mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
+	hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
+	if (!(hw->hw_info.req_buf))
+		return -ENOMEM;
+
+	hw->hw_info.req_buf_size = mem_size;
+
+	mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
+	hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
+	if (!(hw->hw_info.res_buf))
+		return -ENOMEM;
+
+	hw->hw_info.res_buf_size = mem_size;
+
+	result = fjes_hw_alloc_shared_status_region(hw);
+	if (result)
+		return result;
+
+	hw->hw_info.buffer_share_bit = 0;
+	hw->hw_info.buffer_unshare_reserve_bit = 0;
+
+	for (epidx = 0; epidx < hw->max_epid; epidx++) {
+		if (epidx != hw->my_epid) {
+			buf_pair = &hw->ep_shm_info[epidx];
+
+			result = fjes_hw_alloc_epbuf(&buf_pair->tx);
+			if (result)
+				return result;
+
+			result = fjes_hw_alloc_epbuf(&buf_pair->rx);
+			if (result)
+				return result;
+
+			fjes_hw_setup_epbuf(&buf_pair->tx, mac,
+					    fjes_support_mtu[0]);
+			fjes_hw_setup_epbuf(&buf_pair->rx, mac,
+					    fjes_support_mtu[0]);
+		}
+	}
+
+	memset(&param, 0, sizeof(param));
+
+	param.req_len = hw->hw_info.req_buf_size;
+	param.req_start = __pa(hw->hw_info.req_buf);
+	param.res_len = hw->hw_info.res_buf_size;
+	param.res_start = __pa(hw->hw_info.res_buf);
+
+	param.share_start = __pa(hw->hw_info.share->ep_status);
+
+	fjes_hw_init_command_registers(hw, &param);
+
+	return 0;
+}
+
+static void fjes_hw_cleanup(struct fjes_hw *hw)
+{
+	int epidx;
+
+	if (!hw->ep_shm_info)
+		return;
+
+	fjes_hw_free_shared_status_region(hw);
+
+	kfree(hw->hw_info.req_buf);
+	hw->hw_info.req_buf = NULL;
+
+	kfree(hw->hw_info.res_buf);
+	hw->hw_info.res_buf = NULL;
+
+	for (epidx = 0; epidx < hw->max_epid ; epidx++) {
+		if (epidx == hw->my_epid)
+			continue;
+		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
+		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
+	}
+
+	kfree(hw->ep_shm_info);
+	hw->ep_shm_info = NULL;
+}
+
+int fjes_hw_init(struct fjes_hw *hw)
+{
+	int ret;
+
+	hw->base = fjes_hw_iomap(hw);
+	if (!hw->base)
+		return -EIO;
+
+	ret = fjes_hw_reset(hw);
+	if (ret)
+		return ret;
+
+	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
+
+	INIT_WORK(&hw->update_zone_task, fjes_hw_update_zone_task);
+	INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);
+
+	mutex_init(&hw->hw_info.lock);
+
+	hw->max_epid = fjes_hw_get_max_epid(hw);
+	hw->my_epid = fjes_hw_get_my_epid(hw);
+
+	if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
+		return -ENXIO;
+
+	ret = fjes_hw_setup(hw);
+
+	return ret;
+}
+
+void fjes_hw_exit(struct fjes_hw *hw)
+{
+	int ret;
+
+	if (hw->base) {
+		ret = fjes_hw_reset(hw);
+		if (ret)
+			pr_err("%s: reset error", __func__);
+
+		fjes_hw_iounmap(hw);
+		hw->base = NULL;
+	}
+
+	fjes_hw_cleanup(hw);
+
+	cancel_work_sync(&hw->update_zone_task);
+	cancel_work_sync(&hw->epstop_task);
+}
+
+static enum fjes_dev_command_response_e
+fjes_hw_issue_request_command(struct fjes_hw *hw,
+			      enum fjes_dev_command_request_type type)
+{
+	enum fjes_dev_command_response_e ret = FJES_CMD_STATUS_UNKNOWN;
+	union REG_CR cr;
+	union REG_CS cs;
+	int timeout;
+
+	cr.reg = 0;
+	cr.bits.req_start = 1;
+	cr.bits.req_code = type;
+	wr32(XSCT_CR, cr.reg);
+	cr.reg = rd32(XSCT_CR);
+
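+	/* request errors are flagged immediately in CR;
+	 * otherwise poll CS until the command completes
+	 */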
+	if (cr.bits.error == 0) {
+		timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
+		cs.reg = rd32(XSCT_CS);
+
+		while ((cs.bits.complete != 1) && timeout > 0) {
+			msleep(1000);
+			cs.reg = rd32(XSCT_CS);
+			timeout -= 1000;
+		}
+
+		if (cs.bits.complete == 1)
+			ret = FJES_CMD_STATUS_NORMAL;
+		else if (timeout <= 0)
+			ret = FJES_CMD_STATUS_TIMEOUT;
+
+	} else {
+		switch (cr.bits.err_info) {
+		case FJES_CMD_REQ_ERR_INFO_PARAM:
+			ret = FJES_CMD_STATUS_ERROR_PARAM;
+			break;
+		case FJES_CMD_REQ_ERR_INFO_STATUS:
+			ret = FJES_CMD_STATUS_ERROR_STATUS;
+			break;
+		default:
+			ret = FJES_CMD_STATUS_UNKNOWN;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+int fjes_hw_request_info(struct fjes_hw *hw)
+{
+	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
+	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
+	enum fjes_dev_command_response_e ret;
+	int result;
+
+	memset(req_buf, 0, hw->hw_info.req_buf_size);
+	memset(res_buf, 0, hw->hw_info.res_buf_size);
+
+	req_buf->info.length = FJES_DEV_COMMAND_INFO_REQ_LEN;
+
+	res_buf->info.length = 0;
+	res_buf->info.code = 0;
+
+	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_INFO);
+
+	result = 0;
+
+	if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw->hw_info.max_epid)) !=
+		res_buf->info.length) {
+		result = -ENOMSG;
+	} else if (ret == FJES_CMD_STATUS_NORMAL) {
+		switch (res_buf->info.code) {
+		case FJES_CMD_REQ_RES_CODE_NORMAL:
+			result = 0;
+			break;
+		default:
+			result = -EPERM;
+			break;
+		}
+	} else {
+		switch (ret) {
+		case FJES_CMD_STATUS_UNKNOWN:
+			result = -EPERM;
+			break;
+		case FJES_CMD_STATUS_TIMEOUT:
+			result = -EBUSY;
+			break;
+		case FJES_CMD_STATUS_ERROR_PARAM:
+			result = -EPERM;
+			break;
+		case FJES_CMD_STATUS_ERROR_STATUS:
+			result = -EPERM;
+			break;
+		default:
+			result = -EPERM;
+			break;
+		}
+	}
+
+	return result;
+}
+
+int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid,
+			       struct ep_share_mem_info *buf_pair)
+{
+	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
+	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
+	enum fjes_dev_command_response_e ret;
+	int page_count;
+	int timeout;
+	int i, idx;
+	void *addr;
+	int result;
+
+	if (test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
+		return 0;
+
+	memset(req_buf, 0, hw->hw_info.req_buf_size);
+	memset(res_buf, 0, hw->hw_info.res_buf_size);
+
+	req_buf->share_buffer.length = FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(
+						buf_pair->tx.size,
+						buf_pair->rx.size);
+	req_buf->share_buffer.epid = dest_epid;
+
+	idx = 0;
+	req_buf->share_buffer.buffer[idx++] = buf_pair->tx.size;
+	page_count = buf_pair->tx.size / EP_BUFFER_INFO_SIZE;
+	for (i = 0; i < page_count; i++) {
+		addr = ((u8 *)(buf_pair->tx.buffer)) +
+				(i * EP_BUFFER_INFO_SIZE);
+		req_buf->share_buffer.buffer[idx++] =
+				(__le64)(page_to_phys(vmalloc_to_page(addr)) +
+						offset_in_page(addr));
+	}
+
+	req_buf->share_buffer.buffer[idx++] = buf_pair->rx.size;
+	page_count = buf_pair->rx.size / EP_BUFFER_INFO_SIZE;
+	for (i = 0; i < page_count; i++) {
+		addr = ((u8 *)(buf_pair->rx.buffer)) +
+				(i * EP_BUFFER_INFO_SIZE);
+		req_buf->share_buffer.buffer[idx++] =
+				(__le64)(page_to_phys(vmalloc_to_page(addr)) +
+						offset_in_page(addr));
+	}
+
+	res_buf->share_buffer.length = 0;
+	res_buf->share_buffer.code = 0;
+
+	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_SHARE_BUFFER);
+
+	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
+	while ((ret == FJES_CMD_STATUS_NORMAL) &&
+	       (res_buf->share_buffer.length ==
+		FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) &&
+	       (res_buf->share_buffer.code == FJES_CMD_REQ_RES_CODE_BUSY) &&
+	       (timeout > 0)) {
+		msleep(200 + hw->my_epid * 20);
+		timeout -= (200 + hw->my_epid * 20);
+
+		res_buf->share_buffer.length = 0;
+		res_buf->share_buffer.code = 0;
+
+		ret = fjes_hw_issue_request_command(
+				hw, FJES_CMD_REQ_SHARE_BUFFER);
+	}
+
+	result = 0;
+
+	if (res_buf->share_buffer.length !=
+			FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN)
+		result = -ENOMSG;
+	else if (ret == FJES_CMD_STATUS_NORMAL) {
+		switch (res_buf->share_buffer.code) {
+		case FJES_CMD_REQ_RES_CODE_NORMAL:
+			result = 0;
+			set_bit(dest_epid, &hw->hw_info.buffer_share_bit);
+			break;
+		case FJES_CMD_REQ_RES_CODE_BUSY:
+			result = -EBUSY;
+			break;
+		default:
+			result = -EPERM;
+			break;
+		}
+	} else {
+		switch (ret) {
+		case FJES_CMD_STATUS_UNKNOWN:
+			result = -EPERM;
+			break;
+		case FJES_CMD_STATUS_TIMEOUT:
+			result = -EBUSY;
+			break;
+		case FJES_CMD_STATUS_ERROR_PARAM:
+		case FJES_CMD_STATUS_ERROR_STATUS:
+		default:
+			result = -EPERM;
+			break;
+		}
+	}
+
+	return result;
+}
+
+int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
+{
+	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
+	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
+	struct fjes_device_shared_info *share = hw->hw_info.share;
+	enum fjes_dev_command_response_e ret;
+	int timeout;
+	int result;
+
+	if (!hw->base)
+		return -EPERM;
+
+	if (!req_buf || !res_buf || !share)
+		return -EPERM;
+
+	if (!test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
+		return 0;
+
+	memset(req_buf, 0, hw->hw_info.req_buf_size);
+	memset(res_buf, 0, hw->hw_info.res_buf_size);
+
+	req_buf->unshare_buffer.length =
+			FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN;
+	req_buf->unshare_buffer.epid = dest_epid;
+
+	res_buf->unshare_buffer.length = 0;
+	res_buf->unshare_buffer.code = 0;
+
+	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
+
+	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
+	while ((ret == FJES_CMD_STATUS_NORMAL) &&
+	       (res_buf->unshare_buffer.length ==
+		FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) &&
+	       (res_buf->unshare_buffer.code ==
+		FJES_CMD_REQ_RES_CODE_BUSY) &&
+	       (timeout > 0)) {
+		msleep(200 + hw->my_epid * 20);
+		timeout -= (200 + hw->my_epid * 20);
+
+		res_buf->unshare_buffer.length = 0;
+		res_buf->unshare_buffer.code = 0;
+
+		ret = fjes_hw_issue_request_command(
+				hw, FJES_CMD_REQ_UNSHARE_BUFFER);
+	}
+
+	result = 0;
+
+	if (res_buf->unshare_buffer.length !=
+			FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) {
+		result = -ENOMSG;
+	} else if (ret == FJES_CMD_STATUS_NORMAL) {
+		switch (res_buf->unshare_buffer.code) {
+		case FJES_CMD_REQ_RES_CODE_NORMAL:
+			result = 0;
+			clear_bit(dest_epid, &hw->hw_info.buffer_share_bit);
+			break;
+		case FJES_CMD_REQ_RES_CODE_BUSY:
+			result = -EBUSY;
+			break;
+		default:
+			result = -EPERM;
+			break;
+		}
+	} else {
+		switch (ret) {
+		case FJES_CMD_STATUS_UNKNOWN:
+			result = -EPERM;
+			break;
+		case FJES_CMD_STATUS_TIMEOUT:
+			result = -EBUSY;
+			break;
+		case FJES_CMD_STATUS_ERROR_PARAM:
+		case FJES_CMD_STATUS_ERROR_STATUS:
+		default:
+			result = -EPERM;
+			break;
+		}
+	}
+
+	return result;
+}
+
+int fjes_hw_raise_interrupt(struct fjes_hw *hw, int dest_epid,
+			    enum REG_ICTL_MASK  mask)
+{
+	u32 ig = mask | dest_epid;
+
+	wr32(XSCT_IG, cpu_to_le32(ig));
+
+	return 0;
+}
+
+u32 fjes_hw_capture_interrupt_status(struct fjes_hw *hw)
+{
+	u32 cur_is;
+
+	cur_is = rd32(XSCT_IS);
+
+	return cur_is;
+}
+
+void fjes_hw_set_irqmask(struct fjes_hw *hw,
+			 enum REG_ICTL_MASK intr_mask, bool mask)
+{
+	if (mask)
+		wr32(XSCT_IMS, intr_mask);
+	else
+		wr32(XSCT_IMC, intr_mask);
+}
+
+bool fjes_hw_epid_is_same_zone(struct fjes_hw *hw, int epid)
+{
+	if (epid >= hw->max_epid)
+		return false;
+
+	if ((hw->ep_shm_info[epid].es_status !=
+			FJES_ZONING_STATUS_ENABLE) ||
+		(hw->ep_shm_info[hw->my_epid].zone ==
+			FJES_ZONING_ZONE_TYPE_NONE))
+		return false;
+	else
+		return (hw->ep_shm_info[epid].zone ==
+				hw->ep_shm_info[hw->my_epid].zone);
+}
+
+int fjes_hw_epid_is_shared(struct fjes_device_shared_info *share,
+			   int dest_epid)
+{
+	int value = false;
+
+	if (dest_epid < share->epnum)
+		value = share->ep_status[dest_epid];
+
+	return value;
+}
+
+static bool fjes_hw_epid_is_stop_requested(struct fjes_hw *hw, int src_epid)
+{
+	return test_bit(src_epid, &hw->txrx_stop_req_bit);
+}
+
+static bool fjes_hw_epid_is_stop_process_done(struct fjes_hw *hw, int src_epid)
+{
+	return (hw->ep_shm_info[src_epid].tx.info->v1i.rx_status &
+			FJES_RX_STOP_REQ_DONE);
+}
+
+enum ep_partner_status
+fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid)
+{
+	enum ep_partner_status status;
+
+	if (fjes_hw_epid_is_shared(hw->hw_info.share, epid)) {
+		if (fjes_hw_epid_is_stop_requested(hw, epid)) {
+			status = EP_PARTNER_WAITING;
+		} else {
+			if (fjes_hw_epid_is_stop_process_done(hw, epid))
+				status = EP_PARTNER_COMPLETE;
+			else
+				status = EP_PARTNER_SHARED;
+		}
+	} else {
+		status = EP_PARTNER_UNSHARE;
+	}
+
+	return status;
+}
+
+void fjes_hw_raise_epstop(struct fjes_hw *hw)
+{
+	enum ep_partner_status status;
+	int epidx;
+
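+	/* interrupt every shared partner and mark all peers
+	 * for unshare bookkeeping
+	 */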
+	for (epidx = 0; epidx < hw->max_epid; epidx++) {
+		if (epidx == hw->my_epid)
+			continue;
+
+		status = fjes_hw_get_partner_ep_status(hw, epidx);
+		switch (status) {
+		case EP_PARTNER_SHARED:
+			fjes_hw_raise_interrupt(hw, epidx,
+						REG_ICTL_MASK_TXRX_STOP_REQ);
+			break;
+		default:
+			break;
+		}
+
+		set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
+		set_bit(epidx, &hw->txrx_stop_req_bit);
+
+		hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
+				FJES_RX_STOP_REQ_REQUEST;
+	}
+}
+
+int fjes_hw_wait_epstop(struct fjes_hw *hw)
+{
+	enum ep_partner_status status;
+	union ep_buffer_info *info;
+	int wait_time = 0;
+	int epidx;
+
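+	/* wait until every partner acknowledges the stop request, or time out */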
+	while (hw->hw_info.buffer_unshare_reserve_bit &&
+	       (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)) {
+		for (epidx = 0; epidx < hw->max_epid; epidx++) {
+			if (epidx == hw->my_epid)
+				continue;
+			status = fjes_hw_epid_is_shared(hw->hw_info.share,
+							epidx);
+			info = hw->ep_shm_info[epidx].rx.info;
+			if ((!status ||
+			     (info->v1i.rx_status &
+			      FJES_RX_STOP_REQ_DONE)) &&
+			    test_bit(epidx,
+				     &hw->hw_info.buffer_unshare_reserve_bit)) {
+				clear_bit(epidx,
+					  &hw->hw_info.buffer_unshare_reserve_bit);
+			}
+		}
+
+		msleep(100);
+		wait_time += 100;
+	}
+
+	for (epidx = 0; epidx < hw->max_epid; epidx++) {
+		if (epidx == hw->my_epid)
+			continue;
+		if (test_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit))
+			clear_bit(epidx,
+				  &hw->hw_info.buffer_unshare_reserve_bit);
+	}
+
+	return (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)
+			? 0 : -EBUSY;
+}
+
+bool fjes_hw_check_epbuf_version(struct epbuf_handler *epbh, u32 version)
+{
+	union ep_buffer_info *info = epbh->info;
+
+	return (info->common.version == version);
+}
+
+bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu)
+{
+	union ep_buffer_info *info = epbh->info;
+
+	return (info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu));
+}
+
+bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
+{
+	union ep_buffer_info *info = epbh->info;
+	bool ret = false;
+	int i;
+
+	if (vlan_id == 0) {
+		ret = true;
+	} else {
+		for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
+			if (vlan_id == info->v1i.vlan_id[i]) {
+				ret = true;
+				break;
+			}
+		}
+	}
+	return ret;
+}
+
+bool fjes_hw_set_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
+{
+	union ep_buffer_info *info = epbh->info;
+	int i;
+
+	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
+		if (info->v1i.vlan_id[i] == 0) {
+			info->v1i.vlan_id[i] = vlan_id;
+			return true;
+		}
+	}
+	return false;
+}
+
+void fjes_hw_del_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
+{
+	union ep_buffer_info *info = epbh->info;
+	int i;
+
+	if (0 != vlan_id) {
+		for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
+			if (vlan_id == info->v1i.vlan_id[i])
+				info->v1i.vlan_id[i] = 0;
+		}
+	}
+}
+
+bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh)
+{
+	union ep_buffer_info *info = epbh->info;
+
+	if (info->v1i.count_max == 0)
+		return true;
+
+	return EP_RING_EMPTY(info->v1i.head, info->v1i.tail,
+			     info->v1i.count_max);
+}
+
+void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *epbh,
+				       size_t *psize)
+{
+	union ep_buffer_info *info = epbh->info;
+	struct esmem_frame *ring_frame;
+	void *frame;
+
+	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
+					     (info->v1i.head,
+					      info->v1i.count_max) *
+					     info->v1i.frame_max]);
+
+	*psize = (size_t)ring_frame->frame_size;
+
+	frame = ring_frame->frame_data;
+
+	return frame;
+}
+
+void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *epbh)
+{
+	union ep_buffer_info *info = epbh->info;
+
+	if (fjes_hw_epbuf_rx_is_empty(epbh))
+		return;
+
+	EP_RING_INDEX_INC(epbh->info->v1i.head, info->v1i.count_max);
+}
+
+int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *epbh,
+			      void *frame, size_t size)
+{
+	union ep_buffer_info *info = epbh->info;
+	struct esmem_frame *ring_frame;
+
+	if (EP_RING_FULL(info->v1i.head, info->v1i.tail, info->v1i.count_max))
+		return -ENOBUFS;
+
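+	/* tail - 1 is the next free slot; slots are frame_max bytes apart */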
+	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
+					     (info->v1i.tail - 1,
+					      info->v1i.count_max) *
+					     info->v1i.frame_max]);
+
+	ring_frame->frame_size = size;
+	memcpy((void *)(ring_frame->frame_data), (void *)frame, size);
+
+	EP_RING_INDEX_INC(epbh->info->v1i.tail, info->v1i.count_max);
+
+	return 0;
+}
+
+static void fjes_hw_update_zone_task(struct work_struct *work)
+{
+	struct fjes_hw *hw = container_of(work,
+			struct fjes_hw, update_zone_task);
+
+	struct my_s { u8 es_status; u8 zone; } *info;
+	union fjes_device_command_res *res_buf;
+	enum ep_partner_status pstatus;
+
+	struct fjes_adapter *adapter;
+	struct net_device *netdev;
+
+	ulong unshare_bit = 0;
+	ulong share_bit = 0;
+	ulong irq_bit = 0;
+
+	int epidx;
+	int ret;
+
+	adapter = (struct fjes_adapter *)hw->back;
+	netdev = adapter->netdev;
+	res_buf = hw->hw_info.res_buf;
+	info = (struct my_s *)&res_buf->info.info;
+
+	mutex_lock(&hw->hw_info.lock);
+
+	ret = fjes_hw_request_info(hw);
+	switch (ret) {
+	case -ENOMSG:
+	case -EBUSY:
+	default:
+		if (!work_pending(&adapter->force_close_task)) {
+			adapter->force_reset = true;
+			schedule_work(&adapter->force_close_task);
+		}
+		break;
+
+	case 0:
+
+		for (epidx = 0; epidx < hw->max_epid; epidx++) {
+			if (epidx == hw->my_epid) {
+				hw->ep_shm_info[epidx].es_status =
+					info[epidx].es_status;
+				hw->ep_shm_info[epidx].zone =
+					info[epidx].zone;
+				continue;
+			}
+
+			pstatus = fjes_hw_get_partner_ep_status(hw, epidx);
+			switch (pstatus) {
+			case EP_PARTNER_UNSHARE:
+			default:
+				if ((info[epidx].zone !=
+					FJES_ZONING_ZONE_TYPE_NONE) &&
+				    (info[epidx].es_status ==
+					FJES_ZONING_STATUS_ENABLE) &&
+				    (info[epidx].zone ==
+					info[hw->my_epid].zone))
+					set_bit(epidx, &share_bit);
+				else
+					set_bit(epidx, &unshare_bit);
+				break;
+
+			case EP_PARTNER_COMPLETE:
+			case EP_PARTNER_WAITING:
+				if ((info[epidx].zone ==
+					FJES_ZONING_ZONE_TYPE_NONE) ||
+				    (info[epidx].es_status !=
+					FJES_ZONING_STATUS_ENABLE) ||
+				    (info[epidx].zone !=
+					info[hw->my_epid].zone)) {
+					set_bit(epidx,
+						&adapter->unshare_watch_bitmask);
+					set_bit(epidx,
+						&hw->hw_info.buffer_unshare_reserve_bit);
+				}
+				break;
+
+			case EP_PARTNER_SHARED:
+				if ((info[epidx].zone ==
+					FJES_ZONING_ZONE_TYPE_NONE) ||
+				    (info[epidx].es_status !=
+					FJES_ZONING_STATUS_ENABLE) ||
+				    (info[epidx].zone !=
+					info[hw->my_epid].zone))
+					set_bit(epidx, &irq_bit);
+				break;
+			}
+
+			/* cache the latest zoning info for this endpoint */
+			hw->ep_shm_info[epidx].es_status =
+				info[epidx].es_status;
+			hw->ep_shm_info[epidx].zone = info[epidx].zone;
+		}
+
+		break;
+	}
+
+	mutex_unlock(&hw->hw_info.lock);
+
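+	/* act on the share/unshare decisions recorded above */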
+	for (epidx = 0; epidx < hw->max_epid; epidx++) {
+		if (epidx == hw->my_epid)
+			continue;
+
+		if (test_bit(epidx, &share_bit)) {
+			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
+					    netdev->dev_addr, netdev->mtu);
+
+			mutex_lock(&hw->hw_info.lock);
+
+			ret = fjes_hw_register_buff_addr(
+				hw, epidx, &hw->ep_shm_info[epidx]);
+
+			switch (ret) {
+			case 0:
+				break;
+			case -ENOMSG:
+			case -EBUSY:
+			default:
+				if (!work_pending(&adapter->force_close_task)) {
+					adapter->force_reset = true;
+					schedule_work(
+					  &adapter->force_close_task);
+				}
+				break;
+			}
+			mutex_unlock(&hw->hw_info.lock);
+		}
+
+		if (test_bit(epidx, &unshare_bit)) {
+			mutex_lock(&hw->hw_info.lock);
+
+			ret = fjes_hw_unregister_buff_addr(hw, epidx);
+
+			switch (ret) {
+			case 0:
+				break;
+			case -ENOMSG:
+			case -EBUSY:
+			default:
+				if (!work_pending(&adapter->force_close_task)) {
+					adapter->force_reset = true;
+					schedule_work(
+					  &adapter->force_close_task);
+				}
+				break;
+			}
+
+			mutex_unlock(&hw->hw_info.lock);
+
+			if (ret == 0)
+				fjes_hw_setup_epbuf(
+					&hw->ep_shm_info[epidx].tx,
+					netdev->dev_addr, netdev->mtu);
+		}
+
+		if (test_bit(epidx, &irq_bit)) {
+			fjes_hw_raise_interrupt(hw, epidx,
+						REG_ICTL_MASK_TXRX_STOP_REQ);
+
+			set_bit(epidx, &hw->txrx_stop_req_bit);
+			hw->ep_shm_info[epidx].tx.
+				info->v1i.rx_status |=
+					FJES_RX_STOP_REQ_REQUEST;
+			set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
+		}
+	}
+
+	if (irq_bit || adapter->unshare_watch_bitmask) {
+		if (!work_pending(&adapter->unshare_watch_task))
+			queue_work(adapter->control_wq,
+				   &adapter->unshare_watch_task);
+	}
+}
+
+static void fjes_hw_epstop_task(struct work_struct *work)
+{
+	struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
+	struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back;
+
+	ulong remain_bit;
+	int epid_bit;
+
+	while ((remain_bit = hw->epstop_req_bit)) {
+		for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) {
+			if (remain_bit & 1) {
+				hw->ep_shm_info[epid_bit].
+					tx.info->v1i.rx_status |=
+						FJES_RX_STOP_REQ_DONE;
+
+				clear_bit(epid_bit, &hw->epstop_req_bit);
+				set_bit(epid_bit,
+					&adapter->unshare_watch_bitmask);
+
+				if (!work_pending(&adapter->unshare_watch_task))
+					queue_work(
+						adapter->control_wq,
+						&adapter->unshare_watch_task);
+			}
+		}
+	}
+}
diff --git a/drivers/net/fjes/fjes_hw.h b/drivers/net/fjes/fjes_hw.h
new file mode 100644
index 0000000..6d57b89
--- /dev/null
+++ b/drivers/net/fjes/fjes_hw.h
@@ -0,0 +1,334 @@
+/*
+ *  FUJITSU Extended Socket Network Device driver
+ *  Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef FJES_HW_H_
+#define FJES_HW_H_
+
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+
+#include "fjes_regs.h"
+
+struct fjes_hw;
+
+#define EP_BUFFER_SUPPORT_VLAN_MAX 4
+#define EP_BUFFER_INFO_SIZE 4096
+
+#define FJES_DEVICE_RESET_TIMEOUT  ((17 + 1) * 3) /* sec */
+#define FJES_COMMAND_REQ_TIMEOUT  (5 + 1) /* sec */
+#define FJES_COMMAND_REQ_BUFF_TIMEOUT	(8 * 3) /* sec */
+#define FJES_COMMAND_EPSTOP_WAIT_TIMEOUT	(1) /* sec */
+
+#define FJES_CMD_REQ_ERR_INFO_PARAM  (0x0001)
+#define FJES_CMD_REQ_ERR_INFO_STATUS (0x0002)
+
+#define FJES_CMD_REQ_RES_CODE_NORMAL (0)
+#define FJES_CMD_REQ_RES_CODE_BUSY   (1)
+
+#define FJES_ZONING_STATUS_DISABLE	(0x00)
+#define FJES_ZONING_STATUS_ENABLE	(0x01)
+#define FJES_ZONING_STATUS_INVALID	(0xFF)
+
+#define FJES_ZONING_ZONE_TYPE_NONE (0xFF)
+
+#define FJES_TX_DELAY_SEND_NONE		(0)
+#define FJES_TX_DELAY_SEND_PENDING	(1)
+
+#define FJES_RX_STOP_REQ_NONE		(0x0)
+#define FJES_RX_STOP_REQ_DONE		(0x1)
+#define FJES_RX_STOP_REQ_REQUEST	(0x2)
+#define FJES_RX_POLL_WORK		(0x4)
+
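+/* per-direction buffer: info header plus 128 frames of 64KiB,
+ * rounded down to a multiple of EP_BUFFER_INFO_SIZE
+ */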
+#define EP_BUFFER_SIZE \
+	(((sizeof(union ep_buffer_info) + (128 * (64 * 1024))) \
+		/ EP_BUFFER_INFO_SIZE) * EP_BUFFER_INFO_SIZE)
+
+#define EP_RING_NUM(buffer_size, frame_size) \
+		(u32)((buffer_size) / (frame_size))
+#define EP_RING_INDEX(_num, _max) (((_num) + (_max)) % (_max))
+#define EP_RING_INDEX_INC(_num, _max) \
+	((_num) = EP_RING_INDEX((_num) + 1, (_max)))
+#define EP_RING_FULL(_head, _tail, _max)				\
+	(0 == EP_RING_INDEX(((_tail) - (_head)), (_max)))
+#define EP_RING_EMPTY(_head, _tail, _max) \
+	(1 == EP_RING_INDEX(((_tail) - (_head)), (_max)))
+
+#define FJES_MTU_TO_BUFFER_SIZE(mtu) \
+	(ETH_HLEN + VLAN_HLEN + (mtu) + ETH_FCS_LEN)
+#define FJES_MTU_TO_FRAME_SIZE(mtu) \
+	(sizeof(struct esmem_frame) + FJES_MTU_TO_BUFFER_SIZE(mtu))
+#define FJES_MTU_DEFINE(size) \
+	((size) - sizeof(struct esmem_frame) - \
+	(ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+
+#define FJES_DEV_COMMAND_INFO_REQ_LEN	(4)
+#define FJES_DEV_COMMAND_INFO_RES_LEN(epnum) (8 + 2 * (epnum))
+#define FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(txb, rxb) \
+	(24 + (8 * ((txb) / EP_BUFFER_INFO_SIZE + (rxb) / EP_BUFFER_INFO_SIZE)))
+#define FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN	(8)
+#define FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN	(8)
+#define FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN	(8)
+
+#define FJES_DEV_REQ_BUF_SIZE(maxep) \
+	FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(EP_BUFFER_SIZE, EP_BUFFER_SIZE)
+#define FJES_DEV_RES_BUF_SIZE(maxep) \
+	FJES_DEV_COMMAND_INFO_RES_LEN(maxep)
+
+/* Frame & MTU */
+struct esmem_frame {
+	__le32 frame_size;
+	u8 frame_data[];
+};
+
+/* EP partner status */
+enum ep_partner_status {
+	EP_PARTNER_UNSHARE,
+	EP_PARTNER_SHARED,
+	EP_PARTNER_WAITING,
+	EP_PARTNER_COMPLETE,
+	EP_PARTNER_STATUS_MAX,
+};
+
+/* shared status region */
+struct fjes_device_shared_info {
+	int epnum;
+	u8 ep_status[];
+};
+
+/* structures for command control request data*/
+union fjes_device_command_req {
+	struct {
+		__le32 length;
+	} info;
+	struct {
+		__le32 length;
+		__le32 epid;
+		__le64 buffer[];
+	} share_buffer;
+	struct {
+		__le32 length;
+		__le32 epid;
+	} unshare_buffer;
+	struct {
+		__le32 length;
+		__le32 mode;
+		__le64 buffer_len;
+		__le64 buffer[];
+	} start_trace;
+	struct {
+		__le32 length;
+	} stop_trace;
+};
+
+/* structures for command control response data */
+union fjes_device_command_res {
+	struct {
+		__le32 length;
+		__le32 code;
+		struct {
+			u8 es_status;
+			u8 zone;
+		} info[];
+	} info;
+	struct {
+		__le32 length;
+		__le32 code;
+	} share_buffer;
+	struct {
+		__le32 length;
+		__le32 code;
+	} unshare_buffer;
+	struct {
+		__le32 length;
+		__le32 code;
+	} start_trace;
+	struct {
+		__le32 length;
+		__le32 code;
+	} stop_trace;
+};
+
+/* request command type */
+enum fjes_dev_command_request_type {
+	FJES_CMD_REQ_INFO		= 0x0001,
+	FJES_CMD_REQ_SHARE_BUFFER	= 0x0002,
+	FJES_CMD_REQ_UNSHARE_BUFFER	= 0x0004,
+};
+
+/* parameter for command control */
+struct fjes_device_command_param {
+	u32 req_len;
+	phys_addr_t req_start;
+	u32 res_len;
+	phys_addr_t res_start;
+	phys_addr_t share_start;
+};
+
+/* error code for command control */
+enum fjes_dev_command_response_e {
+	FJES_CMD_STATUS_UNKNOWN,
+	FJES_CMD_STATUS_NORMAL,
+	FJES_CMD_STATUS_TIMEOUT,
+	FJES_CMD_STATUS_ERROR_PARAM,
+	FJES_CMD_STATUS_ERROR_STATUS,
+};
+
+/* EP buffer information */
+union ep_buffer_info {
+	u8 raw[EP_BUFFER_INFO_SIZE];
+
+	struct _ep_buffer_info_common_t {
+		u32 version;
+	} common;
+
+	struct _ep_buffer_info_v1_t {
+		u32 version;
+		u32 info_size;
+
+		u32 buffer_size;
+		u16 count_max;
+
+		u16 _rsv_1;
+
+		u32 frame_max;
+		u8 mac_addr[ETH_ALEN];
+
+		u16 _rsv_2;
+		u32 _rsv_3;
+
+		u16 tx_status;
+		u16 rx_status;
+
+		u32 head;
+		u32 tail;
+
+		u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
+
+	} v1i;
+
+};
+
+/* buffer pair for Extended Partition */
+struct ep_share_mem_info {
+	struct epbuf_handler {
+		void *buffer;
+		size_t size;
+		union ep_buffer_info *info;
+		u8 *ring;
+	} tx, rx;
+
+	struct rtnl_link_stats64 net_stats;
+
+	u16 tx_status_work;
+
+	u8 es_status;
+	u8 zone;
+};
+
+struct es_device_trace {
+	u32 record_num;
+	u32 current_record;
+	u32 status_flag;
+	u32 _rsv;
+
+	struct {
+		u16 epid;
+		u16 dir_offset;
+		u32 data;
+		u64 tsc;
+	} record[];
+};
+
+struct fjes_hw_info {
+	struct fjes_device_shared_info *share;
+	union fjes_device_command_req *req_buf;
+	u64 req_buf_size;
+	union fjes_device_command_res *res_buf;
+	u64 res_buf_size;
+
+	int *my_epid;
+	int *max_epid;
+
+	struct es_device_trace *trace;
+	u64 trace_size;
+
+	struct mutex lock; /* buffer lock*/
+
+	unsigned long buffer_share_bit;
+	unsigned long buffer_unshare_reserve_bit;
+};
+
+struct fjes_hw {
+	void *back;
+
+	unsigned long txrx_stop_req_bit;
+	unsigned long epstop_req_bit;
+	struct work_struct update_zone_task;
+	struct work_struct epstop_task;
+
+	int my_epid;
+	int max_epid;
+
+	struct ep_share_mem_info *ep_shm_info;
+
+	struct fjes_hw_resource {
+		u64 start;
+		u64 size;
+		int irq;
+	} hw_res;
+
+	u8 *base;
+
+	struct fjes_hw_info hw_info;
+};
+
+int fjes_hw_init(struct fjes_hw *);
+void fjes_hw_exit(struct fjes_hw *);
+int fjes_hw_reset(struct fjes_hw *);
+int fjes_hw_request_info(struct fjes_hw *);
+int fjes_hw_register_buff_addr(struct fjes_hw *, int,
+			       struct ep_share_mem_info *);
+int fjes_hw_unregister_buff_addr(struct fjes_hw *, int);
+void fjes_hw_init_command_registers(struct fjes_hw *,
+				    struct fjes_device_command_param *);
+void fjes_hw_setup_epbuf(struct epbuf_handler *, u8 *, u32);
+int fjes_hw_raise_interrupt(struct fjes_hw *, int, enum REG_ICTL_MASK);
+void fjes_hw_set_irqmask(struct fjes_hw *, enum REG_ICTL_MASK, bool);
+u32 fjes_hw_capture_interrupt_status(struct fjes_hw *);
+void fjes_hw_raise_epstop(struct fjes_hw *);
+int fjes_hw_wait_epstop(struct fjes_hw *);
+enum ep_partner_status
+	fjes_hw_get_partner_ep_status(struct fjes_hw *, int);
+
+bool fjes_hw_epid_is_same_zone(struct fjes_hw *, int);
+int fjes_hw_epid_is_shared(struct fjes_device_shared_info *, int);
+bool fjes_hw_check_epbuf_version(struct epbuf_handler *, u32);
+bool fjes_hw_check_mtu(struct epbuf_handler *, u32);
+bool fjes_hw_check_vlan_id(struct epbuf_handler *, u16);
+bool fjes_hw_set_vlan_id(struct epbuf_handler *, u16);
+void fjes_hw_del_vlan_id(struct epbuf_handler *, u16);
+bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *);
+void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *, size_t *);
+void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *);
+int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *, void *, size_t);
+
+#endif /* FJES_HW_H_ */
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
new file mode 100644
index 0000000..0ddb54f
--- /dev/null
+++ b/drivers/net/fjes/fjes_main.c
@@ -0,0 +1,1383 @@
+/*
+ *  FUJITSU Extended Socket Network Device driver
+ *  Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/nls.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+
+#include "fjes.h"
+
+#define MAJ 1
+#define MIN 0
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
+#define DRV_NAME	"fjes"
+char fjes_driver_name[] = DRV_NAME;
+char fjes_driver_version[] = DRV_VERSION;
+static const char fjes_driver_string[] =
+		"FUJITSU Extended Socket Network Device Driver";
+static const char fjes_copyright[] =
+		"Copyright (c) 2015 FUJITSU LIMITED";
+
+MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
+MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int fjes_request_irq(struct fjes_adapter *);
+static void fjes_free_irq(struct fjes_adapter *);
+
+static int fjes_open(struct net_device *);
+static int fjes_close(struct net_device *);
+static int fjes_setup_resources(struct fjes_adapter *);
+static void fjes_free_resources(struct fjes_adapter *);
+static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *);
+static void fjes_raise_intr_rxdata_task(struct work_struct *);
+static void fjes_tx_stall_task(struct work_struct *);
+static void fjes_force_close_task(struct work_struct *);
+static irqreturn_t fjes_intr(int, void*);
+static struct rtnl_link_stats64 *
+fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
+static int fjes_change_mtu(struct net_device *, int);
+static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
+static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
+static void fjes_tx_retry(struct net_device *);
+
+static int fjes_acpi_add(struct acpi_device *);
+static int fjes_acpi_remove(struct acpi_device *);
+static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void*);
+
+static int fjes_probe(struct platform_device *);
+static int fjes_remove(struct platform_device *);
+
+static int fjes_sw_init(struct fjes_adapter *);
+static void fjes_netdev_setup(struct net_device *);
+static void fjes_irq_watch_task(struct work_struct *);
+static void fjes_watch_unshare_task(struct work_struct *);
+static void fjes_rx_irq(struct fjes_adapter *, int);
+static int fjes_poll(struct napi_struct *, int);
+
+static const struct acpi_device_id fjes_acpi_ids[] = {
+	{"PNP0C02", 0},
+	{"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
+
+static struct acpi_driver fjes_acpi_driver = {
+	.name = DRV_NAME,
+	.class = DRV_NAME,
+	.owner = THIS_MODULE,
+	.ids = fjes_acpi_ids,
+	.ops = {
+		.add = fjes_acpi_add,
+		.remove = fjes_acpi_remove,
+	},
+};
+
+static struct platform_driver fjes_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = fjes_probe,
+	.remove = fjes_remove,
+};
+
+static struct resource fjes_resource[] = {
+	{
+		.flags = IORESOURCE_MEM,
+		.start = 0,
+		.end = 0,
+	},
+	{
+		.flags = IORESOURCE_IRQ,
+		.start = 0,
+		.end = 0,
+	},
+};
+
+static int fjes_acpi_add(struct acpi_device *device)
+{
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+	char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
+	struct platform_device *plat_dev;
+	union acpi_object *str;
+	acpi_status status;
+	int result;
+
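+	/* match the _STR description, since PNP0C02 is a generic ID */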
+	status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
+	str = buffer.pointer;
+	result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
+				 str->string.length, UTF16_LITTLE_ENDIAN,
+				 str_buf, sizeof(str_buf) - 1);
+	str_buf[result] = 0;
+
+	if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
+		kfree(buffer.pointer);
+		return -ENODEV;
+	}
+	kfree(buffer.pointer);
+
+	status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+				     fjes_get_acpi_resource, fjes_resource);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
+	/* create platform_device */
+	plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
+						   ARRAY_SIZE(fjes_resource));
+	device->driver_data = plat_dev;
+
+	return 0;
+}
+
+static int fjes_acpi_remove(struct acpi_device *device)
+{
+	struct platform_device *plat_dev;
+
+	plat_dev = (struct platform_device *)acpi_driver_data(device);
+	platform_device_unregister(plat_dev);
+
+	return 0;
+}
+
+static acpi_status
+fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
+{
+	struct acpi_resource_address32 *addr;
+	struct acpi_resource_irq *irq;
+	struct resource *res = data;
+
+	switch (acpi_res->type) {
+	case ACPI_RESOURCE_TYPE_ADDRESS32:
+		addr = &acpi_res->data.address32;
+		res[0].start = addr->address.minimum;
+		res[0].end = addr->address.minimum +
+			addr->address.address_length - 1;
+		break;
+
+	case ACPI_RESOURCE_TYPE_IRQ:
+		irq = &acpi_res->data.irq;
+		if (irq->interrupt_count != 1)
+			return AE_ERROR;
+		res[1].start = irq->interrupts[0];
+		res[1].end = irq->interrupts[0];
+		break;
+
+	default:
+		break;
+	}
+
+	return AE_OK;
+}
+
+static int fjes_request_irq(struct fjes_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int result = -1;
+
+	adapter->interrupt_watch_enable = true;
+	if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
+		queue_delayed_work(adapter->control_wq,
+				   &adapter->interrupt_watch_task,
+				   FJES_IRQ_WATCH_DELAY);
+	}
+
+	if (!adapter->irq_registered) {
+		result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
+				     IRQF_SHARED, netdev->name, adapter);
+		if (result)
+			adapter->irq_registered = false;
+		else
+			adapter->irq_registered = true;
+	}
+
+	return result;
+}
+
+static void fjes_free_irq(struct fjes_adapter *adapter)
+{
+	struct fjes_hw *hw = &adapter->hw;
+
+	adapter->interrupt_watch_enable = false;
+	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+
+	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
+
+	if (adapter->irq_registered) {
+		free_irq(adapter->hw.hw_res.irq, adapter);
+		adapter->irq_registered = false;
+	}
+}
+
+static const struct net_device_ops fjes_netdev_ops = {
+	.ndo_open		= fjes_open,
+	.ndo_stop		= fjes_close,
+	.ndo_start_xmit		= fjes_xmit_frame,
+	.ndo_get_stats64	= fjes_get_stats64,
+	.ndo_change_mtu		= fjes_change_mtu,
+	.ndo_tx_timeout		= fjes_tx_retry,
+	.ndo_vlan_rx_add_vid	= fjes_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
+};
+
+/* fjes_open - Called when a network interface is made active */
+static int fjes_open(struct net_device *netdev)
+{
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+	struct fjes_hw *hw = &adapter->hw;
+	int result;
+
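+	/* open_guard is set when a previous hardware reset failed */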
+	if (adapter->open_guard)
+		return -ENXIO;
+
+	result = fjes_setup_resources(adapter);
+	if (result)
+		goto err_setup_res;
+
+	hw->txrx_stop_req_bit = 0;
+	hw->epstop_req_bit = 0;
+
+	napi_enable(&adapter->napi);
+
+	fjes_hw_capture_interrupt_status(hw);
+
+	result = fjes_request_irq(adapter);
+	if (result)
+		goto err_req_irq;
+
+	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);
+
+	netif_tx_start_all_queues(netdev);
+	netif_carrier_on(netdev);
+
+	return 0;
+
+err_req_irq:
+	fjes_free_irq(adapter);
+	napi_disable(&adapter->napi);
+
+err_setup_res:
+	fjes_free_resources(adapter);
+	return result;
+}
+
+/* fjes_close - Disables a network interface */
+static int fjes_close(struct net_device *netdev)
+{
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+	struct fjes_hw *hw = &adapter->hw;
+	int epidx;
+
+	netif_tx_stop_all_queues(netdev);
+	netif_carrier_off(netdev);
+
+	fjes_hw_raise_epstop(hw);
+
+	napi_disable(&adapter->napi);
+
+	for (epidx = 0; epidx < hw->max_epid; epidx++) {
+		if (epidx == hw->my_epid)
+			continue;
+
+		adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status &=
+			~FJES_RX_POLL_WORK;
+	}
+
+	fjes_free_irq(adapter);
+
+	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+	cancel_work_sync(&adapter->unshare_watch_task);
+	adapter->unshare_watch_bitmask = 0;
+	cancel_work_sync(&adapter->raise_intr_rxdata_task);
+	cancel_work_sync(&adapter->tx_stall_task);
+
+	cancel_work_sync(&hw->update_zone_task);
+	cancel_work_sync(&hw->epstop_task);
+
+	fjes_hw_wait_epstop(hw);
+
+	fjes_free_resources(adapter);
+
+	return 0;
+}
+
+static int fjes_setup_resources(struct fjes_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct ep_share_mem_info *buf_pair;
+	struct fjes_hw *hw = &adapter->hw;
+	int result;
+	int epidx;
+
+	mutex_lock(&hw->hw_info.lock);
+	result = fjes_hw_request_info(hw);
+	switch (result) {
+	case 0:
+		for (epidx = 0; epidx < hw->max_epid; epidx++) {
+			hw->ep_shm_info[epidx].es_status =
+			    hw->hw_info.res_buf->info.info[epidx].es_status;
+			hw->ep_shm_info[epidx].zone =
+			    hw->hw_info.res_buf->info.info[epidx].zone;
+		}
+		break;
+	default:
+	case -ENOMSG:
+	case -EBUSY:
+		adapter->force_reset = true;
+
+		mutex_unlock(&hw->hw_info.lock);
+		return result;
+	}
+	mutex_unlock(&hw->hw_info.lock);
+
+	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
+		if ((epidx != hw->my_epid) &&
+		    (hw->ep_shm_info[epidx].es_status ==
+		     FJES_ZONING_STATUS_ENABLE)) {
+			fjes_hw_raise_interrupt(hw, epidx,
+						REG_ICTL_MASK_INFO_UPDATE);
+		}
+	}
+
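+	/* give peers time to process the INFO_UPDATE interrupts raised above */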
+	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);
+
+	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
+		if (epidx == hw->my_epid)
+			continue;
+
+		buf_pair = &hw->ep_shm_info[epidx];
+
+		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
+				    netdev->mtu);
+
+		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
+			mutex_lock(&hw->hw_info.lock);
+			result = fjes_hw_register_buff_addr(hw, epidx,
+							    buf_pair);
+			mutex_unlock(&hw->hw_info.lock);
+
+			switch (result) {
+			case 0:
+				break;
+			case -ENOMSG:
+			case -EBUSY:
+			default:
+				adapter->force_reset = true;
+				return result;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void fjes_free_resources(struct fjes_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct fjes_device_command_param param;
+	struct ep_share_mem_info *buf_pair;
+	struct fjes_hw *hw = &adapter->hw;
+	bool reset_flag = false;
+	int result;
+	int epidx;
+
+	for (epidx = 0; epidx < hw->max_epid; epidx++) {
+		if (epidx == hw->my_epid)
+			continue;
+
+		mutex_lock(&hw->hw_info.lock);
+		result = fjes_hw_unregister_buff_addr(hw, epidx);
+		mutex_unlock(&hw->hw_info.lock);
+
+		if (result)
+			reset_flag = true;
+
+		buf_pair = &hw->ep_shm_info[epidx];
+
+		fjes_hw_setup_epbuf(&buf_pair->tx,
+				    netdev->dev_addr, netdev->mtu);
+
+		clear_bit(epidx, &hw->txrx_stop_req_bit);
+	}
+
+	if (reset_flag || adapter->force_reset) {
+		result = fjes_hw_reset(hw);
+
+		adapter->force_reset = false;
+
+		if (result)
+			adapter->open_guard = true;
+
+		hw->hw_info.buffer_share_bit = 0;
+
+		memset(&param, 0, sizeof(param));
+
+		param.req_len = hw->hw_info.req_buf_size;
+		param.req_start = __pa(hw->hw_info.req_buf);
+		param.res_len = hw->hw_info.res_buf_size;
+		param.res_start = __pa(hw->hw_info.res_buf);
+		param.share_start = __pa(hw->hw_info.share->ep_status);
+
+		fjes_hw_init_command_registers(hw, &param);
+	}
+}
+
+static void fjes_tx_stall_task(struct work_struct *work)
+{
+	struct fjes_adapter *adapter = container_of(work,
+			struct fjes_adapter, tx_stall_task);
+	struct net_device *netdev = adapter->netdev;
+	struct fjes_hw *hw = &adapter->hw;
+	int all_queue_available, sendable;
+	enum ep_partner_status pstatus;
+	int max_epid, my_epid, epid;
+	union ep_buffer_info *info;
+	int i;
+
+	if (((long)jiffies -
+		(long)(netdev->trans_start)) > FJES_TX_TX_STALL_TIMEOUT) {
+		netif_wake_queue(netdev);
+		return;
+	}
+
+	my_epid = hw->my_epid;
+	max_epid = hw->max_epid;
+
+	for (i = 0; i < 5; i++) {
+		all_queue_available = 1;
+
+		for (epid = 0; epid < max_epid; epid++) {
+			if (my_epid == epid)
+				continue;
+
+			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+			sendable = (pstatus == EP_PARTNER_SHARED);
+			if (!sendable)
+				continue;
+
+			info = adapter->hw.ep_shm_info[epid].tx.info;
+
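+			/* a full ring for any shared partner keeps the queue stopped */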
+			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
+					 info->v1i.count_max)) {
+				all_queue_available = 0;
+				break;
+			}
+		}
+
+		if (all_queue_available) {
+			netif_wake_queue(netdev);
+			return;
+		}
+	}
+
+	usleep_range(50, 100);
+
+	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
+}
+
+static void fjes_force_close_task(struct work_struct *work)
+{
+	struct fjes_adapter *adapter = container_of(work,
+			struct fjes_adapter, force_close_task);
+	struct net_device *netdev = adapter->netdev;
+
+	rtnl_lock();
+	dev_close(netdev);
+	rtnl_unlock();
+}
+
+static void fjes_raise_intr_rxdata_task(struct work_struct *work)
+{
+	struct fjes_adapter *adapter = container_of(work,
+			struct fjes_adapter, raise_intr_rxdata_task);
+	struct fjes_hw *hw = &adapter->hw;
+	enum ep_partner_status pstatus;
+	int max_epid, my_epid, epid;
+
+	my_epid = hw->my_epid;
+	max_epid = hw->max_epid;
+
+	for (epid = 0; epid < max_epid; epid++)
+		hw->ep_shm_info[epid].tx_status_work = 0;
+
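+	/* snapshot each shared partner's tx_status and clear SEND_PENDING */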
+	for (epid = 0; epid < max_epid; epid++) {
+		if (epid == my_epid)
+			continue;
+
+		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+		if (pstatus == EP_PARTNER_SHARED) {
+			hw->ep_shm_info[epid].tx_status_work =
+				hw->ep_shm_info[epid].tx.info->v1i.tx_status;
+
+			if (hw->ep_shm_info[epid].tx_status_work ==
+				FJES_TX_DELAY_SEND_PENDING) {
+				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
+					FJES_TX_DELAY_SEND_NONE;
+			}
+		}
+	}
+
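+	/* raise RX interrupts for pending partners not already polling */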
+	for (epid = 0; epid < max_epid; epid++) {
+		if (epid == my_epid)
+			continue;
+
+		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+		if ((hw->ep_shm_info[epid].tx_status_work ==
+		     FJES_TX_DELAY_SEND_PENDING) &&
+		    (pstatus == EP_PARTNER_SHARED) &&
+		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status)) {
+			fjes_hw_raise_interrupt(hw, epid,
+						REG_ICTL_MASK_RX_DATA);
+		}
+	}
+
+	usleep_range(500, 1000);
+}
+
+static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
+			void *data, size_t len)
+{
+	int retval;
+
+	retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
+					   data, len);
+	if (retval)
+		return retval;
+
+	adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
+		FJES_TX_DELAY_SEND_PENDING;
+	if (!work_pending(&adapter->raise_intr_rxdata_task))
+		queue_work(adapter->txrx_wq,
+			   &adapter->raise_intr_rxdata_task);
+
+	retval = 0;
+	return retval;
+}
+
+static netdev_tx_t
+fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+	struct fjes_hw *hw = &adapter->hw;
+
+	int max_epid, my_epid, dest_epid;
+	enum ep_partner_status pstatus;
+	struct netdev_queue *cur_queue;
+	char shortpkt[VLAN_ETH_HLEN];
+	bool is_multi, vlan;
+	struct ethhdr *eth;
+	u16 queue_no = 0;
+	u16 vlan_id = 0;
+	netdev_tx_t ret;
+	char *data;
+	int len;
+
+	ret = NETDEV_TX_OK;
+	is_multi = false;
+	cur_queue = netdev_get_tx_queue(netdev, queue_no);
+
+	eth = (struct ethhdr *)skb->data;
+	my_epid = hw->my_epid;
+
+	vlan = (vlan_get_tag(skb, &vlan_id) == 0);
+
+	data = skb->data;
+	len = skb->len;
+
+	if (is_multicast_ether_addr(eth->h_dest)) {
+		dest_epid = 0;
+		max_epid = hw->max_epid;
+		is_multi = true;
+	} else if (is_local_ether_addr(eth->h_dest)) {
+		dest_epid = eth->h_dest[ETH_ALEN - 1];
+		max_epid = dest_epid + 1;
+
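+		/* 02:00:00:00:00:xx encodes the destination EPID in octet 5 */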
+		if ((eth->h_dest[0] != 0x02) ||
+		    ((eth->h_dest[1] | eth->h_dest[2] |
+		      eth->h_dest[3] | eth->h_dest[4]) != 0x00) ||
+		    (dest_epid >= hw->max_epid)) {
+			dest_epid = 0;
+			max_epid = 0;
+			ret = NETDEV_TX_OK;
+
+			adapter->stats64.tx_packets += 1;
+			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
+			adapter->stats64.tx_bytes += len;
+			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
+		}
+	} else {
+		dest_epid = 0;
+		max_epid = 0;
+		ret = NETDEV_TX_OK;
+
+		adapter->stats64.tx_packets += 1;
+		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
+		adapter->stats64.tx_bytes += len;
+		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
+	}
+
+	for (; dest_epid < max_epid; dest_epid++) {
+		if (my_epid == dest_epid)
+			continue;
+
+		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
+		if (pstatus != EP_PARTNER_SHARED) {
+			ret = NETDEV_TX_OK;
+		} else if (!fjes_hw_check_epbuf_version(
+				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
+			/* partner's EP buffer version is not 0 (unsupported) */
+			adapter->stats64.tx_carrier_errors += 1;
+			hw->ep_shm_info[my_epid].net_stats
+						.tx_carrier_errors += 1;
+
+			ret = NETDEV_TX_OK;
+		} else if (!fjes_hw_check_mtu(
+				&adapter->hw.ep_shm_info[dest_epid].rx,
+				netdev->mtu)) {
+			adapter->stats64.tx_dropped += 1;
+			hw->ep_shm_info[my_epid].net_stats.tx_dropped += 1;
+			adapter->stats64.tx_errors += 1;
+			hw->ep_shm_info[my_epid].net_stats.tx_errors += 1;
+
+			ret = NETDEV_TX_OK;
+		} else if (vlan &&
+			   !fjes_hw_check_vlan_id(
+				&adapter->hw.ep_shm_info[dest_epid].rx,
+				vlan_id)) {
+			ret = NETDEV_TX_OK;
+		} else {
+			if (len < VLAN_ETH_HLEN) {
+				memset(shortpkt, 0, VLAN_ETH_HLEN);
+				memcpy(shortpkt, skb->data, skb->len);
+				len = VLAN_ETH_HLEN;
+				data = shortpkt;
+			}
+
+			if (adapter->tx_retry_count == 0) {
+				adapter->tx_start_jiffies = jiffies;
+				adapter->tx_retry_count = 1;
+			} else {
+				adapter->tx_retry_count++;
+			}
+
+			if (fjes_tx_send(adapter, dest_epid, data, len)) {
+				if (is_multi) {
+					ret = NETDEV_TX_OK;
+				} else if (((long)jiffies -
+					    (long)adapter->tx_start_jiffies) >=
+					   FJES_TX_RETRY_TIMEOUT) {
+					adapter->stats64.tx_fifo_errors += 1;
+					hw->ep_shm_info[my_epid].net_stats
+								.tx_fifo_errors += 1;
+					adapter->stats64.tx_errors += 1;
+					hw->ep_shm_info[my_epid].net_stats
+								.tx_errors += 1;
+
+					ret = NETDEV_TX_OK;
+				} else {
+					netdev->trans_start = jiffies;
+					netif_tx_stop_queue(cur_queue);
+
+					if (!work_pending(&adapter->tx_stall_task))
+						queue_work(adapter->txrx_wq,
+							   &adapter->tx_stall_task);
+
+					ret = NETDEV_TX_BUSY;
+				}
+			} else {
+				if (!is_multi) {
+					adapter->stats64.tx_packets += 1;
+					hw->ep_shm_info[my_epid].net_stats
+								.tx_packets += 1;
+					adapter->stats64.tx_bytes += len;
+					hw->ep_shm_info[my_epid].net_stats
+								.tx_bytes += len;
+				}
+
+				adapter->tx_retry_count = 0;
+				ret = NETDEV_TX_OK;
+			}
+		}
+	}
+
+	if (ret == NETDEV_TX_OK) {
+		dev_kfree_skb(skb);
+		if (is_multi) {
+			adapter->stats64.tx_packets += 1;
+			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
+			adapter->stats64.tx_bytes += len;
+			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
+		}
+	}
+
+	return ret;
+}
+
+static void fjes_tx_retry(struct net_device *netdev)
+{
+	struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
+
+	netif_tx_wake_queue(queue);
+}
+
+static struct rtnl_link_stats64 *
+fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+{
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+
+	memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
+
+	return stats;
+}
+
+static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	bool running = netif_running(netdev);
+	int ret = 0;
+	int idx;
+
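+	/* round the requested MTU up to the nearest supported size */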
+	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
+		if (new_mtu <= fjes_support_mtu[idx]) {
+			new_mtu = fjes_support_mtu[idx];
+			if (new_mtu == netdev->mtu)
+				return 0;
+
+			if (running)
+				fjes_close(netdev);
+
+			netdev->mtu = new_mtu;
+
+			if (running)
+				ret = fjes_open(netdev);
+
+			return ret;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int fjes_vlan_rx_add_vid(struct net_device *netdev,
+				__be16 proto, u16 vid)
+{
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+	bool ret = true;
+	int epid;
+
+	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
+		if (epid == adapter->hw.my_epid)
+			continue;
+
+		if (!fjes_hw_check_vlan_id(
+			&adapter->hw.ep_shm_info[epid].tx, vid))
+			ret = fjes_hw_set_vlan_id(
+				&adapter->hw.ep_shm_info[epid].tx, vid);
+	}
+
+	return ret ? 0 : -ENOSPC;
+}
+
+static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
+				 __be16 proto, u16 vid)
+{
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+	int epid;
+
+	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
+		if (epid == adapter->hw.my_epid)
+			continue;
+
+		fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
+	}
+
+	return 0;
+}
+
+static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
+				   int src_epid)
+{
+	struct fjes_hw *hw = &adapter->hw;
+	enum ep_partner_status status;
+
+	status = fjes_hw_get_partner_ep_status(hw, src_epid);
+	switch (status) {
+	case EP_PARTNER_UNSHARE:
+	case EP_PARTNER_COMPLETE:
+	default:
+		break;
+	case EP_PARTNER_WAITING:
+		if (src_epid < hw->my_epid) {
+			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
+				FJES_RX_STOP_REQ_DONE;
+
+			clear_bit(src_epid, &hw->txrx_stop_req_bit);
+			set_bit(src_epid, &adapter->unshare_watch_bitmask);
+
+			if (!work_pending(&adapter->unshare_watch_task))
+				queue_work(adapter->control_wq,
+					   &adapter->unshare_watch_task);
+		}
+		break;
+	case EP_PARTNER_SHARED:
+		if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
+		    FJES_RX_STOP_REQ_REQUEST) {
+			set_bit(src_epid, &hw->epstop_req_bit);
+			if (!work_pending(&hw->epstop_task))
+				queue_work(adapter->control_wq,
+					   &hw->epstop_task);
+		}
+		break;
+	}
+}
+
+static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
+{
+	struct fjes_hw *hw = &adapter->hw;
+	enum ep_partner_status status;
+
+	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
+
+	status = fjes_hw_get_partner_ep_status(hw, src_epid);
+	switch (status) {
+	case EP_PARTNER_WAITING:
+		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
+				FJES_RX_STOP_REQ_DONE;
+		clear_bit(src_epid, &hw->txrx_stop_req_bit);
+		/* fall through */
+	case EP_PARTNER_UNSHARE:
+	case EP_PARTNER_COMPLETE:
+	default:
+		set_bit(src_epid, &adapter->unshare_watch_bitmask);
+		if (!work_pending(&adapter->unshare_watch_task))
+			queue_work(adapter->control_wq,
+				   &adapter->unshare_watch_task);
+		break;
+	case EP_PARTNER_SHARED:
+		set_bit(src_epid, &hw->epstop_req_bit);
+
+		if (!work_pending(&hw->epstop_task))
+			queue_work(adapter->control_wq, &hw->epstop_task);
+		break;
+	}
+}
+
+static void fjes_update_zone_irq(struct fjes_adapter *adapter,
+				 int src_epid)
+{
+	struct fjes_hw *hw = &adapter->hw;
+
+	if (!work_pending(&hw->update_zone_task))
+		queue_work(adapter->control_wq, &hw->update_zone_task);
+}
+
+static irqreturn_t fjes_intr(int irq, void *data)
+{
+	struct fjes_adapter *adapter = data;
+	struct fjes_hw *hw = &adapter->hw;
+	irqreturn_t ret;
+	u32 icr;
+
+	icr = fjes_hw_capture_interrupt_status(hw);
+
+	if (icr & REG_IS_MASK_IS_ASSERT) {
+		if (icr & REG_ICTL_MASK_RX_DATA)
+			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
+
+		if (icr & REG_ICTL_MASK_DEV_STOP_REQ)
+			fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
+
+		if (icr & REG_ICTL_MASK_TXRX_STOP_REQ)
+			fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
+
+		if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
+			fjes_hw_set_irqmask(hw,
+					    REG_ICTL_MASK_TXRX_STOP_DONE, true);
+
+		if (icr & REG_ICTL_MASK_INFO_UPDATE)
+			fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
+
+		ret = IRQ_HANDLED;
+	} else {
+		ret = IRQ_NONE;
+	}
+
+	return ret;
+}
+
+static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
+				     int start_epid)
+{
+	struct fjes_hw *hw = &adapter->hw;
+	enum ep_partner_status pstatus;
+	int max_epid, cur_epid;
+	int i;
+
+	max_epid = hw->max_epid;
+	start_epid = (start_epid + 1 + max_epid) % max_epid;
+
+	for (i = 0; i < max_epid; i++) {
+		cur_epid = (start_epid + i) % max_epid;
+		if (cur_epid == hw->my_epid)
+			continue;
+
+		pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
+		if (pstatus == EP_PARTNER_SHARED) {
+			if (!fjes_hw_epbuf_rx_is_empty(
+				&hw->ep_shm_info[cur_epid].rx))
+				return cur_epid;
+		}
+	}
+	return -1;
+}
+
+static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
+			      int *cur_epid)
+{
+	void *frame;
+
+	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
+	if (*cur_epid < 0)
+		return NULL;
+
+	frame = fjes_hw_epbuf_rx_curpkt_get_addr(
+			&adapter->hw.ep_shm_info[*cur_epid].rx, psize);
+
+	return frame;
+}
+
+static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
+{
+	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
+}
+
+static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
+{
+	struct fjes_hw *hw = &adapter->hw;
+
+	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
+
+	adapter->unset_rx_last = true;
+	napi_schedule(&adapter->napi);
+}
+
+static int fjes_poll(struct napi_struct *napi, int budget)
+{
+	struct fjes_adapter *adapter =
+			container_of(napi, struct fjes_adapter, napi);
+	struct net_device *netdev = napi->dev;
+	struct fjes_hw *hw = &adapter->hw;
+	struct sk_buff *skb;
+	int work_done = 0;
+	int cur_epid = 0;
+	int epidx;
+	size_t frame_len;
+	void *frame;
+
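+	/* tell every partner we are polling so they can skip RX interrupts */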
+	for (epidx = 0; epidx < hw->max_epid; epidx++) {
+		if (epidx == hw->my_epid)
+			continue;
+
+		adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status |=
+			FJES_RX_POLL_WORK;
+	}
+
+	while (work_done < budget) {
+		prefetch(&adapter->hw);
+		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);
+
+		if (frame) {
+			skb = napi_alloc_skb(napi, frame_len);
+			if (!skb) {
+				adapter->stats64.rx_dropped += 1;
+				hw->ep_shm_info[cur_epid].net_stats
+							 .rx_dropped += 1;
+				adapter->stats64.rx_errors += 1;
+				hw->ep_shm_info[cur_epid].net_stats
+							 .rx_errors += 1;
+			} else {
+				memcpy(skb_put(skb, frame_len),
+				       frame, frame_len);
+				skb->protocol = eth_type_trans(skb, netdev);
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+				netif_receive_skb(skb);
+
+				work_done++;
+
+				adapter->stats64.rx_packets += 1;
+				hw->ep_shm_info[cur_epid].net_stats
+							 .rx_packets += 1;
+				adapter->stats64.rx_bytes += frame_len;
+				hw->ep_shm_info[cur_epid].net_stats
+							 .rx_bytes += frame_len;
+
+				if (is_multicast_ether_addr(
+					((struct ethhdr *)frame)->h_dest)) {
+					adapter->stats64.multicast += 1;
+					hw->ep_shm_info[cur_epid].net_stats
+								 .multicast += 1;
+				}
+			}
+
+			fjes_rxframe_release(adapter, cur_epid);
+			adapter->unset_rx_last = true;
+		} else {
+			break;
+		}
+	}
+
+	if (work_done < budget) {
+		napi_complete(napi);
+
+		if (adapter->unset_rx_last) {
+			adapter->rx_last_jiffies = jiffies;
+			adapter->unset_rx_last = false;
+		}
+
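+		/* keep polling for up to 3 jiffies after the last frame */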
+		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
+			napi_reschedule(napi);
+		} else {
+			for (epidx = 0; epidx < hw->max_epid; epidx++) {
+				if (epidx == hw->my_epid)
+					continue;
+				adapter->hw.ep_shm_info[epidx]
+					   .tx.info->v1i.rx_status &=
+						~FJES_RX_POLL_WORK;
+			}
+
+			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
+		}
+	}
+
+	return work_done;
+}
+
+/* fjes_probe - Device Initialization Routine */
+static int fjes_probe(struct platform_device *plat_dev)
+{
+	struct fjes_adapter *adapter;
+	struct net_device *netdev;
+	struct resource *res;
+	struct fjes_hw *hw;
+	int err;
+
+	err = -ENOMEM;
+	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
+				 NET_NAME_UNKNOWN, fjes_netdev_setup,
+				 FJES_MAX_QUEUES);
+
+	if (!netdev)
+		goto err_out;
+
+	SET_NETDEV_DEV(netdev, &plat_dev->dev);
+
+	dev_set_drvdata(&plat_dev->dev, netdev);
+	adapter = netdev_priv(netdev);
+	adapter->netdev = netdev;
+	adapter->plat_dev = plat_dev;
+	hw = &adapter->hw;
+	hw->back = adapter;
+
+	/* setup the private structure */
+	err = fjes_sw_init(adapter);
+	if (err)
+		goto err_free_netdev;
+
+	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
+	adapter->force_reset = false;
+	adapter->open_guard = false;
+
+	adapter->txrx_wq = create_workqueue(DRV_NAME "/txrx");
+	adapter->control_wq = create_workqueue(DRV_NAME "/control");
+
+	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
+	INIT_WORK(&adapter->raise_intr_rxdata_task,
+		  fjes_raise_intr_rxdata_task);
+	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
+	adapter->unshare_watch_bitmask = 0;
+
+	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
+	adapter->interrupt_watch_enable = false;
+
+	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+	hw->hw_res.start = res->start;
+	hw->hw_res.size = res->end - res->start + 1;
+	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
+	err = fjes_hw_init(&adapter->hw);
+	if (err)
+		goto err_free_netdev;
+
+	/* setup MAC address (02:00:00:00:00:[epid])*/
+	netdev->dev_addr[0] = 2;
+	netdev->dev_addr[1] = 0;
+	netdev->dev_addr[2] = 0;
+	netdev->dev_addr[3] = 0;
+	netdev->dev_addr[4] = 0;
+	netdev->dev_addr[5] = hw->my_epid; /* EPID */
+
+	err = register_netdev(netdev);
+	if (err)
+		goto err_hw_exit;
+
+	netif_carrier_off(netdev);
+
+	return 0;
+
+err_hw_exit:
+	fjes_hw_exit(&adapter->hw);
+err_free_netdev:
+	free_netdev(netdev);
+err_out:
+	return err;
+}
+
+/* fjes_remove - Device Removal Routine */
+static int fjes_remove(struct platform_device *plat_dev)
+{
+	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+	struct fjes_hw *hw = &adapter->hw;
+
+	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+	cancel_work_sync(&adapter->unshare_watch_task);
+	cancel_work_sync(&adapter->raise_intr_rxdata_task);
+	cancel_work_sync(&adapter->tx_stall_task);
+	if (adapter->control_wq)
+		destroy_workqueue(adapter->control_wq);
+	if (adapter->txrx_wq)
+		destroy_workqueue(adapter->txrx_wq);
+
+	unregister_netdev(netdev);
+
+	fjes_hw_exit(hw);
+
+	netif_napi_del(&adapter->napi);
+
+	free_netdev(netdev);
+
+	return 0;
+}
+
+static int fjes_sw_init(struct fjes_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);
+
+	return 0;
+}
+
+/* fjes_netdev_setup - netdevice initialization routine */
+static void fjes_netdev_setup(struct net_device *netdev)
+{
+	ether_setup(netdev);
+
+	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
+	netdev->netdev_ops = &fjes_netdev_ops;
+	fjes_set_ethtool_ops(netdev);
+	netdev->mtu = fjes_support_mtu[0];
+	netdev->flags |= IFF_BROADCAST;
+	netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
+}
+
+static void fjes_irq_watch_task(struct work_struct *work)
+{
+	struct fjes_adapter *adapter = container_of(to_delayed_work(work),
+			struct fjes_adapter, interrupt_watch_task);
+
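+	/* run the handler by hand in case an interrupt was missed */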
+	local_irq_disable();
+	fjes_intr(adapter->hw.hw_res.irq, adapter);
+	local_irq_enable();
+
+	if (fjes_rxframe_search_exist(adapter, 0) >= 0)
+		napi_schedule(&adapter->napi);
+
+	if (adapter->interrupt_watch_enable) {
+		if (!delayed_work_pending(&adapter->interrupt_watch_task))
+			queue_delayed_work(adapter->control_wq,
+					   &adapter->interrupt_watch_task,
+					   FJES_IRQ_WATCH_DELAY);
+	}
+}
+
+static void fjes_watch_unshare_task(struct work_struct *work)
+{
+	struct fjes_adapter *adapter =
+		container_of(work, struct fjes_adapter, unshare_watch_task);
+
+	struct net_device *netdev = adapter->netdev;
+	struct fjes_hw *hw = &adapter->hw;
+
+	int unshare_watch, unshare_reserve;
+	int max_epid, my_epid, epidx;
+	int stop_req, stop_req_done;
+	ulong unshare_watch_bitmask;
+	int wait_time = 0;
+	int is_shared;
+	int ret;
+
+	my_epid = hw->my_epid;
+	max_epid = hw->max_epid;
+
+	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
+	adapter->unshare_watch_bitmask = 0;
+
+	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
+	       (wait_time < 3000)) {
+		for (epidx = 0; epidx < hw->max_epid; epidx++) {
+			if (epidx == hw->my_epid)
+				continue;
+
+			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
+							   epidx);
+
+			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);
+
+			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
+					FJES_RX_STOP_REQ_DONE;
+
+			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);
+
+			unshare_reserve = test_bit(epidx,
+						   &hw->hw_info.buffer_unshare_reserve_bit);
+
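+			/* skip partners with no outstanding unshare work */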
+			if ((!stop_req || (is_shared && !stop_req_done)) &&
+			    (is_shared || !unshare_watch || !unshare_reserve))
+				continue;
+
+			mutex_lock(&hw->hw_info.lock);
+			ret = fjes_hw_unregister_buff_addr(hw, epidx);
+			switch (ret) {
+			case 0:
+				break;
+			case -ENOMSG:
+			case -EBUSY:
+			default:
+				if (!work_pending(
+					&adapter->force_close_task)) {
+					adapter->force_reset = true;
+					schedule_work(
+						&adapter->force_close_task);
+				}
+				break;
+			}
+			mutex_unlock(&hw->hw_info.lock);
+
+			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
+					    netdev->dev_addr, netdev->mtu);
+
+			clear_bit(epidx, &hw->txrx_stop_req_bit);
+			clear_bit(epidx, &unshare_watch_bitmask);
+			clear_bit(epidx,
+				  &hw->hw_info.buffer_unshare_reserve_bit);
+		}
+
+		msleep(100);
+		wait_time += 100;
+	}
+
+	if (hw->hw_info.buffer_unshare_reserve_bit) {
+		for (epidx = 0; epidx < hw->max_epid; epidx++) {
+			if (epidx == hw->my_epid)
+				continue;
+
+			if (test_bit(epidx,
+				     &hw->hw_info.buffer_unshare_reserve_bit)) {
+				mutex_lock(&hw->hw_info.lock);
+
+				ret = fjes_hw_unregister_buff_addr(hw, epidx);
+				switch (ret) {
+				case 0:
+					break;
+				case -ENOMSG:
+				case -EBUSY:
+				default:
+					if (!work_pending(
+						&adapter->force_close_task)) {
+						adapter->force_reset = true;
+						schedule_work(
+							&adapter->force_close_task);
+					}
+					break;
+				}
+				mutex_unlock(&hw->hw_info.lock);
+
+				fjes_hw_setup_epbuf(
+					&hw->ep_shm_info[epidx].tx,
+					netdev->dev_addr, netdev->mtu);
+
+				clear_bit(epidx, &hw->txrx_stop_req_bit);
+				clear_bit(epidx, &unshare_watch_bitmask);
+				clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
+			}
+
+			if (test_bit(epidx, &unshare_watch_bitmask)) {
+				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
+						~FJES_RX_STOP_REQ_DONE;
+			}
+		}
+	}
+}
+
+/* fjes_init_module - Driver Registration Routine */
+static int __init fjes_init_module(void)
+{
+	int result;
+
+	pr_info("%s - version %s - %s\n",
+		fjes_driver_string, fjes_driver_version, fjes_copyright);
+
+	result = platform_driver_register(&fjes_driver);
+	if (result < 0)
+		return result;
+
+	result = acpi_bus_register_driver(&fjes_acpi_driver);
+	if (result < 0)
+		goto fail_acpi_driver;
+
+	return 0;
+
+fail_acpi_driver:
+	platform_driver_unregister(&fjes_driver);
+	return result;
+}
+
+module_init(fjes_init_module);
+
+/* fjes_exit_module - Driver Exit Cleanup Routine */
+static void __exit fjes_exit_module(void)
+{
+	acpi_bus_unregister_driver(&fjes_acpi_driver);
+	platform_driver_unregister(&fjes_driver);
+}
+
+module_exit(fjes_exit_module);
diff --git a/drivers/net/fjes/fjes_regs.h b/drivers/net/fjes/fjes_regs.h
new file mode 100644
index 0000000..029c924
--- /dev/null
+++ b/drivers/net/fjes/fjes_regs.h
@@ -0,0 +1,142 @@
+/*
+ *  FUJITSU Extended Socket Network Device driver
+ *  Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef FJES_REGS_H_
+#define FJES_REGS_H_
+
+#include <linux/bitops.h>
+
+#define XSCT_DEVICE_REGISTER_SIZE 0x1000
+
+/* register offset */
+/* Information registers */
+#define XSCT_OWNER_EPID     0x0000  /* Owner EPID */
+#define XSCT_MAX_EP         0x0004  /* Maximum EP */
+
+/* Device Control registers */
+#define XSCT_DCTL           0x0010  /* Device Control */
+
+/* Command Control registers */
+#define XSCT_CR             0x0020  /* Command request */
+#define XSCT_CS             0x0024  /* Command status */
+#define XSCT_SHSTSAL        0x0028  /* Share status address Low */
+#define XSCT_SHSTSAH        0x002C  /* Share status address High */
+
+#define XSCT_REQBL          0x0034  /* Request Buffer length */
+#define XSCT_REQBAL         0x0038  /* Request Buffer Address Low */
+#define XSCT_REQBAH         0x003C  /* Request Buffer Address High */
+
+#define XSCT_RESPBL         0x0044  /* Response Buffer Length */
+#define XSCT_RESPBAL        0x0048  /* Response Buffer Address Low */
+#define XSCT_RESPBAH        0x004C  /* Response Buffer Address High */
+
+/* Interrupt Control registers */
+#define XSCT_IS             0x0080  /* Interrupt status */
+#define XSCT_IMS            0x0084  /* Interrupt mask set */
+#define XSCT_IMC            0x0088  /* Interrupt mask clear */
+#define XSCT_IG             0x008C  /* Interrupt generator */
+#define XSCT_ICTL           0x0090  /* Interrupt control */
+
+/* register structure */
+/* Information registers */
+union REG_OWNER_EPID {
+	struct {
+		__le32 epid:16;
+		__le32:16;
+	} bits;
+	__le32 reg;
+};
+
+union REG_MAX_EP {
+	struct {
+		__le32 maxep:16;
+		__le32:16;
+	} bits;
+	__le32 reg;
+};
+
+/* Device Control registers */
+union REG_DCTL {
+	struct {
+		__le32 reset:1;
+		__le32 rsv0:15;
+		__le32 rsv1:16;
+	} bits;
+	__le32 reg;
+};
+
+/* Command Control registers */
+union REG_CR {
+	struct {
+		__le32 req_code:16;
+		__le32 err_info:14;
+		__le32 error:1;
+		__le32 req_start:1;
+	} bits;
+	__le32 reg;
+};
+
+union REG_CS {
+	struct {
+		__le32 req_code:16;
+		__le32 rsv0:14;
+		__le32 busy:1;
+		__le32 complete:1;
+	} bits;
+	__le32 reg;
+};
+
+/* Interrupt Control registers */
+union REG_ICTL {
+	struct {
+		__le32 automak:1;
+		__le32 rsv0:31;
+	} bits;
+	__le32 reg;
+};
+
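+/* Interrupt cause bits: bits 20:16 select the cause,
+ * bits 15:0 carry the source EPID (see REG_IS_MASK below).
+ */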
+enum REG_ICTL_MASK {
+	REG_ICTL_MASK_INFO_UPDATE     = 1 << 20,
+	REG_ICTL_MASK_DEV_STOP_REQ    = 1 << 19,
+	REG_ICTL_MASK_TXRX_STOP_REQ   = 1 << 18,
+	REG_ICTL_MASK_TXRX_STOP_DONE  = 1 << 17,
+	REG_ICTL_MASK_RX_DATA         = 1 << 16,
+	REG_ICTL_MASK_ALL             = GENMASK(20, 16),
+};
+
+enum REG_IS_MASK {
+	REG_IS_MASK_IS_ASSERT	= 1 << 31,
+	REG_IS_MASK_EPID	= GENMASK(15, 0),
+};
+
+struct fjes_hw;
+
+u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg);
+
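+/* 'reg' is a byte offset into the mapped register window at hw->base */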
+#define wr32(reg, val) \
+do { \
+	u8 *base = hw->base; \
+	writel((val), &base[(reg)]); \
+} while (0)
+
+#define rd32(reg) (fjes_hw_rd32(hw, reg))
+
+#endif /* FJES_REGS_H_ */
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 78d49d1..da3259c 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -15,8 +15,11 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/hash.h>
+#include <net/dst_metadata.h>
+#include <net/gro_cells.h>
 #include <net/rtnetlink.h>
 #include <net/geneve.h>
+#include <net/protocol.h>
 
 #define GENEVE_NETDEV_VER	"0.6"
 
@@ -32,12 +35,17 @@
 module_param(log_ecn_error, bool, 0644);
 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 
+#define GENEVE_VER 0
+#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
+
 /* per-network namespace private data for this module */
 struct geneve_net {
-	struct list_head  geneve_list;
-	struct hlist_head vni_list[VNI_HASH_SIZE];
+	struct list_head	geneve_list;
+	struct list_head	sock_list;
 };
 
+static int geneve_net_id;
+
 /* Pseudo network device */
 struct geneve_dev {
 	struct hlist_node  hlist;	/* vni hash table */
@@ -49,9 +57,20 @@
 	u8                 tos;		/* TOS override */
 	struct sockaddr_in remote;	/* IPv4 address for link partner */
 	struct list_head   next;	/* geneve's per namespace list */
+	__be16		   dst_port;
+	bool		   collect_md;
+	struct gro_cells   gro_cells;
 };
 
-static int geneve_net_id;
+struct geneve_sock {
+	bool			collect_md;
+	struct list_head	list;
+	struct socket		*sock;
+	struct rcu_head		rcu;
+	int			refcnt;
+	struct udp_offload	udp_offloads;
+	struct hlist_head	vni_list[VNI_HASH_SIZE];
+};
 
 static inline __u32 geneve_net_vni_hash(u8 vni[3])
 {
@@ -61,46 +80,98 @@
 	return hash_32(vnid, VNI_HASH_BITS);
 }
 
+static __be64 vni_to_tunnel_id(const __u8 *vni)
+{
+#ifdef __BIG_ENDIAN
+	return (vni[0] << 16) | (vni[1] << 8) | vni[2];
+#else
+	return (__force __be64)(((__force u64)vni[0] << 40) |
+				((__force u64)vni[1] << 48) |
+				((__force u64)vni[2] << 56));
+#endif
+}
+
+static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
+					__be32 addr, u8 vni[])
+{
+	struct hlist_head *vni_list_head;
+	struct geneve_dev *geneve;
+	__u32 hash;
+
+	/* Find the device for this VNI */
+	hash = geneve_net_vni_hash(vni);
+	vni_list_head = &gs->vni_list[hash];
+	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
+		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
+		    addr == geneve->remote.sin_addr.s_addr)
+			return geneve;
+	}
+	return NULL;
+}
+
+static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
+{
+	return (struct genevehdr *)(udp_hdr(skb) + 1);
+}
+
 /* geneve receive/decap routine */
 static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
 {
 	struct genevehdr *gnvh = geneve_hdr(skb);
-	struct geneve_dev *dummy, *geneve = NULL;
-	struct geneve_net *gn;
-	struct iphdr *iph = NULL;
+	struct metadata_dst *tun_dst = NULL;
+	struct geneve_dev *geneve = NULL;
 	struct pcpu_sw_netstats *stats;
-	struct hlist_head *vni_list_head;
-	int err = 0;
-	__u32 hash;
+	struct iphdr *iph;
+	u8 *vni;
+	__be32 addr;
+	int err;
 
-	iph = ip_hdr(skb); /* Still outer IP header... */
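+	/* collect_md sockets use a catch-all device keyed by a zero VNI/address */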
+	if (gs->collect_md) {
+		static u8 zero_vni[3];
 
-	gn = gs->rcv_data;
-
-	/* Find the device for this VNI */
-	hash = geneve_net_vni_hash(gnvh->vni);
-	vni_list_head = &gn->vni_list[hash];
-	hlist_for_each_entry_rcu(dummy, vni_list_head, hlist) {
-		if (!memcmp(gnvh->vni, dummy->vni, sizeof(dummy->vni)) &&
-		    iph->saddr == dummy->remote.sin_addr.s_addr) {
-			geneve = dummy;
-			break;
-		}
+		vni = zero_vni;
+		addr = 0;
+	} else {
+		vni = gnvh->vni;
+		iph = ip_hdr(skb); /* Still outer IP header... */
+		addr = iph->saddr;
 	}
+
+	geneve = geneve_lookup(gs, addr, vni);
 	if (!geneve)
 		goto drop;
 
-	/* Drop packets w/ critical options,
-	 * since we don't support any...
-	 */
-	if (gnvh->critical)
-		goto drop;
+	if (ip_tunnel_collect_metadata() || gs->collect_md) {
+		__be16 flags;
+
+		flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
+			(gnvh->oam ? TUNNEL_OAM : 0) |
+			(gnvh->critical ? TUNNEL_CRIT_OPT : 0);
+
+		tun_dst = udp_tun_rx_dst(skb, AF_INET, flags,
+					 vni_to_tunnel_id(gnvh->vni),
+					 gnvh->opt_len * 4);
+		if (!tun_dst)
+			goto drop;
+		/* Update tunnel dst according to Geneve options. */
+		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
+					gnvh->options, gnvh->opt_len * 4);
+	} else {
+		/* Drop packets w/ critical options,
+		 * since we don't support any...
+		 */
+		if (gnvh->critical)
+			goto drop;
+	}
 
 	skb_reset_mac_header(skb);
 	skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev)));
 	skb->protocol = eth_type_trans(skb, geneve->dev);
 	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
+	if (tun_dst)
+		skb_dst_set(skb, &tun_dst->dst);
+
 	/* Ignore packet loops (and multicast echo) */
 	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
 		goto drop;
@@ -127,8 +198,7 @@
 	stats->rx_bytes += skb->len;
 	u64_stats_update_end(&stats->syncp);
 
-	netif_rx(skb);
-
+	gro_cells_receive(&geneve->gro_cells, skb);
 	return;
 drop:
 	/* Consume bad packet */
@@ -138,32 +208,305 @@
 /* Setup stats when device is created */
 static int geneve_init(struct net_device *dev)
 {
+	struct geneve_dev *geneve = netdev_priv(dev);
+	int err;
+
 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 	if (!dev->tstats)
 		return -ENOMEM;
 
+	err = gro_cells_init(&geneve->gro_cells, dev);
+	if (err) {
+		free_percpu(dev->tstats);
+		return err;
+	}
+
 	return 0;
 }
 
 static void geneve_uninit(struct net_device *dev)
 {
+	struct geneve_dev *geneve = netdev_priv(dev);
+
+	gro_cells_destroy(&geneve->gro_cells);
 	free_percpu(dev->tstats);
 }
 
+/* Callback from net/ipv4/udp.c to receive packets */
+static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+	struct genevehdr *geneveh;
+	struct geneve_sock *gs;
+	int opts_len;
+
+	/* Need Geneve and inner Ethernet header to be present */
+	if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
+		goto error;
+
+	/* Hand packets with a bad version or protocol back to the UDP layer */
+	geneveh = geneve_hdr(skb);
+	if (unlikely(geneveh->ver != GENEVE_VER))
+		goto error;
+
+	if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
+		goto error;
+
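+	/* opt_len counts 4-byte words; strip the outer headers */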
+	opts_len = geneveh->opt_len * 4;
+	if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
+				 htons(ETH_P_TEB)))
+		goto drop;
+
+	gs = rcu_dereference_sk_user_data(sk);
+	if (!gs)
+		goto drop;
+
+	geneve_rx(gs, skb);
+	return 0;
+
+drop:
+	/* Consume bad packet */
+	kfree_skb(skb);
+	return 0;
+
+error:
+	/* Let the UDP layer deal with the skb */
+	return 1;
+}
+
+static struct socket *geneve_create_sock(struct net *net, bool ipv6,
+					 __be16 port)
+{
+	struct socket *sock;
+	struct udp_port_cfg udp_conf;
+	int err;
+
+	memset(&udp_conf, 0, sizeof(udp_conf));
+
+	if (ipv6) {
+		udp_conf.family = AF_INET6;
+	} else {
+		udp_conf.family = AF_INET;
+		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+	}
+
+	udp_conf.local_udp_port = port;
+
+	/* Open UDP socket */
+	err = udp_sock_create(net, &udp_conf, &sock);
+	if (err < 0)
+		return ERR_PTR(err);
+
+	return sock;
+}
+
+static void geneve_notify_add_rx_port(struct geneve_sock *gs)
+{
+	struct sock *sk = gs->sock->sk;
+	sa_family_t sa_family = sk->sk_family;
+	int err;
+
+	if (sa_family == AF_INET) {
+		err = udp_add_offload(&gs->udp_offloads);
+		if (err)
+			pr_warn("geneve: udp_add_offload failed with status %d\n",
+				err);
+	}
+}
+
+static int geneve_hlen(struct genevehdr *gh)
+{
+	return sizeof(*gh) + gh->opt_len * 4;
+}
+
+static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
+					   struct sk_buff *skb,
+					   struct udp_offload *uoff)
+{
+	struct sk_buff *p, **pp = NULL;
+	struct genevehdr *gh, *gh2;
+	unsigned int hlen, gh_len, off_gnv;
+	const struct packet_offload *ptype;
+	__be16 type;
+	int flush = 1;
+
+	off_gnv = skb_gro_offset(skb);
+	hlen = off_gnv + sizeof(*gh);
+	gh = skb_gro_header_fast(skb, off_gnv);
+	if (skb_gro_header_hard(skb, hlen)) {
+		gh = skb_gro_header_slow(skb, hlen, off_gnv);
+		if (unlikely(!gh))
+			goto out;
+	}
+
+	if (gh->ver != GENEVE_VER || gh->oam)
+		goto out;
+	gh_len = geneve_hlen(gh);
+
+	hlen = off_gnv + gh_len;
+	if (skb_gro_header_hard(skb, hlen)) {
+		gh = skb_gro_header_slow(skb, hlen, off_gnv);
+		if (unlikely(!gh))
+			goto out;
+	}
+
+	flush = 0;
+
+	for (p = *head; p; p = p->next) {
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		gh2 = (struct genevehdr *)(p->data + off_gnv);
+		if (gh->opt_len != gh2->opt_len ||
+		    memcmp(gh, gh2, gh_len)) {
+			NAPI_GRO_CB(p)->same_flow = 0;
+			continue;
+		}
+	}
+
+	type = gh->proto_type;
+
+	rcu_read_lock();
+	ptype = gro_find_receive_by_type(type);
+	if (!ptype) {
+		flush = 1;
+		goto out_unlock;
+	}
+
+	skb_gro_pull(skb, gh_len);
+	skb_gro_postpull_rcsum(skb, gh, gh_len);
+	pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+	rcu_read_unlock();
+out:
+	NAPI_GRO_CB(skb)->flush |= flush;
+
+	return pp;
+}
+
+static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
+			       struct udp_offload *uoff)
+{
+	struct genevehdr *gh;
+	struct packet_offload *ptype;
+	__be16 type;
+	int gh_len;
+	int err = -ENOSYS;
+
+	udp_tunnel_gro_complete(skb, nhoff);
+
+	gh = (struct genevehdr *)(skb->data + nhoff);
+	gh_len = geneve_hlen(gh);
+	type = gh->proto_type;
+
+	rcu_read_lock();
+	ptype = gro_find_complete_by_type(type);
+	if (ptype)
+		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
+
+	rcu_read_unlock();
+	return err;
+}
+
+/* Create new listen socket if needed */
+static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
+						bool ipv6)
+{
+	struct geneve_net *gn = net_generic(net, geneve_net_id);
+	struct geneve_sock *gs;
+	struct socket *sock;
+	struct udp_tunnel_sock_cfg tunnel_cfg;
+	int h;
+
+	gs = kzalloc(sizeof(*gs), GFP_KERNEL);
+	if (!gs)
+		return ERR_PTR(-ENOMEM);
+
+	sock = geneve_create_sock(net, ipv6, port);
+	if (IS_ERR(sock)) {
+		kfree(gs);
+		return ERR_CAST(sock);
+	}
+
+	gs->sock = sock;
+	gs->refcnt = 1;
+	for (h = 0; h < VNI_HASH_SIZE; ++h)
+		INIT_HLIST_HEAD(&gs->vni_list[h]);
+
+	/* Initialize the geneve udp offloads structure */
+	gs->udp_offloads.port = port;
+	gs->udp_offloads.callbacks.gro_receive  = geneve_gro_receive;
+	gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
+	geneve_notify_add_rx_port(gs);
+
+	/* Mark socket as an encapsulation socket */
+	tunnel_cfg.sk_user_data = gs;
+	tunnel_cfg.encap_type = 1;
+	tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
+	tunnel_cfg.encap_destroy = NULL;
+	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
+	list_add(&gs->list, &gn->sock_list);
+	return gs;
+}
+
+static void geneve_notify_del_rx_port(struct geneve_sock *gs)
+{
+	struct sock *sk = gs->sock->sk;
+	sa_family_t sa_family = sk->sk_family;
+
+	if (sa_family == AF_INET)
+		udp_del_offload(&gs->udp_offloads);
+}
+
+static void geneve_sock_release(struct geneve_sock *gs)
+{
+	if (--gs->refcnt)
+		return;
+
+	list_del(&gs->list);
+	geneve_notify_del_rx_port(gs);
+	udp_tunnel_sock_release(gs->sock);
+	kfree_rcu(gs, rcu);
+}
+
+static struct geneve_sock *geneve_find_sock(struct geneve_net *gn,
+					    __be16 dst_port)
+{
+	struct geneve_sock *gs;
+
+	list_for_each_entry(gs, &gn->sock_list, list) {
+		if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
+		    inet_sk(gs->sock->sk)->sk.sk_family == AF_INET) {
+			return gs;
+		}
+	}
+	return NULL;
+}
+
 static int geneve_open(struct net_device *dev)
 {
 	struct geneve_dev *geneve = netdev_priv(dev);
 	struct net *net = geneve->net;
-	struct geneve_net *gn = net_generic(geneve->net, geneve_net_id);
+	struct geneve_net *gn = net_generic(net, geneve_net_id);
 	struct geneve_sock *gs;
+	__u32 hash;
 
-	gs = geneve_sock_add(net, htons(GENEVE_UDP_PORT), geneve_rx, gn,
-	                     false, false);
+	gs = geneve_find_sock(gn, geneve->dst_port);
+	if (gs) {
+		gs->refcnt++;
+		goto out;
+	}
+
+	gs = geneve_socket_create(net, geneve->dst_port, false);
 	if (IS_ERR(gs))
 		return PTR_ERR(gs);
 
+out:
+	gs->collect_md = geneve->collect_md;
 	geneve->sock = gs;
 
+	hash = geneve_net_vni_hash(geneve->vni);
+	hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
 	return 0;
 }
 
@@ -172,74 +515,191 @@
 	struct geneve_dev *geneve = netdev_priv(dev);
 	struct geneve_sock *gs = geneve->sock;
 
+	if (!hlist_unhashed(&geneve->hlist))
+		hlist_del_rcu(&geneve->hlist);
 	geneve_sock_release(gs);
-
 	return 0;
 }
 
+static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
+			    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
+			    bool csum)
+{
+	struct genevehdr *gnvh;
+	int min_headroom;
+	int err;
+
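+	/* reserve outer-header room; error paths consume skb and release rt */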
+	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
+	err = skb_cow_head(skb, min_headroom);
+	if (unlikely(err)) {
+		kfree_skb(skb);
+		goto free_rt;
+	}
+
+	skb = udp_tunnel_handle_offloads(skb, csum);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		goto free_rt;
+	}
+
+	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
+	gnvh->ver = GENEVE_VER;
+	gnvh->opt_len = opt_len / 4;
+	gnvh->oam = !!(tun_flags & TUNNEL_OAM);
+	gnvh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
+	gnvh->rsvd1 = 0;
+	memcpy(gnvh->vni, vni, 3);
+	gnvh->proto_type = htons(ETH_P_TEB);
+	gnvh->rsvd2 = 0;
+	memcpy(gnvh->options, opt, opt_len);
+
+	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+	return 0;
+
+free_rt:
+	ip_rt_put(rt);
+	return err;
+}
+
+static struct rtable *geneve_get_rt(struct sk_buff *skb,
+				    struct net_device *dev,
+				    struct flowi4 *fl4,
+				    struct ip_tunnel_info *info)
+{
+	struct geneve_dev *geneve = netdev_priv(dev);
+	struct rtable *rt = NULL;
+	__u8 tos;
+
+	memset(fl4, 0, sizeof(*fl4));
+	fl4->flowi4_mark = skb->mark;
+	fl4->flowi4_proto = IPPROTO_UDP;
+
+	if (info) {
+		fl4->daddr = info->key.u.ipv4.dst;
+		fl4->saddr = info->key.u.ipv4.src;
+		fl4->flowi4_tos = RT_TOS(info->key.tos);
+	} else {
+		tos = geneve->tos;
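+		/* tos of 1 means inherit the DSCP field from the inner packet */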
+		if (tos == 1) {
+			const struct iphdr *iip = ip_hdr(skb);
+
+			tos = ip_tunnel_get_dsfield(iip, skb);
+		}
+
+		fl4->flowi4_tos = RT_TOS(tos);
+		fl4->daddr = geneve->remote.sin_addr.s_addr;
+	}
+
+	rt = ip_route_output_key(geneve->net, fl4);
+	if (IS_ERR(rt)) {
+		netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
+		dev->stats.tx_carrier_errors++;
+		return rt;
+	}
+	if (rt->dst.dev == dev) { /* is this necessary? */
+		netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
+		dev->stats.collisions++;
+		ip_rt_put(rt);
+		return ERR_PTR(-EINVAL);
+	}
+	return rt;
+}
+
+/* Convert 64 bit tunnel ID to 24 bit VNI. */
+static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
+{
+#ifdef __BIG_ENDIAN
+	vni[0] = (__force __u8)(tun_id >> 16);
+	vni[1] = (__force __u8)(tun_id >> 8);
+	vni[2] = (__force __u8)tun_id;
+#else
+	vni[0] = (__force __u8)((__force u64)tun_id >> 40);
+	vni[1] = (__force __u8)((__force u64)tun_id >> 48);
+	vni[2] = (__force __u8)((__force u64)tun_id >> 56);
+#endif
+}
+
 static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct geneve_dev *geneve = netdev_priv(dev);
 	struct geneve_sock *gs = geneve->sock;
+	struct ip_tunnel_info *info = NULL;
 	struct rtable *rt = NULL;
-	const struct iphdr *iip; /* interior IP header */
 	struct flowi4 fl4;
-	int err;
-	__be16 sport;
 	__u8 tos, ttl;
+	__be16 sport;
+	bool udp_csum;
+	__be16 df;
+	int err;
 
-	iip = ip_hdr(skb);
+	if (geneve->collect_md) {
+		info = skb_tunnel_info(skb);
+		if (unlikely(info && !(info->mode & IP_TUNNEL_INFO_TX))) {
+			netdev_dbg(dev, "no tunnel metadata\n");
+			goto tx_error;
+		}
+		if (info && ip_tunnel_info_af(info) != AF_INET)
+			goto tx_error;
+	}
 
-	skb_reset_mac_header(skb);
-
-	/* TODO: port min/max limits should be configurable */
-	sport = udp_flow_src_port(dev_net(dev), skb, 0, 0, true);
-
-	tos = geneve->tos;
-	if (tos == 1)
-		tos = ip_tunnel_get_dsfield(iip, skb);
-
-	memset(&fl4, 0, sizeof(fl4));
-	fl4.flowi4_tos = RT_TOS(tos);
-	fl4.daddr = geneve->remote.sin_addr.s_addr;
-	rt = ip_route_output_key(geneve->net, &fl4);
+	rt = geneve_get_rt(skb, dev, &fl4, info);
 	if (IS_ERR(rt)) {
 		netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
 		dev->stats.tx_carrier_errors++;
 		goto tx_error;
 	}
-	if (rt->dst.dev == dev) { /* is this necessary? */
-		netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
-		dev->stats.collisions++;
-		goto rt_tx_error;
+
+	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+	skb_reset_mac_header(skb);
+
+	if (info) {
+		const struct ip_tunnel_key *key = &info->key;
+		u8 *opts = NULL;
+		u8 vni[3];
+
+		tunnel_id_to_vni(key->tun_id, vni);
+		if (key->tun_flags & TUNNEL_GENEVE_OPT)
+			opts = ip_tunnel_info_opts(info);
+
+		udp_csum = !!(key->tun_flags & TUNNEL_CSUM);
+		err = geneve_build_skb(rt, skb, key->tun_flags, vni,
+				       info->options_len, opts, udp_csum);
+		if (unlikely(err))
+			goto err;
+
+		tos = key->tos;
+		ttl = key->ttl;
+		df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+	} else {
+		const struct iphdr *iip; /* interior IP header */
+
+		udp_csum = false;
+		err = geneve_build_skb(rt, skb, 0, geneve->vni,
+				       0, NULL, udp_csum);
+		if (unlikely(err))
+			goto err;
+
+		iip = ip_hdr(skb);
+		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
+		ttl = geneve->ttl;
+		if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
+			ttl = 1;
+		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+		df = 0;
 	}
-
-	tos = ip_tunnel_ecn_encap(tos, iip, skb);
-
-	ttl = geneve->ttl;
-	if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
-		ttl = 1;
-
-	ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
-
-	/* no need to handle local destination and encap bypass...yet... */
-
-	err = geneve_xmit_skb(gs, rt, skb, fl4.saddr, fl4.daddr,
-	                      tos, ttl, 0, sport, htons(GENEVE_UDP_PORT), 0,
-	                      geneve->vni, 0, NULL, false,
-	                      !net_eq(geneve->net, dev_net(geneve->dev)));
-	if (err < 0)
-		ip_rt_put(rt);
+	err = udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, fl4.saddr, fl4.daddr,
+				  tos, ttl, df, sport, geneve->dst_port,
+				  !net_eq(geneve->net, dev_net(geneve->dev)),
+				  !udp_csum);
 
 	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
-
 	return NETDEV_TX_OK;
 
-rt_tx_error:
-	ip_rt_put(rt);
 tx_error:
-	dev->stats.tx_errors++;
 	dev_kfree_skb(skb);
+err:
+	dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
 }
 
@@ -283,7 +743,6 @@
 
 	SET_NETDEV_DEVTYPE(dev, &geneve_type);
 
-	dev->tx_queue_len = 0;
 	dev->features    |= NETIF_F_LLTX;
 	dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM;
 	dev->features    |= NETIF_F_RXCSUM;
@@ -297,7 +756,8 @@
 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
 
 	netif_keep_dst(dev);
-	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
+	eth_hw_addr_random(dev);
 }
 
 static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
@@ -305,6 +765,8 @@
 	[IFLA_GENEVE_REMOTE]		= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
 	[IFLA_GENEVE_TTL]		= { .type = NLA_U8 },
 	[IFLA_GENEVE_TOS]		= { .type = NLA_U8 },
+	[IFLA_GENEVE_PORT]		= { .type = NLA_U16 },
+	[IFLA_GENEVE_COLLECT_METADATA]	= { .type = NLA_FLAG },
 };
 
 static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -330,68 +792,117 @@
 	return 0;
 }
 
-static int geneve_newlink(struct net *net, struct net_device *dev,
-			 struct nlattr *tb[], struct nlattr *data[])
+static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
+					  __be16 dst_port,
+					  __be32 rem_addr,
+					  u8 vni[],
+					  bool *tun_on_same_port,
+					  bool *tun_collect_md)
+{
+	struct geneve_dev *geneve, *t;
+
+	*tun_on_same_port = false;
+	*tun_collect_md = false;
+	t = NULL;
+	list_for_each_entry(geneve, &gn->geneve_list, next) {
+		if (geneve->dst_port == dst_port) {
+			*tun_collect_md = geneve->collect_md;
+			*tun_on_same_port = true;
+		}
+		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
+		    rem_addr == geneve->remote.sin_addr.s_addr &&
+		    dst_port == geneve->dst_port)
+			t = geneve;
+	}
+	return t;
+}
+
+static int geneve_configure(struct net *net, struct net_device *dev,
+			    __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos,
+			    __u16 dst_port, bool metadata)
 {
 	struct geneve_net *gn = net_generic(net, geneve_net_id);
-	struct geneve_dev *dummy, *geneve = netdev_priv(dev);
-	struct hlist_head *vni_list_head;
-	struct sockaddr_in remote;	/* IPv4 address for link partner */
-	__u32 vni, hash;
+	struct geneve_dev *t, *geneve = netdev_priv(dev);
+	bool tun_collect_md, tun_on_same_port;
 	int err;
 
-	if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE])
-		return -EINVAL;
+	if (metadata) {
+		if (rem_addr || vni || tos || ttl)
+			return -EINVAL;
+	}
 
 	geneve->net = net;
 	geneve->dev = dev;
 
-	vni = nla_get_u32(data[IFLA_GENEVE_ID]);
 	geneve->vni[0] = (vni & 0x00ff0000) >> 16;
 	geneve->vni[1] = (vni & 0x0000ff00) >> 8;
 	geneve->vni[2] =  vni & 0x000000ff;
 
-	geneve->remote.sin_addr.s_addr =
-		nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
+	geneve->remote.sin_addr.s_addr = rem_addr;
 	if (IN_MULTICAST(ntohl(geneve->remote.sin_addr.s_addr)))
 		return -EINVAL;
 
-	remote = geneve->remote;
-	hash = geneve_net_vni_hash(geneve->vni);
-	vni_list_head = &gn->vni_list[hash];
-	hlist_for_each_entry_rcu(dummy, vni_list_head, hlist) {
-		if (!memcmp(geneve->vni, dummy->vni, sizeof(dummy->vni)) &&
-		    !memcmp(&remote, &dummy->remote, sizeof(dummy->remote)))
-			return -EBUSY;
-	}
+	geneve->ttl = ttl;
+	geneve->tos = tos;
+	geneve->dst_port = htons(dst_port);
+	geneve->collect_md = metadata;
 
-	if (tb[IFLA_ADDRESS] == NULL)
-		eth_hw_addr_random(dev);
+	t = geneve_find_dev(gn, htons(dst_port), rem_addr, geneve->vni,
+			    &tun_on_same_port, &tun_collect_md);
+	if (t)
+		return -EBUSY;
+
+	if (metadata) {
+		if (tun_on_same_port)
+			return -EPERM;
+	} else {
+		if (tun_collect_md)
+			return -EPERM;
+	}
 
 	err = register_netdevice(dev);
 	if (err)
 		return err;
 
+	list_add(&geneve->next, &gn->geneve_list);
+	return 0;
+}
+
+static int geneve_newlink(struct net *net, struct net_device *dev,
+			  struct nlattr *tb[], struct nlattr *data[])
+{
+	__u16 dst_port = GENEVE_UDP_PORT;
+	__u8 ttl = 0, tos = 0;
+	bool metadata = false;
+	__be32 rem_addr;
+	__u32 vni;
+
+	if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE])
+		return -EINVAL;
+
+	vni = nla_get_u32(data[IFLA_GENEVE_ID]);
+	rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
+
 	if (data[IFLA_GENEVE_TTL])
-		geneve->ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
+		ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
 
 	if (data[IFLA_GENEVE_TOS])
-		geneve->tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
+		tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
 
-	list_add(&geneve->next, &gn->geneve_list);
+	if (data[IFLA_GENEVE_PORT])
+		dst_port = nla_get_u16(data[IFLA_GENEVE_PORT]);
 
-	hlist_add_head_rcu(&geneve->hlist, &gn->vni_list[hash]);
+	if (data[IFLA_GENEVE_COLLECT_METADATA])
+		metadata = true;
 
-	return 0;
+	return geneve_configure(net, dev, rem_addr, vni,
+				ttl, tos, dst_port, metadata);
 }
 
 static void geneve_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct geneve_dev *geneve = netdev_priv(dev);
 
-	if (!hlist_unhashed(&geneve->hlist))
-		hlist_del_rcu(&geneve->hlist);
-
 	list_del(&geneve->next);
 	unregister_netdevice_queue(dev, head);
 }
@@ -402,6 +913,8 @@
 		nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
 		nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TTL */
 		nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TOS */
+		nla_total_size(sizeof(__u16)) +  /* IFLA_GENEVE_PORT */
+		nla_total_size(0) +	 /* IFLA_GENEVE_COLLECT_METADATA */
 		0;
 }
 
@@ -422,6 +935,14 @@
 	    nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
 		goto nla_put_failure;
 
+	if (nla_put_u16(skb, IFLA_GENEVE_PORT, ntohs(geneve->dst_port)))
+		goto nla_put_failure;
+
+	if (geneve->collect_md) {
+		if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
+			goto nla_put_failure;
+	}
+
 	return 0;
 
 nla_put_failure:
@@ -441,16 +962,34 @@
 	.fill_info	= geneve_fill_info,
 };
 
+struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
+					u8 name_assign_type, u16 dst_port)
+{
+	struct nlattr *tb[IFLA_MAX + 1];
+	struct net_device *dev;
+	int err;
+
+	memset(tb, 0, sizeof(tb));
+	dev = rtnl_create_link(net, name, name_assign_type,
+			       &geneve_link_ops, tb);
+	if (IS_ERR(dev))
+		return dev;
+
+	err = geneve_configure(net, dev, 0, 0, 0, 0, dst_port, true);
+	if (err) {
+		free_netdev(dev);
+		return ERR_PTR(err);
+	}
+	return dev;
+}
+EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
+
 static __net_init int geneve_init_net(struct net *net)
 {
 	struct geneve_net *gn = net_generic(net, geneve_net_id);
-	unsigned int h;
 
 	INIT_LIST_HEAD(&gn->geneve_list);
-
-	for (h = 0; h < VNI_HASH_SIZE; ++h)
-		INIT_HLIST_HEAD(&gn->vni_list[h]);
-
+	INIT_LIST_HEAD(&gn->sock_list);
 	return 0;
 }
 
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index dd45440..5fa98f5 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -162,6 +162,7 @@
 	bool link_state;	/* 0 - link up, 1 - link down */
 	int  ring_size;
 	u32  max_num_vrss_chns;
+	u32  num_chn;
 };
 
 enum rndis_device_state {
@@ -541,6 +542,29 @@
 	struct nvsp_2_free_rxbuf free_rxbuf;
 } __packed;
 
+struct nvsp_4_send_vf_association {
+	/* 1: allocated, serial number is valid. 0: not allocated */
+	u32 allocated;
+
+	/* Serial number of the VF to team with */
+	u32 serial;
+} __packed;
+
+enum nvsp_vm_datapath {
+	NVSP_DATAPATH_SYNTHETIC = 0,
+	NVSP_DATAPATH_VF,
+	NVSP_DATAPATH_MAX
+};
+
+struct nvsp_4_sw_datapath {
+	u32 active_datapath; /* active data path in VM */
+} __packed;
+
+union nvsp_4_message_uber {
+	struct nvsp_4_send_vf_association vf_assoc;
+	struct nvsp_4_sw_datapath active_dp;
+} __packed;
+
 enum nvsp_subchannel_operation {
 	NVSP_SUBCHANNEL_NONE = 0,
 	NVSP_SUBCHANNEL_ALLOCATE,
@@ -578,6 +602,7 @@
 	union nvsp_message_init_uber init_msg;
 	union nvsp_1_message_uber v1_msg;
 	union nvsp_2_message_uber v2_msg;
+	union nvsp_4_message_uber v4_msg;
 	union nvsp_5_message_uber v5_msg;
 } __packed;
 
@@ -589,6 +614,7 @@
 
 
 #define NETVSC_MTU 65536
+#define NETVSC_MTU_MIN 68
 
 #define NETVSC_RECEIVE_BUFFER_SIZE		(1024*1024*16)	/* 16MB */
 #define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY	(1024*1024*15)  /* 15MB */
@@ -670,6 +696,8 @@
 	u32 send_table[VRSS_SEND_TAB_SIZE];
 	u32 max_chn;
 	u32 num_chn;
+	spinlock_t sc_lock; /* Protects num_sc_offered variable */
+	u32 num_sc_offered;
 	atomic_t queue_sends[NR_CPUS];
 
 	/* Holds rndis device info */
@@ -688,6 +716,11 @@
 
 	/* The net device context */
 	struct net_device_context *nd_ctx;
+
+	/* 1: allocated, serial number is valid. 0: not allocated */
+	u32 vf_alloc;
+	/* Serial number of the VF to team with */
+	u32 vf_serial;
 };
 
 /* NdisInitialize message */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 23126a7..51e4c0f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -453,13 +453,16 @@
 	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
 		return 0;
 
-	/* NVSPv2 only: Send NDIS config */
+	/* NVSPv2 or later: Send NDIS config */
 	memset(init_packet, 0, sizeof(struct nvsp_message));
 	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
 	init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
 						       ETH_HLEN;
 	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
 
+	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
+		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
+
 	ret = vmbus_sendpacket(device->channel, init_packet,
 				sizeof(struct nvsp_message),
 				(unsigned long)init_packet,
@@ -1064,11 +1067,10 @@
 
 
 static void netvsc_send_table(struct hv_device *hdev,
-			      struct vmpacket_descriptor *vmpkt)
+			      struct nvsp_message *nvmsg)
 {
 	struct netvsc_device *nvscdev;
 	struct net_device *ndev;
-	struct nvsp_message *nvmsg;
 	int i;
 	u32 count, *tab;
 
@@ -1077,12 +1079,6 @@
 		return;
 	ndev = nvscdev->ndev;
 
-	nvmsg = (struct nvsp_message *)((unsigned long)vmpkt +
-					(vmpkt->offset8 << 3));
-
-	if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE)
-		return;
-
 	count = nvmsg->msg.v5_msg.send_table.count;
 	if (count != VRSS_SEND_TAB_SIZE) {
 		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
@@ -1096,6 +1092,28 @@
 		nvscdev->send_table[i] = tab[i];
 }
 
+static void netvsc_send_vf(struct netvsc_device *nvdev,
+			   struct nvsp_message *nvmsg)
+{
+	nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
+	nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+}
+
+static inline void netvsc_receive_inband(struct hv_device *hdev,
+					 struct netvsc_device *nvdev,
+					 struct nvsp_message *nvmsg)
+{
+	switch (nvmsg->hdr.msg_type) {
+	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
+		netvsc_send_table(hdev, nvmsg);
+		break;
+
+	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
+		netvsc_send_vf(nvdev, nvmsg);
+		break;
+	}
+}
+
 void netvsc_channel_cb(void *context)
 {
 	int ret;
@@ -1108,6 +1126,7 @@
 	unsigned char *buffer;
 	int bufferlen = NETVSC_PACKET_SIZE;
 	struct net_device *ndev;
+	struct nvsp_message *nvmsg;
 
 	if (channel->primary_channel != NULL)
 		device = channel->primary_channel->device_obj;
@@ -1126,6 +1145,8 @@
 		if (ret == 0) {
 			if (bytes_recvd > 0) {
 				desc = (struct vmpacket_descriptor *)buffer;
+				nvmsg = (struct nvsp_message *)((unsigned long)
+					 desc + (desc->offset8 << 3));
 				switch (desc->type) {
 				case VM_PKT_COMP:
 					netvsc_send_completion(net_device,
@@ -1138,7 +1159,9 @@
 					break;
 
 				case VM_PKT_DATA_INBAND:
-					netvsc_send_table(device, desc);
+					netvsc_receive_inband(device,
+							      net_device,
+							      nvmsg);
 					break;
 
 				default:
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 358475e..409b48e 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -106,7 +106,7 @@
 		return ret;
 	}
 
-	netif_tx_start_all_queues(net);
+	netif_tx_wake_all_queues(net);
 
 	nvdev = hv_get_drvdata(device_obj);
 	rdev = nvdev->extension;
@@ -120,15 +120,56 @@
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
 	struct hv_device *device_obj = net_device_ctx->device_ctx;
+	struct netvsc_device *nvdev = hv_get_drvdata(device_obj);
 	int ret;
+	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
+	struct vmbus_channel *chn;
 
 	netif_tx_disable(net);
 
 	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
 	cancel_work_sync(&net_device_ctx->work);
 	ret = rndis_filter_close(device_obj);
-	if (ret != 0)
+	if (ret != 0) {
 		netdev_err(net, "unable to close device (ret %d).\n", ret);
+		return ret;
+	}
+
+	/* Ensure pending bytes in ring are read */
+	while (true) {
+		aread = 0;
+		for (i = 0; i < nvdev->num_chn; i++) {
+			chn = nvdev->chn_table[i];
+			if (!chn)
+				continue;
+
+			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
+						     &awrite);
+
+			if (aread)
+				break;
+
+			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
+						     &awrite);
+
+			if (aread)
+				break;
+		}
+
+		retry++;
+		if (retry > retry_max || aread == 0)
+			break;
+
+		msleep(msec);
+
+		if (msec < 1000)
+			msec *= 2;
+	}
+
+	if (aread) {
+		netdev_err(net, "Ring buffer not empty after closing rndis\n");
+		ret = -ETIMEDOUT;
+	}
 
 	return ret;
 }
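The drain loop above is a capped exponential backoff: the first sleep
is 10 ms, doubling until it passes 1 s, for at most 20 sleeps -- on the
order of 18 seconds before netvsc_close() gives up with -ETIMEDOUT.
The bare pattern, with hypothetical poll_empty()/sleep_ms() callbacks
standing in for the ring-buffer check and msleep():

    /* Sketch of the backoff used in netvsc_close(); not kernel code. */
    static int wait_drained(int (*poll_empty)(void),
                            void (*sleep_ms)(unsigned int))
    {
            unsigned int msec = 10, retry = 0;

            while (!poll_empty()) {
                    if (++retry > 20)
                            return -1;  /* caller reports -ETIMEDOUT */
                    sleep_ms(msec);
                    if (msec < 1000)
                            msec *= 2;
            }
            return 0;
    }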
@@ -198,7 +239,7 @@
 	struct flow_keys flow;
 	int data_len;
 
-	if (!skb_flow_dissect_flow_keys(skb, &flow) ||
+	if (!skb_flow_dissect_flow_keys(skb, &flow, 0) ||
 	    !(flow.basic.n_proto == htons(ETH_P_IP) ||
 	      flow.basic.n_proto == htons(ETH_P_IPV6)))
 		return false;
@@ -729,6 +770,104 @@
 	}
 }
 
+static int netvsc_set_channels(struct net_device *net,
+			       struct ethtool_channels *channels)
+{
+	struct net_device_context *net_device_ctx = netdev_priv(net);
+	struct hv_device *dev = net_device_ctx->device_ctx;
+	struct netvsc_device *nvdev = hv_get_drvdata(dev);
+	struct netvsc_device_info device_info;
+	u32 num_chn;
+	u32 max_chn;
+	int ret = 0;
+	bool recovering = false;
+
+	if (!nvdev || nvdev->destroy)
+		return -ENODEV;
+
+	num_chn = nvdev->num_chn;
+	max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());
+
+	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
+		pr_info("vRSS unsupported before NVSP Version 5\n");
+		return -EINVAL;
+	}
+
+	/* We do not support rx, tx, or other */
+	if (!channels ||
+	    channels->rx_count ||
+	    channels->tx_count ||
+	    channels->other_count ||
+	    (channels->combined_count < 1))
+		return -EINVAL;
+
+	if (channels->combined_count > max_chn) {
+		pr_info("combined channels too high, using %d\n", max_chn);
+		channels->combined_count = max_chn;
+	}
+
+	ret = netvsc_close(net);
+	if (ret)
+		goto out;
+
+ do_set:
+	nvdev->start_remove = true;
+	rndis_filter_device_remove(dev);
+
+	nvdev->num_chn = channels->combined_count;
+
+	net_device_ctx->device_ctx = dev;
+	hv_set_drvdata(dev, net);
+
+	memset(&device_info, 0, sizeof(device_info));
+	device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
+	device_info.ring_size = ring_size;
+	device_info.max_num_vrss_chns = max_num_vrss_chns;
+
+	ret = rndis_filter_device_add(dev, &device_info);
+	if (ret) {
+		if (recovering) {
+			netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
+			return ret;
+		}
+		goto recover;
+	}
+
+	nvdev = hv_get_drvdata(dev);
+
+	ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
+	if (ret) {
+		if (recovering) {
+			netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
+			return ret;
+		}
+		goto recover;
+	}
+
+	ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
+	if (ret) {
+		if (recovering) {
+			netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
+			return ret;
+		}
+		goto recover;
+	}
+
+ out:
+	netvsc_open(net);
+
+	return ret;
+
+ recover:
+	/* If the above failed, we attempt to recover through the same
+	 * process but with the original number of channels.
+	 */
+	netdev_err(net, "could not set channels, recovering\n");
+	recovering = true;
+	channels->combined_count = num_chn;
+	goto do_set;
+}
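The do_set/recover labels above give a single-shot fallback: tear the
device down, re-add it with the requested channel count, and if any
step fails walk the same path once more with the original count.
Stripped of the RNDIS details (apply() is a hypothetical stand-in for
the remove/add/queue-setup sequence):

    /* Try the new value; on failure, retry once with the old value. */
    static int set_with_recovery(unsigned int new_val, unsigned int old_val,
                                 int (*apply)(unsigned int))
    {
            unsigned int val = new_val;
            int recovering = 0;
            int ret;

    retry:
            ret = apply(val);
            if (ret) {
                    if (recovering)
                            return ret;  /* recovery failed too */
                    recovering = 1;
                    val = old_val;
                    goto retry;
            }
            return 0;
    }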
+
 static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 {
 	struct net_device_context *ndevctx = netdev_priv(ndev);
@@ -736,6 +875,7 @@
 	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
 	struct netvsc_device_info device_info;
 	int limit = ETH_DATA_LEN;
+	int ret = 0;
 
 	if (nvdev == NULL || nvdev->destroy)
 		return -ENODEV;
@@ -743,25 +883,31 @@
 	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
 		limit = NETVSC_MTU - ETH_HLEN;
 
-	/* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */
-	if (mtu < ETH_DATA_LEN || mtu > limit)
+	if (mtu < NETVSC_MTU_MIN || mtu > limit)
 		return -EINVAL;
 
+	ret = netvsc_close(ndev);
+	if (ret)
+		goto out;
+
 	nvdev->start_remove = true;
-	cancel_work_sync(&ndevctx->work);
-	netif_tx_disable(ndev);
 	rndis_filter_device_remove(hdev);
 
 	ndev->mtu = mtu;
 
 	ndevctx->device_ctx = hdev;
 	hv_set_drvdata(hdev, ndev);
+
+	memset(&device_info, 0, sizeof(device_info));
 	device_info.ring_size = ring_size;
+	device_info.num_chn = nvdev->num_chn;
 	device_info.max_num_vrss_chns = max_num_vrss_chns;
 	rndis_filter_device_add(hdev, &device_info);
-	netif_tx_wake_all_queues(ndev);
 
-	return 0;
+out:
+	netvsc_open(ndev);
+
+	return ret;
 }
 
 static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
@@ -844,6 +990,7 @@
 	.get_drvinfo	= netvsc_get_drvinfo,
 	.get_link	= ethtool_op_get_link,
 	.get_channels   = netvsc_get_channels,
+	.set_channels   = netvsc_set_channels,
 };
 
 static const struct net_device_ops device_ops = {
@@ -977,6 +1124,7 @@
 	net->needed_headroom = max_needed_headroom;
 
 	/* Notify the netvsc driver of the new device */
+	memset(&device_info, 0, sizeof(device_info));
 	device_info.ring_size = ring_size;
 	device_info.max_num_vrss_chns = max_num_vrss_chns;
 	ret = rndis_filter_device_add(dev, &device_info);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 236aeb7..5931a79 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -984,9 +984,16 @@
 	struct netvsc_device *nvscdev;
 	u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
 	int ret;
+	unsigned long flags;
 
 	nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
 
+	spin_lock_irqsave(&nvscdev->sc_lock, flags);
+	nvscdev->num_sc_offered--;
+	spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
+	if (nvscdev->num_sc_offered == 0)
+		complete(&nvscdev->channel_init_wait);
+
 	if (chn_index >= nvscdev->num_chn)
 		return;
 
@@ -1015,8 +1022,10 @@
 	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
 	u32 mtu, size;
 	u32 num_rss_qs;
+	u32 sc_delta;
 	const struct cpumask *node_cpu_mask;
 	u32 num_possible_rss_qs;
+	unsigned long flags;
 
 	rndis_device = get_rndis_device();
 	if (!rndis_device)
@@ -1039,6 +1048,8 @@
 	net_device->max_chn = 1;
 	net_device->num_chn = 1;
 
+	spin_lock_init(&net_device->sc_lock);
+
 	net_device->extension = rndis_device;
 	rndis_device->net_dev = net_device;
 
@@ -1054,7 +1065,7 @@
 	ret = rndis_filter_query_device(rndis_device,
 					RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
 					&mtu, &size);
-	if (ret == 0 && size == sizeof(u32))
+	if (ret == 0 && size == sizeof(u32) && mtu < net_device->ndev->mtu)
 		net_device->ndev->mtu = mtu;
 
 	/* Get the mac address */
@@ -1114,7 +1125,15 @@
 	 */
 	node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
 	num_possible_rss_qs = cpumask_weight(node_cpu_mask);
-	net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+
+	/* We will use the given number of channels if available. */
+	if (device_info->num_chn && device_info->num_chn < net_device->max_chn)
+		net_device->num_chn = device_info->num_chn;
+	else
+		net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+
+	num_rss_qs = net_device->num_chn - 1;
+	net_device->num_sc_offered = num_rss_qs;
 
 	if (net_device->num_chn == 1)
 		goto out;
@@ -1157,11 +1176,25 @@
 
 	ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
 
+	/*
+	 * Wait for the host to send us the sub-channel offers.
+	 */
+	spin_lock_irqsave(&net_device->sc_lock, flags);
+	sc_delta = num_rss_qs - (net_device->num_chn - 1);
+	net_device->num_sc_offered -= sc_delta;
+	spin_unlock_irqrestore(&net_device->sc_lock, flags);
+
+	while (net_device->num_sc_offered != 0) {
+		t = wait_for_completion_timeout(&net_device->channel_init_wait, 10*HZ);
+		if (t == 0)
+			WARN(1, "Netvsc: Waiting for sub-channel processing");
+	}
 out:
 	if (ret) {
 		net_device->max_chn = 1;
 		net_device->num_chn = 1;
 	}
+
 	return 0; /* return 0 because primary channel can be used alone */
 
 err_dev_remv:
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index f7bd9f3..6422caa 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -97,9 +97,7 @@
 
 	struct at86rf230_state_change irq;
 
-	bool tx_aret;
 	unsigned long cal_timeout;
-	s8 max_frame_retries;
 	bool is_tx;
 	bool is_tx_from_off;
 	u8 tx_retry;
@@ -545,7 +543,9 @@
 	}
 
 	/* Default delay is 1us in most cases */
-	tim = ktime_set(0, NSEC_PER_USEC);
+	udelay(1);
+	at86rf230_async_state_timer(&ctx->timer);
+	return;
 
 change:
 	hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL);
@@ -649,7 +649,7 @@
 
 	enable_irq(ctx->irq);
 
-	ieee802154_xmit_complete(lp->hw, lp->tx_skb, !lp->tx_aret);
+	ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);
 }
 
 static void
@@ -758,17 +758,10 @@
 {
 	if (lp->is_tx) {
 		lp->is_tx = 0;
-
-		if (lp->tx_aret)
-			at86rf230_async_state_change(lp, &lp->irq,
-						     STATE_FORCE_TX_ON,
-						     at86rf230_tx_trac_status,
-						     true);
-		else
-			at86rf230_async_state_change(lp, &lp->irq,
-						     STATE_RX_AACK_ON,
-						     at86rf230_tx_complete,
-						     true);
+		at86rf230_async_state_change(lp, &lp->irq,
+					     STATE_FORCE_TX_ON,
+					     at86rf230_tx_trac_status,
+					     true);
 	} else {
 		at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
 					 at86rf230_rx_trac_check, true);
@@ -874,24 +867,16 @@
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
 
-	/* In ARET mode we need to go into STATE_TX_ARET_ON after we
-	 * are in STATE_TX_ON. The pfad differs here, so we change
-	 * the complete handler.
-	 */
-	if (lp->tx_aret) {
-		if (lp->is_tx_from_off) {
-			lp->is_tx_from_off = false;
-			at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
-						     at86rf230_write_frame,
-						     false);
-		} else {
-			at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
-						     at86rf230_xmit_tx_on,
-						     false);
-		}
+	/* check whether we are coming from the off state */
+	if (lp->is_tx_from_off) {
+		lp->is_tx_from_off = false;
+		at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
+					     at86rf230_write_frame,
+					     false);
 	} else {
 		at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
-					     at86rf230_write_frame, false);
+					     at86rf230_xmit_tx_on,
+					     false);
 	}
 }
 
@@ -1265,15 +1250,8 @@
 at86rf230_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
 {
 	struct at86rf230_local *lp = hw->priv;
-	int rc = 0;
 
-	lp->tx_aret = retries >= 0;
-	lp->max_frame_retries = retries;
-
-	if (retries >= 0)
-		rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
-
-	return rc;
+	return at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
 }
 
 static int
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index b6fc295..c5b54a1 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -833,6 +833,7 @@
 		if (!spi_pdata)
 			return -ENOENT;
 		*pdata = *spi_pdata;
+		priv->fifo_pin = pdata->fifo;
 		return 0;
 	}
 
@@ -1151,7 +1152,6 @@
 static struct spi_driver cc2520_driver = {
 	.driver = {
 		.name = "cc2520",
-		.bus = &spi_bus_type,
 		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(cc2520_of_ids),
 	},
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 2549760..997724b 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -812,7 +812,6 @@
 static struct spi_driver mrf24j40_driver = {
 	.driver = {
 		.name = "mrf24j40",
-		.bus = &spi_bus_type,
 		.owner = THIS_MODULE,
 	},
 	.id_table = mrf24j40_ids,
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 94570aa..cc56fac 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -38,69 +38,68 @@
 #include <net/net_namespace.h>
 
 #define TX_Q_LIMIT    32
-struct ifb_private {
+struct ifb_q_private {
+	struct net_device	*dev;
 	struct tasklet_struct   ifb_tasklet;
-	int     tasklet_pending;
-
-	struct u64_stats_sync	rsync;
+	int			tasklet_pending;
+	int			txqnum;
 	struct sk_buff_head     rq;
-	u64 rx_packets;
-	u64 rx_bytes;
+	u64			rx_packets;
+	u64			rx_bytes;
+	struct u64_stats_sync	rsync;
 
 	struct u64_stats_sync	tsync;
+	u64			tx_packets;
+	u64			tx_bytes;
 	struct sk_buff_head     tq;
-	u64 tx_packets;
-	u64 tx_bytes;
+} ____cacheline_aligned_in_smp;
+
+struct ifb_dev_private {
+	struct ifb_q_private *tx_private;
 };
 
-static int numifbs = 2;
-
-static void ri_tasklet(unsigned long dev);
 static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
 static int ifb_open(struct net_device *dev);
 static int ifb_close(struct net_device *dev);
 
-static void ri_tasklet(unsigned long dev)
+static void ifb_ri_tasklet(unsigned long _txp)
 {
-	struct net_device *_dev = (struct net_device *)dev;
-	struct ifb_private *dp = netdev_priv(_dev);
+	struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
 	struct netdev_queue *txq;
 	struct sk_buff *skb;
 
-	txq = netdev_get_tx_queue(_dev, 0);
-	if ((skb = skb_peek(&dp->tq)) == NULL) {
-		if (__netif_tx_trylock(txq)) {
-			skb_queue_splice_tail_init(&dp->rq, &dp->tq);
-			__netif_tx_unlock(txq);
-		} else {
-			/* reschedule */
+	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
+	skb = skb_peek(&txp->tq);
+	if (!skb) {
+		if (!__netif_tx_trylock(txq))
 			goto resched;
-		}
+		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
+		__netif_tx_unlock(txq);
 	}
 
-	while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
+	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
 		u32 from = G_TC_FROM(skb->tc_verd);
 
 		skb->tc_verd = 0;
 		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
 
-		u64_stats_update_begin(&dp->tsync);
-		dp->tx_packets++;
-		dp->tx_bytes += skb->len;
-		u64_stats_update_end(&dp->tsync);
+		u64_stats_update_begin(&txp->tsync);
+		txp->tx_packets++;
+		txp->tx_bytes += skb->len;
+		u64_stats_update_end(&txp->tsync);
 
 		rcu_read_lock();
-		skb->dev = dev_get_by_index_rcu(dev_net(_dev), skb->skb_iif);
+		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
 		if (!skb->dev) {
 			rcu_read_unlock();
 			dev_kfree_skb(skb);
-			_dev->stats.tx_dropped++;
-			if (skb_queue_len(&dp->tq) != 0)
+			txp->dev->stats.tx_dropped++;
+			if (skb_queue_len(&txp->tq) != 0)
 				goto resched;
 			break;
 		}
 		rcu_read_unlock();
-		skb->skb_iif = _dev->ifindex;
+		skb->skb_iif = txp->dev->ifindex;
 
 		if (from & AT_EGRESS) {
 			dev_queue_xmit(skb);
@@ -112,10 +111,11 @@
 	}
 
 	if (__netif_tx_trylock(txq)) {
-		if ((skb = skb_peek(&dp->rq)) == NULL) {
-			dp->tasklet_pending = 0;
-			if (netif_queue_stopped(_dev))
-				netif_wake_queue(_dev);
+		skb = skb_peek(&txp->rq);
+		if (!skb) {
+			txp->tasklet_pending = 0;
+			if (netif_tx_queue_stopped(txq))
+				netif_tx_wake_queue(txq);
 		} else {
 			__netif_tx_unlock(txq);
 			goto resched;
@@ -123,8 +123,8 @@
 		__netif_tx_unlock(txq);
 	} else {
 resched:
-		dp->tasklet_pending = 1;
-		tasklet_schedule(&dp->ifb_tasklet);
+		txp->tasklet_pending = 1;
+		tasklet_schedule(&txp->ifb_tasklet);
 	}
 
 }
@@ -132,29 +132,58 @@
 static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
 					     struct rtnl_link_stats64 *stats)
 {
-	struct ifb_private *dp = netdev_priv(dev);
+	struct ifb_dev_private *dp = netdev_priv(dev);
+	struct ifb_q_private *txp = dp->tx_private;
 	unsigned int start;
+	u64 packets, bytes;
+	int i;
 
-	do {
-		start = u64_stats_fetch_begin_irq(&dp->rsync);
-		stats->rx_packets = dp->rx_packets;
-		stats->rx_bytes = dp->rx_bytes;
-	} while (u64_stats_fetch_retry_irq(&dp->rsync, start));
+	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+		do {
+			start = u64_stats_fetch_begin_irq(&txp->rsync);
+			packets = txp->rx_packets;
+			bytes = txp->rx_bytes;
+		} while (u64_stats_fetch_retry_irq(&txp->rsync, start));
+		stats->rx_packets += packets;
+		stats->rx_bytes += bytes;
 
-	do {
-		start = u64_stats_fetch_begin_irq(&dp->tsync);
-
-		stats->tx_packets = dp->tx_packets;
-		stats->tx_bytes = dp->tx_bytes;
-
-	} while (u64_stats_fetch_retry_irq(&dp->tsync, start));
-
+		do {
+			start = u64_stats_fetch_begin_irq(&txp->tsync);
+			packets = txp->tx_packets;
+			bytes = txp->tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&txp->tsync, start));
+		stats->tx_packets += packets;
+		stats->tx_bytes += bytes;
+	}
 	stats->rx_dropped = dev->stats.rx_dropped;
 	stats->tx_dropped = dev->stats.tx_dropped;
 
 	return stats;
 }
 
+static int ifb_dev_init(struct net_device *dev)
+{
+	struct ifb_dev_private *dp = netdev_priv(dev);
+	struct ifb_q_private *txp;
+	int i;
+
+	txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
+	if (!txp)
+		return -ENOMEM;
+	dp->tx_private = txp;
+	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+		txp->txqnum = i;
+		txp->dev = dev;
+		__skb_queue_head_init(&txp->rq);
+		__skb_queue_head_init(&txp->tq);
+		u64_stats_init(&txp->rsync);
+		u64_stats_init(&txp->tsync);
+		tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
+			     (unsigned long)txp);
+		netif_tx_start_queue(netdev_get_tx_queue(dev, i));
+	}
+	return 0;
+}
 
 static const struct net_device_ops ifb_netdev_ops = {
 	.ndo_open	= ifb_open,
@@ -162,6 +191,7 @@
 	.ndo_get_stats64 = ifb_stats64,
 	.ndo_start_xmit	= ifb_xmit,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_init	= ifb_dev_init,
 };
 
 #define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG  | NETIF_F_FRAGLIST	| \
@@ -169,10 +199,24 @@
 		      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX		| \
 		      NETIF_F_HW_VLAN_STAG_TX)
 
+static void ifb_dev_free(struct net_device *dev)
+{
+	struct ifb_dev_private *dp = netdev_priv(dev);
+	struct ifb_q_private *txp = dp->tx_private;
+	int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+		tasklet_kill(&txp->ifb_tasklet);
+		__skb_queue_purge(&txp->rq);
+		__skb_queue_purge(&txp->tq);
+	}
+	kfree(dp->tx_private);
+	free_netdev(dev);
+}
+
 static void ifb_setup(struct net_device *dev)
 {
 	/* Initialize the device structure. */
-	dev->destructor = free_netdev;
 	dev->netdev_ops = &ifb_netdev_ops;
 
 	/* Fill in device structure with ethernet-generic values. */
@@ -188,17 +232,19 @@
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	netif_keep_dst(dev);
 	eth_hw_addr_random(dev);
+	dev->destructor = ifb_dev_free;
 }
 
 static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	struct ifb_private *dp = netdev_priv(dev);
+	struct ifb_dev_private *dp = netdev_priv(dev);
 	u32 from = G_TC_FROM(skb->tc_verd);
+	struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);
 
-	u64_stats_update_begin(&dp->rsync);
-	dp->rx_packets++;
-	dp->rx_bytes += skb->len;
-	u64_stats_update_end(&dp->rsync);
+	u64_stats_update_begin(&txp->rsync);
+	txp->rx_packets++;
+	txp->rx_bytes += skb->len;
+	u64_stats_update_end(&txp->rsync);
 
 	if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
 		dev_kfree_skb(skb);
@@ -206,14 +252,13 @@
 		return NETDEV_TX_OK;
 	}
 
-	if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
-		netif_stop_queue(dev);
-	}
+	if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
+		netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));
 
-	__skb_queue_tail(&dp->rq, skb);
-	if (!dp->tasklet_pending) {
-		dp->tasklet_pending = 1;
-		tasklet_schedule(&dp->ifb_tasklet);
+	__skb_queue_tail(&txp->rq, skb);
+	if (!txp->tasklet_pending) {
+		txp->tasklet_pending = 1;
+		tasklet_schedule(&txp->ifb_tasklet);
 	}
 
 	return NETDEV_TX_OK;
@@ -221,24 +266,13 @@
 
 static int ifb_close(struct net_device *dev)
 {
-	struct ifb_private *dp = netdev_priv(dev);
-
-	tasklet_kill(&dp->ifb_tasklet);
-	netif_stop_queue(dev);
-	__skb_queue_purge(&dp->rq);
-	__skb_queue_purge(&dp->tq);
+	netif_tx_stop_all_queues(dev);
 	return 0;
 }
 
 static int ifb_open(struct net_device *dev)
 {
-	struct ifb_private *dp = netdev_priv(dev);
-
-	tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
-	__skb_queue_head_init(&dp->rq);
-	__skb_queue_head_init(&dp->tq);
-	netif_start_queue(dev);
-
+	netif_tx_start_all_queues(dev);
 	return 0;
 }
 
@@ -255,31 +289,30 @@
 
 static struct rtnl_link_ops ifb_link_ops __read_mostly = {
 	.kind		= "ifb",
-	.priv_size	= sizeof(struct ifb_private),
+	.priv_size	= sizeof(struct ifb_dev_private),
 	.setup		= ifb_setup,
 	.validate	= ifb_validate,
 };
 
-/* Number of ifb devices to be set up by this module. */
+/* Number of ifb devices to be set up by this module.
+ * Note that these legacy devices have one queue.
+ * Prefer something like : ip link add ifb10 numtxqueues 8 type ifb
+ */
+static int numifbs = 2;
 module_param(numifbs, int, 0);
 MODULE_PARM_DESC(numifbs, "Number of ifb devices");
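A typical pairing for a multiqueue ifb created as the comment suggests
(commands shown for illustration; the u32 "match u32 0 0" filter is the
classic match-everything rule for redirecting ingress traffic):

    ip link add ifb10 numtxqueues 8 type ifb
    ip link set ifb10 up
    tc qdisc add dev eth0 handle ffff: ingress
    tc filter add dev eth0 parent ffff: protocol ip u32 match u32 0 0 \
            action mirred egress redirect dev ifb10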
 
 static int __init ifb_init_one(int index)
 {
 	struct net_device *dev_ifb;
-	struct ifb_private *dp;
 	int err;
 
-	dev_ifb = alloc_netdev(sizeof(struct ifb_private), "ifb%d",
+	dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
 			       NET_NAME_UNKNOWN, ifb_setup);
 
 	if (!dev_ifb)
 		return -ENOMEM;
 
-	dp = netdev_priv(dev_ifb);
-	u64_stats_init(&dp->rsync);
-	u64_stats_init(&dp->tsync);
-
 	dev_ifb->rtnl_link_ops = &ifb_link_ops;
 	err = register_netdevice(dev_ifb);
 	if (err < 0)
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 20b58bd..a9268db 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -520,12 +520,11 @@
 	ether_setup(dev);
 
 	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
-	dev->priv_flags |= IFF_UNICAST_FLT;
+	dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
 	dev->netdev_ops = &ipvlan_netdev_ops;
 	dev->destructor = free_netdev;
 	dev->header_ops = &ipvlan_header_ops;
 	dev->ethtool_ops = &ipvlan_ethtool_ops;
-	dev->tx_queue_len = 0;
 }
 
 static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index c76283c..dc7d970 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -165,10 +165,9 @@
 	dev->mtu		= 64 * 1024;
 	dev->hard_header_len	= ETH_HLEN;	/* 14	*/
 	dev->addr_len		= ETH_ALEN;	/* 6	*/
-	dev->tx_queue_len	= 0;
 	dev->type		= ARPHRD_LOOPBACK;	/* 0x0001*/
 	dev->flags		= IFF_LOOPBACK;
-	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
+	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
 	netif_keep_dst(dev);
 	dev->hw_features	= NETIF_F_ALL_TSO | NETIF_F_UFO;
 	dev->features 		= NETIF_F_SG | NETIF_F_FRAGLIST
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 9f59f17..47da435 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1047,6 +1047,7 @@
 	.ndo_netpoll_cleanup	= macvlan_dev_netpoll_cleanup,
 #endif
 	.ndo_get_iflink		= macvlan_dev_get_iflink,
+	.ndo_features_check	= passthru_features_check,
 };
 
 void macvlan_common_setup(struct net_device *dev)
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index 34924df..7b7c70e 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -130,7 +130,7 @@
 static void nlmon_setup(struct net_device *dev)
 {
 	dev->type = ARPHRD_NETLINK;
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 
 	dev->netdev_ops	= &nlmon_ops;
 	dev->ethtool_ops = &nlmon_ethtool_ops;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index cb86d7a..c07030d 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -14,6 +14,11 @@
 
 comment "MII PHY device drivers"
 
+config AQUANTIA_PHY
+        tristate "Drivers for the Aquantia PHYs"
+        ---help---
+          Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
+
 config AT803X_PHY
 	tristate "Drivers for Atheros AT803X PHYs"
 	---help---
@@ -54,6 +59,11 @@
         ---help---
           Currently supports the vsc8244
 
+config TERANETICS_PHY
+        tristate "Drivers for the Teranetics PHYs"
+        ---help---
+          Currently supports the Teranetics TN2020
+
 config SMSC_PHY
 	tristate "Drivers for SMSC PHYs"
 	---help---
@@ -145,13 +155,13 @@
 	  will be called mdio-gpio.
 
 config MDIO_OCTEON
-	tristate "Support for MDIO buses on Octeon SOCs"
-	depends on CAVIUM_OCTEON_SOC
-	default y
+	tristate "Support for MDIO buses on Octeon and ThunderX SOCs"
+	depends on 64BIT
 	help
 
-	  This module provides a driver for the Octeon MDIO busses.
-	  It is required by the Octeon Ethernet device drivers.
+	  This module provides a driver for the Octeon and ThunderX MDIO
+	  busses. It is required by the Octeon and ThunderX ethernet device
+	  drivers.
 
 	  If in doubt, say Y.
 
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index fcc25a0..9bb1033 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -3,12 +3,14 @@
 libphy-objs			:= phy.o phy_device.o mdio_bus.o
 
 obj-$(CONFIG_PHYLIB)		+= libphy.o
+obj-$(CONFIG_AQUANTIA_PHY)	+= aquantia.o
 obj-$(CONFIG_MARVELL_PHY)	+= marvell.o
 obj-$(CONFIG_DAVICOM_PHY)	+= davicom.o
 obj-$(CONFIG_CICADA_PHY)	+= cicada.o
 obj-$(CONFIG_LXT_PHY)		+= lxt.o
 obj-$(CONFIG_QSEMI_PHY)		+= qsemi.o
 obj-$(CONFIG_SMSC_PHY)		+= smsc.o
+obj-$(CONFIG_TERANETICS_PHY)	+= teranetics.o
 obj-$(CONFIG_VITESSE_PHY)	+= vitesse.o
 obj-$(CONFIG_BROADCOM_PHY)	+= broadcom.o
 obj-$(CONFIG_BCM63XX_PHY)	+= bcm63xx.o
diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
new file mode 100644
index 0000000..d6111af
--- /dev/null
+++ b/drivers/net/phy/aquantia.c
@@ -0,0 +1,201 @@
+/*
+ * Driver for Aquantia PHY
+ *
+ * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
+ *
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+
+#define PHY_ID_AQ1202	0x03a1b445
+#define PHY_ID_AQ2104	0x03a1b460
+#define PHY_ID_AQR105	0x03a1b4a2
+#define PHY_ID_AQR405	0x03a1b4b0
+
+#define PHY_AQUANTIA_FEATURES	(SUPPORTED_10000baseT_Full | \
+				 SUPPORTED_1000baseT_Full | \
+				 SUPPORTED_100baseT_Full | \
+				 PHY_DEFAULT_FEATURES)
+
+static int aquantia_config_aneg(struct phy_device *phydev)
+{
+	phydev->supported = PHY_AQUANTIA_FEATURES;
+	phydev->advertising = phydev->supported;
+
+	return 0;
+}
+
+static int aquantia_aneg_done(struct phy_device *phydev)
+{
+	int reg;
+
+	reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+	return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
+}
+
+static int aquantia_config_intr(struct phy_device *phydev)
+{
+	int err;
+
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+		err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 1);
+		if (err < 0)
+			return err;
+
+		err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 1);
+		if (err < 0)
+			return err;
+
+		err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0x1001);
+	} else {
+		err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 0);
+		if (err < 0)
+			return err;
+
+		err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 0);
+		if (err < 0)
+			return err;
+
+		err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0);
+	}
+
+	return err;
+}
+
+static int aquantia_ack_interrupt(struct phy_device *phydev)
+{
+	int reg;
+
+	reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xcc01);
+	return (reg < 0) ? reg : 0;
+}
+
+static int aquantia_read_status(struct phy_device *phydev)
+{
+	int reg;
+
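+	/* Clause-45 status registers latch a link-down event, so read
+	 * MDIO_STAT1 twice; the second read reflects the current state.
+	 */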
+	reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+	reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+	if (reg & MDIO_STAT1_LSTATUS)
+		phydev->link = 1;
+	else
+		phydev->link = 0;
+
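+	/* 0xc800 appears to be a vendor speed/status register; it is
+	 * read twice with a settling delay before decoding (assumption
+	 * based on the access pattern, no public datasheet reference).
+	 */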
+	reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
+	mdelay(10);
+	reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
+
+	switch (reg) {
+	case 0x9:
+		phydev->speed = SPEED_2500;
+		break;
+	case 0x5:
+		phydev->speed = SPEED_1000;
+		break;
+	case 0x3:
+		phydev->speed = SPEED_100;
+		break;
+	case 0x7:
+	default:
+		phydev->speed = SPEED_10000;
+		break;
+	}
+	phydev->duplex = DUPLEX_FULL;
+
+	return 0;
+}
+
+static struct phy_driver aquantia_driver[] = {
+{
+	.phy_id		= PHY_ID_AQ1202,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Aquantia AQ1202",
+	.features	= PHY_AQUANTIA_FEATURES,
+	.flags		= PHY_HAS_INTERRUPT,
+	.aneg_done	= aquantia_aneg_done,
+	.config_aneg    = aquantia_config_aneg,
+	.config_intr	= aquantia_config_intr,
+	.ack_interrupt	= aquantia_ack_interrupt,
+	.read_status	= aquantia_read_status,
+	.driver		= { .owner = THIS_MODULE,},
+},
+{
+	.phy_id		= PHY_ID_AQ2104,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Aquantia AQ2104",
+	.features	= PHY_AQUANTIA_FEATURES,
+	.flags		= PHY_HAS_INTERRUPT,
+	.aneg_done	= aquantia_aneg_done,
+	.config_aneg    = aquantia_config_aneg,
+	.config_intr	= aquantia_config_intr,
+	.ack_interrupt	= aquantia_ack_interrupt,
+	.read_status	= aquantia_read_status,
+	.driver		= { .owner = THIS_MODULE,},
+},
+{
+	.phy_id		= PHY_ID_AQR105,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Aquantia AQR105",
+	.features	= PHY_AQUANTIA_FEATURES,
+	.flags		= PHY_HAS_INTERRUPT,
+	.aneg_done	= aquantia_aneg_done,
+	.config_aneg    = aquantia_config_aneg,
+	.config_intr	= aquantia_config_intr,
+	.ack_interrupt	= aquantia_ack_interrupt,
+	.read_status	= aquantia_read_status,
+	.driver		= { .owner = THIS_MODULE,},
+},
+{
+	.phy_id		= PHY_ID_AQR405,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Aquantia AQR405",
+	.features	= PHY_AQUANTIA_FEATURES,
+	.flags		= PHY_HAS_INTERRUPT,
+	.aneg_done	= aquantia_aneg_done,
+	.config_aneg    = aquantia_config_aneg,
+	.config_intr	= aquantia_config_intr,
+	.ack_interrupt	= aquantia_ack_interrupt,
+	.read_status	= aquantia_read_status,
+	.driver		= { .owner = THIS_MODULE,},
+},
+};
+
+static int __init aquantia_init(void)
+{
+	return phy_drivers_register(aquantia_driver,
+				    ARRAY_SIZE(aquantia_driver));
+}
+
+static void __exit aquantia_exit(void)
+{
+	return phy_drivers_unregister(aquantia_driver,
+				      ARRAY_SIZE(aquantia_driver));
+}
+
+module_init(aquantia_init);
+module_exit(aquantia_exit);
+
+static struct mdio_device_id __maybe_unused aquantia_tbl[] = {
+	{ PHY_ID_AQ1202, 0xfffffff0 },
+	{ PHY_ID_AQ2104, 0xfffffff0 },
+	{ PHY_ID_AQR105, 0xfffffff0 },
+	{ PHY_ID_AQR405, 0xfffffff0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(mdio, aquantia_tbl);
+
+MODULE_DESCRIPTION("Aquantia PHY driver");
+MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 00cb41e..185b03c 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1449,17 +1449,9 @@
 	info->rx_filters =
 		(1 << HWTSTAMP_FILTER_NONE) |
 		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
-		(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-		(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
 		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
 		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 	return 0;
 }
 
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 8a3bf54..32f1066 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -123,12 +123,8 @@
 	if (ret)
 		return ret;
 
-	ret = of_property_read_u32(of_node, "ti,fifo-depth",
+	return of_property_read_u32(of_node, "ti,fifo-depth",
 				   &dp83867->fifo_depth);
-	if (ret)
-		return ret;
-
-	return 0;
 }
 #else
 static int dp83867_of_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index d7a6524..12c7eb2 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -22,6 +22,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/gpio.h>
 
 #define MII_REGS_NUM 29
 
@@ -38,6 +39,7 @@
 	struct fixed_phy_status status;
 	int (*link_update)(struct net_device *, struct fixed_phy_status *);
 	struct list_head node;
+	int link_gpio;
 };
 
 static struct platform_device *pdev;
@@ -52,58 +54,86 @@
 	u16 lpagb = 0;
 	u16 lpa = 0;
 
-	if (fp->status.duplex) {
-		bmcr |= BMCR_FULLDPLX;
+	if (gpio_is_valid(fp->link_gpio))
+		fp->status.link = !!gpio_get_value_cansleep(fp->link_gpio);
 
+	if (fp->status.duplex) {
 		switch (fp->status.speed) {
 		case 1000:
 			bmsr |= BMSR_ESTATEN;
-			bmcr |= BMCR_SPEED1000;
-			lpagb |= LPA_1000FULL;
 			break;
 		case 100:
 			bmsr |= BMSR_100FULL;
-			bmcr |= BMCR_SPEED100;
-			lpa |= LPA_100FULL;
 			break;
 		case 10:
 			bmsr |= BMSR_10FULL;
-			lpa |= LPA_10FULL;
 			break;
 		default:
-			pr_warn("fixed phy: unknown speed\n");
-			return -EINVAL;
+			break;
 		}
 	} else {
 		switch (fp->status.speed) {
 		case 1000:
 			bmsr |= BMSR_ESTATEN;
-			bmcr |= BMCR_SPEED1000;
-			lpagb |= LPA_1000HALF;
 			break;
 		case 100:
 			bmsr |= BMSR_100HALF;
-			bmcr |= BMCR_SPEED100;
-			lpa |= LPA_100HALF;
 			break;
 		case 10:
 			bmsr |= BMSR_10HALF;
-			lpa |= LPA_10HALF;
 			break;
 		default:
-			pr_warn("fixed phy: unknown speed\n");
-			return -EINVAL;
+			break;
 		}
 	}
 
-	if (fp->status.link)
+	if (fp->status.link) {
 		bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
 
-	if (fp->status.pause)
-		lpa |= LPA_PAUSE_CAP;
+		if (fp->status.duplex) {
+			bmcr |= BMCR_FULLDPLX;
 
-	if (fp->status.asym_pause)
-		lpa |= LPA_PAUSE_ASYM;
+			switch (fp->status.speed) {
+			case 1000:
+				bmcr |= BMCR_SPEED1000;
+				lpagb |= LPA_1000FULL;
+				break;
+			case 100:
+				bmcr |= BMCR_SPEED100;
+				lpa |= LPA_100FULL;
+				break;
+			case 10:
+				lpa |= LPA_10FULL;
+				break;
+			default:
+				pr_warn("fixed phy: unknown speed\n");
+				return -EINVAL;
+			}
+		} else {
+			switch (fp->status.speed) {
+			case 1000:
+				bmcr |= BMCR_SPEED1000;
+				lpagb |= LPA_1000HALF;
+				break;
+			case 100:
+				bmcr |= BMCR_SPEED100;
+				lpa |= LPA_100HALF;
+				break;
+			case 10:
+				lpa |= LPA_10HALF;
+				break;
+			default:
+				pr_warn("fixed phy: unknown speed\n");
+				return -EINVAL;
+			}
+		}
+
+		if (fp->status.pause)
+			lpa |= LPA_PAUSE_CAP;
+
+		if (fp->status.asym_pause)
+			lpa |= LPA_PAUSE_ASYM;
+	}
 
 	fp->regs[MII_PHYSID1] = 0;
 	fp->regs[MII_PHYSID2] = 0;
@@ -213,7 +243,8 @@
 EXPORT_SYMBOL(fixed_phy_update_state);
 
 int fixed_phy_add(unsigned int irq, int phy_addr,
-		  struct fixed_phy_status *status)
+		  struct fixed_phy_status *status,
+		  int link_gpio)
 {
 	int ret;
 	struct fixed_mdio_bus *fmb = &platform_fmb;
@@ -229,15 +260,26 @@
 
 	fp->addr = phy_addr;
 	fp->status = *status;
+	fp->link_gpio = link_gpio;
+
+	if (gpio_is_valid(fp->link_gpio)) {
+		ret = gpio_request_one(fp->link_gpio, GPIOF_DIR_IN,
+				       "fixed-link-gpio-link");
+		if (ret)
+			goto err_regs;
+	}
 
 	ret = fixed_phy_update_regs(fp);
 	if (ret)
-		goto err_regs;
+		goto err_gpio;
 
 	list_add_tail(&fp->node, &fmb->phys);
 
 	return 0;
 
+err_gpio:
+	if (gpio_is_valid(fp->link_gpio))
+		gpio_free(fp->link_gpio);
 err_regs:
 	kfree(fp);
 	return ret;
@@ -252,6 +294,8 @@
 	list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
 		if (fp->addr == phy_addr) {
 			list_del(&fp->node);
+			if (gpio_is_valid(fp->link_gpio))
+				gpio_free(fp->link_gpio);
 			kfree(fp);
 			return;
 		}
@@ -264,6 +308,7 @@
 
 struct phy_device *fixed_phy_register(unsigned int irq,
 				      struct fixed_phy_status *status,
+				      int link_gpio,
 				      struct device_node *np)
 {
 	struct fixed_mdio_bus *fmb = &platform_fmb;
@@ -280,7 +325,7 @@
 	phy_addr = phy_fixed_addr++;
 	spin_unlock(&phy_fixed_addr_lock);
 
-	ret = fixed_phy_add(PHY_POLL, phy_addr, status);
+	ret = fixed_phy_add(PHY_POLL, phy_addr, status, link_gpio);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
@@ -301,6 +346,19 @@
 
 	of_node_get(np);
 	phy->dev.of_node = np;
+	phy->is_pseudo_fixed_link = true;
+
+	switch (status->speed) {
+	case SPEED_1000:
+		phy->supported = PHY_1000BT_FEATURES;
+		break;
+	case SPEED_100:
+		phy->supported = PHY_100BT_FEATURES;
+		break;
+	case SPEED_10:
+	default:
+		phy->supported = PHY_10BT_FEATURES;
+	}
 
 	ret = phy_device_register(phy);
 	if (ret) {
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f721444..e6897b6 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -48,8 +48,11 @@
 #define MII_M1011_IMASK_CLEAR		0x0000
 
 #define MII_M1011_PHY_SCR		0x10
+#define MII_M1011_PHY_SCR_MDI		0x0000
+#define MII_M1011_PHY_SCR_MDI_X		0x0020
 #define MII_M1011_PHY_SCR_AUTO_CROSS	0x0060
 
+#define MII_M1145_PHY_EXT_ADDR_PAGE	0x16
 #define MII_M1145_PHY_EXT_SR		0x1b
 #define MII_M1145_PHY_EXT_CR		0x14
 #define MII_M1145_RGMII_RX_DELAY	0x0080
@@ -159,6 +162,43 @@
 	return err;
 }
 
+static int marvell_set_polarity(struct phy_device *phydev, int polarity)
+{
+	int reg;
+	int err;
+	int val;
+
+	/* get the current settings */
+	reg = phy_read(phydev, MII_M1011_PHY_SCR);
+	if (reg < 0)
+		return reg;
+
+	val = reg;
+	val &= ~MII_M1011_PHY_SCR_AUTO_CROSS;
+	switch (polarity) {
+	case ETH_TP_MDI:
+		val |= MII_M1011_PHY_SCR_MDI;
+		break;
+	case ETH_TP_MDI_X:
+		val |= MII_M1011_PHY_SCR_MDI_X;
+		break;
+	case ETH_TP_MDI_AUTO:
+	case ETH_TP_MDI_INVALID:
+	default:
+		val |= MII_M1011_PHY_SCR_AUTO_CROSS;
+		break;
+	}
+
+	if (val != reg) {
+		/* Set the new polarity value in the register */
+		err = phy_write(phydev, MII_M1011_PHY_SCR, val);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
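Together with the phy.c change later in this series (which copies
ethtool's eth_tp_mdix_ctrl into phydev->mdix), this makes the MDI/MDI-X
polarity selectable from userspace; recent ethtool exposes it roughly
as:

    ethtool -s eth0 mdix auto    # or: on / off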
+
 static int marvell_config_aneg(struct phy_device *phydev)
 {
 	int err;
@@ -191,8 +231,7 @@
 	if (err < 0)
 		return err;
 
-	err = phy_write(phydev, MII_M1011_PHY_SCR,
-			MII_M1011_PHY_SCR_AUTO_CROSS);
+	err = marvell_set_polarity(phydev, phydev->mdix);
 	if (err < 0)
 		return err;
 
@@ -514,6 +553,16 @@
 		err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
 		if (err < 0)
 			return err;
+
+		/* make sure copper is selected */
+		err = phy_read(phydev, MII_M1145_PHY_EXT_ADDR_PAGE);
+		if (err < 0)
+			return err;
+
+		err = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE,
+				err & (~0xff));
+		if (err < 0)
+			return err;
 	}
 
 	if (phydev->interface == PHY_INTERFACE_MODE_RTBI) {
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index c838ad6..fcf4e4d 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/platform_device.h>
+#include <linux/of_address.h>
 #include <linux/of_mdio.h>
 #include <linux/delay.h>
 #include <linux/module.h>
@@ -14,11 +15,12 @@
 #include <linux/phy.h>
 #include <linux/io.h>
 
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
 #include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-smix-defs.h>
+#endif
 
-#define DRV_VERSION "1.0"
-#define DRV_DESCRIPTION "Cavium Networks Octeon SMI/MDIO driver"
+#define DRV_VERSION "1.1"
+#define DRV_DESCRIPTION "Cavium Networks Octeon/ThunderX SMI/MDIO driver"
 
 #define SMI_CMD		0x0
 #define SMI_WR_DAT	0x8
@@ -26,6 +28,79 @@
 #define SMI_CLK		0x18
 #define SMI_EN		0x20
 
+#ifdef __BIG_ENDIAN_BITFIELD
+#define OCT_MDIO_BITFIELD_FIELD(field, more)	\
+	field;					\
+	more
+
+#else
+#define OCT_MDIO_BITFIELD_FIELD(field, more)	\
+	more					\
+	field;
+
+#endif
+
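The macro above is the usual kernel trick for declaring hardware
bitfields MSB-first in the source regardless of the compiler's bitfield
allocation order. For example, the cvmx_smix_en declaration below
effectively becomes one of (illustrative expansion; the stray ';' from
the innermost argument is a harmless empty declaration):

    struct cvmx_smix_en_s {         /* big-endian build */
            u64 reserved_1_63:63;
            u64 en:1;
    };

    struct cvmx_smix_en_s {         /* little-endian build */
            u64 en:1;
            u64 reserved_1_63:63;
    };

Either way, 'en' maps to bit 0 of the 64-bit register image.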
+union cvmx_smix_clk {
+	u64 u64;
+	struct cvmx_smix_clk_s {
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_25_63:39,
+	  OCT_MDIO_BITFIELD_FIELD(u64 mode:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_21_23:3,
+	  OCT_MDIO_BITFIELD_FIELD(u64 sample_hi:5,
+	  OCT_MDIO_BITFIELD_FIELD(u64 sample_mode:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_14_14:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 clk_idle:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 preamble:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 sample:4,
+	  OCT_MDIO_BITFIELD_FIELD(u64 phase:8,
+	  ;))))))))))
+	} s;
+};
+
+union cvmx_smix_cmd {
+	u64 u64;
+	struct cvmx_smix_cmd_s {
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+	  OCT_MDIO_BITFIELD_FIELD(u64 phy_op:2,
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_13_15:3,
+	  OCT_MDIO_BITFIELD_FIELD(u64 phy_adr:5,
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_5_7:3,
+	  OCT_MDIO_BITFIELD_FIELD(u64 reg_adr:5,
+	  ;))))))
+	} s;
+};
+
+union cvmx_smix_en {
+	u64 u64;
+	struct cvmx_smix_en_s {
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_1_63:63,
+	  OCT_MDIO_BITFIELD_FIELD(u64 en:1,
+	  ;))
+	} s;
+};
+
+union cvmx_smix_rd_dat {
+	u64 u64;
+	struct cvmx_smix_rd_dat_s {
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+	  OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 val:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
+	  ;))))
+	} s;
+};
+
+union cvmx_smix_wr_dat {
+	u64 u64;
+	struct cvmx_smix_wr_dat_s {
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+	  OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 val:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
+	  ;))))
+	} s;
+};
+
 enum octeon_mdiobus_mode {
 	UNINIT = 0,
 	C22,
@@ -41,6 +116,21 @@
 	int phy_irq[PHY_MAX_ADDR];
 };
 
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
+static void oct_mdio_writeq(u64 val, u64 addr)
+{
+	cvmx_write_csr(addr, val);
+}
+
+static u64 oct_mdio_readq(u64 addr)
+{
+	return cvmx_read_csr(addr);
+}
+#else
+#define oct_mdio_writeq(val, addr)	writeq_relaxed(val, (void *)addr)
+#define oct_mdio_readq(addr)		readq_relaxed((void *)addr)
+#endif
+
 static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p,
 				    enum octeon_mdiobus_mode m)
 {
@@ -49,10 +139,10 @@
 	if (m == p->mode)
 		return;
 
-	smi_clk.u64 = cvmx_read_csr(p->register_base + SMI_CLK);
+	smi_clk.u64 = oct_mdio_readq(p->register_base + SMI_CLK);
 	smi_clk.s.mode = (m == C45) ? 1 : 0;
 	smi_clk.s.preamble = 1;
-	cvmx_write_csr(p->register_base + SMI_CLK, smi_clk.u64);
+	oct_mdio_writeq(smi_clk.u64, p->register_base + SMI_CLK);
 	p->mode = m;
 }
 
@@ -67,7 +157,7 @@
 
 	smi_wr.u64 = 0;
 	smi_wr.s.dat = regnum & 0xffff;
-	cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
+	oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
 
 	regnum = (regnum >> 16) & 0x1f;
 
@@ -75,14 +165,14 @@
 	smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */
 	smi_cmd.s.phy_adr = phy_id;
 	smi_cmd.s.reg_adr = regnum;
-	cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+	oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
 
 	do {
 		/* Wait 1000 clocks so we don't saturate the RSL bus
 		 * doing reads.
 		 */
 		__delay(1000);
-		smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT);
+		smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
 	} while (smi_wr.s.pending && --timeout);
 
 	if (timeout <= 0)
@@ -114,14 +204,14 @@
 	smi_cmd.s.phy_op = op;
 	smi_cmd.s.phy_adr = phy_id;
 	smi_cmd.s.reg_adr = regnum;
-	cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+	oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
 
 	do {
 		/* Wait 1000 clocks so we don't saturate the RSL bus
 		 * doing reads.
 		 */
 		__delay(1000);
-		smi_rd.u64 = cvmx_read_csr(p->register_base + SMI_RD_DAT);
+		smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT);
 	} while (smi_rd.s.pending && --timeout);
 
 	if (smi_rd.s.val)
@@ -153,20 +243,20 @@
 
 	smi_wr.u64 = 0;
 	smi_wr.s.dat = val;
-	cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
+	oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
 
 	smi_cmd.u64 = 0;
 	smi_cmd.s.phy_op = op;
 	smi_cmd.s.phy_adr = phy_id;
 	smi_cmd.s.reg_adr = regnum;
-	cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+	oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
 
 	do {
 		/* Wait 1000 clocks so we don't saturate the RSL bus
 		 * doing reads.
 		 */
 		__delay(1000);
-		smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT);
+		smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
 	} while (smi_wr.s.pending && --timeout);
 
 	if (timeout <= 0)
@@ -187,30 +277,34 @@
 		return -ENOMEM;
 
 	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
 	if (res_mem == NULL) {
 		dev_err(&pdev->dev, "found no memory resource\n");
-		err = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
+
 	bus->mdio_phys = res_mem->start;
 	bus->regsize = resource_size(res_mem);
+
 	if (!devm_request_mem_region(&pdev->dev, bus->mdio_phys, bus->regsize,
 				     res_mem->name)) {
 		dev_err(&pdev->dev, "request_mem_region failed\n");
-		goto fail;
+		return -ENXIO;
 	}
+
 	bus->register_base =
 		(u64)devm_ioremap(&pdev->dev, bus->mdio_phys, bus->regsize);
+	if (!bus->register_base) {
+		dev_err(&pdev->dev, "devm_ioremap failed\n");
+		return -ENOMEM;
+	}
 
 	bus->mii_bus = mdiobus_alloc();
-
 	if (!bus->mii_bus)
 		goto fail;
 
 	smi_en.u64 = 0;
 	smi_en.s.en = 1;
-	cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+	oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
 
 	bus->mii_bus->priv = bus;
 	bus->mii_bus->irq = bus->phy_irq;
@@ -234,7 +328,7 @@
 	mdiobus_free(bus->mii_bus);
 fail:
 	smi_en.u64 = 0;
-	cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+	oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
 	return err;
 }
 
@@ -248,7 +342,7 @@
 	mdiobus_unregister(bus->mii_bus);
 	mdiobus_free(bus->mii_bus);
 	smi_en.u64 = 0;
-	cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+	oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
 	return 0;
 }
 
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 46a14cb..02a4615 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -303,12 +303,12 @@
 	BUG_ON(bus->state != MDIOBUS_REGISTERED);
 	bus->state = MDIOBUS_UNREGISTERED;
 
-	device_del(&bus->dev);
 	for (i = 0; i < PHY_MAX_ADDR; i++) {
 		if (bus->phy_map[i])
 			device_unregister(&bus->phy_map[i]->dev);
 		bus->phy_map[i] = NULL;
 	}
+	device_del(&bus->dev);
 }
 EXPORT_SYMBOL(mdiobus_unregister);
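Moving device_del() below the PHY loop fixes the teardown order: the PHY child devices must be unregistered while their parent bus device still exists. Schematically:

	/* unwind in reverse order of construction, so no child outlives its parent:
	 *
	 *   register:   device_register(&bus->dev), then PHY devices are added below it
	 *   unregister: device_unregister() each PHY first, then device_del(&bus->dev)
	 */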
 
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 34fe339..adb48ab 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -353,6 +353,8 @@
 
 	phydev->duplex = cmd->duplex;
 
+	phydev->mdix = cmd->eth_tp_mdix_ctrl;
+
 	/* Restart the PHY */
 	phy_start_aneg(phydev);
 
@@ -377,6 +379,7 @@
 	cmd->transceiver = phy_is_internal(phydev) ?
 		XCVR_INTERNAL : XCVR_EXTERNAL;
 	cmd->autoneg = phydev->autoneg;
+	cmd->eth_tp_mdix_ctrl = phydev->mdix;
 
 	return 0;
 }
@@ -1037,7 +1040,7 @@
 	struct phy_driver *phydrv = phydev->drv;
 	int value = -1;
 
-	if (phydrv->read_mmd_indirect == NULL) {
+	if (!phydrv->read_mmd_indirect) {
 		struct mii_bus *bus = phydev->bus;
 
 		mutex_lock(&bus->mdio_lock);
@@ -1074,7 +1077,7 @@
 {
 	struct phy_driver *phydrv = phydev->drv;
 
-	if (phydrv->write_mmd_indirect == NULL) {
+	if (!phydrv->write_mmd_indirect) {
 		struct mii_bus *bus = phydev->bus;
 
 		mutex_lock(&bus->mdio_lock);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 55f0178..c0f2111 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -156,8 +156,8 @@
 
 	/* We allocate the device, and initialize the default values */
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-	if (NULL == dev)
-		return (struct phy_device *)PTR_ERR((void *)-ENOMEM);
+	if (!dev)
+		return ERR_PTR(-ENOMEM);
 
 	dev->dev.release = phy_device_release;
 
@@ -178,7 +178,7 @@
 	dev->bus = bus;
 	dev->dev.parent = &bus->dev;
 	dev->dev.bus = &mdio_bus_type;
-	dev->irq = bus->irq != NULL ? bus->irq[addr] : PHY_POLL;
+	dev->irq = bus->irq ? bus->irq[addr] : PHY_POLL;
 	dev_set_name(&dev->dev, PHY_ID_FMT, bus->id, addr);
 
 	dev->state = PHY_DOWN;
@@ -589,7 +589,7 @@
 	/* Assume that if there is no driver, that it doesn't
 	 * exist, and we should use the genphy driver.
 	 */
-	if (NULL == d->driver) {
+	if (!d->driver) {
 		if (phydev->is_c45)
 			d->driver = &genphy_driver[GENPHY_DRV_10G].driver;
 		else
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 4535361..43ab691 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -137,6 +137,19 @@
 		.config_intr	= &rtl8211b_config_intr,
 		.driver		= { .owner = THIS_MODULE,},
 	}, {
+		.phy_id		= 0x001cc914,
+		.name		= "RTL8211DN Gigabit Ethernet",
+		.phy_id_mask	= 0x001fffff,
+		.features	= PHY_GBIT_FEATURES,
+		.flags		= PHY_HAS_INTERRUPT,
+		.config_aneg	= genphy_config_aneg,
+		.read_status	= genphy_read_status,
+		.ack_interrupt	= rtl821x_ack_interrupt,
+		.config_intr	= rtl8211e_config_intr,
+		.suspend	= genphy_suspend,
+		.resume		= genphy_resume,
+		.driver		= { .owner = THIS_MODULE,},
+	}, {
 		.phy_id		= 0x001cc915,
 		.name		= "RTL8211E Gigabit Ethernet",
 		.phy_id_mask	= 0x001fffff,
@@ -170,6 +183,7 @@
 
 static struct mdio_device_id __maybe_unused realtek_tbl[] = {
 	{ 0x001cc912, 0x001fffff },
+	{ 0x001cc914, 0x001fffff },
 	{ 0x001cc915, 0x001fffff },
 	{ 0x001cc916, 0x001fffff },
 	{ }
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 4653015..f091d69 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -209,8 +209,6 @@
 	return ks8995_start(ks);
 }
 
-/* ------------------------------------------------------------------------ */
-
 static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
 	struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
 {
@@ -220,19 +218,9 @@
 	dev = container_of(kobj, struct device, kobj);
 	ks8995 = dev_get_drvdata(dev);
 
-	if (unlikely(off > ks8995->regs_attr.size))
-		return 0;
-
-	if ((off + count) > ks8995->regs_attr.size)
-		count = ks8995->regs_attr.size - off;
-
-	if (unlikely(!count))
-		return count;
-
 	return ks8995_read(ks8995, buf, off, count);
 }
 
-
 static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
 	struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
 {
@@ -242,19 +230,9 @@
 	dev = container_of(kobj, struct device, kobj);
 	ks8995 = dev_get_drvdata(dev);
 
-	if (unlikely(off >= ks8995->regs_attr.size))
-		return -EFBIG;
-
-	if ((off + count) > ks8995->regs_attr.size)
-		count = ks8995->regs_attr.size - off;
-
-	if (unlikely(!count))
-		return count;
-
 	return ks8995_write(ks8995, buf, off, count);
 }
 
-
 static const struct bin_attribute ks8995_registers_attr = {
 	.attr = {
 		.name   = "registers",
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
new file mode 100644
index 0000000..91e1bec
--- /dev/null
+++ b/drivers/net/phy/teranetics.c
@@ -0,0 +1,135 @@
+/*
+ * Driver for Teranetics PHY
+ *
+ * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
+ *
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+
+MODULE_DESCRIPTION("Teranetics PHY driver");
+MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
+MODULE_LICENSE("GPL v2");
+
+#define PHY_ID_TN2020	0x00a19410
+#define MDIO_PHYXS_LNSTAT_SYNC0	0x0001
+#define MDIO_PHYXS_LNSTAT_SYNC1	0x0002
+#define MDIO_PHYXS_LNSTAT_SYNC2	0x0004
+#define MDIO_PHYXS_LNSTAT_SYNC3	0x0008
+#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000
+
+#define MDIO_PHYXS_LANE_READY	(MDIO_PHYXS_LNSTAT_SYNC0 | \
+				MDIO_PHYXS_LNSTAT_SYNC1 | \
+				MDIO_PHYXS_LNSTAT_SYNC2 | \
+				MDIO_PHYXS_LNSTAT_SYNC3 | \
+				MDIO_PHYXS_LNSTAT_ALIGN)
+
+static int teranetics_config_init(struct phy_device *phydev)
+{
+	phydev->supported = SUPPORTED_10000baseT_Full;
+	phydev->advertising = SUPPORTED_10000baseT_Full;
+
+	return 0;
+}
+
+static int teranetics_soft_reset(struct phy_device *phydev)
+{
+	return 0;
+}
+
+static int teranetics_aneg_done(struct phy_device *phydev)
+{
+	int reg;
+
+	/* The autonegotiation state can only be checked on the copper
+	 * port; when the fiber port is in use, simply report it as done.
+	 */
+	if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) {
+		reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+		return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
+	}
+
+	return 1;
+}
+
+static int teranetics_config_aneg(struct phy_device *phydev)
+{
+	return 0;
+}
+
+static int teranetics_read_status(struct phy_device *phydev)
+{
+	int reg;
+
+	phydev->link = 1;
+
+	phydev->speed = SPEED_10000;
+	phydev->duplex = DUPLEX_FULL;
+
+	if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) {
+		reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_LNSTAT);
+		if (reg < 0 ||
+		    (reg & MDIO_PHYXS_LANE_READY) != MDIO_PHYXS_LANE_READY) {
+			phydev->link = 0;
+			return 0;
+		}
+
+		reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+		if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS))
+			phydev->link = 0;
+	}
+
+	return 0;
+}
+
+static int teranetics_match_phy_device(struct phy_device *phydev)
+{
+	return phydev->c45_ids.device_ids[3] == PHY_ID_TN2020;
+}
+
+static struct phy_driver teranetics_driver[] = {
+{
+	.phy_id		= PHY_ID_TN2020,
+	.phy_id_mask	= 0xffffffff,
+	.name		= "Teranetics TN2020",
+	.soft_reset	= teranetics_soft_reset,
+	.aneg_done	= teranetics_aneg_done,
+	.config_init    = teranetics_config_init,
+	.config_aneg    = teranetics_config_aneg,
+	.read_status	= teranetics_read_status,
+	.match_phy_device = teranetics_match_phy_device,
+	.driver		= { .owner = THIS_MODULE,},
+},
+};
+
+static int __init teranetics_init(void)
+{
+	return phy_drivers_register(teranetics_driver,
+				    ARRAY_SIZE(teranetics_driver));
+}
+
+static void __exit teranetics_exit(void)
+{
+	phy_drivers_unregister(teranetics_driver,
+			       ARRAY_SIZE(teranetics_driver));
+}
+
+module_init(teranetics_init);
+module_exit(teranetics_exit);
+
+static struct mdio_device_id __maybe_unused teranetics_tbl[] = {
+	{ PHY_ID_TN2020, 0xffffffff },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(mdio, teranetics_tbl);
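Since the tree already carries the module_phy_driver() helper, the explicit init/exit pair above could equally be collapsed to a single line; a possible simplification, not part of this patch:

	module_phy_driver(teranetics_driver);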
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index fa8f504..0481daf 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -283,6 +283,8 @@
 static void unit_put(struct idr *p, int n);
 static void *unit_find(struct idr *p, int n);
 
+static const struct net_device_ops ppp_netdev_ops;
+
 static struct class *ppp_class;
 
 /* per net-namespace data */
@@ -919,13 +921,22 @@
 static __net_exit void ppp_exit_net(struct net *net)
 {
 	struct ppp_net *pn = net_generic(net, ppp_net_id);
+	struct net_device *dev;
+	struct net_device *aux;
 	struct ppp *ppp;
 	LIST_HEAD(list);
 	int id;
 
 	rtnl_lock();
+	for_each_netdev_safe(net, dev, aux) {
+		if (dev->netdev_ops == &ppp_netdev_ops)
+			unregister_netdevice_queue(dev, &list);
+	}
+
 	idr_for_each_entry(&pn->units_idr, ppp, id)
-		unregister_netdevice_queue(ppp->dev, &list);
+		/* Skip devices already unregistered by previous loop */
+		if (!net_eq(dev_net(ppp->dev), net))
+			unregister_netdevice_queue(ppp->dev, &list);
 
 	unregister_netdevice_many(&list);
 	rtnl_unlock();
@@ -1017,6 +1028,7 @@
 	proto = npindex_to_proto[npi];
 	put_unaligned_be16(proto, pp);
 
+	skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
 	skb_queue_tail(&ppp->file.xq, skb);
 	ppp_xmit_process(ppp);
 	return NETDEV_TX_OK;
@@ -1137,7 +1149,6 @@
 	dev->tx_queue_len = 3;
 	dev->type = ARPHRD_PPP;
 	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
-	dev->features |= NETIF_F_NETNS_LOCAL;
 	netif_keep_dst(dev);
 }
 
@@ -1900,6 +1911,8 @@
 			skb->dev = ppp->dev;
 			skb->protocol = htons(npindex_to_ethertype[npi]);
 			skb_reset_mac_header(skb);
+			skb_scrub_packet(skb, !net_eq(ppp->ppp_net,
+						      dev_net(ppp->dev)));
 			netif_rx(skb);
 		}
 	}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index daa054b..651d35e 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2051,9 +2051,9 @@
 	dev->netdev_ops = &team_netdev_ops;
 	dev->ethtool_ops = &team_ethtool_ops;
 	dev->destructor	= team_destructor;
-	dev->tx_queue_len = 0;
 	dev->flags |= IFF_MULTICAST;
 	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+	dev->priv_flags |= IFF_NO_QUEUE;
 
 	/*
 	 * Indicate we support unicast address filtering. That way core won't
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 06a0394..976aa97 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -961,6 +961,7 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= tun_poll_controller,
 #endif
+	.ndo_features_check	= passthru_features_check,
 };
 
 static void tun_flow_init(struct tun_struct *tun)
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 7ba8d08..1610b79 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -106,6 +106,16 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called r8152.
 
+config USB_LAN78XX
+	tristate "Microchip LAN78XX Based USB Ethernet Adapters"
+	select MII
+	help
+	  This option adds support for Microchip LAN78XX based USB 2
+	  & USB 3 10/100/1000 Ethernet adapters.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called lan78xx.
+
 config USB_USBNET
 	tristate "Multi-purpose USB Networking Framework"
 	select MII
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index e2797f1..cf6a0e6 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_USB_RTL8150)	+= rtl8150.o
 obj-$(CONFIG_USB_RTL8152)	+= r8152.o
 obj-$(CONFIG_USB_HSO)		+= hso.o
+obj-$(CONFIG_USB_LAN78XX)	+= lan78xx.o
 obj-$(CONFIG_USB_NET_AX8817X)	+= asix.o
 asix-y := asix_devices.o asix_common.o ax88172a.o
 obj-$(CONFIG_USB_NET_AX88179_178A)      += ax88179_178a.o
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
new file mode 100644
index 0000000..39364a4
--- /dev/null
+++ b/drivers/net/usb/lan78xx.c
@@ -0,0 +1,3495 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/mdio.h>
+#include <net/ip6_checksum.h>
+#include "lan78xx.h"
+
+#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
+#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
+#define DRIVER_NAME	"lan78xx"
+#define DRIVER_VERSION	"1.0.0"
+
+#define TX_TIMEOUT_JIFFIES		(5 * HZ)
+#define THROTTLE_JIFFIES		(HZ / 8)
+#define UNLINK_TIMEOUT_MS		3
+
+#define RX_MAX_QUEUE_MEMORY		(60 * 1518)
+
+#define SS_USB_PKT_SIZE			(1024)
+#define HS_USB_PKT_SIZE			(512)
+#define FS_USB_PKT_SIZE			(64)
+
+#define MAX_RX_FIFO_SIZE		(12 * 1024)
+#define MAX_TX_FIFO_SIZE		(12 * 1024)
+#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
+#define DEFAULT_BULK_IN_DELAY		(0x0800)
+#define MAX_SINGLE_PACKET_SIZE		(9000)
+#define DEFAULT_TX_CSUM_ENABLE		(true)
+#define DEFAULT_RX_CSUM_ENABLE		(true)
+#define DEFAULT_TSO_CSUM_ENABLE		(true)
+#define DEFAULT_VLAN_FILTER_ENABLE	(true)
+#define INTERNAL_PHY_ID			(2)	/* 2: GMII */
+#define TX_OVERHEAD			(8)
+#define RXW_PADDING			2
+
+#define LAN78XX_USB_VENDOR_ID		(0x0424)
+#define LAN7800_USB_PRODUCT_ID		(0x7800)
+#define LAN7850_USB_PRODUCT_ID		(0x7850)
+#define LAN78XX_EEPROM_MAGIC		(0x78A5)
+#define LAN78XX_OTP_MAGIC		(0x78F3)
+
+#define	MII_READ			1
+#define	MII_WRITE			0
+
+#define EEPROM_INDICATOR		(0xA5)
+#define EEPROM_MAC_OFFSET		(0x01)
+#define MAX_EEPROM_SIZE			512
+#define OTP_INDICATOR_1			(0xF3)
+#define OTP_INDICATOR_2			(0xF7)
+
+#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
+					 WAKE_MCAST | WAKE_BCAST | \
+					 WAKE_ARP | WAKE_MAGIC)
+
+/* USB related defines */
+#define BULK_IN_PIPE			1
+#define BULK_OUT_PIPE			2
+
+/* default autosuspend delay (msec) */
+#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
+
+static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
+	"RX FCS Errors",
+	"RX Alignment Errors",
+	"Rx Fragment Errors",
+	"RX Jabber Errors",
+	"RX Undersize Frame Errors",
+	"RX Oversize Frame Errors",
+	"RX Dropped Frames",
+	"RX Unicast Byte Count",
+	"RX Broadcast Byte Count",
+	"RX Multicast Byte Count",
+	"RX Unicast Frames",
+	"RX Broadcast Frames",
+	"RX Multicast Frames",
+	"RX Pause Frames",
+	"RX 64 Byte Frames",
+	"RX 65 - 127 Byte Frames",
+	"RX 128 - 255 Byte Frames",
+	"RX 256 - 511 Bytes Frames",
+	"RX 512 - 1023 Byte Frames",
+	"RX 1024 - 1518 Byte Frames",
+	"RX Greater 1518 Byte Frames",
+	"EEE RX LPI Transitions",
+	"EEE RX LPI Time",
+	"TX FCS Errors",
+	"TX Excess Deferral Errors",
+	"TX Carrier Errors",
+	"TX Bad Byte Count",
+	"TX Single Collisions",
+	"TX Multiple Collisions",
+	"TX Excessive Collision",
+	"TX Late Collisions",
+	"TX Unicast Byte Count",
+	"TX Broadcast Byte Count",
+	"TX Multicast Byte Count",
+	"TX Unicast Frames",
+	"TX Broadcast Frames",
+	"TX Multicast Frames",
+	"TX Pause Frames",
+	"TX 64 Byte Frames",
+	"TX 65 - 127 Byte Frames",
+	"TX 128 - 255 Byte Frames",
+	"TX 256 - 511 Bytes Frames",
+	"TX 512 - 1023 Byte Frames",
+	"TX 1024 - 1518 Byte Frames",
+	"TX Greater 1518 Byte Frames",
+	"EEE TX LPI Transitions",
+	"EEE TX LPI Time",
+};
+
+struct lan78xx_statstage {
+	u32 rx_fcs_errors;
+	u32 rx_alignment_errors;
+	u32 rx_fragment_errors;
+	u32 rx_jabber_errors;
+	u32 rx_undersize_frame_errors;
+	u32 rx_oversize_frame_errors;
+	u32 rx_dropped_frames;
+	u32 rx_unicast_byte_count;
+	u32 rx_broadcast_byte_count;
+	u32 rx_multicast_byte_count;
+	u32 rx_unicast_frames;
+	u32 rx_broadcast_frames;
+	u32 rx_multicast_frames;
+	u32 rx_pause_frames;
+	u32 rx_64_byte_frames;
+	u32 rx_65_127_byte_frames;
+	u32 rx_128_255_byte_frames;
+	u32 rx_256_511_bytes_frames;
+	u32 rx_512_1023_byte_frames;
+	u32 rx_1024_1518_byte_frames;
+	u32 rx_greater_1518_byte_frames;
+	u32 eee_rx_lpi_transitions;
+	u32 eee_rx_lpi_time;
+	u32 tx_fcs_errors;
+	u32 tx_excess_deferral_errors;
+	u32 tx_carrier_errors;
+	u32 tx_bad_byte_count;
+	u32 tx_single_collisions;
+	u32 tx_multiple_collisions;
+	u32 tx_excessive_collision;
+	u32 tx_late_collisions;
+	u32 tx_unicast_byte_count;
+	u32 tx_broadcast_byte_count;
+	u32 tx_multicast_byte_count;
+	u32 tx_unicast_frames;
+	u32 tx_broadcast_frames;
+	u32 tx_multicast_frames;
+	u32 tx_pause_frames;
+	u32 tx_64_byte_frames;
+	u32 tx_65_127_byte_frames;
+	u32 tx_128_255_byte_frames;
+	u32 tx_256_511_bytes_frames;
+	u32 tx_512_1023_byte_frames;
+	u32 tx_1024_1518_byte_frames;
+	u32 tx_greater_1518_byte_frames;
+	u32 eee_tx_lpi_transitions;
+	u32 eee_tx_lpi_time;
+};
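lan78xx_get_stats() further down copies this block u32-for-u32 into the ethtool buffer, so lan78xx_gstrings[] and struct lan78xx_statstage must stay in lockstep (47 entries each). A hypothetical compile-time guard, e.g. at the top of lan78xx_get_sset_count(), would catch any drift:

	BUILD_BUG_ON(ARRAY_SIZE(lan78xx_gstrings) !=
		     sizeof(struct lan78xx_statstage) / sizeof(u32));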
+
+struct lan78xx_net;
+
+struct lan78xx_priv {
+	struct lan78xx_net *dev;
+	u32 rfe_ctl;
+	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
+	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
+	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
+	struct mutex dataport_mutex; /* for dataport access */
+	spinlock_t rfe_ctl_lock; /* for rfe register access */
+	struct work_struct set_multicast;
+	struct work_struct set_vlan;
+	u32 wol;
+};
+
+enum skb_state {
+	illegal = 0,
+	tx_start,
+	tx_done,
+	rx_start,
+	rx_done,
+	rx_cleanup,
+	unlink_start
+};
+
+struct skb_data {		/* skb->cb is one of these */
+	struct urb *urb;
+	struct lan78xx_net *dev;
+	enum skb_state state;
+	size_t length;
+};
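The cast (struct skb_data *)skb->cb used throughout relies on this structure fitting in the 48-byte skb control buffer. A hypothetical guard, not in the driver as posted:

	BUILD_BUG_ON(sizeof(struct skb_data) > FIELD_SIZEOF(struct sk_buff, cb));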
+
+struct usb_context {
+	struct usb_ctrlrequest req;
+	struct lan78xx_net *dev;
+};
+
+#define EVENT_TX_HALT			0
+#define EVENT_RX_HALT			1
+#define EVENT_RX_MEMORY			2
+#define EVENT_STS_SPLIT			3
+#define EVENT_LINK_RESET		4
+#define EVENT_RX_PAUSED			5
+#define EVENT_DEV_WAKING		6
+#define EVENT_DEV_ASLEEP		7
+#define EVENT_DEV_OPEN			8
+
+struct lan78xx_net {
+	struct net_device	*net;
+	struct usb_device	*udev;
+	struct usb_interface	*intf;
+	void			*driver_priv;
+
+	int			rx_qlen;
+	int			tx_qlen;
+	struct sk_buff_head	rxq;
+	struct sk_buff_head	txq;
+	struct sk_buff_head	done;
+	struct sk_buff_head	rxq_pause;
+	struct sk_buff_head	txq_pend;
+
+	struct tasklet_struct	bh;
+	struct delayed_work	wq;
+
+	struct usb_host_endpoint *ep_blkin;
+	struct usb_host_endpoint *ep_blkout;
+	struct usb_host_endpoint *ep_intr;
+
+	int			msg_enable;
+
+	struct urb		*urb_intr;
+	struct usb_anchor	deferred;
+
+	struct mutex		phy_mutex; /* for phy access */
+	unsigned		pipe_in, pipe_out, pipe_intr;
+
+	u32			hard_mtu;	/* count any extra framing */
+	size_t			rx_urb_size;	/* size for rx urbs */
+
+	unsigned long		flags;
+
+	wait_queue_head_t	*wait;
+	unsigned char		suspend_count;
+
+	unsigned		maxpacket;
+	struct timer_list	delay;
+
+	unsigned long		data[5];
+	struct mii_if_info	mii;
+
+	int			link_on;
+	u8			mdix_ctrl;
+};
+
+/* use ethtool to change the level for any given device */
+static int msg_level = -1;
+module_param(msg_level, int, 0);
+MODULE_PARM_DESC(msg_level, "Override default message level");
+
+static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
+{
+	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
+	int ret;
+
+	if (!buf)
+		return -ENOMEM;
+
+	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+			      USB_VENDOR_REQUEST_READ_REGISTER,
+			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
+	if (likely(ret >= 0)) {
+		le32_to_cpus(buf);
+		*data = *buf;
+	} else {
+		netdev_warn(dev->net,
+			    "Failed to read register index 0x%08x. ret = %d",
+			    index, ret);
+	}
+
+	kfree(buf);
+
+	return ret;
+}
+
+static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
+{
+	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
+	int ret;
+
+	if (!buf)
+		return -ENOMEM;
+
+	*buf = data;
+	cpu_to_le32s(buf);
+
+	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+			      USB_VENDOR_REQUEST_WRITE_REGISTER,
+			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
+	if (unlikely(ret < 0)) {
+		netdev_warn(dev->net,
+			    "Failed to write register index 0x%08x. ret = %d",
+			    index, ret);
+	}
+
+	kfree(buf);
+
+	return ret;
+}
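Much of the code below performs read-modify-write cycles on registers such as MAC_CR, USB_CFG1 and MAC_RX through these two accessors. The recurring pattern, expressed as a hypothetical helper (name and signature assumed, not part of the driver):

	static int lan78xx_update_reg(struct lan78xx_net *dev, u32 index,
				      u32 mask, u32 set)
	{
		u32 val;
		int ret;

		ret = lan78xx_read_reg(dev, index, &val);
		if (ret < 0)
			return ret;

		val = (val & ~mask) | set;
		return lan78xx_write_reg(dev, index, val);
	}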
+
+static int lan78xx_read_stats(struct lan78xx_net *dev,
+			      struct lan78xx_statstage *data)
+{
+	int ret = 0;
+	int i;
+	struct lan78xx_statstage *stats;
+	u32 *src;
+	u32 *dst;
+
+	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
+	if (!stats)
+		return -ENOMEM;
+
+	ret = usb_control_msg(dev->udev,
+			      usb_rcvctrlpipe(dev->udev, 0),
+			      USB_VENDOR_REQUEST_GET_STATS,
+			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			      0,
+			      0,
+			      (void *)stats,
+			      sizeof(*stats),
+			      USB_CTRL_GET_TIMEOUT);
+	if (likely(ret >= 0)) {
+		src = (u32 *)stats;
+		dst = (u32 *)data;
+		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
+			le32_to_cpus(&src[i]);
+			dst[i] = src[i];
+		}
+	} else {
+		netdev_warn(dev->net,
+			    "Failed to read stat ret = 0x%x", ret);
+	}
+
+	kfree(stats);
+
+	return ret;
+}
+
+/* Loop until the read completes or times out; called with phy_mutex held */
+static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
+{
+	unsigned long start_time = jiffies;
+	u32 val;
+	int ret;
+
+	do {
+		ret = lan78xx_read_reg(dev, MII_ACC, &val);
+		if (unlikely(ret < 0))
+			return -EIO;
+
+		if (!(val & MII_ACC_MII_BUSY_))
+			return 0;
+	} while (!time_after(jiffies, start_time + HZ));
+
+	return -EIO;
+}
+
+static inline u32 mii_access(int id, int index, int read)
+{
+	u32 ret;
+
+	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
+	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
+	if (read)
+		ret |= MII_ACC_MII_READ_;
+	else
+		ret |= MII_ACC_MII_WRITE_;
+	ret |= MII_ACC_MII_BUSY_;
+
+	return ret;
+}
+
+static int lan78xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	u32 val, addr;
+	int ret;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&dev->phy_mutex);
+
+	/* confirm MII not busy */
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* set the address, index & direction (read from PHY) */
+	phy_id &= dev->mii.phy_id_mask;
+	idx &= dev->mii.reg_num_mask;
+	addr = mii_access(phy_id, idx, MII_READ);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	ret = lan78xx_read_reg(dev, MII_DATA, &val);
+
+	ret = (int)(val & 0xFFFF);
+
+done:
+	mutex_unlock(&dev->phy_mutex);
+	usb_autopm_put_interface(dev->intf);
+	return ret;
+}
+
+static void lan78xx_mdio_write(struct net_device *netdev, int phy_id,
+			       int idx, int regval)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	u32 val, addr;
+	int ret;
+
+	if (usb_autopm_get_interface(dev->intf) < 0)
+		return;
+
+	mutex_lock(&dev->phy_mutex);
+
+	/* confirm MII not busy */
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	val = regval;
+	ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+	/* set the address, index & direction (write to PHY) */
+	phy_id &= dev->mii.phy_id_mask;
+	idx &= dev->mii.reg_num_mask;
+	addr = mii_access(phy_id, idx, MII_WRITE);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+done:
+	mutex_unlock(&dev->phy_mutex);
+	usb_autopm_put_interface(dev->intf);
+}
+
+static void lan78xx_mmd_write(struct net_device *netdev, int phy_id,
+			      int mmddev, int mmdidx, int regval)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	u32 val, addr;
+	int ret;
+
+	if (usb_autopm_get_interface(dev->intf) < 0)
+		return;
+
+	mutex_lock(&dev->phy_mutex);
+
+	/* confirm MII not busy */
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	mmddev &= 0x1F;
+
+	/* set up device address for MMD */
+	ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
+
+	phy_id &= dev->mii.phy_id_mask;
+	addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* select register of MMD */
+	val = mmdidx;
+	ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+	phy_id &= dev->mii.phy_id_mask;
+	addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* select register data for MMD */
+	val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
+	ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+	phy_id &= dev->mii.phy_id_mask;
+	addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* write to MMD */
+	val = regval;
+	ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+	phy_id &= dev->mii.phy_id_mask;
+	addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+done:
+	mutex_unlock(&dev->phy_mutex);
+	usb_autopm_put_interface(dev->intf);
+}
+
+static int lan78xx_mmd_read(struct net_device *netdev, int phy_id,
+			    int mmddev, int mmdidx)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	u32 val, addr;
+	int ret;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&dev->phy_mutex);
+
+	/* confirm MII not busy */
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* set up device address for MMD */
+	ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
+
+	phy_id &= dev->mii.phy_id_mask;
+	addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* select register of MMD */
+	val = mmdidx;
+	ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+	phy_id &= dev->mii.phy_id_mask;
+	addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* select register data for MMD */
+	val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
+	ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+	phy_id &= dev->mii.phy_id_mask;
+	addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* set the address, index & direction (read from PHY) */
+	phy_id &= dev->mii.phy_id_mask;
+	addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_READ);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* read from MMD */
+	ret = lan78xx_read_reg(dev, MII_DATA, &val);
+
+	ret = (int)(val & 0xFFFF);
+
+done:
+	mutex_unlock(&dev->phy_mutex);
+	usb_autopm_put_interface(dev->intf);
+	return ret;
+}
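lan78xx_mmd_read() and lan78xx_mmd_write() implement the standard Clause 45-over-Clause 22 indirection (IEEE 802.3 Annex 22D), with PHY_MMD_CTL and PHY_MMD_REG_DATA playing the roles of MII registers 13 and 14. Condensed to its essentials, as a sketch rather than driver code:

	/* function field of register 13, bits 15:14: 00 = address, 01 = data */
	static int mmd_read_sketch(struct mii_if_info *mii, int phy, int devad, int reg)
	{
		mii->mdio_write(mii->dev, phy, 13, devad);	/* select MMD device */
		mii->mdio_write(mii->dev, phy, 14, reg);	/* latch register index */
		mii->mdio_write(mii->dev, phy, 13, (1 << 14) | devad); /* data, no incr */
		return mii->mdio_read(mii->dev, phy, 14);	/* MMD register value */
	}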
+
+static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
+{
+	unsigned long start_time = jiffies;
+	u32 val;
+	int ret;
+
+	do {
+		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
+		if (unlikely(ret < 0))
+			return -EIO;
+
+		if (!(val & E2P_CMD_EPC_BUSY_) ||
+		    (val & E2P_CMD_EPC_TIMEOUT_))
+			break;
+		usleep_range(40, 100);
+	} while (!time_after(jiffies, start_time + HZ));
+
+	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
+		netdev_warn(dev->net, "EEPROM read operation timeout");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
+{
+	unsigned long start_time = jiffies;
+	u32 val;
+	int ret;
+
+	do {
+		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
+		if (unlikely(ret < 0))
+			return -EIO;
+
+		if (!(val & E2P_CMD_EPC_BUSY_))
+			return 0;
+
+		usleep_range(40, 100);
+	} while (!time_after(jiffies, start_time + HZ));
+
+	netdev_warn(dev->net, "EEPROM is busy");
+	return -EIO;
+}
+
+static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
+				   u32 length, u8 *data)
+{
+	u32 val;
+	int i, ret;
+
+	ret = lan78xx_eeprom_confirm_not_busy(dev);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < length; i++) {
+		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
+		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+		ret = lan78xx_write_reg(dev, E2P_CMD, val);
+		if (unlikely(ret < 0))
+			return -EIO;
+
+		ret = lan78xx_wait_eeprom(dev);
+		if (ret < 0)
+			return ret;
+
+		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
+		if (unlikely(ret < 0))
+			return -EIO;
+
+		data[i] = val & 0xFF;
+		offset++;
+	}
+
+	return 0;
+}
+
+static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
+			       u32 length, u8 *data)
+{
+	u8 sig;
+	int ret;
+
+	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
+	if ((ret == 0) && (sig == EEPROM_INDICATOR))
+		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
+				    u32 length, u8 *data)
+{
+	u32 val;
+	int i, ret;
+
+	ret = lan78xx_eeprom_confirm_not_busy(dev);
+	if (ret)
+		return ret;
+
+	/* Issue write/erase enable command */
+	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
+	ret = lan78xx_write_reg(dev, E2P_CMD, val);
+	if (unlikely(ret < 0))
+		return -EIO;
+
+	ret = lan78xx_wait_eeprom(dev);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < length; i++) {
+		/* Fill data register */
+		val = data[i];
+		ret = lan78xx_write_reg(dev, E2P_DATA, val);
+		if (ret < 0)
+			return ret;
+
+		/* Send "write" command */
+		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
+		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+		ret = lan78xx_write_reg(dev, E2P_CMD, val);
+		if (ret < 0)
+			return ret;
+
+		ret = lan78xx_wait_eeprom(dev);
+		if (ret < 0)
+			return ret;
+
+		offset++;
+	}
+
+	return 0;
+}
+
+static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
+				u32 length, u8 *data)
+{
+	int i;
+	int ret;
+	u32 buf;
+	unsigned long timeout;
+
+	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+
+	if (buf & OTP_PWR_DN_PWRDN_N_) {
+		/* clear it and wait to be cleared */
+		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+
+		timeout = jiffies + HZ;
+		do {
+			usleep_range(1, 10);
+			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+			if (time_after(jiffies, timeout)) {
+				netdev_warn(dev->net,
+					    "timeout on OTP_PWR_DN");
+				return -EIO;
+			}
+		} while (buf & OTP_PWR_DN_PWRDN_N_);
+	}
+
+	for (i = 0; i < length; i++) {
+		ret = lan78xx_write_reg(dev, OTP_ADDR1,
+					((offset + i) >> 8) & OTP_ADDR1_15_11);
+		ret = lan78xx_write_reg(dev, OTP_ADDR2,
+					((offset + i) & OTP_ADDR2_10_3));
+
+		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+
+		timeout = jiffies + HZ;
+		do {
+			udelay(1);
+			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+			if (time_after(jiffies, timeout)) {
+				netdev_warn(dev->net,
+					    "timeout on OTP_STATUS");
+				return -EIO;
+			}
+		} while (buf & OTP_STATUS_BUSY_);
+
+		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+
+		data[i] = (u8)(buf & 0xFF);
+	}
+
+	return 0;
+}
+
+static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
+			    u32 length, u8 *data)
+{
+	u8 sig;
+	int ret;
+
+	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
+
+	if (ret == 0) {
+		if (sig == OTP_INDICATOR_2)
+			offset += 0x100;
+		else if (sig != OTP_INDICATOR_1)
+			return -EINVAL;
+		ret = lan78xx_read_raw_otp(dev, offset, length, data);
+	}
+
+	return ret;
+}
+
+static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
+{
+	int i, ret;
+
+	for (i = 0; i < 100; i++) {
+		u32 dp_sel;
+
+		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
+		if (unlikely(ret < 0))
+			return -EIO;
+
+		if (dp_sel & DP_SEL_DPRDY_)
+			return 0;
+
+		usleep_range(40, 100);
+	}
+
+	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
+
+	return -EIO;
+}
+
+static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
+				  u32 addr, u32 length, u32 *buf)
+{
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+	u32 dp_sel;
+	int i, ret;
+
+	if (usb_autopm_get_interface(dev->intf) < 0)
+		return 0;
+
+	mutex_lock(&pdata->dataport_mutex);
+
+	ret = lan78xx_dataport_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
+
+	dp_sel &= ~DP_SEL_RSEL_MASK_;
+	dp_sel |= ram_select;
+	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
+
+	for (i = 0; i < length; i++) {
+		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
+
+		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
+
+		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
+
+		ret = lan78xx_dataport_wait_not_busy(dev);
+		if (ret < 0)
+			goto done;
+	}
+
+done:
+	mutex_unlock(&pdata->dataport_mutex);
+	usb_autopm_put_interface(dev->intf);
+
+	return ret;
+}
+
+static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
+				    int index, u8 addr[ETH_ALEN])
+{
+	u32	temp;
+
+	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
+		temp = addr[3];
+		temp = addr[2] | (temp << 8);
+		temp = addr[1] | (temp << 8);
+		temp = addr[0] | (temp << 8);
+		pdata->pfilter_table[index][1] = temp;
+		temp = addr[5];
+		temp = addr[4] | (temp << 8);
+		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
+		pdata->pfilter_table[index][0] = temp;
+	}
+}
+
+/* returns hash bit number for given MAC address */
+static inline u32 lan78xx_hash(char addr[ETH_ALEN])
+{
+	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
+}
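The hash folds the 32-bit Ethernet CRC down to a 9-bit index into the 512-bit vendor hash filter (mchash_table, presumably 16 u32 words). A worked example with an illustrative CRC value:

	/* if ether_crc() returned 0xd5aa4000:
	 *   bitnum = (0xd5aa4000 >> 23) & 0x1ff = 427
	 * so lan78xx_set_multicast() below sets bit 427 % 32 = 11
	 * of word 427 / 32 = 13 in mchash_table[].
	 */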
+
+static void lan78xx_deferred_multicast_write(struct work_struct *param)
+{
+	struct lan78xx_priv *pdata =
+			container_of(param, struct lan78xx_priv, set_multicast);
+	struct lan78xx_net *dev = pdata->dev;
+	int i;
+	int ret;
+
+	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
+		  pdata->rfe_ctl);
+
+	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
+			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
+
+	for (i = 1; i < NUM_OF_MAF; i++) {
+		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
+		ret = lan78xx_write_reg(dev, MAF_LO(i),
+					pdata->pfilter_table[i][1]);
+		ret = lan78xx_write_reg(dev, MAF_HI(i),
+					pdata->pfilter_table[i][0]);
+	}
+
+	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+}
+
+static void lan78xx_set_multicast(struct net_device *netdev)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
+
+	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
+			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
+
+	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
+		pdata->mchash_table[i] = 0;
+	/* pfilter_table[0] is reserved for the device's own HW address */
+	for (i = 1; i < NUM_OF_MAF; i++) {
+		pdata->pfilter_table[i][0] = 0;
+		pdata->pfilter_table[i][1] = 0;
+	}
+
+	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
+
+	if (dev->net->flags & IFF_PROMISC) {
+		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
+		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
+	} else {
+		if (dev->net->flags & IFF_ALLMULTI) {
+			netif_dbg(dev, drv, dev->net,
+				  "receive all multicast enabled");
+			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
+		}
+	}
+
+	if (netdev_mc_count(dev->net)) {
+		struct netdev_hw_addr *ha;
+		int i;
+
+		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
+
+		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
+
+		i = 1;
+		netdev_for_each_mc_addr(ha, netdev) {
+			/* set first 32 into Perfect Filter */
+			if (i < 33) {
+				lan78xx_set_addr_filter(pdata, i, ha->addr);
+			} else {
+				u32 bitnum = lan78xx_hash(ha->addr);
+
+				pdata->mchash_table[bitnum / 32] |=
+							(1 << (bitnum % 32));
+				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
+			}
+			i++;
+		}
+	}
+
+	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
+
+	/* defer register writes to a sleepable context */
+	schedule_work(&pdata->set_multicast);
+}
+
+static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
+				      u16 lcladv, u16 rmtadv)
+{
+	u32 flow = 0, fct_flow = 0;
+	int ret;
+
+	u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+
+	if (cap & FLOW_CTRL_TX)
+		flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);
+
+	if (cap & FLOW_CTRL_RX)
+		flow |= FLOW_CR_RX_FCEN_;
+
+	if (dev->udev->speed == USB_SPEED_SUPER)
+		fct_flow = 0x817;
+	else if (dev->udev->speed == USB_SPEED_HIGH)
+		fct_flow = 0x211;
+
+	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
+		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
+		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
+
+	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
+
+	/* threshold value should be set before enabling flow */
+	ret = lan78xx_write_reg(dev, FLOW, flow);
+
+	return 0;
+}
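mii_resolve_flowctrl_fdx() applies the 802.3x pause resolution to the local and partner advertisements. A worked case:

	/* both ends advertise symmetric pause:
	 *   cap = mii_resolve_flowctrl_fdx(ADVERTISE_PAUSE_CAP, ADVERTISE_PAUSE_CAP)
	 *       = FLOW_CTRL_TX | FLOW_CTRL_RX
	 * so both FLOW_CR_TX_FCEN_ and FLOW_CR_RX_FCEN_ get set above.
	 */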
+
+static int lan78xx_link_reset(struct lan78xx_net *dev)
+{
+	struct mii_if_info *mii = &dev->mii;
+	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+	int ladv, radv;
+	int ret;
+	u32 buf;
+
+	/* clear PHY interrupt status */
+	/* VTSE PHY */
+	ret = lan78xx_mdio_read(dev->net, mii->phy_id, PHY_VTSE_INT_STS);
+	if (unlikely(ret < 0))
+		return -EIO;
+
+	/* clear LAN78xx interrupt status */
+	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
+	if (unlikely(ret < 0))
+		return -EIO;
+
+	if (!mii_link_ok(mii) && dev->link_on) {
+		dev->link_on = false;
+		netif_carrier_off(dev->net);
+
+		/* reset MAC */
+		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+		if (unlikely(ret < 0))
+			return -EIO;
+		buf |= MAC_CR_RST_;
+		ret = lan78xx_write_reg(dev, MAC_CR, buf);
+		if (unlikely(ret < 0))
+			return -EIO;
+	} else if (mii_link_ok(mii) && !dev->link_on) {
+		dev->link_on = true;
+
+		mii_check_media(mii, 1, 1);
+		mii_ethtool_gset(&dev->mii, &ecmd);
+
+		mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
+
+		if (dev->udev->speed == USB_SPEED_SUPER) {
+			if (ethtool_cmd_speed(&ecmd) == 1000) {
+				/* disable U2 */
+				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
+				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+				/* enable U1 */
+				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+				buf |= USB_CFG1_DEV_U1_INIT_EN_;
+				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+			} else {
+				/* enable U1 & U2 */
+				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+				buf |= USB_CFG1_DEV_U2_INIT_EN_;
+				buf |= USB_CFG1_DEV_U1_INIT_EN_;
+				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+			}
+		}
+
+		ladv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
+		if (unlikely(ladv < 0))
+			return -EIO;
+
+		radv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
+		if (unlikely(radv < 0))
+			return -EIO;
+
+		netif_dbg(dev, link, dev->net,
+			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
+			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
+
+		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
+		netif_carrier_on(dev->net);
+	}
+
+	return ret;
+}
+
+/* some work can't be done in tasklets, so we use keventd
+ *
+ * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
+ * but tasklet_schedule() doesn't.  Hope the failure is rare.
+ */
+void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
+{
+	set_bit(work, &dev->flags);
+	if (!schedule_delayed_work(&dev->wq, 0))
+		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
+}
+
+static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
+{
+	u32 intdata;
+
+	if (urb->actual_length != 4) {
+		netdev_warn(dev->net,
+			    "unexpected urb length %d", urb->actual_length);
+		return;
+	}
+
+	memcpy(&intdata, urb->transfer_buffer, 4);
+	le32_to_cpus(&intdata);
+
+	if (intdata & INT_ENP_PHY_INT) {
+		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
+		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
+	} else {
+		netdev_warn(dev->net,
+			    "unexpected interrupt: 0x%08x\n", intdata);
+	}
+}
+
+static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
+{
+	return MAX_EEPROM_SIZE;
+}
+
+static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
+				      struct ethtool_eeprom *ee, u8 *data)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+
+	ee->magic = LAN78XX_EEPROM_MAGIC;
+
+	return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
+}
+
+static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
+				      struct ethtool_eeprom *ee, u8 *data)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+
+	/* Allow entire eeprom update only */
+	if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
+	    (ee->offset == 0) &&
+	    (ee->len == 512) &&
+	    (data[0] == EEPROM_INDICATOR))
+		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
+	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
+		 (ee->offset == 0) &&
+		 (ee->len == 512) &&
+		 (data[0] == OTP_INDICATOR_1))
+		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
+
+	return -EINVAL;
+}
+
+static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
+				u8 *data)
+{
+	if (stringset == ETH_SS_STATS)
+		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
+}
+
+static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
+{
+	if (sset == ETH_SS_STATS)
+		return ARRAY_SIZE(lan78xx_gstrings);
+	else
+		return -EOPNOTSUPP;
+}
+
+static void lan78xx_get_stats(struct net_device *netdev,
+			      struct ethtool_stats *stats, u64 *data)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	struct lan78xx_statstage lan78xx_stat;
+	u32 *p;
+	int i;
+
+	if (usb_autopm_get_interface(dev->intf) < 0)
+		return;
+
+	if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
+		p = (u32 *)&lan78xx_stat;
+		for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
+			data[i] = p[i];
+	}
+
+	usb_autopm_put_interface(dev->intf);
+}
+
+static void lan78xx_get_wol(struct net_device *netdev,
+			    struct ethtool_wolinfo *wol)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	int ret;
+	u32 buf;
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+
+	if (usb_autopm_get_interface(dev->intf) < 0)
+		return;
+
+	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+	if (unlikely(ret < 0)) {
+		wol->supported = 0;
+		wol->wolopts = 0;
+	} else {
+		if (buf & USB_CFG_RMT_WKP_) {
+			wol->supported = WAKE_ALL;
+			wol->wolopts = pdata->wol;
+		} else {
+			wol->supported = 0;
+			wol->wolopts = 0;
+		}
+	}
+
+	usb_autopm_put_interface(dev->intf);
+}
+
+static int lan78xx_set_wol(struct net_device *netdev,
+			   struct ethtool_wolinfo *wol)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+	int ret;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		return ret;
+
+	pdata->wol = 0;
+	if (wol->wolopts & WAKE_UCAST)
+		pdata->wol |= WAKE_UCAST;
+	if (wol->wolopts & WAKE_MCAST)
+		pdata->wol |= WAKE_MCAST;
+	if (wol->wolopts & WAKE_BCAST)
+		pdata->wol |= WAKE_BCAST;
+	if (wol->wolopts & WAKE_MAGIC)
+		pdata->wol |= WAKE_MAGIC;
+	if (wol->wolopts & WAKE_PHY)
+		pdata->wol |= WAKE_PHY;
+	if (wol->wolopts & WAKE_ARP)
+		pdata->wol |= WAKE_ARP;
+
+	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
+
+	usb_autopm_put_interface(dev->intf);
+
+	return ret;
+}
+
+static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+	int ret;
+	u32 buf;
+	u32 adv, lpadv;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		return ret;
+
+	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+	if (buf & MAC_CR_EEE_EN_) {
+		buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+				       PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT);
+		adv = mmd_eee_adv_to_ethtool_adv_t(buf);
+		buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+				       PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
+		lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
+
+		edata->eee_enabled = true;
+		edata->supported = true;
+		edata->eee_active = !!(adv & lpadv);
+		edata->advertised = adv;
+		edata->lp_advertised = lpadv;
+		edata->tx_lpi_enabled = true;
+		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer use the same usec unit */
+		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
+		edata->tx_lpi_timer = buf;
+	} else {
+		buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+				       PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
+		lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
+
+		edata->eee_enabled = false;
+		edata->eee_active = false;
+		edata->supported = false;
+		edata->advertised = 0;
+		edata->lp_advertised = lpadv;
+		edata->tx_lpi_enabled = false;
+		edata->tx_lpi_timer = 0;
+	}
+
+	usb_autopm_put_interface(dev->intf);
+
+	return 0;
+}
+
+static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+	int ret;
+	u32 buf;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		return ret;
+
+	if (edata->eee_enabled) {
+		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+		buf |= MAC_CR_EEE_EN_;
+		ret = lan78xx_write_reg(dev, MAC_CR, buf);
+
+		buf = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+		lan78xx_mmd_write(dev->net, dev->mii.phy_id,
+				  PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT, buf);
+	} else {
+		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+		buf &= ~MAC_CR_EEE_EN_;
+		ret = lan78xx_write_reg(dev, MAC_CR, buf);
+	}
+
+	usb_autopm_put_interface(dev->intf);
+
+	return 0;
+}
+
+static u32 lan78xx_get_link(struct net_device *net)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+
+	return mii_link_ok(&dev->mii);
+}
+
+static int lan78xx_nway_reset(struct net_device *net)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+
+	if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+		return -EOPNOTSUPP;
+
+	return mii_nway_restart(&dev->mii);
+}
+
+static void lan78xx_get_drvinfo(struct net_device *net,
+				struct ethtool_drvinfo *info)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+
+	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
+}
+
+static u32 lan78xx_get_msglevel(struct net_device *net)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+
+	return dev->msg_enable;
+}
+
+static void lan78xx_set_msglevel(struct net_device *net, u32 level)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+
+	dev->msg_enable = level;
+}
+
+static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+	struct mii_if_info *mii = &dev->mii;
+	int ret;
+	int buf;
+
+	if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+		return -EOPNOTSUPP;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		return ret;
+
+	ret = mii_ethtool_gset(&dev->mii, cmd);
+
+	mii->mdio_write(mii->dev, mii->phy_id,
+			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
+	buf = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
+	mii->mdio_write(mii->dev, mii->phy_id,
+			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
+
+	buf &= PHY_EXT_MODE_CTRL_MDIX_MASK_;
+	if (buf == PHY_EXT_MODE_CTRL_AUTO_MDIX_) {
+		cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
+		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+	} else if (buf == PHY_EXT_MODE_CTRL_MDI_) {
+		cmd->eth_tp_mdix = ETH_TP_MDI;
+		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
+	} else if (buf == PHY_EXT_MODE_CTRL_MDI_X_) {
+		cmd->eth_tp_mdix = ETH_TP_MDI_X;
+		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
+	}
+
+	usb_autopm_put_interface(dev->intf);
+
+	return ret;
+}
+
+static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+	struct mii_if_info *mii = &dev->mii;
+	int ret = 0;
+	int temp;
+
+	if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+		return -EOPNOTSUPP;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		return ret;
+
+	if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
+		if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI) {
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_GPIO_PAGE,
+					PHY_EXT_GPIO_PAGE_SPACE_1);
+			temp = mii->mdio_read(mii->dev, mii->phy_id,
+					PHY_EXT_MODE_CTRL);
+			temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_MODE_CTRL,
+					temp | PHY_EXT_MODE_CTRL_MDI_);
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_GPIO_PAGE,
+					PHY_EXT_GPIO_PAGE_SPACE_0);
+		} else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_X) {
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_GPIO_PAGE,
+					PHY_EXT_GPIO_PAGE_SPACE_1);
+			temp = mii->mdio_read(mii->dev, mii->phy_id,
+					PHY_EXT_MODE_CTRL);
+			temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_MODE_CTRL,
+					temp | PHY_EXT_MODE_CTRL_MDI_X_);
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_GPIO_PAGE,
+					PHY_EXT_GPIO_PAGE_SPACE_0);
+		} else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) {
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_GPIO_PAGE,
+					PHY_EXT_GPIO_PAGE_SPACE_1);
+			temp = mii->mdio_read(mii->dev, mii->phy_id,
+							PHY_EXT_MODE_CTRL);
+			temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_MODE_CTRL,
+					temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_GPIO_PAGE,
+					PHY_EXT_GPIO_PAGE_SPACE_0);
+		}
+	}
+
+	/* change speed & duplex */
+	ret = mii_ethtool_sset(&dev->mii, cmd);
+
+	if (!cmd->autoneg) {
+		/* force link down */
+		temp = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
+		mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR,
+				temp | BMCR_LOOPBACK);
+		mdelay(1);
+		mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, temp);
+	}
+
+	usb_autopm_put_interface(dev->intf);
+
+	return ret;
+}
+
+static const struct ethtool_ops lan78xx_ethtool_ops = {
+	.get_link	= lan78xx_get_link,
+	.nway_reset	= lan78xx_nway_reset,
+	.get_drvinfo	= lan78xx_get_drvinfo,
+	.get_msglevel	= lan78xx_get_msglevel,
+	.set_msglevel	= lan78xx_set_msglevel,
+	.get_settings	= lan78xx_get_settings,
+	.set_settings	= lan78xx_set_settings,
+	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
+	.get_eeprom	= lan78xx_ethtool_get_eeprom,
+	.set_eeprom	= lan78xx_ethtool_set_eeprom,
+	.get_ethtool_stats = lan78xx_get_stats,
+	.get_sset_count = lan78xx_get_sset_count,
+	.get_strings	= lan78xx_get_strings,
+	.get_wol	= lan78xx_get_wol,
+	.set_wol	= lan78xx_set_wol,
+	.get_eee	= lan78xx_get_eee,
+	.set_eee	= lan78xx_set_eee,
+};
+
+static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+
+	if (!netif_running(netdev))
+		return -EINVAL;
+
+	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
+static void lan78xx_init_mac_address(struct lan78xx_net *dev)
+{
+	u32 addr_lo, addr_hi;
+	int ret;
+	u8 addr[6];
+
+	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
+	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+
+	addr[0] = addr_lo & 0xFF;
+	addr[1] = (addr_lo >> 8) & 0xFF;
+	addr[2] = (addr_lo >> 16) & 0xFF;
+	addr[3] = (addr_lo >> 24) & 0xFF;
+	addr[4] = addr_hi & 0xFF;
+	addr[5] = (addr_hi >> 8) & 0xFF;
+
+	if (!is_valid_ether_addr(addr)) {
+		/* reading mac address from EEPROM or OTP */
+		if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
+					 addr) == 0) ||
+		    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
+				      addr) == 0)) {
+			if (is_valid_ether_addr(addr)) {
+				/* eeprom values are valid so use them */
+				netif_dbg(dev, ifup, dev->net,
+					  "MAC address read from EEPROM");
+			} else {
+				/* generate random MAC */
+				random_ether_addr(addr);
+				netif_dbg(dev, ifup, dev->net,
+					  "MAC address set to random addr");
+			}
+
+			addr_lo = addr[0] | (addr[1] << 8) |
+				  (addr[2] << 16) | (addr[3] << 24);
+			addr_hi = addr[4] | (addr[5] << 8);
+
+			ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+			ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+		} else {
+			/* generate random MAC */
+			random_ether_addr(addr);
+			netif_dbg(dev, ifup, dev->net,
+				  "MAC address set to random addr");
+		}
+	}
+
+	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+
+	ether_addr_copy(dev->net->dev_addr, addr);
+}
+
+static void lan78xx_mii_init(struct lan78xx_net *dev)
+{
+	/* Initialize MII structure */
+	dev->mii.dev = dev->net;
+	dev->mii.mdio_read = lan78xx_mdio_read;
+	dev->mii.mdio_write = lan78xx_mdio_write;
+	dev->mii.phy_id_mask = 0x1f;
+	dev->mii.reg_num_mask = 0x1f;
+	dev->mii.phy_id = INTERNAL_PHY_ID;
+	dev->mii.supports_gmii = true;
+}
+
+static int lan78xx_phy_init(struct lan78xx_net *dev)
+{
+	int temp;
+	struct mii_if_info *mii = &dev->mii;
+
+	if ((!mii->mdio_write) || (!mii->mdio_read))
+		return -EOPNOTSUPP;
+
+	temp = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
+	temp |= ADVERTISE_ALL;
+	mii->mdio_write(mii->dev, mii->phy_id, MII_ADVERTISE,
+			temp | ADVERTISE_CSMA |
+			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
+	/* set to AUTOMDIX */
+	mii->mdio_write(mii->dev, mii->phy_id,
+			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
+	temp = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
+	temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+	mii->mdio_write(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL,
+			temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
+	mii->mdio_write(mii->dev, mii->phy_id,
+			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
+	dev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+	/* MAC doesn't support 1000HD */
+	temp = mii->mdio_read(mii->dev, mii->phy_id, MII_CTRL1000);
+	mii->mdio_write(mii->dev, mii->phy_id, MII_CTRL1000,
+			temp & ~ADVERTISE_1000HALF);
+
+	/* clear interrupt */
+	mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
+	mii->mdio_write(mii->dev, mii->phy_id, PHY_VTSE_INT_MASK,
+			PHY_VTSE_INT_MASK_MDINTPIN_EN_ |
+			PHY_VTSE_INT_MASK_LINK_CHANGE_);
+
+	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
+
+	return 0;
+}
+
+static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
+{
+	int ret = 0;
+	u32 buf;
+	bool rxenabled;
+
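+	/* The receiver must be disabled while the maximum frame size is
+	 * updated, then restored to its previous state.
+	 */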
+	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+
+	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
+
+	if (rxenabled) {
+		buf &= ~MAC_RX_RXEN_;
+		ret = lan78xx_write_reg(dev, MAC_RX, buf);
+	}
+
+	/* add 4 to size for FCS */
+	buf &= ~MAC_RX_MAX_SIZE_MASK_;
+	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
+
+	ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+	if (rxenabled) {
+		buf |= MAC_RX_RXEN_;
+		ret = lan78xx_write_reg(dev, MAC_RX, buf);
+	}
+
+	return 0;
+}
+
+static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
+{
+	struct sk_buff *skb;
+	unsigned long flags;
+	int count = 0;
+
+	spin_lock_irqsave(&q->lock, flags);
+	while (!skb_queue_empty(q)) {
+		struct skb_data	*entry;
+		struct urb *urb;
+		int ret;
+
+		skb_queue_walk(q, skb) {
+			entry = (struct skb_data *)skb->cb;
+			if (entry->state != unlink_start)
+				goto found;
+		}
+		break;
+found:
+		entry->state = unlink_start;
+		urb = entry->urb;
+
+		/* Get reference count of the URB to avoid it to be
+		 * freed during usb_unlink_urb, which may trigger
+		 * use-after-free problem inside usb_unlink_urb since
+		 * usb_unlink_urb is always racing with .complete
+		 * handler(include defer_bh).
+		 */
+		usb_get_urb(urb);
+		spin_unlock_irqrestore(&q->lock, flags);
+		/* during some PM-driven resume scenarios,
+		 * these (async) unlinks complete immediately
+		 */
+		ret = usb_unlink_urb(urb);
+		if (ret != -EINPROGRESS && ret != 0)
+			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
+		else
+			count++;
+		usb_put_urb(urb);
+		spin_lock_irqsave(&q->lock, flags);
+	}
+	spin_unlock_irqrestore(&q->lock, flags);
+	return count;
+}
+
+static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	int ll_mtu = new_mtu + netdev->hard_header_len;
+	int old_hard_mtu = dev->hard_mtu;
+	int old_rx_urb_size = dev->rx_urb_size;
+	int ret;
+
+	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
+		return -EINVAL;
+
+	if (new_mtu <= 0)
+		return -EINVAL;
+	/* no second zero-length packet read wanted after mtu-sized packets */
+	if ((ll_mtu % dev->maxpacket) == 0)
+		return -EDOM;
+
+	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+
+	netdev->mtu = new_mtu;
+
+	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
+	if (dev->rx_urb_size == old_hard_mtu) {
+		dev->rx_urb_size = dev->hard_mtu;
+		if (dev->rx_urb_size > old_rx_urb_size) {
+			if (netif_running(dev->net)) {
+				unlink_urbs(dev, &dev->rxq);
+				tasklet_schedule(&dev->bh);
+			}
+		}
+	}
+
+	return 0;
+}
+
+int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	struct sockaddr *addr = p;
+	u32 addr_lo, addr_hi;
+	int ret;
+
+	if (netif_running(netdev))
+		return -EBUSY;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
+	addr_lo = netdev->dev_addr[0] |
+		  netdev->dev_addr[1] << 8 |
+		  netdev->dev_addr[2] << 16 |
+		  netdev->dev_addr[3] << 24;
+	addr_hi = netdev->dev_addr[4] |
+		  netdev->dev_addr[5] << 8;
+
+	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+
+	return 0;
+}
+
+/* Enable or disable Rx checksum offload engine */
+static int lan78xx_set_features(struct net_device *netdev,
+				netdev_features_t features)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
+
+	if (features & NETIF_F_RXCSUM) {
+		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
+		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
+	} else {
+		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
+		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
+	}
+
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
+	else
+		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
+
+	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
+
+	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+
+	return 0;
+}
+
+static void lan78xx_deferred_vlan_write(struct work_struct *param)
+{
+	struct lan78xx_priv *pdata =
+			container_of(param, struct lan78xx_priv, set_vlan);
+	struct lan78xx_net *dev = pdata->dev;
+
+	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
+			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
+}
+
+static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
+				   __be16 proto, u16 vid)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+	u16 vid_bit_index;
+	u16 vid_dword_index;
+
+	vid_dword_index = (vid >> 5) & 0x7F;
+	vid_bit_index = vid & 0x1F;
+
+	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
+
+	/* defer register writes to a sleepable context */
+	schedule_work(&pdata->set_vlan);
+
+	return 0;
+}
+
+static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
+				    __be16 proto, u16 vid)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+	u16 vid_bit_index;
+	u16 vid_dword_index;
+
+	vid_dword_index = (vid >> 5) & 0x7F;
+	vid_bit_index = vid & 0x1F;
+
+	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
+
+	/* defer register writes to a sleepable context */
+	schedule_work(&pdata->set_vlan);
+
+	return 0;
+}
+
+static void lan78xx_init_ltm(struct lan78xx_net *dev)
+{
+	int ret;
+	u32 buf;
+	u32 regs[6] = { 0 };
+
+	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+	if (buf & USB_CFG1_LTM_ENABLE_) {
+		u8 temp[2];
+		/* Get values from EEPROM first */
+		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
+			if (temp[0] == 24) {
+				ret = lan78xx_read_raw_eeprom(dev,
+							      temp[1] * 2,
+							      24,
+							      (u8 *)regs);
+				if (ret < 0)
+					return;
+			}
+		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
+			if (temp[0] == 24) {
+				ret = lan78xx_read_raw_otp(dev,
+							   temp[1] * 2,
+							   24,
+							   (u8 *)regs);
+				if (ret < 0)
+					return;
+			}
+		}
+	}
+
+	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
+	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
+	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
+	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
+	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
+	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
+}
+
+static int lan78xx_reset(struct lan78xx_net *dev)
+{
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+	u32 buf;
+	int ret = 0;
+	unsigned long timeout;
+
+	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+	buf |= HW_CFG_LRST_;
+	ret = lan78xx_write_reg(dev, HW_CFG, buf);
+
+	timeout = jiffies + HZ;
+	do {
+		mdelay(1);
+		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+		if (time_after(jiffies, timeout)) {
+			netdev_warn(dev->net,
+				    "timeout on completion of LiteReset");
+			return -EIO;
+		}
+	} while (buf & HW_CFG_LRST_);
+
+	lan78xx_init_mac_address(dev);
+
+	/* Respond to the IN token with a NAK */
+	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+	buf |= USB_CFG_BIR_;
+	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
+
+	/* Init LTM */
+	lan78xx_init_ltm(dev);
+
+	dev->net->hard_header_len += TX_OVERHEAD;
+	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+	if (dev->udev->speed == USB_SPEED_SUPER) {
+		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
+		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+		dev->rx_qlen = 4;
+		dev->tx_qlen = 4;
+	} else if (dev->udev->speed == USB_SPEED_HIGH) {
+		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
+		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
+		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
+	} else {
+		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
+		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+		dev->rx_qlen = 4;
+	}
+
+	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
+	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
+
+	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+	buf |= HW_CFG_MEF_;
+	ret = lan78xx_write_reg(dev, HW_CFG, buf);
+
+	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+	buf |= USB_CFG_BCE_;
+	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
+
+	/* set FIFO sizes */
+	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
+	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
+
+	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
+	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
+
+	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
+	ret = lan78xx_write_reg(dev, FLOW, 0);
+	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
+
+	/* Don't need rfe_ctl_lock during initialisation */
+	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
+	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
+	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+
+	/* Enable or disable checksum offload engines */
+	lan78xx_set_features(dev->net, dev->net->features);
+
+	lan78xx_set_multicast(dev->net);
+
+	/* reset PHY */
+	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+	buf |= PMT_CTL_PHY_RST_;
+	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+	timeout = jiffies + HZ;
+	do {
+		mdelay(1);
+		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+		if (time_after(jiffies, timeout)) {
+			netdev_warn(dev->net, "timeout waiting for PHY Reset");
+			return -EIO;
+		}
+	} while (buf & PMT_CTL_PHY_RST_);
+
+	lan78xx_mii_init(dev);
+
+	ret = lan78xx_phy_init(dev);
+
+	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+
+	buf |= MAC_CR_GMII_EN_;
+	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
+
+	ret = lan78xx_write_reg(dev, MAC_CR, buf);
+
+	/* enable on PHY */
+	if (buf & MAC_CR_EEE_EN_)
+		lan78xx_mmd_write(dev->net, dev->mii.phy_id, 0x07, 0x3C, 0x06);
+
+	/* enable PHY interrupts */
+	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+	buf |= INT_ENP_PHY_INT;
+	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
+
+	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+	buf |= MAC_TX_TXEN_;
+	ret = lan78xx_write_reg(dev, MAC_TX, buf);
+
+	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
+	buf |= FCT_TX_CTL_EN_;
+	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
+
+	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+
+	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+	buf |= MAC_RX_RXEN_;
+	ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
+	buf |= FCT_RX_CTL_EN_;
+	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
+
+	if (!mii_nway_restart(&dev->mii))
+		netif_dbg(dev, link, dev->net, "autoneg initiated");
+
+	return 0;
+}
+
+static int lan78xx_open(struct net_device *net)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+	int ret;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		goto out;
+
+	ret = lan78xx_reset(dev);
+	if (ret < 0)
+		goto done;
+
+	/* for Link Check */
+	if (dev->urb_intr) {
+		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
+		if (ret < 0) {
+			netif_err(dev, ifup, dev->net,
+				  "intr submit %d\n", ret);
+			goto done;
+		}
+	}
+
+	set_bit(EVENT_DEV_OPEN, &dev->flags);
+
+	netif_start_queue(net);
+
+	dev->link_on = false;
+
+	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
+done:
+	usb_autopm_put_interface(dev->intf);
+
+out:
+	return ret;
+}
+
+static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
+{
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
+	DECLARE_WAITQUEUE(wait, current);
+	int temp;
+
+	/* ensure there are no more active urbs */
+	add_wait_queue(&unlink_wakeup, &wait);
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	dev->wait = &unlink_wakeup;
+	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
+
+	/* maybe wait for deletions to finish. */
+	while (!skb_queue_empty(&dev->rxq) &&
+	       !skb_queue_empty(&dev->txq) &&
+	       !skb_queue_empty(&dev->done)) {
+		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		netif_dbg(dev, ifdown, dev->net,
+			  "waited for %d urb completions\n", temp);
+	}
+	set_current_state(TASK_RUNNING);
+	dev->wait = NULL;
+	remove_wait_queue(&unlink_wakeup, &wait);
+}
+
+int lan78xx_stop(struct net_device *net)
+{
+	struct lan78xx_net		*dev = netdev_priv(net);
+
+	clear_bit(EVENT_DEV_OPEN, &dev->flags);
+	netif_stop_queue(net);
+
+	netif_info(dev, ifdown, dev->net,
+		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
+		   net->stats.rx_packets, net->stats.tx_packets,
+		   net->stats.rx_errors, net->stats.tx_errors);
+
+	lan78xx_terminate_urbs(dev);
+
+	usb_kill_urb(dev->urb_intr);
+
+	skb_queue_purge(&dev->rxq_pause);
+
+	/* deferred work (task, timer, softirq) must also stop.
+	 * can't flush_scheduled_work() until we drop rtnl (later),
+	 * else workers could deadlock; so make workers a NOP.
+	 */
+	dev->flags = 0;
+	cancel_delayed_work_sync(&dev->wq);
+	tasklet_kill(&dev->bh);
+
+	usb_autopm_put_interface(dev->intf);
+
+	return 0;
+}
+
+static int lan78xx_linearize(struct sk_buff *skb)
+{
+	return skb_linearize(skb);
+}
+
+static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
+				       struct sk_buff *skb, gfp_t flags)
+{
+	u32 tx_cmd_a, tx_cmd_b;
+
+	if (skb_headroom(skb) < TX_OVERHEAD) {
+		struct sk_buff *skb2;
+
+		skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
+		dev_kfree_skb_any(skb);
+		skb = skb2;
+		if (!skb)
+			return NULL;
+	}
+
+	if (lan78xx_linearize(skb) < 0)
+		return NULL;
+
+	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
+
+	tx_cmd_b = 0;
+	if (skb_is_gso(skb)) {
+		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
+
+		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
+
+		tx_cmd_a |= TX_CMD_A_LSO_;
+	}
+
+	if (skb_vlan_tag_present(skb)) {
+		tx_cmd_a |= TX_CMD_A_IVTG_;
+		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
+	}
+
+	skb_push(skb, 4);
+	cpu_to_le32s(&tx_cmd_b);
+	memcpy(skb->data, &tx_cmd_b, 4);
+
+	skb_push(skb, 4);
+	cpu_to_le32s(&tx_cmd_a);
+	memcpy(skb->data, &tx_cmd_a, 4);
+
+	return skb;
+}
+
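+/* Move an skb from its active queue to dev->done while holding both queue
+ * locks, recording its new state; queueing the first entry schedules the
+ * bottom-half tasklet.
+ */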
+static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
+			       struct sk_buff_head *list, enum skb_state state)
+{
+	unsigned long flags;
+	enum skb_state old_state;
+	struct skb_data *entry = (struct skb_data *)skb->cb;
+
+	spin_lock_irqsave(&list->lock, flags);
+	old_state = entry->state;
+	entry->state = state;
+
+	__skb_unlink(skb, list);
+	spin_unlock(&list->lock);
+	spin_lock(&dev->done.lock);
+
+	__skb_queue_tail(&dev->done, skb);
+	if (skb_queue_len(&dev->done) == 1)
+		tasklet_schedule(&dev->bh);
+	spin_unlock_irqrestore(&dev->done.lock, flags);
+
+	return old_state;
+}
+
+static void tx_complete(struct urb *urb)
+{
+	struct sk_buff *skb = (struct sk_buff *)urb->context;
+	struct skb_data *entry = (struct skb_data *)skb->cb;
+	struct lan78xx_net *dev = entry->dev;
+
+	if (urb->status == 0) {
+		dev->net->stats.tx_packets++;
+		dev->net->stats.tx_bytes += entry->length;
+	} else {
+		dev->net->stats.tx_errors++;
+
+		switch (urb->status) {
+		case -EPIPE:
+			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
+			break;
+
+		/* software-driven interface shutdown */
+		case -ECONNRESET:
+		case -ESHUTDOWN:
+			break;
+
+		case -EPROTO:
+		case -ETIME:
+		case -EILSEQ:
+			netif_stop_queue(dev->net);
+			break;
+		default:
+			netif_dbg(dev, tx_err, dev->net,
+				  "tx err %d\n", entry->urb->status);
+			break;
+		}
+	}
+
+	usb_autopm_put_interface_async(dev->intf);
+
+	defer_bh(dev, skb, &dev->txq, tx_done);
+}
+
+static void lan78xx_queue_skb(struct sk_buff_head *list,
+			      struct sk_buff *newsk, enum skb_state state)
+{
+	struct skb_data *entry = (struct skb_data *)newsk->cb;
+
+	__skb_queue_tail(list, newsk);
+	entry->state = state;
+}
+
+netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+	struct sk_buff *skb2 = NULL;
+
+	if (skb) {
+		skb_tx_timestamp(skb);
+		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
+	}
+
+	if (skb2) {
+		skb_queue_tail(&dev->txq_pend, skb2);
+
+		if (skb_queue_len(&dev->txq_pend) > 10)
+			netif_stop_queue(net);
+	} else {
+		netif_dbg(dev, tx_err, dev->net,
+			  "lan78xx_tx_prep return NULL\n");
+		dev->net->stats.tx_errors++;
+		dev->net->stats.tx_dropped++;
+	}
+
+	tasklet_schedule(&dev->bh);
+
+	return NETDEV_TX_OK;
+}
+
+int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+	int tmp;
+	struct usb_host_interface *alt = NULL;
+	struct usb_host_endpoint *in = NULL, *out = NULL;
+	struct usb_host_endpoint *status = NULL;
+
+	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
+		unsigned ep;
+
+		in = NULL;
+		out = NULL;
+		status = NULL;
+		alt = intf->altsetting + tmp;
+
+		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
+			struct usb_host_endpoint *e;
+			int intr = 0;
+
+			e = alt->endpoint + ep;
+			switch (e->desc.bmAttributes) {
+			case USB_ENDPOINT_XFER_INT:
+				if (!usb_endpoint_dir_in(&e->desc))
+					continue;
+				intr = 1;
+				/* FALLTHROUGH */
+			case USB_ENDPOINT_XFER_BULK:
+				break;
+			default:
+				continue;
+			}
+			if (usb_endpoint_dir_in(&e->desc)) {
+				if (!intr && !in)
+					in = e;
+				else if (intr && !status)
+					status = e;
+			} else {
+				if (!out)
+					out = e;
+			}
+		}
+		if (in && out)
+			break;
+	}
+	if (!alt || !in || !out)
+		return -EINVAL;
+
+	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
+				       in->desc.bEndpointAddress &
+				       USB_ENDPOINT_NUMBER_MASK);
+	dev->pipe_out = usb_sndbulkpipe(dev->udev,
+					out->desc.bEndpointAddress &
+					USB_ENDPOINT_NUMBER_MASK);
+	dev->ep_intr = status;
+
+	return 0;
+}
+
+static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+	struct lan78xx_priv *pdata = NULL;
+	int ret;
+	int i;
+
+	ret = lan78xx_get_endpoints(dev, intf);
+
+	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
+
+	pdata = (struct lan78xx_priv *)(dev->data[0]);
+	if (!pdata) {
+		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
+		return -ENOMEM;
+	}
+
+	pdata->dev = dev;
+
+	spin_lock_init(&pdata->rfe_ctl_lock);
+	mutex_init(&pdata->dataport_mutex);
+
+	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
+
+	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
+		pdata->vlan_table[i] = 0;
+
+	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
+
+	dev->net->features = 0;
+
+	if (DEFAULT_TX_CSUM_ENABLE)
+		dev->net->features |= NETIF_F_HW_CSUM;
+
+	if (DEFAULT_RX_CSUM_ENABLE)
+		dev->net->features |= NETIF_F_RXCSUM;
+
+	if (DEFAULT_TSO_CSUM_ENABLE)
+		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
+
+	dev->net->hw_features = dev->net->features;
+
+	/* Init all registers */
+	ret = lan78xx_reset(dev);
+
+	dev->net->flags |= IFF_MULTICAST;
+
+	pdata->wol = WAKE_MAGIC;
+
+	return 0;
+}
+
+static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+
+	if (pdata) {
+		netif_dbg(dev, ifdown, dev->net, "free pdata");
+		kfree(pdata);
+		pdata = NULL;
+		dev->data[0] = 0;
+	}
+}
+
+static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
+				    struct sk_buff *skb,
+				    u32 rx_cmd_a, u32 rx_cmd_b)
+{
+	if (!(dev->net->features & NETIF_F_RXCSUM) ||
+	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
+		skb->ip_summed = CHECKSUM_NONE;
+	} else {
+		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
+		skb->ip_summed = CHECKSUM_COMPLETE;
+	}
+}
+
+void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+	int		status;
+
+	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
+		skb_queue_tail(&dev->rxq_pause, skb);
+		return;
+	}
+
+	skb->protocol = eth_type_trans(skb, dev->net);
+	dev->net->stats.rx_packets++;
+	dev->net->stats.rx_bytes += skb->len;
+
+	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
+		  skb->len + sizeof(struct ethhdr), skb->protocol);
+	memset(skb->cb, 0, sizeof(struct skb_data));
+
+	if (skb_defer_rx_timestamp(skb))
+		return;
+
+	status = netif_rx(skb);
+	if (status != NET_RX_SUCCESS)
+		netif_dbg(dev, rx_err, dev->net,
+			  "netif_rx status %d\n", status);
+}
+
+static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
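+	/* Each frame in the bulk-in buffer is preceded by three little-endian
+	 * command words (rx_cmd_a/b/c) carrying length, error and checksum
+	 * information.
+	 */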
+	while (skb->len > 0) {
+		u32 rx_cmd_a, rx_cmd_b, align_count, size;
+		u16 rx_cmd_c;
+		struct sk_buff *skb2;
+		unsigned char *packet;
+
+		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
+		le32_to_cpus(&rx_cmd_a);
+		skb_pull(skb, sizeof(rx_cmd_a));
+
+		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
+		le32_to_cpus(&rx_cmd_b);
+		skb_pull(skb, sizeof(rx_cmd_b));
+
+		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
+		le16_to_cpus(&rx_cmd_c);
+		skb_pull(skb, sizeof(rx_cmd_c));
+
+		packet = skb->data;
+
+		/* get the packet length */
+		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
+		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
+
+		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
+			netif_dbg(dev, rx_err, dev->net,
+				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
+		} else {
+			/* last frame in this batch */
+			if (skb->len == size) {
+				lan78xx_rx_csum_offload(dev, skb,
+							rx_cmd_a, rx_cmd_b);
+
+				skb_trim(skb, skb->len - 4); /* remove fcs */
+				skb->truesize = size + sizeof(struct sk_buff);
+
+				return 1;
+			}
+
+			skb2 = skb_clone(skb, GFP_ATOMIC);
+			if (unlikely(!skb2)) {
+				netdev_warn(dev->net, "Error allocating skb");
+				return 0;
+			}
+
+			skb2->len = size;
+			skb2->data = packet;
+			skb_set_tail_pointer(skb2, size);
+
+			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
+
+			skb_trim(skb2, skb2->len - 4); /* remove fcs */
+			skb2->truesize = size + sizeof(struct sk_buff);
+
+			lan78xx_skb_return(dev, skb2);
+		}
+
+		skb_pull(skb, size);
+
+		/* padding bytes before the next frame starts */
+		if (skb->len)
+			skb_pull(skb, align_count);
+	}
+
+	if (unlikely(skb->len < 0)) {
+		netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
+		return 0;
+	}
+
+	return 1;
+}
+
+static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+	if (!lan78xx_rx(dev, skb)) {
+		dev->net->stats.rx_errors++;
+		goto done;
+	}
+
+	if (skb->len) {
+		lan78xx_skb_return(dev, skb);
+		return;
+	}
+
+	netif_dbg(dev, rx_err, dev->net, "drop\n");
+	dev->net->stats.rx_errors++;
+done:
+	skb_queue_tail(&dev->done, skb);
+}
+
+static void rx_complete(struct urb *urb);
+
+static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
+{
+	struct sk_buff *skb;
+	struct skb_data *entry;
+	unsigned long lockflags;
+	size_t size = dev->rx_urb_size;
+	int ret = 0;
+
+	skb = netdev_alloc_skb_ip_align(dev->net, size);
+	if (!skb) {
+		usb_free_urb(urb);
+		return -ENOMEM;
+	}
+
+	entry = (struct skb_data *)skb->cb;
+	entry->urb = urb;
+	entry->dev = dev;
+	entry->length = 0;
+
+	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
+			  skb->data, size, rx_complete, skb);
+
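+	/* Only submit while the device is present, running and neither
+	 * halted nor asleep; on any failure both the skb and URB are freed.
+	 */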
+	spin_lock_irqsave(&dev->rxq.lock, lockflags);
+
+	if (netif_device_present(dev->net) &&
+	    netif_running(dev->net) &&
+	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
+	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+		ret = usb_submit_urb(urb, GFP_ATOMIC);
+		switch (ret) {
+		case 0:
+			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
+			break;
+		case -EPIPE:
+			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
+			break;
+		case -ENODEV:
+			netif_dbg(dev, ifdown, dev->net, "device gone\n");
+			netif_device_detach(dev->net);
+			break;
+		case -EHOSTUNREACH:
+			ret = -ENOLINK;
+			break;
+		default:
+			netif_dbg(dev, rx_err, dev->net,
+				  "rx submit, %d\n", ret);
+			tasklet_schedule(&dev->bh);
+		}
+	} else {
+		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
+		ret = -ENOLINK;
+	}
+	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
+	if (ret) {
+		dev_kfree_skb_any(skb);
+		usb_free_urb(urb);
+	}
+	return ret;
+}
+
+static void rx_complete(struct urb *urb)
+{
+	struct sk_buff	*skb = (struct sk_buff *)urb->context;
+	struct skb_data	*entry = (struct skb_data *)skb->cb;
+	struct lan78xx_net *dev = entry->dev;
+	int urb_status = urb->status;
+	enum skb_state state;
+
+	skb_put(skb, urb->actual_length);
+	state = rx_done;
+	entry->urb = NULL;
+
+	switch (urb_status) {
+	case 0:
+		if (skb->len < dev->net->hard_header_len) {
+			state = rx_cleanup;
+			dev->net->stats.rx_errors++;
+			dev->net->stats.rx_length_errors++;
+			netif_dbg(dev, rx_err, dev->net,
+				  "rx length %d\n", skb->len);
+		}
+		usb_mark_last_busy(dev->udev);
+		break;
+	case -EPIPE:
+		dev->net->stats.rx_errors++;
+		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
+		/* FALLTHROUGH */
+	case -ECONNRESET:				/* async unlink */
+	case -ESHUTDOWN:				/* hardware gone */
+		netif_dbg(dev, ifdown, dev->net,
+			  "rx shutdown, code %d\n", urb_status);
+		state = rx_cleanup;
+		entry->urb = urb;
+		urb = NULL;
+		break;
+	case -EPROTO:
+	case -ETIME:
+	case -EILSEQ:
+		dev->net->stats.rx_errors++;
+		state = rx_cleanup;
+		entry->urb = urb;
+		urb = NULL;
+		break;
+
+	/* data overrun ... flush fifo? */
+	case -EOVERFLOW:
+		dev->net->stats.rx_over_errors++;
+		/* FALLTHROUGH */
+
+	default:
+		state = rx_cleanup;
+		dev->net->stats.rx_errors++;
+		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
+		break;
+	}
+
+	state = defer_bh(dev, skb, &dev->rxq, state);
+
+	if (urb) {
+		if (netif_running(dev->net) &&
+		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
+		    state != unlink_start) {
+			rx_submit(dev, urb, GFP_ATOMIC);
+			return;
+		}
+		usb_free_urb(urb);
+	}
+	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
+}
+
+static void lan78xx_tx_bh(struct lan78xx_net *dev)
+{
+	int length;
+	struct urb *urb = NULL;
+	struct skb_data *entry;
+	unsigned long flags;
+	struct sk_buff_head *tqp = &dev->txq_pend;
+	struct sk_buff *skb, *skb2;
+	int ret;
+	int count, pos;
+	int skb_totallen, pkt_cnt;
+
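+	/* Coalesce pending skbs into a single bulk-out URB: each packet is
+	 * padded to a 4-byte boundary, and a GSO skb is always sent alone.
+	 */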
+	skb_totallen = 0;
+	pkt_cnt = 0;
+	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
+		if (skb_is_gso(skb)) {
+			if (pkt_cnt) {
+				/* handle previous packets first */
+				break;
+			}
+			length = skb->len;
+			skb2 = skb_dequeue(tqp);
+			goto gso_skb;
+		}
+
+		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
+			break;
+		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
+		pkt_cnt++;
+	}
+
+	/* copy to a single skb */
+	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
+	if (!skb)
+		goto drop;
+
+	skb_put(skb, skb_totallen);
+
+	for (count = pos = 0; count < pkt_cnt; count++) {
+		skb2 = skb_dequeue(tqp);
+		if (skb2) {
+			memcpy(skb->data + pos, skb2->data, skb2->len);
+			pos += roundup(skb2->len, sizeof(u32));
+			dev_kfree_skb(skb2);
+		}
+	}
+
+	length = skb_totallen;
+
+gso_skb:
+	urb = usb_alloc_urb(0, GFP_ATOMIC);
+	if (!urb) {
+		netif_dbg(dev, tx_err, dev->net, "no urb\n");
+		goto drop;
+	}
+
+	entry = (struct skb_data *)skb->cb;
+	entry->urb = urb;
+	entry->dev = dev;
+	entry->length = length;
+
+	spin_lock_irqsave(&dev->txq.lock, flags);
+	ret = usb_autopm_get_interface_async(dev->intf);
+	if (ret < 0) {
+		spin_unlock_irqrestore(&dev->txq.lock, flags);
+		goto drop;
+	}
+
+	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
+			  skb->data, skb->len, tx_complete, skb);
+
+	if (length % dev->maxpacket == 0) {
+		/* send USB_ZERO_PACKET */
+		urb->transfer_flags |= URB_ZERO_PACKET;
+	}
+
+#ifdef CONFIG_PM
+	/* if this triggers, the device is still asleep */
+	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+		/* transmission will be done in resume */
+		usb_anchor_urb(urb, &dev->deferred);
+		/* no point processing more packets */
+		netif_stop_queue(dev->net);
+		usb_put_urb(urb);
+		spin_unlock_irqrestore(&dev->txq.lock, flags);
+		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
+		return;
+	}
+#endif
+
+	ret = usb_submit_urb(urb, GFP_ATOMIC);
+	switch (ret) {
+	case 0:
+		dev->net->trans_start = jiffies;
+		lan78xx_queue_skb(&dev->txq, skb, tx_start);
+		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
+			netif_stop_queue(dev->net);
+		break;
+	case -EPIPE:
+		netif_stop_queue(dev->net);
+		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
+		usb_autopm_put_interface_async(dev->intf);
+		break;
+	default:
+		usb_autopm_put_interface_async(dev->intf);
+		netif_dbg(dev, tx_err, dev->net,
+			  "tx: submit urb err %d\n", ret);
+		break;
+	}
+
+	spin_unlock_irqrestore(&dev->txq.lock, flags);
+
+	if (ret) {
+		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
+drop:
+		dev->net->stats.tx_dropped++;
+		if (skb)
+			dev_kfree_skb_any(skb);
+		usb_free_urb(urb);
+	} else
+		netif_dbg(dev, tx_queued, dev->net,
+			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
+}
+
+static void lan78xx_rx_bh(struct lan78xx_net *dev)
+{
+	struct urb *urb;
+	int i;
+
+	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
+		for (i = 0; i < 10; i++) {
+			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
+				break;
+			urb = usb_alloc_urb(0, GFP_ATOMIC);
+			if (urb)
+				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
+					return;
+		}
+
+		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
+			tasklet_schedule(&dev->bh);
+	}
+	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
+		netif_wake_queue(dev->net);
+}
+
+static void lan78xx_bh(unsigned long param)
+{
+	struct lan78xx_net *dev = (struct lan78xx_net *)param;
+	struct sk_buff *skb;
+	struct skb_data *entry;
+
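+	/* Drain the done queue: completed RX buffers are parsed, while
+	 * finished TX and cleaned-up RX entries release their URB and skb.
+	 */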
+	while ((skb = skb_dequeue(&dev->done))) {
+		entry = (struct skb_data *)(skb->cb);
+		switch (entry->state) {
+		case rx_done:
+			entry->state = rx_cleanup;
+			rx_process(dev, skb);
+			continue;
+		case tx_done:
+			usb_free_urb(entry->urb);
+			dev_kfree_skb(skb);
+			continue;
+		case rx_cleanup:
+			usb_free_urb(entry->urb);
+			dev_kfree_skb(skb);
+			continue;
+		default:
+			netdev_dbg(dev->net, "skb state %d\n", entry->state);
+			return;
+		}
+	}
+
+	if (netif_device_present(dev->net) && netif_running(dev->net)) {
+		if (!skb_queue_empty(&dev->txq_pend))
+			lan78xx_tx_bh(dev);
+
+		if (!timer_pending(&dev->delay) &&
+		    !test_bit(EVENT_RX_HALT, &dev->flags))
+			lan78xx_rx_bh(dev);
+	}
+}
+
+static void lan78xx_delayedwork(struct work_struct *work)
+{
+	int status;
+	struct lan78xx_net *dev;
+
+	dev = container_of(work, struct lan78xx_net, wq.work);
+
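+	/* Deferred work: clear TX/RX halts and handle a pending link reset
+	 * outside interrupt context; the fail_* labels reuse the
+	 * error-logging path when autopm wakeup fails.
+	 */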
+	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
+		unlink_urbs(dev, &dev->txq);
+		status = usb_autopm_get_interface(dev->intf);
+		if (status < 0)
+			goto fail_pipe;
+		status = usb_clear_halt(dev->udev, dev->pipe_out);
+		usb_autopm_put_interface(dev->intf);
+		if (status < 0 &&
+		    status != -EPIPE &&
+		    status != -ESHUTDOWN) {
+			if (netif_msg_tx_err(dev))
+fail_pipe:
+				netdev_err(dev->net,
+					   "can't clear tx halt, status %d\n",
+					   status);
+		} else {
+			clear_bit(EVENT_TX_HALT, &dev->flags);
+			if (status != -ESHUTDOWN)
+				netif_wake_queue(dev->net);
+		}
+	}
+	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
+		unlink_urbs(dev, &dev->rxq);
+		status = usb_autopm_get_interface(dev->intf);
+		if (status < 0)
+			goto fail_halt;
+		status = usb_clear_halt(dev->udev, dev->pipe_in);
+		usb_autopm_put_interface(dev->intf);
+		if (status < 0 &&
+		    status != -EPIPE &&
+		    status != -ESHUTDOWN) {
+			if (netif_msg_rx_err(dev))
+fail_halt:
+				netdev_err(dev->net,
+					   "can't clear rx halt, status %d\n",
+					   status);
+		} else {
+			clear_bit(EVENT_RX_HALT, &dev->flags);
+			tasklet_schedule(&dev->bh);
+		}
+	}
+
+	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
+		int ret = 0;
+
+		clear_bit(EVENT_LINK_RESET, &dev->flags);
+		status = usb_autopm_get_interface(dev->intf);
+		if (status < 0)
+			goto skip_reset;
+		if (lan78xx_link_reset(dev) < 0) {
+			usb_autopm_put_interface(dev->intf);
+skip_reset:
+			netdev_info(dev->net, "link reset failed (%d)\n",
+				    ret);
+		} else {
+			usb_autopm_put_interface(dev->intf);
+		}
+	}
+}
+
+static void intr_complete(struct urb *urb)
+{
+	struct lan78xx_net *dev = urb->context;
+	int status = urb->status;
+
+	switch (status) {
+	/* success */
+	case 0:
+		lan78xx_status(dev, urb);
+		break;
+
+	/* software-driven interface shutdown */
+	case -ENOENT:			/* urb killed */
+	case -ESHUTDOWN:		/* hardware gone */
+		netif_dbg(dev, ifdown, dev->net,
+			  "intr shutdown, code %d\n", status);
+		return;
+
+	/* NOTE:  not throttling like RX/TX, since this endpoint
+	 * already polls infrequently
+	 */
+	default:
+		netdev_dbg(dev->net, "intr status %d\n", status);
+		break;
+	}
+
+	if (!netif_running(dev->net))
+		return;
+
+	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
+	status = usb_submit_urb(urb, GFP_ATOMIC);
+	if (status != 0)
+		netif_err(dev, timer, dev->net,
+			  "intr resubmit --> %d\n", status);
+}
+
+static void lan78xx_disconnect(struct usb_interface *intf)
+{
+	struct lan78xx_net		*dev;
+	struct usb_device		*udev;
+	struct net_device		*net;
+
+	dev = usb_get_intfdata(intf);
+	usb_set_intfdata(intf, NULL);
+	if (!dev)
+		return;
+
+	udev = interface_to_usbdev(intf);
+
+	net = dev->net;
+	unregister_netdev(net);
+
+	cancel_delayed_work_sync(&dev->wq);
+
+	usb_scuttle_anchored_urbs(&dev->deferred);
+
+	lan78xx_unbind(dev, intf);
+
+	usb_kill_urb(dev->urb_intr);
+	usb_free_urb(dev->urb_intr);
+
+	free_netdev(net);
+	usb_put_dev(udev);
+}
+
+void lan78xx_tx_timeout(struct net_device *net)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+
+	unlink_urbs(dev, &dev->txq);
+	tasklet_schedule(&dev->bh);
+}
+
+static const struct net_device_ops lan78xx_netdev_ops = {
+	.ndo_open		= lan78xx_open,
+	.ndo_stop		= lan78xx_stop,
+	.ndo_start_xmit		= lan78xx_start_xmit,
+	.ndo_tx_timeout		= lan78xx_tx_timeout,
+	.ndo_change_mtu		= lan78xx_change_mtu,
+	.ndo_set_mac_address	= lan78xx_set_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= lan78xx_ioctl,
+	.ndo_set_rx_mode	= lan78xx_set_multicast,
+	.ndo_set_features	= lan78xx_set_features,
+	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
+};
+
+static int lan78xx_probe(struct usb_interface *intf,
+			 const struct usb_device_id *id)
+{
+	struct lan78xx_net *dev;
+	struct net_device *netdev;
+	struct usb_device *udev;
+	int ret;
+	unsigned maxp;
+	unsigned period;
+	u8 *buf = NULL;
+
+	udev = interface_to_usbdev(intf);
+	udev = usb_get_dev(udev);
+
+	ret = -ENOMEM;
+	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
+	if (!netdev) {
+		dev_err(&intf->dev, "Error: OOM\n");
+		goto out1;
+	}
+
+	/* netdev_printk() needs this */
+	SET_NETDEV_DEV(netdev, &intf->dev);
+
+	dev = netdev_priv(netdev);
+	dev->udev = udev;
+	dev->intf = intf;
+	dev->net = netdev;
+	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
+					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
+
+	skb_queue_head_init(&dev->rxq);
+	skb_queue_head_init(&dev->txq);
+	skb_queue_head_init(&dev->done);
+	skb_queue_head_init(&dev->rxq_pause);
+	skb_queue_head_init(&dev->txq_pend);
+	mutex_init(&dev->phy_mutex);
+
+	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
+	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
+	init_usb_anchor(&dev->deferred);
+
+	netdev->netdev_ops = &lan78xx_netdev_ops;
+	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
+	netdev->ethtool_ops = &lan78xx_ethtool_ops;
+
+	ret = lan78xx_bind(dev, intf);
+	if (ret < 0)
+		goto out2;
+	strcpy(netdev->name, "eth%d");
+
+	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
+		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
+
+	dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
+	dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
+	dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
+
+	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
+	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
+
+	dev->pipe_intr = usb_rcvintpipe(dev->udev,
+					dev->ep_intr->desc.bEndpointAddress &
+					USB_ENDPOINT_NUMBER_MASK);
+	period = dev->ep_intr->desc.bInterval;
+
+	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
+	buf = kmalloc(maxp, GFP_KERNEL);
+	if (buf) {
+		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
+		if (!dev->urb_intr) {
+			kfree(buf);
+			goto out3;
+		} else {
+			usb_fill_int_urb(dev->urb_intr, dev->udev,
+					 dev->pipe_intr, buf, maxp,
+					 intr_complete, dev, period);
+		}
+	}
+
+	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+
+	/* driver requires remote-wakeup capability during autosuspend. */
+	intf->needs_remote_wakeup = 1;
+
+	ret = register_netdev(netdev);
+	if (ret != 0) {
+		netif_err(dev, probe, netdev, "couldn't register the device\n");
+		goto out2;
+	}
+
+	usb_set_intfdata(intf, dev);
+
+	ret = device_set_wakeup_enable(&udev->dev, true);
+
+	/* The default autosuspend delay of 2 seconds has more overhead than
+	 * advantage, so default to 10 seconds instead.
+	 */
+	pm_runtime_set_autosuspend_delay(&udev->dev,
+					 DEFAULT_AUTOSUSPEND_DELAY);
+
+	return 0;
+
+out3:
+	lan78xx_unbind(dev, intf);
+out2:
+	free_netdev(netdev);
+out1:
+	usb_put_dev(udev);
+
+	return ret;
+}
+
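+/* Bit-serial CRC-16 (polynomial 0x8005, data consumed LSB first), matching
+ * the CRC the hardware computes over wakeup-frame filter bytes.
+ */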
+static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
+{
+	const u16 crc16poly = 0x8005;
+	int i;
+	u16 bit, crc, msb;
+	u8 data;
+
+	crc = 0xFFFF;
+	for (i = 0; i < len; i++) {
+		data = *buf++;
+		for (bit = 0; bit < 8; bit++) {
+			msb = crc >> 15;
+			crc <<= 1;
+
+			if (msb ^ (u16)(data & 1)) {
+				crc ^= crc16poly;
+				crc |= (u16)0x0001U;
+			}
+			data >>= 1;
+		}
+	}
+
+	return crc;
+}
+
+static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
+{
+	u32 buf;
+	int ret;
+	int mask_index;
+	u16 crc;
+	u32 temp_wucsr;
+	u32 temp_pmt_ctl;
+	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
+	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
+	const u8 arp_type[2] = { 0x08, 0x06 };
+
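+	/* Stop TX/RX and clear previous wakeup status before programming the
+	 * requested WoL sources into WUCSR and PMT_CTL.
+	 */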
+	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+	buf &= ~MAC_TX_TXEN_;
+	ret = lan78xx_write_reg(dev, MAC_TX, buf);
+	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+	buf &= ~MAC_RX_RXEN_;
+	ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+	ret = lan78xx_write_reg(dev, WUCSR, 0);
+	ret = lan78xx_write_reg(dev, WUCSR2, 0);
+	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+	temp_wucsr = 0;
+
+	temp_pmt_ctl = 0;
+	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
+	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
+	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
+
+	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
+		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
+
+	mask_index = 0;
+	if (wol & WAKE_PHY) {
+		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
+
+		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+	}
+	if (wol & WAKE_MAGIC) {
+		temp_wucsr |= WUCSR_MPEN_;
+
+		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
+	}
+	if (wol & WAKE_BCAST) {
+		temp_wucsr |= WUCSR_BCST_EN_;
+
+		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+	}
+	if (wol & WAKE_MCAST) {
+		temp_wucsr |= WUCSR_WAKE_EN_;
+
+		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
+		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
+		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+					WUF_CFGX_EN_ |
+					WUF_CFGX_TYPE_MCAST_ |
+					(0 << WUF_CFGX_OFFSET_SHIFT_) |
+					(crc & WUF_CFGX_CRC16_MASK_));
+
+		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
+		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+		mask_index++;
+
+		/* for IPv6 Multicast */
+		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
+		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+					WUF_CFGX_EN_ |
+					WUF_CFGX_TYPE_MCAST_ |
+					(0 << WUF_CFGX_OFFSET_SHIFT_) |
+					(crc & WUF_CFGX_CRC16_MASK_));
+
+		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
+		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+		mask_index++;
+
+		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+	}
+	if (wol & WAKE_UCAST) {
+		temp_wucsr |= WUCSR_PFDA_EN_;
+
+		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+	}
+	if (wol & WAKE_ARP) {
+		temp_wucsr |= WUCSR_WAKE_EN_;
+
+		/* set WUF_CFG & WUF_MASK
+		 * for packettype (offset 12,13) = ARP (0x0806)
+		 */
+		crc = lan78xx_wakeframe_crc16(arp_type, 2);
+		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+					WUF_CFGX_EN_ |
+					WUF_CFGX_TYPE_ALL_ |
+					(0 << WUF_CFGX_OFFSET_SHIFT_) |
+					(crc & WUF_CFGX_CRC16_MASK_));
+
+		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
+		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+		mask_index++;
+
+		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+	}
+
+	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
+
+	/* when multiple WOL bits are set */
+	if (hweight_long((unsigned long)wol) > 1) {
+		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+	}
+	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
+
+	/* clear WUPS */
+	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+	buf |= PMT_CTL_WUPS_MASK_;
+	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+	buf |= MAC_RX_RXEN_;
+	ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+	return 0;
+}
+
+int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
+{
+	struct lan78xx_net *dev = usb_get_intfdata(intf);
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+	u32 buf;
+	int ret;
+	int event;
+
+	ret = 0;
+	event = message.event;
+
+	if (!dev->suspend_count++) {
+		spin_lock_irq(&dev->txq.lock);
+		/* don't autosuspend while transmitting */
+		if ((skb_queue_len(&dev->txq) ||
+		     skb_queue_len(&dev->txq_pend)) &&
+			PMSG_IS_AUTO(message)) {
+			spin_unlock_irq(&dev->txq.lock);
+			ret = -EBUSY;
+			goto out;
+		} else {
+			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
+			spin_unlock_irq(&dev->txq.lock);
+		}
+
+		/* stop TX & RX */
+		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+		buf &= ~MAC_TX_TXEN_;
+		ret = lan78xx_write_reg(dev, MAC_TX, buf);
+		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+		buf &= ~MAC_RX_RXEN_;
+		ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+		/* empty out the rx and queues */
+		netif_device_detach(dev->net);
+		lan78xx_terminate_urbs(dev);
+		usb_kill_urb(dev->urb_intr);
+
+		/* reattach */
+		netif_device_attach(dev->net);
+	}
+
+	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+		if (PMSG_IS_AUTO(message)) {
+			/* auto suspend (selective suspend) */
+			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+			buf &= ~MAC_TX_TXEN_;
+			ret = lan78xx_write_reg(dev, MAC_TX, buf);
+			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+			buf &= ~MAC_RX_RXEN_;
+			ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+			ret = lan78xx_write_reg(dev, WUCSR, 0);
+			ret = lan78xx_write_reg(dev, WUCSR2, 0);
+			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+			/* set goodframe wakeup */
+			ret = lan78xx_read_reg(dev, WUCSR, &buf);
+
+			buf |= WUCSR_RFE_WAKE_EN_;
+			buf |= WUCSR_STORE_WAKE_;
+
+			ret = lan78xx_write_reg(dev, WUCSR, buf);
+
+			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+
+			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
+			buf |= PMT_CTL_RES_CLR_WKP_STS_;
+
+			buf |= PMT_CTL_PHY_WAKE_EN_;
+			buf |= PMT_CTL_WOL_EN_;
+			buf &= ~PMT_CTL_SUS_MODE_MASK_;
+			buf |= PMT_CTL_SUS_MODE_3_;
+
+			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+
+			buf |= PMT_CTL_WUPS_MASK_;
+
+			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+			buf |= MAC_RX_RXEN_;
+			ret = lan78xx_write_reg(dev, MAC_RX, buf);
+		} else {
+			lan78xx_set_suspend(dev, pdata->wol);
+		}
+	}
+
+out:
+	return ret;
+}
+
+int lan78xx_resume(struct usb_interface *intf)
+{
+	struct lan78xx_net *dev = usb_get_intfdata(intf);
+	struct sk_buff *skb;
+	struct urb *res;
+	int ret;
+	u32 buf;
+
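+	/* On the final resume, restart the interrupt URB and resubmit any TX
+	 * URBs that were anchored to dev->deferred while asleep.
+	 */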
+	if (!--dev->suspend_count) {
+		/* resume interrupt URBs */
+		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
+			usb_submit_urb(dev->urb_intr, GFP_NOIO);
+
+		spin_lock_irq(&dev->txq.lock);
+		while ((res = usb_get_from_anchor(&dev->deferred))) {
+			skb = (struct sk_buff *)res->context;
+			ret = usb_submit_urb(res, GFP_ATOMIC);
+			if (ret < 0) {
+				dev_kfree_skb_any(skb);
+				usb_free_urb(res);
+				usb_autopm_put_interface_async(dev->intf);
+			} else {
+				dev->net->trans_start = jiffies;
+				lan78xx_queue_skb(&dev->txq, skb, tx_start);
+			}
+		}
+
+		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
+		spin_unlock_irq(&dev->txq.lock);
+
+		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
+				netif_start_queue(dev->net);
+			tasklet_schedule(&dev->bh);
+		}
+	}
+
+	ret = lan78xx_write_reg(dev, WUCSR2, 0);
+	ret = lan78xx_write_reg(dev, WUCSR, 0);
+	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
+					     WUCSR2_ARP_RCD_ |
+					     WUCSR2_IPV6_TCPSYN_RCD_ |
+					     WUCSR2_IPV4_TCPSYN_RCD_);
+
+	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
+					    WUCSR_EEE_RX_WAKE_ |
+					    WUCSR_PFDA_FR_ |
+					    WUCSR_RFE_WAKE_FR_ |
+					    WUCSR_WUFR_ |
+					    WUCSR_MPR_ |
+					    WUCSR_BCST_FR_);
+
+	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+	buf |= MAC_TX_TXEN_;
+	ret = lan78xx_write_reg(dev, MAC_TX, buf);
+
+	return 0;
+}
+
+int lan78xx_reset_resume(struct usb_interface *intf)
+{
+	struct lan78xx_net *dev = usb_get_intfdata(intf);
+
+	lan78xx_reset(dev);
+	return lan78xx_resume(intf);
+}
+
+static const struct usb_device_id products[] = {
+	{
+	/* LAN7800 USB Gigabit Ethernet Device */
+	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
+	},
+	{
+	/* LAN7850 USB Gigabit Ethernet Device */
+	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver lan78xx_driver = {
+	.name			= DRIVER_NAME,
+	.id_table		= products,
+	.probe			= lan78xx_probe,
+	.disconnect		= lan78xx_disconnect,
+	.suspend		= lan78xx_suspend,
+	.resume			= lan78xx_resume,
+	.reset_resume		= lan78xx_reset_resume,
+	.supports_autosuspend	= 1,
+	.disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(lan78xx_driver);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/lan78xx.h b/drivers/net/usb/lan78xx.h
new file mode 100644
index 0000000..ae7562e
--- /dev/null
+++ b/drivers/net/usb/lan78xx.h
@@ -0,0 +1,1069 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _LAN78XX_H
+#define _LAN78XX_H
+
+/* USB Vendor Requests */
+#define USB_VENDOR_REQUEST_WRITE_REGISTER	0xA0
+#define USB_VENDOR_REQUEST_READ_REGISTER	0xA1
+#define USB_VENDOR_REQUEST_GET_STATS		0xA2
+
+/* Interrupt Endpoint status word bitfields */
+#define INT_ENP_EEE_START_TX_LPI_INT		BIT(26)
+#define INT_ENP_EEE_STOP_TX_LPI_INT		BIT(25)
+#define INT_ENP_EEE_RX_LPI_INT			BIT(24)
+#define INT_ENP_RDFO_INT			BIT(22)
+#define INT_ENP_TXE_INT				BIT(21)
+#define INT_ENP_TX_DIS_INT			BIT(19)
+#define INT_ENP_RX_DIS_INT			BIT(18)
+#define INT_ENP_PHY_INT				BIT(17)
+#define INT_ENP_DP_INT				BIT(16)
+#define INT_ENP_MAC_ERR_INT			BIT(15)
+#define INT_ENP_TDFU_INT			BIT(14)
+#define INT_ENP_TDFO_INT			BIT(13)
+#define INT_ENP_UTX_FP_INT			BIT(12)
+
+#define TX_PKT_ALIGNMENT			4
+#define RX_PKT_ALIGNMENT			4
+
+/* Tx Command A */
+#define TX_CMD_A_IGE_			(0x20000000)
+#define TX_CMD_A_ICE_			(0x10000000)
+#define TX_CMD_A_LSO_			(0x08000000)
+#define TX_CMD_A_IPE_			(0x04000000)
+#define TX_CMD_A_TPE_			(0x02000000)
+#define TX_CMD_A_IVTG_			(0x01000000)
+#define TX_CMD_A_RVTG_			(0x00800000)
+#define TX_CMD_A_FCS_			(0x00400000)
+#define TX_CMD_A_LEN_MASK_		(0x000FFFFF)
+
+/* Tx Command B */
+#define TX_CMD_B_MSS_SHIFT_		(16)
+#define TX_CMD_B_MSS_MASK_		(0x3FFF0000)
+#define TX_CMD_B_MSS_MIN_		((unsigned short)8)
+#define TX_CMD_B_VTAG_MASK_		(0x0000FFFF)
+#define TX_CMD_B_VTAG_PRI_MASK_		(0x0000E000)
+#define TX_CMD_B_VTAG_CFI_MASK_		(0x00001000)
+#define TX_CMD_B_VTAG_VID_MASK_		(0x00000FFF)
+
+/* Rx Command A */
+#define RX_CMD_A_ICE_			(0x80000000)
+#define RX_CMD_A_TCE_			(0x40000000)
+#define RX_CMD_A_CSE_MASK_		(0xC0000000)
+#define RX_CMD_A_IPV_			(0x20000000)
+#define RX_CMD_A_PID_MASK_		(0x18000000)
+#define RX_CMD_A_PID_NONE_IP_		(0x00000000)
+#define RX_CMD_A_PID_TCP_IP_		(0x08000000)
+#define RX_CMD_A_PID_UDP_IP_		(0x10000000)
+#define RX_CMD_A_PID_IP_		(0x18000000)
+#define RX_CMD_A_PFF_			(0x04000000)
+#define RX_CMD_A_BAM_			(0x02000000)
+#define RX_CMD_A_MAM_			(0x01000000)
+#define RX_CMD_A_FVTG_			(0x00800000)
+#define RX_CMD_A_RED_			(0x00400000)
+#define RX_CMD_A_RX_ERRS_MASK_		(0xC03F0000)
+#define RX_CMD_A_RWT_			(0x00200000)
+#define RX_CMD_A_RUNT_			(0x00100000)
+#define RX_CMD_A_LONG_			(0x00080000)
+#define RX_CMD_A_RXE_			(0x00040000)
+#define RX_CMD_A_DRB_			(0x00020000)
+#define RX_CMD_A_FCS_			(0x00010000)
+#define RX_CMD_A_UAM_			(0x00008000)
+#define RX_CMD_A_ICSM_			(0x00004000)
+#define RX_CMD_A_LEN_MASK_		(0x00003FFF)
+
+/* Rx Command B */
+#define RX_CMD_B_CSUM_SHIFT_		(16)
+#define RX_CMD_B_CSUM_MASK_		(0xFFFF0000)
+#define RX_CMD_B_VTAG_MASK_		(0x0000FFFF)
+#define RX_CMD_B_VTAG_PRI_MASK_		(0x0000E000)
+#define RX_CMD_B_VTAG_CFI_MASK_		(0x00001000)
+#define RX_CMD_B_VTAG_VID_MASK_		(0x00000FFF)
+
+/* Rx Command C */
+#define RX_CMD_C_WAKE_SHIFT_		(15)
+#define RX_CMD_C_WAKE_			(0x8000)
+#define RX_CMD_C_REF_FAIL_SHIFT_	(14)
+#define RX_CMD_C_REF_FAIL_		(0x4000)
+
+/* SCSRs */
+#define NUMBER_OF_REGS			(193)
+
+#define ID_REV				(0x00)
+#define ID_REV_CHIP_ID_MASK_		(0xFFFF0000)
+#define ID_REV_CHIP_REV_MASK_		(0x0000FFFF)
+#define ID_REV_CHIP_ID_7800_		(0x7800)
+
+#define FPGA_REV			(0x04)
+#define FPGA_REV_MINOR_MASK_		(0x0000FF00)
+#define FPGA_REV_MAJOR_MASK_		(0x000000FF)
+
+#define INT_STS				(0x0C)
+#define INT_STS_CLEAR_ALL_		(0xFFFFFFFF)
+#define INT_STS_EEE_TX_LPI_STRT_	(0x04000000)
+#define INT_STS_EEE_TX_LPI_STOP_	(0x02000000)
+#define INT_STS_EEE_RX_LPI_		(0x01000000)
+#define INT_STS_RDFO_			(0x00400000)
+#define INT_STS_TXE_			(0x00200000)
+#define INT_STS_TX_DIS_			(0x00080000)
+#define INT_STS_RX_DIS_			(0x00040000)
+#define INT_STS_PHY_INT_		(0x00020000)
+#define INT_STS_DP_INT_			(0x00010000)
+#define INT_STS_MAC_ERR_		(0x00008000)
+#define INT_STS_TDFU_			(0x00004000)
+#define INT_STS_TDFO_			(0x00002000)
+#define INT_STS_UFX_FP_			(0x00001000)
+#define INT_STS_GPIO_MASK_		(0x00000FFF)
+#define INT_STS_GPIO11_			(0x00000800)
+#define INT_STS_GPIO10_			(0x00000400)
+#define INT_STS_GPIO9_			(0x00000200)
+#define INT_STS_GPIO8_			(0x00000100)
+#define INT_STS_GPIO7_			(0x00000080)
+#define INT_STS_GPIO6_			(0x00000040)
+#define INT_STS_GPIO5_			(0x00000020)
+#define INT_STS_GPIO4_			(0x00000010)
+#define INT_STS_GPIO3_			(0x00000008)
+#define INT_STS_GPIO2_			(0x00000004)
+#define INT_STS_GPIO1_			(0x00000002)
+#define INT_STS_GPIO0_			(0x00000001)
+
+#define HW_CFG				(0x010)
+#define HW_CFG_CLK125_EN_		(0x02000000)
+#define HW_CFG_REFCLK25_EN_		(0x01000000)
+#define HW_CFG_LED3_EN_			(0x00800000)
+#define HW_CFG_LED2_EN_			(0x00400000)
+#define HW_CFG_LED1_EN_			(0x00200000)
+#define HW_CFG_LED0_EN_			(0x00100000)
+#define HW_CFG_EEE_PHY_LUSU_		(0x00020000)
+#define HW_CFG_EEE_TSU_			(0x00010000)
+#define HW_CFG_NETDET_STS_		(0x00008000)
+#define HW_CFG_NETDET_EN_		(0x00004000)
+#define HW_CFG_EEM_			(0x00002000)
+#define HW_CFG_RST_PROTECT_		(0x00001000)
+#define HW_CFG_CONNECT_BUF_		(0x00000400)
+#define HW_CFG_CONNECT_EN_		(0x00000200)
+#define HW_CFG_CONNECT_POL_		(0x00000100)
+#define HW_CFG_SUSPEND_N_SEL_MASK_	(0x000000C0)
+#define HW_CFG_SUSPEND_N_SEL_2		(0x00000000)
+#define HW_CFG_SUSPEND_N_SEL_12N	(0x00000040)
+#define HW_CFG_SUSPEND_N_SEL_012N	(0x00000080)
+#define HW_CFG_SUSPEND_N_SEL_0123N	(0x000000C0)
+#define HW_CFG_SUSPEND_N_POL_		(0x00000020)
+#define HW_CFG_MEF_			(0x00000010)
+#define HW_CFG_ETC_			(0x00000008)
+#define HW_CFG_LRST_			(0x00000002)
+#define HW_CFG_SRST_			(0x00000001)
+
+#define PMT_CTL				(0x014)
+#define PMT_CTL_EEE_WAKEUP_EN_		(0x00002000)
+#define PMT_CTL_EEE_WUPS_		(0x00001000)
+#define PMT_CTL_MAC_SRST_		(0x00000800)
+#define PMT_CTL_PHY_PWRUP_		(0x00000400)
+#define PMT_CTL_RES_CLR_WKP_MASK_	(0x00000300)
+#define PMT_CTL_RES_CLR_WKP_STS_	(0x00000200)
+#define PMT_CTL_RES_CLR_WKP_EN_		(0x00000100)
+#define PMT_CTL_READY_			(0x00000080)
+#define PMT_CTL_SUS_MODE_MASK_		(0x00000060)
+#define PMT_CTL_SUS_MODE_0_		(0x00000000)
+#define PMT_CTL_SUS_MODE_1_		(0x00000020)
+#define PMT_CTL_SUS_MODE_2_		(0x00000040)
+#define PMT_CTL_SUS_MODE_3_		(0x00000060)
+#define PMT_CTL_PHY_RST_		(0x00000010)
+#define PMT_CTL_WOL_EN_			(0x00000008)
+#define PMT_CTL_PHY_WAKE_EN_		(0x00000004)
+#define PMT_CTL_WUPS_MASK_		(0x00000003)
+#define PMT_CTL_WUPS_MLT_		(0x00000003)
+#define PMT_CTL_WUPS_MAC_		(0x00000002)
+#define PMT_CTL_WUPS_PHY_		(0x00000001)
+
+#define GPIO_CFG0			(0x018)
+#define GPIO_CFG0_GPIOEN_MASK_		(0x0000F000)
+#define GPIO_CFG0_GPIOEN3_		(0x00008000)
+#define GPIO_CFG0_GPIOEN2_		(0x00004000)
+#define GPIO_CFG0_GPIOEN1_		(0x00002000)
+#define GPIO_CFG0_GPIOEN0_		(0x00001000)
+#define GPIO_CFG0_GPIOBUF_MASK_		(0x00000F00)
+#define GPIO_CFG0_GPIOBUF3_		(0x00000800)
+#define GPIO_CFG0_GPIOBUF2_		(0x00000400)
+#define GPIO_CFG0_GPIOBUF1_		(0x00000200)
+#define GPIO_CFG0_GPIOBUF0_		(0x00000100)
+#define GPIO_CFG0_GPIODIR_MASK_		(0x000000F0)
+#define GPIO_CFG0_GPIODIR3_		(0x00000080)
+#define GPIO_CFG0_GPIODIR2_		(0x00000040)
+#define GPIO_CFG0_GPIODIR1_		(0x00000020)
+#define GPIO_CFG0_GPIODIR0_		(0x00000010)
+#define GPIO_CFG0_GPIOD_MASK_		(0x0000000F)
+#define GPIO_CFG0_GPIOD3_		(0x00000008)
+#define GPIO_CFG0_GPIOD2_		(0x00000004)
+#define GPIO_CFG0_GPIOD1_		(0x00000002)
+#define GPIO_CFG0_GPIOD0_		(0x00000001)
+
+#define GPIO_CFG1			(0x01C)
+#define GPIO_CFG1_GPIOEN_MASK_		(0xFF000000)
+#define GPIO_CFG1_GPIOEN11_		(0x80000000)
+#define GPIO_CFG1_GPIOEN10_		(0x40000000)
+#define GPIO_CFG1_GPIOEN9_		(0x20000000)
+#define GPIO_CFG1_GPIOEN8_		(0x10000000)
+#define GPIO_CFG1_GPIOEN7_		(0x08000000)
+#define GPIO_CFG1_GPIOEN6_		(0x04000000)
+#define GPIO_CFG1_GPIOEN5_		(0x02000000)
+#define GPIO_CFG1_GPIOEN4_		(0x01000000)
+#define GPIO_CFG1_GPIOBUF_MASK_		(0x00FF0000)
+#define GPIO_CFG1_GPIOBUF11_		(0x00800000)
+#define GPIO_CFG1_GPIOBUF10_		(0x00400000)
+#define GPIO_CFG1_GPIOBUF9_		(0x00200000)
+#define GPIO_CFG1_GPIOBUF8_		(0x00100000)
+#define GPIO_CFG1_GPIOBUF7_		(0x00080000)
+#define GPIO_CFG1_GPIOBUF6_		(0x00040000)
+#define GPIO_CFG1_GPIOBUF5_		(0x00020000)
+#define GPIO_CFG1_GPIOBUF4_		(0x00010000)
+#define GPIO_CFG1_GPIODIR_MASK_		(0x0000FF00)
+#define GPIO_CFG1_GPIODIR11_		(0x00008000)
+#define GPIO_CFG1_GPIODIR10_		(0x00004000)
+#define GPIO_CFG1_GPIODIR9_		(0x00002000)
+#define GPIO_CFG1_GPIODIR8_		(0x00001000)
+#define GPIO_CFG1_GPIODIR7_		(0x00000800)
+#define GPIO_CFG1_GPIODIR6_		(0x00000400)
+#define GPIO_CFG1_GPIODIR5_		(0x00000200)
+#define GPIO_CFG1_GPIODIR4_		(0x00000100)
+#define GPIO_CFG1_GPIOD_MASK_		(0x000000FF)
+#define GPIO_CFG1_GPIOD11_		(0x00000080)
+#define GPIO_CFG1_GPIOD10_		(0x00000040)
+#define GPIO_CFG1_GPIOD9_		(0x00000020)
+#define GPIO_CFG1_GPIOD8_		(0x00000010)
+#define GPIO_CFG1_GPIOD7_		(0x00000008)
+#define GPIO_CFG1_GPIOD6_		(0x00000004)
+#define GPIO_CFG1_GPIOD5_		(0x00000002)
+#define GPIO_CFG1_GPIOD4_		(0x00000001)
+
+#define GPIO_WAKE			(0x020)
+#define GPIO_WAKE_GPIOPOL_MASK_		(0x0FFF0000)
+#define GPIO_WAKE_GPIOPOL11_		(0x08000000)
+#define GPIO_WAKE_GPIOPOL10_		(0x04000000)
+#define GPIO_WAKE_GPIOPOL9_		(0x02000000)
+#define GPIO_WAKE_GPIOPOL8_		(0x01000000)
+#define GPIO_WAKE_GPIOPOL7_		(0x00800000)
+#define GPIO_WAKE_GPIOPOL6_		(0x00400000)
+#define GPIO_WAKE_GPIOPOL5_		(0x00200000)
+#define GPIO_WAKE_GPIOPOL4_		(0x00100000)
+#define GPIO_WAKE_GPIOPOL3_		(0x00080000)
+#define GPIO_WAKE_GPIOPOL2_		(0x00040000)
+#define GPIO_WAKE_GPIOPOL1_		(0x00020000)
+#define GPIO_WAKE_GPIOPOL0_		(0x00010000)
+#define GPIO_WAKE_GPIOWK_MASK_		(0x00000FFF)
+#define GPIO_WAKE_GPIOWK11_		(0x00000800)
+#define GPIO_WAKE_GPIOWK10_		(0x00000400)
+#define GPIO_WAKE_GPIOWK9_		(0x00000200)
+#define GPIO_WAKE_GPIOWK8_		(0x00000100)
+#define GPIO_WAKE_GPIOWK7_		(0x00000080)
+#define GPIO_WAKE_GPIOWK6_		(0x00000040)
+#define GPIO_WAKE_GPIOWK5_		(0x00000020)
+#define GPIO_WAKE_GPIOWK4_		(0x00000010)
+#define GPIO_WAKE_GPIOWK3_		(0x00000008)
+#define GPIO_WAKE_GPIOWK2_		(0x00000004)
+#define GPIO_WAKE_GPIOWK1_		(0x00000002)
+#define GPIO_WAKE_GPIOWK0_		(0x00000001)
+
+#define DP_SEL				(0x024)
+#define DP_SEL_DPRDY_			(0x80000000)
+#define DP_SEL_RSEL_MASK_		(0x0000000F)
+#define DP_SEL_RSEL_USB_PHY_CSRS_	(0x0000000F)
+#define DP_SEL_RSEL_OTP_64BIT_		(0x00000009)
+#define DP_SEL_RSEL_OTP_8BIT_		(0x00000008)
+#define DP_SEL_RSEL_UTX_BUF_RAM_	(0x00000007)
+#define DP_SEL_RSEL_DESC_RAM_		(0x00000005)
+#define DP_SEL_RSEL_TXFIFO_		(0x00000004)
+#define DP_SEL_RSEL_RXFIFO_		(0x00000003)
+#define DP_SEL_RSEL_LSO_		(0x00000002)
+#define DP_SEL_RSEL_VLAN_DA_		(0x00000001)
+#define DP_SEL_RSEL_URXBUF_		(0x00000000)
+#define DP_SEL_VHF_HASH_LEN		(16)
+#define DP_SEL_VHF_VLAN_LEN		(128)
+
+#define DP_CMD				(0x028)
+#define DP_CMD_WRITE_			(0x00000001)
+#define DP_CMD_READ_			(0x00000000)
+
+#define DP_ADDR				(0x02C)
+#define DP_ADDR_MASK_			(0x00003FFF)
+
+#define DP_DATA				(0x030)
+
+#define E2P_CMD				(0x040)
+#define E2P_CMD_EPC_BUSY_		(0x80000000)
+#define E2P_CMD_EPC_CMD_MASK_		(0x70000000)
+#define E2P_CMD_EPC_CMD_RELOAD_		(0x70000000)
+#define E2P_CMD_EPC_CMD_ERAL_		(0x60000000)
+#define E2P_CMD_EPC_CMD_ERASE_		(0x50000000)
+#define E2P_CMD_EPC_CMD_WRAL_		(0x40000000)
+#define E2P_CMD_EPC_CMD_WRITE_		(0x30000000)
+#define E2P_CMD_EPC_CMD_EWEN_		(0x20000000)
+#define E2P_CMD_EPC_CMD_EWDS_		(0x10000000)
+#define E2P_CMD_EPC_CMD_READ_		(0x00000000)
+#define E2P_CMD_EPC_TIMEOUT_		(0x00000400)
+#define E2P_CMD_EPC_DL_			(0x00000200)
+#define E2P_CMD_EPC_ADDR_MASK_		(0x000001FF)
+
+#define E2P_DATA			(0x044)
+#define E2P_DATA_EEPROM_DATA_MASK_	(0x000000FF)
+
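The E2P_CMD/E2P_DATA pair above is a simple command/poll interface to the
configuration EEPROM: write a command word with the busy bit set, wait for
the controller to clear it, then read the data register. A minimal read
sketch, assuming the driver's lan78xx_read_reg()/lan78xx_write_reg()
register accessors and leaving out the bounded polling a real driver needs:

static int lan78xx_eeprom_read_byte_sketch(struct lan78xx_net *dev,
					   u32 offset, u8 *data)
{
	u32 val;

	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_ |
	      (offset & E2P_CMD_EPC_ADDR_MASK_);
	lan78xx_write_reg(dev, E2P_CMD, val);

	do {	/* wait for the EEPROM controller to finish */
		lan78xx_read_reg(dev, E2P_CMD, &val);
		if (val & E2P_CMD_EPC_TIMEOUT_)
			return -ETIMEDOUT;
	} while (val & E2P_CMD_EPC_BUSY_);

	lan78xx_read_reg(dev, E2P_DATA, &val);
	*data = val & E2P_DATA_EEPROM_DATA_MASK_;
	return 0;
}
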
+#define BOS_ATTR			(0x050)
+#define BOS_ATTR_BLOCK_SIZE_MASK_	(0x000000FF)
+
+#define SS_ATTR				(0x054)
+#define SS_ATTR_POLL_INT_MASK_		(0x00FF0000)
+#define SS_ATTR_DEV_DESC_SIZE_MASK_	(0x0000FF00)
+#define SS_ATTR_CFG_BLK_SIZE_MASK_	(0x000000FF)
+
+#define HS_ATTR				(0x058)
+#define HS_ATTR_POLL_INT_MASK_		(0x00FF0000)
+#define HS_ATTR_DEV_DESC_SIZE_MASK_	(0x0000FF00)
+#define HS_ATTR_CFG_BLK_SIZE_MASK_	(0x000000FF)
+
+#define FS_ATTR				(0x05C)
+#define FS_ATTR_POLL_INT_MASK_		(0x00FF0000)
+#define FS_ATTR_DEV_DESC_SIZE_MASK_	(0x0000FF00)
+#define FS_ATTR_CFG_BLK_SIZE_MASK_	(0x000000FF)
+
+#define STR_ATTR0			    (0x060)
+#define STR_ATTR0_CFGSTR_DESC_SIZE_MASK_    (0xFF000000)
+#define STR_ATTR0_SERSTR_DESC_SIZE_MASK_    (0x00FF0000)
+#define STR_ATTR0_PRODSTR_DESC_SIZE_MASK_   (0x0000FF00)
+#define STR_ATTR0_MANUF_DESC_SIZE_MASK_     (0x000000FF)
+
+#define STR_ATTR1			    (0x064)
+#define STR_ATTR1_INTSTR_DESC_SIZE_MASK_    (0x000000FF)
+
+#define STR_FLAG_ATTR			    (0x068)
+#define STR_FLAG_ATTR_PME_FLAGS_MASK_	    (0x000000FF)
+
+#define USB_CFG0			(0x080)
+#define USB_CFG_LPM_RESPONSE_		(0x80000000)
+#define USB_CFG_LPM_CAPABILITY_		(0x40000000)
+#define USB_CFG_LPM_ENBL_SLPM_		(0x20000000)
+#define USB_CFG_HIRD_THR_MASK_		(0x1F000000)
+#define USB_CFG_HIRD_THR_960_		(0x1C000000)
+#define USB_CFG_HIRD_THR_885_		(0x1B000000)
+#define USB_CFG_HIRD_THR_810_		(0x1A000000)
+#define USB_CFG_HIRD_THR_735_		(0x19000000)
+#define USB_CFG_HIRD_THR_660_		(0x18000000)
+#define USB_CFG_HIRD_THR_585_		(0x17000000)
+#define USB_CFG_HIRD_THR_510_		(0x16000000)
+#define USB_CFG_HIRD_THR_435_		(0x15000000)
+#define USB_CFG_HIRD_THR_360_		(0x14000000)
+#define USB_CFG_HIRD_THR_285_		(0x13000000)
+#define USB_CFG_HIRD_THR_210_		(0x12000000)
+#define USB_CFG_HIRD_THR_135_		(0x11000000)
+#define USB_CFG_HIRD_THR_60_		(0x10000000)
+#define USB_CFG_MAX_BURST_BI_MASK_	(0x00F00000)
+#define USB_CFG_MAX_BURST_BO_MASK_	(0x000F0000)
+#define USB_CFG_MAX_DEV_SPEED_MASK_	(0x0000E000)
+#define USB_CFG_MAX_DEV_SPEED_SS_	(0x00008000)
+#define USB_CFG_MAX_DEV_SPEED_HS_	(0x00000000)
+#define USB_CFG_MAX_DEV_SPEED_FS_	(0x00002000)
+#define USB_CFG_PHY_BOOST_MASK_		(0x00000180)
+#define USB_CFG_PHY_BOOST_PLUS_12_	(0x00000180)
+#define USB_CFG_PHY_BOOST_PLUS_8_	(0x00000100)
+#define USB_CFG_PHY_BOOST_PLUS_4_	(0x00000080)
+#define USB_CFG_PHY_BOOST_NORMAL_	(0x00000000)
+#define USB_CFG_BIR_			(0x00000040)
+#define USB_CFG_BCE_			(0x00000020)
+#define USB_CFG_PORT_SWAP_		(0x00000010)
+#define USB_CFG_LPM_EN_			(0x00000008)
+#define USB_CFG_RMT_WKP_		(0x00000004)
+#define USB_CFG_PWR_SEL_		(0x00000002)
+#define USB_CFG_STALL_BO_DIS_		(0x00000001)
+
+#define USB_CFG1			(0x084)
+#define USB_CFG1_U1_TIMEOUT_MASK_	(0xFF000000)
+#define USB_CFG1_U2_TIMEOUT_MASK_	(0x00FF0000)
+#define USB_CFG1_HS_TOUT_CAL_MASK_	(0x0000E000)
+#define USB_CFG1_DEV_U2_INIT_EN_	(0x00001000)
+#define USB_CFG1_DEV_U2_EN_		(0x00000800)
+#define USB_CFG1_DEV_U1_INIT_EN_	(0x00000400)
+#define USB_CFG1_DEV_U1_EN_		(0x00000200)
+#define USB_CFG1_LTM_ENABLE_		(0x00000100)
+#define USB_CFG1_FS_TOUT_CAL_MASK_	(0x00000070)
+#define USB_CFG1_SCALE_DOWN_MASK_	(0x00000003)
+#define USB_CFG1_SCALE_DOWN_MODE3_	(0x00000003)
+#define USB_CFG1_SCALE_DOWN_MODE2_	(0x00000002)
+#define USB_CFG1_SCALE_DOWN_MODE1_	(0x00000001)
+#define USB_CFG1_SCALE_DOWN_MODE0_	(0x00000000)
+
+#define USB_CFG2			    (0x088)
+#define USB_CFG2_SS_DETACH_TIME_MASK_	    (0xFFFF0000)
+#define USB_CFG2_HS_DETACH_TIME_MASK_	    (0x0000FFFF)
+
+#define BURST_CAP			(0x090)
+#define BURST_CAP_SIZE_MASK_		(0x000000FF)
+
+#define BULK_IN_DLY			(0x094)
+#define BULK_IN_DLY_MASK_		(0x0000FFFF)
+
+#define INT_EP_CTL			(0x098)
+#define INT_EP_INTEP_ON_		(0x80000000)
+#define INT_STS_EEE_TX_LPI_STRT_EN_	(0x04000000)
+#define INT_STS_EEE_TX_LPI_STOP_EN_	(0x02000000)
+#define INT_STS_EEE_RX_LPI_EN_		(0x01000000)
+#define INT_EP_RDFO_EN_			(0x00400000)
+#define INT_EP_TXE_EN_			(0x00200000)
+#define INT_EP_TX_DIS_EN_		(0x00080000)
+#define INT_EP_RX_DIS_EN_		(0x00040000)
+#define INT_EP_PHY_INT_EN_		(0x00020000)
+#define INT_EP_DP_INT_EN_		(0x00010000)
+#define INT_EP_MAC_ERR_EN_		(0x00008000)
+#define INT_EP_TDFU_EN_			(0x00004000)
+#define INT_EP_TDFO_EN_			(0x00002000)
+#define INT_EP_UTX_FP_EN_		(0x00001000)
+#define INT_EP_GPIO_EN_MASK_		(0x00000FFF)
+
+#define PIPE_CTL			(0x09C)
+#define PIPE_CTL_TXSWING_		(0x00000040)
+#define PIPE_CTL_TXMARGIN_MASK_		(0x00000038)
+#define PIPE_CTL_TXDEEMPHASIS_MASK_	(0x00000006)
+#define PIPE_CTL_ELASTICITYBUFFERMODE_	(0x00000001)
+
+#define U1_LATENCY			(0xA0)
+#define U2_LATENCY			(0xA4)
+
+#define USB_STATUS			(0x0A8)
+#define USB_STATUS_REMOTE_WK_		(0x00100000)
+#define USB_STATUS_FUNC_REMOTE_WK_	(0x00080000)
+#define USB_STATUS_LTM_ENABLE_		(0x00040000)
+#define USB_STATUS_U2_ENABLE_		(0x00020000)
+#define USB_STATUS_U1_ENABLE_		(0x00010000)
+#define USB_STATUS_SET_SEL_		(0x00000020)
+#define USB_STATUS_REMOTE_WK_STS_	(0x00000010)
+#define USB_STATUS_FUNC_REMOTE_WK_STS_	(0x00000008)
+#define USB_STATUS_LTM_ENABLE_STS_	(0x00000004)
+#define USB_STATUS_U2_ENABLE_STS_	(0x00000002)
+#define USB_STATUS_U1_ENABLE_STS_	(0x00000001)
+
+#define USB_CFG3			(0x0AC)
+#define USB_CFG3_EN_U2_LTM_		(0x40000000)
+#define USB_CFG3_BULK_OUT_NUMP_OVR_	(0x20000000)
+#define USB_CFG3_DIS_FAST_U1_EXIT_	(0x10000000)
+#define USB_CFG3_LPM_NYET_THR_		(0x0F000000)
+#define USB_CFG3_RX_DET_2_POL_LFPS_	(0x00800000)
+#define USB_CFG3_LFPS_FILT_		(0x00400000)
+#define USB_CFG3_SKIP_RX_DET_		(0x00200000)
+#define USB_CFG3_DELAY_P1P2P3_		(0x001C0000)
+#define USB_CFG3_DELAY_PHY_PWR_CHG_	(0x00020000)
+#define USB_CFG3_U1U2_EXIT_FR_		(0x00010000)
+#define USB_CFG3_REQ_P1P2P3		(0x00008000)
+#define USB_CFG3_HST_PRT_CMPL_		(0x00004000)
+#define USB_CFG3_DIS_SCRAMB_		(0x00002000)
+#define USB_CFG3_PWR_DN_SCALE_		(0x00001FFF)
+
+#define RFE_CTL				(0x0B0)
+#define RFE_CTL_IGMP_COE_		(0x00004000)
+#define RFE_CTL_ICMP_COE_		(0x00002000)
+#define RFE_CTL_TCPUDP_COE_		(0x00001000)
+#define RFE_CTL_IP_COE_			(0x00000800)
+#define RFE_CTL_BCAST_EN_		(0x00000400)
+#define RFE_CTL_MCAST_EN_		(0x00000200)
+#define RFE_CTL_UCAST_EN_		(0x00000100)
+#define RFE_CTL_VLAN_STRIP_		(0x00000080)
+#define RFE_CTL_DISCARD_UNTAGGED_	(0x00000040)
+#define RFE_CTL_VLAN_FILTER_		(0x00000020)
+#define RFE_CTL_SA_FILTER_		(0x00000010)
+#define RFE_CTL_MCAST_HASH_		(0x00000008)
+#define RFE_CTL_DA_HASH_		(0x00000004)
+#define RFE_CTL_DA_PERFECT_		(0x00000002)
+#define RFE_CTL_RST_			(0x00000001)
+
+#define VLAN_TYPE			(0x0B4)
+#define VLAN_TYPE_MASK_			(0x0000FFFF)
+
+#define FCT_RX_CTL			(0x0C0)
+#define FCT_RX_CTL_EN_			(0x80000000)
+#define FCT_RX_CTL_RST_			(0x40000000)
+#define FCT_RX_CTL_SBF_			(0x02000000)
+#define FCT_RX_CTL_OVFL_		(0x01000000)
+#define FCT_RX_CTL_DROP_		(0x00800000)
+#define FCT_RX_CTL_NOT_EMPTY_		(0x00400000)
+#define FCT_RX_CTL_EMPTY_		(0x00200000)
+#define FCT_RX_CTL_DIS_			(0x00100000)
+#define FCT_RX_CTL_USED_MASK_		(0x0000FFFF)
+
+#define FCT_TX_CTL			(0x0C4)
+#define FCT_TX_CTL_EN_			(0x80000000)
+#define FCT_TX_CTL_RST_			(0x40000000)
+#define FCT_TX_CTL_NOT_EMPTY_		(0x00400000)
+#define FCT_TX_CTL_EMPTY_		(0x00200000)
+#define FCT_TX_CTL_DIS_			(0x00100000)
+#define FCT_TX_CTL_USED_MASK_		(0x0000FFFF)
+
+#define FCT_RX_FIFO_END			(0x0C8)
+#define FCT_RX_FIFO_END_MASK_		(0x0000007F)
+
+#define FCT_TX_FIFO_END			(0x0CC)
+#define FCT_TX_FIFO_END_MASK_		(0x0000003F)
+
+#define FCT_FLOW			(0x0D0)
+#define FCT_FLOW_OFF_MASK_		(0x00007F00)
+#define FCT_FLOW_ON_MASK_		(0x0000007F)
+
+#define RX_DP_STOR			(0x0D4)
+#define RX_DP_STORE_TOT_RXUSED_MASK_	(0xFFFF0000)
+#define RX_DP_STORE_UTX_RXUSED_MASK_	(0x0000FFFF)
+
+#define TX_DP_STOR			(0x0D8)
+#define TX_DP_STORE_TOT_TXUSED_MASK_	(0xFFFF0000)
+#define TX_DP_STORE_URX_TXUSED_MASK_	(0x0000FFFF)
+
+#define LTM_BELT_IDLE0			(0x0E0)
+#define LTM_BELT_IDLE0_IDLE1000_	(0x0FFF0000)
+#define LTM_BELT_IDLE0_IDLE100_		(0x00000FFF)
+
+#define LTM_BELT_IDLE1			(0x0E4)
+#define LTM_BELT_IDLE1_IDLE10_		(0x00000FFF)
+
+#define LTM_BELT_ACT0			(0x0E8)
+#define LTM_BELT_ACT0_ACT1000_		(0x0FFF0000)
+#define LTM_BELT_ACT0_ACT100_		(0x00000FFF)
+
+#define LTM_BELT_ACT1			(0x0EC)
+#define LTM_BELT_ACT1_ACT10_		(0x00000FFF)
+
+#define LTM_INACTIVE0			(0x0F0)
+#define LTM_INACTIVE0_TIMER1000_	(0xFFFF0000)
+#define LTM_INACTIVE0_TIMER100_		(0x0000FFFF)
+
+#define LTM_INACTIVE1			(0x0F4)
+#define LTM_INACTIVE1_TIMER10_		(0x0000FFFF)
+
+#define MAC_CR				(0x100)
+#define MAC_CR_GMII_EN_			(0x00080000)
+#define MAC_CR_EEE_TX_CLK_STOP_EN_	(0x00040000)
+#define MAC_CR_EEE_EN_			(0x00020000)
+#define MAC_CR_EEE_TLAR_EN_		(0x00010000)
+#define MAC_CR_ADP_			(0x00002000)
+#define MAC_CR_AUTO_DUPLEX_		(0x00001000)
+#define MAC_CR_AUTO_SPEED_		(0x00000800)
+#define MAC_CR_LOOPBACK_		(0x00000400)
+#define MAC_CR_BOLMT_MASK_		(0x000000C0)
+#define MAC_CR_FULL_DUPLEX_		(0x00000008)
+#define MAC_CR_SPEED_MASK_		(0x00000006)
+#define MAC_CR_SPEED_1000_		(0x00000004)
+#define MAC_CR_SPEED_100_		(0x00000002)
+#define MAC_CR_SPEED_10_		(0x00000000)
+#define MAC_CR_RST_			(0x00000001)
+
+#define MAC_RX				(0x104)
+#define MAC_RX_MAX_SIZE_SHIFT_		(16)
+#define MAC_RX_MAX_SIZE_MASK_		(0x3FFF0000)
+#define MAC_RX_FCS_STRIP_		(0x00000010)
+#define MAC_RX_VLAN_FSE_		(0x00000004)
+#define MAC_RX_RXD_			(0x00000002)
+#define MAC_RX_RXEN_			(0x00000001)
+
+#define MAC_TX				(0x108)
+#define MAC_TX_BAD_FCS_			(0x00000004)
+#define MAC_TX_TXD_			(0x00000002)
+#define MAC_TX_TXEN_			(0x00000001)
+
+#define FLOW				(0x10C)
+#define FLOW_CR_FORCE_FC_		(0x80000000)
+#define FLOW_CR_TX_FCEN_		(0x40000000)
+#define FLOW_CR_RX_FCEN_		(0x20000000)
+#define FLOW_CR_FPF_			(0x10000000)
+#define FLOW_CR_FCPT_MASK_		(0x0000FFFF)
+
+#define RAND_SEED			(0x110)
+#define RAND_SEED_MASK_			(0x0000FFFF)
+
+#define ERR_STS				(0x114)
+#define ERR_STS_FERR_			(0x00000100)
+#define ERR_STS_LERR_			(0x00000080)
+#define ERR_STS_RFERR_			(0x00000040)
+#define ERR_STS_ECERR_			(0x00000010)
+#define ERR_STS_ALERR_			(0x00000008)
+#define ERR_STS_URERR_			(0x00000004)
+
+#define RX_ADDRH			(0x118)
+#define RX_ADDRH_MASK_			(0x0000FFFF)
+
+#define RX_ADDRL			(0x11C)
+#define RX_ADDRL_MASK_			(0xFFFFFFFF)
+
+#define MII_ACC				(0x120)
+#define MII_ACC_PHY_ADDR_SHIFT_		(11)
+#define MII_ACC_PHY_ADDR_MASK_		(0x0000F800)
+#define MII_ACC_MIIRINDA_SHIFT_		(6)
+#define MII_ACC_MIIRINDA_MASK_		(0x000007C0)
+#define MII_ACC_MII_READ_		(0x00000000)
+#define MII_ACC_MII_WRITE_		(0x00000002)
+#define MII_ACC_MII_BUSY_		(0x00000001)
+
+#define MII_DATA			(0x124)
+#define MII_DATA_MASK_			(0x0000FFFF)
+
+#define MAC_RGMII_ID			(0x128)
+#define MAC_RGMII_ID_TXC_DELAY_EN_	(0x00000002)
+#define MAC_RGMII_ID_RXC_DELAY_EN_	(0x00000001)
+
+#define EEE_TX_LPI_REQ_DLY		(0x130)
+#define EEE_TX_LPI_REQ_DLY_CNT_MASK_	(0xFFFFFFFF)
+
+#define EEE_TW_TX_SYS			(0x134)
+#define EEE_TW_TX_SYS_CNT1G_MASK_	(0xFFFF0000)
+#define EEE_TW_TX_SYS_CNT100M_MASK_	(0x0000FFFF)
+
+#define EEE_TX_LPI_REM_DLY		(0x138)
+#define EEE_TX_LPI_REM_DLY_CNT_		(0x00FFFFFF)
+
+#define WUCSR				(0x140)
+#define WUCSR_TESTMODE_			(0x80000000)
+#define WUCSR_RFE_WAKE_EN_		(0x00004000)
+#define WUCSR_EEE_TX_WAKE_		(0x00002000)
+#define WUCSR_EEE_TX_WAKE_EN_		(0x00001000)
+#define WUCSR_EEE_RX_WAKE_		(0x00000800)
+#define WUCSR_EEE_RX_WAKE_EN_		(0x00000400)
+#define WUCSR_RFE_WAKE_FR_		(0x00000200)
+#define WUCSR_STORE_WAKE_		(0x00000100)
+#define WUCSR_PFDA_FR_			(0x00000080)
+#define WUCSR_WUFR_			(0x00000040)
+#define WUCSR_MPR_			(0x00000020)
+#define WUCSR_BCST_FR_			(0x00000010)
+#define WUCSR_PFDA_EN_			(0x00000008)
+#define WUCSR_WAKE_EN_			(0x00000004)
+#define WUCSR_MPEN_			(0x00000002)
+#define WUCSR_BCST_EN_			(0x00000001)
+
+#define WK_SRC				(0x144)
+#define WK_SRC_GPIOX_INT_WK_SHIFT_	(20)
+#define WK_SRC_GPIOX_INT_WK_MASK_	(0xFFF00000)
+#define WK_SRC_IPV6_TCPSYN_RCD_WK_	(0x00010000)
+#define WK_SRC_IPV4_TCPSYN_RCD_WK_	(0x00008000)
+#define WK_SRC_EEE_TX_WK_		(0x00004000)
+#define WK_SRC_EEE_RX_WK_		(0x00002000)
+#define WK_SRC_GOOD_FR_WK_		(0x00001000)
+#define WK_SRC_PFDA_FR_WK_		(0x00000800)
+#define WK_SRC_MP_FR_WK_		(0x00000400)
+#define WK_SRC_BCAST_FR_WK_		(0x00000200)
+#define WK_SRC_WU_FR_WK_		(0x00000100)
+#define WK_SRC_WUFF_MATCH_MASK_		(0x0000001F)
+
+#define WUF_CFG0			(0x150)
+#define NUM_OF_WUF_CFG			(32)
+#define WUF_CFG_BEGIN			(WUF_CFG0)
+#define WUF_CFG(index)			(WUF_CFG_BEGIN + (4 * (index)))
+#define WUF_CFGX_EN_			(0x80000000)
+#define WUF_CFGX_TYPE_MASK_		(0x03000000)
+#define WUF_CFGX_TYPE_MCAST_		(0x02000000)
+#define WUF_CFGX_TYPE_ALL_		(0x01000000)
+#define WUF_CFGX_TYPE_UCAST_		(0x00000000)
+#define WUF_CFGX_OFFSET_SHIFT_		(16)
+#define WUF_CFGX_OFFSET_MASK_		(0x00FF0000)
+#define WUF_CFGX_CRC16_MASK_		(0x0000FFFF)
+
+#define WUF_MASK0_0			(0x200)
+#define WUF_MASK0_1			(0x204)
+#define WUF_MASK0_2			(0x208)
+#define WUF_MASK0_3			(0x20C)
+#define NUM_OF_WUF_MASK			(32)
+#define WUF_MASK0_BEGIN			(WUF_MASK0_0)
+#define WUF_MASK1_BEGIN			(WUF_MASK0_1)
+#define WUF_MASK2_BEGIN			(WUF_MASK0_2)
+#define WUF_MASK3_BEGIN			(WUF_MASK0_3)
+#define WUF_MASK0(index)		(WUF_MASK0_BEGIN + (0x10 * (index)))
+#define WUF_MASK1(index)		(WUF_MASK1_BEGIN + (0x10 * (index)))
+#define WUF_MASK2(index)		(WUF_MASK2_BEGIN + (0x10 * (index)))
+#define WUF_MASK3(index)		(WUF_MASK3_BEGIN + (0x10 * (index)))
+
+#define MAF_BASE			(0x400)
+#define MAF_HIX				(0x00)
+#define MAF_LOX				(0x04)
+#define NUM_OF_MAF			(33)
+#define MAF_HI_BEGIN			(MAF_BASE + MAF_HIX)
+#define MAF_LO_BEGIN			(MAF_BASE + MAF_LOX)
+#define MAF_HI(index)			(MAF_BASE + (8 * (index)) + (MAF_HIX))
+#define MAF_LO(index)			(MAF_BASE + (8 * (index)) + (MAF_LOX))
+#define MAF_HI_VALID_			(0x80000000)
+#define MAF_HI_TYPE_MASK_		(0x40000000)
+#define MAF_HI_TYPE_SRC_		(0x40000000)
+#define MAF_HI_TYPE_DST_		(0x00000000)
+#define MAF_HI_ADDR_MASK		(0x0000FFFF)
+#define MAF_LO_ADDR_MASK		(0xFFFFFFFF)
+
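The indexed macros above encode regular register strides, so any filter's
registers can be computed directly; a few worked addresses (illustrative
arithmetic only):

	WUF_CFG(5)   == 0x150 + 4 * 5     == 0x164   /* one config word per filter */
	WUF_MASK2(3) == 0x208 + 0x10 * 3  == 0x238   /* 16 bytes of mask per filter */
	MAF_HI(1)    == 0x400 + 8 * 1     == 0x408   /* 8 bytes (HI + LO) per entry */
	MAF_LO(1)    == 0x400 + 8 * 1 + 4 == 0x40C
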
+#define WUCSR2				(0x600)
+#define WUCSR2_CSUM_DISABLE_		(0x80000000)
+#define WUCSR2_NA_SA_SEL_		(0x00000100)
+#define WUCSR2_NS_RCD_			(0x00000080)
+#define WUCSR2_ARP_RCD_			(0x00000040)
+#define WUCSR2_IPV6_TCPSYN_RCD_		(0x00000020)
+#define WUCSR2_IPV4_TCPSYN_RCD_		(0x00000010)
+#define WUCSR2_NS_OFFLOAD_EN_		(0x00000008)
+#define WUCSR2_ARP_OFFLOAD_EN_		(0x00000004)
+#define WUCSR2_IPV6_TCPSYN_WAKE_EN_	(0x00000002)
+#define WUCSR2_IPV4_TCPSYN_WAKE_EN_	(0x00000001)
+
+#define NS1_IPV6_ADDR_DEST0		(0x610)
+#define NS1_IPV6_ADDR_DEST1		(0x614)
+#define NS1_IPV6_ADDR_DEST2		(0x618)
+#define NS1_IPV6_ADDR_DEST3		(0x61C)
+
+#define NS1_IPV6_ADDR_SRC0		(0x620)
+#define NS1_IPV6_ADDR_SRC1		(0x624)
+#define NS1_IPV6_ADDR_SRC2		(0x628)
+#define NS1_IPV6_ADDR_SRC3		(0x62C)
+
+#define NS1_ICMPV6_ADDR0_0		(0x630)
+#define NS1_ICMPV6_ADDR0_1		(0x634)
+#define NS1_ICMPV6_ADDR0_2		(0x638)
+#define NS1_ICMPV6_ADDR0_3		(0x63C)
+
+#define NS1_ICMPV6_ADDR1_0		(0x640)
+#define NS1_ICMPV6_ADDR1_1		(0x644)
+#define NS1_ICMPV6_ADDR1_2		(0x648)
+#define NS1_ICMPV6_ADDR1_3		(0x64C)
+
+#define NS2_IPV6_ADDR_DEST0		(0x650)
+#define NS2_IPV6_ADDR_DEST1		(0x654)
+#define NS2_IPV6_ADDR_DEST2		(0x658)
+#define NS2_IPV6_ADDR_DEST3		(0x65C)
+
+#define NS2_IPV6_ADDR_SRC0		(0x660)
+#define NS2_IPV6_ADDR_SRC1		(0x664)
+#define NS2_IPV6_ADDR_SRC2		(0x668)
+#define NS2_IPV6_ADDR_SRC3		(0x66C)
+
+#define NS2_ICMPV6_ADDR0_0		(0x670)
+#define NS2_ICMPV6_ADDR0_1		(0x674)
+#define NS2_ICMPV6_ADDR0_2		(0x678)
+#define NS2_ICMPV6_ADDR0_3		(0x67C)
+
+#define NS2_ICMPV6_ADDR1_0		(0x680)
+#define NS2_ICMPV6_ADDR1_1		(0x684)
+#define NS2_ICMPV6_ADDR1_2		(0x688)
+#define NS2_ICMPV6_ADDR1_3		(0x68C)
+
+#define SYN_IPV4_ADDR_SRC		(0x690)
+#define SYN_IPV4_ADDR_DEST		(0x694)
+#define SYN_IPV4_TCP_PORTS		(0x698)
+#define SYN_IPV4_TCP_PORTS_IPV4_DEST_PORT_SHIFT_    (16)
+#define SYN_IPV4_TCP_PORTS_IPV4_DEST_PORT_MASK_     (0xFFFF0000)
+#define SYN_IPV4_TCP_PORTS_IPV4_SRC_PORT_MASK_	    (0x0000FFFF)
+
+#define SYN_IPV6_ADDR_SRC0		(0x69C)
+#define SYN_IPV6_ADDR_SRC1		(0x6A0)
+#define SYN_IPV6_ADDR_SRC2		(0x6A4)
+#define SYN_IPV6_ADDR_SRC3		(0x6A8)
+
+#define SYN_IPV6_ADDR_DEST0		(0x6AC)
+#define SYN_IPV6_ADDR_DEST1		(0x6B0)
+#define SYN_IPV6_ADDR_DEST2		(0x6B4)
+#define SYN_IPV6_ADDR_DEST3		(0x6B8)
+
+#define SYN_IPV6_TCP_PORTS		(0x6BC)
+#define SYN_IPV6_TCP_PORTS_IPV6_DEST_PORT_SHIFT_    (16)
+#define SYN_IPV6_TCP_PORTS_IPV6_DEST_PORT_MASK_     (0xFFFF0000)
+#define SYN_IPV6_TCP_PORTS_IPV6_SRC_PORT_MASK_	    (0x0000FFFF)
+
+#define ARP_SPA				(0x6C0)
+#define ARP_TPA				(0x6C4)
+
+#define PHY_DEV_ID			(0x700)
+#define PHY_DEV_ID_REV_SHIFT_		(28)
+#define PHY_DEV_ID_REV_MASK_		(0xF0000000)
+#define PHY_DEV_ID_MODEL_SHIFT_		(22)
+#define PHY_DEV_ID_MODEL_MASK_		(0x0FC00000)
+#define PHY_DEV_ID_OUI_MASK_		(0x003FFFFF)
+
+#define OTP_BASE_ADDR			(0x00001000)
+#define OTP_ADDR_RANGE_			(0x1FF)
+
+#define OTP_PWR_DN			(OTP_BASE_ADDR + 4 * 0x00)
+#define OTP_PWR_DN_PWRDN_N_		(0x01)
+
+#define OTP_ADDR1			(OTP_BASE_ADDR + 4 * 0x01)
+#define OTP_ADDR1_15_11			(0x1F)
+
+#define OTP_ADDR2			(OTP_BASE_ADDR + 4 * 0x02)
+#define OTP_ADDR2_10_3			(0xFF)
+
+#define OTP_ADDR3			(OTP_BASE_ADDR + 4 * 0x03)
+#define OTP_ADDR3_2_0			(0x03)
+
+#define OTP_PRGM_DATA			(OTP_BASE_ADDR + 4 * 0x04)
+
+#define OTP_PRGM_MODE			(OTP_BASE_ADDR + 4 * 0x05)
+#define OTP_PRGM_MODE_BYTE_		(0x01)
+
+#define OTP_RD_DATA			(OTP_BASE_ADDR + 4 * 0x06)
+
+#define OTP_FUNC_CMD			(OTP_BASE_ADDR + 4 * 0x08)
+#define OTP_FUNC_CMD_RESET_		(0x04)
+#define OTP_FUNC_CMD_PROGRAM_		(0x02)
+#define OTP_FUNC_CMD_READ_		(0x01)
+
+#define OTP_TST_CMD			(OTP_BASE_ADDR + 4 * 0x09)
+#define OTP_TST_CMD_TEST_DEC_SEL_	(0x10)
+#define OTP_TST_CMD_PRGVRFY_		(0x08)
+#define OTP_TST_CMD_WRTEST_		(0x04)
+#define OTP_TST_CMD_TESTDEC_		(0x02)
+#define OTP_TST_CMD_BLANKCHECK_		(0x01)
+
+#define OTP_CMD_GO			(OTP_BASE_ADDR + 4 * 0x0A)
+#define OTP_CMD_GO_GO_			(0x01)
+
+#define OTP_PASS_FAIL			(OTP_BASE_ADDR + 4 * 0x0B)
+#define OTP_PASS_FAIL_PASS_		(0x02)
+#define OTP_PASS_FAIL_FAIL_		(0x01)
+
+#define OTP_STATUS			(OTP_BASE_ADDR + 4 * 0x0C)
+#define OTP_STATUS_OTP_LOCK_		(0x10)
+#define OTP_STATUS_WEB_			(0x08)
+#define OTP_STATUS_PGMEN		(0x04)
+#define OTP_STATUS_CPUMPEN_		(0x02)
+#define OTP_STATUS_BUSY_		(0x01)
+
+#define OTP_MAX_PRG			(OTP_BASE_ADDR + 4 * 0x0D)
+#define OTP_MAX_PRG_MAX_PROG		(0x1F)
+
+#define OTP_INTR_STATUS			(OTP_BASE_ADDR + 4 * 0x10)
+#define OTP_INTR_STATUS_READY_		(0x01)
+
+#define OTP_INTR_MASK			(OTP_BASE_ADDR + 4 * 0x11)
+#define OTP_INTR_MASK_READY_		(0x01)
+
+#define OTP_RSTB_PW1			(OTP_BASE_ADDR + 4 * 0x14)
+#define OTP_RSTB_PW2			(OTP_BASE_ADDR + 4 * 0x15)
+#define OTP_PGM_PW1			(OTP_BASE_ADDR + 4 * 0x18)
+#define OTP_PGM_PW2			(OTP_BASE_ADDR + 4 * 0x19)
+#define OTP_READ_PW1			(OTP_BASE_ADDR + 4 * 0x1C)
+#define OTP_READ_PW2			(OTP_BASE_ADDR + 4 * 0x1D)
+#define OTP_TCRST			(OTP_BASE_ADDR + 4 * 0x20)
+#define OTP_RSRD			(OTP_BASE_ADDR + 4 * 0x21)
+#define OTP_TREADEN_VAL			(OTP_BASE_ADDR + 4 * 0x22)
+#define OTP_TDLES_VAL			(OTP_BASE_ADDR + 4 * 0x23)
+#define OTP_TWWL_VAL			(OTP_BASE_ADDR + 4 * 0x24)
+#define OTP_TDLEH_VAL			(OTP_BASE_ADDR + 4 * 0x25)
+#define OTP_TWPED_VAL			(OTP_BASE_ADDR + 4 * 0x26)
+#define OTP_TPES_VAL			(OTP_BASE_ADDR + 4 * 0x27)
+#define OTP_TCPS_VAL			(OTP_BASE_ADDR + 4 * 0x28)
+#define OTP_TCPH_VAL			(OTP_BASE_ADDR + 4 * 0x29)
+#define OTP_TPGMVFY_VAL			(OTP_BASE_ADDR + 4 * 0x2A)
+#define OTP_TPEH_VAL			(OTP_BASE_ADDR + 4 * 0x2B)
+#define OTP_TPGRST_VAL			(OTP_BASE_ADDR + 4 * 0x2C)
+#define OTP_TCLES_VAL			(OTP_BASE_ADDR + 4 * 0x2D)
+#define OTP_TCLEH_VAL			(OTP_BASE_ADDR + 4 * 0x2E)
+#define OTP_TRDES_VAL			(OTP_BASE_ADDR + 4 * 0x2F)
+#define OTP_TBCACC_VAL			(OTP_BASE_ADDR + 4 * 0x30)
+#define OTP_TAAC_VAL			(OTP_BASE_ADDR + 4 * 0x31)
+#define OTP_TACCT_VAL			(OTP_BASE_ADDR + 4 * 0x32)
+#define OTP_TRDEP_VAL			(OTP_BASE_ADDR + 4 * 0x38)
+#define OTP_TPGSV_VAL			(OTP_BASE_ADDR + 4 * 0x39)
+#define OTP_TPVSR_VAL			(OTP_BASE_ADDR + 4 * 0x3A)
+#define OTP_TPVHR_VAL			(OTP_BASE_ADDR + 4 * 0x3B)
+#define OTP_TPVSA_VAL			(OTP_BASE_ADDR + 4 * 0x3C)
+
+#define PHY_ID1				(0x02)
+#define PHY_ID2				(0x03)
+
+#define PHY_DEV_ID_OUI_VTSE		(0x04001C)
+#define PHY_DEV_ID_MODEL_VTSE_8502	(0x23)
+
+#define PHY_AUTONEG_ADV			(0x04)
+#define NWAY_AR_NEXT_PAGE_		(0x8000)
+#define NWAY_AR_REMOTE_FAULT_		(0x2000)
+#define NWAY_AR_ASM_DIR_		(0x0800)
+#define NWAY_AR_PAUSE_			(0x0400)
+#define NWAY_AR_100T4_CAPS_		(0x0200)
+#define NWAY_AR_100TX_FD_CAPS_		(0x0100)
+#define NWAY_AR_SELECTOR_FIELD_		(0x001F)
+#define NWAY_AR_100TX_HD_CAPS_		(0x0080)
+#define NWAY_AR_10T_FD_CAPS_		(0x0040)
+#define NWAY_AR_10T_HD_CAPS_		(0x0020)
+#define NWAY_AR_ALL_CAPS_		(NWAY_AR_10T_HD_CAPS_ | \
+					 NWAY_AR_10T_FD_CAPS_ | \
+					 NWAY_AR_100TX_HD_CAPS_ | \
+					 NWAY_AR_100TX_FD_CAPS_)
+#define NWAY_AR_PAUSE_MASK		(NWAY_AR_PAUSE_ | NWAY_AR_ASM_DIR_)
+
+#define PHY_LP_ABILITY			(0x05)
+#define NWAY_LPAR_NEXT_PAGE_		(0x8000)
+#define NWAY_LPAR_ACKNOWLEDGE_		(0x4000)
+#define NWAY_LPAR_REMOTE_FAULT_		(0x2000)
+#define NWAY_LPAR_ASM_DIR_		(0x0800)
+#define NWAY_LPAR_PAUSE_		(0x0400)
+#define NWAY_LPAR_100T4_CAPS_		(0x0200)
+#define NWAY_LPAR_100TX_FD_CAPS_	(0x0100)
+#define NWAY_LPAR_100TX_HD_CAPS_	(0x0080)
+#define NWAY_LPAR_10T_FD_CAPS_		(0x0040)
+#define NWAY_LPAR_10T_HD_CAPS_		(0x0020)
+#define NWAY_LPAR_SELECTOR_FIELD_	(0x001F)
+
+#define PHY_AUTONEG_EXP			(0x06)
+#define NWAY_ER_PAR_DETECT_FAULT_	(0x0010)
+#define NWAY_ER_LP_NEXT_PAGE_CAPS_	(0x0008)
+#define NWAY_ER_NEXT_PAGE_CAPS_		(0x0004)
+#define NWAY_ER_PAGE_RXD_		(0x0002)
+#define NWAY_ER_LP_NWAY_CAPS_		(0x0001)
+
+#define PHY_NEXT_PAGE_TX		(0x07)
+#define NPTX_NEXT_PAGE_			(0x8000)
+#define NPTX_MSG_PAGE_			(0x2000)
+#define NPTX_ACKNOWLDGE2_		(0x1000)
+#define NPTX_TOGGLE_			(0x0800)
+#define NPTX_MSG_CODE_FIELD_		(0x0001)
+
+#define PHY_LP_NEXT_PAGE		(0x08)
+#define LP_RNPR_NEXT_PAGE_		(0x8000)
+#define LP_RNPR_ACKNOWLDGE_		(0x4000)
+#define LP_RNPR_MSG_PAGE_		(0x2000)
+#define LP_RNPR_ACKNOWLDGE2_		(0x1000)
+#define LP_RNPR_TOGGLE_			(0x0800)
+#define LP_RNPR_MSG_CODE_FIELD_		(0x0001)
+
+#define PHY_1000T_CTRL			(0x09)
+#define CR_1000T_TEST_MODE_4_		(0x8000)
+#define CR_1000T_TEST_MODE_3_		(0x6000)
+#define CR_1000T_TEST_MODE_2_		(0x4000)
+#define CR_1000T_TEST_MODE_1_		(0x2000)
+#define CR_1000T_MS_ENABLE_		(0x1000)
+#define CR_1000T_MS_VALUE_		(0x0800)
+#define CR_1000T_REPEATER_DTE_		(0x0400)
+#define CR_1000T_FD_CAPS_		(0x0200)
+#define CR_1000T_HD_CAPS_		(0x0100)
+#define CR_1000T_ASYM_PAUSE_		(0x0080)
+#define CR_1000T_TEST_MODE_NORMAL_	(0x0000)
+
+#define PHY_1000T_STATUS		(0x0A)
+#define SR_1000T_MS_CONFIG_FAULT_	(0x8000)
+#define SR_1000T_MS_CONFIG_RES_		(0x4000)
+#define SR_1000T_LOCAL_RX_STATUS_	(0x2000)
+#define SR_1000T_REMOTE_RX_STATUS_	(0x1000)
+#define SR_1000T_LP_FD_CAPS_		(0x0800)
+#define SR_1000T_LP_HD_CAPS_		(0x0400)
+#define SR_1000T_ASYM_PAUSE_DIR_	(0x0100)
+#define SR_1000T_IDLE_ERROR_CNT_	(0x00FF)
+#define SR_1000T_REMOTE_RX_STATUS_SHIFT		12
+#define SR_1000T_LOCAL_RX_STATUS_SHIFT		13
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT	5
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20		20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100		100
+
+#define PHY_EXT_STATUS			(0x0F)
+#define IEEE_ESR_1000X_FD_CAPS_		(0x8000)
+#define IEEE_ESR_1000X_HD_CAPS_		(0x4000)
+#define IEEE_ESR_1000T_FD_CAPS_		(0x2000)
+#define IEEE_ESR_1000T_HD_CAPS_		(0x1000)
+#define PHY_TX_POLARITY_MASK_		(0x0100)
+#define PHY_TX_NORMAL_POLARITY_		(0x0000)
+#define AUTO_POLARITY_DISABLE_		(0x0010)
+
+#define PHY_MMD_CTL			(0x0D)
+#define PHY_MMD_CTRL_OP_MASK_		(0xC000)
+#define PHY_MMD_CTRL_OP_REG_		(0x0000)
+#define PHY_MMD_CTRL_OP_DNI_		(0x4000)
+#define PHY_MMD_CTRL_OP_DPIRW_		(0x8000)
+#define PHY_MMD_CTRL_OP_DPIWO_		(0xC000)
+#define PHY_MMD_CTRL_DEV_ADDR_MASK_	(0x001F)
+
+#define PHY_MMD_REG_DATA		(0x0E)
+
+/* VTSE Vendor Specific registers */
+#define PHY_VTSE_BYPASS				(0x12)
+#define PHY_VTSE_BYPASS_DISABLE_PAIR_SWAP_	(0x0020)
+
+#define PHY_VTSE_INT_MASK			(0x19)
+#define PHY_VTSE_INT_MASK_MDINTPIN_EN_		(0x8000)
+#define PHY_VTSE_INT_MASK_SPEED_CHANGE_		(0x4000)
+#define PHY_VTSE_INT_MASK_LINK_CHANGE_		(0x2000)
+#define PHY_VTSE_INT_MASK_FDX_CHANGE_		(0x1000)
+#define PHY_VTSE_INT_MASK_AUTONEG_ERR_		(0x0800)
+#define PHY_VTSE_INT_MASK_AUTONEG_DONE_		(0x0400)
+#define PHY_VTSE_INT_MASK_POE_DETECT_		(0x0200)
+#define PHY_VTSE_INT_MASK_SYMBOL_ERR_		(0x0100)
+#define PHY_VTSE_INT_MASK_FAST_LINK_FAIL_	(0x0080)
+#define PHY_VTSE_INT_MASK_WOL_EVENT_		(0x0040)
+#define PHY_VTSE_INT_MASK_EXTENDED_INT_		(0x0020)
+#define PHY_VTSE_INT_MASK_RESERVED_		(0x0010)
+#define PHY_VTSE_INT_MASK_FALSE_CARRIER_	(0x0008)
+#define PHY_VTSE_INT_MASK_LINK_SPEED_DS_	(0x0004)
+#define PHY_VTSE_INT_MASK_MASTER_SLAVE_DONE_	(0x0002)
+#define PHY_VTSE_INT_MASK_RX__ER_		(0x0001)
+
+#define PHY_VTSE_INT_STS			(0x1A)
+#define PHY_VTSE_INT_STS_INT_ACTIVE_		(0x8000)
+#define PHY_VTSE_INT_STS_SPEED_CHANGE_		(0x4000)
+#define PHY_VTSE_INT_STS_LINK_CHANGE_		(0x2000)
+#define PHY_VTSE_INT_STS_FDX_CHANGE_		(0x1000)
+#define PHY_VTSE_INT_STS_AUTONEG_ERR_		(0x0800)
+#define PHY_VTSE_INT_STS_AUTONEG_DONE_		(0x0400)
+#define PHY_VTSE_INT_STS_POE_DETECT_		(0x0200)
+#define PHY_VTSE_INT_STS_SYMBOL_ERR_		(0x0100)
+#define PHY_VTSE_INT_STS_FAST_LINK_FAIL_	(0x0080)
+#define PHY_VTSE_INT_STS_WOL_EVENT_		(0x0040)
+#define PHY_VTSE_INT_STS_EXTENDED_INT_		(0x0020)
+#define PHY_VTSE_INT_STS_RESERVED_		(0x0010)
+#define PHY_VTSE_INT_STS_FALSE_CARRIER_		(0x0008)
+#define PHY_VTSE_INT_STS_LINK_SPEED_DS_		(0x0004)
+#define PHY_VTSE_INT_STS_MASTER_SLAVE_DONE_	(0x0002)
+#define PHY_VTSE_INT_STS_RX_ER_			(0x0001)
+
+/* VTSE PHY registers */
+#define PHY_EXT_GPIO_PAGE		(0x1F)
+#define PHY_EXT_GPIO_PAGE_SPACE_0	(0x0000)
+#define PHY_EXT_GPIO_PAGE_SPACE_1	(0x0001)
+#define PHY_EXT_GPIO_PAGE_SPACE_2	(0x0002)
+
+/* Extended Register Page 1 space */
+#define PHY_EXT_MODE_CTRL		(0x13)
+#define PHY_EXT_MODE_CTRL_MDIX_MASK_	(0x000C)
+#define PHY_EXT_MODE_CTRL_AUTO_MDIX_	(0x0000)
+#define PHY_EXT_MODE_CTRL_MDI_		(0x0008)
+#define PHY_EXT_MODE_CTRL_MDI_X_	(0x000C)
+
+#define PHY_ANA_10BASE_T_HD		0x01
+#define PHY_ANA_10BASE_T_FD		0x02
+#define PHY_ANA_100BASE_TX_HD		0x04
+#define PHY_ANA_100BASE_TX_FD		0x08
+#define PHY_ANA_1000BASE_T_FD		0x10
+#define PHY_ANA_ALL_SUPPORTED_MEDIA	(PHY_ANA_10BASE_T_HD |	 \
+					 PHY_ANA_10BASE_T_FD |	 \
+					 PHY_ANA_100BASE_TX_HD | \
+					 PHY_ANA_100BASE_TX_FD | \
+					 PHY_ANA_1000BASE_T_FD)
+/* PHY MMD registers */
+#define PHY_MMD_DEV_3				3
+
+#define PHY_EEE_PCS_STATUS			(0x1)
+#define PHY_EEE_PCS_STATUS_TX_LPI_RCVD_		((WORD)0x0800)
+#define PHY_EEE_PCS_STATUS_RX_LPI_RCVD_		((WORD)0x0400)
+#define PHY_EEE_PCS_STATUS_TX_LPI_IND_		((WORD)0x0200)
+#define PHY_EEE_PCS_STATUS_RX_LPI_IND_		((WORD)0x0100)
+#define PHY_EEE_PCS_STATUS_PCS_RCV_LNK_STS_	((WORD)0x0004)
+
+#define PHY_EEE_CAPABILITIES			(0x14)
+#define PHY_EEE_CAPABILITIES_1000BT_EEE_	((WORD)0x0004)
+#define PHY_EEE_CAPABILITIES_100BT_EEE_		((WORD)0x0002)
+
+#define PHY_MMD_DEV_7				7
+
+#define PHY_EEE_ADVERTISEMENT			(0x3C)
+#define PHY_EEE_ADVERTISEMENT_1000BT_EEE_	((WORD)0x0004)
+#define PHY_EEE_ADVERTISEMENT_100BT_EEE_	((WORD)0x0002)
+
+#define PHY_EEE_LP_ADVERTISEMENT		(0x3D)
+#define PHY_EEE_1000BT_EEE_CAPABLE_		((WORD)0x0004)
+#define PHY_EEE_100BT_EEE_CAPABLE_		((WORD)0x0002)
+#endif /* _LAN78XX_H */
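
Most PHY access behind this header reduces to the MII_ACC/MII_DATA pair:
compose an address/command word, poll the busy bit, then transfer 16 bits
of data. A minimal MDIO read sketch, again assuming the driver's
lan78xx_read_reg()/lan78xx_write_reg() accessors and omitting the timeout
handling a real implementation needs:

static int lan78xx_mdio_read_sketch(struct lan78xx_net *dev,
				    int phy_id, int idx)
{
	u32 addr, val;

	addr = ((u32)phy_id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	addr |= ((u32)idx << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	addr |= MII_ACC_MII_READ_ | MII_ACC_MII_BUSY_;
	lan78xx_write_reg(dev, MII_ACC, addr);

	do {	/* the MAC clears the busy bit when the cycle completes */
		lan78xx_read_reg(dev, MII_ACC, &val);
	} while (val & MII_ACC_MII_BUSY_);

	lan78xx_read_reg(dev, MII_DATA, &val);
	return (int)(val & MII_DATA_MASK_);
}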
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 64a60af..355842b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -752,8 +752,8 @@
 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 19)},	/* Sierra Wireless MC7710 in QMI mode */
-	{QMI_FIXED_INTF(0x1199, 0x68c0, 8)},	/* Sierra Wireless MC73xx */
-	{QMI_FIXED_INTF(0x1199, 0x68c0, 10)},	/* Sierra Wireless MC73xx */
+	{QMI_FIXED_INTF(0x1199, 0x68c0, 8)},	/* Sierra Wireless MC7304/MC7354 */
+	{QMI_FIXED_INTF(0x1199, 0x68c0, 10)},	/* Sierra Wireless MC7304/MC7354 */
 	{QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */
 	{QMI_FIXED_INTF(0x1199, 0x901f, 8)},    /* Sierra Wireless EM7355 */
 	{QMI_FIXED_INTF(0x1199, 0x9041, 8)},	/* Sierra Wireless MC7305/MC7355 */
@@ -785,6 +785,7 @@
 	{QMI_FIXED_INTF(0x413c, 0x81a4, 8)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81a8, 8)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81a9, 8)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+	{QMI_FIXED_INTF(0x413c, 0x81b1, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
 	{QMI_FIXED_INTF(0x03f0, 0x581d, 4)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
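
Each QMI_FIXED_INTF(vid, pid, intf) entry matches on a USB interface
number as well as on vendor/product IDs, which is why the MC7304/MC7354
is listed twice: depending on the active USB composition it exposes its
QMI function on interface 8 or interface 10. The macro is defined earlier
in qmi_wwan.c, roughly along these lines (paraphrased, not quoted):

#define QMI_FIXED_INTF(vend, prod, num) \
	USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
	.driver_info = (unsigned long)&qmi_wwan_info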
 
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ad8cbc6..fe4ec32 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -339,6 +339,7 @@
 
 /* USB_USB_CTRL */
 #define RX_AGG_DISABLE		0x0010
+#define RX_ZERO_EN		0x0080
 
 /* USB_U2P3_CTRL */
 #define U2P3_ENABLE		0x0001
@@ -622,6 +623,7 @@
 	RTL_VER_03,
 	RTL_VER_04,
 	RTL_VER_05,
+	RTL_VER_06,
 	RTL_VER_MAX
 };
 
@@ -2610,7 +2612,10 @@
 	u32 ocp_data;
 	u16 data;
 
-	ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+	if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
+	    tp->version == RTL_VER_05)
+		ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+
 	data = r8152_mdio_read(tp, MII_BMCR);
 	if (data & BMCR_PDOWN) {
 		data &= ~BMCR_PDOWN;
@@ -2711,7 +2716,7 @@
 
 	/* rx aggregation */
 	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
-	ocp_data &= ~RX_AGG_DISABLE;
+	ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
 	ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
 }
 
@@ -3241,7 +3246,7 @@
 
 	/* enable rx aggregation */
 	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
-	ocp_data &= ~RX_AGG_DISABLE;
+	ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
 	ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
 }
 
@@ -3293,6 +3298,13 @@
 		else
 			ocp_data |= DYNAMIC_BURST;
 		ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
+	} else if (tp->version == RTL_VER_06) {
+		ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1);
+		if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0)
+			ocp_data &= ~DYNAMIC_BURST;
+		else
+			ocp_data |= DYNAMIC_BURST;
+		ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
 	}
 
 	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2);
@@ -3988,6 +4000,10 @@
 		tp->version = RTL_VER_05;
 		tp->mii.supports_gmii = 1;
 		break;
+	case 0x5c30:
+		tp->version = RTL_VER_06;
+		tp->mii.supports_gmii = 1;
+		break;
 	default:
 		netif_info(tp, probe, tp->netdev,
 			   "Unknown version 0x%04x\n", version);
@@ -4033,6 +4049,7 @@
 	case RTL_VER_03:
 	case RTL_VER_04:
 	case RTL_VER_05:
+	case RTL_VER_06:
 		ops->init		= r8153_init;
 		ops->enable		= rtl8153_enable;
 		ops->disable		= rtl8153_disable;
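
The two RX-aggregation hunks above make the same change on the 8152 and
8153 paths: clear the newly defined RX_ZERO_EN bit together with
RX_AGG_DISABLE when programming USB_USB_CTRL. A hypothetical common helper
(ocp_read_word()/ocp_write_word() are the driver's existing accessors):

static void rtl_enable_rx_agg_sketch(struct r8152 *tp)
{
	u32 ocp_data;

	/* clear both bits in one read-modify-write of USB_USB_CTRL */
	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
	ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
	ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
}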
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index c8186ff..0ef4a5a 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -290,6 +290,7 @@
 	.ndo_poll_controller	= veth_poll_controller,
 #endif
 	.ndo_get_iflink		= veth_get_iflink,
+	.ndo_features_check	= passthru_features_check,
 };
 
 #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |    \
@@ -305,6 +306,7 @@
 
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+	dev->priv_flags |= IFF_NO_QUEUE;
 
 	dev->netdev_ops = &veth_netdev_ops;
 	dev->ethtool_ops = &veth_ethtool_ops;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 237f8e5..d8838ded 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -40,12 +40,12 @@
 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
 #define GOOD_COPY_LEN	128
 
-/* Weight used for the RX packet size EWMA. The average packet size is used to
- * determine the packet buffer size when refilling RX rings. As the entire RX
- * ring may be refilled at once, the weight is chosen so that the EWMA will be
- * insensitive to short-term, transient changes in packet size.
+/* RX packet size EWMA. The average packet size is used to determine the packet
+ * buffer size when refilling RX rings. As the entire RX ring may be refilled
+ * at once, the weight is chosen so that the EWMA will be insensitive to short-
+ * term, transient changes in packet size.
  */
-#define RECEIVE_AVG_WEIGHT 64
+DECLARE_EWMA(pkt_len, 1, 64)
 
 /* Minimum alignment for mergeable packet buffers. */
 #define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
@@ -85,7 +85,7 @@
 	struct page *pages;
 
 	/* Average packet length for mergeable receive buffers. */
-	struct ewma mrg_avg_pkt_len;
+	struct ewma_pkt_len mrg_avg_pkt_len;
 
 	/* Page frag for packet buffer allocation. */
 	struct page_frag alloc_frag;
@@ -407,7 +407,7 @@
 		}
 	}
 
-	ewma_add(&rq->mrg_avg_pkt_len, head_skb->len);
+	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
 	return head_skb;
 
 err_skb:
@@ -518,7 +518,7 @@
 
 	skb_mark_napi_id(skb, &rq->napi);
 
-	netif_receive_skb(skb);
+	napi_gro_receive(&rq->napi, skb);
 	return;
 
 frame_err:
@@ -540,7 +540,7 @@
 	skb_put(skb, GOOD_PACKET_LEN);
 
 	hdr = skb_vnet_hdr(skb);
-	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
+	sg_init_table(rq->sg, 2);
 	sg_set_buf(rq->sg, hdr, vi->hdr_len);
 	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
 
@@ -600,12 +600,12 @@
 	return err;
 }
 
-static unsigned int get_mergeable_buf_len(struct ewma *avg_pkt_len)
+static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
 {
 	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	unsigned int len;
 
-	len = hdr_len + clamp_t(unsigned int, ewma_read(avg_pkt_len),
+	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
 			GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
 	return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
 }
@@ -756,7 +756,7 @@
 	/* Out of packets? */
 	if (received < budget) {
 		r = virtqueue_enable_cb_prepare(rq->vq);
-		napi_complete(napi);
+		napi_complete_done(napi, received);
 		if (unlikely(virtqueue_poll(rq->vq, r)) &&
 		    napi_schedule_prep(napi)) {
 			virtqueue_disable_cb(rq->vq);
@@ -893,7 +893,7 @@
 	if (vi->mergeable_rx_bufs)
 		hdr->num_buffers = 0;
 
-	sg_init_table(sq->sg, MAX_SKB_FRAGS + 2);
+	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
 	if (can_push) {
 		__skb_push(skb, hdr_len);
 		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
@@ -1615,7 +1615,7 @@
 		napi_hash_add(&vi->rq[i].napi);
 
 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
-		ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
+		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
 		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
 	}
 
@@ -1658,7 +1658,7 @@
 {
 	struct virtnet_info *vi = netdev_priv(queue->dev);
 	unsigned int queue_index = get_netdev_rx_queue_index(queue);
-	struct ewma *avg;
+	struct ewma_pkt_len *avg;
 
 	BUG_ON(queue_index >= vi->max_queue_pairs);
 	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
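
The conversion above replaces the runtime-parameterized struct ewma with
DECLARE_EWMA(pkt_len, 1, 64), which generates a dedicated type and inline
helpers specialized at compile time (the first argument selects the
internal fixed-point scaling, here 1, and the second the weight, here the
old RECEIVE_AVG_WEIGHT of 64). A stand-alone sketch of the arithmetic the
generated helpers perform (not the kernel macro itself, which uses shifts
since the weight is a power of two):

struct ewma_sketch {
	unsigned long internal;		/* current average */
};

#define EWMA_WEIGHT	64UL

static void ewma_sketch_add(struct ewma_sketch *e, unsigned long val)
{
	/* avg <- (avg * (weight - 1) + sample) / weight */
	e->internal = e->internal ?
		(e->internal * (EWMA_WEIGHT - 1) + val) / EWMA_WEIGHT : val;
}

static unsigned long ewma_sketch_read(struct ewma_sketch *e)
{
	return e->internal;
}

With a weight of 64, a single sample moves the average by only 1/64 of the
difference, so short bursts of unusually large or small packets barely
perturb the buffer size chosen when refilling the RX rings.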
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
new file mode 100644
index 0000000..e7094fb
--- /dev/null
+++ b/drivers/net/vrf.c
@@ -0,0 +1,710 @@
+/*
+ * vrf.c: device driver to encapsulate a VRF space
+ *
+ * Copyright (c) 2015 Cumulus Networks. All rights reserved.
+ * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
+ * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
+ *
+ * Based on dummy, team and ipvlan drivers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/netfilter.h>
+#include <linux/rtnetlink.h>
+#include <net/rtnetlink.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/hashtable.h>
+
+#include <linux/inetdevice.h>
+#include <net/arp.h>
+#include <net/ip.h>
+#include <net/ip_fib.h>
+#include <net/ip6_route.h>
+#include <net/route.h>
+#include <net/addrconf.h>
+#include <net/vrf.h>
+
+#define DRV_NAME	"vrf"
+#define DRV_VERSION	"1.0"
+
+#define vrf_is_slave(dev)   ((dev)->flags & IFF_SLAVE)
+
+#define vrf_master_get_rcu(dev) \
+	((struct net_device *)rcu_dereference(dev->rx_handler_data))
+
+struct pcpu_dstats {
+	u64			tx_pkts;
+	u64			tx_bytes;
+	u64			tx_drps;
+	u64			rx_pkts;
+	u64			rx_bytes;
+	struct u64_stats_sync	syncp;
+};
+
+static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
+{
+	return dst;
+}
+
+static int vrf_ip_local_out(struct sk_buff *skb)
+{
+	return ip_local_out(skb);
+}
+
+static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
+{
+	/* TO-DO: return max ethernet size? */
+	return dst->dev->mtu;
+}
+
+static void vrf_dst_destroy(struct dst_entry *dst)
+{
+	/* our dst lives forever - or until the device is closed */
+}
+
+static unsigned int vrf_default_advmss(const struct dst_entry *dst)
+{
+	return 65535 - 40;
+}
+
+static struct dst_ops vrf_dst_ops = {
+	.family		= AF_INET,
+	.local_out	= vrf_ip_local_out,
+	.check		= vrf_ip_check,
+	.mtu		= vrf_v4_mtu,
+	.destroy	= vrf_dst_destroy,
+	.default_advmss	= vrf_default_advmss,
+};
+
+static bool is_ip_rx_frame(struct sk_buff *skb)
+{
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+	case htons(ETH_P_IPV6):
+		return true;
+	}
+	return false;
+}
+
+static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
+{
+	vrf_dev->stats.tx_errors++;
+	kfree_skb(skb);
+}
+
+/* note: already called with rcu_read_lock */
+static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
+{
+	struct sk_buff *skb = *pskb;
+
+	if (is_ip_rx_frame(skb)) {
+		struct net_device *dev = vrf_master_get_rcu(skb->dev);
+		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+		u64_stats_update_begin(&dstats->syncp);
+		dstats->rx_pkts++;
+		dstats->rx_bytes += skb->len;
+		u64_stats_update_end(&dstats->syncp);
+
+		skb->dev = dev;
+
+		return RX_HANDLER_ANOTHER;
+	}
+	return RX_HANDLER_PASS;
+}
+
+static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
+						 struct rtnl_link_stats64 *stats)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		const struct pcpu_dstats *dstats;
+		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
+		unsigned int start;
+
+		dstats = per_cpu_ptr(dev->dstats, i);
+		do {
+			start = u64_stats_fetch_begin_irq(&dstats->syncp);
+			tbytes = dstats->tx_bytes;
+			tpkts = dstats->tx_pkts;
+			tdrops = dstats->tx_drps;
+			rbytes = dstats->rx_bytes;
+			rpkts = dstats->rx_pkts;
+		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
+		stats->tx_bytes += tbytes;
+		stats->tx_packets += tpkts;
+		stats->tx_dropped += tdrops;
+		stats->rx_bytes += rbytes;
+		stats->rx_packets += rpkts;
+	}
+	return stats;
+}
+
+static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
+					   struct net_device *dev)
+{
+	vrf_tx_error(dev, skb);
+	return NET_XMIT_DROP;
+}
+
+static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
+			    struct net_device *vrf_dev)
+{
+	struct rtable *rt;
+	int err = 1;
+
+	rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL);
+	if (IS_ERR(rt))
+		goto out;
+
+	/* TO-DO: what about broadcast? */
+	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
+		ip_rt_put(rt);
+		goto out;
+	}
+
+	skb_dst_drop(skb);
+	skb_dst_set(skb, &rt->dst);
+	err = 0;
+out:
+	return err;
+}
+
+static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
+					   struct net_device *vrf_dev)
+{
+	struct iphdr *ip4h = ip_hdr(skb);
+	int ret = NET_XMIT_DROP;
+	struct flowi4 fl4 = {
+		/* needed to match OIF rule */
+		.flowi4_oif = vrf_dev->ifindex,
+		.flowi4_iif = LOOPBACK_IFINDEX,
+		.flowi4_tos = RT_TOS(ip4h->tos),
+		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC,
+		.daddr = ip4h->daddr,
+	};
+
+	if (vrf_send_v4_prep(skb, &fl4, vrf_dev))
+		goto err;
+
+	if (!ip4h->saddr) {
+		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
+					       RT_SCOPE_LINK);
+	}
+
+	ret = ip_local_out(skb);
+	if (unlikely(net_xmit_eval(ret)))
+		vrf_dev->stats.tx_errors++;
+	else
+		ret = NET_XMIT_SUCCESS;
+
+out:
+	return ret;
+err:
+	vrf_tx_error(vrf_dev, skb);
+	goto out;
+}
+
+static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
+{
+	/* strip the ethernet header added for the pass-through VRF device */
+	__skb_pull(skb, skb_network_offset(skb));
+
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		return vrf_process_v4_outbound(skb, dev);
+	case htons(ETH_P_IPV6):
+		return vrf_process_v6_outbound(skb, dev);
+	default:
+		vrf_tx_error(dev, skb);
+		return NET_XMIT_DROP;
+	}
+}
+
+static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	netdev_tx_t ret = is_ip_tx_frame(skb, dev);
+
+	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+		u64_stats_update_begin(&dstats->syncp);
+		dstats->tx_pkts++;
+		dstats->tx_bytes += skb->len;
+		u64_stats_update_end(&dstats->syncp);
+	} else {
+		this_cpu_inc(dev->dstats->tx_drps);
+	}
+
+	return ret;
+}
+
+/* modelled after ip_finish_output2 */
+static int vrf_finish_output(struct sock *sk, struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	struct rtable *rt = (struct rtable *)dst;
+	struct net_device *dev = dst->dev;
+	unsigned int hh_len = LL_RESERVED_SPACE(dev);
+	struct neighbour *neigh;
+	u32 nexthop;
+	int ret = -EINVAL;
+
+	/* Be paranoid, rather than too clever. */
+	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
+		struct sk_buff *skb2;
+
+		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
+		if (!skb2) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		if (skb->sk)
+			skb_set_owner_w(skb2, skb->sk);
+
+		consume_skb(skb);
+		skb = skb2;
+	}
+
+	rcu_read_lock_bh();
+
+	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
+	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
+	if (unlikely(!neigh))
+		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
+	if (!IS_ERR(neigh))
+		ret = dst_neigh_output(dst, neigh, skb);
+
+	rcu_read_unlock_bh();
+err:
+	if (unlikely(ret < 0))
+		vrf_tx_error(skb->dev, skb);
+	return ret;
+}
+
+static int vrf_output(struct sock *sk, struct sk_buff *skb)
+{
+	struct net_device *dev = skb_dst(skb)->dev;
+
+	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
+
+	skb->dev = dev;
+	skb->protocol = htons(ETH_P_IP);
+
+	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
+			    NULL, dev,
+			    vrf_finish_output,
+			    !(IPCB(skb)->flags & IPSKB_REROUTED));
+}
+
+static void vrf_rtable_destroy(struct net_vrf *vrf)
+{
+	struct dst_entry *dst = (struct dst_entry *)vrf->rth;
+
+	dst_destroy(dst);
+	vrf->rth = NULL;
+}
+
+static struct rtable *vrf_rtable_create(struct net_device *dev)
+{
+	struct rtable *rth;
+
+	rth = dst_alloc(&vrf_dst_ops, dev, 2,
+			DST_OBSOLETE_NONE,
+			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+	if (rth) {
+		rth->dst.output	= vrf_output;
+		rth->rt_genid	= rt_genid_ipv4(dev_net(dev));
+		rth->rt_flags	= 0;
+		rth->rt_type	= RTN_UNICAST;
+		rth->rt_is_input = 0;
+		rth->rt_iif	= 0;
+		rth->rt_pmtu	= 0;
+		rth->rt_gateway	= 0;
+		rth->rt_uses_gateway = 0;
+		INIT_LIST_HEAD(&rth->rt_uncached);
+		rth->rt_uncached_list = NULL;
+	}
+
+	return rth;
+}
+
+/**************************** device handling ********************/
+
+/* cycle interface to flush neighbor cache and move routes across tables */
+static void cycle_netdev(struct net_device *dev)
+{
+	unsigned int flags = dev->flags;
+	int ret;
+
+	if (!netif_running(dev))
+		return;
+
+	ret = dev_change_flags(dev, flags & ~IFF_UP);
+	if (ret >= 0)
+		ret = dev_change_flags(dev, flags);
+
+	if (ret < 0) {
+		netdev_err(dev,
+			   "Failed to cycle device %s; route tables might be wrong!\n",
+			   dev->name);
+	}
+}
+
+static struct slave *__vrf_find_slave_dev(struct slave_queue *queue,
+					  struct net_device *dev)
+{
+	struct list_head *head = &queue->all_slaves;
+	struct slave *slave;
+
+	list_for_each_entry(slave, head, list) {
+		if (slave->dev == dev)
+			return slave;
+	}
+
+	return NULL;
+}
+
+/* inverse of __vrf_insert_slave */
+static void __vrf_remove_slave(struct slave_queue *queue, struct slave *slave)
+{
+	list_del(&slave->list);
+}
+
+static void __vrf_insert_slave(struct slave_queue *queue, struct slave *slave)
+{
+	list_add(&slave->list, &queue->all_slaves);
+}
+
+static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
+{
+	struct net_vrf_dev *vrf_ptr = kmalloc(sizeof(*vrf_ptr), GFP_KERNEL);
+	struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+	struct net_vrf *vrf = netdev_priv(dev);
+	struct slave_queue *queue = &vrf->queue;
+	int ret = -ENOMEM;
+
+	if (!slave || !vrf_ptr)
+		goto out_fail;
+
+	slave->dev = port_dev;
+	vrf_ptr->ifindex = dev->ifindex;
+	vrf_ptr->tb_id = vrf->tb_id;
+
+	/* register the packet handler for slave ports */
+	ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
+	if (ret) {
+		netdev_err(port_dev,
+			   "Device %s failed to register rx_handler\n",
+			   port_dev->name);
+		goto out_fail;
+	}
+
+	ret = netdev_master_upper_dev_link(port_dev, dev);
+	if (ret < 0)
+		goto out_unregister;
+
+	port_dev->flags |= IFF_SLAVE;
+	__vrf_insert_slave(queue, slave);
+	rcu_assign_pointer(port_dev->vrf_ptr, vrf_ptr);
+	cycle_netdev(port_dev);
+
+	return 0;
+
+out_unregister:
+	netdev_rx_handler_unregister(port_dev);
+out_fail:
+	kfree(vrf_ptr);
+	kfree(slave);
+	return ret;
+}
+
+static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
+{
+	if (netif_is_vrf(port_dev) || vrf_is_slave(port_dev))
+		return -EINVAL;
+
+	return do_vrf_add_slave(dev, port_dev);
+}
+
+/* inverse of do_vrf_add_slave */
+static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
+{
+	struct net_vrf_dev *vrf_ptr = rtnl_dereference(port_dev->vrf_ptr);
+	struct net_vrf *vrf = netdev_priv(dev);
+	struct slave_queue *queue = &vrf->queue;
+	struct slave *slave;
+
+	RCU_INIT_POINTER(port_dev->vrf_ptr, NULL);
+
+	netdev_upper_dev_unlink(port_dev, dev);
+	port_dev->flags &= ~IFF_SLAVE;
+
+	netdev_rx_handler_unregister(port_dev);
+
+	/* after netdev_rx_handler_unregister for synchronize_rcu */
+	kfree(vrf_ptr);
+
+	cycle_netdev(port_dev);
+
+	slave = __vrf_find_slave_dev(queue, port_dev);
+	if (slave)
+		__vrf_remove_slave(queue, slave);
+
+	kfree(slave);
+
+	return 0;
+}
+
+static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
+{
+	return do_vrf_del_slave(dev, port_dev);
+}
+
+static void vrf_dev_uninit(struct net_device *dev)
+{
+	struct net_vrf *vrf = netdev_priv(dev);
+	struct slave_queue *queue = &vrf->queue;
+	struct list_head *head = &queue->all_slaves;
+	struct slave *slave, *next;
+
+	vrf_rtable_destroy(vrf);
+
+	list_for_each_entry_safe(slave, next, head, list)
+		vrf_del_slave(dev, slave->dev);
+
+	free_percpu(dev->dstats);
+	dev->dstats = NULL;
+}
+
+static int vrf_dev_init(struct net_device *dev)
+{
+	struct net_vrf *vrf = netdev_priv(dev);
+
+	INIT_LIST_HEAD(&vrf->queue.all_slaves);
+
+	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
+	if (!dev->dstats)
+		goto out_nomem;
+
+	/* create the default dst which points back to us */
+	vrf->rth = vrf_rtable_create(dev);
+	if (!vrf->rth)
+		goto out_stats;
+
+	dev->flags = IFF_MASTER | IFF_NOARP;
+
+	return 0;
+
+out_stats:
+	free_percpu(dev->dstats);
+	dev->dstats = NULL;
+out_nomem:
+	return -ENOMEM;
+}
+
+static const struct net_device_ops vrf_netdev_ops = {
+	.ndo_init		= vrf_dev_init,
+	.ndo_uninit		= vrf_dev_uninit,
+	.ndo_start_xmit		= vrf_xmit,
+	.ndo_get_stats64	= vrf_get_stats64,
+	.ndo_add_slave		= vrf_add_slave,
+	.ndo_del_slave		= vrf_del_slave,
+};
+
+static void vrf_get_drvinfo(struct net_device *dev,
+			    struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+}
+
+static const struct ethtool_ops vrf_ethtool_ops = {
+	.get_drvinfo	= vrf_get_drvinfo,
+};
+
+static void vrf_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+
+	/* Initialize the device structure. */
+	dev->netdev_ops = &vrf_netdev_ops;
+	dev->ethtool_ops = &vrf_ethtool_ops;
+	dev->destructor = free_netdev;
+
+	/* Fill in device structure with ethernet-generic values. */
+	eth_hw_addr_random(dev);
+
+	/* don't acquire vrf device's netif_tx_lock when transmitting */
+	dev->features |= NETIF_F_LLTX;
+
+	/* don't allow vrf devices to change network namespaces. */
+	dev->features |= NETIF_F_NETNS_LOCAL;
+}
+
+static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+	if (tb[IFLA_ADDRESS]) {
+		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+			return -EINVAL;
+		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+			return -EADDRNOTAVAIL;
+	}
+	return 0;
+}
+
+static void vrf_dellink(struct net_device *dev, struct list_head *head)
+{
+	struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);
+
+	RCU_INIT_POINTER(dev->vrf_ptr, NULL);
+	kfree_rcu(vrf_ptr, rcu);
+	unregister_netdevice_queue(dev, head);
+}
+
+static int vrf_newlink(struct net *src_net, struct net_device *dev,
+		       struct nlattr *tb[], struct nlattr *data[])
+{
+	struct net_vrf *vrf = netdev_priv(dev);
+	struct net_vrf_dev *vrf_ptr;
+	int err;
+
+	if (!data || !data[IFLA_VRF_TABLE])
+		return -EINVAL;
+
+	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
+
+	dev->priv_flags |= IFF_VRF_MASTER;
+
+	err = -ENOMEM;
+	vrf_ptr = kmalloc(sizeof(*dev->vrf_ptr), GFP_KERNEL);
+	if (!vrf_ptr)
+		goto out_fail;
+
+	vrf_ptr->ifindex = dev->ifindex;
+	vrf_ptr->tb_id = vrf->tb_id;
+
+	err = register_netdevice(dev);
+	if (err < 0)
+		goto out_fail;
+
+	rcu_assign_pointer(dev->vrf_ptr, vrf_ptr);
+
+	return 0;
+
+out_fail:
+	kfree(vrf_ptr);
+	free_netdev(dev);
+	return err;
+}
+
+static size_t vrf_nl_getsize(const struct net_device *dev)
+{
+	return nla_total_size(sizeof(u32));  /* IFLA_VRF_TABLE */
+}
+
+static int vrf_fillinfo(struct sk_buff *skb,
+			const struct net_device *dev)
+{
+	struct net_vrf *vrf = netdev_priv(dev);
+
+	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
+}
+
+static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
+	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
+};
+
+static struct rtnl_link_ops vrf_link_ops __read_mostly = {
+	.kind		= DRV_NAME,
+	.priv_size	= sizeof(struct net_vrf),
+
+	.get_size	= vrf_nl_getsize,
+	.policy		= vrf_nl_policy,
+	.validate	= vrf_validate,
+	.fill_info	= vrf_fillinfo,
+
+	.newlink	= vrf_newlink,
+	.dellink	= vrf_dellink,
+	.setup		= vrf_setup,
+	.maxtype	= IFLA_VRF_MAX,
+};
+
+static int vrf_device_event(struct notifier_block *unused,
+			    unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	/* only care about unregister events to drop slave references */
+	if (event == NETDEV_UNREGISTER) {
+		struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);
+		struct net_device *vrf_dev;
+
+		if (!vrf_ptr || netif_is_vrf(dev))
+			goto out;
+
+		vrf_dev = netdev_master_upper_dev_get(dev);
+		vrf_del_slave(vrf_dev, dev);
+	}
+out:
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block vrf_notifier_block __read_mostly = {
+	.notifier_call = vrf_device_event,
+};
+
+static int __init vrf_init_module(void)
+{
+	int rc;
+
+	vrf_dst_ops.kmem_cachep =
+		kmem_cache_create("vrf_ip_dst_cache",
+				  sizeof(struct rtable), 0,
+				  SLAB_HWCACHE_ALIGN,
+				  NULL);
+
+	if (!vrf_dst_ops.kmem_cachep)
+		return -ENOMEM;
+
+	register_netdevice_notifier(&vrf_notifier_block);
+
+	rc = rtnl_link_register(&vrf_link_ops);
+	if (rc < 0)
+		goto error;
+
+	return 0;
+
+error:
+	unregister_netdevice_notifier(&vrf_notifier_block);
+	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
+	return rc;
+}
+
+static void __exit vrf_cleanup_module(void)
+{
+	rtnl_link_unregister(&vrf_link_ops);
+	unregister_netdevice_notifier(&vrf_notifier_block);
+	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
+}
+
+module_init(vrf_init_module);
+module_exit(vrf_cleanup_module);
+MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
+MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
+MODULE_VERSION(DRV_VERSION);
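
Taken together, vrf.c implements a VRF as a master device: enslaved ports
get an rx_handler that retargets ingress skbs to the master (returning
RX_HANDLER_ANOTHER so protocol processing re-runs there), while egress
builds a flowi4 with flowi4_oif set to the VRF device so an OIF-based FIB
rule can steer the lookup into the VRF's routing table. A usage sketch,
assuming an iproute2 build with matching vrf type support:

	ip link add vrf-blue type vrf table 10	# IFLA_VRF_TABLE = 10
	ip link set dev vrf-blue up
	ip link set dev eth1 master vrf-blue	# ndo_add_slave enslaves eth1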
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 5bc4b1e..ce988fd 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -49,15 +49,12 @@
 #include <net/ip6_tunnel.h>
 #include <net/ip6_checksum.h>
 #endif
+#include <net/dst_metadata.h>
 
 #define VXLAN_VERSION	"0.1"
 
 #define PORT_HASH_BITS	8
 #define PORT_HASH_SIZE  (1<<PORT_HASH_BITS)
-#define VNI_HASH_BITS	10
-#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
-#define FDB_HASH_BITS	8
-#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
 #define FDB_AGE_DEFAULT 300 /* 5 min */
 #define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */
 
@@ -74,9 +71,13 @@
 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 
 static int vxlan_net_id;
+static struct rtnl_link_ops vxlan_link_ops;
 
 static const u8 all_zeros_mac[ETH_ALEN];
 
+static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+					 bool no_share, u32 flags);
+
 /* per-network namespace private data for this module */
 struct vxlan_net {
 	struct list_head  vxlan_list;
@@ -84,21 +85,6 @@
 	spinlock_t	  sock_lock;
 };
 
-union vxlan_addr {
-	struct sockaddr_in sin;
-	struct sockaddr_in6 sin6;
-	struct sockaddr sa;
-};
-
-struct vxlan_rdst {
-	union vxlan_addr	 remote_ip;
-	__be16			 remote_port;
-	u32			 remote_vni;
-	u32			 remote_ifindex;
-	struct list_head	 list;
-	struct rcu_head		 rcu;
-};
-
 /* Forwarding table entry */
 struct vxlan_fdb {
 	struct hlist_node hlist;	/* linked list of entries */
@@ -106,40 +92,21 @@
 	unsigned long	  updated;	/* jiffies */
 	unsigned long	  used;
 	struct list_head  remotes;
+	u8		  eth_addr[ETH_ALEN];
 	u16		  state;	/* see ndm_state */
 	u8		  flags;	/* see ndm_flags */
-	u8		  eth_addr[ETH_ALEN];
-};
-
-/* Pseudo network device */
-struct vxlan_dev {
-	struct hlist_node hlist;	/* vni hash table */
-	struct list_head  next;		/* vxlan's per namespace list */
-	struct vxlan_sock *vn_sock;	/* listening socket */
-	struct net_device *dev;
-	struct net	  *net;		/* netns for packet i/o */
-	struct vxlan_rdst default_dst;	/* default destination */
-	union vxlan_addr  saddr;	/* source address */
-	__be16		  dst_port;
-	__u16		  port_min;	/* source port range */
-	__u16		  port_max;
-	__u8		  tos;		/* TOS override */
-	__u8		  ttl;
-	u32		  flags;	/* VXLAN_F_* in vxlan.h */
-
-	unsigned long	  age_interval;
-	struct timer_list age_timer;
-	spinlock_t	  hash_lock;
-	unsigned int	  addrcnt;
-	unsigned int	  addrmax;
-
-	struct hlist_head fdb_head[FDB_HASH_SIZE];
 };
 
 /* salt for hash table */
 static u32 vxlan_salt __read_mostly;
 static struct workqueue_struct *vxlan_wq;
 
+static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
+{
+	return vs->flags & VXLAN_F_COLLECT_METADATA ||
+	       ip_tunnel_collect_metadata();
+}
+
 #if IS_ENABLED(CONFIG_IPV6)
 static inline
 bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
@@ -269,7 +236,7 @@
 
 	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
 		if (inet_sk(vs->sock->sk)->inet_sport == port &&
-		    inet_sk(vs->sock->sk)->sk.sk_family == family &&
+		    vxlan_get_sk_family(vs) == family &&
 		    vs->flags == flags)
 			return vs;
 	}
@@ -345,7 +312,7 @@
 	if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
 		goto nla_put_failure;
 
-	if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
+	if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
 	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
 		goto nla_put_failure;
 	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
@@ -552,10 +519,10 @@
 					  u32 data, struct gro_remcsum *grc,
 					  bool nopartial)
 {
-	size_t start, offset, plen;
+	size_t start, offset;
 
 	if (skb->remcsum_offload)
-		return NULL;
+		return vh;
 
 	if (!NAPI_GRO_CB(skb)->csum_valid)
 		return NULL;
@@ -565,17 +532,8 @@
 			  offsetof(struct udphdr, check) :
 			  offsetof(struct tcphdr, check));
 
-	plen = hdrlen + offset + sizeof(u16);
-
-	/* Pull checksum that will be written */
-	if (skb_gro_header_hard(skb, off + plen)) {
-		vh = skb_gro_header_slow(skb, off + plen, off);
-		if (!vh)
-			return NULL;
-	}
-
-	skb_gro_remcsum_process(skb, (void *)vh + hdrlen,
-				start, offset, grc, nopartial);
+	vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
+				     start, offset, grc, nopartial);
 
 	skb->remcsum_offload = 1;
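
The start/offset arithmetic in this helper decodes the remote-checksum-offload hint carried in the low byte of the VNI field. A standalone sketch of that decode follows; the RCO_* values are illustrative placeholders for the VXLAN_RCO_* constants in include/net/vxlan.h, while the checksum-field offsets (6 bytes into a UDP header, 16 into a TCP header) are the real ones:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative stand-ins for VXLAN_RCO_MASK / VXLAN_RCO_UDP /
     * VXLAN_RCO_SHIFT; see include/net/vxlan.h for the real values. */
    #define RCO_MASK   0x7f
    #define RCO_UDP    0x80
    #define RCO_SHIFT  1

    int main(void)
    {
            uint8_t data = 0x85;    /* UDP bit set, start field = 5 */
            size_t start = (size_t)(data & RCO_MASK) << RCO_SHIFT;
            size_t offset = start + ((data & RCO_UDP) ? 6   /* udphdr check */
                                                      : 16  /* tcphdr check */);

            printf("csum data starts at %zu, csum field sits at %zu\n",
                   start, offset);
            return 0;
    }
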
 
@@ -606,7 +564,6 @@
 			goto out;
 	}
 
-	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
 	skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
 
 	flags = ntohl(vh->vx_flags);
@@ -621,6 +578,8 @@
 			goto out;
 	}
 
+	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
+
 	flush = 0;
 
 	for (p = *head; p; p = p->next) {
@@ -658,7 +617,7 @@
 	struct net_device *dev;
 	struct sock *sk = vs->sock->sk;
 	struct net *net = sock_net(sk);
-	sa_family_t sa_family = sk->sk_family;
+	sa_family_t sa_family = vxlan_get_sk_family(vs);
 	__be16 port = inet_sk(sk)->inet_sport;
 	int err;
 
@@ -683,7 +642,7 @@
 	struct net_device *dev;
 	struct sock *sk = vs->sock->sk;
 	struct net *net = sock_net(sk);
-	sa_family_t sa_family = sk->sk_family;
+	sa_family_t sa_family = vxlan_get_sk_family(vs);
 	__be16 port = inet_sk(sk)->inet_sport;
 
 	rcu_read_lock();
@@ -749,7 +708,8 @@
 		if (!(flags & NLM_F_CREATE))
 			return -ENOENT;
 
-		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
+		if (vxlan->cfg.addrmax &&
+		    vxlan->addrcnt >= vxlan->cfg.addrmax)
 			return -ENOSPC;
 
 		/* Disallow replace to add a multicast entry */
@@ -835,7 +795,7 @@
 			return -EINVAL;
 		*port = nla_get_be16(tb[NDA_PORT]);
 	} else {
-		*port = vxlan->dst_port;
+		*port = vxlan->cfg.dst_port;
 	}
 
 	if (tb[NDA_VNI]) {
@@ -963,10 +923,10 @@
 		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
 			struct vxlan_rdst *rd;
 
-			if (idx < cb->args[0])
-				goto skip;
-
 			list_for_each_entry_rcu(rd, &f->remotes, list) {
+				if (idx < cb->args[0])
+					goto skip;
+
 				err = vxlan_fdb_info(skb, vxlan, f,
 						     NETLINK_CB(cb->skb).portid,
 						     cb->nlh->nlmsg_seq,
@@ -974,9 +934,9 @@
 						     NLM_F_MULTI, rd);
 				if (err < 0)
 					goto out;
-			}
 skip:
-			++idx;
+				++idx;
+			}
 		}
 	}
 out:
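
The loop reshuffle matters because netlink dumps are restartable: cb->args[0] must count the inner items (remotes) actually emitted, not fdb entries, or a partially filled dump repeats or skips remotes on the next pass. A plain-C sketch of the resume behaviour, with illustrative sizes:

    #include <stdio.h>

    /* 3 fdb entries x 2 remotes; 'start' plays the role of cb->args[0]
     * and 'budget' the role of one full netlink message. */
    static int dump(int start, int budget)
    {
            int idx = 0, emitted = 0;
            for (int f = 0; f < 3; f++) {
                    for (int r = 0; r < 2; r++) {
                            if (idx < start) {      /* sent on a previous pass */
                                    idx++;
                                    continue;
                            }
                            if (emitted == budget)  /* message full */
                                    return idx;     /* resume point */
                            printf("entry %d remote %d\n", f, r);
                            idx++;
                            emitted++;
                    }
            }
            return idx;
    }

    int main(void)
    {
            int next = dump(0, 4);  /* first pass: four items fit */
            puts("-- resume --");
            dump(next, 4);          /* second pass resumes mid-entry */
            return 0;
    }
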
@@ -1021,7 +981,7 @@
 			vxlan_fdb_create(vxlan, src_mac, src_ip,
 					 NUD_REACHABLE,
 					 NLM_F_EXCL|NLM_F_CREATE,
-					 vxlan->dst_port,
+					 vxlan->cfg.dst_port,
 					 vxlan->default_dst.remote_vni,
 					 0, NTF_SELF);
 		spin_unlock(&vxlan->hash_lock);
@@ -1062,7 +1022,7 @@
 	return false;
 }
 
-void vxlan_sock_release(struct vxlan_sock *vs)
+static void vxlan_sock_release(struct vxlan_sock *vs)
 {
 	struct sock *sk = vs->sock->sk;
 	struct net *net = sock_net(sk);
@@ -1078,7 +1038,6 @@
 
 	queue_work(vxlan_wq, &vs->del_work);
 }
-EXPORT_SYMBOL_GPL(vxlan_sock_release);
 
 /* Update multicast group membership when first VNI on
  * multicast address is brought up
@@ -1143,6 +1102,9 @@
 {
 	size_t start, offset, plen;
 
+	if (skb->remcsum_offload)
+		return vh;
+
 	start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
 	offset = start + ((data & VXLAN_RCO_UDP) ?
 			  offsetof(struct udphdr, check) :
@@ -1161,13 +1123,112 @@
 	return vh;
 }
 
+static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
+		      struct vxlan_metadata *md, u32 vni,
+		      struct metadata_dst *tun_dst)
+{
+	struct iphdr *oip = NULL;
+	struct ipv6hdr *oip6 = NULL;
+	struct vxlan_dev *vxlan;
+	struct pcpu_sw_netstats *stats;
+	union vxlan_addr saddr;
+	int err = 0;
+	union vxlan_addr *remote_ip;
+
+	/* For flow-based devices, map all packets to VNI 0 */
+	if (vs->flags & VXLAN_F_COLLECT_METADATA)
+		vni = 0;
+
+	/* Is this VNI defined? */
+	vxlan = vxlan_vs_find_vni(vs, vni);
+	if (!vxlan)
+		goto drop;
+
+	remote_ip = &vxlan->default_dst.remote_ip;
+	skb_reset_mac_header(skb);
+	skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
+	skb->protocol = eth_type_trans(skb, vxlan->dev);
+	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+	/* Ignore packet loops (and multicast echo) */
+	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
+		goto drop;
+
+	/* Re-examine inner Ethernet packet */
+	if (remote_ip->sa.sa_family == AF_INET) {
+		oip = ip_hdr(skb);
+		saddr.sin.sin_addr.s_addr = oip->saddr;
+		saddr.sa.sa_family = AF_INET;
+#if IS_ENABLED(CONFIG_IPV6)
+	} else {
+		oip6 = ipv6_hdr(skb);
+		saddr.sin6.sin6_addr = oip6->saddr;
+		saddr.sa.sa_family = AF_INET6;
+#endif
+	}
+
+	if (tun_dst) {
+		skb_dst_set(skb, (struct dst_entry *)tun_dst);
+		tun_dst = NULL;
+	}
+
+	if ((vxlan->flags & VXLAN_F_LEARN) &&
+	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
+		goto drop;
+
+	skb_reset_network_header(skb);
+	/* In flow-based mode, GBP is carried in dst_metadata */
+	if (!(vs->flags & VXLAN_F_COLLECT_METADATA))
+		skb->mark = md->gbp;
+
+	if (oip6)
+		err = IP6_ECN_decapsulate(oip6, skb);
+	if (oip)
+		err = IP_ECN_decapsulate(oip, skb);
+
+	if (unlikely(err)) {
+		if (log_ecn_error) {
+			if (oip6)
+				net_info_ratelimited("non-ECT from %pI6\n",
+						     &oip6->saddr);
+			if (oip)
+				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+						     &oip->saddr, oip->tos);
+		}
+		if (err > 1) {
+			++vxlan->dev->stats.rx_frame_errors;
+			++vxlan->dev->stats.rx_errors;
+			goto drop;
+		}
+	}
+
+	stats = this_cpu_ptr(vxlan->dev->tstats);
+	u64_stats_update_begin(&stats->syncp);
+	stats->rx_packets++;
+	stats->rx_bytes += skb->len;
+	u64_stats_update_end(&stats->syncp);
+
+	gro_cells_receive(&vxlan->gro_cells, skb);
+
+	return;
+drop:
+	if (tun_dst)
+		dst_release((struct dst_entry *)tun_dst);
+
+	/* Consume bad packet */
+	kfree_skb(skb);
+}
+
 /* Callback from net/ipv4/udp.c to receive packets */
 static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
+	struct metadata_dst *tun_dst = NULL;
+	struct ip_tunnel_info *info;
 	struct vxlan_sock *vs;
 	struct vxlanhdr *vxh;
 	u32 flags, vni;
-	struct vxlan_metadata md = {0};
+	struct vxlan_metadata _md;
+	struct vxlan_metadata *md = &_md;
 
 	/* Need Vxlan and inner Ethernet header to be present */
 	if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1202,6 +1263,19 @@
 		vni &= VXLAN_VNI_MASK;
 	}
 
+	if (vxlan_collect_metadata(vs)) {
+		tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
+					 cpu_to_be64(vni >> 8), sizeof(*md));
+
+		if (!tun_dst)
+			goto drop;
+
+		info = &tun_dst->u.tun_info;
+		md = ip_tunnel_info_opts(info);
+	} else {
+		memset(md, 0, sizeof(*md));
+	}
+
 	/* For backwards compatibility, only allow reserved fields to be
 	 * used by VXLAN extensions if explicitly requested.
 	 */
@@ -1209,13 +1283,16 @@
 		struct vxlanhdr_gbp *gbp;
 
 		gbp = (struct vxlanhdr_gbp *)vxh;
-		md.gbp = ntohs(gbp->policy_id);
+		md->gbp = ntohs(gbp->policy_id);
+
+		if (tun_dst)
+			info->key.tun_flags |= TUNNEL_VXLAN_OPT;
 
 		if (gbp->dont_learn)
-			md.gbp |= VXLAN_GBP_DONT_LEARN;
+			md->gbp |= VXLAN_GBP_DONT_LEARN;
 
 		if (gbp->policy_applied)
-			md.gbp |= VXLAN_GBP_POLICY_APPLIED;
+			md->gbp |= VXLAN_GBP_POLICY_APPLIED;
 
 		flags &= ~VXLAN_GBP_USED_BITS;
 	}
@@ -1233,8 +1310,7 @@
 		goto bad_flags;
 	}
 
-	md.vni = vxh->vx_vni;
-	vs->rcv(vs, skb, &md);
+	vxlan_rcv(vs, skb, md, vni >> 8, tun_dst);
 	return 0;
 
 drop:
@@ -1247,93 +1323,13 @@
 		   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
 
 error:
+	if (tun_dst)
+		dst_release((struct dst_entry *)tun_dst);
+
 	/* Return non-VXLAN packet */
 	return 1;
 }
 
-static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
-		      struct vxlan_metadata *md)
-{
-	struct iphdr *oip = NULL;
-	struct ipv6hdr *oip6 = NULL;
-	struct vxlan_dev *vxlan;
-	struct pcpu_sw_netstats *stats;
-	union vxlan_addr saddr;
-	__u32 vni;
-	int err = 0;
-	union vxlan_addr *remote_ip;
-
-	vni = ntohl(md->vni) >> 8;
-	/* Is this VNI defined? */
-	vxlan = vxlan_vs_find_vni(vs, vni);
-	if (!vxlan)
-		goto drop;
-
-	remote_ip = &vxlan->default_dst.remote_ip;
-	skb_reset_mac_header(skb);
-	skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
-	skb->protocol = eth_type_trans(skb, vxlan->dev);
-	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-
-	/* Ignore packet loops (and multicast echo) */
-	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
-		goto drop;
-
-	/* Re-examine inner Ethernet packet */
-	if (remote_ip->sa.sa_family == AF_INET) {
-		oip = ip_hdr(skb);
-		saddr.sin.sin_addr.s_addr = oip->saddr;
-		saddr.sa.sa_family = AF_INET;
-#if IS_ENABLED(CONFIG_IPV6)
-	} else {
-		oip6 = ipv6_hdr(skb);
-		saddr.sin6.sin6_addr = oip6->saddr;
-		saddr.sa.sa_family = AF_INET6;
-#endif
-	}
-
-	if ((vxlan->flags & VXLAN_F_LEARN) &&
-	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
-		goto drop;
-
-	skb_reset_network_header(skb);
-	skb->mark = md->gbp;
-
-	if (oip6)
-		err = IP6_ECN_decapsulate(oip6, skb);
-	if (oip)
-		err = IP_ECN_decapsulate(oip, skb);
-
-	if (unlikely(err)) {
-		if (log_ecn_error) {
-			if (oip6)
-				net_info_ratelimited("non-ECT from %pI6\n",
-						     &oip6->saddr);
-			if (oip)
-				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
-						     &oip->saddr, oip->tos);
-		}
-		if (err > 1) {
-			++vxlan->dev->stats.rx_frame_errors;
-			++vxlan->dev->stats.rx_errors;
-			goto drop;
-		}
-	}
-
-	stats = this_cpu_ptr(vxlan->dev->tstats);
-	u64_stats_update_begin(&stats->syncp);
-	stats->rx_packets++;
-	stats->rx_bytes += skb->len;
-	u64_stats_update_end(&stats->syncp);
-
-	netif_rx(skb);
-
-	return;
-drop:
-	/* Consume bad packet */
-	kfree_skb(skb);
-}
-
 static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -1672,7 +1668,7 @@
 			   struct sk_buff *skb,
 			   struct net_device *dev, struct in6_addr *saddr,
 			   struct in6_addr *daddr, __u8 prio, __u8 ttl,
-			   __be16 src_port, __be16 dst_port,
+			   __be16 src_port, __be16 dst_port, __be32 vni,
 			   struct vxlan_metadata *md, bool xnet, u32 vxflags)
 {
 	struct vxlanhdr *vxh;
@@ -1722,7 +1718,7 @@
 
 	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
 	vxh->vx_flags = htonl(VXLAN_HF_VNI);
-	vxh->vx_vni = md->vni;
+	vxh->vx_vni = vni;
 
 	if (type & SKB_GSO_TUNNEL_REMCSUM) {
 		u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
@@ -1755,10 +1751,10 @@
 }
 #endif
 
-int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
-		   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
-		   __be16 src_port, __be16 dst_port,
-		   struct vxlan_metadata *md, bool xnet, u32 vxflags)
+static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
+			  __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+			  __be16 src_port, __be16 dst_port, __be32 vni,
+			  struct vxlan_metadata *md, bool xnet, u32 vxflags)
 {
 	struct vxlanhdr *vxh;
 	int min_headroom;
@@ -1801,7 +1797,7 @@
 
 	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
 	vxh->vx_flags = htonl(VXLAN_HF_VNI);
-	vxh->vx_vni = md->vni;
+	vxh->vx_vni = vni;
 
 	if (type & SKB_GSO_TUNNEL_REMCSUM) {
 		u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
@@ -1828,7 +1824,6 @@
 				   ttl, df, src_port, dst_port, xnet,
 				   !(vxflags & VXLAN_F_UDP_CSUM));
 }
-EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
 
 /* Bypass encapsulation if the destination is local */
 static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
@@ -1878,22 +1873,48 @@
 static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 			   struct vxlan_rdst *rdst, bool did_rsc)
 {
+	struct ip_tunnel_info *info;
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct sock *sk = vxlan->vn_sock->sock->sk;
+	unsigned short family = vxlan_get_sk_family(vxlan->vn_sock);
 	struct rtable *rt = NULL;
 	const struct iphdr *old_iph;
 	struct flowi4 fl4;
 	union vxlan_addr *dst;
-	struct vxlan_metadata md;
+	union vxlan_addr remote_ip;
+	struct vxlan_metadata _md;
+	struct vxlan_metadata *md = &_md;
 	__be16 src_port = 0, dst_port;
 	u32 vni;
 	__be16 df = 0;
 	__u8 tos, ttl;
 	int err;
+	u32 flags = vxlan->flags;
 
-	dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
-	vni = rdst->remote_vni;
-	dst = &rdst->remote_ip;
+	info = skb_tunnel_info(skb);
+
+	if (rdst) {
+		dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
+		vni = rdst->remote_vni;
+		dst = &rdst->remote_ip;
+	} else {
+		if (!info) {
+			WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
+				  dev->name);
+			goto drop;
+		}
+		if (family != ip_tunnel_info_af(info))
+			goto drop;
+
+		dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
+		vni = be64_to_cpu(info->key.tun_id);
+		remote_ip.sa.sa_family = family;
+		if (family == AF_INET)
+			remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
+		else
+			remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
+		dst = &remote_ip;
+	}
 
 	if (vxlan_addr_any(dst)) {
 		if (did_rsc) {
@@ -1906,25 +1927,43 @@
 
 	old_iph = ip_hdr(skb);
 
-	ttl = vxlan->ttl;
+	ttl = vxlan->cfg.ttl;
 	if (!ttl && vxlan_addr_multicast(dst))
 		ttl = 1;
 
-	tos = vxlan->tos;
+	tos = vxlan->cfg.tos;
 	if (tos == 1)
 		tos = ip_tunnel_get_dsfield(old_iph, skb);
 
-	src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min,
-				     vxlan->port_max, true);
+	src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
+				     vxlan->cfg.port_max, true);
+
+	if (info) {
+		if (info->key.tun_flags & TUNNEL_CSUM)
+			flags |= VXLAN_F_UDP_CSUM;
+		else
+			flags &= ~VXLAN_F_UDP_CSUM;
+
+		ttl = info->key.ttl;
+		tos = info->key.tos;
+
+		if (info->options_len)
+			md = ip_tunnel_info_opts(info);
+	} else {
+		md->gbp = skb->mark;
+	}
 
 	if (dst->sa.sa_family == AF_INET) {
+		if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
+			df = htons(IP_DF);
+
 		memset(&fl4, 0, sizeof(fl4));
-		fl4.flowi4_oif = rdst->remote_ifindex;
+		fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
 		fl4.flowi4_tos = RT_TOS(tos);
 		fl4.flowi4_mark = skb->mark;
 		fl4.flowi4_proto = IPPROTO_UDP;
 		fl4.daddr = dst->sin.sin_addr.s_addr;
-		fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
+		fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
 
 		rt = ip_route_output_key(vxlan->net, &fl4);
 		if (IS_ERR(rt)) {
@@ -1958,14 +1997,11 @@
 
 		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
 		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
-		md.vni = htonl(vni << 8);
-		md.gbp = skb->mark;
-
 		err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr,
 				     dst->sin.sin_addr.s_addr, tos, ttl, df,
-				     src_port, dst_port, &md,
+				     src_port, dst_port, htonl(vni << 8), md,
 				     !net_eq(vxlan->net, dev_net(vxlan->dev)),
-				     vxlan->flags);
+				     flags);
 		if (err < 0) {
 			/* skb is already freed. */
 			skb = NULL;
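
With md->vni gone, the 24-bit VNI travels as a plain argument and is packed into the 32-bit on-wire vx_vni field only at this point; receive reverses the operation. A standalone round-trip check of the shift-and-byte-order pattern used here:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
            uint32_t vni  = 0x123456;         /* 24-bit VXLAN network id */
            uint32_t wire = htonl(vni << 8);  /* written to vxh->vx_vni */
            uint32_t back = ntohl(wire) >> 8; /* recovered on receive */

            printf("vni 0x%06x -> wire 0x%08x -> 0x%06x\n", vni, wire, back);
            return back == vni ? 0 : 1;
    }
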
@@ -1977,16 +2013,16 @@
 	} else {
 		struct dst_entry *ndst;
 		struct flowi6 fl6;
-		u32 flags;
+		u32 rt6i_flags;
 
 		memset(&fl6, 0, sizeof(fl6));
-		fl6.flowi6_oif = rdst->remote_ifindex;
+		fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
 		fl6.daddr = dst->sin6.sin6_addr;
-		fl6.saddr = vxlan->saddr.sin6.sin6_addr;
+		fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
 		fl6.flowi6_mark = skb->mark;
 		fl6.flowi6_proto = IPPROTO_UDP;
 
-		if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
+		if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) {
 			netdev_dbg(dev, "no route to %pI6\n",
 				   &dst->sin6.sin6_addr);
 			dev->stats.tx_carrier_errors++;
@@ -2002,9 +2038,9 @@
 		}
 
 		/* Bypass encapsulation if the destination is local */
-		flags = ((struct rt6_info *)ndst)->rt6i_flags;
-		if (flags & RTF_LOCAL &&
-		    !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
+		rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
+		if (rt6i_flags & RTF_LOCAL &&
+		    !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
 			struct vxlan_dev *dst_vxlan;
 
 			dst_release(ndst);
@@ -2018,13 +2054,10 @@
 		}
 
 		ttl = ttl ? : ip6_dst_hoplimit(ndst);
-		md.vni = htonl(vni << 8);
-		md.gbp = skb->mark;
-
 		err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
-				      0, ttl, src_port, dst_port, &md,
+				      0, ttl, src_port, dst_port, htonl(vni << 8), md,
 				      !net_eq(vxlan->net, dev_net(vxlan->dev)),
-				      vxlan->flags);
+				      flags);
 #endif
 	}
 
@@ -2051,11 +2084,14 @@
 static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
+	const struct ip_tunnel_info *info;
 	struct ethhdr *eth;
 	bool did_rsc = false;
 	struct vxlan_rdst *rdst, *fdst = NULL;
 	struct vxlan_fdb *f;
 
+	info = skb_tunnel_info(skb);
+
 	skb_reset_mac_header(skb);
 	eth = eth_hdr(skb);
 
@@ -2078,6 +2114,12 @@
 #endif
 	}
 
+	if (vxlan->flags & VXLAN_F_COLLECT_METADATA &&
+	    info && info->mode & IP_TUNNEL_INFO_TX) {
+		vxlan_xmit_one(skb, dev, NULL, false);
+		return NETDEV_TX_OK;
+	}
+
 	f = vxlan_find_mac(vxlan, eth->h_dest);
 	did_rsc = false;
 
@@ -2143,7 +2185,7 @@
 			if (f->state & NUD_PERMANENT)
 				continue;
 
-			timeout = f->used + vxlan->age_interval * HZ;
+			timeout = f->used + vxlan->cfg.age_interval * HZ;
 			if (time_before_eq(timeout, jiffies)) {
 				netdev_dbg(vxlan->dev,
 					   "garbage collect %pM\n",
@@ -2207,8 +2249,8 @@
 	struct vxlan_sock *vs;
 	int ret = 0;
 
-	vs = vxlan_sock_add(vxlan->net, vxlan->dst_port, vxlan_rcv, NULL,
-			    false, vxlan->flags);
+	vs = vxlan_sock_add(vxlan->net, vxlan->cfg.dst_port,
+			    vxlan->cfg.no_share, vxlan->flags);
 	if (IS_ERR(vs))
 		return PTR_ERR(vs);
 
@@ -2224,7 +2266,7 @@
 		}
 	}
 
-	if (vxlan->age_interval)
+	if (vxlan->cfg.age_interval)
 		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
 
 	return ret;
@@ -2335,7 +2377,7 @@
 	for (i = 0; i < PORT_HASH_SIZE; ++i) {
 		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
 			port = inet_sk(vs->sock->sk)->inet_sport;
-			sa_family = vs->sock->sk->sk_family;
+			sa_family = vxlan_get_sk_family(vs);
 			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
 							    port);
 		}
@@ -2361,7 +2403,6 @@
 	dev->destructor = free_netdev;
 	SET_NETDEV_DEVTYPE(dev, &vxlan_type);
 
-	dev->tx_queue_len = 0;
 	dev->features	|= NETIF_F_LLTX;
 	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
 	dev->features   |= NETIF_F_RXCSUM;
@@ -2373,7 +2414,7 @@
 	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
 	netif_keep_dst(dev);
-	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
 
 	INIT_LIST_HEAD(&vxlan->next);
 	spin_lock_init(&vxlan->hash_lock);
@@ -2382,10 +2423,12 @@
 	vxlan->age_timer.function = vxlan_cleanup;
 	vxlan->age_timer.data = (unsigned long) vxlan;
 
-	vxlan->dst_port = htons(vxlan_port);
+	vxlan->cfg.dst_port = htons(vxlan_port);
 
 	vxlan->dev = dev;
 
+	gro_cells_init(&vxlan->gro_cells, dev);
+
 	for (h = 0; h < FDB_HASH_SIZE; ++h)
 		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
 }
@@ -2407,6 +2450,7 @@
 	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
 	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
 	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
+	[IFLA_VXLAN_COLLECT_METADATA]	= { .type = NLA_U8 },
 	[IFLA_VXLAN_PORT]	= { .type = NLA_U16 },
 	[IFLA_VXLAN_UDP_CSUM]	= { .type = NLA_U8 },
 	[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
@@ -2486,6 +2530,7 @@
 		udp_conf.family = AF_INET6;
 		udp_conf.use_udp6_rx_checksums =
 		    !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
+		udp_conf.ipv6_v6only = 1;
 	} else {
 		udp_conf.family = AF_INET;
 	}
@@ -2502,7 +2547,6 @@
 
 /* Create new listen socket if needed */
 static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
-					      vxlan_rcv_t *rcv, void *data,
 					      u32 flags)
 {
 	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
@@ -2531,8 +2575,6 @@
 
 	vs->sock = sock;
 	atomic_set(&vs->refcnt, 1);
-	vs->rcv = rcv;
-	vs->data = data;
 	vs->flags = (flags & VXLAN_F_RCV_FLAGS);
 
 	/* Initialize the vxlan udp offloads structure */
@@ -2556,9 +2598,8 @@
 	return vs;
 }
 
-struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
-				  vxlan_rcv_t *rcv, void *data,
-				  bool no_share, u32 flags)
+static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+					 bool no_share, u32 flags)
 {
 	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 	struct vxlan_sock *vs;
@@ -2568,7 +2609,7 @@
 		spin_lock(&vn->sock_lock);
 		vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port,
 				     flags);
-		if (vs && vs->rcv == rcv) {
+		if (vs) {
 			if (!atomic_add_unless(&vs->refcnt, 1, 0))
 				vs = ERR_PTR(-EBUSY);
 			spin_unlock(&vn->sock_lock);
@@ -2577,58 +2618,38 @@
 		spin_unlock(&vn->sock_lock);
 	}
 
-	return vxlan_socket_create(net, port, rcv, data, flags);
+	return vxlan_socket_create(net, port, flags);
 }
-EXPORT_SYMBOL_GPL(vxlan_sock_add);
 
-static int vxlan_newlink(struct net *src_net, struct net_device *dev,
-			 struct nlattr *tb[], struct nlattr *data[])
+static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
+			       struct vxlan_config *conf)
 {
 	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_rdst *dst = &vxlan->default_dst;
-	__u32 vni;
 	int err;
 	bool use_ipv6 = false;
-
-	if (!data[IFLA_VXLAN_ID])
-		return -EINVAL;
+	__be16 default_port = vxlan->cfg.dst_port;
 
 	vxlan->net = src_net;
 
-	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
-	dst->remote_vni = vni;
+	dst->remote_vni = conf->vni;
+
+	memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
 
 	/* Unless IPv6 is explicitly requested, assume IPv4 */
-	dst->remote_ip.sa.sa_family = AF_INET;
-	if (data[IFLA_VXLAN_GROUP]) {
-		dst->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
-	} else if (data[IFLA_VXLAN_GROUP6]) {
-		if (!IS_ENABLED(CONFIG_IPV6))
-			return -EPFNOSUPPORT;
+	if (!dst->remote_ip.sa.sa_family)
+		dst->remote_ip.sa.sa_family = AF_INET;
 
-		dst->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
-		dst->remote_ip.sa.sa_family = AF_INET6;
+	if (dst->remote_ip.sa.sa_family == AF_INET6 ||
+	    vxlan->cfg.saddr.sa.sa_family == AF_INET6)
 		use_ipv6 = true;
-	}
 
-	if (data[IFLA_VXLAN_LOCAL]) {
-		vxlan->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
-		vxlan->saddr.sa.sa_family = AF_INET;
-	} else if (data[IFLA_VXLAN_LOCAL6]) {
-		if (!IS_ENABLED(CONFIG_IPV6))
-			return -EPFNOSUPPORT;
-
-		/* TODO: respect scope id */
-		vxlan->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
-		vxlan->saddr.sa.sa_family = AF_INET6;
-		use_ipv6 = true;
-	}
-
-	if (data[IFLA_VXLAN_LINK] &&
-	    (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
+	if (conf->remote_ifindex) {
 		struct net_device *lowerdev
-			 = __dev_get_by_index(src_net, dst->remote_ifindex);
+			 = __dev_get_by_index(src_net, conf->remote_ifindex);
+
+		dst->remote_ifindex = conf->remote_ifindex;
 
 		if (!lowerdev) {
 			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
@@ -2646,7 +2667,7 @@
 		}
 #endif
 
-		if (!tb[IFLA_MTU])
+		if (!conf->mtu)
 			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
 
 		dev->needed_headroom = lowerdev->hard_header_len +
@@ -2654,75 +2675,17 @@
 	} else if (use_ipv6)
 		vxlan->flags |= VXLAN_F_IPV6;
 
-	if (data[IFLA_VXLAN_TOS])
-		vxlan->tos  = nla_get_u8(data[IFLA_VXLAN_TOS]);
+	memcpy(&vxlan->cfg, conf, sizeof(*conf));
+	if (!vxlan->cfg.dst_port)
+		vxlan->cfg.dst_port = default_port;
+	vxlan->flags |= conf->flags;
 
-	if (data[IFLA_VXLAN_TTL])
-		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
+	if (!vxlan->cfg.age_interval)
+		vxlan->cfg.age_interval = FDB_AGE_DEFAULT;
 
-	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
-		vxlan->flags |= VXLAN_F_LEARN;
-
-	if (data[IFLA_VXLAN_AGEING])
-		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
-	else
-		vxlan->age_interval = FDB_AGE_DEFAULT;
-
-	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
-		vxlan->flags |= VXLAN_F_PROXY;
-
-	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
-		vxlan->flags |= VXLAN_F_RSC;
-
-	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
-		vxlan->flags |= VXLAN_F_L2MISS;
-
-	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
-		vxlan->flags |= VXLAN_F_L3MISS;
-
-	if (data[IFLA_VXLAN_LIMIT])
-		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
-
-	if (data[IFLA_VXLAN_PORT_RANGE]) {
-		const struct ifla_vxlan_port_range *p
-			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
-		vxlan->port_min = ntohs(p->low);
-		vxlan->port_max = ntohs(p->high);
-	}
-
-	if (data[IFLA_VXLAN_PORT])
-		vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
-
-	if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
-		vxlan->flags |= VXLAN_F_UDP_CSUM;
-
-	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
-	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
-		vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
-
-	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
-	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
-		vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
-
-	if (data[IFLA_VXLAN_REMCSUM_TX] &&
-	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
-		vxlan->flags |= VXLAN_F_REMCSUM_TX;
-
-	if (data[IFLA_VXLAN_REMCSUM_RX] &&
-	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
-		vxlan->flags |= VXLAN_F_REMCSUM_RX;
-
-	if (data[IFLA_VXLAN_GBP])
-		vxlan->flags |= VXLAN_F_GBP;
-
-	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
-		vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
-
-	if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
-			   vxlan->dst_port, vxlan->flags)) {
-		pr_info("duplicate VNI %u\n", vni);
+	if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? AF_INET6 : AF_INET,
+			   vxlan->cfg.dst_port, vxlan->flags))
 		return -EEXIST;
-	}
 
 	dev->ethtool_ops = &vxlan_ethtool_ops;
 
@@ -2732,7 +2695,7 @@
 				       &vxlan->default_dst.remote_ip,
 				       NUD_REACHABLE|NUD_PERMANENT,
 				       NLM_F_EXCL|NLM_F_CREATE,
-				       vxlan->dst_port,
+				       vxlan->cfg.dst_port,
 				       vxlan->default_dst.remote_vni,
 				       vxlan->default_dst.remote_ifindex,
 				       NTF_SELF);
@@ -2751,6 +2714,151 @@
 	return 0;
 }
 
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+				    u8 name_assign_type, struct vxlan_config *conf)
+{
+	struct nlattr *tb[IFLA_MAX+1];
+	struct net_device *dev;
+	int err;
+
+	memset(&tb, 0, sizeof(tb));
+
+	dev = rtnl_create_link(net, name, name_assign_type,
+			       &vxlan_link_ops, tb);
+	if (IS_ERR(dev))
+		return dev;
+
+	err = vxlan_dev_configure(net, dev, conf);
+	if (err < 0) {
+		free_netdev(dev);
+		return ERR_PTR(err);
+	}
+
+	return dev;
+}
+EXPORT_SYMBOL_GPL(vxlan_dev_create);
+
+static int vxlan_newlink(struct net *src_net, struct net_device *dev,
+			 struct nlattr *tb[], struct nlattr *data[])
+{
+	struct vxlan_config conf;
+	int err;
+
+	if (!data[IFLA_VXLAN_ID])
+		return -EINVAL;
+
+	memset(&conf, 0, sizeof(conf));
+	conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);
+
+	if (data[IFLA_VXLAN_GROUP]) {
+		conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
+	} else if (data[IFLA_VXLAN_GROUP6]) {
+		if (!IS_ENABLED(CONFIG_IPV6))
+			return -EPFNOSUPPORT;
+
+		conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
+		conf.remote_ip.sa.sa_family = AF_INET6;
+	}
+
+	if (data[IFLA_VXLAN_LOCAL]) {
+		conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
+		conf.saddr.sa.sa_family = AF_INET;
+	} else if (data[IFLA_VXLAN_LOCAL6]) {
+		if (!IS_ENABLED(CONFIG_IPV6))
+			return -EPFNOSUPPORT;
+
+		/* TODO: respect scope id */
+		conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
+		conf.saddr.sa.sa_family = AF_INET6;
+	}
+
+	if (data[IFLA_VXLAN_LINK])
+		conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);
+
+	if (data[IFLA_VXLAN_TOS])
+		conf.tos  = nla_get_u8(data[IFLA_VXLAN_TOS]);
+
+	if (data[IFLA_VXLAN_TTL])
+		conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
+
+	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
+		conf.flags |= VXLAN_F_LEARN;
+
+	if (data[IFLA_VXLAN_AGEING])
+		conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
+
+	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
+		conf.flags |= VXLAN_F_PROXY;
+
+	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
+		conf.flags |= VXLAN_F_RSC;
+
+	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
+		conf.flags |= VXLAN_F_L2MISS;
+
+	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
+		conf.flags |= VXLAN_F_L3MISS;
+
+	if (data[IFLA_VXLAN_LIMIT])
+		conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
+
+	if (data[IFLA_VXLAN_COLLECT_METADATA] &&
+	    nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
+		conf.flags |= VXLAN_F_COLLECT_METADATA;
+
+	if (data[IFLA_VXLAN_PORT_RANGE]) {
+		const struct ifla_vxlan_port_range *p
+			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
+		conf.port_min = ntohs(p->low);
+		conf.port_max = ntohs(p->high);
+	}
+
+	if (data[IFLA_VXLAN_PORT])
+		conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
+
+	if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
+		conf.flags |= VXLAN_F_UDP_CSUM;
+
+	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
+	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
+		conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
+
+	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
+	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
+		conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
+
+	if (data[IFLA_VXLAN_REMCSUM_TX] &&
+	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
+		conf.flags |= VXLAN_F_REMCSUM_TX;
+
+	if (data[IFLA_VXLAN_REMCSUM_RX] &&
+	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
+		conf.flags |= VXLAN_F_REMCSUM_RX;
+
+	if (data[IFLA_VXLAN_GBP])
+		conf.flags |= VXLAN_F_GBP;
+
+	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
+		conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
+
+	err = vxlan_dev_configure(src_net, dev, &conf);
+	switch (err) {
+	case -ENODEV:
+		pr_info("ifindex %d does not exist\n", conf.remote_ifindex);
+		break;
+
+	case -EPERM:
+		pr_info("IPv6 is disabled via sysctl\n");
+		break;
+
+	case -EEXIST:
+		pr_info("duplicate VNI %u\n", conf.vni);
+		break;
+	}
+
+	return err;
+}
+
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -2761,6 +2869,7 @@
 		hlist_del_rcu(&vxlan->hlist);
 	spin_unlock(&vn->sock_lock);
 
+	gro_cells_destroy(&vxlan->gro_cells);
 	list_del(&vxlan->next);
 	unregister_netdevice_queue(dev, head);
 }
@@ -2779,6 +2888,7 @@
 		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
 		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
 		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
+		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_COLLECT_METADATA */
 		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
 		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
 		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
@@ -2796,8 +2906,8 @@
 	const struct vxlan_dev *vxlan = netdev_priv(dev);
 	const struct vxlan_rdst *dst = &vxlan->default_dst;
 	struct ifla_vxlan_port_range ports = {
-		.low =  htons(vxlan->port_min),
-		.high = htons(vxlan->port_max),
+		.low =  htons(vxlan->cfg.port_min),
+		.high = htons(vxlan->cfg.port_max),
 	};
 
 	if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
@@ -2820,22 +2930,22 @@
 	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
 		goto nla_put_failure;
 
-	if (!vxlan_addr_any(&vxlan->saddr)) {
-		if (vxlan->saddr.sa.sa_family == AF_INET) {
+	if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
+		if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
 			if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
-					    vxlan->saddr.sin.sin_addr.s_addr))
+					    vxlan->cfg.saddr.sin.sin_addr.s_addr))
 				goto nla_put_failure;
 #if IS_ENABLED(CONFIG_IPV6)
 		} else {
 			if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
-					     &vxlan->saddr.sin6.sin6_addr))
+					     &vxlan->cfg.saddr.sin6.sin6_addr))
 				goto nla_put_failure;
 #endif
 		}
 	}
 
-	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
-	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
+	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
+	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
 	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
 			!!(vxlan->flags & VXLAN_F_LEARN)) ||
 	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
@@ -2845,9 +2955,11 @@
 			!!(vxlan->flags & VXLAN_F_L2MISS)) ||
 	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
 			!!(vxlan->flags & VXLAN_F_L3MISS)) ||
-	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
-	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
-	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) ||
+	    nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
+		       !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) ||
+	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
+	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
+	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
 	    nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
 			!!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
 	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
@@ -2966,8 +3078,10 @@
 		/* If vxlan->dev is in the same netns, it has already been added
 		 * to the list by the previous loop.
 		 */
-		if (!net_eq(dev_net(vxlan->dev), net))
+		if (!net_eq(dev_net(vxlan->dev), net)) {
+			gro_cells_destroy(&vxlan->gro_cells);
 			unregister_netdevice_queue(vxlan->dev, &list);
+		}
 	}
 
 	unregister_netdevice_many(&list);
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 3ebed1c..e92aaf6 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1096,7 +1096,7 @@
 	}
 	dev->netdev_ops = &pvc_ops;
 	dev->mtu = HDLC_MAX_MTU;
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->ml_priv = pvc;
 
 	if (register_netdevice(dev) != 0) {
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 9729e69..c04fb00 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -11,7 +11,8 @@
 		 wmi-tlv.o \
 		 bmi.o \
 		 hw.o \
-		 p2p.o
+		 p2p.o \
+		 swap.o
 
 ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
 ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 31a9906..df7c761 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -178,7 +178,7 @@
 };
 
 /* in jiffies */
-#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ)
+#define BMI_COMMUNICATION_TIMEOUT_HZ (2 * HZ)
 
 #define BMI_CE_NUM_TO_TARG 0
 #define BMI_CE_NUM_TO_HOST 1
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index e508c65..cf28fbe 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -452,6 +452,7 @@
 {
 	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
 	unsigned int nentries_mask = dest_ring->nentries_mask;
+	struct ath10k *ar = ce_state->ar;
 	unsigned int sw_index = dest_ring->sw_index;
 
 	struct ce_desc *base = dest_ring->base_addr_owner_space;
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 0eddb20..5c903e15 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -21,7 +21,7 @@
 #include "hif.h"
 
 /* Maximum number of Copy Engines supported */
-#define CE_COUNT_MAX 8
+#define CE_COUNT_MAX 12
 #define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
 
 /* Descriptor rings must be aligned to this boundary */
@@ -38,8 +38,13 @@
 
 #define CE_DESC_FLAGS_GATHER         (1 << 0)
 #define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
-#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
-#define CE_DESC_FLAGS_META_DATA_LSB  2
+
+/* The following descriptor flags are used in QCA99X0 */
+#define CE_DESC_FLAGS_HOST_INT_DIS	(1 << 2)
+#define CE_DESC_FLAGS_TGT_INT_DIS	(1 << 3)
+
+#define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask
+#define CE_DESC_FLAGS_META_DATA_LSB  ar->hw_values->ce_desc_meta_data_lsb
 
 struct ce_desc {
 	__le32 addr;
@@ -423,8 +428,10 @@
 
 #define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
 
-#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB		8
-#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK		0x0000ff00
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
+				ar->regs->ce_wrap_intr_sum_host_msi_lsb
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \
+				ar->regs->ce_wrap_intr_sum_host_msi_mask
 #define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
 	(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
 		CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
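
The metadata mask and shift stop being compile-time constants and are looked up from ar->hw_values per chip; the pack/unpack arithmetic itself is unchanged and follows the driver's MS()/SM() helpers (defined in core.h, visible later in this patch). A standalone sketch using the fixed QCA988X values from the lines removed above:

    #include <stdio.h>
    #include <stdint.h>

    /* ath10k's field helpers: MS() extracts a field, SM() builds one. */
    #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
    #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)

    /* Old fixed QCA988X values; QCA99X0 supplies different ones. */
    #define CE_META_DATA_MASK 0xFFFC
    #define CE_META_DATA_LSB  2

    int main(void)
    {
            uint16_t flags = SM(0x123, CE_META_DATA);
            printf("packed 0x%04x -> meta 0x%x\n",
                   (unsigned)flags, (unsigned)MS(flags, CE_META_DATA));
            return 0;
    }
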
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 59496a9..b87b986 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -31,16 +31,19 @@
 #include "wmi-ops.h"
 
 unsigned int ath10k_debug_mask;
+static unsigned int ath10k_cryptmode_param;
 static bool uart_print;
 static bool skip_otp;
 
 module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
+module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
 module_param(uart_print, bool, 0644);
 module_param(skip_otp, bool, 0644);
 
 MODULE_PARM_DESC(debug_mask, "Debugging mask");
 MODULE_PARM_DESC(uart_print, "Uart target debugging");
 MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
+MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
 
 static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 	{
@@ -49,6 +52,8 @@
 		.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
 		.uart_pin = 7,
 		.has_shifted_cc_wraparound = true,
+		.otp_exe_param = 0,
+		.channel_counters_freq_hz = 88000,
 		.fw = {
 			.dir = QCA988X_HW_2_0_FW_DIR,
 			.fw = QCA988X_HW_2_0_FW_FILE,
@@ -63,6 +68,8 @@
 		.name = "qca6174 hw2.1",
 		.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
 		.uart_pin = 6,
+		.otp_exe_param = 0,
+		.channel_counters_freq_hz = 88000,
 		.fw = {
 			.dir = QCA6174_HW_2_1_FW_DIR,
 			.fw = QCA6174_HW_2_1_FW_FILE,
@@ -77,6 +84,8 @@
 		.name = "qca6174 hw3.0",
 		.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
 		.uart_pin = 6,
+		.otp_exe_param = 0,
+		.channel_counters_freq_hz = 88000,
 		.fw = {
 			.dir = QCA6174_HW_3_0_FW_DIR,
 			.fw = QCA6174_HW_3_0_FW_FILE,
@@ -91,6 +100,8 @@
 		.name = "qca6174 hw3.2",
 		.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
 		.uart_pin = 6,
+		.otp_exe_param = 0,
+		.channel_counters_freq_hz = 88000,
 		.fw = {
 			/* uses same binaries as hw3.0 */
 			.dir = QCA6174_HW_3_0_FW_DIR,
@@ -101,8 +112,69 @@
 			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
 		},
 	},
+	{
+		.id = QCA99X0_HW_2_0_DEV_VERSION,
+		.name = "qca99x0 hw2.0",
+		.patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
+		.uart_pin = 7,
+		.otp_exe_param = 0x00000700,
+		.continuous_frag_desc = true,
+		.channel_counters_freq_hz = 150000,
+		.fw = {
+			.dir = QCA99X0_HW_2_0_FW_DIR,
+			.fw = QCA99X0_HW_2_0_FW_FILE,
+			.otp = QCA99X0_HW_2_0_OTP_FILE,
+			.board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
+			.board_size = QCA99X0_BOARD_DATA_SZ,
+			.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+		},
+	},
 };
 
+static const char *const ath10k_core_fw_feature_str[] = {
+	[ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX] = "wmi-mgmt-rx",
+	[ATH10K_FW_FEATURE_WMI_10X] = "wmi-10.x",
+	[ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX] = "has-wmi-mgmt-tx",
+	[ATH10K_FW_FEATURE_NO_P2P] = "no-p2p",
+	[ATH10K_FW_FEATURE_WMI_10_2] = "wmi-10.2",
+	[ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT] = "multi-vif-ps",
+	[ATH10K_FW_FEATURE_WOWLAN_SUPPORT] = "wowlan",
+	[ATH10K_FW_FEATURE_IGNORE_OTP_RESULT] = "ignore-otp",
+	[ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad",
+	[ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init",
+};
+
+static unsigned int ath10k_core_get_fw_feature_str(char *buf,
+						   size_t buf_len,
+						   enum ath10k_fw_features feat)
+{
+	if (feat >= ARRAY_SIZE(ath10k_core_fw_feature_str) ||
+	    WARN_ON(!ath10k_core_fw_feature_str[feat])) {
+		return scnprintf(buf, buf_len, "bit%d", feat);
+	}
+
+	return scnprintf(buf, buf_len, "%s", ath10k_core_fw_feature_str[feat]);
+}
+
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+				     char *buf,
+				     size_t buf_len)
+{
+	unsigned int len = 0;
+	int i;
+
+	for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+		if (test_bit(i, ar->fw_features)) {
+			if (len > 0)
+				len += scnprintf(buf + len, buf_len - len, ",");
+
+			len += ath10k_core_get_fw_feature_str(buf + len,
+							      buf_len - len,
+							      i);
+		}
+	}
+}
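
The helper above joins the set feature bits into a single comma-separated string for the boot banner. A userspace sketch of the same pattern, with a local stand-in for the kernel's scnprintf() and three names taken from the table (which bits are actually set depends on the firmware image):

    #include <stdarg.h>
    #include <stdio.h>

    /* Stand-in for the kernel's scnprintf(): like snprintf() but returns
     * the number of characters actually stored in the buffer. */
    static size_t scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
            va_list ap;
            int ret;

            if (size == 0)
                    return 0;
            va_start(ap, fmt);
            ret = vsnprintf(buf, size, fmt, ap);
            va_end(ap);
            if (ret < 0)
                    return 0;
            return (size_t)ret >= size ? size - 1 : (size_t)ret;
    }

    static const char *const features[] = { "wmi-10.x", "wowlan", "ignore-otp" };

    int main(void)
    {
            char buf[128] = "";
            size_t len = 0;

            for (size_t i = 0; i < sizeof(features) / sizeof(features[0]); i++) {
                    if (len > 0)
                            len += scnprintf(buf + len, sizeof(buf) - len, ",");
                    len += scnprintf(buf + len, sizeof(buf) - len, "%s",
                                     features[i]);
            }
            printf("features %s\n", buf);
            return 0;
    }
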
+
 static void ath10k_send_suspend_complete(struct ath10k *ar)
 {
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n");
@@ -164,6 +236,17 @@
 		return ret;
 	}
 
+	/* Some devices have a special sanity check that verifies the PCI
+	 * Device ID is written to this host interest var. It is known to be
+	 * required to boot QCA6164.
+	 */
+	ret = ath10k_bmi_write32(ar, hi_hci_uart_pwr_mgmt_params_ext,
+				 ar->dev_id);
+	if (ret) {
+		ath10k_err(ar, "failed to set pwr_mgmt_params: %d\n", ret);
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -355,6 +438,7 @@
 static int ath10k_download_and_run_otp(struct ath10k *ar)
 {
 	u32 result, address = ar->hw_params.patch_load_addr;
+	u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
 	int ret;
 
 	ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
@@ -380,7 +464,7 @@
 		return ret;
 	}
 
-	ret = ath10k_bmi_execute(ar, address, 0, &result);
+	ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
 	if (ret) {
 		ath10k_err(ar, "could not execute otp (%d)\n", ret);
 		return ret;
@@ -412,6 +496,13 @@
 		data = ar->firmware_data;
 		data_len = ar->firmware_len;
 		mode_name = "normal";
+		ret = ath10k_swap_code_seg_configure(ar,
+				ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
+		if (ret) {
+			ath10k_err(ar, "failed to configure fw code swap: %d\n",
+				   ret);
+			return ret;
+		}
 		break;
 	case ATH10K_FIRMWARE_MODE_UTF:
 		data = ar->testmode.utf->data;
@@ -451,6 +542,8 @@
 	if (!IS_ERR(ar->cal_file))
 		release_firmware(ar->cal_file);
 
+	ath10k_swap_code_seg_release(ar);
+
 	ar->board = NULL;
 	ar->board_data = NULL;
 	ar->board_len = 0;
@@ -464,6 +557,7 @@
 	ar->firmware_len = 0;
 
 	ar->cal_file = NULL;
+
 }
 
 static int ath10k_fetch_cal_file(struct ath10k *ar)
@@ -737,6 +831,13 @@
 			ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
 				   ar->htt.op_version);
 			break;
+		case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE:
+			ath10k_dbg(ar, ATH10K_DBG_BOOT,
+				   "found fw code swap image ie (%zd B)\n",
+				   ie_len);
+			ar->swap.firmware_codeswap_data = data;
+			ar->swap.firmware_codeswap_len = ie_len;
+			break;
 		default:
 			ath10k_warn(ar, "Unknown FW IE: %u\n",
 				    le32_to_cpu(hdr->id));
@@ -991,6 +1092,46 @@
 		return -EINVAL;
 	}
 
+	ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_NATIVE_WIFI;
+	switch (ath10k_cryptmode_param) {
+	case ATH10K_CRYPT_MODE_HW:
+		clear_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+		clear_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+		break;
+	case ATH10K_CRYPT_MODE_SW:
+		if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
+			      ar->fw_features)) {
+			ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware");
+			return -EINVAL;
+		}
+
+		set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+		set_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+		break;
+	default:
+		ath10k_info(ar, "invalid cryptmode: %d\n",
+			    ath10k_cryptmode_param);
+		return -EINVAL;
+	}
+
+	ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT;
+	ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT;
+
+	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+		ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW;
+
+		/* Workaround:
+		 *
+		 * Firmware A-MSDU aggregation breaks with RAW Tx encap mode
+		 * and causes enormous performance issues (malformed frames,
+		 * etc.).
+		 *
+		 * Disabling A-MSDU makes RAW mode stable with heavy traffic,
+		 * albeit a bit slower than regular operation.
+		 */
+		ar->htt.max_num_amsdu = 1;
+	}
+
 	/* Backwards compatibility for firmwares without
 	 * ATH10K_FW_IE_WMI_OP_VERSION.
 	 */
@@ -1014,6 +1155,7 @@
 		ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
 		ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
 			WMI_STAT_PEER;
+		ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_10_1:
 	case ATH10K_FW_WMI_OP_VERSION_10_2:
@@ -1023,6 +1165,7 @@
 		ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
 		ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
 		ar->fw_stats_req_mask = WMI_STAT_PEER;
+		ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_TLV:
 		ar->max_num_peers = TARGET_TLV_NUM_PEERS;
@@ -1033,6 +1176,17 @@
 		ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
 		ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
 			WMI_STAT_PEER;
+		ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+		break;
+	case ATH10K_FW_WMI_OP_VERSION_10_4:
+		ar->max_num_peers = TARGET_10_4_NUM_PEERS;
+		ar->max_num_stations = TARGET_10_4_NUM_STATIONS;
+		ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
+		ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
+		ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
+		ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
+		ar->fw_stats_req_mask = WMI_STAT_PEER;
+		ar->max_spatial_stream = WMI_10_4_MAX_SPATIAL_STREAM;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
 	case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -1056,6 +1210,7 @@
 		case ATH10K_FW_WMI_OP_VERSION_TLV:
 			ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
 			break;
+		case ATH10K_FW_WMI_OP_VERSION_10_4:
 		case ATH10K_FW_WMI_OP_VERSION_UNSET:
 		case ATH10K_FW_WMI_OP_VERSION_MAX:
 			WARN_ON(1);
@@ -1272,13 +1427,13 @@
 void ath10k_core_stop(struct ath10k *ar)
 {
 	lockdep_assert_held(&ar->conf_mutex);
+	ath10k_debug_stop(ar);
 
 	/* try to suspend target */
 	if (ar->state != ATH10K_STATE_RESTARTING &&
 	    ar->state != ATH10K_STATE_UTF)
 		ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
 
-	ath10k_debug_stop(ar);
 	ath10k_hif_stop(ar);
 	ath10k_htt_tx_free(&ar->htt);
 	ath10k_htt_rx_free(&ar->htt);
@@ -1330,6 +1485,13 @@
 		goto err_free_firmware_files;
 	}
 
+	ret = ath10k_swap_code_seg_init(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to initialize code swap segment: %d\n",
+			   ret);
+		goto err_free_firmware_files;
+	}
+
 	mutex_lock(&ar->conf_mutex);
 
 	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
@@ -1470,9 +1632,15 @@
 	switch (hw_rev) {
 	case ATH10K_HW_QCA988X:
 		ar->regs = &qca988x_regs;
+		ar->hw_values = &qca988x_values;
 		break;
 	case ATH10K_HW_QCA6174:
 		ar->regs = &qca6174_regs;
+		ar->hw_values = &qca6174_values;
+		break;
+	case ATH10K_HW_QCA99X0:
+		ar->regs = &qca99x0_regs;
+		ar->hw_values = &qca99x0_values;
 		break;
 	default:
 		ath10k_err(ar, "unsupported core hardware revision %d\n",
@@ -1497,6 +1665,10 @@
 	if (!ar->workqueue)
 		goto err_free_mac;
 
+	ar->workqueue_aux = create_singlethread_workqueue("ath10k_aux_wq");
+	if (!ar->workqueue_aux)
+		goto err_free_wq;
+
 	mutex_init(&ar->conf_mutex);
 	spin_lock_init(&ar->data_lock);
 
@@ -1517,10 +1689,12 @@
 
 	ret = ath10k_debug_create(ar);
 	if (ret)
-		goto err_free_wq;
+		goto err_free_aux_wq;
 
 	return ar;
 
+err_free_aux_wq:
+	destroy_workqueue(ar->workqueue_aux);
 err_free_wq:
 	destroy_workqueue(ar->workqueue);
 
@@ -1536,6 +1710,9 @@
 	flush_workqueue(ar->workqueue);
 	destroy_workqueue(ar->workqueue);
 
+	flush_workqueue(ar->workqueue_aux);
+	destroy_workqueue(ar->workqueue_aux);
+
 	ath10k_debug_destroy(ar);
 	ath10k_mac_destroy(ar);
 }
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 78094f23c..1254214 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -36,6 +36,7 @@
 #include "spectral.h"
 #include "thermal.h"
 #include "wow.h"
+#include "swap.h"
 
 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -91,6 +92,7 @@
 		u8 tid;
 		u16 freq;
 		bool is_offchan;
+		bool nohwcrypt;
 		struct ath10k_htt_txbuf *txbuf;
 		u32 txbuf_paddr;
 	} __packed htt;
@@ -151,6 +153,7 @@
 	const struct wmi_ops *ops;
 
 	u32 num_mem_chunks;
+	u32 rx_decap_mode;
 	struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
 };
 
@@ -327,8 +330,8 @@
 			u32 uapsd;
 		} sta;
 		struct {
-			/* 127 stations; wmi limit */
-			u8 tim_bitmap[16];
+			/* 512 stations */
+			u8 tim_bitmap[64];
 			u8 tim_len;
 			u32 ssid_len;
 			u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -340,6 +343,7 @@
 	} u;
 
 	bool use_cts_prot;
+	bool nohwcrypt;
 	int num_legacy_stations;
 	int txpower;
 	struct wmi_wmm_params_all_arg wmm_params;
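
The bitmap growth from 16 to 64 bytes follows directly from the new 512-station ceiling at one TIM bit per association ID: 512 / 8 = 64. A quick standalone check (the AID-to-bit mapping shown is the usual bitmap convention, not necessarily the firmware's exact layout):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
            uint8_t tim[512 / 8];   /* 64 bytes, one bit per AID */
            int aid = 300;

            memset(tim, 0, sizeof(tim));
            tim[aid / 8] |= 1 << (aid % 8);  /* station 300 has buffered data */

            printf("bitmap: %zu bytes; byte %d = 0x%02x\n",
                   sizeof(tim), aid / 8, tim[aid / 8]);
            return 0;
    }
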
@@ -381,9 +385,6 @@
 	u32 reg_addr;
 	u32 nf_cal_period;
 
-	u8 htt_max_amsdu;
-	u8 htt_max_ampdu;
-
 	struct ath10k_fw_crash_data *fw_crash_data;
 };
 
@@ -452,16 +453,21 @@
 	ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
 
 	/* Don't trust error code from otp.bin */
-	ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+	ATH10K_FW_FEATURE_IGNORE_OTP_RESULT = 7,
 
 	/* Some firmware revisions pad 4th hw address to 4 byte boundary making
 	 * it 8 bytes long in Native Wifi Rx decap.
 	 */
-	ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+	ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING = 8,
 
 	/* Firmware supports bypassing PLL setting on init. */
 	ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT = 9,
 
+	/* Raw mode support. If supported, the firmware can receive and
+	 * transmit frames in raw mode.
+	 */
+	ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10,
+
 	/* keep last */
 	ATH10K_FW_FEATURE_COUNT,
 };
@@ -475,6 +481,15 @@
 	 * waiters should immediately cancel instead of waiting for a time out.
 	 */
 	ATH10K_FLAG_CRASH_FLUSH,
+
+	/* Use Raw mode instead of native WiFi Tx/Rx encap mode.
+	 * Raw mode supports both hardware and software crypto. Native WiFi only
+	 * supports hardware crypto.
+	 */
+	ATH10K_FLAG_RAW_MODE,
+
+	/* Disable HW crypto engine */
+	ATH10K_FLAG_HW_CRYPTO_DISABLED,
 };
 
 enum ath10k_cal_mode {
@@ -483,6 +498,13 @@
 	ATH10K_CAL_MODE_DT,
 };
 
+enum ath10k_crypt_mode {
+	/* Only use hardware crypto engine */
+	ATH10K_CRYPT_MODE_HW,
+	/* Only use software crypto engine */
+	ATH10K_CRYPT_MODE_SW,
+};
+
 static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
 {
 	switch (mode) {
@@ -532,6 +554,7 @@
 	u8 mac_addr[ETH_ALEN];
 
 	enum ath10k_hw_rev hw_rev;
+	u16 dev_id;
 	u32 chip_id;
 	u32 target_version;
 	u8 fw_version_major;
@@ -545,6 +568,7 @@
 	u32 ht_cap_info;
 	u32 vht_cap_info;
 	u32 num_rf_chains;
+	u32 max_spatial_stream;
 	/* protected by conf_mutex */
 	bool ani_enabled;
 
@@ -560,6 +584,7 @@
 	struct completion target_suspend;
 
 	const struct ath10k_hw_regs *regs;
+	const struct ath10k_hw_values *hw_values;
 	struct ath10k_bmi bmi;
 	struct ath10k_wmi wmi;
 	struct ath10k_htc htc;
@@ -570,6 +595,7 @@
 		const char *name;
 		u32 patch_load_addr;
 		int uart_pin;
+		u32 otp_exe_param;
 
 		/* This is true if given HW chip has a quirky Cycle Counter
 		 * wraparound which resets to 0x7fffffff instead of 0. All
@@ -578,6 +604,14 @@
 		 */
 		bool has_shifted_cc_wraparound;
 
+		/* Some chips expect the fragment descriptor to be contiguous
+		 * memory for any TX operation. Set the continuous_frag_desc
+		 * flag for hardware with this requirement.
+		 */
+		bool continuous_frag_desc;
+
+		u32 channel_counters_freq_hz;
+
 		struct ath10k_hw_params_fw {
 			const char *dir;
 			const char *fw;
@@ -602,6 +636,12 @@
 
 	const struct firmware *cal_file;
 
+	struct {
+		const void *firmware_codeswap_data;
+		size_t firmware_codeswap_len;
+		struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
+	} swap;
+
 	char spec_board_id[100];
 	bool spec_board_loaded;
 
@@ -617,6 +657,7 @@
 		bool is_roc;
 		int vdev_id;
 		int roc_freq;
+		bool roc_notify;
 	} scan;
 
 	struct {
@@ -656,6 +697,8 @@
 	struct completion vdev_setup_done;
 
 	struct workqueue_struct *workqueue;
+	/* Auxiliary workqueue */
+	struct workqueue_struct *workqueue_aux;
 
 	/* prevents concurrent FW reconfiguration */
 	struct mutex conf_mutex;
@@ -675,6 +718,11 @@
 	int max_num_stations;
 	int max_num_vdevs;
 	int max_num_tdls_vdevs;
+	int num_active_peers;
+	int num_tids;
+
+	struct work_struct svc_rdy_work;
+	struct sk_buff *svc_rdy_skb;
 
 	struct work_struct offchan_tx_work;
 	struct sk_buff_head offchan_tx_queue;
@@ -749,6 +797,9 @@
 				  enum ath10k_hw_rev hw_rev,
 				  const struct ath10k_hif_ops *hif_ops);
 void ath10k_core_destroy(struct ath10k *ar);
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+				     char *buf,
+				     size_t max_len);
 
 int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
 int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 8fa606a..bf033f4 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -124,7 +124,11 @@
 
 void ath10k_print_driver_info(struct ath10k *ar)
 {
-	ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
+	char fw_features[128] = {};
+
+	ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
+
+	ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n",
 		    ar->hw_params.name,
 		    ar->target_version,
 		    ar->chip_id,
@@ -137,8 +141,12 @@
 		    ar->htt.target_version_major,
 		    ar->htt.target_version_minor,
 		    ar->wmi.op_version,
+		    ar->htt.op_version,
 		    ath10k_cal_mode_str(ar->cal_mode),
-		    ar->max_num_stations);
+		    ar->max_num_stations,
+		    test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags),
+		    !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags),
+		    fw_features);
 	ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
 		    config_enabled(CONFIG_ATH10K_DEBUG),
 		    config_enabled(CONFIG_ATH10K_DEBUGFS),
@@ -315,7 +323,7 @@
 	ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats);
 	if (ret) {
 		ath10k_warn(ar, "failed to pull fw stats: %d\n", ret);
-		goto unlock;
+		goto free;
 	}
 
 	/* Stat data may exceed htc-wmi buffer limit. In such case firmware
@@ -378,7 +386,6 @@
 	ath10k_debug_fw_stats_vdevs_free(&stats.vdevs);
 	ath10k_debug_fw_stats_peers_free(&stats.peers);
 
-unlock:
 	spin_unlock_bh(&ar->data_lock);
 }
 
@@ -1357,12 +1364,8 @@
 
 	mutex_lock(&ar->conf_mutex);
 
-	if (ar->debug.htt_max_amsdu)
-		amsdu = ar->debug.htt_max_amsdu;
-
-	if (ar->debug.htt_max_ampdu)
-		ampdu = ar->debug.htt_max_ampdu;
-
+	amsdu = ar->htt.max_num_amsdu;
+	ampdu = ar->htt.max_num_ampdu;
 	mutex_unlock(&ar->conf_mutex);
 
 	len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu);
@@ -1396,8 +1399,8 @@
 		goto out;
 
 	res = count;
-	ar->debug.htt_max_amsdu = amsdu;
-	ar->debug.htt_max_ampdu = ampdu;
+	ar->htt.max_num_amsdu = amsdu;
+	ar->htt.max_num_ampdu = ampdu;
 
 out:
 	mutex_unlock(&ar->conf_mutex);
@@ -1899,9 +1902,6 @@
 	if (ar->debug.htt_stats_mask != 0)
 		cancel_delayed_work(&ar->debug.htt_stats_dwork);
 
-	ar->debug.htt_max_amsdu = 0;
-	ar->debug.htt_max_ampdu = 0;
-
 	ath10k_wmi_pdev_pktlog_disable(ar);
 }
 
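The debug.c hunk above reroutes the parse-failure path from the old unlock label to the frees, so partially built stats lists are no longer leaked. A standalone sketch of the cleanup ladder the fix restores; the helpers here are stand-ins for the driver's own:

#include <stdio.h>

static int do_parse(int fail) { return fail ? -1 : 0; }
static void free_stats_lists(void) { puts("stats lists freed"); }

static int handle_stats_event(int fail)
{
	int ret;

	ret = do_parse(fail);
	if (ret)
		goto free;	/* the old "goto unlock" skipped the frees */

	/* ... consume the parsed stats ... */

free:
	free_stats_lists();	/* reached on every path */
	return ret;
}

int main(void)
{
	printf("%d\n", handle_stats_event(1));	/* -1, lists still freed */
	return 0;
}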
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 85bfa2a..32d9ff1 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -145,8 +145,10 @@
 	skb_cb->eid = eid;
 	skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
 	ret = dma_mapping_error(dev, skb_cb->paddr);
-	if (ret)
+	if (ret) {
+		ret = -EIO;
 		goto err_credits;
+	}
 
 	sg_item.transfer_id = ep->eid;
 	sg_item.transfer_context = skb;
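dma_mapping_error() returns a nonzero truth value, not an errno, so returning it directly would propagate a meaningless positive value to callers; the hunk normalizes it to -EIO. A standalone sketch of the same normalization, with the mapping check faked:

#include <errno.h>
#include <stdio.h>

/* stand-in for dma_mapping_error(): nonzero means the mapping failed,
 * but the value itself is not an errno and must not be returned as one */
static int fake_dma_mapping_error(unsigned long paddr)
{
	return paddr == 0;
}

static int map_for_tx(unsigned long paddr)
{
	if (fake_dma_mapping_error(paddr))
		return -EIO;	/* normalize to a real error code */
	return 0;
}

int main(void)
{
	printf("%d\n", map_for_tx(0));		/* -5 */
	printf("%d\n", map_for_tx(0x1000));	/* 0 */
	return 0;
}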
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 6da6ef2..3e6ba63 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -102,6 +102,43 @@
 	[HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
 };
 
+static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = {
+	[HTT_10_4_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+	[HTT_10_4_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+	[HTT_10_4_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+	[HTT_10_4_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+	[HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+	[HTT_10_4_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+	[HTT_10_4_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+	[HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+	[HTT_10_4_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+	[HTT_10_4_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+	[HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+	[HTT_10_4_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+	[HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+	[HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND] =
+				HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+	[HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+				HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+	[HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+	[HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+				HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+	[HTT_10_4_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+	[HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+				HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+	[HTT_10_4_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+	[HTT_10_4_T2H_MSG_TYPE_EN_STATS] = HTT_T2H_MSG_TYPE_EN_STATS,
+	[HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
+	[HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] =
+				HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+	[HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF] =
+				HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
+	[HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] =
+				HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+	[HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND] =
+				HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
+};
+
 int ath10k_htt_connect(struct ath10k_htt *htt)
 {
 	struct ath10k_htc_svc_conn_req conn_req;
@@ -147,6 +184,10 @@
 		2; /* ip4 dscp or ip6 priority */
 
 	switch (ar->htt.op_version) {
+	case ATH10K_FW_HTT_OP_VERSION_10_4:
+		ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types;
+		ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS;
+		break;
 	case ATH10K_FW_HTT_OP_VERSION_10_1:
 		ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
 		ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
@@ -205,8 +246,31 @@
 	}
 
 	status = ath10k_htt_verify_version(htt);
+	if (status) {
+		ath10k_warn(ar, "failed to verify htt version: %d\n",
+			    status);
+		return status;
+	}
+
+	status = ath10k_htt_send_frag_desc_bank_cfg(htt);
 	if (status)
 		return status;
 
-	return ath10k_htt_send_rx_ring_cfg_ll(htt);
+	status = ath10k_htt_send_rx_ring_cfg_ll(htt);
+	if (status) {
+		ath10k_warn(ar, "failed to setup rx ring: %d\n",
+			    status);
+		return status;
+	}
+
+	status = ath10k_htt_h2t_aggr_cfg_msg(htt,
+					     htt->max_num_ampdu,
+					     htt->max_num_amsdu);
+	if (status) {
+		ath10k_warn(ar, "failed to setup amsdu/ampdu limit: %d\n",
+			    status);
+		return status;
+	}
+
+	return 0;
 }
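The per-version T2H tables above decouple the firmware's raw message IDs from the driver's abstract enum. A minimal sketch of the bounds-checked translation this enables; the table is truncated to two entries for illustration:

#include <stdio.h>

enum abstract_type { MSG_VERSION_CONF, MSG_RX_IND };

static const enum abstract_type table_10_4[] = {
	[0x0] = MSG_VERSION_CONF,
	[0x1] = MSG_RX_IND,
};

static int translate(unsigned int raw, const enum abstract_type *tbl,
		     unsigned int max)
{
	if (raw >= max)
		return -1;	/* unknown firmware message, drop it */
	return tbl[raw];
}

int main(void)
{
	printf("%d\n", translate(0x1, table_10_4, 2));	/* 1 */
	printf("%d\n", translate(0x9, table_10_4, 2));	/* -1 */
	return 0;
}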
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 7e8a0d8..5731875 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -83,10 +83,39 @@
  * around the mask + shift defs.
  */
 struct htt_data_tx_desc_frag {
-	__le32 paddr;
-	__le32 len;
+	union {
+		struct double_word_addr {
+			__le32 paddr;
+			__le32 len;
+		} __packed dword_addr;
+		struct triple_word_addr {
+			__le32 paddr_lo;
+			__le16 paddr_hi;
+			__le16 len_16;
+		} __packed tword_addr;
+	} __packed;
 } __packed;
 
+struct htt_msdu_ext_desc {
+	__le32 tso_flag[3];
+	__le16 ip_identification;
+	u8 flags;
+	u8 reserved;
+	struct htt_data_tx_desc_frag frags[6];
+};
+
+#define	HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE		BIT(0)
+#define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE	BIT(1)
+#define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE	BIT(2)
+#define	HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE	BIT(3)
+#define	HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE	BIT(4)
+
+#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
+				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
+				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
+				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
+				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)
+
 enum htt_data_tx_desc_flags0 {
 	HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
 	HTT_DATA_TX_DESC_FLAGS0_NO_AGGR         = 1 << 1,
@@ -255,6 +284,9 @@
 } __packed;
 
 #define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
+struct htt_mgmt_tx_desc_qca99x0 {
+	__le32 rate;
+} __packed;
 
 struct htt_mgmt_tx_desc {
 	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
@@ -263,6 +295,9 @@
 	__le32 len;
 	__le32 vdev_id;
 	u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
+	union {
+		struct htt_mgmt_tx_desc_qca99x0 qca99x0;
+	} __packed;
 } __packed;
 
 enum htt_mgmt_tx_status {
@@ -349,6 +384,38 @@
 	HTT_TLV_T2H_NUM_MSGS
 };
 
+enum htt_10_4_t2h_msg_type {
+	HTT_10_4_T2H_MSG_TYPE_VERSION_CONF           = 0x0,
+	HTT_10_4_T2H_MSG_TYPE_RX_IND                 = 0x1,
+	HTT_10_4_T2H_MSG_TYPE_RX_FLUSH               = 0x2,
+	HTT_10_4_T2H_MSG_TYPE_PEER_MAP               = 0x3,
+	HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP             = 0x4,
+	HTT_10_4_T2H_MSG_TYPE_RX_ADDBA               = 0x5,
+	HTT_10_4_T2H_MSG_TYPE_RX_DELBA               = 0x6,
+	HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND           = 0x7,
+	HTT_10_4_T2H_MSG_TYPE_PKTLOG                 = 0x8,
+	HTT_10_4_T2H_MSG_TYPE_STATS_CONF             = 0x9,
+	HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND            = 0xa,
+	HTT_10_4_T2H_MSG_TYPE_SEC_IND                = 0xb,
+	HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND          = 0xc,
+	HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND         = 0xd,
+	HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND      = 0xe,
+	HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE            = 0xf,
+	HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND   = 0x10,
+	HTT_10_4_T2H_MSG_TYPE_RX_PN_IND              = 0x11,
+	HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12,
+	HTT_10_4_T2H_MSG_TYPE_TEST                   = 0x13,
+	HTT_10_4_T2H_MSG_TYPE_EN_STATS               = 0x14,
+	HTT_10_4_T2H_MSG_TYPE_AGGR_CONF              = 0x15,
+	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND           = 0x16,
+	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF          = 0x17,
+	HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD         = 0x18,
+	/* 0x19 to 0x2f are reserved */
+	HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND     = 0x30,
+	/* keep this last */
+	HTT_10_4_T2H_NUM_MSGS
+};
+
 enum htt_t2h_msg_type {
 	HTT_T2H_MSG_TYPE_VERSION_CONF,
 	HTT_T2H_MSG_TYPE_RX_IND,
@@ -375,6 +442,10 @@
 	HTT_T2H_MSG_TYPE_AGGR_CONF,
 	HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
 	HTT_T2H_MSG_TYPE_TEST,
+	HTT_T2H_MSG_TYPE_EN_STATS,
+	HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+	HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
+	HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
 	/* keep this last */
 	HTT_T2H_NUM_MSGS
 };
@@ -1325,6 +1396,8 @@
 	u8 target_version_minor;
 	struct completion target_version_received;
 	enum ath10k_fw_htt_op_version op_version;
+	u8 max_num_amsdu;
+	u8 max_num_ampdu;
 
 	const enum htt_t2h_msg_type *t2h_msg_types;
 	u32 t2h_msg_types_max;
@@ -1430,6 +1503,11 @@
 
 	/* rx_status template */
 	struct ieee80211_rx_status rx_status;
+
+	struct {
+		dma_addr_t paddr;
+		struct htt_msdu_ext_desc *vaddr;
+	} frag_desc;
 };
 
 #define RX_HTT_HDR_STATUS_LEN 64
@@ -1482,6 +1560,12 @@
 #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7	/* 2^7 = 128 */
 #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
 
+/* These values are the defaults in most firmware revisions and apparently
+ * a sweet spot performance-wise.
+ */
+#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3
+#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64
+
 int ath10k_htt_connect(struct ath10k_htt *htt);
 int ath10k_htt_init(struct ath10k *ar);
 int ath10k_htt_setup(struct ath10k_htt *htt);
@@ -1497,6 +1581,7 @@
 void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
 int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
+int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
 int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
 int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
 				u8 max_subfrms_ampdu,
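The htt_data_tx_desc_frag union above adds a three-word addressing form that splits a wide DMA address into a 32-bit low part and a 16-bit high part, leaving 16 bits for the length. A standalone sketch of that packing, with the little-endian (__le32/__le16) conversions omitted for brevity:

#include <stdint.h>
#include <stdio.h>

struct tword_addr {
	uint32_t paddr_lo;	/* low 32 bits of the DMA address */
	uint16_t paddr_hi;	/* bits 32..47 */
	uint16_t len_16;	/* fragment length */
};

static struct tword_addr pack(uint64_t paddr, uint16_t len)
{
	struct tword_addr t = {
		.paddr_lo = (uint32_t)paddr,
		.paddr_hi = (uint16_t)(paddr >> 32),
		.len_16   = len,
	};
	return t;
}

int main(void)
{
	struct tword_addr t = pack(0x1234abcd5678ULL, 256);

	printf("%x %x %u\n", (unsigned)t.paddr_lo,
	       (unsigned)t.paddr_hi, (unsigned)t.len_16);
	/* abcd5678 1234 256 */
	return 0;
}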
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 89eb16b..1b7a043 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -368,7 +368,7 @@
 		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
 					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
 					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
-		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
+		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
 			      RX_MSDU_START_INFO0_MSDU_LENGTH);
 		msdu_chained = rx_desc->frag_info.ring2_more_count;
 
@@ -394,7 +394,7 @@
 			msdu_chaining = 1;
 		}
 
-		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
+		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
 				RX_MSDU_END_INFO0_LAST_MSDU;
 
 		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
@@ -740,7 +740,7 @@
 	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
 		return NULL;
 
-	if (!(rxd->msdu_end.info0 &
+	if (!(rxd->msdu_end.common.info0 &
 	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
 		return NULL;
 
@@ -991,9 +991,9 @@
 	bool is_last;
 
 	rxd = (void *)msdu->data - sizeof(*rxd);
-	is_first = !!(rxd->msdu_end.info0 &
+	is_first = !!(rxd->msdu_end.common.info0 &
 		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
-	is_last = !!(rxd->msdu_end.info0 &
+	is_last = !!(rxd->msdu_end.common.info0 &
 		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
 
 	/* Delivered decapped frame:
@@ -1017,9 +1017,8 @@
 	skb_trim(msdu, msdu->len - FCS_LEN);
 
 	/* In most cases this will be true for sniffed frames. It makes sense
-	 * to deliver them as-is without stripping the crypto param. This would
-	 * also make sense for software based decryption (which is not
-	 * implemented in ath10k).
+	 * to deliver them as-is without stripping the crypto param. This is
+	 * necessary for software based decryption.
 	 *
 	 * If there's no error then the frame is decrypted. At least that is
 	 * the case for frames that come in via fragmented rx indication.
@@ -1104,9 +1103,9 @@
 	rxd = (void *)msdu->data - sizeof(*rxd);
 	hdr = (void *)rxd->rx_hdr_status;
 
-	is_first = !!(rxd->msdu_end.info0 &
+	is_first = !!(rxd->msdu_end.common.info0 &
 		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
-	is_last = !!(rxd->msdu_end.info0 &
+	is_last = !!(rxd->msdu_end.common.info0 &
 		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
 	is_amsdu = !(is_first && is_last);
 
@@ -1201,7 +1200,6 @@
 {
 	struct htt_rx_desc *rxd;
 	enum rx_msdu_decap_format decap;
-	struct ieee80211_hdr *hdr;
 
 	/* First msdu's decapped header:
 	 * [802.11 header] <-- padded to 4 bytes long
@@ -1215,8 +1213,7 @@
 	 */
 
 	rxd = (void *)msdu->data - sizeof(*rxd);
-	hdr = (void *)rxd->rx_hdr_status;
-	decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
 		   RX_MSDU_START_INFO1_DECAP_FORMAT);
 
 	switch (decap) {
@@ -1246,7 +1243,7 @@
 
 	rxd = (void *)skb->data - sizeof(*rxd);
 	flags = __le32_to_cpu(rxd->attention.flags);
-	info = __le32_to_cpu(rxd->msdu_start.info1);
+	info = __le32_to_cpu(rxd->msdu_start.common.info1);
 
 	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
 	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
@@ -1439,7 +1436,7 @@
 
 	first = skb_peek(amsdu);
 	rxd = (void *)first->data - sizeof(*rxd);
-	decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
 		   RX_MSDU_START_INFO1_DECAP_FORMAT);
 
 	if (!chained)
@@ -1633,8 +1630,6 @@
 	__le16 msdu_id;
 	int i;
 
-	lockdep_assert_held(&htt->tx_lock);
-
 	switch (status) {
 	case HTT_DATA_TX_STATUS_NO_ACK:
 		tx_done.no_ack = true;
@@ -1759,14 +1754,14 @@
 		__skb_queue_tail(amsdu, msdu);
 
 		rxd = (void *)msdu->data - sizeof(*rxd);
-		if (rxd->msdu_end.info0 &
+		if (rxd->msdu_end.common.info0 &
 		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
 			break;
 	}
 
 	msdu = skb_peek_tail(amsdu);
 	rxd = (void *)msdu->data - sizeof(*rxd);
-	if (!(rxd->msdu_end.info0 &
+	if (!(rxd->msdu_end.common.info0 &
 	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
 		skb_queue_splice_init(amsdu, list);
 		return -EAGAIN;
@@ -2000,15 +1995,11 @@
 			break;
 		}
 
-		spin_lock_bh(&htt->tx_lock);
 		ath10k_txrx_tx_unref(htt, &tx_done);
-		spin_unlock_bh(&htt->tx_lock);
 		break;
 	}
 	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
-		spin_lock_bh(&htt->tx_lock);
-		__skb_queue_tail(&htt->tx_compl_q, skb);
-		spin_unlock_bh(&htt->tx_lock);
+		skb_queue_tail(&htt->tx_compl_q, skb);
 		tasklet_schedule(&htt->txrx_compl_task);
 		return;
 	case HTT_T2H_MSG_TYPE_SEC_IND: {
@@ -2074,6 +2065,12 @@
 		break;
 	case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
 		break;
+	case HTT_T2H_MSG_TYPE_AGGR_CONF:
+		break;
+	case HTT_T2H_MSG_TYPE_EN_STATS:
+	case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
+	case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
+	case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND:
 	default:
 		ath10k_warn(ar, "htt event (%d) not handled\n",
 			    resp->hdr.msg_type);
@@ -2093,12 +2090,10 @@
 	struct htt_resp *resp;
 	struct sk_buff *skb;
 
-	spin_lock_bh(&htt->tx_lock);
-	while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
+	while ((skb = skb_dequeue(&htt->tx_compl_q))) {
 		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
 		dev_kfree_skb_any(skb);
 	}
-	spin_unlock_bh(&htt->tx_lock);
 
 	spin_lock_bh(&htt->rx_ring.lock);
 	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
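The htt_rx.c hunks drop the tx_lock around the completion queue in favour of skb_queue_tail()/skb_dequeue(), which take the queue's own internal lock. A small pthread sketch of the pattern, purely illustrative:

#include <pthread.h>
#include <stdio.h>

/* The completion queue carries its own lock (what skb_queue_tail()
 * uses internally), so enqueue no longer needs the unrelated tx lock. */
struct compl_q {
	pthread_mutex_t lock;
	int depth;	/* stands in for the sk_buff list */
};

static void q_enqueue(struct compl_q *q)
{
	pthread_mutex_lock(&q->lock);
	q->depth++;
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct compl_q q = { PTHREAD_MUTEX_INITIALIZER, 0 };

	q_enqueue(&q);
	printf("%d\n", q.depth);	/* 1 */
	return 0;
}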
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index a60ef7d..43aa5e2 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -63,7 +63,8 @@
 
 	lockdep_assert_held(&htt->tx_lock);
 
-	ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
+	ret = idr_alloc(&htt->pending_tx, skb, 0,
+			htt->max_num_pending_tx, GFP_ATOMIC);
 
 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
 
@@ -84,6 +85,7 @@
 int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
+	int ret, size;
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
 		   htt->max_num_pending_tx);
@@ -94,11 +96,31 @@
 	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
 				       sizeof(struct ath10k_htt_txbuf), 4, 0);
 	if (!htt->tx_pool) {
-		idr_destroy(&htt->pending_tx);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto free_idr_pending_tx;
 	}
 
+	if (!ar->hw_params.continuous_frag_desc)
+		goto skip_frag_desc_alloc;
+
+	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
+						  &htt->frag_desc.paddr,
+						  GFP_DMA);
+	if (!htt->frag_desc.vaddr) {
+		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
+		ret = -ENOMEM;
+		goto free_tx_pool;
+	}
+
+skip_frag_desc_alloc:
 	return 0;
+
+free_tx_pool:
+	dma_pool_destroy(htt->tx_pool);
+free_idr_pending_tx:
+	idr_destroy(&htt->pending_tx);
+	return ret;
 }
 
 static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
@@ -112,18 +134,25 @@
 	tx_done.discard = 1;
 	tx_done.msdu_id = msdu_id;
 
-	spin_lock_bh(&htt->tx_lock);
 	ath10k_txrx_tx_unref(htt, &tx_done);
-	spin_unlock_bh(&htt->tx_lock);
 
 	return 0;
 }
 
 void ath10k_htt_tx_free(struct ath10k_htt *htt)
 {
+	int size;
+
 	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
 	idr_destroy(&htt->pending_tx);
 	dma_pool_destroy(htt->tx_pool);
+
+	if (htt->frag_desc.vaddr) {
+		size = htt->max_num_pending_tx *
+				  sizeof(struct htt_msdu_ext_desc);
+		dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
+				  htt->frag_desc.paddr);
+	}
 }
 
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@@ -201,6 +230,49 @@
 	return 0;
 }
 
+int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	struct sk_buff *skb;
+	struct htt_cmd *cmd;
+	int ret, size;
+
+	if (!ar->hw_params.continuous_frag_desc)
+		return 0;
+
+	if (!htt->frag_desc.paddr) {
+		ath10k_warn(ar, "invalid frag desc memory\n");
+		return -EINVAL;
+	}
+
+	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
+	skb = ath10k_htc_alloc_skb(ar, size);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, size);
+	cmd = (struct htt_cmd *)skb->data;
+	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
+	cmd->frag_desc_bank_cfg.info = 0;
+	cmd->frag_desc_bank_cfg.num_banks = 1;
+	cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
+	cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
+				__cpu_to_le32(htt->frag_desc.paddr);
+	cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
+	cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
+				__cpu_to_le16(htt->max_num_pending_tx - 1);
+
+	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+	if (ret) {
+		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
+			    ret);
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
 int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
@@ -355,12 +427,11 @@
 
 	spin_lock_bh(&htt->tx_lock);
 	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+	spin_unlock_bh(&htt->tx_lock);
 	if (res < 0) {
-		spin_unlock_bh(&htt->tx_lock);
 		goto err_tx_dec;
 	}
 	msdu_id = res;
-	spin_unlock_bh(&htt->tx_lock);
 
 	txdesc = ath10k_htc_alloc_skb(ar, len);
 	if (!txdesc) {
@@ -371,11 +442,15 @@
 	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
 				       DMA_TO_DEVICE);
 	res = dma_mapping_error(dev, skb_cb->paddr);
-	if (res)
+	if (res) {
+		res = -EIO;
 		goto err_free_txdesc;
+	}
 
 	skb_put(txdesc, len);
 	cmd = (struct htt_cmd *)txdesc->data;
+	memset(cmd, 0, len);
+
 	cmd->hdr.msg_type         = HTT_H2T_MSG_TYPE_MGMT_TX;
 	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
 	cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
@@ -422,6 +497,7 @@
 	u16 msdu_id, flags1 = 0;
 	dma_addr_t paddr = 0;
 	u32 frags_paddr = 0;
+	struct htt_msdu_ext_desc *ext_desc = NULL;
 
 	res = ath10k_htt_tx_inc_pending(htt);
 	if (res)
@@ -429,12 +505,11 @@
 
 	spin_lock_bh(&htt->tx_lock);
 	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+	spin_unlock_bh(&htt->tx_lock);
 	if (res < 0) {
-		spin_unlock_bh(&htt->tx_lock);
 		goto err_tx_dec;
 	}
 	msdu_id = res;
-	spin_unlock_bh(&htt->tx_lock);
 
 	prefetch_len = min(htt->prefetch_len, msdu->len);
 	prefetch_len = roundup(prefetch_len, 4);
@@ -450,14 +525,20 @@
 	if ((ieee80211_is_action(hdr->frame_control) ||
 	     ieee80211_is_deauth(hdr->frame_control) ||
 	     ieee80211_is_disassoc(hdr->frame_control)) &&
-	     ieee80211_has_protected(hdr->frame_control))
+	     ieee80211_has_protected(hdr->frame_control)) {
 		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+	} else if (!skb_cb->htt.nohwcrypt &&
+		   skb_cb->txmode == ATH10K_HW_TXRX_RAW) {
+		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+	}
 
 	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
 				       DMA_TO_DEVICE);
 	res = dma_mapping_error(dev, skb_cb->paddr);
-	if (res)
+	if (res) {
+		res = -EIO;
 		goto err_free_txbuf;
+	}
 
 	switch (skb_cb->txmode) {
 	case ATH10K_HW_TXRX_RAW:
@@ -465,16 +546,30 @@
 		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
 		/* pass through */
 	case ATH10K_HW_TXRX_ETHERNET:
-		frags = skb_cb->htt.txbuf->frags;
+		if (ar->hw_params.continuous_frag_desc) {
+			memset(&htt->frag_desc.vaddr[msdu_id], 0,
+			       sizeof(struct htt_msdu_ext_desc));
+			frags = (struct htt_data_tx_desc_frag *)
+				&htt->frag_desc.vaddr[msdu_id].frags;
+			ext_desc = &htt->frag_desc.vaddr[msdu_id];
+			frags[0].tword_addr.paddr_lo =
+				__cpu_to_le32(skb_cb->paddr);
+			frags[0].tword_addr.paddr_hi = 0;
+			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
 
-		frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
-		frags[0].len = __cpu_to_le32(msdu->len);
-		frags[1].paddr = 0;
-		frags[1].len = 0;
+			frags_paddr =  htt->frag_desc.paddr +
+				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
+		} else {
+			frags = skb_cb->htt.txbuf->frags;
+			frags[0].dword_addr.paddr =
+				__cpu_to_le32(skb_cb->paddr);
+			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
+			frags[1].dword_addr.paddr = 0;
+			frags[1].dword_addr.len = 0;
 
+			frags_paddr = skb_cb->htt.txbuf_paddr;
+		}
 		flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
-
-		frags_paddr = skb_cb->htt.txbuf_paddr;
 		break;
 	case ATH10K_HW_TXRX_MGMT:
 		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
@@ -508,14 +603,20 @@
 			prefetch_len);
 	skb_cb->htt.txbuf->htc_hdr.flags = 0;
 
+	if (skb_cb->htt.nohwcrypt)
+		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
 	if (!skb_cb->is_protected)
 		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
 
 	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
 	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
-	if (msdu->ip_summed == CHECKSUM_PARTIAL) {
+	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
 		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
 		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+		if (ar->hw_params.continuous_frag_desc)
+			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
 	}
 
 	/* Prevent firmware from sending up tx inspection requests. There's
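ath10k_htt_send_frag_desc_bank_cfg() advertises one contiguous bank of MSDU extension descriptors, so the firmware can derive any descriptor's address from the bank base and the msdu_id alone, the same arithmetic the TX path uses for frags_paddr. A standalone sketch; DESC_SIZE is illustrative:

#include <stdint.h>
#include <stdio.h>

#define DESC_SIZE 64	/* stands in for sizeof(struct htt_msdu_ext_desc) */

/* With one contiguous bank, descriptor i lives at a fixed offset, so
 * host and firmware agree on its address from the base alone
 * (bank_min_id..bank_max_id cover msdu_id 0..N-1). */
static uint64_t frag_desc_paddr(uint64_t bank_base, unsigned int msdu_id)
{
	return bank_base + (uint64_t)msdu_id * DESC_SIZE;
}

int main(void)
{
	printf("0x%llx\n",
	       (unsigned long long)frag_desc_paddr(0x40000000ULL, 5));
	/* 0x40000140 */
	return 0;
}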
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 5997f00..7b84d08 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -34,8 +34,15 @@
 	.ce7_base_address		= 0x00059000,
 	.soc_reset_control_si0_rst_mask	= 0x00000001,
 	.soc_reset_control_ce_rst_mask	= 0x00040000,
-	.soc_chip_id_address		= 0x00ec,
-	.scratch_3_address		= 0x0030,
+	.soc_chip_id_address		= 0x000000ec,
+	.scratch_3_address		= 0x00000030,
+	.fw_indicator_address		= 0x00009030,
+	.pcie_local_base_address	= 0x00080000,
+	.ce_wrap_intr_sum_host_msi_lsb	= 0x00000008,
+	.ce_wrap_intr_sum_host_msi_mask	= 0x0000ff00,
+	.pcie_intr_fw_mask		= 0x00000400,
+	.pcie_intr_ce_mask_all		= 0x0007f800,
+	.pcie_intr_clr_address		= 0x00000014,
 };
 
 const struct ath10k_hw_regs qca6174_regs = {
@@ -54,8 +61,79 @@
 	.ce7_base_address			= 0x00036000,
 	.soc_reset_control_si0_rst_mask		= 0x00000000,
 	.soc_reset_control_ce_rst_mask		= 0x00000001,
-	.soc_chip_id_address			= 0x000f0,
-	.scratch_3_address			= 0x0028,
+	.soc_chip_id_address			= 0x000000f0,
+	.scratch_3_address			= 0x00000028,
+	.fw_indicator_address			= 0x0003a028,
+	.pcie_local_base_address		= 0x00080000,
+	.ce_wrap_intr_sum_host_msi_lsb		= 0x00000008,
+	.ce_wrap_intr_sum_host_msi_mask		= 0x0000ff00,
+	.pcie_intr_fw_mask			= 0x00000400,
+	.pcie_intr_ce_mask_all			= 0x0007f800,
+	.pcie_intr_clr_address			= 0x00000014,
+};
+
+const struct ath10k_hw_regs qca99x0_regs = {
+	.rtc_state_cold_reset_mask		= 0x00000400,
+	.rtc_soc_base_address			= 0x00080000,
+	.rtc_wmac_base_address			= 0x00000000,
+	.soc_core_base_address			= 0x00082000,
+	.ce_wrapper_base_address		= 0x0004d000,
+	.ce0_base_address			= 0x0004a000,
+	.ce1_base_address			= 0x0004a400,
+	.ce2_base_address			= 0x0004a800,
+	.ce3_base_address			= 0x0004ac00,
+	.ce4_base_address			= 0x0004b000,
+	.ce5_base_address			= 0x0004b400,
+	.ce6_base_address			= 0x0004b800,
+	.ce7_base_address			= 0x0004bc00,
+	/* Note: qca99x0 supports up to 12 Copy Engines. Other than the
+	 * addresses of CE0 and CE1, no other copy engine is directly
+	 * referenced in the code. It is not really necessary to assign
+	 * addresses for the newly supported CEs in this table.
+	 *	Copy Engine		Address
+	 *	CE8			0x0004c000
+	 *	CE9			0x0004c400
+	 *	CE10			0x0004c800
+	 *	CE11			0x0004cc00
+	 */
+	.soc_reset_control_si0_rst_mask		= 0x00000001,
+	.soc_reset_control_ce_rst_mask		= 0x00000100,
+	.soc_chip_id_address			= 0x000000ec,
+	.scratch_3_address			= 0x00040050,
+	.fw_indicator_address			= 0x00040050,
+	.pcie_local_base_address		= 0x00000000,
+	.ce_wrap_intr_sum_host_msi_lsb		= 0x0000000c,
+	.ce_wrap_intr_sum_host_msi_mask		= 0x00fff000,
+	.pcie_intr_fw_mask			= 0x00100000,
+	.pcie_intr_ce_mask_all			= 0x000fff00,
+	.pcie_intr_clr_address			= 0x00000010,
+};
+
+const struct ath10k_hw_values qca988x_values = {
+	.rtc_state_val_on		= 3,
+	.ce_count			= 8,
+	.msi_assign_ce_max		= 7,
+	.num_target_ce_config_wlan	= 7,
+	.ce_desc_meta_data_mask		= 0xFFFC,
+	.ce_desc_meta_data_lsb		= 2,
+};
+
+const struct ath10k_hw_values qca6174_values = {
+	.rtc_state_val_on		= 3,
+	.ce_count			= 8,
+	.msi_assign_ce_max		= 7,
+	.num_target_ce_config_wlan	= 7,
+	.ce_desc_meta_data_mask		= 0xFFFC,
+	.ce_desc_meta_data_lsb		= 2,
+};
+
+const struct ath10k_hw_values qca99x0_values = {
+	.rtc_state_val_on		= 5,
+	.ce_count			= 12,
+	.msi_assign_ce_max		= 12,
+	.num_target_ce_config_wlan	= 10,
+	.ce_desc_meta_data_mask		= 0xFFF0,
+	.ce_desc_meta_data_lsb		= 4,
 };
 
 void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
@@ -74,6 +152,6 @@
 	cc -= cc_prev - cc_fix;
 	rcc -= rcc_prev;
 
-	survey->time = CCNT_TO_MSEC(cc);
-	survey->time_busy = CCNT_TO_MSEC(rcc);
+	survey->time = CCNT_TO_MSEC(ar, cc);
+	survey->time_busy = CCNT_TO_MSEC(ar, rcc);
 }
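The CCNT_TO_MSEC change replaces the hard-coded 88000 divisor (88 MHz clock, i.e. 88000 cycles per millisecond) with the per-chip channel_counters_freq_hz field. A standalone sketch of the parameterized conversion, assuming the field keeps the same cycles-per-millisecond convention as the old constant:

#include <stdio.h>

struct hw_params {
	unsigned int channel_counters_freq;	/* cycles per millisecond */
};

static unsigned int ccnt_to_msec(const struct hw_params *hw,
				 unsigned int cycles)
{
	return cycles / hw->channel_counters_freq;
}

int main(void)
{
	struct hw_params qca988x = { .channel_counters_freq = 88000 };

	printf("%u ms\n", ccnt_to_msec(&qca988x, 880000));	/* 10 ms */
	return 0;
}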
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 85cca29..23afcda 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -72,6 +72,18 @@
 #define QCA6174_HW_3_0_BOARD_DATA_FILE	"board.bin"
 #define QCA6174_HW_3_0_PATCH_LOAD_ADDR	0x1234
 
+/* QCA99X0 1.0 definitions (unsupported) */
+#define QCA99X0_HW_1_0_CHIP_ID_REV     0x0
+
+/* QCA99X0 2.0 definitions */
+#define QCA99X0_HW_2_0_DEV_VERSION     0x01000000
+#define QCA99X0_HW_2_0_CHIP_ID_REV     0x1
+#define QCA99X0_HW_2_0_FW_DIR          ATH10K_FW_DIR "/QCA99X0/hw2.0"
+#define QCA99X0_HW_2_0_FW_FILE         "firmware.bin"
+#define QCA99X0_HW_2_0_OTP_FILE        "otp.bin"
+#define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
+#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR	0x1234
+
 #define ATH10K_FW_API2_FILE		"firmware-2.bin"
 #define ATH10K_FW_API3_FILE		"firmware-3.bin"
 
@@ -112,6 +124,9 @@
 	 * FW API 5 and above.
 	 */
 	ATH10K_FW_IE_HTT_OP_VERSION = 6,
+
+	/* Code swap image for firmware binary */
+	ATH10K_FW_IE_FW_CODE_SWAP_IMAGE = 7,
 };
 
 enum ath10k_fw_wmi_op_version {
@@ -122,6 +137,7 @@
 	ATH10K_FW_WMI_OP_VERSION_10_2 = 3,
 	ATH10K_FW_WMI_OP_VERSION_TLV = 4,
 	ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5,
+	ATH10K_FW_WMI_OP_VERSION_10_4 = 6,
 
 	/* keep last */
 	ATH10K_FW_WMI_OP_VERSION_MAX,
@@ -137,6 +153,8 @@
 
 	ATH10K_FW_HTT_OP_VERSION_TLV = 3,
 
+	ATH10K_FW_HTT_OP_VERSION_10_4 = 4,
+
 	/* keep last */
 	ATH10K_FW_HTT_OP_VERSION_MAX,
 };
@@ -144,6 +162,7 @@
 enum ath10k_hw_rev {
 	ATH10K_HW_QCA988X,
 	ATH10K_HW_QCA6174,
+	ATH10K_HW_QCA99X0,
 };
 
 struct ath10k_hw_regs {
@@ -164,26 +183,50 @@
 	u32 soc_reset_control_ce_rst_mask;
 	u32 soc_chip_id_address;
 	u32 scratch_3_address;
+	u32 fw_indicator_address;
+	u32 pcie_local_base_address;
+	u32 ce_wrap_intr_sum_host_msi_lsb;
+	u32 ce_wrap_intr_sum_host_msi_mask;
+	u32 pcie_intr_fw_mask;
+	u32 pcie_intr_ce_mask_all;
+	u32 pcie_intr_clr_address;
 };
 
 extern const struct ath10k_hw_regs qca988x_regs;
 extern const struct ath10k_hw_regs qca6174_regs;
+extern const struct ath10k_hw_regs qca99x0_regs;
+
+struct ath10k_hw_values {
+	u32 rtc_state_val_on;
+	u8 ce_count;
+	u8 msi_assign_ce_max;
+	u8 num_target_ce_config_wlan;
+	u16 ce_desc_meta_data_mask;
+	u8 ce_desc_meta_data_lsb;
+};
+
+extern const struct ath10k_hw_values qca988x_values;
+extern const struct ath10k_hw_values qca6174_values;
+extern const struct ath10k_hw_values qca99x0_values;
 
 void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
 				u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
 
 #define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
 #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
+#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
 
 /* Known peculiarities:
- *  - current FW doesn't support raw rx mode (last tested v599)
- *  - current FW dumps upon raw tx mode (last tested v599)
  *  - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
  *  - raw have FCS, nwifi doesn't
  *  - ethernet frames have 802.11 header decapped and parts (base hdr, cipher
  *    param, llc/snap) are aligned to 4byte boundaries each */
 enum ath10k_hw_txrx_mode {
 	ATH10K_HW_TXRX_RAW = 0,
+
+	/* Native Wifi decap mode is used to align IP frames to 4-byte
+	 * boundaries and avoid a very expensive re-alignment in mac80211.
+	 */
 	ATH10K_HW_TXRX_NATIVE_WIFI = 1,
 	ATH10K_HW_TXRX_ETHERNET = 2,
 
@@ -245,10 +288,6 @@
 #define TARGET_RX_TIMEOUT_LO_PRI		100
 #define TARGET_RX_TIMEOUT_HI_PRI		40
 
-/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
- * avoid a very expensive re-alignment in mac80211. */
-#define TARGET_RX_DECAP_MODE			ATH10K_HW_TXRX_NATIVE_WIFI
-
 #define TARGET_SCAN_MAX_PENDING_REQS		4
 #define TARGET_BMISS_OFFLOAD_MAX_VDEV		3
 #define TARGET_ROAM_OFFLOAD_MAX_VDEV		3
@@ -283,7 +322,6 @@
 #define TARGET_10X_RX_CHAIN_MASK		(BIT(0) | BIT(1) | BIT(2))
 #define TARGET_10X_RX_TIMEOUT_LO_PRI		100
 #define TARGET_10X_RX_TIMEOUT_HI_PRI		40
-#define TARGET_10X_RX_DECAP_MODE		ATH10K_HW_TXRX_NATIVE_WIFI
 #define TARGET_10X_SCAN_MAX_PENDING_REQS	4
 #define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV	2
 #define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV	2
@@ -310,8 +348,70 @@
 #define TARGET_TLV_NUM_MSDU_DESC		(1024 + 32)
 #define TARGET_TLV_NUM_WOW_PATTERNS		22
 
+/* Diagnostic Window */
+#define CE_DIAG_PIPE	7
+
+#define NUM_TARGET_CE_CONFIG_WLAN ar->hw_values->num_target_ce_config_wlan
+
+/* Target specific defines for 10.4 firmware */
+#define TARGET_10_4_NUM_VDEVS			16
+#define TARGET_10_4_NUM_STATIONS		32
+#define TARGET_10_4_NUM_PEERS			((TARGET_10_4_NUM_STATIONS) + \
+						 (TARGET_10_4_NUM_VDEVS))
+#define TARGET_10_4_ACTIVE_PEERS		0
+
+#define TARGET_10_4_NUM_QCACHE_PEERS_MAX	512
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS		50
+#define TARGET_10_4_NUM_OFFLOAD_PEERS		0
+#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS	0
+#define TARGET_10_4_NUM_PEER_KEYS		2
+#define TARGET_10_4_TGT_NUM_TIDS		((TARGET_10_4_NUM_PEERS) * 2)
+#define TARGET_10_4_AST_SKID_LIMIT		32
+#define TARGET_10_4_TX_CHAIN_MASK		(BIT(0) | BIT(1) | \
+						 BIT(2) | BIT(3))
+#define TARGET_10_4_RX_CHAIN_MASK		(BIT(0) | BIT(1) | \
+						 BIT(2) | BIT(3))
+
+/* 100 ms for video, best-effort, and background */
+#define TARGET_10_4_RX_TIMEOUT_LO_PRI		100
+
+/* 40 ms for voice */
+#define TARGET_10_4_RX_TIMEOUT_HI_PRI		40
+
+#define TARGET_10_4_RX_DECAP_MODE		ATH10K_HW_TXRX_NATIVE_WIFI
+#define TARGET_10_4_SCAN_MAX_REQS		4
+#define TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV	3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV	3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES   8
+
+/* Note: mcast to ucast is disabled by default */
+#define TARGET_10_4_NUM_MCAST_GROUPS		0
+#define TARGET_10_4_NUM_MCAST_TABLE_ELEMS	0
+#define TARGET_10_4_MCAST2UCAST_MODE		0
+
+#define TARGET_10_4_TX_DBG_LOG_SIZE		1024
+#define TARGET_10_4_NUM_WDS_ENTRIES		32
+#define TARGET_10_4_DMA_BURST_SIZE		1
+#define TARGET_10_4_MAC_AGGR_DELIM		0
+#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10_4_VOW_CONFIG			0
+#define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV	3
+#define TARGET_10_4_NUM_MSDU_DESC		(1024 + 400)
+#define TARGET_10_4_11AC_TX_MAX_FRAGS		2
+#define TARGET_10_4_MAX_PEER_EXT_STATS		16
+#define TARGET_10_4_SMART_ANT_CAP		0
+#define TARGET_10_4_BK_MIN_FREE			0
+#define TARGET_10_4_BE_MIN_FREE			0
+#define TARGET_10_4_VI_MIN_FREE			0
+#define TARGET_10_4_VO_MIN_FREE			0
+#define TARGET_10_4_RX_BATCH_MODE		1
+#define TARGET_10_4_THERMAL_THROTTLING_CONFIG	0
+#define TARGET_10_4_ATF_CONFIG			0
+#define TARGET_10_4_IPHDR_PAD_CONFIG		1
+#define TARGET_10_4_QWRAP_CONFIG		0
+
 /* Number of Copy Engines supported */
-#define CE_COUNT 8
+#define CE_COUNT ar->hw_values->ce_count
 
 /*
  * Total number of PCIe MSI interrupts requested for all interrupt sources.
@@ -335,10 +435,10 @@
 
 /* MSIs for Copy Engines */
 #define MSI_ASSIGN_CE_INITIAL	1
-#define MSI_ASSIGN_CE_MAX	7
+#define MSI_ASSIGN_CE_MAX	ar->hw_values->msi_assign_ce_max
 
 /* as of IP3.7.1 */
-#define RTC_STATE_V_ON				3
+#define RTC_STATE_V_ON				ar->hw_values->rtc_state_val_on
 
 #define RTC_STATE_COLD_RESET_MASK		ar->regs->rtc_state_cold_reset_mask
 #define RTC_STATE_V_LSB				0
@@ -374,7 +474,7 @@
 #define CE7_BASE_ADDRESS			ar->regs->ce7_base_address
 #define DBI_BASE_ADDRESS			0x00060000
 #define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS	0x0006c000
-#define PCIE_LOCAL_BASE_ADDRESS			0x00080000
+#define PCIE_LOCAL_BASE_ADDRESS		ar->regs->pcie_local_base_address
 
 #define SOC_RESET_CONTROL_ADDRESS		0x00000000
 #define SOC_RESET_CONTROL_OFFSET		0x00000000
@@ -448,24 +548,25 @@
 #define CORE_CTRL_ADDRESS			0x0000
 #define PCIE_INTR_ENABLE_ADDRESS		0x0008
 #define PCIE_INTR_CAUSE_ADDRESS			0x000c
-#define PCIE_INTR_CLR_ADDRESS			0x0014
+#define PCIE_INTR_CLR_ADDRESS			ar->regs->pcie_intr_clr_address
 #define SCRATCH_3_ADDRESS			ar->regs->scratch_3_address
 #define CPU_INTR_ADDRESS			0x0010
 
-/* Cycle counters are running at 88MHz */
-#define CCNT_TO_MSEC(x) ((x) / 88000)
+#define CCNT_TO_MSEC(ar, x) ((x) / ar->hw_params.channel_counters_freq_hz)
 
 /* Firmware indications to the Host via SCRATCH_3 register. */
-#define FW_INDICATOR_ADDRESS	(SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
+#define FW_INDICATOR_ADDRESS			ar->regs->fw_indicator_address
 #define FW_IND_EVENT_PENDING			1
 #define FW_IND_INITIALIZED			2
 
 /* HOST_REG interrupt from firmware */
-#define PCIE_INTR_FIRMWARE_MASK			0x00000400
-#define PCIE_INTR_CE_MASK_ALL			0x0007f800
+#define PCIE_INTR_FIRMWARE_MASK			ar->regs->pcie_intr_fw_mask
+#define PCIE_INTR_CE_MASK_ALL			ar->regs->pcie_intr_ce_mask_all
 
 #define DRAM_BASE_ADDRESS			0x00400000
 
+#define PCIE_BAR_REG_ADDRESS			0x40030
+
 #define MISSING 0
 
 #define SYSTEM_SLEEP_OFFSET			SOC_SYSTEM_SLEEP_OFFSET
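With this hunk, former compile-time constants such as CE_COUNT and MSI_ASSIGN_CE_MAX expand to fields of a per-chip table reached through a local `ar` pointer, so they can only be used where such a pointer is in scope. A minimal sketch of the pattern; here the macro takes `ar` explicitly, whereas the real macros capture the surrounding variable:

#include <stdio.h>

struct hw_values {
	unsigned int ce_count;
	unsigned int msi_assign_ce_max;
};

static const struct hw_values qca988x_vals = { 8, 7 };
static const struct hw_values qca99x0_vals = { 12, 12 };

struct dev {
	const struct hw_values *hw_values;	/* chosen once at probe */
};

/* a "constant" that dereferences the per-chip table */
#define CE_COUNT(ar) ((ar)->hw_values->ce_count)

int main(void)
{
	struct dev ar = { .hw_values = &qca99x0_vals };

	printf("%u\n", CE_COUNT(&ar));	/* 12 */
	return 0;
}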
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 218b6af..64674c9 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -197,6 +197,10 @@
 		return -EOPNOTSUPP;
 	}
 
+	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+	}
+
 	if (cmd == DISABLE_KEY) {
 		arg.key_cipher = WMI_CIPHER_NONE;
 		arg.key_data = NULL;
@@ -218,6 +222,9 @@
 
 	reinit_completion(&ar->install_key_done);
 
+	if (arvif->nohwcrypt)
+		return 1;
+
 	ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
 	if (ret)
 		return ret;
@@ -240,6 +247,10 @@
 
 	lockdep_assert_held(&ar->conf_mutex);
 
+	if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
+		    arvif->vif->type != NL80211_IFTYPE_ADHOC))
+		return -EINVAL;
+
 	spin_lock_bh(&ar->data_lock);
 	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
 	spin_unlock_bh(&ar->data_lock);
@@ -251,21 +262,34 @@
 		if (arvif->wep_keys[i] == NULL)
 			continue;
 
-		flags = 0;
-		flags |= WMI_KEY_PAIRWISE;
+		switch (arvif->vif->type) {
+		case NL80211_IFTYPE_AP:
+			flags = WMI_KEY_PAIRWISE;
 
-		ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
-					 addr, flags);
-		if (ret)
-			return ret;
+			if (arvif->def_wep_key_idx == i)
+				flags |= WMI_KEY_TX_USAGE;
 
-		flags = 0;
-		flags |= WMI_KEY_GROUP;
+			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+						 SET_KEY, addr, flags);
+			if (ret < 0)
+				return ret;
+			break;
+		case NL80211_IFTYPE_ADHOC:
+			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+						 SET_KEY, addr,
+						 WMI_KEY_PAIRWISE);
+			if (ret < 0)
+				return ret;
 
-		ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
-					 addr, flags);
-		if (ret)
-			return ret;
+			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+						 SET_KEY, addr, WMI_KEY_GROUP);
+			if (ret < 0)
+				return ret;
+			break;
+		default:
+			WARN_ON(1);
+			return -EINVAL;
+		}
 
 		spin_lock_bh(&ar->data_lock);
 		peer->keys[i] = arvif->wep_keys[i];
@@ -280,6 +304,9 @@
 	 *
 	 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
 	 */
+	if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
+		return 0;
+
 	if (arvif->def_wep_key_idx == -1)
 		return 0;
 
@@ -322,10 +349,10 @@
 		/* key flags are not required to delete the key */
 		ret = ath10k_install_key(arvif, peer->keys[i],
 					 DISABLE_KEY, addr, flags);
-		if (ret && first_errno == 0)
+		if (ret < 0 && first_errno == 0)
 			first_errno = ret;
 
-		if (ret)
+		if (ret < 0)
 			ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
 				    i, ret);
 
@@ -398,7 +425,7 @@
 			break;
 		/* key flags are not required to delete the key */
 		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
-		if (ret && first_errno == 0)
+		if (ret < 0 && first_errno == 0)
 			first_errno = ret;
 
 		if (ret)
@@ -591,11 +618,19 @@
 static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
 			      enum wmi_peer_type peer_type)
 {
+	struct ath10k_vif *arvif;
+	int num_peers = 0;
 	int ret;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	if (ar->num_peers >= ar->max_num_peers)
+	num_peers = ar->num_peers;
+
+	/* Each vdev consumes a peer entry as well */
+	list_for_each_entry(arvif, &ar->arvifs, list)
+		num_peers++;
+
+	if (num_peers >= ar->max_num_peers)
 		return -ENOBUFS;
 
 	ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
@@ -671,20 +706,6 @@
 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
 }
 
-static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
-{
-	struct ath10k *ar = arvif->ar;
-	u32 vdev_param;
-
-	if (value != 0xFFFFFFFF)
-		value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
-				ATH10K_FRAGMT_THRESHOLD_MIN,
-				ATH10K_FRAGMT_THRESHOLD_MAX);
-
-	vdev_param = ar->wmi.vdev_param->fragmentation_threshold;
-	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
-}
-
 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
 {
 	int ret;
@@ -836,7 +857,7 @@
 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
 {
 	struct cfg80211_chan_def *chandef = NULL;
-	struct ieee80211_channel *channel = chandef->chan;
+	struct ieee80211_channel *channel = NULL;
 	struct wmi_vdev_start_request_arg arg = {};
 	int ret = 0;
 
@@ -1668,7 +1689,7 @@
 	return 0;
 }
 
-static int ath10k_mac_ps_vif_count(struct ath10k *ar)
+static int ath10k_mac_num_vifs_started(struct ath10k *ar)
 {
 	struct ath10k_vif *arvif;
 	int num = 0;
@@ -1676,7 +1697,7 @@
 	lockdep_assert_held(&ar->conf_mutex);
 
 	list_for_each_entry(arvif, &ar->arvifs, list)
-		if (arvif->ps)
+		if (arvif->is_started)
 			num++;
 
 	return num;
@@ -1700,7 +1721,7 @@
 
 	enable_ps = arvif->ps;
 
-	if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 &&
+	if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
 	    !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
 		      ar->fw_features)) {
 		ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
@@ -2502,6 +2523,9 @@
 	u32 param;
 	u32 value;
 
+	if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
+		return 0;
+
 	if (!(ar->vht_cap_info &
 	      (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
 	       IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
@@ -2995,6 +3019,8 @@
 						   IEEE80211_IFACE_ITER_RESUME_ALL,
 						   ath10k_mac_tx_unlock_iter,
 						   ar);
+
+	ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
 }
 
 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
@@ -3034,38 +3060,16 @@
 
 	lockdep_assert_held(&ar->htt.tx_lock);
 
-	switch (pause_id) {
-	case WMI_TLV_TX_PAUSE_ID_MCC:
-	case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
-	case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
-	case WMI_TLV_TX_PAUSE_ID_AP_PS:
-	case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
-		switch (action) {
-		case WMI_TLV_TX_PAUSE_ACTION_STOP:
-			ath10k_mac_vif_tx_lock(arvif, pause_id);
-			break;
-		case WMI_TLV_TX_PAUSE_ACTION_WAKE:
-			ath10k_mac_vif_tx_unlock(arvif, pause_id);
-			break;
-		default:
-			ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
-				    action, arvif->vdev_id);
-			break;
-		}
+	switch (action) {
+	case WMI_TLV_TX_PAUSE_ACTION_STOP:
+		ath10k_mac_vif_tx_lock(arvif, pause_id);
 		break;
-	case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
-	case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
-	case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
-	case WMI_TLV_TX_PAUSE_ID_HOST:
+	case WMI_TLV_TX_PAUSE_ACTION_WAKE:
+		ath10k_mac_vif_tx_unlock(arvif, pause_id);
+		break;
 	default:
-		/* FIXME: Some pause_ids aren't vdev specific. Instead they
-		 * target peer_id and tid. Implementing these could improve
-		 * traffic scheduling fairness across multiple connected
-		 * stations in AP/IBSS modes.
-		 */
-		ath10k_dbg(ar, ATH10K_DBG_MAC,
-			   "mac ignoring unsupported tx pause vdev %i id %d\n",
-			   arvif->vdev_id, pause_id);
+		ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
+			    action, arvif->vdev_id);
 		break;
 	}
 }
@@ -3082,12 +3086,15 @@
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 	struct ath10k_mac_tx_pause *arg = data;
 
+	if (arvif->vdev_id != arg->vdev_id)
+		return;
+
 	ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
 }
 
-void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
-				enum wmi_tlv_tx_pause_id pause_id,
-				enum wmi_tlv_tx_pause_action action)
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+				     enum wmi_tlv_tx_pause_id pause_id,
+				     enum wmi_tlv_tx_pause_action action)
 {
 	struct ath10k_mac_tx_pause arg = {
 		.vdev_id = vdev_id,
@@ -3168,13 +3175,30 @@
 	 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
 	 * NativeWifi txmode - it selects AP key instead of peer key. It seems
 	 * to work with Ethernet txmode so use it.
+	 *
+	 * FIXME: Check if raw mode works with TDLS.
 	 */
 	if (ieee80211_is_data_present(fc) && sta && sta->tdls)
 		return ATH10K_HW_TXRX_ETHERNET;
 
+	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+		return ATH10K_HW_TXRX_RAW;
+
 	return ATH10K_HW_TXRX_NATIVE_WIFI;
 }
 
+static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
+				     struct sk_buff *skb) {
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
+			 IEEE80211_TX_CTL_INJECTED;
+	if ((info->flags & mask) == mask)
+		return false;
+	if (vif)
+		return !ath10k_vif_to_arvif(vif)->nohwcrypt;
+	return true;
+}
+
 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
  * Control in the header.
  */
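ath10k_tx_h_use_hwcrypto() above routes injected frames that are flagged "don't encrypt" away from hardware crypto. A standalone truth-table sketch of the same mask check; the bit values are illustrative, not mac80211's:

#include <stdbool.h>
#include <stdio.h>

#define TX_INTFL_DONT_ENCRYPT	(1u << 0)	/* illustrative bits */
#define TX_CTL_INJECTED		(1u << 1)

/* Only frames that are both injected and marked "don't encrypt"
 * skip hardware crypto; otherwise the vif setting decides. */
static bool use_hwcrypto(unsigned int flags, bool vif_nohwcrypt)
{
	const unsigned int mask = TX_INTFL_DONT_ENCRYPT | TX_CTL_INJECTED;

	if ((flags & mask) == mask)
		return false;
	return !vif_nohwcrypt;
}

int main(void)
{
	printf("%d\n", use_hwcrypto(TX_CTL_INJECTED, false));	/* 1 */
	printf("%d\n", use_hwcrypto(TX_INTFL_DONT_ENCRYPT |
				    TX_CTL_INJECTED, false));	/* 0 */
	return 0;
}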
@@ -3341,6 +3365,7 @@
 	int vdev_id;
 	int ret;
 	unsigned long time_left;
+	bool tmp_peer_created = false;
 
 	/* FW requirement: We must create a peer before FW will send out
 	 * an offchannel frame. Otherwise the frame will be stuck and
@@ -3378,6 +3403,7 @@
 			if (ret)
 				ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
 					    peer_addr, vdev_id, ret);
+			tmp_peer_created = (ret == 0);
 		}
 
 		spin_lock_bh(&ar->data_lock);
@@ -3393,7 +3419,7 @@
 			ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
 				    skb);
 
-		if (!peer) {
+		if (!peer && tmp_peer_created) {
 			ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
 			if (ret)
 				ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
@@ -3449,14 +3475,13 @@
 	case ATH10K_SCAN_IDLE:
 		break;
 	case ATH10K_SCAN_RUNNING:
-		if (ar->scan.is_roc)
-			ieee80211_remain_on_channel_expired(ar->hw);
-		/* fall through */
 	case ATH10K_SCAN_ABORTING:
 		if (!ar->scan.is_roc)
 			ieee80211_scan_completed(ar->hw,
 						 (ar->scan.state ==
 						  ATH10K_SCAN_ABORTING));
+		else if (ar->scan.roc_notify)
+			ieee80211_remain_on_channel_expired(ar->hw);
 		/* fall through */
 	case ATH10K_SCAN_STARTING:
 		ar->scan.state = ATH10K_SCAN_IDLE;
@@ -3620,6 +3645,7 @@
 	ATH10K_SKB_CB(skb)->htt.is_offchan = false;
 	ATH10K_SKB_CB(skb)->htt.freq = 0;
 	ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
+	ATH10K_SKB_CB(skb)->htt.nohwcrypt = !ath10k_tx_h_use_hwcrypto(vif, skb);
 	ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
 	ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb);
 	ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc);
@@ -3635,12 +3661,11 @@
 		ath10k_tx_h_8023(skb);
 		break;
 	case ATH10K_HW_TXRX_RAW:
-		/* FIXME: Packet injection isn't implemented. It should be
-		 * doable with firmware 10.2 on qca988x.
-		 */
-		WARN_ON_ONCE(1);
-		ieee80211_free_txskb(hw, skb);
-		return;
+		if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+			WARN_ON_ONCE(1);
+			ieee80211_free_txskb(hw, skb);
+			return;
+		}
 	}
 
 	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
@@ -4039,6 +4064,43 @@
 	return 1;
 }
 
+static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
+{
+	u32 value = 0;
+	struct ath10k *ar = arvif->ar;
+
+	if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
+		return 0;
+
+	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
+		value |= SM((ar->num_rf_chains - 1), WMI_TXBF_STS_CAP_OFFSET);
+
+	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
+		value |= SM((ar->num_rf_chains - 1), WMI_BF_SOUND_DIM_OFFSET);
+
+	if (!value)
+		return 0;
+
+	if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
+		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+	if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
+		value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
+			  WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
+
+	if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
+		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+	if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
+		value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
+			  WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
+
+	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+					 ar->wmi.vdev_param->txbf, value);
+}
+
 /*
  * TODO:
  * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
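ath10k_mac_set_txbf_conf() encodes the STS capability and sounding dimensions as (num_rf_chains - 1) packed into bit fields via SM(). A minimal sketch of that shift-and-mask packing with an illustrative 3-bit field:

#include <stdio.h>

#define FIELD_LSB	4
#define FIELD_MASK	0x70	/* illustrative 3-bit field at bits 4..6 */
#define SM(v)		(((v) << FIELD_LSB) & FIELD_MASK)

int main(void)
{
	unsigned int num_rf_chains = 4;

	/* capability fields are encoded as chains - 1 */
	printf("0x%x\n", SM(num_rf_chains - 1));	/* 0x30 */
	return 0;
}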
@@ -4080,6 +4142,12 @@
 		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
 	}
 
+	if (ar->num_peers >= ar->max_num_peers) {
+		ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
+		ret = -ENOBUFS;
+		goto err;
+	}
+
 	if (ar->free_vdev_map == 0) {
 		ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
 		ret = -EBUSY;
@@ -4159,6 +4227,14 @@
 			goto err;
 		}
 	}
+	if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
+		arvif->nohwcrypt = true;
+
+	if (arvif->nohwcrypt &&
+	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+		ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
+		goto err;
+	}
 
 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
 		   arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
@@ -4257,16 +4333,16 @@
 		}
 	}
 
-	ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
+	ret = ath10k_mac_set_txbf_conf(arvif);
 	if (ret) {
-		ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
+		ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
 			    arvif->vdev_id, ret);
 		goto err_peer_delete;
 	}
 
-	ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
+	ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
 	if (ret) {
-		ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n",
+		ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
 			    arvif->vdev_id, ret);
 		goto err_peer_delete;
 	}
@@ -4287,6 +4363,11 @@
 		}
 	}
 
+	spin_lock_bh(&ar->htt.tx_lock);
+	if (!ar->tx_paused)
+		ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+	spin_unlock_bh(&ar->htt.tx_lock);
+
 	mutex_unlock(&ar->conf_mutex);
 	return 0;
 
@@ -4641,9 +4722,6 @@
 	arg.vdev_id = arvif->vdev_id;
 	arg.scan_id = ATH10K_SCAN_ID;
 
-	if (!req->no_cck)
-		arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
-
 	if (req->ie_len) {
 		arg.ie_len = req->ie_len;
 		memcpy(arg.ie, req->ie, arg.ie_len);
@@ -4751,6 +4829,9 @@
 	if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
 		return 1;
 
+	if (arvif->nohwcrypt)
+		return 1;
+
 	if (key->keyidx > WMI_MAX_KEY_INDEX)
 		return -ENOSPC;
 
@@ -4820,6 +4901,7 @@
 
 	ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
 	if (ret) {
+		WARN_ON(ret > 0);
 		ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
 			    arvif->vdev_id, peer_addr, ret);
 		goto exit;
@@ -4835,13 +4917,16 @@
 
 		ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
 		if (ret) {
+			WARN_ON(ret > 0);
 			ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
 				    arvif->vdev_id, peer_addr, ret);
 			ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
 						  peer_addr, flags);
-			if (ret2)
+			if (ret2) {
+				WARN_ON(ret2 > 0);
 				ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
 					    arvif->vdev_id, peer_addr, ret2);
+			}
 			goto exit;
 		}
 	}
@@ -5462,6 +5547,7 @@
 		ar->scan.is_roc = true;
 		ar->scan.vdev_id = arvif->vdev_id;
 		ar->scan.roc_freq = chan->center_freq;
+		ar->scan.roc_notify = true;
 		ret = 0;
 		break;
 	case ATH10K_SCAN_STARTING:
@@ -5525,7 +5611,13 @@
 	struct ath10k *ar = hw->priv;
 
 	mutex_lock(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+	ar->scan.roc_notify = false;
+	spin_unlock_bh(&ar->data_lock);
+
 	ath10k_scan_abort(ar);
+
 	mutex_unlock(&ar->conf_mutex);
 
 	cancel_delayed_work_sync(&ar->scan.timeout);
@@ -5561,12 +5653,27 @@
 	return ret;
 }
 
+static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	/* Even though there's a WMI enum for the fragmentation threshold, no
+	 * known firmware actually implements it. Moreover it is not possible
+	 * to delegate frame fragmentation to mac80211 because firmware clears
+	 * the "more fragments" bit in frame control, making it impossible for
+	 * remote devices to reassemble frames.
+	 *
+	 * Hence implement a dummy callback just to say fragmentation isn't
+	 * supported. This effectively prevents mac80211 from doing frame
+	 * fragmentation in software.
+	 */
+	return -EOPNOTSUPP;
+}
+
 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			 u32 queues, bool drop)
 {
 	struct ath10k *ar = hw->priv;
 	bool skip;
-	int ret;
+	long time_left;
 
 	/* mac80211 doesn't care if we really xmit queued frames or not
 	 * we'll collect those frames either way if we stop/delete vdevs */
@@ -5578,7 +5685,7 @@
 	if (ar->state == ATH10K_STATE_WEDGED)
 		goto skip;
 
-	ret = wait_event_timeout(ar->htt.empty_tx_wq, ({
+	time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
 			bool empty;
 
 			spin_lock_bh(&ar->htt.tx_lock);
@@ -5592,9 +5699,9 @@
 			(empty || skip);
 		}), ATH10K_FLUSH_TIMEOUT_HZ);
 
-	if (ret <= 0 || skip)
-		ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n",
-			    skip, ar->state, ret);
+	if (time_left == 0 || skip)
+		ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
+			    skip, ar->state, time_left);
 
 skip:
 	mutex_unlock(&ar->conf_mutex);
@@ -6219,6 +6326,13 @@
 
 	arvif->is_started = true;
 
+	ret = ath10k_mac_vif_setup_ps(arvif);
+	if (ret) {
+		ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
+			    arvif->vdev_id, ret);
+		goto err_stop;
+	}
+
 	if (vif->type == NL80211_IFTYPE_MONITOR) {
 		ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
 		if (ret) {
@@ -6236,6 +6350,7 @@
 err_stop:
 	ath10k_vdev_stop(arvif);
 	arvif->is_started = false;
+	ath10k_mac_vif_setup_ps(arvif);
 
 err:
 	mutex_unlock(&ar->conf_mutex);
@@ -6395,6 +6510,7 @@
 	.remain_on_channel		= ath10k_remain_on_channel,
 	.cancel_remain_on_channel	= ath10k_cancel_remain_on_channel,
 	.set_rts_threshold		= ath10k_set_rts_threshold,
+	.set_frag_threshold		= ath10k_mac_op_set_frag_threshold,
 	.flush				= ath10k_flush,
 	.tx_last_beacon			= ath10k_tx_last_beacon,
 	.set_antenna			= ath10k_set_antenna,
@@ -6565,8 +6681,11 @@
 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
 	{
 		.max = 2,
-		.types = BIT(NL80211_IFTYPE_STATION) |
-			 BIT(NL80211_IFTYPE_AP) |
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_AP) |
 			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
 			 BIT(NL80211_IFTYPE_P2P_GO),
 	},
@@ -6576,6 +6695,26 @@
 	},
 };
 
+static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_AP) |
+			 BIT(NL80211_IFTYPE_P2P_GO),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
+};
+
 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
 	{
 		.max = 1,
@@ -6594,7 +6733,7 @@
 	{
 		.limits = ath10k_tlv_if_limit,
 		.num_different_channels = 1,
-		.max_interfaces = 3,
+		.max_interfaces = 4,
 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
 	},
 	{
@@ -6608,11 +6747,17 @@
 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
 	{
 		.limits = ath10k_tlv_if_limit,
-		.num_different_channels = 2,
-		.max_interfaces = 3,
+		.num_different_channels = 1,
+		.max_interfaces = 4,
 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
 	},
 	{
+		.limits = ath10k_tlv_qcs_if_limit,
+		.num_different_channels = 2,
+		.max_interfaces = 4,
+		.n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
+	},
+	{
 		.limits = ath10k_tlv_if_limit_ibss,
 		.num_different_channels = 1,
 		.max_interfaces = 2,
@@ -6620,6 +6765,33 @@
 	},
 };
 
+static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max	= 16,
+		.types	= BIT(NL80211_IFTYPE_AP)
+	},
+};
+
+static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
+	{
+		.limits = ath10k_10_4_if_limits,
+		.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
+		.max_interfaces = 16,
+		.num_different_channels = 1,
+		.beacon_int_infra_match = true,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+					BIT(NL80211_CHAN_WIDTH_20) |
+					BIT(NL80211_CHAN_WIDTH_40) |
+					BIT(NL80211_CHAN_WIDTH_80),
+#endif
+	},
+};
+
 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
 {
 	struct ieee80211_sta_vht_cap vht_cap = {0};
@@ -6844,7 +7016,6 @@
 	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
 	ieee80211_hw_set(ar->hw, AP_LINK_PS);
 	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
-	ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
 	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
 	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
 	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
@@ -6852,6 +7023,9 @@
 	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
 	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
 
+	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+
 	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
 	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
 
@@ -6902,6 +7076,8 @@
 		goto err_free;
 	}
 
+	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
 	/*
 	 * on LL hardware queues are managed entirely by the FW
 	 * so we only advertise to mac we can do the queues thing
@@ -6941,6 +7117,11 @@
 		ar->hw->wiphy->n_iface_combinations =
 			ARRAY_SIZE(ath10k_10x_if_comb);
 		break;
+	case ATH10K_FW_WMI_OP_VERSION_10_4:
+		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
+		ar->hw->wiphy->n_iface_combinations =
+			ARRAY_SIZE(ath10k_10_4_if_comb);
+		break;
 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
 	case ATH10K_FW_WMI_OP_VERSION_MAX:
 		WARN_ON(1);
@@ -6948,7 +7129,8 @@
 		goto err_free;
 	}
 
-	ar->hw->netdev_features = NETIF_F_HW_CSUM;
+	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+		ar->hw->netdev_features = NETIF_F_HW_CSUM;
 
 	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
 		/* Init ath dfs pattern detector */
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index b291f06..e3cefe4 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -61,9 +61,9 @@
 
 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
-void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
-				enum wmi_tlv_tx_pause_id pause_id,
-				enum wmi_tlv_tx_pause_action action);
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+				     enum wmi_tlv_tx_pause_id pause_id,
+				     enum wmi_tlv_tx_pause_action action);
 
 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
 			     u8 hw_rate);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index ea656e0..1046ab6 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -58,11 +58,15 @@
 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
 
 #define QCA988X_2_0_DEVICE_ID	(0x003c)
+#define QCA6164_2_1_DEVICE_ID	(0x0041)
 #define QCA6174_2_1_DEVICE_ID	(0x003e)
+#define QCA99X0_2_0_DEVICE_ID	(0x0040)
 
 static const struct pci_device_id ath10k_pci_id_table[] = {
 	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
+	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
 	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
+	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
 	{0}
 };
 
@@ -72,16 +76,25 @@
 	 * because of that.
 	 */
 	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
+
+	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
+	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
+	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
+	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
+	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
+
 	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
 	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
 	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
 	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
 	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
+
+	{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
 };
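
The table above pairs a PCI device id with each chip revision the driver accepts. A hedged sketch of how such a table can gate probe, with assumed names for the entry type and helper (the driver's real check may differ):

	/* Hedged sketch with assumed names. */
	struct supp_chip { u32 dev_id; u32 rev_id; };

	static bool chip_supported(const struct supp_chip *tbl, size_t n,
				   u32 dev_id, u32 rev_id)
	{
		size_t i;

		/* A device is supported only if its (id, rev) pair is listed. */
		for (i = 0; i < n; i++)
			if (tbl[i].dev_id == dev_id && tbl[i].rev_id == rev_id)
				return true;

		return false;
	}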
 
 static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
 static int ath10k_pci_cold_reset(struct ath10k *ar);
-static int ath10k_pci_warm_reset(struct ath10k *ar);
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
 static int ath10k_pci_init_irq(struct ath10k *ar);
 static int ath10k_pci_deinit_irq(struct ath10k *ar);
@@ -90,6 +103,7 @@
 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
 			       struct ath10k_ce_pipe *rx_pipe,
 			       struct bmi_xfer *xfer);
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
 
 static const struct ce_attr host_ce_config_wlan[] = {
 	/* CE0: host->target HTC control and raw streams */
@@ -155,6 +169,38 @@
 		.src_sz_max = DIAG_TRANSFER_LIMIT,
 		.dest_nentries = 2,
 	},
+
+	/* CE8: target->host pktlog */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 128,
+	},
+
+	/* CE9: target autonomous qcache memcpy */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE10: target autonomous hif memcpy */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE11: target autonomous hif memcpy */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
 };
 
 /* Target firmware's Copy Engine configuration. */
@@ -232,6 +278,38 @@
 	},
 
 	/* CE7 used only by Host */
+	{
+		.pipenum = __cpu_to_le32(7),
+		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+		.nentries = __cpu_to_le32(0),
+		.nbytes_max = __cpu_to_le32(0),
+		.flags = __cpu_to_le32(0),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE8 target->host pktlog */
+	{
+		.pipenum = __cpu_to_le32(8),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(64),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE9 target autonomous qcache memcpy */
+	{
+		.pipenum = __cpu_to_le32(9),
+		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* It is not necessary to send the target wlan configuration for
+	 * CE10 & CE11, as these CEs are not actively used by the target.
+	 */
 };
 
 /*
@@ -479,6 +557,12 @@
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int ret;
 
+	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
+		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+			    offset, offset + sizeof(value), ar_pci->mem_len);
+		return;
+	}
+
 	ret = ath10k_pci_wake(ar);
 	if (ret) {
 		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
@@ -496,6 +580,12 @@
 	u32 val;
 	int ret;
 
+	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
+		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+			    offset, offset + sizeof(val), ar_pci->mem_len);
+		return 0;
+	}
+
 	ret = ath10k_pci_wake(ar);
 	if (ret) {
 		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
@@ -678,6 +768,26 @@
 	ath10k_pci_rx_post(ar);
 }
 
+static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+	u32 val = 0;
+
+	switch (ar->hw_rev) {
+	case ATH10K_HW_QCA988X:
+	case ATH10K_HW_QCA6174:
+		val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+					  CORE_CTRL_ADDRESS) &
+		       0x7ff) << 21;
+		break;
+	case ATH10K_HW_QCA99X0:
+		val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+		break;
+	}
+
+	val |= 0x100000 | (addr & 0xfffff);
+	return val;
+}
+
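+
+A hedged worked example of ath10k_pci_targ_cpu_to_ce_addr() above: only the
+low 20 bits of the target address survive, bit 20 marks the window, and the
+upper bits come from the per-chip register read (all values here are made up):
+
+	/* Hedged example with made-up register contents. */
+	u32 bar  = 0xffa00000;	/* e.g. a PCIE_BAR_REG_ADDRESS read */
+	u32 addr = 0x00401234;	/* target CPU virtual address */
+	u32 ce   = bar | 0x100000 | (addr & 0xfffff);	/* == 0xffb01234 */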
 /*
  * Diagnostic read/write access is provided for startup/config/debug usage.
  * Caller must guarantee proper alignment, when applicable, and single user
@@ -740,8 +850,7 @@
 		 * convert it from Target CPU virtual address space
 		 * to CE address space
 		 */
-		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
-						     address);
+		address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
 
 		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
 					    0);
@@ -899,7 +1008,7 @@
 	 * to
 	 *    CE address space
 	 */
-	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
+	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
 
 	remaining_bytes = orig_nbytes;
 	ce_data = ce_data_base;
@@ -1331,20 +1440,42 @@
 {
 	u32 val;
 
-	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
-	val &= ~CORE_CTRL_PCIE_REG_31_MASK;
-
-	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+	switch (ar->hw_rev) {
+	case ATH10K_HW_QCA988X:
+	case ATH10K_HW_QCA6174:
+		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+					CORE_CTRL_ADDRESS);
+		val &= ~CORE_CTRL_PCIE_REG_31_MASK;
+		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+				   CORE_CTRL_ADDRESS, val);
+		break;
+	case ATH10K_HW_QCA99X0:
+		/* TODO: Find appropriate register configuration for QCA99X0
+		 * to mask irq/MSI.
+		 */
+		break;
+	}
 }
 
 static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
 {
 	u32 val;
 
-	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
-	val |= CORE_CTRL_PCIE_REG_31_MASK;
-
-	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+	switch (ar->hw_rev) {
+	case ATH10K_HW_QCA988X:
+	case ATH10K_HW_QCA6174:
+		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+					CORE_CTRL_ADDRESS);
+		val |= CORE_CTRL_PCIE_REG_31_MASK;
+		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+				   CORE_CTRL_ADDRESS, val);
+		break;
+	case ATH10K_HW_QCA99X0:
+		/* TODO: Find appropriate register configuration for QCA99X0
+		 * to unmask irq/MSI.
+		 */
+		break;
+	}
 }
 
 static void ath10k_pci_irq_disable(struct ath10k *ar)
@@ -1506,7 +1637,7 @@
 	 * masked. To prevent the device from asserting the interrupt reset it
 	 * before proceeding with cleanup.
 	 */
-	ath10k_pci_warm_reset(ar);
+	ath10k_pci_safe_chip_reset(ar);
 
 	ath10k_pci_irq_disable(ar);
 	ath10k_pci_irq_sync(ar);
@@ -1546,8 +1677,10 @@
 
 	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
 	ret = dma_mapping_error(ar->dev, req_paddr);
-	if (ret)
+	if (ret) {
+		ret = -EIO;
 		goto err_dma;
+	}
 
 	if (resp && resp_len) {
 		tresp = kzalloc(*resp_len, GFP_KERNEL);
@@ -1559,8 +1692,10 @@
 		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
 					    DMA_FROM_DEVICE);
 		ret = dma_mapping_error(ar->dev, resp_paddr);
-		if (ret)
+		if (ret) {
+			ret = -EIO;
 			goto err_req;
+		}
 
 		xfer.wait_for_resp = true;
 		xfer.resp_len = 0;
@@ -1687,7 +1822,9 @@
 
 	switch (ar_pci->pdev->device) {
 	case QCA988X_2_0_DEVICE_ID:
+	case QCA99X0_2_0_DEVICE_ID:
 		return 1;
+	case QCA6164_2_1_DEVICE_ID:
 	case QCA6174_2_1_DEVICE_ID:
 		switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
 		case QCA6174_HW_1_0_CHIP_ID_REV:
@@ -1757,7 +1894,8 @@
 
 	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
 					target_ce_config_wlan,
-					sizeof(target_ce_config_wlan));
+					sizeof(struct ce_pipe_config) *
+					NUM_TARGET_CE_CONFIG_WLAN);
 
 	if (ret != 0) {
 		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
@@ -1871,7 +2009,7 @@
 		}
 
 		/* Last CE is Diagnostic Window */
-		if (i == CE_COUNT - 1) {
+		if (i == CE_DIAG_PIPE) {
 			ar_pci->ce_diag = pipe->ce_hdl;
 			continue;
 		}
@@ -2016,6 +2154,18 @@
 	return 0;
 }
 
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
+{
+	if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
+		return ath10k_pci_warm_reset(ar);
+	} else if (QCA_REV_99X0(ar)) {
+		ath10k_pci_irq_disable(ar);
+		return ath10k_pci_qca99x0_chip_reset(ar);
+	} else {
+		return -ENOTSUPP;
+	}
+}
+
 static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
 {
 	int i, ret;
@@ -2122,12 +2272,38 @@
 	return 0;
 }
 
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
+{
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
+
+	ret = ath10k_pci_cold_reset(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_pci_wait_for_target_init(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+			    ret);
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
+
+	return 0;
+}
+
 static int ath10k_pci_chip_reset(struct ath10k *ar)
 {
 	if (QCA_REV_988X(ar))
 		return ath10k_pci_qca988x_chip_reset(ar);
 	else if (QCA_REV_6174(ar))
 		return ath10k_pci_qca6174_chip_reset(ar);
+	else if (QCA_REV_99X0(ar))
+		return ath10k_pci_qca99x0_chip_reset(ar);
 	else
 		return -ENOTSUPP;
 }
@@ -2602,7 +2778,6 @@
 
 static int ath10k_pci_cold_reset(struct ath10k *ar)
 {
-	int i;
 	u32 val;
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
@@ -2618,23 +2793,18 @@
 	val |= 1;
 	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
-	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
-					  RTC_STATE_COLD_RESET_MASK)
-			break;
-		msleep(1);
-	}
+	/* After writing SOC_GLOBAL_RESET to put the device into reset,
+	 * and again when pulling it out of reset, PCIe may not be
+	 * stable for immediate register access and can cause bus
+	 * errors; add a delay before any PCIe access to avoid this.
+	 */
+	msleep(20);
 
 	/* Pull Target, including PCIe, out of RESET. */
 	val &= ~1;
 	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
-	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
-					    RTC_STATE_COLD_RESET_MASK))
-			break;
-		msleep(1);
-	}
+	msleep(20);
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
 
@@ -2679,6 +2849,7 @@
 	pci_set_master(pdev);
 
 	/* Arrange for access to Target SoC registers. */
+	ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
 	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
 	if (!ar_pci->mem) {
 		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
@@ -2742,9 +2913,13 @@
 	case QCA988X_2_0_DEVICE_ID:
 		hw_rev = ATH10K_HW_QCA988X;
 		break;
+	case QCA6164_2_1_DEVICE_ID:
 	case QCA6174_2_1_DEVICE_ID:
 		hw_rev = ATH10K_HW_QCA6174;
 		break;
+	case QCA99X0_2_0_DEVICE_ID:
+		hw_rev = ATH10K_HW_QCA99X0;
+		break;
 	default:
 		WARN_ON(1);
 		return -ENOTSUPP;
@@ -2763,6 +2938,7 @@
 	ar_pci->pdev = pdev;
 	ar_pci->dev = &pdev->dev;
 	ar_pci->ar = ar;
+	ar->dev_id = pci_dev->device;
 
 	if (pdev->subsystem_vendor || pdev->subsystem_device)
 		scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index d7696dd..8d364fb 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -162,6 +162,7 @@
 	struct device *dev;
 	struct ath10k *ar;
 	void __iomem *mem;
+	size_t mem_len;
 
 	/*
 	 * Number of MSI interrupts granted, 0 --> using legacy PCI line
@@ -236,18 +237,6 @@
 #define CDC_WAR_MAGIC_STR   0xceef0000
 #define CDC_WAR_DATA_CE     4
 
-/*
- * TODO: Should be a function call specific to each Target-type.
- * This convoluted macro converts from Target CPU Virtual Address Space to CE
- * Address Space. As part of this process, we conservatively fetch the current
- * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
- * for this device; but that's not guaranteed.
- */
-#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr)			\
-	(((ath10k_pci_read32(ar, (SOC_CORE_BASE_ADDRESS |		\
-	  CORE_CTRL_ADDRESS)) & 0x7ff) << 21) |				\
-	 0x100000 | ((addr) & 0xfffff))
-
 /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
 #define DIAG_ACCESS_CE_TIMEOUT_MS 10
 
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index 492b5a5..ca8d168 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -422,6 +422,12 @@
 #define RX_MSDU_START_INFO1_IP_FRAG             (1 << 14)
 #define RX_MSDU_START_INFO1_TCP_ONLY_ACK        (1 << 15)
 
+#define RX_MSDU_START_INFO2_DA_IDX_MASK         0x000007ff
+#define RX_MSDU_START_INFO2_DA_IDX_LSB          0
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_MASK 0x00ff0000
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_LSB  16
+#define RX_MSDU_START_INFO2_DA_BCAST_MCAST      BIT(11)
+
 /* The decapped header (rx_hdr_status) contains the following:
  *  a) 802.11 header
  *  [padding to 4 bytes]
@@ -449,12 +455,23 @@
 	RX_MSDU_DECAP_8023_SNAP_LLC = 3
 };
 
-struct rx_msdu_start {
+struct rx_msdu_start_common {
 	__le32 info0; /* %RX_MSDU_START_INFO0_ */
 	__le32 flow_id_crc;
 	__le32 info1; /* %RX_MSDU_START_INFO1_ */
 } __packed;
 
+struct rx_msdu_start_qca99x0 {
+	__le32 info2; /* %RX_MSDU_START_INFO2_ */
+} __packed;
+
+struct rx_msdu_start {
+	struct rx_msdu_start_common common;
+	union {
+		struct rx_msdu_start_qca99x0 qca99x0;
+	} __packed;
+} __packed;
+
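+
+Splitting rx_msdu_start into a common prefix plus a per-chip union keeps
+hw-agnostic code on the shared fields while chip-specific paths read the
+trailing words. A hedged access sketch; "rxd" and the predicate are
+hypothetical:
+
+	/* Hedged sketch; 'rxd' and 'hw_is_qca99x0' are hypothetical. */
+	u32 info1 = __le32_to_cpu(rxd->msdu_start.common.info1);
+
+	if (hw_is_qca99x0) {
+		u32 info2 = __le32_to_cpu(rxd->msdu_start.qca99x0.info2);
+		u32 da_idx = MS(info2, RX_MSDU_START_INFO2_DA_IDX);
+		/* da_idx indexes the destination-address table */
+	}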
 /*
  * msdu_length
  *		MSDU length in bytes after decapsulation.  This field is
@@ -540,7 +557,7 @@
 #define RX_MSDU_END_INFO0_PRE_DELIM_ERR             (1 << 30)
 #define RX_MSDU_END_INFO0_RESERVED_3B               (1 << 31)
 
-struct rx_msdu_end {
+struct rx_msdu_end_common {
 	__le16 ip_hdr_cksum;
 	__le16 tcp_hdr_cksum;
 	u8 key_id_octet;
@@ -549,6 +566,36 @@
 	__le32 info0;
 } __packed;
 
+#define RX_MSDU_END_INFO1_TCP_FLAG_MASK     0x000001ff
+#define RX_MSDU_END_INFO1_TCP_FLAG_LSB      0
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_MASK   0x00001c00
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_LSB    10
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_MASK  0xffff0000
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_LSB   16
+#define RX_MSDU_END_INFO1_IRO_ELIGIBLE      BIT(9)
+
+#define RX_MSDU_END_INFO2_DA_OFFSET_MASK    0x0000003f
+#define RX_MSDU_END_INFO2_DA_OFFSET_LSB     0
+#define RX_MSDU_END_INFO2_SA_OFFSET_MASK    0x00000fc0
+#define RX_MSDU_END_INFO2_SA_OFFSET_LSB     6
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_MASK  0x0003f000
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_LSB   12
+
+struct rx_msdu_end_qca99x0 {
+	__le32 ipv6_crc;
+	__le32 tcp_seq_no;
+	__le32 tcp_ack_no;
+	__le32 info1;
+	__le32 info2;
+} __packed;
+
+struct rx_msdu_end {
+	struct rx_msdu_end_common common;
+	union {
+		struct rx_msdu_end_qca99x0 qca99x0;
+	} __packed;
+} __packed;
+
 /*
  *ip_hdr_chksum
  *		This can include the IP header checksum or the pseudo header
@@ -870,7 +917,11 @@
 #define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24)
 #define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25)
 
-#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
+#define RX_PPDU_END_INFO1_PEER_IDX_MASK       0x1ffc
+#define RX_PPDU_END_INFO1_PEER_IDX_LSB        2
+#define RX_PPDU_END_INFO1_BB_DATA             BIT(0)
+#define RX_PPDU_END_INFO1_PEER_IDX_VALID      BIT(1)
+#define RX_PPDU_END_INFO1_PPDU_DONE           BIT(15)
 
 struct rx_ppdu_end_common {
 	__le32 evm_p0;
@@ -891,13 +942,13 @@
 	__le32 evm_p15;
 	__le32 tsf_timestamp;
 	__le32 wb_timestamp;
+} __packed;
+
+struct rx_ppdu_end_qca988x {
 	u8 locationing_timestamp;
 	u8 phy_err_code;
 	__le16 flags; /* %RX_PPDU_END_FLAGS_ */
 	__le32 info0; /* %RX_PPDU_END_INFO0_ */
-} __packed;
-
-struct rx_ppdu_end_qca988x {
 	__le16 bb_length;
 	__le16 info1; /* %RX_PPDU_END_INFO1_ */
 } __packed;
@@ -909,16 +960,126 @@
 #define RX_PPDU_END_RTT_NORMAL_MODE            BIT(31)
 
 struct rx_ppdu_end_qca6174 {
+	u8 locationing_timestamp;
+	u8 phy_err_code;
+	__le16 flags; /* %RX_PPDU_END_FLAGS_ */
+	__le32 info0; /* %RX_PPDU_END_INFO0_ */
 	__le32 rtt; /* %RX_PPDU_END_RTT_ */
 	__le16 bb_length;
 	__le16 info1; /* %RX_PPDU_END_INFO1_ */
 } __packed;
 
+#define RX_PKT_END_INFO0_RX_SUCCESS              BIT(0)
+#define RX_PKT_END_INFO0_ERR_TX_INTERRUPT_RX     BIT(3)
+#define RX_PKT_END_INFO0_ERR_OFDM_POWER_DROP     BIT(4)
+#define RX_PKT_END_INFO0_ERR_OFDM_RESTART        BIT(5)
+#define RX_PKT_END_INFO0_ERR_CCK_POWER_DROP      BIT(6)
+#define RX_PKT_END_INFO0_ERR_CCK_RESTART         BIT(7)
+
+#define RX_LOCATION_INFO_RTT_CORR_VAL_MASK       0x0001ffff
+#define RX_LOCATION_INFO_RTT_CORR_VAL_LSB        0
+#define RX_LOCATION_INFO_FAC_STATUS_MASK         0x000c0000
+#define RX_LOCATION_INFO_FAC_STATUS_LSB          18
+#define RX_LOCATION_INFO_PKT_BW_MASK             0x00700000
+#define RX_LOCATION_INFO_PKT_BW_LSB              20
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_MASK 0x01800000
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_LSB  23
+#define RX_LOCATION_INFO_CIR_STATUS              BIT(17)
+#define RX_LOCATION_INFO_RTT_MAC_PHY_PHASE       BIT(25)
+#define RX_LOCATION_INFO_RTT_TX_DATA_START_X     BIT(26)
+#define RX_LOCATION_INFO_HW_IFFT_MODE            BIT(30)
+#define RX_LOCATION_INFO_RX_LOCATION_VALID       BIT(31)
+
+struct rx_pkt_end {
+	__le32 info0; /* %RX_PKT_END_INFO0_ */
+	__le32 phy_timestamp_1;
+	__le32 phy_timestamp_2;
+	__le32 rx_location_info; /* %RX_LOCATION_INFO_ */
+} __packed;
+
+enum rx_phy_ppdu_end_info0 {
+	RX_PHY_PPDU_END_INFO0_ERR_RADAR           = BIT(2),
+	RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT        = BIT(3),
+	RX_PHY_PPDU_END_INFO0_ERR_RX_NAP          = BIT(4),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_TIMING     = BIT(5),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_PARITY     = BIT(6),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_RATE       = BIT(7),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_LENGTH     = BIT(8),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_RESTART    = BIT(9),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_SERVICE    = BIT(10),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_POWER_DROP = BIT(11),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_BLOCKER     = BIT(12),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_TIMING      = BIT(13),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_HEADER_CRC  = BIT(14),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_RATE        = BIT(15),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_LENGTH      = BIT(16),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_RESTART     = BIT(17),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_SERVICE     = BIT(18),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_POWER_DROP  = BIT(19),
+	RX_PHY_PPDU_END_INFO0_ERR_HT_CRC          = BIT(20),
+	RX_PHY_PPDU_END_INFO0_ERR_HT_LENGTH       = BIT(21),
+	RX_PHY_PPDU_END_INFO0_ERR_HT_RATE         = BIT(22),
+	RX_PHY_PPDU_END_INFO0_ERR_HT_ZLF          = BIT(23),
+	RX_PHY_PPDU_END_INFO0_ERR_FALSE_RADAR_EXT = BIT(24),
+	RX_PHY_PPDU_END_INFO0_ERR_GREEN_FIELD     = BIT(25),
+	RX_PHY_PPDU_END_INFO0_ERR_SPECTRAL_SCAN   = BIT(26),
+	RX_PHY_PPDU_END_INFO0_ERR_RX_DYN_BW       = BIT(27),
+	RX_PHY_PPDU_END_INFO0_ERR_LEG_HT_MISMATCH = BIT(28),
+	RX_PHY_PPDU_END_INFO0_ERR_VHT_CRC         = BIT(29),
+	RX_PHY_PPDU_END_INFO0_ERR_VHT_SIGA        = BIT(30),
+	RX_PHY_PPDU_END_INFO0_ERR_VHT_LSIG        = BIT(31),
+};
+
+enum rx_phy_ppdu_end_info1 {
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_NDP            = BIT(0),
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_NSYM           = BIT(1),
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_EXT_SYM     = BIT(2),
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID0    = BIT(3),
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID1_62 = BIT(4),
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID63   = BIT(5),
+	RX_PHY_PPDU_END_INFO1_ERR_OFDM_LDPC_DECODER  = BIT(6),
+	RX_PHY_PPDU_END_INFO1_ERR_DEFER_NAP          = BIT(7),
+	RX_PHY_PPDU_END_INFO1_ERR_FDOMAIN_TIMEOUT    = BIT(8),
+	RX_PHY_PPDU_END_INFO1_ERR_LSIG_REL_CHECK     = BIT(9),
+	RX_PHY_PPDU_END_INFO1_ERR_BT_COLLISION       = BIT(10),
+	RX_PHY_PPDU_END_INFO1_ERR_MU_FEEDBACK        = BIT(11),
+	RX_PHY_PPDU_END_INFO1_ERR_TX_INTERRUPT_RX    = BIT(12),
+	RX_PHY_PPDU_END_INFO1_ERR_RX_CBF             = BIT(13),
+};
+
+struct rx_phy_ppdu_end {
+	__le32 info0; /* %RX_PHY_PPDU_END_INFO0_ */
+	__le32 info1; /* %RX_PHY_PPDU_END_INFO1_ */
+} __packed;
+
+#define RX_PPDU_END_RX_TIMING_OFFSET_MASK          0x00000fff
+#define RX_PPDU_END_RX_TIMING_OFFSET_LSB           0
+
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_MASK        0x00ffffff
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_LSB         0
+#define RX_PPDU_END_RX_INFO_TX_HT_VHT_ACK          BIT(24)
+#define RX_PPDU_END_RX_INFO_RX_PKT_END_VALID       BIT(25)
+#define RX_PPDU_END_RX_INFO_RX_PHY_PPDU_END_VALID  BIT(26)
+#define RX_PPDU_END_RX_INFO_RX_TIMING_OFFSET_VALID BIT(27)
+#define RX_PPDU_END_RX_INFO_BB_CAPTURED_CHANNEL    BIT(28)
+#define RX_PPDU_END_RX_INFO_UNSUPPORTED_MU_NC      BIT(29)
+#define RX_PPDU_END_RX_INFO_OTP_TXBF_DISABLE       BIT(30)
+
+struct rx_ppdu_end_qca99x0 {
+	struct rx_pkt_end rx_pkt_end;
+	struct rx_phy_ppdu_end rx_phy_ppdu_end;
+	__le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
+	__le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
+	__le16 bb_length;
+	__le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
 struct rx_ppdu_end {
 	struct rx_ppdu_end_common common;
 	union {
 		struct rx_ppdu_end_qca988x qca988x;
 		struct rx_ppdu_end_qca6174 qca6174;
+		struct rx_ppdu_end_qca99x0 qca99x0;
 	} __packed;
 } __packed;
 
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index 8dcd424..4671cfb 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -57,7 +57,7 @@
 }
 
 int ath10k_spectral_process_fft(struct ath10k *ar,
-				const struct wmi_phyerr *phyerr,
+				struct wmi_phyerr_ev_arg *phyerr,
 				const struct phyerr_fft_report *fftr,
 				size_t bin_len, u64 tsf)
 {
@@ -73,6 +73,15 @@
 	if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS)
 		return -EINVAL;
 
+	/* qca99x0 reports a bin size of 68 bytes (64 + 4) in report
+	 * mode 2. The first 64 bytes carry the inband tones (-32 to +31)
+	 * and the last 4 bytes carry the band edge detection data (+32),
+	 * mainly used for radar detection. Strip the last 4 bytes to
+	 * bring the bin size back to a valid value.
+	 */
+	if (bin_len == 68)
+		bin_len -= 4;
+
 	reg0 = __le32_to_cpu(fftr->reg0);
 	reg1 = __le32_to_cpu(fftr->reg1);
 
@@ -118,15 +127,14 @@
 	fft_sample->total_gain_db = __cpu_to_be16(total_gain_db);
 	fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db);
 
-	freq1 = __le16_to_cpu(phyerr->freq1);
-	freq2 = __le16_to_cpu(phyerr->freq2);
+	freq1 = phyerr->freq1;
+	freq2 = phyerr->freq2;
 	fft_sample->freq1 = __cpu_to_be16(freq1);
 	fft_sample->freq2 = __cpu_to_be16(freq2);
 
 	chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX);
 
-	fft_sample->noise = __cpu_to_be16(
-			__le16_to_cpu(phyerr->nf_chains[chain_idx]));
+	fft_sample->noise = __cpu_to_be16(phyerr->nf_chains[chain_idx]);
 
 	bins = (u8 *)fftr;
 	bins += sizeof(*fftr);
diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h
index 042f5b3..89b0ad7 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.h
+++ b/drivers/net/wireless/ath/ath10k/spectral.h
@@ -47,7 +47,7 @@
 #ifdef CONFIG_ATH10K_DEBUGFS
 
 int ath10k_spectral_process_fft(struct ath10k *ar,
-				const struct wmi_phyerr *phyerr,
+				struct wmi_phyerr_ev_arg *phyerr,
 				const struct phyerr_fft_report *fftr,
 				size_t bin_len, u64 tsf);
 int ath10k_spectral_start(struct ath10k *ar);
@@ -59,7 +59,7 @@
 
 static inline int
 ath10k_spectral_process_fft(struct ath10k *ar,
-			    const struct wmi_phyerr *phyerr,
+			    struct wmi_phyerr_ev_arg *phyerr,
 			    const struct phyerr_fft_report *fftr,
 			    size_t bin_len, u64 tsf)
 {
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
new file mode 100644
index 0000000..3ca3fae
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* This file implements the code swap logic. With the code swap feature,
+ * the target can run the firmware binary with a smaller IRAM footprint
+ * by using host memory to store some of the code segments.
+ */
+
+#include "core.h"
+#include "bmi.h"
+#include "debug.h"
+
+static int ath10k_swap_code_seg_fill(struct ath10k *ar,
+				     struct ath10k_swap_code_seg_info *seg_info,
+				     const void *data, size_t data_len)
+{
+	u8 *virt_addr = seg_info->virt_address[0];
+	u8 swap_magic[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ] = {};
+	const u8 *fw_data = data;
+	union ath10k_swap_code_seg_item *swap_item;
+	u32 length = 0;
+	u32 payload_len;
+	u32 total_payload_len = 0;
+	u32 size_left = data_len;
+
+	/* Parse the swap binary and copy its content into host-allocated
+	 * memory. The format is address, length and value; the last 4 bytes
+	 * are the target write address. The address field is currently unused.
+	 */
+	seg_info->target_addr = -1;
+	while (size_left >= sizeof(*swap_item)) {
+		swap_item = (union ath10k_swap_code_seg_item *)fw_data;
+		payload_len = __le32_to_cpu(swap_item->tlv.length);
+		if ((payload_len > size_left) ||
+		    (payload_len == 0 &&
+		     size_left != sizeof(struct ath10k_swap_code_seg_tail))) {
+			ath10k_err(ar, "refusing to parse invalid tlv length %d\n",
+				   payload_len);
+			return -EINVAL;
+		}
+
+		if (payload_len == 0) {
+			if (memcmp(swap_item->tail.magic_signature, swap_magic,
+				   ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ)) {
+				ath10k_err(ar, "refusing an invalid swap file\n");
+				return -EINVAL;
+			}
+			seg_info->target_addr =
+				__le32_to_cpu(swap_item->tail.bmi_write_addr);
+			break;
+		}
+
+		memcpy(virt_addr, swap_item->tlv.data, payload_len);
+		virt_addr += payload_len;
+		length = payload_len + sizeof(struct ath10k_swap_code_seg_tlv);
+		size_left -= length;
+		fw_data += length;
+		total_payload_len += payload_len;
+	}
+
+	if (seg_info->target_addr == -1) {
+		ath10k_err(ar, "failed to parse invalid swap file\n");
+		return -EINVAL;
+	}
+	seg_info->seg_hw_info.swap_size = __cpu_to_le32(total_payload_len);
+
+	return 0;
+}
+
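+For reference, a hedged sketch of the on-disk layout the parser above walks
+(record framing per the structs in swap.h further below; single-segment case):
+
+	/*
+	 * Hedged layout sketch of a swap binary:
+	 *
+	 *   [ tlv: address | length = N | data[N] ]   repeated as needed
+	 *   [ tail: magic_signature[12] = zeros | bmi_write_addr ]
+	 *
+	 * A zero 'length' marks the tail; its bmi_write_addr tells BMI
+	 * where to write the segment descriptor on the target.
+	 */
+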
+static void
+ath10k_swap_code_seg_free(struct ath10k *ar,
+			  struct ath10k_swap_code_seg_info *seg_info)
+{
+	u32 seg_size;
+
+	if (!seg_info)
+		return;
+
+	if (!seg_info->virt_address[0])
+		return;
+
+	seg_size = __le32_to_cpu(seg_info->seg_hw_info.size);
+	dma_free_coherent(ar->dev, seg_size, seg_info->virt_address[0],
+			  seg_info->paddr[0]);
+}
+
+static struct ath10k_swap_code_seg_info *
+ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
+{
+	struct ath10k_swap_code_seg_info *seg_info;
+	void *virt_addr;
+	dma_addr_t paddr;
+
+	swap_bin_len = roundup(swap_bin_len, 2);
+	if (swap_bin_len > ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX) {
+		ath10k_err(ar, "refusing code swap bin because it is too big %zu > %d\n",
+			   swap_bin_len, ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX);
+		return NULL;
+	}
+
+	seg_info = devm_kzalloc(ar->dev, sizeof(*seg_info), GFP_KERNEL);
+	if (!seg_info)
+		return NULL;
+
+	virt_addr = dma_alloc_coherent(ar->dev, swap_bin_len, &paddr,
+				       GFP_KERNEL);
+	if (!virt_addr) {
+		ath10k_err(ar, "failed to allocate dma coherent memory\n");
+		return NULL;
+	}
+
+	seg_info->seg_hw_info.bus_addr[0] = __cpu_to_le32(paddr);
+	seg_info->seg_hw_info.size = __cpu_to_le32(swap_bin_len);
+	seg_info->seg_hw_info.swap_size = __cpu_to_le32(swap_bin_len);
+	seg_info->seg_hw_info.num_segs =
+			__cpu_to_le32(ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED);
+	seg_info->seg_hw_info.size_log2 = __cpu_to_le32(ilog2(swap_bin_len));
+	seg_info->virt_address[0] = virt_addr;
+	seg_info->paddr[0] = paddr;
+
+	return seg_info;
+}
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+				   enum ath10k_swap_code_seg_bin_type type)
+{
+	int ret;
+	struct ath10k_swap_code_seg_info *seg_info = NULL;
+
+	switch (type) {
+	case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW:
+		if (!ar->swap.firmware_swap_code_seg_info)
+			return 0;
+
+		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
+		seg_info = ar->swap.firmware_swap_code_seg_info;
+		break;
+	default:
+	case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP:
+	case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF:
+		ath10k_warn(ar, "ignoring unknown code swap binary type %d\n",
+			    type);
+		return 0;
+	}
+
+	ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
+				      &seg_info->seg_hw_info,
+				      sizeof(seg_info->seg_hw_info));
+	if (ret) {
+		ath10k_err(ar, "failed to write Code swap segment information (%d)\n",
+			   ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+void ath10k_swap_code_seg_release(struct ath10k *ar)
+{
+	ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
+	ar->swap.firmware_codeswap_data = NULL;
+	ar->swap.firmware_codeswap_len = 0;
+	ar->swap.firmware_swap_code_seg_info = NULL;
+}
+
+int ath10k_swap_code_seg_init(struct ath10k *ar)
+{
+	int ret;
+	struct ath10k_swap_code_seg_info *seg_info;
+
+	if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data)
+		return 0;
+
+	seg_info = ath10k_swap_code_seg_alloc(ar,
+					      ar->swap.firmware_codeswap_len);
+	if (!seg_info) {
+		ath10k_err(ar, "failed to allocate fw code swap segment\n");
+		return -ENOMEM;
+	}
+
+	ret = ath10k_swap_code_seg_fill(ar, seg_info,
+					ar->swap.firmware_codeswap_data,
+					ar->swap.firmware_codeswap_len);
+
+	if (ret) {
+		ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n",
+			    ret);
+		ath10k_swap_code_seg_free(ar, seg_info);
+		return ret;
+	}
+
+	ar->swap.firmware_swap_code_seg_info = seg_info;
+
+	return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
new file mode 100644
index 0000000..5c89952
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SWAP_H_
+#define _SWAP_H_
+
+#define ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX	(512 * 1024)
+#define ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ	12
+#define ATH10K_SWAP_CODE_SEG_NUM_MAX		16
+/* Currently only one swap segment is supported */
+#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED	1
+
+struct ath10k_swap_code_seg_tlv {
+	__le32 address;
+	__le32 length;
+	u8 data[0];
+} __packed;
+
+struct ath10k_swap_code_seg_tail {
+	u8 magic_signature[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ];
+	__le32 bmi_write_addr;
+} __packed;
+
+union ath10k_swap_code_seg_item {
+	struct ath10k_swap_code_seg_tlv tlv;
+	struct ath10k_swap_code_seg_tail tail;
+} __packed;
+
+enum ath10k_swap_code_seg_bin_type {
+	ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP,
+	ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW,
+	ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF,
+};
+
+struct ath10k_swap_code_seg_hw_info {
+	/* Swap binary image size */
+	__le32 swap_size;
+	__le32 num_segs;
+
+	/* Swap data size */
+	__le32 size;
+	__le32 size_log2;
+	__le32 bus_addr[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+	__le64 reserved[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+} __packed;
+
+struct ath10k_swap_code_seg_info {
+	struct ath10k_swap_code_seg_hw_info seg_hw_info;
+	void *virt_address[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+	u32 target_addr;
+	dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+};
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+				   enum ath10k_swap_code_seg_bin_type type);
+void ath10k_swap_code_seg_release(struct ath10k *ar);
+int ath10k_swap_code_seg_init(struct ath10k *ar);
+
+#endif
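+
+A hedged sketch of the intended call order for this API during firmware
+bring-up (error handling trimmed; the actual call sites are not shown in
+this diff):
+
+	/* Hedged usage sketch, not the driver's actual boot path. */
+	int ret;
+
+	ret = ath10k_swap_code_seg_init(ar);	/* allocate + parse swap bin */
+	if (ret)
+		return ret;
+
+	/* ... download firmware, then point the target at the segment ... */
+	ret = ath10k_swap_code_seg_configure(ar,
+					     ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
+	if (ret)
+		return ret;
+
+	/* on teardown */
+	ath10k_swap_code_seg_release(ar);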
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index a417aae..768bef6 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -450,4 +450,7 @@
 #define QCA6174_BOARD_DATA_SZ     8192
 #define QCA6174_BOARD_EXT_DATA_SZ 0
 
+#define QCA99X0_BOARD_DATA_SZ	  12288
+#define QCA99X0_BOARD_EXT_DATA_SZ 0
+
 #endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 826500b..e4a9c4c 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -53,8 +53,6 @@
 	struct ath10k_skb_cb *skb_cb;
 	struct sk_buff *msdu;
 
-	lockdep_assert_held(&htt->tx_lock);
-
 	ath10k_dbg(ar, ATH10K_DBG_HTT,
 		   "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
 		   tx_done->msdu_id, !!tx_done->discard,
@@ -66,12 +64,19 @@
 		return;
 	}
 
+	spin_lock_bh(&htt->tx_lock);
 	msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
 	if (!msdu) {
 		ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
 			    tx_done->msdu_id);
+		spin_unlock_bh(&htt->tx_lock);
 		return;
 	}
+	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
+	__ath10k_htt_tx_dec_pending(htt);
+	if (htt->num_pending_tx == 0)
+		wake_up(&htt->empty_tx_wq);
+	spin_unlock_bh(&htt->tx_lock);
 
 	skb_cb = ATH10K_SKB_CB(msdu);
 
@@ -90,7 +95,7 @@
 
 	if (tx_done->discard) {
 		ieee80211_free_txskb(htt->ar->hw, msdu);
-		goto exit;
+		return;
 	}
 
 	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
@@ -104,12 +109,6 @@
 
 	ieee80211_tx_status(htt->ar->hw, msdu);
 	/* we do not own the msdu anymore */
-
-exit:
-	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
-	__ath10k_htt_tx_dec_pending(htt);
-	if (htt->num_pending_tx == 0)
-		wake_up(&htt->empty_tx_wq);
 }
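
The rework above releases the msdu_id and the pending-tx accounting under tx_lock at lookup time, so the completion no longer needs the caller to hold the lock. A hedged sketch of the lookup-and-release idiom; free_msdu_id is assumed to wrap idr_remove:

	/* Hedged sketch of the pattern; error paths trimmed. */
	struct sk_buff *msdu;

	spin_lock_bh(&htt->tx_lock);
	msdu = idr_find(&htt->pending_tx, msdu_id);
	if (msdu)
		idr_remove(&htt->pending_tx, msdu_id);
	spin_unlock_bh(&htt->tx_lock);

	if (msdu)
		ieee80211_tx_status(htt->ar->hw, msdu);	/* lock not held */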
 
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
@@ -147,9 +146,9 @@
 static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
 				       const u8 *addr, bool expect_mapped)
 {
-	int ret;
+	long time_left;
 
-	ret = wait_event_timeout(ar->peer_mapping_wq, ({
+	time_left = wait_event_timeout(ar->peer_mapping_wq, ({
 			bool mapped;
 
 			spin_lock_bh(&ar->data_lock);
@@ -160,7 +159,7 @@
 			 test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
 		}), 3*HZ);
 
-	if (ret <= 0)
+	if (time_left == 0)
 		return -ETIMEDOUT;
 
 	return 0;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 47fe2e7..248ffc3 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -37,8 +37,10 @@
 			      struct wmi_peer_kick_ev_arg *arg);
 	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
 			 struct wmi_swba_ev_arg *arg);
-	int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
-			   struct wmi_phyerr_ev_arg *arg);
+	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
+			       struct wmi_phyerr_hdr_arg *arg);
+	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
+			   int left_len, struct wmi_phyerr_ev_arg *arg);
 	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
 			    struct wmi_svc_rdy_ev_arg *arg);
 	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
@@ -49,6 +51,7 @@
 			    struct wmi_roam_ev_arg *arg);
 	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
 			      struct wmi_wow_ev_arg *arg);
+	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
 
 	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
 	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
@@ -260,13 +263,23 @@
 }
 
 static inline int
-ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
-		       struct wmi_phyerr_ev_arg *arg)
+ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
+			   struct wmi_phyerr_hdr_arg *arg)
+{
+	if (!ar->wmi.ops->pull_phyerr_hdr)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
+		       int left_len, struct wmi_phyerr_ev_arg *arg)
 {
 	if (!ar->wmi.ops->pull_phyerr)
 		return -EOPNOTSUPP;
 
-	return ar->wmi.ops->pull_phyerr(ar, skb, arg);
+	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
 }
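
The split above turns phyerr parsing into two stages: pull the event header once, then walk each phyerr record out of the buffer. A hedged consumer sketch, with record advancement elided:

	/* Hedged sketch of the two-stage parse the split enables. */
	struct wmi_phyerr_hdr_arg hdr;
	struct wmi_phyerr_ev_arg phyerr;
	const void *buf;
	int left, ret;
	u32 i;

	ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr);
	if (ret)
		return;

	buf = hdr.phyerrs;
	left = hdr.buf_len;
	for (i = 0; i < hdr.num_phyerrs; i++) {
		ret = ath10k_wmi_pull_phyerr(ar, buf, left, &phyerr);
		if (ret)
			break;
		/* advance buf and left by the record size (elided) */
	}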
 
 static inline int
@@ -319,6 +332,15 @@
 	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
 }
 
+static inline enum wmi_txbf_conf
+ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
+{
+	if (!ar->wmi.ops->get_txbf_conf_scheme)
+		return WMI_TXBF_CONF_UNSUPPORTED;
+
+	return ar->wmi.ops->get_txbf_conf_scheme(ar);
+}
+
 static inline int
 ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
 {
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 8fdba38..b5849b3 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -377,12 +377,34 @@
 		   "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
 		   pause_id, action, vdev_map, peer_id, tid_map);
 
-	for (vdev_id = 0; vdev_map; vdev_id++) {
-		if (!(vdev_map & BIT(vdev_id)))
-			continue;
+	switch (pause_id) {
+	case WMI_TLV_TX_PAUSE_ID_MCC:
+	case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
+	case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
+	case WMI_TLV_TX_PAUSE_ID_AP_PS:
+	case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
+		for (vdev_id = 0; vdev_map; vdev_id++) {
+			if (!(vdev_map & BIT(vdev_id)))
+				continue;
 
-		vdev_map &= ~BIT(vdev_id);
-		ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action);
+			vdev_map &= ~BIT(vdev_id);
+			ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
+							action);
+		}
+		break;
+	case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
+	case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
+	case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
+	case WMI_TLV_TX_PAUSE_ID_HOST:
+		ath10k_dbg(ar, ATH10K_DBG_MAC,
+			   "mac ignoring unsupported tx pause id %d\n",
+			   pause_id);
+		break;
+	default:
+		ath10k_dbg(ar, ATH10K_DBG_MAC,
+			   "mac ignoring unknown tx pause vdev %d\n",
+			   pause_id);
+		break;
 	}
 
 	kfree(tb);
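
The per-vdev pause path above walks vdev_map by clearing one set bit per iteration. A hedged standalone sketch of the same idiom; handle_vdev() is a hypothetical per-vdev handler:

	/* Hedged sketch: visit every set bit in a 32-bit vdev bitmap. */
	u32 map = vdev_map;
	int id;

	for (id = 0; map; id++) {
		if (!(map & BIT(id)))
			continue;

		map &= ~BIT(id);
		handle_vdev(id);	/* hypothetical handler */
	}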
@@ -497,7 +519,7 @@
 		break;
 	case WMI_TLV_SERVICE_READY_EVENTID:
 		ath10k_wmi_event_service_ready(ar, skb);
-		break;
+		return;
 	case WMI_TLV_READY_EVENTID:
 		ath10k_wmi_event_ready(ar, skb);
 		break;
@@ -709,6 +731,8 @@
 					 const void *ptr, void *data)
 {
 	struct wmi_tlv_swba_parse *swba = data;
+	struct wmi_tim_info_arg *tim_info_arg;
+	const struct wmi_tim_info *tim_info_ev = ptr;
 
 	if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
 		return -EPROTO;
@@ -716,7 +740,21 @@
 	if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
 		return -ENOBUFS;
 
-	swba->arg->tim_info[swba->n_tim++] = ptr;
+	if (__le32_to_cpu(tim_info_ev->tim_len) >
+	     sizeof(tim_info_ev->tim_bitmap)) {
+		ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+		return -EPROTO;
+	}
+
+	tim_info_arg = &swba->arg->tim_info[swba->n_tim];
+	tim_info_arg->tim_len = tim_info_ev->tim_len;
+	tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
+	tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
+	tim_info_arg->tim_changed = tim_info_ev->tim_changed;
+	tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;
+
+	swba->n_tim++;
+
 	return 0;
 }
 
@@ -800,9 +838,9 @@
 	return 0;
 }
 
-static int ath10k_wmi_tlv_op_pull_phyerr_ev(struct ath10k *ar,
-					    struct sk_buff *skb,
-					    struct wmi_phyerr_ev_arg *arg)
+static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+						struct sk_buff *skb,
+						struct wmi_phyerr_hdr_arg *arg)
 {
 	const void **tb;
 	const struct wmi_tlv_phyerr_ev *ev;
@@ -824,10 +862,10 @@
 		return -EPROTO;
 	}
 
-	arg->num_phyerrs  = ev->num_phyerrs;
-	arg->tsf_l32 = ev->tsf_l32;
-	arg->tsf_u32 = ev->tsf_u32;
-	arg->buf_len = ev->buf_len;
+	arg->num_phyerrs  = __le32_to_cpu(ev->num_phyerrs);
+	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+	arg->buf_len = __le32_to_cpu(ev->buf_len);
 	arg->phyerrs = phyerrs;
 
 	kfree(tb);
@@ -1241,6 +1279,11 @@
 	return skb;
 }
 
+static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
+{
+	return WMI_TXBF_CONF_AFTER_ASSOC;
+}
+
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
 				     u32 param_value)
@@ -1335,7 +1378,7 @@
 	cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
 	cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
 	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
-	cfg->rx_decap_mode = __cpu_to_le32(1);
+	cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
 	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
 	cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
 	cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
@@ -3151,6 +3194,38 @@
 	.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
 	.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
 	.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
+	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+	.nan_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
 };
 
 static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
@@ -3204,6 +3279,48 @@
 	.burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
 	.burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
 	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
@@ -3262,6 +3379,22 @@
 	.tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
 	.ap_detect_out_of_sync_sleeping_sta_time_secs =
 					WMI_TLV_VDEV_PARAM_UNSUPPORTED,
+	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 static const struct wmi_ops wmi_tlv_ops = {
@@ -3274,12 +3407,14 @@
 	.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
 	.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
 	.pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
-	.pull_phyerr = ath10k_wmi_tlv_op_pull_phyerr_ev,
+	.pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr,
+	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
 	.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
 	.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
 	.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
 	.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
 	.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
+	.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
 
 	.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
 	.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 6c046c2..ce01107 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -148,6 +148,48 @@
 	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
 	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
 	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+	.nan_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
 };
 
 /* 10.X WMI cmd track */
@@ -271,6 +313,48 @@
 	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
 	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
 	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+	.nan_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
 };
 
 /* 10.2.4 WMI cmd track */
@@ -393,6 +477,231 @@
 	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
 	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
 	.pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
+	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+	.nan_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+/* 10.4 WMI cmd track */
+static struct wmi_cmd_map wmi_10_4_cmd_map = {
+	.init_cmdid = WMI_10_4_INIT_CMDID,
+	.start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
+	.stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
+	.scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
+	.scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+	.pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+	.pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+	.pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
+	.pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+	.pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+	.pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+	.pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+	.pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+	.pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+	.pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+	.pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	.pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+	.pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+	.vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID,
+	.vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID,
+	.vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID,
+	.vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+	.vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID,
+	.vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID,
+	.vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID,
+	.vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID,
+	.vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+	.peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID,
+	.peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID,
+	.peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+	.peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID,
+	.peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID,
+	.peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+	.peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+	.peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID,
+	.bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID,
+	.pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID,
+	.bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID,
+	.bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID,
+	.prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+	.mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID,
+	.prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID,
+	.addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+	.addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID,
+	.addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID,
+	.delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID,
+	.addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID,
+	.send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+	.sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+	.sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+	.sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+	.pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+	.pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+	.roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE,
+	.roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+	.roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD,
+	.roam_scan_rssi_change_threshold =
+				WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	.roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE,
+	.ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+	.ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+	.ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD,
+	.p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+	.p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+	.p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE,
+	.p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+	.p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+	.ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+	.ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+	.peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+	.wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+	.wlan_profile_set_hist_intvl_cmdid =
+				WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	.wlan_profile_get_profile_data_cmdid =
+				WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	.wlan_profile_enable_profile_id_cmdid =
+				WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	.wlan_profile_list_profile_id_cmdid =
+				WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+	.pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID,
+	.pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID,
+	.add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID,
+	.rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID,
+	.wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+	.wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+	.wow_enable_disable_wake_event_cmdid =
+				WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	.wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID,
+	.wow_hostwakeup_from_sleep_cmdid =
+				WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+	.rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID,
+	.rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID,
+	.vdev_spectral_scan_configure_cmdid =
+				WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+	.vdev_spectral_scan_enable_cmdid =
+				WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+	.request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID,
+	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID,
+	.csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+	.csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+	.echo_cmdid = WMI_10_4_ECHO_CMDID,
+	.pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID,
+	.dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID,
+	.pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID,
+	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+	.vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+	.force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID,
+	.gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID,
+	.gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
+	.pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+	.vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
+	.tdls_set_state_cmdid = WMI_CMD_UNSUPPORTED,
+	.tdls_peer_update_cmdid = WMI_CMD_UNSUPPORTED,
+	.adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
+	.scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+	.vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+	.vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+	.wlan_peer_caching_add_peer_cmdid =
+			WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+	.wlan_peer_caching_evict_peer_cmdid =
+			WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+	.wlan_peer_caching_restore_peer_cmdid =
+			WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+	.wlan_peer_caching_print_all_peers_info_cmdid =
+			WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+	.peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+	.peer_add_proxy_sta_entry_cmdid =
+			WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+	.rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID,
+	.oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID,
+	.nan_cmdid = WMI_10_4_NAN_CMDID,
+	.vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID,
+	.qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID,
+	.pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+	.pdev_smart_ant_set_rx_antenna_cmdid =
+			WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+	.peer_smart_ant_set_tx_antenna_cmdid =
+			WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+	.peer_smart_ant_set_train_info_cmdid =
+			WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+	.peer_smart_ant_set_node_config_ops_cmdid =
+			WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+	.pdev_set_antenna_switch_table_cmdid =
+			WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+	.pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+	.pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+	.pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+	.pdev_ratepwr_chainmsk_table_cmdid =
+			WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+	.pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID,
+	.tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID,
+	.fwtest_cmdid = WMI_10_4_FWTEST_CMDID,
+	.vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+	.peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID,
+	.pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+	.pdev_get_ani_ofdm_config_cmdid =
+			WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+	.pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+	.pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+	.pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID,
+	.pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+	.vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+	.pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID,
+	.vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID,
+	.vdev_filter_neighbor_rx_packets_cmdid =
+			WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+	.mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID,
+	.set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
+	.pdev_bss_chan_info_request_cmdid =
+			WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
 };
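
Each firmware branch gets its own command map, so the rest of the driver can address commands by one abstract field name while unsupported entries are rejected centrally. A minimal sketch of that guard, assuming a hypothetical wrapper name (the real rejection happens in the driver's common WMI send path):

/* Sketch only: wmi_cmd_send_checked() is a hypothetical helper showing
 * how WMI_CMD_UNSUPPORTED entries in the per-version maps are meant to
 * be handled before anything reaches the firmware.
 */
static int wmi_cmd_send_checked(struct ath10k *ar, struct sk_buff *skb,
				u32 cmd_id)
{
	if (cmd_id == WMI_CMD_UNSUPPORTED) {
		dev_kfree_skb_any(skb);	/* nothing to send on this branch */
		return -EOPNOTSUPP;
	}

	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
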
 
 /* MAIN WMI VDEV param map */
@@ -452,6 +761,22 @@
 	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
 	.ap_detect_out_of_sync_sleeping_sta_time_secs =
 					WMI_VDEV_PARAM_UNSUPPORTED,
+	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 /* 10.X WMI VDEV param map */
@@ -511,6 +836,22 @@
 	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
 	.ap_detect_out_of_sync_sleeping_sta_time_secs =
 		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
@@ -569,6 +910,97 @@
 	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
 	.ap_detect_out_of_sync_sleeping_sta_time_secs =
 		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
+	.rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD,
+	.fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+	.beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+	.listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+	.multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+	.mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+	.slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME,
+	.preamble = WMI_10_4_VDEV_PARAM_PREAMBLE,
+	.swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME,
+	.wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+	.wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+	.wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+	.dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+	.wmi_vdev_oc_scheduler_air_time_limit =
+	       WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	.wds = WMI_10_4_VDEV_PARAM_WDS,
+	.atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+	.bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+	.bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+	.bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+	.feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+	.chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH,
+	.chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+	.disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+	.sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+	.mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE,
+	.protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+	.fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE,
+	.sgi = WMI_10_4_VDEV_PARAM_SGI,
+	.ldpc = WMI_10_4_VDEV_PARAM_LDPC,
+	.tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC,
+	.rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC,
+	.intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+	.def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID,
+	.nss = WMI_10_4_VDEV_PARAM_NSS,
+	.bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+	.mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+	.mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+	.dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+	.unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+	.ap_keepalive_min_idle_inactive_time_secs =
+	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_idle_inactive_time_secs =
+	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_unresponsive_time_secs =
+	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+	.ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+	.mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+	.enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+	.txbf = WMI_10_4_VDEV_PARAM_TXBF,
+	.packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+	.drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+	.tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+	.ap_detect_out_of_sync_sleeping_sta_time_secs =
+	       WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+	.rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+	.cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+	.mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+	.rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+	.vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+	.vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+	.early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+	.early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+	.early_rx_bmiss_sample_cycle =
+	       WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+	.early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+	.early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+	.early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+	.proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA,
+	.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
+	.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+	.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
 };
 
 static struct wmi_pdev_param_map wmi_pdev_param_map = {
@@ -621,6 +1053,48 @@
 	.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
 	.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
 	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
@@ -674,6 +1148,48 @@
 	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
 	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
 	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
@@ -727,6 +1243,48 @@
 	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
 	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
 	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 /* firmware 10.2 specific mappings */
@@ -849,6 +1407,139 @@
 	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
 	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
 	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+	.nan_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
+	.tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK,
+	.rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+	.txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+	.txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+	.txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+	.beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+	.beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+	.resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	.protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+	.dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+	.non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	.agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+	.sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+	.ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	.ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+	.ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	.ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	.ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	.ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	.ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	.ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	.ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+	.ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	.l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+	.dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+	.pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+	.pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+	.pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+	.pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+	.pdev_stats_update_period =
+			WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	.vdev_stats_update_period =
+			WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	.peer_stats_update_period =
+			WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	.bcnflt_stats_update_period =
+			WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	.pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS,
+	.arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+	.dcs = WMI_10_4_PDEV_PARAM_DCS,
+	.ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+	.ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+	.ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+	.ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+	.ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+	.dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+	.proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA,
+	.idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+	.power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+	.fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+	.burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR,
+	.burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+	.cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+	.aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST,
+	.rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+	.smart_antenna_default_antenna =
+			WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+	.igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+	.igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+	.antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+	.rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER,
+	.set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+	.proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+	.set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+	.set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+	.remove_mcast2ucast_buffer =
+			WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+	.peer_sta_ps_statechg_enable =
+			WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+	.igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+	.block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+	.set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+	.set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+	.set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+	.txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+	.set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+	.set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+	.en_stats = WMI_10_4_PDEV_PARAM_EN_STATS,
+	.mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+	.noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+	.noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+	.dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+	.set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+	.atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+	.atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+	.ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN,
+	.mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+	.sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+	.signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+	.signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+	.enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+	.enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+	.cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+	.rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+	.pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET,
+	.wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+	.arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+	.arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
 };
 
 void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
@@ -1232,6 +1923,8 @@
 			return "completed [preempted]";
 		case WMI_SCAN_REASON_TIMEDOUT:
 			return "completed [timedout]";
+		case WMI_SCAN_REASON_INTERNAL_FAILURE:
+			return "completed [internal err]";
 		case WMI_SCAN_REASON_MAX:
 			break;
 		}
@@ -1246,6 +1939,10 @@
 		return "preempted";
 	case WMI_SCAN_EVENT_START_FAILED:
 		return "start failed";
+	case WMI_SCAN_EVENT_RESTARTED:
+		return "restarted";
+	case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
+		return "foreign channel exit";
 	default:
 		return "unknown";
 	}
@@ -1321,6 +2018,8 @@
 		break;
 	case WMI_SCAN_EVENT_DEQUEUED:
 	case WMI_SCAN_EVENT_PREEMPTED:
+	case WMI_SCAN_EVENT_RESTARTED:
+	case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
 	default:
 		break;
 	}
@@ -1433,6 +2132,40 @@
 	return 0;
 }
 
+static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
+					      struct sk_buff *skb,
+					      struct wmi_mgmt_rx_ev_arg *arg)
+{
+	struct wmi_10_4_mgmt_rx_event *ev;
+	struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
+	size_t pull_len;
+	u32 msdu_len;
+
+	ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
+	ev_hdr = &ev->hdr;
+	pull_len = sizeof(*ev);
+
+	if (skb->len < pull_len)
+		return -EPROTO;
+
+	skb_pull(skb, pull_len);
+	arg->channel = ev_hdr->channel;
+	arg->buf_len = ev_hdr->buf_len;
+	arg->status = ev_hdr->status;
+	arg->snr = ev_hdr->snr;
+	arg->phy_mode = ev_hdr->phy_mode;
+	arg->rate = ev_hdr->rate;
+
+	msdu_len = __le32_to_cpu(arg->buf_len);
+	if (skb->len < msdu_len)
+		return -EPROTO;
+
+	/* Make sure bytes added for padding are removed. */
+	skb_trim(skb, msdu_len);
+
+	return 0;
+}
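
The pull helpers above all share one shape: check that the buffer holds the fixed header, strip it, convert the little-endian fields into a native-endian arg struct, and treat anything past buf_len as padding. A standalone sketch of that shape with simplified stand-in types (not the driver's structs; le32toh() is glibc's endian helper):

#include <endian.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

struct ev_hdr { uint32_t channel; uint32_t buf_len; };	/* LE on the wire */
struct ev_arg { uint32_t channel; uint32_t buf_len; };	/* native endian */

/* Returns 0 on success, -EPROTO when the buffer is too short. */
static int pull_event(const uint8_t *buf, size_t len, struct ev_arg *arg)
{
	struct ev_hdr hdr;

	if (len < sizeof(hdr))
		return -EPROTO;			/* short header */

	memcpy(&hdr, buf, sizeof(hdr));
	arg->channel = le32toh(hdr.channel);
	arg->buf_len = le32toh(hdr.buf_len);

	if (len - sizeof(hdr) < arg->buf_len)
		return -EPROTO;			/* truncated payload */

	/* only buf_len bytes are meaningful; the rest is padding */
	return 0;
}
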
+
 int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct wmi_mgmt_rx_ev_arg arg = {};
@@ -1593,6 +2326,29 @@
 	return 0;
 }
 
+static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
+					      struct sk_buff *skb,
+					      struct wmi_ch_info_ev_arg *arg)
+{
+	struct wmi_10_4_chan_info_event *ev = (void *)skb->data;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	skb_pull(skb, sizeof(*ev));
+	arg->err_code = ev->err_code;
+	arg->freq = ev->freq;
+	arg->cmd_flags = ev->cmd_flags;
+	arg->noise_floor = ev->noise_floor;
+	arg->rx_clear_count = ev->rx_clear_count;
+	arg->cycle_count = ev->cycle_count;
+	arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+	arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+	arg->rx_frame_count = ev->rx_frame_count;
+
+	return 0;
+}
+
 void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct wmi_ch_info_ev_arg arg = {};
@@ -1656,8 +2412,10 @@
 		ar->ch_info_can_report_survey = true;
 	}
 
-	ar->survey_last_rx_clear_count = rx_clear_count;
-	ar->survey_last_cycle_count = cycle_count;
+	if (!(cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) {
+		ar->survey_last_rx_clear_count = rx_clear_count;
+		ar->survey_last_cycle_count = cycle_count;
+	}
 
 exit:
 	spin_unlock_bh(&ar->data_lock);
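
With the WMI_CHAN_INFO_FLAG_PRE_COMPLETE guard, intermediate chan-info reports no longer move the survey baselines; only the report that completes a channel does. A standalone sketch of the pattern, with an assumed flag value for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_PRE_COMPLETE 0x1	/* assumed bit layout, for illustration */

static uint32_t last_clear, last_cycle;

static void on_chan_info(uint32_t flags, uint32_t clear, uint32_t cycle)
{
	/* intermediate reports are read-only; only the completing
	 * report moves the baseline used for the next delta */
	if (!(flags & FLAG_PRE_COMPLETE)) {
		last_clear = clear;
		last_cycle = cycle;
	}
}

int main(void)
{
	on_chan_info(FLAG_PRE_COMPLETE, 10, 100);	/* ignored */
	on_chan_info(0, 20, 200);			/* commits */
	printf("baseline: clear=%" PRIu32 " cycle=%" PRIu32 "\n",
	       last_clear, last_cycle);
	return 0;
}
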
@@ -2149,33 +2907,42 @@
 static void ath10k_wmi_update_tim(struct ath10k *ar,
 				  struct ath10k_vif *arvif,
 				  struct sk_buff *bcn,
-				  const struct wmi_tim_info *tim_info)
+				  const struct wmi_tim_info_arg *tim_info)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
 	struct ieee80211_tim_ie *tim;
 	u8 *ies, *ie;
 	u8 ie_len, pvm_len;
 	__le32 t;
-	u32 v;
+	u32 v, tim_len;
+
+	/* When the FW reports 0 in tim_len, ensure at least the first byte
+	 * of tim_bitmap is considered for the pvm calculation.
+	 */
+	tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
 
 	/* if next SWBA has no tim_changed the tim_bitmap is garbage.
 	 * we must copy the bitmap upon change and reuse it later */
 	if (__le32_to_cpu(tim_info->tim_changed)) {
 		int i;
 
-		BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
-			     sizeof(tim_info->tim_bitmap));
+		if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) {
+			ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu",
+				    tim_len, sizeof(arvif->u.ap.tim_bitmap));
+			tim_len = sizeof(arvif->u.ap.tim_bitmap);
+		}
 
-		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
+		for (i = 0; i < tim_len; i++) {
 			t = tim_info->tim_bitmap[i / 4];
 			v = __le32_to_cpu(t);
 			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
 		}
 
-		/* FW reports either length 0 or 16
-		 * so we calculate this on our own */
+		/* FW reports either length 0 or a length based on the max
+		 * supported stations, so we calculate this on our own.
+		 */
 		arvif->u.ap.tim_len = 0;
-		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
+		for (i = 0; i < tim_len; i++)
 			if (arvif->u.ap.tim_bitmap[i])
 				arvif->u.ap.tim_len = i;
 
@@ -2199,7 +2966,7 @@
 	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
 
 	if (pvm_len < arvif->u.ap.tim_len) {
-		int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
+		int expand_size = tim_len - pvm_len;
 		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
 		void *next_ie = ie + 2 + ie_len;
 
@@ -2214,7 +2981,7 @@
 		}
 	}
 
-	if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
+	if (pvm_len > tim_len) {
 		ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
 		return;
 	}
@@ -2278,7 +3045,21 @@
 		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
 			break;
 
-		arg->tim_info[i] = &ev->bcn_info[i].tim_info;
+		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+		     sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+			return -EPROTO;
+		}
+
+		arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
+		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+		arg->tim_info[i].tim_bitmap =
+				ev->bcn_info[i].tim_info.tim_bitmap;
+		arg->tim_info[i].tim_changed =
+				ev->bcn_info[i].tim_info.tim_changed;
+		arg->tim_info[i].tim_num_ps_pending =
+				ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
 		arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
 		i++;
 	}
@@ -2286,12 +3067,74 @@
 	return 0;
 }
 
+static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
+					   struct sk_buff *skb,
+					   struct wmi_swba_ev_arg *arg)
+{
+	struct wmi_10_4_host_swba_event *ev = (void *)skb->data;
+	u32 map, tim_len;
+	size_t i;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	skb_pull(skb, sizeof(*ev));
+	arg->vdev_map = ev->vdev_map;
+
+	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+		if (!(map & BIT(0)))
+			continue;
+
+		/* If this happens, the firmware has changed and ath10k
+		 * should update the max size of the tim_info array.
+		 */
+		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+			break;
+
+		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+		      sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+			return -EPROTO;
+		}
+
+		tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len);
+		if (tim_len) {
+			/* Exclude the 4-byte guard length */
+			tim_len -= 4;
+			arg->tim_info[i].tim_len = __cpu_to_le32(tim_len);
+		} else {
+			arg->tim_info[i].tim_len = 0;
+		}
+
+		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+		arg->tim_info[i].tim_bitmap =
+				ev->bcn_info[i].tim_info.tim_bitmap;
+		arg->tim_info[i].tim_changed =
+				ev->bcn_info[i].tim_info.tim_changed;
+		arg->tim_info[i].tim_num_ps_pending =
+				ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
+		/* 10.4 firmware doesn't have P2P support; the Notice of
+		 * Absence info can be ignored for now.
+		 */
+
+		i++;
+	}
+
+	return 0;
+}
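
The vdev_map walks here and in the 1.0 pull above rely on the same idiom: shift the map right each iteration, skip clear bits, and advance the packed bcn_info index only on set bits. A standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t map = 0x15;	/* vdevs 0, 2 and 4 have beacon info */
	int i = 0;		/* index into the packed bcn_info[] array */
	int vdev_id;

	for (vdev_id = 0; map; map >>= 1, vdev_id++) {
		if (!(map & 1))
			continue;
		printf("bcn_info[%d] belongs to vdev %d\n", i, vdev_id);
		i++;
	}
	return 0;
}
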
+
+static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
+{
+	return WMI_TXBF_CONF_BEFORE_ASSOC;
+}
+
 void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct wmi_swba_ev_arg arg = {};
 	u32 map;
 	int i = -1;
-	const struct wmi_tim_info *tim_info;
+	const struct wmi_tim_info_arg *tim_info;
 	const struct wmi_p2p_noa_info *noa_info;
 	struct ath10k_vif *arvif;
 	struct sk_buff *bcn;
@@ -2320,7 +3163,7 @@
 			break;
 		}
 
-		tim_info = arg.tim_info[i];
+		tim_info = &arg.tim_info[i];
 		noa_info = arg.noa_info[i];
 
 		ath10k_dbg(ar, ATH10K_DBG_MGMT,
@@ -2335,6 +3178,10 @@
 			   __le32_to_cpu(tim_info->tim_bitmap[1]),
 			   __le32_to_cpu(tim_info->tim_bitmap[0]));
 
+		/* TODO: Only the first 4 words of tim_bitmap are dumped.
+		 * Extend the debug code to dump the full tim_bitmap.
+		 */
+
 		arvif = ath10k_get_arvif(ar, vdev_id);
 		if (arvif == NULL) {
 			ath10k_warn(ar, "no vif for vdev_id %d found\n",
@@ -2391,6 +3238,7 @@
 				ath10k_warn(ar, "failed to map beacon: %d\n",
 					    ret);
 				dev_kfree_skb_any(bcn);
+				ret = -EIO;
 				goto skip;
 			}
 
@@ -2424,7 +3272,7 @@
 }
 
 static void ath10k_dfs_radar_report(struct ath10k *ar,
-				    const struct wmi_phyerr *phyerr,
+				    struct wmi_phyerr_ev_arg *phyerr,
 				    const struct phyerr_radar_report *rr,
 				    u64 tsf)
 {
@@ -2468,7 +3316,7 @@
 	}
 
 	/* report event to DFS pattern detector */
-	tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
+	tsf32l = phyerr->tsf_timestamp;
 	tsf64 = tsf & (~0xFFFFFFFFULL);
 	tsf64 |= tsf32l;
 
@@ -2513,7 +3361,7 @@
 }
 
 static int ath10k_dfs_fft_report(struct ath10k *ar,
-				 const struct wmi_phyerr *phyerr,
+				 struct wmi_phyerr_ev_arg *phyerr,
 				 const struct phyerr_fft_report *fftr,
 				 u64 tsf)
 {
@@ -2551,7 +3399,7 @@
 }
 
 void ath10k_wmi_event_dfs(struct ath10k *ar,
-			  const struct wmi_phyerr *phyerr,
+			  struct wmi_phyerr_ev_arg *phyerr,
 			  u64 tsf)
 {
 	int buf_len, tlv_len, res, i = 0;
@@ -2560,11 +3408,11 @@
 	const struct phyerr_fft_report *fftr;
 	const u8 *tlv_buf;
 
-	buf_len = __le32_to_cpu(phyerr->buf_len);
+	buf_len = phyerr->buf_len;
 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
 		   "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
 		   phyerr->phy_err_code, phyerr->rssi_combined,
-		   __le32_to_cpu(phyerr->tsf_timestamp), tsf, buf_len);
+		   phyerr->tsf_timestamp, tsf, buf_len);
 
 	/* Skip event if DFS disabled */
 	if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
@@ -2616,7 +3464,7 @@
 }
 
 void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
-				    const struct wmi_phyerr *phyerr,
+				    struct wmi_phyerr_ev_arg *phyerr,
 				    u64 tsf)
 {
 	int buf_len, tlv_len, res, i = 0;
@@ -2625,7 +3473,7 @@
 	const struct phyerr_fft_report *fftr;
 	size_t fftr_len;
 
-	buf_len = __le32_to_cpu(phyerr->buf_len);
+	buf_len = phyerr->buf_len;
 
 	while (i < buf_len) {
 		if (i + sizeof(*tlv) > buf_len) {
@@ -2658,7 +3506,7 @@
 							  fftr, fftr_len,
 							  tsf);
 			if (res < 0) {
-				ath10k_warn(ar, "failed to process fft report: %d\n",
+				ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
 					    res);
 				return;
 			}
@@ -2669,65 +3517,169 @@
 	}
 }
 
-static int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, struct sk_buff *skb,
-					struct wmi_phyerr_ev_arg *arg)
+static int ath10k_wmi_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+					    struct sk_buff *skb,
+					    struct wmi_phyerr_hdr_arg *arg)
 {
 	struct wmi_phyerr_event *ev = (void *)skb->data;
 
 	if (skb->len < sizeof(*ev))
 		return -EPROTO;
 
-	arg->num_phyerrs = ev->num_phyerrs;
-	arg->tsf_l32 = ev->tsf_l32;
-	arg->tsf_u32 = ev->tsf_u32;
-	arg->buf_len = __cpu_to_le32(skb->len - sizeof(*ev));
+	arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
+	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+	arg->buf_len = skb->len - sizeof(*ev);
 	arg->phyerrs = ev->phyerrs;
 
 	return 0;
 }
 
+static int ath10k_wmi_10_4_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+						 struct sk_buff *skb,
+						 struct wmi_phyerr_hdr_arg *arg)
+{
+	struct wmi_10_4_phyerr_event *ev = (void *)skb->data;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	/* 10.4 firmware always reports only one phyerr */
+	arg->num_phyerrs = 1;
+
+	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+	arg->buf_len = skb->len;
+	arg->phyerrs = skb->data;
+
+	return 0;
+}
+
+int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar,
+				 const void *phyerr_buf,
+				 int left_len,
+				 struct wmi_phyerr_ev_arg *arg)
+{
+	const struct wmi_phyerr *phyerr = phyerr_buf;
+	int i;
+
+	if (left_len < sizeof(*phyerr)) {
+		ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
+			    left_len, sizeof(*phyerr));
+		return -EINVAL;
+	}
+
+	arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
+	arg->freq1 = __le16_to_cpu(phyerr->freq1);
+	arg->freq2 = __le16_to_cpu(phyerr->freq2);
+	arg->rssi_combined = phyerr->rssi_combined;
+	arg->chan_width_mhz = phyerr->chan_width_mhz;
+	arg->buf_len = __le32_to_cpu(phyerr->buf_len);
+	arg->buf = phyerr->buf;
+	arg->hdr_len = sizeof(*phyerr);
+
+	for (i = 0; i < 4; i++)
+		arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
+
+	switch (phyerr->phy_err_code) {
+	case PHY_ERROR_GEN_SPECTRAL_SCAN:
+		arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
+		break;
+	case PHY_ERROR_GEN_FALSE_RADAR_EXT:
+		arg->phy_err_code = PHY_ERROR_FALSE_RADAR_EXT;
+		break;
+	case PHY_ERROR_GEN_RADAR:
+		arg->phy_err_code = PHY_ERROR_RADAR;
+		break;
+	default:
+		arg->phy_err_code = PHY_ERROR_UNKNOWN;
+		break;
+	}
+
+	return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_phyerr_ev(struct ath10k *ar,
+					     const void *phyerr_buf,
+					     int left_len,
+					     struct wmi_phyerr_ev_arg *arg)
+{
+	const struct wmi_10_4_phyerr_event *phyerr = phyerr_buf;
+	u32 phy_err_mask;
+	int i;
+
+	if (left_len < sizeof(*phyerr)) {
+		ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
+			    left_len, sizeof(*phyerr));
+		return -EINVAL;
+	}
+
+	arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
+	arg->freq1 = __le16_to_cpu(phyerr->freq1);
+	arg->freq2 = __le16_to_cpu(phyerr->freq2);
+	arg->rssi_combined = phyerr->rssi_combined;
+	arg->chan_width_mhz = phyerr->chan_width_mhz;
+	arg->buf_len = __le32_to_cpu(phyerr->buf_len);
+	arg->buf = phyerr->buf;
+	arg->hdr_len = sizeof(*phyerr);
+
+	for (i = 0; i < 4; i++)
+		arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
+
+	phy_err_mask = __le32_to_cpu(phyerr->phy_err_mask[0]);
+
+	if (phy_err_mask & PHY_ERROR_10_4_SPECTRAL_SCAN_MASK)
+		arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
+	else if (phy_err_mask & PHY_ERROR_10_4_RADAR_MASK)
+		arg->phy_err_code = PHY_ERROR_RADAR;
+	else
+		arg->phy_err_code = PHY_ERROR_UNKNOWN;
+
+	return 0;
+}
+
 void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
 {
-	struct wmi_phyerr_ev_arg arg = {};
-	const struct wmi_phyerr *phyerr;
+	struct wmi_phyerr_hdr_arg hdr_arg = {};
+	struct wmi_phyerr_ev_arg phyerr_arg = {};
+	const void *phyerr;
 	u32 count, i, buf_len, phy_err_code;
 	u64 tsf;
 	int left_len, ret;
 
 	ATH10K_DFS_STAT_INC(ar, phy_errors);
 
-	ret = ath10k_wmi_pull_phyerr(ar, skb, &arg);
+	ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg);
 	if (ret) {
-		ath10k_warn(ar, "failed to parse phyerr event: %d\n", ret);
+		ath10k_warn(ar, "failed to parse phyerr event hdr: %d\n", ret);
 		return;
 	}
 
-	left_len = __le32_to_cpu(arg.buf_len);
-
 	/* Check number of included events */
-	count = __le32_to_cpu(arg.num_phyerrs);
+	count = hdr_arg.num_phyerrs;
 
-	tsf = __le32_to_cpu(arg.tsf_u32);
+	left_len = hdr_arg.buf_len;
+
+	tsf = hdr_arg.tsf_u32;
 	tsf <<= 32;
-	tsf |= __le32_to_cpu(arg.tsf_l32);
+	tsf |= hdr_arg.tsf_l32;
 
 	ath10k_dbg(ar, ATH10K_DBG_WMI,
 		   "wmi event phyerr count %d tsf64 0x%llX\n",
 		   count, tsf);
 
-	phyerr = arg.phyerrs;
+	phyerr = hdr_arg.phyerrs;
 	for (i = 0; i < count; i++) {
-		/* Check if we can read event header */
-		if (left_len < sizeof(*phyerr)) {
-			ath10k_warn(ar, "single event (%d) wrong head len\n",
+		ret = ath10k_wmi_pull_phyerr(ar, phyerr, left_len, &phyerr_arg);
+		if (ret) {
+			ath10k_warn(ar, "failed to parse phyerr event (%d)\n",
 				    i);
 			return;
 		}
 
-		left_len -= sizeof(*phyerr);
-
-		buf_len = __le32_to_cpu(phyerr->buf_len);
-		phy_err_code = phyerr->phy_err_code;
+		left_len -= phyerr_arg.hdr_len;
+		buf_len = phyerr_arg.buf_len;
+		phy_err_code = phyerr_arg.phy_err_code;
 
 		if (left_len < buf_len) {
 			ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
@@ -2738,20 +3690,20 @@
 
 		switch (phy_err_code) {
 		case PHY_ERROR_RADAR:
-			ath10k_wmi_event_dfs(ar, phyerr, tsf);
+			ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
 			break;
 		case PHY_ERROR_SPECTRAL_SCAN:
-			ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
+			ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
 			break;
 		case PHY_ERROR_FALSE_RADAR_EXT:
-			ath10k_wmi_event_dfs(ar, phyerr, tsf);
-			ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
+			ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
+			ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
 			break;
 		default:
 			break;
 		}
 
-		phyerr = (void *)phyerr + sizeof(*phyerr) + buf_len;
+		phyerr = phyerr + phyerr_arg.hdr_len + buf_len;
 	}
 }
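
Both header pulls hand this handler a 64-bit TSF assembled from two 32-bit halves, and the per-report DFS path later splices a fresh low half into that base. A standalone illustration of both steps:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tsf_u32 = 0x00000001, tsf_l32 = 0x23456789;
	uint32_t report_l32 = 0x23456790;
	uint64_t tsf, report_tsf;

	/* rebuild the full 64-bit TSF from the event header halves */
	tsf = (uint64_t)tsf_u32 << 32 | tsf_l32;

	/* merge a per-report low half into the known 64-bit base, as
	 * ath10k_dfs_radar_report() does with phyerr->tsf_timestamp */
	report_tsf = (tsf & ~0xFFFFFFFFULL) | report_l32;

	printf("tsf=0x%016" PRIx64 " report=0x%016" PRIx64 "\n",
	       tsf, report_tsf);
	return 0;
}
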
 
@@ -2949,7 +3901,7 @@
 	ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
 							   pool_size,
 							   &paddr,
-							   GFP_ATOMIC);
+							   GFP_KERNEL);
 	if (!ar->wmi.mem_chunks[idx].vaddr) {
 		ath10k_warn(ar, "failed to allocate memory chunk\n");
 		return -ENOMEM;
@@ -3038,12 +3990,19 @@
 	return 0;
 }
 
-void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
 {
+	struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
+	struct sk_buff *skb = ar->svc_rdy_skb;
 	struct wmi_svc_rdy_ev_arg arg = {};
 	u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
 	int ret;
 
+	if (!skb) {
+		ath10k_warn(ar, "invalid service ready event skb\n");
+		return;
+	}
+
 	ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
 	if (ret) {
 		ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
@@ -3075,10 +4034,10 @@
 	if (ar->fw_api == 1 && ar->fw_version_build > 636)
 		set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
 
-	if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
+	if (ar->num_rf_chains > ar->max_spatial_stream) {
 		ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
-			    ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
-		ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
+			    ar->num_rf_chains, ar->max_spatial_stream);
+		ar->num_rf_chains = ar->max_spatial_stream;
 	}
 
 	ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
@@ -3101,20 +4060,39 @@
 		return;
 	}
 
+	if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
+		ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
+				    TARGET_10_4_NUM_VDEVS;
+		ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
+				       TARGET_10_4_NUM_VDEVS;
+		ar->num_tids = ar->num_active_peers * 2;
+		ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
+	}
+
+	/* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE
+	 * and WMI_SERVICE_IRAM_TIDS, etc.
+	 */
+
 	for (i = 0; i < num_mem_reqs; ++i) {
 		req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
 		num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
 		unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
 		num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
 
-		if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
+		if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
+			if (ar->num_active_peers)
+				num_units = ar->num_active_peers + 1;
+			else
+				num_units = ar->max_num_peers + 1;
+		} else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
 			/* number of units to allocate is number of
 			 * peers, 1 extra for self peer on target */
 			/* this needs to be tied, host and target
 			 * can get out of sync */
-			num_units = TARGET_10X_NUM_PEERS + 1;
-		else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
-			num_units = TARGET_10X_NUM_VDEVS + 1;
+			num_units = ar->max_num_peers + 1;
+		} else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
+			num_units = ar->max_num_vdevs + 1;
+		}
 
 		ath10k_dbg(ar, ATH10K_DBG_WMI,
 			   "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
@@ -3144,9 +4122,17 @@
 		   __le32_to_cpu(arg.eeprom_rd),
 		   __le32_to_cpu(arg.num_mem_reqs));
 
+	dev_kfree_skb(skb);
+	ar->svc_rdy_skb = NULL;
 	complete(&ar->wmi.service_ready);
 }
 
+void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
+{
+	ar->svc_rdy_skb = skb;
+	queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
+}
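
Queuing the event onto workqueue_aux moves the heavy parsing into process context, which is what lets the memory-chunk allocation above use GFP_KERNEL instead of GFP_ATOMIC. A sketch of the one-time wiring this assumes; the actual init site is outside this hunk:

/* Sketch: assumed setup during driver attach, not shown in this diff. */
static void ath10k_wmi_svc_rdy_setup_sketch(struct ath10k *ar)
{
	ar->svc_rdy_skb = NULL;
	INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
}
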
+
 static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
 				     struct wmi_rdy_ev_arg *arg)
 {
@@ -3318,7 +4304,7 @@
 		break;
 	case WMI_SERVICE_READY_EVENTID:
 		ath10k_wmi_event_service_ready(ar, skb);
-		break;
+		return;
 	case WMI_READY_EVENTID:
 		ath10k_wmi_event_ready(ar, skb);
 		break;
@@ -3439,7 +4425,7 @@
 		break;
 	case WMI_10X_SERVICE_READY_EVENTID:
 		ath10k_wmi_event_service_ready(ar, skb);
-		break;
+		return;
 	case WMI_10X_READY_EVENTID:
 		ath10k_wmi_event_ready(ar, skb);
 		break;
@@ -3550,7 +4536,7 @@
 		break;
 	case WMI_10_2_SERVICE_READY_EVENTID:
 		ath10k_wmi_event_service_ready(ar, skb);
-		break;
+		return;
 	case WMI_10_2_READY_EVENTID:
 		ath10k_wmi_event_ready(ar, skb);
 		break;
@@ -3576,6 +4562,76 @@
 	dev_kfree_skb(skb);
 }
 
+static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_cmd_hdr *cmd_hdr;
+	enum wmi_10_4_event_id id;
+
+	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+		goto out;
+
+	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+	switch (id) {
+	case WMI_10_4_MGMT_RX_EVENTID:
+		ath10k_wmi_event_mgmt_rx(ar, skb);
+		/* mgmt_rx() owns the skb now! */
+		return;
+	case WMI_10_4_ECHO_EVENTID:
+		ath10k_wmi_event_echo(ar, skb);
+		break;
+	case WMI_10_4_DEBUG_MESG_EVENTID:
+		ath10k_wmi_event_debug_mesg(ar, skb);
+		break;
+	case WMI_10_4_SERVICE_READY_EVENTID:
+		ath10k_wmi_event_service_ready(ar, skb);
+		return;
+	case WMI_10_4_SCAN_EVENTID:
+		ath10k_wmi_event_scan(ar, skb);
+		break;
+	case WMI_10_4_CHAN_INFO_EVENTID:
+		ath10k_wmi_event_chan_info(ar, skb);
+		break;
+	case WMI_10_4_PHYERR_EVENTID:
+		ath10k_wmi_event_phyerr(ar, skb);
+		break;
+	case WMI_10_4_READY_EVENTID:
+		ath10k_wmi_event_ready(ar, skb);
+		break;
+	case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
+		ath10k_wmi_event_peer_sta_kickout(ar, skb);
+		break;
+	case WMI_10_4_HOST_SWBA_EVENTID:
+		ath10k_wmi_event_host_swba(ar, skb);
+		break;
+	case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID:
+		ath10k_wmi_event_tbttoffset_update(ar, skb);
+		break;
+	case WMI_10_4_DEBUG_PRINT_EVENTID:
+		ath10k_wmi_event_debug_print(ar, skb);
+		break;
+	case WMI_10_4_VDEV_START_RESP_EVENTID:
+		ath10k_wmi_event_vdev_start_resp(ar, skb);
+		break;
+	case WMI_10_4_VDEV_STOPPED_EVENTID:
+		ath10k_wmi_event_vdev_stopped(ar, skb);
+		break;
+	case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "received event id %d not implemented\n", id);
+		break;
+	default:
+		ath10k_warn(ar, "Unknown eventid: %d\n", id);
+		break;
+	}
+
+out:
+	dev_kfree_skb(skb);
+}
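
The dispatcher encodes an ownership rule: handlers that keep the skb (mgmt rx, and service ready once the skb is stashed for the work item) are exited with return, while everything else falls through to the common dev_kfree_skb(). The same rule is why the older dispatchers above switch their SERVICE_READY cases from break to return. A compact restatement with a stand-in event id:

/* Sketch of the skb-ownership convention used by the dispatchers. */
static void rx_dispatch_sketch(struct ath10k *ar, struct sk_buff *skb, int id)
{
	switch (id) {
	case 1:	/* stands in for events whose handler keeps the skb */
		ath10k_wmi_event_mgmt_rx(ar, skb);
		return;			/* handler owns the skb now */
	default:			/* handlers that only borrow it */
		break;
	}

	dev_kfree_skb(skb);		/* common free for borrowed skbs */
}
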
+
 static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
 {
 	int ret;
@@ -3762,8 +4818,7 @@
 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
-	config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);
-
+	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
 	config.scan_max_pending_reqs =
 		__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
 
@@ -3831,8 +4886,7 @@
 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
-	config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
-
+	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
 	config.scan_max_pending_reqs =
 		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
 
@@ -3897,7 +4951,7 @@
 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
-	config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
+	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
 
 	config.scan_max_pending_reqs =
 		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
@@ -3950,6 +5004,88 @@
 	return buf;
 }
 
+static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
+{
+	struct wmi_init_cmd_10_4 *cmd;
+	struct sk_buff *buf;
+	struct wmi_resource_config_10_4 config = {};
+	u32 len;
+
+	config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
+	config.num_peers = __cpu_to_le32(ar->max_num_peers);
+	config.num_active_peers = __cpu_to_le32(ar->num_active_peers);
+	config.num_tids = __cpu_to_le32(ar->num_tids);
+
+	config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS);
+	config.num_offload_reorder_buffs =
+			__cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
+	config.num_peer_keys  = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
+	config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
+	config.tx_chain_mask  = __cpu_to_le32(TARGET_10_4_TX_CHAIN_MASK);
+	config.rx_chain_mask  = __cpu_to_le32(TARGET_10_4_RX_CHAIN_MASK);
+
+	config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
+
+	config.rx_decap_mode	    = __cpu_to_le32(TARGET_10_4_RX_DECAP_MODE);
+	config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
+	config.bmiss_offload_max_vdev =
+			__cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
+	config.roam_offload_max_vdev  =
+			__cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV);
+	config.roam_offload_max_ap_profiles =
+			__cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES);
+	config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS);
+	config.num_mcast_table_elems =
+			__cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS);
+
+	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE);
+	config.tx_dbg_log_size  = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE);
+	config.num_wds_entries  = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES);
+	config.dma_burst_size   = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE);
+	config.mac_aggr_delim   = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM);
+
+	config.rx_skip_defrag_timeout_dup_detection_check =
+	  __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK);
+
+	config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
+	config.gtk_offload_max_vdev =
+			__cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
+	config.num_msdu_desc = __cpu_to_le32(TARGET_10_4_NUM_MSDU_DESC);
+	config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
+	config.max_peer_ext_stats =
+			__cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
+	config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP);
+
+	config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE);
+	config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE);
+	config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE);
+	config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE);
+
+	config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE);
+	config.tt_support =
+			__cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG);
+	config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG);
+	config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
+	config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
+
+	len = sizeof(*cmd) +
+	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+	buf = ath10k_wmi_alloc_skb(ar, len);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_init_cmd_10_4 *)buf->data;
+	memcpy(&cmd->resource_config, &config, sizeof(config));
+	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n");
+	return buf;
+}
+
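For context, .gen_init (wired up in the 10.4 ops table below) is reached through a thin wrapper in wmi-ops.h; a condensed sketch, with error handling trimmed:

static inline int ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}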
 int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
 {
 	if (arg->ie_len && !arg->ie)
@@ -4172,7 +5308,6 @@
 		| WMI_SCAN_EVENT_BSS_CHANNEL
 		| WMI_SCAN_EVENT_FOREIGN_CHANNEL
 		| WMI_SCAN_EVENT_DEQUEUED;
-	arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
 	arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
 	arg->n_bssids = 1;
 	arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
@@ -5170,6 +6305,7 @@
 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
+	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
 	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
@@ -5241,6 +6377,7 @@
 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
+	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
@@ -5306,6 +6443,7 @@
 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
+	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
@@ -5367,6 +6505,7 @@
 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
+	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
@@ -5412,9 +6551,73 @@
 	/* .gen_adaptive_qcs not implemented */
 };
 
+static const struct wmi_ops wmi_10_4_ops = {
+	.rx = ath10k_wmi_10_4_op_rx,
+	.map_svc = wmi_10_4_svc_map,
+
+	.pull_scan = ath10k_wmi_op_pull_scan_ev,
+	.pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
+	.pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
+	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+	.pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
+	.pull_phyerr_hdr = ath10k_wmi_10_4_op_pull_phyerr_ev_hdr,
+	.pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev,
+	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
+	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+	.get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,
+
+	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+	.gen_init = ath10k_wmi_10_4_op_gen_init,
+	.gen_start_scan = ath10k_wmi_op_gen_start_scan,
+	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
+	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
+	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
+
+	/* shared with 10.2 */
+	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+};
+
 int ath10k_wmi_attach(struct ath10k *ar)
 {
 	switch (ar->wmi.op_version) {
+	case ATH10K_FW_WMI_OP_VERSION_10_4:
+		ar->wmi.ops = &wmi_10_4_ops;
+		ar->wmi.cmd = &wmi_10_4_cmd_map;
+		ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
+		ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
+		break;
 	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
 		ar->wmi.cmd = &wmi_10_2_4_cmd_map;
 		ar->wmi.ops = &wmi_10_2_4_ops;
@@ -5452,6 +6655,8 @@
 	init_completion(&ar->wmi.service_ready);
 	init_completion(&ar->wmi.unified_ready);
 
+	INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
+
 	return 0;
 }
 
@@ -5459,6 +6664,11 @@
 {
 	int i;
 
+	cancel_work_sync(&ar->svc_rdy_work);
+
+	if (ar->svc_rdy_skb)
+		dev_kfree_skb(ar->svc_rdy_skb);
+
 	/* free the host memory chunks requested by firmware */
 	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
 		dma_free_coherent(ar->dev,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index cf44a3d..52d3503 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -150,6 +150,12 @@
 	WMI_SERVICE_SAP_AUTH_OFFLOAD,
 	WMI_SERVICE_ATF,
 	WMI_SERVICE_COEX_GPIO,
+	WMI_SERVICE_ENHANCED_PROXY_STA,
+	WMI_SERVICE_TT,
+	WMI_SERVICE_PEER_CACHING,
+	WMI_SERVICE_AUX_SPECTRAL_INTF,
+	WMI_SERVICE_AUX_CHAN_LOAD_INTF,
+	WMI_SERVICE_BSS_CHANNEL_INFO_64,
 
 	/* keep last */
 	WMI_SERVICE_MAX,
@@ -218,6 +224,51 @@
 	WMI_MAIN_SERVICE_TX_ENCAP,
 };
 
+enum wmi_10_4_service {
+	WMI_10_4_SERVICE_BEACON_OFFLOAD = 0,
+	WMI_10_4_SERVICE_SCAN_OFFLOAD,
+	WMI_10_4_SERVICE_ROAM_OFFLOAD,
+	WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+	WMI_10_4_SERVICE_STA_PWRSAVE,
+	WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+	WMI_10_4_SERVICE_AP_UAPSD,
+	WMI_10_4_SERVICE_AP_DFS,
+	WMI_10_4_SERVICE_11AC,
+	WMI_10_4_SERVICE_BLOCKACK,
+	WMI_10_4_SERVICE_PHYERR,
+	WMI_10_4_SERVICE_BCN_FILTER,
+	WMI_10_4_SERVICE_RTT,
+	WMI_10_4_SERVICE_RATECTRL,
+	WMI_10_4_SERVICE_WOW,
+	WMI_10_4_SERVICE_RATECTRL_CACHE,
+	WMI_10_4_SERVICE_IRAM_TIDS,
+	WMI_10_4_SERVICE_BURST,
+	WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+	WMI_10_4_SERVICE_GTK_OFFLOAD,
+	WMI_10_4_SERVICE_SCAN_SCH,
+	WMI_10_4_SERVICE_CSA_OFFLOAD,
+	WMI_10_4_SERVICE_CHATTER,
+	WMI_10_4_SERVICE_COEX_FREQAVOID,
+	WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+	WMI_10_4_SERVICE_FORCE_FW_HANG,
+	WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+	WMI_10_4_SERVICE_GPIO,
+	WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+	WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+	WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+	WMI_10_4_SERVICE_TX_ENCAP,
+	WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+	WMI_10_4_SERVICE_EARLY_RX,
+	WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+	WMI_10_4_SERVICE_TT,
+	WMI_10_4_SERVICE_ATF,
+	WMI_10_4_SERVICE_PEER_CACHING,
+	WMI_10_4_SERVICE_COEX_GPIO,
+	WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+	WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+	WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+};
+
 static inline char *wmi_service_name(int service_id)
 {
 #define SVCSTR(x) case x: return #x
@@ -299,6 +350,12 @@
 	SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
 	SVCSTR(WMI_SERVICE_ATF);
 	SVCSTR(WMI_SERVICE_COEX_GPIO);
+	SVCSTR(WMI_SERVICE_ENHANCED_PROXY_STA);
+	SVCSTR(WMI_SERVICE_TT);
+	SVCSTR(WMI_SERVICE_PEER_CACHING);
+	SVCSTR(WMI_SERVICE_AUX_SPECTRAL_INTF);
+	SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF);
+	SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64);
 	default:
 		return NULL;
 	}
@@ -437,6 +494,95 @@
 	       WMI_SERVICE_TX_ENCAP, len);
 }
 
+static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
+				    size_t len)
+{
+	SVCMAP(WMI_10_4_SERVICE_BEACON_OFFLOAD,
+	       WMI_SERVICE_BEACON_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_SCAN_OFFLOAD,
+	       WMI_SERVICE_SCAN_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_ROAM_OFFLOAD,
+	       WMI_SERVICE_ROAM_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+	       WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_STA_PWRSAVE,
+	       WMI_SERVICE_STA_PWRSAVE, len);
+	SVCMAP(WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+	       WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+	SVCMAP(WMI_10_4_SERVICE_AP_UAPSD,
+	       WMI_SERVICE_AP_UAPSD, len);
+	SVCMAP(WMI_10_4_SERVICE_AP_DFS,
+	       WMI_SERVICE_AP_DFS, len);
+	SVCMAP(WMI_10_4_SERVICE_11AC,
+	       WMI_SERVICE_11AC, len);
+	SVCMAP(WMI_10_4_SERVICE_BLOCKACK,
+	       WMI_SERVICE_BLOCKACK, len);
+	SVCMAP(WMI_10_4_SERVICE_PHYERR,
+	       WMI_SERVICE_PHYERR, len);
+	SVCMAP(WMI_10_4_SERVICE_BCN_FILTER,
+	       WMI_SERVICE_BCN_FILTER, len);
+	SVCMAP(WMI_10_4_SERVICE_RTT,
+	       WMI_SERVICE_RTT, len);
+	SVCMAP(WMI_10_4_SERVICE_RATECTRL,
+	       WMI_SERVICE_RATECTRL, len);
+	SVCMAP(WMI_10_4_SERVICE_WOW,
+	       WMI_SERVICE_WOW, len);
+	SVCMAP(WMI_10_4_SERVICE_RATECTRL_CACHE,
+	       WMI_SERVICE_RATECTRL_CACHE, len);
+	SVCMAP(WMI_10_4_SERVICE_IRAM_TIDS,
+	       WMI_SERVICE_IRAM_TIDS, len);
+	SVCMAP(WMI_10_4_SERVICE_BURST,
+	       WMI_SERVICE_BURST, len);
+	SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+	       WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len);
+	SVCMAP(WMI_10_4_SERVICE_GTK_OFFLOAD,
+	       WMI_SERVICE_GTK_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_SCAN_SCH,
+	       WMI_SERVICE_SCAN_SCH, len);
+	SVCMAP(WMI_10_4_SERVICE_CSA_OFFLOAD,
+	       WMI_SERVICE_CSA_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_CHATTER,
+	       WMI_SERVICE_CHATTER, len);
+	SVCMAP(WMI_10_4_SERVICE_COEX_FREQAVOID,
+	       WMI_SERVICE_COEX_FREQAVOID, len);
+	SVCMAP(WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+	       WMI_SERVICE_PACKET_POWER_SAVE, len);
+	SVCMAP(WMI_10_4_SERVICE_FORCE_FW_HANG,
+	       WMI_SERVICE_FORCE_FW_HANG, len);
+	SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+	       WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+	SVCMAP(WMI_10_4_SERVICE_GPIO,
+	       WMI_SERVICE_GPIO, len);
+	SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+	       WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+	SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+	       WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+	SVCMAP(WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+	       WMI_SERVICE_STA_KEEP_ALIVE, len);
+	SVCMAP(WMI_10_4_SERVICE_TX_ENCAP,
+	       WMI_SERVICE_TX_ENCAP, len);
+	SVCMAP(WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+	       WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
+	SVCMAP(WMI_10_4_SERVICE_EARLY_RX,
+	       WMI_SERVICE_EARLY_RX, len);
+	SVCMAP(WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+	       WMI_SERVICE_ENHANCED_PROXY_STA, len);
+	SVCMAP(WMI_10_4_SERVICE_TT,
+	       WMI_SERVICE_TT, len);
+	SVCMAP(WMI_10_4_SERVICE_ATF,
+	       WMI_SERVICE_ATF, len);
+	SVCMAP(WMI_10_4_SERVICE_PEER_CACHING,
+	       WMI_SERVICE_PEER_CACHING, len);
+	SVCMAP(WMI_10_4_SERVICE_COEX_GPIO,
+	       WMI_SERVICE_COEX_GPIO, len);
+	SVCMAP(WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+	       WMI_SERVICE_AUX_SPECTRAL_INTF, len);
+	SVCMAP(WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+	       WMI_SERVICE_AUX_CHAN_LOAD_INTF, len);
+	SVCMAP(WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+	       WMI_SERVICE_BSS_CHANNEL_INFO_64, len);
+}
+
 #undef SVCMAP
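For reference, SVCMAP (defined just above this hunk in wmi.h) copies one firmware service bit into the driver-generic service bitmap, so each entry in wmi_10_4_svc_map() is a single guarded __set_bit(). Simplified expansion (the in-tree macro also range-checks against len):

#define SVCMAP(x, y, len)						\
	do {								\
		if (__le32_to_cpu(in[(x) / 32]) & BIT((x) % 32))	\
			__set_bit(y, out);				\
	} while (0)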
 
 /* 2 word representation of MAC addr */
@@ -565,6 +711,48 @@
 	u32 tdls_set_state_cmdid;
 	u32 tdls_peer_update_cmdid;
 	u32 adaptive_qcs_cmdid;
+	u32 scan_update_request_cmdid;
+	u32 vdev_standby_response_cmdid;
+	u32 vdev_resume_response_cmdid;
+	u32 wlan_peer_caching_add_peer_cmdid;
+	u32 wlan_peer_caching_evict_peer_cmdid;
+	u32 wlan_peer_caching_restore_peer_cmdid;
+	u32 wlan_peer_caching_print_all_peers_info_cmdid;
+	u32 peer_update_wds_entry_cmdid;
+	u32 peer_add_proxy_sta_entry_cmdid;
+	u32 rtt_keepalive_cmdid;
+	u32 oem_req_cmdid;
+	u32 nan_cmdid;
+	u32 vdev_ratemask_cmdid;
+	u32 qboost_cfg_cmdid;
+	u32 pdev_smart_ant_enable_cmdid;
+	u32 pdev_smart_ant_set_rx_antenna_cmdid;
+	u32 peer_smart_ant_set_tx_antenna_cmdid;
+	u32 peer_smart_ant_set_train_info_cmdid;
+	u32 peer_smart_ant_set_node_config_ops_cmdid;
+	u32 pdev_set_antenna_switch_table_cmdid;
+	u32 pdev_set_ctl_table_cmdid;
+	u32 pdev_set_mimogain_table_cmdid;
+	u32 pdev_ratepwr_table_cmdid;
+	u32 pdev_ratepwr_chainmsk_table_cmdid;
+	u32 pdev_fips_cmdid;
+	u32 tt_set_conf_cmdid;
+	u32 fwtest_cmdid;
+	u32 vdev_atf_request_cmdid;
+	u32 peer_atf_request_cmdid;
+	u32 pdev_get_ani_cck_config_cmdid;
+	u32 pdev_get_ani_ofdm_config_cmdid;
+	u32 pdev_reserve_ast_entry_cmdid;
+	u32 pdev_get_nfcal_power_cmdid;
+	u32 pdev_get_tpc_cmdid;
+	u32 pdev_get_ast_info_cmdid;
+	u32 vdev_set_dscp_tid_map_cmdid;
+	u32 pdev_get_info_cmdid;
+	u32 vdev_get_info_cmdid;
+	u32 vdev_filter_neighbor_rx_packets_cmdid;
+	u32 mu_cal_start_cmdid;
+	u32 set_cca_params_cmdid;
+	u32 pdev_bss_chan_info_request_cmdid;
 };
 
 /*
@@ -1220,6 +1408,216 @@
 	WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
 };
 
+enum wmi_10_4_cmd_id {
+	WMI_10_4_START_CMDID = 0x9000,
+	WMI_10_4_END_CMDID = 0x9FFF,
+	WMI_10_4_INIT_CMDID,
+	WMI_10_4_START_SCAN_CMDID = WMI_10_4_START_CMDID,
+	WMI_10_4_STOP_SCAN_CMDID,
+	WMI_10_4_SCAN_CHAN_LIST_CMDID,
+	WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+	WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+	WMI_10_4_ECHO_CMDID,
+	WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+	WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+	WMI_10_4_PDEV_SET_PARAM_CMDID,
+	WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+	WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+	WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+	WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+	WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+	WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+	WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+	WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+	WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+	WMI_10_4_VDEV_CREATE_CMDID,
+	WMI_10_4_VDEV_DELETE_CMDID,
+	WMI_10_4_VDEV_START_REQUEST_CMDID,
+	WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+	WMI_10_4_VDEV_UP_CMDID,
+	WMI_10_4_VDEV_STOP_CMDID,
+	WMI_10_4_VDEV_DOWN_CMDID,
+	WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+	WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+	WMI_10_4_VDEV_SET_PARAM_CMDID,
+	WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+	WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+	WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+	WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+	WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+	WMI_10_4_PEER_CREATE_CMDID,
+	WMI_10_4_PEER_DELETE_CMDID,
+	WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+	WMI_10_4_PEER_SET_PARAM_CMDID,
+	WMI_10_4_PEER_ASSOC_CMDID,
+	WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+	WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+	WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+	WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+	WMI_10_4_PEER_MCAST_GROUP_CMDID,
+	WMI_10_4_BCN_TX_CMDID,
+	WMI_10_4_PDEV_SEND_BCN_CMDID,
+	WMI_10_4_BCN_PRB_TMPL_CMDID,
+	WMI_10_4_BCN_FILTER_RX_CMDID,
+	WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+	WMI_10_4_MGMT_TX_CMDID,
+	WMI_10_4_PRB_TMPL_CMDID,
+	WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+	WMI_10_4_ADDBA_SEND_CMDID,
+	WMI_10_4_ADDBA_STATUS_CMDID,
+	WMI_10_4_DELBA_SEND_CMDID,
+	WMI_10_4_ADDBA_SET_RESP_CMDID,
+	WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+	WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+	WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+	WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+	WMI_10_4_DBGLOG_CFG_CMDID,
+	WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+	WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+	WMI_10_4_PDEV_QVIT_CMDID,
+	WMI_10_4_ROAM_SCAN_MODE,
+	WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+	WMI_10_4_ROAM_SCAN_PERIOD,
+	WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	WMI_10_4_ROAM_AP_PROFILE,
+	WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+	WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+	WMI_10_4_OFL_SCAN_PERIOD,
+	WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+	WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+	WMI_10_4_P2P_GO_SET_BEACON_IE,
+	WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+	WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+	WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+	WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+	WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+	WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+	WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+	WMI_10_4_PDEV_SUSPEND_CMDID,
+	WMI_10_4_PDEV_RESUME_CMDID,
+	WMI_10_4_ADD_BCN_FILTER_CMDID,
+	WMI_10_4_RMV_BCN_FILTER_CMDID,
+	WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+	WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+	WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	WMI_10_4_WOW_ENABLE_CMDID,
+	WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+	WMI_10_4_RTT_MEASREQ_CMDID,
+	WMI_10_4_RTT_TSF_CMDID,
+	WMI_10_4_RTT_KEEPALIVE_CMDID,
+	WMI_10_4_OEM_REQ_CMDID,
+	WMI_10_4_NAN_CMDID,
+	WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+	WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+	WMI_10_4_REQUEST_STATS_CMDID,
+	WMI_10_4_GPIO_CONFIG_CMDID,
+	WMI_10_4_GPIO_OUTPUT_CMDID,
+	WMI_10_4_VDEV_RATEMASK_CMDID,
+	WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+	WMI_10_4_GTK_OFFLOAD_CMDID,
+	WMI_10_4_QBOOST_CFG_CMDID,
+	WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+	WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+	WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+	WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+	WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+	WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+	WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+	WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+	WMI_10_4_FORCE_FW_HANG_CMDID,
+	WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+	WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+	WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+	WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+	WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+	WMI_10_4_PDEV_FIPS_CMDID,
+	WMI_10_4_TT_SET_CONF_CMDID,
+	WMI_10_4_FWTEST_CMDID,
+	WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+	WMI_10_4_PEER_ATF_REQUEST_CMDID,
+	WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+	WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+	WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+	WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+	WMI_10_4_PDEV_GET_TPC_CMDID,
+	WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+	WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+	WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+	WMI_10_4_PDEV_GET_INFO_CMDID,
+	WMI_10_4_VDEV_GET_INFO_CMDID,
+	WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+	WMI_10_4_MU_CAL_START_CMDID,
+	WMI_10_4_SET_CCA_PARAMS_CMDID,
+	WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+	WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1,
+};
+
+enum wmi_10_4_event_id {
+	WMI_10_4_SERVICE_READY_EVENTID = 0x8000,
+	WMI_10_4_READY_EVENTID,
+	WMI_10_4_DEBUG_MESG_EVENTID,
+	WMI_10_4_START_EVENTID = 0x9000,
+	WMI_10_4_END_EVENTID = 0x9FFF,
+	WMI_10_4_SCAN_EVENTID = WMI_10_4_START_EVENTID,
+	WMI_10_4_ECHO_EVENTID,
+	WMI_10_4_UPDATE_STATS_EVENTID,
+	WMI_10_4_INST_RSSI_STATS_EVENTID,
+	WMI_10_4_VDEV_START_RESP_EVENTID,
+	WMI_10_4_VDEV_STANDBY_REQ_EVENTID,
+	WMI_10_4_VDEV_RESUME_REQ_EVENTID,
+	WMI_10_4_VDEV_STOPPED_EVENTID,
+	WMI_10_4_PEER_STA_KICKOUT_EVENTID,
+	WMI_10_4_HOST_SWBA_EVENTID,
+	WMI_10_4_TBTTOFFSET_UPDATE_EVENTID,
+	WMI_10_4_MGMT_RX_EVENTID,
+	WMI_10_4_CHAN_INFO_EVENTID,
+	WMI_10_4_PHYERR_EVENTID,
+	WMI_10_4_ROAM_EVENTID,
+	WMI_10_4_PROFILE_MATCH,
+	WMI_10_4_DEBUG_PRINT_EVENTID,
+	WMI_10_4_PDEV_QVIT_EVENTID,
+	WMI_10_4_WLAN_PROFILE_DATA_EVENTID,
+	WMI_10_4_RTT_MEASUREMENT_REPORT_EVENTID,
+	WMI_10_4_TSF_MEASUREMENT_REPORT_EVENTID,
+	WMI_10_4_RTT_ERROR_REPORT_EVENTID,
+	WMI_10_4_RTT_KEEPALIVE_EVENTID,
+	WMI_10_4_OEM_CAPABILITY_EVENTID,
+	WMI_10_4_OEM_MEASUREMENT_REPORT_EVENTID,
+	WMI_10_4_OEM_ERROR_REPORT_EVENTID,
+	WMI_10_4_NAN_EVENTID,
+	WMI_10_4_WOW_WAKEUP_HOST_EVENTID,
+	WMI_10_4_GTK_OFFLOAD_STATUS_EVENTID,
+	WMI_10_4_GTK_REKEY_FAIL_EVENTID,
+	WMI_10_4_DCS_INTERFERENCE_EVENTID,
+	WMI_10_4_PDEV_TPC_CONFIG_EVENTID,
+	WMI_10_4_CSA_HANDLING_EVENTID,
+	WMI_10_4_GPIO_INPUT_EVENTID,
+	WMI_10_4_PEER_RATECODE_LIST_EVENTID,
+	WMI_10_4_GENERIC_BUFFER_EVENTID,
+	WMI_10_4_MCAST_BUF_RELEASE_EVENTID,
+	WMI_10_4_MCAST_LIST_AGEOUT_EVENTID,
+	WMI_10_4_VDEV_GET_KEEPALIVE_EVENTID,
+	WMI_10_4_WDS_PEER_EVENTID,
+	WMI_10_4_PEER_STA_PS_STATECHG_EVENTID,
+	WMI_10_4_PDEV_FIPS_EVENTID,
+	WMI_10_4_TT_STATS_EVENTID,
+	WMI_10_4_PDEV_CHANNEL_HOPPING_EVENTID,
+	WMI_10_4_PDEV_ANI_CCK_LEVEL_EVENTID,
+	WMI_10_4_PDEV_ANI_OFDM_LEVEL_EVENTID,
+	WMI_10_4_PDEV_RESERVE_AST_ENTRY_EVENTID,
+	WMI_10_4_PDEV_NFCAL_POWER_EVENTID,
+	WMI_10_4_PDEV_TPC_EVENTID,
+	WMI_10_4_PDEV_GET_AST_INFO_EVENTID,
+	WMI_10_4_PDEV_TEMPERATURE_EVENTID,
+	WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+	WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID,
+	WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1,
+};
+
 enum wmi_phy_mode {
 	MODE_11A        = 0,   /* 11a Mode */
 	MODE_11G        = 1,   /* 11b/g Mode */
@@ -1349,7 +1747,8 @@
 /* Indicate reason for channel switch */
 #define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
 
-#define WMI_MAX_SPATIAL_STREAM   3
+#define WMI_MAX_SPATIAL_STREAM        3 /* default max ss */
+#define WMI_10_4_MAX_SPATIAL_STREAM   4
 
 /* HT Capabilities*/
 #define WMI_HT_CAP_ENABLED                0x0001   /* HT Enabled/ disabled */
@@ -1979,8 +2378,224 @@
 	__le32 feature_mask;
 } __packed;
 
-#define NUM_UNITS_IS_NUM_VDEVS   0x1
-#define NUM_UNITS_IS_NUM_PEERS   0x2
+#define NUM_UNITS_IS_NUM_VDEVS         BIT(0)
+#define NUM_UNITS_IS_NUM_PEERS         BIT(1)
+#define NUM_UNITS_IS_NUM_ACTIVE_PEERS  BIT(2)
+
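These flags qualify num_units in each firmware mem_req entry; the host multiplies units by unit size when carving the host memory chunks. How they are consumed, mirroring the service-ready hunk near the top of this patch (the ACTIVE_PEERS branch is inferred from the new flag and ar->num_active_peers, so treat it as illustrative):

	if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
		num_units = ar->num_active_peers + 1;
	} else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
		num_units = ar->max_num_peers + 1;
	} else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
		num_units = ar->max_num_vdevs + 1;
	}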
+struct wmi_resource_config_10_4 {
+	/* Number of virtual devices (VAPs) to support */
+	__le32 num_vdevs;
+
+	/* Number of peer nodes to support */
+	__le32 num_peers;
+
+	/* Number of active peer nodes to support */
+	__le32 num_active_peers;
+
+	/* In offload mode the target supports features like WOW, chatter and
+	 * other protocol offloads. In order to support them, some
+	 * functionality such as reorder buffering and PN checking needs to
+	 * be done in the target. This determines the maximum number of peers
+	 * supported by the target in offload mode.
+	 */
+	__le32 num_offload_peers;
+
+	/* Number of reorder buffers available for doing target based reorder
+	 * Rx reorder buffering
+	 */
+	__le32 num_offload_reorder_buffs;
+
+	/* Number of keys per peer */
+	__le32 num_peer_keys;
+
+	/* Total number of TX/RX data TIDs */
+	__le32 num_tids;
+
+	/* Max skid for resolving hash collisions.
+	 * The address search table is sparse, so that if two MAC addresses
+	 * result in the same hash value, the second of these conflicting
+	 * entries can slide to the next index in the address search table,
+	 * and use it, if it is unoccupied.  This ast_skid_limit parameter
+	 * specifies the upper bound on how many subsequent indices to search
+	 * over to find an unoccupied space.
+	 */
+	__le32 ast_skid_limit;
+
+	/* The nominal chain mask for transmit.
+	 * The chain mask may be modified dynamically, e.g. to operate AP tx
+	 * with a reduced number of chains if no clients are associated.
+	 * This configuration parameter specifies the nominal chain-mask that
+	 * should be used when not operating with a reduced set of tx chains.
+	 */
+	__le32 tx_chain_mask;
+
+	/* The nominal chain mask for receive.
+	 * The chain mask may be modified dynamically, e.g. for a client to use
+	 * a reduced number of chains for receive if the traffic to the client
+	 * is low enough that it doesn't require downlink MIMO or antenna
+	 * diversity. This configuration parameter specifies the nominal
+	 * chain-mask that should be used when not operating with a reduced
+	 * set of rx chains.
+	 */
+	__le32 rx_chain_mask;
+
+	/* What rx reorder timeout (ms) to use for the AC.
+	 * Each WMM access class (voice, video, best-effort, background) will
+	 * have its own timeout value to dictate how long to wait for missing
+	 * rx MPDUs to arrive before flushing subsequent MPDUs that have already
+	 * been received. This parameter specifies the timeout in milliseconds
+	 * for each class.
+	 */
+	__le32 rx_timeout_pri[4];
+
+	/* What mode the rx should decap packets to.
+	 * MAC can decap to RAW (no decap), native wifi or Ethernet types.
+	 * This setting also determines the default TX behavior; however, TX
+	 * behavior can be modified on a per-VAP basis during VAP init.
+	 */
+	__le32 rx_decap_mode;
+
+	__le32 scan_max_pending_req;
+
+	__le32 bmiss_offload_max_vdev;
+
+	__le32 roam_offload_max_vdev;
+
+	__le32 roam_offload_max_ap_profiles;
+
+	/* How many groups to use for mcast->ucast conversion.
+	 * The target's WAL maintains a table to hold information regarding
+	 * which peers belong to a given multicast group, so that if
+	 * multicast->unicast conversion is enabled, the target can convert
+	 * multicast tx frames to a series of unicast tx frames, to each peer
+	 * within the multicast group. This num_mcast_groups configuration
+	 * parameter tells the target how many multicast groups to provide
+	 * storage for within its multicast group membership table.
+	 */
+	__le32 num_mcast_groups;
+
+	/* Size to alloc for the mcast membership table.
+	 * This num_mcast_table_elems configuration parameter tells the target
+	 * how many peer elements it needs to provide storage for in its
+	 * multicast group membership table. These multicast group membership
+	 * table elements are shared by the multicast groups stored within
+	 * the table.
+	 */
+	__le32 num_mcast_table_elems;
+
+	/* Whether/how to do multicast->unicast conversion.
+	 * This configuration parameter specifies whether the target should
+	 * perform multicast --> unicast conversion on transmit, and if so,
+	 * what to do if it finds no entries in its multicast group membership
+	 * table for the multicast IP address in the tx frame.
+	 * Configuration value:
+	 * 0 -> Do not perform multicast to unicast conversion.
+	 * 1 -> Convert multicast frames to unicast, if the IP multicast address
+	 *      from the tx frame is found in the multicast group membership
+	 *      table.  If the IP multicast address is not found, drop the frame.
+	 * 2 -> Convert multicast frames to unicast, if the IP multicast address
+	 *      from the tx frame is found in the multicast group membership
+	 *      table.  If the IP multicast address is not found, transmit the
+	 *      frame as multicast.
+	 */
+	__le32 mcast2ucast_mode;
+
+	/* How much memory to allocate for a tx PPDU dbg log.
+	 * This parameter controls how much memory the target will allocate to
+	 * store a log of tx PPDU meta-information (how large the PPDU was,
+	 * when it was sent, whether it was successful, etc.)
+	 */
+	__le32 tx_dbg_log_size;
+
+	/* How many AST entries to be allocated for WDS */
+	__le32 num_wds_entries;
+
+	/* MAC DMA burst size: 0 - default, 1 - 256B */
+	__le32 dma_burst_size;
+
+	/* Fixed delimiters to be inserted after every MPDU to account for
+	 * interface latency to avoid underrun.
+	 */
+	__le32 mac_aggr_delim;
+
+	/* Determine whether target is responsible for detecting duplicate
+	 * non-aggregate MPDU and timing out stale fragments. A-MPDU reordering
+	 * is always performed on the target.
+	 *
+	 * 0: target responsible for frag timeout and dup checking
+	 * 1: host responsible for frag timeout and dup checking
+	 */
+	__le32 rx_skip_defrag_timeout_dup_detection_check;
+
+	/* Configuration for VoW: number of video nodes to be supported and
+	 * max number of descriptors for each video link (node).
+	 */
+	__le32 vow_config;
+
+	/* Maximum vdev that could use gtk offload */
+	__le32 gtk_offload_max_vdev;
+
+	/* Number of msdu descriptors target should use */
+	__le32 num_msdu_desc;
+
+	/* Max number of tx fragments per MSDU.
+	 * This parameter controls the max number of tx fragments per MSDU.
+	 * This will be passed by the target as part of the WMI_SERVICE_READY
+	 * event and is overridden by the OS shim as required.
+	 */
+	__le32 max_frag_entries;
+
+	/* Max number of extended peer stats.
+	 * This parameter controls the max number of peers for which extended
+	 * statistics are supported by target
+	 */
+	__le32 max_peer_ext_stats;
+
+	/* Smart antenna capabilities information.
+	 * 1 - Smart antenna is enabled
+	 * 0 - Smart antenna is disabled
+	 * In future this can contain smart antenna specific capabilities.
+	 */
+	__le32 smart_ant_cap;
+
+	/* User can configure the buffers allocated for each AC (BE, BK, VI, VO)
+	 * during init.
+	 */
+	__le32 bk_minfree;
+	__le32 be_minfree;
+	__le32 vi_minfree;
+	__le32 vo_minfree;
+
+	/* Rx batch mode capability.
+	 * 1 - Rx batch mode enabled
+	 * 0 - Rx batch mode disabled
+	 */
+	__le32 rx_batchmode;
+
+	/* Thermal throttling capability.
+	 * 1 - Capable of thermal throttling
+	 * 0 - Not capable of thermal throttling
+	 */
+	__le32 tt_support;
+
+	/* ATF configuration.
+	 * 1  - Enable ATF
+	 * 0  - Disable ATF
+	 */
+	__le32 atf_config;
+
+	/* Configure padding to manage IP header misalignment
+	 * 1  - Enable padding
+	 * 0  - Disable padding
+	 */
+	__le32 iphdr_pad_config;
+
+	/* qwrap configuration
+	 * 1  - This is qwrap configuration
+	 * 0  - This is not qwrap
+	 */
+	__le32 qwrap_config;
+} __packed;
 
 /* structure describing host memory chunk. */
 struct host_memory_chunk {
@@ -2014,6 +2629,11 @@
 	struct wmi_host_mem_chunks mem_chunks;
 } __packed;
 
+struct wmi_init_cmd_10_4 {
+	struct wmi_resource_config_10_4 resource_config;
+	struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
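The wire layout this struct implies, matching the length computation in ath10k_wmi_10_4_op_gen_init() above (a fixed config block followed by a variable-length chunk list):

/*  +----------------------------------+
 *  | struct wmi_resource_config_10_4  |  fixed-size config block
 *  +----------------------------------+
 *  | struct wmi_host_mem_chunks       |  count, then one
 *  |   (req_id/ptr/size per chunk)    |  host_memory_chunk per
 *  +----------------------------------+  firmware mem_req
 */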
 struct wmi_chan_list_entry {
 	__le16 freq;
 	u8 phy_mode; /* valid for 10.2 only */
@@ -2260,15 +2880,17 @@
 };
 
 enum wmi_scan_event_type {
-	WMI_SCAN_EVENT_STARTED         = 0x1,
-	WMI_SCAN_EVENT_COMPLETED       = 0x2,
-	WMI_SCAN_EVENT_BSS_CHANNEL     = 0x4,
-	WMI_SCAN_EVENT_FOREIGN_CHANNEL = 0x8,
-	WMI_SCAN_EVENT_DEQUEUED        = 0x10,
-	WMI_SCAN_EVENT_PREEMPTED       = 0x20, /* possibly by high-prio scan */
-	WMI_SCAN_EVENT_START_FAILED    = 0x40,
-	WMI_SCAN_EVENT_RESTARTED       = 0x80,
-	WMI_SCAN_EVENT_MAX             = 0x8000
+	WMI_SCAN_EVENT_STARTED              = BIT(0),
+	WMI_SCAN_EVENT_COMPLETED            = BIT(1),
+	WMI_SCAN_EVENT_BSS_CHANNEL          = BIT(2),
+	WMI_SCAN_EVENT_FOREIGN_CHANNEL      = BIT(3),
+	WMI_SCAN_EVENT_DEQUEUED             = BIT(4),
+	/* possibly by high-prio scan */
+	WMI_SCAN_EVENT_PREEMPTED            = BIT(5),
+	WMI_SCAN_EVENT_START_FAILED         = BIT(6),
+	WMI_SCAN_EVENT_RESTARTED            = BIT(7),
+	WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT = BIT(8),
+	WMI_SCAN_EVENT_MAX                  = BIT(15),
 };
 
 enum wmi_scan_completion_reason {
@@ -2276,6 +2898,7 @@
 	WMI_SCAN_REASON_CANCELLED,
 	WMI_SCAN_REASON_PREEMPTED,
 	WMI_SCAN_REASON_TIMEDOUT,
+	WMI_SCAN_REASON_INTERNAL_FAILURE,
 	WMI_SCAN_REASON_MAX,
 };
 
@@ -2329,15 +2952,40 @@
 	u8 buf[0];
 } __packed;
 
+struct wmi_10_4_mgmt_rx_hdr {
+	__le32 channel;
+	__le32 snr;
+	u8 rssi_ctl[4];
+	__le32 rate;
+	__le32 phy_mode;
+	__le32 buf_len;
+	__le32 status;
+} __packed;
+
+struct wmi_10_4_mgmt_rx_event {
+	struct wmi_10_4_mgmt_rx_hdr hdr;
+	u8 buf[0];
+} __packed;
+
 #define WMI_RX_STATUS_OK			0x00
 #define WMI_RX_STATUS_ERR_CRC			0x01
 #define WMI_RX_STATUS_ERR_DECRYPT		0x08
 #define WMI_RX_STATUS_ERR_MIC			0x10
 #define WMI_RX_STATUS_ERR_KEY_CACHE_MISS	0x20
 
-#define PHY_ERROR_SPECTRAL_SCAN		0x26
-#define PHY_ERROR_FALSE_RADAR_EXT		0x24
-#define PHY_ERROR_RADAR				0x05
+#define PHY_ERROR_GEN_SPECTRAL_SCAN		0x26
+#define PHY_ERROR_GEN_FALSE_RADAR_EXT		0x24
+#define PHY_ERROR_GEN_RADAR			0x05
+
+#define PHY_ERROR_10_4_RADAR_MASK               0x4
+#define PHY_ERROR_10_4_SPECTRAL_SCAN_MASK       0x4000000
+
+enum phy_err_type {
+	PHY_ERROR_UNKNOWN,
+	PHY_ERROR_SPECTRAL_SCAN,
+	PHY_ERROR_FALSE_RADAR_EXT,
+	PHY_ERROR_RADAR
+};
 
 struct wmi_phyerr {
 	__le32 tsf_timestamp;
@@ -2360,6 +3008,23 @@
 	struct wmi_phyerr phyerrs[0];
 } __packed;
 
+struct wmi_10_4_phyerr_event {
+	__le32 tsf_l32;
+	__le32 tsf_u32;
+	__le16 freq1;
+	__le16 freq2;
+	u8 rssi_combined;
+	u8 chan_width_mhz;
+	u8 phy_err_code;
+	u8 rsvd0;
+	__le32 rssi_chains[4];
+	__le16 nf_chains[4];
+	__le32 phy_err_mask[2];
+	__le32 tsf_timestamp;
+	__le32 buf_len;
+	u8 buf[0];
+} __packed;
+
 #define PHYERR_TLV_SIG				0xBB
 #define PHYERR_TLV_TAG_SEARCH_FFT_REPORT	0xFB
 #define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY	0xF8
@@ -2613,6 +3278,48 @@
 	u32 burst_dur;
 	u32 burst_enable;
 	u32 cal_period;
+	u32 aggr_burst;
+	u32 rx_decap_mode;
+	u32 smart_antenna_default_antenna;
+	u32 igmpmld_override;
+	u32 igmpmld_tid;
+	u32 antenna_gain;
+	u32 rx_filter;
+	u32 set_mcast_to_ucast_tid;
+	u32 proxy_sta_mode;
+	u32 set_mcast2ucast_mode;
+	u32 set_mcast2ucast_buffer;
+	u32 remove_mcast2ucast_buffer;
+	u32 peer_sta_ps_statechg_enable;
+	u32 igmpmld_ac_override;
+	u32 block_interbss;
+	u32 set_disable_reset_cmdid;
+	u32 set_msdu_ttl_cmdid;
+	u32 set_ppdu_duration_cmdid;
+	u32 txbf_sound_period_cmdid;
+	u32 set_promisc_mode_cmdid;
+	u32 set_burst_mode_cmdid;
+	u32 en_stats;
+	u32 mu_group_policy;
+	u32 noise_detection;
+	u32 noise_threshold;
+	u32 dpd_enable;
+	u32 set_mcast_bcast_echo;
+	u32 atf_strict_sch;
+	u32 atf_sched_duration;
+	u32 ant_plzn;
+	u32 mgmt_retry_limit;
+	u32 sensitivity_level;
+	u32 signed_txpower_2g;
+	u32 signed_txpower_5g;
+	u32 enable_per_tid_amsdu;
+	u32 enable_per_tid_ampdu;
+	u32 cca_threshold;
+	u32 rts_fixed_rate;
+	u32 pdev_reset;
+	u32 wapi_mbssid_offset;
+	u32 arp_srcaddr;
+	u32 arp_dstaddr;
 };
 
 #define WMI_PDEV_PARAM_UNSUPPORTED 0
@@ -2828,6 +3535,100 @@
 	WMI_10X_PDEV_PARAM_CAL_PERIOD
 };
 
+enum wmi_10_4_pdev_param {
+	WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+	WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+	WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+	WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+	WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+	WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+	WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+	WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+	WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+	WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+	WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+	WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+	WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+	WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+	WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+	WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+	WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+	WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+	WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+	WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	WMI_10_4_PDEV_PARAM_PMF_QOS,
+	WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+	WMI_10_4_PDEV_PARAM_DCS,
+	WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+	WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+	WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+	WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+	WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+	WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+	WMI_10_4_PDEV_PARAM_PROXY_STA,
+	WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+	WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+	WMI_10_4_PDEV_PARAM_AGGR_BURST,
+	WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+	WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+	WMI_10_4_PDEV_PARAM_BURST_DUR,
+	WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+	WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+	WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+	WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+	WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+	WMI_10_4_PDEV_PARAM_RX_FILTER,
+	WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+	WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+	WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+	WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+	WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+	WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+	WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+	WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+	WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+	WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+	WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+	WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+	WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+	WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+	WMI_10_4_PDEV_PARAM_EN_STATS,
+	WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+	WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+	WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+	WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+	WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+	WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+	WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+	WMI_10_4_PDEV_PARAM_ANT_PLZN,
+	WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+	WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+	WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+	WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+	WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+	WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+	WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+	WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+	WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+	WMI_10_4_PDEV_PARAM_PDEV_RESET,
+	WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+	WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+	WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+};
+
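As with the other firmware branches, callers never use these raw 10.4 values directly; they go through the per-version map installed in ath10k_wmi_attach(). A typical, illustrative call site:

	u32 param = ar->wmi.pdev_param->ani_enable;

	/* gen_pdev_set_param rejects WMI_PDEV_PARAM_UNSUPPORTED (0),
	 * so parameters a given firmware lacks fail cleanly
	 */
	ret = ath10k_wmi_pdev_set_param(ar, param, 1);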
 struct wmi_pdev_set_param_cmd {
 	__le32 param_id;
 	__le32 param_value;
@@ -3506,6 +4307,22 @@
 	u32 drop_unencry;
 	u32 tx_encap_type;
 	u32 ap_detect_out_of_sync_sleeping_sta_time_secs;
+	u32 rc_num_retries;
+	u32 cabq_maxdur;
+	u32 mfptest_set;
+	u32 rts_fixed_rate;
+	u32 vht_sgimask;
+	u32 vht80_ratemask;
+	u32 early_rx_adjust_enable;
+	u32 early_rx_tgt_bmiss_num;
+	u32 early_rx_bmiss_sample_cycle;
+	u32 early_rx_slop_step;
+	u32 early_rx_init_slop;
+	u32 early_rx_adjust_pause;
+	u32 proxy_sta;
+	u32 meru_vc;
+	u32 rx_decap_type;
+	u32 bw_nss_ratemask;
 };
 
 #define WMI_VDEV_PARAM_UNSUPPORTED 0
@@ -3764,11 +4581,85 @@
 	WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
 };
 
+enum wmi_10_4_vdev_param {
+	WMI_10_4_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+	WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+	WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+	WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+	WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+	WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+	WMI_10_4_VDEV_PARAM_SLOT_TIME,
+	WMI_10_4_VDEV_PARAM_PREAMBLE,
+	WMI_10_4_VDEV_PARAM_SWBA_TIME,
+	WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+	WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+	WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+	WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+	WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	WMI_10_4_VDEV_PARAM_WDS,
+	WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+	WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+	WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+	WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+	WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+	WMI_10_4_VDEV_PARAM_CHWIDTH,
+	WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+	WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+	WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+	WMI_10_4_VDEV_PARAM_MGMT_RATE,
+	WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+	WMI_10_4_VDEV_PARAM_FIXED_RATE,
+	WMI_10_4_VDEV_PARAM_SGI,
+	WMI_10_4_VDEV_PARAM_LDPC,
+	WMI_10_4_VDEV_PARAM_TX_STBC,
+	WMI_10_4_VDEV_PARAM_RX_STBC,
+	WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+	WMI_10_4_VDEV_PARAM_DEF_KEYID,
+	WMI_10_4_VDEV_PARAM_NSS,
+	WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+	WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+	WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+	WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+	WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+	WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+	WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+	WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+	WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+	WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+	WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+	WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+	WMI_10_4_VDEV_PARAM_TXBF,
+	WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+	WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+	WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+	WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+	WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+	WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+	WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+	WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+	WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+	WMI_10_4_VDEV_PARAM_PROXY_STA,
+	WMI_10_4_VDEV_PARAM_MERU_VC,
+	WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+	WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+};
+
 #define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
 #define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
 #define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
 #define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
 
+#define WMI_TXBF_STS_CAP_OFFSET_LSB	4
+#define WMI_TXBF_STS_CAP_OFFSET_MASK	0xf0
+#define WMI_BF_SOUND_DIM_OFFSET_LSB	8
+#define WMI_BF_SOUND_DIM_OFFSET_MASK	0xf00
+
 /* slot time long */
 #define WMI_VDEV_SLOT_TIME_LONG		0x1
 /* slot time short */
@@ -4305,6 +5196,14 @@
 	__le32 tim_num_ps_pending;
 } __packed;
 
+struct wmi_tim_info_arg {
+	__le32 tim_len;
+	__le32 tim_mcast;
+	const __le32 *tim_bitmap;
+	__le32 tim_changed;
+	__le32 tim_num_ps_pending;
+} __packed;
+
 /* Maximum number of NOA Descriptors supported */
 #define WMI_P2P_MAX_NOA_DESCRIPTORS 4
 #define WMI_P2P_OPPPS_ENABLE_BIT	BIT(0)
@@ -4336,6 +5235,47 @@
 	struct wmi_bcn_info bcn_info[0];
 } __packed;
 
+/* 16 words = 512 clients, plus 1 guard word */
+#define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17
+
+struct wmi_10_4_tim_info {
+	__le32 tim_len;
+	__le32 tim_mcast;
+	__le32 tim_bitmap[WMI_10_4_TIM_BITMAP_ARRAY_SIZE];
+	__le32 tim_changed;
+	__le32 tim_num_ps_pending;
+} __packed;
+
+#define WMI_10_4_P2P_MAX_NOA_DESCRIPTORS 1
+
+struct wmi_10_4_p2p_noa_info {
+	/* Bit 0 - Flag to indicate an update in NOA schedule
+	 * Bits 7-1 - Reserved
+	 */
+	u8 changed;
+	/* NOA index */
+	u8 index;
+	/* Bit 0 - Opp PS state of the AP
+	 * Bits 1-7 - Ctwindow in TUs
+	 */
+	u8 ctwindow_oppps;
+	/* Number of NOA descriptors */
+	u8 num_descriptors;
+
+	struct wmi_p2p_noa_descriptor
+		noa_descriptors[WMI_10_4_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
+struct wmi_10_4_bcn_info {
+	struct wmi_10_4_tim_info tim_info;
+	struct wmi_10_4_p2p_noa_info p2p_noa_info;
+} __packed;
+
+struct wmi_10_4_host_swba_event {
+	__le32 vdev_map;
+	struct wmi_10_4_bcn_info bcn_info[0];
+} __packed;
+
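The fixed 17-word bitmap here is why the swba plumbing below switches to wmi_tim_info_arg: with a const bitmap pointer plus a length, the legacy 4-word TIM and this 10.4 layout can share one beacon path. A hedged sketch of the per-version copy (helper name hypothetical):

static void ath10k_wmi_10_4_pull_tim(const struct wmi_10_4_tim_info *tim,
				     struct wmi_tim_info_arg *arg)
{
	arg->tim_len = tim->tim_len;
	arg->tim_mcast = tim->tim_mcast;
	arg->tim_bitmap = tim->tim_bitmap;	/* points into the event skb */
	arg->tim_changed = tim->tim_changed;
	arg->tim_num_ps_pending = tim->tim_num_ps_pending;
}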
 #define WMI_MAX_AP_VDEV 16
 
 struct wmi_tbtt_offset_event {
@@ -4660,11 +5600,24 @@
 	__le32 cycle_count;
 } __packed;
 
+struct wmi_10_4_chan_info_event {
+	__le32 err_code;
+	__le32 freq;
+	__le32 cmd_flags;
+	__le32 noise_floor;
+	__le32 rx_clear_count;
+	__le32 cycle_count;
+	__le32 chan_tx_pwr_range;
+	__le32 chan_tx_pwr_tp;
+	__le32 rx_frame_count;
+} __packed;
+
 struct wmi_peer_sta_kickout_event {
 	struct wmi_mac_addr peer_macaddr;
 } __packed;
 
 #define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
+#define WMI_CHAN_INFO_FLAG_PRE_COMPLETE BIT(1)
 
 /* Beacon filter wmi command info */
 #define BCN_FLT_MAX_SUPPORTED_IES	256
@@ -4840,6 +5793,9 @@
 	__le32 noise_floor;
 	__le32 rx_clear_count;
 	__le32 cycle_count;
+	__le32 chan_tx_pwr_range;
+	__le32 chan_tx_pwr_tp;
+	__le32 rx_frame_count;
 };
 
 struct wmi_vdev_start_ev_arg {
@@ -4855,16 +5811,29 @@
 
 struct wmi_swba_ev_arg {
 	__le32 vdev_map;
-	const struct wmi_tim_info *tim_info[WMI_MAX_AP_VDEV];
+	struct wmi_tim_info_arg tim_info[WMI_MAX_AP_VDEV];
 	const struct wmi_p2p_noa_info *noa_info[WMI_MAX_AP_VDEV];
 };
 
 struct wmi_phyerr_ev_arg {
-	__le32 num_phyerrs;
-	__le32 tsf_l32;
-	__le32 tsf_u32;
-	__le32 buf_len;
-	const struct wmi_phyerr *phyerrs;
+	u32 tsf_timestamp;
+	u16 freq1;
+	u16 freq2;
+	u8 rssi_combined;
+	u8 chan_width_mhz;
+	u8 phy_err_code;
+	u16 nf_chains[4];
+	u32 buf_len;
+	const u8 *buf;
+	u8 hdr_len;
+};
+
+struct wmi_phyerr_hdr_arg {
+	u32 num_phyerrs;
+	u32 tsf_l32;
+	u32 tsf_u32;
+	u32 buf_len;
+	const void *phyerrs;
 };
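Splitting the old single arg into wmi_phyerr_hdr_arg plus a per-error wmi_phyerr_ev_arg matches the .pull_phyerr_hdr/.pull_phyerr op pair registered above. The event handler presumably iterates along these lines (a sketch; wrapper names follow the ops, exact advancing may differ):

	struct wmi_phyerr_hdr_arg hdr_arg;
	struct wmi_phyerr_ev_arg phyerr_arg;
	const void *phyerr;
	int left_len;
	u32 i;

	if (ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg))
		return;

	phyerr = hdr_arg.phyerrs;
	left_len = hdr_arg.buf_len;

	for (i = 0; i < hdr_arg.num_phyerrs; i++) {
		if (ath10k_wmi_pull_phyerr(ar, phyerr, left_len, &phyerr_arg))
			break;
		/* dispatch to DFS / spectral scan based on phy_err_code,
		 * then step past this error's header and payload
		 */
		phyerr += phyerr_arg.hdr_len + phyerr_arg.buf_len;
		left_len -= phyerr_arg.hdr_len + phyerr_arg.buf_len;
	}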
 
 struct wmi_svc_rdy_ev_arg {
@@ -5085,6 +6054,12 @@
 	u32 pref_offchan_bw;
 };
 
+enum wmi_txbf_conf {
+	WMI_TXBF_CONF_UNSUPPORTED,
+	WMI_TXBF_CONF_BEFORE_ASSOC,
+	WMI_TXBF_CONF_AFTER_ASSOC,
+};
+
 struct ath10k;
 struct ath10k_vif;
 struct ath10k_fw_stats_pdev;
@@ -5136,9 +6111,9 @@
 void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_dfs(struct ath10k *ar,
-			  const struct wmi_phyerr *phyerr, u64 tsf);
+			  struct wmi_phyerr_ev_arg *phyerr, u64 tsf);
 void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
-				    const struct wmi_phyerr *phyerr,
+				    struct wmi_phyerr_ev_arg *phyerr,
 				    u64 tsf);
 void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb);
@@ -5167,5 +6142,6 @@
 void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
-
+int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, const void *phyerr_buf,
+				 int left_len, struct wmi_phyerr_ev_arg *arg);
 #endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index a68d8fd..8e02b38 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -301,8 +301,26 @@
 		ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
 
 exit:
+	if (ret) {
+		switch (ar->state) {
+		case ATH10K_STATE_ON:
+			ar->state = ATH10K_STATE_RESTARTING;
+			ret = 1;
+			break;
+		case ATH10K_STATE_OFF:
+		case ATH10K_STATE_RESTARTING:
+		case ATH10K_STATE_RESTARTED:
+		case ATH10K_STATE_UTF:
+		case ATH10K_STATE_WEDGED:
+			ath10k_warn(ar, "encountered unexpected device state %d on resume, cannot recover\n",
+				    ar->state);
+			ret = -EIO;
+			break;
+		}
+	}
+
 	mutex_unlock(&ar->conf_mutex);
-	return ret ? 1 : 0;
+	return ret;
 }
 
 int ath10k_wow_init(struct ath10k *ar)
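The reworked exit path above changes the function's return contract: per the ieee80211_ops documentation, returning 1 from resume asks mac80211 to restart the hardware instead of failing the resume. Condensed:

/* ath10k_wow_op_resume() after this change:
 *   0    - resumed normally
 *   1    - resume failed while the device was ATH10K_STATE_ON; the
 *          state moves to RESTARTING and mac80211 restarts the hw
 *   -EIO - device is in a state we cannot recover from
 */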
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index 2399a39..b1278f9 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -5,7 +5,6 @@
 	select MAC80211_LEDS
 	select LEDS_CLASS
 	select NEW_LEDS
-	select AVERAGE
 	select ATH5K_AHB if ATH25
 	select ATH5K_PCI if !ATH25
 	---help---
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index 5c00875..38be270 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -223,7 +223,7 @@
 ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
 			 bool ofdm_trigger)
 {
-	int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
+	int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg);
 
 	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "raise immunity (%s)",
 		ofdm_trigger ? "OFDM" : "CCK");
@@ -309,7 +309,7 @@
 static void
 ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
 {
-	int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
+	int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg);
 
 	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "lower immunity");
 
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index e22b0e7..fa6e89e 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1252,6 +1252,8 @@
 #define ATH5K_TXQ_LEN_MAX	(ATH_TXBUF / 4)		/* bufs per queue */
 #define ATH5K_TXQ_LEN_LOW	(ATH5K_TXQ_LEN_MAX / 2)	/* low mark */
 
+DECLARE_EWMA(beacon_rssi, 1024, 8)
+
 /* Driver state associated with an instance of a device */
 struct ath5k_hw {
 	struct ath_common       common;
@@ -1432,7 +1434,7 @@
 	struct ath5k_nfcal_hist ah_nfcal_hist;
 
 	/* average beacon RSSI in our BSS (used by ANI) */
-	struct ewma		ah_beacon_rssi_avg;
+	struct ewma_beacon_rssi	ah_beacon_rssi_avg;
 
 	/* noise floor from last periodic calibration */
 	s32			ah_noise_floor;
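DECLARE_EWMA(name, factor, weight) comes from the linux/average.h rework this series depends on: instead of storing factor/weight in every struct ewma and supplying them to ewma_init() at runtime, the macro generates a named type plus ewma_beacon_rssi_{init,add,read}() helpers with the constants baked in at compile time. A simplified expansion (see linux/average.h for the real arithmetic):

struct ewma_beacon_rssi {
	unsigned long internal;
};

static inline void ewma_beacon_rssi_init(struct ewma_beacon_rssi *e)
{
	e->internal = 0;
}

static inline unsigned long ewma_beacon_rssi_read(struct ewma_beacon_rssi *e)
{
	return e->internal >> ilog2(1024);	/* scale the factor back out */
}

static inline void ewma_beacon_rssi_add(struct ewma_beacon_rssi *e,
					unsigned long val)
{
	unsigned long internal = e->internal;

	e->internal = internal ?
		(((internal << ilog2(8)) - internal) +
		 (val << ilog2(1024))) >> ilog2(8) :
		(val << ilog2(1024));
}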
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 23552f4..342563a 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1430,7 +1430,7 @@
 	trace_ath5k_rx(ah, skb);
 
 	if (ath_is_mybeacon(common, (struct ieee80211_hdr *)skb->data)) {
-		ewma_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
+		ewma_beacon_rssi_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
 
 		/* check beacons in IBSS mode */
 		if (ah->opmode == NL80211_IFTYPE_ADHOC)
@@ -2936,7 +2936,7 @@
 	ah->ah_cal_next_short = jiffies +
 		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
 
-	ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
+	ewma_beacon_rssi_init(&ah->ah_beacon_rssi_avg);
 
 	/* clear survey data and cycle counters */
 	memset(&ah->survey, 0, sizeof(ah->survey));
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index c70782e..654a1e3 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -722,7 +722,7 @@
 			st->mib_intr);
 	len += snprintf(buf + len, sizeof(buf) - len,
 			"beacon RSSI average:\t%d\n",
-			(int)ewma_read(&ah->ah_beacon_rssi_avg));
+			(int)ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg));
 
 #define CC_PRINT(_struct, _field) \
 	_struct._field, \
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index 14cab14..112d8a9 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -427,7 +427,7 @@
 };
 
 /*
- * credit distibution code that is passed into the distrbution function,
+ * credit distribution code that is passed into the distribution function,
  * there are mandatory and optional codes that must be handled
  */
 enum htc_credit_dist_reason {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index fc595b9..c5f8bc4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -455,7 +455,7 @@
 #define AR_PHY_MODE              (AR_SM_BASE + 0x8)
 #define AR_PHY_ACTIVE            (AR_SM_BASE + 0xc)
 #define AR_PHY_SPUR_MASK_A       (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x18 : 0x20))
-#define AR_PHY_SPUR_MASK_B       (AR_SM_BASE + 0x24)
+#define AR_PHY_SPUR_MASK_B       (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x1c : 0x24))
 #define AR_PHY_SPECTRAL_SCAN     (AR_SM_BASE + 0x28)
 #define AR_PHY_RADAR_BW_FILTER   (AR_SM_BASE + 0x2c)
 #define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30)
@@ -495,7 +495,7 @@
 #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A                       0x3FF
 #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S                     0
 
-#define AR_PHY_TEST              (AR_SM_BASE + 0x160)
+#define AR_PHY_TEST              (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x15c : 0x160))
 
 #define AR_PHY_TEST_BBB_OBS_SEL       0x780000
 #define AR_PHY_TEST_BBB_OBS_SEL_S     19
@@ -521,24 +521,29 @@
 #define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S	  29
 
 
-#define AR_PHY_TSTDAC            (AR_SM_BASE + 0x168)
+#define AR_PHY_TSTDAC            (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x164 : 0x168))
 
-#define AR_PHY_CHAN_STATUS       (AR_SM_BASE + 0x16c)
+#define AR_PHY_CHAN_STATUS       (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x168 : 0x16c))
 
 #define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x16c : 0x170))
 #define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ	0x00000008
 #define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S	3
 
-#define AR_PHY_CHNINFO_NOISEPWR  (AR_SM_BASE + 0x174)
-#define AR_PHY_CHNINFO_GAINDIFF  (AR_SM_BASE + 0x178)
-#define AR_PHY_CHNINFO_FINETIM   (AR_SM_BASE + 0x17c)
-#define AR_PHY_CHAN_INFO_GAIN_0  (AR_SM_BASE + 0x180)
-#define AR_PHY_SCRAMBLER_SEED    (AR_SM_BASE + 0x190)
-#define AR_PHY_CCK_TX_CTRL       (AR_SM_BASE + 0x194)
+#define AR_PHY_CHNINFO_NOISEPWR  (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x170 : 0x174))
+#define AR_PHY_CHNINFO_GAINDIFF  (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x174 : 0x178))
+#define AR_PHY_CHNINFO_FINETIM   (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x178 : 0x17c))
+#define AR_PHY_CHAN_INFO_GAIN_0  (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x17c : 0x180))
+#define AR_PHY_SCRAMBLER_SEED    (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x184 : 0x190))
+#define AR_PHY_CCK_TX_CTRL       (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x188 : 0x194))
 
 #define AR_PHY_HEAVYCLIP_CTL     (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x198 : 0x1a4))
 #define AR_PHY_HEAVYCLIP_20      (AR_SM_BASE + 0x1a8)
 #define AR_PHY_HEAVYCLIP_40      (AR_SM_BASE + 0x1ac)
+#define AR_PHY_HEAVYCLIP_1	 (AR_SM_BASE + 0x19c)
+#define AR_PHY_HEAVYCLIP_2	 (AR_SM_BASE + 0x1a0)
+#define AR_PHY_HEAVYCLIP_3	 (AR_SM_BASE + 0x1a4)
+#define AR_PHY_HEAVYCLIP_4	 (AR_SM_BASE + 0x1a8)
+#define AR_PHY_HEAVYCLIP_5	 (AR_SM_BASE + 0x1ac)
 #define AR_PHY_ILLEGAL_TXRATE    (AR_SM_BASE + 0x1b0)
 
 #define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2))
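
One subtlety of the conditional offsets above: the macros expand AR_SREV_9561(ah) textually, so they can only be used where a local `ah` (struct ath_hw *) is in scope. An illustrative call site, using the standard ath9k REG_WRITE accessor:

/* Illustrative only: picks offset 0x1c on AR9561, 0x24 elsewhere */
static void example_set_spur_mask_b(struct ath_hw *ah, u32 val)
{
	REG_WRITE(ah, AR_PHY_SPUR_MASK_B, val);
}
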
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index a7a81b3..c85c479 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -172,14 +172,6 @@
 	struct sk_buff_head complete_q;
 };
 
-struct ath_atx_ac {
-	struct ath_txq *txq;
-	struct list_head list;
-	struct list_head tid_q;
-	bool clear_ps_filter;
-	bool sched;
-};
-
 struct ath_frame_info {
 	struct ath_buf *bf;
 	u16 framelen;
@@ -242,7 +234,7 @@
 	struct sk_buff_head buf_q;
 	struct sk_buff_head retry_q;
 	struct ath_node *an;
-	struct ath_atx_ac *ac;
+	struct ath_txq *txq;
 	unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
 	u16 seq_start;
 	u16 seq_next;
@@ -252,8 +244,8 @@
 	int baw_tail;   /* next unused tx buffer slot */
 
 	s8 bar_index;
-	bool sched;
 	bool active;
+	bool clear_ps_filter;
 };
 
 struct ath_node {
@@ -261,7 +253,6 @@
 	struct ieee80211_sta *sta; /* station struct we're part of */
 	struct ieee80211_vif *vif; /* interface with which we're associated */
 	struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
-	struct ath_atx_ac ac[IEEE80211_NUM_ACS];
 
 	u16 maxampdu;
 	u8 mpdudensity;
@@ -410,6 +401,12 @@
 	ATH_OFFCHANNEL_ROC_DONE,
 };
 
+enum ath_roc_complete_reason {
+	ATH_ROC_COMPLETE_EXPIRE,
+	ATH_ROC_COMPLETE_ABORT,
+	ATH_ROC_COMPLETE_CANCEL,
+};
+
 struct ath_offchannel {
 	struct ath_chanctx chan;
 	struct timer_list timer;
@@ -471,7 +468,8 @@
 void ath_chanctx_set_next(struct ath_softc *sc, bool force);
 void ath_offchannel_next(struct ath_softc *sc);
 void ath_scan_complete(struct ath_softc *sc, bool abort);
-void ath_roc_complete(struct ath_softc *sc, bool abort);
+void ath_roc_complete(struct ath_softc *sc,
+		      enum ath_roc_complete_reason reason);
 struct ath_chanctx* ath_is_go_chanctx_present(struct ath_softc *sc);
 
 #else
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 2066650..90f5773 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -915,18 +915,27 @@
 	}
 }
 
-void ath_roc_complete(struct ath_softc *sc, bool abort)
+void ath_roc_complete(struct ath_softc *sc, enum ath_roc_complete_reason reason)
 {
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 
-	if (abort)
-		ath_dbg(common, CHAN_CTX, "RoC aborted\n");
-	else
-		ath_dbg(common, CHAN_CTX, "RoC expired\n");
-
 	sc->offchannel.roc_vif = NULL;
 	sc->offchannel.roc_chan = NULL;
-	ieee80211_remain_on_channel_expired(sc->hw);
+
+	switch (reason) {
+	case ATH_ROC_COMPLETE_ABORT:
+		ath_dbg(common, CHAN_CTX, "RoC aborted\n");
+		ieee80211_remain_on_channel_expired(sc->hw);
+		break;
+	case ATH_ROC_COMPLETE_EXPIRE:
+		ath_dbg(common, CHAN_CTX, "RoC expired\n");
+		ieee80211_remain_on_channel_expired(sc->hw);
+		break;
+	case ATH_ROC_COMPLETE_CANCEL:
+		ath_dbg(common, CHAN_CTX, "RoC canceled\n");
+		break;
+	}
+
 	ath_offchannel_next(sc);
 	ath9k_ps_restore(sc);
 }
@@ -1058,7 +1067,7 @@
 	case ATH_OFFCHANNEL_ROC_START:
 	case ATH_OFFCHANNEL_ROC_WAIT:
 		sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE;
-		ath_roc_complete(sc, false);
+		ath_roc_complete(sc, ATH_ROC_COMPLETE_EXPIRE);
 		break;
 	default:
 		break;
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index dbf8f49..da32c8f 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -765,6 +765,8 @@
 		[RESET_TYPE_BEACON_STUCK] = "Stuck Beacon",
 		[RESET_TYPE_MCI] = "MCI Reset",
 		[RESET_TYPE_CALIBRATION] = "Calibration error",
+		[RESET_TX_DMA_ERROR] = "Tx DMA stop error",
+		[RESET_RX_DMA_ERROR] = "Rx DMA stop error",
 	};
 	int i;
 
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index a8e9319..cd68c5f 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -50,6 +50,8 @@
 	RESET_TYPE_BEACON_STUCK,
 	RESET_TYPE_MCI,
 	RESET_TYPE_CALIBRATION,
+	RESET_TX_DMA_ERROR,
+	RESET_RX_DMA_ERROR,
 	__RESET_TYPE_MAX
 };
 
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index ffca918..c2ca57a 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -26,12 +26,11 @@
 	struct ath_node *an = file->private_data;
 	struct ath_softc *sc = an->sc;
 	struct ath_atx_tid *tid;
-	struct ath_atx_ac *ac;
 	struct ath_txq *txq;
 	u32 len = 0, size = 4096;
 	char *buf;
 	size_t retval;
-	int tidno, acno;
+	int tidno;
 
 	buf = kzalloc(size, GFP_KERNEL);
 	if (buf == NULL)
@@ -49,26 +48,13 @@
 			 an->mpdudensity);
 
 	len += scnprintf(buf + len, size - len,
-			 "%2s%7s\n", "AC", "SCHED");
-
-	for (acno = 0, ac = &an->ac[acno];
-	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
-		txq = ac->txq;
-		ath_txq_lock(sc, txq);
-		len += scnprintf(buf + len, size - len,
-				 "%2d%7d\n",
-				 acno, ac->sched);
-		ath_txq_unlock(sc, txq);
-	}
-
-	len += scnprintf(buf + len, size - len,
 			 "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
 			 "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
 			 "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
 
 	for (tidno = 0, tid = &an->tid[tidno];
 	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
-		txq = tid->ac->txq;
+		txq = tid->txq;
 		ath_txq_lock(sc, txq);
 		if (tid->active) {
 			len += scnprintf(buf + len, size - len,
@@ -80,7 +66,7 @@
 					 tid->baw_head,
 					 tid->baw_tail,
 					 tid->bar_index,
-					 tid->sched);
+					 !list_empty(&tid->list));
 		}
 		ath_txq_unlock(sc, txq);
 	}
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index e98a9ea..1ece42c 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -30,6 +30,157 @@
 	u8 pulse_length_pri;
 };
 
+/**** begin: CHIRP ************************************************************/
+
+/* min and max gradients for defined FCC chirping pulses, given by
+ * - 20MHz chirp width over a pulse width of  50us
+ * -  5MHz chirp width over a pulse width of 100us
+ */
+static const int BIN_DELTA_MIN		= 1;
+static const int BIN_DELTA_MAX		= 10;
+
+/* we need at least 3 deltas / 4 samples for reliable chirp detection */
+#define NUM_DIFFS 3
+static const int FFT_NUM_SAMPLES	= (NUM_DIFFS + 1);
+
+/* Threshold for difference of delta peaks */
+static const int MAX_DIFF		= 2;
+
+/* width range to be checked for chirping */
+static const int MIN_CHIRP_PULSE_WIDTH	= 20;
+static const int MAX_CHIRP_PULSE_WIDTH	= 110;
+
+struct ath9k_dfs_fft_20 {
+	u8 bin[28];
+	u8 lower_bins[3];
+} __packed;
+struct ath9k_dfs_fft_40 {
+	u8 bin[64];
+	u8 lower_bins[3];
+	u8 upper_bins[3];
+} __packed;
+
+static inline int fft_max_index(u8 *bins)
+{
+	return (bins[2] & 0xfc) >> 2;
+}
+static inline int fft_max_magnitude(u8 *bins)
+{
+	return (bins[0] & 0xc0) >> 6 | bins[1] << 2 | (bins[2] & 0x03) << 10;
+}
+static inline u8 fft_bitmap_weight(u8 *bins)
+{
+	return bins[0] & 0x3f;
+}
+
+static int ath9k_get_max_index_ht40(struct ath9k_dfs_fft_40 *fft,
+				    bool is_ctl, bool is_ext)
+{
+	const int DFS_UPPER_BIN_OFFSET = 64;
+	/* if detected radar on both channels, select the significant one */
+	if (is_ctl && is_ext) {
+		/* first check whether channels have 'strong' bins */
+		is_ctl = fft_bitmap_weight(fft->lower_bins) != 0;
+		is_ext = fft_bitmap_weight(fft->upper_bins) != 0;
+
+		/* if still unclear, take higher magnitude */
+		if (is_ctl && is_ext) {
+			int mag_lower = fft_max_magnitude(fft->lower_bins);
+			int mag_upper = fft_max_magnitude(fft->upper_bins);
+			if (mag_upper > mag_lower)
+				is_ctl = false;
+			else
+				is_ext = false;
+		}
+	}
+	if (is_ctl)
+		return fft_max_index(fft->lower_bins);
+	return fft_max_index(fft->upper_bins) + DFS_UPPER_BIN_OFFSET;
+}
+static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data,
+				 int datalen, bool is_ctl, bool is_ext)
+{
+	int i;
+	int max_bin[FFT_NUM_SAMPLES];
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	int prev_delta;
+
+	if (IS_CHAN_HT40(ah->curchan)) {
+		struct ath9k_dfs_fft_40 *fft = (struct ath9k_dfs_fft_40 *) data;
+		int num_fft_packets = datalen / sizeof(*fft);
+		if (num_fft_packets == 0)
+			return false;
+
+		ath_dbg(common, DFS, "HT40: datalen=%d, num_fft_packets=%d\n",
+			datalen, num_fft_packets);
+		if (num_fft_packets < (FFT_NUM_SAMPLES)) {
+			ath_dbg(common, DFS, "not enough packets for chirp\n");
+			return false;
+		}
+		/* HW sometimes adds 2 garbage bytes in front of FFT samples */
+		if ((datalen % sizeof(*fft)) == 2) {
+			fft = (struct ath9k_dfs_fft_40 *) (data + 2);
+			ath_dbg(common, DFS, "fixing datalen by 2\n");
+		}
+		if (IS_CHAN_HT40MINUS(ah->curchan)) {
+			int temp = is_ctl;
+			is_ctl = is_ext;
+			is_ext = temp;
+		}
+		for (i = 0; i < FFT_NUM_SAMPLES; i++)
+			max_bin[i] = ath9k_get_max_index_ht40(fft + i, is_ctl,
+							      is_ext);
+	} else {
+		struct ath9k_dfs_fft_20 *fft = (struct ath9k_dfs_fft_20 *) data;
+		int num_fft_packets = datalen / sizeof(*fft);
+		if (num_fft_packets == 0)
+			return false;
+		ath_dbg(common, DFS, "HT20: datalen=%d, num_fft_packets=%d\n",
+			datalen, num_fft_packets);
+		if (num_fft_packets < (FFT_NUM_SAMPLES)) {
+			ath_dbg(common, DFS, "not enough packets for chirp\n");
+			return false;
+		}
+		/* in ht20, this is a 6-bit signed number; xor 0x20 maps it to unsigned */
+		for (i = 0; i < FFT_NUM_SAMPLES; i++)
+			max_bin[i] = fft_max_index(fft[i].lower_bins) ^ 0x20;
+	}
+	ath_dbg(common, DFS, "bin_max = [%d, %d, %d, %d]\n",
+		max_bin[0], max_bin[1], max_bin[2], max_bin[3]);
+
+	/* Check for chirp attributes within specs
+	 * a) delta of adjacent max_bins is within range
+	 * b) delta of adjacent deltas are within tolerance
+	 */
+	prev_delta = 0;
+	for (i = 0; i < NUM_DIFFS; i++) {
+		int ddelta = -1;
+		int delta = max_bin[i + 1] - max_bin[i];
+
+		/* ensure gradient is within valid range */
+		if (abs(delta) < BIN_DELTA_MIN || abs(delta) > BIN_DELTA_MAX) {
+			ath_dbg(common, DFS, "CHIRP: invalid delta %d "
+				"in sample %d\n", delta, i);
+			return false;
+		}
+		if (i == 0)
+			goto done;
+		ddelta = delta - prev_delta;
+		if (abs(ddelta) > MAX_DIFF) {
+			ath_dbg(common, DFS, "CHIRP: ddelta %d too high\n",
+				ddelta);
+			return false;
+		}
+done:
+		ath_dbg(common, DFS, "CHIRP - %d: delta=%d, ddelta=%d\n",
+			i, delta, ddelta);
+		prev_delta = delta;
+	}
+	return true;
+}
+/**** end: CHIRP **************************************************************/
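
The gradient test in the loop above is compact enough to restate standalone. A minimal userspace rendering with the same constants (illustrative; the kernel version skips the ddelta check on the first iteration via the `done` label, which the `i > 0` guard below reproduces):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_DIFFS	3
#define BIN_DELTA_MIN	1
#define BIN_DELTA_MAX	10
#define MAX_DIFF	2

/* A pulse chirps if adjacent peak-bin deltas stay within
 * [BIN_DELTA_MIN, BIN_DELTA_MAX] and successive deltas differ
 * by at most MAX_DIFF.
 */
static bool is_chirp(const int max_bin[NUM_DIFFS + 1])
{
	int prev_delta = 0;

	for (int i = 0; i < NUM_DIFFS; i++) {
		int delta = max_bin[i + 1] - max_bin[i];

		if (abs(delta) < BIN_DELTA_MIN || abs(delta) > BIN_DELTA_MAX)
			return false;
		if (i > 0 && abs(delta - prev_delta) > MAX_DIFF)
			return false;
		prev_delta = delta;
	}
	return true;
}

int main(void)
{
	int chirp[] = { 10, 14, 18, 23 };	/* deltas 4, 4, 5: chirp */
	int noise[] = { 10, 14, 10, 30 };	/* delta flips 4 to -4: no chirp */

	printf("%d %d\n", is_chirp(chirp), is_chirp(noise));
	return 0;
}
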
+
 /* convert pulse duration to usecs, considering clock mode */
 static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
 {
@@ -113,12 +264,6 @@
 		return false;
 	}
 
-	/*
-	 * TODO: check chirping pulses
-	 *	 checks for chirping are dependent on the DFS regulatory domain
-	 *	 used, which is yet TBD
-	 */
-
 	/* convert duration to usecs */
 	pe->width = dur_to_usecs(sc->sc_ah, dur);
 	pe->rssi = rssi;
@@ -190,6 +335,16 @@
 	if (!ath9k_postprocess_radar_event(sc, &ard, &pe))
 		return;
 
+	if (pe.width > MIN_CHIRP_PULSE_WIDTH &&
+	    pe.width < MAX_CHIRP_PULSE_WIDTH) {
+		bool is_ctl = !!(ard.pulse_bw_info & PRI_CH_RADAR_FOUND);
+		bool is_ext = !!(ard.pulse_bw_info & EXT_CH_RADAR_FOUND);
+		int clen = datalen - 3;
+		pe.chirp = ath9k_check_chirping(sc, data, clen, is_ctl, is_ext);
+	} else {
+		pe.chirp = false;
+	}
+
 	ath_dbg(common, DFS,
 		"ath9k_dfs_process_phyerr: type=%d, freq=%d, ts=%llu, "
 		"width=%d, rssi=%d, delta_ts=%llu\n",
@@ -198,7 +353,8 @@
 	sc->dfs_prev_pulse_ts = pe.ts;
 	if (ard.pulse_bw_info & PRI_CH_RADAR_FOUND)
 		ath9k_dfs_process_radar_pulse(sc, &pe);
-	if (ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
+	if (IS_CHAN_HT40(ah->curchan) &&
+	    ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
 		pe.freq += IS_CHAN_HT40PLUS(ah->curchan) ? 20 : -20;
 		ath9k_dfs_process_radar_pulse(sc, &pe);
 	}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 39eaf9b..1e84882 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -74,7 +74,7 @@
 
 static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
 {
-	int time_left;
+	unsigned long time_left;
 
 	if (atomic_read(&priv->htc->tgt_ready) > 0) {
 		atomic_dec(&priv->htc->tgt_ready);
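
This and the following int-to-unsigned-long conversions (htc_hst.c, link.c, wmi.c below) all follow from the return type of wait_for_completion_timeout(): it returns unsigned long, 0 on timeout and the remaining jiffies otherwise, so storing it in an int can truncate. A minimal sketch of the corrected pattern:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int example_wait(struct completion *done)
{
	unsigned long time_left;	/* matches the API's return type */

	time_left = wait_for_completion_timeout(done, msecs_to_jiffies(1000));
	if (!time_left)
		return -ETIMEDOUT;	/* 0 means the wait timed out */

	return 0;
}
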
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index dab1323..172a9ff4a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -794,8 +794,11 @@
 		common->ani.longcal_timer = timestamp;
 	}
 
-	/* Short calibration applies only while caldone is false */
-	if (!common->ani.caldone) {
+	/*
+	 * Short calibration applies only while caldone
+	 * is false or -ETIMEDOUT
+	 */
+	if (common->ani.caldone <= 0) {
 		if ((timestamp - common->ani.shortcal_timer) >=
 		    short_cal_interval) {
 			shortcal = true;
@@ -844,7 +847,11 @@
 	*/
 	cal_interval = ATH_LONG_CALINTERVAL;
 	cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
-	if (!common->ani.caldone)
+	/*
+	 * Short calibration applies only while caldone
+	 * is false or -ETIMEDOUT
+	 */
+	if (common->ani.caldone <= 0)
 		cal_interval = min(cal_interval, (u32)short_cal_interval);
 
 	ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
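
Both hunks above encode the same tri-state reading of caldone: positive means the last calibration completed, zero means it has not, and negative (e.g. -ETIMEDOUT) means it failed, so short calibration keeps running in both non-positive cases. A one-line restatement of that assumed convention:

/* >0: done; 0: in progress; <0: failed, keep short calibration going */
static inline bool example_needs_short_cal(int caldone)
{
	return caldone <= 0;
}
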
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index d2408da..2294709 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -146,7 +146,8 @@
 {
 	struct sk_buff *skb;
 	struct htc_config_pipe_msg *cp_msg;
-	int ret, time_left;
+	int ret;
+	unsigned long time_left;
 
 	skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
 	if (!skb) {
@@ -184,7 +185,8 @@
 {
 	struct sk_buff *skb;
 	struct htc_comp_msg *comp_msg;
-	int ret = 0, time_left;
+	int ret = 0;
+	unsigned long time_left;
 
 	skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
 	if (!skb) {
@@ -236,7 +238,8 @@
 	struct sk_buff *skb;
 	struct htc_endpoint *endpoint;
 	struct htc_conn_svc_msg *conn_msg;
-	int ret, time_left;
+	int ret;
+	unsigned long time_left;
 
 	/* Find an available endpoint */
 	endpoint = get_next_avail_ep(target->endpoint);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index a31a680..1dd0339 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -3186,6 +3186,7 @@
 	{ AR_SREV_VERSION_9550,         "9550" },
 	{ AR_SREV_VERSION_9565,         "9565" },
 	{ AR_SREV_VERSION_9531,         "9531" },
+	{ AR_SREV_VERSION_9561,         "9561" },
 };
 
 /* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index eff0e53..57f95f2 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -736,13 +736,14 @@
 				 BIT(NL80211_IFTYPE_P2P_CLIENT) |
 				 BIT(NL80211_IFTYPE_P2P_GO) },
 	{ .max = 1,	.types = BIT(NL80211_IFTYPE_ADHOC) },
+	{ .max = 1,	.types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
 };
 
 static const struct ieee80211_iface_combination if_comb_multi[] = {
 	{
 		.limits = if_limits_multi,
 		.n_limits = ARRAY_SIZE(if_limits_multi),
-		.max_interfaces = 2,
+		.max_interfaces = 3,
 		.num_different_channels = 2,
 		.beacon_int_infra_match = true,
 	},
@@ -826,6 +827,7 @@
 	ieee80211_hw_set(hw, SIGNAL_DBM);
 	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
 	ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
 
 	if (ath9k_ps_enable)
 		ieee80211_hw_set(hw, SUPPORTS_PS);
@@ -855,6 +857,10 @@
 			BIT(NL80211_IFTYPE_MESH_POINT) |
 			BIT(NL80211_IFTYPE_WDS);
 
+		if (ath9k_is_chanctx_enabled())
+			hw->wiphy->interface_modes |=
+					BIT(NL80211_IFTYPE_P2P_DEVICE);
+
 			hw->wiphy->iface_combinations = if_comb;
 			hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
 	}
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 90631d7..5ad0fee 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -172,7 +172,7 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_tx_control txctl;
-	int time_left;
+	unsigned long time_left;
 
 	memset(&txctl, 0, sizeof(txctl));
 	txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE];
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index cfd45cb..c27143b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1459,13 +1459,18 @@
 				   u64 multicast)
 {
 	struct ath_softc *sc = hw->priv;
+	struct ath_chanctx *ctx;
 	u32 rfilt;
 
 	changed_flags &= SUPPORTED_FILTERS;
 	*total_flags &= SUPPORTED_FILTERS;
 
 	spin_lock_bh(&sc->chan_lock);
-	sc->cur_chan->rxfilter = *total_flags;
+	ath_for_each_chanctx(sc, ctx)
+		ctx->rxfilter = *total_flags;
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+	sc->offchannel.chan.rxfilter = *total_flags;
+#endif
 	spin_unlock_bh(&sc->chan_lock);
 
 	ath9k_ps_wakeup(sc);
@@ -2246,7 +2251,7 @@
 
 		del_timer_sync(&sc->offchannel.timer);
 		if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
-			ath_roc_complete(sc, true);
+			ath_roc_complete(sc, ATH_ROC_COMPLETE_ABORT);
 	}
 
 	if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
@@ -2355,7 +2360,7 @@
 
 	if (sc->offchannel.roc_vif) {
 		if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
-			ath_roc_complete(sc, true);
+			ath_roc_complete(sc, ATH_ROC_COMPLETE_CANCEL);
 	}
 
 	mutex_unlock(&sc->mutex);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 6c75fb1..d3189da 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -491,10 +491,9 @@
 
 	if (!(ah->ah_flags & AH_UNPLUGGED) &&
 	    unlikely(!stopped)) {
-		ath_err(ath9k_hw_common(sc->sc_ah),
-			"Could not stop RX, we could be "
-			"confusing the DMA engine when we start RX up\n");
-		ATH_DBG_WARN_ON_ONCE(!stopped);
+		ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+			"Failed to stop Rx DMA\n");
+		RESET_STAT_INC(sc, RESET_RX_DMA_ERROR);
 	}
 	return stopped && !reset;
 }
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index ca533b4..9c16e2a 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -299,7 +299,8 @@
 		       sizeof(struct wmi_cmd_hdr);
 	struct sk_buff *skb;
 	u8 *data;
-	int time_left, ret = 0;
+	unsigned long time_left;
+	int ret = 0;
 
 	if (ah->ah_flags & AH_UNPLUGGED)
 		return 0;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 3ad79bb..3e3dac3 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -106,7 +106,6 @@
 static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
 			     struct ath_atx_tid *tid)
 {
-	struct ath_atx_ac *ac = tid->ac;
 	struct list_head *list;
 	struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
 	struct ath_chanctx *ctx = avp->chanctx;
@@ -114,19 +113,9 @@
 	if (!ctx)
 		return;
 
-	if (tid->sched)
-		return;
-
-	tid->sched = true;
-	list_add_tail(&tid->list, &ac->tid_q);
-
-	if (ac->sched)
-		return;
-
-	ac->sched = true;
-
 	list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
-	list_add_tail(&ac->list, list);
+	if (list_empty(&tid->list))
+		list_add_tail(&tid->list, list);
 }
 
 static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
@@ -208,7 +197,7 @@
 static void
 ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-	struct ath_txq *txq = tid->ac->txq;
+	struct ath_txq *txq = tid->txq;
 	struct ieee80211_tx_info *tx_info;
 	struct sk_buff *skb, *tskb;
 	struct ath_buf *bf;
@@ -237,7 +226,7 @@
 
 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-	struct ath_txq *txq = tid->ac->txq;
+	struct ath_txq *txq = tid->txq;
 	struct sk_buff *skb;
 	struct ath_buf *bf;
 	struct list_head bf_head;
@@ -644,7 +633,7 @@
 			ath_tx_queue_tid(sc, txq, tid);
 
 			if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
-				tid->ac->clear_ps_filter = true;
+				tid->clear_ps_filter = true;
 		}
 	}
 
@@ -734,7 +723,7 @@
 	struct ieee80211_tx_rate *rates;
 	u32 max_4ms_framelen, frmlen;
 	u16 aggr_limit, bt_aggr_limit, legacy = 0;
-	int q = tid->ac->txq->mac80211_qnum;
+	int q = tid->txq->mac80211_qnum;
 	int i;
 
 	skb = bf->bf_mpdu;
@@ -1471,8 +1460,8 @@
 	if (list_empty(&bf_q))
 		return false;
 
-	if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
-		tid->ac->clear_ps_filter = false;
+	if (tid->clear_ps_filter || tid->an->no_ps_filter) {
+		tid->clear_ps_filter = false;
 		tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
 	}
 
@@ -1491,7 +1480,7 @@
 
 	an = (struct ath_node *)sta->drv_priv;
 	txtid = ATH_AN_2_TID(an, tid);
-	txq = txtid->ac->txq;
+	txq = txtid->txq;
 
 	ath_txq_lock(sc, txq);
 
@@ -1525,7 +1514,7 @@
 {
 	struct ath_node *an = (struct ath_node *)sta->drv_priv;
 	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
-	struct ath_txq *txq = txtid->ac->txq;
+	struct ath_txq *txq = txtid->txq;
 
 	ath_txq_lock(sc, txq);
 	txtid->active = false;
@@ -1538,7 +1527,6 @@
 		       struct ath_node *an)
 {
 	struct ath_atx_tid *tid;
-	struct ath_atx_ac *ac;
 	struct ath_txq *txq;
 	bool buffered;
 	int tidno;
@@ -1546,25 +1534,18 @@
 	for (tidno = 0, tid = &an->tid[tidno];
 	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
 
-		ac = tid->ac;
-		txq = ac->txq;
+		txq = tid->txq;
 
 		ath_txq_lock(sc, txq);
 
-		if (!tid->sched) {
+		if (list_empty(&tid->list)) {
 			ath_txq_unlock(sc, txq);
 			continue;
 		}
 
 		buffered = ath_tid_has_buffered(tid);
 
-		tid->sched = false;
-		list_del(&tid->list);
-
-		if (ac->sched) {
-			ac->sched = false;
-			list_del(&ac->list);
-		}
+		list_del_init(&tid->list);
 
 		ath_txq_unlock(sc, txq);
 
@@ -1575,18 +1556,16 @@
 void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
 {
 	struct ath_atx_tid *tid;
-	struct ath_atx_ac *ac;
 	struct ath_txq *txq;
 	int tidno;
 
 	for (tidno = 0, tid = &an->tid[tidno];
 	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
 
-		ac = tid->ac;
-		txq = ac->txq;
+		txq = tid->txq;
 
 		ath_txq_lock(sc, txq);
-		ac->clear_ps_filter = true;
+		tid->clear_ps_filter = true;
 
 		if (ath_tid_has_buffered(tid)) {
 			ath_tx_queue_tid(sc, txq, tid);
@@ -1606,7 +1585,7 @@
 
 	an = (struct ath_node *)sta->drv_priv;
 	tid = ATH_AN_2_TID(an, tidno);
-	txq = tid->ac->txq;
+	txq = tid->txq;
 
 	ath_txq_lock(sc, txq);
 
@@ -1645,7 +1624,7 @@
 
 		tid = ATH_AN_2_TID(an, i);
 
-		ath_txq_lock(sc, tid->ac->txq);
+		ath_txq_lock(sc, tid->txq);
 		while (nframes > 0) {
 			bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
 			if (!bf)
@@ -1669,7 +1648,7 @@
 			if (an->sta && !ath_tid_has_buffered(tid))
 				ieee80211_sta_set_buffered(an->sta, i, false);
 		}
-		ath_txq_unlock_complete(sc, tid->ac->txq);
+		ath_txq_unlock_complete(sc, tid->txq);
 	}
 
 	if (list_empty(&bf_q))
@@ -1883,8 +1862,11 @@
 			npend |= BIT(i);
 	}
 
-	if (npend)
-		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
+	if (npend) {
+		RESET_STAT_INC(sc, RESET_TX_DMA_ERROR);
+		ath_dbg(common, RESET,
+			"Failed to stop TX DMA, queues=0x%03x!\n", npend);
+	}
 
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 		if (!ATH_TXQ_SETUP(sc, i))
@@ -1915,9 +1897,8 @@
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 {
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-	struct ath_atx_ac *ac, *last_ac;
 	struct ath_atx_tid *tid, *last_tid;
-	struct list_head *ac_list;
+	struct list_head *tid_list;
 	bool sent = false;
 
 	if (txq->mac80211_qnum < 0)
@@ -1927,63 +1908,45 @@
 		return;
 
 	spin_lock_bh(&sc->chan_lock);
-	ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
+	tid_list = &sc->cur_chan->acq[txq->mac80211_qnum];
 
-	if (list_empty(ac_list)) {
+	if (list_empty(tid_list)) {
 		spin_unlock_bh(&sc->chan_lock);
 		return;
 	}
 
 	rcu_read_lock();
 
-	last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
-	while (!list_empty(ac_list)) {
+	last_tid = list_entry(tid_list->prev, struct ath_atx_tid, list);
+	while (!list_empty(tid_list)) {
 		bool stop = false;
 
 		if (sc->cur_chan->stopped)
 			break;
 
-		ac = list_first_entry(ac_list, struct ath_atx_ac, list);
-		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
-		list_del(&ac->list);
-		ac->sched = false;
+		tid = list_first_entry(tid_list, struct ath_atx_tid, list);
+		list_del_init(&tid->list);
 
-		while (!list_empty(&ac->tid_q)) {
+		if (ath_tx_sched_aggr(sc, txq, tid, &stop))
+			sent = true;
 
-			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
-					       list);
-			list_del(&tid->list);
-			tid->sched = false;
-
-			if (ath_tx_sched_aggr(sc, txq, tid, &stop))
-				sent = true;
-
-			/*
-			 * add tid to round-robin queue if more frames
-			 * are pending for the tid
-			 */
-			if (ath_tid_has_buffered(tid))
-				ath_tx_queue_tid(sc, txq, tid);
-
-			if (stop || tid == last_tid)
-				break;
-		}
-
-		if (!list_empty(&ac->tid_q) && !ac->sched) {
-			ac->sched = true;
-			list_add_tail(&ac->list, ac_list);
-		}
+		/*
+		 * add tid to round-robin queue if more frames
+		 * are pending for the tid
+		 */
+		if (ath_tid_has_buffered(tid))
+			ath_tx_queue_tid(sc, txq, tid);
 
 		if (stop)
 			break;
 
-		if (ac == last_ac) {
+		if (tid == last_tid) {
 			if (!sent)
 				break;
 
 			sent = false;
-			last_ac = list_entry(ac_list->prev,
-					     struct ath_atx_ac, list);
+			last_tid = list_entry(tid_list->prev,
+					      struct ath_atx_tid, list);
 		}
 	}
 
@@ -2373,10 +2336,10 @@
 		txq = sc->tx.uapsdq;
 		ath_txq_lock(sc, txq);
 	} else if (txctl->an && queue) {
-		WARN_ON(tid->ac->txq != txctl->txq);
+		WARN_ON(tid->txq != txctl->txq);
 
 		if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
-			tid->ac->clear_ps_filter = true;
+			tid->clear_ps_filter = true;
 
 		/*
 		 * Add this frame to software queue for scheduling later
@@ -2470,8 +2433,8 @@
 	bf = list_first_entry(&bf_q, struct ath_buf, list);
 	hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
 
-	if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
-		hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
+	if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) {
+		hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
 		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
 			sizeof(*hdr), DMA_TO_DEVICE);
 	}
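
The cpu_to_le16() fix above matters because ieee80211_hdr.frame_control is declared __le16: masking it with a host-order constant only happens to work on little-endian machines. The portable idiom, as a minimal sketch:

#include <linux/ieee80211.h>

static void example_clear_more_data(struct ieee80211_hdr *hdr)
{
	/* both operands are little-endian, correct on any host */
	hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
}
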
@@ -2870,7 +2833,6 @@
 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
 {
 	struct ath_atx_tid *tid;
-	struct ath_atx_ac *ac;
 	int tidno, acno;
 
 	for (tidno = 0, tid = &an->tid[tidno];
@@ -2881,26 +2843,18 @@
 		tid->seq_start = tid->seq_next = 0;
 		tid->baw_size  = WME_MAX_BA;
 		tid->baw_head  = tid->baw_tail = 0;
-		tid->sched     = false;
 		tid->active	   = false;
+		tid->clear_ps_filter = true;
 		__skb_queue_head_init(&tid->buf_q);
 		__skb_queue_head_init(&tid->retry_q);
+		INIT_LIST_HEAD(&tid->list);
 		acno = TID_TO_WME_AC(tidno);
-		tid->ac = &an->ac[acno];
-	}
-
-	for (acno = 0, ac = &an->ac[acno];
-	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
-		ac->sched    = false;
-		ac->clear_ps_filter = true;
-		ac->txq = sc->tx.txq_map[acno];
-		INIT_LIST_HEAD(&ac->tid_q);
+		tid->txq = sc->tx.txq_map[acno];
 	}
 }
 
 void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
 {
-	struct ath_atx_ac *ac;
 	struct ath_atx_tid *tid;
 	struct ath_txq *txq;
 	int tidno;
@@ -2908,20 +2862,12 @@
 	for (tidno = 0, tid = &an->tid[tidno];
 	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
 
-		ac = tid->ac;
-		txq = ac->txq;
+		txq = tid->txq;
 
 		ath_txq_lock(sc, txq);
 
-		if (tid->sched) {
-			list_del(&tid->list);
-			tid->sched = false;
-		}
-
-		if (ac->sched) {
-			list_del(&ac->list);
-			tid->ac->sched = false;
-		}
+		if (!list_empty(&tid->list))
+			list_del_init(&tid->list);
 
 		ath_tid_drain(sc, txq, tid);
 		tid->active = false;
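
The entire ath_atx_ac removal in this file rests on one idiom: a TID's "scheduled" state is now simply its membership in the per-AC list, and using list_del_init() instead of list_del() keeps the node self-linked so that list_empty() remains a valid test afterwards. A minimal sketch of the pattern:

#include <linux/list.h>

struct example_tid {
	struct list_head list;	/* INIT_LIST_HEAD()-ed at node init */
};

static void example_schedule(struct list_head *queue, struct example_tid *tid)
{
	if (list_empty(&tid->list))	/* not queued yet */
		list_add_tail(&tid->list, queue);
}

static void example_unschedule(struct example_tid *tid)
{
	/* re-links the node to itself, so list_empty() is true again */
	list_del_init(&tid->list);
}
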
diff --git a/drivers/net/wireless/ath/debug.c b/drivers/net/wireless/ath/debug.c
index 508eccf..d59d83e 100644
--- a/drivers/net/wireless/ath/debug.c
+++ b/drivers/net/wireless/ath/debug.c
@@ -40,6 +40,8 @@
 		return "P2P-CLIENT";
 	case NL80211_IFTYPE_P2P_GO:
 		return "P2P-GO";
+	case NL80211_IFTYPE_OCB:
+		return "OCB";
 	default:
 		return "UNKNOWN";
 	}
diff --git a/drivers/net/wireless/ath/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c
index 1b5ad19..cc5c592 100644
--- a/drivers/net/wireless/ath/dfs_pri_detector.c
+++ b/drivers/net/wireless/ath/dfs_pri_detector.c
@@ -273,7 +273,7 @@
 				tmp_false_count++;
 			}
 		}
-		if (ps.count < min_count)
+		if (ps.count <= min_count)
 			/* did not reach minimum count, drop sequence */
 			continue;
 
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 050506f..64b4326 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -12,6 +12,7 @@
 wil6210-y += rx_reorder.o
 wil6210-y += ioctl.o
 wil6210-y += fw.o
+wil6210-y += pm.o
 wil6210-y += pmc.o
 wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
 wil6210-y += wil_platform.o
diff --git a/drivers/net/wireless/ath/wil6210/boot_loader.h b/drivers/net/wireless/ath/wil6210/boot_loader.h
new file mode 100644
index 0000000..c131b5e
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/boot_loader.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* This file contains the definitions for the boot loader
+ * for the Qualcomm "Sparrow" 60 Gigabit wireless solution.
+ */
+#ifndef BOOT_LOADER_EXPORT_H_
+#define BOOT_LOADER_EXPORT_H_
+
+struct bl_dedicated_registers_v1 {
+	__le32	boot_loader_ready;		/* 0x880A3C driver polls
+						 * this dword until the BL
+						 * sets it to 1 (initial
+						 * value should be 0)
+						 */
+	__le32	boot_loader_struct_version;	/* 0x880A40 BL struct ver. */
+	__le16	rf_type;			/* 0x880A44 connected RF ID */
+	__le16	rf_status;			/* 0x880A46 RF status,
+						 * 0 is OK else error
+						 */
+	__le32	baseband_type;			/* 0x880A48 board type ID */
+	u8	mac_address[6];			/* 0x880A4c BL mac address */
+	u8	bl_version_major;		/* 0x880A52 BL ver. major */
+	u8	bl_version_minor;		/* 0x880A53 BL ver. minor */
+	__le16	bl_version_subminor;		/* 0x880A54 BL ver. subminor */
+	__le16	bl_version_build;		/* 0x880A56 BL ver. build */
+	/* valid only for version 2 and above */
+	__le32  bl_assert_code;         /* 0x880A58 BL Assert code */
+	__le32  bl_assert_blink;        /* 0x880A5C BL Assert Branch */
+	__le32  bl_reserved[22];        /* 0x880A60 - 0x880AB4 */
+	__le32  bl_magic_number;        /* 0x880AB8 BL Magic number */
+} __packed;
+
+/* the following struct is the version 0 struct */
+
+struct bl_dedicated_registers_v0 {
+	__le32	boot_loader_ready;		/* 0x880A3C driver polls
+						 * this dword until the BL
+						 * sets it to 1 (initial
+						 * value should be 0)
+						 */
+#define BL_READY (1)	/* ready indication */
+	__le32	boot_loader_struct_version;	/* 0x880A40 BL struct ver. */
+	__le32	rf_type;			/* 0x880A44 connected RF ID */
+	__le32	baseband_type;			/* 0x880A48 board type ID */
+	u8	mac_address[6];			/* 0x880A4c BL mac address */
+} __packed;
+
+#endif /* BOOT_LOADER_EXPORT_H_ */
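
Both structs are __packed little-endian views of fixed device addresses, so a host would typically snapshot them out of mapped BAR space and byte-swap fields on access. A hypothetical sketch (BL_BASE is illustrative; the real offset handling lives in the driver, not in this header):

#include <linux/io.h>
#include <linux/printk.h>

#define BL_BASE 0x880A3C	/* illustrative, per the field comments */

static void example_read_bl_v1(void __iomem *csr,
			       struct bl_dedicated_registers_v1 *bl)
{
	memcpy_fromio(bl, csr + BL_BASE, sizeof(*bl));

	pr_info("BL %d.%d, rf_status %d\n",
		bl->bl_version_major, bl->bl_version_minor,
		le16_to_cpu(bl->rf_status));
}
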
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index c79cfe0..20d07ef 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -336,12 +336,9 @@
 	else
 		wil_dbg_misc(wil, "Scan has no IE's\n");
 
-	rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len,
-			request->ie);
-	if (rc) {
-		wil_err(wil, "Aborting scan, set_ie failed: %d\n", rc);
+	rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
+	if (rc)
 		goto out;
-	}
 
 	rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
 			cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
@@ -462,10 +459,8 @@
 	 * ies in FW.
 	 */
 	rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
-	if (rc) {
-		wil_err(wil, "WMI_SET_APPIE_CMD failed\n");
+	if (rc)
 		goto out;
-	}
 
 	/* WMI_CONNECT_CMD */
 	memset(&conn, 0, sizeof(conn));
@@ -722,113 +717,58 @@
 {
 	struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
 	size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
-	int rc = 0;
 
 	if (bcon->probe_resp_len <= hlen)
 		return 0;
 
+/* always use IEs from the full probe frame, they have more info,
+ * notably RSN
+ */
+	bcon->proberesp_ies = f->u.probe_resp.variable;
+	bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
 	if (!bcon->assocresp_ies) {
-		bcon->assocresp_ies = f->u.probe_resp.variable;
-		bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
-		rc = 1;
+		bcon->assocresp_ies = bcon->proberesp_ies;
+		bcon->assocresp_ies_len = bcon->proberesp_ies_len;
 	}
 
+	return 1;
+}
+
+/* internal functions for device reset and starting AP */
+static int _wil_cfg80211_set_ies(struct wiphy *wiphy,
+				 struct cfg80211_beacon_data *bcon)
+{
+	int rc;
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+	rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
+			bcon->proberesp_ies);
+	if (rc)
+		return rc;
+
+	rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
+			bcon->assocresp_ies);
+#if 0 /* to use beacon IE's, remove this #if 0 */
+	if (rc)
+		return rc;
+
+	rc = wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->tail_len, bcon->tail);
+#endif
+
 	return rc;
 }
 
-static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
-				      struct net_device *ndev,
-				      struct cfg80211_beacon_data *bcon)
+static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
+				  struct net_device *ndev,
+				  const u8 *ssid, size_t ssid_len, u32 privacy,
+				  int bi, u8 chan,
+				  struct cfg80211_beacon_data *bcon,
+				  u8 hidden_ssid)
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-	struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
-	size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
-	const u8 *pr_ies = NULL;
-	size_t pr_ies_len = 0;
 	int rc;
-
-	wil_dbg_misc(wil, "%s()\n", __func__);
-	wil_print_bcon_data(bcon);
-
-	if (bcon->probe_resp_len > hlen) {
-		pr_ies = f->u.probe_resp.variable;
-		pr_ies_len = bcon->probe_resp_len - hlen;
-	}
-
-	if (wil_fix_bcon(wil, bcon)) {
-		wil_dbg_misc(wil, "Fixed bcon\n");
-		wil_print_bcon_data(bcon);
-	}
-
-	/* FW do not form regular beacon, so bcon IE's are not set
-	 * For the DMG bcon, when it will be supported, bcon IE's will
-	 * be reused; add something like:
-	 * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
-	 * bcon->beacon_ies);
-	 */
-	rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
-	if (rc) {
-		wil_err(wil, "set_ie(PROBE_RESP) failed\n");
-		return rc;
-	}
-
-	rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP,
-			bcon->assocresp_ies_len,
-			bcon->assocresp_ies);
-	if (rc) {
-		wil_err(wil, "set_ie(ASSOC_RESP) failed\n");
-		return rc;
-	}
-
-	return 0;
-}
-
-static int wil_cfg80211_start_ap(struct wiphy *wiphy,
-				 struct net_device *ndev,
-				 struct cfg80211_ap_settings *info)
-{
-	int rc = 0;
-	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 	struct wireless_dev *wdev = ndev->ieee80211_ptr;
-	struct ieee80211_channel *channel = info->chandef.chan;
-	struct cfg80211_beacon_data *bcon = &info->beacon;
-	struct cfg80211_crypto_settings *crypto = &info->crypto;
 	u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
-	struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
-	size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
-	const u8 *pr_ies = NULL;
-	size_t pr_ies_len = 0;
-	u8 hidden_ssid;
-
-	wil_dbg_misc(wil, "%s()\n", __func__);
-
-	if (!channel) {
-		wil_err(wil, "AP: No channel???\n");
-		return -EINVAL;
-	}
-
-	wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
-		     channel->center_freq, info->privacy ? "secure" : "open");
-	wil_dbg_misc(wil, "Privacy: %d auth_type %d\n",
-		     info->privacy, info->auth_type);
-	wil_dbg_misc(wil, "Hidden SSID mode: %d\n",
-		     info->hidden_ssid);
-	wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
-		     info->dtim_period);
-	print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
-			     info->ssid, info->ssid_len);
-	wil_print_bcon_data(bcon);
-	wil_print_crypto(wil, crypto);
-
-	if (bcon->probe_resp_len > hlen) {
-		pr_ies = f->u.probe_resp.variable;
-		pr_ies_len = bcon->probe_resp_len - hlen;
-	}
-
-	if (wil_fix_bcon(wil, bcon)) {
-		wil_dbg_misc(wil, "Fixed bcon\n");
-		wil_print_bcon_data(bcon);
-	}
 
 	wil_set_recovery_state(wil, fw_recovery_idle);
 
@@ -839,24 +779,96 @@
 	if (rc)
 		goto out;
 
-	rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
+	rc = wmi_set_ssid(wil, ssid_len, ssid);
 	if (rc)
 		goto out;
 
-	/* IE's */
-	/* bcon 'head IE's are not relevant for 60g band */
-	/*
-	 * FW do not form regular beacon, so bcon IE's are not set
-	 * For the DMG bcon, when it will be supported, bcon IE's will
-	 * be reused; add something like:
-	 * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
-	 * bcon->beacon_ies);
-	 */
-	wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
-	wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
-		   bcon->assocresp_ies);
+	rc = _wil_cfg80211_set_ies(wiphy, bcon);
+	if (rc)
+		goto out;
 
-	wil->privacy = info->privacy;
+	wil->privacy = privacy;
+	wil->channel = chan;
+	wil->hidden_ssid = hidden_ssid;
+
+	netif_carrier_on(ndev);
+
+	rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid);
+	if (rc)
+		goto err_pcp_start;
+
+	rc = wil_bcast_init(wil);
+	if (rc)
+		goto err_bcast;
+
+	goto out; /* success */
+
+err_bcast:
+	wmi_pcp_stop(wil);
+err_pcp_start:
+	netif_carrier_off(ndev);
+out:
+	mutex_unlock(&wil->mutex);
+	return rc;
+}
+
+static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
+				      struct net_device *ndev,
+				      struct cfg80211_beacon_data *bcon)
+{
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	int rc;
+	u32 privacy = 0;
+
+	wil_dbg_misc(wil, "%s()\n", __func__);
+	wil_print_bcon_data(bcon);
+
+	if (wil_fix_bcon(wil, bcon)) {
+		wil_dbg_misc(wil, "Fixed bcon\n");
+		wil_print_bcon_data(bcon);
+	}
+
+	if (bcon->proberesp_ies &&
+	    cfg80211_find_ie(WLAN_EID_RSN, bcon->proberesp_ies,
+			     bcon->proberesp_ies_len))
+		privacy = 1;
+
+	/* in case privacy has changed, need to restart the AP */
+	if (wil->privacy != privacy) {
+		struct wireless_dev *wdev = ndev->ieee80211_ptr;
+
+		wil_dbg_misc(wil, "privacy changed %d=>%d. Restarting AP\n",
+			     wil->privacy, privacy);
+
+		rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid,
+					    wdev->ssid_len, privacy,
+					    wdev->beacon_interval,
+					    wil->channel, bcon,
+					    wil->hidden_ssid);
+	} else {
+		rc = _wil_cfg80211_set_ies(wiphy, bcon);
+	}
+
+	return rc;
+}
+
+static int wil_cfg80211_start_ap(struct wiphy *wiphy,
+				 struct net_device *ndev,
+				 struct cfg80211_ap_settings *info)
+{
+	int rc;
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	struct ieee80211_channel *channel = info->chandef.chan;
+	struct cfg80211_beacon_data *bcon = &info->beacon;
+	struct cfg80211_crypto_settings *crypto = &info->crypto;
+	u8 hidden_ssid;
+
+	wil_dbg_misc(wil, "%s()\n", __func__);
+
+	if (!channel) {
+		wil_err(wil, "AP: No channel???\n");
+		return -EINVAL;
+	}
 
 	switch (info->hidden_ssid) {
 	case NL80211_HIDDEN_SSID_NOT_IN_USE:
@@ -872,28 +884,32 @@
 		break;
 
 	default:
-		rc = -EOPNOTSUPP;
-		goto out;
+		wil_err(wil, "AP: Invalid hidden SSID %d\n", info->hidden_ssid);
+		return -EOPNOTSUPP;
+	}
+	wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
+		     channel->center_freq, info->privacy ? "secure" : "open");
+	wil_dbg_misc(wil, "Privacy: %d auth_type %d\n",
+		     info->privacy, info->auth_type);
+	wil_dbg_misc(wil, "Hidden SSID mode: %d\n",
+		     info->hidden_ssid);
+	wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
+		     info->dtim_period);
+	print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
+			     info->ssid, info->ssid_len);
+	wil_print_bcon_data(bcon);
+	wil_print_crypto(wil, crypto);
+
+	if (wil_fix_bcon(wil, bcon)) {
+		wil_dbg_misc(wil, "Fixed bcon\n");
+		wil_print_bcon_data(bcon);
 	}
 
-	netif_carrier_on(ndev);
+	rc = _wil_cfg80211_start_ap(wiphy, ndev,
+				    info->ssid, info->ssid_len, info->privacy,
+				    info->beacon_interval, channel->hw_value,
+				    bcon, hidden_ssid);
 
-	rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
-			   channel->hw_value, hidden_ssid);
-	if (rc)
-		goto err_pcp_start;
-
-	rc = wil_bcast_init(wil);
-	if (rc)
-		goto err_bcast;
-
-	goto out; /* success */
-err_bcast:
-	wmi_pcp_stop(wil);
-err_pcp_start:
-	netif_carrier_off(ndev);
-out:
-	mutex_unlock(&wil->mutex);
 	return rc;
 }
 
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 75219a1b..613ca2b 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -62,7 +62,7 @@
 	seq_printf(s, "  swhead = %d\n", vring->swhead);
 	seq_printf(s, "  hwtail = [0x%08x] -> ", vring->hwtail);
 	if (x) {
-		v = ioread32(x);
+		v = readl(x);
 		seq_printf(s, "0x%08x = %d\n", v, v);
 	} else {
 		seq_puts(s, "???\n");
@@ -268,7 +268,7 @@
 
 static int wil_debugfs_iomem_x32_set(void *data, u64 val)
 {
-	iowrite32(val, (void __iomem *)data);
+	writel(val, (void __iomem *)data);
 	wmb(); /* make sure write propagated to HW */
 
 	return 0;
@@ -276,7 +276,7 @@
 
 static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
 {
-	*val = ioread32((void __iomem *)data);
+	*val = readl((void __iomem *)data);
 
 	return 0;
 }
@@ -306,7 +306,7 @@
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
-			wil_debugfs_ulong_set, "%llu\n");
+			wil_debugfs_ulong_set, "0x%llx\n");
 
 static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode,
 					       struct dentry *parent,
@@ -477,7 +477,7 @@
 	void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
 
 	if (a)
-		seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, ioread32(a));
+		seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a));
 	else
 		seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
 
@@ -1344,6 +1344,7 @@
 {
 	int i;
 	u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
+	unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old;
 
 	seq_printf(s, "([%2d] %3d TU) 0x%03x [", r->buf_size, r->timeout,
 		   r->head_seq_num);
@@ -1353,7 +1354,10 @@
 		else
 			seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
 	}
-	seq_printf(s, "] last drop 0x%03x\n", r->ssn_last_drop);
+	seq_printf(s,
+		   "] total %llu drop %llu (dup %llu + old %llu) last 0x%03x\n",
+		   r->total, drop_dup + drop_old, drop_dup, drop_old,
+		   r->ssn_last_drop);
 }
 
 static int wil_sta_debugfs_show(struct seq_file *s, void *data)
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index 0ea695f..7053b62 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -50,19 +50,13 @@
 
 	wil_dbg_misc(wil, "%s()\n", __func__);
 
-	tx_itr_en = ioread32(wil->csr +
-			     HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
+	tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
 	if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
-		tx_itr_val =
-			ioread32(wil->csr +
-				 HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
+		tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH);
 
-	rx_itr_en = ioread32(wil->csr +
-			     HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
+	rx_itr_en = wil_r(wil, RGF_DMA_ITR_RX_CNT_CTL);
 	if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
-		rx_itr_val =
-			ioread32(wil->csr +
-				 HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
+		rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH);
 
 	cp->tx_coalesce_usecs = tx_itr_val;
 	cp->rx_coalesce_usecs = rx_itr_val;
diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c
index 4428345..82aae2d 100644
--- a/drivers/net/wireless/ath/wil6210/fw.c
+++ b/drivers/net/wireless/ath/wil6210/fw.c
@@ -22,16 +22,6 @@
 MODULE_FIRMWARE(WIL_FW_NAME);
 MODULE_FIRMWARE(WIL_FW2_NAME);
 
-/* target operations */
-/* register read */
-#define R(a) ioread32(wil->csr + HOSTADDR(a))
-/* register write. wmb() to make sure it is completed */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-/* register set = read, OR, write */
-#define S(a, v) W(a, R(a) | v)
-/* register clear = read, AND with inverted, write */
-#define C(a, v) W(a, R(a) & ~v)
-
 static
 void wil_memset_toio_32(volatile void __iomem *dst, u32 val,
 			size_t count)
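
The per-file R()/W()/S()/C() macros deleted here (and again in main.c and interrupt.c below) give way to shared wil_r()/wil_w() accessors whose definitions are not part of this diff; reconstructed from the call sites, they plausibly look like:

/* Assumed shape only: the real helpers live in wil6210.h */
static inline u32 wil_r(struct wil6210_priv *wil, u32 reg)
{
	return readl(wil->csr + HOSTADDR(reg));
}

static inline void wil_w(struct wil6210_priv *wil, u32 reg, u32 val)
{
	writel(val, wil->csr + HOSTADDR(reg));
}
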
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 157f5ef..d30657e 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -221,12 +221,12 @@
 
 		FW_ADDR_CHECK(dst, block[i].addr, "address");
 
-		x = ioread32(dst);
+		x = readl(dst);
 		y = (x & m) | (v & ~m);
 		wil_dbg_fw(wil, "write [0x%08x] <== 0x%08x "
 			   "(old 0x%08x val 0x%08x mask 0x%08x)\n",
 			   le32_to_cpu(block[i].addr), y, x, v, m);
-		iowrite32(y, dst);
+		writel(y, dst);
 		wmb(); /* finish before processing next record */
 	}
 
@@ -239,18 +239,18 @@
 {
 	unsigned delay = 0;
 
-	iowrite32(a, gwa_addr);
-	iowrite32(gw_cmd, gwa_cmd);
+	writel(a, gwa_addr);
+	writel(gw_cmd, gwa_cmd);
 	wmb(); /* finish before activate gw */
 
-	iowrite32(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
+	writel(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
 	do {
 		udelay(1); /* typical time is few usec */
 		if (delay++ > 100) {
 			wil_err_fw(wil, "gw timeout\n");
 			return -EINVAL;
 		}
-	} while (ioread32(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
+	} while (readl(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
 
 	return 0;
 }
@@ -305,7 +305,7 @@
 		wil_dbg_fw(wil, "  gw write[%3d] [0x%08x] <== 0x%08x\n",
 			   i, a, v);
 
-		iowrite32(v, gwa_val);
+		writel(v, gwa_val);
 		rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
 		if (rc)
 			return rc;
@@ -372,7 +372,7 @@
 				sizeof(v), false);
 
 		for (k = 0; k < ARRAY_SIZE(block->value); k++)
-			iowrite32(v[k], gwa_val[k]);
+			writel(v[k], gwa_val[k]);
 		rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
 		if (rc)
 			return rc;
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 28ffc18..a371f036 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -61,13 +61,13 @@
 
 static inline void wil_icr_clear(u32 x, void __iomem *addr)
 {
-	iowrite32(x, addr);
+	writel(x, addr);
 }
 #endif /* defined(CONFIG_WIL6210_ISR_COR) */
 
 static inline u32 wil_ioread32_and_clear(void __iomem *addr)
 {
-	u32 x = ioread32(addr);
+	u32 x = readl(addr);
 
 	wil_icr_clear(x, addr);
 
@@ -76,54 +76,47 @@
 
 static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
 {
-	iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
-		  HOSTADDR(RGF_DMA_EP_TX_ICR) +
-		  offsetof(struct RGF_ICR, IMS));
+	wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMS),
+	      WIL6210_IRQ_DISABLE);
 }
 
 static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
 {
-	iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
-		  HOSTADDR(RGF_DMA_EP_RX_ICR) +
-		  offsetof(struct RGF_ICR, IMS));
+	wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS),
+	      WIL6210_IRQ_DISABLE);
 }
 
 static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
 {
-	iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
-		  HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-		  offsetof(struct RGF_ICR, IMS));
+	wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
+	      WIL6210_IRQ_DISABLE);
 }
 
 static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
 {
 	wil_dbg_irq(wil, "%s()\n", __func__);
 
-	iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
-		  HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+	wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE);
 
 	clear_bit(wil_status_irqen, wil->status);
 }
 
 void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
 {
-	iowrite32(WIL6210_IMC_TX, wil->csr +
-		  HOSTADDR(RGF_DMA_EP_TX_ICR) +
-		  offsetof(struct RGF_ICR, IMC));
+	wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMC),
+	      WIL6210_IMC_TX);
 }
 
 void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
 {
-	iowrite32(WIL6210_IMC_RX, wil->csr +
-		  HOSTADDR(RGF_DMA_EP_RX_ICR) +
-		  offsetof(struct RGF_ICR, IMC));
+	wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC),
+	      WIL6210_IMC_RX);
 }
 
 static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
 {
-	iowrite32(WIL6210_IMC_MISC, wil->csr +
-		  HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-		  offsetof(struct RGF_ICR, IMC));
+	wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
+	      WIL6210_IMC_MISC);
 }
 
 static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
@@ -132,8 +125,7 @@
 
 	set_bit(wil_status_irqen, wil->status);
 
-	iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr +
-		  HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+	wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_PSEUDO_MASK);
 }
 
 void wil_mask_irq(struct wil6210_priv *wil)
@@ -150,12 +142,12 @@
 {
 	wil_dbg_irq(wil, "%s()\n", __func__);
 
-	iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
-		  offsetof(struct RGF_ICR, ICC));
-	iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
-		  offsetof(struct RGF_ICR, ICC));
-	iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-		  offsetof(struct RGF_ICR, ICC));
+	wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
+	      WIL_ICR_ICC_VALUE);
+	wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
+	      WIL_ICR_ICC_VALUE);
+	wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
+	      WIL_ICR_ICC_VALUE);
 
 	wil6210_unmask_irq_pseudo(wil);
 	wil6210_unmask_irq_tx(wil);
@@ -163,9 +155,6 @@
 	wil6210_unmask_irq_misc(wil);
 }
 
-/* target write operation */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-
 void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
 {
 	wil_dbg_irq(wil, "%s()\n", __func__);
@@ -177,44 +166,42 @@
 		return;
 
 	/* Disable and clear tx counter before (re)configuration */
-	W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
-	W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
+	wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
+	wil_w(wil, RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
 	wil_info(wil, "set ITR_TX_CNT_TRSH = %d usec\n",
 		 wil->tx_max_burst_duration);
 	/* Configure TX max burst duration timer to use usec units */
-	W(RGF_DMA_ITR_TX_CNT_CTL,
-	  BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
+	wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL,
+	      BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
 
 	/* Disable and clear tx idle counter before (re)configuration */
-	W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
-	W(RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
+	wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
+	wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
 	wil_info(wil, "set ITR_TX_IDL_CNT_TRSH = %d usec\n",
 		 wil->tx_interframe_timeout);
 	/* Configure TX max burst duration timer to use usec units */
-	W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
-				      BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
+	wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
+	      BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
 
 	/* Disable and clear rx counter before (re)configuration */
-	W(RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
-	W(RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
+	wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
+	wil_w(wil, RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
 	wil_info(wil, "set ITR_RX_CNT_TRSH = %d usec\n",
 		 wil->rx_max_burst_duration);
 	/* Configure TX max burst duration timer to use usec units */
-	W(RGF_DMA_ITR_RX_CNT_CTL,
-	  BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
+	wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL,
+	      BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
 
 	/* Disable and clear rx idle counter before (re)configuration */
-	W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
-	W(RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
+	wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
+	wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
 	wil_info(wil, "set ITR_RX_IDL_CNT_TRSH = %d usec\n",
 		 wil->rx_interframe_timeout);
 	/* Configure TX max burst duration timer to use usec units */
-	W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
-				      BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
+	wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
+	      BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
 }
 
-#undef W
-
 static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
 {
 	struct wil6210_priv *wil = cookie;
@@ -452,27 +439,24 @@
 		u32 icr_rx = wil_ioread32_and_clear(wil->csr +
 				HOSTADDR(RGF_DMA_EP_RX_ICR) +
 				offsetof(struct RGF_ICR, ICR));
-		u32 imv_rx = ioread32(wil->csr +
-				HOSTADDR(RGF_DMA_EP_RX_ICR) +
-				offsetof(struct RGF_ICR, IMV));
+		u32 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
+				   offsetof(struct RGF_ICR, IMV));
 		u32 icm_tx = wil_ioread32_and_clear(wil->csr +
 				HOSTADDR(RGF_DMA_EP_TX_ICR) +
 				offsetof(struct RGF_ICR, ICM));
 		u32 icr_tx = wil_ioread32_and_clear(wil->csr +
 				HOSTADDR(RGF_DMA_EP_TX_ICR) +
 				offsetof(struct RGF_ICR, ICR));
-		u32 imv_tx = ioread32(wil->csr +
-				HOSTADDR(RGF_DMA_EP_TX_ICR) +
-				offsetof(struct RGF_ICR, IMV));
+		u32 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
+				   offsetof(struct RGF_ICR, IMV));
 		u32 icm_misc = wil_ioread32_and_clear(wil->csr +
 				HOSTADDR(RGF_DMA_EP_MISC_ICR) +
 				offsetof(struct RGF_ICR, ICM));
 		u32 icr_misc = wil_ioread32_and_clear(wil->csr +
 				HOSTADDR(RGF_DMA_EP_MISC_ICR) +
 				offsetof(struct RGF_ICR, ICR));
-		u32 imv_misc = ioread32(wil->csr +
-				HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-				offsetof(struct RGF_ICR, IMV));
+		u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
+				     offsetof(struct RGF_ICR, IMV));
 		wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
 				"Rx   icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
 				"Tx   icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
@@ -492,7 +476,7 @@
 {
 	irqreturn_t rc = IRQ_HANDLED;
 	struct wil6210_priv *wil = cookie;
-	u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+	u32 pseudo_cause = wil_r(wil, RGF_DMA_PSEUDO_CAUSE);
 
 	/**
 	 * pseudo_cause is Clear-On-Read, no need to ACK
@@ -541,48 +525,12 @@
 	return rc;
 }
 
-static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
-{
-	int rc;
-	/*
-	 * IRQ's are in the following order:
-	 * - Tx
-	 * - Rx
-	 * - Misc
-	 */
-
-	rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED,
-			 WIL_NAME"_tx", wil);
-	if (rc)
-		return rc;
-
-	rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED,
-			 WIL_NAME"_rx", wil);
-	if (rc)
-		goto free0;
-
-	rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
-				  wil6210_irq_misc_thread,
-				  IRQF_SHARED, WIL_NAME"_misc", wil);
-	if (rc)
-		goto free1;
-
-	return 0;
-	/* error branch */
-free1:
-	free_irq(irq + 1, wil);
-free0:
-	free_irq(irq, wil);
-
-	return rc;
-}
-
 /* can't use wil_ioread32_and_clear because ICC value is not set yet */
 static inline void wil_clear32(void __iomem *addr)
 {
-	u32 x = ioread32(addr);
+	u32 x = readl(addr);
 
-	iowrite32(x, addr);
+	writel(x, addr);
 }
 
 void wil6210_clear_irq(struct wil6210_priv *wil)
@@ -596,19 +544,16 @@
 	wmb(); /* make sure write completed */
 }
 
-int wil6210_init_irq(struct wil6210_priv *wil, int irq)
+int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
 {
 	int rc;
 
-	wil_dbg_misc(wil, "%s() n_msi=%d\n", __func__, wil->n_msi);
+	wil_dbg_misc(wil, "%s(%s)\n", __func__, use_msi ? "MSI" : "INTx");
 
-	if (wil->n_msi == 3)
-		rc = wil6210_request_3msi(wil, irq);
-	else
-		rc = request_threaded_irq(irq, wil6210_hardirq,
-					  wil6210_thread_irq,
-					  wil->n_msi ? 0 : IRQF_SHARED,
-					  WIL_NAME, wil);
+	rc = request_threaded_irq(irq, wil6210_hardirq,
+				  wil6210_thread_irq,
+				  use_msi ? 0 : IRQF_SHARED,
+				  WIL_NAME, wil);
 	return rc;
 }
 
@@ -618,8 +563,4 @@
 
 	wil_mask_irq(wil);
 	free_irq(irq, wil);
-	if (wil->n_msi == 3) {
-		free_irq(irq + 1, wil);
-		free_irq(irq + 2, wil);
-	}
 }
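
With the dedicated 3-MSI vectors gone, all interrupt setup funnels through a single request_threaded_irq() call. A minimal sketch of that pattern, assuming hypothetical my_* names in place of the driver's handlers (request_threaded_irq() and free_irq() are the real kernel APIs):

#include <linux/interrupt.h>

/* hard-IRQ context: acknowledge quickly, defer the real work */
static irqreturn_t my_hardirq(int irq, void *cookie)
{
	return IRQ_WAKE_THREAD;
}

/* threaded handler: runs in process context, may sleep */
static irqreturn_t my_thread_irq(int irq, void *cookie)
{
	return IRQ_HANDLED;
}

static int my_init_irq(void *dev, int irq, bool use_msi)
{
	/* an MSI vector is exclusive; legacy INTx may be shared */
	return request_threaded_irq(irq, my_hardirq, my_thread_irq,
				    use_msi ? 0 : IRQF_SHARED,
				    "my_dev", dev);
}
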
diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c
index e9c0673..f7f9486 100644
--- a/drivers/net/wireless/ath/wil6210/ioctl.c
+++ b/drivers/net/wireless/ath/wil6210/ioctl.c
@@ -76,11 +76,11 @@
 	/* operation */
 	switch (io.op & wil_mmio_op_mask) {
 	case wil_mmio_read:
-		io.val = ioread32(a);
+		io.val = readl(a);
 		need_copy = true;
 		break;
 	case wil_mmio_write:
-		iowrite32(io.val, a);
+		writel(io.val, a);
 		wmb(); /* make sure write propagated to HW */
 		break;
 	default:
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 6ca6193..2fb04c5 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -21,6 +21,7 @@
 #include "wil6210.h"
 #include "txrx.h"
 #include "wmi.h"
+#include "boot_loader.h"
 
 #define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000
 #define WAIT_FOR_DISCONNECT_INTERVAL_MS 10
@@ -270,8 +271,7 @@
 
 	clear_bit(wil_status_fwready, wil->status);
 	wil_err(wil, "Scan timeout detected, start fw error recovery\n");
-	wil->recovery_state = fw_recovery_pending;
-	schedule_work(&wil->fw_error_worker);
+	wil_fw_error_recovery(wil);
 }
 
 static int wil_wait_for_recovery(struct wil6210_priv *wil)
@@ -528,26 +528,16 @@
 	destroy_workqueue(wil->wmi_wq);
 }
 
-/* target operations */
-/* register read */
-#define R(a) ioread32(wil->csr + HOSTADDR(a))
-/* register write. wmb() to make sure it is completed */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-/* register set = read, OR, write */
-#define S(a, v) W(a, R(a) | v)
-/* register clear = read, AND with inverted, write */
-#define C(a, v) W(a, R(a) & ~v)
-
 static inline void wil_halt_cpu(struct wil6210_priv *wil)
 {
-	W(RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
-	W(RGF_USER_MAC_CPU_0,  BIT_USER_MAC_CPU_MAN_RST);
+	wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
+	wil_w(wil, RGF_USER_MAC_CPU_0,  BIT_USER_MAC_CPU_MAN_RST);
 }
 
 static inline void wil_release_cpu(struct wil6210_priv *wil)
 {
 	/* Start CPU */
-	W(RGF_USER_USER_CPU_0, 1);
+	wil_w(wil, RGF_USER_USER_CPU_0, 1);
 }
 
 static int wil_target_reset(struct wil6210_priv *wil)
@@ -558,56 +548,60 @@
 	wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
 
 	/* Clear MAC link up */
-	S(RGF_HP_CTRL, BIT(15));
-	S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
-	S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
+	wil_s(wil, RGF_HP_CTRL, BIT(15));
+	wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
+	wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
 
 	wil_halt_cpu(wil);
 
 	/* clear all boot loader "ready" bits */
-	W(RGF_USER_BL + offsetof(struct RGF_BL, ready), 0);
+	wil_w(wil, RGF_USER_BL +
+	      offsetof(struct bl_dedicated_registers_v0, boot_loader_ready), 0);
 	/* Clear Fw Download notification */
-	C(RGF_USER_USAGE_6, BIT(0));
+	wil_c(wil, RGF_USER_USAGE_6, BIT(0));
 
-	S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
+	wil_s(wil, RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
 	/* XTAL stabilization should take about 3ms */
 	usleep_range(5000, 7000);
-	x = R(RGF_CAF_PLL_LOCK_STATUS);
+	x = wil_r(wil, RGF_CAF_PLL_LOCK_STATUS);
 	if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
 		wil_err(wil, "Xtal stabilization timeout\n"
 			"RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
 		return -ETIME;
 	}
 	/* switch 10k to XTAL*/
-	C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
+	wil_c(wil, RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
 	/* 40 MHz */
-	C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
+	wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
 
-	W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
-	W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
+	wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
+	wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
 
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
 
-	W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
-	W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
+	wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
+	wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
 
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
 
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); /* reset A2 PCIE AHB */
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
+	/* reset A2 PCIE AHB */
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
 
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
 
 	/* wait until device ready. typical time is 20..80 msec */
 	do {
 		msleep(RST_DELAY);
-		x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
+		x = wil_r(wil, RGF_USER_BL +
+			  offsetof(struct bl_dedicated_registers_v0,
+				   boot_loader_ready));
 		if (x1 != x) {
 			wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
 			x1 = x;
@@ -617,13 +611,13 @@
 				x);
 			return -ETIME;
 		}
-	} while (x != BIT_BL_READY);
+	} while (x != BL_READY);
 
-	C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+	wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
 
 	/* enable fix for HW bug related to the SA/DA swap in AP Rx */
-	S(RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
-	  BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
+	wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
+	      BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
 
 	wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
 	return 0;
@@ -641,29 +635,93 @@
 static int wil_get_bl_info(struct wil6210_priv *wil)
 {
 	struct net_device *ndev = wil_to_ndev(wil);
-	struct RGF_BL bl;
+	union {
+		struct bl_dedicated_registers_v0 bl0;
+		struct bl_dedicated_registers_v1 bl1;
+	} bl;
+	u32 bl_ver;
+	u8 *mac;
+	u16 rf_status;
 
-	wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), sizeof(bl));
-	le32_to_cpus(&bl.ready);
-	le32_to_cpus(&bl.version);
-	le32_to_cpus(&bl.rf_type);
-	le32_to_cpus(&bl.baseband_type);
+	wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL),
+			     sizeof(bl));
+	bl_ver = le32_to_cpu(bl.bl0.boot_loader_struct_version);
+	mac = bl.bl0.mac_address;
 
-	if (!is_valid_ether_addr(bl.mac_address)) {
-		wil_err(wil, "BL: Invalid MAC %pM\n", bl.mac_address);
+	if (bl_ver == 0) {
+		le32_to_cpus(&bl.bl0.rf_type);
+		le32_to_cpus(&bl.bl0.baseband_type);
+		rf_status = 0; /* actually, unknown */
+		wil_info(wil,
+			 "Boot Loader struct v%d: MAC = %pM RF = 0x%08x bband = 0x%08x\n",
+			 bl_ver, mac,
+			 bl.bl0.rf_type, bl.bl0.baseband_type);
+		wil_info(wil, "Boot Loader build unknown for struct v0\n");
+	} else {
+		le16_to_cpus(&bl.bl1.rf_type);
+		rf_status = le16_to_cpu(bl.bl1.rf_status);
+		le32_to_cpus(&bl.bl1.baseband_type);
+		le16_to_cpus(&bl.bl1.bl_version_subminor);
+		le16_to_cpus(&bl.bl1.bl_version_build);
+		wil_info(wil,
+			 "Boot Loader struct v%d: MAC = %pM RF = 0x%04x (status 0x%04x) bband = 0x%08x\n",
+			 bl_ver, mac,
+			 bl.bl1.rf_type, rf_status,
+			 bl.bl1.baseband_type);
+		wil_info(wil, "Boot Loader build %d.%d.%d.%d\n",
+			 bl.bl1.bl_version_major, bl.bl1.bl_version_minor,
+			 bl.bl1.bl_version_subminor, bl.bl1.bl_version_build);
+	}
+
+	if (!is_valid_ether_addr(mac)) {
+		wil_err(wil, "BL: Invalid MAC %pM\n", mac);
 		return -EINVAL;
 	}
 
-	ether_addr_copy(ndev->perm_addr, bl.mac_address);
+	ether_addr_copy(ndev->perm_addr, mac);
 	if (!is_valid_ether_addr(ndev->dev_addr))
-		ether_addr_copy(ndev->dev_addr, bl.mac_address);
-	wil_info(wil,
-		 "Boot Loader: ver = %d MAC = %pM RF = 0x%08x bband = 0x%08x\n",
-		 bl.version, bl.mac_address, bl.rf_type, bl.baseband_type);
+		ether_addr_copy(ndev->dev_addr, mac);
+
+	if (rf_status) { /* bad RF cable? */
+		wil_err(wil, "RF communication error 0x%04x",
+			rf_status);
+		return -EAGAIN;
+	}
 
 	return 0;
 }
 
+static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err)
+{
+	u32 bl_assert_code, bl_assert_blink, bl_magic_number;
+	u32 bl_ver = wil_r(wil, RGF_USER_BL +
+			   offsetof(struct bl_dedicated_registers_v0,
+				    boot_loader_struct_version));
+
+	if (bl_ver < 2)
+		return;
+
+	bl_assert_code = wil_r(wil, RGF_USER_BL +
+			       offsetof(struct bl_dedicated_registers_v1,
+					bl_assert_code));
+	bl_assert_blink = wil_r(wil, RGF_USER_BL +
+				offsetof(struct bl_dedicated_registers_v1,
+					 bl_assert_blink));
+	bl_magic_number = wil_r(wil, RGF_USER_BL +
+				offsetof(struct bl_dedicated_registers_v1,
+					 bl_magic_number));
+
+	if (is_err) {
+		wil_err(wil,
+			"BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
+			bl_assert_code, bl_assert_blink, bl_magic_number);
+	} else {
+		wil_dbg_misc(wil,
+			     "BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
+			     bl_assert_code, bl_assert_blink, bl_magic_number);
+	}
+}
+
 static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
 {
 	ulong to = msecs_to_jiffies(1000);
@@ -690,9 +748,6 @@
 
 	wil_dbg_misc(wil, "%s()\n", __func__);
 
-	if (wil->hw_version == HW_VER_UNKNOWN)
-		return -ENODEV;
-
 	WARN_ON(!mutex_is_locked(&wil->mutex));
 	WARN_ON(test_bit(wil_status_napi_en, wil->status));
 
@@ -707,6 +762,9 @@
 		return 0;
 	}
 
+	if (wil->hw_version == HW_VER_UNKNOWN)
+		return -ENODEV;
+
 	cancel_work_sync(&wil->disconnect_worker);
 	wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
 	wil_bcast_fini(wil);
@@ -729,12 +787,17 @@
 	flush_workqueue(wil->wq_service);
 	flush_workqueue(wil->wmi_wq);
 
+	wil_bl_crash_info(wil, false);
 	rc = wil_target_reset(wil);
 	wil_rx_fini(wil);
-	if (rc)
+	if (rc) {
+		wil_bl_crash_info(wil, true);
 		return rc;
+	}
 
 	rc = wil_get_bl_info(wil);
+	if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
+		rc = 0;
 	if (rc)
 		return rc;
 
@@ -752,7 +815,7 @@
 			return rc;
 
 		/* Mark FW as loaded from host */
-		S(RGF_USER_USAGE_6, 1);
+		wil_s(wil, RGF_USER_USAGE_6, 1);
 
 		/* clear any interrupts which on-card-firmware
 		 * may have set
@@ -760,8 +823,8 @@
 		wil6210_clear_irq(wil);
 		/* CAF_ICR - clear and mask */
 		/* it is W1C, clear by writing back same value */
-		S(RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
-		W(RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
+		wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
+		wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
 
 		wil_release_cpu(wil);
 	}
@@ -785,11 +848,6 @@
 	return rc;
 }
 
-#undef R
-#undef W
-#undef S
-#undef C
-
 void wil_fw_error_recovery(struct wil6210_priv *wil)
 {
 	wil_dbg_misc(wil, "starting fw error recovery\n");
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 8ef18ac..e3b3c8f 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -173,7 +173,10 @@
 	wil_set_ethtoolops(ndev);
 	ndev->ieee80211_ptr = wdev;
 	ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
-			    NETIF_F_SG | NETIF_F_GRO;
+			    NETIF_F_SG | NETIF_F_GRO |
+			    NETIF_F_TSO | NETIF_F_TSO6 |
+			    NETIF_F_RXHASH;
+
 	ndev->features |= ndev->hw_features;
 	SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
 	wdev->netdev = ndev;
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index aa3ecc6..feff1ef 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -21,16 +21,14 @@
 
 #include "wil6210.h"
 
-static int use_msi = 1;
-module_param(use_msi, int, S_IRUGO);
-MODULE_PARM_DESC(use_msi,
-		 " Use MSI interrupt: "
-		 "0 - don't, 1 - (default) - single, or 3");
+static bool use_msi = true;
+module_param(use_msi, bool, S_IRUGO);
+MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
 
 static
 void wil_set_capabilities(struct wil6210_priv *wil)
 {
-	u32 rev_id = ioread32(wil->csr + HOSTADDR(RGF_USER_JTAG_DEV_ID));
+	u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
 
 	bitmap_zero(wil->hw_capabilities, hw_capability_last);
 
@@ -50,24 +48,12 @@
 
 void wil_disable_irq(struct wil6210_priv *wil)
 {
-	int irq = wil->pdev->irq;
-
-	disable_irq(irq);
-	if (wil->n_msi == 3) {
-		disable_irq(irq + 1);
-		disable_irq(irq + 2);
-	}
+	disable_irq(wil->pdev->irq);
 }
 
 void wil_enable_irq(struct wil6210_priv *wil)
 {
-	int irq = wil->pdev->irq;
-
-	enable_irq(irq);
-	if (wil->n_msi == 3) {
-		enable_irq(irq + 1);
-		enable_irq(irq + 2);
-	}
+	enable_irq(wil->pdev->irq);
 }
 
 /* Bus ops */
@@ -80,6 +66,7 @@
 	 * and only MSI should be used
 	 */
 	int msi_only = pdev->msi_enabled;
+	bool _use_msi = use_msi;
 
 	wil_dbg_misc(wil, "%s()\n", __func__);
 
@@ -87,41 +74,20 @@
 
 	pci_set_master(pdev);
 
-	/*
-	 * how many MSI interrupts to request?
-	 */
-	switch (use_msi) {
-	case 3:
-	case 1:
-		wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
-		break;
-	case 0:
-		wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
-		break;
-	default:
-		wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
-		use_msi = 1;
-	}
+	wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");
 
-	if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
-		wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
-		use_msi = 1;
-	}
-
-	if (use_msi == 1 && pci_enable_msi(pdev)) {
+	if (use_msi && pci_enable_msi(pdev)) {
 		wil_err(wil, "pci_enable_msi failed, use INTx\n");
-		use_msi = 0;
+		_use_msi = false;
 	}
 
-	wil->n_msi = use_msi;
-
-	if ((wil->n_msi == 0) && msi_only) {
+	if (!_use_msi && msi_only) {
 		wil_err(wil, "Interrupt pin not routed, unable to use INTx\n");
 		rc = -ENODEV;
 		goto stop_master;
 	}
 
-	rc = wil6210_init_irq(wil, pdev->irq);
+	rc = wil6210_init_irq(wil, pdev->irq, _use_msi);
 	if (rc)
 		goto stop_master;
 
@@ -293,11 +259,80 @@
 };
 MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
 
+#ifdef CONFIG_PM
+
+static int wil6210_suspend(struct device *dev, bool is_runtime)
+{
+	int rc = 0;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+	wil_dbg_pm(wil, "%s(%s)\n", __func__,
+		   is_runtime ? "runtime" : "system");
+
+	rc = wil_can_suspend(wil, is_runtime);
+	if (rc)
+		goto out;
+
+	rc = wil_suspend(wil, is_runtime);
+	if (rc)
+		goto out;
+
+	/* TODO: how do I bring the card into a low power state? */
+
+	/* disable bus mastering */
+	pci_clear_master(pdev);
+	/* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */
+
+out:
+	return rc;
+}
+
+static int wil6210_resume(struct device *dev, bool is_runtime)
+{
+	int rc = 0;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+	wil_dbg_pm(wil, "%s(%s)\n", __func__,
+		   is_runtime ? "runtime" : "system");
+
+	/* allow master */
+	pci_set_master(pdev);
+
+	rc = wil_resume(wil, is_runtime);
+	if (rc)
+		pci_clear_master(pdev);
+
+	return rc;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int wil6210_pm_suspend(struct device *dev)
+{
+	return wil6210_suspend(dev, false);
+}
+
+static int wil6210_pm_resume(struct device *dev)
+{
+	return wil6210_resume(dev, false);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops wil6210_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
+};
+
 static struct pci_driver wil6210_driver = {
 	.probe		= wil_pcie_probe,
 	.remove		= wil_pcie_remove,
 	.id_table	= wil6210_pcie_ids,
 	.name		= WIL_NAME,
+	.driver		= {
+		.pm = &wil6210_pm_ops,
+	},
 };
 
 static int __init wil6210_driver_init(void)
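
The new dev_pm_ops wiring follows a common shape: thin system-sleep wrappers delegate to one implementation that takes an is_runtime flag, so runtime PM can later share the same path. A minimal sketch under that assumption; the my_* names are illustrative, while SET_SYSTEM_SLEEP_PM_OPS and struct dev_pm_ops are the real kernel interfaces:

#include <linux/pm.h>

static int my_any_suspend(struct device *dev, bool is_runtime)
{
	/* shared policy: check, quiesce, power down */
	return 0;
}

static int my_any_resume(struct device *dev, bool is_runtime)
{
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int my_pm_suspend(struct device *dev)
{
	return my_any_suspend(dev, false);
}

static int my_pm_resume(struct device *dev)
{
	return my_any_resume(dev, false);
}
#endif

static const struct dev_pm_ops my_pm_ops = {
	/* expands to nothing when CONFIG_PM_SLEEP is unset */
	SET_SYSTEM_SLEEP_PM_OPS(my_pm_suspend, my_pm_resume)
};
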
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
new file mode 100644
index 0000000..0b7ecbc
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wil6210.h"
+
+int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
+{
+	int rc = 0;
+	struct wireless_dev *wdev = wil->wdev;
+
+	wil_dbg_pm(wil, "%s(%s)\n", __func__,
+		   is_runtime ? "runtime" : "system");
+
+	switch (wdev->iftype) {
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_CLIENT:
+		break;
+	/* AP-like interface - can't suspend */
+	default:
+		wil_dbg_pm(wil, "AP-like interface\n");
+		rc = -EBUSY;
+		break;
+	}
+
+	wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__,
+		   is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
+
+	return rc;
+}
+
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+{
+	int rc = 0;
+	struct net_device *ndev = wil_to_ndev(wil);
+
+	wil_dbg_pm(wil, "%s(%s)\n", __func__,
+		   is_runtime ? "runtime" : "system");
+
+	/* if netif up, hardware is alive, shut it down */
+	if (ndev->flags & IFF_UP) {
+		rc = wil_down(wil);
+		if (rc) {
+			wil_err(wil, "wil_down : %d\n", rc);
+			goto out;
+		}
+	}
+
+	if (wil->platform_ops.suspend)
+		rc = wil->platform_ops.suspend(wil->platform_handle);
+
+out:
+	wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+		   is_runtime ? "runtime" : "system", rc);
+	return rc;
+}
+
+int wil_resume(struct wil6210_priv *wil, bool is_runtime)
+{
+	int rc = 0;
+	struct net_device *ndev = wil_to_ndev(wil);
+
+	wil_dbg_pm(wil, "%s(%s)\n", __func__,
+		   is_runtime ? "runtime" : "system");
+
+	if (wil->platform_ops.resume) {
+		rc = wil->platform_ops.resume(wil->platform_handle);
+		if (rc) {
+			wil_err(wil, "platform_ops.resume : %d\n", rc);
+			goto out;
+		}
+	}
+
+	/* if netif up, bring hardware up.
+	 * During open(), IFF_UP is set after the actual device method
+	 * invocation; this prevents a recursive call to wil_up()
+	 */
+	if (ndev->flags & IFF_UP)
+		rc = wil_up(wil);
+
+out:
+	wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+		   is_runtime ? "runtime" : "system", rc);
+	return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index ca10dcf..9238c1a 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -121,6 +121,7 @@
 		goto out;
 	}
 
+	r->total++;
 	hseq = r->head_seq_num;
 
 	/** Due to the race between WMI events, where BACK establishment
@@ -153,6 +154,9 @@
 	/* frame with out of date sequence number */
 	if (seq_less(seq, r->head_seq_num)) {
 		r->ssn_last_drop = seq;
+		r->drop_old++;
+		wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n",
+			     seq, r->head_seq_num);
 		dev_kfree_skb(skb);
 		goto out;
 	}
@@ -173,6 +177,8 @@
 
 	/* check if we already stored this frame */
 	if (r->reorder_buf[index]) {
+		r->drop_dup++;
+		wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq);
 		dev_kfree_skb(skb);
 		goto out;
 	}
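
The seq_less() test visible in the hunk above is the driver's wraparound-aware "older than" comparison: 802.11 sequence numbers live in a 12-bit space, so "less" has to be modular. It is equivalent to the following helper (a sketch of the existing rx_reorder.c idiom, not new code):

#include <linux/types.h>

#define SEQ_MODULO 0x1000
#define SEQ_MASK   0xfff

static inline bool seq_less(u16 sq1, u16 sq2)
{
	/* true when sq1 precedes sq2 in modulo-4096 order */
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

For example, seq_less(4090, 3) is true: 4090 precedes 3 across the wrap, which is exactly the case a plain '<' would get wrong.
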
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index aa20af8..6229110 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -509,7 +509,7 @@
 			break;
 		}
 	}
-	iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));
+	wil_w(wil, v->hwtail, v->swtail);
 
 	return rc;
 }
@@ -541,6 +541,14 @@
 		[GRO_DROP]		= "GRO_DROP",
 	};
 
+	if (ndev->features & NETIF_F_RXHASH)
+		/* fake an L4 hash to ensure it won't be re-calculated
+		 * later; set the hash to any non-zero value to activate
+		 * the RPS mechanism, and the core will be chosen
+		 * according to the user-level RPS configuration.
+		 */
+		skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);
+
 	skb_orphan(skb);
 
 	if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
@@ -1058,14 +1066,52 @@
 static inline
 void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
 {
-	d->mac.d[2] |= ((nr_frags + 1) <<
-		       MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+	d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
 }
 
-static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
-					 struct vring_tx_desc *d,
-					 struct sk_buff *skb)
+/**
+ * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
+ * @skb is used to obtain the protocol and header lengths.
+ * @tso_desc_type is the TSO descriptor type: 0 - header, 1 - first data,
+ * 2 - middle, 3 - last descriptor.
+ */
+
+static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
+					  struct sk_buff *skb,
+					  int tso_desc_type, bool is_ipv4,
+					  int tcp_hdr_len, int skb_net_hdr_len)
 {
+	d->dma.b11 = ETH_HLEN; /* MAC header length */
+	d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
+
+	d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
+	/* L4 header len: TCP header length */
+	d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
+
+	/* Setup TSO: bit and desc type */
+	d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
+		(tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
+	d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
+
+	d->dma.ip_length = skb_net_hdr_len;
+	/* Enable TCP/UDP checksum */
+	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
+	/* Calculate pseudo-header */
+	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
+}
+
+/**
+ * Sets the descriptor @d up for csum. The corresponding
+ * @skb is used to obtain the protocol and header lengths.
+ * Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6.
+ * Note: if @d == NULL, the function only returns the protocol result.
+ *
+ * It is very similar to wil_tx_desc_offload_setup_tso() above; the two
+ * are kept separate ("if unrolling") to optimize the critical path.
+ */
+
+static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
+				     struct sk_buff *skb)
+{
 	int protocol;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1110,6 +1156,305 @@
 	return 0;
 }
 
+static inline void wil_tx_last_desc(struct vring_tx_desc *d)
+{
+	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
+	      BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
+	      BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
+}
+
+static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
+{
+	d->dma.d0 |= wil_tso_type_lst <<
+		  DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
+}
+
+static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
+			      struct sk_buff *skb)
+{
+	struct device *dev = wil_to_dev(wil);
+
+	/* point to descriptors in shared memory */
+	volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
+				      *_first_desc = NULL;
+
+	/* pointers to shadow descriptors */
+	struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
+			     *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
+			     *first_desc = &first_desc_mem;
+
+	/* pointer to shadow descriptors' context */
+	struct wil_ctx *hdr_ctx, *first_ctx = NULL;
+
+	int descs_used = 0; /* total number of used descriptors */
+	int sg_desc_cnt = 0; /* number of descriptors for current mss */
+
+	u32 swhead = vring->swhead;
+	int used, avail = wil_vring_avail_tx(vring);
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	int min_desc_required = nr_frags + 1;
+	int mss = skb_shinfo(skb)->gso_size;	/* payload size w/o headers */
+	int f, len, hdrlen, headlen;
+	int vring_index = vring - wil->vring_tx;
+	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+	uint i = swhead;
+	dma_addr_t pa;
+	const skb_frag_t *frag = NULL;
+	int rem_data = mss;
+	int lenmss;
+	bool hdr_compensation_need = true;
+	int desc_tso_type = wil_tso_type_first;
+	bool is_ipv4;
+	int tcp_hdr_len;
+	int skb_net_hdr_len;
+	int gso_type;
+
+	wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
+		     __func__, skb->len, vring_index);
+
+	if (unlikely(!txdata->enabled))
+		return -EINVAL;
+
+	/* A typical 4K page holds 3-4 payloads; we assume each fragment
+	 * is a full payload, which is how min_desc_required has been
+	 * calculated. In reality we might need more or fewer descriptors;
+	 * this is only the initial check.
+	 */
+	if (unlikely(avail < min_desc_required)) {
+		wil_err_ratelimited(wil,
+				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
+				    vring_index, min_desc_required);
+		return -ENOMEM;
+	}
+
+	/* Header Length = MAC header len + IP header len + TCP header len */
+	hdrlen = ETH_HLEN +
+		(int)skb_network_header_len(skb) +
+		tcp_hdrlen(skb);
+
+	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
+	switch (gso_type) {
+	case SKB_GSO_TCPV4:
+		/* TCP v4, zero out the IP length and IPv4 checksum fields
+		 * as required by the offloading doc
+		 */
+		ip_hdr(skb)->tot_len = 0;
+		ip_hdr(skb)->check = 0;
+		is_ipv4 = true;
+		break;
+	case SKB_GSO_TCPV6:
+		/* TCP v6, zero out the payload length */
+		ipv6_hdr(skb)->payload_len = 0;
+		is_ipv4 = false;
+		break;
+	default:
+		/* other than TCPv4 or TCPv6 types are not supported for TSO.
+		 * It is also illegal for both to be set simultaneously
+		 */
+		return -EINVAL;
+	}
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return -EINVAL;
+
+	/* TCP header length and skb network header length are fixed for
+	 * all of the packet's descriptors - read them once here
+	 */
+	tcp_hdr_len = tcp_hdrlen(skb);
+	skb_net_hdr_len = skb_network_header_len(skb);
+
+	_hdr_desc = &vring->va[i].tx;
+
+	pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, pa))) {
+		wil_err(wil, "TSO: Skb head DMA map error\n");
+		goto err_exit;
+	}
+
+	wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
+	wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
+				      tcp_hdr_len, skb_net_hdr_len);
+	wil_tx_last_desc(hdr_desc);
+
+	vring->ctx[i].mapped_as = wil_mapped_as_single;
+	hdr_ctx = &vring->ctx[i];
+
+	descs_used++;
+	headlen = skb_headlen(skb) - hdrlen;
+
+	for (f = headlen ? -1 : 0; f < nr_frags; f++) {
+		if (headlen) {
+			len = headlen;
+			wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
+				     len);
+		} else {
+			frag = &skb_shinfo(skb)->frags[f];
+			len = frag->size;
+			wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
+		}
+
+		while (len) {
+			wil_dbg_txrx(wil,
+				     "TSO: len %d, rem_data %d, descs_used %d\n",
+				     len, rem_data, descs_used);
+
+			if (descs_used == avail) {
+				wil_err(wil, "TSO: ring overflow\n");
+				goto dma_error;
+			}
+
+			lenmss = min_t(int, rem_data, len);
+			i = (swhead + descs_used) % vring->size;
+			wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
+
+			if (!headlen) {
+				pa = skb_frag_dma_map(dev, frag,
+						      frag->size - len, lenmss,
+						      DMA_TO_DEVICE);
+				vring->ctx[i].mapped_as = wil_mapped_as_page;
+			} else {
+				pa = dma_map_single(dev,
+						    skb->data +
+						    skb_headlen(skb) - headlen,
+						    lenmss,
+						    DMA_TO_DEVICE);
+				vring->ctx[i].mapped_as = wil_mapped_as_single;
+				headlen -= lenmss;
+			}
+
+			if (unlikely(dma_mapping_error(dev, pa)))
+				goto dma_error;
+
+			_desc = &vring->va[i].tx;
+
+			if (!_first_desc) {
+				_first_desc = _desc;
+				first_ctx = &vring->ctx[i];
+				d = first_desc;
+			} else {
+				d = &desc_mem;
+			}
+
+			wil_tx_desc_map(d, pa, lenmss, vring_index);
+			wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
+						      is_ipv4, tcp_hdr_len,
+						      skb_net_hdr_len);
+
+			/* use tso_type_first only once */
+			desc_tso_type = wil_tso_type_mid;
+
+			descs_used++;  /* desc used so far */
+			sg_desc_cnt++; /* desc used for this segment */
+			len -= lenmss;
+			rem_data -= lenmss;
+
+			wil_dbg_txrx(wil,
+				     "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
+				     len, rem_data, descs_used, sg_desc_cnt);
+
+			/* Close the segment if we reached the mss size or the last frag */
+			if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
+				if (hdr_compensation_need) {
+					/* first segment includes hdr desc
+					 * for release
+					 */
+					hdr_ctx->nr_frags = sg_desc_cnt;
+					wil_tx_desc_set_nr_frags(first_desc,
+								 sg_desc_cnt +
+								 1);
+					hdr_compensation_need = false;
+				} else {
+					wil_tx_desc_set_nr_frags(first_desc,
+								 sg_desc_cnt);
+				}
+				first_ctx->nr_frags = sg_desc_cnt - 1;
+
+				wil_tx_last_desc(d);
+
+				/* first descriptor may also be the last
+				 * for this mss - make sure not to copy
+				 * it twice
+				 */
+				if (first_desc != d)
+					*_first_desc = *first_desc;
+
+				/* last descriptor will be copied at the end
+				 * of this TSO processing
+				 */
+				if (f < nr_frags - 1 || len > 0)
+					*_desc = *d;
+
+				rem_data = mss;
+				_first_desc = NULL;
+				sg_desc_cnt = 0;
+			} else if (first_desc != d) /* update mid descriptor */
+					*_desc = *d;
+		}
+	}
+
+	/* first descriptor may also be the last.
+	 * in this case d pointer is invalid
+	 */
+	if (_first_desc == _desc)
+		d = first_desc;
+
+	/* Last data descriptor */
+	wil_set_tx_desc_last_tso(d);
+	*_desc = *d;
+
+	/* Fill the total number of descriptors in first desc (hdr)*/
+	wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
+	*_hdr_desc = *hdr_desc;
+
+	/* hold reference to skb
+	 * to prevent skb release before accounting
+	 * in case of immediate "tx done"
+	 */
+	vring->ctx[i].skb = skb_get(skb);
+
+	/* performance monitoring */
+	used = wil_vring_used_tx(vring);
+	if (wil_val_in_range(vring_idle_trsh,
+			     used, used + descs_used)) {
+		txdata->idle += get_cycles() - txdata->last_idle;
+		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
+			     vring_index, used, used + descs_used);
+	}
+
+	/* advance swhead */
+	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
+	wil_vring_advance_head(vring, descs_used);
+
+	/* make sure all writes to descriptors (shared memory) are done before
+	 * committing them to HW
+	 */
+	wmb();
+
+	wil_w(wil, vring->hwtail, vring->swhead);
+	return 0;
+
+dma_error:
+	wil_err(wil, "TSO: DMA map page error\n");
+	while (descs_used > 0) {
+		struct wil_ctx *ctx;
+
+		i = (swhead + descs_used) % vring->size;
+		d = (struct vring_tx_desc *)&vring->va[i].tx;
+		_desc = &vring->va[i].tx;
+		*d = *_desc;
+		_desc->dma.status = TX_DMA_STATUS_DU;
+		ctx = &vring->ctx[i];
+		wil_txdesc_unmap(dev, d, ctx);
+		if (ctx->skb)
+			dev_kfree_skb_any(ctx->skb);
+		memset(ctx, 0, sizeof(*ctx));
+		descs_used--;
+	}
+
+err_exit:
+	return -EINVAL;
+}
+
 static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
 			  struct sk_buff *skb)
 {
@@ -1128,7 +1473,8 @@
 	bool mcast = (vring_index == wil->bcast_vring);
 	uint len = skb_headlen(skb);
 
-	wil_dbg_txrx(wil, "%s()\n", __func__);
+	wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
+		     __func__, skb->len, vring_index);
 
 	if (unlikely(!txdata->enabled))
 		return -EINVAL;
@@ -1159,14 +1505,14 @@
 			d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
 	}
 	/* Process TCP/UDP checksum offloading */
-	if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
+	if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
 		wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
 			vring_index);
 		goto dma_error;
 	}
 
 	vring->ctx[i].nr_frags = nr_frags;
-	wil_tx_desc_set_nr_frags(d, nr_frags);
+	wil_tx_desc_set_nr_frags(d, nr_frags + 1);
 
 	/* middle segments */
 	for (; f < nr_frags; f++) {
@@ -1190,7 +1536,7 @@
 		 * if it succeeded for 1-st descriptor,
 		 * it will succeed here too
 		 */
-		wil_tx_desc_offload_cksum_set(wil, d, skb);
+		wil_tx_desc_offload_setup(d, skb);
 	}
 	/* for the last seg only */
 	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
@@ -1221,7 +1567,13 @@
 	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
 		     vring->swhead);
 	trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
-	iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
+
+	/* make sure all writes to descriptors (shared memory) are done before
+	 * committing them to HW
+	 */
+	wmb();
+
+	wil_w(wil, vring->hwtail, vring->swhead);
 
 	return 0;
  dma_error:
@@ -1254,8 +1606,12 @@
 	int rc;
 
 	spin_lock(&txdata->lock);
-	rc = __wil_tx_vring(wil, vring, skb);
+
+	rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
+	     (wil, vring, skb);
+
 	spin_unlock(&txdata->lock);
+
 	return rc;
 }
 
@@ -1382,7 +1738,8 @@
 		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
 		/**
 		 * For the fragmented skb, HW will set DU bit only for the
-		 * last fragment. look for it
+		 * last fragment. look for it.
+		 * In TSO the first DU will include hdr desc
 		 */
 		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
 		/* TODO: check we are not past head */
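
A standalone model of the MSS-chunking arithmetic at the heart of __wil_tx_vring_tso(): each source buffer (skb head remainder or page fragment) is carved into pieces no larger than the room left in the current MSS segment, and the segment closes once exactly mss payload bytes have been described. Plain C, with DMA mapping and descriptor writes elided; the fragment sizes are made up for illustration:

#include <stdio.h>

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	const int mss = 1460;			/* payload per segment */
	const int frags[] = { 700, 4096, 4096, 300 };
	int rem_data = mss, descs_used = 0;
	unsigned int f;

	for (f = 0; f < sizeof(frags) / sizeof(frags[0]); f++) {
		int len = frags[f];

		while (len) {
			/* never cross an MSS boundary in one descriptor */
			int lenmss = min_int(rem_data, len);

			descs_used++;
			len -= lenmss;
			rem_data -= lenmss;
			if (rem_data == 0) {	/* segment complete */
				printf("segment closed at descriptor %d\n",
				       descs_used);
				rem_data = mss;
			}
		}
	}
	printf("descriptors used: %d\n", descs_used);
	return 0;
}

The real code additionally closes the final (possibly short) segment when the last fragment is exhausted, which the driver expresses as the "f == nr_frags - 1 && len == 0" condition.
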
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 0c46384..82a8f9a 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -291,6 +291,14 @@
 	__le16 length;
 } __packed;
 
+/* TSO type used in dma descriptor d0 bits 11-12 */
+enum {
+	wil_tso_type_hdr = 0,
+	wil_tso_type_first = 1,
+	wil_tso_type_mid  = 2,
+	wil_tso_type_lst  = 3,
+};
+
 /* Rx descriptor - MAC part
  * [dword 0]
  * bit  0.. 3 : tid:4 The QoS (b3-0) TID Field
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 275355d..dd4ea92 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -127,16 +127,6 @@
 	u32 IMC; /* Mask Clear, write 1 to clear */
 } __packed;
 
-struct RGF_BL {
-	u32 ready;		/* 0x880A3C bit [0] */
-#define BIT_BL_READY	BIT(0)
-	u32 version;		/* 0x880A40 version of the BL struct */
-	u32 rf_type;		/* 0x880A44 ID of the connected RF */
-	u32 baseband_type;	/* 0x880A48 ID of the baseband */
-	u8  mac_address[ETH_ALEN]; /* 0x880A4C permanent MAC */
-	u8 pad[2];
-} __packed;
-
 /* registers - FW addresses */
 #define RGF_USER_USAGE_1		(0x880004)
 #define RGF_USER_USAGE_6		(0x880018)
@@ -262,9 +252,8 @@
 };
 
 /* popular locations */
-#define HOST_MBOX   HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
-#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
-	offsetof(struct RGF_ICR, ICS))
+#define RGF_MBOX   RGF_USER_USER_SCRATCH_PAD
+#define HOST_MBOX   HOSTADDR(RGF_MBOX)
 #define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
 
 /* ISR register bits */
@@ -434,12 +423,12 @@
  * @ssn: Starting Sequence Number expected to be aggregated.
  * @buf_size: buffer size for incoming A-MPDUs
  * @timeout: reset timer value (in TUs).
+ * @ssn_last_drop: SSN of the last dropped frame
+ * @total: total number of processed incoming frames
+ * @drop_dup: duplicate frames dropped for this reorder buffer
+ * @drop_old: old frames dropped for this reorder buffer
  * @dialog_token: dialog token for aggregation session
- * @rcu_head: RCU head used for freeing this struct
- *
- * This structure's lifetime is managed by RCU, assignments to
- * the array holding it must hold the aggregation mutex.
- *
+ * @first_time: true when this buffer is used for the first time
  */
 struct wil_tid_ampdu_rx {
 	struct sk_buff **reorder_buf;
@@ -453,6 +442,9 @@
 	u16 buf_size;
 	u16 timeout;
 	u16 ssn_last_drop;
+	unsigned long long total; /* frames processed */
+	unsigned long long drop_dup;
+	unsigned long long drop_old;
 	u8 dialog_token;
 	bool first_time; /* is it 1-st time this buffer used? */
 };
@@ -543,7 +535,6 @@
 
 struct wil6210_priv {
 	struct pci_dev *pdev;
-	int n_msi;
 	struct wireless_dev *wdev;
 	void __iomem *csr;
 	DECLARE_BITMAP(status, wil_status_last);
@@ -559,6 +550,8 @@
 	/* profile */
 	u32 monitor_flags;
 	u32 privacy; /* secure connection? */
+	u8 hidden_ssid; /* relevant in AP mode */
+	u16 channel; /* relevant in AP mode */
 	int sinfo_gen;
 	u32 ap_isolate; /* no intra-BSS communication */
 	/* interrupt moderation */
@@ -654,6 +647,33 @@
 #define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
 #define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
 #define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg)
+#define wil_dbg_pm(wil, fmt, arg...) wil_dbg(wil, "DBG[ PM ]" fmt, ##arg)
+
+/* target operations */
+/* register read */
+static inline u32 wil_r(struct wil6210_priv *wil, u32 reg)
+{
+	return readl(wil->csr + HOSTADDR(reg));
+}
+
+/* register write. wmb() to make sure it is completed */
+static inline void wil_w(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+	writel(val, wil->csr + HOSTADDR(reg));
+	wmb(); /* wait for write to propagate to the HW */
+}
+
+/* register set = read, OR, write */
+static inline void wil_s(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+	wil_w(wil, reg, wil_r(wil, reg) | val);
+}
+
+/* register clear = read, AND with inverted, write */
+static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+	wil_w(wil, reg, wil_r(wil, reg) & ~val);
+}
 
 #if defined(CONFIG_DYNAMIC_DEBUG)
 #define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize,	\
@@ -744,7 +764,7 @@
 void wil_back_tx_flush(struct wil6210_priv *wil);
 
 void wil6210_clear_irq(struct wil6210_priv *wil);
-int wil6210_init_irq(struct wil6210_priv *wil, int irq);
+int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi);
 void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
 void wil_mask_irq(struct wil6210_priv *wil);
 void wil_unmask_irq(struct wil6210_priv *wil);
@@ -796,4 +816,8 @@
 int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
 int wil_request_firmware(struct wil6210_priv *wil, const char *name);
 
+int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
+int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+
 #endif /* __WIL6210_H__ */
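
Compared with the old file-local R()/W()/S()/C() macros, the inline helpers above are type-checked, greppable, and make the wil pointer explicit, so call sites convert mechanically. A short usage sketch, reusing a register name from this patch:

	/* before: file-local macro, implicit 'wil' */
	W(RGF_USER_USAGE_6, BIT(0));

	/* after: ordinary function call, usable from any file */
	wil_w(wil, RGF_USER_USAGE_6, BIT(0));

	/* read-modify-write variants keep the same shape */
	wil_s(wil, RGF_USER_USAGE_6, BIT(0));	/* set bits */
	wil_c(wil, RGF_USER_USAGE_6, BIT(0));	/* clear bits */
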
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c
index de15f14..2e831bf 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.c
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.c
@@ -14,7 +14,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include "linux/device.h"
+#include <linux/device.h>
 #include "wil_platform.h"
 
 int __init wil_platform_modinit(void)
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index c759759..2f35d4c 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -228,8 +228,8 @@
 	wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
 	/* wait till FW finish with previous command */
 	for (retry = 5; retry > 0; retry--) {
-		r->tail = ioread32(wil->csr + HOST_MBOX +
-				   offsetof(struct wil6210_mbox_ctl, tx.tail));
+		r->tail = wil_r(wil, RGF_MBOX +
+				offsetof(struct wil6210_mbox_ctl, tx.tail));
 		if (next_head != r->tail)
 			break;
 		msleep(20);
@@ -254,16 +254,16 @@
 	wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
 	wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
 	/* mark entry as full */
-	iowrite32(1, wil->csr + HOSTADDR(r->head) +
-		  offsetof(struct wil6210_mbox_ring_desc, sync));
+	wil_w(wil, r->head + offsetof(struct wil6210_mbox_ring_desc, sync), 1);
 	/* advance next ptr */
-	iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
-		  offsetof(struct wil6210_mbox_ctl, tx.head));
+	wil_w(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, tx.head),
+	      r->head = next_head);
 
 	trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);
 
 	/* interrupt to FW */
-	iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
+	wil_w(wil, RGF_USER_USER_ICR + offsetof(struct RGF_ICR, ICS),
+	      SW_INT_MBOX);
 
 	return 0;
 }
@@ -312,22 +312,44 @@
 	struct wiphy *wiphy = wil_to_wiphy(wil);
 	struct ieee80211_mgmt *rx_mgmt_frame =
 			(struct ieee80211_mgmt *)data->payload;
-	int ch_no = data->info.channel+1;
-	u32 freq = ieee80211_channel_to_frequency(ch_no,
-			IEEE80211_BAND_60GHZ);
-	struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
-	s32 signal = data->info.sqi;
-	__le16 fc = rx_mgmt_frame->frame_control;
-	u32 d_len = le32_to_cpu(data->info.len);
-	u16 d_status = le16_to_cpu(data->info.status);
+	int flen = len - offsetof(struct wmi_rx_mgmt_packet_event, payload);
+	int ch_no;
+	u32 freq;
+	struct ieee80211_channel *channel;
+	s32 signal;
+	__le16 fc;
+	u32 d_len;
+	u16 d_status;
 
-	wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d SQI %d%%\n",
+	if (flen < 0) {
+		wil_err(wil, "MGMT Rx: short event, len %d\n", len);
+		return;
+	}
+
+	d_len = le32_to_cpu(data->info.len);
+	if (d_len != flen) {
+		wil_err(wil,
+			"MGMT Rx: length mismatch, d_len %d should be %d\n",
+			d_len, flen);
+		return;
+	}
+
+	ch_no = data->info.channel + 1;
+	freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ);
+	channel = ieee80211_get_channel(wiphy, freq);
+	signal = data->info.sqi;
+	d_status = le16_to_cpu(data->info.status);
+	fc = rx_mgmt_frame->frame_control;
+
+	wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d SNR %d SQI %d%%\n",
 		    data->info.channel, data->info.mcs, data->info.snr,
 		    data->info.sqi);
 	wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
 		    le16_to_cpu(fc));
 	wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
 		    data->info.qid, data->info.mid, data->info.cid);
+	wil_hex_dump_wmi("MGMT Rx ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
+			 d_len, true);
 
 	if (!channel) {
 		wil_err(wil, "Frame on unsupported channel\n");
@@ -363,6 +385,17 @@
 	}
 }
 
+static void wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
+{
+	struct wmi_tx_mgmt_packet_event *data = d;
+	struct ieee80211_mgmt *mgmt_frame =
+			(struct ieee80211_mgmt *)data->payload;
+	int flen = len - offsetof(struct wmi_tx_mgmt_packet_event, payload);
+
+	wil_hex_dump_wmi("MGMT Tx ", DUMP_PREFIX_OFFSET, 16, 1, mgmt_frame,
+			 flen, true);
+}
+
 static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
 				  void *d, int len)
 {
@@ -659,6 +692,7 @@
 	{WMI_READY_EVENTID,		wmi_evt_ready},
 	{WMI_FW_READY_EVENTID,		wmi_evt_fw_ready},
 	{WMI_RX_MGMT_PACKET_EVENTID,	wmi_evt_rx_mgmt},
+	{WMI_TX_MGMT_PACKET_EVENTID,	wmi_evt_tx_mgmt},
 	{WMI_SCAN_COMPLETE_EVENTID,	wmi_evt_scan_complete},
 	{WMI_CONNECT_EVENTID,		wmi_evt_connect},
 	{WMI_DISCONNECT_EVENTID,	wmi_evt_disconnect},
@@ -695,8 +729,8 @@
 		u16 len;
 		bool q;
 
-		r->head = ioread32(wil->csr + HOST_MBOX +
-				   offsetof(struct wil6210_mbox_ctl, rx.head));
+		r->head = wil_r(wil, RGF_MBOX +
+				offsetof(struct wil6210_mbox_ctl, rx.head));
 		if (r->tail == r->head)
 			break;
 
@@ -734,8 +768,8 @@
 		cmd = (void *)&evt->event.wmi;
 		wil_memcpy_fromio_32(cmd, src, len);
 		/* mark entry as empty */
-		iowrite32(0, wil->csr + HOSTADDR(r->tail) +
-			  offsetof(struct wil6210_mbox_ring_desc, sync));
+		wil_w(wil, r->tail +
+		      offsetof(struct wil6210_mbox_ring_desc, sync), 0);
 		/* indicate */
 		if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
 		    (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
@@ -754,8 +788,8 @@
 		/* advance tail */
 		r->tail = r->base + ((r->tail - r->base +
 			  sizeof(struct wil6210_mbox_ring_desc)) % r->size);
-		iowrite32(r->tail, wil->csr + HOST_MBOX +
-			  offsetof(struct wil6210_mbox_ctl, rx.tail));
+		wil_w(wil, RGF_MBOX +
+		      offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail);
 
 		/* add to the pending list */
 		spin_lock_irqsave(&wil->wmi_ev_lock, flags);
@@ -772,7 +806,7 @@
 	     u16 reply_id, void *reply, u8 reply_size, int to_msec)
 {
 	int rc;
-	int remain;
+	unsigned long remain;
 
 	mutex_lock(&wil->wmi_mutex);
 
@@ -988,12 +1022,21 @@
 
 int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
 {
+	static const char *const names[] = {
+		[WMI_FRAME_BEACON]	= "BEACON",
+		[WMI_FRAME_PROBE_REQ]	= "PROBE_REQ",
+		[WMI_FRAME_PROBE_RESP]	= "PROBE_RESP",
+		[WMI_FRAME_ASSOC_REQ]	= "ASSOC_REQ",
+		[WMI_FRAME_ASSOC_RESP]	= "ASSOC_RESP",
+	};
 	int rc;
 	u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
 	struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
 
-	if (!cmd)
-		return -ENOMEM;
+	if (!cmd) {
+		rc = -ENOMEM;
+		goto out;
+	}
 	if (!ie)
 		ie_len = 0;
 
@@ -1003,6 +1046,12 @@
 	memcpy(cmd->ie_info, ie, ie_len);
 	rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
 	kfree(cmd);
+out:
+	if (rc) {
+		const char *name = type < ARRAY_SIZE(names) ?
+				   names[type] : "??";
+		wil_err(wil, "set_ie(%d %s) failed : %d\n", type, name, rc);
+	}
 
 	return rc;
 }
@@ -1129,15 +1178,42 @@
 
 int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
 {
+	int rc;
+	u16 reason_code;
 	struct wmi_disconnect_sta_cmd cmd = {
 		.disconnect_reason = cpu_to_le16(reason),
 	};
+	struct {
+		struct wil6210_mbox_hdr_wmi wmi;
+		struct wmi_disconnect_event evt;
+	} __packed reply;
 
 	ether_addr_copy(cmd.dst_mac, mac);
 
 	wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
 
-	return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd));
+	rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd),
+		      WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000);
+	/* failure to disconnect in reasonable time treated as FW error */
+	if (rc) {
+		wil_fw_error_recovery(wil);
+		return rc;
+	}
+
+	/* call event handler manually after processing wmi_call,
+	 * to avoid deadlock - disconnect event handler acquires wil->mutex
+	 * while it is already held here
+	 */
+	reason_code = le16_to_cpu(reply.evt.protocol_reason_status);
+
+	wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
+		    reply.evt.bssid, reason_code,
+		    reply.evt.disconnect_reason);
+
+	wil->sinfo_gen++;
+	wil6210_disconnect(wil, reply.evt.bssid, reason_code, true);
+
+	return 0;
 }
 
 int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout)
@@ -1279,7 +1355,7 @@
 		/* search for handler */
 		if (!wmi_evt_call_handler(wil, id, evt_data,
 					  len - sizeof(*wmi))) {
-			wil_err(wil, "Unhandled event 0x%04x\n", id);
+			wil_info(wil, "Unhandled event 0x%04x\n", id);
 		}
 	} else {
 		wil_err(wil, "Unknown event type\n");
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index 916123a..a335f94 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -929,8 +929,8 @@
 	b43_lo_write(dev, &cal->ctl);
 }
 
-/* Periodic LO maintanance work */
-void b43_lo_g_maintanance_work(struct b43_wldev *dev)
+/* Periodic LO maintenance work */
+void b43_lo_g_maintenance_work(struct b43_wldev *dev)
 {
 	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_g *gphy = phy->g;
diff --git a/drivers/net/wireless/b43/lo.h b/drivers/net/wireless/b43/lo.h
index 3b27e20..7b4df38 100644
--- a/drivers/net/wireless/b43/lo.h
+++ b/drivers/net/wireless/b43/lo.h
@@ -80,7 +80,7 @@
 
 void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all);
 
-void b43_lo_g_maintanance_work(struct b43_wldev *dev);
+void b43_lo_g_maintenance_work(struct b43_wldev *dev);
 void b43_lo_g_cleanup(struct b43_wldev *dev);
 void b43_lo_g_init(struct b43_wldev *dev);
 
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 727ce6e..462310e 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -3004,7 +3004,7 @@
 		   phy->rev == 1) {
 		//TODO: implement rev1 workaround
 	}
-	b43_lo_g_maintanance_work(dev);
+	b43_lo_g_maintenance_work(dev);
 	b43_mac_enable(dev);
 }
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index d86d1f1..a293275 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -469,6 +469,36 @@
 	return NULL;
 }
 
+static int brcmf_vif_change_validate(struct brcmf_cfg80211_info *cfg,
+				     struct brcmf_cfg80211_vif *vif,
+				     enum nl80211_iftype new_type)
+{
+	int iftype_num[NUM_NL80211_IFTYPES];
+	struct brcmf_cfg80211_vif *pos;
+
+	memset(&iftype_num[0], 0, sizeof(iftype_num));
+	list_for_each_entry(pos, &cfg->vif_list, list)
+		if (pos == vif)
+			iftype_num[new_type]++;
+		else
+			iftype_num[pos->wdev.iftype]++;
+
+	return cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num);
+}
+
+static int brcmf_vif_add_validate(struct brcmf_cfg80211_info *cfg,
+				  enum nl80211_iftype new_type)
+{
+	int iftype_num[NUM_NL80211_IFTYPES];
+	struct brcmf_cfg80211_vif *pos;
+
+	memset(&iftype_num[0], 0, sizeof(iftype_num));
+	list_for_each_entry(pos, &cfg->vif_list, list)
+		iftype_num[pos->wdev.iftype]++;
+
+	iftype_num[new_type]++;
+	return cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num);
+}
 
 static void convert_key_from_CPU(struct brcmf_wsec_key *key,
 				 struct brcmf_wsec_key_le *key_le)
@@ -663,8 +693,14 @@
 						     struct vif_params *params)
 {
 	struct wireless_dev *wdev;
+	int err;
 
 	brcmf_dbg(TRACE, "enter: %s type %d\n", name, type);
+	err = brcmf_vif_add_validate(wiphy_to_cfg(wiphy), type);
+	if (err) {
+		brcmf_err("iface validation failed: err=%d\n", err);
+		return ERR_PTR(err);
+	}
 	switch (type) {
 	case NL80211_IFTYPE_ADHOC:
 	case NL80211_IFTYPE_STATION:
@@ -823,8 +859,12 @@
 	s32 ap = 0;
 	s32 err = 0;
 
-	brcmf_dbg(TRACE, "Enter, ndev=%p, type=%d\n", ndev, type);
-
+	brcmf_dbg(TRACE, "Enter, idx=%d, type=%d\n", ifp->bssidx, type);
+	err = brcmf_vif_change_validate(wiphy_to_cfg(wiphy), vif, type);
+	if (err) {
+		brcmf_err("iface validation failed: err=%d\n", err);
+		return err;
+	}
 	switch (type) {
 	case NL80211_IFTYPE_MONITOR:
 	case NL80211_IFTYPE_WDS:
@@ -5695,63 +5735,132 @@
 	}
 };
 
+/**
+ * brcmf_setup_ifmodes() - determine interface modes and combinations.
+ *
+ * @wiphy: wiphy object.
+ * @ifp: interface object needed for feat module api.
+ *
+ * The interface modes and combinations are determined dynamically here
+ * based on firmware functionality.
+ *
+ * no p2p and no mbss:
+ *
+ *	#STA <= 1, #AP <= 1, channels = 1, 2 total
+ *
+ * no p2p and mbss:
+ *
+ *	#STA <= 1, #AP <= 1, channels = 1, 2 total
+ *	#AP <= 4, matching BI, channels = 1, 4 total
+ *
+ * p2p, no mchan, and mbss:
+ *
+ *	#STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 1, 3 total
+ *	#STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total
+ *	#AP <= 4, matching BI, channels = 1, 4 total
+ *
+ * p2p, mchan, and mbss:
+ *
+ *	#STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 2, 3 total
+ *	#STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total
+ *	#AP <= 4, matching BI, channels = 1, 4 total
+ */
 static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
 {
 	struct ieee80211_iface_combination *combo = NULL;
-	struct ieee80211_iface_limit *limits = NULL;
-	int i = 0, max_iface_cnt;
+	struct ieee80211_iface_limit *c0_limits = NULL;
+	struct ieee80211_iface_limit *p2p_limits = NULL;
+	struct ieee80211_iface_limit *mbss_limits = NULL;
+	bool mbss, p2p;
+	int i, c, n_combos;
 
-	combo = kzalloc(sizeof(*combo), GFP_KERNEL);
+	mbss = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS);
+	p2p = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_P2P);
+
+	n_combos = 1 + !!p2p + !!mbss;
+	combo = kcalloc(n_combos, sizeof(*combo), GFP_KERNEL);
 	if (!combo)
 		goto err;
 
-	limits = kzalloc(sizeof(*limits) * 4, GFP_KERNEL);
-	if (!limits)
+	c0_limits = kcalloc(p2p ? 3 : 2, sizeof(*c0_limits), GFP_KERNEL);
+	if (!c0_limits)
 		goto err;
 
+	if (p2p) {
+		p2p_limits = kcalloc(4, sizeof(*p2p_limits), GFP_KERNEL);
+		if (!p2p_limits)
+			goto err;
+	}
+
+	if (mbss) {
+		mbss_limits = kcalloc(1, sizeof(*mbss_limits), GFP_KERNEL);
+		if (!mbss_limits)
+			goto err;
+	}
+
 	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 				 BIT(NL80211_IFTYPE_ADHOC) |
 				 BIT(NL80211_IFTYPE_AP);
 
-	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
-		combo->num_different_channels = 2;
-	else
-		combo->num_different_channels = 1;
-
-	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) {
-		limits[i].max = 1;
-		limits[i++].types = BIT(NL80211_IFTYPE_STATION);
-		limits[i].max = 4;
-		limits[i++].types = BIT(NL80211_IFTYPE_AP);
-		max_iface_cnt = 5;
-	} else {
-		limits[i].max = 2;
-		limits[i++].types = BIT(NL80211_IFTYPE_STATION) |
-				    BIT(NL80211_IFTYPE_AP);
-		max_iface_cnt = 2;
-	}
-
-	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_P2P)) {
+	c = 0;
+	i = 0;
+	combo[c].num_different_channels = 1;
+	c0_limits[i].max = 1;
+	c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
+	if (p2p) {
+		if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
+			combo[c].num_different_channels = 2;
 		wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
 					  BIT(NL80211_IFTYPE_P2P_GO) |
 					  BIT(NL80211_IFTYPE_P2P_DEVICE);
-		limits[i].max = 1;
-		limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
-				    BIT(NL80211_IFTYPE_P2P_GO);
-		limits[i].max = 1;
-		limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
-		max_iface_cnt += 2;
+		c0_limits[i].max = 1;
+		c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
+		c0_limits[i].max = 1;
+		c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+				       BIT(NL80211_IFTYPE_P2P_GO);
+	} else {
+		c0_limits[i].max = 1;
+		c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
 	}
-	combo->max_interfaces = max_iface_cnt;
-	combo->limits = limits;
-	combo->n_limits = i;
+	combo[c].max_interfaces = i;
+	combo[c].n_limits = i;
+	combo[c].limits = c0_limits;
 
+	if (p2p) {
+		c++;
+		i = 0;
+		combo[c].num_different_channels = 1;
+		p2p_limits[i].max = 1;
+		p2p_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
+		p2p_limits[i].max = 1;
+		p2p_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+		p2p_limits[i].max = 1;
+		p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT);
+		p2p_limits[i].max = 1;
+		p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
+		combo[c].max_interfaces = i;
+		combo[c].n_limits = i;
+		combo[c].limits = p2p_limits;
+	}
+
+	if (mbss) {
+		c++;
+		combo[c].beacon_int_infra_match = true;
+		combo[c].num_different_channels = 1;
+		mbss_limits[0].max = 4;
+		mbss_limits[0].types = BIT(NL80211_IFTYPE_AP);
+		combo[c].max_interfaces = 4;
+		combo[c].n_limits = 1;
+		combo[c].limits = mbss_limits;
+	}
+	wiphy->n_iface_combinations = n_combos;
 	wiphy->iface_combinations = combo;
-	wiphy->n_iface_combinations = 1;
 	return 0;
 
 err:
-	kfree(limits);
+	kfree(c0_limits);
+	kfree(p2p_limits);
+	kfree(mbss_limits);
 	kfree(combo);
 	return -ENOMEM;
 }
@@ -5785,7 +5894,10 @@
 
 static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
 {
+	struct brcmf_pub *drvr = ifp->drvr;
+	const struct ieee80211_iface_combination *combo;
 	struct ieee80211_supported_band *band;
+	u16 max_interfaces = 0;
 	__le32 bandlist[3];
 	u32 n_bands;
 	int err, i;
@@ -5798,6 +5910,24 @@
 	if (err)
 		return err;
 
+	for (i = 0, combo = wiphy->iface_combinations;
+	     i < wiphy->n_iface_combinations; i++, combo++) {
+		max_interfaces = max(max_interfaces, combo->max_interfaces);
+	}
+
+	for (i = 0; i < max_interfaces && i < ARRAY_SIZE(drvr->addresses);
+	     i++) {
+		u8 *addr = drvr->addresses[i].addr;
+
+		memcpy(addr, drvr->mac, ETH_ALEN);
+		if (i) {
+			addr[0] |= BIT(1);
+			addr[ETH_ALEN - 1] ^= i;
+		}
+	}
+	wiphy->addresses = drvr->addresses;
+	wiphy->n_addresses = i;
+
 	wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
 	wiphy->cipher_suites = __wl_cipher_suites;
 	wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
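
The address loop just added derives one MAC per supported interface: index 0 keeps the firmware-provided MAC, and each subsequent address sets the locally-administered bit in the first octet and XORs the index into the last octet to keep the set unique. The same derivation in isolation, as a sketch with a hypothetical helper name:

/* Sketch: derive n locally-administered MACs from a base address. */
static void demo_derive_macs(const u8 *base, u8 addrs[][ETH_ALEN], int n)
{
	int i;

	for (i = 0; i < n; i++) {
		memcpy(addrs[i], base, ETH_ALEN);
		if (i) {
			addrs[i][0] |= BIT(1);		/* locally administered */
			addrs[i][ETH_ALEN - 1] ^= i;	/* keep them distinct */
		}
	}
}
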
@@ -6059,11 +6189,15 @@
 
 static void brcmf_free_wiphy(struct wiphy *wiphy)
 {
+	int i;
+
 	if (!wiphy)
 		return;
 
-	if (wiphy->iface_combinations)
-		kfree(wiphy->iface_combinations->limits);
+	if (wiphy->iface_combinations) {
+		for (i = 0; i < wiphy->n_iface_combinations; i++)
+			kfree(wiphy->iface_combinations[i].limits);
+	}
 	kfree(wiphy->iface_combinations);
 	if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
 		kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
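
Teardown has to mirror the new ownership model: each combination entry carries its own limits array, so freeing only the first entry's limits (as the old single-combination code did) would leak the rest. A standalone sketch of the symmetric release, assuming the same cfg80211 fields and a hypothetical function name:

/* Sketch: release a combination array where every entry owns its limits. */
static void demo_free_combinations(struct wiphy *wiphy)
{
	int i;

	for (i = 0; i < wiphy->n_iface_combinations; i++)
		kfree(wiphy->iface_combinations[i].limits);
	kfree(wiphy->iface_combinations);
	wiphy->iface_combinations = NULL;
	wiphy->n_iface_combinations = 0;
}
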
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.h b/drivers/net/wireless/brcm80211/brcmfmac/core.h
index fd74a9c..7463041 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.h
@@ -21,6 +21,7 @@
 #ifndef BRCMFMAC_CORE_H
 #define BRCMFMAC_CORE_H
 
+#include <net/cfg80211.h>
 #include "fweh.h"
 
 #define TOE_TX_CSUM_OL		0x00000001
@@ -118,6 +119,8 @@
 	/* Multicast data packets sent to dongle */
 	unsigned long tx_multicast;
 
+	struct mac_address addresses[BRCMF_MAX_IFS];
+
 	struct brcmf_if *iflist[BRCMF_MAX_IFS];
 
 	struct mutex proto_block;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
index 743f16b..971920f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -19,6 +19,7 @@
 #include <linux/device.h>
 #include <linux/firmware.h>
 #include <linux/module.h>
+#include <linux/bcm47xx_nvram.h>
 
 #include "debug.h"
 #include "firmware.h"
@@ -426,19 +427,33 @@
 	struct brcmf_fw *fwctx = ctx;
 	u32 nvram_length = 0;
 	void *nvram = NULL;
+	u8 *data = NULL;
+	size_t data_len;
+	bool raw_nvram;
 
 	brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
-	if (!fw && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
-		goto fail;
-
-	if (fw) {
-		nvram = brcmf_fw_nvram_strip(fw->data, fw->size, &nvram_length,
-					     fwctx->domain_nr, fwctx->bus_nr);
-		release_firmware(fw);
-		if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
+	if (fw && fw->data) {
+		data = (u8 *)fw->data;
+		data_len = fw->size;
+		raw_nvram = false;
+	} else {
+		data = bcm47xx_nvram_get_contents(&data_len);
+		if (!data && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
 			goto fail;
+		raw_nvram = true;
 	}
 
+	if (data)
+		nvram = brcmf_fw_nvram_strip(data, data_len, &nvram_length,
+					     fwctx->domain_nr, fwctx->bus_nr);
+
+	if (raw_nvram)
+		bcm47xx_nvram_release_contents(data);
+	if (fw)
+		release_firmware(fw);
+	if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
+		goto fail;
+
 	fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
 	kfree(fwctx);
 	return;
@@ -473,15 +488,9 @@
 	if (!ret)
 		return;
 
-	/* when nvram is optional call .done() callback here */
-	if (fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL) {
-		fwctx->done(fwctx->dev, fw, NULL, 0);
-		kfree(fwctx);
-		return;
-	}
+	brcmf_fw_request_nvram_done(NULL, fwctx);
+	return;
 
-	/* failed nvram request */
-	release_firmware(fw);
 fail:
 	brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
 	device_release_driver(fwctx->dev);
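
The restructured completion path gives NVRAM handling one funnel: prefer the blob from the firmware request, fall back to the platform store via bcm47xx_nvram_get_contents(), strip whichever source was found, and release it on the single common exit. A self-contained sketch of that fallback ordering; demo_get_nvram() is hypothetical and kmemdup() stands in for the real brcmf_fw_nvram_strip() step:

/* Sketch: file-first, platform-second NVRAM selection, one release path. */
static void *demo_get_nvram(const struct firmware *fw, size_t *lenp)
{
	char *data;
	size_t data_len = 0;
	bool from_platform = false;
	void *nvram = NULL;

	if (fw && fw->data) {			/* 1) firmware file */
		data = (char *)fw->data;
		data_len = fw->size;
	} else {				/* 2) platform NVRAM */
		data = bcm47xx_nvram_get_contents(&data_len);
		from_platform = true;
	}

	if (data)
		nvram = kmemdup(data, data_len, GFP_KERNEL);

	if (from_platform && data)		/* single common release */
		bcm47xx_nvram_release_contents(data);
	if (nvram)
		*lenp = data_len;
	return nvram;
}
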
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
index 5944063..8d1ab4a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
@@ -194,11 +194,15 @@
 	spin_lock_irqsave(&flow->block_lock, flags);
 
 	ring = flow->rings[flowid];
+	if (ring->blocked == blocked) {
+		spin_unlock_irqrestore(&flow->block_lock, flags);
+		return;
+	}
 	ifidx = brcmf_flowring_ifidx_get(flow, flowid);
 
 	currently_blocked = false;
 	for (i = 0; i < flow->nrofrings; i++) {
-		if (flow->rings[i]) {
+		if ((flow->rings[i]) && (i != flowid)) {
 			ring = flow->rings[i];
 			if ((ring->status == RING_OPEN) &&
 			    (brcmf_flowring_ifidx_get(flow, i) == ifidx)) {
@@ -209,8 +213,8 @@
 			}
 		}
 	}
-	ring->blocked = blocked;
-	if (currently_blocked == blocked) {
+	flow->rings[flowid]->blocked = blocked;
+	if (currently_blocked) {
 		spin_unlock_irqrestore(&flow->block_lock, flags);
 		return;
 	}
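
Two things change in the flowring block logic: an early return when the ring is already in the requested state, and the scan over sibling rings now skips the ring being modified so the ring pointer is not clobbered before the final write. The idempotence guard is the reusable part; a minimal sketch with hypothetical types:

/* Sketch: idempotent state change under a spinlock. */
struct demo_ring {
	spinlock_t lock;	/* spin_lock_init() at creation time */
	bool blocked;
};

static void demo_set_blocked(struct demo_ring *r, bool blocked)
{
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);
	if (r->blocked == blocked) {		/* nothing to do */
		spin_unlock_irqrestore(&r->lock, flags);
		return;
	}
	r->blocked = blocked;
	/* ... act only on a genuine transition ... */
	spin_unlock_irqrestore(&r->lock, flags);
}
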
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index cbf033f..1326898 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -85,7 +85,6 @@
 	BRCMF_ENUM_DEF(IF, 54) \
 	BRCMF_ENUM_DEF(P2P_DISC_LISTEN_COMPLETE, 55) \
 	BRCMF_ENUM_DEF(RSSI, 56) \
-	BRCMF_ENUM_DEF(PFN_SCAN_COMPLETE, 57) \
 	BRCMF_ENUM_DEF(EXTLOG_MSG, 58) \
 	BRCMF_ENUM_DEF(ACTION_FRAME, 59) \
 	BRCMF_ENUM_DEF(ACTION_FRAME_COMPLETE, 60) \
@@ -103,8 +102,7 @@
 	BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
 	BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
 	BRCMF_ENUM_DEF(TDLS_PEER_EVENT, 92) \
-	BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \
-	BRCMF_ENUM_DEF(PSTA_PRIMARY_INTF_IND, 128)
+	BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
 
 #define BRCMF_ENUM_DEF(id, val) \
 	BRCMF_E_##id = (val),
@@ -112,7 +110,11 @@
 /* firmware event codes sent by the dongle */
 enum brcmf_fweh_event_code {
 	BRCMF_FWEH_EVENT_ENUM_DEFLIST
-	BRCMF_E_LAST
+	/* this determines the event mask length, which must match the
+	 * minimum length check in device firmware, so it is hard-coded
+	 * here.
+	 */
+	BRCMF_E_LAST = 139
 };
 #undef BRCMF_ENUM_DEF
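
Removing PFN_SCAN_COMPLETE and PSTA_PRIMARY_INTF_IND would have silently shrunk the auto-assigned BRCMF_E_LAST, and with it the event mask the firmware length-checks, so the sentinel is now pinned at 139 independent of the X-macro list. The construct in isolation, with illustrative names:

/* Sketch: X-macro event list with a pinned _LAST sentinel so the mask
 * length survives list edits.
 */
#define DEMO_EVENT_LIST \
	DEMO_ENUM_DEF(ASSOC, 7) \
	DEMO_ENUM_DEF(DEAUTH, 8) \
	DEMO_ENUM_DEF(RSSI, 56)

#define DEMO_ENUM_DEF(id, val) DEMO_E_##id = (val),
enum demo_event_code {
	DEMO_EVENT_LIST
	DEMO_E_LAST = 139	/* mask length the firmware checks */
};
#undef DEMO_ENUM_DEF
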
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 898c380..7b2136c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -1360,6 +1360,60 @@
 	}
 }
 
+#ifdef DEBUG
+static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
+	struct brcmf_pub *drvr = bus_if->drvr;
+	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+	struct brcmf_commonring *commonring;
+	u16 i;
+	struct brcmf_flowring_ring *ring;
+	struct brcmf_flowring_hash *hash;
+
+	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
+	seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
+	seq_printf(seq, "h2d_rx_submit:  rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
+	seq_printf(seq, "d2h_ctl_cmplt:  rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
+	seq_printf(seq, "d2h_tx_cmplt:   rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
+	seq_printf(seq, "d2h_rx_cmplt:   rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+
+	seq_printf(seq, "\nh2d_flowrings: depth %u\n",
+		   BRCMF_H2D_TXFLOWRING_MAX_ITEM);
+	seq_puts(seq, "Active flowrings:\n");
+	hash = msgbuf->flow->hash;
+	for (i = 0; i < msgbuf->flow->nrofrings; i++) {
+		if (!msgbuf->flow->rings[i])
+			continue;
+		ring = msgbuf->flow->rings[i];
+		if (ring->status != RING_OPEN)
+			continue;
+		commonring = msgbuf->flowrings[i];
+		hash = &msgbuf->flow->hash[ring->hash_id];
+		seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n"
+				"        ifidx %u, fifo %u, da %pM\n",
+				i, commonring->r_ptr, commonring->w_ptr,
+				skb_queue_len(&ring->skblist), ring->blocked,
+				hash->ifidx, hash->fifo, hash->mac);
+	}
+
+	return 0;
+}
+#else
+static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
+{
+	return 0;
+}
+#endif
 
 int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
 {
@@ -1460,6 +1514,8 @@
 	spin_lock_init(&msgbuf->flowring_work_lock);
 	INIT_LIST_HEAD(&msgbuf->work_queue);
 
+	brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read);
+
 	return 0;
 
 fail:
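
The new msgbuf_stats file follows the driver's debugfs convention: a seq_file read callback whose ->private is the struct device, compiled down to a stub when DEBUG is off, and registered once at attach time. A minimal sketch of such a reader, assuming the brcmf_debugfs_add_entry() helper used above; the entry name and the counter printed are hypothetical:

/* Sketch: debugfs seq_file reader in the driver's style. */
static int demo_stats_read(struct seq_file *seq, void *data)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
	struct brcmf_pub *drvr = bus_if->drvr;

	seq_printf(seq, "tx_multicast: %lu\n", drvr->tx_multicast);
	return 0;
}

/* registered once at attach time: */
	brcmf_debugfs_add_entry(drvr, "demo_stats", demo_stats_read);
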
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index d36f5f3..f990e3d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -2564,15 +2564,6 @@
 	}
 }
 
-static void atomic_orr(int val, atomic_t *v)
-{
-	int old_val;
-
-	old_val = atomic_read(v);
-	while (atomic_cmpxchg(v, old_val, val | old_val) != old_val)
-		old_val = atomic_read(v);
-}
-
 static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 {
 	struct brcmf_core *buscore;
@@ -2595,7 +2586,7 @@
 	if (val) {
 		brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
 		bus->sdcnt.f1regdata++;
-		atomic_orr(val, &bus->intstatus);
+		atomic_or(val, &bus->intstatus);
 	}
 
 	return ret;
@@ -2712,7 +2703,7 @@
 
 	/* Keep still-pending events for next scheduling */
 	if (intstatus)
-		atomic_orr(intstatus, &bus->intstatus);
+		atomic_or(intstatus, &bus->intstatus);
 
 	brcmf_sdio_clrintr(bus);
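
With atomic_or() now provided generically for all architectures, the driver's private atomic_orr() cmpxchg loop is redundant. The two forms below are equivalent; the helper compiles to a single native read-modify-write where the architecture has one:

/* Equivalent forms; atomic_or() is preferred. */
static void demo_or_open_coded(int val, atomic_t *v)
{
	int old_val = atomic_read(v);

	while (atomic_cmpxchg(v, old_val, val | old_val) != old_val)
		old_val = atomic_read(v);
}

static void demo_or(int val, atomic_t *v)
{
	atomic_or(val, v);
}
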
 
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index ab775a5..d2c5747 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -1472,9 +1472,7 @@
 	wl->timers = t;
 
 #ifdef DEBUG
-	t->name = kmalloc(strlen(name) + 1, GFP_ATOMIC);
-	if (t->name)
-		strcpy(t->name, name);
+	t->name = kstrdup(name, GFP_ATOMIC);
 #endif
 
 	return t;
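
kstrdup() folds the kmalloc(strlen() + 1) plus strcpy() pair into one call with the same NULL-on-failure contract, so callers still have to tolerate a NULL name. A sketch with a hypothetical wrapper and type:

/* Sketch: one-call string duplication for a debug label. */
struct demo_timer {
	char *name;
};

static int demo_set_label(struct demo_timer *t, const char *name)
{
	t->name = kstrdup(name, GFP_ATOMIC);	/* NUL copied, NULL on OOM */
	return t->name ? 0 : -ENOMEM;
}
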
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index 7603546..29185ae 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -467,7 +467,6 @@
 	.remove		= cw1200_spi_disconnect,
 	.driver = {
 		.name		= "cw1200_wlan_spi",
-		.bus            = &spi_bus_type,
 		.owner          = THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm		= &cw1200_pm_ops,
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 01de1a3..80d4228 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -865,7 +865,7 @@
 
 	switch(type) {
 	case HOSTAP_INTERFACE_AP:
-		dev->tx_queue_len = 0;	/* use main radio device queue */
+		dev->priv_flags |= IFF_NO_QUEUE;	/* use main radio device queue */
 		dev->netdev_ops = &hostap_mgmt_netdev_ops;
 		dev->type = ARPHRD_IEEE80211;
 		dev->header_ops = &hostap_80211_ops;
@@ -874,7 +874,7 @@
 		dev->netdev_ops = &hostap_master_ops;
 		break;
 	default:
-		dev->tx_queue_len = 0;	/* use main radio device queue */
+		dev->priv_flags |= IFF_NO_QUEUE;	/* use main radio device queue */
 		dev->netdev_ops = &hostap_netdev_ops;
 	}
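
As of this cycle a zero tx_queue_len no longer implies "no qdisc needed"; virtual devices that hand their frames to an underlying device must say so explicitly with IFF_NO_QUEUE, which is what both hostap branches now do. The idiom, in a hypothetical setup callback:

/* Sketch: a virtual netdev that defers queueing to its lower device. */
static void demo_setup(struct net_device *dev)
{
	dev->priv_flags |= IFF_NO_QUEUE;	/* no qdisc on this device */
	/* frames are queued on the underlying master radio device */
}
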
 
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 08eb229..36818c7 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1410,7 +1410,7 @@
 static int ipw2100_hw_phy_off(struct ipw2100_priv *priv)
 {
 
-#define HW_PHY_OFF_LOOP_DELAY (HZ / 5000)
+#define HW_PHY_OFF_LOOP_DELAY (msecs_to_jiffies(50))
 
 	struct host_command cmd = {
 		.host_command = CARD_DISABLE_PHY_OFF,
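
This one-liner fixes a real bug: HZ / 5000 is integer-zero for every supported tick rate (HZ is at most 1000), so the polling loop ran with no delay at all. msecs_to_jiffies() expresses the interval in time units and converts correctly for any HZ:

/* Tick-rate-safe delay: msecs_to_jiffies() never truncates a nonzero
 * interval to zero the way raw HZ division can.
 */
#define DEMO_LOOP_DELAY		msecs_to_jiffies(50)	/* 50 ms on any HZ */
/* broken form for comparison: (HZ / 5000) == 0 whenever HZ < 5000 */
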
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 7f4cb69..af1b3e6 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3259,7 +3259,7 @@
 
 	while (size && PAGE_SIZE - len) {
 		hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
-				   PAGE_SIZE - len, 1);
+				   PAGE_SIZE - len, true);
 		len = strlen(buf);
 		if (PAGE_SIZE - len)
 			buf[len++] = '\n';
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
index 3440101..908b9f4 100644
--- a/drivers/net/wireless/iwlegacy/debug.c
+++ b/drivers/net/wireless/iwlegacy/debug.c
@@ -515,12 +515,8 @@
 	    scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n",
 		      eeprom_ver);
 	for (ofs = 0; ofs < eeprom_len; ofs += 16) {
-		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
-		hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
-				   buf_size - pos, 0);
-		pos += strlen(buf + pos);
-		if (buf_size - pos > 0)
-			buf[pos++] = '\n';
+		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
+				 ofs, ptr + ofs);
 	}
 
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
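
The %*ph printk extension dumps up to 64 bytes of hex in a single specifier, replacing the scnprintf/hex_dump_to_buffer/strlen dance here and in the dvm debugfs hunk below. A compact sketch of a 16-bytes-per-row dump using it; demo_dump() is a hypothetical name and, like the EEPROM dumps above, it assumes len is a multiple of 16:

/* Sketch: 16-bytes-per-row hex dump via %16ph. */
static size_t demo_dump(char *buf, size_t size, const u8 *data, size_t len)
{
	size_t ofs, pos = 0;

	for (ofs = 0; ofs < len; ofs += 16)
		pos += scnprintf(buf + pos, size - pos, "0x%.4zx %16ph\n",
				 ofs, data + ofs);
	return pos;
}
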
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index c160dad..991def8 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -122,9 +122,8 @@
 void iwl_down(struct iwl_priv *priv);
 void iwl_cancel_deferred_work(struct iwl_priv *priv);
 void iwlagn_prepare_restart(struct iwl_priv *priv);
-int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode,
-				 struct iwl_rx_cmd_buffer *rxb,
-				 struct iwl_device_cmd *cmd);
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+		     struct iwl_rx_cmd_buffer *rxb);
 
 bool iwl_check_for_ct_kill(struct iwl_priv *priv);
 
@@ -216,11 +215,9 @@
 		       struct ieee80211_sta *sta, u16 tid);
 int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
 			struct ieee80211_sta *sta, u16 tid);
-int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
-				   struct iwl_rx_cmd_buffer *rxb,
-				   struct iwl_device_cmd *cmd);
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd);
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+				   struct iwl_rx_cmd_buffer *rxb);
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
 
 static inline u32 iwl_tx_status_to_mac80211(u32 status)
 {
@@ -277,9 +274,6 @@
 
 /* bt coex */
 void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
-int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
-				  struct iwl_rx_cmd_buffer *rxb,
-				  struct iwl_device_cmd *cmd);
 void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
 void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
 void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
@@ -332,8 +326,7 @@
 
 int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 		    struct iwl_link_quality_cmd *lq, u8 flags, bool init);
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd);
+void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
 int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 		      struct ieee80211_sta *sta);
 
@@ -480,7 +473,7 @@
 } while (0)
 #endif				/* CONFIG_IWLWIFI_DEBUG */
 
-extern const char *const iwl_dvm_cmd_strings[REPLY_MAX];
+extern const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1];
 
 static inline const char *iwl_dvm_get_cmd_string(u8 cmd)
 {
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 0ffb6ff..b15e44f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -310,12 +310,8 @@
 	pos += scnprintf(buf + pos, buf_size - pos,
 			 "NVM version: 0x%x\n", nvm_ver);
 	for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
-		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
-		hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
-				   buf_size - pos, 0);
-		pos += strlen(buf + pos);
-		if (buf_size - pos > 0)
-			buf[pos++] = '\n';
+		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
+				 ofs, ptr + ofs);
 	}
 
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 3811878..0ba3e56 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -669,6 +669,8 @@
 	/* ieee device used by generic ieee processing code */
 	struct ieee80211_hw *hw;
 
+	struct napi_struct *napi;
+
 	struct list_head calib_results;
 
 	struct workqueue_struct *workqueue;
@@ -678,9 +680,8 @@
 	enum ieee80211_band band;
 	u8 valid_contexts;
 
-	int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
-				       struct iwl_rx_cmd_buffer *rxb,
-				       struct iwl_device_cmd *cmd);
+	void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
+				       struct iwl_rx_cmd_buffer *rxb);
 
 	struct iwl_notif_wait_data notif_wait;
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 1d2223d..ab45819 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -659,9 +659,8 @@
 	return need_update;
 }
 
-int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
-				  struct iwl_rx_cmd_buffer *rxb,
-				  struct iwl_device_cmd *cmd)
+static void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
+					 struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data;
@@ -669,7 +668,7 @@
 
 	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
 		/* bt coex disabled */
-		return 0;
+		return;
 	}
 
 	IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
@@ -714,7 +713,6 @@
 	/* FIXME: based on notification, adjust the prio_boost */
 
 	priv->bt_ci_compliance = coex->bt_ci_compliance;
-	return 0;
 }
 
 void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 7acaa26..453f7c3 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -250,12 +250,24 @@
 		}
 	}
 
+	ret = iwl_trans_start_hw(priv->trans);
+	if (ret) {
+		IWL_ERR(priv, "Failed to start HW: %d\n", ret);
+		goto error;
+	}
+
 	ret = iwl_run_init_ucode(priv);
 	if (ret) {
 		IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
 		goto error;
 	}
 
+	ret = iwl_trans_start_hw(priv->trans);
+	if (ret) {
+		IWL_ERR(priv, "Failed to start HW: %d\n", ret);
+		goto error;
+	}
+
 	ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
 	if (ret) {
 		IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
@@ -432,7 +444,7 @@
 		u32 error_id;
 	} err_info;
 	struct iwl_notification_wait status_wait;
-	static const u8 status_cmd[] = {
+	static const u16 status_cmd[] = {
 		REPLY_WOWLAN_GET_STATUS,
 	};
 	struct iwlagn_wowlan_status status_data = {};
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 234e30f..e7616f0 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -2029,17 +2029,6 @@
 	return false;
 }
 
-static void iwl_napi_add(struct iwl_op_mode *op_mode,
-			 struct napi_struct *napi,
-			 struct net_device *napi_dev,
-			 int (*poll)(struct napi_struct *, int),
-			 int weight)
-{
-	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
-
-	ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight);
-}
-
 static const struct iwl_op_mode_ops iwl_dvm_ops = {
 	.start = iwl_op_mode_dvm_start,
 	.stop = iwl_op_mode_dvm_stop,
@@ -2052,7 +2041,6 @@
 	.cmd_queue_full = iwl_cmd_queue_full,
 	.nic_config = iwl_nic_config,
 	.wimax_active = iwl_wimax_active,
-	.napi_add = iwl_napi_add,
 };
 
 /*****************************************************************************
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 3bd7c86..cef921c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -1416,11 +1416,11 @@
 /*
  * Try to switch to new modulation mode from legacy
  */
-static int rs_move_legacy_other(struct iwl_priv *priv,
-				struct iwl_lq_sta *lq_sta,
-				struct ieee80211_conf *conf,
-				struct ieee80211_sta *sta,
-				int index)
+static void rs_move_legacy_other(struct iwl_priv *priv,
+				 struct iwl_lq_sta *lq_sta,
+				 struct ieee80211_conf *conf,
+				 struct ieee80211_sta *sta,
+				 int index)
 {
 	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
 	struct iwl_scale_tbl_info *search_tbl =
@@ -1575,7 +1575,7 @@
 
 	}
 	search_tbl->lq_type = LQ_NONE;
-	return 0;
+	return;
 
 out:
 	lq_sta->search_better_tbl = 1;
@@ -1584,17 +1584,15 @@
 		tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
 	if (update_search_tbl_counter)
 		search_tbl->action = tbl->action;
-	return 0;
-
 }
 
 /*
  * Try to switch to new modulation mode from SISO
  */
-static int rs_move_siso_to_other(struct iwl_priv *priv,
-				 struct iwl_lq_sta *lq_sta,
-				 struct ieee80211_conf *conf,
-				 struct ieee80211_sta *sta, int index)
+static void rs_move_siso_to_other(struct iwl_priv *priv,
+				  struct iwl_lq_sta *lq_sta,
+				  struct ieee80211_conf *conf,
+				  struct ieee80211_sta *sta, int index)
 {
 	u8 is_green = lq_sta->is_green;
 	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1747,7 +1745,7 @@
 			break;
 	}
 	search_tbl->lq_type = LQ_NONE;
-	return 0;
+	return;
 
  out:
 	lq_sta->search_better_tbl = 1;
@@ -1756,17 +1754,15 @@
 		tbl->action = IWL_SISO_SWITCH_ANTENNA1;
 	if (update_search_tbl_counter)
 		search_tbl->action = tbl->action;
-
-	return 0;
 }
 
 /*
  * Try to switch to new modulation mode from MIMO2
  */
-static int rs_move_mimo2_to_other(struct iwl_priv *priv,
-				 struct iwl_lq_sta *lq_sta,
-				 struct ieee80211_conf *conf,
-				 struct ieee80211_sta *sta, int index)
+static void rs_move_mimo2_to_other(struct iwl_priv *priv,
+				   struct iwl_lq_sta *lq_sta,
+				   struct ieee80211_conf *conf,
+				   struct ieee80211_sta *sta, int index)
 {
 	s8 is_green = lq_sta->is_green;
 	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1917,7 +1913,7 @@
 			break;
 	}
 	search_tbl->lq_type = LQ_NONE;
-	return 0;
+	return;
  out:
 	lq_sta->search_better_tbl = 1;
 	tbl->action++;
@@ -1926,17 +1922,15 @@
 	if (update_search_tbl_counter)
 		search_tbl->action = tbl->action;
 
-	return 0;
-
 }
 
 /*
  * Try to switch to new modulation mode from MIMO3
  */
-static int rs_move_mimo3_to_other(struct iwl_priv *priv,
-				 struct iwl_lq_sta *lq_sta,
-				 struct ieee80211_conf *conf,
-				 struct ieee80211_sta *sta, int index)
+static void rs_move_mimo3_to_other(struct iwl_priv *priv,
+				   struct iwl_lq_sta *lq_sta,
+				   struct ieee80211_conf *conf,
+				   struct ieee80211_sta *sta, int index)
 {
 	s8 is_green = lq_sta->is_green;
 	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -2093,7 +2087,7 @@
 			break;
 	}
 	search_tbl->lq_type = LQ_NONE;
-	return 0;
+	return;
  out:
 	lq_sta->search_better_tbl = 1;
 	tbl->action++;
@@ -2101,9 +2095,6 @@
 		tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
 	if (update_search_tbl_counter)
 		search_tbl->action = tbl->action;
-
-	return 0;
-
 }
 
 /*
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index debec96..4a45b0b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -39,7 +39,7 @@
 
 #define IWL_CMD_ENTRY(x) [x] = #x
 
-const char *const iwl_dvm_cmd_strings[REPLY_MAX] = {
+const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1] = {
 	IWL_CMD_ENTRY(REPLY_ALIVE),
 	IWL_CMD_ENTRY(REPLY_ERROR),
 	IWL_CMD_ENTRY(REPLY_ECHO),
@@ -123,9 +123,8 @@
  *
  ******************************************************************************/
 
-static int iwlagn_rx_reply_error(struct iwl_priv *priv,
-			       struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_error(struct iwl_priv *priv,
+				  struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_error_resp *err_resp = (void *)pkt->data;
@@ -136,11 +135,9 @@
 		err_resp->cmd_id,
 		le16_to_cpu(err_resp->bad_cmd_seq_num),
 		le32_to_cpu(err_resp->error_info));
-	return 0;
 }
 
-static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd)
+static void iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_csa_notification *csa = (void *)pkt->data;
@@ -152,7 +149,7 @@
 	struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
 
 	if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
-		return 0;
+		return;
 
 	if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
 		rxon->channel = csa->channel;
@@ -165,13 +162,11 @@
 			le16_to_cpu(csa->channel));
 		iwl_chswitch_done(priv, false);
 	}
-	return 0;
 }
 
 
-static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
-					  struct iwl_rx_cmd_buffer *rxb,
-					  struct iwl_device_cmd *cmd)
+static void iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
+					     struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_spectrum_notification *report = (void *)pkt->data;
@@ -179,17 +174,15 @@
 	if (!report->state) {
 		IWL_DEBUG_11H(priv,
 			"Spectrum Measure Notification: Start\n");
-		return 0;
+		return;
 	}
 
 	memcpy(&priv->measure_report, report, sizeof(*report));
 	priv->measurement_status |= MEASUREMENT_READY;
-	return 0;
 }
 
-static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
-				  struct iwl_rx_cmd_buffer *rxb,
-				  struct iwl_device_cmd *cmd)
+static void iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
+				     struct iwl_rx_cmd_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -197,24 +190,20 @@
 	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
 		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
 #endif
-	return 0;
 }
 
-static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
-					     struct iwl_rx_cmd_buffer *rxb,
-					     struct iwl_device_cmd *cmd)
+static void iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+						struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u32 __maybe_unused len = iwl_rx_packet_len(pkt);
 	IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
 			"notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len);
 	iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
-	return 0;
 }
 
-static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
-				struct iwl_rx_cmd_buffer *rxb,
-				struct iwl_device_cmd *cmd)
+static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
+				   struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwlagn_beacon_notif *beacon = (void *)pkt->data;
@@ -232,8 +221,6 @@
 #endif
 
 	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-
-	return 0;
 }
 
 /**
@@ -448,9 +435,8 @@
 }
 #endif
 
-static int iwlagn_rx_statistics(struct iwl_priv *priv,
-			      struct iwl_rx_cmd_buffer *rxb,
-			      struct iwl_device_cmd *cmd)
+static void iwlagn_rx_statistics(struct iwl_priv *priv,
+				 struct iwl_rx_cmd_buffer *rxb)
 {
 	unsigned long stamp = jiffies;
 	const int reg_recalib_period = 60;
@@ -505,7 +491,7 @@
 			  len, sizeof(struct iwl_bt_notif_statistics),
 			  sizeof(struct iwl_notif_statistics));
 		spin_unlock(&priv->statistics.lock);
-		return 0;
+		return;
 	}
 
 	change = common->temperature != priv->statistics.common.temperature ||
@@ -550,13 +536,10 @@
 		priv->lib->temperature(priv);
 
 	spin_unlock(&priv->statistics.lock);
-
-	return 0;
 }
 
-static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
-				    struct iwl_rx_cmd_buffer *rxb,
-				    struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_statistics(struct iwl_priv *priv,
+				       struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_notif_statistics *stats = (void *)pkt->data;
@@ -572,15 +555,14 @@
 #endif
 		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
 	}
-	iwlagn_rx_statistics(priv, rxb, cmd);
-	return 0;
+
+	iwlagn_rx_statistics(priv, rxb);
 }
 
 /* Handle notification from uCode that card's power state is changing
  * due to software, hardware, or critical temperature RFKILL */
-static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
-				    struct iwl_rx_cmd_buffer *rxb,
-				    struct iwl_device_cmd *cmd)
+static void iwlagn_rx_card_state_notif(struct iwl_priv *priv,
+				       struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
@@ -627,12 +609,10 @@
 	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
 		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
 			test_bit(STATUS_RF_KILL_HW, &priv->status));
-	return 0;
 }
 
-static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
-				       struct iwl_rx_cmd_buffer *rxb,
-				       struct iwl_device_cmd *cmd)
+static void iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
+					  struct iwl_rx_cmd_buffer *rxb)
 
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -649,14 +629,12 @@
 		if (!test_bit(STATUS_SCANNING, &priv->status))
 			iwl_init_sensitivity(priv);
 	}
-	return 0;
 }
 
 /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
  * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
-				struct iwl_rx_cmd_buffer *rxb,
-				struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
+				   struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
@@ -664,7 +642,6 @@
 	priv->ampdu_ref++;
 	memcpy(&priv->last_phy_res, pkt->data,
 	       sizeof(struct iwl_rx_phy_res));
-	return 0;
 }
 
 /*
@@ -786,7 +763,7 @@
 
 	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
 
-	ieee80211_rx(priv->hw, skb);
+	ieee80211_rx_napi(priv->hw, skb, priv->napi);
 }
 
 static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
@@ -890,9 +867,8 @@
 }
 
 /* Called for REPLY_RX_MPDU_CMD */
-static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
-			    struct iwl_rx_cmd_buffer *rxb,
-			    struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
+			       struct iwl_rx_cmd_buffer *rxb)
 {
 	struct ieee80211_hdr *header;
 	struct ieee80211_rx_status rx_status = {};
@@ -906,7 +882,7 @@
 
 	if (!priv->last_phy_res_valid) {
 		IWL_ERR(priv, "MPDU frame without cached PHY data\n");
-		return 0;
+		return;
 	}
 	phy_res = &priv->last_phy_res;
 	amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
@@ -919,14 +895,14 @@
 	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
 		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
 				phy_res->cfg_phy_cnt);
-		return 0;
+		return;
 	}
 
 	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
 	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
 		IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
 				le32_to_cpu(rx_pkt_status));
-		return 0;
+		return;
 	}
 
 	/* This will be used in several places later */
@@ -998,12 +974,10 @@
 
 	iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
 				    rxb, &rx_status);
-	return 0;
 }
 
-static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
-				      struct iwl_rx_cmd_buffer *rxb,
-				      struct iwl_device_cmd *cmd)
+static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
+				       struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_wipan_noa_data *new_data, *old_data;
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -1041,8 +1015,6 @@
 
 	if (old_data)
 		kfree_rcu(old_data, rcu_head);
-
-	return 0;
 }
 
 /**
@@ -1053,8 +1025,7 @@
  */
 void iwl_setup_rx_handlers(struct iwl_priv *priv)
 {
-	int (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd);
+	void (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
 
 	handlers = priv->rx_handlers;
 
@@ -1102,12 +1073,11 @@
 		iwlagn_bt_rx_handler_setup(priv);
 }
 
-int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
-		    struct iwl_device_cmd *cmd)
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+		     struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
-	int err = 0;
 
 	/*
 	 * Do the notification wait before RX handlers so
@@ -1121,12 +1091,11 @@
 	 *   rx_handlers table.  See iwl_setup_rx_handlers() */
 	if (priv->rx_handlers[pkt->hdr.cmd]) {
 		priv->rx_handlers_stats[pkt->hdr.cmd]++;
-		err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
+		priv->rx_handlers[pkt->hdr.cmd](priv, rxb);
 	} else {
 		/* No handling needed */
 		IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
 			     iwl_dvm_get_cmd_string(pkt->hdr.cmd),
 			     pkt->hdr.cmd);
 	}
-	return err;
 }
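
This dispatch site is why the handler signature could shrink: the int return was always 0 and nothing consumed it, and the device command argument was only ever used by the ADD_STA path, which now reads its response synchronously (see sta.c below). The table-driven shape, reduced to a sketch with illustrative names; the array is sized REPLY_MAX + 1 so the highest command value indexes in bounds, matching the iwl_dvm_cmd_strings fix above:

/* Sketch: table-driven dispatch with void handlers. */
typedef void (*demo_rx_handler_t)(struct iwl_priv *priv,
				  struct iwl_rx_cmd_buffer *rxb);

static demo_rx_handler_t demo_handlers[REPLY_MAX + 1];

static void demo_dispatch(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	if (demo_handlers[pkt->hdr.cmd])
		demo_handlers[pkt->hdr.cmd](priv, rxb);
	/* unhandled notifications need no action */
}
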
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index ed50de6..85ceceb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1,6 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -123,7 +124,7 @@
 	__le32 old_filter = send->filter_flags;
 	u8 old_dev_type = send->dev_type;
 	int ret;
-	static const u8 deactivate_cmd[] = {
+	static const u16 deactivate_cmd[] = {
 		REPLY_WIPAN_DEACTIVATION_COMPLETE
 	};
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 43bef90..6481594 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -247,9 +247,8 @@
 }
 
 /* Service response to REPLY_SCAN_CMD (0x80) */
-static int iwl_rx_reply_scan(struct iwl_priv *priv,
-			      struct iwl_rx_cmd_buffer *rxb,
-			      struct iwl_device_cmd *cmd)
+static void iwl_rx_reply_scan(struct iwl_priv *priv,
+			      struct iwl_rx_cmd_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -257,13 +256,11 @@
 
 	IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
 #endif
-	return 0;
 }
 
 /* Service SCAN_START_NOTIFICATION (0x82) */
-static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
-				    struct iwl_rx_cmd_buffer *rxb,
-				    struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
+				    struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_scanstart_notification *notif = (void *)pkt->data;
@@ -277,14 +274,11 @@
 		       le32_to_cpu(notif->tsf_high),
 		       le32_to_cpu(notif->tsf_low),
 		       notif->status, notif->beacon_timer);
-
-	return 0;
 }
 
 /* Service SCAN_RESULTS_NOTIFICATION (0x83) */
-static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
-				      struct iwl_rx_cmd_buffer *rxb,
-				      struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
+				      struct iwl_rx_cmd_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -303,13 +297,11 @@
 		       le32_to_cpu(notif->statistics[0]),
 		       le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
 #endif
-	return 0;
 }
 
 /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
-static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
-				       struct iwl_rx_cmd_buffer *rxb,
-				       struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
+				       struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data;
@@ -356,7 +348,6 @@
 		queue_work(priv->workqueue,
 			   &priv->bt_traffic_change_work);
 	}
-	return 0;
 }
 
 void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index 6ec86ad..0fa67d3 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -60,41 +60,28 @@
 	return 0;
 }
 
-static int iwl_process_add_sta_resp(struct iwl_priv *priv,
-				    struct iwl_addsta_cmd *addsta,
-				    struct iwl_rx_packet *pkt)
+static void iwl_process_add_sta_resp(struct iwl_priv *priv,
+				     struct iwl_rx_packet *pkt)
 {
 	struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data;
-	u8 sta_id = addsta->sta.sta_id;
-	int ret = -EIO;
 
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
-			pkt->hdr.flags);
-		return ret;
-	}
-
-	IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
-		       sta_id);
+	IWL_DEBUG_INFO(priv, "Processing response for adding station\n");
 
 	spin_lock_bh(&priv->sta_lock);
 
 	switch (add_sta_resp->status) {
 	case ADD_STA_SUCCESS_MSK:
 		IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
-		ret = iwl_sta_ucode_activate(priv, sta_id);
 		break;
 	case ADD_STA_NO_ROOM_IN_TABLE:
-		IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
-			sta_id);
+		IWL_ERR(priv, "Adding station failed, no room in table.\n");
 		break;
 	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
-		IWL_ERR(priv, "Adding station %d failed, no block ack "
-			"resource.\n", sta_id);
+		IWL_ERR(priv,
+			"Adding station failed, no block ack resource.\n");
 		break;
 	case ADD_STA_MODIFY_NON_EXIST_STA:
-		IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
-			sta_id);
+		IWL_ERR(priv, "Attempting to modify non-existing station\n");
 		break;
 	default:
 		IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
@@ -102,37 +89,14 @@
 		break;
 	}
 
-	IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
-		       priv->stations[sta_id].sta.mode ==
-		       STA_CONTROL_MODIFY_MSK ?  "Modified" : "Added",
-		       sta_id, priv->stations[sta_id].sta.sta.addr);
-
-	/*
-	 * XXX: The MAC address in the command buffer is often changed from
-	 * the original sent to the device. That is, the MAC address
-	 * written to the command buffer often is not the same MAC address
-	 * read from the command buffer when the command returns. This
-	 * issue has not yet been resolved and this debugging is left to
-	 * observe the problem.
-	 */
-	IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
-		       priv->stations[sta_id].sta.mode ==
-		       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
-		       addsta->sta.addr);
 	spin_unlock_bh(&priv->sta_lock);
-
-	return ret;
 }
 
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd)
+void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
-	if (!cmd)
-		return 0;
-
-	return iwl_process_add_sta_resp(priv, (void *)cmd->payload, pkt);
+	iwl_process_add_sta_resp(priv, pkt);
 }
 
 int iwl_send_add_sta(struct iwl_priv *priv,
@@ -146,6 +110,8 @@
 		.len = { sizeof(*sta), },
 	};
 	u8 sta_id __maybe_unused = sta->sta.sta_id;
+	struct iwl_rx_packet *pkt;
+	struct iwl_add_sta_resp *add_sta_resp;
 
 	IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
 		       sta_id, sta->sta.addr, flags & CMD_ASYNC ?  "a" : "");
@@ -159,16 +125,22 @@
 
 	if (ret || (flags & CMD_ASYNC))
 		return ret;
-	/*else the command was successfully sent in SYNC mode, need to free
-	 * the reply page */
+
+	pkt = cmd.resp_pkt;
+	add_sta_resp = (void *)pkt->data;
+
+	/* debug messages are printed in the handler */
+	if (add_sta_resp->status == ADD_STA_SUCCESS_MSK) {
+		spin_lock_bh(&priv->sta_lock);
+		ret = iwl_sta_ucode_activate(priv, sta_id);
+		spin_unlock_bh(&priv->sta_lock);
+	} else {
+		ret = -EIO;
+	}
 
 	iwl_free_resp(&cmd);
 
-	if (cmd.handler_status)
-		IWL_ERR(priv, "%s - error in the CMD response %d\n", __func__,
-			cmd.handler_status);
-
-	return cmd.handler_status;
+	return ret;
 }
 
 bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
@@ -452,6 +424,7 @@
 	struct iwl_rx_packet *pkt;
 	int ret;
 	struct iwl_rem_sta_cmd rm_sta_cmd;
+	struct iwl_rem_sta_resp *rem_sta_resp;
 
 	struct iwl_host_cmd cmd = {
 		.id = REPLY_REMOVE_STA,
@@ -471,29 +444,23 @@
 		return ret;
 
 	pkt = cmd.resp_pkt;
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
-			  pkt->hdr.flags);
+	rem_sta_resp = (void *)pkt->data;
+
+	switch (rem_sta_resp->status) {
+	case REM_STA_SUCCESS_MSK:
+		if (!temporary) {
+			spin_lock_bh(&priv->sta_lock);
+			iwl_sta_ucode_deactivate(priv, sta_id);
+			spin_unlock_bh(&priv->sta_lock);
+		}
+		IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
+		break;
+	default:
 		ret = -EIO;
+		IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
+		break;
 	}
 
-	if (!ret) {
-		struct iwl_rem_sta_resp *rem_sta_resp = (void *)pkt->data;
-		switch (rem_sta_resp->status) {
-		case REM_STA_SUCCESS_MSK:
-			if (!temporary) {
-				spin_lock_bh(&priv->sta_lock);
-				iwl_sta_ucode_deactivate(priv, sta_id);
-				spin_unlock_bh(&priv->sta_lock);
-			}
-			IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
-			break;
-		default:
-			ret = -EIO;
-			IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
-			break;
-		}
-	}
 	iwl_free_resp(&cmd);
 
 	return ret;
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 275df12..bddd197 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -1128,8 +1128,7 @@
 	}
 }
 
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd)
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -1273,8 +1272,6 @@
 		skb = __skb_dequeue(&skbs);
 		ieee80211_tx_status(priv->hw, skb);
 	}
-
-	return 0;
 }
 
 /**
@@ -1283,9 +1280,8 @@
  * Handles block-acknowledge notification from device, which reports success
  * of frames sent via aggregation.
  */
-int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
-				   struct iwl_rx_cmd_buffer *rxb,
-				   struct iwl_device_cmd *cmd)
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+				   struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
@@ -1306,7 +1302,7 @@
 	if (scd_flow >= priv->cfg->base_params->num_of_queues) {
 		IWL_ERR(priv,
 			"BUG_ON scd_flow is bigger than number of queues\n");
-		return 0;
+		return;
 	}
 
 	sta_id = ba_resp->sta_id;
@@ -1319,7 +1315,7 @@
 		if (unlikely(ba_resp->bitmap))
 			IWL_ERR(priv, "Received BA when not expected\n");
 		spin_unlock_bh(&priv->sta_lock);
-		return 0;
+		return;
 	}
 
 	if (unlikely(scd_flow != agg->txq_id)) {
@@ -1333,7 +1329,7 @@
 				    "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
 				    scd_flow, sta_id, tid, agg->txq_id);
 		spin_unlock_bh(&priv->sta_lock);
-		return 0;
+		return;
 	}
 
 	__skb_queue_head_init(&reclaimed_skbs);
@@ -1413,6 +1409,4 @@
 		skb = __skb_dequeue(&reclaimed_skbs);
 		ieee80211_tx_status(priv->hw, skb);
 	}
-
-	return 0;
 }
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 5244e43..931a8e4 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -3,6 +3,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -327,7 +328,7 @@
 	const struct fw_img *fw;
 	int ret;
 	enum iwl_ucode_type old_type;
-	static const u8 alive_cmd[] = { REPLY_ALIVE };
+	static const u16 alive_cmd[] = { REPLY_ALIVE };
 
 	fw = iwl_get_ucode_image(priv, ucode_type);
 	if (WARN_ON(!fw))
@@ -406,7 +407,7 @@
 int iwl_run_init_ucode(struct iwl_priv *priv)
 {
 	struct iwl_notification_wait calib_wait;
-	static const u8 calib_complete[] = {
+	static const u16 calib_complete[] = {
 		CALIBRATION_RES_NOTIFICATION,
 		CALIBRATION_COMPLETE_NOTIFICATION
 	};
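
All of the static notification-wait lists (alive_cmd, calib_complete, status_cmd, deactivate_cmd) widen from u8 to u16 in lockstep, because under the wide command header a command ID carries a group in its upper byte and no longer fits in eight bits. The change pattern, with an illustrative name:

	/* 16-bit IDs leave room for a group in the upper byte under the
	 * wide command header scheme; was: static const u8 ...
	 */
	static const u16 demo_alive_cmd[] = { REPLY_ALIVE };
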
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index cc35f79..6951aba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -69,14 +69,14 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX	15
+#define IWL7260_UCODE_API_MAX	17
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK	12
 #define IWL3165_UCODE_API_OK	13
 
 /* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN	10
+#define IWL7260_UCODE_API_MIN	12
 #define IWL3165_UCODE_API_MIN	13
 
 /* NVM versions */
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index 72040cd..197abe4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -69,13 +69,13 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX	15
+#define IWL8000_UCODE_API_MAX	17
 
 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK	12
 
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN	10
+#define IWL8000_UCODE_API_MIN	12
 
 /* NVM versions */
 #define IWL8000_NVM_VERSION		0x0a1d
@@ -97,8 +97,9 @@
 #define DEFAULT_NVM_FILE_FAMILY_8000B		"nvmData-8000B"
 #define DEFAULT_NVM_FILE_FAMILY_8000C		"nvmData-8000C"
 
-/* Max SDIO RX aggregation size of the ADDBA request/response */
-#define MAX_RX_AGG_SIZE_8260_SDIO	28
+/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */
+#define MAX_RX_AGG_SIZE_8260_SDIO	21
+#define MAX_TX_AGG_SIZE_8260_SDIO	40
 
 /* Max A-MPDU exponent for HT and VHT */
 #define MAX_HT_AMPDU_EXPONENT_8260_SDIO	IEEE80211_HT_MAX_AMPDU_32K
@@ -154,6 +155,7 @@
 	.led_mode = IWL_LED_RF_STATE,					\
 	.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000,		\
 	.d0i3 = true,							\
+	.features = NETIF_F_RXCSUM,					\
 	.non_shared_ant = ANT_A,					\
 	.dccm_offset = IWL8260_DCCM_OFFSET,				\
 	.dccm_len = IWL8260_DCCM_LEN,					\
@@ -203,6 +205,7 @@
 	.nvm_ver = IWL8000_NVM_VERSION,
 	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
 	.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+	.max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
 	.disable_dummy_notification = true,
 	.max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
 	.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
@@ -216,6 +219,7 @@
 	.nvm_ver = IWL8000_NVM_VERSION,
 	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
 	.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+	.max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
 	.bt_shared_single_ant = true,
 	.disable_dummy_notification = true,
 	.max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 08c14af..939fa22 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -297,6 +297,7 @@
  *	mode set
  * @d0i3: device uses d0i3 instead of d3
  * @nvm_hw_section_num: the ID of the HW NVM section
+ * @features: hw features, any combination of feature_whitelist
  * @pwr_tx_backoffs: translation table between power limits and backoffs
  * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
  * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
@@ -348,6 +349,7 @@
 	bool no_power_up_nic_in_init;
 	const char *default_nvm_file_B_step;
 	const char *default_nvm_file_C_step;
+	netdev_features_t features;
 	unsigned int max_rx_agg_size;
 	bool disable_dummy_notification;
 	unsigned int max_tx_agg_size;
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index faa17f2..543abea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -200,6 +200,7 @@
 #define CSR_INT_BIT_FH_TX        (1 << 27) /* Tx DMA FH_INT[1:0] */
 #define CSR_INT_BIT_SCD          (1 << 26) /* TXQ pointer advanced */
 #define CSR_INT_BIT_SW_ERR       (1 << 25) /* uCode error */
+#define CSR_INT_BIT_PAGING       (1 << 24) /* SDIO PAGING */
 #define CSR_INT_BIT_RF_KILL      (1 << 7)  /* HW RFKILL switch GP_CNTRL[27] toggled */
 #define CSR_INT_BIT_CT_KILL      (1 << 6)  /* Critical temp (chip too hot) rfkill */
 #define CSR_INT_BIT_SW_RX        (1 << 3)  /* Rx, command responses */
@@ -210,6 +211,7 @@
 				 CSR_INT_BIT_HW_ERR  | \
 				 CSR_INT_BIT_FH_TX   | \
 				 CSR_INT_BIT_SW_ERR  | \
+				 CSR_INT_BIT_PAGING  | \
 				 CSR_INT_BIT_RF_KILL | \
 				 CSR_INT_BIT_SW_RX   | \
 				 CSR_INT_BIT_WAKEUP  | \
@@ -422,6 +424,7 @@
 
 /* DRAM INT TABLE */
 #define CSR_DRAM_INT_TBL_ENABLE		(1 << 31)
+#define CSR_DRAM_INIT_TBL_WRITE_POINTER	(1 << 28)
 #define CSR_DRAM_INIT_TBL_WRAP_CHECK	(1 << 27)
 
 /*
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h
index 04e6649..71a78ce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h
@@ -35,8 +35,8 @@
 TRACE_EVENT(iwlwifi_dev_tx_data,
 	TP_PROTO(const struct device *dev,
 		 struct sk_buff *skb,
-		 void *data, size_t data_len),
-	TP_ARGS(dev, skb, data, data_len),
+		 u8 hdr_len, size_t data_len),
+	TP_ARGS(dev, skb, hdr_len, data_len),
 	TP_STRUCT__entry(
 		DEV_ENTRY
 
@@ -45,7 +45,8 @@
 	TP_fast_assign(
 		DEV_ASSIGN;
 		if (iwl_trace_data(skb))
-			memcpy(__get_dynamic_array(data), data, data_len);
+			skb_copy_bits(skb, hdr_len,
+				      __get_dynamic_array(data), data_len);
 	),
 	TP_printk("[%s] TX frame data", __get_str(dev))
 );
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
index 948ce08..eb4b99a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -36,7 +36,7 @@
 TRACE_EVENT(iwlwifi_dev_hcmd,
 	TP_PROTO(const struct device *dev,
 		 struct iwl_host_cmd *cmd, u16 total_size,
-		 struct iwl_cmd_header *hdr),
+		 struct iwl_cmd_header_wide *hdr),
 	TP_ARGS(dev, cmd, total_size, hdr),
 	TP_STRUCT__entry(
 		DEV_ENTRY
@@ -44,11 +44,14 @@
 		__field(u32, flags)
 	),
 	TP_fast_assign(
-		int i, offset = sizeof(*hdr);
+		int i, offset = sizeof(struct iwl_cmd_header);
+
+		if (hdr->group_id)
+			offset = sizeof(struct iwl_cmd_header_wide);
 
 		DEV_ASSIGN;
 		__entry->flags = cmd->flags;
-		memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
+		memcpy(__get_dynamic_array(hcmd), hdr, offset);
 
 		for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
 			if (!cmd->len[i])
@@ -58,8 +61,9 @@
 			offset += cmd->len[i];
 		}
 	),
-	TP_printk("[%s] hcmd %#.2x (%ssync)",
-		  __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0],
+	TP_printk("[%s] hcmd %#.2x.%#.2x (%ssync)",
+		  __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[1],
+		  ((u8 *)__get_dynamic_array(hcmd))[0],
 		  __entry->flags & CMD_ASYNC ? "a" : "")
 );
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 6685259..a86aa5b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -372,6 +372,30 @@
 	return 0;
 }
 
+static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
+				const u32 len)
+{
+	struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
+	struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
+
+	if (len < sizeof(*fw_capa))
+		return -EINVAL;
+
+	capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
+	capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
+	capa->max_ap_cache_per_scan =
+		le32_to_cpu(fw_capa->max_ap_cache_per_scan);
+	capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size);
+	capa->max_scan_reporting_threshold =
+		le32_to_cpu(fw_capa->max_scan_reporting_threshold);
+	capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps);
+	capa->max_significant_change_aps =
+		le32_to_cpu(fw_capa->max_significant_change_aps);
+	capa->max_bssid_history_entries =
+		le32_to_cpu(fw_capa->max_bssid_history_entries);
+	return 0;
+}
+
 /*
  * Gets uCode section from tlv.
  */
@@ -573,13 +597,15 @@
 	size_t len = ucode_raw->size;
 	const u8 *data;
 	u32 tlv_len;
+	u32 usniffer_img;
 	enum iwl_ucode_tlv_type tlv_type;
 	const u8 *tlv_data;
 	char buildstr[25];
-	u32 build;
+	u32 build, paging_mem_size;
 	int num_of_cpus;
 	bool usniffer_images = false;
 	bool usniffer_req = false;
+	bool gscan_capa = false;
 
 	if (len < sizeof(*ucode)) {
 		IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -955,12 +981,46 @@
 					    IWL_UCODE_REGULAR_USNIFFER,
 					    tlv_len);
 			break;
+		case IWL_UCODE_TLV_PAGING:
+			if (tlv_len != sizeof(u32))
+				goto invalid_tlv_len;
+			paging_mem_size = le32_to_cpup((__le32 *)tlv_data);
+
+			IWL_DEBUG_FW(drv,
+				     "Paging: paging enabled (size = %u bytes)\n",
+				     paging_mem_size);
+
+			if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) {
+				IWL_ERR(drv,
+					"Paging: driver supports up to %lu bytes for paging image\n",
+					MAX_PAGING_IMAGE_SIZE);
+				return -EINVAL;
+			}
+
+			if (paging_mem_size & (FW_PAGING_SIZE - 1)) {
+				IWL_ERR(drv,
+					"Paging: image size isn't a multiple of %lu\n",
+					FW_PAGING_SIZE);
+				return -EINVAL;
+			}
+
+			drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size =
+				paging_mem_size;
+			usniffer_img = IWL_UCODE_REGULAR_USNIFFER;
+			drv->fw.img[usniffer_img].paging_mem_size =
+				paging_mem_size;
+			break;
 		case IWL_UCODE_TLV_SDIO_ADMA_ADDR:
 			if (tlv_len != sizeof(u32))
 				goto invalid_tlv_len;
 			drv->fw.sdio_adma_addr =
 				le32_to_cpup((__le32 *)tlv_data);
 			break;
+		case IWL_UCODE_TLV_FW_GSCAN_CAPA:
+			if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len))
+				goto invalid_tlv_len;
+			gscan_capa = true;
+			break;
 		default:
 			IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
 			break;
@@ -979,6 +1039,16 @@
 		return -EINVAL;
 	}
 
+	/*
+	 * If ucode advertises that it supports GSCAN but GSCAN
+	 * capabilities TLV is not present, warn and continue without GSCAN.
+	 */
+	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+	    WARN(!gscan_capa,
+		 "GSCAN is supported but capabilities TLV is unavailable\n"))
+		__clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
+			    capa->_capa);
+
 	return 0;
 
  invalid_tlv_len:
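
The IWL_UCODE_TLV_PAGING branch shows the defensive steps for any firmware-declared size: validate the TLV length before reading, bound the value against a driver maximum, and require alignment to the (power-of-two) paging block via a mask test. The same checks condensed into a sketch; DEMO_BLOCK_SIZE and DEMO_MAX_SIZE are hypothetical stand-ins for the paging constants:

/* Sketch: validating a firmware-declared size; the block size must be
 * a power of two for the mask test.
 */
#define DEMO_BLOCK_SIZE		(1UL << 12)		/* 4 KiB blocks */
#define DEMO_MAX_SIZE		(DEMO_BLOCK_SIZE << 8)	/* driver cap */

static int demo_check_size(const u8 *tlv_data, u32 tlv_len, u32 *out)
{
	u32 size;

	if (tlv_len != sizeof(u32))		/* malformed TLV */
		return -EINVAL;
	size = le32_to_cpup((const __le32 *)tlv_data);

	if (size > DEMO_MAX_SIZE)		/* bound against driver max */
		return -EINVAL;
	if (size & (DEMO_BLOCK_SIZE - 1))	/* alignment to block size */
		return -EINVAL;

	*out = size;
	return 0;
}
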
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index 21302b6..acc3d18 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -713,12 +713,12 @@
 	struct ieee80211_channel *chan = &data->channels[0];
 	int n = 0, idx = 0;
 
-	while (chan->band != band && idx < n_channels)
+	while (idx < n_channels && chan->band != band)
 		chan = &data->channels[++idx];
 
 	sband->channels = &data->channels[idx];
 
-	while (chan->band == band && idx < n_channels) {
+	while (idx < n_channels && chan->band == band) {
 		chan = &data->channels[++idx];
 		n++;
 	}
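
The eeprom-parse fix is purely about operand order: with chan->band tested first, the loops dereferenced data->channels[n_channels] before checking idx. Putting the bounds test first lets && short-circuit ahead of the out-of-range read. The rule in a standalone sketch; demo_count_band() is a hypothetical reduction of the loops above:

/* Sketch: the bounds test must precede the array dereference. */
static int demo_count_band(const struct ieee80211_channel *ch, int n,
			   enum ieee80211_band band)
{
	int idx = 0, cnt = 0;

	while (idx < n && ch[idx].band != band)		/* skip other bands */
		idx++;
	while (idx < n && ch[idx].band == band) {	/* count this band */
		idx++;
		cnt++;
	}
	return cnt;
}
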
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index d45dc02..d560648 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -438,12 +438,6 @@
 #define RX_QUEUE_MASK                         255
 #define RX_QUEUE_SIZE_LOG                     8
 
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
 /**
  * struct iwl_rb_status - reserve buffer status
  * 	host memory mapped FH registers
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
index e57dbd0..af5b320 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
@@ -84,6 +84,8 @@
  * @IWL_FW_ERROR_DUMP_MEM: chunk of memory
  * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
  *	Structured as &struct iwl_fw_error_dump_trigger_desc.
+ * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
+ *	&struct iwl_fw_error_dump_rb
  */
 enum iwl_fw_error_dump_type {
 	/* 0 is deprecated */
@@ -97,6 +99,7 @@
 	IWL_FW_ERROR_DUMP_FH_REGS = 8,
 	IWL_FW_ERROR_DUMP_MEM = 9,
 	IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
+	IWL_FW_ERROR_DUMP_RB = 11,
 
 	IWL_FW_ERROR_DUMP_MAX,
 };
@@ -223,6 +226,20 @@
 };
 
 /**
+ * struct iwl_fw_error_dump_rb - content of a Receive Buffer
+ * @index: the index of the Receive Buffer in the Rx queue
+ * @rxq: the RB's Rx queue
+ * @reserved: reserved
+ * @data: the content of the Receive Buffer
+ */
+struct iwl_fw_error_dump_rb {
+	__le32 index;
+	__le32 rxq;
+	__le32 reserved;
+	u8 data[];
+};
+
+/**
  * iwl_fw_error_next_data - advance fw error dump data pointer
  * @data: previous data block
  * Returns: next data block
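
The new dump record ends in a C99 flexible array member, so a record occupies the fixed header plus however many payload bytes the RB holds. A sketch of how a producer might size one (the rb_size parameter is an assumption for illustration; in the real dump the total length lives in the enclosing iwl_fw_error_dump_data header):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct fw_error_dump_rb {
	uint32_t index;    /* index of the RB in the Rx queue */
	uint32_t rxq;      /* the RB's Rx queue */
	uint32_t reserved;
	uint8_t data[];    /* flexible array member: RB payload follows */
};

/* Allocate one record with room for rb_size payload bytes. */
static struct fw_error_dump_rb *alloc_rb_record(const void *payload,
						size_t rb_size)
{
	struct fw_error_dump_rb *rb = calloc(1, sizeof(*rb) + rb_size);

	if (rb)
		memcpy(rb->data, payload, rb_size);
	return rb;
}
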
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index a9b5ae4..84653e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -132,12 +132,14 @@
 	IWL_UCODE_TLV_API_CHANGES_SET	= 29,
 	IWL_UCODE_TLV_ENABLED_CAPABILITIES	= 30,
 	IWL_UCODE_TLV_N_SCAN_CHANNELS		= 31,
+	IWL_UCODE_TLV_PAGING		= 32,
 	IWL_UCODE_TLV_SEC_RT_USNIFFER	= 34,
 	IWL_UCODE_TLV_SDIO_ADMA_ADDR	= 35,
 	IWL_UCODE_TLV_FW_VERSION	= 36,
 	IWL_UCODE_TLV_FW_DBG_DEST	= 38,
 	IWL_UCODE_TLV_FW_DBG_CONF	= 39,
 	IWL_UCODE_TLV_FW_DBG_TRIGGER	= 40,
+	IWL_UCODE_TLV_FW_GSCAN_CAPA	= 50,
 };
 
 struct iwl_ucode_tlv {
@@ -247,9 +249,7 @@
  * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
  * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
  * @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power.
- * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
- *	regardless of the band or the number of the probes. FW will calculate
- *	the actual dwell time.
+ * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
  * @IWL_UCODE_TLV_API_SCD_CFG: This firmware can configure the scheduler
  *	through the dedicated host command.
  * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
@@ -259,6 +259,8 @@
  * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
  * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
  *	instead of 3.
+ * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
+ *	(command version 3) that supports per-chain limits
  */
 enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_BT_COEX_SPLIT         = (__force iwl_ucode_tlv_api_t)3,
@@ -266,7 +268,7 @@
 	IWL_UCODE_TLV_API_WIFI_MCC_UPDATE	= (__force iwl_ucode_tlv_api_t)9,
 	IWL_UCODE_TLV_API_HDC_PHASE_0		= (__force iwl_ucode_tlv_api_t)10,
 	IWL_UCODE_TLV_API_TX_POWER_DEV		= (__force iwl_ucode_tlv_api_t)11,
-	IWL_UCODE_TLV_API_BASIC_DWELL		= (__force iwl_ucode_tlv_api_t)13,
+	IWL_UCODE_TLV_API_WIDE_CMD_HDR		= (__force iwl_ucode_tlv_api_t)14,
 	IWL_UCODE_TLV_API_SCD_CFG		= (__force iwl_ucode_tlv_api_t)15,
 	IWL_UCODE_TLV_API_SINGLE_SCAN_EBS	= (__force iwl_ucode_tlv_api_t)16,
 	IWL_UCODE_TLV_API_ASYNC_DTM		= (__force iwl_ucode_tlv_api_t)17,
@@ -274,6 +276,7 @@
 	IWL_UCODE_TLV_API_STATS_V10		= (__force iwl_ucode_tlv_api_t)19,
 	IWL_UCODE_TLV_API_NEW_VERSION		= (__force iwl_ucode_tlv_api_t)20,
 	IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY	= (__force iwl_ucode_tlv_api_t)24,
+	IWL_UCODE_TLV_API_TX_POWER_CHAIN	= (__force iwl_ucode_tlv_api_t)27,
 };
 
 typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
@@ -284,6 +287,7 @@
  * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
  * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
  * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
+ * @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM)
  * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
  * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
  *	tx power value into TPC Report action frame and Link Measurement Report
@@ -298,6 +302,7 @@
  * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
  * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
  * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
+ * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
  * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
  * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
  * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
@@ -305,12 +310,14 @@
  *	IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
  *	is supported.
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
+ * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
  */
 enum iwl_ucode_tlv_capa {
 	IWL_UCODE_TLV_CAPA_D0I3_SUPPORT			= (__force iwl_ucode_tlv_capa_t)0,
 	IWL_UCODE_TLV_CAPA_LAR_SUPPORT			= (__force iwl_ucode_tlv_capa_t)1,
 	IWL_UCODE_TLV_CAPA_UMAC_SCAN			= (__force iwl_ucode_tlv_capa_t)2,
 	IWL_UCODE_TLV_CAPA_BEAMFORMER			= (__force iwl_ucode_tlv_capa_t)3,
+	IWL_UCODE_TLV_CAPA_TOF_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)5,
 	IWL_UCODE_TLV_CAPA_TDLS_SUPPORT			= (__force iwl_ucode_tlv_capa_t)6,
 	IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT	= (__force iwl_ucode_tlv_capa_t)8,
 	IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT	= (__force iwl_ucode_tlv_capa_t)9,
@@ -320,10 +327,12 @@
 	IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH		= (__force iwl_ucode_tlv_capa_t)13,
 	IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT		= (__force iwl_ucode_tlv_capa_t)18,
 	IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT		= (__force iwl_ucode_tlv_capa_t)19,
+	IWL_UCODE_TLV_CAPA_CSUM_SUPPORT			= (__force iwl_ucode_tlv_capa_t)21,
 	IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS		= (__force iwl_ucode_tlv_capa_t)22,
 	IWL_UCODE_TLV_CAPA_BT_COEX_PLCR			= (__force iwl_ucode_tlv_capa_t)28,
 	IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC		= (__force iwl_ucode_tlv_capa_t)29,
 	IWL_UCODE_TLV_CAPA_BT_COEX_RRC			= (__force iwl_ucode_tlv_capa_t)30,
+	IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT		= (__force iwl_ucode_tlv_capa_t)31,
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -341,8 +350,9 @@
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
  */
-#define IWL_UCODE_SECTION_MAX 12
+#define IWL_UCODE_SECTION_MAX 16
 #define CPU1_CPU2_SEPARATOR_SECTION	0xFFFFCCCC
+#define PAGING_SEPARATOR_SECTION	0xAAAABBBB
 
 /* uCode version contains 4 values: Major/Minor/API/Serial */
 #define IWL_UCODE_MAJOR(ver)	(((ver) & 0xFF000000) >> 24)
@@ -412,6 +422,12 @@
 	PRPH_ASSIGN,
 	PRPH_SETBIT,
 	PRPH_CLEARBIT,
+
+	INDIRECT_ASSIGN,
+	INDIRECT_SETBIT,
+	INDIRECT_CLEARBIT,
+
+	PRPH_BLOCKBIT,
 };
 
 /**
@@ -485,10 +501,13 @@
  *
  * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
  * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
+ * @IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to
+ *	collect only monitor data
  */
 enum iwl_fw_dbg_trigger_mode {
 	IWL_FW_DBG_TRIGGER_START = BIT(0),
 	IWL_FW_DBG_TRIGGER_STOP = BIT(1),
+	IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2),
 };
 
 /**
@@ -718,4 +737,28 @@
 	struct iwl_fw_dbg_conf_hcmd hcmd;
 } __packed;
 
+/**
+ * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW
+ * @max_scan_cache_size: total space allocated for scan results (in bytes).
+ * @max_scan_buckets: maximum number of channel buckets.
+ * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
+ * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
+ * @max_scan_reporting_threshold: max possible report threshold, in percent.
+ * @max_hotlist_aps: maximum number of entries for hotlist APs.
+ * @max_significant_change_aps: maximum number of entries for significant
+ *	change APs.
+ * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
+ *	hold.
+ */
+struct iwl_fw_gscan_capabilities {
+	__le32 max_scan_cache_size;
+	__le32 max_scan_buckets;
+	__le32 max_ap_cache_per_scan;
+	__le32 max_rssi_sample_size;
+	__le32 max_scan_reporting_threshold;
+	__le32 max_hotlist_aps;
+	__le32 max_significant_change_aps;
+	__le32 max_bssid_history_entries;
+} __packed;
+
 #endif  /* __iwl_fw_file_h__ */
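
The enum values above are bit positions into the ucode capability bitmap; the GSCAN consistency check earlier in this series tests and clears bit 31 via the kernel's test_bit()/__clear_bit(). A user-space approximation of those two primitives, assuming a 32-bit word size:

#include <stdbool.h>
#include <stdint.h>

#define TLV_CAPA_GSCAN_SUPPORT 31
#define BITS_PER_WORD 32

/* Minimal stand-ins for the kernel's test_bit() / __clear_bit(). */
static bool capa_test(const uint32_t *bitmap, unsigned int bit)
{
	return bitmap[bit / BITS_PER_WORD] & (1U << (bit % BITS_PER_WORD));
}

static void capa_clear(uint32_t *bitmap, unsigned int bit)
{
	bitmap[bit / BITS_PER_WORD] &= ~(1U << (bit % BITS_PER_WORD));
}
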
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 3e3c9d8..45e7321 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -133,6 +133,7 @@
 struct fw_img {
 	struct fw_desc sec[IWL_UCODE_SECTION_MAX];
 	bool is_dual_cpus;
+	u32 paging_mem_size;
 };
 
 struct iwl_sf_region {
@@ -140,6 +141,48 @@
 	u32 size;
 };
 
+/*
+ * Block paging calculations
+ */
+#define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */
+#define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */
+#define PAGE_PER_GROUP_2_EXP_SIZE 3
+/* 8 pages per group */
+#define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE)
+/* don't change; only a 32KB block size is supported */
+#define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE)
+/* 32K == 2^15 */
+#define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE)
+
+/*
+ * Image paging calculations
+ */
+#define BLOCK_PER_IMAGE_2_EXP_SIZE 5
+/* 2^5 == 32 blocks per image */
+#define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE)
+/* maximum image size 1024KB */
+#define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE)
+
+/* Virtual address signature */
+#define PAGING_ADDR_SIG 0xAA000000
+
+#define PAGING_CMD_IS_SECURED BIT(9)
+#define PAGING_CMD_IS_ENABLED BIT(8)
+#define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS	0
+#define PAGING_TLV_SECURE_MASK 1
+
+/**
+ * struct iwl_fw_paging
+ * @fw_paging_phys: physical (DMA) address of the page
+ * @fw_paging_block: pointer to the allocated block
+ * @fw_paging_size: page size
+ */
+struct iwl_fw_paging {
+	dma_addr_t fw_paging_phys;
+	struct page *fw_paging_block;
+	u32 fw_paging_size;
+};
+
 /**
  * struct iwl_fw_cscheme_list - a cipher scheme list
  * @size: a number of entries
@@ -151,6 +194,30 @@
 } __packed;
 
 /**
+ * struct iwl_gscan_capabilities - gscan capabilities supported by FW
+ * @max_scan_cache_size: total space allocated for scan results (in bytes).
+ * @max_scan_buckets: maximum number of channel buckets.
+ * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
+ * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
+ * @max_scan_reporting_threshold: max possible report threshold, in percent.
+ * @max_hotlist_aps: maximum number of entries for hotlist APs.
+ * @max_significant_change_aps: maximum number of entries for significant
+ *	change APs.
+ * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
+ *	hold.
+ */
+struct iwl_gscan_capabilities {
+	u32 max_scan_cache_size;
+	u32 max_scan_buckets;
+	u32 max_ap_cache_per_scan;
+	u32 max_rssi_sample_size;
+	u32 max_scan_reporting_threshold;
+	u32 max_hotlist_aps;
+	u32 max_significant_change_aps;
+	u32 max_bssid_history_entries;
+};
+
+/**
  * struct iwl_fw - variables associated with the firmware
  *
  * @ucode_ver: ucode version from the ucode file
@@ -208,6 +275,7 @@
 	struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
 	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
 	u8 dbg_dest_reg_num;
+	struct iwl_gscan_capabilities gscan_capa;
 };
 
 static inline const char *get_fw_dbg_mode_string(int mode)
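
The paging constants added to iwl-fw.h encode a three-level geometry: 4 KB pages, 8 pages per 32 KB block, and up to 32 blocks per image, giving the 1024 KB ceiling enforced in the TLV parser. A sketch deriving the block count the opmode would need for a given image size (the helper name is hypothetical):

#include <stdint.h>

#define PAGE_2_EXP_SIZE           12  /* 4 KB == 2^12 */
#define PAGE_PER_GROUP_2_EXP_SIZE 3   /* 8 pages per group */
#define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE)
#define PAGING_BLOCK_SIZE (1UL << BLOCK_2_EXP_SIZE)  /* 32 KB per block */

/*
 * Number of 32 KB blocks needed to hold an image of img_size bytes,
 * rounding the final partial block up.
 */
static uint32_t paging_blocks_needed(uint32_t img_size)
{
	return (img_size + PAGING_BLOCK_SIZE - 1) >> BLOCK_2_EXP_SIZE;
}
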
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index b5bc959..6caf2af 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -98,7 +99,8 @@
 				continue;
 
 			for (i = 0; i < w->n_cmds; i++) {
-				if (w->cmds[i] == pkt->hdr.cmd) {
+				if (w->cmds[i] ==
+				    WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
 					found = true;
 					break;
 				}
@@ -136,7 +138,7 @@
 void
 iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
 			   struct iwl_notification_wait *wait_entry,
-			   const u8 *cmds, int n_cmds,
+			   const u16 *cmds, int n_cmds,
 			   bool (*fn)(struct iwl_notif_wait_data *notif_wait,
 				      struct iwl_rx_packet *pkt, void *data),
 			   void *fn_data)
@@ -147,7 +149,7 @@
 	wait_entry->fn = fn;
 	wait_entry->fn_data = fn_data;
 	wait_entry->n_cmds = n_cmds;
-	memcpy(wait_entry->cmds, cmds, n_cmds);
+	memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16));
 	wait_entry->triggered = false;
 	wait_entry->aborted = false;
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
index 95af97a..dbe8234 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -105,7 +106,7 @@
 		   struct iwl_rx_packet *pkt, void *data);
 	void *fn_data;
 
-	u8 cmds[MAX_NOTIF_CMDS];
+	u16 cmds[MAX_NOTIF_CMDS];
 	u8 n_cmds;
 	bool triggered, aborted;
 };
@@ -121,7 +122,7 @@
 void __acquires(wait_entry)
 iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
 			   struct iwl_notification_wait *wait_entry,
-			   const u8 *cmds, int n_cmds,
+			   const u16 *cmds, int n_cmds,
 			   bool (*fn)(struct iwl_notif_wait_data *notif_data,
 				      struct iwl_rx_packet *pkt, void *data),
 			   void *fn_data);
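
Widening cmds[] from u8 to u16 is what allows the waiter to match on the (group, opcode) pair instead of the bare opcode; note the companion fix in iwl-notif-wait.c above, where the memcpy previously copied only n_cmds bytes of what is now a u16 array. A compressed sketch of the new comparison (the struct layout is illustrative):

#include <stdbool.h>
#include <stdint.h>

#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))

struct pkt_hdr { uint8_t cmd; uint8_t group_id; };

/* Match an incoming packet against the waiter's wide command ids. */
static bool notif_matches(const uint16_t *cmds, int n_cmds,
			  const struct pkt_hdr *hdr)
{
	for (int i = 0; i < n_cmds; i++)
		if (cmds[i] == WIDE_ID(hdr->group_id, hdr->cmd))
			return true;
	return false;
}
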
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index ce1cdd7..b47fe9d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -116,10 +116,6 @@
  *	May sleep
  * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
  *	HCMD this Rx responds to. Can't sleep.
- * @napi_add: NAPI initialization. The transport is fully responsible for NAPI,
- *	but the higher layers need to know about it (in particular mac80211 to
- *	to able to call the right NAPI RX functions); this function is needed
- *	to eventually call netif_napi_add() with higher layer involvement.
  * @queue_full: notifies that a HW queue is full.
  *	Must be atomic and called with BH disabled.
  * @queue_not_full: notifies that a HW queue is not full any more.
@@ -148,13 +144,8 @@
 				     const struct iwl_fw *fw,
 				     struct dentry *dbgfs_dir);
 	void (*stop)(struct iwl_op_mode *op_mode);
-	int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
-		  struct iwl_device_cmd *cmd);
-	void (*napi_add)(struct iwl_op_mode *op_mode,
-			 struct napi_struct *napi,
-			 struct net_device *napi_dev,
-			 int (*poll)(struct napi_struct *, int),
-			 int weight);
+	void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+		   struct iwl_rx_cmd_buffer *rxb);
 	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
 	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
 	bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -188,11 +179,11 @@
 	op_mode->ops->stop(op_mode);
 }
 
-static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
-				  struct iwl_rx_cmd_buffer *rxb,
-				  struct iwl_device_cmd *cmd)
+static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
+				  struct napi_struct *napi,
+				  struct iwl_rx_cmd_buffer *rxb)
 {
-	return op_mode->ops->rx(op_mode, rxb, cmd);
+	return op_mode->ops->rx(op_mode, napi, rxb);
 }
 
 static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
@@ -260,15 +251,4 @@
 	return op_mode->ops->exit_d0i3(op_mode);
 }
 
-static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
-					struct napi_struct *napi,
-					struct net_device *napi_dev,
-					int (*poll)(struct napi_struct *, int),
-					int weight)
-{
-	if (!op_mode->ops->napi_add)
-		return;
-	op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
-}
-
 #endif /* __iwl_op_mode_h__ */
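
Removing napi_add also removes its NULL-guarded inline wrapper; the convention throughout this header (and in the d3_suspend/d3_resume guards added to iwl-trans.h below) is an ops table whose optional callbacks are checked before the call and fall back to a harmless default. A generic sketch of the pattern:

struct ops {
	void (*mandatory)(void);
	int (*optional)(void);   /* may be left NULL by an implementation */
};

/* Optional callbacks get a NULL check and a sensible default. */
static int call_optional(const struct ops *ops)
{
	if (!ops->optional)
		return 0;
	return ops->optional();
}
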
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 5af1c77..3ab777f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -253,6 +253,7 @@
 #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS	(16)
 #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK	(0x007F0000)
 #define SCD_GP_CTRL_ENABLE_31_QUEUES		BIT(0)
+#define SCD_GP_CTRL_AUTO_ACTIVE_MODE		BIT(18)
 
 /* Context Data */
 #define SCD_CONTEXT_MEM_LOWER_BOUND	(SCD_MEM_LOWER_BOUND + 0x600)
@@ -291,6 +292,9 @@
 
 /*********************** END TX SCHEDULER *************************************/
 
+/* tcp checksum offload */
+#define RX_EN_CSUM		(0x00a00d88)
+
 /* Oscillator clock */
 #define OSC_CLK				(0xa04068)
 #define OSC_CLK_FORCE_CONTROL		(0x8)
@@ -379,6 +383,8 @@
 #define AUX_MISC_MASTER1_SMPHR_STATUS	0xA20800
 #define RSA_ENABLE			0xA24B08
 #define PREG_AUX_BUS_WPROT_0		0xA04CC0
+#define SB_CPU_1_STATUS			0xA01E30
+#define SB_CPU_2_STATUS			0xA01E34
 
 /* FW chicken bits */
 #define LMPM_CHICK			0xA01FF8
@@ -386,4 +392,10 @@
 	LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0),
 };
 
+/* FW paging block pass notification */
+#define LMPM_PAGE_PASS_NOTIF			0xA03824
+enum {
+	LMPM_PAGE_PASS_NOTIF_POS = BIT(20),
+};
+
 #endif				/* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 87a230a..c829c50 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -122,6 +122,40 @@
 #define INDEX_TO_SEQ(i)	((i) & 0xff)
 #define SEQ_RX_FRAME	cpu_to_le16(0x8000)
 
+/*
+ * These functions retrieve specific information from the id field in
+ * the iwl_host_cmd struct, which contains the command id, the group id
+ * and the version of the command; iwl_cmd_id() performs the inverse
+ * conversion.
+ */
+static inline u8 iwl_cmd_opcode(u32 cmdid)
+{
+	return cmdid & 0xFF;
+}
+
+static inline u8 iwl_cmd_groupid(u32 cmdid)
+{
+	return ((cmdid & 0xFF00) >> 8);
+}
+
+static inline u8 iwl_cmd_version(u32 cmdid)
+{
+	return ((cmdid & 0xFF0000) >> 16);
+}
+
+static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
+{
+	return opcode + (groupid << 8) + (version << 16);
+}
+
+/* make u16 wide id out of u8 group and opcode */
+#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))
+
+/* due to the conversion, this group is special; new groups
+ * should be defined in the appropriate fw-api header files
+ */
+#define IWL_ALWAYS_LONG_GROUP	1
+
 /**
  * struct iwl_cmd_header
  *
@@ -130,7 +164,7 @@
  */
 struct iwl_cmd_header {
 	u8 cmd;		/* Command ID:  REPLY_RXON, etc. */
-	u8 flags;	/* 0:5 reserved, 6 abort, 7 internal */
+	u8 group_id;
 	/*
 	 * The driver sets up the sequence number to values of its choosing.
 	 * uCode does not use this value, but passes it back to the driver
@@ -154,9 +188,22 @@
 	__le16 sequence;
 } __packed;
 
-/* iwl_cmd_header flags value */
-#define IWL_CMD_FAILED_MSK 0x40
-
+/**
+ * struct iwl_cmd_header_wide
+ *
+ * This header format appears at the beginning of each command sent from the
+ * driver, and each response/notification received from uCode.
+ * This is the wide version, which carries more information about the command,
+ * such as its length, version and type.
+ */
+struct iwl_cmd_header_wide {
+	u8 cmd;
+	u8 group_id;
+	__le16 sequence;
+	__le16 length;
+	u8 reserved;
+	u8 version;
+} __packed;
 
 #define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
 #define FH_RSCSR_FRAME_INVALID		0x55550000
@@ -201,6 +248,8 @@
  * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
  * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
  *	(i.e. mark it as non-idle).
+ * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
+ *	check that we leave enough room for the TB bitmap, which needs 20 bits.
  */
 enum CMD_MODE {
 	CMD_ASYNC		= BIT(0),
@@ -210,6 +259,8 @@
 	CMD_SEND_IN_IDLE	= BIT(4),
 	CMD_MAKE_TRANS_IDLE	= BIT(5),
 	CMD_WAKE_UP_TRANS	= BIT(6),
+
+	CMD_TB_BITMAP_POS	= 11,
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -222,8 +273,18 @@
  * aren't fully copied and use other TFD space.
  */
 struct iwl_device_cmd {
-	struct iwl_cmd_header hdr;	/* uCode API */
-	u8 payload[DEF_CMD_PAYLOAD_SIZE];
+	union {
+		struct {
+			struct iwl_cmd_header hdr;	/* uCode API */
+			u8 payload[DEF_CMD_PAYLOAD_SIZE];
+		};
+		struct {
+			struct iwl_cmd_header_wide hdr_wide;
+			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
+					sizeof(struct iwl_cmd_header_wide) +
+					sizeof(struct iwl_cmd_header)];
+		};
+	};
 } __packed;
 
 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
@@ -261,24 +322,22 @@
  * @resp_pkt: response packet, if %CMD_WANT_SKB was set
  * @_rx_page_order: (internally used to free response packet)
  * @_rx_page_addr: (internally used to free response packet)
- * @handler_status: return value of the handler of the command
- *	(put in setup_rx_handlers) - valid for SYNC mode only
  * @flags: can be CMD_*
  * @len: array of the lengths of the chunks in data
  * @dataflags: IWL_HCMD_DFL_*
- * @id: id of the host command
+ * @id: command id of the host command, for wide commands encoding the
+ *	version and group as well
  */
 struct iwl_host_cmd {
 	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
 	struct iwl_rx_packet *resp_pkt;
 	unsigned long _rx_page_addr;
 	u32 _rx_page_order;
-	int handler_status;
 
 	u32 flags;
+	u32 id;
 	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
 	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
-	u8 id;
 };
 
 static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
@@ -379,6 +438,7 @@
  * @bc_table_dword: set to true if the BC table expects the byte count to be
  *	in DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
+ * @wide_cmd_header: firmware supports wide host command header
  * @command_names: array of command names, must be 256 entries
  *	(one for each command); for debugging only
  * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
@@ -396,6 +456,7 @@
 	bool rx_buf_size_8k;
 	bool bc_table_dword;
 	bool scd_set_active;
+	bool wide_cmd_header;
 	const char *const *command_names;
 
 	u32 sdio_adma_addr;
@@ -544,10 +605,12 @@
 			      u32 value);
 	void (*ref)(struct iwl_trans *trans);
 	void (*unref)(struct iwl_trans *trans);
-	void (*suspend)(struct iwl_trans *trans);
+	int  (*suspend)(struct iwl_trans *trans);
 	void (*resume)(struct iwl_trans *trans);
 
-	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans);
+	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
+						 struct iwl_fw_dbg_trigger_tlv
+						 *trigger);
 };
 
 /**
@@ -584,6 +647,8 @@
  * @cfg - pointer to the configuration
  * @status: a bit-mask of transport status flags
  * @dev - pointer to struct device * that represents the device
+ * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
+ *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
  * @hw_id: a u32 with the ID of the device / sub-device.
  *	Set during transport allocation.
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
@@ -603,6 +668,12 @@
  * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
  * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
  * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
+ * @paging_req_addr: The location where the FW will upload / download the pages
+ *	from. The address is set by the opmode.
+ * @paging_db: Pointer to the opmode paging database; the pointer is set by
+ *	the opmode.
+ * @paging_download_buf: Buffer used for copying all of the pages before
+ *	downloading them to the FW. The buffer is allocated by the opmode.
  */
 struct iwl_trans {
 	const struct iwl_trans_ops *ops;
@@ -612,6 +683,7 @@
 	unsigned long status;
 
 	struct device *dev;
+	u32 max_skb_frags;
 	u32 hw_rev;
 	u32 hw_id;
 	char hw_id_str[52];
@@ -639,6 +711,14 @@
 	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
 	u8 dbg_dest_reg_num;
 
+	/*
+	 * Paging parameters - All of the parameters should be set by the
+	 * opmode when paging is enabled
+	 */
+	u32 paging_req_addr;
+	struct iwl_fw_paging *paging_db;
+	void *paging_download_buf;
+
 	enum iwl_d0i3_mode d0i3_mode;
 
 	bool wowlan_d0i3;
@@ -730,7 +810,8 @@
 static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
 {
 	might_sleep();
-	trans->ops->d3_suspend(trans, test);
+	if (trans->ops->d3_suspend)
+		trans->ops->d3_suspend(trans, test);
 }
 
 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
@@ -738,6 +819,9 @@
 				      bool test)
 {
 	might_sleep();
+	if (!trans->ops->d3_resume)
+		return 0;
+
 	return trans->ops->d3_resume(trans, status, test);
 }
 
@@ -753,10 +837,12 @@
 		trans->ops->unref(trans);
 }
 
-static inline void iwl_trans_suspend(struct iwl_trans *trans)
+static inline int iwl_trans_suspend(struct iwl_trans *trans)
 {
-	if (trans->ops->suspend)
-		trans->ops->suspend(trans);
+	if (!trans->ops->suspend)
+		return 0;
+
+	return trans->ops->suspend(trans);
 }
 
 static inline void iwl_trans_resume(struct iwl_trans *trans)
@@ -766,11 +852,12 @@
 }
 
 static inline struct iwl_trans_dump_data *
-iwl_trans_dump_data(struct iwl_trans *trans)
+iwl_trans_dump_data(struct iwl_trans *trans,
+		    struct iwl_fw_dbg_trigger_tlv *trigger)
 {
 	if (!trans->ops->dump_data)
 		return NULL;
-	return trans->ops->dump_data(trans);
+	return trans->ops->dump_data(trans, trigger);
 }
 
 static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
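
The new 32-bit host command id packs the opcode, group and version into one word, with iwl_cmd_id() as the inverse of the three accessors. A self-contained round-trip check of that encoding (user-space types substituted for the kernel's u8/u32):

#include <assert.h>
#include <stdint.h>

static inline uint8_t cmd_opcode(uint32_t id)  { return id & 0xFF; }
static inline uint8_t cmd_groupid(uint32_t id) { return (id >> 8) & 0xFF; }
static inline uint8_t cmd_version(uint32_t id) { return (id >> 16) & 0xFF; }

static inline uint32_t cmd_id(uint8_t opcode, uint8_t groupid, uint8_t version)
{
	return opcode | (groupid << 8) | ((uint32_t)version << 16);
}

int main(void)
{
	uint32_t id = cmd_id(0x2a, 0x01, 0x03);

	assert(cmd_opcode(id) == 0x2a);   /* fields survive the round trip */
	assert(cmd_groupid(id) == 0x01);
	assert(cmd_version(id) == 0x03);
	return 0;
}
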
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index 2d7c3ea..8c2c3d1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -6,6 +6,7 @@
 iwlmvm-y += tt.o offloading.o tdls.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
 iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
+iwlmvm-y += tof.o
 iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
 
 ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index b4737e2..e290ac6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -725,15 +725,17 @@
 	}
 }
 
-int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
-			     struct iwl_rx_cmd_buffer *rxb,
-			     struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+			      struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-		return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd);
+	if (!fw_has_api(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+		iwl_mvm_rx_bt_coex_notif_old(mvm, rxb);
+		return;
+	}
 
 	IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
 	IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
@@ -748,12 +750,6 @@
 	memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
 
 	iwl_mvm_bt_coex_notif_handle(mvm);
-
-	/*
-	 * This is an async handler for a notification, returning anything other
-	 * than 0 doesn't make sense even if HCMD failed.
-	 */
-	return 0;
 }
 
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -947,9 +943,8 @@
 	iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
-int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
-				  struct iwl_rx_cmd_buffer *rxb,
-				  struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+				   struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u32 ant_isolation = le32_to_cpup((void *)pkt->data);
@@ -957,20 +952,23 @@
 	u8 __maybe_unused lower_bound, upper_bound;
 	u8 lut;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-		return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
+	if (!fw_has_api(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+		iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb);
+		return;
+	}
 
 	if (!iwl_mvm_bt_is_plcr_supported(mvm))
-		return 0;
+		return;
 
 	lockdep_assert_held(&mvm->mutex);
 
 	/* Ignore updates if we are in force mode */
 	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-		return 0;
+		return;
 
 	if (ant_isolation ==  mvm->last_ant_isol)
-		return 0;
+		return;
 
 	for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
 		if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
@@ -989,7 +987,7 @@
 	mvm->last_ant_isol = ant_isolation;
 
 	if (mvm->last_corun_lut == lut)
-		return 0;
+		return;
 
 	mvm->last_corun_lut = lut;
 
@@ -1000,6 +998,8 @@
 	memcpy(&cmd.corun_lut40, antenna_coupling_ranges[lut].lut20,
 	       sizeof(cmd.corun_lut40));
 
-	return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
-				    sizeof(cmd), &cmd);
+	if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
+				 sizeof(cmd), &cmd))
+		IWL_ERR(mvm,
+			"failed to send BT_COEX_UPDATE_CORUN_LUT command\n");
 }
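
The conversion pattern in coex.c (and in coex_legacy.c below): drop the int return that every caller ignored, keep the early returns, and log a failed send at the point where it happens. A condensed sketch with stubbed names:

#include <stdio.h>

static int send_cmd(void) { return -1; /* stub: pretend the send failed */ }

/* Notification handler: errors are logged, not returned to the caller. */
static void rx_ant_coupling_notif(int plcr_supported)
{
	if (!plcr_supported)
		return;
	if (send_cmd())
		fprintf(stderr,
			"failed to send BT_COEX_UPDATE_CORUN_LUT command\n");
}
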
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index 6ac6de2..61c07b0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -1058,9 +1058,8 @@
 		IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
 }
 
-int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
-				 struct iwl_rx_cmd_buffer *rxb,
-				 struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
+				  struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data;
@@ -1083,12 +1082,6 @@
 	memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old));
 
 	iwl_mvm_bt_coex_notif_handle(mvm);
-
-	/*
-	 * This is an async handler for a notification, returning anything other
-	 * than 0 doesn't make sense even if HCMD failed.
-	 */
-	return 0;
 }
 
 static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
@@ -1250,14 +1243,12 @@
 	iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
-int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
-				      struct iwl_rx_cmd_buffer *rxb,
-				      struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
+				       struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u32 ant_isolation = le32_to_cpup((void *)pkt->data);
 	u8 __maybe_unused lower_bound, upper_bound;
-	int ret;
 	u8 lut;
 
 	struct iwl_bt_coex_cmd_old *bt_cmd;
@@ -1268,16 +1259,16 @@
 	};
 
 	if (!iwl_mvm_bt_is_plcr_supported(mvm))
-		return 0;
+		return;
 
 	lockdep_assert_held(&mvm->mutex);
 
 	/* Ignore updates if we are in force mode */
 	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-		return 0;
+		return;
 
 	if (ant_isolation ==  mvm->last_ant_isol)
-		return 0;
+		return;
 
 	for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
 		if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
@@ -1296,13 +1287,13 @@
 	mvm->last_ant_isol = ant_isolation;
 
 	if (mvm->last_corun_lut == lut)
-		return 0;
+		return;
 
 	mvm->last_corun_lut = lut;
 
 	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
 	if (!bt_cmd)
-		return 0;
+		return;
 	cmd.data[0] = bt_cmd;
 
 	bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
@@ -1317,8 +1308,8 @@
 	memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
 	       sizeof(bt_cmd->bt4_corun_lut40));
 
-	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	if (iwl_mvm_send_cmd(mvm, &cmd))
+		IWL_ERR(mvm, "failed to send BT_CONFIG command\n");
 
 	kfree(bt_cmd);
-	return ret;
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index beba375..b8ee312 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -102,6 +102,7 @@
 #define IWL_MVM_QUOTA_THRESHOLD			4
 #define IWL_MVM_RS_RSSI_BASED_INIT_RATE         0
 #define IWL_MVM_RS_DISABLE_P2P_MIMO		0
+#define IWL_MVM_TOF_IS_RESPONDER		0
 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE    1
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE      2
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW   1
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 4165d10..04264e4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -1145,7 +1145,7 @@
 static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
 {
 	struct iwl_notification_wait wait_d3;
-	static const u8 d3_notif[] = { D3_CONFIG_CMD };
+	static const u16 d3_notif[] = { D3_CONFIG_CMD };
 	int ret;
 
 	iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
@@ -1168,13 +1168,17 @@
 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int ret;
 
-	iwl_trans_suspend(mvm->trans);
+	ret = iwl_trans_suspend(mvm->trans);
+	if (ret)
+		return ret;
+
 	mvm->trans->wowlan_d0i3 = wowlan->any;
 	if (mvm->trans->wowlan_d0i3) {
 		/* 'any' trigger means d0i3 usage */
 		if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
-			int ret = iwl_mvm_enter_d0i3_sync(mvm);
+			ret = iwl_mvm_enter_d0i3_sync(mvm);
 
 			if (ret)
 				return ret;
@@ -1183,6 +1187,9 @@
 		mutex_lock(&mvm->d0i3_suspend_mutex);
 		__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
 		mutex_unlock(&mvm->d0i3_suspend_mutex);
+
+		iwl_trans_d3_suspend(mvm->trans, false);
+
 		return 0;
 	}
 
@@ -1935,28 +1942,59 @@
 	return 1;
 }
 
+static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
+{
+	iwl_trans_resume(mvm->trans);
+
+	return __iwl_mvm_resume(mvm, false);
+}
+
+static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
+{
+	bool exit_now;
+	enum iwl_d3_status d3_status;
+
+	iwl_trans_d3_resume(mvm->trans, &d3_status, false);
+
+	/*
+	 * make sure to clear D0I3_DEFER_WAKEUP before
+	 * calling iwl_trans_resume(), which might wait
+	 * for d0i3 exit completion.
+	 */
+	mutex_lock(&mvm->d0i3_suspend_mutex);
+	__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+	exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
+					&mvm->d0i3_suspend_flags);
+	mutex_unlock(&mvm->d0i3_suspend_mutex);
+	if (exit_now) {
+		IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
+		_iwl_mvm_exit_d0i3(mvm);
+	}
+
+	iwl_trans_resume(mvm->trans);
+
+	if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
+		int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);
+
+		if (ret)
+			return ret;
+		/*
+		 * d0i3 exit will be deferred until reconfig_complete.
+		 * Make sure we are out of d0i3 there.
+		 */
+	}
+	return 0;
+}
+
 int iwl_mvm_resume(struct ieee80211_hw *hw)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
-	iwl_trans_resume(mvm->trans);
-
-	if (mvm->hw->wiphy->wowlan_config->any) {
-		/* 'any' trigger means d0i3 usage */
-		if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
-			int ret = iwl_mvm_exit_d0i3(hw->priv);
-
-			if (ret)
-				return ret;
-			/*
-			 * d0i3 exit will be deferred until reconfig_complete.
-			 * make sure there we are out of d0i3.
-			 */
-		}
-		return 0;
-	}
-
-	return __iwl_mvm_resume(mvm, false);
+	/* 'any' trigger means d0i3 was used */
+	if (hw->wiphy->wowlan_config->any)
+		return iwl_mvm_resume_d0i3(mvm);
+	else
+		return iwl_mvm_resume_d3(mvm);
 }
 
 void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
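
The d3.c rework does two things: iwl_mvm_suspend now propagates a failure from iwl_trans_suspend instead of discarding it, and resume is split into d3 and d0i3 paths selected by the 'any' wakeup trigger. The control flow, reduced to stubs (function names follow the diff, bodies are placeholders):

static int trans_suspend(void)   { return 0; /* stub */ }
static int enter_d0i3_sync(void) { return 0; /* stub */ }

/* Return the first error instead of silently continuing to suspend. */
static int mvm_suspend(int wowlan_any)
{
	int ret = trans_suspend();

	if (ret)
		return ret;
	if (wowlan_any)
		return enter_d0i3_sync();  /* 'any' trigger means d0i3 usage */
	return 0;
}
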
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 5c8a65d..383a316 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -63,6 +63,7 @@
  *
  *****************************************************************************/
 #include "mvm.h"
+#include "fw-api-tof.h"
 #include "debugfs.h"
 
 static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
@@ -497,6 +498,731 @@
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
+static inline char *iwl_dbgfs_is_match(char *name, char *buf)
+{
+	int len = strlen(name);
+
+	return !strncmp(name, buf, len) ? buf + len : NULL;
+}
+
+static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
+					  char *buf,
+					  size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	int value, ret = -EINVAL;
+	char *data;
+
+	mutex_lock(&mvm->mutex);
+
+	data = iwl_dbgfs_is_match("tof_disabled=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.tof_cfg.tof_disabled = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("one_sided_disabled=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.tof_cfg.one_sided_disabled = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("is_debug_mode=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.tof_cfg.is_debug_mode = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("is_buf=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.tof_cfg.is_buf_required = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("send_tof_cfg=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0 && value) {
+			ret = iwl_mvm_tof_config_cmd(mvm);
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_enable_read(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char buf[256];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	struct iwl_tof_config_cmd *cmd;
+
+	cmd = &mvm->tof_data.tof_cfg;
+
+	mutex_lock(&mvm->mutex);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n",
+			 cmd->tof_disabled);
+	pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n",
+			 cmd->one_sided_disabled);
+	pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n",
+			 cmd->is_debug_mode);
+	pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n",
+			 cmd->is_buf_required);
+
+	mutex_unlock(&mvm->mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
+						    char *buf,
+						    size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	int value, ret = 0;
+	char *data;
+
+	mutex_lock(&mvm->mutex);
+
+	data = iwl_dbgfs_is_match("burst_period=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (!ret)
+			mvm->tof_data.responder_cfg.burst_period =
+							cpu_to_le16(value);
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.min_delta_ftm = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("burst_duration=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.burst_duration = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("num_of_burst_exp=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.num_of_burst_exp = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("abort_responder=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.abort_responder = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("get_ch_est=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.get_ch_est = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("recv_sta_req_params=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.recv_sta_req_params = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("channel_num=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.channel_num = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("bandwidth=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.bandwidth = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("rate=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.rate = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("bssid=", buf);
+	if (data) {
+		u8 *mac = mvm->tof_data.responder_cfg.bssid;
+
+		if (!mac_pton(data, mac)) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.tsf_timer_offset_msecs =
+							cpu_to_le16(value);
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("toa_offset=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.toa_offset =
+							cpu_to_le16(value);
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ctrl_ch_position=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.ctrl_ch_position = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ftm_per_burst=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.ftm_per_burst = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("asap_mode=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.asap_mode = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("send_responder_cfg=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0 && value) {
+			ret = iwl_mvm_tof_responder_cmd(mvm, vif);
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file,
+						   char __user *user_buf,
+						   size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char buf[256];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	struct iwl_tof_responder_config_cmd *cmd;
+
+	cmd = &mvm->tof_data.responder_cfg;
+
+	mutex_lock(&mvm->mutex);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n",
+			 le16_to_cpu(cmd->burst_period));
+	pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n",
+			 cmd->burst_duration);
+	pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n",
+			 cmd->bandwidth);
+	pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n",
+			 cmd->channel_num);
+	pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n",
+			 cmd->ctrl_ch_position);
+	pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n",
+			 cmd->bssid);
+	pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n",
+			 cmd->min_delta_ftm);
+	pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n",
+			 cmd->num_of_burst_exp);
+	pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate);
+	pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n",
+			 cmd->abort_responder);
+	pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n",
+			 cmd->get_ch_est);
+	pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n",
+			 cmd->recv_sta_req_params);
+	pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n",
+			 cmd->ftm_per_burst);
+	pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n",
+			 cmd->ftm_resp_ts_avail);
+	pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n",
+			 cmd->asap_mode);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "tsf_timer_offset_msecs = %d\n",
+			 le16_to_cpu(cmd->tsf_timer_offset_msecs));
+	pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n",
+			 le16_to_cpu(cmd->toa_offset));
+
+	mutex_unlock(&mvm->mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
+						 char *buf, size_t count,
+						 loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	int value, ret = 0;
+	char *data;
+
+	mutex_lock(&mvm->mutex);
+
+	data = iwl_dbgfs_is_match("request_id=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.request_id = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("initiator=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.initiator = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("one_sided_los_disable=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.one_sided_los_disable = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("req_timeout=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.req_timeout = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("report_policy=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.report_policy = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("macaddr_random=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.macaddr_random = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("num_of_ap=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.num_of_ap = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("macaddr_template=", buf);
+	if (data) {
+		u8 mac[ETH_ALEN];
+
+		if (!mac_pton(data, mac)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
+	}
+
+	data = iwl_dbgfs_is_match("macaddr_mask=", buf);
+	if (data) {
+		u8 mac[ETH_ALEN];
+
+		if (!mac_pton(data, mac)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
+	}
+
+	data = iwl_dbgfs_is_match("ap=", buf);
+	if (data) {
+		struct iwl_tof_range_req_ap_entry ap;
+		int size = sizeof(struct iwl_tof_range_req_ap_entry);
+		u16 burst_period;
+		u8 *mac = ap.bssid;
+		unsigned int i;
+
+		if (sscanf(data, "%u %hhd %hhx %hhx"
+			   "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
+			   "%hhx %hhx %hx"
+			   "%hhx %hhx %x"
+			   "%hhx %hhx %hhx %hhx",
+			   &i, &ap.channel_num, &ap.bandwidth,
+			   &ap.ctrl_ch_position,
+			   mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
+			   &ap.measure_type, &ap.num_of_bursts,
+			   &burst_period,
+			   &ap.samples_per_burst, &ap.retries_per_sample,
+			   &ap.tsf_delta, &ap.location_req, &ap.asap_mode,
+			   &ap.enable_dyn_ack, &ap.rssi) != 20) {
+			ret = -EINVAL;
+			goto out;
+		}
+		if (i >= IWL_MVM_TOF_MAX_APS) {
+			IWL_ERR(mvm, "Invalid AP index %d\n", i);
+			ret = -EINVAL;
+			goto out;
+		}
+
+		ap.burst_period = cpu_to_le16(burst_period);
+
+		memcpy(&mvm->tof_data.range_req.ap[i], &ap, size);
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("send_range_request=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0 && value) {
+			ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&mvm->mutex);
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file,
+						char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char buf[512];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	struct iwl_tof_range_req_cmd *cmd;
+	int i;
+
+	cmd = &mvm->tof_data.range_req;
+
+	mutex_lock(&mvm->mutex);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n",
+			 cmd->request_id);
+	pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n",
+			 cmd->initiator);
+	pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n",
+			 cmd->one_sided_los_disable);
+	pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n",
+			 cmd->req_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n",
+			 cmd->report_policy);
+	pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n",
+			 cmd->macaddr_random);
+	pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n",
+			 cmd->macaddr_template);
+	pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n",
+			 cmd->macaddr_mask);
+	pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n",
+			 cmd->num_of_ap);
+	for (i = 0; i < cmd->num_of_ap; i++) {
+		struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
+
+		pos += scnprintf(buf + pos, bufsz - pos,
+				"ap %.2d: channel_num=%hhx bw=%hhx"
+				" control=%hhx bssid=%pM type=%hhx"
+				" num_of_bursts=%hhx burst_period=%hx ftm=%hhx"
+				" retries=%hhx tsf_delta=%x location_req=%hhx "
+				" asap=%hhx enable=%hhx rssi=%hhx\n",
+				i, ap->channel_num, ap->bandwidth,
+				ap->ctrl_ch_position, ap->bssid,
+				ap->measure_type, ap->num_of_bursts,
+				ap->burst_period, ap->samples_per_burst,
+				ap->retries_per_sample, ap->tsf_delta,
+				ap->location_req, ap->asap_mode,
+				ap->enable_dyn_ack, ap->rssi);
+	}
+
+	mutex_unlock(&mvm->mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
+						 char *buf,
+						 size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	int value, ret = 0;
+	char *data;
+
+	mutex_lock(&mvm->mutex);
+
+	data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req_ext.tsf_timer_offset_msec =
+							cpu_to_le16(value);
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req_ext.min_delta_ftm = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req_ext.ftm_format_and_bw20M =
+									value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req_ext.ftm_format_and_bw40M =
+									value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req_ext.ftm_format_and_bw80M =
+									value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0 && value) {
+			ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&mvm->mutex);
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file,
+						char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char buf[256];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	struct iwl_tof_range_req_ext_cmd *cmd;
+
+	cmd = &mvm->tof_data.range_req_ext;
+
+	mutex_lock(&mvm->mutex);
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "tsf_timer_offset_msec = %hx\n",
+			 cmd->tsf_timer_offset_msec);
+	pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhx\n",
+			 cmd->min_delta_ftm);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "ftm_format_and_bw20M = %hhx\n",
+			 cmd->ftm_format_and_bw20M);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "ftm_format_and_bw40M = %hhx\n",
+			 cmd->ftm_format_and_bw40M);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "ftm_format_and_bw80M = %hhx\n",
+			 cmd->ftm_format_and_bw80M);
+
+	mutex_unlock(&mvm->mutex);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif,
+					       char *buf,
+					       size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	int value, ret = 0;
+	int abort_id;
+	char *data;
+
+	mutex_lock(&mvm->mutex);
+
+	data = iwl_dbgfs_is_match("abort_id=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.last_abort_id = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("send_range_abort=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0 && value) {
+			abort_id = mvm->tof_data.last_abort_id;
+			ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id);
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&mvm->mutex);
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file,
+					      char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char buf[32];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	int last_abort_id;
+
+	mutex_lock(&mvm->mutex);
+	last_abort_id = mvm->tof_data.last_abort_id;
+	mutex_unlock(&mvm->mutex);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n",
+			 last_abort_id);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file,
+						 char __user *user_buf,
+						 size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char *buf;
+	int pos = 0;
+	const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256;
+	struct iwl_tof_range_rsp_ntfy *cmd;
+	int i, ret;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&mvm->mutex);
+	cmd = &mvm->tof_data.range_resp;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n",
+			 cmd->request_id);
+	pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n",
+			 cmd->request_status);
+	pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n",
+			 cmd->last_in_batch);
+	pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n",
+			 cmd->num_of_aps);
+	for (i = 0; i < cmd->num_of_aps; i++) {
+		struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
+
+		pos += scnprintf(buf + pos, bufsz - pos,
+				"ap %.2d: bssid=%pM status=%hhx bw=%hhx"
+				" rtt=%x rtt_var=%x rtt_spread=%x"
+				" rssi=%hhx  rssi_spread=%hhx"
+				" range=%x range_var=%x"
+				" time_stamp=%x\n",
+				i, ap->bssid, ap->measure_status,
+				ap->measure_bw,
+				ap->rtt, ap->rtt_variance, ap->rtt_spread,
+				ap->rssi, ap->rssi_spread, ap->range,
+				ap->range_variance, ap->timestamp);
+	}
+	mutex_unlock(&mvm->mutex);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
 static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
 					   size_t count, loff_t *ppos)
 {
@@ -628,6 +1354,12 @@
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
+MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
 
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
@@ -671,6 +1403,25 @@
 		MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
 					 S_IRUSR | S_IWUSR);
 
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) &&
+	    !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) {
+		if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP)
+			MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params,
+						 mvmvif->dbgfs_dir,
+						 S_IRUSR | S_IWUSR);
+
+		MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir,
+					 S_IRUSR | S_IWUSR);
+		MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir,
+					 S_IRUSR | S_IWUSR);
+		MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir,
+					 S_IRUSR | S_IWUSR);
+		MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir,
+					 S_IRUSR | S_IWUSR);
+		MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir,
+					 S_IRUSR);
+	}
+
 	/*
 	 * Create symlink for convenience pointing to interface specific
 	 * debugfs entries for the driver. For example, under
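The ToF debugfs write handlers above all share one "key=value" parsing pattern: match a prefix with iwl_dbgfs_is_match() and convert the remainder with kstrtou32(). A minimal user-space sketch of that pattern, where is_match() is an illustrative stand-in for the driver's helper, not a verbatim excerpt:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* illustrative stand-in for the driver's iwl_dbgfs_is_match() */
	static char *is_match(const char *prefix, char *buf)
	{
		size_t len = strlen(prefix);

		return strncmp(buf, prefix, len) ? NULL : buf + len;
	}

	int main(void)
	{
		char buf[] = "abort_id=7";
		char *data = is_match("abort_id=", buf);

		if (data)	/* mirrors the kstrtou32() step above */
			printf("abort_id = %lu\n", strtoul(data, NULL, 10));
		return 0;
	}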
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index ffb4b5c..7d69a55 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -949,9 +949,10 @@
 					   char *buf, size_t count,
 					   loff_t *ppos)
 {
-	int ret, conf_id;
+	unsigned int conf_id;
+	int ret;
 
-	ret = kstrtoint(buf, 0, &conf_id);
+	ret = kstrtouint(buf, 0, &conf_id);
 	if (ret)
 		return ret;
 
@@ -974,7 +975,7 @@
 	if (ret)
 		return ret;
 
-	iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, 0);
+	iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, NULL);
 
 	iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
 
@@ -1200,12 +1201,7 @@
 	if (ptr) {
 		for (ofs = 0; ofs < len; ofs += 16) {
 			pos += scnprintf(buf + pos, bufsz - pos,
-					 "0x%.4x ", ofs);
-			hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
-					   bufsz - pos, false);
-			pos += strlen(buf + pos);
-			if (bufsz - pos > 0)
-				buf[pos++] = '\n';
+					 "0x%.4x %16ph\n", ofs, ptr + ofs);
 		}
 	} else {
 		pos += scnprintf(buf + pos, bufsz - pos,
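The last hunk above replaces the manual hex_dump_to_buffer() plus newline bookkeeping with the kernel's %*ph printk extension, which formats a small buffer (up to 64 bytes) as space-separated hex in a single call. A kernel-style sketch, assuming a 16-byte line buffer:

	u8 line[16] = { 0xde, 0xad, 0xbe, 0xef };	/* rest zero */
	char out[80];

	/* %16ph consumes one pointer and prints 16 bytes as hex */
	scnprintf(out, sizeof(out), "0x%.4x %16ph\n", 0, line);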
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index d7658d1..20521be 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -339,8 +339,13 @@
 	IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE			= BIT(8),
 	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS			= BIT(9),
 	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE		= BIT(10),
-	/* BIT(11) reserved */
+	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL		= BIT(11),
 	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET		= BIT(12),
+	IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET			= BIT(13),
+	IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER		= BIT(14),
+	IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN		= BIT(15),
+	IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN			= BIT(16),
+
 }; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
 
 struct iwl_wowlan_gtk_status {
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index b1baa33..7005fa4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -310,17 +312,22 @@
 	__le16 pwr_restriction;
 } __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
 
+enum iwl_dev_tx_power_cmd_mode {
+	IWL_TX_POWER_MODE_SET_MAC = 0,
+	IWL_TX_POWER_MODE_SET_DEVICE = 1,
+	IWL_TX_POWER_MODE_SET_CHAINS = 2,
+}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_2 */
+
 /**
- * struct iwl_dev_tx_power_cmd - TX power reduction command
- * REDUCE_TX_POWER_CMD = 0x9f
- * @set_mode: 0 - MAC tx power, 1 - device tx power
+ * struct iwl_dev_tx_power_cmd_v2 - TX power reduction command
+ * @set_mode: see &enum iwl_dev_tx_power_cmd_mode
  * @mac_context_id: id of the mac ctx for which we are reducing TX power.
  * @pwr_restriction: TX power restriction in 1/8 dBms.
  * @dev_24: device TX power restriction in 1/8 dBms
  * @dev_52_low: device TX power restriction upper band - low
  * @dev_52_high: device TX power restriction upper band - high
  */
-struct iwl_dev_tx_power_cmd {
+struct iwl_dev_tx_power_cmd_v2 {
 	__le32 set_mode;
 	__le32 mac_context_id;
 	__le16 pwr_restriction;
@@ -329,6 +336,20 @@
 	__le16 dev_52_high;
 } __packed; /* TX_REDUCED_POWER_API_S_VER_2 */
 
+#define IWL_NUM_CHAIN_LIMITS	2
+#define IWL_NUM_SUB_BANDS	5
+
+/**
+ * struct iwl_dev_tx_power_cmd - TX power reduction command
+ * @v2: version 2 of the command, embedded here for easier software handling
+ * @per_chain_restriction: per chain restrictions
+ */
+struct iwl_dev_tx_power_cmd {
+	/* v3 is just an extension of v2 - keep this here */
+	struct iwl_dev_tx_power_cmd_v2 v2;
+	__le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
+} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */
+
 #define IWL_DEV_MAX_TX_POWER 0x7FFF
 
 /**
@@ -413,7 +434,7 @@
 #define IWL_BF_TEMP_FAST_FILTER_MIN 0
 
 #define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
-#define IWL_BF_TEMP_SLOW_FILTER_D0I3 5
+#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20
 #define IWL_BF_TEMP_SLOW_FILTER_MAX 255
 #define IWL_BF_TEMP_SLOW_FILTER_MIN 0
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 737774a..660cc1c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -87,41 +87,6 @@
 	u8 ssid[IEEE80211_MAX_SSID_LEN];
 } __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
 
-/* How many statistics are gathered for each channel */
-#define SCAN_RESULTS_STATISTICS 1
-
-/**
- * enum iwl_scan_complete_status - status codes for scan complete notifications
- * @SCAN_COMP_STATUS_OK:  scan completed successfully
- * @SCAN_COMP_STATUS_ABORT: scan was aborted by user
- * @SCAN_COMP_STATUS_ERR_SLEEP: sending null sleep packet failed
- * @SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT: timeout before channel is ready
- * @SCAN_COMP_STATUS_ERR_PROBE: sending probe request failed
- * @SCAN_COMP_STATUS_ERR_WAKEUP: sending null wakeup packet failed
- * @SCAN_COMP_STATUS_ERR_ANTENNAS: invalid antennas chosen at scan command
- * @SCAN_COMP_STATUS_ERR_INTERNAL: internal error caused scan abort
- * @SCAN_COMP_STATUS_ERR_COEX: medium was lost ot WiMax
- * @SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful
- *	(not an error!)
- * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repetition the driver
- *	asked for
- * @SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events
-*/
-enum iwl_scan_complete_status {
-	SCAN_COMP_STATUS_OK = 0x1,
-	SCAN_COMP_STATUS_ABORT = 0x2,
-	SCAN_COMP_STATUS_ERR_SLEEP = 0x3,
-	SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT = 0x4,
-	SCAN_COMP_STATUS_ERR_PROBE = 0x5,
-	SCAN_COMP_STATUS_ERR_WAKEUP = 0x6,
-	SCAN_COMP_STATUS_ERR_ANTENNAS = 0x7,
-	SCAN_COMP_STATUS_ERR_INTERNAL = 0x8,
-	SCAN_COMP_STATUS_ERR_COEX = 0x9,
-	SCAN_COMP_STATUS_P2P_ACTION_OK = 0xA,
-	SCAN_COMP_STATUS_ITERATION_END = 0x0B,
-	SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C,
-};
-
 /* scan offload */
 #define IWL_SCAN_MAX_BLACKLIST_LEN	64
 #define IWL_SCAN_SHORT_BLACKLIST_LEN	16
@@ -144,71 +109,6 @@
 };
 
 /**
- * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
- * @scan_flags:		see enum iwl_scan_flags
- * @channel_count:	channels in channel list
- * @quiet_time:		dwell time, in milliseconds, on quiet channel
- * @quiet_plcp_th:	quiet channel num of packets threshold
- * @good_CRC_th:	passive to active promotion threshold
- * @rx_chain:		RXON rx chain.
- * @max_out_time:	max TUs to be out of associated channel
- * @suspend_time:	pause scan this TUs when returning to service channel
- * @flags:		RXON flags
- * @filter_flags:	RXONfilter
- * @tx_cmd:		tx command for active scan; for 2GHz and for 5GHz.
- * @direct_scan:	list of SSIDs for directed active scan
- * @scan_type:		see enum iwl_scan_type.
- * @rep_count:		repetition count for each scheduled scan iteration.
- */
-struct iwl_scan_offload_cmd {
-	__le16 len;
-	u8 scan_flags;
-	u8 channel_count;
-	__le16 quiet_time;
-	__le16 quiet_plcp_th;
-	__le16 good_CRC_th;
-	__le16 rx_chain;
-	__le32 max_out_time;
-	__le32 suspend_time;
-	/* RX_ON_FLAGS_API_S_VER_1 */
-	__le32 flags;
-	__le32 filter_flags;
-	struct iwl_tx_cmd tx_cmd[2];
-	/* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
-	struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
-	__le32 scan_type;
-	__le32 rep_count;
-} __packed;
-
-enum iwl_scan_offload_channel_flags {
-	IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE		= BIT(0),
-	IWL_SCAN_OFFLOAD_CHANNEL_NARROW		= BIT(22),
-	IWL_SCAN_OFFLOAD_CHANNEL_FULL		= BIT(24),
-	IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL	= BIT(25),
-};
-
-/* channel configuration for struct iwl_scan_offload_cfg. Each channels needs:
- * __le32 type:	bitmap; bits 1-20 are for directed scan to i'th ssid and
- *	see enum iwl_scan_offload_channel_flags.
- * __le16 channel_number: channel number 1-13 etc.
- * __le16 iter_count: repetition count for the channel.
- * __le32 iter_interval: interval between two iterations on one channel.
- * u8 active_dwell.
- * u8 passive_dwell.
- */
-#define IWL_SCAN_CHAN_SIZE 14
-
-/**
- * iwl_scan_offload_cfg - SCAN_OFFLOAD_CONFIG_API_S
- * @scan_cmd:		scan command fixed part
- * @data:		scan channel configuration and probe request frames
- */
-struct iwl_scan_offload_cfg {
-	struct iwl_scan_offload_cmd scan_cmd;
-	u8 data[0];
-} __packed;
-
-/**
  * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
  * @ssid:		MAC address to filter out
  * @reported_rssi:	AP rssi reported to the host
@@ -298,35 +198,6 @@
 };
 
 /**
- * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
- * @last_schedule_line:		last schedule line executed (fast or regular)
- * @last_schedule_iteration:	last scan iteration executed before scan abort
- * @status:			enum iwl_scan_offload_compleate_status
- * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
- */
-struct iwl_scan_offload_complete {
-	u8 last_schedule_line;
-	u8 last_schedule_iteration;
-	u8 status;
-	u8 ebs_status;
-} __packed;
-
-/**
- * iwl_sched_scan_results - SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1
- * @ssid_bitmap:	SSIDs indexes found in this iteration
- * @client_bitmap:	clients that are active and wait for this notification
- */
-struct iwl_sched_scan_results {
-	__le16 ssid_bitmap;
-	u8 client_bitmap;
-	u8 reserved;
-};
-
-/* Unified LMAC scan API */
-
-#define IWL_MVM_BASIC_PASSIVE_DWELL 110
-
-/**
  * iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
  * @tx_flags: combination of TX_CMD_FLG_*
  * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
@@ -550,18 +421,6 @@
 
 /* UMAC Scan API */
 
-/**
- * struct iwl_mvm_umac_cmd_hdr - Command header for UMAC commands
- * @size:	size of the command (not including header)
- * @reserved0:	for future use and alignment
- * @ver:	API version number
- */
-struct iwl_mvm_umac_cmd_hdr {
-	__le16 size;
-	u8 reserved0;
-	u8 ver;
-} __packed;
-
 /* The maximum of either of these cannot exceed 8, because we use an
  * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
  */
@@ -621,7 +480,6 @@
 
 /**
  * struct iwl_scan_config
- * @hdr: umac command header
  * @flags:			enum scan_config_flags
  * @tx_chains:			valid_tx antenna - ANT_* definitions
  * @rx_chains:			valid_rx antenna - ANT_* definitions
@@ -639,7 +497,6 @@
  * @channel_array:		default supported channels
  */
 struct iwl_scan_config {
-	struct iwl_mvm_umac_cmd_hdr hdr;
 	__le32 flags;
 	__le32 tx_chains;
 	__le32 rx_chains;
@@ -735,7 +592,6 @@
 
 /**
  * struct iwl_scan_req_umac
- * @hdr: umac command header
  * @flags: &enum iwl_umac_scan_flags
  * @uid: scan id, &enum iwl_umac_scan_uid_offsets
  * @ooc_priority: out of channel priority - &enum iwl_scan_priority
@@ -754,7 +610,6 @@
  *	&struct iwl_scan_req_umac_tail
  */
 struct iwl_scan_req_umac {
-	struct iwl_mvm_umac_cmd_hdr hdr;
 	__le32 flags;
 	__le32 uid;
 	__le32 ooc_priority;
@@ -776,12 +631,10 @@
 
 /**
  * struct iwl_umac_scan_abort
- * @hdr: umac command header
  * @uid: scan id, &enum iwl_umac_scan_uid_offsets
  * @flags: reserved
  */
 struct iwl_umac_scan_abort {
-	struct iwl_mvm_umac_cmd_hdr hdr;
 	__le32 uid;
 	__le32 flags;
 } __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index 21dd5b7..493a8bd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -366,8 +366,8 @@
  * ( MGMT_MCAST_KEY = 0x1f )
  * @ctrl_flags: %iwl_sta_key_flag
  * @IGTK:
- * @K1: IGTK master key
- * @K2: IGTK sub key
+ * @K1: unused
+ * @K2: unused
  * @sta_id: station ID that support IGTK
  * @key_id:
  * @receive_seq_cnt: initial RSC/PN needed for replay check
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h
new file mode 100644
index 0000000..eed6271
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h
@@ -0,0 +1,386 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __fw_api_tof_h__
+#define __fw_api_tof_h__
+
+#include "fw-api.h"
+
+/* ToF sub-group command IDs */
+enum iwl_mvm_tof_sub_grp_ids {
+	TOF_RANGE_REQ_CMD = 0x1,
+	TOF_CONFIG_CMD = 0x2,
+	TOF_RANGE_ABORT_CMD = 0x3,
+	TOF_RANGE_REQ_EXT_CMD = 0x4,
+	TOF_RESPONDER_CONFIG_CMD = 0x5,
+	TOF_NW_INITIATED_RES_SEND_CMD = 0x6,
+	TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7,
+	TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC,
+	TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD,
+	TOF_RANGE_RESPONSE_NOTIF = 0xFE,
+	TOF_MCSI_DEBUG_NOTIF = 0xFB,
+};
+
+/**
+ * struct iwl_tof_config_cmd - ToF configuration
+ * @tof_disabled: 0 - enabled, 1 - disabled
+ * @one_sided_disabled: 0 - enabled, 1 - disabled
+ * @is_debug_mode: 1 - debug mode, 0 - otherwise
+ * @is_buf_required: 1 - channel estimation buffer required, 0 - otherwise
+ */
+struct iwl_tof_config_cmd {
+	__le32 sub_grp_cmd_id;
+	u8 tof_disabled;
+	u8 one_sided_disabled;
+	u8 is_debug_mode;
+	u8 is_buf_required;
+} __packed;
+
+/**
+ * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * @burst_period: future use: (currently hard coded in the LMAC)
+ *		  The interval between two sequential bursts.
+ * @min_delta_ftm: future use: (currently hard coded in the LMAC)
+ *		   The minimum delay between two sequential FTM Responses
+ *		   in the same burst.
+ * @burst_duration: future use: (currently hard coded in the LMAC)
+ *		   The total time for all FTM handshakes in the same burst.
+ *		   Affects the time event duration in the LMAC.
+ * @num_of_burst_exp: future use: (currently hard coded in the LMAC)
+ *		   The number of bursts for the current ToF request. Affects
+ *		   the number of event allocations in the current iteration.
+ * @get_ch_est: for xVT only, NA for driver
+ * @abort_responder: when set to '1' - Responder will terminate its activity
+ *		     (all other fields in the command are ignored)
+ * @recv_sta_req_params: 1 - Responder will ignore the other Responder's
+ *			 params and use the recommended Initiator params.
+ *			 0 - otherwise
+ * @channel_num: current AP Channel
+ * @bandwidth: current AP Bandwidth: 0 - 20MHz, 1 - 40MHz, 2 - 80MHz
+ * @rate: current AP rate
+ * @ctrl_ch_position: coding of the control channel position relative to
+ *	     the center frequency.
+ *	     40MHz: 0 - below center, 1 - above center
+ *	     80MHz: bits [0..1]: 0 - the 20MHz nearer the center,
+ *				 1 - the 20MHz farther from the center
+ *		    bit [2]: as above for 40MHz
+ * @ftm_per_burst: FTMs per Burst
+ * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response,
+ *		  '1' - we measure over the Initial FTM Response
+ * @asap_mode: ASAP / Non ASAP mode for the current WLS station
+ * @sta_id: index of the AP STA when in AP mode
+ * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF
+ * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug
+ *		purposes, simulating station movement by adding various values
+ *		to this field
+ * @bssid: Current AP BSSID
+ */
+struct iwl_tof_responder_config_cmd {
+	__le32 sub_grp_cmd_id;
+	__le16 burst_period;
+	u8 min_delta_ftm;
+	u8 burst_duration;
+	u8 num_of_burst_exp;
+	u8 get_ch_est;
+	u8 abort_responder;
+	u8 recv_sta_req_params;
+	u8 channel_num;
+	u8 bandwidth;
+	u8 rate;
+	u8 ctrl_ch_position;
+	u8 ftm_per_burst;
+	u8 ftm_resp_ts_avail;
+	u8 asap_mode;
+	u8 sta_id;
+	__le16 tsf_timer_offset_msecs;
+	__le16 toa_offset;
+	u8 bssid[ETH_ALEN];
+} __packed;
+
+/**
+ * struct iwl_tof_range_request_ext_cmd - extended range req for WLS
+ * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
+ * @min_delta_ftm: Minimal time between two consecutive measurements,
+ *		   in units of 100us. 0 means no preference by station
+ * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
+ *			value to be sent to the AP
+ * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
+ *			value to be sent to the AP
+ * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
+ *			value to be sent to the AP
+ */
+struct iwl_tof_range_req_ext_cmd {
+	__le32 sub_grp_cmd_id;
+	__le16 tsf_timer_offset_msec;
+	__le16 reserved;
+	u8 min_delta_ftm;
+	u8 ftm_format_and_bw20M;
+	u8 ftm_format_and_bw40M;
+	u8 ftm_format_and_bw80M;
+} __packed;
+
+#define IWL_MVM_TOF_MAX_APS 21
+
+/**
+ * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * @channel_num: Current AP Channel
+ * @bandwidth: Current AP Bandwidth: 0 - 20MHz, 1 - 40MHz, 2 - 80MHz
+ * @tsf_delta_direction: TSF relatively to the subject AP
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ *	     center frequency.
+ *	     40MHz: 0 - below center, 1 - above center
+ *	     80MHz: bits [0..1]: 0 - the 20MHz nearer the center,
+ *				 1 - the 20MHz farther from the center
+ *		    bit [2]: as above for 40MHz
+ * @bssid: AP's bss id
+ * @measure_type: Measurement type: 0 - two-sided, 1 - one-sided
+ * @num_of_bursts: Recommended value to be sent to the AP. 2's exponent of the
+ *		   number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ *		  periodicity in units of 100ms. Ignored if num_of_bursts = 0
+ * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31)
+ *		       1-sided: how many rts/cts pairs should be used per burst.
+ * @retries_per_sample: Max number of retries that the LMAC should send
+ *			in case of no replies by the AP.
+ * @tsf_delta: TSF Delta in units of microseconds.
+ *	       The difference between the AP TSF and the device local clock.
+ * @location_req: Location Request Bit[0] LCI should be sent in the FTMR
+ *			      Bit[1] Civic should be sent in the FTMR
+ * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
+ * @enable_dyn_ack: Enable Dynamic ACK BW.
+ *	    0 - Initiator interacts with a regular AP
+ *	    1 - Initiator interacts with a Responder machine: the Initiator
+ *	    ACKs need to be sent with HT 40MHz / 80MHz, since the Responder
+ *	    uses them for its channel estimation measurement (this flag is
+ *	    set when we configure the opposite machine to be a Responder).
+ * @rssi: Last received value
+ *	  legal values: -128..0 (0x7f). Above 0x0 indicates an invalid value.
+ */
+struct iwl_tof_range_req_ap_entry {
+	u8 channel_num;
+	u8 bandwidth;
+	u8 tsf_delta_direction;
+	u8 ctrl_ch_position;
+	u8 bssid[ETH_ALEN];
+	u8 measure_type;
+	u8 num_of_bursts;
+	__le16 burst_period;
+	u8 samples_per_burst;
+	u8 retries_per_sample;
+	__le32 tsf_delta;
+	u8 location_req;
+	u8 asap_mode;
+	u8 enable_dyn_ack;
+	s8 rssi;
+} __packed;
+
+/**
+ * enum iwl_tof_response_mode
+ * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as
+ *			      possible (not supported for this release)
+ * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon
+ *				 timeout expiration
+ * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the
+ *				  earlier of: measurements completion / timeout
+ *				  expiration.
+ */
+enum iwl_tof_response_mode {
+	IWL_MVM_TOF_RESPOSE_ASAP = 1,
+	IWL_MVM_TOF_RESPOSE_TIMEOUT,
+	IWL_MVM_TOF_RESPOSE_COMPLETE,
+};
+
+/**
+ * struct iwl_tof_range_req_cmd - start measurement cmd
+ * @request_id: A Token incremented per request. The same Token will be
+ *		sent back in the range response
+ * @initiator: 0 - NW initiated, 1 - Client initiated
+ * @one_sided_los_disable: '0' - run ML-Algo for both ToF/OneSided,
+ *			   '1' - run ML-Algo for ToF only
+ * @req_timeout: Requested timeout of the response in units of 100ms.
+ *	     This is equivalent to the session time configured to the
+ *	     LMAC in Initiator Request
+ * @report_policy: Supported partially for this release: For current release -
+ *		   the range report will be uploaded as a batch when ready or
+ *		   when the session is done (successfully / partially).
+ *		   One of &enum iwl_tof_response_mode.
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
+ *	            '1' Use MAC Address randomization according to the below
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ *		  Bits set to 1 shall be randomized by the UMAC
+ */
+struct iwl_tof_range_req_cmd {
+	__le32 sub_grp_cmd_id;
+	u8 request_id;
+	u8 initiator;
+	u8 one_sided_los_disable;
+	u8 req_timeout;
+	u8 report_policy;
+	u8 los_det_disable;
+	u8 num_of_ap;
+	u8 macaddr_random;
+	u8 macaddr_template[ETH_ALEN];
+	u8 macaddr_mask[ETH_ALEN];
+	struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+
+/**
+ * struct iwl_tof_gen_resp_cmd - generic ToF response
+ */
+struct iwl_tof_gen_resp_cmd {
+	__le32 sub_grp_cmd_id;
+	u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
+ * @measure_status: current APs measurement status
+ * @measure_bw: Current AP Bandwidth: 0 - 20MHz, 1 - 40MHz, 2 - 80MHz
+ * @rtt: The Round Trip Time that took for the last measurement for
+ *	 current AP [nSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ *	       values measured for current AP in the current session [nsec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ *	        measured for current AP in the current session
+ * @range: Measured range [cm]
+ * @range_variance: Measured range variance [cm]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ *	       uploaded by the LMAC
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy {
+	u8 bssid[ETH_ALEN];
+	u8 measure_status;
+	u8 measure_bw;
+	__le32 rtt;
+	__le32 rtt_variance;
+	__le32 rtt_spread;
+	s8 rssi;
+	u8 rssi_spread;
+	__le16 reserved;
+	__le32 range;
+	__le32 range_variance;
+	__le32 timestamp;
+} __packed;
+
+/**
+ * struct iwl_tof_range_rsp_ntfy - ranging response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @request_status: status of current measurement session
+ * @last_in_batch: report policy (when not all responses are uploaded at once)
+ * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ */
+struct iwl_tof_range_rsp_ntfy {
+	u8 request_id;
+	u8 request_status;
+	u8 last_in_batch;
+	u8 num_of_aps;
+	struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+
+#define IWL_MVM_TOF_MCSI_BUF_SIZE  (245)
+/**
+ * struct iwl_tof_mcsi_notif - used for debug
+ * @token: token ID for the current session
+ * @role: '0' - initiator, '1' - responder
+ * @initiator_bssid: initiator machine
+ * @responder_bssid: responder machine
+ * @mcsi_buffer: debug data
+ */
+struct iwl_tof_mcsi_notif {
+	u8 token;
+	u8 role;
+	__le16 reserved;
+	u8 initiator_bssid[ETH_ALEN];
+	u8 responder_bssid[ETH_ALEN];
+	u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
+} __packed;
+
+/**
+ * struct iwl_tof_neighbor_report
+ * @bssid: BSSID of the AP which sent the report
+ * @request_token: same token as the corresponding request
+ * @status:
+ * @report_ie_len: the length of the response frame starting from the Element ID
+ * @data: the IEs
+ */
+struct iwl_tof_neighbor_report {
+	u8 bssid[ETH_ALEN];
+	u8 request_token;
+	u8 status;
+	__le16 report_ie_len;
+	u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tof_range_abort_cmd
+ * @request_id: corresponds to a range request
+ */
+struct iwl_tof_range_abort_cmd {
+	__le32 sub_grp_cmd_id;
+	u8 request_id;
+	u8 reserved[3];
+} __packed;
+
+#endif
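To see how these structures are meant to be driven, here is a hedged sketch of sending a TOF_CONFIG_CMD; the real wiring lives in the ToF glue this series adds (mvm.h gains an include of tof.h below), so the helper name and field values are illustrative only:

	static int iwl_mvm_tof_config_sketch(struct iwl_mvm *mvm)
	{
		struct iwl_tof_config_cmd cmd = {
			.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD),
			.tof_disabled = 0,		/* 0 - enabled */
			.one_sided_disabled = 0,
			.is_debug_mode = 0,
			.is_buf_required = 1,
		};

		/* TOF_CMD is the new command ID added to fw-api.h below */
		return iwl_mvm_send_cmd_pdu(mvm, TOF_CMD, 0,
					    sizeof(cmd), &cmd);
	}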
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 81c4ea3..853698a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -124,6 +124,18 @@
 	TX_CMD_FLG_HCCA_CHUNK		= BIT(31)
 }; /* TX_FLAGS_BITS_API_S_VER_1 */
 
+/**
+ * enum iwl_tx_pm_timeouts - pm timeout values in TX command
+ * @PM_FRAME_NONE: no need to suspend sleep mode
+ * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU
+ * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec
+ */
+enum iwl_tx_pm_timeouts {
+	PM_FRAME_NONE		= 0,
+	PM_FRAME_MGMT		= 2,
+	PM_FRAME_ASSOC		= 3,
+};
+
 /*
  * TX command security control
  */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 16e9ef4..4af7513a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -75,6 +75,7 @@
 #include "fw-api-coex.h"
 #include "fw-api-scan.h"
 #include "fw-api-stats.h"
+#include "fw-api-tof.h"
 
 /* Tx queue numbers */
 enum {
@@ -119,6 +120,9 @@
 	ADD_STA = 0x18,
 	REMOVE_STA = 0x19,
 
+	/* paging get item */
+	FW_GET_ITEM_CMD = 0x1a,
+
 	/* TX */
 	TX_CMD = 0x1c,
 	TXPATH_FLUSH = 0x1e,
@@ -148,6 +152,9 @@
 
 	LQ_CMD = 0x4e,
 
+	/* paging block to FW cpu2 */
+	FW_PAGING_BLOCK_CMD = 0x4f,
+
 	/* Scan offload */
 	SCAN_OFFLOAD_REQUEST_CMD = 0x51,
 	SCAN_OFFLOAD_ABORT_CMD = 0x52,
@@ -163,6 +170,10 @@
 	CALIB_RES_NOTIF_PHY_DB = 0x6b,
 	/* PHY_DB_CMD = 0x6c, */
 
+	/* ToF - 802.11mc FTM */
+	TOF_CMD = 0x10,
+	TOF_NOTIFICATION = 0x11,
+
 	/* Power - legacy power table command */
 	POWER_TABLE_CMD = 0x77,
 	PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
@@ -365,6 +376,50 @@
 	u8 data[];
 } __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
 
+#define NUM_OF_FW_PAGING_BLOCKS	33 /* 32 for data and 1 block for CSS */
+
+/*
+ * struct iwl_fw_paging_cmd - paging layout
+ *
+ * (FW_PAGING_BLOCK_CMD = 0x4f)
+ *
+ * Send to FW the paging layout in the driver.
+ *
+ * @flags: various flags for the command
+ * @block_size: the block size, as a power-of-2 exponent
+ * @block_num: number of blocks specified in the command.
+ * @device_phy_addr: virtual addresses from device side
+ */
+struct iwl_fw_paging_cmd {
+	__le32 flags;
+	__le32 block_size;
+	__le32 block_num;
+	__le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
+} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
+
+/*
+ * FW item IDs
+ *
+ * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will
+ *	upload / download
+ */
+enum iwl_fw_item_id {
+	IWL_FW_ITEM_ID_PAGING = 3,
+};
+
+/*
+ * struct iwl_fw_get_item_cmd - get an item from the fw
+ */
+struct iwl_fw_get_item_cmd {
+	__le32 item_id;
+} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */
+
+struct iwl_fw_get_item_resp {
+	__le32 item_id;
+	__le32 item_byte_cnt;
+	__le32 item_val;
+} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */
+
 /**
  * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD
  * @offset: offset in bytes into the section
@@ -1080,10 +1135,33 @@
 	__le16 frame_time;
 } __packed;
 
+/*
+ * TCP offload Rx assist info
+ *
+ * bits 0:3 - reserved
+ * bits 4:7 - MIC CRC length
+ * bits 8:12 - MAC header length
+ * bit 13 - Padding indication
+ * bit 14 - A-AMSDU indication
+ * bit 15 - Offload enabled
+ */
+enum iwl_csum_rx_assist_info {
+	CSUM_RXA_RESERVED_MASK	= 0x000f,
+	CSUM_RXA_MICSIZE_MASK	= 0x00f0,
+	CSUM_RXA_HEADERLEN_MASK	= 0x1f00,
+	CSUM_RXA_PADD		= BIT(13),
+	CSUM_RXA_AMSDU		= BIT(14),
+	CSUM_RXA_ENA		= BIT(15)
+};
+
+/**
+ * struct iwl_rx_mpdu_res_start - phy info
+ * @assist: see &enum iwl_csum_rx_assist_info above
+ */
 struct iwl_rx_mpdu_res_start {
 	__le16 byte_count;
-	__le16 reserved;
-} __packed;
+	__le16 assist;
+} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */
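A short illustrative decode of the new assist word, using the masks from enum iwl_csum_rx_assist_info above (the helper and its debug print are not part of the patch):

	static void iwl_mvm_dump_csum_assist(struct iwl_mvm *mvm,
					     __le16 assist_le)
	{
		u16 assist = le16_to_cpu(assist_le);

		/* bits 4:7 - MIC CRC length, bits 8:12 - MAC header length */
		IWL_DEBUG_RX(mvm,
			     "csum ena=%d mic_len=%u hdr_len=%u amsdu=%d\n",
			     !!(assist & CSUM_RXA_ENA),
			     (assist & CSUM_RXA_MICSIZE_MASK) >> 4,
			     (assist & CSUM_RXA_HEADERLEN_MASK) >> 8,
			     !!(assist & CSUM_RXA_AMSDU));
	}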
 
 /**
  * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
@@ -1136,6 +1214,8 @@
  * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
  * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
  * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
+ * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw
+ * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
  * @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
  * @RX_MPDU_RES_STATUS_STA_ID_MSK:
  * @RX_MPDU_RES_STATUS_RRF_KILL:
@@ -1165,6 +1245,8 @@
 	RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP		= BIT(13),
 	RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT		= BIT(14),
 	RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME		= BIT(15),
+	RX_MPDU_RES_STATUS_CSUM_DONE			= BIT(16),
+	RX_MPDU_RES_STATUS_CSUM_OK			= BIT(17),
 	RX_MPDU_RES_STATUS_HASH_INDEX_MSK		= (0x3F0000),
 	RX_MPDU_RES_STATUS_STA_ID_MSK			= (0x1f000000),
 	RX_MPDU_RES_STATUS_RRF_KILL			= BIT(29),
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index eb10c5e..4a0ce83 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -106,6 +106,306 @@
 				    sizeof(tx_ant_cmd), &tx_ant_cmd);
 }
 
+static void iwl_free_fw_paging(struct iwl_mvm *mvm)
+{
+	int i;
+
+	if (!mvm->fw_paging_db[0].fw_paging_block)
+		return;
+
+	for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
+		if (!mvm->fw_paging_db[i].fw_paging_block) {
+			IWL_DEBUG_FW(mvm,
+				     "Paging: block %d already freed, continue to next page\n",
+				     i);
+
+			continue;
+		}
+
+		__free_pages(mvm->fw_paging_db[i].fw_paging_block,
+			     get_order(mvm->fw_paging_db[i].fw_paging_size));
+	}
+	kfree(mvm->trans->paging_download_buf);
+	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
+}
+
+static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
+{
+	int sec_idx, idx;
+	u32 offset = 0;
+
+	/*
+	 * Find where the paging image starts. If CPU2 exists and is in
+	 * paging format, the image looks like:
+	 * CPU1 sections (2 or more)
+	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
+	 * CPU2 sections (not paged)
+	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
+	 * CPU2 sections from the CPU2 paging section
+	 * CPU2 paging CSS
+	 * CPU2 paging image (including instructions and data)
+	 */
+	for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
+		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
+			sec_idx++;
+			break;
+		}
+	}
+
+	if (sec_idx >= IWL_UCODE_SECTION_MAX) {
+		IWL_ERR(mvm, "driver didn't find paging image\n");
+		iwl_free_fw_paging(mvm);
+		return -EINVAL;
+	}
+
+	/* copy the CSS block to the dram */
+	IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
+		     sec_idx);
+
+	memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
+	       image->sec[sec_idx].data,
+	       mvm->fw_paging_db[0].fw_paging_size);
+
+	IWL_DEBUG_FW(mvm,
+		     "Paging: copied %d CSS bytes to first block\n",
+		     mvm->fw_paging_db[0].fw_paging_size);
+
+	sec_idx++;
+
+	/*
+	 * Copy the paging blocks to DRAM. The loop index starts from 1
+	 * because the CSS block (index 0) was already copied above; it
+	 * stops at num_of_paging_blk because the last block is not full.
+	 */
+	for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
+		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+		       image->sec[sec_idx].data + offset,
+		       mvm->fw_paging_db[idx].fw_paging_size);
+
+		IWL_DEBUG_FW(mvm,
+			     "Paging: copied %d paging bytes to block %d\n",
+			     mvm->fw_paging_db[idx].fw_paging_size,
+			     idx);
+
+		offset += mvm->fw_paging_db[idx].fw_paging_size;
+	}
+
+	/* copy the last paging block */
+	if (mvm->num_of_pages_in_last_blk > 0) {
+		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+		       image->sec[sec_idx].data + offset,
+		       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
+
+		IWL_DEBUG_FW(mvm,
+			     "Paging: copied %d pages in the last block %d\n",
+			     mvm->num_of_pages_in_last_blk, idx);
+	}
+
+	return 0;
+}
+
+static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
+				   const struct fw_img *image)
+{
+	struct page *block;
+	dma_addr_t phys = 0;
+	int blk_idx = 0;
+	int order, num_of_pages;
+	int dma_enabled;
+
+	if (mvm->fw_paging_db[0].fw_paging_block)
+		return 0;
+
+	dma_enabled = is_device_dma_capable(mvm->trans->dev);
+
+	/* ensure that 2^BLOCK_2_EXP_SIZE equals PAGING_BLOCK_SIZE */
+	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
+
+	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
+	mvm->num_of_paging_blk = ((num_of_pages - 1) /
+				    NUM_OF_PAGE_PER_GROUP) + 1;
+
+	mvm->num_of_pages_in_last_blk =
+		num_of_pages -
+		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
+
+	IWL_DEBUG_FW(mvm,
+		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
+		     mvm->num_of_paging_blk,
+		     mvm->num_of_pages_in_last_blk);
+
+	/* allocate block of 4Kbytes for paging CSS */
+	order = get_order(FW_PAGING_SIZE);
+	block = alloc_pages(GFP_KERNEL, order);
+	if (!block) {
+		/* free all the previous pages since we failed */
+		iwl_free_fw_paging(mvm);
+		return -ENOMEM;
+	}
+
+	mvm->fw_paging_db[blk_idx].fw_paging_block = block;
+	mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;
+
+	if (dma_enabled) {
+		phys = dma_map_page(mvm->trans->dev, block, 0,
+				    PAGE_SIZE << order, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(mvm->trans->dev, phys)) {
+			/*
+			 * free the previous pages and the current one since
+			 * we failed to map_page.
+			 */
+			iwl_free_fw_paging(mvm);
+			return -ENOMEM;
+		}
+		mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
+	} else {
+		mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
+			blk_idx << BLOCK_2_EXP_SIZE;
+	}
+
+	IWL_DEBUG_FW(mvm,
+		     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
+		     order);
+
+	/*
+	 * Allocate blocks in DRAM. Since the CSS was allocated in
+	 * fw_paging_db[0], the loop starts from index 1.
+	 */
+	for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+		/* allocate block of PAGING_BLOCK_SIZE (32K) */
+		order = get_order(PAGING_BLOCK_SIZE);
+		block = alloc_pages(GFP_KERNEL, order);
+		if (!block) {
+			/* free all the previous pages since we failed */
+			iwl_free_fw_paging(mvm);
+			return -ENOMEM;
+		}
+
+		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
+		mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;
+
+		if (dma_enabled) {
+			phys = dma_map_page(mvm->trans->dev, block, 0,
+					    PAGE_SIZE << order,
+					    DMA_BIDIRECTIONAL);
+			if (dma_mapping_error(mvm->trans->dev, phys)) {
+				/*
+				 * free the previous pages and the current one
+				 * since we failed to map_page.
+				 */
+				iwl_free_fw_paging(mvm);
+				return -ENOMEM;
+			}
+			mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
+		} else {
+			mvm->fw_paging_db[blk_idx].fw_paging_phys =
+				PAGING_ADDR_SIG |
+				blk_idx << BLOCK_2_EXP_SIZE;
+		}
+
+		IWL_DEBUG_FW(mvm,
+			     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
+			     order);
+	}
+
+	return 0;
+}
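A worked instance of the block math above, assuming this series' paging constants (FW_PAGING_SIZE = 4 KiB, NUM_OF_PAGE_PER_GROUP = 8, hence a 32 KiB PAGING_BLOCK_SIZE) and a hypothetical image size:

	u32 paging_mem_size = 340 * 1024;	/* hypothetical CPU2 image */
	int num_of_pages = paging_mem_size / 4096;		/* 85 */
	int num_of_paging_blk = (num_of_pages - 1) / 8 + 1;	/* 11 */
	int in_last_blk = num_of_pages - 8 * (num_of_paging_blk - 1); /* 5 */

	/* fw_paging_db[0] holds the CSS; blocks 1..10 are full (8 pages
	 * each) and block 11 carries the remaining 5 pages */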
+
+static int iwl_save_fw_paging(struct iwl_mvm *mvm,
+			      const struct fw_img *fw)
+{
+	int ret;
+
+	ret = iwl_alloc_fw_paging_mem(mvm, fw);
+	if (ret)
+		return ret;
+
+	return iwl_fill_paging_mem(mvm, fw);
+}
+
+/* send paging cmd to FW in case CPU2 has paging image */
+static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
+{
+	int blk_idx;
+	__le32 dev_phy_addr;
+	struct iwl_fw_paging_cmd fw_paging_cmd = {
+		.flags =
+			cpu_to_le32(PAGING_CMD_IS_SECURED |
+				    PAGING_CMD_IS_ENABLED |
+				    (mvm->num_of_pages_in_last_blk <<
+				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
+		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
+		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
+	};
+
+	/* loop over all paging blocks + the CSS block */
+	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+		dev_phy_addr =
+			cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
+				    PAGE_2_EXP_SIZE);
+		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
+	}
+
+	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
+						    IWL_ALWAYS_LONG_GROUP, 0),
+				    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
+}
+
+/*
+ * Send the FW_GET_ITEM_CMD to the FW in case CPU2 has a paging image
+ */
+static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
+{
+	int ret;
+	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
+		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
+	};
+
+	struct iwl_fw_get_item_resp *item_resp;
+	struct iwl_host_cmd cmd = {
+		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+		.data = { &fw_get_item_cmd, },
+	};
+
+	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);
+
+	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	if (ret) {
+		IWL_ERR(mvm,
+			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
+			ret);
+		return ret;
+	}
+
+	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
+	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
+		IWL_ERR(mvm,
+			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
+			le32_to_cpu(item_resp->item_id));
+		ret = -EIO;
+		goto exit;
+	}
+
+	mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
+						  GFP_KERNEL);
+	if (!mvm->trans->paging_download_buf) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
+	mvm->trans->paging_db = mvm->fw_paging_db;
+	IWL_DEBUG_FW(mvm,
+		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
+		     mvm->trans->paging_req_addr);
+
+exit:
+	iwl_free_resp(&cmd);
+
+	return ret;
+}
+
 static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
 			 struct iwl_rx_packet *pkt, void *data)
 {
@@ -213,7 +513,7 @@
 	const struct fw_img *fw;
 	int ret, i;
 	enum iwl_ucode_type old_type = mvm->cur_ucode;
-	static const u8 alive_cmd[] = { MVM_ALIVE };
+	static const u16 alive_cmd[] = { MVM_ALIVE };
 	struct iwl_sf_region st_fwrd_space;
 
 	if (ucode_type == IWL_UCODE_REGULAR &&
@@ -244,6 +544,11 @@
 	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
 				    MVM_UCODE_ALIVE_TIMEOUT);
 	if (ret) {
+		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+			IWL_ERR(mvm,
+				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
+				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
 		mvm->cur_ucode = old_type;
 		return ret;
 	}
@@ -269,6 +574,40 @@
 	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
 
 	/*
+	 * Configure and operate the FW paging mechanism. The driver
+	 * configures the paging flow only once; the CPU2 paging image is
+	 * included in the IWL_UCODE_INIT image.
+	 */
+	if (fw->paging_mem_size) {
+		/*
+		 * When DMA is not enabled, the driver needs to copy / write
+		 * the downloaded / uploaded page to / from the SMEM.
+		 * This gets the location where the pages are stored.
+		 */
+		if (!is_device_dma_capable(mvm->trans->dev)) {
+			ret = iwl_trans_get_paging_item(mvm);
+			if (ret) {
+				IWL_ERR(mvm, "failed to get FW paging item\n");
+				return ret;
+			}
+		}
+
+		ret = iwl_save_fw_paging(mvm, fw);
+		if (ret) {
+			IWL_ERR(mvm, "failed to save the FW paging image\n");
+			return ret;
+		}
+
+		ret = iwl_send_paging_cmd(mvm, fw);
+		if (ret) {
+			IWL_ERR(mvm, "failed to send the paging cmd\n");
+			iwl_free_fw_paging(mvm);
+			return ret;
+		}
+	}
+
+	/*
 	 * Note: all the queues are enabled as part of the interface
 	 * initialization, but in firmware restart scenarios they
 	 * could be stopped, so wake them up. In firmware restart,
@@ -314,7 +653,7 @@
 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 {
 	struct iwl_notification_wait calib_wait;
-	static const u8 init_complete[] = {
+	static const u16 init_complete[] = {
 		INIT_COMPLETE_NOTIF,
 		CALIB_RES_NOTIF_PHY_DB
 	};
@@ -444,12 +783,6 @@
 		return;
 
 	pkt = cmd.resp_pkt;
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(mvm, "Bad return from SHARED_MEM_CFG (0x%08X)\n",
-			pkt->hdr.flags);
-		goto exit;
-	}
-
 	mem_cfg = (void *)pkt->data;
 
 	mvm->shared_mem_cfg.shared_mem_addr =
@@ -473,14 +806,18 @@
 		le32_to_cpu(mem_cfg->page_buff_size);
 	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
 
-exit:
 	iwl_free_resp(&cmd);
 }
 
 int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
 				struct iwl_mvm_dump_desc *desc,
-				unsigned int delay)
+				struct iwl_fw_dbg_trigger_tlv *trigger)
 {
+	unsigned int delay = 0;
+
+	if (trigger)
+		delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
+
 	if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
 		return -EBUSY;
 
@@ -491,6 +828,7 @@
 		 le32_to_cpu(desc->trig_desc.type));
 
 	mvm->fw_dump_desc = desc;
+	mvm->fw_dump_trig = trigger;
 
 	queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
 
@@ -498,7 +836,8 @@
 }
 
 int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
-			   const char *str, size_t len, unsigned int delay)
+			   const char *str, size_t len,
+			   struct iwl_fw_dbg_trigger_tlv *trigger)
 {
 	struct iwl_mvm_dump_desc *desc;
 
@@ -510,14 +849,13 @@
 	desc->trig_desc.type = cpu_to_le32(trig);
 	memcpy(desc->trig_desc.data, str, len);
 
-	return iwl_mvm_fw_dbg_collect_desc(mvm, desc, delay);
+	return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger);
 }
 
 int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
 				struct iwl_fw_dbg_trigger_tlv *trigger,
 				const char *fmt, ...)
 {
-	unsigned int delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
 	u16 occurrences = le16_to_cpu(trigger->occurrences);
 	int ret, len = 0;
 	char buf[64];
@@ -541,8 +879,9 @@
 		len = strlen(buf) + 1;
 	}
 
-	ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf,
-				     len, delay);
+	ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len,
+				     trigger);
+
 	if (ret)
 		return ret;
 
@@ -676,8 +1015,7 @@
 		goto error;
 	}
 
-	if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10)
-		iwl_mvm_get_shared_mem_conf(mvm);
+	iwl_mvm_get_shared_mem_conf(mvm);
 
 	ret = iwl_mvm_sf_update(mvm, NULL, false);
 	if (ret)
@@ -760,6 +1098,10 @@
 			goto error;
 	}
 
+	if (iwl_mvm_is_csum_supported(mvm) &&
+	    mvm->cfg->features & NETIF_F_RXCSUM)
+		iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);
+
 	/* allow FW/transport low power modes if not during restart */
 	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
 		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
@@ -815,9 +1157,8 @@
 	return ret;
 }
 
-int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
-				    struct iwl_rx_cmd_buffer *rxb,
-				    struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
@@ -828,13 +1169,10 @@
 			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
 			  (flags & CT_KILL_CARD_DISABLED) ?
 			  "Reached" : "Not reached");
-
-	return 0;
 }
 
-int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
-			    struct iwl_rx_cmd_buffer *rxb,
-			    struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+			     struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
@@ -845,5 +1183,4 @@
 		       le32_to_cpu(mfuart_notif->external_ver),
 		       le32_to_cpu(mfuart_notif->status),
 		       le32_to_cpu(mfuart_notif->duration));
-	return 0;
 }
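Note the pattern in the last two hunks (continued in mac-ctxt.c below): RX notification handlers drop their never-used int return value and the unused iwl_device_cmd argument. A minimal handler under the new signature, with a hypothetical name and the mfuart notification reused just for shape:

	void iwl_mvm_rx_example_notif(struct iwl_mvm *mvm,
				      struct iwl_rx_cmd_buffer *rxb)
	{
		struct iwl_rx_packet *pkt = rxb_addr(rxb);
		struct iwl_mfuart_load_notif *notif = (void *)pkt->data;

		IWL_DEBUG_INFO(mvm, "mfuart status: 0x%08x\n",
			       le32_to_cpu(notif->status));
		/* nothing to return any more - errors are only logged */
	}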
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 1812dd0..3424315 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -1312,9 +1312,8 @@
 	}
 }
 
-int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
-			    struct iwl_rx_cmd_buffer *rxb,
-			    struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+			     struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
@@ -1365,8 +1364,6 @@
 			RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
 		}
 	}
-
-	return 0;
 }
 
 static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
@@ -1415,9 +1412,8 @@
 		iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
 }
 
-int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
-				    struct iwl_rx_cmd_buffer *rxb,
-				    struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+				     struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
@@ -1434,5 +1430,4 @@
 						   IEEE80211_IFACE_ITER_NORMAL,
 						   iwl_mvm_beacon_loss_iterator,
 						   mb);
-	return 0;
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index dfdab38..aa8c2b7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -641,6 +641,7 @@
 			IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
 		IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
 		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+		ieee80211_hw_set(hw, TDLS_WIDER_BW);
 	}
 
 	if (fw_has_capa(&mvm->fw->ucode_capa,
@@ -649,6 +650,10 @@
 		hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
 	}
 
+	hw->netdev_features |= mvm->cfg->features;
+	if (!iwl_mvm_is_csum_supported(mvm))
+		hw->netdev_features &= ~NETIF_F_RXCSUM;
+
 	ret = ieee80211_register_hw(mvm->hw);
 	if (ret)
 		iwl_mvm_leds_exit(mvm);
@@ -1120,9 +1125,14 @@
 	u32 file_len, fifo_data_len = 0;
 	u32 smem_len = mvm->cfg->smem_len;
 	u32 sram2_len = mvm->cfg->dccm2_len;
+	bool monitor_dump_only = false;
 
 	lockdep_assert_held(&mvm->mutex);
 
+	if (mvm->fw_dump_trig &&
+	    mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
+		monitor_dump_only = true;
+
 	fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
 	if (!fw_error_dump)
 		return;
@@ -1174,6 +1184,20 @@
 		   fifo_data_len +
 		   sizeof(*dump_info);
 
+	/* Make room for the SMEM, if it exists */
+	if (smem_len)
+		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
+
+	/* Make room for the secondary SRAM, if it exists */
+	if (sram2_len)
+		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
+
+	/* If we only want a monitor dump, reset the file length */
+	if (monitor_dump_only) {
+		file_len = sizeof(*dump_file) + sizeof(*dump_data) +
+			   sizeof(*dump_info);
+	}
+
 	/*
 	 * In 8000 HW family B-step include the ICCM (which resides separately)
 	 */
@@ -1186,14 +1210,6 @@
 		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
 			    mvm->fw_dump_desc->len;
 
-	/* Make room for the SMEM, if it exists */
-	if (smem_len)
-		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
-
-	/* Make room for the secondary SRAM, if it exists */
-	if (sram2_len)
-		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
-
 	dump_file = vzalloc(file_len);
 	if (!dump_file) {
 		kfree(fw_error_dump);
@@ -1239,6 +1255,10 @@
 		dump_data = iwl_fw_error_next_data(dump_data);
 	}
 
+	/* In case we only want a monitor dump, skip to dumping transport data */
+	if (monitor_dump_only)
+		goto dump_trans_data;
+
 	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
 	dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
 	dump_mem = (void *)dump_data->data;
@@ -1282,7 +1302,9 @@
 					 dump_mem->data, IWL8260_ICCM_LEN);
 	}
 
-	fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
+dump_trans_data:
+	fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
+						       mvm->fw_dump_trig);
 	fw_error_dump->op_mode_len = file_len;
 	if (fw_error_dump->trans_ptr)
 		file_len += fw_error_dump->trans_ptr->len;
@@ -1291,6 +1313,7 @@
 	dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
 		      GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
 
+	mvm->fw_dump_trig = NULL;
 	clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
 }
 
@@ -1433,22 +1456,9 @@
 
 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
 {
-	bool exit_now;
-
 	if (!iwl_mvm_is_d0i3_supported(mvm))
 		return;
 
-	mutex_lock(&mvm->d0i3_suspend_mutex);
-	__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
-	exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
-					&mvm->d0i3_suspend_flags);
-	mutex_unlock(&mvm->d0i3_suspend_mutex);
-
-	if (exit_now) {
-		IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
-		_iwl_mvm_exit_d0i3(mvm);
-	}
-
 	if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
 		if (!wait_event_timeout(mvm->d0i3_exit_waitq,
 					!test_bit(IWL_MVM_STATUS_IN_D0I3,
@@ -1585,20 +1595,23 @@
 				s16 tx_power)
 {
 	struct iwl_dev_tx_power_cmd cmd = {
-		.set_mode = 0,
-		.mac_context_id =
+		.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
+		.v2.mac_context_id =
 			cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
-		.pwr_restriction = cpu_to_le16(8 * tx_power),
+		.v2.pwr_restriction = cpu_to_le16(8 * tx_power),
 	};
+	int len = sizeof(cmd);
 
 	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_DEV))
 		return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
 
 	if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
-		cmd.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+		cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
 
-	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
-				    sizeof(cmd), &cmd);
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
+		len = sizeof(cmd.v2);
+
+	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
 }
 
 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
@@ -1664,6 +1677,8 @@
 		goto out_unlock;
 	}
 
+	mvmvif->features |= hw->netdev_features;
+
 	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
 	if (ret)
 		goto out_release;
@@ -2880,10 +2895,11 @@
 	switch (key->cipher) {
 	case WLAN_CIPHER_SUITE_TKIP:
 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-		/* fall-through */
-	case WLAN_CIPHER_SUITE_CCMP:
 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
 		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+		break;
 	case WLAN_CIPHER_SUITE_AES_CMAC:
 		WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
 		break;
@@ -3025,7 +3041,7 @@
 	int res, time_reg = DEVICE_SYSTEM_TIME_REG;
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
-	static const u8 time_event_response[] = { HOT_SPOT_CMD };
+	static const u16 time_event_response[] = { HOT_SPOT_CMD };
 	struct iwl_notification_wait wait_time_event;
 	struct iwl_hs20_roc_req aux_roc_req = {
 		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 2d4bad5..b95a07e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -80,6 +80,7 @@
 #include "sta.h"
 #include "fw-api.h"
 #include "constants.h"
+#include "tof.h"
 
 #define IWL_INVALID_MAC80211_QUEUE	0xff
 #define IWL_MVM_MAX_ADDRESSES		5
@@ -122,8 +123,7 @@
  *	be up'ed after the INIT fw asserted. This is useful to be able to use
  *	proprietary tools over testmode to debug the INIT fw.
  * @tfd_q_hang_detect: enabled the detection of hung transmit queues
- * @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power
- *	Save)-2(default), LP(Low Power)-3
+ * @power_scheme: one of enum iwl_power_scheme
  */
 struct iwl_mvm_mod_params {
 	bool init_dbg;
@@ -357,6 +357,7 @@
  *	# of received beacons accumulated over FW restart, and the current
  *	average signal of beacons retrieved from the firmware
  * @csa_failed: CSA failed to schedule time event, report an error later
+ * @features: hw features active for this vif
  */
 struct iwl_mvm_vif {
 	struct iwl_mvm *mvm;
@@ -437,6 +438,9 @@
 	/* Indicates that CSA countdown may be started */
 	bool csa_countdown;
 	bool csa_failed;
+
+	/* TCP Checksum Offload */
+	netdev_features_t features;
 };
 
 static inline struct iwl_mvm_vif *
@@ -606,6 +610,11 @@
 	/* NVM sections */
 	struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
 
+	/* Paging section */
+	struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
+	u16 num_of_paging_blk;
+	u16 num_of_pages_in_last_blk;
+
 	/* EEPROM MAC addresses */
 	struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
 
@@ -686,6 +695,7 @@
 	 * can hold 16 keys at most. Reflect this fact.
 	 */
 	unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
+	u8 fw_key_deleted[STA_KEY_MAX_NUM];
 
 	/* references taken by the driver and spinlock protecting them */
 	spinlock_t refs_lock;
@@ -698,6 +708,7 @@
 	u8 fw_dbg_conf;
 	struct delayed_work fw_dump_wk;
 	struct iwl_mvm_dump_desc *fw_dump_desc;
+	struct iwl_fw_dbg_trigger_tlv *fw_dump_trig;
 
 #ifdef CONFIG_IWLWIFI_LEDS
 	struct led_classdev led;
@@ -822,6 +833,7 @@
 	struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
 
 	u32 ciphers[6];
+	struct iwl_mvm_tof_data tof_data;
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -941,6 +953,12 @@
 		IWL_MVM_BT_COEX_RRC;
 }
 
+static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
+{
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
+}
+
 extern const u8 iwl_mvm_ac_to_tx_fifo[];
 
 struct iwl_rate_info {
@@ -974,12 +992,12 @@
 /* Tx / Host Commands */
 int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
 				  struct iwl_host_cmd *cmd);
-int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
 				      u32 flags, u16 len, const void *data);
 int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
 					 struct iwl_host_cmd *cmd,
 					 u32 *status);
-int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id,
+int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
 					     u16 len, const void *data,
 					     u32 *status);
 int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
@@ -988,10 +1006,6 @@
 void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 			struct iwl_tx_cmd *tx_cmd,
 			struct ieee80211_tx_info *info, u8 sta_id);
-void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
-			       struct ieee80211_tx_info *info,
-			       struct iwl_tx_cmd *tx_cmd,
-			       struct sk_buff *skb_frag);
 void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 			    struct ieee80211_tx_info *info,
 			    struct ieee80211_sta *sta, __le16 fc);
@@ -1003,6 +1017,17 @@
 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
 
+static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
+					   struct iwl_tx_cmd *tx_cmd)
+{
+	struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+	tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+	memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+	if (info->flags & IEEE80211_TX_CTL_AMPDU)
+		tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+}
+
 static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
 {
 	flush_work(&mvm->async_handlers_wk);
@@ -1011,9 +1036,8 @@
 /* Statistics */
 void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
 				  struct iwl_rx_packet *pkt);
-int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
-			  struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+			   struct iwl_rx_cmd_buffer *rxb);
 int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
 void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
 
@@ -1059,27 +1083,20 @@
  * FW notifications / CMD responses handlers
  * Convention: iwl_mvm_rx_<NAME OF THE CMD>
  */
-int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-		       struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-		      struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
-				  struct iwl_rx_cmd_buffer *rxb,
-				  struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
-				struct iwl_rx_cmd_buffer *rxb,
-				struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			    struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
-				    struct iwl_rx_cmd_buffer *rxb,
-				    struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+			struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+				   struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+			     struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
+				     struct iwl_rx_cmd_buffer *rxb);
 
 /* MVM PHY */
 int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
@@ -1106,12 +1123,10 @@
 u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
 int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif);
-int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
-			    struct iwl_rx_cmd_buffer *rxb,
-			    struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
-				    struct iwl_rx_cmd_buffer *rxb,
-				    struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+			     struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+				     struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif);
 unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
@@ -1135,29 +1150,24 @@
 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
 
 /* Scheduled scan */
-int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
-					struct iwl_rx_cmd_buffer *rxb,
-					struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-					     struct iwl_rx_cmd_buffer *rxb,
-					     struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+					 struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb);
 int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
 			     struct ieee80211_vif *vif,
 			     struct cfg80211_sched_scan_request *req,
 			     struct ieee80211_scan_ies *ies,
 			     int type);
-int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
-				struct iwl_rx_cmd_buffer *rxb,
-				struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb);
 
 /* UMAC scan */
 int iwl_mvm_config_scan(struct iwl_mvm *mvm);
-int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
-					struct iwl_rx_cmd_buffer *rxb,
-					struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-					     struct iwl_rx_cmd_buffer *rxb,
-					     struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+					 struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb);
 
 /* MVM debugfs */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1196,9 +1206,8 @@
 				 char *buf, int bufsz);
 
 void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
-					     struct iwl_rx_cmd_buffer *rxb,
-					     struct iwl_device_cmd *cmd);
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb);
 
 #ifdef CONFIG_IWLWIFI_LEDS
 int iwl_mvm_leds_init(struct iwl_mvm *mvm);
@@ -1254,9 +1263,8 @@
 
 /* BT Coex */
 int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
-int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
-			     struct iwl_rx_cmd_buffer *rxb,
-			     struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+			      struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			   enum ieee80211_rssi_event_data);
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
@@ -1274,9 +1282,8 @@
 bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
 void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
 int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
-int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
-				 struct iwl_rx_cmd_buffer *rxb,
-				 struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
+				  struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			       enum ieee80211_rssi_event_data);
 u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
@@ -1285,9 +1292,8 @@
 					 struct ieee80211_sta *sta);
 bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
 					enum ieee80211_band band);
-int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
-				      struct iwl_rx_cmd_buffer *rxb,
-				      struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
+				       struct iwl_rx_cmd_buffer *rxb);
 
 /* beacon filtering */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1376,9 +1382,8 @@
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
-int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
-		       struct iwl_rx_cmd_buffer *rxb,
-		       struct iwl_device_cmd *cmd);
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
+			struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
 void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
 void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
@@ -1390,9 +1395,8 @@
 iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
 		   enum iwl_mcc_source src_id);
 int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
-int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
-			       struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+				struct iwl_rx_cmd_buffer *rxb);
 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
 						  const char *alpha2,
 						  enum iwl_mcc_source src_id,
@@ -1431,8 +1435,7 @@
 void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
 					struct ieee80211_vif *vif,
 					struct ieee80211_sta *sta);
-int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
 
 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
@@ -1442,10 +1445,11 @@
 
 int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id);
 int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
-			   const char *str, size_t len, unsigned int delay);
+			   const char *str, size_t len,
+			   struct iwl_fw_dbg_trigger_tlv *trigger);
 int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
 				struct iwl_mvm_dump_desc *desc,
-				unsigned int delay);
+				struct iwl_fw_dbg_trigger_tlv *trigger);
 void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
 int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
 				struct iwl_fw_dbg_trigger_tlv *trigger,
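
iwl_mvm_set_tx_cmd_crypto() disappears from this header, and CCMP setup becomes the new inline iwl_mvm_set_tx_cmd_ccmp() shown above: copy the key material into the TX command, select CCM as the security mode and, for aggregated frames, set the CCMP-AGG flag. A standalone model of that helper, with made-up constants and trimmed-down structs in place of the driver's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SEC_CCM		0x02u		/* stand-in for TX_CMD_SEC_CCM */
#define FLG_CCMP_AGG	(1u << 7)	/* stand-in for TX_CMD_FLG_CCMP_AGG */
#define FLAG_AMPDU	(1u << 0)	/* stand-in for IEEE80211_TX_CTL_AMPDU */

struct key_conf {
	uint8_t key[16];
	uint8_t keylen;
};

struct tx_cmd {
	uint32_t tx_flags;
	uint8_t sec_ctl;
	uint8_t key[16];
};

/* same shape as the new inline: program CCMP into the TX command */
static void set_tx_cmd_ccmp(const struct key_conf *keyconf,
			    uint32_t info_flags, struct tx_cmd *cmd)
{
	cmd->sec_ctl = SEC_CCM;
	memcpy(cmd->key, keyconf->key, keyconf->keylen);
	if (info_flags & FLAG_AMPDU)
		cmd->tx_flags |= FLG_CCMP_AGG;	/* hint for aggregated frames */
}

int main(void)
{
	struct key_conf kc = { .keylen = 16 };
	struct tx_cmd cmd = { 0 };

	memset(kc.key, 0xaa, sizeof(kc.key));
	set_tx_cmd_ccmp(&kc, FLAG_AMPDU, &cmd);
	printf("sec_ctl=%#x tx_flags=%#x\n",
	       (unsigned)cmd.sec_ctl, (unsigned)cmd.tx_flags);
	return 0;
}
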
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 2a6be35..328187d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -139,12 +139,6 @@
 		return ret;
 
 	pkt = cmd.resp_pkt;
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(mvm, "Bad return from NVM_ACCES_COMMAND (0x%08X)\n",
-			pkt->hdr.flags);
-		ret = -EIO;
-		goto exit;
-	}
 
 	/* Extract NVM response */
 	nvm_resp = (void *)pkt->data;
@@ -652,12 +646,6 @@
 		return ERR_PTR(ret);
 
 	pkt = cmd.resp_pkt;
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(mvm, "Bad return from MCC_UPDATE_COMMAND (0x%08X)\n",
-			pkt->hdr.flags);
-		ret = -EIO;
-		goto exit;
-	}
 
 	/* Extract MCC response */
 	mcc_resp = (void *)pkt->data;
@@ -839,9 +827,8 @@
 	return retval;
 }
 
-int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
-			       struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+				struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
@@ -852,7 +839,7 @@
 	lockdep_assert_held(&mvm->mutex);
 
 	if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
-		return 0;
+		return;
 
 	mcc[0] = notif->mcc >> 8;
 	mcc[1] = notif->mcc & 0xff;
@@ -864,10 +851,8 @@
 		      mcc, src);
 	regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
 	if (IS_ERR_OR_NULL(regd))
-		return 0;
+		return;
 
 	regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
 	kfree(regd);
-
-	return 0;
 }
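
Both hunks in nvm.c drop the same boilerplate: callers used to re-check pkt->hdr.flags for IWL_CMD_FAILED_MSK after every synchronous command. The deletions suggest, though the diff itself does not say, that failed responses are now rejected once in the common command path. A toy illustration of moving such a check into the send path (all names invented):

#include <stdio.h>
#include <errno.h>

#define CMD_FAILED_MSK 0x40	/* stand-in for IWL_CMD_FAILED_MSK */

struct resp {
	unsigned char flags;
	int payload;
};

/* failure is detected once, in the send path, so callers no longer
 * need their own "if (pkt->hdr.flags & IWL_CMD_FAILED_MSK)" block */
static int send_cmd(struct resp *r, int make_fail)
{
	r->flags = make_fail ? CMD_FAILED_MSK : 0;
	r->payload = 42;
	if (r->flags & CMD_FAILED_MSK)
		return -EIO;
	return 0;
}

int main(void)
{
	struct resp r;

	if (!send_cmd(&r, 0))
		printf("payload=%d\n", r.payload);	/* no flags check here */
	if (send_cmd(&r, 1) == -EIO)
		printf("failure reported centrally\n");
	return 0;
}
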
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index e4fa500..a37de3f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -201,14 +201,15 @@
 }
 
 struct iwl_rx_handlers {
-	u8 cmd_id;
+	u16 cmd_id;
 	bool async;
-	int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-		  struct iwl_device_cmd *cmd);
+	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 };
 
 #define RX_HANDLER(_cmd_id, _fn, _async)	\
 	{ .cmd_id = _cmd_id , .fn = _fn , .async = _async }
+#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async)	\
+	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async }
 
 /*
  * Handlers for fw notifications
@@ -221,7 +222,6 @@
  * called from a worker with mvm->mutex held.
  */
 static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
-	RX_HANDLER(REPLY_RX_MPDU_CMD, iwl_mvm_rx_rx_mpdu, false),
 	RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
 	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
 	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
@@ -261,12 +261,14 @@
 	RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
 		   true),
 	RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
+	RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
 
 };
 #undef RX_HANDLER
+#undef RX_HANDLER_GRP
 #define CMD(x) [x] = #x
 
-static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
+static const char *const iwl_mvm_cmd_strings[REPLY_MAX + 1] = {
 	CMD(MVM_ALIVE),
 	CMD(REPLY_ERROR),
 	CMD(INIT_COMPLETE_NOTIF),
@@ -286,8 +288,10 @@
 	CMD(PHY_CONFIGURATION_CMD),
 	CMD(CALIB_RES_NOTIF_PHY_DB),
 	CMD(SET_CALIB_DEFAULT_CMD),
+	CMD(FW_PAGING_BLOCK_CMD),
 	CMD(ADD_STA_KEY),
 	CMD(ADD_STA),
+	CMD(FW_GET_ITEM_CMD),
 	CMD(REMOVE_STA),
 	CMD(LQ_CMD),
 	CMD(SCAN_OFFLOAD_CONFIG_CMD),
@@ -470,6 +474,8 @@
 	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
 	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
 	trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
+	trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa,
+					       IWL_UCODE_TLV_API_WIDE_CMD_HDR);
 
 	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
 		trans_cfg.bc_table_dword = true;
@@ -576,6 +582,8 @@
 	/* rpm starts with a taken ref. only set the appropriate bit here. */
 	mvm->refs[IWL_MVM_REF_UCODE_DOWN] = 1;
 
+	iwl_mvm_tof_init(mvm);
+
 	return op_mode;
 
  out_unregister:
@@ -623,14 +631,15 @@
 	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
 		kfree(mvm->nvm_sections[i].data);
 
+	iwl_mvm_tof_clean(mvm);
+
 	ieee80211_free_hw(mvm->hw);
 }
 
 struct iwl_async_handler_entry {
 	struct list_head list;
 	struct iwl_rx_cmd_buffer rxb;
-	int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-		  struct iwl_device_cmd *cmd);
+	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 };
 
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
@@ -667,9 +676,7 @@
 	spin_unlock_bh(&mvm->async_handlers_lock);
 
 	list_for_each_entry_safe(entry, tmp, &local_list, list) {
-		if (entry->fn(mvm, &entry->rxb, NULL))
-			IWL_WARN(mvm,
-				 "returned value from ASYNC handlers are ignored\n");
+		entry->fn(mvm, &entry->rxb);
 		iwl_free_rxb(&entry->rxb);
 		list_del(&entry->list);
 		kfree(entry);
@@ -698,24 +705,30 @@
 		if (!cmds_trig->cmds[i].cmd_id)
 			break;
 
-		if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd)
+		if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
+		    cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
 			continue;
 
 		iwl_mvm_fw_dbg_collect_trig(mvm, trig,
-					    "CMD 0x%02x received",
-					    pkt->hdr.cmd);
+					    "CMD 0x%02x.%02x received",
+					    pkt->hdr.group_id, pkt->hdr.cmd);
 		break;
 	}
 }
 
-static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
-			       struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd)
+static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
+				struct napi_struct *napi,
+				struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 	u8 i;
 
+	if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) {
+		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+		return;
+	}
+
 	iwl_mvm_rx_check_trigger(mvm, pkt);
 
 	/*
@@ -729,16 +742,18 @@
 		const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
 		struct iwl_async_handler_entry *entry;
 
-		if (rx_h->cmd_id != pkt->hdr.cmd)
+		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
 			continue;
 
-		if (!rx_h->async)
-			return rx_h->fn(mvm, rxb, cmd);
+		if (!rx_h->async) {
+			rx_h->fn(mvm, rxb);
+			return;
+		}
 
 		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 		/* we can't do much... */
 		if (!entry)
-			return 0;
+			return;
 
 		entry->rxb._page = rxb_steal_page(rxb);
 		entry->rxb._offset = rxb->_offset;
@@ -750,8 +765,6 @@
 		schedule_work(&mvm->async_handlers_wk);
 		break;
 	}
-
-	return 0;
 }
 
 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
@@ -903,7 +916,8 @@
 	 * can't recover this since we're already half suspended.
 	 */
 	if (!mvm->restart_fw && fw_error) {
-		iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, 0);
+		iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
+					    NULL);
 	} else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
 				    &mvm->status)) {
 		struct iwl_mvm_reprobe *reprobe;
@@ -1100,9 +1114,7 @@
 
 	IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
 
-	/* make sure we have no running tx while configuring the qos */
 	set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
-	synchronize_net();
 
 	/*
 	 * iwl_mvm_ref_sync takes a reference before checking the flag.
@@ -1130,6 +1142,9 @@
 		mvm->d0i3_offloading = false;
 	}
 
+	/* make sure we have no running tx while configuring the seqno */
+	synchronize_net();
+
 	iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
 	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
 				   sizeof(wowlan_config_cmd),
@@ -1156,15 +1171,25 @@
 	iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
 }
 
-static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
-					 struct ieee80211_vif *vif)
+struct iwl_mvm_wakeup_reason_iter_data {
+	struct iwl_mvm *mvm;
+	u32 wakeup_reasons;
+};
+
+static void iwl_mvm_d0i3_wakeup_reason_iter(void *_data, u8 *mac,
+					    struct ieee80211_vif *vif)
 {
-	struct iwl_mvm *mvm = data;
+	struct iwl_mvm_wakeup_reason_iter_data *data = _data;
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
 	if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
-	    mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
-		iwl_mvm_connection_loss(mvm, vif, "D0i3");
+	    data->mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) {
+		if (data->wakeup_reasons &
+		    IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
+			iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
+		else
+			ieee80211_beacon_loss(vif);
+	}
 }
 
 void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
@@ -1232,7 +1257,7 @@
 	};
 	struct iwl_wowlan_status *status;
 	int ret;
-	u32 disconnection_reasons, wakeup_reasons;
+	u32 handled_reasons, wakeup_reasons;
 	__le16 *qos_seq = NULL;
 
 	mutex_lock(&mvm->mutex);
@@ -1249,13 +1274,18 @@
 
 	IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
 
-	disconnection_reasons =
-		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
-		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
-	if (wakeup_reasons & disconnection_reasons)
+	handled_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+				IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+	if (wakeup_reasons & handled_reasons) {
+		struct iwl_mvm_wakeup_reason_iter_data data = {
+			.mvm = mvm,
+			.wakeup_reasons = wakeup_reasons,
+		};
+
 		ieee80211_iterate_active_interfaces(
 			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-			iwl_mvm_d0i3_disconnect_iter, mvm);
+			iwl_mvm_d0i3_wakeup_reason_iter, &data);
+	}
 out:
 	iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
 
@@ -1308,17 +1338,6 @@
 	return _iwl_mvm_exit_d0i3(mvm);
 }
 
-static void iwl_mvm_napi_add(struct iwl_op_mode *op_mode,
-			     struct napi_struct *napi,
-			     struct net_device *napi_dev,
-			     int (*poll)(struct napi_struct *, int),
-			     int weight)
-{
-	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-
-	ieee80211_napi_add(mvm->hw, napi, napi_dev, poll, weight);
-}
-
 static const struct iwl_op_mode_ops iwl_mvm_ops = {
 	.start = iwl_op_mode_mvm_start,
 	.stop = iwl_op_mode_mvm_stop,
@@ -1332,5 +1351,4 @@
 	.nic_config = iwl_mvm_nic_config,
 	.enter_d0i3 = iwl_mvm_enter_d0i3,
 	.exit_d0i3 = iwl_mvm_exit_d0i3,
-	.napi_add = iwl_mvm_napi_add,
 };
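
The ops.c changes above rework RX dispatch: handlers are matched on WIDE_ID(group, cmd) instead of a bare 8-bit command, return void, and either run inline or are queued for the async worker, while the MPDU fast path is tested first and short-circuits the table walk. A compact user-space model of that dispatch shape (the command numbers and handler names are invented):

#include <stdio.h>

#define WIDE_ID(grp, cmd)	(((grp) << 8) | (cmd))

struct pkt { unsigned char group_id; unsigned char cmd; };

struct rx_handler {
	unsigned short cmd_id;	/* wide id: group in the high byte */
	int async;		/* 1: defer to the worker */
	void (*fn)(const struct pkt *p);
};

static void on_stats(const struct pkt *p) { (void)p; puts("stats (sync)"); }
static void on_mcc(const struct pkt *p) { (void)p; puts("mcc (async)"); }

/* invented command numbers; the table mirrors iwl_mvm_rx_handlers[] */
static const struct rx_handler handlers[] = {
	{ WIDE_ID(0, 0x9d), 0, on_stats },
	{ WIDE_ID(0, 0xc9), 1, on_mcc },
};

/* toy stand-in for mvm->async_handlers_list; real entries also steal
 * the RX page so it stays valid until the worker runs */
static const struct rx_handler *deferred[8];
static int n_deferred;

static void dispatch(const struct pkt *p)
{
	unsigned i, wide = WIDE_ID(p->group_id, p->cmd);

	/* an MPDU fast path would be tested here, before the table walk */
	for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
		if (handlers[i].cmd_id != wide)
			continue;
		if (!handlers[i].async)
			handlers[i].fn(p);	/* run in RX context */
		else
			deferred[n_deferred++] = &handlers[i];
		return;
	}
}

int main(void)
{
	struct pkt stats = { 0, 0x9d }, mcc = { 0, 0xc9 };
	int i;

	dispatch(&stats);
	dispatch(&mcc);
	for (i = 0; i < n_deferred; i++)	/* the "worker" drains later */
		deferred[i]->fn(&mcc);
	return 0;
}
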
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index d2c6ba9..4645877 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -112,11 +112,12 @@
 static
 void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
 					  struct ieee80211_vif *vif,
-					  struct iwl_beacon_filter_cmd *cmd)
+					  struct iwl_beacon_filter_cmd *cmd,
+					  bool d0i3)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-	if (vif->bss_conf.cqm_rssi_thold) {
+	if (vif->bss_conf.cqm_rssi_thold && !d0i3) {
 		cmd->bf_energy_delta =
 			cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
 		/* fw uses an absolute value for this */
@@ -287,27 +288,6 @@
 	return true;
 }
 
-static int iwl_mvm_power_get_skip_over_dtim(int dtimper, int bi)
-{
-	int numerator;
-	int dtim_interval = dtimper * bi;
-
-	if (WARN_ON(!dtim_interval))
-		return 0;
-
-	if (dtimper == 1) {
-		if (bi > 100)
-			numerator = 408;
-		else
-			numerator = 510;
-	} else if (dtimper < 10) {
-		numerator = 612;
-	} else {
-		return 0;
-	}
-	return max(1, (numerator / dtim_interval));
-}
-
 static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
 {
 	struct ieee80211_chanctx_conf *chanctx_conf;
@@ -357,8 +337,8 @@
 
 	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
 
-	if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
-	    !mvmvif->pm_enabled)
+	if (!vif->bss_conf.ps || !mvmvif->pm_enabled ||
+	    (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p))
 		return;
 
 	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -377,11 +357,8 @@
 	if (!radar_detect && (dtimper < 10) &&
 	    (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
 	     mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
-		cmd->skip_dtim_periods =
-			iwl_mvm_power_get_skip_over_dtim(dtimper, bi);
-		if (cmd->skip_dtim_periods)
-			cmd->flags |=
-				cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+		cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+		cmd->skip_dtim_periods = 3;
 	}
 
 	if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
@@ -509,9 +486,8 @@
 		       ETH_ALEN);
 }
 
-int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
-					     struct iwl_rx_cmd_buffer *rxb,
-					     struct iwl_device_cmd *cmd)
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
@@ -520,8 +496,6 @@
 	ieee80211_iterate_active_interfaces_atomic(
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 		iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
-
-	return 0;
 }
 
 struct iwl_power_vifs {
@@ -810,7 +784,7 @@
 	    vif->type != NL80211_IFTYPE_STATION || vif->p2p)
 		return 0;
 
-	iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
+	iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3);
 	if (!d0i3)
 		iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
 	ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index daff1d0..5ae9c8a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -177,7 +177,8 @@
 
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-	if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
+	if (IWL_MVM_RS_DISABLE_P2P_MIMO &&
+	    iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
 		return false;
 
 	if (mvm->nvm_data->sku_cap_mimo_disabled)
@@ -2403,7 +2404,7 @@
 	u8 rate_idx;
 };
 
-static const struct rs_init_rate_info rs_init_rates_24ghz[] = {
+static const struct rs_init_rate_info rs_optimal_rates_24ghz_legacy[] = {
 	{ -60, IWL_RATE_54M_INDEX },
 	{ -64, IWL_RATE_48M_INDEX },
 	{ -68, IWL_RATE_36M_INDEX },
@@ -2416,7 +2417,7 @@
 	{ S8_MIN, IWL_RATE_1M_INDEX },
 };
 
-static const struct rs_init_rate_info rs_init_rates_5ghz[] = {
+static const struct rs_init_rate_info rs_optimal_rates_5ghz_legacy[] = {
 	{ -60, IWL_RATE_54M_INDEX },
 	{ -64, IWL_RATE_48M_INDEX },
 	{ -72, IWL_RATE_36M_INDEX },
@@ -2427,6 +2428,124 @@
 	{ S8_MIN, IWL_RATE_6M_INDEX },
 };
 
+static const struct rs_init_rate_info rs_optimal_rates_ht[] = {
+	{ -60, IWL_RATE_MCS_7_INDEX },
+	{ -64, IWL_RATE_MCS_6_INDEX },
+	{ -68, IWL_RATE_MCS_5_INDEX },
+	{ -72, IWL_RATE_MCS_4_INDEX },
+	{ -80, IWL_RATE_MCS_3_INDEX },
+	{ -84, IWL_RATE_MCS_2_INDEX },
+	{ -85, IWL_RATE_MCS_1_INDEX },
+	{ S8_MIN, IWL_RATE_MCS_0_INDEX},
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = {
+	{ -60, IWL_RATE_MCS_8_INDEX },
+	{ -64, IWL_RATE_MCS_7_INDEX },
+	{ -68, IWL_RATE_MCS_6_INDEX },
+	{ -72, IWL_RATE_MCS_5_INDEX },
+	{ -80, IWL_RATE_MCS_4_INDEX },
+	{ -84, IWL_RATE_MCS_3_INDEX },
+	{ -85, IWL_RATE_MCS_2_INDEX },
+	{ -87, IWL_RATE_MCS_1_INDEX },
+	{ S8_MIN, IWL_RATE_MCS_0_INDEX},
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_vht_40_80mhz[] = {
+	{ -60, IWL_RATE_MCS_9_INDEX },
+	{ -64, IWL_RATE_MCS_8_INDEX },
+	{ -68, IWL_RATE_MCS_7_INDEX },
+	{ -72, IWL_RATE_MCS_6_INDEX },
+	{ -80, IWL_RATE_MCS_5_INDEX },
+	{ -84, IWL_RATE_MCS_4_INDEX },
+	{ -85, IWL_RATE_MCS_3_INDEX },
+	{ -87, IWL_RATE_MCS_2_INDEX },
+	{ -88, IWL_RATE_MCS_1_INDEX },
+	{ S8_MIN, IWL_RATE_MCS_0_INDEX },
+};
+
+/* Init the optimal rate based on STA caps.
+ * This, combined with RSSI, is used to report the last tx rate
+ * to userspace when we haven't transmitted enough frames.
+ */
+static void rs_init_optimal_rate(struct iwl_mvm *mvm,
+				 struct ieee80211_sta *sta,
+				 struct iwl_lq_sta *lq_sta)
+{
+	struct rs_rate *rate = &lq_sta->optimal_rate;
+
+	if (lq_sta->max_mimo2_rate_idx != IWL_RATE_INVALID)
+		rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
+	else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID)
+		rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
+	else if (lq_sta->band == IEEE80211_BAND_5GHZ)
+		rate->type = LQ_LEGACY_A;
+	else
+		rate->type = LQ_LEGACY_G;
+
+	rate->bw = rs_bw_from_sta_bw(sta);
+	rate->sgi = rs_sgi_allow(mvm, sta, rate, NULL);
+
+	/* ANT/LDPC/STBC aren't relevant for the rate reported to userspace */
+
+	if (is_mimo(rate)) {
+		lq_sta->optimal_rate_mask = lq_sta->active_mimo2_rate;
+	} else if (is_siso(rate)) {
+		lq_sta->optimal_rate_mask = lq_sta->active_siso_rate;
+	} else {
+		lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate;
+
+		if (lq_sta->band == IEEE80211_BAND_5GHZ) {
+			lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy;
+			lq_sta->optimal_nentries =
+				ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
+		} else {
+			lq_sta->optimal_rates = rs_optimal_rates_24ghz_legacy;
+			lq_sta->optimal_nentries =
+				ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
+		}
+	}
+
+	if (is_vht(rate)) {
+		if (rate->bw == RATE_MCS_CHAN_WIDTH_20) {
+			lq_sta->optimal_rates = rs_optimal_rates_vht_20mhz;
+			lq_sta->optimal_nentries =
+				ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
+		} else {
+			lq_sta->optimal_rates = rs_optimal_rates_vht_40_80mhz;
+			lq_sta->optimal_nentries =
+				ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz);
+		}
+	} else if (is_ht(rate)) {
+		lq_sta->optimal_rates = rs_optimal_rates_ht;
+		lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_ht);
+	}
+}
+
+/* Compute the optimal rate index based on RSSI */
+static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
+					   struct iwl_lq_sta *lq_sta)
+{
+	struct rs_rate *rate = &lq_sta->optimal_rate;
+	int i;
+
+	rate->index = find_first_bit(&lq_sta->optimal_rate_mask,
+				     BITS_PER_LONG);
+
+	for (i = 0; i < lq_sta->optimal_nentries; i++) {
+		int rate_idx = lq_sta->optimal_rates[i].rate_idx;
+
+		if ((lq_sta->pers.last_rssi >= lq_sta->optimal_rates[i].rssi) &&
+		    (BIT(rate_idx) & lq_sta->optimal_rate_mask)) {
+			rate->index = rate_idx;
+			break;
+		}
+	}
+
+	rs_dump_rate(mvm, rate, "OPTIMAL RATE");
+	return rate;
+}
+
 /* Choose an initial legacy rate and antenna to use based on the RSSI
  * of last Rx
  */
@@ -2468,12 +2587,12 @@
 
 	if (band == IEEE80211_BAND_5GHZ) {
 		rate->type = LQ_LEGACY_A;
-		initial_rates = rs_init_rates_5ghz;
-		nentries = ARRAY_SIZE(rs_init_rates_5ghz);
+		initial_rates = rs_optimal_rates_5ghz_legacy;
+		nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
 	} else {
 		rate->type = LQ_LEGACY_G;
-		initial_rates = rs_init_rates_24ghz;
-		nentries = ARRAY_SIZE(rs_init_rates_24ghz);
+		initial_rates = rs_optimal_rates_24ghz_legacy;
+		nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
 	}
 
 	if (IWL_MVM_RS_RSSI_BASED_INIT_RATE) {
@@ -2496,10 +2615,21 @@
 			 struct iwl_lq_sta *lq_sta,
 			 struct ieee80211_rx_status *rx_status)
 {
+	int i;
+
 	lq_sta->pers.chains = rx_status->chains;
 	lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0];
 	lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1];
 	lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2];
+	lq_sta->pers.last_rssi = S8_MIN;
+
+	for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
+		if (!(lq_sta->pers.chains & BIT(i)))
+			continue;
+
+		if (lq_sta->pers.chain_signal[i] > lq_sta->pers.last_rssi)
+			lq_sta->pers.last_rssi = lq_sta->pers.chain_signal[i];
+	}
 }
 
 /**
@@ -2538,6 +2668,7 @@
 	rate = &tbl->rate;
 
 	rs_get_initial_rate(mvm, lq_sta, band, rate);
+	rs_init_optimal_rate(mvm, sta, lq_sta);
 
 	WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
 	if (rate->ant == ANT_A)
@@ -2560,6 +2691,8 @@
 	struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct iwl_lq_sta *lq_sta = mvm_sta;
+	struct rs_rate *optimal_rate;
+	u32 last_ucode_rate;
 
 	if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
 		/* if vif isn't initialized mvm doesn't know about
@@ -2583,8 +2716,18 @@
 
 	iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
 				  info->band, &info->control.rates[0]);
-
 	info->control.rates[0].count = 1;
+
+	/* Report the optimal rate based on RSSI and STA caps if we haven't
+	 * converged yet (too little traffic) or are exploring other
+	 * modulations.
+	 */
+	if (lq_sta->rs_state != RS_STATE_STAY_IN_COLUMN) {
+		optimal_rate = rs_get_optimal_rate(mvm, lq_sta);
+		last_ucode_rate = ucode_rate_from_rs_rate(mvm,
+							  optimal_rate);
+		iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band,
+					  &txrc->reported_rate);
+	}
 }
 
 static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
@@ -2605,6 +2748,7 @@
 #endif
 	lq_sta->pers.chains = 0;
 	memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
+	lq_sta->pers.last_rssi = S8_MIN;
 
 	return &sta_priv->lq_sta;
 }
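
rs_get_optimal_rate() above is a straightforward table walk: the rs_optimal_rates_* arrays are sorted from strongest signal to weakest, and the first row whose RSSI threshold the last measured RSSI meets, and whose rate index the mask still allows, wins; otherwise the lowest allowed rate is the fallback (find_first_bit in the driver). A self-contained sketch with a shortened, made-up table:

#include <stdio.h>

struct init_rate { int rssi; int rate_idx; };

/* shortened, invented table in the same shape as rs_optimal_rates_*:
 * sorted from strongest signal to weakest */
static const struct init_rate table[] = {
	{ -60, 10 }, { -64, 9 }, { -72, 8 }, { -80, 7 }, { -128, 0 },
};

/* first matching row wins; else fall back to the lowest allowed rate.
 * Assumes a non-empty mask. */
static int optimal_rate(int last_rssi, unsigned long allowed_mask)
{
	unsigned i;
	int idx = 0;

	while (!(allowed_mask & (1ul << idx)))
		idx++;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (last_rssi >= table[i].rssi &&
		    (allowed_mask & (1ul << table[i].rate_idx))) {
			idx = table[i].rate_idx;
			break;
		}
	}
	return idx;
}

int main(void)
{
	printf("rssi -58 -> rate %d\n", optimal_rate(-58, 0x7ff)); /* 10 */
	printf("rssi -75 -> rate %d\n", optimal_rate(-75, 0x7ff)); /*  7 */
	return 0;
}
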
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 2a3da31..81314ad 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -1,6 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -316,6 +317,14 @@
 	u8 max_siso_rate_idx;
 	u8 max_mimo2_rate_idx;
 
+	/* Optimal rate based on RSSI and STA caps.
+	 * Used only to reflect link speed to userspace.
+	 */
+	struct rs_rate optimal_rate;
+	unsigned long optimal_rate_mask;
+	const struct rs_init_rate_info *optimal_rates;
+	int optimal_nentries;
+
 	u8 missed_rate_counter;
 
 	struct iwl_lq_cmd lq;
@@ -341,6 +350,7 @@
 #endif
 		u8 chains;
 		s8 chain_signal[IEEE80211_MAX_CHAINS];
+		s8 last_rssi;
 		struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
 		struct iwl_mvm *drv;
 	} pers;
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 8f1d93b..c37c10a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -61,6 +61,7 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *****************************************************************************/
+#include <linux/skbuff.h>
 #include "iwl-trans.h"
 #include "mvm.h"
 #include "fw-api.h"
@@ -71,8 +72,7 @@
  * Copies the phy information in mvm->last_phy_info, it will be used when the
  * actual data will come from the fw in the next packet.
  */
-int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
@@ -86,8 +86,6 @@
 		spin_unlock(&mvm->drv_stats_lock);
 	}
 #endif
-
-	return 0;
 }
 
 /*
@@ -96,6 +94,7 @@
  * Adds the rxb to a new skb and give it to mac80211
  */
 static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+					    struct napi_struct *napi,
 					    struct sk_buff *skb,
 					    struct ieee80211_hdr *hdr, u16 len,
 					    u32 ampdu_status, u8 crypt_len,
@@ -129,7 +128,7 @@
 				fraglen, rxb->truesize);
 	}
 
-	ieee80211_rx(mvm->hw, skb);
+	ieee80211_rx_napi(mvm->hw, skb, napi);
 }
 
 /*
@@ -237,13 +236,26 @@
 	return 0;
 }
 
+static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
+			    struct sk_buff *skb,
+			    u32 status)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+	if (mvmvif->features & NETIF_F_RXCSUM &&
+	    status & RX_MPDU_RES_STATUS_CSUM_DONE &&
+	    status & RX_MPDU_RES_STATUS_CSUM_OK)
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
 /*
  * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler
  *
  * Handles the actual data of the Rx packet from the fw
  */
-int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-		       struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+			struct iwl_rx_cmd_buffer *rxb)
 {
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_rx_status *rx_status;
@@ -271,7 +283,7 @@
 	skb = alloc_skb(128, GFP_ATOMIC);
 	if (!skb) {
 		IWL_ERR(mvm, "alloc_skb failed\n");
-		return 0;
+		return;
 	}
 
 	rx_status = IEEE80211_SKB_RXCB(skb);
@@ -284,14 +296,14 @@
 		IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
 			       rx_pkt_status);
 		kfree_skb(skb);
-		return 0;
+		return;
 	}
 
 	if ((unlikely(phy_info->cfg_phy_cnt > 20))) {
 		IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n",
 			       phy_info->cfg_phy_cnt);
 		kfree_skb(skb);
-		return 0;
+		return;
 	}
 
 	/*
@@ -366,6 +378,9 @@
 		}
 	}
 
+	if (sta && ieee80211_is_data(hdr->frame_control))
+		iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
+
 	rcu_read_unlock();
 
 	/* set the preamble flag if appropriate */
@@ -429,9 +444,8 @@
 	iwl_mvm_update_frame_stats(mvm, rate_n_flags,
 				   rx_status->flag & RX_FLAG_AMPDU_DETAILS);
 #endif
-	iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status,
+	iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status,
 					crypt_len, rxb);
-	return 0;
 }
 
 static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
@@ -623,10 +637,7 @@
 		iwl_rx_packet_payload_len(pkt));
 }
 
-int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
-			  struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
 	iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
-	return 0;
 }
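
The new iwl_mvm_rx_csum() above gates hardware checksum offload on two things: the vif must have negotiated NETIF_F_RXCSUM, and the firmware must have set both the CSUM_DONE and CSUM_OK status bits; only then is the skb marked CHECKSUM_UNNECESSARY. A standalone model (the bit positions are placeholders, not the real RX status layout):

#include <stdio.h>
#include <stdint.h>

#define F_RXCSUM		(1u << 0)	/* vif opted in to HW checksumming */
#define RX_STATUS_CSUM_DONE	(1u << 16)	/* fw ran its checksum engine */
#define RX_STATUS_CSUM_OK	(1u << 17)	/* ...and the checksum matched */

enum csum_state { CSUM_NONE, CSUM_UNNECESSARY };

/* same condition as iwl_mvm_rx_csum(): trust the hardware verdict only
 * when the interface opted in and the firmware set both bits */
static enum csum_state rx_csum(uint32_t vif_features, uint32_t rx_status)
{
	if ((vif_features & F_RXCSUM) &&
	    (rx_status & RX_STATUS_CSUM_DONE) &&
	    (rx_status & RX_STATUS_CSUM_OK))
		return CSUM_UNNECESSARY;
	return CSUM_NONE;	/* the stack re-verifies in software */
}

int main(void)
{
	uint32_t ok = RX_STATUS_CSUM_DONE | RX_STATUS_CSUM_OK;

	printf("%d\n", rx_csum(F_RXCSUM, ok));			/* 1 */
	printf("%d\n", rx_csum(0, ok));				/* 0: not negotiated */
	printf("%d\n", rx_csum(F_RXCSUM, RX_STATUS_CSUM_DONE));	/* 0: not verified */
	return 0;
}
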
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 5514ad6..56559d4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -72,10 +72,60 @@
 #define IWL_DENSE_EBS_SCAN_RATIO 5
 #define IWL_SPARSE_EBS_SCAN_RATIO 1
 
-struct iwl_mvm_scan_params {
-	u32 max_out_time;
+enum iwl_mvm_scan_type {
+	IWL_SCAN_TYPE_UNASSOC,
+	IWL_SCAN_TYPE_WILD,
+	IWL_SCAN_TYPE_MILD,
+	IWL_SCAN_TYPE_FRAGMENTED,
+};
+
+enum iwl_mvm_traffic_load {
+	IWL_MVM_TRAFFIC_LOW,
+	IWL_MVM_TRAFFIC_MEDIUM,
+	IWL_MVM_TRAFFIC_HIGH,
+};
+
+struct iwl_mvm_scan_timing_params {
+	u32 dwell_active;
+	u32 dwell_passive;
+	u32 dwell_fragmented;
 	u32 suspend_time;
-	bool passive_fragmented;
+	u32 max_out_time;
+};
+
+static struct iwl_mvm_scan_timing_params scan_timing[] = {
+	[IWL_SCAN_TYPE_UNASSOC] = {
+		.dwell_active = 10,
+		.dwell_passive = 110,
+		.dwell_fragmented = 44,
+		.suspend_time = 0,
+		.max_out_time = 0,
+	},
+	[IWL_SCAN_TYPE_WILD] = {
+		.dwell_active = 10,
+		.dwell_passive = 110,
+		.dwell_fragmented = 44,
+		.suspend_time = 30,
+		.max_out_time = 120,
+	},
+	[IWL_SCAN_TYPE_MILD] = {
+		.dwell_active = 10,
+		.dwell_passive = 110,
+		.dwell_fragmented = 44,
+		.suspend_time = 120,
+		.max_out_time = 120,
+	},
+	[IWL_SCAN_TYPE_FRAGMENTED] = {
+		.dwell_active = 10,
+		.dwell_passive = 110,
+		.dwell_fragmented = 44,
+		.suspend_time = 95,
+		.max_out_time = 44,
+	},
+};
+
+struct iwl_mvm_scan_params {
+	enum iwl_mvm_scan_type type;
 	u32 n_channels;
 	u16 delay;
 	int n_ssids;
@@ -90,15 +140,7 @@
 	int n_match_sets;
 	struct iwl_scan_probe_req preq;
 	struct cfg80211_match_set *match_sets;
-	struct _dwell {
-		u16 passive;
-		u16 active;
-		u16 fragmented;
-	} dwell[IEEE80211_NUM_BANDS];
-	struct {
-		u8 iterations;
-		u8 full_scan_mul; /* not used for UMAC */
-	} schedule[2];
+	u8 iterations[2];
 };
 
 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
@@ -147,34 +189,6 @@
 		return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
 }
 
-/*
- * If req->n_ssids > 0, it means we should do an active scan.
- * In case of active scan w/o directed scan, we receive a zero-length SSID
- * just to notify that this scan is active and not passive.
- * In order to notify the FW of the number of SSIDs we wish to scan (including
- * the zero-length one), we need to set the corresponding bits in chan->type,
- * one for each SSID, and set the active bit (first). If the first SSID is
- * already included in the probe template, so we need to set only
- * req->n_ssids - 1 bits in addition to the first bit.
- */
-static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
-				    enum ieee80211_band band, int n_ssids)
-{
-	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
-		return 10;
-	if (band == IEEE80211_BAND_2GHZ)
-		return 20  + 3 * (n_ssids + 1);
-	return 10  + 2 * (n_ssids + 1);
-}
-
-static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
-				     enum ieee80211_band band)
-{
-	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
-			return 110;
-	return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
-}
-
 static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
 					    struct ieee80211_vif *vif)
 {
@@ -186,90 +200,39 @@
 		*global_cnt += 1;
 }
 
-static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
-				    struct ieee80211_vif *vif,
-				    struct iwl_mvm_scan_params *params)
+static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
+{
+	return IWL_MVM_TRAFFIC_LOW;
+}
+
+static enum
+iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+					struct ieee80211_vif *vif,
+					struct iwl_mvm_scan_params *params)
 {
 	int global_cnt = 0;
-	enum ieee80211_band band;
-	u8 frag_passive_dwell = 0;
+	enum iwl_mvm_traffic_load load;
+	bool low_latency;
 
 	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
 					    IEEE80211_IFACE_ITER_NORMAL,
 					    iwl_mvm_scan_condition_iterator,
 					    &global_cnt);
 	if (!global_cnt)
-		goto not_bound;
+		return IWL_SCAN_TYPE_UNASSOC;
 
-	params->suspend_time = 30;
-	params->max_out_time = 120;
+	load = iwl_mvm_get_traffic_load(mvm);
+	low_latency = iwl_mvm_low_latency(mvm);
 
-	if (iwl_mvm_low_latency(mvm)) {
-		if (fw_has_api(&mvm->fw->ucode_capa,
-			       IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
+	if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
+	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
+		return IWL_SCAN_TYPE_FRAGMENTED;
 
-			params->suspend_time = 105;
-			/*
-			 * If there is more than one active interface make
-			 * passive scan more fragmented.
-			 */
-			frag_passive_dwell = 40;
-			params->max_out_time = frag_passive_dwell;
-		} else {
-			params->suspend_time = 120;
-			params->max_out_time = 120;
-		}
-	}
+	if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
+		return IWL_SCAN_TYPE_MILD;
 
-	if (frag_passive_dwell &&
-	    fw_has_api(&mvm->fw->ucode_capa,
-		       IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
-		/*
-		 * P2P device scan should not be fragmented to avoid negative
-		 * impact on P2P device discovery. Configure max_out_time to be
-		 * equal to dwell time on passive channel. Take a longest
-		 * possible value, one that corresponds to 2GHz band
-		 */
-		if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-			u32 passive_dwell =
-				iwl_mvm_get_passive_dwell(mvm,
-							  IEEE80211_BAND_2GHZ);
-			params->max_out_time = passive_dwell;
-		} else {
-			params->passive_fragmented = true;
-		}
-	}
-
-	if ((params->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
-	    (params->max_out_time > 200))
-		params->max_out_time = 200;
-
-not_bound:
-
-	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-		if (params->passive_fragmented)
-			params->dwell[band].fragmented = frag_passive_dwell;
-
-		params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
-									band);
-		params->dwell[band].active =
-			iwl_mvm_get_active_dwell(mvm, band, params->n_ssids);
-	}
-
-	IWL_DEBUG_SCAN(mvm,
-		       "scan parameters: max_out_time %d, suspend_time %d, passive_fragmented %d\n",
-		       params->max_out_time, params->suspend_time,
-		       params->passive_fragmented);
-	IWL_DEBUG_SCAN(mvm,
-		       "dwell[IEEE80211_BAND_2GHZ]: passive %d, active %d, fragmented %d\n",
-		       params->dwell[IEEE80211_BAND_2GHZ].passive,
-		       params->dwell[IEEE80211_BAND_2GHZ].active,
-		       params->dwell[IEEE80211_BAND_2GHZ].fragmented);
-	IWL_DEBUG_SCAN(mvm,
-		       "dwell[IEEE80211_BAND_5GHZ]: passive %d, active %d, fragmented %d\n",
-		       params->dwell[IEEE80211_BAND_5GHZ].passive,
-		       params->dwell[IEEE80211_BAND_5GHZ].active,
-		       params->dwell[IEEE80211_BAND_5GHZ].fragmented);
+	return IWL_SCAN_TYPE_WILD;
 }
 
 static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
@@ -327,9 +290,8 @@
 	return buf;
 }
 
-int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-					     struct iwl_rx_cmd_buffer *rxb,
-					     struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
@@ -341,17 +303,13 @@
 		       iwl_mvm_dump_channel_list(notif->results,
 						 notif->scanned_channels, buf,
 						 sizeof(buf)));
-	return 0;
 }
 
-int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
-				struct iwl_rx_cmd_buffer *rxb,
-				struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb)
 {
 	IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
 	ieee80211_sched_scan_results(mvm->hw);
-
-	return 0;
 }
 
 static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
@@ -368,9 +326,8 @@
 	}
 }
 
-int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
-					struct iwl_rx_cmd_buffer *rxb,
-					struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+					 struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
@@ -395,6 +352,11 @@
 		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
 			       aborted ? "aborted" : "completed",
 			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+		IWL_DEBUG_SCAN(mvm,
+			       "Last line %d, Last iteration %d, Time after last iteration %d\n",
+			       scan_notif->last_schedule_line,
+			       scan_notif->last_schedule_iteration,
+			       __le32_to_cpu(scan_notif->time_after_last_iter));
 
 		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
 	} else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
@@ -406,9 +368,14 @@
 	} else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
 		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
 
-		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n",
+		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
 			       aborted ? "aborted" : "completed",
 			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+		IWL_DEBUG_SCAN(mvm,
+			       "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
+			       scan_notif->last_schedule_line,
+			       scan_notif->last_schedule_iteration,
+			       __le32_to_cpu(scan_notif->time_after_last_iter));
 
 		mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
 		ieee80211_sched_scan_stopped(mvm->hw);
@@ -426,8 +393,6 @@
 	mvm->last_ebs_successful =
 			scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
 			scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
-
-	return 0;
 }
 
 static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
@@ -751,13 +716,11 @@
 				    struct iwl_scan_req_lmac *cmd,
 				    struct iwl_mvm_scan_params *params)
 {
-	cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
-	cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
-	if (params->passive_fragmented)
-		cmd->fragmented_dwell =
-				params->dwell[IEEE80211_BAND_2GHZ].fragmented;
-	cmd->max_out_time = cpu_to_le32(params->max_out_time);
-	cmd->suspend_time = cpu_to_le32(params->suspend_time);
+	cmd->active_dwell = scan_timing[params->type].dwell_active;
+	cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+	cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
+	cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+	cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
 	cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 }
 
@@ -794,7 +757,7 @@
 
 static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
 {
-	return params->schedule[0].iterations + params->schedule[1].iterations;
+	return params->iterations[0] + params->iterations[1];
 }
 
 static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
@@ -808,7 +771,7 @@
 	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
 
-	if (params->passive_fragmented)
+	if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
 
 	if (iwl_mvm_rrm_scan_needed(mvm))
@@ -861,11 +824,11 @@
 	ssid_bitmap <<= 1;
 
 	cmd->schedule[0].delay = cpu_to_le16(params->interval);
-	cmd->schedule[0].iterations = params->schedule[0].iterations;
-	cmd->schedule[0].full_scan_mul = params->schedule[0].full_scan_mul;
+	cmd->schedule[0].iterations = params->iterations[0];
+	cmd->schedule[0].full_scan_mul = 1;
 	cmd->schedule[1].delay = cpu_to_le16(params->interval);
-	cmd->schedule[1].iterations = params->schedule[1].iterations;
-	cmd->schedule[1].full_scan_mul = params->schedule[1].iterations;
+	cmd->schedule[1].iterations = params->iterations[1];
+	cmd->schedule[1].full_scan_mul = 1;
 
 	if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) {
 		cmd->channel_opt[0].flags =
@@ -937,9 +900,9 @@
 	int num_channels =
 		mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
 		mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
-	int ret, i, j = 0, cmd_size, data_size;
+	int ret, i, j = 0, cmd_size;
 	struct iwl_host_cmd cmd = {
-		.id = SCAN_CFG_CMD,
+		.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
 	};
 
 	if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
@@ -951,8 +914,6 @@
 	if (!scan_config)
 		return -ENOMEM;
 
-	data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr);
-	scan_config->hdr.size = cpu_to_le16(data_size);
 	scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
 					 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
 					 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
@@ -1013,13 +974,11 @@
 				    struct iwl_scan_req_umac *cmd,
 				    struct iwl_mvm_scan_params *params)
 {
-	cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
-	cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
-	if (params->passive_fragmented)
-		cmd->fragmented_dwell =
-				params->dwell[IEEE80211_BAND_2GHZ].fragmented;
-	cmd->max_out_time = cpu_to_le32(params->max_out_time);
-	cmd->suspend_time = cpu_to_le32(params->suspend_time);
+	cmd->active_dwell = scan_timing[params->type].dwell_active;
+	cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+	cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
+	cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+	cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
 	cmd->scan_priority =
 		iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 
@@ -1059,7 +1018,7 @@
 	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
 
-	if (params->passive_fragmented)
+	if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
 
 	if (iwl_mvm_rrm_scan_needed(mvm))
@@ -1099,8 +1058,6 @@
 		return uid;
 
 	memset(cmd, 0, ksize(cmd));
-	cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
-				    sizeof(struct iwl_mvm_umac_cmd_hdr));
 
 	iwl_mvm_scan_umac_dwell(mvm, cmd, params);
 
@@ -1230,17 +1187,15 @@
 	params.n_match_sets = 0;
 	params.match_sets = NULL;
 
-	params.schedule[0].iterations = 1;
-	params.schedule[0].full_scan_mul = 0;
-	params.schedule[1].iterations = 0;
-	params.schedule[1].full_scan_mul = 0;
+	params.iterations[0] = 1;
+	params.iterations[1] = 0;
 
-	iwl_mvm_scan_calc_dwell(mvm, vif, &params);
+	params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
 
 	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
 
 	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
-		hcmd.id = SCAN_REQ_UMAC;
+		hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
 		ret = iwl_mvm_scan_umac(mvm, vif, &params,
 					IWL_MVM_SCAN_REGULAR);
 	} else {
@@ -1313,10 +1268,10 @@
 	params.n_match_sets = req->n_match_sets;
 	params.match_sets = req->match_sets;
 
-	params.schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
-	params.schedule[0].full_scan_mul = 1;
-	params.schedule[1].iterations = 0xff;
-	params.schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+	params.iterations[0] = 0;
+	params.iterations[1] = 0xff;
+
+	params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
 
 	if (req->interval > U16_MAX) {
 		IWL_DEBUG_SCAN(mvm,
@@ -1339,8 +1294,6 @@
 		params.delay = req->delay;
 	}
 
-	iwl_mvm_scan_calc_dwell(mvm, vif, &params);
-
 	ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
 	if (ret)
 		return ret;
@@ -1348,7 +1301,7 @@
 	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
 
 	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
-		hcmd.id = SCAN_REQ_UMAC;
+		hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
 		ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
 	} else {
 		hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
@@ -1374,9 +1327,8 @@
 	return ret;
 }
 
-int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
-					struct iwl_rx_cmd_buffer *rxb,
-					struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+					 struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_umac_scan_complete *notif = (void *)pkt->data;
@@ -1384,7 +1336,7 @@
 	bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
 
 	if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
-		return 0;
+		return;
 
 	/* if the scan is already stopping, we don't need to notify mac80211 */
 	if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
@@ -1395,26 +1347,26 @@
 	}
 
 	mvm->scan_status &= ~mvm->scan_uid_status[uid];
-
 	IWL_DEBUG_SCAN(mvm,
 		       "Scan completed, uid %u type %u, status %s, EBS status %s\n",
 		       uid, mvm->scan_uid_status[uid],
 		       notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
 				"completed" : "aborted",
 		       iwl_mvm_ebs_status_str(notif->ebs_status));
+	IWL_DEBUG_SCAN(mvm,
+		       "Last line %d, Last iteration %d, Time from last iteration %d\n",
+		       notif->last_schedule, notif->last_iter,
+		       __le32_to_cpu(notif->time_from_last_iter));
 
 	if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
 	    notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
 		mvm->last_ebs_successful = false;
 
 	mvm->scan_uid_status[uid] = 0;
-
-	return 0;
 }
 
-int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-					     struct iwl_rx_cmd_buffer *rxb,
-					     struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
@@ -1426,15 +1378,11 @@
 		       iwl_mvm_dump_channel_list(notif->results,
 						 notif->scanned_channels, buf,
 						 sizeof(buf)));
-	return 0;
 }
 
 static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
 {
-	struct iwl_umac_scan_abort cmd = {
-		.hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
-					sizeof(struct iwl_mvm_umac_cmd_hdr)),
-	};
+	struct iwl_umac_scan_abort cmd = {};
 	int uid, ret;
 
 	lockdep_assert_held(&mvm->mutex);
@@ -1451,7 +1399,10 @@
 
 	IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
 
-	ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+	ret = iwl_mvm_send_cmd_pdu(mvm,
+				   iwl_cmd_id(SCAN_ABORT_UMAC,
+					      IWL_ALWAYS_LONG_GROUP, 0),
+				   0, sizeof(cmd), &cmd);
 	if (!ret)
 		mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
 
@@ -1461,7 +1412,7 @@
 static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
 {
 	struct iwl_notification_wait wait_scan_done;
-	static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
+	static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
 					      SCAN_OFFLOAD_COMPLETE, };
 	int ret;
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 26f076e..df216cd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -1148,18 +1148,31 @@
 
 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
 {
-	int i;
+	int i, max = -1, max_offs = -1;
 
 	lockdep_assert_held(&mvm->mutex);
 
-	i = find_first_zero_bit(mvm->fw_key_table, STA_KEY_MAX_NUM);
+	/* Pick the unused key offset with the highest 'deleted'
+	 * counter. Every time a key is deleted, all the counters
+	 * are incremented and the one that was just deleted is
+	 * reset to zero. Thus, the highest counter is the one
+	 * that was deleted longest ago. Pick that one.
+	 */
+	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+		if (test_bit(i, mvm->fw_key_table))
+			continue;
+		if (mvm->fw_key_deleted[i] > max) {
+			max = mvm->fw_key_deleted[i];
+			max_offs = i;
+		}
+	}
 
-	if (i == STA_KEY_MAX_NUM)
+	if (max_offs < 0)
 		return STA_KEY_IDX_INVALID;
 
-	__set_bit(i, mvm->fw_key_table);
+	__set_bit(max_offs, mvm->fw_key_table);
 
-	return i;
+	return max_offs;
 }
 
 static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
@@ -1277,8 +1290,6 @@
 		const u8 *pn;
 
 		memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
-		ieee80211_aes_cmac_calculate_k1_k2(keyconf,
-						   igtk_cmd.K1, igtk_cmd.K2);
 		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
 		pn = seq.aes_cmac.pn;
 		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
@@ -1479,7 +1490,7 @@
 {
 	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
 	u8 sta_id;
-	int ret;
+	int ret, i;
 
 	lockdep_assert_held(&mvm->mutex);
 
@@ -1498,6 +1509,13 @@
 		return -ENOENT;
 	}
 
+	/* track which key was deleted last */
+	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+		if (mvm->fw_key_deleted[i] < U8_MAX)
+			mvm->fw_key_deleted[i]++;
+	}
+	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
+
 	if (sta_id == IWL_MVM_STATION_COUNT) {
 		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
 		return 0;
@@ -1661,9 +1679,8 @@
 		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
 
-int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
-			  struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+			   struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
@@ -1671,15 +1688,13 @@
 	u32 sta_id = le32_to_cpu(notif->sta_id);
 
 	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
-		return 0;
+		return;
 
 	rcu_read_lock();
 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
 	if (!IS_ERR_OR_NULL(sta))
 		ieee80211_sta_eosp(sta);
 	rcu_read_unlock();
-
-	return 0;
 }
 
 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 748f5dc..eedb215 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -378,9 +378,8 @@
 			     struct ieee80211_sta *sta, u32 iv32,
 			     u16 *phase1key);
 
-int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
-			  struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+			   struct iwl_rx_cmd_buffer *rxb);
 
 /* AMPDU */
 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/mvm/tdls.c b/drivers/net/wireless/iwlwifi/mvm/tdls.c
index a87b506..fe2fa56 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tdls.c
@@ -169,18 +169,11 @@
 		return;
 
 	pkt = cmd.resp_pkt;
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(mvm, "Bad return from TDLS_CONFIG_COMMAND (0x%08X)\n",
-			pkt->hdr.flags);
-		goto exit;
-	}
 
-	if (WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)))
-		goto exit;
+	WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));
 
 	/* we don't really care about the response at this point */
 
-exit:
 	iwl_free_resp(&cmd);
 }
 
@@ -261,8 +254,7 @@
 		mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
 }
 
-int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
@@ -277,17 +269,17 @@
 	/* can fail sometimes */
 	if (!le32_to_cpu(notif->status)) {
 		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
-		goto out;
+		return;
 	}
 
 	if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
-		goto out;
+		return;
 
 	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
 					lockdep_is_held(&mvm->mutex));
 	/* the station may not be here, but if it is, it must be a TDLS peer */
 	if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
-		goto out;
+		return;
 
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	vif = mvmsta->vif;
@@ -301,9 +293,6 @@
 			 msecs_to_jiffies(delay));
 
 	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
-
-out:
-	return 0;
 }
 
 static int
@@ -471,13 +460,19 @@
 	cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
 
 	info = IEEE80211_SKB_CB(skb);
-	if (info->control.hw_key)
-		iwl_mvm_set_tx_cmd_crypto(mvm, info, &cmd.frame.tx_cmd, skb);
+	hdr = (void *)skb->data;
+	if (info->control.hw_key) {
+		if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
+			rcu_read_unlock();
+			ret = -EINVAL;
+			goto out;
+		}
+		iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
+	}
 
 	iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
 			   mvmsta->sta_id);
 
-	hdr = (void *)skb->data;
 	iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
 				hdr->frame_control);
 	rcu_read_unlock();
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index e472729..dbd7d54 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -410,9 +410,8 @@
 /*
  * The Rx handler for time event notifications
  */
-int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
-				struct iwl_rx_cmd_buffer *rxb,
-				struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_time_event_notif *notif = (void *)pkt->data;
@@ -433,8 +432,6 @@
 	}
 unlock:
 	spin_unlock_bh(&mvm->time_event_lock);
-
-	return 0;
 }
 
 static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
@@ -503,7 +500,7 @@
 				       struct iwl_mvm_time_event_data *te_data,
 				       struct iwl_time_event_cmd *te_cmd)
 {
-	static const u8 time_event_response[] = { TIME_EVENT_CMD };
+	static const u16 time_event_response[] = { TIME_EVENT_CMD };
 	struct iwl_notification_wait wait_time_event;
 	int ret;
 
@@ -566,7 +563,7 @@
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-	const u8 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
+	const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
 	struct iwl_notification_wait wait_te_notif;
 	struct iwl_time_event_cmd time_cmd = {};
 
@@ -599,8 +596,7 @@
 		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
 	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
 
-	time_cmd.apply_time =
-		cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
+	time_cmd.apply_time = cpu_to_le32(0);
 
 	time_cmd.max_frags = TE_V2_FRAG_NONE;
 	time_cmd.max_delay = cpu_to_le32(max_delay);
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
index de4fbc6..cbdf8e5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -157,9 +157,8 @@
 /*
  * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION.
  */
-int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
-				struct iwl_rx_cmd_buffer *rxb,
-				struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb);
 
 /**
  * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.c b/drivers/net/wireless/iwlwifi/mvm/tof.c
new file mode 100644
index 0000000..380972f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/tof.c
@@ -0,0 +1,304 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+#include "fw-api-tof.h"
+
+#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm)
+{
+	struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return;
+
+	memset(tof_data, 0, sizeof(*tof_data));
+
+	tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (IWL_MVM_TOF_IS_RESPONDER) {
+		tof_data->responder_cfg.sub_grp_cmd_id =
+			cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
+		tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT;
+	}
+#endif
+
+	tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD);
+	tof_data->range_req.req_timeout = 1;
+	tof_data->range_req.initiator = 1;
+	tof_data->range_req.report_policy = 3;
+
+	tof_data->range_req_ext.sub_grp_cmd_id =
+		cpu_to_le32(TOF_RANGE_REQ_EXT_CMD);
+
+	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+}
+
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm)
+{
+	struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return;
+
+	memset(tof_data, 0, sizeof(*tof_data));
+	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+}
+
+static void iwl_tof_iterator(void *_data, u8 *mac,
+			     struct ieee80211_vif *vif)
+{
+	bool *enabled = _data;
+
+	/* non-BSS vif exists */
+	if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+		*enabled = false;
+}
+
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm)
+{
+	struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg;
+	bool enabled = true;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return -EINVAL;
+
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   iwl_tof_iterator, &enabled);
+	if (!enabled) {
+		IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n");
+		return -EINVAL;
+	}
+
+	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+						    IWL_ALWAYS_LONG_GROUP, 0),
+				    0, sizeof(*cmd), cmd);
+}
+
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id)
+{
+	struct iwl_tof_range_abort_cmd cmd = {
+		.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD),
+		.request_id = id,
+	};
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return -EINVAL;
+
+	if (id != mvm->tof_data.active_range_request) {
+		IWL_ERR(mvm, "Invalid range request id %d (active %d)\n",
+			id, mvm->tof_data.active_range_request);
+		return -EINVAL;
+	}
+
+	/* after abort is sent there's no active request anymore */
+	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+						    IWL_ALWAYS_LONG_GROUP, 0),
+				    0, sizeof(cmd), &cmd);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+			      struct ieee80211_vif *vif)
+{
+	struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return -EINVAL;
+
+	if (vif->p2p || vif->type != NL80211_IFTYPE_AP) {
+		IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
+		return -EIO;
+	}
+
+	cmd->sta_id = mvmvif->bcast_sta.sta_id;
+	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+						    IWL_ALWAYS_LONG_GROUP, 0),
+				    0, sizeof(*cmd), cmd);
+}
+#endif
+
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+				  struct ieee80211_vif *vif)
+{
+	struct iwl_host_cmd cmd = {
+		.id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+		.len = { sizeof(mvm->tof_data.range_req), },
+		/* no copy because of the command size */
+		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
+	};
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return -EINVAL;
+
+	if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+		IWL_ERR(mvm, "Cannot send range request, not in STA mode\n");
+		return -EIO;
+	}
+
+	/* nesting of range requests is not supported in FW */
+	if (mvm->tof_data.active_range_request !=
+		IWL_MVM_TOF_RANGE_REQ_MAX_ID) {
+		IWL_ERR(mvm, "Cannot send range req, already active req %d\n",
+			mvm->tof_data.active_range_request);
+		return -EIO;
+	}
+
+	mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id;
+
+	cmd.data[0] = &mvm->tof_data.range_req;
+	return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+				      struct ieee80211_vif *vif)
+{
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return -EINVAL;
+
+	if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+		IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n");
+		return -EIO;
+	}
+
+	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+						    IWL_ALWAYS_LONG_GROUP, 0),
+				    0, sizeof(mvm->tof_data.range_req_ext),
+				    &mvm->tof_data.range_req_ext);
+}
+
+static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data)
+{
+	struct iwl_tof_range_rsp_ntfy *resp = (void *)data;
+
+	if (resp->request_id != mvm->tof_data.active_range_request) {
+		IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n",
+			resp->request_id, mvm->tof_data.active_range_request);
+		return -EIO;
+	}
+
+	memcpy(&mvm->tof_data.range_resp, resp,
+	       sizeof(struct iwl_tof_range_rsp_ntfy));
+	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+	return 0;
+}
+
+static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data)
+{
+	struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data;
+
+	IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token);
+	return 0;
+}
+
+static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data)
+{
+	struct iwl_tof_neighbor_report *report =
+		(struct iwl_tof_neighbor_report *)data;
+
+	IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n",
+		       report->bssid, report->request_token, report->status);
+	return 0;
+}
+
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+			      struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	switch (le32_to_cpu(resp->sub_grp_cmd_id)) {
+	case TOF_RANGE_RESPONSE_NOTIF:
+		iwl_mvm_tof_range_resp(mvm, resp->data);
+		break;
+	case TOF_MCSI_DEBUG_NOTIF:
+		iwl_mvm_tof_mcsi_notif(mvm, resp->data);
+		break;
+	case TOF_NEIGHBOR_REPORT_RSP_NOTIF:
+		iwl_mvm_tof_nb_report_notif(mvm, resp->data);
+		break;
+	default:
+		IWL_ERR(mvm, "Unknown sub-group command 0x%x\n",
+			resp->sub_grp_cmd_id);
+		break;
+	}
+}
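
tof.c serializes ranging: only one range request may be in flight, tracked in active_range_request, with IWL_MVM_TOF_RANGE_REQ_MAX_ID (256, one past the largest valid u8 request id) doubling as the "idle" sentinel that init, clean and abort all restore. A hedged sketch of that single-outstanding-request pattern, with hypothetical names:

#define EX_RANGE_REQ_IDLE 256	/* one past the largest valid u8 id */

static u16 ex_active_req = EX_RANGE_REQ_IDLE;

static int ex_range_request(u8 id)
{
	if (ex_active_req != EX_RANGE_REQ_IDLE)
		return -EBUSY;	/* the FW cannot nest range requests */
	ex_active_req = id;	/* ...then send the command... */
	return 0;
}

static void ex_range_done(u8 id)
{
	if (id == ex_active_req)
		ex_active_req = EX_RANGE_REQ_IDLE;	/* back to idle */
}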
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.h b/drivers/net/wireless/iwlwifi/mvm/tof.h
new file mode 100644
index 0000000..50ae8ad
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/tof.h
@@ -0,0 +1,94 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __tof_h__
+#define __tof_h__
+
+#include "fw-api-tof.h"
+
+struct iwl_mvm_tof_data {
+	struct iwl_tof_config_cmd tof_cfg;
+	struct iwl_tof_range_req_cmd range_req;
+	struct iwl_tof_range_req_ext_cmd range_req_ext;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	struct iwl_tof_responder_config_cmd responder_cfg;
+#endif
+	struct iwl_tof_range_rsp_ntfy range_resp;
+	u8 last_abort_id;
+	u16 active_range_request;
+};
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm);
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm);
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm);
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id);
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+				  struct ieee80211_vif *vif);
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+			      struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+				      struct ieee80211_vif *vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+			      struct ieee80211_vif *vif);
+#endif
+#endif /* __tof_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 80d07db..fe7145c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -33,6 +33,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -154,24 +155,20 @@
 	return true;
 }
 
-int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
-		       struct iwl_rx_cmd_buffer *rxb,
-		       struct iwl_device_cmd *cmd)
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	int temp;
 
 	/* the notification is handled synchronously in ctkill, so skip here */
 	if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
-		return 0;
+		return;
 
 	temp = iwl_mvm_temp_notif_parse(mvm, pkt);
 	if (temp < 0)
-		return 0;
+		return;
 
 	iwl_mvm_tt_temp_changed(mvm, temp);
-
-	return 0;
 }
 
 static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
@@ -187,7 +184,7 @@
 int iwl_mvm_get_temp(struct iwl_mvm *mvm)
 {
 	struct iwl_notification_wait wait_temp_notif;
-	static const u8 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
+	static const u16 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
 	int ret, temp;
 
 	lockdep_assert_held(&mvm->mutex);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 8911686..6df5aad 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -153,18 +153,20 @@
 
 	if (ieee80211_is_mgmt(fc)) {
 		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
-			tx_cmd->pm_frame_timeout = cpu_to_le16(3);
+			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
+		else if (ieee80211_is_action(fc))
+			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
 		else
-			tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
 
 		/* The spec allows Action frames in A-MPDU, we don't support
 		 * it
 		 */
 		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
 	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
-		tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
 	} else {
-		tx_cmd->pm_frame_timeout = 0;
+		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
 	}
 
 	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
@@ -268,19 +270,29 @@
 /*
  * Sets the fields in the Tx cmd that are crypto related
  */
-void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
-			       struct ieee80211_tx_info *info,
-			       struct iwl_tx_cmd *tx_cmd,
-			       struct sk_buff *skb_frag)
+static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+				      struct ieee80211_tx_info *info,
+				      struct iwl_tx_cmd *tx_cmd,
+				      struct sk_buff *skb_frag,
+				      int hdrlen)
 {
 	struct ieee80211_key_conf *keyconf = info->control.hw_key;
+	u8 *crypto_hdr = skb_frag->data + hdrlen;
+	u64 pn;
 
 	switch (keyconf->cipher) {
 	case WLAN_CIPHER_SUITE_CCMP:
-		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
-		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
-		if (info->flags & IEEE80211_TX_CTL_AMPDU)
-			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+	case WLAN_CIPHER_SUITE_CCMP_256:
+		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
+		pn = atomic64_inc_return(&keyconf->tx_pn);
+		crypto_hdr[0] = pn;
+		crypto_hdr[2] = 0;
+		crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
+		crypto_hdr[1] = pn >> 8;
+		crypto_hdr[4] = pn >> 16;
+		crypto_hdr[5] = pn >> 24;
+		crypto_hdr[6] = pn >> 32;
+		crypto_hdr[7] = pn >> 40;
 		break;
 
 	case WLAN_CIPHER_SUITE_TKIP:
@@ -308,7 +320,7 @@
  */
 static struct iwl_device_cmd *
 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
-		      struct ieee80211_sta *sta, u8 sta_id)
+		      int hdrlen, struct ieee80211_sta *sta, u8 sta_id)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -325,7 +337,7 @@
 	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
 	if (info->control.hw_key)
-		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb);
+		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
 
 	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
 
@@ -346,6 +358,7 @@
 	struct iwl_device_cmd *dev_cmd;
 	struct iwl_tx_cmd *tx_cmd;
 	u8 sta_id;
+	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
 	if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
 		return -1;
@@ -366,23 +379,34 @@
 		IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
 
 	/*
-	 * If the interface on which frame is sent is the P2P_DEVICE
+	 * If the interface on which the frame is sent is the P2P_DEVICE
 	 * or an AP/GO interface use the broadcast station associated
-	 * with it; otherwise use the AUX station.
+	 * with it; otherwise if the interface is a managed interface
+	 * use the AP station associated with it for multicast traffic
+	 * (this is not possible for unicast packets as a TDLS discovery
+	 * response is sent without a station entry); otherwise use the
+	 * AUX station.
 	 */
-	if (info->control.vif &&
-	    (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
-	     info->control.vif->type == NL80211_IFTYPE_AP)) {
+	sta_id = mvm->aux_sta.sta_id;
+	if (info->control.vif) {
 		struct iwl_mvm_vif *mvmvif =
 			iwl_mvm_vif_from_mac80211(info->control.vif);
-		sta_id = mvmvif->bcast_sta.sta_id;
-	} else {
-		sta_id = mvm->aux_sta.sta_id;
+
+		if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+		    info->control.vif->type == NL80211_IFTYPE_AP)
+			sta_id = mvmvif->bcast_sta.sta_id;
+		else if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+			 is_multicast_ether_addr(hdr->addr1)) {
+			u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
+
+			if (ap_sta_id != IWL_MVM_STATION_COUNT)
+				sta_id = ap_sta_id;
+		}
 	}
 
 	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);
 
-	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, NULL, sta_id);
+	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id);
 	if (!dev_cmd)
 		return -1;
 
@@ -390,7 +414,7 @@
 	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
 	/* Copy MAC header from skb into command buffer */
-	memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
+	memcpy(tx_cmd->hdr, hdr, hdrlen);
 
 	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
 		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
@@ -416,9 +440,11 @@
 	u8 tid = IWL_MAX_TID_COUNT;
 	u8 txq_id = info->hw_queue;
 	bool is_data_qos = false, is_ampdu = false;
+	int hdrlen;
 
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	fc = hdr->frame_control;
+	hdrlen = ieee80211_hdrlen(fc);
 
 	if (WARN_ON_ONCE(!mvmsta))
 		return -1;
@@ -426,7 +452,7 @@
 	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
 		return -1;
 
-	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id);
+	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id);
 	if (!dev_cmd)
 		goto drop;
 
@@ -458,7 +484,7 @@
 	}
 
 	/* Copy MAC header from skb into command buffer */
-	memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(fc));
+	memcpy(tx_cmd->hdr, hdr, hdrlen);
 
 	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
 
@@ -911,8 +937,7 @@
 	rcu_read_unlock();
 }
 
-int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-		      struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
@@ -921,8 +946,6 @@
 		iwl_mvm_rx_tx_cmd_single(mvm, pkt);
 	else
 		iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
-
-	return 0;
 }
 
 static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
@@ -942,8 +965,7 @@
 		(void *)(uintptr_t)tid_data->rate_n_flags;
 }
 
-int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
@@ -965,7 +987,7 @@
 	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
 		      tid >= IWL_MAX_TID_COUNT,
 		      "sta_id %d tid %d", sta_id, tid))
-		return 0;
+		return;
 
 	rcu_read_lock();
 
@@ -974,7 +996,7 @@
 	/* Reclaiming frames for a station that has been deleted ? */
 	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
 		rcu_read_unlock();
-		return 0;
+		return;
 	}
 
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -985,7 +1007,7 @@
 			"invalid BA notification: Q %d, tid %d, flow %d\n",
 			tid_data->txq_id, tid, scd_flow);
 		rcu_read_unlock();
-		return 0;
+		return;
 	}
 
 	spin_lock_bh(&mvmsta->lock);
@@ -1072,8 +1094,6 @@
 		skb = __skb_dequeue(&reclaimed_skbs);
 		ieee80211_tx_status(mvm->hw, skb);
 	}
-
-	return 0;
 }
 
 /*
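
With the new hdrlen plumbing, iwl_mvm_set_tx_cmd_crypto() writes the packet number straight into the 8-byte CCMP header that follows the MAC header, in the byte order 802.11 defines: PN0, PN1, a reserved byte, the Ext IV flag (0x20) ORed with the key index, then PN2-PN5. Restated as a standalone helper (the function name is illustrative):

#include <stdint.h>

/* Fill an IEEE 802.11 CCMP header from a 48-bit packet number.
 * Layout per 802.11: PN0 PN1 rsvd (ExtIV|keyid) PN2 PN3 PN4 PN5.
 */
static void ex_build_ccmp_hdr(uint8_t hdr[8], uint64_t pn, uint8_t keyidx)
{
	hdr[0] = pn;			/* PN0 (LSB) */
	hdr[1] = pn >> 8;		/* PN1 */
	hdr[2] = 0;			/* reserved */
	hdr[3] = 0x20 | (keyidx << 6);	/* Ext IV set, key id in bits 6-7 */
	hdr[4] = pn >> 16;		/* PN2 */
	hdr[5] = pn >> 24;		/* PN3 */
	hdr[6] = pn >> 32;		/* PN4 */
	hdr[7] = pn >> 40;		/* PN5 (MSB) */
}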
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 03f8e06..a7d4342 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -108,7 +108,7 @@
 	return ret;
 }
 
-int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
 			 u32 flags, u16 len, const void *data)
 {
 	struct iwl_host_cmd cmd = {
@@ -166,11 +166,6 @@
 		goto out_free_resp;
 	}
 
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		ret = -EIO;
-		goto out_free_resp;
-	}
-
 	resp_len = iwl_rx_packet_payload_len(pkt);
 	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
 		ret = -EIO;
@@ -187,7 +182,7 @@
 /*
  * We assume that the caller set the status to the success value
  */
-int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, u16 len,
+int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
 				const void *data, u32 *status)
 {
 	struct iwl_host_cmd cmd = {
@@ -243,8 +238,7 @@
 	return fw_rate_idx_to_plcp[rate_idx];
 }
 
-int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_error_resp *err_resp = (void *)pkt->data;
@@ -256,7 +250,6 @@
 		le32_to_cpu(err_resp->error_service));
 	IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
 		le64_to_cpu(err_resp->timestamp));
-	return 0;
 }
 
 /*
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 9f65c1c..b0825c4 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -614,6 +614,7 @@
 {
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct iwl_trans *trans = pci_get_drvdata(pdev);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	bool hw_rfkill;
 
 	/* Before you put code here, think about WoWLAN. You cannot check here
@@ -631,20 +632,16 @@
 		return 0;
 
 	/*
-	 * On suspend, ict is disabled, and the interrupt mask
-	 * gets cleared. Reconfigure them both in case of d0i3
-	 * image. Otherwise, only enable rfkill interrupt (in
-	 * order to keep track of the rfkill status)
+	 * Enable rfkill interrupt (in order to keep track of
+	 * the rfkill status)
 	 */
-	if (trans->wowlan_d0i3) {
-		iwl_pcie_reset_ict(trans);
-		iwl_enable_interrupts(trans);
-	} else {
-		iwl_enable_rfkill_int(trans);
-	}
+	iwl_enable_rfkill_int(trans);
 
 	hw_rfkill = iwl_is_rfkill_set(trans);
+
+	mutex_lock(&trans_pcie->mutex);
 	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+	mutex_unlock(&trans_pcie->mutex);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 376b84e..feb2f7e 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -44,6 +44,21 @@
 #include "iwl-io.h"
 #include "iwl-op-mode.h"
 
+/* We need 2 entries for the TX command and header, and another one might
+ * be needed for potential data in the SKB's head. The remaining ones can
+ * be used for frags.
+ */
+#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
+
+/*
+ * RX related structures and functions
+ */
+#define RX_NUM_QUEUES 1
+#define RX_POST_REQ_ALLOC 2
+#define RX_CLAIM_REQ_ALLOC 8
+#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
+#define RX_LOW_WATERMARK 8
+
 struct iwl_host_cmd;
 
 /* This file includes the declarations that are internal to the
@@ -77,29 +92,29 @@
  * struct iwl_rxq - Rx queue
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @pool:
- * @queue:
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
+ * @used_count: Number of RBDs handed to the allocator for page allocation
  * @write_actual:
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
+ * @rx_free: list of RBDs with allocated RB ready for use
+ * @rx_used: list of RBDs with no RB attached
  * @need_update: flag to indicate we need to update read/write index
  * @rb_stts: driver's pointer to receive buffer status
  * @rb_stts_dma: bus address of receive buffer status
  * @lock:
+ * @pool: initial pool of iwl_rx_mem_buffer for the queue
+ * @queue: actual rx queue
  *
  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
 struct iwl_rxq {
 	__le32 *bd;
 	dma_addr_t bd_dma;
-	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
-	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
 	u32 read;
 	u32 write;
 	u32 free_count;
+	u32 used_count;
 	u32 write_actual;
 	struct list_head rx_free;
 	struct list_head rx_used;
@@ -107,6 +122,32 @@
 	struct iwl_rb_status *rb_stts;
 	dma_addr_t rb_stts_dma;
 	spinlock_t lock;
+	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
+	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+};
+
+/**
+ * struct iwl_rb_allocator - Rx allocator
+ * @pool: initial pool of allocator
+ * @req_pending: number of requests the allocator has not processed yet
+ * @req_ready: number of requests honored and ready for claiming
+ * @rbd_allocated: RBDs with pages allocated and ready to be handed to
+ *	the queue. This is a list of &struct iwl_rx_mem_buffer
+ * @rbd_empty: RBDs with no page attached for allocator use. This is a list
+ *	of &struct iwl_rx_mem_buffer
+ * @lock: protects the rbd_allocated and rbd_empty lists
+ * @alloc_wq: work queue for background calls
+ * @rx_alloc: work struct for background calls
+ */
+struct iwl_rb_allocator {
+	struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
+	atomic_t req_pending;
+	atomic_t req_ready;
+	struct list_head rbd_allocated;
+	struct list_head rbd_empty;
+	spinlock_t lock;
+	struct workqueue_struct *alloc_wq;
+	struct work_struct rx_alloc;
 };
 
 struct iwl_dma_ptr {
@@ -250,7 +291,7 @@
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
- * @rx_replenish: work that will be called when buffers need to be allocated
+ * @rba: allocator for RX replenishing
  * @drv - pointer to iwl_drv
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
@@ -264,8 +305,10 @@
  * @rx_buf_size_8k: 8 kB RX buffer size
  * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
+ * @wide_cmd_header: true when ucode supports wide command header format
  * @rx_page_order: page order for receive buffer size
  * @reg_lock: protect hw register access
+ * @mutex: to protect stop_device / start_fw / start_hw
  * @cmd_in_flight: true when we have a host command in flight
  * @fw_mon_phys: physical address of the buffer for the firmware monitor
  * @fw_mon_page: points to the first page of the buffer for the firmware monitor
@@ -273,7 +316,7 @@
  */
 struct iwl_trans_pcie {
 	struct iwl_rxq rxq;
-	struct work_struct rx_replenish;
+	struct iwl_rb_allocator rba;
 	struct iwl_trans *trans;
 	struct iwl_drv *drv;
 
@@ -285,9 +328,11 @@
 	dma_addr_t ict_tbl_dma;
 	int ict_index;
 	bool use_ict;
+	bool is_down;
 	struct isr_statistics isr_stats;
 
 	spinlock_t irq_lock;
+	struct mutex mutex;
 	u32 inta_mask;
 	u32 scd_base_addr;
 	struct iwl_dma_ptr scd_bc_tbls;
@@ -314,6 +359,7 @@
 	bool rx_buf_size_8k;
 	bool bc_table_dword;
 	bool scd_set_active;
+	bool wide_cmd_header;
 	u32 rx_page_order;
 
 	const char *const *command_names;
@@ -385,7 +431,7 @@
 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
-			    struct iwl_rx_cmd_buffer *rxb, int handler_status);
+			    struct iwl_rx_cmd_buffer *rxb);
 void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 			    struct sk_buff_head *skbs);
 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
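
The constants added to internal.h encode the allocator's batching contract: a queue posts a request after RX_POST_REQ_ALLOC (2) RBDs go empty but claims RX_CLAIM_REQ_ALLOC (8) pages back, so the allocator's own pool must bridge the 8 - 2 = 6 RBDs per queue that can be outstanding between the post and the claim. The arithmetic, checked at compile time (the assertion is illustrative, not in the driver):

#define EX_RX_NUM_QUEUES	1
#define EX_RX_POST_REQ_ALLOC	2
#define EX_RX_CLAIM_REQ_ALLOC	8
#define EX_RX_POOL_SIZE \
	((EX_RX_CLAIM_REQ_ALLOC - EX_RX_POST_REQ_ALLOC) * EX_RX_NUM_QUEUES)

/* A request arrives carrying only 2 empty RBDs, yet 8 allocated pages
 * must be ready to hand back, so the allocator itself supplies the
 * other 6 per queue.
 */
_Static_assert(EX_RX_POOL_SIZE == 6, "one queue needs six spare RBDs");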
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index adad8d0..e06591f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -74,16 +74,29 @@
  * resets the Rx queue buffers with new memory.
  *
  * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
- *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- *   to replenish the iwl->rxq->rx_free.
- * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
- *   iwl->rxq is replenished and the READ INDEX is updated (updating the
- *   'processed' and 'read' driver indexes as well)
+ * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
+ *   When the interrupt handler is called, the request is processed.
+ *   The page is either stolen - transferred to the upper layer
+ *   or reused - added immediately to the iwl->rxq->rx_free list.
+ * + When the page is stolen - the driver updates the matching queue's used
+ *   count, detaches the RBD and transfers it to the queue used list.
+ *   When there are two used RBDs - they are transferred to the allocator empty
+ *   list. Work is then scheduled for the allocator to start allocating
+ *   eight buffers.
+ *   When another 6 RBDs have been used - they are transferred to the
+ *   allocator empty list and the driver tries to claim the pre-allocated
+ *   buffers and add them to iwl->rxq->rx_free, retrying until they are ready.
+ *   When there are 8+ buffers in the free list - either from allocation or
+ *   from 8 reused unstolen pages - restock is called to update the FW and
+ *   indexes.
+ * + In order to make sure the allocator always has RBDs for allocation, it
+ *   has an initial pool of num_queues*(8-2) RBDs - the maximum number of
+ *   RBDs missing per allocation request (a request is posted with 2 empty
+ *   RBDs; there is no guarantee when the other 6 are supplied). The queues
+ *   recycle the rest of the RBDs back to the allocator.
  * + A received packet is processed and handed to the kernel network stack,
  *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
- *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ * + If there are no allocated buffers in iwl->rxq->rx_free,
  *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
  *   If there were enough free buffers and RX_STALLED is set it is cleared.
  *
@@ -92,18 +105,32 @@
  *
  * iwl_rxq_alloc()            Allocates rx_free
  * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
- *                            iwl_pcie_rxq_restock
+ *                            iwl_pcie_rxq_restock.
+ *                            Used only during initialization.
  * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
  *                            queue, updates firmware pointers, and updates
- *                            the WRITE index.  If insufficient rx_free buffers
- *                            are available, schedules iwl_pcie_rx_replenish
+ *                            the WRITE index.
+ * iwl_pcie_rx_allocator()    Background work for allocating pages.
  *
  * -- enable interrupts --
  * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
  *                            READ INDEX, detaching the SKB from the pool.
  *                            Moves the packet buffer from queue to rx_used.
+ *                            Posts and claims requests to the allocator.
  *                            Calls iwl_pcie_rxq_restock to refill any empty
  *                            slots.
+ *
+ * RBD life-cycle:
+ *
+ * Init:
+ * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
+ *
+ * Regular Receive interrupt:
+ * Page Stolen:
+ * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
+ * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
+ * Page not Stolen:
+ * rxq.queue -> rxq.rx_free -> rxq.queue
  * ...
  *
  */
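
The life-cycle diagram above boils down to one routing decision per received buffer: if the stack stole the page, the now-empty RBD detours through the allocator (rx_used -> rbd_empty -> rbd_allocated) before rejoining rx_free; if not, it returns to rx_free directly. A trivial sketch of that decision (names hypothetical):

#include <stdbool.h>

enum ex_rbd_dest { EX_TO_RX_FREE, EX_TO_ALLOCATOR_EMPTY };

/* Stolen page: the RBD is empty and must be refilled by the allocator.
 * Unstolen page: the RBD still owns its page and can be restocked as-is.
 */
static enum ex_rbd_dest ex_route_rbd(bool page_stolen)
{
	return page_stolen ? EX_TO_ALLOCATOR_EMPTY : EX_TO_RX_FREE;
}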
@@ -240,10 +267,6 @@
 		rxq->free_count--;
 	}
 	spin_unlock(&rxq->lock);
-	/* If the pre-allocated buffer pool is dropping low, schedule to
-	 * refill it */
-	if (rxq->free_count <= RX_LOW_WATERMARK)
-		schedule_work(&trans_pcie->rx_replenish);
 
 	/* If we've added more space for the firmware to place data, tell it.
 	 * Increment device's write pointer in multiples of 8. */
@@ -255,6 +278,45 @@
 }
 
 /*
+ * iwl_pcie_rx_alloc_page - allocates and returns a page.
+ *
+ */
+static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
+					   gfp_t priority)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct page *page;
+	gfp_t gfp_mask = priority;
+
+	if (rxq->free_count > RX_LOW_WATERMARK)
+		gfp_mask |= __GFP_NOWARN;
+
+	if (trans_pcie->rx_page_order > 0)
+		gfp_mask |= __GFP_COMP;
+
+	/* Alloc a new receive buffer */
+	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+	if (!page) {
+		if (net_ratelimit())
+			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
+				       trans_pcie->rx_page_order);
+		/* Issue an error if the hardware has consumed more than half
+		 * of its free buffer list and we don't have enough
+		 * pre-allocated buffers.
+		 */
+		if (rxq->free_count <= RX_LOW_WATERMARK &&
+		    iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
+		    net_ratelimit())
+			IWL_CRIT(trans,
+				 "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
+				 rxq->free_count);
+		return NULL;
+	}
+	return page;
+}
+
+/*
  * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
  *
  * A used RBD is an Rx buffer that has been given to the stack. To use it again
@@ -269,7 +331,6 @@
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
 	struct page *page;
-	gfp_t gfp_mask = priority;
 
 	while (1) {
 		spin_lock(&rxq->lock);
@@ -279,32 +340,10 @@
 		}
 		spin_unlock(&rxq->lock);
 
-		if (rxq->free_count > RX_LOW_WATERMARK)
-			gfp_mask |= __GFP_NOWARN;
-
-		if (trans_pcie->rx_page_order > 0)
-			gfp_mask |= __GFP_COMP;
-
 		/* Alloc a new receive buffer */
-		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
-		if (!page) {
-			if (net_ratelimit())
-				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
-					   "order: %d\n",
-					   trans_pcie->rx_page_order);
-
-			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
-			    net_ratelimit())
-				IWL_CRIT(trans, "Failed to alloc_pages with %s."
-					 "Only %u free buffers remaining.\n",
-					 priority == GFP_ATOMIC ?
-					 "GFP_ATOMIC" : "GFP_KERNEL",
-					 rxq->free_count);
-			/* We don't reschedule replenish work here -- we will
-			 * call the restock method and if it still needs
-			 * more buffers it will schedule replenish */
+		page = iwl_pcie_rx_alloc_page(trans, priority);
+		if (!page)
 			return;
-		}
 
 		spin_lock(&rxq->lock);
 
@@ -355,7 +394,7 @@
 
 	lockdep_assert_held(&rxq->lock);
 
-	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+	for (i = 0; i < RX_QUEUE_SIZE; i++) {
 		if (!rxq->pool[i].page)
 			continue;
 		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -372,32 +411,164 @@
  * When moving to rx_free an page is allocated for the slot.
  *
  * Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called as a scheduled work item (except for during initialization)
+ * This is called only during initialization
  */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
 {
-	iwl_pcie_rxq_alloc_rbs(trans, gfp);
+	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
 
 	iwl_pcie_rxq_restock(trans);
 }
 
-static void iwl_pcie_rx_replenish_work(struct work_struct *data)
+/*
+ * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
+ *
+ * Allocates for each received request 8 pages
+ * Called as a scheduled work item.
+ */
+static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
 {
-	struct iwl_trans_pcie *trans_pcie =
-	    container_of(data, struct iwl_trans_pcie, rx_replenish);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+	struct list_head local_empty;
+	int pending = atomic_xchg(&rba->req_pending, 0);
 
-	iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
+	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
+
+	/* If we were scheduled - there is at least one request */
+	spin_lock(&rba->lock);
+	/* swap out the rba->rbd_empty to a local list */
+	list_replace_init(&rba->rbd_empty, &local_empty);
+	spin_unlock(&rba->lock);
+
+	while (pending) {
+		int i;
+		struct list_head local_allocated;
+
+		INIT_LIST_HEAD(&local_allocated);
+
+		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
+			struct iwl_rx_mem_buffer *rxb;
+			struct page *page;
+
+			/* List should never be empty - each reused RBD is
+			 * returned to the list, and the initial pool covers any
+			 * possible gap between the time the page is allocated
+			 * and the time the RBD is added.
+			 */
+			BUG_ON(list_empty(&local_empty));
+			/* Get the first rxb from the rbd list */
+			rxb = list_first_entry(&local_empty,
+					       struct iwl_rx_mem_buffer, list);
+			BUG_ON(rxb->page);
+
+			/* Alloc a new receive buffer */
+			page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
+			if (!page)
+				continue;
+			rxb->page = page;
+
+			/* Get physical address of the RB */
+			rxb->page_dma = dma_map_page(trans->dev, page, 0,
+					PAGE_SIZE << trans_pcie->rx_page_order,
+					DMA_FROM_DEVICE);
+			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+				rxb->page = NULL;
+				__free_pages(page, trans_pcie->rx_page_order);
+				continue;
+			}
+			/* dma address must be no more than 36 bits */
+			BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+			/* and also 256 byte aligned! */
+			BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+			/* move the allocated entry to the out list */
+			list_move(&rxb->list, &local_allocated);
+			i++;
+		}
+
+		pending--;
+		if (!pending) {
+			pending = atomic_xchg(&rba->req_pending, 0);
+			IWL_DEBUG_RX(trans,
+				     "Pending allocation requests = %d\n",
+				     pending);
+		}
+
+		spin_lock(&rba->lock);
+		/* add the allocated rbds to the allocator allocated list */
+		list_splice_tail(&local_allocated, &rba->rbd_allocated);
+		/* get more empty RBDs for current pending requests */
+		list_splice_tail_init(&rba->rbd_empty, &local_empty);
+		spin_unlock(&rba->lock);
+
+		atomic_inc(&rba->req_ready);
+	}
+
+	spin_lock(&rba->lock);
+	/* return unused rbds to the allocator empty list */
+	list_splice_tail(&local_empty, &rba->rbd_empty);
+	spin_unlock(&rba->lock);
+}
+
+/*
+ * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
+ *
+ * Called by the queue when it has posted an allocation request and
+ * has freed 8 RBDs in order to restock itself.
+ */
+static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
+				     struct iwl_rx_mem_buffer
+				     *out[RX_CLAIM_REQ_ALLOC])
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+	int i;
+
+	/*
+	 * atomic_dec_if_positive returns req_ready - 1 in any scenario.
+	 * If req_ready is 0, atomic_dec_if_positive will return -1 and this
+	 * function will return -ENOMEM, as there are no ready requests.
+	 * atomic_dec_if_positive will perform the *actual* decrement only if
+	 * req_ready > 0, i.e. there are ready requests and the function
+	 * hands one request to the caller.
+	 */
+	if (atomic_dec_if_positive(&rba->req_ready) < 0)
+		return -ENOMEM;
+
+	spin_lock(&rba->lock);
+	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
+		/* Get next free Rx buffer, remove it from free list */
+		out[i] = list_first_entry(&rba->rbd_allocated,
+			       struct iwl_rx_mem_buffer, list);
+		list_del(&out[i]->list);
+	}
+	spin_unlock(&rba->lock);
+
+	return 0;
+}
+
+static void iwl_pcie_rx_allocator_work(struct work_struct *data)
+{
+	struct iwl_rb_allocator *rba_p =
+		container_of(data, struct iwl_rb_allocator, rx_alloc);
+	struct iwl_trans_pcie *trans_pcie =
+		container_of(rba_p, struct iwl_trans_pcie, rba);
+
+	iwl_pcie_rx_allocator(trans_pcie->trans);
 }
 
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	struct device *dev = trans->dev;
 
 	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
 
 	spin_lock_init(&rxq->lock);
+	spin_lock_init(&rba->lock);
 
 	if (WARN_ON(rxq->bd || rxq->rb_stts))
 		return -EINVAL;
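
iwl_pcie_rx_allocator_get() above consumes one "ready" credit with atomic_dec_if_positive(): the call decrements req_ready only when it is positive and returns the old value minus one, so a negative return means there is no completed batch to claim. A userspace-flavoured sketch of the same credit handshake (names hypothetical):

#include <stdatomic.h>

static atomic_int ex_req_ready;

/* Producer: each completed batch of 8 pages adds one credit. */
static void ex_batch_done(void)
{
	atomic_fetch_add(&ex_req_ready, 1);
}

/* Consumer: mirrors atomic_dec_if_positive() - consume a credit only
 * if one exists, and report failure (-1) otherwise.
 */
static int ex_claim_batch(void)
{
	int v = atomic_load(&ex_req_ready);

	while (v > 0)
		if (atomic_compare_exchange_weak(&ex_req_ready, &v, v - 1))
			return 0;	/* credit consumed: claim 8 pages */
	return -1;			/* no completed request yet */
}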
@@ -487,15 +658,49 @@
 	INIT_LIST_HEAD(&rxq->rx_free);
 	INIT_LIST_HEAD(&rxq->rx_used);
 	rxq->free_count = 0;
+	rxq->used_count = 0;
 
-	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+	for (i = 0; i < RX_QUEUE_SIZE; i++)
 		list_add(&rxq->pool[i].list, &rxq->rx_used);
 }
 
+static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
+{
+	int i;
+
+	lockdep_assert_held(&rba->lock);
+
+	INIT_LIST_HEAD(&rba->rbd_allocated);
+	INIT_LIST_HEAD(&rba->rbd_empty);
+
+	for (i = 0; i < RX_POOL_SIZE; i++)
+		list_add(&rba->pool[i].list, &rba->rbd_empty);
+}
+
+static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+	int i;
+
+	lockdep_assert_held(&rba->lock);
+
+	for (i = 0; i < RX_POOL_SIZE; i++) {
+		if (!rba->pool[i].page)
+			continue;
+		dma_unmap_page(trans->dev, rba->pool[i].page_dma,
+			       PAGE_SIZE << trans_pcie->rx_page_order,
+			       DMA_FROM_DEVICE);
+		__free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
+		rba->pool[i].page = NULL;
+	}
+}
+
 int iwl_pcie_rx_init(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	int i, err;
 
 	if (!rxq->bd) {
@@ -503,11 +708,21 @@
 		if (err)
 			return err;
 	}
+	if (!rba->alloc_wq)
+		rba->alloc_wq = alloc_workqueue("rb_allocator",
+						WQ_HIGHPRI | WQ_UNBOUND, 1);
+	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
+
+	spin_lock(&rba->lock);
+	atomic_set(&rba->req_pending, 0);
+	atomic_set(&rba->req_ready, 0);
+	/* free all first - we might be reconfigured for a different size */
+	iwl_pcie_rx_free_rba(trans);
+	iwl_pcie_rx_init_rba(rba);
+	spin_unlock(&rba->lock);
 
 	spin_lock(&rxq->lock);
 
-	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
-
 	/* free all first - we might be reconfigured for a different size */
 	iwl_pcie_rxq_free_rbs(trans);
 	iwl_pcie_rx_init_rxb_lists(rxq);
@@ -522,7 +737,7 @@
 	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
 	spin_unlock(&rxq->lock);
 
-	iwl_pcie_rx_replenish(trans, GFP_KERNEL);
+	iwl_pcie_rx_replenish(trans);
 
 	iwl_pcie_rx_hw_init(trans, rxq);
 
@@ -537,6 +752,7 @@
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 
 	/*if rxq->bd is NULL, it means that nothing has been allocated,
 	 * exit now */
@@ -545,7 +761,15 @@
 		return;
 	}
 
-	cancel_work_sync(&trans_pcie->rx_replenish);
+	cancel_work_sync(&rba->rx_alloc);
+	if (rba->alloc_wq) {
+		destroy_workqueue(rba->alloc_wq);
+		rba->alloc_wq = NULL;
+	}
+
+	spin_lock(&rba->lock);
+	iwl_pcie_rx_free_rba(trans);
+	spin_unlock(&rba->lock);
 
 	spin_lock(&rxq->lock);
 	iwl_pcie_rxq_free_rbs(trans);
@@ -566,8 +790,49 @@
 	rxq->rb_stts = NULL;
 }
 
+/*
+ * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
+ *
+ * Called when a RBD can be reused. The RBD is transferred to the allocator.
+ * When there are 2 empty RBDs - a request for allocation is posted
+ */
+static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
+				  struct iwl_rx_mem_buffer *rxb,
+				  struct iwl_rxq *rxq, bool emergency)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+
+	/* Move the RBD to the used list; it will be moved to the allocator
+	 * in batches before claiming or posting a request */
+	list_add_tail(&rxb->list, &rxq->rx_used);
+
+	if (unlikely(emergency))
+		return;
+
+	/* Count the allocator owned RBDs */
+	rxq->used_count++;
+
+	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
+	 * issue a request for the allocator. Modulo RX_CLAIM_REQ_ALLOC is
+	 * used for the case where we failed to claim RX_CLAIM_REQ_ALLOC
+	 * buffers but still need to post another request.
+	 */
+	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
+		/* Move the 2 RBDs to the allocator's ownership.
+		 * The allocator has another 6 from the pool for the
+		 * request completion */
+		spin_lock(&rba->lock);
+		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+		spin_unlock(&rba->lock);
+
+		atomic_inc(&rba->req_pending);
+		queue_work(rba->alloc_wq, &rba->rx_alloc);
+	}
+}
+
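The reuse path above batches buffers through two counters: a reused RBD first lands on rxq->rx_used, and once used_count is RX_POST_REQ_ALLOC past a multiple of RX_CLAIM_REQ_ALLOC those buffers move to the allocator and a request is posted, while full batches of RX_CLAIM_REQ_ALLOC are claimed back in the handler below. A minimal userspace sketch of that counting, assuming the values the comments imply (2 and 8); the immediate claim here stands in for a successful allocator round:

    #include <stdio.h>

    #define RX_POST_REQ_ALLOC  2   /* used RBDs that trigger a request   */
    #define RX_CLAIM_REQ_ALLOC 8   /* RBDs handed back per claimed batch */

    int main(void)
    {
        unsigned int used_count = 0, requests = 0, claims = 0;
        int i;

        for (i = 0; i < 32; i++) {          /* simulate 32 reused RBDs */
            used_count++;
            /* post a request 2 buffers past each 8-buffer boundary */
            if (used_count % RX_CLAIM_REQ_ALLOC == RX_POST_REQ_ALLOC)
                requests++;
            /* claim a full batch once 8 used RBDs have accumulated */
            if (used_count >= RX_CLAIM_REQ_ALLOC) {
                used_count -= RX_CLAIM_REQ_ALLOC;
                claims++;
            }
        }
        printf("requests=%u claims=%u leftover=%u\n",
               requests, claims, used_count);
        return 0;
    }

The modulo (rather than an equality against RX_POST_REQ_ALLOC alone) matters when a claim fails and used_count grows past 8: e.g. at used_count == 10 the condition still fires and another request is posted.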
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
-				struct iwl_rx_mem_buffer *rxb)
+				struct iwl_rx_mem_buffer *rxb,
+				bool emergency)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
@@ -583,10 +848,9 @@
 
 	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
 		struct iwl_rx_packet *pkt;
-		struct iwl_device_cmd *cmd;
 		u16 sequence;
 		bool reclaim;
-		int index, cmd_index, err, len;
+		int index, cmd_index, len;
 		struct iwl_rx_cmd_buffer rxcb = {
 			._offset = offset,
 			._rx_page_order = trans_pcie->rx_page_order,
@@ -634,12 +898,7 @@
 		index = SEQ_TO_INDEX(sequence);
 		cmd_index = get_cmd_index(&txq->q, index);
 
-		if (reclaim)
-			cmd = txq->entries[cmd_index].cmd;
-		else
-			cmd = NULL;
-
-		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
+		iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb);
 
 		if (reclaim) {
 			kzfree(txq->entries[cmd_index].free_buf);
@@ -657,7 +916,7 @@
 			 * iwl_trans_send_cmd()
 			 * as we reclaim the driver command queue */
 			if (!rxcb._page_stolen)
-				iwl_pcie_hcmd_complete(trans, &rxcb, err);
+				iwl_pcie_hcmd_complete(trans, &rxcb);
 			else
 				IWL_WARN(trans, "Claim null rxb?\n");
 		}
@@ -688,13 +947,13 @@
 			 */
 			__free_pages(rxb->page, trans_pcie->rx_page_order);
 			rxb->page = NULL;
-			list_add_tail(&rxb->list, &rxq->rx_used);
+			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
 		} else {
 			list_add_tail(&rxb->list, &rxq->rx_free);
 			rxq->free_count++;
 		}
 	} else
-		list_add_tail(&rxb->list, &rxq->rx_used);
+		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
 }
 
 /*
@@ -704,10 +963,8 @@
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	u32 r, i;
-	u8 fill_rx = 0;
-	u32 count = 8;
-	int total_empty;
+	u32 r, i, j, count = 0;
+	bool emergency = false;
 
 restart:
 	spin_lock(&rxq->lock);
@@ -720,36 +977,74 @@
 	if (i == r)
 		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
 
-	/* calculate total frames need to be restock after handling RX */
-	total_empty = r - rxq->write_actual;
-	if (total_empty < 0)
-		total_empty += RX_QUEUE_SIZE;
-
-	if (total_empty > (RX_QUEUE_SIZE / 2))
-		fill_rx = 1;
-
 	while (i != r) {
 		struct iwl_rx_mem_buffer *rxb;
 
+		if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2))
+			emergency = true;
+
 		rxb = rxq->queue[i];
 		rxq->queue[i] = NULL;
 
 		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
 			     r, i, rxb);
-		iwl_pcie_rx_handle_rb(trans, rxb);
+		iwl_pcie_rx_handle_rb(trans, rxb, emergency);
 
 		i = (i + 1) & RX_QUEUE_MASK;
-		/* If there are a lot of unused frames,
-		 * restock the Rx queue so ucode wont assert. */
-		if (fill_rx) {
-			count++;
-			if (count >= 8) {
-				rxq->read = i;
-				spin_unlock(&rxq->lock);
-				iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
-				count = 0;
-				goto restart;
+
+		/* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
+		 * try to claim the pre-allocated buffers from the allocator */
+		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
+			struct iwl_rb_allocator *rba = &trans_pcie->rba;
+			struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
+
+			if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
+			    !emergency) {
+				/* Add the remaining 6 empty RBDs
+				 * for allocator use
+				 */
+				spin_lock(&rba->lock);
+				list_splice_tail_init(&rxq->rx_used,
+						      &rba->rbd_empty);
+				spin_unlock(&rba->lock);
 			}
+
+			/* If not ready - continue, we will try to reclaim
+			 * later. No need to reschedule work - the allocator
+			 * exits only on success */
+			if (!iwl_pcie_rx_allocator_get(trans, out)) {
+				/* On success, RX_CLAIM_REQ_ALLOC buffers
+				 * were retrieved and should be added to the
+				 * free list */
+				rxq->used_count -= RX_CLAIM_REQ_ALLOC;
+				for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
+					list_add_tail(&out[j]->list,
+						      &rxq->rx_free);
+					rxq->free_count++;
+				}
+			}
+		}
+		if (emergency) {
+			count++;
+			if (count == 8) {
+				count = 0;
+				if (rxq->used_count < RX_QUEUE_SIZE / 3)
+					emergency = false;
+				spin_unlock(&rxq->lock);
+				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+				spin_lock(&rxq->lock);
+			}
+		}
+		/* Handle restock for three cases, can be all of them at once:
+		 * - we just pulled buffers from the allocator
+		 * - we have 8+ unstolen pages accumulated
+		 * - we are in emergency and allocated buffers
+		 */
+		if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
+			rxq->read = i;
+			spin_unlock(&rxq->lock);
+			iwl_pcie_rxq_restock(trans);
+			goto restart;
 		}
 	}
 
@@ -757,10 +1052,20 @@
 	rxq->read = i;
 	spin_unlock(&rxq->lock);
 
-	if (fill_rx)
-		iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
-	else
-		iwl_pcie_rxq_restock(trans);
+	/*
+	 * Handle a case where in emergency there are some unallocated RBDs.
+	 * Those RBDs are in the used list, but are not tracked by the queue's
+	 * used_count, which only counts allocator-owned RBDs.
+	 * Unallocated emergency RBDs must be allocated on exit, otherwise
+	 * when called again the function may not be in emergency mode and
+	 * they will be handed to the allocator with no tracking in the RBD
+	 * allocator counters, which will lead to them never being claimed
+	 * back by the queue.
+	 * By allocating them here, they are now in the queue free list, and
+	 * will be restocked by the next call of iwl_pcie_rxq_restock.
+	 */
+	if (unlikely(emergency && count))
+		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
 
 	if (trans_pcie->napi.poll)
 		napi_gro_flush(&trans_pcie->napi, false);
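The handler loop above enters emergency mode when half the ring is owned by the allocator, bypassing it with direct GFP_ATOMIC allocations every 8 handled buffers, and drops back out once usage falls below a third of the ring. A standalone sketch of that hysteresis, assuming the driver's ring size of 256 (RX_QUEUE_SIZE):

    #include <stdbool.h>
    #include <stdio.h>

    #define RX_QUEUE_SIZE 256

    /* enter emergency at 1/2 of the ring used, leave below 1/3 */
    static bool update_emergency(bool emergency, unsigned int used_count)
    {
        if (used_count == RX_QUEUE_SIZE / 2)
            return true;
        if (emergency && used_count < RX_QUEUE_SIZE / 3)
            return false;
        return emergency;
    }

    int main(void)
    {
        unsigned int samples[] = { 100, 128, 120, 90, 84, 70 };
        bool e = false;
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            e = update_emergency(e, samples[i]);
            printf("used=%u emergency=%d\n", samples[i], e);
        }
        return 0;
    }

The gap between the 1/2 entry and 1/3 exit thresholds prevents the code from flapping in and out of emergency mode while the allocator slowly catches up.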
@@ -772,6 +1077,7 @@
 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int i;
 
 	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
 	if (trans->cfg->internal_wimax_coex &&
@@ -795,6 +1101,9 @@
 	iwl_trans_fw_error(trans);
 	local_bh_enable();
 
+	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
+		del_timer(&trans_pcie->txq[i].stuck_timer);
+
 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 	wake_up(&trans_pcie->wait_command_queue);
 }
@@ -1003,7 +1312,9 @@
 
 		isr_stats->rfkill++;
 
+		mutex_lock(&trans_pcie->mutex);
 		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+		mutex_unlock(&trans_pcie->mutex);
 		if (hw_rfkill) {
 			set_bit(STATUS_RFKILL, &trans->status);
 			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
@@ -1195,8 +1506,9 @@
 
 	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
 
-	val |= CSR_DRAM_INT_TBL_ENABLE;
-	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
+	val |= CSR_DRAM_INT_TBL_ENABLE |
+	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
+	       CSR_DRAM_INIT_TBL_WRITE_POINTER;
 
 	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
 
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 9e144e7..6ba7d30 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -780,8 +780,15 @@
 	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
 		last_read_idx = i;
 
+		/*
+		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates between
+		 * the CPU1 and CPU2 sections.
+		 * PAGING_SEPARATOR_SECTION delimiter - separates between
+		 * the CPU2 non-paged and CPU2 paging sections.
+		 */
 		if (!image->sec[i].data ||
-		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
 			IWL_DEBUG_FW(trans,
 				     "Break since Data not valid or Empty section, sec = %d\n",
 				     i);
@@ -829,8 +836,15 @@
 	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
 		last_read_idx = i;
 
+		/*
+		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates between
+		 * the CPU1 and CPU2 sections.
+		 * PAGING_SEPARATOR_SECTION delimiter - separates between
+		 * the CPU2 non-paged and CPU2 paging sections.
+		 */
 		if (!image->sec[i].data ||
-		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
 			IWL_DEBUG_FW(trans,
 				     "Break since Data not valid or Empty section, sec = %d\n",
 				     i);
@@ -897,6 +911,14 @@
 		case PRPH_CLEARBIT:
 			iwl_clear_bits_prph(trans, addr, BIT(val));
 			break;
+		case PRPH_BLOCKBIT:
+			if (iwl_read_prph(trans, addr) & BIT(val)) {
+				IWL_ERR(trans,
+					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
+					val, addr);
+				goto monitor;
+			}
+			break;
 		default:
 			IWL_ERR(trans, "FW debug - unknown OP %d\n",
 				dest->reg_ops[i].op);
@@ -904,6 +926,7 @@
 		}
 	}
 
+monitor:
 	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
 		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
 			       trans_pcie->fw_mon_phys >> dest->base_shift);
@@ -998,13 +1021,25 @@
 static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 				   const struct fw_img *fw, bool run_in_rfkill)
 {
-	int ret;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	bool hw_rfkill;
+	int ret;
+
+	mutex_lock(&trans_pcie->mutex);
+
+	/* Someone called stop_device, don't try to start_fw */
+	if (trans_pcie->is_down) {
+		IWL_WARN(trans,
+			 "Can't start_fw since the HW hasn't been started\n");
+		ret = -EIO;
+		goto out;
+	}
 
 	/* This may fail if AMT took ownership of the device */
 	if (iwl_pcie_prepare_card_hw(trans)) {
 		IWL_WARN(trans, "Exit HW not ready\n");
-		return -EIO;
+		ret = -EIO;
+		goto out;
 	}
 
 	iwl_enable_rfkill_int(trans);
@@ -1016,15 +1051,17 @@
 	else
 		clear_bit(STATUS_RFKILL, &trans->status);
 	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
-	if (hw_rfkill && !run_in_rfkill)
-		return -ERFKILL;
+	if (hw_rfkill && !run_in_rfkill) {
+		ret = -ERFKILL;
+		goto out;
+	}
 
 	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
 
 	ret = iwl_pcie_nic_init(trans);
 	if (ret) {
 		IWL_ERR(trans, "Unable to init nic\n");
-		return ret;
+		goto out;
 	}
 
 	/* make sure rfkill handshake bits are cleared */
@@ -1042,9 +1079,13 @@
 
 	/* Load the given image to the HW */
 	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
-		return iwl_pcie_load_given_ucode_8000(trans, fw);
+		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
 	else
-		return iwl_pcie_load_given_ucode(trans, fw);
+		ret = iwl_pcie_load_given_ucode(trans, fw);
+
+out:
+	mutex_unlock(&trans_pcie->mutex);
+	return ret;
 }
 
 static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
@@ -1053,11 +1094,18 @@
 	iwl_pcie_tx_start(trans, scd_addr);
 }
 
-static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	bool hw_rfkill, was_hw_rfkill;
 
+	lockdep_assert_held(&trans_pcie->mutex);
+
+	if (trans_pcie->is_down)
+		return;
+
+	trans_pcie->is_down = true;
+
 	was_hw_rfkill = iwl_is_rfkill_set(trans);
 
 	/* tell the device to stop sending interrupts */
@@ -1147,14 +1195,36 @@
 	iwl_pcie_prepare_card_hw(trans);
 }
 
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	mutex_lock(&trans_pcie->mutex);
+	_iwl_trans_pcie_stop_device(trans, low_power);
+	mutex_unlock(&trans_pcie->mutex);
+}
+
 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
 {
+	struct iwl_trans_pcie __maybe_unused *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	lockdep_assert_held(&trans_pcie->mutex);
+
 	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
-		iwl_trans_pcie_stop_device(trans, true);
+		_iwl_trans_pcie_stop_device(trans, true);
 }
 
 static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (trans->wowlan_d0i3) {
+		/* Enable persistence mode to avoid reset */
+		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+	}
+
 	iwl_disable_interrupts(trans);
 
 	/*
@@ -1166,17 +1236,21 @@
 
 	iwl_pcie_disable_ict(trans);
 
+	synchronize_irq(trans_pcie->pci_dev->irq);
+
 	iwl_clear_bit(trans, CSR_GP_CNTRL,
 		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 	iwl_clear_bit(trans, CSR_GP_CNTRL,
 		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
 
-	/*
-	 * reset TX queues -- some of their registers reset during S3
-	 * so if we don't reset everything here the D3 image would try
-	 * to execute some invalid memory upon resume
-	 */
-	iwl_trans_pcie_tx_reset(trans);
+	if (!trans->wowlan_d0i3) {
+		/*
+		 * reset TX queues -- some of their registers reset during S3
+		 * so if we don't reset everything here the D3 image would try
+		 * to execute some invalid memory upon resume
+		 */
+		iwl_trans_pcie_tx_reset(trans);
+	}
 
 	iwl_pcie_set_pwr(trans, true);
 }
@@ -1218,12 +1292,18 @@
 
 	iwl_pcie_set_pwr(trans, false);
 
-	iwl_trans_pcie_tx_reset(trans);
+	if (trans->wowlan_d0i3) {
+		iwl_clear_bit(trans, CSR_GP_CNTRL,
+			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+	} else {
+		iwl_trans_pcie_tx_reset(trans);
 
-	ret = iwl_pcie_rx_init(trans);
-	if (ret) {
-		IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
-		return ret;
+		ret = iwl_pcie_rx_init(trans);
+		if (ret) {
+			IWL_ERR(trans,
+				"Failed to resume the device (RX reset)\n");
+			return ret;
+		}
 	}
 
 	val = iwl_read32(trans, CSR_RESET);
@@ -1235,11 +1315,14 @@
 	return 0;
 }
 
-static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	bool hw_rfkill;
 	int err;
 
+	lockdep_assert_held(&trans_pcie->mutex);
+
 	err = iwl_pcie_prepare_card_hw(trans);
 	if (err) {
 		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
@@ -1256,20 +1339,38 @@
 	/* From now on, the op_mode will be kept updated about RF kill state */
 	iwl_enable_rfkill_int(trans);
 
+	/* Set is_down to false here so that... */
+	trans_pcie->is_down = false;
+
 	hw_rfkill = iwl_is_rfkill_set(trans);
 	if (hw_rfkill)
 		set_bit(STATUS_RFKILL, &trans->status);
 	else
 		clear_bit(STATUS_RFKILL, &trans->status);
+	/* ... rfkill can call stop_device and set it false if needed */
 	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 
 	return 0;
 }
 
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int ret;
+
+	mutex_lock(&trans_pcie->mutex);
+	ret = _iwl_trans_pcie_start_hw(trans, low_power);
+	mutex_unlock(&trans_pcie->mutex);
+
+	return ret;
+}
+
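The start_hw/stop_device refactoring in this file follows a common locking pattern: a public entry point that takes trans_pcie->mutex, and an underscore-prefixed worker that only asserts the lock is held, so paths that already own the mutex (the rfkill notification above) can call the worker directly without deadlocking. A minimal pthread sketch of the pattern, with illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int is_down;

    /* worker: caller must hold dev_mutex (lockdep_assert_held analogue) */
    static void _stop_device(void)
    {
        if (is_down)
            return;
        is_down = 1;
        puts("device stopped");
    }

    /* public entry point: takes the lock itself */
    static void stop_device(void)
    {
        pthread_mutex_lock(&dev_mutex);
        _stop_device();
        pthread_mutex_unlock(&dev_mutex);
    }

    /* rf-kill path already holds the lock, so it calls the worker */
    static void rf_kill(int state)
    {
        pthread_mutex_lock(&dev_mutex);
        if (state)
            _stop_device();
        pthread_mutex_unlock(&dev_mutex);
    }

    int main(void)
    {
        rf_kill(1);
        stop_device();  /* second call is a no-op: is_down is set */
        return 0;
    }

The is_down flag doubles as the guard that makes stop_device idempotent and lets start_fw refuse to run after a stop, as seen earlier in this patch.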
 static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
+	mutex_lock(&trans_pcie->mutex);
+
 	/* disable interrupts - don't enable HW RF kill interrupt */
 	spin_lock(&trans_pcie->irq_lock);
 	iwl_disable_interrupts(trans);
@@ -1282,6 +1383,10 @@
 	spin_unlock(&trans_pcie->irq_lock);
 
 	iwl_pcie_disable_ict(trans);
+
+	mutex_unlock(&trans_pcie->mutex);
+
+	synchronize_irq(trans_pcie->pci_dev->irq);
 }
 
 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -1342,6 +1447,7 @@
 	else
 		trans_pcie->rx_page_order = get_order(4 * 1024);
 
+	trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
 	trans_pcie->command_names = trans_cfg->command_names;
 	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
 	trans_pcie->scd_set_active = trans_cfg->scd_set_active;
@@ -1354,11 +1460,10 @@
 	 * As this function may be called again in some corner cases don't
 	 * do anything if NAPI was already initialized.
 	 */
-	if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+	if (!trans_pcie->napi.poll) {
 		init_dummy_netdev(&trans_pcie->napi_dev);
-		iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
-				     &trans_pcie->napi_dev,
-				     iwl_pcie_dummy_napi_poll, 64);
+		netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi,
+			       iwl_pcie_dummy_napi_poll, 64);
 	}
 }
 
@@ -2185,6 +2290,47 @@
 	return prph_len;
 }
 
+static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
+				   struct iwl_fw_error_dump_data **data,
+				   int allocated_rb_nums)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	u32 i, r, j, rb_len = 0;
+
+	spin_lock(&rxq->lock);
+
+	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+
+	for (i = rxq->read, j = 0;
+	     i != r && j < allocated_rb_nums;
+	     i = (i + 1) & RX_QUEUE_MASK, j++) {
+		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
+		struct iwl_fw_error_dump_rb *rb;
+
+		dma_unmap_page(trans->dev, rxb->page_dma, max_len,
+			       DMA_FROM_DEVICE);
+
+		rb_len += sizeof(**data) + sizeof(*rb) + max_len;
+
+		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
+		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
+		rb = (void *)(*data)->data;
+		rb->index = cpu_to_le32(i);
+		memcpy(rb->data, page_address(rxb->page), max_len);
+		/* remap the page so the device can keep using the buffer */
+		rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
+					     max_len, DMA_FROM_DEVICE);
+
+		*data = iwl_fw_error_next_data(*data);
+	}
+
+	spin_unlock(&rxq->lock);
+
+	return rb_len;
+}
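iwl_trans_pcie_dump_rbs() walks the ring from the software read pointer up to the closed_rb_num the firmware reported, unmapping each page so the CPU sees coherent data, copying it out, and remapping it for continued device use. A small sketch of the masked ring walk, assuming the driver's RX_QUEUE_SIZE of 256:

    #include <stdio.h>

    #define RX_QUEUE_SIZE 256
    #define RX_QUEUE_MASK (RX_QUEUE_SIZE - 1)

    int main(void)
    {
        /* walk from the software read pointer to the firmware-reported
         * closed_rb_num, wrapping with the power-of-two mask */
        unsigned int read = 250, closed_rb_num = 4, i, n = 0;

        for (i = read; i != closed_rb_num; i = (i + 1) & RX_QUEUE_MASK)
            n++;
        printf("%u buffers to dump\n", n); /* 10: 250..255 and 0..3 */
        return 0;
    }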
 #define IWL_CSR_TO_DUMP (0x250)
 
 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
@@ -2254,17 +2400,97 @@
 	return monitor_len;
 }
 
-static
-struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
+static u32
+iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
+			    struct iwl_fw_error_dump_data **data,
+			    u32 monitor_len)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 len = 0;
+
+	if ((trans_pcie->fw_mon_page &&
+	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
+	    trans->dbg_dest_tlv) {
+		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
+		u32 base, write_ptr, wrap_cnt;
+
+		/* If there was a dest TLV - use the values from there */
+		if (trans->dbg_dest_tlv) {
+			write_ptr =
+				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
+			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
+			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+		} else {
+			base = MON_BUFF_BASE_ADDR;
+			write_ptr = MON_BUFF_WRPTR;
+			wrap_cnt = MON_BUFF_CYCLE_CNT;
+		}
+
+		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
+		fw_mon_data = (void *)(*data)->data;
+		fw_mon_data->fw_mon_wr_ptr =
+			cpu_to_le32(iwl_read_prph(trans, write_ptr));
+		fw_mon_data->fw_mon_cycle_cnt =
+			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
+		fw_mon_data->fw_mon_base_ptr =
+			cpu_to_le32(iwl_read_prph(trans, base));
+
+		len += sizeof(**data) + sizeof(*fw_mon_data);
+		if (trans_pcie->fw_mon_page) {
+			/*
+			 * The firmware is now asserted, it won't write anything
+			 * to the buffer. CPU can take ownership to fetch the
+			 * data. The buffer will be handed back to the device
+			 * before the firmware will be restarted.
+			 */
+			dma_sync_single_for_cpu(trans->dev,
+						trans_pcie->fw_mon_phys,
+						trans_pcie->fw_mon_size,
+						DMA_FROM_DEVICE);
+			memcpy(fw_mon_data->data,
+			       page_address(trans_pcie->fw_mon_page),
+			       trans_pcie->fw_mon_size);
+
+			monitor_len = trans_pcie->fw_mon_size;
+		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
+			/*
+			 * Update pointers to reflect actual values after
+			 * shifting
+			 */
+			base = iwl_read_prph(trans, base) <<
+			       trans->dbg_dest_tlv->base_shift;
+			iwl_trans_read_mem(trans, base, fw_mon_data->data,
+					   monitor_len / sizeof(u32));
+		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
+			monitor_len =
+				iwl_trans_pci_dump_marbh_monitor(trans,
+								 fw_mon_data,
+								 monitor_len);
+		} else {
+			/* Didn't match anything - output no monitor data */
+			monitor_len = 0;
+		}
+
+		len += monitor_len;
+		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
+	}
+
+	return len;
+}
+
+static struct iwl_trans_dump_data
+*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+			  struct iwl_fw_dbg_trigger_tlv *trigger)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_fw_error_dump_data *data;
 	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
 	struct iwl_fw_error_dump_txcmd *txcmd;
 	struct iwl_trans_dump_data *dump_data;
-	u32 len;
+	u32 len, num_rbs;
 	u32 monitor_len;
 	int i, ptr;
+	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);
 
 	/* transport dump header */
 	len = sizeof(*dump_data);
@@ -2273,22 +2499,6 @@
 	len += sizeof(*data) +
 		cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
 
-	/* CSR registers */
-	len += sizeof(*data) + IWL_CSR_TO_DUMP;
-
-	/* PRPH registers */
-	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
-		/* The range includes both boundaries */
-		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
-			iwl_prph_dump_addr[i].start + 4;
-
-		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
-			num_bytes_in_chunk;
-	}
-
-	/* FH registers */
-	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
-
 	/* FW monitor */
 	if (trans_pcie->fw_mon_page) {
 		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
@@ -2316,6 +2526,45 @@
 		monitor_len = 0;
 	}
 
+	if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+		dump_data = vzalloc(len);
+		if (!dump_data)
+			return NULL;
+
+		data = (void *)dump_data->data;
+		len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+		dump_data->len = len;
+
+		return dump_data;
+	}
+
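When the trigger requests a monitor-only dump, the function short-circuits here: len so far covers only the dump header plus the monitor section sized above, and iwl_trans_pcie_dump_monitor() serializes into the freshly allocated buffer. A minimal userspace sketch of this size-then-serialize pattern, with illustrative names standing in for the driver's structures:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct dump_hdr { size_t len; char data[]; };

    static size_t monitor_size(void) { return 16; } /* stand-in */

    int main(void)
    {
        /* pass 1: compute the total size, then allocate once */
        size_t len = sizeof(struct dump_hdr) + monitor_size();
        struct dump_hdr *d = calloc(1, len);    /* vzalloc() analogue */

        if (!d)
            return 1;
        /* pass 2: serialize the (fake) monitor contents */
        memset(d->data, 0xab, monitor_size());
        d->len = monitor_size();
        printf("allocated %zu, payload %zu\n", len, d->len);
        free(d);
        return 0;
    }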
+	/* CSR registers */
+	len += sizeof(*data) + IWL_CSR_TO_DUMP;
+
+	/* PRPH registers */
+	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
+		/* The range includes both boundaries */
+		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
+			iwl_prph_dump_addr[i].start + 4;
+
+		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
+		       num_bytes_in_chunk;
+	}
+
+	/* FH registers */
+	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+
+	if (dump_rbs) {
+		/* RBs */
+		num_rbs = le16_to_cpu(ACCESS_ONCE(
+				      trans_pcie->rxq.rb_stts->closed_rb_num))
+				      & 0x0FFF;
+		num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
+		len += num_rbs * (sizeof(*data) +
+				  sizeof(struct iwl_fw_error_dump_rb) +
+				  (PAGE_SIZE << trans_pcie->rx_page_order));
+	}
+
 	dump_data = vzalloc(len);
 	if (!dump_data)
 		return NULL;
@@ -2352,74 +2601,10 @@
 	len += iwl_trans_pcie_dump_prph(trans, &data);
 	len += iwl_trans_pcie_dump_csr(trans, &data);
 	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
-	/* data is already pointing to the next section */
+	if (dump_rbs)
+		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
 
-	if ((trans_pcie->fw_mon_page &&
-	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
-	    trans->dbg_dest_tlv) {
-		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
-		u32 base, write_ptr, wrap_cnt;
-
-		/* If there was a dest TLV - use the values from there */
-		if (trans->dbg_dest_tlv) {
-			write_ptr =
-				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
-			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
-			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
-		} else {
-			base = MON_BUFF_BASE_ADDR;
-			write_ptr = MON_BUFF_WRPTR;
-			wrap_cnt = MON_BUFF_CYCLE_CNT;
-		}
-
-		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
-		fw_mon_data = (void *)data->data;
-		fw_mon_data->fw_mon_wr_ptr =
-			cpu_to_le32(iwl_read_prph(trans, write_ptr));
-		fw_mon_data->fw_mon_cycle_cnt =
-			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
-		fw_mon_data->fw_mon_base_ptr =
-			cpu_to_le32(iwl_read_prph(trans, base));
-
-		len += sizeof(*data) + sizeof(*fw_mon_data);
-		if (trans_pcie->fw_mon_page) {
-			/*
-			 * The firmware is now asserted, it won't write anything
-			 * to the buffer. CPU can take ownership to fetch the
-			 * data. The buffer will be handed back to the device
-			 * before the firmware will be restarted.
-			 */
-			dma_sync_single_for_cpu(trans->dev,
-						trans_pcie->fw_mon_phys,
-						trans_pcie->fw_mon_size,
-						DMA_FROM_DEVICE);
-			memcpy(fw_mon_data->data,
-			       page_address(trans_pcie->fw_mon_page),
-			       trans_pcie->fw_mon_size);
-
-			monitor_len = trans_pcie->fw_mon_size;
-		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
-			/*
-			 * Update pointers to reflect actual values after
-			 * shifting
-			 */
-			base = iwl_read_prph(trans, base) <<
-			       trans->dbg_dest_tlv->base_shift;
-			iwl_trans_read_mem(trans, base, fw_mon_data->data,
-					   monitor_len / sizeof(u32));
-		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
-			monitor_len =
-				iwl_trans_pci_dump_marbh_monitor(trans,
-								 fw_mon_data,
-								 monitor_len);
-		} else {
-			/* Didn't match anything - output no monitor data */
-			monitor_len = 0;
-		}
-
-		len += monitor_len;
-		data->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
-	}
+	len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
 
 	dump_data->len = len;
 
@@ -2482,12 +2667,15 @@
 	if (!trans)
 		return ERR_PTR(-ENOMEM);
 
+	trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;
+
 	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
 	trans_pcie->trans = trans;
 	spin_lock_init(&trans_pcie->irq_lock);
 	spin_lock_init(&trans_pcie->reg_lock);
 	spin_lock_init(&trans_pcie->ref_lock);
+	mutex_init(&trans_pcie->mutex);
 	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
 	ret = pci_enable_device(pdev);
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 607acb5..a8c8a4a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -219,8 +219,6 @@
 
 	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
 
-	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
 	sta_id = tx_cmd->sta_id;
 	sec_ctl = tx_cmd->sec_ctl;
 
@@ -239,6 +237,9 @@
 	if (trans_pcie->bc_table_dword)
 		len = DIV_ROUND_UP(len, 4);
 
+	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+		return;
+
 	bc_ent = cpu_to_le16(len | (sta_id << 12));
 
 	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
@@ -387,11 +388,18 @@
 
 	/* first TB is never freed - it's the scratchbuf data */
 
-	for (i = 1; i < num_tbs; i++)
-		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
-				 iwl_pcie_tfd_tb_get_len(tfd, i),
-				 DMA_TO_DEVICE);
-
+	for (i = 1; i < num_tbs; i++) {
+		if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
+			dma_unmap_page(trans->dev,
+				       iwl_pcie_tfd_tb_get_addr(tfd, i),
+				       iwl_pcie_tfd_tb_get_len(tfd, i),
+				       DMA_TO_DEVICE);
+		else
+			dma_unmap_single(trans->dev,
+					 iwl_pcie_tfd_tb_get_addr(tfd, i),
+					 iwl_pcie_tfd_tb_get_len(tfd, i),
+					 DMA_TO_DEVICE);
+	}
 	tfd->num_tbs = 0;
 }
 
@@ -467,7 +475,7 @@
 
 	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
 
-	return 0;
+	return num_tbs;
 }
 
 static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
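iwl_pcie_txq_build_tfd() now returns the index of the TB it filled so callers can record, in a bitmap kept in out_meta->flags, which TBs were mapped with dma_map_page() (skb fragments) rather than dma_map_single(); the unmap loop above then picks the matching unmap call per bit. A sketch of the bookkeeping, with an illustrative CMD_TB_BITMAP_POS value (the real offset lives in the driver's internal.h):

    #include <stdio.h>

    /* illustrative offset; the driver keeps the per-TB map above the
     * low CMD_* flag bits of meta->flags */
    #define CMD_TB_BITMAP_POS 11

    int main(void)
    {
        unsigned int flags = 0;
        int tb_idx;

        /* pretend TBs 2 and 3 came from skb frags (dma_map_page) */
        for (tb_idx = 2; tb_idx <= 3; tb_idx++)
            flags |= 1u << (tb_idx + CMD_TB_BITMAP_POS);

        for (tb_idx = 1; tb_idx < 5; tb_idx++)
            printf("TB %d -> %s\n", tb_idx,
                   (flags & (1u << (tb_idx + CMD_TB_BITMAP_POS))) ?
                   "dma_unmap_page" : "dma_unmap_single");
        return 0;
    }

Mixing the two unmap calls matters because mapping and unmapping must be symmetric per the DMA API; the BUILD_BUG_ON added later in this patch guards that the bitmap still fits in out_meta->flags.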
@@ -915,6 +923,7 @@
 		}
 	}
 
+	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
 	if (trans->cfg->base_params->num_of_queues > 20)
 		iwl_set_bits_prph(trans, SCD_GP_CTRL,
 				  SCD_GP_CTRL_ENABLE_31_QUEUES);
@@ -1320,13 +1329,24 @@
 	int idx;
 	u16 copy_size, cmd_size, scratch_size;
 	bool had_nocopy = false;
+	u8 group_id = iwl_cmd_groupid(cmd->id);
 	int i, ret;
 	u32 cmd_pos;
 	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
 	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
 
-	copy_size = sizeof(out_cmd->hdr);
-	cmd_size = sizeof(out_cmd->hdr);
+	if (WARN(!trans_pcie->wide_cmd_header &&
+		 group_id > IWL_ALWAYS_LONG_GROUP,
+		 "unsupported wide command %#x\n", cmd->id))
+		return -EINVAL;
+
+	if (group_id != 0) {
+		copy_size = sizeof(struct iwl_cmd_header_wide);
+		cmd_size = sizeof(struct iwl_cmd_header_wide);
+	} else {
+		copy_size = sizeof(struct iwl_cmd_header);
+		cmd_size = sizeof(struct iwl_cmd_header);
+	}
 
 	/* need one for the header if the first is NOCOPY */
 	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
@@ -1416,16 +1436,32 @@
 		out_meta->source = cmd;
 
 	/* set up the header */
+	if (group_id != 0) {
+		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
+		out_cmd->hdr_wide.group_id = group_id;
+		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
+		out_cmd->hdr_wide.length =
+			cpu_to_le16(cmd_size -
+				    sizeof(struct iwl_cmd_header_wide));
+		out_cmd->hdr_wide.reserved = 0;
+		out_cmd->hdr_wide.sequence =
+			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+						 INDEX_TO_SEQ(q->write_ptr));
 
-	out_cmd->hdr.cmd = cmd->id;
-	out_cmd->hdr.flags = 0;
-	out_cmd->hdr.sequence =
-		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
-					 INDEX_TO_SEQ(q->write_ptr));
+		cmd_pos = sizeof(struct iwl_cmd_header_wide);
+		copy_size = sizeof(struct iwl_cmd_header_wide);
+	} else {
+		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
+		out_cmd->hdr.sequence =
+			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+						 INDEX_TO_SEQ(q->write_ptr));
+		out_cmd->hdr.group_id = 0;
+
+		cmd_pos = sizeof(struct iwl_cmd_header);
+		copy_size = sizeof(struct iwl_cmd_header);
+	}
 
 	/* and copy the data that needs to be copied */
-	cmd_pos = offsetof(struct iwl_device_cmd, payload);
-	copy_size = sizeof(out_cmd->hdr);
 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
 		int copy;
 
@@ -1464,9 +1500,10 @@
 	}
 
 	IWL_DEBUG_HC(trans,
-		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
 		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
-		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+		     group_id, out_cmd->hdr.cmd,
+		     le16_to_cpu(out_cmd->hdr.sequence),
 		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
 
 	/* start the TFD with the scratchbuf */
@@ -1516,12 +1553,14 @@
 		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
 	}
 
+	BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
+		     sizeof(out_meta->flags) * BITS_PER_BYTE);
 	out_meta->flags = cmd->flags;
 	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
 		kzfree(txq->entries[idx].free_buf);
 	txq->entries[idx].free_buf = dup_buf;
 
-	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
+	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
 
 	/* start timer if queue currently empty */
 	if (q->read_ptr == q->write_ptr && txq->wd_timeout)
@@ -1552,15 +1591,13 @@
 /*
  * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
  * @rxb: Rx buffer to reclaim
- * @handler_status: return value of the handler of the command
- *	(put in setup_rx_handlers)
  *
  * If an Rx buffer has an async callback associated with it the callback
  * will be executed.  The attached skb (if present) will only be freed
  * if the callback returns 1
  */
 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
-			    struct iwl_rx_cmd_buffer *rxb, int handler_status)
+			    struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -1599,7 +1636,6 @@
 		meta->source->resp_pkt = pkt;
 		meta->source->_rx_page_addr = (unsigned long)page_address(p);
 		meta->source->_rx_page_order = trans_pcie->rx_page_order;
-		meta->source->handler_status = handler_status;
 	}
 
 	iwl_pcie_cmdq_reclaim(trans, txq_id, index);
@@ -1762,7 +1798,7 @@
 		      struct iwl_device_cmd *dev_cmd, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_hdr *hdr;
 	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 	struct iwl_cmd_meta *out_meta;
 	struct iwl_txq *txq;
@@ -1771,9 +1807,10 @@
 	void *tb1_addr;
 	u16 len, tb1_len, tb2_len;
 	bool wait_write_ptr;
-	__le16 fc = hdr->frame_control;
-	u8 hdr_len = ieee80211_hdrlen(fc);
+	__le16 fc;
+	u8 hdr_len;
 	u16 wifi_seq;
+	int i;
 
 	txq = &trans_pcie->txq[txq_id];
 	q = &txq->q;
@@ -1782,6 +1819,18 @@
 		      "TX on unused queue %d\n", txq_id))
 		return -EINVAL;
 
+	if (skb_is_nonlinear(skb) &&
+	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
+	    __skb_linearize(skb))
+		return -ENOMEM;
+
+	/* mac80211 always puts the full header into the SKB's head,
+	 * so there's no need to check if it's readable there
+	 */
+	hdr = (struct ieee80211_hdr *)skb->data;
+	fc = hdr->frame_control;
+	hdr_len = ieee80211_hdrlen(fc);
+
 	spin_lock(&txq->lock);
 
 	/* In AGG mode, the index in the ring must correspond to the WiFi
@@ -1812,6 +1861,7 @@
 
 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
 	out_meta = &txq->entries[q->write_ptr].meta;
+	out_meta->flags = 0;
 
 	/*
 	 * The second TB (tb1) points to the remainder of the TX command
@@ -1845,9 +1895,9 @@
 
 	/*
 	 * Set up TFD's third entry to point directly to remainder
-	 * of skb, if any (802.11 null frames have no payload).
+	 * of skb's head, if any
 	 */
-	tb2_len = skb->len - hdr_len;
+	tb2_len = skb_headlen(skb) - hdr_len;
 	if (tb2_len > 0) {
 		dma_addr_t tb2_phys = dma_map_single(trans->dev,
 						     skb->data + hdr_len,
@@ -1860,6 +1910,29 @@
 		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
 	}
 
+	/* set up the remaining entries to point to the data */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		dma_addr_t tb_phys;
+		int tb_idx;
+
+		if (!skb_frag_size(frag))
+			continue;
+
+		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+					   skb_frag_size(frag), DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+			iwl_pcie_tfd_unmap(trans, out_meta,
+					   &txq->tfds[q->write_ptr]);
+			goto out_err;
+		}
+		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
+						skb_frag_size(frag), false);
+
+		out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
+	}
+
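With fragment support, a TFD now spends TB0 on the scratch buffer, TB1 on the command remainder, TB2 on the skb head past the 802.11 header, and one TB per page fragment; skbs with more fragments than the TFD can take are linearized up front, as the check at the top of this function shows. A sketch of the TB budget, with IWL_PCIE_MAX_FRAGS as an assumed stand-in value:

    #include <stdio.h>

    /* assumed cap; in this series the limit is derived from the number
     * of TBs a TFD holds minus the reserved ones */
    #define IWL_PCIE_MAX_FRAGS 4

    int main(void)
    {
        unsigned int nr_frags = 6, tbs;

        if (nr_frags > IWL_PCIE_MAX_FRAGS) {
            printf("linearize: %u frags > %d\n", nr_frags,
                   IWL_PCIE_MAX_FRAGS);
            nr_frags = 0;   /* after linearizing, all data is in the head */
        }
        /* TB0 scratchbuf, TB1 cmd remainder, TB2 head, one TB per frag */
        tbs = 3 + nr_frags;
        printf("TBs used: %u\n", tbs);
        return 0;
    }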
 	/* Set up entry for this TFD in Tx byte-count array */
 	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
@@ -1869,7 +1942,7 @@
 			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
 			     skb->data + hdr_len, tb2_len);
 	trace_iwlwifi_dev_tx_data(trans->dev, skb,
-				  skb->data + hdr_len, tb2_len);
+				  hdr_len, skb->len - hdr_len);
 
 	wait_write_ptr = ieee80211_has_morefrags(fc);
 
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 99e873d..520bef8 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2399,6 +2399,7 @@
 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
 	ieee80211_hw_set(hw, MFP_CAPABLE);
 	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, TDLS_WIDER_BW);
 	if (rctbl)
 		ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
 
@@ -2676,7 +2677,7 @@
 	dev->netdev_ops = &hwsim_netdev_ops;
 	dev->destructor = free_netdev;
 	ether_setup(dev);
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->type = ARPHRD_IEEE80211_RADIOTAP;
 	eth_zero_addr(dev->dev_addr);
 	dev->dev_addr[0] = 0x12;
@@ -3120,8 +3121,10 @@
 		goto failure;
 
 	rc = netlink_register_notifier(&hwsim_netlink_notifier);
-	if (rc)
+	if (rc) {
+		genl_unregister_family(&hwsim_genl_family);
 		goto failure;
+	}
 
 	return 0;
 
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index 7217da4..57a80cf 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -112,7 +112,9 @@
 	if (!skb)
 		return;
 
-	ieee80211_rx_ni(dev->hw, skb);
+	spin_lock(&dev->mac_lock);
+	ieee80211_rx(dev->hw, skb);
+	spin_unlock(&dev->mac_lock);
 }
 
 static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
@@ -236,23 +238,42 @@
 	skb = q->e[q->start].skb;
 	trace_mt_tx_dma_done(dev, skb);
 
-	mt7601u_tx_status(dev, skb);
+	__skb_queue_tail(&dev->tx_skb_done, skb);
+	tasklet_schedule(&dev->tx_tasklet);
 
 	if (q->used == q->entries - q->entries / 8)
 		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));
 
 	q->start = (q->start + 1) % q->entries;
 	q->used--;
+out:
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
 
-	if (urb->status)
-		goto out;
+static void mt7601u_tx_tasklet(unsigned long data)
+{
+	struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
+	struct sk_buff_head skbs;
+	unsigned long flags;
+
+	__skb_queue_head_init(&skbs);
+
+	spin_lock_irqsave(&dev->tx_lock, flags);
 
 	set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
 	if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
 		queue_delayed_work(dev->stat_wq, &dev->stat_work,
 				   msecs_to_jiffies(10));
-out:
+
+	skb_queue_splice_init(&dev->tx_skb_done, &skbs);
+
 	spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+	while (!skb_queue_empty(&skbs)) {
+		struct sk_buff *skb = __skb_dequeue(&skbs);
+
+		mt7601u_tx_status(dev, skb);
+	}
 }
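The tx status path moves out of the USB completion handler: the DMA-done callback now only queues the skb on tx_skb_done and schedules tx_tasklet, which splices the list under tx_lock and reports status outside of it (and under the new mac_lock). A minimal pthread sketch of that splice-then-report pattern, with the kernel primitives (tasklet, sk_buff_head) stubbed out:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
    static int done_q[8], done_n;

    /* completion context: just queue and "schedule" the tasklet */
    static void complete_one(int id)
    {
        pthread_mutex_lock(&tx_lock);
        done_q[done_n++] = id;
        pthread_mutex_unlock(&tx_lock);
    }

    /* tasklet context: splice under the lock, report outside it */
    static void tx_tasklet(void)
    {
        int local[8], n, i;

        pthread_mutex_lock(&tx_lock);
        n = done_n;
        for (i = 0; i < n; i++)
            local[i] = done_q[i];
        done_n = 0;
        pthread_mutex_unlock(&tx_lock);

        for (i = 0; i < n; i++)
            printf("tx status for skb %d\n", local[i]);
    }

    int main(void)
    {
        complete_one(1);
        complete_one(2);
        tx_tasklet();
        return 0;
    }

Keeping the status report out of the completion handler means mac80211's status callbacks never run in hard-irq context and never nest inside tx_lock.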
 
 static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
@@ -475,6 +496,7 @@
 {
 	int ret = -ENOMEM;
 
+	tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev);
 	tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);
 
 	ret = mt7601u_alloc_tx(dev);
@@ -502,4 +524,6 @@
 
 	mt7601u_free_rx(dev);
 	mt7601u_free_tx(dev);
+
+	tasklet_kill(&dev->tx_tasklet);
 }
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index df3dd56..26190fd 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -454,8 +454,10 @@
 	spin_lock_init(&dev->tx_lock);
 	spin_lock_init(&dev->rx_lock);
 	spin_lock_init(&dev->lock);
+	spin_lock_init(&dev->mac_lock);
 	spin_lock_init(&dev->con_mon_lock);
 	atomic_set(&dev->avg_ampdu_len, 1);
+	skb_queue_head_init(&dev->tx_skb_done);
 
 	dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0);
 	if (!dev->stat_wq) {
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c
index 7514bce..e21c53e 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mac.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.c
@@ -181,7 +181,11 @@
 	}
 
 	mt76_mac_fill_tx_status(dev, &info, stat);
+
+	spin_lock_bh(&dev->mac_lock);
 	ieee80211_tx_status_noskb(dev->hw, sta, &info);
+	spin_unlock_bh(&dev->mac_lock);
+
 	rcu_read_unlock();
 }
 
diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
index 9102be6b..428bd2f 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
+++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
@@ -141,12 +141,13 @@
 /**
  * struct mt7601u_dev - adapter structure
  * @lock:		protects @wcid->tx_rate.
+ * @mac_lock:		locks out mac80211's tx status and rx paths.
  * @tx_lock:		protects @tx_q and changes of MT7601U_STATE_*_STATS
-			flags in @state.
+ *			flags in @state.
  * @rx_lock:		protects @rx_q.
  * @con_mon_lock:	protects @ap_bssid, @bcn_*, @avg_rssi.
  * @mutex:		ensures exclusive access from mac80211 callbacks.
- * @vendor_req_mutex:	ensures atomicity of vendor requests.
+ * @vendor_req_mutex:	protects @vend_buf, ensures atomicity of split writes.
  * @reg_atomic_mutex:	ensures atomicity of indirect register accesses
  *			(accesses to RF and BBP).
  * @hw_atomic_mutex:	ensures exclusive access to HW during critical
@@ -177,6 +178,7 @@
 	struct mt76_wcid __rcu *wcid[N_WCIDS];
 
 	spinlock_t lock;
+	spinlock_t mac_lock;
 
 	const u16 *beacon_offsets;
 
@@ -184,6 +186,8 @@
 	struct mt7601u_eeprom_params *ee;
 
 	struct mutex vendor_req_mutex;
+	void *vend_buf;
+
 	struct mutex reg_atomic_mutex;
 	struct mutex hw_atomic_mutex;
 
@@ -197,7 +201,9 @@
 
 	/* TX */
 	spinlock_t tx_lock;
+	struct tasklet_struct tx_tasklet;
 	struct mt7601u_tx_queue *tx_q;
+	struct sk_buff_head tx_skb_done;
 
 	atomic_t avg_ampdu_len;
 
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
index 0be2080..a0a33dc 100644
--- a/drivers/net/wireless/mediatek/mt7601u/tx.c
+++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -116,7 +116,10 @@
 	ieee80211_tx_info_clear_status(info);
 	info->status.rates[0].idx = -1;
 	info->flags |= IEEE80211_TX_STAT_ACK;
+
+	spin_lock(&dev->mac_lock);
 	ieee80211_tx_status(dev->hw, skb);
+	spin_unlock(&dev->mac_lock);
 }
 
 static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c
index 54dba40..416c604 100644
--- a/drivers/net/wireless/mediatek/mt7601u/usb.c
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.c
@@ -92,10 +92,9 @@
 	complete(cmpl);
 }
 
-static int
-__mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
-			 const u8 direction, const u16 val, const u16 offset,
-			 void *buf, const size_t buflen)
+int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+			   const u8 direction, const u16 val, const u16 offset,
+			   void *buf, const size_t buflen)
 {
 	int i, ret;
 	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
@@ -110,6 +109,8 @@
 		trace_mt_vend_req(dev, pipe, req, req_type, val, offset,
 				  buf, buflen, ret);
 
+		if (ret == -ENODEV)
+			set_bit(MT7601U_STATE_REMOVED, &dev->state);
 		if (ret >= 0 || ret == -ENODEV)
 			return ret;
 
@@ -122,25 +123,6 @@
 	return ret;
 }
 
-int
-mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
-		       const u8 direction, const u16 val, const u16 offset,
-		       void *buf, const size_t buflen)
-{
-	int ret;
-
-	mutex_lock(&dev->vendor_req_mutex);
-
-	ret = __mt7601u_vendor_request(dev, req, direction, val, offset,
-				       buf, buflen);
-	if (ret == -ENODEV)
-		set_bit(MT7601U_STATE_REMOVED, &dev->state);
-
-	mutex_unlock(&dev->vendor_req_mutex);
-
-	return ret;
-}
-
 void mt7601u_vendor_reset(struct mt7601u_dev *dev)
 {
 	mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
@@ -150,19 +132,21 @@
 u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset)
 {
 	int ret;
-	__le32 reg;
-	u32 val;
+	u32 val = ~0;
 
 	WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
 
+	mutex_lock(&dev->vendor_req_mutex);
+
 	ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN,
-				     0, offset, &reg, sizeof(reg));
-	val = le32_to_cpu(reg);
-	if (ret > 0 && ret != sizeof(reg)) {
+				     0, offset, dev->vend_buf, MT_VEND_BUF);
+	if (ret == MT_VEND_BUF)
+		val = get_unaligned_le32(dev->vend_buf);
+	else if (ret > 0)
 		dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
 			ret, offset);
-		val = ~0;
-	}
+
+	mutex_unlock(&dev->vendor_req_mutex);
 
 	trace_reg_read(dev, offset, val);
 	return val;
@@ -173,12 +157,17 @@
 {
 	int ret;
 
+	mutex_lock(&dev->vendor_req_mutex);
+
 	ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
 				     val & 0xffff, offset, NULL, 0);
-	if (ret)
-		return ret;
-	return mt7601u_vendor_request(dev, req, USB_DIR_OUT,
-				      val >> 16, offset + 2, NULL, 0);
+	if (!ret)
+		ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+					     val >> 16, offset + 2, NULL, 0);
+
+	mutex_unlock(&dev->vendor_req_mutex);
+
+	return ret;
 }
 
 void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
@@ -275,6 +264,12 @@
 
 	usb_set_intfdata(usb_intf, dev);
 
+	dev->vend_buf = devm_kmalloc(dev->dev, MT_VEND_BUF, GFP_KERNEL);
+	if (!dev->vend_buf) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	ret = mt7601u_assign_pipes(usb_intf, dev);
 	if (ret)
 		goto err;
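Moving the mutex from __mt7601u_vendor_request() up into the callers makes multi-transfer operations atomic: a 32-bit register write is issued as two 16-bit vendor requests that must not interleave with other accesses, and reads now land in vend_buf, a heap buffer that (unlike the old on-stack __le32) is safe to hand to USB control transfers. A sketch of the split write, with illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for one USB control transfer of a 16-bit half */
    static void vendor_write16(uint16_t val, uint16_t offset)
    {
        printf("ctrl write: val=0x%04x off=0x%04x\n", val, offset);
    }

    /* a 32-bit register write as two 16-bit halves; in the driver
     * both halves go out under a single vendor_req_mutex hold */
    static void write32_split(uint32_t val, uint16_t offset)
    {
        /* lock would be taken here */
        vendor_write16(val & 0xffff, offset);
        vendor_write16(val >> 16, offset + 2);
        /* ...and released here */
    }

    int main(void)
    {
        write32_split(0xdeadbeef, 0x1000);
        return 0;
    }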
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.h b/drivers/net/wireless/mediatek/mt7601u/usb.h
index 49e188f..bc18202 100644
--- a/drivers/net/wireless/mediatek/mt7601u/usb.h
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.h
@@ -23,6 +23,8 @@
 
 #define MT_VEND_DEV_MODE_RESET	1
 
+#define MT_VEND_BUF		sizeof(__le32)
+
 enum mt_vendor_req {
 	MT_VEND_DEV_MODE = 1,
 	MT_VEND_WRITE = 2,
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 48edf38..317d991 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -9,36 +9,36 @@
 	  mwifiex.
 
 config MWIFIEX_SDIO
-	tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897"
+	tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8997"
 	depends on MWIFIEX && MMC
 	select FW_LOADER
 	select WANT_DEV_COREDUMP
 	---help---
 	  This adds support for wireless adapters based on Marvell
-	  8786/8787/8797/8887/8897 chipsets with SDIO interface.
+	  8786/8787/8797/8887/8897/8997 chipsets with SDIO interface.
 
 	  If you choose to build it as a module, it will be called
 	  mwifiex_sdio.
 
 config MWIFIEX_PCIE
-	tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897"
+	tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897/8997"
 	depends on MWIFIEX && PCI
 	select FW_LOADER
 	select WANT_DEV_COREDUMP
 	---help---
 	  This adds support for wireless adapters based on Marvell
-	  8766/8897 chipsets with PCIe interface.
+	  8766/8897/8997 chipsets with PCIe interface.
 
 	  If you choose to build it as a module, it will be called
 	  mwifiex_pcie.
 
 config MWIFIEX_USB
-	tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897"
+	tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897/8997"
 	depends on MWIFIEX && USB
 	select FW_LOADER
 	---help---
 	  This adds support for wireless adapters based on Marvell
-	  8797/8897 chipset with USB interface.
+	  8766/8797/8897/8997 chipsets with USB interface.
 
 	  If you choose to build it as a module, it will be called
 	  mwifiex_usb.
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index b15e4c7..ff63cb5 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -19,6 +19,7 @@
 
 #include "cfg80211.h"
 #include "main.h"
+#include "11n.h"
 
 static char *reg_alpha2;
 module_param(reg_alpha2, charp, 0);
@@ -34,12 +35,38 @@
 	},
 };
 
-static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
+static const struct ieee80211_iface_combination
+mwifiex_iface_comb_ap_sta = {
 	.limits = mwifiex_ap_sta_limits,
 	.num_different_channels = 1,
 	.n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
 	.max_interfaces = MWIFIEX_MAX_BSS_NUM,
 	.beacon_int_infra_match = true,
+	.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+				BIT(NL80211_CHAN_WIDTH_20) |
+				BIT(NL80211_CHAN_WIDTH_40),
+};
+
+static const struct ieee80211_iface_combination
+mwifiex_iface_comb_ap_sta_vht = {
+	.limits = mwifiex_ap_sta_limits,
+	.num_different_channels = 1,
+	.n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
+	.max_interfaces = MWIFIEX_MAX_BSS_NUM,
+	.beacon_int_infra_match = true,
+	.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+				BIT(NL80211_CHAN_WIDTH_20) |
+				BIT(NL80211_CHAN_WIDTH_40) |
+				BIT(NL80211_CHAN_WIDTH_80),
+};
+
+static const struct
+ieee80211_iface_combination mwifiex_iface_comb_ap_sta_drcs = {
+	.limits = mwifiex_ap_sta_limits,
+	.num_different_channels = 2,
+	.n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
+	.max_interfaces = MWIFIEX_MAX_BSS_NUM,
+	.beacon_int_infra_match = true,
 };
 
 /*
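Three interface-combination tables now coexist; the wiphy registration code later in this patch advertises exactly one of them: the two-channel DRCS table when the firmware reports DRCS support, the VHT table (which adds the 80 MHz radar-detect width) on 11ac hardware, and the base 20/40 MHz table otherwise. A sketch of that selection with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    static const char *pick_comb(bool drcs, bool is_11ac)
    {
        if (drcs)
            return "ap_sta_drcs";   /* 2 channels, no radar widths */
        if (is_11ac)
            return "ap_sta_vht";    /* adds 80 MHz radar detect */
        return "ap_sta";            /* 20/40 MHz radar detect */
    }

    int main(void)
    {
        printf("%s\n", pick_comb(true,  true));   /* ap_sta_drcs */
        printf("%s\n", pick_comb(false, true));   /* ap_sta_vht */
        printf("%s\n", pick_comb(false, false));  /* ap_sta */
        return 0;
    }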
@@ -441,7 +468,7 @@
  *      - Country codes
  *      - Sub bands (first channel, number of channels, maximum Tx power)
  */
-static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
+int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
 {
 	u8 no_of_triplet = 0;
 	struct ieee80211_country_ie_triplet *t;
@@ -804,10 +831,13 @@
 		priv->bss_type = MWIFIEX_BSS_TYPE_STA;
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
-	case NL80211_IFTYPE_P2P_GO:
 		priv->bss_role =  MWIFIEX_BSS_ROLE_STA;
 		priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
 		break;
+	case NL80211_IFTYPE_P2P_GO:
+		priv->bss_role =  MWIFIEX_BSS_ROLE_UAP;
+		priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
+		break;
 	case NL80211_IFTYPE_AP:
 		priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
 		priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
@@ -1115,8 +1145,10 @@
 	case NL80211_IFTYPE_P2P_GO:
 		switch (type) {
 		case NL80211_IFTYPE_STATION:
-			if (mwifiex_cfg80211_init_p2p_client(priv))
+			if (mwifiex_cfg80211_deinit_p2p(priv))
 				return -EFAULT;
+			priv->adapter->curr_iface_comb.p2p_intf--;
+			priv->adapter->curr_iface_comb.sta_intf++;
 			dev->ieee80211_ptr->iftype = type;
 			break;
 		case NL80211_IFTYPE_ADHOC:
@@ -2788,6 +2820,7 @@
 {
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
 	struct mwifiex_adapter *adapter = priv->adapter;
+	struct sk_buff *skb, *tmp;
 
 #ifdef CONFIG_DEBUG_FS
 	mwifiex_dev_debugfs_remove(priv);
@@ -2795,6 +2828,9 @@
 
 	mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 
+	skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
+		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+
 	if (netif_carrier_ok(priv->netdev))
 		netif_carrier_off(priv->netdev);
 
@@ -2954,7 +2990,6 @@
 					MWIFIEX_MEF_MAX_BYTESEQ)) {
 			mwifiex_dbg(priv->adapter, ERROR,
 				    "Pattern not supported\n");
-			kfree(mef_entry);
 			return -EOPNOTSUPP;
 		}
 
@@ -3036,9 +3071,12 @@
 
 	mwifiex_set_auto_arp_mef_entry(priv, &mef_entry[0]);
 
-	if (wowlan->n_patterns || wowlan->magic_pkt)
+	if (wowlan->n_patterns || wowlan->magic_pkt) {
 		ret = mwifiex_set_wowlan_mef_entry(priv, &mef_cfg,
 						   &mef_entry[1], wowlan);
+		if (ret)
+			goto err;
+	}
 
 	if (!mef_cfg.criteria)
 		mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST |
@@ -3048,6 +3086,8 @@
 	ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG,
 			HostCmd_ACT_GEN_SET, 0,
 			&mef_cfg, true);
+
+err:
 	kfree(mef_entry);
 	return ret;
 }
@@ -3360,6 +3400,72 @@
 }
 
 static int
+mwifiex_cfg80211_tdls_chan_switch(struct wiphy *wiphy, struct net_device *dev,
+				  const u8 *addr, u8 oper_class,
+				  struct cfg80211_chan_def *chandef)
+{
+	struct mwifiex_sta_node *sta_ptr;
+	unsigned long flags;
+	u16 chan;
+	u8 second_chan_offset, band;
+	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+	sta_ptr = mwifiex_get_sta_entry(priv, addr);
+	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+	if (!sta_ptr) {
+		wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
+			  __func__, addr);
+		return -ENOENT;
+	}
+
+	if (!(sta_ptr->tdls_cap.extcap.ext_capab[3] &
+	      WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)) {
+		wiphy_err(wiphy, "%pM does not support tdls channel switch\n",
+			  addr);
+		return -ENOENT;
+	}
+
+	if (sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
+	    sta_ptr->tdls_status == TDLS_IN_OFF_CHAN) {
+		wiphy_err(wiphy, "channel switch is running, abort request\n");
+		return -EALREADY;
+	}
+
+	chan = chandef->chan->hw_value;
+	second_chan_offset = mwifiex_get_sec_chan_offset(chan);
+	band = chandef->chan->band;
+	mwifiex_start_tdls_cs(priv, addr, chan, second_chan_offset, band);
+
+	return 0;
+}
+
+static void
+mwifiex_cfg80211_tdls_cancel_chan_switch(struct wiphy *wiphy,
+					 struct net_device *dev,
+					 const u8 *addr)
+{
+	struct mwifiex_sta_node *sta_ptr;
+	unsigned long flags;
+	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+	sta_ptr = mwifiex_get_sta_entry(priv, addr);
+	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+	if (!sta_ptr) {
+		wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
+			  __func__, addr);
+	} else if (!(sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
+		     sta_ptr->tdls_status == TDLS_IN_BASE_CHAN ||
+		     sta_ptr->tdls_status == TDLS_IN_OFF_CHAN)) {
+		wiphy_err(wiphy, "tdls chan switch not initiated by %pM\n",
+			  addr);
+	} else {
+		mwifiex_stop_tdls_cs(priv, addr);
+	}
+}
+
+static int
 mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev,
 			     const u8 *mac, struct station_parameters *params)
 {
@@ -3575,6 +3681,8 @@
 	.set_coalesce = mwifiex_cfg80211_set_coalesce,
 	.tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
 	.tdls_oper = mwifiex_cfg80211_tdls_oper,
+	.tdls_channel_switch = mwifiex_cfg80211_tdls_chan_switch,
+	.tdls_cancel_channel_switch = mwifiex_cfg80211_tdls_cancel_chan_switch,
 	.add_station = mwifiex_cfg80211_add_station,
 	.change_station = mwifiex_cfg80211_change_station,
 	.get_channel = mwifiex_cfg80211_get_channel,
@@ -3672,7 +3780,12 @@
 	else
 		wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
 
-	wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
+	if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+		wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
+	else if (adapter->is_hw_11ac_capable)
+		wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_vht;
+	else
+		wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
 	wiphy->n_iface_combinations = 1;
 
 	/* Initialize cipher suits */
@@ -3709,6 +3822,9 @@
 			   NL80211_FEATURE_INACTIVITY_TIMER |
 			   NL80211_FEATURE_NEED_OBSS_SCAN;
 
+	if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
+		wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+
 	if (adapter->fw_api_ver == MWIFIEX_FW_V15)
 		wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
 
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 207da40..45ae38e 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -167,8 +167,6 @@
 		mwifiex_dbg(adapter, ERROR,
 			    "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
 			cmd_code);
-		if (cmd_node->wait_q_enabled)
-			mwifiex_complete_cmd(adapter, cmd_node);
 		mwifiex_recycle_cmd_node(adapter, cmd_node);
 		queue_work(adapter->workqueue, &adapter->main_work);
 		return -1;
@@ -809,17 +807,6 @@
 	adapter->is_cmd_timedout = 0;
 
 	resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
-	if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
-		mwifiex_dbg(adapter, ERROR,
-			    "CMD_RESP: %#x been canceled\n",
-			    le16_to_cpu(resp->command));
-		mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
-		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
-		adapter->curr_cmd = NULL;
-		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
-		return -1;
-	}
-
 	if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
 		/* Copy original response back to response buffer */
 		struct mwifiex_ds_misc_cmd *hostcmd;
@@ -989,12 +976,13 @@
 
 		if (cmd_node->wait_q_enabled) {
 			adapter->cmd_wait_q.status = -ETIMEDOUT;
-			wake_up_interruptible(&adapter->cmd_wait_q.wait);
 			mwifiex_cancel_pending_ioctl(adapter);
 		}
 	}
-	if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
+	if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
 		mwifiex_init_fw_complete(adapter);
+		return;
+	}
 
 	if (adapter->if_ops.device_dump)
 		adapter->if_ops.device_dump(adapter);
@@ -1024,6 +1012,7 @@
 		adapter->curr_cmd->wait_q_enabled = false;
 		adapter->cmd_wait_q.status = -1;
 		mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+		/* no recycle - the caller is probably waiting for the response */
 	}
 	/* Cancel all pending command */
 	spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
@@ -1032,11 +1021,8 @@
 		list_del(&cmd_node->list);
 		spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
 
-		if (cmd_node->wait_q_enabled) {
+		if (cmd_node->wait_q_enabled)
 			adapter->cmd_wait_q.status = -1;
-			mwifiex_complete_cmd(adapter, cmd_node);
-			cmd_node->wait_q_enabled = false;
-		}
 		mwifiex_recycle_cmd_node(adapter, cmd_node);
 		spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
 	}
@@ -1094,12 +1080,18 @@
 	    (adapter->curr_cmd->wait_q_enabled)) {
 		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
 		cmd_node = adapter->curr_cmd;
-		cmd_node->wait_q_enabled = false;
-		cmd_node->cmd_flag |= CMD_F_CANCELED;
-		mwifiex_recycle_cmd_node(adapter, cmd_node);
-		mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+		/* Setting curr_cmd to NULL is quite dangerous, because
+		 * mwifiex_process_cmdresp checks curr_cmd to be != NULL
+		 * at the beginning and then relies on it and dereferences
+		 * it at will.
+		 * This probably works since mwifiex_cmd_timeout_func
+		 * is the only caller of this function and no response
+		 * can be processed at that point.
+		 */
 		adapter->curr_cmd = NULL;
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+
+		mwifiex_recycle_cmd_node(adapter, cmd_node);
 	}
 
 	/* Cancel all pending scan command */
@@ -1129,7 +1121,6 @@
 			}
 		}
 	}
-	adapter->cmd_wait_q.status = -1;
 }
 
 /*
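The comment above describes a detach-then-recycle pattern that is worth spelling out. Below is a minimal userspace model of that pattern, with a pthread mutex standing in for mwifiex_cmd_lock; the names here are illustrative, not driver symbols.

#include <pthread.h>
#include <stdlib.h>

struct cmd_node { int id; };

static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cmd_node *curr_cmd;

/* Unlink the node under the lock so a concurrent response handler
 * sees either the full node or NULL, then recycle it lock-free.
 */
static void cancel_current_cmd(void)
{
	struct cmd_node *node;

	pthread_mutex_lock(&cmd_lock);
	node = curr_cmd;
	curr_cmd = NULL;
	pthread_mutex_unlock(&cmd_lock);

	free(node);	/* stands in for mwifiex_recycle_cmd_node() */
}
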
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 51e3447..098e1f1 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -141,6 +141,9 @@
 	TDLS_SETUP_COMPLETE,
 	TDLS_SETUP_FAILURE,
 	TDLS_LINK_TEARDOWN,
+	TDLS_CHAN_SWITCHING,
+	TDLS_IN_BASE_CHAN,
+	TDLS_IN_OFF_CHAN,
 };
 
 enum mwifiex_tdls_error_code {
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index cd09051..3ec2ac8 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -169,14 +169,17 @@
 #define TLV_TYPE_UAP_PS_AO_TIMER    (PROPRIETARY_TLV_BASE_ID + 123)
 #define TLV_TYPE_PWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 145)
 #define TLV_TYPE_GWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 146)
+#define TLV_TYPE_TX_PAUSE           (PROPRIETARY_TLV_BASE_ID + 148)
 #define TLV_TYPE_COALESCE_RULE      (PROPRIETARY_TLV_BASE_ID + 154)
 #define TLV_TYPE_KEY_PARAM_V2       (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_MULTI_CHAN_INFO    (PROPRIETARY_TLV_BASE_ID + 183)
 #define TLV_TYPE_TDLS_IDLE_TIMEOUT  (PROPRIETARY_TLV_BASE_ID + 194)
 #define TLV_TYPE_SCAN_CHANNEL_GAP   (PROPRIETARY_TLV_BASE_ID + 197)
 #define TLV_TYPE_API_REV            (PROPRIETARY_TLV_BASE_ID + 199)
 #define TLV_TYPE_CHANNEL_STATS      (PROPRIETARY_TLV_BASE_ID + 198)
 #define TLV_BTCOEX_WL_AGGR_WINSIZE  (PROPRIETARY_TLV_BASE_ID + 202)
 #define TLV_BTCOEX_WL_SCANTIME      (PROPRIETARY_TLV_BASE_ID + 203)
+#define TLV_TYPE_BSS_MODE           (PROPRIETARY_TLV_BASE_ID + 206)
 
 #define MWIFIEX_TX_DATA_BUF_SIZE_2K        2048
 
@@ -200,6 +203,7 @@
 
 #define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
 #define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
+#define ISSUPP_DRCS_ENABLED(FwCapInfo) (FwCapInfo & BIT(15))
 #define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16))
 
 #define MWIFIEX_DEF_HT_CAP	(IEEE80211_HT_CAP_DSSSCCK40 | \
@@ -359,6 +363,8 @@
 #define HostCmd_CMD_MGMT_FRAME_REG                    0x010c
 #define HostCmd_CMD_REMAIN_ON_CHAN                    0x010d
 #define HostCmd_CMD_11AC_CFG			      0x0112
+#define HostCmd_CMD_TDLS_CONFIG                       0x0100
+#define HostCmd_CMD_MC_POLICY                         0x0121
 #define HostCmd_CMD_TDLS_OPER                         0x0122
 #define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG               0x0223
 
@@ -432,7 +438,6 @@
 
 
 #define CMD_F_HOSTCMD           (1 << 0)
-#define CMD_F_CANCELED          (1 << 1)
 
 #define HostCmd_CMD_ID_MASK             0x0fff
 
@@ -509,8 +514,10 @@
 #define EVENT_TDLS_GENERIC_EVENT        0x00000052
 #define EVENT_RADAR_DETECTED		0x00000053
 #define EVENT_CHANNEL_REPORT_RDY        0x00000054
+#define EVENT_TX_DATA_PAUSE             0x00000055
 #define EVENT_EXT_SCAN_REPORT           0x00000058
 #define EVENT_REMAIN_ON_CHAN_EXPIRED    0x0000005f
+#define EVENT_MULTI_CHAN_INFO           0x0000006a
 #define EVENT_TX_STATUS_REPORT		0x00000074
 #define EVENT_BT_COEX_WLAN_PARA_CHANGE	0X00000076
 
@@ -545,7 +552,27 @@
 #define ACT_TDLS_DELETE            0x00
 #define ACT_TDLS_CREATE            0x01
 #define ACT_TDLS_CONFIG            0x02
-#define TDLS_EVENT_LINK_TEAR_DOWN  3
+
+#define TDLS_EVENT_LINK_TEAR_DOWN      3
+#define TDLS_EVENT_CHAN_SWITCH_RESULT  7
+#define TDLS_EVENT_START_CHAN_SWITCH   8
+#define TDLS_EVENT_CHAN_SWITCH_STOPPED 9
+
+#define TDLS_BASE_CHANNEL	       0
+#define TDLS_OFF_CHANNEL	       1
+
+#define ACT_TDLS_CS_ENABLE_CONFIG 0x00
+#define ACT_TDLS_CS_INIT	  0x06
+#define ACT_TDLS_CS_STOP	  0x07
+#define ACT_TDLS_CS_PARAMS	  0x08
+
+#define MWIFIEX_DEF_CS_UNIT_TIME	2
+#define MWIFIEX_DEF_CS_THR_OTHERLINK	10
+#define MWIFIEX_DEF_THR_DIRECTLINK	0
+#define MWIFIEX_DEF_CS_TIME		10
+#define MWIFIEX_DEF_CS_TIMEOUT		16
+#define MWIFIEX_DEF_CS_REG_CLASS	12
+#define MWIFIEX_DEF_CS_PERIODICITY	1
 
 #define MWIFIEX_FW_V15		   15
 
@@ -658,6 +685,7 @@
 enum mwifiex_chan_scan_mode_bitmasks {
 	MWIFIEX_PASSIVE_SCAN = BIT(0),
 	MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
+	MWIFIEX_HIDDEN_SSID_REPORT = BIT(4),
 };
 
 struct mwifiex_chan_scan_param_set {
@@ -1131,6 +1159,13 @@
 	u8 ht_info;
 } __packed;
 
+struct mwifiex_tx_pause_tlv {
+	struct mwifiex_ie_types_header header;
+	u8 peermac[ETH_ALEN];
+	u8 tx_pause;
+	u8 pkt_cnt;
+} __packed;
+
 enum Host_Sleep_Action {
 	HS_CONFIGURE = 0x0001,
 	HS_ACTIVATE  = 0x0002,
@@ -1249,6 +1284,36 @@
 	u8 peer_mac[ETH_ALEN];
 } __packed;
 
+struct mwifiex_tdls_config {
+	__le16 enable;
+};
+
+struct mwifiex_tdls_config_cs_params {
+	u8 unit_time;
+	u8 thr_otherlink;
+	u8 thr_directlink;
+};
+
+struct mwifiex_tdls_init_cs_params {
+	u8 peer_mac[ETH_ALEN];
+	u8 primary_chan;
+	u8 second_chan_offset;
+	u8 band;
+	__le16 switch_time;
+	__le16 switch_timeout;
+	u8 reg_class;
+	u8 periodicity;
+} __packed;
+
+struct mwifiex_tdls_stop_cs_params {
+	u8 peer_mac[ETH_ALEN];
+};
+
+struct host_cmd_ds_tdls_config {
+	__le16 tdls_action;
+	u8 tdls_data[1];
+} __packed;
+
 struct mwifiex_chan_desc {
 	__le16 start_freq;
 	u8 chan_width;
@@ -1370,6 +1435,11 @@
 	u8    tlv_buffer[1];
 } __packed;
 
+struct mwifiex_ie_types_bss_mode {
+	struct mwifiex_ie_types_header  header;
+	u8 bss_mode;
+} __packed;
+
 struct mwifiex_ie_types_bss_scan_rsp {
 	struct mwifiex_ie_types_header header;
 	u8 bssid[ETH_ALEN];
@@ -1908,6 +1978,12 @@
 	__le32 passed;
 } __packed;
 
+struct mwifiex_ie_types_multi_chan_info {
+	struct mwifiex_ie_types_header header;
+	__le16 status;
+	u8 tlv_buffer[0];
+} __packed;
+
 struct meas_rpt_map {
 	u8 rssi:3;
 	u8 unmeasured:1;
@@ -1927,10 +2003,18 @@
 	__le16 events;
 } __packed;
 
+struct chan_switch_result {
+	u8 cur_chan;
+	u8 status;
+	u8 reason;
+} __packed;
+
 struct mwifiex_tdls_generic_event {
 	__le16 type;
 	u8 peer_mac[ETH_ALEN];
 	union {
+		struct chan_switch_result switch_result;
+		u8 cs_stop_reason;
 		__le16 reason_code;
 		__le16 reserved;
 	} u;
@@ -1971,6 +2055,11 @@
 	struct coalesce_receive_filt_rule rule[0];
 } __packed;
 
+struct host_cmd_ds_multi_chan_policy {
+	__le16 action;
+	__le16 policy;
+} __packed;
+
 struct host_cmd_ds_command {
 	__le16 command;
 	__le16 size;
@@ -2035,9 +2124,11 @@
 		struct host_cmd_ds_sta_list sta_list;
 		struct host_cmd_11ac_vht_cfg vht_cfg;
 		struct host_cmd_ds_coalesce_cfg coalesce_cfg;
+		struct host_cmd_ds_tdls_config tdls_config;
 		struct host_cmd_ds_tdls_oper tdls_oper;
 		struct host_cmd_ds_chan_rpt_req chan_rpt_req;
 		struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
+		struct host_cmd_ds_multi_chan_policy mc_policy;
 	} params;
 } __packed;
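For reference, the way host_cmd_ds_tdls_config is sized can be modeled in userspace: a fixed action word plus an action-dependent payload, with the total accumulated the way le16_add_cpu() does in the command path. A hedged sketch; the buffer and values are illustrative, not firmware definitions (the 15-byte payload matches the packed field sizes of mwifiex_tdls_init_cs_params above).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tdls_cfg {
	uint16_t action;	/* little-endian on the wire */
	uint8_t data[64];	/* stand-in for the tdls_data[1] tail */
};

int main(void)
{
	struct tdls_cfg cmd = { .action = 0x06 /* e.g. CS init */ };
	uint8_t payload[15] = { 0 };	/* init_cs_params-sized blob */
	uint16_t size = sizeof(cmd.action);

	memcpy(cmd.data, payload, sizeof(payload));
	size += sizeof(payload);	/* le16_add_cpu(&cmd->size, len) */
	printf("wire size: %u bytes\n", size);
	return 0;
}
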
 
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index 0ba8945..abf52d2 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -409,6 +409,8 @@
 	int ret;
 
 	ret = mwifiex_uap_parse_tail_ies(priv, info);
+
+	if (ret)
 		return ret;
 
 	return mwifiex_set_mgmt_beacon_data_ies(priv, info);
@@ -477,6 +479,7 @@
 						   ar_ie, &priv->assocresp_idx);
 
 done:
+	kfree(gen_ie);
 	kfree(beacon_ie);
 	kfree(pr_ie);
 	kfree(ar_ie);
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index df7fdc0..5d3ae63 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -77,7 +77,7 @@
 
 	priv->media_connected = false;
 	eth_broadcast_addr(priv->curr_addr);
-
+	priv->port_open = false;
 	priv->pkt_tx_ctrl = 0;
 	priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
 	priv->data_rate = 0;	/* Initially indicate the rate as auto */
@@ -301,7 +301,7 @@
 	adapter->iface_limit.sta_intf = MWIFIEX_MAX_STA_NUM;
 	adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM;
 	adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
-
+	adapter->active_scan_triggered = false;
 	setup_timer(&adapter->wakeup_timer, wakeup_timer_fn,
 		    (unsigned long)adapter);
 }
@@ -499,6 +499,7 @@
 		INIT_LIST_HEAD(&priv->sta_list);
 		INIT_LIST_HEAD(&priv->auto_tdls_list);
 		skb_queue_head_init(&priv->tdls_txq);
+		skb_queue_head_init(&priv->bypass_txq);
 
 		spin_lock_init(&priv->tx_ba_stream_tbl_lock);
 		spin_lock_init(&priv->rx_reorder_tbl_lock);
@@ -550,11 +551,6 @@
 		}
 	}
 
-	if (adapter->if_ops.init_fw_port) {
-		if (adapter->if_ops.init_fw_port(adapter))
-			return -1;
-	}
-
 	for (i = 0; i < adapter->priv_num; i++) {
 		if (adapter->priv[i]) {
 			ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta,
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 56b024a..3cda1f9 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -783,6 +783,8 @@
 
 	if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
 		priv->scan_block = true;
+	else
+		priv->port_open = true;
 
 done:
 	/* Need to indicate IOCTL complete */
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 3ba4e0e..278dc94 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -276,6 +276,7 @@
 		     !adapter->pm_wakeup_fw_try) &&
 		    (is_command_pending(adapter) ||
 		     !skb_queue_empty(&adapter->tx_data_q) ||
+		     !mwifiex_bypass_txlist_empty(adapter) ||
 		     !mwifiex_wmm_lists_empty(adapter))) {
 			adapter->pm_wakeup_fw_try = true;
 			mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
@@ -299,9 +300,16 @@
 
 			if ((!adapter->scan_chan_gap_enabled &&
 			     adapter->scan_processing) || adapter->data_sent ||
+			     mwifiex_is_tdls_chan_switching
+			     (mwifiex_get_priv(adapter,
+					       MWIFIEX_BSS_ROLE_STA)) ||
 			    (mwifiex_wmm_lists_empty(adapter) &&
+			     mwifiex_bypass_txlist_empty(adapter) &&
 			     skb_queue_empty(&adapter->tx_data_q))) {
 				if (adapter->cmd_sent || adapter->curr_cmd ||
+					!mwifiex_is_send_cmd_allowed
+						(mwifiex_get_priv(adapter,
+						MWIFIEX_BSS_ROLE_STA)) ||
 				    (!is_command_pending(adapter)))
 					break;
 			}
@@ -342,7 +350,9 @@
 			continue;
 		}
 
-		if (!adapter->cmd_sent && !adapter->curr_cmd) {
+		if (!adapter->cmd_sent && !adapter->curr_cmd &&
+		    mwifiex_is_send_cmd_allowed
+		    (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
 			if (mwifiex_exec_next_cmd(adapter) == -1) {
 				ret = -1;
 				break;
@@ -365,7 +375,25 @@
 
 		if ((adapter->scan_chan_gap_enabled ||
 		     !adapter->scan_processing) &&
-		    !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) {
+		    !adapter->data_sent &&
+		    !mwifiex_bypass_txlist_empty(adapter) &&
+		    !mwifiex_is_tdls_chan_switching
+			(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
+			mwifiex_process_bypass_tx(adapter);
+			if (adapter->hs_activated) {
+				adapter->is_hs_configured = false;
+				mwifiex_hs_activated_event
+					(mwifiex_get_priv
+					 (adapter, MWIFIEX_BSS_ROLE_ANY),
+					 false);
+			}
+		}
+
+		if ((adapter->scan_chan_gap_enabled ||
+		     !adapter->scan_processing) &&
+		    !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter) &&
+		    !mwifiex_is_tdls_chan_switching
+			(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
 			mwifiex_wmm_process_tx(adapter);
 			if (adapter->hs_activated) {
 				adapter->is_hs_configured = false;
@@ -379,6 +407,7 @@
 		if (adapter->delay_null_pkt && !adapter->cmd_sent &&
 		    !adapter->curr_cmd && !is_command_pending(adapter) &&
 		    (mwifiex_wmm_lists_empty(adapter) &&
+		     mwifiex_bypass_txlist_empty(adapter) &&
 		     skb_queue_empty(&adapter->tx_data_q))) {
 			if (!mwifiex_send_null_packet
 			    (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
@@ -649,6 +678,26 @@
 	return 0;
 }
 
+static bool
+mwifiex_bypass_tx_queue(struct mwifiex_private *priv,
+			struct sk_buff *skb)
+{
+	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+
+	if (ntohs(eth_hdr->h_proto) == ETH_P_PAE ||
+	    mwifiex_is_skb_mgmt_frame(skb) ||
+	    (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
+	     ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+	     (ntohs(eth_hdr->h_proto) == ETH_P_TDLS))) {
+		mwifiex_dbg(priv->adapter, DATA,
+			    "bypass txqueue; eth type %#x, mgmt %d\n",
+			     ntohs(eth_hdr->h_proto),
+			     mwifiex_is_skb_mgmt_frame(skb));
+		return true;
+	}
+
+	return false;
+}
+
 /*
  * Add buffer into wmm tx queue and queue work to transmit it.
  */
@@ -666,8 +715,14 @@
 		}
 	}
 
-	atomic_inc(&priv->adapter->tx_pending);
-	mwifiex_wmm_add_buf_txqueue(priv, skb);
+	if (mwifiex_bypass_tx_queue(priv, skb)) {
+		atomic_inc(&priv->adapter->tx_pending);
+		atomic_inc(&priv->adapter->bypass_tx_pending);
+		mwifiex_wmm_add_buf_bypass_txqueue(priv, skb);
+	} else {
+		atomic_inc(&priv->adapter->tx_pending);
+		mwifiex_wmm_add_buf_txqueue(priv, skb);
+	}
 
 	mwifiex_queue_main_work(priv->adapter);
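The bypass rule implemented by mwifiex_bypass_tx_queue is small enough to restate as a standalone predicate: EAPOL frames, management frames, and TDLS setup frames must not queue behind bulk data in the WMM queues. A userspace model, using the standard ethertypes (0x888E for EAPOL, 0x890D for TDLS); the function name is illustrative.

#include <stdbool.h>
#include <stdint.h>

#define ETH_P_PAE  0x888E	/* 802.1X authentication */
#define ETH_P_TDLS 0x890D	/* TDLS setup/discovery */

static bool should_bypass_wmm(uint16_t ethertype, bool is_mgmt,
			      bool sta_tdls_capable)
{
	if (ethertype == ETH_P_PAE || is_mgmt)
		return true;
	return sta_tdls_capable && ethertype == ETH_P_TDLS;
}
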
 
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index ae98b5b..6b95121 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -281,6 +281,7 @@
 	u8 amsdu_in_ampdu;
 	u16 total_pkt_count;
 	bool tdls_link;
+	bool tx_paused;
 };
 
 struct mwifiex_tid_tbl {
@@ -294,6 +295,7 @@
 struct mwifiex_wmm_desc {
 	struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID];
 	u32 packets_out[MAX_NUM_TID];
+	u32 pkts_paused[MAX_NUM_TID];
 	/* spin lock to protect ra_list */
 	spinlock_t ra_list_spinlock;
 	struct mwifiex_wmm_ac_status ac_status[IEEE80211_NUM_ACS];
@@ -517,6 +519,7 @@
 	u8 frame_type;
 	u8 curr_addr[ETH_ALEN];
 	u8 media_connected;
+	u8 port_open;
 	u32 num_tx_timeout;
 	/* track consecutive timeout */
 	u8 tx_timeout_cnt;
@@ -662,6 +665,8 @@
 	struct cfg80211_beacon_data beacon_after;
 	struct mwifiex_11h_intf_state state_11h;
 	struct mwifiex_ds_mem_rw mem_rw;
+	struct sk_buff_head bypass_txq;
+	struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX];
 };
 
 
@@ -768,6 +773,7 @@
 	u8 tdls_status;
 	struct mwifiex_tdls_capab tdls_cap;
 	struct mwifiex_station_stats stats;
+	u8 tx_pause;
 };
 
 struct mwifiex_auto_tdls_peer {
@@ -831,6 +837,7 @@
 	wait_queue_head_t init_wait_q;
 	void *card;
 	struct mwifiex_if_ops if_ops;
+	atomic_t bypass_tx_pending;
 	atomic_t rx_pending;
 	atomic_t tx_pending;
 	atomic_t cmd_pending;
@@ -979,6 +986,8 @@
 	u8 coex_win_size;
 	u8 coex_tx_win_size;
 	u8 coex_rx_win_size;
+	bool drcs_enabled;
+	u8 active_scan_triggered;
 };
 
 void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1330,6 +1339,21 @@
 	return 0;
 }
 
+static inline u8 mwifiex_is_tdls_link_setup(u8 status)
+{
+	switch (status) {
+	case TDLS_SETUP_COMPLETE:
+	case TDLS_CHAN_SWITCHING:
+	case TDLS_IN_BASE_CHAN:
+	case TDLS_IN_OFF_CHAN:
+		return true;
+	default:
+		break;
+	}
+
+	return false;
+}
+
 int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
 			     u32 func_init_shutdown);
 int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
@@ -1458,6 +1482,9 @@
 mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac);
 struct mwifiex_sta_node *
 mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac);
+u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv);
+u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv);
+u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv);
 int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
 				 u8 action_code, u8 dialog_token,
 				 u16 status_code, const u8 *extra_ies,
@@ -1488,6 +1515,13 @@
 void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac);
 void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv);
 void mwifiex_clean_auto_tdls(struct mwifiex_private *priv);
+int mwifiex_config_tdls_enable(struct mwifiex_private *priv);
+int mwifiex_config_tdls_disable(struct mwifiex_private *priv);
+int mwifiex_config_tdls_cs_params(struct mwifiex_private *priv);
+int mwifiex_stop_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac);
+int mwifiex_start_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac,
+			  u8 primary_chan, u8 second_chan_offset, u8 band);
+
 int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
 					  struct host_cmd_ds_command *cmd,
 					  void *data_buf);
@@ -1522,6 +1556,12 @@
 void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
 void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter);
 void mwifiex_11n_delba(struct mwifiex_private *priv, int tid);
+int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy);
+void mwifiex_process_tx_pause_event(struct mwifiex_private *priv,
+				    struct sk_buff *event);
+void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
+				      struct sk_buff *event_skb);
+
 #ifdef CONFIG_DEBUG_FS
 void mwifiex_debugfs_init(void);
 void mwifiex_debugfs_remove(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 77b9055..408b684 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -266,12 +266,17 @@
 	{
 		PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		.driver_data = (unsigned long) &mwifiex_pcie8766,
+		.driver_data = (unsigned long)&mwifiex_pcie8766,
 	},
 	{
 		PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8897,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		.driver_data = (unsigned long) &mwifiex_pcie8897,
+		.driver_data = (unsigned long)&mwifiex_pcie8897,
+	},
+	{
+		PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997,
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		.driver_data = (unsigned long)&mwifiex_pcie8997,
 	},
 	{},
 };
@@ -1082,6 +1087,7 @@
 			card->txbd_rdptr++;
 			break;
 		case PCIE_DEVICE_ID_MARVELL_88W8897:
+		case PCIE_DEVICE_ID_MARVELL_88W8997:
 			card->txbd_rdptr += reg->ring_tx_start_ptr;
 			break;
 		}
@@ -1179,6 +1185,7 @@
 			card->txbd_wrptr++;
 			break;
 		case PCIE_DEVICE_ID_MARVELL_88W8897:
+		case PCIE_DEVICE_ID_MARVELL_88W8997:
 			card->txbd_wrptr += reg->ring_tx_start_ptr;
 			break;
 		}
@@ -1807,6 +1814,8 @@
 
 	if (!card->evt_buf_list[rdptr]) {
 		skb_push(skb, INTF_HEADER_LEN);
+		skb_put(skb, MAX_EVENT_SIZE - skb->len);
+		memset(skb->data, 0, MAX_EVENT_SIZE);
 		if (mwifiex_map_pci_memory(adapter, skb,
 					   MAX_EVENT_SIZE,
 					   PCI_DMA_FROMDEVICE))
@@ -2731,3 +2740,4 @@
 MODULE_LICENSE("GPL v2");
 MODULE_FIRMWARE(PCIE8766_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(PCIE8897_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(PCIE8997_DEFAULT_FW_NAME);
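The two added lines in the event-buffer path above are length bookkeeping: skb_push() reclaims the interface header, and skb_put() then extends the data area to the full MAX_EVENT_SIZE, so the memset covers the entire region the device may DMA into. A userspace model of the arithmetic, with assumed sizes and a fake skb:

#include <assert.h>
#include <string.h>

#define INTF_HEADER_LEN 4	/* assumed */
#define MAX_EVENT_SIZE  2048	/* assumed */

struct fake_skb {
	unsigned char buf[MAX_EVENT_SIZE];
	size_t len;	/* valid data length */
};

static void prep_event_buf(struct fake_skb *skb)
{
	skb->len += INTF_HEADER_LEN;		/* skb_push() */
	skb->len += MAX_EVENT_SIZE - skb->len;	/* skb_put() to full size */
	assert(skb->len == MAX_EVENT_SIZE);
	memset(skb->buf, 0, MAX_EVENT_SIZE);
}
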
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
index 0e7ee8b..48e549c 100644
--- a/drivers/net/wireless/mwifiex/pcie.h
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -30,10 +30,12 @@
 
 #define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin"
 #define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin"
+#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcie8997_uapsta.bin"
 
 #define PCIE_VENDOR_ID_MARVELL              (0x11ab)
 #define PCIE_DEVICE_ID_MARVELL_88W8766P		(0x2b30)
 #define PCIE_DEVICE_ID_MARVELL_88W8897		(0x2b38)
+#define PCIE_DEVICE_ID_MARVELL_88W8997		(0x2b42)
 
 /* Constants for Buffer Descriptor (BD) rings */
 #define MWIFIEX_MAX_TXRX_BD			0x20
@@ -197,7 +199,38 @@
 	.sleep_cookie = 0,
 	.fw_dump_ctrl = 0xcf4,
 	.fw_dump_start = 0xcf8,
-	.fw_dump_end = 0xcff
+	.fw_dump_end = 0xcff,
+};
+
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
+	.cmd_addr_lo = PCIE_SCRATCH_0_REG,
+	.cmd_addr_hi = PCIE_SCRATCH_1_REG,
+	.cmd_size = PCIE_SCRATCH_2_REG,
+	.fw_status = PCIE_SCRATCH_3_REG,
+	.cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+	.cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+	.tx_rdptr = 0xC1A4,
+	.tx_wrptr = 0xC1A8,
+	.rx_rdptr = 0xC1A8,
+	.rx_wrptr = 0xC1A4,
+	.evt_rdptr = PCIE_SCRATCH_10_REG,
+	.evt_wrptr = PCIE_SCRATCH_11_REG,
+	.drv_rdy = PCIE_SCRATCH_12_REG,
+	.tx_start_ptr = 16,
+	.tx_mask = 0x0FFF0000,
+	.tx_wrap_mask = 0x01FF0000,
+	.rx_mask = 0x00000FFF,
+	.rx_wrap_mask = 0x000001FF,
+	.tx_rollover_ind = BIT(28),
+	.rx_rollover_ind = BIT(12),
+	.evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
+	.ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
+	.ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
+	.ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
+	.ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
+	.ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
+	.pfu_enabled = 1,
+	.sleep_cookie = 0,
 };
 
 struct mwifiex_pcie_device {
@@ -227,6 +260,15 @@
 	.can_ext_scan = true,
 };
 
+static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
+	.firmware       = PCIE8997_DEFAULT_FW_NAME,
+	.reg            = &mwifiex_reg_8997,
+	.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+	.can_dump_fw = false,
+	.can_ext_scan = true,
+};
+
 struct mwifiex_evt_buf_desc {
 	u64 paddr;
 	u16 len;
@@ -325,6 +367,7 @@
 			return 1;
 		break;
 	case PCIE_DEVICE_ID_MARVELL_88W8897:
+	case PCIE_DEVICE_ID_MARVELL_88W8997:
 		if (((card->txbd_wrptr & reg->tx_mask) !=
 		     (card->txbd_rdptr & reg->tx_mask)) ||
 		    ((card->txbd_wrptr & reg->tx_rollover_ind) ==
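The new 8997 case reuses the 8897 descriptor-pointer scheme: each pointer packs a ring index plus a rollover bit that flips on every wrap, so equal indices are disambiguated by comparing the rollover bits. A userspace model of that test; the mask values are illustrative, in the spirit of tx_mask/tx_rollover_ind above.

#include <stdbool.h>
#include <stdint.h>

#define IDX_MASK  0x0FFF
#define ROLLOVER  (1u << 12)

static bool ring_empty(uint32_t wr, uint32_t rd)
{
	return (wr & IDX_MASK) == (rd & IDX_MASK) &&
	       (wr & ROLLOVER) == (rd & ROLLOVER);
}

static bool ring_full(uint32_t wr, uint32_t rd)
{
	return (wr & IDX_MASK) == (rd & IDX_MASK) &&
	       (wr & ROLLOVER) != (rd & ROLLOVER);
}
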
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index baf9715..5847863 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -527,7 +527,8 @@
 
 			if (ch->flags & IEEE80211_CHAN_NO_IR)
 				scan_chan_list[chan_idx].chan_scan_mode_bitmap
-					|= MWIFIEX_PASSIVE_SCAN;
+					|= (MWIFIEX_PASSIVE_SCAN |
+					    MWIFIEX_HIDDEN_SSID_REPORT);
 			else
 				scan_chan_list[chan_idx].chan_scan_mode_bitmap
 					&= ~MWIFIEX_PASSIVE_SCAN;
@@ -823,6 +824,7 @@
 	int i;
 	u8 ssid_filter;
 	struct mwifiex_ie_types_htcap *ht_cap;
+	struct mwifiex_ie_types_bss_mode *bss_mode;
 
 	/* The tlv_buf_len is calculated for each scan command.  The TLVs added
 	   in this routine will be preserved since the routine that sends the
@@ -908,6 +910,10 @@
 				wildcard_ssid_tlv->max_ssid_length =
 							IEEE80211_MAX_SSID_LEN;
 
+			if (!memcmp(user_scan_in->ssid_list[i].ssid,
+				    "DIRECT-", 7))
+				wildcard_ssid_tlv->max_ssid_length = 0xfe;
+
 			memcpy(wildcard_ssid_tlv->ssid,
 			       user_scan_in->ssid_list[i].ssid, ssid_len);
 
@@ -968,6 +974,15 @@
 	else
 		*max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD;
 
+	if (adapter->ext_scan) {
+		bss_mode = (struct mwifiex_ie_types_bss_mode *)tlv_pos;
+		bss_mode->header.type = cpu_to_le16(TLV_TYPE_BSS_MODE);
+		bss_mode->header.len = cpu_to_le16(sizeof(bss_mode->bss_mode));
+		bss_mode->bss_mode = scan_cfg_out->bss_mode;
+		tlv_pos += sizeof(bss_mode->header) +
+			   le16_to_cpu(bss_mode->header.len);
+	}
+
 	/* If the input config or adapter has the number of Probes set,
 	   add tlv */
 	if (num_probes) {
@@ -1035,7 +1050,8 @@
 			if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
 				(scan_chan_list +
 				 chan_idx)->chan_scan_mode_bitmap
-					|= MWIFIEX_PASSIVE_SCAN;
+					|= (MWIFIEX_PASSIVE_SCAN |
+					    MWIFIEX_HIDDEN_SSID_REPORT);
 			else
 				(scan_chan_list +
 				 chan_idx)->chan_scan_mode_bitmap
@@ -1586,6 +1602,62 @@
 	return ret;
 }
 
+/* This function checks whether the SSID is zero-length or all zeroes */
+static bool mwifiex_is_hidden_ssid(struct cfg80211_ssid *ssid)
+{
+	int idx;
+
+	for (idx = 0; idx < ssid->ssid_len; idx++) {
+		if (ssid->ssid[idx])
+			return false;
+	}
+
+	return true;
+}
+
+/* This function checks whether a hidden SSID was found on passive scan
+ * channels, and saves those channels for a later specific-SSID active scan.
+ */
+static int mwifiex_save_hidden_ssid_channels(struct mwifiex_private *priv,
+					     struct cfg80211_bss *bss)
+{
+	struct mwifiex_bssdescriptor *bss_desc;
+	int ret;
+	int chid;
+
+	/* Allocate and fill new bss descriptor */
+	bss_desc = kzalloc(sizeof(*bss_desc), GFP_KERNEL);
+	if (!bss_desc)
+		return -ENOMEM;
+
+	ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc);
+	if (ret)
+		goto done;
+
+	if (mwifiex_is_hidden_ssid(&bss_desc->ssid)) {
+		mwifiex_dbg(priv->adapter, INFO, "found hidden SSID\n");
+		for (chid = 0; chid < MWIFIEX_USER_SCAN_CHAN_MAX; chid++) {
+			if (priv->hidden_chan[chid].chan_number ==
+			    bss->channel->hw_value)
+				break;
+
+			if (!priv->hidden_chan[chid].chan_number) {
+				priv->hidden_chan[chid].chan_number =
+					bss->channel->hw_value;
+				priv->hidden_chan[chid].radio_type =
+					bss->channel->band;
+				priv->hidden_chan[chid].scan_type =
+					MWIFIEX_SCAN_TYPE_ACTIVE;
+				break;
+			}
+		}
+	}
+
+done:
+	kfree(bss_desc);
+	return 0;
+}
+
 static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv,
 					  struct cfg80211_bss *bss)
 {
@@ -1775,6 +1847,14 @@
 				    .mac_address, ETH_ALEN))
 				mwifiex_update_curr_bss_params(priv, bss);
 			cfg80211_put_bss(priv->wdev.wiphy, bss);
+
+			if ((chan->flags & IEEE80211_CHAN_RADAR) ||
+			    (chan->flags & IEEE80211_CHAN_NO_IR)) {
+				mwifiex_dbg(adapter, INFO,
+					    "radar or passive channel %d\n",
+					    channel);
+				mwifiex_save_hidden_ssid_channels(priv, bss);
+			}
 		}
 	} else {
 		mwifiex_dbg(adapter, WARN, "missing BSS channel IE\n");
@@ -1798,6 +1878,57 @@
 	}
 }
 
+/* This function checks whether a hidden SSID was found on passive scan
+ * channels, and issues a specific-SSID active scan on those channels.
+ */
+static int
+mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
+{
+	int ret;
+	struct mwifiex_adapter *adapter = priv->adapter;
+	u8 id = 0;
+	struct mwifiex_user_scan_cfg  *user_scan_cfg;
+
+	if (adapter->active_scan_triggered) {
+		adapter->active_scan_triggered = false;
+		return 0;
+	}
+
+	if (!priv->hidden_chan[0].chan_number) {
+		mwifiex_dbg(adapter, INFO, "No BSS with hidden SSID found on DFS channels\n");
+		return 0;
+	}
+	user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL);
+	if (!user_scan_cfg)
+		return -ENOMEM;
+
+	for (id = 0; id < MWIFIEX_USER_SCAN_CHAN_MAX; id++) {
+		if (!priv->hidden_chan[id].chan_number)
+			break;
+		memcpy(&user_scan_cfg->chan_list[id],
+		       &priv->hidden_chan[id],
+		       sizeof(struct mwifiex_user_scan_chan));
+	}
+
+	adapter->active_scan_triggered = true;
+	user_scan_cfg->num_ssids = priv->scan_request->n_ssids;
+	user_scan_cfg->ssid_list = priv->scan_request->ssids;
+
+	ret = mwifiex_scan_networks(priv, user_scan_cfg);
+	kfree(user_scan_cfg);
+
+	memset(&priv->hidden_chan, 0, sizeof(priv->hidden_chan));
+
+	if (ret) {
+		dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
 static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
 {
 	struct mwifiex_adapter *adapter = priv->adapter;
@@ -1811,6 +1942,8 @@
 		adapter->scan_processing = false;
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 
+		mwifiex_active_scan_req_for_passive_chan(priv);
+
 		if (!adapter->ext_scan)
 			mwifiex_complete_scan(priv);
 
@@ -1837,15 +1970,17 @@
 		adapter->scan_processing = false;
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 
-		if (priv->scan_request) {
-			mwifiex_dbg(adapter, INFO,
-				    "info: aborting scan\n");
-			cfg80211_scan_done(priv->scan_request, 1);
-			priv->scan_request = NULL;
-		} else {
-			priv->scan_aborting = false;
-			mwifiex_dbg(adapter, INFO,
-				    "info: scan already aborted\n");
+		if (!adapter->active_scan_triggered) {
+			if (priv->scan_request) {
+				mwifiex_dbg(adapter, INFO,
+					    "info: aborting scan\n");
+				cfg80211_scan_done(priv->scan_request, 1);
+				priv->scan_request = NULL;
+			} else {
+				priv->scan_aborting = false;
+				mwifiex_dbg(adapter, INFO,
+					    "info: scan already aborted\n");
+			}
 		}
 	} else {
 		/* Get scan command from scan_pending_q and put to
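The hidden-SSID handling above boils down to a two-pass scan: pass one (passive) records the channels where an all-zero SSID was seen, pass two actively probes just those channels, and active_scan_triggered keeps pass two from re-arming itself. The core test is small enough to model in userspace:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool ssid_is_hidden(const unsigned char *ssid, size_t len)
{
	size_t i;

	if (!len)
		return true;
	for (i = 0; i < len; i++)
		if (ssid[i])
			return false;
	return true;
}

int main(void)
{
	unsigned char zeros[4] = { 0 };

	printf("%d %d\n", ssid_is_hidden(zeros, 4),
	       ssid_is_hidden((const unsigned char *)"ap", 2));
	return 0;	/* prints "1 0" */
}
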
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index a0b121f..5d05c6f 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -51,6 +51,10 @@
 
 static struct semaphore add_remove_card_sem;
 
+static struct memory_type_mapping generic_mem_type_map[] = {
+	{"DUMP", NULL, 0, 0xDD},
+};
+
 static struct memory_type_mapping mem_type_mapping_tbl[] = {
 	{"ITCM", NULL, 0, 0xF0},
 	{"DTCM", NULL, 0, 0xF1},
@@ -91,6 +95,7 @@
 		return -ENOMEM;
 
 	card->func = func;
+	card->device_id = id;
 
 	func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 
@@ -107,6 +112,7 @@
 		card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
 		card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
 		card->can_dump_fw = data->can_dump_fw;
+		card->fw_dump_enh = data->fw_dump_enh;
 		card->can_auto_tdls = data->can_auto_tdls;
 		card->can_ext_scan = data->can_ext_scan;
 	}
@@ -287,6 +293,8 @@
 #define SDIO_DEVICE_ID_MARVELL_8887   (0x9135)
 /* Device ID for SD8801 */
 #define SDIO_DEVICE_ID_MARVELL_8801   (0x9139)
+/* Device ID for SD8997 */
+#define SDIO_DEVICE_ID_MARVELL_8997   (0x9141)
 
 
 /* WLAN IDs */
@@ -303,6 +311,8 @@
 		.driver_data = (unsigned long)&mwifiex_sdio_sd8887},
 	{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8801),
 		.driver_data = (unsigned long)&mwifiex_sdio_sd8801},
+	{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997),
+		.driver_data = (unsigned long)&mwifiex_sdio_sd8997},
 	{},
 };
 
@@ -910,6 +920,8 @@
 	if (!fwbuf)
 		return -ENOMEM;
 
+	sdio_claim_host(card->func);
+
 	/* Perform firmware data transfer */
 	do {
 		/* The host polls for the DN_LD_CARD_RDY and CARD_IO_READY
@@ -1014,6 +1026,8 @@
 		offset += txlen;
 	} while (true);
 
+	sdio_release_host(card->func);
+
 	mwifiex_dbg(adapter, MSG,
 		    "info: FW download over, size %d bytes\n", offset);
 
@@ -1964,8 +1978,13 @@
 	adapter->dev = &func->dev;
 
 	strcpy(adapter->fw_name, card->firmware);
-	adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
-	adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
+	if (card->fw_dump_enh) {
+		adapter->mem_type_mapping_tbl = generic_mem_type_map;
+		adapter->num_mem_types = 1;
+	} else {
+		adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
+		adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
+	}
 
 	return 0;
 }
@@ -2107,26 +2126,46 @@
 		    port, card->mp_data_port_mask);
 }
 
+static void mwifiex_recreate_adapter(struct sdio_mmc_card *card)
+{
+	struct sdio_func *func = card->func;
+	const struct sdio_device_id *device_id = card->device_id;
+
+	/* TODO: mmc_hw_reset does not require destroying and re-probing the
+	 * whole adapter, so there is no need for this Rube Goldberg design
+	 * that reloads the fw from an external workqueue. If we didn't
+	 * destroy the adapter, we could reload the fw from
+	 * mwifiex_main_work_queue directly.
+	 * The real difficulty with fw reset is restoring all the user
+	 * settings applied through ioctl. By destroying and recreating the
+	 * adapter we take the easy way out, since we rely on user space to
+	 * restore them. We assume that user space will treat the new
+	 * incarnation of the adapter (interfaces) as if it had just been
+	 * discovered, and initialize it from scratch.
+	 */
+
+	mwifiex_sdio_remove(func);
+
+	/* power cycle the adapter */
+	sdio_claim_host(func);
+	mmc_hw_reset(func->card->host);
+	sdio_release_host(func);
+
+	mwifiex_sdio_probe(func, device_id);
+}
+
 static struct mwifiex_adapter *save_adapter;
 static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
 {
 	struct sdio_mmc_card *card = adapter->card;
-	struct mmc_host *target = card->func->card->host;
 
-	/* The actual reset operation must be run outside of driver thread.
-	 * This is because mmc_remove_host() will cause the device to be
-	 * instantly destroyed, and the driver then needs to end its thread,
-	 * leading to a deadlock.
-	 *
-	 * We run it in a totally independent workqueue.
+	/* TODO: the card pointer is unprotected. If the adapter is removed
+	 * physically, the sdio core might trigger mwifiex_sdio_remove before
+	 * this workqueue is run, destroying the adapter struct. When the
+	 * workqueue eventually executes, it will dereference an invalid
+	 * adapter pointer.
+	 */
-
-	mwifiex_dbg(adapter, WARN, "Resetting card...\n");
-	mmc_remove_host(target);
-	/* 200ms delay is based on experiment with sdhci controller */
-	mdelay(200);
-	target->rescan_entered = 0; /* rescan non-removable cards */
-	mmc_add_host(target);
+	mwifiex_recreate_adapter(card);
 }
 
 /* This function read/write firmware */
@@ -2138,8 +2177,8 @@
 	int ret, tries;
 	u8 ctrl_data = 0;
 
-	sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl,
-		    &ret);
+	sdio_writeb(card->func, card->reg->fw_dump_host_ready,
+		    card->reg->fw_dump_ctrl, &ret);
 	if (ret) {
 		mwifiex_dbg(adapter, ERROR, "SDIO Write ERR\n");
 		return RDWR_STATUS_FAILURE;
@@ -2155,10 +2194,10 @@
 			break;
 		if (doneflag && ctrl_data == doneflag)
 			return RDWR_STATUS_DONE;
-		if (ctrl_data != FW_DUMP_HOST_READY) {
+		if (ctrl_data != card->reg->fw_dump_host_ready) {
 			mwifiex_dbg(adapter, WARN,
-				    "The ctrl reg was changed, re-try again!\n");
-			sdio_writeb(card->func, FW_DUMP_HOST_READY,
+				    "The ctrl reg was changed, re-try again\n");
+			sdio_writeb(card->func, card->reg->fw_dump_host_ready,
 				    card->reg->fw_dump_ctrl, &ret);
 			if (ret) {
 				mwifiex_dbg(adapter, ERROR, "SDIO write err\n");
@@ -2167,7 +2206,7 @@
 		}
 		usleep_range(100, 200);
 	}
-	if (ctrl_data == FW_DUMP_HOST_READY) {
+	if (ctrl_data == card->reg->fw_dump_host_ready) {
 		mwifiex_dbg(adapter, ERROR,
 			    "Fail to pull ctrl_data\n");
 		return RDWR_STATUS_FAILURE;
@@ -2300,10 +2339,129 @@
 	sdio_release_host(card->func);
 }
 
+static void mwifiex_sdio_generic_fw_dump(struct mwifiex_adapter *adapter)
+{
+	struct sdio_mmc_card *card = adapter->card;
+	struct memory_type_mapping *entry = &generic_mem_type_map[0];
+	unsigned int reg, reg_start, reg_end;
+	u8 start_flag = 0, done_flag = 0;
+	u8 *dbg_ptr, *end_ptr;
+	enum rdwr_status stat;
+	int ret = -1, tries;
+
+	if (!card->fw_dump_enh)
+		return;
+
+	if (entry->mem_ptr) {
+		vfree(entry->mem_ptr);
+		entry->mem_ptr = NULL;
+	}
+	entry->mem_size = 0;
+
+	mwifiex_pm_wakeup_card(adapter);
+	sdio_claim_host(card->func);
+
+	mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n");
+
+	stat = mwifiex_sdio_rdwr_firmware(adapter, done_flag);
+	if (stat == RDWR_STATUS_FAILURE)
+		goto done;
+
+	reg_start = card->reg->fw_dump_start;
+	reg_end = card->reg->fw_dump_end;
+	for (reg = reg_start; reg <= reg_end; reg++) {
+		for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
+			start_flag = sdio_readb(card->func, reg, &ret);
+			if (ret) {
+				mwifiex_dbg(adapter, ERROR,
+					    "SDIO read err\n");
+				goto done;
+			}
+			if (start_flag == 0)
+				break;
+			usleep_range(100, 200);
+		}
+		/* tries exhausted without the FW clearing the flag */
+		if (start_flag != 0) {
+			mwifiex_dbg(adapter, ERROR,
+				    "FW not ready to dump\n");
+			ret = -1;
+			goto done;
+		}
+	}
+
+	entry->mem_ptr = vmalloc(0xf0000 + 1);
+	if (!entry->mem_ptr) {
+		ret = -1;
+		goto done;
+	}
+	dbg_ptr = entry->mem_ptr;
+	entry->mem_size = 0xf0000;
+	end_ptr = dbg_ptr + entry->mem_size;
+
+	done_flag = entry->done_flag;
+	mwifiex_dbg(adapter, DUMP,
+		    "Start %s output, please wait...\n", entry->mem_name);
+
+	while (true) {
+		stat = mwifiex_sdio_rdwr_firmware(adapter, done_flag);
+		if (stat == RDWR_STATUS_FAILURE)
+			goto done;
+		for (reg = reg_start; reg <= reg_end; reg++) {
+			*dbg_ptr = sdio_readb(card->func, reg, &ret);
+			if (ret) {
+				mwifiex_dbg(adapter, ERROR,
+					    "SDIO read err\n");
+				goto done;
+			}
+			dbg_ptr++;
+			if (dbg_ptr >= end_ptr) {
+				u8 *tmp_ptr;
+
+				tmp_ptr = vmalloc(entry->mem_size + 0x4000 + 1);
+				if (!tmp_ptr)
+					goto done;
+
+				memcpy(tmp_ptr, entry->mem_ptr,
+				       entry->mem_size);
+				vfree(entry->mem_ptr);
+				entry->mem_ptr = tmp_ptr;
+				tmp_ptr = NULL;
+				dbg_ptr = entry->mem_ptr + entry->mem_size;
+				entry->mem_size += 0x4000;
+				end_ptr = entry->mem_ptr + entry->mem_size;
+			}
+		}
+		if (stat == RDWR_STATUS_DONE) {
+			entry->mem_size = dbg_ptr - entry->mem_ptr;
+			mwifiex_dbg(adapter, DUMP, "dump %s done size=0x%x\n",
+				    entry->mem_name, entry->mem_size);
+			ret = 0;
+			break;
+		}
+	}
+	mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n");
+
+done:
+	if (ret) {
+		mwifiex_dbg(adapter, ERROR, "firmware dump failed\n");
+		if (entry->mem_ptr) {
+			vfree(entry->mem_ptr);
+			entry->mem_ptr = NULL;
+		}
+		entry->mem_size = 0;
+	}
+	sdio_release_host(card->func);
+}
+
 static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter)
 {
+	struct sdio_mmc_card *card = adapter->card;
+
 	mwifiex_drv_info_dump(adapter);
-	mwifiex_sdio_fw_dump(adapter);
+	if (card->fw_dump_enh)
+		mwifiex_sdio_generic_fw_dump(adapter);
+	else
+		mwifiex_sdio_fw_dump(adapter);
 	mwifiex_upload_device_dump(adapter);
 }
 
@@ -2510,3 +2668,4 @@
 MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME);
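The reallocation dance in the enhanced dump loop above (vmalloc a larger buffer, memcpy, vfree the old one) is the kernel spelling of a growable byte buffer. A userspace model using realloc, with the same 16 KiB (0x4000) growth step:

#include <stdlib.h>

#define GROW_STEP 0x4000

/* Returns the (possibly moved) buffer, or NULL on failure, in which
 * case the caller still owns and must free the old buffer.
 */
static unsigned char *grow_dump_buf(unsigned char *buf, size_t *size)
{
	unsigned char *bigger = realloc(buf, *size + GROW_STEP);

	if (!bigger)
		return NULL;
	*size += GROW_STEP;
	return bigger;
}
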
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 6f645cf..b9fbc5cf 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -35,6 +35,7 @@
 #define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
 #define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
 #define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
+#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin"
 
 #define BLOCK_MODE	1
 #define BYTE_MODE	0
@@ -222,6 +223,7 @@
 	u8 cmd_cfg_1;
 	u8 cmd_cfg_2;
 	u8 cmd_cfg_3;
+	u8 fw_dump_host_ready;
 	u8 fw_dump_ctrl;
 	u8 fw_dump_start;
 	u8 fw_dump_end;
@@ -257,11 +259,15 @@
 	bool supports_sdio_new_mode;
 	bool has_control_mask;
 	bool can_dump_fw;
+	bool fw_dump_enh;
 	bool can_auto_tdls;
 	bool can_ext_scan;
 
 	struct mwifiex_sdio_mpa_tx mpa_tx;
 	struct mwifiex_sdio_mpa_rx mpa_rx;
+
+	/* needed for card reset */
+	const struct sdio_device_id *device_id;
 };
 
 struct mwifiex_sdio_device {
@@ -275,6 +281,7 @@
 	bool supports_sdio_new_mode;
 	bool has_control_mask;
 	bool can_dump_fw;
+	bool fw_dump_enh;
 	bool can_auto_tdls;
 	bool can_ext_scan;
 };
@@ -350,6 +357,7 @@
 	.cmd_cfg_1 = 0xb9,
 	.cmd_cfg_2 = 0xba,
 	.cmd_cfg_3 = 0xbb,
+	.fw_dump_host_ready = 0xee,
 	.fw_dump_ctrl = 0xe2,
 	.fw_dump_start = 0xe3,
 	.fw_dump_end = 0xea,
@@ -361,6 +369,59 @@
 				 0x59, 0x5c, 0x5d},
 };
 
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = {
+	.start_rd_port = 0,
+	.start_wr_port = 0,
+	.base_0_reg = 0xF8,
+	.base_1_reg = 0xF9,
+	.poll_reg = 0x5C,
+	.host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+			CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+	.host_int_rsr_reg = 0x4,
+	.host_int_status_reg = 0x0C,
+	.host_int_mask_reg = 0x08,
+	.status_reg_0 = 0xE8,
+	.status_reg_1 = 0xE9,
+	.sdio_int_mask = 0xff,
+	.data_port_mask = 0xffffffff,
+	.io_port_0_reg = 0xE4,
+	.io_port_1_reg = 0xE5,
+	.io_port_2_reg = 0xE6,
+	.max_mp_regs = 196,
+	.rd_bitmap_l = 0x10,
+	.rd_bitmap_u = 0x11,
+	.rd_bitmap_1l = 0x12,
+	.rd_bitmap_1u = 0x13,
+	.wr_bitmap_l = 0x14,
+	.wr_bitmap_u = 0x15,
+	.wr_bitmap_1l = 0x16,
+	.wr_bitmap_1u = 0x17,
+	.rd_len_p0_l = 0x18,
+	.rd_len_p0_u = 0x19,
+	.card_misc_cfg_reg = 0xd8,
+	.card_cfg_2_1_reg = 0xd9,
+	.cmd_rd_len_0 = 0xc0,
+	.cmd_rd_len_1 = 0xc1,
+	.cmd_rd_len_2 = 0xc2,
+	.cmd_rd_len_3 = 0xc3,
+	.cmd_cfg_0 = 0xc4,
+	.cmd_cfg_1 = 0xc5,
+	.cmd_cfg_2 = 0xc6,
+	.cmd_cfg_3 = 0xc7,
+	.fw_dump_host_ready = 0xcc,
+	.fw_dump_ctrl = 0xf0,
+	.fw_dump_start = 0xf1,
+	.fw_dump_end = 0xf8,
+	.func1_dump_reg_start = 0x10,
+	.func1_dump_reg_end = 0x17,
+	.func1_scratch_reg = 0xe8,
+	.func1_spec_reg_num = 13,
+	.func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
+				 0x60, 0x61, 0x62, 0x64,
+				 0x65, 0x66, 0x68, 0x69,
+				 0x6a},
+};
+
 static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
 	.start_rd_port = 0,
 	.start_wr_port = 0,
@@ -469,6 +530,22 @@
 	.can_ext_scan = true,
 };
 
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
+	.firmware = SD8997_DEFAULT_FW_NAME,
+	.reg = &mwifiex_reg_sd8997,
+	.max_ports = 32,
+	.mp_agg_pkt_limit = 16,
+	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+	.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+	.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+	.supports_sdio_new_mode = true,
+	.has_control_mask = false,
+	.can_dump_fw = true,
+	.fw_dump_enh = true,
+	.can_auto_tdls = false,
+	.can_ext_scan = true,
+};
+
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
 	.firmware = SD8887_DEFAULT_FW_NAME,
 	.reg = &mwifiex_reg_sd8887,
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 037adcd..a49a80d 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -26,6 +26,10 @@
 #include "11n.h"
 #include "11ac.h"
 
+static bool drcs;
+module_param(drcs, bool, 0644);
+MODULE_PARM_DESC(drcs, "1: multi-channel operation; 0: single-channel operation (default)");
+
 static bool disable_auto_ds;
 module_param(disable_auto_ds, bool, 0);
 MODULE_PARM_DESC(disable_auto_ds,
@@ -1512,6 +1516,22 @@
 }
 
 static int
+mwifiex_cmd_set_mc_policy(struct mwifiex_private *priv,
+			  struct host_cmd_ds_command *cmd,
+			  u16 cmd_action, void *data_buf)
+{
+	struct host_cmd_ds_multi_chan_policy *mc_pol = &cmd->params.mc_policy;
+	const u16 *drcs_info = data_buf;
+
+	mc_pol->action = cpu_to_le16(cmd_action);
+	mc_pol->policy = cpu_to_le16(*drcs_info);
+	cmd->command = cpu_to_le16(HostCmd_CMD_MC_POLICY);
+	cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_multi_chan_policy) +
+				S_DS_GEN);
+	return 0;
+}
+
+static int
 mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
 			 struct host_cmd_ds_command *cmd,
 			 u16 cmd_action, void *data_buf)
@@ -1576,6 +1596,50 @@
 }
 
 static int
+mwifiex_cmd_tdls_config(struct mwifiex_private *priv,
+			struct host_cmd_ds_command *cmd,
+			u16 cmd_action, void *data_buf)
+{
+	struct host_cmd_ds_tdls_config *tdls_config = &cmd->params.tdls_config;
+	struct mwifiex_tdls_init_cs_params *config;
+	struct mwifiex_tdls_config *init_config;
+	u16 len;
+
+	cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_CONFIG);
+	cmd->size = cpu_to_le16(S_DS_GEN);
+	tdls_config->tdls_action = cpu_to_le16(cmd_action);
+	le16_add_cpu(&cmd->size, sizeof(tdls_config->tdls_action));
+
+	switch (cmd_action) {
+	case ACT_TDLS_CS_ENABLE_CONFIG:
+		init_config = data_buf;
+		len = sizeof(*init_config);
+		memcpy(tdls_config->tdls_data, init_config, len);
+		break;
+	case ACT_TDLS_CS_INIT:
+		config = data_buf;
+		len = sizeof(*config);
+		memcpy(tdls_config->tdls_data, config, len);
+		break;
+	case ACT_TDLS_CS_STOP:
+		len = sizeof(struct mwifiex_tdls_stop_cs_params);
+		memcpy(tdls_config->tdls_data, data_buf, len);
+		break;
+	case ACT_TDLS_CS_PARAMS:
+		len = sizeof(struct mwifiex_tdls_config_cs_params);
+		memcpy(tdls_config->tdls_data, data_buf, len);
+		break;
+	default:
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Unknown TDLS configuration\n");
+		return -ENOTSUPP;
+	}
+
+	le16_add_cpu(&cmd->size, len);
+	return 0;
+}
+
+static int
 mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
 		      struct host_cmd_ds_command *cmd,
 		      void *data_buf)
@@ -1933,10 +1997,12 @@
 		if (priv->bss_mode == NL80211_IFTYPE_ADHOC)
 			cmd_ptr->params.bss_mode.con_type =
 				CONNECTION_TYPE_ADHOC;
-		else if (priv->bss_mode == NL80211_IFTYPE_STATION)
+		else if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+			 priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT)
 			cmd_ptr->params.bss_mode.con_type =
 				CONNECTION_TYPE_INFRA;
-		else if (priv->bss_mode == NL80211_IFTYPE_AP)
+		else if (priv->bss_mode == NL80211_IFTYPE_AP ||
+			 priv->bss_mode == NL80211_IFTYPE_P2P_GO)
 			cmd_ptr->params.bss_mode.con_type = CONNECTION_TYPE_AP;
 		cmd_ptr->size = cpu_to_le16(sizeof(struct
 				host_cmd_ds_set_bss_mode) + S_DS_GEN);
@@ -1958,6 +2024,10 @@
 	case HostCmd_CMD_TDLS_OPER:
 		ret = mwifiex_cmd_tdls_oper(priv, cmd_ptr, data_buf);
 		break;
+	case HostCmd_CMD_TDLS_CONFIG:
+		ret = mwifiex_cmd_tdls_config(priv, cmd_ptr, cmd_action,
+					      data_buf);
+		break;
 	case HostCmd_CMD_CHAN_REPORT_REQUEST:
 		ret = mwifiex_cmd_issue_chan_report_request(priv, cmd_ptr,
 							    data_buf);
@@ -1966,6 +2036,10 @@
 		ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action,
 						   data_buf);
 		break;
+	case HostCmd_CMD_MC_POLICY:
+		ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action,
+						data_buf);
+		break;
 	default:
 		mwifiex_dbg(priv->adapter, ERROR,
 			    "PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -2082,6 +2156,18 @@
 			if (ret)
 				return -1;
 		}
+
+		if (drcs) {
+			adapter->drcs_enabled = true;
+			if (ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+				ret = mwifiex_send_cmd(priv,
+						       HostCmd_CMD_MC_POLICY,
+						       HostCmd_ACT_GEN_SET, 0,
+						       &adapter->drcs_enabled,
+						       true);
+			if (ret)
+				return -1;
+		}
 	}
 
 	/* get tx rate */
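As a usage note, drcs is read once while the first station interface is initialized, so it acts as a load-time switch. Assuming the parameter is exposed by the mwifiex core module as built here, enabling multi-channel operation would look like:

  # modprobe mwifiex drcs=1
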
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index b645884..87b69d8 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -599,6 +599,7 @@
 				    "info: key: GTK is set\n");
 			priv->wpa_is_gtk_set = true;
 			priv->scan_block = false;
+			priv->port_open = true;
 		}
 	}
 
@@ -629,6 +630,7 @@
 			mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n");
 			priv->wpa_is_gtk_set = true;
 			priv->scan_block = false;
+			priv->port_open = true;
 		}
 	}
 
@@ -893,7 +895,7 @@
 	case ACT_TDLS_DELETE:
 		if (reason) {
 			if (!node || reason == TDLS_ERR_LINK_NONEXISTENT)
-				mwifiex_dbg(priv->adapter, ERROR,
+				mwifiex_dbg(priv->adapter, MSG,
 					    "TDLS link delete for %pM failed: reason %d\n",
 					    cmd_tdls_oper->peer_mac, reason);
 			else
@@ -1191,12 +1193,15 @@
 		break;
 	case HostCmd_CMD_TDLS_OPER:
 		ret = mwifiex_ret_tdls_oper(priv, resp);
+		break;
+	case HostCmd_CMD_MC_POLICY:
 		break;
 	case HostCmd_CMD_CHAN_REPORT_REQUEST:
 		break;
 	case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
 		ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp);
 		break;
+	case HostCmd_CMD_TDLS_CONFIG:
+		break;
 	default:
 		mwifiex_dbg(adapter, ERROR,
 			    "CMD_RESP: unknown cmd response %#x\n",
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 848de26..3d18c58 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -54,6 +54,7 @@
 	priv->media_connected = false;
 
 	priv->scan_block = false;
+	priv->port_open = false;
 
 	if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
 	    ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) {
@@ -153,6 +154,7 @@
 	struct mwifiex_sta_node *sta_ptr;
 	struct mwifiex_tdls_generic_event *tdls_evt =
 			(void *)event_skb->data + sizeof(adapter->event_cause);
+	u8 *mac = tdls_evt->peer_mac;
 
 	/* reserved 2 bytes are not mandatory in tdls event */
 	if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) -
@@ -175,6 +177,59 @@
 					   le16_to_cpu(tdls_evt->u.reason_code),
 					   GFP_KERNEL);
 		break;
+	case TDLS_EVENT_CHAN_SWITCH_RESULT:
+		mwifiex_dbg(adapter, EVENT, "tdls channel switch result:\n");
+		mwifiex_dbg(adapter, EVENT,
+			    "status=0x%x, reason=0x%x cur_chan=%d\n",
+			    tdls_evt->u.switch_result.status,
+			    tdls_evt->u.switch_result.reason,
+			    tdls_evt->u.switch_result.cur_chan);
+
+		/* tdls channel switch failed */
+		if (tdls_evt->u.switch_result.status != 0) {
+			switch (tdls_evt->u.switch_result.cur_chan) {
+			case TDLS_BASE_CHANNEL:
+				sta_ptr->tdls_status = TDLS_IN_BASE_CHAN;
+				break;
+			case TDLS_OFF_CHANNEL:
+				sta_ptr->tdls_status = TDLS_IN_OFF_CHAN;
+				break;
+			default:
+				break;
+			}
+			return ret;
+		}
+
+		/* tdls channel switch success */
+		switch (tdls_evt->u.switch_result.cur_chan) {
+		case TDLS_BASE_CHANNEL:
+			if (sta_ptr->tdls_status == TDLS_IN_BASE_CHAN)
+				break;
+			mwifiex_update_ralist_tx_pause_in_tdls_cs(priv, mac,
+								  false);
+			sta_ptr->tdls_status = TDLS_IN_BASE_CHAN;
+			break;
+		case TDLS_OFF_CHANNEL:
+			if (sta_ptr->tdls_status == TDLS_IN_OFF_CHAN)
+				break;
+			mwifiex_update_ralist_tx_pause_in_tdls_cs(priv, mac,
+								  true);
+			sta_ptr->tdls_status = TDLS_IN_OFF_CHAN;
+			break;
+		default:
+			break;
+		}
+
+		break;
+	case TDLS_EVENT_START_CHAN_SWITCH:
+		mwifiex_dbg(adapter, EVENT, "tdls start channel switch...\n");
+		sta_ptr->tdls_status = TDLS_CHAN_SWITCHING;
+		break;
+	case TDLS_EVENT_CHAN_SWITCH_STOPPED:
+		mwifiex_dbg(adapter, EVENT,
+			    "tdls chan switch stopped, reason=%d\n",
+			    tdls_evt->u.cs_stop_reason);
+		break;
 	default:
 		break;
 	}
@@ -182,6 +237,145 @@
 	return ret;
 }
 
+static void mwifiex_process_uap_tx_pause(struct mwifiex_private *priv,
+					 struct mwifiex_ie_types_header *tlv)
+{
+	struct mwifiex_tx_pause_tlv *tp;
+	struct mwifiex_sta_node *sta_ptr;
+	unsigned long flags;
+
+	tp = (void *)tlv;
+	mwifiex_dbg(priv->adapter, EVENT,
+		    "uap tx_pause: %pM pause=%d, pkts=%d\n",
+		    tp->peermac, tp->tx_pause,
+		    tp->pkt_cnt);
+
+	if (ether_addr_equal(tp->peermac, priv->netdev->dev_addr)) {
+		if (tp->tx_pause)
+			priv->port_open = false;
+		else
+			priv->port_open = true;
+	} else if (is_multicast_ether_addr(tp->peermac)) {
+		mwifiex_update_ralist_tx_pause(priv, tp->peermac, tp->tx_pause);
+	} else {
+		spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+		sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
+		spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+		if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
+			sta_ptr->tx_pause = tp->tx_pause;
+			mwifiex_update_ralist_tx_pause(priv, tp->peermac,
+						       tp->tx_pause);
+		}
+	}
+}
+
+static void mwifiex_process_sta_tx_pause(struct mwifiex_private *priv,
+					 struct mwifiex_ie_types_header *tlv)
+{
+	struct mwifiex_tx_pause_tlv *tp;
+	struct mwifiex_sta_node *sta_ptr;
+	int status;
+	unsigned long flags;
+
+	tp = (void *)tlv;
+	mwifiex_dbg(priv->adapter, EVENT,
+		    "sta tx_pause: %pM pause=%d, pkts=%d\n",
+		    tp->peermac, tp->tx_pause,
+		    tp->pkt_cnt);
+
+	if (ether_addr_equal(tp->peermac, priv->cfg_bssid)) {
+		if (tp->tx_pause)
+			priv->port_open = false;
+		else
+			priv->port_open = true;
+	} else {
+		if (!ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+			return;
+
+		status = mwifiex_get_tdls_link_status(priv, tp->peermac);
+		if (mwifiex_is_tdls_link_setup(status)) {
+			spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+			sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
+			spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+			if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
+				sta_ptr->tx_pause = tp->tx_pause;
+				mwifiex_update_ralist_tx_pause(priv,
+							       tp->peermac,
+							       tp->tx_pause);
+			}
+		}
+	}
+}
+
+void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
+				      struct sk_buff *event_skb)
+{
+	struct mwifiex_ie_types_multi_chan_info *chan_info;
+	u16 status;
+
+	chan_info = (void *)event_skb->data + sizeof(u32);
+
+	if (le16_to_cpu(chan_info->header.type) != TLV_TYPE_MULTI_CHAN_INFO) {
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "unknown TLV in chan_info event\n");
+		return;
+	}
+
+	status = le16_to_cpu(chan_info->status);
+
+	if (status) {
+		mwifiex_dbg(priv->adapter, EVENT,
+			    "multi-channel operation started\n");
+	} else {
+		mwifiex_dbg(priv->adapter, EVENT,
+			    "multi-channel operation over\n");
+	}
+}
+
+void mwifiex_process_tx_pause_event(struct mwifiex_private *priv,
+				    struct sk_buff *event_skb)
+{
+	struct mwifiex_ie_types_header *tlv;
+	u16 tlv_type, tlv_len;
+	int tlv_buf_left;
+
+	if (!priv->media_connected) {
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "tx_pause event while disconnected; bss_role=%d\n",
+			    priv->bss_role);
+		return;
+	}
+
+	tlv_buf_left = event_skb->len - sizeof(u32);
+	tlv = (void *)event_skb->data + sizeof(u32);
+
+	while (tlv_buf_left >= (int)sizeof(struct mwifiex_ie_types_header)) {
+		tlv_type = le16_to_cpu(tlv->type);
+		tlv_len  = le16_to_cpu(tlv->len);
+		if ((sizeof(struct mwifiex_ie_types_header) + tlv_len) >
+		    tlv_buf_left) {
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "wrong tlv: tlvLen=%d, tlvBufLeft=%d\n",
+				    tlv_len, tlv_buf_left);
+			break;
+		}
+		if (tlv_type == TLV_TYPE_TX_PAUSE) {
+			if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
+				mwifiex_process_sta_tx_pause(priv, tlv);
+			else
+				mwifiex_process_uap_tx_pause(priv, tlv);
+		}
+
+		tlv_buf_left -= sizeof(struct mwifiex_ie_types_header) +
+				tlv_len;
+		tlv = (void *)((u8 *)tlv + tlv_len +
+			       sizeof(struct mwifiex_ie_types_header));
+	}
+}
+
 /*
 * This function handles coex events generated by firmware
 */
@@ -359,7 +553,7 @@
 
 	case EVENT_PS_AWAKE:
 		mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
-		if (!adapter->pps_uapsd_mode &&
+		if (!adapter->pps_uapsd_mode && priv->port_open &&
 		    priv->media_connected && adapter->sleep_period.period) {
 				adapter->pps_uapsd_mode = true;
 				mwifiex_dbg(adapter, EVENT,
@@ -438,6 +632,7 @@
 
 	case EVENT_PORT_RELEASE:
 		mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n");
+		priv->port_open = true;
 		break;
 
 	case EVENT_EXT_SCAN_REPORT:
@@ -573,6 +768,16 @@
 		ret = mwifiex_parse_tdls_event(priv, adapter->event_skb);
 		break;
 
+	case EVENT_TX_DATA_PAUSE:
+		mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n");
+		mwifiex_process_tx_pause_event(priv, adapter->event_skb);
+		break;
+
+	case EVENT_MULTI_CHAN_INFO:
+		mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n");
+		mwifiex_process_multi_chan_event(priv, adapter->event_skb);
+		break;
+
 	case EVENT_TX_STATUS_REPORT:
 		mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n");
 		mwifiex_parse_tx_status_event(priv, adapter->event_body);
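The TLV walk in mwifiex_process_tx_pause_event above follows a standard guarded-iteration shape worth isolating. A userspace model (little-endian conversion elided; the header layout is illustrative, not the driver struct):

#include <stddef.h>
#include <stdint.h>

struct tlv_hdr {
	uint16_t type;
	uint16_t len;
} __attribute__((packed));

static void walk_tlvs(const uint8_t *buf, size_t left)
{
	while (left >= sizeof(struct tlv_hdr)) {
		const struct tlv_hdr *tlv = (const void *)buf;
		size_t step = sizeof(*tlv) + tlv->len;

		if (step > left)
			break;	/* truncated TLV: stop, as the driver does */
		/* dispatch on tlv->type here */
		buf += step;
		left -= step;
	}
}
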
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index d8b7d9c..a6c8a4f 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -66,8 +66,8 @@
 	if (status <= 0) {
 		if (status == 0)
 			status = -ETIMEDOUT;
-		mwifiex_dbg(adapter, ERROR,
-			    "cmd_wait_q terminated: %d\n", status);
+		mwifiex_dbg(adapter, ERROR, "cmd_wait_q terminated: %d\n",
+			    status);
 		mwifiex_cancel_all_pending_cmd(adapter);
 		return status;
 	}
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index 2faa1bc..b3e163d 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -49,7 +49,7 @@
 		tid = skb->priority;
 		tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
 
-		if (status == TDLS_SETUP_COMPLETE) {
+		if (mwifiex_is_tdls_link_setup(status)) {
 			ra_list = mwifiex_wmm_get_queue_raptr(priv, tid, mac);
 			ra_list->tdls_link = true;
 			tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
@@ -164,7 +164,7 @@
 	pos = (void *)skb_put(skb, 4);
 	*pos++ = WLAN_EID_AID;
 	*pos++ = 2;
-	*pos++ = le16_to_cpu(assoc_rsp->a_id);
+	memcpy(pos, &assoc_rsp->a_id, sizeof(assoc_rsp->a_id));
 
 	return;
 }
@@ -355,6 +355,7 @@
 	extcap->ieee_hdr.len = 8;
 	memset(extcap->ext_capab, 0, 8);
 	extcap->ext_capab[4] |= WLAN_EXT_CAPA5_TDLS_ENABLED;
+	extcap->ext_capab[3] |= WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH;
 
 	if (priv->adapter->is_hw_11ac_capable)
 		extcap->ext_capab[7] |= WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED;
@@ -1071,6 +1072,11 @@
 			for (i = 0; i < MAX_NUM_TID; i++)
 				sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
 		}
+		if (sta_ptr->tdls_cap.extcap.ext_capab[3] &
+		    WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH) {
+			mwifiex_config_tdls_enable(priv);
+			mwifiex_config_tdls_cs_params(priv);
+		}
 
 		memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
 		mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE);
@@ -1141,7 +1147,7 @@
 
 	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
 	list_for_each_entry(sta_ptr, &priv->sta_list, list) {
-		if (sta_ptr->tdls_status == TDLS_SETUP_COMPLETE) {
+		if (mwifiex_is_tdls_link_setup(sta_ptr->tdls_status)) {
 			ether_addr_copy(peer->peer_addr, sta_ptr->mac_addr);
 			peer++;
 			count++;
@@ -1295,7 +1301,7 @@
 			if ((link_status == TDLS_NOT_SETUP) &&
 			    (peer->tdls_status == TDLS_SETUP_INPROGRESS))
 				peer->failure_count++;
-			else if (link_status == TDLS_SETUP_COMPLETE)
+			else if (mwifiex_is_tdls_link_setup(link_status))
 				peer->failure_count = 0;
 
 			peer->tdls_status = link_status;
@@ -1367,7 +1373,7 @@
 
 		if (((tdls_peer->rssi >= MWIFIEX_TDLS_RSSI_LOW) ||
 		     !tdls_peer->rssi) &&
-		    tdls_peer->tdls_status == TDLS_SETUP_COMPLETE) {
+		    mwifiex_is_tdls_link_setup(tdls_peer->tdls_status)) {
 			tdls_peer->tdls_status = TDLS_LINK_TEARDOWN;
 			mwifiex_dbg(priv->adapter, MSG,
 				    "teardown TDLS link,peer=%pM rssi=%d\n",
@@ -1416,3 +1422,67 @@
 		mwifiex_flush_auto_tdls_list(priv);
 	}
 }
+
+static int mwifiex_config_tdls(struct mwifiex_private *priv, u8 enable)
+{
+	struct mwifiex_tdls_config config;
+
+	config.enable = cpu_to_le16(enable);
+	return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+				ACT_TDLS_CS_ENABLE_CONFIG, 0, &config, true);
+}
+
+int mwifiex_config_tdls_enable(struct mwifiex_private *priv)
+{
+	return mwifiex_config_tdls(priv, true);
+}
+
+int mwifiex_config_tdls_disable(struct mwifiex_private *priv)
+{
+	return mwifiex_config_tdls(priv, false);
+}
+
+int mwifiex_config_tdls_cs_params(struct mwifiex_private *priv)
+{
+	struct mwifiex_tdls_config_cs_params config_tdls_cs_params;
+
+	config_tdls_cs_params.unit_time = MWIFIEX_DEF_CS_UNIT_TIME;
+	config_tdls_cs_params.thr_otherlink = MWIFIEX_DEF_CS_THR_OTHERLINK;
+	config_tdls_cs_params.thr_directlink = MWIFIEX_DEF_THR_DIRECTLINK;
+
+	return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+				ACT_TDLS_CS_PARAMS, 0,
+				&config_tdls_cs_params, true);
+}
+
+int mwifiex_stop_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac)
+{
+	struct mwifiex_tdls_stop_cs_params stop_tdls_cs_params;
+
+	ether_addr_copy(stop_tdls_cs_params.peer_mac, peer_mac);
+
+	return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+				ACT_TDLS_CS_STOP, 0,
+				&stop_tdls_cs_params, true);
+}
+
+int mwifiex_start_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac,
+			  u8 primary_chan, u8 second_chan_offset, u8 band)
+{
+	struct mwifiex_tdls_init_cs_params start_tdls_cs_params;
+
+	ether_addr_copy(start_tdls_cs_params.peer_mac, peer_mac);
+	start_tdls_cs_params.primary_chan = primary_chan;
+	start_tdls_cs_params.second_chan_offset = second_chan_offset;
+	start_tdls_cs_params.band = band;
+
+	start_tdls_cs_params.switch_time = cpu_to_le16(MWIFIEX_DEF_CS_TIME);
+	start_tdls_cs_params.switch_timeout =
+					cpu_to_le16(MWIFIEX_DEF_CS_TIMEOUT);
+	start_tdls_cs_params.reg_class = MWIFIEX_DEF_CS_REG_CLASS;
+	start_tdls_cs_params.periodicity = MWIFIEX_DEF_CS_PERIODICITY;
+
+	return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+				ACT_TDLS_CS_INIT, 0,
+				&start_tdls_cs_params, true);
+}
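
A minimal sketch of how the channel-switch helpers above might be driven,
assuming a caller such as a cfg80211 tdls_channel_switch handler; the
channel, offset and band constants are illustrative assumptions, only the
helper signatures come from this patch:

	/* Illustrative only: move the TDLS link with peer_mac to channel 44
	 * (5 GHz, secondary channel above), or pull it back to the base
	 * channel. The constants are made-up example values.
	 */
	static int example_tdls_channel_switch(struct mwifiex_private *priv,
					       const u8 *peer_mac, bool start)
	{
		if (start)
			return mwifiex_start_tdls_cs(priv, peer_mac, 44,
						     IEEE80211_HT_PARAM_CHA_SEC_ABOVE,
						     BAND_AAC);

		return mwifiex_stop_tdls_cs(priv, peer_mac);
	}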
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 5ed9b79..8b1e5b5 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -370,8 +370,28 @@
 			/* consumes ack_skb */
 			skb_complete_wifi_ack(ack_skb, !tx_status->status);
 		} else {
+			/* Remove the broadcast address which was added by the driver */
+			memmove(ack_skb->data +
+				sizeof(struct ieee80211_hdr_3addr) +
+				MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16),
+				ack_skb->data +
+				sizeof(struct ieee80211_hdr_3addr) +
+				MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16) +
+				ETH_ALEN, ack_skb->len -
+				(sizeof(struct ieee80211_hdr_3addr) +
+				MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16) +
+				ETH_ALEN));
+			ack_skb->len = ack_skb->len - ETH_ALEN;
+			/* Remove the driver's proprietary header, including
+			 * the 2-byte packet length, and pass the actual
+			 * management frame buffer to cfg80211.
+			 */
 			cfg80211_mgmt_tx_status(&priv->wdev, tx_info->cookie,
-						ack_skb->data, ack_skb->len,
+						ack_skb->data +
+						MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+						sizeof(u16), ack_skb->len -
+						(MWIFIEX_MGMT_FRAME_HEADER_SIZE
+						 + sizeof(u16)),
 						!tx_status->status, GFP_ATOMIC);
 			dev_kfree_skb_any(ack_skb);
 		}
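
The offset arithmetic above is easier to check against the buffer layout it
implies (derived from the expressions in this hunk; the field naming below
is ours):

	/* Layout of ack_skb->data implied by the offsets above:
	 *
	 *   [ MWIFIEX_MGMT_FRAME_HEADER_SIZE bytes ]  driver-private prefix
	 *   [ u16 packet length                    ]
	 *   [ struct ieee80211_hdr_3addr           ]  802.11 mgmt header
	 *   [ ETH_ALEN address                     ]  driver-added, memmove()d out
	 *   [ management frame body ...            ]
	 *
	 * cfg80211_mgmt_tx_status() is handed data + prefix + sizeof(u16),
	 * i.e. the frame from the 802.11 header onward, with the extra
	 * address removed and the length shrunk by ETH_ALEN.
	 */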
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index b749300..4d5a6e3 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -808,7 +808,7 @@
 			     struct mwifiex_uap_bss_param *bss_cfg,
 			     struct cfg80211_chan_def chandef)
 {
-	u8 config_bands = 0;
+	u8 config_bands = 0, old_bands = priv->adapter->config_bands;
 
 	priv->bss_chandef = chandef;
 
@@ -834,6 +834,11 @@
 	}
 
 	priv->adapter->config_bands = config_bands;
+
+	if (old_bands != config_bands) {
+		mwifiex_send_domain_info_cmd_fw(priv->adapter->wiphy);
+		mwifiex_dnld_txpwr_table(priv);
+	}
 }
 
 int mwifiex_config_start_uap(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index 7bc1f85..46c972a 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -41,6 +41,8 @@
 	mwifiex_dbg_dump(priv->adapter, EVT_D, "uap capabilties:",
 			 event->data, event->len);
 
+	skb_push(event, MWIFIEX_BSS_START_EVT_FIX_SIZE);
+
 	while ((evt_len >= sizeof(tlv_hdr->header))) {
 		tlv_hdr = (struct mwifiex_ie_types_data *)curr;
 		tlv_len = le16_to_cpu(tlv_hdr->header.len);
@@ -176,6 +178,7 @@
 		break;
 	case EVENT_UAP_BSS_IDLE:
 		priv->media_connected = false;
+		priv->port_open = false;
 		if (netif_carrier_ok(priv->netdev))
 			netif_carrier_off(priv->netdev);
 		mwifiex_stop_net_dev_queue(priv->netdev, adapter);
@@ -185,6 +188,7 @@
 		break;
 	case EVENT_UAP_BSS_ACTIVE:
 		priv->media_connected = true;
+		priv->port_open = true;
 		if (!netif_carrier_ok(priv->netdev))
 			netif_carrier_on(priv->netdev);
 		mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
@@ -192,6 +196,7 @@
 	case EVENT_UAP_BSS_START:
 		mwifiex_dbg(adapter, EVENT,
 			    "AP EVENT: event id: %#x\n", eventcause);
+		priv->port_open = false;
 		memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
 		       ETH_ALEN);
 		if (priv->hist_data)
@@ -297,6 +302,16 @@
 		mwifiex_bt_coex_wlan_param_update_event(priv,
 							adapter->event_skb);
 		break;
+	case EVENT_TX_DATA_PAUSE:
+		mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n");
+		mwifiex_process_tx_pause_event(priv, adapter->event_skb);
+		break;
+
+	case EVENT_MULTI_CHAN_INFO:
+		mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n");
+		mwifiex_process_multi_chan_event(priv, adapter->event_skb);
+		break;
+
 	default:
 		mwifiex_dbg(adapter, EVENT,
 			    "event: unknown event id: %#x\n", eventcause);
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index aada934..5e789b2 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -47,6 +47,11 @@
 	{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2,
 				       USB_CLASS_VENDOR_SPEC,
 				       USB_SUBCLASS_VENDOR_SPEC, 0xff)},
+	/* 8997 */
+	{USB_DEVICE(USB8XXX_VID, USB8997_PID_1)},
+	{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8997_PID_2,
+				       USB_CLASS_VENDOR_SPEC,
+				       USB_SUBCLASS_VENDOR_SPEC, 0xff)},
 	{ }	/* Terminating entry */
 };
 
@@ -244,9 +249,11 @@
 	if (card->rx_cmd_ep == context->ep) {
 		mwifiex_usb_submit_rx_urb(context, size);
 	} else {
-		context->skb = NULL;
-		if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING)
+		if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING) {
 			mwifiex_usb_submit_rx_urb(context, size);
+		} else {
+			context->skb = NULL;
+		}
 	}
 
 	return;
@@ -380,12 +387,14 @@
 	case USB8797_PID_1:
 	case USB8801_PID_1:
 	case USB8897_PID_1:
+	case USB8997_PID_1:
 		card->usb_boot_state = USB8XXX_FW_DNLD;
 		break;
 	case USB8766_PID_2:
 	case USB8797_PID_2:
 	case USB8801_PID_2:
 	case USB8897_PID_2:
+	case USB8997_PID_2:
 		card->usb_boot_state = USB8XXX_FW_READY;
 		break;
 	default:
@@ -812,6 +821,12 @@
 	adapter->dev = &card->udev->dev;
 
 	switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
+	case USB8997_PID_1:
+	case USB8997_PID_2:
+		adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
+		strcpy(adapter->fw_name, USB8997_DEFAULT_FW_NAME);
+		adapter->ext_scan = true;
+		break;
 	case USB8897_PID_1:
 	case USB8897_PID_2:
 		adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
@@ -868,8 +883,10 @@
 
 	/* Allocate memory for transmit */
 	fwdata = kzalloc(FW_DNLD_TX_BUF_SIZE, GFP_KERNEL);
-	if (!fwdata)
+	if (!fwdata) {
+		ret = -ENOMEM;
 		goto fw_exit;
+	}
 
 	/* Allocate memory for receive */
 	recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL);
@@ -1119,3 +1136,4 @@
 MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8801_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(USB8997_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h
index 57e1a57..f0051f8 100644
--- a/drivers/net/wireless/mwifiex/usb.h
+++ b/drivers/net/wireless/mwifiex/usb.h
@@ -32,6 +32,8 @@
 #define USB8897_PID_2		0x2046
 #define USB8801_PID_1		0x2049
 #define USB8801_PID_2		0x204a
+#define USB8997_PID_1		0x204d
+#define USB8997_PID_2		0x204e
 
 
 #define USB8XXX_FW_DNLD		1
@@ -46,6 +48,7 @@
 #define USB8797_DEFAULT_FW_NAME	"mrvl/usb8797_uapsta.bin"
 #define USB8801_DEFAULT_FW_NAME	"mrvl/usb8801_uapsta.bin"
 #define USB8897_DEFAULT_FW_NAME	"mrvl/usb8897_uapsta.bin"
+#define USB8997_DEFAULT_FW_NAME	"mrvl/usb8997_uapsta.bin"
 
 #define FW_DNLD_TX_BUF_SIZE	620
 #define FW_DNLD_RX_BUF_SIZE	2048
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 790e619..0cec8a6 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -126,6 +126,10 @@
 int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter)
 {
 
+	if (adapter->hw_status == MWIFIEX_HW_STATUS_READY)
+		if (adapter->if_ops.init_fw_port)
+			adapter->if_ops.init_fw_port(adapter);
+
 	adapter->init_wait_q_woken = true;
 	wake_up_interruptible(&adapter->init_wait_q);
 	return 0;
@@ -496,16 +500,12 @@
 int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
 			 struct cmd_ctrl_node *cmd_node)
 {
-	mwifiex_dbg(adapter, CMD,
-		    "cmd completed: status=%d\n",
+	WARN_ON(!cmd_node->wait_q_enabled);
+	mwifiex_dbg(adapter, CMD, "cmd completed: status=%d\n",
 		    adapter->cmd_wait_q.status);
 
-	*(cmd_node->condition) = true;
-
-	if (adapter->cmd_wait_q.status == -ETIMEDOUT)
-		mwifiex_dbg(adapter, ERROR, "cmd timeout\n");
-	else
-		wake_up_interruptible(&adapter->cmd_wait_q.wait);
+	*cmd_node->condition = true;
+	wake_up_interruptible(&adapter->cmd_wait_q.wait);
 
 	return 0;
 }
@@ -531,6 +531,65 @@
 	return NULL;
 }
 
+static struct mwifiex_sta_node *
+mwifiex_get_tdls_sta_entry(struct mwifiex_private *priv, u8 status)
+{
+	struct mwifiex_sta_node *node;
+
+	list_for_each_entry(node, &priv->sta_list, list) {
+		if (node->tdls_status == status)
+			return node;
+	}
+
+	return NULL;
+}
+
+/* If TDLS channel switching is ongoing, TX data traffic should be
+ * blocked until the switching stage completes.
+ */
+u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv)
+{
+	struct mwifiex_sta_node *sta_ptr;
+
+	if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+		return false;
+
+	sta_ptr = mwifiex_get_tdls_sta_entry(priv, TDLS_CHAN_SWITCHING);
+	if (sta_ptr)
+		return true;
+
+	return false;
+}
+
+u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv)
+{
+	struct mwifiex_sta_node *sta_ptr;
+
+	if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+		return false;
+
+	sta_ptr = mwifiex_get_tdls_sta_entry(priv, TDLS_IN_OFF_CHAN);
+	if (sta_ptr)
+		return true;
+
+	return false;
+}
+
+/* If TDLS channel switching is ongoing, or the TDLS link operates on an
+ * off-channel, the cmd path should be blocked until TDLS switches back to
+ * the base channel.
+ */
+u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv)
+{
+	if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+		return true;
+
+	if (mwifiex_is_tdls_chan_switching(priv) ||
+	    mwifiex_is_tdls_off_chan(priv))
+		return false;
+
+	return true;
+}
+
 /* This function will add a sta_node entry to associated station list
  * table with the given mac address.
  * If entry exist already, existing entry is returned.
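
A minimal sketch, not from the patch, of the kind of call site
mwifiex_is_send_cmd_allowed() is written for; the wrapper name is invented,
while mwifiex_send_cmd() and its signature are the driver's own:

	/* Hypothetical wrapper: refuse to issue a command while a TDLS link
	 * is switching or sitting on an off-channel, which is exactly what
	 * mwifiex_is_send_cmd_allowed() guards against.
	 */
	static int example_send_cmd_checked(struct mwifiex_private *priv,
					    u16 cmd_no, u16 action, u32 oid,
					    void *data_buf, bool sync)
	{
		if (!mwifiex_is_send_cmd_allowed(priv))
			return -EBUSY;

		return mwifiex_send_cmd(priv, cmd_no, action, oid,
					data_buf, sync);
	}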
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index a8ea21c..173d366 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -160,9 +160,10 @@
 		ra_list->tdls_link = false;
 		ra_list->ba_status = BA_SETUP_NONE;
 		ra_list->amsdu_in_ampdu = false;
+		ra_list->tx_paused = false;
 		if (!mwifiex_queuing_ra_based(priv)) {
-			if (mwifiex_get_tdls_link_status(priv, ra) ==
-			    TDLS_SETUP_COMPLETE) {
+			if (mwifiex_is_tdls_link_setup(
+			    mwifiex_get_tdls_link_status(priv, ra))) {
 				ra_list->tdls_link = true;
 				ra_list->is_11n_enabled =
 					mwifiex_tdls_peer_11n_enabled(priv, ra);
@@ -448,6 +449,11 @@
 	}
 }
 
+int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter)
+{
+	return !atomic_read(&adapter->bypass_tx_pending);
+}
+
 /*
  * This function checks if WMM Tx queue is empty.
  */
@@ -459,6 +465,8 @@
 
 	for (i = 0; i < adapter->priv_num; ++i) {
 		priv = adapter->priv[i];
+		if (priv && !priv->port_open)
+			continue;
 		if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
 			return false;
 	}
@@ -580,6 +588,10 @@
 	skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
 		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
 
+	skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
+		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+	atomic_set(&priv->adapter->bypass_tx_pending, 0);
+
 	idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
 	idr_destroy(&priv->ack_status_frames);
 }
@@ -603,6 +615,88 @@
 	return NULL;
 }
 
+void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
+				    u8 tx_pause)
+{
+	struct mwifiex_ra_list_tbl *ra_list;
+	u32 pkt_cnt = 0, tx_pkts_queued;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+	for (i = 0; i < MAX_NUM_TID; ++i) {
+		ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac);
+		if (ra_list && ra_list->tx_paused != tx_pause) {
+			pkt_cnt += ra_list->total_pkt_count;
+			ra_list->tx_paused = tx_pause;
+			if (tx_pause)
+				priv->wmm.pkts_paused[i] +=
+					ra_list->total_pkt_count;
+			else
+				priv->wmm.pkts_paused[i] -=
+					ra_list->total_pkt_count;
+		}
+	}
+
+	if (pkt_cnt) {
+		tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
+		if (tx_pause)
+			tx_pkts_queued -= pkt_cnt;
+		else
+			tx_pkts_queued += pkt_cnt;
+
+		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
+		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
+	}
+	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
+/* This function updates the tx_pause flag of non-TDLS peer ra_lists while
+ * TDLS channel switching is in progress.
+ */
+void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
+					       u8 *mac, u8 tx_pause)
+{
+	struct mwifiex_ra_list_tbl *ra_list;
+	u32 pkt_cnt = 0, tx_pkts_queued;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+	for (i = 0; i < MAX_NUM_TID; ++i) {
+		list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list,
+				    list) {
+			if (!memcmp(ra_list->ra, mac, ETH_ALEN))
+				continue;
+
+			if (ra_list->tx_paused != tx_pause) {
+				pkt_cnt += ra_list->total_pkt_count;
+				ra_list->tx_paused = tx_pause;
+				if (tx_pause)
+					priv->wmm.pkts_paused[i] +=
+						ra_list->total_pkt_count;
+				else
+					priv->wmm.pkts_paused[i] -=
+						ra_list->total_pkt_count;
+			}
+		}
+	}
+
+	if (pkt_cnt) {
+		tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
+		if (tx_pause)
+			tx_pkts_queued -= pkt_cnt;
+		else
+			tx_pkts_queued += pkt_cnt;
+
+		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
+		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
+	}
+	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
 /*
  * This function retrieves an RA list node for a given TID and
  * RA address pair.
@@ -670,6 +764,18 @@
 }
 
 /*
+ * This function adds a packet to the bypass TX queue, a special TX queue
+ * for packets which can be sent even when port_open is false.
+ */
+void
+mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
+				   struct sk_buff *skb)
+{
+	skb_queue_tail(&priv->bypass_txq, skb);
+}
+
+/*
  * This function adds a packet to WMM queue.
  *
  * In disconnected state the packet is immediately dropped and the
@@ -723,6 +829,9 @@
 	    !mwifiex_is_skb_mgmt_frame(skb)) {
 		switch (tdls_status) {
 		case TDLS_SETUP_COMPLETE:
+		case TDLS_CHAN_SWITCHING:
+		case TDLS_IN_BASE_CHAN:
+		case TDLS_IN_OFF_CHAN:
 			ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
 							      ra);
 			tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
@@ -765,7 +874,10 @@
 		atomic_set(&priv->wmm.highest_queued_prio,
 			   priv->tos_to_tid_inv[tid_down]);
 
-	atomic_inc(&priv->wmm.tx_pkts_queued);
+	if (ra_list->tx_paused)
+		priv->wmm.pkts_paused[tid_down]++;
+	else
+		atomic_inc(&priv->wmm.tx_pkts_queued);
 
 	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
 }
@@ -970,7 +1082,8 @@
 
 			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
 
-			if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)
+			if (!priv_tmp->port_open ||
+			    (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
 				continue;
 
 			/* iterate over the WMM queues of the BSS */
@@ -987,7 +1100,8 @@
 				list_for_each_entry(ptr, &tid_ptr->ra_list,
 						    list) {
 
-					if (!skb_queue_empty(&ptr->skb_head))
+					if (!ptr->tx_paused &&
+					    !skb_queue_empty(&ptr->skb_head))
 						/* holds both locks */
 						goto found;
 				}
@@ -1339,6 +1453,38 @@
 	return 0;
 }
 
+void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter)
+{
+	struct mwifiex_tx_param tx_param;
+	struct sk_buff *skb;
+	struct mwifiex_txinfo *tx_info;
+	struct mwifiex_private *priv;
+	int i;
+
+	if (adapter->data_sent || adapter->tx_lock_flag)
+		return;
+
+	for (i = 0; i < adapter->priv_num; ++i) {
+		priv = adapter->priv[i];
+
+		if (skb_queue_empty(&priv->bypass_txq))
+			continue;
+
+		skb = skb_dequeue(&priv->bypass_txq);
+		tx_info = MWIFIEX_SKB_TXCB(skb);
+
+		/* no aggregation for bypass packets */
+		tx_param.next_pkt_len = 0;
+
+		if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
+			skb_queue_head(&priv->bypass_txq, skb);
+			tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
+		} else {
+			atomic_dec(&adapter->bypass_tx_pending);
+		}
+	}
+}
+
 /*
  * This function transmits the highest priority packet awaiting in the
  * WMM Queues.
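
A rough sketch of the intended producer/consumer pairing for the new bypass
queue, assuming call sites in the TX path and main work loop; only the
helper names come from this patch (an EAPOL frame during the 802.1X
handshake is the canonical example of traffic that must bypass a closed
port):

	/* Producer side (assumed TX-path call site): queue the frame and
	 * bump the counter that mwifiex_bypass_txlist_empty() reads.
	 */
	static void example_queue_bypass_pkt(struct mwifiex_private *priv,
					     struct sk_buff *skb)
	{
		mwifiex_wmm_add_buf_bypass_txqueue(priv, skb);
		atomic_inc(&priv->adapter->bypass_tx_pending);
	}

	/* Consumer side (assumed main-loop call site): drain bypass packets
	 * ahead of normal WMM traffic, since they ignore port_open.
	 */
	static void example_tx_work(struct mwifiex_adapter *adapter)
	{
		if (!mwifiex_bypass_txlist_empty(adapter))
			mwifiex_process_bypass_tx(adapter);
		if (!mwifiex_wmm_lists_empty(adapter))
			mwifiex_wmm_process_tx(adapter);
	}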
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 48ece0b..38f0976 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -99,12 +99,16 @@
 
 void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
 				 struct sk_buff *skb);
+void mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
+					struct sk_buff *skb);
 void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra);
 void mwifiex_rotate_priolists(struct mwifiex_private *priv,
 			      struct mwifiex_ra_list_tbl *ra, int tid);
 
 int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
+int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter);
 void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
+void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter);
 int mwifiex_is_ralist_valid(struct mwifiex_private *priv,
 			    struct mwifiex_ra_list_tbl *ra_list, int tid);
 
@@ -126,6 +130,10 @@
 mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
 			    const u8 *ra_addr);
 u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
+void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
+				    u8 tx_pause);
+void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
+					       u8 *mac, u8 tx_pause);
 
 struct mwifiex_ra_list_tbl *mwifiex_wmm_get_ralist_node(struct mwifiex_private
 					*priv, u8 tid, const u8 *ra_addr);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 77361af..9420fc6 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -5019,35 +5019,36 @@
 		memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
 
 		rcu_read_unlock();
-	}
 
-	if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
-	    !priv->ap_fw) {
-		rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
-		if (rc)
-			goto out;
+		if (changed & BSS_CHANGED_ASSOC) {
+			if (!priv->ap_fw) {
+				rc = mwl8k_cmd_set_rate(hw, vif,
+							ap_legacy_rates,
+							ap_mcs_rates);
+				if (rc)
+					goto out;
 
-		rc = mwl8k_cmd_use_fixed_rate_sta(hw);
-		if (rc)
-			goto out;
-	} else {
-		if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
-		    priv->ap_fw) {
-			int idx;
-			int rate;
+				rc = mwl8k_cmd_use_fixed_rate_sta(hw);
+				if (rc)
+					goto out;
+			} else {
+				int idx;
+				int rate;
 
-			/* Use AP firmware specific rate command.
-			 */
-			idx = ffs(vif->bss_conf.basic_rates);
-			if (idx)
-				idx--;
+				/* Use AP firmware specific rate command.
+				 */
+				idx = ffs(vif->bss_conf.basic_rates);
+				if (idx)
+					idx--;
 
-			if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
-				rate = mwl8k_rates_24[idx].hw_value;
-			else
-				rate = mwl8k_rates_50[idx].hw_value;
+				if (hw->conf.chandef.chan->band ==
+				    IEEE80211_BAND_2GHZ)
+					rate = mwl8k_rates_24[idx].hw_value;
+				else
+					rate = mwl8k_rates_50[idx].hw_value;
 
-			mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+				mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+			}
 		}
 	}
 
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index c410180..7b5c554 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -2321,8 +2321,6 @@
 	struct orinoco_rx_data *rx_data, *temp;
 	struct orinoco_scan_data *sd, *sdtemp;
 
-	wiphy_unregister(wiphy);
-
 	/* If the tasklet is scheduled when we call tasklet_kill it
 	 * will run one final time. However the tasklet will only
 	 * drain priv->rx_list if the hw is still available. */
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index c0a2737..a956f96 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -118,6 +118,7 @@
 
 	orinoco_cs_release(link);
 
+	wiphy_unregister(priv_to_wiphy(priv));
 	free_orinocodev(priv);
 }				/* orinoco_cs_detach */
 
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index 1b543e3..048693b 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -223,13 +223,15 @@
 	err = orinoco_if_add(priv, 0, 0, NULL);
 	if (err) {
 		printk(KERN_ERR PFX "orinoco_if_add() failed\n");
-		goto fail;
+		goto fail_wiphy;
 	}
 
 	pci_set_drvdata(pdev, priv);
 
 	return 0;
 
+ fail_wiphy:
+	wiphy_unregister(priv_to_wiphy(priv));
  fail:
 	free_irq(pdev->irq, priv);
 
@@ -263,6 +265,7 @@
 	iowrite16(0, card->bridge_io + 10);
 
 	orinoco_if_del(priv);
+	wiphy_unregister(priv_to_wiphy(priv));
 	free_irq(pdev->irq, priv);
 	free_orinocodev(priv);
 	pci_iounmap(pdev, priv->hw.iobase);
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index 74219d5..4938a22 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -173,13 +173,15 @@
 	err = orinoco_if_add(priv, 0, 0, NULL);
 	if (err) {
 		printk(KERN_ERR PFX "orinoco_if_add() failed\n");
-		goto fail;
+		goto fail_wiphy;
 	}
 
 	pci_set_drvdata(pdev, priv);
 
 	return 0;
 
+ fail_wiphy:
+	wiphy_unregister(priv_to_wiphy(priv));
  fail:
 	free_irq(pdev->irq, priv);
 
@@ -203,6 +205,7 @@
 	struct orinoco_private *priv = pci_get_drvdata(pdev);
 
 	orinoco_if_del(priv);
+	wiphy_unregister(priv_to_wiphy(priv));
 	free_irq(pdev->irq, priv);
 	free_orinocodev(priv);
 	pci_iounmap(pdev, priv->hw.iobase);
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 8b04523..2213520 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -262,13 +262,15 @@
 	err = orinoco_if_add(priv, 0, 0, NULL);
 	if (err) {
 		printk(KERN_ERR PFX "orinoco_if_add() failed\n");
-		goto fail;
+		goto fail_wiphy;
 	}
 
 	pci_set_drvdata(pdev, priv);
 
 	return 0;
 
+ fail_wiphy:
+	wiphy_unregister(priv_to_wiphy(priv));
  fail:
 	free_irq(pdev->irq, priv);
 
@@ -299,6 +301,7 @@
 	struct orinoco_pci_card *card = priv->card;
 
 	orinoco_if_del(priv);
+	wiphy_unregister(priv_to_wiphy(priv));
 	free_irq(pdev->irq, priv);
 	free_orinocodev(priv);
 	pci_iounmap(pdev, priv->hw.iobase);
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 91f0544..26a57d7 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -1502,6 +1502,7 @@
 	if (upriv->dev) {
 		struct orinoco_private *priv = ndev_priv(upriv->dev);
 		orinoco_if_del(priv);
+		wiphy_unregister(priv_to_wiphy(upriv));
 		free_orinocodev(priv);
 	}
 }
@@ -1695,6 +1696,7 @@
 	if (orinoco_if_add(priv, 0, 0, &ezusb_netdev_ops) != 0) {
 		upriv->dev = NULL;
 		err("%s: orinoco_if_add() failed", __func__);
+		wiphy_unregister(priv_to_wiphy(priv));
 		goto error;
 	}
 	upriv->dev = priv->ndev;
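
The orinoco changes above all follow one pattern: wiphy_unregister() moves
out of the shared free path in main.c and into each bus driver, so probe
error unwinds and remove paths release resources in strict reverse order of
acquisition. A condensed sketch of the resulting pairing (ours, not a hunk
from this series; iomap teardown omitted):

	static void example_pci_remove(struct pci_dev *pdev)
	{
		struct orinoco_private *priv = pci_get_drvdata(pdev);

		orinoco_if_del(priv);			/* undo orinoco_if_add()   */
		wiphy_unregister(priv_to_wiphy(priv));	/* undo wiphy registration */
		free_irq(pdev->irq, priv);		/* undo request_irq()      */
		free_orinocodev(priv);			/* undo alloc_orinocodev() */
	}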
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index 1c6788ae..40d7231 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -203,8 +203,10 @@
 
 	/* Copy firmware into DMA-accessible memory */
 	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
-	if (!fw)
-		return -ENOMEM;
+	if (!fw) {
+		status = -ENOMEM;
+		goto out;
+	}
 	len = fw_entry->size;
 
 	if (len % 4)
@@ -217,6 +219,8 @@
 
 	status = rsi_copy_to_card(common, fw, len, num_blocks);
 	kfree(fw);
+
+out:
 	release_firmware(fw_entry);
 	return status;
 }
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
index 30c2cf7..de49008 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -148,8 +148,10 @@
 
 	/* Copy firmware into DMA-accessible memory */
 	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
-	if (!fw)
-		return -ENOMEM;
+	if (!fw) {
+		status = -ENOMEM;
+		goto out;
+	}
 	len = fw_entry->size;
 
 	if (len % 4)
@@ -162,6 +164,8 @@
 
 	status = rsi_copy_to_card(common, fw, len, num_blocks);
 	kfree(fw);
+
+out:
 	release_firmware(fw_entry);
 	return status;
 }
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 2b4ef25..de62f5d 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -240,7 +240,6 @@
 
 config RT2X00_LIB
 	tristate
-	select AVERAGE
 
 config RT2X00_LIB_FIRMWARE
 	bool
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index afba073..78cc035 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -54,7 +54,7 @@
 #define CSR_REG_BASE			0x0400
 #define CSR_REG_SIZE			0x0100
 #define EEPROM_BASE			0x0000
-#define EEPROM_SIZE			0x006a
+#define EEPROM_SIZE			0x006e
 #define BBP_BASE			0x0000
 #define BBP_SIZE			0x0060
 #define RF_BASE				0x0004
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 9bb398b..3282ddb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -254,6 +254,8 @@
 	int tx_failed;
 };
 
+DECLARE_EWMA(rssi, 1024, 8)
+
 /*
  * Antenna settings about the currently active link.
  */
@@ -285,7 +287,7 @@
 	 * Similar to the avg_rssi in the link_qual structure
 	 * this value is updated by using the walking average.
 	 */
-	struct ewma rssi_ant;
+	struct ewma_rssi rssi_ant;
 };
 
 /*
@@ -314,7 +316,7 @@
 	/*
 	 * Currently active average RSSI value
 	 */
-	struct ewma avg_rssi;
+	struct ewma_rssi avg_rssi;
 
 	/*
 	 * Work structure for scheduling periodic link tuning.
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index 9b941c0..017188e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -33,15 +33,11 @@
  */
 #define DEFAULT_RSSI		-128
 
-/* Constants for EWMA calculations. */
-#define RT2X00_EWMA_FACTOR	1024
-#define RT2X00_EWMA_WEIGHT	8
-
-static inline int rt2x00link_get_avg_rssi(struct ewma *ewma)
+static inline int rt2x00link_get_avg_rssi(struct ewma_rssi *ewma)
 {
 	unsigned long avg;
 
-	avg = ewma_read(ewma);
+	avg = ewma_rssi_read(ewma);
 	if (avg)
 		return -avg;
 
@@ -76,8 +72,7 @@
 
 static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev)
 {
-	ewma_init(&rt2x00dev->link.ant.rssi_ant, RT2X00_EWMA_FACTOR,
-		  RT2X00_EWMA_WEIGHT);
+	ewma_rssi_init(&rt2x00dev->link.ant.rssi_ant);
 }
 
 static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev)
@@ -225,12 +220,12 @@
 	/*
 	 * Update global RSSI
 	 */
-	ewma_add(&link->avg_rssi, -rxdesc->rssi);
+	ewma_rssi_add(&link->avg_rssi, -rxdesc->rssi);
 
 	/*
 	 * Update antenna RSSI
 	 */
-	ewma_add(&ant->rssi_ant, -rxdesc->rssi);
+	ewma_rssi_add(&ant->rssi_ant, -rxdesc->rssi);
 }
 
 void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
@@ -285,8 +280,7 @@
 	 */
 	rt2x00dev->link.count = 0;
 	memset(qual, 0, sizeof(*qual));
-	ewma_init(&rt2x00dev->link.avg_rssi, RT2X00_EWMA_FACTOR,
-		  RT2X00_EWMA_WEIGHT);
+	ewma_rssi_init(&rt2x00dev->link.avg_rssi);
 
 	/*
 	 * Restore the VGC level as stored in the registers,
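
For reference, DECLARE_EWMA(name, factor, weight) from <linux/average.h>
generates a struct ewma_<name> plus ewma_<name>_init/add/read helpers with
the parameters baked in at compile time, which is what lets the
rt2x00-local factor/weight constants above be deleted. A self-contained
sketch:

	#include <linux/average.h>

	/* Generates struct ewma_rssi and ewma_rssi_init/add/read with
	 * factor 1024 and weight 8 fixed at compile time.
	 */
	DECLARE_EWMA(rssi, 1024, 8)

	static int example_avg_rssi(void)
	{
		struct ewma_rssi avg;

		ewma_rssi_init(&avg);
		/* rt2x00 feeds negated RSSI so the average stays positive */
		ewma_rssi_add(&avg, 70);
		ewma_rssi_add(&avg, 74);
		return -(int)ewma_rssi_read(&avg);	/* back to dBm sign */
	}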
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
index c8058aa..6291256 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
@@ -200,7 +200,7 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	struct rtl92c_firmware_header *pfwheader;
+	struct rtlwifi_firmware_header *pfwheader;
 	u8 *pfwdata;
 	u32 fwsize;
 	int err;
@@ -209,7 +209,7 @@
 	if (!rtlhal->pfirmware)
 		return 1;
 
-	pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
+	pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
 	pfwdata = rtlhal->pfirmware;
 	fwsize = rtlhal->fwsize;
 	RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
@@ -219,10 +219,10 @@
 		RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
 			 "Firmware Version(%d), Signature(%#x), Size(%d)\n",
 			  pfwheader->version, pfwheader->signature,
-			  (int)sizeof(struct rtl92c_firmware_header));
+			  (int)sizeof(struct rtlwifi_firmware_header));
 
-		pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
-		fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+		pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+		fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
 	}
 
 	if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h
index 05e944e..21bd4a5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h
@@ -37,7 +37,7 @@
 #define FW_8192C_POLLING_TIMEOUT_COUNT		3000
 
 #define IS_FW_HEADER_EXIST(_pfwhdr)		\
-	((_pfwhdr->signature&0xFFFF) == 0x88E1)
+	((le16_to_cpu(_pfwhdr->signature) & 0xFFFF) == 0x88E1)
 #define USE_OLD_WOWLAN_DEBUG_FW			0
 
 #define H2C_88E_RSVDPAGE_LOC_LEN		5
@@ -131,25 +131,6 @@
 #define	FW_PWR_STATE_ACTIVE	((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
 #define	FW_PWR_STATE_RF_OFF		0
 
-struct rtl92c_firmware_header {
-	u16 signature;
-	u8 category;
-	u8 function;
-	u16 version;
-	u8 subversion;
-	u8 rsvd1;
-	u8 month;
-	u8 date;
-	u8 hour;
-	u8 minute;
-	u16 ramcodesize;
-	u16 rsvd2;
-	u32 svnindex;
-	u32 rsvd3;
-	u32 rsvd4;
-	u32 rsvd5;
-};
-
 enum rtl8188e_h2c_cmd {
 	H2C_88E_RSVDPAGE = 0,
 	H2C_88E_JOINBSSRPT = 1,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index 0aca6f4..03cbe4c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -39,6 +39,7 @@
 #define BT_RSSI_STATE_SPECIAL_LOW	BIT_OFFSET_LEN_MASK_32(2, 1)
 #define BT_RSSI_STATE_BG_EDCA_LOW	BIT_OFFSET_LEN_MASK_32(3, 1)
 #define BT_RSSI_STATE_TXPOWER_LOW	BIT_OFFSET_LEN_MASK_32(4, 1)
+#define BT_MASK				0x00ffffff
 
 #define RTLPRIV			(struct rtl_priv *)
 #define GET_UNDECORATED_AVERAGE_RSSI(_priv)	\
@@ -312,7 +313,7 @@
 	struct dig_t *digtable = &rtlpriv->dm_digtable;
 	u32 isbt;
 
-	/* modify DIG lower bound, deal with abnorally large false alarm */
+	/* modify DIG lower bound, deal with abnormally large false alarm */
 	if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
 		digtable->large_fa_hit++;
 		if (digtable->forbidden_igi < digtable->cur_igvalue) {
@@ -1536,13 +1537,11 @@
 		return false;
 
 	bt_state = rtl_read_byte(rtlpriv, 0x4fd);
-	bt_tx = rtl_read_dword(rtlpriv, 0x488);
-	bt_tx = bt_tx & 0x00ffffff;
-	bt_pri = rtl_read_dword(rtlpriv, 0x48c);
-	bt_pri = bt_pri & 0x00ffffff;
+	bt_tx = rtl_read_dword(rtlpriv, 0x488) & BT_MASK;
+	bt_pri = rtl_read_dword(rtlpriv, 0x48c) & BT_MASK;
 	polling = rtl_read_dword(rtlpriv, 0x490);
 
-	if (bt_tx == 0xffffffff && bt_pri == 0xffffffff &&
+	if (bt_tx == BT_MASK && bt_pri == BT_MASK &&
 	    polling == 0xffffffff && bt_state == 0xff)
 		return false;
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 14b819e..43fcb25 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -221,7 +221,7 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	struct rtl92c_firmware_header *pfwheader;
+	struct rtlwifi_firmware_header *pfwheader;
 	u8 *pfwdata;
 	u32 fwsize;
 	int err;
@@ -230,19 +230,19 @@
 	if (!rtlhal->pfirmware)
 		return 1;
 
-	pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
+	pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
 	pfwdata = (u8 *)rtlhal->pfirmware;
 	fwsize = rtlhal->fwsize;
 	if (IS_FW_HEADER_EXIST(pfwheader)) {
 		RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
 			 "Firmware Version(%d), Signature(%#x),Size(%d)\n",
 			  pfwheader->version, pfwheader->signature,
-			  (int)sizeof(struct rtl92c_firmware_header));
+			  (int)sizeof(struct rtlwifi_firmware_header));
 
-		rtlhal->fw_version = pfwheader->version;
+		rtlhal->fw_version = le16_to_cpu(pfwheader->version);
 		rtlhal->fw_subversion = pfwheader->subversion;
-		pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
-		fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+		pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+		fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
 	}
 
 	_rtl92c_enable_fw_download(hw, true);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index e9f4281..864806c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -69,25 +69,6 @@
 	((GET_CVID_CUT_VERSION(version) == \
 		CHIP_VENDOR_UMC_B_CUT) ? true : false) : false)
 
-struct rtl92c_firmware_header {
-	__le16 signature;
-	u8 category;
-	u8 function;
-	__le16 version;
-	u8 subversion;
-	u8 rsvd1;
-	u8 month;
-	u8 date;
-	u8 hour;
-	u8 minute;
-	__le16 ramcodeSize;
-	__le16 rsvd2;
-	__le32 svnindex;
-	__le32 rsvd3;
-	__le32 rsvd4;
-	__le32 rsvd5;
-};
-
 #define pagenum_128(_len)	(u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
 
 #define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val)			\
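
The per-chip firmware header structs removed throughout this series are
byte-for-byte identical; the shared replacement presumably looks like the
following, reconstructed from the removed definitions with explicit
little-endian types, which is why readers now wrap ->signature and
->version in le16_to_cpu():

	struct rtlwifi_firmware_header {
		__le16 signature;
		u8 category;
		u8 function;
		__le16 version;
		u8 subversion;
		u8 rsvd1;
		u8 month;
		u8 date;
		u8 hour;
		u8 minute;
		__le16 ramcodesize;
		__le16 rsvd2;
		__le32 svnindex;
		__le32 rsvd3;
		__le32 rsvd4;
		__le32 rsvd5;
	};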
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
index c940a87..74a479a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
@@ -32,24 +32,15 @@
 /*-------------------------------------------------------------------------
  *	Chip specific
  *-------------------------------------------------------------------------*/
-#define CHIP_8723			BIT(2) /* RTL8723 With BT feature */
-#define CHIP_8723_DRV_REV		BIT(3) /* RTL8723 Driver Revised */
 #define NORMAL_CHIP			BIT(4)
 #define CHIP_VENDOR_UMC			BIT(5)
 #define CHIP_VENDOR_UMC_B_CUT		BIT(6)
 
-#define IS_8723_SERIES(version)		\
-	(((version) & CHIP_8723) ? true : false)
-
 #define IS_92C_1T2R(version)		\
 	(((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R))
 
 #define IS_VENDOR_UMC(version)		\
 	(((version) & CHIP_VENDOR_UMC) ? true : false)
 
-#define IS_VENDOR_8723_A_CUT(version)	\
-	(((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6))) ? \
-	false : true) : false)
-
 #define CHIP_BONDING_92C_1T2R	0x1
 #define CHIP_BONDING_IDENTIFIER(_value)	(((_value) >> 22) & 0x3)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 767358a..25db369 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -818,26 +818,29 @@
 
 static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw)
 {
-	u16			value16;
-
+	u16 value16;
+	u32 value32;
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 
-	mac->rx_conf = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APPFCS |
-		      RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
-		      RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
-	rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf);
+	value32 = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APPFCS |
+		   RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
+		   RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&value32));
 	/* Accept all multicast address */
 	rtl_write_dword(rtlpriv,  REG_MAR, 0xFFFFFFFF);
 	rtl_write_dword(rtlpriv,  REG_MAR + 4, 0xFFFFFFFF);
 	/* Accept all management frames */
 	value16 = 0xFFFF;
-	rtl92c_set_mgt_filter(hw, value16);
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_MGT_FILTER,
+				      (u8 *)(&value16));
 	/* Reject all control frame - default value is 0 */
-	rtl92c_set_ctrl_filter(hw, 0x0);
+	value16 = 0x0;
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CTRL_FILTER,
+				      (u8 *)(&value16));
 	/* Accept all data frames */
 	value16 = 0xFFFF;
-	rtl92c_set_data_filter(hw, value16);
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DATA_FILTER,
+				      (u8 *)(&value16));
 }
 
 static void _rtl92cu_init_beacon_parameters(struct ieee80211_hw *hw)
@@ -988,17 +991,6 @@
 	}
 }
 
-static void _update_mac_setting(struct ieee80211_hw *hw)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
-	mac->rx_conf = rtl_read_dword(rtlpriv, REG_RCR);
-	mac->rx_mgt_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
-	mac->rx_ctrl_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
-	mac->rx_data_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
-}
-
 int rtl92cu_hw_init(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1068,7 +1060,6 @@
 	}
 	_rtl92cu_hw_configure(hw);
 	_InitPABias(hw);
-	_update_mac_setting(hw);
 	rtl92c_dm_init(hw);
 exit:
 	local_irq_restore(flags);
@@ -1620,7 +1611,6 @@
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
 	enum wireless_mode wirelessmode = mac->mode;
 	u8 idx = 0;
 
@@ -1829,63 +1819,10 @@
 						u4b_ac_param);
 				break;
 			default:
-				RT_ASSERT(false,
-					  "SetHwReg8185(): invalid aci: %d !\n",
+				RT_ASSERT(false, "invalid aci: %d !\n",
 					  e_aci);
 				break;
 			}
-			if (rtlusb->acm_method != EACMWAY2_SW)
-				rtlpriv->cfg->ops->set_hw_reg(hw,
-					 HW_VAR_ACM_CTRL, &e_aci);
-			break;
-		}
-	case HW_VAR_ACM_CTRL:{
-			u8 e_aci = *val;
-			union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)
-							(&(mac->ac[0].aifs));
-			u8 acm = p_aci_aifsn->f.acm;
-			u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
-
-			acm_ctrl =
-			    acm_ctrl | ((rtlusb->acm_method == 2) ? 0x0 : 0x1);
-			if (acm) {
-				switch (e_aci) {
-				case AC0_BE:
-					acm_ctrl |= AcmHw_BeqEn;
-					break;
-				case AC2_VI:
-					acm_ctrl |= AcmHw_ViqEn;
-					break;
-				case AC3_VO:
-					acm_ctrl |= AcmHw_VoqEn;
-					break;
-				default:
-					RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
-						 "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
-						 acm);
-					break;
-				}
-			} else {
-				switch (e_aci) {
-				case AC0_BE:
-					acm_ctrl &= (~AcmHw_BeqEn);
-					break;
-				case AC2_VI:
-					acm_ctrl &= (~AcmHw_ViqEn);
-					break;
-				case AC3_VO:
-					acm_ctrl &= (~AcmHw_VoqEn);
-					break;
-				default:
-					RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-						 "switch case not processed\n");
-					break;
-				}
-			}
-			RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
-				 "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
-				 acm_ctrl);
-			rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
 			break;
 		}
 	case HW_VAR_RCR:{
@@ -1999,12 +1936,15 @@
 		}
 	case HW_VAR_MGT_FILTER:
 		rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *)val);
+		mac->rx_mgt_filter = *(u16 *)val;
 		break;
 	case HW_VAR_CTRL_FILTER:
 		rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *)val);
+		mac->rx_ctrl_filter = *(u16 *)val;
 		break;
 	case HW_VAR_DATA_FILTER:
 		rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val);
+		mac->rx_data_filter = *(u16 *)val;
 		break;
 	default:
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
@@ -2280,7 +2220,6 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 	enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
 	u8 u1tmp = 0;
 	bool actuallyset = false;
@@ -2357,20 +2296,7 @@
 		if (ppsc->pwrdown_mode && e_rfpowerstate_toset == ERFOFF) {
 			/* Enable register area 0x0-0xc. */
 			rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
-			if (IS_HARDWARE_TYPE_8723U(rtlhal)) {
-				/*
-				 * We should configure HW PDn source for WiFi
-				 * ONLY, and then our HW will be set in
-				 * power-down mode if PDn source from all
-				 * functions are configured.
-				 */
-				u1tmp = rtl_read_byte(rtlpriv,
-						      REG_MULTI_FUNC_CTRL);
-				rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL,
-					       (u1tmp|WL_HWPDN_EN));
-			} else {
-				rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
-			}
+			rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
 		}
 		if (e_rfpowerstate_toset == ERFOFF) {
 			if (ppsc->reg_rfps_level  & RT_RF_OFF_LEVL_ASPM)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 490a7cf..0357133 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -69,8 +69,6 @@
 		chip_version = NORMAL_CHIP;
 		chip_version |= ((value32 & TYPE_ID) ? CHIP_92C : 0);
 		chip_version |= ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0);
-		/* RTL8723 with BT function. */
-		chip_version |= ((value32 & BT_FUNC) ? CHIP_8723 : 0);
 		if (IS_VENDOR_UMC(chip_version))
 			chip_version |= ((value32 & CHIP_VER_RTL_MASK) ?
 					 CHIP_VENDOR_UMC_B_CUT : 0);
@@ -78,10 +76,6 @@
 			value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
 			chip_version |= ((CHIP_BONDING_IDENTIFIER(value32) ==
 				 CHIP_BONDING_92C_1T2R) ? CHIP_92C_1T2R : 0);
-		} else if (IS_8723_SERIES(chip_version)) {
-			value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS);
-			chip_version |= ((value32 & RF_RL_ID) ?
-					  CHIP_8723_DRV_REV : 0);
 		}
 	}
 	rtlhal->version  = (enum version_8192c)chip_version;
@@ -114,12 +108,6 @@
 	case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
 		versionid = "NORMAL_UMC_CHIP_88C_B_CUT";
 		break;
-	case VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT:
-		versionid = "NORMAL_UMC_CHIP_8723_1T1R_A_CUT";
-		break;
-	case VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT:
-		versionid = "NORMAL_UMC_CHIP_8723_1T1R_B_CUT";
-		break;
 	case VERSION_TEST_CHIP_92C:
 		versionid = "TEST_CHIP_92C";
 		break;
@@ -405,59 +393,9 @@
 void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-	u32 u4b_ac_param;
 
 	rtl92c_dm_init_edca_turbo(hw);
-	u4b_ac_param = (u32) mac->ac[aci].aifs;
-	u4b_ac_param |=
-	    ((u32) le16_to_cpu(mac->ac[aci].cw_min) & 0xF) <<
-	    AC_PARAM_ECW_MIN_OFFSET;
-	u4b_ac_param |=
-	    ((u32) le16_to_cpu(mac->ac[aci].cw_max) & 0xF) <<
-	    AC_PARAM_ECW_MAX_OFFSET;
-	u4b_ac_param |= (u32) le16_to_cpu(mac->ac[aci].tx_op) <<
-			 AC_PARAM_TXOP_OFFSET;
-	RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD, "queue:%x, ac_param:%x\n",
-		 aci, u4b_ac_param);
-	switch (aci) {
-	case AC1_BK:
-		rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
-		break;
-	case AC0_BE:
-		rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param);
-		break;
-	case AC2_VI:
-		rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param);
-		break;
-	case AC3_VO:
-		rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param);
-		break;
-	default:
-		RT_ASSERT(false, "invalid aci: %d !\n", aci);
-		break;
-	}
-}
-
-/*-------------------------------------------------------------------------
- * HW MAC Address
- *-------------------------------------------------------------------------*/
-void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr)
-{
-	u32 i;
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-	for (i = 0 ; i < ETH_ALEN ; i++)
-		rtl_write_byte(rtlpriv, (REG_MACID + i), *(addr+i));
-
-	RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
-		 "MAC Address: %02X-%02X-%02X-%02X-%02X-%02X\n",
-		 rtl_read_byte(rtlpriv, REG_MACID),
-		 rtl_read_byte(rtlpriv, REG_MACID+1),
-		 rtl_read_byte(rtlpriv, REG_MACID+2),
-		 rtl_read_byte(rtlpriv, REG_MACID+3),
-		 rtl_read_byte(rtlpriv, REG_MACID+4),
-		 rtl_read_byte(rtlpriv, REG_MACID+5));
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, (u8 *)&aci);
 }
 
 void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size)
@@ -656,47 +594,6 @@
 	rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, value);
 }
 
-u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-	return rtl_read_word(rtlpriv, REG_RXFLTMAP0);
-}
-
-void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-	rtl_write_word(rtlpriv, REG_RXFLTMAP0, filter);
-}
-
-u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-	return rtl_read_word(rtlpriv, REG_RXFLTMAP1);
-}
-
-void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-	rtl_write_word(rtlpriv, REG_RXFLTMAP1, filter);
-}
-
-u16 rtl92c_get_data_filter(struct ieee80211_hw *hw)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-	return rtl_read_word(rtlpriv,  REG_RXFLTMAP2);
-}
-
-void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-	rtl_write_word(rtlpriv, REG_RXFLTMAP2, filter);
-}
 /*==============================================================*/
 
 static u8 _rtl92c_query_rxpwrpercentage(char antpower)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
index e34f0f1..553a4bf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
@@ -48,7 +48,6 @@
 /*---------------------------------------------------------------
  *	Hardware init functions
  *---------------------------------------------------------------*/
-void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr);
 void rtl92c_init_interrupt(struct ieee80211_hw *hw);
 void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size);
 
@@ -73,15 +72,6 @@
 void rtl92c_disable_fast_edca(struct ieee80211_hw *hw);
 void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T);
 
-/* For filter */
-u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw);
-void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter);
-u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw);
-void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter);
-u16 rtl92c_get_data_filter(struct ieee80211_hw *hw);
-void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter);
-
-
 u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw);
 
 struct rx_fwinfo_92c {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 23806c2..fd4a535 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -321,6 +321,7 @@
 	{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
 	{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
 	{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+	{RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/
 	{RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
 	{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
 	{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index 587b8c5..7c1db7e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -420,7 +420,7 @@
 		 "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n",
 		 de_digtable->recover_cnt, de_digtable->rx_gain_min);
 
-	/* deal with abnorally large false alarm */
+	/* deal with abnormally large false alarm */
 	if (falsealm_cnt->cnt_all > 10000) {
 		RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
 			 "dm_DIG(): Abnormally false alarm case\n");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
index 1646e7c..8a38daa 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
@@ -110,28 +110,6 @@
 #define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val)	\
 	SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val)
 
-struct rtl92d_firmware_header {
-	u16 signature;
-	u8 category;
-	u8 function;
-	u16 version;
-	u8 subversion;
-	u8 rsvd1;
-
-	u8 month;
-	u8 date;
-	u8 hour;
-	u8 minute;
-	u16 ramcodeSize;
-	u16 rsvd2;
-
-	u32 svnindex;
-	u32 rsvd3;
-
-	u32 rsvd4;
-	u32 rsvd5;
-};
-
 int rtl92d_download_fw(struct ieee80211_hw *hw);
 void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
 			 u32 cmd_len, u8 *p_cmdbuffer);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 1961b8e..bb06fe8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3515,14 +3515,14 @@
 	for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
 	     rfpath++) {
 		if (rtlhal->current_bandtype == BAND_ON_2_4G) {
-			/* MOD_AG for RF paht_A 0x18 BIT8,BIT16 */
+			/* MOD_AG for RF path_A 0x18 BIT8,BIT16 */
 			rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) | BIT(16) |
 				      BIT(18), 0);
 			/* RF0x0b[16:14] =3b'111 */
 			rtl_set_rfreg(hw, (enum radio_path)rfpath, 0x0B,
 				      0x1c000, 0x07);
 		} else {
-			/* MOD_AG for RF paht_A 0x18 BIT8,BIT16 */
+			/* MOD_AG for RF path_A 0x18 BIT8,BIT16 */
 			rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) |
 				      BIT(16) | BIT(18),
 				      (BIT(16) | BIT(8)) >> 8);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
index 232865c..0708eed 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
@@ -198,7 +198,7 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	struct rtl92c_firmware_header *pfwheader;
+	struct rtlwifi_firmware_header *pfwheader;
 	u8 *pfwdata;
 	u32 fwsize;
 	int err;
@@ -207,8 +207,8 @@
 	if (!rtlhal->pfirmware)
 		return 1;
 
-	pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
-	rtlhal->fw_version = pfwheader->version;
+	pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+	rtlhal->fw_version = le16_to_cpu(pfwheader->version);
 	rtlhal->fw_subversion = pfwheader->subversion;
 	pfwdata = (u8 *)rtlhal->pfirmware;
 	fwsize = rtlhal->fwsize;
@@ -219,10 +219,10 @@
 		RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
 			 "Firmware Version(%d), Signature(%#x),Size(%d)\n",
 			  pfwheader->version, pfwheader->signature,
-			  (int)sizeof(struct rtl92c_firmware_header));
+			  (int)sizeof(struct rtlwifi_firmware_header));
 
-		pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
-		fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+		pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+		fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
 	} else {
 		RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
 			 "Firmware no Header, Signature(%#x)\n",
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h
index 3e2a48e..069da1e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h
@@ -33,7 +33,7 @@
 #define FW_8192C_POLLING_TIMEOUT_COUNT		3000
 
 #define IS_FW_HEADER_EXIST(_pfwhdr)	\
-	((_pfwhdr->signature&0xFFF0) == 0x92E0)
+	((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x92E0)
 #define USE_OLD_WOWLAN_DEBUG_FW 0
 
 #define H2C_92E_RSVDPAGE_LOC_LEN		5
@@ -89,25 +89,6 @@
 #define	FW_PWR_STATE_ACTIVE	((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
 #define	FW_PWR_STATE_RF_OFF	0
 
-struct rtl92c_firmware_header {
-	u16 signature;
-	u8 category;
-	u8 function;
-	u16 version;
-	u8 subversion;
-	u8 rsvd1;
-	u8 month;
-	u8 date;
-	u8 hour;
-	u8 minute;
-	u16 ramcodesize;
-	u16 rsvd2;
-	u32 svnindex;
-	u32 rsvd3;
-	u32 rsvd4;
-	u32 rsvd5;
-};
-
 enum rtl8192e_h2c_cmd {
 	H2C_92E_RSVDPAGE = 0,
 	H2C_92E_MSRRPT = 1,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c
index a863a44..018340a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c
@@ -449,7 +449,7 @@
 				 "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n",
 				  rate_section, path, txnum);
 			break;
-		};
+		}
 	} else {
 		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
 			 "Invalid Band %d\n", band);
@@ -489,7 +489,7 @@
 				 "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n",
 				  rate_section, path, txnum);
 			break;
-		};
+		}
 	} else {
 		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
 			 "Invalid Band %d()\n", band);
@@ -853,7 +853,7 @@
 		else if (regaddr >= 0xE20 && regaddr <= 0xE4C)
 			index = (u8)((regaddr - 0xE20) / 4);
 		break;
-	};
+	}
 	return index;
 }
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index 8280bab..3859b3e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -205,9 +205,9 @@
 	return true;
 }
 
-static bool is_fw_header(struct rtl8723e_firmware_header *hdr)
+static bool is_fw_header(struct rtlwifi_firmware_header *hdr)
 {
-	return (hdr->signature & 0xfff0) == 0x2300;
+	return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x2300;
 }
 
 static struct rtl_hal_ops rtl8723e_hal_ops = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index 7bf88d9..d091f1d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -209,9 +209,9 @@
 	return true;
 }
 
-static bool is_fw_header(struct rtl8723e_firmware_header *hdr)
+static bool is_fw_header(struct rtlwifi_firmware_header *hdr)
 {
-	return (hdr->signature & 0xfff0) == 0x5300;
+	return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x5300;
 }
 
 static struct rtl_hal_ops rtl8723be_hal_ops = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
index dd698e7..a2f5e89 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
@@ -253,7 +253,7 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	struct rtl8723e_firmware_header *pfwheader;
+	struct rtlwifi_firmware_header *pfwheader;
 	u8 *pfwdata;
 	u32 fwsize;
 	int err;
@@ -263,7 +263,7 @@
 	if (!rtlhal->pfirmware)
 		return 1;
 
-	pfwheader = (struct rtl8723e_firmware_header *)rtlhal->pfirmware;
+	pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
 	pfwdata = rtlhal->pfirmware;
 	fwsize = rtlhal->fwsize;
 
@@ -275,10 +275,10 @@
 		RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
 			 "Firmware Version(%d), Signature(%#x), Size(%d)\n",
 			 pfwheader->version, pfwheader->signature,
-			 (int)sizeof(struct rtl8723e_firmware_header));
+			 (int)sizeof(struct rtlwifi_firmware_header));
 
-		pfwdata = pfwdata + sizeof(struct rtl8723e_firmware_header);
-		fwsize = fwsize - sizeof(struct rtl8723e_firmware_header);
+		pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+		fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
 	}
 
 	if (rtl_read_byte(rtlpriv, REG_MCUFWDL)&BIT(7)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
index 3ebafc8..8ea372d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
@@ -50,25 +50,6 @@
 	VERSION_UNKNOWN = 0xFF,
 };
 
-struct rtl8723e_firmware_header {
-	u16 signature;
-	u8 category;
-	u8 function;
-	u16 version;
-	u8 subversion;
-	u8 rsvd1;
-	u8 month;
-	u8 date;
-	u8 hour;
-	u8 minute;
-	u16 ramcodesize;
-	u16 rsvd2;
-	u32 svnindex;
-	u32 rsvd3;
-	u32 rsvd4;
-	u32 rsvd5;
-};
-
 enum rtl8723be_cmd {
 	H2C_8723BE_RSVDPAGE = 0,
 	H2C_8723BE_JOINBSSRPT = 1,
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c
index 95e9562..525eb23 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c
@@ -210,7 +210,7 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	struct rtl8821a_firmware_header *pfwheader;
+	struct rtlwifi_firmware_header *pfwheader;
 	u8 *pfwdata;
 	u32 fwsize;
 	int err;
@@ -228,8 +228,8 @@
 			return 1;
 
 		pfwheader =
-		  (struct rtl8821a_firmware_header *)rtlhal->wowlan_firmware;
-		rtlhal->fw_version = pfwheader->version;
+		  (struct rtlwifi_firmware_header *)rtlhal->wowlan_firmware;
+		rtlhal->fw_version = le16_to_cpu(pfwheader->version);
 		rtlhal->fw_subversion = pfwheader->subversion;
 		pfwdata = (u8 *)rtlhal->wowlan_firmware;
 		fwsize = rtlhal->wowlan_fwsize;
@@ -238,8 +238,8 @@
 			return 1;
 
 		pfwheader =
-		  (struct rtl8821a_firmware_header *)rtlhal->pfirmware;
-		rtlhal->fw_version = pfwheader->version;
+		  (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+		rtlhal->fw_version = le16_to_cpu(pfwheader->version);
 		rtlhal->fw_subversion = pfwheader->subversion;
 		pfwdata = (u8 *)rtlhal->pfirmware;
 		fwsize = rtlhal->fwsize;
@@ -255,8 +255,8 @@
 			 "Firmware Version(%d), Signature(%#x)\n",
 			 pfwheader->version, pfwheader->signature);
 
-		pfwdata = pfwdata + sizeof(struct rtl8821a_firmware_header);
-		fwsize = fwsize - sizeof(struct rtl8821a_firmware_header);
+		pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+		fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
 	}
 
 	if (rtlhal->mac_func_enable) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h
index 591c14c..8f5b4aa 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h
@@ -34,10 +34,10 @@
 #define FW_8821AE_POLLING_TIMEOUT_COUNT	6000
 
 #define IS_FW_HEADER_EXIST_8812(_pfwhdr)	\
-	((_pfwhdr->signature&0xFFF0) == 0x9500)
+	((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x9500)
 
 #define IS_FW_HEADER_EXIST_8821(_pfwhdr)	\
-	((_pfwhdr->signature&0xFFF0) == 0x2100)
+	((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x2100)
 
 #define USE_OLD_WOWLAN_DEBUG_FW 0
 
@@ -137,25 +137,6 @@
 #define	FW_PWR_STATE_ACTIVE	((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
 #define	FW_PWR_STATE_RF_OFF	0
 
-struct rtl8821a_firmware_header {
-	u16 signature;
-	u8 category;
-	u8 function;
-	u16 version;
-	u8 subversion;
-	u8 rsvd1;
-	u8 month;
-	u8 date;
-	u8 hour;
-	u8 minute;
-	u16 ramcodeSize;
-	u16 rsvd2;
-	u32 svnindex;
-	u32 rsvd3;
-	u32 rsvd4;
-	u32 rsvd5;
-};
-
 enum rtl8812_c2h_evt {
 	C2H_8812_DBG = 0,
 	C2H_8812_LB = 1,
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
index 3236d44..b7f18e21 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
@@ -2180,7 +2180,7 @@
 
 	rtl_write_byte(rtlpriv, MSR, bt_msr);
 	rtlpriv->cfg->ops->led_control(hw, ledaction);
-	if ((bt_msr & 0xfc) == MSR_AP)
+	if ((bt_msr & MSR_MASK) == MSR_AP)
 		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
 	else
 		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
index 53668fc..1d6110f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
@@ -429,6 +429,7 @@
 #define	MSR_ADHOC				0x01
 #define	MSR_INFRA				0x02
 #define	MSR_AP					0x03
+#define MSR_MASK				0x03
 
 #define	RRSR_RSC_OFFSET				21
 #define	RRSR_SHORT_OFFSET			23
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 2b770b5..b90ca61 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -222,6 +222,25 @@
 #define	WOL_REASON_REALWOW_V2_WAKEUPPKT	BIT(9)
 #define	WOL_REASON_REALWOW_V2_ACKLOST	BIT(10)
 
+struct rtlwifi_firmware_header {
+	__le16 signature;
+	u8 category;
+	u8 function;
+	__le16 version;
+	u8 subversion;
+	u8 rsvd1;
+	u8 month;
+	u8 date;
+	u8 hour;
+	u8 minute;
+	__le16 ramcodeSize;
+	__le16 rsvd2;
+	__le32 svnindex;
+	__le32 rsvd3;
+	__le32 rsvd4;
+	__le32 rsvd5;
+};
+
 struct txpower_info_2g {
 	u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
 	u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
@@ -2064,16 +2083,12 @@
 	bool tx_enable_sw_calc_duration;
 };
 
-struct rtl92c_firmware_header;
-
 struct rtl_wow_pattern {
 	u8 type;
 	u16 crc;
 	u32 mask[4];
 };
 
-struct rtl8723e_firmware_header;
-
 struct rtl_hal_ops {
 	int (*init_sw_vars) (struct ieee80211_hw *hw);
 	void (*deinit_sw_vars) (struct ieee80211_hw *hw);
@@ -2177,7 +2192,7 @@
 	void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
 			      u32 cmd_len, u8 *p_cmdbuffer);
 	bool (*get_btc_status) (void);
-	bool (*is_fw_header)(struct rtl8723e_firmware_header *hdr);
+	bool (*is_fw_header)(struct rtlwifi_firmware_header *hdr);
 	u32 (*rx_command_packet)(struct ieee80211_hw *hw,
 				 struct rtl_stats status, struct sk_buff *skb);
 	void (*add_wowlan_pattern)(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
index 0c0d5cd..7c355ff 100644
--- a/drivers/net/wireless/ti/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -118,7 +118,11 @@
 	if (passive)
 		scan_options |= WL1271_SCAN_OPT_PASSIVE;
 
-	cmd->params.role_id = wlvif->role_id;
+	/* scan on the dev role if the regular one is not started */
+	if (wlcore_is_p2p_mgmt(wlvif))
+		cmd->params.role_id = wlvif->dev_role_id;
+	else
+		cmd->params.role_id = wlvif->role_id;
 
 	if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
 		ret = -EINVAL;
diff --git a/drivers/net/wireless/ti/wl18xx/acx.c b/drivers/net/wireless/ti/wl18xx/acx.c
index 67f2a0e..4be0409 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.c
+++ b/drivers/net/wireless/ti/wl18xx/acx.c
@@ -282,3 +282,30 @@
 	kfree(acx);
 	return ret;
 }
+
+int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl)
+{
+	struct acx_dynamic_fw_traces_cfg *acx;
+	int ret;
+
+	wl1271_debug(DEBUG_ACX, "acx dynamic fw traces config %d",
+		     wl->dynamic_fw_traces);
+
+	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+	if (!acx) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	acx->dynamic_fw_traces = cpu_to_le32(wl->dynamic_fw_traces);
+
+	ret = wl1271_cmd_configure(wl, ACX_DYNAMIC_TRACES_CFG,
+				   acx, sizeof(*acx));
+	if (ret < 0) {
+		wl1271_warning("acx config dynamic fw traces failed: %d", ret);
+		goto out;
+	}
+out:
+	kfree(acx);
+	return ret;
+}
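
wl18xx_acx_dynamic_fw_traces() above follows the usual wlcore ACX pattern: allocate a zeroed command buffer, fill the little-endian payload, send it with wl1271_cmd_configure(), and free the buffer on both the success and the error path. A condensed userspace sketch of that single-cleanup-point idiom (send_cmd() is a stub standing in for the real command call):

#include <stdlib.h>

struct cmd { unsigned int value; };

static int send_cmd(struct cmd *c) { (void)c; return 0; }	/* stub */

static int configure(unsigned int value)
{
	struct cmd *c = calloc(1, sizeof(*c));
	int ret;

	if (!c)
		return -1;	/* -ENOMEM in the kernel version */

	c->value = value;
	ret = send_cmd(c);	/* success or failure, fall through */

	free(c);		/* single cleanup point, like the out: label */
	return ret;
}

int main(void) { return configure(1) < 0; }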
diff --git a/drivers/net/wireless/ti/wl18xx/acx.h b/drivers/net/wireless/ti/wl18xx/acx.h
index 4afccd4..342a299 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.h
+++ b/drivers/net/wireless/ti/wl18xx/acx.h
@@ -35,7 +35,8 @@
 	ACX_PEER_CAP			 = 0x0056,
 	ACX_INTERRUPT_NOTIFY		 = 0x0057,
 	ACX_RX_BA_FILTER		 = 0x0058,
-	ACX_AP_SLEEP_CFG                 = 0x0059
+	ACX_AP_SLEEP_CFG                 = 0x0059,
+	ACX_DYNAMIC_TRACES_CFG		 = 0x005A,
 };
 
 /* numbers of bits the length field takes (add 1 for the actual number) */
@@ -92,27 +93,26 @@
 
 
 struct wl18xx_acx_error_stats {
-	u32 error_frame;
-	u32 error_null_Frame_tx_start;
-	u32 error_numll_frame_cts_start;
-	u32 error_bar_retry;
-	u32 error_frame_cts_nul_flid;
+	u32 error_frame_non_ctrl;
+	u32 error_frame_ctrl;
+	u32 error_frame_during_protection;
+	u32 null_frame_tx_start;
+	u32 null_frame_cts_start;
+	u32 bar_retry;
+	u32 num_frame_cts_nul_flid;
+	u32 tx_abort_failure;
+	u32 tx_resume_failure;
+	u32 rx_cmplt_db_overflow_cnt;
+	u32 elp_while_rx_exch;
+	u32 elp_while_tx_exch;
+	u32 elp_while_tx;
+	u32 elp_while_nvic_pending;
+	u32 rx_excessive_frame_len;
+	u32 burst_mismatch;
+	u32 tbc_exch_mismatch;
 } __packed;
 
-struct wl18xx_acx_debug_stats {
-	u32 debug1;
-	u32 debug2;
-	u32 debug3;
-	u32 debug4;
-	u32 debug5;
-	u32 debug6;
-} __packed;
-
-struct wl18xx_acx_ring_stats {
-	u32 prepared_descs;
-	u32 tx_cmplt;
-} __packed;
-
+#define NUM_OF_RATES_INDEXES 30
 struct wl18xx_acx_tx_stats {
 	u32 tx_prepared_descs;
 	u32 tx_cmplt;
@@ -122,7 +122,7 @@
 	u32 tx_data_programmed;
 	u32 tx_burst_programmed;
 	u32 tx_starts;
-	u32 tx_imm_resp;
+	u32 tx_stop;
 	u32 tx_start_templates;
 	u32 tx_start_int_templates;
 	u32 tx_start_fw_gen;
@@ -131,13 +131,14 @@
 	u32 tx_exch;
 	u32 tx_retry_template;
 	u32 tx_retry_data;
+	u32 tx_retry_per_rate[NUM_OF_RATES_INDEXES];
 	u32 tx_exch_pending;
 	u32 tx_exch_expiry;
 	u32 tx_done_template;
 	u32 tx_done_data;
 	u32 tx_done_int_template;
-	u32 tx_frame_checksum;
-	u32 tx_checksum_result;
+	u32 tx_cfe1;
+	u32 tx_cfe2;
 	u32 frag_called;
 	u32 frag_mpdu_alloc_failed;
 	u32 frag_init_called;
@@ -165,11 +166,8 @@
 	u32 rx_cmplt_task;
 	u32 rx_phy_hdr;
 	u32 rx_timeout;
+	u32 rx_rts_timeout;
 	u32 rx_timeout_wa;
-	u32 rx_wa_density_dropped_frame;
-	u32 rx_wa_ba_not_expected;
-	u32 rx_frame_checksum;
-	u32 rx_checksum_result;
 	u32 defrag_called;
 	u32 defrag_init_called;
 	u32 defrag_in_process_called;
@@ -179,6 +177,7 @@
 	u32 decrypt_key_not_found;
 	u32 defrag_need_decrypt;
 	u32 rx_tkip_replays;
+	u32 rx_xfr;
 } __packed;
 
 struct wl18xx_acx_isr_stats {
@@ -193,21 +192,13 @@
 	u32 connection_out_of_sync;
 	u32 cont_miss_bcns_spread[PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD];
 	u32 rcvd_awake_bcns_cnt;
-} __packed;
-
-struct wl18xx_acx_event_stats {
-	u32 calibration;
-	u32 rx_mismatch;
-	u32 rx_mem_empty;
-} __packed;
-
-struct wl18xx_acx_ps_poll_stats {
-	u32 ps_poll_timeouts;
-	u32 upsd_timeouts;
-	u32 upsd_max_ap_turn;
-	u32 ps_poll_max_ap_turn;
-	u32 ps_poll_utilization;
-	u32 upsd_utilization;
+	u32 sleep_time_count;
+	u32 sleep_time_avg;
+	u32 sleep_cycle_avg;
+	u32 sleep_percent;
+	u32 ap_sleep_active_conf;
+	u32 ap_sleep_user_conf;
+	u32 ap_sleep_counter;
 } __packed;
 
 struct wl18xx_acx_rx_filter_stats {
@@ -227,11 +218,11 @@
 } __packed;
 
 #define AGGR_STATS_TX_AGG	16
-#define AGGR_STATS_TX_RATE	16
 #define AGGR_STATS_RX_SIZE_LEN	16
 
 struct wl18xx_acx_aggr_stats {
-	u32 tx_agg_vs_rate[AGGR_STATS_TX_AGG * AGGR_STATS_TX_RATE];
+	u32 tx_agg_rate[AGGR_STATS_TX_AGG];
+	u32 tx_agg_len[AGGR_STATS_TX_AGG];
 	u32 rx_size[AGGR_STATS_RX_SIZE_LEN];
 } __packed;
 
@@ -240,8 +231,6 @@
 struct wl18xx_acx_pipeline_stats {
 	u32 hs_tx_stat_fifo_int;
 	u32 hs_rx_stat_fifo_int;
-	u32 tcp_tx_stat_fifo_int;
-	u32 tcp_rx_stat_fifo_int;
 	u32 enc_tx_stat_fifo_int;
 	u32 enc_rx_stat_fifo_int;
 	u32 rx_complete_stat_fifo_int;
@@ -249,38 +238,61 @@
 	u32 post_proc_swi;
 	u32 sec_frag_swi;
 	u32 pre_to_defrag_swi;
-	u32 defrag_to_csum_swi;
-	u32 csum_to_rx_xfer_swi;
+	u32 defrag_to_rx_xfer_swi;
 	u32 dec_packet_in;
 	u32 dec_packet_in_fifo_full;
 	u32 dec_packet_out;
-	u32 cs_rx_packet_in;
-	u32 cs_rx_packet_out;
 	u16 pipeline_fifo_full[PIPE_STATS_HW_FIFO];
+	u16 padding;
 } __packed;
 
-struct wl18xx_acx_mem_stats {
-	u32 rx_free_mem_blks;
-	u32 tx_free_mem_blks;
-	u32 fwlog_free_mem_blks;
-	u32 fw_gen_free_mem_blks;
+#define DIVERSITY_STATS_NUM_OF_ANT	2
+
+struct wl18xx_acx_diversity_stats {
+	u32 num_of_packets_per_ant[DIVERSITY_STATS_NUM_OF_ANT];
+	u32 total_num_of_toggles;
+} __packed;
+
+struct wl18xx_acx_thermal_stats {
+	u16 irq_thr_low;
+	u16 irq_thr_high;
+	u16 tx_stop;
+	u16 tx_resume;
+	u16 false_irq;
+	u16 adc_source_unexpected;
+} __packed;
+
+#define WL18XX_NUM_OF_CALIBRATIONS_ERRORS 18
+struct wl18xx_acx_calib_failure_stats {
+	u16 fail_count[WL18XX_NUM_OF_CALIBRATIONS_ERRORS];
+	u32 calib_count;
+} __packed;
+
+struct wl18xx_roaming_stats {
+	s32 rssi_level;
+} __packed;
+
+struct wl18xx_dfs_stats {
+	u32 num_of_radar_detections;
 } __packed;
 
 struct wl18xx_acx_statistics {
 	struct acx_header header;
 
 	struct wl18xx_acx_error_stats		error;
-	struct wl18xx_acx_debug_stats		debug;
 	struct wl18xx_acx_tx_stats		tx;
 	struct wl18xx_acx_rx_stats		rx;
 	struct wl18xx_acx_isr_stats		isr;
 	struct wl18xx_acx_pwr_stats		pwr;
-	struct wl18xx_acx_ps_poll_stats		ps_poll;
 	struct wl18xx_acx_rx_filter_stats	rx_filter;
 	struct wl18xx_acx_rx_rate_stats		rx_rate;
 	struct wl18xx_acx_aggr_stats		aggr_size;
 	struct wl18xx_acx_pipeline_stats	pipeline;
-	struct wl18xx_acx_mem_stats		mem;
+	struct wl18xx_acx_diversity_stats	diversity;
+	struct wl18xx_acx_thermal_stats		thermal;
+	struct wl18xx_acx_calib_failure_stats	calib;
+	struct wl18xx_roaming_stats		roaming;
+	struct wl18xx_dfs_stats			dfs;
 } __packed;
 
 struct wl18xx_acx_clear_statistics {
@@ -367,6 +379,15 @@
 	u8 idle_conn_thresh;
 } __packed;
 
+/*
+ * ACX_DYNAMIC_TRACES_CFG
+ * configure the FW dynamic traces
+ */
+struct acx_dynamic_fw_traces_cfg {
+	struct acx_header header;
+	__le32 dynamic_fw_traces;
+} __packed;
+
 int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
 				  u32 sdio_blk_size, u32 extra_mem_blks,
 				  u32 len_field_size);
@@ -380,5 +401,6 @@
 int wl18xx_acx_interrupt_notify_config(struct wl1271 *wl, bool action);
 int wl18xx_acx_rx_ba_filter(struct wl1271 *wl, bool action);
 int wl18xx_acx_ap_sleep(struct wl1271 *wl);
+int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl);
 
 #endif /* __WL18XX_ACX_H__ */
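
All of the reshaped statistics blocks above are parsed directly out of a firmware buffer, so each struct is __packed and must match the firmware layout byte for byte; that is why wl18xx_acx_pipeline_stats gains an explicit u16 padding member after its odd-sized u16 array. A small sketch of the alignment concern, with illustrative sizes only (the real PIPE_STATS_HW_FIFO value is not shown in this hunk):

#include <stdint.h>
#include <stdio.h>

struct pipeline_stats {
	uint32_t counters[10];	/* stand-ins for the u32 fields */
	uint16_t fifo_full[3];	/* odd-length u16 array, as assumed here */
	uint16_t padding;	/* restores 32-bit alignment for what follows */
} __attribute__((packed));

int main(void)
{
	printf("sizeof = %zu, multiple of 4: %s\n",
	       sizeof(struct pipeline_stats),
	       sizeof(struct pipeline_stats) % 4 == 0 ? "yes" : "no");
	return 0;
}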
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index 5fbd223..4edfe28 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -36,18 +36,23 @@
 	DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c, wl18xx_acx_statistics)
 
 
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug1, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug2, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug3, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug4, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug5, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug6, "%u");
-
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_null_Frame_tx_start, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_numll_frame_cts_start, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_bar_retry, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_cts_nul_flid, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_non_ctrl, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_ctrl, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_during_protection, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_tx_start, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_cts_start, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, bar_retry, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, num_frame_cts_nul_flid, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tx_abort_failure, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tx_resume_failure, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_cmplt_db_overflow_cnt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_rx_exch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_tx_exch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_tx, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_nvic_pending, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_excessive_frame_len, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, burst_mismatch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tbc_exch_mismatch, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_prepared_descs, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cmplt, "%u");
@@ -57,7 +62,7 @@
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_programmed, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_burst_programmed, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_starts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_imm_resp, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_stop, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_templates, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_int_templates, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_fw_gen, "%u");
@@ -66,13 +71,15 @@
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_template, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_data, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(tx, tx_retry_per_rate,
+				  NUM_OF_RATES_INDEXES);
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_pending, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_expiry, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_template, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_data, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_int_template, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_frame_checksum, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_checksum_result, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe1, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe2, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_called, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_mpdu_alloc_failed, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_init_called, "%u");
@@ -97,11 +104,8 @@
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt_task, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_phy_hdr, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_rts_timeout, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout_wa, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_density_dropped_frame, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_ba_not_expected, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_frame_checksum, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_checksum_result, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_called, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_init_called, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_in_process_called, "%u");
@@ -111,6 +115,7 @@
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, decrypt_key_not_found, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_decrypt, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_tkip_replays, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_xfr, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
 
@@ -120,14 +125,13 @@
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pwr, cont_miss_bcns_spread,
 				  PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD);
 WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_bcns_cnt, "%u");
-
-
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_timeouts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_timeouts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_max_ap_turn, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_max_ap_turn, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_utilization, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_utilization, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_count, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_avg, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_cycle_avg, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_percent, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_active_conf, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_user_conf, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_counter, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, beacon_filter, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, arp_filter, "%u");
@@ -141,14 +145,14 @@
 
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
 
-WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
-				  AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_rate,
+				  AGGR_STATS_TX_AGG);
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_len,
+				  AGGR_STATS_TX_AGG);
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, rx_size,
 				  AGGR_STATS_RX_SIZE_LEN);
 
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, hs_tx_stat_fifo_int, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_tx_stat_fifo_int, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_rx_stat_fifo_int, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_tx_stat_fifo_int, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_rx_stat_fifo_int, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, rx_complete_stat_fifo_int, "%u");
@@ -156,21 +160,32 @@
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, post_proc_swi, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, sec_frag_swi, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_to_defrag_swi, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_csum_swi, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, csum_to_rx_xfer_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_rx_xfer_swi, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in_fifo_full, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_out, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_in, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_out, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pipeline, pipeline_fifo_full,
 				  PIPE_STATS_HW_FIFO);
 
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, rx_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, tx_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, fwlog_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, fw_gen_free_mem_blks, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(diversity, num_of_packets_per_ant,
+				  DIVERSITY_STATS_NUM_OF_ANT);
+WL18XX_DEBUGFS_FWSTATS_FILE(diversity, total_num_of_toggles, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_low, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_high, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_stop, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_resume, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, false_irq, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, adc_source_unexpected, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(calib, fail_count,
+				  WL18XX_NUM_OF_CALIBRATIONS_ERRORS);
+WL18XX_DEBUGFS_FWSTATS_FILE(calib, calib_count, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(roaming, rssi_level, "%d");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(dfs, num_of_radar_detections, "%d");
 
 static ssize_t conf_read(struct file *file, char __user *user_buf,
 			 size_t count, loff_t *ppos)
@@ -281,6 +296,55 @@
 	.llseek = default_llseek,
 };
 
+static ssize_t dynamic_fw_traces_write(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct wl1271 *wl = file->private_data;
+	unsigned long value;
+	int ret;
+
+	ret = kstrtoul_from_user(user_buf, count, 0, &value);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&wl->mutex);
+
+	wl->dynamic_fw_traces = value;
+
+	if (unlikely(wl->state != WLCORE_STATE_ON))
+		goto out;
+
+	ret = wl1271_ps_elp_wakeup(wl);
+	if (ret < 0)
+		goto out;
+
+	ret = wl18xx_acx_dynamic_fw_traces(wl);
+	if (ret < 0)
+		count = ret;
+
+	wl1271_ps_elp_sleep(wl);
+out:
+	mutex_unlock(&wl->mutex);
+	return count;
+}
+
+static ssize_t dynamic_fw_traces_read(struct file *file,
+					char __user *userbuf,
+					size_t count, loff_t *ppos)
+{
+	struct wl1271 *wl = file->private_data;
+	return wl1271_format_buffer(userbuf, count, ppos,
+				    "%d\n", wl->dynamic_fw_traces);
+}
+
+static const struct file_operations dynamic_fw_traces_ops = {
+	.read = dynamic_fw_traces_read,
+	.write = dynamic_fw_traces_write,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
+
 int wl18xx_debugfs_add_files(struct wl1271 *wl,
 			     struct dentry *rootdir)
 {
@@ -301,18 +365,23 @@
 
 	DEBUGFS_ADD(clear_fw_stats, stats);
 
-	DEBUGFS_FWSTATS_ADD(debug, debug1);
-	DEBUGFS_FWSTATS_ADD(debug, debug2);
-	DEBUGFS_FWSTATS_ADD(debug, debug3);
-	DEBUGFS_FWSTATS_ADD(debug, debug4);
-	DEBUGFS_FWSTATS_ADD(debug, debug5);
-	DEBUGFS_FWSTATS_ADD(debug, debug6);
-
-	DEBUGFS_FWSTATS_ADD(error, error_frame);
-	DEBUGFS_FWSTATS_ADD(error, error_null_Frame_tx_start);
-	DEBUGFS_FWSTATS_ADD(error, error_numll_frame_cts_start);
-	DEBUGFS_FWSTATS_ADD(error, error_bar_retry);
-	DEBUGFS_FWSTATS_ADD(error, error_frame_cts_nul_flid);
+	DEBUGFS_FWSTATS_ADD(error, error_frame_non_ctrl);
+	DEBUGFS_FWSTATS_ADD(error, error_frame_ctrl);
+	DEBUGFS_FWSTATS_ADD(error, error_frame_during_protection);
+	DEBUGFS_FWSTATS_ADD(error, null_frame_tx_start);
+	DEBUGFS_FWSTATS_ADD(error, null_frame_cts_start);
+	DEBUGFS_FWSTATS_ADD(error, bar_retry);
+	DEBUGFS_FWSTATS_ADD(error, num_frame_cts_nul_flid);
+	DEBUGFS_FWSTATS_ADD(error, tx_abort_failure);
+	DEBUGFS_FWSTATS_ADD(error, tx_resume_failure);
+	DEBUGFS_FWSTATS_ADD(error, rx_cmplt_db_overflow_cnt);
+	DEBUGFS_FWSTATS_ADD(error, elp_while_rx_exch);
+	DEBUGFS_FWSTATS_ADD(error, elp_while_tx_exch);
+	DEBUGFS_FWSTATS_ADD(error, elp_while_tx);
+	DEBUGFS_FWSTATS_ADD(error, elp_while_nvic_pending);
+	DEBUGFS_FWSTATS_ADD(error, rx_excessive_frame_len);
+	DEBUGFS_FWSTATS_ADD(error, burst_mismatch);
+	DEBUGFS_FWSTATS_ADD(error, tbc_exch_mismatch);
 
 	DEBUGFS_FWSTATS_ADD(tx, tx_prepared_descs);
 	DEBUGFS_FWSTATS_ADD(tx, tx_cmplt);
@@ -322,7 +391,7 @@
 	DEBUGFS_FWSTATS_ADD(tx, tx_data_programmed);
 	DEBUGFS_FWSTATS_ADD(tx, tx_burst_programmed);
 	DEBUGFS_FWSTATS_ADD(tx, tx_starts);
-	DEBUGFS_FWSTATS_ADD(tx, tx_imm_resp);
+	DEBUGFS_FWSTATS_ADD(tx, tx_stop);
 	DEBUGFS_FWSTATS_ADD(tx, tx_start_templates);
 	DEBUGFS_FWSTATS_ADD(tx, tx_start_int_templates);
 	DEBUGFS_FWSTATS_ADD(tx, tx_start_fw_gen);
@@ -331,13 +400,14 @@
 	DEBUGFS_FWSTATS_ADD(tx, tx_exch);
 	DEBUGFS_FWSTATS_ADD(tx, tx_retry_template);
 	DEBUGFS_FWSTATS_ADD(tx, tx_retry_data);
+	DEBUGFS_FWSTATS_ADD(tx, tx_retry_per_rate);
 	DEBUGFS_FWSTATS_ADD(tx, tx_exch_pending);
 	DEBUGFS_FWSTATS_ADD(tx, tx_exch_expiry);
 	DEBUGFS_FWSTATS_ADD(tx, tx_done_template);
 	DEBUGFS_FWSTATS_ADD(tx, tx_done_data);
 	DEBUGFS_FWSTATS_ADD(tx, tx_done_int_template);
-	DEBUGFS_FWSTATS_ADD(tx, tx_frame_checksum);
-	DEBUGFS_FWSTATS_ADD(tx, tx_checksum_result);
+	DEBUGFS_FWSTATS_ADD(tx, tx_cfe1);
+	DEBUGFS_FWSTATS_ADD(tx, tx_cfe2);
 	DEBUGFS_FWSTATS_ADD(tx, frag_called);
 	DEBUGFS_FWSTATS_ADD(tx, frag_mpdu_alloc_failed);
 	DEBUGFS_FWSTATS_ADD(tx, frag_init_called);
@@ -362,11 +432,8 @@
 	DEBUGFS_FWSTATS_ADD(rx, rx_cmplt_task);
 	DEBUGFS_FWSTATS_ADD(rx, rx_phy_hdr);
 	DEBUGFS_FWSTATS_ADD(rx, rx_timeout);
+	DEBUGFS_FWSTATS_ADD(rx, rx_rts_timeout);
 	DEBUGFS_FWSTATS_ADD(rx, rx_timeout_wa);
-	DEBUGFS_FWSTATS_ADD(rx, rx_wa_density_dropped_frame);
-	DEBUGFS_FWSTATS_ADD(rx, rx_wa_ba_not_expected);
-	DEBUGFS_FWSTATS_ADD(rx, rx_frame_checksum);
-	DEBUGFS_FWSTATS_ADD(rx, rx_checksum_result);
 	DEBUGFS_FWSTATS_ADD(rx, defrag_called);
 	DEBUGFS_FWSTATS_ADD(rx, defrag_init_called);
 	DEBUGFS_FWSTATS_ADD(rx, defrag_in_process_called);
@@ -376,6 +443,7 @@
 	DEBUGFS_FWSTATS_ADD(rx, decrypt_key_not_found);
 	DEBUGFS_FWSTATS_ADD(rx, defrag_need_decrypt);
 	DEBUGFS_FWSTATS_ADD(rx, rx_tkip_replays);
+	DEBUGFS_FWSTATS_ADD(rx, rx_xfr);
 
 	DEBUGFS_FWSTATS_ADD(isr, irqs);
 
@@ -384,13 +452,13 @@
 	DEBUGFS_FWSTATS_ADD(pwr, connection_out_of_sync);
 	DEBUGFS_FWSTATS_ADD(pwr, cont_miss_bcns_spread);
 	DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_bcns_cnt);
-
-	DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_timeouts);
-	DEBUGFS_FWSTATS_ADD(ps_poll, upsd_timeouts);
-	DEBUGFS_FWSTATS_ADD(ps_poll, upsd_max_ap_turn);
-	DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_max_ap_turn);
-	DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_utilization);
-	DEBUGFS_FWSTATS_ADD(ps_poll, upsd_utilization);
+	DEBUGFS_FWSTATS_ADD(pwr, sleep_time_count);
+	DEBUGFS_FWSTATS_ADD(pwr, sleep_time_avg);
+	DEBUGFS_FWSTATS_ADD(pwr, sleep_cycle_avg);
+	DEBUGFS_FWSTATS_ADD(pwr, sleep_percent);
+	DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_active_conf);
+	DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_user_conf);
+	DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_counter);
 
 	DEBUGFS_FWSTATS_ADD(rx_filter, beacon_filter);
 	DEBUGFS_FWSTATS_ADD(rx_filter, arp_filter);
@@ -404,12 +472,11 @@
 
 	DEBUGFS_FWSTATS_ADD(rx_rate, rx_frames_per_rates);
 
-	DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_vs_rate);
+	DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_rate);
+	DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_len);
 	DEBUGFS_FWSTATS_ADD(aggr_size, rx_size);
 
 	DEBUGFS_FWSTATS_ADD(pipeline, hs_tx_stat_fifo_int);
-	DEBUGFS_FWSTATS_ADD(pipeline, tcp_tx_stat_fifo_int);
-	DEBUGFS_FWSTATS_ADD(pipeline, tcp_rx_stat_fifo_int);
 	DEBUGFS_FWSTATS_ADD(pipeline, enc_tx_stat_fifo_int);
 	DEBUGFS_FWSTATS_ADD(pipeline, enc_rx_stat_fifo_int);
 	DEBUGFS_FWSTATS_ADD(pipeline, rx_complete_stat_fifo_int);
@@ -417,22 +484,33 @@
 	DEBUGFS_FWSTATS_ADD(pipeline, post_proc_swi);
 	DEBUGFS_FWSTATS_ADD(pipeline, sec_frag_swi);
 	DEBUGFS_FWSTATS_ADD(pipeline, pre_to_defrag_swi);
-	DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_csum_swi);
-	DEBUGFS_FWSTATS_ADD(pipeline, csum_to_rx_xfer_swi);
+	DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_rx_xfer_swi);
 	DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in);
 	DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in_fifo_full);
 	DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_out);
-	DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_in);
-	DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_out);
 	DEBUGFS_FWSTATS_ADD(pipeline, pipeline_fifo_full);
 
-	DEBUGFS_FWSTATS_ADD(mem, rx_free_mem_blks);
-	DEBUGFS_FWSTATS_ADD(mem, tx_free_mem_blks);
-	DEBUGFS_FWSTATS_ADD(mem, fwlog_free_mem_blks);
-	DEBUGFS_FWSTATS_ADD(mem, fw_gen_free_mem_blks);
+	DEBUGFS_FWSTATS_ADD(diversity, num_of_packets_per_ant);
+	DEBUGFS_FWSTATS_ADD(diversity, total_num_of_toggles);
+
+	DEBUGFS_FWSTATS_ADD(thermal, irq_thr_low);
+	DEBUGFS_FWSTATS_ADD(thermal, irq_thr_high);
+	DEBUGFS_FWSTATS_ADD(thermal, tx_stop);
+	DEBUGFS_FWSTATS_ADD(thermal, tx_resume);
+	DEBUGFS_FWSTATS_ADD(thermal, false_irq);
+	DEBUGFS_FWSTATS_ADD(thermal, adc_source_unexpected);
+
+	DEBUGFS_FWSTATS_ADD(calib, fail_count);
+
+	DEBUGFS_FWSTATS_ADD(calib, calib_count);
+
+	DEBUGFS_FWSTATS_ADD(roaming, rssi_level);
+
+	DEBUGFS_FWSTATS_ADD(dfs, num_of_radar_detections);
 
 	DEBUGFS_ADD(conf, moddir);
 	DEBUGFS_ADD(radar_detection, moddir);
+	DEBUGFS_ADD(dynamic_fw_traces, moddir);
 
 	return 0;
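
The new dynamic_fw_traces entry is a plain read/write debugfs file backed by kstrtoul_from_user() with base 0, so it accepts decimal or 0x-prefixed values. A hypothetical userspace exerciser (the exact debugfs path depends on the platform and is assumed here):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical path; the module dir location is platform specific */
	const char *path = "/sys/kernel/debug/.../wl18xx/dynamic_fw_traces";
	char buf[16];
	ssize_t n;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return 1;
	if (write(fd, "3", 1) != 1)	/* set a trace bitmap of 0x3 */
		perror("write");
	lseek(fd, 0, SEEK_SET);
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("dynamic_fw_traces = %s", buf);
	}
	close(fd);
	return 0;
}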
 
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index 548bb9e..09c7e09 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -112,6 +112,14 @@
 	return 0;
 }
 
+static void wlcore_event_time_sync(struct wl1271 *wl, u16 tsf_msb, u16 tsf_lsb)
+{
+	u32 clock;
+	/* convert the MSB+LSB to a u32 TSF value */
+	clock = (tsf_msb << 16) | tsf_lsb;
+	wl1271_info("TIME_SYNC_EVENT_ID: clock %u", clock);
+}
+
 int wl18xx_process_mailbox_events(struct wl1271 *wl)
 {
 	struct wl18xx_event_mailbox *mbox = wl->mbox;
@@ -128,6 +136,11 @@
 			wl18xx_scan_completed(wl, wl->scan_wlvif);
 	}
 
+	if (vector & TIME_SYNC_EVENT_ID)
+		wlcore_event_time_sync(wl,
+				mbox->time_sync_tsf_msb,
+				mbox->time_sync_tsf_lsb);
+
 	if (vector & RADAR_DETECTED_EVENT_ID) {
 		wl1271_info("radar event: channel %d type %s",
 			    mbox->radar_channel,
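
wlcore_event_time_sync() above recombines the two 16-bit mailbox words into one 32-bit TSF value. The same operation as a standalone sketch (the mailbox values are hypothetical):

#include <stdint.h>
#include <stdio.h>

static uint32_t combine_tsf(uint16_t msb, uint16_t lsb)
{
	/* widen before shifting so the high half is not truncated */
	return ((uint32_t)msb << 16) | lsb;
}

int main(void)
{
	printf("clock %u\n", combine_tsf(0x0001, 0x86A0));	/* 100000 */
	return 0;
}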
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
index 266ee87..f3d4f13 100644
--- a/drivers/net/wireless/ti/wl18xx/event.h
+++ b/drivers/net/wireless/ti/wl18xx/event.h
@@ -38,8 +38,9 @@
 	REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID      = BIT(18),
 	DFS_CHANNELS_CONFIG_COMPLETE_EVENT       = BIT(19),
 	PERIODIC_SCAN_REPORT_EVENT_ID            = BIT(20),
-	SMART_CONFIG_SYNC_EVENT_ID		 = BIT(22),
-	SMART_CONFIG_DECODE_EVENT_ID		 = BIT(23),
+	SMART_CONFIG_SYNC_EVENT_ID               = BIT(22),
+	SMART_CONFIG_DECODE_EVENT_ID             = BIT(23),
+	TIME_SYNC_EVENT_ID                       = BIT(24),
 };
 
 enum wl18xx_radar_types {
@@ -95,13 +96,16 @@
 	/* smart config sync channel */
 	u8 sc_sync_channel;
 	u8 sc_sync_band;
-	u8 padding2[2];
 
+	/* time sync msb */
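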
+	u16 time_sync_tsf_msb;
 	/* radar detect */
 	u8 radar_channel;
 	u8 radar_type;
 
-	u8 padding3[2];
+	/* time sync lsb */
+	u16 time_sync_tsf_lsb;
+
 } __packed;
 
 int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 49aca2c..abbf054 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -422,6 +422,8 @@
 		.num_probe_reqs			= 2,
 		.rssi_threshold			= -90,
 		.snr_threshold			= 0,
+		.num_short_intervals		= SCAN_MAX_SHORT_INTERVALS,
+		.long_interval			= 30000,
 	},
 	.ht = {
 		.rx_ba_win_size = 32,
@@ -1026,8 +1028,8 @@
 		CHANNEL_SWITCH_COMPLETE_EVENT_ID |
 		DFS_CHANNELS_CONFIG_COMPLETE_EVENT |
 		SMART_CONFIG_SYNC_EVENT_ID |
-		SMART_CONFIG_DECODE_EVENT_ID;
-;
+		SMART_CONFIG_DECODE_EVENT_ID |
+		TIME_SYNC_EVENT_ID;
 
 	wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID;
 
@@ -1159,6 +1161,11 @@
 	if (ret < 0)
 		return ret;
 
+	/* set the dynamic fw traces bitmap */
+	ret = wl18xx_acx_dynamic_fw_traces(wl);
+	if (ret < 0)
+		return ret;
+
 	if (checksum_param) {
 		ret = wl18xx_acx_set_checksum_state(wl);
 		if (ret != 0)
@@ -1797,7 +1804,7 @@
 
 static const struct ieee80211_iface_limit wl18xx_iface_limits[] = {
 	{
-		.max = 3,
+		.max = 2,
 		.types = BIT(NL80211_IFTYPE_STATION),
 	},
 	{
@@ -1806,6 +1813,10 @@
 			 BIT(NL80211_IFTYPE_P2P_GO) |
 			 BIT(NL80211_IFTYPE_P2P_CLIENT),
 	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
 };
 
 static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
@@ -1813,6 +1824,48 @@
 		.max = 2,
 		.types = BIT(NL80211_IFTYPE_AP),
 	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
+};
+
+static const struct ieee80211_iface_limit wl18xx_iface_ap_cl_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_AP),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
+};
+
+static const struct ieee80211_iface_limit wl18xx_iface_ap_go_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_AP),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_GO),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
 };
 
 static const struct ieee80211_iface_combination
diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c
index 98666f2..c938c49 100644
--- a/drivers/net/wireless/ti/wl18xx/scan.c
+++ b/drivers/net/wireless/ti/wl18xx/scan.c
@@ -51,7 +51,11 @@
 		goto out;
 	}
 
-	cmd->role_id = wlvif->role_id;
+	/* scan on the dev role if the regular one is not started */
+	if (wlcore_is_p2p_mgmt(wlvif))
+		cmd->role_id = wlvif->dev_role_id;
+	else
+		cmd->role_id = wlvif->role_id;
 
 	if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) {
 		ret = -EINVAL;
@@ -223,9 +227,20 @@
 				    SCAN_TYPE_PERIODIC);
 	wl18xx_adjust_channels(cmd, cmd_channels);
 
-	cmd->short_cycles_sec = 0;
-	cmd->long_cycles_sec = cpu_to_le16(req->interval);
-	cmd->short_cycles_count = 0;
+	if (c->num_short_intervals && c->long_interval &&
+	    c->long_interval > req->interval) {
+		cmd->short_cycles_msec = cpu_to_le16(req->interval);
+		cmd->long_cycles_msec = cpu_to_le16(c->long_interval);
+		cmd->short_cycles_count = c->num_short_intervals;
+	} else {
+		cmd->short_cycles_msec = 0;
+		cmd->long_cycles_msec = cpu_to_le16(req->interval);
+		cmd->short_cycles_count = 0;
+	}
+	wl1271_debug(DEBUG_SCAN, "short_interval: %d, long_interval: %d, num_short: %d",
+		     le16_to_cpu(cmd->short_cycles_msec),
+		     le16_to_cpu(cmd->long_cycles_msec),
+		     cmd->short_cycles_count);
 
 	cmd->total_cycles = 0;
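
The hunk above switches scheduled scan from a single cycle period to an optional short/long split: a few cycles at the requested interval, then the configured long interval, falling back to the old single-interval behaviour when the configuration is not usable. A minimal sketch of the selection logic, using the wl18xx defaults added in main.c (14 short cycles, 30 s long interval) and a hypothetical 10 s request:

#include <stdio.h>

struct sched_scan {
	unsigned int short_ms, long_ms, short_count;
};

static void pick_intervals(struct sched_scan *s, unsigned int req_ms,
			   unsigned int num_short, unsigned int long_ms)
{
	if (num_short && long_ms && long_ms > req_ms) {
		s->short_ms = req_ms;		/* a few fast cycles first */
		s->long_ms = long_ms;
		s->short_count = num_short;
	} else {
		s->short_ms = 0;		/* old behaviour: one interval */
		s->long_ms = req_ms;
		s->short_count = 0;
	}
}

int main(void)
{
	struct sched_scan s;

	pick_intervals(&s, 10000, 14, 30000);
	printf("short %u ms x%u, then long %u ms\n",
	       s.short_ms, s.short_count, s.long_ms);
	return 0;
}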
 
diff --git a/drivers/net/wireless/ti/wl18xx/scan.h b/drivers/net/wireless/ti/wl18xx/scan.h
index 2e636aa..66a763f 100644
--- a/drivers/net/wireless/ti/wl18xx/scan.h
+++ b/drivers/net/wireless/ti/wl18xx/scan.h
@@ -74,8 +74,8 @@
 	u8 dfs;		   /* number of dfs channels in 5ghz */
 	u8 passive_active; /* number of passive before active channels 2.4ghz */
 
-	__le16 short_cycles_sec;
-	__le16 long_cycles_sec;
+	__le16 short_cycles_msec;
+	__le16 long_cycles_msec;
 	u8 short_cycles_count;
 	u8 total_cycles; /* 0 - infinite */
 	u8 padding[2];
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 68919f8..f01d24b 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -2003,12 +2003,15 @@
 		      wlvif->bss_type == BSS_TYPE_IBSS)))
 		return -EINVAL;
 
-	ret = wl12xx_cmd_role_enable(wl,
-				     wl12xx_wlvif_to_vif(wlvif)->addr,
-				     WL1271_ROLE_DEVICE,
-				     &wlvif->dev_role_id);
-	if (ret < 0)
-		goto out;
+	/* the dev role is already started for p2p mgmt interfaces */
+	if (!wlcore_is_p2p_mgmt(wlvif)) {
+		ret = wl12xx_cmd_role_enable(wl,
+					     wl12xx_wlvif_to_vif(wlvif)->addr,
+					     WL1271_ROLE_DEVICE,
+					     &wlvif->dev_role_id);
+		if (ret < 0)
+			goto out;
+	}
 
 	ret = wl12xx_cmd_role_start_dev(wl, wlvif, band, channel);
 	if (ret < 0)
@@ -2023,7 +2026,8 @@
 out_stop:
 	wl12xx_cmd_role_stop_dev(wl, wlvif);
 out_disable:
-	wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+	if (!wlcore_is_p2p_mgmt(wlvif))
+		wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
 out:
 	return ret;
 }
@@ -2052,10 +2056,42 @@
 	if (ret < 0)
 		goto out;
 
-	ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
-	if (ret < 0)
-		goto out;
+	if (!wlcore_is_p2p_mgmt(wlvif)) {
+		ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+		if (ret < 0)
+			goto out;
+	}
 
 out:
 	return ret;
 }
+
+int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			   u8 feature, u8 enable, u8 value)
+{
+	struct wlcore_cmd_generic_cfg *cmd;
+	int ret;
+
+	wl1271_debug(DEBUG_CMD,
+		     "cmd generic cfg (role %d feature %d enable %d value %d)",
+		     wlvif->role_id, feature, enable, value);
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->role_id = wlvif->role_id;
+	cmd->feature = feature;
+	cmd->enable = enable;
+	cmd->value = value;
+
+	ret = wl1271_cmd_send(wl, CMD_GENERIC_CFG, cmd, sizeof(*cmd), 0);
+	if (ret < 0) {
+		wl1271_error("failed to send generic cfg command");
+		goto out_free;
+	}
+out_free:
+	kfree(cmd);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(wlcore_cmd_generic_cfg);
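
wlcore_cmd_generic_cfg() sends a fixed four-byte payload after the command header, all single-byte fields, so no endianness conversion is needed. A sketch of the payload as a plain struct (the field values are hypothetical):

#include <stdint.h>
#include <stdio.h>

struct generic_cfg {
	uint8_t role_id;
	uint8_t feature;	/* e.g. WLCORE_CFG_FEATURE_RADAR_DEBUG == 2 */
	uint8_t enable;
	uint8_t value;
};

int main(void)
{
	struct generic_cfg cmd = { .role_id = 0, .feature = 2, .enable = 1 };

	printf("role %u feature %u enable %u value %u\n",
	       (unsigned)cmd.role_id, (unsigned)cmd.feature,
	       (unsigned)cmd.enable, (unsigned)cmd.value);
	return 0;
}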
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index e14cd40..8dc46c0 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -92,6 +92,8 @@
 void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
 				     enum ieee80211_band band);
 int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
+int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			   u8 feature, u8 enable, u8 value);
 int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
 int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
 int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
@@ -652,6 +654,19 @@
 	u8 padding[3];
 } __packed;
 
+enum wlcore_generic_cfg_feature {
+	WLCORE_CFG_FEATURE_RADAR_DEBUG = 2,
+};
+
+struct wlcore_cmd_generic_cfg {
+	struct wl1271_cmd_header header;
+
+	u8 role_id;
+	u8 feature;
+	u8 enable;
+	u8 value;
+} __packed;
+
 struct wl12xx_cmd_config_fwlog {
 	struct wl1271_cmd_header header;
 
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index 166add0..52a9d1b 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -1186,6 +1186,15 @@
 
 	/* SNR threshold to be used for filtering */
 	s8 snr_threshold;
+
+	/*
+	 * number of short intervals scheduled scan cycles before
+	 * switching to long intervals
+	 */
+	u8 num_short_intervals;
+
+	/* interval between each long scheduled scan cycle (in ms) */
+	u16 long_interval;
 } __packed;
 
 struct conf_ht_setting {
@@ -1352,7 +1361,7 @@
  * version, the two LSB are the lower driver's private conf
  * version.
  */
-#define WLCORE_CONF_VERSION	(0x0006 << 16)
+#define WLCORE_CONF_VERSION	(0x0007 << 16)
 #define WLCORE_CONF_MASK	0xffff0000
 #define WLCORE_CONF_SIZE	(sizeof(struct wlcore_conf_header) +	\
 				 sizeof(struct wlcore_conf))
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 5ca1fb1..e92f263 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -348,7 +348,7 @@
 }
 
 /* generic sta initialization (non vif-specific) */
-static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	int ret;
 
diff --git a/drivers/net/wireless/ti/wlcore/init.h b/drivers/net/wireless/ti/wlcore/init.h
index a45fbfdd..fd1cdb6 100644
--- a/drivers/net/wireless/ti/wlcore/init.h
+++ b/drivers/net/wireless/ti/wlcore/init.h
@@ -35,5 +35,6 @@
 int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif);
 int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 
 #endif
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 337223b..e819369 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1792,6 +1792,9 @@
 
 	wl->wow_enabled = true;
 	wl12xx_for_each_wlvif(wl, wlvif) {
+		if (wlcore_is_p2p_mgmt(wlvif))
+			continue;
+
 		ret = wl1271_configure_suspend(wl, wlvif, wow);
 		if (ret < 0) {
 			mutex_unlock(&wl->mutex);
@@ -1901,6 +1904,9 @@
 		goto out;
 
 	wl12xx_for_each_wlvif(wl, wlvif) {
+		if (wlcore_is_p2p_mgmt(wlvif))
+			continue;
+
 		wl1271_configure_resume(wl, wlvif);
 	}
 
@@ -2256,6 +2262,7 @@
 		wlvif->p2p = 1;
 		/* fall-through */
 	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_DEVICE:
 		wlvif->bss_type = BSS_TYPE_STA_BSS;
 		break;
 	case NL80211_IFTYPE_ADHOC:
@@ -2477,7 +2484,8 @@
 {
 	struct wlcore_hw_queue_iter_data *iter_data = data;
 
-	if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
+	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
 		return;
 
 	if (iter_data->cur_running || vif == iter_data->vif) {
@@ -2495,6 +2503,11 @@
 	struct wlcore_hw_queue_iter_data iter_data = {};
 	int i, q_base;
 
+	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+		return 0;
+	}
+
 	iter_data.vif = vif;
 
 	/* mark all bits taken by active interfaces */
@@ -2618,14 +2631,27 @@
 			goto out;
 	}
 
-	ret = wl12xx_cmd_role_enable(wl, vif->addr,
-				     role_type, &wlvif->role_id);
-	if (ret < 0)
-		goto out;
+	if (!wlcore_is_p2p_mgmt(wlvif)) {
+		ret = wl12xx_cmd_role_enable(wl, vif->addr,
+					     role_type, &wlvif->role_id);
+		if (ret < 0)
+			goto out;
 
-	ret = wl1271_init_vif_specific(wl, vif);
-	if (ret < 0)
-		goto out;
+		ret = wl1271_init_vif_specific(wl, vif);
+		if (ret < 0)
+			goto out;
+
+	} else {
+		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
+					     &wlvif->dev_role_id);
+		if (ret < 0)
+			goto out;
+
+		/* needed mainly for configuring rate policies */
+		ret = wl1271_sta_hw_init(wl, wlvif);
+		if (ret < 0)
+			goto out;
+	}
 
 	list_add(&wlvif->list, &wl->wlvif_list);
 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
@@ -2696,9 +2722,15 @@
 				wl12xx_stop_dev(wl, wlvif);
 		}
 
-		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
-		if (ret < 0)
-			goto deinit;
+		if (!wlcore_is_p2p_mgmt(wlvif)) {
+			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
+			if (ret < 0)
+				goto deinit;
+		} else {
+			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+			if (ret < 0)
+				goto deinit;
+		}
 
 		wl1271_ps_elp_sleep(wl);
 	}
@@ -3088,6 +3120,9 @@
 {
 	int ret;
 
+	if (wlcore_is_p2p_mgmt(wlvif))
+		return 0;
+
 	if (conf->power_level != wlvif->power_level) {
 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
 		if (ret < 0)
@@ -3207,6 +3242,9 @@
 		goto out;
 
 	wl12xx_for_each_wlvif(wl, wlvif) {
+		if (wlcore_is_p2p_mgmt(wlvif))
+			continue;
+
 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
 			if (*total & FIF_ALLMULTI)
 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
@@ -4837,6 +4875,9 @@
 	u8 ps_scheme;
 	int ret = 0;
 
+	if (wlcore_is_p2p_mgmt(wlvif))
+		return 0;
+
 	mutex_lock(&wl->mutex);
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
@@ -6078,8 +6119,10 @@
 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
 
 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
-		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
-		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
+					 BIT(NL80211_IFTYPE_AP) |
+					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
+					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
+					 BIT(NL80211_IFTYPE_P2P_GO);
 	wl->hw->wiphy->max_scan_ssids = 1;
 	wl->hw->wiphy->max_sched_scan_ssids = 16;
 	wl->hw->wiphy->max_match_sets = 16;
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index e125974..5b29273 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -74,7 +74,14 @@
 	if (desc->rate <= wl->hw_min_ht_rate)
 		status->flag |= RX_FLAG_HT;
 
-	status->signal = desc->rssi;
+	/*
+	 * Read the signal level and antenna diversity indication.
+	 * The MSB of the signal level is always set, as it is a
+	 * negative number.
+	 * The antenna indication is the MSB of the rssi.
+	 */
+	status->signal = ((desc->rssi & RSSI_LEVEL_BITMASK) | BIT(7));
+	status->antenna = ((desc->rssi & ANT_DIVERSITY_BITMASK) >> 7);
 
 	/*
 	 * FIXME: In wl1251, the SNR should be divided by two.  In wl1271 we
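
The decoding above splits the descriptor's rssi byte in two: bit 7 carries the antenna-diversity indication, bits 0-6 the signal level, and bit 7 of the result is forced back on because the level is always negative. A standalone sketch (the descriptor byte is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define RSSI_LEVEL_BITMASK	0x7F
#define ANT_DIVERSITY_BITMASK	0x80	/* BIT(7) */

int main(void)
{
	uint8_t rssi = 0xB5;	/* hypothetical descriptor byte */
	int8_t signal = (int8_t)((rssi & RSSI_LEVEL_BITMASK) | 0x80);
	unsigned int antenna = (rssi & ANT_DIVERSITY_BITMASK) >> 7;

	printf("signal %d dBm, antenna %u\n", signal, antenna);
	return 0;
}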
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index a3b1618..f5a7087c 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -30,6 +30,9 @@
 #define WL1271_RX_MAX_RSSI -30
 #define WL1271_RX_MIN_RSSI -95
 
+#define RSSI_LEVEL_BITMASK	0x7F
+#define ANT_DIVERSITY_BITMASK	BIT(7)
+
 #define SHORT_PREAMBLE_BIT   BIT(0)
 #define OFDM_RATE_BIT        BIT(6)
 #define PBCC_RATE_BIT        BIT(7)
diff --git a/drivers/net/wireless/ti/wlcore/scan.h b/drivers/net/wireless/ti/wlcore/scan.h
index 4dadd0c..782eb29 100644
--- a/drivers/net/wireless/ti/wlcore/scan.h
+++ b/drivers/net/wireless/ti/wlcore/scan.h
@@ -83,6 +83,12 @@
 #define MAX_CHANNELS_5GHZ	42
 
 #define SCAN_MAX_CYCLE_INTERVALS 16
+
+/* The FW interval table can take up to 16 entries.
+ * The 1st entry isn't used (scan is immediate). The last
+ * entry should be used for the long_interval.
+ */
+#define SCAN_MAX_SHORT_INTERVALS (SCAN_MAX_CYCLE_INTERVALS - 2)
 #define SCAN_MAX_BANDS 3
 
 enum {
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index ea7e07a..c172da5 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -293,7 +293,8 @@
 	/* Use block mode for transferring over one block size of data */
 	func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 
-	if (wlcore_probe_of(&func->dev, &irq, &pdev_data))
+	ret = wlcore_probe_of(&func->dev, &irq, &pdev_data);
+	if (ret)
 		goto out_free_glue;
 
 	/* if sdio can keep power while host is suspended, enable wow */
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 7f363fa..a1b6040 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -500,6 +500,9 @@
 	/* interface combinations supported by the hw */
 	const struct ieee80211_iface_combination *iface_combinations;
 	u8 n_iface_combinations;
+
+	/* dynamic fw traces */
+	u32 dynamic_fw_traces;
 };
 
 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 39efc6d..27c5687 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -503,6 +503,11 @@
 	return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
 }
 
+static inline bool wlcore_is_p2p_mgmt(struct wl12xx_vif *wlvif)
+{
+	return wl12xx_wlvif_to_vif(wlvif)->type == NL80211_IFTYPE_P2P_DEVICE;
+}
+
 #define wl12xx_for_each_wlvif(wl, wlvif) \
 		list_for_each_entry(wlvif, &wl->wlvif_list, list)
 
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8a495b3..6dc76c1 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -210,12 +210,22 @@
 	VIF_STATUS_CONNECTED,
 };
 
+struct xenvif_mcast_addr {
+	struct list_head entry;
+	struct rcu_head rcu;
+	u8 addr[6];
+};
+
+#define XEN_NETBK_MCAST_MAX 64
+
 struct xenvif {
 	/* Unique identifier for this interface. */
 	domid_t          domid;
 	unsigned int     handle;
 
 	u8               fe_dev_addr[6];
+	struct list_head fe_mcast_addr;
+	unsigned int     fe_mcast_count;
 
 	/* Frontend feature information. */
 	int gso_mask;
@@ -224,6 +234,7 @@
 	u8 can_sg:1;
 	u8 ip_csum:1;
 	u8 ipv6_csum:1;
+	u8 multicast_control:1;
 
 	/* Is this interface disabled? True when backend discovers
 	 * frontend is rogue.
@@ -325,9 +336,6 @@
 		queue->pending_prod + queue->pending_cons;
 }
 
-/* Callback from stack when TX packet can be released */
-void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
-
 irqreturn_t xenvif_interrupt(int irq, void *dev_id);
 
 extern bool separate_tx_rx_irq;
@@ -344,4 +352,8 @@
 				 struct sk_buff *skb);
 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);
 
+/* Multicast control */
+bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
+void xenvif_mcast_addr_list_free(struct xenvif *vif);
+
 #endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 28577a3..e7bd63e 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -171,6 +171,13 @@
 	    !xenvif_schedulable(vif))
 		goto drop;
 
+	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
+		struct ethhdr *eth = (struct ethhdr *)skb->data;
+
+		if (!xenvif_mcast_match(vif, eth->h_dest))
+			goto drop;
+	}
+
 	cb = XENVIF_RX_CB(skb);
 	cb->expires = jiffies + vif->drain_timeout;
 
@@ -427,6 +434,7 @@
 	vif->num_queues = 0;
 
 	spin_lock_init(&vif->lock);
+	INIT_LIST_HEAD(&vif->fe_mcast_addr);
 
 	dev->netdev_ops	= &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
@@ -661,6 +669,8 @@
 
 		xenvif_unmap_frontend_rings(queue);
 	}
+
+	xenvif_mcast_addr_list_free(vif);
 }
 
 /* Reverse the relevant parts of xenvif_init_queue().
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 3f44b52..42569b9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1157,6 +1157,80 @@
 	return false;
 }
 
+/* No locking is required in xenvif_mcast_add/del() as they are
+ * only ever invoked from NAPI poll. An RCU list is used because
+ * xenvif_mcast_match() is called asynchronously, during start_xmit.
+ */
+
+static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
+		if (net_ratelimit())
+			netdev_err(vif->dev,
+				   "Too many multicast addresses\n");
+		return -ENOSPC;
+	}
+
+	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
+	if (!mcast)
+		return -ENOMEM;
+
+	ether_addr_copy(mcast->addr, addr);
+	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
+	vif->fe_mcast_count++;
+
+	return 0;
+}
+
+static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+		if (ether_addr_equal(addr, mcast->addr)) {
+			--vif->fe_mcast_count;
+			list_del_rcu(&mcast->entry);
+			kfree_rcu(mcast, rcu);
+			break;
+		}
+	}
+}
+
+bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+		if (ether_addr_equal(addr, mcast->addr)) {
+			rcu_read_unlock();
+			return true;
+		}
+	}
+	rcu_read_unlock();
+
+	return false;
+}
+
+void xenvif_mcast_addr_list_free(struct xenvif *vif)
+{
+	/* No need for locking or RCU here. NAPI poll and TX queue
+	 * are stopped.
+	 */
+	while (!list_empty(&vif->fe_mcast_addr)) {
+		struct xenvif_mcast_addr *mcast;
+
+		mcast = list_first_entry(&vif->fe_mcast_addr,
+					 struct xenvif_mcast_addr,
+					 entry);
+		--vif->fe_mcast_count;
+		list_del(&mcast->entry);
+		kfree(mcast);
+	}
+}
+
 static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 				     int budget,
 				     unsigned *copy_ops,
@@ -1215,6 +1289,31 @@
 				break;
 		}
 
+		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
+			struct xen_netif_extra_info *extra;
+
+			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
+			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
+
+			make_tx_response(queue, &txreq,
+					 (ret == 0) ?
+					 XEN_NETIF_RSP_OKAY :
+					 XEN_NETIF_RSP_ERROR);
+			push_tx_responses(queue);
+			continue;
+		}
+
+		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
+			struct xen_netif_extra_info *extra;
+
+			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
+			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
+
+			make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
+			push_tx_responses(queue);
+			continue;
+		}
+
 		ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
 		if (unlikely(ret < 0))
 			break;
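
xenvif_mcast_match() runs in the transmit path while xenvif_mcast_add()/del() run from NAPI poll, hence the RCU list: readers never block, and kfree_rcu() defers freeing until any in-flight lookup has finished. The lookup itself is a plain 6-byte MAC comparison, sketched here without the RCU plumbing:

#include <stdio.h>
#include <string.h>

struct mcast_addr {
	struct mcast_addr *next;
	unsigned char addr[6];
};

static int mcast_match(const struct mcast_addr *list,
		       const unsigned char *addr)
{
	for (; list; list = list->next)
		if (!memcmp(list->addr, addr, 6))	/* ether_addr_equal() */
			return 1;
	return 0;
}

int main(void)
{
	struct mcast_addr a = { NULL, { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } };
	const unsigned char dst[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("match: %d\n", mcast_match(&a, dst));
	return 0;
}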
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index ec383b0..929a6e7 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -327,6 +327,14 @@
 			goto abort_transaction;
 		}
 
+		/* We support multicast-control. */
+		err = xenbus_printf(xbt, dev->nodename,
+				    "feature-multicast-control", "%d", 1);
+		if (err) {
+			message = "writing feature-multicast-control";
+			goto abort_transaction;
+		}
+
 		err = xenbus_transaction_end(xbt, 0);
 	} while (err == -EAGAIN);
 
@@ -1016,6 +1024,11 @@
 		val = 0;
 	vif->ipv6_csum = !!val;
 
+	if (xenbus_scanf(XBT_NIL, dev->otherend, "request-multicast-control",
+			 "%d", &val) < 0)
+		val = 0;
+	vif->multicast_control = !!val;
+
 	return 0;
 }
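
The xenstore handshake above is symmetric: the backend advertises feature-multicast-control, the frontend opts in with request-multicast-control, and an absent or unreadable key defaults to 0 so older peers keep working. A sketch of that defaulting parse (a NULL value stands in for a missing key):

#include <stdio.h>

static int read_flag(const char *val)
{
	int v;

	if (!val || sscanf(val, "%d", &v) != 1)
		v = 0;		/* missing or malformed: feature off */
	return !!v;
}

int main(void)
{
	printf("absent: %d, \"1\": %d\n", read_flag(NULL), read_flag("1"));
	return 0;
}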
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f948c46..e27e6d2 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1336,7 +1336,7 @@
 
 	netif_carrier_off(info->netdev);
 
-	for (i = 0; i < num_queues; ++i) {
+	for (i = 0; i < num_queues && info->queues; ++i) {
 		struct netfront_queue *queue = &info->queues[i];
 
 		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
@@ -1348,7 +1348,8 @@
 		queue->tx_evtchn = queue->rx_evtchn = 0;
 		queue->tx_irq = queue->rx_irq = 0;
 
-		napi_synchronize(&queue->napi);
+		if (netif_running(info->netdev))
+			napi_synchronize(&queue->napi);
 
 		xennet_release_tx_bufs(queue);
 		xennet_release_rx_bufs(queue);
@@ -2101,7 +2102,8 @@
 
 	unregister_netdev(info->netdev);
 
-	xennet_destroy_queues(info);
+	if (info->queues)
+		xennet_destroy_queues(info);
 	xennet_free_netdev(info->netdev);
 
 	return 0;
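
Both netfront fixes above guard against info->queues being NULL, since disconnect and removal can run before the queue array was ever allocated. The pattern in miniature:

#include <stdio.h>

struct queue { int irq; };

static void disconnect(struct queue *queues, unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num && queues; i++)	/* tolerate a NULL array */
		queues[i].irq = 0;
}

int main(void)
{
	disconnect(NULL, 4);	/* must not dereference */
	puts("ok");
	return 0;
}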
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 722673c..6639cd1 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -74,4 +74,5 @@
 source "drivers/nfc/st21nfca/Kconfig"
 source "drivers/nfc/st-nci/Kconfig"
 source "drivers/nfc/nxp-nci/Kconfig"
+source "drivers/nfc/s3fwrn5/Kconfig"
 endmenu
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 368b6df..2757fe1b 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -14,3 +14,4 @@
 obj-$(CONFIG_NFC_ST21NFCA)  	+= st21nfca/
 obj-$(CONFIG_NFC_ST_NCI)	+= st-nci/
 obj-$(CONFIG_NFC_NXP_NCI)	+= nxp-nci/
+obj-$(CONFIG_NFC_S3FWRN5)	+= s3fwrn5/
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 2f77f1d..fac80c6 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -318,19 +318,15 @@
 	struct i2c_client *client = phy->i2c_dev;
 	struct gpio_desc *gpiod_en, *gpiod_fw, *gpiod_irq;
 
-	gpiod_en = devm_gpiod_get_index(&client->dev, NULL, 2);
-	gpiod_fw = devm_gpiod_get_index(&client->dev, NULL, 1);
-	gpiod_irq = devm_gpiod_get_index(&client->dev, NULL, 0);
+	gpiod_en = devm_gpiod_get_index(&client->dev, NULL, 2, GPIOD_OUT_LOW);
+	gpiod_fw = devm_gpiod_get_index(&client->dev, NULL, 1, GPIOD_OUT_LOW);
+	gpiod_irq = devm_gpiod_get_index(&client->dev, NULL, 0, GPIOD_IN);
 
 	if (IS_ERR(gpiod_en) || IS_ERR(gpiod_fw) || IS_ERR(gpiod_irq)) {
 		nfc_err(&client->dev, "No GPIOs\n");
 		return -EINVAL;
 	}
 
-	gpiod_direction_output(gpiod_en, 0);
-	gpiod_direction_output(gpiod_fw, 0);
-	gpiod_direction_input(gpiod_irq);
-
 	client->irq = gpiod_to_irq(gpiod_irq);
 	if (client->irq < 0) {
 		nfc_err(&client->dev, "No IRQ\n");
diff --git a/drivers/nfc/s3fwrn5/Kconfig b/drivers/nfc/s3fwrn5/Kconfig
new file mode 100644
index 0000000..7e3b255
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/Kconfig
@@ -0,0 +1,19 @@
+config NFC_S3FWRN5
+	tristate
+	---help---
+	  Core driver for the Samsung S3FWRN5 NFC chip. Contains the core
+	  utilities of the chip. It's intended to be used by PHYs to avoid
+	  duplicating lots of common code.
+
+config NFC_S3FWRN5_I2C
+	tristate "Samsung S3FWRN5 I2C support"
+	depends on NFC_NCI && I2C
+	select NFC_S3FWRN5
+	default n
+	---help---
+	  This module adds support for an I2C interface to the S3FWRN5 chip.
+	  Select this if your platform is using the I2C bus.
+
+	  To compile this driver as a module, choose M here. The module will
+	  be called s3fwrn5_i2c.ko.
+	  Say N if unsure.
diff --git a/drivers/nfc/s3fwrn5/Makefile b/drivers/nfc/s3fwrn5/Makefile
new file mode 100644
index 0000000..3381c34
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for Samsung S3FWRN5 NFC driver
+#
+
+s3fwrn5-objs = core.o firmware.o nci.o
+s3fwrn5_i2c-objs = i2c.o
+
+obj-$(CONFIG_NFC_S3FWRN5) += s3fwrn5.o
+obj-$(CONFIG_NFC_S3FWRN5_I2C) += s3fwrn5_i2c.o
+
+ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
new file mode 100644
index 0000000..0d866ca
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/core.c
@@ -0,0 +1,219 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <net/nfc/nci_core.h>
+
+#include "s3fwrn5.h"
+#include "firmware.h"
+#include "nci.h"
+
+#define S3FWRN5_NFC_PROTOCOLS  (NFC_PROTO_JEWEL_MASK | \
+				NFC_PROTO_MIFARE_MASK | \
+				NFC_PROTO_FELICA_MASK | \
+				NFC_PROTO_ISO14443_MASK | \
+				NFC_PROTO_ISO14443_B_MASK | \
+				NFC_PROTO_ISO15693_MASK)
+
+static int s3fwrn5_firmware_update(struct s3fwrn5_info *info)
+{
+	bool need_update;
+	int ret;
+
+	s3fwrn5_fw_init(&info->fw_info, "sec_s3fwrn5_firmware.bin");
+
+	/* Update firmware */
+
+	s3fwrn5_set_wake(info, false);
+	s3fwrn5_set_mode(info, S3FWRN5_MODE_FW);
+
+	ret = s3fwrn5_fw_setup(&info->fw_info);
+	if (ret < 0)
+		return ret;
+
+	need_update = s3fwrn5_fw_check_version(&info->fw_info,
+		info->ndev->manufact_specific_info);
+	if (!need_update)
+		goto out;
+
+	dev_info(&info->ndev->nfc_dev->dev, "Detected new firmware version\n");
+
+	ret = s3fwrn5_fw_download(&info->fw_info);
+	if (ret < 0)
+		goto out;
+
+	/* Update RF configuration */
+
+	s3fwrn5_set_mode(info, S3FWRN5_MODE_NCI);
+
+	s3fwrn5_set_wake(info, true);
+	ret = s3fwrn5_nci_rf_configure(info, "sec_s3fwrn5_rfreg.bin");
+	s3fwrn5_set_wake(info, false);
+
+out:
+	s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
+	s3fwrn5_fw_cleanup(&info->fw_info);
+	return ret;
+}
+
+static int s3fwrn5_nci_open(struct nci_dev *ndev)
+{
+	struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+
+	if (s3fwrn5_get_mode(info) != S3FWRN5_MODE_COLD)
+		return -EBUSY;
+
+	s3fwrn5_set_mode(info, S3FWRN5_MODE_NCI);
+	s3fwrn5_set_wake(info, true);
+
+	return 0;
+}
+
+static int s3fwrn5_nci_close(struct nci_dev *ndev)
+{
+	struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+
+	s3fwrn5_set_wake(info, false);
+	s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
+
+	return 0;
+}
+
+static int s3fwrn5_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+{
+	struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+	int ret;
+
+	mutex_lock(&info->mutex);
+
+	if (s3fwrn5_get_mode(info) != S3FWRN5_MODE_NCI) {
+		mutex_unlock(&info->mutex);
+		return -EINVAL;
+	}
+
+	ret = s3fwrn5_write(info, skb);
+	if (ret < 0)
+		kfree_skb(skb);
+
+	mutex_unlock(&info->mutex);
+	return ret;
+}
+
+static int s3fwrn5_nci_post_setup(struct nci_dev *ndev)
+{
+	struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+	int ret;
+
+	ret = s3fwrn5_firmware_update(info);
+	if (ret < 0)
+		goto out;
+
+	/* NCI core reset */
+
+	s3fwrn5_set_mode(info, S3FWRN5_MODE_NCI);
+	s3fwrn5_set_wake(info, true);
+
+	ret = nci_core_reset(info->ndev);
+	if (ret < 0)
+		goto out;
+
+	ret = nci_core_init(info->ndev);
+
+out:
+	return ret;
+}
+
+static struct nci_ops s3fwrn5_nci_ops = {
+	.open = s3fwrn5_nci_open,
+	.close = s3fwrn5_nci_close,
+	.send = s3fwrn5_nci_send,
+	.post_setup = s3fwrn5_nci_post_setup,
+};
+
+int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
+	struct s3fwrn5_phy_ops *phy_ops, unsigned int max_payload)
+{
+	struct s3fwrn5_info *info;
+	int ret;
+
+	info = devm_kzalloc(pdev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->phy_id = phy_id;
+	info->pdev = pdev;
+	info->phy_ops = phy_ops;
+	info->max_payload = max_payload;
+	mutex_init(&info->mutex);
+
+	s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
+
+	s3fwrn5_nci_get_prop_ops(&s3fwrn5_nci_ops.prop_ops,
+		&s3fwrn5_nci_ops.n_prop_ops);
+
+	info->ndev = nci_allocate_device(&s3fwrn5_nci_ops,
+		S3FWRN5_NFC_PROTOCOLS, 0, 0);
+	if (!info->ndev)
+		return -ENOMEM;
+
+	nci_set_parent_dev(info->ndev, pdev);
+	nci_set_drvdata(info->ndev, info);
+
+	ret = nci_register_device(info->ndev);
+	if (ret < 0) {
+		nci_free_device(info->ndev);
+		return ret;
+	}
+
+	info->fw_info.ndev = info->ndev;
+
+	*ndev = info->ndev;
+
+	return ret;
+}
+EXPORT_SYMBOL(s3fwrn5_probe);
+
+void s3fwrn5_remove(struct nci_dev *ndev)
+{
+	struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+
+	s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
+
+	nci_unregister_device(ndev);
+	nci_free_device(ndev);
+}
+EXPORT_SYMBOL(s3fwrn5_remove);
+
+int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
+	enum s3fwrn5_mode mode)
+{
+	switch (mode) {
+	case S3FWRN5_MODE_NCI:
+		return nci_recv_frame(ndev, skb);
+	case S3FWRN5_MODE_FW:
+		return s3fwrn5_fw_recv_frame(ndev, skb);
+	default:
+		return -ENODEV;
+	}
+}
+EXPORT_SYMBOL(s3fwrn5_recv_frame);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Samsung S3FWRN5 NFC driver");
+MODULE_AUTHOR("Robert Baldyga <r.baldyga@samsung.com>");
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
new file mode 100644
index 0000000..64a9025
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -0,0 +1,511 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/crypto.h>
+#include <crypto/sha.h>
+
+#include "s3fwrn5.h"
+#include "firmware.h"
+
+struct s3fwrn5_fw_version {
+	__u8 major;
+	__u8 build1;
+	__u8 build2;
+	__u8 target;
+};
+
+static int s3fwrn5_fw_send_msg(struct s3fwrn5_fw_info *fw_info,
+	struct sk_buff *msg, struct sk_buff **rsp)
+{
+	struct s3fwrn5_info *info =
+		container_of(fw_info, struct s3fwrn5_info, fw_info);
+	long ret;
+
+	reinit_completion(&fw_info->completion);
+
+	ret = s3fwrn5_write(info, msg);
+	if (ret < 0)
+		return ret;
+
+	ret = wait_for_completion_interruptible_timeout(
+		&fw_info->completion, msecs_to_jiffies(1000));
+	if (ret < 0)
+		return ret;
+	else if (ret == 0)
+		return -ENXIO;
+
+	if (!fw_info->rsp)
+		return -EINVAL;
+
+	*rsp = fw_info->rsp;
+	fw_info->rsp = NULL;
+
+	return 0;
+}
+
+static int s3fwrn5_fw_prep_msg(struct s3fwrn5_fw_info *fw_info,
+	struct sk_buff **msg, u8 type, u8 code, const void *data, u16 len)
+{
+	struct s3fwrn5_fw_header hdr;
+	struct sk_buff *skb;
+
+	hdr.type = type | fw_info->parity;
+	fw_info->parity ^= 0x80;
+	hdr.code = code;
+	hdr.len = len;
+
+	skb = alloc_skb(S3FWRN5_FW_HDR_SIZE + len, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	memcpy(skb_put(skb, S3FWRN5_FW_HDR_SIZE), &hdr, S3FWRN5_FW_HDR_SIZE);
+	if (len)
+		memcpy(skb_put(skb, len), data, len);
+
+	*msg = skb;
+
+	return 0;
+}
+
+static int s3fwrn5_fw_get_bootinfo(struct s3fwrn5_fw_info *fw_info,
+	struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo)
+{
+	struct sk_buff *msg, *rsp = NULL;
+	struct s3fwrn5_fw_header *hdr;
+	int ret;
+
+	/* Send GET_BOOTINFO command */
+
+	ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD,
+		S3FWRN5_FW_CMD_GET_BOOTINFO, NULL, 0);
+	if (ret < 0)
+		return ret;
+
+	ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+	kfree_skb(msg);
+	if (ret < 0)
+		return ret;
+
+	hdr = (struct s3fwrn5_fw_header *) rsp->data;
+	if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	memcpy(bootinfo, rsp->data + S3FWRN5_FW_HDR_SIZE, 10);
+
+out:
+	kfree_skb(rsp);
+	return ret;
+}
+
+static int s3fwrn5_fw_enter_update_mode(struct s3fwrn5_fw_info *fw_info,
+	const void *hash_data, u16 hash_size,
+	const void *sig_data, u16 sig_size)
+{
+	struct s3fwrn5_fw_cmd_enter_updatemode args;
+	struct sk_buff *msg, *rsp = NULL;
+	struct s3fwrn5_fw_header *hdr;
+	int ret;
+
+	/* Send ENTER_UPDATE_MODE command */
+
+	args.hashcode_size = hash_size;
+	args.signature_size = sig_size;
+
+	ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD,
+		S3FWRN5_FW_CMD_ENTER_UPDATE_MODE, &args, sizeof(args));
+	if (ret < 0)
+		return ret;
+
+	ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+	kfree_skb(msg);
+	if (ret < 0)
+		return ret;
+
+	hdr = (struct s3fwrn5_fw_header *) rsp->data;
+	if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+		ret = -EPROTO;
+		goto out;
+	}
+
+	kfree_skb(rsp);
+
+	/* Send hashcode data */
+
+	ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_DATA, 0,
+		hash_data, hash_size);
+	if (ret < 0)
+		return ret;
+
+	ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+	kfree_skb(msg);
+	if (ret < 0)
+		return ret;
+
+	hdr = (struct s3fwrn5_fw_header *) rsp->data;
+	if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+		ret = -EPROTO;
+		goto out;
+	}
+
+	kfree_skb(rsp);
+
+	/* Send signature data */
+
+	ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_DATA, 0,
+		sig_data, sig_size);
+	if (ret < 0)
+		return ret;
+
+	ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+	kfree_skb(msg);
+	if (ret < 0)
+		return ret;
+
+	hdr = (struct s3fwrn5_fw_header *) rsp->data;
+	if (hdr->code != S3FWRN5_FW_RET_SUCCESS)
+		ret = -EPROTO;
+
+out:
+	kfree_skb(rsp);
+	return ret;
+}
+
+static int s3fwrn5_fw_update_sector(struct s3fwrn5_fw_info *fw_info,
+	u32 base_addr, const void *data)
+{
+	struct s3fwrn5_fw_cmd_update_sector args;
+	struct sk_buff *msg, *rsp = NULL;
+	struct s3fwrn5_fw_header *hdr;
+	int ret, i;
+
+	/* Send UPDATE_SECTOR command */
+
+	args.base_address = base_addr;
+
+	ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD,
+		S3FWRN5_FW_CMD_UPDATE_SECTOR, &args, sizeof(args));
+	if (ret < 0)
+		return ret;
+
+	ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+	kfree_skb(msg);
+	if (ret < 0)
+		return ret;
+
+	hdr = (struct s3fwrn5_fw_header *) rsp->data;
+	if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+		ret = -EPROTO;
+		goto err;
+	}
+
+	kfree_skb(rsp);
+
+	/* Send data split into 256-byte packets */
+
+	for (i = 0; i < 16; ++i) {
+		ret = s3fwrn5_fw_prep_msg(fw_info, &msg,
+			S3FWRN5_FW_MSG_DATA, 0, data+256*i, 256);
+		if (ret < 0)
+			break;
+
+		ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+		kfree_skb(msg);
+		if (ret < 0)
+			break;
+
+		hdr = (struct s3fwrn5_fw_header *) rsp->data;
+		if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+			ret = -EPROTO;
+			goto err;
+		}
+
+		kfree_skb(rsp);
+	}
+
+	return ret;
+
+err:
+	kfree_skb(rsp);
+	return ret;
+}
+
+static int s3fwrn5_fw_complete_update_mode(struct s3fwrn5_fw_info *fw_info)
+{
+	struct sk_buff *msg, *rsp = NULL;
+	struct s3fwrn5_fw_header *hdr;
+	int ret;
+
+	/* Send COMPLETE_UPDATE_MODE command */
+
+	ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD,
+		S3FWRN5_FW_CMD_COMPLETE_UPDATE_MODE, NULL, 0);
+	if (ret < 0)
+		return ret;
+
+	ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+	kfree_skb(msg);
+	if (ret < 0)
+		return ret;
+
+	hdr = (struct s3fwrn5_fw_header *) rsp->data;
+	if (hdr->code != S3FWRN5_FW_RET_SUCCESS)
+		ret = -EPROTO;
+
+	kfree_skb(rsp);
+
+	return ret;
+}
+
+/*
+ * Firmware header structure:
+ *
+ * 0x00 - 0x0B : Date and time string (w/o NUL termination)
+ * 0x10 - 0x13 : Firmware version
+ * 0x14 - 0x17 : Signature address
+ * 0x18 - 0x1B : Signature size
+ * 0x1C - 0x1F : Firmware image address
+ * 0x20 - 0x23 : Firmware sectors count
+ * 0x24 - 0x27 : Custom signature address
+ * 0x28 - 0x2B : Custom signature size
+ */
+
+#define S3FWRN5_FW_IMAGE_HEADER_SIZE 44
+
+static int s3fwrn5_fw_request_firmware(struct s3fwrn5_fw_info *fw_info)
+{
+	struct s3fwrn5_fw_image *fw = &fw_info->fw;
+	u32 sig_off;
+	u32 image_off;
+	u32 custom_sig_off;
+	int ret;
+
+	ret = request_firmware(&fw->fw, fw_info->fw_name,
+		&fw_info->ndev->nfc_dev->dev);
+	if (ret < 0)
+		return ret;
+
+	if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE)
+		return -EINVAL;
+
+	memcpy(fw->date, fw->fw->data + 0x00, 12);
+	fw->date[12] = '\0';
+
+	memcpy(&fw->version, fw->fw->data + 0x10, 4);
+
+	memcpy(&sig_off, fw->fw->data + 0x14, 4);
+	fw->sig = fw->fw->data + sig_off;
+	memcpy(&fw->sig_size, fw->fw->data + 0x18, 4);
+
+	memcpy(&image_off, fw->fw->data + 0x1C, 4);
+	fw->image = fw->fw->data + image_off;
+	memcpy(&fw->image_sectors, fw->fw->data + 0x20, 4);
+
+	memcpy(&custom_sig_off, fw->fw->data + 0x24, 4);
+	fw->custom_sig = fw->fw->data + custom_sig_off;
+	memcpy(&fw->custom_sig_size, fw->fw->data + 0x28, 4);
+
+	return 0;
+}
+
+static void s3fwrn5_fw_release_firmware(struct s3fwrn5_fw_info *fw_info)
+{
+	release_firmware(fw_info->fw.fw);
+}
+
+static int s3fwrn5_fw_get_base_addr(
+	struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo, u32 *base_addr)
+{
+	int i;
+	struct {
+		u8 version[4];
+		u32 base_addr;
+	} match[] = {
+		{{0x05, 0x00, 0x00, 0x00}, 0x00005000},
+		{{0x05, 0x00, 0x00, 0x01}, 0x00003000},
+		{{0x05, 0x00, 0x00, 0x02}, 0x00003000},
+		{{0x05, 0x00, 0x00, 0x03}, 0x00003000},
+		{{0x05, 0x00, 0x00, 0x05}, 0x00003000}
+	};
+
+	for (i = 0; i < ARRAY_SIZE(match); ++i)
+		if (bootinfo->hw_version[0] == match[i].version[0] &&
+			bootinfo->hw_version[1] == match[i].version[1] &&
+			bootinfo->hw_version[3] == match[i].version[3]) {
+			*base_addr = match[i].base_addr;
+			return 0;
+		}
+
+	return -EINVAL;
+}
+
+static inline bool
+s3fwrn5_fw_is_custom(struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo)
+{
+	return !!bootinfo->hw_version[2];
+}
+
+int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info)
+{
+	struct s3fwrn5_fw_cmd_get_bootinfo_rsp bootinfo;
+	int ret;
+
+	/* Get firmware data */
+
+	ret = s3fwrn5_fw_request_firmware(fw_info);
+	if (ret < 0) {
+		dev_err(&fw_info->ndev->nfc_dev->dev,
+			"Failed to get fw file, ret=%02x\n", ret);
+		return ret;
+	}
+
+	/* Get bootloader info */
+
+	ret = s3fwrn5_fw_get_bootinfo(fw_info, &bootinfo);
+	if (ret < 0) {
+		dev_err(&fw_info->ndev->nfc_dev->dev,
+			"Failed to get bootinfo, ret=%02x\n", ret);
+		goto err;
+	}
+
+	/* Match hardware version to obtain firmware base address */
+
+	ret = s3fwrn5_fw_get_base_addr(&bootinfo, &fw_info->base_addr);
+	if (ret < 0) {
+		dev_err(&fw_info->ndev->nfc_dev->dev,
+			"Unknown hardware version\n");
+		goto err;
+	}
+
+	fw_info->sector_size = bootinfo.sector_size;
+
+	fw_info->sig_size = s3fwrn5_fw_is_custom(&bootinfo) ?
+		fw_info->fw.custom_sig_size : fw_info->fw.sig_size;
+	fw_info->sig = s3fwrn5_fw_is_custom(&bootinfo) ?
+		fw_info->fw.custom_sig : fw_info->fw.sig;
+
+	return 0;
+
+err:
+	s3fwrn5_fw_release_firmware(fw_info);
+	return ret;
+}
+
+bool s3fwrn5_fw_check_version(struct s3fwrn5_fw_info *fw_info, u32 version)
+{
+	struct s3fwrn5_fw_version *new = (void *) &fw_info->fw.version;
+	struct s3fwrn5_fw_version *old = (void *) &version;
+
+	if (new->major > old->major)
+		return true;
+	if (new->build1 > old->build1)
+		return true;
+	if (new->build2 > old->build2)
+		return true;
+
+	return false;
+}
+
+int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
+{
+	struct s3fwrn5_fw_image *fw = &fw_info->fw;
+	u8 hash_data[SHA1_DIGEST_SIZE];
+	struct scatterlist sg;
+	struct hash_desc desc;
+	u32 image_size, off;
+	int ret;
+
+	image_size = fw_info->sector_size * fw->image_sectors;
+
+	/* Compute SHA of firmware data */
+
+	sg_init_one(&sg, fw->image, image_size);
+	desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
+	crypto_hash_init(&desc);
+	crypto_hash_update(&desc, &sg, image_size);
+	crypto_hash_final(&desc, hash_data);
+	crypto_free_hash(desc.tfm);
+
+	/* Firmware update process */
+
+	dev_info(&fw_info->ndev->nfc_dev->dev,
+		"Firmware update: %s\n", fw_info->fw_name);
+
+	ret = s3fwrn5_fw_enter_update_mode(fw_info, hash_data,
+		SHA1_DIGEST_SIZE, fw_info->sig, fw_info->sig_size);
+	if (ret < 0) {
+		dev_err(&fw_info->ndev->nfc_dev->dev,
+			"Unable to enter update mode\n");
+		goto out;
+	}
+
+	for (off = 0; off < image_size; off += fw_info->sector_size) {
+		ret = s3fwrn5_fw_update_sector(fw_info,
+			fw_info->base_addr + off, fw->image + off);
+		if (ret < 0) {
+			dev_err(&fw_info->ndev->nfc_dev->dev,
+				"Firmware update error (code=%d)\n", ret);
+			goto out;
+		}
+	}
+
+	ret = s3fwrn5_fw_complete_update_mode(fw_info);
+	if (ret < 0) {
+		dev_err(&fw_info->ndev->nfc_dev->dev,
+			"Unable to complete update mode\n");
+		goto out;
+	}
+
+	dev_info(&fw_info->ndev->nfc_dev->dev,
+		"Firmware update: success\n");
+
+out:
+	return ret;
+}
+
+void s3fwrn5_fw_init(struct s3fwrn5_fw_info *fw_info, const char *fw_name)
+{
+	fw_info->parity = 0x00;
+	fw_info->rsp = NULL;
+	fw_info->fw.fw = NULL;
+	strcpy(fw_info->fw_name, fw_name);
+	init_completion(&fw_info->completion);
+}
+
+void s3fwrn5_fw_cleanup(struct s3fwrn5_fw_info *fw_info)
+{
+	s3fwrn5_fw_release_firmware(fw_info);
+}
+
+int s3fwrn5_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
+{
+	struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+	struct s3fwrn5_fw_info *fw_info = &info->fw_info;
+
+	BUG_ON(fw_info->rsp);
+
+	fw_info->rsp = skb;
+
+	complete(&fw_info->completion);
+
+	return 0;
+}
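
The 44-byte image header parsed in s3fwrn5_fw_request_firmware() above is
plain data at fixed offsets (see the layout comment). A standalone sketch of
the same parse, hypothetical and assuming the driver's convention of
memcpy()ing bytes straight into native-endian 32-bit fields:

    #include <stdint.h>
    #include <string.h>

    struct image_hdr {
        char     date[13];       /* 0x00-0x0B, NUL-terminated here */
        uint32_t version;        /* 0x10 */
        uint32_t sig_off;        /* 0x14 */
        uint32_t sig_size;       /* 0x18 */
        uint32_t image_off;      /* 0x1C */
        uint32_t image_sectors;  /* 0x20 */
    };

    static int parse_image_hdr(const uint8_t *buf, size_t len,
                               struct image_hdr *hdr)
    {
        if (len < 44)            /* S3FWRN5_FW_IMAGE_HEADER_SIZE */
            return -1;
        memcpy(hdr->date, buf + 0x00, 12);
        hdr->date[12] = '\0';
        memcpy(&hdr->version, buf + 0x10, 4);
        memcpy(&hdr->sig_off, buf + 0x14, 4);
        memcpy(&hdr->sig_size, buf + 0x18, 4);
        memcpy(&hdr->image_off, buf + 0x1C, 4);
        memcpy(&hdr->image_sectors, buf + 0x20, 4);
        return 0;
    }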
diff --git a/drivers/nfc/s3fwrn5/firmware.h b/drivers/nfc/s3fwrn5/firmware.h
new file mode 100644
index 0000000..1ec0647
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/firmware.h
@@ -0,0 +1,111 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_S3FWRN5_FIRMWARE_H_
+#define __LOCAL_S3FWRN5_FIRMWARE_H_
+
+/* FW Message Types */
+#define S3FWRN5_FW_MSG_CMD			0x00
+#define S3FWRN5_FW_MSG_RSP			0x01
+#define S3FWRN5_FW_MSG_DATA			0x02
+
+/* FW Return Codes */
+#define S3FWRN5_FW_RET_SUCCESS			0x00
+#define S3FWRN5_FW_RET_MESSAGE_TYPE_INVALID	0x01
+#define S3FWRN5_FW_RET_COMMAND_INVALID		0x02
+#define S3FWRN5_FW_RET_PAGE_DATA_OVERFLOW	0x03
+#define S3FWRN5_FW_RET_SECT_DATA_OVERFLOW	0x04
+#define S3FWRN5_FW_RET_AUTHENTICATION_FAIL	0x05
+#define S3FWRN5_FW_RET_FLASH_OPERATION_FAIL	0x06
+#define S3FWRN5_FW_RET_ADDRESS_OUT_OF_RANGE	0x07
+#define S3FWRN5_FW_RET_PARAMETER_INVALID	0x08
+
+/* ---- FW Packet structures ---- */
+#define S3FWRN5_FW_HDR_SIZE 4
+
+struct s3fwrn5_fw_header {
+	__u8 type;
+	__u8 code;
+	__u16 len;
+};
+
+#define S3FWRN5_FW_CMD_RESET			0x00
+
+#define S3FWRN5_FW_CMD_GET_BOOTINFO		0x01
+
+struct s3fwrn5_fw_cmd_get_bootinfo_rsp {
+	__u8 hw_version[4];
+	__u16 sector_size;
+	__u16 page_size;
+	__u16 frame_max_size;
+	__u16 hw_buffer_size;
+};
+
+#define S3FWRN5_FW_CMD_ENTER_UPDATE_MODE	0x02
+
+struct s3fwrn5_fw_cmd_enter_updatemode {
+	__u16 hashcode_size;
+	__u16 signature_size;
+};
+
+#define S3FWRN5_FW_CMD_UPDATE_SECTOR		0x04
+
+struct s3fwrn5_fw_cmd_update_sector {
+	__u32 base_address;
+};
+
+#define S3FWRN5_FW_CMD_COMPLETE_UPDATE_MODE	0x05
+
+struct s3fwrn5_fw_image {
+	const struct firmware *fw;
+
+	char date[13];
+	u32 version;
+	const void *sig;
+	u32 sig_size;
+	const void *image;
+	u32 image_sectors;
+	const void *custom_sig;
+	u32 custom_sig_size;
+};
+
+struct s3fwrn5_fw_info {
+	struct nci_dev *ndev;
+	struct s3fwrn5_fw_image fw;
+	char fw_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
+
+	const void *sig;
+	u32 sig_size;
+	u32 sector_size;
+	u32 base_addr;
+
+	struct completion completion;
+	struct sk_buff *rsp;
+	char parity;
+};
+
+void s3fwrn5_fw_init(struct s3fwrn5_fw_info *fw_info, const char *fw_name);
+int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info);
+bool s3fwrn5_fw_check_version(struct s3fwrn5_fw_info *fw_info, u32 version);
+int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info);
+void s3fwrn5_fw_cleanup(struct s3fwrn5_fw_info *fw_info);
+
+int s3fwrn5_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
+
+#endif /* __LOCAL_S3FWRN5_FIRMWARE_H_ */
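
The 4-byte header defined above (type, code, 16-bit len) is the whole framing
for the bootloader protocol; s3fwrn5_fw_prep_msg() in firmware.c additionally
toggles a 0x80 parity bit in the type byte on every message. A hypothetical
standalone framing helper capturing the same layout (host-endian len, as in
the driver's struct):

    #include <stdint.h>
    #include <string.h>

    static uint8_t fw_parity;    /* alternates 0x00/0x80 per message */

    static size_t fw_build_frame(uint8_t *out, uint8_t type, uint8_t code,
                                 const void *data, uint16_t len)
    {
        out[0] = type | fw_parity;       /* S3FWRN5_FW_MSG_* | parity */
        fw_parity ^= 0x80;
        out[1] = code;                   /* S3FWRN5_FW_CMD_* */
        memcpy(&out[2], &len, 2);        /* host-endian, as in the struct */
        if (len)
            memcpy(&out[4], data, len);  /* payload */
        return 4 + len;                  /* S3FWRN5_FW_HDR_SIZE + payload */
    }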
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
new file mode 100644
index 0000000..b4dd7dd
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -0,0 +1,306 @@
+/*
+ * I2C Link Layer for Samsung S3FWRN5 NCI based Driver
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/module.h>
+
+#include <net/nfc/nfc.h>
+
+#include "s3fwrn5.h"
+
+#define S3FWRN5_I2C_DRIVER_NAME "s3fwrn5_i2c"
+
+#define S3FWRN5_I2C_MAX_PAYLOAD 32
+#define S3FWRN5_EN_WAIT_TIME 150
+
+struct s3fwrn5_i2c_phy {
+	struct i2c_client *i2c_dev;
+	struct nci_dev *ndev;
+
+	unsigned int gpio_en;
+	unsigned int gpio_fw_wake;
+
+	struct mutex mutex;
+
+	enum s3fwrn5_mode mode;
+	unsigned int irq_skip:1;
+};
+
+static void s3fwrn5_i2c_set_wake(void *phy_id, bool wake)
+{
+	struct s3fwrn5_i2c_phy *phy = phy_id;
+
+	mutex_lock(&phy->mutex);
+	gpio_set_value(phy->gpio_fw_wake, wake);
+	msleep(S3FWRN5_EN_WAIT_TIME/2);
+	mutex_unlock(&phy->mutex);
+}
+
+static void s3fwrn5_i2c_set_mode(void *phy_id, enum s3fwrn5_mode mode)
+{
+	struct s3fwrn5_i2c_phy *phy = phy_id;
+
+	mutex_lock(&phy->mutex);
+
+	if (phy->mode == mode)
+		goto out;
+
+	phy->mode = mode;
+
+	gpio_set_value(phy->gpio_en, 1);
+	gpio_set_value(phy->gpio_fw_wake, 0);
+	if (mode == S3FWRN5_MODE_FW)
+		gpio_set_value(phy->gpio_fw_wake, 1);
+
+	if (mode != S3FWRN5_MODE_COLD) {
+		msleep(S3FWRN5_EN_WAIT_TIME);
+		gpio_set_value(phy->gpio_en, 0);
+		msleep(S3FWRN5_EN_WAIT_TIME/2);
+	}
+
+	phy->irq_skip = true;
+
+out:
+	mutex_unlock(&phy->mutex);
+}
+
+static enum s3fwrn5_mode s3fwrn5_i2c_get_mode(void *phy_id)
+{
+	struct s3fwrn5_i2c_phy *phy = phy_id;
+	enum s3fwrn5_mode mode;
+
+	mutex_lock(&phy->mutex);
+
+	mode = phy->mode;
+
+	mutex_unlock(&phy->mutex);
+
+	return mode;
+}
+
+static int s3fwrn5_i2c_write(void *phy_id, struct sk_buff *skb)
+{
+	struct s3fwrn5_i2c_phy *phy = phy_id;
+	int ret;
+
+	mutex_lock(&phy->mutex);
+
+	phy->irq_skip = false;
+
+	ret = i2c_master_send(phy->i2c_dev, skb->data, skb->len);
+	if (ret == -EREMOTEIO) {
+		/* Retry, chip was in standby */
+		usleep_range(110000, 120000);
+		ret = i2c_master_send(phy->i2c_dev, skb->data, skb->len);
+	}
+
+	mutex_unlock(&phy->mutex);
+
+	if (ret < 0)
+		return ret;
+
+	if (ret != skb->len)
+		return -EREMOTEIO;
+
+	return 0;
+}
+
+static struct s3fwrn5_phy_ops i2c_phy_ops = {
+	.set_wake = s3fwrn5_i2c_set_wake,
+	.set_mode = s3fwrn5_i2c_set_mode,
+	.get_mode = s3fwrn5_i2c_get_mode,
+	.write = s3fwrn5_i2c_write,
+};
+
+static int s3fwrn5_i2c_read(struct s3fwrn5_i2c_phy *phy)
+{
+	struct sk_buff *skb;
+	size_t hdr_size;
+	size_t data_len;
+	char hdr[4];
+	int ret;
+
+	hdr_size = (phy->mode == S3FWRN5_MODE_NCI) ?
+		NCI_CTRL_HDR_SIZE : S3FWRN5_FW_HDR_SIZE;
+	ret = i2c_master_recv(phy->i2c_dev, hdr, hdr_size);
+	if (ret < 0)
+		return ret;
+
+	if (ret < hdr_size)
+		return -EBADMSG;
+
+	data_len = (phy->mode == S3FWRN5_MODE_NCI) ?
+		((struct nci_ctrl_hdr *)hdr)->plen :
+		((struct s3fwrn5_fw_header *)hdr)->len;
+
+	skb = alloc_skb(hdr_size + data_len, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	memcpy(skb_put(skb, hdr_size), hdr, hdr_size);
+
+	if (data_len == 0)
+		goto out;
+
+	ret = i2c_master_recv(phy->i2c_dev, skb_put(skb, data_len), data_len);
+	if (ret != data_len) {
+		kfree_skb(skb);
+		return -EBADMSG;
+	}
+
+out:
+	return s3fwrn5_recv_frame(phy->ndev, skb, phy->mode);
+}
+
+static irqreturn_t s3fwrn5_i2c_irq_thread_fn(int irq, void *phy_id)
+{
+	struct s3fwrn5_i2c_phy *phy = phy_id;
+	int ret = 0;
+
+	if (!phy || !phy->ndev) {
+		WARN_ON_ONCE(1);
+		return IRQ_NONE;
+	}
+
+	mutex_lock(&phy->mutex);
+
+	if (phy->irq_skip)
+		goto out;
+
+	switch (phy->mode) {
+	case S3FWRN5_MODE_NCI:
+	case S3FWRN5_MODE_FW:
+		ret = s3fwrn5_i2c_read(phy);
+		break;
+	case S3FWRN5_MODE_COLD:
+		ret = -EREMOTEIO;
+		break;
+	}
+
+out:
+	mutex_unlock(&phy->mutex);
+
+	return IRQ_HANDLED;
+}
+
+static int s3fwrn5_i2c_parse_dt(struct i2c_client *client)
+{
+	struct s3fwrn5_i2c_phy *phy = i2c_get_clientdata(client);
+	struct device_node *np = client->dev.of_node;
+
+	if (!np)
+		return -ENODEV;
+
+	phy->gpio_en = of_get_named_gpio(np, "s3fwrn5,en-gpios", 0);
+	if (!gpio_is_valid(phy->gpio_en))
+		return -ENODEV;
+
+	phy->gpio_fw_wake = of_get_named_gpio(np, "s3fwrn5,fw-gpios", 0);
+	if (!gpio_is_valid(phy->gpio_fw_wake))
+		return -ENODEV;
+
+	return 0;
+}
+
+static int s3fwrn5_i2c_probe(struct i2c_client *client,
+				  const struct i2c_device_id *id)
+{
+	struct s3fwrn5_i2c_phy *phy;
+	int ret;
+
+	phy = devm_kzalloc(&client->dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	mutex_init(&phy->mutex);
+	phy->mode = S3FWRN5_MODE_COLD;
+	phy->irq_skip = true;
+
+	phy->i2c_dev = client;
+	i2c_set_clientdata(client, phy);
+
+	ret = s3fwrn5_i2c_parse_dt(client);
+	if (ret < 0)
+		return ret;
+
+	ret = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_en,
+		GPIOF_OUT_INIT_HIGH, "s3fwrn5_en");
+	if (ret < 0)
+		return ret;
+
+	ret = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_fw_wake,
+		GPIOF_OUT_INIT_LOW, "s3fwrn5_fw_wake");
+	if (ret < 0)
+		return ret;
+
+	ret = s3fwrn5_probe(&phy->ndev, phy, &phy->i2c_dev->dev, &i2c_phy_ops,
+		S3FWRN5_I2C_MAX_PAYLOAD);
+	if (ret < 0)
+		return ret;
+
+	ret = request_threaded_irq(phy->i2c_dev->irq, NULL,
+		s3fwrn5_i2c_irq_thread_fn, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+		S3FWRN5_I2C_DRIVER_NAME, phy);
+	if (ret)
+		s3fwrn5_remove(phy->ndev);
+
+	return ret;
+}
+
+static int s3fwrn5_i2c_remove(struct i2c_client *client)
+{
+	struct s3fwrn5_i2c_phy *phy = i2c_get_clientdata(client);
+
+	s3fwrn5_remove(phy->ndev);
+
+	return 0;
+}
+
+static struct i2c_device_id s3fwrn5_i2c_id_table[] = {
+	{S3FWRN5_I2C_DRIVER_NAME, 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, s3fwrn5_i2c_id_table);
+
+static const struct of_device_id of_s3fwrn5_i2c_match[] = {
+	{ .compatible = "samsung,s3fwrn5-i2c", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, of_s3fwrn5_i2c_match);
+
+static struct i2c_driver s3fwrn5_i2c_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = S3FWRN5_I2C_DRIVER_NAME,
+		.of_match_table = of_match_ptr(of_s3fwrn5_i2c_match),
+	},
+	.probe = s3fwrn5_i2c_probe,
+	.remove = s3fwrn5_i2c_remove,
+	.id_table = s3fwrn5_i2c_id_table,
+};
+
+module_i2c_driver(s3fwrn5_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("I2C driver for Samsung S3FWRN5");
+MODULE_AUTHOR("Robert Baldyga <r.baldyga@samsung.com>");
diff --git a/drivers/nfc/s3fwrn5/nci.c b/drivers/nfc/s3fwrn5/nci.c
new file mode 100644
index 0000000..ace0071
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/nci.c
@@ -0,0 +1,165 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/completion.h>
+#include <linux/firmware.h>
+
+#include "s3fwrn5.h"
+#include "nci.h"
+
+static int s3fwrn5_nci_prop_rsp(struct nci_dev *ndev, struct sk_buff *skb)
+{
+	__u8 status = skb->data[0];
+
+	nci_req_complete(ndev, status);
+	return 0;
+}
+
+static struct nci_prop_ops s3fwrn5_nci_prop_ops[] = {
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_AGAIN),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_GET_RFREG),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_SET_RFREG),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_GET_RFREG_VER),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_SET_RFREG_VER),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_START_RFREG),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_STOP_RFREG),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_FW_CFG),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_WR_RESET),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+};
+
+void s3fwrn5_nci_get_prop_ops(struct nci_prop_ops **ops, size_t *n)
+{
+	*ops = s3fwrn5_nci_prop_ops;
+	*n = ARRAY_SIZE(s3fwrn5_nci_prop_ops);
+}
+
+#define S3FWRN5_RFREG_SECTION_SIZE 252
+
+int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
+{
+	const struct firmware *fw;
+	struct nci_prop_fw_cfg_cmd fw_cfg;
+	struct nci_prop_set_rfreg_cmd set_rfreg;
+	struct nci_prop_stop_rfreg_cmd stop_rfreg;
+	u32 checksum;
+	int i, len;
+	int ret;
+
+	ret = request_firmware(&fw, fw_name, &info->ndev->nfc_dev->dev);
+	if (ret < 0)
+		return ret;
+
+	/* Compute rfreg checksum */
+
+	checksum = 0;
+	for (i = 0; i < fw->size; i += 4)
+		checksum += *((u32 *)(fw->data+i));
+
+	/* Set default clock configuration for external crystal */
+
+	fw_cfg.clk_type = 0x01;
+	fw_cfg.clk_speed = 0xff;
+	fw_cfg.clk_req = 0xff;
+	ret = nci_prop_cmd(info->ndev, NCI_PROP_FW_CFG,
+		sizeof(fw_cfg), (__u8 *)&fw_cfg);
+	if (ret < 0)
+		goto out;
+
+	/* Start rfreg configuration */
+
+	dev_info(&info->ndev->nfc_dev->dev,
+		"rfreg configuration update: %s\n", fw_name);
+
+	ret = nci_prop_cmd(info->ndev, NCI_PROP_START_RFREG, 0, NULL);
+	if (ret < 0) {
+		dev_err(&info->ndev->nfc_dev->dev,
+			"Unable to start rfreg update\n");
+		goto out;
+	}
+
+	/* Update rfreg */
+
+	set_rfreg.index = 0;
+	for (i = 0; i < fw->size; i += S3FWRN5_RFREG_SECTION_SIZE) {
+		len = (fw->size - i < S3FWRN5_RFREG_SECTION_SIZE) ?
+			(fw->size - i) : S3FWRN5_RFREG_SECTION_SIZE;
+		memcpy(set_rfreg.data, fw->data+i, len);
+		ret = nci_prop_cmd(info->ndev, NCI_PROP_SET_RFREG,
+			len+1, (__u8 *)&set_rfreg);
+		if (ret < 0) {
+			dev_err(&info->ndev->nfc_dev->dev,
+				"rfreg update error (code=%d)\n", ret);
+			goto out;
+		}
+		set_rfreg.index++;
+	}
+
+	/* Finish rfreg configuration */
+
+	stop_rfreg.checksum = checksum & 0xffff;
+	ret = nci_prop_cmd(info->ndev, NCI_PROP_STOP_RFREG,
+		sizeof(stop_rfreg), (__u8 *)&stop_rfreg);
+	if (ret < 0) {
+		dev_err(&info->ndev->nfc_dev->dev,
+			"Unable to stop rfreg update\n");
+		goto out;
+	}
+
+	dev_info(&info->ndev->nfc_dev->dev,
+		"rfreg configuration update: success\n");
+out:
+	release_firmware(fw);
+	return ret;
+}
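
The checksum sent with NCI_PROP_STOP_RFREG above is simply the 32-bit sum of
the rfreg blob taken word by word, truncated to its low 16 bits. A standalone
sketch of the same arithmetic, hypothetical and assuming (as the driver's loop
does) that the blob size is a multiple of 4:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static uint16_t rfreg_checksum(const uint8_t *data, size_t size)
    {
        uint32_t sum = 0, word;
        size_t i;

        for (i = 0; i + 4 <= size; i += 4) {
            memcpy(&word, data + i, 4);  /* native-endian, like the driver */
            sum += word;
        }
        return sum & 0xffff;             /* low 16 bits go on the wire */
    }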
diff --git a/drivers/nfc/s3fwrn5/nci.h b/drivers/nfc/s3fwrn5/nci.h
new file mode 100644
index 0000000..0e68d43
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/nci.h
@@ -0,0 +1,89 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_S3FWRN5_NCI_H_
+#define __LOCAL_S3FWRN5_NCI_H_
+
+#include "s3fwrn5.h"
+
+#define NCI_PROP_AGAIN		0x01
+
+#define NCI_PROP_GET_RFREG	0x21
+#define NCI_PROP_SET_RFREG	0x22
+
+struct nci_prop_set_rfreg_cmd {
+	__u8 index;
+	__u8 data[252];
+};
+
+struct nci_prop_set_rfreg_rsp {
+	__u8 status;
+};
+
+#define NCI_PROP_GET_RFREG_VER	0x24
+
+struct nci_prop_get_rfreg_ver_rsp {
+	__u8 status;
+	__u8 data[8];
+};
+
+#define NCI_PROP_SET_RFREG_VER	0x25
+
+struct nci_prop_set_rfreg_ver_cmd {
+	__u8 data[8];
+};
+
+struct nci_prop_set_rfreg_ver_rsp {
+	__u8 status;
+};
+
+#define NCI_PROP_START_RFREG	0x26
+
+struct nci_prop_start_rfreg_rsp {
+	__u8 status;
+};
+
+#define NCI_PROP_STOP_RFREG	0x27
+
+struct nci_prop_stop_rfreg_cmd {
+	__u16 checksum;
+};
+
+struct nci_prop_stop_rfreg_rsp {
+	__u8 status;
+};
+
+#define NCI_PROP_FW_CFG		0x28
+
+struct nci_prop_fw_cfg_cmd {
+	__u8 clk_type;
+	__u8 clk_speed;
+	__u8 clk_req;
+};
+
+struct nci_prop_fw_cfg_rsp {
+	__u8 status;
+};
+
+#define NCI_PROP_WR_RESET	0x2f
+
+void s3fwrn5_nci_get_prop_ops(struct nci_prop_ops **ops, size_t *n);
+int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name);
+
+#endif /* __LOCAL_S3FWRN5_NCI_H_ */
diff --git a/drivers/nfc/s3fwrn5/s3fwrn5.h b/drivers/nfc/s3fwrn5/s3fwrn5.h
new file mode 100644
index 0000000..89210d4
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/s3fwrn5.h
@@ -0,0 +1,99 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_S3FWRN5_H_
+#define __LOCAL_S3FWRN5_H_
+
+#include <linux/nfc.h>
+
+#include <net/nfc/nci_core.h>
+
+#include "firmware.h"
+
+enum s3fwrn5_mode {
+	S3FWRN5_MODE_COLD,
+	S3FWRN5_MODE_NCI,
+	S3FWRN5_MODE_FW,
+};
+
+struct s3fwrn5_phy_ops {
+	void (*set_wake)(void *id, bool sleep);
+	void (*set_mode)(void *id, enum s3fwrn5_mode);
+	enum s3fwrn5_mode (*get_mode)(void *id);
+	int (*write)(void *id, struct sk_buff *skb);
+};
+
+struct s3fwrn5_info {
+	struct nci_dev *ndev;
+	void *phy_id;
+	struct device *pdev;
+
+	struct s3fwrn5_phy_ops *phy_ops;
+	unsigned int max_payload;
+
+	struct s3fwrn5_fw_info fw_info;
+
+	struct mutex mutex;
+};
+
+static inline int s3fwrn5_set_mode(struct s3fwrn5_info *info,
+	enum s3fwrn5_mode mode)
+{
+	if (!info->phy_ops->set_mode)
+		return -ENOTSUPP;
+
+	info->phy_ops->set_mode(info->phy_id, mode);
+
+	return 0;
+}
+
+static inline enum s3fwrn5_mode s3fwrn5_get_mode(struct s3fwrn5_info *info)
+{
+	if (!info->phy_ops->get_mode)
+		return -ENOTSUPP;
+
+	return info->phy_ops->get_mode(info->phy_id);
+}
+
+static inline int s3fwrn5_set_wake(struct s3fwrn5_info *info, bool wake)
+{
+	if (!info->phy_ops->set_wake)
+		return -ENOTSUPP;
+
+	info->phy_ops->set_wake(info->phy_id, wake);
+
+	return 0;
+}
+
+static inline int s3fwrn5_write(struct s3fwrn5_info *info, struct sk_buff *skb)
+{
+	if (!info->phy_ops->write)
+		return -ENOTSUPP;
+
+	return info->phy_ops->write(info->phy_id, skb);
+}
+
+int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
+	struct s3fwrn5_phy_ops *phy_ops, unsigned int max_payload);
+void s3fwrn5_remove(struct nci_dev *ndev);
+
+int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
+	enum s3fwrn5_mode mode);
+
+#endif /* __LOCAL_S3FWRN5_H_ */
diff --git a/drivers/nfc/st-nci/Kconfig b/drivers/nfc/st-nci/Kconfig
index fc3904c..e7c6db9 100644
--- a/drivers/nfc/st-nci/Kconfig
+++ b/drivers/nfc/st-nci/Kconfig
@@ -21,3 +21,14 @@
 
 	  If you choose to build a module, it'll be called st-nci_i2c.
 	  Say N if unsure.
+
+config NFC_ST_NCI_SPI
+	tristate "NFC ST NCI SPI support"
+	depends on NFC_ST_NCI && SPI
+	---help---
+	  This module adds support for an SPI interface to the
+	  STMicroelectronics NFC NCI chips family.
+	  Select this if your platform is using the SPI bus.
+
+	  If you choose to build a module, it'll be called st-nci_spi.
+	  Say N if unsure.
diff --git a/drivers/nfc/st-nci/Makefile b/drivers/nfc/st-nci/Makefile
index 0df157d..348ce76 100644
--- a/drivers/nfc/st-nci/Makefile
+++ b/drivers/nfc/st-nci/Makefile
@@ -7,3 +7,6 @@
 
 st-nci_i2c-objs = i2c.o
 obj-$(CONFIG_NFC_ST_NCI_I2C) += st-nci_i2c.o
+
+st-nci_spi-objs = spi.o
+obj-$(CONFIG_NFC_ST_NCI_SPI) += st-nci_spi.o
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index 06175ce..707ed2e 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -25,15 +25,15 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/nfc.h>
-#include <linux/platform_data/st_nci.h>
+#include <linux/platform_data/st-nci.h>
 
 #include "ndlc.h"
 
-#define DRIVER_DESC "NCI NFC driver for ST21NFCB"
+#define DRIVER_DESC "NCI NFC driver for ST_NCI"
 
 /* ndlc header */
-#define ST21NFCB_FRAME_HEADROOM	1
-#define ST21NFCB_FRAME_TAILROOM 0
+#define ST_NCI_FRAME_HEADROOM	1
+#define ST_NCI_FRAME_TAILROOM 0
 
 #define ST_NCI_I2C_MIN_SIZE 4   /* PCB(1) + NCI Packet header(3) */
 #define ST_NCI_I2C_MAX_SIZE 250 /* req 4.2.1 */
@@ -118,15 +118,10 @@
 /*
  * Reads an ndlc frame and returns it in a newly allocated sk_buff.
  * returns:
- * frame size : if received frame is complete (find ST21NFCB_SOF_EOF at
- * end of read)
- * -EAGAIN : if received frame is incomplete (not find ST21NFCB_SOF_EOF
- * at end of read)
+ * 0 : if received frame is complete
  * -EREMOTEIO : i2c read error (fatal)
  * -EBADMSG : frame was incorrect and discarded
- * (value returned from st_nci_i2c_repack)
- * -EIO : if no ST21NFCB_SOF_EOF is found after reaching
- * the read length end sequence
+ * -ENOMEM : cannot allocate skb, frame dropped
  */
 static int st_nci_i2c_read(struct st_nci_i2c_phy *phy,
 				 struct sk_buff **skb)
@@ -179,7 +174,7 @@
 /*
  * Reads an ndlc frame from the chip.
  *
- * On ST21NFCB, IRQ goes in idle state when read starts.
+ * On ST_NCI, IRQ goes in idle state when read starts.
  */
 static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
 {
@@ -325,12 +320,12 @@
 		}
 	} else {
 		nfc_err(&client->dev,
-			"st21nfcb platform resources not available\n");
+			"st_nci platform resources not available\n");
 		return -ENODEV;
 	}
 
 	r = ndlc_probe(phy, &i2c_phy_ops, &client->dev,
-			ST21NFCB_FRAME_HEADROOM, ST21NFCB_FRAME_TAILROOM,
+			ST_NCI_FRAME_HEADROOM, ST_NCI_FRAME_TAILROOM,
 			&phy->ndlc);
 	if (r < 0) {
 		nfc_err(&client->dev, "Unable to register ndlc layer\n");
diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
index 56c6a4c..d2cf84e 100644
--- a/drivers/nfc/st-nci/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -171,6 +171,8 @@
 		if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_SUPERVISOR) {
 			switch (pcb & PCB_SYNC_MASK) {
 			case PCB_SYNC_ACK:
+				skb = skb_dequeue(&ndlc->ack_pending_q);
+				kfree_skb(skb);
 				del_timer_sync(&ndlc->t1_timer);
 				del_timer_sync(&ndlc->t2_timer);
 				ndlc->t2_active = false;
@@ -192,12 +194,13 @@
 					  msecs_to_jiffies(NDLC_TIMER_T1_WAIT));
 				break;
 			default:
-				pr_err("UNKNOWN Packet Control Byte=%d\n", pcb);
 				kfree_skb(skb);
 				break;
 			}
-		} else {
+		} else if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_DATAFRAME) {
 			nci_recv_frame(ndlc->ndev, skb);
+		} else {
+			kfree_skb(skb);
 		}
 	}
 }
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
new file mode 100644
index 0000000..598a58c
--- /dev/null
+++ b/drivers/nfc/st-nci/spi.c
@@ -0,0 +1,392 @@
+/*
+ * SPI Link Layer for ST NCI based Driver
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/nfc.h>
+#include <linux/platform_data/st-nci.h>
+
+#include "ndlc.h"
+
+#define DRIVER_DESC "NCI NFC driver for ST_NCI"
+
+/* ndlc header */
+#define ST_NCI_FRAME_HEADROOM	1
+#define ST_NCI_FRAME_TAILROOM 0
+
+#define ST_NCI_SPI_MIN_SIZE 4   /* PCB(1) + NCI Packet header(3) */
+#define ST_NCI_SPI_MAX_SIZE 250 /* req 4.2.1 */
+
+#define ST_NCI_SPI_DRIVER_NAME "st_nci_spi"
+
+static struct spi_device_id st_nci_spi_id_table[] = {
+	{ST_NCI_SPI_DRIVER_NAME, 0},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, st_nci_spi_id_table);
+
+struct st_nci_spi_phy {
+	struct spi_device *spi_dev;
+	struct llt_ndlc *ndlc;
+
+	unsigned int gpio_reset;
+	unsigned int irq_polarity;
+};
+
+#define SPI_DUMP_SKB(info, skb)					\
+do {								\
+	pr_debug("%s:\n", info);				\
+	print_hex_dump(KERN_DEBUG, "spi: ", DUMP_PREFIX_OFFSET,	\
+		       16, 1, (skb)->data, (skb)->len, 0);	\
+} while (0)
+
+static int st_nci_spi_enable(void *phy_id)
+{
+	struct st_nci_spi_phy *phy = phy_id;
+
+	gpio_set_value(phy->gpio_reset, 0);
+	usleep_range(10000, 15000);
+	gpio_set_value(phy->gpio_reset, 1);
+	usleep_range(80000, 85000);
+
+	if (phy->ndlc->powered == 0)
+		enable_irq(phy->spi_dev->irq);
+
+	return 0;
+}
+
+static void st_nci_spi_disable(void *phy_id)
+{
+	struct st_nci_spi_phy *phy = phy_id;
+
+	disable_irq_nosync(phy->spi_dev->irq);
+}
+
+/*
+ * Writing a frame must not return the number of written bytes.
+ * It must return either zero for success, or <0 for error.
+ * In addition, it must not alter the skb.
+ */
+static int st_nci_spi_write(void *phy_id, struct sk_buff *skb)
+{
+	int r;
+	struct st_nci_spi_phy *phy = phy_id;
+	struct spi_device *dev = phy->spi_dev;
+	struct sk_buff *skb_rx;
+	u8 buf[ST_NCI_SPI_MAX_SIZE];
+	struct spi_transfer spi_xfer = {
+		.tx_buf = skb->data,
+		.rx_buf = buf,
+		.len = skb->len,
+	};
+
+	SPI_DUMP_SKB("st_nci_spi_write", skb);
+
+	if (phy->ndlc->hard_fault != 0)
+		return phy->ndlc->hard_fault;
+
+	r = spi_sync_transfer(dev, &spi_xfer, 1);
+	/*
+	 * We may have received some valuable data on miso line.
+	 * Send them back in the ndlc state machine.
+	 */
+	if (!r) {
+		skb_rx = alloc_skb(skb->len, GFP_KERNEL);
+		if (!skb_rx) {
+			r = -ENOMEM;
+			goto exit;
+		}
+
+		skb_put(skb_rx, skb->len);
+		memcpy(skb_rx->data, buf, skb->len);
+		ndlc_recv(phy->ndlc, skb_rx);
+	}
+
+exit:
+	return r;
+}
+
+/*
+ * Reads an ndlc frame and returns it in a newly allocated sk_buff.
+ * returns:
+ * 0 : if received frame is complete
+ * -EREMOTEIO : spi read error (fatal)
+ * -EBADMSG : frame was incorrect and discarded
+ * -ENOMEM : cannot allocate skb, frame dropped
+ */
+static int st_nci_spi_read(struct st_nci_spi_phy *phy,
+			struct sk_buff **skb)
+{
+	int r;
+	u8 len;
+	u8 buf[ST_NCI_SPI_MAX_SIZE];
+	struct spi_device *dev = phy->spi_dev;
+	struct spi_transfer spi_xfer = {
+		.rx_buf = buf,
+		.len = ST_NCI_SPI_MIN_SIZE,
+	};
+
+	r = spi_sync_transfer(dev, &spi_xfer, 1);
+	if (r < 0)
+		return -EREMOTEIO;
+
+	len = be16_to_cpu(*(__be16 *) (buf + 2));
+	if (len > ST_NCI_SPI_MAX_SIZE) {
+		nfc_err(&dev->dev, "invalid frame len\n");
+		phy->ndlc->hard_fault = 1;
+		return -EBADMSG;
+	}
+
+	*skb = alloc_skb(ST_NCI_SPI_MIN_SIZE + len, GFP_KERNEL);
+	if (*skb == NULL)
+		return -ENOMEM;
+
+	skb_reserve(*skb, ST_NCI_SPI_MIN_SIZE);
+	skb_put(*skb, ST_NCI_SPI_MIN_SIZE);
+	memcpy((*skb)->data, buf, ST_NCI_SPI_MIN_SIZE);
+
+	if (!len)
+		return 0;
+
+	spi_xfer.len = len;
+	r = spi_sync_transfer(dev, &spi_xfer, 1);
+	if (r < 0) {
+		kfree_skb(*skb);
+		return -EREMOTEIO;
+	}
+
+	skb_put(*skb, len);
+	memcpy((*skb)->data + ST_NCI_SPI_MIN_SIZE, buf, len);
+
+	SPI_DUMP_SKB("spi frame read", *skb);
+
+	return 0;
+}
+
+/*
+ * Reads an ndlc frame from the chip.
+ *
+ * On ST_NCI, IRQ goes in idle state when read starts.
+ */
+static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
+{
+	struct st_nci_spi_phy *phy = phy_id;
+	struct spi_device *dev;
+	struct sk_buff *skb = NULL;
+	int r;
+
+	if (!phy || !phy->ndlc || irq != phy->spi_dev->irq) {
+		WARN_ON_ONCE(1);
+		return IRQ_NONE;
+	}
+
+	dev = phy->spi_dev;
+	dev_dbg(&dev->dev, "IRQ\n");
+
+	if (phy->ndlc->hard_fault)
+		return IRQ_HANDLED;
+
+	if (!phy->ndlc->powered) {
+		st_nci_spi_disable(phy);
+		return IRQ_HANDLED;
+	}
+
+	r = st_nci_spi_read(phy, &skb);
+	if (r == -EREMOTEIO || r == -ENOMEM || r == -EBADMSG)
+		return IRQ_HANDLED;
+
+	ndlc_recv(phy->ndlc, skb);
+
+	return IRQ_HANDLED;
+}
+
+static struct nfc_phy_ops spi_phy_ops = {
+	.write = st_nci_spi_write,
+	.enable = st_nci_spi_enable,
+	.disable = st_nci_spi_disable,
+};
+
+#ifdef CONFIG_OF
+static int st_nci_spi_of_request_resources(struct spi_device *dev)
+{
+	struct st_nci_spi_phy *phy = spi_get_drvdata(dev);
+	struct device_node *pp;
+	int gpio;
+	int r;
+
+	pp = dev->dev.of_node;
+	if (!pp)
+		return -ENODEV;
+
+	/* Get GPIO from device tree */
+	gpio = of_get_named_gpio(pp, "reset-gpios", 0);
+	if (gpio < 0) {
+		nfc_err(&dev->dev,
+			"Failed to retrieve reset-gpios from device tree\n");
+		return gpio;
+	}
+
+	/* GPIO request and configuration */
+	r = devm_gpio_request_one(&dev->dev, gpio,
+				GPIOF_OUT_INIT_HIGH, "clf_reset");
+	if (r) {
+		nfc_err(&dev->dev, "Failed to request reset pin\n");
+		return r;
+	}
+	phy->gpio_reset = gpio;
+
+	phy->irq_polarity = irq_get_trigger_type(dev->irq);
+
+	return 0;
+}
+#else
+static int st_nci_spi_of_request_resources(struct spi_device *dev)
+{
+	return -ENODEV;
+}
+#endif
+
+static int st_nci_spi_request_resources(struct spi_device *dev)
+{
+	struct st_nci_nfc_platform_data *pdata;
+	struct st_nci_spi_phy *phy = spi_get_drvdata(dev);
+	int r;
+
+	pdata = dev->dev.platform_data;
+	if (pdata == NULL) {
+		nfc_err(&dev->dev, "No platform data\n");
+		return -EINVAL;
+	}
+
+	/* store for later use */
+	phy->gpio_reset = pdata->gpio_reset;
+	phy->irq_polarity = pdata->irq_polarity;
+
+	r = devm_gpio_request_one(&dev->dev,
+			phy->gpio_reset, GPIOF_OUT_INIT_HIGH, "clf_reset");
+	if (r) {
+		pr_err("%s : reset gpio_request failed\n", __FILE__);
+		return r;
+	}
+
+	return 0;
+}
+
+static int st_nci_spi_probe(struct spi_device *dev)
+{
+	struct st_nci_spi_phy *phy;
+	struct st_nci_nfc_platform_data *pdata;
+	int r;
+
+	dev_dbg(&dev->dev, "%s\n", __func__);
+	dev_dbg(&dev->dev, "IRQ: %d\n", dev->irq);
+
+	/* Check SPI platform functionalities */
+	if (!dev) {
+		pr_debug("%s: dev is NULL. Device is not accessible.\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	phy = devm_kzalloc(&dev->dev, sizeof(struct st_nci_spi_phy),
+			   GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	phy->spi_dev = dev;
+
+	spi_set_drvdata(dev, phy);
+
+	pdata = dev->dev.platform_data;
+	if (!pdata && dev->dev.of_node) {
+		r = st_nci_spi_of_request_resources(dev);
+		if (r) {
+			nfc_err(&dev->dev, "No platform data\n");
+			return r;
+		}
+	} else if (pdata) {
+		r = st_nci_spi_request_resources(dev);
+		if (r) {
+			nfc_err(&dev->dev,
+				"Cannot get platform resources\n");
+			return r;
+		}
+	} else {
+		nfc_err(&dev->dev,
+			"st_nci platform resources not available\n");
+		return -ENODEV;
+	}
+
+	r = ndlc_probe(phy, &spi_phy_ops, &dev->dev,
+			ST_NCI_FRAME_HEADROOM, ST_NCI_FRAME_TAILROOM,
+			&phy->ndlc);
+	if (r < 0) {
+		nfc_err(&dev->dev, "Unable to register ndlc layer\n");
+		return r;
+	}
+
+	r = devm_request_threaded_irq(&dev->dev, dev->irq, NULL,
+				st_nci_irq_thread_fn,
+				phy->irq_polarity | IRQF_ONESHOT,
+				ST_NCI_SPI_DRIVER_NAME, phy);
+	if (r < 0)
+		nfc_err(&dev->dev, "Unable to register IRQ handler\n");
+
+	return r;
+}
+
+static int st_nci_spi_remove(struct spi_device *dev)
+{
+	struct st_nci_spi_phy *phy = spi_get_drvdata(dev);
+
+	dev_dbg(&dev->dev, "%s\n", __func__);
+
+	ndlc_remove(phy->ndlc);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_st_nci_spi_match[] = {
+	{ .compatible = "st,st21nfcb-spi", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, of_st_nci_spi_match);
+#endif
+
+static struct spi_driver st_nci_spi_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = ST_NCI_SPI_DRIVER_NAME,
+		.of_match_table = of_match_ptr(of_st_nci_spi_match),
+	},
+	.probe = st_nci_spi_probe,
+	.id_table = st_nci_spi_id_table,
+	.remove = st_nci_spi_remove,
+};
+
+module_spi_driver(st_nci_spi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
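
st_nci_spi_read() above uses a two-phase read: a first transfer fetches the
4-byte header (PCB plus NCI packet header), the payload length is decoded
big-endian from the last two header bytes, and a second transfer fetches the
payload. A hypothetical standalone sketch of that pattern, with read_buf()
standing in for a bus transfer (an assumption, not a kernel API):

    #include <stddef.h>
    #include <stdint.h>

    #define HDR_SIZE 4       /* ST_NCI_SPI_MIN_SIZE */
    #define MAX_SIZE 250     /* ST_NCI_SPI_MAX_SIZE */

    static int read_frame(int (*read_buf)(uint8_t *, size_t),
                          uint8_t *buf, size_t *out_len)
    {
        size_t len;

        if (read_buf(buf, HDR_SIZE) < 0)
            return -1;                        /* bus error */

        len = ((size_t)buf[2] << 8) | buf[3]; /* big-endian length */
        if (len > MAX_SIZE)
            return -2;                        /* corrupt frame, as above */

        if (len && read_buf(buf + HDR_SIZE, len) < 0)
            return -1;

        *out_len = HDR_SIZE + len;
        return 0;
    }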
diff --git a/drivers/nfc/st-nci/st-nci_se.c b/drivers/nfc/st-nci/st-nci_se.c
index 97addfa..c742ef6 100644
--- a/drivers/nfc/st-nci/st-nci_se.c
+++ b/drivers/nfc/st-nci/st-nci_se.c
@@ -189,14 +189,14 @@
 				ST_NCI_DEVICE_MGNT_GATE,
 				ST_NCI_DEVICE_MGNT_PIPE);
 	if (r < 0)
-		goto free_info;
+		return r;
 
 	/* Get pipe list */
 	r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
 			ST_NCI_DM_GETINFO, pipe_list, sizeof(pipe_list),
 			&skb_pipe_list);
 	if (r < 0)
-		goto free_info;
+		return r;
 
 	/* Complete the existing gate_pipe table */
 	for (i = 0; i < skb_pipe_list->len; i++) {
@@ -222,6 +222,7 @@
 		    dm_pipe_info->src_host_id != ST_NCI_ESE_HOST_ID) {
 			pr_err("Unexpected apdu_reader pipe on host %x\n",
 			       dm_pipe_info->src_host_id);
+			kfree_skb(skb_pipe_info);
 			continue;
 		}
 
@@ -241,13 +242,12 @@
 			ndev->hci_dev->pipes[st_nci_gates[j].pipe].host =
 						dm_pipe_info->src_host_id;
 		}
+		kfree_skb(skb_pipe_info);
 	}
 
 	memcpy(ndev->hci_dev->init_data.gates, st_nci_gates,
 	       sizeof(st_nci_gates));
 
-free_info:
-	kfree_skb(skb_pipe_info);
 	kfree_skb(skb_pipe_list);
 	return r;
 }
diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
index d251f72..0512865 100644
--- a/drivers/nfc/st21nfca/st21nfca.c
+++ b/drivers/nfc/st21nfca/st21nfca.c
@@ -148,14 +148,14 @@
 				ST21NFCA_DEVICE_MGNT_GATE,
 				ST21NFCA_DEVICE_MGNT_PIPE);
 	if (r < 0)
-		goto free_info;
+		return r;
 
 	/* Get pipe list */
 	r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
 			ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list),
 			&skb_pipe_list);
 	if (r < 0)
-		goto free_info;
+		return r;
 
 	/* Complete the existing gate_pipe table */
 	for (i = 0; i < skb_pipe_list->len; i++) {
@@ -181,6 +181,7 @@
 			info->src_host_id != ST21NFCA_ESE_HOST_ID) {
 			pr_err("Unexpected apdu_reader pipe on host %x\n",
 				info->src_host_id);
+			kfree_skb(skb_pipe_info);
 			continue;
 		}
 
@@ -200,6 +201,7 @@
 			hdev->pipes[st21nfca_gates[j].pipe].dest_host =
 							info->src_host_id;
 		}
+		kfree_skb(skb_pipe_info);
 	}
 
 	/*
@@ -214,13 +216,12 @@
 					st21nfca_gates[i].gate,
 					st21nfca_gates[i].pipe);
 			if (r < 0)
-				goto free_info;
+				goto free_list;
 		}
 	}
 
 	memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
-free_info:
-	kfree_skb(skb_pipe_info);
+free_list:
 	kfree_skb(skb_pipe_list);
 	return r;
 }
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 85b4d86..70b0707 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -336,7 +336,7 @@
 
 #define TRF7970A_NFC_TARGET_LEVEL_RFDET(v)	((v) & 0x07)
 #define TRF7970A_NFC_TARGET_LEVEL_HI_RF		BIT(3)
-#define TRF7970A_NFC_TARGET_LEVEL_SDD_EN	BIT(3)
+#define TRF7970A_NFC_TARGET_LEVEL_SDD_EN	BIT(5)
 #define TRF7970A_NFC_TARGET_LEVEL_LD_S_4BYTES	(0x0 << 6)
 #define TRF7970A_NFC_TARGET_LEVEL_LD_S_7BYTES	(0x1 << 6)
 #define TRF7970A_NFC_TARGET_LEVEL_LD_S_10BYTES	(0x2 << 6)
@@ -629,7 +629,9 @@
 	}
 
 	if (trf->adjust_resp_len) {
-		skb_trim(trf->rx_skb, trf->rx_skb->len - 1);
+		if (trf->rx_skb)
+			skb_trim(trf->rx_skb, trf->rx_skb->len - 1);
+
 		trf->adjust_resp_len = false;
 	}
 
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 0749656..6e82bc42 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -967,7 +967,9 @@
 }
 
 #ifdef CONFIG_HAVE_MEMBLOCK
-#define MAX_PHYS_ADDR	((phys_addr_t)~0)
+#ifndef MAX_MEMBLOCK_ADDR
+#define MAX_MEMBLOCK_ADDR	((phys_addr_t)~0)
+#endif
 
 void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
 {
@@ -984,16 +986,16 @@
 	}
 	size &= PAGE_MASK;
 
-	if (base > MAX_PHYS_ADDR) {
+	if (base > MAX_MEMBLOCK_ADDR) {
 		pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
 				base, base + size);
 		return;
 	}
 
-	if (base + size - 1 > MAX_PHYS_ADDR) {
+	if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
 		pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
-				((u64)MAX_PHYS_ADDR) + 1, base + size);
-		size = MAX_PHYS_ADDR - base + 1;
+				((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
+		size = MAX_MEMBLOCK_ADDR - base + 1;
 	}
 
 	if (base + size < phys_offset) {
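
The clamp above keeps any block that starts at or below MAX_MEMBLOCK_ADDR and
trims whatever extends past it. A hypothetical standalone version of the same
arithmetic, with a 32-bit limit for illustration:

    #include <stdint.h>

    #define LIMIT 0xffffffffull   /* stand-in for MAX_MEMBLOCK_ADDR */

    static int clamp_region(uint64_t base, uint64_t *size)
    {
        if (base > LIMIT)
            return -1;                /* whole block ignored */
        if (base + *size - 1 > LIMIT)
            *size = LIMIT - base + 1; /* trim the tail */
        return 0;
    }

    /* e.g. base = 0xf0000000, size = 0x20000000 is trimmed to
     * size = 0x10000000, ending exactly at LIMIT. */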
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index fdc60db..1350fa2 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -16,6 +16,7 @@
 #include <linux/phy.h>
 #include <linux/phy_fixed.h>
 #include <linux/of.h>
+#include <linux/of_gpio.h>
 #include <linux/of_irq.h>
 #include <linux/of_mdio.h>
 #include <linux/module.h>
@@ -266,7 +267,8 @@
 bool of_phy_is_fixed_link(struct device_node *np)
 {
 	struct device_node *dn;
-	int len;
+	int len, err;
+	const char *managed;
 
 	/* New binding */
 	dn = of_get_child_by_name(np, "fixed-link");
@@ -275,6 +277,10 @@
 		return true;
 	}
 
+	err = of_property_read_string(np, "managed", &managed);
+	if (err == 0 && strcmp(managed, "auto") != 0)
+		return true;
+
 	/* Old binding */
 	if (of_get_property(np, "fixed-link", &len) &&
 	    len == (5 * sizeof(__be32)))
@@ -289,8 +295,19 @@
 	struct fixed_phy_status status = {};
 	struct device_node *fixed_link_node;
 	const __be32 *fixed_link_prop;
-	int len;
+	int link_gpio;
+	int len, err;
 	struct phy_device *phy;
+	const char *managed;
+
+	err = of_property_read_string(np, "managed", &managed);
+	if (err == 0) {
+		if (strcmp(managed, "in-band-status") == 0) {
+			/* status is zeroed, namely its .link member */
+			phy = fixed_phy_register(PHY_POLL, &status, -1, np);
+			return IS_ERR(phy) ? PTR_ERR(phy) : 0;
+		}
+	}
 
 	/* New binding */
 	fixed_link_node = of_get_child_by_name(np, "fixed-link");
@@ -303,8 +320,13 @@
 		status.pause = of_property_read_bool(fixed_link_node, "pause");
 		status.asym_pause = of_property_read_bool(fixed_link_node,
 							  "asym-pause");
+		link_gpio = of_get_named_gpio_flags(fixed_link_node,
+						    "link-gpios", 0, NULL);
 		of_node_put(fixed_link_node);
-		phy = fixed_phy_register(PHY_POLL, &status, np);
+		if (link_gpio == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+
+		phy = fixed_phy_register(PHY_POLL, &status, link_gpio, np);
 		return IS_ERR(phy) ? PTR_ERR(phy) : 0;
 	}
 
@@ -316,7 +338,7 @@
 		status.speed = be32_to_cpu(fixed_link_prop[2]);
 		status.pause = be32_to_cpu(fixed_link_prop[3]);
 		status.asym_pause = be32_to_cpu(fixed_link_prop[4]);
-		phy = fixed_phy_register(PHY_POLL, &status, np);
+		phy = fixed_phy_register(PHY_POLL, &status, -1, np);
 		return IS_ERR(phy) ? PTR_ERR(phy) : 0;
 	}
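
The of_mdio changes teach the fixed-link helpers two new bindings: a "managed" string property, where any value other than "auto" (notably "in-band-status") marks the link as fixed and registers a fixed PHY whose initial status is all-zero, and an optional link-gpios GPIO in the fixed-link node, whose -EPROBE_DEFER lookup result is propagated so the caller retries once the GPIO controller appears. A hedged sketch of how a MAC driver typically consumes these helpers (error handling trimmed):

    if (of_phy_is_fixed_link(np)) {
        err = of_phy_register_fixed_link(np);
        if (err)
            return err;    /* may be -EPROBE_DEFER from link-gpios */
    }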
 
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 984a8ff..483f919 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -296,20 +296,18 @@
 		goto err0;
 	}
 
-	clk = clk_get(&dev->dev, NULL);
+	clk = devm_clk_get(&dev->dev, NULL);
 	if (IS_ERR(clk))
 		return -ENODEV;
 
 	pxa2xx_drv_pcmcia_ops(ops);
 
-	sinfo = kzalloc(SKT_DEV_INFO_SIZE(ops->nr), GFP_KERNEL);
-	if (!sinfo) {
-		clk_put(clk);
+	sinfo = devm_kzalloc(&dev->dev, SKT_DEV_INFO_SIZE(ops->nr),
+			     GFP_KERNEL);
+	if (!sinfo)
 		return -ENOMEM;
-	}
 
 	sinfo->nskt = ops->nr;
-	sinfo->clk = clk;
 
 	/* Initialize processor specific parameters */
 	for (i = 0; i < ops->nr; i++) {
@@ -332,8 +330,7 @@
 err1:
 	while (--i >= 0)
 		soc_pcmcia_remove_one(&sinfo->skt[i]);
-	clk_put(clk);
-	kfree(sinfo);
+
 err0:
 	return ret;
 }
@@ -343,13 +340,9 @@
 	struct skt_dev_info *sinfo = platform_get_drvdata(dev);
 	int i;
 
-	platform_set_drvdata(dev, NULL);
-
 	for (i = 0; i < sinfo->nskt; i++)
 		soc_pcmcia_remove_one(&sinfo->skt[i]);
 
-	clk_put(sinfo->clk);
-	kfree(sinfo);
 	return 0;
 }
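
This and the following pcmcia hunks convert the SoC socket drivers to managed resources: devm_clk_get() and devm_kzalloc() tie the clock handle and the skt_dev_info allocation to the device lifetime, so every clk_put()/kfree() in the error paths and remove hooks goes away, and the now-unused skt_dev_info::clk field is dropped from soc_common.h further down. A minimal sketch of the devres idiom, with hypothetical names:

    struct foo { struct clk *clk; };                    /* hypothetical */

    static int foo_probe(struct platform_device *pdev)
    {
        struct clk *clk = devm_clk_get(&pdev->dev, NULL);
        struct foo *priv;

        if (IS_ERR(clk))
            return PTR_ERR(clk);

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
            return -ENOMEM;     /* clk is released by devres */

        priv->clk = clk;
        return 0;               /* no matching put/free needed */
    }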
 
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index 8039452..66acdc8 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -93,8 +93,6 @@
 	for (i = 0; i < sinfo->nskt; i++)
 		soc_pcmcia_remove_one(&sinfo->skt[i]);
 
-	clk_put(sinfo->clk);
-	kfree(sinfo);
 	return 0;
 }
 
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 80b8e9d..a1531fe 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -135,8 +135,13 @@
 	int (*add)(struct soc_pcmcia_socket *))
 {
 	struct sa1111_pcmcia_socket *s;
+	struct clk *clk;
 	int i, ret = 0;
 
+	clk = devm_clk_get(&dev->dev, NULL);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
 	ops->socket_state = sa1111_pcmcia_socket_state;
 
 	for (i = 0; i < ops->nr; i++) {
@@ -145,12 +150,8 @@
 			return -ENOMEM;
 
 		s->soc.nr = ops->first + i;
-		s->soc.clk = clk_get(&dev->dev, NULL);
-		if (IS_ERR(s->soc.clk)) {
-			ret = PTR_ERR(s->soc.clk);
-			kfree(s);
-			return ret;
-		}
+		s->soc.clk = clk;
+
 		soc_pcmcia_init_one(&s->soc, ops, &dev->dev);
 		s->dev = dev;
 		if (s->soc.nr) {
@@ -226,7 +227,6 @@
 	for (; s; s = next) {
 		next = s->next;
 		soc_pcmcia_remove_one(&s->soc);
-		clk_put(s->soc.clk);
 		kfree(s);
 	}
 
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
index cf6de2c..9f6ec87 100644
--- a/drivers/pcmcia/sa11xx_base.c
+++ b/drivers/pcmcia/sa11xx_base.c
@@ -222,18 +222,17 @@
 	int i, ret = 0;
 	struct clk *clk;
 
-	clk = clk_get(dev, NULL);
+	clk = devm_clk_get(dev, NULL);
 	if (IS_ERR(clk))
 		return PTR_ERR(clk);
 
 	sa11xx_drv_pcmcia_ops(ops);
 
-	sinfo = kzalloc(SKT_DEV_INFO_SIZE(nr), GFP_KERNEL);
+	sinfo = devm_kzalloc(dev, SKT_DEV_INFO_SIZE(nr), GFP_KERNEL);
 	if (!sinfo)
 		return -ENOMEM;
 
 	sinfo->nskt = nr;
-	sinfo->clk = clk;
 
 	/* Initialize processor specific parameters */
 	for (i = 0; i < nr; i++) {
@@ -251,8 +250,6 @@
 	if (ret) {
 		while (--i >= 0)
 			soc_pcmcia_remove_one(&sinfo->skt[i]);
-		clk_put(clk);
-		kfree(sinfo);
 	} else {
 		dev_set_drvdata(dev, sinfo);
 	}
@@ -261,16 +258,6 @@
 }
 EXPORT_SYMBOL(sa11xx_drv_pcmcia_probe);
 
-static int __init sa11xx_pcmcia_init(void)
-{
-	return 0;
-}
-fs_initcall(sa11xx_pcmcia_init);
-
-static void __exit sa11xx_pcmcia_exit(void) {}
-
-module_exit(sa11xx_pcmcia_exit);
-
 MODULE_AUTHOR("John Dorsey <john+@cs.cmu.edu>");
 MODULE_DESCRIPTION("Linux PCMCIA Card Services: SA-11xx core socket driver");
 MODULE_LICENSE("Dual MPL/GPL");
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index e6fcbea..94762a5 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -68,7 +68,6 @@
 
 struct skt_dev_info {
 	int nskt;
-	struct clk *clk;
 	struct soc_pcmcia_socket skt[0];
 };
 
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
new file mode 100644
index 0000000..d9de36e
--- /dev/null
+++ b/drivers/perf/Kconfig
@@ -0,0 +1,15 @@
+#
+# Performance Monitor Drivers
+#
+
+menu "Performance monitor support"
+
+config ARM_PMU
+	depends on PERF_EVENTS && ARM
+	bool "ARM PMU framework"
+	default y
+	help
+	  Say y if you want to use CPU performance monitors on ARM-based
+	  systems.
+
+endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
new file mode 100644
index 0000000..acd2397
--- /dev/null
+++ b/drivers/perf/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ARM_PMU) += arm_pmu.o
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
new file mode 100644
index 0000000..2365a32
--- /dev/null
+++ b/drivers/perf/arm_pmu.c
@@ -0,0 +1,921 @@
+#undef DEBUG
+
+/*
+ * ARM performance counter support.
+ *
+ * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
+ * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
+ *
+ * This code is based on the sparc64 perf event code, which is in turn based
+ * on the x86 code.
+ */
+#define pr_fmt(fmt) "hw perfevents: " fmt
+
+#include <linux/bitmap.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+
+#include <asm/cputype.h>
+#include <asm/irq_regs.h>
+
+static int
+armpmu_map_cache_event(const unsigned (*cache_map)
+				      [PERF_COUNT_HW_CACHE_MAX]
+				      [PERF_COUNT_HW_CACHE_OP_MAX]
+				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
+		       u64 config)
+{
+	unsigned int cache_type, cache_op, cache_result, ret;
+
+	cache_type = (config >>  0) & 0xff;
+	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+		return -EINVAL;
+
+	cache_op = (config >>  8) & 0xff;
+	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+		return -EINVAL;
+
+	cache_result = (config >> 16) & 0xff;
+	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+		return -EINVAL;
+
+	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
+
+	if (ret == CACHE_OP_UNSUPPORTED)
+		return -ENOENT;
+
+	return ret;
+}
+
+static int
+armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+{
+	int mapping;
+
+	if (config >= PERF_COUNT_HW_MAX)
+		return -EINVAL;
+
+	mapping = (*event_map)[config];
+	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
+}
+
+static int
+armpmu_map_raw_event(u32 raw_event_mask, u64 config)
+{
+	return (int)(config & raw_event_mask);
+}
+
+int
+armpmu_map_event(struct perf_event *event,
+		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+		 const unsigned (*cache_map)
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX],
+		 u32 raw_event_mask)
+{
+	u64 config = event->attr.config;
+	int type = event->attr.type;
+
+	if (type == event->pmu->type)
+		return armpmu_map_raw_event(raw_event_mask, config);
+
+	switch (type) {
+	case PERF_TYPE_HARDWARE:
+		return armpmu_map_hw_event(event_map, config);
+	case PERF_TYPE_HW_CACHE:
+		return armpmu_map_cache_event(cache_map, config);
+	case PERF_TYPE_RAW:
+		return armpmu_map_raw_event(raw_event_mask, config);
+	}
+
+	return -ENOENT;
+}
+
+int armpmu_event_set_period(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	s64 left = local64_read(&hwc->period_left);
+	s64 period = hwc->sample_period;
+	int ret = 0;
+
+	if (unlikely(left <= -period)) {
+		left = period;
+		local64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		ret = 1;
+	}
+
+	if (unlikely(left <= 0)) {
+		left += period;
+		local64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		ret = 1;
+	}
+
+	/*
+	 * Limit the maximum period to prevent the counter value
+	 * from overtaking the one we are about to program. In
+	 * effect we are reducing max_period to account for
+	 * interrupt latency (and we are being very conservative).
+	 */
+	if (left > (armpmu->max_period >> 1))
+		left = armpmu->max_period >> 1;
+
+	local64_set(&hwc->prev_count, (u64)-left);
+
+	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
+
+	perf_event_update_userpage(event);
+
+	return ret;
+}
+
+u64 armpmu_event_update(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	u64 delta, prev_raw_count, new_raw_count;
+
+again:
+	prev_raw_count = local64_read(&hwc->prev_count);
+	new_raw_count = armpmu->read_counter(event);
+
+	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+			     new_raw_count) != prev_raw_count)
+		goto again;
+
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
+
+	local64_add(delta, &event->count);
+	local64_sub(delta, &hwc->period_left);
+
+	return new_raw_count;
+}
+
+static void
+armpmu_read(struct perf_event *event)
+{
+	armpmu_event_update(event);
+}
+
+static void
+armpmu_stop(struct perf_event *event, int flags)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+
+	/*
+	 * ARM pmu always has to update the counter, so ignore
+	 * PERF_EF_UPDATE, see comments in armpmu_start().
+	 */
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		armpmu->disable(event);
+		armpmu_event_update(event);
+		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	}
+}
+
+static void armpmu_start(struct perf_event *event, int flags)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+
+	/*
+	 * ARM pmu always has to reprogram the period, so ignore
+	 * PERF_EF_RELOAD, see the comment below.
+	 */
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
+	/*
+	 * Set the period again. Some counters can't be stopped, so when we
+	 * were stopped we simply disabled the IRQ source and the counter
+	 * may have been left counting. If we don't do this step then we may
+	 * get an interrupt too soon or *way* too late if the overflow has
+	 * happened since disabling.
+	 */
+	armpmu_event_set_period(event);
+	armpmu->enable(event);
+}
+
+static void
+armpmu_del(struct perf_event *event, int flags)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	armpmu_stop(event, PERF_EF_UPDATE);
+	hw_events->events[idx] = NULL;
+	clear_bit(idx, hw_events->used_mask);
+	if (armpmu->clear_event_idx)
+		armpmu->clear_event_idx(hw_events, event);
+
+	perf_event_update_userpage(event);
+}
+
+static int
+armpmu_add(struct perf_event *event, int flags)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+	int err = 0;
+
+	/* An event following a process won't be stopped earlier */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return -ENOENT;
+
+	perf_pmu_disable(event->pmu);
+
+	/* If we don't have a space for the counter then finish early. */
+	idx = armpmu->get_event_idx(hw_events, event);
+	if (idx < 0) {
+		err = idx;
+		goto out;
+	}
+
+	/*
+	 * If there is an event in the counter we are going to use then make
+	 * sure it is disabled.
+	 */
+	event->hw.idx = idx;
+	armpmu->disable(event);
+	hw_events->events[idx] = event;
+
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	if (flags & PERF_EF_START)
+		armpmu_start(event, PERF_EF_RELOAD);
+
+	/* Propagate our changes to the userspace mapping. */
+	perf_event_update_userpage(event);
+
+out:
+	perf_pmu_enable(event->pmu);
+	return err;
+}
+
+static int
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
+			       struct perf_event *event)
+{
+	struct arm_pmu *armpmu;
+
+	if (is_software_event(event))
+		return 1;
+
+	/*
+	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+	 * core perf code won't check that the pmu->ctx == leader->ctx
+	 * until after pmu->event_init(event).
+	 */
+	if (event->pmu != pmu)
+		return 0;
+
+	if (event->state < PERF_EVENT_STATE_OFF)
+		return 1;
+
+	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
+		return 1;
+
+	armpmu = to_arm_pmu(event->pmu);
+	return armpmu->get_event_idx(hw_events, event) >= 0;
+}
+
+static int
+validate_group(struct perf_event *event)
+{
+	struct perf_event *sibling, *leader = event->group_leader;
+	struct pmu_hw_events fake_pmu;
+
+	/*
+	 * Initialise the fake PMU. We only need to populate the
+	 * used_mask for the purposes of validation.
+	 */
+	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
+
+	if (!validate_event(event->pmu, &fake_pmu, leader))
+		return -EINVAL;
+
+	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+		if (!validate_event(event->pmu, &fake_pmu, sibling))
+			return -EINVAL;
+	}
+
+	if (!validate_event(event->pmu, &fake_pmu, event))
+		return -EINVAL;
+
+	return 0;
+}
+
+static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
+{
+	struct arm_pmu *armpmu;
+	struct platform_device *plat_device;
+	struct arm_pmu_platdata *plat;
+	int ret;
+	u64 start_clock, finish_clock;
+
+	/*
+	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
+	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
+	 * do any necessary shifting, we just need to perform the first
+	 * dereference.
+	 */
+	armpmu = *(void **)dev;
+	plat_device = armpmu->plat_device;
+	plat = dev_get_platdata(&plat_device->dev);
+
+	start_clock = sched_clock();
+	if (plat && plat->handle_irq)
+		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
+	else
+		ret = armpmu->handle_irq(irq, armpmu);
+	finish_clock = sched_clock();
+
+	perf_sample_event_took(finish_clock - start_clock);
+	return ret;
+}
+
+static void
+armpmu_release_hardware(struct arm_pmu *armpmu)
+{
+	armpmu->free_irq(armpmu);
+}
+
+static int
+armpmu_reserve_hardware(struct arm_pmu *armpmu)
+{
+	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
+	if (err) {
+		armpmu_release_hardware(armpmu);
+		return err;
+	}
+
+	return 0;
+}
+
+static void
+hw_perf_event_destroy(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	atomic_t *active_events	 = &armpmu->active_events;
+	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
+
+	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
+		armpmu_release_hardware(armpmu);
+		mutex_unlock(pmu_reserve_mutex);
+	}
+}
+
+static int
+event_requires_mode_exclusion(struct perf_event_attr *attr)
+{
+	return attr->exclude_idle || attr->exclude_user ||
+	       attr->exclude_kernel || attr->exclude_hv;
+}
+
+static int
+__hw_perf_event_init(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int mapping;
+
+	mapping = armpmu->map_event(event);
+
+	if (mapping < 0) {
+		pr_debug("event %x:%llx not supported\n", event->attr.type,
+			 event->attr.config);
+		return mapping;
+	}
+
+	/*
+	 * We don't assign an index until we actually place the event onto
+	 * hardware. Use -1 to signify that we haven't decided where to put it
+	 * yet. For SMP systems, each core has its own PMU so we can't do any
+	 * clever allocation or constraints checking at this point.
+	 */
+	hwc->idx		= -1;
+	hwc->config_base	= 0;
+	hwc->config		= 0;
+	hwc->event_base		= 0;
+
+	/*
+	 * Check whether we need to exclude the counter from certain modes.
+	 */
+	if ((!armpmu->set_event_filter ||
+	     armpmu->set_event_filter(hwc, &event->attr)) &&
+	     event_requires_mode_exclusion(&event->attr)) {
+		pr_debug("ARM performance counters do not support "
+			 "mode exclusion\n");
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * Store the event encoding into the config_base field.
+	 */
+	hwc->config_base	    |= (unsigned long)mapping;
+
+	if (!is_sampling_event(event)) {
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period  = armpmu->max_period >> 1;
+		hwc->last_period    = hwc->sample_period;
+		local64_set(&hwc->period_left, hwc->sample_period);
+	}
+
+	if (event->group_leader != event) {
+		if (validate_group(event) != 0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int armpmu_event_init(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	int err = 0;
+	atomic_t *active_events = &armpmu->active_events;
+
+	/*
+	 * Reject CPU-affine events for CPUs that are of a different class to
+	 * that which this PMU handles. Process-following events (where
+	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
+	 * reject them later (in armpmu_add) if they're scheduled on a
+	 * different class of CPU.
+	 */
+	if (event->cpu != -1 &&
+		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
+		return -ENOENT;
+
+	/* does not support taken branch sampling */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
+	if (armpmu->map_event(event) == -ENOENT)
+		return -ENOENT;
+
+	event->destroy = hw_perf_event_destroy;
+
+	if (!atomic_inc_not_zero(active_events)) {
+		mutex_lock(&armpmu->reserve_mutex);
+		if (atomic_read(active_events) == 0)
+			err = armpmu_reserve_hardware(armpmu);
+
+		if (!err)
+			atomic_inc(active_events);
+		mutex_unlock(&armpmu->reserve_mutex);
+	}
+
+	if (err)
+		return err;
+
+	err = __hw_perf_event_init(event);
+	if (err)
+		hw_perf_event_destroy(event);
+
+	return err;
+}
+
+static void armpmu_enable(struct pmu *pmu)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+
+	/* For task-bound events we may be called on other CPUs */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return;
+
+	if (enabled)
+		armpmu->start(armpmu);
+}
+
+static void armpmu_disable(struct pmu *pmu)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+
+	/* For task-bound events we may be called on other CPUs */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return;
+
+	armpmu->stop(armpmu);
+}
+
+/*
+ * In heterogeneous systems, events are specific to a particular
+ * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
+ * the same microarchitecture.
+ */
+static int armpmu_filter_match(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	unsigned int cpu = smp_processor_id();
+	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
+}
+
+static void armpmu_init(struct arm_pmu *armpmu)
+{
+	atomic_set(&armpmu->active_events, 0);
+	mutex_init(&armpmu->reserve_mutex);
+
+	armpmu->pmu = (struct pmu) {
+		.pmu_enable	= armpmu_enable,
+		.pmu_disable	= armpmu_disable,
+		.event_init	= armpmu_event_init,
+		.add		= armpmu_add,
+		.del		= armpmu_del,
+		.start		= armpmu_start,
+		.stop		= armpmu_stop,
+		.read		= armpmu_read,
+		.filter_match	= armpmu_filter_match,
+	};
+}
+
+int armpmu_register(struct arm_pmu *armpmu, int type)
+{
+	armpmu_init(armpmu);
+	pr_info("enabled with %s PMU driver, %d counters available\n",
+			armpmu->name, armpmu->num_events);
+	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
+}
+
+/* Set at runtime when we know what CPU type we are. */
+static struct arm_pmu *__oprofile_cpu_pmu;
+
+/*
+ * Despite the names, these two functions are CPU-specific and are used
+ * by the OProfile/perf code.
+ */
+const char *perf_pmu_name(void)
+{
+	if (!__oprofile_cpu_pmu)
+		return NULL;
+
+	return __oprofile_cpu_pmu->name;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+	int max_events = 0;
+
+	if (__oprofile_cpu_pmu != NULL)
+		max_events = __oprofile_cpu_pmu->num_events;
+
+	return max_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
+static void cpu_pmu_enable_percpu_irq(void *data)
+{
+	int irq = *(int *)data;
+
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
+static void cpu_pmu_disable_percpu_irq(void *data)
+{
+	int irq = *(int *)data;
+
+	disable_percpu_irq(irq);
+}
+
+static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
+{
+	int i, irq, irqs;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
+		free_percpu_irq(irq, &hw_events->percpu_pmu);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			int cpu = i;
+
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
+			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
+				continue;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq >= 0)
+				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+		}
+	}
+}
+
+static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
+{
+	int i, err, irq, irqs;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
+
+	if (!pmu_device)
+		return -ENODEV;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	if (irqs < 1) {
+		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
+		return 0;
+	}
+
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		err = request_percpu_irq(irq, handler, "arm-pmu",
+					 &hw_events->percpu_pmu);
+		if (err) {
+			pr_err("unable to request IRQ%d for ARM PMU counters\n",
+				irq);
+			return err;
+		}
+		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			int cpu = i;
+
+			err = 0;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq < 0)
+				continue;
+
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
+			/*
+			 * If we have a single PMU interrupt that we can't shift,
+			 * assume that we're running on a uniprocessor machine and
+			 * continue. Otherwise, continue without this interrupt.
+			 */
+			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
+				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+					irq, cpu);
+				continue;
+			}
+
+			err = request_irq(irq, handler,
+					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+			if (err) {
+				pr_err("unable to request IRQ%d for ARM PMU counters\n",
+					irq);
+				return err;
+			}
+
+			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
+			  void *hcpu)
+{
+	int cpu = (unsigned long)hcpu;
+	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
+
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+		return NOTIFY_DONE;
+
+	if (pmu->reset)
+		pmu->reset(pmu);
+	else
+		return NOTIFY_DONE;
+
+	return NOTIFY_OK;
+}
+
+static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	int err;
+	int cpu;
+	struct pmu_hw_events __percpu *cpu_hw_events;
+
+	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
+	if (!cpu_hw_events)
+		return -ENOMEM;
+
+	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
+	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
+	if (err)
+		goto out_hw_events;
+
+	for_each_possible_cpu(cpu) {
+		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
+		raw_spin_lock_init(&events->pmu_lock);
+		events->percpu_pmu = cpu_pmu;
+	}
+
+	cpu_pmu->hw_events	= cpu_hw_events;
+	cpu_pmu->request_irq	= cpu_pmu_request_irq;
+	cpu_pmu->free_irq	= cpu_pmu_free_irq;
+
+	/* Ensure the PMU has sane values out of reset. */
+	if (cpu_pmu->reset)
+		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
+			 cpu_pmu, 1);
+
+	/* If no interrupts available, set the corresponding capability flag */
+	if (!platform_get_irq(cpu_pmu->plat_device, 0))
+		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
+	return 0;
+
+out_hw_events:
+	free_percpu(cpu_hw_events);
+	return err;
+}
+
+static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
+{
+	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
+	free_percpu(cpu_pmu->hw_events);
+}
+
+/*
+ * CPU PMU identification and probing.
+ */
+static int probe_current_pmu(struct arm_pmu *pmu,
+			     const struct pmu_probe_info *info)
+{
+	int cpu = get_cpu();
+	unsigned int cpuid = read_cpuid_id();
+	int ret = -ENODEV;
+
+	pr_info("probing PMU on CPU %d\n", cpu);
+
+	for (; info->init != NULL; info++) {
+		if ((cpuid & info->mask) != info->cpuid)
+			continue;
+		ret = info->init(pmu);
+		break;
+	}
+
+	put_cpu();
+	return ret;
+}
+
+static int of_pmu_irq_cfg(struct arm_pmu *pmu)
+{
+	int *irqs, i = 0;
+	bool using_spi = false;
+	struct platform_device *pdev = pmu->plat_device;
+
+	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+	if (!irqs)
+		return -ENOMEM;
+
+	do {
+		struct device_node *dn;
+		int cpu, irq;
+
+		/* See if we have an affinity entry */
+		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
+		if (!dn)
+			break;
+
+		/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
+		irq = platform_get_irq(pdev, i);
+		if (irq >= 0) {
+			bool spi = !irq_is_percpu(irq);
+
+			if (i > 0 && spi != using_spi) {
+				pr_err("PPI/SPI IRQ type mismatch for %s!\n",
+					dn->name);
+				kfree(irqs);
+				return -EINVAL;
+			}
+
+			using_spi = spi;
+		}
+
+		/* Now look up the logical CPU number */
+		for_each_possible_cpu(cpu)
+			if (dn == of_cpu_device_node_get(cpu))
+				break;
+
+		if (cpu >= nr_cpu_ids) {
+			pr_warn("Failed to find logical CPU for %s\n",
+				dn->name);
+			of_node_put(dn);
+			cpumask_setall(&pmu->supported_cpus);
+			break;
+		}
+		of_node_put(dn);
+
+		/* For SPIs, we need to track the affinity per IRQ */
+		if (using_spi) {
+			if (i >= pdev->num_resources) {
+				of_node_put(dn);
+				break;
+			}
+
+			irqs[i] = cpu;
+		}
+
+		/* Keep track of the CPUs containing this PMU type */
+		cpumask_set_cpu(cpu, &pmu->supported_cpus);
+		of_node_put(dn);
+		i++;
+	} while (1);
+
+	/* If we didn't manage to parse anything, claim to support all CPUs */
+	if (cpumask_weight(&pmu->supported_cpus) == 0)
+		cpumask_setall(&pmu->supported_cpus);
+
+	/* If we matched up the IRQ affinities, use them to route the SPIs */
+	if (using_spi && i == pdev->num_resources)
+		pmu->irq_affinity = irqs;
+	else
+		kfree(irqs);
+
+	return 0;
+}
+
+int arm_pmu_device_probe(struct platform_device *pdev,
+			 const struct of_device_id *of_table,
+			 const struct pmu_probe_info *probe_table)
+{
+	const struct of_device_id *of_id;
+	const int (*init_fn)(struct arm_pmu *);
+	struct device_node *node = pdev->dev.of_node;
+	struct arm_pmu *pmu;
+	int ret = -ENODEV;
+
+	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
+	if (!pmu) {
+		pr_info("failed to allocate PMU device!\n");
+		return -ENOMEM;
+	}
+
+	if (!__oprofile_cpu_pmu)
+		__oprofile_cpu_pmu = pmu;
+
+	pmu->plat_device = pdev;
+
+	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
+		init_fn = of_id->data;
+
+		ret = of_pmu_irq_cfg(pmu);
+		if (!ret)
+			ret = init_fn(pmu);
+	} else {
+		ret = probe_current_pmu(pmu, probe_table);
+		cpumask_setall(&pmu->supported_cpus);
+	}
+
+	if (ret) {
+		pr_info("failed to probe PMU!\n");
+		goto out_free;
+	}
+
+	ret = cpu_pmu_init(pmu);
+	if (ret)
+		goto out_free;
+
+	ret = armpmu_register(pmu, -1);
+	if (ret)
+		goto out_destroy;
+
+	return 0;
+
+out_destroy:
+	cpu_pmu_destroy(pmu);
+out_free:
+	pr_info("failed to register PMU devices!\n");
+	kfree(pmu);
+	return ret;
+}
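
drivers/perf/arm_pmu.c is the arch-neutral core of the ARM PMU framework, hoisted out of arch/arm so arm64 can share it. A CPU PMU driver supplies an of_device_id table (whose .data is an init function) plus a pmu_probe_info table for non-DT probing, and forwards its platform probe to arm_pmu_device_probe(). A hedged caller sketch; the tables and init function are illustrative, not a real driver:

    static int my_pmu_init(struct arm_pmu *pmu);          /* hypothetical */

    static const struct of_device_id my_pmu_of_ids[] = {
        { .compatible = "vendor,my-pmu", .data = my_pmu_init },
        { /* sentinel */ }
    };

    static const struct pmu_probe_info my_pmu_probe_table[] = {
        /* cpuid/mask/init entries, terminated by .init == NULL */
        { /* sentinel */ }
    };

    static int my_pmu_probe(struct platform_device *pdev)
    {
        return arm_pmu_device_probe(pdev, my_pmu_of_ids,
                                    my_pmu_probe_table);
    }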
diff --git a/drivers/phy/phy-tusb1210.c b/drivers/phy/phy-tusb1210.c
index 2535d79..4f6d5e7 100644
--- a/drivers/phy/phy-tusb1210.c
+++ b/drivers/phy/phy-tusb1210.c
@@ -61,32 +61,26 @@
 
 static int tusb1210_probe(struct ulpi *ulpi)
 {
-	struct gpio_desc *gpio;
 	struct tusb1210 *tusb;
 	u8 val, reg;
-	int ret;
 
 	tusb = devm_kzalloc(&ulpi->dev, sizeof(*tusb), GFP_KERNEL);
 	if (!tusb)
 		return -ENOMEM;
 
-	gpio = devm_gpiod_get(&ulpi->dev, "reset");
-	if (!IS_ERR(gpio)) {
-		ret = gpiod_direction_output(gpio, 0);
-		if (ret)
-			return ret;
-		gpiod_set_value_cansleep(gpio, 1);
-		tusb->gpio_reset = gpio;
-	}
+	tusb->gpio_reset = devm_gpiod_get_optional(&ulpi->dev, "reset",
+						   GPIOD_OUT_LOW);
+	if (IS_ERR(tusb->gpio_reset))
+		return PTR_ERR(tusb->gpio_reset);
 
-	gpio = devm_gpiod_get(&ulpi->dev, "cs");
-	if (!IS_ERR(gpio)) {
-		ret = gpiod_direction_output(gpio, 0);
-		if (ret)
-			return ret;
-		gpiod_set_value_cansleep(gpio, 1);
-		tusb->gpio_cs = gpio;
-	}
+	gpiod_set_value_cansleep(tusb->gpio_reset, 1);
+
+	tusb->gpio_cs = devm_gpiod_get_optional(&ulpi->dev, "cs",
+						GPIOD_OUT_LOW);
+	if (IS_ERR(tusb->gpio_cs))
+		return PTR_ERR(tusb->gpio_cs);
+
+	gpiod_set_value_cansleep(tusb->gpio_cs, 1);
 
 	/*
 	 * VENDOR_SPECIFIC2 register in TUSB1210 can be used for configuring eye
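
The tusb1210 conversion swaps devm_gpiod_get(), whose ERR_PTR result the old code silently ignored, for devm_gpiod_get_optional() with GPIOD_OUT_LOW: real failures (including -EPROBE_DEFER) now abort the probe, an absent GPIO yields NULL, and the direction setup collapses into the request itself. Since gpiod_set_value_cansleep() is a no-op for a NULL descriptor, no further checks are needed. The idiom in miniature:

    gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
    if (IS_ERR(gpio))
        return PTR_ERR(gpio);           /* e.g. -EPROBE_DEFER */
    gpiod_set_value_cansleep(gpio, 1);  /* safe no-op when gpio == NULL */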
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 100d9ac..84dd2ed 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -82,6 +82,12 @@
 	  Requires ACPI/FDT device enumeration code to set up a platform
 	  device.
 
+config PINCTRL_DIGICOLOR
+	bool
+	depends on OF && (ARCH_DIGICOLOR || COMPILE_TEST)
+	select PINMUX
+	select GENERIC_PINCONF
+
 config PINCTRL_LANTIQ
 	bool
 	depends on LANTIQ
@@ -240,6 +246,7 @@
 source "drivers/pinctrl/sh-pfc/Kconfig"
 source "drivers/pinctrl/spear/Kconfig"
 source "drivers/pinctrl/sunxi/Kconfig"
+source "drivers/pinctrl/uniphier/Kconfig"
 source "drivers/pinctrl/vt8500/Kconfig"
 source "drivers/pinctrl/mediatek/Kconfig"
 
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index f4216d9..cad077c 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -2,12 +2,10 @@
 
 subdir-ccflags-$(CONFIG_DEBUG_PINCTRL)	+= -DDEBUG
 
-obj-$(CONFIG_PINCTRL)		+= core.o pinctrl-utils.o
+obj-y				+= core.o pinctrl-utils.o
 obj-$(CONFIG_PINMUX)		+= pinmux.o
 obj-$(CONFIG_PINCONF)		+= pinconf.o
-ifeq ($(CONFIG_OF),y)
-obj-$(CONFIG_PINCTRL)		+= devicetree.o
-endif
+obj-$(CONFIG_OF)		+= devicetree.o
 obj-$(CONFIG_GENERIC_PINCONF)	+= pinconf-generic.o
 obj-$(CONFIG_PINCTRL_ADI2)	+= pinctrl-adi2.o
 obj-$(CONFIG_PINCTRL_AS3722)	+= pinctrl-as3722.o
@@ -15,6 +13,7 @@
 obj-$(CONFIG_PINCTRL_BF60x)	+= pinctrl-adi2-bf60x.o
 obj-$(CONFIG_PINCTRL_AT91)	+= pinctrl-at91.o
 obj-$(CONFIG_PINCTRL_AMD)	+= pinctrl-amd.o
+obj-$(CONFIG_PINCTRL_DIGICOLOR)	+= pinctrl-digicolor.o
 obj-$(CONFIG_PINCTRL_FALCON)	+= pinctrl-falcon.o
 obj-$(CONFIG_PINCTRL_MESON)	+= meson/
 obj-$(CONFIG_PINCTRL_PALMAS)	+= pinctrl-palmas.o
@@ -51,5 +50,6 @@
 obj-$(CONFIG_PINCTRL_SH_PFC)	+= sh-pfc/
 obj-$(CONFIG_PLAT_SPEAR)	+= spear/
 obj-$(CONFIG_ARCH_SUNXI)	+= sunxi/
+obj-$(CONFIG_ARCH_UNIPHIER)	+= uniphier/
 obj-$(CONFIG_ARCH_VT8500)	+= vt8500/
 obj-$(CONFIG_ARCH_MEDIATEK)	+= mediatek/
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 6177315..8efa235 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -586,9 +586,9 @@
 		ret = __bcm2835_gpio_irq_set_type_disabled(pc, gpio, type);
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(data->irq, handle_edge_irq);
+		irq_set_handler_locked(data, handle_edge_irq);
 	else
-		__irq_set_handler_locked(data->irq, handle_level_irq);
+		irq_set_handler_locked(data, handle_level_irq);
 
 	spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
 
@@ -989,7 +989,6 @@
 		irq_set_chip_and_handler(irq, &bcm2835_gpio_irq_chip,
 				handle_level_irq);
 		irq_set_chip_data(irq, pc);
-		set_irq_flags(irq, IRQF_VALID);
 	}
 
 	for (i = 0; i < BCM2835_NUM_BANKS; i++) {
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 8b8f3a0..69723e0 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -231,8 +231,7 @@
 
 	pindesc = pin_desc_get(pctldev, number);
 	if (pindesc != NULL) {
-		pr_err("pin %d already registered on %s\n", number,
-		       pctldev->desc->name);
+		dev_err(pctldev->dev, "pin %d already registered\n", number);
 		return -EINVAL;
 	}
 
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 0bbf7d7..fe04e74 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -97,13 +97,7 @@
 
 struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
 {
-	struct pinctrl_dev *pctldev;
-
-	pctldev = get_pinctrl_dev_from_of_node(np);
-	if (!pctldev)
-		return NULL;
-
-	return pctldev;
+	return get_pinctrl_dev_from_of_node(np);
 }
 
 static int dt_to_map_one_config(struct pinctrl *p, const char *statename,
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 12ef544..debe121 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -87,6 +87,13 @@
 	help
 	  Say Y here to enable the imx6sx pinctrl driver
 
+config PINCTRL_IMX6UL
+	bool "IMX6UL pinctrl driver"
+	depends on SOC_IMX6UL
+	select PINCTRL_IMX
+	help
+	  Say Y here to enable the imx6ul pinctrl driver
+
 config PINCTRL_IMX7D
 	bool "IMX7D pinctrl driver"
 	depends on SOC_IMX7D
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index 343cb43..d44c9e2 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -12,6 +12,7 @@
 obj-$(CONFIG_PINCTRL_IMX6Q)	+= pinctrl-imx6dl.o
 obj-$(CONFIG_PINCTRL_IMX6SL)	+= pinctrl-imx6sl.o
 obj-$(CONFIG_PINCTRL_IMX6SX)	+= pinctrl-imx6sx.o
+obj-$(CONFIG_PINCTRL_IMX6UL)	+= pinctrl-imx6ul.o
 obj-$(CONFIG_PINCTRL_IMX7D)	+= pinctrl-imx7d.o
 obj-$(CONFIG_PINCTRL_VF610)	+= pinctrl-vf610.o
 obj-$(CONFIG_PINCTRL_MXS)	+= pinctrl-mxs.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6ul.c b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
new file mode 100644
index 0000000..08e7576
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
+enum imx6ul_pads {
+	MX6UL_PAD_RESERVE0 = 0,
+	MX6UL_PAD_RESERVE1 = 1,
+	MX6UL_PAD_RESERVE2 = 2,
+	MX6UL_PAD_RESERVE3 = 3,
+	MX6UL_PAD_RESERVE4 = 4,
+	MX6UL_PAD_RESERVE5 = 5,
+	MX6UL_PAD_RESERVE6 = 6,
+	MX6UL_PAD_RESERVE7 = 7,
+	MX6UL_PAD_RESERVE8 = 8,
+	MX6UL_PAD_RESERVE9 = 9,
+	MX6UL_PAD_RESERVE10 = 10,
+	MX6UL_PAD_SNVS_TAMPER4 = 11,
+	MX6UL_PAD_RESERVE12 = 12,
+	MX6UL_PAD_RESERVE13 = 13,
+	MX6UL_PAD_RESERVE14 = 14,
+	MX6UL_PAD_RESERVE15 = 15,
+	MX6UL_PAD_RESERVE16 = 16,
+	MX6UL_PAD_JTAG_MOD = 17,
+	MX6UL_PAD_JTAG_TMS = 18,
+	MX6UL_PAD_JTAG_TDO = 19,
+	MX6UL_PAD_JTAG_TDI = 20,
+	MX6UL_PAD_JTAG_TCK = 21,
+	MX6UL_PAD_JTAG_TRST_B = 22,
+	MX6UL_PAD_GPIO1_IO00 = 23,
+	MX6UL_PAD_GPIO1_IO01 = 24,
+	MX6UL_PAD_GPIO1_IO02 = 25,
+	MX6UL_PAD_GPIO1_IO03 = 26,
+	MX6UL_PAD_GPIO1_IO04 = 27,
+	MX6UL_PAD_GPIO1_IO05 = 28,
+	MX6UL_PAD_GPIO1_IO06 = 29,
+	MX6UL_PAD_GPIO1_IO07 = 30,
+	MX6UL_PAD_GPIO1_IO08 = 31,
+	MX6UL_PAD_GPIO1_IO09 = 32,
+	MX6UL_PAD_UART1_TX_DATA = 33,
+	MX6UL_PAD_UART1_RX_DATA = 34,
+	MX6UL_PAD_UART1_CTS_B = 35,
+	MX6UL_PAD_UART1_RTS_B = 36,
+	MX6UL_PAD_UART2_TX_DATA = 37,
+	MX6UL_PAD_UART2_RX_DATA = 38,
+	MX6UL_PAD_UART2_CTS_B = 39,
+	MX6UL_PAD_UART2_RTS_B = 40,
+	MX6UL_PAD_UART3_TX_DATA = 41,
+	MX6UL_PAD_UART3_RX_DATA = 42,
+	MX6UL_PAD_UART3_CTS_B = 43,
+	MX6UL_PAD_UART3_RTS_B = 44,
+	MX6UL_PAD_UART4_TX_DATA = 45,
+	MX6UL_PAD_UART4_RX_DATA = 46,
+	MX6UL_PAD_UART5_TX_DATA = 47,
+	MX6UL_PAD_UART5_RX_DATA = 48,
+	MX6UL_PAD_ENET1_RX_DATA0 = 49,
+	MX6UL_PAD_ENET1_RX_DATA1 = 50,
+	MX6UL_PAD_ENET1_RX_EN = 51,
+	MX6UL_PAD_ENET1_TX_DATA0 = 52,
+	MX6UL_PAD_ENET1_TX_DATA1 = 53,
+	MX6UL_PAD_ENET1_TX_EN = 54,
+	MX6UL_PAD_ENET1_TX_CLK = 55,
+	MX6UL_PAD_ENET1_RX_ER = 56,
+	MX6UL_PAD_ENET2_RX_DATA0 = 57,
+	MX6UL_PAD_ENET2_RX_DATA1 = 58,
+	MX6UL_PAD_ENET2_RX_EN = 59,
+	MX6UL_PAD_ENET2_TX_DATA0 = 60,
+	MX6UL_PAD_ENET2_TX_DATA1 = 61,
+	MX6UL_PAD_ENET2_TX_EN = 62,
+	MX6UL_PAD_ENET2_TX_CLK = 63,
+	MX6UL_PAD_ENET2_RX_ER = 64,
+	MX6UL_PAD_LCD_CLK = 65,
+	MX6UL_PAD_LCD_ENABLE = 66,
+	MX6UL_PAD_LCD_HSYNC = 67,
+	MX6UL_PAD_LCD_VSYNC = 68,
+	MX6UL_PAD_LCD_RESET = 69,
+	MX6UL_PAD_LCD_DATA00 = 70,
+	MX6UL_PAD_LCD_DATA01 = 71,
+	MX6UL_PAD_LCD_DATA02 = 72,
+	MX6UL_PAD_LCD_DATA03 = 73,
+	MX6UL_PAD_LCD_DATA04 = 74,
+	MX6UL_PAD_LCD_DATA05 = 75,
+	MX6UL_PAD_LCD_DATA06 = 76,
+	MX6UL_PAD_LCD_DATA07 = 77,
+	MX6UL_PAD_LCD_DATA08 = 78,
+	MX6UL_PAD_LCD_DATA09 = 79,
+	MX6UL_PAD_LCD_DATA10 = 80,
+	MX6UL_PAD_LCD_DATA11 = 81,
+	MX6UL_PAD_LCD_DATA12 = 82,
+	MX6UL_PAD_LCD_DATA13 = 83,
+	MX6UL_PAD_LCD_DATA14 = 84,
+	MX6UL_PAD_LCD_DATA15 = 85,
+	MX6UL_PAD_LCD_DATA16 = 86,
+	MX6UL_PAD_LCD_DATA17 = 87,
+	MX6UL_PAD_LCD_DATA18 = 88,
+	MX6UL_PAD_LCD_DATA19 = 89,
+	MX6UL_PAD_LCD_DATA20 = 90,
+	MX6UL_PAD_LCD_DATA21 = 91,
+	MX6UL_PAD_LCD_DATA22 = 92,
+	MX6UL_PAD_LCD_DATA23 = 93,
+	MX6UL_PAD_NAND_RE_B = 94,
+	MX6UL_PAD_NAND_WE_B = 95,
+	MX6UL_PAD_NAND_DATA00 = 96,
+	MX6UL_PAD_NAND_DATA01 = 97,
+	MX6UL_PAD_NAND_DATA02 = 98,
+	MX6UL_PAD_NAND_DATA03 = 99,
+	MX6UL_PAD_NAND_DATA04 = 100,
+	MX6UL_PAD_NAND_DATA05 = 101,
+	MX6UL_PAD_NAND_DATA06 = 102,
+	MX6UL_PAD_NAND_DATA07 = 103,
+	MX6UL_PAD_NAND_ALE = 104,
+	MX6UL_PAD_NAND_WP_B = 105,
+	MX6UL_PAD_NAND_READY_B = 106,
+	MX6UL_PAD_NAND_CE0_B = 107,
+	MX6UL_PAD_NAND_CE1_B = 108,
+	MX6UL_PAD_NAND_CLE = 109,
+	MX6UL_PAD_NAND_DQS = 110,
+	MX6UL_PAD_SD1_CMD = 111,
+	MX6UL_PAD_SD1_CLK = 112,
+	MX6UL_PAD_SD1_DATA0 = 113,
+	MX6UL_PAD_SD1_DATA1 = 114,
+	MX6UL_PAD_SD1_DATA2 = 115,
+	MX6UL_PAD_SD1_DATA3 = 116,
+	MX6UL_PAD_CSI_MCLK = 117,
+	MX6UL_PAD_CSI_PIXCLK = 118,
+	MX6UL_PAD_CSI_VSYNC = 119,
+	MX6UL_PAD_CSI_HSYNC = 120,
+	MX6UL_PAD_CSI_DATA00 = 121,
+	MX6UL_PAD_CSI_DATA01 = 122,
+	MX6UL_PAD_CSI_DATA02 = 123,
+	MX6UL_PAD_CSI_DATA03 = 124,
+	MX6UL_PAD_CSI_DATA04 = 125,
+	MX6UL_PAD_CSI_DATA05 = 126,
+	MX6UL_PAD_CSI_DATA06 = 127,
+	MX6UL_PAD_CSI_DATA07 = 128,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx6ul_pinctrl_pads[] = {
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE2),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE3),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE4),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE5),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE6),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE7),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE8),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE9),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE10),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SNVS_TAMPER4),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE12),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE13),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE14),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE15),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE16),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_MOD),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TMS),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TDO),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TDI),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TCK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TRST_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO00),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO01),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO02),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO03),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO04),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO05),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO06),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO07),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO08),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO09),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART1_TX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART1_RX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART1_CTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART1_RTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART2_TX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART2_RX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART2_CTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART2_RTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART3_TX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART3_RX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART3_CTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART3_RTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART4_TX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART4_RX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART5_TX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART5_RX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_DATA0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_DATA1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_EN),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_DATA0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_DATA1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_EN),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_CLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_ER),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_DATA0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_DATA1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_EN),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_DATA0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_DATA1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_EN),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_CLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_ER),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_CLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_ENABLE),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_HSYNC),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_VSYNC),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_RESET),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA00),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA01),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA02),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA03),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA04),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA05),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA06),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA07),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA08),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA09),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA10),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA11),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA12),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA13),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA14),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA15),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA16),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA17),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA18),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA19),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA20),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA21),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA22),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA23),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_RE_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_WE_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA00),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA01),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA02),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA03),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA04),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA05),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA06),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA07),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_ALE),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_WP_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_READY_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CE0_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CE1_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CLE),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DQS),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_CMD),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_CLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA2),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA3),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_MCLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_PIXCLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_VSYNC),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_HSYNC),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA00),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA01),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA02),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA03),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA04),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA05),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA06),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA07),
+};
+
+static struct imx_pinctrl_soc_info imx6ul_pinctrl_info = {
+	.pins = imx6ul_pinctrl_pads,
+	.npins = ARRAY_SIZE(imx6ul_pinctrl_pads),
+};
+
+static struct of_device_id imx6ul_pinctrl_of_match[] = {
+	{ .compatible = "fsl,imx6ul-iomuxc", },
+	{ /* sentinel */ }
+};
+
+static int imx6ul_pinctrl_probe(struct platform_device *pdev)
+{
+	return imx_pinctrl_probe(pdev, &imx6ul_pinctrl_info);
+}
+
+static struct platform_driver imx6ul_pinctrl_driver = {
+	.driver = {
+		.name = "imx6ul-pinctrl",
+		.of_match_table = of_match_ptr(imx6ul_pinctrl_of_match),
+	},
+	.probe = imx6ul_pinctrl_probe,
+	.remove = imx_pinctrl_remove,
+};
+
+static int __init imx6ul_pinctrl_init(void)
+{
+	return platform_driver_register(&imx6ul_pinctrl_driver);
+}
+arch_initcall(imx6ul_pinctrl_init);
+
+static void __exit imx6ul_pinctrl_exit(void)
+{
+	platform_driver_unregister(&imx6ul_pinctrl_driver);
+}
+module_exit(imx6ul_pinctrl_exit);
+
+MODULE_AUTHOR("Anson Huang <Anson.Huang@freescale.com>");
+MODULE_DESCRIPTION("Freescale imx6ul pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 2062c22..dac4865 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -12,11 +12,6 @@
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  */
 
 #include <linux/kernel.h>
@@ -146,7 +141,7 @@
 struct byt_gpio {
 	struct gpio_chip		chip;
 	struct platform_device		*pdev;
-	spinlock_t			lock;
+	raw_spinlock_t			lock;
 	void __iomem			*reg_base;
 	struct pinctrl_gpio_range	*range;
 	struct byt_gpio_pin_context	*saved_context;
@@ -174,11 +169,11 @@
 	unsigned long flags;
 	u32 value;
 
-	spin_lock_irqsave(&vg->lock, flags);
+	raw_spin_lock_irqsave(&vg->lock, flags);
 	value = readl(reg);
 	value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
 	writel(value, reg);
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 }
 
 static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset)
@@ -201,6 +196,9 @@
 	struct byt_gpio *vg = to_byt_gpio(chip);
 	void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG);
 	u32 value, gpio_mux;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&vg->lock, flags);
 
 	/*
 	 * In most cases, func pin mux 000 means GPIO function.
@@ -214,18 +212,16 @@
 	value = readl(reg) & BYT_PIN_MUX;
 	gpio_mux = byt_get_gpio_mux(vg, offset);
 	if (WARN_ON(gpio_mux != value)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&vg->lock, flags);
 		value = readl(reg) & ~BYT_PIN_MUX;
 		value |= gpio_mux;
 		writel(value, reg);
-		spin_unlock_irqrestore(&vg->lock, flags);
 
 		dev_warn(&vg->pdev->dev,
 			 "pin %u forcibly re-configured as GPIO\n", offset);
 	}
 
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
+
 	pm_runtime_get(&vg->pdev->dev);
 
 	return 0;
@@ -250,7 +246,7 @@
 	if (offset >= vg->chip.ngpio)
 		return -EINVAL;
 
-	spin_lock_irqsave(&vg->lock, flags);
+	raw_spin_lock_irqsave(&vg->lock, flags);
 	value = readl(reg);
 
 	WARN(value & BYT_DIRECT_IRQ_EN,
@@ -265,11 +261,11 @@
 	writel(value, reg);
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else if (type & IRQ_TYPE_LEVEL_MASK)
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 
 	return 0;
 }
@@ -277,7 +273,15 @@
 static int byt_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
 	void __iomem *reg = byt_gpio_reg(chip, offset, BYT_VAL_REG);
-	return readl(reg) & BYT_LEVEL;
+	struct byt_gpio *vg = to_byt_gpio(chip);
+	unsigned long flags;
+	u32 val;
+
+	raw_spin_lock_irqsave(&vg->lock, flags);
+	val = readl(reg);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
+
+	return val & BYT_LEVEL;
 }
 
 static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -287,7 +291,7 @@
 	unsigned long flags;
 	u32 old_val;
 
-	spin_lock_irqsave(&vg->lock, flags);
+	raw_spin_lock_irqsave(&vg->lock, flags);
 
 	old_val = readl(reg);
 
@@ -296,7 +300,7 @@
 	else
 		writel(old_val & ~BYT_LEVEL, reg);
 
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 }
 
 static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -306,13 +310,13 @@
 	unsigned long flags;
 	u32 value;
 
-	spin_lock_irqsave(&vg->lock, flags);
+	raw_spin_lock_irqsave(&vg->lock, flags);
 
 	value = readl(reg) | BYT_DIR_MASK;
 	value &= ~BYT_INPUT_EN;		/* active low */
 	writel(value, reg);
 
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 
 	return 0;
 }
@@ -326,7 +330,7 @@
 	unsigned long flags;
 	u32 reg_val;
 
-	spin_lock_irqsave(&vg->lock, flags);
+	raw_spin_lock_irqsave(&vg->lock, flags);
 
 	/*
 	 * Before making any direction modifications, do a check if gpio
@@ -345,7 +349,7 @@
 	else
 		writel(reg_val & ~BYT_LEVEL, reg);
 
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 
 	return 0;
 }
@@ -354,18 +358,19 @@
 {
 	struct byt_gpio *vg = to_byt_gpio(chip);
 	int i;
-	unsigned long flags;
 	u32 conf0, val, offs;
 
-	spin_lock_irqsave(&vg->lock, flags);
-
 	for (i = 0; i < vg->chip.ngpio; i++) {
 		const char *pull_str = NULL;
 		const char *pull = NULL;
+		unsigned long flags;
 		const char *label;
 		offs = vg->range->pins[i] * 16;
+
+		raw_spin_lock_irqsave(&vg->lock, flags);
 		conf0 = readl(vg->reg_base + offs + BYT_CONF0_REG);
 		val = readl(vg->reg_base + offs + BYT_VAL_REG);
+		raw_spin_unlock_irqrestore(&vg->lock, flags);
 
 		label = gpiochip_is_requested(chip, i);
 		if (!label)
@@ -418,7 +423,6 @@
 
 		seq_puts(s, "\n");
 	}
-	spin_unlock_irqrestore(&vg->lock, flags);
 }
 
 static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
@@ -450,8 +454,10 @@
 	unsigned offset = irqd_to_hwirq(d);
 	void __iomem *reg;
 
+	raw_spin_lock(&vg->lock);
 	reg = byt_gpio_reg(&vg->chip, offset, BYT_INT_STAT_REG);
 	writel(BIT(offset % 32), reg);
+	raw_spin_unlock(&vg->lock);
 }
 
 static void byt_irq_unmask(struct irq_data *d)
@@ -463,9 +469,9 @@
 	void __iomem *reg;
 	u32 value;
 
-	spin_lock_irqsave(&vg->lock, flags);
-
 	reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
+
+	raw_spin_lock_irqsave(&vg->lock, flags);
 	value = readl(reg);
 
 	switch (irqd_get_trigger_type(d)) {
@@ -486,7 +492,7 @@
 
 	writel(value, reg);
 
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 }
 
 static void byt_irq_mask(struct irq_data *d)
@@ -578,7 +584,7 @@
 	if (IS_ERR(vg->reg_base))
 		return PTR_ERR(vg->reg_base);
 
-	spin_lock_init(&vg->lock);
+	raw_spin_lock_init(&vg->lock);
 
 	gc = &vg->chip;
 	gc->label = dev_name(&pdev->dev);
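
The Baytrail hunks above (and the Cherryview ones that follow) convert the driver lock from spinlock_t to raw_spinlock_t and pull the register reads and writes under it. The distinction matters on PREEMPT_RT, where spinlock_t becomes a sleeping lock and therefore may not be taken from the hard-irq paths an irqchip implements; raw_spinlock_t keeps its non-sleeping semantics everywhere. A minimal sketch of the pattern:

    static DEFINE_RAW_SPINLOCK(lock);

    static void rmw_reg(void __iomem *reg, u32 set)   /* hypothetical */
    {
        unsigned long flags;

        raw_spin_lock_irqsave(&lock, flags);   /* hard-irq safe on RT too */
        writel(readl(reg) | set, reg);         /* read-modify-write under lock */
        raw_spin_unlock_irqrestore(&lock, flags);
    }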
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 3f737da..2d5d3dd 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -174,7 +174,7 @@
 	struct pinctrl_dev *pctldev;
 	struct gpio_chip chip;
 	void __iomem *regs;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	unsigned intr_lines[16];
 	const struct chv_community *community;
 	u32 saved_intmask;
@@ -720,13 +720,13 @@
 	u32 ctrl0, ctrl1;
 	bool locked;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
 	ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1));
 	locked = chv_pad_locked(pctrl, offset);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	if (ctrl0 & CHV_PADCTRL0_GPIOEN) {
 		seq_puts(s, "GPIO ");
@@ -789,14 +789,14 @@
 
 	grp = &pctrl->community->groups[group];
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	/* Check first that the pad is not locked */
 	for (i = 0; i < grp->npins; i++) {
 		if (chv_pad_locked(pctrl, grp->pins[i])) {
 			dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n",
 				 grp->pins[i]);
-			spin_unlock_irqrestore(&pctrl->lock, flags);
+			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 			return -EBUSY;
 		}
 	}
@@ -839,7 +839,7 @@
 			pin, altfunc->mode, altfunc->invert_oe ? "" : "not ");
 	}
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	return 0;
 }
@@ -853,13 +853,13 @@
 	void __iomem *reg;
 	u32 value;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	if (chv_pad_locked(pctrl, offset)) {
 		value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
 		if (!(value & CHV_PADCTRL0_GPIOEN)) {
 			/* Locked so cannot enable */
-			spin_unlock_irqrestore(&pctrl->lock, flags);
+			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 			return -EBUSY;
 		}
 	} else {
@@ -899,7 +899,7 @@
 		chv_writel(value, reg);
 	}
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	return 0;
 }
@@ -913,13 +913,13 @@
 	void __iomem *reg;
 	u32 value;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
 	value = readl(reg) & ~CHV_PADCTRL0_GPIOEN;
 	chv_writel(value, reg);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 }
 
 static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
@@ -931,7 +931,7 @@
 	unsigned long flags;
 	u32 ctrl0;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK;
 	if (input)
@@ -940,7 +940,7 @@
 		ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT;
 	chv_writel(ctrl0, reg);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	return 0;
 }
@@ -965,10 +965,10 @@
 	u16 arg = 0;
 	u32 term;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 	ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
 	ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT;
 
@@ -1042,7 +1042,7 @@
 	unsigned long flags;
 	u32 ctrl0, pull;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 	ctrl0 = readl(reg);
 
 	switch (param) {
@@ -1065,7 +1065,7 @@
 			pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
 			break;
 		default:
-			spin_unlock_irqrestore(&pctrl->lock, flags);
+			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 			return -EINVAL;
 		}
 
@@ -1083,7 +1083,7 @@
 			pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
 			break;
 		default:
-			spin_unlock_irqrestore(&pctrl->lock, flags);
+			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 			return -EINVAL;
 		}
 
@@ -1091,12 +1091,12 @@
 		break;
 
 	default:
-		spin_unlock_irqrestore(&pctrl->lock, flags);
+		raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 		return -EINVAL;
 	}
 
 	chv_writel(ctrl0, reg);
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	return 0;
 }
@@ -1169,9 +1169,12 @@
 {
 	struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
 	int pin = chv_gpio_offset_to_pin(pctrl, offset);
+	unsigned long flags;
 	u32 ctrl0, cfg;
 
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 	ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
 	cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
@@ -1189,7 +1192,7 @@
 	void __iomem *reg;
 	u32 ctrl0;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
 	ctrl0 = readl(reg);
@@ -1201,7 +1204,7 @@
 
 	chv_writel(ctrl0, reg);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 }
 
 static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -1209,8 +1212,11 @@
 	struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
 	unsigned pin = chv_gpio_offset_to_pin(pctrl, offset);
 	u32 ctrl0, direction;
+	unsigned long flags;
 
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 	ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
 	direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
@@ -1248,14 +1254,14 @@
 	int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d));
 	u32 intr_line;
 
-	spin_lock(&pctrl->lock);
+	raw_spin_lock(&pctrl->lock);
 
 	intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
 	intr_line &= CHV_PADCTRL0_INTSEL_MASK;
 	intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
 	chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT);
 
-	spin_unlock(&pctrl->lock);
+	raw_spin_unlock(&pctrl->lock);
 }
 
 static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
@@ -1266,7 +1272,7 @@
 	u32 value, intr_line;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
 	intr_line &= CHV_PADCTRL0_INTSEL_MASK;
@@ -1279,7 +1285,7 @@
 		value |= BIT(intr_line);
 	chv_writel(value, pctrl->regs + CHV_INTMASK);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 }
 
 static void chv_gpio_irq_mask(struct irq_data *d)
@@ -1313,6 +1319,7 @@
 		unsigned long flags;
 		u32 intsel, value;
 
+		raw_spin_lock_irqsave(&pctrl->lock, flags);
 		intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
 		intsel &= CHV_PADCTRL0_INTSEL_MASK;
 		intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
@@ -1323,12 +1330,11 @@
 		else
 			handler = handle_edge_irq;
 
-		spin_lock_irqsave(&pctrl->lock, flags);
 		if (!pctrl->intr_lines[intsel]) {
-			__irq_set_handler_locked(d->irq, handler);
+			irq_set_handler_locked(d, handler);
 			pctrl->intr_lines[intsel] = offset;
 		}
-		spin_unlock_irqrestore(&pctrl->lock, flags);
+		raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 	}
 
 	chv_gpio_irq_unmask(d);
@@ -1344,7 +1350,7 @@
 	unsigned long flags;
 	u32 value;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	/*
 	 * Pins which can be used as shared interrupt are configured in
@@ -1389,11 +1395,11 @@
 	pctrl->intr_lines[value] = offset;
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else if (type & IRQ_TYPE_LEVEL_MASK)
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	return 0;
 }
@@ -1412,7 +1418,7 @@
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned long pending;
 	u32 intr_line;
 
@@ -1505,7 +1511,7 @@
 	if (i == ARRAY_SIZE(chv_communities))
 		return -ENODEV;
 
-	spin_lock_init(&pctrl->lock);
+	raw_spin_lock_init(&pctrl->lock);
 	pctrl->dev = &pdev->dev;
 
 #ifdef CONFIG_PM_SLEEP
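
The point of the spinlock_t to raw_spinlock_t conversion above: on PREEMPT_RT,
spinlock_t turns into a sleeping lock, but this lock is taken from irqchip
callbacks that run in hard interrupt context, so it must remain a true
spinning lock. A minimal sketch of the resulting pattern (illustrative names,
not part of the patch):

    #include <linux/io.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct example_ctrl {
            raw_spinlock_t lock;    /* protects MMIO read-modify-write */
            void __iomem *regs;
    };

    static void example_rmw(struct example_ctrl *ctrl, u32 set)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&ctrl->lock, flags);
            writel(readl(ctrl->regs) | set, ctrl->regs);
            raw_spin_unlock_irqrestore(&ctrl->lock, flags);
    }

The same reasoning covers the new locking in chv_gpio_get() and
chv_gpio_get_direction(): a single readl() is atomic by itself, but taking the
lock keeps the read ordered against concurrent read-modify-write of the same
pad register.
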
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index f9ee0d6..bb377c1 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -758,9 +758,9 @@
 	writel(value, reg);
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else if (type & IRQ_TYPE_LEVEL_MASK)
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 
 	spin_unlock_irqrestore(&pctrl->lock, flags);
 
@@ -840,7 +840,7 @@
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int i;
 
 	chained_irq_enter(chip, desc);
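
Both Intel drivers above also migrate off the deprecated genirq helpers: the
chip and handler data are now derived from the irq_desc the chained handler
already holds, and __irq_set_handler_locked(d->irq, h) becomes
irq_set_handler_locked(d, h). A sketch of the new chained-handler shape
(illustrative, not from the patch):

    #include <linux/irq.h>
    #include <linux/irqchip/chained_irq.h>
    #include <linux/irqdesc.h>

    static void example_demux(unsigned int irq, struct irq_desc *desc)
    {
            /* was: struct irq_chip *chip = irq_get_chip(irq); */
            struct irq_chip *chip = irq_desc_get_chip(desc);

            chained_irq_enter(chip, desc);
            /* fetch driver state with irq_desc_get_handler_data(desc),
             * then demux per-pin interrupts via generic_handle_irq() */
            chained_irq_exit(chip, desc);
    }

Going through the descriptor avoids a redundant lookup by irq number and is a
step toward dropping the irq argument from chained handlers entirely.
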
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8173.c b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
index d0c811d..ad27184 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8173.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
@@ -385,6 +385,7 @@
 	.driver = {
 		.name = "mediatek-mt8173-pinctrl",
 		.of_match_table = mt8173_pctrl_match,
+		.pm = &mtk_eint_pm_ops,
 	},
 };
 
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index ad1ea16..7726c6c 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -33,6 +33,7 @@
 #include <linux/mfd/syscon.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/pm.h>
 #include <dt-bindings/pinctrl/mt65xx.h>
 
 #include "../core.h"
@@ -702,7 +703,7 @@
 
 	ret = mtk_pctrl_is_function_valid(pctl, g->pin, function);
 	if (!ret) {
-		dev_err(pctl->dev, "invaild function %d on group %d .\n",
+		dev_err(pctl->dev, "invalid function %d on group %d\n",
 				function, group);
 		return -EINVAL;
 	}
@@ -1062,6 +1063,77 @@
 	return 0;
 }
 
+static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+	struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+	int shift = d->hwirq & 0x1f;
+	int reg = d->hwirq >> 5;
+
+	if (on)
+		pctl->wake_mask[reg] |= BIT(shift);
+	else
+		pctl->wake_mask[reg] &= ~BIT(shift);
+
+	return 0;
+}
+
+static void mtk_eint_chip_write_mask(const struct mtk_eint_offsets *chip,
+		void __iomem *eint_reg_base, u32 *buf)
+{
+	int port;
+	void __iomem *reg;
+
+	for (port = 0; port < chip->ports; port++) {
+		reg = eint_reg_base + (port << 2);
+		writel_relaxed(~buf[port], reg + chip->mask_set);
+		writel_relaxed(buf[port], reg + chip->mask_clr);
+	}
+}
+
+static void mtk_eint_chip_read_mask(const struct mtk_eint_offsets *chip,
+		void __iomem *eint_reg_base, u32 *buf)
+{
+	int port;
+	void __iomem *reg;
+
+	for (port = 0; port < chip->ports; port++) {
+		reg = eint_reg_base + chip->mask + (port << 2);
+		buf[port] = ~readl_relaxed(reg);
+		/* Mask is 0 when irq is enabled, and 1 when disabled. */
+	}
+}
+
+static int mtk_eint_suspend(struct device *device)
+{
+	void __iomem *reg;
+	struct mtk_pinctrl *pctl = dev_get_drvdata(device);
+	const struct mtk_eint_offsets *eint_offsets =
+			&pctl->devdata->eint_offsets;
+
+	reg = pctl->eint_reg_base;
+	mtk_eint_chip_read_mask(eint_offsets, reg, pctl->cur_mask);
+	mtk_eint_chip_write_mask(eint_offsets, reg, pctl->wake_mask);
+
+	return 0;
+}
+
+static int mtk_eint_resume(struct device *device)
+{
+	struct mtk_pinctrl *pctl = dev_get_drvdata(device);
+	const struct mtk_eint_offsets *eint_offsets =
+			&pctl->devdata->eint_offsets;
+
+	mtk_eint_chip_write_mask(eint_offsets,
+			pctl->eint_reg_base, pctl->cur_mask);
+
+	return 0;
+}
+
+const struct dev_pm_ops mtk_eint_pm_ops = {
+	.suspend = mtk_eint_suspend,
+	.resume = mtk_eint_resume,
+};
+
 static void mtk_eint_ack(struct irq_data *d)
 {
 	struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
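
mtk_eint_irq_set_wake() above packs one wake bit per EINT line into an array
of 32-bit words: hwirq >> 5 selects the word and hwirq & 0x1f the bit within
it. On suspend the live hardware mask is snapshotted into cur_mask (note the
inversion: the register holds 1 for masked, the saved buffer holds 1 for
enabled) and wake_mask is written out, so only wake-enabled lines can
interrupt; resume restores the snapshot. A stand-alone check of the bit
arithmetic (the line number is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t wake_mask[8] = { 0 };
            unsigned int hwirq = 77;           /* hypothetical EINT line */
            unsigned int reg = hwirq >> 5;     /* word index: 77 / 32 = 2 */
            unsigned int shift = hwirq & 0x1f; /* bit index:  77 % 32 = 13 */

            wake_mask[reg] |= UINT32_C(1) << shift;
            printf("EINT %u -> wake_mask[%u] bit %u, word now 0x%08x\n",
                   hwirq, reg, shift, (unsigned int)wake_mask[reg]);
            return 0;
    }
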
@@ -1076,10 +1148,12 @@
 
 static struct irq_chip mtk_pinctrl_irq_chip = {
 	.name = "mt-eint",
+	.irq_disable = mtk_eint_mask,
 	.irq_mask = mtk_eint_mask,
 	.irq_unmask = mtk_eint_unmask,
 	.irq_ack = mtk_eint_ack,
 	.irq_set_type = mtk_eint_set_type,
+	.irq_set_wake = mtk_eint_irq_set_wake,
 	.irq_request_resources = mtk_pinctrl_irq_request_resources,
 	.irq_release_resources = mtk_pinctrl_irq_release_resources,
 };
@@ -1118,8 +1192,8 @@
 
 static void mtk_eint_irq_handler(unsigned irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct mtk_pinctrl *pctl = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct mtk_pinctrl *pctl = irq_desc_get_handler_data(desc);
 	unsigned int status, eint_num;
 	int offset, index, virq;
 	const struct mtk_eint_offsets *eint_offsets =
@@ -1202,12 +1276,6 @@
 	return 0;
 }
 
-static struct pinctrl_desc mtk_pctrl_desc = {
-	.confops	= &mtk_pconf_ops,
-	.pctlops	= &mtk_pctrl_ops,
-	.pmxops		= &mtk_pmx_ops,
-};
-
 int mtk_pctrl_init(struct platform_device *pdev,
 		const struct mtk_pinctrl_devdata *data,
 		struct regmap *regmap)
@@ -1217,7 +1285,7 @@
 	struct device_node *np = pdev->dev.of_node, *node;
 	struct property *prop;
 	struct resource *res;
-	int i, ret, irq;
+	int i, ret, irq, ports_buf;
 
 	pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
 	if (!pctl)
@@ -1265,12 +1333,17 @@
 
 	for (i = 0; i < pctl->devdata->npins; i++)
 		pins[i] = pctl->devdata->pins[i].pin;
-	mtk_pctrl_desc.name = dev_name(&pdev->dev);
-	mtk_pctrl_desc.owner = THIS_MODULE;
-	mtk_pctrl_desc.pins = pins;
-	mtk_pctrl_desc.npins = pctl->devdata->npins;
+
+	pctl->pctl_desc.name = dev_name(&pdev->dev);
+	pctl->pctl_desc.owner = THIS_MODULE;
+	pctl->pctl_desc.pins = pins;
+	pctl->pctl_desc.npins = pctl->devdata->npins;
+	pctl->pctl_desc.confops = &mtk_pconf_ops;
+	pctl->pctl_desc.pctlops = &mtk_pctrl_ops;
+	pctl->pctl_desc.pmxops = &mtk_pmx_ops;
 	pctl->dev = &pdev->dev;
-	pctl->pctl_dev = pinctrl_register(&mtk_pctrl_desc, &pdev->dev, pctl);
+
+	pctl->pctl_dev = pinctrl_register(&pctl->pctl_desc, &pdev->dev, pctl);
 	if (IS_ERR(pctl->pctl_dev)) {
 		dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
 		return PTR_ERR(pctl->pctl_dev);
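
Moving the descriptor from the file-static mtk_pctrl_desc into struct
mtk_pinctrl is more than tidying: the old probe patched name, pins and npins
into shared static data, which breaks as soon as a second pin controller
instance probes. A sketch of the two shapes (illustrative, not from the
patch):

    #include <linux/pinctrl/pinctrl.h>

    /* Before: every probe clobbered one shared, mutable descriptor. */
    static struct pinctrl_desc shared_desc;

    /* After: each device carries its own descriptor. */
    struct example_pinctrl {
            struct pinctrl_desc pctl_desc;  /* per-instance state */
            struct pinctrl_dev *pctl_dev;
    };
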
@@ -1319,6 +1392,21 @@
 		goto chip_error;
 	}
 
+	ports_buf = pctl->devdata->eint_offsets.ports;
+	pctl->wake_mask = devm_kcalloc(&pdev->dev, ports_buf,
+					sizeof(*pctl->wake_mask), GFP_KERNEL);
+	if (!pctl->wake_mask) {
+		ret = -ENOMEM;
+		goto chip_error;
+	}
+
+	pctl->cur_mask = devm_kcalloc(&pdev->dev, ports_buf,
+					sizeof(*pctl->cur_mask), GFP_KERNEL);
+	if (!pctl->cur_mask) {
+		ret = -ENOMEM;
+		goto chip_error;
+	}
+
 	pctl->eint_dual_edges = devm_kcalloc(&pdev->dev, pctl->devdata->ap_num,
 					     sizeof(int), GFP_KERNEL);
 	if (!pctl->eint_dual_edges) {
@@ -1348,11 +1436,9 @@
 		irq_set_chip_and_handler(virq, &mtk_pinctrl_irq_chip,
 			handle_level_irq);
 		irq_set_chip_data(virq, pctl);
-		set_irq_flags(virq, IRQF_VALID);
 	};
 
 	irq_set_chained_handler_and_data(irq, mtk_eint_irq_handler, pctl);
-	set_irq_flags(irq, IRQF_VALID);
 	return 0;
 
 chip_error:
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
index 30213e5..55a5343 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
@@ -256,6 +256,7 @@
 struct mtk_pinctrl {
 	struct regmap	*regmap1;
 	struct regmap	*regmap2;
+	struct pinctrl_desc pctl_desc;
 	struct device           *dev;
 	struct gpio_chip	*chip;
 	struct mtk_pinctrl_group	*groups;
@@ -266,6 +267,8 @@
 	void __iomem		*eint_reg_base;
 	struct irq_domain	*domain;
 	int			*eint_dual_edges;
+	u32 *wake_mask;
+	u32 *cur_mask;
 };
 
 int mtk_pctrl_init(struct platform_device *pdev,
@@ -281,4 +284,6 @@
 		const struct mtk_pin_ies_smt_set *ies_smt_infos, unsigned int info_num,
 		unsigned int pin, unsigned char align, int value);
 
+extern const struct dev_pm_ops mtk_eint_pm_ops;
+
 #endif /* __PINCTRL_MTK_COMMON_H */
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index c748407..8392083 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -355,25 +355,6 @@
 	PINCTRL_PIN(DB8500_PIN_AC27, "GPIO267_AC27"),
 };
 
-#define DB8500_GPIO_RANGE(a, b, c) { .name = "DB8500", .id = a, .base = b, \
-			.pin_base = b, .npins = c }
-
-/*
- * This matches the 32-pin gpio chips registered by the GPIO portion. This
- * cannot be const since we assign the struct gpio_chip * pointer at runtime.
- */
-static struct pinctrl_gpio_range nmk_db8500_ranges[] = {
-	DB8500_GPIO_RANGE(0, 0, 32),
-	DB8500_GPIO_RANGE(1, 32, 5),
-	DB8500_GPIO_RANGE(2, 64, 32),
-	DB8500_GPIO_RANGE(3, 96, 2),
-	DB8500_GPIO_RANGE(4, 128, 32),
-	DB8500_GPIO_RANGE(5, 160, 12),
-	DB8500_GPIO_RANGE(6, 192, 32),
-	DB8500_GPIO_RANGE(7, 224, 7),
-	DB8500_GPIO_RANGE(8, 256, 12),
-};
-
 /*
  * Read the pin group names like this:
  * u0_a_1    = first group of pins for uart0 on alt function a
@@ -1238,8 +1219,6 @@
 };
 
 static const struct nmk_pinctrl_soc_data nmk_db8500_soc = {
-	.gpio_ranges = nmk_db8500_ranges,
-	.gpio_num_ranges = ARRAY_SIZE(nmk_db8500_ranges),
 	.pins = nmk_db8500_pins,
 	.npins = ARRAY_SIZE(nmk_db8500_pins),
 	.functions = nmk_db8500_functions,
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c
index d7ba544..2860eaf 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c
@@ -341,28 +341,6 @@
 	PINCTRL_PIN(DB8540_PIN_D17, "GPIO267_D17"),
 };
 
-#define DB8540_GPIO_RANGE(a, b, c) { .name = "db8540", .id = a, .base = b, \
-			.pin_base = b, .npins = c }
-
-/*
- * This matches the 32-pin gpio chips registered by the GPIO portion. This
- * cannot be const since we assign the struct gpio_chip * pointer at runtime.
- */
-static struct pinctrl_gpio_range nmk_db8540_ranges[] = {
-	DB8540_GPIO_RANGE(0, 0, 18),
-	DB8540_GPIO_RANGE(0, 22, 7),
-	DB8540_GPIO_RANGE(1, 33, 6),
-	DB8540_GPIO_RANGE(2, 64, 4),
-	DB8540_GPIO_RANGE(2, 70, 18),
-	DB8540_GPIO_RANGE(3, 116, 12),
-	DB8540_GPIO_RANGE(4, 128, 32),
-	DB8540_GPIO_RANGE(5, 160, 9),
-	DB8540_GPIO_RANGE(6, 192, 23),
-	DB8540_GPIO_RANGE(6, 219, 5),
-	DB8540_GPIO_RANGE(7, 224, 9),
-	DB8540_GPIO_RANGE(8, 256, 12),
-};
-
 /*
  * Read the pin group names like this:
  * u0_a_1    = first group of pins for uart0 on alt function a
@@ -1247,8 +1225,6 @@
 };
 
 static const struct nmk_pinctrl_soc_data nmk_db8540_soc = {
-	.gpio_ranges = nmk_db8540_ranges,
-	.gpio_num_ranges = ARRAY_SIZE(nmk_db8540_ranges),
 	.pins = nmk_db8540_pins,
 	.npins = ARRAY_SIZE(nmk_db8540_pins),
 	.functions = nmk_db8540_functions,
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
index 2cd7147..587b222 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
@@ -264,20 +264,6 @@
 	PINCTRL_PIN(STN8815_PIN_J22, "GPIO123_J22"),
 };
 
-#define STN8815_GPIO_RANGE(a, b, c) { .name = "STN8815", .id = a, .base = b, \
-			.pin_base = b, .npins = c }
-
-/*
- * This matches the 32-pin gpio chips registered by the GPIO portion. This
- * cannot be const since we assign the struct gpio_chip * pointer at runtime.
- */
-static struct pinctrl_gpio_range nmk_stn8815_ranges[] = {
-	STN8815_GPIO_RANGE(0, 0, 32),
-	STN8815_GPIO_RANGE(1, 32, 32),
-	STN8815_GPIO_RANGE(2, 64, 32),
-	STN8815_GPIO_RANGE(3, 96, 28),
-};
-
 /*
  * Read the pin group names like this:
  * u0_a_1    = first group of pins for uart0 on alt function a
@@ -285,9 +271,11 @@
  */
 
 /* Altfunction A */
-static const unsigned u0_a_1_pins[] = { STN8815_PIN_B4, STN8815_PIN_D5,
-	STN8815_PIN_C5, STN8815_PIN_A4, STN8815_PIN_B5, STN8815_PIN_D6,
-	STN8815_PIN_C6, STN8815_PIN_B6 };
+static const unsigned u0txrx_a_1_pins[] = { STN8815_PIN_B4, STN8815_PIN_D5 };
+static const unsigned u0ctsrts_a_1_pins[] = { STN8815_PIN_C5, STN8815_PIN_B6 };
+/* Modem pins: DCD, DSR, RI, DTR */
+static const unsigned u0modem_a_1_pins[] = { STN8815_PIN_A4, STN8815_PIN_B5,
+	STN8815_PIN_D6, STN8815_PIN_C6 };
 static const unsigned mmcsd_a_1_pins[] = { STN8815_PIN_B10, STN8815_PIN_A10,
 	STN8815_PIN_C11, STN8815_PIN_B11, STN8815_PIN_A11, STN8815_PIN_C12,
 	STN8815_PIN_B12, STN8815_PIN_A12, STN8815_PIN_C13, STN8815_PIN_C15 };
@@ -304,7 +292,9 @@
 			.npins = ARRAY_SIZE(a##_pins), .altsetting = b }
 
 static const struct nmk_pingroup nmk_stn8815_groups[] = {
-	STN8815_PIN_GROUP(u0_a_1, NMK_GPIO_ALT_A),
+	STN8815_PIN_GROUP(u0txrx_a_1, NMK_GPIO_ALT_A),
+	STN8815_PIN_GROUP(u0ctsrts_a_1, NMK_GPIO_ALT_A),
+	STN8815_PIN_GROUP(u0modem_a_1, NMK_GPIO_ALT_A),
 	STN8815_PIN_GROUP(mmcsd_a_1, NMK_GPIO_ALT_A),
 	STN8815_PIN_GROUP(mmcsd_b_1, NMK_GPIO_ALT_B),
 	STN8815_PIN_GROUP(u1_a_1, NMK_GPIO_ALT_A),
@@ -318,7 +308,7 @@
 #define STN8815_FUNC_GROUPS(a, b...)	   \
 static const char * const a##_groups[] = { b };
 
-STN8815_FUNC_GROUPS(u0, "u0_a_1");
+STN8815_FUNC_GROUPS(u0, "u0txrx_a_1", "u0ctsrts_a_1", "u0modem_a_1");
 STN8815_FUNC_GROUPS(mmcsd, "mmcsd_a_1", "mmcsd_b_1");
 STN8815_FUNC_GROUPS(u1, "u1_a_1", "u1_b_1");
 STN8815_FUNC_GROUPS(i2c1, "i2c1_a_1");
@@ -342,8 +332,6 @@
 };
 
 static const struct nmk_pinctrl_soc_data nmk_stn8815_soc = {
-	.gpio_ranges = nmk_stn8815_ranges,
-	.gpio_num_ranges = ARRAY_SIZE(nmk_stn8815_ranges),
 	.pins = nmk_stn8815_pins,
 	.npins = ARRAY_SIZE(nmk_stn8815_pins),
 	.functions = nmk_stn8815_functions,
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 809d884..352ede1 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -203,6 +203,7 @@
 
 #define GPIO_BLOCK_SHIFT 5
 #define NMK_GPIO_PER_CHIP (1 << GPIO_BLOCK_SHIFT)
+#define NMK_MAX_BANKS DIV_ROUND_UP(ARCH_NR_GPIOS, NMK_GPIO_PER_CHIP)
 
 /* Register in the logic block */
 #define NMK_GPIO_DAT	0x00
@@ -282,8 +283,7 @@
 	void __iomem *prcm_base;
 };
 
-static struct nmk_gpio_chip *
-nmk_gpio_chips[DIV_ROUND_UP(ARCH_NR_GPIOS, NMK_GPIO_PER_CHIP)];
+static struct nmk_gpio_chip *nmk_gpio_chips[NMK_MAX_BANKS];
 
 static DEFINE_SPINLOCK(nmk_gpio_slpm_lock);
 
@@ -843,10 +843,9 @@
 	clk_disable(nmk_chip->clk);
 }
 
-static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
-				   u32 status)
+static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status)
 {
-	struct irq_chip *host_chip = irq_get_chip(irq);
+	struct irq_chip *host_chip = irq_desc_get_chip(desc);
 	struct gpio_chip *chip = irq_desc_get_handler_data(desc);
 
 	chained_irq_enter(host_chip, desc);
@@ -871,17 +870,16 @@
 	status = readl(nmk_chip->addr + NMK_GPIO_IS);
 	clk_disable(nmk_chip->clk);
 
-	__nmk_gpio_irq_handler(irq, desc, status);
+	__nmk_gpio_irq_handler(desc, status);
 }
 
-static void nmk_gpio_latent_irq_handler(unsigned int irq,
-					   struct irq_desc *desc)
+static void nmk_gpio_latent_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
 	struct gpio_chip *chip = irq_desc_get_handler_data(desc);
 	struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
 	u32 status = nmk_chip->get_latent_status(nmk_chip->bank);
 
-	__nmk_gpio_irq_handler(irq, desc, status);
+	__nmk_gpio_irq_handler(desc, status);
 }
 
 /* I/O Functions */
@@ -1012,6 +1010,7 @@
 		int irq = gpio_to_irq(gpio);
 		struct irq_desc	*desc = irq_to_desc(irq);
 		int pullidx = 0;
+		int val;
 
 		if (pull)
 			pullidx = data_out ? 1 : 2;
@@ -1021,6 +1020,10 @@
 			   label ?: "(none)",
 			   pulls[pullidx],
 			   (mode < 0) ? "unknown" : modes[mode]);
+
+		val = nmk_gpio_get_input(chip, offset);
+		seq_printf(s, " VAL %d", val);
+
 		/*
 		 * This races with request_irq(), set_irq_type(),
 		 * and set_irq_wake() ... but those are "rare".
@@ -1162,29 +1165,90 @@
 	}
 }
 
+/*
+ * We will allocate memory for the state container using devm* allocators,
+ * binding to the first device reaching this point; it doesn't matter
+ * whether it is the pin controller or the GPIO driver. However, we need the
+ * right platform device when looking up resources, so pay attention to pdev.
+ */
+static struct nmk_gpio_chip *nmk_gpio_populate_chip(struct device_node *np,
+						struct platform_device *pdev)
+{
+	struct nmk_gpio_chip *nmk_chip;
+	struct platform_device *gpio_pdev;
+	struct gpio_chip *chip;
+	struct resource *res;
+	struct clk *clk;
+	void __iomem *base;
+	u32 id;
+
+	gpio_pdev = of_find_device_by_node(np);
+	if (!gpio_pdev) {
+		pr_err("populate \"%s\": device not found\n", np->name);
+		return ERR_PTR(-ENODEV);
+	}
+	if (of_property_read_u32(np, "gpio-bank", &id)) {
+		dev_err(&pdev->dev, "populate: gpio-bank property not found\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Already populated? */
+	nmk_chip = nmk_gpio_chips[id];
+	if (nmk_chip)
+		return nmk_chip;
+
+	nmk_chip = devm_kzalloc(&pdev->dev, sizeof(*nmk_chip), GFP_KERNEL);
+	if (!nmk_chip)
+		return ERR_PTR(-ENOMEM);
+
+	nmk_chip->bank = id;
+	chip = &nmk_chip->chip;
+	chip->base = id * NMK_GPIO_PER_CHIP;
+	chip->ngpio = NMK_GPIO_PER_CHIP;
+	chip->label = dev_name(&gpio_pdev->dev);
+	chip->dev = &gpio_pdev->dev;
+
+	res = platform_get_resource(gpio_pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return base;
+	nmk_chip->addr = base;
+
+	clk = clk_get(&gpio_pdev->dev, NULL);
+	if (IS_ERR(clk))
+		return (void *) clk;
+	clk_prepare(clk);
+	nmk_chip->clk = clk;
+
+	BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
+	nmk_gpio_chips[id] = nmk_chip;
+	return nmk_chip;
+}
+
 static int nmk_gpio_probe(struct platform_device *dev)
 {
 	struct device_node *np = dev->dev.of_node;
 	struct nmk_gpio_chip *nmk_chip;
 	struct gpio_chip *chip;
 	struct irq_chip *irqchip;
-	struct resource *res;
-	struct clk *clk;
 	int latent_irq;
 	bool supports_sleepmode;
-	void __iomem *base;
 	int irq;
 	int ret;
 
+	nmk_chip = nmk_gpio_populate_chip(np, dev);
+	if (IS_ERR(nmk_chip)) {
+		dev_err(&dev->dev, "could not populate nmk chip struct\n");
+		return PTR_ERR(nmk_chip);
+	}
+
 	if (of_get_property(np, "st,supports-sleepmode", NULL))
 		supports_sleepmode = true;
 	else
 		supports_sleepmode = false;
 
-	if (of_property_read_u32(np, "gpio-bank", &dev->id)) {
-		dev_err(&dev->dev, "gpio-bank property not found\n");
-		return -EINVAL;
-	}
+	/* Correct platform device ID */
+	dev->id = nmk_chip->bank;
 
 	irq = platform_get_irq(dev, 0);
 	if (irq < 0)
@@ -1193,27 +1257,10 @@
 	/* It's OK for this IRQ not to be present */
 	latent_irq = platform_get_irq(dev, 1);
 
-	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(&dev->dev, res);
-	if (IS_ERR(base))
-		return PTR_ERR(base);
-
-	clk = devm_clk_get(&dev->dev, NULL);
-	if (IS_ERR(clk))
-		return PTR_ERR(clk);
-	clk_prepare(clk);
-
-	nmk_chip = devm_kzalloc(&dev->dev, sizeof(*nmk_chip), GFP_KERNEL);
-	if (!nmk_chip)
-		return -ENOMEM;
-
 	/*
 	 * The virt address in nmk_chip->addr is in the nomadik register space,
 	 * so we can simply convert the resource address, without remapping
 	 */
-	nmk_chip->bank = dev->id;
-	nmk_chip->clk = clk;
-	nmk_chip->addr = base;
 	nmk_chip->parent_irq = irq;
 	nmk_chip->latent_parent_irq = latent_irq;
 	nmk_chip->sleepmode = supports_sleepmode;
@@ -1228,10 +1275,6 @@
 	chip->set = nmk_gpio_set_output;
 	chip->dbg_show = nmk_gpio_dbg_show;
 	chip->can_sleep = false;
-	chip->base = dev->id * NMK_GPIO_PER_CHIP;
-	chip->ngpio = NMK_GPIO_PER_CHIP;
-	chip->label = dev_name(&dev->dev);
-	chip->dev = &dev->dev;
 	chip->owner = THIS_MODULE;
 
 	irqchip = &nmk_chip->irqchip;
@@ -1253,14 +1296,10 @@
 	clk_disable(nmk_chip->clk);
 	chip->of_node = np;
 
-	ret = gpiochip_add(&nmk_chip->chip);
+	ret = gpiochip_add(chip);
 	if (ret)
 		return ret;
 
-	BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
-
-	nmk_gpio_chips[nmk_chip->bank] = nmk_chip;
-
 	platform_set_drvdata(dev, nmk_chip);
 
 	/*
@@ -1320,35 +1359,40 @@
 	return 0;
 }
 
-static struct pinctrl_gpio_range *
-nmk_match_gpio_range(struct pinctrl_dev *pctldev, unsigned offset)
+static struct nmk_gpio_chip *find_nmk_gpio_from_pin(unsigned pin)
 {
-	struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
 	int i;
+	struct nmk_gpio_chip *nmk_gpio;
 
-	for (i = 0; i < npct->soc->gpio_num_ranges; i++) {
-		struct pinctrl_gpio_range *range;
-
-		range = &npct->soc->gpio_ranges[i];
-		if (offset >= range->pin_base &&
-		    offset <= (range->pin_base + range->npins - 1))
-			return range;
+	for (i = 0; i < NMK_MAX_BANKS; i++) {
+		nmk_gpio = nmk_gpio_chips[i];
+		if (!nmk_gpio)
+			continue;
+		if (pin >= nmk_gpio->chip.base &&
+			pin < nmk_gpio->chip.base + nmk_gpio->chip.ngpio)
+			return nmk_gpio;
 	}
 	return NULL;
 }
 
+static struct gpio_chip *find_gc_from_pin(unsigned pin)
+{
+	struct nmk_gpio_chip *nmk_gpio = find_nmk_gpio_from_pin(pin);
+
+	if (nmk_gpio)
+		return &nmk_gpio->chip;
+	return NULL;
+}
+
 static void nmk_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
 		   unsigned offset)
 {
-	struct pinctrl_gpio_range *range;
-	struct gpio_chip *chip;
+	struct gpio_chip *chip = find_gc_from_pin(offset);
 
-	range = nmk_match_gpio_range(pctldev, offset);
-	if (!range || !range->gc) {
+	if (!chip) {
 		seq_printf(s, "invalid pin offset");
 		return;
 	}
-	chip = range->gc;
 	nmk_gpio_dbg_show_one(s, pctldev, chip, offset - chip->base, offset);
 }
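
find_nmk_gpio_from_pin() scans the bank array instead of just dividing by the
bank size because slots may be NULL for banks that never probed; with a fully
populated array the owning bank is simply pin / NMK_GPIO_PER_CHIP. A
stand-alone check of the numbering (the pin number is arbitrary):

    #include <stdio.h>

    #define NMK_GPIO_PER_CHIP 32

    int main(void)
    {
            unsigned int pin = 67;  /* arbitrary global GPIO number */

            /* prints: pin 67: bank 2, offset 3 */
            printf("pin %u: bank %u, offset %u\n", pin,
                   pin / NMK_GPIO_PER_CHIP, pin % NMK_GPIO_PER_CHIP);
            return 0;
    }
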
 
@@ -1693,25 +1737,16 @@
 	}
 
 	for (i = 0; i < g->npins; i++) {
-		struct pinctrl_gpio_range *range;
 		struct nmk_gpio_chip *nmk_chip;
-		struct gpio_chip *chip;
 		unsigned bit;
 
-		range = nmk_match_gpio_range(pctldev, g->pins[i]);
-		if (!range) {
+		nmk_chip = find_nmk_gpio_from_pin(g->pins[i]);
+		if (!nmk_chip) {
 			dev_err(npct->dev,
 				"invalid pin offset %d in group %s at index %d\n",
 				g->pins[i], g->name, i);
 			goto out_glitch;
 		}
-		if (!range->gc) {
-			dev_err(npct->dev, "GPIO chip missing in range for pin offset %d in group %s at index %d\n",
-				g->pins[i], g->name, i);
-			goto out_glitch;
-		}
-		chip = range->gc;
-		nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
 		dev_dbg(npct->dev, "setting pin %d to altsetting %d\n", g->pins[i], g->altsetting);
 
 		clk_enable(nmk_chip->clk);
@@ -1827,25 +1862,17 @@
 	};
 	struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
 	struct nmk_gpio_chip *nmk_chip;
-	struct pinctrl_gpio_range *range;
-	struct gpio_chip *chip;
 	unsigned bit;
 	pin_cfg_t cfg;
 	int pull, slpm, output, val, i;
 	bool lowemi, gpiomode, sleep;
 
-	range = nmk_match_gpio_range(pctldev, pin);
-	if (!range) {
-		dev_err(npct->dev, "invalid pin offset %d\n", pin);
+	nmk_chip = find_nmk_gpio_from_pin(pin);
+	if (!nmk_chip) {
+		dev_err(npct->dev,
+			"invalid pin offset %d\n", pin);
 		return -EINVAL;
 	}
-	if (!range->gc) {
-		dev_err(npct->dev, "GPIO chip missing in range for pin %d\n",
-			pin);
-		return -EINVAL;
-	}
-	chip = range->gc;
-	nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
 
 	for (i = 0; i < num_configs; i++) {
 		/*
@@ -1997,6 +2024,31 @@
 	if (version == PINCTRL_NMK_DB8540)
 		nmk_pinctrl_db8540_init(&npct->soc);
 
+	/*
+	 * Since we depend on the GPIO chips to provide clock and register base
+	 * for the pin control operations, make sure that we have these
+	 * populated before we continue. Follow the phandles to instantiate
+	 * them. The GPIO portion of the actual hardware may be probed before
+	 * or after this point: it shouldn't matter as the APIs are orthogonal.
+	 */
+	for (i = 0; i < NMK_MAX_BANKS; i++) {
+		struct device_node *gpio_np;
+		struct nmk_gpio_chip *nmk_chip;
+
+		gpio_np = of_parse_phandle(np, "nomadik-gpio-chips", i);
+		if (gpio_np) {
+			dev_info(&pdev->dev,
+				 "populate NMK GPIO %d \"%s\"\n",
+				 i, gpio_np->name);
+			nmk_chip = nmk_gpio_populate_chip(gpio_np, pdev);
+			if (IS_ERR(nmk_chip))
+				dev_err(&pdev->dev,
+					"could not populate nmk chip struct "
+					"- continue anyway\n");
+			of_node_put(gpio_np);
+		}
+	}
+
 	prcm_np = of_parse_phandle(np, "prcm", 0);
 	if (prcm_np)
 		npct->prcm_base = of_iomap(prcm_np, 0);
@@ -2011,19 +2063,6 @@
 		}
 	}
 
-	/*
-	 * We need all the GPIO drivers to probe FIRST, or we will not be able
-	 * to obtain references to the struct gpio_chip * for them, and we
-	 * need this to proceed.
-	 */
-	for (i = 0; i < npct->soc->gpio_num_ranges; i++) {
-		if (!nmk_gpio_chips[npct->soc->gpio_ranges[i].id]) {
-			dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
-			return -EPROBE_DEFER;
-		}
-		npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[npct->soc->gpio_ranges[i].id]->chip;
-	}
-
 	nmk_pinctrl_desc.pins = npct->soc->pins;
 	nmk_pinctrl_desc.npins = npct->soc->npins;
 	npct->dev = &pdev->dev;
@@ -2034,10 +2073,6 @@
 		return PTR_ERR(npct->pctl);
 	}
 
-	/* We will handle a range of GPIO pins */
-	for (i = 0; i < npct->soc->gpio_num_ranges; i++)
-		pinctrl_add_gpio_range(npct->pctl, &npct->soc->gpio_ranges[i]);
-
 	platform_set_drvdata(pdev, npct);
 	dev_info(&pdev->dev, "initialized Nomadik pin control driver\n");
 
@@ -2072,15 +2107,15 @@
 
 static int __init nmk_gpio_init(void)
 {
-	int ret;
+	return platform_driver_register(&nmk_gpio_driver);
+}
+subsys_initcall(nmk_gpio_init);
 
-	ret = platform_driver_register(&nmk_gpio_driver);
-	if (ret)
-		return ret;
+static int __init nmk_pinctrl_init(void)
+{
 	return platform_driver_register(&nmk_pinctrl_driver);
 }
-
-core_initcall(nmk_gpio_init);
+core_initcall(nmk_pinctrl_init);
 
 MODULE_AUTHOR("Prafulla WADASKAR and Alessandro Rubini");
 MODULE_DESCRIPTION("Nomadik GPIO Driver");
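
Two changes make the new probe ordering work. First, nmk_gpio_populate_chip()
is idempotent: whichever driver reaches a bank first (the pin controller via
the nomadik-gpio-chips phandles, or the GPIO driver itself) allocates the
per-bank state, and later callers get the cached pointer, which is why the
old -EPROBE_DEFER loop could be dropped. Second, the initcall split registers
the pin controller at core_initcall, which runs before the GPIO driver's
subsys_initcall. A minimal sketch of the idempotent-populate shape
(hypothetical names; like the driver, it relies on probes being serialized,
so no locking is shown):

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/slab.h>

    #define EXAMPLE_MAX_BANKS 16

    struct bank_state {
            void __iomem *base;
    };

    static struct bank_state *bank_states[EXAMPLE_MAX_BANKS];

    static struct bank_state *populate_bank(unsigned int id)
    {
            struct bank_state *st = bank_states[id];

            if (st)                 /* already set up by the other driver */
                    return st;

            st = kzalloc(sizeof(*st), GFP_KERNEL);
            if (!st)
                    return ERR_PTR(-ENOMEM);
            /* map registers, take the clock, etc. */
            bank_states[id] = st;
            return st;
    }
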
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.h b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
index d8215f1..30bba2a 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.h
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
@@ -121,8 +121,6 @@
 
 /**
  * struct nmk_pinctrl_soc_data - Nomadik pin controller per-SoC configuration
- * @gpio_ranges: An array of GPIO ranges for this SoC
- * @gpio_num_ranges: The number of GPIO ranges for this SoC
  * @pins:	An array describing all pins the pin controller affects.
  *		All pins which are also GPIOs must be listed first within the
  *		array, and be numbered identically to the GPIO controller's
@@ -137,8 +135,6 @@
  * @prcm_gpiocr_registers: The array of PRCM GPIOCR registers on this SoC
  */
 struct nmk_pinctrl_soc_data {
-	struct pinctrl_gpio_range *gpio_ranges;
-	unsigned gpio_num_ranges;
 	const struct pinctrl_pin_desc *pins;
 	unsigned npins;
 	const struct nmk_function *functions;
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index 1fc09dc..29a7bb1 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -61,8 +61,8 @@
 	const struct pinconf_ops *ops = pctldev->desc->confops;
 
 	if (!ops || !ops->pin_config_get) {
-		dev_dbg(pctldev->dev, "cannot get pin configuration, missing "
-			"pin_config_get() function in driver\n");
+		dev_dbg(pctldev->dev,
+			"cannot get pin configuration, .pin_config_get missing in driver\n");
 		return -ENOTSUPP;
 	}
 
@@ -202,18 +202,34 @@
 
 #ifdef CONFIG_DEBUG_FS
 
-void pinconf_show_map(struct seq_file *s, struct pinctrl_map const *map)
+static void pinconf_show_config(struct seq_file *s, struct pinctrl_dev *pctldev,
+		      unsigned long *configs, unsigned num_configs)
 {
-	struct pinctrl_dev *pctldev;
 	const struct pinconf_ops *confops;
 	int i;
 
-	pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name);
 	if (pctldev)
 		confops = pctldev->desc->confops;
 	else
 		confops = NULL;
 
+	for (i = 0; i < num_configs; i++) {
+		seq_puts(s, "config ");
+		if (confops && confops->pin_config_config_dbg_show)
+			confops->pin_config_config_dbg_show(pctldev, s,
+							    configs[i]);
+		else
+			seq_printf(s, "%08lx", configs[i]);
+		seq_puts(s, "\n");
+	}
+}
+
+void pinconf_show_map(struct seq_file *s, struct pinctrl_map const *map)
+{
+	struct pinctrl_dev *pctldev;
+
+	pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name);
+
 	switch (map->type) {
 	case PIN_MAP_TYPE_CONFIGS_PIN:
 		seq_printf(s, "pin ");
@@ -227,15 +243,8 @@
 
 	seq_printf(s, "%s\n", map->data.configs.group_or_pin);
 
-	for (i = 0; i < map->data.configs.num_configs; i++) {
-		seq_printf(s, "config ");
-		if (confops && confops->pin_config_config_dbg_show)
-			confops->pin_config_config_dbg_show(pctldev, s,
-						map->data.configs.configs[i]);
-		else
-			seq_printf(s, "%08lx", map->data.configs.configs[i]);
-		seq_printf(s, "\n");
-	}
+	pinconf_show_config(s, pctldev, map->data.configs.configs,
+			    map->data.configs.num_configs);
 }
 
 void pinconf_show_setting(struct seq_file *s,
@@ -243,9 +252,7 @@
 {
 	struct pinctrl_dev *pctldev = setting->pctldev;
 	const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
-	const struct pinconf_ops *confops = pctldev->desc->confops;
 	struct pin_desc *desc;
-	int i;
 
 	switch (setting->type) {
 	case PIN_MAP_TYPE_CONFIGS_PIN:
@@ -269,17 +276,8 @@
 	 * FIXME: We should really get the pin controller to dump the config
 	 * values, so they can be decoded to something meaningful.
 	 */
-	for (i = 0; i < setting->data.configs.num_configs; i++) {
-		seq_printf(s, " ");
-		if (confops && confops->pin_config_config_dbg_show)
-			confops->pin_config_config_dbg_show(pctldev, s,
-				setting->data.configs.configs[i]);
-		else
-			seq_printf(s, "%08lx",
-				   setting->data.configs.configs[i]);
-	}
-
-	seq_printf(s, "\n");
+	pinconf_show_config(s, pctldev, setting->data.configs.configs,
+			    setting->data.configs.num_configs);
 }
 
 static void pinconf_dump_pin(struct pinctrl_dev *pctldev,
@@ -412,10 +410,8 @@
 	const struct pinctrl_map *map;
 	const struct pinctrl_map *found = NULL;
 	struct pinctrl_dev *pctldev;
-	const struct pinconf_ops *confops = NULL;
 	struct dbg_cfg *dbg = &pinconf_dbg_conf;
 	int i, j;
-	unsigned long config;
 
 	mutex_lock(&pinctrl_maps_mutex);
 
@@ -449,16 +445,10 @@
 	}
 
 	pctldev = get_pinctrl_dev_from_devname(found->ctrl_dev_name);
-	config = *found->data.configs.configs;
-	seq_printf(s, "Dev %s has config of %s in state %s: 0x%08lX\n",
-			dbg->dev_name, dbg->pin_name,
-			dbg->state_name, config);
-
-	if (pctldev)
-		confops = pctldev->desc->confops;
-
-	if (confops && confops->pin_config_config_dbg_show)
-		confops->pin_config_config_dbg_show(pctldev, s, config);
+	seq_printf(s, "Dev %s has config of %s in state %s:\n",
+		   dbg->dev_name, dbg->pin_name, dbg->state_name);
+	pinconf_show_config(s, pctldev, found->data.configs.configs,
+			    found->data.configs.num_configs);
 
 exit:
 	mutex_unlock(&pinctrl_maps_mutex);
@@ -470,10 +460,12 @@
  * pinconf_dbg_config_write() - modify the pinctrl config in the pinctrl
  * map, of a dev/pin/state entry based on user entries to pinconf-config
  * @user_buf: contains the modification request with expected format:
- *     modify config_pin <devicename> <state> <pinname> <newvalue>
+ *     modify <config> <devicename> <state> <name> <newvalue>
  * modify is literal string, alternatives like add/delete not supported yet
- * config_pin is literal, alternatives like config_mux not supported yet
- * <devicename> <state> <pinname> are values that should match the pinctrl-maps
+ * <config> is the configuration to be changed. Supported configs are
+ *     "config_pin" or "config_group", alternatives like config_mux are not
+ *     supported yet.
+ * <devicename> <state> <name> are values that should match the pinctrl-maps
  * <newvalue> reflects the new config and is driver dependant
  */
 static ssize_t pinconf_dbg_config_write(struct file *file,
@@ -511,13 +503,19 @@
 	if (strcmp(token, "modify"))
 		return -EINVAL;
 
-	/* Get arg type: "config_pin" type supported so far */
+	/*
+	 * Get arg type: "config_pin" and "config_group"
+	 *                types are supported so far
+	 */
 	token = strsep(&b, " ");
 	if (!token)
 		return -EINVAL;
-	if (strcmp(token, "config_pin"))
+	if (!strcmp(token, "config_pin"))
+		dbg->map_type = PIN_MAP_TYPE_CONFIGS_PIN;
+	else if (!strcmp(token, "config_group"))
+		dbg->map_type = PIN_MAP_TYPE_CONFIGS_GROUP;
+	else
 		return -EINVAL;
-	dbg->map_type = PIN_MAP_TYPE_CONFIGS_PIN;
 
 	/* get arg 'device_name' */
 	token = strsep(&b, " ");
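
For reference, a complete request written to the pinconf-config file now
looks like "modify config_group <devicename> <state> <groupname> <newvalue>",
with config_pin accepted as before. The parser peels the line apart with
strsep(); a stand-alone sketch of that tokenization (user space, hypothetical
input):

    #define _DEFAULT_SOURCE         /* for strsep() on glibc */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[] = "modify config_group foo-dev default grp0 0x4";
            char *b = buf, *token;

            while ((token = strsep(&b, " ")) != NULL)
                    printf("token: '%s'\n", token);
            return 0;
    }
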
diff --git a/drivers/pinctrl/pinctrl-adi2-bf60x.c b/drivers/pinctrl/pinctrl-adi2-bf60x.c
index 4cb59fe..fcfa008 100644
--- a/drivers/pinctrl/pinctrl-adi2-bf60x.c
+++ b/drivers/pinctrl/pinctrl-adi2-bf60x.c
@@ -394,25 +394,25 @@
 static const unsigned short lp0_mux[] = {
 	P_LP0_CLK, P_LP0_ACK, P_LP0_D0, P_LP0_D1, P_LP0_D2,
 	P_LP0_D3, P_LP0_D4, P_LP0_D5, P_LP0_D6, P_LP0_D7,
-        0
+	0
 };
 
 static const unsigned short lp1_mux[] = {
 	P_LP1_CLK, P_LP1_ACK, P_LP1_D0, P_LP1_D1, P_LP1_D2,
 	P_LP1_D3, P_LP1_D4, P_LP1_D5, P_LP1_D6, P_LP1_D7,
-        0
+	0
 };
 
 static const unsigned short lp2_mux[] = {
 	P_LP2_CLK, P_LP2_ACK, P_LP2_D0, P_LP2_D1, P_LP2_D2,
 	P_LP2_D3, P_LP2_D4, P_LP2_D5, P_LP2_D6, P_LP2_D7,
-        0
+	0
 };
 
 static const unsigned short lp3_mux[] = {
 	P_LP3_CLK, P_LP3_ACK, P_LP3_D0, P_LP3_D1, P_LP3_D2,
 	P_LP3_D3, P_LP3_D4, P_LP3_D5, P_LP3_D6, P_LP3_D7,
-        0
+	0
 };
 
 static const struct adi_pin_group adi_pin_groups[] = {
diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c
index c3c3d23..a5976eb 100644
--- a/drivers/pinctrl/pinctrl-adi2.c
+++ b/drivers/pinctrl/pinctrl-adi2.c
@@ -427,10 +427,10 @@
 
 	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
 		writel(pintmask, &pint_regs->edge_set);
-		__irq_set_handler_locked(irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	} else {
 		writel(pintmask, &pint_regs->edge_clear);
-		__irq_set_handler_locked(irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	}
 
 out:
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index d8e3f7c..5e86bb8 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -420,7 +420,7 @@
 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
 		pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
 		pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 		break;
 
 	case IRQ_TYPE_EDGE_FALLING:
@@ -428,7 +428,7 @@
 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
 		pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
 		pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 		break;
 
 	case IRQ_TYPE_EDGE_BOTH:
@@ -436,7 +436,7 @@
 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
 		pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF;
 		pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 		break;
 
 	case IRQ_TYPE_LEVEL_HIGH:
@@ -445,7 +445,7 @@
 		pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
 		pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
 		pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF;
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		break;
 
 	case IRQ_TYPE_LEVEL_LOW:
@@ -454,7 +454,7 @@
 		pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
 		pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
 		pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF;
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		break;
 
 	case IRQ_TYPE_NONE:
@@ -492,8 +492,9 @@
 	.irq_set_type = amd_gpio_irq_set_type,
 };
 
-static void amd_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void amd_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	u32 i;
 	u32 off;
 	u32 reg;
@@ -501,7 +502,7 @@
 	u64 reg64;
 	int handled = 0;
 	unsigned long flags;
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct amd_gpio *gpio_dev = to_amd_gpio(gc);
 
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index a082447..bae0012 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -320,6 +320,9 @@
 static void __iomem *pin_to_controller(struct at91_pinctrl *info,
 				 unsigned int bank)
 {
+	if (!gpio_chips[bank])
+		return NULL;
+
 	return gpio_chips[bank]->regbase;
 }
 
@@ -729,6 +732,10 @@
 		pin = &pins_conf[i];
 		at91_pin_dbg(info->dev, pin);
 		pio = pin_to_controller(info, pin->bank);
+
+		if (!pio)
+			continue;
+
 		mask = pin_to_mask(pin->pin);
 		at91_mux_disable_interrupt(pio, mask);
 		switch (pin->mux) {
@@ -848,6 +855,10 @@
 	*config = 0;
 	dev_dbg(info->dev, "%s:%d, pin_id=%d", __func__, __LINE__, pin_id);
 	pio = pin_to_controller(info, pin_to_bank(pin_id));
+
+	if (!pio)
+		return -EINVAL;
+
 	pin = pin_id % MAX_NB_GPIO_PER_BANK;
 
 	if (at91_mux_get_multidrive(pio, pin))
@@ -889,6 +900,10 @@
 			"%s:%d, pin_id=%d, config=0x%lx",
 			__func__, __LINE__, pin_id, config);
 		pio = pin_to_controller(info, pin_to_bank(pin_id));
+
+		if (!pio)
+			return -EINVAL;
+
 		pin = pin_id % MAX_NB_GPIO_PER_BANK;
 		mask = pin_to_mask(pin);
 
@@ -1444,22 +1459,22 @@
 
 	switch (type) {
 	case IRQ_TYPE_EDGE_RISING:
-		__irq_set_handler_locked(d->irq, handle_simple_irq);
+		irq_set_handler_locked(d, handle_simple_irq);
 		writel_relaxed(mask, pio + PIO_ESR);
 		writel_relaxed(mask, pio + PIO_REHLSR);
 		break;
 	case IRQ_TYPE_EDGE_FALLING:
-		__irq_set_handler_locked(d->irq, handle_simple_irq);
+		irq_set_handler_locked(d, handle_simple_irq);
 		writel_relaxed(mask, pio + PIO_ESR);
 		writel_relaxed(mask, pio + PIO_FELLSR);
 		break;
 	case IRQ_TYPE_LEVEL_LOW:
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		writel_relaxed(mask, pio + PIO_LSR);
 		writel_relaxed(mask, pio + PIO_FELLSR);
 		break;
 	case IRQ_TYPE_LEVEL_HIGH:
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		writel_relaxed(mask, pio + PIO_LSR);
 		writel_relaxed(mask, pio + PIO_REHLSR);
 		break;
@@ -1468,7 +1483,7 @@
 		 * disable additional interrupt modes:
 		 * fall back to default behavior
 		 */
-		__irq_set_handler_locked(d->irq, handle_simple_irq);
+		irq_set_handler_locked(d, handle_simple_irq);
 		writel_relaxed(mask, pio + PIO_AIMDR);
 		return 0;
 	case IRQ_TYPE_NONE:
@@ -1488,28 +1503,6 @@
 	/* the interrupt is already cleared before by reading ISR */
 }
 
-static int gpio_irq_request_res(struct irq_data *d)
-{
-	struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
-	unsigned	pin = d->hwirq;
-	int ret;
-
-	ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin);
-	if (ret)
-		dev_err(at91_gpio->chip.dev, "unable to lock pind %lu IRQ\n",
-			d->hwirq);
-
-	return ret;
-}
-
-static void gpio_irq_release_res(struct irq_data *d)
-{
-	struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
-	unsigned	pin = d->hwirq;
-
-	gpiochip_unlock_as_irq(&at91_gpio->chip, pin);
-}
-
 #ifdef CONFIG_PM
 
 static u32 wakeups[MAX_GPIO_BANKS];
@@ -1585,8 +1578,6 @@
 static struct irq_chip gpio_irqchip = {
 	.name		= "GPIO",
 	.irq_ack	= gpio_irq_ack,
-	.irq_request_resources = gpio_irq_request_res,
-	.irq_release_resources = gpio_irq_release_res,
 	.irq_disable	= gpio_irq_mask,
 	.irq_mask	= gpio_irq_mask,
 	.irq_unmask	= gpio_irq_unmask,
@@ -1596,7 +1587,7 @@
 
 static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct gpio_chip *gpio_chip = irq_desc_get_handler_data(desc);
 	struct at91_gpio_chip *at91_gpio = container_of(gpio_chip,
 					   struct at91_gpio_chip, chip);
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index 29cbbab..3731cc6 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -519,10 +519,11 @@
 	.irq_set_type		= u300_gpio_irq_type,
 };
 
-static void u300_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void u300_gpio_irq_handler(unsigned __irq, struct irq_desc *desc)
 {
-	struct irq_chip *parent_chip = irq_get_chip(irq);
-	struct gpio_chip *chip = irq_get_handler_data(irq);
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct irq_chip *parent_chip = irq_desc_get_chip(desc);
+	struct gpio_chip *chip = irq_desc_get_handler_data(desc);
 	struct u300_gpio *gpio = to_u300_gpio(chip);
 	struct u300_gpio_port *port = &gpio->ports[irq - chip->base];
 	int pinoffset = port->number << 3; /* get the right stride */
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
new file mode 100644
index 0000000..461fffc
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -0,0 +1,378 @@
+/*
+ *  Driver for Conexant Digicolor General Purpose Pin Mapping
+ *
+ * Author: Baruch Siach <baruch@tkos.co.il>
+ *
+ * Copyright (C) 2015 Paradox Innovation Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * TODO:
+ * - GPIO interrupt support
+ * - Pin pad configuration (pull up/down, strength)
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/spinlock.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include "pinctrl-utils.h"
+
+#define DRIVER_NAME	"pinctrl-digicolor"
+
+#define GP_CLIENTSEL(clct)	((clct)*8 + 0x20)
+#define GP_DRIVE0(clct)		(GP_CLIENTSEL(clct) + 2)
+#define GP_OUTPUT0(clct)	(GP_CLIENTSEL(clct) + 3)
+#define GP_INPUT(clct)		(GP_CLIENTSEL(clct) + 6)
+
+#define PIN_COLLECTIONS		('R' - 'A' + 1)
+#define PINS_PER_COLLECTION	8
+#define PINS_COUNT		(PIN_COLLECTIONS * PINS_PER_COLLECTION)
+
+struct dc_pinmap {
+	void __iomem		*regs;
+	struct device		*dev;
+	struct pinctrl_dev	*pctl;
+
+	struct pinctrl_desc	*desc;
+	const char		*pin_names[PINS_COUNT];
+
+	struct gpio_chip	chip;
+	spinlock_t		lock;
+};
+
+static int dc_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	return PINS_COUNT;
+}
+
+static const char *dc_get_group_name(struct pinctrl_dev *pctldev,
+				     unsigned selector)
+{
+	struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pctldev);
+
+	/* Exactly one group per pin */
+	return pmap->desc->pins[selector].name;
+}
+
+static int dc_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
+			     const unsigned **pins,
+			     unsigned *num_pins)
+{
+	struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins = &pmap->desc->pins[selector].number;
+	*num_pins = 1;
+
+	return 0;
+}
+
+static struct pinctrl_ops dc_pinctrl_ops = {
+	.get_groups_count	= dc_get_groups_count,
+	.get_group_name		= dc_get_group_name,
+	.get_group_pins		= dc_get_group_pins,
+	.dt_node_to_map		= pinconf_generic_dt_node_to_map_pin,
+	.dt_free_map		= pinctrl_utils_dt_free_map,
+};
+
+static const char *const dc_functions[] = {
+	"gpio",
+	"client_a",
+	"client_b",
+	"client_c",
+};
+
+static int dc_get_functions_count(struct pinctrl_dev *pctldev)
+{
+	return ARRAY_SIZE(dc_functions);
+}
+
+static const char *dc_get_fname(struct pinctrl_dev *pctldev, unsigned selector)
+{
+	return dc_functions[selector];
+}
+
+static int dc_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
+			 const char * const **groups,
+			 unsigned * const num_groups)
+{
+	struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = pmap->pin_names;
+	*num_groups = PINS_COUNT;
+
+	return 0;
+}
+
+static void dc_client_sel(int pin_num, int *reg, int *bit)
+{
+	*bit = (pin_num % PINS_PER_COLLECTION) * 2;
+	*reg = GP_CLIENTSEL(pin_num/PINS_PER_COLLECTION);
+
+	if (*bit >= PINS_PER_COLLECTION) {
+		*bit -= PINS_PER_COLLECTION;
+		*reg += 1;
+	}
+}
+
+static int dc_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
+		      unsigned group)
+{
+	struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pctldev);
+	int bit_off, reg_off;
+	u8 reg;
+
+	dc_client_sel(group, &reg_off, &bit_off);
+
+	reg = readb_relaxed(pmap->regs + reg_off);
+	reg &= ~(3 << bit_off);
+	reg |= (selector << bit_off);
+	writeb_relaxed(reg, pmap->regs + reg_off);
+
+	return 0;
+}
+
+static int dc_pmx_request_gpio(struct pinctrl_dev *pcdev,
+			       struct pinctrl_gpio_range *range,
+			       unsigned offset)
+{
+	struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pcdev);
+	int bit_off, reg_off;
+	u8 reg;
+
+	dc_client_sel(offset, &reg_off, &bit_off);
+
+	reg = readb_relaxed(pmap->regs + reg_off);
+	if ((reg & (3 << bit_off)) != 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+static struct pinmux_ops dc_pmxops = {
+	.get_functions_count	= dc_get_functions_count,
+	.get_function_name	= dc_get_fname,
+	.get_function_groups	= dc_get_groups,
+	.set_mux		= dc_set_mux,
+	.gpio_request_enable	= dc_pmx_request_gpio,
+};
+
+static int dc_gpio_request(struct gpio_chip *chip, unsigned gpio)
+{
+	return pinctrl_request_gpio(chip->base + gpio);
+}
+
+static void dc_gpio_free(struct gpio_chip *chip, unsigned gpio)
+{
+	pinctrl_free_gpio(chip->base + gpio);
+}
+
+static int dc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+	struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
+	int reg_off = GP_DRIVE0(gpio/PINS_PER_COLLECTION);
+	int bit_off = gpio % PINS_PER_COLLECTION;
+	u8 drive;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pmap->lock, flags);
+	drive = readb_relaxed(pmap->regs + reg_off);
+	drive &= ~BIT(bit_off);
+	writeb_relaxed(drive, pmap->regs + reg_off);
+	spin_unlock_irqrestore(&pmap->lock, flags);
+
+	return 0;
+}
+
+static void dc_gpio_set(struct gpio_chip *chip, unsigned gpio, int value);
+
+static int dc_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+				    int value)
+{
+	struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
+	int reg_off = GP_DRIVE0(gpio/PINS_PER_COLLECTION);
+	int bit_off = gpio % PINS_PER_COLLECTION;
+	u8 drive;
+	unsigned long flags;
+
+	dc_gpio_set(chip, gpio, value);
+
+	spin_lock_irqsave(&pmap->lock, flags);
+	drive = readb_relaxed(pmap->regs + reg_off);
+	drive |= BIT(bit_off);
+	writeb_relaxed(drive, pmap->regs + reg_off);
+	spin_unlock_irqrestore(&pmap->lock, flags);
+
+	return 0;
+}
+
+static int dc_gpio_get(struct gpio_chip *chip, unsigned gpio)
+{
+	struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
+	int reg_off = GP_INPUT(gpio/PINS_PER_COLLECTION);
+	int bit_off = gpio % PINS_PER_COLLECTION;
+	u8 input;
+
+	input = readb_relaxed(pmap->regs + reg_off);
+
+	return !!(input & BIT(bit_off));
+}
+
+static void dc_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
+{
+	struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
+	int reg_off = GP_OUTPUT0(gpio/PINS_PER_COLLECTION);
+	int bit_off = gpio % PINS_PER_COLLECTION;
+	u8 output;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pmap->lock, flags);
+	output = readb_relaxed(pmap->regs + reg_off);
+	if (value)
+		output |= BIT(bit_off);
+	else
+		output &= ~BIT(bit_off);
+	writeb_relaxed(output, pmap->regs + reg_off);
+	spin_unlock_irqrestore(&pmap->lock, flags);
+}
+
+static int dc_gpiochip_add(struct dc_pinmap *pmap, struct device_node *np)
+{
+	struct gpio_chip *chip = &pmap->chip;
+	int ret;
+
+	chip->label		= DRIVER_NAME;
+	chip->dev		= pmap->dev;
+	chip->request		= dc_gpio_request;
+	chip->free		= dc_gpio_free;
+	chip->direction_input	= dc_gpio_direction_input;
+	chip->direction_output	= dc_gpio_direction_output;
+	chip->get		= dc_gpio_get;
+	chip->set		= dc_gpio_set;
+	chip->base		= -1;
+	chip->ngpio		= PINS_COUNT;
+	chip->of_node		= np;
+	chip->of_gpio_n_cells	= 2;
+
+	spin_lock_init(&pmap->lock);
+
+	ret = gpiochip_add(chip);
+	if (ret < 0)
+		return ret;
+
+	ret = gpiochip_add_pin_range(chip, dev_name(pmap->dev), 0, 0,
+				     PINS_COUNT);
+	if (ret < 0) {
+		gpiochip_remove(chip);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int dc_pinctrl_probe(struct platform_device *pdev)
+{
+	struct dc_pinmap *pmap;
+	struct resource *r;
+	struct pinctrl_pin_desc *pins;
+	struct pinctrl_desc *pctl_desc;
+	char *pin_names;
+	int name_len = strlen("GP_xx") + 1;
+	int i, j, ret;
+
+	pmap = devm_kzalloc(&pdev->dev, sizeof(*pmap), GFP_KERNEL);
+	if (!pmap)
+		return -ENOMEM;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pmap->regs = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(pmap->regs))
+		return PTR_ERR(pmap->regs);
+
+	pins = devm_kzalloc(&pdev->dev, sizeof(*pins)*PINS_COUNT, GFP_KERNEL);
+	if (!pins)
+		return -ENOMEM;
+	pin_names = devm_kzalloc(&pdev->dev, name_len * PINS_COUNT,
+				 GFP_KERNEL);
+	if (!pin_names)
+		return -ENOMEM;
+
+	for (i = 0; i < PIN_COLLECTIONS; i++) {
+		for (j = 0; j < PINS_PER_COLLECTION; j++) {
+			int pin_id = i*PINS_PER_COLLECTION + j;
+			char *name = &pin_names[pin_id * name_len];
+
+			snprintf(name, name_len, "GP_%c%c", 'A'+i, '0'+j);
+
+			pins[pin_id].number = pin_id;
+			pins[pin_id].name = name;
+			pmap->pin_names[pin_id] = name;
+		}
+	}
+
+	pctl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctl_desc), GFP_KERNEL);
+	if (!pctl_desc)
+		return -ENOMEM;
+
+	pctl_desc->name = DRIVER_NAME;
+	pctl_desc->owner = THIS_MODULE;
+	pctl_desc->pctlops = &dc_pinctrl_ops;
+	pctl_desc->pmxops = &dc_pmxops;
+	pctl_desc->npins = PINS_COUNT;
+	pctl_desc->pins = pins;
+	pmap->desc = pctl_desc;
+
+	pmap->dev = &pdev->dev;
+
+	pmap->pctl = pinctrl_register(pctl_desc, &pdev->dev, pmap);
+	if (!pmap->pctl) {
+		dev_err(&pdev->dev, "pinctrl driver registration failed\n");
+		return -EINVAL;
+	}
+
+	ret = dc_gpiochip_add(pmap, pdev->dev.of_node);
+	if (ret < 0) {
+		pinctrl_unregister(pmap->pctl);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int dc_pinctrl_remove(struct platform_device *pdev)
+{
+	struct dc_pinmap *pmap = platform_get_drvdata(pdev);
+
+	pinctrl_unregister(pmap->pctl);
+	gpiochip_remove(&pmap->chip);
+
+	return 0;
+}
+
+static const struct of_device_id dc_pinctrl_ids[] = {
+	{ .compatible = "cnxt,cx92755-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_pinctrl_ids);
+
+static struct platform_driver dc_pinctrl_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = dc_pinctrl_ids,
+	},
+	.probe = dc_pinctrl_probe,
+	.remove = dc_pinctrl_remove,
+};
+module_platform_driver(dc_pinctrl_driver);
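
In the new driver, each pin's client select is a 2-bit field, so one 8-pin
collection occupies 16 bits split across two byte-wide registers;
dc_client_sel() reduces a pin number to a register offset plus bit offset. A
stand-alone check of the arithmetic (the pin numbers are illustrative):

    #include <stdio.h>

    #define PINS_PER_COLLECTION 8
    #define GP_CLIENTSEL(clct) ((clct) * 8 + 0x20)

    static void dc_client_sel(int pin_num, int *reg, int *bit)
    {
            *bit = (pin_num % PINS_PER_COLLECTION) * 2;
            *reg = GP_CLIENTSEL(pin_num / PINS_PER_COLLECTION);
            if (*bit >= PINS_PER_COLLECTION) {      /* upper byte */
                    *bit -= PINS_PER_COLLECTION;
                    *reg += 1;
            }
    }

    int main(void)
    {
            int reg, bit;

            dc_client_sel(11, &reg, &bit);  /* GP_B3: reg 0x28, bit 6 */
            printf("pin 11: reg 0x%02x, bit %d\n", (unsigned)reg, bit);
            dc_client_sel(12, &reg, &bit);  /* GP_B4 spills: reg 0x29, bit 0 */
            printf("pin 12: reg 0x%02x, bit %d\n", (unsigned)reg, bit);
            return 0;
    }
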
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index 347c763..f0bebbe 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -37,6 +37,9 @@
 #define LPC18XX_SCU_PIN_EHD_MASK	0x300
 #define LPC18XX_SCU_PIN_EHD_POS		8
 
+#define LPC18XX_SCU_USB1_EPD		BIT(2)
+#define LPC18XX_SCU_USB1_EPWR		BIT(4)
+
 #define LPC18XX_SCU_I2C0_EFP		BIT(0)
 #define LPC18XX_SCU_I2C0_EHD		BIT(2)
 #define LPC18XX_SCU_I2C0_EZI		BIT(3)
@@ -617,8 +620,31 @@
 
 static int lpc18xx_pconf_get_usb1(enum pin_config_param param, int *arg, u32 reg)
 {
-	/* TODO */
-	return -ENOTSUPP;
+	switch (param) {
+	case PIN_CONFIG_LOW_POWER_MODE:
+		if (reg & LPC18XX_SCU_USB1_EPWR)
+			*arg = 0;
+		else
+			*arg = 1;
+		break;
+
+	case PIN_CONFIG_BIAS_DISABLE:
+		if (reg & LPC18XX_SCU_USB1_EPD)
+			return -EINVAL;
+		break;
+
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		if (reg & LPC18XX_SCU_USB1_EPD)
+			*arg = 1;
+		else
+			return -EINVAL;
+		break;
+
+	default:
+		return -ENOTSUPP;
+	}
+
+	return 0;
 }
 
 static int lpc18xx_pconf_get_i2c0(enum pin_config_param param, int *arg, u32 reg,
@@ -782,8 +808,28 @@
 				  enum pin_config_param param,
 				  u16 param_val, u32 *reg)
 {
-	/* TODO */
-	return -ENOTSUPP;
+	switch (param) {
+	case PIN_CONFIG_LOW_POWER_MODE:
+		if (param_val)
+			*reg &= ~LPC18XX_SCU_USB1_EPWR;
+		else
+			*reg |= LPC18XX_SCU_USB1_EPWR;
+		break;
+
+	case PIN_CONFIG_BIAS_DISABLE:
+		*reg &= ~LPC18XX_SCU_USB1_EPD;
+		break;
+
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		*reg |= LPC18XX_SCU_USB1_EPD;
+		break;
+
+	default:
+		dev_err(pctldev->dev, "Property not supported\n");
+		return -ENOTSUPP;
+	}
+
+	return 0;
 }
 
 static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
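
The get/set pair above follows the usual pinconf convention: return 0 and fill
*arg when the parameter applies, -EINVAL when the parameter is understood but
not currently active on the pin, and -ENOTSUPP when the pad type cannot express
it at all. A minimal self-contained model of that convention, reusing the EPD
bit value from the patch (the helper name is hypothetical):

	#include <errno.h>

	#define LPC18XX_SCU_USB1_EPD	(1 << 2)

	static int get_bias_pull_down(unsigned int reg, int *arg)
	{
		if (reg & LPC18XX_SCU_USB1_EPD) {
			*arg = 1;	/* pull-down enabled: report it */
			return 0;
		}
		return -EINVAL;		/* known parameter, bias not active */
	}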
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 63100be..3dc2ae1 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1310,9 +1310,11 @@
 	return 0;
 }
 
-static void pistachio_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void pistachio_gpio_irq_handler(unsigned int __irq,
+				       struct irq_desc *desc)
 {
-	struct gpio_chip *gc = irq_get_handler_data(irq);
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct pistachio_gpio_bank *bank = gc_to_bank(gc);
 	struct irq_chip *chip = irq_get_chip(irq);
 	unsigned long pending;
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 9affcd7..c5246c0 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -945,6 +945,7 @@
 	if (ret < 0)
 		return ret;
 
+	clk_enable(bank->clk);
 	spin_lock_irqsave(&bank->slock, flags);
 
 	data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
@@ -956,6 +957,7 @@
 	writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
 
 	spin_unlock_irqrestore(&bank->slock, flags);
+	clk_disable(bank->clk);
 
 	return 0;
 }
@@ -1389,6 +1391,7 @@
 	unsigned long flags;
 	u32 data;
 
+	clk_enable(bank->clk);
 	spin_lock_irqsave(&bank->slock, flags);
 
 	data = readl(reg);
@@ -1398,6 +1401,7 @@
 	writel(data, reg);
 
 	spin_unlock_irqrestore(&bank->slock, flags);
+	clk_disable(bank->clk);
 }
 
 /*
@@ -1409,7 +1413,9 @@
 	struct rockchip_pin_bank *bank = gc_to_pin_bank(gc);
 	u32 data;
 
+	clk_enable(bank->clk);
 	data = readl(bank->reg_base + GPIO_EXT_PORT);
+	clk_disable(bank->clk);
 	data >>= offset;
 	data &= 1;
 	return data;
@@ -1469,10 +1475,10 @@
  * Interrupt handling
  */
 
-static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void rockchip_irq_demux(unsigned int __irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct rockchip_pin_bank *bank = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc);
 	u32 pend;
 
 	dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name);
@@ -1482,7 +1488,7 @@
 	pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS);
 
 	while (pend) {
-		unsigned int virq;
+		unsigned int irq, virq;
 
 		irq = __ffs(pend);
 		pend &= ~BIT(irq);
@@ -1546,6 +1552,7 @@
 	if (ret < 0)
 		return ret;
 
+	clk_enable(bank->clk);
 	spin_lock_irqsave(&bank->slock, flags);
 
 	data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
@@ -1555,9 +1562,9 @@
 	spin_unlock_irqrestore(&bank->slock, flags);
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 
 	spin_lock_irqsave(&bank->slock, flags);
 	irq_gc_lock(gc);
@@ -1603,6 +1610,7 @@
 	default:
 		irq_gc_unlock(gc);
 		spin_unlock_irqrestore(&bank->slock, flags);
+		clk_disable(bank->clk);
 		return -EINVAL;
 	}
 
@@ -1611,6 +1619,7 @@
 
 	irq_gc_unlock(gc);
 	spin_unlock_irqrestore(&bank->slock, flags);
+	clk_disable(bank->clk);
 
 	return 0;
 }
@@ -1620,8 +1629,10 @@
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 	struct rockchip_pin_bank *bank = gc->private;
 
+	clk_enable(bank->clk);
 	bank->saved_masks = irq_reg_readl(gc, GPIO_INTMASK);
 	irq_reg_writel(gc, ~gc->wake_active, GPIO_INTMASK);
+	clk_disable(bank->clk);
 }
 
 static void rockchip_irq_resume(struct irq_data *d)
@@ -1629,7 +1640,27 @@
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 	struct rockchip_pin_bank *bank = gc->private;
 
+	clk_enable(bank->clk);
 	irq_reg_writel(gc, bank->saved_masks, GPIO_INTMASK);
+	clk_disable(bank->clk);
+}
+
+static void rockchip_irq_gc_mask_clr_bit(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct rockchip_pin_bank *bank = gc->private;
+
+	clk_enable(bank->clk);
+	irq_gc_mask_clr_bit(d);
+}
+
+static void rockchip_irq_gc_mask_set_bit(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct rockchip_pin_bank *bank = gc->private;
+
+	irq_gc_mask_set_bit(d);
+	clk_disable(bank->clk);
 }
 
 static int rockchip_interrupts_register(struct platform_device *pdev,
@@ -1640,7 +1671,7 @@
 	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 	struct irq_chip_generic *gc;
 	int ret;
-	int i;
+	int i, j;
 
 	for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
 		if (!bank->valid) {
@@ -1649,11 +1680,19 @@
 			continue;
 		}
 
+		ret = clk_enable(bank->clk);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to enable clock for bank %s\n",
+				bank->name);
+			continue;
+		}
+
 		bank->domain = irq_domain_add_linear(bank->of_node, 32,
 						&irq_generic_chip_ops, NULL);
 		if (!bank->domain) {
 			dev_warn(&pdev->dev, "could not initialize irq domain for bank %s\n",
 				 bank->name);
+			clk_disable(bank->clk);
 			continue;
 		}
 
@@ -1664,6 +1703,7 @@
 			dev_err(&pdev->dev, "could not alloc generic chips for bank %s\n",
 				bank->name);
 			irq_domain_remove(bank->domain);
+			clk_disable(bank->clk);
 			continue;
 		}
 
@@ -1681,16 +1721,23 @@
 		gc->chip_types[0].regs.mask = GPIO_INTMASK;
 		gc->chip_types[0].regs.ack = GPIO_PORTS_EOI;
 		gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
-		gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
-		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+		gc->chip_types[0].chip.irq_mask = rockchip_irq_gc_mask_set_bit;
+		gc->chip_types[0].chip.irq_unmask =
+						  rockchip_irq_gc_mask_clr_bit;
 		gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
 		gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
 		gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
 		gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
 		gc->wake_enabled = IRQ_MSK(bank->nr_pins);
 
-		irq_set_handler_data(bank->irq, bank);
-		irq_set_chained_handler(bank->irq, rockchip_irq_demux);
+		irq_set_chained_handler_and_data(bank->irq,
+						 rockchip_irq_demux, bank);
+
+		/* map the gpio irqs here, when the clock is still running */
+		for (j = 0 ; j < 32 ; j++)
+			irq_create_mapping(bank->domain, j);
+
+		clk_disable(bank->clk);
 	}
 
 	return 0;
@@ -1808,7 +1855,7 @@
 	if (IS_ERR(bank->clk))
 		return PTR_ERR(bank->clk);
 
-	return clk_prepare_enable(bank->clk);
+	return clk_prepare(bank->clk);
 }
 
 static const struct of_device_id rockchip_pinctrl_dt_match[];
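
The clk_enable()/clk_disable() pairs above, together with the switch from
clk_prepare_enable() to clk_prepare() at probe, implement a simple gating
discipline: prepare once in sleepable probe context, then hold the atomic-safe
enable only for the duration of each register access. A sketch of the
resulting access pattern, assuming the driver's own bank type and field names:

	#include <linux/clk.h>
	#include <linux/io.h>

	/* Sketch only: rockchip_pin_bank is defined by the driver. */
	static u32 rockchip_bank_readl(struct rockchip_pin_bank *bank, int reg)
	{
		u32 val;

		clk_enable(bank->clk);	/* clock was clk_prepare()d at probe */
		val = readl_relaxed(bank->reg_base + reg);
		clk_disable(bank->clk);

		return val;
	}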
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 0b8d480..bf548c2 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1684,7 +1684,7 @@
 	struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip;
 
-	chip = irq_get_chip(irq);
+	chip = irq_desc_get_chip(desc);
 	chained_irq_enter(chip, desc);
 	pcs_irq_handle(pcs_soc);
 	/* REVISIT: export and add handle_bad_irq(irq, desc)? */
@@ -1716,12 +1716,7 @@
 	irq_set_chip_data(irq, pcs_soc);
 	irq_set_chip_and_handler(irq, &pcs->chip,
 				 handle_level_irq);
-
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
 
 	return 0;
 }
@@ -1768,9 +1763,9 @@
 			return res;
 		}
 	} else {
-		irq_set_handler_data(pcs_soc->irq, pcs_soc);
-		irq_set_chained_handler(pcs_soc->irq,
-					pcs_irq_chain_handler);
+		irq_set_chained_handler_and_data(pcs_soc->irq,
+						 pcs_irq_chain_handler,
+						 pcs_soc);
 	}
 
 	/*
@@ -1983,7 +1978,6 @@
 };
 
 static const struct pcs_soc_data pinctrl_single_dra7 = {
-	.flags = PCS_QUIRK_SHARED_IRQ,
 	.irq_enable_mask = (1 << 24),	/* WAKEUPENABLE */
 	.irq_status_mask = (1 << 25),	/* WAKEUPEVENT */
 };
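
pinctrl-single, pistachio, rockchip, st and msm all receive the same treatment
in this series: chained IRQ handlers stop relying on the passed-in irq number
and the irq_get_*() accessors, derive chip and handler data from the
descriptor instead, and collapse registration into
irq_set_chained_handler_and_data(). The target pattern in isolation; the
handler, driver names and foo_priv type are hypothetical:

	#include <linux/irq.h>
	#include <linux/irqchip/chained_irq.h>

	struct foo_priv;

	static void foo_irq_handler(unsigned int irq, struct irq_desc *desc)
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);
		struct foo_priv *priv = irq_desc_get_handler_data(desc);

		chained_irq_enter(chip, desc);
		/* demux child interrupts, e.g. via irq_find_mapping() */
		chained_irq_exit(chip, desc);
	}

	static void foo_register(unsigned int parent_irq, struct foo_priv *priv)
	{
		irq_set_chained_handler_and_data(parent_irq,
						 foo_irq_handler, priv);
	}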
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index c262e5f..f8338d2 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1463,7 +1463,7 @@
 static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 {
 	/* interrupt dedicated per bank */
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct st_gpio_bank *bank = gpio_chip_to_bank(gc);
 
@@ -1474,8 +1474,8 @@
 
 static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct st_pinctrl *info = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct st_pinctrl *info = irq_desc_get_handler_data(desc);
 	unsigned long status;
 	int n;
 
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index 7ce23b6..5aafea8 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -706,10 +706,10 @@
 		"gpio0_39_grp", "gpio0_41_grp", "gpio0_43_grp",
 		"gpio0_45_grp", "gpio0_47_grp", "gpio0_49_grp",
 		"gpio0_51_grp", "gpio0_53_grp", "sdio1_emio_wp_grp"};
-static const char * const smc0_nor_groups[] = {"smc0_nor"};
+static const char * const smc0_nor_groups[] = {"smc0_nor_grp"};
 static const char * const smc0_nor_cs1_groups[] = {"smc0_nor_cs1_grp"};
 static const char * const smc0_nor_addr25_groups[] = {"smc0_nor_addr25_grp"};
-static const char * const smc0_nand_groups[] = {"smc0_nand"};
+static const char * const smc0_nand_groups[] = {"smc0_nand_grp"};
 static const char * const can0_groups[] = {"can0_0_grp", "can0_1_grp",
 		"can0_2_grp", "can0_3_grp", "can0_4_grp", "can0_5_grp",
 		"can0_6_grp", "can0_7_grp", "can0_8_grp", "can0_9_grp",
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index e7ae890..67e08cb 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -322,8 +322,7 @@
 		selector++;
 	}
 
-	pr_err("%s does not support function %s\n",
-	       pinctrl_dev_get_name(pctldev), function);
+	dev_err(pctldev->dev, "function '%s' not supported\n", function);
 	return -EINVAL;
 }
 
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 58f5632..383263a 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -63,6 +63,14 @@
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
 	  Qualcomm TLMM block found on the Qualcomm 8916 platform.
 
+config PINCTRL_QDF2XXX
+	tristate "Qualcomm Technologies QDF2xxx pin controller driver"
+	depends on GPIOLIB && ACPI
+	select PINCTRL_MSM
+	help
+	  This is the GPIO driver for the TLMM block found on the
+	  Qualcomm Technologies QDF2xxx SOCs.
+
 config PINCTRL_QCOM_SPMI_PMIC
        tristate "Qualcomm SPMI PMIC pin controller driver"
        depends on GPIOLIB && OF && SPMI
@@ -76,4 +84,16 @@
          which are using SPMI for communication with SoC. Example PMIC's
          devices are pm8841, pm8941 and pma8084.
 
+config PINCTRL_QCOM_SSBI_PMIC
+       tristate "Qualcomm SSBI PMIC pin controller driver"
+       depends on GPIOLIB && OF
+       select PINMUX
+       select PINCONF
+       select GENERIC_PINCONF
+       help
+         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+         GPIO and MPP blocks found in Qualcomm PMIC chips that use SSBI
+         to communicate with the SoC. Example PMIC devices are pm8058
+         and pm8921.
+
 endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 3666c70..13b190e 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -7,5 +7,8 @@
 obj-$(CONFIG_PINCTRL_MSM8960)	+= pinctrl-msm8960.o
 obj-$(CONFIG_PINCTRL_MSM8X74)	+= pinctrl-msm8x74.o
 obj-$(CONFIG_PINCTRL_MSM8916)	+= pinctrl-msm8916.o
+obj-$(CONFIG_PINCTRL_QDF2XXX)	+= pinctrl-qdf2xxx.o
 obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-gpio.o
 obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
+obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
+obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index e457d52..492cdd5 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -28,6 +28,7 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/reboot.h>
+#include <linux/pm.h>
 
 #include "../core.h"
 #include "../pinconf.h"
@@ -733,9 +734,9 @@
 	spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 	return 0;
 }
@@ -764,12 +765,13 @@
 	.irq_set_wake   = msm_gpio_irq_set_wake,
 };
 
-static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void msm_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	const struct msm_pingroup *g;
 	struct msm_pinctrl *pctrl = to_msm_pinctrl(gc);
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int irq_pin;
 	int handled = 0;
 	u32 val;
@@ -855,6 +857,13 @@
 	return NOTIFY_DONE;
 }
 
+static struct msm_pinctrl *poweroff_pctrl;
+
+static void msm_ps_hold_poweroff(void)
+{
+	msm_ps_hold_restart(&poweroff_pctrl->restart_nb, 0, NULL);
+}
+
 static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
 {
 	int i;
@@ -867,6 +876,8 @@
 			if (register_restart_handler(&pctrl->restart_nb))
 				dev_err(pctrl->dev,
 					"failed to setup restart handler.\n");
+			poweroff_pctrl = pctrl;
+			pm_power_off = msm_ps_hold_poweroff;
 			break;
 		}
 }
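
The msm change above reuses the PS_HOLD restart path for power-off:
pm_power_off is the kernel's single global poweroff hook, a bare void function
pointer, so the driver stashes its context in a static variable and points the
hook at a thin wrapper. The shape of that idiom, with illustrative names:

	#include <linux/pm.h>

	struct foo_ctx;

	static struct foo_ctx *poweroff_ctx;	/* set once at probe */

	static void foo_poweroff(void)
	{
		/* drop PS_HOLD (or equivalent) using poweroff_ctx */
	}

	static void foo_setup_poweroff(struct foo_ctx *ctx)
	{
		poweroff_ctx = ctx;
		pm_power_off = foo_poweroff;
	}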
diff --git a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
new file mode 100644
index 0000000..e9ff3bc
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * GPIO and pin control functions on this SOC are handled by the "TLMM"
+ * device.  The driver which controls this device is pinctrl-msm.c.  Each
+ * SOC with a TLMM is expected to create a client driver that registers
+ * with pinctrl-msm.c.  This means that all TLMM drivers are pin control
+ * drivers.
+ *
+ * This pin control driver is intended to be used only on an ACPI-enabled
+ * system.  As such, UEFI will handle all pin control configuration, so
+ * this driver does not provide pin control functions.  It is effectively
+ * a GPIO-only driver.  The alternative is to duplicate the GPIO code of
+ * pinctrl-msm.c into another driver.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/acpi.h>
+
+#include "pinctrl-msm.h"
+
+static struct msm_pinctrl_soc_data qdf2xxx_pinctrl;
+
+static int qdf2xxx_pinctrl_probe(struct platform_device *pdev)
+{
+	struct pinctrl_pin_desc *pins;
+	struct msm_pingroup *groups;
+	unsigned int i;
+	u32 num_gpios;
+	int ret;
+
+	/* Query the number of GPIOs from ACPI */
+	ret = device_property_read_u32(&pdev->dev, "num-gpios", &num_gpios);
+	if (ret < 0)
+		return ret;
+
+	if (!num_gpios) {
+		dev_warn(&pdev->dev, "invalid num-gpios property\n");
+		return -ENODEV;
+	}
+
+	pins = devm_kcalloc(&pdev->dev, num_gpios,
+		sizeof(struct pinctrl_pin_desc), GFP_KERNEL);
+	groups = devm_kcalloc(&pdev->dev, num_gpios,
+		sizeof(struct msm_pingroup), GFP_KERNEL);
+	if (!pins || !groups)
+		return -ENOMEM;
+
+	for (i = 0; i < num_gpios; i++) {
+		pins[i].number = i;
+
+		groups[i].npins = 1;
+		groups[i].pins = &pins[i].number;
+		groups[i].ctl_reg = 0x10000 * i;
+		groups[i].io_reg = 0x04 + 0x10000 * i;
+		groups[i].intr_cfg_reg = 0x08 + 0x10000 * i;
+		groups[i].intr_status_reg = 0x0c + 0x10000 * i;
+		groups[i].intr_target_reg = 0x08 + 0x10000 * i;
+
+		groups[i].mux_bit = 2;
+		groups[i].pull_bit = 0;
+		groups[i].drv_bit = 6;
+		groups[i].oe_bit = 9;
+		groups[i].in_bit = 0;
+		groups[i].out_bit = 1;
+		groups[i].intr_enable_bit = 0;
+		groups[i].intr_status_bit = 0;
+		groups[i].intr_target_bit = 5;
+		groups[i].intr_target_kpss_val = 1;
+		groups[i].intr_raw_status_bit = 4;
+		groups[i].intr_polarity_bit = 1;
+		groups[i].intr_detection_bit = 2;
+		groups[i].intr_detection_width = 2;
+	}
+
+	qdf2xxx_pinctrl.pins = pins;
+	qdf2xxx_pinctrl.groups = groups;
+	qdf2xxx_pinctrl.npins = num_gpios;
+	qdf2xxx_pinctrl.ngroups = num_gpios;
+	qdf2xxx_pinctrl.ngpios = num_gpios;
+
+	return msm_pinctrl_probe(pdev, &qdf2xxx_pinctrl);
+}
+
+static const struct acpi_device_id qdf2xxx_acpi_ids[] = {
+	{"QCOM8001"},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, qdf2xxx_acpi_ids);
+
+static struct platform_driver qdf2xxx_pinctrl_driver = {
+	.driver = {
+		.name = "qdf2xxx-pinctrl",
+		.acpi_match_table = ACPI_PTR(qdf2xxx_acpi_ids),
+	},
+	.probe = qdf2xxx_pinctrl_probe,
+	.remove = msm_pinctrl_remove,
+};
+
+static int __init qdf2xxx_pinctrl_init(void)
+{
+	return platform_driver_register(&qdf2xxx_pinctrl_driver);
+}
+arch_initcall(qdf2xxx_pinctrl_init);
+
+static void __exit qdf2xxx_pinctrl_exit(void)
+{
+	platform_driver_unregister(&qdf2xxx_pinctrl_driver);
+}
+module_exit(qdf2xxx_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies QDF2xxx pin control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 3121de9..e3be3ce 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -61,7 +61,9 @@
 #define PMIC_MPP_REG_DIG_PULL_CTL		0x42
 #define PMIC_MPP_REG_DIG_IN_CTL			0x43
 #define PMIC_MPP_REG_EN_CTL			0x46
+#define PMIC_MPP_REG_AOUT_CTL			0x48
 #define PMIC_MPP_REG_AIN_CTL			0x4a
+#define PMIC_MPP_REG_SINK_CTL			0x4c
 
 /* PMIC_MPP_REG_MODE_CTL */
 #define PMIC_MPP_REG_MODE_VALUE_MASK		0x1
@@ -85,11 +87,25 @@
 #define PMIC_MPP_REG_AIN_ROUTE_SHIFT		0
 #define PMIC_MPP_REG_AIN_ROUTE_MASK		0x7
 
+#define PMIC_MPP_MODE_DIGITAL_INPUT		0
+#define PMIC_MPP_MODE_DIGITAL_OUTPUT		1
+#define PMIC_MPP_MODE_DIGITAL_BIDIR		2
+#define PMIC_MPP_MODE_ANALOG_BIDIR		3
+#define PMIC_MPP_MODE_ANALOG_INPUT		4
+#define PMIC_MPP_MODE_ANALOG_OUTPUT		5
+#define PMIC_MPP_MODE_CURRENT_SINK		6
+
+#define PMIC_MPP_SELECTOR_NORMAL		0
+#define PMIC_MPP_SELECTOR_PAIRED		1
+#define PMIC_MPP_SELECTOR_DTEST_FIRST		4
+
 #define PMIC_MPP_PHYSICAL_OFFSET		1
 
 /* Qualcomm specific pin configurations */
 #define PMIC_MPP_CONF_AMUX_ROUTE		(PIN_CONFIG_END + 1)
-#define PMIC_MPP_CONF_ANALOG_MODE		(PIN_CONFIG_END + 2)
+#define PMIC_MPP_CONF_ANALOG_LEVEL		(PIN_CONFIG_END + 2)
+#define PMIC_MPP_CONF_DTEST_SELECTOR		(PIN_CONFIG_END + 3)
+#define PMIC_MPP_CONF_PAIRED			(PIN_CONFIG_END + 4)
 
 /**
  * struct pmic_mpp_pad - keep current MPP settings
@@ -99,13 +115,15 @@
  * @out_value: Cached pin output value.
  * @output_enabled: Set to true if MPP output logic is enabled.
  * @input_enabled: Set to true if MPP input buffer logic is enabled.
- * @analog_mode: Set to true when MPP should operate in Analog Input, Analog
- *	Output or Bidirectional Analog mode.
+ * @paired: Pin operates in paired mode
  * @num_sources: Number of power-sources supported by this MPP.
  * @power_source: Current power-source used.
  * @amux_input: Set the source for analog input.
+ * @aout_level: Analog output level
  * @pullup: Pullup resistor value. Valid in Bidirectional mode only.
  * @function: See pmic_mpp_functions[].
+ * @drive_strength: Amount of current in sink mode
+ * @dtest: DTEST route selector
  */
 struct pmic_mpp_pad {
 	u16		base;
@@ -114,12 +132,15 @@
 	bool		out_value;
 	bool		output_enabled;
 	bool		input_enabled;
-	bool		analog_mode;
+	bool		paired;
 	unsigned int	num_sources;
 	unsigned int	power_source;
 	unsigned int	amux_input;
+	unsigned int	aout_level;
 	unsigned int	pullup;
 	unsigned int	function;
+	unsigned int	drive_strength;
+	unsigned int	dtest;
 };
 
 struct pmic_mpp_state {
@@ -129,25 +150,32 @@
 	struct gpio_chip chip;
 };
 
-struct pmic_mpp_bindings {
-	const char	*property;
-	unsigned	param;
+static const struct pinconf_generic_params pmic_mpp_bindings[] = {
+	{"qcom,amux-route",	PMIC_MPP_CONF_AMUX_ROUTE,	0},
+	{"qcom,analog-level",	PMIC_MPP_CONF_ANALOG_LEVEL,	0},
+	{"qcom,dtest",		PMIC_MPP_CONF_DTEST_SELECTOR,	0},
+	{"qcom,paired",		PMIC_MPP_CONF_PAIRED,		0},
 };
 
-static struct pmic_mpp_bindings pmic_mpp_bindings[] = {
-	{"qcom,amux-route",	PMIC_MPP_CONF_AMUX_ROUTE},
-	{"qcom,analog-mode",	PMIC_MPP_CONF_ANALOG_MODE},
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item pmic_conf_items[] = {
+	PCONFDUMP(PMIC_MPP_CONF_AMUX_ROUTE, "analog mux", NULL, true),
+	PCONFDUMP(PMIC_MPP_CONF_ANALOG_LEVEL, "analog level", NULL, true),
+	PCONFDUMP(PMIC_MPP_CONF_DTEST_SELECTOR, "dtest", NULL, true),
+	PCONFDUMP(PMIC_MPP_CONF_PAIRED, "paired", NULL, false),
 };
+#endif
 
 static const char *const pmic_mpp_groups[] = {
 	"mpp1", "mpp2", "mpp3", "mpp4", "mpp5", "mpp6", "mpp7", "mpp8",
 };
 
+#define PMIC_MPP_DIGITAL	0
+#define PMIC_MPP_ANALOG		1
+#define PMIC_MPP_SINK		2
+
 static const char *const pmic_mpp_functions[] = {
-	PMIC_MPP_FUNC_NORMAL, PMIC_MPP_FUNC_PAIRED,
-	"reserved1", "reserved2",
-	PMIC_MPP_FUNC_DTEST1, PMIC_MPP_FUNC_DTEST2,
-	PMIC_MPP_FUNC_DTEST3, PMIC_MPP_FUNC_DTEST4,
+	"digital", "analog", "sink"
 };
 
 static inline struct pmic_mpp_state *to_mpp_state(struct gpio_chip *chip)
@@ -204,118 +232,11 @@
 	return 0;
 }
 
-static int pmic_mpp_parse_dt_config(struct device_node *np,
-				    struct pinctrl_dev *pctldev,
-				    unsigned long **configs,
-				    unsigned int *nconfs)
-{
-	struct pmic_mpp_bindings *par;
-	unsigned long cfg;
-	int ret, i;
-	u32 val;
-
-	for (i = 0; i < ARRAY_SIZE(pmic_mpp_bindings); i++) {
-		par = &pmic_mpp_bindings[i];
-		ret = of_property_read_u32(np, par->property, &val);
-
-		/* property not found */
-		if (ret == -EINVAL)
-			continue;
-
-		/* use zero as default value, when no value is specified */
-		if (ret)
-			val = 0;
-
-		dev_dbg(pctldev->dev, "found %s with value %u\n",
-			par->property, val);
-
-		cfg = pinconf_to_config_packed(par->param, val);
-
-		ret = pinctrl_utils_add_config(pctldev, configs, nconfs, cfg);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int pmic_mpp_dt_subnode_to_map(struct pinctrl_dev *pctldev,
-				      struct device_node *np,
-				      struct pinctrl_map **map,
-				      unsigned *reserv, unsigned *nmaps,
-				      enum pinctrl_map_type type)
-{
-	unsigned long *configs = NULL;
-	unsigned nconfs = 0;
-	struct property *prop;
-	const char *group;
-	int ret;
-
-	ret = pmic_mpp_parse_dt_config(np, pctldev, &configs, &nconfs);
-	if (ret < 0)
-		return ret;
-
-	if (!nconfs)
-		return 0;
-
-	ret = of_property_count_strings(np, "pins");
-	if (ret < 0)
-		goto exit;
-
-	ret = pinctrl_utils_reserve_map(pctldev, map, reserv, nmaps, ret);
-	if (ret < 0)
-		goto exit;
-
-	of_property_for_each_string(np, "pins", prop, group) {
-		ret = pinctrl_utils_add_map_configs(pctldev, map,
-						    reserv, nmaps, group,
-						    configs, nconfs, type);
-		if (ret < 0)
-			break;
-	}
-exit:
-	kfree(configs);
-	return ret;
-}
-
-static int pmic_mpp_dt_node_to_map(struct pinctrl_dev *pctldev,
-				   struct device_node *np_config,
-				   struct pinctrl_map **map, unsigned *nmaps)
-{
-	struct device_node *np;
-	enum pinctrl_map_type type;
-	unsigned reserv;
-	int ret;
-
-	ret = 0;
-	*map = NULL;
-	*nmaps = 0;
-	reserv = 0;
-	type = PIN_MAP_TYPE_CONFIGS_GROUP;
-
-	for_each_child_of_node(np_config, np) {
-		ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
-							&reserv, nmaps, type);
-		if (ret)
-			break;
-
-		ret = pmic_mpp_dt_subnode_to_map(pctldev, np, map, &reserv,
-						 nmaps, type);
-		if (ret)
-			break;
-	}
-
-	if (ret < 0)
-		pinctrl_utils_dt_free_map(pctldev, *map, *nmaps);
-
-	return ret;
-}
-
 static const struct pinctrl_ops pmic_mpp_pinctrl_ops = {
 	.get_groups_count	= pmic_mpp_get_groups_count,
 	.get_group_name		= pmic_mpp_get_group_name,
 	.get_group_pins		= pmic_mpp_get_group_pins,
-	.dt_node_to_map		= pmic_mpp_dt_node_to_map,
+	.dt_node_to_map		= pinconf_generic_dt_node_to_map_group,
 	.dt_free_map		= pinctrl_utils_dt_free_map,
 };
 
@@ -340,6 +261,53 @@
 	return 0;
 }
 
+static int pmic_mpp_write_mode_ctl(struct pmic_mpp_state *state,
+				   struct pmic_mpp_pad *pad)
+{
+	unsigned int mode;
+	unsigned int sel;
+	unsigned int val;
+	unsigned int en;
+
+	switch (pad->function) {
+	case PMIC_MPP_ANALOG:
+		if (pad->input_enabled && pad->output_enabled)
+			mode = PMIC_MPP_MODE_ANALOG_BIDIR;
+		else if (pad->input_enabled)
+			mode = PMIC_MPP_MODE_ANALOG_INPUT;
+		else
+			mode = PMIC_MPP_MODE_ANALOG_OUTPUT;
+		break;
+	case PMIC_MPP_DIGITAL:
+		if (pad->input_enabled && pad->output_enabled)
+			mode = PMIC_MPP_MODE_DIGITAL_BIDIR;
+		else if (pad->input_enabled)
+			mode = PMIC_MPP_MODE_DIGITAL_INPUT;
+		else
+			mode = PMIC_MPP_MODE_DIGITAL_OUTPUT;
+		break;
+	case PMIC_MPP_SINK:
+	default:
+		mode = PMIC_MPP_MODE_CURRENT_SINK;
+		break;
+	}
+
+	if (pad->dtest)
+		sel = PMIC_MPP_SELECTOR_DTEST_FIRST + pad->dtest - 1;
+	else if (pad->paired)
+		sel = PMIC_MPP_SELECTOR_PAIRED;
+	else
+		sel = PMIC_MPP_SELECTOR_NORMAL;
+
+	en = !!pad->out_value;
+
+	val = mode << PMIC_MPP_REG_MODE_DIR_SHIFT |
+	      sel << PMIC_MPP_REG_MODE_FUNCTION_SHIFT |
+	      en;
+
+	return pmic_mpp_write(state, pad, PMIC_MPP_REG_MODE_CTL, val);
+}
+
 static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function,
 				unsigned pin)
 {
@@ -352,31 +320,7 @@
 
 	pad->function = function;
 
-	if (!pad->analog_mode) {
-		val = 0;	/* just digital input */
-		if (pad->output_enabled) {
-			if (pad->input_enabled)
-				val = 2; /* digital input and output */
-			else
-				val = 1; /* just digital output */
-		}
-	} else {
-		val = 4;	/* just analog input */
-		if (pad->output_enabled) {
-			if (pad->input_enabled)
-				val = 3; /* analog input and output */
-			else
-				val = 5; /* just analog output */
-		}
-	}
-
-	val = val << PMIC_MPP_REG_MODE_DIR_SHIFT;
-	val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
-	val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK;
-
-	ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_MODE_CTL, val);
-	if (ret < 0)
-		return ret;
+	ret = pmic_mpp_write_mode_ctl(state, pad);
+	if (ret < 0)
+		return ret;
+
 	val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
 
@@ -433,11 +377,20 @@
 	case PIN_CONFIG_OUTPUT:
 		arg = pad->out_value;
 		break;
+	case PMIC_MPP_CONF_DTEST_SELECTOR:
+		arg = pad->dtest;
+		break;
 	case PMIC_MPP_CONF_AMUX_ROUTE:
 		arg = pad->amux_input;
 		break;
-	case PMIC_MPP_CONF_ANALOG_MODE:
-		arg = pad->analog_mode;
+	case PMIC_MPP_CONF_PAIRED:
+		arg = pad->paired;
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		arg = pad->drive_strength;
+		break;
+	case PMIC_MPP_CONF_ANALOG_LEVEL:
+		arg = pad->aout_level;
 		break;
 	default:
 		return -EINVAL;
@@ -459,6 +412,9 @@
 
 	pad = pctldev->desc->pins[pin].drv_data;
 
+	/* Make it possible to enable the pin by not setting high impedance */
+	pad->is_enabled = true;
+
 	for (i = 0; i < nconfs; i++) {
 		param = pinconf_to_config_param(configs[i]);
 		arg = pinconf_to_config_argument(configs[i]);
@@ -497,13 +453,22 @@
 			pad->output_enabled = true;
 			pad->out_value = arg;
 			break;
+		case PMIC_MPP_CONF_DTEST_SELECTOR:
+			pad->dtest = arg;
+			break;
+		case PIN_CONFIG_DRIVE_STRENGTH:
+			pad->drive_strength = arg;
+			break;
 		case PMIC_MPP_CONF_AMUX_ROUTE:
 			if (arg >= PMIC_MPP_AMUX_ROUTE_ABUS4)
 				return -EINVAL;
 			pad->amux_input = arg;
 			break;
-		case PMIC_MPP_CONF_ANALOG_MODE:
-			pad->analog_mode = true;
+		case PMIC_MPP_CONF_ANALOG_LEVEL:
+			pad->aout_level = arg;
+			break;
+		case PMIC_MPP_CONF_PAIRED:
+			pad->paired = !!arg;
 			break;
 		default:
 			return -EINVAL;
@@ -528,29 +493,17 @@
 	if (ret < 0)
 		return ret;
 
-	if (!pad->analog_mode) {
-		val = 0;	/* just digital input */
-		if (pad->output_enabled) {
-			if (pad->input_enabled)
-				val = 2; /* digital input and output */
-			else
-				val = 1; /* just digital output */
-		}
-	} else {
-		val = 4;	/* just analog input */
-		if (pad->output_enabled) {
-			if (pad->input_enabled)
-				val = 3; /* analog input and output */
-			else
-				val = 5; /* just analog output */
-		}
-	}
+	ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_AOUT_CTL, pad->aout_level);
+	if (ret < 0)
+		return ret;
+
+	ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_SINK_CTL,
+			     pad->drive_strength);
+	if (ret < 0)
+		return ret;
 
-	val = val << PMIC_MPP_REG_MODE_DIR_SHIFT;
-	val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
-	val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK;
+	ret = pmic_mpp_write_mode_ctl(state, pad);
+	if (ret < 0)
+		return ret;
 
-	return pmic_mpp_write(state, pad, PMIC_MPP_REG_MODE_CTL, val);
+	val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
+
+	return pmic_mpp_write(state, pad, PMIC_MPP_REG_EN_CTL, val);
 }
 
 static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
@@ -558,20 +511,17 @@
 {
 	struct pmic_mpp_state *state = pinctrl_dev_get_drvdata(pctldev);
 	struct pmic_mpp_pad *pad;
-	int ret, val;
+	int ret;
 
 	static const char *const biases[] = {
 		"0.6kOhm", "10kOhm", "30kOhm", "Disabled"
 	};
 
-
 	pad = pctldev->desc->pins[pin].drv_data;
 
 	seq_printf(s, " mpp%-2d:", pin + PMIC_MPP_PHYSICAL_OFFSET);
 
-	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_EN_CTL);
-
-	if (val < 0 || !(val >> PMIC_MPP_REG_MASTER_EN_SHIFT)) {
+	if (!pad->is_enabled) {
 		seq_puts(s, " ---");
 	} else {
 
@@ -585,15 +535,20 @@
 		}
 
 		seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
-		seq_printf(s, " %-4s", pad->analog_mode ? "ana" : "dig");
 		seq_printf(s, " %-7s", pmic_mpp_functions[pad->function]);
 		seq_printf(s, " vin-%d", pad->power_source);
+		seq_printf(s, " %d", pad->aout_level);
 		seq_printf(s, " %-8s", biases[pad->pullup]);
 		seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
+		if (pad->dtest)
+			seq_printf(s, " dtest%d", pad->dtest);
+		if (pad->paired)
+			seq_puts(s, " paired");
 	}
 }
 
 static const struct pinconf_ops pmic_mpp_pinconf_ops = {
+	.is_generic = true,
 	.pin_config_group_get		= pmic_mpp_config_get,
 	.pin_config_group_set		= pmic_mpp_config_set,
 	.pin_config_group_dbg_show	= pmic_mpp_config_dbg_show,
@@ -709,6 +664,7 @@
 			     struct pmic_mpp_pad *pad)
 {
 	int type, subtype, val, dir;
+	unsigned int sel;
 
 	type = pmic_mpp_read(state, pad, PMIC_MPP_REG_TYPE);
 	if (type < 0)
@@ -751,43 +707,53 @@
 	dir &= PMIC_MPP_REG_MODE_DIR_MASK;
 
 	switch (dir) {
-	case 0:
+	case PMIC_MPP_MODE_DIGITAL_INPUT:
 		pad->input_enabled = true;
 		pad->output_enabled = false;
-		pad->analog_mode = false;
+		pad->function = PMIC_MPP_DIGITAL;
 		break;
-	case 1:
+	case PMIC_MPP_MODE_DIGITAL_OUTPUT:
 		pad->input_enabled = false;
 		pad->output_enabled = true;
-		pad->analog_mode = false;
+		pad->function = PMIC_MPP_DIGITAL;
 		break;
-	case 2:
+	case PMIC_MPP_MODE_DIGITAL_BIDIR:
 		pad->input_enabled = true;
 		pad->output_enabled = true;
-		pad->analog_mode = false;
+		pad->function = PMIC_MPP_DIGITAL;
 		break;
-	case 3:
+	case PMIC_MPP_MODE_ANALOG_BIDIR:
 		pad->input_enabled = true;
 		pad->output_enabled = true;
-		pad->analog_mode = true;
+		pad->function = PMIC_MPP_ANALOG;
 		break;
-	case 4:
+	case PMIC_MPP_MODE_ANALOG_INPUT:
 		pad->input_enabled = true;
 		pad->output_enabled = false;
-		pad->analog_mode = true;
+		pad->function = PMIC_MPP_ANALOG;
 		break;
-	case 5:
+	case PMIC_MPP_MODE_ANALOG_OUTPUT:
 		pad->input_enabled = false;
 		pad->output_enabled = true;
-		pad->analog_mode = true;
+		pad->function = PMIC_MPP_ANALOG;
+		break;
+	case PMIC_MPP_MODE_CURRENT_SINK:
+		pad->input_enabled = false;
+		pad->output_enabled = true;
+		pad->function = PMIC_MPP_SINK;
 		break;
 	default:
 		dev_err(state->dev, "unknown MPP direction\n");
 		return -ENODEV;
 	}
 
-	pad->function = val >> PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
-	pad->function &= PMIC_MPP_REG_MODE_FUNCTION_MASK;
+	sel = val >> PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
+	sel &= PMIC_MPP_REG_MODE_FUNCTION_MASK;
+
+	if (sel >= PMIC_MPP_SELECTOR_DTEST_FIRST)
+		pad->dtest = sel + 1;
+	else if (sel == PMIC_MPP_SELECTOR_PAIRED)
+		pad->paired = true;
 
 	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_VIN_CTL);
 	if (val < 0)
@@ -810,8 +776,24 @@
 	pad->amux_input = val >> PMIC_MPP_REG_AIN_ROUTE_SHIFT;
 	pad->amux_input &= PMIC_MPP_REG_AIN_ROUTE_MASK;
 
-	/* Pin could be disabled with PIN_CONFIG_BIAS_HIGH_IMPEDANCE */
-	pad->is_enabled = true;
+	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_SINK_CTL);
+	if (val < 0)
+		return val;
+
+	pad->drive_strength = val;
+
+	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_AOUT_CTL);
+	if (val < 0)
+		return val;
+
+	pad->aout_level = val;
+
+	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_EN_CTL);
+	if (val < 0)
+		return val;
+
+	pad->is_enabled = !!val;
+
 	return 0;
 }
 
@@ -866,6 +848,12 @@
 	pctrldesc->pins = pindesc;
 	pctrldesc->npins = npins;
 
+	pctrldesc->num_custom_params = ARRAY_SIZE(pmic_mpp_bindings);
+	pctrldesc->custom_params = pmic_mpp_bindings;
+#ifdef CONFIG_DEBUG_FS
+	pctrldesc->custom_conf_items = pmic_conf_items;
+#endif
+
 	for (i = 0; i < npins; i++, pindesc++) {
 		pad = &pads[i];
 		pindesc->drv_data = pad;
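
With dt_node_to_map delegated to pinconf_generic_dt_node_to_map_group, the
qcom,* properties registered above reach config_set as ordinary packed
configs. The generic layer packs each (parameter, argument) pair into one
unsigned long; a sketch of the round trip using the custom analog-level
parameter defined in this patch (the argument value 3 is arbitrary):

	#include <linux/pinctrl/pinconf-generic.h>

	static void mpp_config_demo(void)
	{
		unsigned long cfg;
		unsigned int param, arg;

		cfg = pinconf_to_config_packed(PMIC_MPP_CONF_ANALOG_LEVEL, 3);
		param = pinconf_to_config_param(cfg);	/* ..._ANALOG_LEVEL */
		arg = pinconf_to_config_argument(cfg);	/* 3 */
	}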
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
new file mode 100644
index 0000000..c978b31
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -0,0 +1,791 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+/* mode */
+#define PM8XXX_GPIO_MODE_ENABLED	BIT(0)
+#define PM8XXX_GPIO_MODE_INPUT		0
+#define PM8XXX_GPIO_MODE_OUTPUT		2
+
+/* output buffer */
+#define PM8XXX_GPIO_PUSH_PULL		0
+#define PM8XXX_GPIO_OPEN_DRAIN		1
+
+/* bias */
+#define PM8XXX_GPIO_BIAS_PU_30		0
+#define PM8XXX_GPIO_BIAS_PU_1P5		1
+#define PM8XXX_GPIO_BIAS_PU_31P5	2
+#define PM8XXX_GPIO_BIAS_PU_1P5_30	3
+#define PM8XXX_GPIO_BIAS_PD		4
+#define PM8XXX_GPIO_BIAS_NP		5
+
+/* GPIO registers */
+#define SSBI_REG_ADDR_GPIO_BASE		0x150
+#define SSBI_REG_ADDR_GPIO(n)		(SSBI_REG_ADDR_GPIO_BASE + n)
+
+#define PM8XXX_BANK_WRITE		BIT(7)
+
+#define PM8XXX_MAX_GPIOS               44
+
+/* custom pinconf parameters */
+#define PM8XXX_QCOM_DRIVE_STRENGH      (PIN_CONFIG_END + 1)
+#define PM8XXX_QCOM_PULL_UP_STRENGTH   (PIN_CONFIG_END + 2)
+
+/**
+ * struct pm8xxx_pin_data - dynamic configuration for a pin
+ * @reg:               address of the control register
+ * @irq:               IRQ from the PMIC interrupt controller
+ * @power_source:      logical selected voltage source, mapping in static data
+ *                     is used to translate to register values
+ * @mode:              operating mode for the pin (input/output)
+ * @open_drain:        output buffer configured as open-drain (vs push-pull)
+ * @output_value:      configured output value
+ * @bias:              register view of configured bias
+ * @pull_up_strength:  placeholder for selected pull up strength
+ *                     only used to configure bias when pull up is selected
+ * @output_strength:   selector of output-strength
+ * @disable:           pin disabled / configured as tristate
+ * @function:          pinmux selector
+ * @inverted:          pin logic is inverted
+ */
+struct pm8xxx_pin_data {
+	unsigned reg;
+	int irq;
+	u8 power_source;
+	u8 mode;
+	bool open_drain;
+	bool output_value;
+	u8 bias;
+	u8 pull_up_strength;
+	u8 output_strength;
+	bool disable;
+	u8 function;
+	bool inverted;
+};
+
+struct pm8xxx_gpio {
+	struct device *dev;
+	struct regmap *regmap;
+	struct pinctrl_dev *pctrl;
+	struct gpio_chip chip;
+
+	struct pinctrl_desc desc;
+	unsigned npins;
+};
+
+static const struct pinconf_generic_params pm8xxx_gpio_bindings[] = {
+	{"qcom,drive-strength",		PM8XXX_QCOM_DRIVE_STRENGH,	0},
+	{"qcom,pull-up-strength",	PM8XXX_QCOM_PULL_UP_STRENGTH,	0},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item pm8xxx_conf_items[ARRAY_SIZE(pm8xxx_gpio_bindings)] = {
+	PCONFDUMP(PM8XXX_QCOM_DRIVE_STRENGH, "drive-strength", NULL, true),
+	PCONFDUMP(PM8XXX_QCOM_PULL_UP_STRENGTH,  "pull up strength", NULL, true),
+};
+#endif
+
+static const char * const pm8xxx_groups[PM8XXX_MAX_GPIOS] = {
+	"gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio8",
+	"gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", "gpio15",
+	"gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+	"gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29",
+	"gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36",
+	"gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+	"gpio44",
+};
+
+static const char * const pm8xxx_gpio_functions[] = {
+	PMIC_GPIO_FUNC_NORMAL, PMIC_GPIO_FUNC_PAIRED,
+	PMIC_GPIO_FUNC_FUNC1, PMIC_GPIO_FUNC_FUNC2,
+	PMIC_GPIO_FUNC_DTEST1, PMIC_GPIO_FUNC_DTEST2,
+	PMIC_GPIO_FUNC_DTEST3, PMIC_GPIO_FUNC_DTEST4,
+};
+
+static int pm8xxx_read_bank(struct pm8xxx_gpio *pctrl,
+			    struct pm8xxx_pin_data *pin, int bank)
+{
+	unsigned int val = bank << 4;
+	int ret;
+
+	ret = regmap_write(pctrl->regmap, pin->reg, val);
+	if (ret) {
+		dev_err(pctrl->dev, "failed to select bank %d\n", bank);
+		return ret;
+	}
+
+	ret = regmap_read(pctrl->regmap, pin->reg, &val);
+	if (ret) {
+		dev_err(pctrl->dev, "failed to read register %d\n", bank);
+		return ret;
+	}
+
+	return val;
+}
+
+static int pm8xxx_write_bank(struct pm8xxx_gpio *pctrl,
+			     struct pm8xxx_pin_data *pin,
+			     int bank,
+			     u8 val)
+{
+	int ret;
+
+	val |= PM8XXX_BANK_WRITE;
+	val |= bank << 4;
+
+	ret = regmap_write(pctrl->regmap, pin->reg, val);
+	if (ret)
+		dev_err(pctrl->dev, "failed to write register\n");
+
+	return ret;
+}
+
+static int pm8xxx_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	return pctrl->npins;
+}
+
+static const char *pm8xxx_get_group_name(struct pinctrl_dev *pctldev,
+					 unsigned group)
+{
+	return pm8xxx_groups[group];
+}
+
+
+static int pm8xxx_get_group_pins(struct pinctrl_dev *pctldev,
+				 unsigned group,
+				 const unsigned **pins,
+				 unsigned *num_pins)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins = &pctrl->desc.pins[group].number;
+	*num_pins = 1;
+
+	return 0;
+}
+
+static const struct pinctrl_ops pm8xxx_pinctrl_ops = {
+	.get_groups_count	= pm8xxx_get_groups_count,
+	.get_group_name		= pm8xxx_get_group_name,
+	.get_group_pins         = pm8xxx_get_group_pins,
+	.dt_node_to_map		= pinconf_generic_dt_node_to_map_group,
+	.dt_free_map		= pinctrl_utils_dt_free_map,
+};
+
+static int pm8xxx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+	return ARRAY_SIZE(pm8xxx_gpio_functions);
+}
+
+static const char *pm8xxx_get_function_name(struct pinctrl_dev *pctldev,
+					    unsigned function)
+{
+	return pm8xxx_gpio_functions[function];
+}
+
+static int pm8xxx_get_function_groups(struct pinctrl_dev *pctldev,
+				      unsigned function,
+				      const char * const **groups,
+				      unsigned * const num_groups)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = pm8xxx_groups;
+	*num_groups = pctrl->npins;
+	return 0;
+}
+
+static int pm8xxx_pinmux_set_mux(struct pinctrl_dev *pctldev,
+				 unsigned function,
+				 unsigned group)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[group].drv_data;
+	u8 val;
+
+	pin->function = function;
+	val = pin->function << 1;
+
+	pm8xxx_write_bank(pctrl, pin, 4, val);
+
+	return 0;
+}
+
+static const struct pinmux_ops pm8xxx_pinmux_ops = {
+	.get_functions_count	= pm8xxx_get_functions_count,
+	.get_function_name	= pm8xxx_get_function_name,
+	.get_function_groups	= pm8xxx_get_function_groups,
+	.set_mux		= pm8xxx_pinmux_set_mux,
+};
+
+static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
+				 unsigned int offset,
+				 unsigned long *config)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	unsigned param = pinconf_to_config_param(*config);
+	unsigned arg;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		arg = pin->bias == PM8XXX_GPIO_BIAS_NP;
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		arg = pin->bias == PM8XXX_GPIO_BIAS_PD;
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		arg = pin->bias <= PM8XXX_GPIO_BIAS_PU_1P5_30;
+		break;
+	case PM8XXX_QCOM_PULL_UP_STRENGTH:
+		arg = pin->pull_up_strength;
+		break;
+	case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+		arg = pin->disable;
+		break;
+	case PIN_CONFIG_INPUT_ENABLE:
+		arg = pin->mode == PM8XXX_GPIO_MODE_INPUT;
+		break;
+	case PIN_CONFIG_OUTPUT:
+		if (pin->mode & PM8XXX_GPIO_MODE_OUTPUT)
+			arg = pin->output_value;
+		else
+			arg = 0;
+		break;
+	case PIN_CONFIG_POWER_SOURCE:
+		arg = pin->power_source;
+		break;
+	case PM8XXX_QCOM_DRIVE_STRENGH:
+		arg = pin->output_strength;
+		break;
+	case PIN_CONFIG_DRIVE_PUSH_PULL:
+		arg = !pin->open_drain;
+		break;
+	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+		arg = pin->open_drain;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*config = pinconf_to_config_packed(param, arg);
+
+	return 0;
+}
+
+static int pm8xxx_pin_config_set(struct pinctrl_dev *pctldev,
+				 unsigned int offset,
+				 unsigned long *configs,
+				 unsigned num_configs)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	unsigned param;
+	unsigned arg;
+	unsigned i;
+	u8 banks = 0;
+	u8 val;
+
+	for (i = 0; i < num_configs; i++) {
+		param = pinconf_to_config_param(configs[i]);
+		arg = pinconf_to_config_argument(configs[i]);
+
+		switch (param) {
+		case PIN_CONFIG_BIAS_DISABLE:
+			pin->bias = PM8XXX_GPIO_BIAS_NP;
+			banks |= BIT(2);
+			pin->disable = 0;
+			banks |= BIT(3);
+			break;
+		case PIN_CONFIG_BIAS_PULL_DOWN:
+			pin->bias = PM8XXX_GPIO_BIAS_PD;
+			banks |= BIT(2);
+			pin->disable = 0;
+			banks |= BIT(3);
+			break;
+		case PM8XXX_QCOM_PULL_UP_STRENGTH:
+			if (arg > PM8XXX_GPIO_BIAS_PU_1P5_30) {
+				dev_err(pctrl->dev, "invalid pull-up strength\n");
+				return -EINVAL;
+			}
+			pin->pull_up_strength = arg;
+			/* FALLTHROUGH */
+		case PIN_CONFIG_BIAS_PULL_UP:
+			pin->bias = pin->pull_up_strength;
+			banks |= BIT(2);
+			pin->disable = 0;
+			banks |= BIT(3);
+			break;
+		case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+			pin->disable = 1;
+			banks |= BIT(3);
+			break;
+		case PIN_CONFIG_INPUT_ENABLE:
+			pin->mode = PM8XXX_GPIO_MODE_INPUT;
+			banks |= BIT(0) | BIT(1);
+			break;
+		case PIN_CONFIG_OUTPUT:
+			pin->mode = PM8XXX_GPIO_MODE_OUTPUT;
+			pin->output_value = !!arg;
+			banks |= BIT(0) | BIT(1);
+			break;
+		case PIN_CONFIG_POWER_SOURCE:
+			pin->power_source = arg;
+			banks |= BIT(0);
+			break;
+		case PM8XXX_QCOM_DRIVE_STRENGH:
+			if (arg > PMIC_GPIO_STRENGTH_LOW) {
+				dev_err(pctrl->dev, "invalid drive strength\n");
+				return -EINVAL;
+			}
+			pin->output_strength = arg;
+			banks |= BIT(3);
+			break;
+		case PIN_CONFIG_DRIVE_PUSH_PULL:
+			pin->open_drain = 0;
+			banks |= BIT(1);
+			break;
+		case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+			pin->open_drain = 1;
+			banks |= BIT(1);
+			break;
+		default:
+			dev_err(pctrl->dev,
+				"unsupported config parameter: %x\n",
+				param);
+			return -EINVAL;
+		}
+	}
+
+	if (banks & BIT(0)) {
+		val = pin->power_source << 1;
+		val |= PM8XXX_GPIO_MODE_ENABLED;
+		pm8xxx_write_bank(pctrl, pin, 0, val);
+	}
+
+	if (banks & BIT(1)) {
+		val = pin->mode << 2;
+		val |= pin->open_drain << 1;
+		val |= pin->output_value;
+		pm8xxx_write_bank(pctrl, pin, 1, val);
+	}
+
+	if (banks & BIT(2)) {
+		val = pin->bias << 1;
+		pm8xxx_write_bank(pctrl, pin, 2, val);
+	}
+
+	if (banks & BIT(3)) {
+		val = pin->output_strength << 2;
+		val |= pin->disable;
+		pm8xxx_write_bank(pctrl, pin, 3, val);
+	}
+
+	if (banks & BIT(4)) {
+		val = pin->function << 1;
+		pm8xxx_write_bank(pctrl, pin, 4, val);
+	}
+
+	if (banks & BIT(5)) {
+		val = 0;
+		if (!pin->inverted)
+			val |= BIT(3);
+		pm8xxx_write_bank(pctrl, pin, 5, val);
+	}
+
+	return 0;
+}
+
+static const struct pinconf_ops pm8xxx_pinconf_ops = {
+	.is_generic = true,
+	.pin_config_group_get = pm8xxx_pin_config_get,
+	.pin_config_group_set = pm8xxx_pin_config_set,
+};
+
+static struct pinctrl_desc pm8xxx_pinctrl_desc = {
+	.name = "pm8xxx_gpio",
+	.pctlops = &pm8xxx_pinctrl_ops,
+	.pmxops = &pm8xxx_pinmux_ops,
+	.confops = &pm8xxx_pinconf_ops,
+	.owner = THIS_MODULE,
+};
+
+static int pm8xxx_gpio_direction_input(struct gpio_chip *chip,
+				       unsigned offset)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	u8 val;
+
+	pin->mode = PM8XXX_GPIO_MODE_INPUT;
+	val = pin->mode << 2;
+
+	pm8xxx_write_bank(pctrl, pin, 1, val);
+
+	return 0;
+}
+
+static int pm8xxx_gpio_direction_output(struct gpio_chip *chip,
+					unsigned offset,
+					int value)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	u8 val;
+
+	pin->mode = PM8XXX_GPIO_MODE_OUTPUT;
+	pin->output_value = !!value;
+
+	val = pin->mode << 2;
+	val |= pin->open_drain << 1;
+	val |= pin->output_value;
+
+	pm8xxx_write_bank(pctrl, pin, 1, val);
+
+	return 0;
+}
+
+static int pm8xxx_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	bool state;
+	int ret;
+
+	if (pin->mode == PM8XXX_GPIO_MODE_OUTPUT) {
+		ret = pin->output_value;
+	} else {
+		ret = irq_get_irqchip_state(pin->irq, IRQCHIP_STATE_LINE_LEVEL, &state);
+		if (!ret)
+			ret = !!state;
+	}
+
+	return ret;
+}
+
+static void pm8xxx_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	u8 val;
+
+	pin->output_value = !!value;
+
+	val = pin->mode << 2;
+	val |= pin->open_drain << 1;
+	val |= pin->output_value;
+
+	pm8xxx_write_bank(pctrl, pin, 1, val);
+}
+
+static int pm8xxx_gpio_of_xlate(struct gpio_chip *chip,
+				const struct of_phandle_args *gpio_desc,
+				u32 *flags)
+{
+	if (chip->of_gpio_n_cells < 2)
+		return -EINVAL;
+
+	if (flags)
+		*flags = gpio_desc->args[1];
+
+	return gpio_desc->args[0] - 1;
+}
+
+
+static int pm8xxx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	return pin->irq;
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+static void pm8xxx_gpio_dbg_show_one(struct seq_file *s,
+				  struct pinctrl_dev *pctldev,
+				  struct gpio_chip *chip,
+				  unsigned offset,
+				  unsigned gpio)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	static const char * const modes[] = {
+		"in", "both", "out", "off"
+	};
+	static const char * const biases[] = {
+		"pull-up 30uA", "pull-up 1.5uA", "pull-up 31.5uA",
+		"pull-up 1.5uA + 30uA boost", "pull-down 10uA", "no pull"
+	};
+	static const char * const buffer_types[] = {
+		"push-pull", "open-drain"
+	};
+	static const char * const strengths[] = {
+		"no", "high", "medium", "low"
+	};
+
+	seq_printf(s, " gpio%-2d:", offset + 1);
+	if (pin->disable) {
+		seq_puts(s, " ---");
+	} else {
+		seq_printf(s, " %-4s", modes[pin->mode]);
+		seq_printf(s, " %-7s", pm8xxx_gpio_functions[pin->function]);
+		seq_printf(s, " VIN%d", pin->power_source);
+		seq_printf(s, " %-27s", biases[pin->bias]);
+		seq_printf(s, " %-10s", buffer_types[pin->open_drain]);
+		seq_printf(s, " %-4s", pin->output_value ? "high" : "low");
+		seq_printf(s, " %-7s", strengths[pin->output_strength]);
+		if (pin->inverted)
+			seq_puts(s, " inverted");
+	}
+}
+
+static void pm8xxx_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+	unsigned gpio = chip->base;
+	unsigned i;
+
+	for (i = 0; i < chip->ngpio; i++, gpio++) {
+		pm8xxx_gpio_dbg_show_one(s, NULL, chip, i, gpio);
+		seq_puts(s, "\n");
+	}
+}
+
+#else
+#define pm8xxx_gpio_dbg_show NULL
+#endif
+
+static struct gpio_chip pm8xxx_gpio_template = {
+	.direction_input = pm8xxx_gpio_direction_input,
+	.direction_output = pm8xxx_gpio_direction_output,
+	.get = pm8xxx_gpio_get,
+	.set = pm8xxx_gpio_set,
+	.of_xlate = pm8xxx_gpio_of_xlate,
+	.to_irq = pm8xxx_gpio_to_irq,
+	.dbg_show = pm8xxx_gpio_dbg_show,
+	.owner = THIS_MODULE,
+};
+
+static int pm8xxx_pin_populate(struct pm8xxx_gpio *pctrl,
+			       struct pm8xxx_pin_data *pin)
+{
+	int val;
+
+	val = pm8xxx_read_bank(pctrl, pin, 0);
+	if (val < 0)
+		return val;
+
+	pin->power_source = (val >> 1) & 0x7;
+
+	val = pm8xxx_read_bank(pctrl, pin, 1);
+	if (val < 0)
+		return val;
+
+	pin->mode = (val >> 2) & 0x3;
+	pin->open_drain = !!(val & BIT(1));
+	pin->output_value = val & BIT(0);
+
+	val = pm8xxx_read_bank(pctrl, pin, 2);
+	if (val < 0)
+		return val;
+
+	pin->bias = (val >> 1) & 0x7;
+	if (pin->bias <= PM8XXX_GPIO_BIAS_PU_1P5_30)
+		pin->pull_up_strength = pin->bias;
+	else
+		pin->pull_up_strength = PM8XXX_GPIO_BIAS_PU_30;
+
+	val = pm8xxx_read_bank(pctrl, pin, 3);
+	if (val < 0)
+		return val;
+
+	pin->output_strength = (val >> 2) & 0x3;
+	pin->disable = val & BIT(0);
+
+	val = pm8xxx_read_bank(pctrl, pin, 4);
+	if (val < 0)
+		return val;
+
+	pin->function = (val >> 1) & 0x7;
+
+	val = pm8xxx_read_bank(pctrl, pin, 5);
+	if (val < 0)
+		return val;
+
+	pin->inverted = !(val & BIT(3));
+
+	return 0;
+}
+
+static const struct of_device_id pm8xxx_gpio_of_match[] = {
+	{ .compatible = "qcom,pm8018-gpio", .data = (void *)6 },
+	{ .compatible = "qcom,pm8038-gpio", .data = (void *)12 },
+	{ .compatible = "qcom,pm8058-gpio", .data = (void *)40 },
+	{ .compatible = "qcom,pm8917-gpio", .data = (void *)38 },
+	{ .compatible = "qcom,pm8921-gpio", .data = (void *)44 },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, pm8xxx_gpio_of_match);
+
+static int pm8xxx_gpio_probe(struct platform_device *pdev)
+{
+	struct pm8xxx_pin_data *pin_data;
+	struct pinctrl_pin_desc *pins;
+	struct pm8xxx_gpio *pctrl;
+	int ret;
+	int i;
+
+	pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+	if (!pctrl)
+		return -ENOMEM;
+
+	pctrl->dev = &pdev->dev;
+	pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev);
+
+	pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!pctrl->regmap) {
+		dev_err(&pdev->dev, "parent regmap unavailable\n");
+		return -ENXIO;
+	}
+
+	pctrl->desc = pm8xxx_pinctrl_desc;
+	pctrl->desc.npins = pctrl->npins;
+
+	pins = devm_kcalloc(&pdev->dev,
+			    pctrl->desc.npins,
+			    sizeof(struct pinctrl_pin_desc),
+			    GFP_KERNEL);
+	if (!pins)
+		return -ENOMEM;
+
+	pin_data = devm_kcalloc(&pdev->dev,
+				pctrl->desc.npins,
+				sizeof(struct pm8xxx_pin_data),
+				GFP_KERNEL);
+	if (!pin_data)
+		return -ENOMEM;
+
+	for (i = 0; i < pctrl->desc.npins; i++) {
+		pin_data[i].reg = SSBI_REG_ADDR_GPIO(i);
+		pin_data[i].irq = platform_get_irq(pdev, i);
+		if (pin_data[i].irq < 0) {
+			dev_err(&pdev->dev,
+				"missing interrupts for pin %d\n", i);
+			return pin_data[i].irq;
+		}
+
+		ret = pm8xxx_pin_populate(pctrl, &pin_data[i]);
+		if (ret)
+			return ret;
+
+		pins[i].number = i;
+		pins[i].name = pm8xxx_groups[i];
+		pins[i].drv_data = &pin_data[i];
+	}
+	pctrl->desc.pins = pins;
+
+	pctrl->desc.num_custom_params = ARRAY_SIZE(pm8xxx_gpio_bindings);
+	pctrl->desc.custom_params = pm8xxx_gpio_bindings;
+#ifdef CONFIG_DEBUG_FS
+	pctrl->desc.custom_conf_items = pm8xxx_conf_items;
+#endif
+
+	pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
+	if (!pctrl->pctrl) {
+		dev_err(&pdev->dev, "couldn't register pm8xxx gpio driver\n");
+		return -ENODEV;
+	}
+
+	pctrl->chip = pm8xxx_gpio_template;
+	pctrl->chip.base = -1;
+	pctrl->chip.dev = &pdev->dev;
+	pctrl->chip.of_node = pdev->dev.of_node;
+	pctrl->chip.of_gpio_n_cells = 2;
+	pctrl->chip.label = dev_name(pctrl->dev);
+	pctrl->chip.ngpio = pctrl->npins;
+	ret = gpiochip_add(&pctrl->chip);
+	if (ret) {
+		dev_err(&pdev->dev, "failed register gpiochip\n");
+		goto unregister_pinctrl;
+	}
+
+	ret = gpiochip_add_pin_range(&pctrl->chip,
+				     dev_name(pctrl->dev),
+				     0, 0, pctrl->chip.ngpio);
+	if (ret) {
+		dev_err(pctrl->dev, "failed to add pin range\n");
+		goto unregister_gpiochip;
+	}
+
+	platform_set_drvdata(pdev, pctrl);
+
+	dev_dbg(&pdev->dev, "Qualcomm pm8xxx gpio driver probed\n");
+
+	return 0;
+
+unregister_gpiochip:
+	gpiochip_remove(&pctrl->chip);
+
+unregister_pinctrl:
+	pinctrl_unregister(pctrl->pctrl);
+
+	return ret;
+}
+
+static int pm8xxx_gpio_remove(struct platform_device *pdev)
+{
+	struct pm8xxx_gpio *pctrl = platform_get_drvdata(pdev);
+
+	gpiochip_remove(&pctrl->chip);
+
+	pinctrl_unregister(pctrl->pctrl);
+
+	return 0;
+}
+
+static struct platform_driver pm8xxx_gpio_driver = {
+	.driver = {
+		.name = "qcom-ssbi-gpio",
+		.of_match_table = pm8xxx_gpio_of_match,
+	},
+	.probe = pm8xxx_gpio_probe,
+	.remove = pm8xxx_gpio_remove,
+};
+
+module_platform_driver(pm8xxx_gpio_driver);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm PM8xxx GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
new file mode 100644
index 0000000..2d1b69f
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -0,0 +1,882 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+
+#include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+/* MPP registers */
+#define SSBI_REG_ADDR_MPP_BASE		0x50
+#define SSBI_REG_ADDR_MPP(n)		(SSBI_REG_ADDR_MPP_BASE + n)
+
+/* MPP Type: type */
+#define PM8XXX_MPP_TYPE_D_INPUT         0
+#define PM8XXX_MPP_TYPE_D_OUTPUT        1
+#define PM8XXX_MPP_TYPE_D_BI_DIR        2
+#define PM8XXX_MPP_TYPE_A_INPUT         3
+#define PM8XXX_MPP_TYPE_A_OUTPUT        4
+#define PM8XXX_MPP_TYPE_SINK            5
+#define PM8XXX_MPP_TYPE_DTEST_SINK      6
+#define PM8XXX_MPP_TYPE_DTEST_OUTPUT    7
+
+/* Digital Input: control */
+#define PM8XXX_MPP_DIN_TO_INT           0
+#define PM8XXX_MPP_DIN_TO_DBUS1         1
+#define PM8XXX_MPP_DIN_TO_DBUS2         2
+#define PM8XXX_MPP_DIN_TO_DBUS3         3
+
+/* Digital Output: control */
+#define PM8XXX_MPP_DOUT_CTRL_LOW        0
+#define PM8XXX_MPP_DOUT_CTRL_HIGH       1
+#define PM8XXX_MPP_DOUT_CTRL_MPP        2
+#define PM8XXX_MPP_DOUT_CTRL_INV_MPP    3
+
+/* Bidirectional: control */
+#define PM8XXX_MPP_BI_PULLUP_1KOHM      0
+#define PM8XXX_MPP_BI_PULLUP_OPEN       1
+#define PM8XXX_MPP_BI_PULLUP_10KOHM     2
+#define PM8XXX_MPP_BI_PULLUP_30KOHM     3
+
+/* Analog Output: control */
+#define PM8XXX_MPP_AOUT_CTRL_DISABLE            0
+#define PM8XXX_MPP_AOUT_CTRL_ENABLE             1
+#define PM8XXX_MPP_AOUT_CTRL_MPP_HIGH_EN        2
+#define PM8XXX_MPP_AOUT_CTRL_MPP_LOW_EN         3
+
+/* Current Sink: control */
+#define PM8XXX_MPP_CS_CTRL_DISABLE      0
+#define PM8XXX_MPP_CS_CTRL_ENABLE       1
+#define PM8XXX_MPP_CS_CTRL_MPP_HIGH_EN  2
+#define PM8XXX_MPP_CS_CTRL_MPP_LOW_EN   3
+
+/* DTEST Current Sink: control */
+#define PM8XXX_MPP_DTEST_CS_CTRL_EN1    0
+#define PM8XXX_MPP_DTEST_CS_CTRL_EN2    1
+#define PM8XXX_MPP_DTEST_CS_CTRL_EN3    2
+#define PM8XXX_MPP_DTEST_CS_CTRL_EN4    3
+
+/* DTEST Digital Output: control */
+#define PM8XXX_MPP_DTEST_DBUS1          0
+#define PM8XXX_MPP_DTEST_DBUS2          1
+#define PM8XXX_MPP_DTEST_DBUS3          2
+#define PM8XXX_MPP_DTEST_DBUS4          3
+
+/* custom pinconf parameters */
+#define PM8XXX_CONFIG_AMUX		(PIN_CONFIG_END + 1)
+#define PM8XXX_CONFIG_DTEST_SELECTOR	(PIN_CONFIG_END + 2)
+#define PM8XXX_CONFIG_ALEVEL		(PIN_CONFIG_END + 3)
+#define PM8XXX_CONFIG_PAIRED		(PIN_CONFIG_END + 4)
+
+/**
+ * struct pm8xxx_pin_data - dynamic configuration for a pin
+ * @reg:		address of the control register
+ * @irq:		IRQ from the PMIC interrupt controller
+ * @mode:		operating mode for the pin (digital, analog or current sink)
+ * @input:		pin is input
+ * @output:		pin is output
+ * @high_z:		pin is floating
+ * @paired:		mpp operates in paired mode
+ * @output_value:	logical output value of the mpp
+ * @power_source:	selected power source
+ * @dtest:		DTEST route selector
+ * @amux:		input muxing in analog mode
+ * @aout_level:		selector of the output in analog mode
+ * @drive_strength:	drive strength of the current sink
+ * @pullup:		pull up value, when in digital bidirectional mode
+ */
+struct pm8xxx_pin_data {
+	unsigned reg;
+	int irq;
+
+	u8 mode;
+
+	bool input;
+	bool output;
+	bool high_z;
+	bool paired;
+	bool output_value;
+
+	u8 power_source;
+	u8 dtest;
+	u8 amux;
+	u8 aout_level;
+	u8 drive_strength;
+	unsigned pullup;
+};
+
+struct pm8xxx_mpp {
+	struct device *dev;
+	struct regmap *regmap;
+	struct pinctrl_dev *pctrl;
+	struct gpio_chip chip;
+
+	struct pinctrl_desc desc;
+	unsigned npins;
+};
+
+static const struct pinconf_generic_params pm8xxx_mpp_bindings[] = {
+	{"qcom,amux-route",	PM8XXX_CONFIG_AMUX,		0},
+	{"qcom,analog-level",	PM8XXX_CONFIG_ALEVEL,		0},
+	{"qcom,dtest",		PM8XXX_CONFIG_DTEST_SELECTOR,	0},
+	{"qcom,paired",		PM8XXX_CONFIG_PAIRED,		0},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item pm8xxx_conf_items[] = {
+	PCONFDUMP(PM8XXX_CONFIG_AMUX, "analog mux", NULL, true),
+	PCONFDUMP(PM8XXX_CONFIG_ALEVEL, "analog level", NULL, true),
+	PCONFDUMP(PM8XXX_CONFIG_DTEST_SELECTOR, "dtest", NULL, true),
+	PCONFDUMP(PM8XXX_CONFIG_PAIRED, "paired", NULL, false),
+};
+#endif
+
+#define PM8XXX_MAX_MPPS	12
+static const char * const pm8xxx_groups[PM8XXX_MAX_MPPS] = {
+	"mpp1", "mpp2", "mpp3", "mpp4", "mpp5", "mpp6", "mpp7", "mpp8",
+	"mpp9", "mpp10", "mpp11", "mpp12",
+};
+
+#define PM8XXX_MPP_DIGITAL	0
+#define PM8XXX_MPP_ANALOG	1
+#define PM8XXX_MPP_SINK		2
+
+static const char * const pm8xxx_mpp_functions[] = {
+	"digital", "analog", "sink",
+};
+
+static int pm8xxx_mpp_update(struct pm8xxx_mpp *pctrl,
+			     struct pm8xxx_pin_data *pin)
+{
+	unsigned level;
+	unsigned ctrl;
+	unsigned type;
+	int ret;
+	u8 val;
+
+	switch (pin->mode) {
+	case PM8XXX_MPP_DIGITAL:
+		if (pin->dtest) {
+			type = PM8XXX_MPP_TYPE_DTEST_OUTPUT;
+			ctrl = pin->dtest - 1;
+		} else if (pin->input && pin->output) {
+			type = PM8XXX_MPP_TYPE_D_BI_DIR;
+			if (pin->high_z)
+				ctrl = PM8XXX_MPP_BI_PULLUP_OPEN;
+			else if (pin->pullup == 600)
+				ctrl = PM8XXX_MPP_BI_PULLUP_1KOHM;
+			else if (pin->pullup == 10000)
+				ctrl = PM8XXX_MPP_BI_PULLUP_10KOHM;
+			else
+				ctrl = PM8XXX_MPP_BI_PULLUP_30KOHM;
+		} else if (pin->input) {
+			type = PM8XXX_MPP_TYPE_D_INPUT;
+			if (pin->dtest)
+				ctrl = pin->dtest;
+			else
+				ctrl = PM8XXX_MPP_DIN_TO_INT;
+		} else {
+			type = PM8XXX_MPP_TYPE_D_OUTPUT;
+			ctrl = !!pin->output_value;
+			if (pin->paired)
+				ctrl |= BIT(1);
+		}
+
+		level = pin->power_source;
+		break;
+	case PM8XXX_MPP_ANALOG:
+		if (pin->output) {
+			type = PM8XXX_MPP_TYPE_A_OUTPUT;
+			level = pin->aout_level;
+			ctrl = pin->output_value;
+			if (pin->paired)
+				ctrl |= BIT(1);
+		} else {
+			type = PM8XXX_MPP_TYPE_A_INPUT;
+			level = pin->amux;
+			ctrl = 0;
+		}
+		break;
+	case PM8XXX_MPP_SINK:
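+		/* Sink current is programmed in 5 mA steps, starting at 5 mA */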
+		level = (pin->drive_strength / 5) - 1;
+		if (pin->dtest) {
+			type = PM8XXX_MPP_TYPE_DTEST_SINK;
+			ctrl = pin->dtest - 1;
+		} else {
+			type = PM8XXX_MPP_TYPE_SINK;
+			ctrl = pin->output_value;
+			if (pin->paired)
+				ctrl |= BIT(1);
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
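+	/* Pack the control register: type in bits 7:5, level in 4:2, ctrl in 1:0 */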
+	val = type << 5 | level << 2 | ctrl;
+	ret = regmap_write(pctrl->regmap, pin->reg, val);
+	if (ret)
+		dev_err(pctrl->dev, "failed to write register\n");
+
+	return ret;
+}
+
+static int pm8xxx_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	return pctrl->npins;
+}
+
+static const char *pm8xxx_get_group_name(struct pinctrl_dev *pctldev,
+					 unsigned group)
+{
+	return pm8xxx_groups[group];
+}
+
+static int pm8xxx_get_group_pins(struct pinctrl_dev *pctldev,
+				 unsigned group,
+				 const unsigned **pins,
+				 unsigned *num_pins)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins = &pctrl->desc.pins[group].number;
+	*num_pins = 1;
+
+	return 0;
+}
+
+static const struct pinctrl_ops pm8xxx_pinctrl_ops = {
+	.get_groups_count	= pm8xxx_get_groups_count,
+	.get_group_name		= pm8xxx_get_group_name,
+	.get_group_pins         = pm8xxx_get_group_pins,
+	.dt_node_to_map		= pinconf_generic_dt_node_to_map_group,
+	.dt_free_map		= pinctrl_utils_dt_free_map,
+};
+
+static int pm8xxx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+	return ARRAY_SIZE(pm8xxx_mpp_functions);
+}
+
+static const char *pm8xxx_get_function_name(struct pinctrl_dev *pctldev,
+					    unsigned function)
+{
+	return pm8xxx_mpp_functions[function];
+}
+
+static int pm8xxx_get_function_groups(struct pinctrl_dev *pctldev,
+				      unsigned function,
+				      const char * const **groups,
+				      unsigned * const num_groups)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = pm8xxx_groups;
+	*num_groups = pctrl->npins;
+	return 0;
+}
+
+static int pm8xxx_pinmux_set_mux(struct pinctrl_dev *pctldev,
+				 unsigned function,
+				 unsigned group)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[group].drv_data;
+
+	pin->mode = function;
+	pm8xxx_mpp_update(pctrl, pin);
+
+	return 0;
+}
+
+static const struct pinmux_ops pm8xxx_pinmux_ops = {
+	.get_functions_count	= pm8xxx_get_functions_count,
+	.get_function_name	= pm8xxx_get_function_name,
+	.get_function_groups	= pm8xxx_get_function_groups,
+	.set_mux		= pm8xxx_pinmux_set_mux,
+};
+
+static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
+				 unsigned int offset,
+				 unsigned long *config)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	unsigned param = pinconf_to_config_param(*config);
+	unsigned arg;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_PULL_UP:
+		arg = pin->pullup;
+		break;
+	case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+		arg = pin->high_z;
+		break;
+	case PIN_CONFIG_INPUT_ENABLE:
+		arg = pin->input;
+		break;
+	case PIN_CONFIG_OUTPUT:
+		arg = pin->output_value;
+		break;
+	case PIN_CONFIG_POWER_SOURCE:
+		arg = pin->power_source;
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		arg = pin->drive_strength;
+		break;
+	case PM8XXX_CONFIG_DTEST_SELECTOR:
+		arg = pin->dtest;
+		break;
+	case PM8XXX_CONFIG_AMUX:
+		arg = pin->amux;
+		break;
+	case PM8XXX_CONFIG_ALEVEL:
+		arg = pin->aout_level;
+		break;
+	case PM8XXX_CONFIG_PAIRED:
+		arg = pin->paired;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*config = pinconf_to_config_packed(param, arg);
+
+	return 0;
+}
+
+static int pm8xxx_pin_config_set(struct pinctrl_dev *pctldev,
+				 unsigned int offset,
+				 unsigned long *configs,
+				 unsigned num_configs)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	unsigned param;
+	unsigned arg;
+	unsigned i;
+
+	for (i = 0; i < num_configs; i++) {
+		param = pinconf_to_config_param(configs[i]);
+		arg = pinconf_to_config_argument(configs[i]);
+
+		switch (param) {
+		case PIN_CONFIG_BIAS_PULL_UP:
+			pin->pullup = arg;
+			break;
+		case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+			pin->high_z = true;
+			break;
+		case PIN_CONFIG_INPUT_ENABLE:
+			pin->input = true;
+			break;
+		case PIN_CONFIG_OUTPUT:
+			pin->output = true;
+			pin->output_value = !!arg;
+			break;
+		case PIN_CONFIG_POWER_SOURCE:
+			pin->power_source = arg;
+			break;
+		case PIN_CONFIG_DRIVE_STRENGTH:
+			pin->drive_strength = arg;
+			break;
+		case PM8XXX_CONFIG_DTEST_SELECTOR:
+			pin->dtest = arg;
+			break;
+		case PM8XXX_CONFIG_AMUX:
+			pin->amux = arg;
+			break;
+		case PM8XXX_CONFIG_ALEVEL:
+			pin->aout_level = arg;
+			break;
+		case PM8XXX_CONFIG_PAIRED:
+			pin->paired = !!arg;
+			break;
+		default:
+			dev_err(pctrl->dev,
+				"unsupported config parameter: %x\n",
+				param);
+			return -EINVAL;
+		}
+	}
+
+	pm8xxx_mpp_update(pctrl, pin);
+
+	return 0;
+}
+
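+/*
+ * Each pin group contains exactly one pin, so the group-based pinconf
+ * operations effectively configure individual pins.
+ */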
+static const struct pinconf_ops pm8xxx_pinconf_ops = {
+	.is_generic = true,
+	.pin_config_group_get = pm8xxx_pin_config_get,
+	.pin_config_group_set = pm8xxx_pin_config_set,
+};
+
+static struct pinctrl_desc pm8xxx_pinctrl_desc = {
+	.name = "pm8xxx_mpp",
+	.pctlops = &pm8xxx_pinctrl_ops,
+	.pmxops = &pm8xxx_pinmux_ops,
+	.confops = &pm8xxx_pinconf_ops,
+	.owner = THIS_MODULE,
+};
+
+static int pm8xxx_mpp_direction_input(struct gpio_chip *chip,
+				       unsigned offset)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	switch (pin->mode) {
+	case PM8XXX_MPP_DIGITAL:
+		pin->input = true;
+		break;
+	case PM8XXX_MPP_ANALOG:
+		pin->input = true;
+		pin->output = true;
+		break;
+	case PM8XXX_MPP_SINK:
+		return -EINVAL;
+	}
+
+	pm8xxx_mpp_update(pctrl, pin);
+
+	return 0;
+}
+
+static int pm8xxx_mpp_direction_output(struct gpio_chip *chip,
+					unsigned offset,
+					int value)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	switch (pin->mode) {
+	case PM8XXX_MPP_DIGITAL:
+		pin->output = true;
+		break;
+	case PM8XXX_MPP_ANALOG:
+		pin->input = false;
+		pin->output = true;
+		break;
+	case PM8XXX_MPP_SINK:
+		pin->input = false;
+		pin->output = true;
+		break;
+	}
+
+	pm8xxx_mpp_update(pctrl, pin);
+
+	return 0;
+}
+
+static int pm8xxx_mpp_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	bool state;
+	int ret;
+
+	if (!pin->input)
+		return pin->output_value;
+
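+	/* The input level is read back through the PMIC's interrupt controller */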
+	ret = irq_get_irqchip_state(pin->irq, IRQCHIP_STATE_LINE_LEVEL, &state);
+	if (!ret)
+		ret = !!state;
+
+	return ret;
+}
+
+static void pm8xxx_mpp_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	pin->output_value = !!value;
+
+	pm8xxx_mpp_update(pctrl, pin);
+}
+
+static int pm8xxx_mpp_of_xlate(struct gpio_chip *chip,
+				const struct of_phandle_args *gpio_desc,
+				u32 *flags)
+{
+	if (chip->of_gpio_n_cells < 2)
+		return -EINVAL;
+
+	if (flags)
+		*flags = gpio_desc->args[1];
+
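+	/* DT uses 1-based MPP numbers; the pin space is 0-based */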
+	return gpio_desc->args[0] - 1;
+}
+
+static int pm8xxx_mpp_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	return pin->irq;
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+static void pm8xxx_mpp_dbg_show_one(struct seq_file *s,
+				  struct pinctrl_dev *pctldev,
+				  struct gpio_chip *chip,
+				  unsigned offset,
+				  unsigned gpio)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	static const char * const aout_lvls[] = {
+		"1v25", "1v25_2", "0v625", "0v3125", "mpp", "abus1", "abus2",
+		"abus3"
+	};
+
+	static const char * const amuxs[] = {
+		"amux5", "amux6", "amux7", "amux8", "amux9", "abus1", "abus2",
+		"abus3",
+	};
+
+	seq_printf(s, " mpp%-2d:", offset + 1);
+
+	switch (pin->mode) {
+	case PM8XXX_MPP_DIGITAL:
+		seq_puts(s, " digital ");
+		if (pin->dtest) {
+			seq_printf(s, "dtest%d\n", pin->dtest);
+		} else if (pin->input && pin->output) {
+			if (pin->high_z)
+				seq_puts(s, "bi-dir high-z");
+			else
+				seq_printf(s, "bi-dir %dOhm", pin->pullup);
+		} else if (pin->input) {
+			if (pin->dtest)
+				seq_printf(s, "in dtest%d", pin->dtest);
+			else
+				seq_puts(s, "in gpio");
+		} else if (pin->output) {
+			seq_puts(s, "out ");
+
+			if (!pin->paired) {
+				seq_puts(s, pin->output_value ?
+					 "high" : "low");
+			} else {
+				seq_puts(s, pin->output_value ?
+					 "inverted" : "follow");
+			}
+		}
+		break;
+	case PM8XXX_MPP_ANALOG:
+		seq_puts(s, " analog ");
+		if (pin->output) {
+			seq_printf(s, "out %s ", aout_lvls[pin->aout_level]);
+			if (!pin->paired) {
+				seq_puts(s, pin->output_value ?
+					 "high" : "low");
+			} else {
+				seq_puts(s, pin->output_value ?
+					 "inverted" : "follow");
+			}
+		} else {
+			seq_printf(s, "input mux %s", amuxs[pin->amux]);
+		}
+		break;
+	case PM8XXX_MPP_SINK:
+		seq_printf(s, " sink %dmA ", pin->drive_strength);
+		if (pin->dtest) {
+			seq_printf(s, "dtest%d", pin->dtest);
+		} else {
+			if (!pin->paired) {
+				seq_puts(s, pin->output_value ?
+					 "high" : "low");
+			} else {
+				seq_puts(s, pin->output_value ?
+					 "inverted" : "follow");
+			}
+		}
+		break;
+	}
+}
+
+static void pm8xxx_mpp_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+	unsigned gpio = chip->base;
+	unsigned i;
+
+	for (i = 0; i < chip->ngpio; i++, gpio++) {
+		pm8xxx_mpp_dbg_show_one(s, NULL, chip, i, gpio);
+		seq_puts(s, "\n");
+	}
+}
+
+#else
+#define pm8xxx_mpp_dbg_show NULL
+#endif
+
+static struct gpio_chip pm8xxx_mpp_template = {
+	.direction_input = pm8xxx_mpp_direction_input,
+	.direction_output = pm8xxx_mpp_direction_output,
+	.get = pm8xxx_mpp_get,
+	.set = pm8xxx_mpp_set,
+	.of_xlate = pm8xxx_mpp_of_xlate,
+	.to_irq = pm8xxx_mpp_to_irq,
+	.dbg_show = pm8xxx_mpp_dbg_show,
+	.owner = THIS_MODULE,
+};
+
+static int pm8xxx_pin_populate(struct pm8xxx_mpp *pctrl,
+			       struct pm8xxx_pin_data *pin)
+{
+	unsigned int val;
+	unsigned level;
+	unsigned ctrl;
+	unsigned type;
+	int ret;
+
+	ret = regmap_read(pctrl->regmap, pin->reg, &val);
+	if (ret) {
+		dev_err(pctrl->dev, "failed to read register\n");
+		return ret;
+	}
+
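+	/* Unpack the register: type in bits 7:5, level in 4:2, ctrl in 1:0 */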
+	type = (val >> 5) & 7;
+	level = (val >> 2) & 7;
+	ctrl = (val) & 3;
+
+	switch (type) {
+	case PM8XXX_MPP_TYPE_D_INPUT:
+		pin->mode = PM8XXX_MPP_DIGITAL;
+		pin->input = true;
+		pin->power_source = level;
+		pin->dtest = ctrl;
+		break;
+	case PM8XXX_MPP_TYPE_D_OUTPUT:
+		pin->mode = PM8XXX_MPP_DIGITAL;
+		pin->output = true;
+		pin->power_source = level;
+		pin->output_value = !!(ctrl & BIT(0));
+		pin->paired = !!(ctrl & BIT(1));
+		break;
+	case PM8XXX_MPP_TYPE_D_BI_DIR:
+		pin->mode = PM8XXX_MPP_DIGITAL;
+		pin->input = true;
+		pin->output = true;
+		pin->power_source = level;
+		switch (ctrl) {
+		case PM8XXX_MPP_BI_PULLUP_1KOHM:
+			pin->pullup = 600;
+			break;
+		case PM8XXX_MPP_BI_PULLUP_OPEN:
+			pin->high_z = true;
+			break;
+		case PM8XXX_MPP_BI_PULLUP_10KOHM:
+			pin->pullup = 10000;
+			break;
+		case PM8XXX_MPP_BI_PULLUP_30KOHM:
+			pin->pullup = 30000;
+			break;
+		}
+		break;
+	case PM8XXX_MPP_TYPE_A_INPUT:
+		pin->mode = PM8XXX_MPP_ANALOG;
+		pin->input = true;
+		pin->amux = level;
+		break;
+	case PM8XXX_MPP_TYPE_A_OUTPUT:
+		pin->mode = PM8XXX_MPP_ANALOG;
+		pin->output = true;
+		pin->aout_level = level;
+		pin->output_value = !!(ctrl & BIT(0));
+		pin->paired = !!(ctrl & BIT(1));
+		break;
+	case PM8XXX_MPP_TYPE_SINK:
+		pin->mode = PM8XXX_MPP_SINK;
+		pin->drive_strength = 5 * (level + 1);
+		pin->output_value = !!(ctrl & BIT(0));
+		pin->paired = !!(ctrl & BIT(1));
+		break;
+	case PM8XXX_MPP_TYPE_DTEST_SINK:
+		pin->mode = PM8XXX_MPP_SINK;
+		pin->dtest = ctrl + 1;
+		pin->drive_strength = 5 * (level + 1);
+		break;
+	case PM8XXX_MPP_TYPE_DTEST_OUTPUT:
+		pin->mode = PM8XXX_MPP_DIGITAL;
+		pin->power_source = level;
+		if (ctrl >= 1)
+			pin->dtest = ctrl;
+		break;
+	}
+
+	return 0;
+}
+
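+/* The of_match data encodes the number of MPPs provided by each PMIC */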
+static const struct of_device_id pm8xxx_mpp_of_match[] = {
+	{ .compatible = "qcom,pm8018-mpp", .data = (void *)6 },
+	{ .compatible = "qcom,pm8038-mpp", .data = (void *)6 },
+	{ .compatible = "qcom,pm8917-mpp", .data = (void *)10 },
+	{ .compatible = "qcom,pm8821-mpp", .data = (void *)4 },
+	{ .compatible = "qcom,pm8921-mpp", .data = (void *)12 },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, pm8xxx_mpp_of_match);
+
+static int pm8xxx_mpp_probe(struct platform_device *pdev)
+{
+	struct pm8xxx_pin_data *pin_data;
+	struct pinctrl_pin_desc *pins;
+	struct pm8xxx_mpp *pctrl;
+	int ret;
+	int i;
+
+	pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+	if (!pctrl)
+		return -ENOMEM;
+
+	pctrl->dev = &pdev->dev;
+	pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev);
+
+	pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!pctrl->regmap) {
+		dev_err(&pdev->dev, "parent regmap unavailable\n");
+		return -ENXIO;
+	}
+
+	pctrl->desc = pm8xxx_pinctrl_desc;
+	pctrl->desc.npins = pctrl->npins;
+
+	pins = devm_kcalloc(&pdev->dev,
+			    pctrl->desc.npins,
+			    sizeof(struct pinctrl_pin_desc),
+			    GFP_KERNEL);
+	if (!pins)
+		return -ENOMEM;
+
+	pin_data = devm_kcalloc(&pdev->dev,
+				pctrl->desc.npins,
+				sizeof(struct pm8xxx_pin_data),
+				GFP_KERNEL);
+	if (!pin_data)
+		return -ENOMEM;
+
+	for (i = 0; i < pctrl->desc.npins; i++) {
+		pin_data[i].reg = SSBI_REG_ADDR_MPP(i);
+		pin_data[i].irq = platform_get_irq(pdev, i);
+		if (pin_data[i].irq < 0) {
+			dev_err(&pdev->dev,
+				"missing interrupts for pin %d\n", i);
+			return pin_data[i].irq;
+		}
+
+		ret = pm8xxx_pin_populate(pctrl, &pin_data[i]);
+		if (ret)
+			return ret;
+
+		pins[i].number = i;
+		pins[i].name = pm8xxx_groups[i];
+		pins[i].drv_data = &pin_data[i];
+	}
+	pctrl->desc.pins = pins;
+
+	pctrl->desc.num_custom_params = ARRAY_SIZE(pm8xxx_mpp_bindings);
+	pctrl->desc.custom_params = pm8xxx_mpp_bindings;
+#ifdef CONFIG_DEBUG_FS
+	pctrl->desc.custom_conf_items = pm8xxx_conf_items;
+#endif
+
+	pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
+	if (!pctrl->pctrl) {
+		dev_err(&pdev->dev, "couldn't register pm8xxx mpp driver\n");
+		return -ENODEV;
+	}
+
+	pctrl->chip = pm8xxx_mpp_template;
+	pctrl->chip.base = -1;
+	pctrl->chip.dev = &pdev->dev;
+	pctrl->chip.of_node = pdev->dev.of_node;
+	pctrl->chip.of_gpio_n_cells = 2;
+	pctrl->chip.label = dev_name(pctrl->dev);
+	pctrl->chip.ngpio = pctrl->npins;
+	ret = gpiochip_add(&pctrl->chip);
+	if (ret) {
+		dev_err(&pdev->dev, "failed register gpiochip\n");
+		goto unregister_pinctrl;
+	}
+
+	ret = gpiochip_add_pin_range(&pctrl->chip,
+				     dev_name(pctrl->dev),
+				     0, 0, pctrl->chip.ngpio);
+	if (ret) {
+		dev_err(pctrl->dev, "failed to add pin range\n");
+		goto unregister_gpiochip;
+	}
+
+	platform_set_drvdata(pdev, pctrl);
+
+	dev_dbg(&pdev->dev, "Qualcomm pm8xxx mpp driver probed\n");
+
+	return 0;
+
+unregister_gpiochip:
+	gpiochip_remove(&pctrl->chip);
+
+unregister_pinctrl:
+	pinctrl_unregister(pctrl->pctrl);
+
+	return ret;
+}
+
+static int pm8xxx_mpp_remove(struct platform_device *pdev)
+{
+	struct pm8xxx_mpp *pctrl = platform_get_drvdata(pdev);
+
+	gpiochip_remove(&pctrl->chip);
+
+	pinctrl_unregister(pctrl->pctrl);
+
+	return 0;
+}
+
+static struct platform_driver pm8xxx_mpp_driver = {
+	.driver = {
+		.name = "qcom-ssbi-mpp",
+		.of_match_table = pm8xxx_mpp_of_match,
+	},
+	.probe = pm8xxx_mpp_probe,
+	.remove = pm8xxx_mpp_remove,
+};
+
+module_platform_driver(pm8xxx_mpp_driver);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm PM8xxx MPP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index b18dabb..5f45caa 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -148,9 +148,9 @@
 	}
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(irqd->irq, handle_edge_irq);
+		irq_set_handler_locked(irqd, handle_edge_irq);
 	else
-		__irq_set_handler_locked(irqd->irq, handle_level_irq);
+		irq_set_handler_locked(irqd, handle_level_irq);
 
 	con = readl(d->virt_base + reg_con);
 	con &= ~(EXYNOS_EINT_CON_MASK << shift);
@@ -256,7 +256,6 @@
 	irq_set_chip_data(virq, b);
 	irq_set_chip_and_handler(virq, &b->irq_chip->chip,
 					handle_level_irq);
-	set_irq_flags(virq, IRQF_VALID);
 	return 0;
 }
 
@@ -422,9 +421,9 @@
 /* interrupt handler for wakeup interrupts 0..15 */
 static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
 {
-	struct exynos_weint_data *eintd = irq_get_handler_data(irq);
+	struct exynos_weint_data *eintd = irq_desc_get_handler_data(desc);
 	struct samsung_pin_bank *bank = eintd->bank;
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int eint_irq;
 
 	chained_irq_enter(chip, desc);
@@ -454,8 +453,8 @@
 /* interrupt handler for wakeup interrupt 16 */
 static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct exynos_muxed_weint_data *eintd = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
 	struct samsung_pinctrl_drv_data *d = eintd->banks[0]->drvdata;
 	unsigned long pend;
 	unsigned long mask;
@@ -542,8 +541,9 @@
 			}
 			weint_data[idx].irq = idx;
 			weint_data[idx].bank = bank;
-			irq_set_handler_data(irq, &weint_data[idx]);
-			irq_set_chained_handler(irq, exynos_irq_eint0_15);
+			irq_set_chained_handler_and_data(irq,
+							 exynos_irq_eint0_15,
+							 &weint_data[idx]);
 		}
 	}
 
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
index f5619fb..9ce0b86 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
@@ -44,9 +44,7 @@
 #define PIN_NAME_LENGTH		10
 
 #define GROUP_SUFFIX		"-grp"
-#define GSUFFIX_LEN		sizeof(GROUP_SUFFIX)
 #define FUNCTION_SUFFIX		"-mux"
-#define FSUFFIX_LEN		sizeof(FUNCTION_SUFFIX)
 
 /*
  * pin configuration type and its value are packed together into a 16-bits.
@@ -205,22 +203,17 @@
 
 	/* Allocate memory for pin-map entries */
 	map = kzalloc(sizeof(*map) * map_cnt, GFP_KERNEL);
-	if (!map) {
-		dev_err(dev, "could not alloc memory for pin-maps\n");
+	if (!map)
 		return -ENOMEM;
-	}
 	*nmaps = 0;
 
 	/*
 	 * Allocate memory for pin group name. The pin group name is derived
 	 * from the node name from which these map entries are be created.
 	 */
-	gname = kzalloc(strlen(np->name) + GSUFFIX_LEN, GFP_KERNEL);
-	if (!gname) {
-		dev_err(dev, "failed to alloc memory for group name\n");
+	gname = kasprintf(GFP_KERNEL, "%s%s", np->name, GROUP_SUFFIX);
+	if (!gname)
 		goto free_map;
-	}
-	snprintf(gname, strlen(np->name) + 4, "%s%s", np->name, GROUP_SUFFIX);
 
 	/*
 	 * don't have config options? then skip over to creating function
@@ -231,10 +224,8 @@
 
 	/* Allocate memory for config entries */
 	cfg = kzalloc(sizeof(*cfg) * cfg_cnt, GFP_KERNEL);
-	if (!cfg) {
-		dev_err(dev, "failed to alloc memory for configs\n");
+	if (!cfg)
 		goto free_gname;
-	}
 
 	/* Prepare a list of config settings */
 	for (idx = 0, cfg_cnt = 0; idx < ARRAY_SIZE(pcfgs); idx++) {
@@ -254,13 +245,10 @@
 skip_cfgs:
 	/* create the function map entry */
 	if (of_find_property(np, "samsung,exynos5440-pin-function", NULL)) {
-		fname = kzalloc(strlen(np->name) + FSUFFIX_LEN,	GFP_KERNEL);
-		if (!fname) {
-			dev_err(dev, "failed to alloc memory for func name\n");
+		fname = kasprintf(GFP_KERNEL,
+				  "%s%s", np->name, FUNCTION_SUFFIX);
+		if (!fname)
 			goto free_cfg;
-		}
-		snprintf(fname, strlen(np->name) + 4, "%s%s", np->name,
-			 FUNCTION_SUFFIX);
 
 		map[*nmaps].data.mux.group = gname;
 		map[*nmaps].data.mux.function = fname;
@@ -651,10 +639,8 @@
 	}
 
 	*pin_list = devm_kzalloc(dev, *npins * sizeof(**pin_list), GFP_KERNEL);
-	if (!*pin_list) {
-		dev_err(dev, "failed to allocate memory for pin list\n");
+	if (!*pin_list)
 		return -ENOMEM;
-	}
 
 	return of_property_read_u32_array(cfg_np, "samsung,exynos5440-pins",
 			*pin_list, *npins);
@@ -682,17 +668,15 @@
 		return -EINVAL;
 
 	groups = devm_kzalloc(dev, grp_cnt * sizeof(*groups), GFP_KERNEL);
-	if (!groups) {
-		dev_err(dev, "failed allocate memory for ping group list\n");
+	if (!groups)
 		return -EINVAL;
-	}
+
 	grp = groups;
 
 	functions = devm_kzalloc(dev, grp_cnt * sizeof(*functions), GFP_KERNEL);
-	if (!functions) {
-		dev_err(dev, "failed to allocate memory for function list\n");
+	if (!functions)
 		return -EINVAL;
-	}
+
 	func = functions;
 
 	/*
@@ -710,14 +694,10 @@
 		}
 
 		/* derive pin group name from the node name */
-		gname = devm_kzalloc(dev, strlen(cfg_np->name) + GSUFFIX_LEN,
-					GFP_KERNEL);
-		if (!gname) {
-			dev_err(dev, "failed to alloc memory for group name\n");
+		gname = devm_kasprintf(dev, GFP_KERNEL,
+				       "%s%s", cfg_np->name, GROUP_SUFFIX);
+		if (!gname)
 			return -ENOMEM;
-		}
-		snprintf(gname, strlen(cfg_np->name) + 4, "%s%s", cfg_np->name,
-			 GROUP_SUFFIX);
 
 		grp->name = gname;
 		grp->pins = pin_list;
@@ -731,22 +711,15 @@
 			continue;
 
 		/* derive function name from the node name */
-		fname = devm_kzalloc(dev, strlen(cfg_np->name) + FSUFFIX_LEN,
-					GFP_KERNEL);
-		if (!fname) {
-			dev_err(dev, "failed to alloc memory for func name\n");
+		fname = devm_kasprintf(dev, GFP_KERNEL,
+				       "%s%s", cfg_np->name, FUNCTION_SUFFIX);
+		if (!fname)
 			return -ENOMEM;
-		}
-		snprintf(fname, strlen(cfg_np->name) + 4, "%s%s", cfg_np->name,
-			 FUNCTION_SUFFIX);
 
 		func->name = fname;
 		func->groups = devm_kzalloc(dev, sizeof(char *), GFP_KERNEL);
-		if (!func->groups) {
-			dev_err(dev, "failed to alloc memory for group list "
-					"in pin function");
+		if (!func->groups)
 			return -ENOMEM;
-		}
 		func->groups[0] = gname;
 		func->num_groups = gname ? 1 : 0;
 		func->function = function;
@@ -774,10 +747,8 @@
 	int pin, ret;
 
 	ctrldesc = devm_kzalloc(dev, sizeof(*ctrldesc), GFP_KERNEL);
-	if (!ctrldesc) {
-		dev_err(dev, "could not allocate memory for pinctrl desc\n");
+	if (!ctrldesc)
 		return -ENOMEM;
-	}
 
 	ctrldesc->name = "exynos5440-pinctrl";
 	ctrldesc->owner = THIS_MODULE;
@@ -787,10 +758,8 @@
 
 	pindesc = devm_kzalloc(&pdev->dev, sizeof(*pindesc) *
 				EXYNOS5440_MAX_PINS, GFP_KERNEL);
-	if (!pindesc) {
-		dev_err(&pdev->dev, "mem alloc for pin descriptors failed\n");
+	if (!pindesc)
 		return -ENOMEM;
-	}
 	ctrldesc->pins = pindesc;
 	ctrldesc->npins = EXYNOS5440_MAX_PINS;
 
@@ -804,10 +773,8 @@
 	 */
 	pin_names = devm_kzalloc(&pdev->dev, sizeof(char) * PIN_NAME_LENGTH *
 					ctrldesc->npins, GFP_KERNEL);
-	if (!pin_names) {
-		dev_err(&pdev->dev, "mem alloc for pin names failed\n");
+	if (!pin_names)
 		return -ENOMEM;
-	}
 
 	/* for each pin, set the name of the pin */
 	for (pin = 0; pin < ctrldesc->npins; pin++) {
@@ -844,10 +811,8 @@
 	int ret;
 
 	gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
-	if (!gc) {
-		dev_err(&pdev->dev, "mem alloc for gpio_chip failed\n");
+	if (!gc)
 		return -ENOMEM;
-	}
 
 	priv->gc = gc;
 	gc->base = 0;
@@ -929,7 +894,6 @@
 	irq_set_chip_data(virq, d);
 	irq_set_chip_and_handler(virq, &exynos5440_gpio_irq_chip,
 					handle_level_irq);
-	set_irq_flags(virq, IRQF_VALID);
 	return 0;
 }
 
@@ -949,10 +913,8 @@
 
 	intd = devm_kzalloc(dev, sizeof(*intd) * EXYNOS5440_MAX_GPIO_INT,
 					GFP_KERNEL);
-	if (!intd) {
-		dev_err(dev, "failed to allocate memory for gpio intr data\n");
+	if (!intd)
 		return -ENOMEM;
-	}
 
 	for (i = 0; i < EXYNOS5440_MAX_GPIO_INT; i++) {
 		irq = irq_of_parse_and_map(dev->of_node, i);
@@ -995,10 +957,8 @@
 	}
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		dev_err(dev, "could not allocate memory for private data\n");
+	if (!priv)
 		return -ENOMEM;
-	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index 01b43db..019844d 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -131,13 +131,13 @@
 	}
 }
 
-static void s3c24xx_eint_set_handler(unsigned int irq, unsigned int type)
+static void s3c24xx_eint_set_handler(struct irq_data *d, unsigned int type)
 {
 	/* Edge- and level-triggered interrupts need different handlers */
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else
-		__irq_set_handler_locked(irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 }
 
 static void s3c24xx_eint_set_function(struct samsung_pinctrl_drv_data *d,
@@ -181,7 +181,7 @@
 		return -EINVAL;
 	}
 
-	s3c24xx_eint_set_handler(data->irq, type);
+	s3c24xx_eint_set_handler(data, type);
 
 	/* Set up interrupt trigger */
 	reg = d->virt_base + EINT_REG(index);
@@ -243,7 +243,7 @@
 static void s3c2410_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
 {
 	struct irq_data *data = irq_desc_get_irq_data(desc);
-	struct s3c24xx_eint_data *eint_data = irq_get_handler_data(irq);
+	struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
 	unsigned int virq;
 
 	/* the first 4 eints have a simple 1 to 1 mapping */
@@ -297,9 +297,9 @@
 
 static void s3c2412_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
 	struct irq_data *data = irq_desc_get_irq_data(desc);
-	struct s3c24xx_eint_data *eint_data = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	unsigned int virq;
 
 	chained_irq_enter(chip, desc);
@@ -357,11 +357,11 @@
 	.irq_set_type	= s3c24xx_eint_type,
 };
 
-static inline void s3c24xx_demux_eint(unsigned int irq, struct irq_desc *desc,
+static inline void s3c24xx_demux_eint(struct irq_desc *desc,
 				      u32 offset, u32 range)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct s3c24xx_eint_data *data = irq_get_handler_data(irq);
+	struct s3c24xx_eint_data *data = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_irq_chip(desc);
 	struct samsung_pinctrl_drv_data *d = data->drvdata;
 	unsigned int pend, mask;
 
@@ -374,7 +374,7 @@
 	pend &= range;
 
 	while (pend) {
-		unsigned int virq;
+		unsigned int virq, irq;
 
 		irq = __ffs(pend);
 		pend &= ~(1 << irq);
@@ -390,12 +390,12 @@
 
 static void s3c24xx_demux_eint4_7(unsigned int irq, struct irq_desc *desc)
 {
-	s3c24xx_demux_eint(irq, desc, 0, 0xf0);
+	s3c24xx_demux_eint(desc, 0, 0xf0);
 }
 
 static void s3c24xx_demux_eint8_23(unsigned int irq, struct irq_desc *desc)
 {
-	s3c24xx_demux_eint(irq, desc, 8, 0xffff00);
+	s3c24xx_demux_eint(desc, 8, 0xffff00);
 }
 
 static irq_flow_handler_t s3c2410_eint_handlers[NUM_EINT_IRQ] = {
@@ -437,7 +437,6 @@
 					 handle_edge_irq);
 	}
 	irq_set_chip_data(virq, bank);
-	set_irq_flags(virq, IRQF_VALID);
 	return 0;
 }
 
@@ -457,7 +456,6 @@
 
 	irq_set_chip_and_handler(virq, &s3c24xx_eint_chip, handle_edge_irq);
 	irq_set_chip_data(virq, bank);
-	set_irq_flags(virq, IRQF_VALID);
 	return 0;
 }
 
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index ec8cc3b..f5ea40a 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -260,13 +260,13 @@
 	return trigger;
 }
 
-static void s3c64xx_irq_set_handler(unsigned int irq, unsigned int type)
+static void s3c64xx_irq_set_handler(struct irq_data *d, unsigned int type)
 {
 	/* Edge- and level-triggered interrupts need different handlers */
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else
-		__irq_set_handler_locked(irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 }
 
 static void s3c64xx_irq_set_function(struct samsung_pinctrl_drv_data *d,
@@ -356,7 +356,7 @@
 		return -EINVAL;
 	}
 
-	s3c64xx_irq_set_handler(irqd->irq, type);
+	s3c64xx_irq_set_handler(irqd, type);
 
 	/* Set up interrupt trigger */
 	reg = d->virt_base + EINTCON_REG(bank->eint_offset);
@@ -395,7 +395,6 @@
 	irq_set_chip_and_handler(virq,
 				&s3c64xx_gpio_irq_chip, handle_level_irq);
 	irq_set_chip_data(virq, bank);
-	set_irq_flags(virq, IRQF_VALID);
 
 	return 0;
 }
@@ -410,8 +409,8 @@
 
 static void s3c64xx_eint_gpio_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct s3c64xx_eint_gpio_data *data = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct s3c64xx_eint_gpio_data *data = irq_desc_get_handler_data(desc);
 	struct samsung_pinctrl_drv_data *drvdata = data->drvdata;
 
 	chained_irq_enter(chip, desc);
@@ -567,7 +566,7 @@
 		return -EINVAL;
 	}
 
-	s3c64xx_irq_set_handler(irqd->irq, type);
+	s3c64xx_irq_set_handler(irqd, type);
 
 	/* Set up interrupt trigger */
 	reg = d->virt_base + EINT0CON0_REG;
@@ -599,11 +598,10 @@
 	.irq_set_type	= s3c64xx_eint0_irq_set_type,
 };
 
-static inline void s3c64xx_irq_demux_eint(unsigned int irq,
-					struct irq_desc *desc, u32 range)
+static inline void s3c64xx_irq_demux_eint(struct irq_desc *desc, u32 range)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct s3c64xx_eint0_data *data = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct s3c64xx_eint0_data *data = irq_desc_get_handler_data(desc);
 	struct samsung_pinctrl_drv_data *drvdata = data->drvdata;
 	unsigned int pend, mask;
 
@@ -616,11 +614,10 @@
 	pend &= range;
 
 	while (pend) {
-		unsigned int virq;
+		unsigned int virq, irq;
 
 		irq = fls(pend) - 1;
 		pend &= ~(1 << irq);
-
 		virq = irq_linear_revmap(data->domains[irq], data->pins[irq]);
 		/*
 		 * Something must be really wrong if an unmapped EINT
@@ -636,22 +633,22 @@
 
 static void s3c64xx_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
 {
-	s3c64xx_irq_demux_eint(irq, desc, 0xf);
+	s3c64xx_irq_demux_eint(desc, 0xf);
 }
 
 static void s3c64xx_demux_eint4_11(unsigned int irq, struct irq_desc *desc)
 {
-	s3c64xx_irq_demux_eint(irq, desc, 0xff0);
+	s3c64xx_irq_demux_eint(desc, 0xff0);
 }
 
 static void s3c64xx_demux_eint12_19(unsigned int irq, struct irq_desc *desc)
 {
-	s3c64xx_irq_demux_eint(irq, desc, 0xff000);
+	s3c64xx_irq_demux_eint(desc, 0xff000);
 }
 
 static void s3c64xx_demux_eint20_27(unsigned int irq, struct irq_desc *desc)
 {
-	s3c64xx_irq_demux_eint(irq, desc, 0xff00000);
+	s3c64xx_irq_demux_eint(desc, 0xff00000);
 }
 
 static irq_flow_handler_t s3c64xx_eint0_handlers[NUM_EINT0_IRQ] = {
@@ -673,7 +670,6 @@
 	irq_set_chip_and_handler(virq,
 				&s3c64xx_eint0_irq_chip, handle_level_irq);
 	irq_set_chip_data(virq, ddata);
-	set_irq_flags(virq, IRQF_VALID);
 
 	return 0;
 }
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 865d235..fb9c448 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -29,24 +29,25 @@
 static int sh_pfc_map_resources(struct sh_pfc *pfc,
 				struct platform_device *pdev)
 {
-	unsigned int num_windows = 0;
-	unsigned int num_irqs = 0;
+	unsigned int num_windows, num_irqs;
 	struct sh_pfc_window *windows;
 	unsigned int *irqs = NULL;
 	struct resource *res;
 	unsigned int i;
+	int irq;
 
 	/* Count the MEM and IRQ resources. */
-	for (i = 0; i < pdev->num_resources; ++i) {
-		switch (resource_type(&pdev->resource[i])) {
-		case IORESOURCE_MEM:
-			num_windows++;
+	for (num_windows = 0;; num_windows++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, num_windows);
+		if (!res)
 			break;
-
-		case IORESOURCE_IRQ:
-			num_irqs++;
+	}
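+	/* Propagate -EPROBE_DEFER if the interrupt controller isn't ready yet */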
+	for (num_irqs = 0;; num_irqs++) {
+		irq = platform_get_irq(pdev, num_irqs);
+		if (irq == -EPROBE_DEFER)
+			return irq;
+		if (irq < 0)
 			break;
-		}
 	}
 
 	if (num_windows == 0)
@@ -72,22 +73,17 @@
 	}
 
 	/* Fill them. */
-	for (i = 0, res = pdev->resource; i < pdev->num_resources; i++, res++) {
-		switch (resource_type(res)) {
-		case IORESOURCE_MEM:
-			windows->phys = res->start;
-			windows->size = resource_size(res);
-			windows->virt = devm_ioremap_resource(pfc->dev, res);
-			if (IS_ERR(windows->virt))
-				return -ENOMEM;
-			windows++;
-			break;
-
-		case IORESOURCE_IRQ:
-			*irqs++ = res->start;
-			break;
-		}
+	for (i = 0; i < num_windows; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		windows->phys = res->start;
+		windows->size = resource_size(res);
+		windows->virt = devm_ioremap_resource(pfc->dev, res);
+		if (IS_ERR(windows->virt))
+			return -ENOMEM;
+		windows++;
 	}
+	for (i = 0; i < num_irqs; i++)
+		*irqs++ = platform_get_irq(pdev, i);
 
 	return 0;
 }
@@ -591,9 +587,6 @@
 }
 
 static const struct platform_device_id sh_pfc_id_table[] = {
-#ifdef CONFIG_PINCTRL_PFC_R8A7740
-	{ "pfc-r8a7740", (kernel_ulong_t)&r8a7740_pinmux_info },
-#endif
 #ifdef CONFIG_PINCTRL_PFC_R8A7778
 	{ "pfc-r8a7778", (kernel_ulong_t)&r8a7778_pinmux_info },
 #endif
@@ -609,9 +602,6 @@
 #ifdef CONFIG_PINCTRL_PFC_SH7269
 	{ "pfc-sh7269", (kernel_ulong_t)&sh7269_pinmux_info },
 #endif
-#ifdef CONFIG_PINCTRL_PFC_SH73A0
-	{ "pfc-sh73a0", (kernel_ulong_t)&sh73a0_pinmux_info },
-#endif
 #ifdef CONFIG_PINCTRL_PFC_SH7720
 	{ "pfc-sh7720", (kernel_ulong_t)&sh7720_pinmux_info },
 #endif
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
index d0bb145..82ef186 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -22,10 +22,6 @@
 #include <linux/kernel.h>
 #include <linux/pinctrl/pinconf-generic.h>
 
-#ifndef CONFIG_ARCH_MULTIPLATFORM
-#include <mach/irqs.h>
-#endif
-
 #include "core.h"
 #include "sh_pfc.h"
 
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index baab81e..fc344a7 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -27,10 +27,27 @@
 #include "core.h"
 #include "sh_pfc.h"
 
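+/* GPIO banks 1 and 2 have 30 pins each instead of 32 */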
+#define PORT_GP_30(bank, fn, sfx)					\
+	PORT_GP_1(bank, 0,  fn, sfx), PORT_GP_1(bank, 1,  fn, sfx),	\
+	PORT_GP_1(bank, 2,  fn, sfx), PORT_GP_1(bank, 3,  fn, sfx),	\
+	PORT_GP_1(bank, 4,  fn, sfx), PORT_GP_1(bank, 5,  fn, sfx),	\
+	PORT_GP_1(bank, 6,  fn, sfx), PORT_GP_1(bank, 7,  fn, sfx),	\
+	PORT_GP_1(bank, 8,  fn, sfx), PORT_GP_1(bank, 9,  fn, sfx),	\
+	PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx),	\
+	PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx),	\
+	PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx),	\
+	PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx),	\
+	PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx),	\
+	PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx),	\
+	PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx),	\
+	PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx),	\
+	PORT_GP_1(bank, 26, fn, sfx), PORT_GP_1(bank, 27, fn, sfx),	\
+	PORT_GP_1(bank, 28, fn, sfx), PORT_GP_1(bank, 29, fn, sfx)
+
 #define CPU_ALL_PORT(fn, sfx)						\
 	PORT_GP_32(0, fn, sfx),						\
-	PORT_GP_32(1, fn, sfx),						\
-	PORT_GP_32(2, fn, sfx),						\
+	PORT_GP_30(1, fn, sfx),						\
+	PORT_GP_30(2, fn, sfx),						\
 	PORT_GP_32(3, fn, sfx),						\
 	PORT_GP_32(4, fn, sfx),						\
 	PORT_GP_32(5, fn, sfx)
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 3ddf23e..25e8117 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -14,15 +14,30 @@
 #include "core.h"
 #include "sh_pfc.h"
 
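+/* GPIO banks 1 and 7 have 26 pins each instead of 32 */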
+#define PORT_GP_26(bank, fn, sfx)					\
+	PORT_GP_1(bank, 0,  fn, sfx), PORT_GP_1(bank, 1,  fn, sfx),	\
+	PORT_GP_1(bank, 2,  fn, sfx), PORT_GP_1(bank, 3,  fn, sfx),	\
+	PORT_GP_1(bank, 4,  fn, sfx), PORT_GP_1(bank, 5,  fn, sfx),	\
+	PORT_GP_1(bank, 6,  fn, sfx), PORT_GP_1(bank, 7,  fn, sfx),	\
+	PORT_GP_1(bank, 8,  fn, sfx), PORT_GP_1(bank, 9,  fn, sfx),	\
+	PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx),	\
+	PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx),	\
+	PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx),	\
+	PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx),	\
+	PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx),	\
+	PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx),	\
+	PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx),	\
+	PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx)
+
 #define CPU_ALL_PORT(fn, sfx)						\
 	PORT_GP_32(0, fn, sfx),						\
-	PORT_GP_32(1, fn, sfx),						\
+	PORT_GP_26(1, fn, sfx),						\
 	PORT_GP_32(2, fn, sfx),						\
 	PORT_GP_32(3, fn, sfx),						\
 	PORT_GP_32(4, fn, sfx),						\
 	PORT_GP_32(5, fn, sfx),						\
 	PORT_GP_32(6, fn, sfx),						\
-	PORT_GP_32(7, fn, sfx)
+	PORT_GP_26(7, fn, sfx)
 
 enum {
 	PINMUX_RESERVED = 0,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index bfdcac4..5248685 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -2770,6 +2770,24 @@
 static const unsigned int sdhi2_wp_mux[] = {
 	SD2_WP_MARK,
 };
+/* - USB0 ------------------------------------------------------------------- */
+static const unsigned int usb0_pins[] = {
+	RCAR_GP_PIN(5, 24), /* PWEN */
+	RCAR_GP_PIN(5, 25), /* OVC */
+};
+static const unsigned int usb0_mux[] = {
+	USB0_PWEN_MARK,
+	USB0_OVC_MARK,
+};
+/* - USB1 ------------------------------------------------------------------- */
+static const unsigned int usb1_pins[] = {
+	RCAR_GP_PIN(5, 26), /* PWEN */
+	RCAR_GP_PIN(5, 27), /* OVC */
+};
+static const unsigned int usb1_mux[] = {
+	USB1_PWEN_MARK,
+	USB1_OVC_MARK,
+};
 
 static const struct sh_pfc_pin_group pinmux_groups[] = {
 	SH_PFC_PIN_GROUP(eth_link),
@@ -2945,6 +2963,8 @@
 	SH_PFC_PIN_GROUP(sdhi2_ctrl),
 	SH_PFC_PIN_GROUP(sdhi2_cd),
 	SH_PFC_PIN_GROUP(sdhi2_wp),
+	SH_PFC_PIN_GROUP(usb0),
+	SH_PFC_PIN_GROUP(usb1),
 };
 
 static const char * const eth_groups[] = {
@@ -3219,6 +3239,14 @@
 	"sdhi2_wp",
 };
 
+static const char * const usb0_groups[] = {
+	"usb0",
+};
+
+static const char * const usb1_groups[] = {
+	"usb1",
+};
+
 static const struct sh_pfc_function pinmux_functions[] = {
 	SH_PFC_FUNCTION(eth),
 	SH_PFC_FUNCTION(hscif0),
@@ -3253,6 +3281,8 @@
 	SH_PFC_FUNCTION(sdhi0),
 	SH_PFC_FUNCTION(sdhi1),
 	SH_PFC_FUNCTION(sdhi2),
+	SH_PFC_FUNCTION(usb0),
+	SH_PFC_FUNCTION(usb1),
 };
 
 static const struct pinmux_cfg_reg pinmux_config_regs[] = {
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index d2efbfb..0975265 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -26,10 +26,6 @@
 #include <linux/regulator/machine.h>
 #include <linux/slab.h>
 
-#ifndef CONFIG_ARCH_MULTIPLATFORM
-#include <mach/irqs.h>
-#endif
-
 #include "core.h"
 #include "sh_pfc.h"
 
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index ff67896..863c3e3 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -40,6 +40,10 @@
 
 	struct pinctrl_pin_desc *pins;
 	struct sh_pfc_pin_config *configs;
+
+	const char *func_prop_name;
+	const char *groups_prop_name;
+	const char *pins_prop_name;
 };
 
 static int sh_pfc_get_groups_count(struct pinctrl_dev *pctldev)
@@ -96,10 +100,13 @@
 	return 0;
 }
 
-static int sh_pfc_dt_subnode_to_map(struct device *dev, struct device_node *np,
+static int sh_pfc_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+				    struct device_node *np,
 				    struct pinctrl_map **map,
 				    unsigned int *num_maps, unsigned int *index)
 {
+	struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+	struct device *dev = pmx->pfc->dev;
 	struct pinctrl_map *maps = *map;
 	unsigned int nmaps = *num_maps;
 	unsigned int idx = *index;
@@ -113,10 +120,27 @@
 	const char *pin;
 	int ret;
 
+	/* Support both the old Renesas-specific properties and the new standard
+	 * properties. Mixing old and new properties isn't allowed, either
+	 * inside a subnode or across subnodes.
+	 */
+	if (!pmx->func_prop_name) {
+		if (of_find_property(np, "groups", NULL) ||
+		    of_find_property(np, "pins", NULL)) {
+			pmx->func_prop_name = "function";
+			pmx->groups_prop_name = "groups";
+			pmx->pins_prop_name = "pins";
+		} else {
+			pmx->func_prop_name = "renesas,function";
+			pmx->groups_prop_name = "renesas,groups";
+			pmx->pins_prop_name = "renesas,pins";
+		}
+	}
+
 	/* Parse the function and configuration properties. At least a function
 	 * or one configuration must be specified.
 	 */
-	ret = of_property_read_string(np, "renesas,function", &function);
+	ret = of_property_read_string(np, pmx->func_prop_name, &function);
 	if (ret < 0 && ret != -EINVAL) {
 		dev_err(dev, "Invalid function in DT\n");
 		return ret;
@@ -129,11 +153,12 @@
 	if (!function && num_configs == 0) {
 		dev_err(dev,
 			"DT node must contain at least a function or config\n");
+		ret = -ENODEV;
 		goto done;
 	}
 
 	/* Count the number of pins and groups and reallocate mappings. */
-	ret = of_property_count_strings(np, "renesas,pins");
+	ret = of_property_count_strings(np, pmx->pins_prop_name);
 	if (ret == -EINVAL) {
 		num_pins = 0;
 	} else if (ret < 0) {
@@ -143,7 +168,7 @@
 		num_pins = ret;
 	}
 
-	ret = of_property_count_strings(np, "renesas,groups");
+	ret = of_property_count_strings(np, pmx->groups_prop_name);
 	if (ret == -EINVAL) {
 		num_groups = 0;
 	} else if (ret < 0) {
@@ -174,7 +199,7 @@
 	*num_maps = nmaps;
 
 	/* Iterate over pins and groups and create the mappings. */
-	of_property_for_each_string(np, "renesas,groups", prop, group) {
+	of_property_for_each_string(np, pmx->groups_prop_name, prop, group) {
 		if (function) {
 			maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
 			maps[idx].data.mux.group = group;
@@ -198,7 +223,7 @@
 		goto done;
 	}
 
-	of_property_for_each_string(np, "renesas,pins", prop, pin) {
+	of_property_for_each_string(np, pmx->pins_prop_name, prop, pin) {
 		ret = sh_pfc_map_add_config(&maps[idx], pin,
 					    PIN_MAP_TYPE_CONFIGS_PIN,
 					    configs, num_configs);
@@ -246,7 +271,7 @@
 	index = 0;
 
 	for_each_child_of_node(np, child) {
-		ret = sh_pfc_dt_subnode_to_map(dev, child, map, num_maps,
+		ret = sh_pfc_dt_subnode_to_map(pctldev, child, map, num_maps,
 					       &index);
 		if (ret < 0)
 			goto done;
@@ -254,7 +279,8 @@
 
 	/* If no mapping has been found in child nodes try the config node. */
 	if (*num_maps == 0) {
-		ret = sh_pfc_dt_subnode_to_map(dev, np, map, num_maps, &index);
+		ret = sh_pfc_dt_subnode_to_map(pctldev, np, map, num_maps,
+					       &index);
 		if (ret < 0)
 			goto done;
 	}
@@ -465,6 +491,9 @@
 	case PIN_CONFIG_BIAS_PULL_DOWN:
 		return pin->configs & SH_PFC_PIN_CFG_PULL_DOWN;
 
+	case PIN_CONFIG_POWER_SOURCE:
+		return pin->configs & SH_PFC_PIN_CFG_IO_VOLTAGE;
+
 	default:
 		return false;
 	}
@@ -477,7 +506,6 @@
 	struct sh_pfc *pfc = pmx->pfc;
 	enum pin_config_param param = pinconf_to_config_param(*config);
 	unsigned long flags;
-	unsigned int bias;
 
 	if (!sh_pfc_pinconf_validate(pfc, _pin, param))
 		return -ENOTSUPP;
@@ -485,7 +513,9 @@
 	switch (param) {
 	case PIN_CONFIG_BIAS_DISABLE:
 	case PIN_CONFIG_BIAS_PULL_UP:
-	case PIN_CONFIG_BIAS_PULL_DOWN:
+	case PIN_CONFIG_BIAS_PULL_DOWN: {
+		unsigned int bias;
+
 		if (!pfc->info->ops || !pfc->info->ops->get_bias)
 			return -ENOTSUPP;
 
@@ -498,6 +528,24 @@
 
 		*config = 0;
 		break;
+	}
+
+	case PIN_CONFIG_POWER_SOURCE: {
+		int ret;
+
+		if (!pfc->info->ops || !pfc->info->ops->get_io_voltage)
+			return -ENOTSUPP;
+
+		spin_lock_irqsave(&pfc->lock, flags);
+		ret = pfc->info->ops->get_io_voltage(pfc, _pin);
+		spin_unlock_irqrestore(&pfc->lock, flags);
+
+		if (ret < 0)
+			return ret;
+
+		*config = ret;
+		break;
+	}
 
 	default:
 		return -ENOTSUPP;
@@ -534,6 +582,24 @@
 
 			break;
 
+		case PIN_CONFIG_POWER_SOURCE: {
+			unsigned int arg =
+				pinconf_to_config_argument(configs[i]);
+			int ret;
+
+			if (!pfc->info->ops || !pfc->info->ops->set_io_voltage)
+				return -ENOTSUPP;
+
+			spin_lock_irqsave(&pfc->lock, flags);
+			ret = pfc->info->ops->set_io_voltage(pfc, _pin, arg);
+			spin_unlock_irqrestore(&pfc->lock, flags);
+
+			if (ret)
+				return ret;
+
+			break;
+		}
+
 		default:
 			return -ENOTSUPP;
 		}
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 0874cfe..15afd49 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -12,6 +12,7 @@
 #define __SH_PFC_H
 
 #include <linux/bug.h>
+#include <linux/pinctrl/pinconf-generic.h>
 #include <linux/stringify.h>
 
 enum {
@@ -26,6 +27,7 @@
 #define SH_PFC_PIN_CFG_OUTPUT		(1 << 1)
 #define SH_PFC_PIN_CFG_PULL_UP		(1 << 2)
 #define SH_PFC_PIN_CFG_PULL_DOWN	(1 << 3)
+#define SH_PFC_PIN_CFG_IO_VOLTAGE	(1 << 4)
 #define SH_PFC_PIN_CFG_NO_GPIO		(1 << 31)
 
 struct sh_pfc_pin {
@@ -121,6 +123,9 @@
 	unsigned int (*get_bias)(struct sh_pfc *pfc, unsigned int pin);
 	void (*set_bias)(struct sh_pfc *pfc, unsigned int pin,
 			 unsigned int bias);
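+	/* Optional operations to query and set the pin I/O voltage */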
+	int (*get_io_voltage)(struct sh_pfc *pfc, unsigned int pin);
+	int (*set_io_voltage)(struct sh_pfc *pfc, unsigned int pin,
+			      u16 voltage_mV);
 };
 
 struct sh_pfc_soc_info {
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 9384e0a..9df0c5f 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -148,6 +148,19 @@
 #define DIV_DISABLE	0x1
 #define DIV_ENABLE	0x0
 
+/* Number of Function input disable registers */
+#define NUM_OF_IN_DISABLE_REG	0x2
+
+/* Offset of Function input disable registers */
+#define IN_DISABLE_0_REG_SET		0x0A00
+#define IN_DISABLE_0_REG_CLR		0x0A04
+#define IN_DISABLE_1_REG_SET		0x0A08
+#define IN_DISABLE_1_REG_CLR		0x0A0C
+#define IN_DISABLE_VAL_0_REG_SET	0x0A80
+#define IN_DISABLE_VAL_0_REG_CLR	0x0A84
+#define IN_DISABLE_VAL_1_REG_SET	0x0A88
+#define IN_DISABLE_VAL_1_REG_CLR	0x0A8C
+
 struct dt_params {
 	const char *property;
 	int value;
@@ -197,6 +210,16 @@
 	}
 
 /**
+ * struct atlas7_pad_status - Atlas7 Pad status
+ * @func:	The function selector of this Pad.
+ * @pull:	The pull status of this Pad.
+ * @dstr:	The drive strength of this Pad.
+ * @reserved:	Padding to keep the structure word-aligned.
+ */
+struct atlas7_pad_status {
+	u8 func;
+	u8 pull;
+	u8 dstr;
+	u8 reserved;
+};
+
+/**
  * struct atlas7_pad_mux - Atlas7 mux
  * @bank:		The bank of this pad's registers on.
  * @pin	:		The ID of this Pad.
@@ -285,6 +308,9 @@
 /* Platform info of atlas7 pinctrl */
 #define ATLAS7_PINCTRL_REG_BANKS	2
 #define ATLAS7_PINCTRL_BANK_0_PINS	18
+#define ATLAS7_PINCTRL_BANK_1_PINS	141
+#define ATLAS7_PINCTRL_TOTAL_PINS	\
+	(ATLAS7_PINCTRL_BANK_0_PINS + ATLAS7_PINCTRL_BANK_1_PINS)
 
 /**
  * Atlas7 GPIO Chip
@@ -316,6 +342,7 @@
 	unsigned int gpio_offset;
 	unsigned int ngpio;
 	const unsigned int *gpio_pins;
+	u32 sleep_data[NGPIO_OF_BANK];
 };
 
 struct atlas7_gpio_chip {
@@ -343,6 +370,9 @@
 	struct pinctrl_desc pctl_desc;
 	struct atlas7_pinctrl_data *pctl_data;
 	void __iomem *regs[ATLAS7_PINCTRL_REG_BANKS];
+	u32 status_ds[NUM_OF_IN_DISABLE_REG];
+	u32 status_dsv[NUM_OF_IN_DISABLE_REG];
+	struct atlas7_pad_status sleep_data[ATLAS7_PINCTRL_TOTAL_PINS];
 };
 
 /*
@@ -3480,6 +3510,160 @@
 	.confs_cnt = ARRAY_SIZE(atlas7_ioc_pad_confs),
 };
 
+/* Simple map data structure */
+struct map_data {
+	u8 idx;
+	u8 data;
+};
+
+/**
+ * struct atlas7_pull_info - Atlas7 Pad pull info
+ * @pad_type:	The type of this Pad.
+ * @mask:	The mask value of this Pad's pull bits.
+ * @v2s: The map of pull register value to pull status.
+ * @s2v: The map of pull status to pull register value.
+ */
+struct atlas7_pull_info {
+	u8 pad_type;
+	u8 mask;
+	const struct map_data *v2s;
+	const struct map_data *s2v;
+};
+
+/* Pull Register value map to status */
+static const struct map_data p4we_pull_v2s[] = {
+	{ P4WE_PULL_UP, PULL_UP },
+	{ P4WE_HIGH_HYSTERESIS, HIGH_HYSTERESIS },
+	{ P4WE_HIGH_Z, HIGH_Z },
+	{ P4WE_PULL_DOWN, PULL_DOWN },
+};
+
+static const struct map_data p16st_pull_v2s[] = {
+	{ P16ST_PULL_UP, PULL_UP },
+	{ PD, PULL_UNKNOWN },
+	{ P16ST_HIGH_Z, HIGH_Z },
+	{ P16ST_PULL_DOWN, PULL_DOWN },
+};
+
+static const struct map_data pm31_pull_v2s[] = {
+	{ PM31_PULL_DISABLED, PULL_DOWN },
+	{ PM31_PULL_ENABLED, PULL_UP },
+};
+
+static const struct map_data pangd_pull_v2s[] = {
+	{ PANGD_PULL_UP, PULL_UP },
+	{ PD, PULL_UNKNOWN },
+	{ PANGD_HIGH_Z, HIGH_Z },
+	{ PANGD_PULL_DOWN, PULL_DOWN },
+};
+
+/* Pull status map to register value */
+static const struct map_data p4we_pull_s2v[] = {
+	{ PULL_UP, P4WE_PULL_UP },
+	{ HIGH_HYSTERESIS, P4WE_HIGH_HYSTERESIS },
+	{ HIGH_Z, P4WE_HIGH_Z },
+	{ PULL_DOWN, P4WE_PULL_DOWN },
+	{ PULL_DISABLE, -1 },
+	{ PULL_ENABLE, -1 },
+};
+
+static const struct map_data p16st_pull_s2v[] = {
+	{ PULL_UP, P16ST_PULL_UP },
+	{ HIGH_HYSTERESIS, -1 },
+	{ HIGH_Z, P16ST_HIGH_Z },
+	{ PULL_DOWN, P16ST_PULL_DOWN },
+	{ PULL_DISABLE, -1 },
+	{ PULL_ENABLE, -1 },
+};
+
+static const struct map_data pm31_pull_s2v[] = {
+	{ PULL_UP, PM31_PULL_ENABLED },
+	{ HIGH_HYSTERESIS, -1 },
+	{ HIGH_Z, -1 },
+	{ PULL_DOWN, PM31_PULL_DISABLED },
+	{ PULL_DISABLE, -1 },
+	{ PULL_ENABLE, -1 },
+};
+
+static const struct map_data pangd_pull_s2v[] = {
+	{ PULL_UP, PANGD_PULL_UP },
+	{ HIGH_HYSTERESIS, -1 },
+	{ HIGH_Z, PANGD_HIGH_Z },
+	{ PULL_DOWN, PANGD_PULL_DOWN },
+	{ PULL_DISABLE, -1 },
+	{ PULL_ENABLE, -1 },
+};
+
+static const struct atlas7_pull_info atlas7_pull_map[] = {
+	{ PAD_T_4WE_PD, P4WE_PULL_MASK, p4we_pull_v2s, p4we_pull_s2v },
+	{ PAD_T_4WE_PU, P4WE_PULL_MASK, p4we_pull_v2s, p4we_pull_s2v },
+	{ PAD_T_16ST, P16ST_PULL_MASK, p16st_pull_v2s, p16st_pull_s2v },
+	{ PAD_T_M31_0204_PD, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
+	{ PAD_T_M31_0204_PU, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
+	{ PAD_T_M31_0610_PD, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
+	{ PAD_T_M31_0610_PU, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
+	{ PAD_T_AD, PANGD_PULL_MASK, pangd_pull_v2s, pangd_pull_s2v },
+};
+
+/**
+ * struct atlas7_ds_ma_info - Atlas7 Pad DriveStrength & currents info
+ * @ma:		The drive strength as a current value, in mA.
+ * @ds_16st:	The corresponding raw value for a 16st pad.
+ * @ds_4we:	The corresponding raw value for a 4we pad.
+ * @ds_0204m31:	The corresponding raw value for a 0204m31 pad.
+ * @ds_0610m31:	The corresponding raw value for a 0610m31 pad.
+ */
+struct atlas7_ds_ma_info {
+	u32 ma;
+	u32 ds_16st;
+	u32 ds_4we;
+	u32 ds_0204m31;
+	u32 ds_0610m31;
+};
+
+static const struct atlas7_ds_ma_info atlas7_ma2ds_map[] = {
+	{ 2, DS_16ST_0, DS_4WE_0, DS_M31_0, DS_NULL },
+	{ 4, DS_16ST_1, DS_NULL, DS_M31_1, DS_NULL },
+	{ 6, DS_16ST_2, DS_NULL, DS_NULL, DS_M31_0 },
+	{ 8, DS_16ST_3, DS_4WE_1, DS_NULL, DS_NULL },
+	{ 10, DS_16ST_4, DS_NULL, DS_NULL, DS_M31_1 },
+	{ 12, DS_16ST_5, DS_NULL, DS_NULL, DS_NULL },
+	{ 14, DS_16ST_6, DS_NULL, DS_NULL, DS_NULL },
+	{ 16, DS_16ST_7, DS_4WE_2, DS_NULL, DS_NULL },
+	{ 18, DS_16ST_8, DS_NULL, DS_NULL, DS_NULL },
+	{ 20, DS_16ST_9, DS_NULL, DS_NULL, DS_NULL },
+	{ 22, DS_16ST_10, DS_NULL, DS_NULL, DS_NULL },
+	{ 24, DS_16ST_11, DS_NULL, DS_NULL, DS_NULL },
+	{ 26, DS_16ST_12, DS_NULL, DS_NULL, DS_NULL },
+	{ 28, DS_16ST_13, DS_4WE_3, DS_NULL, DS_NULL },
+	{ 30, DS_16ST_14, DS_NULL, DS_NULL, DS_NULL },
+	{ 32, DS_16ST_15, DS_NULL, DS_NULL, DS_NULL },
+};
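+
+/*
+ * Illustrative reading of the table above: a request for 8 mA maps to
+ * DS_16ST_3 on a 16st pad and DS_4WE_1 on a 4we pad, while DS_NULL
+ * marks a current the pad type cannot drive, and is what
+ * convert_current_to_drive_strength() returns for unsupported
+ * combinations.
+ */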
+
+/**
+ * struct atlas7_ds_info - Atlas7 Pad DriveStrength info
+ * @type:		The type of this pad.
+ * @mask:		The mask value of this pin's drive strength bits.
+ * @imval:		The immediate value of the drive strength register.
+ * @reserved:		Reserved; pads the struct.
+ */
+struct atlas7_ds_info {
+	u8 type;
+	u8 mask;
+	u8 imval;
+	u8 reserved;
+};
+
+static const struct atlas7_ds_info atlas7_ds_map[] = {
+	{ PAD_T_4WE_PD, DS_2BIT_MASK, DS_2BIT_IM_VAL },
+	{ PAD_T_4WE_PU, DS_2BIT_MASK, DS_2BIT_IM_VAL },
+	{ PAD_T_16ST, DS_4BIT_MASK, DS_4BIT_IM_VAL },
+	{ PAD_T_M31_0204_PD, DS_1BIT_MASK, DS_1BIT_IM_VAL },
+	{ PAD_T_M31_0204_PU, DS_1BIT_MASK, DS_1BIT_IM_VAL },
+	{ PAD_T_M31_0610_PD, DS_1BIT_MASK, DS_1BIT_IM_VAL },
+	{ PAD_T_M31_0610_PU, DS_1BIT_MASK, DS_1BIT_IM_VAL },
+	{ PAD_T_AD, DS_NULL, DS_NULL },
+};
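+
+/*
+ * Illustrative note: like the pull map above, atlas7_ds_map is indexed
+ * by pad type; imval is shifted by the pad's drvstr_bit and written to
+ * the CLR register to wipe the old drive strength bits before the new
+ * selector is set.
+ */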
+
 static inline u32 atlas7_pin_to_bank(u32 pin)
 {
 	return (pin >= ATLAS7_PINCTRL_BANK_0_PINS) ? 1 : 0;
@@ -3682,49 +3866,22 @@
 	return 0;
 }
 
-struct atlas7_ds_info {
-	u32 ma;
-	u32 ds_16st;
-	u32 ds_4we;
-	u32 ds_0204m31;
-	u32 ds_0610m31;
-};
-
-const struct atlas7_ds_info atlas7_ds_map[] = {
-	{ 2, DS_16ST_0, DS_4WE_0, DS_M31_0, DS_NULL},
-	{ 4, DS_16ST_1, DS_NULL, DS_M31_1, DS_NULL},
-	{ 6, DS_16ST_2, DS_NULL, DS_NULL, DS_M31_0},
-	{ 8, DS_16ST_3, DS_4WE_1, DS_NULL, DS_NULL},
-	{ 10, DS_16ST_4, DS_NULL, DS_NULL, DS_M31_1},
-	{ 12, DS_16ST_5, DS_NULL, DS_NULL, DS_NULL},
-	{ 14, DS_16ST_6, DS_NULL, DS_NULL, DS_NULL},
-	{ 16, DS_16ST_7, DS_4WE_2, DS_NULL, DS_NULL},
-	{ 18, DS_16ST_8, DS_NULL, DS_NULL, DS_NULL},
-	{ 20, DS_16ST_9, DS_NULL, DS_NULL, DS_NULL},
-	{ 22, DS_16ST_10, DS_NULL, DS_NULL, DS_NULL},
-	{ 24, DS_16ST_11, DS_NULL, DS_NULL, DS_NULL},
-	{ 26, DS_16ST_12, DS_NULL, DS_NULL, DS_NULL},
-	{ 28, DS_16ST_13, DS_4WE_3, DS_NULL, DS_NULL},
-	{ 30, DS_16ST_14, DS_NULL, DS_NULL, DS_NULL},
-	{ 32, DS_16ST_15, DS_NULL, DS_NULL, DS_NULL},
-};
-
 static u32 convert_current_to_drive_strength(u32 type, u32 ma)
 {
 	int idx;
 
-	for (idx = 0; idx < ARRAY_SIZE(atlas7_ds_map); idx++) {
-		if (atlas7_ds_map[idx].ma != ma)
+	for (idx = 0; idx < ARRAY_SIZE(atlas7_ma2ds_map); idx++) {
+		if (atlas7_ma2ds_map[idx].ma != ma)
 			continue;
 
 		if (type == PAD_T_4WE_PD || type == PAD_T_4WE_PU)
-			return atlas7_ds_map[idx].ds_4we;
+			return atlas7_ma2ds_map[idx].ds_4we;
 		else if (type == PAD_T_16ST)
-			return atlas7_ds_map[idx].ds_16st;
+			return atlas7_ma2ds_map[idx].ds_16st;
 		else if (type == PAD_T_M31_0204_PD || type == PAD_T_M31_0204_PU)
-			return atlas7_ds_map[idx].ds_0204m31;
+			return atlas7_ma2ds_map[idx].ds_0204m31;
 		else if (type == PAD_T_M31_0610_PD || type == PAD_T_M31_0610_PU)
-			return atlas7_ds_map[idx].ds_0610m31;
+			return atlas7_ma2ds_map[idx].ds_0610m31;
 	}
 
 	return DS_NULL;
@@ -3735,78 +3892,21 @@
 {
 	struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
 	struct atlas7_pad_config *conf = &pmx->pctl_data->confs[pin];
-	u32 type = conf->type;
-	u32 shift = conf->pupd_bit;
-	u32 bank = atlas7_pin_to_bank(pin);
-	void __iomem *pull_sel_reg, *pull_clr_reg;
+	const struct atlas7_pull_info *pull_info;
+	u32 bank;
+	unsigned long regv;
+	void __iomem *pull_sel_reg;
 
+	bank = atlas7_pin_to_bank(pin);
+	pull_info = &atlas7_pull_map[conf->type];
 	pull_sel_reg = pmx->regs[bank] + conf->pupd_reg;
-	pull_clr_reg = CLR_REG(pull_sel_reg);
 
-	if (type == PAD_T_4WE_PD || type == PAD_T_4WE_PU) {
-		writel(P4WE_PULL_MASK << shift, pull_clr_reg);
+	/* Retrieve the corresponding register value for sel from the s2v table */
+	regv = pull_info->s2v[sel].data & pull_info->mask;
 
-		if (sel == PULL_UP)
-			writel(P4WE_PULL_UP << shift, pull_sel_reg);
-		else if (sel == HIGH_HYSTERESIS)
-			writel(P4WE_HIGH_HYSTERESIS << shift, pull_sel_reg);
-		else if (sel == HIGH_Z)
-			writel(P4WE_HIGH_Z << shift, pull_sel_reg);
-		else if (sel == PULL_DOWN)
-			writel(P4WE_PULL_DOWN << shift, pull_sel_reg);
-		else {
-			pr_err("Unknown Pull select type for 4WEPAD#%d\n",
-				pin);
-			return -ENOTSUPP;
-		}
-	} else if (type == PAD_T_16ST) {
-		writel(P16ST_PULL_MASK << shift, pull_clr_reg);
-
-		if (sel == PULL_UP)
-			writel(P16ST_PULL_UP << shift, pull_sel_reg);
-		else if (sel == HIGH_Z)
-			writel(P16ST_HIGH_Z << shift, pull_sel_reg);
-		else if (sel == PULL_DOWN)
-			writel(P16ST_PULL_DOWN << shift, pull_sel_reg);
-		else {
-			pr_err("Unknown Pull select type for 16STPAD#%d\n",
-				pin);
-			return -ENOTSUPP;
-		}
-	} else if (type == PAD_T_M31_0204_PD ||
-		type == PAD_T_M31_0204_PU ||
-		type == PAD_T_M31_0610_PD ||
-		type == PAD_T_M31_0610_PU) {
-		writel(PM31_PULL_MASK << shift, pull_clr_reg);
-
-		if (sel == PULL_UP)
-			writel(PM31_PULL_ENABLED << shift, pull_sel_reg);
-		else if (sel == PULL_DOWN)
-			writel(PM31_PULL_DISABLED << shift, pull_sel_reg);
-		else {
-			pr_err("Unknown Pull select type for M31PAD#%d\n",
-				pin);
-			return -ENOTSUPP;
-		}
-	} else if (type == PAD_T_AD) {
-		writel(PANGD_PULL_MASK << shift, pull_clr_reg);
-
-		if (sel == PULL_UP)
-			writel(PANGD_PULL_UP << shift, pull_sel_reg);
-		else if (sel == HIGH_Z)
-			writel(PANGD_HIGH_Z << shift, pull_sel_reg);
-		else if (sel == PULL_DOWN)
-			writel(PANGD_PULL_DOWN << shift, pull_sel_reg);
-		else {
-			pr_err("Unknown Pull select type for A/D PAD#%d\n",
-				pin);
-			return -ENOTSUPP;
-		}
-	} else {
-			pr_err("Unknown Pad type[%d] for pull select PAD#%d\n",
-				type, pin);
-			return -ENOTSUPP;
-	}
+	/* Clear the old value, then write the new one to the pull register */
+	writel(pull_info->mask << conf->pupd_bit, CLR_REG(pull_sel_reg));
+	writel(regv << conf->pupd_bit, pull_sel_reg);
 
 	pr_debug("PIN_CFG ### SET PIN#%d PULL SELECTOR:%d == OK ####\n",
 		pin, sel);
@@ -3818,43 +3918,25 @@
 {
 	struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
 	struct atlas7_pad_config *conf = &pmx->pctl_data->confs[pin];
-	u32 type = conf->type;
-	u32 shift = conf->drvstr_bit;
-	u32 bank = atlas7_pin_to_bank(pin);
-	void __iomem *ds_sel_reg, *ds_clr_reg;
+	const struct atlas7_ds_info *ds_info;
+	u32 bank;
+	void __iomem *ds_sel_reg;
 
+	ds_info = &atlas7_ds_map[conf->type];
+	if (sel & (~(ds_info->mask)))
+		goto unsupport;
+
+	bank = atlas7_pin_to_bank(pin);
 	ds_sel_reg = pmx->regs[bank] + conf->drvstr_reg;
-	ds_clr_reg = CLR_REG(ds_sel_reg);
-	if (type == PAD_T_4WE_PD || type == PAD_T_4WE_PU) {
-		if (sel & (~DS_2BIT_MASK))
-			goto unsupport;
 
-		writel(DS_2BIT_IM_VAL << shift, ds_clr_reg);
-		writel(sel << shift, ds_sel_reg);
+	writel(ds_info->imval << conf->drvstr_bit, CLR_REG(ds_sel_reg));
+	writel(sel << conf->drvstr_bit, ds_sel_reg);
 
-		return 0;
-	} else if (type == PAD_T_16ST) {
-		if (sel & (~DS_4BIT_MASK))
-			goto unsupport;
-
-		writel(DS_4BIT_IM_VAL << shift, ds_clr_reg);
-		writel(sel << shift, ds_sel_reg);
-
-		return 0;
-	} else if (type == PAD_T_M31_0204_PD ||	type == PAD_T_M31_0204_PU ||
-		type == PAD_T_M31_0610_PD || type == PAD_T_M31_0610_PU) {
-		if (sel & (~DS_1BIT_MASK))
-			goto unsupport;
-
-		writel(DS_1BIT_IM_VAL << shift, ds_clr_reg);
-		writel(sel << shift, ds_sel_reg);
-
-		return 0;
-	}
+	return 0;
 
 unsupport:
 	pr_err("Pad#%d type[%d] doesn't support ds code[%d]!\n",
-		pin, type, sel);
+		pin, conf->type, sel);
 	return -ENOTSUPP;
 }
 
@@ -4101,14 +4183,135 @@
 	return ret;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int atlas7_pinmux_suspend_noirq(struct device *dev)
+{
+	struct atlas7_pmx *pmx = dev_get_drvdata(dev);
+	struct atlas7_pad_status *status;
+	struct atlas7_pad_config *conf;
+	const struct atlas7_ds_info *ds_info;
+	const struct atlas7_pull_info *pull_info;
+	int idx;
+	u32 bank;
+	unsigned long regv;
+
+	for (idx = 0; idx < pmx->pctl_desc.npins; idx++) {
+		/* Get this Pad's descriptor from PINCTRL */
+		conf = &pmx->pctl_data->confs[idx];
+		bank = atlas7_pin_to_bank(idx);
+		status = &pmx->sleep_data[idx];
+
+		/* Save Function selector */
+		regv = readl(pmx->regs[bank] + conf->mux_reg);
+		status->func = (regv >> conf->mux_bit) & FUNC_CLEAR_MASK;
+
+		/* Check whether the pad is muxed to the analogue selector */
+		if (conf->ad_ctrl_reg == -1)
+			goto save_ds_sel;
+
+		regv = readl(pmx->regs[bank] + conf->ad_ctrl_reg);
+		if (!(regv & (conf->ad_ctrl_bit << ANA_CLEAR_MASK)))
+			status->func = FUNC_ANALOGUE;
+
+save_ds_sel:
+		if (conf->drvstr_reg == -1)
+			goto save_pull_sel;
+
+		/* Save Drive Strength selector */
+		ds_info = &atlas7_ds_map[conf->type];
+		regv = readl(pmx->regs[bank] + conf->drvstr_reg);
+		status->dstr = (regv >> conf->drvstr_bit) & ds_info->mask;
+
+save_pull_sel:
+		/* Save Pull selector */
+		pull_info = &atlas7_pull_map[conf->type];
+		regv = readl(pmx->regs[bank] + conf->pupd_reg);
+		regv = (regv >> conf->pupd_bit) & pull_info->mask;
+		status->pull = pull_info->v2s[regv].data;
+	}
+
+	/*
+	 * Save the input-disable selectors. These selectors apply to
+	 * mux functions, not to individual pins.
+	 */
+	for (idx = 0; idx < NUM_OF_IN_DISABLE_REG; idx++) {
+		pmx->status_ds[idx] = readl(pmx->regs[BANK_DS] +
+					IN_DISABLE_0_REG_SET + 0x8 * idx);
+		pmx->status_dsv[idx] = readl(pmx->regs[BANK_DS] +
+					IN_DISABLE_VAL_0_REG_SET + 0x8 * idx);
+	}
+
+	return 0;
+}
+
+static int atlas7_pinmux_resume_noirq(struct device *dev)
+{
+	struct atlas7_pmx *pmx = dev_get_drvdata(dev);
+	struct atlas7_pad_status *status;
+	struct atlas7_pad_config *conf;
+	int idx;
+	u32 bank;
+
+	for (idx = 0; idx < pmx->pctl_desc.npins; idx++) {
+		/* Get this Pad's descriptor from PINCTRL */
+		conf = &pmx->pctl_data->confs[idx];
+		bank = atlas7_pin_to_bank(idx);
+		status = &pmx->sleep_data[idx];
+
+		/* Restore Function selector */
+		__atlas7_pmx_pin_enable(pmx, idx, (u32)status->func & 0xff);
+
+		if (status->func == FUNC_ANALOGUE)
+			goto restore_pull_sel;
+
+		/* Restore Drive Strength selector */
+		__altas7_pinctrl_set_drive_strength_sel(pmx->pctl, idx,
+						(u32)status->dstr & 0xff);
+
+restore_pull_sel:
+		/* Restore Pull selector */
+		altas7_pinctrl_set_pull_sel(pmx->pctl, idx,
+						(u32)status->pull & 0xff);
+	}
+
+	/*
+	 * Restore the input-disable selectors. These selectors apply to
+	 * mux functions, not to individual pins.
+	 */
+	for (idx = 0; idx < NUM_OF_IN_DISABLE_REG; idx++) {
+		writel(~0, pmx->regs[BANK_DS] +
+					IN_DISABLE_0_REG_CLR + 0x8 * idx);
+		writel(pmx->status_ds[idx], pmx->regs[BANK_DS] +
+					IN_DISABLE_0_REG_SET + 0x8 * idx);
+		writel(~0, pmx->regs[BANK_DS] +
+					IN_DISABLE_VAL_0_REG_CLR + 0x8 * idx);
+		writel(pmx->status_dsv[idx], pmx->regs[BANK_DS] +
+					IN_DISABLE_VAL_0_REG_SET + 0x8 * idx);
+	}
+
+	return 0;
+}
+
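+/*
+ * Illustrative note: one save/restore pair covers both suspend/resume
+ * and hibernation (freeze/restore); the noirq callbacks run after
+ * device interrupts are disabled on suspend and before they are
+ * re-enabled on resume, so pad state is captured and reapplied safely.
+ */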
+static const struct dev_pm_ops atlas7_pinmux_pm_ops = {
+	.suspend_noirq = atlas7_pinmux_suspend_noirq,
+	.resume_noirq = atlas7_pinmux_resume_noirq,
+	.freeze_noirq = atlas7_pinmux_suspend_noirq,
+	.restore_noirq = atlas7_pinmux_resume_noirq,
+};
+#endif
+
 static const struct of_device_id atlas7_pinmux_ids[] = {
 	{ .compatible = "sirf,atlas7-ioc",},
+	{},
 };
 
 static struct platform_driver atlas7_pinmux_driver = {
 	.driver = {
 		.name = "atlas7-ioc",
 		.of_match_table = atlas7_pinmux_ids,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &atlas7_pinmux_pm_ops,
+#endif
 	},
 	.probe = atlas7_pinmux_probe,
 };
@@ -4286,14 +4489,15 @@
 	.irq_set_type = atlas7_gpio_irq_type,
 };
 
-static void atlas7_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
+static void atlas7_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc)
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct atlas7_gpio_chip *a7gc = to_atlas7_gpio(gc);
 	struct atlas7_gpio_bank *bank = NULL;
 	u32 status, ctrl;
 	int pin_in_bank = 0, idx;
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned int irq = irq_desc_get_irq(desc);
 
 	for (idx = 0; idx < a7gc->nbank; idx++) {
 		bank = &a7gc->banks[idx];
@@ -4496,6 +4700,7 @@
 
 static const struct of_device_id atlas7_gpio_ids[] = {
 	{ .compatible = "sirf,atlas7-gpio", },
+	{},
 };
 
 static int atlas7_gpio_probe(struct platform_device *pdev)
@@ -4612,17 +4817,65 @@
 		BUG_ON(!bank->pctldev);
 	}
 
+	platform_set_drvdata(pdev, a7gc);
 	dev_info(&pdev->dev, "add to system.\n");
 	return 0;
 failed:
 	return ret;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int atlas7_gpio_suspend_noirq(struct device *dev)
+{
+	struct atlas7_gpio_chip *a7gc = dev_get_drvdata(dev);
+	struct atlas7_gpio_bank *bank;
+	void __iomem *ctrl_reg;
+	u32 idx, pin;
+
+	for (idx = 0; idx < a7gc->nbank; idx++) {
+		bank = &a7gc->banks[idx];
+		for (pin = 0; pin < bank->ngpio; pin++) {
+			ctrl_reg = ATLAS7_GPIO_CTRL(bank, pin);
+			bank->sleep_data[pin] = readl(ctrl_reg);
+		}
+	}
+
+	return 0;
+}
+
+static int atlas7_gpio_resume_noirq(struct device *dev)
+{
+	struct atlas7_gpio_chip *a7gc = dev_get_drvdata(dev);
+	struct atlas7_gpio_bank *bank;
+	void __iomem *ctrl_reg;
+	u32 idx, pin;
+
+	for (idx = 0; idx < a7gc->nbank; idx++) {
+		bank = &a7gc->banks[idx];
+		for (pin = 0; pin < bank->ngpio; pin++) {
+			ctrl_reg = ATLAS7_GPIO_CTRL(bank, pin);
+			writel(bank->sleep_data[pin], ctrl_reg);
+		}
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops atlas7_gpio_pm_ops = {
+	.suspend_noirq = atlas7_gpio_suspend_noirq,
+	.resume_noirq = atlas7_gpio_resume_noirq,
+	.freeze_noirq = atlas7_gpio_suspend_noirq,
+	.restore_noirq = atlas7_gpio_resume_noirq,
+};
+#endif
+
 static struct platform_driver atlas7_gpio_driver = {
 	.driver = {
 		.name = "atlas7-gpio",
-		.owner = THIS_MODULE,
 		.of_match_table = atlas7_gpio_ids,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &atlas7_gpio_pm_ops,
+#endif
 	},
 	.probe = atlas7_gpio_probe,
 };
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 8ba26e4..f8bd9fb 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -545,14 +545,15 @@
 	.irq_set_type = sirfsoc_gpio_irq_type,
 };
 
-static void sirfsoc_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
+static void sirfsoc_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct sirfsoc_gpio_chip *sgpio = to_sirfsoc_gpio(gc);
 	struct sirfsoc_gpio_bank *bank;
 	u32 status, ctrl;
 	int idx = 0;
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int i;
 
 	for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 7376a97..862a096 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -135,7 +135,14 @@
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
-		  SUNXI_FUNCTION(0x2, "ir0")),		/* TX */
+		  SUNXI_FUNCTION(0x2, "ir0"),		/* TX */
+		/*
+		 * The SPDIF block is not referenced at all in the A10 user
+		 * manual. However, it is described in leaked vendor code, and
+		 * the pin descriptions are declared in the A20 user manual,
+		 * which is pin-compatible with this device.
+		 */
+		  SUNXI_FUNCTION(0x4, "spdif")),        /* SPDIF MCLK */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
@@ -176,11 +183,15 @@
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "i2s"),		/* DI */
-		  SUNXI_FUNCTION(0x3, "ac97")),		/* DI */
+		  SUNXI_FUNCTION(0x3, "ac97"),		/* DI */
+		/* Undocumented mux function - See SPDIF MCLK above */
+		  SUNXI_FUNCTION(0x4, "spdif")),        /* SPDIF IN */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 13),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
-		  SUNXI_FUNCTION(0x2, "spi2")),		/* CS1 */
+		  SUNXI_FUNCTION(0x2, "spi2"),		/* CS1 */
+		/* Undocumented mux function - See SPDIF MCLK above */
+		  SUNXI_FUNCTION(0x4, "spdif")),        /* SPDIF OUT */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 14),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index f09573e..fb4669c0 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -588,7 +588,6 @@
 static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
 {
 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-	struct irq_desc *desc = container_of(d, struct irq_desc, irq_data);
 	u32 reg = sunxi_irq_cfg_reg(d->hwirq);
 	u8 index = sunxi_irq_cfg_offset(d->hwirq);
 	unsigned long flags;
@@ -615,16 +614,17 @@
 		return -EINVAL;
 	}
 
-	if (type & IRQ_TYPE_LEVEL_MASK) {
-		d->chip = &sunxi_pinctrl_level_irq_chip;
-		desc->handle_irq = handle_fasteoi_irq;
-	} else {
-		d->chip = &sunxi_pinctrl_edge_irq_chip;
-		desc->handle_irq = handle_edge_irq;
-	}
-
 	spin_lock_irqsave(&pctl->lock, flags);
 
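+	/*
+	 * Illustrative note: irq_set_type callbacks run with the irq
+	 * descriptor already locked, so the *_locked helper is the safe
+	 * way to swap the chip and handler here, rather than writing
+	 * irq_desc fields directly as the old code did.
+	 */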
+	if (type & IRQ_TYPE_LEVEL_MASK)
+		__irq_set_chip_handler_name_locked(d->irq,
+						   &sunxi_pinctrl_level_irq_chip,
+						   handle_fasteoi_irq, NULL);
+	else
+		__irq_set_chip_handler_name_locked(d->irq,
+						   &sunxi_pinctrl_edge_irq_chip,
+						   handle_edge_irq, NULL);
+
 	regval = readl(pctl->membase + reg);
 	regval &= ~(IRQ_CFG_IRQ_MASK << index);
 	writel(regval | (mode << index), pctl->membase + reg);
@@ -685,6 +685,7 @@
 }
 
 static struct irq_chip sunxi_pinctrl_edge_irq_chip = {
+	.name		= "sunxi_pio_edge",
 	.irq_ack	= sunxi_pinctrl_irq_ack,
 	.irq_mask	= sunxi_pinctrl_irq_mask,
 	.irq_unmask	= sunxi_pinctrl_irq_unmask,
@@ -695,6 +696,7 @@
 };
 
 static struct irq_chip sunxi_pinctrl_level_irq_chip = {
+	.name		= "sunxi_pio_level",
 	.irq_eoi	= sunxi_pinctrl_irq_ack,
 	.irq_mask	= sunxi_pinctrl_irq_mask,
 	.irq_unmask	= sunxi_pinctrl_irq_unmask,
@@ -709,10 +711,42 @@
 			  IRQCHIP_EOI_IF_HANDLED,
 };
 
-static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
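+/*
+ * Illustrative note: with this xlate hook the GPIO interrupt specifier
+ * becomes <bank pin flags>; e.g. (values assumed for illustration)
+ * <1 5 IRQ_TYPE_EDGE_RISING> resolves pin PINS_PER_BANK * 1 + 5 to its
+ * "irq" mux function and returns the matching hwirq within the
+ * controller's irq banks.
+ */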
+static int sunxi_pinctrl_irq_of_xlate(struct irq_domain *d,
+				      struct device_node *node,
+				      const u32 *intspec,
+				      unsigned int intsize,
+				      unsigned long *out_hwirq,
+				      unsigned int *out_type)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct sunxi_pinctrl *pctl = irq_get_handler_data(irq);
+	struct sunxi_desc_function *desc;
+	int pin, base;
+
+	if (intsize < 3)
+		return -EINVAL;
+
+	base = PINS_PER_BANK * intspec[0];
+	pin = base + intspec[1];
+
+	desc = sunxi_pinctrl_desc_find_function_by_pin(d->host_data,
+						       pin, "irq");
+	if (!desc)
+		return -EINVAL;
+
+	*out_hwirq = desc->irqbank * PINS_PER_BANK + desc->irqnum;
+	*out_type = intspec[2];
+
+	return 0;
+}
+
+static struct irq_domain_ops sunxi_pinctrl_irq_domain_ops = {
+	.xlate		= sunxi_pinctrl_irq_of_xlate,
+};
+
+static void sunxi_pinctrl_irq_handler(unsigned __irq, struct irq_desc *desc)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct sunxi_pinctrl *pctl = irq_desc_get_handler_data(desc);
 	unsigned long bank, reg, val;
 
 	for (bank = 0; bank < pctl->desc->irq_banks; bank++)
@@ -983,8 +1017,8 @@
 
 	pctl->domain = irq_domain_add_linear(node,
 					     pctl->desc->irq_banks * IRQ_PER_BANK,
-					     &irq_domain_simple_ops,
-					     NULL);
+					     &sunxi_pinctrl_irq_domain_ops,
+					     pctl);
 	if (!pctl->domain) {
 		dev_err(&pdev->dev, "Couldn't register IRQ domain\n");
 		ret = -ENOMEM;
diff --git a/drivers/pinctrl/uniphier/Kconfig b/drivers/pinctrl/uniphier/Kconfig
new file mode 100644
index 0000000..eab23ef
--- /dev/null
+++ b/drivers/pinctrl/uniphier/Kconfig
@@ -0,0 +1,32 @@
+if ARCH_UNIPHIER
+
+config PINCTRL_UNIPHIER_CORE
+	bool
+	select PINMUX
+	select GENERIC_PINCONF
+
+config PINCTRL_UNIPHIER_PH1_LD4
+	tristate "UniPhier PH1-LD4 SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PH1_PRO4
+	tristate "UniPhier PH1-Pro4 SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PH1_SLD8
+	tristate "UniPhier PH1-sLD8 SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PH1_PRO5
+	tristate "UniPhier PH1-Pro5 SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PROXSTREAM2
+	tristate "UniPhier ProXstream2 SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PH1_LD6B
+	tristate "UniPhier PH1-LD6b SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
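+# Example (illustrative): a PH1-LD4 board enables
+# CONFIG_PINCTRL_UNIPHIER_PH1_LD4, and PINCTRL_UNIPHIER_CORE, PINMUX and
+# GENERIC_PINCONF are pulled in automatically via select.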
+endif
diff --git a/drivers/pinctrl/uniphier/Makefile b/drivers/pinctrl/uniphier/Makefile
new file mode 100644
index 0000000..e215b10
--- /dev/null
+++ b/drivers/pinctrl/uniphier/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_PINCTRL_UNIPHIER_CORE)		+= pinctrl-uniphier-core.o
+
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_LD4)		+= pinctrl-ph1-ld4.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_PRO4)		+= pinctrl-ph1-pro4.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_SLD8)		+= pinctrl-ph1-sld8.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_PRO5)		+= pinctrl-ph1-pro5.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PROXSTREAM2)	+= pinctrl-proxstream2.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_LD6B)		+= pinctrl-ph1-ld6b.o
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c b/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c
new file mode 100644
index 0000000..7beb87e
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c
@@ -0,0 +1,886 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-ld4-pinctrl"
+
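+/*
+ * Illustrative reading of the macro arguments (see pinctrl-uniphier.h):
+ * UNIPHIER_PINCTRL_PIN(number, name, input-enable control, drive-control
+ * bit, drive-strength capability, pull-control bit, default pull);
+ * UNIPHIER_PIN_IECTRL_NONE and -1 mark controls a pad does not have.
+ */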
+static const struct pinctrl_pin_desc ph1_ld4_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "EA1", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_4_8,
+			     8, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "EA2", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_4_8,
+			     9, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "EA3", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_4_8,
+			     10, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "EA4", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_4_8,
+			     11, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "EA5", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_4_8,
+			     12, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(5, "EA6", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_4_8,
+			     13, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "EA7", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_4_8,
+			     14, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "EA8", 0,
+			     15, UNIPHIER_PIN_DRV_4_8,
+			     15, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "EA9", 0,
+			     16, UNIPHIER_PIN_DRV_4_8,
+			     16, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "EA10", 0,
+			     17, UNIPHIER_PIN_DRV_4_8,
+			     17, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "EA11", 0,
+			     18, UNIPHIER_PIN_DRV_4_8,
+			     18, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "EA12", 0,
+			     19, UNIPHIER_PIN_DRV_4_8,
+			     19, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(12, "EA13", 0,
+			     20, UNIPHIER_PIN_DRV_4_8,
+			     20, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(13, "EA14", 0,
+			     21, UNIPHIER_PIN_DRV_4_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(14, "EA15", 0,
+			     22, UNIPHIER_PIN_DRV_4_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(15, "ECLK", UNIPHIER_PIN_IECTRL_NONE,
+			     23, UNIPHIER_PIN_DRV_4_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(16, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_4_8,
+			     24, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(17, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+			     25, UNIPHIER_PIN_DRV_4_8,
+			     25, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(18, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+			     27, UNIPHIER_PIN_DRV_4_8,
+			     27, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(19, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_4_8,
+			     28, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(20, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+			     29, UNIPHIER_PIN_DRV_4_8,
+			     29, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(21, "XERST", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(22, "MMCCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_8_12_16_20,
+			     146, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(23, "MMCCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_8_12_16_20,
+			     147, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(24, "MMCDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_8_12_16_20,
+			     148, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(25, "MMCDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_8_12_16_20,
+			     149, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(26, "MMCDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_8_12_16_20,
+			     150, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(27, "MMCDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_8_12_16_20,
+			     151, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(28, "MMCDAT4", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_8_12_16_20,
+			     152, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(29, "MMCDAT5", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_8_12_16_20,
+			     153, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(30, "MMCDAT6", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_8_12_16_20,
+			     154, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(31, "MMCDAT7", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_8_12_16_20,
+			     155, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(32, "RMII_RXD0", 6,
+			     39, UNIPHIER_PIN_DRV_4_8,
+			     39, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(33, "RMII_RXD1", 6,
+			     40, UNIPHIER_PIN_DRV_4_8,
+			     40, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(34, "RMII_CRS_DV", 6,
+			     41, UNIPHIER_PIN_DRV_4_8,
+			     41, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(35, "RMII_RXER", 6,
+			     42, UNIPHIER_PIN_DRV_4_8,
+			     42, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(36, "RMII_REFCLK", 6,
+			     43, UNIPHIER_PIN_DRV_4_8,
+			     43, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(37, "RMII_TXD0", 6,
+			     44, UNIPHIER_PIN_DRV_4_8,
+			     44, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(38, "RMII_TXD1", 6,
+			     45, UNIPHIER_PIN_DRV_4_8,
+			     45, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(39, "RMII_TXEN", 6,
+			     46, UNIPHIER_PIN_DRV_4_8,
+			     46, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(40, "MDC", 6,
+			     47, UNIPHIER_PIN_DRV_4_8,
+			     47, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(41, "MDIO", 6,
+			     48, UNIPHIER_PIN_DRV_4_8,
+			     48, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(42, "MDIO_INTL", 6,
+			     49, UNIPHIER_PIN_DRV_4_8,
+			     49, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(43, "PHYRSTL", 6,
+			     50, UNIPHIER_PIN_DRV_4_8,
+			     50, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(44, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_8_12_16_20,
+			     156, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(45, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_8_12_16_20,
+			     157, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(46, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     48, UNIPHIER_PIN_DRV_8_12_16_20,
+			     158, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(47, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_8_12_16_20,
+			     159, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(48, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     56, UNIPHIER_PIN_DRV_8_12_16_20,
+			     160, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(49, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     60, UNIPHIER_PIN_DRV_8_12_16_20,
+			     161, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(50, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+			     51, UNIPHIER_PIN_DRV_4_8,
+			     51, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(51, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_4_8,
+			     52, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(52, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+			     53, UNIPHIER_PIN_DRV_4_8,
+			     53, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(53, "USB0VBUS", 0,
+			     54, UNIPHIER_PIN_DRV_4_8,
+			     54, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(54, "USB0OD", 0,
+			     55, UNIPHIER_PIN_DRV_4_8,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(55, "USB1VBUS", 0,
+			     56, UNIPHIER_PIN_DRV_4_8,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(56, "USB1OD", 0,
+			     57, UNIPHIER_PIN_DRV_4_8,
+			     57, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(57, "PCRESET", 0,
+			     58, UNIPHIER_PIN_DRV_4_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "PCREG", 0,
+			     59, UNIPHIER_PIN_DRV_4_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(59, "PCCE2", 0,
+			     60, UNIPHIER_PIN_DRV_4_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(60, "PCVS1", 0,
+			     61, UNIPHIER_PIN_DRV_4_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "PCCD2", 0,
+			     62, UNIPHIER_PIN_DRV_4_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(62, "PCCD1", 0,
+			     63, UNIPHIER_PIN_DRV_4_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(63, "PCREADY", 0,
+			     64, UNIPHIER_PIN_DRV_4_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(64, "PCDOE", 0,
+			     65, UNIPHIER_PIN_DRV_4_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "PCCE1", 0,
+			     66, UNIPHIER_PIN_DRV_4_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "PCWE", 0,
+			     67, UNIPHIER_PIN_DRV_4_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "PCOE", 0,
+			     68, UNIPHIER_PIN_DRV_4_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "PCWAIT", 0,
+			     69, UNIPHIER_PIN_DRV_4_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "PCIOWR", 0,
+			     70, UNIPHIER_PIN_DRV_4_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "PCIORD", 0,
+			     71, UNIPHIER_PIN_DRV_4_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "HS0DIN0", 0,
+			     72, UNIPHIER_PIN_DRV_4_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "HS0DIN1", 0,
+			     73, UNIPHIER_PIN_DRV_4_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "HS0DIN2", 0,
+			     74, UNIPHIER_PIN_DRV_4_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "HS0DIN3", 0,
+			     75, UNIPHIER_PIN_DRV_4_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "HS0DIN4", 0,
+			     76, UNIPHIER_PIN_DRV_4_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "HS0DIN5", 0,
+			     77, UNIPHIER_PIN_DRV_4_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "HS0DIN6", 0,
+			     78, UNIPHIER_PIN_DRV_4_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "HS0DIN7", 0,
+			     79, UNIPHIER_PIN_DRV_4_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "HS0BCLKIN", 0,
+			     80, UNIPHIER_PIN_DRV_4_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "HS0VALIN", 0,
+			     81, UNIPHIER_PIN_DRV_4_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "HS0SYNCIN", 0,
+			     82, UNIPHIER_PIN_DRV_4_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "HSDOUT0", 0,
+			     83, UNIPHIER_PIN_DRV_4_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "HSDOUT1", 0,
+			     84, UNIPHIER_PIN_DRV_4_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "HSDOUT2", 0,
+			     85, UNIPHIER_PIN_DRV_4_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "HSDOUT3", 0,
+			     86, UNIPHIER_PIN_DRV_4_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "HSDOUT4", 0,
+			     87, UNIPHIER_PIN_DRV_4_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "HSDOUT5", 0,
+			     88, UNIPHIER_PIN_DRV_4_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "HSDOUT6", 0,
+			     89, UNIPHIER_PIN_DRV_4_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "HSDOUT7", 0,
+			     90, UNIPHIER_PIN_DRV_4_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "HSBCLKOUT", 0,
+			     91, UNIPHIER_PIN_DRV_4_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "HSVALOUT", 0,
+			     92, UNIPHIER_PIN_DRV_4_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "HSSYNCOUT", 0,
+			     93, UNIPHIER_PIN_DRV_4_8,
+			     93, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "AGCI", 3,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     162, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "AGCR", 4,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     163, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "AGCBS", 5,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     164, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "IECOUT", 0,
+			     94, UNIPHIER_PIN_DRV_4_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "ASMCK", 0,
+			     95, UNIPHIER_PIN_DRV_4_8,
+			     95, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "ABCKO", UNIPHIER_PIN_IECTRL_NONE,
+			     96, UNIPHIER_PIN_DRV_4_8,
+			     96, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "ALRCKO", UNIPHIER_PIN_IECTRL_NONE,
+			     97, UNIPHIER_PIN_DRV_4_8,
+			     97, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "ASDOUT0", UNIPHIER_PIN_IECTRL_NONE,
+			     98, UNIPHIER_PIN_DRV_4_8,
+			     98, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(101, "ARCOUT", 0,
+			     99, UNIPHIER_PIN_DRV_4_8,
+			     99, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(103, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(104, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(105, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE,
+			     100, UNIPHIER_PIN_DRV_4_8,
+			     100, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE,
+			     101, UNIPHIER_PIN_DRV_4_8,
+			     101, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(112, "HIN", 1,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(113, "VIN", 2,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(114, "TCON0", UNIPHIER_PIN_IECTRL_NONE,
+			     102, UNIPHIER_PIN_DRV_4_8,
+			     102, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(115, "TCON1", UNIPHIER_PIN_IECTRL_NONE,
+			     103, UNIPHIER_PIN_DRV_4_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(116, "TCON2", UNIPHIER_PIN_IECTRL_NONE,
+			     104, UNIPHIER_PIN_DRV_4_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(117, "TCON3", UNIPHIER_PIN_IECTRL_NONE,
+			     105, UNIPHIER_PIN_DRV_4_8,
+			     105, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(118, "TCON4", UNIPHIER_PIN_IECTRL_NONE,
+			     106, UNIPHIER_PIN_DRV_4_8,
+			     106, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(119, "TCON5", UNIPHIER_PIN_IECTRL_NONE,
+			     107, UNIPHIER_PIN_DRV_4_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(120, "TCON6", 0,
+			     108, UNIPHIER_PIN_DRV_4_8,
+			     108, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(121, "TCON7", 0,
+			     109, UNIPHIER_PIN_DRV_4_8,
+			     109, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "PWMA", 0,
+			     110, UNIPHIER_PIN_DRV_4_8,
+			     110, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(123, "XIRQ1", 0,
+			     111, UNIPHIER_PIN_DRV_4_8,
+			     111, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "XIRQ2", 0,
+			     112, UNIPHIER_PIN_DRV_4_8,
+			     112, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(125, "XIRQ3", 0,
+			     113, UNIPHIER_PIN_DRV_4_8,
+			     113, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(126, "XIRQ4", 0,
+			     114, UNIPHIER_PIN_DRV_4_8,
+			     114, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(127, "XIRQ5", 0,
+			     115, UNIPHIER_PIN_DRV_4_8,
+			     115, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(128, "XIRQ6", 0,
+			     116, UNIPHIER_PIN_DRV_4_8,
+			     116, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(129, "XIRQ7", 0,
+			     117, UNIPHIER_PIN_DRV_4_8,
+			     117, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(130, "XIRQ8", 0,
+			     118, UNIPHIER_PIN_DRV_4_8,
+			     118, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(131, "XIRQ9", 0,
+			     119, UNIPHIER_PIN_DRV_4_8,
+			     119, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(132, "XIRQ10", 0,
+			     120, UNIPHIER_PIN_DRV_4_8,
+			     120, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(133, "XIRQ11", 0,
+			     121, UNIPHIER_PIN_DRV_4_8,
+			     121, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(134, "XIRQ14", 0,
+			     122, UNIPHIER_PIN_DRV_4_8,
+			     122, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(135, "PORT00", 0,
+			     123, UNIPHIER_PIN_DRV_4_8,
+			     123, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(136, "PORT01", 0,
+			     124, UNIPHIER_PIN_DRV_4_8,
+			     124, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(137, "PORT02", 0,
+			     125, UNIPHIER_PIN_DRV_4_8,
+			     125, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(138, "PORT03", 0,
+			     126, UNIPHIER_PIN_DRV_4_8,
+			     126, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(139, "PORT04", 0,
+			     127, UNIPHIER_PIN_DRV_4_8,
+			     127, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(140, "PORT05", 0,
+			     128, UNIPHIER_PIN_DRV_4_8,
+			     128, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(141, "PORT06", 0,
+			     129, UNIPHIER_PIN_DRV_4_8,
+			     129, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(142, "PORT07", 0,
+			     130, UNIPHIER_PIN_DRV_4_8,
+			     130, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(143, "PORT10", 0,
+			     131, UNIPHIER_PIN_DRV_4_8,
+			     131, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(144, "PORT11", 0,
+			     132, UNIPHIER_PIN_DRV_4_8,
+			     132, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(145, "PORT12", 0,
+			     133, UNIPHIER_PIN_DRV_4_8,
+			     133, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(146, "PORT13", 0,
+			     134, UNIPHIER_PIN_DRV_4_8,
+			     134, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(147, "PORT14", 0,
+			     135, UNIPHIER_PIN_DRV_4_8,
+			     135, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(148, "PORT15", 0,
+			     136, UNIPHIER_PIN_DRV_4_8,
+			     136, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(149, "PORT16", 0,
+			     137, UNIPHIER_PIN_DRV_4_8,
+			     137, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(150, "PORT17", UNIPHIER_PIN_IECTRL_NONE,
+			     138, UNIPHIER_PIN_DRV_4_8,
+			     138, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(151, "PORT20", 0,
+			     139, UNIPHIER_PIN_DRV_4_8,
+			     139, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(152, "PORT21", 0,
+			     140, UNIPHIER_PIN_DRV_4_8,
+			     140, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(153, "PORT22", 0,
+			     141, UNIPHIER_PIN_DRV_4_8,
+			     141, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(154, "PORT23", 0,
+			     142, UNIPHIER_PIN_DRV_4_8,
+			     142, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(155, "PORT24", UNIPHIER_PIN_IECTRL_NONE,
+			     143, UNIPHIER_PIN_DRV_4_8,
+			     143, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(156, "PORT25", 0,
+			     144, UNIPHIER_PIN_DRV_4_8,
+			     144, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(157, "PORT26", 0,
+			     145, UNIPHIER_PIN_DRV_4_8,
+			     145, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(158, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(159, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(160, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(161, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(162, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(163, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(164, "NANDRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_UP),
+};
+
+static const unsigned emmc_pins[] = {21, 22, 23, 24, 25, 26, 27};
+static const unsigned emmc_muxvals[] = {0, 1, 1, 1, 1, 1, 1};
+static const unsigned emmc_dat8_pins[] = {28, 29, 30, 31};
+static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned i2c0_pins[] = {102, 103};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {104, 105};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {108, 109};
+static const unsigned i2c2_muxvals[] = {2, 2};
+static const unsigned i2c3_pins[] = {108, 109};
+static const unsigned i2c3_muxvals[] = {3, 3};
+static const unsigned nand_pins[] = {24, 25, 26, 27, 28, 29, 30, 31, 158, 159,
+				     160, 161, 162, 163, 164};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+					0, 0};
+static const unsigned nand_cs1_pins[] = {22, 23};
+static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned uart0_pins[] = {85, 88};
+static const unsigned uart0_muxvals[] = {1, 1};
+static const unsigned uart1_pins[] = {155, 156};
+static const unsigned uart1_muxvals[] = {13, 13};
+static const unsigned uart1b_pins[] = {69, 70};
+static const unsigned uart1b_muxvals[] = {23, 23};
+static const unsigned uart2_pins[] = {128, 129};
+static const unsigned uart2_muxvals[] = {13, 13};
+static const unsigned uart3_pins[] = {110, 111};
+static const unsigned uart3_muxvals[] = {1, 1};
+static const unsigned usb0_pins[] = {53, 54};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {55, 56};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {155, 156};
+static const unsigned usb2_muxvals[] = {4, 4};
+static const unsigned usb2b_pins[] = {67, 68};
+static const unsigned usb2b_muxvals[] = {23, 23};
+static const unsigned port_range0_pins[] = {
+	135, 136, 137, 138, 139, 140, 141, 142,		/* PORT0x */
+	143, 144, 145, 146, 147, 148, 149, 150,		/* PORT1x */
+	151, 152, 153, 154, 155, 156, 157, 0,		/* PORT2x */
+	1, 2, 3, 4, 5, 120, 121, 122,			/* PORT3x */
+	24, 25, 26, 27, 28, 29, 30, 31,			/* PORT4x */
+	40, 41, 42, 43, 44, 45, 46, 47,			/* PORT5x */
+	48, 49, 50, 51, 52, 53, 54, 55,			/* PORT6x */
+	56, 85, 84, 59, 82, 61, 64, 65,			/* PORT7x */
+	8, 9, 10, 11, 12, 13, 14, 15,			/* PORT8x */
+	66, 67, 68, 69, 70, 71, 72, 73,			/* PORT9x */
+	74, 75, 89, 86, 78, 79, 80, 81,			/* PORT10x */
+	60, 83, 58, 57, 88, 87, 77, 76,			/* PORT11x */
+	90, 91, 92, 93, 94, 95, 96, 97,			/* PORT12x */
+	98, 99, 100, 6, 101, 114, 115, 116,		/* PORT13x */
+	103, 108, 21, 22, 23, 117, 118, 119,		/* PORT14x */
+};
+static const unsigned port_range0_muxvals[] = {
+	0, 0, 0, 0, 0, 0, 0, 0,				/* PORT0x */
+	0, 0, 0, 0, 0, 0, 0, 0,				/* PORT1x */
+	0, 0, 0, 0, 0, 0, 0, 15,			/* PORT2x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT3x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT4x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT5x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT6x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT7x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT11x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT12x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT13x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT14x */
+};
+static const unsigned port_range1_pins[] = {
+	7,						/* PORT166 */
+};
+static const unsigned port_range1_muxvals[] = {
+	15,						/* PORT166 */
+};
+static const unsigned xirq_range0_pins[] = {
+	151, 123, 124, 125, 126, 127, 128, 129,		/* XIRQ0-7 */
+	130, 131, 132, 133, 62,				/* XIRQ8-12 */
+};
+static const unsigned xirq_range0_muxvals[] = {
+	14, 0, 0, 0, 0, 0, 0, 0,			/* XIRQ0-7 */
+	0, 0, 0, 0, 14,					/* XIRQ8-12 */
+};
+static const unsigned xirq_range1_pins[] = {
+	134, 63,					/* XIRQ14-15 */
+};
+static const unsigned xirq_range1_muxvals[] = {
+	0, 14,						/* XIRQ14-15 */
+};
+
+static const struct uniphier_pinctrl_group ph1_ld4_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart1b),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart3),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP(usb2b),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_range1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port110, port_range0, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port111, port_range0, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port112, port_range0, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port113, port_range0, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port114, port_range0, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port115, port_range0, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port116, port_range0, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port117, port_range0, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range0, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range0, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range0, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range0, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range0, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range0, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range0, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range0, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range0, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range0, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range0, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range0, 107),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range0, 108),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range0, 109),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range0, 110),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range0, 111),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range0, 112),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range0, 113),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range0, 114),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range0, 115),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range0, 116),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range0, 117),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range0, 118),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range0, 119),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port165, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq_range1, 1),
+};
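+
+/*
+ * Illustrative note: each UNIPHIER_PINCTRL_GROUP_SINGLE() entry above
+ * exposes one pin of a range as its own group, so a device tree can mux
+ * a single port or external interrupt line without claiming the whole
+ * range.
+ */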
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0"};
+static const char * const uart1_groups[] = {"uart1", "uart1b"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2", "usb2b"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	"port110", "port111", "port112", "port113",
+	"port114", "port115", "port116", "port117",
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	/* port150-164 missing */
+	/* none */ "port165",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", /* none*/ "xirq14", "xirq15",
+};
+
+static const struct uniphier_pinmux_function ph1_ld4_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(uart3),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
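+/*
+ * Each pin's mux value occupies 8 bits and the mux registers are laid out
+ * at a 4-byte stride; no LOAD_PINCTRL trigger is needed on this SoC.
+ */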
+static struct uniphier_pinctrl_socdata ph1_ld4_pindata = {
+	.groups = ph1_ld4_groups,
+	.groups_count = ARRAY_SIZE(ph1_ld4_groups),
+	.functions = ph1_ld4_functions,
+	.functions_count = ARRAY_SIZE(ph1_ld4_functions),
+	.mux_bits = 8,
+	.reg_stride = 4,
+	.load_pinctrl = false,
+};
+
+static struct pinctrl_desc ph1_ld4_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = ph1_ld4_pins,
+	.npins = ARRAY_SIZE(ph1_ld4_pins),
+	.owner = THIS_MODULE,
+};
+
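+/* Thin probe wrapper: all real work happens in the common UniPhier core. */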
+static int ph1_ld4_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &ph1_ld4_pinctrl_desc,
+				      &ph1_ld4_pindata);
+}
+
+static const struct of_device_id ph1_ld4_pinctrl_match[] = {
+	{ .compatible = "socionext,ph1-ld4-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_ld4_pinctrl_match);
+
+static struct platform_driver ph1_ld4_pinctrl_driver = {
+	.probe = ph1_ld4_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = ph1_ld4_pinctrl_match,
+	},
+};
+module_platform_driver(ph1_ld4_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-LD4 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c b/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c
new file mode 100644
index 0000000..9720e697
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c
@@ -0,0 +1,1274 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-ld6b-pinctrl"
+
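+/*
+ * UNIPHIER_PINCTRL_PIN(pin, name, iectrl,
+ *                      drive-control bit, drive-strength capability,
+ *                      pull-control bit, default pull state)
+ * A control bit of -1 marks a setting that is fixed and not configurable.
+ */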
+static const struct pinctrl_pin_desc ph1_ld6b_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "ED0", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_4_8,
+			     0, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "ED1", UNIPHIER_PIN_IECTRL_NONE,
+			     1, UNIPHIER_PIN_DRV_4_8,
+			     1, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "ED2", UNIPHIER_PIN_IECTRL_NONE,
+			     2, UNIPHIER_PIN_DRV_4_8,
+			     2, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "ED3", UNIPHIER_PIN_IECTRL_NONE,
+			     3, UNIPHIER_PIN_DRV_4_8,
+			     3, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "ED4", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_4_8,
+			     4, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(5, "ED5", UNIPHIER_PIN_IECTRL_NONE,
+			     5, UNIPHIER_PIN_DRV_4_8,
+			     5, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "ED6", UNIPHIER_PIN_IECTRL_NONE,
+			     6, UNIPHIER_PIN_DRV_4_8,
+			     6, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "ED7", UNIPHIER_PIN_IECTRL_NONE,
+			     7, UNIPHIER_PIN_DRV_4_8,
+			     7, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_4_8,
+			     8, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_4_8,
+			     9, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_4_8,
+			     10, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_4_8,
+			     11, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(12, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_4_8,
+			     12, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(13, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_4_8,
+			     13, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(14, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_4_8,
+			     14, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(15, "PCA00", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     15, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(16, "PCA01", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     16, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(17, "PCA02", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     17, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(18, "PCA03", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     18, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(19, "PCA04", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     19, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(20, "PCA05", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     20, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(21, "PCA06", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(22, "PCA07", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(23, "PCA08", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(24, "PCA09", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     24, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(25, "PCA10", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     25, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(26, "PCA11", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     26, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(27, "PCA12", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     27, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(28, "PCA13", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     28, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(29, "PCA14", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     29, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(30, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+			     30, UNIPHIER_PIN_DRV_4_8,
+			     30, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(31, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(32, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(33, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(34, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(35, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(36, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(37, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(38, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(39, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
+			     39, UNIPHIER_PIN_DRV_4_8,
+			     39, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(40, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_4_8,
+			     40, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(41, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
+			     41, UNIPHIER_PIN_DRV_4_8,
+			     41, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(42, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
+			     42, UNIPHIER_PIN_DRV_4_8,
+			     42, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(43, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
+			     43, UNIPHIER_PIN_DRV_4_8,
+			     43, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(44, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_4_8,
+			     44, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(45, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
+			     45, UNIPHIER_PIN_DRV_4_8,
+			     45, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(46, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
+			     46, UNIPHIER_PIN_DRV_4_8,
+			     46, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(47, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(48, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(49, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(50, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(51, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(52, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(53, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     53, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(54, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     54, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(55, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(56, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(57, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     57, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(59, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(60, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(62, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(63, "USB3OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(64, "HS0BCLKOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "HS0SYNCOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "HS0VALOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "HS0DOUT0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "HS0DOUT1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "HS0DOUT2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "HS0DOUT3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "HS0DOUT4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "HS0DOUT5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "HS0DOUT6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "HS0DOUT7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "HS1DIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "HS2BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "HS2SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "HS2VALIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "HS2DIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "HS2DIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "HS2DIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "HS2DIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "HS2DIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     93, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "HS2DIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "HS2DIN6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     95, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "HS2DIN7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     96, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     97, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     98, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     99, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     100, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(101, "AO1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     101, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "AO1D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     102, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(103, "AO1D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(104, "AO1D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(105, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     105, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(106, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     106, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(107, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(108, "AO2D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     108, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(109, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     109, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(110, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     110, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(111, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     111, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(112, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     112, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(113, "SBO0", 0,
+			     113, UNIPHIER_PIN_DRV_4_8,
+			     113, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(114, "SBI0", 0,
+			     114, UNIPHIER_PIN_DRV_4_8,
+			     114, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(115, "TXD1", 0,
+			     115, UNIPHIER_PIN_DRV_4_8,
+			     115, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(116, "RXD1", 0,
+			     116, UNIPHIER_PIN_DRV_4_8,
+			     116, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(117, "PWSRA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     117, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(118, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     118, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(119, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     119, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(120, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     120, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(121, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     121, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     122, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(123, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     123, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     124, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(125, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     125, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(126, "XIRQ8", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     126, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(127, "PORT00", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     127, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(128, "PORT01", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     128, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(129, "PORT02", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     129, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(130, "PORT03", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     130, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(131, "PORT04", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     131, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(132, "PORT05", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     132, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(133, "PORT06", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     133, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(134, "PORT07", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     134, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(135, "PORT10", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     135, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(136, "PORT11", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     136, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(137, "PORT12", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     137, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(138, "PORT13", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     138, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(139, "PORT14", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     139, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(140, "PORT15", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     140, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(141, "PORT16", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     141, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(142, "LPST", UNIPHIER_PIN_IECTRL_NONE,
+			     142, UNIPHIER_PIN_DRV_4_8,
+			     142, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(143, "MDC", 0,
+			     143, UNIPHIER_PIN_DRV_4_8,
+			     143, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(144, "MDIO", 0,
+			     144, UNIPHIER_PIN_DRV_4_8,
+			     144, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(145, "MDIO_INTL", 0,
+			     145, UNIPHIER_PIN_DRV_4_8,
+			     145, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(146, "PHYRSTL", 0,
+			     146, UNIPHIER_PIN_DRV_4_8,
+			     146, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(147, "RGMII_RXCLK", 0,
+			     147, UNIPHIER_PIN_DRV_4_8,
+			     147, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(148, "RGMII_RXD0", 0,
+			     148, UNIPHIER_PIN_DRV_4_8,
+			     148, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(149, "RGMII_RXD1", 0,
+			     149, UNIPHIER_PIN_DRV_4_8,
+			     149, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(150, "RGMII_RXD2", 0,
+			     150, UNIPHIER_PIN_DRV_4_8,
+			     150, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(151, "RGMII_RXD3", 0,
+			     151, UNIPHIER_PIN_DRV_4_8,
+			     151, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(152, "RGMII_RXCTL", 0,
+			     152, UNIPHIER_PIN_DRV_4_8,
+			     152, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(153, "RGMII_TXCLK", 0,
+			     153, UNIPHIER_PIN_DRV_4_8,
+			     153, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(154, "RGMII_TXD0", 0,
+			     154, UNIPHIER_PIN_DRV_4_8,
+			     154, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(155, "RGMII_TXD1", 0,
+			     155, UNIPHIER_PIN_DRV_4_8,
+			     155, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(156, "RGMII_TXD2", 0,
+			     156, UNIPHIER_PIN_DRV_4_8,
+			     156, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(157, "RGMII_TXD3", 0,
+			     157, UNIPHIER_PIN_DRV_4_8,
+			     157, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(158, "RGMII_TXCTL", 0,
+			     158, UNIPHIER_PIN_DRV_4_8,
+			     158, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(159, "A_D_PCD00OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     159, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(160, "A_D_PCD01OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     160, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(161, "A_D_PCD02OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     161, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(162, "A_D_PCD03OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     162, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(163, "A_D_PCD04OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     163, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(164, "A_D_PCD05OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     164, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(165, "A_D_PCD06OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     165, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(166, "A_D_PCD07OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     166, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(167, "A_D_PCD00IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     167, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(168, "A_D_PCD01IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     168, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(169, "A_D_PCD02IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     169, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(170, "A_D_PCD03IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     170, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(171, "A_D_PCD04IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     171, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(172, "A_D_PCD05IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     172, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(173, "A_D_PCD06IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     173, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(174, "A_D_PCD07IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     174, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(175, "A_D_PCDNOE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     175, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(176, "A_D_PC0READY", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     176, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(177, "A_D_PC0CD1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     177, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(178, "A_D_PC0CD2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     178, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(179, "A_D_PC0WAIT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     179, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(180, "A_D_PC0RESET", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     180, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(181, "A_D_PC0CE1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     181, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(182, "A_D_PC0WE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     182, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(183, "A_D_PC0OE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     183, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(184, "A_D_PC0IOWR", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     184, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(185, "A_D_PC0IORD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     185, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(186, "A_D_PC0NOE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     186, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(187, "A_D_HS0BCLKIN", 0,
+			     187, UNIPHIER_PIN_DRV_4_8,
+			     187, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(188, "A_D_HS0SYNCIN", 0,
+			     188, UNIPHIER_PIN_DRV_4_8,
+			     188, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(189, "A_D_HS0VALIN", 0,
+			     189, UNIPHIER_PIN_DRV_4_8,
+			     189, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(190, "A_D_HS0DIN0", 0,
+			     190, UNIPHIER_PIN_DRV_4_8,
+			     190, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(191, "A_D_HS0DIN1", 0,
+			     191, UNIPHIER_PIN_DRV_4_8,
+			     191, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(192, "A_D_HS0DIN2", 0,
+			     192, UNIPHIER_PIN_DRV_4_8,
+			     192, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(193, "A_D_HS0DIN3", 0,
+			     193, UNIPHIER_PIN_DRV_4_8,
+			     193, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(194, "A_D_HS0DIN4", 0,
+			     194, UNIPHIER_PIN_DRV_4_8,
+			     194, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(195, "A_D_HS0DIN5", 0,
+			     195, UNIPHIER_PIN_DRV_4_8,
+			     195, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(196, "A_D_HS0DIN6", 0,
+			     196, UNIPHIER_PIN_DRV_4_8,
+			     196, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(197, "A_D_HS0DIN7", 0,
+			     197, UNIPHIER_PIN_DRV_4_8,
+			     197, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(198, "A_D_AO1ARC", 0,
+			     198, UNIPHIER_PIN_DRV_4_8,
+			     198, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(199, "A_D_SPIXRST", UNIPHIER_PIN_IECTRL_NONE,
+			     199, UNIPHIER_PIN_DRV_4_8,
+			     199, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(200, "A_D_SPISCLK0", UNIPHIER_PIN_IECTRL_NONE,
+			     200, UNIPHIER_PIN_DRV_4_8,
+			     200, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(201, "A_D_SPITXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     201, UNIPHIER_PIN_DRV_4_8,
+			     201, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(202, "A_D_SPIRXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     202, UNIPHIER_PIN_DRV_4_8,
+			     202, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(203, "A_D_DMDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     203, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(204, "A_D_DMDPSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     204, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(205, "A_D_DMDVAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     205, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(206, "A_D_DMDDATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     206, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(207, "A_D_HDMIRXXIRQ", 0,
+			     207, UNIPHIER_PIN_DRV_4_8,
+			     207, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(208, "A_D_VBIXIRQ", 0,
+			     208, UNIPHIER_PIN_DRV_4_8,
+			     208, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(209, "A_D_HDMITXXIRQ", 0,
+			     209, UNIPHIER_PIN_DRV_4_8,
+			     209, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(210, "A_D_DMDIRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     210, UNIPHIER_PIN_DRV_4_8,
+			     210, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(211, "A_D_SPICIRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     211, UNIPHIER_PIN_DRV_4_8,
+			     211, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(212, "A_D_SPIBIRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     212, UNIPHIER_PIN_DRV_4_8,
+			     212, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(213, "A_D_BESDAOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     213, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(214, "A_D_BESDAIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     214, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(215, "A_D_BESCLOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     215, UNIPHIER_PIN_DRV_4_8,
+			     215, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(216, "A_D_VDACCLKOUT", 0,
+			     216, UNIPHIER_PIN_DRV_4_8,
+			     216, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(217, "A_D_VDACDOUT5", 0,
+			     217, UNIPHIER_PIN_DRV_4_8,
+			     217, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(218, "A_D_VDACDOUT6", 0,
+			     218, UNIPHIER_PIN_DRV_4_8,
+			     218, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(219, "A_D_VDACDOUT7", 0,
+			     219, UNIPHIER_PIN_DRV_4_8,
+			     219, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(220, "A_D_VDACDOUT8", 0,
+			     220, UNIPHIER_PIN_DRV_4_8,
+			     220, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(221, "A_D_VDACDOUT9", 0,
+			     221, UNIPHIER_PIN_DRV_4_8,
+			     221, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(222, "A_D_SIFBCKIN", 0,
+			     222, UNIPHIER_PIN_DRV_4_8,
+			     222, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(223, "A_D_SIFLRCKIN", 0,
+			     223, UNIPHIER_PIN_DRV_4_8,
+			     223, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(224, "A_D_SIFDIN", 0,
+			     224, UNIPHIER_PIN_DRV_4_8,
+			     224, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(225, "A_D_LIBCKOUT", 0,
+			     225, UNIPHIER_PIN_DRV_4_8,
+			     225, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(226, "A_D_LILRCKOUT", 0,
+			     226, UNIPHIER_PIN_DRV_4_8,
+			     226, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(227, "A_D_LIDIN", 0,
+			     227, UNIPHIER_PIN_DRV_4_8,
+			     227, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(228, "A_D_LODOUT", 0,
+			     228, UNIPHIER_PIN_DRV_4_8,
+			     228, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(229, "A_D_HPDOUT", 0,
+			     229, UNIPHIER_PIN_DRV_4_8,
+			     229, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(230, "A_D_MCLK", 0,
+			     230, UNIPHIER_PIN_DRV_4_8,
+			     230, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(231, "A_D_A2PLLREFOUT", 0,
+			     231, UNIPHIER_PIN_DRV_4_8,
+			     231, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(232, "A_D_HDMI3DSDAOUT", 0,
+			     232, UNIPHIER_PIN_DRV_4_8,
+			     232, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(233, "A_D_HDMI3DSDAIN", 0,
+			     233, UNIPHIER_PIN_DRV_4_8,
+			     233, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(234, "A_D_HDMI3DSCLIN", 0,
+			     234, UNIPHIER_PIN_DRV_4_8,
+			     234, UNIPHIER_PIN_PULL_DOWN),
+};
+
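+/* The "adinter" group selects muxval 0 on every A_D_* pin (159-234). */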
+static const unsigned adinter_pins[] = {
+	159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+	173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+	187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
+	201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+	215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+	229, 230, 231, 232, 233, 234,
+};
+static const unsigned adinter_muxvals[] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0,
+};
+static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42};
+static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
+static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46};
+static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned i2c0_pins[] = {109, 110};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {111, 112};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {115, 116};
+static const unsigned i2c2_muxvals[] = {1, 1};
+static const unsigned i2c3_pins[] = {118, 119};
+static const unsigned i2c3_muxvals[] = {1, 1};
+static const unsigned nand_pins[] = {30, 31, 32, 33, 34, 35, 36, 39, 40, 41,
+				     42, 43, 44, 45, 46};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+					0, 0};
+static const unsigned nand_cs1_pins[] = {37, 38};
+static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned uart0_pins[] = {135, 136};
+static const unsigned uart0_muxvals[] = {3, 3};
+static const unsigned uart0b_pins[] = {11, 12};
+static const unsigned uart0b_muxvals[] = {2, 2};
+static const unsigned uart1_pins[] = {115, 116};
+static const unsigned uart1_muxvals[] = {0, 0};
+static const unsigned uart1b_pins[] = {113, 114};
+static const unsigned uart1b_muxvals[] = {1, 1};
+static const unsigned uart2_pins[] = {113, 114};
+static const unsigned uart2_muxvals[] = {2, 2};
+static const unsigned uart2b_pins[] = {86, 87};
+static const unsigned uart2b_muxvals[] = {1, 1};
+static const unsigned usb0_pins[] = {56, 57};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {58, 59};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {60, 61};
+static const unsigned usb2_muxvals[] = {0, 0};
+static const unsigned usb3_pins[] = {62, 63};
+static const unsigned usb3_muxvals[] = {0, 0};
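+/*
+ * GPIO port tables: each PORTnx row maps eight GPIO slots (x = 0-7) to
+ * physical pins, and muxval 15 selects the GPIO function on all of them.
+ */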
+static const unsigned port_range0_pins[] = {
+	127, 128, 129, 130, 131, 132, 133, 134,		/* PORT0x */
+	135, 136, 137, 138, 139, 140, 141, 142,		/* PORT1x */
+	0, 1, 2, 3, 4, 5, 6, 7,				/* PORT2x */
+	8, 9, 10, 11, 12, 13, 14, 15,			/* PORT3x */
+	16, 17, 18, 19, 21, 22, 23, 24,			/* PORT4x */
+	25, 30, 31, 32, 33, 34, 35, 36,			/* PORT5x */
+	37, 38, 39, 40, 41, 42, 43, 44,			/* PORT6x */
+	45, 46, 47, 48, 49, 50, 51, 52,			/* PORT7x */
+	53, 54, 55, 56, 57, 58, 59, 60,			/* PORT8x */
+	61, 62, 63, 64, 65, 66, 67, 68,			/* PORT9x */
+	69, 70, 71, 76, 77, 78, 79, 80,			/* PORT10x */
+};
+static const unsigned port_range0_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT0x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT1x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT2x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT3x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT4x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT5x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT6x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT7x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
+};
+static const unsigned port_range1_pins[] = {
+	81, 82, 83, 84, 85, 86, 87, 88,			/* PORT12x */
+	89, 90, 95, 96, 97, 98, 99, 100,		/* PORT13x */
+	101, 102, 103, 104, 105, 106, 107, 108,		/* PORT14x */
+	118, 119, 120, 121, 122, 123, 124, 125,		/* PORT15x */
+	126, 72, 73, 92, 177, 93, 94, 176,		/* PORT16x */
+	74, 91, 27, 28, 29, 75, 20, 26,			/* PORT17x */
+	109, 110, 111, 112, 113, 114, 115, 116,		/* PORT18x */
+	117, 143, 144, 145, 146, 147, 148, 149,		/* PORT19x */
+	150, 151, 152, 153, 154, 155, 156, 157,		/* PORT20x */
+	158, 159, 160, 161, 162, 163, 164, 165,		/* PORT21x */
+	166, 178, 179, 180, 181, 182, 183, 184,		/* PORT22x */
+	185, 187, 188, 189, 190, 191, 192, 193,		/* PORT23x */
+	194, 195, 196, 197, 198, 199, 200, 201,		/* PORT24x */
+	202, 203, 204, 205, 206, 207, 208, 209,		/* PORT25x */
+	210, 211, 212, 213, 214, 215, 216, 217,		/* PORT26x */
+	218, 219, 220, 221, 223, 224, 225, 226,		/* PORT27x */
+	227, 228, 229, 230, 231, 232, 233, 234,		/* PORT28x */
+};
+static const unsigned port_range1_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT12x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT13x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT14x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT15x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT16x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT17x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT18x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT19x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT20x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT21x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT22x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT23x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT24x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT25x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT26x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT27x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT28x */
+};
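+/* muxval 14 selects the external interrupt (XIRQ) function on these pins. */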
+static const unsigned xirq_pins[] = {
+	118, 119, 120, 121, 122, 123, 124, 125,		/* XIRQ0-7 */
+	126, 72, 73, 92, 177, 93, 94, 176,		/* XIRQ8-15 */
+	74, 91, 27, 28, 29, 75, 20, 26,			/* XIRQ16-23 */
+};
+static const unsigned xirq_muxvals[] = {
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ0-7 */
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ8-15 */
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ16-23 */
+};
+
+static const struct uniphier_pinctrl_group ph1_ld6b_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(adinter),
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart0b),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart1b),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart2b),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP(usb3),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range1, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range1, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range1, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range1, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range1, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range1, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range1, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range1, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range1, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range1, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range1, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range1, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range1, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range1, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range1, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range1, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range1, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range1, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range1, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range1, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range1, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range1, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range1, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port150, port_range1, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port151, port_range1, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port152, port_range1, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port153, port_range1, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port154, port_range1, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port155, port_range1, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port156, port_range1, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port157, port_range1, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port160, port_range1, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port161, port_range1, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port162, port_range1, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port163, port_range1, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port164, port_range1, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port165, port_range1, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port166, port_range1, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port167, port_range1, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port170, port_range1, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port171, port_range1, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port172, port_range1, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port173, port_range1, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port174, port_range1, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 107),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 108),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 109),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 110),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 111),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 112),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 113),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 114),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 115),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 116),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 117),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 118),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 119),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 120),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 121),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 122),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 123),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 124),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 125),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 126),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 127),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 128),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 129),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 130),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 131),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 132),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 133),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 134),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 135),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21, xirq, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22, xirq, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23, xirq, 23),
+};
+
+static const char * const adinter_groups[] = {"adinter"};
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0", "uart0b"};
+static const char * const uart1_groups[] = {"uart1", "uart1b"};
+static const char * const uart2_groups[] = {"uart2", "uart2b"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const usb3_groups[] = {"usb3"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	/* port110-117 missing */
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	"port150", "port151", "port152", "port153",
+	"port154", "port155", "port156", "port157",
+	"port160", "port161", "port162", "port163",
+	"port164", "port165", "port166", "port167",
+	"port170", "port171", "port172", "port173",
+	"port174", "port175", "port176", "port177",
+	"port180", "port181", "port182", "port183",
+	"port184", "port185", "port186", "port187",
+	"port190", "port191", "port192", "port193",
+	"port194", "port195", "port196", "port197",
+	"port200", "port201", "port202", "port203",
+	"port204", "port205", "port206", "port207",
+	"port210", "port211", "port212", "port213",
+	"port214", "port215", "port216", "port217",
+	"port220", "port221", "port222", "port223",
+	"port224", "port225", "port226", "port227",
+	"port230", "port231", "port232", "port233",
+	"port234", "port235", "port236", "port237",
+	"port240", "port241", "port242", "port243",
+	"port244", "port245", "port246", "port247",
+	"port250", "port251", "port252", "port253",
+	"port254", "port255", "port256", "port257",
+	"port260", "port261", "port262", "port263",
+	"port264", "port265", "port266", "port267",
+	"port270", "port271", "port272", "port273",
+	"port274", "port275", "port276", "port277",
+	"port280", "port281", "port282", "port283",
+	"port284", "port285", "port286", "port287",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", "xirq13", "xirq14", "xirq15",
+	"xirq16", "xirq17", "xirq18", "xirq19",
+	"xirq20", "xirq21", "xirq22", "xirq23",
+};
+
+static const struct uniphier_pinmux_function ph1_ld6b_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(adinter), /* Achip-Dchip interconnect */
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(usb3),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
+static struct uniphier_pinctrl_socdata ph1_ld6b_pindata = {
+	.groups = ph1_ld6b_groups,
+	.groups_count = ARRAY_SIZE(ph1_ld6b_groups),
+	.functions = ph1_ld6b_functions,
+	.functions_count = ARRAY_SIZE(ph1_ld6b_functions),
+	.mux_bits = 8,
+	.reg_stride = 4,
+	.load_pinctrl = false,
+};
+
+static struct pinctrl_desc ph1_ld6b_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = ph1_ld6b_pins,
+	.npins = ARRAY_SIZE(ph1_ld6b_pins),
+	.owner = THIS_MODULE,
+};
+
+static int ph1_ld6b_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &ph1_ld6b_pinctrl_desc,
+				      &ph1_ld6b_pindata);
+}
+
+static const struct of_device_id ph1_ld6b_pinctrl_match[] = {
+	{ .compatible = "socionext,ph1-ld6b-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_ld6b_pinctrl_match);
+
+static struct platform_driver ph1_ld6b_pinctrl_driver = {
+	.probe = ph1_ld6b_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = ph1_ld6b_pinctrl_match,
+	},
+};
+module_platform_driver(ph1_ld6b_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-LD6b pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c b/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c
new file mode 100644
index 0000000..96921e4
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c
@@ -0,0 +1,1554 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-pro4-pinctrl"
+
+static const struct pinctrl_pin_desc ph1_pro4_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "CK24O", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_4_8,
+			     0, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "VC27A", UNIPHIER_PIN_IECTRL_NONE,
+			     1, UNIPHIER_PIN_DRV_4_8,
+			     1, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "CK27AI", UNIPHIER_PIN_IECTRL_NONE,
+			     2, UNIPHIER_PIN_DRV_4_8,
+			     2, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "CK27AO", UNIPHIER_PIN_IECTRL_NONE,
+			     3, UNIPHIER_PIN_DRV_4_8,
+			     3, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "CKSEL", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_4_8,
+			     4, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(5, "CK27AV", UNIPHIER_PIN_IECTRL_NONE,
+			     5, UNIPHIER_PIN_DRV_4_8,
+			     5, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "AEXCKA", UNIPHIER_PIN_IECTRL_NONE,
+			     6, UNIPHIER_PIN_DRV_4_8,
+			     6, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "ASEL", UNIPHIER_PIN_IECTRL_NONE,
+			     7, UNIPHIER_PIN_DRV_4_8,
+			     7, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "ARCRESET", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_4_8,
+			     8, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "ARCUNLOCK", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_4_8,
+			     9, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "XSRST", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_4_8,
+			     10, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "XNMIRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_4_8,
+			     11, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(12, "XSCIRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_4_8,
+			     12, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(13, "EXTRG", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_4_8,
+			     13, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(14, "TRCCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_4_8,
+			     14, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(15, "TRCCTL", UNIPHIER_PIN_IECTRL_NONE,
+			     15, UNIPHIER_PIN_DRV_4_8,
+			     15, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(16, "TRCD0", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_4_8,
+			     16, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(17, "TRCD1", UNIPHIER_PIN_IECTRL_NONE,
+			     17, UNIPHIER_PIN_DRV_4_8,
+			     17, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(18, "TRCD2", UNIPHIER_PIN_IECTRL_NONE,
+			     18, UNIPHIER_PIN_DRV_4_8,
+			     18, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(19, "TRCD3", UNIPHIER_PIN_IECTRL_NONE,
+			     19, UNIPHIER_PIN_DRV_4_8,
+			     19, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(20, "TRCD4", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_4_8,
+			     20, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(21, "TRCD5", UNIPHIER_PIN_IECTRL_NONE,
+			     21, UNIPHIER_PIN_DRV_4_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(22, "TRCD6", UNIPHIER_PIN_IECTRL_NONE,
+			     22, UNIPHIER_PIN_DRV_4_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(23, "TRCD7", UNIPHIER_PIN_IECTRL_NONE,
+			     23, UNIPHIER_PIN_DRV_4_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(24, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_4_8,
+			     24, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(25, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
+			     25, UNIPHIER_PIN_DRV_4_8,
+			     25, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(26, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+			     26, UNIPHIER_PIN_DRV_4_8,
+			     26, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(27, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+			     27, UNIPHIER_PIN_DRV_4_8,
+			     27, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(28, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_4_8,
+			     28, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(29, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+			     29, UNIPHIER_PIN_DRV_4_8,
+			     29, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(30, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+			     30, UNIPHIER_PIN_DRV_4_8,
+			     30, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(31, "ED0", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(32, "ED1", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(33, "ED2", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(34, "ED3", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(35, "ED4", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(36, "ED5", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(37, "ED6", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(38, "ED7", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(39, "BOOTSWAP", UNIPHIER_PIN_IECTRL_NONE,
+			     39, UNIPHIER_PIN_DRV_NONE,
+			     39, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(40, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
+			     2, UNIPHIER_PIN_DRV_8_12_16_20,
+			     40, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(41, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
+			     3, UNIPHIER_PIN_DRV_8_12_16_20,
+			     41, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(42, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_8_12_16_20,
+			     42, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(43, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
+			     5, UNIPHIER_PIN_DRV_8_12_16_20,
+			     43, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(44, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
+			     6, UNIPHIER_PIN_DRV_8_12_16_20,
+			     44, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(45, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
+			     7, UNIPHIER_PIN_DRV_8_12_16_20,
+			     45, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(46, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_8_12_16_20,
+			     46, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(47, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_8_12_16_20,
+			     47, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(48, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+			     48, UNIPHIER_PIN_DRV_4_8,
+			     48, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(49, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+			     49, UNIPHIER_PIN_DRV_4_8,
+			     49, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(50, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+			     50, UNIPHIER_PIN_DRV_4_8,
+			     50, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(51, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_8_12_16_20,
+			     51, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(52, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_4_8,
+			     52, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(53, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+			     1, UNIPHIER_PIN_DRV_8_12_16_20,
+			     53, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(54, "NRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+			     54, UNIPHIER_PIN_DRV_4_8,
+			     54, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(55, "DMDSCLTST", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_NONE,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(56, "DMDSDATST", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(57, "AGCI0", 3,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(59, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(60, "AGCBS0", 5,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(62, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(63, "ANTSHORT", UNIPHIER_PIN_IECTRL_NONE,
+			     57, UNIPHIER_PIN_DRV_4_8,
+			     57, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(64, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     58, UNIPHIER_PIN_DRV_4_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     59, UNIPHIER_PIN_DRV_4_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     60, UNIPHIER_PIN_DRV_4_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     61, UNIPHIER_PIN_DRV_4_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     62, UNIPHIER_PIN_DRV_4_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     63, UNIPHIER_PIN_DRV_4_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     64, UNIPHIER_PIN_DRV_4_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     65, UNIPHIER_PIN_DRV_4_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     66, UNIPHIER_PIN_DRV_4_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     67, UNIPHIER_PIN_DRV_4_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     68, UNIPHIER_PIN_DRV_4_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     69, UNIPHIER_PIN_DRV_4_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     70, UNIPHIER_PIN_DRV_4_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     71, UNIPHIER_PIN_DRV_4_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     72, UNIPHIER_PIN_DRV_4_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     73, UNIPHIER_PIN_DRV_4_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     74, UNIPHIER_PIN_DRV_4_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     75, UNIPHIER_PIN_DRV_4_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     76, UNIPHIER_PIN_DRV_4_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     77, UNIPHIER_PIN_DRV_4_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     78, UNIPHIER_PIN_DRV_4_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     79, UNIPHIER_PIN_DRV_4_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     80, UNIPHIER_PIN_DRV_4_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     81, UNIPHIER_PIN_DRV_4_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     82, UNIPHIER_PIN_DRV_4_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     83, UNIPHIER_PIN_DRV_4_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     84, UNIPHIER_PIN_DRV_4_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     85, UNIPHIER_PIN_DRV_4_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "CKFEO", UNIPHIER_PIN_IECTRL_NONE,
+			     86, UNIPHIER_PIN_DRV_4_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "XFERST", UNIPHIER_PIN_IECTRL_NONE,
+			     87, UNIPHIER_PIN_DRV_4_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "P_FE_ON", UNIPHIER_PIN_IECTRL_NONE,
+			     88, UNIPHIER_PIN_DRV_4_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "P_TU0_ON", UNIPHIER_PIN_IECTRL_NONE,
+			     89, UNIPHIER_PIN_DRV_4_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "XFEIRQ0", UNIPHIER_PIN_IECTRL_NONE,
+			     90, UNIPHIER_PIN_DRV_4_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "XFEIRQ1", UNIPHIER_PIN_IECTRL_NONE,
+			     91, UNIPHIER_PIN_DRV_4_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "XFEIRQ2", UNIPHIER_PIN_IECTRL_NONE,
+			     92, UNIPHIER_PIN_DRV_4_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "XFEIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+			     93, UNIPHIER_PIN_DRV_4_8,
+			     93, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "XFEIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+			     94, UNIPHIER_PIN_DRV_4_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(101, "XFEIRQ5", UNIPHIER_PIN_IECTRL_NONE,
+			     95, UNIPHIER_PIN_DRV_4_8,
+			     95, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "XFEIRQ6", UNIPHIER_PIN_IECTRL_NONE,
+			     96, UNIPHIER_PIN_DRV_4_8,
+			     96, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(103, "SMTCLK0", UNIPHIER_PIN_IECTRL_NONE,
+			     97, UNIPHIER_PIN_DRV_4_8,
+			     97, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(104, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE,
+			     98, UNIPHIER_PIN_DRV_4_8,
+			     98, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(105, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE,
+			     99, UNIPHIER_PIN_DRV_4_8,
+			     99, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(106, "SMTD0", UNIPHIER_PIN_IECTRL_NONE,
+			     100, UNIPHIER_PIN_DRV_4_8,
+			     100, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(107, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE,
+			     101, UNIPHIER_PIN_DRV_4_8,
+			     101, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(108, "SMTDET0", UNIPHIER_PIN_IECTRL_NONE,
+			     102, UNIPHIER_PIN_DRV_4_8,
+			     102, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(109, "SMTCLK1", UNIPHIER_PIN_IECTRL_NONE,
+			     103, UNIPHIER_PIN_DRV_4_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(110, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE,
+			     104, UNIPHIER_PIN_DRV_4_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(111, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE,
+			     105, UNIPHIER_PIN_DRV_4_8,
+			     105, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(112, "SMTD1", UNIPHIER_PIN_IECTRL_NONE,
+			     106, UNIPHIER_PIN_DRV_4_8,
+			     106, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(113, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE,
+			     107, UNIPHIER_PIN_DRV_4_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(114, "SMTDET1", UNIPHIER_PIN_IECTRL_NONE,
+			     108, UNIPHIER_PIN_DRV_4_8,
+			     108, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(115, "XINTM", UNIPHIER_PIN_IECTRL_NONE,
+			     109, UNIPHIER_PIN_DRV_4_8,
+			     109, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(116, "SCLKM", UNIPHIER_PIN_IECTRL_NONE,
+			     110, UNIPHIER_PIN_DRV_4_8,
+			     110, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(117, "SBMTP", UNIPHIER_PIN_IECTRL_NONE,
+			     111, UNIPHIER_PIN_DRV_4_8,
+			     111, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(118, "SBPTM", UNIPHIER_PIN_IECTRL_NONE,
+			     112, UNIPHIER_PIN_DRV_4_8,
+			     112, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(119, "XMPREQ", UNIPHIER_PIN_IECTRL_NONE,
+			     113, UNIPHIER_PIN_DRV_4_8,
+			     113, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(120, "XINTP", UNIPHIER_PIN_IECTRL_NONE,
+			     114, UNIPHIER_PIN_DRV_4_8,
+			     114, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(121, "LPST", UNIPHIER_PIN_IECTRL_NONE,
+			     115, UNIPHIER_PIN_DRV_4_8,
+			     115, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "SDBOOT", UNIPHIER_PIN_IECTRL_NONE,
+			     116, UNIPHIER_PIN_DRV_4_8,
+			     116, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(123, "BFAIL", UNIPHIER_PIN_IECTRL_NONE,
+			     117, UNIPHIER_PIN_DRV_4_8,
+			     117, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "XFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     118, UNIPHIER_PIN_DRV_4_8,
+			     118, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(125, "RF_COM_RDY", UNIPHIER_PIN_IECTRL_NONE,
+			     119, UNIPHIER_PIN_DRV_4_8,
+			     119, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(126, "XDIAG0", UNIPHIER_PIN_IECTRL_NONE,
+			     120, UNIPHIER_PIN_DRV_4_8,
+			     120, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(127, "RXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     121, UNIPHIER_PIN_DRV_4_8,
+			     121, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(128, "TXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     122, UNIPHIER_PIN_DRV_4_8,
+			     122, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(129, "RXD1", UNIPHIER_PIN_IECTRL_NONE,
+			     123, UNIPHIER_PIN_DRV_4_8,
+			     123, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(130, "TXD1", UNIPHIER_PIN_IECTRL_NONE,
+			     124, UNIPHIER_PIN_DRV_4_8,
+			     124, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(131, "RXD2", UNIPHIER_PIN_IECTRL_NONE,
+			     125, UNIPHIER_PIN_DRV_4_8,
+			     125, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(132, "TXD2", UNIPHIER_PIN_IECTRL_NONE,
+			     126, UNIPHIER_PIN_DRV_4_8,
+			     126, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(133, "SS0CS", UNIPHIER_PIN_IECTRL_NONE,
+			     127, UNIPHIER_PIN_DRV_4_8,
+			     127, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(134, "SS0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     128, UNIPHIER_PIN_DRV_4_8,
+			     128, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(135, "SS0DO", UNIPHIER_PIN_IECTRL_NONE,
+			     129, UNIPHIER_PIN_DRV_4_8,
+			     129, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(136, "SS0DI", UNIPHIER_PIN_IECTRL_NONE,
+			     130, UNIPHIER_PIN_DRV_4_8,
+			     130, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(137, "MS0CS0", UNIPHIER_PIN_IECTRL_NONE,
+			     131, UNIPHIER_PIN_DRV_4_8,
+			     131, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(138, "MS0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     132, UNIPHIER_PIN_DRV_4_8,
+			     132, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(139, "MS0DI", UNIPHIER_PIN_IECTRL_NONE,
+			     133, UNIPHIER_PIN_DRV_4_8,
+			     133, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(140, "MS0DO", UNIPHIER_PIN_IECTRL_NONE,
+			     134, UNIPHIER_PIN_DRV_4_8,
+			     134, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(141, "XMDMRST", UNIPHIER_PIN_IECTRL_NONE,
+			     135, UNIPHIER_PIN_DRV_4_8,
+			     135, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(142, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(143, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(144, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(145, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(146, "SCL2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(147, "SDA2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(148, "SCL3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(149, "SDA3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(150, "SD0DAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_8_12_16_20,
+			     136, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(151, "SD0DAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_8_12_16_20,
+			     137, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(152, "SD0DAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_8_12_16_20,
+			     138, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(153, "SD0DAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     15, UNIPHIER_PIN_DRV_8_12_16_20,
+			     139, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(154, "SD0CMD", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_8_12_16_20,
+			     141, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(155, "SD0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_8_12_16_20,
+			     140, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(156, "SD0CD", UNIPHIER_PIN_IECTRL_NONE,
+			     142, UNIPHIER_PIN_DRV_4_8,
+			     142, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(157, "SD0WP", UNIPHIER_PIN_IECTRL_NONE,
+			     143, UNIPHIER_PIN_DRV_4_8,
+			     143, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(158, "SD0VTCG", UNIPHIER_PIN_IECTRL_NONE,
+			     144, UNIPHIER_PIN_DRV_4_8,
+			     144, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(159, "CK25O", UNIPHIER_PIN_IECTRL_NONE,
+			     145, UNIPHIER_PIN_DRV_4_8,
+			     145, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(160, "RGMII_TXCLK", 6,
+			     146, UNIPHIER_PIN_DRV_4_8,
+			     146, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(161, "RGMII_TXD0", 6,
+			     147, UNIPHIER_PIN_DRV_4_8,
+			     147, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(162, "RGMII_TXD1", 6,
+			     148, UNIPHIER_PIN_DRV_4_8,
+			     148, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(163, "RGMII_TXD2", 6,
+			     149, UNIPHIER_PIN_DRV_4_8,
+			     149, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(164, "RGMII_TXD3", 6,
+			     150, UNIPHIER_PIN_DRV_4_8,
+			     150, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(165, "RGMII_TXCTL", 6,
+			     151, UNIPHIER_PIN_DRV_4_8,
+			     151, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(166, "MII_TXER", UNIPHIER_PIN_IECTRL_NONE,
+			     152, UNIPHIER_PIN_DRV_4_8,
+			     152, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(167, "RGMII_RXCLK", 6,
+			     153, UNIPHIER_PIN_DRV_4_8,
+			     153, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(168, "RGMII_RXD0", 6,
+			     154, UNIPHIER_PIN_DRV_4_8,
+			     154, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(169, "RGMII_RXD1", 6,
+			     155, UNIPHIER_PIN_DRV_4_8,
+			     155, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(170, "RGMII_RXD2", 6,
+			     156, UNIPHIER_PIN_DRV_4_8,
+			     156, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(171, "RGMII_RXD3", 6,
+			     157, UNIPHIER_PIN_DRV_4_8,
+			     157, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(172, "RGMII_RXCTL", 6,
+			     158, UNIPHIER_PIN_DRV_4_8,
+			     158, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(173, "MII_RXER", 6,
+			     159, UNIPHIER_PIN_DRV_4_8,
+			     159, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(174, "MII_CRS", 6,
+			     160, UNIPHIER_PIN_DRV_4_8,
+			     160, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(175, "MII_COL", 6,
+			     161, UNIPHIER_PIN_DRV_4_8,
+			     161, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(176, "MDC", 6,
+			     162, UNIPHIER_PIN_DRV_4_8,
+			     162, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(177, "MDIO", 6,
+			     163, UNIPHIER_PIN_DRV_4_8,
+			     163, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(178, "MDIO_INTL", 6,
+			     164, UNIPHIER_PIN_DRV_4_8,
+			     164, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(179, "XETH_RST", 6,
+			     165, UNIPHIER_PIN_DRV_4_8,
+			     165, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(180, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     166, UNIPHIER_PIN_DRV_4_8,
+			     166, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(181, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+			     167, UNIPHIER_PIN_DRV_4_8,
+			     167, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(182, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     168, UNIPHIER_PIN_DRV_4_8,
+			     168, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(183, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+			     169, UNIPHIER_PIN_DRV_4_8,
+			     169, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(184, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     170, UNIPHIER_PIN_DRV_4_8,
+			     170, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(185, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
+			     171, UNIPHIER_PIN_DRV_4_8,
+			     171, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(186, "USB2ID", UNIPHIER_PIN_IECTRL_NONE,
+			     172, UNIPHIER_PIN_DRV_4_8,
+			     172, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(187, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     173, UNIPHIER_PIN_DRV_4_8,
+			     173, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(188, "USB3OD", UNIPHIER_PIN_IECTRL_NONE,
+			     174, UNIPHIER_PIN_DRV_4_8,
+			     174, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(189, "LINKCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     175, UNIPHIER_PIN_DRV_4_8,
+			     175, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(190, "LINKREQ", UNIPHIER_PIN_IECTRL_NONE,
+			     176, UNIPHIER_PIN_DRV_4_8,
+			     176, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(191, "LINKCTL0", UNIPHIER_PIN_IECTRL_NONE,
+			     177, UNIPHIER_PIN_DRV_4_8,
+			     177, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(192, "LINKCTL1", UNIPHIER_PIN_IECTRL_NONE,
+			     178, UNIPHIER_PIN_DRV_4_8,
+			     178, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(193, "LINKDT0", UNIPHIER_PIN_IECTRL_NONE,
+			     179, UNIPHIER_PIN_DRV_4_8,
+			     179, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(194, "LINKDT1", UNIPHIER_PIN_IECTRL_NONE,
+			     180, UNIPHIER_PIN_DRV_4_8,
+			     180, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(195, "LINKDT2", UNIPHIER_PIN_IECTRL_NONE,
+			     181, UNIPHIER_PIN_DRV_4_8,
+			     181, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(196, "LINKDT3", UNIPHIER_PIN_IECTRL_NONE,
+			     182, UNIPHIER_PIN_DRV_4_8,
+			     182, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(197, "LINKDT4", UNIPHIER_PIN_IECTRL_NONE,
+			     183, UNIPHIER_PIN_DRV_4_8,
+			     183, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(198, "LINKDT5", UNIPHIER_PIN_IECTRL_NONE,
+			     184, UNIPHIER_PIN_DRV_4_8,
+			     184, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(199, "LINKDT6", UNIPHIER_PIN_IECTRL_NONE,
+			     185, UNIPHIER_PIN_DRV_4_8,
+			     185, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(200, "LINKDT7", UNIPHIER_PIN_IECTRL_NONE,
+			     186, UNIPHIER_PIN_DRV_4_8,
+			     186, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(201, "CKDVO", UNIPHIER_PIN_IECTRL_NONE,
+			     187, UNIPHIER_PIN_DRV_4_8,
+			     187, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(202, "PHY_PD", UNIPHIER_PIN_IECTRL_NONE,
+			     188, UNIPHIER_PIN_DRV_4_8,
+			     188, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(203, "X1394_RST", UNIPHIER_PIN_IECTRL_NONE,
+			     189, UNIPHIER_PIN_DRV_4_8,
+			     189, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(204, "VOUT_MUTE_L", UNIPHIER_PIN_IECTRL_NONE,
+			     190, UNIPHIER_PIN_DRV_4_8,
+			     190, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(205, "CLK54O", UNIPHIER_PIN_IECTRL_NONE,
+			     191, UNIPHIER_PIN_DRV_4_8,
+			     191, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(206, "CLK54I", UNIPHIER_PIN_IECTRL_NONE,
+			     192, UNIPHIER_PIN_DRV_NONE,
+			     192, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(207, "YIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     193, UNIPHIER_PIN_DRV_4_8,
+			     193, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(208, "YIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     194, UNIPHIER_PIN_DRV_4_8,
+			     194, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(209, "YIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     195, UNIPHIER_PIN_DRV_4_8,
+			     195, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(210, "YIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     196, UNIPHIER_PIN_DRV_4_8,
+			     196, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(211, "YIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     197, UNIPHIER_PIN_DRV_4_8,
+			     197, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(212, "YIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     198, UNIPHIER_PIN_DRV_4_8,
+			     198, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(213, "CIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     199, UNIPHIER_PIN_DRV_4_8,
+			     199, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(214, "CIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     200, UNIPHIER_PIN_DRV_4_8,
+			     200, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(215, "CIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     201, UNIPHIER_PIN_DRV_4_8,
+			     201, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(216, "CIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     202, UNIPHIER_PIN_DRV_4_8,
+			     202, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(217, "CIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     203, UNIPHIER_PIN_DRV_4_8,
+			     203, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(218, "CIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     204, UNIPHIER_PIN_DRV_4_8,
+			     204, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(219, "GCP", UNIPHIER_PIN_IECTRL_NONE,
+			     205, UNIPHIER_PIN_DRV_4_8,
+			     205, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(220, "ADFLG", UNIPHIER_PIN_IECTRL_NONE,
+			     206, UNIPHIER_PIN_DRV_4_8,
+			     206, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(221, "CK27AIOF", UNIPHIER_PIN_IECTRL_NONE,
+			     207, UNIPHIER_PIN_DRV_4_8,
+			     207, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(222, "DACOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     208, UNIPHIER_PIN_DRV_4_8,
+			     208, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(223, "DAFLG", UNIPHIER_PIN_IECTRL_NONE,
+			     209, UNIPHIER_PIN_DRV_4_8,
+			     209, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(224, "VBIH", UNIPHIER_PIN_IECTRL_NONE,
+			     210, UNIPHIER_PIN_DRV_4_8,
+			     210, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(225, "VBIL", UNIPHIER_PIN_IECTRL_NONE,
+			     211, UNIPHIER_PIN_DRV_4_8,
+			     211, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(226, "XSUB_RST", UNIPHIER_PIN_IECTRL_NONE,
+			     212, UNIPHIER_PIN_DRV_4_8,
+			     212, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(227, "XADC_PD", UNIPHIER_PIN_IECTRL_NONE,
+			     213, UNIPHIER_PIN_DRV_4_8,
+			     213, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(228, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     214, UNIPHIER_PIN_DRV_4_8,
+			     214, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(229, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     215, UNIPHIER_PIN_DRV_4_8,
+			     215, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(230, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     216, UNIPHIER_PIN_DRV_4_8,
+			     216, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(231, "AI1DMIX", UNIPHIER_PIN_IECTRL_NONE,
+			     217, UNIPHIER_PIN_DRV_4_8,
+			     217, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(232, "CK27HD", UNIPHIER_PIN_IECTRL_NONE,
+			     218, UNIPHIER_PIN_DRV_4_8,
+			     218, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(233, "XHD_RST", UNIPHIER_PIN_IECTRL_NONE,
+			     219, UNIPHIER_PIN_DRV_4_8,
+			     219, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(234, "INTHD", UNIPHIER_PIN_IECTRL_NONE,
+			     220, UNIPHIER_PIN_DRV_4_8,
+			     220, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(235, "VO1HDCK", UNIPHIER_PIN_IECTRL_NONE,
+			     221, UNIPHIER_PIN_DRV_4_8,
+			     221, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(236, "VO1HSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     222, UNIPHIER_PIN_DRV_4_8,
+			     222, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(237, "VO1VSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     223, UNIPHIER_PIN_DRV_4_8,
+			     223, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(238, "VO1DE", UNIPHIER_PIN_IECTRL_NONE,
+			     224, UNIPHIER_PIN_DRV_4_8,
+			     224, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(239, "VO1Y0", UNIPHIER_PIN_IECTRL_NONE,
+			     225, UNIPHIER_PIN_DRV_4_8,
+			     225, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(240, "VO1Y1", UNIPHIER_PIN_IECTRL_NONE,
+			     226, UNIPHIER_PIN_DRV_4_8,
+			     226, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(241, "VO1Y2", UNIPHIER_PIN_IECTRL_NONE,
+			     227, UNIPHIER_PIN_DRV_4_8,
+			     227, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(242, "VO1Y3", UNIPHIER_PIN_IECTRL_NONE,
+			     228, UNIPHIER_PIN_DRV_4_8,
+			     228, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(243, "VO1Y4", UNIPHIER_PIN_IECTRL_NONE,
+			     229, UNIPHIER_PIN_DRV_4_8,
+			     229, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(244, "VO1Y5", UNIPHIER_PIN_IECTRL_NONE,
+			     230, UNIPHIER_PIN_DRV_4_8,
+			     230, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(245, "VO1Y6", UNIPHIER_PIN_IECTRL_NONE,
+			     231, UNIPHIER_PIN_DRV_4_8,
+			     231, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(246, "VO1Y7", UNIPHIER_PIN_IECTRL_NONE,
+			     232, UNIPHIER_PIN_DRV_4_8,
+			     232, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(247, "VO1Y8", UNIPHIER_PIN_IECTRL_NONE,
+			     233, UNIPHIER_PIN_DRV_4_8,
+			     233, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(248, "VO1Y9", UNIPHIER_PIN_IECTRL_NONE,
+			     234, UNIPHIER_PIN_DRV_4_8,
+			     234, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(249, "VO1Y10", UNIPHIER_PIN_IECTRL_NONE,
+			     235, UNIPHIER_PIN_DRV_4_8,
+			     235, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(250, "VO1Y11", UNIPHIER_PIN_IECTRL_NONE,
+			     236, UNIPHIER_PIN_DRV_4_8,
+			     236, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(251, "VO1CB0", UNIPHIER_PIN_IECTRL_NONE,
+			     237, UNIPHIER_PIN_DRV_4_8,
+			     237, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(252, "VO1CB1", UNIPHIER_PIN_IECTRL_NONE,
+			     238, UNIPHIER_PIN_DRV_4_8,
+			     238, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(253, "VO1CB2", UNIPHIER_PIN_IECTRL_NONE,
+			     239, UNIPHIER_PIN_DRV_4_8,
+			     239, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(254, "VO1CB3", UNIPHIER_PIN_IECTRL_NONE,
+			     240, UNIPHIER_PIN_DRV_4_8,
+			     240, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(255, "VO1CB4", UNIPHIER_PIN_IECTRL_NONE,
+			     241, UNIPHIER_PIN_DRV_4_8,
+			     241, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(256, "VO1CB5", UNIPHIER_PIN_IECTRL_NONE,
+			     242, UNIPHIER_PIN_DRV_4_8,
+			     242, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(257, "VO1CB6", UNIPHIER_PIN_IECTRL_NONE,
+			     243, UNIPHIER_PIN_DRV_4_8,
+			     243, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(258, "VO1CB7", UNIPHIER_PIN_IECTRL_NONE,
+			     244, UNIPHIER_PIN_DRV_4_8,
+			     244, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(259, "VO1CB8", UNIPHIER_PIN_IECTRL_NONE,
+			     245, UNIPHIER_PIN_DRV_4_8,
+			     245, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(260, "VO1CB9", UNIPHIER_PIN_IECTRL_NONE,
+			     246, UNIPHIER_PIN_DRV_4_8,
+			     246, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(261, "VO1CB10", UNIPHIER_PIN_IECTRL_NONE,
+			     247, UNIPHIER_PIN_DRV_4_8,
+			     247, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(262, "VO1CB11", UNIPHIER_PIN_IECTRL_NONE,
+			     248, UNIPHIER_PIN_DRV_4_8,
+			     248, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(263, "VO1CR0", UNIPHIER_PIN_IECTRL_NONE,
+			     249, UNIPHIER_PIN_DRV_4_8,
+			     249, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(264, "VO1CR1", UNIPHIER_PIN_IECTRL_NONE,
+			     250, UNIPHIER_PIN_DRV_4_8,
+			     250, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(265, "VO1CR2", UNIPHIER_PIN_IECTRL_NONE,
+			     251, UNIPHIER_PIN_DRV_4_8,
+			     251, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(266, "VO1CR3", UNIPHIER_PIN_IECTRL_NONE,
+			     252, UNIPHIER_PIN_DRV_4_8,
+			     252, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(267, "VO1CR4", UNIPHIER_PIN_IECTRL_NONE,
+			     253, UNIPHIER_PIN_DRV_4_8,
+			     253, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(268, "VO1CR5", UNIPHIER_PIN_IECTRL_NONE,
+			     254, UNIPHIER_PIN_DRV_4_8,
+			     254, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(269, "VO1CR6", UNIPHIER_PIN_IECTRL_NONE,
+			     255, UNIPHIER_PIN_DRV_4_8,
+			     255, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(270, "VO1CR7", UNIPHIER_PIN_IECTRL_NONE,
+			     256, UNIPHIER_PIN_DRV_4_8,
+			     256, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(271, "VO1CR8", UNIPHIER_PIN_IECTRL_NONE,
+			     257, UNIPHIER_PIN_DRV_4_8,
+			     257, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(272, "VO1CR9", UNIPHIER_PIN_IECTRL_NONE,
+			     258, UNIPHIER_PIN_DRV_4_8,
+			     258, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(273, "VO1CR10", UNIPHIER_PIN_IECTRL_NONE,
+			     259, UNIPHIER_PIN_DRV_4_8,
+			     259, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(274, "VO1CR11", UNIPHIER_PIN_IECTRL_NONE,
+			     260, UNIPHIER_PIN_DRV_4_8,
+			     260, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(275, "VO1EX0", UNIPHIER_PIN_IECTRL_NONE,
+			     261, UNIPHIER_PIN_DRV_4_8,
+			     261, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(276, "VO1EX1", UNIPHIER_PIN_IECTRL_NONE,
+			     262, UNIPHIER_PIN_DRV_4_8,
+			     262, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(277, "VO1EX2", UNIPHIER_PIN_IECTRL_NONE,
+			     263, UNIPHIER_PIN_DRV_4_8,
+			     263, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(278, "VO1EX3", UNIPHIER_PIN_IECTRL_NONE,
+			     264, UNIPHIER_PIN_DRV_4_8,
+			     264, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(279, "VEXCKA", UNIPHIER_PIN_IECTRL_NONE,
+			     265, UNIPHIER_PIN_DRV_4_8,
+			     265, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(280, "VSEL0", UNIPHIER_PIN_IECTRL_NONE,
+			     266, UNIPHIER_PIN_DRV_4_8,
+			     266, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(281, "VSEL1", UNIPHIER_PIN_IECTRL_NONE,
+			     267, UNIPHIER_PIN_DRV_4_8,
+			     267, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(282, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     268, UNIPHIER_PIN_DRV_4_8,
+			     268, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(283, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     269, UNIPHIER_PIN_DRV_4_8,
+			     269, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(284, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     270, UNIPHIER_PIN_DRV_4_8,
+			     270, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(285, "AO1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     271, UNIPHIER_PIN_DRV_4_8,
+			     271, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(286, "AO1D1", UNIPHIER_PIN_IECTRL_NONE,
+			     272, UNIPHIER_PIN_DRV_4_8,
+			     272, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(287, "AO1D2", UNIPHIER_PIN_IECTRL_NONE,
+			     273, UNIPHIER_PIN_DRV_4_8,
+			     273, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(288, "AO1D3", UNIPHIER_PIN_IECTRL_NONE,
+			     274, UNIPHIER_PIN_DRV_4_8,
+			     274, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(289, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     275, UNIPHIER_PIN_DRV_4_8,
+			     275, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(290, "XDAC_PD", UNIPHIER_PIN_IECTRL_NONE,
+			     276, UNIPHIER_PIN_DRV_4_8,
+			     276, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(291, "EX_A_MUTE", UNIPHIER_PIN_IECTRL_NONE,
+			     277, UNIPHIER_PIN_DRV_4_8,
+			     277, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(292, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     278, UNIPHIER_PIN_DRV_4_8,
+			     278, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(293, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     279, UNIPHIER_PIN_DRV_4_8,
+			     279, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(294, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     280, UNIPHIER_PIN_DRV_4_8,
+			     280, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(295, "AO2DMIX", UNIPHIER_PIN_IECTRL_NONE,
+			     281, UNIPHIER_PIN_DRV_4_8,
+			     281, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(296, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     282, UNIPHIER_PIN_DRV_4_8,
+			     282, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(297, "HTHPD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(298, "HTSCL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(299, "HTSDA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(300, "PORT00", UNIPHIER_PIN_IECTRL_NONE,
+			     284, UNIPHIER_PIN_DRV_4_8,
+			     284, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(301, "PORT01", UNIPHIER_PIN_IECTRL_NONE,
+			     285, UNIPHIER_PIN_DRV_4_8,
+			     285, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(302, "PORT02", UNIPHIER_PIN_IECTRL_NONE,
+			     286, UNIPHIER_PIN_DRV_4_8,
+			     286, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(303, "PORT03", UNIPHIER_PIN_IECTRL_NONE,
+			     287, UNIPHIER_PIN_DRV_4_8,
+			     287, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(304, "PORT04", UNIPHIER_PIN_IECTRL_NONE,
+			     288, UNIPHIER_PIN_DRV_4_8,
+			     288, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(305, "PORT05", UNIPHIER_PIN_IECTRL_NONE,
+			     289, UNIPHIER_PIN_DRV_4_8,
+			     289, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(306, "PORT06", UNIPHIER_PIN_IECTRL_NONE,
+			     290, UNIPHIER_PIN_DRV_4_8,
+			     290, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(307, "PORT07", UNIPHIER_PIN_IECTRL_NONE,
+			     291, UNIPHIER_PIN_DRV_4_8,
+			     291, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(308, "PORT10", UNIPHIER_PIN_IECTRL_NONE,
+			     292, UNIPHIER_PIN_DRV_4_8,
+			     292, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(309, "PORT11", UNIPHIER_PIN_IECTRL_NONE,
+			     293, UNIPHIER_PIN_DRV_4_8,
+			     293, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(310, "PORT12", UNIPHIER_PIN_IECTRL_NONE,
+			     294, UNIPHIER_PIN_DRV_4_8,
+			     294, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(311, "PORT13", UNIPHIER_PIN_IECTRL_NONE,
+			     295, UNIPHIER_PIN_DRV_4_8,
+			     295, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(312, "PORT14", UNIPHIER_PIN_IECTRL_NONE,
+			     296, UNIPHIER_PIN_DRV_4_8,
+			     296, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(313, "PORT15", UNIPHIER_PIN_IECTRL_NONE,
+			     297, UNIPHIER_PIN_DRV_4_8,
+			     297, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(314, "PORT16", UNIPHIER_PIN_IECTRL_NONE,
+			     298, UNIPHIER_PIN_DRV_4_8,
+			     298, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(315, "PORT17", UNIPHIER_PIN_IECTRL_NONE,
+			     299, UNIPHIER_PIN_DRV_4_8,
+			     299, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(316, "PORT20", UNIPHIER_PIN_IECTRL_NONE,
+			     300, UNIPHIER_PIN_DRV_4_8,
+			     300, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(317, "PORT21", UNIPHIER_PIN_IECTRL_NONE,
+			     301, UNIPHIER_PIN_DRV_4_8,
+			     301, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(318, "PORT22", UNIPHIER_PIN_IECTRL_NONE,
+			     302, UNIPHIER_PIN_DRV_4_8,
+			     302, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(319, "SD1DAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     303, UNIPHIER_PIN_DRV_4_8,
+			     303, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(320, "SD1DAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     304, UNIPHIER_PIN_DRV_4_8,
+			     304, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(321, "SD1DAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     305, UNIPHIER_PIN_DRV_4_8,
+			     305, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(322, "SD1DAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     306, UNIPHIER_PIN_DRV_4_8,
+			     306, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(323, "SD1CMD", UNIPHIER_PIN_IECTRL_NONE,
+			     307, UNIPHIER_PIN_DRV_4_8,
+			     307, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(324, "SD1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     308, UNIPHIER_PIN_DRV_4_8,
+			     308, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(325, "SD1CD", UNIPHIER_PIN_IECTRL_NONE,
+			     309, UNIPHIER_PIN_DRV_4_8,
+			     309, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(326, "SD1WP", UNIPHIER_PIN_IECTRL_NONE,
+			     310, UNIPHIER_PIN_DRV_4_8,
+			     310, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(327, "SD1VTCG", UNIPHIER_PIN_IECTRL_NONE,
+			     311, UNIPHIER_PIN_DRV_4_8,
+			     311, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(328, "DMDISO", UNIPHIER_PIN_IECTRL_NONE,
+			     312, UNIPHIER_PIN_DRV_NONE,
+			     312, UNIPHIER_PIN_PULL_DOWN),
+};
+
+static const unsigned emmc_pins[] = {40, 41, 42, 43, 51, 52, 53};
+static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
+static const unsigned emmc_dat8_pins[] = {44, 45, 46, 47};
+static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned i2c0_pins[] = {142, 143};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {144, 145};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {146, 147};
+static const unsigned i2c2_muxvals[] = {0, 0};
+static const unsigned i2c3_pins[] = {148, 149};
+static const unsigned i2c3_muxvals[] = {0, 0};
+static const unsigned i2c6_pins[] = {308, 309};
+static const unsigned i2c6_muxvals[] = {6, 6};
+static const unsigned nand_pins[] = {40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+				     50, 51, 52, 53, 54};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+					0, 0};
+static const unsigned nand_cs1_pins[] = {131, 132};
+static const unsigned nand_cs1_muxvals[] = {1, 1};
+static const unsigned uart0_pins[] = {127, 128};
+static const unsigned uart0_muxvals[] = {0, 0};
+static const unsigned uart1_pins[] = {129, 130};
+static const unsigned uart1_muxvals[] = {0, 0};
+static const unsigned uart2_pins[] = {131, 132};
+static const unsigned uart2_muxvals[] = {0, 0};
+static const unsigned uart3_pins[] = {88, 89};
+static const unsigned uart3_muxvals[] = {2, 2};
+static const unsigned usb0_pins[] = {180, 181};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {182, 183};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {184, 185};
+static const unsigned usb2_muxvals[] = {0, 0};
+static const unsigned usb3_pins[] = {187, 188};
+static const unsigned usb3_muxvals[] = {0, 0};
+static const unsigned port_range0_pins[] = {
+	300, 301, 302, 303, 304, 305, 306, 307,		/* PORT0x */
+	308, 309, 310, 311, 312, 313, 314, 315,		/* PORT1x */
+	316, 317, 318, 16, 17, 18, 19, 20,		/* PORT2x */
+	21, 22, 23, 4, 93, 94, 95, 63,			/* PORT3x */
+	123, 122, 124, 125, 126, 141, 202, 203,		/* PORT4x */
+	204, 226, 227, 290, 291, 233, 280, 281,		/* PORT5x */
+	8, 7, 10, 29, 30, 48, 49, 50,			/* PORT6x */
+	40, 41, 42, 43, 44, 45, 46, 47,			/* PORT7x */
+	54, 51, 52, 53, 127, 128, 129, 130,		/* PORT8x */
+	131, 132, 57, 60, 134, 133, 135, 136,		/* PORT9x */
+	138, 137, 140, 139, 64, 65, 66, 67,		/* PORT10x */
+	107, 106, 105, 104, 113, 112, 111, 110,		/* PORT11x */
+	68, 69, 70, 71, 72, 73, 74, 75,			/* PORT12x */
+	76, 77, 78, 79, 80, 81, 82, 83,			/* PORT13x */
+	84, 85, 86, 87, 88, 89, 90, 91,			/* PORT14x */
+};
+static const unsigned port_range0_muxvals[] = {
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT0x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT1x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT2x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT3x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT4x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT5x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT6x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT7x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT8x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT9x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT10x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT11x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT12x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT13x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT14x */
+};
+static const unsigned port_range1_pins[] = {
+	13, 14, 15,					/* PORT175-177 */
+	157, 158, 156, 154, 150, 151, 152, 153,		/* PORT18x */
+	326, 327, 325, 323, 319, 320, 321, 322,		/* PORT19x */
+	160, 161, 162, 163, 164, 165, 166, 167,		/* PORT20x */
+	168, 169, 170, 171, 172, 173, 174, 175,		/* PORT21x */
+	180, 181, 182, 183, 184, 185, 187, 188,		/* PORT22x */
+	193, 194, 195, 196, 197, 198, 199, 200,		/* PORT23x */
+	191, 192, 215, 216, 217, 218, 219, 220,		/* PORT24x */
+	222, 223, 224, 225, 228, 229, 230, 231,		/* PORT25x */
+	282, 283, 284, 285, 286, 287, 288, 289,		/* PORT26x */
+	292, 293, 294, 295, 296, 236, 237, 238,		/* PORT27x */
+	275, 276, 277, 278, 239, 240, 249, 250,		/* PORT28x */
+	251, 252, 261, 262, 263, 264, 273, 274,		/* PORT29x */
+	31, 32, 33, 34, 35, 36, 37, 38,			/* PORT30x */
+};
+static const unsigned port_range1_muxvals[] = {
+	7, 7, 7,					/* PORT175-177 */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT18x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT19x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT20x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT21x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT22x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT23x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT24x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT25x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT26x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT27x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT28x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT29x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT30x */
+};
+static const unsigned xirq_pins[] = {
+	11, 9, 12, 96, 97, 98, 108, 114,		/* XIRQ0-7 */
+	234, 186, 99, 100, 101, 102, 184, 301,		/* XIRQ8-15 */
+	302, 303, 304, 305, 306,			/* XIRQ16-20 */
+};
+static const unsigned xirq_muxvals[] = {
+	7, 7, 7, 7, 7, 7, 7, 7,				/* XIRQ0-7 */
+	7, 7, 7, 7, 7, 7, 2, 2,				/* XIRQ8-15 */
+	2, 2, 2, 2, 2,					/* XIRQ16-20 */
+};
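+/* alternative pin assignments for XIRQ14, XIRQ17, and XIRQ18 */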
+static const unsigned xirq_alternatives_pins[] = {
+	184, 310, 316,
+};
+static const unsigned xirq_alternatives_muxvals[] = {
+	2, 2, 2,
+};
+
+static const struct uniphier_pinctrl_group ph1_pro4_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(i2c6),
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart3),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP(usb3),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port110, port_range0, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port111, port_range0, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port112, port_range0, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port113, port_range0, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port114, port_range0, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port115, port_range0, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port116, port_range0, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port117, port_range0, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range0, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range0, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range0, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range0, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range0, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range0, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range0, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range0, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range0, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range0, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range0, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range0, 107),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range0, 108),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range0, 109),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range0, 110),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range0, 111),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range0, 112),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range0, 113),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range0, 114),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range0, 115),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range0, 116),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range0, 117),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range0, 118),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range0, 119),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port290, port_range1, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port291, port_range1, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port292, port_range1, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port293, port_range1, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port294, port_range1, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port295, port_range1, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port296, port_range1, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port297, port_range1, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port300, port_range1, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port301, port_range1, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port302, port_range1, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port303, port_range1, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port304, port_range1, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port305, port_range1, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port306, port_range1, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port307, port_range1, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14b, xirq_alternatives, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17b, xirq_alternatives, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18b, xirq_alternatives, 2),
+};
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const i2c6_groups[] = {"i2c6"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const usb3_groups[] = {"usb3"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	"port110", "port111", "port112", "port113",
+	"port114", "port115", "port116", "port117",
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	/* port150-174 missing */
+	/* none */ "port175", "port176", "port177",
+	"port180", "port181", "port182", "port183",
+	"port184", "port185", "port186", "port187",
+	"port190", "port191", "port192", "port193",
+	"port194", "port195", "port196", "port197",
+	"port200", "port201", "port202", "port203",
+	"port204", "port205", "port206", "port207",
+	"port210", "port211", "port212", "port213",
+	"port214", "port215", "port216", "port217",
+	"port220", "port221", "port222", "port223",
+	"port224", "port225", "port226", "port227",
+	"port230", "port231", "port232", "port233",
+	"port234", "port235", "port236", "port237",
+	"port240", "port241", "port242", "port243",
+	"port244", "port245", "port246", "port247",
+	"port250", "port251", "port252", "port253",
+	"port254", "port255", "port256", "port257",
+	"port260", "port261", "port262", "port263",
+	"port264", "port265", "port266", "port267",
+	"port270", "port271", "port272", "port273",
+	"port274", "port275", "port276", "port277",
+	"port280", "port281", "port282", "port283",
+	"port284", "port285", "port286", "port287",
+	"port290", "port291", "port292", "port293",
+	"port294", "port295", "port296", "port297",
+	"port300", "port301", "port302", "port303",
+	"port304", "port305", "port306", "port307",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", "xirq13", "xirq14", "xirq15",
+	"xirq16", "xirq17", "xirq18", "xirq19",
+	"xirq20",
+	"xirq14b", "xirq17b", "xirq18b",
+};
+
+static const struct uniphier_pinmux_function ph1_pro4_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(i2c6),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(uart3),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(usb3),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
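+/*
+ * 4-bit mux fields laid out at an 8-byte register stride; load_pinctrl
+ * indicates that new settings take effect only after a write to the
+ * load register.
+ */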
+static struct uniphier_pinctrl_socdata ph1_pro4_pindata = {
+	.groups = ph1_pro4_groups,
+	.groups_count = ARRAY_SIZE(ph1_pro4_groups),
+	.functions = ph1_pro4_functions,
+	.functions_count = ARRAY_SIZE(ph1_pro4_functions),
+	.mux_bits = 4,
+	.reg_stride = 8,
+	.load_pinctrl = true,
+};
+
+static struct pinctrl_desc ph1_pro4_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = ph1_pro4_pins,
+	.npins = ARRAY_SIZE(ph1_pro4_pins),
+	.owner = THIS_MODULE,
+};
+
+static int ph1_pro4_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &ph1_pro4_pinctrl_desc,
+				      &ph1_pro4_pindata);
+}
+
+static const struct of_device_id ph1_pro4_pinctrl_match[] = {
+	{ .compatible = "socionext,ph1-pro4-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_pro4_pinctrl_match);
+
+static struct platform_driver ph1_pro4_pinctrl_driver = {
+	.probe = ph1_pro4_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = ph1_pro4_pinctrl_match,
+	},
+};
+module_platform_driver(ph1_pro4_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-Pro4 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c b/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c
new file mode 100644
index 0000000..9af4559
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c
@@ -0,0 +1,1351 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-pro5-pinctrl"
+
+static const struct pinctrl_pin_desc ph1_pro5_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "AEXCKA1", 0,
+			     0, UNIPHIER_PIN_DRV_4_8,
+			     0, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "AEXCKA2", 0,
+			     1, UNIPHIER_PIN_DRV_4_8,
+			     1, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "CK27EXI", 0,
+			     2, UNIPHIER_PIN_DRV_4_8,
+			     2, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "CK54EXI", 0,
+			     3, UNIPHIER_PIN_DRV_4_8,
+			     3, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "ED0", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_4_8,
+			     4, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(5, "ED1", UNIPHIER_PIN_IECTRL_NONE,
+			     5, UNIPHIER_PIN_DRV_4_8,
+			     5, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "ED2", UNIPHIER_PIN_IECTRL_NONE,
+			     6, UNIPHIER_PIN_DRV_4_8,
+			     6, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "ED3", UNIPHIER_PIN_IECTRL_NONE,
+			     7, UNIPHIER_PIN_DRV_4_8,
+			     7, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "ED4", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_4_8,
+			     8, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "ED5", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_4_8,
+			     9, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "ED6", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_4_8,
+			     10, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "ED7", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_4_8,
+			     11, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(12, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_4_8,
+			     12, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(13, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_4_8,
+			     13, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(14, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_4_8,
+			     14, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(15, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+			     15, UNIPHIER_PIN_DRV_4_8,
+			     15, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(16, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_4_8,
+			     16, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(17, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+			     17, UNIPHIER_PIN_DRV_4_8,
+			     17, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(18, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
+			     18, UNIPHIER_PIN_DRV_4_8,
+			     18, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(19, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+			     19, UNIPHIER_PIN_DRV_4_8,
+			     19, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(20, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_4_8,
+			     20, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(21, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+			     21, UNIPHIER_PIN_DRV_4_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(22, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+			     22, UNIPHIER_PIN_DRV_4_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(23, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+			     23, UNIPHIER_PIN_DRV_4_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(24, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_4_8,
+			     24, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(25, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+			     25, UNIPHIER_PIN_DRV_4_8,
+			     25, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(26, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE,
+			     26, UNIPHIER_PIN_DRV_4_8,
+			     26, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(27, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE,
+			     27, UNIPHIER_PIN_DRV_4_8,
+			     27, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(28, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_4_8,
+			     28, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(29, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
+			     29, UNIPHIER_PIN_DRV_4_8,
+			     29, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(30, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
+			     30, UNIPHIER_PIN_DRV_4_8,
+			     30, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(31, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(32, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(33, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(34, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(35, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(36, "XERST", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(37, "MMCCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(38, "MMCCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(39, "MMCDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     39, UNIPHIER_PIN_DRV_4_8,
+			     39, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(40, "MMCDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_4_8,
+			     40, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(41, "MMCDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     41, UNIPHIER_PIN_DRV_4_8,
+			     41, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(42, "MMCDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     42, UNIPHIER_PIN_DRV_4_8,
+			     42, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(43, "MMCDAT4", UNIPHIER_PIN_IECTRL_NONE,
+			     43, UNIPHIER_PIN_DRV_4_8,
+			     43, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(44, "MMCDAT5", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_4_8,
+			     44, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(45, "MMCDAT6", UNIPHIER_PIN_IECTRL_NONE,
+			     45, UNIPHIER_PIN_DRV_4_8,
+			     45, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(46, "MMCDAT7", UNIPHIER_PIN_IECTRL_NONE,
+			     46, UNIPHIER_PIN_DRV_4_8,
+			     46, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(47, "TXD0", 0,
+			     47, UNIPHIER_PIN_DRV_4_8,
+			     47, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(48, "RXD0", 0,
+			     48, UNIPHIER_PIN_DRV_4_8,
+			     48, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(49, "TXD1", 0,
+			     49, UNIPHIER_PIN_DRV_4_8,
+			     49, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(50, "RXD1", 0,
+			     50, UNIPHIER_PIN_DRV_4_8,
+			     50, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(51, "TXD2", UNIPHIER_PIN_IECTRL_NONE,
+			     51, UNIPHIER_PIN_DRV_4_8,
+			     51, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(52, "RXD2", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_4_8,
+			     52, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(53, "TXD3", 0,
+			     53, UNIPHIER_PIN_DRV_4_8,
+			     53, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(54, "RXD3", 0,
+			     54, UNIPHIER_PIN_DRV_4_8,
+			     54, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(55, "MS0CS0", 0,
+			     55, UNIPHIER_PIN_DRV_4_8,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(56, "MS0DO", 0,
+			     56, UNIPHIER_PIN_DRV_4_8,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(57, "MS0DI", 0,
+			     57, UNIPHIER_PIN_DRV_4_8,
+			     57, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "MS0CLK", 0,
+			     58, UNIPHIER_PIN_DRV_4_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(59, "CSCLK", 0,
+			     59, UNIPHIER_PIN_DRV_4_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(60, "CSBPTM", 0,
+			     60, UNIPHIER_PIN_DRV_4_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "CSBMTP", 0,
+			     61, UNIPHIER_PIN_DRV_4_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(62, "XCINTP", 0,
+			     62, UNIPHIER_PIN_DRV_4_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(63, "XCINTM", 0,
+			     63, UNIPHIER_PIN_DRV_4_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(64, "XCMPREQ", 0,
+			     64, UNIPHIER_PIN_DRV_4_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "XSRST", 0,
+			     65, UNIPHIER_PIN_DRV_4_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "LPST", UNIPHIER_PIN_IECTRL_NONE,
+			     66, UNIPHIER_PIN_DRV_4_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "PWMA", 0,
+			     67, UNIPHIER_PIN_DRV_4_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "XIRQ0", 0,
+			     68, UNIPHIER_PIN_DRV_4_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "XIRQ1", 0,
+			     69, UNIPHIER_PIN_DRV_4_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "XIRQ2", 0,
+			     70, UNIPHIER_PIN_DRV_4_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "XIRQ3", 0,
+			     71, UNIPHIER_PIN_DRV_4_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "XIRQ4", 0,
+			     72, UNIPHIER_PIN_DRV_4_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "XIRQ5", 0,
+			     73, UNIPHIER_PIN_DRV_4_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "XIRQ6", 0,
+			     74, UNIPHIER_PIN_DRV_4_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "XIRQ7", 0,
+			     75, UNIPHIER_PIN_DRV_4_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "XIRQ8", 0,
+			     76, UNIPHIER_PIN_DRV_4_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "XIRQ9", 0,
+			     77, UNIPHIER_PIN_DRV_4_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "XIRQ10", 0,
+			     78, UNIPHIER_PIN_DRV_4_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "XIRQ11", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "XIRQ12", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "XIRQ13", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "XIRQ14", 0,
+			     82, UNIPHIER_PIN_DRV_4_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "XIRQ15", 0,
+			     83, UNIPHIER_PIN_DRV_4_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "XIRQ16", 0,
+			     84, UNIPHIER_PIN_DRV_4_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "XIRQ17", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "XIRQ18", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "XIRQ19", 0,
+			     87, UNIPHIER_PIN_DRV_4_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "XIRQ20", 0,
+			     88, UNIPHIER_PIN_DRV_4_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "PORT00", 0,
+			     89, UNIPHIER_PIN_DRV_4_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "PORT01", 0,
+			     90, UNIPHIER_PIN_DRV_4_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "PORT02", 0,
+			     91, UNIPHIER_PIN_DRV_4_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "PORT03", 0,
+			     92, UNIPHIER_PIN_DRV_4_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "PORT04", 0,
+			     93, UNIPHIER_PIN_DRV_4_8,
+			     93, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "PORT05", 0,
+			     94, UNIPHIER_PIN_DRV_4_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "PORT06", 0,
+			     95, UNIPHIER_PIN_DRV_4_8,
+			     95, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "PORT07", 0,
+			     96, UNIPHIER_PIN_DRV_4_8,
+			     96, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "PORT10", 0,
+			     97, UNIPHIER_PIN_DRV_4_8,
+			     97, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "PORT11", 0,
+			     98, UNIPHIER_PIN_DRV_4_8,
+			     98, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "PORT12", 0,
+			     99, UNIPHIER_PIN_DRV_4_8,
+			     99, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "PORT13", 0,
+			     100, UNIPHIER_PIN_DRV_4_8,
+			     100, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(101, "PORT14", 0,
+			     101, UNIPHIER_PIN_DRV_4_8,
+			     101, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "PORT15", 0,
+			     102, UNIPHIER_PIN_DRV_4_8,
+			     102, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(103, "PORT16", 0,
+			     103, UNIPHIER_PIN_DRV_4_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(104, "PORT17", 0,
+			     104, UNIPHIER_PIN_DRV_4_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(105, "T0HPD", 0,
+			     105, UNIPHIER_PIN_DRV_4_8,
+			     105, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(106, "T1HPD", 0,
+			     106, UNIPHIER_PIN_DRV_4_8,
+			     106, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(107, "R0HPD", 0,
+			     107, UNIPHIER_PIN_DRV_4_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(108, "R1HPD", 0,
+			     108, UNIPHIER_PIN_DRV_4_8,
+			     108, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(109, "XPERST", 0,
+			     109, UNIPHIER_PIN_DRV_4_8,
+			     109, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(110, "XPEWAKE", 0,
+			     110, UNIPHIER_PIN_DRV_4_8,
+			     110, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(111, "XPECLKRQ", 0,
+			     111, UNIPHIER_PIN_DRV_4_8,
+			     111, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(112, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     112, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(113, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     113, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(114, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     114, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(115, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     115, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(116, "SDA2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     116, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(117, "SCL2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     117, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(118, "SDA3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     118, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(119, "SCL3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     119, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(120, "SPISYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     120, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(121, "SPISCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     121, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "SPITXD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     122, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(123, "SPIRXD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     123, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     124, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(125, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     125, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(126, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     126, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(127, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     127, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(128, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     128, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(129, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     129, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(130, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     130, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(131, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     131, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(132, "SMTD0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     132, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(133, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     133, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(134, "SMTCLK0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     134, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(135, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     135, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(136, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     136, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(137, "SMTD1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     137, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(138, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     138, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(139, "SMTCLK1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     139, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(140, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     140, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(141, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     141, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(142, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     142, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(143, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     143, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(144, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     144, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(145, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     145, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(146, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     146, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(147, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     147, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(148, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     148, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(149, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     149, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(150, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     150, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(151, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     151, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(152, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     152, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(153, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     153, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(154, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     154, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(155, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     155, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(156, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     156, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(157, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     157, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(158, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     158, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(159, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     159, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(160, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     160, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(161, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     161, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(162, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     162, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(163, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     163, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(164, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     164, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(165, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     165, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(166, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     166, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(167, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     167, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(168, "CH7CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     168, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(169, "CH7PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     169, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(170, "CH7VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     170, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(171, "CH7DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     171, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(172, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     172, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(173, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     173, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(174, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     174, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(175, "AI1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     175, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(176, "AI1D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     176, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(177, "AI1D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     177, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(178, "AI1D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     178, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(179, "AI2ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     179, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(180, "AI2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     180, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(181, "AI2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     181, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(182, "AI2D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     182, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(183, "AI2D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     183, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(184, "AI2D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     184, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(185, "AI2D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     185, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(186, "AI3ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     186, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(187, "AI3BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     187, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(188, "AI3LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     188, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(189, "AI3D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     189, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(190, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     190, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(191, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     191, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(192, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     192, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(193, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     193, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(194, "AO1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     194, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(195, "AO1D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     195, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(196, "AO1D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     196, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(197, "AO1D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     197, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(198, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     198, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(199, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     199, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(200, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     200, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(201, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     201, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(202, "AO2D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     202, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(203, "AO2D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     203, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(204, "AO2D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     204, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(205, "AO2D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     205, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(206, "AO3DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     206, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(207, "AO3BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     207, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(208, "AO3LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     208, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(209, "AO3DMIX", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     209, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(210, "AO4DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     210, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(211, "AO4BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     211, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(212, "AO4LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     212, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(213, "AO4DMIX", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     213, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(214, "VI1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     214, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(215, "VI1C0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     215, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(216, "VI1C1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     216, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(217, "VI1C2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     217, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(218, "VI1C3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     218, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(219, "VI1C4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     219, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(220, "VI1C5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     220, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(221, "VI1C6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     221, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(222, "VI1C7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     222, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(223, "VI1C8", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     223, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(224, "VI1C9", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     224, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(225, "VI1Y0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     225, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(226, "VI1Y1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     226, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(227, "VI1Y2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     227, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(228, "VI1Y3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     228, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(229, "VI1Y4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     229, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(230, "VI1Y5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     230, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(231, "VI1Y6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     231, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(232, "VI1Y7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     232, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(233, "VI1Y8", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     233, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(234, "VI1Y9", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     234, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(235, "VI1DE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     235, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(236, "VI1HSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     236, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(237, "VI1VSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     237, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(238, "VO1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     238, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(239, "VO1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     239, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(240, "VO1D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     240, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(241, "VO1D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     241, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(242, "VO1D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     242, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(243, "VO1D4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     243, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(244, "VO1D5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     244, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(245, "VO1D6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     245, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(246, "VO1D7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     246, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(247, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     247, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(248, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     248, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(249, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     249, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(250, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(251, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(252, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     48, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(253, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(254, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     56, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(255, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     60, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+};
+
+static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42};
+static const unsigned emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0};
+static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46};
+static const unsigned emmc_dat8_muxvals[] = {0, 0, 0, 0};
+static const unsigned i2c0_pins[] = {112, 113};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {114, 115};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {116, 117};
+static const unsigned i2c2_muxvals[] = {0, 0};
+static const unsigned i2c3_pins[] = {118, 119};
+static const unsigned i2c3_muxvals[] = {0, 0};
+static const unsigned i2c5_pins[] = {87, 88};
+static const unsigned i2c5_muxvals[] = {2, 2};
+static const unsigned i2c5b_pins[] = {196, 197};
+static const unsigned i2c5b_muxvals[] = {2, 2};
+static const unsigned i2c5c_pins[] = {215, 216};
+static const unsigned i2c5c_muxvals[] = {2, 2};
+static const unsigned i2c6_pins[] = {101, 102};
+static const unsigned i2c6_muxvals[] = {2, 2};
+static const unsigned nand_pins[] = {19, 20, 21, 22, 23, 24, 25, 28, 29, 30,
+				     31, 32, 33, 34, 35};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+					0, 0};
+static const unsigned nand_cs1_pins[] = {26, 27};
+static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned uart0_pins[] = {47, 48};
+static const unsigned uart0_muxvals[] = {0, 0};
+static const unsigned uart0b_pins[] = {227, 228};
+static const unsigned uart0b_muxvals[] = {3, 3};
+static const unsigned uart1_pins[] = {49, 50};
+static const unsigned uart1_muxvals[] = {0, 0};
+static const unsigned uart2_pins[] = {51, 52};
+static const unsigned uart2_muxvals[] = {0, 0};
+static const unsigned uart3_pins[] = {53, 54};
+static const unsigned uart3_muxvals[] = {0, 0};
+static const unsigned usb0_pins[] = {124, 125};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {126, 127};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {128, 129};
+static const unsigned usb2_muxvals[] = {0, 0};
+static const unsigned port_range0_pins[] = {
+	89, 90, 91, 92, 93, 94, 95, 96,			/* PORT0x */
+	97, 98, 99, 100, 101, 102, 103, 104,		/* PORT1x */
+	251, 252, 253, 254, 255, 247, 248, 249,		/* PORT2x */
+	39, 40, 41, 42, 43, 44, 45, 46,			/* PORT3x */
+	156, 157, 158, 159, 160, 161, 162, 163,		/* PORT4x */
+	164, 165, 166, 167, 168, 169, 170, 171,		/* PORT5x */
+	190, 191, 192, 193, 194, 195, 196, 197,		/* PORT6x */
+	198, 199, 200, 201, 202, 203, 204, 205,		/* PORT7x */
+	120, 121, 122, 123, 55, 56, 57, 58,		/* PORT8x */
+	124, 125, 126, 127, 49, 50, 53, 54,		/* PORT9x */
+	148, 149, 150, 151, 152, 153, 154, 155,		/* PORT10x */
+	133, 134, 131, 130, 138, 139, 136, 135,		/* PORT11x */
+	28, 29, 30, 31, 32, 33, 34, 35,			/* PORT12x */
+	179, 180, 181, 182, 186, 187, 188, 189,		/* PORT13x */
+	4, 5, 6, 7, 8, 9, 10, 11,			/* PORT14x */
+};
+static const unsigned port_range0_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT0x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT1x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT2x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT3x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT4x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT5x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT6x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT7x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT11x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT12x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT13x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT14x */
+};
+static const unsigned port_range1_pins[] = {
+	109, 110, 111,					/* PORT175-177 */
+	206, 207, 208, 209, 210, 211, 212, 213,		/* PORT18x */
+	12, 13, 14, 15, 16, 17, 107, 108,		/* PORT19x */
+	140, 141, 142, 143, 144, 145, 146, 147,		/* PORT20x */
+	59, 60, 61, 62, 63, 64, 65, 66,			/* PORT21x */
+	214, 215, 216, 217, 218, 219, 220, 221,		/* PORT22x */
+	222, 223, 224, 225, 226, 227, 228, 229,		/* PORT23x */
+	19, 20, 21, 22, 23, 24, 25, 26,			/* PORT24x */
+	230, 231, 232, 233, 234, 235, 236, 237,		/* PORT25x */
+	239, 240, 241, 242, 243, 244, 245, 246,		/* PORT26x */
+	172, 173, 174, 175, 176, 177, 178, 129,		/* PORT27x */
+	0, 1, 2, 67, 85, 86, 87, 88,			/* PORT28x */
+	105, 106, 18, 27, 36, 128, 132, 137,		/* PORT29x */
+	183, 184, 185, 84, 47, 48, 51, 52,		/* PORT30x */
+};
+static const unsigned port_range1_muxvals[] = {
+	15, 15, 15,					/* PORT175-177 */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT18x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT19x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT20x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT21x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT22x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT23x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT24x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT25x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT26x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT27x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT28x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT29x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT30x */
+};
+static const unsigned xirq_pins[] = {
+	68, 69, 70, 71, 72, 73, 74, 75,			/* XIRQ0-7 */
+	76, 77, 78, 79, 80, 81, 82, 83,			/* XIRQ8-15 */
+	84, 85, 86, 87, 88,				/* XIRQ16-20 */
+};
+static const unsigned xirq_muxvals[] = {
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ0-7 */
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ8-15 */
+	14, 14, 14, 14, 14,				/* XIRQ16-20 */
+};
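+/* Alternative pin locations for some XIRQ lines (xirq*b/xirq*c groups below) */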
+static const unsigned xirq_alternatives_pins[] = {
+	91, 92, 239, 144, 240, 156, 241, 106, 128,
+};
+static const unsigned xirq_alternatives_muxvals[] = {
+	14, 14, 14, 14, 14, 14, 14, 14, 14,
+};
+
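+/*
+ * Note: UNIPHIER_PINCTRL_GROUP_SINGLE(name, range, offset) appears to carve
+ * a one-pin group out of an existing range group, so each portNN/xirqN entry
+ * below reuses one element of the range arrays defined above.
+ */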
+static const struct uniphier_pinctrl_group ph1_pro5_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(i2c5),
+	UNIPHIER_PINCTRL_GROUP(i2c5b),
+	UNIPHIER_PINCTRL_GROUP(i2c5c),
+	UNIPHIER_PINCTRL_GROUP(i2c6),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart0b),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart3),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port110, port_range0, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port111, port_range0, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port112, port_range0, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port113, port_range0, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port114, port_range0, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port115, port_range0, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port116, port_range0, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port117, port_range0, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range0, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range0, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range0, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range0, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range0, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range0, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range0, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range0, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range0, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range0, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range0, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range0, 107),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range0, 108),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range0, 109),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range0, 110),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range0, 111),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range0, 112),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range0, 113),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range0, 114),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range0, 115),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range0, 116),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range0, 117),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range0, 118),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range0, 119),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port290, port_range1, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port291, port_range1, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port292, port_range1, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port293, port_range1, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port294, port_range1, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port295, port_range1, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port296, port_range1, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port297, port_range1, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port300, port_range1, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port301, port_range1, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port302, port_range1, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port303, port_range1, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port304, port_range1, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port305, port_range1, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port306, port_range1, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port307, port_range1, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3b, xirq_alternatives, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4b, xirq_alternatives, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16b, xirq_alternatives, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17b, xirq_alternatives, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17c, xirq_alternatives, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18b, xirq_alternatives, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18c, xirq_alternatives, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19b, xirq_alternatives, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20b, xirq_alternatives, 8),
+};
+
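+/* Selectable group names per function, referenced by the function table below */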
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const i2c5_groups[] = {"i2c5", "i2c5b", "i2c5c"};
+static const char * const i2c6_groups[] = {"i2c6"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0", "uart0b"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	"port110", "port111", "port112", "port113",
+	"port114", "port115", "port116", "port117",
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	/* port150-174 missing */
+	/* none */ "port175", "port176", "port177",
+	"port180", "port181", "port182", "port183",
+	"port184", "port185", "port186", "port187",
+	"port190", "port191", "port192", "port193",
+	"port194", "port195", "port196", "port197",
+	"port200", "port201", "port202", "port203",
+	"port204", "port205", "port206", "port207",
+	"port210", "port211", "port212", "port213",
+	"port214", "port215", "port216", "port217",
+	"port220", "port221", "port222", "port223",
+	"port224", "port225", "port226", "port227",
+	"port230", "port231", "port232", "port233",
+	"port234", "port235", "port236", "port237",
+	"port240", "port241", "port242", "port243",
+	"port244", "port245", "port246", "port247",
+	"port250", "port251", "port252", "port253",
+	"port254", "port255", "port256", "port257",
+	"port260", "port261", "port262", "port263",
+	"port264", "port265", "port266", "port267",
+	"port270", "port271", "port272", "port273",
+	"port274", "port275", "port276", "port277",
+	"port280", "port281", "port282", "port283",
+	"port284", "port285", "port286", "port287",
+	"port290", "port291", "port292", "port293",
+	"port294", "port295", "port296", "port297",
+	"port300", "port301", "port302", "port303",
+	"port304", "port305", "port306", "port307",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", "xirq13", "xirq14", "xirq15",
+	"xirq16", "xirq17", "xirq18", "xirq19",
+	"xirq20",
+	"xirq3b", "xirq4b", "xirq16b", "xirq17b", "xirq17c",
+	"xirq18b", "xirq18c", "xirq19b", "xirq20b",
+};
+
+static const struct uniphier_pinmux_function ph1_pro5_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(i2c5),
+	UNIPHIER_PINMUX_FUNCTION(i2c6),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(uart3),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
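+/*
+ * Presumably: each pin's mux setting occupies a 4-bit field (mux_bits) and
+ * consecutive mux registers sit 8 bytes apart (reg_stride); load_pinctrl
+ * suggests new settings only take effect after an explicit load on this SoC.
+ */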
+static struct uniphier_pinctrl_socdata ph1_pro5_pindata = {
+	.groups = ph1_pro5_groups,
+	.groups_count = ARRAY_SIZE(ph1_pro5_groups),
+	.functions = ph1_pro5_functions,
+	.functions_count = ARRAY_SIZE(ph1_pro5_functions),
+	.mux_bits = 4,
+	.reg_stride = 8,
+	.load_pinctrl = true,
+};
+
+static struct pinctrl_desc ph1_pro5_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = ph1_pro5_pins,
+	.npins = ARRAY_SIZE(ph1_pro5_pins),
+	.owner = THIS_MODULE,
+};
+
+static int ph1_pro5_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &ph1_pro5_pinctrl_desc,
+				      &ph1_pro5_pindata);
+}
+
+static const struct of_device_id ph1_pro5_pinctrl_match[] = {
+	{ .compatible = "socionext,ph1-pro5-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_pro5_pinctrl_match);
+
+static struct platform_driver ph1_pro5_pinctrl_driver = {
+	.probe = ph1_pro5_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = ph1_pro5_pinctrl_match,
+	},
+};
+module_platform_driver(ph1_pro5_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-Pro5 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
new file mode 100644
index 0000000..7e9dae5
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
@@ -0,0 +1,794 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-sld8-pinctrl"
+
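+/*
+ * Each UNIPHIER_PINCTRL_PIN() entry is assumed to encode: pin number, name,
+ * input-enable (IECTRL) setting, drive-control bit position (-1 = fixed
+ * strength), drive-strength capability, pull-control bit position (-1 =
+ * fixed), and the default pull direction.
+ */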
+static const struct pinctrl_pin_desc ph1_sld8_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "PCA00", UNIPHIER_PIN_IECTRL_NONE,
+			     15, UNIPHIER_PIN_DRV_4_8,
+			     15, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "PCA01", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_4_8,
+			     16, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "PCA02", UNIPHIER_PIN_IECTRL_NONE,
+			     17, UNIPHIER_PIN_DRV_4_8,
+			     17, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "PCA03", UNIPHIER_PIN_IECTRL_NONE,
+			     18, UNIPHIER_PIN_DRV_4_8,
+			     18, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "PCA04", UNIPHIER_PIN_IECTRL_NONE,
+			     19, UNIPHIER_PIN_DRV_4_8,
+			     19, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(5, "PCA05", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_4_8,
+			     20, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "PCA06", UNIPHIER_PIN_IECTRL_NONE,
+			     21, UNIPHIER_PIN_DRV_4_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "PCA07", UNIPHIER_PIN_IECTRL_NONE,
+			     22, UNIPHIER_PIN_DRV_4_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "PCA08", UNIPHIER_PIN_IECTRL_NONE,
+			     23, UNIPHIER_PIN_DRV_4_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "PCA09", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_4_8,
+			     24, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "PCA10", UNIPHIER_PIN_IECTRL_NONE,
+			     25, UNIPHIER_PIN_DRV_4_8,
+			     25, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "PCA11", UNIPHIER_PIN_IECTRL_NONE,
+			     26, UNIPHIER_PIN_DRV_4_8,
+			     26, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(12, "PCA12", UNIPHIER_PIN_IECTRL_NONE,
+			     27, UNIPHIER_PIN_DRV_4_8,
+			     27, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(13, "PCA13", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_4_8,
+			     28, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(14, "PCA14", UNIPHIER_PIN_IECTRL_NONE,
+			     29, UNIPHIER_PIN_DRV_4_8,
+			     29, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(15, "XNFRE_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     30, UNIPHIER_PIN_DRV_4_8,
+			     30, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(16, "XNFWE_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(17, "NFALE_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(18, "NFCLE_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(19, "XNFWP_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(20, "XNFCE0_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(21, "NANDRYBY0_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(22, "XNFCE1_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_8_12_16_20,
+			     119, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(23, "NANDRYBY1_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_8_12_16_20,
+			     120, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(24, "NFD0_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_8_12_16_20,
+			     121, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(25, "NFD1_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_8_12_16_20,
+			     122, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(26, "NFD2_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_8_12_16_20,
+			     123, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(27, "NFD3_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_8_12_16_20,
+			     124, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(28, "NFD4_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_8_12_16_20,
+			     125, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(29, "NFD5_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_8_12_16_20,
+			     126, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(30, "NFD6_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_8_12_16_20,
+			     127, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(31, "NFD7_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_8_12_16_20,
+			     128, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(32, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(33, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(34, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     48, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(35, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(36, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     56, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(37, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     60, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(38, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     129, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(39, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     130, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(40, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     131, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(42, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     39, UNIPHIER_PIN_DRV_4_8,
+			     39, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(44, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_4_8,
+			     40, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(45, "PCRESET", UNIPHIER_PIN_IECTRL_NONE,
+			     41, UNIPHIER_PIN_DRV_4_8,
+			     41, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(46, "PCREG", UNIPHIER_PIN_IECTRL_NONE,
+			     42, UNIPHIER_PIN_DRV_4_8,
+			     42, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(47, "PCCE2", UNIPHIER_PIN_IECTRL_NONE,
+			     43, UNIPHIER_PIN_DRV_4_8,
+			     43, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(48, "PCVS1", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_4_8,
+			     44, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(49, "PCCD2", UNIPHIER_PIN_IECTRL_NONE,
+			     45, UNIPHIER_PIN_DRV_4_8,
+			     45, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(50, "PCCD1", UNIPHIER_PIN_IECTRL_NONE,
+			     46, UNIPHIER_PIN_DRV_4_8,
+			     46, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(51, "PCREADY", UNIPHIER_PIN_IECTRL_NONE,
+			     47, UNIPHIER_PIN_DRV_4_8,
+			     47, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(52, "PCDOE", UNIPHIER_PIN_IECTRL_NONE,
+			     48, UNIPHIER_PIN_DRV_4_8,
+			     48, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(53, "PCCE1", UNIPHIER_PIN_IECTRL_NONE,
+			     49, UNIPHIER_PIN_DRV_4_8,
+			     49, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(54, "PCWE", UNIPHIER_PIN_IECTRL_NONE,
+			     50, UNIPHIER_PIN_DRV_4_8,
+			     50, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(55, "PCOE", UNIPHIER_PIN_IECTRL_NONE,
+			     51, UNIPHIER_PIN_DRV_4_8,
+			     51, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(56, "PCWAIT", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_4_8,
+			     52, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(57, "PCIOWR", UNIPHIER_PIN_IECTRL_NONE,
+			     53, UNIPHIER_PIN_DRV_4_8,
+			     53, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "PCIORD", UNIPHIER_PIN_IECTRL_NONE,
+			     54, UNIPHIER_PIN_DRV_4_8,
+			     54, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     55, UNIPHIER_PIN_DRV_4_8,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     56, UNIPHIER_PIN_DRV_4_8,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     57, UNIPHIER_PIN_DRV_4_8,
+			     57, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     58, UNIPHIER_PIN_DRV_4_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     59, UNIPHIER_PIN_DRV_4_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     60, UNIPHIER_PIN_DRV_4_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", UNIPHIER_PIN_IECTRL_NONE,
+			     61, UNIPHIER_PIN_DRV_4_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", UNIPHIER_PIN_IECTRL_NONE,
+			     62, UNIPHIER_PIN_DRV_4_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+			     63, UNIPHIER_PIN_DRV_4_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", UNIPHIER_PIN_IECTRL_NONE,
+			     64, UNIPHIER_PIN_DRV_4_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+			     65, UNIPHIER_PIN_DRV_4_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", UNIPHIER_PIN_IECTRL_NONE,
+			     66, UNIPHIER_PIN_DRV_4_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", UNIPHIER_PIN_IECTRL_NONE,
+			     67, UNIPHIER_PIN_DRV_4_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", UNIPHIER_PIN_IECTRL_NONE,
+			     68, UNIPHIER_PIN_DRV_4_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", UNIPHIER_PIN_IECTRL_NONE,
+			     69, UNIPHIER_PIN_DRV_4_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", UNIPHIER_PIN_IECTRL_NONE,
+			     70, UNIPHIER_PIN_DRV_4_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", UNIPHIER_PIN_IECTRL_NONE,
+			     71, UNIPHIER_PIN_DRV_4_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", UNIPHIER_PIN_IECTRL_NONE,
+			     72, UNIPHIER_PIN_DRV_4_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", UNIPHIER_PIN_IECTRL_NONE,
+			     73, UNIPHIER_PIN_DRV_4_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     74, UNIPHIER_PIN_DRV_4_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     75, UNIPHIER_PIN_DRV_4_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     76, UNIPHIER_PIN_DRV_4_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     77, UNIPHIER_PIN_DRV_4_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     78, UNIPHIER_PIN_DRV_4_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     79, UNIPHIER_PIN_DRV_4_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     80, UNIPHIER_PIN_DRV_4_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     81, UNIPHIER_PIN_DRV_4_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     82, UNIPHIER_PIN_DRV_4_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE,
+			     83, UNIPHIER_PIN_DRV_4_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE,
+			     84, UNIPHIER_PIN_DRV_4_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+			     85, UNIPHIER_PIN_DRV_4_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE,
+			     86, UNIPHIER_PIN_DRV_4_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+			     87, UNIPHIER_PIN_DRV_4_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "AGCI", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     132, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "AGCR", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     133, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "AGCBS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     134, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "IECOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     88, UNIPHIER_PIN_DRV_4_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "ASMCK", UNIPHIER_PIN_IECTRL_NONE,
+			     89, UNIPHIER_PIN_DRV_4_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "ABCKO", UNIPHIER_PIN_IECTRL_NONE,
+			     90, UNIPHIER_PIN_DRV_4_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "ALRCKO", UNIPHIER_PIN_IECTRL_NONE,
+			     91, UNIPHIER_PIN_DRV_4_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "ASDOUT0", UNIPHIER_PIN_IECTRL_NONE,
+			     92, UNIPHIER_PIN_DRV_4_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "ASDOUT1", UNIPHIER_PIN_IECTRL_NONE,
+			     93, UNIPHIER_PIN_DRV_4_8,
+			     93, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(101, "ARCOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     94, UNIPHIER_PIN_DRV_4_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(103, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(104, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(105, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE,
+			     95, UNIPHIER_PIN_DRV_4_8,
+			     95, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE,
+			     96, UNIPHIER_PIN_DRV_4_8,
+			     96, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(112, "SBO1", UNIPHIER_PIN_IECTRL_NONE,
+			     97, UNIPHIER_PIN_DRV_4_8,
+			     97, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(113, "SBI1", UNIPHIER_PIN_IECTRL_NONE,
+			     98, UNIPHIER_PIN_DRV_4_8,
+			     98, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(114, "TXD1", UNIPHIER_PIN_IECTRL_NONE,
+			     99, UNIPHIER_PIN_DRV_4_8,
+			     99, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(115, "RXD1", UNIPHIER_PIN_IECTRL_NONE,
+			     100, UNIPHIER_PIN_DRV_4_8,
+			     100, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(116, "HIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(117, "VIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(118, "TCON0", UNIPHIER_PIN_IECTRL_NONE,
+			     101, UNIPHIER_PIN_DRV_4_8,
+			     101, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(119, "TCON1", UNIPHIER_PIN_IECTRL_NONE,
+			     102, UNIPHIER_PIN_DRV_4_8,
+			     102, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(120, "TCON2", UNIPHIER_PIN_IECTRL_NONE,
+			     103, UNIPHIER_PIN_DRV_4_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(121, "TCON3", UNIPHIER_PIN_IECTRL_NONE,
+			     104, UNIPHIER_PIN_DRV_4_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "TCON4", UNIPHIER_PIN_IECTRL_NONE,
+			     105, UNIPHIER_PIN_DRV_4_8,
+			     105, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(123, "TCON5", UNIPHIER_PIN_IECTRL_NONE,
+			     106, UNIPHIER_PIN_DRV_4_8,
+			     106, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "TCON6", UNIPHIER_PIN_IECTRL_NONE,
+			     107, UNIPHIER_PIN_DRV_4_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(125, "TCON7", UNIPHIER_PIN_IECTRL_NONE,
+			     108, UNIPHIER_PIN_DRV_4_8,
+			     108, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(126, "TCON8", UNIPHIER_PIN_IECTRL_NONE,
+			     109, UNIPHIER_PIN_DRV_4_8,
+			     109, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(127, "PWMA", UNIPHIER_PIN_IECTRL_NONE,
+			     110, UNIPHIER_PIN_DRV_4_8,
+			     110, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(128, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE,
+			     111, UNIPHIER_PIN_DRV_4_8,
+			     111, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(129, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE,
+			     112, UNIPHIER_PIN_DRV_4_8,
+			     112, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(130, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE,
+			     113, UNIPHIER_PIN_DRV_4_8,
+			     113, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(131, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+			     114, UNIPHIER_PIN_DRV_4_8,
+			     114, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(132, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+			     115, UNIPHIER_PIN_DRV_4_8,
+			     115, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(133, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE,
+			     116, UNIPHIER_PIN_DRV_4_8,
+			     116, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(134, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE,
+			     117, UNIPHIER_PIN_DRV_4_8,
+			     117, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(135, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE,
+			     118, UNIPHIER_PIN_DRV_4_8,
+			     118, UNIPHIER_PIN_PULL_DOWN),
+};
+
+static const unsigned emmc_pins[] = {21, 22, 23, 24, 25, 26, 27};
+static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
+static const unsigned emmc_dat8_pins[] = {28, 29, 30, 31};
+static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned i2c0_pins[] = {102, 103};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {104, 105};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {108, 109};
+static const unsigned i2c2_muxvals[] = {2, 2};
+static const unsigned i2c3_pins[] = {108, 109};
+static const unsigned i2c3_muxvals[] = {3, 3};
+static const unsigned nand_pins[] = {15, 16, 17, 18, 19, 20, 21, 24, 25, 26,
+				     27, 28, 29, 30, 31};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+					0, 0};
+static const unsigned nand_cs1_pins[] = {22, 23};
+static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned uart0_pins[] = {70, 71};
+static const unsigned uart0_muxvals[] = {3, 3};
+static const unsigned uart1_pins[] = {114, 115};
+static const unsigned uart1_muxvals[] = {0, 0};
+static const unsigned uart2_pins[] = {112, 113};
+static const unsigned uart2_muxvals[] = {1, 1};
+static const unsigned uart3_pins[] = {110, 111};
+static const unsigned uart3_muxvals[] = {1, 1};
+static const unsigned usb0_pins[] = {41, 42};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {43, 44};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {114, 115};
+static const unsigned usb2_muxvals[] = {1, 1};
+static const unsigned port_range0_pins[] = {
+	0, 1, 2, 3, 4, 5, 6, 7,				/* PORT0x */
+	8, 9, 10, 11, 12, 13, 14, 15,			/* PORT1x */
+	32, 33, 34, 35, 36, 37, 38, 39,			/* PORT2x */
+	59, 60, 61, 62, 63, 64, 65, 66,			/* PORT3x */
+	95, 96, 97, 98, 99, 100, 101, 57,		/* PORT4x */
+	70, 71, 72, 73, 74, 75, 76, 77,			/* PORT5x */
+	81, 83, 84, 85, 86, 89, 90, 91,			/* PORT6x */
+	118, 119, 120, 121, 122, 53, 54, 55,		/* PORT7x */
+	41, 42, 43, 44, 79, 80, 18, 19,			/* PORT8x */
+	110, 111, 112, 113, 114, 115, 16, 17,		/* PORT9x */
+	40, 67, 68, 69, 78, 92, 93, 94,			/* PORT10x */
+	48, 49, 46, 45, 123, 124, 125, 126,		/* PORT11x */
+	47, 127, 20, 56, 22,				/* PORT120-124 */
+};
+static const unsigned port_range0_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT0x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT1x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT2x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT3x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT4x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT5x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT6x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT7x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT11x */
+	15, 15, 15, 15, 15,				/* PORT120-124 */
+};
+static const unsigned port_range1_pins[] = {
+	116, 117,					/* PORT130-131 */
+};
+static const unsigned port_range1_muxvals[] = {
+	15, 15,						/* PORT130-131 */
+};
+static const unsigned port_range2_pins[] = {
+	102, 103, 104, 105, 106, 107, 108, 109,		/* PORT14x */
+};
+static const unsigned port_range2_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT14x */
+};
+static const unsigned port_range3_pins[] = {
+	23,						/* PORT166 */
+};
+static const unsigned port_range3_muxvals[] = {
+	15,						/* PORT166 */
+};
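+/*
+ * XIRQ0-7 live on dedicated XIRQ pins (mux value 0, see pins 128-135 above),
+ * while XIRQ8-12 and XIRQ14-15 are muxed onto shared pins with value 14.
+ */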
+static const unsigned xirq_range0_pins[] = {
+	128, 129, 130, 131, 132, 133, 134, 135,		/* XIRQ0-7 */
+	82, 87, 88, 50, 51,				/* XIRQ8-12 */
+};
+static const unsigned xirq_range0_muxvals[] = {
+	0, 0, 0, 0, 0, 0, 0, 0,				/* XIRQ0-7 */
+	14, 14, 14, 14, 14,				/* XIRQ8-12 */
+};
+static const unsigned xirq_range1_pins[] = {
+	52, 58,						/* XIRQ14-15 */
+};
+static const unsigned xirq_range1_muxvals[] = {
+	14, 14,						/* XIRQ14-15 */
+};
+
+static const struct uniphier_pinctrl_group ph1_sld8_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart3),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range2),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range3),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_range1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port110, port_range0, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port111, port_range0, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port112, port_range0, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port113, port_range0, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port114, port_range0, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port115, port_range0, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port116, port_range0, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port117, port_range0, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range0, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range0, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range0, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range0, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range0, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range1, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range2, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range2, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range2, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range2, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range2, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range2, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range2, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range2, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port166, port_range3, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq_range1, 1),
+};
+
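+/*
+ * Function tables: each foo_groups[] below lists the group names that may
+ * be selected for the pinmux function "foo".  UNIPHIER_PINMUX_FUNCTION(foo)
+ * is assumed to bind the function name to its foo_groups[] array by name
+ * (see pinctrl-uniphier.h for the macro definition).
+ */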
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	"port110", "port111", "port112", "port113",
+	"port114", "port115", "port116", "port117",
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	/* port150-164 missing */
+	/* none */ "port165",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", /* none*/ "xirq14", "xirq15",
+};
+
+static const struct uniphier_pinmux_function ph1_sld8_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(uart3),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
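+/*
+ * SoC data: mux_bits = 8 and reg_stride = 4 are assumed to mean that each
+ * pin occupies an 8-bit field in the mux registers and that consecutive
+ * registers sit at 4-byte intervals; load_pinctrl = false indicates that
+ * no explicit load-register write is required to latch new settings.
+ */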
+static struct uniphier_pinctrl_socdata ph1_sld8_pindata = {
+	.groups = ph1_sld8_groups,
+	.groups_count = ARRAY_SIZE(ph1_sld8_groups),
+	.functions = ph1_sld8_functions,
+	.functions_count = ARRAY_SIZE(ph1_sld8_functions),
+	.mux_bits = 8,
+	.reg_stride = 4,
+	.load_pinctrl = false,
+};
+
+static struct pinctrl_desc ph1_sld8_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = ph1_sld8_pins,
+	.npins = ARRAY_SIZE(ph1_sld8_pins),
+	.owner = THIS_MODULE,
+};
+
+static int ph1_sld8_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &ph1_sld8_pinctrl_desc,
+				      &ph1_sld8_pindata);
+}
+
+static const struct of_device_id ph1_sld8_pinctrl_match[] = {
+	{ .compatible = "socionext,ph1-sld8-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_sld8_pinctrl_match);
+
+static struct platform_driver ph1_sld8_pinctrl_driver = {
+	.probe = ph1_sld8_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = ph1_sld8_pinctrl_match,
+	},
+};
+module_platform_driver(ph1_sld8_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-sLD8 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-proxstream2.c b/drivers/pinctrl/uniphier/pinctrl-proxstream2.c
new file mode 100644
index 0000000..3f036e2
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-proxstream2.c
@@ -0,0 +1,1269 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "proxstream2-pinctrl"
+
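+/*
+ * Pin attribute table.  Each UNIPHIER_PINCTRL_PIN() entry gives, in order:
+ * pin number, pin name, input-enable control bit (UNIPHIER_PIN_IECTRL_NONE
+ * where there is none), drive-strength control bit position (-1 where the
+ * strength is fixed), the available drive strengths, pull-up/down control
+ * bit position (-1 where the bias is fixed), and the default pull
+ * direction.  (Argument layout inferred from the entries below; see
+ * pinctrl-uniphier.h for the macro itself.)
+ */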
+static const struct pinctrl_pin_desc proxstream2_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "ED0", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_4_8,
+			     0, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "ED1", UNIPHIER_PIN_IECTRL_NONE,
+			     1, UNIPHIER_PIN_DRV_4_8,
+			     1, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "ED2", UNIPHIER_PIN_IECTRL_NONE,
+			     2, UNIPHIER_PIN_DRV_4_8,
+			     2, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "ED3", UNIPHIER_PIN_IECTRL_NONE,
+			     3, UNIPHIER_PIN_DRV_4_8,
+			     3, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "ED4", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_4_8,
+			     4, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(5, "ED5", UNIPHIER_PIN_IECTRL_NONE,
+			     5, UNIPHIER_PIN_DRV_4_8,
+			     5, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "ED6", UNIPHIER_PIN_IECTRL_NONE,
+			     6, UNIPHIER_PIN_DRV_4_8,
+			     6, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "ED7", UNIPHIER_PIN_IECTRL_NONE,
+			     7, UNIPHIER_PIN_DRV_4_8,
+			     7, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_4_8,
+			     8, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_4_8,
+			     9, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_4_8,
+			     10, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_4_8,
+			     11, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(12, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_4_8,
+			     12, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(13, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_4_8,
+			     13, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(14, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_4_8,
+			     14, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(15, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     15, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(16, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     16, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(17, "SMTD0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     17, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(18, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     18, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(19, "SMTCLK0CG", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     19, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(20, "SMTDET0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     20, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(21, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(22, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(23, "SMTD1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(24, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     24, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(25, "SMTCLK1CG", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     25, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(26, "SMTDET1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     26, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(27, "XIRQ18", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     27, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(28, "XIRQ19", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     28, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(29, "XIRQ20", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     29, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(30, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+			     30, UNIPHIER_PIN_DRV_4_8,
+			     30, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(31, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(32, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(33, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(34, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(35, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(36, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(37, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(38, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(39, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
+			     39, UNIPHIER_PIN_DRV_4_8,
+			     39, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(40, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_4_8,
+			     40, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(41, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
+			     41, UNIPHIER_PIN_DRV_4_8,
+			     41, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(42, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
+			     42, UNIPHIER_PIN_DRV_4_8,
+			     42, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(43, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
+			     43, UNIPHIER_PIN_DRV_4_8,
+			     43, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(44, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_4_8,
+			     44, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(45, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
+			     45, UNIPHIER_PIN_DRV_4_8,
+			     45, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(46, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
+			     46, UNIPHIER_PIN_DRV_4_8,
+			     46, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(47, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(48, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(49, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(50, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(51, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(52, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(53, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     53, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(54, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     54, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(55, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(56, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(57, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     57, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(59, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(60, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(62, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(63, "USB3OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(64, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "XIRQ9", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "XIRQ10", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "XIRQ16", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "STS0CLKO", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "STS0SYNCO", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "STS0VALO", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "STS0DATAO", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "XIRQ17", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "PORT163", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "PORT165", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     93, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "PORT166", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "PORT132", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     95, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "PORT133", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     96, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     97, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "AI2ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     98, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "AI2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     99, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "AI2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     100, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(101, "AI2D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     101, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "AI2D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     102, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(103, "AI2D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(104, "AI2D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(105, "AO3DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     105, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(106, "AO3BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     106, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(107, "AO3LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(108, "AO3DMIX", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     108, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(109, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     109, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(110, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     110, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(111, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     111, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(112, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     112, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(113, "TXD2", 0,
+			     113, UNIPHIER_PIN_DRV_4_8,
+			     113, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(114, "RXD2", 0,
+			     114, UNIPHIER_PIN_DRV_4_8,
+			     114, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(115, "TXD1", 0,
+			     115, UNIPHIER_PIN_DRV_4_8,
+			     115, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(116, "RXD1", 0,
+			     116, UNIPHIER_PIN_DRV_4_8,
+			     116, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(117, "PORT190", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     117, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(118, "VI1HSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     118, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(119, "VI1VSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     119, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(120, "VI1DE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     120, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(121, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     121, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     122, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(123, "VI1G2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     123, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "VI1G3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     124, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(125, "VI1G4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     125, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(126, "VI1G5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     126, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(127, "VI1G6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     127, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(128, "VI1G7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     128, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(129, "VI1G8", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     129, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(130, "VI1G9", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     130, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(131, "VI1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     131, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(132, "PORT05", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     132, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(133, "PORT06", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     133, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(134, "VI1R2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     134, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(135, "VI1R3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     135, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(136, "VI1R4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     136, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(137, "VI1R5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     137, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(138, "VI1R6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     138, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(139, "VI1R7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     139, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(140, "VI1R8", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     140, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(141, "VI1R9", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     141, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(142, "LPST", UNIPHIER_PIN_IECTRL_NONE,
+			     142, UNIPHIER_PIN_DRV_4_8,
+			     142, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(143, "MDC", 0,
+			     143, UNIPHIER_PIN_DRV_4_8,
+			     143, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(144, "MDIO", 0,
+			     144, UNIPHIER_PIN_DRV_4_8,
+			     144, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(145, "MDIO_INTL", 0,
+			     145, UNIPHIER_PIN_DRV_4_8,
+			     145, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(146, "PHYRSTL", 0,
+			     146, UNIPHIER_PIN_DRV_4_8,
+			     146, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(147, "RGMII_RXCLK", 0,
+			     147, UNIPHIER_PIN_DRV_4_8,
+			     147, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(148, "RGMII_RXD0", 0,
+			     148, UNIPHIER_PIN_DRV_4_8,
+			     148, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(149, "RGMII_RXD1", 0,
+			     149, UNIPHIER_PIN_DRV_4_8,
+			     149, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(150, "RGMII_RXD2", 0,
+			     150, UNIPHIER_PIN_DRV_4_8,
+			     150, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(151, "RGMII_RXD3", 0,
+			     151, UNIPHIER_PIN_DRV_4_8,
+			     151, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(152, "RGMII_RXCTL", 0,
+			     152, UNIPHIER_PIN_DRV_4_8,
+			     152, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(153, "RGMII_TXCLK", 0,
+			     153, UNIPHIER_PIN_DRV_4_8,
+			     153, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(154, "RGMII_TXD0", 0,
+			     154, UNIPHIER_PIN_DRV_4_8,
+			     154, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(155, "RGMII_TXD1", 0,
+			     155, UNIPHIER_PIN_DRV_4_8,
+			     155, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(156, "RGMII_TXD2", 0,
+			     156, UNIPHIER_PIN_DRV_4_8,
+			     156, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(157, "RGMII_TXD3", 0,
+			     157, UNIPHIER_PIN_DRV_4_8,
+			     157, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(158, "RGMII_TXCTL", 0,
+			     158, UNIPHIER_PIN_DRV_4_8,
+			     158, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(159, "SDA3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     159, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(160, "SCL3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     160, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(161, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     161, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(162, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     162, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(163, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     163, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(164, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     164, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(165, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     165, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(166, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     166, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(167, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     167, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(168, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     168, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(169, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     169, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(170, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     170, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(171, "SDA2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     171, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(172, "SCL2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     172, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(173, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     173, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(174, "AI1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     174, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(175, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     175, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(176, "AO2D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     176, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(177, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     177, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(178, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     178, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(179, "PORT222", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     179, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(180, "PORT223", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     180, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(181, "PORT224", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     181, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(182, "PORT225", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     182, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(183, "PORT226", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     183, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(184, "PORT227", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     184, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(185, "PORT230", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     185, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(186, "FANPWM", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     186, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(187, "HRDDCSDA0", 0,
+			     187, UNIPHIER_PIN_DRV_4_8,
+			     187, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(188, "HRDDCSCL0", 0,
+			     188, UNIPHIER_PIN_DRV_4_8,
+			     188, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(189, "HRDDCSDA1", 0,
+			     189, UNIPHIER_PIN_DRV_4_8,
+			     189, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(190, "HRDDCSCL1", 0,
+			     190, UNIPHIER_PIN_DRV_4_8,
+			     190, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(191, "HTDDCSDA0", 0,
+			     191, UNIPHIER_PIN_DRV_4_8,
+			     191, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(192, "HTDDCSCL0", 0,
+			     192, UNIPHIER_PIN_DRV_4_8,
+			     192, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(193, "HTDDCSDA1", 0,
+			     193, UNIPHIER_PIN_DRV_4_8,
+			     193, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(194, "HTDDCSCL1", 0,
+			     194, UNIPHIER_PIN_DRV_4_8,
+			     194, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(195, "PORT241", 0,
+			     195, UNIPHIER_PIN_DRV_4_8,
+			     195, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(196, "PORT242", 0,
+			     196, UNIPHIER_PIN_DRV_4_8,
+			     196, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(197, "PORT243", 0,
+			     197, UNIPHIER_PIN_DRV_4_8,
+			     197, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(198, "MVSYNC", 0,
+			     198, UNIPHIER_PIN_DRV_4_8,
+			     198, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(199, "SPISYNC0", UNIPHIER_PIN_IECTRL_NONE,
+			     199, UNIPHIER_PIN_DRV_4_8,
+			     199, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(200, "SPISCLK0", UNIPHIER_PIN_IECTRL_NONE,
+			     200, UNIPHIER_PIN_DRV_4_8,
+			     200, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(201, "SPITXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     201, UNIPHIER_PIN_DRV_4_8,
+			     201, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(202, "SPIRXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     202, UNIPHIER_PIN_DRV_4_8,
+			     202, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(203, "CK54EXI", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     203, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(204, "AEXCKA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     204, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(205, "AEXCKA2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     205, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(206, "CK27EXI", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     206, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(207, "STCDIN", 0,
+			     207, UNIPHIER_PIN_DRV_4_8,
+			     207, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(208, "PHSYNI", 0,
+			     208, UNIPHIER_PIN_DRV_4_8,
+			     208, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(209, "PVSYNI", 0,
+			     209, UNIPHIER_PIN_DRV_4_8,
+			     209, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(210, "MVSYN", UNIPHIER_PIN_IECTRL_NONE,
+			     210, UNIPHIER_PIN_DRV_4_8,
+			     210, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(211, "STCV", UNIPHIER_PIN_IECTRL_NONE,
+			     211, UNIPHIER_PIN_DRV_4_8,
+			     211, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(212, "PORT262", UNIPHIER_PIN_IECTRL_NONE,
+			     212, UNIPHIER_PIN_DRV_4_8,
+			     212, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(213, "USB0VBUS_IRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     213, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(214, "USB1VBUS_IRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     214, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(215, "PORT265", UNIPHIER_PIN_IECTRL_NONE,
+			     215, UNIPHIER_PIN_DRV_4_8,
+			     215, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(216, "CK25O", 0,
+			     216, UNIPHIER_PIN_DRV_4_8,
+			     216, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(217, "TXD0", 0,
+			     217, UNIPHIER_PIN_DRV_4_8,
+			     217, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(218, "RXD0", 0,
+			     218, UNIPHIER_PIN_DRV_4_8,
+			     218, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(219, "TXD3", 0,
+			     219, UNIPHIER_PIN_DRV_4_8,
+			     219, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(220, "RXD3", 0,
+			     220, UNIPHIER_PIN_DRV_4_8,
+			     220, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(221, "PORT273", 0,
+			     221, UNIPHIER_PIN_DRV_4_8,
+			     221, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(222, "STCDOUTC", 0,
+			     222, UNIPHIER_PIN_DRV_4_8,
+			     222, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(223, "PORT274", 0,
+			     223, UNIPHIER_PIN_DRV_4_8,
+			     223, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(224, "PORT275", 0,
+			     224, UNIPHIER_PIN_DRV_4_8,
+			     224, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(225, "PORT276", 0,
+			     225, UNIPHIER_PIN_DRV_4_8,
+			     225, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(226, "PORT277", 0,
+			     226, UNIPHIER_PIN_DRV_4_8,
+			     226, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(227, "PORT280", 0,
+			     227, UNIPHIER_PIN_DRV_4_8,
+			     227, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(228, "PORT281", 0,
+			     228, UNIPHIER_PIN_DRV_4_8,
+			     228, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(229, "PORT282", 0,
+			     229, UNIPHIER_PIN_DRV_4_8,
+			     229, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(230, "PORT283", 0,
+			     230, UNIPHIER_PIN_DRV_4_8,
+			     230, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(231, "PORT284", 0,
+			     231, UNIPHIER_PIN_DRV_4_8,
+			     231, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(232, "PORT285", 0,
+			     232, UNIPHIER_PIN_DRV_4_8,
+			     232, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(233, "T0HPD", 0,
+			     233, UNIPHIER_PIN_DRV_4_8,
+			     233, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(234, "T1HPD", 0,
+			     234, UNIPHIER_PIN_DRV_4_8,
+			     234, UNIPHIER_PIN_PULL_DOWN),
+};
+
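+/*
+ * Pin group tables.  Each group pairs a foo_pins[] array with a
+ * foo_muxvals[] array of equal length: muxvals[i] is the mux value
+ * programmed for pins[i] when the group is selected.
+ * UNIPHIER_PINCTRL_GROUP(foo) is assumed to pick up both arrays by name,
+ * and UNIPHIER_PINCTRL_GROUP_SINGLE(bar, range, ofs) to carve a
+ * single-pin group out of pins[ofs]/muxvals[ofs] of an existing range.
+ */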
+static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42};
+static const unsigned emmc_muxvals[] = {9, 9, 9, 9, 9, 9, 9};
+static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46};
+static const unsigned emmc_dat8_muxvals[] = {9, 9, 9, 9};
+static const unsigned i2c0_pins[] = {109, 110};
+static const unsigned i2c0_muxvals[] = {8, 8};
+static const unsigned i2c1_pins[] = {111, 112};
+static const unsigned i2c1_muxvals[] = {8, 8};
+static const unsigned i2c2_pins[] = {171, 172};
+static const unsigned i2c2_muxvals[] = {8, 8};
+static const unsigned i2c3_pins[] = {159, 160};
+static const unsigned i2c3_muxvals[] = {8, 8};
+static const unsigned i2c5_pins[] = {183, 184};
+static const unsigned i2c5_muxvals[] = {11, 11};
+static const unsigned i2c6_pins[] = {185, 186};
+static const unsigned i2c6_muxvals[] = {11, 11};
+static const unsigned nand_pins[] = {30, 31, 32, 33, 34, 35, 36, 39, 40, 41,
+				     42, 43, 44, 45, 46};
+static const unsigned nand_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+					8, 8};
+static const unsigned nand_cs1_pins[] = {37, 38};
+static const unsigned nand_cs1_muxvals[] = {8, 8};
+static const unsigned uart0_pins[] = {217, 218};
+static const unsigned uart0_muxvals[] = {8, 8};
+static const unsigned uart0b_pins[] = {179, 180};
+static const unsigned uart0b_muxvals[] = {10, 10};
+static const unsigned uart1_pins[] = {115, 116};
+static const unsigned uart1_muxvals[] = {8, 8};
+static const unsigned uart2_pins[] = {113, 114};
+static const unsigned uart2_muxvals[] = {8, 8};
+static const unsigned uart3_pins[] = {219, 220};
+static const unsigned uart3_muxvals[] = {8, 8};
+static const unsigned uart3b_pins[] = {181, 182};
+static const unsigned uart3b_muxvals[] = {10, 10};
+static const unsigned usb0_pins[] = {56, 57};
+static const unsigned usb0_muxvals[] = {8, 8};
+static const unsigned usb1_pins[] = {58, 59};
+static const unsigned usb1_muxvals[] = {8, 8};
+static const unsigned usb2_pins[] = {60, 61};
+static const unsigned usb2_muxvals[] = {8, 8};
+static const unsigned usb3_pins[] = {62, 63};
+static const unsigned usb3_muxvals[] = {8, 8};
+static const unsigned port_range0_pins[] = {
+	127, 128, 129, 130, 131, 132, 133, 134,		/* PORT0x */
+	135, 136, 137, 138, 139, 140, 141, 142,		/* PORT1x */
+	0, 1, 2, 3, 4, 5, 6, 7,				/* PORT2x */
+	8, 9, 10, 11, 12, 13, 14, 15,			/* PORT3x */
+	16, 17, 18, 19, 21, 22, 23, 24,			/* PORT4x */
+	25, 30, 31, 32, 33, 34, 35, 36,			/* PORT5x */
+	37, 38, 39, 40, 41, 42, 43, 44,			/* PORT6x */
+	45, 46, 47, 48, 49, 50, 51, 52,			/* PORT7x */
+	53, 54, 55, 56, 57, 58, 59, 60,			/* PORT8x */
+	61, 62, 63, 64, 65, 66, 67, 68,			/* PORT9x */
+	69, 70, 71, 76, 77, 78, 79, 80,			/* PORT10x */
+};
+static const unsigned port_range0_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT0x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT1x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT2x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT3x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT4x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT5x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT6x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT7x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
+};
+static const unsigned port_range1_pins[] = {
+	81, 82, 83, 84, 85, 86, 87, 88,			/* PORT12x */
+	89, 90, 95, 96, 97, 98, 99, 100,		/* PORT13x */
+	101, 102, 103, 104, 105, 106, 107, 108,		/* PORT14x */
+	118, 119, 120, 121, 122, 123, 124, 125,		/* PORT15x */
+	126, 72, 73, 92, 177, 93, 94, 176,		/* PORT16x */
+	74, 91, 27, 28, 29, 75, 20, 26,			/* PORT17x */
+	109, 110, 111, 112, 113, 114, 115, 116,		/* PORT18x */
+	117, 143, 144, 145, 146, 147, 148, 149,		/* PORT19x */
+	150, 151, 152, 153, 154, 155, 156, 157,		/* PORT20x */
+	158, 159, 160, 161, 162, 163, 164, 165,		/* PORT21x */
+	166, 178, 179, 180, 181, 182, 183, 184,		/* PORT22x */
+	185, 187, 188, 189, 190, 191, 192, 193,		/* PORT23x */
+	194, 195, 196, 197, 198, 199, 200, 201,		/* PORT24x */
+	202, 203, 204, 205, 206, 207, 208, 209,		/* PORT25x */
+	210, 211, 212, 213, 214, 215, 216, 217,		/* PORT26x */
+	218, 219, 220, 221, 223, 224, 225, 226,		/* PORT27x */
+	227, 228, 229, 230, 231, 232, 233, 234,		/* PORT28x */
+};
+static const unsigned port_range1_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT12x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT13x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT14x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT15x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT16x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT17x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT18x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT19x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT20x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT21x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT22x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT23x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT24x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT25x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT26x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT27x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT28x */
+};
+static const unsigned xirq_pins[] = {
+	118, 119, 120, 121, 122, 123, 124, 125,		/* XIRQ0-7 */
+	126, 72, 73, 92, 177, 93, 94, 176,		/* XIRQ8-15 */
+	74, 91, 27, 28, 29, 75, 20, 26,			/* XIRQ16-23 */
+};
+static const unsigned xirq_muxvals[] = {
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ0-7 */
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ8-15 */
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ16-23 */
+};
+
+static const struct uniphier_pinctrl_group proxstream2_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(i2c5),
+	UNIPHIER_PINCTRL_GROUP(i2c6),
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart0b),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart3),
+	UNIPHIER_PINCTRL_GROUP(uart3b),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP(usb3),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range1, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range1, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range1, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range1, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range1, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range1, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range1, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range1, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range1, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range1, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range1, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range1, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range1, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range1, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range1, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range1, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range1, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range1, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range1, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range1, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range1, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range1, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range1, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port150, port_range1, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port151, port_range1, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port152, port_range1, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port153, port_range1, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port154, port_range1, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port155, port_range1, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port156, port_range1, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port157, port_range1, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port160, port_range1, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port161, port_range1, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port162, port_range1, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port163, port_range1, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port164, port_range1, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port165, port_range1, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port166, port_range1, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port167, port_range1, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port170, port_range1, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port171, port_range1, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port172, port_range1, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port173, port_range1, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port174, port_range1, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 107),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 108),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 109),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 110),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 111),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 112),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 113),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 114),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 115),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 116),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 117),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 118),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 119),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 120),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 121),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 122),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 123),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 124),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 125),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 126),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 127),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 128),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 129),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 130),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 131),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 132),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 133),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 134),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 135),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21, xirq, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22, xirq, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23, xirq, 23),
+};
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const i2c5_groups[] = {"i2c5"};
+static const char * const i2c6_groups[] = {"i2c6"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0", "uart0b"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3", "uart3b"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const usb3_groups[] = {"usb3"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	/* port110-117 missing */
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	"port150", "port151", "port152", "port153",
+	"port154", "port155", "port156", "port157",
+	"port160", "port161", "port162", "port163",
+	"port164", "port165", "port166", "port167",
+	"port170", "port171", "port172", "port173",
+	"port174", "port175", "port176", "port177",
+	"port180", "port181", "port182", "port183",
+	"port184", "port185", "port186", "port187",
+	"port190", "port191", "port192", "port193",
+	"port194", "port195", "port196", "port197",
+	"port200", "port201", "port202", "port203",
+	"port204", "port205", "port206", "port207",
+	"port210", "port211", "port212", "port213",
+	"port214", "port215", "port216", "port217",
+	"port220", "port221", "port222", "port223",
+	"port224", "port225", "port226", "port227",
+	"port230", "port231", "port232", "port233",
+	"port234", "port235", "port236", "port237",
+	"port240", "port241", "port242", "port243",
+	"port244", "port245", "port246", "port247",
+	"port250", "port251", "port252", "port253",
+	"port254", "port255", "port256", "port257",
+	"port260", "port261", "port262", "port263",
+	"port264", "port265", "port266", "port267",
+	"port270", "port271", "port272", "port273",
+	"port274", "port275", "port276", "port277",
+	"port280", "port281", "port282", "port283",
+	"port284", "port285", "port286", "port287",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", "xirq13", "xirq14", "xirq15",
+	"xirq16", "xirq17", "xirq18", "xirq19",
+	"xirq20", "xirq21", "xirq22", "xirq23",
+};
+
+static const struct uniphier_pinmux_function proxstream2_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(i2c5),
+	UNIPHIER_PINMUX_FUNCTION(i2c6),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(uart3),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(usb3),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
+static struct uniphier_pinctrl_socdata proxstream2_pindata = {
+	.groups = proxstream2_groups,
+	.groups_count = ARRAY_SIZE(proxstream2_groups),
+	.functions = proxstream2_functions,
+	.functions_count = ARRAY_SIZE(proxstream2_functions),
+	.mux_bits = 8,
+	.reg_stride = 4,
+	.load_pinctrl = false,
+};
+
+static struct pinctrl_desc proxstream2_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = proxstream2_pins,
+	.npins = ARRAY_SIZE(proxstream2_pins),
+	.owner = THIS_MODULE,
+};
+
+static int proxstream2_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &proxstream2_pinctrl_desc,
+				      &proxstream2_pindata);
+}
+
+static const struct of_device_id proxstream2_pinctrl_match[] = {
+	{ .compatible = "socionext,proxstream2-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, proxstream2_pinctrl_match);
+
+static struct platform_driver proxstream2_pinctrl_driver = {
+	.probe = proxstream2_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = proxstream2_pinctrl_match,
+	},
+};
+module_platform_driver(proxstream2_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier ProXstream2 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
new file mode 100644
index 0000000..918f3b6
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
@@ -0,0 +1,684 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/mfd/syscon.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "pinctrl-uniphier.h"
+
+struct uniphier_pinctrl_priv {
+	struct pinctrl_dev *pctldev;
+	struct regmap *regmap;
+	struct uniphier_pinctrl_socdata *socdata;
+};
+
+static int uniphier_pctl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	return priv->socdata->groups_count;
+}
+
+static const char *uniphier_pctl_get_group_name(struct pinctrl_dev *pctldev,
+						unsigned selector)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	return priv->socdata->groups[selector].name;
+}
+
+static int uniphier_pctl_get_group_pins(struct pinctrl_dev *pctldev,
+					unsigned selector,
+					const unsigned **pins,
+					unsigned *num_pins)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins = priv->socdata->groups[selector].pins;
+	*num_pins = priv->socdata->groups[selector].num_pins;
+
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void uniphier_pctl_pin_dbg_show(struct pinctrl_dev *pctldev,
+				       struct seq_file *s, unsigned offset)
+{
+	const struct pinctrl_pin_desc *pin = &pctldev->desc->pins[offset];
+	const char *pull_dir, *drv_str;
+
+	switch (uniphier_pin_get_pull_dir(pin->drv_data)) {
+	case UNIPHIER_PIN_PULL_UP:
+		pull_dir = "UP";
+		break;
+	case UNIPHIER_PIN_PULL_DOWN:
+		pull_dir = "DOWN";
+		break;
+	case UNIPHIER_PIN_PULL_NONE:
+		pull_dir = "NONE";
+		break;
+	default:
+		BUG();
+	}
+
+	switch (uniphier_pin_get_drv_str(pin->drv_data)) {
+	case UNIPHIER_PIN_DRV_4_8:
+		drv_str = "4/8(mA)";
+		break;
+	case UNIPHIER_PIN_DRV_8_12_16_20:
+		drv_str = "8/12/16/20(mA)";
+		break;
+	case UNIPHIER_PIN_DRV_FIXED_4:
+		drv_str = "4(mA)";
+		break;
+	case UNIPHIER_PIN_DRV_FIXED_5:
+		drv_str = "5(mA)";
+		break;
+	case UNIPHIER_PIN_DRV_FIXED_8:
+		drv_str = "8(mA)";
+		break;
+	case UNIPHIER_PIN_DRV_NONE:
+		drv_str = "NONE";
+		break;
+	default:
+		BUG();
+	}
+
+	seq_printf(s, " PULL_DIR=%s  DRV_STR=%s", pull_dir, drv_str);
+}
+#endif
+
+static const struct pinctrl_ops uniphier_pctlops = {
+	.get_groups_count = uniphier_pctl_get_groups_count,
+	.get_group_name = uniphier_pctl_get_group_name,
+	.get_group_pins = uniphier_pctl_get_group_pins,
+#ifdef CONFIG_DEBUG_FS
+	.pin_dbg_show = uniphier_pctl_pin_dbg_show,
+#endif
+	.dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+	.dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int uniphier_conf_pin_bias_get(struct pinctrl_dev *pctldev,
+				      const struct pinctrl_pin_desc *pin,
+				      enum pin_config_param param)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	enum uniphier_pin_pull_dir pull_dir =
+				uniphier_pin_get_pull_dir(pin->drv_data);
+	unsigned int pupdctrl, reg, shift, val;
+	unsigned int expected = 1;
+	int ret;
+
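+	/*
+	 * The queried bias is in effect when the pin's PUPDCTRL enable bit
+	 * reads as @expected: 1 for an enabled pull, 0 for a disabled one.
+	 */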
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		if (pull_dir == UNIPHIER_PIN_PULL_NONE)
+			return 0;
+		if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED ||
+		    pull_dir == UNIPHIER_PIN_PULL_DOWN_FIXED)
+			return -EINVAL;
+		expected = 0;
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED)
+			return 0;
+		if (pull_dir != UNIPHIER_PIN_PULL_UP)
+			return -EINVAL;
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		if (pull_dir == UNIPHIER_PIN_PULL_DOWN_FIXED)
+			return 0;
+		if (pull_dir != UNIPHIER_PIN_PULL_DOWN)
+			return -EINVAL;
+		break;
+	default:
+		BUG();
+	}
+
+	pupdctrl = uniphier_pin_get_pupdctrl(pin->drv_data);
+
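+	/* each 32-bit PUPDCTRL register packs one enable bit for 32 pins */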
+	reg = UNIPHIER_PINCTRL_PUPDCTRL_BASE + pupdctrl / 32 * 4;
+	shift = pupdctrl % 32;
+
+	ret = regmap_read(priv->regmap, reg, &val);
+	if (ret)
+		return ret;
+
+	val = (val >> shift) & 1;
+
+	return (val == expected) ? 0 : -EINVAL;
+}
+
+static int uniphier_conf_pin_drive_get(struct pinctrl_dev *pctldev,
+				       const struct pinctrl_pin_desc *pin,
+				       u16 *strength)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	enum uniphier_pin_drv_str drv_str =
+				uniphier_pin_get_drv_str(pin->drv_data);
+	const unsigned int strength_4_8[] = {4, 8};
+	const unsigned int strength_8_12_16_20[] = {8, 12, 16, 20};
+	const unsigned int *supported_strength;
+	unsigned int drvctrl, reg, shift, mask, width, val;
+	int ret;
+
+	switch (drv_str) {
+	case UNIPHIER_PIN_DRV_4_8:
+		supported_strength = strength_4_8;
+		width = 1;
+		break;
+	case UNIPHIER_PIN_DRV_8_12_16_20:
+		supported_strength = strength_8_12_16_20;
+		width = 2;
+		break;
+	case UNIPHIER_PIN_DRV_FIXED_4:
+		*strength = 4;
+		return 0;
+	case UNIPHIER_PIN_DRV_FIXED_5:
+		*strength = 5;
+		return 0;
+	case UNIPHIER_PIN_DRV_FIXED_8:
+		*strength = 8;
+		return 0;
+	default:
+		/* drive strength control is not supported for this pin */
+		return -EINVAL;
+	}
+
+	drvctrl = uniphier_pin_get_drvctrl(pin->drv_data);
+	drvctrl *= width;
+
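+	/*
+	 * 1-bit drive fields live in the DRVCTRL registers, 2-bit fields in
+	 * DRV2CTRL; each 32-bit register packs 32 / width fields.
+	 */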
+	reg = (width == 2) ? UNIPHIER_PINCTRL_DRV2CTRL_BASE :
+			     UNIPHIER_PINCTRL_DRVCTRL_BASE;
+
+	reg += drvctrl / 32 * 4;
+	shift = drvctrl % 32;
+	mask = (1U << width) - 1;
+
+	ret = regmap_read(priv->regmap, reg, &val);
+	if (ret)
+		return ret;
+
+	*strength = supported_strength[(val >> shift) & mask];
+
+	return 0;
+}
+
+static int uniphier_conf_pin_input_enable_get(struct pinctrl_dev *pctldev,
+					const struct pinctrl_pin_desc *pin)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	unsigned int iectrl = uniphier_pin_get_iectrl(pin->drv_data);
+	unsigned int val;
+	int ret;
+
+	if (iectrl == UNIPHIER_PIN_IECTRL_NONE)
+		/* This pin is always input-enabled. */
+		return 0;
+
+	ret = regmap_read(priv->regmap, UNIPHIER_PINCTRL_IECTRL, &val);
+	if (ret)
+		return ret;
+
+	return val & BIT(iectrl) ? 0 : -EINVAL;
+}
+
+static int uniphier_conf_pin_config_get(struct pinctrl_dev *pctldev,
+					unsigned pin,
+					unsigned long *configs)
+{
+	const struct pinctrl_pin_desc *pin_desc = &pctldev->desc->pins[pin];
+	enum pin_config_param param = pinconf_to_config_param(*configs);
+	bool has_arg = false;
+	u16 arg;
+	int ret;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+	case PIN_CONFIG_BIAS_PULL_UP:
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		ret = uniphier_conf_pin_bias_get(pctldev, pin_desc, param);
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		ret = uniphier_conf_pin_drive_get(pctldev, pin_desc, &arg);
+		has_arg = true;
+		break;
+	case PIN_CONFIG_INPUT_ENABLE:
+		ret = uniphier_conf_pin_input_enable_get(pctldev, pin_desc);
+		break;
+	default:
+		/* unsupported parameter */
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret == 0 && has_arg)
+		*configs = pinconf_to_config_packed(param, arg);
+
+	return ret;
+}
+
+static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev,
+				      const struct pinctrl_pin_desc *pin,
+				      enum pin_config_param param,
+				      u16 arg)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	enum uniphier_pin_pull_dir pull_dir =
+				uniphier_pin_get_pull_dir(pin->drv_data);
+	unsigned int pupdctrl, reg, shift;
+	unsigned int val = 1;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		if (pull_dir == UNIPHIER_PIN_PULL_NONE)
+			return 0;
+		if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED ||
+		    pull_dir == UNIPHIER_PIN_PULL_DOWN_FIXED) {
+			dev_err(pctldev->dev,
+				"can not disable pull register for pin %u (%s)\n",
+				pin->number, pin->name);
+			return -EINVAL;
+		}
+		val = 0;
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED && arg != 0)
+			return 0;
+		if (pull_dir != UNIPHIER_PIN_PULL_UP) {
+			dev_err(pctldev->dev,
+				"pull-up is unsupported for pin %u (%s)\n",
+				pin->number, pin->name);
+			return -EINVAL;
+		}
+		if (arg == 0) {
+			dev_err(pctldev->dev, "pull-up can not be total\n");
+			return -EINVAL;
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		if (pull_dir == UNIPHIER_PIN_PULL_DOWN_FIXED && arg != 0)
+			return 0;
+		if (pull_dir != UNIPHIER_PIN_PULL_DOWN) {
+			dev_err(pctldev->dev,
+				"pull-down is unsupported for pin %u (%s)\n",
+				pin->number, pin->name);
+			return -EINVAL;
+		}
+		if (arg == 0) {
+			dev_err(pctldev->dev, "pull-down can not be total\n");
+			return -EINVAL;
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+		if (pull_dir == UNIPHIER_PIN_PULL_NONE) {
+			dev_err(pctldev->dev,
+				"pull-up/down is unsupported for pin %u (%s)\n",
+				pin->number, pin->name);
+			return -EINVAL;
+		}
+
+		if (arg == 0)
+			return 0; /* configuration ignored */
+		break;
+	default:
+		BUG();
+	}
+
+	pupdctrl = uniphier_pin_get_pupdctrl(pin->drv_data);
+
+	reg = UNIPHIER_PINCTRL_PUPDCTRL_BASE + pupdctrl / 32 * 4;
+	shift = pupdctrl % 32;
+
+	return regmap_update_bits(priv->regmap, reg, 1 << shift, val << shift);
+}
+
+static int uniphier_conf_pin_drive_set(struct pinctrl_dev *pctldev,
+				       const struct pinctrl_pin_desc *pin,
+				       u16 strength)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	enum uniphier_pin_drv_str drv_str =
+				uniphier_pin_get_drv_str(pin->drv_data);
+	const unsigned int strength_4_8[] = {4, 8, -1};
+	const unsigned int strength_8_12_16_20[] = {8, 12, 16, 20, -1};
+	const unsigned int *supported_strength;
+	unsigned int drvctrl, reg, shift, mask, width, val;
+
+	switch (drv_str) {
+	case UNIPHIER_PIN_DRV_4_8:
+		supported_strength = strength_4_8;
+		width = 1;
+		break;
+	case UNIPHIER_PIN_DRV_8_12_16_20:
+		supported_strength = strength_8_12_16_20;
+		width = 2;
+		break;
+	default:
+		dev_err(pctldev->dev,
+			"cannot change drive strength for pin %u (%s)\n",
+			pin->number, pin->name);
+		return -EINVAL;
+	}
+
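+	/*
+	 * Select the largest supported strength that does not exceed the
+	 * request; the (unsigned)-1 sentinel compares greater than any u16
+	 * strength, so the loop is guaranteed to terminate there.
+	 */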
+	for (val = 0; supported_strength[val] > 0; val++) {
+		if (supported_strength[val] > strength)
+			break;
+	}
+
+	if (val == 0) {
+		dev_err(pctldev->dev,
+			"unsupported drive strength %u mA for pin %u (%s)\n",
+			strength, pin->number, pin->name);
+		return -EINVAL;
+	}
+
+	val--;
+
+	drvctrl = uniphier_pin_get_drvctrl(pin->drv_data);
+	drvctrl *= width;
+
+	reg = (width == 2) ? UNIPHIER_PINCTRL_DRV2CTRL_BASE :
+			     UNIPHIER_PINCTRL_DRVCTRL_BASE;
+
+	reg += drvctrl / 32 * 4;
+	shift = drvctrl % 32;
+	mask = (1U << width) - 1;
+
+	return regmap_update_bits(priv->regmap, reg,
+				  mask << shift, val << shift);
+}
+
+static int uniphier_conf_pin_input_enable(struct pinctrl_dev *pctldev,
+					  const struct pinctrl_pin_desc *pin,
+					  u16 enable)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	unsigned int iectrl = uniphier_pin_get_iectrl(pin->drv_data);
+
+	if (enable == 0) {
+		/*
+		 * Multiple pins share one input enable, so per-pin disabling
+		 * is impossible.
+		 */
+		dev_err(pctldev->dev, "unable to disable input\n");
+		return -EINVAL;
+	}
+
+	if (iectrl == UNIPHIER_PIN_IECTRL_NONE)
+		/* This pin is always input-enabled. Nothing to do. */
+		return 0;
+
+	return regmap_update_bits(priv->regmap, UNIPHIER_PINCTRL_IECTRL,
+				  BIT(iectrl), BIT(iectrl));
+}
+
+static int uniphier_conf_pin_config_set(struct pinctrl_dev *pctldev,
+					unsigned pin,
+					unsigned long *configs,
+					unsigned num_configs)
+{
+	const struct pinctrl_pin_desc *pin_desc = &pctldev->desc->pins[pin];
+	int i, ret;
+
+	for (i = 0; i < num_configs; i++) {
+		enum pin_config_param param =
+					pinconf_to_config_param(configs[i]);
+		u16 arg = pinconf_to_config_argument(configs[i]);
+
+		switch (param) {
+		case PIN_CONFIG_BIAS_DISABLE:
+		case PIN_CONFIG_BIAS_PULL_UP:
+		case PIN_CONFIG_BIAS_PULL_DOWN:
+		case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+			ret = uniphier_conf_pin_bias_set(pctldev, pin_desc,
+							 param, arg);
+			break;
+		case PIN_CONFIG_DRIVE_STRENGTH:
+			ret = uniphier_conf_pin_drive_set(pctldev, pin_desc,
+							  arg);
+			break;
+		case PIN_CONFIG_INPUT_ENABLE:
+			ret = uniphier_conf_pin_input_enable(pctldev,
+							     pin_desc, arg);
+			break;
+		default:
+			dev_err(pctldev->dev,
+				"unsupported configuration parameter %u\n",
+				param);
+			return -EINVAL;
+		}
+
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int uniphier_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
+					      unsigned selector,
+					      unsigned long *configs,
+					      unsigned num_configs)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	const unsigned *pins = priv->socdata->groups[selector].pins;
+	unsigned num_pins = priv->socdata->groups[selector].num_pins;
+	int i, ret;
+
+	for (i = 0; i < num_pins; i++) {
+		ret = uniphier_conf_pin_config_set(pctldev, pins[i],
+						   configs, num_configs);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static const struct pinconf_ops uniphier_confops = {
+	.is_generic = true,
+	.pin_config_get = uniphier_conf_pin_config_get,
+	.pin_config_set = uniphier_conf_pin_config_set,
+	.pin_config_group_set = uniphier_conf_pin_config_group_set,
+};
+
+static int uniphier_pmx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	return priv->socdata->functions_count;
+}
+
+static const char *uniphier_pmx_get_function_name(struct pinctrl_dev *pctldev,
+						  unsigned selector)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	return priv->socdata->functions[selector].name;
+}
+
+static int uniphier_pmx_get_function_groups(struct pinctrl_dev *pctldev,
+					    unsigned selector,
+					    const char * const **groups,
+					    unsigned *num_groups)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = priv->socdata->functions[selector].groups;
+	*num_groups = priv->socdata->functions[selector].num_groups;
+
+	return 0;
+}
+
+static int uniphier_pmx_set_one_mux(struct pinctrl_dev *pctldev, unsigned pin,
+				    unsigned muxval)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	unsigned mux_bits = priv->socdata->mux_bits;
+	unsigned reg_stride = priv->socdata->reg_stride;
+	unsigned reg, reg_end, shift, mask;
+	int ret;
+
+	reg = UNIPHIER_PINCTRL_PINMUX_BASE + pin * mux_bits / 32 * reg_stride;
+	reg_end = reg + reg_stride;
+	shift = pin * mux_bits % 32;
+	mask = (1U << mux_bits) - 1;
+
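+	/*
+	 * e.g. with mux_bits = 8 and reg_stride = 4, pin 5 maps to
+	 * reg = base + 4, shift = 8.
+	 */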
+	/*
+	 * If reg_stride is greater than 4, the upper bits of each pin-mux
+	 * value are stored in the register at offset + 4.
+	 */
+	for (; reg < reg_end; reg += 4) {
+		ret = regmap_update_bits(priv->regmap, reg,
+					 mask << shift, muxval << shift);
+		if (ret)
+			return ret;
+		muxval >>= mux_bits;
+	}
+
+	if (priv->socdata->load_pinctrl) {
+		ret = regmap_write(priv->regmap,
+				   UNIPHIER_PINCTRL_LOAD_PINMUX, 1);
+		if (ret)
+			return ret;
+	}
+
+	/* some pins need input-enabling */
+	return uniphier_conf_pin_input_enable(pctldev,
+					      &pctldev->desc->pins[pin], 1);
+}
+
+static int uniphier_pmx_set_mux(struct pinctrl_dev *pctldev,
+				unsigned func_selector,
+				unsigned group_selector)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	const struct uniphier_pinctrl_group *grp =
+					&priv->socdata->groups[group_selector];
+	int i;
+	int ret;
+
+	for (i = 0; i < grp->num_pins; i++) {
+		ret = uniphier_pmx_set_one_mux(pctldev, grp->pins[i],
+					       grp->muxvals[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int uniphier_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
+					    struct pinctrl_gpio_range *range,
+					    unsigned offset)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	const struct uniphier_pinctrl_group *groups = priv->socdata->groups;
+	int groups_count = priv->socdata->groups_count;
+	enum uniphier_pinmux_gpio_range_type range_type;
+	int i, j;
+
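+	/*
+	 * GPIO ranges whose name contains "irq" select the XIRQ pin groups;
+	 * all other ranges select the regular port groups.
+	 */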
+	if (strstr(range->name, "irq"))
+		range_type = UNIPHIER_PINMUX_GPIO_RANGE_IRQ;
+	else
+		range_type = UNIPHIER_PINMUX_GPIO_RANGE_PORT;
+
+	for (i = 0; i < groups_count; i++) {
+		if (groups[i].range_type != range_type)
+			continue;
+
+		for (j = 0; j < groups[i].num_pins; j++)
+			if (groups[i].pins[j] == offset)
+				goto found;
+	}
+
+	dev_err(pctldev->dev, "pin %u does not support GPIO\n", offset);
+	return -EINVAL;
+
+found:
+	return uniphier_pmx_set_one_mux(pctldev, offset, groups[i].muxvals[j]);
+}
+
+static const struct pinmux_ops uniphier_pmxops = {
+	.get_functions_count = uniphier_pmx_get_functions_count,
+	.get_function_name = uniphier_pmx_get_function_name,
+	.get_function_groups = uniphier_pmx_get_function_groups,
+	.set_mux = uniphier_pmx_set_mux,
+	.gpio_request_enable = uniphier_pmx_gpio_request_enable,
+	.strict = true,
+};
+
+int uniphier_pinctrl_probe(struct platform_device *pdev,
+			   struct pinctrl_desc *desc,
+			   struct uniphier_pinctrl_socdata *socdata)
+{
+	struct device *dev = &pdev->dev;
+	struct uniphier_pinctrl_priv *priv;
+
+	if (!socdata ||
+	    !socdata->groups ||
+	    !socdata->groups_count ||
+	    !socdata->functions ||
+	    !socdata->functions_count ||
+	    !socdata->mux_bits ||
+	    !socdata->reg_stride) {
+		dev_err(dev, "pinctrl socdata lacks necessary members\n");
+		return -EINVAL;
+	}
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
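+	/*
+	 * The pinctrl node is also a system controller (syscon) node; all
+	 * register access goes through the shared regmap.
+	 */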
+	priv->regmap = syscon_node_to_regmap(dev->of_node);
+	if (IS_ERR(priv->regmap)) {
+		dev_err(dev, "failed to get regmap\n");
+		return PTR_ERR(priv->regmap);
+	}
+
+	priv->socdata = socdata;
+	desc->pctlops = &uniphier_pctlops;
+	desc->pmxops = &uniphier_pmxops;
+	desc->confops = &uniphier_confops;
+
+	priv->pctldev = pinctrl_register(desc, dev, priv);
+	if (IS_ERR(priv->pctldev)) {
+		dev_err(dev, "failed to register UniPhier pinctrl driver\n");
+		return PTR_ERR(priv->pctldev);
+	}
+
+	platform_set_drvdata(pdev, priv);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uniphier_pinctrl_probe);
+
+int uniphier_pinctrl_remove(struct platform_device *pdev)
+{
+	struct uniphier_pinctrl_priv *priv = platform_get_drvdata(pdev);
+
+	pinctrl_unregister(priv->pctldev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uniphier_pinctrl_remove);
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier.h b/drivers/pinctrl/uniphier/pinctrl-uniphier.h
new file mode 100644
index 0000000..e1e98b8
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PINCTRL_UNIPHIER_H__
+#define __PINCTRL_UNIPHIER_H__
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#define UNIPHIER_PINCTRL_PINMUX_BASE	0x0
+#define UNIPHIER_PINCTRL_LOAD_PINMUX	0x700
+#define UNIPHIER_PINCTRL_DRVCTRL_BASE	0x800
+#define UNIPHIER_PINCTRL_DRV2CTRL_BASE	0x900
+#define UNIPHIER_PINCTRL_PUPDCTRL_BASE	0xa00
+#define UNIPHIER_PINCTRL_IECTRL		0xd00
+
+/* input enable control register bit */
+#define UNIPHIER_PIN_IECTRL_SHIFT	0
+#define UNIPHIER_PIN_IECTRL_BITS	8
+#define UNIPHIER_PIN_IECTRL_MASK	((1UL << (UNIPHIER_PIN_IECTRL_BITS)) \
+					 - 1)
+
+/* drive strength control register number */
+#define UNIPHIER_PIN_DRVCTRL_SHIFT	((UNIPHIER_PIN_IECTRL_SHIFT) + \
+					(UNIPHIER_PIN_IECTRL_BITS))
+#define UNIPHIER_PIN_DRVCTRL_BITS	9
+#define UNIPHIER_PIN_DRVCTRL_MASK	((1UL << (UNIPHIER_PIN_DRVCTRL_BITS)) \
+					 - 1)
+
+/* supported drive strength (mA) */
+#define UNIPHIER_PIN_DRV_STR_SHIFT	((UNIPHIER_PIN_DRVCTRL_SHIFT) + \
+					 (UNIPHIER_PIN_DRVCTRL_BITS))
+#define UNIPHIER_PIN_DRV_STR_BITS	3
+#define UNIPHIER_PIN_DRV_STR_MASK	((1UL << (UNIPHIER_PIN_DRV_STR_BITS)) \
+					 - 1)
+
+/* pull-up / pull-down register number */
+#define UNIPHIER_PIN_PUPDCTRL_SHIFT	((UNIPHIER_PIN_DRV_STR_SHIFT) + \
+					 (UNIPHIER_PIN_DRV_STR_BITS))
+#define UNIPHIER_PIN_PUPDCTRL_BITS	9
+#define UNIPHIER_PIN_PUPDCTRL_MASK	((1UL << (UNIPHIER_PIN_PUPDCTRL_BITS))\
+					 - 1)
+
+/* direction of pull register */
+#define UNIPHIER_PIN_PULL_DIR_SHIFT	((UNIPHIER_PIN_PUPDCTRL_SHIFT) + \
+					 (UNIPHIER_PIN_PUPDCTRL_BITS))
+#define UNIPHIER_PIN_PULL_DIR_BITS	3
+#define UNIPHIER_PIN_PULL_DIR_MASK	((1UL << (UNIPHIER_PIN_PULL_DIR_BITS))\
+					 - 1)
+
+#if UNIPHIER_PIN_PULL_DIR_SHIFT + UNIPHIER_PIN_PULL_DIR_BITS > BITS_PER_LONG
+#error "unable to pack pin attributes."
+#endif
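+
+/*
+ * Each pin's attributes are packed into its drv_data word as follows:
+ *   bits [7:0]   IECTRL   - input enable control bit number
+ *   bits [16:8]  DRVCTRL  - drive strength control register index
+ *   bits [19:17] DRV_STR  - supported drive strength levels
+ *   bits [28:20] PUPDCTRL - pull-up/pull-down control register index
+ *   bits [31:29] PULL_DIR - direction of the pull register
+ */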
+
+#define UNIPHIER_PIN_IECTRL_NONE	(UNIPHIER_PIN_IECTRL_MASK)
+
+/* selectable drive strength */
+enum uniphier_pin_drv_str {
+	UNIPHIER_PIN_DRV_4_8,		/* 2-level control: 4/8 mA */
+	UNIPHIER_PIN_DRV_8_12_16_20,	/* 4-level control: 8/12/16/20 mA */
+	UNIPHIER_PIN_DRV_FIXED_4,	/* fixed to 4 mA */
+	UNIPHIER_PIN_DRV_FIXED_5,	/* fixed to 5 mA */
+	UNIPHIER_PIN_DRV_FIXED_8,	/* fixed to 8 mA */
+	UNIPHIER_PIN_DRV_NONE,		/* no support (input-only pin) */
+};
+
+/* direction of pull register (no pin supports bi-directional pull biasing) */
+enum uniphier_pin_pull_dir {
+	UNIPHIER_PIN_PULL_UP,		/* pull-up or disabled */
+	UNIPHIER_PIN_PULL_DOWN,		/* pull-down or disabled */
+	UNIPHIER_PIN_PULL_UP_FIXED,	/* always pull-up */
+	UNIPHIER_PIN_PULL_DOWN_FIXED,	/* always pull-down */
+	UNIPHIER_PIN_PULL_NONE,		/* no pull register */
+};
+
+#define UNIPHIER_PIN_IECTRL(x) \
+	(((x) & (UNIPHIER_PIN_IECTRL_MASK)) << (UNIPHIER_PIN_IECTRL_SHIFT))
+#define UNIPHIER_PIN_DRVCTRL(x) \
+	(((x) & (UNIPHIER_PIN_DRVCTRL_MASK)) << (UNIPHIER_PIN_DRVCTRL_SHIFT))
+#define UNIPHIER_PIN_DRV_STR(x) \
+	(((x) & (UNIPHIER_PIN_DRV_STR_MASK)) << (UNIPHIER_PIN_DRV_STR_SHIFT))
+#define UNIPHIER_PIN_PUPDCTRL(x) \
+	(((x) & (UNIPHIER_PIN_PUPDCTRL_MASK)) << (UNIPHIER_PIN_PUPDCTRL_SHIFT))
+#define UNIPHIER_PIN_PULL_DIR(x) \
+	(((x) & (UNIPHIER_PIN_PULL_DIR_MASK)) << (UNIPHIER_PIN_PULL_DIR_SHIFT))
+
+#define UNIPHIER_PIN_ATTR_PACKED(iectrl, drvctrl, drv_str, pupdctrl, pull_dir)\
+				(UNIPHIER_PIN_IECTRL(iectrl) |		\
+				 UNIPHIER_PIN_DRVCTRL(drvctrl) |	\
+				 UNIPHIER_PIN_DRV_STR(drv_str) |	\
+				 UNIPHIER_PIN_PUPDCTRL(pupdctrl) |	\
+				 UNIPHIER_PIN_PULL_DIR(pull_dir))
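+
+/*
+ * For example (illustrative values only),
+ * UNIPHIER_PIN_ATTR_PACKED(0, 4, UNIPHIER_PIN_DRV_4_8, 4, UNIPHIER_PIN_PULL_DOWN)
+ * packs IECTRL bit 0, DRVCTRL index 4, 4/8 mA drive capability, PUPDCTRL
+ * index 4, and a pull-down direction into a single drv_data word.
+ */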
+
+static inline unsigned int uniphier_pin_get_iectrl(void *drv_data)
+{
+	return ((unsigned long)drv_data >> UNIPHIER_PIN_IECTRL_SHIFT) &
+						UNIPHIER_PIN_IECTRL_MASK;
+}
+
+static inline unsigned int uniphier_pin_get_drvctrl(void *drv_data)
+{
+	return ((unsigned long)drv_data >> UNIPHIER_PIN_DRVCTRL_SHIFT) &
+						UNIPHIER_PIN_DRVCTRL_MASK;
+}
+
+static inline unsigned int uniphier_pin_get_drv_str(void *drv_data)
+{
+	return ((unsigned long)drv_data >> UNIPHIER_PIN_DRV_STR_SHIFT) &
+						UNIPHIER_PIN_DRV_STR_MASK;
+}
+
+static inline unsigned int uniphier_pin_get_pupdctrl(void *drv_data)
+{
+	return ((unsigned long)drv_data >> UNIPHIER_PIN_PUPDCTRL_SHIFT) &
+						UNIPHIER_PIN_PUPDCTRL_MASK;
+}
+
+static inline unsigned int uniphier_pin_get_pull_dir(void *drv_data)
+{
+	return ((unsigned long)drv_data >> UNIPHIER_PIN_PULL_DIR_SHIFT) &
+						UNIPHIER_PIN_PULL_DIR_MASK;
+}
+
+enum uniphier_pinmux_gpio_range_type {
+	UNIPHIER_PINMUX_GPIO_RANGE_PORT,
+	UNIPHIER_PINMUX_GPIO_RANGE_IRQ,
+	UNIPHIER_PINMUX_GPIO_RANGE_NONE,
+};
+
+struct uniphier_pinctrl_group {
+	const char *name;
+	const unsigned *pins;
+	unsigned num_pins;
+	const unsigned *muxvals;
+	enum uniphier_pinmux_gpio_range_type range_type;
+};
+
+struct uniphier_pinmux_function {
+	const char *name;
+	const char * const *groups;
+	unsigned num_groups;
+};
+
+struct uniphier_pinctrl_socdata {
+	const struct uniphier_pinctrl_group *groups;
+	int groups_count;
+	const struct uniphier_pinmux_function *functions;
+	int functions_count;
+	unsigned mux_bits;
+	unsigned reg_stride;
+	bool load_pinctrl;
+};
+
+#define UNIPHIER_PINCTRL_PIN(a, b, c, d, e, f, g)			\
+{									\
+	.number = a,							\
+	.name = b,							\
+	.drv_data = (void *)UNIPHIER_PIN_ATTR_PACKED(c, d, e, f, g),	\
+}
+
+#define __UNIPHIER_PINCTRL_GROUP(grp, type)				\
+	{								\
+		.name = #grp,						\
+		.pins = grp##_pins,					\
+		.num_pins = ARRAY_SIZE(grp##_pins),			\
+		.muxvals = grp##_muxvals +				\
+			BUILD_BUG_ON_ZERO(ARRAY_SIZE(grp##_pins) !=	\
+					  ARRAY_SIZE(grp##_muxvals)),	\
+		.range_type = type,					\
+	}
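+
+/*
+ * The BUILD_BUG_ON_ZERO() above is a compile-time check that grp##_pins and
+ * grp##_muxvals have the same number of elements; it evaluates to zero (thus
+ * leaving the pointer unchanged) or breaks the build on a mismatch.
+ */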
+
+#define UNIPHIER_PINCTRL_GROUP(grp)					\
+	__UNIPHIER_PINCTRL_GROUP(grp, UNIPHIER_PINMUX_GPIO_RANGE_NONE)
+
+#define UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(grp)			\
+	__UNIPHIER_PINCTRL_GROUP(grp, UNIPHIER_PINMUX_GPIO_RANGE_PORT)
+
+#define UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(grp)			\
+	__UNIPHIER_PINCTRL_GROUP(grp, UNIPHIER_PINMUX_GPIO_RANGE_IRQ)
+
+#define UNIPHIER_PINCTRL_GROUP_SINGLE(grp, array, ofst)			\
+	{								\
+		.name = #grp,						\
+		.pins = array##_pins + ofst,				\
+		.num_pins = 1,						\
+		.muxvals = array##_muxvals + ofst,			\
+	}
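+
+/*
+ * e.g. UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0) defines a one-pin group
+ * that aliases element 0 of the SoC's xirq_pins[] and xirq_muxvals[] arrays.
+ */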
+
+#define UNIPHIER_PINMUX_FUNCTION(func)					\
+	{								\
+		.name = #func,						\
+		.groups = func##_groups,				\
+		.num_groups = ARRAY_SIZE(func##_groups),		\
+	}
+
+struct platform_device;
+struct pinctrl_desc;
+
+int uniphier_pinctrl_probe(struct platform_device *pdev,
+			   struct pinctrl_desc *desc,
+			   struct uniphier_pinctrl_socdata *socdata);
+
+int uniphier_pinctrl_remove(struct platform_device *pdev);
+
+#endif /* __PINCTRL_UNIPHIER_H__ */
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index a04019a..0207274 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -23,7 +23,7 @@
 
 #include <linux/dmi.h>
 #include <linux/i2c.h>
-#include <linux/i2c/atmel_mxt_ts.h>
+#include <linux/platform_data/atmel_mxt_ts.h>
 #include <linux/input.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -111,6 +111,7 @@
 	.irqflags		= IRQF_TRIGGER_FALLING,
 	.t19_num_keys		= ARRAY_SIZE(mxt_t19_keys),
 	.t19_keymap		= mxt_t19_keys,
+	.suspend_mode		= MXT_SUSPEND_T9_CTRL,
 };
 
 static struct i2c_board_info atmel_224s_tp_device = {
@@ -121,6 +122,7 @@
 
 static struct mxt_platform_data atmel_1664s_platform_data = {
 	.irqflags		= IRQF_TRIGGER_FALLING,
+	.suspend_mode		= MXT_SUSPEND_T9_CTRL,
 };
 
 static struct i2c_board_info atmel_1664s_device = {
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 105cfff..28b2a12 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -33,7 +33,7 @@
 #include <linux/suspend.h>
 #include <linux/acpi.h>
 #include <asm/intel_pmc_ipc.h>
-#include <linux/mfd/lpc_ich.h>
+#include <linux/platform_data/itco_wdt.h>
 
 /*
  * IPC registers
@@ -473,9 +473,9 @@
 	},
 };
 
-static struct lpc_ich_info tco_info = {
+static struct itco_wdt_platform_data tco_info = {
 	.name = "Apollo Lake SoC",
-	.iTCO_version = 3,
+	.version = 3,
 };
 
 static int ipc_create_punit_device(void)
@@ -552,8 +552,7 @@
 		goto err;
 	}
 
-	ret = platform_device_add_data(pdev, &tco_info,
-				       sizeof(struct lpc_ich_info));
+	ret = platform_device_add_data(pdev, &tco_info, sizeof(tco_info));
 	if (ret) {
 		dev_err(ipcdev.dev, "Failed to add tco platform data\n");
 		goto err;
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index b1541f4..948d9ab 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -111,6 +111,13 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called pwm-clps711x.
 
+config PWM_CRC
+	bool "Intel Crystalcove (CRC) PWM support"
+	depends on X86 && INTEL_SOC_PMIC
+	help
+	  Generic PWM framework driver for Crystalcove (CRC) PMIC based PWM
+	  control.
+
 config PWM_EP93XX
 	tristate "Cirrus Logic EP93xx PWM support"
 	depends on ARCH_EP93XX
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index ec50eb5..d186f35 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_PWM_BCM2835)	+= pwm-bcm2835.o
 obj-$(CONFIG_PWM_BFIN)		+= pwm-bfin.o
 obj-$(CONFIG_PWM_CLPS711X)	+= pwm-clps711x.o
+obj-$(CONFIG_PWM_CRC)		+= pwm-crc.o
 obj-$(CONFIG_PWM_EP93XX)	+= pwm-ep93xx.o
 obj-$(CONFIG_PWM_FSL_FTM)	+= pwm-fsl-ftm.o
 obj-$(CONFIG_PWM_IMG)		+= pwm-img.o
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
new file mode 100644
index 0000000..7101c70
--- /dev/null
+++ b/drivers/pwm/pwm-crc.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Shobhit Kumar <shobhit.kumar@intel.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/pwm.h>
+
+#define PWM0_CLK_DIV		0x4B
+#define  PWM_OUTPUT_ENABLE	BIT(7)
+#define  PWM_DIV_CLK_0		0x00 /* DIVIDECLK = BASECLK */
+#define  PWM_DIV_CLK_100	0x63 /* DIVIDECLK = BASECLK/100 */
+#define  PWM_DIV_CLK_128	0x7F /* DIVIDECLK = BASECLK/128 */
+
+#define PWM0_DUTY_CYCLE		0x4E
+#define BACKLIGHT_EN		0x51
+
+#define PWM_MAX_LEVEL		0xFF
+
+#define PWM_BASE_CLK		6000000  /* 6 MHz */
+#define PWM_MAX_PERIOD_NS	21333    /* 46.875 kHz */
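+/*
+ * The maximum period is NSEC_PER_SEC * 128 / PWM_BASE_CLK, i.e. the output
+ * period when the largest clock divider (PWM_DIV_CLK_128) is selected.
+ */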
+
+/**
+ * struct crystalcove_pwm - Crystal Cove PWM controller
+ * @chip: the abstract pwm_chip structure.
+ * @regmap: the regmap from the parent device.
+ */
+struct crystalcove_pwm {
+	struct pwm_chip chip;
+	struct regmap *regmap;
+};
+
+static inline struct crystalcove_pwm *to_crc_pwm(struct pwm_chip *pc)
+{
+	return container_of(pc, struct crystalcove_pwm, chip);
+}
+
+static int crc_pwm_enable(struct pwm_chip *c, struct pwm_device *pwm)
+{
+	struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+
+	regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 1);
+
+	return 0;
+}
+
+static void crc_pwm_disable(struct pwm_chip *c, struct pwm_device *pwm)
+{
+	struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+
+	regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 0);
+}
+
+static int crc_pwm_config(struct pwm_chip *c, struct pwm_device *pwm,
+			  int duty_ns, int period_ns)
+{
+	struct crystalcove_pwm *crc_pwm = to_crc_pwm(c);
+	struct device *dev = crc_pwm->chip.dev;
+	int level;
+
+	if (period_ns > PWM_MAX_PERIOD_NS) {
+		dev_err(dev, "un-supported period_ns\n");
+		return -EINVAL;
+	}
+
+	if (pwm->period != period_ns) {
+		int clk_div;
+
+		/* changing the clock divisor, so disable the output first */
+		crc_pwm_disable(c, pwm);
+		clk_div = PWM_BASE_CLK * period_ns / NSEC_PER_SEC;
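+		/*
+		 * e.g. the maximum period of 21333 ns yields clk_div 127
+		 * (PWM_DIV_CLK_128), i.e. BASECLK / 128
+		 */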
+
+		regmap_write(crc_pwm->regmap, PWM0_CLK_DIV,
+					clk_div | PWM_OUTPUT_ENABLE);
+
+		/* enable back */
+		crc_pwm_enable(c, pwm);
+	}
+
+	/* change the pwm duty cycle */
+	level = duty_ns * PWM_MAX_LEVEL / period_ns;
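+	/* e.g. duty_ns = period_ns / 2 maps to level 127 of 255 (~50%) */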
+	regmap_write(crc_pwm->regmap, PWM0_DUTY_CYCLE, level);
+
+	return 0;
+}
+
+static const struct pwm_ops crc_pwm_ops = {
+	.config = crc_pwm_config,
+	.enable = crc_pwm_enable,
+	.disable = crc_pwm_disable,
+};
+
+static int crystalcove_pwm_probe(struct platform_device *pdev)
+{
+	struct crystalcove_pwm *pwm;
+	struct device *dev = pdev->dev.parent;
+	struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+	pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
+	if (!pwm)
+		return -ENOMEM;
+
+	pwm->chip.dev = &pdev->dev;
+	pwm->chip.ops = &crc_pwm_ops;
+	pwm->chip.base = -1;
+	pwm->chip.npwm = 1;
+
+	/*
+	 * The PWM registers live in the parent PMIC's address space, so
+	 * reuse the PMIC's regmap for register access.
+	 */
+	pwm->regmap = pmic->regmap;
+
+	platform_set_drvdata(pdev, pwm);
+
+	return pwmchip_add(&pwm->chip);
+}
+
+static int crystalcove_pwm_remove(struct platform_device *pdev)
+{
+	struct crystalcove_pwm *pwm = platform_get_drvdata(pdev);
+
+	return pwmchip_remove(&pwm->chip);
+}
+
+static struct platform_driver crystalcove_pwm_driver = {
+	.probe = crystalcove_pwm_probe,
+	.remove = crystalcove_pwm_remove,
+	.driver = {
+		.name = "crystal_cove_pwm",
+	},
+};
+
+builtin_platform_driver(crystalcove_pwm_driver);
diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
index 3f9d0ac..74c0a33 100644
--- a/drivers/rtc/rtc-st-lpc.c
+++ b/drivers/rtc/rtc-st-lpc.c
@@ -208,7 +208,7 @@
 		return -EINVAL;
 	}
 
-	/* LPC can either run in RTC or WDT mode */
+	/* LPC can either run as a Clocksource or in RTC or WDT mode */
 	if (mode != ST_LPC_MODE_RTC)
 		return -ENODEV;
 
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 01a7339..c00ac46 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -529,7 +529,7 @@
 	list_add_tail(&port->list, &adapter->port_list);
 	write_unlock_irq(&adapter->port_list_lock);
 
-	atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
+	atomic_or(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
 
 	return port;
 
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index acde3f5..3fb4109 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -190,7 +190,7 @@
 		if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
 			if (scsi_device_get(sdev))
 				return NULL;
-		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
 				&zfcp_sdev->status);
 		erp_action = &zfcp_sdev->erp_action;
 		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
@@ -206,7 +206,7 @@
 		if (!get_device(&port->dev))
 			return NULL;
 		zfcp_erp_action_dismiss_port(port);
-		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
+		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
 		erp_action = &port->erp_action;
 		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
 		erp_action->port = port;
@@ -217,7 +217,7 @@
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
 		kref_get(&adapter->ref);
 		zfcp_erp_action_dismiss_adapter(adapter);
-		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
+		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
 		erp_action = &adapter->erp_action;
 		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
 		if (!(atomic_read(&adapter->status) &
@@ -254,7 +254,7 @@
 	act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
 	if (!act)
 		goto out;
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
+	atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
 	++adapter->erp_total_count;
 	list_add_tail(&act->list, &adapter->erp_ready_head);
 	wake_up(&adapter->erp_ready_wq);
@@ -486,14 +486,14 @@
 {
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
 		zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
-	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
+	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
 }
 
 static void zfcp_erp_port_unblock(struct zfcp_port *port)
 {
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
 		zfcp_dbf_rec_run("erpubl1", &port->erp_action);
-	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
+	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
 }
 
 static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
@@ -502,7 +502,7 @@
 
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
 		zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
-	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
+	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
 }
 
 static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
@@ -642,7 +642,7 @@
 	read_lock_irqsave(&adapter->erp_lock, flags);
 	if (list_empty(&adapter->erp_ready_head) &&
 	    list_empty(&adapter->erp_running_head)) {
-			atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
+			atomic_andnot(ZFCP_STATUS_ADAPTER_ERP_PENDING,
 					  &adapter->status);
 			wake_up(&adapter->erp_done_wqh);
 	}
@@ -665,16 +665,16 @@
 	int sleep = 1;
 	struct zfcp_adapter *adapter = erp_action->adapter;
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
+	atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
 
 	for (retries = 7; retries; retries--) {
-		atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+		atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
 				  &adapter->status);
 		write_lock_irq(&adapter->erp_lock);
 		zfcp_erp_action_to_running(erp_action);
 		write_unlock_irq(&adapter->erp_lock);
 		if (zfcp_fsf_exchange_config_data(erp_action)) {
-			atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+			atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
 					  &adapter->status);
 			return ZFCP_ERP_FAILED;
 		}
@@ -692,7 +692,7 @@
 		sleep *= 2;
 	}
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+	atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
 			  &adapter->status);
 
 	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
@@ -764,7 +764,7 @@
 	/* all ports and LUNs are closed */
 	zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+	atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
 			  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
 }
 
@@ -773,7 +773,7 @@
 	struct zfcp_adapter *adapter = act->adapter;
 
 	if (zfcp_qdio_open(adapter->qdio)) {
-		atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+		atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
 				  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
 				  &adapter->status);
 		return ZFCP_ERP_FAILED;
@@ -784,7 +784,7 @@
 		return ZFCP_ERP_FAILED;
 	}
 
-	atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &adapter->status);
+	atomic_or(ZFCP_STATUS_COMMON_OPEN, &adapter->status);
 
 	return ZFCP_ERP_SUCCEEDED;
 }
@@ -948,7 +948,7 @@
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
-	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED,
+	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED,
 			  &zfcp_sdev->status);
 }
 
@@ -1187,18 +1187,18 @@
 	switch (erp_action->action) {
 	case ZFCP_ERP_ACTION_REOPEN_LUN:
 		zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
-		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
 				  &zfcp_sdev->status);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
-		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
 				  &erp_action->port->status);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
 				  &erp_action->adapter->status);
 		break;
 	}
@@ -1422,19 +1422,19 @@
 	unsigned long flags;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 
-	atomic_set_mask(mask, &adapter->status);
+	atomic_or(mask, &adapter->status);
 
 	if (!common_mask)
 		return;
 
 	read_lock_irqsave(&adapter->port_list_lock, flags);
 	list_for_each_entry(port, &adapter->port_list, list)
-		atomic_set_mask(common_mask, &port->status);
+		atomic_or(common_mask, &port->status);
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
 	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, adapter->scsi_host)
-		atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+		atomic_or(common_mask, &sdev_to_zfcp(sdev)->status);
 	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
 }
 
@@ -1453,7 +1453,7 @@
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
 
-	atomic_clear_mask(mask, &adapter->status);
+	atomic_andnot(mask, &adapter->status);
 
 	if (!common_mask)
 		return;
@@ -1463,7 +1463,7 @@
 
 	read_lock_irqsave(&adapter->port_list_lock, flags);
 	list_for_each_entry(port, &adapter->port_list, list) {
-		atomic_clear_mask(common_mask, &port->status);
+		atomic_andnot(common_mask, &port->status);
 		if (clear_counter)
 			atomic_set(&port->erp_counter, 0);
 	}
@@ -1471,7 +1471,7 @@
 
 	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, adapter->scsi_host) {
-		atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+		atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status);
 		if (clear_counter)
 			atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
 	}
@@ -1491,7 +1491,7 @@
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 	unsigned long flags;
 
-	atomic_set_mask(mask, &port->status);
+	atomic_or(mask, &port->status);
 
 	if (!common_mask)
 		return;
@@ -1499,7 +1499,7 @@
 	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port)
-			atomic_set_mask(common_mask,
+			atomic_or(common_mask,
 					&sdev_to_zfcp(sdev)->status);
 	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
 }
@@ -1518,7 +1518,7 @@
 	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
 	unsigned long flags;
 
-	atomic_clear_mask(mask, &port->status);
+	atomic_andnot(mask, &port->status);
 
 	if (!common_mask)
 		return;
@@ -1529,7 +1529,7 @@
 	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port) {
-			atomic_clear_mask(common_mask,
+			atomic_andnot(common_mask,
 					  &sdev_to_zfcp(sdev)->status);
 			if (clear_counter)
 				atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
@@ -1546,7 +1546,7 @@
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
-	atomic_set_mask(mask, &zfcp_sdev->status);
+	atomic_or(mask, &zfcp_sdev->status);
 }
 
 /**
@@ -1558,7 +1558,7 @@
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
-	atomic_clear_mask(mask, &zfcp_sdev->status);
+	atomic_andnot(mask, &zfcp_sdev->status);
 
 	if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
 		atomic_set(&zfcp_sdev->erp_counter, 0);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 25d49f3..237688a 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -508,7 +508,7 @@
 	/* port is good, unblock rport without going through erp */
 	zfcp_scsi_schedule_rport_register(port);
  out:
-	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 	put_device(&port->dev);
 	kmem_cache_free(zfcp_fc_req_cache, fc_req);
 }
@@ -564,14 +564,14 @@
 	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
 		goto out;
 
-	atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	atomic_or(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 
 	retval = zfcp_fc_adisc(port);
 	if (retval == 0)
 		return;
 
 	/* send of ADISC was not possible */
-	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
 
 out:
@@ -640,7 +640,7 @@
 	if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
 		return;
 
-	atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
+	atomic_andnot(ZFCP_STATUS_COMMON_NOESC, &port->status);
 
 	if ((port->supported_classes != 0) ||
 	    !list_empty(&port->unit_list))
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 4ac73e0..522a633 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -114,7 +114,7 @@
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
 		return;
 
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
+	atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
 
 	zfcp_scsi_schedule_rports_block(adapter);
 
@@ -345,7 +345,7 @@
 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
 		break;
 	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+		atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
 				&adapter->status);
 		break;
 	case FSF_PROT_DUPLICATE_REQUEST_ID:
@@ -554,7 +554,7 @@
 			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
 			return;
 		}
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
 				&adapter->status);
 		break;
 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
@@ -567,7 +567,7 @@
 
 		/* avoids adapter shutdown to be able to recognize
 		 * events such as LINK UP */
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
 				&adapter->status);
 		zfcp_fsf_link_down_info_eval(req,
 			&qtcb->header.fsf_status_qual.link_down_info);
@@ -1394,9 +1394,9 @@
 		break;
 	case FSF_GOOD:
 		port->handle = header->port_handle;
-		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
+		atomic_or(ZFCP_STATUS_COMMON_OPEN |
 				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
-		atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED,
+		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
 		                  &port->status);
 		/* check whether D_ID has changed during open */
 		/*
@@ -1677,10 +1677,10 @@
 	case FSF_PORT_BOXED:
 		/* can't use generic zfcp_erp_modify_port_status because
 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
-		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
 		shost_for_each_device(sdev, port->adapter->scsi_host)
 			if (sdev_to_zfcp(sdev)->port == port)
-				atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
 						  &sdev_to_zfcp(sdev)->status);
 		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -1700,10 +1700,10 @@
 		/* can't use generic zfcp_erp_modify_port_status because
 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
 		 */
-		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
 		shost_for_each_device(sdev, port->adapter->scsi_host)
 			if (sdev_to_zfcp(sdev)->port == port)
-				atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
 						  &sdev_to_zfcp(sdev)->status);
 		break;
 	}
@@ -1766,7 +1766,7 @@
 
 	zfcp_sdev = sdev_to_zfcp(sdev);
 
-	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
+	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
 			  ZFCP_STATUS_COMMON_ACCESS_BOXED,
 			  &zfcp_sdev->status);
 
@@ -1822,7 +1822,7 @@
 
 	case FSF_GOOD:
 		zfcp_sdev->lun_handle = header->lun_handle;
-		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
+		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
 		break;
 	}
 }
@@ -1913,7 +1913,7 @@
 		}
 		break;
 	case FSF_GOOD:
-		atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
+		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
 		break;
 	}
 }
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 495e1cb..dbf2b547 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -349,7 +349,7 @@
 
 	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
 	spin_lock_irq(&qdio->req_q_lock);
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
+	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
 	spin_unlock_irq(&qdio->req_q_lock);
 
 	wake_up(&qdio->req_q_wq);
@@ -384,7 +384,7 @@
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
 		return -EIO;
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
 			  &qdio->adapter->status);
 
 	zfcp_qdio_setup_init_data(&init_data, qdio);
@@ -396,14 +396,14 @@
 		goto failed_qdio;
 
 	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
+		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
 				&qdio->adapter->status);
 
 	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
 		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
 	} else {
-		atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
 		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
 	}
 
@@ -427,7 +427,7 @@
 	/* set index of first available SBALS / number of available SBALS */
 	qdio->req_q_idx = 0;
 	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
+	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
 
 	if (adapter->scsi_host) {
 		adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
@@ -499,6 +499,6 @@
 
 	rc = ccw_device_siosl(adapter->ccw_device);
 	if (!rc)
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+		atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
 				&adapter->status);
 }
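
The zfcp hunks above are a mechanical conversion from the old s390-private atomic_set_mask()/atomic_clear_mask() helpers to the generic atomic_or()/atomic_andnot() API. A minimal sketch of the pattern, using an illustrative flag rather than a real zfcp status bit:

#include <linux/atomic.h>
#include <linux/bitops.h>

#define EXAMPLE_FLAG_READY	BIT(0)	/* illustrative, not a zfcp flag */

static void example_toggle_status(atomic_t *status)
{
	/* was: atomic_set_mask(EXAMPLE_FLAG_READY, status); */
	atomic_or(EXAMPLE_FLAG_READY, status);

	/* was: atomic_clear_mask(EXAMPLE_FLAG_READY, status); */
	atomic_andnot(EXAMPLE_FLAG_READY, status);
}
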
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 98d06d1..d5cdc47 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2051,9 +2051,49 @@
 	return rc;
 }
 
+static uint bnx2fc_npiv_create_vports(struct fc_lport *lport,
+				      struct cnic_fc_npiv_tbl *npiv_tbl)
+{
+	struct fc_vport_identifiers vpid;
+	uint i, created = 0;
+
+	if (npiv_tbl->count > MAX_NPIV_ENTRIES) {
+		BNX2FC_HBA_DBG(lport, "NPIV table entry count exceeds the maximum\n");
+		goto done;
+	}
+
+	/* Sanity check the first entry to make sure it's not 0 */
+	if (wwn_to_u64(npiv_tbl->wwnn[0]) == 0 &&
+	    wwn_to_u64(npiv_tbl->wwpn[0]) == 0) {
+		BNX2FC_HBA_DBG(lport, "First NPIV table entries invalid.\n");
+		goto done;
+	}
+
+	vpid.roles = FC_PORT_ROLE_FCP_INITIATOR;
+	vpid.vport_type = FC_PORTTYPE_NPIV;
+	vpid.disable = false;
+
+	for (i = 0; i < npiv_tbl->count; i++) {
+		vpid.node_name = wwn_to_u64(npiv_tbl->wwnn[i]);
+		vpid.port_name = wwn_to_u64(npiv_tbl->wwpn[i]);
+		scnprintf(vpid.symbolic_name, sizeof(vpid.symbolic_name),
+		    "NPIV[%u]:%016llx-%016llx",
+		    created, vpid.port_name, vpid.node_name);
+		if (fc_vport_create(lport->host, 0, &vpid))
+			created++;
+		else
+			BNX2FC_HBA_DBG(lport, "Failed to create vport\n");
+	}
+done:
+	return created;
+}
+
 static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
 {
 	struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
+	struct bnx2fc_hba *hba;
+	struct cnic_fc_npiv_tbl npiv_tbl;
+	struct fc_lport *lport;
 
 	if (interface->enabled == false) {
 		if (!ctlr->lp) {
@@ -2064,6 +2104,32 @@
 			interface->enabled = true;
 		}
 	}
+
+	/* Create static NPIV ports if any are contained in NVRAM */
+	hba = interface->hba;
+	lport = ctlr->lp;
+
+	if (!hba)
+		goto done;
+
+	if (!hba->cnic)
+		goto done;
+
+	if (!lport)
+		goto done;
+
+	if (!lport->host)
+		goto done;
+
+	if (!hba->cnic->get_fc_npiv_tbl)
+		goto done;
+
+	memset(&npiv_tbl, 0, sizeof(npiv_tbl));
+	if (hba->cnic->get_fc_npiv_tbl(hba->cnic, &npiv_tbl))
+		goto done;
+
+	bnx2fc_npiv_create_vports(lport, &npiv_tbl);
+done:
 	return 0;
 }
 
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 0813163..195c41d 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -81,7 +81,7 @@
 err:
 	sg = table->sgl;
 	for (i -= 1; i >= 0; i--) {
-		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+		gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
 			      sg->length);
 		sg = sg_next(sg);
 	}
@@ -109,7 +109,7 @@
 							DMA_BIDIRECTIONAL);
 
 	for_each_sg(table->sgl, sg, table->nents, i) {
-		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+		gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
 			      sg->length);
 	}
 	chunk_heap->allocated -= allocated_size;
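
The ion change swaps page_to_phys(sg_page(sg)) for sg_phys(sg) & PAGE_MASK. Since sg_phys() is page_to_phys(sg_page(sg)) + sg->offset, masking with PAGE_MASK recovers the same page-aligned address without dereferencing the struct page. A sketch of the equivalence:

#include <linux/scatterlist.h>

/* Page-aligned base of an sg entry; equivalent to
 * page_to_phys(sg_page(sg)) for offsets within one page.
 */
static dma_addr_t example_sg_base(struct scatterlist *sg)
{
	return sg_phys(sg) & PAGE_MASK;
}
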
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 22853d3..d1a33a9 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -70,7 +70,14 @@
  */
 static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
 {
-	if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
+	int port;
+
+	if (octeon_has_feature(OCTEON_FEATURE_PKND))
+		port = work->word0.pip.cn68xx.pknd;
+	else
+		port = work->word1.cn38xx.ipprt;
+
+	if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
 		/*
 		 * Ignore length errors on min size packets. Some
 		 * equipment incorrectly pads packets to 64+4FCS
@@ -87,8 +94,8 @@
 		 * packet to determine if we can remove a non spec
 		 * preamble and generate a correct packet.
 		 */
-		int interface = cvmx_helper_get_interface_num(work->ipprt);
-		int index = cvmx_helper_get_interface_index_num(work->ipprt);
+		int interface = cvmx_helper_get_interface_num(port);
+		int index = cvmx_helper_get_interface_index_num(port);
 		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
 
 		gmxx_rxx_frm_ctl.u64 =
@@ -99,7 +106,7 @@
 			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
 			int i = 0;
 
-			while (i < work->len - 1) {
+			while (i < work->word1.len - 1) {
 				if (*ptr != 0x55)
 					break;
 				ptr++;
@@ -109,18 +116,18 @@
 			if (*ptr == 0xd5) {
 				/*
 				  printk_ratelimited("Port %d received 0xd5 preamble\n",
-					  work->ipprt);
+					  port);
 				 */
 				work->packet_ptr.s.addr += i + 1;
-				work->len -= i + 5;
+				work->word1.len -= i + 5;
 			} else if ((*ptr & 0xf) == 0xd) {
 				/*
 				  printk_ratelimited("Port %d received 0x?d preamble\n",
-					  work->ipprt);
+					  port);
 				 */
 				work->packet_ptr.s.addr += i;
-				work->len -= i + 4;
-				for (i = 0; i < work->len; i++) {
+				work->word1.len -= i + 4;
+				for (i = 0; i < work->word1.len; i++) {
 					*ptr =
 					    ((*ptr & 0xf0) >> 4) |
 					    ((*(ptr + 1) & 0xf) << 4);
@@ -128,7 +135,7 @@
 				}
 			} else {
 				printk_ratelimited("Port %d unknown preamble, packet dropped\n",
-						   work->ipprt);
+						   port);
 				/*
 				   cvmx_helper_dump_packet(work);
 				 */
@@ -138,7 +145,7 @@
 		}
 	} else {
 		printk_ratelimited("Port %d receive error code %d, packet dropped\n",
-				   work->ipprt, work->word2.snoip.err_code);
+				   port, work->word2.snoip.err_code);
 		cvm_oct_free_work(work);
 		return 1;
 	}
@@ -172,9 +179,16 @@
 	}
 
 	/* Only allow work for our group (and preserve priorities) */
-	old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
-	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
-		       (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
+		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
+				1ull << pow_receive_group);
+		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
+	} else {
+		old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
+		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
+			(old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
+	}
 
 	if (USE_ASYNC_IOBDMA) {
 		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
@@ -186,6 +200,7 @@
 		struct sk_buff **pskb = NULL;
 		int skb_in_hw;
 		cvmx_wqe_t *work;
+		int port;
 
 		if (USE_ASYNC_IOBDMA && did_work_request)
 			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
@@ -195,12 +210,19 @@
 		prefetch(work);
 		did_work_request = 0;
 		if (work == NULL) {
-			union cvmx_pow_wq_int wq_int;
+			if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+				cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
+					       1ull << pow_receive_group);
+				cvmx_write_csr(CVMX_SSO_WQ_INT,
+					       1ull << pow_receive_group);
+			} else {
+				union cvmx_pow_wq_int wq_int;
 
-			wq_int.u64 = 0;
-			wq_int.s.iq_dis = 1 << pow_receive_group;
-			wq_int.s.wq_int = 1 << pow_receive_group;
-			cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
+				wq_int.u64 = 0;
+				wq_int.s.iq_dis = 1 << pow_receive_group;
+				wq_int.s.wq_int = 1 << pow_receive_group;
+				cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
+			}
 			break;
 		}
 		pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) -
@@ -220,7 +242,13 @@
 			prefetch(&skb->head);
 			prefetch(&skb->len);
 		}
-		prefetch(cvm_oct_device[work->ipprt]);
+
+		if (octeon_has_feature(OCTEON_FEATURE_PKND))
+			port = work->word0.pip.cn68xx.pknd;
+		else
+			port = work->word1.cn38xx.ipprt;
+
+		prefetch(cvm_oct_device[port]);
 
 		/* Immediately throw away all packets with receive errors */
 		if (unlikely(work->word2.snoip.rcv_error)) {
@@ -237,7 +265,7 @@
 			skb->data = skb->head + work->packet_ptr.s.addr -
 				cvmx_ptr_to_phys(skb->head);
 			prefetch(skb->data);
-			skb->len = work->len;
+			skb->len = work->word1.len;
 			skb_set_tail_pointer(skb, skb->len);
 			packet_not_copied = 1;
 		} else {
@@ -245,7 +273,7 @@
 			 * We have to copy the packet. First allocate
 			 * an skbuff for it.
 			 */
-			skb = dev_alloc_skb(work->len);
+			skb = dev_alloc_skb(work->word1.len);
 			if (!skb) {
 				cvm_oct_free_work(work);
 				continue;
@@ -268,13 +296,14 @@
 					else
 						ptr += 6;
 				}
-				memcpy(skb_put(skb, work->len), ptr, work->len);
+				memcpy(skb_put(skb, work->word1.len), ptr,
+				       work->word1.len);
 				/* No packet buffers to free */
 			} else {
 				int segments = work->word2.s.bufs;
 				union cvmx_buf_ptr segment_ptr =
 				    work->packet_ptr;
-				int len = work->len;
+				int len = work->word1.len;
 
 				while (segments--) {
 					union cvmx_buf_ptr next_ptr =
@@ -310,10 +339,9 @@
 			}
 			packet_not_copied = 0;
 		}
-
-		if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) &&
-			   cvm_oct_device[work->ipprt])) {
-			struct net_device *dev = cvm_oct_device[work->ipprt];
+		if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
+			   cvm_oct_device[port])) {
+			struct net_device *dev = cvm_oct_device[port];
 			struct octeon_ethernet *priv = netdev_priv(dev);
 
 			/*
@@ -333,7 +361,7 @@
 					skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 				/* Increment RX stats for virtual ports */
-				if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
+				if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
 #ifdef CONFIG_64BIT
 					atomic64_add(1,
 						     (atomic64_t *)&priv->stats.rx_packets);
@@ -368,7 +396,7 @@
 			 * doesn't exist.
 			 */
 			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
-				   work->ipprt);
+				   port);
 			dev_kfree_skb_irq(skb);
 		}
 		/*
@@ -390,7 +418,13 @@
 		}
 	}
 	/* Restore the original POW group mask */
-	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
+		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
+	} else {
+		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
+	}
+
 	if (USE_ASYNC_IOBDMA) {
 		/* Restore the scratch area */
 		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
@@ -422,8 +456,6 @@
 {
 	int i;
 	struct net_device *dev_for_napi = NULL;
-	union cvmx_pow_wq_int_thrx int_thr;
-	union cvmx_pow_wq_int_pc int_pc;
 
 	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
 		if (cvm_oct_device[i]) {
@@ -449,15 +481,34 @@
 
 	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
 
-	int_thr.u64 = 0;
-	int_thr.s.tc_en = 1;
-	int_thr.s.tc_thr = 1;
 	/* Enable POW interrupt when our port has at least one packet */
-	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64);
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		union cvmx_sso_wq_int_thrx int_thr;
+		union cvmx_pow_wq_int_pc int_pc;
 
-	int_pc.u64 = 0;
-	int_pc.s.pc_thr = 5;
-	cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+		int_thr.u64 = 0;
+		int_thr.s.tc_en = 1;
+		int_thr.s.tc_thr = 1;
+		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group),
+			       int_thr.u64);
+
+		int_pc.u64 = 0;
+		int_pc.s.pc_thr = 5;
+		cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
+	} else {
+		union cvmx_pow_wq_int_thrx int_thr;
+		union cvmx_pow_wq_int_pc int_pc;
+
+		int_thr.u64 = 0;
+		int_thr.s.tc_en = 1;
+		int_thr.s.tc_thr = 1;
+		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
+			       int_thr.u64);
+
+		int_pc.u64 = 0;
+		int_pc.s.pc_thr = 5;
+		cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+	}
 
 	/* Schedule NAPI now. This will indirectly enable the interrupt. */
 	napi_schedule(&cvm_oct_napi);
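
The receive path now branches on the SoC model for every work-queue register access, since CN68XX moved these controls from the POW unit to the SSO unit. The recurring pattern, condensed into one helper (a sketch; symbols as in the driver):

static u64 example_read_group_mask(int coreid)
{
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		return cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));

	return cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
}
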
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index e2df041..9e2116f 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -591,13 +591,14 @@
 	 * Fill in some of the work queue fields. We may need to add
 	 * more if the software at the other end needs them.
 	 */
-	work->hw_chksum = skb->csum;
-	work->len = skb->len;
-	work->ipprt = priv->port;
-	work->qos = priv->port & 0x7;
-	work->grp = pow_send_group;
-	work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
-	work->tag = pow_send_group;	/* FIXME */
+	if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
+		work->word0.pip.cn38xx.hw_chksum = skb->csum;
+	work->word1.len = skb->len;
+	cvmx_wqe_set_port(work, priv->port);
+	cvmx_wqe_set_qos(work, priv->port & 0x7);
+	cvmx_wqe_set_grp(work, pow_send_group);
+	work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+	work->word1.tag = pow_send_group;	/* FIXME */
 	/* Default to zero. Sets of zero later are commented out */
 	work->word2.u64 = 0;
 	work->word2.s.bufs = 1;
@@ -677,8 +678,8 @@
 	}
 
 	/* Submit the packet to the POW */
-	cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
-			     work->grp);
+	cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type,
+			     cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work));
 	priv->stats.tx_packets++;
 	priv->stats.tx_bytes += skb->len;
 	dev_consume_skb_any(skb);
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
index 1ba789a..45f024b 100644
--- a/drivers/staging/octeon/ethernet-util.h
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -8,6 +8,10 @@
  * published by the Free Software Foundation.
  */
 
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-util.h>
+
 /**
  * cvm_oct_get_buffer_ptr - convert packet data address to pointer
  * @packet_ptr: Packet data hardware address
@@ -28,14 +32,12 @@
  */
 static inline int INTERFACE(int ipd_port)
 {
-	if (ipd_port < 32)	/* Interface 0 or 1 for RGMII,GMII,SPI, etc */
-		return ipd_port >> 4;
-	else if (ipd_port < 36)	/* Interface 2 for NPI */
-		return 2;
-	else if (ipd_port < 40)	/* Interface 3 for loopback */
-		return 3;
-	else if (ipd_port == 40)	/* Non existent interface for POW0 */
-		return 4;
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+
+	if (interface >= 0)
+		return interface;
+	else if (ipd_port == CVMX_PIP_NUM_INPUT_PORTS)
+		return 10;
 	panic("Illegal ipd_port %d passed to INTERFACE\n", ipd_port);
 }
 
@@ -47,7 +49,5 @@
  */
 static inline int INDEX(int ipd_port)
 {
-	if (ipd_port < 32)
-		return ipd_port & 15;
-	return ipd_port & 3;
+	return cvmx_helper_get_interface_index_num(ipd_port);
 }
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 0718b35..7274fda 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -152,7 +152,7 @@
 			     num_packet_buffers);
 	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
 		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
-				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
+				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);
 
 #ifdef __LITTLE_ENDIAN
 	{
@@ -860,7 +860,10 @@
 	int port;
 
 	/* Disable POW interrupt */
-	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
+	else
+		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
 
 	cvmx_ipd_disable();
 
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index 123468a..b8d7d04 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -480,7 +480,7 @@
 	/* dev->destructor = free_netdev; */
 	PRINT_INFO(CORECONFIG_DBG, "In Ethernet setup function\n");
 	ether_setup(dev);
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->type = ARPHRD_IEEE80211_RADIOTAP;
 	eth_zero_addr(dev->dev_addr);
 
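
Setting tx_queue_len to 0 used to be the way to mark a virtual device as queue-less; the wilc1000 monitor interface now uses the dedicated IFF_NO_QUEUE flag instead. A sketch of the new idiom (example_mon_setup is an illustrative name):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void example_mon_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->priv_flags |= IFF_NO_QUEUE;	/* no qdisc attached */
}
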
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 384cf88..47a833f 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -138,16 +138,12 @@
 		sg_per_table = (total_sg_needed > max_sg_per_table) ?
 			max_sg_per_table : total_sg_needed;
 
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-
 		/*
 		 * Reserve extra element for chain entry
 		 */
 		if (sg_per_table < total_sg_needed)
 			chain_entry = 1;
 
-#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
-
 		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
 				GFP_KERNEL);
 		if (!sg) {
@@ -158,15 +154,11 @@
 
 		sg_init_table(sg, sg_per_table + chain_entry);
 
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-
 		if (i > 0) {
 			sg_chain(sg_table[i - 1].sg_table,
 				 max_sg_per_table + 1, sg);
 		}
 
-#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
-
 		sg_table[i].sg_table = sg;
 		sg_table[i].rd_sg_count = sg_per_table;
 		sg_table[i].page_start_offset = page_offset;
@@ -430,42 +422,6 @@
 	prot_sg = &prot_table->sg_table[prot_page -
 					prot_table->page_start_offset];
 
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-
-	prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
-				   PAGE_SIZE);
-
-	/*
-	 * Allocate temporaly contiguous scatterlist entries if prot pages
-	 * straddles multiple scatterlist tables.
-	 */
-	if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
-		int i;
-
-		prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
-		if (!prot_sg)
-			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
-		need_to_release = true;
-		sg_init_table(prot_sg, prot_npages);
-
-		for (i = 0; i < prot_npages; i++) {
-			if (prot_page + i > prot_table->page_end_offset) {
-				prot_table = rd_get_prot_table(dev,
-								prot_page + i);
-				if (!prot_table) {
-					kfree(prot_sg);
-					return rc;
-				}
-				sg_unmark_end(&prot_sg[i - 1]);
-			}
-			prot_sg[i] = prot_table->sg_table[prot_page + i -
-						prot_table->page_start_offset];
-		}
-	}
-
-#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */
-
 	if (is_read)
 		rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
 				    prot_sg, prot_offset);
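
With scatterlist chaining now available unconditionally, target_core_rd can drop its non-chaining fallback. The chain entry reserved at the end of each table links it to the next one; roughly:

#include <linux/scatterlist.h>

/* 'prev' was allocated with nents + 1 entries; the extra entry is
 * consumed by the chain link and carries no data (sketch).
 */
static void example_chain_tables(struct scatterlist *prev,
				 unsigned int nents,
				 struct scatterlist *next)
{
	sg_chain(prev, nents + 1, next);
}
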
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index 4190199..a75146f 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -240,9 +240,9 @@
 {
 	struct hvsi_control *header = (struct hvsi_control *)packet;
 
-	switch (header->verb) {
+	switch (be16_to_cpu(header->verb)) {
 		case VSV_MODEM_CTL_UPDATE:
-			if ((header->word & HVSI_TSCD) == 0) {
+			if ((be32_to_cpu(header->word) & HVSI_TSCD) == 0) {
 				/* CD went away; no more connection */
 				pr_debug("hvsi%i: CD dropped\n", hp->index);
 				hp->mctrl &= TIOCM_CD;
@@ -267,6 +267,7 @@
 static void hvsi_recv_response(struct hvsi_struct *hp, uint8_t *packet)
 {
 	struct hvsi_query_response *resp = (struct hvsi_query_response *)packet;
+	uint32_t mctrl_word;
 
 	switch (hp->state) {
 		case HVSI_WAIT_FOR_VER_RESPONSE:
@@ -274,9 +275,10 @@
 			break;
 		case HVSI_WAIT_FOR_MCTRL_RESPONSE:
 			hp->mctrl = 0;
-			if (resp->u.mctrl_word & HVSI_TSDTR)
+			mctrl_word = be32_to_cpu(resp->u.mctrl_word);
+			if (mctrl_word & HVSI_TSDTR)
 				hp->mctrl |= TIOCM_DTR;
-			if (resp->u.mctrl_word & HVSI_TSCD)
+			if (mctrl_word & HVSI_TSCD)
 				hp->mctrl |= TIOCM_CD;
 			__set_state(hp, HVSI_OPEN);
 			break;
@@ -295,10 +297,10 @@
 
 	packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
 	packet.hdr.len = sizeof(struct hvsi_query_response);
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
-	packet.verb = VSV_SEND_VERSION_NUMBER;
+	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
+	packet.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
 	packet.u.version = HVSI_VERSION;
-	packet.query_seqno = query_seqno+1;
+	packet.query_seqno = cpu_to_be16(query_seqno+1);
 
 	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
 	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
@@ -319,7 +321,7 @@
 
 	switch (hp->state) {
 		case HVSI_WAIT_FOR_VER_QUERY:
-			hvsi_version_respond(hp, query->hdr.seqno);
+			hvsi_version_respond(hp, be16_to_cpu(query->hdr.seqno));
 			__set_state(hp, HVSI_OPEN);
 			break;
 		default:
@@ -555,8 +557,8 @@
 
 	packet.hdr.type = VS_QUERY_PACKET_HEADER;
 	packet.hdr.len = sizeof(struct hvsi_query);
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
-	packet.verb = verb;
+	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
+	packet.verb = cpu_to_be16(verb);
 
 	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
 	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
@@ -596,14 +598,14 @@
 	struct hvsi_control packet __ALIGNED__;
 	int wrote;
 
-	packet.hdr.type = VS_CONTROL_PACKET_HEADER,
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+	packet.hdr.type = VS_CONTROL_PACKET_HEADER;
+	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
 	packet.hdr.len = sizeof(struct hvsi_control);
-	packet.verb = VSV_SET_MODEM_CTL;
-	packet.mask = HVSI_TSDTR;
+	packet.verb = cpu_to_be16(VSV_SET_MODEM_CTL);
+	packet.mask = cpu_to_be32(HVSI_TSDTR);
 
 	if (mctrl & TIOCM_DTR)
-		packet.word = HVSI_TSDTR;
+		packet.word = cpu_to_be32(HVSI_TSDTR);
 
 	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
 	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
@@ -680,7 +682,7 @@
 	BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
 
 	packet.hdr.type = VS_DATA_PACKET_HEADER;
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
 	packet.hdr.len = count + sizeof(struct hvsi_header);
 	memcpy(&packet.data, buf, count);
 
@@ -697,9 +699,9 @@
 	struct hvsi_control packet __ALIGNED__;
 
 	packet.hdr.type = VS_CONTROL_PACKET_HEADER;
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
 	packet.hdr.len = 6;
-	packet.verb = VSV_CLOSE_PROTOCOL;
+	packet.verb = cpu_to_be16(VSV_CLOSE_PROTOCOL);
 
 	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
 	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
@@ -1180,7 +1182,7 @@
 	/* search device tree for vty nodes */
 	for_each_compatible_node(vty, "serial", "hvterm-protocol") {
 		struct hvsi_struct *hp;
-		const uint32_t *vtermno, *irq;
+		const __be32 *vtermno, *irq;
 
 		vtermno = of_get_property(vty, "reg", NULL);
 		irq = of_get_property(vty, "interrupts", NULL);
@@ -1202,11 +1204,11 @@
 		hp->index = hvsi_count;
 		hp->inbuf_end = hp->inbuf;
 		hp->state = HVSI_CLOSED;
-		hp->vtermno = *vtermno;
-		hp->virq = irq_create_mapping(NULL, irq[0]);
+		hp->vtermno = be32_to_cpup(vtermno);
+		hp->virq = irq_create_mapping(NULL, be32_to_cpup(irq));
 		if (hp->virq == 0) {
 			printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
-				__func__, irq[0]);
+			       __func__, be32_to_cpup(irq));
 			tty_port_destroy(&hp->port);
 			continue;
 		}
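
The hvsi fixes annotate the on-wire packet fields as big-endian and convert at every access, which makes the driver correct on little-endian hosts and keeps sparse happy. The basic pattern, on an illustrative header rather than the real hvsi layout:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_hdr {
	u8	type;
	u8	len;
	__be16	seqno;			/* big-endian on the wire */
};

static void example_fill(struct example_hdr *hdr, u16 seqno)
{
	hdr->seqno = cpu_to_be16(seqno);	/* CPU -> wire */
}

static u16 example_seqno(const struct example_hdr *hdr)
{
	return be16_to_cpu(hdr->seqno);		/* wire -> CPU */
}
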
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index 358323c..a8c8cfd 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -879,6 +879,11 @@
 	.chars_in_buffer	= mips_ejtag_fdc_tty_chars_in_buffer,
 };
 
+int __weak get_c0_fdc_int(void)
+{
+	return -1;
+}
+
 static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
 {
 	int ret, nport;
@@ -967,9 +972,7 @@
 	wake_up_process(priv->thread);
 
 	/* Look for an FDC IRQ */
-	priv->irq = -1;
-	if (get_c0_fdc_int)
-		priv->irq = get_c0_fdc_int();
+	priv->irq = get_c0_fdc_int();
 
 	/* Try requesting the IRQ */
 	if (priv->irq >= 0) {
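
Instead of testing whether get_c0_fdc_int exists at the call site, the driver now supplies a __weak default that platform code can override with a strong definition. The general shape of the idiom, with hypothetical names:

#include <linux/compiler.h>

/* Weak default, used when no platform code defines the symbol. */
int __weak example_get_irq(void)
{
	return -1;			/* no IRQ wired up */
}

/* A platform file may provide a strong override:
 *
 *	int example_get_irq(void) { return SOME_PLATFORM_IRQ; }
 *
 * The linker then picks the strong definition over the weak one.
 */
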
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 8bf495f..e0606c0 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -22,9 +22,7 @@
 source "drivers/gpu/host1x/Kconfig"
 source "drivers/gpu/ipu-v3/Kconfig"
 
-menu "Direct Rendering Manager"
 source "drivers/gpu/drm/Kconfig"
-endmenu
 
 menu "Frame buffer Devices"
 source "drivers/video/fbdev/Kconfig"
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 0505b79..5ffa4b4 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -299,6 +299,13 @@
 	  If you have an Sharp SL-6000 Zaurus say Y to enable a driver
 	  for its backlight
 
+config BACKLIGHT_PM8941_WLED
+	tristate "Qualcomm PM8941 WLED Driver"
+	select REGMAP
+	help
+	  If you have the Qualcomm PM8941, say Y to enable a driver for the
+	  WLED block.
+
 config BACKLIGHT_SAHARA
 	tristate "Tabletkiosk Sahara Touch-iT Backlight Driver"
 	depends on X86
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index d67073f..16ec534 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -48,6 +48,7 @@
 obj-$(CONFIG_BACKLIGHT_OT200)		+= ot200_bl.o
 obj-$(CONFIG_BACKLIGHT_PANDORA)		+= pandora_bl.o
 obj-$(CONFIG_BACKLIGHT_PCF50633)	+= pcf50633-backlight.o
+obj-$(CONFIG_BACKLIGHT_PM8941_WLED)	+= pm8941-wled.o
 obj-$(CONFIG_BACKLIGHT_PWM)		+= pwm_bl.o
 obj-$(CONFIG_BACKLIGHT_SAHARA)		+= kb3886_bl.o
 obj-$(CONFIG_BACKLIGHT_SKY81452)	+= sky81452-backlight.o
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index 88116b4..f88df9e 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -73,6 +73,7 @@
 	struct device *dev;
 	struct lp855x_platform_data *pdata;
 	struct pwm_device *pwm;
+	struct regulator *supply;	/* regulator for VDD input */
 };
 
 static int lp855x_write_byte(struct lp855x *lp, u8 reg, u8 data)
@@ -378,13 +379,6 @@
 		pdata->rom_data = &rom[0];
 	}
 
-	pdata->supply = devm_regulator_get(dev, "power");
-	if (IS_ERR(pdata->supply)) {
-		if (PTR_ERR(pdata->supply) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-		pdata->supply = NULL;
-	}
-
 	lp->pdata = pdata;
 
 	return 0;
@@ -425,8 +419,15 @@
 	else
 		lp->mode = REGISTER_BASED;
 
-	if (lp->pdata->supply) {
-		ret = regulator_enable(lp->pdata->supply);
+	lp->supply = devm_regulator_get(lp->dev, "power");
+	if (IS_ERR(lp->supply)) {
+		if (PTR_ERR(lp->supply) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		lp->supply = NULL;
+	}
+
+	if (lp->supply) {
+		ret = regulator_enable(lp->supply);
 		if (ret < 0) {
 			dev_err(&cl->dev, "failed to enable supply: %d\n", ret);
 			return ret;
@@ -464,8 +465,8 @@
 
 	lp->bl->props.brightness = 0;
 	backlight_update_status(lp->bl);
-	if (lp->pdata->supply)
-		regulator_disable(lp->pdata->supply);
+	if (lp->supply)
+		regulator_disable(lp->supply);
 	sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group);
 
 	return 0;
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
index e418d5b..5d583d7 100644
--- a/drivers/video/backlight/lp8788_bl.c
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -221,8 +221,7 @@
 {
 	struct backlight_device *bl_dev = bl->bl_dev;
 
-	if (bl_dev)
-		backlight_device_unregister(bl_dev);
+	backlight_device_unregister(bl_dev);
 }
 
 static ssize_t lp8788_get_bl_ctl_mode(struct device *dev,
diff --git a/drivers/video/backlight/pm8941-wled.c b/drivers/video/backlight/pm8941-wled.c
new file mode 100644
index 0000000..c704c32
--- /dev/null
+++ b/drivers/video/backlight/pm8941-wled.c
@@ -0,0 +1,427 @@
+/* Copyright (c) 2015, Sony Mobile Communications, AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define PM8941_WLED_REG_VAL_BASE		0x40
+#define  PM8941_WLED_REG_VAL_MAX		0xFFF
+
+#define PM8941_WLED_REG_MOD_EN			0x46
+#define  PM8941_WLED_REG_MOD_EN_BIT		BIT(7)
+#define  PM8941_WLED_REG_MOD_EN_MASK		BIT(7)
+
+#define PM8941_WLED_REG_SYNC			0x47
+#define  PM8941_WLED_REG_SYNC_MASK		0x07
+#define  PM8941_WLED_REG_SYNC_LED1		BIT(0)
+#define  PM8941_WLED_REG_SYNC_LED2		BIT(1)
+#define  PM8941_WLED_REG_SYNC_LED3		BIT(2)
+#define  PM8941_WLED_REG_SYNC_ALL		0x07
+#define  PM8941_WLED_REG_SYNC_CLEAR		0x00
+
+#define PM8941_WLED_REG_FREQ			0x4c
+#define  PM8941_WLED_REG_FREQ_MASK		0x0f
+
+#define PM8941_WLED_REG_OVP			0x4d
+#define  PM8941_WLED_REG_OVP_MASK		0x03
+
+#define PM8941_WLED_REG_BOOST			0x4e
+#define  PM8941_WLED_REG_BOOST_MASK		0x07
+
+#define PM8941_WLED_REG_SINK			0x4f
+#define  PM8941_WLED_REG_SINK_MASK		0xe0
+#define  PM8941_WLED_REG_SINK_SHFT		0x05
+
+/* Per-'string' registers below */
+#define PM8941_WLED_REG_STR_OFFSET		0x10
+
+#define PM8941_WLED_REG_STR_MOD_EN_BASE		0x60
+#define  PM8941_WLED_REG_STR_MOD_MASK		BIT(7)
+#define  PM8941_WLED_REG_STR_MOD_EN		BIT(7)
+
+#define PM8941_WLED_REG_STR_SCALE_BASE		0x62
+#define  PM8941_WLED_REG_STR_SCALE_MASK		0x1f
+
+#define PM8941_WLED_REG_STR_MOD_SRC_BASE	0x63
+#define  PM8941_WLED_REG_STR_MOD_SRC_MASK	0x01
+#define  PM8941_WLED_REG_STR_MOD_SRC_INT	0x00
+#define  PM8941_WLED_REG_STR_MOD_SRC_EXT	0x01
+
+#define PM8941_WLED_REG_STR_CABC_BASE		0x66
+#define  PM8941_WLED_REG_STR_CABC_MASK		BIT(7)
+#define  PM8941_WLED_REG_STR_CABC_EN		BIT(7)
+
+struct pm8941_wled_config {
+	u32 i_boost_limit;
+	u32 ovp;
+	u32 switch_freq;
+	u32 num_strings;
+	u32 i_limit;
+	bool cs_out_en;
+	bool ext_gen;
+	bool cabc_en;
+};
+
+struct pm8941_wled {
+	const char *name;
+	struct regmap *regmap;
+	u16 addr;
+
+	struct pm8941_wled_config cfg;
+};
+
+static int pm8941_wled_update_status(struct backlight_device *bl)
+{
+	struct pm8941_wled *wled = bl_get_data(bl);
+	u16 val = bl->props.brightness;
+	u8 ctrl = 0;
+	int rc;
+	int i;
+
+	if (bl->props.power != FB_BLANK_UNBLANK ||
+	    bl->props.fb_blank != FB_BLANK_UNBLANK ||
+	    bl->props.state & BL_CORE_FBBLANK)
+		val = 0;
+
+	if (val != 0)
+		ctrl = PM8941_WLED_REG_MOD_EN_BIT;
+
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_MOD_EN,
+			PM8941_WLED_REG_MOD_EN_MASK, ctrl);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < wled->cfg.num_strings; ++i) {
+		u8 v[2] = { val & 0xff, (val >> 8) & 0xf };
+
+		rc = regmap_bulk_write(wled->regmap,
+				wled->addr + PM8941_WLED_REG_VAL_BASE + 2 * i,
+				v, 2);
+		if (rc)
+			return rc;
+	}
+
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_SYNC,
+			PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_ALL);
+	if (rc)
+		return rc;
+
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_SYNC,
+			PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_CLEAR);
+	return rc;
+}
+
+static int pm8941_wled_setup(struct pm8941_wled *wled)
+{
+	int rc;
+	int i;
+
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_OVP,
+			PM8941_WLED_REG_OVP_MASK, wled->cfg.ovp);
+	if (rc)
+		return rc;
+
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_BOOST,
+			PM8941_WLED_REG_BOOST_MASK, wled->cfg.i_boost_limit);
+	if (rc)
+		return rc;
+
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_FREQ,
+			PM8941_WLED_REG_FREQ_MASK, wled->cfg.switch_freq);
+	if (rc)
+		return rc;
+
+	if (wled->cfg.cs_out_en) {
+		u8 all = (BIT(wled->cfg.num_strings) - 1)
+				<< PM8941_WLED_REG_SINK_SHFT;
+
+		rc = regmap_update_bits(wled->regmap,
+				wled->addr + PM8941_WLED_REG_SINK,
+				PM8941_WLED_REG_SINK_MASK, all);
+		if (rc)
+			return rc;
+	}
+
+	for (i = 0; i < wled->cfg.num_strings; ++i) {
+		u16 addr = wled->addr + PM8941_WLED_REG_STR_OFFSET * i;
+
+		rc = regmap_update_bits(wled->regmap,
+				addr + PM8941_WLED_REG_STR_MOD_EN_BASE,
+				PM8941_WLED_REG_STR_MOD_MASK,
+				PM8941_WLED_REG_STR_MOD_EN);
+		if (rc)
+			return rc;
+
+		if (wled->cfg.ext_gen) {
+			rc = regmap_update_bits(wled->regmap,
+					addr + PM8941_WLED_REG_STR_MOD_SRC_BASE,
+					PM8941_WLED_REG_STR_MOD_SRC_MASK,
+					PM8941_WLED_REG_STR_MOD_SRC_EXT);
+			if (rc)
+				return rc;
+		}
+
+		rc = regmap_update_bits(wled->regmap,
+				addr + PM8941_WLED_REG_STR_SCALE_BASE,
+				PM8941_WLED_REG_STR_SCALE_MASK,
+				wled->cfg.i_limit);
+		if (rc)
+			return rc;
+
+		rc = regmap_update_bits(wled->regmap,
+				addr + PM8941_WLED_REG_STR_CABC_BASE,
+				PM8941_WLED_REG_STR_CABC_MASK,
+				wled->cfg.cabc_en ?
+					PM8941_WLED_REG_STR_CABC_EN : 0);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+static const struct pm8941_wled_config pm8941_wled_config_defaults = {
+	.i_boost_limit = 3,
+	.i_limit = 20,
+	.ovp = 2,
+	.switch_freq = 5,
+	.num_strings = 0,
+	.cs_out_en = false,
+	.ext_gen = false,
+	.cabc_en = false,
+};
+
+struct pm8941_wled_var_cfg {
+	const u32 *values;
+	u32 (*fn)(u32);
+	int size;
+};
+
+static const u32 pm8941_wled_i_boost_limit_values[] = {
+	105, 385, 525, 805, 980, 1260, 1400, 1680,
+};
+
+static const struct pm8941_wled_var_cfg pm8941_wled_i_boost_limit_cfg = {
+	.values = pm8941_wled_i_boost_limit_values,
+	.size = ARRAY_SIZE(pm8941_wled_i_boost_limit_values),
+};
+
+static const u32 pm8941_wled_ovp_values[] = {
+	35, 32, 29, 27,
+};
+
+static const struct pm8941_wled_var_cfg pm8941_wled_ovp_cfg = {
+	.values = pm8941_wled_ovp_values,
+	.size = ARRAY_SIZE(pm8941_wled_ovp_values),
+};
+
+static u32 pm8941_wled_num_strings_values_fn(u32 idx)
+{
+	return idx + 1;
+}
+
+static const struct pm8941_wled_var_cfg pm8941_wled_num_strings_cfg = {
+	.fn = pm8941_wled_num_strings_values_fn,
+	.size = 3,
+};
+
+static u32 pm8941_wled_switch_freq_values_fn(u32 idx)
+{
+	return 19200 / (2 * (1 + idx));
+}
+
+static const struct pm8941_wled_var_cfg pm8941_wled_switch_freq_cfg = {
+	.fn = pm8941_wled_switch_freq_values_fn,
+	.size = 16,
+};
+
+static const struct pm8941_wled_var_cfg pm8941_wled_i_limit_cfg = {
+	.size = 26,
+};
+
+static u32 pm8941_wled_values(const struct pm8941_wled_var_cfg *cfg, u32 idx)
+{
+	if (idx >= cfg->size)
+		return UINT_MAX;
+	if (cfg->fn)
+		return cfg->fn(idx);
+	if (cfg->values)
+		return cfg->values[idx];
+	return idx;
+}
+
+static int pm8941_wled_configure(struct pm8941_wled *wled, struct device *dev)
+{
+	struct pm8941_wled_config *cfg = &wled->cfg;
+	u32 val;
+	int rc;
+	u32 c;
+	int i;
+	int j;
+
+	const struct {
+		const char *name;
+		u32 *val_ptr;
+		const struct pm8941_wled_var_cfg *cfg;
+	} u32_opts[] = {
+		{
+			"qcom,current-boost-limit",
+			&cfg->i_boost_limit,
+			.cfg = &pm8941_wled_i_boost_limit_cfg,
+		},
+		{
+			"qcom,current-limit",
+			&cfg->i_limit,
+			.cfg = &pm8941_wled_i_limit_cfg,
+		},
+		{
+			"qcom,ovp",
+			&cfg->ovp,
+			.cfg = &pm8941_wled_ovp_cfg,
+		},
+		{
+			"qcom,switching-freq",
+			&cfg->switch_freq,
+			.cfg = &pm8941_wled_switch_freq_cfg,
+		},
+		{
+			"qcom,num-strings",
+			&cfg->num_strings,
+			.cfg = &pm8941_wled_num_strings_cfg,
+		},
+	};
+	const struct {
+		const char *name;
+		bool *val_ptr;
+	} bool_opts[] = {
+		{ "qcom,cs-out", &cfg->cs_out_en, },
+		{ "qcom,ext-gen", &cfg->ext_gen, },
+		{ "qcom,cabc", &cfg->cabc_en, },
+	};
+
+	rc = of_property_read_u32(dev->of_node, "reg", &val);
+	if (rc || val > 0xffff) {
+		dev_err(dev, "invalid IO resources\n");
+		return rc ? rc : -EINVAL;
+	}
+	wled->addr = val;
+
+	rc = of_property_read_string(dev->of_node, "label", &wled->name);
+	if (rc)
+		wled->name = dev->of_node->name;
+
+	*cfg = pm8941_wled_config_defaults;
+	for (i = 0; i < ARRAY_SIZE(u32_opts); ++i) {
+		rc = of_property_read_u32(dev->of_node, u32_opts[i].name, &val);
+		if (rc == -EINVAL) {
+			continue;
+		} else if (rc) {
+			dev_err(dev, "error reading '%s'\n", u32_opts[i].name);
+			return rc;
+		}
+
+		c = UINT_MAX;
+		for (j = 0; c != val; j++) {
+			c = pm8941_wled_values(u32_opts[i].cfg, j);
+			if (c == UINT_MAX) {
+				dev_err(dev, "invalid value for '%s'\n",
+					u32_opts[i].name);
+				return -EINVAL;
+			}
+		}
+
+		dev_dbg(dev, "'%s' = %u\n", u32_opts[i].name, c);
+		*u32_opts[i].val_ptr = j;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(bool_opts); ++i) {
+		if (of_property_read_bool(dev->of_node, bool_opts[i].name))
+			*bool_opts[i].val_ptr = true;
+	}
+
+	cfg->num_strings = cfg->num_strings + 1;
+
+	return 0;
+}
+
+static const struct backlight_ops pm8941_wled_ops = {
+	.update_status = pm8941_wled_update_status,
+};
+
+static int pm8941_wled_probe(struct platform_device *pdev)
+{
+	struct backlight_properties props;
+	struct backlight_device *bl;
+	struct pm8941_wled *wled;
+	struct regmap *regmap;
+	int rc;
+
+	regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!regmap) {
+		dev_err(&pdev->dev, "Unable to get regmap\n");
+		return -EINVAL;
+	}
+
+	wled = devm_kzalloc(&pdev->dev, sizeof(*wled), GFP_KERNEL);
+	if (!wled)
+		return -ENOMEM;
+
+	wled->regmap = regmap;
+
+	rc = pm8941_wled_configure(wled, &pdev->dev);
+	if (rc)
+		return rc;
+
+	rc = pm8941_wled_setup(wled);
+	if (rc)
+		return rc;
+
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_RAW;
+	props.max_brightness = PM8941_WLED_REG_VAL_MAX;
+	bl = devm_backlight_device_register(&pdev->dev, wled->name,
+					    &pdev->dev, wled,
+					    &pm8941_wled_ops, &props);
+	if (IS_ERR(bl))
+		return PTR_ERR(bl);
+
+	return 0;
+}
+
+static const struct of_device_id pm8941_wled_match_table[] = {
+	{ .compatible = "qcom,pm8941-wled" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, pm8941_wled_match_table);
+
+static struct platform_driver pm8941_wled_driver = {
+	.probe = pm8941_wled_probe,
+	.driver	= {
+		.name = "pm8941-wled",
+		.of_match_table	= pm8941_wled_match_table,
+	},
+};
+
+module_platform_driver(pm8941_wled_driver);
+
+MODULE_DESCRIPTION("pm8941 wled driver");
+MODULE_LICENSE("GPL v2");
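
The DT parsing in pm8941_wled_configure() maps each property value back to its register index by scanning the per-option table until pm8941_wled_values() returns a match. Stated directly, the lookup it performs is roughly:

/* Simplified restatement of the index search above; returns the
 * register index for 'wanted', or -EINVAL if unsupported.
 */
static int example_value_to_index(const struct pm8941_wled_var_cfg *cfg,
				  u32 wanted)
{
	u32 j;

	for (j = 0; j < cfg->size; j++)
		if (pm8941_wled_values(cfg, j) == wanted)
			return j;

	return -EINVAL;
}
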
diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
index 052fa1b..d414c7a 100644
--- a/drivers/video/backlight/sky81452-backlight.c
+++ b/drivers/video/backlight/sky81452-backlight.c
@@ -65,7 +65,7 @@
 
 	if (brightness > 0) {
 		ret = regmap_write(regmap, SKY81452_REG0, brightness - 1);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			return ret;
 
 		return regmap_update_bits(regmap, SKY81452_REG1, SKY81452_EN,
@@ -87,12 +87,12 @@
 	int ret;
 
 	ret = kstrtoul(buf, 16, &value);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	ret = regmap_update_bits(regmap, SKY81452_REG1, SKY81452_EN,
 					value << CTZ(SKY81452_EN));
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	return count;
@@ -108,7 +108,7 @@
 
 	reg = !strcmp(attr->attr.name, "open") ? SKY81452_REG5 : SKY81452_REG4;
 	ret = regmap_read(regmap, reg, &value);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	if (value & SKY81452_SHRT) {
@@ -136,7 +136,7 @@
 	int ret;
 
 	ret = regmap_read(regmap, SKY81452_REG4, &value);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	*buf = 0;
@@ -196,7 +196,7 @@
 	pdata->gpio_enable = of_get_gpio(np, 0);
 
 	ret = of_property_count_u32_elems(np, "led-sources");
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		pdata->enable = SKY81452_EN >> CTZ(SKY81452_EN);
 	} else {
 		num_entry = ret;
@@ -205,7 +205,7 @@
 
 		ret = of_property_read_u32_array(np, "led-sources", sources,
 					num_entry);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(dev, "led-sources node is invalid.\n");
 			return ERR_PTR(-EINVAL);
 		}
@@ -218,12 +218,12 @@
 	ret = of_property_read_u32(np,
 			"skyworks,short-detection-threshold-volt",
 			&pdata->short_detection_threshold);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		pdata->short_detection_threshold = 7;
 
 	ret = of_property_read_u32(np, "skyworks,current-limit-mA",
 			&pdata->boost_current_limit);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		pdata->boost_current_limit = 2750;
 
 	of_node_put(np);
@@ -278,14 +278,14 @@
 	if (gpio_is_valid(pdata->gpio_enable)) {
 		ret = devm_gpio_request_one(dev, pdata->gpio_enable,
 					GPIOF_OUT_INIT_HIGH, "sky81452-en");
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(dev, "failed to request GPIO. err=%d\n", ret);
 			return ret;
 		}
 	}
 
 	ret = sky81452_bl_init_device(regmap, pdata);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(dev, "failed to initialize. err=%d\n", ret);
 		return ret;
 	}
@@ -302,8 +302,8 @@
 
 	platform_set_drvdata(pdev, bd);
 
-	ret  = sysfs_create_group(&bd->dev.kobj, &sky81452_bl_attr_group);
-	if (IS_ERR_VALUE(ret)) {
+	ret = sysfs_create_group(&bd->dev.kobj, &sky81452_bl_attr_group);
+	if (ret < 0) {
 		dev_err(dev, "failed to create attribute. err=%d\n", ret);
 		return ret;
 	}
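
The sky81452 conversion replaces IS_ERR_VALUE(ret) with a plain ret < 0 test. IS_ERR_VALUE() is meant for pointer-sized values carrying an error in the top -MAX_ERRNO range; for ordinary int error codes the idiomatic check is a simple sign test, as in this sketch:

#include <linux/regmap.h>

static int example_write(struct regmap *map)
{
	int ret = regmap_write(map, 0x00, 0x01);

	if (ret < 0)		/* -errno on failure */
		return ret;

	return 0;
}
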
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 3ad6765..83742d8 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -158,6 +158,7 @@
 	{ "tosa-bl", 0 },
 	{ },
 };
+MODULE_DEVICE_TABLE(i2c, tosa_bl_id);
 
 static struct i2c_driver tosa_bl_driver = {
 	.driver = {
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index ba97efc..0712806 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -9,7 +9,7 @@
 	depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && \
 		!SUPERH && !BLACKFIN && !AVR32 && !MN10300 && !CRIS && \
 		(!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER) && \
-		!ARM64
+		!ARM64 && !ARC
 	default y
 	help
 	  Saying Y here will allow you to use Linux in text mode through a
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 241fafd..55c4b5b 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -797,7 +797,8 @@
 	tristate "Intel TCO Timer/Watchdog"
 	depends on (X86 || IA64) && PCI
 	select WATCHDOG_CORE
-	select LPC_ICH
+	select LPC_ICH if !EXPERT
+	select I2C_I801 if !EXPERT
 	---help---
 	  Hardware driver for the intel TCO timer based watchdog devices.
 	  These drivers are included in the Intel 82801 I/O Controller
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 3c3fd41..0acc6c5 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -66,8 +66,7 @@
 #include <linux/spinlock.h>		/* For spin_lock/spin_unlock/... */
 #include <linux/uaccess.h>		/* For copy_to_user/put_user/... */
 #include <linux/io.h>			/* For inb/outb/... */
-#include <linux/mfd/core.h>
-#include <linux/mfd/lpc_ich.h>
+#include <linux/platform_data/itco_wdt.h>
 
 #include "iTCO_vendor.h"
 
@@ -146,59 +145,67 @@
 	return iTCO_wdt_private.iTCO_version == 3 ? ticks : (ticks * 6) / 10;
 }
 
+static inline u32 no_reboot_bit(void)
+{
+	u32 enable_bit;
+
+	switch (iTCO_wdt_private.iTCO_version) {
+	case 3:
+		enable_bit = 0x00000010;
+		break;
+	case 2:
+		enable_bit = 0x00000020;
+		break;
+	case 4:
+	case 1:
+	default:
+		enable_bit = 0x00000002;
+		break;
+	}
+
+	return enable_bit;
+}
+
 static void iTCO_wdt_set_NO_REBOOT_bit(void)
 {
 	u32 val32;
 
 	/* Set the NO_REBOOT bit: this disables reboots */
-	if (iTCO_wdt_private.iTCO_version == 3) {
+	if (iTCO_wdt_private.iTCO_version >= 2) {
 		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		val32 |= 0x00000010;
-		writel(val32, iTCO_wdt_private.gcs_pmc);
-	} else if (iTCO_wdt_private.iTCO_version == 2) {
-		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		val32 |= 0x00000020;
+		val32 |= no_reboot_bit();
 		writel(val32, iTCO_wdt_private.gcs_pmc);
 	} else if (iTCO_wdt_private.iTCO_version == 1) {
 		pci_read_config_dword(iTCO_wdt_private.pdev, 0xd4, &val32);
-		val32 |= 0x00000002;
+		val32 |= no_reboot_bit();
 		pci_write_config_dword(iTCO_wdt_private.pdev, 0xd4, val32);
 	}
 }
 
 static int iTCO_wdt_unset_NO_REBOOT_bit(void)
 {
-	int ret = 0;
-	u32 val32;
+	u32 enable_bit = no_reboot_bit();
+	u32 val32 = 0;
 
 	/* Unset the NO_REBOOT bit: this enables reboots */
-	if (iTCO_wdt_private.iTCO_version == 3) {
+	if (iTCO_wdt_private.iTCO_version >= 2) {
 		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		val32 &= 0xffffffef;
+		val32 &= ~enable_bit;
 		writel(val32, iTCO_wdt_private.gcs_pmc);
 
 		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		if (val32 & 0x00000010)
-			ret = -EIO;
-	} else if (iTCO_wdt_private.iTCO_version == 2) {
-		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		val32 &= 0xffffffdf;
-		writel(val32, iTCO_wdt_private.gcs_pmc);
-
-		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		if (val32 & 0x00000020)
-			ret = -EIO;
 	} else if (iTCO_wdt_private.iTCO_version == 1) {
 		pci_read_config_dword(iTCO_wdt_private.pdev, 0xd4, &val32);
-		val32 &= 0xfffffffd;
+		val32 &= ~enable_bit;
 		pci_write_config_dword(iTCO_wdt_private.pdev, 0xd4, val32);
 
 		pci_read_config_dword(iTCO_wdt_private.pdev, 0xd4, &val32);
-		if (val32 & 0x00000002)
-			ret = -EIO;
 	}
 
-	return ret; /* returns: 0 = OK, -EIO = Error */
+	if (val32 & enable_bit)
+		return -EIO;
+
+	return 0;
 }
 
 static int iTCO_wdt_start(struct watchdog_device *wd_dev)
@@ -418,9 +425,9 @@
 {
 	int ret = -ENODEV;
 	unsigned long val32;
-	struct lpc_ich_info *ich_info = dev_get_platdata(&dev->dev);
+	struct itco_wdt_platform_data *pdata = dev_get_platdata(&dev->dev);
 
-	if (!ich_info)
+	if (!pdata)
 		goto out;
 
 	spin_lock_init(&iTCO_wdt_private.io_lock);
@@ -435,7 +442,7 @@
 	if (!iTCO_wdt_private.smi_res)
 		goto out;
 
-	iTCO_wdt_private.iTCO_version = ich_info->iTCO_version;
+	iTCO_wdt_private.iTCO_version = pdata->version;
 	iTCO_wdt_private.dev = dev;
 	iTCO_wdt_private.pdev = to_pci_dev(dev->dev.parent);
 
@@ -501,15 +508,24 @@
 	}
 
 	pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
-		ich_info->name, ich_info->iTCO_version, (u64)TCOBASE);
+		pdata->name, pdata->version, (u64)TCOBASE);
 
 	/* Clear out the (probably old) status */
-	if (iTCO_wdt_private.iTCO_version == 3) {
+	switch (iTCO_wdt_private.iTCO_version) {
+	case 4:
+		outw(0x0008, TCO1_STS);	/* Clear the Time Out Status bit */
+		outw(0x0002, TCO2_STS);	/* Clear SECOND_TO_STS bit */
+		break;
+	case 3:
 		outl(0x20008, TCO1_STS);
-	} else {
+		break;
+	case 2:
+	case 1:
+	default:
 		outw(0x0008, TCO1_STS);	/* Clear the Time Out Status bit */
 		outw(0x0002, TCO2_STS);	/* Clear SECOND_TO_STS bit */
 		outw(0x0004, TCO2_STS);	/* Clear BOOT_STS bit */
+		break;
 	}
 
 	iTCO_wdt_watchdog_dev.bootstatus = 0;
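
The iTCO rework folds the per-version NO_REBOOT handling into one read-modify-write path keyed off no_reboot_bit(). The memory-mapped case reduces to the usual RMW plus readback, sketched here (gcs stands in for the ioremap()ed GCS/PMC register):

#include <linux/io.h>
#include <linux/errno.h>

static int example_unset_no_reboot(void __iomem *gcs, u32 bit)
{
	u32 val = readl(gcs);

	val &= ~bit;
	writel(val, gcs);

	/* Read back: firmware may keep the bit locked. */
	if (readl(gcs) & bit)
		return -EIO;

	return 0;
}
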
diff --git a/fs/Kconfig b/fs/Kconfig
index 011f433..da3f32f 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -11,18 +11,15 @@
 if BLOCK
 
 source "fs/ext2/Kconfig"
-source "fs/ext3/Kconfig"
 source "fs/ext4/Kconfig"
-source "fs/jbd/Kconfig"
 source "fs/jbd2/Kconfig"
 
 config FS_MBCACHE
 # Meta block cache for Extended Attributes (ext2/ext3/ext4)
 	tristate
 	default y if EXT2_FS=y && EXT2_FS_XATTR
-	default y if EXT3_FS=y && EXT3_FS_XATTR
 	default y if EXT4_FS=y
-	default m if EXT2_FS_XATTR || EXT3_FS_XATTR || EXT4_FS
+	default m if EXT2_FS_XATTR || EXT4_FS
 
 source "fs/reiserfs/Kconfig"
 source "fs/jfs/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index cb20e4b..f79cf40 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -27,6 +27,7 @@
 obj-$(CONFIG_SIGNALFD)		+= signalfd.o
 obj-$(CONFIG_TIMERFD)		+= timerfd.o
 obj-$(CONFIG_EVENTFD)		+= eventfd.o
+obj-$(CONFIG_USERFAULTFD)	+= userfaultfd.o
 obj-$(CONFIG_AIO)               += aio.o
 obj-$(CONFIG_FS_DAX)		+= dax.o
 obj-$(CONFIG_FILE_LOCKING)      += locks.o
@@ -62,12 +63,10 @@
 # Do not add any filesystems before this line
 obj-$(CONFIG_FSCACHE)		+= fscache/
 obj-$(CONFIG_REISERFS_FS)	+= reiserfs/
-obj-$(CONFIG_EXT3_FS)		+= ext3/ # Before ext2 so root fs can be ext3
 obj-$(CONFIG_EXT2_FS)		+= ext2/
 # We place ext4 after ext2 so plain ext2 root fs's are mounted using ext2
 # unless explicitly requested by rootfstype
 obj-$(CONFIG_EXT4_FS)		+= ext4/
-obj-$(CONFIG_JBD)		+= jbd/
 obj-$(CONFIG_JBD2)		+= jbd2/
 obj-$(CONFIG_CRAMFS)		+= cramfs/
 obj-$(CONFIG_SQUASHFS)		+= squashfs/
diff --git a/fs/aio.c b/fs/aio.c
index 480440f..155f842 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -308,15 +308,9 @@
 	}
 }
 
-static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
+static int aio_ring_mremap(struct vm_area_struct *vma)
 {
-	vma->vm_flags |= VM_DONTEXPAND;
-	vma->vm_ops = &generic_file_vm_ops;
-	return 0;
-}
-
-static int aio_ring_remap(struct file *file, struct vm_area_struct *vma)
-{
+	struct file *file = vma->vm_file;
 	struct mm_struct *mm = vma->vm_mm;
 	struct kioctx_table *table;
 	int i, res = -EINVAL;
@@ -342,9 +336,24 @@
 	return res;
 }
 
+static const struct vm_operations_struct aio_ring_vm_ops = {
+	.mremap		= aio_ring_mremap,
+#if IS_ENABLED(CONFIG_MMU)
+	.fault		= filemap_fault,
+	.map_pages	= filemap_map_pages,
+	.page_mkwrite	= filemap_page_mkwrite,
+#endif
+};
+
+static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	vma->vm_flags |= VM_DONTEXPAND;
+	vma->vm_ops = &aio_ring_vm_ops;
+	return 0;
+}
+
 static const struct file_operations aio_ring_fops = {
 	.mmap = aio_ring_mmap,
-	.mremap = aio_ring_remap,
 };
 
 #if IS_ENABLED(CONFIG_MIGRATION)
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 802fabb..ecbc63d 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -206,10 +206,33 @@
 		return -ENOMEM;
 
 	ref->root_id = root_id;
-	if (key)
+	if (key) {
 		ref->key_for_search = *key;
-	else
+		/*
+		 * We can often find data backrefs with an offset that is too
+		 * large (>= LLONG_MAX, the maximum allowed file offset) due to
+		 * underflows when subtracting the data offset of a file's
+		 * corresponding extent data item from the file's offset. This
+		 * can happen for example in the clone ioctl.
+		 * So if we detect such a case we set the search key's offset to
+		 * zero to make sure we will find the matching file extent item
+		 * at add_all_parents(), otherwise we will miss it because the
+		 * offset taken from the backref is much larger than the offset
+		 * of the file extent item. This can make us scan a very large
+		 * number of file extent items, but at least it will not make
+		 * us miss any.
+		 * This is an ugly workaround for a behaviour that should have
+		 * never existed, but it does and a fix for the clone ioctl
+		 * would touch a lot of places, cause backwards incompatibility
+		 * and would not fix the problem for extents cloned with older
+		 * kernels.
+		 */
+		if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY &&
+		    ref->key_for_search.offset >= LLONG_MAX)
+			ref->key_for_search.offset = 0;
+	} else {
 		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
+	}
 
 	ref->inode_list = NULL;
 	ref->level = level;
@@ -632,7 +655,7 @@
 			struct btrfs_delayed_tree_ref *ref;
 
 			ref = btrfs_delayed_node_to_tree_ref(node);
-			ret = __add_prelim_ref(prefs, ref->root, NULL,
+			ret = __add_prelim_ref(prefs, 0, NULL,
 					       ref->level + 1, ref->parent,
 					       node->bytenr,
 					       node->ref_mod * sgn, GFP_ATOMIC);
@@ -664,11 +687,7 @@
 			struct btrfs_delayed_data_ref *ref;
 
 			ref = btrfs_delayed_node_to_data_ref(node);
-
-			key.objectid = ref->objectid;
-			key.type = BTRFS_EXTENT_DATA_KEY;
-			key.offset = ref->offset;
-			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
+			ret = __add_prelim_ref(prefs, 0, NULL, 0,
 					       ref->parent, node->bytenr,
 					       node->ref_mod * sgn, GFP_ATOMIC);
 			break;
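
The long comment added to __add_prelim_ref() describes an offset underflow; in u64 arithmetic the subtraction wraps instead of going negative, which is why the bogus result lands at or above LLONG_MAX. A sketch of the clamp under those assumptions:

#include <linux/kernel.h>

static u64 example_search_offset(u64 file_offset, u64 extent_data_offset)
{
	u64 off = file_offset - extent_data_offset;	/* may wrap */

	if (off >= LLONG_MAX)	/* impossible as a real file offset */
		off = 0;	/* scan from the start instead */

	return off;
}
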
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 54114b4..5f745ea 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1159,8 +1159,10 @@
 
 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
 		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
-		if (ret)
+		if (ret) {
+			btrfs_abort_transaction(trans, root, ret);
 			return ret;
+		}
 	}
 
 	if (buf == root->node) {
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index aac314e..938efe3 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1300,7 +1300,7 @@
 	/* for raid56, this is a full stripe, without parity */
 	unsigned long full_stripe_len;
 
-	unsigned int ro:1;
+	unsigned int ro;
 	unsigned int iref:1;
 	unsigned int has_caching_ctl:1;
 	unsigned int removed:1;
@@ -1518,12 +1518,6 @@
 	 */
 	struct mutex ordered_operations_mutex;
 
-	/*
-	 * Same as ordered_operations_mutex except this is for ordered extents
-	 * and not the operations.
-	 */
-	struct mutex ordered_extent_flush_mutex;
-
 	struct rw_semaphore commit_root_sem;
 
 	struct rw_semaphore cleanup_work_sem;
@@ -3437,6 +3431,8 @@
 			     struct btrfs_root *root, u64 group_start,
 			     struct extent_map *em);
 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
+void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
+void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 				       struct btrfs_root *root);
 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
@@ -3495,9 +3491,9 @@
 void btrfs_block_rsv_release(struct btrfs_root *root,
 			     struct btrfs_block_rsv *block_rsv,
 			     u64 num_bytes);
-int btrfs_set_block_group_ro(struct btrfs_root *root,
+int btrfs_inc_block_group_ro(struct btrfs_root *root,
 			     struct btrfs_block_group_cache *cache);
-void btrfs_set_block_group_rw(struct btrfs_root *root,
+void btrfs_dec_block_group_ro(struct btrfs_root *root,
 			      struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
@@ -4073,6 +4069,7 @@
 void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
 		     unsigned int line, int errno, const char *fmt, ...);
 
+const char *btrfs_decode_error(int errno);
 
 __cold
 void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
@@ -4185,8 +4182,7 @@
 int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *root, struct extent_buffer *buf,
 			  struct extent_buffer *cow);
-void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
-			      struct btrfs_pending_snapshot *pending,
+void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
 			      u64 *bytes_to_reserve);
 int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
 			      struct btrfs_pending_snapshot *pending);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5e307bd..9ebd34f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1730,6 +1730,7 @@
 	bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
 	bdi->congested_fn	= btrfs_congested_fn;
 	bdi->congested_data	= info;
+	bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
 	return 0;
 }
 
@@ -2613,7 +2614,6 @@
 
 
 	mutex_init(&fs_info->ordered_operations_mutex);
-	mutex_init(&fs_info->ordered_extent_flush_mutex);
 	mutex_init(&fs_info->tree_log_mutex);
 	mutex_init(&fs_info->chunk_mutex);
 	mutex_init(&fs_info->transaction_kthread_mutex);
@@ -2955,8 +2955,9 @@
 	if (fs_info->fs_devices->missing_devices >
 	     fs_info->num_tolerated_disk_barrier_failures &&
 	    !(sb->s_flags & MS_RDONLY)) {
-		printk(KERN_WARNING "BTRFS: "
-			"too many missing devices, writeable mount is not allowed\n");
+		pr_warn("BTRFS: missing devices(%llu) exceeds the limit(%d), writeable mount is not allowed\n",
+			fs_info->fs_devices->missing_devices,
+			fs_info->num_tolerated_disk_barrier_failures);
 		goto fail_sysfs;
 	}
 
@@ -3763,6 +3764,15 @@
 	cancel_work_sync(&fs_info->async_reclaim_work);
 
 	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
+		/*
+		 * If the cleaner thread is stopped while block groups
+		 * are still queued for removal, their deletion would be
+		 * skipped once the cleaner thread exits, so do it here.
+		 */
+		mutex_lock(&root->fs_info->cleaner_mutex);
+		btrfs_delete_unused_bgs(root->fs_info);
+		mutex_unlock(&root->fs_info->cleaner_mutex);
+
 		ret = btrfs_commit_super(root);
 		if (ret)
 			btrfs_err(fs_info, "commit super ret %d", ret);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 07204bf..5411f0a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1316,8 +1316,7 @@
 	return ret;
 }
 
-static noinline u32 extent_data_ref_count(struct btrfs_root *root,
-					  struct btrfs_path *path,
+static noinline u32 extent_data_ref_count(struct btrfs_path *path,
 					  struct btrfs_extent_inline_ref *iref)
 {
 	struct btrfs_key key;
@@ -1883,10 +1882,77 @@
 	return ret;
 }
 
-static int btrfs_issue_discard(struct block_device *bdev,
-				u64 start, u64 len)
+#define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
+static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
+			       u64 *discarded_bytes)
 {
-	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
+	int j, ret = 0;
+	u64 bytes_left, end;
+	u64 aligned_start = ALIGN(start, 1 << 9);
+
+	if (WARN_ON(start != aligned_start)) {
+		len -= aligned_start - start;
+		len = round_down(len, 1 << 9);
+		start = aligned_start;
+	}
+
+	*discarded_bytes = 0;
+
+	if (!len)
+		return 0;
+
+	end = start + len;
+	bytes_left = len;
+
+	/* Skip any superblocks on this device. */
+	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
+		u64 sb_start = btrfs_sb_offset(j);
+		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
+		u64 size = sb_start - start;
+
+		if (!in_range(sb_start, start, bytes_left) &&
+		    !in_range(sb_end, start, bytes_left) &&
+		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
+			continue;
+
+		/*
+		 * Superblock spans beginning of range.  Adjust start and
+		 * try again.
+		 */
+		if (sb_start <= start) {
+			start += sb_end - start;
+			if (start > end) {
+				bytes_left = 0;
+				break;
+			}
+			bytes_left = end - start;
+			continue;
+		}
+
+		if (size) {
+			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
+						   GFP_NOFS, 0);
+			if (!ret)
+				*discarded_bytes += size;
+			else if (ret != -EOPNOTSUPP)
+				return ret;
+		}
+
+		start = sb_end;
+		if (start > end) {
+			bytes_left = 0;
+			break;
+		}
+		bytes_left = end - start;
+	}
+
+	if (bytes_left) {
+		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
+					   GFP_NOFS, 0);
+		if (!ret)
+			*discarded_bytes += bytes_left;
+	}
+	return ret;
 }
 
 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
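
The reworked btrfs_issue_discard() above splits a discard request around the
superblock mirrors instead of discarding over them, and reports how much was
actually discarded. A minimal userspace sketch of the same range-splitting
idea — the reserved offsets and sizes below are illustrative stand-ins, not
the real btrfs_sb_offset()/BTRFS_SUPER_INFO_SIZE values:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative reserved regions (sorted ascending), standing in for
     * the superblock mirrors that must never be discarded. */
    static const uint64_t reserved_start[] = { 65536, 67108864 };
    #define RESERVED_SIZE 4096
    #define NR_RESERVED 2

    /* "Discard" [start, start + len), skipping the reserved regions;
     * returns the number of bytes actually discarded. */
    static uint64_t discard_range(uint64_t start, uint64_t len)
    {
        uint64_t end = start + len, done = 0;
        int i;

        for (i = 0; i < NR_RESERVED && start < end; i++) {
            uint64_t rs = reserved_start[i], re = rs + RESERVED_SIZE;

            if (re <= start || rs >= end)
                continue;               /* no overlap with this mirror */
            if (rs > start) {           /* discard the chunk before it */
                printf("discard [%llu, %llu)\n",
                       (unsigned long long)start, (unsigned long long)rs);
                done += rs - start;
            }
            start = re;                 /* resume past the mirror */
        }
        if (start < end) {
            printf("discard [%llu, %llu)\n",
                   (unsigned long long)start, (unsigned long long)end);
            done += end - start;
        }
        return done;
    }

    int main(void)
    {
        printf("discarded %llu bytes\n",
               (unsigned long long)discard_range(0, 128 * 1024 * 1024ULL));
        return 0;
    }
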
@@ -1907,14 +1973,16 @@
 
 
 		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
+			u64 bytes;
 			if (!stripe->dev->can_discard)
 				continue;
 
 			ret = btrfs_issue_discard(stripe->dev->bdev,
 						  stripe->physical,
-						  stripe->length);
+						  stripe->length,
+						  &bytes);
 			if (!ret)
-				discarded_bytes += stripe->length;
+				discarded_bytes += bytes;
 			else if (ret != -EOPNOTSUPP)
 				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
 
@@ -6062,20 +6130,19 @@
 			       struct btrfs_root *root)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_block_group_cache *block_group, *tmp;
+	struct list_head *deleted_bgs;
 	struct extent_io_tree *unpin;
 	u64 start;
 	u64 end;
 	int ret;
 
-	if (trans->aborted)
-		return 0;
-
 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
 		unpin = &fs_info->freed_extents[1];
 	else
 		unpin = &fs_info->freed_extents[0];
 
-	while (1) {
+	while (!trans->aborted) {
 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
 		ret = find_first_extent_bit(unpin, 0, &start, &end,
 					    EXTENT_DIRTY, NULL);
@@ -6094,6 +6161,34 @@
 		cond_resched();
 	}
 
+	/*
+	 * Transaction is finished.  We don't need the lock anymore.  We
+	 * do need to clean up the block groups in case of a transaction
+	 * abort.
+	 */
+	deleted_bgs = &trans->transaction->deleted_bgs;
+	list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
+		u64 trimmed = 0;
+
+		ret = -EROFS;
+		if (!trans->aborted)
+			ret = btrfs_discard_extent(root,
+						   block_group->key.objectid,
+						   block_group->key.offset,
+						   &trimmed);
+
+		list_del_init(&block_group->bg_list);
+		btrfs_put_block_group_trimming(block_group);
+		btrfs_put_block_group(block_group);
+
+		if (ret) {
+			const char *errstr = btrfs_decode_error(ret);
+			btrfs_warn(fs_info,
+				   "Discard failed while removing blockgroup: errno=%d %s\n",
+				   ret, errstr);
+		}
+	}
+
 	return 0;
 }
 
@@ -6349,7 +6444,7 @@
 	} else {
 		if (found_extent) {
 			BUG_ON(is_data && refs_to_drop !=
-			       extent_data_ref_count(root, path, iref));
+			       extent_data_ref_count(path, iref));
 			if (iref) {
 				BUG_ON(path->slots[0] != extent_slot);
 			} else {
@@ -7567,9 +7662,6 @@
 
 /*
  * finds a free extent and does all the dirty work required for allocation
- * returns the key for the extent through ins, and a tree buffer for
- * the first block of the extent through buf.
- *
  * returns the tree buffer or an ERR_PTR on error.
  */
 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
@@ -8723,14 +8815,13 @@
 	return flags;
 }
 
-static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
+static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
 {
 	struct btrfs_space_info *sinfo = cache->space_info;
 	u64 num_bytes;
 	u64 min_allocable_bytes;
 	int ret = -ENOSPC;
 
-
 	/*
 	 * We need some metadata space and system metadata space for
 	 * allocating chunks in some corner cases until we force to set
@@ -8747,6 +8838,7 @@
 	spin_lock(&cache->lock);
 
 	if (cache->ro) {
+		cache->ro++;
 		ret = 0;
 		goto out;
 	}
@@ -8758,7 +8850,7 @@
 	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
 	    min_allocable_bytes <= sinfo->total_bytes) {
 		sinfo->bytes_readonly += num_bytes;
-		cache->ro = 1;
+		cache->ro++;
 		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
 		ret = 0;
 	}
@@ -8768,7 +8860,7 @@
 	return ret;
 }
 
-int btrfs_set_block_group_ro(struct btrfs_root *root,
+int btrfs_inc_block_group_ro(struct btrfs_root *root,
 			     struct btrfs_block_group_cache *cache)
 
 {
@@ -8776,8 +8868,6 @@
 	u64 alloc_flags;
 	int ret;
 
-	BUG_ON(cache->ro);
-
 again:
 	trans = btrfs_join_transaction(root);
 	if (IS_ERR(trans))
@@ -8820,7 +8910,7 @@
 			goto out;
 	}
 
-	ret = set_block_group_ro(cache, 0);
+	ret = inc_block_group_ro(cache, 0);
 	if (!ret)
 		goto out;
 	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
@@ -8828,7 +8918,7 @@
 			     CHUNK_ALLOC_FORCE);
 	if (ret < 0)
 		goto out;
-	ret = set_block_group_ro(cache, 0);
+	ret = inc_block_group_ro(cache, 0);
 out:
 	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
 		alloc_flags = update_block_group_flags(root, cache->flags);
@@ -8891,7 +8981,7 @@
 	return free_bytes;
 }
 
-void btrfs_set_block_group_rw(struct btrfs_root *root,
+void btrfs_dec_block_group_ro(struct btrfs_root *root,
 			      struct btrfs_block_group_cache *cache)
 {
 	struct btrfs_space_info *sinfo = cache->space_info;
@@ -8901,11 +8991,13 @@
 
 	spin_lock(&sinfo->lock);
 	spin_lock(&cache->lock);
-	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
-		    cache->bytes_super - btrfs_block_group_used(&cache->item);
-	sinfo->bytes_readonly -= num_bytes;
-	cache->ro = 0;
-	list_del_init(&cache->ro_list);
+	if (!--cache->ro) {
+		num_bytes = cache->key.offset - cache->reserved -
+			    cache->pinned - cache->bytes_super -
+			    btrfs_block_group_used(&cache->item);
+		sinfo->bytes_readonly -= num_bytes;
+		list_del_init(&cache->ro_list);
+	}
 	spin_unlock(&cache->lock);
 	spin_unlock(&sinfo->lock);
 }
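
Note the semantic shift in the two hunks above: cache->ro changes from a flag
into a hold count, so balance, scrub and the unused-block-group cleaner can
each pin a group read-only without stepping on one another, and space only
returns to the writable pool on the last put. A toy sketch of the counted
pattern, with pthreads standing in for the spinlocks and the space accounting
elided:

    #include <pthread.h>

    /* Stand-in for btrfs_block_group_cache: ro is now a count, not a bit. */
    struct block_group {
        pthread_mutex_t lock;
        unsigned int ro;        /* 0 = writable, N > 0 = N read-only holders */
    };

    void bg_ro_get(struct block_group *bg)
    {
        pthread_mutex_lock(&bg->lock);
        if (bg->ro++ == 0) {
            /* first holder: move the group's free space out of the
             * writable pool (sinfo->bytes_readonly in the real code) */
        }
        pthread_mutex_unlock(&bg->lock);
    }

    void bg_ro_put(struct block_group *bg)
    {
        pthread_mutex_lock(&bg->lock);
        if (--bg->ro == 0) {
            /* last holder: account the space as writable again */
        }
        pthread_mutex_unlock(&bg->lock);
    }
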
@@ -9421,7 +9513,7 @@
 
 		set_avail_alloc_bits(root->fs_info, cache->flags);
 		if (btrfs_chunk_readonly(root, cache->key.objectid)) {
-			set_block_group_ro(cache, 1);
+			inc_block_group_ro(cache, 1);
 		} else if (btrfs_block_group_used(&cache->item) == 0) {
 			spin_lock(&info->unused_bgs_lock);
 			/* Should always be true but just in case. */
@@ -9449,11 +9541,11 @@
 		list_for_each_entry(cache,
 				&space_info->block_groups[BTRFS_RAID_RAID0],
 				list)
-			set_block_group_ro(cache, 1);
+			inc_block_group_ro(cache, 1);
 		list_for_each_entry(cache,
 				&space_info->block_groups[BTRFS_RAID_SINGLE],
 				list)
-			set_block_group_ro(cache, 1);
+			inc_block_group_ro(cache, 1);
 	}
 
 	init_global_block_rsv(info);
@@ -9834,6 +9926,11 @@
 	 * currently running transaction might finish and a new one start,
 	 * allowing for new block groups to be created that can reuse the same
 	 * physical device locations unless we take this special care.
+	 *
+	 * There may also be an implicit trim operation if the file system
+	 * is mounted with -o discard. The same protections must remain
+	 * in place until the extents have been fully discarded, which
+	 * only happens once the transaction commit has completed.
 	 */
 	remove_em = (atomic_read(&block_group->trimming) == 0);
 	/*
@@ -9908,6 +10005,7 @@
 	spin_lock(&fs_info->unused_bgs_lock);
 	while (!list_empty(&fs_info->unused_bgs)) {
 		u64 start, end;
+		int trimming;
 
 		block_group = list_first_entry(&fs_info->unused_bgs,
 					       struct btrfs_block_group_cache,
@@ -9941,7 +10039,7 @@
 		spin_unlock(&block_group->lock);
 
 		/* We don't want to force the issue, only flip if it's ok. */
-		ret = set_block_group_ro(block_group, 0);
+		ret = inc_block_group_ro(block_group, 0);
 		up_write(&space_info->groups_sem);
 		if (ret < 0) {
 			ret = 0;
@@ -9955,7 +10053,7 @@
 		/* 1 for btrfs_orphan_reserve_metadata() */
 		trans = btrfs_start_transaction(root, 1);
 		if (IS_ERR(trans)) {
-			btrfs_set_block_group_rw(root, block_group);
+			btrfs_dec_block_group_ro(root, block_group);
 			ret = PTR_ERR(trans);
 			goto next;
 		}
@@ -9982,14 +10080,14 @@
 				  EXTENT_DIRTY, GFP_NOFS);
 		if (ret) {
 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
-			btrfs_set_block_group_rw(root, block_group);
+			btrfs_dec_block_group_ro(root, block_group);
 			goto end_trans;
 		}
 		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
 				  EXTENT_DIRTY, GFP_NOFS);
 		if (ret) {
 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
-			btrfs_set_block_group_rw(root, block_group);
+			btrfs_dec_block_group_ro(root, block_group);
 			goto end_trans;
 		}
 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
@@ -10007,12 +10105,39 @@
 		spin_unlock(&block_group->lock);
 		spin_unlock(&space_info->lock);
 
+		/* DISCARD can flip during remount */
+		trimming = btrfs_test_opt(root, DISCARD);
+
+		/* Implicit trim during transaction commit. */
+		if (trimming)
+			btrfs_get_block_group_trimming(block_group);
+
 		/*
 		 * Btrfs_remove_chunk will abort the transaction if things go
 		 * horribly wrong.
 		 */
 		ret = btrfs_remove_chunk(trans, root,
 					 block_group->key.objectid);
+
+		if (ret) {
+			if (trimming)
+				btrfs_put_block_group_trimming(block_group);
+			goto end_trans;
+		}
+
+		/*
+		 * If we're not mounted with -odiscard, we can just forget
+		 * about this block group. Otherwise we'll need to wait
+		 * until transaction commit to do the actual discard.
+		 */
+		if (trimming) {
+			WARN_ON(!list_empty(&block_group->bg_list));
+			spin_lock(&trans->transaction->deleted_bgs_lock);
+			list_move(&block_group->bg_list,
+				  &trans->transaction->deleted_bgs);
+			spin_unlock(&trans->transaction->deleted_bgs_lock);
+			btrfs_get_block_group(block_group);
+		}
 end_trans:
 		btrfs_end_transaction(trans, root);
 next:
@@ -10066,10 +10191,99 @@
 	return unpin_extent_range(root, start, end, false);
 }
 
+/*
+ * It used to be that old block groups would be left around forever.
+ * Iterating over them would be enough to trim unused space.  Since we
+ * now automatically remove them, we also need to iterate over unallocated
+ * space.
+ *
+ * We don't want a transaction for this since the discard may take a
+ * substantial amount of time.  We don't require that a transaction be
+ * running, but we do need to take a running transaction into account
+ * to ensure that we're not discarding chunks that were released in
+ * the current transaction.
+ *
+ * Holding the chunks lock will prevent other threads from allocating
+ * or releasing chunks, but it won't prevent a running transaction
+ * from committing and releasing the memory that the pending chunks
+ * list head uses.  For that, we need to take a reference to the
+ * transaction.
+ */
+static int btrfs_trim_free_extents(struct btrfs_device *device,
+				   u64 minlen, u64 *trimmed)
+{
+	u64 start = 0, len = 0;
+	int ret;
+
+	*trimmed = 0;
+
+	/* Not writeable = nothing to do. */
+	if (!device->writeable)
+		return 0;
+
+	/* No free space = nothing to do. */
+	if (device->total_bytes <= device->bytes_used)
+		return 0;
+
+	ret = 0;
+
+	while (1) {
+		struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
+		struct btrfs_transaction *trans;
+		u64 bytes;
+
+		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
+		if (ret)
+			return ret;
+
+		down_read(&fs_info->commit_root_sem);
+
+		spin_lock(&fs_info->trans_lock);
+		trans = fs_info->running_transaction;
+		if (trans)
+			atomic_inc(&trans->use_count);
+		spin_unlock(&fs_info->trans_lock);
+
+		ret = find_free_dev_extent_start(trans, device, minlen, start,
+						 &start, &len);
+		if (trans)
+			btrfs_put_transaction(trans);
+
+		if (ret) {
+			up_read(&fs_info->commit_root_sem);
+			mutex_unlock(&fs_info->chunk_mutex);
+			if (ret == -ENOSPC)
+				ret = 0;
+			break;
+		}
+
+		ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
+		up_read(&fs_info->commit_root_sem);
+		mutex_unlock(&fs_info->chunk_mutex);
+
+		if (ret)
+			break;
+
+		start += len;
+		*trimmed += bytes;
+
+		if (fatal_signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		cond_resched();
+	}
+
+	return ret;
+}
+
 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_group_cache *cache = NULL;
+	struct btrfs_device *device;
+	struct list_head *devices;
 	u64 group_trimmed;
 	u64 start;
 	u64 end;
@@ -10124,6 +10338,18 @@
 		cache = next_block_group(fs_info->tree_root, cache);
 	}
 
+	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+	devices = &root->fs_info->fs_devices->alloc_list;
+	list_for_each_entry(device, devices, dev_alloc_list) {
+		ret = btrfs_trim_free_extents(device, range->minlen,
+					      &group_trimmed);
+		if (ret)
+			break;
+
+		trimmed += group_trimmed;
+	}
+	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+
 	range->len = trimmed;
 	return ret;
 }
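
With the device loop above, a single FITRIM now covers unallocated device
space as well as the free space inside block groups, so regions freed by
chunk removal no longer escape fstrim. For reference, the standard way to
drive this path from userspace:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fs.h>           /* FITRIM, struct fstrim_range */

    int main(int argc, char **argv)
    {
        struct fstrim_range range = {
            .start = 0,
            .len = (__u64)-1,       /* whole filesystem */
            .minlen = 0,            /* kernel raises this to the device minimum */
        };
        int fd = open(argc > 1 ? argv[1] : "/mnt", O_RDONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (ioctl(fd, FITRIM, &range) < 0) {
            perror("FITRIM");
            return 1;
        }
        printf("trimmed %llu bytes\n", (unsigned long long)range.len);
        close(fd);
        return 0;
    }

On return the kernel writes the number of trimmed bytes back into range.len,
which after this series includes the per-device free extents.
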
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 68b12bb..f1018cf 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2723,6 +2723,12 @@
 		btrfs_bio->csum = NULL;
 		btrfs_bio->csum_allocated = NULL;
 		btrfs_bio->end_io = NULL;
+
+#ifdef CONFIG_BLK_CGROUP
+		/* FIXME, put this into bio_clone_bioset */
+		if (bio->bi_css)
+			bio_associate_blkcg(new, bio->bi_css);
+#endif
 	}
 	return new;
 }
@@ -2783,6 +2789,7 @@
 }
 
 static int submit_extent_page(int rw, struct extent_io_tree *tree,
+			      struct writeback_control *wbc,
 			      struct page *page, sector_t sector,
 			      size_t size, unsigned long offset,
 			      struct block_device *bdev,
@@ -2817,6 +2824,8 @@
 			}
 			bio = NULL;
 		} else {
+			if (wbc)
+				wbc_account_io(wbc, page, page_size);
 			return 0;
 		}
 	}
@@ -2829,6 +2838,10 @@
 	bio_add_page(bio, page, page_size, offset);
 	bio->bi_end_io = end_io_func;
 	bio->bi_private = tree;
+	if (wbc) {
+		wbc_init_bio(wbc, bio);
+		wbc_account_io(wbc, page, page_size);
+	}
 
 	if (bio_ret)
 		*bio_ret = bio;
@@ -3039,7 +3052,7 @@
 		}
 
 		pnr -= page->index;
-		ret = submit_extent_page(rw, tree, page,
+		ret = submit_extent_page(rw, tree, NULL, page,
 					 sector, disk_io_size, pg_offset,
 					 bdev, bio, pnr,
 					 end_bio_extent_readpage, mirror_num,
@@ -3434,7 +3447,7 @@
 				       page->index, cur, end);
 			}
 
-			ret = submit_extent_page(write_flags, tree, page,
+			ret = submit_extent_page(write_flags, tree, wbc, page,
 						 sector, iosize, pg_offset,
 						 bdev, &epd->bio, max_nr,
 						 end_bio_extent_writepage,
@@ -3738,7 +3751,7 @@
 
 		clear_page_dirty_for_io(p);
 		set_page_writeback(p);
-		ret = submit_extent_page(rw, tree, p, offset >> 9,
+		ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
 					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
 					 -1, end_bio_extent_buffer_writepage,
 					 0, epd->bio_flags, bio_flags);
@@ -4603,9 +4616,7 @@
 {
 	struct extent_buffer *eb = NULL;
 
-	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS);
-	if (eb == NULL)
-		return NULL;
+	eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
 	eb->start = start;
 	eb->len = len;
 	eb->fs_info = fs_info;
@@ -4863,7 +4874,7 @@
 		return NULL;
 
 	for (i = 0; i < num_pages; i++, index++) {
-		p = find_or_create_page(mapping, index, GFP_NOFS);
+		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
 		if (!p)
 			goto free_eb;
 
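
Together with the BDI_CAP_CGROUP_WRITEBACK and SB_I_CGROUPWB bits elsewhere
in this series, threading the writeback_control into submit_extent_page() is
what lets the block layer attribute and throttle btrfs buffered writes per
cgroup. The shape of the per-bio wiring, as a kernel-context sketch (not
standalone code):

    /* Sketch: tag a writeback bio with the cgroup that dirtied the pages
     * and charge the bytes to it, so blk-cgroup can throttle the writer. */
    static void submit_one_write(struct writeback_control *wbc, struct bio *bio,
                                 struct page *page, unsigned int len,
                                 unsigned int off)
    {
        bio_add_page(bio, page, len, off);
        if (wbc) {                              /* NULL on the read paths */
            wbc_init_bio(wbc, bio);             /* associate bio with the cgroup */
            wbc_account_io(wbc, page, len);     /* charge the I/O */
        }
        submit_bio(WRITE, bio);
    }
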
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index fb5a6b1..abe3a66 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -3272,35 +3272,23 @@
 	return ret;
 }
 
-int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
-			   u64 *trimmed, u64 start, u64 end, u64 minlen)
+void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
 {
-	int ret;
+	atomic_inc(&cache->trimming);
+}
 
-	*trimmed = 0;
+void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
+{
+	struct extent_map_tree *em_tree;
+	struct extent_map *em;
+	bool cleanup;
 
 	spin_lock(&block_group->lock);
-	if (block_group->removed) {
-		spin_unlock(&block_group->lock);
-		return 0;
-	}
-	atomic_inc(&block_group->trimming);
+	cleanup = (atomic_dec_and_test(&block_group->trimming) &&
+		   block_group->removed);
 	spin_unlock(&block_group->lock);
 
-	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
-	if (ret)
-		goto out;
-
-	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
-out:
-	spin_lock(&block_group->lock);
-	if (atomic_dec_and_test(&block_group->trimming) &&
-	    block_group->removed) {
-		struct extent_map_tree *em_tree;
-		struct extent_map *em;
-
-		spin_unlock(&block_group->lock);
-
+	if (cleanup) {
 		lock_chunks(block_group->fs_info->chunk_root);
 		em_tree = &block_group->fs_info->mapping_tree.map_tree;
 		write_lock(&em_tree->lock);
@@ -3324,10 +3312,31 @@
 		 * this block group have left 1 entry each one. Free them.
 		 */
 		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
-	} else {
-		spin_unlock(&block_group->lock);
 	}
+}
 
+int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+			   u64 *trimmed, u64 start, u64 end, u64 minlen)
+{
+	int ret;
+
+	*trimmed = 0;
+
+	spin_lock(&block_group->lock);
+	if (block_group->removed) {
+		spin_unlock(&block_group->lock);
+		return 0;
+	}
+	btrfs_get_block_group_trimming(block_group);
+	spin_unlock(&block_group->lock);
+
+	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
+	if (ret)
+		goto out;
+
+	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
+out:
+	btrfs_put_block_group_trimming(block_group);
 	return ret;
 }
 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index f924d9a..237da01 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3658,6 +3658,35 @@
 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
 			&BTRFS_I(inode)->runtime_flags);
 
+	/*
+	 * We don't persist the id of the transaction where an unlink operation
+	 * against the inode was last made. So here we assume the inode might
+	 * have been evicted, and therefore the exact value of last_unlink_trans
+	 * lost, and set it to last_trans to avoid metadata inconsistencies
+	 * between the inode and its parent if the inode is fsync'ed and the log
+	 * replayed. For example, in the scenario:
+	 *
+	 * touch mydir/foo
+	 * ln mydir/foo mydir/bar
+	 * sync
+	 * unlink mydir/bar
+	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
+	 * xfs_io -c fsync mydir/foo
+	 * <power failure>
+	 * mount fs, triggers fsync log replay
+	 *
+	 * We must make sure that when we fsync our inode foo we also log its
+	 * parent inode, otherwise after log replay the parent still has the
+	 * dentry with the "bar" name but our inode foo has a link count of 1
+	 * and doesn't have an inode ref with the name "bar" anymore.
+	 *
+	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
+	 * but it guarantees correctness at the expense of occasional full
+	 * transaction commits on fsync if our inode is a directory, or if our
+	 * inode is not a directory, logging its parent unnecessarily.
+	 */
+	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
+
 	path->slots[0]++;
 	if (inode->i_nlink != 1 ||
 	    path->slots[0] >= btrfs_header_nritems(leaf))
@@ -7958,7 +7987,11 @@
 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
 				       u64 first_sector, gfp_t gfp_flags)
 {
-	return btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags);
+	struct bio *bio;
+	bio = btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags);
+	if (bio)
+		bio_associate_current(bio);
+	return bio;
 }
 
 static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 0770c91..0adf542 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1030,6 +1030,7 @@
 	struct extent_map *em;
 	int ret = 1;
 	bool next_mergeable = true;
+	bool prev_mergeable = true;
 
 	/*
 	 * make sure that once we start defragging an extent, we keep on
@@ -1050,13 +1051,16 @@
 		goto out;
 	}
 
+	if (!*defrag_end)
+		prev_mergeable = false;
+
 	next_mergeable = defrag_check_next_extent(inode, em);
 	/*
 	 * we hit a real extent, if it is big or the next extent is not a
 	 * real extent, don't bother defragging it
 	 */
 	if (!compress && (*last_len == 0 || *last_len >= thresh) &&
-	    (em->len >= thresh || !next_mergeable))
+	    (em->len >= thresh || (!next_mergeable && !prev_mergeable)))
 		ret = 0;
 out:
 	/*
@@ -1933,6 +1937,7 @@
 	u64 found_transid;
 	struct extent_buffer *leaf;
 	struct btrfs_ioctl_search_header sh;
+	struct btrfs_key test;
 	unsigned long item_off;
 	unsigned long item_len;
 	int nritems;
@@ -2016,12 +2021,17 @@
 	}
 advance_key:
 	ret = 0;
-	if (key->offset < (u64)-1 && key->offset < sk->max_offset)
+	test.objectid = sk->max_objectid;
+	test.type = sk->max_type;
+	test.offset = sk->max_offset;
+	if (btrfs_comp_cpu_keys(key, &test) >= 0)
+		ret = 1;
+	else if (key->offset < (u64)-1)
 		key->offset++;
-	else if (key->type < (u8)-1 && key->type < sk->max_type) {
+	else if (key->type < (u8)-1) {
 		key->offset = 0;
 		key->type++;
-	} else if (key->objectid < (u64)-1 && key->objectid < sk->max_objectid) {
+	} else if (key->objectid < (u64)-1) {
 		key->offset = 0;
 		key->type = 0;
 		key->objectid++;
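
The rewritten advance logic works because btrfs keys order lexicographically
as the tuple (objectid, type, offset); comparing each field against its own
maximum independently, as the old code did, could stop early or walk past the
(max_objectid, max_type, max_offset) bound. A self-contained sketch of the
fixed scheme:

    #include <stdint.h>

    struct key { uint64_t objectid; uint8_t type; uint64_t offset; };

    /* Lexicographic compare over (objectid, type, offset), mirroring
     * btrfs_comp_cpu_keys(). */
    static int key_cmp(const struct key *a, const struct key *b)
    {
        if (a->objectid != b->objectid)
            return a->objectid < b->objectid ? -1 : 1;
        if (a->type != b->type)
            return a->type < b->type ? -1 : 1;
        if (a->offset != b->offset)
            return a->offset < b->offset ? -1 : 1;
        return 0;
    }

    /* Step to the successor key; returns 1 once the search maximum is hit. */
    static int advance_key(struct key *k, const struct key *max)
    {
        if (key_cmp(k, max) >= 0)
            return 1;
        if (k->offset < UINT64_MAX)
            k->offset++;
        else if (k->type < UINT8_MAX) {
            k->offset = 0;
            k->type++;
        } else {
            k->offset = 0;
            k->type = 0;
            k->objectid++;      /* cannot overflow: k < max implies headroom */
        }
        return 0;
    }

    int main(void)
    {
        struct key k = { 0, 0, 0 }, max = { 0, 0, 5 };
        int steps = 0;

        while (!advance_key(&k, &max))
            steps++;    /* visits offsets 1..5, then the bound stops the walk */
        return steps == 5 ? 0 : 1;
    }
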
@@ -2842,8 +2852,7 @@
 		swap(inode1, inode2);
 
 	mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
-	if (inode1 != inode2)
-		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
+	mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
 }
 
 static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
@@ -2861,8 +2870,7 @@
 		swap(loff1, loff2);
 	}
 	lock_extent_range(inode1, loff1, len);
-	if (inode1 != inode2)
-		lock_extent_range(inode2, loff2, len);
+	lock_extent_range(inode2, loff2, len);
 }
 
 struct cmp_pages {
@@ -3787,13 +3795,7 @@
 		goto out_fput;
 
 	if (!same_inode) {
-		if (inode < src) {
-			mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
-			mutex_lock_nested(&src->i_mutex, I_MUTEX_CHILD);
-		} else {
-			mutex_lock_nested(&src->i_mutex, I_MUTEX_PARENT);
-			mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
-		}
+		btrfs_double_inode_lock(src, inode);
 	} else {
 		mutex_lock(&src->i_mutex);
 	}
@@ -3843,8 +3845,7 @@
 
 		lock_extent_range(src, lock_start, lock_len);
 	} else {
-		lock_extent_range(src, off, len);
-		lock_extent_range(inode, destoff, len);
+		btrfs_double_extent_lock(src, off, inode, destoff, len);
 	}
 
 	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
@@ -3855,9 +3856,7 @@
 
 		unlock_extent(&BTRFS_I(src)->io_tree, lock_start, lock_end);
 	} else {
-		unlock_extent(&BTRFS_I(src)->io_tree, off, off + len - 1);
-		unlock_extent(&BTRFS_I(inode)->io_tree, destoff,
-			      destoff + len - 1);
+		btrfs_double_extent_unlock(src, off, inode, destoff, len);
 	}
 	/*
 	 * Truncate page cache pages so that future reads will see the cloned
@@ -3866,17 +3865,10 @@
 	truncate_inode_pages_range(&inode->i_data, destoff,
 				   PAGE_CACHE_ALIGN(destoff + len) - 1);
 out_unlock:
-	if (!same_inode) {
-		if (inode < src) {
-			mutex_unlock(&src->i_mutex);
-			mutex_unlock(&inode->i_mutex);
-		} else {
-			mutex_unlock(&inode->i_mutex);
-			mutex_unlock(&src->i_mutex);
-		}
-	} else {
+	if (!same_inode)
+		btrfs_double_inode_unlock(src, inode);
+	else
 		mutex_unlock(&src->i_mutex);
-	}
 out_fput:
 	fdput(src_file);
 out_drop_write:
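
All of the two-inode locking in the clone path now funnels through
btrfs_double_inode_lock()/btrfs_double_extent_lock(), which sort the pair
before locking so every caller acquires the mutexes in one global order. The
classic deadlock-avoidance pattern, sketched with pthreads:

    #include <pthread.h>

    struct object { pthread_mutex_t lock; };

    /* Lock two objects in a single global order (by address) so that two
     * threads locking the same pair can never deadlock; callers may pass
     * the objects in either order. */
    void double_lock(struct object *a, struct object *b)
    {
        if (a > b) {            /* sort, as the ioctl helpers do */
            struct object *t = a;
            a = b;
            b = t;
        }
        pthread_mutex_lock(&a->lock);
        if (a != b)
            pthread_mutex_lock(&b->lock);
    }

    void double_unlock(struct object *a, struct object *b)
    {
        pthread_mutex_unlock(&a->lock);
        if (a != b)
            pthread_mutex_unlock(&b->lock);
    }
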
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index f8229ef..d7e6baf 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -241,6 +241,7 @@
  */
 void btrfs_tree_lock(struct extent_buffer *eb)
 {
+	WARN_ON(eb->lock_owner == current->pid);
 again:
 	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
 	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 0a02e24..fcf7265 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -61,9 +61,10 @@
 #define RBIO_CACHE_SIZE 1024
 
 enum btrfs_rbio_ops {
-	BTRFS_RBIO_WRITE	= 0,
-	BTRFS_RBIO_READ_REBUILD	= 1,
-	BTRFS_RBIO_PARITY_SCRUB	= 2,
+	BTRFS_RBIO_WRITE,
+	BTRFS_RBIO_READ_REBUILD,
+	BTRFS_RBIO_PARITY_SCRUB,
+	BTRFS_RBIO_REBUILD_MISSING,
 };
 
 struct btrfs_raid_bio {
@@ -602,6 +603,10 @@
 	    cur->operation == BTRFS_RBIO_PARITY_SCRUB)
 		return 0;
 
+	if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
+	    cur->operation == BTRFS_RBIO_REBUILD_MISSING)
+		return 0;
+
 	return 1;
 }
 
@@ -793,7 +798,10 @@
 
 			if (next->operation == BTRFS_RBIO_READ_REBUILD)
 				async_read_rebuild(next);
-			else if (next->operation == BTRFS_RBIO_WRITE) {
+			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
+				steal_rbio(rbio, next);
+				async_read_rebuild(next);
+			} else if (next->operation == BTRFS_RBIO_WRITE) {
 				steal_rbio(rbio, next);
 				async_rmw_stripe(next);
 			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
@@ -1805,7 +1813,8 @@
 	faila = rbio->faila;
 	failb = rbio->failb;
 
-	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
+	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
+	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
 		spin_lock_irq(&rbio->bio_list_lock);
 		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
 		spin_unlock_irq(&rbio->bio_list_lock);
@@ -1830,7 +1839,8 @@
 			 * if we're rebuilding a read, we have to use
 			 * pages from the bio list
 			 */
-			if (rbio->operation == BTRFS_RBIO_READ_REBUILD &&
+			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
+			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
 			    (stripe == faila || stripe == failb)) {
 				page = page_in_rbio(rbio, stripe, pagenr, 0);
 			} else {
@@ -1939,7 +1949,8 @@
 			 * if we're rebuilding a read, we have to use
 			 * pages from the bio list
 			 */
-			if (rbio->operation == BTRFS_RBIO_READ_REBUILD &&
+			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
+			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
 			    (stripe == faila || stripe == failb)) {
 				page = page_in_rbio(rbio, stripe, pagenr, 0);
 			} else {
@@ -1961,6 +1972,8 @@
 			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
 
 		rbio_orig_end_io(rbio, err);
+	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
+		rbio_orig_end_io(rbio, err);
 	} else if (err == 0) {
 		rbio->faila = -1;
 		rbio->failb = -1;
@@ -2096,7 +2109,8 @@
 	return 0;
 
 cleanup:
-	if (rbio->operation == BTRFS_RBIO_READ_REBUILD)
+	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
+	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
 		rbio_orig_end_io(rbio, -EIO);
 	return -EIO;
 }
@@ -2227,8 +2241,9 @@
 	return rbio;
 }
 
-void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
-				   struct page *page, u64 logical)
+/* Used for both parity scrub and missing-device rebuild. */
+void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
+			    u64 logical)
 {
 	int stripe_offset;
 	int index;
@@ -2662,3 +2677,55 @@
 	if (!lock_stripe_add(rbio))
 		async_scrub_parity(rbio);
 }
+
+/* The following code is used for dev replace of a missing RAID 5/6 device. */
+
+struct btrfs_raid_bio *
+raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
+			  struct btrfs_bio *bbio, u64 length)
+{
+	struct btrfs_raid_bio *rbio;
+
+	rbio = alloc_rbio(root, bbio, length);
+	if (IS_ERR(rbio))
+		return NULL;
+
+	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
+	bio_list_add(&rbio->bio_list, bio);
+	/*
+	 * This is a special bio which is used to hold the completion handler
+	 * and make this rbio look similar to the other rbio types
+	 */
+	ASSERT(!bio->bi_iter.bi_size);
+
+	rbio->faila = find_logical_bio_stripe(rbio, bio);
+	if (rbio->faila == -1) {
+		BUG();
+		kfree(rbio);
+		return NULL;
+	}
+
+	return rbio;
+}
+
+static void missing_raid56_work(struct btrfs_work *work)
+{
+	struct btrfs_raid_bio *rbio;
+
+	rbio = container_of(work, struct btrfs_raid_bio, work);
+	__raid56_parity_recover(rbio);
+}
+
+static void async_missing_raid56(struct btrfs_raid_bio *rbio)
+{
+	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
+			missing_raid56_work, NULL, NULL);
+
+	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
+}
+
+void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
+{
+	if (!lock_stripe_add(rbio))
+		async_missing_raid56(rbio);
+}
diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h
index 2b5d797..8b69469 100644
--- a/fs/btrfs/raid56.h
+++ b/fs/btrfs/raid56.h
@@ -48,15 +48,21 @@
 int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
 			       struct btrfs_bio *bbio, u64 stripe_len);
 
+void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
+			    u64 logical);
+
 struct btrfs_raid_bio *
 raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
 			       struct btrfs_bio *bbio, u64 stripe_len,
 			       struct btrfs_device *scrub_dev,
 			       unsigned long *dbitmap, int stripe_nsectors);
-void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
-				   struct page *page, u64 logical);
 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
 
+struct btrfs_raid_bio *
+raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
+			  struct btrfs_bio *bbio, u64 length);
+void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio);
+
 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info);
 #endif
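
Taken together, the new BTRFS_RBIO_REBUILD_MISSING operation and the
alloc/submit pair above let dev-replace reconstruct stripes from parity when
the source device is gone entirely; the scrub hunks below are the first user.
In outline — kernel context assumed, error handling elided — the intended
call sequence is:

    /* Outline of the usage in scrub_missing_raid56_pages() below. */
    bio = btrfs_io_bio_alloc(GFP_NOFS, 0);         /* zero-size carrier bio */
    bio->bi_iter.bi_sector = logical >> 9;
    bio->bi_private = sblock;
    bio->bi_end_io = my_end_io;                    /* completion handler */

    rbio = raid56_alloc_missing_rbio(root, bio, bbio, length);
    for (i = 0; i < page_count; i++)
        raid56_add_scrub_pages(rbio, pages[i], logicals[i]);
    raid56_submit_missing_rbio(rbio);              /* rebuild from parity */
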
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 0e7beea..4645cd1 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -328,6 +328,7 @@
 	struct btrfs_device *prev_dev;
 	u32 blocksize;
 	u64 length;
+	int real_stripes;
 	int nzones = 0;
 	int i;
 	unsigned long index = logical >> PAGE_CACHE_SHIFT;
@@ -369,7 +370,8 @@
 		goto error;
 	}
 
-	for (nzones = 0; nzones < bbio->num_stripes; ++nzones) {
+	real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
+	for (nzones = 0; nzones < real_stripes; ++nzones) {
 		struct reada_zone *zone;
 
 		dev = bbio->stripes[nzones].dev;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 88cbb59..303babe 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2523,8 +2523,7 @@
  * counted. return -ENOENT if the block is root of reloc tree.
  */
 static noinline_for_stack
-struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans,
-				   struct backref_node *node)
+struct btrfs_root *select_one_root(struct backref_node *node)
 {
 	struct backref_node *next;
 	struct btrfs_root *root;
@@ -2912,7 +2911,7 @@
 		return 0;
 
 	BUG_ON(node->processed);
-	root = select_one_root(trans, node);
+	root = select_one_root(node);
 	if (root == ERR_PTR(-ENOENT)) {
 		update_processed_blocks(rc, node);
 		goto out;
@@ -3755,8 +3754,7 @@
  * helper to find next unprocessed extent
  */
 static noinline_for_stack
-int find_next_extent(struct btrfs_trans_handle *trans,
-		     struct reloc_control *rc, struct btrfs_path *path,
+int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
 		     struct btrfs_key *extent_key)
 {
 	struct btrfs_key key;
@@ -3951,7 +3949,7 @@
 			continue;
 		}
 
-		ret = find_next_extent(trans, rc, path, &key);
+		ret = find_next_extent(rc, path, &key);
 		if (ret < 0)
 			err = ret;
 		if (ret != 0)
@@ -3976,6 +3974,10 @@
 			       sizeof(struct btrfs_extent_item_v0));
 			ret = get_ref_objectid_v0(rc, path, &key, &ref_owner,
 						  &path_change);
+			if (ret < 0) {
+				err = ret;
+				break;
+			}
 			if (ref_owner < BTRFS_FIRST_FREE_OBJECTID)
 				flags = BTRFS_EXTENT_FLAG_TREE_BLOCK;
 			else
@@ -4140,7 +4142,7 @@
 	struct btrfs_trans_handle *trans;
 	struct btrfs_root *root;
 	struct btrfs_key key;
-	u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
+	u64 objectid;
 	int err = 0;
 
 	root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
@@ -4215,14 +4217,12 @@
 	rc->block_group = btrfs_lookup_block_group(fs_info, group_start);
 	BUG_ON(!rc->block_group);
 
-	if (!rc->block_group->ro) {
-		ret = btrfs_set_block_group_ro(extent_root, rc->block_group);
-		if (ret) {
-			err = ret;
-			goto out;
-		}
-		rw = 1;
+	ret = btrfs_inc_block_group_ro(extent_root, rc->block_group);
+	if (ret) {
+		err = ret;
+		goto out;
 	}
+	rw = 1;
 
 	path = btrfs_alloc_path();
 	if (!path) {
@@ -4294,7 +4294,7 @@
 	WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
 out:
 	if (err && rw)
-		btrfs_set_block_group_rw(extent_root, rc->block_group);
+		btrfs_dec_block_group_ro(extent_root, rc->block_group);
 	iput(rc->data_inode);
 	btrfs_put_block_group(rc->block_group);
 	kfree(rc);
@@ -4594,8 +4594,7 @@
  * called before creating snapshot. it calculates metadata reservation
  * requried for relocating tree blocks in the snapshot
  */
-void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
-			      struct btrfs_pending_snapshot *pending,
+void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
 			      u64 *bytes_to_reserve)
 {
 	struct btrfs_root *root;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 9c146d8..9a11db0 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -125,6 +125,7 @@
 		/* It is for the data with checksum */
 		unsigned int	data_corrected:1;
 	};
+	struct btrfs_work	work;
 };
 
 /* Used for the chunks with parity stripe such RAID5/6 */
@@ -332,11 +333,14 @@
 	}
 }
 
-static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+static void scrub_pause_on(struct btrfs_fs_info *fs_info)
 {
 	atomic_inc(&fs_info->scrubs_paused);
 	wake_up(&fs_info->scrub_pause_wait);
+}
 
+static void scrub_pause_off(struct btrfs_fs_info *fs_info)
+{
 	mutex_lock(&fs_info->scrub_lock);
 	__scrub_blocked_if_needed(fs_info);
 	atomic_dec(&fs_info->scrubs_paused);
@@ -345,6 +349,12 @@
 	wake_up(&fs_info->scrub_pause_wait);
 }
 
+static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
+{
+	scrub_pause_on(fs_info);
+	scrub_pause_off(fs_info);
+}
+
 /*
  * used for workers that require transaction commits (i.e., for the
  * NOCOW case)
@@ -2074,21 +2084,7 @@
 	sbio = sctx->bios[sctx->curr];
 	sctx->curr = -1;
 	scrub_pending_bio_inc(sctx);
-
-	if (!sbio->bio->bi_bdev) {
-		/*
-		 * this case should not happen. If btrfs_map_block() is
-		 * wrong, it could happen for dev-replace operations on
-		 * missing devices when no mirrors are available, but in
-		 * this case it should already fail the mount.
-		 * This case is handled correctly (but _very_ slowly).
-		 */
-		printk_ratelimited(KERN_WARNING
-			"BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
-		bio_io_error(sbio->bio);
-	} else {
-		btrfsic_submit_bio(READ, sbio->bio);
-	}
+	btrfsic_submit_bio(READ, sbio->bio);
 }
 
 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
@@ -2165,6 +2161,134 @@
 	return 0;
 }
 
+static void scrub_missing_raid56_end_io(struct bio *bio)
+{
+	struct scrub_block *sblock = bio->bi_private;
+	struct btrfs_fs_info *fs_info = sblock->sctx->dev_root->fs_info;
+
+	if (bio->bi_error)
+		sblock->no_io_error_seen = 0;
+
+	btrfs_queue_work(fs_info->scrub_workers, &sblock->work);
+}
+
+static void scrub_missing_raid56_worker(struct btrfs_work *work)
+{
+	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
+	struct scrub_ctx *sctx = sblock->sctx;
+	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+	unsigned int is_metadata;
+	unsigned int have_csum;
+	u8 *csum;
+	u64 generation;
+	u64 logical;
+	struct btrfs_device *dev;
+
+	is_metadata = !(sblock->pagev[0]->flags & BTRFS_EXTENT_FLAG_DATA);
+	have_csum = sblock->pagev[0]->have_csum;
+	csum = sblock->pagev[0]->csum;
+	generation = sblock->pagev[0]->generation;
+	logical = sblock->pagev[0]->logical;
+	dev = sblock->pagev[0]->dev;
+
+	if (sblock->no_io_error_seen) {
+		scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
+					     have_csum, csum, generation,
+					     sctx->csum_size);
+	}
+
+	if (!sblock->no_io_error_seen) {
+		spin_lock(&sctx->stat_lock);
+		sctx->stat.read_errors++;
+		spin_unlock(&sctx->stat_lock);
+		printk_ratelimited_in_rcu(KERN_ERR
+			"BTRFS: I/O error rebuilding logical %llu for dev %s\n",
+			logical, rcu_str_deref(dev->name));
+	} else if (sblock->header_error || sblock->checksum_error) {
+		spin_lock(&sctx->stat_lock);
+		sctx->stat.uncorrectable_errors++;
+		spin_unlock(&sctx->stat_lock);
+		printk_ratelimited_in_rcu(KERN_ERR
+			"BTRFS: failed to rebuild valid logical %llu for dev %s\n",
+			logical, rcu_str_deref(dev->name));
+	} else {
+		scrub_write_block_to_dev_replace(sblock);
+	}
+
+	scrub_block_put(sblock);
+
+	if (sctx->is_dev_replace &&
+	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
+		mutex_lock(&sctx->wr_ctx.wr_lock);
+		scrub_wr_submit(sctx);
+		mutex_unlock(&sctx->wr_ctx.wr_lock);
+	}
+
+	scrub_pending_bio_dec(sctx);
+}
+
+static void scrub_missing_raid56_pages(struct scrub_block *sblock)
+{
+	struct scrub_ctx *sctx = sblock->sctx;
+	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+	u64 length = sblock->page_count * PAGE_SIZE;
+	u64 logical = sblock->pagev[0]->logical;
+	struct btrfs_bio *bbio;
+	struct bio *bio;
+	struct btrfs_raid_bio *rbio;
+	int ret;
+	int i;
+
+	ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
+			       &bbio, 0, 1);
+	if (ret || !bbio || !bbio->raid_map)
+		goto bbio_out;
+
+	if (WARN_ON(!sctx->is_dev_replace ||
+		    !(bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK))) {
+		/*
+		 * We shouldn't be scrubbing a missing device. Even for dev
+		 * replace, we should only get here for RAID 5/6. We either
+		 * managed to mount something with no mirrors remaining or
+		 * there's a bug in scrub_remap_extent()/btrfs_map_block().
+		 */
+		goto bbio_out;
+	}
+
+	bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
+	if (!bio)
+		goto bbio_out;
+
+	bio->bi_iter.bi_sector = logical >> 9;
+	bio->bi_private = sblock;
+	bio->bi_end_io = scrub_missing_raid56_end_io;
+
+	rbio = raid56_alloc_missing_rbio(sctx->dev_root, bio, bbio, length);
+	if (!rbio)
+		goto rbio_out;
+
+	for (i = 0; i < sblock->page_count; i++) {
+		struct scrub_page *spage = sblock->pagev[i];
+
+		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
+	}
+
+	btrfs_init_work(&sblock->work, btrfs_scrub_helper,
+			scrub_missing_raid56_worker, NULL, NULL);
+	scrub_block_get(sblock);
+	scrub_pending_bio_inc(sctx);
+	raid56_submit_missing_rbio(rbio);
+	return;
+
+rbio_out:
+	bio_put(bio);
+bbio_out:
+	btrfs_put_bbio(bbio);
+	spin_lock(&sctx->stat_lock);
+	sctx->stat.malloc_errors++;
+	spin_unlock(&sctx->stat_lock);
+}
+
 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 		       u64 physical, struct btrfs_device *dev, u64 flags,
 		       u64 gen, int mirror_num, u8 *csum, int force,
@@ -2228,19 +2352,27 @@
 	}
 
 	WARN_ON(sblock->page_count == 0);
-	for (index = 0; index < sblock->page_count; index++) {
-		struct scrub_page *spage = sblock->pagev[index];
-		int ret;
+	if (dev->missing) {
+		/*
+		 * This case should only be hit for RAID 5/6 device replace. See
+		 * the comment in scrub_missing_raid56_pages() for details.
+		 */
+		scrub_missing_raid56_pages(sblock);
+	} else {
+		for (index = 0; index < sblock->page_count; index++) {
+			struct scrub_page *spage = sblock->pagev[index];
+			int ret;
 
-		ret = scrub_add_page_to_rd_bio(sctx, spage);
-		if (ret) {
-			scrub_block_put(sblock);
-			return ret;
+			ret = scrub_add_page_to_rd_bio(sctx, spage);
+			if (ret) {
+				scrub_block_put(sblock);
+				return ret;
+			}
 		}
-	}
 
-	if (force)
-		scrub_submit(sctx);
+		if (force)
+			scrub_submit(sctx);
+	}
 
 	/* last one frees, either here or in bio completion for last page */
 	scrub_block_put(sblock);
@@ -2551,6 +2683,11 @@
 	u8 csum[BTRFS_CSUM_SIZE];
 	u32 blocksize;
 
+	if (dev->missing) {
+		scrub_parity_mark_sectors_error(sparity, logical, len);
+		return 0;
+	}
+
 	if (flags & BTRFS_EXTENT_FLAG_DATA) {
 		blocksize = sctx->sectorsize;
 	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
@@ -2689,7 +2826,7 @@
 			   sparity->nsectors))
 		goto out;
 
-	length = sparity->logic_end - sparity->logic_start + 1;
+	length = sparity->logic_end - sparity->logic_start;
 	ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
 			       sparity->logic_start,
 			       &length, &bbio, 0, 1);
@@ -2712,8 +2849,7 @@
 		goto rbio_out;
 
 	list_for_each_entry(spage, &sparity->spages, list)
-		raid56_parity_add_scrub_pages(rbio, spage->page,
-					      spage->logical);
+		raid56_add_scrub_pages(rbio, spage->page, spage->logical);
 
 	scrub_pending_bio_inc(sctx);
 	raid56_parity_submit_scrub_rbio(rbio);
@@ -2761,6 +2897,7 @@
 	struct btrfs_root *root = fs_info->extent_root;
 	struct btrfs_root *csum_root = fs_info->csum_root;
 	struct btrfs_extent_item *extent;
+	struct btrfs_bio *bbio = NULL;
 	u64 flags;
 	int ret;
 	int slot;
@@ -2770,6 +2907,7 @@
 	u64 extent_logical;
 	u64 extent_physical;
 	u64 extent_len;
+	u64 mapped_length;
 	struct btrfs_device *extent_dev;
 	struct scrub_parity *sparity;
 	int nsectors;
@@ -2843,6 +2981,10 @@
 			}
 			btrfs_item_key_to_cpu(l, &key, slot);
 
+			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
+			    key.type != BTRFS_METADATA_ITEM_KEY)
+				goto next;
+
 			if (key.type == BTRFS_METADATA_ITEM_KEY)
 				bytes = root->nodesize;
 			else
@@ -2851,11 +2993,7 @@
 			if (key.objectid + bytes <= logic_start)
 				goto next;
 
-			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
-			    key.type != BTRFS_METADATA_ITEM_KEY)
-				goto next;
-
-			if (key.objectid > logic_end) {
+			if (key.objectid >= logic_end) {
 				stop_loop = 1;
 				break;
 			}
@@ -2868,11 +3006,12 @@
 			flags = btrfs_extent_flags(l, extent);
 			generation = btrfs_extent_generation(l, extent);
 
-			if (key.objectid < logic_start &&
-			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
-				btrfs_err(fs_info,
-					  "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
-					   key.objectid, logic_start);
+			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
+			    (key.objectid < logic_start ||
+			     key.objectid + bytes >
+			     logic_start + map->stripe_len)) {
+				btrfs_err(fs_info, "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
+					  key.objectid, logic_start);
 				goto next;
 			}
 again:
@@ -2892,10 +3031,21 @@
 			scrub_parity_mark_sectors_data(sparity, extent_logical,
 						       extent_len);
 
-			scrub_remap_extent(fs_info, extent_logical,
-					   extent_len, &extent_physical,
-					   &extent_dev,
-					   &extent_mirror_num);
+			mapped_length = extent_len;
+			ret = btrfs_map_block(fs_info, READ, extent_logical,
+					      &mapped_length, &bbio, 0);
+			if (!ret) {
+				if (!bbio || mapped_length < extent_len)
+					ret = -EIO;
+			}
+			if (ret) {
+				btrfs_put_bbio(bbio);
+				goto out;
+			}
+			extent_physical = bbio->stripes[0].physical;
+			extent_mirror_num = bbio->mirror_num;
+			extent_dev = bbio->stripes[0].dev;
+			btrfs_put_bbio(bbio);
 
 			ret = btrfs_lookup_csums_range(csum_root,
 						extent_logical,
@@ -2910,10 +3060,12 @@
 						      extent_dev, flags,
 						      generation,
 						      extent_mirror_num);
+
+			scrub_free_csums(sctx);
+
 			if (ret)
 				goto out;
 
-			scrub_free_csums(sctx);
 			if (extent_logical + extent_len <
 			    key.objectid + bytes) {
 				logic_start += map->stripe_len;
@@ -2942,7 +3094,7 @@
 out:
 	if (ret < 0)
 		scrub_parity_mark_sectors_error(sparity, logic_start,
-						logic_end - logic_start + 1);
+						logic_end - logic_start);
 	scrub_parity_put(sparity);
 	scrub_submit(sctx);
 	mutex_lock(&sctx->wr_ctx.wr_lock);
@@ -3091,22 +3243,6 @@
 	 */
 	ret = 0;
 	while (physical < physical_end) {
-		/* for raid56, we skip parity stripe */
-		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
-			ret = get_raid56_logic_offset(physical, num,
-					map, &logical, &stripe_logical);
-			logical += base;
-			if (ret) {
-				stripe_logical += base;
-				stripe_end = stripe_logical + increment - 1;
-				ret = scrub_raid56_parity(sctx, map, scrub_dev,
-						ppath, stripe_logical,
-						stripe_end);
-				if (ret)
-					goto out;
-				goto skip;
-			}
-		}
 		/*
 		 * canceled?
 		 */
@@ -3131,6 +3267,24 @@
 			scrub_blocked_if_needed(fs_info);
 		}
 
+		/* for raid56, we skip parity stripe */
+		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+			ret = get_raid56_logic_offset(physical, num, map,
+						      &logical,
+						      &stripe_logical);
+			logical += base;
+			if (ret) {
+				stripe_logical += base;
+				stripe_end = stripe_logical + increment;
+				ret = scrub_raid56_parity(sctx, map, scrub_dev,
+							  ppath, stripe_logical,
+							  stripe_end);
+				if (ret)
+					goto out;
+				goto skip;
+			}
+		}
+
 		if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
 			key.type = BTRFS_METADATA_ITEM_KEY;
 		else
@@ -3175,6 +3329,10 @@
 			}
 			btrfs_item_key_to_cpu(l, &key, slot);
 
+			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
+			    key.type != BTRFS_METADATA_ITEM_KEY)
+				goto next;
+
 			if (key.type == BTRFS_METADATA_ITEM_KEY)
 				bytes = root->nodesize;
 			else
@@ -3183,10 +3341,6 @@
 			if (key.objectid + bytes <= logical)
 				goto next;
 
-			if (key.type != BTRFS_EXTENT_ITEM_KEY &&
-			    key.type != BTRFS_METADATA_ITEM_KEY)
-				goto next;
-
 			if (key.objectid >= logical + map->stripe_len) {
 				/* out of this device extent */
 				if (key.objectid >= logic_end)
@@ -3199,8 +3353,10 @@
 			flags = btrfs_extent_flags(l, extent);
 			generation = btrfs_extent_generation(l, extent);
 
-			if (key.objectid < logical &&
-			    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
+			if ((flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) &&
+			    (key.objectid < logical ||
+			     key.objectid + bytes >
+			     logical + map->stripe_len)) {
 				btrfs_err(fs_info,
 					   "scrub: tree block %llu spanning "
 					   "stripes, ignored. logical=%llu",
@@ -3234,9 +3390,11 @@
 						   &extent_dev,
 						   &extent_mirror_num);
 
-			ret = btrfs_lookup_csums_range(csum_root, logical,
-						logical + map->stripe_len - 1,
-						&sctx->csum_list, 1);
+			ret = btrfs_lookup_csums_range(csum_root,
+						       extent_logical,
+						       extent_logical +
+						       extent_len - 1,
+						       &sctx->csum_list, 1);
 			if (ret)
 				goto out;
 
@@ -3244,10 +3402,12 @@
 					   extent_physical, extent_dev, flags,
 					   generation, extent_mirror_num,
 					   extent_logical - logical + physical);
+
+			scrub_free_csums(sctx);
+
 			if (ret)
 				goto out;
 
-			scrub_free_csums(sctx);
 			if (extent_logical + extent_len <
 			    key.objectid + bytes) {
 				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
@@ -3265,7 +3425,7 @@
 					if (ret && physical < physical_end) {
 						stripe_logical += base;
 						stripe_end = stripe_logical +
-								increment - 1;
+								increment;
 						ret = scrub_raid56_parity(sctx,
 							map, scrub_dev, ppath,
 							stripe_logical,
@@ -3374,7 +3534,7 @@
 	u64 chunk_tree;
 	u64 chunk_objectid;
 	u64 chunk_offset;
-	int ret;
+	int ret = 0;
 	int slot;
 	struct extent_buffer *l;
 	struct btrfs_key key;
@@ -3402,8 +3562,14 @@
 			if (path->slots[0] >=
 			    btrfs_header_nritems(path->nodes[0])) {
 				ret = btrfs_next_leaf(root, path);
-				if (ret)
+				if (ret < 0)
 					break;
+				if (ret > 0) {
+					ret = 0;
+					break;
+				}
+			} else {
+				ret = 0;
 			}
 		}
 
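
The hunk above exists because btrfs_next_leaf(), like the other btrfs search
helpers, is tri-state: negative is a real error, zero means the path now
points at an item, and positive means the tree is exhausted. Letting a
trailing 1 escape turned normal termination into a bogus failure. A
self-contained illustration of keeping that convention straight:

    #include <stdio.h>

    /* Tri-state iterator: < 0 error, 0 produced an item, > 0 exhausted. */
    static int next_item(int *cursor, int nitems)
    {
        if (nitems < 0)
            return -1;              /* error: propagate */
        if (*cursor >= nitems)
            return 1;               /* exhausted: not an error */
        (*cursor)++;
        return 0;
    }

    int main(void)
    {
        int cursor = 0, ret;

        while ((ret = next_item(&cursor, 3)) == 0)
            printf("item %d\n", cursor);

        return ret < 0 ? 1 : 0;     /* a trailing 1 must map to success */
    }
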
@@ -3445,6 +3611,22 @@
 		if (!cache)
 			goto skip;
 
+		/*
+		 * we need to call btrfs_inc_block_group_ro() with scrubs_paused,
+		 * to avoid a deadlock caused by:
+		 * btrfs_inc_block_group_ro()
+		 * -> btrfs_wait_for_commit()
+		 * -> btrfs_commit_transaction()
+		 * -> btrfs_scrub_pause()
+		 */
+		scrub_pause_on(fs_info);
+		ret = btrfs_inc_block_group_ro(root, cache);
+		scrub_pause_off(fs_info);
+		if (ret) {
+			btrfs_put_block_group(cache);
+			break;
+		}
+
 		dev_replace->cursor_right = found_key.offset + length;
 		dev_replace->cursor_left = found_key.offset;
 		dev_replace->item_needs_writeback = 1;
@@ -3470,8 +3652,8 @@
 
 		wait_event(sctx->list_wait,
 			   atomic_read(&sctx->bios_in_flight) == 0);
-		atomic_inc(&fs_info->scrubs_paused);
-		wake_up(&fs_info->scrub_pause_wait);
+
+		scrub_pause_on(fs_info);
 
 		/*
 		 * must be called before we decrease @scrub_paused.
@@ -3482,11 +3664,9 @@
 			   atomic_read(&sctx->workers_pending) == 0);
 		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
 
-		mutex_lock(&fs_info->scrub_lock);
-		__scrub_blocked_if_needed(fs_info);
-		atomic_dec(&fs_info->scrubs_paused);
-		mutex_unlock(&fs_info->scrub_lock);
-		wake_up(&fs_info->scrub_pause_wait);
+		scrub_pause_off(fs_info);
+
+		btrfs_dec_block_group_ro(root, cache);
 
 		btrfs_put_block_group(cache);
 		if (ret)
@@ -3510,11 +3690,7 @@
 
 	btrfs_free_path(path);
 
-	/*
-	 * ret can still be 1 from search_slot or next_leaf,
-	 * that's not an error
-	 */
-	return ret < 0 ? ret : 0;
+	return ret;
 }
 
 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 6bad633..2b07b35 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -69,7 +69,7 @@
 
 static int btrfs_remount(struct super_block *sb, int *flags, char *data);
 
-static const char *btrfs_decode_error(int errno)
+const char *btrfs_decode_error(int errno)
 {
 	char *errstr = "unknown";
 
@@ -1033,6 +1033,7 @@
 	sb->s_flags |= MS_POSIXACL;
 #endif
 	sb->s_flags |= MS_I_VERSION;
+	sb->s_iflags |= SB_I_CGROUPWB;
 	err = open_ctree(sb, fs_devices, (char *)data);
 	if (err) {
 		printk(KERN_ERR "BTRFS: open_ctree failed\n");
@@ -1650,6 +1651,17 @@
 
 		sb->s_flags |= MS_RDONLY;
 
+		/*
+		 * Setting MS_RDONLY will put the cleaner thread to
+		 * sleep at the next loop if it's already active.
+		 * If it's already asleep, we'll leave unused block
+		 * groups on disk until we're mounted read-write again
+		 * unless we clean them up here.
+		 */
+		mutex_lock(&root->fs_info->cleaner_mutex);
+		btrfs_delete_unused_bgs(fs_info);
+		mutex_unlock(&root->fs_info->cleaner_mutex);
+
 		btrfs_dev_replace_suspend_for_unmount(fs_info);
 		btrfs_scrub_cancel(fs_info);
 		btrfs_pause_balance(fs_info);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index f5021fc..68ad89e 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -258,6 +258,8 @@
 	mutex_init(&cur_trans->cache_write_mutex);
 	cur_trans->num_dirty_bgs = 0;
 	spin_lock_init(&cur_trans->dirty_bgs_lock);
+	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
+	spin_lock_init(&cur_trans->deleted_bgs_lock);
 	list_add_tail(&cur_trans->list, &fs_info->trans_list);
 	extent_io_tree_init(&cur_trans->dirty_pages,
 			     fs_info->btree_inode->i_mapping);
@@ -1301,7 +1303,7 @@
 	 */
 	btrfs_set_skip_qgroup(trans, objectid);
 
-	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
+	btrfs_reloc_pre_snapshot(pending, &to_reserve);
 
 	if (to_reserve > 0) {
 		pending->error = btrfs_block_rsv_add(root,
@@ -1893,8 +1895,11 @@
 			spin_unlock(&root->fs_info->trans_lock);
 
 			wait_for_commit(root, prev_trans);
+			ret = prev_trans->aborted;
 
 			btrfs_put_transaction(prev_trans);
+			if (ret)
+				goto cleanup_transaction;
 		} else {
 			spin_unlock(&root->fs_info->trans_lock);
 		}
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index eb09c20..edc2fbc 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -74,6 +74,8 @@
 	 */
 	struct mutex cache_write_mutex;
 	spinlock_t dirty_bgs_lock;
+	struct list_head deleted_bgs;
+	spinlock_t deleted_bgs_lock;
 	struct btrfs_delayed_ref_root delayed_refs;
 	int aborted;
 	int dirty_bg_run;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 9c45431..1bbaace 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -140,55 +140,46 @@
 			   struct btrfs_root *root,
 			   struct btrfs_log_ctx *ctx)
 {
-	int index;
-	int ret;
+	int ret = 0;
 
 	mutex_lock(&root->log_mutex);
+
 	if (root->log_root) {
 		if (btrfs_need_log_full_commit(root->fs_info, trans)) {
 			ret = -EAGAIN;
 			goto out;
 		}
+
 		if (!root->log_start_pid) {
-			root->log_start_pid = current->pid;
 			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
+			root->log_start_pid = current->pid;
 		} else if (root->log_start_pid != current->pid) {
 			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
 		}
+	} else {
+		mutex_lock(&root->fs_info->tree_log_mutex);
+		if (!root->fs_info->log_root_tree)
+			ret = btrfs_init_log_root_tree(trans, root->fs_info);
+		mutex_unlock(&root->fs_info->tree_log_mutex);
+		if (ret)
+			goto out;
 
-		atomic_inc(&root->log_batch);
-		atomic_inc(&root->log_writers);
-		if (ctx) {
-			index = root->log_transid % 2;
-			list_add_tail(&ctx->list, &root->log_ctxs[index]);
-			ctx->log_transid = root->log_transid;
-		}
-		mutex_unlock(&root->log_mutex);
-		return 0;
-	}
-
-	ret = 0;
-	mutex_lock(&root->fs_info->tree_log_mutex);
-	if (!root->fs_info->log_root_tree)
-		ret = btrfs_init_log_root_tree(trans, root->fs_info);
-	mutex_unlock(&root->fs_info->tree_log_mutex);
-	if (ret)
-		goto out;
-
-	if (!root->log_root) {
 		ret = btrfs_add_log_tree(trans, root);
 		if (ret)
 			goto out;
+
+		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
+		root->log_start_pid = current->pid;
 	}
-	clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
-	root->log_start_pid = current->pid;
+
 	atomic_inc(&root->log_batch);
 	atomic_inc(&root->log_writers);
 	if (ctx) {
-		index = root->log_transid % 2;
+		int index = root->log_transid % 2;
 		list_add_tail(&ctx->list, &root->log_ctxs[index]);
 		ctx->log_transid = root->log_transid;
 	}
+
 out:
 	mutex_unlock(&root->log_mutex);
 	return ret;
@@ -731,12 +722,66 @@
 						&ordered_sums, 0);
 			if (ret)
 				goto out;
+			/*
+			 * Now delete all existing csums in the csum root that
+			 * cover our range. We do this because we can have an
+			 * extent that is completely referenced by one file
+			 * extent item and partially referenced by another
+			 * file extent item (like after using the clone or
+			 * extent_same ioctls). In this case if we end up doing
+			 * the replay of the one that partially references the
+			 * extent first, and we do not do the csum deletion
+			 * below, we can get 2 csum items in the csum tree that
+			 * overlap each other. For example, imagine our log has
+			 * the two following file extent items:
+			 *
+			 * key (257 EXTENT_DATA 409600)
+			 *     extent data disk byte 12845056 nr 102400
+			 *     extent data offset 20480 nr 20480 ram 102400
+			 *
+			 * key (257 EXTENT_DATA 819200)
+			 *     extent data disk byte 12845056 nr 102400
+			 *     extent data offset 0 nr 102400 ram 102400
+			 *
+			 * Where the second one fully references the 100K extent
+			 * that starts at disk byte 12845056, and the log tree
+			 * has a single csum item that covers the entire range
+			 * of the extent:
+			 *
+			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
+			 *
+			 * After the first file extent item is replayed, the
+			 * csum tree gets the following csum item:
+			 *
+			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
+			 *
+			 * Which covers the 20K sub-range starting at offset 20K
+			 * of our extent. Now when we replay the second file
+			 * extent item, if we do not delete existing csum items
+			 * that cover any of its blocks, we end up getting two
+			 * csum items in our csum tree that overlap each other:
+			 *
+			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
+			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
+			 *
+			 * Which is a problem, because after this anyone trying
+			 * to look up the checksum of any block of our
+			 * extent starting at an offset of 40K or higher, will
+			 * end up looking at the second csum item only, which
+			 * does not contain the checksum for any block starting
+			 * at offset 40K or higher of our extent.
+			 */
 			while (!list_empty(&ordered_sums)) {
 				struct btrfs_ordered_sum *sums;
 				sums = list_entry(ordered_sums.next,
 						struct btrfs_ordered_sum,
 						list);
 				if (!ret)
+					ret = btrfs_del_csums(trans,
+						      root->fs_info->csum_root,
+						      sums->bytenr,
+						      sums->len);
+				if (!ret)
 					ret = btrfs_csum_file_blocks(trans,
 						root->fs_info->csum_root,
 						sums);
@@ -1549,9 +1594,8 @@
  */
 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
 				    struct btrfs_root *root,
-				    struct btrfs_path *path,
 				    u64 dirid, u64 index,
-				    char *name, int name_len, u8 type,
+				    char *name, int name_len,
 				    struct btrfs_key *location)
 {
 	struct inode *inode;
@@ -1613,6 +1657,9 @@
  * not exist in the FS, it is skipped.  fsyncs on directories
  * do not force down inodes inside that directory, just changes to the
  * names or unlinks in a directory.
+ *
+ * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
+ * non-existing inode) and 1 if the name was replayed.
  */
 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
 				    struct btrfs_root *root,
@@ -1631,6 +1678,7 @@
 	int exists;
 	int ret = 0;
 	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
+	bool name_added = false;
 
 	dir = read_one_inode(root, key->objectid);
 	if (!dir)
@@ -1708,6 +1756,8 @@
 	}
 	kfree(name);
 	iput(dir);
+	if (!ret && name_added)
+		ret = 1;
 	return ret;
 
 insert:
@@ -1719,10 +1769,12 @@
 		goto out;
 	}
 	btrfs_release_path(path);
-	ret = insert_one_name(trans, root, path, key->objectid, key->offset,
-			      name, name_len, log_type, &log_key);
+	ret = insert_one_name(trans, root, key->objectid, key->offset,
+			      name, name_len, &log_key);
 	if (ret && ret != -ENOENT && ret != -EEXIST)
 		goto out;
+	if (!ret)
+		name_added = true;
 	update_size = false;
 	ret = 0;
 	goto out;
@@ -1740,12 +1792,13 @@
 					struct extent_buffer *eb, int slot,
 					struct btrfs_key *key)
 {
-	int ret;
+	int ret = 0;
 	u32 item_size = btrfs_item_size_nr(eb, slot);
 	struct btrfs_dir_item *di;
 	int name_len;
 	unsigned long ptr;
 	unsigned long ptr_end;
+	struct btrfs_path *fixup_path = NULL;
 
 	ptr = btrfs_item_ptr_offset(eb, slot);
 	ptr_end = ptr + item_size;
@@ -1755,12 +1808,59 @@
 			return -EIO;
 		name_len = btrfs_dir_name_len(eb, di);
 		ret = replay_one_name(trans, root, path, eb, di, key);
-		if (ret)
-			return ret;
+		if (ret < 0)
+			break;
 		ptr = (unsigned long)(di + 1);
 		ptr += name_len;
+
+		/*
+		 * If this entry refers to a non-directory (directories can not
+		 * have a link count > 1) and it was added in the transaction
+		 * that was not committed, make sure we fixup the link count of
+		 * the inode the entry points to. Otherwise something like
+		 * the following would result in a directory pointing to an
+		 * inode with a wrong link count that does not account for this dir
+		 * entry:
+		 *
+		 * mkdir testdir
+		 * touch testdir/foo
+		 * touch testdir/bar
+		 * sync
+		 *
+		 * ln testdir/bar testdir/bar_link
+		 * ln testdir/foo testdir/foo_link
+		 * xfs_io -c "fsync" testdir/bar
+		 *
+		 * <power failure>
+		 *
+		 * mount fs, log replay happens
+		 *
+		 * File foo would remain with a link count of 1 when it has two
+		 * entries pointing to it in the directory testdir. This would
+		 * make it impossible to ever delete the parent directory, as
+		 * it would result in stale dentries that can never be deleted.
+		 */
+		if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
+			struct btrfs_key di_key;
+
+			if (!fixup_path) {
+				fixup_path = btrfs_alloc_path();
+				if (!fixup_path) {
+					ret = -ENOMEM;
+					break;
+				}
+			}
+
+			btrfs_dir_item_key_to_cpu(eb, di, &di_key);
+			ret = link_to_fixup_dir(trans, root, fixup_path,
+						di_key.objectid);
+			if (ret)
+				break;
+		}
+		ret = 0;
 	}
-	return 0;
+	btrfs_free_path(fixup_path);
+	return ret;
 }
 
 /*
@@ -2535,8 +2635,7 @@
 	return ret;
 }
 
-static void wait_log_commit(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root, int transid)
+static void wait_log_commit(struct btrfs_root *root, int transid)
 {
 	DEFINE_WAIT(wait);
 	int index = transid % 2;
@@ -2561,8 +2660,7 @@
 		 atomic_read(&root->log_commit[index]));
 }
 
-static void wait_for_writer(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root)
+static void wait_for_writer(struct btrfs_root *root)
 {
 	DEFINE_WAIT(wait);
 
@@ -2642,7 +2740,7 @@
 
 	index1 = log_transid % 2;
 	if (atomic_read(&root->log_commit[index1])) {
-		wait_log_commit(trans, root, log_transid);
+		wait_log_commit(root, log_transid);
 		mutex_unlock(&root->log_mutex);
 		return ctx->log_ret;
 	}
@@ -2651,7 +2749,7 @@
 
 	/* wait for previous tree log sync to complete */
 	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
-		wait_log_commit(trans, root, log_transid - 1);
+		wait_log_commit(root, log_transid - 1);
 
 	while (1) {
 		int batch = atomic_read(&root->log_batch);
@@ -2662,7 +2760,7 @@
 			schedule_timeout_uninterruptible(1);
 			mutex_lock(&root->log_mutex);
 		}
-		wait_for_writer(trans, root);
+		wait_for_writer(root);
 		if (batch == atomic_read(&root->log_batch))
 			break;
 	}
@@ -2759,7 +2857,7 @@
 		ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages,
 						mark);
 		btrfs_wait_logged_extents(trans, log, log_transid);
-		wait_log_commit(trans, log_root_tree,
+		wait_log_commit(log_root_tree,
 				root_log_ctx.log_transid);
 		mutex_unlock(&log_root_tree->log_mutex);
 		if (!ret)
@@ -2770,11 +2868,11 @@
 	atomic_set(&log_root_tree->log_commit[index2], 1);
 
 	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
-		wait_log_commit(trans, log_root_tree,
+		wait_log_commit(log_root_tree,
 				root_log_ctx.log_transid - 1);
 	}
 
-	wait_for_writer(trans, log_root_tree);
+	wait_for_writer(log_root_tree);
 
 	/*
 	 * now that we've moved on to the tree of log tree roots,
@@ -4904,6 +5002,94 @@
 	return ret;
 }
 
+static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
+				 struct inode *inode,
+				 struct btrfs_log_ctx *ctx)
+{
+	int ret;
+	struct btrfs_path *path;
+	struct btrfs_key key;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	const u64 ino = btrfs_ino(inode);
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+	path->skip_locking = 1;
+	path->search_commit_root = 1;
+
+	key.objectid = ino;
+	key.type = BTRFS_INODE_REF_KEY;
+	key.offset = 0;
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+
+	while (true) {
+		struct extent_buffer *leaf = path->nodes[0];
+		int slot = path->slots[0];
+		u32 cur_offset = 0;
+		u32 item_size;
+		unsigned long ptr;
+
+		if (slot >= btrfs_header_nritems(leaf)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret < 0)
+				goto out;
+			else if (ret > 0)
+				break;
+			continue;
+		}
+
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+		/* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
+		if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
+			break;
+
+		item_size = btrfs_item_size_nr(leaf, slot);
+		ptr = btrfs_item_ptr_offset(leaf, slot);
+		while (cur_offset < item_size) {
+			struct btrfs_key inode_key;
+			struct inode *dir_inode;
+
+			inode_key.type = BTRFS_INODE_ITEM_KEY;
+			inode_key.offset = 0;
+
+			if (key.type == BTRFS_INODE_EXTREF_KEY) {
+				struct btrfs_inode_extref *extref;
+
+				extref = (struct btrfs_inode_extref *)
+					(ptr + cur_offset);
+				inode_key.objectid = btrfs_inode_extref_parent(
+					leaf, extref);
+				cur_offset += sizeof(*extref);
+				cur_offset += btrfs_inode_extref_name_len(leaf,
+					extref);
+			} else {
+				inode_key.objectid = key.offset;
+				cur_offset = item_size;
+			}
+
+			dir_inode = btrfs_iget(root->fs_info->sb, &inode_key,
+					       root, NULL);
+			/* If parent inode was deleted, skip it. */
+			if (IS_ERR(dir_inode))
+				continue;
+
+			ret = btrfs_log_inode(trans, root, dir_inode,
+					      LOG_INODE_ALL, 0, LLONG_MAX, ctx);
+			iput(dir_inode);
+			if (ret)
+				goto out;
+		}
+		path->slots[0]++;
+	}
+	ret = 0;
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
 /*
  * helper function around btrfs_log_inode to make sure newly created
  * parent directories also end up in the log.  A minimal inode and backref
@@ -4923,9 +5109,6 @@
 	struct dentry *old_parent = NULL;
 	int ret = 0;
 	u64 last_committed = root->fs_info->last_trans_committed;
-	const struct dentry * const first_parent = parent;
-	const bool did_unlink = (BTRFS_I(inode)->last_unlink_trans >
-				 last_committed);
 	bool log_dentries = false;
 	struct inode *orig_inode = inode;
 
@@ -4986,6 +5169,53 @@
 	if (S_ISDIR(inode->i_mode) && ctx && ctx->log_new_dentries)
 		log_dentries = true;
 
+	/*
+	 * On unlink we must make sure all our current and old parent directory
+	 * inodes are fully logged. This is to prevent leaving dangling
+	 * directory index entries in directories that were our parents but are
+	 * not anymore. Not doing this results in old parent directory being
+	 * impossible to delete after log replay (rmdir will always fail with
+	 * error -ENOTEMPTY).
+	 *
+	 * Example 1:
+	 *
+	 * mkdir testdir
+	 * touch testdir/foo
+	 * ln testdir/foo testdir/bar
+	 * sync
+	 * unlink testdir/bar
+	 * xfs_io -c fsync testdir/foo
+	 * <power failure>
+	 * mount fs, triggers log replay
+	 *
+	 * If we don't log the parent directory (testdir), after log replay the
+	 * directory still has an entry pointing to the file inode using the bar
+	 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
+	 * the file inode has a link count of 1.
+	 *
+	 * Example 2:
+	 *
+	 * mkdir testdir
+	 * touch foo
+	 * ln foo testdir/foo2
+	 * ln foo testdir/foo3
+	 * sync
+	 * unlink testdir/foo3
+	 * xfs_io -c fsync foo
+	 * <power failure>
+	 * mount fs, triggers log replay
+	 *
+	 * Similar to the first example, after log replay the parent directory
+	 * testdir still has an entry pointing to the file inode with name foo3
+	 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
+	 * and has a link count of 2.
+	 */
+	if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
+		ret = btrfs_log_all_parents(trans, orig_inode, ctx);
+		if (ret)
+			goto end_trans;
+	}
+
 	while (1) {
 		if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb)
 			break;
@@ -4994,23 +5224,9 @@
 		if (root != BTRFS_I(inode)->root)
 			break;
 
-		/*
-		 * On unlink we must make sure our immediate parent directory
-		 * inode is fully logged. This is to prevent leaving dangling
-		 * directory index entries and a wrong directory inode's i_size.
-		 * Not doing so can result in a directory being impossible to
-		 * delete after log replay (rmdir will always fail with error
-		 * -ENOTEMPTY).
-		 */
-		if (did_unlink && parent == first_parent)
-			inode_only = LOG_INODE_ALL;
-		else
-			inode_only = LOG_INODE_EXISTS;
-
-		if (BTRFS_I(inode)->generation >
-		    root->fs_info->last_trans_committed ||
-		    inode_only == LOG_INODE_ALL) {
-			ret = btrfs_log_inode(trans, root, inode, inode_only,
+		if (BTRFS_I(inode)->generation > last_committed) {
+			ret = btrfs_log_inode(trans, root, inode,
+					      LOG_INODE_EXISTS,
 					      0, LLONG_MAX, ctx);
 			if (ret)
 				goto end_trans;
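
The comment added in the first tree-log.c hunk describes two csum items left
overlapping after replay. As a standalone illustration (userspace C, not
kernel code; the 4-bytes-of-csum-per-4K-block layout and the constants are
taken straight from that comment's example), the overlap can be checked like
this:

    #include <stdio.h>
    #include <stdint.h>

    /* A csum item covers item_size / 4 blocks of 4K each (crc32, 4 bytes
     * per 4K block, matching the arithmetic in the comment above). */
    struct csum_item {
        uint64_t bytenr;    /* start of the covered disk range */
        uint32_t item_size; /* bytes of checksum data */
    };

    static uint64_t item_end(const struct csum_item *c)
    {
        return c->bytenr + ((uint64_t)c->item_size / 4) * 4096;
    }

    int main(void)
    {
        /* item left by replaying the partially-referencing extent first */
        struct csum_item partial = { 12865536, 20 };  /* covers 20K */
        /* item inserted when the fully-referencing extent is replayed */
        struct csum_item full = { 12845056, 100 };    /* covers 100K */

        int overlap = partial.bytenr < item_end(&full) &&
                      full.bytenr < item_end(&partial);

        printf("[%llu,%llu) vs [%llu,%llu): %s\n",
               (unsigned long long)full.bytenr,
               (unsigned long long)item_end(&full),
               (unsigned long long)partial.bytenr,
               (unsigned long long)item_end(&partial),
               overlap ? "overlap" : "disjoint");
        return 0; /* prints "overlap" -- what btrfs_del_csums() now prevents */
    }
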
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 762476f..76201d6 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1116,15 +1116,18 @@
 	return ret;
 }
 
-static int contains_pending_extent(struct btrfs_trans_handle *trans,
+static int contains_pending_extent(struct btrfs_transaction *transaction,
 				   struct btrfs_device *device,
 				   u64 *start, u64 len)
 {
+	struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
 	struct extent_map *em;
-	struct list_head *search_list = &trans->transaction->pending_chunks;
+	struct list_head *search_list = &fs_info->pinned_chunks;
 	int ret = 0;
 	u64 physical_start = *start;
 
+	if (transaction)
+		search_list = &transaction->pending_chunks;
 again:
 	list_for_each_entry(em, search_list, list) {
 		struct map_lookup *map;
@@ -1159,8 +1162,8 @@
 			}
 		}
 	}
-	if (search_list == &trans->transaction->pending_chunks) {
-		search_list = &trans->root->fs_info->pinned_chunks;
+	if (search_list != &fs_info->pinned_chunks) {
+		search_list = &fs_info->pinned_chunks;
 		goto again;
 	}
 
@@ -1169,12 +1172,13 @@
 
 
 /*
- * find_free_dev_extent - find free space in the specified device
- * @device:	the device which we search the free space in
- * @num_bytes:	the size of the free space that we need
- * @start:	store the start of the free space.
- * @len:	the size of the free space. that we find, or the size of the max
- * 		free space if we don't find suitable free space
+ * find_free_dev_extent_start - find free space in the specified device
+ * @device:	  the device which we search the free space in
+ * @num_bytes:	  the size of the free space that we need
+ * @search_start: the position from which to begin the search
+ * @start:	  store the start of the free space.
+ * @len:	  the size of the free space that we find, or the size
+ *		  of the max free space if we don't find suitable free space
  *
  * this uses a pretty simple search, the expectation is that it is
  * called very infrequently and that a given device has a small number
@@ -1188,9 +1192,9 @@
  * But if we don't find suitable free space, it is used to store the size of
  * the max free space.
  */
-int find_free_dev_extent(struct btrfs_trans_handle *trans,
-			 struct btrfs_device *device, u64 num_bytes,
-			 u64 *start, u64 *len)
+int find_free_dev_extent_start(struct btrfs_transaction *transaction,
+			       struct btrfs_device *device, u64 num_bytes,
+			       u64 search_start, u64 *start, u64 *len)
 {
 	struct btrfs_key key;
 	struct btrfs_root *root = device->dev_root;
@@ -1200,19 +1204,11 @@
 	u64 max_hole_start;
 	u64 max_hole_size;
 	u64 extent_end;
-	u64 search_start;
 	u64 search_end = device->total_bytes;
 	int ret;
 	int slot;
 	struct extent_buffer *l;
 
-	/* FIXME use last free of some kind */
-
-	/* we don't want to overwrite the superblock on the drive,
-	 * so we make sure to start at an offset of at least 1MB
-	 */
-	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
-
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -1273,7 +1269,7 @@
 			 * Have to check before we set max_hole_start, otherwise
 			 * we could end up sending back this offset anyway.
 			 */
-			if (contains_pending_extent(trans, device,
+			if (contains_pending_extent(transaction, device,
 						    &search_start,
 						    hole_size)) {
 				if (key.offset >= search_start) {
@@ -1322,7 +1318,7 @@
 	if (search_end > search_start) {
 		hole_size = search_end - search_start;
 
-		if (contains_pending_extent(trans, device, &search_start,
+		if (contains_pending_extent(transaction, device, &search_start,
 					    hole_size)) {
 			btrfs_release_path(path);
 			goto again;
@@ -1348,6 +1344,24 @@
 	return ret;
 }
 
+int find_free_dev_extent(struct btrfs_trans_handle *trans,
+			 struct btrfs_device *device, u64 num_bytes,
+			 u64 *start, u64 *len)
+{
+	struct btrfs_root *root = device->dev_root;
+	u64 search_start;
+
+	/* FIXME use last free of some kind */
+
+	/*
+	 * we don't want to overwrite the superblock on the drive,
+	 * so we make sure to start at an offset of at least 1MB
+	 */
+	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
+	return find_free_dev_extent_start(trans->transaction, device,
+					  num_bytes, search_start, start, len);
+}
+
 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
 			  struct btrfs_device *device,
 			  u64 start, u64 *dev_extent_len)
@@ -2755,9 +2769,7 @@
 	return ret;
 }
 
-static int btrfs_relocate_chunk(struct btrfs_root *root,
-				u64 chunk_objectid,
-				u64 chunk_offset)
+static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
 {
 	struct btrfs_root *extent_root;
 	struct btrfs_trans_handle *trans;
@@ -2785,7 +2797,9 @@
 		return -ENOSPC;
 
 	/* step one, relocate all the extents inside this chunk */
+	btrfs_scrub_pause(root);
 	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
+	btrfs_scrub_continue(root);
 	if (ret)
 		return ret;
 
@@ -2855,7 +2869,6 @@
 
 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
 			ret = btrfs_relocate_chunk(chunk_root,
-						   found_key.objectid,
 						   found_key.offset);
 			if (ret == -ENOSPC)
 				failed++;
@@ -3375,7 +3388,6 @@
 		}
 
 		ret = btrfs_relocate_chunk(chunk_root,
-					   found_key.objectid,
 					   found_key.offset);
 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
 		if (ret && ret != -ENOSPC)
@@ -4077,7 +4089,6 @@
 	struct btrfs_dev_extent *dev_extent = NULL;
 	struct btrfs_path *path;
 	u64 length;
-	u64 chunk_objectid;
 	u64 chunk_offset;
 	int ret;
 	int slot;
@@ -4154,11 +4165,10 @@
 			break;
 		}
 
-		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
 		btrfs_release_path(path);
 
-		ret = btrfs_relocate_chunk(root, chunk_objectid, chunk_offset);
+		ret = btrfs_relocate_chunk(root, chunk_offset);
 		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
 		if (ret && ret != -ENOSPC)
 			goto done;
@@ -4200,7 +4210,8 @@
 		u64 start = new_size;
 		u64 len = old_size - new_size;
 
-		if (contains_pending_extent(trans, device, &start, len)) {
+		if (contains_pending_extent(trans->transaction, device,
+					    &start, len)) {
 			unlock_chunks(root);
 			checked_pending_chunks = true;
 			failed = 0;
@@ -5071,9 +5082,7 @@
 		 * and the stripes
 		 */
 		sizeof(u64) * (total_stripes),
-		GFP_NOFS);
-	if (!bbio)
-		return NULL;
+		GFP_NOFS|__GFP_NOFAIL);
 
 	atomic_set(&bbio->error, 0);
 	atomic_set(&bbio->refs, 1);
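
After this change find_free_dev_extent() is a thin wrapper that fixes the
search start at 1MiB (or fs_info->alloc_start, if larger) and delegates to
find_free_dev_extent_start(), which lets other callers pick their own
starting offset. A minimal userspace sketch of that split, with stand-in
names for the kernel pieces:

    #include <stdio.h>
    #include <stdint.h>

    #define SZ_1M (1024ULL * 1024ULL)

    /* stand-in for find_free_dev_extent_start(): the caller chooses
     * where the search begins */
    static uint64_t dev_extent_start(uint64_t search_start, uint64_t num_bytes)
    {
        (void)num_bytes;
        return search_start; /* pretend the first hole starts right there */
    }

    /* stand-in for the find_free_dev_extent() wrapper: never hand out
     * space below 1MiB so the superblock region is not overwritten */
    static uint64_t dev_extent(uint64_t alloc_start, uint64_t num_bytes)
    {
        uint64_t search_start = alloc_start > SZ_1M ? alloc_start : SZ_1M;
        return dev_extent_start(search_start, num_bytes);
    }

    int main(void)
    {
        printf("allocation begins at byte %llu\n",
               (unsigned long long)dev_extent(0, 4096));
        return 0; /* 1048576: the superblock area stays untouched */
    }
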
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 95842a9..2ca784a 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -453,6 +453,9 @@
 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
+int find_free_dev_extent_start(struct btrfs_transaction *transaction,
+			 struct btrfs_device *device, u64 num_bytes,
+			 u64 search_start, u64 *start, u64 *max_avail);
 int find_free_dev_extent(struct btrfs_trans_handle *trans,
 			 struct btrfs_device *device, u64 num_bytes,
 			 u64 *start, u64 *max_avail);
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index d1c833c..7b6bfcb 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -479,7 +479,7 @@
 	if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
 		seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
 	if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
-		seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name);
+		seq_show_option(m, "snapdirname", fsopt->snapdir_name);
 
 	return 0;
 }
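
The point of switching from seq_printf() to seq_show_option() in this hunk
(and the cifs one below) is that option values are user-controlled strings:
shown unescaped in /proc/mounts, a value containing a comma would be parsed
as extra mount options. A rough userspace sketch of the idea — the exact
escaping rules of the kernel helper may differ:

    #include <stdio.h>

    /* escape separators before printing ",name=value" */
    static void show_option(const char *name, const char *value)
    {
        printf(",%s=", name);
        for (const char *p = value; *p; p++) {
            if (*p == ',' || *p == '\\')
                putchar('\\');  /* keep the parser from splitting here */
            putchar(*p);
        }
    }

    int main(void)
    {
        /* a hostile snapdirname that would otherwise inject ",ro" */
        show_option("snapdirname", ".snap,ro");
        putchar('\n');
        return 0;
    }
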
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 0a9fb6b..6a1119e 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -394,17 +394,17 @@
 	struct sockaddr *srcaddr;
 	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
 
-	seq_printf(s, ",vers=%s", tcon->ses->server->vals->version_string);
+	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
 	cifs_show_security(s, tcon->ses);
 	cifs_show_cache_flavor(s, cifs_sb);
 
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
 		seq_puts(s, ",multiuser");
 	else if (tcon->ses->user_name)
-		seq_printf(s, ",username=%s", tcon->ses->user_name);
+		seq_show_option(s, "username", tcon->ses->user_name);
 
 	if (tcon->ses->domainName)
-		seq_printf(s, ",domain=%s", tcon->ses->domainName);
+		seq_show_option(s, "domain", tcon->ses->domainName);
 
 	if (srcaddr->sa_family != AF_UNSPEC) {
 		struct sockaddr_in *saddr4;
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 754fd6c..87e9d79 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -120,12 +120,11 @@
 	struct cbuf cb;
 	int retries;
 #define MAX_CONNECT_RETRIES 3
-	int sctp_assoc;
 	struct hlist_node list;
 	struct connection *othercon;
 	struct work_struct rwork; /* Receive workqueue */
 	struct work_struct swork; /* Send workqueue */
-	bool try_new_addr;
+	void (*orig_error_report)(struct sock *sk);
 };
 #define sock2con(x) ((struct connection *)(x)->sk_user_data)
 
@@ -252,26 +251,6 @@
 	return con;
 }
 
-/* This is a bit drastic, but only called when things go wrong */
-static struct connection *assoc2con(int assoc_id)
-{
-	int i;
-	struct connection *con;
-
-	mutex_lock(&connections_lock);
-
-	for (i = 0 ; i < CONN_HASH_SIZE; i++) {
-		hlist_for_each_entry(con, &connection_hash[i], list) {
-			if (con->sctp_assoc == assoc_id) {
-				mutex_unlock(&connections_lock);
-				return con;
-			}
-		}
-	}
-	mutex_unlock(&connections_lock);
-	return NULL;
-}
-
 static struct dlm_node_addr *find_node_addr(int nodeid)
 {
 	struct dlm_node_addr *na;
@@ -322,14 +301,14 @@
 	spin_lock(&dlm_node_addrs_spin);
 	na = find_node_addr(nodeid);
 	if (na && na->addr_count) {
+		memcpy(&sas, na->addr[na->curr_addr_index],
+		       sizeof(struct sockaddr_storage));
+
 		if (try_new_addr) {
 			na->curr_addr_index++;
 			if (na->curr_addr_index == na->addr_count)
 				na->curr_addr_index = 0;
 		}
-
-		memcpy(&sas, na->addr[na->curr_addr_index ],
-			sizeof(struct sockaddr_storage));
 	}
 	spin_unlock(&dlm_node_addrs_spin);
 
@@ -459,18 +438,23 @@
 
 static void lowcomms_state_change(struct sock *sk)
 {
-	if (sk->sk_state == TCP_ESTABLISHED)
+	/* SCTP layer is not calling sk_data_ready when the connection
+	 * is closed, so we catch the signal through here. Also, it
+	 * doesn't switch socket state when entering shutdown, so we
+	 * skip the write in that case.
+	 */
+	if (sk->sk_shutdown) {
+		if (sk->sk_shutdown == RCV_SHUTDOWN)
+			lowcomms_data_ready(sk);
+	} else if (sk->sk_state == TCP_ESTABLISHED) {
 		lowcomms_write_space(sk);
+	}
 }
 
 int dlm_lowcomms_connect_node(int nodeid)
 {
 	struct connection *con;
 
-	/* with sctp there's no connecting without sending */
-	if (dlm_config.ci_protocol != 0)
-		return 0;
-
 	if (nodeid == dlm_our_nodeid())
 		return 0;
 
@@ -481,6 +465,43 @@
 	return 0;
 }
 
+static void lowcomms_error_report(struct sock *sk)
+{
+	struct connection *con = sock2con(sk);
+	struct sockaddr_storage saddr;
+
+	if (nodeid_to_addr(con->nodeid, &saddr, NULL, false)) {
+		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
+				   "sending to node %d, port %d, "
+				   "sk_err=%d/%d\n", dlm_our_nodeid(),
+				   con->nodeid, dlm_config.ci_tcp_port,
+				   sk->sk_err, sk->sk_err_soft);
+		return;
+	} else if (saddr.ss_family == AF_INET) {
+		struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr;
+
+		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
+				   "sending to node %d at %pI4, port %d, "
+				   "sk_err=%d/%d\n", dlm_our_nodeid(),
+				   con->nodeid, &sin4->sin_addr.s_addr,
+				   dlm_config.ci_tcp_port, sk->sk_err,
+				   sk->sk_err_soft);
+	} else {
+		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr;
+
+		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
+				   "sending to node %d at %u.%u.%u.%u, "
+				   "port %d, sk_err=%d/%d\n", dlm_our_nodeid(),
+				   con->nodeid, sin6->sin6_addr.s6_addr32[0],
+				   sin6->sin6_addr.s6_addr32[1],
+				   sin6->sin6_addr.s6_addr32[2],
+				   sin6->sin6_addr.s6_addr32[3],
+				   dlm_config.ci_tcp_port, sk->sk_err,
+				   sk->sk_err_soft);
+	}
+	con->orig_error_report(sk);
+}
+
 /* Make a socket active */
 static void add_sock(struct socket *sock, struct connection *con)
 {
@@ -492,6 +513,8 @@
 	con->sock->sk->sk_state_change = lowcomms_state_change;
 	con->sock->sk->sk_user_data = con;
 	con->sock->sk->sk_allocation = GFP_NOFS;
+	con->orig_error_report = con->sock->sk->sk_error_report;
+	con->sock->sk->sk_error_report = lowcomms_error_report;
 }
 
 /* Add the port number to an IPv6 or 4 sockaddr and return the address
@@ -514,17 +537,24 @@
 }
 
 /* Close a remote connection and tidy up */
-static void close_connection(struct connection *con, bool and_other)
+static void close_connection(struct connection *con, bool and_other,
+			     bool tx, bool rx)
 {
-	mutex_lock(&con->sock_mutex);
+	clear_bit(CF_CONNECT_PENDING, &con->flags);
+	clear_bit(CF_WRITE_PENDING, &con->flags);
+	if (tx && cancel_work_sync(&con->swork))
+		log_print("canceled swork for node %d", con->nodeid);
+	if (rx && cancel_work_sync(&con->rwork))
+		log_print("canceled rwork for node %d", con->nodeid);
 
+	mutex_lock(&con->sock_mutex);
 	if (con->sock) {
 		sock_release(con->sock);
 		con->sock = NULL;
 	}
 	if (con->othercon && and_other) {
 		/* Will only re-enter once. */
-		close_connection(con->othercon, false);
+		close_connection(con->othercon, false, true, true);
 	}
 	if (con->rx_page) {
 		__free_page(con->rx_page);
@@ -535,254 +565,6 @@
 	mutex_unlock(&con->sock_mutex);
 }
 
-/* We only send shutdown messages to nodes that are not part of the cluster */
-static void sctp_send_shutdown(sctp_assoc_t associd)
-{
-	static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
-	struct msghdr outmessage;
-	struct cmsghdr *cmsg;
-	struct sctp_sndrcvinfo *sinfo;
-	int ret;
-	struct connection *con;
-
-	con = nodeid2con(0,0);
-	BUG_ON(con == NULL);
-
-	outmessage.msg_name = NULL;
-	outmessage.msg_namelen = 0;
-	outmessage.msg_control = outcmsg;
-	outmessage.msg_controllen = sizeof(outcmsg);
-	outmessage.msg_flags = MSG_EOR;
-
-	cmsg = CMSG_FIRSTHDR(&outmessage);
-	cmsg->cmsg_level = IPPROTO_SCTP;
-	cmsg->cmsg_type = SCTP_SNDRCV;
-	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
-	outmessage.msg_controllen = cmsg->cmsg_len;
-	sinfo = CMSG_DATA(cmsg);
-	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
-
-	sinfo->sinfo_flags |= MSG_EOF;
-	sinfo->sinfo_assoc_id = associd;
-
-	ret = kernel_sendmsg(con->sock, &outmessage, NULL, 0, 0);
-
-	if (ret != 0)
-		log_print("send EOF to node failed: %d", ret);
-}
-
-static void sctp_init_failed_foreach(struct connection *con)
-{
-
-	/*
-	 * Don't try to recover base con and handle race where the
-	 * other node's assoc init creates a assoc and we get that
-	 * notification, then we get a notification that our attempt
-	 * failed due. This happens when we are still trying the primary
-	 * address, but the other node has already tried secondary addrs
-	 * and found one that worked.
-	 */
-	if (!con->nodeid || con->sctp_assoc)
-		return;
-
-	log_print("Retrying SCTP association init for node %d\n", con->nodeid);
-
-	con->try_new_addr = true;
-	con->sctp_assoc = 0;
-	if (test_and_clear_bit(CF_INIT_PENDING, &con->flags)) {
-		if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
-			queue_work(send_workqueue, &con->swork);
-	}
-}
-
-/* INIT failed but we don't know which node...
-   restart INIT on all pending nodes */
-static void sctp_init_failed(void)
-{
-	mutex_lock(&connections_lock);
-
-	foreach_conn(sctp_init_failed_foreach);
-
-	mutex_unlock(&connections_lock);
-}
-
-static void retry_failed_sctp_send(struct connection *recv_con,
-				   struct sctp_send_failed *sn_send_failed,
-				   char *buf)
-{
-	int len = sn_send_failed->ssf_length - sizeof(struct sctp_send_failed);
-	struct dlm_mhandle *mh;
-	struct connection *con;
-	char *retry_buf;
-	int nodeid = sn_send_failed->ssf_info.sinfo_ppid;
-
-	log_print("Retry sending %d bytes to node id %d", len, nodeid);
-	
-	if (!nodeid) {
-		log_print("Shouldn't resend data via listening connection.");
-		return;
-	}
-
-	con = nodeid2con(nodeid, 0);
-	if (!con) {
-		log_print("Could not look up con for nodeid %d\n",
-			  nodeid);
-		return;
-	}
-
-	mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_NOFS, &retry_buf);
-	if (!mh) {
-		log_print("Could not allocate buf for retry.");
-		return;
-	}
-	memcpy(retry_buf, buf + sizeof(struct sctp_send_failed), len);
-	dlm_lowcomms_commit_buffer(mh);
-
-	/*
-	 * If we got a assoc changed event before the send failed event then
-	 * we only need to retry the send.
-	 */
-	if (con->sctp_assoc) {
-		if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
-			queue_work(send_workqueue, &con->swork);
-	} else
-		sctp_init_failed_foreach(con);
-}
-
-/* Something happened to an association */
-static void process_sctp_notification(struct connection *con,
-				      struct msghdr *msg, char *buf)
-{
-	union sctp_notification *sn = (union sctp_notification *)buf;
-	struct linger linger;
-
-	switch (sn->sn_header.sn_type) {
-	case SCTP_SEND_FAILED:
-		retry_failed_sctp_send(con, &sn->sn_send_failed, buf);
-		break;
-	case SCTP_ASSOC_CHANGE:
-		switch (sn->sn_assoc_change.sac_state) {
-		case SCTP_COMM_UP:
-		case SCTP_RESTART:
-		{
-			/* Check that the new node is in the lockspace */
-			struct sctp_prim prim;
-			int nodeid;
-			int prim_len, ret;
-			int addr_len;
-			struct connection *new_con;
-
-			/*
-			 * We get this before any data for an association.
-			 * We verify that the node is in the cluster and
-			 * then peel off a socket for it.
-			 */
-			if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) {
-				log_print("COMM_UP for invalid assoc ID %d",
-					 (int)sn->sn_assoc_change.sac_assoc_id);
-				sctp_init_failed();
-				return;
-			}
-			memset(&prim, 0, sizeof(struct sctp_prim));
-			prim_len = sizeof(struct sctp_prim);
-			prim.ssp_assoc_id = sn->sn_assoc_change.sac_assoc_id;
-
-			ret = kernel_getsockopt(con->sock,
-						IPPROTO_SCTP,
-						SCTP_PRIMARY_ADDR,
-						(char*)&prim,
-						&prim_len);
-			if (ret < 0) {
-				log_print("getsockopt/sctp_primary_addr on "
-					  "new assoc %d failed : %d",
-					  (int)sn->sn_assoc_change.sac_assoc_id,
-					  ret);
-
-				/* Retry INIT later */
-				new_con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
-				if (new_con)
-					clear_bit(CF_CONNECT_PENDING, &con->flags);
-				return;
-			}
-			make_sockaddr(&prim.ssp_addr, 0, &addr_len);
-			if (addr_to_nodeid(&prim.ssp_addr, &nodeid)) {
-				unsigned char *b=(unsigned char *)&prim.ssp_addr;
-				log_print("reject connect from unknown addr");
-				print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE, 
-						     b, sizeof(struct sockaddr_storage));
-				sctp_send_shutdown(prim.ssp_assoc_id);
-				return;
-			}
-
-			new_con = nodeid2con(nodeid, GFP_NOFS);
-			if (!new_con)
-				return;
-
-			/* Peel off a new sock */
-			lock_sock(con->sock->sk);
-			ret = sctp_do_peeloff(con->sock->sk,
-				sn->sn_assoc_change.sac_assoc_id,
-				&new_con->sock);
-			release_sock(con->sock->sk);
-			if (ret < 0) {
-				log_print("Can't peel off a socket for "
-					  "connection %d to node %d: err=%d",
-					  (int)sn->sn_assoc_change.sac_assoc_id,
-					  nodeid, ret);
-				return;
-			}
-			add_sock(new_con->sock, new_con);
-
-			linger.l_onoff = 1;
-			linger.l_linger = 0;
-			ret = kernel_setsockopt(new_con->sock, SOL_SOCKET, SO_LINGER,
-						(char *)&linger, sizeof(linger));
-			if (ret < 0)
-				log_print("set socket option SO_LINGER failed");
-
-			log_print("connecting to %d sctp association %d",
-				 nodeid, (int)sn->sn_assoc_change.sac_assoc_id);
-
-			new_con->sctp_assoc = sn->sn_assoc_change.sac_assoc_id;
-			new_con->try_new_addr = false;
-			/* Send any pending writes */
-			clear_bit(CF_CONNECT_PENDING, &new_con->flags);
-			clear_bit(CF_INIT_PENDING, &new_con->flags);
-			if (!test_and_set_bit(CF_WRITE_PENDING, &new_con->flags)) {
-				queue_work(send_workqueue, &new_con->swork);
-			}
-			if (!test_and_set_bit(CF_READ_PENDING, &new_con->flags))
-				queue_work(recv_workqueue, &new_con->rwork);
-		}
-		break;
-
-		case SCTP_COMM_LOST:
-		case SCTP_SHUTDOWN_COMP:
-		{
-			con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
-			if (con) {
-				con->sctp_assoc = 0;
-			}
-		}
-		break;
-
-		case SCTP_CANT_STR_ASSOC:
-		{
-			/* Will retry init when we get the send failed notification */
-			log_print("Can't start SCTP association - retrying");
-		}
-		break;
-
-		default:
-			log_print("unexpected SCTP assoc change id=%d state=%d",
-				  (int)sn->sn_assoc_change.sac_assoc_id,
-				  sn->sn_assoc_change.sac_state);
-		}
-	default:
-		; /* fall through */
-	}
-}
-
 /* Data received from remote end */
 static int receive_from_sock(struct connection *con)
 {
@@ -793,7 +575,6 @@
 	int r;
 	int call_again_soon = 0;
 	int nvec;
-	char incmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
 
 	mutex_lock(&con->sock_mutex);
 
@@ -801,6 +582,10 @@
 		ret = -EAGAIN;
 		goto out_close;
 	}
+	if (con->nodeid == 0) {
+		ret = -EINVAL;
+		goto out_close;
+	}
 
 	if (con->rx_page == NULL) {
 		/*
@@ -813,11 +598,6 @@
 		cbuf_init(&con->cb, PAGE_CACHE_SIZE);
 	}
 
-	/* Only SCTP needs these really */
-	memset(&incmsg, 0, sizeof(incmsg));
-	msg.msg_control = incmsg;
-	msg.msg_controllen = sizeof(incmsg);
-
 	/*
 	 * iov[0] is the bit of the circular buffer between the current end
 	 * point (cb.base + cb.len) and the end of the buffer.
@@ -843,31 +623,18 @@
 			       MSG_DONTWAIT | MSG_NOSIGNAL);
 	if (ret <= 0)
 		goto out_close;
-
-	/* Process SCTP notifications */
-	if (msg.msg_flags & MSG_NOTIFICATION) {
-		msg.msg_control = incmsg;
-		msg.msg_controllen = sizeof(incmsg);
-
-		process_sctp_notification(con, &msg,
-				page_address(con->rx_page) + con->cb.base);
-		mutex_unlock(&con->sock_mutex);
-		return 0;
-	}
-	BUG_ON(con->nodeid == 0);
-
-	if (ret == len)
+	else if (ret == len)
 		call_again_soon = 1;
+
 	cbuf_add(&con->cb, ret);
 	ret = dlm_process_incoming_buffer(con->nodeid,
 					  page_address(con->rx_page),
 					  con->cb.base, con->cb.len,
 					  PAGE_CACHE_SIZE);
 	if (ret == -EBADMSG) {
-		log_print("lowcomms: addr=%p, base=%u, len=%u, "
-			  "iov_len=%u, iov_base[0]=%p, read=%d",
-			  page_address(con->rx_page), con->cb.base, con->cb.len,
-			  len, iov[0].iov_base, r);
+		log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
+			  page_address(con->rx_page), con->cb.base,
+			  con->cb.len, r);
 	}
 	if (ret < 0)
 		goto out_close;
@@ -892,7 +659,7 @@
 out_close:
 	mutex_unlock(&con->sock_mutex);
 	if (ret != -EAGAIN) {
-		close_connection(con, false);
+		close_connection(con, false, true, false);
 		/* Reconnect when there is something to send */
 	}
 	/* Don't return success if we really got EOF */
@@ -1033,6 +800,120 @@
 	return result;
 }
 
+static int sctp_accept_from_sock(struct connection *con)
+{
+	/* Check that the new node is in the lockspace */
+	struct sctp_prim prim;
+	int nodeid;
+	int prim_len, ret;
+	int addr_len;
+	struct connection *newcon;
+	struct connection *addcon;
+	struct socket *newsock;
+
+	mutex_lock(&connections_lock);
+	if (!dlm_allow_conn) {
+		mutex_unlock(&connections_lock);
+		return -1;
+	}
+	mutex_unlock(&connections_lock);
+
+	mutex_lock_nested(&con->sock_mutex, 0);
+
+	ret = kernel_accept(con->sock, &newsock, O_NONBLOCK);
+	if (ret < 0)
+		goto accept_err;
+
+	memset(&prim, 0, sizeof(struct sctp_prim));
+	prim_len = sizeof(struct sctp_prim);
+
+	ret = kernel_getsockopt(newsock, IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
+				(char *)&prim, &prim_len);
+	if (ret < 0) {
+		log_print("getsockopt/sctp_primary_addr failed: %d", ret);
+		goto accept_err;
+	}
+
+	make_sockaddr(&prim.ssp_addr, 0, &addr_len);
+	if (addr_to_nodeid(&prim.ssp_addr, &nodeid)) {
+		unsigned char *b = (unsigned char *)&prim.ssp_addr;
+
+		log_print("reject connect from unknown addr");
+		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
+				     b, sizeof(struct sockaddr_storage));
+		goto accept_err;
+	}
+
+	newcon = nodeid2con(nodeid, GFP_NOFS);
+	if (!newcon) {
+		ret = -ENOMEM;
+		goto accept_err;
+	}
+
+	mutex_lock_nested(&newcon->sock_mutex, 1);
+
+	if (newcon->sock) {
+		struct connection *othercon = newcon->othercon;
+
+		if (!othercon) {
+			othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
+			if (!othercon) {
+				log_print("failed to allocate incoming socket");
+				mutex_unlock(&newcon->sock_mutex);
+				ret = -ENOMEM;
+				goto accept_err;
+			}
+			othercon->nodeid = nodeid;
+			othercon->rx_action = receive_from_sock;
+			mutex_init(&othercon->sock_mutex);
+			INIT_WORK(&othercon->swork, process_send_sockets);
+			INIT_WORK(&othercon->rwork, process_recv_sockets);
+			set_bit(CF_IS_OTHERCON, &othercon->flags);
+		}
+		if (!othercon->sock) {
+			newcon->othercon = othercon;
+			othercon->sock = newsock;
+			newsock->sk->sk_user_data = othercon;
+			add_sock(newsock, othercon);
+			addcon = othercon;
+		} else {
+			printk("Extra connection from node %d attempted\n", nodeid);
+			ret = -EAGAIN;
+			mutex_unlock(&newcon->sock_mutex);
+			goto accept_err;
+		}
+	} else {
+		newsock->sk->sk_user_data = newcon;
+		newcon->rx_action = receive_from_sock;
+		add_sock(newsock, newcon);
+		addcon = newcon;
+	}
+
+	log_print("connected to %d", nodeid);
+
+	mutex_unlock(&newcon->sock_mutex);
+
+	/*
+	 * Add it to the active queue in case we got data
+	 * between processing the accept and adding the socket
+	 * to the read_sockets list
+	 */
+	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
+		queue_work(recv_workqueue, &addcon->rwork);
+	mutex_unlock(&con->sock_mutex);
+
+	return 0;
+
+accept_err:
+	mutex_unlock(&con->sock_mutex);
+	if (newsock)
+		sock_release(newsock);
+	if (ret != -EAGAIN)
+		log_print("error accepting connection from node: %d", ret);
+
+	return ret;
+}
+
 static void free_entry(struct writequeue_entry *e)
 {
 	__free_page(e->page);
@@ -1057,97 +938,129 @@
 	}
 }
 
+/*
+ * sctp_bind_addrs - bind an SCTP socket to all our addresses
+ */
+static int sctp_bind_addrs(struct connection *con, uint16_t port)
+{
+	struct sockaddr_storage localaddr;
+	int i, addr_len, result = 0;
+
+	for (i = 0; i < dlm_local_count; i++) {
+		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
+		make_sockaddr(&localaddr, port, &addr_len);
+
+		if (!i)
+			result = kernel_bind(con->sock,
+					     (struct sockaddr *)&localaddr,
+					     addr_len);
+		else
+			result = kernel_setsockopt(con->sock, SOL_SCTP,
+						   SCTP_SOCKOPT_BINDX_ADD,
+						   (char *)&localaddr, addr_len);
+
+		if (result < 0) {
+			log_print("Can't bind to %d addr number %d, %d.\n",
+				  port, i + 1, result);
+			break;
+		}
+	}
+	return result;
+}
+
 /* Initiate an SCTP association.
    This is a special case of send_to_sock() in that we don't yet have a
    peeled-off socket for this association, so we use the listening socket
    and add the primary IP address of the remote node.
  */
-static void sctp_init_assoc(struct connection *con)
+static void sctp_connect_to_sock(struct connection *con)
 {
-	struct sockaddr_storage rem_addr;
-	char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
-	struct msghdr outmessage;
-	struct cmsghdr *cmsg;
-	struct sctp_sndrcvinfo *sinfo;
-	struct connection *base_con;
-	struct writequeue_entry *e;
-	int len, offset;
-	int ret;
-	int addrlen;
-	struct kvec iov[1];
+	struct sockaddr_storage daddr;
+	int one = 1;
+	int result;
+	int addr_len;
+	struct socket *sock;
+
+	if (con->nodeid == 0) {
+		log_print("attempt to connect sock 0 foiled");
+		return;
+	}
 
 	mutex_lock(&con->sock_mutex);
-	if (test_and_set_bit(CF_INIT_PENDING, &con->flags))
-		goto unlock;
 
-	if (nodeid_to_addr(con->nodeid, NULL, (struct sockaddr *)&rem_addr,
-			   con->try_new_addr)) {
+	/* Some odd races can cause double-connects, ignore them */
+	if (con->retries++ > MAX_CONNECT_RETRIES)
+		goto out;
+
+	if (con->sock) {
+		log_print("node %d already connected.", con->nodeid);
+		goto out;
+	}
+
+	memset(&daddr, 0, sizeof(daddr));
+	result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
+	if (result < 0) {
 		log_print("no address for nodeid %d", con->nodeid);
-		goto unlock;
-	}
-	base_con = nodeid2con(0, 0);
-	BUG_ON(base_con == NULL);
-
-	make_sockaddr(&rem_addr, dlm_config.ci_tcp_port, &addrlen);
-
-	outmessage.msg_name = &rem_addr;
-	outmessage.msg_namelen = addrlen;
-	outmessage.msg_control = outcmsg;
-	outmessage.msg_controllen = sizeof(outcmsg);
-	outmessage.msg_flags = MSG_EOR;
-
-	spin_lock(&con->writequeue_lock);
-
-	if (list_empty(&con->writequeue)) {
-		spin_unlock(&con->writequeue_lock);
-		log_print("writequeue empty for nodeid %d", con->nodeid);
-		goto unlock;
+		goto out;
 	}
 
-	e = list_first_entry(&con->writequeue, struct writequeue_entry, list);
-	len = e->len;
-	offset = e->offset;
+	/* Create a socket to communicate with */
+	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
+				  SOCK_STREAM, IPPROTO_SCTP, &sock);
+	if (result < 0)
+		goto socket_err;
 
-	/* Send the first block off the write queue */
-	iov[0].iov_base = page_address(e->page)+offset;
-	iov[0].iov_len = len;
-	spin_unlock(&con->writequeue_lock);
+	sock->sk->sk_user_data = con;
+	con->rx_action = receive_from_sock;
+	con->connect_action = sctp_connect_to_sock;
+	add_sock(sock, con);
 
-	if (rem_addr.ss_family == AF_INET) {
-		struct sockaddr_in *sin = (struct sockaddr_in *)&rem_addr;
-		log_print("Trying to connect to %pI4", &sin->sin_addr.s_addr);
-	} else {
-		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&rem_addr;
-		log_print("Trying to connect to %pI6", &sin6->sin6_addr);
-	}
+	/* Bind to all addresses. */
+	if (sctp_bind_addrs(con, 0))
+		goto bind_err;
 
-	cmsg = CMSG_FIRSTHDR(&outmessage);
-	cmsg->cmsg_level = IPPROTO_SCTP;
-	cmsg->cmsg_type = SCTP_SNDRCV;
-	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
-	sinfo = CMSG_DATA(cmsg);
-	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
-	sinfo->sinfo_ppid = cpu_to_le32(con->nodeid);
-	outmessage.msg_controllen = cmsg->cmsg_len;
-	sinfo->sinfo_flags |= SCTP_ADDR_OVER;
+	make_sockaddr(&daddr, dlm_config.ci_tcp_port, &addr_len);
 
-	ret = kernel_sendmsg(base_con->sock, &outmessage, iov, 1, len);
-	if (ret < 0) {
-		log_print("Send first packet to node %d failed: %d",
-			  con->nodeid, ret);
+	log_print("connecting to %d", con->nodeid);
 
-		/* Try again later */
+	/* Turn off Nagle's algorithm */
+	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
+			  sizeof(one));
+
+	result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
+				   O_NONBLOCK);
+	if (result == -EINPROGRESS)
+		result = 0;
+	if (result == 0)
+		goto out;
+
+
+bind_err:
+	con->sock = NULL;
+	sock_release(sock);
+
+socket_err:
+	/*
+	 * Some errors are fatal and this list might need adjusting. For other
+	 * errors we try again until the max number of retries is reached.
+	 */
+	if (result != -EHOSTUNREACH &&
+	    result != -ENETUNREACH &&
+	    result != -ENETDOWN &&
+	    result != -EINVAL &&
+	    result != -EPROTONOSUPPORT) {
+		log_print("connect %d try %d error %d", con->nodeid,
+			  con->retries, result);
+		mutex_unlock(&con->sock_mutex);
+		msleep(1000);
 		clear_bit(CF_CONNECT_PENDING, &con->flags);
-		clear_bit(CF_INIT_PENDING, &con->flags);
-	}
-	else {
-		spin_lock(&con->writequeue_lock);
-		writequeue_entry_complete(e, ret);
-		spin_unlock(&con->writequeue_lock);
+		lowcomms_connect_sock(con);
+		return;
 	}
 
-unlock:
+out:
 	mutex_unlock(&con->sock_mutex);
+	set_bit(CF_WRITE_PENDING, &con->flags);
 }
 
 /* Connect a new socket to its peer */
@@ -1236,11 +1149,13 @@
 			  con->retries, result);
 		mutex_unlock(&con->sock_mutex);
 		msleep(1000);
+		clear_bit(CF_CONNECT_PENDING, &con->flags);
 		lowcomms_connect_sock(con);
 		return;
 	}
 out:
 	mutex_unlock(&con->sock_mutex);
+	set_bit(CF_WRITE_PENDING, &con->flags);
 	return;
 }
 
@@ -1325,37 +1240,11 @@
 	}
 }
 
-/* Bind to an IP address. SCTP allows multiple address so it can do
-   multi-homing */
-static int add_sctp_bind_addr(struct connection *sctp_con,
-			      struct sockaddr_storage *addr,
-			      int addr_len, int num)
-{
-	int result = 0;
-
-	if (num == 1)
-		result = kernel_bind(sctp_con->sock,
-				     (struct sockaddr *) addr,
-				     addr_len);
-	else
-		result = kernel_setsockopt(sctp_con->sock, SOL_SCTP,
-					   SCTP_SOCKOPT_BINDX_ADD,
-					   (char *)addr, addr_len);
-
-	if (result < 0)
-		log_print("Can't bind to port %d addr number %d",
-			  dlm_config.ci_tcp_port, num);
-
-	return result;
-}
-
 /* Initialise SCTP socket and bind to all interfaces */
 static int sctp_listen_for_all(void)
 {
 	struct socket *sock = NULL;
-	struct sockaddr_storage localaddr;
-	struct sctp_event_subscribe subscribe;
-	int result = -EINVAL, num = 1, i, addr_len;
+	int result = -EINVAL;
 	struct connection *con = nodeid2con(0, GFP_NOFS);
 	int bufsize = NEEDED_RMEM;
 	int one = 1;
@@ -1366,33 +1255,17 @@
 	log_print("Using SCTP for communications");
 
 	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
-				  SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
+				  SOCK_STREAM, IPPROTO_SCTP, &sock);
 	if (result < 0) {
 		log_print("Can't create comms socket, check SCTP is loaded");
 		goto out;
 	}
 
-	/* Listen for events */
-	memset(&subscribe, 0, sizeof(subscribe));
-	subscribe.sctp_data_io_event = 1;
-	subscribe.sctp_association_event = 1;
-	subscribe.sctp_send_failure_event = 1;
-	subscribe.sctp_shutdown_event = 1;
-	subscribe.sctp_partial_delivery_event = 1;
-
 	result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUFFORCE,
 				 (char *)&bufsize, sizeof(bufsize));
 	if (result)
 		log_print("Error increasing buffer space on socket %d", result);
 
-	result = kernel_setsockopt(sock, SOL_SCTP, SCTP_EVENTS,
-				   (char *)&subscribe, sizeof(subscribe));
-	if (result < 0) {
-		log_print("Failed to set SCTP_EVENTS on socket: result=%d",
-			  result);
-		goto create_delsock;
-	}
-
 	result = kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one,
 				   sizeof(one));
 	if (result < 0)
@@ -1402,19 +1275,12 @@
 	sock->sk->sk_user_data = con;
 	con->sock = sock;
 	con->sock->sk->sk_data_ready = lowcomms_data_ready;
-	con->rx_action = receive_from_sock;
-	con->connect_action = sctp_init_assoc;
+	con->rx_action = sctp_accept_from_sock;
+	con->connect_action = sctp_connect_to_sock;
 
-	/* Bind to all interfaces. */
-	for (i = 0; i < dlm_local_count; i++) {
-		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
-		make_sockaddr(&localaddr, dlm_config.ci_tcp_port, &addr_len);
-
-		result = add_sctp_bind_addr(con, &localaddr, addr_len, num);
-		if (result)
-			goto create_delsock;
-		++num;
-	}
+	/* Bind to all addresses. */
+	if (sctp_bind_addrs(con, dlm_config.ci_tcp_port))
+		goto create_delsock;
 
 	result = sock->ops->listen(sock, 5);
 	if (result < 0) {
@@ -1612,14 +1478,13 @@
 
 send_error:
 	mutex_unlock(&con->sock_mutex);
-	close_connection(con, false);
+	close_connection(con, false, false, true);
 	lowcomms_connect_sock(con);
 	return;
 
 out_connect:
 	mutex_unlock(&con->sock_mutex);
-	if (!test_bit(CF_INIT_PENDING, &con->flags))
-		lowcomms_connect_sock(con);
+	lowcomms_connect_sock(con);
 }
 
 static void clean_one_writequeue(struct connection *con)
@@ -1644,15 +1509,9 @@
 	log_print("closing connection to node %d", nodeid);
 	con = nodeid2con(nodeid, 0);
 	if (con) {
-		clear_bit(CF_CONNECT_PENDING, &con->flags);
-		clear_bit(CF_WRITE_PENDING, &con->flags);
 		set_bit(CF_CLOSE, &con->flags);
-		if (cancel_work_sync(&con->swork))
-			log_print("canceled swork for node %d", nodeid);
-		if (cancel_work_sync(&con->rwork))
-			log_print("canceled rwork for node %d", nodeid);
+		close_connection(con, true, true, true);
 		clean_one_writequeue(con);
-		close_connection(con, true);
 	}
 
 	spin_lock(&dlm_node_addrs_spin);
@@ -1685,10 +1544,8 @@
 {
 	struct connection *con = container_of(work, struct connection, swork);
 
-	if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
+	if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags))
 		con->connect_action(con);
-		set_bit(CF_WRITE_PENDING, &con->flags);
-	}
 	if (test_and_clear_bit(CF_WRITE_PENDING, &con->flags))
 		send_to_sock(con);
 }
@@ -1735,7 +1592,7 @@
 
 static void free_conn(struct connection *con)
 {
-	close_connection(con, true);
+	close_connection(con, true, true, true);
 	if (con->othercon)
 		kmem_cache_free(con_cache, con->othercon);
 	hlist_del(&con->list);
@@ -1806,7 +1663,7 @@
 	dlm_allow_conn = 0;
 	con = nodeid2con(0,0);
 	if (con) {
-		close_connection(con, false);
+		close_connection(con, false, true, true);
 		kmem_cache_free(con_cache, con);
 	}
 fail_destroy:
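
The bulk of this lowcomms rework drops the single SOCK_SEQPACKET socket
(with association notifications and sctp_do_peeloff()) in favour of
one-to-one SCTP sockets that connect and accept like TCP. A minimal
userspace sketch of that socket style — the loopback address and DLM's
default port 21064 are placeholders, and an SCTP-capable kernel is assumed:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    #ifndef IPPROTO_SCTP
    #define IPPROTO_SCTP 132
    #endif

    int main(void)
    {
        /* one-to-one style: SOCK_STREAM + IPPROTO_SCTP */
        int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
        if (fd < 0) {
            perror("socket");   /* SCTP support missing? */
            return 1;
        }

        struct sockaddr_in peer;
        memset(&peer, 0, sizeof(peer));
        peer.sin_family = AF_INET;
        peer.sin_port = htons(21064);
        peer.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

        /* plain connect(); no peeloff, no association notifications */
        if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
            perror("connect");

        close(fd);
        return 0;
    }
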
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 75ecc0d..173b387 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -782,6 +782,7 @@
 	DECLARE_WAITQUEUE(wait, current);
 	struct dlm_callback cb;
 	int rv, resid, copy_lvb = 0;
+	int old_mode, new_mode;
 
 	if (count == sizeof(struct dlm_device_version)) {
 		rv = copy_version_to_user(buf, count);
@@ -838,6 +839,9 @@
 
 	lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_cb_list);
 
+	/* dlm_rem_lkb_callback() below installs a new lkb_last_cast, so
+	 * save the old cast mode before calling it */
+	old_mode = lkb->lkb_last_cast.mode;
+
 	rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid);
 	if (rv < 0) {
 		/* this shouldn't happen; lkb should have been removed from
@@ -861,9 +865,6 @@
 	}
 
 	if (cb.flags & DLM_CB_CAST) {
-		int old_mode, new_mode;
-
-		old_mode = lkb->lkb_last_cast.mode;
 		new_mode = cb.mode;
 
 		if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr &&
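
The user.c hunk is an ordering fix: dlm_rem_lkb_callback() can install a new
lkb_last_cast, so the old cast mode must be captured before that call, not
after. The pattern, reduced to a standalone sketch with illustrative names:

    #include <stdio.h>

    static int last_cast_mode = 3;  /* state that the callback replaces */

    static void rem_callback(void)
    {
        last_cast_mode = 5;         /* installs a new "last cast" */
    }

    int main(void)
    {
        int old_mode = last_cast_mode;  /* snapshot before the call */
        rem_callback();
        /* reading after the call would have lost the old value */
        printf("old=%d new=%d\n", old_mode, last_cast_mode);
        return 0;
    }
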
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 5c04a0d..efe5fb2 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -577,7 +577,10 @@
 		goto fail;
 	}
 
-	dquot_initialize(inode);
+	err = dquot_initialize(inode);
+	if (err)
+		goto fail_drop;
+
 	err = dquot_alloc_inode(inode);
 	if (err)
 		goto fail_drop;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 5c09776..a3a404c 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1552,8 +1552,11 @@
 	if (error)
 		return error;
 
-	if (is_quota_modification(inode, iattr))
-		dquot_initialize(inode);
+	if (is_quota_modification(inode, iattr)) {
+		error = dquot_initialize(inode);
+		if (error)
+			return error;
+	}
 	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
 	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
 		error = dquot_transfer(inode, iattr);
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 13ec54a..b4841e3 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -96,8 +96,11 @@
 static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode, bool excl)
 {
 	struct inode *inode;
+	int err;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	inode = ext2_new_inode(dir, mode, &dentry->d_name);
 	if (IS_ERR(inode))
@@ -143,7 +146,9 @@
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	inode = ext2_new_inode (dir, mode, &dentry->d_name);
 	err = PTR_ERR(inode);
@@ -169,7 +174,9 @@
 	if (l > sb->s_blocksize)
 		goto out;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		goto out;
 
 	inode = ext2_new_inode (dir, S_IFLNK | S_IRWXUGO, &dentry->d_name);
 	err = PTR_ERR(inode);
@@ -212,7 +219,9 @@
 	struct inode *inode = d_inode(old_dentry);
 	int err;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	inode->i_ctime = CURRENT_TIME_SEC;
 	inode_inc_link_count(inode);
@@ -233,7 +242,9 @@
 	struct inode * inode;
 	int err;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	inode_inc_link_count(dir);
 
@@ -279,13 +290,17 @@
 	struct inode * inode = d_inode(dentry);
 	struct ext2_dir_entry_2 * de;
 	struct page * page;
-	int err = -ENOENT;
+	int err;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		goto out;
 
 	de = ext2_find_entry (dir, &dentry->d_name, &page);
-	if (!de)
+	if (!de) {
+		err = -ENOENT;
 		goto out;
+	}
 
 	err = ext2_delete_entry (de, page);
 	if (err)
@@ -323,14 +338,21 @@
 	struct ext2_dir_entry_2 * dir_de = NULL;
 	struct page * old_page;
 	struct ext2_dir_entry_2 * old_de;
-	int err = -ENOENT;
+	int err;
 
-	dquot_initialize(old_dir);
-	dquot_initialize(new_dir);
+	err = dquot_initialize(old_dir);
+	if (err)
+		goto out;
+
+	err = dquot_initialize(new_dir);
+	if (err)
+		goto out;
 
 	old_de = ext2_find_entry (old_dir, &old_dentry->d_name, &old_page);
-	if (!old_de)
+	if (!old_de) {
+		err = -ENOENT;
 		goto out;
+	}
 
 	if (S_ISDIR(old_inode->i_mode)) {
 		err = -EIO;
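
All of the ext2 hunks above follow one pattern: the return value of
dquot_initialize() is now checked and propagated to the caller instead of
being discarded. Reduced to a standalone sketch, with a stub standing in for
the quota call:

    #include <stdio.h>

    /* stub standing in for dquot_initialize(); returns 0 or -errno */
    static int dquot_initialize_stub(int fail)
    {
        return fail ? -12 /* -ENOMEM */ : 0;
    }

    static int ext2_style_create(int fail)
    {
        int err = dquot_initialize_stub(fail);
        if (err)
            return err;     /* propagate instead of ignoring */
        /* ... allocate the inode, add the directory entry ... */
        return 0;
    }

    int main(void)
    {
        printf("ok path: %d, failure path: %d\n",
               ext2_style_create(0), ext2_style_create(1));
        return 0;
    }
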
diff --git a/fs/ext3/Kconfig b/fs/ext3/Kconfig
deleted file mode 100644
index e8c6ba0..0000000
--- a/fs/ext3/Kconfig
+++ /dev/null
@@ -1,89 +0,0 @@
-config EXT3_FS
-	tristate "Ext3 journalling file system support"
-	select JBD
-	help
-	  This is the journalling version of the Second extended file system
-	  (often called ext3), the de facto standard Linux file system
-	  (method to organize files on a storage device) for hard disks.
-
-	  The journalling code included in this driver means you do not have
-	  to run e2fsck (file system checker) on your file systems after a
-	  crash.  The journal keeps track of any changes that were being made
-	  at the time the system crashed, and can ensure that your file system
-	  is consistent without the need for a lengthy check.
-
-	  Other than adding the journal to the file system, the on-disk format
-	  of ext3 is identical to ext2.  It is possible to freely switch
-	  between using the ext3 driver and the ext2 driver, as long as the
-	  file system has been cleanly unmounted, or e2fsck is run on the file
-	  system.
-
-	  To add a journal on an existing ext2 file system or change the
-	  behavior of ext3 file systems, you can use the tune2fs utility ("man
-	  tune2fs").  To modify attributes of files and directories on ext3
-	  file systems, use chattr ("man chattr").  You need to be using
-	  e2fsprogs version 1.20 or later in order to create ext3 journals
-	  (available at <http://sourceforge.net/projects/e2fsprogs/>).
-
-	  To compile this file system support as a module, choose M here: the
-	  module will be called ext3.
-
-config EXT3_DEFAULTS_TO_ORDERED
-	bool "Default to 'data=ordered' in ext3"
-	depends on EXT3_FS
-	default y
-	help
-	  The journal mode options for ext3 have different tradeoffs
-	  between when data is guaranteed to be on disk and
-	  performance.	The use of "data=writeback" can cause
-	  unwritten data to appear in files after an system crash or
-	  power failure, which can be a security issue.	 However,
-	  "data=ordered" mode can also result in major performance
-	  problems, including seconds-long delays before an fsync()
-	  call returns.	 For details, see:
-
-	  http://ext4.wiki.kernel.org/index.php/Ext3_data_mode_tradeoffs
-
-	  If you have been historically happy with ext3's performance,
-	  data=ordered mode will be a safe choice and you should
-	  answer 'y' here.  If you understand the reliability and data
-	  privacy issues of data=writeback and are willing to make
-	  that trade off, answer 'n'.
-
-config EXT3_FS_XATTR
-	bool "Ext3 extended attributes"
-	depends on EXT3_FS
-	default y
-	help
-	  Extended attributes are name:value pairs associated with inodes by
-	  the kernel or by users (see the attr(5) manual page, or visit
-	  <http://acl.bestbits.at/> for details).
-
-	  If unsure, say N.
-
-	  You need this for POSIX ACL support on ext3.
-
-config EXT3_FS_POSIX_ACL
-	bool "Ext3 POSIX Access Control Lists"
-	depends on EXT3_FS_XATTR
-	select FS_POSIX_ACL
-	help
-	  Posix Access Control Lists (ACLs) support permissions for users and
-	  groups beyond the owner/group/world scheme.
-
-	  To learn more about Access Control Lists, visit the Posix ACLs for
-	  Linux website <http://acl.bestbits.at/>.
-
-	  If you don't know what Access Control Lists are, say N
-
-config EXT3_FS_SECURITY
-	bool "Ext3 Security Labels"
-	depends on EXT3_FS_XATTR
-	help
-	  Security labels support alternative access control models
-	  implemented by security modules like SELinux.  This option
-	  enables an extended attribute handler for file security
-	  labels in the ext3 filesystem.
-
-	  If you are not using a security module that requires using
-	  extended attributes for file security labels, say N.
diff --git a/fs/ext3/Makefile b/fs/ext3/Makefile
deleted file mode 100644
index e77766a..0000000
--- a/fs/ext3/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Makefile for the linux ext3-filesystem routines.
-#
-
-obj-$(CONFIG_EXT3_FS) += ext3.o
-
-ext3-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
-	   ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o
-
-ext3-$(CONFIG_EXT3_FS_XATTR)	 += xattr.o xattr_user.o xattr_trusted.o
-ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
-ext3-$(CONFIG_EXT3_FS_SECURITY)	 += xattr_security.o
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
deleted file mode 100644
index 8bbaf5b..0000000
--- a/fs/ext3/acl.c
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * linux/fs/ext3/acl.c
- *
- * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
- */
-
-#include "ext3.h"
-#include "xattr.h"
-#include "acl.h"
-
-/*
- * Convert from filesystem to in-memory representation.
- */
-static struct posix_acl *
-ext3_acl_from_disk(const void *value, size_t size)
-{
-	const char *end = (char *)value + size;
-	int n, count;
-	struct posix_acl *acl;
-
-	if (!value)
-		return NULL;
-	if (size < sizeof(ext3_acl_header))
-		 return ERR_PTR(-EINVAL);
-	if (((ext3_acl_header *)value)->a_version !=
-	    cpu_to_le32(EXT3_ACL_VERSION))
-		return ERR_PTR(-EINVAL);
-	value = (char *)value + sizeof(ext3_acl_header);
-	count = ext3_acl_count(size);
-	if (count < 0)
-		return ERR_PTR(-EINVAL);
-	if (count == 0)
-		return NULL;
-	acl = posix_acl_alloc(count, GFP_NOFS);
-	if (!acl)
-		return ERR_PTR(-ENOMEM);
-	for (n=0; n < count; n++) {
-		ext3_acl_entry *entry =
-			(ext3_acl_entry *)value;
-		if ((char *)value + sizeof(ext3_acl_entry_short) > end)
-			goto fail;
-		acl->a_entries[n].e_tag  = le16_to_cpu(entry->e_tag);
-		acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm);
-		switch(acl->a_entries[n].e_tag) {
-			case ACL_USER_OBJ:
-			case ACL_GROUP_OBJ:
-			case ACL_MASK:
-			case ACL_OTHER:
-				value = (char *)value +
-					sizeof(ext3_acl_entry_short);
-				break;
-
-			case ACL_USER:
-				value = (char *)value + sizeof(ext3_acl_entry);
-				if ((char *)value > end)
-					goto fail;
-				acl->a_entries[n].e_uid =
-					make_kuid(&init_user_ns,
-						  le32_to_cpu(entry->e_id));
-				break;
-			case ACL_GROUP:
-				value = (char *)value + sizeof(ext3_acl_entry);
-				if ((char *)value > end)
-					goto fail;
-				acl->a_entries[n].e_gid =
-					make_kgid(&init_user_ns,
-						  le32_to_cpu(entry->e_id));
-				break;
-
-			default:
-				goto fail;
-		}
-	}
-	if (value != end)
-		goto fail;
-	return acl;
-
-fail:
-	posix_acl_release(acl);
-	return ERR_PTR(-EINVAL);
-}
-
-/*
- * Convert from in-memory to filesystem representation.
- */
-static void *
-ext3_acl_to_disk(const struct posix_acl *acl, size_t *size)
-{
-	ext3_acl_header *ext_acl;
-	char *e;
-	size_t n;
-
-	*size = ext3_acl_size(acl->a_count);
-	ext_acl = kmalloc(sizeof(ext3_acl_header) + acl->a_count *
-			sizeof(ext3_acl_entry), GFP_NOFS);
-	if (!ext_acl)
-		return ERR_PTR(-ENOMEM);
-	ext_acl->a_version = cpu_to_le32(EXT3_ACL_VERSION);
-	e = (char *)ext_acl + sizeof(ext3_acl_header);
-	for (n=0; n < acl->a_count; n++) {
-		const struct posix_acl_entry *acl_e = &acl->a_entries[n];
-		ext3_acl_entry *entry = (ext3_acl_entry *)e;
-		entry->e_tag  = cpu_to_le16(acl_e->e_tag);
-		entry->e_perm = cpu_to_le16(acl_e->e_perm);
-		switch(acl_e->e_tag) {
-			case ACL_USER:
-				entry->e_id = cpu_to_le32(
-					from_kuid(&init_user_ns, acl_e->e_uid));
-				e += sizeof(ext3_acl_entry);
-				break;
-			case ACL_GROUP:
-				entry->e_id = cpu_to_le32(
-					from_kgid(&init_user_ns, acl_e->e_gid));
-				e += sizeof(ext3_acl_entry);
-				break;
-
-			case ACL_USER_OBJ:
-			case ACL_GROUP_OBJ:
-			case ACL_MASK:
-			case ACL_OTHER:
-				e += sizeof(ext3_acl_entry_short);
-				break;
-
-			default:
-				goto fail;
-		}
-	}
-	return (char *)ext_acl;
-
-fail:
-	kfree(ext_acl);
-	return ERR_PTR(-EINVAL);
-}
-
-/*
- * Inode operation get_posix_acl().
- *
- * inode->i_mutex: don't care
- */
-struct posix_acl *
-ext3_get_acl(struct inode *inode, int type)
-{
-	int name_index;
-	char *value = NULL;
-	struct posix_acl *acl;
-	int retval;
-
-	switch (type) {
-	case ACL_TYPE_ACCESS:
-		name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
-		break;
-	case ACL_TYPE_DEFAULT:
-		name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT;
-		break;
-	default:
-		BUG();
-	}
-
-	retval = ext3_xattr_get(inode, name_index, "", NULL, 0);
-	if (retval > 0) {
-		value = kmalloc(retval, GFP_NOFS);
-		if (!value)
-			return ERR_PTR(-ENOMEM);
-		retval = ext3_xattr_get(inode, name_index, "", value, retval);
-	}
-	if (retval > 0)
-		acl = ext3_acl_from_disk(value, retval);
-	else if (retval == -ENODATA || retval == -ENOSYS)
-		acl = NULL;
-	else
-		acl = ERR_PTR(retval);
-	kfree(value);
-
-	if (!IS_ERR(acl))
-		set_cached_acl(inode, type, acl);
-
-	return acl;
-}
-
-/*
- * Set the access or default ACL of an inode.
- *
- * inode->i_mutex: down unless called from ext3_new_inode
- */
-static int
-__ext3_set_acl(handle_t *handle, struct inode *inode, int type,
-	     struct posix_acl *acl)
-{
-	int name_index;
-	void *value = NULL;
-	size_t size = 0;
-	int error;
-
-	switch(type) {
-		case ACL_TYPE_ACCESS:
-			name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
-			if (acl) {
-				error = posix_acl_equiv_mode(acl, &inode->i_mode);
-				if (error < 0)
-					return error;
-				else {
-					inode->i_ctime = CURRENT_TIME_SEC;
-					ext3_mark_inode_dirty(handle, inode);
-					if (error == 0)
-						acl = NULL;
-				}
-			}
-			break;
-
-		case ACL_TYPE_DEFAULT:
-			name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT;
-			if (!S_ISDIR(inode->i_mode))
-				return acl ? -EACCES : 0;
-			break;
-
-		default:
-			return -EINVAL;
-	}
-	if (acl) {
-		value = ext3_acl_to_disk(acl, &size);
-		if (IS_ERR(value))
-			return (int)PTR_ERR(value);
-	}
-
-	error = ext3_xattr_set_handle(handle, inode, name_index, "",
-				      value, size, 0);
-
-	kfree(value);
-
-	if (!error)
-		set_cached_acl(inode, type, acl);
-
-	return error;
-}
-
-int
-ext3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
-{
-	handle_t *handle;
-	int error, retries = 0;
-
-retry:
-	handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS(inode->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-	error = __ext3_set_acl(handle, inode, type, acl);
-	ext3_journal_stop(handle);
-	if (error == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
-		goto retry;
-	return error;
-}
-
-/*
- * Initialize the ACLs of a new inode. Called from ext3_new_inode.
- *
- * dir->i_mutex: down
- * inode->i_mutex: up (access to inode is still exclusive)
- */
-int
-ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
-{
-	struct posix_acl *default_acl, *acl;
-	int error;
-
-	error = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
-	if (error)
-		return error;
-
-	if (default_acl) {
-		error = __ext3_set_acl(handle, inode, ACL_TYPE_DEFAULT,
-				       default_acl);
-		posix_acl_release(default_acl);
-	}
-	if (acl) {
-		if (!error)
-			error = __ext3_set_acl(handle, inode, ACL_TYPE_ACCESS,
-					       acl);
-		posix_acl_release(acl);
-	}
-	return error;
-}
diff --git a/fs/ext3/acl.h b/fs/ext3/acl.h
deleted file mode 100644
index ea1c69e..0000000
--- a/fs/ext3/acl.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
-  File: fs/ext3/acl.h
-
-  (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
-*/
-
-#include <linux/posix_acl_xattr.h>
-
-#define EXT3_ACL_VERSION	0x0001
-
-typedef struct {
-	__le16		e_tag;
-	__le16		e_perm;
-	__le32		e_id;
-} ext3_acl_entry;
-
-typedef struct {
-	__le16		e_tag;
-	__le16		e_perm;
-} ext3_acl_entry_short;
-
-typedef struct {
-	__le32		a_version;
-} ext3_acl_header;
-
-static inline size_t ext3_acl_size(int count)
-{
-	if (count <= 4) {
-		return sizeof(ext3_acl_header) +
-		       count * sizeof(ext3_acl_entry_short);
-	} else {
-		return sizeof(ext3_acl_header) +
-		       4 * sizeof(ext3_acl_entry_short) +
-		       (count - 4) * sizeof(ext3_acl_entry);
-	}
-}
-
-static inline int ext3_acl_count(size_t size)
-{
-	ssize_t s;
-	size -= sizeof(ext3_acl_header);
-	s = size - 4 * sizeof(ext3_acl_entry_short);
-	if (s < 0) {
-		if (size % sizeof(ext3_acl_entry_short))
-			return -1;
-		return size / sizeof(ext3_acl_entry_short);
-	} else {
-		if (s % sizeof(ext3_acl_entry))
-			return -1;
-		return s / sizeof(ext3_acl_entry) + 4;
-	}
-}
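
The two helpers above encode the on-disk ACL layout: the four owner/group/mask/other entries are stored in the short form without an e_id, and any additional ACL_USER/ACL_GROUP entries take the full form. A minimal userspace sketch of the same round-trip, with the struct sizes written out by hand (4-byte header, 4-byte short entry, 8-byte full entry, matching the __le16/__le32 fields above) rather than taken from the kernel types:

#include <assert.h>
#include <stdio.h>

#define HDR_SZ   4	/* ext3_acl_header: one __le32           */
#define SHORT_SZ 4	/* ext3_acl_entry_short: e_tag + e_perm  */
#define FULL_SZ  8	/* ext3_acl_entry: e_tag + e_perm + e_id */

static long acl_size(int count)
{
	if (count <= 4)
		return HDR_SZ + count * SHORT_SZ;
	return HDR_SZ + 4 * SHORT_SZ + (count - 4) * FULL_SZ;
}

static int acl_count(long size)
{
	long s;

	size -= HDR_SZ;
	s = size - 4 * SHORT_SZ;
	if (s < 0)
		return size % SHORT_SZ ? -1 : (int)(size / SHORT_SZ);
	return s % FULL_SZ ? -1 : (int)(s / FULL_SZ) + 4;
}

int main(void)
{
	/* six entries: 4 short + 2 full = 4 + 16 + 16 = 36 bytes */
	assert(acl_size(6) == 36);
	assert(acl_count(36) == 6);
	/* three entries (no extended entries): 4 + 3 * 4 = 16 bytes */
	assert(acl_size(3) == 16 && acl_count(16) == 3);
	printf("round-trip OK\n");
	return 0;
}

A size that does not divide evenly (say 18 bytes) makes acl_count() return -1, which is how the kernel helper rejects a corrupted xattr.
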
-
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-
-/* acl.c */
-extern struct posix_acl *ext3_get_acl(struct inode *inode, int type);
-extern int ext3_set_acl(struct inode *inode, struct posix_acl *acl, int type);
-extern int ext3_init_acl (handle_t *, struct inode *, struct inode *);
-
-#else  /* CONFIG_EXT3_FS_POSIX_ACL */
-#include <linux/sched.h>
-#define ext3_get_acl NULL
-#define ext3_set_acl NULL
-
-static inline int
-ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
-{
-	return 0;
-}
-#endif  /* CONFIG_EXT3_FS_POSIX_ACL */
-
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
deleted file mode 100644
index 158b5d4..0000000
--- a/fs/ext3/balloc.c
+++ /dev/null
@@ -1,2158 +0,0 @@
-/*
- *  linux/fs/ext3/balloc.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- */
-
-#include <linux/quotaops.h>
-#include <linux/blkdev.h>
-#include "ext3.h"
-
-/*
- * balloc.c contains the blocks allocation and deallocation routines
- */
-
-/*
- * The free blocks are managed by bitmaps.  A file system contains several
- * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
- * block for inodes, N blocks for the inode table and data blocks.
- *
- * The file system contains group descriptors which are located after the
- * super block.  Each descriptor contains the number of the bitmap block and
- * the free blocks count in the block.  The descriptors are loaded in memory
- * when a file system is mounted (see ext3_fill_super).
- */
-
-
-#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
-
-/*
- * Calculate the block group number and offset, given a block number
- */
-static void ext3_get_group_no_and_offset(struct super_block *sb,
-	ext3_fsblk_t blocknr, unsigned long *blockgrpp, ext3_grpblk_t *offsetp)
-{
-	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-
-	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
-	if (offsetp)
-		*offsetp = blocknr % EXT3_BLOCKS_PER_GROUP(sb);
-	if (blockgrpp)
-		*blockgrpp = blocknr / EXT3_BLOCKS_PER_GROUP(sb);
-}
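
A worked instance of the arithmetic above, as a standalone sketch; the superblock values (first data block 1 and 8192 blocks per group, as on a 1 KiB block size filesystem) are illustrative assumptions, not read from a real superblock:

#include <stdio.h>

int main(void)
{
	unsigned long first_data_block = 1;	/* s_first_data_block     */
	unsigned long blocks_per_group = 8192;	/* EXT3_BLOCKS_PER_GROUP  */
	unsigned long blocknr = 20000;

	unsigned long rel    = blocknr - first_data_block;	/* 19999 */
	unsigned long group  = rel / blocks_per_group;		/* 2     */
	unsigned long offset = rel % blocks_per_group;		/* 3615  */

	printf("block %lu -> group %lu, offset %lu\n", blocknr, group, offset);
	return 0;
}
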
-
-/**
- * ext3_get_group_desc() -- load group descriptor from disk
- * @sb:			super block
- * @block_group:	given block group
- * @bh:			pointer to the buffer head to store the block
- *			group descriptor
- */
-struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
-					     unsigned int block_group,
-					     struct buffer_head ** bh)
-{
-	unsigned long group_desc;
-	unsigned long offset;
-	struct ext3_group_desc * desc;
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-
-	if (block_group >= sbi->s_groups_count) {
-		ext3_error (sb, "ext3_get_group_desc",
-			    "block_group >= groups_count - "
-			    "block_group = %d, groups_count = %lu",
-			    block_group, sbi->s_groups_count);
-
-		return NULL;
-	}
-	smp_rmb();
-
-	group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
-	offset = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
-	if (!sbi->s_group_desc[group_desc]) {
-		ext3_error (sb, "ext3_get_group_desc",
-			    "Group descriptor not loaded - "
-			    "block_group = %d, group_desc = %lu, desc = %lu",
-			     block_group, group_desc, offset);
-		return NULL;
-	}
-
-	desc = (struct ext3_group_desc *) sbi->s_group_desc[group_desc]->b_data;
-	if (bh)
-		*bh = sbi->s_group_desc[group_desc];
-	return desc + offset;
-}
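
The descriptor lookup above performs the same divide/modulo split with a shift and a mask, which is valid because EXT3_DESC_PER_BLOCK is a power of two. A small sketch with an assumed 32 descriptors per block (e.g. 1 KiB blocks holding 32-byte descriptors):

#include <stdio.h>

int main(void)
{
	unsigned int desc_per_block = 32;	/* must be a power of two */
	unsigned int block_group = 300;

	/* same as >> EXT3_DESC_PER_BLOCK_BITS and & (DESC_PER_BLOCK - 1) */
	unsigned int desc_block = block_group / desc_per_block;	  /* 9  */
	unsigned int slot = block_group & (desc_per_block - 1);	  /* 12 */

	printf("group %u -> descriptor block %u, slot %u\n",
	       block_group, desc_block, slot);
	return 0;
}
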
-
-static int ext3_valid_block_bitmap(struct super_block *sb,
-					struct ext3_group_desc *desc,
-					unsigned int block_group,
-					struct buffer_head *bh)
-{
-	ext3_grpblk_t offset;
-	ext3_grpblk_t next_zero_bit;
-	ext3_fsblk_t bitmap_blk;
-	ext3_fsblk_t group_first_block;
-
-	group_first_block = ext3_group_first_block_no(sb, block_group);
-
-	/* check whether block bitmap block number is set */
-	bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
-	offset = bitmap_blk - group_first_block;
-	if (!ext3_test_bit(offset, bh->b_data))
-		/* bad block bitmap */
-		goto err_out;
-
-	/* check whether the inode bitmap block number is set */
-	bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap);
-	offset = bitmap_blk - group_first_block;
-	if (!ext3_test_bit(offset, bh->b_data))
-		/* bad block bitmap */
-		goto err_out;
-
-	/* check whether the inode table block number is set */
-	bitmap_blk = le32_to_cpu(desc->bg_inode_table);
-	offset = bitmap_blk - group_first_block;
-	next_zero_bit = ext3_find_next_zero_bit(bh->b_data,
-				offset + EXT3_SB(sb)->s_itb_per_group,
-				offset);
-	if (next_zero_bit >= offset + EXT3_SB(sb)->s_itb_per_group)
-		/* good bitmap for inode tables */
-		return 1;
-
-err_out:
-	ext3_error(sb, __func__,
-			"Invalid block bitmap - "
-			"block_group = %d, block = %lu",
-			block_group, bitmap_blk);
-	return 0;
-}
-
-/**
- * read_block_bitmap()
- * @sb:			super block
- * @block_group:	given block group
- *
- * Read the bitmap for a given block_group, and validate that the
- * bits for the block/inode/inode table blocks are set in the bitmap
- *
- * Return buffer_head on success or NULL in case of failure.
- */
-static struct buffer_head *
-read_block_bitmap(struct super_block *sb, unsigned int block_group)
-{
-	struct ext3_group_desc * desc;
-	struct buffer_head * bh = NULL;
-	ext3_fsblk_t bitmap_blk;
-
-	desc = ext3_get_group_desc(sb, block_group, NULL);
-	if (!desc)
-		return NULL;
-	trace_ext3_read_block_bitmap(sb, block_group);
-	bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
-	bh = sb_getblk(sb, bitmap_blk);
-	if (unlikely(!bh)) {
-		ext3_error(sb, __func__,
-			    "Cannot read block bitmap - "
-			    "block_group = %d, block_bitmap = %u",
-			    block_group, le32_to_cpu(desc->bg_block_bitmap));
-		return NULL;
-	}
-	if (likely(bh_uptodate_or_lock(bh)))
-		return bh;
-
-	if (bh_submit_read(bh) < 0) {
-		brelse(bh);
-		ext3_error(sb, __func__,
-			    "Cannot read block bitmap - "
-			    "block_group = %d, block_bitmap = %u",
-			    block_group, le32_to_cpu(desc->bg_block_bitmap));
-		return NULL;
-	}
-	ext3_valid_block_bitmap(sb, desc, block_group, bh);
-	/*
-	 * file system was mounted not to panic on error, so continue
-	 * with the corrupt bitmap
-	 */
-	return bh;
-}
-/*
- * The reservation window structure operations
- * --------------------------------------------
- * Operations include:
- * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
- *
- * We use a red-black tree to represent per-filesystem reservation
- * windows.
- *
- */
-
-/**
- * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
- * @rb_root:		root of per-filesystem reservation rb tree
- * @verbose:		verbose mode
- * @fn:			function which wishes to dump the reservation map
- *
- * If verbose is turned on, it will print the whole block reservation
- * windows (start, end).  Otherwise, it will only print out the "bad" windows,
- * those windows that overlap with their immediate neighbors.
- */
-#if 1
-static void __rsv_window_dump(struct rb_root *root, int verbose,
-			      const char *fn)
-{
-	struct rb_node *n;
-	struct ext3_reserve_window_node *rsv, *prev;
-	int bad;
-
-restart:
-	n = rb_first(root);
-	bad = 0;
-	prev = NULL;
-
-	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
-	while (n) {
-		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
-		if (verbose)
-			printk("reservation window 0x%p "
-			       "start:  %lu, end:  %lu\n",
-			       rsv, rsv->rsv_start, rsv->rsv_end);
-		if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
-			printk("Bad reservation %p (start >= end)\n",
-			       rsv);
-			bad = 1;
-		}
-		if (prev && prev->rsv_end >= rsv->rsv_start) {
-			printk("Bad reservation %p (prev->end >= start)\n",
-			       rsv);
-			bad = 1;
-		}
-		if (bad) {
-			if (!verbose) {
-				printk("Restarting reservation walk in verbose mode\n");
-				verbose = 1;
-				goto restart;
-			}
-		}
-		n = rb_next(n);
-		prev = rsv;
-	}
-	printk("Window map complete.\n");
-	BUG_ON(bad);
-}
-#define rsv_window_dump(root, verbose) \
-	__rsv_window_dump((root), (verbose), __func__)
-#else
-#define rsv_window_dump(root, verbose) do {} while (0)
-#endif
-
-/**
- * goal_in_my_reservation()
- * @rsv:		inode's reservation window
- * @grp_goal:		given goal block relative to the allocation block group
- * @group:		the current allocation block group
- * @sb:			filesystem super block
- *
- * Test if the given goal block (group relative) is within the file's
- * own block reservation window range.
- *
- * If the reservation window is outside the goal allocation group, return 0;
- * grp_goal (given goal block) could be -1, which means no specific
- * goal block. In this case, always return 1.
- * If the goal block is within the reservation window, return 1;
- * otherwise, return 0;
- */
-static int
-goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal,
-			unsigned int group, struct super_block * sb)
-{
-	ext3_fsblk_t group_first_block, group_last_block;
-
-	group_first_block = ext3_group_first_block_no(sb, group);
-	group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
-
-	if ((rsv->_rsv_start > group_last_block) ||
-	    (rsv->_rsv_end < group_first_block))
-		return 0;
-	if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
-		|| (grp_goal + group_first_block > rsv->_rsv_end)))
-		return 0;
-	return 1;
-}
-
-/**
- * search_reserve_window()
- * @rb_root:		root of reservation tree
- * @goal:		target allocation block
- *
- * Find the reserved window which includes the goal, or the previous one
- * if the goal is not in any window.
- * Returns NULL if there are no windows or if all windows start after the goal.
- */
-static struct ext3_reserve_window_node *
-search_reserve_window(struct rb_root *root, ext3_fsblk_t goal)
-{
-	struct rb_node *n = root->rb_node;
-	struct ext3_reserve_window_node *rsv;
-
-	if (!n)
-		return NULL;
-
-	do {
-		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
-
-		if (goal < rsv->rsv_start)
-			n = n->rb_left;
-		else if (goal > rsv->rsv_end)
-			n = n->rb_right;
-		else
-			return rsv;
-	} while (n);
-	/*
-	 * We've fallen off the end of the tree: the goal wasn't inside
-	 * any particular node.  OK, the previous node must be to one
-	 * side of the interval containing the goal.  If it's the RHS,
-	 * we need to back up one.
-	 */
-	if (rsv->rsv_start > goal) {
-		n = rb_prev(&rsv->rsv_node);
-		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
-	}
-	return rsv;
-}
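
The lookup semantics can be sketched over a plain sorted array instead of the rb-tree, with made-up window values: the result is the window containing the goal if one exists, otherwise the closest window that starts before it, otherwise NULL:

#include <stdio.h>

struct win { unsigned long start, end; };

static const struct win *search(const struct win *w, int n, unsigned long goal)
{
	const struct win *best = NULL;
	int i;

	/*
	 * The last window whose start is <= goal is either the window
	 * containing the goal or the closest one before it; if every
	 * window starts after the goal, NULL is returned, matching the
	 * rb-tree walk above.
	 */
	for (i = 0; i < n && w[i].start <= goal; i++)
		best = &w[i];
	return best;
}

int main(void)
{
	static const struct win w[] = { { 10, 20 }, { 30, 40 } };
	const struct win *r;

	r = search(w, 2, 35);	/* inside [30,40]              */
	printf("goal 35 -> [%lu,%lu]\n", r->start, r->end);
	r = search(w, 2, 25);	/* between windows -> [10,20]  */
	printf("goal 25 -> [%lu,%lu]\n", r->start, r->end);
	printf("goal  5 -> %s\n", search(w, 2, 5) ? "found" : "NULL");
	return 0;
}
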
-
-/**
- * ext3_rsv_window_add() -- Insert a window to the block reservation rb tree.
- * @sb:			super block
- * @rsv:		reservation window to add
- *
- * Must be called with rsv_lock held.
- */
-void ext3_rsv_window_add(struct super_block *sb,
-		    struct ext3_reserve_window_node *rsv)
-{
-	struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root;
-	struct rb_node *node = &rsv->rsv_node;
-	ext3_fsblk_t start = rsv->rsv_start;
-
-	struct rb_node ** p = &root->rb_node;
-	struct rb_node * parent = NULL;
-	struct ext3_reserve_window_node *this;
-
-	trace_ext3_rsv_window_add(sb, rsv);
-	while (*p)
-	{
-		parent = *p;
-		this = rb_entry(parent, struct ext3_reserve_window_node, rsv_node);
-
-		if (start < this->rsv_start)
-			p = &(*p)->rb_left;
-		else if (start > this->rsv_end)
-			p = &(*p)->rb_right;
-		else {
-			rsv_window_dump(root, 1);
-			BUG();
-		}
-	}
-
-	rb_link_node(node, parent, p);
-	rb_insert_color(node, root);
-}
-
-/**
- * ext3_rsv_window_remove() -- unlink a window from the reservation rb tree
- * @sb:			super block
- * @rsv:		reservation window to remove
- *
- * Mark the block reservation window as not allocated, and unlink it
- * from the filesystem reservation window rb tree. Must be called with
- * rsv_lock held.
- */
-static void rsv_window_remove(struct super_block *sb,
-			      struct ext3_reserve_window_node *rsv)
-{
-	rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-	rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-	rsv->rsv_alloc_hit = 0;
-	rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root);
-}
-
-/*
- * rsv_is_empty() -- Check if the reservation window is allocated.
- * @rsv:		given reservation window to check
- *
- * returns 1 if the end block is EXT3_RESERVE_WINDOW_NOT_ALLOCATED.
- */
-static inline int rsv_is_empty(struct ext3_reserve_window *rsv)
-{
-	/* a valid reservation end block cannot be 0 */
-	return rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-}
-
-/**
- * ext3_init_block_alloc_info()
- * @inode:		file inode structure
- *
- * Allocate and initialize the reservation window structure, and
- * finally link the window to the ext3 inode structure
- *
- * The reservation window structure is only dynamically allocated
- * and linked to ext3 inode the first time the open file
- * needs a new block. So, before every ext3_new_block(s) call, for
- * regular files, we should check whether the reservation window
- * structure exists; if it does not, this function is called.
- * Failure to do so will result in block reservation being turned off for that
- * open file.
- *
- * This function is called from ext3_get_blocks_handle(), also called
- * when setting the reservation window size through ioctl before the file
- * is open for write (needs block allocation).
- *
- * Needs truncate_mutex protection prior to calling this function.
- */
-void ext3_init_block_alloc_info(struct inode *inode)
-{
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct ext3_block_alloc_info *block_i;
-	struct super_block *sb = inode->i_sb;
-
-	block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
-	if (block_i) {
-		struct ext3_reserve_window_node *rsv = &block_i->rsv_window_node;
-
-		rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-		rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-
-		/*
-		 * if filesystem is mounted with NORESERVATION, the goal
-		 * reservation window size is set to zero to indicate
-		 * block reservation is off
-		 */
-		if (!test_opt(sb, RESERVATION))
-			rsv->rsv_goal_size = 0;
-		else
-			rsv->rsv_goal_size = EXT3_DEFAULT_RESERVE_BLOCKS;
-		rsv->rsv_alloc_hit = 0;
-		block_i->last_alloc_logical_block = 0;
-		block_i->last_alloc_physical_block = 0;
-	}
-	ei->i_block_alloc_info = block_i;
-}
-
-/**
- * ext3_discard_reservation()
- * @inode:		inode
- *
- * Discard (free) the block reservation window on last file close,
- * on truncate, or at last iput().
- *
- * It is being called in three cases:
- *	ext3_release_file(): the last writer closes the file
- *	ext3_clear_inode(): last iput(), when nobody links to this file.
- *	ext3_truncate(): when the block indirect map is about to change.
- *
- */
-void ext3_discard_reservation(struct inode *inode)
-{
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
-	struct ext3_reserve_window_node *rsv;
-	spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock;
-
-	if (!block_i)
-		return;
-
-	rsv = &block_i->rsv_window_node;
-	if (!rsv_is_empty(&rsv->rsv_window)) {
-		spin_lock(rsv_lock);
-		if (!rsv_is_empty(&rsv->rsv_window)) {
-			trace_ext3_discard_reservation(inode, rsv);
-			rsv_window_remove(inode->i_sb, rsv);
-		}
-		spin_unlock(rsv_lock);
-	}
-}
-
-/**
- * ext3_free_blocks_sb() -- Free given blocks and update quota
- * @handle:			handle to this transaction
- * @sb:				super block
- * @block:			start physical block to free
- * @count:			number of blocks to free
- * @pdquot_freed_blocks:	pointer to quota
- */
-void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
-			 ext3_fsblk_t block, unsigned long count,
-			 unsigned long *pdquot_freed_blocks)
-{
-	struct buffer_head *bitmap_bh = NULL;
-	struct buffer_head *gd_bh;
-	unsigned long block_group;
-	ext3_grpblk_t bit;
-	unsigned long i;
-	unsigned long overflow;
-	struct ext3_group_desc * desc;
-	struct ext3_super_block * es;
-	struct ext3_sb_info *sbi;
-	int err = 0, ret;
-	ext3_grpblk_t group_freed;
-
-	*pdquot_freed_blocks = 0;
-	sbi = EXT3_SB(sb);
-	es = sbi->s_es;
-	if (block < le32_to_cpu(es->s_first_data_block) ||
-	    block + count < block ||
-	    block + count > le32_to_cpu(es->s_blocks_count)) {
-		ext3_error (sb, "ext3_free_blocks",
-			    "Freeing blocks not in datazone - "
-			    "block = "E3FSBLK", count = %lu", block, count);
-		goto error_return;
-	}
-
-	ext3_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);
-
-do_more:
-	overflow = 0;
-	block_group = (block - le32_to_cpu(es->s_first_data_block)) /
-		      EXT3_BLOCKS_PER_GROUP(sb);
-	bit = (block - le32_to_cpu(es->s_first_data_block)) %
-		      EXT3_BLOCKS_PER_GROUP(sb);
-	/*
-	 * Check to see if we are freeing blocks across a group
-	 * boundary.
-	 */
-	if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
-		overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
-		count -= overflow;
-	}
-	brelse(bitmap_bh);
-	bitmap_bh = read_block_bitmap(sb, block_group);
-	if (!bitmap_bh)
-		goto error_return;
-	desc = ext3_get_group_desc (sb, block_group, &gd_bh);
-	if (!desc)
-		goto error_return;
-
-	if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) ||
-	    in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) ||
-	    in_range (block, le32_to_cpu(desc->bg_inode_table),
-		      sbi->s_itb_per_group) ||
-	    in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
-		      sbi->s_itb_per_group)) {
-		ext3_error (sb, "ext3_free_blocks",
-			    "Freeing blocks in system zones - "
-			    "Block = "E3FSBLK", count = %lu",
-			    block, count);
-		goto error_return;
-	}
-
-	/*
-	 * We are about to start releasing blocks in the bitmap,
-	 * so we need undo access.
-	 */
-	/* @@@ check errors */
-	BUFFER_TRACE(bitmap_bh, "getting undo access");
-	err = ext3_journal_get_undo_access(handle, bitmap_bh);
-	if (err)
-		goto error_return;
-
-	/*
-	 * We are about to modify some metadata.  Call the journal APIs
-	 * to unshare ->b_data if a currently-committing transaction is
-	 * using it
-	 */
-	BUFFER_TRACE(gd_bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, gd_bh);
-	if (err)
-		goto error_return;
-
-	jbd_lock_bh_state(bitmap_bh);
-
-	for (i = 0, group_freed = 0; i < count; i++) {
-		/*
-		 * An HJ special.  This is expensive...
-		 */
-#ifdef CONFIG_JBD_DEBUG
-		jbd_unlock_bh_state(bitmap_bh);
-		{
-			struct buffer_head *debug_bh;
-			debug_bh = sb_find_get_block(sb, block + i);
-			if (debug_bh) {
-				BUFFER_TRACE(debug_bh, "Deleted!");
-				if (!bh2jh(bitmap_bh)->b_committed_data)
-					BUFFER_TRACE(debug_bh,
-						"No committed data in bitmap");
-				BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
-				__brelse(debug_bh);
-			}
-		}
-		jbd_lock_bh_state(bitmap_bh);
-#endif
-		if (need_resched()) {
-			jbd_unlock_bh_state(bitmap_bh);
-			cond_resched();
-			jbd_lock_bh_state(bitmap_bh);
-		}
-		/* @@@ This prevents newly-allocated data from being
-		 * freed and then reallocated within the same
-		 * transaction.
-		 *
-		 * Ideally we would want to allow that to happen, but to
-		 * do so requires making journal_forget() capable of
-		 * revoking the queued write of a data block, which
-		 * implies blocking on the journal lock.  *forget()
-		 * cannot block due to truncate races.
-		 *
-		 * Eventually we can fix this by making journal_forget()
-		 * return a status indicating whether or not it was able
-		 * to revoke the buffer.  On successful revoke, it is
-		 * safe not to set the allocation bit in the committed
-		 * bitmap, because we know that there is no outstanding
-		 * activity on the buffer any more and so it is safe to
-		 * reallocate it.
-		 */
-		BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
-		J_ASSERT_BH(bitmap_bh,
-				bh2jh(bitmap_bh)->b_committed_data != NULL);
-		ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
-				bh2jh(bitmap_bh)->b_committed_data);
-
-		/*
-		 * We clear the bit in the bitmap after setting the committed
-		 * data bit, because this is the reverse order to that which
-		 * the allocator uses.
-		 */
-		BUFFER_TRACE(bitmap_bh, "clear bit");
-		if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
-						bit + i, bitmap_bh->b_data)) {
-			jbd_unlock_bh_state(bitmap_bh);
-			ext3_error(sb, __func__,
-				"bit already cleared for block "E3FSBLK,
-				 block + i);
-			jbd_lock_bh_state(bitmap_bh);
-			BUFFER_TRACE(bitmap_bh, "bit already cleared");
-		} else {
-			group_freed++;
-		}
-	}
-	jbd_unlock_bh_state(bitmap_bh);
-
-	spin_lock(sb_bgl_lock(sbi, block_group));
-	le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
-	spin_unlock(sb_bgl_lock(sbi, block_group));
-	percpu_counter_add(&sbi->s_freeblocks_counter, count);
-
-	/* We dirtied the bitmap block */
-	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
-	err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-
-	/* And the group descriptor block */
-	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
-	ret = ext3_journal_dirty_metadata(handle, gd_bh);
-	if (!err) err = ret;
-	*pdquot_freed_blocks += group_freed;
-
-	if (overflow && !err) {
-		block += count;
-		count = overflow;
-		goto do_more;
-	}
-
-error_return:
-	brelse(bitmap_bh);
-	ext3_std_error(sb, err);
-	return;
-}
-
-/**
- * ext3_free_blocks() -- Free given blocks and update quota
- * @handle:		handle for this transaction
- * @inode:		inode
- * @block:		start physical block to free
- * @count:		number of blocks to free
- */
-void ext3_free_blocks(handle_t *handle, struct inode *inode,
-			ext3_fsblk_t block, unsigned long count)
-{
-	struct super_block *sb = inode->i_sb;
-	unsigned long dquot_freed_blocks;
-
-	trace_ext3_free_blocks(inode, block, count);
-	ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
-	if (dquot_freed_blocks)
-		dquot_free_block(inode, dquot_freed_blocks);
-	return;
-}
-
-/**
- * ext3_test_allocatable()
- * @nr:			given block number, relative to the block group
- * @bh:			bufferhead containing the bitmap of the given block group
- *
- * For ext3 allocations, we must not reuse any blocks which are
- * allocated in the bitmap buffer's "last committed data" copy.  This
- * prevents deletes from freeing up the page for reuse until we have
- * committed the delete transaction.
- *
- * If we didn't do this, then deleting something and reallocating it as
- * data would allow the old block to be overwritten before the
- * transaction committed (because we force data to disk before commit).
- * This would lead to corruption if we crashed between overwriting the
- * data and committing the delete.
- *
- * @@@ We may want to make this allocation behaviour conditional on
- * data-writes at some point, and disable it for metadata allocations or
- * sync-data inodes.
- */
-static int ext3_test_allocatable(ext3_grpblk_t nr, struct buffer_head *bh)
-{
-	int ret;
-	struct journal_head *jh = bh2jh(bh);
-
-	if (ext3_test_bit(nr, bh->b_data))
-		return 0;
-
-	jbd_lock_bh_state(bh);
-	if (!jh->b_committed_data)
-		ret = 1;
-	else
-		ret = !ext3_test_bit(nr, jh->b_committed_data);
-	jbd_unlock_bh_state(bh);
-	return ret;
-}
-
-/**
- * bitmap_search_next_usable_block()
- * @start:		the starting block (group relative) of the search
- * @bh:			bufferhead containing the block group bitmap
- * @maxblocks:		the ending block (group relative) of the reservation
- *
- * The bitmap search --- search forward alternately through the actual
- * bitmap on disk and the last-committed copy in journal, until we find a
- * bit free in both bitmaps.
- */
-static ext3_grpblk_t
-bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
-					ext3_grpblk_t maxblocks)
-{
-	ext3_grpblk_t next;
-	struct journal_head *jh = bh2jh(bh);
-
-	while (start < maxblocks) {
-		next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start);
-		if (next >= maxblocks)
-			return -1;
-		if (ext3_test_allocatable(next, bh))
-			return next;
-		jbd_lock_bh_state(bh);
-		if (jh->b_committed_data)
-			start = ext3_find_next_zero_bit(jh->b_committed_data,
-							maxblocks, next);
-		jbd_unlock_bh_state(bh);
-	}
-	return -1;
-}
-
-/**
- * find_next_usable_block()
- * @start:		the starting block (group relative) to find next
- *			allocatable block in bitmap.
- * @bh:			bufferhead containing the block group bitmap
- * @maxblocks:		the ending block (group relative) for the search
- *
- * Find an allocatable block in a bitmap.  We honor both the bitmap and
- * its last-committed copy (if that exists), and perform the "most
- * appropriate allocation" algorithm of looking for a free block near
- * the initial goal; then for a free byte somewhere in the bitmap; then
- * for any free bit in the bitmap.
- */
-static ext3_grpblk_t
-find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
-			ext3_grpblk_t maxblocks)
-{
-	ext3_grpblk_t here, next;
-	char *p, *r;
-
-	if (start > 0) {
-		/*
-		 * The goal was occupied; search forward for a free
-		 * block within the next XX blocks.
-		 *
-		 * end_goal is more or less random, but it has to be
-		 * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the
-		 * next 64-bit boundary is simple..
-		 */
-		ext3_grpblk_t end_goal = (start + 63) & ~63;
-		if (end_goal > maxblocks)
-			end_goal = maxblocks;
-		here = ext3_find_next_zero_bit(bh->b_data, end_goal, start);
-		if (here < end_goal && ext3_test_allocatable(here, bh))
-			return here;
-		ext3_debug("Bit not found near goal\n");
-	}
-
-	here = start;
-	if (here < 0)
-		here = 0;
-
-	p = bh->b_data + (here >> 3);
-	r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
-	next = (r - bh->b_data) << 3;
-
-	if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
-		return next;
-
-	/*
-	 * The bitmap search --- search forward alternately through the actual
-	 * bitmap and the last-committed copy until we find a bit free in
-	 * both
-	 */
-	here = bitmap_search_next_usable_block(here, bh, maxblocks);
-	return here;
-}
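
A userspace sketch of the same three-stage search order: first near the goal up to the next 64-bit boundary, then a fully free byte (eight free blocks at once), then any free bit at all. It uses memchr() where the kernel uses memscan(), skips the last-committed-bitmap check for brevity, and the bitmap contents are invented:

#include <stdio.h>
#include <string.h>

static int test_bit(const unsigned char *map, int nr)
{
	return map[nr >> 3] >> (nr & 7) & 1;	/* 0 = free, 1 = allocated */
}

static int next_zero_bit(const unsigned char *map, int max, int start)
{
	int i;

	for (i = start; i < max; i++)
		if (!test_bit(map, i))
			return i;
	return max;
}

static int find_usable(const unsigned char *map, int start, int max)
{
	const unsigned char *p, *r;
	int end_goal, here;

	/* stage 1: scan from the goal up to the next 64-bit boundary */
	end_goal = (start + 63) & ~63;
	if (end_goal > max)
		end_goal = max;
	here = next_zero_bit(map, end_goal, start);
	if (here < end_goal)
		return here;

	/* stage 2: look for a completely free byte */
	p = map + (start >> 3);
	r = memchr(p, 0, ((max + 7) >> 3) - (start >> 3));
	if (r) {
		here = (int)(r - map) << 3;
		if (here >= start && here < max)
			return here;
	}

	/* stage 3: fall back to any free bit at all */
	here = next_zero_bit(map, max, start);
	return here < max ? here : -1;
}

int main(void)
{
	unsigned char map[16];			/* 128 blocks             */

	memset(map, 0xff, sizeof(map));		/* everything allocated   */
	map[9] = 0;				/* ... except bits 72..79 */
	printf("first usable: %d\n", find_usable(map, 3, 128));	/* 72 */
	return 0;
}
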
-
-/**
- * claim_block()
- * @lock:		the spin lock for this block group
- * @block:		the free block (group relative) to allocate
- * @bh:		the buffer_head containing the block group bitmap
- *
- * We think we can allocate this block in this bitmap.  Try to set the bit.
- * If that succeeds then check that nobody has allocated and then freed the
- * block since we saw that it was not marked in b_committed_data.  If it _was_
- * allocated and freed then clear the bit in the bitmap again and return
- * zero (failure).
- */
-static inline int
-claim_block(spinlock_t *lock, ext3_grpblk_t block, struct buffer_head *bh)
-{
-	struct journal_head *jh = bh2jh(bh);
-	int ret;
-
-	if (ext3_set_bit_atomic(lock, block, bh->b_data))
-		return 0;
-	jbd_lock_bh_state(bh);
-	if (jh->b_committed_data && ext3_test_bit(block,jh->b_committed_data)) {
-		ext3_clear_bit_atomic(lock, block, bh->b_data);
-		ret = 0;
-	} else {
-		ret = 1;
-	}
-	jbd_unlock_bh_state(bh);
-	return ret;
-}
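
The claim/revalidate/roll-back pattern above can be sketched with C11 atomics on a single word; here the "committed" copy is just a plain snapshot standing in for the journal's b_committed_data, and the bit values are invented:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long bitmap;	/* live allocation bitmap  */
static unsigned long committed;		/* last-committed snapshot */

static int claim(int nr)
{
	unsigned long bit = 1UL << nr;

	/* try to set the bit; fail if someone else already owns it */
	if (atomic_fetch_or(&bitmap, bit) & bit)
		return 0;
	/*
	 * The bit was clear in the live bitmap, but if it is still set
	 * in the committed copy the block was freed by a not-yet-
	 * committed transaction: undo the claim and report failure.
	 */
	if (committed & bit) {
		atomic_fetch_and(&bitmap, ~bit);
		return 0;
	}
	return 1;
}

int main(void)
{
	committed = 1UL << 3;	/* block 3 freed, free not yet committed */
	printf("claim 2: %d\n", claim(2));	/* 1: success     */
	printf("claim 3: %d\n", claim(3));	/* 0: rolled back */
	return 0;
}
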
-
-/**
- * ext3_try_to_allocate()
- * @sb:			superblock
- * @handle:		handle to this transaction
- * @group:		given allocation block group
- * @bitmap_bh:		bufferhead holding the block bitmap
- * @grp_goal:		given target block within the group
- * @count:		target number of blocks to allocate
- * @my_rsv:		reservation window
- *
- * Attempt to allocate blocks within a given range. Set the range of allocation
- * first, then find the first free bit(s) in the bitmap (within the range),
- * and finally allocate the blocks by claiming the found free bits as allocated.
- *
- * To set the range of this allocation:
- *	if there is a reservation window, only try to allocate block(s) from the
- *	file's own reservation window;
- *	Otherwise, the allocation range starts from the given goal block and
- *	ends at the block group's last block.
- *
- * If we failed to allocate the desired block then we may end up crossing to a
- * new bitmap.  In that case we must release write access to the old one via
- * ext3_journal_release_buffer(), else we'll run out of credits.
- */
-static ext3_grpblk_t
-ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
-			struct buffer_head *bitmap_bh, ext3_grpblk_t grp_goal,
-			unsigned long *count, struct ext3_reserve_window *my_rsv)
-{
-	ext3_fsblk_t group_first_block;
-	ext3_grpblk_t start, end;
-	unsigned long num = 0;
-
-	/* we do allocation within the reservation window if we have a window */
-	if (my_rsv) {
-		group_first_block = ext3_group_first_block_no(sb, group);
-		if (my_rsv->_rsv_start >= group_first_block)
-			start = my_rsv->_rsv_start - group_first_block;
-		else
-			/* reservation window cross group boundary */
-			start = 0;
-		end = my_rsv->_rsv_end - group_first_block + 1;
-		if (end > EXT3_BLOCKS_PER_GROUP(sb))
-			/* reservation window crosses group boundary */
-			end = EXT3_BLOCKS_PER_GROUP(sb);
-		if ((start <= grp_goal) && (grp_goal < end))
-			start = grp_goal;
-		else
-			grp_goal = -1;
-	} else {
-		if (grp_goal > 0)
-			start = grp_goal;
-		else
-			start = 0;
-		end = EXT3_BLOCKS_PER_GROUP(sb);
-	}
-
-	BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb));
-
-repeat:
-	if (grp_goal < 0 || !ext3_test_allocatable(grp_goal, bitmap_bh)) {
-		grp_goal = find_next_usable_block(start, bitmap_bh, end);
-		if (grp_goal < 0)
-			goto fail_access;
-		if (!my_rsv) {
-			int i;
-
-			for (i = 0; i < 7 && grp_goal > start &&
-					ext3_test_allocatable(grp_goal - 1,
-								bitmap_bh);
-					i++, grp_goal--)
-				;
-		}
-	}
-	start = grp_goal;
-
-	if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group),
-		grp_goal, bitmap_bh)) {
-		/*
-		 * The block was allocated by another thread, or it was
-		 * allocated and then freed by another thread
-		 */
-		start++;
-		grp_goal++;
-		if (start >= end)
-			goto fail_access;
-		goto repeat;
-	}
-	num++;
-	grp_goal++;
-	while (num < *count && grp_goal < end
-		&& ext3_test_allocatable(grp_goal, bitmap_bh)
-		&& claim_block(sb_bgl_lock(EXT3_SB(sb), group),
-				grp_goal, bitmap_bh)) {
-		num++;
-		grp_goal++;
-	}
-	*count = num;
-	return grp_goal - num;
-fail_access:
-	*count = num;
-	return -1;
-}
-
-/**
- *	find_next_reservable_window():
- *		find a reservable space within the given range.
- *		It does not allocate the reservation window for now:
- *		alloc_new_reservation() will do the work later.
- *
- *	@search_head: the head of the searching list;
- *		This is not necessarily the list head of the whole filesystem
- *
- *		We have both head and start_block to assist the search
- *		for the reservable space. The list starts from head,
- *		but we will shift to the place where start_block is,
- *		then start from there, when looking for a reservable space.
- *
- *	@my_rsv: the reservation window
- *
- *	@sb: the super block
- *
- *	@start_block: the first block we consider to start
- *			the real search from
- *
- *	@last_block:
- *		the maximum block number that our goal reservable space
- *		could start from. This is normally the last block in this
- *		group. The search ends when we find that the start of the
- *		next possible reservable space is beyond this boundary.
- *		This handles the cross-boundary reservation window
- *		request.
- *
- *	Basically we search the given range (start_block, last_block),
- *	rather than the whole reservation red-black tree, to find a
- *	free region that is of the requested size and has not
- *	been reserved.
- *
- */
-static int find_next_reservable_window(
-				struct ext3_reserve_window_node *search_head,
-				struct ext3_reserve_window_node *my_rsv,
-				struct super_block * sb,
-				ext3_fsblk_t start_block,
-				ext3_fsblk_t last_block)
-{
-	struct rb_node *next;
-	struct ext3_reserve_window_node *rsv, *prev;
-	ext3_fsblk_t cur;
-	int size = my_rsv->rsv_goal_size;
-
-	/* TODO: make the start of the reservation window byte-aligned */
-	/* cur = *start_block & ~7;*/
-	cur = start_block;
-	rsv = search_head;
-	if (!rsv)
-		return -1;
-
-	while (1) {
-		if (cur <= rsv->rsv_end)
-			cur = rsv->rsv_end + 1;
-
-		/* TODO?
-		 * in the case we could not find a reservable space
-		 * that is what is expected, during the re-search, we could
-		 * remember what's the largest reservable space we could have
-		 * and return that one.
-		 *
-		 * For now it will fail if we cannot find a reservable
-		 * space of the expected size (or more)...
-		 */
-		if (cur > last_block)
-			return -1;		/* fail */
-
-		prev = rsv;
-		next = rb_next(&rsv->rsv_node);
-		rsv = rb_entry(next,struct ext3_reserve_window_node,rsv_node);
-
-		/*
-		 * Reached the last reservation, we can just append to the
-		 * previous one.
-		 */
-		if (!next)
-			break;
-
-		if (cur + size <= rsv->rsv_start) {
-			/*
-			 * Found a reservable space big enough.  We could
-			 * have a reservation across the group boundary here
-			 */
-			break;
-		}
-	}
-	/*
-	 * We come here either:
-	 * when we reach the end of the whole list and there is empty
-	 * reservable space after the last entry in the list, in which
-	 * case we append to the end of the list;
-	 *
-	 * or when we find a reservable space in the middle of the list,
-	 * in which case we return the reservation window that we could
-	 * append to. Success.
-	 */
-
-	if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
-		rsv_window_remove(sb, my_rsv);
-
-	/*
-	 * Let's book the whole available window for now.  We will check the
-	 * disk bitmap later and then, if there are free blocks, we adjust
-	 * the window size if it's larger than requested.
-	 * Otherwise, we will remove this node from the tree the next time
-	 * find_next_reservable_window() is called.
-	 */
-	my_rsv->rsv_start = cur;
-	my_rsv->rsv_end = cur + size - 1;
-	my_rsv->rsv_alloc_hit = 0;
-
-	if (prev != my_rsv)
-		ext3_rsv_window_add(sb, my_rsv);
-
-	return 0;
-}
-
-/**
- *	alloc_new_reservation()--allocate a new reservation window
- *
- *		To make a new reservation, we search part of the filesystem
- *		reservation list (the part of the list inside the group). We try to
- *		allocate a new reservation window near the allocation goal,
- *		or the beginning of the group, if there is no goal.
- *
- *		We first find a reservable space after the goal, then from
- *		there, we check the bitmap for the first free block after
- *		it. If there is no free block until the end of the group, the
- *		whole group is full and we fail. Otherwise, check if the free
- *		block is inside the expected reservable space; if so, we
- *		succeed.
- *		If the first free block is outside the reservable space, then
- *		we start from the first free block and search for the next
- *		available space, and go on.
- *
- *	On success, a new reservation will be found and inserted into the list.
- *	It contains at least one free block, and it does not overlap with other
- *	reservation windows.
- *
- *	On failure, no reservation window was found in this group.
- *
- *	@my_rsv: the reservation window
- *
- *	@grp_goal: The goal (group-relative).  It is where the search for a
- *		free reservable space should start from.
- *		If we have a grp_goal (grp_goal > 0), then start from there;
- *		with no grp_goal (grp_goal = -1), we start from the first block
- *		of the group.
- *
- *	@sb: the super block
- *	@group: the group we are trying to allocate in
- *	@bitmap_bh: the block group block bitmap
- *
- */
-static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
-		ext3_grpblk_t grp_goal, struct super_block *sb,
-		unsigned int group, struct buffer_head *bitmap_bh)
-{
-	struct ext3_reserve_window_node *search_head;
-	ext3_fsblk_t group_first_block, group_end_block, start_block;
-	ext3_grpblk_t first_free_block;
-	struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root;
-	unsigned long size;
-	int ret;
-	spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
-
-	group_first_block = ext3_group_first_block_no(sb, group);
-	group_end_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
-
-	if (grp_goal < 0)
-		start_block = group_first_block;
-	else
-		start_block = grp_goal + group_first_block;
-
-	trace_ext3_alloc_new_reservation(sb, start_block);
-	size = my_rsv->rsv_goal_size;
-
-	if (!rsv_is_empty(&my_rsv->rsv_window)) {
-		/*
-		 * If the old reservation crosses the group boundary and the
-		 * goal is inside the old reservation window, we come here
-		 * when we have just failed to allocate from the first part
-		 * of the window. We still have another part that belongs to
-		 * the next group. In this case, there is no point in
-		 * discarding our window and trying to allocate a new one in
-		 * this group (which will fail); we should keep the
-		 * reservation window and simply move on.
-		 *
-		 * Maybe we could shift the start block of the reservation
-		 * window to the first block of the next group.
-		 */
-
-		if ((my_rsv->rsv_start <= group_end_block) &&
-				(my_rsv->rsv_end > group_end_block) &&
-				(start_block >= my_rsv->rsv_start))
-			return -1;
-
-		if ((my_rsv->rsv_alloc_hit >
-		     (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
-			/*
-			 * If the previous allocation hit ratio is
-			 * greater than 1/2, then we double the size of
-			 * the reservation window the next time;
-			 * otherwise we keep the same size window.
-			 */
-			size = size * 2;
-			if (size > EXT3_MAX_RESERVE_BLOCKS)
-				size = EXT3_MAX_RESERVE_BLOCKS;
-			my_rsv->rsv_goal_size= size;
-		}
-	}
-
-	spin_lock(rsv_lock);
-	/*
-	 * shift the search start to the window near the goal block
-	 */
-	search_head = search_reserve_window(fs_rsv_root, start_block);
-
-	/*
-	 * find_next_reservable_window() simply finds a reservable window
-	 * inside the given range(start_block, group_end_block).
-	 *
-	 * To make sure the reservation window has a free bit inside it, we
-	 * need to check the bitmap after we found a reservable window.
-	 */
-retry:
-	ret = find_next_reservable_window(search_head, my_rsv, sb,
-						start_block, group_end_block);
-
-	if (ret == -1) {
-		if (!rsv_is_empty(&my_rsv->rsv_window))
-			rsv_window_remove(sb, my_rsv);
-		spin_unlock(rsv_lock);
-		return -1;
-	}
-
-	/*
-	 * On success, find_next_reservable_window() returns the
-	 * reservation window where there is a reservable space after it.
-	 * Before we reserve this reservable space, we need
-	 * to make sure there is at least a free block inside this region.
-	 *
-	 * Search the block bitmap and the copy of the last-committed
-	 * bitmap alternately for the first free bit, until we find an
-	 * allocatable block. The search starts from the start block of
-	 * the reservable space we just found.
-	 */
-	spin_unlock(rsv_lock);
-	first_free_block = bitmap_search_next_usable_block(
-			my_rsv->rsv_start - group_first_block,
-			bitmap_bh, group_end_block - group_first_block + 1);
-
-	if (first_free_block < 0) {
-		/*
-		 * No free blocks left in the bitmap, so there is no point
-		 * in reserving the space. Return failure.
-		 */
-		spin_lock(rsv_lock);
-		if (!rsv_is_empty(&my_rsv->rsv_window))
-			rsv_window_remove(sb, my_rsv);
-		spin_unlock(rsv_lock);
-		return -1;		/* failed */
-	}
-
-	start_block = first_free_block + group_first_block;
-	/*
-	 * check if the first free block is within the
-	 * free space we just reserved
-	 */
-	if (start_block >= my_rsv->rsv_start &&
-	    start_block <= my_rsv->rsv_end) {
-		trace_ext3_reserved(sb, start_block, my_rsv);
-		return 0;		/* success */
-	}
-	/*
-	 * if the first free bit we found is out of the reservable space
-	 * continue search for next reservable space,
-	 * start from where the free block is,
-	 * we also shift the list head to where we stopped last time
-	 */
-	search_head = my_rsv;
-	spin_lock(rsv_lock);
-	goto retry;
-}
-
-/**
- * try_to_extend_reservation()
- * @my_rsv:		given reservation window
- * @sb:			super block
- * @size:		the delta to extend
- *
- * Attempt to expand the reservation window so that it is large enough
- * to hold the required number of free blocks.
- *
- * Since ext3_try_to_allocate() will always allocate blocks within
- * the reservation window range, if the window size is too small,
- * a multiple-block allocation has to stop at the end of the reservation
- * window. To make this more efficient, given the total number of
- * blocks needed and the current size of the window, we try to
- * expand the reservation window size if necessary on a best-effort
- * basis before ext3_new_blocks() tries to allocate blocks.
- */
-static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
-			struct super_block *sb, int size)
-{
-	struct ext3_reserve_window_node *next_rsv;
-	struct rb_node *next;
-	spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
-
-	if (!spin_trylock(rsv_lock))
-		return;
-
-	next = rb_next(&my_rsv->rsv_node);
-
-	if (!next)
-		my_rsv->rsv_end += size;
-	else {
-		next_rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node);
-
-		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
-			my_rsv->rsv_end += size;
-		else
-			my_rsv->rsv_end = next_rsv->rsv_start - 1;
-	}
-	spin_unlock(rsv_lock);
-}
-
-/**
- * ext3_try_to_allocate_with_rsv()
- * @sb:			superblock
- * @handle:		handle to this transaction
- * @group:		given allocation block group
- * @bitmap_bh:		bufferhead holding the block bitmap
- * @grp_goal:		given target block within the group
- * @my_rsv:		reservation window
- * @count:		target number of blocks to allocate
- * @errp:		pointer to store the error code
- *
- * This is the main function used to allocate a new block and its reservation
- * window.
- *
- * Each time a new block allocation is needed, we first try to allocate
- * from the file's own reservation.  If it does not have a reservation
- * window, then, instead of looking for a free bit in the bitmap first
- * and then checking the reservation list to see whether the bit falls
- * inside somebody else's reservation window, we try to allocate a
- * reservation window for the file starting from the goal, and then do
- * the block allocation within that reservation window.
- *
- * This avoids repeatedly searching the reservation list when somebody
- * is looking for a free block (without a reservation) and there are
- * lots of free blocks, but they are all reserved.
- *
- * We use a red-black tree for the per-filesystem reservation list.
- *
- */
-static ext3_grpblk_t
-ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
-			unsigned int group, struct buffer_head *bitmap_bh,
-			ext3_grpblk_t grp_goal,
-			struct ext3_reserve_window_node * my_rsv,
-			unsigned long *count, int *errp)
-{
-	ext3_fsblk_t group_first_block, group_last_block;
-	ext3_grpblk_t ret = 0;
-	int fatal;
-	unsigned long num = *count;
-
-	*errp = 0;
-
-	/*
-	 * Make sure we use undo access for the bitmap, because it is critical
-	 * that we do the frozen_data COW on bitmap buffers in all cases even
-	 * if the buffer is in BJ_Forget state in the committing transaction.
-	 */
-	BUFFER_TRACE(bitmap_bh, "get undo access for new block");
-	fatal = ext3_journal_get_undo_access(handle, bitmap_bh);
-	if (fatal) {
-		*errp = fatal;
-		return -1;
-	}
-
-	/*
-	 * We don't deal with reservations when the
-	 * filesystem is mounted without reservations,
-	 * or the file is not a regular file,
-	 * or the last attempt to allocate a block with reservations turned on failed
-	 */
-	if (my_rsv == NULL) {
-		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
-						grp_goal, count, NULL);
-		goto out;
-	}
-	/*
-	 * grp_goal is a group relative block number (if there is a goal)
-	 * 0 <= grp_goal < EXT3_BLOCKS_PER_GROUP(sb)
-	 * group_first_block is a filesystem-wide block number; it is
-	 * the block number of the first block in this group.
-	 */
-	group_first_block = ext3_group_first_block_no(sb, group);
-	group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
-
-	/*
-	 * Basically we will allocate a new block from inode's reservation
-	 * window.
-	 *
-	 * We need to allocate a new reservation window, if:
-	 * a) inode does not have a reservation window; or
-	 * b) last attempt to allocate a block from existing reservation
-	 *    failed; or
-	 * c) we come here with a goal and with a reservation window
-	 *
-	 * We do not need to allocate a new reservation window if we come here
-	 * at the beginning with a goal and the goal is inside the window, or
-	 * if we don't have a goal but already have a reservation window;
-	 * in those cases we can allocate from the reservation window directly.
-	 */
-	while (1) {
-		if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
-			!goal_in_my_reservation(&my_rsv->rsv_window,
-						grp_goal, group, sb)) {
-			if (my_rsv->rsv_goal_size < *count)
-				my_rsv->rsv_goal_size = *count;
-			ret = alloc_new_reservation(my_rsv, grp_goal, sb,
-							group, bitmap_bh);
-			if (ret < 0)
-				break;			/* failed */
-
-			if (!goal_in_my_reservation(&my_rsv->rsv_window,
-							grp_goal, group, sb))
-				grp_goal = -1;
-		} else if (grp_goal >= 0) {
-			int curr = my_rsv->rsv_end -
-					(grp_goal + group_first_block) + 1;
-
-			if (curr < *count)
-				try_to_extend_reservation(my_rsv, sb,
-							*count - curr);
-		}
-
-		if ((my_rsv->rsv_start > group_last_block) ||
-				(my_rsv->rsv_end < group_first_block)) {
-			rsv_window_dump(&EXT3_SB(sb)->s_rsv_window_root, 1);
-			BUG();
-		}
-		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
-					   grp_goal, &num, &my_rsv->rsv_window);
-		if (ret >= 0) {
-			my_rsv->rsv_alloc_hit += num;
-			*count = num;
-			break;				/* succeed */
-		}
-		num = *count;
-	}
-out:
-	if (ret >= 0) {
-		BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
-					"bitmap block");
-		fatal = ext3_journal_dirty_metadata(handle, bitmap_bh);
-		if (fatal) {
-			*errp = fatal;
-			return -1;
-		}
-		return ret;
-	}
-
-	BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
-	ext3_journal_release_buffer(handle, bitmap_bh);
-	return ret;
-}
-
-/**
- * ext3_has_free_blocks()
- * @sbi:		in-core super block structure.
- *
- * Check if filesystem has at least 1 free block available for allocation.
- */
-static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
-{
-	ext3_fsblk_t free_blocks, root_blocks;
-
-	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
-	root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
-	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
-		!use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
-		(gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
-		 !in_group_p (sbi->s_resgid))) {
-		return 0;
-	}
-	return 1;
-}
-
-/**
- * ext3_should_retry_alloc()
- * @sb:			super block
- * @retries:		number of attempts that have been made
- *
- * ext3_should_retry_alloc() is called when ENOSPC is returned, and if
- * it is profitable to retry the operation, this function will wait
- * for the current or committing transaction to complete, and then
- * return TRUE.
- *
- * If the total number of retries exceeds three, return FALSE.
- */
-int ext3_should_retry_alloc(struct super_block *sb, int *retries)
-{
-	if (!ext3_has_free_blocks(EXT3_SB(sb), 0) || (*retries)++ > 3)
-		return 0;
-
-	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
-
-	return journal_force_commit_nested(EXT3_SB(sb)->s_journal);
-}
-
-/**
- * ext3_new_blocks() -- core block(s) allocation function
- * @handle:		handle to this transaction
- * @inode:		file inode
- * @goal:		given target block(filesystem wide)
- * @count:		target number of blocks to allocate
- * @errp:		error code
- *
- * ext3_new_blocks uses a goal block to assist allocation.  It tries to
- * allocate block(s) from the block group containing the goal block first. If that
- * fails, it will try to allocate block(s) from other block groups without
- * any specific goal block.
- *
- */
-ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
-			ext3_fsblk_t goal, unsigned long *count, int *errp)
-{
-	struct buffer_head *bitmap_bh = NULL;
-	struct buffer_head *gdp_bh;
-	int group_no;
-	int goal_group;
-	ext3_grpblk_t grp_target_blk;	/* blockgroup relative goal block */
-	ext3_grpblk_t grp_alloc_blk;	/* blockgroup-relative allocated block*/
-	ext3_fsblk_t ret_block;		/* filesystem-wide allocated block */
-	int bgi;			/* blockgroup iteration index */
-	int fatal = 0, err;
-	int performed_allocation = 0;
-	ext3_grpblk_t free_blocks;	/* number of free blocks in a group */
-	struct super_block *sb;
-	struct ext3_group_desc *gdp;
-	struct ext3_super_block *es;
-	struct ext3_sb_info *sbi;
-	struct ext3_reserve_window_node *my_rsv = NULL;
-	struct ext3_block_alloc_info *block_i;
-	unsigned short windowsz = 0;
-#ifdef EXT3FS_DEBUG
-	static int goal_hits, goal_attempts;
-#endif
-	unsigned long ngroups;
-	unsigned long num = *count;
-
-	*errp = -ENOSPC;
-	sb = inode->i_sb;
-
-	/*
-	 * Check quota for allocation of this block.
-	 */
-	err = dquot_alloc_block(inode, num);
-	if (err) {
-		*errp = err;
-		return 0;
-	}
-
-	trace_ext3_request_blocks(inode, goal, num);
-
-	sbi = EXT3_SB(sb);
-	es = sbi->s_es;
-	ext3_debug("goal=%lu.\n", goal);
-	/*
-	 * Allocate a block from the reservation window only when the
-	 * filesystem is mounted with reservations (the default, -o reservation),
-	 * it's a regular file, and
-	 * the desired window size is greater than 0 (one could use the
-	 * EXT3_IOC_SETRSVSZ ioctl to set the window size to 0 to turn off
-	 * reservation on that particular file).
-	 */
-	block_i = EXT3_I(inode)->i_block_alloc_info;
-	if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
-		my_rsv = &block_i->rsv_window_node;
-
-	if (!ext3_has_free_blocks(sbi, IS_NOQUOTA(inode))) {
-		*errp = -ENOSPC;
-		goto out;
-	}
-
-	/*
-	 * First, test whether the goal block is free.
-	 */
-	if (goal < le32_to_cpu(es->s_first_data_block) ||
-	    goal >= le32_to_cpu(es->s_blocks_count))
-		goal = le32_to_cpu(es->s_first_data_block);
-	group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
-			EXT3_BLOCKS_PER_GROUP(sb);
-	goal_group = group_no;
-retry_alloc:
-	gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
-	if (!gdp)
-		goto io_error;
-
-	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
-	/*
-	 * if there are not enough free blocks to make a new reservation,
-	 * turn off reservation for this allocation
-	 */
-	if (my_rsv && (free_blocks < windowsz)
-		&& (free_blocks > 0)
-		&& (rsv_is_empty(&my_rsv->rsv_window)))
-		my_rsv = NULL;
-
-	if (free_blocks > 0) {
-		grp_target_blk = ((goal - le32_to_cpu(es->s_first_data_block)) %
-				EXT3_BLOCKS_PER_GROUP(sb));
-		bitmap_bh = read_block_bitmap(sb, group_no);
-		if (!bitmap_bh)
-			goto io_error;
-		grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
-					group_no, bitmap_bh, grp_target_blk,
-					my_rsv,	&num, &fatal);
-		if (fatal)
-			goto out;
-		if (grp_alloc_blk >= 0)
-			goto allocated;
-	}
-
-	ngroups = EXT3_SB(sb)->s_groups_count;
-	smp_rmb();
-
-	/*
-	 * Now search the rest of the groups.  We assume that
-	 * group_no and gdp correctly point to the last group visited.
-	 */
-	for (bgi = 0; bgi < ngroups; bgi++) {
-		group_no++;
-		if (group_no >= ngroups)
-			group_no = 0;
-		gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
-		if (!gdp)
-			goto io_error;
-		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
-		/*
-		 * skip this group (and avoid loading bitmap) if there
-		 * are no free blocks
-		 */
-		if (!free_blocks)
-			continue;
-		/*
-		 * skip this group if the number of
-		 * free blocks is less than half of the reservation
-		 * window size.
-		 */
-		if (my_rsv && (free_blocks <= (windowsz/2)))
-			continue;
-
-		brelse(bitmap_bh);
-		bitmap_bh = read_block_bitmap(sb, group_no);
-		if (!bitmap_bh)
-			goto io_error;
-		/*
-		 * try to allocate block(s) from this group, without a goal(-1).
-		 */
-		grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
-					group_no, bitmap_bh, -1, my_rsv,
-					&num, &fatal);
-		if (fatal)
-			goto out;
-		if (grp_alloc_blk >= 0)
-			goto allocated;
-	}
-	/*
-	 * We may have returned a bogus ENOSPC error earlier because the
-	 * filesystem is "full" of reservations while free blocks may in
-	 * fact still be available on disk.  In this case, just forget the
-	 * reservations and redo the block allocation without them.
-	 */
-	if (my_rsv) {
-		my_rsv = NULL;
-		windowsz = 0;
-		group_no = goal_group;
-		goto retry_alloc;
-	}
-	/* No space left on the device */
-	*errp = -ENOSPC;
-	goto out;
-
-allocated:
-
-	ext3_debug("using block group %d(%d)\n",
-			group_no, gdp->bg_free_blocks_count);
-
-	BUFFER_TRACE(gdp_bh, "get_write_access");
-	fatal = ext3_journal_get_write_access(handle, gdp_bh);
-	if (fatal)
-		goto out;
-
-	ret_block = grp_alloc_blk + ext3_group_first_block_no(sb, group_no);
-
-	if (in_range(le32_to_cpu(gdp->bg_block_bitmap), ret_block, num) ||
-	    in_range(le32_to_cpu(gdp->bg_inode_bitmap), ret_block, num) ||
-	    in_range(ret_block, le32_to_cpu(gdp->bg_inode_table),
-		      EXT3_SB(sb)->s_itb_per_group) ||
-	    in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
-		      EXT3_SB(sb)->s_itb_per_group)) {
-		ext3_error(sb, "ext3_new_block",
-			    "Allocating block in system zone - "
-			    "blocks from "E3FSBLK", length %lu",
-			     ret_block, num);
-		/*
-		 * claim_block() marked the blocks we allocated as in use. So we
-		 * may want to selectively mark some of the blocks as free.
-		 */
-		goto retry_alloc;
-	}
-
-	performed_allocation = 1;
-
-#ifdef CONFIG_JBD_DEBUG
-	{
-		struct buffer_head *debug_bh;
-
-		/* Record bitmap buffer state in the newly allocated block */
-		debug_bh = sb_find_get_block(sb, ret_block);
-		if (debug_bh) {
-			BUFFER_TRACE(debug_bh, "state when allocated");
-			BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
-			brelse(debug_bh);
-		}
-	}
-	jbd_lock_bh_state(bitmap_bh);
-	spin_lock(sb_bgl_lock(sbi, group_no));
-	if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
-		int i;
-
-		for (i = 0; i < num; i++) {
-			if (ext3_test_bit(grp_alloc_blk+i,
-					bh2jh(bitmap_bh)->b_committed_data)) {
-				printk("%s: block was unexpectedly set in "
-					"b_committed_data\n", __func__);
-			}
-		}
-	}
-	ext3_debug("found bit %d\n", grp_alloc_blk);
-	spin_unlock(sb_bgl_lock(sbi, group_no));
-	jbd_unlock_bh_state(bitmap_bh);
-#endif
-
-	if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) {
-		ext3_error(sb, "ext3_new_block",
-			    "block("E3FSBLK") >= blocks count(%d) - "
-			    "block_group = %d, es == %p ", ret_block,
-			le32_to_cpu(es->s_blocks_count), group_no, es);
-		goto out;
-	}
-
-	/*
-	 * It is up to the caller to add the new buffer to a journal
-	 * list of some description.  We don't know in advance whether
-	 * the caller wants to use it as metadata or data.
-	 */
-	ext3_debug("allocating block %lu. Goal hits %d of %d.\n",
-			ret_block, goal_hits, goal_attempts);
-
-	spin_lock(sb_bgl_lock(sbi, group_no));
-	le16_add_cpu(&gdp->bg_free_blocks_count, -num);
-	spin_unlock(sb_bgl_lock(sbi, group_no));
-	percpu_counter_sub(&sbi->s_freeblocks_counter, num);
-
-	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
-	fatal = ext3_journal_dirty_metadata(handle, gdp_bh);
-	if (fatal)
-		goto out;
-
-	*errp = 0;
-	brelse(bitmap_bh);
-
-	if (num < *count) {
-		dquot_free_block(inode, *count-num);
-		*count = num;
-	}
-
-	trace_ext3_allocate_blocks(inode, goal, num,
-				   (unsigned long long)ret_block);
-
-	return ret_block;
-
-io_error:
-	*errp = -EIO;
-out:
-	if (fatal) {
-		*errp = fatal;
-		ext3_std_error(sb, fatal);
-	}
-	/*
-	 * Undo the block allocation
-	 */
-	if (!performed_allocation)
-		dquot_free_block(inode, *count);
-	brelse(bitmap_bh);
-	return 0;
-}
-
-ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
-			ext3_fsblk_t goal, int *errp)
-{
-	unsigned long count = 1;
-
-	return ext3_new_blocks(handle, inode, goal, &count, errp);
-}
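-/*
- * Usage sketch (editor's addition): single-block callers treat a zero
- * return as failure and read the reason from *errp, e.g.:
- *
- *	int err;
- *	ext3_fsblk_t blk = ext3_new_block(handle, inode, goal, &err);
- *
- *	if (!blk)
- *		return err;
- */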
-
-/**
- * ext3_count_free_blocks() -- count filesystem free blocks
- * @sb:		superblock
- *
- * Adds up the number of free blocks from each block group.
- */
-ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb)
-{
-	ext3_fsblk_t desc_count;
-	struct ext3_group_desc *gdp;
-	int i;
-	unsigned long ngroups = EXT3_SB(sb)->s_groups_count;
-#ifdef EXT3FS_DEBUG
-	struct ext3_super_block *es;
-	ext3_fsblk_t bitmap_count;
-	unsigned long x;
-	struct buffer_head *bitmap_bh = NULL;
-
-	es = EXT3_SB(sb)->s_es;
-	desc_count = 0;
-	bitmap_count = 0;
-	gdp = NULL;
-
-	smp_rmb();
-	for (i = 0; i < ngroups; i++) {
-		gdp = ext3_get_group_desc(sb, i, NULL);
-		if (!gdp)
-			continue;
-		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
-		brelse(bitmap_bh);
-		bitmap_bh = read_block_bitmap(sb, i);
-		if (bitmap_bh == NULL)
-			continue;
-
-		x = ext3_count_free(bitmap_bh, sb->s_blocksize);
-		printk("group %d: stored = %d, counted = %lu\n",
-			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
-		bitmap_count += x;
-	}
-	brelse(bitmap_bh);
-	printk("ext3_count_free_blocks: stored = "E3FSBLK
-		", computed = "E3FSBLK", "E3FSBLK"\n",
-	       (ext3_fsblk_t)le32_to_cpu(es->s_free_blocks_count),
-		desc_count, bitmap_count);
-	return bitmap_count;
-#else
-	desc_count = 0;
-	smp_rmb();
-	for (i = 0; i < ngroups; i++) {
-		gdp = ext3_get_group_desc(sb, i, NULL);
-		if (!gdp)
-			continue;
-		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
-	}
-
-	return desc_count;
-#endif
-}
-
-static inline int test_root(int a, int b)
-{
-	int num = b;
-
-	while (a > num)
-		num *= b;
-	return num == a;
-}
-
-static int ext3_group_sparse(int group)
-{
-	if (group <= 1)
-		return 1;
-	if (!(group & 1))
-		return 0;
-	return (test_root(group, 7) || test_root(group, 5) ||
-		test_root(group, 3));
-}
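-/*
- * Worked example (editor's note): test_root(a, b) is true when a is a
- * positive power of b - test_root(49, 7) steps num through 7, 49 and
- * matches.  With SPARSE_SUPER, backup superblocks therefore live only in
- * groups 0, 1 and powers of 3, 5 and 7:
- *
- *	0, 1, 3, 5, 7, 9, 25, 27, 49, 81, 125, 243, 343, 625, 729, ...
- */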
-
-/**
- *	ext3_bg_has_super - number of blocks used by the superblock in group
- *	@sb: superblock for filesystem
- *	@group: group number to check
- *
- *	Return the number of blocks used by the superblock (primary or backup)
- *	in this group.  Currently this will be only 0 or 1.
- */
-int ext3_bg_has_super(struct super_block *sb, int group)
-{
-	if (EXT3_HAS_RO_COMPAT_FEATURE(sb,
-				EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
-			!ext3_group_sparse(group))
-		return 0;
-	return 1;
-}
-
-static unsigned long ext3_bg_num_gdb_meta(struct super_block *sb, int group)
-{
-	unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb);
-	unsigned long first = metagroup * EXT3_DESC_PER_BLOCK(sb);
-	unsigned long last = first + EXT3_DESC_PER_BLOCK(sb) - 1;
-
-	if (group == first || group == first + 1 || group == last)
-		return 1;
-	return 0;
-}
-
-static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group)
-{
-	return ext3_bg_has_super(sb, group) ? EXT3_SB(sb)->s_gdb_count : 0;
-}
-
-/**
- *	ext3_bg_num_gdb - number of blocks used by the group table in group
- *	@sb: superblock for filesystem
- *	@group: group number to check
- *
- *	Return the number of blocks used by the group descriptor table
- *	(primary or backup) in this group.  In the future there may be a
- *	different number of descriptor blocks in each group.
- */
-unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
-{
-	unsigned long first_meta_bg =
-			le32_to_cpu(EXT3_SB(sb)->s_es->s_first_meta_bg);
-	unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb);
-
-	if (!EXT3_HAS_INCOMPAT_FEATURE(sb,EXT3_FEATURE_INCOMPAT_META_BG) ||
-			metagroup < first_meta_bg)
-		return ext3_bg_num_gdb_nometa(sb,group);
-
-	return ext3_bg_num_gdb_meta(sb,group);
-
-}
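-/*
- * Example (editor's note): with 4KiB blocks a group descriptor is 32
- * bytes, so EXT3_DESC_PER_BLOCK() is 128 and each META_BG metagroup
- * spans 128 groups; only the first, second and last group of the
- * metagroup (e.g. groups 128, 129 and 255 for metagroup 1) each hold
- * one descriptor block.
- */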
-
-/**
- * ext3_trim_all_free -- trim all free space in an allocation group
- * @sb:			super block for file system
- * @group:		allocation group to trim
- * @start:		first group block to examine
- * @max:		last group block to examine
- * @minblocks:		minimum extent block count
- *
- * ext3_trim_all_free walks through the group's block bitmap searching for
- * free blocks.  When a free block is found, it claims it and as many
- * consecutive free blocks as possible to build the largest free extent it
- * can, stopping at the first used block.  It then issues a TRIM command on
- * this extent and frees the extent in the block bitmap.  This is repeated
- * until the whole group has been scanned.
- */
-static ext3_grpblk_t ext3_trim_all_free(struct super_block *sb,
-					unsigned int group,
-					ext3_grpblk_t start, ext3_grpblk_t max,
-					ext3_grpblk_t minblocks)
-{
-	handle_t *handle;
-	ext3_grpblk_t next, free_blocks, bit, freed, count = 0;
-	ext3_fsblk_t discard_block;
-	struct ext3_sb_info *sbi;
-	struct buffer_head *gdp_bh, *bitmap_bh = NULL;
-	struct ext3_group_desc *gdp;
-	int err = 0, ret = 0;
-
-	/*
-	 * We will update one block bitmap and one group descriptor
-	 */
-	handle = ext3_journal_start_sb(sb, 2);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	bitmap_bh = read_block_bitmap(sb, group);
-	if (!bitmap_bh) {
-		err = -EIO;
-		goto err_out;
-	}
-
-	BUFFER_TRACE(bitmap_bh, "getting undo access");
-	err = ext3_journal_get_undo_access(handle, bitmap_bh);
-	if (err)
-		goto err_out;
-
-	gdp = ext3_get_group_desc(sb, group, &gdp_bh);
-	if (!gdp) {
-		err = -EIO;
-		goto err_out;
-	}
-
-	BUFFER_TRACE(gdp_bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, gdp_bh);
-	if (err)
-		goto err_out;
-
-	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
-	sbi = EXT3_SB(sb);
-
-	 /* Walk through the whole group */
-	while (start <= max) {
-		start = bitmap_search_next_usable_block(start, bitmap_bh, max);
-		if (start < 0)
-			break;
-		next = start;
-
-		/*
-		 * Allocate contiguous free extents by setting bits in the
-		 * block bitmap
-		 */
-		while (next <= max
-			&& claim_block(sb_bgl_lock(sbi, group),
-					next, bitmap_bh)) {
-			next++;
-		}
-
-		 /* We did not claim any blocks */
-		if (next == start)
-			continue;
-
-		discard_block = (ext3_fsblk_t)start +
-				ext3_group_first_block_no(sb, group);
-
-		/* Update counters */
-		spin_lock(sb_bgl_lock(sbi, group));
-		le16_add_cpu(&gdp->bg_free_blocks_count, start - next);
-		spin_unlock(sb_bgl_lock(sbi, group));
-		percpu_counter_sub(&sbi->s_freeblocks_counter, next - start);
-
-		free_blocks -= next - start;
-		/* Do not issue a TRIM on extents smaller than minblocks */
-		if ((next - start) < minblocks)
-			goto free_extent;
-
-		trace_ext3_discard_blocks(sb, discard_block, next - start);
-		 /* Send the TRIM command down to the device */
-		err = sb_issue_discard(sb, discard_block, next - start,
-				       GFP_NOFS, 0);
-		count += (next - start);
-free_extent:
-		freed = 0;
-
-		/*
-		 * Clear bits in the bitmap
-		 */
-		for (bit = start; bit < next; bit++) {
-			BUFFER_TRACE(bitmap_bh, "clear bit");
-			if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, group),
-						bit, bitmap_bh->b_data)) {
-				ext3_error(sb, __func__,
-					"bit already cleared for block "E3FSBLK,
-					 (unsigned long)bit);
-				BUFFER_TRACE(bitmap_bh, "bit already cleared");
-			} else {
-				freed++;
-			}
-		}
-
-		/* Update counters */
-		spin_lock(sb_bgl_lock(sbi, group));
-		le16_add_cpu(&gdp->bg_free_blocks_count, freed);
-		spin_unlock(sb_bgl_lock(sbi, group));
-		percpu_counter_add(&sbi->s_freeblocks_counter, freed);
-
-		start = next;
-		if (err < 0) {
-			if (err != -EOPNOTSUPP)
-				ext3_warning(sb, __func__, "Discard command "
-					     "returned error %d\n", err);
-			break;
-		}
-
-		if (fatal_signal_pending(current)) {
-			err = -ERESTARTSYS;
-			break;
-		}
-
-		cond_resched();
-
-		/* No more suitable extents */
-		if (free_blocks < minblocks)
-			break;
-	}
-
-	/* We dirtied the bitmap block */
-	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
-	ret = ext3_journal_dirty_metadata(handle, bitmap_bh);
-	if (!err)
-		err = ret;
-
-	/* And the group descriptor block */
-	BUFFER_TRACE(gdp_bh, "dirtied group descriptor block");
-	ret = ext3_journal_dirty_metadata(handle, gdp_bh);
-	if (!err)
-		err = ret;
-
-	ext3_debug("trimmed %d blocks in the group %d\n",
-		count, group);
-
-err_out:
-	if (err)
-		count = err;
-	ext3_journal_stop(handle);
-	brelse(bitmap_bh);
-
-	return count;
-}
-
-/**
- * ext3_trim_fs() -- trim ioctl handler
- * @sb:			superblock for filesystem
- * @range:		fstrim_range describing the start, the length and the
- *			minimum extent length to trim, all in bytes
- *
- * ext3_trim_fs goes through all allocation groups containing bytes from
- * range->start to range->start + range->len.  For each such group,
- * ext3_trim_all_free() is invoked to trim all free space.
- */
-int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
-{
-	ext3_grpblk_t last_block, first_block;
-	unsigned long group, first_group, last_group;
-	struct ext3_group_desc *gdp;
-	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-	uint64_t start, minlen, end, trimmed = 0;
-	ext3_fsblk_t first_data_blk =
-			le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
-	ext3_fsblk_t max_blks = le32_to_cpu(es->s_blocks_count);
-	int ret = 0;
-
-	start = range->start >> sb->s_blocksize_bits;
-	end = start + (range->len >> sb->s_blocksize_bits) - 1;
-	minlen = range->minlen >> sb->s_blocksize_bits;
-
-	if (minlen > EXT3_BLOCKS_PER_GROUP(sb) ||
-	    start >= max_blks ||
-	    range->len < sb->s_blocksize)
-		return -EINVAL;
-	if (end >= max_blks)
-		end = max_blks - 1;
-	if (end <= first_data_blk)
-		goto out;
-	if (start < first_data_blk)
-		start = first_data_blk;
-
-	smp_rmb();
-
-	/* Determine first and last group to examine based on start and len */
-	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) start,
-				     &first_group, &first_block);
-	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) end,
-				     &last_group, &last_block);
-
-	/* end now represents the last block to discard in this group */
-	end = EXT3_BLOCKS_PER_GROUP(sb) - 1;
-
-	for (group = first_group; group <= last_group; group++) {
-		gdp = ext3_get_group_desc(sb, group, NULL);
-		if (!gdp)
-			break;
-
-		/*
-		 * For all groups except the last one, the last block will
-		 * always be EXT3_BLOCKS_PER_GROUP(sb) - 1, so we only need to
-		 * change it for the last group; note that last_block was
-		 * already computed earlier by ext3_get_group_no_and_offset().
-		 */
-		if (group == last_group)
-			end = last_block;
-
-		if (le16_to_cpu(gdp->bg_free_blocks_count) >= minlen) {
-			ret = ext3_trim_all_free(sb, group, first_block,
-						 end, minlen);
-			if (ret < 0)
-				break;
-			trimmed += ret;
-		}
-
-		/*
-		 * For every group except the first one, we are sure
-		 * that the first block to discard will be block #0.
-		 */
-		first_block = 0;
-	}
-
-	if (ret > 0)
-		ret = 0;
-
-out:
-	range->len = trimmed * sb->s_blocksize;
-	return ret;
-}
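-/*
- * Userspace view (editor's sketch, not part of this file): ext3_trim_fs()
- * is reached via the FITRIM ioctl declared in <linux/fs.h>; the mount
- * point below is hypothetical.
- *
- *	struct fstrim_range range = {
- *		.start	= 0,
- *		.len	= ULLONG_MAX,
- *		.minlen	= 0,
- *	};
- *	int fd = open("/mnt/data", O_RDONLY);
- *
- *	if (fd >= 0 && ioctl(fd, FITRIM, &range) == 0)
- *		printf("trimmed %llu bytes\n",
- *		       (unsigned long long)range.len);
- */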
diff --git a/fs/ext3/bitmap.c b/fs/ext3/bitmap.c
deleted file mode 100644
index ef9c643..0000000
--- a/fs/ext3/bitmap.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  linux/fs/ext3/bitmap.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- */
-
-#include "ext3.h"
-
-#ifdef EXT3FS_DEBUG
-
-unsigned long ext3_count_free (struct buffer_head * map, unsigned int numchars)
-{
-	return numchars * BITS_PER_BYTE - memweight(map->b_data, numchars);
-}
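-/*
- * Example (editor's note): memweight() returns the number of set bits,
- * and a set bit marks an in-use block or inode, so for an 8-byte bitmap
- * with two bits set this returns 8 * BITS_PER_BYTE - 2 = 62 free slots.
- */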
-
-#endif  /*  EXT3FS_DEBUG  */
-
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
deleted file mode 100644
index 17742ee..0000000
--- a/fs/ext3/dir.c
+++ /dev/null
@@ -1,537 +0,0 @@
-/*
- *  linux/fs/ext3/dir.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/dir.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  ext3 directory handling functions
- *
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- *
- * Hash Tree Directory indexing (c) 2001  Daniel Phillips
- *
- */
-
-#include <linux/compat.h>
-#include "ext3.h"
-
-static unsigned char ext3_filetype_table[] = {
-	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
-};
-
-static int ext3_dx_readdir(struct file *, struct dir_context *);
-
-static unsigned char get_dtype(struct super_block *sb, int filetype)
-{
-	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_FILETYPE) ||
-	    (filetype >= EXT3_FT_MAX))
-		return DT_UNKNOWN;
-
-	return (ext3_filetype_table[filetype]);
-}
-
-/**
- * Check if the given dir-inode refers to an htree-indexed directory
- * (or a directory which could potentially get converted to use htree
- * indexing).
- *
- * Return 1 if it is a dx dir, 0 if not
- */
-static int is_dx_dir(struct inode *inode)
-{
-	struct super_block *sb = inode->i_sb;
-
-	if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
-		     EXT3_FEATURE_COMPAT_DIR_INDEX) &&
-	    ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
-	     ((inode->i_size >> sb->s_blocksize_bits) == 1)))
-		return 1;
-
-	return 0;
-}
-
-int ext3_check_dir_entry (const char * function, struct inode * dir,
-			  struct ext3_dir_entry_2 * de,
-			  struct buffer_head * bh,
-			  unsigned long offset)
-{
-	const char * error_msg = NULL;
-	const int rlen = ext3_rec_len_from_disk(de->rec_len);
-
-	if (unlikely(rlen < EXT3_DIR_REC_LEN(1)))
-		error_msg = "rec_len is smaller than minimal";
-	else if (unlikely(rlen % 4 != 0))
-		error_msg = "rec_len % 4 != 0";
-	else if (unlikely(rlen < EXT3_DIR_REC_LEN(de->name_len)))
-		error_msg = "rec_len is too small for name_len";
-	else if (unlikely((((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)))
-		error_msg = "directory entry across blocks";
-	else if (unlikely(le32_to_cpu(de->inode) >
-			le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count)))
-		error_msg = "inode out of bounds";
-
-	if (unlikely(error_msg != NULL))
-		ext3_error (dir->i_sb, function,
-			"bad entry in directory #%lu: %s - "
-			"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
-			dir->i_ino, error_msg, offset,
-			(unsigned long) le32_to_cpu(de->inode),
-			rlen, de->name_len);
-
-	return error_msg == NULL ? 1 : 0;
-}
-
-static int ext3_readdir(struct file *file, struct dir_context *ctx)
-{
-	unsigned long offset;
-	int i;
-	struct ext3_dir_entry_2 *de;
-	int err;
-	struct inode *inode = file_inode(file);
-	struct super_block *sb = inode->i_sb;
-	int dir_has_error = 0;
-
-	if (is_dx_dir(inode)) {
-		err = ext3_dx_readdir(file, ctx);
-		if (err != ERR_BAD_DX_DIR)
-			return err;
-		/*
-		 * We don't set the inode dirty flag since it's not
-		 * critical that it get flushed back to the disk.
-		 */
-		EXT3_I(inode)->i_flags &= ~EXT3_INDEX_FL;
-	}
-	offset = ctx->pos & (sb->s_blocksize - 1);
-
-	while (ctx->pos < inode->i_size) {
-		unsigned long blk = ctx->pos >> EXT3_BLOCK_SIZE_BITS(sb);
-		struct buffer_head map_bh;
-		struct buffer_head *bh = NULL;
-
-		map_bh.b_state = 0;
-		err = ext3_get_blocks_handle(NULL, inode, blk, 1, &map_bh, 0);
-		if (err > 0) {
-			pgoff_t index = map_bh.b_blocknr >>
-					(PAGE_CACHE_SHIFT - inode->i_blkbits);
-			if (!ra_has_index(&file->f_ra, index))
-				page_cache_sync_readahead(
-					sb->s_bdev->bd_inode->i_mapping,
-					&file->f_ra, file,
-					index, 1);
-			file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
-			bh = ext3_bread(NULL, inode, blk, 0, &err);
-		}
-
-		/*
-		 * We ignore I/O errors on directories so users have a chance
-		 * of recovering data when there's a bad sector
-		 */
-		if (!bh) {
-			if (!dir_has_error) {
-				ext3_error(sb, __func__, "directory #%lu "
-					"contains a hole at offset %lld",
-					inode->i_ino, ctx->pos);
-				dir_has_error = 1;
-			}
-			/* corrupt size?  Maybe no more blocks to read */
-			if (ctx->pos > inode->i_blocks << 9)
-				break;
-			ctx->pos += sb->s_blocksize - offset;
-			continue;
-		}
-
-		/* If the dir block has changed since the last call to
-		 * readdir(2), then we might be pointing to an invalid
-		 * dirent right now.  Scan from the start of the block
-		 * to make sure. */
-		if (offset && file->f_version != inode->i_version) {
-			for (i = 0; i < sb->s_blocksize && i < offset; ) {
-				de = (struct ext3_dir_entry_2 *)
-					(bh->b_data + i);
-				/* It's too expensive to do a full
-				 * dirent test each time round this
-				 * loop, but we do have to test at
-				 * least that it is non-zero.  A
-				 * failure will be detected in the
-				 * dirent test below. */
-				if (ext3_rec_len_from_disk(de->rec_len) <
-						EXT3_DIR_REC_LEN(1))
-					break;
-				i += ext3_rec_len_from_disk(de->rec_len);
-			}
-			offset = i;
-			ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
-				| offset;
-			file->f_version = inode->i_version;
-		}
-
-		while (ctx->pos < inode->i_size
-		       && offset < sb->s_blocksize) {
-			de = (struct ext3_dir_entry_2 *) (bh->b_data + offset);
-			if (!ext3_check_dir_entry ("ext3_readdir", inode, de,
-						   bh, offset)) {
-				/* On error, skip to the next block. */
-				ctx->pos = (ctx->pos |
-						(sb->s_blocksize - 1)) + 1;
-				break;
-			}
-			offset += ext3_rec_len_from_disk(de->rec_len);
-			if (le32_to_cpu(de->inode)) {
-				if (!dir_emit(ctx, de->name, de->name_len,
-					      le32_to_cpu(de->inode),
-					      get_dtype(sb, de->file_type))) {
-					brelse(bh);
-					return 0;
-				}
-			}
-			ctx->pos += ext3_rec_len_from_disk(de->rec_len);
-		}
-		offset = 0;
-		brelse (bh);
-		if (ctx->pos < inode->i_size)
-			if (!dir_relax(inode))
-				return 0;
-	}
-	return 0;
-}
-
-static inline int is_32bit_api(void)
-{
-#ifdef CONFIG_COMPAT
-	return is_compat_task();
-#else
-	return (BITS_PER_LONG == 32);
-#endif
-}
-
-/*
- * These functions convert from the major/minor hash to an f_pos
- * value for dx directories.
- *
- * Upper layers (for example NFS) should specify FMODE_32BITHASH or
- * FMODE_64BITHASH explicitly.  On the other hand, we allow ext3 to be
- * mounted directly on both 32-bit and 64-bit nodes, in which case
- * neither FMODE_32BITHASH nor FMODE_64BITHASH is specified.
- */
-static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
-{
-	if ((filp->f_mode & FMODE_32BITHASH) ||
-	    (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
-		return major >> 1;
-	else
-		return ((__u64)(major >> 1) << 32) | (__u64)minor;
-}
-
-static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
-{
-	if ((filp->f_mode & FMODE_32BITHASH) ||
-	    (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
-		return (pos << 1) & 0xffffffff;
-	else
-		return ((pos >> 32) << 1) & 0xffffffff;
-}
-
-static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
-{
-	if ((filp->f_mode & FMODE_32BITHASH) ||
-	    (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
-		return 0;
-	else
-		return pos & 0xffffffff;
-}
-
-/*
- * Return 32- or 64-bit end-of-file for dx directories
- */
-static inline loff_t ext3_get_htree_eof(struct file *filp)
-{
-	if ((filp->f_mode & FMODE_32BITHASH) ||
-	    (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
-		return EXT3_HTREE_EOF_32BIT;
-	else
-		return EXT3_HTREE_EOF_64BIT;
-}
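-/*
- * Round-trip example (editor's sketch): in the 64-bit case a hash pair
- * packs as pos = ((u64)(major >> 1) << 32) | minor, so for
- * major = 0x12345678, minor = 0x9abcdef0 we get pos = 0x091a2b3c9abcdef0,
- * and pos2maj_hash()/pos2min_hash() recover 0x12345678 and 0x9abcdef0.
- * Bit 0 of the major hash is deliberately dropped, and in the 32-bit
- * case the minor hash is not representable at all.
- */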
-
-
-/*
- * ext3_dir_llseek() calls generic_file_llseek[_size]() to handle both
- * non-htree and htree directories, where the "offset" is in terms
- * of the filename hash value instead of the byte offset.
- *
- * Because we may return a 64-bit hash that is well beyond s_maxbytes,
- * we need to pass the max hash as the maximum allowable offset in
- * the htree directory case.
- *
- * NOTE: offsets obtained *before* ext3_set_inode_flag(dir, EXT3_INODE_INDEX)
- *       will be invalid once the directory is converted into a dx directory.
- */
-static loff_t ext3_dir_llseek(struct file *file, loff_t offset, int whence)
-{
-	struct inode *inode = file->f_mapping->host;
-	int dx_dir = is_dx_dir(inode);
-	loff_t htree_max = ext3_get_htree_eof(file);
-
-	if (likely(dx_dir))
-		return generic_file_llseek_size(file, offset, whence,
-					        htree_max, htree_max);
-	else
-		return generic_file_llseek(file, offset, whence);
-}
-
-/*
- * This structure holds the nodes of the red-black tree used to store
- * the directory entry in hash order.
- */
-struct fname {
-	__u32		hash;
-	__u32		minor_hash;
-	struct rb_node	rb_hash;
-	struct fname	*next;
-	__u32		inode;
-	__u8		name_len;
-	__u8		file_type;
-	char		name[0];
-};
-
-/*
- * This function implements a non-recursive way of freeing all of the
- * nodes in the red-black tree.
- */
-static void free_rb_tree_fname(struct rb_root *root)
-{
-	struct fname *fname, *next;
-
-	rbtree_postorder_for_each_entry_safe(fname, next, root, rb_hash)
-		do {
-			struct fname *old = fname;
-			fname = fname->next;
-			kfree(old);
-		} while (fname);
-
-	*root = RB_ROOT;
-}
-
-static struct dir_private_info *ext3_htree_create_dir_info(struct file *filp,
-							   loff_t pos)
-{
-	struct dir_private_info *p;
-
-	p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
-	if (!p)
-		return NULL;
-	p->curr_hash = pos2maj_hash(filp, pos);
-	p->curr_minor_hash = pos2min_hash(filp, pos);
-	return p;
-}
-
-void ext3_htree_free_dir_info(struct dir_private_info *p)
-{
-	free_rb_tree_fname(&p->root);
-	kfree(p);
-}
-
-/*
- * Given a directory entry, enter it into the fname rb tree.
- */
-int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
-			     __u32 minor_hash,
-			     struct ext3_dir_entry_2 *dirent)
-{
-	struct rb_node **p, *parent = NULL;
-	struct fname * fname, *new_fn;
-	struct dir_private_info *info;
-	int len;
-
-	info = (struct dir_private_info *) dir_file->private_data;
-	p = &info->root.rb_node;
-
-	/* Create and allocate the fname structure */
-	len = sizeof(struct fname) + dirent->name_len + 1;
-	new_fn = kzalloc(len, GFP_KERNEL);
-	if (!new_fn)
-		return -ENOMEM;
-	new_fn->hash = hash;
-	new_fn->minor_hash = minor_hash;
-	new_fn->inode = le32_to_cpu(dirent->inode);
-	new_fn->name_len = dirent->name_len;
-	new_fn->file_type = dirent->file_type;
-	memcpy(new_fn->name, dirent->name, dirent->name_len);
-	new_fn->name[dirent->name_len] = 0;
-
-	while (*p) {
-		parent = *p;
-		fname = rb_entry(parent, struct fname, rb_hash);
-
-		/*
-		 * If the hash and minor hash match up, then we put
-		 * them on a linked list.  This rarely happens...
-		 */
-		if ((new_fn->hash == fname->hash) &&
-		    (new_fn->minor_hash == fname->minor_hash)) {
-			new_fn->next = fname->next;
-			fname->next = new_fn;
-			return 0;
-		}
-
-		if (new_fn->hash < fname->hash)
-			p = &(*p)->rb_left;
-		else if (new_fn->hash > fname->hash)
-			p = &(*p)->rb_right;
-		else if (new_fn->minor_hash < fname->minor_hash)
-			p = &(*p)->rb_left;
-		else /* if (new_fn->minor_hash > fname->minor_hash) */
-			p = &(*p)->rb_right;
-	}
-
-	rb_link_node(&new_fn->rb_hash, parent, p);
-	rb_insert_color(&new_fn->rb_hash, &info->root);
-	return 0;
-}
-
-
-
-/*
- * This is a helper function for ext3_dx_readdir.  It calls dir_emit
- * for all entries on the fname linked list.  (Normally there is only
- * one entry on the linked list, unless there are 62 bit hash collisions.)
- */
-static bool call_filldir(struct file *file, struct dir_context *ctx,
-			struct fname *fname)
-{
-	struct dir_private_info *info = file->private_data;
-	struct inode *inode = file_inode(file);
-	struct super_block *sb = inode->i_sb;
-
-	if (!fname) {
-		printk("call_filldir: called with null fname?!?\n");
-		return true;
-	}
-	ctx->pos = hash2pos(file, fname->hash, fname->minor_hash);
-	while (fname) {
-		if (!dir_emit(ctx, fname->name, fname->name_len,
-				fname->inode,
-				get_dtype(sb, fname->file_type))) {
-			info->extra_fname = fname;
-			return false;
-		}
-		fname = fname->next;
-	}
-	return true;
-}
-
-static int ext3_dx_readdir(struct file *file, struct dir_context *ctx)
-{
-	struct dir_private_info *info = file->private_data;
-	struct inode *inode = file_inode(file);
-	struct fname *fname;
-	int	ret;
-
-	if (!info) {
-		info = ext3_htree_create_dir_info(file, ctx->pos);
-		if (!info)
-			return -ENOMEM;
-		file->private_data = info;
-	}
-
-	if (ctx->pos == ext3_get_htree_eof(file))
-		return 0;	/* EOF */
-
-	/* Someone has messed with f_pos; reset the world */
-	if (info->last_pos != ctx->pos) {
-		free_rb_tree_fname(&info->root);
-		info->curr_node = NULL;
-		info->extra_fname = NULL;
-		info->curr_hash = pos2maj_hash(file, ctx->pos);
-		info->curr_minor_hash = pos2min_hash(file, ctx->pos);
-	}
-
-	/*
-	 * If there are any leftover names on the hash collision
-	 * chain, return them first.
-	 */
-	if (info->extra_fname) {
-		if (!call_filldir(file, ctx, info->extra_fname))
-			goto finished;
-		info->extra_fname = NULL;
-		goto next_node;
-	} else if (!info->curr_node)
-		info->curr_node = rb_first(&info->root);
-
-	while (1) {
-		/*
-		 * Fill the rbtree if we have no more entries,
-		 * or the inode has changed since we last read in the
-		 * cached entries.
-		 */
-		if ((!info->curr_node) ||
-		    (file->f_version != inode->i_version)) {
-			info->curr_node = NULL;
-			free_rb_tree_fname(&info->root);
-			file->f_version = inode->i_version;
-			ret = ext3_htree_fill_tree(file, info->curr_hash,
-						   info->curr_minor_hash,
-						   &info->next_hash);
-			if (ret < 0)
-				return ret;
-			if (ret == 0) {
-				ctx->pos = ext3_get_htree_eof(file);
-				break;
-			}
-			info->curr_node = rb_first(&info->root);
-		}
-
-		fname = rb_entry(info->curr_node, struct fname, rb_hash);
-		info->curr_hash = fname->hash;
-		info->curr_minor_hash = fname->minor_hash;
-		if (!call_filldir(file, ctx, fname))
-			break;
-	next_node:
-		info->curr_node = rb_next(info->curr_node);
-		if (info->curr_node) {
-			fname = rb_entry(info->curr_node, struct fname,
-					 rb_hash);
-			info->curr_hash = fname->hash;
-			info->curr_minor_hash = fname->minor_hash;
-		} else {
-			if (info->next_hash == ~0) {
-				ctx->pos = ext3_get_htree_eof(file);
-				break;
-			}
-			info->curr_hash = info->next_hash;
-			info->curr_minor_hash = 0;
-		}
-	}
-finished:
-	info->last_pos = ctx->pos;
-	return 0;
-}
-
-static int ext3_release_dir (struct inode * inode, struct file * filp)
-{
-       if (filp->private_data)
-		ext3_htree_free_dir_info(filp->private_data);
-
-	return 0;
-}
-
-const struct file_operations ext3_dir_operations = {
-	.llseek		= ext3_dir_llseek,
-	.read		= generic_read_dir,
-	.iterate	= ext3_readdir,
-	.unlocked_ioctl = ext3_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl	= ext3_compat_ioctl,
-#endif
-	.fsync		= ext3_sync_file,
-	.release	= ext3_release_dir,
-};
diff --git a/fs/ext3/ext3.h b/fs/ext3/ext3.h
deleted file mode 100644
index f483a80..0000000
--- a/fs/ext3/ext3.h
+++ /dev/null
@@ -1,1332 +0,0 @@
-/*
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
- *
- * Copyright 1998--1999 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/include/linux/minix_fs.h
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- */
-
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/magic.h>
-#include <linux/bug.h>
-#include <linux/blockgroup_lock.h>
-
-/*
- * The third extended filesystem constants/structures
- */
-
-/*
- * Define EXT3FS_DEBUG to produce debug messages
- */
-#undef EXT3FS_DEBUG
-
-/*
- * Define EXT3_RESERVATION to reserve data blocks for expanding files
- */
-#define EXT3_DEFAULT_RESERVE_BLOCKS     8
-/* max window size: 1024 (direct blocks) + 3 ([t,d]indirect blocks) */
-#define EXT3_MAX_RESERVE_BLOCKS         1027
-#define EXT3_RESERVE_WINDOW_NOT_ALLOCATED 0
-
-/*
- * Debug code
- */
-#ifdef EXT3FS_DEBUG
-#define ext3_debug(f, a...)						\
-	do {								\
-		printk (KERN_DEBUG "EXT3-fs DEBUG (%s, %d): %s:",	\
-			__FILE__, __LINE__, __func__);		\
-		printk (KERN_DEBUG f, ## a);				\
-	} while (0)
-#else
-#define ext3_debug(f, a...)	do {} while (0)
-#endif
-
-/*
- * Special inode numbers
- */
-#define	EXT3_BAD_INO		 1	/* Bad blocks inode */
-#define EXT3_ROOT_INO		 2	/* Root inode */
-#define EXT3_BOOT_LOADER_INO	 5	/* Boot loader inode */
-#define EXT3_UNDEL_DIR_INO	 6	/* Undelete directory inode */
-#define EXT3_RESIZE_INO		 7	/* Reserved group descriptors inode */
-#define EXT3_JOURNAL_INO	 8	/* Journal inode */
-
-/* First non-reserved inode for old ext3 filesystems */
-#define EXT3_GOOD_OLD_FIRST_INO	11
-
-/*
- * Maximal count of links to a file
- */
-#define EXT3_LINK_MAX		32000
-
-/*
- * Macro-instructions used to manage several block sizes
- */
-#define EXT3_MIN_BLOCK_SIZE		1024
-#define	EXT3_MAX_BLOCK_SIZE		65536
-#define EXT3_MIN_BLOCK_LOG_SIZE		10
-#define EXT3_BLOCK_SIZE(s)		((s)->s_blocksize)
-#define	EXT3_ADDR_PER_BLOCK(s)		(EXT3_BLOCK_SIZE(s) / sizeof (__u32))
-#define EXT3_BLOCK_SIZE_BITS(s)	((s)->s_blocksize_bits)
-#define	EXT3_ADDR_PER_BLOCK_BITS(s)	(EXT3_SB(s)->s_addr_per_block_bits)
-#define EXT3_INODE_SIZE(s)		(EXT3_SB(s)->s_inode_size)
-#define EXT3_FIRST_INO(s)		(EXT3_SB(s)->s_first_ino)
-
-/*
- * Macro-instructions used to manage fragments
- */
-#define EXT3_MIN_FRAG_SIZE		1024
-#define	EXT3_MAX_FRAG_SIZE		4096
-#define EXT3_MIN_FRAG_LOG_SIZE		  10
-#define EXT3_FRAG_SIZE(s)		(EXT3_SB(s)->s_frag_size)
-#define EXT3_FRAGS_PER_BLOCK(s)		(EXT3_SB(s)->s_frags_per_block)
-
-/*
- * Structure of a blocks group descriptor
- */
-struct ext3_group_desc
-{
-	__le32	bg_block_bitmap;		/* Blocks bitmap block */
-	__le32	bg_inode_bitmap;		/* Inodes bitmap block */
-	__le32	bg_inode_table;		/* Inodes table block */
-	__le16	bg_free_blocks_count;	/* Free blocks count */
-	__le16	bg_free_inodes_count;	/* Free inodes count */
-	__le16	bg_used_dirs_count;	/* Directories count */
-	__u16	bg_pad;
-	__le32	bg_reserved[3];
-};
-
-/*
- * Macro-instructions used to manage group descriptors
- */
-#define EXT3_BLOCKS_PER_GROUP(s)	(EXT3_SB(s)->s_blocks_per_group)
-#define EXT3_DESC_PER_BLOCK(s)		(EXT3_SB(s)->s_desc_per_block)
-#define EXT3_INODES_PER_GROUP(s)	(EXT3_SB(s)->s_inodes_per_group)
-#define EXT3_DESC_PER_BLOCK_BITS(s)	(EXT3_SB(s)->s_desc_per_block_bits)
-
-/*
- * Constants relative to the data blocks
- */
-#define	EXT3_NDIR_BLOCKS		12
-#define	EXT3_IND_BLOCK			EXT3_NDIR_BLOCKS
-#define	EXT3_DIND_BLOCK			(EXT3_IND_BLOCK + 1)
-#define	EXT3_TIND_BLOCK			(EXT3_DIND_BLOCK + 1)
-#define	EXT3_N_BLOCKS			(EXT3_TIND_BLOCK + 1)
-
-/*
- * Inode flags
- */
-#define	EXT3_SECRM_FL			0x00000001 /* Secure deletion */
-#define	EXT3_UNRM_FL			0x00000002 /* Undelete */
-#define	EXT3_COMPR_FL			0x00000004 /* Compress file */
-#define EXT3_SYNC_FL			0x00000008 /* Synchronous updates */
-#define EXT3_IMMUTABLE_FL		0x00000010 /* Immutable file */
-#define EXT3_APPEND_FL			0x00000020 /* writes to file may only append */
-#define EXT3_NODUMP_FL			0x00000040 /* do not dump file */
-#define EXT3_NOATIME_FL			0x00000080 /* do not update atime */
-/* Reserved for compression usage... */
-#define EXT3_DIRTY_FL			0x00000100
-#define EXT3_COMPRBLK_FL		0x00000200 /* One or more compressed clusters */
-#define EXT3_NOCOMPR_FL			0x00000400 /* Don't compress */
-#define EXT3_ECOMPR_FL			0x00000800 /* Compression error */
-/* End compression flags --- maybe not all used */
-#define EXT3_INDEX_FL			0x00001000 /* hash-indexed directory */
-#define EXT3_IMAGIC_FL			0x00002000 /* AFS directory */
-#define EXT3_JOURNAL_DATA_FL		0x00004000 /* file data should be journaled */
-#define EXT3_NOTAIL_FL			0x00008000 /* file tail should not be merged */
-#define EXT3_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
-#define EXT3_TOPDIR_FL			0x00020000 /* Top of directory hierarchies*/
-#define EXT3_RESERVED_FL		0x80000000 /* reserved for ext3 lib */
-
-#define EXT3_FL_USER_VISIBLE		0x0003DFFF /* User visible flags */
-#define EXT3_FL_USER_MODIFIABLE		0x000380FF /* User modifiable flags */
-
-/* Flags that should be inherited by new inodes from their parent. */
-#define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\
-			   EXT3_SYNC_FL | EXT3_NODUMP_FL |\
-			   EXT3_NOATIME_FL | EXT3_COMPRBLK_FL |\
-			   EXT3_NOCOMPR_FL | EXT3_JOURNAL_DATA_FL |\
-			   EXT3_NOTAIL_FL | EXT3_DIRSYNC_FL)
-
-/* Flags that are appropriate for regular files (all but dir-specific ones). */
-#define EXT3_REG_FLMASK (~(EXT3_DIRSYNC_FL | EXT3_TOPDIR_FL))
-
-/* Flags that are appropriate for non-directories/regular files. */
-#define EXT3_OTHER_FLMASK (EXT3_NODUMP_FL | EXT3_NOATIME_FL)
-
-/* Mask out flags that are inappropriate for the given type of inode. */
-static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
-{
-	if (S_ISDIR(mode))
-		return flags;
-	else if (S_ISREG(mode))
-		return flags & EXT3_REG_FLMASK;
-	else
-		return flags & EXT3_OTHER_FLMASK;
-}
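-/*
- * Worked example (editor's note): for a regular file,
- * ext3_mask_flags(S_IFREG, flags) clears the directory-only bits, so an
- * inherited EXT3_DIRSYNC_FL | EXT3_NODUMP_FL becomes just
- * EXT3_NODUMP_FL; a device node keeps at most EXT3_NODUMP_FL and
- * EXT3_NOATIME_FL.
- */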
-
-/* Used to pass group descriptor data when online resize is done */
-struct ext3_new_group_input {
-	__u32 group;            /* Group number for this data */
-	__u32 block_bitmap;     /* Absolute block number of block bitmap */
-	__u32 inode_bitmap;     /* Absolute block number of inode bitmap */
-	__u32 inode_table;      /* Absolute block number of inode table start */
-	__u32 blocks_count;     /* Total number of blocks in this group */
-	__u16 reserved_blocks;  /* Number of reserved blocks in this group */
-	__u16 unused;
-};
-
-/* The struct ext3_new_group_input in kernel space, with free_blocks_count */
-struct ext3_new_group_data {
-	__u32 group;
-	__u32 block_bitmap;
-	__u32 inode_bitmap;
-	__u32 inode_table;
-	__u32 blocks_count;
-	__u16 reserved_blocks;
-	__u16 unused;
-	__u32 free_blocks_count;
-};
-
-
-/*
- * ioctl commands
- */
-#define	EXT3_IOC_GETFLAGS		FS_IOC_GETFLAGS
-#define	EXT3_IOC_SETFLAGS		FS_IOC_SETFLAGS
-#define	EXT3_IOC_GETVERSION		_IOR('f', 3, long)
-#define	EXT3_IOC_SETVERSION		_IOW('f', 4, long)
-#define EXT3_IOC_GROUP_EXTEND		_IOW('f', 7, unsigned long)
-#define EXT3_IOC_GROUP_ADD		_IOW('f', 8,struct ext3_new_group_input)
-#define	EXT3_IOC_GETVERSION_OLD		FS_IOC_GETVERSION
-#define	EXT3_IOC_SETVERSION_OLD		FS_IOC_SETVERSION
-#ifdef CONFIG_JBD_DEBUG
-#define EXT3_IOC_WAIT_FOR_READONLY	_IOR('f', 99, long)
-#endif
-#define EXT3_IOC_GETRSVSZ		_IOR('f', 5, long)
-#define EXT3_IOC_SETRSVSZ		_IOW('f', 6, long)
-
-/*
- * ioctl commands in 32 bit emulation
- */
-#define EXT3_IOC32_GETFLAGS		FS_IOC32_GETFLAGS
-#define EXT3_IOC32_SETFLAGS		FS_IOC32_SETFLAGS
-#define EXT3_IOC32_GETVERSION		_IOR('f', 3, int)
-#define EXT3_IOC32_SETVERSION		_IOW('f', 4, int)
-#define EXT3_IOC32_GETRSVSZ		_IOR('f', 5, int)
-#define EXT3_IOC32_SETRSVSZ		_IOW('f', 6, int)
-#define EXT3_IOC32_GROUP_EXTEND		_IOW('f', 7, unsigned int)
-#ifdef CONFIG_JBD_DEBUG
-#define EXT3_IOC32_WAIT_FOR_READONLY	_IOR('f', 99, int)
-#endif
-#define EXT3_IOC32_GETVERSION_OLD	FS_IOC32_GETVERSION
-#define EXT3_IOC32_SETVERSION_OLD	FS_IOC32_SETVERSION
-
-/* Number of supported quota types */
-#define EXT3_MAXQUOTAS 2
-
-/*
- *  Mount options
- */
-struct ext3_mount_options {
-	unsigned long s_mount_opt;
-	kuid_t s_resuid;
-	kgid_t s_resgid;
-	unsigned long s_commit_interval;
-#ifdef CONFIG_QUOTA
-	int s_jquota_fmt;
-	char *s_qf_names[EXT3_MAXQUOTAS];
-#endif
-};
-
-/*
- * Structure of an inode on the disk
- */
-struct ext3_inode {
-	__le16	i_mode;		/* File mode */
-	__le16	i_uid;		/* Low 16 bits of Owner Uid */
-	__le32	i_size;		/* Size in bytes */
-	__le32	i_atime;	/* Access time */
-	__le32	i_ctime;	/* Creation time */
-	__le32	i_mtime;	/* Modification time */
-	__le32	i_dtime;	/* Deletion Time */
-	__le16	i_gid;		/* Low 16 bits of Group Id */
-	__le16	i_links_count;	/* Links count */
-	__le32	i_blocks;	/* Blocks count */
-	__le32	i_flags;	/* File flags */
-	union {
-		struct {
-			__u32  l_i_reserved1;
-		} linux1;
-		struct {
-			__u32  h_i_translator;
-		} hurd1;
-		struct {
-			__u32  m_i_reserved1;
-		} masix1;
-	} osd1;				/* OS dependent 1 */
-	__le32	i_block[EXT3_N_BLOCKS];/* Pointers to blocks */
-	__le32	i_generation;	/* File version (for NFS) */
-	__le32	i_file_acl;	/* File ACL */
-	__le32	i_dir_acl;	/* Directory ACL */
-	__le32	i_faddr;	/* Fragment address */
-	union {
-		struct {
-			__u8	l_i_frag;	/* Fragment number */
-			__u8	l_i_fsize;	/* Fragment size */
-			__u16	i_pad1;
-			__le16	l_i_uid_high;	/* these 2 fields    */
-			__le16	l_i_gid_high;	/* were reserved2[0] */
-			__u32	l_i_reserved2;
-		} linux2;
-		struct {
-			__u8	h_i_frag;	/* Fragment number */
-			__u8	h_i_fsize;	/* Fragment size */
-			__u16	h_i_mode_high;
-			__u16	h_i_uid_high;
-			__u16	h_i_gid_high;
-			__u32	h_i_author;
-		} hurd2;
-		struct {
-			__u8	m_i_frag;	/* Fragment number */
-			__u8	m_i_fsize;	/* Fragment size */
-			__u16	m_pad1;
-			__u32	m_i_reserved2[2];
-		} masix2;
-	} osd2;				/* OS dependent 2 */
-	__le16	i_extra_isize;
-	__le16	i_pad1;
-};
-
-#define i_size_high	i_dir_acl
-
-#define i_reserved1	osd1.linux1.l_i_reserved1
-#define i_frag		osd2.linux2.l_i_frag
-#define i_fsize		osd2.linux2.l_i_fsize
-#define i_uid_low	i_uid
-#define i_gid_low	i_gid
-#define i_uid_high	osd2.linux2.l_i_uid_high
-#define i_gid_high	osd2.linux2.l_i_gid_high
-#define i_reserved2	osd2.linux2.l_i_reserved2
-
-/*
- * File system states
- */
-#define	EXT3_VALID_FS			0x0001	/* Unmounted cleanly */
-#define	EXT3_ERROR_FS			0x0002	/* Errors detected */
-#define	EXT3_ORPHAN_FS			0x0004	/* Orphans being recovered */
-
-/*
- * Misc. filesystem flags
- */
-#define EXT2_FLAGS_SIGNED_HASH		0x0001  /* Signed dirhash in use */
-#define EXT2_FLAGS_UNSIGNED_HASH	0x0002  /* Unsigned dirhash in use */
-#define EXT2_FLAGS_TEST_FILESYS		0x0004	/* to test development code */
-
-/*
- * Mount flags
- */
-#define EXT3_MOUNT_CHECK		0x00001	/* Do mount-time checks */
-/* EXT3_MOUNT_OLDALLOC was there */
-#define EXT3_MOUNT_GRPID		0x00004	/* Create files with directory's group */
-#define EXT3_MOUNT_DEBUG		0x00008	/* Some debugging messages */
-#define EXT3_MOUNT_ERRORS_CONT		0x00010	/* Continue on errors */
-#define EXT3_MOUNT_ERRORS_RO		0x00020	/* Remount fs ro on errors */
-#define EXT3_MOUNT_ERRORS_PANIC		0x00040	/* Panic on errors */
-#define EXT3_MOUNT_MINIX_DF		0x00080	/* Mimics the Minix statfs */
-#define EXT3_MOUNT_NOLOAD		0x00100	/* Don't use existing journal*/
-#define EXT3_MOUNT_ABORT		0x00200	/* Fatal error detected */
-#define EXT3_MOUNT_DATA_FLAGS		0x00C00	/* Mode for data writes: */
-#define EXT3_MOUNT_JOURNAL_DATA		0x00400	/* Write data to journal */
-#define EXT3_MOUNT_ORDERED_DATA		0x00800	/* Flush data before commit */
-#define EXT3_MOUNT_WRITEBACK_DATA	0x00C00	/* No data ordering */
-#define EXT3_MOUNT_UPDATE_JOURNAL	0x01000	/* Update the journal format */
-#define EXT3_MOUNT_NO_UID32		0x02000  /* Disable 32-bit UIDs */
-#define EXT3_MOUNT_XATTR_USER		0x04000	/* Extended user attributes */
-#define EXT3_MOUNT_POSIX_ACL		0x08000	/* POSIX Access Control Lists */
-#define EXT3_MOUNT_RESERVATION		0x10000	/* Preallocation */
-#define EXT3_MOUNT_BARRIER		0x20000 /* Use block barriers */
-#define EXT3_MOUNT_QUOTA		0x80000 /* Some quota option set */
-#define EXT3_MOUNT_USRQUOTA		0x100000 /* "old" user quota */
-#define EXT3_MOUNT_GRPQUOTA		0x200000 /* "old" group quota */
-#define EXT3_MOUNT_DATA_ERR_ABORT	0x400000 /* Abort on file data write
-						  * error in ordered mode */
-
-/* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
-#ifndef _LINUX_EXT2_FS_H
-#define clear_opt(o, opt)		o &= ~EXT3_MOUNT_##opt
-#define set_opt(o, opt)			o |= EXT3_MOUNT_##opt
-#define test_opt(sb, opt)		(EXT3_SB(sb)->s_mount_opt & \
-					 EXT3_MOUNT_##opt)
-#else
-#define EXT2_MOUNT_NOLOAD		EXT3_MOUNT_NOLOAD
-#define EXT2_MOUNT_ABORT		EXT3_MOUNT_ABORT
-#define EXT2_MOUNT_DATA_FLAGS		EXT3_MOUNT_DATA_FLAGS
-#endif
-
-#define ext3_set_bit			__set_bit_le
-#define ext3_set_bit_atomic		ext2_set_bit_atomic
-#define ext3_clear_bit			__clear_bit_le
-#define ext3_clear_bit_atomic		ext2_clear_bit_atomic
-#define ext3_test_bit			test_bit_le
-#define ext3_find_next_zero_bit		find_next_zero_bit_le
-
-/*
- * Maximal mount counts between two filesystem checks
- */
-#define EXT3_DFL_MAX_MNT_COUNT		20	/* Allow 20 mounts */
-#define EXT3_DFL_CHECKINTERVAL		0	/* Don't use interval check */
-
-/*
- * Behaviour when detecting errors
- */
-#define EXT3_ERRORS_CONTINUE		1	/* Continue execution */
-#define EXT3_ERRORS_RO			2	/* Remount fs read-only */
-#define EXT3_ERRORS_PANIC		3	/* Panic */
-#define EXT3_ERRORS_DEFAULT		EXT3_ERRORS_CONTINUE
-
-/*
- * Structure of the super block
- */
-struct ext3_super_block {
-/*00*/	__le32	s_inodes_count;		/* Inodes count */
-	__le32	s_blocks_count;		/* Blocks count */
-	__le32	s_r_blocks_count;	/* Reserved blocks count */
-	__le32	s_free_blocks_count;	/* Free blocks count */
-/*10*/	__le32	s_free_inodes_count;	/* Free inodes count */
-	__le32	s_first_data_block;	/* First Data Block */
-	__le32	s_log_block_size;	/* Block size */
-	__le32	s_log_frag_size;	/* Fragment size */
-/*20*/	__le32	s_blocks_per_group;	/* # Blocks per group */
-	__le32	s_frags_per_group;	/* # Fragments per group */
-	__le32	s_inodes_per_group;	/* # Inodes per group */
-	__le32	s_mtime;		/* Mount time */
-/*30*/	__le32	s_wtime;		/* Write time */
-	__le16	s_mnt_count;		/* Mount count */
-	__le16	s_max_mnt_count;	/* Maximal mount count */
-	__le16	s_magic;		/* Magic signature */
-	__le16	s_state;		/* File system state */
-	__le16	s_errors;		/* Behaviour when detecting errors */
-	__le16	s_minor_rev_level;	/* minor revision level */
-/*40*/	__le32	s_lastcheck;		/* time of last check */
-	__le32	s_checkinterval;	/* max. time between checks */
-	__le32	s_creator_os;		/* OS */
-	__le32	s_rev_level;		/* Revision level */
-/*50*/	__le16	s_def_resuid;		/* Default uid for reserved blocks */
-	__le16	s_def_resgid;		/* Default gid for reserved blocks */
-	/*
-	 * These fields are for EXT3_DYNAMIC_REV superblocks only.
-	 *
-	 * Note: the difference between the compatible feature set and
-	 * the incompatible feature set is that if there is a bit set
-	 * in the incompatible feature set that the kernel doesn't
-	 * know about, it should refuse to mount the filesystem.
-	 *
-	 * e2fsck's requirements are more strict; if it doesn't know
-	 * about a feature in either the compatible or incompatible
-	 * feature set, it must abort and not try to meddle with
-	 * things it doesn't understand...
-	 */
-	__le32	s_first_ino;		/* First non-reserved inode */
-	__le16   s_inode_size;		/* size of inode structure */
-	__le16	s_block_group_nr;	/* block group # of this superblock */
-	__le32	s_feature_compat;	/* compatible feature set */
-/*60*/	__le32	s_feature_incompat;	/* incompatible feature set */
-	__le32	s_feature_ro_compat;	/* readonly-compatible feature set */
-/*68*/	__u8	s_uuid[16];		/* 128-bit uuid for volume */
-/*78*/	char	s_volume_name[16];	/* volume name */
-/*88*/	char	s_last_mounted[64];	/* directory where last mounted */
-/*C8*/	__le32	s_algorithm_usage_bitmap; /* For compression */
-	/*
-	 * Performance hints.  Directory preallocation should only
-	 * happen if the EXT3_FEATURE_COMPAT_DIR_PREALLOC flag is on.
-	 */
-	__u8	s_prealloc_blocks;	/* Nr of blocks to try to preallocate*/
-	__u8	s_prealloc_dir_blocks;	/* Nr to preallocate for dirs */
-	__le16	s_reserved_gdt_blocks;	/* Per group desc for online growth */
-	/*
-	 * Journaling support valid if EXT3_FEATURE_COMPAT_HAS_JOURNAL set.
-	 */
-/*D0*/	__u8	s_journal_uuid[16];	/* uuid of journal superblock */
-/*E0*/	__le32	s_journal_inum;		/* inode number of journal file */
-	__le32	s_journal_dev;		/* device number of journal file */
-	__le32	s_last_orphan;		/* start of list of inodes to delete */
-	__le32	s_hash_seed[4];		/* HTREE hash seed */
-	__u8	s_def_hash_version;	/* Default hash version to use */
-	__u8	s_reserved_char_pad;
-	__u16	s_reserved_word_pad;
-	__le32	s_default_mount_opts;
-	__le32	s_first_meta_bg;	/* First metablock block group */
-	__le32	s_mkfs_time;		/* When the filesystem was created */
-	__le32	s_jnl_blocks[17];	/* Backup of the journal inode */
-	/* 64bit support valid if EXT4_FEATURE_COMPAT_64BIT */
-/*150*/	__le32	s_blocks_count_hi;	/* Blocks count */
-	__le32	s_r_blocks_count_hi;	/* Reserved blocks count */
-	__le32	s_free_blocks_count_hi;	/* Free blocks count */
-	__le16	s_min_extra_isize;	/* All inodes have at least # bytes */
-	__le16	s_want_extra_isize; 	/* New inodes should reserve # bytes */
-	__le32	s_flags;		/* Miscellaneous flags */
-	__le16  s_raid_stride;		/* RAID stride */
-	__le16  s_mmp_interval;         /* # seconds to wait in MMP checking */
-	__le64  s_mmp_block;            /* Block for multi-mount protection */
-	__le32  s_raid_stripe_width;    /* blocks on all data disks (N*stride)*/
-	__u8	s_log_groups_per_flex;  /* FLEX_BG group size */
-	__u8	s_reserved_char_pad2;
-	__le16  s_reserved_pad;
-	__u32   s_reserved[162];        /* Padding to the end of the block */
-};
-
-/* data type for block offset of block group */
-typedef int ext3_grpblk_t;
-
-/* data type for filesystem-wide blocks number */
-typedef unsigned long ext3_fsblk_t;
-
-#define E3FSBLK "%lu"
-
-struct ext3_reserve_window {
-	ext3_fsblk_t	_rsv_start;	/* First byte reserved */
-	ext3_fsblk_t	_rsv_end;	/* Last byte reserved or 0 */
-};
-
-struct ext3_reserve_window_node {
-	struct rb_node		rsv_node;
-	__u32			rsv_goal_size;
-	__u32			rsv_alloc_hit;
-	struct ext3_reserve_window	rsv_window;
-};
-
-struct ext3_block_alloc_info {
-	/* information about reservation window */
-	struct ext3_reserve_window_node	rsv_window_node;
-	/*
-	 * Was i_next_alloc_block in ext3_inode_info.  It is the logical
-	 * (file-relative) number of the most recently allocated block in
-	 * this file.  We use this to detect linearly ascending allocation
-	 * requests.
-	 */
-	__u32                   last_alloc_logical_block;
-	/*
-	 * Was i_next_alloc_goal in ext3_inode_info.  It is the *physical*
-	 * companion to i_next_alloc_block: the physical block number of the
-	 * block which was most recently allocated to this file.  This gives
-	 * us the goal (target) for the next allocation when we detect
-	 * linearly ascending requests.
-	 */
-	ext3_fsblk_t		last_alloc_physical_block;
-};
-
-#define rsv_start rsv_window._rsv_start
-#define rsv_end rsv_window._rsv_end
-
-/*
- * third extended file system inode data in memory
- */
-struct ext3_inode_info {
-	__le32	i_data[15];	/* unconverted */
-	__u32	i_flags;
-#ifdef EXT3_FRAGMENTS
-	__u32	i_faddr;
-	__u8	i_frag_no;
-	__u8	i_frag_size;
-#endif
-	ext3_fsblk_t	i_file_acl;
-	__u32	i_dir_acl;
-	__u32	i_dtime;
-
-	/*
-	 * i_block_group is the number of the block group which contains
-	 * this file's inode.  Constant across the lifetime of the inode,
-	 * it is used for making block allocation decisions - we try to
-	 * place a file's data blocks near its inode block, and new inodes
-	 * near to their parent directory's inode.
-	 */
-	__u32	i_block_group;
-	unsigned long	i_state_flags;	/* Dynamic state flags for ext3 */
-
-	/* block reservation info */
-	struct ext3_block_alloc_info *i_block_alloc_info;
-
-	__u32	i_dir_start_lookup;
-#ifdef CONFIG_EXT3_FS_XATTR
-	/*
-	 * Extended attributes can be read independently of the main file
-	 * data. Taking i_mutex even when reading would cause contention
-	 * between readers of EAs and writers of regular file data, so
-	 * instead we synchronize on xattr_sem when reading or changing
-	 * EAs.
-	 */
-	struct rw_semaphore xattr_sem;
-#endif
-
-	struct list_head i_orphan;	/* unlinked but open inodes */
-
-	/*
-	 * i_disksize keeps track of what the inode size is ON DISK, not
-	 * in memory.  During truncate, i_size is set to the new size by
-	 * the VFS prior to calling ext3_truncate(), but the filesystem won't
-	 * set i_disksize to 0 until the truncate is actually under way.
-	 *
-	 * The intent is that i_disksize always represents the blocks which
-	 * are used by this file.  This allows recovery to restart truncate
-	 * on orphans if we crash during truncate.  We actually write i_disksize
-	 * into the on-disk inode when writing inodes out, instead of i_size.
-	 *
-	 * The only time when i_disksize and i_size may be different is when
-	 * a truncate is in progress.  The only things which change i_disksize
-	 * are ext3_get_block (growth) and ext3_truncate (shrinkth).
-	 */
-	loff_t	i_disksize;
-
-	/* on-disk additional length */
-	__u16 i_extra_isize;
-
-	/*
-	 * truncate_mutex is for serialising ext3_truncate() against
-	 * ext3_getblock().  In the 2.4 ext2 design, great chunks of inode's
-	 * data tree are chopped off during truncate. We can't do that in
-	 * ext3 because whenever we perform intermediate commits during
-	 * truncate, the inode and all the metadata blocks *must* be in a
-	 * consistent state which allows truncation of the orphans to restart
-	 * during recovery.  Hence we must fix the get_block-vs-truncate race
-	 * by other means, so we have truncate_mutex.
-	 */
-	struct mutex truncate_mutex;
-
-	/*
-	 * Transactions that contain inode's metadata needed to complete
-	 * fsync and fdatasync, respectively.
-	 */
-	atomic_t i_sync_tid;
-	atomic_t i_datasync_tid;
-
-#ifdef CONFIG_QUOTA
-	struct dquot *i_dquot[MAXQUOTAS];
-#endif
-
-	struct inode vfs_inode;
-};
-
-/*
- * third extended-fs super-block data in memory
- */
-struct ext3_sb_info {
-	unsigned long s_frag_size;	/* Size of a fragment in bytes */
-	unsigned long s_frags_per_block;/* Number of fragments per block */
-	unsigned long s_inodes_per_block;/* Number of inodes per block */
-	unsigned long s_frags_per_group;/* Number of fragments in a group */
-	unsigned long s_blocks_per_group;/* Number of blocks in a group */
-	unsigned long s_inodes_per_group;/* Number of inodes in a group */
-	unsigned long s_itb_per_group;	/* Number of inode table blocks per group */
-	unsigned long s_gdb_count;	/* Number of group descriptor blocks */
-	unsigned long s_desc_per_block;	/* Number of group descriptors per block */
-	unsigned long s_groups_count;	/* Number of groups in the fs */
-	unsigned long s_overhead_last;  /* Last calculated overhead */
-	unsigned long s_blocks_last;    /* Last seen block count */
-	struct buffer_head * s_sbh;	/* Buffer containing the super block */
-	struct ext3_super_block * s_es;	/* Pointer to the super block in the buffer */
-	struct buffer_head ** s_group_desc;
-	unsigned long  s_mount_opt;
-	ext3_fsblk_t s_sb_block;
-	kuid_t s_resuid;
-	kgid_t s_resgid;
-	unsigned short s_mount_state;
-	unsigned short s_pad;
-	int s_addr_per_block_bits;
-	int s_desc_per_block_bits;
-	int s_inode_size;
-	int s_first_ino;
-	spinlock_t s_next_gen_lock;
-	u32 s_next_generation;
-	u32 s_hash_seed[4];
-	int s_def_hash_version;
-	int s_hash_unsigned;	/* 3 if hash should be unsigned, 0 if not */
-	struct percpu_counter s_freeblocks_counter;
-	struct percpu_counter s_freeinodes_counter;
-	struct percpu_counter s_dirs_counter;
-	struct blockgroup_lock *s_blockgroup_lock;
-
-	/* root of the per fs reservation window tree */
-	spinlock_t s_rsv_window_lock;
-	struct rb_root s_rsv_window_root;
-	struct ext3_reserve_window_node s_rsv_window_head;
-
-	/* Journaling */
-	struct inode * s_journal_inode;
-	struct journal_s * s_journal;
-	struct list_head s_orphan;
-	struct mutex s_orphan_lock;
-	struct mutex s_resize_lock;
-	unsigned long s_commit_interval;
-	struct block_device *journal_bdev;
-#ifdef CONFIG_QUOTA
-	char *s_qf_names[EXT3_MAXQUOTAS];	/* Names of quota files with journalled quota */
-	int s_jquota_fmt;			/* Format of quota to use */
-#endif
-};
-
-static inline spinlock_t *
-sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group)
-{
-	return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
-}
-
-static inline struct ext3_sb_info * EXT3_SB(struct super_block *sb)
-{
-	return sb->s_fs_info;
-}
-static inline struct ext3_inode_info *EXT3_I(struct inode *inode)
-{
-	return container_of(inode, struct ext3_inode_info, vfs_inode);
-}
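
A quick aside on EXT3_I() above: it is the standard container_of() pattern, stepping from the embedded vfs_inode back to the enclosing ext3_inode_info. A minimal userspace sketch of the same pointer arithmetic, with the structs trimmed to illustrative stand-ins (these are not the kernel definitions):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inode { int i_ino; };
struct ext3_inode_info {
	unsigned long i_state_flags;
	struct inode vfs_inode;		/* embedded, not pointed to */
};

int main(void)
{
	struct ext3_inode_info ei = { .i_state_flags = 1,
				      .vfs_inode = { .i_ino = 42 } };
	struct inode *vfs = &ei.vfs_inode;	/* what the VFS hands back */
	struct ext3_inode_info *back =
		container_of(vfs, struct ext3_inode_info, vfs_inode);

	printf("ino=%d flags=%lu\n", back->vfs_inode.i_ino,
	       back->i_state_flags);
	return 0;
}
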
-
-static inline int ext3_valid_inum(struct super_block *sb, unsigned long ino)
-{
-	return ino == EXT3_ROOT_INO ||
-		ino == EXT3_JOURNAL_INO ||
-		ino == EXT3_RESIZE_INO ||
-		(ino >= EXT3_FIRST_INO(sb) &&
-		 ino <= le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count));
-}
-
-/*
- * Inode dynamic state flags
- */
-enum {
-	EXT3_STATE_JDATA,		/* journaled data exists */
-	EXT3_STATE_NEW,			/* inode is newly created */
-	EXT3_STATE_XATTR,		/* has in-inode xattrs */
-	EXT3_STATE_FLUSH_ON_CLOSE,	/* flush dirty pages on close */
-};
-
-static inline int ext3_test_inode_state(struct inode *inode, int bit)
-{
-	return test_bit(bit, &EXT3_I(inode)->i_state_flags);
-}
-
-static inline void ext3_set_inode_state(struct inode *inode, int bit)
-{
-	set_bit(bit, &EXT3_I(inode)->i_state_flags);
-}
-
-static inline void ext3_clear_inode_state(struct inode *inode, int bit)
-{
-	clear_bit(bit, &EXT3_I(inode)->i_state_flags);
-}
-
-#define NEXT_ORPHAN(inode) EXT3_I(inode)->i_dtime
-
-/*
- * Codes for operating systems
- */
-#define EXT3_OS_LINUX		0
-#define EXT3_OS_HURD		1
-#define EXT3_OS_MASIX		2
-#define EXT3_OS_FREEBSD		3
-#define EXT3_OS_LITES		4
-
-/*
- * Revision levels
- */
-#define EXT3_GOOD_OLD_REV	0	/* The good old (original) format */
-#define EXT3_DYNAMIC_REV	1	/* V2 format w/ dynamic inode sizes */
-
-#define EXT3_CURRENT_REV	EXT3_GOOD_OLD_REV
-#define EXT3_MAX_SUPP_REV	EXT3_DYNAMIC_REV
-
-#define EXT3_GOOD_OLD_INODE_SIZE 128
-
-/*
- * Feature set definitions
- */
-
-#define EXT3_HAS_COMPAT_FEATURE(sb,mask)			\
-	( EXT3_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask) )
-#define EXT3_HAS_RO_COMPAT_FEATURE(sb,mask)			\
-	( EXT3_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask) )
-#define EXT3_HAS_INCOMPAT_FEATURE(sb,mask)			\
-	( EXT3_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask) )
-#define EXT3_SET_COMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
-#define EXT3_SET_RO_COMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
-#define EXT3_SET_INCOMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
-#define EXT3_CLEAR_COMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
-#define EXT3_CLEAR_RO_COMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
-#define EXT3_CLEAR_INCOMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
-
-#define EXT3_FEATURE_COMPAT_DIR_PREALLOC	0x0001
-#define EXT3_FEATURE_COMPAT_IMAGIC_INODES	0x0002
-#define EXT3_FEATURE_COMPAT_HAS_JOURNAL		0x0004
-#define EXT3_FEATURE_COMPAT_EXT_ATTR		0x0008
-#define EXT3_FEATURE_COMPAT_RESIZE_INODE	0x0010
-#define EXT3_FEATURE_COMPAT_DIR_INDEX		0x0020
-
-#define EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER	0x0001
-#define EXT3_FEATURE_RO_COMPAT_LARGE_FILE	0x0002
-#define EXT3_FEATURE_RO_COMPAT_BTREE_DIR	0x0004
-
-#define EXT3_FEATURE_INCOMPAT_COMPRESSION	0x0001
-#define EXT3_FEATURE_INCOMPAT_FILETYPE		0x0002
-#define EXT3_FEATURE_INCOMPAT_RECOVER		0x0004 /* Needs recovery */
-#define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV	0x0008 /* Journal device */
-#define EXT3_FEATURE_INCOMPAT_META_BG		0x0010
-
-#define EXT3_FEATURE_COMPAT_SUPP	EXT2_FEATURE_COMPAT_EXT_ATTR
-#define EXT3_FEATURE_INCOMPAT_SUPP	(EXT3_FEATURE_INCOMPAT_FILETYPE| \
-					 EXT3_FEATURE_INCOMPAT_RECOVER| \
-					 EXT3_FEATURE_INCOMPAT_META_BG)
-#define EXT3_FEATURE_RO_COMPAT_SUPP	(EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
-					 EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
-					 EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
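
The three feature classes encode a mount policy: unknown COMPAT bits can be ignored, unknown RO_COMPAT bits allow at most a read-only mount, and unknown INCOMPAT bits must fail the mount. A hedged sketch of that decision, with the *_SUPP masks folded to their numeric values from above (can_mount() is an illustrative name, not an ext3 function):

#include <stdint.h>
#include <stdio.h>

#define INCOMPAT_SUPP	0x0016u		/* FILETYPE | RECOVER | META_BG */
#define RO_COMPAT_SUPP	0x0007u		/* SPARSE_SUPER | LARGE_FILE | BTREE_DIR */

static int can_mount(uint32_t incompat, uint32_t ro_compat, int readonly)
{
	if (incompat & ~INCOMPAT_SUPP)
		return 0;			/* must refuse the mount */
	if (!readonly && (ro_compat & ~RO_COMPAT_SUPP))
		return 0;			/* writable mount not allowed */
	return 1;				/* unknown COMPAT bits are ignored */
}

int main(void)
{
	printf("%d\n", can_mount(0x0002, 0x0001, 0));	/* 1: everything known */
	printf("%d\n", can_mount(0x0040, 0x0001, 0));	/* 0: unknown incompat bit */
	printf("%d\n", can_mount(0x0002, 0x0008, 1));	/* 1: unknown ro bit, ro mount */
	return 0;
}
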
-
-/*
- * Default values for user and/or group using reserved blocks
- */
-#define	EXT3_DEF_RESUID		0
-#define	EXT3_DEF_RESGID		0
-
-/*
- * Default mount options
- */
-#define EXT3_DEFM_DEBUG		0x0001
-#define EXT3_DEFM_BSDGROUPS	0x0002
-#define EXT3_DEFM_XATTR_USER	0x0004
-#define EXT3_DEFM_ACL		0x0008
-#define EXT3_DEFM_UID16		0x0010
-#define EXT3_DEFM_JMODE		0x0060
-#define EXT3_DEFM_JMODE_DATA	0x0020
-#define EXT3_DEFM_JMODE_ORDERED	0x0040
-#define EXT3_DEFM_JMODE_WBACK	0x0060
-
-/*
- * Structure of a directory entry
- */
-#define EXT3_NAME_LEN 255
-
-struct ext3_dir_entry {
-	__le32	inode;			/* Inode number */
-	__le16	rec_len;		/* Directory entry length */
-	__le16	name_len;		/* Name length */
-	char	name[EXT3_NAME_LEN];	/* File name */
-};
-
-/*
- * The new version of the directory entry.  Since EXT3 structures are
- * stored in Intel (little-endian) byte order, and the name_len field could never be
- * bigger than 255 chars, it's safe to reclaim the extra byte for the
- * file_type field.
- */
-struct ext3_dir_entry_2 {
-	__le32	inode;			/* Inode number */
-	__le16	rec_len;		/* Directory entry length */
-	__u8	name_len;		/* Name length */
-	__u8	file_type;
-	char	name[EXT3_NAME_LEN];	/* File name */
-};
-
-/*
- * Ext3 directory file types.  Only the low 3 bits are used.  The
- * other bits are reserved for now.
- */
-#define EXT3_FT_UNKNOWN		0
-#define EXT3_FT_REG_FILE	1
-#define EXT3_FT_DIR		2
-#define EXT3_FT_CHRDEV		3
-#define EXT3_FT_BLKDEV		4
-#define EXT3_FT_FIFO		5
-#define EXT3_FT_SOCK		6
-#define EXT3_FT_SYMLINK		7
-
-#define EXT3_FT_MAX		8
-
-/*
- * EXT3_DIR_PAD defines the directory entry boundaries
- *
- * NOTE: It must be a multiple of 4
- */
-#define EXT3_DIR_PAD			4
-#define EXT3_DIR_ROUND			(EXT3_DIR_PAD - 1)
-#define EXT3_DIR_REC_LEN(name_len)	(((name_len) + 8 + EXT3_DIR_ROUND) & \
-					 ~EXT3_DIR_ROUND)
-#define EXT3_MAX_REC_LEN		((1<<16)-1)
-
-/*
- * Tests against MAX_REC_LEN etc were put in place for 64k block
- * sizes; if that is not possible on this arch, we can skip
- * those tests and speed things up.
- */
-static inline unsigned ext3_rec_len_from_disk(__le16 dlen)
-{
-	unsigned len = le16_to_cpu(dlen);
-
-#if (PAGE_CACHE_SIZE >= 65536)
-	if (len == EXT3_MAX_REC_LEN)
-		return 1 << 16;
-#endif
-	return len;
-}
-
-static inline __le16 ext3_rec_len_to_disk(unsigned len)
-{
-#if (PAGE_CACHE_SIZE >= 65536)
-	if (len == (1 << 16))
-		return cpu_to_le16(EXT3_MAX_REC_LEN);
-	else if (len > (1 << 16))
-		BUG();
-#endif
-	return cpu_to_le16(len);
-}
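
EXT3_DIR_REC_LEN() rounds the 8-byte fixed header plus the name up to a 4-byte boundary, and the helpers above additionally encode a 65536-byte record, which cannot fit in a __le16, as EXT3_MAX_REC_LEN. A standalone re-spelling of that arithmetic (userspace sketch, not the kernel code):

#include <stdio.h>

#define DIR_PAD			4
#define DIR_ROUND		(DIR_PAD - 1)
#define DIR_REC_LEN(name_len)	(((name_len) + 8 + DIR_ROUND) & ~DIR_ROUND)
#define MAX_REC_LEN		((1 << 16) - 1)

/* Mirror of ext3_rec_len_from_disk() on a >= 64KB page size. */
static unsigned rec_len_from_disk(unsigned short dlen)
{
	return dlen == MAX_REC_LEN ? 1 << 16 : dlen;
}

int main(void)
{
	for (int n = 1; n <= 13; n += 4)
		printf("name_len=%2d -> rec_len=%d\n", n, DIR_REC_LEN(n));
	printf("on-disk 0xffff decodes to %u\n",
	       rec_len_from_disk(MAX_REC_LEN));
	return 0;
}
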
-
-/*
- * Hash Tree Directory indexing
- * (c) Daniel Phillips, 2001
- */
-
-#define is_dx(dir) (EXT3_HAS_COMPAT_FEATURE(dir->i_sb, \
-				      EXT3_FEATURE_COMPAT_DIR_INDEX) && \
-		      (EXT3_I(dir)->i_flags & EXT3_INDEX_FL))
-#define EXT3_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT3_LINK_MAX)
-#define EXT3_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
-
-/* Legal values for the dx_root hash_version field: */
-
-#define DX_HASH_LEGACY		0
-#define DX_HASH_HALF_MD4	1
-#define DX_HASH_TEA		2
-#define DX_HASH_LEGACY_UNSIGNED	3
-#define DX_HASH_HALF_MD4_UNSIGNED	4
-#define DX_HASH_TEA_UNSIGNED		5
-
-/* hash info structure used by the directory hash */
-struct dx_hash_info
-{
-	u32		hash;
-	u32		minor_hash;
-	int		hash_version;
-	u32		*seed;
-};
-
-
-/* 32 and 64 bit signed EOF for dx directories */
-#define EXT3_HTREE_EOF_32BIT   ((1UL  << (32 - 1)) - 1)
-#define EXT3_HTREE_EOF_64BIT   ((1ULL << (64 - 1)) - 1)
-
-
-/*
- * Control parameters used by ext3_htree_next_block
- */
-#define HASH_NB_ALWAYS		1
-
-
-/*
- * Describe an inode's exact location on disk and in memory
- */
-struct ext3_iloc
-{
-	struct buffer_head *bh;
-	unsigned long offset;
-	unsigned long block_group;
-};
-
-static inline struct ext3_inode *ext3_raw_inode(struct ext3_iloc *iloc)
-{
-	return (struct ext3_inode *) (iloc->bh->b_data + iloc->offset);
-}
-
-/*
- * This structure is stuffed into the struct file's private_data field
- * for directories.  It is where we put information so that we can do
- * readdir operations in hash tree order.
- */
-struct dir_private_info {
-	struct rb_root	root;
-	struct rb_node	*curr_node;
-	struct fname	*extra_fname;
-	loff_t		last_pos;
-	__u32		curr_hash;
-	__u32		curr_minor_hash;
-	__u32		next_hash;
-};
-
-/* calculate the first block number of the group */
-static inline ext3_fsblk_t
-ext3_group_first_block_no(struct super_block *sb, unsigned long group_no)
-{
-	return group_no * (ext3_fsblk_t)EXT3_BLOCKS_PER_GROUP(sb) +
-		le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
-}
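
The group-to-block mapping is plain linear arithmetic from superblock geometry, and it inverts just as easily. A small sketch with made-up geometry for a 1KB-block filesystem (block_to_group() is an illustrative inverse, not an ext3 helper):

#include <stdio.h>

typedef unsigned long long fsblk_t;

#define BLOCKS_PER_GROUP	8192ULL		/* 8 * block size, for 1KB blocks */
#define FIRST_DATA_BLOCK	1ULL		/* 1 on 1KB-block filesystems */

static fsblk_t group_first_block(unsigned long group)
{
	return group * BLOCKS_PER_GROUP + FIRST_DATA_BLOCK;
}

static unsigned long block_to_group(fsblk_t block)
{
	return (block - FIRST_DATA_BLOCK) / BLOCKS_PER_GROUP;
}

int main(void)
{
	fsblk_t b = group_first_block(3);

	printf("group 3 starts at block %llu\n", b);
	printf("block %llu lives in group %lu\n", b + 100, block_to_group(b + 100));
	return 0;
}
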
-
-/*
- * Special error return code only used by dx_probe() and its callers.
- */
-#define ERR_BAD_DX_DIR	-75000
-
-/*
- * Function prototypes
- */
-
-/*
- * Ok, these declarations are also in <linux/kernel.h> but none of the
- * ext3 source programs needs to include it so they are duplicated here.
- */
-# define NORET_TYPE    /**/
-# define ATTRIB_NORET  __attribute__((noreturn))
-# define NORET_AND     noreturn,
-
-/* balloc.c */
-extern int ext3_bg_has_super(struct super_block *sb, int group);
-extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
-extern ext3_fsblk_t ext3_new_block (handle_t *handle, struct inode *inode,
-			ext3_fsblk_t goal, int *errp);
-extern ext3_fsblk_t ext3_new_blocks (handle_t *handle, struct inode *inode,
-			ext3_fsblk_t goal, unsigned long *count, int *errp);
-extern void ext3_free_blocks (handle_t *handle, struct inode *inode,
-			ext3_fsblk_t block, unsigned long count);
-extern void ext3_free_blocks_sb (handle_t *handle, struct super_block *sb,
-				 ext3_fsblk_t block, unsigned long count,
-				unsigned long *pdquot_freed_blocks);
-extern ext3_fsblk_t ext3_count_free_blocks (struct super_block *);
-extern void ext3_check_blocks_bitmap (struct super_block *);
-extern struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
-						    unsigned int block_group,
-						    struct buffer_head ** bh);
-extern int ext3_should_retry_alloc(struct super_block *sb, int *retries);
-extern void ext3_init_block_alloc_info(struct inode *);
-extern void ext3_rsv_window_add(struct super_block *sb, struct ext3_reserve_window_node *rsv);
-extern int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range);
-
-/* dir.c */
-extern int ext3_check_dir_entry(const char *, struct inode *,
-				struct ext3_dir_entry_2 *,
-				struct buffer_head *, unsigned long);
-extern int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
-				    __u32 minor_hash,
-				    struct ext3_dir_entry_2 *dirent);
-extern void ext3_htree_free_dir_info(struct dir_private_info *p);
-
-/* fsync.c */
-extern int ext3_sync_file(struct file *, loff_t, loff_t, int);
-
-/* hash.c */
-extern int ext3fs_dirhash(const char *name, int len, struct
-			  dx_hash_info *hinfo);
-
-/* ialloc.c */
-extern struct inode * ext3_new_inode (handle_t *, struct inode *,
-				      const struct qstr *, umode_t);
-extern void ext3_free_inode (handle_t *, struct inode *);
-extern struct inode * ext3_orphan_get (struct super_block *, unsigned long);
-extern unsigned long ext3_count_free_inodes (struct super_block *);
-extern unsigned long ext3_count_dirs (struct super_block *);
-extern void ext3_check_inodes_bitmap (struct super_block *);
-extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
-
-
-/* inode.c */
-int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
-		struct buffer_head *bh, ext3_fsblk_t blocknr);
-struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
-struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
-int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
-	sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
-	int create);
-
-extern struct inode *ext3_iget(struct super_block *, unsigned long);
-extern int  ext3_write_inode (struct inode *, struct writeback_control *);
-extern int  ext3_setattr (struct dentry *, struct iattr *);
-extern void ext3_evict_inode (struct inode *);
-extern int  ext3_sync_inode (handle_t *, struct inode *);
-extern void ext3_discard_reservation (struct inode *);
-extern void ext3_dirty_inode(struct inode *, int);
-extern int ext3_change_inode_journal_flag(struct inode *, int);
-extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *);
-extern int ext3_can_truncate(struct inode *inode);
-extern void ext3_truncate(struct inode *inode);
-extern void ext3_set_inode_flags(struct inode *);
-extern void ext3_get_inode_flags(struct ext3_inode_info *);
-extern void ext3_set_aops(struct inode *inode);
-extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
-		       u64 start, u64 len);
-
-/* ioctl.c */
-extern long ext3_ioctl(struct file *, unsigned int, unsigned long);
-extern long ext3_compat_ioctl(struct file *, unsigned int, unsigned long);
-
-/* namei.c */
-extern int ext3_orphan_add(handle_t *, struct inode *);
-extern int ext3_orphan_del(handle_t *, struct inode *);
-extern int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
-				__u32 start_minor_hash, __u32 *next_hash);
-
-/* resize.c */
-extern int ext3_group_add(struct super_block *sb,
-				struct ext3_new_group_data *input);
-extern int ext3_group_extend(struct super_block *sb,
-				struct ext3_super_block *es,
-				ext3_fsblk_t n_blocks_count);
-
-/* super.c */
-extern __printf(3, 4)
-void ext3_error(struct super_block *, const char *, const char *, ...);
-extern void __ext3_std_error (struct super_block *, const char *, int);
-extern __printf(3, 4)
-void ext3_abort(struct super_block *, const char *, const char *, ...);
-extern __printf(3, 4)
-void ext3_warning(struct super_block *, const char *, const char *, ...);
-extern __printf(3, 4)
-void ext3_msg(struct super_block *, const char *, const char *, ...);
-extern void ext3_update_dynamic_rev (struct super_block *sb);
-
-#define ext3_std_error(sb, errno)				\
-do {								\
-	if ((errno))						\
-		__ext3_std_error((sb), __func__, (errno));	\
-} while (0)
-
-/*
- * Inode and file operations
- */
-
-/* dir.c */
-extern const struct file_operations ext3_dir_operations;
-
-/* file.c */
-extern const struct inode_operations ext3_file_inode_operations;
-extern const struct file_operations ext3_file_operations;
-
-/* namei.c */
-extern const struct inode_operations ext3_dir_inode_operations;
-extern const struct inode_operations ext3_special_inode_operations;
-
-/* symlink.c */
-extern const struct inode_operations ext3_symlink_inode_operations;
-extern const struct inode_operations ext3_fast_symlink_inode_operations;
-
-#define EXT3_JOURNAL(inode)	(EXT3_SB((inode)->i_sb)->s_journal)
-
-/* Define the number of blocks we need to account to a transaction to
- * modify one block of data.
- *
- * We may have to touch one inode, one bitmap buffer, up to three
- * indirection blocks, the group and superblock summaries, and the data
- * block to complete the transaction.  */
-
-#define EXT3_SINGLEDATA_TRANS_BLOCKS	8U
-
-/* Extended attribute operations touch at most two data buffers,
- * two bitmap buffers, and two group summaries, in addition to the inode
- * and the superblock, which are already accounted for. */
-
-#define EXT3_XATTR_TRANS_BLOCKS		6U
-
-/* Define the minimum size for a transaction which modifies data.  This
- * needs to take into account the fact that we may end up modifying two
- * quota files too (one for the group, one for the user quota).  The
- * superblock only gets updated once, of course, so don't bother
- * counting that again for the quota updates. */
-
-#define EXT3_DATA_TRANS_BLOCKS(sb)	(EXT3_SINGLEDATA_TRANS_BLOCKS + \
-					 EXT3_XATTR_TRANS_BLOCKS - 2 + \
-					 EXT3_MAXQUOTAS_TRANS_BLOCKS(sb))
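
Plugging in the constants shows where the credit numbers come from. A sketch assuming EXT3_MAXQUOTAS == 2 (user and group quota) and the 2-block EXT3_QUOTA_TRANS_BLOCKS value defined further below; the function name is illustrative, not an ext3 API:

#include <stdio.h>

#define SINGLEDATA_TRANS_BLOCKS	8u
#define XATTR_TRANS_BLOCKS	6u
#define MAXQUOTAS		2	/* user + group */

static unsigned data_trans_blocks(int quota_enabled)
{
	unsigned quota = quota_enabled ? 2u : 0u;	/* EXT3_QUOTA_TRANS_BLOCKS */

	return SINGLEDATA_TRANS_BLOCKS + XATTR_TRANS_BLOCKS - 2
		+ MAXQUOTAS * quota;		/* EXT3_MAXQUOTAS_TRANS_BLOCKS */
}

int main(void)
{
	printf("credits, no quota: %u\n", data_trans_blocks(0));	/* 12 */
	printf("credits, quota:    %u\n", data_trans_blocks(1));	/* 16 */
	return 0;
}
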
-
-/* Delete operations potentially hit one directory's namespace plus an
- * entire inode, plus arbitrary amounts of bitmap/indirection data.  Be
- * generous.  We can grow the delete transaction later if necessary. */
-
-#define EXT3_DELETE_TRANS_BLOCKS(sb)   (EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) + 64)
-
-/* Define an arbitrary limit for the amount of data we will anticipate
- * writing to any given transaction.  For unbounded transactions such as
- * write(2) and truncate(2) we can write more than this, but we always
- * start off at the maximum transaction size and grow the transaction
- * optimistically as we go. */
-
-#define EXT3_MAX_TRANS_DATA		64U
-
-/* We break up a large truncate or write transaction once the handle's
 - * buffer credits get this low; we then need either to extend the
- * transaction or to start a new one.  Reserve enough space here for
- * inode, bitmap, superblock, group and indirection updates for at least
- * one block, plus two quota updates.  Quota allocations are not
- * needed. */
-
-#define EXT3_RESERVE_TRANS_BLOCKS	12U
-
-#define EXT3_INDEX_EXTRA_TRANS_BLOCKS	8
-
-#ifdef CONFIG_QUOTA
-/* Number of blocks needed for a quota update - we know that the structure was
- * allocated so we need to update only inode+data */
-#define EXT3_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
-/* Number of blocks needed for quota insert/delete - we do some block writes
- * but inode, sb and group updates are done only once */
-#define EXT3_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
-		(EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_INIT_REWRITE) : 0)
-#define EXT3_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
-		(EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_DEL_REWRITE) : 0)
-#else
-#define EXT3_QUOTA_TRANS_BLOCKS(sb) 0
-#define EXT3_QUOTA_INIT_BLOCKS(sb) 0
-#define EXT3_QUOTA_DEL_BLOCKS(sb) 0
-#endif
-#define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (EXT3_MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb))
-#define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (EXT3_MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb))
-#define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (EXT3_MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb))
-
-int
-ext3_mark_iloc_dirty(handle_t *handle,
-		     struct inode *inode,
-		     struct ext3_iloc *iloc);
-
-/*
- * On success, we end up with an outstanding reference count against
- * iloc->bh.  This _must_ be cleaned up later.
- */
-
-int ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
-			struct ext3_iloc *iloc);
-
-int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
-
-/*
- * Wrapper functions with which ext3 calls into JBD.  The intent here is
- * to allow these to be turned into appropriate stubs so ext3 can control
- * ext2 filesystems, so ext2+ext3 systems only need one fs.  This work hasn't
- * been done yet.
- */
-
-static inline void ext3_journal_release_buffer(handle_t *handle,
-						struct buffer_head *bh)
-{
-	journal_release_buffer(handle, bh);
-}
-
-void ext3_journal_abort_handle(const char *caller, const char *err_fn,
-		struct buffer_head *bh, handle_t *handle, int err);
-
-int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
-				struct buffer_head *bh);
-
-int __ext3_journal_get_write_access(const char *where, handle_t *handle,
-				struct buffer_head *bh);
-
-int __ext3_journal_forget(const char *where, handle_t *handle,
-				struct buffer_head *bh);
-
-int __ext3_journal_revoke(const char *where, handle_t *handle,
-				unsigned long blocknr, struct buffer_head *bh);
-
-int __ext3_journal_get_create_access(const char *where,
-				handle_t *handle, struct buffer_head *bh);
-
-int __ext3_journal_dirty_metadata(const char *where,
-				handle_t *handle, struct buffer_head *bh);
-
-#define ext3_journal_get_undo_access(handle, bh) \
-	__ext3_journal_get_undo_access(__func__, (handle), (bh))
-#define ext3_journal_get_write_access(handle, bh) \
-	__ext3_journal_get_write_access(__func__, (handle), (bh))
-#define ext3_journal_revoke(handle, blocknr, bh) \
-	__ext3_journal_revoke(__func__, (handle), (blocknr), (bh))
-#define ext3_journal_get_create_access(handle, bh) \
-	__ext3_journal_get_create_access(__func__, (handle), (bh))
-#define ext3_journal_dirty_metadata(handle, bh) \
-	__ext3_journal_dirty_metadata(__func__, (handle), (bh))
-#define ext3_journal_forget(handle, bh) \
-	__ext3_journal_forget(__func__, (handle), (bh))
-
-int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh);
-
-handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks);
-int __ext3_journal_stop(const char *where, handle_t *handle);
-
-static inline handle_t *ext3_journal_start(struct inode *inode, int nblocks)
-{
-	return ext3_journal_start_sb(inode->i_sb, nblocks);
-}
-
-#define ext3_journal_stop(handle) \
-	__ext3_journal_stop(__func__, (handle))
-
-static inline handle_t *ext3_journal_current_handle(void)
-{
-	return journal_current_handle();
-}
-
-static inline int ext3_journal_extend(handle_t *handle, int nblocks)
-{
-	return journal_extend(handle, nblocks);
-}
-
-static inline int ext3_journal_restart(handle_t *handle, int nblocks)
-{
-	return journal_restart(handle, nblocks);
-}
-
-static inline int ext3_journal_blocks_per_page(struct inode *inode)
-{
-	return journal_blocks_per_page(inode);
-}
-
-static inline int ext3_journal_force_commit(journal_t *journal)
-{
-	return journal_force_commit(journal);
-}
-
-/* super.c */
-int ext3_force_commit(struct super_block *sb);
-
-static inline int ext3_should_journal_data(struct inode *inode)
-{
-	if (!S_ISREG(inode->i_mode))
-		return 1;
-	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA)
-		return 1;
-	if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
-		return 1;
-	return 0;
-}
-
-static inline int ext3_should_order_data(struct inode *inode)
-{
-	if (!S_ISREG(inode->i_mode))
-		return 0;
-	if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
-		return 0;
-	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA)
-		return 1;
-	return 0;
-}
-
-static inline int ext3_should_writeback_data(struct inode *inode)
-{
-	if (!S_ISREG(inode->i_mode))
-		return 0;
-	if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
-		return 0;
-	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)
-		return 1;
-	return 0;
-}
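
The three helpers above encode a strict precedence: non-regular files and inodes carrying EXT3_JOURNAL_DATA_FL are always fully journalled, and only plain file data falls through to the mount-wide data= mode. A condensed userspace sketch of that precedence (names are illustrative, not kernel API):

#include <stdio.h>

enum data_mode { JOURNAL_DATA, ORDERED_DATA, WRITEBACK_DATA };

static enum data_mode effective_mode(int is_regular, int inode_journal_flag,
				     enum data_mode mount_mode)
{
	if (!is_regular || inode_journal_flag)
		return JOURNAL_DATA;	/* forced, regardless of data= */
	return mount_mode;		/* plain file data follows the mount option */
}

int main(void)
{
	printf("%d\n", effective_mode(1, 0, ORDERED_DATA));	/* 1: mount option wins */
	printf("%d\n", effective_mode(1, 1, WRITEBACK_DATA));	/* 0: per-inode flag wins */
	printf("%d\n", effective_mode(0, 0, WRITEBACK_DATA));	/* 0: non-regular file */
	return 0;
}
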
-
-#include <trace/events/ext3.h>
diff --git a/fs/ext3/ext3_jbd.c b/fs/ext3/ext3_jbd.c
deleted file mode 100644
index 785a326..0000000
--- a/fs/ext3/ext3_jbd.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Interface between ext3 and JBD
- */
-
-#include "ext3.h"
-
-int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
-				struct buffer_head *bh)
-{
-	int err = journal_get_undo_access(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle,err);
-	return err;
-}
-
-int __ext3_journal_get_write_access(const char *where, handle_t *handle,
-				struct buffer_head *bh)
-{
-	int err = journal_get_write_access(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle,err);
-	return err;
-}
-
-int __ext3_journal_forget(const char *where, handle_t *handle,
-				struct buffer_head *bh)
-{
-	int err = journal_forget(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle,err);
-	return err;
-}
-
-int __ext3_journal_revoke(const char *where, handle_t *handle,
-				unsigned long blocknr, struct buffer_head *bh)
-{
-	int err = journal_revoke(handle, blocknr, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle,err);
-	return err;
-}
-
-int __ext3_journal_get_create_access(const char *where,
-				handle_t *handle, struct buffer_head *bh)
-{
-	int err = journal_get_create_access(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle,err);
-	return err;
-}
-
-int __ext3_journal_dirty_metadata(const char *where,
-				handle_t *handle, struct buffer_head *bh)
-{
-	int err = journal_dirty_metadata(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle,err);
-	return err;
-}
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
deleted file mode 100644
index 3b8f650..0000000
--- a/fs/ext3/file.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- *  linux/fs/ext3/file.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/file.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  ext3 fs regular file handling primitives
- *
- *  64-bit file support on 64-bit platforms by Jakub Jelinek
- *	(jj@sunsite.ms.mff.cuni.cz)
- */
-
-#include <linux/quotaops.h>
-#include "ext3.h"
-#include "xattr.h"
-#include "acl.h"
-
-/*
- * Called when an inode is released. Note that this is different
- * from ext3_file_open: open gets called at every open, but release
- * gets called only when /all/ the files are closed.
- */
-static int ext3_release_file (struct inode * inode, struct file * filp)
-{
-	if (ext3_test_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE)) {
-		filemap_flush(inode->i_mapping);
-		ext3_clear_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
-	}
-	/* if we are the last writer on the inode, drop the block reservation */
-	if ((filp->f_mode & FMODE_WRITE) &&
-			(atomic_read(&inode->i_writecount) == 1))
-	{
-		mutex_lock(&EXT3_I(inode)->truncate_mutex);
-		ext3_discard_reservation(inode);
-		mutex_unlock(&EXT3_I(inode)->truncate_mutex);
-	}
-	if (is_dx(inode) && filp->private_data)
-		ext3_htree_free_dir_info(filp->private_data);
-
-	return 0;
-}
-
-const struct file_operations ext3_file_operations = {
-	.llseek		= generic_file_llseek,
-	.read_iter	= generic_file_read_iter,
-	.write_iter	= generic_file_write_iter,
-	.unlocked_ioctl	= ext3_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl	= ext3_compat_ioctl,
-#endif
-	.mmap		= generic_file_mmap,
-	.open		= dquot_file_open,
-	.release	= ext3_release_file,
-	.fsync		= ext3_sync_file,
-	.splice_read	= generic_file_splice_read,
-	.splice_write	= iter_file_splice_write,
-};
-
-const struct inode_operations ext3_file_inode_operations = {
-	.setattr	= ext3_setattr,
-#ifdef CONFIG_EXT3_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
-	.listxattr	= ext3_listxattr,
-	.removexattr	= generic_removexattr,
-#endif
-	.get_acl	= ext3_get_acl,
-	.set_acl	= ext3_set_acl,
-	.fiemap		= ext3_fiemap,
-};
-
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
deleted file mode 100644
index 1cb9c7e..0000000
--- a/fs/ext3/fsync.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- *  linux/fs/ext3/fsync.c
- *
- *  Copyright (C) 1993  Stephen Tweedie (sct@redhat.com)
- *  from
- *  Copyright (C) 1992  Remy Card (card@masi.ibp.fr)
- *                      Laboratoire MASI - Institut Blaise Pascal
- *                      Universite Pierre et Marie Curie (Paris VI)
- *  from
- *  linux/fs/minix/truncate.c   Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  ext3fs fsync primitive
- *
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- *
- *  Removed unnecessary code duplication for little endian machines
- *  and excessive __inline__s.
- *        Andi Kleen, 1997
- *
- * Major simplifications and cleanup - we only need to do the metadata, because
- * we can depend on generic_block_fdatasync() to sync the data blocks.
- */
-
-#include <linux/blkdev.h>
-#include <linux/writeback.h>
-#include "ext3.h"
-
-/*
- * akpm: A new design for ext3_sync_file().
- *
- * This is only called from sys_fsync(), sys_fdatasync() and sys_msync().
- * There cannot be a transaction open by this task.
- * Another task could have dirtied this inode.  Its data can be in any
- * state in the journalling system.
- *
- * What we do is just kick off a commit and wait on it.  This will snapshot the
- * inode to disk.
- */
-
-int ext3_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
-{
-	struct inode *inode = file->f_mapping->host;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
-	int ret, needs_barrier = 0;
-	tid_t commit_tid;
-
-	trace_ext3_sync_file_enter(file, datasync);
-
-	if (inode->i_sb->s_flags & MS_RDONLY) {
-		/* Make sure that we read updated state */
-		smp_rmb();
-		if (EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS)
-			return -EROFS;
-		return 0;
-	}
-	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
-	if (ret)
-		goto out;
-
-	J_ASSERT(ext3_journal_current_handle() == NULL);
-
-	/*
-	 * data=writeback,ordered:
-	 *  The caller's filemap_fdatawrite()/wait will sync the data.
-	 *  Metadata is in the journal, we wait for a proper transaction
-	 *  to commit here.
-	 *
-	 * data=journal:
-	 *  filemap_fdatawrite won't do anything (the buffers are clean).
-	 *  ext3_force_commit will write the file data into the journal and
-	 *  will wait on that.
-	 *  filemap_fdatawait() will encounter a ton of newly-dirtied pages
-	 *  (they were dirtied by commit).  But that's OK - the blocks are
-	 *  safe in-journal, which is all fsync() needs to ensure.
-	 */
-	if (ext3_should_journal_data(inode)) {
-		ret = ext3_force_commit(inode->i_sb);
-		goto out;
-	}
-
-	if (datasync)
-		commit_tid = atomic_read(&ei->i_datasync_tid);
-	else
-		commit_tid = atomic_read(&ei->i_sync_tid);
-
-	if (test_opt(inode->i_sb, BARRIER) &&
-	    !journal_trans_will_send_data_barrier(journal, commit_tid))
-		needs_barrier = 1;
-	log_start_commit(journal, commit_tid);
-	ret = log_wait_commit(journal, commit_tid);
-
-	/*
-	 * In case we didn't commit a transaction, we have to flush
-	 * disk caches manually so that data really is on persistent
-	 * storage
-	 */
-	if (needs_barrier) {
-		int err;
-
-		err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
-		if (!ret)
-			ret = err;
-	}
-out:
-	trace_ext3_sync_file_exit(inode, ret);
-	return ret;
-}
diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
deleted file mode 100644
index ede315c..0000000
--- a/fs/ext3/hash.c
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- *  linux/fs/ext3/hash.c
- *
- * Copyright (C) 2002 by Theodore Ts'o
- *
- * This file is released under the GPL v2.
- *
- * This file may be redistributed under the terms of the GNU Public
- * License.
- */
-
-#include "ext3.h"
-#include <linux/cryptohash.h>
-
-#define DELTA 0x9E3779B9
-
-static void TEA_transform(__u32 buf[4], __u32 const in[])
-{
-	__u32	sum = 0;
-	__u32	b0 = buf[0], b1 = buf[1];
-	__u32	a = in[0], b = in[1], c = in[2], d = in[3];
-	int	n = 16;
-
-	do {
-		sum += DELTA;
-		b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
-		b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
-	} while(--n);
-
-	buf[0] += b0;
-	buf[1] += b1;
-}
-
-
-/* The old legacy hash */
-static __u32 dx_hack_hash_unsigned(const char *name, int len)
-{
-	__u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
-	const unsigned char *ucp = (const unsigned char *) name;
-
-	while (len--) {
-		hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373));
-
-		if (hash & 0x80000000)
-			hash -= 0x7fffffff;
-		hash1 = hash0;
-		hash0 = hash;
-	}
-	return hash0 << 1;
-}
-
-static __u32 dx_hack_hash_signed(const char *name, int len)
-{
-	__u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
-	const signed char *scp = (const signed char *) name;
-
-	while (len--) {
-		hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373));
-
-		if (hash & 0x80000000)
-			hash -= 0x7fffffff;
-		hash1 = hash0;
-		hash0 = hash;
-	}
-	return hash0 << 1;
-}
-
-static void str2hashbuf_signed(const char *msg, int len, __u32 *buf, int num)
-{
-	__u32	pad, val;
-	int	i;
-	const signed char *scp = (const signed char *) msg;
-
-	pad = (__u32)len | ((__u32)len << 8);
-	pad |= pad << 16;
-
-	val = pad;
-	if (len > num*4)
-		len = num * 4;
-	for (i = 0; i < len; i++) {
-		if ((i % 4) == 0)
-			val = pad;
-		val = ((int) scp[i]) + (val << 8);
-		if ((i % 4) == 3) {
-			*buf++ = val;
-			val = pad;
-			num--;
-		}
-	}
-	if (--num >= 0)
-		*buf++ = val;
-	while (--num >= 0)
-		*buf++ = pad;
-}
-
-static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num)
-{
-	__u32	pad, val;
-	int	i;
-	const unsigned char *ucp = (const unsigned char *) msg;
-
-	pad = (__u32)len | ((__u32)len << 8);
-	pad |= pad << 16;
-
-	val = pad;
-	if (len > num*4)
-		len = num * 4;
-	for (i=0; i < len; i++) {
-		if ((i % 4) == 0)
-			val = pad;
-		val = ((int) ucp[i]) + (val << 8);
-		if ((i % 4) == 3) {
-			*buf++ = val;
-			val = pad;
-			num--;
-		}
-	}
-	if (--num >= 0)
-		*buf++ = val;
-	while (--num >= 0)
-		*buf++ = pad;
-}
-
-/*
- * Returns the hash of a filename.  If len is 0 and name is NULL, then
- * this function can be used to test whether or not a hash version is
- * supported.
- *
- * The seed is a 4-longword (32-bit) "secret" which can be used to
- * uniquify a hash.  If the seed is all zeros, then some default seed
- * may be used.
- *
- * A particular hash version specifies whether or not the seed is
- * represented, and whether or not the returned hash is 32 bits or 64
- * bits.  32 bit hashes will return 0 for the minor hash.
- */
-int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
-{
-	__u32	hash;
-	__u32	minor_hash = 0;
-	const char	*p;
-	int		i;
-	__u32		in[8], buf[4];
-	void		(*str2hashbuf)(const char *, int, __u32 *, int) =
-				str2hashbuf_signed;
-
-	/* Initialize the default seed for the hash checksum functions */
-	buf[0] = 0x67452301;
-	buf[1] = 0xefcdab89;
-	buf[2] = 0x98badcfe;
-	buf[3] = 0x10325476;
-
-	/* Check to see if the seed is all zeros */
-	if (hinfo->seed) {
-		for (i=0; i < 4; i++) {
-			if (hinfo->seed[i])
-				break;
-		}
-		if (i < 4)
-			memcpy(buf, hinfo->seed, sizeof(buf));
-	}
-
-	switch (hinfo->hash_version) {
-	case DX_HASH_LEGACY_UNSIGNED:
-		hash = dx_hack_hash_unsigned(name, len);
-		break;
-	case DX_HASH_LEGACY:
-		hash = dx_hack_hash_signed(name, len);
-		break;
-	case DX_HASH_HALF_MD4_UNSIGNED:
-		str2hashbuf = str2hashbuf_unsigned;
-	case DX_HASH_HALF_MD4:
-		p = name;
-		while (len > 0) {
-			(*str2hashbuf)(p, len, in, 8);
-			half_md4_transform(buf, in);
-			len -= 32;
-			p += 32;
-		}
-		minor_hash = buf[2];
-		hash = buf[1];
-		break;
-	case DX_HASH_TEA_UNSIGNED:
-		str2hashbuf = str2hashbuf_unsigned;
-	case DX_HASH_TEA:
-		p = name;
-		while (len > 0) {
-			(*str2hashbuf)(p, len, in, 4);
-			TEA_transform(buf, in);
-			len -= 16;
-			p += 16;
-		}
-		hash = buf[0];
-		minor_hash = buf[1];
-		break;
-	default:
-		hinfo->hash = 0;
-		return -1;
-	}
-	hash = hash & ~1;
-	if (hash == (EXT3_HTREE_EOF_32BIT << 1))
-		hash = (EXT3_HTREE_EOF_32BIT - 1) << 1;
-	hinfo->hash = hash;
-	hinfo->minor_hash = minor_hash;
-	return 0;
-}
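
The *_UNSIGNED hash variants exist because the original code hashed names through plain char, whose signedness varies by architecture, so names containing bytes >= 0x80 hashed differently on, say, x86 versus ARM or PowerPC. A userspace demonstration of the divergence, re-spelling dx_hack_hash with explicit signedness (the multiply is done in u32 to keep the sketch free of signed overflow; on two's-complement machines the result matches the kernel's int arithmetic bit for bit):

#include <stdio.h>

typedef unsigned int u32;

static u32 dx_hack_hash(const char *name, int len, int treat_unsigned)
{
	u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;

	while (len--) {
		int c = treat_unsigned ? *(const unsigned char *)name
				       : *(const signed char *)name;
		name++;
		hash = hash1 + (hash0 ^ ((u32)c * 7152373u));
		if (hash & 0x80000000)
			hash -= 0x7fffffff;
		hash1 = hash0;
		hash0 = hash;
	}
	return hash0 << 1;
}

int main(void)
{
	const char name[] = "caf\xe9";	/* contains a byte >= 0x80 */

	printf("signed chars:   %#010x\n", dx_hack_hash(name, 4, 0));
	printf("unsigned chars: %#010x\n", dx_hack_hash(name, 4, 1));
	return 0;
}
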
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
deleted file mode 100644
index 3ad242e..0000000
--- a/fs/ext3/ialloc.c
+++ /dev/null
@@ -1,706 +0,0 @@
-/*
- *  linux/fs/ext3/ialloc.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  BSD ufs-inspired inode and directory allocation by
- *  Stephen Tweedie (sct@redhat.com), 1993
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- */
-
-#include <linux/quotaops.h>
-#include <linux/random.h>
-
-#include "ext3.h"
-#include "xattr.h"
-#include "acl.h"
-
-/*
- * ialloc.c contains the inode allocation and deallocation routines
- */
-
-/*
- * The free inodes are managed by bitmaps.  A file system contains several
- * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
- * block for inodes, N blocks for the inode table and data blocks.
- *
- * The file system contains group descriptors which are located after the
- * super block.  Each descriptor contains the number of the bitmap block and
- * the free blocks count in the block.
- */
-
-
-/*
- * Read the inode allocation bitmap for a given block_group, reading
- * into the specified slot in the superblock's bitmap cache.
- *
- * Return buffer_head of bitmap on success or NULL.
- */
-static struct buffer_head *
-read_inode_bitmap(struct super_block * sb, unsigned long block_group)
-{
-	struct ext3_group_desc *desc;
-	struct buffer_head *bh = NULL;
-
-	desc = ext3_get_group_desc(sb, block_group, NULL);
-	if (!desc)
-		goto error_out;
-
-	bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));
-	if (!bh)
-		ext3_error(sb, "read_inode_bitmap",
-			    "Cannot read inode bitmap - "
-			    "block_group = %lu, inode_bitmap = %u",
-			    block_group, le32_to_cpu(desc->bg_inode_bitmap));
-error_out:
-	return bh;
-}
-
-/*
- * NOTE! When we get the inode, we're the only people
- * that have access to it, and as such there are no
- * race conditions we have to worry about. The inode
- * is not on the hash-lists, and it cannot be reached
- * through the filesystem because the directory entry
- * has been deleted earlier.
- *
- * HOWEVER: we must make sure that we get no aliases,
- * which means that we have to call "clear_inode()"
- * _before_ we mark the inode not in use in the inode
- * bitmaps. Otherwise a newly created file might use
- * the same inode number (not actually the same pointer
- * though), and then we'd have two inodes sharing the
-	 * same inode number and space on the hard disk.
- */
-void ext3_free_inode (handle_t *handle, struct inode * inode)
-{
-	struct super_block * sb = inode->i_sb;
-	int is_directory;
-	unsigned long ino;
-	struct buffer_head *bitmap_bh = NULL;
-	struct buffer_head *bh2;
-	unsigned long block_group;
-	unsigned long bit;
-	struct ext3_group_desc * gdp;
-	struct ext3_super_block * es;
-	struct ext3_sb_info *sbi;
-	int fatal = 0, err;
-
-	if (atomic_read(&inode->i_count) > 1) {
-		printk ("ext3_free_inode: inode has count=%d\n",
-					atomic_read(&inode->i_count));
-		return;
-	}
-	if (inode->i_nlink) {
-		printk ("ext3_free_inode: inode has nlink=%d\n",
-			inode->i_nlink);
-		return;
-	}
-	if (!sb) {
-		printk("ext3_free_inode: inode on nonexistent device\n");
-		return;
-	}
-	sbi = EXT3_SB(sb);
-
-	ino = inode->i_ino;
-	ext3_debug ("freeing inode %lu\n", ino);
-	trace_ext3_free_inode(inode);
-
-	is_directory = S_ISDIR(inode->i_mode);
-
-	es = EXT3_SB(sb)->s_es;
-	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
-		ext3_error (sb, "ext3_free_inode",
-			    "reserved or nonexistent inode %lu", ino);
-		goto error_return;
-	}
-	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
-	bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
-	bitmap_bh = read_inode_bitmap(sb, block_group);
-	if (!bitmap_bh)
-		goto error_return;
-
-	BUFFER_TRACE(bitmap_bh, "get_write_access");
-	fatal = ext3_journal_get_write_access(handle, bitmap_bh);
-	if (fatal)
-		goto error_return;
-
-	/* Ok, now we can actually update the inode bitmaps.. */
-	if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
-					bit, bitmap_bh->b_data))
-		ext3_error (sb, "ext3_free_inode",
-			      "bit already cleared for inode %lu", ino);
-	else {
-		gdp = ext3_get_group_desc (sb, block_group, &bh2);
-
-		BUFFER_TRACE(bh2, "get_write_access");
-		fatal = ext3_journal_get_write_access(handle, bh2);
-		if (fatal) goto error_return;
-
-		if (gdp) {
-			spin_lock(sb_bgl_lock(sbi, block_group));
-			le16_add_cpu(&gdp->bg_free_inodes_count, 1);
-			if (is_directory)
-				le16_add_cpu(&gdp->bg_used_dirs_count, -1);
-			spin_unlock(sb_bgl_lock(sbi, block_group));
-			percpu_counter_inc(&sbi->s_freeinodes_counter);
-			if (is_directory)
-				percpu_counter_dec(&sbi->s_dirs_counter);
-
-		}
-		BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
-		err = ext3_journal_dirty_metadata(handle, bh2);
-		if (!fatal) fatal = err;
-	}
-	BUFFER_TRACE(bitmap_bh, "call ext3_journal_dirty_metadata");
-	err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-	if (!fatal)
-		fatal = err;
-
-error_return:
-	brelse(bitmap_bh);
-	ext3_std_error(sb, fatal);
-}
-
-/*
- * Orlov's allocator for directories.
- *
- * We always try to spread first-level directories.
- *
- * If there are blockgroups with both free inode and free block counts
- * not worse than average, we return the one with the smallest directory count.
- * Otherwise we simply return a random group.
- *
- * For the other inodes, the rules are as follows:
- *
- * It's OK to put directory into a group unless
- * it has too many directories already (max_dirs) or
- * it has too few free inodes left (min_inodes) or
- * it has too few free blocks left (min_blocks).
- * The parent's group is preferred; if it doesn't satisfy these
- * conditions, we search cyclically through the rest. If none
- * of the groups look good we just look for a group with more
- * free inodes than average (starting at parent's group).
- *
- * Debt is incremented each time we allocate a directory and decremented
- * when we allocate an inode, within 0--255.
- */
-
-static int find_group_orlov(struct super_block *sb, struct inode *parent)
-{
-	int parent_group = EXT3_I(parent)->i_block_group;
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	int ngroups = sbi->s_groups_count;
-	int inodes_per_group = EXT3_INODES_PER_GROUP(sb);
-	unsigned int freei, avefreei;
-	ext3_fsblk_t freeb, avefreeb;
-	unsigned int ndirs;
-	int max_dirs, min_inodes;
-	ext3_grpblk_t min_blocks;
-	int group = -1, i;
-	struct ext3_group_desc *desc;
-
-	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
-	avefreei = freei / ngroups;
-	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
-	avefreeb = freeb / ngroups;
-	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
-
-	if ((parent == d_inode(sb->s_root)) ||
-	    (EXT3_I(parent)->i_flags & EXT3_TOPDIR_FL)) {
-		int best_ndir = inodes_per_group;
-		int best_group = -1;
-
-		group = prandom_u32();
-		parent_group = (unsigned)group % ngroups;
-		for (i = 0; i < ngroups; i++) {
-			group = (parent_group + i) % ngroups;
-			desc = ext3_get_group_desc (sb, group, NULL);
-			if (!desc || !desc->bg_free_inodes_count)
-				continue;
-			if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
-				continue;
-			if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
-				continue;
-			if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
-				continue;
-			best_group = group;
-			best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
-		}
-		if (best_group >= 0)
-			return best_group;
-		goto fallback;
-	}
-
-	max_dirs = ndirs / ngroups + inodes_per_group / 16;
-	min_inodes = avefreei - inodes_per_group / 4;
-	min_blocks = avefreeb - EXT3_BLOCKS_PER_GROUP(sb) / 4;
-
-	for (i = 0; i < ngroups; i++) {
-		group = (parent_group + i) % ngroups;
-		desc = ext3_get_group_desc (sb, group, NULL);
-		if (!desc || !desc->bg_free_inodes_count)
-			continue;
-		if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
-			continue;
-		if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
-			continue;
-		if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
-			continue;
-		return group;
-	}
-
-fallback:
-	for (i = 0; i < ngroups; i++) {
-		group = (parent_group + i) % ngroups;
-		desc = ext3_get_group_desc (sb, group, NULL);
-		if (!desc || !desc->bg_free_inodes_count)
-			continue;
-		if (le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
-			return group;
-	}
-
-	if (avefreei) {
-		/*
-		 * The free-inodes counter is approximate, and for really small
-		 * filesystems the above test can fail to find any blockgroups.
-		 */
-		avefreei = 0;
-		goto fallback;
-	}
-
-	return -1;
-}
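
To see how the Orlov thresholds behave, here is a worked example with made-up filesystem-wide statistics (userspace sketch; the kernel reads these values from percpu counters and group descriptors):

#include <stdio.h>

int main(void)
{
	unsigned ngroups = 8, inodes_per_group = 2048, blocks_per_group = 32768;
	unsigned freei = 9000, ndirs = 300;
	unsigned long long freeb = 120000;

	unsigned avefreei = freei / ngroups;			/* 1125 */
	unsigned long long avefreeb = freeb / ngroups;		/* 15000 */

	unsigned max_dirs = ndirs / ngroups + inodes_per_group / 16;
	int min_inodes = (int)avefreei - (int)(inodes_per_group / 4);
	long long min_blocks = (long long)avefreeb - blocks_per_group / 4;

	printf("max_dirs=%u min_inodes=%d min_blocks=%lld\n",
	       max_dirs, min_inodes, min_blocks);
	/* A group qualifies if used_dirs < max_dirs and it still has at
	 * least min_inodes free inodes and min_blocks free blocks. */
	return 0;
}
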
-
-static int find_group_other(struct super_block *sb, struct inode *parent)
-{
-	int parent_group = EXT3_I(parent)->i_block_group;
-	int ngroups = EXT3_SB(sb)->s_groups_count;
-	struct ext3_group_desc *desc;
-	int group, i;
-
-	/*
-	 * Try to place the inode in its parent directory
-	 */
-	group = parent_group;
-	desc = ext3_get_group_desc (sb, group, NULL);
-	if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
-			le16_to_cpu(desc->bg_free_blocks_count))
-		return group;
-
-	/*
-	 * We're going to place this inode in a different blockgroup from its
-	 * parent.  We want to cause files in a common directory to all land in
-	 * the same blockgroup.  But we want files which are in a different
-	 * directory which shares a blockgroup with our parent to land in a
-	 * different blockgroup.
-	 *
-	 * So add our directory's i_ino into the starting point for the hash.
-	 */
-	group = (group + parent->i_ino) % ngroups;
-
-	/*
-	 * Use a quadratic hash to find a group with a free inode and some free
-	 * blocks.
-	 */
-	for (i = 1; i < ngroups; i <<= 1) {
-		group += i;
-		if (group >= ngroups)
-			group -= ngroups;
-		desc = ext3_get_group_desc (sb, group, NULL);
-		if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
-				le16_to_cpu(desc->bg_free_blocks_count))
-			return group;
-	}
-
-	/*
-	 * That failed: try linear search for a free inode, even if that group
-	 * has no free blocks.
-	 */
-	group = parent_group;
-	for (i = 0; i < ngroups; i++) {
-		if (++group >= ngroups)
-			group = 0;
-		desc = ext3_get_group_desc (sb, group, NULL);
-		if (desc && le16_to_cpu(desc->bg_free_inodes_count))
-			return group;
-	}
-
-	return -1;
-}
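
The quadratic search above probes groups at offsets 1, 2, 4, 8, ... from a hash-derived starting group, wrapping modulo ngroups. A toy trace of the probe order (made-up numbers; not kernel code):

#include <stdio.h>

int main(void)
{
	int ngroups = 16, parent_group = 5;
	unsigned long parent_ino = 1000;	/* made-up directory inode */
	int group = (int)((parent_group + parent_ino) % ngroups);

	printf("start: group %d\n", group);
	for (int i = 1; i < ngroups; i <<= 1) {
		group += i;
		if (group >= ngroups)
			group -= ngroups;
		printf("step %2d: probe group %d\n", i, group);
	}
	return 0;
}
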
-
-/*
- * There are two policies for allocating an inode.  If the new inode is
- * a directory, then a forward search is made for a block group with both
- * free space and a low directory-to-inode ratio; if that fails, then of
- * the groups with above-average free space, the group with the fewest
- * existing directories is chosen.
- *
- * For other inodes, search forward from the parent directory's block
- * group to find a free inode.
- */
-struct inode *ext3_new_inode(handle_t *handle, struct inode * dir,
-			     const struct qstr *qstr, umode_t mode)
-{
-	struct super_block *sb;
-	struct buffer_head *bitmap_bh = NULL;
-	struct buffer_head *bh2;
-	int group;
-	unsigned long ino = 0;
-	struct inode * inode;
-	struct ext3_group_desc * gdp = NULL;
-	struct ext3_super_block * es;
-	struct ext3_inode_info *ei;
-	struct ext3_sb_info *sbi;
-	int err = 0;
-	struct inode *ret;
-	int i;
-
-	/* Cannot create files in a deleted directory */
-	if (!dir || !dir->i_nlink)
-		return ERR_PTR(-EPERM);
-
-	sb = dir->i_sb;
-	trace_ext3_request_inode(dir, mode);
-	inode = new_inode(sb);
-	if (!inode)
-		return ERR_PTR(-ENOMEM);
-	ei = EXT3_I(inode);
-
-	sbi = EXT3_SB(sb);
-	es = sbi->s_es;
-	if (S_ISDIR(mode))
-		group = find_group_orlov(sb, dir);
-	else
-		group = find_group_other(sb, dir);
-
-	err = -ENOSPC;
-	if (group == -1)
-		goto out;
-
-	for (i = 0; i < sbi->s_groups_count; i++) {
-		err = -EIO;
-
-		gdp = ext3_get_group_desc(sb, group, &bh2);
-		if (!gdp)
-			goto fail;
-
-		brelse(bitmap_bh);
-		bitmap_bh = read_inode_bitmap(sb, group);
-		if (!bitmap_bh)
-			goto fail;
-
-		ino = 0;
-
-repeat_in_this_group:
-		ino = ext3_find_next_zero_bit((unsigned long *)
-				bitmap_bh->b_data, EXT3_INODES_PER_GROUP(sb), ino);
-		if (ino < EXT3_INODES_PER_GROUP(sb)) {
-
-			BUFFER_TRACE(bitmap_bh, "get_write_access");
-			err = ext3_journal_get_write_access(handle, bitmap_bh);
-			if (err)
-				goto fail;
-
-			if (!ext3_set_bit_atomic(sb_bgl_lock(sbi, group),
-						ino, bitmap_bh->b_data)) {
-				/* we won it */
-				BUFFER_TRACE(bitmap_bh,
-					"call ext3_journal_dirty_metadata");
-				err = ext3_journal_dirty_metadata(handle,
-								bitmap_bh);
-				if (err)
-					goto fail;
-				goto got;
-			}
-			/* we lost it */
-			journal_release_buffer(handle, bitmap_bh);
-
-			if (++ino < EXT3_INODES_PER_GROUP(sb))
-				goto repeat_in_this_group;
-		}
-
-		/*
-		 * This case is possible in a concurrent environment.  It is very
-		 * rare.  We cannot repeat the find_group_xxx() call because
-		 * that will simply return the same blockgroup, because the
-		 * group descriptor metadata has not yet been updated.
-		 * So we just go onto the next blockgroup.
-		 */
-		if (++group == sbi->s_groups_count)
-			group = 0;
-	}
-	err = -ENOSPC;
-	goto out;
-
-got:
-	ino += group * EXT3_INODES_PER_GROUP(sb) + 1;
-	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
-		ext3_error (sb, "ext3_new_inode",
-			    "reserved inode or inode > inodes count - "
-			    "block_group = %d, inode=%lu", group, ino);
-		err = -EIO;
-		goto fail;
-	}
-
-	BUFFER_TRACE(bh2, "get_write_access");
-	err = ext3_journal_get_write_access(handle, bh2);
-	if (err) goto fail;
-	spin_lock(sb_bgl_lock(sbi, group));
-	le16_add_cpu(&gdp->bg_free_inodes_count, -1);
-	if (S_ISDIR(mode)) {
-		le16_add_cpu(&gdp->bg_used_dirs_count, 1);
-	}
-	spin_unlock(sb_bgl_lock(sbi, group));
-	BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
-	err = ext3_journal_dirty_metadata(handle, bh2);
-	if (err) goto fail;
-
-	percpu_counter_dec(&sbi->s_freeinodes_counter);
-	if (S_ISDIR(mode))
-		percpu_counter_inc(&sbi->s_dirs_counter);
-
-
-	if (test_opt(sb, GRPID)) {
-		inode->i_mode = mode;
-		inode->i_uid = current_fsuid();
-		inode->i_gid = dir->i_gid;
-	} else
-		inode_init_owner(inode, dir, mode);
-
-	inode->i_ino = ino;
-	/* This is the optimal IO size (for stat), not the fs block size */
-	inode->i_blocks = 0;
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
-
-	memset(ei->i_data, 0, sizeof(ei->i_data));
-	ei->i_dir_start_lookup = 0;
-	ei->i_disksize = 0;
-
-	ei->i_flags =
-		ext3_mask_flags(mode, EXT3_I(dir)->i_flags & EXT3_FL_INHERITED);
-#ifdef EXT3_FRAGMENTS
-	ei->i_faddr = 0;
-	ei->i_frag_no = 0;
-	ei->i_frag_size = 0;
-#endif
-	ei->i_file_acl = 0;
-	ei->i_dir_acl = 0;
-	ei->i_dtime = 0;
-	ei->i_block_alloc_info = NULL;
-	ei->i_block_group = group;
-
-	ext3_set_inode_flags(inode);
-	if (IS_DIRSYNC(inode))
-		handle->h_sync = 1;
-	if (insert_inode_locked(inode) < 0) {
-		/*
-		 * Likely a bitmap corruption caused the inode to be allocated
-		 * twice.
-		 */
-		err = -EIO;
-		goto fail;
-	}
-	spin_lock(&sbi->s_next_gen_lock);
-	inode->i_generation = sbi->s_next_generation++;
-	spin_unlock(&sbi->s_next_gen_lock);
-
-	ei->i_state_flags = 0;
-	ext3_set_inode_state(inode, EXT3_STATE_NEW);
-
-	/* See comment in ext3_iget for explanation */
-	if (ino >= EXT3_FIRST_INO(sb) + 1 &&
-	    EXT3_INODE_SIZE(sb) > EXT3_GOOD_OLD_INODE_SIZE) {
-		ei->i_extra_isize =
-			sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE;
-	} else {
-		ei->i_extra_isize = 0;
-	}
-
-	ret = inode;
-	dquot_initialize(inode);
-	err = dquot_alloc_inode(inode);
-	if (err)
-		goto fail_drop;
-
-	err = ext3_init_acl(handle, inode, dir);
-	if (err)
-		goto fail_free_drop;
-
-	err = ext3_init_security(handle, inode, dir, qstr);
-	if (err)
-		goto fail_free_drop;
-
-	err = ext3_mark_inode_dirty(handle, inode);
-	if (err) {
-		ext3_std_error(sb, err);
-		goto fail_free_drop;
-	}
-
-	ext3_debug("allocating inode %lu\n", inode->i_ino);
-	trace_ext3_allocate_inode(inode, dir, mode);
-	goto really_out;
-fail:
-	ext3_std_error(sb, err);
-out:
-	iput(inode);
-	ret = ERR_PTR(err);
-really_out:
-	brelse(bitmap_bh);
-	return ret;
-
-fail_free_drop:
-	dquot_free_inode(inode);
-
-fail_drop:
-	dquot_drop(inode);
-	inode->i_flags |= S_NOQUOTA;
-	clear_nlink(inode);
-	unlock_new_inode(inode);
-	iput(inode);
-	brelse(bitmap_bh);
-	return ERR_PTR(err);
-}
-
-/* Verify that we are loading a valid orphan from disk */
-struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
-{
-	unsigned long max_ino = le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count);
-	unsigned long block_group;
-	int bit;
-	struct buffer_head *bitmap_bh;
-	struct inode *inode = NULL;
-	long err = -EIO;
-
-	/* Error cases - e2fsck has already cleaned up for us */
-	if (ino > max_ino) {
-		ext3_warning(sb, __func__,
-			     "bad orphan ino %lu!  e2fsck was run?", ino);
-		goto error;
-	}
-
-	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
-	bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
-	bitmap_bh = read_inode_bitmap(sb, block_group);
-	if (!bitmap_bh) {
-		ext3_warning(sb, __func__,
-			     "inode bitmap error for orphan %lu", ino);
-		goto error;
-	}
-
-	/* Having the inode bit set should be a 100% indicator that this
-	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
-	 * inodes that were being truncated, so we can't check i_nlink==0.
-	 */
-	if (!ext3_test_bit(bit, bitmap_bh->b_data))
-		goto bad_orphan;
-
-	inode = ext3_iget(sb, ino);
-	if (IS_ERR(inode))
-		goto iget_failed;
-
-	/*
-	 * If the orphan has i_nlink > 0 then it should be able to be
-	 * truncated, otherwise it won't be removed from the orphan list
-	 * during processing and an infinite loop will result.
-	 */
-	if (inode->i_nlink && !ext3_can_truncate(inode))
-		goto bad_orphan;
-
-	if (NEXT_ORPHAN(inode) > max_ino)
-		goto bad_orphan;
-	brelse(bitmap_bh);
-	return inode;
-
-iget_failed:
-	err = PTR_ERR(inode);
-	inode = NULL;
-bad_orphan:
-	ext3_warning(sb, __func__,
-		     "bad orphan inode %lu!  e2fsck was run?", ino);
-	printk(KERN_NOTICE "ext3_test_bit(bit=%d, block=%llu) = %d\n",
-	       bit, (unsigned long long)bitmap_bh->b_blocknr,
-	       ext3_test_bit(bit, bitmap_bh->b_data));
-	printk(KERN_NOTICE "inode=%p\n", inode);
-	if (inode) {
-		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
-		       is_bad_inode(inode));
-		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
-		       NEXT_ORPHAN(inode));
-		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
-		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
-		/* Avoid freeing blocks if we got a bad deleted inode */
-		if (inode->i_nlink == 0)
-			inode->i_blocks = 0;
-		iput(inode);
-	}
-	brelse(bitmap_bh);
-error:
-	return ERR_PTR(err);
-}
-
-unsigned long ext3_count_free_inodes (struct super_block * sb)
-{
-	unsigned long desc_count;
-	struct ext3_group_desc *gdp;
-	int i;
-#ifdef EXT3FS_DEBUG
-	struct ext3_super_block *es;
-	unsigned long bitmap_count, x;
-	struct buffer_head *bitmap_bh = NULL;
-
-	es = EXT3_SB(sb)->s_es;
-	desc_count = 0;
-	bitmap_count = 0;
-	gdp = NULL;
-	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
-		gdp = ext3_get_group_desc (sb, i, NULL);
-		if (!gdp)
-			continue;
-		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
-		brelse(bitmap_bh);
-		bitmap_bh = read_inode_bitmap(sb, i);
-		if (!bitmap_bh)
-			continue;
-
-		x = ext3_count_free(bitmap_bh, EXT3_INODES_PER_GROUP(sb) / 8);
-		printk("group %d: stored = %d, counted = %lu\n",
-			i, le16_to_cpu(gdp->bg_free_inodes_count), x);
-		bitmap_count += x;
-	}
-	brelse(bitmap_bh);
-	printk("ext3_count_free_inodes: stored = %u, computed = %lu, %lu\n",
-		le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
-	return desc_count;
-#else
-	desc_count = 0;
-	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
-		gdp = ext3_get_group_desc (sb, i, NULL);
-		if (!gdp)
-			continue;
-		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
-		cond_resched();
-	}
-	return desc_count;
-#endif
-}
-
-/* Called at mount-time, super-block is locked */
-unsigned long ext3_count_dirs (struct super_block * sb)
-{
-	unsigned long count = 0;
-	int i;
-
-	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
-		struct ext3_group_desc *gdp = ext3_get_group_desc (sb, i, NULL);
-		if (!gdp)
-			continue;
-		count += le16_to_cpu(gdp->bg_used_dirs_count);
-	}
-	return count;
-}
-
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
deleted file mode 100644
index 6c7e546..0000000
--- a/fs/ext3/inode.c
+++ /dev/null
@@ -1,3574 +0,0 @@
-/*
- *  linux/fs/ext3/inode.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/inode.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  Goal-directed block allocation by Stephen Tweedie
- *	(sct@redhat.com), 1993, 1998
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- *  64-bit file support on 64-bit platforms by Jakub Jelinek
- *	(jj@sunsite.ms.mff.cuni.cz)
- *
- *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
- */
-
-#include <linux/highuid.h>
-#include <linux/quotaops.h>
-#include <linux/writeback.h>
-#include <linux/mpage.h>
-#include <linux/namei.h>
-#include <linux/uio.h>
-#include "ext3.h"
-#include "xattr.h"
-#include "acl.h"
-
-static int ext3_writepage_trans_blocks(struct inode *inode);
-static int ext3_block_truncate_page(struct inode *inode, loff_t from);
-
-/*
- * Test whether an inode is a fast symlink.
- */
-static int ext3_inode_is_fast_symlink(struct inode *inode)
-{
-	int ea_blocks = EXT3_I(inode)->i_file_acl ?
-		(inode->i_sb->s_blocksize >> 9) : 0;
-
-	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
-}
-
-/*
- * The ext3 forget function must perform a revoke if we are freeing data
- * which has been journaled.  Metadata (e.g. indirect blocks) must be
- * revoked in all cases.
- *
- * "bh" may be NULL: a metadata block may have been freed from memory
- * but there may still be a record of it in the journal, and that record
- * still needs to be revoked.
- */
-int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
-			struct buffer_head *bh, ext3_fsblk_t blocknr)
-{
-	int err;
-
-	might_sleep();
-
-	trace_ext3_forget(inode, is_metadata, blocknr);
-	BUFFER_TRACE(bh, "enter");
-
-	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
-		  "data mode %lx\n",
-		  bh, is_metadata, inode->i_mode,
-		  test_opt(inode->i_sb, DATA_FLAGS));
-
-	/* Never use the revoke function if we are doing full data
-	 * journaling: there is no need to, and a V1 superblock won't
-	 * support it.  Otherwise, only skip the revoke on un-journaled
-	 * data blocks. */
-
-	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
-	    (!is_metadata && !ext3_should_journal_data(inode))) {
-		if (bh) {
-			BUFFER_TRACE(bh, "call journal_forget");
-			return ext3_journal_forget(handle, bh);
-		}
-		return 0;
-	}
-
-	/*
-	 * data!=journal && (is_metadata || should_journal_data(inode))
-	 */
-	BUFFER_TRACE(bh, "call ext3_journal_revoke");
-	err = ext3_journal_revoke(handle, blocknr, bh);
-	if (err)
-		ext3_abort(inode->i_sb, __func__,
-			   "error %d when attempting revoke", err);
-	BUFFER_TRACE(bh, "exit");
-	return err;
-}
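-
-/*
- * Editorial summary, not from the original file: the cases ext3_forget()
- * distinguishes above reduce to
- *
- *	data=journal mount              -> journal_forget(), never revoke
- *	data block, not journaled       -> journal_forget()
- *	metadata or journaled data      -> journal_revoke()
- */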
-
-/*
- * Work out how many blocks we need to proceed with the next chunk of a
- * truncate transaction.
- */
-static unsigned long blocks_for_truncate(struct inode *inode)
-{
-	unsigned long needed;
-
-	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
-
-	/* Give ourselves just enough room to cope with inodes in which
-	 * i_blocks is corrupt: we've seen disk corruptions in the past
-	 * which resulted in random data in an inode which looked enough
-	 * like a regular file for ext3 to try to delete it.  Things
-	 * will go a bit crazy if that happens, but at least we should
-	 * try not to panic the whole kernel. */
-	if (needed < 2)
-		needed = 2;
-
-	/* But we need to bound the transaction so we don't overflow the
-	 * journal. */
-	if (needed > EXT3_MAX_TRANS_DATA)
-		needed = EXT3_MAX_TRANS_DATA;
-
-	return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
-}
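-
-/*
- * Worked example (editorial sketch, not from the original file): a 1MB
- * file on a 1KB-block filesystem has i_blocks == 2048 512-byte sectors,
- * so needed = 2048 >> (10 - 9) = 1024, which is then clamped to
- * EXT3_MAX_TRANS_DATA; the estimate stays bounded no matter how large
- * (or how corrupted) i_blocks is.
- */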
-
-/*
- * Truncate transactions can be complex and absolutely huge.  So we need to
- * be able to restart the transaction at a convenient checkpoint to make
- * sure we don't overflow the journal.
- *
- * start_transaction gets us a new handle for a truncate transaction,
- * and extend_transaction tries to extend the existing one a bit.  If
- * extend fails, we need to propagate the failure up and restart the
- * transaction in the top-level truncate loop. --sct
- */
-static handle_t *start_transaction(struct inode *inode)
-{
-	handle_t *result;
-
-	result = ext3_journal_start(inode, blocks_for_truncate(inode));
-	if (!IS_ERR(result))
-		return result;
-
-	ext3_std_error(inode->i_sb, PTR_ERR(result));
-	return result;
-}
-
-/*
- * Try to extend this transaction for the purposes of truncation.
- *
- * Returns 0 if we managed to create more room.  If we can't create more
- * room, and the transaction must be restarted we return 1.
- */
-static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
-{
-	if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
-		return 0;
-	if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
-		return 0;
-	return 1;
-}
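-
-/*
- * Illustrative usage (editorial sketch, not from the original file): the
- * truncate path extends its handle before each chunk and restarts it when
- * extension fails, dirtying the inode first so the committed state is
- * consistent:
- *
- *	if (try_to_extend_transaction(handle, inode)) {
- *		ext3_mark_inode_dirty(handle, inode);
- *		truncate_restart_transaction(handle, inode);
- *	}
- */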
-
-/*
- * Restart the transaction associated with *handle.  This does a commit,
- * so before we call here everything must be consistently dirtied against
- * this transaction.
- */
-static int truncate_restart_transaction(handle_t *handle, struct inode *inode)
-{
-	int ret;
-
-	jbd_debug(2, "restarting handle %p\n", handle);
-	/*
-	 * Drop truncate_mutex to avoid deadlock with ext3_get_blocks_handle
-	 * At this moment, get_block can be called only for blocks inside
-	 * i_size since page cache has been already dropped and writes are
-	 * blocked by i_mutex. So we can safely drop the truncate_mutex.
-	 */
-	mutex_unlock(&EXT3_I(inode)->truncate_mutex);
-	ret = ext3_journal_restart(handle, blocks_for_truncate(inode));
-	mutex_lock(&EXT3_I(inode)->truncate_mutex);
-	return ret;
-}
-
-/*
- * Called at inode eviction from icache
- */
-void ext3_evict_inode (struct inode *inode)
-{
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct ext3_block_alloc_info *rsv;
-	handle_t *handle;
-	int want_delete = 0;
-
-	trace_ext3_evict_inode(inode);
-	if (!inode->i_nlink && !is_bad_inode(inode)) {
-		dquot_initialize(inode);
-		want_delete = 1;
-	}
-
-	/*
-	 * When journalling data dirty buffers are tracked only in the journal.
-	 * So although mm thinks everything is clean and ready for reaping the
-	 * inode might still have some pages to write in the running
-	 * transaction or waiting to be checkpointed. Thus calling
-	 * journal_invalidatepage() (via truncate_inode_pages()) to discard
-	 * these buffers can cause data loss. Also even if we did not discard
-	 * these buffers, we would have no way to find them after the inode
-	 * is reaped and thus user could see stale data if he tries to read
-	 * them before the transaction is checkpointed. So be careful and
-	 * force everything to disk here... We use ei->i_datasync_tid to
-	 * store the newest transaction containing inode's data.
-	 *
-	 * Note that directories do not have this problem because they don't
-	 * use page cache.
-	 *
-	 * The s_journal check handles the case when ext3_get_journal() fails
-	 * and puts the journal inode.
-	 */
-	if (inode->i_nlink && ext3_should_journal_data(inode) &&
-	    EXT3_SB(inode->i_sb)->s_journal &&
-	    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
-	    inode->i_ino != EXT3_JOURNAL_INO) {
-		tid_t commit_tid = atomic_read(&ei->i_datasync_tid);
-		journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
-
-		log_start_commit(journal, commit_tid);
-		log_wait_commit(journal, commit_tid);
-		filemap_write_and_wait(&inode->i_data);
-	}
-	truncate_inode_pages_final(&inode->i_data);
-
-	ext3_discard_reservation(inode);
-	rsv = ei->i_block_alloc_info;
-	ei->i_block_alloc_info = NULL;
-	if (unlikely(rsv))
-		kfree(rsv);
-
-	if (!want_delete)
-		goto no_delete;
-
-	handle = start_transaction(inode);
-	if (IS_ERR(handle)) {
-		/*
-		 * If we're going to skip the normal cleanup, we still need to
-		 * make sure that the in-core orphan linked list is properly
-		 * cleaned up.
-		 */
-		ext3_orphan_del(NULL, inode);
-		goto no_delete;
-	}
-
-	if (IS_SYNC(inode))
-		handle->h_sync = 1;
-	inode->i_size = 0;
-	if (inode->i_blocks)
-		ext3_truncate(inode);
-	/*
-	 * Kill off the orphan record created when the inode lost the last
-	 * link.  Note that ext3_orphan_del() has to be able to cope with the
-	 * deletion of a non-existent orphan - ext3_truncate() could
-	 * have removed the record.
-	 */
-	ext3_orphan_del(handle, inode);
-	ei->i_dtime = get_seconds();
-
-	/*
-	 * One subtle ordering requirement: if anything has gone wrong
-	 * (transaction abort, IO errors, whatever), then we can still
-	 * do these next steps (the fs will already have been marked as
-	 * having errors), but we can't free the inode if the mark_dirty
-	 * fails.
-	 */
-	if (ext3_mark_inode_dirty(handle, inode)) {
-		/* If that failed, just dquot_drop() and be done with that */
-		dquot_drop(inode);
-		clear_inode(inode);
-	} else {
-		ext3_xattr_delete_inode(handle, inode);
-		dquot_free_inode(inode);
-		dquot_drop(inode);
-		clear_inode(inode);
-		ext3_free_inode(handle, inode);
-	}
-	ext3_journal_stop(handle);
-	return;
-no_delete:
-	clear_inode(inode);
-	dquot_drop(inode);
-}
-
-typedef struct {
-	__le32	*p;
-	__le32	key;
-	struct buffer_head *bh;
-} Indirect;
-
-static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
-{
-	p->key = *(p->p = v);
-	p->bh = bh;
-}
-
-static int verify_chain(Indirect *from, Indirect *to)
-{
-	while (from <= to && from->key == *from->p)
-		from++;
-	return (from > to);
-}
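-
-/*
- * Illustrative usage (editorial sketch, not from the original file):
- * whenever the tree may have changed underneath us, the cached chain is
- * re-checked before being trusted:
- *
- *	if (!verify_chain(chain, partial))
- *		goto changed;
- *
- * as ext3_get_branch() itself does between indirect-block reads.
- */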
-
-/**
- *	ext3_block_to_path - parse the block number into array of offsets
- *	@inode: inode in question (we are only interested in its superblock)
- *	@i_block: block number to be parsed
- *	@offsets: array to store the offsets in
- *      @boundary: set this non-zero if the referred-to block is likely to be
- *             followed (on disk) by an indirect block.
- *
- *	To store the locations of a file's data ext3 uses a data structure common
- *	for UNIX filesystems - tree of pointers anchored in the inode, with
- *	data blocks at leaves and indirect blocks in intermediate nodes.
- *	This function translates the block number into a path in that tree -
- *	the return value is the path length and @offsets[n] is the offset of
- *	the pointer to the (n+1)-th node in the n-th one. If @i_block is out of
- *	range (negative or too large) a warning is printed and zero is returned.
- *
- *	Note: function doesn't find node addresses, so no IO is needed. All
- *	we need to know is the capacity of indirect blocks (taken from the
- *	inode->i_sb).
- */
-
-/*
- * Portability note: the last comparison (check that we fit into triple
- * indirect block) is spelled differently, because otherwise on an
- * architecture with 32-bit longs and 8Kb pages we might get into trouble
- * if our filesystem had 8Kb blocks. We might use long long, but that would
- * kill us on x86. Oh, well, at least the sign propagation does not matter -
- * i_block would have to be negative in the very beginning, so we would not
- * get there at all.
- */
-
-static int ext3_block_to_path(struct inode *inode,
-			long i_block, int offsets[4], int *boundary)
-{
-	int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
-	int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
-	const long direct_blocks = EXT3_NDIR_BLOCKS,
-		indirect_blocks = ptrs,
-		double_blocks = (1 << (ptrs_bits * 2));
-	int n = 0;
-	int final = 0;
-
-	if (i_block < 0) {
-		ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
-	} else if (i_block < direct_blocks) {
-		offsets[n++] = i_block;
-		final = direct_blocks;
-	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
-		offsets[n++] = EXT3_IND_BLOCK;
-		offsets[n++] = i_block;
-		final = ptrs;
-	} else if ((i_block -= indirect_blocks) < double_blocks) {
-		offsets[n++] = EXT3_DIND_BLOCK;
-		offsets[n++] = i_block >> ptrs_bits;
-		offsets[n++] = i_block & (ptrs - 1);
-		final = ptrs;
-	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
-		offsets[n++] = EXT3_TIND_BLOCK;
-		offsets[n++] = i_block >> (ptrs_bits * 2);
-		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
-		offsets[n++] = i_block & (ptrs - 1);
-		final = ptrs;
-	} else {
-		ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
-	}
-	if (boundary)
-		*boundary = final - 1 - (i_block & (ptrs - 1));
-	return n;
-}
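-
-/*
- * Editorial sketch (hypothetical helper, not from the original file): a
- * worked use of the mapping above.  On a 1KB-block filesystem
- * EXT3_ADDR_PER_BLOCK == 256, so logical block 5000 lands in the
- * double-indirect tree: 5000 - 12 - 256 = 4732, giving the path
- * { EXT3_DIND_BLOCK, 4732 >> 8, 4732 & 255 } == { 13, 18, 124 }.
- */
-static void __maybe_unused example_block_to_path(struct inode *inode)
-{
-	int offsets[4];
-	int boundary;
-	int depth = ext3_block_to_path(inode, 5000, offsets, &boundary);
-
-	/* depth == 3 and offsets == { 13, 18, 124 } for 1KB blocks */
-	jbd_debug(4, "depth %d, top-level offset %d\n", depth, offsets[0]);
-}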
-
-/**
- *	ext3_get_branch - read the chain of indirect blocks leading to data
- *	@inode: inode in question
- *	@depth: depth of the chain (1 - direct pointer, etc.)
- *	@offsets: offsets of pointers in inode/indirect blocks
- *	@chain: place to store the result
- *	@err: here we store the error value
- *
- *	Function fills the array of triples <key, p, bh> and returns %NULL
- *	if everything went OK or the pointer to the last filled triple
- *	(incomplete one) otherwise. Upon the return chain[i].key contains
- *	the number of the (i+1)-th block in the chain (as it is stored in memory,
- *	i.e. little-endian 32-bit), chain[i].p contains the address of that
- *	number (it points into struct inode for i==0 and into the bh->b_data
- *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
- *	block for i>0 and NULL for i==0. In other words, it holds the block
- *	numbers of the chain, addresses they were taken from (and where we can
- *	verify that chain did not change) and buffer_heads hosting these
- *	numbers.
- *
- *	Function stops when it stumbles upon zero pointer (absent block)
- *		(pointer to last triple returned, *@err == 0)
- *	or when it gets an IO error reading an indirect block
- *		(ditto, *@err == -EIO)
- *	or when it notices that chain had been changed while it was reading
- *		(ditto, *@err == -EAGAIN)
- *	or when it reads all @depth-1 indirect blocks successfully and finds
- *	the whole chain, all the way to the data (returns %NULL, *err == 0).
- */
-static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
-				 Indirect chain[4], int *err)
-{
-	struct super_block *sb = inode->i_sb;
-	Indirect *p = chain;
-	struct buffer_head *bh;
-
-	*err = 0;
-	/* i_data is not going away, no lock needed */
-	add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
-	if (!p->key)
-		goto no_block;
-	while (--depth) {
-		bh = sb_bread(sb, le32_to_cpu(p->key));
-		if (!bh)
-			goto failure;
-		/* Reader: pointers */
-		if (!verify_chain(chain, p))
-			goto changed;
-		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
-		/* Reader: end */
-		if (!p->key)
-			goto no_block;
-	}
-	return NULL;
-
-changed:
-	brelse(bh);
-	*err = -EAGAIN;
-	goto no_block;
-failure:
-	*err = -EIO;
-no_block:
-	return p;
-}
-
-/**
- *	ext3_find_near - find a place for allocation with sufficient locality
- *	@inode: owner
- *	@ind: descriptor of indirect block.
- *
- *	This function returns the preferred place for block allocation.
- *	It is used when heuristic for sequential allocation fails.
- *	Rules are:
- *	  + if there is a block to the left of our position - allocate near it.
- *	  + if pointer will live in indirect block - allocate near that block.
- *	  + if pointer will live in inode - allocate in the same
- *	    cylinder group.
- *
- * In the latter case we colour the starting block by the caller's PID to
- * prevent it from clashing with concurrent allocations for a different inode
- * in the same block group.   The PID is used here so that functionally related
- * files will be close by on disk.
- *
- *	Caller must make sure that @ind is valid and will stay that way.
- */
-static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
-{
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
-	__le32 *p;
-	ext3_fsblk_t bg_start;
-	ext3_grpblk_t colour;
-
-	/* Try to find previous block */
-	for (p = ind->p - 1; p >= start; p--) {
-		if (*p)
-			return le32_to_cpu(*p);
-	}
-
-	/* No such thing, so let's try location of indirect block */
-	if (ind->bh)
-		return ind->bh->b_blocknr;
-
-	/*
-	 * Is it going to be referred to from the inode itself? OK, just put it
-	 * into the same cylinder group then.
-	 */
-	bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
-	colour = (current->pid % 16) *
-			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-	return bg_start + colour;
-}
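-
-/*
- * Worked example of the colouring above (editorial sketch, not from the
- * original file): with 8192 blocks per group, two writers in PIDs 100
- * and 101 start their searches 512 blocks apart,
- *
- *	colour(100) = (100 % 16) * (8192 / 16) = 4 * 512 = 2048
- *	colour(101) = (101 % 16) * (8192 / 16) = 5 * 512 = 2560
- *
- * so their allocations tend not to interleave within the group.
- */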
-
-/**
- *	ext3_find_goal - find a preferred place for allocation.
- *	@inode: owner
- *	@block:  block we want
- *	@partial: pointer to the last triple within a chain
- *
- *	Normally this function finds the preferred place for block allocation
- *	and returns it.
- */
-
-static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
-				   Indirect *partial)
-{
-	struct ext3_block_alloc_info *block_i;
-
-	block_i =  EXT3_I(inode)->i_block_alloc_info;
-
-	/*
-	 * try the heuristic for sequential allocation,
-	 * failing that at least try to get decent locality.
-	 */
-	if (block_i && (block == block_i->last_alloc_logical_block + 1)
-		&& (block_i->last_alloc_physical_block != 0)) {
-		return block_i->last_alloc_physical_block + 1;
-	}
-
-	return ext3_find_near(inode, partial);
-}
-
-/**
- *	ext3_blks_to_allocate - Look up the block map and count the number
- *	of direct blocks that need to be allocated for the given branch.
- *
- *	@branch: chain of indirect blocks
- *	@k: number of blocks needed for indirect blocks
- *	@blks: number of data blocks to be mapped.
- *	@blocks_to_boundary:  the offset in the indirect block
- *
- *	return the total number of blocks to be allocated, including the
- *	direct and indirect blocks.
- */
-static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
-		int blocks_to_boundary)
-{
-	unsigned long count = 0;
-
-	/*
-	 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
-	 * so clearly no blocks on that path have been allocated either
-	 */
-	if (k > 0) {
-		/* right now we don't handle cross boundary allocation */
-		if (blks < blocks_to_boundary + 1)
-			count += blks;
-		else
-			count += blocks_to_boundary + 1;
-		return count;
-	}
-
-	count++;
-	while (count < blks && count <= blocks_to_boundary &&
-		le32_to_cpu(*(branch[0].p + count)) == 0) {
-		count++;
-	}
-	return count;
-}
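-
-/*
- * Worked example (editorial sketch, not from the original file): asked
- * for blks = 10 data blocks with blocks_to_boundary = 3 and k > 0 (an
- * indirect block still missing), the function returns
- * blocks_to_boundary + 1 = 4, since allocation never crosses an
- * indirect-block boundary in one pass.
- */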
-
-/**
- *	ext3_alloc_blocks - multiple allocate blocks needed for a branch
- *	@handle: handle for this transaction
- *	@inode: owner
- *	@goal: preferred place for allocation
- *	@indirect_blks: the number of blocks that need to be allocated for
- *			indirect blocks
- *	@blks:	number of blocks that need to be allocated for direct blocks
- *	@new_blocks: on return it will store the new block numbers for
- *	the indirect blocks (if needed) and the first direct block
- *	@err: here we store the error value
- *
- *	return the number of direct blocks allocated
- */
-static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
-			ext3_fsblk_t goal, int indirect_blks, int blks,
-			ext3_fsblk_t new_blocks[4], int *err)
-{
-	int target, i;
-	unsigned long count = 0;
-	int index = 0;
-	ext3_fsblk_t current_block = 0;
-	int ret = 0;
-
-	/*
-	 * Here we try to allocate the requested multiple blocks at once,
-	 * on a best-effort basis.
-	 * To build a branch, we should allocate blocks for
-	 * the indirect blocks (if not allocated yet), and at least
-	 * the first direct block of this branch.  That's the
-	 * minimum number of blocks we need to allocate (the required number).
-	 */
-	target = blks + indirect_blks;
-
-	while (1) {
-		count = target;
-		/* allocating blocks for indirect blocks and direct blocks */
-		current_block = ext3_new_blocks(handle,inode,goal,&count,err);
-		if (*err)
-			goto failed_out;
-
-		target -= count;
-		/* allocate blocks for indirect blocks */
-		while (index < indirect_blks && count) {
-			new_blocks[index++] = current_block++;
-			count--;
-		}
-
-		if (count > 0)
-			break;
-	}
-
-	/* save the new block number for the first direct block */
-	new_blocks[index] = current_block;
-
-	/* total number of blocks allocated for direct blocks */
-	ret = count;
-	*err = 0;
-	return ret;
-failed_out:
-	for (i = 0; i < index; i++)
-		ext3_free_blocks(handle, inode, new_blocks[i], 1);
-	return ret;
-}
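-
-/*
- * Worked trace (editorial sketch, not from the original file): with
- * indirect_blks = 2 and blks = 4, target starts at 6.  If
- * ext3_new_blocks() hands back a 3-block extent, two blocks become the
- * missing indirect blocks, count drops to 1 > 0 and the loop exits:
- * new_blocks[2] holds the first direct block and the function returns 1,
- * i.e. only one of the four requested direct blocks was allocated.
- */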
-
-/**
- *	ext3_alloc_branch - allocate and set up a chain of blocks.
- *	@handle: handle for this transaction
- *	@inode: owner
- *	@indirect_blks: number of allocated indirect blocks
- *	@blks: number of allocated direct blocks
- *	@goal: preferred place for allocation
- *	@offsets: offsets (in the blocks) at which to store the next-level pointers.
- *	@branch: place to store the chain in.
- *
- *	This function allocates blocks, zeroes out all but the last one,
- *	links them into chain and (if we are synchronous) writes them to disk.
- *	In other words, it prepares a branch that can be spliced onto the
- *	inode. It stores the information about that chain in the branch[], in
- *	the same format as ext3_get_branch() would do. We are calling it after
- *	we had read the existing part of chain and partial points to the last
- *	triple of that (one with zero ->key). Upon the exit we have the same
- *	picture as after the successful ext3_get_block(), except that in one
- *	place chain is disconnected - *branch->p is still zero (we did not
- *	set the last link), but branch->key contains the number that should
- *	be placed into *branch->p to fill that gap.
- *
- *	If allocation fails we free all blocks we've allocated (and forget
- *	their buffer_heads) and return the error value from the failed
- *	ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
- *	as described above and return 0.
- */
-static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
-			int indirect_blks, int *blks, ext3_fsblk_t goal,
-			int *offsets, Indirect *branch)
-{
-	int blocksize = inode->i_sb->s_blocksize;
-	int i, n = 0;
-	int err = 0;
-	struct buffer_head *bh;
-	int num;
-	ext3_fsblk_t new_blocks[4];
-	ext3_fsblk_t current_block;
-
-	num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
-				*blks, new_blocks, &err);
-	if (err)
-		return err;
-
-	branch[0].key = cpu_to_le32(new_blocks[0]);
-	/*
-	 * metadata blocks and data blocks are allocated.
-	 */
-	for (n = 1; n <= indirect_blks;  n++) {
-		/*
-		 * Get buffer_head for parent block, zero it out
-		 * and set the pointer to new one, then send
-		 * parent to disk.
-		 */
-		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
-		if (unlikely(!bh)) {
-			err = -ENOMEM;
-			goto failed;
-		}
-		branch[n].bh = bh;
-		lock_buffer(bh);
-		BUFFER_TRACE(bh, "call get_create_access");
-		err = ext3_journal_get_create_access(handle, bh);
-		if (err) {
-			unlock_buffer(bh);
-			brelse(bh);
-			goto failed;
-		}
-
-		memset(bh->b_data, 0, blocksize);
-		branch[n].p = (__le32 *) bh->b_data + offsets[n];
-		branch[n].key = cpu_to_le32(new_blocks[n]);
-		*branch[n].p = branch[n].key;
-		if (n == indirect_blks) {
-			current_block = new_blocks[n];
-			/*
-			 * End of chain, update the last new metablock of
-			 * the chain to point to the newly allocated
-			 * data block numbers
-			 */
-			for (i = 1; i < num; i++)
-				*(branch[n].p + i) = cpu_to_le32(++current_block);
-		}
-		BUFFER_TRACE(bh, "marking uptodate");
-		set_buffer_uptodate(bh);
-		unlock_buffer(bh);
-
-		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-		err = ext3_journal_dirty_metadata(handle, bh);
-		if (err)
-			goto failed;
-	}
-	*blks = num;
-	return err;
-failed:
-	/* Allocation failed, free what we already allocated */
-	for (i = 1; i <= n ; i++) {
-		BUFFER_TRACE(branch[i].bh, "call journal_forget");
-		ext3_journal_forget(handle, branch[i].bh);
-	}
-	for (i = 0; i < indirect_blks; i++)
-		ext3_free_blocks(handle, inode, new_blocks[i], 1);
-
-	ext3_free_blocks(handle, inode, new_blocks[i], num);
-
-	return err;
-}
-
-/**
- * ext3_splice_branch - splice the allocated branch onto inode.
- * @handle: handle for this transaction
- * @inode: owner
- * @block: (logical) number of block we are adding
- * @where: location of missing link
- * @num:   number of indirect blocks we are adding
- * @blks:  number of direct blocks we are adding
- *
- * This function fills the missing link and does all housekeeping needed in
- * inode (->i_blocks, etc.). In case of success we end up with the full
- * chain to new block and return 0.
- */
-static int ext3_splice_branch(handle_t *handle, struct inode *inode,
-			long block, Indirect *where, int num, int blks)
-{
-	int i;
-	int err = 0;
-	struct ext3_block_alloc_info *block_i;
-	ext3_fsblk_t current_block;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct timespec now;
-
-	block_i = ei->i_block_alloc_info;
-	/*
-	 * If we're splicing into a [td]indirect block (as opposed to the
-	 * inode) then we need to get write access to the [td]indirect block
-	 * before the splice.
-	 */
-	if (where->bh) {
-		BUFFER_TRACE(where->bh, "get_write_access");
-		err = ext3_journal_get_write_access(handle, where->bh);
-		if (err)
-			goto err_out;
-	}
-	/* That's it */
-
-	*where->p = where->key;
-
-	/*
-	 * Update the host buffer_head or inode to point to the just-allocated
-	 * direct blocks
-	 */
-	if (num == 0 && blks > 1) {
-		current_block = le32_to_cpu(where->key) + 1;
-		for (i = 1; i < blks; i++)
-			*(where->p + i) = cpu_to_le32(current_block++);
-	}
-
-	/*
-	 * update the most recently allocated logical & physical block
-	 * in i_block_alloc_info, to help find the proper goal block for the
-	 * next allocation
-	 */
-	if (block_i) {
-		block_i->last_alloc_logical_block = block + blks - 1;
-		block_i->last_alloc_physical_block =
-				le32_to_cpu(where[num].key) + blks - 1;
-	}
-
-	/* We are done with atomic stuff, now do the rest of housekeeping */
-	now = CURRENT_TIME_SEC;
-	if (!timespec_equal(&inode->i_ctime, &now) || !where->bh) {
-		inode->i_ctime = now;
-		ext3_mark_inode_dirty(handle, inode);
-	}
-	/* ext3_mark_inode_dirty already updated i_sync_tid */
-	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
-
-	/* had we spliced it onto indirect block? */
-	if (where->bh) {
-		/*
-		 * If we spliced it onto an indirect block, we haven't
-		 * altered the inode.  Note however that if it is being spliced
-		 * onto an indirect block at the very end of the file (the
-		 * file is growing) then we *will* alter the inode to reflect
-		 * the new i_size.  But that is not done here - it is done in
-		 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
-		 */
-		jbd_debug(5, "splicing indirect only\n");
-		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
-		err = ext3_journal_dirty_metadata(handle, where->bh);
-		if (err)
-			goto err_out;
-	} else {
-		/*
-		 * OK, we spliced it into the inode itself on a direct block.
-		 * Inode was dirtied above.
-		 */
-		jbd_debug(5, "splicing direct\n");
-	}
-	return err;
-
-err_out:
-	for (i = 1; i <= num; i++) {
-		BUFFER_TRACE(where[i].bh, "call journal_forget");
-		ext3_journal_forget(handle, where[i].bh);
-		ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
-	}
-	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
-
-	return err;
-}
-
-/*
- * Allocation strategy is simple: if we have to allocate something, we will
- * have to go the whole way to the leaf. So let's do it before attaching anything
- * to tree, set linkage between the newborn blocks, write them if sync is
- * required, recheck the path, free and repeat if check fails, otherwise
- * set the last missing link (that will protect us from any truncate-generated
- * removals - all blocks on the path are immune now) and possibly force the
- * write on the parent block.
- * That has a nice additional property: no special recovery from the failed
- * allocations is needed - we simply release blocks and do not touch anything
- * reachable from inode.
- *
- * `handle' can be NULL if create == 0.
- *
- * The BKL may not be held on entry here.  Be sure to take it early.
- * return > 0, # of blocks mapped or allocated.
- * return = 0, if plain lookup failed.
- * return < 0, error case.
- */
-int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
-		sector_t iblock, unsigned long maxblocks,
-		struct buffer_head *bh_result,
-		int create)
-{
-	int err = -EIO;
-	int offsets[4];
-	Indirect chain[4];
-	Indirect *partial;
-	ext3_fsblk_t goal;
-	int indirect_blks;
-	int blocks_to_boundary = 0;
-	int depth;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	int count = 0;
-	ext3_fsblk_t first_block = 0;
-
-
-	trace_ext3_get_blocks_enter(inode, iblock, maxblocks, create);
-	J_ASSERT(handle != NULL || create == 0);
-	depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
-
-	if (depth == 0)
-		goto out;
-
-	partial = ext3_get_branch(inode, depth, offsets, chain, &err);
-
-	/* Simplest case - block found, no allocation needed */
-	if (!partial) {
-		first_block = le32_to_cpu(chain[depth - 1].key);
-		clear_buffer_new(bh_result);
-		count++;
-		/* map more blocks */
-		while (count < maxblocks && count <= blocks_to_boundary) {
-			ext3_fsblk_t blk;
-
-			if (!verify_chain(chain, chain + depth - 1)) {
-				/*
-				 * Indirect block might be removed by
-				 * truncate while we were reading it.
-				 * Handling of that case: forget what we've
-				 * got now. Flag the err as EAGAIN, so it
-				 * will reread.
-				 */
-				err = -EAGAIN;
-				count = 0;
-				break;
-			}
-			blk = le32_to_cpu(*(chain[depth-1].p + count));
-
-			if (blk == first_block + count)
-				count++;
-			else
-				break;
-		}
-		if (err != -EAGAIN)
-			goto got_it;
-	}
-
-	/* Next simple case - plain lookup or failed read of indirect block */
-	if (!create || err == -EIO)
-		goto cleanup;
-
-	/*
-	 * Block out ext3_truncate while we alter the tree
-	 */
-	mutex_lock(&ei->truncate_mutex);
-
-	/*
-	 * If the indirect block is missing while we are reading
-	 * the chain (ext3_get_branch() returns -EAGAIN), or
-	 * if the chain has been changed after we grab the semaphore,
-	 * (either because another process truncated this branch, or
-	 * another get_block allocated this branch) re-grab the chain to see if
-	 * the requested block has been allocated or not.
-	 *
-	 * Since we already block the truncate/other get_block
-	 * at this point, we will have the current copy of the chain when we
-	 * splice the branch into the tree.
-	 */
-	if (err == -EAGAIN || !verify_chain(chain, partial)) {
-		while (partial > chain) {
-			brelse(partial->bh);
-			partial--;
-		}
-		partial = ext3_get_branch(inode, depth, offsets, chain, &err);
-		if (!partial) {
-			count++;
-			mutex_unlock(&ei->truncate_mutex);
-			if (err)
-				goto cleanup;
-			clear_buffer_new(bh_result);
-			goto got_it;
-		}
-	}
-
-	/*
-	 * Okay, we need to do block allocation.  Lazily initialize the block
-	 * allocation info here if necessary
-	*/
-	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
-		ext3_init_block_alloc_info(inode);
-
-	goal = ext3_find_goal(inode, iblock, partial);
-
-	/* the number of blocks need to allocate for [d,t]indirect blocks */
-	indirect_blks = (chain + depth) - partial - 1;
-
-	/*
-	 * Next look up the indirect map to count the total number of
-	 * direct blocks to allocate for this branch.
-	 */
-	count = ext3_blks_to_allocate(partial, indirect_blks,
-					maxblocks, blocks_to_boundary);
-	err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
-				offsets + (partial - chain), partial);
-
-	/*
-	 * The ext3_splice_branch call will free and forget any buffers
-	 * on the new chain if there is a failure, but that risks using
-	 * up transaction credits, especially for bitmaps where the
-	 * credits cannot be returned.  Can we handle this somehow?  We
-	 * may need to return -EAGAIN upwards in the worst case.  --sct
-	 */
-	if (!err)
-		err = ext3_splice_branch(handle, inode, iblock,
-					partial, indirect_blks, count);
-	mutex_unlock(&ei->truncate_mutex);
-	if (err)
-		goto cleanup;
-
-	set_buffer_new(bh_result);
-got_it:
-	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
-	if (count > blocks_to_boundary)
-		set_buffer_boundary(bh_result);
-	err = count;
-	/* Clean up and exit */
-	partial = chain + depth - 1;	/* the whole chain */
-cleanup:
-	while (partial > chain) {
-		BUFFER_TRACE(partial->bh, "call brelse");
-		brelse(partial->bh);
-		partial--;
-	}
-	BUFFER_TRACE(bh_result, "returned");
-out:
-	trace_ext3_get_blocks_exit(inode, iblock,
-				   depth ? le32_to_cpu(chain[depth-1].key) : 0,
-				   count, err);
-	return err;
-}
-
-/* Maximum number of blocks we map for direct IO at once. */
-#define DIO_MAX_BLOCKS 4096
-/*
- * Number of credits we need for writing DIO_MAX_BLOCKS:
- * We need sb + group descriptor + bitmap + inode -> 4
- * For B blocks with A block pointers per block we need:
- * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
- * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
- */
-#define DIO_CREDITS 25
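-
-/*
- * Worked instance of the estimate above (editorial sketch, not from the
- * original file), with B = 4096 and A = 256:
- *
- *	triple indirect:               1
- *	double indirect:  B/A/A + 2 =  2
- *	indirect:         B/A + 2   = 18
- *
- * for a total of 21, plus sb + group descriptor + bitmap + inode (4),
- * giving the 25 credits above.
- */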
-
-static int ext3_get_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh_result, int create)
-{
-	handle_t *handle = ext3_journal_current_handle();
-	int ret = 0, started = 0;
-	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
-
-	if (create && !handle) {	/* Direct IO write... */
-		if (max_blocks > DIO_MAX_BLOCKS)
-			max_blocks = DIO_MAX_BLOCKS;
-		handle = ext3_journal_start(inode, DIO_CREDITS +
-				EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
-		if (IS_ERR(handle)) {
-			ret = PTR_ERR(handle);
-			goto out;
-		}
-		started = 1;
-	}
-
-	ret = ext3_get_blocks_handle(handle, inode, iblock,
-					max_blocks, bh_result, create);
-	if (ret > 0) {
-		bh_result->b_size = (ret << inode->i_blkbits);
-		ret = 0;
-	}
-	if (started)
-		ext3_journal_stop(handle);
-out:
-	return ret;
-}
-
-int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
-		u64 start, u64 len)
-{
-	return generic_block_fiemap(inode, fieinfo, start, len,
-				    ext3_get_block);
-}
-
-/*
- * `handle' can be NULL if create is zero
- */
-struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
-				long block, int create, int *errp)
-{
-	struct buffer_head dummy;
-	int fatal = 0, err;
-
-	J_ASSERT(handle != NULL || create == 0);
-
-	dummy.b_state = 0;
-	dummy.b_blocknr = -1000;
-	buffer_trace_init(&dummy.b_history);
-	err = ext3_get_blocks_handle(handle, inode, block, 1,
-					&dummy, create);
-	/*
-	 * ext3_get_blocks_handle() returns the number of blocks
-	 * mapped, or 0 in case of a hole.
-	 */
-	if (err > 0) {
-		WARN_ON(err > 1);
-		err = 0;
-	}
-	*errp = err;
-	if (!err && buffer_mapped(&dummy)) {
-		struct buffer_head *bh;
-		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
-		if (unlikely(!bh)) {
-			*errp = -ENOMEM;
-			goto err;
-		}
-		if (buffer_new(&dummy)) {
-			J_ASSERT(create != 0);
-			J_ASSERT(handle != NULL);
-
-			/*
-			 * Now that we do not always journal data, we should
-			 * keep in mind whether this should always journal the
-			 * new buffer as metadata.  For now, regular file
-			 * writes use ext3_get_block instead, so it's not a
-			 * problem.
-			 */
-			lock_buffer(bh);
-			BUFFER_TRACE(bh, "call get_create_access");
-			fatal = ext3_journal_get_create_access(handle, bh);
-			if (!fatal && !buffer_uptodate(bh)) {
-				memset(bh->b_data,0,inode->i_sb->s_blocksize);
-				set_buffer_uptodate(bh);
-			}
-			unlock_buffer(bh);
-			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			err = ext3_journal_dirty_metadata(handle, bh);
-			if (!fatal)
-				fatal = err;
-		} else {
-			BUFFER_TRACE(bh, "not a new buffer");
-		}
-		if (fatal) {
-			*errp = fatal;
-			brelse(bh);
-			bh = NULL;
-		}
-		return bh;
-	}
-err:
-	return NULL;
-}
-
-struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
-			       int block, int create, int *err)
-{
-	struct buffer_head * bh;
-
-	bh = ext3_getblk(handle, inode, block, create, err);
-	if (!bh)
-		return bh;
-	if (bh_uptodate_or_lock(bh))
-		return bh;
-	get_bh(bh);
-	bh->b_end_io = end_buffer_read_sync;
-	submit_bh(READ | REQ_META | REQ_PRIO, bh);
-	wait_on_buffer(bh);
-	if (buffer_uptodate(bh))
-		return bh;
-	put_bh(bh);
-	*err = -EIO;
-	return NULL;
-}
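-
-/*
- * Typical use (editorial sketch, not from the original file): callers
- * such as the directory code read one block at a time through this
- * journal-aware helper and must check both the return value and *err:
- *
- *	bh = ext3_bread(handle, dir, n, 0, &err);
- *	if (!bh && err)
- *		... real I/O error ...
- *	else if (!bh)
- *		... hole: block n is simply not mapped ...
- */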
-
-static int walk_page_buffers(	handle_t *handle,
-				struct buffer_head *head,
-				unsigned from,
-				unsigned to,
-				int *partial,
-				int (*fn)(	handle_t *handle,
-						struct buffer_head *bh))
-{
-	struct buffer_head *bh;
-	unsigned block_start, block_end;
-	unsigned blocksize = head->b_size;
-	int err, ret = 0;
-	struct buffer_head *next;
-
-	for (	bh = head, block_start = 0;
-		ret == 0 && (bh != head || !block_start);
-		block_start = block_end, bh = next)
-	{
-		next = bh->b_this_page;
-		block_end = block_start + blocksize;
-		if (block_end <= from || block_start >= to) {
-			if (partial && !buffer_uptodate(bh))
-				*partial = 1;
-			continue;
-		}
-		err = (*fn)(handle, bh);
-		if (!ret)
-			ret = err;
-	}
-	return ret;
-}
-
-/*
- * To preserve ordering, it is essential that the hole instantiation and
- * the data write be encapsulated in a single transaction.  We cannot
- * close off a transaction and start a new one between the ext3_get_block()
- * and the commit_write().  So the start of prepare_write() is the right
- * place to do the journal_start.
- *
- * Also, this function can nest inside ext3_writepage() ->
- * block_write_full_page(). In that case, we *know* that ext3_writepage()
- * has generated enough buffer credits to do the whole page.  So we won't
- * block on the journal in that case, which is good, because the caller may
- * be PF_MEMALLOC.
- *
- * By accident, ext3 can be reentered when a transaction is open via
- * quota file writes.  If we were to commit the transaction while thus
- * reentered, there can be a deadlock - we would be holding a quota
- * lock, and the commit would never complete if another thread had a
- * transaction open and was blocking on the quota lock - a ranking
- * violation.
- *
- * So what we do is to rely on the fact that journal_stop/journal_start
- * will _not_ run commit under these circumstances because handle->h_ref
- * is elevated.  We'll still have enough credits for the tiny quotafile
- * write.
- */
-static int do_journal_get_write_access(handle_t *handle,
-					struct buffer_head *bh)
-{
-	int dirty = buffer_dirty(bh);
-	int ret;
-
-	if (!buffer_mapped(bh) || buffer_freed(bh))
-		return 0;
-	/*
-	 * __block_write_begin() could have dirtied some buffers. Clean
-	 * the dirty bit as journal_get_write_access() could complain
-	 * otherwise about fs integrity issues. Setting of the dirty bit
-	 * by __block_write_begin() isn't a real problem here as we clear
-	 * the bit before releasing a page lock and thus writeback cannot
-	 * ever write the buffer.
-	 */
-	if (dirty)
-		clear_buffer_dirty(bh);
-	ret = ext3_journal_get_write_access(handle, bh);
-	if (!ret && dirty)
-		ret = ext3_journal_dirty_metadata(handle, bh);
-	return ret;
-}
-
-/*
- * Truncate blocks that were not used by write. We have to truncate the
- * pagecache as well so that corresponding buffers get properly unmapped.
- */
-static void ext3_truncate_failed_write(struct inode *inode)
-{
-	truncate_inode_pages(inode->i_mapping, inode->i_size);
-	ext3_truncate(inode);
-}
-
-/*
- * Truncate blocks that were not used by direct IO write. We have to zero out
- * the last file block as well because direct IO might have written to it.
- */
-static void ext3_truncate_failed_direct_write(struct inode *inode)
-{
-	ext3_block_truncate_page(inode, inode->i_size);
-	ext3_truncate(inode);
-}
-
-static int ext3_write_begin(struct file *file, struct address_space *mapping,
-				loff_t pos, unsigned len, unsigned flags,
-				struct page **pagep, void **fsdata)
-{
-	struct inode *inode = mapping->host;
-	int ret;
-	handle_t *handle;
-	int retries = 0;
-	struct page *page;
-	pgoff_t index;
-	unsigned from, to;
-	/* Reserve one more block for addition to the orphan list in case
-	 * we allocate blocks but the write fails for some reason */
-	int needed_blocks = ext3_writepage_trans_blocks(inode) + 1;
-
-	trace_ext3_write_begin(inode, pos, len, flags);
-
-	index = pos >> PAGE_CACHE_SHIFT;
-	from = pos & (PAGE_CACHE_SIZE - 1);
-	to = from + len;
-
-retry:
-	page = grab_cache_page_write_begin(mapping, index, flags);
-	if (!page)
-		return -ENOMEM;
-	*pagep = page;
-
-	handle = ext3_journal_start(inode, needed_blocks);
-	if (IS_ERR(handle)) {
-		unlock_page(page);
-		page_cache_release(page);
-		ret = PTR_ERR(handle);
-		goto out;
-	}
-	ret = __block_write_begin(page, pos, len, ext3_get_block);
-	if (ret)
-		goto write_begin_failed;
-
-	if (ext3_should_journal_data(inode)) {
-		ret = walk_page_buffers(handle, page_buffers(page),
-				from, to, NULL, do_journal_get_write_access);
-	}
-write_begin_failed:
-	if (ret) {
-		/*
-		 * block_write_begin may have instantiated a few blocks
-		 * outside i_size.  Trim these off again. Don't need
-		 * i_size_read because we hold i_mutex.
-		 *
-		 * Add inode to orphan list in case we crash before truncate
-		 * finishes. Do this only if ext3_can_truncate() agrees so
-		 * that orphan processing code is happy.
-		 */
-		if (pos + len > inode->i_size && ext3_can_truncate(inode))
-			ext3_orphan_add(handle, inode);
-		ext3_journal_stop(handle);
-		unlock_page(page);
-		page_cache_release(page);
-		if (pos + len > inode->i_size)
-			ext3_truncate_failed_write(inode);
-	}
-	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
-		goto retry;
-out:
-	return ret;
-}
-
-
-int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
-{
-	int err = journal_dirty_data(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(__func__, __func__,
-						bh, handle, err);
-	return err;
-}
-
-/* For ordered writepage and write_end functions */
-static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
-{
-	/*
-	 * Write could have mapped the buffer but it didn't copy the data in
-	 * yet. So avoid filing such a buffer into a transaction.
-	 */
-	if (buffer_mapped(bh) && buffer_uptodate(bh))
-		return ext3_journal_dirty_data(handle, bh);
-	return 0;
-}
-
-/* For write_end() in data=journal mode */
-static int write_end_fn(handle_t *handle, struct buffer_head *bh)
-{
-	if (!buffer_mapped(bh) || buffer_freed(bh))
-		return 0;
-	set_buffer_uptodate(bh);
-	return ext3_journal_dirty_metadata(handle, bh);
-}
-
-/*
- * This is nasty and subtle: ext3_write_begin() could have allocated blocks
- * for the whole page but later we failed to copy the data in. Update inode
- * size according to what we managed to copy. The rest is going to be
- * truncated in write_end function.
- */
-static void update_file_sizes(struct inode *inode, loff_t pos, unsigned copied)
-{
-	/* What matters to us is i_disksize. We don't write i_size anywhere */
-	if (pos + copied > inode->i_size)
-		i_size_write(inode, pos + copied);
-	if (pos + copied > EXT3_I(inode)->i_disksize) {
-		EXT3_I(inode)->i_disksize = pos + copied;
-		mark_inode_dirty(inode);
-	}
-}
-
-/*
- * We need to pick up the new inode size which generic_commit_write gave us.
- * `file' can be NULL - e.g., when called from page_symlink().
- *
- * ext3 never places buffers on inode->i_mapping->private_list.  Metadata
- * buffers are managed internally.
- */
-static int ext3_ordered_write_end(struct file *file,
-				struct address_space *mapping,
-				loff_t pos, unsigned len, unsigned copied,
-				struct page *page, void *fsdata)
-{
-	handle_t *handle = ext3_journal_current_handle();
-	struct inode *inode = file->f_mapping->host;
-	unsigned from, to;
-	int ret = 0, ret2;
-
-	trace_ext3_ordered_write_end(inode, pos, len, copied);
-	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
-
-	from = pos & (PAGE_CACHE_SIZE - 1);
-	to = from + copied;
-	ret = walk_page_buffers(handle, page_buffers(page),
-		from, to, NULL, journal_dirty_data_fn);
-
-	if (ret == 0)
-		update_file_sizes(inode, pos, copied);
-	/*
-	 * There may be allocated blocks outside of i_size because
-	 * we failed to copy some data. Prepare for truncate.
-	 */
-	if (pos + len > inode->i_size && ext3_can_truncate(inode))
-		ext3_orphan_add(handle, inode);
-	ret2 = ext3_journal_stop(handle);
-	if (!ret)
-		ret = ret2;
-	unlock_page(page);
-	page_cache_release(page);
-
-	if (pos + len > inode->i_size)
-		ext3_truncate_failed_write(inode);
-	return ret ? ret : copied;
-}
-
-static int ext3_writeback_write_end(struct file *file,
-				struct address_space *mapping,
-				loff_t pos, unsigned len, unsigned copied,
-				struct page *page, void *fsdata)
-{
-	handle_t *handle = ext3_journal_current_handle();
-	struct inode *inode = file->f_mapping->host;
-	int ret;
-
-	trace_ext3_writeback_write_end(inode, pos, len, copied);
-	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
-	update_file_sizes(inode, pos, copied);
-	/*
-	 * There may be allocated blocks outside of i_size because
-	 * we failed to copy some data. Prepare for truncate.
-	 */
-	if (pos + len > inode->i_size && ext3_can_truncate(inode))
-		ext3_orphan_add(handle, inode);
-	ret = ext3_journal_stop(handle);
-	unlock_page(page);
-	page_cache_release(page);
-
-	if (pos + len > inode->i_size)
-		ext3_truncate_failed_write(inode);
-	return ret ? ret : copied;
-}
-
-static int ext3_journalled_write_end(struct file *file,
-				struct address_space *mapping,
-				loff_t pos, unsigned len, unsigned copied,
-				struct page *page, void *fsdata)
-{
-	handle_t *handle = ext3_journal_current_handle();
-	struct inode *inode = mapping->host;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	int ret = 0, ret2;
-	int partial = 0;
-	unsigned from, to;
-
-	trace_ext3_journalled_write_end(inode, pos, len, copied);
-	from = pos & (PAGE_CACHE_SIZE - 1);
-	to = from + len;
-
-	if (copied < len) {
-		if (!PageUptodate(page))
-			copied = 0;
-		page_zero_new_buffers(page, from + copied, to);
-		to = from + copied;
-	}
-
-	ret = walk_page_buffers(handle, page_buffers(page), from,
-				to, &partial, write_end_fn);
-	if (!partial)
-		SetPageUptodate(page);
-
-	if (pos + copied > inode->i_size)
-		i_size_write(inode, pos + copied);
-	/*
-	 * There may be allocated blocks outside of i_size because
-	 * we failed to copy some data. Prepare for truncate.
-	 */
-	if (pos + len > inode->i_size && ext3_can_truncate(inode))
-		ext3_orphan_add(handle, inode);
-	ext3_set_inode_state(inode, EXT3_STATE_JDATA);
-	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
-	if (inode->i_size > ei->i_disksize) {
-		ei->i_disksize = inode->i_size;
-		ret2 = ext3_mark_inode_dirty(handle, inode);
-		if (!ret)
-			ret = ret2;
-	}
-
-	ret2 = ext3_journal_stop(handle);
-	if (!ret)
-		ret = ret2;
-	unlock_page(page);
-	page_cache_release(page);
-
-	if (pos + len > inode->i_size)
-		ext3_truncate_failed_write(inode);
-	return ret ? ret : copied;
-}
-
-/*
- * bmap() is special.  It gets used by applications such as lilo and by
- * the swapper to find the on-disk block of a specific piece of data.
- *
- * Naturally, this is dangerous if the block concerned is still in the
- * journal.  If somebody makes a swapfile on an ext3 data-journaling
- * filesystem and enables swap, then they may get a nasty shock when the
- * data being swapped to that swapfile suddenly gets overwritten by
- * the original zeros written out previously to the journal and
- * awaiting writeback in the kernel's buffer cache.
- *
- * So, if we see any bmap calls here on a modified, data-journaled file,
- * take extra steps to flush any blocks which might be in the cache.
- */
-static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
-{
-	struct inode *inode = mapping->host;
-	journal_t *journal;
-	int err;
-
-	if (ext3_test_inode_state(inode, EXT3_STATE_JDATA)) {
-		/*
-		 * This is a REALLY heavyweight approach, but the use of
-		 * bmap on dirty files is expected to be extremely rare:
-		 * only if we run lilo or swapon on a freshly made file
-		 * do we expect this to happen.
-		 *
-		 * (bmap requires CAP_SYS_RAWIO so this does not
-		 * represent an unprivileged user DOS attack --- we'd be
-		 * in trouble if mortal users could trigger this path at
-		 * will.)
-		 *
-		 * NB. EXT3_STATE_JDATA is not set on files other than
-		 * regular files.  If somebody wants to bmap a directory
-		 * or symlink and gets confused because the buffer
-		 * hasn't yet been flushed to disk, they deserve
-		 * everything they get.
-		 */
-
-		ext3_clear_inode_state(inode, EXT3_STATE_JDATA);
-		journal = EXT3_JOURNAL(inode);
-		journal_lock_updates(journal);
-		err = journal_flush(journal);
-		journal_unlock_updates(journal);
-
-		if (err)
-			return 0;
-	}
-
-	return generic_block_bmap(mapping,block,ext3_get_block);
-}
-
-static int bget_one(handle_t *handle, struct buffer_head *bh)
-{
-	get_bh(bh);
-	return 0;
-}
-
-static int bput_one(handle_t *handle, struct buffer_head *bh)
-{
-	put_bh(bh);
-	return 0;
-}
-
-static int buffer_unmapped(handle_t *handle, struct buffer_head *bh)
-{
-	return !buffer_mapped(bh);
-}
-
-/*
- * Note that whenever we need to map blocks we start a transaction even if
- * we're not journalling data.  This is to preserve ordering: any hole
- * instantiation within __block_write_full_page -> ext3_get_block() should be
- * journalled along with the data so we don't crash and then get metadata which
- * refers to old data.
- *
- * In all journalling modes block_write_full_page() will start the I/O.
- *
- * We don't honour synchronous mounts for writepage().  That would be
- * disastrous.  Any write() or metadata operation will sync the fs for
- * us.
- */
-static int ext3_ordered_writepage(struct page *page,
-				struct writeback_control *wbc)
-{
-	struct inode *inode = page->mapping->host;
-	struct buffer_head *page_bufs;
-	handle_t *handle = NULL;
-	int ret = 0;
-	int err;
-
-	J_ASSERT(PageLocked(page));
-	/*
-	 * We don't want to warn for emergency remount. The condition is
-	 * ordered to avoid dereferencing inode->i_sb in non-error case to
-	 * avoid slow-downs.
-	 */
-	WARN_ON_ONCE(IS_RDONLY(inode) &&
-		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
-
-	/*
-	 * We give up here if we're reentered, because it might be for a
-	 * different filesystem.
-	 */
-	if (ext3_journal_current_handle())
-		goto out_fail;
-
-	trace_ext3_ordered_writepage(page);
-	if (!page_has_buffers(page)) {
-		create_empty_buffers(page, inode->i_sb->s_blocksize,
-				(1 << BH_Dirty)|(1 << BH_Uptodate));
-		page_bufs = page_buffers(page);
-	} else {
-		page_bufs = page_buffers(page);
-		if (!walk_page_buffers(NULL, page_bufs, 0, PAGE_CACHE_SIZE,
-				       NULL, buffer_unmapped)) {
-			/* Provide NULL get_block() to catch bugs if buffers
-			 * weren't really mapped */
-			return block_write_full_page(page, NULL, wbc);
-		}
-	}
-	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
-
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		goto out_fail;
-	}
-
-	walk_page_buffers(handle, page_bufs, 0,
-			PAGE_CACHE_SIZE, NULL, bget_one);
-
-	ret = block_write_full_page(page, ext3_get_block, wbc);
-
-	/*
-	 * The page can become unlocked at any point now, and
-	 * truncate can then come in and change things.  So we
-	 * can't touch *page from now on.  But *page_bufs is
-	 * safe due to elevated refcount.
-	 */
-
-	/*
-	 * And attach them to the current transaction.  But only if
-	 * block_write_full_page() succeeded.  Otherwise they are unmapped,
-	 * and generally junk.
-	 */
-	if (ret == 0)
-		ret = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
-					NULL, journal_dirty_data_fn);
-	walk_page_buffers(handle, page_bufs, 0,
-			PAGE_CACHE_SIZE, NULL, bput_one);
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-
-out_fail:
-	redirty_page_for_writepage(wbc, page);
-	unlock_page(page);
-	return ret;
-}
-
-static int ext3_writeback_writepage(struct page *page,
-				struct writeback_control *wbc)
-{
-	struct inode *inode = page->mapping->host;
-	handle_t *handle = NULL;
-	int ret = 0;
-	int err;
-
-	J_ASSERT(PageLocked(page));
-	/*
-	 * We don't want to warn for emergency remount. The condition is
-	 * ordered to avoid dereferencing inode->i_sb in non-error case to
-	 * avoid slow-downs.
-	 */
-	WARN_ON_ONCE(IS_RDONLY(inode) &&
-		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
-
-	if (ext3_journal_current_handle())
-		goto out_fail;
-
-	trace_ext3_writeback_writepage(page);
-	if (page_has_buffers(page)) {
-		if (!walk_page_buffers(NULL, page_buffers(page), 0,
-				      PAGE_CACHE_SIZE, NULL, buffer_unmapped)) {
-			/* Provide NULL get_block() to catch bugs if buffers
-			 * weren't really mapped */
-			return block_write_full_page(page, NULL, wbc);
-		}
-	}
-
-	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		goto out_fail;
-	}
-
-	ret = block_write_full_page(page, ext3_get_block, wbc);
-
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-
-out_fail:
-	redirty_page_for_writepage(wbc, page);
-	unlock_page(page);
-	return ret;
-}
-
-static int ext3_journalled_writepage(struct page *page,
-				struct writeback_control *wbc)
-{
-	struct inode *inode = page->mapping->host;
-	handle_t *handle = NULL;
-	int ret = 0;
-	int err;
-
-	J_ASSERT(PageLocked(page));
-	/*
-	 * We don't want to warn for emergency remount. The condition is
-	 * ordered to avoid dereferencing inode->i_sb in non-error case to
-	 * avoid slow-downs.
-	 */
-	WARN_ON_ONCE(IS_RDONLY(inode) &&
-		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
-
-	trace_ext3_journalled_writepage(page);
-	if (!page_has_buffers(page) || PageChecked(page)) {
-		if (ext3_journal_current_handle())
-			goto no_write;
-
-		handle = ext3_journal_start(inode,
-					    ext3_writepage_trans_blocks(inode));
-		if (IS_ERR(handle)) {
-			ret = PTR_ERR(handle);
-			goto no_write;
-		}
-		/*
-		 * It's mmapped pagecache.  Add buffers and journal it.  There
-		 * doesn't seem much point in redirtying the page here.
-		 */
-		ClearPageChecked(page);
-		ret = __block_write_begin(page, 0, PAGE_CACHE_SIZE,
-					  ext3_get_block);
-		if (ret != 0) {
-			ext3_journal_stop(handle);
-			goto out_unlock;
-		}
-		ret = walk_page_buffers(handle, page_buffers(page), 0,
-			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
-
-		err = walk_page_buffers(handle, page_buffers(page), 0,
-				PAGE_CACHE_SIZE, NULL, write_end_fn);
-		if (ret == 0)
-			ret = err;
-		ext3_set_inode_state(inode, EXT3_STATE_JDATA);
-		atomic_set(&EXT3_I(inode)->i_datasync_tid,
-			   handle->h_transaction->t_tid);
-		unlock_page(page);
-		err = ext3_journal_stop(handle);
-		if (!ret)
-			ret = err;
-	} else {
-		/*
-		 * It is a page full of checkpoint-mode buffers. Go and write
-		 * them. They should have been already mapped when they went
-		 * to the journal so provide NULL get_block function to catch
-		 * errors.
-		 */
-		ret = block_write_full_page(page, NULL, wbc);
-	}
-out:
-	return ret;
-
-no_write:
-	redirty_page_for_writepage(wbc, page);
-out_unlock:
-	unlock_page(page);
-	goto out;
-}
-
-static int ext3_readpage(struct file *file, struct page *page)
-{
-	trace_ext3_readpage(page);
-	return mpage_readpage(page, ext3_get_block);
-}
-
-static int
-ext3_readpages(struct file *file, struct address_space *mapping,
-		struct list_head *pages, unsigned nr_pages)
-{
-	return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
-}
-
-static void ext3_invalidatepage(struct page *page, unsigned int offset,
-				unsigned int length)
-{
-	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
-
-	trace_ext3_invalidatepage(page, offset, length);
-
-	/*
-	 * If it's a full truncate we just forget about the pending dirtying
-	 */
-	if (offset == 0 && length == PAGE_CACHE_SIZE)
-		ClearPageChecked(page);
-
-	journal_invalidatepage(journal, page, offset, length);
-}
-
-static int ext3_releasepage(struct page *page, gfp_t wait)
-{
-	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
-
-	trace_ext3_releasepage(page);
-	WARN_ON(PageChecked(page));
-	if (!page_has_buffers(page))
-		return 0;
-	return journal_try_to_free_buffers(journal, page, wait);
-}
-
-/*
- * If the O_DIRECT write will extend the file then add this inode to the
- * orphan list.  So recovery will truncate it back to the original size
- * if the machine crashes during the write.
- *
- * If the O_DIRECT write is instantiating holes inside i_size and the machine
- * crashes then stale disk data _may_ be exposed inside the file. But current
- * VFS code falls back to the buffered path in that case so we are safe.
- */
-static ssize_t ext3_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-			      loff_t offset)
-{
-	struct file *file = iocb->ki_filp;
-	struct inode *inode = file->f_mapping->host;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	handle_t *handle;
-	ssize_t ret;
-	int orphan = 0;
-	size_t count = iov_iter_count(iter);
-	int retries = 0;
-
-	trace_ext3_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
-
-	if (iov_iter_rw(iter) == WRITE) {
-		loff_t final_size = offset + count;
-
-		if (final_size > inode->i_size) {
-			/* Credits for sb + inode write */
-			handle = ext3_journal_start(inode, 2);
-			if (IS_ERR(handle)) {
-				ret = PTR_ERR(handle);
-				goto out;
-			}
-			ret = ext3_orphan_add(handle, inode);
-			if (ret) {
-				ext3_journal_stop(handle);
-				goto out;
-			}
-			orphan = 1;
-			ei->i_disksize = inode->i_size;
-			ext3_journal_stop(handle);
-		}
-	}
-
-retry:
-	ret = blockdev_direct_IO(iocb, inode, iter, offset, ext3_get_block);
-	/*
- * In case of error, an extending write may have instantiated a few
-	 * blocks outside i_size. Trim these off again.
-	 */
-	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
-		loff_t isize = i_size_read(inode);
-		loff_t end = offset + count;
-
-		if (end > isize)
-			ext3_truncate_failed_direct_write(inode);
-	}
-	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
-		goto retry;
-
-	if (orphan) {
-		int err;
-
-		/* Credits for sb + inode write */
-		handle = ext3_journal_start(inode, 2);
-		if (IS_ERR(handle)) {
-			/* This is really bad luck. We've written the data
-			 * but cannot extend i_size. Truncate allocated blocks
-			 * and pretend the write failed... */
-			ext3_truncate_failed_direct_write(inode);
-			ret = PTR_ERR(handle);
-			if (inode->i_nlink)
-				ext3_orphan_del(NULL, inode);
-			goto out;
-		}
-		if (inode->i_nlink)
-			ext3_orphan_del(handle, inode);
-		if (ret > 0) {
-			loff_t end = offset + ret;
-			if (end > inode->i_size) {
-				ei->i_disksize = end;
-				i_size_write(inode, end);
-				/*
-				 * We're going to return a positive `ret'
-				 * here due to non-zero-length I/O, so there's
-				 * no way of reporting error returns from
-				 * ext3_mark_inode_dirty() to userspace.  So
-				 * ignore it.
-				 */
-				ext3_mark_inode_dirty(handle, inode);
-			}
-		}
-		err = ext3_journal_stop(handle);
-		if (ret == 0)
-			ret = err;
-	}
-out:
-	trace_ext3_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
-	return ret;
-}
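
The retry above leans on ext3_should_retry_alloc(), which decides by forcing and waiting on a journal commit in the hope that the commit frees space. A minimal, self-contained sketch of the same retry shape, where op(), arg and the bounded retry count are illustrative stand-ins (not ext3 API) for blockdev_direct_IO() and the commit heuristic:

#include <errno.h>

static int retry_on_enospc(int (*op)(void *), void *arg, int max_retries)
{
	int ret;

	do {
		ret = op(arg);
	} while (ret == -ENOSPC && max_retries-- > 0);

	return ret;
}
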
-
-/*
- * Pages can be marked dirty completely asynchronously from ext3's journalling
- * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
- * much here because ->set_page_dirty is called under VFS locks.  The page is
- * not necessarily locked.
- *
- * We cannot just dirty the page and leave attached buffers clean, because the
- * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
- * or jbddirty because all the journalling code will explode.
- *
- * So what we do is to mark the page "pending dirty" and next time writepage
- * is called, propagate that into the buffers appropriately.
- */
-static int ext3_journalled_set_page_dirty(struct page *page)
-{
-	SetPageChecked(page);
-	return __set_page_dirty_nobuffers(page);
-}
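
A self-contained sketch of that two-phase handshake: set_page_dirty() may only record intent, since it runs under VFS locks, and the next writepage() consumes the Checked mark and journals the buffers, which is what the walk_page_buffers() passes at the start of this hunk are doing. The flag names below are illustrative, not the real page-flags API:

#include <stdatomic.h>

enum { PG_DIRTY = 1u << 0, PG_CHECKED = 1u << 1 };

static void sketch_set_page_dirty(atomic_uint *flags)
{
	/* Under VFS locks: may not touch buffer state, only record intent. */
	atomic_fetch_or(flags, PG_DIRTY | PG_CHECKED);
}

static int sketch_writepage(atomic_uint *flags)
{
	/* Consume the pending-dirty mark exactly once; a nonzero return
	 * means "journal the page's buffers now". */
	unsigned int old = atomic_fetch_and(flags, ~(unsigned int)PG_CHECKED);

	return !!(old & PG_CHECKED);
}
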
-
-static const struct address_space_operations ext3_ordered_aops = {
-	.readpage		= ext3_readpage,
-	.readpages		= ext3_readpages,
-	.writepage		= ext3_ordered_writepage,
-	.write_begin		= ext3_write_begin,
-	.write_end		= ext3_ordered_write_end,
-	.bmap			= ext3_bmap,
-	.invalidatepage		= ext3_invalidatepage,
-	.releasepage		= ext3_releasepage,
-	.direct_IO		= ext3_direct_IO,
-	.migratepage		= buffer_migrate_page,
-	.is_partially_uptodate  = block_is_partially_uptodate,
-	.is_dirty_writeback	= buffer_check_dirty_writeback,
-	.error_remove_page	= generic_error_remove_page,
-};
-
-static const struct address_space_operations ext3_writeback_aops = {
-	.readpage		= ext3_readpage,
-	.readpages		= ext3_readpages,
-	.writepage		= ext3_writeback_writepage,
-	.write_begin		= ext3_write_begin,
-	.write_end		= ext3_writeback_write_end,
-	.bmap			= ext3_bmap,
-	.invalidatepage		= ext3_invalidatepage,
-	.releasepage		= ext3_releasepage,
-	.direct_IO		= ext3_direct_IO,
-	.migratepage		= buffer_migrate_page,
-	.is_partially_uptodate  = block_is_partially_uptodate,
-	.error_remove_page	= generic_error_remove_page,
-};
-
-static const struct address_space_operations ext3_journalled_aops = {
-	.readpage		= ext3_readpage,
-	.readpages		= ext3_readpages,
-	.writepage		= ext3_journalled_writepage,
-	.write_begin		= ext3_write_begin,
-	.write_end		= ext3_journalled_write_end,
-	.set_page_dirty		= ext3_journalled_set_page_dirty,
-	.bmap			= ext3_bmap,
-	.invalidatepage		= ext3_invalidatepage,
-	.releasepage		= ext3_releasepage,
-	.is_partially_uptodate  = block_is_partially_uptodate,
-	.error_remove_page	= generic_error_remove_page,
-};
-
-void ext3_set_aops(struct inode *inode)
-{
-	if (ext3_should_order_data(inode))
-		inode->i_mapping->a_ops = &ext3_ordered_aops;
-	else if (ext3_should_writeback_data(inode))
-		inode->i_mapping->a_ops = &ext3_writeback_aops;
-	else
-		inode->i_mapping->a_ops = &ext3_journalled_aops;
-}
-
-/*
- * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
- * up to the end of the block which corresponds to `from'.
- * This is required during truncate. We need to physically zero the tail end
- * of that block so it doesn't yield old data if the file is later grown.
- */
-static int ext3_block_truncate_page(struct inode *inode, loff_t from)
-{
-	ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
-	unsigned blocksize, iblock, length, pos;
-	struct page *page;
-	handle_t *handle = NULL;
-	struct buffer_head *bh;
-	int err = 0;
-
-	/* Truncated on block boundary - nothing to do */
-	blocksize = inode->i_sb->s_blocksize;
-	if ((from & (blocksize - 1)) == 0)
-		return 0;
-
-	page = grab_cache_page(inode->i_mapping, index);
-	if (!page)
-		return -ENOMEM;
-	length = blocksize - (offset & (blocksize - 1));
-	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
-
-	if (!page_has_buffers(page))
-		create_empty_buffers(page, blocksize, 0);
-
-	/* Find the buffer that contains "offset" */
-	bh = page_buffers(page);
-	pos = blocksize;
-	while (offset >= pos) {
-		bh = bh->b_this_page;
-		iblock++;
-		pos += blocksize;
-	}
-
-	err = 0;
-	if (buffer_freed(bh)) {
-		BUFFER_TRACE(bh, "freed: skip");
-		goto unlock;
-	}
-
-	if (!buffer_mapped(bh)) {
-		BUFFER_TRACE(bh, "unmapped");
-		ext3_get_block(inode, iblock, bh, 0);
-		/* unmapped? It's a hole - nothing to do */
-		if (!buffer_mapped(bh)) {
-			BUFFER_TRACE(bh, "still unmapped");
-			goto unlock;
-		}
-	}
-
-	/* Ok, it's mapped. Make sure it's up-to-date */
-	if (PageUptodate(page))
-		set_buffer_uptodate(bh);
-
-	if (!bh_uptodate_or_lock(bh)) {
-		err = bh_submit_read(bh);
-		/* Uhhuh. Read error. Complain and punt. */
-		if (err)
-			goto unlock;
-	}
-
-	/* data=writeback mode doesn't need transaction to zero-out data */
-	if (!ext3_should_writeback_data(inode)) {
-		/* We journal at most one block */
-		handle = ext3_journal_start(inode, 1);
-		if (IS_ERR(handle)) {
-			clear_highpage(page);
-			flush_dcache_page(page);
-			err = PTR_ERR(handle);
-			goto unlock;
-		}
-	}
-
-	if (ext3_should_journal_data(inode)) {
-		BUFFER_TRACE(bh, "get write access");
-		err = ext3_journal_get_write_access(handle, bh);
-		if (err)
-			goto stop;
-	}
-
-	zero_user(page, offset, length);
-	BUFFER_TRACE(bh, "zeroed end of block");
-
-	err = 0;
-	if (ext3_should_journal_data(inode)) {
-		err = ext3_journal_dirty_metadata(handle, bh);
-	} else {
-		if (ext3_should_order_data(inode))
-			err = ext3_journal_dirty_data(handle, bh);
-		mark_buffer_dirty(bh);
-	}
-stop:
-	if (handle)
-		ext3_journal_stop(handle);
-
-unlock:
-	unlock_page(page);
-	page_cache_release(page);
-	return err;
-}
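
A worked example with assumed numbers (4 KB pages, 1 KB blocks, truncating at from = 2500): offset is 2500 within the page, length = 1024 - (2500 & 1023) = 572, so zero_user() clears bytes 2500..3071, the tail of the block that now holds EOF. In miniature:

#include <assert.h>

/* Length of the tail to zero inside the block containing the new EOF. */
static unsigned int tail_zero_length(unsigned long from, unsigned int blocksize)
{
	return blocksize - (from & (blocksize - 1));
}

int main(void)
{
	assert(tail_zero_length(2500, 1024) == 572);
	return 0;
}
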
-
-/*
- * Probably it should be a library function... search for first non-zero word
- * or memcmp with zero_page, whatever is better for particular architecture.
- * Linus?
- */
-static inline int all_zeroes(__le32 *p, __le32 *q)
-{
-	while (p < q)
-		if (*p++)
-			return 0;
-	return 1;
-}
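
The library helper the comment wishes for did eventually appear: later kernels provide memchr_inv() in lib/string.c, which returns the first byte in a range not matching the given value, or NULL if every byte matches. Assuming it is available, the same test becomes:

#include <linux/types.h>
#include <linux/string.h>

static inline int all_zeroes_alt(__le32 *p, __le32 *q)
{
	/* NULL means no non-zero byte was found in [p, q). */
	return memchr_inv(p, 0, (char *)q - (char *)p) == NULL;
}
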
-
-/**
- *	ext3_find_shared - find the indirect blocks for partial truncation.
- *	@inode:	  inode in question
- *	@depth:	  depth of the affected branch
- *	@offsets: offsets of pointers in that branch (see ext3_block_to_path)
- *	@chain:	  place to store the pointers to partial indirect blocks
- *	@top:	  place to the (detached) top of branch
- *
- *	This is a helper function used by ext3_truncate().
- *
- *	When we do truncate() we may have to clean the ends of several
- *	indirect blocks but leave the blocks themselves alive. Block is
- *	partially truncated if some data below the new i_size is referred
- *	to from it (and it is on the path to the first completely truncated
- *	data block, indeed).  We have to free the top of that path along
- *	with everything to the right of the path. Since no allocation
- *	past the truncation point is possible until ext3_truncate()
- *	finishes, we may safely do the latter, but top of branch may
- *	require special attention - pageout below the truncation point
- *	might try to populate it.
- *
- *	We atomically detach the top of branch from the tree, store the
- *	block number of its root in *@top, pointers to buffer_heads of
- *	partially truncated blocks - in @chain[].bh and pointers to
- *	their last elements that should not be removed - in
- *	@chain[].p. Return value is the pointer to last filled element
- *	of @chain.
- *
- *	The work left to caller to do the actual freeing of subtrees:
- *		a) free the subtree starting from *@top
- *		b) free the subtrees whose roots are stored in
- *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
- *		c) free the subtrees growing from the inode past the @chain[0].
- *			(no partially truncated stuff there).  */
-
-static Indirect *ext3_find_shared(struct inode *inode, int depth,
-			int offsets[4], Indirect chain[4], __le32 *top)
-{
-	Indirect *partial, *p;
-	int k, err;
-
-	*top = 0;
-	/* Make k index the deepest non-null offset + 1 */
-	for (k = depth; k > 1 && !offsets[k-1]; k--)
-		;
-	partial = ext3_get_branch(inode, k, offsets, chain, &err);
-	/* Writer: pointers */
-	if (!partial)
-		partial = chain + k-1;
-	/*
-	 * If the branch acquired continuation since we've looked at it -
-	 * fine, it should all survive and (new) top doesn't belong to us.
-	 */
-	if (!partial->key && *partial->p)
-		/* Writer: end */
-		goto no_top;
-	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
-		;
-	/*
-	 * OK, we've found the last block that must survive. The rest of our
-	 * branch should be detached before unlocking. However, if that rest
-	 * of branch is all ours and does not grow immediately from the inode
-	 * it's easier to cheat and just decrement partial->p.
-	 */
-	if (p == chain + k - 1 && p > chain) {
-		p->p--;
-	} else {
-		*top = *p->p;
-		/* Nope, don't do this in ext3.  Must leave the tree intact */
-#if 0
-		*p->p = 0;
-#endif
-	}
-	/* Writer: end */
-
-	while(partial > p) {
-		brelse(partial->bh);
-		partial--;
-	}
-no_top:
-	return partial;
-}
-
-/*
- * Zero a number of block pointers in either an inode or an indirect block.
- * If we restart the transaction we must again get write access to the
- * indirect block for further modification.
- *
- * We release `count' blocks on disk, but (last - first) may be greater
- * than `count' because there can be holes in there.
- */
-static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
-		struct buffer_head *bh, ext3_fsblk_t block_to_free,
-		unsigned long count, __le32 *first, __le32 *last)
-{
-	__le32 *p;
-	if (try_to_extend_transaction(handle, inode)) {
-		if (bh) {
-			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			if (ext3_journal_dirty_metadata(handle, bh))
-				return;
-		}
-		ext3_mark_inode_dirty(handle, inode);
-		truncate_restart_transaction(handle, inode);
-		if (bh) {
-			BUFFER_TRACE(bh, "retaking write access");
-			if (ext3_journal_get_write_access(handle, bh))
-				return;
-		}
-	}
-
-	/*
-	 * Any buffers which are on the journal will be in memory. We find
-	 * them on the hash table so journal_revoke() will run journal_forget()
-	 * on them.  We've already detached each block from the file, so
-	 * bforget() in journal_forget() should be safe.
-	 *
-	 * AKPM: turn on bforget in journal_forget()!!!
-	 */
-	for (p = first; p < last; p++) {
-		u32 nr = le32_to_cpu(*p);
-		if (nr) {
-			struct buffer_head *bh;
-
-			*p = 0;
-			bh = sb_find_get_block(inode->i_sb, nr);
-			ext3_forget(handle, 0, inode, bh, nr);
-		}
-	}
-
-	ext3_free_blocks(handle, inode, block_to_free, count);
-}
-
-/**
- * ext3_free_data - free a list of data blocks
- * @handle:	handle for this transaction
- * @inode:	inode we are dealing with
- * @this_bh:	indirect buffer_head which contains *@first and *@last
- * @first:	array of block numbers
- * @last:	points immediately past the end of array
- *
- * We are freeing all blocks referred from that array (numbers are stored as
- * little-endian 32-bit) and updating @inode->i_blocks appropriately.
- *
- * We accumulate contiguous runs of blocks to free.  Conveniently, if these
- * blocks are contiguous then releasing them at one time will only affect one
- * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
- * actually use a lot of journal space.
- *
- * @this_bh will be %NULL if @first and @last point into the inode's direct
- * block pointers.
- */
-static void ext3_free_data(handle_t *handle, struct inode *inode,
-			   struct buffer_head *this_bh,
-			   __le32 *first, __le32 *last)
-{
-	ext3_fsblk_t block_to_free = 0;    /* Starting block # of a run */
-	unsigned long count = 0;	    /* Number of blocks in the run */
-	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
-					       corresponding to
-					       block_to_free */
-	ext3_fsblk_t nr;		    /* Current block # */
-	__le32 *p;			    /* Pointer into inode/ind
-					       for current block */
-	int err;
-
-	if (this_bh) {				/* For indirect block */
-		BUFFER_TRACE(this_bh, "get_write_access");
-		err = ext3_journal_get_write_access(handle, this_bh);
-		/* Important: if we can't update the indirect pointers
-		 * to the blocks, we can't free them. */
-		if (err)
-			return;
-	}
-
-	for (p = first; p < last; p++) {
-		nr = le32_to_cpu(*p);
-		if (nr) {
-			/* accumulate blocks to free if they're contiguous */
-			if (count == 0) {
-				block_to_free = nr;
-				block_to_free_p = p;
-				count = 1;
-			} else if (nr == block_to_free + count) {
-				count++;
-			} else {
-				ext3_clear_blocks(handle, inode, this_bh,
-						  block_to_free,
-						  count, block_to_free_p, p);
-				block_to_free = nr;
-				block_to_free_p = p;
-				count = 1;
-			}
-		}
-	}
-
-	if (count > 0)
-		ext3_clear_blocks(handle, inode, this_bh, block_to_free,
-				  count, block_to_free_p, p);
-
-	if (this_bh) {
-		BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
-
-		/*
-		 * The buffer head should have an attached journal head at this
-		 * point. However, if the data is corrupted and an indirect
-		 * block pointed to itself, it would have been detached when
-		 * the block was cleared. Check for this instead of OOPSing.
-		 */
-		if (bh2jh(this_bh))
-			ext3_journal_dirty_metadata(handle, this_bh);
-		else
-			ext3_error(inode->i_sb, "ext3_free_data",
-				   "circular indirect block detected, "
-				   "inode=%lu, block=%llu",
-				   inode->i_ino,
-				   (unsigned long long)this_bh->b_blocknr);
-	}
-}
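
The run accumulation above reduces to a small, self-contained pattern: batch consecutive block numbers so that each free operation covers one contiguous extent and therefore touches at most a bitmap block or two. A sketch with illustrative names, where free_run() stands in for ext3_clear_blocks():

#include <stddef.h>
#include <stdio.h>

static void free_run(unsigned long start, unsigned long count)
{
	printf("free blocks %lu..%lu\n", start, start + count - 1);
}

static void free_block_list(const unsigned long *blks, size_t n)
{
	unsigned long start = 0, count = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		if (!blks[i])
			continue;			/* a hole */
		if (count && blks[i] == start + count) {
			count++;			/* extends current run */
		} else {
			if (count)
				free_run(start, count);
			start = blks[i];		/* start a new run */
			count = 1;
		}
	}
	if (count)
		free_run(start, count);
}

/* free_block_list((unsigned long[]){10, 11, 12, 0, 20}, 5)
 * prints "free blocks 10..12" then "free blocks 20..20". */
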
-
-/**
- *	ext3_free_branches - free an array of branches
- *	@handle: JBD handle for this transaction
- *	@inode:	inode we are dealing with
- *	@parent_bh: the buffer_head which contains *@first and *@last
- *	@first:	array of block numbers
- *	@last:	pointer immediately past the end of array
- *	@depth:	depth of the branches to free
- *
- *	We are freeing all blocks referred from these branches (numbers are
- *	stored as little-endian 32-bit) and updating @inode->i_blocks
- *	appropriately.
- */
-static void ext3_free_branches(handle_t *handle, struct inode *inode,
-			       struct buffer_head *parent_bh,
-			       __le32 *first, __le32 *last, int depth)
-{
-	ext3_fsblk_t nr;
-	__le32 *p;
-
-	if (is_handle_aborted(handle))
-		return;
-
-	if (depth--) {
-		struct buffer_head *bh;
-		int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
-		p = last;
-		while (--p >= first) {
-			nr = le32_to_cpu(*p);
-			if (!nr)
-				continue;		/* A hole */
-
-			/* Go read the buffer for the next level down */
-			bh = sb_bread(inode->i_sb, nr);
-
-			/*
-			 * A read failure? Report error and clear slot
-			 * (should be rare).
-			 */
-			if (!bh) {
-				ext3_error(inode->i_sb, "ext3_free_branches",
-					   "Read failure, inode=%lu, block="E3FSBLK,
-					   inode->i_ino, nr);
-				continue;
-			}
-
-			/* This zaps the entire block.  Bottom up. */
-			BUFFER_TRACE(bh, "free child branches");
-			ext3_free_branches(handle, inode, bh,
-					   (__le32*)bh->b_data,
-					   (__le32*)bh->b_data + addr_per_block,
-					   depth);
-
-			/*
-			 * Everything below this pointer has been
-			 * released.  Now let this top-of-subtree go.
-			 *
-			 * We want the freeing of this indirect block to be
-			 * atomic in the journal with the updating of the
-			 * bitmap block which owns it.  So make some room in
-			 * the journal.
-			 *
-			 * We zero the parent pointer *after* freeing its
-			 * pointee in the bitmaps, so if extend_transaction()
-			 * for some reason fails to put the bitmap changes and
-			 * the release into the same transaction, recovery
-			 * will merely complain about releasing a free block,
-			 * rather than leaking blocks.
-			 */
-			if (is_handle_aborted(handle))
-				return;
-			if (try_to_extend_transaction(handle, inode)) {
-				ext3_mark_inode_dirty(handle, inode);
-				truncate_restart_transaction(handle, inode);
-			}
-
-			/*
-			 * We've probably journalled the indirect block several
-			 * times during the truncate.  But it's no longer
-			 * needed and we now drop it from the transaction via
-			 * journal_revoke().
-			 *
-			 * That's easy if it's exclusively part of this
-			 * transaction.  But if it's part of the committing
-			 * transaction then journal_forget() will simply
-			 * brelse() it.  That means that if the underlying
-			 * block is reallocated in ext3_get_block(),
-			 * unmap_underlying_metadata() will find this block
-			 * and will try to get rid of it.  damn, damn. Thus
-			 * we don't allow a block to be reallocated until
-			 * a transaction freeing it has fully committed.
-			 *
-			 * We also have to make sure journal replay after a
-			 * crash does not overwrite non-journaled data blocks
-			 * with old metadata when the block got reallocated for
-			 * data.  Thus we have to store a revoke record for a
-			 * block in the same transaction in which we free the
-			 * block.
-			 */
-			ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
-
-			ext3_free_blocks(handle, inode, nr, 1);
-
-			if (parent_bh) {
-				/*
-				 * The block which we have just freed is
-				 * pointed to by an indirect block: journal it
-				 */
-				BUFFER_TRACE(parent_bh, "get_write_access");
-				if (!ext3_journal_get_write_access(handle,
-								   parent_bh)){
-					*p = 0;
-					BUFFER_TRACE(parent_bh,
-					"call ext3_journal_dirty_metadata");
-					ext3_journal_dirty_metadata(handle,
-								    parent_bh);
-				}
-			}
-		}
-	} else {
-		/* We have reached the bottom of the tree. */
-		BUFFER_TRACE(parent_bh, "free data blocks");
-		ext3_free_data(handle, inode, parent_bh, first, last);
-	}
-}
-
-int ext3_can_truncate(struct inode *inode)
-{
-	if (S_ISREG(inode->i_mode))
-		return 1;
-	if (S_ISDIR(inode->i_mode))
-		return 1;
-	if (S_ISLNK(inode->i_mode))
-		return !ext3_inode_is_fast_symlink(inode);
-	return 0;
-}
-
-/*
- * ext3_truncate()
- *
- * We block out ext3_get_block() block instantiations across the entire
- * transaction, and VFS/VM ensures that ext3_truncate() cannot run
- * simultaneously on behalf of the same inode.
- *
- * As we work through the truncate and commit bits of it to the journal there
- * is one core, guiding principle: the file's tree must always be consistent on
- * disk.  We must be able to restart the truncate after a crash.
- *
- * The file's tree may be transiently inconsistent in memory (although it
- * probably isn't), but whenever we close off and commit a journal transaction,
- * the contents of (the filesystem + the journal) must be consistent and
- * restartable.  It's pretty simple, really: bottom up, right to left (although
- * left-to-right works OK too).
- *
- * Note that at recovery time, journal replay occurs *before* the restart of
- * truncate against the orphan inode list.
- *
- * The committed inode has the new, desired i_size (which is the same as
- * i_disksize in this case).  After a crash, ext3_orphan_cleanup() will see
- * that this inode's truncate did not complete and it will again call
- * ext3_truncate() to have another go.  So there will be instantiated blocks
- * to the right of the truncation point in a crashed ext3 filesystem.  But
- * that's fine - as long as they are linked from the inode, the post-crash
- * ext3_truncate() run will find them and release them.
- */
-void ext3_truncate(struct inode *inode)
-{
-	handle_t *handle;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	__le32 *i_data = ei->i_data;
-	int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
-	int offsets[4];
-	Indirect chain[4];
-	Indirect *partial;
-	__le32 nr = 0;
-	int n;
-	long last_block;
-	unsigned blocksize = inode->i_sb->s_blocksize;
-
-	trace_ext3_truncate_enter(inode);
-
-	if (!ext3_can_truncate(inode))
-		goto out_notrans;
-
-	if (inode->i_size == 0 && ext3_should_writeback_data(inode))
-		ext3_set_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
-
-	handle = start_transaction(inode);
-	if (IS_ERR(handle))
-		goto out_notrans;
-
-	last_block = (inode->i_size + blocksize-1)
-					>> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
-	n = ext3_block_to_path(inode, last_block, offsets, NULL);
-	if (n == 0)
-		goto out_stop;	/* error */
-
-	/*
-	 * OK.  This truncate is going to happen.  We add the inode to the
-	 * orphan list, so that if this truncate spans multiple transactions,
-	 * and we crash, we will resume the truncate when the filesystem
-	 * recovers.  It also marks the inode dirty, to catch the new size.
-	 *
-	 * Implication: the file must always be in a sane, consistent
-	 * truncatable state while each transaction commits.
-	 */
-	if (ext3_orphan_add(handle, inode))
-		goto out_stop;
-
-	/*
-	 * The orphan list entry will now protect us from any crash which
-	 * occurs before the truncate completes, so it is now safe to propagate
-	 * the new, shorter inode size (held for now in i_size) into the
-	 * on-disk inode. We do this via i_disksize, which is the value which
-	 * ext3 *really* writes onto the disk inode.
-	 */
-	ei->i_disksize = inode->i_size;
-
-	/*
-	 * From here we block out all ext3_get_block() callers who want to
-	 * modify the block allocation tree.
-	 */
-	mutex_lock(&ei->truncate_mutex);
-
-	if (n == 1) {		/* direct blocks */
-		ext3_free_data(handle, inode, NULL, i_data+offsets[0],
-			       i_data + EXT3_NDIR_BLOCKS);
-		goto do_indirects;
-	}
-
-	partial = ext3_find_shared(inode, n, offsets, chain, &nr);
-	/* Kill the top of shared branch (not detached) */
-	if (nr) {
-		if (partial == chain) {
-			/* Shared branch grows from the inode */
-			ext3_free_branches(handle, inode, NULL,
-					   &nr, &nr+1, (chain+n-1) - partial);
-			*partial->p = 0;
-			/*
-			 * We mark the inode dirty prior to restart,
-			 * and prior to stop.  No need for it here.
-			 */
-		} else {
-			/* Shared branch grows from an indirect block */
-			ext3_free_branches(handle, inode, partial->bh,
-					partial->p,
-					partial->p+1, (chain+n-1) - partial);
-		}
-	}
-	/* Clear the ends of indirect blocks on the shared branch */
-	while (partial > chain) {
-		ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
-				   (__le32*)partial->bh->b_data+addr_per_block,
-				   (chain+n-1) - partial);
-		BUFFER_TRACE(partial->bh, "call brelse");
-		brelse (partial->bh);
-		partial--;
-	}
-do_indirects:
-	/* Kill the remaining (whole) subtrees */
-	switch (offsets[0]) {
-	default:
-		nr = i_data[EXT3_IND_BLOCK];
-		if (nr) {
-			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
-			i_data[EXT3_IND_BLOCK] = 0;
-		}
-	case EXT3_IND_BLOCK:
-		nr = i_data[EXT3_DIND_BLOCK];
-		if (nr) {
-			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
-			i_data[EXT3_DIND_BLOCK] = 0;
-		}
-	case EXT3_DIND_BLOCK:
-		nr = i_data[EXT3_TIND_BLOCK];
-		if (nr) {
-			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
-			i_data[EXT3_TIND_BLOCK] = 0;
-		}
-	case EXT3_TIND_BLOCK:
-		;
-	}
-
-	ext3_discard_reservation(inode);
-
-	mutex_unlock(&ei->truncate_mutex);
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
-	ext3_mark_inode_dirty(handle, inode);
-
-	/*
-	 * In a multi-transaction truncate, we only make the final transaction
-	 * synchronous
-	 */
-	if (IS_SYNC(inode))
-		handle->h_sync = 1;
-out_stop:
-	/*
-	 * If this was a simple ftruncate(), and the file will remain alive
-	 * then we need to clear up the orphan record which we created above.
-	 * However, if this was a real unlink then we were called by
-	 * ext3_evict_inode(), and we allow that function to clean up the
-	 * orphan info for us.
-	 */
-	if (inode->i_nlink)
-		ext3_orphan_del(handle, inode);
-
-	ext3_journal_stop(handle);
-	trace_ext3_truncate_exit(inode);
-	return;
-out_notrans:
-	/*
-	 * Delete the inode from orphan list so that it doesn't stay there
-	 * forever and trigger assertion on umount.
-	 */
-	if (inode->i_nlink)
-		ext3_orphan_del(NULL, inode);
-	trace_ext3_truncate_exit(inode);
-}
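
For the arithmetic near the top of the function, a worked example with assumed numbers: i_size = 5000 with 1 KB blocks gives last_block = (5000 + 1023) >> 10 = 5, so blocks 0..4 survive (block 4 only partially, its tail zeroed by ext3_block_truncate_page()) and freeing begins at block 5:

static unsigned long first_block_to_free(unsigned long long i_size,
					 unsigned int blocksize)
{
	/* Round i_size up to a whole number of blocks. */
	return (i_size + blocksize - 1) / blocksize;
}
/* first_block_to_free(5000, 1024) == 5 */

Note also that the do_indirects switch relies on intentional fall-through: each case runs into the next, so once freeing enters at the right depth, every deeper indirect tree after it is freed as well.
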
-
-static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
-		unsigned long ino, struct ext3_iloc *iloc)
-{
-	unsigned long block_group;
-	unsigned long offset;
-	ext3_fsblk_t block;
-	struct ext3_group_desc *gdp;
-
-	if (!ext3_valid_inum(sb, ino)) {
-		/*
-		 * This error is already checked for in namei.c unless we are
-		 * looking at an NFS filehandle, in which case no error
-		 * report is needed
-		 */
-		return 0;
-	}
-
-	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
-	gdp = ext3_get_group_desc(sb, block_group, NULL);
-	if (!gdp)
-		return 0;
-	/*
-	 * Figure out the offset within the block group inode table
-	 */
-	offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
-		EXT3_INODE_SIZE(sb);
-	block = le32_to_cpu(gdp->bg_inode_table) +
-		(offset >> EXT3_BLOCK_SIZE_BITS(sb));
-
-	iloc->block_group = block_group;
-	iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
-	return block;
-}
-
-/*
- * ext3_get_inode_loc returns with an extra refcount against the inode's
- * underlying buffer_head on success. If 'in_mem' is true, we have all
- * data in memory that is needed to recreate the on-disk version of this
- * inode.
- */
-static int __ext3_get_inode_loc(struct inode *inode,
-				struct ext3_iloc *iloc, int in_mem)
-{
-	ext3_fsblk_t block;
-	struct buffer_head *bh;
-
-	block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
-	if (!block)
-		return -EIO;
-
-	bh = sb_getblk(inode->i_sb, block);
-	if (unlikely(!bh)) {
-		ext3_error (inode->i_sb, "ext3_get_inode_loc",
-				"unable to read inode block - "
-				"inode=%lu, block="E3FSBLK,
-				 inode->i_ino, block);
-		return -ENOMEM;
-	}
-	if (!buffer_uptodate(bh)) {
-		lock_buffer(bh);
-
-		/*
-		 * If the buffer has the write error flag, we have failed
-		 * to write out another inode in the same block.  In this
-		 * case, we don't have to read the block because we may
-		 * read the old inode data successfully.
-		 */
-		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
-			set_buffer_uptodate(bh);
-
-		if (buffer_uptodate(bh)) {
-			/* someone brought it uptodate while we waited */
-			unlock_buffer(bh);
-			goto has_buffer;
-		}
-
-		/*
-		 * If we have all information of the inode in memory and this
-		 * is the only valid inode in the block, we need not read the
-		 * block.
-		 */
-		if (in_mem) {
-			struct buffer_head *bitmap_bh;
-			struct ext3_group_desc *desc;
-			int inodes_per_buffer;
-			int inode_offset, i;
-			int block_group;
-			int start;
-
-			block_group = (inode->i_ino - 1) /
-					EXT3_INODES_PER_GROUP(inode->i_sb);
-			inodes_per_buffer = bh->b_size /
-				EXT3_INODE_SIZE(inode->i_sb);
-			inode_offset = ((inode->i_ino - 1) %
-					EXT3_INODES_PER_GROUP(inode->i_sb));
-			start = inode_offset & ~(inodes_per_buffer - 1);
-
-			/* Is the inode bitmap in cache? */
-			desc = ext3_get_group_desc(inode->i_sb,
-						block_group, NULL);
-			if (!desc)
-				goto make_io;
-
-			bitmap_bh = sb_getblk(inode->i_sb,
-					le32_to_cpu(desc->bg_inode_bitmap));
-			if (unlikely(!bitmap_bh))
-				goto make_io;
-
-			/*
-			 * If the inode bitmap isn't in cache then the
-			 * optimisation may end up performing two reads instead
-			 * of one, so skip it.
-			 */
-			if (!buffer_uptodate(bitmap_bh)) {
-				brelse(bitmap_bh);
-				goto make_io;
-			}
-			for (i = start; i < start + inodes_per_buffer; i++) {
-				if (i == inode_offset)
-					continue;
-				if (ext3_test_bit(i, bitmap_bh->b_data))
-					break;
-			}
-			brelse(bitmap_bh);
-			if (i == start + inodes_per_buffer) {
-				/* all other inodes are free, so skip I/O */
-				memset(bh->b_data, 0, bh->b_size);
-				set_buffer_uptodate(bh);
-				unlock_buffer(bh);
-				goto has_buffer;
-			}
-		}
-
-make_io:
-		/*
-		 * There are other valid inodes in the buffer, this inode
-		 * has in-inode xattrs, or we don't have this inode in memory.
-		 * Read the block from disk.
-		 */
-		trace_ext3_load_inode(inode);
-		get_bh(bh);
-		bh->b_end_io = end_buffer_read_sync;
-		submit_bh(READ | REQ_META | REQ_PRIO, bh);
-		wait_on_buffer(bh);
-		if (!buffer_uptodate(bh)) {
-			ext3_error(inode->i_sb, "ext3_get_inode_loc",
-					"unable to read inode block - "
-					"inode=%lu, block="E3FSBLK,
-					inode->i_ino, block);
-			brelse(bh);
-			return -EIO;
-		}
-	}
-has_buffer:
-	iloc->bh = bh;
-	return 0;
-}
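
The skip-the-read optimisation in the in_mem branch boils down to one bitmap scan: the inode-table block may be zero-filled instead of read only if every other inode sharing that block is free. A sketch using plain byte-order bit math (the real code uses ext3_test_bit(), which is little-endian):

static int can_skip_read(const unsigned char *bitmap, int start,
			 int inodes_per_buffer, int inode_offset)
{
	int i;

	for (i = start; i < start + inodes_per_buffer; i++) {
		if (i == inode_offset)
			continue;		/* the inode being loaded */
		if (bitmap[i / 8] & (1u << (i % 8)))
			return 0;		/* in-use neighbour: must read */
	}
	return 1;				/* all neighbours free: memset, no I/O */
}
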
-
-int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
-{
-	/* We have all inode data except xattrs in memory here. */
-	return __ext3_get_inode_loc(inode, iloc,
-		!ext3_test_inode_state(inode, EXT3_STATE_XATTR));
-}
-
-void ext3_set_inode_flags(struct inode *inode)
-{
-	unsigned int flags = EXT3_I(inode)->i_flags;
-
-	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
-	if (flags & EXT3_SYNC_FL)
-		inode->i_flags |= S_SYNC;
-	if (flags & EXT3_APPEND_FL)
-		inode->i_flags |= S_APPEND;
-	if (flags & EXT3_IMMUTABLE_FL)
-		inode->i_flags |= S_IMMUTABLE;
-	if (flags & EXT3_NOATIME_FL)
-		inode->i_flags |= S_NOATIME;
-	if (flags & EXT3_DIRSYNC_FL)
-		inode->i_flags |= S_DIRSYNC;
-}
-
-/* Propagate flags from i_flags to EXT3_I(inode)->i_flags */
-void ext3_get_inode_flags(struct ext3_inode_info *ei)
-{
-	unsigned int flags = ei->vfs_inode.i_flags;
-
-	ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
-			EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
-	if (flags & S_SYNC)
-		ei->i_flags |= EXT3_SYNC_FL;
-	if (flags & S_APPEND)
-		ei->i_flags |= EXT3_APPEND_FL;
-	if (flags & S_IMMUTABLE)
-		ei->i_flags |= EXT3_IMMUTABLE_FL;
-	if (flags & S_NOATIME)
-		ei->i_flags |= EXT3_NOATIME_FL;
-	if (flags & S_DIRSYNC)
-		ei->i_flags |= EXT3_DIRSYNC_FL;
-}
-
-struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
-{
-	struct ext3_iloc iloc;
-	struct ext3_inode *raw_inode;
-	struct ext3_inode_info *ei;
-	struct buffer_head *bh;
-	struct inode *inode;
-	journal_t *journal = EXT3_SB(sb)->s_journal;
-	transaction_t *transaction;
-	long ret;
-	int block;
-	uid_t i_uid;
-	gid_t i_gid;
-
-	inode = iget_locked(sb, ino);
-	if (!inode)
-		return ERR_PTR(-ENOMEM);
-	if (!(inode->i_state & I_NEW))
-		return inode;
-
-	ei = EXT3_I(inode);
-	ei->i_block_alloc_info = NULL;
-
-	ret = __ext3_get_inode_loc(inode, &iloc, 0);
-	if (ret < 0)
-		goto bad_inode;
-	bh = iloc.bh;
-	raw_inode = ext3_raw_inode(&iloc);
-	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
-	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
-	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
-	if(!(test_opt (inode->i_sb, NO_UID32))) {
-		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
-		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
-	}
-	i_uid_write(inode, i_uid);
-	i_gid_write(inode, i_gid);
-	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
-	inode->i_size = le32_to_cpu(raw_inode->i_size);
-	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
-	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
-	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
-	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
-
-	ei->i_state_flags = 0;
-	ei->i_dir_start_lookup = 0;
-	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
-	/* We now have enough fields to check if the inode was active or not.
-	 * This is needed because nfsd might try to access dead inodes;
-	 * the test is the same one that e2fsck uses.
-	 * NeilBrown 1999oct15
-	 */
-	if (inode->i_nlink == 0) {
-		if (inode->i_mode == 0 ||
-		    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
-			/* this inode is deleted */
-			brelse (bh);
-			ret = -ESTALE;
-			goto bad_inode;
-		}
-		/* The only unlinked inodes we let through here have
-		 * valid i_mode and are being read by the orphan
-		 * recovery code: that's fine, we're about to complete
-		 * the process of deleting those. */
-	}
-	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
-	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
-#ifdef EXT3_FRAGMENTS
-	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
-	ei->i_frag_no = raw_inode->i_frag;
-	ei->i_frag_size = raw_inode->i_fsize;
-#endif
-	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
-	if (!S_ISREG(inode->i_mode)) {
-		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
-	} else {
-		inode->i_size |=
-			((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
-	}
-	ei->i_disksize = inode->i_size;
-	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
-	ei->i_block_group = iloc.block_group;
-	/*
-	 * NOTE! The in-memory inode i_data array is in little-endian order
-	 * even on big-endian machines: we do NOT byteswap the block numbers!
-	 */
-	for (block = 0; block < EXT3_N_BLOCKS; block++)
-		ei->i_data[block] = raw_inode->i_block[block];
-	INIT_LIST_HEAD(&ei->i_orphan);
-
-	/*
-	 * Set transaction id's of transactions that have to be committed
-	 * to finish f[data]sync. We set them to currently running transaction
-	 * as we cannot be sure that the inode or some of its metadata isn't
-	 * part of the transaction - the inode could have been reclaimed and
-	 * now it is reread from disk.
-	 */
-	if (journal) {
-		tid_t tid;
-
-		spin_lock(&journal->j_state_lock);
-		if (journal->j_running_transaction)
-			transaction = journal->j_running_transaction;
-		else
-			transaction = journal->j_committing_transaction;
-		if (transaction)
-			tid = transaction->t_tid;
-		else
-			tid = journal->j_commit_sequence;
-		spin_unlock(&journal->j_state_lock);
-		atomic_set(&ei->i_sync_tid, tid);
-		atomic_set(&ei->i_datasync_tid, tid);
-	}
-
-	if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
-	    EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
-		/*
-		 * When mke2fs creates big inodes it does not zero out
-		 * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE,
-		 * so ignore those first few inodes.
-		 */
-		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
-		if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
-		    EXT3_INODE_SIZE(inode->i_sb)) {
-			brelse (bh);
-			ret = -EIO;
-			goto bad_inode;
-		}
-		if (ei->i_extra_isize == 0) {
-			/* The extra space is currently unused. Use it. */
-			ei->i_extra_isize = sizeof(struct ext3_inode) -
-					    EXT3_GOOD_OLD_INODE_SIZE;
-		} else {
-			__le32 *magic = (void *)raw_inode +
-					EXT3_GOOD_OLD_INODE_SIZE +
-					ei->i_extra_isize;
-			if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
-				 ext3_set_inode_state(inode, EXT3_STATE_XATTR);
-		}
-	} else
-		ei->i_extra_isize = 0;
-
-	if (S_ISREG(inode->i_mode)) {
-		inode->i_op = &ext3_file_inode_operations;
-		inode->i_fop = &ext3_file_operations;
-		ext3_set_aops(inode);
-	} else if (S_ISDIR(inode->i_mode)) {
-		inode->i_op = &ext3_dir_inode_operations;
-		inode->i_fop = &ext3_dir_operations;
-	} else if (S_ISLNK(inode->i_mode)) {
-		if (ext3_inode_is_fast_symlink(inode)) {
-			inode->i_op = &ext3_fast_symlink_inode_operations;
-			nd_terminate_link(ei->i_data, inode->i_size,
-				sizeof(ei->i_data) - 1);
-			inode->i_link = (char *)ei->i_data;
-		} else {
-			inode->i_op = &ext3_symlink_inode_operations;
-			ext3_set_aops(inode);
-		}
-	} else {
-		inode->i_op = &ext3_special_inode_operations;
-		if (raw_inode->i_block[0])
-			init_special_inode(inode, inode->i_mode,
-			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
-		else
-			init_special_inode(inode, inode->i_mode,
-			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
-	}
-	brelse (iloc.bh);
-	ext3_set_inode_flags(inode);
-	unlock_new_inode(inode);
-	return inode;
-
-bad_inode:
-	iget_failed(inode);
-	return ERR_PTR(ret);
-}
-
-/*
- * Post the struct inode info into an on-disk inode location in the
- * buffer-cache.  This gobbles the caller's reference to the
- * buffer_head in the inode location struct.
- *
- * The caller must have write access to iloc->bh.
- */
-static int ext3_do_update_inode(handle_t *handle,
-				struct inode *inode,
-				struct ext3_iloc *iloc)
-{
-	struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct buffer_head *bh = iloc->bh;
-	int err = 0, rc, block;
-	int need_datasync = 0;
-	__le32 disksize;
-	uid_t i_uid;
-	gid_t i_gid;
-
-again:
-	/* we can't allow multiple procs in here at once, it's a bit racy */
-	lock_buffer(bh);
-
-	/* For fields not tracked in the in-memory inode,
-	 * initialise them to zero for new inodes. */
-	if (ext3_test_inode_state(inode, EXT3_STATE_NEW))
-		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
-
-	ext3_get_inode_flags(ei);
-	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
-	i_uid = i_uid_read(inode);
-	i_gid = i_gid_read(inode);
-	if(!(test_opt(inode->i_sb, NO_UID32))) {
-		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
-		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
-/*
- * Fix up interoperability with old kernels. Otherwise, old inodes get
- * re-used with the upper 16 bits of the uid/gid intact
- */
-		if(!ei->i_dtime) {
-			raw_inode->i_uid_high =
-				cpu_to_le16(high_16_bits(i_uid));
-			raw_inode->i_gid_high =
-				cpu_to_le16(high_16_bits(i_gid));
-		} else {
-			raw_inode->i_uid_high = 0;
-			raw_inode->i_gid_high = 0;
-		}
-	} else {
-		raw_inode->i_uid_low =
-			cpu_to_le16(fs_high2lowuid(i_uid));
-		raw_inode->i_gid_low =
-			cpu_to_le16(fs_high2lowgid(i_gid));
-		raw_inode->i_uid_high = 0;
-		raw_inode->i_gid_high = 0;
-	}
-	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
-	disksize = cpu_to_le32(ei->i_disksize);
-	if (disksize != raw_inode->i_size) {
-		need_datasync = 1;
-		raw_inode->i_size = disksize;
-	}
-	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
-	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
-	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
-	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
-	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
-	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
-#ifdef EXT3_FRAGMENTS
-	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
-	raw_inode->i_frag = ei->i_frag_no;
-	raw_inode->i_fsize = ei->i_frag_size;
-#endif
-	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
-	if (!S_ISREG(inode->i_mode)) {
-		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
-	} else {
-		disksize = cpu_to_le32(ei->i_disksize >> 32);
-		if (disksize != raw_inode->i_size_high) {
-			raw_inode->i_size_high = disksize;
-			need_datasync = 1;
-		}
-		if (ei->i_disksize > 0x7fffffffULL) {
-			struct super_block *sb = inode->i_sb;
-			if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
-					EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
-			    EXT3_SB(sb)->s_es->s_rev_level ==
-					cpu_to_le32(EXT3_GOOD_OLD_REV)) {
-			       /* If this is the first large file
-				* created, add a flag to the superblock.
-				*/
-				unlock_buffer(bh);
-				err = ext3_journal_get_write_access(handle,
-						EXT3_SB(sb)->s_sbh);
-				if (err)
-					goto out_brelse;
-
-				ext3_update_dynamic_rev(sb);
-				EXT3_SET_RO_COMPAT_FEATURE(sb,
-					EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
-				handle->h_sync = 1;
-				err = ext3_journal_dirty_metadata(handle,
-						EXT3_SB(sb)->s_sbh);
-				/* get our lock and start over */
-				goto again;
-			}
-		}
-	}
-	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
-	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
-		if (old_valid_dev(inode->i_rdev)) {
-			raw_inode->i_block[0] =
-				cpu_to_le32(old_encode_dev(inode->i_rdev));
-			raw_inode->i_block[1] = 0;
-		} else {
-			raw_inode->i_block[0] = 0;
-			raw_inode->i_block[1] =
-				cpu_to_le32(new_encode_dev(inode->i_rdev));
-			raw_inode->i_block[2] = 0;
-		}
-	} else for (block = 0; block < EXT3_N_BLOCKS; block++)
-		raw_inode->i_block[block] = ei->i_data[block];
-
-	if (ei->i_extra_isize)
-		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
-
-	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-	unlock_buffer(bh);
-	rc = ext3_journal_dirty_metadata(handle, bh);
-	if (!err)
-		err = rc;
-	ext3_clear_inode_state(inode, EXT3_STATE_NEW);
-
-	atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
-	if (need_datasync)
-		atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
-out_brelse:
-	brelse (bh);
-	ext3_std_error(inode->i_sb, err);
-	return err;
-}
-
-/*
- * ext3_write_inode()
- *
- * We are called from a few places:
- *
- * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
- *   Here, there will be no transaction running. We wait for any running
- *   transaction to commit.
- *
- * - Within flush work (for sys_sync(), kupdate and such).
- *   We wait on commit, if told to.
- *
- * - Within iput_final() -> write_inode_now()
- *   We wait on commit, if told to.
- *
- * In all cases it is actually safe for us to return without doing anything,
- * because the inode has been copied into a raw inode buffer in
- * ext3_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
- * writeback.
- *
- * Note that we are absolutely dependent upon all inode dirtiers doing the
- * right thing: they *must* call mark_inode_dirty() after dirtying info in
- * which we are interested.
- *
- * It would be a bug for them to not do this.  The code:
- *
- *	mark_inode_dirty(inode)
- *	stuff();
- *	inode->i_size = expr;
- *
- * is in error because write_inode() could occur while `stuff()' is running,
- * and the new i_size will be lost.  Plus the inode will no longer be on the
- * superblock's dirty inode list.
- */
-int ext3_write_inode(struct inode *inode, struct writeback_control *wbc)
-{
-	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
-		return 0;
-
-	if (ext3_journal_current_handle()) {
-		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
-		dump_stack();
-		return -EIO;
-	}
-
-	/*
-	 * No need to force transaction in WB_SYNC_NONE mode. Also
-	 * ext3_sync_fs() will force the commit after everything is
-	 * written.
-	 */
-	if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
-		return 0;
-
-	return ext3_force_commit(inode->i_sb);
-}
-
-/*
- * ext3_setattr()
- *
- * Called from notify_change.
- *
- * We want to trap VFS attempts to truncate the file as soon as
- * possible.  In particular, we want to make sure that when the VFS
- * shrinks i_size, we put the inode on the orphan list and modify
- * i_disksize immediately, so that during the subsequent flushing of
- * dirty pages and freeing of disk blocks, we can guarantee that any
- * commit will leave the blocks being flushed in an unused state on
- * disk.  (On recovery, the inode will get truncated and the blocks will
- * be freed, so we have a strong guarantee that no future commit will
- * leave these blocks visible to the user.)
- *
- * Called with inode->sem down.
- */
-int ext3_setattr(struct dentry *dentry, struct iattr *attr)
-{
-	struct inode *inode = d_inode(dentry);
-	int error, rc = 0;
-	const unsigned int ia_valid = attr->ia_valid;
-
-	error = inode_change_ok(inode, attr);
-	if (error)
-		return error;
-
-	if (is_quota_modification(inode, attr))
-		dquot_initialize(inode);
-	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
-	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
-		handle_t *handle;
-
-		/* (user+group)*(old+new) structure, inode write (sb,
-		 * inode block, ? - but truncate inode update has it) */
-		handle = ext3_journal_start(inode, EXT3_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
-					EXT3_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)+3);
-		if (IS_ERR(handle)) {
-			error = PTR_ERR(handle);
-			goto err_out;
-		}
-		error = dquot_transfer(inode, attr);
-		if (error) {
-			ext3_journal_stop(handle);
-			return error;
-		}
-		/* Update corresponding info in inode so that everything is in
-		 * one transaction */
-		if (attr->ia_valid & ATTR_UID)
-			inode->i_uid = attr->ia_uid;
-		if (attr->ia_valid & ATTR_GID)
-			inode->i_gid = attr->ia_gid;
-		error = ext3_mark_inode_dirty(handle, inode);
-		ext3_journal_stop(handle);
-	}
-
-	if (attr->ia_valid & ATTR_SIZE)
-		inode_dio_wait(inode);
-
-	if (S_ISREG(inode->i_mode) &&
-	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
-		handle_t *handle;
-
-		handle = ext3_journal_start(inode, 3);
-		if (IS_ERR(handle)) {
-			error = PTR_ERR(handle);
-			goto err_out;
-		}
-
-		error = ext3_orphan_add(handle, inode);
-		if (error) {
-			ext3_journal_stop(handle);
-			goto err_out;
-		}
-		EXT3_I(inode)->i_disksize = attr->ia_size;
-		error = ext3_mark_inode_dirty(handle, inode);
-		ext3_journal_stop(handle);
-		if (error) {
-			/* Some hard fs error must have happened. Bail out. */
-			ext3_orphan_del(NULL, inode);
-			goto err_out;
-		}
-		rc = ext3_block_truncate_page(inode, attr->ia_size);
-		if (rc) {
-			/* Cleanup orphan list and exit */
-			handle = ext3_journal_start(inode, 3);
-			if (IS_ERR(handle)) {
-				ext3_orphan_del(NULL, inode);
-				goto err_out;
-			}
-			ext3_orphan_del(handle, inode);
-			ext3_journal_stop(handle);
-			goto err_out;
-		}
-	}
-
-	if ((attr->ia_valid & ATTR_SIZE) &&
-	    attr->ia_size != i_size_read(inode)) {
-		truncate_setsize(inode, attr->ia_size);
-		ext3_truncate(inode);
-	}
-
-	setattr_copy(inode, attr);
-	mark_inode_dirty(inode);
-
-	if (ia_valid & ATTR_MODE)
-		rc = posix_acl_chmod(inode, inode->i_mode);
-
-err_out:
-	ext3_std_error(inode->i_sb, error);
-	if (!error)
-		error = rc;
-	return error;
-}
-
-
-/*
- * How many blocks doth make a writepage()?
- *
- * With N blocks per page, it may be:
- * N data blocks
- * 2 indirect blocks
- * 2 dindirect blocks
- * 1 tindirect block
- * N+5 bitmap blocks (from the above)
- * N+5 group descriptor summary blocks
- * 1 inode block
- * 1 superblock.
- * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
- *
- * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
- *
- * With ordered or writeback data it's the same, less the N data blocks.
- *
- * If the inode's direct blocks can hold an integral number of pages then a
- * page cannot straddle two indirect blocks, and we can only touch one indirect
- * and dindirect block, and the "5" above becomes "3".
- *
- * This still overestimates under most circumstances.  If we were to pass the
- * start and end offsets in here as well we could do block_to_path() on each
- * block and work out the exact number of indirects which are touched.  Pah.
- */
-
-static int ext3_writepage_trans_blocks(struct inode *inode)
-{
-	int bpp = ext3_journal_blocks_per_page(inode);
-	int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
-	int ret;
-
-	if (ext3_should_journal_data(inode))
-		ret = 3 * (bpp + indirects) + 2;
-	else
-		ret = 2 * (bpp + indirects) + indirects + 2;
-
-#ifdef CONFIG_QUOTA
-	/* We know that the structure was already allocated during dquot_initialize so
-	 * we will be updating only the data blocks + inodes */
-	ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
-#endif
-
-	return ret;
-}
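
Plugging an assumed geometry into the formula above: 4 KB pages with 1 KB blocks give bpp = 4, and since 12 % 4 == 0 a page cannot straddle two indirect blocks, so indirects = 3. data=journal then costs 3 * (4 + 3) + 2 = 23 credits and ordered/writeback costs 2 * (4 + 3) + 3 + 2 = 19, before the quota overhead:

#include <assert.h>

static int writepage_credits(int bpp, int journal_data)
{
	int indirects = (12 % bpp) ? 5 : 3;	/* 12 == EXT3_NDIR_BLOCKS */

	return journal_data ? 3 * (bpp + indirects) + 2
			    : 2 * (bpp + indirects) + indirects + 2;
}

int main(void)
{
	assert(writepage_credits(4, 1) == 23);
	assert(writepage_credits(4, 0) == 19);
	return 0;
}
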
-
-/*
- * The caller must have previously called ext3_reserve_inode_write().
- * Given this, we know that the caller already has write access to iloc->bh.
- */
-int ext3_mark_iloc_dirty(handle_t *handle,
-		struct inode *inode, struct ext3_iloc *iloc)
-{
-	int err = 0;
-
-	/* the do_update_inode consumes one bh->b_count */
-	get_bh(iloc->bh);
-
-	/* ext3_do_update_inode() does journal_dirty_metadata */
-	err = ext3_do_update_inode(handle, inode, iloc);
-	put_bh(iloc->bh);
-	return err;
-}
-
-/*
- * On success, we end up with an outstanding reference count against
- * iloc->bh.  This _must_ be cleaned up later.
- */
-
-int
-ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
-			 struct ext3_iloc *iloc)
-{
-	int err = 0;
-	if (handle) {
-		err = ext3_get_inode_loc(inode, iloc);
-		if (!err) {
-			BUFFER_TRACE(iloc->bh, "get_write_access");
-			err = ext3_journal_get_write_access(handle, iloc->bh);
-			if (err) {
-				brelse(iloc->bh);
-				iloc->bh = NULL;
-			}
-		}
-	}
-	ext3_std_error(inode->i_sb, err);
-	return err;
-}
-
-/*
- * What we do here is to mark the in-core inode as clean with respect to inode
- * dirtiness (it may still be data-dirty).
- * This means that the in-core inode may be reaped by prune_icache
- * without having to perform any I/O.  This is a very good thing,
- * because *any* task may call prune_icache - even ones which
- * have a transaction open against a different journal.
- *
- * Is this cheating?  Not really.  Sure, we haven't written the
- * inode out, but prune_icache isn't a user-visible syncing function.
- * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
- * we start and wait on commits.
- */
-int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
-{
-	struct ext3_iloc iloc;
-	int err;
-
-	might_sleep();
-	trace_ext3_mark_inode_dirty(inode, _RET_IP_);
-	err = ext3_reserve_inode_write(handle, inode, &iloc);
-	if (!err)
-		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
-	return err;
-}
-
-/*
- * ext3_dirty_inode() is called from __mark_inode_dirty()
- *
- * We're really interested in the case where a file is being extended.
- * i_size has been changed by generic_commit_write() and we thus need
- * to include the updated inode in the current transaction.
- *
- * Also, dquot_alloc_space() will always dirty the inode when blocks
- * are allocated to the file.
- *
- * If the inode is marked synchronous, we don't honour that here - doing
- * so would cause a commit on atime updates, which we don't bother doing.
- * We handle synchronous inodes at the highest possible level.
- */
-void ext3_dirty_inode(struct inode *inode, int flags)
-{
-	handle_t *current_handle = ext3_journal_current_handle();
-	handle_t *handle;
-
-	handle = ext3_journal_start(inode, 2);
-	if (IS_ERR(handle))
-		goto out;
-	if (current_handle &&
-		current_handle->h_transaction != handle->h_transaction) {
-		/* This task has a transaction open against a different fs */
-		printk(KERN_EMERG "%s: transactions do not match!\n",
-		       __func__);
-	} else {
-		jbd_debug(5, "marking dirty.  outer handle=%p\n",
-				current_handle);
-		ext3_mark_inode_dirty(handle, inode);
-	}
-	ext3_journal_stop(handle);
-out:
-	return;
-}
-
-#if 0
-/*
- * Bind an inode's backing buffer_head into this transaction, to prevent
- * it from being flushed to disk early.  Unlike
- * ext3_reserve_inode_write, this leaves behind no bh reference and
- * returns no iloc structure, so the caller needs to repeat the iloc
- * lookup to mark the inode dirty later.
- */
-static int ext3_pin_inode(handle_t *handle, struct inode *inode)
-{
-	struct ext3_iloc iloc;
-
-	int err = 0;
-	if (handle) {
-		err = ext3_get_inode_loc(inode, &iloc);
-		if (!err) {
-			BUFFER_TRACE(iloc.bh, "get_write_access");
-			err = journal_get_write_access(handle, iloc.bh);
-			if (!err)
-				err = ext3_journal_dirty_metadata(handle,
-								  iloc.bh);
-			brelse(iloc.bh);
-		}
-	}
-	ext3_std_error(inode->i_sb, err);
-	return err;
-}
-#endif
-
-int ext3_change_inode_journal_flag(struct inode *inode, int val)
-{
-	journal_t *journal;
-	handle_t *handle;
-	int err;
-
-	/*
-	 * We have to be very careful here: changing a data block's
-	 * journaling status dynamically is dangerous.  If we write a
-	 * data block to the journal, change the status and then delete
-	 * that block, we risk forgetting to revoke the old log record
-	 * from the journal and so a subsequent replay can corrupt data.
-	 * So, first we make sure that the journal is empty and that
-	 * nobody is changing anything.
-	 */
-
-	journal = EXT3_JOURNAL(inode);
-	if (is_journal_aborted(journal))
-		return -EROFS;
-
-	journal_lock_updates(journal);
-	journal_flush(journal);
-
-	/*
-	 * OK, there are no updates running now, and all cached data is
-	 * synced to disk.  We are now in a completely consistent state
-	 * which doesn't have anything in the journal, and we know that
-	 * no filesystem updates are running, so it is safe to modify
-	 * the inode's in-core data-journaling state flag now.
-	 */
-
-	if (val)
-		EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
-	else
-		EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
-	ext3_set_aops(inode);
-
-	journal_unlock_updates(journal);
-
-	/* Finally we can mark the inode as dirty. */
-
-	handle = ext3_journal_start(inode, 1);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	err = ext3_mark_inode_dirty(handle, inode);
-	handle->h_sync = 1;
-	ext3_journal_stop(handle);
-	ext3_std_error(inode->i_sb, err);
-
-	return err;
-}
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
deleted file mode 100644
index 4d96e9a..0000000
--- a/fs/ext3/ioctl.c
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * linux/fs/ext3/ioctl.c
- *
- * Copyright (C) 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- */
-
-#include <linux/mount.h>
-#include <linux/compat.h>
-#include <asm/uaccess.h>
-#include "ext3.h"
-
-long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-	struct inode *inode = file_inode(filp);
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	unsigned int flags;
-	unsigned short rsv_window_size;
-
-	ext3_debug ("cmd = %u, arg = %lu\n", cmd, arg);
-
-	switch (cmd) {
-	case EXT3_IOC_GETFLAGS:
-		ext3_get_inode_flags(ei);
-		flags = ei->i_flags & EXT3_FL_USER_VISIBLE;
-		return put_user(flags, (int __user *) arg);
-	case EXT3_IOC_SETFLAGS: {
-		handle_t *handle = NULL;
-		int err;
-		struct ext3_iloc iloc;
-		unsigned int oldflags;
-		unsigned int jflag;
-
-		if (!inode_owner_or_capable(inode))
-			return -EACCES;
-
-		if (get_user(flags, (int __user *) arg))
-			return -EFAULT;
-
-		err = mnt_want_write_file(filp);
-		if (err)
-			return err;
-
-		flags = ext3_mask_flags(inode->i_mode, flags);
-
-		mutex_lock(&inode->i_mutex);
-
-		/* Is it a quota file? Do not allow the user to mess with it. */
-		err = -EPERM;
-		if (IS_NOQUOTA(inode))
-			goto flags_out;
-
-		oldflags = ei->i_flags;
-
-		/* The JOURNAL_DATA flag is modifiable only by root */
-		jflag = flags & EXT3_JOURNAL_DATA_FL;
-
-		/*
-		 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
-		 * the relevant capability.
-		 *
-		 * This test looks nicer. Thanks to Pauline Middelink
-		 */
-		if ((flags ^ oldflags) & (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL)) {
-			if (!capable(CAP_LINUX_IMMUTABLE))
-				goto flags_out;
-		}
-
-		/*
-		 * The JOURNAL_DATA flag can only be changed by
-		 * the relevant capability.
-		 */
-		if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL)) {
-			if (!capable(CAP_SYS_RESOURCE))
-				goto flags_out;
-		}
-
-		handle = ext3_journal_start(inode, 1);
-		if (IS_ERR(handle)) {
-			err = PTR_ERR(handle);
-			goto flags_out;
-		}
-		if (IS_SYNC(inode))
-			handle->h_sync = 1;
-		err = ext3_reserve_inode_write(handle, inode, &iloc);
-		if (err)
-			goto flags_err;
-
-		flags = flags & EXT3_FL_USER_MODIFIABLE;
-		flags |= oldflags & ~EXT3_FL_USER_MODIFIABLE;
-		ei->i_flags = flags;
-
-		ext3_set_inode_flags(inode);
-		inode->i_ctime = CURRENT_TIME_SEC;
-
-		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
-flags_err:
-		ext3_journal_stop(handle);
-		if (err)
-			goto flags_out;
-
-		if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL))
-			err = ext3_change_inode_journal_flag(inode, jflag);
-flags_out:
-		mutex_unlock(&inode->i_mutex);
-		mnt_drop_write_file(filp);
-		return err;
-	}
-	case EXT3_IOC_GETVERSION:
-	case EXT3_IOC_GETVERSION_OLD:
-		return put_user(inode->i_generation, (int __user *) arg);
-	case EXT3_IOC_SETVERSION:
-	case EXT3_IOC_SETVERSION_OLD: {
-		handle_t *handle;
-		struct ext3_iloc iloc;
-		__u32 generation;
-		int err;
-
-		if (!inode_owner_or_capable(inode))
-			return -EPERM;
-
-		err = mnt_want_write_file(filp);
-		if (err)
-			return err;
-		if (get_user(generation, (int __user *) arg)) {
-			err = -EFAULT;
-			goto setversion_out;
-		}
-
-		mutex_lock(&inode->i_mutex);
-		handle = ext3_journal_start(inode, 1);
-		if (IS_ERR(handle)) {
-			err = PTR_ERR(handle);
-			goto unlock_out;
-		}
-		err = ext3_reserve_inode_write(handle, inode, &iloc);
-		if (err == 0) {
-			inode->i_ctime = CURRENT_TIME_SEC;
-			inode->i_generation = generation;
-			err = ext3_mark_iloc_dirty(handle, inode, &iloc);
-		}
-		ext3_journal_stop(handle);
-
-unlock_out:
-		mutex_unlock(&inode->i_mutex);
-setversion_out:
-		mnt_drop_write_file(filp);
-		return err;
-	}
-	case EXT3_IOC_GETRSVSZ:
-		if (test_opt(inode->i_sb, RESERVATION)
-			&& S_ISREG(inode->i_mode)
-			&& ei->i_block_alloc_info) {
-			rsv_window_size = ei->i_block_alloc_info->rsv_window_node.rsv_goal_size;
-			return put_user(rsv_window_size, (int __user *)arg);
-		}
-		return -ENOTTY;
-	case EXT3_IOC_SETRSVSZ: {
-		int err;
-
-		if (!test_opt(inode->i_sb, RESERVATION) || !S_ISREG(inode->i_mode))
-			return -ENOTTY;
-
-		err = mnt_want_write_file(filp);
-		if (err)
-			return err;
-
-		if (!inode_owner_or_capable(inode)) {
-			err = -EACCES;
-			goto setrsvsz_out;
-		}
-
-		if (get_user(rsv_window_size, (int __user *)arg)) {
-			err = -EFAULT;
-			goto setrsvsz_out;
-		}
-
-		if (rsv_window_size > EXT3_MAX_RESERVE_BLOCKS)
-			rsv_window_size = EXT3_MAX_RESERVE_BLOCKS;
-
-		/*
-		 * We need to allocate the reservation structure for this
-		 * inode before setting the window size.
-		 */
-		mutex_lock(&ei->truncate_mutex);
-		if (!ei->i_block_alloc_info)
-			ext3_init_block_alloc_info(inode);
-
-		if (ei->i_block_alloc_info) {
-			struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
-			rsv->rsv_goal_size = rsv_window_size;
-		}
-		mutex_unlock(&ei->truncate_mutex);
-setrsvsz_out:
-		mnt_drop_write_file(filp);
-		return err;
-	}
-	case EXT3_IOC_GROUP_EXTEND: {
-		ext3_fsblk_t n_blocks_count;
-		struct super_block *sb = inode->i_sb;
-		int err, err2;
-
-		if (!capable(CAP_SYS_RESOURCE))
-			return -EPERM;
-
-		err = mnt_want_write_file(filp);
-		if (err)
-			return err;
-
-		if (get_user(n_blocks_count, (__u32 __user *)arg)) {
-			err = -EFAULT;
-			goto group_extend_out;
-		}
-		err = ext3_group_extend(sb, EXT3_SB(sb)->s_es, n_blocks_count);
-		journal_lock_updates(EXT3_SB(sb)->s_journal);
-		err2 = journal_flush(EXT3_SB(sb)->s_journal);
-		journal_unlock_updates(EXT3_SB(sb)->s_journal);
-		if (err == 0)
-			err = err2;
-group_extend_out:
-		mnt_drop_write_file(filp);
-		return err;
-	}
-	case EXT3_IOC_GROUP_ADD: {
-		struct ext3_new_group_data input;
-		struct super_block *sb = inode->i_sb;
-		int err, err2;
-
-		if (!capable(CAP_SYS_RESOURCE))
-			return -EPERM;
-
-		err = mnt_want_write_file(filp);
-		if (err)
-			return err;
-
-		if (copy_from_user(&input, (struct ext3_new_group_input __user *)arg,
-				sizeof(input))) {
-			err = -EFAULT;
-			goto group_add_out;
-		}
-
-		err = ext3_group_add(sb, &input);
-		journal_lock_updates(EXT3_SB(sb)->s_journal);
-		err2 = journal_flush(EXT3_SB(sb)->s_journal);
-		journal_unlock_updates(EXT3_SB(sb)->s_journal);
-		if (err == 0)
-			err = err2;
-group_add_out:
-		mnt_drop_write_file(filp);
-		return err;
-	}
-	case FITRIM: {
-
-		struct super_block *sb = inode->i_sb;
-		struct fstrim_range range;
-		int ret = 0;
-
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (copy_from_user(&range, (struct fstrim_range __user *)arg,
-				   sizeof(range)))
-			return -EFAULT;
-
-		ret = ext3_trim_fs(sb, &range);
-		if (ret < 0)
-			return ret;
-
-		if (copy_to_user((struct fstrim_range __user *)arg, &range,
-				 sizeof(range)))
-			return -EFAULT;
-
-		return 0;
-	}
-
-	default:
-		return -ENOTTY;
-	}
-}
-
-#ifdef CONFIG_COMPAT
-long ext3_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	/* These are just misnamed; they actually get/put an int from/to user space */
-	switch (cmd) {
-	case EXT3_IOC32_GETFLAGS:
-		cmd = EXT3_IOC_GETFLAGS;
-		break;
-	case EXT3_IOC32_SETFLAGS:
-		cmd = EXT3_IOC_SETFLAGS;
-		break;
-	case EXT3_IOC32_GETVERSION:
-		cmd = EXT3_IOC_GETVERSION;
-		break;
-	case EXT3_IOC32_SETVERSION:
-		cmd = EXT3_IOC_SETVERSION;
-		break;
-	case EXT3_IOC32_GROUP_EXTEND:
-		cmd = EXT3_IOC_GROUP_EXTEND;
-		break;
-	case EXT3_IOC32_GETVERSION_OLD:
-		cmd = EXT3_IOC_GETVERSION_OLD;
-		break;
-	case EXT3_IOC32_SETVERSION_OLD:
-		cmd = EXT3_IOC_SETVERSION_OLD;
-		break;
-#ifdef CONFIG_JBD_DEBUG
-	case EXT3_IOC32_WAIT_FOR_READONLY:
-		cmd = EXT3_IOC_WAIT_FOR_READONLY;
-		break;
-#endif
-	case EXT3_IOC32_GETRSVSZ:
-		cmd = EXT3_IOC_GETRSVSZ;
-		break;
-	case EXT3_IOC32_SETRSVSZ:
-		cmd = EXT3_IOC_SETRSVSZ;
-		break;
-	case EXT3_IOC_GROUP_ADD:
-		break;
-	default:
-		return -ENOIOCTLCMD;
-	}
-	return ext3_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
-}
-#endif
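The compat handler above is nothing ext3-specific: it is the standard 32-bit-to-native command remap followed by a pointer-width fix-up via compat_ptr(). A minimal sketch of the same shape, with hypothetical FOO_* commands and foo_ioctl() standing in for real symbols:

	/*
	 * Sketch of the compat-ioctl remap pattern; FOO_IOC32_GETX,
	 * FOO_IOC_GETX and foo_ioctl() are hypothetical placeholders.
	 */
	static long foo_compat_ioctl(struct file *file, unsigned int cmd,
				     unsigned long arg)
	{
		switch (cmd) {
		case FOO_IOC32_GETX:		/* command from a 32-bit task... */
			cmd = FOO_IOC_GETX;	/* ...remapped to the native one */
			break;
		default:
			return -ENOIOCTLCMD;	/* unknown: let the VFS decide */
		}
		/* compat_ptr() widens the 32-bit user pointer to a native one */
		return foo_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
	}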
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
deleted file mode 100644
index c9e767c..0000000
--- a/fs/ext3/namei.c
+++ /dev/null
@@ -1,2586 +0,0 @@
-/*
- *  linux/fs/ext3/namei.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/namei.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- *  Directory entry file type support and forward compatibility hooks
- *	for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
- *  Hash Tree Directory indexing (c)
- *	Daniel Phillips, 2001
- *  Hash Tree Directory indexing porting
- *	Christopher Li, 2002
- *  Hash Tree Directory indexing cleanup
- *	Theodore Ts'o, 2002
- */
-
-#include <linux/quotaops.h>
-#include "ext3.h"
-#include "namei.h"
-#include "xattr.h"
-#include "acl.h"
-
-/*
- * define how far ahead to read directories while searching them.
- */
-#define NAMEI_RA_CHUNKS  2
-#define NAMEI_RA_BLOCKS  4
-#define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
-
-static struct buffer_head *ext3_append(handle_t *handle,
-					struct inode *inode,
-					u32 *block, int *err)
-{
-	struct buffer_head *bh;
-
-	*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
-
-	if ((bh = ext3_dir_bread(handle, inode, *block, 1, err))) {
-		inode->i_size += inode->i_sb->s_blocksize;
-		EXT3_I(inode)->i_disksize = inode->i_size;
-		*err = ext3_journal_get_write_access(handle, bh);
-		if (*err) {
-			brelse(bh);
-			bh = NULL;
-		}
-	}
-	return bh;
-}
-
-#ifndef assert
-#define assert(test) J_ASSERT(test)
-#endif
-
-#ifdef DX_DEBUG
-#define dxtrace(command) command
-#else
-#define dxtrace(command)
-#endif
-
-struct fake_dirent
-{
-	__le32 inode;
-	__le16 rec_len;
-	u8 name_len;
-	u8 file_type;
-};
-
-struct dx_countlimit
-{
-	__le16 limit;
-	__le16 count;
-};
-
-struct dx_entry
-{
-	__le32 hash;
-	__le32 block;
-};
-
-/*
- * dx_root_info is laid out so that if it should somehow get overlaid by a
- * dirent the two low bits of the hash version will be zero.  Therefore, the
- * hash version mod 4 should never be 0.  Sincerely, the paranoia department.
- */
-
-struct dx_root
-{
-	struct fake_dirent dot;
-	char dot_name[4];
-	struct fake_dirent dotdot;
-	char dotdot_name[4];
-	struct dx_root_info
-	{
-		__le32 reserved_zero;
-		u8 hash_version;
-		u8 info_length; /* 8 */
-		u8 indirect_levels;
-		u8 unused_flags;
-	}
-	info;
-	struct dx_entry	entries[0];
-};
-
-struct dx_node
-{
-	struct fake_dirent fake;
-	struct dx_entry	entries[0];
-};
-
-
-struct dx_frame
-{
-	struct buffer_head *bh;
-	struct dx_entry *entries;
-	struct dx_entry *at;
-};
-
-struct dx_map_entry
-{
-	u32 hash;
-	u16 offs;
-	u16 size;
-};
-
-static inline unsigned dx_get_block (struct dx_entry *entry);
-static void dx_set_block (struct dx_entry *entry, unsigned value);
-static inline unsigned dx_get_hash (struct dx_entry *entry);
-static void dx_set_hash (struct dx_entry *entry, unsigned value);
-static unsigned dx_get_count (struct dx_entry *entries);
-static unsigned dx_get_limit (struct dx_entry *entries);
-static void dx_set_count (struct dx_entry *entries, unsigned value);
-static void dx_set_limit (struct dx_entry *entries, unsigned value);
-static unsigned dx_root_limit (struct inode *dir, unsigned infosize);
-static unsigned dx_node_limit (struct inode *dir);
-static struct dx_frame *dx_probe(struct qstr *entry,
-				 struct inode *dir,
-				 struct dx_hash_info *hinfo,
-				 struct dx_frame *frame,
-				 int *err);
-static void dx_release (struct dx_frame *frames);
-static int dx_make_map(struct ext3_dir_entry_2 *de, unsigned blocksize,
-			struct dx_hash_info *hinfo, struct dx_map_entry map[]);
-static void dx_sort_map(struct dx_map_entry *map, unsigned count);
-static struct ext3_dir_entry_2 *dx_move_dirents (char *from, char *to,
-		struct dx_map_entry *offsets, int count);
-static struct ext3_dir_entry_2 *dx_pack_dirents(char *base, unsigned blocksize);
-static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
-static int ext3_htree_next_block(struct inode *dir, __u32 hash,
-				 struct dx_frame *frame,
-				 struct dx_frame *frames,
-				 __u32 *start_hash);
-static struct buffer_head * ext3_dx_find_entry(struct inode *dir,
-			struct qstr *entry, struct ext3_dir_entry_2 **res_dir,
-			int *err);
-static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
-			     struct inode *inode);
-
-/*
- * p is at least 6 bytes before the end of the page
- */
-static inline struct ext3_dir_entry_2 *
-ext3_next_entry(struct ext3_dir_entry_2 *p)
-{
-	return (struct ext3_dir_entry_2 *)((char *)p +
-		ext3_rec_len_from_disk(p->rec_len));
-}
-
-/*
- * Future: use the high four bits of the block for coalesce-on-delete
- * flags.  Mask them off for now.
- */
-
-static inline unsigned dx_get_block (struct dx_entry *entry)
-{
-	return le32_to_cpu(entry->block) & 0x00ffffff;
-}
-
-static inline void dx_set_block (struct dx_entry *entry, unsigned value)
-{
-	entry->block = cpu_to_le32(value);
-}
-
-static inline unsigned dx_get_hash (struct dx_entry *entry)
-{
-	return le32_to_cpu(entry->hash);
-}
-
-static inline void dx_set_hash (struct dx_entry *entry, unsigned value)
-{
-	entry->hash = cpu_to_le32(value);
-}
-
-static inline unsigned dx_get_count (struct dx_entry *entries)
-{
-	return le16_to_cpu(((struct dx_countlimit *) entries)->count);
-}
-
-static inline unsigned dx_get_limit (struct dx_entry *entries)
-{
-	return le16_to_cpu(((struct dx_countlimit *) entries)->limit);
-}
-
-static inline void dx_set_count (struct dx_entry *entries, unsigned value)
-{
-	((struct dx_countlimit *) entries)->count = cpu_to_le16(value);
-}
-
-static inline void dx_set_limit (struct dx_entry *entries, unsigned value)
-{
-	((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
-}
-
-static inline unsigned dx_root_limit (struct inode *dir, unsigned infosize)
-{
-	unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(1) -
-		EXT3_DIR_REC_LEN(2) - infosize;
-	return entry_space / sizeof(struct dx_entry);
-}
-
-static inline unsigned dx_node_limit (struct inode *dir)
-{
-	unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(0);
-	return entry_space / sizeof(struct dx_entry);
-}
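To make the index fanout concrete: with a 4096-byte block, EXT3_DIR_REC_LEN rounding (name_len + 8, rounded up to a multiple of 4), an 8-byte dx_root_info and an 8-byte dx_entry, the two formulas above give 508 root entries and 511 node entries. A standalone userspace check of that arithmetic (the macro mirrors, but is not, the kernel's):

	/* Userspace sketch checking the dx fanout formulas above. */
	#include <stdio.h>

	#define DIR_REC_LEN(len)	(((len) + 8 + 3) & ~3)
	#define DX_ENTRY_SIZE		8	/* two __le32 fields */

	int main(void)
	{
		unsigned bs = 4096, infosize = 8;	/* sizeof dx_root_info */
		unsigned root = (bs - DIR_REC_LEN(1) - DIR_REC_LEN(2) - infosize)
					/ DX_ENTRY_SIZE;
		unsigned node = (bs - DIR_REC_LEN(0)) / DX_ENTRY_SIZE;

		printf("root limit %u, node limit %u\n", root, node); /* 508, 511 */
		return 0;
	}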
-
-/*
- * Debug
- */
-#ifdef DX_DEBUG
-static void dx_show_index (char * label, struct dx_entry *entries)
-{
-        int i, n = dx_get_count (entries);
-        printk("%s index ", label);
-        for (i = 0; i < n; i++)
-        {
-                printk("%x->%u ", i? dx_get_hash(entries + i): 0, dx_get_block(entries + i));
-        }
-        printk("\n");
-}
-
-struct stats
-{
-	unsigned names;
-	unsigned space;
-	unsigned bcount;
-};
-
-static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext3_dir_entry_2 *de,
-				 int size, int show_names)
-{
-	unsigned names = 0, space = 0;
-	char *base = (char *) de;
-	struct dx_hash_info h = *hinfo;
-
-	printk("names: ");
-	while ((char *) de < base + size)
-	{
-		if (de->inode)
-		{
-			if (show_names)
-			{
-				int len = de->name_len;
-				char *name = de->name;
-				while (len--) printk("%c", *name++);
-				ext3fs_dirhash(de->name, de->name_len, &h);
-				printk(":%x.%u ", h.hash,
-				       (unsigned) ((char *) de - base));
-			}
-			space += EXT3_DIR_REC_LEN(de->name_len);
-			names++;
-		}
-		de = ext3_next_entry(de);
-	}
-	printk("(%i)\n", names);
-	return (struct stats) { names, space, 1 };
-}
-
-struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
-			     struct dx_entry *entries, int levels)
-{
-	unsigned blocksize = dir->i_sb->s_blocksize;
-	unsigned count = dx_get_count (entries), names = 0, space = 0, i;
-	unsigned bcount = 0;
-	struct buffer_head *bh;
-	int err;
-	printk("%i indexed blocks...\n", count);
-	for (i = 0; i < count; i++, entries++)
-	{
-		u32 block = dx_get_block(entries), hash = i? dx_get_hash(entries): 0;
-		u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
-		struct stats stats;
-		printk("%s%3u:%03u hash %8x/%8x ",levels?"":"   ", i, block, hash, range);
-		if (!(bh = ext3_bread(NULL, dir, block, 0, &err))) continue;
-		stats = levels?
-		   dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
-		   dx_show_leaf(hinfo, (struct ext3_dir_entry_2 *) bh->b_data, blocksize, 0);
-		names += stats.names;
-		space += stats.space;
-		bcount += stats.bcount;
-		brelse (bh);
-	}
-	if (bcount)
-		printk("%snames %u, fullness %u (%u%%)\n", levels?"":"   ",
-			names, space/bcount,(space/bcount)*100/blocksize);
-	return (struct stats) { names, space, bcount};
-}
-#endif /* DX_DEBUG */
-
-/*
- * Probe for a directory leaf block to search.
- *
- * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
- * error in the directory index, and the caller should fall back to
- * searching the directory normally.  The callers of dx_probe **MUST**
- * check for this error code, and make sure it never gets reflected
- * back to userspace.
- */
-static struct dx_frame *
-dx_probe(struct qstr *entry, struct inode *dir,
-	 struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
-{
-	unsigned count, indirect;
-	struct dx_entry *at, *entries, *p, *q, *m;
-	struct dx_root *root;
-	struct buffer_head *bh;
-	struct dx_frame *frame = frame_in;
-	u32 hash;
-
-	frame->bh = NULL;
-	if (!(bh = ext3_dir_bread(NULL, dir, 0, 0, err))) {
-		*err = ERR_BAD_DX_DIR;
-		goto fail;
-	}
-	root = (struct dx_root *) bh->b_data;
-	if (root->info.hash_version != DX_HASH_TEA &&
-	    root->info.hash_version != DX_HASH_HALF_MD4 &&
-	    root->info.hash_version != DX_HASH_LEGACY) {
-		ext3_warning(dir->i_sb, __func__,
-			     "Unrecognised inode hash code %d",
-			     root->info.hash_version);
-		brelse(bh);
-		*err = ERR_BAD_DX_DIR;
-		goto fail;
-	}
-	hinfo->hash_version = root->info.hash_version;
-	if (hinfo->hash_version <= DX_HASH_TEA)
-		hinfo->hash_version += EXT3_SB(dir->i_sb)->s_hash_unsigned;
-	hinfo->seed = EXT3_SB(dir->i_sb)->s_hash_seed;
-	if (entry)
-		ext3fs_dirhash(entry->name, entry->len, hinfo);
-	hash = hinfo->hash;
-
-	if (root->info.unused_flags & 1) {
-		ext3_warning(dir->i_sb, __func__,
-			     "Unimplemented inode hash flags: %#06x",
-			     root->info.unused_flags);
-		brelse(bh);
-		*err = ERR_BAD_DX_DIR;
-		goto fail;
-	}
-
-	if ((indirect = root->info.indirect_levels) > 1) {
-		ext3_warning(dir->i_sb, __func__,
-			     "Unimplemented inode hash depth: %#06x",
-			     root->info.indirect_levels);
-		brelse(bh);
-		*err = ERR_BAD_DX_DIR;
-		goto fail;
-	}
-
-	entries = (struct dx_entry *) (((char *)&root->info) +
-				       root->info.info_length);
-
-	if (dx_get_limit(entries) != dx_root_limit(dir,
-						   root->info.info_length)) {
-		ext3_warning(dir->i_sb, __func__,
-			     "dx entry: limit != root limit");
-		brelse(bh);
-		*err = ERR_BAD_DX_DIR;
-		goto fail;
-	}
-
-	dxtrace (printk("Look up %x", hash));
-	while (1)
-	{
-		count = dx_get_count(entries);
-		if (!count || count > dx_get_limit(entries)) {
-			ext3_warning(dir->i_sb, __func__,
-				     "dx entry: no count or count > limit");
-			brelse(bh);
-			*err = ERR_BAD_DX_DIR;
-			goto fail2;
-		}
-
-		p = entries + 1;
-		q = entries + count - 1;
-		while (p <= q)
-		{
-			m = p + (q - p)/2;
-			dxtrace(printk("."));
-			if (dx_get_hash(m) > hash)
-				q = m - 1;
-			else
-				p = m + 1;
-		}
-
-		if (0) /* linear search cross check */
-		{
-			unsigned n = count - 1;
-			at = entries;
-			while (n--)
-			{
-				dxtrace(printk(","));
-				if (dx_get_hash(++at) > hash)
-				{
-					at--;
-					break;
-				}
-			}
-			assert (at == p - 1);
-		}
-
-		at = p - 1;
-		dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
-		frame->bh = bh;
-		frame->entries = entries;
-		frame->at = at;
-		if (!indirect--) return frame;
-		if (!(bh = ext3_dir_bread(NULL, dir, dx_get_block(at), 0, err))) {
-			*err = ERR_BAD_DX_DIR;
-			goto fail2;
-		}
-		at = entries = ((struct dx_node *) bh->b_data)->entries;
-		if (dx_get_limit(entries) != dx_node_limit (dir)) {
-			ext3_warning(dir->i_sb, __func__,
-				     "dx entry: limit != node limit");
-			brelse(bh);
-			*err = ERR_BAD_DX_DIR;
-			goto fail2;
-		}
-		frame++;
-		frame->bh = NULL;
-	}
-fail2:
-	while (frame >= frame_in) {
-		brelse(frame->bh);
-		frame--;
-	}
-fail:
-	if (*err == ERR_BAD_DX_DIR)
-		ext3_warning(dir->i_sb, __func__,
-			     "Corrupt dir inode %ld, running e2fsck is "
-			     "recommended.", dir->i_ino);
-	return NULL;
-}
-
-static void dx_release (struct dx_frame *frames)
-{
-	if (frames[0].bh == NULL)
-		return;
-
-	if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
-		brelse(frames[1].bh);
-	brelse(frames[0].bh);
-}
-
-/*
- * This function increments the frame pointer to search the next leaf
- * block, and reads in the intervening index nodes when the search
- * needs to continue.  Whether it continues is controlled by the
- * hash parameter.  If the hash value is even, then
- * the search is only continued if the next block starts with that
- * hash value.  This is used if we are searching for a specific file.
- *
- * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
- *
- * This function returns 1 if the caller should continue to search,
- * or 0 if it should not.  If there is an error reading one of the
- * index blocks, it will return a negative error code.
- *
- * If start_hash is non-null, it will be filled in with the starting
- * hash of the next page.
- */
-static int ext3_htree_next_block(struct inode *dir, __u32 hash,
-				 struct dx_frame *frame,
-				 struct dx_frame *frames,
-				 __u32 *start_hash)
-{
-	struct dx_frame *p;
-	struct buffer_head *bh;
-	int err, num_frames = 0;
-	__u32 bhash;
-
-	p = frame;
-	/*
-	 * Find the next leaf page by incrementing the frame pointer.
-	 * If we run out of entries in the interior node, loop around and
-	 * increment pointer in the parent node.  When we break out of
-	 * this loop, num_frames indicates the number of interior
-	 * nodes that need to be read.
-	 */
-	while (1) {
-		if (++(p->at) < p->entries + dx_get_count(p->entries))
-			break;
-		if (p == frames)
-			return 0;
-		num_frames++;
-		p--;
-	}
-
-	/*
-	 * If the hash is 1, then continue only if the next page has a
-	 * continuation hash of any value.  This is used for readdir
-	 * handling.  Otherwise, check to see if the hash matches the
-	 * desired continuation hash.  If it doesn't, return, since
-	 * there's no point in reading the successive index pages.
-	 */
-	bhash = dx_get_hash(p->at);
-	if (start_hash)
-		*start_hash = bhash;
-	if ((hash & 1) == 0) {
-		if ((bhash & ~1) != hash)
-			return 0;
-	}
-	/*
-	 * If the hash is HASH_NB_ALWAYS, we always go to the next
-	 * block, so no check is necessary.
-	 */
-	while (num_frames--) {
-		if (!(bh = ext3_dir_bread(NULL, dir, dx_get_block(p->at),
-					  0, &err)))
-			return err; /* Failure */
-		p++;
-		brelse (p->bh);
-		p->bh = bh;
-		p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
-	}
-	return 1;
-}
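The parity convention here is easy to miss: lookup hashes always have the low bit clear, and an index hash with the low bit set marks a continuation of the previous hash value, so for hash h only blocks starting at h or h|1 are worth reading. That is exactly what the (bhash & ~1) != hash test encodes; as an isolated sketch:

	/* Sketch of the continuation-hash test above (not kernel code). */
	static int next_block_may_match(unsigned hash, unsigned next_start)
	{
		/* true when next_start is hash itself or its continuation hash|1 */
		return (next_start & ~1u) == hash;
	}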
-
-
-/*
- * This function fills a red-black tree with information from a
- * directory block.  It returns the number of directory entries loaded
- * into the tree.  If there is an error it is returned in err.
- */
-static int htree_dirblock_to_tree(struct file *dir_file,
-				  struct inode *dir, int block,
-				  struct dx_hash_info *hinfo,
-				  __u32 start_hash, __u32 start_minor_hash)
-{
-	struct buffer_head *bh;
-	struct ext3_dir_entry_2 *de, *top;
-	int err = 0, count = 0;
-
-	dxtrace(printk("In htree dirblock_to_tree: block %d\n", block));
-
-	if (!(bh = ext3_dir_bread(NULL, dir, block, 0, &err)))
-		return err;
-
-	de = (struct ext3_dir_entry_2 *) bh->b_data;
-	top = (struct ext3_dir_entry_2 *) ((char *) de +
-					   dir->i_sb->s_blocksize -
-					   EXT3_DIR_REC_LEN(0));
-	for (; de < top; de = ext3_next_entry(de)) {
-		if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
-					(block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb))
-						+((char *)de - bh->b_data))) {
-			/* silently ignore the rest of the block */
-			break;
-		}
-		ext3fs_dirhash(de->name, de->name_len, hinfo);
-		if ((hinfo->hash < start_hash) ||
-		    ((hinfo->hash == start_hash) &&
-		     (hinfo->minor_hash < start_minor_hash)))
-			continue;
-		if (de->inode == 0)
-			continue;
-		if ((err = ext3_htree_store_dirent(dir_file,
-				   hinfo->hash, hinfo->minor_hash, de)) != 0) {
-			brelse(bh);
-			return err;
-		}
-		count++;
-	}
-	brelse(bh);
-	return count;
-}
-
-
-/*
- * This function fills a red-black tree with information from a
- * directory.  We start scanning the directory in hash order, starting
- * at start_hash and start_minor_hash.
- *
- * This function returns the number of entries inserted into the tree,
- * or a negative error code.
- */
-int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
-			 __u32 start_minor_hash, __u32 *next_hash)
-{
-	struct dx_hash_info hinfo;
-	struct ext3_dir_entry_2 *de;
-	struct dx_frame frames[2], *frame;
-	struct inode *dir;
-	int block, err;
-	int count = 0;
-	int ret;
-	__u32 hashval;
-
-	dxtrace(printk("In htree_fill_tree, start hash: %x:%x\n", start_hash,
-		       start_minor_hash));
-	dir = file_inode(dir_file);
-	if (!(EXT3_I(dir)->i_flags & EXT3_INDEX_FL)) {
-		hinfo.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
-		if (hinfo.hash_version <= DX_HASH_TEA)
-			hinfo.hash_version +=
-				EXT3_SB(dir->i_sb)->s_hash_unsigned;
-		hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
-		count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
-					       start_hash, start_minor_hash);
-		*next_hash = ~0;
-		return count;
-	}
-	hinfo.hash = start_hash;
-	hinfo.minor_hash = 0;
-	frame = dx_probe(NULL, file_inode(dir_file), &hinfo, frames, &err);
-	if (!frame)
-		return err;
-
-	/* Add '.' and '..' from the htree header */
-	if (!start_hash && !start_minor_hash) {
-		de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data;
-		if ((err = ext3_htree_store_dirent(dir_file, 0, 0, de)) != 0)
-			goto errout;
-		count++;
-	}
-	if (start_hash < 2 || (start_hash == 2 && start_minor_hash == 0)) {
-		de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data;
-		de = ext3_next_entry(de);
-		if ((err = ext3_htree_store_dirent(dir_file, 2, 0, de)) != 0)
-			goto errout;
-		count++;
-	}
-
-	while (1) {
-		block = dx_get_block(frame->at);
-		ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
-					     start_hash, start_minor_hash);
-		if (ret < 0) {
-			err = ret;
-			goto errout;
-		}
-		count += ret;
-		hashval = ~0;
-		ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS,
-					    frame, frames, &hashval);
-		*next_hash = hashval;
-		if (ret < 0) {
-			err = ret;
-			goto errout;
-		}
-		/*
-		 * Stop if:  (a) there are no more entries, or
-		 * (b) we have inserted at least one entry and the
-		 * next hash value is not a continuation
-		 */
-		if ((ret == 0) ||
-		    (count && ((hashval & 1) == 0)))
-			break;
-	}
-	dx_release(frames);
-	dxtrace(printk("Fill tree: returned %d entries, next hash: %x\n",
-		       count, *next_hash));
-	return count;
-errout:
-	dx_release(frames);
-	return (err);
-}
-
-
-/*
- * Directory block splitting, compacting
- */
-
-/*
- * Create map of hash values, offsets, and sizes, stored at end of block.
- * Returns number of entries mapped.
- */
-static int dx_make_map(struct ext3_dir_entry_2 *de, unsigned blocksize,
-		struct dx_hash_info *hinfo, struct dx_map_entry *map_tail)
-{
-	int count = 0;
-	char *base = (char *) de;
-	struct dx_hash_info h = *hinfo;
-
-	while ((char *) de < base + blocksize)
-	{
-		if (de->name_len && de->inode) {
-			ext3fs_dirhash(de->name, de->name_len, &h);
-			map_tail--;
-			map_tail->hash = h.hash;
-			map_tail->offs = (u16) ((char *) de - base);
-			map_tail->size = le16_to_cpu(de->rec_len);
-			count++;
-			cond_resched();
-		}
-		/* XXX: do we need to check rec_len == 0 case? -Chris */
-		de = ext3_next_entry(de);
-	}
-	return count;
-}
-
-/* Sort map by hash value */
-static void dx_sort_map (struct dx_map_entry *map, unsigned count)
-{
-        struct dx_map_entry *p, *q, *top = map + count - 1;
-        int more;
-        /* Combsort until bubble sort doesn't suck */
-        while (count > 2)
-	{
-                count = count*10/13;
-                if (count - 9 < 2) /* 9, 10 -> 11 */
-                        count = 11;
-                for (p = top, q = p - count; q >= map; p--, q--)
-                        if (p->hash < q->hash)
-                                swap(*p, *q);
-        }
-        /* Garden variety bubble sort */
-        do {
-                more = 0;
-                q = top;
-                while (q-- > map)
-		{
-                        if (q[1].hash >= q[0].hash)
-				continue;
-                        swap(*(q+1), *q);
-                        more = 1;
-		}
-	} while (more);
-}
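The sort above is a comb sort with the classic 10/13 shrink factor (plus the folklore fix-up that gaps 9 and 10 sort better as 11), finished by a bubble pass once the gap is small. The same hybrid on plain integers, as a standalone sketch:

	/* Userspace sketch of the comb+bubble hybrid used by dx_sort_map(). */
	static void comb_sort(unsigned *a, unsigned count)
	{
		unsigned gap = count, i, t;
		int more;

		while (gap > 2) {
			gap = gap * 10 / 13;
			if (gap - 9 < 2)	/* 9, 10 -> 11, as above */
				gap = 11;
			for (i = gap; i < count; i++)
				if (a[i - gap] > a[i]) {
					t = a[i]; a[i] = a[i - gap]; a[i - gap] = t;
				}
		}
		do {	/* garden variety bubble sort */
			more = 0;
			for (i = 1; i < count; i++)
				if (a[i - 1] > a[i]) {
					t = a[i]; a[i] = a[i - 1]; a[i - 1] = t;
					more = 1;
				}
		} while (more);
	}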
-
-static void dx_insert_block(struct dx_frame *frame, u32 hash, u32 block)
-{
-	struct dx_entry *entries = frame->entries;
-	struct dx_entry *old = frame->at, *new = old + 1;
-	int count = dx_get_count(entries);
-
-	assert(count < dx_get_limit(entries));
-	assert(old < entries + count);
-	memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
-	dx_set_hash(new, hash);
-	dx_set_block(new, block);
-	dx_set_count(entries, count + 1);
-}
-
-static void ext3_update_dx_flag(struct inode *inode)
-{
-	if (!EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
-				     EXT3_FEATURE_COMPAT_DIR_INDEX))
-		EXT3_I(inode)->i_flags &= ~EXT3_INDEX_FL;
-}
-
-/*
- * NOTE! unlike strncmp, ext3_match returns 1 for success, 0 for failure.
- *
- * `len <= EXT3_NAME_LEN' is guaranteed by caller.
- * `de != NULL' is guaranteed by caller.
- */
-static inline int ext3_match (int len, const char * const name,
-			      struct ext3_dir_entry_2 * de)
-{
-	if (len != de->name_len)
-		return 0;
-	if (!de->inode)
-		return 0;
-	return !memcmp(name, de->name, len);
-}
-
-/*
- * Returns 0 if not found, -1 on failure, and 1 on success
- */
-static inline int search_dirblock(struct buffer_head * bh,
-				  struct inode *dir,
-				  struct qstr *child,
-				  unsigned long offset,
-				  struct ext3_dir_entry_2 ** res_dir)
-{
-	struct ext3_dir_entry_2 * de;
-	char * dlimit;
-	int de_len;
-	const char *name = child->name;
-	int namelen = child->len;
-
-	de = (struct ext3_dir_entry_2 *) bh->b_data;
-	dlimit = bh->b_data + dir->i_sb->s_blocksize;
-	while ((char *) de < dlimit) {
-		/* this code is executed quadratically often */
-		/* do minimal checking `by hand' */
-
-		if ((char *) de + namelen <= dlimit &&
-		    ext3_match (namelen, name, de)) {
-			/* found a match - just to be sure, do a full check */
-			if (!ext3_check_dir_entry("ext3_find_entry",
-						  dir, de, bh, offset))
-				return -1;
-			*res_dir = de;
-			return 1;
-		}
-		/* prevent looping on a bad block */
-		de_len = ext3_rec_len_from_disk(de->rec_len);
-		if (de_len <= 0)
-			return -1;
-		offset += de_len;
-		de = (struct ext3_dir_entry_2 *) ((char *) de + de_len);
-	}
-	return 0;
-}
-
-
-/*
- *	ext3_find_entry()
- *
- * finds an entry in the specified directory with the given name. It
- * returns the cache buffer in which the entry was found, and the entry
- * itself (as a parameter - res_dir). It does NOT read the inode of the
- * entry - you'll have to do that yourself if you want to.
- *
- * The returned buffer_head has ->b_count elevated.  The caller is expected
- * to brelse() it when appropriate.
- */
-static struct buffer_head *ext3_find_entry(struct inode *dir,
-					struct qstr *entry,
-					struct ext3_dir_entry_2 **res_dir)
-{
-	struct super_block * sb;
-	struct buffer_head * bh_use[NAMEI_RA_SIZE];
-	struct buffer_head * bh, *ret = NULL;
-	unsigned long start, block, b;
-	const u8 *name = entry->name;
-	int ra_max = 0;		/* Number of bh's in the readahead
-				   buffer, bh_use[] */
-	int ra_ptr = 0;		/* Current index into readahead
-				   buffer */
-	int num = 0;
-	int nblocks, i, err;
-	int namelen;
-
-	*res_dir = NULL;
-	sb = dir->i_sb;
-	namelen = entry->len;
-	if (namelen > EXT3_NAME_LEN)
-		return NULL;
-	if ((namelen <= 2) && (name[0] == '.') &&
-	    (name[1] == '.' || name[1] == 0)) {
-		/*
-		 * "." or ".." will only be in the first block
-		 * NFS may look up ".."; "." should be handled by the VFS
-		 */
-		block = start = 0;
-		nblocks = 1;
-		goto restart;
-	}
-	if (is_dx(dir)) {
-		bh = ext3_dx_find_entry(dir, entry, res_dir, &err);
-		/*
-		 * On success, or if the error was file not found,
-		 * return.  Otherwise, fall back to doing a search the
-		 * old fashioned way.
-		 */
-		if (bh || (err != ERR_BAD_DX_DIR))
-			return bh;
-		dxtrace(printk("ext3_find_entry: dx failed, falling back\n"));
-	}
-	nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
-	start = EXT3_I(dir)->i_dir_start_lookup;
-	if (start >= nblocks)
-		start = 0;
-	block = start;
-restart:
-	do {
-		/*
-		 * We deal with the read-ahead logic here.
-		 */
-		if (ra_ptr >= ra_max) {
-			/* Refill the readahead buffer */
-			ra_ptr = 0;
-			b = block;
-			for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
-				/*
-				 * Terminate if we reach the end of the
-				 * directory and must wrap, or if our
-				 * search has finished at this block.
-				 */
-				if (b >= nblocks || (num && block == start)) {
-					bh_use[ra_max] = NULL;
-					break;
-				}
-				num++;
-				bh = ext3_getblk(NULL, dir, b++, 0, &err);
-				bh_use[ra_max] = bh;
-				if (bh && !bh_uptodate_or_lock(bh)) {
-					get_bh(bh);
-					bh->b_end_io = end_buffer_read_sync;
-					submit_bh(READ | REQ_META | REQ_PRIO,
-						  bh);
-				}
-			}
-		}
-		if ((bh = bh_use[ra_ptr++]) == NULL)
-			goto next;
-		wait_on_buffer(bh);
-		if (!buffer_uptodate(bh)) {
-			/* read error, skip block & hope for the best */
-			ext3_error(sb, __func__, "reading directory #%lu "
-				   "offset %lu", dir->i_ino, block);
-			brelse(bh);
-			goto next;
-		}
-		i = search_dirblock(bh, dir, entry,
-			    block << EXT3_BLOCK_SIZE_BITS(sb), res_dir);
-		if (i == 1) {
-			EXT3_I(dir)->i_dir_start_lookup = block;
-			ret = bh;
-			goto cleanup_and_exit;
-		} else {
-			brelse(bh);
-			if (i < 0)
-				goto cleanup_and_exit;
-		}
-	next:
-		if (++block >= nblocks)
-			block = 0;
-	} while (block != start);
-
-	/*
-	 * If the directory has grown while we were searching, then
-	 * search the last part of the directory before giving up.
-	 */
-	block = nblocks;
-	nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
-	if (block < nblocks) {
-		start = 0;
-		goto restart;
-	}
-
-cleanup_and_exit:
-	/* Clean up the read-ahead blocks */
-	for (; ra_ptr < ra_max; ra_ptr++)
-		brelse (bh_use[ra_ptr]);
-	return ret;
-}
-
-static struct buffer_head * ext3_dx_find_entry(struct inode *dir,
-			struct qstr *entry, struct ext3_dir_entry_2 **res_dir,
-			int *err)
-{
-	struct super_block *sb = dir->i_sb;
-	struct dx_hash_info	hinfo;
-	struct dx_frame frames[2], *frame;
-	struct buffer_head *bh;
-	unsigned long block;
-	int retval;
-
-	if (!(frame = dx_probe(entry, dir, &hinfo, frames, err)))
-		return NULL;
-	do {
-		block = dx_get_block(frame->at);
-		if (!(bh = ext3_dir_bread (NULL, dir, block, 0, err)))
-			goto errout;
-
-		retval = search_dirblock(bh, dir, entry,
-					 block << EXT3_BLOCK_SIZE_BITS(sb),
-					 res_dir);
-		if (retval == 1) {
-			dx_release(frames);
-			return bh;
-		}
-		brelse(bh);
-		if (retval == -1) {
-			*err = ERR_BAD_DX_DIR;
-			goto errout;
-		}
-
-		/* Check to see if we should continue to search */
-		retval = ext3_htree_next_block(dir, hinfo.hash, frame,
-					       frames, NULL);
-		if (retval < 0) {
-			ext3_warning(sb, __func__,
-			     "error reading index page in directory #%lu",
-			     dir->i_ino);
-			*err = retval;
-			goto errout;
-		}
-	} while (retval == 1);
-
-	*err = -ENOENT;
-errout:
-	dxtrace(printk("%s not found\n", entry->name));
-	dx_release (frames);
-	return NULL;
-}
-
-static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, unsigned int flags)
-{
-	struct inode * inode;
-	struct ext3_dir_entry_2 * de;
-	struct buffer_head * bh;
-
-	if (dentry->d_name.len > EXT3_NAME_LEN)
-		return ERR_PTR(-ENAMETOOLONG);
-
-	bh = ext3_find_entry(dir, &dentry->d_name, &de);
-	inode = NULL;
-	if (bh) {
-		unsigned long ino = le32_to_cpu(de->inode);
-		brelse (bh);
-		if (!ext3_valid_inum(dir->i_sb, ino)) {
-			ext3_error(dir->i_sb, "ext3_lookup",
-				   "bad inode number: %lu", ino);
-			return ERR_PTR(-EIO);
-		}
-		inode = ext3_iget(dir->i_sb, ino);
-		if (inode == ERR_PTR(-ESTALE)) {
-			ext3_error(dir->i_sb, __func__,
-					"deleted inode referenced: %lu",
-					ino);
-			return ERR_PTR(-EIO);
-		}
-	}
-	return d_splice_alias(inode, dentry);
-}
-
-
-struct dentry *ext3_get_parent(struct dentry *child)
-{
-	unsigned long ino;
-	struct qstr dotdot = QSTR_INIT("..", 2);
-	struct ext3_dir_entry_2 * de;
-	struct buffer_head *bh;
-
-	bh = ext3_find_entry(d_inode(child), &dotdot, &de);
-	if (!bh)
-		return ERR_PTR(-ENOENT);
-	ino = le32_to_cpu(de->inode);
-	brelse(bh);
-
-	if (!ext3_valid_inum(d_inode(child)->i_sb, ino)) {
-		ext3_error(d_inode(child)->i_sb, "ext3_get_parent",
-			   "bad inode number: %lu", ino);
-		return ERR_PTR(-EIO);
-	}
-
-	return d_obtain_alias(ext3_iget(d_inode(child)->i_sb, ino));
-}
-
-#define S_SHIFT 12
-static unsigned char ext3_type_by_mode[S_IFMT >> S_SHIFT] = {
-	[S_IFREG >> S_SHIFT]	= EXT3_FT_REG_FILE,
-	[S_IFDIR >> S_SHIFT]	= EXT3_FT_DIR,
-	[S_IFCHR >> S_SHIFT]	= EXT3_FT_CHRDEV,
-	[S_IFBLK >> S_SHIFT]	= EXT3_FT_BLKDEV,
-	[S_IFIFO >> S_SHIFT]	= EXT3_FT_FIFO,
-	[S_IFSOCK >> S_SHIFT]	= EXT3_FT_SOCK,
-	[S_IFLNK >> S_SHIFT]	= EXT3_FT_SYMLINK,
-};
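The table is indexed by the four S_IFMT bits of the mode: S_IFDIR is 0040000, so (S_IFDIR & S_IFMT) >> 12 == 4 selects EXT3_FT_DIR, and S_IFREG (0100000) gives index 8 for EXT3_FT_REG_FILE. A trivial userspace illustration:

	/* Userspace illustration of the mode -> file-type table index. */
	#include <stdio.h>
	#include <sys/stat.h>

	int main(void)
	{
		printf("dir index:  %d\n", (S_IFDIR & S_IFMT) >> 12);	/* 4 */
		printf("file index: %d\n", (S_IFREG & S_IFMT) >> 12);	/* 8 */
		return 0;
	}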
-
-static inline void ext3_set_de_type(struct super_block *sb,
-				struct ext3_dir_entry_2 *de,
-				umode_t mode) {
-	if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_FILETYPE))
-		de->file_type = ext3_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
-}
-
-/*
- * Move count entries from the end of the map from one block to another.
- * Returns pointer to last entry moved.
- */
-static struct ext3_dir_entry_2 *
-dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
-{
-	unsigned rec_len = 0;
-
-	while (count--) {
-		struct ext3_dir_entry_2 *de = (struct ext3_dir_entry_2 *) (from + map->offs);
-		rec_len = EXT3_DIR_REC_LEN(de->name_len);
-		memcpy (to, de, rec_len);
-		((struct ext3_dir_entry_2 *) to)->rec_len =
-				ext3_rec_len_to_disk(rec_len);
-		de->inode = 0;
-		map++;
-		to += rec_len;
-	}
-	return (struct ext3_dir_entry_2 *) (to - rec_len);
-}
-
-/*
- * Compact each dir entry in the range to the minimal rec_len.
- * Returns pointer to last entry in range.
- */
-static struct ext3_dir_entry_2 *dx_pack_dirents(char *base, unsigned blocksize)
-{
-	struct ext3_dir_entry_2 *next, *to, *prev;
-	struct ext3_dir_entry_2 *de = (struct ext3_dir_entry_2 *)base;
-	unsigned rec_len = 0;
-
-	prev = to = de;
-	while ((char *)de < base + blocksize) {
-		next = ext3_next_entry(de);
-		if (de->inode && de->name_len) {
-			rec_len = EXT3_DIR_REC_LEN(de->name_len);
-			if (de > to)
-				memmove(to, de, rec_len);
-			to->rec_len = ext3_rec_len_to_disk(rec_len);
-			prev = to;
-			to = (struct ext3_dir_entry_2 *) (((char *) to) + rec_len);
-		}
-		de = next;
-	}
-	return prev;
-}
-
-/*
- * Split a full leaf block to make room for a new dir entry.
- * Allocate a new block, and move entries so that they are approx. equally full.
- * Returns pointer to de in block into which the new entry will be inserted.
- */
-static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
-			struct buffer_head **bh,struct dx_frame *frame,
-			struct dx_hash_info *hinfo, int *error)
-{
-	unsigned blocksize = dir->i_sb->s_blocksize;
-	unsigned count, continued;
-	struct buffer_head *bh2;
-	u32 newblock;
-	u32 hash2;
-	struct dx_map_entry *map;
-	char *data1 = (*bh)->b_data, *data2;
-	unsigned split, move, size;
-	struct ext3_dir_entry_2 *de = NULL, *de2;
-	int	err = 0, i;
-
-	bh2 = ext3_append (handle, dir, &newblock, &err);
-	if (!(bh2)) {
-		brelse(*bh);
-		*bh = NULL;
-		goto errout;
-	}
-
-	BUFFER_TRACE(*bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, *bh);
-	if (err)
-		goto journal_error;
-
-	BUFFER_TRACE(frame->bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, frame->bh);
-	if (err)
-		goto journal_error;
-
-	data2 = bh2->b_data;
-
-	/* create map in the end of data2 block */
-	map = (struct dx_map_entry *) (data2 + blocksize);
-	count = dx_make_map ((struct ext3_dir_entry_2 *) data1,
-			     blocksize, hinfo, map);
-	map -= count;
-	dx_sort_map (map, count);
-	/* Split the existing block in the middle, size-wise */
-	size = 0;
-	move = 0;
-	for (i = count-1; i >= 0; i--) {
-		/* is more than half of this entry in 2nd half of the block? */
-		if (size + map[i].size/2 > blocksize/2)
-			break;
-		size += map[i].size;
-		move++;
-	}
-	/* map index at which we will split */
-	split = count - move;
-	hash2 = map[split].hash;
-	continued = hash2 == map[split - 1].hash;
-	dxtrace(printk("Split block %i at %x, %i/%i\n",
-		dx_get_block(frame->at), hash2, split, count-split));
-
-	/* Fancy dance to stay within two buffers */
-	de2 = dx_move_dirents(data1, data2, map + split, count - split);
-	de = dx_pack_dirents(data1,blocksize);
-	de->rec_len = ext3_rec_len_to_disk(data1 + blocksize - (char *) de);
-	de2->rec_len = ext3_rec_len_to_disk(data2 + blocksize - (char *) de2);
-	dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data1, blocksize, 1));
-	dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data2, blocksize, 1));
-
-	/* Which block gets the new entry? */
-	if (hinfo->hash >= hash2)
-	{
-		swap(*bh, bh2);
-		de = de2;
-	}
-	dx_insert_block (frame, hash2 + continued, newblock);
-	err = ext3_journal_dirty_metadata (handle, bh2);
-	if (err)
-		goto journal_error;
-	err = ext3_journal_dirty_metadata (handle, frame->bh);
-	if (err)
-		goto journal_error;
-	brelse (bh2);
-	dxtrace(dx_show_index ("frame", frame->entries));
-	return de;
-
-journal_error:
-	brelse(*bh);
-	brelse(bh2);
-	*bh = NULL;
-	ext3_std_error(dir->i_sb, err);
-errout:
-	*error = err;
-	return NULL;
-}
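The split-point loop walks the hash-sorted map from the highest hash down, reserving entries for the new block until moving another one would push more than half of its bytes past the midpoint; 'continued' then bumps the inserted index hash by one when the split lands inside a run of equal hashes. The balancing loop in isolation (struct map_sketch is illustrative, not dx_map_entry):

	/* Sketch of do_split()'s byte-balancing split-point search. */
	struct map_sketch { unsigned hash, size; };

	static unsigned pick_split(const struct map_sketch *map, unsigned count,
				   unsigned blocksize)
	{
		unsigned size = 0, move = 0;
		int i;

		for (i = (int)count - 1; i >= 0; i--) {
			/* stop once most of this entry would land in block two */
			if (size + map[i].size / 2 > blocksize / 2)
				break;
			size += map[i].size;
			move++;
		}
		return count - move;	/* entries [split, count) move out */
	}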
-
-
-/*
- * Add a new entry into a directory (leaf) block.  If de is non-NULL,
- * it points to a directory entry which is guaranteed to be large
- * enough for the new directory entry.  If de is NULL, then
- * add_dirent_to_buf will attempt to search the directory block for
- * space.  It will return -ENOSPC if no space is available, -EIO if
- * the block is corrupted, and -EEXIST if the entry already exists.
- *
- * NOTE!  bh is NOT released in the case where ENOSPC is returned.  In
- * all other cases bh is released.
- */
-static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
-			     struct inode *inode, struct ext3_dir_entry_2 *de,
-			     struct buffer_head * bh)
-{
-	struct inode	*dir = d_inode(dentry->d_parent);
-	const char	*name = dentry->d_name.name;
-	int		namelen = dentry->d_name.len;
-	unsigned long	offset = 0;
-	unsigned short	reclen;
-	int		nlen, rlen, err;
-	char		*top;
-
-	reclen = EXT3_DIR_REC_LEN(namelen);
-	if (!de) {
-		de = (struct ext3_dir_entry_2 *)bh->b_data;
-		top = bh->b_data + dir->i_sb->s_blocksize - reclen;
-		while ((char *) de <= top) {
-			if (!ext3_check_dir_entry("ext3_add_entry", dir, de,
-						  bh, offset)) {
-				brelse (bh);
-				return -EIO;
-			}
-			if (ext3_match (namelen, name, de)) {
-				brelse (bh);
-				return -EEXIST;
-			}
-			nlen = EXT3_DIR_REC_LEN(de->name_len);
-			rlen = ext3_rec_len_from_disk(de->rec_len);
-			if ((de->inode? rlen - nlen: rlen) >= reclen)
-				break;
-			de = (struct ext3_dir_entry_2 *)((char *)de + rlen);
-			offset += rlen;
-		}
-		if ((char *) de > top)
-			return -ENOSPC;
-	}
-	BUFFER_TRACE(bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, bh);
-	if (err) {
-		ext3_std_error(dir->i_sb, err);
-		brelse(bh);
-		return err;
-	}
-
-	/* By now the buffer is marked for journaling */
-	nlen = EXT3_DIR_REC_LEN(de->name_len);
-	rlen = ext3_rec_len_from_disk(de->rec_len);
-	if (de->inode) {
-		struct ext3_dir_entry_2 *de1 = (struct ext3_dir_entry_2 *)((char *)de + nlen);
-		de1->rec_len = ext3_rec_len_to_disk(rlen - nlen);
-		de->rec_len = ext3_rec_len_to_disk(nlen);
-		de = de1;
-	}
-	de->file_type = EXT3_FT_UNKNOWN;
-	if (inode) {
-		de->inode = cpu_to_le32(inode->i_ino);
-		ext3_set_de_type(dir->i_sb, de, inode->i_mode);
-	} else
-		de->inode = 0;
-	de->name_len = namelen;
-	memcpy (de->name, name, namelen);
-	/*
-	 * XXX shouldn't update any times until successful
-	 * completion of syscall, but too many callers depend
-	 * on this.
-	 *
-	 * XXX similarly, too many callers depend on
-	 * ext3_new_inode() setting the times, but error
-	 * recovery deletes the inode, so the worst that can
-	 * happen is that the times are slightly out of date
-	 * and/or different from the directory change time.
-	 */
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
-	ext3_update_dx_flag(dir);
-	dir->i_version++;
-	ext3_mark_inode_dirty(handle, dir);
-	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-	err = ext3_journal_dirty_metadata(handle, bh);
-	if (err)
-		ext3_std_error(dir->i_sb, err);
-	brelse(bh);
-	return 0;
-}
-
-/*
- * This converts a one-block unindexed directory to a 3-block indexed
- * directory, and adds the dentry to the indexed directory.
- */
-static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
-			    struct inode *inode, struct buffer_head *bh)
-{
-	struct inode	*dir = d_inode(dentry->d_parent);
-	const char	*name = dentry->d_name.name;
-	int		namelen = dentry->d_name.len;
-	struct buffer_head *bh2;
-	struct dx_root	*root;
-	struct dx_frame	frames[2], *frame;
-	struct dx_entry *entries;
-	struct ext3_dir_entry_2	*de, *de2;
-	char		*data1, *top;
-	unsigned	len;
-	int		retval;
-	unsigned	blocksize;
-	struct dx_hash_info hinfo;
-	u32		block;
-	struct fake_dirent *fde;
-
-	blocksize =  dir->i_sb->s_blocksize;
-	dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
-	retval = ext3_journal_get_write_access(handle, bh);
-	if (retval) {
-		ext3_std_error(dir->i_sb, retval);
-		brelse(bh);
-		return retval;
-	}
-	root = (struct dx_root *) bh->b_data;
-
-	/* The 0th block becomes the root, move the dirents out */
-	fde = &root->dotdot;
-	de = (struct ext3_dir_entry_2 *)((char *)fde +
-			ext3_rec_len_from_disk(fde->rec_len));
-	if ((char *) de >= (((char *) root) + blocksize)) {
-		ext3_error(dir->i_sb, __func__,
-			   "invalid rec_len for '..' in inode %lu",
-			   dir->i_ino);
-		brelse(bh);
-		return -EIO;
-	}
-	len = ((char *) root) + blocksize - (char *) de;
-
-	bh2 = ext3_append (handle, dir, &block, &retval);
-	if (!(bh2)) {
-		brelse(bh);
-		return retval;
-	}
-	EXT3_I(dir)->i_flags |= EXT3_INDEX_FL;
-	data1 = bh2->b_data;
-
-	memcpy (data1, de, len);
-	de = (struct ext3_dir_entry_2 *) data1;
-	top = data1 + len;
-	while ((char *)(de2 = ext3_next_entry(de)) < top)
-		de = de2;
-	de->rec_len = ext3_rec_len_to_disk(data1 + blocksize - (char *) de);
-	/* Initialize the root; the dot dirents already exist */
-	de = (struct ext3_dir_entry_2 *) (&root->dotdot);
-	de->rec_len = ext3_rec_len_to_disk(blocksize - EXT3_DIR_REC_LEN(2));
-	memset (&root->info, 0, sizeof(root->info));
-	root->info.info_length = sizeof(root->info);
-	root->info.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
-	entries = root->entries;
-	dx_set_block (entries, 1);
-	dx_set_count (entries, 1);
-	dx_set_limit (entries, dx_root_limit(dir, sizeof(root->info)));
-
-	/* Initialize as for dx_probe */
-	hinfo.hash_version = root->info.hash_version;
-	if (hinfo.hash_version <= DX_HASH_TEA)
-		hinfo.hash_version += EXT3_SB(dir->i_sb)->s_hash_unsigned;
-	hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
-	ext3fs_dirhash(name, namelen, &hinfo);
-	frame = frames;
-	frame->entries = entries;
-	frame->at = entries;
-	frame->bh = bh;
-	bh = bh2;
-	/*
-	 * Mark buffers dirty here so that if do_split() fails we write a
-	 * consistent set of buffers to disk.
-	 */
-	ext3_journal_dirty_metadata(handle, frame->bh);
-	ext3_journal_dirty_metadata(handle, bh);
-	de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
-	if (!de) {
-		ext3_mark_inode_dirty(handle, dir);
-		dx_release(frames);
-		return retval;
-	}
-	dx_release(frames);
-
-	return add_dirent_to_buf(handle, dentry, inode, de, bh);
-}
-
-/*
- *	ext3_add_entry()
- *
- * adds a file entry to the specified directory, using the same
- * semantics as ext3_find_entry(). It returns 0 on success or a
- * negative error code on failure.
- *
- * NOTE!! The inode part of 'de' is left at 0 - which means you
- * may not sleep between calling this and putting something into
- * the entry, as someone else might have used it while you slept.
- */
-static int ext3_add_entry (handle_t *handle, struct dentry *dentry,
-	struct inode *inode)
-{
-	struct inode *dir = d_inode(dentry->d_parent);
-	struct buffer_head * bh;
-	struct ext3_dir_entry_2 *de;
-	struct super_block * sb;
-	int	retval;
-	int	dx_fallback=0;
-	unsigned blocksize;
-	u32 block, blocks;
-
-	sb = dir->i_sb;
-	blocksize = sb->s_blocksize;
-	if (!dentry->d_name.len)
-		return -EINVAL;
-	if (is_dx(dir)) {
-		retval = ext3_dx_add_entry(handle, dentry, inode);
-		if (!retval || (retval != ERR_BAD_DX_DIR))
-			return retval;
-		EXT3_I(dir)->i_flags &= ~EXT3_INDEX_FL;
-		dx_fallback++;
-		ext3_mark_inode_dirty(handle, dir);
-	}
-	blocks = dir->i_size >> sb->s_blocksize_bits;
-	for (block = 0; block < blocks; block++) {
-		if (!(bh = ext3_dir_bread(handle, dir, block, 0, &retval)))
-			return retval;
-
-		retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
-		if (retval != -ENOSPC)
-			return retval;
-
-		if (blocks == 1 && !dx_fallback &&
-		    EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
-			return make_indexed_dir(handle, dentry, inode, bh);
-		brelse(bh);
-	}
-	bh = ext3_append(handle, dir, &block, &retval);
-	if (!bh)
-		return retval;
-	de = (struct ext3_dir_entry_2 *) bh->b_data;
-	de->inode = 0;
-	de->rec_len = ext3_rec_len_to_disk(blocksize);
-	return add_dirent_to_buf(handle, dentry, inode, de, bh);
-}
-
-/*
- * Returns 0 for success, or a negative error value
- */
-static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
-			     struct inode *inode)
-{
-	struct dx_frame frames[2], *frame;
-	struct dx_entry *entries, *at;
-	struct dx_hash_info hinfo;
-	struct buffer_head * bh;
-	struct inode *dir = d_inode(dentry->d_parent);
-	struct super_block * sb = dir->i_sb;
-	struct ext3_dir_entry_2 *de;
-	int err;
-
-	frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
-	if (!frame)
-		return err;
-	entries = frame->entries;
-	at = frame->at;
-
-	if (!(bh = ext3_dir_bread(handle, dir, dx_get_block(frame->at), 0, &err)))
-		goto cleanup;
-
-	BUFFER_TRACE(bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, bh);
-	if (err)
-		goto journal_error;
-
-	err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
-	if (err != -ENOSPC) {
-		bh = NULL;
-		goto cleanup;
-	}
-
-	/* Block full, should compress but for now just split */
-	dxtrace(printk("using %u of %u node entries\n",
-		       dx_get_count(entries), dx_get_limit(entries)));
-	/* Need to split index? */
-	if (dx_get_count(entries) == dx_get_limit(entries)) {
-		u32 newblock;
-		unsigned icount = dx_get_count(entries);
-		int levels = frame - frames;
-		struct dx_entry *entries2;
-		struct dx_node *node2;
-		struct buffer_head *bh2;
-
-		if (levels && (dx_get_count(frames->entries) ==
-			       dx_get_limit(frames->entries))) {
-			ext3_warning(sb, __func__,
-				     "Directory index full!");
-			err = -ENOSPC;
-			goto cleanup;
-		}
-		bh2 = ext3_append (handle, dir, &newblock, &err);
-		if (!(bh2))
-			goto cleanup;
-		node2 = (struct dx_node *)(bh2->b_data);
-		entries2 = node2->entries;
-		memset(&node2->fake, 0, sizeof(struct fake_dirent));
-		node2->fake.rec_len = ext3_rec_len_to_disk(sb->s_blocksize);
-		BUFFER_TRACE(frame->bh, "get_write_access");
-		err = ext3_journal_get_write_access(handle, frame->bh);
-		if (err)
-			goto journal_error;
-		if (levels) {
-			unsigned icount1 = icount/2, icount2 = icount - icount1;
-			unsigned hash2 = dx_get_hash(entries + icount1);
-			dxtrace(printk("Split index %i/%i\n", icount1, icount2));
-
-			BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
-			err = ext3_journal_get_write_access(handle,
-							     frames[0].bh);
-			if (err)
-				goto journal_error;
-
-			memcpy ((char *) entries2, (char *) (entries + icount1),
-				icount2 * sizeof(struct dx_entry));
-			dx_set_count (entries, icount1);
-			dx_set_count (entries2, icount2);
-			dx_set_limit (entries2, dx_node_limit(dir));
-
-			/* Which index block gets the new entry? */
-			if (at - entries >= icount1) {
-				frame->at = at = at - entries - icount1 + entries2;
-				frame->entries = entries = entries2;
-				swap(frame->bh, bh2);
-			}
-			dx_insert_block (frames + 0, hash2, newblock);
-			dxtrace(dx_show_index ("node", frames[1].entries));
-			dxtrace(dx_show_index ("node",
-			       ((struct dx_node *) bh2->b_data)->entries));
-			err = ext3_journal_dirty_metadata(handle, bh2);
-			if (err)
-				goto journal_error;
-			brelse (bh2);
-		} else {
-			dxtrace(printk("Creating second level index...\n"));
-			memcpy((char *) entries2, (char *) entries,
-			       icount * sizeof(struct dx_entry));
-			dx_set_limit(entries2, dx_node_limit(dir));
-
-			/* Set up root */
-			dx_set_count(entries, 1);
-			dx_set_block(entries + 0, newblock);
-			((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
-
-			/* Add new access path frame */
-			frame = frames + 1;
-			frame->at = at = at - entries + entries2;
-			frame->entries = entries = entries2;
-			frame->bh = bh2;
-			err = ext3_journal_get_write_access(handle,
-							     frame->bh);
-			if (err)
-				goto journal_error;
-		}
-		err = ext3_journal_dirty_metadata(handle, frames[0].bh);
-		if (err)
-			goto journal_error;
-	}
-	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
-	if (!de)
-		goto cleanup;
-	err = add_dirent_to_buf(handle, dentry, inode, de, bh);
-	bh = NULL;
-	goto cleanup;
-
-journal_error:
-	ext3_std_error(dir->i_sb, err);
-cleanup:
-	if (bh)
-		brelse(bh);
-	dx_release(frames);
-	return err;
-}
-
-/*
- * ext3_delete_entry deletes a directory entry by merging it with the
- * previous entry
- */
-static int ext3_delete_entry (handle_t *handle,
-			      struct inode * dir,
-			      struct ext3_dir_entry_2 * de_del,
-			      struct buffer_head * bh)
-{
-	struct ext3_dir_entry_2 * de, * pde;
-	int i;
-
-	i = 0;
-	pde = NULL;
-	de = (struct ext3_dir_entry_2 *) bh->b_data;
-	while (i < bh->b_size) {
-		if (!ext3_check_dir_entry("ext3_delete_entry", dir, de, bh, i))
-			return -EIO;
-		if (de == de_del)  {
-			int err;
-
-			BUFFER_TRACE(bh, "get_write_access");
-			err = ext3_journal_get_write_access(handle, bh);
-			if (err)
-				goto journal_error;
-
-			if (pde)
-				pde->rec_len = ext3_rec_len_to_disk(
-					ext3_rec_len_from_disk(pde->rec_len) +
-					ext3_rec_len_from_disk(de->rec_len));
-			else
-				de->inode = 0;
-			dir->i_version++;
-			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			err = ext3_journal_dirty_metadata(handle, bh);
-			if (err) {
-journal_error:
-				ext3_std_error(dir->i_sb, err);
-				return err;
-			}
-			return 0;
-		}
-		i += ext3_rec_len_from_disk(de->rec_len);
-		pde = de;
-		de = ext3_next_entry(de);
-	}
-	return -ENOENT;
-}
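Deletion never shuffles bytes: the previous live entry's rec_len simply grows to span the dead one, or, for the first entry in a block, the inode field is zeroed. A minimal sketch with a simplified entry (the fields echo, but are not, the on-disk ext3 layout):

	/* Sketch of delete-by-merge on a simplified directory entry. */
	#include <stdint.h>

	struct dirent_sketch {
		uint32_t inode;		/* 0 means "unused" */
		uint16_t rec_len;	/* bytes from here to the next entry */
	};

	static void delete_entry(struct dirent_sketch *prev,
				 struct dirent_sketch *victim)
	{
		if (prev)
			prev->rec_len += victim->rec_len; /* prev swallows victim */
		else
			victim->inode = 0;	/* first in block: mark free */
	}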
-
-static int ext3_add_nondir(handle_t *handle,
-		struct dentry *dentry, struct inode *inode)
-{
-	int err = ext3_add_entry(handle, dentry, inode);
-	if (!err) {
-		ext3_mark_inode_dirty(handle, inode);
-		unlock_new_inode(inode);
-		d_instantiate(dentry, inode);
-		return 0;
-	}
-	drop_nlink(inode);
-	unlock_new_inode(inode);
-	iput(inode);
-	return err;
-}
-
-/*
- * By the time this is called, we already have created
- * the directory cache entry for the new file, but it
- * is so far negative - it has no inode.
- *
- * If the create succeeds, we fill in the inode information
- * with d_instantiate().
- */
-static int ext3_create (struct inode * dir, struct dentry * dentry, umode_t mode,
-		bool excl)
-{
-	handle_t *handle;
-	struct inode * inode;
-	int err, retries = 0;
-
-	dquot_initialize(dir);
-
-retry:
-	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-					EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode = ext3_new_inode (handle, dir, &dentry->d_name, mode);
-	err = PTR_ERR(inode);
-	if (!IS_ERR(inode)) {
-		inode->i_op = &ext3_file_inode_operations;
-		inode->i_fop = &ext3_file_operations;
-		ext3_set_aops(inode);
-		err = ext3_add_nondir(handle, dentry, inode);
-	}
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-}
-
-static int ext3_mknod (struct inode * dir, struct dentry *dentry,
-			umode_t mode, dev_t rdev)
-{
-	handle_t *handle;
-	struct inode *inode;
-	int err, retries = 0;
-
-	if (!new_valid_dev(rdev))
-		return -EINVAL;
-
-	dquot_initialize(dir);
-
-retry:
-	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-					EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode = ext3_new_inode (handle, dir, &dentry->d_name, mode);
-	err = PTR_ERR(inode);
-	if (!IS_ERR(inode)) {
-		init_special_inode(inode, inode->i_mode, rdev);
-#ifdef CONFIG_EXT3_FS_XATTR
-		inode->i_op = &ext3_special_inode_operations;
-#endif
-		err = ext3_add_nondir(handle, dentry, inode);
-	}
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-}
-
-static int ext3_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
-{
-	handle_t *handle;
-	struct inode *inode;
-	int err, retries = 0;
-
-	dquot_initialize(dir);
-
-retry:
-	handle = ext3_journal_start(dir, EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
-			  4 + EXT3_XATTR_TRANS_BLOCKS);
-
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	inode = ext3_new_inode (handle, dir, NULL, mode);
-	err = PTR_ERR(inode);
-	if (!IS_ERR(inode)) {
-		inode->i_op = &ext3_file_inode_operations;
-		inode->i_fop = &ext3_file_operations;
-		ext3_set_aops(inode);
-		d_tmpfile(dentry, inode);
-		err = ext3_orphan_add(handle, inode);
-		if (err)
-			goto err_unlock_inode;
-		mark_inode_dirty(inode);
-		unlock_new_inode(inode);
-	}
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-err_unlock_inode:
-	ext3_journal_stop(handle);
-	unlock_new_inode(inode);
-	return err;
-}
-
-static int ext3_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
-{
-	handle_t *handle;
-	struct inode * inode;
-	struct buffer_head * dir_block = NULL;
-	struct ext3_dir_entry_2 * de;
-	int err, retries = 0;
-
-	if (dir->i_nlink >= EXT3_LINK_MAX)
-		return -EMLINK;
-
-	dquot_initialize(dir);
-
-retry:
-	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-					EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode = ext3_new_inode (handle, dir, &dentry->d_name, S_IFDIR | mode);
-	err = PTR_ERR(inode);
-	if (IS_ERR(inode))
-		goto out_stop;
-
-	inode->i_op = &ext3_dir_inode_operations;
-	inode->i_fop = &ext3_dir_operations;
-	inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
-	if (!(dir_block = ext3_dir_bread(handle, inode, 0, 1, &err)))
-		goto out_clear_inode;
-
-	BUFFER_TRACE(dir_block, "get_write_access");
-	err = ext3_journal_get_write_access(handle, dir_block);
-	if (err)
-		goto out_clear_inode;
-
-	de = (struct ext3_dir_entry_2 *) dir_block->b_data;
-	de->inode = cpu_to_le32(inode->i_ino);
-	de->name_len = 1;
-	de->rec_len = ext3_rec_len_to_disk(EXT3_DIR_REC_LEN(de->name_len));
-	strcpy (de->name, ".");
-	ext3_set_de_type(dir->i_sb, de, S_IFDIR);
-	de = ext3_next_entry(de);
-	de->inode = cpu_to_le32(dir->i_ino);
-	de->rec_len = ext3_rec_len_to_disk(inode->i_sb->s_blocksize -
-					EXT3_DIR_REC_LEN(1));
-	de->name_len = 2;
-	strcpy (de->name, "..");
-	ext3_set_de_type(dir->i_sb, de, S_IFDIR);
-	set_nlink(inode, 2);
-	BUFFER_TRACE(dir_block, "call ext3_journal_dirty_metadata");
-	err = ext3_journal_dirty_metadata(handle, dir_block);
-	if (err)
-		goto out_clear_inode;
-
-	err = ext3_mark_inode_dirty(handle, inode);
-	if (!err)
-		err = ext3_add_entry (handle, dentry, inode);
-
-	if (err) {
-out_clear_inode:
-		clear_nlink(inode);
-		unlock_new_inode(inode);
-		ext3_mark_inode_dirty(handle, inode);
-		iput (inode);
-		goto out_stop;
-	}
-	inc_nlink(dir);
-	ext3_update_dx_flag(dir);
-	err = ext3_mark_inode_dirty(handle, dir);
-	if (err)
-		goto out_clear_inode;
-
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
-out_stop:
-	brelse(dir_block);
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-}
-
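-/*
- * Illustration only: how ext3_mkdir() above sizes the two bootstrap
- * entries.  A sketch assuming the classic ext2/3 record formula
- * (rec_len = 8 + name_len, rounded up to a multiple of 4); ".." is
- * handed all remaining space so the two records cover the whole block.
- */
-#if 0	/* example, not compiled here */
-#include <stdio.h>
-
-#define DEMO_DIR_REC_LEN(name_len)	(((name_len) + 8 + 3) & ~3)
-
-static void dirent_layout_demo(unsigned blocksize)
-{
-	unsigned dot = DEMO_DIR_REC_LEN(1);	/* "."  -> 12 bytes */
-	unsigned dotdot = blocksize - dot;	/* ".." -> rest of block */
-
-	printf(". rec_len=%u, .. rec_len=%u, sum=%u\n",
-	       dot, dotdot, dot + dotdot);	/* sum == blocksize */
-}
-#endif
-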
-/*
- * routine to check that the specified directory is empty (for rmdir)
- */
-static int empty_dir (struct inode * inode)
-{
-	unsigned long offset;
-	struct buffer_head * bh;
-	struct ext3_dir_entry_2 * de, * de1;
-	struct super_block * sb;
-	int err = 0;
-
-	sb = inode->i_sb;
-	if (inode->i_size < EXT3_DIR_REC_LEN(1) + EXT3_DIR_REC_LEN(2) ||
-	    !(bh = ext3_dir_bread(NULL, inode, 0, 0, &err))) {
-		if (err)
-			ext3_error(inode->i_sb, __func__,
-				   "error %d reading directory #%lu offset 0",
-				   err, inode->i_ino);
-		else
-			ext3_warning(inode->i_sb, __func__,
-				     "bad directory (dir #%lu) - no data block",
-				     inode->i_ino);
-		return 1;
-	}
-	de = (struct ext3_dir_entry_2 *) bh->b_data;
-	de1 = ext3_next_entry(de);
-	if (le32_to_cpu(de->inode) != inode->i_ino ||
-			!le32_to_cpu(de1->inode) ||
-			strcmp (".", de->name) ||
-			strcmp ("..", de1->name)) {
-		ext3_warning (inode->i_sb, "empty_dir",
-			      "bad directory (dir #%lu) - no `.' or `..'",
-			      inode->i_ino);
-		brelse (bh);
-		return 1;
-	}
-	offset = ext3_rec_len_from_disk(de->rec_len) +
-			ext3_rec_len_from_disk(de1->rec_len);
-	de = ext3_next_entry(de1);
-	while (offset < inode->i_size) {
-		if (!bh ||
-			(void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
-			err = 0;
-			brelse (bh);
-			if (!(bh = ext3_dir_bread (NULL, inode,
-				offset >> EXT3_BLOCK_SIZE_BITS(sb), 0, &err))) {
-				if (err)
-					ext3_error(sb, __func__,
-						   "error %d reading directory"
-						   " #%lu offset %lu",
-						   err, inode->i_ino, offset);
-				offset += sb->s_blocksize;
-				continue;
-			}
-			de = (struct ext3_dir_entry_2 *) bh->b_data;
-		}
-		if (!ext3_check_dir_entry("empty_dir", inode, de, bh, offset)) {
-			de = (struct ext3_dir_entry_2 *)(bh->b_data +
-							 sb->s_blocksize);
-			offset = (offset | (sb->s_blocksize - 1)) + 1;
-			continue;
-		}
-		if (le32_to_cpu(de->inode)) {
-			brelse (bh);
-			return 0;
-		}
-		offset += ext3_rec_len_from_disk(de->rec_len);
-		de = ext3_next_entry(de);
-	}
-	brelse (bh);
-	return 1;
-}
-
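-/*
- * Illustration only: the record walk empty_dir() above performs.  A
- * userspace sketch over a single directory block, assuming only the
- * minimal on-disk entry header (inode, rec_len); inode == 0 marks a
- * deleted or unused slot.
- */
-#if 0	/* example, not compiled here */
-#include <stdint.h>
-
-struct demo_dirent {
-	uint32_t inode;		/* 0 means unused slot */
-	uint16_t rec_len;	/* distance to the next record */
-};
-
-/* Return 1 if the block holds no live entry beyond "." and "..". */
-static int demo_block_is_empty(const char *block, unsigned blocksize)
-{
-	const struct demo_dirent *de = (const void *)block;	/* "." */
-	unsigned offset = de->rec_len;
-
-	de = (const void *)(block + offset);			/* ".." */
-	offset += de->rec_len;
-
-	while (offset < blocksize) {
-		de = (const void *)(block + offset);
-		if (de->rec_len == 0)	/* corrupt record, stop walking */
-			return 0;
-		if (de->inode)		/* a live entry was found */
-			return 0;
-		offset += de->rec_len;
-	}
-	return 1;
-}
-#endif
-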
-/* ext3_orphan_add() links an unlinked or truncated inode into a list of
- * such inodes, starting at the superblock, in case we crash before the
- * file is closed/deleted, or in case the inode truncate spans multiple
- * transactions and the last transaction is not recovered after a crash.
- *
- * At filesystem recovery time, we walk this list deleting unlinked
- * inodes and truncating linked inodes in ext3_orphan_cleanup().
- */
-int ext3_orphan_add(handle_t *handle, struct inode *inode)
-{
-	struct super_block *sb = inode->i_sb;
-	struct ext3_iloc iloc;
-	int err = 0, rc;
-
-	mutex_lock(&EXT3_SB(sb)->s_orphan_lock);
-	if (!list_empty(&EXT3_I(inode)->i_orphan))
-		goto out_unlock;
-
-	/* Orphan handling is only valid for files with data blocks
-	 * being truncated, or files being unlinked. */
-
-	/* @@@ FIXME: Observation from aviro:
-	 * I think I can trigger J_ASSERT in ext3_orphan_add().  We block
-	 * here (on s_orphan_lock), so race with ext3_link() which might bump
-	 * ->i_nlink. For, say it, character device. Not a regular file,
-	 * not a directory, not a symlink and ->i_nlink > 0.
-	 *
-	 * tytso, 4/25/2009: I'm not sure how that could happen;
-	 * shouldn't the fs core protect us from these sort of
-	 * unlink()/link() races?
-	 */
-	J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
-		S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);
-
-	BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
-	if (err)
-		goto out_unlock;
-
-	err = ext3_reserve_inode_write(handle, inode, &iloc);
-	if (err)
-		goto out_unlock;
-
-	/* Insert this inode at the head of the on-disk orphan list... */
-	NEXT_ORPHAN(inode) = le32_to_cpu(EXT3_SB(sb)->s_es->s_last_orphan);
-	EXT3_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
-	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-	rc = ext3_mark_iloc_dirty(handle, inode, &iloc);
-	if (!err)
-		err = rc;
-
-	/* Only add to the head of the in-memory list if all the
-	 * previous operations succeeded.  If the orphan_add is going to
-	 * fail (possibly taking the journal offline), we can't risk
-	 * leaving the inode on the orphan list: stray orphan-list
-	 * entries can cause panics at unmount time.
-	 *
-	 * This is safe: on error we're going to ignore the orphan list
-	 * anyway on the next recovery. */
-	if (!err)
-		list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
-
-	jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
-	jbd_debug(4, "orphan inode %lu will point to %d\n",
-			inode->i_ino, NEXT_ORPHAN(inode));
-out_unlock:
-	mutex_unlock(&EXT3_SB(sb)->s_orphan_lock);
-	ext3_std_error(inode->i_sb, err);
-	return err;
-}
-
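-/*
- * Illustration only: the list shape ext3_orphan_add() above maintains.
- * The on-disk orphan list is singly linked: the superblock stores the
- * head inode number and each orphan stores its successor.  A sketch
- * with plain integers standing in for the le32 on-disk fields.
- */
-#if 0	/* example, not compiled here */
-#include <stdint.h>
-
-struct demo_sb    { uint32_t last_orphan; };	/* head of the chain */
-struct demo_inode { uint32_t ino; uint32_t next_orphan; };
-
-/* Push an inode onto the head of the chain, as ext3_orphan_add does. */
-static void demo_orphan_push(struct demo_sb *sb, struct demo_inode *inode)
-{
-	inode->next_orphan = sb->last_orphan;	/* point at the old head */
-	sb->last_orphan = inode->ino;		/* become the new head */
-}
-#endif
-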
-/*
- * ext3_orphan_del() removes an unlinked or truncated inode from the list
- * of such inodes stored on disk, because it is finally being cleaned up.
- */
-int ext3_orphan_del(handle_t *handle, struct inode *inode)
-{
-	struct list_head *prev;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct ext3_sb_info *sbi;
-	unsigned long ino_next;
-	struct ext3_iloc iloc;
-	int err = 0;
-
-	mutex_lock(&EXT3_SB(inode->i_sb)->s_orphan_lock);
-	if (list_empty(&ei->i_orphan))
-		goto out;
-
-	ino_next = NEXT_ORPHAN(inode);
-	prev = ei->i_orphan.prev;
-	sbi = EXT3_SB(inode->i_sb);
-
-	jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
-
-	list_del_init(&ei->i_orphan);
-
-	/* If we're on an error path, we may not have a valid
-	 * transaction handle with which to update the orphan list on
-	 * disk, but we still need to remove the inode from the linked
-	 * list in memory. */
-	if (!handle)
-		goto out;
-
-	err = ext3_reserve_inode_write(handle, inode, &iloc);
-	if (err)
-		goto out_err;
-
-	if (prev == &sbi->s_orphan) {
-		jbd_debug(4, "superblock will point to %lu\n", ino_next);
-		BUFFER_TRACE(sbi->s_sbh, "get_write_access");
-		err = ext3_journal_get_write_access(handle, sbi->s_sbh);
-		if (err)
-			goto out_brelse;
-		sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
-		err = ext3_journal_dirty_metadata(handle, sbi->s_sbh);
-	} else {
-		struct ext3_iloc iloc2;
-		struct inode *i_prev =
-			&list_entry(prev, struct ext3_inode_info, i_orphan)->vfs_inode;
-
-		jbd_debug(4, "orphan inode %lu will point to %lu\n",
-			  i_prev->i_ino, ino_next);
-		err = ext3_reserve_inode_write(handle, i_prev, &iloc2);
-		if (err)
-			goto out_brelse;
-		NEXT_ORPHAN(i_prev) = ino_next;
-		err = ext3_mark_iloc_dirty(handle, i_prev, &iloc2);
-	}
-	if (err)
-		goto out_brelse;
-	NEXT_ORPHAN(inode) = 0;
-	err = ext3_mark_iloc_dirty(handle, inode, &iloc);
-
-out_err:
-	ext3_std_error(inode->i_sb, err);
-out:
-	mutex_unlock(&EXT3_SB(inode->i_sb)->s_orphan_lock);
-	return err;
-
-out_brelse:
-	brelse(iloc.bh);
-	goto out_err;
-}
-
-static int ext3_rmdir (struct inode * dir, struct dentry *dentry)
-{
-	int retval;
-	struct inode * inode;
-	struct buffer_head * bh;
-	struct ext3_dir_entry_2 * de;
-	handle_t *handle;
-
-	/* Initialize quotas beforehand so that any quota writes go
-	 * in a separate transaction */
-	dquot_initialize(dir);
-	dquot_initialize(d_inode(dentry));
-
-	handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	retval = -ENOENT;
-	bh = ext3_find_entry(dir, &dentry->d_name, &de);
-	if (!bh)
-		goto end_rmdir;
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode = d_inode(dentry);
-
-	retval = -EIO;
-	if (le32_to_cpu(de->inode) != inode->i_ino)
-		goto end_rmdir;
-
-	retval = -ENOTEMPTY;
-	if (!empty_dir (inode))
-		goto end_rmdir;
-
-	retval = ext3_delete_entry(handle, dir, de, bh);
-	if (retval)
-		goto end_rmdir;
-	if (inode->i_nlink != 2)
-		ext3_warning (inode->i_sb, "ext3_rmdir",
-			      "empty directory has nlink!=2 (%d)",
-			      inode->i_nlink);
-	inode->i_version++;
-	clear_nlink(inode);
-	/* There's no need to set i_disksize: the fact that i_nlink is
-	 * zero will ensure that the right thing happens during any
-	 * recovery. */
-	inode->i_size = 0;
-	ext3_orphan_add(handle, inode);
-	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
-	ext3_mark_inode_dirty(handle, inode);
-	drop_nlink(dir);
-	ext3_update_dx_flag(dir);
-	ext3_mark_inode_dirty(handle, dir);
-
-end_rmdir:
-	ext3_journal_stop(handle);
-	brelse (bh);
-	return retval;
-}
-
-static int ext3_unlink(struct inode * dir, struct dentry *dentry)
-{
-	int retval;
-	struct inode * inode;
-	struct buffer_head * bh;
-	struct ext3_dir_entry_2 * de;
-	handle_t *handle;
-
-	trace_ext3_unlink_enter(dir, dentry);
-	/* Initialize quotas beforehand so that any quota writes go
-	 * in a separate transaction */
-	dquot_initialize(dir);
-	dquot_initialize(d_inode(dentry));
-
-	handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	retval = -ENOENT;
-	bh = ext3_find_entry(dir, &dentry->d_name, &de);
-	if (!bh)
-		goto end_unlink;
-
-	inode = d_inode(dentry);
-
-	retval = -EIO;
-	if (le32_to_cpu(de->inode) != inode->i_ino)
-		goto end_unlink;
-
-	if (!inode->i_nlink) {
-		ext3_warning (inode->i_sb, "ext3_unlink",
-			      "Deleting nonexistent file (%lu), %d",
-			      inode->i_ino, inode->i_nlink);
-		set_nlink(inode, 1);
-	}
-	retval = ext3_delete_entry(handle, dir, de, bh);
-	if (retval)
-		goto end_unlink;
-	dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
-	ext3_update_dx_flag(dir);
-	ext3_mark_inode_dirty(handle, dir);
-	drop_nlink(inode);
-	if (!inode->i_nlink)
-		ext3_orphan_add(handle, inode);
-	inode->i_ctime = dir->i_ctime;
-	ext3_mark_inode_dirty(handle, inode);
-	retval = 0;
-
-end_unlink:
-	ext3_journal_stop(handle);
-	brelse (bh);
-	trace_ext3_unlink_exit(dentry, retval);
-	return retval;
-}
-
-static int ext3_symlink (struct inode * dir,
-		struct dentry *dentry, const char * symname)
-{
-	handle_t *handle;
-	struct inode * inode;
-	int l, err, retries = 0;
-	int credits;
-
-	l = strlen(symname)+1;
-	if (l > dir->i_sb->s_blocksize)
-		return -ENAMETOOLONG;
-
-	dquot_initialize(dir);
-
-	if (l > EXT3_N_BLOCKS * 4) {
-		/*
-		 * For non-fast symlinks, we just allocate the inode and put
-		 * it on the orphan list in the first transaction => we need
-		 * the bitmap, group descriptor, sb, inode block, quota
-		 * blocks, and possibly selinux xattr blocks.
-		 */
-		credits = 4 + EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
-			  EXT3_XATTR_TRANS_BLOCKS;
-	} else {
-		/*
-		 * Fast symlink. We have to add an entry to the directory
-		 * (EXT3_DATA_TRANS_BLOCKS + EXT3_INDEX_EXTRA_TRANS_BLOCKS) and
-		 * allocate a new inode (bitmap, group descriptor, inode block,
-		 * quota blocks; the sb is already counted in the previous
-		 * macros).
-		 */
-		credits = EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-			  EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-			  EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb);
-	}
-retry:
-	handle = ext3_journal_start(dir, credits);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode = ext3_new_inode (handle, dir, &dentry->d_name, S_IFLNK|S_IRWXUGO);
-	err = PTR_ERR(inode);
-	if (IS_ERR(inode))
-		goto out_stop;
-
-	if (l > EXT3_N_BLOCKS * 4) {
-		inode->i_op = &ext3_symlink_inode_operations;
-		ext3_set_aops(inode);
-		/*
-		 * We cannot call page_symlink() with the transaction started
-		 * because it calls into ext3_write_begin(), which acquires the
-		 * page lock, which ranks below transaction start (and it can
-		 * also wait for journal commit if we are running out of
-		 * space). So we have to stop the transaction now and restart
-		 * it once the symlink contents are written.
-		 *
-		 * To keep the fs consistent in case of a crash, we have to
-		 * put the inode on the orphan list in the meantime.
-		 */
-		drop_nlink(inode);
-		err = ext3_orphan_add(handle, inode);
-		ext3_journal_stop(handle);
-		if (err)
-			goto err_drop_inode;
-		err = __page_symlink(inode, symname, l, 1);
-		if (err)
-			goto err_drop_inode;
-		/*
-		 * Now inode is being linked into dir (EXT3_DATA_TRANS_BLOCKS
-		 * + EXT3_INDEX_EXTRA_TRANS_BLOCKS), inode is also modified
-		 */
-		handle = ext3_journal_start(dir,
-				EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-				EXT3_INDEX_EXTRA_TRANS_BLOCKS + 1);
-		if (IS_ERR(handle)) {
-			err = PTR_ERR(handle);
-			goto err_drop_inode;
-		}
-		set_nlink(inode, 1);
-		err = ext3_orphan_del(handle, inode);
-		if (err) {
-			ext3_journal_stop(handle);
-			drop_nlink(inode);
-			goto err_drop_inode;
-		}
-	} else {
-		inode->i_op = &ext3_fast_symlink_inode_operations;
-		inode->i_link = (char*)&EXT3_I(inode)->i_data;
-		memcpy(inode->i_link, symname, l);
-		inode->i_size = l-1;
-	}
-	EXT3_I(inode)->i_disksize = inode->i_size;
-	err = ext3_add_nondir(handle, dentry, inode);
-out_stop:
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-err_drop_inode:
-	unlock_new_inode(inode);
-	iput(inode);
-	return err;
-}
-
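-/*
- * Illustration only: the fast/slow split used by ext3_symlink() above.
- * Assuming EXT3_N_BLOCKS is 15 (12 direct plus 3 indirect pointers),
- * the otherwise unused i_data array offers 15 * 4 = 60 bytes of
- * in-inode storage; targets that fit, NUL included, become fast
- * symlinks and need no data block.
- */
-#if 0	/* example, not compiled here */
-#include <string.h>
-
-#define DEMO_N_BLOCKS	15	/* assumed value of EXT3_N_BLOCKS */
-
-static int demo_is_fast_symlink(const char *target)
-{
-	/* l counts the terminating NUL, mirroring strlen(symname) + 1 */
-	size_t l = strlen(target) + 1;
-
-	return l <= DEMO_N_BLOCKS * 4;	/* 60 bytes or fewer -> fast */
-}
-#endif
-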
-static int ext3_link (struct dentry * old_dentry,
-		struct inode * dir, struct dentry *dentry)
-{
-	handle_t *handle;
-	struct inode *inode = d_inode(old_dentry);
-	int err, retries = 0;
-
-	if (inode->i_nlink >= EXT3_LINK_MAX)
-		return -EMLINK;
-
-	dquot_initialize(dir);
-
-retry:
-	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 1);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode->i_ctime = CURRENT_TIME_SEC;
-	inc_nlink(inode);
-	ihold(inode);
-
-	err = ext3_add_entry(handle, dentry, inode);
-	if (!err) {
-		ext3_mark_inode_dirty(handle, inode);
-		/* this can happen only for a tmpfile being
-		 * linked for the first time
-		 */
-		if (inode->i_nlink == 1)
-			ext3_orphan_del(handle, inode);
-		d_instantiate(dentry, inode);
-	} else {
-		drop_nlink(inode);
-		iput(inode);
-	}
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-}
-
-#define PARENT_INO(buffer) \
-	(ext3_next_entry((struct ext3_dir_entry_2 *)(buffer))->inode)
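-
-/*
- * The first block of every directory begins with "." followed by ".."
- * (see ext3_mkdir() above), so PARENT_INO() finds the parent's inode
- * number by stepping over the first record and reading the second
- * entry's inode field.
- */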
-
-/*
- * Anybody can rename anything with this: the permission checks are left to the
- * higher-level routines.
- */
-static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
-			   struct inode * new_dir,struct dentry *new_dentry)
-{
-	handle_t *handle;
-	struct inode * old_inode, * new_inode;
-	struct buffer_head * old_bh, * new_bh, * dir_bh;
-	struct ext3_dir_entry_2 * old_de, * new_de;
-	int retval, flush_file = 0;
-
-	dquot_initialize(old_dir);
-	dquot_initialize(new_dir);
-
-	old_bh = new_bh = dir_bh = NULL;
-
-	/* Initialize quotas beforehand so that any quota writes go
-	 * in a separate transaction */
-	if (d_really_is_positive(new_dentry))
-		dquot_initialize(d_inode(new_dentry));
-	handle = ext3_journal_start(old_dir, 2 *
-					EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
-					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
-		handle->h_sync = 1;
-
-	old_bh = ext3_find_entry(old_dir, &old_dentry->d_name, &old_de);
-	/*
-	 *  The check for the inode number is _not_ due to possible IO errors.
-	 *  We might rmdir the source, keep it as pwd of some process
-	 *  and merrily kill the link to whatever was created under the
-	 *  same name. Goodbye sticky bit ;-<
-	 */
-	old_inode = d_inode(old_dentry);
-	retval = -ENOENT;
-	if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino)
-		goto end_rename;
-
-	new_inode = d_inode(new_dentry);
-	new_bh = ext3_find_entry(new_dir, &new_dentry->d_name, &new_de);
-	if (new_bh) {
-		if (!new_inode) {
-			brelse (new_bh);
-			new_bh = NULL;
-		}
-	}
-	if (S_ISDIR(old_inode->i_mode)) {
-		if (new_inode) {
-			retval = -ENOTEMPTY;
-			if (!empty_dir (new_inode))
-				goto end_rename;
-		}
-		retval = -EIO;
-		dir_bh = ext3_dir_bread(handle, old_inode, 0, 0, &retval);
-		if (!dir_bh)
-			goto end_rename;
-		if (le32_to_cpu(PARENT_INO(dir_bh->b_data)) != old_dir->i_ino)
-			goto end_rename;
-		retval = -EMLINK;
-		if (!new_inode && new_dir != old_dir &&
-				new_dir->i_nlink >= EXT3_LINK_MAX)
-			goto end_rename;
-	}
-	if (!new_bh) {
-		retval = ext3_add_entry (handle, new_dentry, old_inode);
-		if (retval)
-			goto end_rename;
-	} else {
-		BUFFER_TRACE(new_bh, "get write access");
-		retval = ext3_journal_get_write_access(handle, new_bh);
-		if (retval)
-			goto journal_error;
-		new_de->inode = cpu_to_le32(old_inode->i_ino);
-		if (EXT3_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
-					      EXT3_FEATURE_INCOMPAT_FILETYPE))
-			new_de->file_type = old_de->file_type;
-		new_dir->i_version++;
-		new_dir->i_ctime = new_dir->i_mtime = CURRENT_TIME_SEC;
-		ext3_mark_inode_dirty(handle, new_dir);
-		BUFFER_TRACE(new_bh, "call ext3_journal_dirty_metadata");
-		retval = ext3_journal_dirty_metadata(handle, new_bh);
-		if (retval)
-			goto journal_error;
-		brelse(new_bh);
-		new_bh = NULL;
-	}
-
-	/*
-	 * Like most other Unix systems, set the ctime for inodes on a
-	 * rename.
-	 */
-	old_inode->i_ctime = CURRENT_TIME_SEC;
-	ext3_mark_inode_dirty(handle, old_inode);
-
-	/*
-	 * ok, that's it
-	 */
-	if (le32_to_cpu(old_de->inode) != old_inode->i_ino ||
-	    old_de->name_len != old_dentry->d_name.len ||
-	    strncmp(old_de->name, old_dentry->d_name.name, old_de->name_len) ||
-	    (retval = ext3_delete_entry(handle, old_dir,
-					old_de, old_bh)) == -ENOENT) {
-		/* old_de could have moved from under us during htree split, so
-		 * make sure that we are deleting the right entry.  We might
-		 * also be pointing to a stale entry in the unused part of
-		 * old_bh so just checking inum and the name isn't enough. */
-		struct buffer_head *old_bh2;
-		struct ext3_dir_entry_2 *old_de2;
-
-		old_bh2 = ext3_find_entry(old_dir, &old_dentry->d_name,
-					  &old_de2);
-		if (old_bh2) {
-			retval = ext3_delete_entry(handle, old_dir,
-						   old_de2, old_bh2);
-			brelse(old_bh2);
-		}
-	}
-	if (retval) {
-		ext3_warning(old_dir->i_sb, "ext3_rename",
-				"Deleting old file (%lu), %d, error=%d",
-				old_dir->i_ino, old_dir->i_nlink, retval);
-	}
-
-	if (new_inode) {
-		drop_nlink(new_inode);
-		new_inode->i_ctime = CURRENT_TIME_SEC;
-	}
-	old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
-	ext3_update_dx_flag(old_dir);
-	if (dir_bh) {
-		BUFFER_TRACE(dir_bh, "get_write_access");
-		retval = ext3_journal_get_write_access(handle, dir_bh);
-		if (retval)
-			goto journal_error;
-		PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
-		BUFFER_TRACE(dir_bh, "call ext3_journal_dirty_metadata");
-		retval = ext3_journal_dirty_metadata(handle, dir_bh);
-		if (retval) {
-journal_error:
-			ext3_std_error(new_dir->i_sb, retval);
-			goto end_rename;
-		}
-		drop_nlink(old_dir);
-		if (new_inode) {
-			drop_nlink(new_inode);
-		} else {
-			inc_nlink(new_dir);
-			ext3_update_dx_flag(new_dir);
-			ext3_mark_inode_dirty(handle, new_dir);
-		}
-	}
-	ext3_mark_inode_dirty(handle, old_dir);
-	if (new_inode) {
-		ext3_mark_inode_dirty(handle, new_inode);
-		if (!new_inode->i_nlink)
-			ext3_orphan_add(handle, new_inode);
-		if (ext3_should_writeback_data(new_inode))
-			flush_file = 1;
-	}
-	retval = 0;
-
-end_rename:
-	brelse (dir_bh);
-	brelse (old_bh);
-	brelse (new_bh);
-	ext3_journal_stop(handle);
-	if (retval == 0 && flush_file)
-		filemap_flush(old_inode->i_mapping);
-	return retval;
-}
-
-/*
- * directories can handle most operations...
- */
-const struct inode_operations ext3_dir_inode_operations = {
-	.create		= ext3_create,
-	.lookup		= ext3_lookup,
-	.link		= ext3_link,
-	.unlink		= ext3_unlink,
-	.symlink	= ext3_symlink,
-	.mkdir		= ext3_mkdir,
-	.rmdir		= ext3_rmdir,
-	.mknod		= ext3_mknod,
-	.tmpfile	= ext3_tmpfile,
-	.rename		= ext3_rename,
-	.setattr	= ext3_setattr,
-#ifdef CONFIG_EXT3_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
-	.listxattr	= ext3_listxattr,
-	.removexattr	= generic_removexattr,
-#endif
-	.get_acl	= ext3_get_acl,
-	.set_acl	= ext3_set_acl,
-};
-
-const struct inode_operations ext3_special_inode_operations = {
-	.setattr	= ext3_setattr,
-#ifdef CONFIG_EXT3_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
-	.listxattr	= ext3_listxattr,
-	.removexattr	= generic_removexattr,
-#endif
-	.get_acl	= ext3_get_acl,
-	.set_acl	= ext3_set_acl,
-};
diff --git a/fs/ext3/namei.h b/fs/ext3/namei.h
deleted file mode 100644
index 46304d8..0000000
--- a/fs/ext3/namei.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*  linux/fs/ext3/namei.h
- *
- * Copyright (C) 2005 Simtec Electronics
- *	Ben Dooks <ben@simtec.co.uk>
- *
-*/
-
-extern struct dentry *ext3_get_parent(struct dentry *child);
-
-static inline struct buffer_head *ext3_dir_bread(handle_t *handle,
-						 struct inode *inode,
-						 int block, int create,
-						 int *err)
-{
-	struct buffer_head *bh;
-
-	bh = ext3_bread(handle, inode, block, create, err);
-
-	if (!bh && !(*err)) {
-		*err = -EIO;
-		ext3_error(inode->i_sb, __func__,
-			   "Directory hole detected on inode %lu\n",
-			   inode->i_ino);
-		return NULL;
-	}
-	return bh;
-}
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
deleted file mode 100644
index 2710565..0000000
--- a/fs/ext3/resize.c
+++ /dev/null
@@ -1,1117 +0,0 @@
-/*
- *  linux/fs/ext3/resize.c
- *
- * Support for resizing an ext3 filesystem while it is mounted.
- *
- * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
- *
- * This could probably be made into a module, because it is not often used.
- */
-
-
-#define EXT3FS_DEBUG
-
-#include "ext3.h"
-
-
-#define outside(b, first, last)	((b) < (first) || (b) >= (last))
-#define inside(b, first, last)	((b) >= (first) && (b) < (last))
-
-static int verify_group_input(struct super_block *sb,
-			      struct ext3_new_group_data *input)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	struct ext3_super_block *es = sbi->s_es;
-	ext3_fsblk_t start = le32_to_cpu(es->s_blocks_count);
-	ext3_fsblk_t end = start + input->blocks_count;
-	unsigned group = input->group;
-	ext3_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
-	unsigned overhead = ext3_bg_has_super(sb, group) ?
-		(1 + ext3_bg_num_gdb(sb, group) +
-		 le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
-	ext3_fsblk_t metaend = start + overhead;
-	struct buffer_head *bh = NULL;
-	ext3_grpblk_t free_blocks_count;
-	int err = -EINVAL;
-
-	input->free_blocks_count = free_blocks_count =
-		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
-
-	if (test_opt(sb, DEBUG))
-		printk(KERN_DEBUG "EXT3-fs: adding %s group %u: %u blocks "
-		       "(%d free, %u reserved)\n",
-		       ext3_bg_has_super(sb, input->group) ? "normal" :
-		       "no-super", input->group, input->blocks_count,
-		       free_blocks_count, input->reserved_blocks);
-
-	if (group != sbi->s_groups_count)
-		ext3_warning(sb, __func__,
-			     "Cannot add at group %u (only %lu groups)",
-			     input->group, sbi->s_groups_count);
-	else if ((start - le32_to_cpu(es->s_first_data_block)) %
-		 EXT3_BLOCKS_PER_GROUP(sb))
-		ext3_warning(sb, __func__, "Last group not full");
-	else if (input->reserved_blocks > input->blocks_count / 5)
-		ext3_warning(sb, __func__, "Reserved blocks too high (%u)",
-			     input->reserved_blocks);
-	else if (free_blocks_count < 0)
-		ext3_warning(sb, __func__, "Bad blocks count %u",
-			     input->blocks_count);
-	else if (!(bh = sb_bread(sb, end - 1)))
-		ext3_warning(sb, __func__,
-			     "Cannot read last block ("E3FSBLK")",
-			     end - 1);
-	else if (outside(input->block_bitmap, start, end))
-		ext3_warning(sb, __func__,
-			     "Block bitmap not in group (block %u)",
-			     input->block_bitmap);
-	else if (outside(input->inode_bitmap, start, end))
-		ext3_warning(sb, __func__,
-			     "Inode bitmap not in group (block %u)",
-			     input->inode_bitmap);
-	else if (outside(input->inode_table, start, end) ||
-	         outside(itend - 1, start, end))
-		ext3_warning(sb, __func__,
-			     "Inode table not in group (blocks %u-"E3FSBLK")",
-			     input->inode_table, itend - 1);
-	else if (input->inode_bitmap == input->block_bitmap)
-		ext3_warning(sb, __func__,
-			     "Block bitmap same as inode bitmap (%u)",
-			     input->block_bitmap);
-	else if (inside(input->block_bitmap, input->inode_table, itend))
-		ext3_warning(sb, __func__,
-			     "Block bitmap (%u) in inode table (%u-"E3FSBLK")",
-			     input->block_bitmap, input->inode_table, itend-1);
-	else if (inside(input->inode_bitmap, input->inode_table, itend))
-		ext3_warning(sb, __func__,
-			     "Inode bitmap (%u) in inode table (%u-"E3FSBLK")",
-			     input->inode_bitmap, input->inode_table, itend-1);
-	else if (inside(input->block_bitmap, start, metaend))
-		ext3_warning(sb, __func__,
-			     "Block bitmap (%u) in GDT table"
-			     " ("E3FSBLK"-"E3FSBLK")",
-			     input->block_bitmap, start, metaend - 1);
-	else if (inside(input->inode_bitmap, start, metaend))
-		ext3_warning(sb, __func__,
-			     "Inode bitmap (%u) in GDT table"
-			     " ("E3FSBLK"-"E3FSBLK")",
-			     input->inode_bitmap, start, metaend - 1);
-	else if (inside(input->inode_table, start, metaend) ||
-	         inside(itend - 1, start, metaend))
-		ext3_warning(sb, __func__,
-			     "Inode table (%u-"E3FSBLK") overlaps"
-			     "GDT table ("E3FSBLK"-"E3FSBLK")",
-			     input->inode_table, itend - 1, start, metaend - 1);
-	else
-		err = 0;
-	brelse(bh);
-
-	return err;
-}
-
-static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
-				  ext3_fsblk_t blk)
-{
-	struct buffer_head *bh;
-	int err;
-
-	bh = sb_getblk(sb, blk);
-	if (unlikely(!bh))
-		return ERR_PTR(-ENOMEM);
-	if ((err = ext3_journal_get_write_access(handle, bh))) {
-		brelse(bh);
-		bh = ERR_PTR(err);
-	} else {
-		lock_buffer(bh);
-		memset(bh->b_data, 0, sb->s_blocksize);
-		set_buffer_uptodate(bh);
-		unlock_buffer(bh);
-	}
-
-	return bh;
-}
-
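-/*
- * Illustration only: the ERR_PTR convention bclean() above relies on.
- * A userspace sketch of the kernel idiom that encodes a negative errno
- * in an otherwise impossible pointer value, letting one return slot
- * carry either a buffer or an error; the DEMO_* names are made up.
- */
-#if 0	/* example, not compiled here */
-static inline void *DEMO_ERR_PTR(long err)
-{
-	return (void *)err;		/* e.g. -ENOMEM -> 0xff...f4 */
-}
-
-static inline long DEMO_PTR_ERR(const void *ptr)
-{
-	return (long)ptr;
-}
-
-static inline int DEMO_IS_ERR(const void *ptr)
-{
-	/* The top 4095 addresses are never valid allocations. */
-	return (unsigned long)ptr >= (unsigned long)-4095;
-}
-#endif
-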
-/*
- * To avoid calling the atomic setbit hundreds or thousands of times, we only
- * need to use it within a single byte (to ensure we get endianness right).
- * We can use memset for the rest of the bitmap as there are no other users.
- */
-static void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
-{
-	int i;
-
-	if (start_bit >= end_bit)
-		return;
-
-	ext3_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
-	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
-		ext3_set_bit(i, bitmap);
-	if (i < end_bit)
-		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
-}
-
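-/*
- * Illustration only: what mark_bitmap_end() above computes.  With
- * start_bit = 5 and end_bit = 16, bits 5..7 are set one at a time to
- * reach the next byte boundary, then the remaining full byte is
- * filled with a single memset.  (The sketch uses plain little-endian
- * bit numbering rather than ext3_set_bit.)
- */
-#if 0	/* example, not compiled here */
-#include <string.h>
-
-static void demo_mark_end(int start_bit, int end_bit, unsigned char *map)
-{
-	int i;
-
-	/* Bit by bit until the byte boundary... */
-	for (i = start_bit; i < ((start_bit + 7) & ~7); i++)
-		map[i >> 3] |= 1 << (i & 7);
-	/* ...then whole bytes at once. */
-	if (i < end_bit)
-		memset(map + (i >> 3), 0xff, (end_bit - i) >> 3);
-}
-#endif
-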
-/*
- * If we have fewer than thresh credits, extend by EXT3_MAX_TRANS_DATA.
- * If that fails, restart the transaction & regain write access for the
- * buffer head which is used for block_bitmap modifications.
- */
-static int extend_or_restart_transaction(handle_t *handle, int thresh,
-					 struct buffer_head *bh)
-{
-	int err;
-
-	if (handle->h_buffer_credits >= thresh)
-		return 0;
-
-	err = ext3_journal_extend(handle, EXT3_MAX_TRANS_DATA);
-	if (err < 0)
-		return err;
-	if (err) {
-		err = ext3_journal_restart(handle, EXT3_MAX_TRANS_DATA);
-		if (err)
-			return err;
-		err = ext3_journal_get_write_access(handle, bh);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-/*
- * Set up the block and inode bitmaps, and the inode table for the new group.
- * This doesn't need to be part of the main transaction, since we are only
- * changing blocks outside the actual filesystem.  We still do journaling to
- * ensure the recovery is correct in case of a failure just after resize.
- * If any part of this fails, we simply abort the resize.
- */
-static int setup_new_group_blocks(struct super_block *sb,
-				  struct ext3_new_group_data *input)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	ext3_fsblk_t start = ext3_group_first_block_no(sb, input->group);
-	int reserved_gdb = ext3_bg_has_super(sb, input->group) ?
-		le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0;
-	unsigned long gdblocks = ext3_bg_num_gdb(sb, input->group);
-	struct buffer_head *bh;
-	handle_t *handle;
-	ext3_fsblk_t block;
-	ext3_grpblk_t bit;
-	int i;
-	int err = 0, err2;
-
-	/* This transaction may be extended/restarted along the way */
-	handle = ext3_journal_start_sb(sb, EXT3_MAX_TRANS_DATA);
-
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	mutex_lock(&sbi->s_resize_lock);
-	if (input->group != sbi->s_groups_count) {
-		err = -EBUSY;
-		goto exit_journal;
-	}
-
-	if (IS_ERR(bh = bclean(handle, sb, input->block_bitmap))) {
-		err = PTR_ERR(bh);
-		goto exit_journal;
-	}
-
-	if (ext3_bg_has_super(sb, input->group)) {
-		ext3_debug("mark backup superblock %#04lx (+0)\n", start);
-		ext3_set_bit(0, bh->b_data);
-	}
-
-	/* Copy all of the GDT blocks into the backup in this group */
-	for (i = 0, bit = 1, block = start + 1;
-	     i < gdblocks; i++, block++, bit++) {
-		struct buffer_head *gdb;
-
-		ext3_debug("update backup group %#04lx (+%d)\n", block, bit);
-
-		err = extend_or_restart_transaction(handle, 1, bh);
-		if (err)
-			goto exit_bh;
-
-		gdb = sb_getblk(sb, block);
-		if (unlikely(!gdb)) {
-			err = -ENOMEM;
-			goto exit_bh;
-		}
-		if ((err = ext3_journal_get_write_access(handle, gdb))) {
-			brelse(gdb);
-			goto exit_bh;
-		}
-		lock_buffer(gdb);
-		memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
-		set_buffer_uptodate(gdb);
-		unlock_buffer(gdb);
-		err = ext3_journal_dirty_metadata(handle, gdb);
-		if (err) {
-			brelse(gdb);
-			goto exit_bh;
-		}
-		ext3_set_bit(bit, bh->b_data);
-		brelse(gdb);
-	}
-
-	/* Zero out all of the reserved backup group descriptor table blocks */
-	for (i = 0, bit = gdblocks + 1, block = start + bit;
-	     i < reserved_gdb; i++, block++, bit++) {
-		struct buffer_head *gdb;
-
-		ext3_debug("clear reserved block %#04lx (+%d)\n", block, bit);
-
-		err = extend_or_restart_transaction(handle, 1, bh);
-		if (err)
-			goto exit_bh;
-
-		if (IS_ERR(gdb = bclean(handle, sb, block))) {
-			err = PTR_ERR(gdb);
-			goto exit_bh;
-		}
-		err = ext3_journal_dirty_metadata(handle, gdb);
-		if (err) {
-			brelse(gdb);
-			goto exit_bh;
-		}
-		ext3_set_bit(bit, bh->b_data);
-		brelse(gdb);
-	}
-	ext3_debug("mark block bitmap %#04x (+%ld)\n", input->block_bitmap,
-		   input->block_bitmap - start);
-	ext3_set_bit(input->block_bitmap - start, bh->b_data);
-	ext3_debug("mark inode bitmap %#04x (+%ld)\n", input->inode_bitmap,
-		   input->inode_bitmap - start);
-	ext3_set_bit(input->inode_bitmap - start, bh->b_data);
-
-	/* Zero out all of the inode table blocks */
-	for (i = 0, block = input->inode_table, bit = block - start;
-	     i < sbi->s_itb_per_group; i++, bit++, block++) {
-		struct buffer_head *it;
-
-		ext3_debug("clear inode block %#04lx (+%d)\n", block, bit);
-
-		err = extend_or_restart_transaction(handle, 1, bh);
-		if (err)
-			goto exit_bh;
-
-		if (IS_ERR(it = bclean(handle, sb, block))) {
-			err = PTR_ERR(it);
-			goto exit_bh;
-		}
-		err = ext3_journal_dirty_metadata(handle, it);
-		if (err) {
-			brelse(it);
-			goto exit_bh;
-		}
-		brelse(it);
-		ext3_set_bit(bit, bh->b_data);
-	}
-
-	err = extend_or_restart_transaction(handle, 2, bh);
-	if (err)
-		goto exit_bh;
-
-	mark_bitmap_end(input->blocks_count, EXT3_BLOCKS_PER_GROUP(sb),
-			bh->b_data);
-	err = ext3_journal_dirty_metadata(handle, bh);
-	if (err)
-		goto exit_bh;
-	brelse(bh);
-
-	/* Mark unused entries in inode bitmap used */
-	ext3_debug("clear inode bitmap %#04x (+%ld)\n",
-		   input->inode_bitmap, input->inode_bitmap - start);
-	if (IS_ERR(bh = bclean(handle, sb, input->inode_bitmap))) {
-		err = PTR_ERR(bh);
-		goto exit_journal;
-	}
-
-	mark_bitmap_end(EXT3_INODES_PER_GROUP(sb), EXT3_BLOCKS_PER_GROUP(sb),
-			bh->b_data);
-	err = ext3_journal_dirty_metadata(handle, bh);
-exit_bh:
-	brelse(bh);
-
-exit_journal:
-	mutex_unlock(&sbi->s_resize_lock);
-	if ((err2 = ext3_journal_stop(handle)) && !err)
-		err = err2;
-
-	return err;
-}
-
-/*
- * Iterate through the groups which hold BACKUP superblock/GDT copies in an
- * ext3 filesystem.  The counters should be initialized to 1, 5, and 7 before
- * calling this for the first time.  In a sparse filesystem it will be the
- * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
- * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
- */
-static unsigned ext3_list_backups(struct super_block *sb, unsigned *three,
-				  unsigned *five, unsigned *seven)
-{
-	unsigned *min = three;
-	int mult = 3;
-	unsigned ret;
-
-	if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
-					EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
-		ret = *min;
-		*min += 1;
-		return ret;
-	}
-
-	if (*five < *min) {
-		min = five;
-		mult = 5;
-	}
-	if (*seven < *min) {
-		min = seven;
-		mult = 7;
-	}
-
-	ret = *min;
-	*min *= mult;
-
-	return ret;
-}
-
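-/*
- * Illustration only: the sequence ext3_list_backups() above produces
- * for a sparse filesystem.  A standalone sketch; on a 100-group
- * filesystem it prints 1 3 5 7 9 25 27 49 81, the merged powers of
- * 3, 5 and 7.
- */
-#if 0	/* example, not compiled here */
-#include <stdio.h>
-
-static unsigned demo_next_backup(unsigned *three, unsigned *five,
-				 unsigned *seven)
-{
-	unsigned *min = three;
-	unsigned mult = 3, ret;
-
-	if (*five < *min) {
-		min = five;
-		mult = 5;
-	}
-	if (*seven < *min) {
-		min = seven;
-		mult = 7;
-	}
-	ret = *min;
-	*min *= mult;
-	return ret;
-}
-
-int main(void)
-{
-	unsigned three = 1, five = 5, seven = 7, grp;
-
-	while ((grp = demo_next_backup(&three, &five, &seven)) < 100)
-		printf("%u ", grp);
-	printf("\n");
-	return 0;
-}
-#endif
-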
-/*
- * Check that all of the backup GDT blocks are held in the primary GDT block.
- * It is assumed that they are stored in group order.  Returns the number of
- * groups in the current filesystem that have BACKUPS, or a negative error
- * code.
- */
-static int verify_reserved_gdb(struct super_block *sb,
-			       struct buffer_head *primary)
-{
-	const ext3_fsblk_t blk = primary->b_blocknr;
-	const unsigned long end = EXT3_SB(sb)->s_groups_count;
-	unsigned three = 1;
-	unsigned five = 5;
-	unsigned seven = 7;
-	unsigned grp;
-	__le32 *p = (__le32 *)primary->b_data;
-	int gdbackups = 0;
-
-	while ((grp = ext3_list_backups(sb, &three, &five, &seven)) < end) {
-		if (le32_to_cpu(*p++) != grp * EXT3_BLOCKS_PER_GROUP(sb) + blk) {
-			ext3_warning(sb, __func__,
-				     "reserved GDT "E3FSBLK
-				     " missing grp %d ("E3FSBLK")",
-				     blk, grp,
-				     grp * EXT3_BLOCKS_PER_GROUP(sb) + blk);
-			return -EINVAL;
-		}
-		if (++gdbackups > EXT3_ADDR_PER_BLOCK(sb))
-			return -EFBIG;
-	}
-
-	return gdbackups;
-}
-
-/*
- * Called when we need to bring a reserved group descriptor table block into
- * use from the resize inode.  The primary copy of the new GDT block currently
- * is an indirect block (under the double indirect block in the resize inode).
- * The new backup GDT blocks will be stored as leaf blocks in this indirect
- * block, in group order.  Even though we know all the block numbers we need,
- * we check to ensure that the resize inode has actually reserved these blocks.
- *
- * We don't need to update the block bitmaps because the blocks are still
- * in use.
- *
- * We get all of the error cases out of the way, so that we are sure to not
- * fail once we start modifying the data on disk, because JBD has no rollback.
- */
-static int add_new_gdb(handle_t *handle, struct inode *inode,
-		       struct ext3_new_group_data *input,
-		       struct buffer_head **primary)
-{
-	struct super_block *sb = inode->i_sb;
-	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-	unsigned long gdb_num = input->group / EXT3_DESC_PER_BLOCK(sb);
-	ext3_fsblk_t gdblock = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
-	struct buffer_head **o_group_desc, **n_group_desc;
-	struct buffer_head *dind;
-	int gdbackups;
-	struct ext3_iloc iloc;
-	__le32 *data;
-	int err;
-
-	if (test_opt(sb, DEBUG))
-		printk(KERN_DEBUG
-		       "EXT3-fs: ext3_add_new_gdb: adding group block %lu\n",
-		       gdb_num);
-
-	/*
-	 * If we are not using the primary superblock/GDT copy don't resize,
-	 * because the user tools have no way of handling this.  Probably a
-	 * bad time to do it anyway.
-	 */
-	if (EXT3_SB(sb)->s_sbh->b_blocknr !=
-	    le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) {
-		ext3_warning(sb, __func__,
-			"won't resize using backup superblock at %llu",
-			(unsigned long long)EXT3_SB(sb)->s_sbh->b_blocknr);
-		return -EPERM;
-	}
-
-	*primary = sb_bread(sb, gdblock);
-	if (!*primary)
-		return -EIO;
-
-	if ((gdbackups = verify_reserved_gdb(sb, *primary)) < 0) {
-		err = gdbackups;
-		goto exit_bh;
-	}
-
-	data = EXT3_I(inode)->i_data + EXT3_DIND_BLOCK;
-	dind = sb_bread(sb, le32_to_cpu(*data));
-	if (!dind) {
-		err = -EIO;
-		goto exit_bh;
-	}
-
-	data = (__le32 *)dind->b_data;
-	if (le32_to_cpu(data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)]) != gdblock) {
-		ext3_warning(sb, __func__,
-			     "new group %u GDT block "E3FSBLK" not reserved",
-			     input->group, gdblock);
-		err = -EINVAL;
-		goto exit_dind;
-	}
-
-	if ((err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh)))
-		goto exit_dind;
-
-	if ((err = ext3_journal_get_write_access(handle, *primary)))
-		goto exit_sbh;
-
-	if ((err = ext3_journal_get_write_access(handle, dind)))
-		goto exit_primary;
-
-	/* ext3_reserve_inode_write() gets a reference on the iloc */
-	if ((err = ext3_reserve_inode_write(handle, inode, &iloc)))
-		goto exit_dindj;
-
-	n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
-			GFP_NOFS);
-	if (!n_group_desc) {
-		err = -ENOMEM;
-		ext3_warning (sb, __func__,
-			      "not enough memory for %lu groups", gdb_num + 1);
-		goto exit_inode;
-	}
-
-	/*
-	 * Finally, we have all of the possible failures behind us...
-	 *
-	 * Remove new GDT block from inode double-indirect block and clear out
-	 * the new GDT block for use (which also "frees" the backup GDT blocks
-	 * from the reserved inode).  We don't need to change the bitmaps for
-	 * these blocks, because they are marked as in-use from being in the
-	 * reserved inode, and will become GDT blocks (primary and backup).
-	 */
-	data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)] = 0;
-	err = ext3_journal_dirty_metadata(handle, dind);
-	if (err)
-		goto exit_group_desc;
-	brelse(dind);
-	dind = NULL;
-	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
-	err = ext3_mark_iloc_dirty(handle, inode, &iloc);
-	if (err)
-		goto exit_group_desc;
-	memset((*primary)->b_data, 0, sb->s_blocksize);
-	err = ext3_journal_dirty_metadata(handle, *primary);
-	if (err)
-		goto exit_group_desc;
-
-	o_group_desc = EXT3_SB(sb)->s_group_desc;
-	memcpy(n_group_desc, o_group_desc,
-	       EXT3_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
-	n_group_desc[gdb_num] = *primary;
-	EXT3_SB(sb)->s_group_desc = n_group_desc;
-	EXT3_SB(sb)->s_gdb_count++;
-	kfree(o_group_desc);
-
-	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
-	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-	if (err)
-		goto exit_inode;
-
-	return 0;
-
-exit_group_desc:
-	kfree(n_group_desc);
-exit_inode:
-	//ext3_journal_release_buffer(handle, iloc.bh);
-	brelse(iloc.bh);
-exit_dindj:
-	//ext3_journal_release_buffer(handle, dind);
-exit_primary:
-	//ext3_journal_release_buffer(handle, *primary);
-exit_sbh:
-	//ext3_journal_release_buffer(handle, *primary);
-exit_dind:
-	brelse(dind);
-exit_bh:
-	brelse(*primary);
-
-	ext3_debug("leaving with error %d\n", err);
-	return err;
-}
-
-/*
- * Called when we are adding a new group which has a backup copy of each of
- * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
- * We need to add these reserved backup GDT blocks to the resize inode, so
- * that they are kept for future resizing and not allocated to files.
- *
- * Each reserved backup GDT block will go into a different indirect block.
- * The indirect blocks are actually the primary reserved GDT blocks,
- * so we know in advance what their block numbers are.  We only get the
- * double-indirect block to verify it is pointing to the primary reserved
- * GDT blocks so we don't overwrite a data block by accident.  The reserved
- * backup GDT blocks are stored in their reserved primary GDT block.
- */
-static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
-			      struct ext3_new_group_data *input)
-{
-	struct super_block *sb = inode->i_sb;
-	int reserved_gdb =le16_to_cpu(EXT3_SB(sb)->s_es->s_reserved_gdt_blocks);
-	struct buffer_head **primary;
-	struct buffer_head *dind;
-	struct ext3_iloc iloc;
-	ext3_fsblk_t blk;
-	__le32 *data, *end;
-	int gdbackups = 0;
-	int res, i;
-	int err;
-
-	primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_NOFS);
-	if (!primary)
-		return -ENOMEM;
-
-	data = EXT3_I(inode)->i_data + EXT3_DIND_BLOCK;
-	dind = sb_bread(sb, le32_to_cpu(*data));
-	if (!dind) {
-		err = -EIO;
-		goto exit_free;
-	}
-
-	blk = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + EXT3_SB(sb)->s_gdb_count;
-	data = (__le32 *)dind->b_data + (EXT3_SB(sb)->s_gdb_count %
-					 EXT3_ADDR_PER_BLOCK(sb));
-	end = (__le32 *)dind->b_data + EXT3_ADDR_PER_BLOCK(sb);
-
-	/* Get each reserved primary GDT block and verify it holds backups */
-	for (res = 0; res < reserved_gdb; res++, blk++) {
-		if (le32_to_cpu(*data) != blk) {
-			ext3_warning(sb, __func__,
-				     "reserved block "E3FSBLK
-				     " not at offset %ld",
-				     blk,
-				     (long)(data - (__le32 *)dind->b_data));
-			err = -EINVAL;
-			goto exit_bh;
-		}
-		primary[res] = sb_bread(sb, blk);
-		if (!primary[res]) {
-			err = -EIO;
-			goto exit_bh;
-		}
-		if ((gdbackups = verify_reserved_gdb(sb, primary[res])) < 0) {
-			brelse(primary[res]);
-			err = gdbackups;
-			goto exit_bh;
-		}
-		if (++data >= end)
-			data = (__le32 *)dind->b_data;
-	}
-
-	for (i = 0; i < reserved_gdb; i++) {
-		if ((err = ext3_journal_get_write_access(handle, primary[i]))) {
-			/*
-			int j;
-			for (j = 0; j < i; j++)
-				ext3_journal_release_buffer(handle, primary[j]);
-			 */
-			goto exit_bh;
-		}
-	}
-
-	if ((err = ext3_reserve_inode_write(handle, inode, &iloc)))
-		goto exit_bh;
-
-	/*
-	 * Finally we can add each of the reserved backup GDT blocks from
-	 * the new group to its reserved primary GDT block.
-	 */
-	blk = input->group * EXT3_BLOCKS_PER_GROUP(sb);
-	for (i = 0; i < reserved_gdb; i++) {
-		int err2;
-		data = (__le32 *)primary[i]->b_data;
-		/* printk("reserving backup %lu[%u] = %lu\n",
-		       primary[i]->b_blocknr, gdbackups,
-		       blk + primary[i]->b_blocknr); */
-		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
-		err2 = ext3_journal_dirty_metadata(handle, primary[i]);
-		if (!err)
-			err = err2;
-	}
-	inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9;
-	ext3_mark_iloc_dirty(handle, inode, &iloc);
-
-exit_bh:
-	while (--res >= 0)
-		brelse(primary[res]);
-	brelse(dind);
-
-exit_free:
-	kfree(primary);
-
-	return err;
-}
-
-/*
- * Update the backup copies of the ext3 metadata.  These don't need to be part
- * of the main resize transaction, because e2fsck will re-write them if there
- * is a problem (basically only OOM will cause a problem).  However, we
- * _should_ update the backups if possible, in case the primary gets trashed
- * for some reason and we need to run e2fsck from a backup superblock.  The
- * important part is that the new block and inode counts are in the backup
- * superblocks, and the location of the new group metadata in the GDT backups.
- *
- * We do not need to take the s_resize_lock for this, because these
- * blocks are not otherwise touched by the filesystem code when it is
- * mounted.  We don't need to worry about last changing from
- * sbi->s_groups_count, because the worst that can happen is that we
- * do not copy the full number of backups at this time.  The resize
- * which changed s_groups_count will backup again.
- */
-static void update_backups(struct super_block *sb,
-			   int blk_off, char *data, int size)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	const unsigned long last = sbi->s_groups_count;
-	const int bpg = EXT3_BLOCKS_PER_GROUP(sb);
-	unsigned three = 1;
-	unsigned five = 5;
-	unsigned seven = 7;
-	unsigned group;
-	int rest = sb->s_blocksize - size;
-	handle_t *handle;
-	int err = 0, err2;
-
-	handle = ext3_journal_start_sb(sb, EXT3_MAX_TRANS_DATA);
-	if (IS_ERR(handle)) {
-		group = 1;
-		err = PTR_ERR(handle);
-		goto exit_err;
-	}
-
-	while ((group = ext3_list_backups(sb, &three, &five, &seven)) < last) {
-		struct buffer_head *bh;
-
-		/* Out of journal space, and can't get more - abort - so sad */
-		if (handle->h_buffer_credits == 0 &&
-		    ext3_journal_extend(handle, EXT3_MAX_TRANS_DATA) &&
-		    (err = ext3_journal_restart(handle, EXT3_MAX_TRANS_DATA)))
-			break;
-
-		bh = sb_getblk(sb, group * bpg + blk_off);
-		if (unlikely(!bh)) {
-			err = -ENOMEM;
-			break;
-		}
-		ext3_debug("update metadata backup %#04lx\n",
-			  (unsigned long)bh->b_blocknr);
-		if ((err = ext3_journal_get_write_access(handle, bh))) {
-			brelse(bh);
-			break;
-		}
-		lock_buffer(bh);
-		memcpy(bh->b_data, data, size);
-		if (rest)
-			memset(bh->b_data + size, 0, rest);
-		set_buffer_uptodate(bh);
-		unlock_buffer(bh);
-		err = ext3_journal_dirty_metadata(handle, bh);
-		brelse(bh);
-		if (err)
-			break;
-	}
-	if ((err2 = ext3_journal_stop(handle)) && !err)
-		err = err2;
-
-	/*
-	 * Ugh! Need to have e2fsck write the backup copies.  It is too
-	 * late to revert the resize, we shouldn't fail just because of
-	 * the backup copies (they are only needed in case of corruption).
-	 *
-	 * However, if we got here we have a journal problem too, so we
-	 * can't really start a transaction to mark the superblock.
-	 * Chicken out and just set the flag in the hope it will be written
-	 * to disk, and if not - we will simply wait until next fsck.
-	 */
-exit_err:
-	if (err) {
-		ext3_warning(sb, __func__,
-			     "can't update backup for group %d (err %d), "
-			     "forcing fsck on next reboot", group, err);
-		sbi->s_mount_state &= ~EXT3_VALID_FS;
-		sbi->s_es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
-		mark_buffer_dirty(sbi->s_sbh);
-	}
-}
-
-/* Add group descriptor data to an existing or new group descriptor block.
- * Ensure we handle all possible error conditions _before_ we start modifying
- * the filesystem, because we cannot abort the transaction and not have it
- * write the data to disk.
- *
- * If we are on a GDT block boundary, we need to get the reserved GDT block.
- * Otherwise, we may need to add backup GDT blocks for a sparse group.
- *
- * We only need to hold the superblock lock while we are actually adding
- * in the new group's counts to the superblock.  Prior to that we have
- * not really "added" the group at all.  We re-check that we are still
- * adding in the last group in case things have changed since verifying.
- */
-int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	struct ext3_super_block *es = sbi->s_es;
-	int reserved_gdb = ext3_bg_has_super(sb, input->group) ?
-		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
-	struct buffer_head *primary = NULL;
-	struct ext3_group_desc *gdp;
-	struct inode *inode = NULL;
-	handle_t *handle;
-	int gdb_off, gdb_num;
-	int err, err2;
-
-	gdb_num = input->group / EXT3_DESC_PER_BLOCK(sb);
-	gdb_off = input->group % EXT3_DESC_PER_BLOCK(sb);
-
-	if (gdb_off == 0 && !EXT3_HAS_RO_COMPAT_FEATURE(sb,
-					EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
-		ext3_warning(sb, __func__,
-			     "Can't resize non-sparse filesystem further");
-		return -EPERM;
-	}
-
-	if (le32_to_cpu(es->s_blocks_count) + input->blocks_count <
-	    le32_to_cpu(es->s_blocks_count)) {
-		ext3_warning(sb, __func__, "blocks_count overflow\n");
-		return -EINVAL;
-	}
-
-	if (le32_to_cpu(es->s_inodes_count) + EXT3_INODES_PER_GROUP(sb) <
-	    le32_to_cpu(es->s_inodes_count)) {
-		ext3_warning(sb, __func__, "inodes_count overflow\n");
-		return -EINVAL;
-	}
-
-	if (reserved_gdb || gdb_off == 0) {
-		if (!EXT3_HAS_COMPAT_FEATURE(sb,
-					     EXT3_FEATURE_COMPAT_RESIZE_INODE)
-		    || !le16_to_cpu(es->s_reserved_gdt_blocks)) {
-			ext3_warning(sb, __func__,
-				     "No reserved GDT blocks, can't resize");
-			return -EPERM;
-		}
-		inode = ext3_iget(sb, EXT3_RESIZE_INO);
-		if (IS_ERR(inode)) {
-			ext3_warning(sb, __func__,
-				     "Error opening resize inode");
-			return PTR_ERR(inode);
-		}
-	}
-
-	if ((err = verify_group_input(sb, input)))
-		goto exit_put;
-
-	if ((err = setup_new_group_blocks(sb, input)))
-		goto exit_put;
-
-	/*
-	 * We will always be modifying at least the superblock and a GDT
-	 * block.  If we are adding a group past the last current GDT block,
-	 * we will also modify the inode and the dindirect block.  If we
-	 * are adding a group with superblock/GDT backups, we will also
-	 * modify each of the reserved GDT dindirect blocks.
-	 */
-	handle = ext3_journal_start_sb(sb,
-				       ext3_bg_has_super(sb, input->group) ?
-				       3 + reserved_gdb : 4);
-	if (IS_ERR(handle)) {
-		err = PTR_ERR(handle);
-		goto exit_put;
-	}
-
-	mutex_lock(&sbi->s_resize_lock);
-	if (input->group != sbi->s_groups_count) {
-		ext3_warning(sb, __func__,
-			     "multiple resizers run on filesystem!");
-		err = -EBUSY;
-		goto exit_journal;
-	}
-
-	if ((err = ext3_journal_get_write_access(handle, sbi->s_sbh)))
-		goto exit_journal;
-
-	/*
-	 * We will only either add reserved group blocks to a backup group
-	 * or remove reserved blocks for the first group in a new group block.
-	 * Doing both would mean more complex code, and sane people don't
-	 * use non-sparse filesystems anymore.  This is already checked above.
-	 */
-	if (gdb_off) {
-		primary = sbi->s_group_desc[gdb_num];
-		if ((err = ext3_journal_get_write_access(handle, primary)))
-			goto exit_journal;
-
-		if (reserved_gdb && ext3_bg_num_gdb(sb, input->group) &&
-		    (err = reserve_backup_gdb(handle, inode, input)))
-			goto exit_journal;
-	} else if ((err = add_new_gdb(handle, inode, input, &primary)))
-		goto exit_journal;
-
-	/*
-	 * OK, now we've set up the new group.  Time to make it active.
-	 *
-	 * We do not lock all allocations via s_resize_lock
-	 * so we have to be safe wrt. concurrent accesses to the group
-	 * data.  So we need to be careful to set all of the relevant
-	 * group descriptor data etc. *before* we enable the group.
-	 *
-	 * The key field here is sbi->s_groups_count: as long as
-	 * that retains its old value, nobody is going to access the new
-	 * group.
-	 *
-	 * So first we update all the descriptor metadata for the new
-	 * group; then we update the total disk blocks count; then we
-	 * update the groups count to enable the group; then finally we
-	 * update the free space counts so that the system can start
-	 * using the new disk blocks.
-	 */
-
-	/* Update group descriptor block for new group */
-	gdp = (struct ext3_group_desc *)primary->b_data + gdb_off;
-
-	gdp->bg_block_bitmap = cpu_to_le32(input->block_bitmap);
-	gdp->bg_inode_bitmap = cpu_to_le32(input->inode_bitmap);
-	gdp->bg_inode_table = cpu_to_le32(input->inode_table);
-	gdp->bg_free_blocks_count = cpu_to_le16(input->free_blocks_count);
-	gdp->bg_free_inodes_count = cpu_to_le16(EXT3_INODES_PER_GROUP(sb));
-
-	/*
-	 * Make the new blocks and inodes valid next.  We do this before
-	 * increasing the group count so that once the group is enabled,
-	 * all of its blocks and inodes are already valid.
-	 *
-	 * We always allocate group-by-group, then block-by-block or
-	 * inode-by-inode within a group, so enabling these
-	 * blocks/inodes before the group is live won't actually let us
-	 * allocate the new space yet.
-	 */
-	le32_add_cpu(&es->s_blocks_count, input->blocks_count);
-	le32_add_cpu(&es->s_inodes_count, EXT3_INODES_PER_GROUP(sb));
-
-	/*
-	 * We need to protect s_groups_count against other CPUs seeing
-	 * inconsistent state in the superblock.
-	 *
-	 * The precise rules we use are:
-	 *
-	 * * Writers of s_groups_count *must* hold s_resize_lock
-	 * AND
-	 * * Writers must perform a smp_wmb() after updating all dependent
-	 *   data and before modifying the groups count
-	 *
-	 * * Readers must hold s_resize_lock over the access
-	 * OR
-	 * * Readers must perform an smp_rmb() after reading the groups count
-	 *   and before reading any dependent data.
-	 *
-	 * NB. These rules can be relaxed when checking the group count
-	 * while freeing data, as we can only allocate from a block
-	 * group after serialising against the group count, and we can
-	 * only then free after serialising in turn against that
-	 * allocation.
-	 */
-	smp_wmb();
-
-	/* Update the global fs size fields */
-	sbi->s_groups_count++;
-
-	err = ext3_journal_dirty_metadata(handle, primary);
-	if (err)
-		goto exit_journal;
-
-	/* Update the reserved block counts only once the new group is
-	 * active. */
-	le32_add_cpu(&es->s_r_blocks_count, input->reserved_blocks);
-
-	/* Update the free space counts */
-	percpu_counter_add(&sbi->s_freeblocks_counter,
-			   input->free_blocks_count);
-	percpu_counter_add(&sbi->s_freeinodes_counter,
-			   EXT3_INODES_PER_GROUP(sb));
-
-	err = ext3_journal_dirty_metadata(handle, sbi->s_sbh);
-
-exit_journal:
-	mutex_unlock(&sbi->s_resize_lock);
-	if ((err2 = ext3_journal_stop(handle)) && !err)
-		err = err2;
-	if (!err) {
-		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
-			       sizeof(struct ext3_super_block));
-		update_backups(sb, primary->b_blocknr, primary->b_data,
-			       primary->b_size);
-	}
-exit_put:
-	iput(inode);
-	return err;
-} /* ext3_group_add */
-
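-/*
- * Illustration only: the publish ordering ext3_group_add() above
- * enforces with smp_wmb() before bumping s_groups_count.  A userspace
- * analogy using C11 release/acquire atomics in place of the kernel
- * barriers: a reader that observes the new count is guaranteed to
- * also observe the group data written before it.
- */
-#if 0	/* example, not compiled here */
-#include <stdatomic.h>
-
-struct demo_fs {
-	int group_data[16];		/* per-group metadata */
-	_Atomic unsigned groups_count;	/* the "enable" switch */
-};
-
-static void demo_add_group(struct demo_fs *fs, int data)
-{
-	unsigned n = atomic_load_explicit(&fs->groups_count,
-					  memory_order_relaxed);
-
-	fs->group_data[n] = data;	/* set the group up first... */
-	/* ...then publish: release pairs with the reader's acquire. */
-	atomic_store_explicit(&fs->groups_count, n + 1,
-			      memory_order_release);
-}
-
-static int demo_read_group(struct demo_fs *fs, unsigned i, int *out)
-{
-	unsigned n = atomic_load_explicit(&fs->groups_count,
-					  memory_order_acquire);
-
-	if (i >= n)
-		return -1;		/* group not live yet */
-	*out = fs->group_data[i];	/* ordered after the acquire */
-	return 0;
-}
-#endif
-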
-/* Extend the filesystem to the new number of blocks specified.  This entry
- * point is only used to extend the current filesystem to the end of the last
- * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
- * for emergencies (because it has no dependencies on reserved blocks).
- *
- * If we _really_ wanted, we could use default values to call ext3_group_add()
- * allow the "remount" trick to work for arbitrary resizing, assuming enough
- * GDT blocks are reserved to grow to the desired size.
- */
-int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
-		      ext3_fsblk_t n_blocks_count)
-{
-	ext3_fsblk_t o_blocks_count;
-	ext3_grpblk_t last;
-	ext3_grpblk_t add;
-	struct buffer_head * bh;
-	handle_t *handle;
-	int err;
-	unsigned long freed_blocks;
-
-	/* We don't need to worry about locking wrt other resizers just
-	 * yet: we're going to revalidate es->s_blocks_count after
-	 * taking the s_resize_lock below. */
-	o_blocks_count = le32_to_cpu(es->s_blocks_count);
-
-	if (test_opt(sb, DEBUG))
-		printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK
-		       " up to "E3FSBLK" blocks\n",
-		       o_blocks_count, n_blocks_count);
-
-	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
-		return 0;
-
-	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
-		printk(KERN_ERR "EXT3-fs: filesystem on %s:"
-			" too large to resize to "E3FSBLK" blocks safely\n",
-			sb->s_id, n_blocks_count);
-		if (sizeof(sector_t) < 8)
-			ext3_warning(sb, __func__,
-			"CONFIG_LBDAF not enabled\n");
-		return -EINVAL;
-	}
-
-	if (n_blocks_count < o_blocks_count) {
-		ext3_warning(sb, __func__,
-			     "can't shrink FS - resize aborted");
-		return -EBUSY;
-	}
-
-	/* Handle the remaining blocks in the last group only. */
-	last = (o_blocks_count - le32_to_cpu(es->s_first_data_block)) %
-		EXT3_BLOCKS_PER_GROUP(sb);
-
-	if (last == 0) {
-		ext3_warning(sb, __func__,
-			     "need to use ext2online to resize further");
-		return -EPERM;
-	}
-
-	add = EXT3_BLOCKS_PER_GROUP(sb) - last;
-
-	if (o_blocks_count + add < o_blocks_count) {
-		ext3_warning(sb, __func__, "blocks_count overflow");
-		return -EINVAL;
-	}
-
-	if (o_blocks_count + add > n_blocks_count)
-		add = n_blocks_count - o_blocks_count;
-
-	if (o_blocks_count + add < n_blocks_count)
-		ext3_warning(sb, __func__,
-			     "will only finish group ("E3FSBLK
-			     " blocks, %u new)",
-			     o_blocks_count + add, add);
-
-	/* See if the device is actually as big as what was requested */
-	bh = sb_bread(sb, o_blocks_count + add - 1);
-	if (!bh) {
-		ext3_warning(sb, __func__,
-			     "can't read last block, resize aborted");
-		return -ENOSPC;
-	}
-	brelse(bh);
-
-	/* We will update the superblock, one block bitmap, and
-	 * one group descriptor via ext3_free_blocks().
-	 */
-	handle = ext3_journal_start_sb(sb, 3);
-	if (IS_ERR(handle)) {
-		err = PTR_ERR(handle);
-		ext3_warning(sb, __func__, "error %d on journal start",err);
-		goto exit_put;
-	}
-
-	mutex_lock(&EXT3_SB(sb)->s_resize_lock);
-	if (o_blocks_count != le32_to_cpu(es->s_blocks_count)) {
-		ext3_warning(sb, __func__,
-			     "multiple resizers run on filesystem!");
-		mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
-		ext3_journal_stop(handle);
-		err = -EBUSY;
-		goto exit_put;
-	}
-
-	if ((err = ext3_journal_get_write_access(handle,
-						 EXT3_SB(sb)->s_sbh))) {
-		ext3_warning(sb, __func__,
-			     "error %d on journal write access", err);
-		mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
-		ext3_journal_stop(handle);
-		goto exit_put;
-	}
-	es->s_blocks_count = cpu_to_le32(o_blocks_count + add);
-	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-	mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
-	if (err) {
-		ext3_warning(sb, __func__,
-			     "error %d on journal dirty metadata", err);
-		ext3_journal_stop(handle);
-		goto exit_put;
-	}
-	ext3_debug("freeing blocks "E3FSBLK" through "E3FSBLK"\n",
-		   o_blocks_count, o_blocks_count + add);
-	ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
-	ext3_debug("freed blocks "E3FSBLK" through "E3FSBLK"\n",
-		   o_blocks_count, o_blocks_count + add);
-	if ((err = ext3_journal_stop(handle)))
-		goto exit_put;
-	if (test_opt(sb, DEBUG))
-		printk(KERN_DEBUG "EXT3-fs: extended group to %u blocks\n",
-		       le32_to_cpu(es->s_blocks_count));
-	update_backups(sb, EXT3_SB(sb)->s_sbh->b_blocknr, (char *)es,
-		       sizeof(struct ext3_super_block));
-exit_put:
-	return err;
-} /* ext3_group_extend */
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
deleted file mode 100644
index 5ed0044..0000000
--- a/fs/ext3/super.c
+++ /dev/null
@@ -1,3165 +0,0 @@
-/*
- *  linux/fs/ext3/super.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/inode.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- */
-
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <linux/parser.h>
-#include <linux/exportfs.h>
-#include <linux/statfs.h>
-#include <linux/random.h>
-#include <linux/mount.h>
-#include <linux/quotaops.h>
-#include <linux/seq_file.h>
-#include <linux/log2.h>
-#include <linux/cleancache.h>
-#include <linux/namei.h>
-
-#include <asm/uaccess.h>
-
-#define CREATE_TRACE_POINTS
-
-#include "ext3.h"
-#include "xattr.h"
-#include "acl.h"
-#include "namei.h"
-
-#ifdef CONFIG_EXT3_DEFAULTS_TO_ORDERED
-  #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_ORDERED_DATA
-#else
-  #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_WRITEBACK_DATA
-#endif
-
-static int ext3_load_journal(struct super_block *, struct ext3_super_block *,
-			     unsigned long journal_devnum);
-static int ext3_create_journal(struct super_block *, struct ext3_super_block *,
-			       unsigned int);
-static int ext3_commit_super(struct super_block *sb,
-			       struct ext3_super_block *es,
-			       int sync);
-static void ext3_mark_recovery_complete(struct super_block * sb,
-					struct ext3_super_block * es);
-static void ext3_clear_journal_err(struct super_block * sb,
-				   struct ext3_super_block * es);
-static int ext3_sync_fs(struct super_block *sb, int wait);
-static const char *ext3_decode_error(struct super_block * sb, int errno,
-				     char nbuf[16]);
-static int ext3_remount (struct super_block * sb, int * flags, char * data);
-static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf);
-static int ext3_unfreeze(struct super_block *sb);
-static int ext3_freeze(struct super_block *sb);
-
-/*
- * Wrappers for journal_start/end.
- */
-handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
-{
-	journal_t *journal;
-
-	if (sb->s_flags & MS_RDONLY)
-		return ERR_PTR(-EROFS);
-
-	/* Special case here: if the journal has aborted behind our
-	 * backs (eg. EIO in the commit thread), then we still need to
-	 * take the FS itself readonly cleanly. */
-	journal = EXT3_SB(sb)->s_journal;
-	if (is_journal_aborted(journal)) {
-		ext3_abort(sb, __func__,
-			   "Detected aborted journal");
-		return ERR_PTR(-EROFS);
-	}
-
-	return journal_start(journal, nblocks);
-}
-
-int __ext3_journal_stop(const char *where, handle_t *handle)
-{
-	struct super_block *sb;
-	int err;
-	int rc;
-
-	sb = handle->h_transaction->t_journal->j_private;
-	err = handle->h_err;
-	rc = journal_stop(handle);
-
-	if (!err)
-		err = rc;
-	if (err)
-		__ext3_std_error(sb, where, err);
-	return err;
-}
-
-void ext3_journal_abort_handle(const char *caller, const char *err_fn,
-		struct buffer_head *bh, handle_t *handle, int err)
-{
-	char nbuf[16];
-	const char *errstr = ext3_decode_error(NULL, err, nbuf);
-
-	if (bh)
-		BUFFER_TRACE(bh, "abort");
-
-	if (!handle->h_err)
-		handle->h_err = err;
-
-	if (is_handle_aborted(handle))
-		return;
-
-	printk(KERN_ERR "EXT3-fs: %s: aborting transaction: %s in %s\n",
-		caller, errstr, err_fn);
-
-	journal_abort_handle(handle);
-}
-
-void ext3_msg(struct super_block *sb, const char *prefix,
-		const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-
-	va_start(args, fmt);
-
-	vaf.fmt = fmt;
-	vaf.va = &args;
-
-	printk("%sEXT3-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
-
-	va_end(args);
-}
-
-/* Deal with the reporting of failure conditions on a filesystem such as
- * inconsistencies detected or read IO failures.
- *
- * On ext2, we can store the error state of the filesystem in the
- * superblock.  That is not possible on ext3, because we may have other
- * write ordering constraints on the superblock which prevent us from
- * writing it out straight away; and given that the journal is about to
- * be aborted, we can't rely on the current, or future, transactions to
- * write out the superblock safely.
- *
- * We'll just use the journal_abort() error code to record an error in
- * the journal instead.  On recovery, the journal will complain about
- * that error until we've noted it down and cleared it.
- */
-
-static void ext3_handle_error(struct super_block *sb)
-{
-	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-
-	EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
-	es->s_state |= cpu_to_le16(EXT3_ERROR_FS);
-
-	if (sb->s_flags & MS_RDONLY)
-		return;
-
-	if (!test_opt (sb, ERRORS_CONT)) {
-		journal_t *journal = EXT3_SB(sb)->s_journal;
-
-		set_opt(EXT3_SB(sb)->s_mount_opt, ABORT);
-		if (journal)
-			journal_abort(journal, -EIO);
-	}
-	if (test_opt (sb, ERRORS_RO)) {
-		ext3_msg(sb, KERN_CRIT,
-			"error: remounting filesystem read-only");
-		/*
-		 * Make sure updated value of ->s_mount_state will be visible
-		 * before ->s_flags update.
-		 */
-		smp_wmb();
-		sb->s_flags |= MS_RDONLY;
-	}
-	ext3_commit_super(sb, es, 1);
-	if (test_opt(sb, ERRORS_PANIC))
-		panic("EXT3-fs (%s): panic forced after error\n",
-			sb->s_id);
-}
-
-void ext3_error(struct super_block *sb, const char *function,
-		const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-
-	va_start(args, fmt);
-
-	vaf.fmt = fmt;
-	vaf.va = &args;
-
-	printk(KERN_CRIT "EXT3-fs error (device %s): %s: %pV\n",
-	       sb->s_id, function, &vaf);
-
-	va_end(args);
-
-	ext3_handle_error(sb);
-}
-
-static const char *ext3_decode_error(struct super_block * sb, int errno,
-				     char nbuf[16])
-{
-	char *errstr = NULL;
-
-	switch (errno) {
-	case -EIO:
-		errstr = "IO failure";
-		break;
-	case -ENOMEM:
-		errstr = "Out of memory";
-		break;
-	case -EROFS:
-		if (!sb || EXT3_SB(sb)->s_journal->j_flags & JFS_ABORT)
-			errstr = "Journal has aborted";
-		else
-			errstr = "Readonly filesystem";
-		break;
-	default:
-		/* If the caller passed in an extra buffer for unknown
-		 * errors, textualise them now.  Else we just return
-		 * NULL. */
-		if (nbuf) {
-			/* Check for truncated error codes... */
-			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
-				errstr = nbuf;
-		}
-		break;
-	}
-
-	return errstr;
-}
-
-/* __ext3_std_error decodes expected errors from journaling functions
- * automatically and invokes the appropriate error response.  */
-
-void __ext3_std_error (struct super_block * sb, const char * function,
-		       int errno)
-{
-	char nbuf[16];
-	const char *errstr;
-
-	/* Special case: if the error is EROFS, and we're not already
-	 * inside a transaction, then there's really no point in logging
-	 * an error. */
-	if (errno == -EROFS && journal_current_handle() == NULL &&
-	    (sb->s_flags & MS_RDONLY))
-		return;
-
-	errstr = ext3_decode_error(sb, errno, nbuf);
-	ext3_msg(sb, KERN_CRIT, "error in %s: %s", function, errstr);
-
-	ext3_handle_error(sb);
-}
-
-/*
- * ext3_abort is a much stronger failure handler than ext3_error.  The
- * abort function may be used to deal with unrecoverable failures such
- * as journal IO errors or ENOMEM at a critical moment in log management.
- *
- * We unconditionally force the filesystem into an ABORT|READONLY state,
- * unless the error response on the fs has been set to panic in which
- * case we take the easy way out and panic immediately.
- */
-
-void ext3_abort(struct super_block *sb, const char *function,
-		 const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-
-	va_start(args, fmt);
-
-	vaf.fmt = fmt;
-	vaf.va = &args;
-
-	printk(KERN_CRIT "EXT3-fs (%s): error: %s: %pV\n",
-	       sb->s_id, function, &vaf);
-
-	va_end(args);
-
-	if (test_opt(sb, ERRORS_PANIC))
-		panic("EXT3-fs: panic from previous error\n");
-
-	if (sb->s_flags & MS_RDONLY)
-		return;
-
-	ext3_msg(sb, KERN_CRIT,
-		"error: remounting filesystem read-only");
-	EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
-	set_opt(EXT3_SB(sb)->s_mount_opt, ABORT);
-	/*
-	 * Make sure updated value of ->s_mount_state will be visible
-	 * before ->s_flags update.
-	 */
-	smp_wmb();
-	sb->s_flags |= MS_RDONLY;
-
-	if (EXT3_SB(sb)->s_journal)
-		journal_abort(EXT3_SB(sb)->s_journal, -EIO);
-}
-
-void ext3_warning(struct super_block *sb, const char *function,
-		  const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-
-	va_start(args, fmt);
-
-	vaf.fmt = fmt;
-	vaf.va = &args;
-
-	printk(KERN_WARNING "EXT3-fs (%s): warning: %s: %pV\n",
-	       sb->s_id, function, &vaf);
-
-	va_end(args);
-}
-
-void ext3_update_dynamic_rev(struct super_block *sb)
-{
-	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-
-	if (le32_to_cpu(es->s_rev_level) > EXT3_GOOD_OLD_REV)
-		return;
-
-	ext3_msg(sb, KERN_WARNING,
-		"warning: updating to rev %d because of "
-		"new feature flag, running e2fsck is recommended",
-		EXT3_DYNAMIC_REV);
-
-	es->s_first_ino = cpu_to_le32(EXT3_GOOD_OLD_FIRST_INO);
-	es->s_inode_size = cpu_to_le16(EXT3_GOOD_OLD_INODE_SIZE);
-	es->s_rev_level = cpu_to_le32(EXT3_DYNAMIC_REV);
-	/* leave es->s_feature_*compat flags alone */
-	/* es->s_uuid will be set by e2fsck if empty */
-
-	/*
-	 * The rest of the superblock fields should be zero, and if not it
-	 * means they are likely already in use, so leave them alone.  We
-	 * can leave it up to e2fsck to clean up any inconsistencies there.
-	 */
-}
-
-/*
- * Open the external journal device
- */
-static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb)
-{
-	struct block_device *bdev;
-	char b[BDEVNAME_SIZE];
-
-	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
-	if (IS_ERR(bdev))
-		goto fail;
-	return bdev;
-
-fail:
-	ext3_msg(sb, KERN_ERR, "error: failed to open journal device %s: %ld",
-		__bdevname(dev, b), PTR_ERR(bdev));
-
-	return NULL;
-}
-
-/*
- * Release the journal device
- */
-static void ext3_blkdev_put(struct block_device *bdev)
-{
-	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-}
-
-static void ext3_blkdev_remove(struct ext3_sb_info *sbi)
-{
-	struct block_device *bdev;
-	bdev = sbi->journal_bdev;
-	if (bdev) {
-		ext3_blkdev_put(bdev);
-		sbi->journal_bdev = NULL;
-	}
-}
-
-static inline struct inode *orphan_list_entry(struct list_head *l)
-{
-	return &list_entry(l, struct ext3_inode_info, i_orphan)->vfs_inode;
-}
-
-static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi)
-{
-	struct list_head *l;
-
-	ext3_msg(sb, KERN_ERR, "error: sb orphan head is %d",
-	       le32_to_cpu(sbi->s_es->s_last_orphan));
-
-	ext3_msg(sb, KERN_ERR, "sb_info orphan list:");
-	list_for_each(l, &sbi->s_orphan) {
-		struct inode *inode = orphan_list_entry(l);
-		ext3_msg(sb, KERN_ERR, "  "
-		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
-		       inode->i_sb->s_id, inode->i_ino, inode,
-		       inode->i_mode, inode->i_nlink,
-		       NEXT_ORPHAN(inode));
-	}
-}
-
-static void ext3_put_super (struct super_block * sb)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	struct ext3_super_block *es = sbi->s_es;
-	int i, err;
-
-	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
-	ext3_xattr_put_super(sb);
-	err = journal_destroy(sbi->s_journal);
-	sbi->s_journal = NULL;
-	if (err < 0)
-		ext3_abort(sb, __func__, "Couldn't clean up the journal");
-
-	if (!(sb->s_flags & MS_RDONLY)) {
-		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-		es->s_state = cpu_to_le16(sbi->s_mount_state);
-		BUFFER_TRACE(sbi->s_sbh, "marking dirty");
-		mark_buffer_dirty(sbi->s_sbh);
-		ext3_commit_super(sb, es, 1);
-	}
-
-	for (i = 0; i < sbi->s_gdb_count; i++)
-		brelse(sbi->s_group_desc[i]);
-	kfree(sbi->s_group_desc);
-	percpu_counter_destroy(&sbi->s_freeblocks_counter);
-	percpu_counter_destroy(&sbi->s_freeinodes_counter);
-	percpu_counter_destroy(&sbi->s_dirs_counter);
-	brelse(sbi->s_sbh);
-#ifdef CONFIG_QUOTA
-	for (i = 0; i < EXT3_MAXQUOTAS; i++)
-		kfree(sbi->s_qf_names[i]);
-#endif
-
-	/* Debugging code just in case the in-memory inode orphan list
-	 * isn't empty.  The on-disk one can be non-empty if we've
-	 * detected an error and taken the fs readonly, but the
-	 * in-memory list had better be clean by this point. */
-	if (!list_empty(&sbi->s_orphan))
-		dump_orphan_list(sb, sbi);
-	J_ASSERT(list_empty(&sbi->s_orphan));
-
-	invalidate_bdev(sb->s_bdev);
-	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
-		/*
-		 * Invalidate the journal device's buffers.  We don't want them
-		 * floating about in memory - the physical journal device may
-		 * be hotswapped, and it breaks the `ro-after' testing code.
-		 */
-		sync_blockdev(sbi->journal_bdev);
-		invalidate_bdev(sbi->journal_bdev);
-		ext3_blkdev_remove(sbi);
-	}
-	sb->s_fs_info = NULL;
-	kfree(sbi->s_blockgroup_lock);
-	mutex_destroy(&sbi->s_orphan_lock);
-	mutex_destroy(&sbi->s_resize_lock);
-	kfree(sbi);
-}
-
-static struct kmem_cache *ext3_inode_cachep;
-
-/*
- * Called inside transaction, so use GFP_NOFS
- */
-static struct inode *ext3_alloc_inode(struct super_block *sb)
-{
-	struct ext3_inode_info *ei;
-
-	ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS);
-	if (!ei)
-		return NULL;
-	ei->i_block_alloc_info = NULL;
-	ei->vfs_inode.i_version = 1;
-	atomic_set(&ei->i_datasync_tid, 0);
-	atomic_set(&ei->i_sync_tid, 0);
-#ifdef CONFIG_QUOTA
-	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
-#endif
-
-	return &ei->vfs_inode;
-}
-
-static int ext3_drop_inode(struct inode *inode)
-{
-	int drop = generic_drop_inode(inode);
-
-	trace_ext3_drop_inode(inode, drop);
-	return drop;
-}
-
-static void ext3_i_callback(struct rcu_head *head)
-{
-	struct inode *inode = container_of(head, struct inode, i_rcu);
-	kmem_cache_free(ext3_inode_cachep, EXT3_I(inode));
-}
-
-static void ext3_destroy_inode(struct inode *inode)
-{
-	if (!list_empty(&(EXT3_I(inode)->i_orphan))) {
-		printk("EXT3 Inode %p: orphan list check failed!\n",
-			EXT3_I(inode));
-		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
-				EXT3_I(inode), sizeof(struct ext3_inode_info),
-				false);
-		dump_stack();
-	}
-	call_rcu(&inode->i_rcu, ext3_i_callback);
-}
-
-static void init_once(void *foo)
-{
-	struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;
-
-	INIT_LIST_HEAD(&ei->i_orphan);
-#ifdef CONFIG_EXT3_FS_XATTR
-	init_rwsem(&ei->xattr_sem);
-#endif
-	mutex_init(&ei->truncate_mutex);
-	inode_init_once(&ei->vfs_inode);
-}
-
-static int __init init_inodecache(void)
-{
-	ext3_inode_cachep = kmem_cache_create("ext3_inode_cache",
-					     sizeof(struct ext3_inode_info),
-					     0, (SLAB_RECLAIM_ACCOUNT|
-						SLAB_MEM_SPREAD),
-					     init_once);
-	if (ext3_inode_cachep == NULL)
-		return -ENOMEM;
-	return 0;
-}
-
-static void destroy_inodecache(void)
-{
-	/*
-	 * Make sure all delayed rcu free inodes are flushed before we
-	 * destroy cache.
-	 */
-	rcu_barrier();
-	kmem_cache_destroy(ext3_inode_cachep);
-}
-
-static inline void ext3_show_quota_options(struct seq_file *seq, struct super_block *sb)
-{
-#if defined(CONFIG_QUOTA)
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-
-	if (sbi->s_jquota_fmt) {
-		char *fmtname = "";
-
-		switch (sbi->s_jquota_fmt) {
-		case QFMT_VFS_OLD:
-			fmtname = "vfsold";
-			break;
-		case QFMT_VFS_V0:
-			fmtname = "vfsv0";
-			break;
-		case QFMT_VFS_V1:
-			fmtname = "vfsv1";
-			break;
-		}
-		seq_printf(seq, ",jqfmt=%s", fmtname);
-	}
-
-	if (sbi->s_qf_names[USRQUOTA])
-		seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
-
-	if (sbi->s_qf_names[GRPQUOTA])
-		seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
-
-	if (test_opt(sb, USRQUOTA))
-		seq_puts(seq, ",usrquota");
-
-	if (test_opt(sb, GRPQUOTA))
-		seq_puts(seq, ",grpquota");
-#endif
-}
-
-static char *data_mode_string(unsigned long mode)
-{
-	switch (mode) {
-	case EXT3_MOUNT_JOURNAL_DATA:
-		return "journal";
-	case EXT3_MOUNT_ORDERED_DATA:
-		return "ordered";
-	case EXT3_MOUNT_WRITEBACK_DATA:
-		return "writeback";
-	}
-	return "unknown";
-}
-
-/*
- * Show an option if
- *  - it's set to a non-default value OR
- *  - the per-sb default is different from the global default
- */
-static int ext3_show_options(struct seq_file *seq, struct dentry *root)
-{
-	struct super_block *sb = root->d_sb;
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	struct ext3_super_block *es = sbi->s_es;
-	unsigned long def_mount_opts;
-
-	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
-
-	if (sbi->s_sb_block != 1)
-		seq_printf(seq, ",sb=%lu", sbi->s_sb_block);
-	if (test_opt(sb, MINIX_DF))
-		seq_puts(seq, ",minixdf");
-	if (test_opt(sb, GRPID))
-		seq_puts(seq, ",grpid");
-	if (!test_opt(sb, GRPID) && (def_mount_opts & EXT3_DEFM_BSDGROUPS))
-		seq_puts(seq, ",nogrpid");
-	if (!uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT3_DEF_RESUID)) ||
-	    le16_to_cpu(es->s_def_resuid) != EXT3_DEF_RESUID) {
-		seq_printf(seq, ",resuid=%u",
-				from_kuid_munged(&init_user_ns, sbi->s_resuid));
-	}
-	if (!gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT3_DEF_RESGID)) ||
-	    le16_to_cpu(es->s_def_resgid) != EXT3_DEF_RESGID) {
-		seq_printf(seq, ",resgid=%u",
-				from_kgid_munged(&init_user_ns, sbi->s_resgid));
-	}
-	if (test_opt(sb, ERRORS_RO)) {
-		int def_errors = le16_to_cpu(es->s_errors);
-
-		if (def_errors == EXT3_ERRORS_PANIC ||
-		    def_errors == EXT3_ERRORS_CONTINUE) {
-			seq_puts(seq, ",errors=remount-ro");
-		}
-	}
-	if (test_opt(sb, ERRORS_CONT))
-		seq_puts(seq, ",errors=continue");
-	if (test_opt(sb, ERRORS_PANIC))
-		seq_puts(seq, ",errors=panic");
-	if (test_opt(sb, NO_UID32))
-		seq_puts(seq, ",nouid32");
-	if (test_opt(sb, DEBUG))
-		seq_puts(seq, ",debug");
-#ifdef CONFIG_EXT3_FS_XATTR
-	if (test_opt(sb, XATTR_USER))
-		seq_puts(seq, ",user_xattr");
-	if (!test_opt(sb, XATTR_USER) &&
-	    (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
-		seq_puts(seq, ",nouser_xattr");
-	}
-#endif
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-	if (test_opt(sb, POSIX_ACL))
-		seq_puts(seq, ",acl");
-	if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT3_DEFM_ACL))
-		seq_puts(seq, ",noacl");
-#endif
-	if (!test_opt(sb, RESERVATION))
-		seq_puts(seq, ",noreservation");
-	if (sbi->s_commit_interval) {
-		seq_printf(seq, ",commit=%u",
-			   (unsigned) (sbi->s_commit_interval / HZ));
-	}
-
-	/*
-	 * Always display barrier state so it's clear what the status is.
-	 */
-	seq_puts(seq, ",barrier=");
-	seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
-	seq_printf(seq, ",data=%s", data_mode_string(test_opt(sb, DATA_FLAGS)));
-	if (test_opt(sb, DATA_ERR_ABORT))
-		seq_puts(seq, ",data_err=abort");
-
-	if (test_opt(sb, NOLOAD))
-		seq_puts(seq, ",norecovery");
-
-	ext3_show_quota_options(seq, sb);
-
-	return 0;
-}
-
-
-static struct inode *ext3_nfs_get_inode(struct super_block *sb,
-		u64 ino, u32 generation)
-{
-	struct inode *inode;
-
-	if (ino < EXT3_FIRST_INO(sb) && ino != EXT3_ROOT_INO)
-		return ERR_PTR(-ESTALE);
-	if (ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count))
-		return ERR_PTR(-ESTALE);
-
-	/* iget isn't really right if the inode is currently unallocated!!
-	 *
-	 * ext3_read_inode will return a bad_inode if the inode had been
-	 * deleted, so we should be safe.
-	 *
-	 * Currently we don't know the generation for parent directory, so
-	 * a generation of 0 means "accept any"
-	 */
-	inode = ext3_iget(sb, ino);
-	if (IS_ERR(inode))
-		return ERR_CAST(inode);
-	if (generation && inode->i_generation != generation) {
-		iput(inode);
-		return ERR_PTR(-ESTALE);
-	}
-
-	return inode;
-}
-
-static struct dentry *ext3_fh_to_dentry(struct super_block *sb, struct fid *fid,
-		int fh_len, int fh_type)
-{
-	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
-				    ext3_nfs_get_inode);
-}
-
-static struct dentry *ext3_fh_to_parent(struct super_block *sb, struct fid *fid,
-		int fh_len, int fh_type)
-{
-	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
-				    ext3_nfs_get_inode);
-}
-
-/*
- * Try to release metadata pages (indirect blocks, directories) which are
- * mapped via the block device.  Since these pages could have journal heads
- * which would prevent try_to_free_buffers() from freeing them, we must use
- * jbd layer's try_to_free_buffers() function to release them.
- */
-static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
-				 gfp_t wait)
-{
-	journal_t *journal = EXT3_SB(sb)->s_journal;
-
-	WARN_ON(PageChecked(page));
-	if (!page_has_buffers(page))
-		return 0;
-	if (journal)
-		return journal_try_to_free_buffers(journal, page, 
-						   wait & ~__GFP_WAIT);
-	return try_to_free_buffers(page);
-}
-
-#ifdef CONFIG_QUOTA
-#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
-#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
-
-static int ext3_write_dquot(struct dquot *dquot);
-static int ext3_acquire_dquot(struct dquot *dquot);
-static int ext3_release_dquot(struct dquot *dquot);
-static int ext3_mark_dquot_dirty(struct dquot *dquot);
-static int ext3_write_info(struct super_block *sb, int type);
-static int ext3_quota_on(struct super_block *sb, int type, int format_id,
-			 struct path *path);
-static int ext3_quota_on_mount(struct super_block *sb, int type);
-static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
-			       size_t len, loff_t off);
-static ssize_t ext3_quota_write(struct super_block *sb, int type,
-				const char *data, size_t len, loff_t off);
-static struct dquot **ext3_get_dquots(struct inode *inode)
-{
-	return EXT3_I(inode)->i_dquot;
-}
-
-static const struct dquot_operations ext3_quota_operations = {
-	.write_dquot	= ext3_write_dquot,
-	.acquire_dquot	= ext3_acquire_dquot,
-	.release_dquot	= ext3_release_dquot,
-	.mark_dirty	= ext3_mark_dquot_dirty,
-	.write_info	= ext3_write_info,
-	.alloc_dquot	= dquot_alloc,
-	.destroy_dquot	= dquot_destroy,
-};
-
-static const struct quotactl_ops ext3_qctl_operations = {
-	.quota_on	= ext3_quota_on,
-	.quota_off	= dquot_quota_off,
-	.quota_sync	= dquot_quota_sync,
-	.get_state	= dquot_get_state,
-	.set_info	= dquot_set_dqinfo,
-	.get_dqblk	= dquot_get_dqblk,
-	.set_dqblk	= dquot_set_dqblk
-};
-#endif
-
-static const struct super_operations ext3_sops = {
-	.alloc_inode	= ext3_alloc_inode,
-	.destroy_inode	= ext3_destroy_inode,
-	.write_inode	= ext3_write_inode,
-	.dirty_inode	= ext3_dirty_inode,
-	.drop_inode	= ext3_drop_inode,
-	.evict_inode	= ext3_evict_inode,
-	.put_super	= ext3_put_super,
-	.sync_fs	= ext3_sync_fs,
-	.freeze_fs	= ext3_freeze,
-	.unfreeze_fs	= ext3_unfreeze,
-	.statfs		= ext3_statfs,
-	.remount_fs	= ext3_remount,
-	.show_options	= ext3_show_options,
-#ifdef CONFIG_QUOTA
-	.quota_read	= ext3_quota_read,
-	.quota_write	= ext3_quota_write,
-	.get_dquots	= ext3_get_dquots,
-#endif
-	.bdev_try_to_free_page = bdev_try_to_free_page,
-};
-
-static const struct export_operations ext3_export_ops = {
-	.fh_to_dentry = ext3_fh_to_dentry,
-	.fh_to_parent = ext3_fh_to_parent,
-	.get_parent = ext3_get_parent,
-};
-
-enum {
-	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
-	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
-	Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov,
-	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
-	Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
-	Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
-	Opt_journal_path,
-	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
-	Opt_data_err_abort, Opt_data_err_ignore,
-	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
-	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
-	Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
-	Opt_resize, Opt_usrquota, Opt_grpquota
-};
-
-static const match_table_t tokens = {
-	{Opt_bsd_df, "bsddf"},
-	{Opt_minix_df, "minixdf"},
-	{Opt_grpid, "grpid"},
-	{Opt_grpid, "bsdgroups"},
-	{Opt_nogrpid, "nogrpid"},
-	{Opt_nogrpid, "sysvgroups"},
-	{Opt_resgid, "resgid=%u"},
-	{Opt_resuid, "resuid=%u"},
-	{Opt_sb, "sb=%u"},
-	{Opt_err_cont, "errors=continue"},
-	{Opt_err_panic, "errors=panic"},
-	{Opt_err_ro, "errors=remount-ro"},
-	{Opt_nouid32, "nouid32"},
-	{Opt_nocheck, "nocheck"},
-	{Opt_nocheck, "check=none"},
-	{Opt_debug, "debug"},
-	{Opt_oldalloc, "oldalloc"},
-	{Opt_orlov, "orlov"},
-	{Opt_user_xattr, "user_xattr"},
-	{Opt_nouser_xattr, "nouser_xattr"},
-	{Opt_acl, "acl"},
-	{Opt_noacl, "noacl"},
-	{Opt_reservation, "reservation"},
-	{Opt_noreservation, "noreservation"},
-	{Opt_noload, "noload"},
-	{Opt_noload, "norecovery"},
-	{Opt_nobh, "nobh"},
-	{Opt_bh, "bh"},
-	{Opt_commit, "commit=%u"},
-	{Opt_journal_update, "journal=update"},
-	{Opt_journal_inum, "journal=%u"},
-	{Opt_journal_dev, "journal_dev=%u"},
-	{Opt_journal_path, "journal_path=%s"},
-	{Opt_abort, "abort"},
-	{Opt_data_journal, "data=journal"},
-	{Opt_data_ordered, "data=ordered"},
-	{Opt_data_writeback, "data=writeback"},
-	{Opt_data_err_abort, "data_err=abort"},
-	{Opt_data_err_ignore, "data_err=ignore"},
-	{Opt_offusrjquota, "usrjquota="},
-	{Opt_usrjquota, "usrjquota=%s"},
-	{Opt_offgrpjquota, "grpjquota="},
-	{Opt_grpjquota, "grpjquota=%s"},
-	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
-	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
-	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
-	{Opt_grpquota, "grpquota"},
-	{Opt_noquota, "noquota"},
-	{Opt_quota, "quota"},
-	{Opt_usrquota, "usrquota"},
-	{Opt_barrier, "barrier=%u"},
-	{Opt_barrier, "barrier"},
-	{Opt_nobarrier, "nobarrier"},
-	{Opt_resize, "resize"},
-	{Opt_err, NULL},
-};
-
-static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb)
-{
-	ext3_fsblk_t	sb_block;
-	char		*options = (char *) *data;
-
-	if (!options || strncmp(options, "sb=", 3) != 0)
-		return 1;	/* Default location */
-	options += 3;
-	/* todo: use simple_strtoll with >32bit ext3 */
-	sb_block = simple_strtoul(options, &options, 0);
-	if (*options && *options != ',') {
-		ext3_msg(sb, KERN_ERR, "error: invalid sb specification: %s",
-		       (char *) *data);
-		return 1;
-	}
-	if (*options == ',')
-		options++;
-	*data = (void *) options;
-	return sb_block;
-}
-
-#ifdef CONFIG_QUOTA
-static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	char *qname;
-
-	if (sb_any_quota_loaded(sb) &&
-		!sbi->s_qf_names[qtype]) {
-		ext3_msg(sb, KERN_ERR,
-			"Cannot change journaled "
-			"quota options when quota turned on");
-		return 0;
-	}
-	qname = match_strdup(args);
-	if (!qname) {
-		ext3_msg(sb, KERN_ERR,
-			"Not enough memory for storing quotafile name");
-		return 0;
-	}
-	if (sbi->s_qf_names[qtype]) {
-		int same = !strcmp(sbi->s_qf_names[qtype], qname);
-
-		kfree(qname);
-		if (!same) {
-			ext3_msg(sb, KERN_ERR,
-				 "%s quota file already specified",
-				 QTYPE2NAME(qtype));
-		}
-		return same;
-	}
-	if (strchr(qname, '/')) {
-		ext3_msg(sb, KERN_ERR,
-			"quotafile must be on filesystem root");
-		kfree(qname);
-		return 0;
-	}
-	sbi->s_qf_names[qtype] = qname;
-	set_opt(sbi->s_mount_opt, QUOTA);
-	return 1;
-}
-
-static int clear_qf_name(struct super_block *sb, int qtype)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-
-	if (sb_any_quota_loaded(sb) &&
-		sbi->s_qf_names[qtype]) {
-		ext3_msg(sb, KERN_ERR, "Cannot change journaled quota options"
-			" when quota turned on");
-		return 0;
-	}
-	if (sbi->s_qf_names[qtype]) {
-		kfree(sbi->s_qf_names[qtype]);
-		sbi->s_qf_names[qtype] = NULL;
-	}
-	return 1;
-}
-#endif
-
-static int parse_options (char *options, struct super_block *sb,
-			  unsigned int *inum, unsigned long *journal_devnum,
-			  ext3_fsblk_t *n_blocks_count, int is_remount)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	char * p;
-	substring_t args[MAX_OPT_ARGS];
-	int data_opt = 0;
-	int option;
-	kuid_t uid;
-	kgid_t gid;
-	char *journal_path;
-	struct inode *journal_inode;
-	struct path path;
-	int error;
-
-#ifdef CONFIG_QUOTA
-	int qfmt;
-#endif
-
-	if (!options)
-		return 1;
-
-	while ((p = strsep (&options, ",")) != NULL) {
-		int token;
-		if (!*p)
-			continue;
-		/*
-		 * Initialize args struct so we know whether arg was
-		 * found; some options take optional arguments.
-		 */
-		args[0].to = args[0].from = NULL;
-		token = match_token(p, tokens, args);
-		switch (token) {
-		case Opt_bsd_df:
-			clear_opt (sbi->s_mount_opt, MINIX_DF);
-			break;
-		case Opt_minix_df:
-			set_opt (sbi->s_mount_opt, MINIX_DF);
-			break;
-		case Opt_grpid:
-			set_opt (sbi->s_mount_opt, GRPID);
-			break;
-		case Opt_nogrpid:
-			clear_opt (sbi->s_mount_opt, GRPID);
-			break;
-		case Opt_resuid:
-			if (match_int(&args[0], &option))
-				return 0;
-			uid = make_kuid(current_user_ns(), option);
-			if (!uid_valid(uid)) {
-				ext3_msg(sb, KERN_ERR, "Invalid uid value %d", option);
-				return 0;
-			}
-			sbi->s_resuid = uid;
-			break;
-		case Opt_resgid:
-			if (match_int(&args[0], &option))
-				return 0;
-			gid = make_kgid(current_user_ns(), option);
-			if (!gid_valid(gid)) {
-				ext3_msg(sb, KERN_ERR, "Invalid gid value %d", option);
-				return 0;
-			}
-			sbi->s_resgid = gid;
-			break;
-		case Opt_sb:
-			/* handled by get_sb_block() instead of here */
-			/* *sb_block = match_int(&args[0]); */
-			break;
-		case Opt_err_panic:
-			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
-			clear_opt (sbi->s_mount_opt, ERRORS_RO);
-			set_opt (sbi->s_mount_opt, ERRORS_PANIC);
-			break;
-		case Opt_err_ro:
-			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
-			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
-			set_opt (sbi->s_mount_opt, ERRORS_RO);
-			break;
-		case Opt_err_cont:
-			clear_opt (sbi->s_mount_opt, ERRORS_RO);
-			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
-			set_opt (sbi->s_mount_opt, ERRORS_CONT);
-			break;
-		case Opt_nouid32:
-			set_opt (sbi->s_mount_opt, NO_UID32);
-			break;
-		case Opt_nocheck:
-			clear_opt (sbi->s_mount_opt, CHECK);
-			break;
-		case Opt_debug:
-			set_opt (sbi->s_mount_opt, DEBUG);
-			break;
-		case Opt_oldalloc:
-			ext3_msg(sb, KERN_WARNING,
-				"Ignoring deprecated oldalloc option");
-			break;
-		case Opt_orlov:
-			ext3_msg(sb, KERN_WARNING,
-				"Ignoring deprecated orlov option");
-			break;
-#ifdef CONFIG_EXT3_FS_XATTR
-		case Opt_user_xattr:
-			set_opt (sbi->s_mount_opt, XATTR_USER);
-			break;
-		case Opt_nouser_xattr:
-			clear_opt (sbi->s_mount_opt, XATTR_USER);
-			break;
-#else
-		case Opt_user_xattr:
-		case Opt_nouser_xattr:
-			ext3_msg(sb, KERN_INFO,
-				"(no)user_xattr options not supported");
-			break;
-#endif
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-		case Opt_acl:
-			set_opt(sbi->s_mount_opt, POSIX_ACL);
-			break;
-		case Opt_noacl:
-			clear_opt(sbi->s_mount_opt, POSIX_ACL);
-			break;
-#else
-		case Opt_acl:
-		case Opt_noacl:
-			ext3_msg(sb, KERN_INFO,
-				"(no)acl options not supported");
-			break;
-#endif
-		case Opt_reservation:
-			set_opt(sbi->s_mount_opt, RESERVATION);
-			break;
-		case Opt_noreservation:
-			clear_opt(sbi->s_mount_opt, RESERVATION);
-			break;
-		case Opt_journal_update:
-			/* @@@ FIXME */
-			/* Eventually we will want to be able to create
-			   a journal file here.  For now, only allow the
-			   user to specify an existing inode to be the
-			   journal file. */
-			if (is_remount) {
-				ext3_msg(sb, KERN_ERR, "error: cannot specify "
-					"journal on remount");
-				return 0;
-			}
-			set_opt (sbi->s_mount_opt, UPDATE_JOURNAL);
-			break;
-		case Opt_journal_inum:
-			if (is_remount) {
-				ext3_msg(sb, KERN_ERR, "error: cannot specify "
-				       "journal on remount");
-				return 0;
-			}
-			if (match_int(&args[0], &option))
-				return 0;
-			*inum = option;
-			break;
-		case Opt_journal_dev:
-			if (is_remount) {
-				ext3_msg(sb, KERN_ERR, "error: cannot specify "
-				       "journal on remount");
-				return 0;
-			}
-			if (match_int(&args[0], &option))
-				return 0;
-			*journal_devnum = option;
-			break;
-		case Opt_journal_path:
-			if (is_remount) {
-				ext3_msg(sb, KERN_ERR, "error: cannot specify "
-				       "journal on remount");
-				return 0;
-			}
-
-			journal_path = match_strdup(&args[0]);
-			if (!journal_path) {
-				ext3_msg(sb, KERN_ERR, "error: could not dup "
-					"journal device string");
-				return 0;
-			}
-
-			error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
-			if (error) {
-				ext3_msg(sb, KERN_ERR, "error: could not find "
-					"journal device path: error %d", error);
-				kfree(journal_path);
-				return 0;
-			}
-
-			journal_inode = d_inode(path.dentry);
-			if (!S_ISBLK(journal_inode->i_mode)) {
-				ext3_msg(sb, KERN_ERR, "error: journal path %s "
-					"is not a block device", journal_path);
-				path_put(&path);
-				kfree(journal_path);
-				return 0;
-			}
-
-			*journal_devnum = new_encode_dev(journal_inode->i_rdev);
-			path_put(&path);
-			kfree(journal_path);
-			break;
-		case Opt_noload:
-			set_opt (sbi->s_mount_opt, NOLOAD);
-			break;
-		case Opt_commit:
-			if (match_int(&args[0], &option))
-				return 0;
-			if (option < 0)
-				return 0;
-			if (option == 0)
-				option = JBD_DEFAULT_MAX_COMMIT_AGE;
-			sbi->s_commit_interval = HZ * option;
-			break;
-		case Opt_data_journal:
-			data_opt = EXT3_MOUNT_JOURNAL_DATA;
-			goto datacheck;
-		case Opt_data_ordered:
-			data_opt = EXT3_MOUNT_ORDERED_DATA;
-			goto datacheck;
-		case Opt_data_writeback:
-			data_opt = EXT3_MOUNT_WRITEBACK_DATA;
-		datacheck:
-			if (is_remount) {
-				if (test_opt(sb, DATA_FLAGS) == data_opt)
-					break;
-				ext3_msg(sb, KERN_ERR,
-					"error: cannot change "
-					"data mode on remount. The filesystem "
-					"is mounted in data=%s mode and you "
-					"try to remount it in data=%s mode.",
-					data_mode_string(test_opt(sb,
-							DATA_FLAGS)),
-					data_mode_string(data_opt));
-				return 0;
-			} else {
-				clear_opt(sbi->s_mount_opt, DATA_FLAGS);
-				sbi->s_mount_opt |= data_opt;
-			}
-			break;
-		case Opt_data_err_abort:
-			set_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
-			break;
-		case Opt_data_err_ignore:
-			clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
-			break;
-#ifdef CONFIG_QUOTA
-		case Opt_usrjquota:
-			if (!set_qf_name(sb, USRQUOTA, &args[0]))
-				return 0;
-			break;
-		case Opt_grpjquota:
-			if (!set_qf_name(sb, GRPQUOTA, &args[0]))
-				return 0;
-			break;
-		case Opt_offusrjquota:
-			if (!clear_qf_name(sb, USRQUOTA))
-				return 0;
-			break;
-		case Opt_offgrpjquota:
-			if (!clear_qf_name(sb, GRPQUOTA))
-				return 0;
-			break;
-		case Opt_jqfmt_vfsold:
-			qfmt = QFMT_VFS_OLD;
-			goto set_qf_format;
-		case Opt_jqfmt_vfsv0:
-			qfmt = QFMT_VFS_V0;
-			goto set_qf_format;
-		case Opt_jqfmt_vfsv1:
-			qfmt = QFMT_VFS_V1;
-set_qf_format:
-			if (sb_any_quota_loaded(sb) &&
-			    sbi->s_jquota_fmt != qfmt) {
-				ext3_msg(sb, KERN_ERR, "error: cannot change "
-					"journaled quota options when "
-					"quota turned on.");
-				return 0;
-			}
-			sbi->s_jquota_fmt = qfmt;
-			break;
-		case Opt_quota:
-		case Opt_usrquota:
-			set_opt(sbi->s_mount_opt, QUOTA);
-			set_opt(sbi->s_mount_opt, USRQUOTA);
-			break;
-		case Opt_grpquota:
-			set_opt(sbi->s_mount_opt, QUOTA);
-			set_opt(sbi->s_mount_opt, GRPQUOTA);
-			break;
-		case Opt_noquota:
-			if (sb_any_quota_loaded(sb)) {
-				ext3_msg(sb, KERN_ERR, "error: cannot change "
-					"quota options when quota turned on.");
-				return 0;
-			}
-			clear_opt(sbi->s_mount_opt, QUOTA);
-			clear_opt(sbi->s_mount_opt, USRQUOTA);
-			clear_opt(sbi->s_mount_opt, GRPQUOTA);
-			break;
-#else
-		case Opt_quota:
-		case Opt_usrquota:
-		case Opt_grpquota:
-			ext3_msg(sb, KERN_ERR,
-				"error: quota options not supported.");
-			break;
-		case Opt_usrjquota:
-		case Opt_grpjquota:
-		case Opt_offusrjquota:
-		case Opt_offgrpjquota:
-		case Opt_jqfmt_vfsold:
-		case Opt_jqfmt_vfsv0:
-		case Opt_jqfmt_vfsv1:
-			ext3_msg(sb, KERN_ERR,
-				"error: journaled quota options not "
-				"supported.");
-			break;
-		case Opt_noquota:
-			break;
-#endif
-		case Opt_abort:
-			set_opt(sbi->s_mount_opt, ABORT);
-			break;
-		case Opt_nobarrier:
-			clear_opt(sbi->s_mount_opt, BARRIER);
-			break;
-		case Opt_barrier:
-			if (args[0].from) {
-				if (match_int(&args[0], &option))
-					return 0;
-			} else
-				option = 1;	/* No argument, default to 1 */
-			if (option)
-				set_opt(sbi->s_mount_opt, BARRIER);
-			else
-				clear_opt(sbi->s_mount_opt, BARRIER);
-			break;
-		case Opt_ignore:
-			break;
-		case Opt_resize:
-			if (!is_remount) {
-				ext3_msg(sb, KERN_ERR,
-					"error: resize option only available "
-					"for remount");
-				return 0;
-			}
-			if (match_int(&args[0], &option) != 0)
-				return 0;
-			*n_blocks_count = option;
-			break;
-		case Opt_nobh:
-			ext3_msg(sb, KERN_WARNING,
-				"warning: ignoring deprecated nobh option");
-			break;
-		case Opt_bh:
-			ext3_msg(sb, KERN_WARNING,
-				"warning: ignoring deprecated bh option");
-			break;
-		default:
-			ext3_msg(sb, KERN_ERR,
-				"error: unrecognized mount option \"%s\" "
-				"or missing value", p);
-			return 0;
-		}
-	}
-#ifdef CONFIG_QUOTA
-	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
-		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
-			clear_opt(sbi->s_mount_opt, USRQUOTA);
-		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
-			clear_opt(sbi->s_mount_opt, GRPQUOTA);
-
-		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
-			ext3_msg(sb, KERN_ERR, "error: old and new quota "
-					"format mixing.");
-			return 0;
-		}
-
-		if (!sbi->s_jquota_fmt) {
-			ext3_msg(sb, KERN_ERR, "error: journaled quota format "
-					"not specified.");
-			return 0;
-		}
-	}
-#endif
-	return 1;
-}
-
-static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
-			    int read_only)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	int res = 0;
-
-	if (le32_to_cpu(es->s_rev_level) > EXT3_MAX_SUPP_REV) {
-		ext3_msg(sb, KERN_ERR,
-			"error: revision level too high, "
-			"forcing read-only mode");
-		res = MS_RDONLY;
-	}
-	if (read_only)
-		return res;
-	if (!(sbi->s_mount_state & EXT3_VALID_FS))
-		ext3_msg(sb, KERN_WARNING,
-			"warning: mounting unchecked fs, "
-			"running e2fsck is recommended");
-	else if ((sbi->s_mount_state & EXT3_ERROR_FS))
-		ext3_msg(sb, KERN_WARNING,
-			"warning: mounting fs with errors, "
-			"running e2fsck is recommended");
-	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
-		 le16_to_cpu(es->s_mnt_count) >=
-			le16_to_cpu(es->s_max_mnt_count))
-		ext3_msg(sb, KERN_WARNING,
-			"warning: maximal mount count reached, "
-			"running e2fsck is recommended");
-	else if (le32_to_cpu(es->s_checkinterval) &&
-		(le32_to_cpu(es->s_lastcheck) +
-			le32_to_cpu(es->s_checkinterval) <= get_seconds()))
-		ext3_msg(sb, KERN_WARNING,
-			"warning: checktime reached, "
-			"running e2fsck is recommended");
-#if 0
-		/* @@@ We _will_ want to clear the valid bit if we find
-                   inconsistencies, to force a fsck at reboot.  But for
-                   a plain journaled filesystem we can keep it set as
-                   valid forever! :) */
-	es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
-#endif
-	if (!le16_to_cpu(es->s_max_mnt_count))
-		es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
-	le16_add_cpu(&es->s_mnt_count, 1);
-	es->s_mtime = cpu_to_le32(get_seconds());
-	ext3_update_dynamic_rev(sb);
-	EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-
-	ext3_commit_super(sb, es, 1);
-	if (test_opt(sb, DEBUG))
-		ext3_msg(sb, KERN_INFO, "[bs=%lu, gc=%lu, "
-				"bpg=%lu, ipg=%lu, mo=%04lx]",
-			sb->s_blocksize,
-			sbi->s_groups_count,
-			EXT3_BLOCKS_PER_GROUP(sb),
-			EXT3_INODES_PER_GROUP(sb),
-			sbi->s_mount_opt);
-
-	if (EXT3_SB(sb)->s_journal->j_inode == NULL) {
-		char b[BDEVNAME_SIZE];
-		ext3_msg(sb, KERN_INFO, "using external journal on %s",
-			bdevname(EXT3_SB(sb)->s_journal->j_dev, b));
-	} else {
-		ext3_msg(sb, KERN_INFO, "using internal journal");
-	}
-	cleancache_init_fs(sb);
-	return res;
-}
-
-/* Called at mount-time, super-block is locked */
-static int ext3_check_descriptors(struct super_block *sb)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	int i;
-
-	ext3_debug ("Checking group descriptors");
-
-	for (i = 0; i < sbi->s_groups_count; i++) {
-		struct ext3_group_desc *gdp = ext3_get_group_desc(sb, i, NULL);
-		ext3_fsblk_t first_block = ext3_group_first_block_no(sb, i);
-		ext3_fsblk_t last_block;
-
-		if (i == sbi->s_groups_count - 1)
-			last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
-		else
-			last_block = first_block +
-				(EXT3_BLOCKS_PER_GROUP(sb) - 1);
-
-		if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
-		    le32_to_cpu(gdp->bg_block_bitmap) > last_block)
-		{
-			ext3_error (sb, "ext3_check_descriptors",
-				    "Block bitmap for group %d"
-				    " not in group (block %lu)!",
-				    i, (unsigned long)
-					le32_to_cpu(gdp->bg_block_bitmap));
-			return 0;
-		}
-		if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block ||
-		    le32_to_cpu(gdp->bg_inode_bitmap) > last_block)
-		{
-			ext3_error (sb, "ext3_check_descriptors",
-				    "Inode bitmap for group %d"
-				    " not in group (block %lu)!",
-				    i, (unsigned long)
-					le32_to_cpu(gdp->bg_inode_bitmap));
-			return 0;
-		}
-		if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
-		    le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 >
-		    last_block)
-		{
-			ext3_error (sb, "ext3_check_descriptors",
-				    "Inode table for group %d"
-				    " not in group (block %lu)!",
-				    i, (unsigned long)
-					le32_to_cpu(gdp->bg_inode_table));
-			return 0;
-		}
-	}
-
-	sbi->s_es->s_free_blocks_count=cpu_to_le32(ext3_count_free_blocks(sb));
-	sbi->s_es->s_free_inodes_count=cpu_to_le32(ext3_count_free_inodes(sb));
-	return 1;
-}
-
-
-/* ext3_orphan_cleanup() walks a singly-linked list of inodes (starting at
- * the superblock) which were deleted from all directories, but held open by
- * a process at the time of a crash.  We walk the list and try to delete these
- * inodes at recovery time (only with a read-write filesystem).
- *
- * In order to keep the orphan inode chain consistent during traversal (in
- * case of crash during recovery), we link each inode into the superblock
- * orphan list_head and handle it the same way as an inode deletion during
- * normal operation (which journals the operations for us).
- *
- * We only do an iget() and an iput() on each inode, which is very safe if we
- * accidentally point at an in-use or already deleted inode.  The worst that
- * can happen in this case is that we get a "bit already cleared" message from
- * ext3_free_inode().  The only reason we would point at a wrong inode is if
- * e2fsck was run on this filesystem, and it must have already done the orphan
- * inode cleanup for us, so we can safely abort without any further action.
- */
-static void ext3_orphan_cleanup (struct super_block * sb,
-				 struct ext3_super_block * es)
-{
-	unsigned int s_flags = sb->s_flags;
-	int nr_orphans = 0, nr_truncates = 0;
-#ifdef CONFIG_QUOTA
-	int i;
-#endif
-	if (!es->s_last_orphan) {
-		jbd_debug(4, "no orphan inodes to clean up\n");
-		return;
-	}
-
-	if (bdev_read_only(sb->s_bdev)) {
-		ext3_msg(sb, KERN_ERR, "error: write access "
-			"unavailable, skipping orphan cleanup.");
-		return;
-	}
-
-	/* Check if feature set allows readwrite operations */
-	if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) {
-		ext3_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
-			 "unknown ROCOMPAT features");
-		return;
-	}
-
-	if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
-		/* don't clear list on RO mount w/ errors */
-		if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
-			jbd_debug(1, "Errors on filesystem, "
-				  "clearing orphan list.\n");
-			es->s_last_orphan = 0;
-		}
-		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
-		return;
-	}
-
-	if (s_flags & MS_RDONLY) {
-		ext3_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
-		sb->s_flags &= ~MS_RDONLY;
-	}
-#ifdef CONFIG_QUOTA
-	/* Needed for iput() to work correctly and not trash data */
-	sb->s_flags |= MS_ACTIVE;
-	/* Turn on quotas so that they are updated correctly */
-	for (i = 0; i < EXT3_MAXQUOTAS; i++) {
-		if (EXT3_SB(sb)->s_qf_names[i]) {
-			int ret = ext3_quota_on_mount(sb, i);
-			if (ret < 0)
-				ext3_msg(sb, KERN_ERR,
-					"error: cannot turn on journaled "
-					"quota: %d", ret);
-		}
-	}
-#endif
-
-	while (es->s_last_orphan) {
-		struct inode *inode;
-
-		inode = ext3_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
-		if (IS_ERR(inode)) {
-			es->s_last_orphan = 0;
-			break;
-		}
-
-		list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
-		dquot_initialize(inode);
-		if (inode->i_nlink) {
-			printk(KERN_DEBUG
-				"%s: truncating inode %lu to %Ld bytes\n",
-				__func__, inode->i_ino, inode->i_size);
-			jbd_debug(2, "truncating inode %lu to %Ld bytes\n",
-				  inode->i_ino, inode->i_size);
-			ext3_truncate(inode);
-			nr_truncates++;
-		} else {
-			printk(KERN_DEBUG
-				"%s: deleting unreferenced inode %lu\n",
-				__func__, inode->i_ino);
-			jbd_debug(2, "deleting unreferenced inode %lu\n",
-				  inode->i_ino);
-			nr_orphans++;
-		}
-		iput(inode);  /* The delete magic happens here! */
-	}
-
-#define PLURAL(x) (x), ((x)==1) ? "" : "s"
-
-	if (nr_orphans)
-		ext3_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
-		       PLURAL(nr_orphans));
-	if (nr_truncates)
-		ext3_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
-		       PLURAL(nr_truncates));
-#ifdef CONFIG_QUOTA
-	/* Turn quotas off */
-	for (i = 0; i < EXT3_MAXQUOTAS; i++) {
-		if (sb_dqopt(sb)->files[i])
-			dquot_quota_off(sb, i);
-	}
-#endif
-	sb->s_flags = s_flags; /* Restore MS_RDONLY status */
-}
-
-/*
 * Maximal file size.  There is a limit from the direct, indirect,
 * double-indirect, and triple-indirect block chains, and also a limit of
 * (2^32 - 1) 512-byte sectors in i_blocks.
- * We need to be 1 filesystem block less than the 2^32 sector limit.
- */
-static loff_t ext3_max_size(int bits)
-{
-	loff_t res = EXT3_NDIR_BLOCKS;
-	int meta_blocks;
-	loff_t upper_limit;
-
-	/* This is calculated to be the largest file size for a
-	 * dense file, such that the total number of sectors in the
-	 * file, including data and all indirect blocks, does not
-	 * exceed 2^32 - 1, with the __u32 i_blocks field counting
-	 * the number of 512-byte blocks in the file.
-	 */
-	upper_limit = (1LL << 32) - 1;
-
-	/* total blocks in file system block size */
-	upper_limit >>= (bits - 9);
-
-
-	/* indirect blocks */
-	meta_blocks = 1;
-	/* double indirect blocks */
-	meta_blocks += 1 + (1LL << (bits-2));
-	/* triple indirect blocks */
-	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
-
-	upper_limit -= meta_blocks;
-	upper_limit <<= bits;
-
-	res += 1LL << (bits-2);
-	res += 1LL << (2*(bits-2));
-	res += 1LL << (3*(bits-2));
-	res <<= bits;
-	if (res > upper_limit)
-		res = upper_limit;
-
-	if (res > MAX_LFS_FILESIZE)
-		res = MAX_LFS_FILESIZE;
-
-	return res;
-}
-
-static ext3_fsblk_t descriptor_loc(struct super_block *sb,
-				    ext3_fsblk_t logic_sb_block,
-				    int nr)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	unsigned long bg, first_meta_bg;
-	int has_super = 0;
-
-	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
-
-	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_META_BG) ||
-	    nr < first_meta_bg)
-		return (logic_sb_block + nr + 1);
-	bg = sbi->s_desc_per_block * nr;
-	if (ext3_bg_has_super(sb, bg))
-		has_super = 1;
-	return (has_super + ext3_group_first_block_no(sb, bg));
-}
-
-
-static int ext3_fill_super (struct super_block *sb, void *data, int silent)
-{
-	struct buffer_head * bh;
-	struct ext3_super_block *es = NULL;
-	struct ext3_sb_info *sbi;
-	ext3_fsblk_t block;
-	ext3_fsblk_t sb_block = get_sb_block(&data, sb);
-	ext3_fsblk_t logic_sb_block;
-	unsigned long offset = 0;
-	unsigned int journal_inum = 0;
-	unsigned long journal_devnum = 0;
-	unsigned long def_mount_opts;
-	struct inode *root;
-	int blocksize;
-	int hblock;
-	int db_count;
-	int i;
-	int needs_recovery;
-	int ret = -EINVAL;
-	__le32 features;
-	int err;
-
-	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
-	if (!sbi)
-		return -ENOMEM;
-
-	sbi->s_blockgroup_lock =
-		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
-	if (!sbi->s_blockgroup_lock) {
-		kfree(sbi);
-		return -ENOMEM;
-	}
-	sb->s_fs_info = sbi;
-	sbi->s_sb_block = sb_block;
-
-	blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE);
-	if (!blocksize) {
-		ext3_msg(sb, KERN_ERR, "error: unable to set blocksize");
-		goto out_fail;
-	}
-
-	/*
-	 * The ext3 superblock is only buffer-aligned for 1kB block sizes;
-	 * for other block sizes we need to calculate its offset from the
-	 * start of the buffer.
-	 */
-	if (blocksize != EXT3_MIN_BLOCK_SIZE) {
-		logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
-		offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
-	} else {
-		logic_sb_block = sb_block;
-	}
-
-	if (!(bh = sb_bread(sb, logic_sb_block))) {
-		ext3_msg(sb, KERN_ERR, "error: unable to read superblock");
-		goto out_fail;
-	}
-	/*
-	 * Note: s_es must be initialized as soon as possible because
-	 *       some ext3 macro-instructions depend on its value
-	 */
-	es = (struct ext3_super_block *) (bh->b_data + offset);
-	sbi->s_es = es;
-	sb->s_magic = le16_to_cpu(es->s_magic);
-	if (sb->s_magic != EXT3_SUPER_MAGIC)
-		goto cantfind_ext3;
-
-	/* Set defaults before we parse the mount options */
-	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
-	if (def_mount_opts & EXT3_DEFM_DEBUG)
-		set_opt(sbi->s_mount_opt, DEBUG);
-	if (def_mount_opts & EXT3_DEFM_BSDGROUPS)
-		set_opt(sbi->s_mount_opt, GRPID);
-	if (def_mount_opts & EXT3_DEFM_UID16)
-		set_opt(sbi->s_mount_opt, NO_UID32);
-#ifdef CONFIG_EXT3_FS_XATTR
-	if (def_mount_opts & EXT3_DEFM_XATTR_USER)
-		set_opt(sbi->s_mount_opt, XATTR_USER);
-#endif
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-	if (def_mount_opts & EXT3_DEFM_ACL)
-		set_opt(sbi->s_mount_opt, POSIX_ACL);
-#endif
-	if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_DATA)
-		set_opt(sbi->s_mount_opt, JOURNAL_DATA);
-	else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_ORDERED)
-		set_opt(sbi->s_mount_opt, ORDERED_DATA);
-	else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_WBACK)
-		set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
-
-	if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC)
-		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
-	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_CONTINUE)
-		set_opt(sbi->s_mount_opt, ERRORS_CONT);
-	else
-		set_opt(sbi->s_mount_opt, ERRORS_RO);
-
-	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
-	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
-
-	/* enable barriers by default */
-	set_opt(sbi->s_mount_opt, BARRIER);
-	set_opt(sbi->s_mount_opt, RESERVATION);
-
-	if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum,
-			    NULL, 0))
-		goto failed_mount;
-
-	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
-
-	if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV &&
-	    (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) ||
-	     EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
-	     EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U)))
-		ext3_msg(sb, KERN_WARNING,
-			"warning: feature flags set on rev 0 fs, "
-			"running e2fsck is recommended");
-	/*
-	 * Check feature flags regardless of the revision level, since we
-	 * previously didn't change the revision level when setting the flags,
-	 * so there is a chance incompat flags are set on a rev 0 filesystem.
-	 */
-	features = EXT3_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP);
-	if (features) {
-		ext3_msg(sb, KERN_ERR,
-			"error: couldn't mount because of unsupported "
-			"optional features (%x)", le32_to_cpu(features));
-		goto failed_mount;
-	}
-	features = EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP);
-	if (!(sb->s_flags & MS_RDONLY) && features) {
-		ext3_msg(sb, KERN_ERR,
-			"error: couldn't mount RDWR because of unsupported "
-			"optional features (%x)", le32_to_cpu(features));
-		goto failed_mount;
-	}
-	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
-
-	if (blocksize < EXT3_MIN_BLOCK_SIZE ||
-	    blocksize > EXT3_MAX_BLOCK_SIZE) {
-		ext3_msg(sb, KERN_ERR,
-			"error: couldn't mount because of unsupported "
-			"filesystem blocksize %d", blocksize);
-		goto failed_mount;
-	}
-
-	hblock = bdev_logical_block_size(sb->s_bdev);
-	if (sb->s_blocksize != blocksize) {
-		/*
-		 * Make sure the blocksize for the filesystem is at least
-		 * as large as the hardware sector size for the machine.
-		 */
-		if (blocksize < hblock) {
-			ext3_msg(sb, KERN_ERR,
-				"error: fsblocksize %d too small for "
-				"hardware sectorsize %d", blocksize, hblock);
-			goto failed_mount;
-		}
-
-		brelse (bh);
-		if (!sb_set_blocksize(sb, blocksize)) {
-			ext3_msg(sb, KERN_ERR,
-				"error: bad blocksize %d", blocksize);
-			goto out_fail;
-		}
-		logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
-		offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
-		bh = sb_bread(sb, logic_sb_block);
-		if (!bh) {
-			ext3_msg(sb, KERN_ERR,
-			       "error: can't read superblock on 2nd try");
-			goto failed_mount;
-		}
-		es = (struct ext3_super_block *)(bh->b_data + offset);
-		sbi->s_es = es;
-		if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) {
-			ext3_msg(sb, KERN_ERR,
-				"error: magic mismatch");
-			goto failed_mount;
-		}
-	}
-
-	sb->s_maxbytes = ext3_max_size(sb->s_blocksize_bits);
-
-	if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV) {
-		sbi->s_inode_size = EXT3_GOOD_OLD_INODE_SIZE;
-		sbi->s_first_ino = EXT3_GOOD_OLD_FIRST_INO;
-	} else {
-		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
-		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
-		if ((sbi->s_inode_size < EXT3_GOOD_OLD_INODE_SIZE) ||
-		    (!is_power_of_2(sbi->s_inode_size)) ||
-		    (sbi->s_inode_size > blocksize)) {
-			ext3_msg(sb, KERN_ERR,
-				"error: unsupported inode size: %d",
-				sbi->s_inode_size);
-			goto failed_mount;
-		}
-	}
-	sbi->s_frag_size = EXT3_MIN_FRAG_SIZE <<
-				   le32_to_cpu(es->s_log_frag_size);
-	if (blocksize != sbi->s_frag_size) {
-		ext3_msg(sb, KERN_ERR,
-		       "error: fragsize %lu != blocksize %u (unsupported)",
-		       sbi->s_frag_size, blocksize);
-		goto failed_mount;
-	}
-	sbi->s_frags_per_block = 1;
-	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
-	sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
-	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
-	if (EXT3_INODE_SIZE(sb) == 0 || EXT3_INODES_PER_GROUP(sb) == 0)
-		goto cantfind_ext3;
-	sbi->s_inodes_per_block = blocksize / EXT3_INODE_SIZE(sb);
-	if (sbi->s_inodes_per_block == 0)
-		goto cantfind_ext3;
-	sbi->s_itb_per_group = sbi->s_inodes_per_group /
-					sbi->s_inodes_per_block;
-	sbi->s_desc_per_block = blocksize / sizeof(struct ext3_group_desc);
-	sbi->s_sbh = bh;
-	sbi->s_mount_state = le16_to_cpu(es->s_state);
-	sbi->s_addr_per_block_bits = ilog2(EXT3_ADDR_PER_BLOCK(sb));
-	sbi->s_desc_per_block_bits = ilog2(EXT3_DESC_PER_BLOCK(sb));
-	for (i = 0; i < 4; i++)
-		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
-	sbi->s_def_hash_version = es->s_def_hash_version;
-	i = le32_to_cpu(es->s_flags);
-	if (i & EXT2_FLAGS_UNSIGNED_HASH)
-		sbi->s_hash_unsigned = 3;
-	else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
-#ifdef __CHAR_UNSIGNED__
-		es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
-		sbi->s_hash_unsigned = 3;
-#else
-		es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
-#endif
-	}
-
-	if (sbi->s_blocks_per_group > blocksize * 8) {
-		ext3_msg(sb, KERN_ERR,
-			"#blocks per group too big: %lu",
-			sbi->s_blocks_per_group);
-		goto failed_mount;
-	}
-	if (sbi->s_frags_per_group > blocksize * 8) {
-		ext3_msg(sb, KERN_ERR,
-			"error: #fragments per group too big: %lu",
-			sbi->s_frags_per_group);
-		goto failed_mount;
-	}
-	if (sbi->s_inodes_per_group > blocksize * 8) {
-		ext3_msg(sb, KERN_ERR,
-			"error: #inodes per group too big: %lu",
-			sbi->s_inodes_per_group);
-		goto failed_mount;
-	}
-
-	err = generic_check_addressable(sb->s_blocksize_bits,
-					le32_to_cpu(es->s_blocks_count));
-	if (err) {
-		ext3_msg(sb, KERN_ERR,
-			"error: filesystem is too large to mount safely");
-		if (sizeof(sector_t) < 8)
-			ext3_msg(sb, KERN_ERR,
-				"error: CONFIG_LBDAF not enabled");
-		ret = err;
-		goto failed_mount;
-	}
-
-	if (EXT3_BLOCKS_PER_GROUP(sb) == 0)
-		goto cantfind_ext3;
-	sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
-			       le32_to_cpu(es->s_first_data_block) - 1)
-				       / EXT3_BLOCKS_PER_GROUP(sb)) + 1;
-	db_count = DIV_ROUND_UP(sbi->s_groups_count, EXT3_DESC_PER_BLOCK(sb));
-	sbi->s_group_desc = kmalloc(db_count * sizeof(struct buffer_head *),
-				    GFP_KERNEL);
-	if (sbi->s_group_desc == NULL) {
-		ext3_msg(sb, KERN_ERR,
-			"error: not enough memory");
-		ret = -ENOMEM;
-		goto failed_mount;
-	}
-
-	bgl_lock_init(sbi->s_blockgroup_lock);
-
-	for (i = 0; i < db_count; i++) {
-		block = descriptor_loc(sb, logic_sb_block, i);
-		sbi->s_group_desc[i] = sb_bread(sb, block);
-		if (!sbi->s_group_desc[i]) {
-			ext3_msg(sb, KERN_ERR,
-				"error: can't read group descriptor %d", i);
-			db_count = i;
-			goto failed_mount2;
-		}
-	}
-	if (!ext3_check_descriptors(sb)) {
-		ext3_msg(sb, KERN_ERR,
-			"error: group descriptors corrupted");
-		goto failed_mount2;
-	}
-	sbi->s_gdb_count = db_count;
-	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
-	spin_lock_init(&sbi->s_next_gen_lock);
-
-	/* per filesystem reservation list head & lock */
-	spin_lock_init(&sbi->s_rsv_window_lock);
-	sbi->s_rsv_window_root = RB_ROOT;
-	/* Add a single, static dummy reservation to the start of the
-	 * reservation window list --- it gives us a placeholder for
-	 * append-at-start-of-list which makes the allocation logic
-	 * _much_ simpler. */
-	sbi->s_rsv_window_head.rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-	sbi->s_rsv_window_head.rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-	sbi->s_rsv_window_head.rsv_alloc_hit = 0;
-	sbi->s_rsv_window_head.rsv_goal_size = 0;
-	ext3_rsv_window_add(sb, &sbi->s_rsv_window_head);
-
-	/*
-	 * set up enough so that it can read an inode
-	 */
-	sb->s_op = &ext3_sops;
-	sb->s_export_op = &ext3_export_ops;
-	sb->s_xattr = ext3_xattr_handlers;
-#ifdef CONFIG_QUOTA
-	sb->s_qcop = &ext3_qctl_operations;
-	sb->dq_op = &ext3_quota_operations;
-	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
-#endif
-	memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
-	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
-	mutex_init(&sbi->s_orphan_lock);
-	mutex_init(&sbi->s_resize_lock);
-
-	sb->s_root = NULL;
-
-	needs_recovery = (es->s_last_orphan != 0 ||
-			  EXT3_HAS_INCOMPAT_FEATURE(sb,
-				    EXT3_FEATURE_INCOMPAT_RECOVER));
-
-	/*
-	 * The first inode we look at is the journal inode.  Don't try
-	 * root first: it may be modified in the journal!
-	 */
-	if (!test_opt(sb, NOLOAD) &&
-	    EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
-		if (ext3_load_journal(sb, es, journal_devnum))
-			goto failed_mount2;
-	} else if (journal_inum) {
-		if (ext3_create_journal(sb, es, journal_inum))
-			goto failed_mount2;
-	} else {
-		if (!silent)
-			ext3_msg(sb, KERN_ERR,
-				"error: no journal found. "
-				"mounting ext3 over ext2?");
-		goto failed_mount2;
-	}
-	err = percpu_counter_init(&sbi->s_freeblocks_counter,
-			ext3_count_free_blocks(sb), GFP_KERNEL);
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_freeinodes_counter,
-				ext3_count_free_inodes(sb), GFP_KERNEL);
-	}
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_dirs_counter,
-				ext3_count_dirs(sb), GFP_KERNEL);
-	}
-	if (err) {
-		ext3_msg(sb, KERN_ERR, "error: insufficient memory");
-		ret = err;
-		goto failed_mount3;
-	}
-
-	/* We have now updated the journal if required, so we can
-	 * validate the data journaling mode. */
-	switch (test_opt(sb, DATA_FLAGS)) {
-	case 0:
-		/* No mode set, assume a default based on the journal
-		 * capabilities: ORDERED_DATA if the journal can
-		 * cope, else JOURNAL_DATA */
-		if (journal_check_available_features
-		    (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE))
-			set_opt(sbi->s_mount_opt, DEFAULT_DATA_MODE);
-		else
-			set_opt(sbi->s_mount_opt, JOURNAL_DATA);
-		break;
-
-	case EXT3_MOUNT_ORDERED_DATA:
-	case EXT3_MOUNT_WRITEBACK_DATA:
-		if (!journal_check_available_features
-		    (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) {
-			ext3_msg(sb, KERN_ERR,
-				"error: journal does not support "
-				"requested data journaling mode");
-			goto failed_mount3;
-		}
-	default:
-		break;
-	}
-
-	/*
-	 * The journal_load will have done any necessary log recovery,
-	 * so we can safely mount the rest of the filesystem now.
-	 */
-
-	root = ext3_iget(sb, EXT3_ROOT_INO);
-	if (IS_ERR(root)) {
-		ext3_msg(sb, KERN_ERR, "error: get root inode failed");
-		ret = PTR_ERR(root);
-		goto failed_mount3;
-	}
-	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
-		iput(root);
-		ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
-		goto failed_mount3;
-	}
-	sb->s_root = d_make_root(root);
-	if (!sb->s_root) {
-		ext3_msg(sb, KERN_ERR, "error: get root dentry failed");
-		ret = -ENOMEM;
-		goto failed_mount3;
-	}
-
-	if (ext3_setup_super(sb, es, sb->s_flags & MS_RDONLY))
-		sb->s_flags |= MS_RDONLY;
-
-	EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS;
-	ext3_orphan_cleanup(sb, es);
-	EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS;
-	if (needs_recovery) {
-		ext3_mark_recovery_complete(sb, es);
-		ext3_msg(sb, KERN_INFO, "recovery complete");
-	}
-	ext3_msg(sb, KERN_INFO, "mounted filesystem with %s data mode",
-		test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ? "journal":
-		test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
-		"writeback");
-
-	return 0;
-
-cantfind_ext3:
-	if (!silent)
-		ext3_msg(sb, KERN_INFO,
-			"error: can't find ext3 filesystem on dev %s.",
-		       sb->s_id);
-	goto failed_mount;
-
-failed_mount3:
-	percpu_counter_destroy(&sbi->s_freeblocks_counter);
-	percpu_counter_destroy(&sbi->s_freeinodes_counter);
-	percpu_counter_destroy(&sbi->s_dirs_counter);
-	journal_destroy(sbi->s_journal);
-failed_mount2:
-	for (i = 0; i < db_count; i++)
-		brelse(sbi->s_group_desc[i]);
-	kfree(sbi->s_group_desc);
-failed_mount:
-#ifdef CONFIG_QUOTA
-	for (i = 0; i < EXT3_MAXQUOTAS; i++)
-		kfree(sbi->s_qf_names[i]);
-#endif
-	ext3_blkdev_remove(sbi);
-	brelse(bh);
-out_fail:
-	sb->s_fs_info = NULL;
-	kfree(sbi->s_blockgroup_lock);
-	kfree(sbi);
-	return ret;
-}
-
-/*
- * Set up any per-fs journal parameters now.  We'll do this both on
- * initial mount, once the journal has been initialised but before we've
- * done any recovery; and again on any subsequent remount.
- */
-static void ext3_init_journal_params(struct super_block *sb, journal_t *journal)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-
-	if (sbi->s_commit_interval)
-		journal->j_commit_interval = sbi->s_commit_interval;
-	/* We could also set up an ext3-specific default for the commit
-	 * interval here, but for now we'll just fall back to the jbd
-	 * default. */
-
-	spin_lock(&journal->j_state_lock);
-	if (test_opt(sb, BARRIER))
-		journal->j_flags |= JFS_BARRIER;
-	else
-		journal->j_flags &= ~JFS_BARRIER;
-	if (test_opt(sb, DATA_ERR_ABORT))
-		journal->j_flags |= JFS_ABORT_ON_SYNCDATA_ERR;
-	else
-		journal->j_flags &= ~JFS_ABORT_ON_SYNCDATA_ERR;
-	spin_unlock(&journal->j_state_lock);
-}
-
-static journal_t *ext3_get_journal(struct super_block *sb,
-				   unsigned int journal_inum)
-{
-	struct inode *journal_inode;
-	journal_t *journal;
-
-	/* First, test for the existence of a valid inode on disk.  Bad
-	 * things happen if we iget() an unused inode, as the subsequent
-	 * iput() will try to delete it. */
-
-	journal_inode = ext3_iget(sb, journal_inum);
-	if (IS_ERR(journal_inode)) {
-		ext3_msg(sb, KERN_ERR, "error: no journal found");
-		return NULL;
-	}
-	if (!journal_inode->i_nlink) {
-		make_bad_inode(journal_inode);
-		iput(journal_inode);
-		ext3_msg(sb, KERN_ERR, "error: journal inode is deleted");
-		return NULL;
-	}
-
-	jbd_debug(2, "Journal inode found at %p: %Ld bytes\n",
-		  journal_inode, journal_inode->i_size);
-	if (!S_ISREG(journal_inode->i_mode)) {
-		ext3_msg(sb, KERN_ERR, "error: invalid journal inode");
-		iput(journal_inode);
-		return NULL;
-	}
-
-	journal = journal_init_inode(journal_inode);
-	if (!journal) {
-		ext3_msg(sb, KERN_ERR, "error: could not load journal inode");
-		iput(journal_inode);
-		return NULL;
-	}
-	journal->j_private = sb;
-	ext3_init_journal_params(sb, journal);
-	return journal;
-}
-
-static journal_t *ext3_get_dev_journal(struct super_block *sb,
-				       dev_t j_dev)
-{
-	struct buffer_head *bh;
-	journal_t *journal;
-	ext3_fsblk_t start;
-	ext3_fsblk_t len;
-	int hblock, blocksize;
-	ext3_fsblk_t sb_block;
-	unsigned long offset;
-	struct ext3_super_block *es;
-	struct block_device *bdev;
-
-	bdev = ext3_blkdev_get(j_dev, sb);
-	if (bdev == NULL)
-		return NULL;
-
-	blocksize = sb->s_blocksize;
-	hblock = bdev_logical_block_size(bdev);
-	if (blocksize < hblock) {
-		ext3_msg(sb, KERN_ERR,
-			"error: blocksize too small for journal device");
-		goto out_bdev;
-	}
-
-	sb_block = EXT3_MIN_BLOCK_SIZE / blocksize;
-	offset = EXT3_MIN_BLOCK_SIZE % blocksize;
-	set_blocksize(bdev, blocksize);
-	if (!(bh = __bread(bdev, sb_block, blocksize))) {
-		ext3_msg(sb, KERN_ERR, "error: couldn't read superblock of "
-			"external journal");
-		goto out_bdev;
-	}
-
-	es = (struct ext3_super_block *) (bh->b_data + offset);
-	if ((le16_to_cpu(es->s_magic) != EXT3_SUPER_MAGIC) ||
-	    !(le32_to_cpu(es->s_feature_incompat) &
-	      EXT3_FEATURE_INCOMPAT_JOURNAL_DEV)) {
-		ext3_msg(sb, KERN_ERR, "error: external journal has "
-			"bad superblock");
-		brelse(bh);
-		goto out_bdev;
-	}
-
-	if (memcmp(EXT3_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
-		ext3_msg(sb, KERN_ERR, "error: journal UUID does not match");
-		brelse(bh);
-		goto out_bdev;
-	}
-
-	len = le32_to_cpu(es->s_blocks_count);
-	start = sb_block + 1;
-	brelse(bh);	/* we're done with the superblock */
-
-	journal = journal_init_dev(bdev, sb->s_bdev,
-					start, len, blocksize);
-	if (!journal) {
-		ext3_msg(sb, KERN_ERR,
-			"error: failed to create device journal");
-		goto out_bdev;
-	}
-	journal->j_private = sb;
-	if (!bh_uptodate_or_lock(journal->j_sb_buffer)) {
-		if (bh_submit_read(journal->j_sb_buffer)) {
-			ext3_msg(sb, KERN_ERR, "I/O error on journal device");
-			goto out_journal;
-		}
-	}
-	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
-		ext3_msg(sb, KERN_ERR,
-			"error: external journal has more than one "
-			"user (unsupported) - %d",
-			be32_to_cpu(journal->j_superblock->s_nr_users));
-		goto out_journal;
-	}
-	EXT3_SB(sb)->journal_bdev = bdev;
-	ext3_init_journal_params(sb, journal);
-	return journal;
-out_journal:
-	journal_destroy(journal);
-out_bdev:
-	ext3_blkdev_put(bdev);
-	return NULL;
-}
-
-static int ext3_load_journal(struct super_block *sb,
-			     struct ext3_super_block *es,
-			     unsigned long journal_devnum)
-{
-	journal_t *journal;
-	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
-	dev_t journal_dev;
-	int err = 0;
-	int really_read_only;
-
-	if (journal_devnum &&
-	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
-		ext3_msg(sb, KERN_INFO, "external journal device major/minor "
-			"numbers have changed");
-		journal_dev = new_decode_dev(journal_devnum);
-	} else
-		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
-
-	really_read_only = bdev_read_only(sb->s_bdev);
-
-	/*
-	 * Are we loading a blank journal or performing recovery after a
-	 * crash?  For recovery, we need to check in advance whether we
-	 * can get read-write access to the device.
-	 */
-
-	if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)) {
-		if (sb->s_flags & MS_RDONLY) {
-			ext3_msg(sb, KERN_INFO,
-				"recovery required on readonly filesystem");
-			if (really_read_only) {
-				ext3_msg(sb, KERN_ERR, "error: write access "
-					"unavailable, cannot proceed");
-				return -EROFS;
-			}
-			ext3_msg(sb, KERN_INFO,
-				"write access will be enabled during recovery");
-		}
-	}
-
-	if (journal_inum && journal_dev) {
-		ext3_msg(sb, KERN_ERR, "error: filesystem has both journal "
-		       "and inode journals");
-		return -EINVAL;
-	}
-
-	if (journal_inum) {
-		if (!(journal = ext3_get_journal(sb, journal_inum)))
-			return -EINVAL;
-	} else {
-		if (!(journal = ext3_get_dev_journal(sb, journal_dev)))
-			return -EINVAL;
-	}
-
-	if (!(journal->j_flags & JFS_BARRIER))
-		printk(KERN_INFO "EXT3-fs: barriers not enabled\n");
-
-	if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) {
-		err = journal_update_format(journal);
-		if (err)  {
-			ext3_msg(sb, KERN_ERR, "error updating journal");
-			journal_destroy(journal);
-			return err;
-		}
-	}
-
-	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER))
-		err = journal_wipe(journal, !really_read_only);
-	if (!err)
-		err = journal_load(journal);
-
-	if (err) {
-		ext3_msg(sb, KERN_ERR, "error loading journal");
-		journal_destroy(journal);
-		return err;
-	}
-
-	EXT3_SB(sb)->s_journal = journal;
-	ext3_clear_journal_err(sb, es);
-
-	if (!really_read_only && journal_devnum &&
-	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
-		es->s_journal_dev = cpu_to_le32(journal_devnum);
-
-		/* Make sure we flush the recovery flag to disk. */
-		ext3_commit_super(sb, es, 1);
-	}
-
-	return 0;
-}
-
-static int ext3_create_journal(struct super_block *sb,
-			       struct ext3_super_block *es,
-			       unsigned int journal_inum)
-{
-	journal_t *journal;
-	int err;
-
-	if (sb->s_flags & MS_RDONLY) {
-		ext3_msg(sb, KERN_ERR,
-			"error: readonly filesystem when trying to "
-			"create journal");
-		return -EROFS;
-	}
-
-	journal = ext3_get_journal(sb, journal_inum);
-	if (!journal)
-		return -EINVAL;
-
-	ext3_msg(sb, KERN_INFO, "creating new journal on inode %u",
-	       journal_inum);
-
-	err = journal_create(journal);
-	if (err) {
-		ext3_msg(sb, KERN_ERR, "error creating journal");
-		journal_destroy(journal);
-		return -EIO;
-	}
-
-	EXT3_SB(sb)->s_journal = journal;
-
-	ext3_update_dynamic_rev(sb);
-	EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-	EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL);
-
-	es->s_journal_inum = cpu_to_le32(journal_inum);
-
-	/* Make sure we flush the recovery flag to disk. */
-	ext3_commit_super(sb, es, 1);
-
-	return 0;
-}
-
-static int ext3_commit_super(struct super_block *sb,
-			       struct ext3_super_block *es,
-			       int sync)
-{
-	struct buffer_head *sbh = EXT3_SB(sb)->s_sbh;
-	int error = 0;
-
-	if (!sbh)
-		return error;
-
-	if (buffer_write_io_error(sbh)) {
-		/*
-		 * Oh, dear.  A previous attempt to write the
-		 * superblock failed.  This could happen because the
-		 * USB device was yanked out.  Or it could happen to
-		 * be a transient write error and maybe the block will
-		 * be remapped.  Nothing we can do but to retry the
-		 * write and hope for the best.
-		 */
-		ext3_msg(sb, KERN_ERR, "previous I/O error to "
-		       "superblock detected");
-		clear_buffer_write_io_error(sbh);
-		set_buffer_uptodate(sbh);
-	}
-	/*
-	 * If the file system is mounted read-only, don't update the
-	 * superblock write time.  This avoids updating the superblock
-	 * write time when we are mounting the root file system
-	 * read/only but we need to replay the journal; at that point,
-	 * for people who are east of GMT and who make their clock
-	 * tick in localtime for Windows bug-for-bug compatibility,
-	 * the clock is set in the future, and this will cause e2fsck
-	 * to complain and force a full file system check.
-	 */
-	if (!(sb->s_flags & MS_RDONLY))
-		es->s_wtime = cpu_to_le32(get_seconds());
-	es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb));
-	es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb));
-	BUFFER_TRACE(sbh, "marking dirty");
-	mark_buffer_dirty(sbh);
-	if (sync) {
-		error = sync_dirty_buffer(sbh);
-		if (buffer_write_io_error(sbh)) {
-			ext3_msg(sb, KERN_ERR, "I/O error while writing "
-			       "superblock");
-			clear_buffer_write_io_error(sbh);
-			set_buffer_uptodate(sbh);
-		}
-	}
-	return error;
-}
-
-
-/*
- * Have we just finished recovery?  If so, and if we are mounting (or
- * remounting) the filesystem readonly, then we will end up with a
- * consistent fs on disk.  Record that fact.
- */
-static void ext3_mark_recovery_complete(struct super_block *sb,
-					struct ext3_super_block *es)
-{
-	journal_t *journal = EXT3_SB(sb)->s_journal;
-
-	journal_lock_updates(journal);
-	if (journal_flush(journal) < 0)
-		goto out;
-
-	if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) &&
-	    sb->s_flags & MS_RDONLY) {
-		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-		ext3_commit_super(sb, es, 1);
-	}
-
-out:
-	journal_unlock_updates(journal);
-}
-
-/*
- * If we are mounting (or read-write remounting) a filesystem whose journal
- * has recorded an error from a previous lifetime, move that error to the
- * main filesystem now.
- */
-static void ext3_clear_journal_err(struct super_block *sb,
-				   struct ext3_super_block *es)
-{
-	journal_t *journal;
-	int j_errno;
-	const char *errstr;
-
-	journal = EXT3_SB(sb)->s_journal;
-
-	/*
-	 * Now check for any error status which may have been recorded in the
-	 * journal by a prior ext3_error() or ext3_abort()
-	 */
-
-	j_errno = journal_errno(journal);
-	if (j_errno) {
-		char nbuf[16];
-
-		errstr = ext3_decode_error(sb, j_errno, nbuf);
-		ext3_warning(sb, __func__, "Filesystem error recorded "
-			     "from previous mount: %s", errstr);
-		ext3_warning(sb, __func__, "Marking fs in need of "
-			     "filesystem check.");
-
-		EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
-		es->s_state |= cpu_to_le16(EXT3_ERROR_FS);
-		ext3_commit_super(sb, es, 1);
-
-		journal_clear_err(journal);
-	}
-}
-
-/*
- * Force the running and committing transactions to commit,
- * and wait on the commit.
- */
-int ext3_force_commit(struct super_block *sb)
-{
-	journal_t *journal;
-	int ret;
-
-	if (sb->s_flags & MS_RDONLY)
-		return 0;
-
-	journal = EXT3_SB(sb)->s_journal;
-	ret = ext3_journal_force_commit(journal);
-	return ret;
-}
-
-static int ext3_sync_fs(struct super_block *sb, int wait)
-{
-	tid_t target;
-
-	trace_ext3_sync_fs(sb, wait);
-	/*
-	 * Writeback quota in non-journalled quota case - journalled quota has
-	 * no dirty dquots
-	 */
-	dquot_writeback_dquots(sb, -1);
-	if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) {
-		if (wait)
-			log_wait_commit(EXT3_SB(sb)->s_journal, target);
-	}
-	return 0;
-}
-
-/*
- * LVM calls this function before a (read-only) snapshot is created.  This
- * gives us a chance to flush the journal completely and mark the fs clean.
- */
-static int ext3_freeze(struct super_block *sb)
-{
-	int error = 0;
-	journal_t *journal;
-
-	if (!(sb->s_flags & MS_RDONLY)) {
-		journal = EXT3_SB(sb)->s_journal;
-
-		/* Now we set up the journal barrier. */
-		journal_lock_updates(journal);
-
-		/*
-		 * We don't want to clear the needs_recovery flag if we
-		 * failed to flush the journal.
-		 */
-		error = journal_flush(journal);
-		if (error < 0)
-			goto out;
-
-		/* Journal blocked and flushed, clear needs_recovery flag. */
-		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-		error = ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1);
-		if (error)
-			goto out;
-	}
-	return 0;
-
-out:
-	journal_unlock_updates(journal);
-	return error;
-}
-
-/*
- * Called by LVM after the snapshot is done.  We need to reset the RECOVER
- * flag here, even though the filesystem is not technically dirty yet.
- */
-static int ext3_unfreeze(struct super_block *sb)
-{
-	if (!(sb->s_flags & MS_RDONLY)) {
-		/* Reset the needs_recovery flag before the fs is unlocked. */
-		EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-		ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1);
-		journal_unlock_updates(EXT3_SB(sb)->s_journal);
-	}
-	return 0;
-}
-
-static int ext3_remount(struct super_block *sb, int *flags, char *data)
-{
-	struct ext3_super_block *es;
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	ext3_fsblk_t n_blocks_count = 0;
-	unsigned long old_sb_flags;
-	struct ext3_mount_options old_opts;
-	int enable_quota = 0;
-	int err;
-#ifdef CONFIG_QUOTA
-	int i;
-#endif
-
-	sync_filesystem(sb);
-
-	/* Store the original options */
-	old_sb_flags = sb->s_flags;
-	old_opts.s_mount_opt = sbi->s_mount_opt;
-	old_opts.s_resuid = sbi->s_resuid;
-	old_opts.s_resgid = sbi->s_resgid;
-	old_opts.s_commit_interval = sbi->s_commit_interval;
-#ifdef CONFIG_QUOTA
-	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
-	for (i = 0; i < EXT3_MAXQUOTAS; i++)
-		if (sbi->s_qf_names[i]) {
-			old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
-							 GFP_KERNEL);
-			if (!old_opts.s_qf_names[i]) {
-				int j;
-
-				for (j = 0; j < i; j++)
-					kfree(old_opts.s_qf_names[j]);
-				return -ENOMEM;
-			}
-		} else
-			old_opts.s_qf_names[i] = NULL;
-#endif
-
-	/*
-	 * Allow the "check" option to be passed as a remount option.
-	 */
-	if (!parse_options(data, sb, NULL, NULL, &n_blocks_count, 1)) {
-		err = -EINVAL;
-		goto restore_opts;
-	}
-
-	if (test_opt(sb, ABORT))
-		ext3_abort(sb, __func__, "Abort forced by user");
-
-	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
-
-	es = sbi->s_es;
-
-	ext3_init_journal_params(sb, sbi->s_journal);
-
-	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) ||
-		n_blocks_count > le32_to_cpu(es->s_blocks_count)) {
-		if (test_opt(sb, ABORT)) {
-			err = -EROFS;
-			goto restore_opts;
-		}
-
-		if (*flags & MS_RDONLY) {
-			err = dquot_suspend(sb, -1);
-			if (err < 0)
-				goto restore_opts;
-
-			/*
-			 * First of all, the unconditional stuff we have to do
-			 * to disable replay of the journal when we next remount
-			 */
-			sb->s_flags |= MS_RDONLY;
-
-			/*
-			 * OK, test if we are remounting a valid rw partition
-			 * readonly, and if so set the rdonly flag and then
-			 * mark the partition as valid again.
-			 */
-			if (!(es->s_state & cpu_to_le16(EXT3_VALID_FS)) &&
-			    (sbi->s_mount_state & EXT3_VALID_FS))
-				es->s_state = cpu_to_le16(sbi->s_mount_state);
-
-			ext3_mark_recovery_complete(sb, es);
-		} else {
-			__le32 ret;
-			if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb,
-					~EXT3_FEATURE_RO_COMPAT_SUPP))) {
-				ext3_msg(sb, KERN_WARNING,
-					"warning: couldn't remount RDWR "
-					"because of unsupported optional "
-					"features (%x)", le32_to_cpu(ret));
-				err = -EROFS;
-				goto restore_opts;
-			}
-
-			/*
-			 * If we have an unprocessed orphan list hanging
-			 * around from a previously readonly bdev mount,
-			 * require a full umount & mount for now.
-			 */
-			if (es->s_last_orphan) {
-				ext3_msg(sb, KERN_WARNING, "warning: couldn't "
-				       "remount RDWR because of unprocessed "
-				       "orphan inode list.  Please "
-				       "umount & mount instead.");
-				err = -EINVAL;
-				goto restore_opts;
-			}
-
-			/*
-			 * Mounting a RDONLY partition read-write, so reread
-			 * and store the current valid flag.  (It may have
-			 * been changed by e2fsck since we originally mounted
-			 * the partition.)
-			 */
-			ext3_clear_journal_err(sb, es);
-			sbi->s_mount_state = le16_to_cpu(es->s_state);
-			if ((err = ext3_group_extend(sb, es, n_blocks_count)))
-				goto restore_opts;
-			if (!ext3_setup_super(sb, es, 0))
-				sb->s_flags &= ~MS_RDONLY;
-			enable_quota = 1;
-		}
-	}
-#ifdef CONFIG_QUOTA
-	/* Release old quota file names */
-	for (i = 0; i < EXT3_MAXQUOTAS; i++)
-		kfree(old_opts.s_qf_names[i]);
-#endif
-	if (enable_quota)
-		dquot_resume(sb, -1);
-	return 0;
-restore_opts:
-	sb->s_flags = old_sb_flags;
-	sbi->s_mount_opt = old_opts.s_mount_opt;
-	sbi->s_resuid = old_opts.s_resuid;
-	sbi->s_resgid = old_opts.s_resgid;
-	sbi->s_commit_interval = old_opts.s_commit_interval;
-#ifdef CONFIG_QUOTA
-	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
-	for (i = 0; i < EXT3_MAXQUOTAS; i++) {
-		kfree(sbi->s_qf_names[i]);
-		sbi->s_qf_names[i] = old_opts.s_qf_names[i];
-	}
-#endif
-	return err;
-}
-
-static int ext3_statfs(struct dentry *dentry, struct kstatfs *buf)
-{
-	struct super_block *sb = dentry->d_sb;
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	struct ext3_super_block *es = sbi->s_es;
-	u64 fsid;
-
-	if (test_opt(sb, MINIX_DF)) {
-		sbi->s_overhead_last = 0;
-	} else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
-		unsigned long ngroups = sbi->s_groups_count, i;
-		ext3_fsblk_t overhead = 0;
-		smp_rmb();
-
-		/*
-		 * Compute the overhead (FS structures).  This is constant
-		 * for a given filesystem unless the number of block groups
-		 * changes so we cache the previous value until it does.
-		 */
-
-		/*
-		 * All of the blocks before first_data_block are
-		 * overhead
-		 */
-		overhead = le32_to_cpu(es->s_first_data_block);
-
-		/*
-		 * Add the overhead attributed to the superblock and
-		 * block group descriptors.  If the sparse superblocks
-		 * feature is turned on, then not all groups have this.
-		 */
-		for (i = 0; i < ngroups; i++) {
-			overhead += ext3_bg_has_super(sb, i) +
-				ext3_bg_num_gdb(sb, i);
-			cond_resched();
-		}
-
-		/*
-		 * Every block group has an inode bitmap, a block
-		 * bitmap, and an inode table.
-		 */
-		overhead += ngroups * (2 + sbi->s_itb_per_group);
-
-		/* Add the internal journal blocks as well */
-		if (sbi->s_journal && !sbi->journal_bdev)
-			overhead += sbi->s_journal->j_maxlen;
-
-		sbi->s_overhead_last = overhead;
-		smp_wmb();
-		sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count);
-	}
-
-	buf->f_type = EXT3_SUPER_MAGIC;
-	buf->f_bsize = sb->s_blocksize;
-	buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last;
-	buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter);
-	buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
-	if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
-		buf->f_bavail = 0;
-	buf->f_files = le32_to_cpu(es->s_inodes_count);
-	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
-	buf->f_namelen = EXT3_NAME_LEN;
-	fsid = le64_to_cpup((void *)es->s_uuid) ^
-	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
-	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
-	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
-	return 0;
-}
-
-/* Helper function for writing quotas on sync - we need to start a transaction
- * before the quota file is locked for write. Otherwise there are possible
- * deadlocks:
- * Process 1                         Process 2
- * ext3_create()                     quota_sync()
- *   journal_start()                   write_dquot()
- *   dquot_initialize()                       down(dqio_mutex)
- *     down(dqio_mutex)                    journal_start()
- *
- */
-
-#ifdef CONFIG_QUOTA
-
-static inline struct inode *dquot_to_inode(struct dquot *dquot)
-{
-	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
-}
-
-static int ext3_write_dquot(struct dquot *dquot)
-{
-	int ret, err;
-	handle_t *handle;
-	struct inode *inode;
-
-	inode = dquot_to_inode(dquot);
-	handle = ext3_journal_start(inode,
-					EXT3_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-	ret = dquot_commit(dquot);
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
-static int ext3_acquire_dquot(struct dquot *dquot)
-{
-	int ret, err;
-	handle_t *handle;
-
-	handle = ext3_journal_start(dquot_to_inode(dquot),
-					EXT3_QUOTA_INIT_BLOCKS(dquot->dq_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-	ret = dquot_acquire(dquot);
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
-static int ext3_release_dquot(struct dquot *dquot)
-{
-	int ret, err;
-	handle_t *handle;
-
-	handle = ext3_journal_start(dquot_to_inode(dquot),
-					EXT3_QUOTA_DEL_BLOCKS(dquot->dq_sb));
-	if (IS_ERR(handle)) {
-		/* Release dquot anyway to avoid endless cycle in dqput() */
-		dquot_release(dquot);
-		return PTR_ERR(handle);
-	}
-	ret = dquot_release(dquot);
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
-static int ext3_mark_dquot_dirty(struct dquot *dquot)
-{
-	/* Are we journaling quotas? */
-	if (EXT3_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
-	    EXT3_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
-		dquot_mark_dquot_dirty(dquot);
-		return ext3_write_dquot(dquot);
-	} else {
-		return dquot_mark_dquot_dirty(dquot);
-	}
-}
-
-static int ext3_write_info(struct super_block *sb, int type)
-{
-	int ret, err;
-	handle_t *handle;
-
-	/* Data block + inode block */
-	handle = ext3_journal_start(d_inode(sb->s_root), 2);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-	ret = dquot_commit_info(sb, type);
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
-/*
- * Turn on quotas during mount time - we need to find
- * the quota file and such...
- */
-static int ext3_quota_on_mount(struct super_block *sb, int type)
-{
-	return dquot_quota_on_mount(sb, EXT3_SB(sb)->s_qf_names[type],
-					EXT3_SB(sb)->s_jquota_fmt, type);
-}
-
-/*
- * Standard function to be called on quota_on
- */
-static int ext3_quota_on(struct super_block *sb, int type, int format_id,
-			 struct path *path)
-{
-	int err;
-
-	if (!test_opt(sb, QUOTA))
-		return -EINVAL;
-
-	/* Quotafile not on the same filesystem? */
-	if (path->dentry->d_sb != sb)
-		return -EXDEV;
-	/* Journaling quota? */
-	if (EXT3_SB(sb)->s_qf_names[type]) {
-		/* Quotafile not of fs root? */
-		if (path->dentry->d_parent != sb->s_root)
-			ext3_msg(sb, KERN_WARNING,
-				"warning: Quota file not on filesystem root. "
-				"Journaled quota will not work.");
-	}
-
-	/*
-	 * When we journal data on quota file, we have to flush journal to see
-	 * all updates to the file when we bypass pagecache...
-	 */
-	if (ext3_should_journal_data(d_inode(path->dentry))) {
-		/*
-		 * We don't need to lock updates but journal_flush() could
-		 * otherwise be livelocked...
-		 */
-		journal_lock_updates(EXT3_SB(sb)->s_journal);
-		err = journal_flush(EXT3_SB(sb)->s_journal);
-		journal_unlock_updates(EXT3_SB(sb)->s_journal);
-		if (err)
-			return err;
-	}
-
-	return dquot_quota_on(sb, type, format_id, path);
-}
-
-/* Read data from the quota file - avoid the pagecache and such because we
- * cannot afford to acquire the locks... As quota files are never truncated
- * and the quota code itself serializes the operations (and no one else
- * should touch the files), we don't have to worry about races. */
-static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
-			       size_t len, loff_t off)
-{
-	struct inode *inode = sb_dqopt(sb)->files[type];
-	sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb);
-	int err = 0;
-	int offset = off & (sb->s_blocksize - 1);
-	int tocopy;
-	size_t toread;
-	struct buffer_head *bh;
-	loff_t i_size = i_size_read(inode);
-
-	if (off > i_size)
-		return 0;
-	if (off + len > i_size)
-		len = i_size - off;
-	toread = len;
-	while (toread > 0) {
-		tocopy = sb->s_blocksize - offset < toread ?
-				sb->s_blocksize - offset : toread;
-		bh = ext3_bread(NULL, inode, blk, 0, &err);
-		if (err)
-			return err;
-		if (!bh)	/* A hole? */
-			memset(data, 0, tocopy);
-		else
-			memcpy(data, bh->b_data + offset, tocopy);
-		brelse(bh);
-		offset = 0;
-		toread -= tocopy;
-		data += tocopy;
-		blk++;
-	}
-	return len;
-}
-
-/* Write to quotafile (we know the transaction is already started and has
- * enough credits) */
-static ssize_t ext3_quota_write(struct super_block *sb, int type,
-				const char *data, size_t len, loff_t off)
-{
-	struct inode *inode = sb_dqopt(sb)->files[type];
-	sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb);
-	int err = 0;
-	int offset = off & (sb->s_blocksize - 1);
-	int journal_quota = EXT3_SB(sb)->s_qf_names[type] != NULL;
-	struct buffer_head *bh;
-	handle_t *handle = journal_current_handle();
-
-	if (!handle) {
-		ext3_msg(sb, KERN_WARNING,
-			"warning: quota write (off=%llu, len=%llu)"
-			" cancelled because transaction is not started.",
-			(unsigned long long)off, (unsigned long long)len);
-		return -EIO;
-	}
-
-	/*
-	 * Since we account for only one data block in the transaction
-	 * credits, the write cannot cross a block boundary.
-	 */
-	if (sb->s_blocksize - offset < len) {
-		ext3_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
-			" cancelled because not block aligned",
-			(unsigned long long)off, (unsigned long long)len);
-		return -EIO;
-	}
-	bh = ext3_bread(handle, inode, blk, 1, &err);
-	if (!bh)
-		goto out;
-	if (journal_quota) {
-		err = ext3_journal_get_write_access(handle, bh);
-		if (err) {
-			brelse(bh);
-			goto out;
-		}
-	}
-	lock_buffer(bh);
-	memcpy(bh->b_data + offset, data, len);
-	flush_dcache_page(bh->b_page);
-	unlock_buffer(bh);
-	if (journal_quota)
-		err = ext3_journal_dirty_metadata(handle, bh);
-	else {
-		/* Always do at least ordered writes for quotas */
-		err = ext3_journal_dirty_data(handle, bh);
-		mark_buffer_dirty(bh);
-	}
-	brelse(bh);
-out:
-	if (err)
-		return err;
-	if (inode->i_size < off + len) {
-		i_size_write(inode, off + len);
-		EXT3_I(inode)->i_disksize = inode->i_size;
-	}
-	inode->i_version++;
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-	ext3_mark_inode_dirty(handle, inode);
-	return len;
-}
-
-#endif
-
-static struct dentry *ext3_mount(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *data)
-{
-	return mount_bdev(fs_type, flags, dev_name, data, ext3_fill_super);
-}
-
-static struct file_system_type ext3_fs_type = {
-	.owner		= THIS_MODULE,
-	.name		= "ext3",
-	.mount		= ext3_mount,
-	.kill_sb	= kill_block_super,
-	.fs_flags	= FS_REQUIRES_DEV,
-};
-MODULE_ALIAS_FS("ext3");
-
-static int __init init_ext3_fs(void)
-{
-	int err = init_ext3_xattr();
-	if (err)
-		return err;
-	err = init_inodecache();
-	if (err)
-		goto out1;
-	err = register_filesystem(&ext3_fs_type);
-	if (err)
-		goto out;
-	return 0;
-out:
-	destroy_inodecache();
-out1:
-	exit_ext3_xattr();
-	return err;
-}
-
-static void __exit exit_ext3_fs(void)
-{
-	unregister_filesystem(&ext3_fs_type);
-	destroy_inodecache();
-	exit_ext3_xattr();
-}
-
-MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
-MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions");
-MODULE_LICENSE("GPL");
-module_init(init_ext3_fs)
-module_exit(exit_ext3_fs)
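
The ext3_statfs() path deleted above derives the 64-bit f_fsid reported to
statfs(2) by XORing the two little-endian halves of the superblock UUID.  A
minimal userspace sketch of that folding, assuming a little-endian host
(uuid_to_fsid is a hypothetical helper name, not kernel code):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Fold a 16-byte UUID into a 64-bit fsid the way ext3_statfs()
	 * does: treat each 8-byte half as a little-endian u64 and XOR
	 * them.  (memcpy matches le64_to_cpup() only on an LE host.) */
	uint64_t uuid_to_fsid(const unsigned char uuid[16])
	{
		uint64_t lo, hi;

		memcpy(&lo, uuid, 8);
		memcpy(&hi, uuid + 8, 8);
		return lo ^ hi;
	}

	int main(void)
	{
		unsigned char uuid[16] = { /* example bytes */
			0xde, 0xad, 0xbe, 0xef, 0, 1, 2, 3,
			4, 5, 6, 7, 8, 9, 10, 11 };
		uint64_t fsid = uuid_to_fsid(uuid);

		/* The two 32-bit words handed back in f_fsid: */
		printf("val[0]=%08x val[1]=%08x\n",
		       (unsigned)(fsid & 0xFFFFFFFFUL),
		       (unsigned)(fsid >> 32));
		return 0;
	}
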
diff --git a/fs/ext3/symlink.c b/fs/ext3/symlink.c
deleted file mode 100644
index c08c590..0000000
--- a/fs/ext3/symlink.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- *  linux/fs/ext3/symlink.c
- *
- * Only fast symlinks left here - the rest is done by generic code. AV, 1999
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/symlink.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  ext3 symlink handling code
- */
-
-#include "ext3.h"
-#include "xattr.h"
-
-const struct inode_operations ext3_symlink_inode_operations = {
-	.readlink	= generic_readlink,
-	.follow_link	= page_follow_link_light,
-	.put_link	= page_put_link,
-	.setattr	= ext3_setattr,
-#ifdef CONFIG_EXT3_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
-	.listxattr	= ext3_listxattr,
-	.removexattr	= generic_removexattr,
-#endif
-};
-
-const struct inode_operations ext3_fast_symlink_inode_operations = {
-	.readlink	= generic_readlink,
-	.follow_link	= simple_follow_link,
-	.setattr	= ext3_setattr,
-#ifdef CONFIG_EXT3_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
-	.listxattr	= ext3_listxattr,
-	.removexattr	= generic_removexattr,
-#endif
-};
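
The two inode_operations tables removed above differ only in how the link
target is resolved: fast symlinks keep the target string inline in the
inode's block-pointer area, so simple_follow_link() can hand it back
directly, while longer targets live in a data block and are read through
the pagecache by page_follow_link_light().  A rough sketch of the
creation-time selection rule, with assumed names for the sketch (the real
check, against sizeof(EXT3_I(inode)->i_data), is made in fs/ext3/namei.c):

	#include <stdio.h>
	#include <string.h>

	/* 15 block pointers * 4 bytes = 60 bytes of inline storage in
	 * i_data[], shared with the target string (including its NUL). */
	#define INLINE_TARGET_BYTES 60	/* assumed name for this sketch */

	int symlink_is_fast(const char *target)
	{
		/* Fits in i_data[]: stored inline, no data block needed. */
		return strlen(target) + 1 <= INLINE_TARGET_BYTES;
	}

	int main(void)
	{
		printf("\"/tmp\" fast? %d\n", symlink_is_fast("/tmp"));
		return 0;
	}
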
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
deleted file mode 100644
index 7cf3650..0000000
--- a/fs/ext3/xattr.c
+++ /dev/null
@@ -1,1330 +0,0 @@
-/*
- * linux/fs/ext3/xattr.c
- *
- * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
- *
- * Fix by Harrison Xing <harrison@mountainviewdata.com>.
- * Ext3 code with a lot of help from Eric Jarman <ejarman@acm.org>.
- * Extended attributes for symlinks and special files added per
- *  suggestion of Luka Renko <luka.renko@hermes.si>.
- * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
- *  Red Hat Inc.
- * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
- *  and Andreas Gruenbacher <agruen@suse.de>.
- */
-
-/*
- * Extended attributes are stored directly in inodes (on file systems with
- * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
- * field contains the block number if an inode uses an additional block. All
- * attributes must fit in the inode and one additional block. Blocks that
- * contain the identical set of attributes may be shared among several inodes.
- * Identical blocks are detected by keeping a cache of blocks that have
- * recently been accessed.
- *
- * The attributes in inodes and on blocks have a different header; the entries
- * are stored in the same format:
- *
- *   +------------------+
- *   | header           |
- *   | entry 1          | |
- *   | entry 2          | | growing downwards
- *   | entry 3          | v
- *   | four null bytes  |
- *   | . . .            |
- *   | value 1          | ^
- *   | value 3          | | growing upwards
- *   | value 2          | |
- *   +------------------+
- *
- * The header is followed by multiple entry descriptors. In disk blocks, the
- * entry descriptors are kept sorted. In inodes, they are unsorted. The
- * attribute values are aligned to the end of the block in no specific order.
- *
- * Locking strategy
- * ----------------
- * EXT3_I(inode)->i_file_acl is protected by EXT3_I(inode)->xattr_sem.
- * EA blocks are only changed if they are exclusive to an inode, so
- * holding xattr_sem also means that nothing but the EA block's reference
- * count can change. Multiple writers to the same block are synchronized
- * by the buffer lock.
- */
-
-#include "ext3.h"
-#include <linux/mbcache.h>
-#include <linux/quotaops.h>
-#include "xattr.h"
-#include "acl.h"
-
-#define BHDR(bh) ((struct ext3_xattr_header *)((bh)->b_data))
-#define ENTRY(ptr) ((struct ext3_xattr_entry *)(ptr))
-#define BFIRST(bh) ENTRY(BHDR(bh)+1)
-#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
-
-#define IHDR(inode, raw_inode) \
-	((struct ext3_xattr_ibody_header *) \
-		((void *)raw_inode + \
-		 EXT3_GOOD_OLD_INODE_SIZE + \
-		 EXT3_I(inode)->i_extra_isize))
-#define IFIRST(hdr) ((struct ext3_xattr_entry *)((hdr)+1))
-
-#ifdef EXT3_XATTR_DEBUG
-# define ea_idebug(inode, f...) do { \
-		printk(KERN_DEBUG "inode %s:%lu: ", \
-			inode->i_sb->s_id, inode->i_ino); \
-		printk(f); \
-		printk("\n"); \
-	} while (0)
-# define ea_bdebug(bh, f...) do { \
-		char b[BDEVNAME_SIZE]; \
-		printk(KERN_DEBUG "block %s:%lu: ", \
-			bdevname(bh->b_bdev, b), \
-			(unsigned long) bh->b_blocknr); \
-		printk(f); \
-		printk("\n"); \
-	} while (0)
-#else
-# define ea_idebug(f...)
-# define ea_bdebug(f...)
-#endif
-
-static void ext3_xattr_cache_insert(struct buffer_head *);
-static struct buffer_head *ext3_xattr_cache_find(struct inode *,
-						 struct ext3_xattr_header *,
-						 struct mb_cache_entry **);
-static void ext3_xattr_rehash(struct ext3_xattr_header *,
-			      struct ext3_xattr_entry *);
-static int ext3_xattr_list(struct dentry *dentry, char *buffer,
-			   size_t buffer_size);
-
-static struct mb_cache *ext3_xattr_cache;
-
-static const struct xattr_handler *ext3_xattr_handler_map[] = {
-	[EXT3_XATTR_INDEX_USER]		     = &ext3_xattr_user_handler,
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-	[EXT3_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
-	[EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
-#endif
-	[EXT3_XATTR_INDEX_TRUSTED]	     = &ext3_xattr_trusted_handler,
-#ifdef CONFIG_EXT3_FS_SECURITY
-	[EXT3_XATTR_INDEX_SECURITY]	     = &ext3_xattr_security_handler,
-#endif
-};
-
-const struct xattr_handler *ext3_xattr_handlers[] = {
-	&ext3_xattr_user_handler,
-	&ext3_xattr_trusted_handler,
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-	&posix_acl_access_xattr_handler,
-	&posix_acl_default_xattr_handler,
-#endif
-#ifdef CONFIG_EXT3_FS_SECURITY
-	&ext3_xattr_security_handler,
-#endif
-	NULL
-};
-
-static inline const struct xattr_handler *
-ext3_xattr_handler(int name_index)
-{
-	const struct xattr_handler *handler = NULL;
-
-	if (name_index > 0 && name_index < ARRAY_SIZE(ext3_xattr_handler_map))
-		handler = ext3_xattr_handler_map[name_index];
-	return handler;
-}
-
-/*
- * Inode operation listxattr()
- *
- * d_inode(dentry)->i_mutex: don't care
- */
-ssize_t
-ext3_listxattr(struct dentry *dentry, char *buffer, size_t size)
-{
-	return ext3_xattr_list(dentry, buffer, size);
-}
-
-static int
-ext3_xattr_check_names(struct ext3_xattr_entry *entry, void *end)
-{
-	while (!IS_LAST_ENTRY(entry)) {
-		struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(entry);
-		if ((void *)next >= end)
-			return -EIO;
-		entry = next;
-	}
-	return 0;
-}
-
-static inline int
-ext3_xattr_check_block(struct buffer_head *bh)
-{
-	int error;
-
-	if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
-	    BHDR(bh)->h_blocks != cpu_to_le32(1))
-		return -EIO;
-	error = ext3_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
-	return error;
-}
-
-static inline int
-ext3_xattr_check_entry(struct ext3_xattr_entry *entry, size_t size)
-{
-	size_t value_size = le32_to_cpu(entry->e_value_size);
-
-	if (entry->e_value_block != 0 || value_size > size ||
-	    le16_to_cpu(entry->e_value_offs) + value_size > size)
-		return -EIO;
-	return 0;
-}
-
-static int
-ext3_xattr_find_entry(struct ext3_xattr_entry **pentry, int name_index,
-		      const char *name, size_t size, int sorted)
-{
-	struct ext3_xattr_entry *entry;
-	size_t name_len;
-	int cmp = 1;
-
-	if (name == NULL)
-		return -EINVAL;
-	name_len = strlen(name);
-	entry = *pentry;
-	for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
-		cmp = name_index - entry->e_name_index;
-		if (!cmp)
-			cmp = name_len - entry->e_name_len;
-		if (!cmp)
-			cmp = memcmp(name, entry->e_name, name_len);
-		if (cmp <= 0 && (sorted || cmp == 0))
-			break;
-	}
-	*pentry = entry;
-	if (!cmp && ext3_xattr_check_entry(entry, size))
-		return -EIO;
-	return cmp ? -ENODATA : 0;
-}
-
-static int
-ext3_xattr_block_get(struct inode *inode, int name_index, const char *name,
-		     void *buffer, size_t buffer_size)
-{
-	struct buffer_head *bh = NULL;
-	struct ext3_xattr_entry *entry;
-	size_t size;
-	int error;
-
-	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
-		  name_index, name, buffer, (long)buffer_size);
-
-	error = -ENODATA;
-	if (!EXT3_I(inode)->i_file_acl)
-		goto cleanup;
-	ea_idebug(inode, "reading block %u", EXT3_I(inode)->i_file_acl);
-	bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
-	if (!bh)
-		goto cleanup;
-	ea_bdebug(bh, "b_count=%d, refcount=%d",
-		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
-	if (ext3_xattr_check_block(bh)) {
-bad_block:	ext3_error(inode->i_sb, __func__,
-			   "inode %lu: bad block "E3FSBLK, inode->i_ino,
-			   EXT3_I(inode)->i_file_acl);
-		error = -EIO;
-		goto cleanup;
-	}
-	ext3_xattr_cache_insert(bh);
-	entry = BFIRST(bh);
-	error = ext3_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
-	if (error == -EIO)
-		goto bad_block;
-	if (error)
-		goto cleanup;
-	size = le32_to_cpu(entry->e_value_size);
-	if (buffer) {
-		error = -ERANGE;
-		if (size > buffer_size)
-			goto cleanup;
-		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
-		       size);
-	}
-	error = size;
-
-cleanup:
-	brelse(bh);
-	return error;
-}
-
-static int
-ext3_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
-		     void *buffer, size_t buffer_size)
-{
-	struct ext3_xattr_ibody_header *header;
-	struct ext3_xattr_entry *entry;
-	struct ext3_inode *raw_inode;
-	struct ext3_iloc iloc;
-	size_t size;
-	void *end;
-	int error;
-
-	if (!ext3_test_inode_state(inode, EXT3_STATE_XATTR))
-		return -ENODATA;
-	error = ext3_get_inode_loc(inode, &iloc);
-	if (error)
-		return error;
-	raw_inode = ext3_raw_inode(&iloc);
-	header = IHDR(inode, raw_inode);
-	entry = IFIRST(header);
-	end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-	error = ext3_xattr_check_names(entry, end);
-	if (error)
-		goto cleanup;
-	error = ext3_xattr_find_entry(&entry, name_index, name,
-				      end - (void *)entry, 0);
-	if (error)
-		goto cleanup;
-	size = le32_to_cpu(entry->e_value_size);
-	if (buffer) {
-		error = -ERANGE;
-		if (size > buffer_size)
-			goto cleanup;
-		memcpy(buffer, (void *)IFIRST(header) +
-		       le16_to_cpu(entry->e_value_offs), size);
-	}
-	error = size;
-
-cleanup:
-	brelse(iloc.bh);
-	return error;
-}
-
-/*
- * ext3_xattr_get()
- *
- * Copy an extended attribute into the buffer provided, or compute
- * the buffer size required.  If buffer is NULL, only the required
- * buffer size is computed.
- *
- * Returns a negative error number on failure, or the number of bytes
- * used / required on success.
- */
-int
-ext3_xattr_get(struct inode *inode, int name_index, const char *name,
-	       void *buffer, size_t buffer_size)
-{
-	int error;
-
-	down_read(&EXT3_I(inode)->xattr_sem);
-	error = ext3_xattr_ibody_get(inode, name_index, name, buffer,
-				     buffer_size);
-	if (error == -ENODATA)
-		error = ext3_xattr_block_get(inode, name_index, name, buffer,
-					     buffer_size);
-	up_read(&EXT3_I(inode)->xattr_sem);
-	return error;
-}
-
-static int
-ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
-			char *buffer, size_t buffer_size)
-{
-	size_t rest = buffer_size;
-
-	for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
-		const struct xattr_handler *handler =
-			ext3_xattr_handler(entry->e_name_index);
-
-		if (handler) {
-			size_t size = handler->list(dentry, buffer, rest,
-						    entry->e_name,
-						    entry->e_name_len,
-						    handler->flags);
-			if (buffer) {
-				if (size > rest)
-					return -ERANGE;
-				buffer += size;
-			}
-			rest -= size;
-		}
-	}
-	return buffer_size - rest;
-}
-
-static int
-ext3_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
-{
-	struct inode *inode = d_inode(dentry);
-	struct buffer_head *bh = NULL;
-	int error;
-
-	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
-		  buffer, (long)buffer_size);
-
-	error = 0;
-	if (!EXT3_I(inode)->i_file_acl)
-		goto cleanup;
-	ea_idebug(inode, "reading block %u", EXT3_I(inode)->i_file_acl);
-	bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
-	error = -EIO;
-	if (!bh)
-		goto cleanup;
-	ea_bdebug(bh, "b_count=%d, refcount=%d",
-		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
-	if (ext3_xattr_check_block(bh)) {
-		ext3_error(inode->i_sb, __func__,
-			   "inode %lu: bad block "E3FSBLK, inode->i_ino,
-			   EXT3_I(inode)->i_file_acl);
-		error = -EIO;
-		goto cleanup;
-	}
-	ext3_xattr_cache_insert(bh);
-	error = ext3_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
-
-cleanup:
-	brelse(bh);
-
-	return error;
-}
-
-static int
-ext3_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
-{
-	struct inode *inode = d_inode(dentry);
-	struct ext3_xattr_ibody_header *header;
-	struct ext3_inode *raw_inode;
-	struct ext3_iloc iloc;
-	void *end;
-	int error;
-
-	if (!ext3_test_inode_state(inode, EXT3_STATE_XATTR))
-		return 0;
-	error = ext3_get_inode_loc(inode, &iloc);
-	if (error)
-		return error;
-	raw_inode = ext3_raw_inode(&iloc);
-	header = IHDR(inode, raw_inode);
-	end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-	error = ext3_xattr_check_names(IFIRST(header), end);
-	if (error)
-		goto cleanup;
-	error = ext3_xattr_list_entries(dentry, IFIRST(header),
-					buffer, buffer_size);
-
-cleanup:
-	brelse(iloc.bh);
-	return error;
-}
-
-/*
- * ext3_xattr_list()
- *
- * Copy a list of attribute names into the buffer provided, or compute
- * the buffer size required.  If buffer is NULL, only the required
- * buffer size is computed.
- *
- * Returns a negative error number on failure, or the number of bytes
- * used / required on success.
- */
-static int
-ext3_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
-{
-	int i_error, b_error;
-
-	down_read(&EXT3_I(d_inode(dentry))->xattr_sem);
-	i_error = ext3_xattr_ibody_list(dentry, buffer, buffer_size);
-	if (i_error < 0) {
-		b_error = 0;
-	} else {
-		if (buffer) {
-			buffer += i_error;
-			buffer_size -= i_error;
-		}
-		b_error = ext3_xattr_block_list(dentry, buffer, buffer_size);
-		if (b_error < 0)
-			i_error = 0;
-	}
-	up_read(&EXT3_I(d_inode(dentry))->xattr_sem);
-	return i_error + b_error;
-}
-
-/*
- * If the EXT3_FEATURE_COMPAT_EXT_ATTR feature of this file system is
- * not set, set it.
- */
-static void ext3_xattr_update_super_block(handle_t *handle,
-					  struct super_block *sb)
-{
-	if (EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR))
-		return;
-
-	if (ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh) == 0) {
-		EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR);
-		ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-	}
-}
-
-/*
- * Release the xattr block BH: If the reference count is > 1, decrement
- * it; otherwise free the block.
- */
-static void
-ext3_xattr_release_block(handle_t *handle, struct inode *inode,
-			 struct buffer_head *bh)
-{
-	struct mb_cache_entry *ce = NULL;
-	int error = 0;
-
-	ce = mb_cache_entry_get(ext3_xattr_cache, bh->b_bdev, bh->b_blocknr);
-	error = ext3_journal_get_write_access(handle, bh);
-	if (error)
-		goto out;
-
-	lock_buffer(bh);
-
-	if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
-		ea_bdebug(bh, "refcount now=0; freeing");
-		if (ce)
-			mb_cache_entry_free(ce);
-		ext3_free_blocks(handle, inode, bh->b_blocknr, 1);
-		get_bh(bh);
-		ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
-	} else {
-		le32_add_cpu(&BHDR(bh)->h_refcount, -1);
-		error = ext3_journal_dirty_metadata(handle, bh);
-		if (IS_SYNC(inode))
-			handle->h_sync = 1;
-		dquot_free_block(inode, 1);
-		ea_bdebug(bh, "refcount now=%d; releasing",
-			  le32_to_cpu(BHDR(bh)->h_refcount));
-		if (ce)
-			mb_cache_entry_release(ce);
-	}
-	unlock_buffer(bh);
-out:
-	ext3_std_error(inode->i_sb, error);
-	return;
-}
-
-struct ext3_xattr_info {
-	int name_index;
-	const char *name;
-	const void *value;
-	size_t value_len;
-};
-
-struct ext3_xattr_search {
-	struct ext3_xattr_entry *first;
-	void *base;
-	void *end;
-	struct ext3_xattr_entry *here;
-	int not_found;
-};
-
-static int
-ext3_xattr_set_entry(struct ext3_xattr_info *i, struct ext3_xattr_search *s)
-{
-	struct ext3_xattr_entry *last;
-	size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
-
-	/* Compute min_offs and last. */
-	last = s->first;
-	for (; !IS_LAST_ENTRY(last); last = EXT3_XATTR_NEXT(last)) {
-		if (!last->e_value_block && last->e_value_size) {
-			size_t offs = le16_to_cpu(last->e_value_offs);
-			if (offs < min_offs)
-				min_offs = offs;
-		}
-	}
-	free = min_offs - ((void *)last - s->base) - sizeof(__u32);
-	if (!s->not_found) {
-		if (!s->here->e_value_block && s->here->e_value_size) {
-			size_t size = le32_to_cpu(s->here->e_value_size);
-			free += EXT3_XATTR_SIZE(size);
-		}
-		free += EXT3_XATTR_LEN(name_len);
-	}
-	if (i->value) {
-		if (free < EXT3_XATTR_LEN(name_len) +
-			   EXT3_XATTR_SIZE(i->value_len))
-			return -ENOSPC;
-	}
-
-	if (i->value && s->not_found) {
-		/* Insert the new name. */
-		size_t size = EXT3_XATTR_LEN(name_len);
-		size_t rest = (void *)last - (void *)s->here + sizeof(__u32);
-		memmove((void *)s->here + size, s->here, rest);
-		memset(s->here, 0, size);
-		s->here->e_name_index = i->name_index;
-		s->here->e_name_len = name_len;
-		memcpy(s->here->e_name, i->name, name_len);
-	} else {
-		if (!s->here->e_value_block && s->here->e_value_size) {
-			void *first_val = s->base + min_offs;
-			size_t offs = le16_to_cpu(s->here->e_value_offs);
-			void *val = s->base + offs;
-			size_t size = EXT3_XATTR_SIZE(
-				le32_to_cpu(s->here->e_value_size));
-
-			if (i->value && size == EXT3_XATTR_SIZE(i->value_len)) {
-				/* The old and the new value have the same
-				   size. Just replace. */
-				s->here->e_value_size =
-					cpu_to_le32(i->value_len);
-				memset(val + size - EXT3_XATTR_PAD, 0,
-				       EXT3_XATTR_PAD); /* Clear pad bytes. */
-				memcpy(val, i->value, i->value_len);
-				return 0;
-			}
-
-			/* Remove the old value. */
-			memmove(first_val + size, first_val, val - first_val);
-			memset(first_val, 0, size);
-			s->here->e_value_size = 0;
-			s->here->e_value_offs = 0;
-			min_offs += size;
-
-			/* Adjust all value offsets. */
-			last = s->first;
-			while (!IS_LAST_ENTRY(last)) {
-				size_t o = le16_to_cpu(last->e_value_offs);
-				if (!last->e_value_block &&
-				    last->e_value_size && o < offs)
-					last->e_value_offs =
-						cpu_to_le16(o + size);
-				last = EXT3_XATTR_NEXT(last);
-			}
-		}
-		if (!i->value) {
-			/* Remove the old name. */
-			size_t size = EXT3_XATTR_LEN(name_len);
-			last = ENTRY((void *)last - size);
-			memmove(s->here, (void *)s->here + size,
-				(void *)last - (void *)s->here + sizeof(__u32));
-			memset(last, 0, size);
-		}
-	}
-
-	if (i->value) {
-		/* Insert the new value. */
-		s->here->e_value_size = cpu_to_le32(i->value_len);
-		if (i->value_len) {
-			size_t size = EXT3_XATTR_SIZE(i->value_len);
-			void *val = s->base + min_offs - size;
-			s->here->e_value_offs = cpu_to_le16(min_offs - size);
-			memset(val + size - EXT3_XATTR_PAD, 0,
-			       EXT3_XATTR_PAD); /* Clear the pad bytes. */
-			memcpy(val, i->value, i->value_len);
-		}
-	}
-	return 0;
-}
-
-struct ext3_xattr_block_find {
-	struct ext3_xattr_search s;
-	struct buffer_head *bh;
-};
-
-static int
-ext3_xattr_block_find(struct inode *inode, struct ext3_xattr_info *i,
-		      struct ext3_xattr_block_find *bs)
-{
-	struct super_block *sb = inode->i_sb;
-	int error;
-
-	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
-		  i->name_index, i->name, i->value, (long)i->value_len);
-
-	if (EXT3_I(inode)->i_file_acl) {
-		/* The inode already has an extended attribute block. */
-		bs->bh = sb_bread(sb, EXT3_I(inode)->i_file_acl);
-		error = -EIO;
-		if (!bs->bh)
-			goto cleanup;
-		ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
-			atomic_read(&(bs->bh->b_count)),
-			le32_to_cpu(BHDR(bs->bh)->h_refcount));
-		if (ext3_xattr_check_block(bs->bh)) {
-			ext3_error(sb, __func__,
-				"inode %lu: bad block "E3FSBLK, inode->i_ino,
-				EXT3_I(inode)->i_file_acl);
-			error = -EIO;
-			goto cleanup;
-		}
-		/* Find the named attribute. */
-		bs->s.base = BHDR(bs->bh);
-		bs->s.first = BFIRST(bs->bh);
-		bs->s.end = bs->bh->b_data + bs->bh->b_size;
-		bs->s.here = bs->s.first;
-		error = ext3_xattr_find_entry(&bs->s.here, i->name_index,
-					      i->name, bs->bh->b_size, 1);
-		if (error && error != -ENODATA)
-			goto cleanup;
-		bs->s.not_found = error;
-	}
-	error = 0;
-
-cleanup:
-	return error;
-}
-
-static int
-ext3_xattr_block_set(handle_t *handle, struct inode *inode,
-		     struct ext3_xattr_info *i,
-		     struct ext3_xattr_block_find *bs)
-{
-	struct super_block *sb = inode->i_sb;
-	struct buffer_head *new_bh = NULL;
-	struct ext3_xattr_search *s = &bs->s;
-	struct mb_cache_entry *ce = NULL;
-	int error = 0;
-
-#define header(x) ((struct ext3_xattr_header *)(x))
-
-	if (i->value && i->value_len > sb->s_blocksize)
-		return -ENOSPC;
-	if (s->base) {
-		ce = mb_cache_entry_get(ext3_xattr_cache, bs->bh->b_bdev,
-					bs->bh->b_blocknr);
-		error = ext3_journal_get_write_access(handle, bs->bh);
-		if (error)
-			goto cleanup;
-		lock_buffer(bs->bh);
-
-		if (header(s->base)->h_refcount == cpu_to_le32(1)) {
-			if (ce) {
-				mb_cache_entry_free(ce);
-				ce = NULL;
-			}
-			ea_bdebug(bs->bh, "modifying in-place");
-			error = ext3_xattr_set_entry(i, s);
-			if (!error) {
-				if (!IS_LAST_ENTRY(s->first))
-					ext3_xattr_rehash(header(s->base),
-							  s->here);
-				ext3_xattr_cache_insert(bs->bh);
-			}
-			unlock_buffer(bs->bh);
-			if (error == -EIO)
-				goto bad_block;
-			if (!error)
-				error = ext3_journal_dirty_metadata(handle,
-								    bs->bh);
-			if (error)
-				goto cleanup;
-			goto inserted;
-		} else {
-			int offset = (char *)s->here - bs->bh->b_data;
-
-			unlock_buffer(bs->bh);
-			journal_release_buffer(handle, bs->bh);
-
-			if (ce) {
-				mb_cache_entry_release(ce);
-				ce = NULL;
-			}
-			ea_bdebug(bs->bh, "cloning");
-			s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
-			error = -ENOMEM;
-			if (s->base == NULL)
-				goto cleanup;
-			memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
-			s->first = ENTRY(header(s->base)+1);
-			header(s->base)->h_refcount = cpu_to_le32(1);
-			s->here = ENTRY(s->base + offset);
-			s->end = s->base + bs->bh->b_size;
-		}
-	} else {
-		/* Allocate a buffer where we construct the new block. */
-		s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
-		/* assert(header == s->base) */
-		error = -ENOMEM;
-		if (s->base == NULL)
-			goto cleanup;
-		header(s->base)->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC);
-		header(s->base)->h_blocks = cpu_to_le32(1);
-		header(s->base)->h_refcount = cpu_to_le32(1);
-		s->first = ENTRY(header(s->base)+1);
-		s->here = ENTRY(header(s->base)+1);
-		s->end = s->base + sb->s_blocksize;
-	}
-
-	error = ext3_xattr_set_entry(i, s);
-	if (error == -EIO)
-		goto bad_block;
-	if (error)
-		goto cleanup;
-	if (!IS_LAST_ENTRY(s->first))
-		ext3_xattr_rehash(header(s->base), s->here);
-
-inserted:
-	if (!IS_LAST_ENTRY(s->first)) {
-		new_bh = ext3_xattr_cache_find(inode, header(s->base), &ce);
-		if (new_bh) {
-			/* We found an identical block in the cache. */
-			if (new_bh == bs->bh)
-				ea_bdebug(new_bh, "keeping");
-			else {
-				/* The old block is released after updating
-				   the inode. */
-				error = dquot_alloc_block(inode, 1);
-				if (error)
-					goto cleanup;
-				error = ext3_journal_get_write_access(handle,
-								      new_bh);
-				if (error)
-					goto cleanup_dquot;
-				lock_buffer(new_bh);
-				le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
-				ea_bdebug(new_bh, "reusing; refcount now=%d",
-					le32_to_cpu(BHDR(new_bh)->h_refcount));
-				unlock_buffer(new_bh);
-				error = ext3_journal_dirty_metadata(handle,
-								    new_bh);
-				if (error)
-					goto cleanup_dquot;
-			}
-			mb_cache_entry_release(ce);
-			ce = NULL;
-		} else if (bs->bh && s->base == bs->bh->b_data) {
-			/* We were modifying this block in-place. */
-			ea_bdebug(bs->bh, "keeping this block");
-			new_bh = bs->bh;
-			get_bh(new_bh);
-		} else {
-			/* We need to allocate a new block */
-			ext3_fsblk_t goal = ext3_group_first_block_no(sb,
-						EXT3_I(inode)->i_block_group);
-			ext3_fsblk_t block;
-
-			/*
-			 * Protect us against concurrent allocations to the
-			 * same inode from ext3_..._writepage(). Reservation
-			 * code does not expect racing allocations.
-			 */
-			mutex_lock(&EXT3_I(inode)->truncate_mutex);
-			block = ext3_new_block(handle, inode, goal, &error);
-			mutex_unlock(&EXT3_I(inode)->truncate_mutex);
-			if (error)
-				goto cleanup;
-			ea_idebug(inode, "creating block %d", block);
-
-			new_bh = sb_getblk(sb, block);
-			if (unlikely(!new_bh)) {
-getblk_failed:
-				ext3_free_blocks(handle, inode, block, 1);
-				error = -ENOMEM;
-				goto cleanup;
-			}
-			lock_buffer(new_bh);
-			error = ext3_journal_get_create_access(handle, new_bh);
-			if (error) {
-				unlock_buffer(new_bh);
-				goto getblk_failed;
-			}
-			memcpy(new_bh->b_data, s->base, new_bh->b_size);
-			set_buffer_uptodate(new_bh);
-			unlock_buffer(new_bh);
-			ext3_xattr_cache_insert(new_bh);
-			error = ext3_journal_dirty_metadata(handle, new_bh);
-			if (error)
-				goto cleanup;
-		}
-	}
-
-	/* Update the inode. */
-	EXT3_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
-
-	/* Drop the previous xattr block. */
-	if (bs->bh && bs->bh != new_bh)
-		ext3_xattr_release_block(handle, inode, bs->bh);
-	error = 0;
-
-cleanup:
-	if (ce)
-		mb_cache_entry_release(ce);
-	brelse(new_bh);
-	if (!(bs->bh && s->base == bs->bh->b_data))
-		kfree(s->base);
-
-	return error;
-
-cleanup_dquot:
-	dquot_free_block(inode, 1);
-	goto cleanup;
-
-bad_block:
-	ext3_error(inode->i_sb, __func__,
-		   "inode %lu: bad block "E3FSBLK, inode->i_ino,
-		   EXT3_I(inode)->i_file_acl);
-	goto cleanup;
-
-#undef header
-}
-
-struct ext3_xattr_ibody_find {
-	struct ext3_xattr_search s;
-	struct ext3_iloc iloc;
-};
-
-static int
-ext3_xattr_ibody_find(struct inode *inode, struct ext3_xattr_info *i,
-		      struct ext3_xattr_ibody_find *is)
-{
-	struct ext3_xattr_ibody_header *header;
-	struct ext3_inode *raw_inode;
-	int error;
-
-	if (EXT3_I(inode)->i_extra_isize == 0)
-		return 0;
-	raw_inode = ext3_raw_inode(&is->iloc);
-	header = IHDR(inode, raw_inode);
-	is->s.base = is->s.first = IFIRST(header);
-	is->s.here = is->s.first;
-	is->s.end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-	if (ext3_test_inode_state(inode, EXT3_STATE_XATTR)) {
-		error = ext3_xattr_check_names(IFIRST(header), is->s.end);
-		if (error)
-			return error;
-		/* Find the named attribute. */
-		error = ext3_xattr_find_entry(&is->s.here, i->name_index,
-					      i->name, is->s.end -
-					      (void *)is->s.base, 0);
-		if (error && error != -ENODATA)
-			return error;
-		is->s.not_found = error;
-	}
-	return 0;
-}
-
-static int
-ext3_xattr_ibody_set(handle_t *handle, struct inode *inode,
-		     struct ext3_xattr_info *i,
-		     struct ext3_xattr_ibody_find *is)
-{
-	struct ext3_xattr_ibody_header *header;
-	struct ext3_xattr_search *s = &is->s;
-	int error;
-
-	if (EXT3_I(inode)->i_extra_isize == 0)
-		return -ENOSPC;
-	error = ext3_xattr_set_entry(i, s);
-	if (error)
-		return error;
-	header = IHDR(inode, ext3_raw_inode(&is->iloc));
-	if (!IS_LAST_ENTRY(s->first)) {
-		header->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC);
-		ext3_set_inode_state(inode, EXT3_STATE_XATTR);
-	} else {
-		header->h_magic = cpu_to_le32(0);
-		ext3_clear_inode_state(inode, EXT3_STATE_XATTR);
-	}
-	return 0;
-}
-
-/*
- * ext3_xattr_set_handle()
- *
- * Create, replace or remove an extended attribute for this inode.  Value
- * is NULL to remove an existing extended attribute, and non-NULL to
- * either replace an existing extended attribute, or create a new extended
- * attribute. The flags XATTR_REPLACE and XATTR_CREATE
- * specify that an extended attribute must exist and must not exist
- * prior to the call, respectively.
- *
- * Returns 0, or a negative error number on failure.
- */
-int
-ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
-		      const char *name, const void *value, size_t value_len,
-		      int flags)
-{
-	struct ext3_xattr_info i = {
-		.name_index = name_index,
-		.name = name,
-		.value = value,
-		.value_len = value_len,
-	};
-	struct ext3_xattr_ibody_find is = {
-		.s = { .not_found = -ENODATA, },
-	};
-	struct ext3_xattr_block_find bs = {
-		.s = { .not_found = -ENODATA, },
-	};
-	int error;
-
-	if (!name)
-		return -EINVAL;
-	if (strlen(name) > 255)
-		return -ERANGE;
-	down_write(&EXT3_I(inode)->xattr_sem);
-	error = ext3_get_inode_loc(inode, &is.iloc);
-	if (error)
-		goto cleanup;
-
-	error = ext3_journal_get_write_access(handle, is.iloc.bh);
-	if (error)
-		goto cleanup;
-
-	if (ext3_test_inode_state(inode, EXT3_STATE_NEW)) {
-		struct ext3_inode *raw_inode = ext3_raw_inode(&is.iloc);
-		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
-		ext3_clear_inode_state(inode, EXT3_STATE_NEW);
-	}
-
-	error = ext3_xattr_ibody_find(inode, &i, &is);
-	if (error)
-		goto cleanup;
-	if (is.s.not_found)
-		error = ext3_xattr_block_find(inode, &i, &bs);
-	if (error)
-		goto cleanup;
-	if (is.s.not_found && bs.s.not_found) {
-		error = -ENODATA;
-		if (flags & XATTR_REPLACE)
-			goto cleanup;
-		error = 0;
-		if (!value)
-			goto cleanup;
-	} else {
-		error = -EEXIST;
-		if (flags & XATTR_CREATE)
-			goto cleanup;
-	}
-	if (!value) {
-		if (!is.s.not_found)
-			error = ext3_xattr_ibody_set(handle, inode, &i, &is);
-		else if (!bs.s.not_found)
-			error = ext3_xattr_block_set(handle, inode, &i, &bs);
-	} else {
-		error = ext3_xattr_ibody_set(handle, inode, &i, &is);
-		if (!error && !bs.s.not_found) {
-			i.value = NULL;
-			error = ext3_xattr_block_set(handle, inode, &i, &bs);
-		} else if (error == -ENOSPC) {
-			if (EXT3_I(inode)->i_file_acl && !bs.s.base) {
-				error = ext3_xattr_block_find(inode, &i, &bs);
-				if (error)
-					goto cleanup;
-			}
-			error = ext3_xattr_block_set(handle, inode, &i, &bs);
-			if (error)
-				goto cleanup;
-			if (!is.s.not_found) {
-				i.value = NULL;
-				error = ext3_xattr_ibody_set(handle, inode, &i,
-							     &is);
-			}
-		}
-	}
-	if (!error) {
-		ext3_xattr_update_super_block(handle, inode->i_sb);
-		inode->i_ctime = CURRENT_TIME_SEC;
-		error = ext3_mark_iloc_dirty(handle, inode, &is.iloc);
-		/*
-		 * The bh is consumed by ext3_mark_iloc_dirty, even with
-		 * error != 0.
-		 */
-		is.iloc.bh = NULL;
-		if (IS_SYNC(inode))
-			handle->h_sync = 1;
-	}
-
-cleanup:
-	brelse(is.iloc.bh);
-	brelse(bs.bh);
-	up_write(&EXT3_I(inode)->xattr_sem);
-	return error;
-}
-
-/*
- * ext3_xattr_set()
- *
- * Like ext3_xattr_set_handle, but start from an inode. This extended
- * attribute modification is a filesystem transaction by itself.
- *
- * Returns 0, or a negative error number on failure.
- */
-int
-ext3_xattr_set(struct inode *inode, int name_index, const char *name,
-	       const void *value, size_t value_len, int flags)
-{
-	handle_t *handle;
-	int error, retries = 0;
-
-retry:
-	handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS(inode->i_sb));
-	if (IS_ERR(handle)) {
-		error = PTR_ERR(handle);
-	} else {
-		int error2;
-
-		error = ext3_xattr_set_handle(handle, inode, name_index, name,
-					      value, value_len, flags);
-		error2 = ext3_journal_stop(handle);
-		if (error == -ENOSPC &&
-		    ext3_should_retry_alloc(inode->i_sb, &retries))
-			goto retry;
-		if (error == 0)
-			error = error2;
-	}
-
-	return error;
-}
-
-/*
- * ext3_xattr_delete_inode()
- *
- * Free extended attribute resources associated with this inode. This
- * is called immediately before an inode is freed. We have exclusive
- * access to the inode.
- */
-void
-ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
-{
-	struct buffer_head *bh = NULL;
-
-	if (!EXT3_I(inode)->i_file_acl)
-		goto cleanup;
-	bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
-	if (!bh) {
-		ext3_error(inode->i_sb, __func__,
-			"inode %lu: block "E3FSBLK" read error", inode->i_ino,
-			EXT3_I(inode)->i_file_acl);
-		goto cleanup;
-	}
-	if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
-	    BHDR(bh)->h_blocks != cpu_to_le32(1)) {
-		ext3_error(inode->i_sb, __func__,
-			"inode %lu: bad block "E3FSBLK, inode->i_ino,
-			EXT3_I(inode)->i_file_acl);
-		goto cleanup;
-	}
-	ext3_xattr_release_block(handle, inode, bh);
-	EXT3_I(inode)->i_file_acl = 0;
-
-cleanup:
-	brelse(bh);
-}
-
-/*
- * ext3_xattr_put_super()
- *
- * This is called when a file system is unmounted.
- */
-void
-ext3_xattr_put_super(struct super_block *sb)
-{
-	mb_cache_shrink(sb->s_bdev);
-}
-
-/*
- * ext3_xattr_cache_insert()
- *
- * Create a new entry in the extended attribute cache, and insert
- * it unless such an entry is already in the cache.
- *
- * Returns nothing; allocation or insertion failures are logged and ignored.
- */
-static void
-ext3_xattr_cache_insert(struct buffer_head *bh)
-{
-	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
-	struct mb_cache_entry *ce;
-	int error;
-
-	ce = mb_cache_entry_alloc(ext3_xattr_cache, GFP_NOFS);
-	if (!ce) {
-		ea_bdebug(bh, "out of memory");
-		return;
-	}
-	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
-	if (error) {
-		mb_cache_entry_free(ce);
-		if (error == -EBUSY) {
-			ea_bdebug(bh, "already in cache");
-			error = 0;
-		}
-	} else {
-		ea_bdebug(bh, "inserting [%x]", (int)hash);
-		mb_cache_entry_release(ce);
-	}
-}
-
-/*
- * ext3_xattr_cmp()
- *
- * Compare two extended attribute blocks for equality.
- *
- * Returns 0 if the blocks are equal, 1 if they differ, and
- * a negative error number on errors.
- */
-static int
-ext3_xattr_cmp(struct ext3_xattr_header *header1,
-	       struct ext3_xattr_header *header2)
-{
-	struct ext3_xattr_entry *entry1, *entry2;
-
-	entry1 = ENTRY(header1+1);
-	entry2 = ENTRY(header2+1);
-	while (!IS_LAST_ENTRY(entry1)) {
-		if (IS_LAST_ENTRY(entry2))
-			return 1;
-		if (entry1->e_hash != entry2->e_hash ||
-		    entry1->e_name_index != entry2->e_name_index ||
-		    entry1->e_name_len != entry2->e_name_len ||
-		    entry1->e_value_size != entry2->e_value_size ||
-		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
-			return 1;
-		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
-			return -EIO;
-		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
-			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
-			   le32_to_cpu(entry1->e_value_size)))
-			return 1;
-
-		entry1 = EXT3_XATTR_NEXT(entry1);
-		entry2 = EXT3_XATTR_NEXT(entry2);
-	}
-	if (!IS_LAST_ENTRY(entry2))
-		return 1;
-	return 0;
-}
-
-/*
- * ext3_xattr_cache_find()
- *
- * Find an identical extended attribute block.
- *
- * Returns a pointer to the block found, or NULL if such a block was
- * not found or an error occurred.
- */
-static struct buffer_head *
-ext3_xattr_cache_find(struct inode *inode, struct ext3_xattr_header *header,
-		      struct mb_cache_entry **pce)
-{
-	__u32 hash = le32_to_cpu(header->h_hash);
-	struct mb_cache_entry *ce;
-
-	if (!header->h_hash)
-		return NULL;  /* never share */
-	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
-again:
-	ce = mb_cache_entry_find_first(ext3_xattr_cache, inode->i_sb->s_bdev,
-				       hash);
-	while (ce) {
-		struct buffer_head *bh;
-
-		if (IS_ERR(ce)) {
-			if (PTR_ERR(ce) == -EAGAIN)
-				goto again;
-			break;
-		}
-		bh = sb_bread(inode->i_sb, ce->e_block);
-		if (!bh) {
-			ext3_error(inode->i_sb, __func__,
-				"inode %lu: block %lu read error",
-				inode->i_ino, (unsigned long) ce->e_block);
-		} else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
-				EXT3_XATTR_REFCOUNT_MAX) {
-			ea_idebug(inode, "block %lu refcount %d>=%d",
-				  (unsigned long) ce->e_block,
-				  le32_to_cpu(BHDR(bh)->h_refcount),
-					  EXT3_XATTR_REFCOUNT_MAX);
-		} else if (ext3_xattr_cmp(header, BHDR(bh)) == 0) {
-			*pce = ce;
-			return bh;
-		}
-		brelse(bh);
-		ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
-	}
-	return NULL;
-}
-
-#define NAME_HASH_SHIFT 5
-#define VALUE_HASH_SHIFT 16
-
-/*
- * ext3_xattr_hash_entry()
- *
- * Compute the hash of an extended attribute.
- */
-static inline void ext3_xattr_hash_entry(struct ext3_xattr_header *header,
-					 struct ext3_xattr_entry *entry)
-{
-	__u32 hash = 0;
-	char *name = entry->e_name;
-	int n;
-
-	for (n=0; n < entry->e_name_len; n++) {
-		hash = (hash << NAME_HASH_SHIFT) ^
-		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
-		       *name++;
-	}
-
-	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
-		__le32 *value = (__le32 *)((char *)header +
-			le16_to_cpu(entry->e_value_offs));
-		for (n = (le32_to_cpu(entry->e_value_size) +
-		     EXT3_XATTR_ROUND) >> EXT3_XATTR_PAD_BITS; n; n--) {
-			hash = (hash << VALUE_HASH_SHIFT) ^
-			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
-			       le32_to_cpu(*value++);
-		}
-	}
-	entry->e_hash = cpu_to_le32(hash);
-}
-
-#undef NAME_HASH_SHIFT
-#undef VALUE_HASH_SHIFT
-
-#define BLOCK_HASH_SHIFT 16
-
-/*
- * ext3_xattr_rehash()
- *
- * Re-compute the extended attribute hash value after an entry has changed.
- */
-static void ext3_xattr_rehash(struct ext3_xattr_header *header,
-			      struct ext3_xattr_entry *entry)
-{
-	struct ext3_xattr_entry *here;
-	__u32 hash = 0;
-
-	ext3_xattr_hash_entry(header, entry);
-	here = ENTRY(header+1);
-	while (!IS_LAST_ENTRY(here)) {
-		if (!here->e_hash) {
-			/* Block is not shared if an entry's hash value == 0 */
-			hash = 0;
-			break;
-		}
-		hash = (hash << BLOCK_HASH_SHIFT) ^
-		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
-		       le32_to_cpu(here->e_hash);
-		here = EXT3_XATTR_NEXT(here);
-	}
-	header->h_hash = cpu_to_le32(hash);
-}
-
-#undef BLOCK_HASH_SHIFT
-
-int __init
-init_ext3_xattr(void)
-{
-	ext3_xattr_cache = mb_cache_create("ext3_xattr", 6);
-	if (!ext3_xattr_cache)
-		return -ENOMEM;
-	return 0;
-}
-
-void
-exit_ext3_xattr(void)
-{
-	if (ext3_xattr_cache)
-		mb_cache_destroy(ext3_xattr_cache);
-	ext3_xattr_cache = NULL;
-}
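The name hash that the removed ext3_xattr_hash_entry() computed is a simple
rotate-xor over the name bytes. A minimal user-space sketch of the same
arithmetic (illustrative only; the sample attribute name is hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NAME_HASH_SHIFT 5

/* Rotate-left by NAME_HASH_SHIFT, then xor in the next name byte,
 * exactly as ext3_xattr_hash_entry() did for the name part. */
static uint32_t xattr_name_hash(const char *name, size_t len)
{
	uint32_t hash = 0;
	size_t n;

	for (n = 0; n < len; n++)
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8 * sizeof(hash) - NAME_HASH_SHIFT)) ^
		       (unsigned char)name[n];
	return hash;
}

int main(void)
{
	const char *name = "user.comment";	/* hypothetical xattr name */

	printf("hash=%#x\n", xattr_name_hash(name, strlen(name)));
	return 0;
}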
diff --git a/fs/ext3/xattr.h b/fs/ext3/xattr.h
deleted file mode 100644
index 32e93eb..0000000
--- a/fs/ext3/xattr.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
-  File: fs/ext3/xattr.h
-
-  On-disk format of extended attributes for the ext3 filesystem.
-
-  (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
-*/
-
-#include <linux/xattr.h>
-
-/* Magic value in attribute blocks */
-#define EXT3_XATTR_MAGIC		0xEA020000
-
-/* Maximum number of references to one attribute block */
-#define EXT3_XATTR_REFCOUNT_MAX		1024
-
-/* Name indexes */
-#define EXT3_XATTR_INDEX_USER			1
-#define EXT3_XATTR_INDEX_POSIX_ACL_ACCESS	2
-#define EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT	3
-#define EXT3_XATTR_INDEX_TRUSTED		4
-#define	EXT3_XATTR_INDEX_LUSTRE			5
-#define EXT3_XATTR_INDEX_SECURITY	        6
-
-struct ext3_xattr_header {
-	__le32	h_magic;	/* magic number for identification */
-	__le32	h_refcount;	/* reference count */
-	__le32	h_blocks;	/* number of disk blocks used */
-	__le32	h_hash;		/* hash value of all attributes */
-	__u32	h_reserved[4];	/* zero right now */
-};
-
-struct ext3_xattr_ibody_header {
-	__le32	h_magic;	/* magic number for identification */
-};
-
-struct ext3_xattr_entry {
-	__u8	e_name_len;	/* length of name */
-	__u8	e_name_index;	/* attribute name index */
-	__le16	e_value_offs;	/* offset in disk block of value */
-	__le32	e_value_block;	/* disk block attribute is stored on (n/i) */
-	__le32	e_value_size;	/* size of attribute value */
-	__le32	e_hash;		/* hash value of name and value */
-	char	e_name[0];	/* attribute name */
-};
-
-#define EXT3_XATTR_PAD_BITS		2
-#define EXT3_XATTR_PAD		(1<<EXT3_XATTR_PAD_BITS)
-#define EXT3_XATTR_ROUND		(EXT3_XATTR_PAD-1)
-#define EXT3_XATTR_LEN(name_len) \
-	(((name_len) + EXT3_XATTR_ROUND + \
-	sizeof(struct ext3_xattr_entry)) & ~EXT3_XATTR_ROUND)
-#define EXT3_XATTR_NEXT(entry) \
-	( (struct ext3_xattr_entry *)( \
-	  (char *)(entry) + EXT3_XATTR_LEN((entry)->e_name_len)) )
-#define EXT3_XATTR_SIZE(size) \
-	(((size) + EXT3_XATTR_ROUND) & ~EXT3_XATTR_ROUND)
-
-# ifdef CONFIG_EXT3_FS_XATTR
-
-extern const struct xattr_handler ext3_xattr_user_handler;
-extern const struct xattr_handler ext3_xattr_trusted_handler;
-extern const struct xattr_handler ext3_xattr_security_handler;
-
-extern ssize_t ext3_listxattr(struct dentry *, char *, size_t);
-
-extern int ext3_xattr_get(struct inode *, int, const char *, void *, size_t);
-extern int ext3_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
-extern int ext3_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
-
-extern void ext3_xattr_delete_inode(handle_t *, struct inode *);
-extern void ext3_xattr_put_super(struct super_block *);
-
-extern int init_ext3_xattr(void);
-extern void exit_ext3_xattr(void);
-
-extern const struct xattr_handler *ext3_xattr_handlers[];
-
-# else  /* CONFIG_EXT3_FS_XATTR */
-
-static inline int
-ext3_xattr_get(struct inode *inode, int name_index, const char *name,
-	       void *buffer, size_t size, int flags)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int
-ext3_xattr_set(struct inode *inode, int name_index, const char *name,
-	       const void *value, size_t size, int flags)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int
-ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
-	       const char *name, const void *value, size_t size, int flags)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline void
-ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
-{
-}
-
-static inline void
-ext3_xattr_put_super(struct super_block *sb)
-{
-}
-
-static inline int
-init_ext3_xattr(void)
-{
-	return 0;
-}
-
-static inline void
-exit_ext3_xattr(void)
-{
-}
-
-#define ext3_xattr_handlers	NULL
-
-# endif  /* CONFIG_EXT3_FS_XATTR */
-
-#ifdef CONFIG_EXT3_FS_SECURITY
-extern int ext3_init_security(handle_t *handle, struct inode *inode,
-			      struct inode *dir, const struct qstr *qstr);
-#else
-static inline int ext3_init_security(handle_t *handle, struct inode *inode,
-				     struct inode *dir, const struct qstr *qstr)
-{
-	return 0;
-}
-#endif
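The EXT3_XATTR_LEN/EXT3_XATTR_SIZE macros above round entry and value sizes
up to 4-byte boundaries. A self-contained sketch of the same arithmetic;
ENTRY_HDR stands in for sizeof(struct ext3_xattr_entry), which is 16 bytes
given the field layout above:

#include <stdio.h>

#define PAD_BITS  2
#define ROUND     ((1 << PAD_BITS) - 1)	/* pad to 4-byte boundaries */
#define ENTRY_HDR 16			/* header before e_name[] */
#define XATTR_LEN(name_len)  (((name_len) + ROUND + ENTRY_HDR) & ~ROUND)
#define XATTR_SIZE(size)     (((size) + ROUND) & ~ROUND)

int main(void)
{
	/* a 7-byte name needs 16 + 8 = 24 bytes; a 7-byte value pads to 8 */
	printf("LEN(7)=%d SIZE(7)=%d\n", XATTR_LEN(7), XATTR_SIZE(7));
	return 0;
}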
diff --git a/fs/ext3/xattr_security.c b/fs/ext3/xattr_security.c
deleted file mode 100644
index c9506d5..0000000
--- a/fs/ext3/xattr_security.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * linux/fs/ext3/xattr_security.c
- * Handler for storing security labels as extended attributes.
- */
-
-#include <linux/security.h>
-#include "ext3.h"
-#include "xattr.h"
-
-static size_t
-ext3_xattr_security_list(struct dentry *dentry, char *list, size_t list_size,
-			 const char *name, size_t name_len, int type)
-{
-	const size_t prefix_len = XATTR_SECURITY_PREFIX_LEN;
-	const size_t total_len = prefix_len + name_len + 1;
-
-	if (list && total_len <= list_size) {
-		memcpy(list, XATTR_SECURITY_PREFIX, prefix_len);
-		memcpy(list+prefix_len, name, name_len);
-		list[prefix_len + name_len] = '\0';
-	}
-	return total_len;
-}
-
-static int
-ext3_xattr_security_get(struct dentry *dentry, const char *name,
-		void *buffer, size_t size, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	return ext3_xattr_get(d_inode(dentry), EXT3_XATTR_INDEX_SECURITY,
-			      name, buffer, size);
-}
-
-static int
-ext3_xattr_security_set(struct dentry *dentry, const char *name,
-		const void *value, size_t size, int flags, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	return ext3_xattr_set(d_inode(dentry), EXT3_XATTR_INDEX_SECURITY,
-			      name, value, size, flags);
-}
-
-static int ext3_initxattrs(struct inode *inode,
-			   const struct xattr *xattr_array,
-			   void *fs_info)
-{
-	const struct xattr *xattr;
-	handle_t *handle = fs_info;
-	int err = 0;
-
-	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
-		err = ext3_xattr_set_handle(handle, inode,
-					    EXT3_XATTR_INDEX_SECURITY,
-					    xattr->name, xattr->value,
-					    xattr->value_len, 0);
-		if (err < 0)
-			break;
-	}
-	return err;
-}
-
-int
-ext3_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
-		   const struct qstr *qstr)
-{
-	return security_inode_init_security(inode, dir, qstr,
-					    &ext3_initxattrs, handle);
-}
-
-const struct xattr_handler ext3_xattr_security_handler = {
-	.prefix	= XATTR_SECURITY_PREFIX,
-	.list	= ext3_xattr_security_list,
-	.get	= ext3_xattr_security_get,
-	.set	= ext3_xattr_security_set,
-};
diff --git a/fs/ext3/xattr_trusted.c b/fs/ext3/xattr_trusted.c
deleted file mode 100644
index 206cc66..0000000
--- a/fs/ext3/xattr_trusted.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * linux/fs/ext3/xattr_trusted.c
- * Handler for trusted extended attributes.
- *
- * Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
- */
-
-#include "ext3.h"
-#include "xattr.h"
-
-static size_t
-ext3_xattr_trusted_list(struct dentry *dentry, char *list, size_t list_size,
-		const char *name, size_t name_len, int type)
-{
-	const size_t prefix_len = XATTR_TRUSTED_PREFIX_LEN;
-	const size_t total_len = prefix_len + name_len + 1;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return 0;
-
-	if (list && total_len <= list_size) {
-		memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len);
-		memcpy(list+prefix_len, name, name_len);
-		list[prefix_len + name_len] = '\0';
-	}
-	return total_len;
-}
-
-static int
-ext3_xattr_trusted_get(struct dentry *dentry, const char *name,
-		       void *buffer, size_t size, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	return ext3_xattr_get(d_inode(dentry), EXT3_XATTR_INDEX_TRUSTED,
-			      name, buffer, size);
-}
-
-static int
-ext3_xattr_trusted_set(struct dentry *dentry, const char *name,
-		const void *value, size_t size, int flags, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	return ext3_xattr_set(d_inode(dentry), EXT3_XATTR_INDEX_TRUSTED, name,
-			      value, size, flags);
-}
-
-const struct xattr_handler ext3_xattr_trusted_handler = {
-	.prefix	= XATTR_TRUSTED_PREFIX,
-	.list	= ext3_xattr_trusted_list,
-	.get	= ext3_xattr_trusted_get,
-	.set	= ext3_xattr_trusted_set,
-};
diff --git a/fs/ext3/xattr_user.c b/fs/ext3/xattr_user.c
deleted file mode 100644
index 021508a..0000000
--- a/fs/ext3/xattr_user.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * linux/fs/ext3/xattr_user.c
- * Handler for extended user attributes.
- *
- * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
- */
-
-#include "ext3.h"
-#include "xattr.h"
-
-static size_t
-ext3_xattr_user_list(struct dentry *dentry, char *list, size_t list_size,
-		const char *name, size_t name_len, int type)
-{
-	const size_t prefix_len = XATTR_USER_PREFIX_LEN;
-	const size_t total_len = prefix_len + name_len + 1;
-
-	if (!test_opt(dentry->d_sb, XATTR_USER))
-		return 0;
-
-	if (list && total_len <= list_size) {
-		memcpy(list, XATTR_USER_PREFIX, prefix_len);
-		memcpy(list+prefix_len, name, name_len);
-		list[prefix_len + name_len] = '\0';
-	}
-	return total_len;
-}
-
-static int
-ext3_xattr_user_get(struct dentry *dentry, const char *name, void *buffer,
-		size_t size, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	if (!test_opt(dentry->d_sb, XATTR_USER))
-		return -EOPNOTSUPP;
-	return ext3_xattr_get(d_inode(dentry), EXT3_XATTR_INDEX_USER,
-			      name, buffer, size);
-}
-
-static int
-ext3_xattr_user_set(struct dentry *dentry, const char *name,
-		const void *value, size_t size, int flags, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	if (!test_opt(dentry->d_sb, XATTR_USER))
-		return -EOPNOTSUPP;
-	return ext3_xattr_set(d_inode(dentry), EXT3_XATTR_INDEX_USER,
-			      name, value, size, flags);
-}
-
-const struct xattr_handler ext3_xattr_user_handler = {
-	.prefix	= XATTR_USER_PREFIX,
-	.list	= ext3_xattr_user_list,
-	.get	= ext3_xattr_user_get,
-	.set	= ext3_xattr_user_set,
-};
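The three deleted handler files repeat the same shape in their ->list
callbacks: emit "<prefix><name>\0" if it fits, and always return the length
the full entry would need so callers can size the buffer. A user-space
sketch of that shared pattern (names here are illustrative, not kernel API):

#include <stdio.h>
#include <string.h>

static size_t xattr_list_one(char *list, size_t list_size,
			     const char *prefix, const char *name)
{
	const size_t prefix_len = strlen(prefix);
	const size_t name_len = strlen(name);
	const size_t total_len = prefix_len + name_len + 1;

	/* only copy when a buffer was supplied and the entry fits */
	if (list && total_len <= list_size) {
		memcpy(list, prefix, prefix_len);
		memcpy(list + prefix_len, name, name_len);
		list[prefix_len + name_len] = '\0';
	}
	return total_len;	/* required size, copied or not */
}

int main(void)
{
	char buf[64];
	size_t n = xattr_list_one(buf, sizeof(buf), "user.", "comment");

	printf("%s (%zu bytes)\n", buf, n);
	return 0;
}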
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index bf8bc8a..47728da 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -1,5 +1,38 @@
+# Ext3 configs are here for backward compatibility with old configs which may
+# have EXT3_FS set but not EXT4_FS set and thus would result in non-bootable
+# kernels after the removal of the ext3 driver.
+config EXT3_FS
+	tristate "The Extended 3 (ext3) filesystem"
+	# These must match EXT4_FS selects...
+	select EXT4_FS
+	select JBD2
+	select CRC16
+	select CRYPTO
+	select CRYPTO_CRC32C
+	help
+	  This config option is here only for backward compatibility. ext3
+	  filesystem is now handled by the ext4 driver.
+
+config EXT3_FS_POSIX_ACL
+	bool "Ext3 POSIX Access Control Lists"
+	depends on EXT3_FS
+	select EXT4_FS_POSIX_ACL
+	select FS_POSIX_ACL
+	help
+	  This config option is here only for backward compatibility. ext3
+	  filesystem is now handled by the ext4 driver.
+
+config EXT3_FS_SECURITY
+	bool "Ext3 Security Labels"
+	depends on EXT3_FS
+	select EXT4_FS_SECURITY
+	help
+	  This config option is here only for backward compatibility. ext3
+	  filesystem is now handled by the ext4 driver.
+
 config EXT4_FS
 	tristate "The Extended 4 (ext4) filesystem"
+	# Please update EXT3_FS selects when changing these
 	select JBD2
 	select CRC16
 	select CRYPTO
@@ -16,26 +49,27 @@
 	  up fsck time.  For more information, please see the web pages at
 	  http://ext4.wiki.kernel.org.
 
-	  The ext4 filesystem will support mounting an ext3
-	  filesystem; while there will be some performance gains from
-	  the delayed allocation and inode table readahead, the best
-	  performance gains will require enabling ext4 features in the
-	  filesystem, or formatting a new filesystem as an ext4
-	  filesystem initially.
+	  The ext4 filesystem supports mounting an ext3 filesystem; while there
+	  are some performance gains from the delayed allocation and inode
+	  table readahead, the best performance gains require enabling ext4
+	  features in the filesystem using tune2fs, or formatting a new
+	  filesystem as an ext4 filesystem initially. Without explicitly
+	  enabling ext4 features, the on-disk filesystem format stays fully
+	  backward compatible.
 
 	  To compile this file system support as a module, choose M here. The
 	  module will be called ext4.
 
 	  If unsure, say N.
 
-config EXT4_USE_FOR_EXT23
+config EXT4_USE_FOR_EXT2
 	bool "Use ext4 for ext2/ext3 file systems"
 	depends on EXT4_FS
-	depends on EXT3_FS=n || EXT2_FS=n
+	depends on EXT2_FS=n
 	default y
 	help
-	  Allow the ext4 file system driver code to be used for ext2 or
-	  ext3 file system mounts.  This allows users to reduce their
+	  Allow the ext4 file system driver code to be used for ext2
+	  file system mounts.  This allows users to reduce their
 	  compiled kernel size by using one file system driver for
 	  ext2, ext3, and ext4 file systems.
 
diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c
index 7dc4eb5..847f919 100644
--- a/fs/ext4/crypto_fname.c
+++ b/fs/ext4/crypto_fname.c
@@ -19,7 +19,6 @@
 #include <linux/gfp.h>
 #include <linux/kernel.h>
 #include <linux/key.h>
-#include <linux/key.h>
 #include <linux/list.h>
 #include <linux/mempool.h>
 #include <linux/random.h>
@@ -329,6 +328,10 @@
 			return oname->len;
 		}
 	}
+	if (iname->len < EXT4_CRYPTO_BLOCK_SIZE) {
+		EXT4_ERROR_INODE(inode, "encrypted inode too small");
+		return -EUCLEAN;
+	}
 	if (EXT4_I(inode)->i_crypt_info)
 		return ext4_fname_decrypt(inode, iname, oname);
 
diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
index 442d24e..1d510c1 100644
--- a/fs/ext4/crypto_key.c
+++ b/fs/ext4/crypto_key.c
@@ -30,7 +30,7 @@
 
 /**
  * ext4_derive_key_aes() - Derive a key using AES-128-ECB
- * @deriving_key: Encryption key used for derivatio.
+ * @deriving_key: Encryption key used for derivation.
  * @source_key:   Source key to which to apply derivation.
  * @derived_key:  Derived key.
  *
@@ -220,6 +220,8 @@
 	BUG_ON(master_key->size != EXT4_AES_256_XTS_KEY_SIZE);
 	res = ext4_derive_key_aes(ctx.nonce, master_key->raw,
 				  raw_key);
+	if (res)
+		goto out;
 got_key:
 	ctfm = crypto_alloc_ablkcipher(cipher_str, 0, 0);
 	if (!ctfm || IS_ERR(ctfm)) {
diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c
index 02c4e5d..a640ec2 100644
--- a/fs/ext4/crypto_policy.c
+++ b/fs/ext4/crypto_policy.c
@@ -12,6 +12,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 
+#include "ext4_jbd2.h"
 #include "ext4.h"
 #include "xattr.h"
 
@@ -49,7 +50,8 @@
 	struct inode *inode, const struct ext4_encryption_policy *policy)
 {
 	struct ext4_encryption_context ctx;
-	int res = 0;
+	handle_t *handle;
+	int res, res2;
 
 	res = ext4_convert_inline_data(inode);
 	if (res)
@@ -78,11 +80,22 @@
 	BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE);
 	get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
 
+	handle = ext4_journal_start(inode, EXT4_HT_MISC,
+				    ext4_jbd2_credits_xattr(inode));
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
 	res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
 			     EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
 			     sizeof(ctx), 0);
-	if (!res)
+	if (!res) {
 		ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
+		res = ext4_mark_inode_dirty(handle, inode);
+		if (res)
+			EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
+	}
+	res2 = ext4_journal_stop(handle);
+	if (!res)
+		res = res2;
 	return res;
 }
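The crypto_policy hunk above wraps the xattr write in a journal handle and
uses the res/res2 idiom, which preserves the first error while still
reporting a failure from ext4_journal_stop(). A generalized user-space
sketch of that "first error wins" pattern (the two functions are
hypothetical stand-ins, not ext4 API):

#include <stdio.h>

static int do_work(void)  { return 0; }		/* the main operation */
static int stop_txn(void) { return -5; }	/* cleanup can also fail */

int main(void)
{
	int res = do_work();
	int res2 = stop_txn();	/* must run even if do_work() failed */

	if (!res)
		res = res2;	/* first error wins */
	printf("result=%d\n", res);
	return 0;
}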
 
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index f5e9f04..32071f5 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -187,7 +187,7 @@
 } ext4_io_end_t;
 
 struct ext4_io_submit {
-	int			io_op;
+	struct writeback_control *io_wbc;
 	struct bio		*io_bio;
 	ext4_io_end_t		*io_end;
 	sector_t		io_next_block;
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 173c1ae..619bfc1 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -721,7 +721,7 @@
 	struct ext4_group_desc *gdp = NULL;
 	struct ext4_inode_info *ei;
 	struct ext4_sb_info *sbi;
-	int ret2, err = 0;
+	int ret2, err;
 	struct inode *ret;
 	ext4_group_t i;
 	ext4_group_t flex_group;
@@ -769,7 +769,9 @@
 		inode->i_gid = dir->i_gid;
 	} else
 		inode_init_owner(inode, dir, mode);
-	dquot_initialize(inode);
+	err = dquot_initialize(inode);
+	if (err)
+		goto out;
 
 	if (!goal)
 		goal = sbi->s_inode_goal;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index cecf9aa..29f1af7 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4661,8 +4661,11 @@
 	if (error)
 		return error;
 
-	if (is_quota_modification(inode, attr))
-		dquot_initialize(inode);
+	if (is_quota_modification(inode, attr)) {
+		error = dquot_initialize(inode);
+		if (error)
+			return error;
+	}
 	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
 	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
 		handle_t *handle;
@@ -4725,6 +4728,14 @@
 				error = ext4_orphan_add(handle, inode);
 				orphan = 1;
 			}
+			/*
+			 * Update c/mtime on truncate up, ext4_truncate() will
+			 * update c/mtime in shrink case below
+			 */
+			if (!shrink) {
+				inode->i_mtime = ext4_current_time(inode);
+				inode->i_ctime = inode->i_mtime;
+			}
 			down_write(&EXT4_I(inode)->i_data_sem);
 			EXT4_I(inode)->i_disksize = attr->ia_size;
 			rc = ext4_mark_inode_dirty(handle, inode);
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 8313ca3..6eb1a61 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -69,6 +69,7 @@
 			  ext4_fsblk_t mmp_block)
 {
 	struct mmp_struct *mmp;
+	int ret;
 
 	if (*bh)
 		clear_buffer_uptodate(*bh);
@@ -76,33 +77,36 @@
 	/* This would be sb_bread(sb, mmp_block), except we need to be sure
 	 * that the MD RAID device cache has been bypassed, and that the read
 	 * is not blocked in the elevator. */
-	if (!*bh)
+	if (!*bh) {
 		*bh = sb_getblk(sb, mmp_block);
-	if (!*bh)
-		return -ENOMEM;
-	if (*bh) {
-		get_bh(*bh);
-		lock_buffer(*bh);
-		(*bh)->b_end_io = end_buffer_read_sync;
-		submit_bh(READ_SYNC | REQ_META | REQ_PRIO, *bh);
-		wait_on_buffer(*bh);
-		if (!buffer_uptodate(*bh)) {
-			brelse(*bh);
-			*bh = NULL;
+		if (!*bh) {
+			ret = -ENOMEM;
+			goto warn_exit;
 		}
 	}
-	if (unlikely(!*bh)) {
-		ext4_warning(sb, "Error while reading MMP block %llu",
-			     mmp_block);
-		return -EIO;
+
+	get_bh(*bh);
+	lock_buffer(*bh);
+	(*bh)->b_end_io = end_buffer_read_sync;
+	submit_bh(READ_SYNC | REQ_META | REQ_PRIO, *bh);
+	wait_on_buffer(*bh);
+	if (!buffer_uptodate(*bh)) {
+		brelse(*bh);
+		*bh = NULL;
+		ret = -EIO;
+		goto warn_exit;
 	}
 
 	mmp = (struct mmp_struct *)((*bh)->b_data);
-	if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC ||
-	    !ext4_mmp_csum_verify(sb, mmp))
-		return -EINVAL;
+	if (le32_to_cpu(mmp->mmp_magic) == EXT4_MMP_MAGIC &&
+	    ext4_mmp_csum_verify(sb, mmp))
+		return 0;
+	ret = -EINVAL;
 
-	return 0;
+warn_exit:
+	ext4_warning(sb, "Error %d while reading MMP block %llu",
+		     ret, mmp_block);
+	return ret;
 }
 
 /*
@@ -111,7 +115,7 @@
 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
 		    const char *function, unsigned int line, const char *msg)
 {
-	__ext4_warning(sb, function, line, msg);
+	__ext4_warning(sb, function, line, "%s", msg);
 	__ext4_warning(sb, function, line,
 		       "MMP failure info: last update time: %llu, last update "
 		       "node: %s, last update device: %s\n",
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 011dcfb..9f61e76 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2436,7 +2436,9 @@
 	struct inode *inode;
 	int err, credits, retries = 0;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
 		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
@@ -2470,7 +2472,9 @@
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
 		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
@@ -2499,7 +2503,9 @@
 	struct inode *inode;
 	int err, retries = 0;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 retry:
 	inode = ext4_new_inode_start_handle(dir, mode,
@@ -2612,7 +2618,9 @@
 	if (EXT4_DIR_LINK_MAX(dir))
 		return -EMLINK;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
 		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
@@ -2910,8 +2918,12 @@
 
 	/* Initialize quotas up front so that any resulting writes go in
 	 * a separate transaction */
-	dquot_initialize(dir);
-	dquot_initialize(d_inode(dentry));
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
+	retval = dquot_initialize(d_inode(dentry));
+	if (retval)
+		return retval;
 
 	retval = -ENOENT;
 	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
@@ -2980,8 +2992,12 @@
 	trace_ext4_unlink_enter(dir, dentry);
 	/* Initialize quotas up front so that any resulting writes go
 	 * in a separate transaction */
-	dquot_initialize(dir);
-	dquot_initialize(d_inode(dentry));
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
+	retval = dquot_initialize(d_inode(dentry));
+	if (retval)
+		return retval;
 
 	retval = -ENOENT;
 	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
@@ -3066,7 +3082,9 @@
 		goto err_free_sd;
 	}
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		goto err_free_sd;
 
 	if ((disk_link.len > EXT4_N_BLOCKS * 4)) {
 		/*
@@ -3197,7 +3215,9 @@
 	if (ext4_encrypted_inode(dir) &&
 	    !ext4_is_child_context_consistent_with_parent(dir, inode))
 		return -EPERM;
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 retry:
 	handle = ext4_journal_start(dir, EXT4_HT_DIR,
@@ -3476,13 +3496,20 @@
 	int credits;
 	u8 old_file_type;
 
-	dquot_initialize(old.dir);
-	dquot_initialize(new.dir);
+	retval = dquot_initialize(old.dir);
+	if (retval)
+		return retval;
+	retval = dquot_initialize(new.dir);
+	if (retval)
+		return retval;
 
 	/* Initialize quotas up front so that any resulting writes go
 	 * in a separate transaction */
-	if (new.inode)
-		dquot_initialize(new.inode);
+	if (new.inode) {
+		retval = dquot_initialize(new.inode);
+		if (retval)
+			return retval;
+	}
 
 	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
 	if (IS_ERR(old.bh))
@@ -3678,8 +3705,12 @@
 							   new.inode)))
 		return -EPERM;
 
-	dquot_initialize(old.dir);
-	dquot_initialize(new.dir);
+	retval = dquot_initialize(old.dir);
+	if (retval)
+		return retval;
+	retval = dquot_initialize(new.dir);
+	if (retval)
+		return retval;
 
 	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name,
 				 &old.de, &old.inlined);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 8a9d63a..84ba4d2 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -354,8 +354,10 @@
 	struct bio *bio = io->io_bio;
 
 	if (bio) {
+		int io_op = io->io_wbc->sync_mode == WB_SYNC_ALL ?
+			    WRITE_SYNC : WRITE;
 		bio_get(io->io_bio);
-		submit_bio(io->io_op, io->io_bio);
+		submit_bio(io_op, io->io_bio);
 		bio_put(io->io_bio);
 	}
 	io->io_bio = NULL;
@@ -364,7 +366,7 @@
 void ext4_io_submit_init(struct ext4_io_submit *io,
 			 struct writeback_control *wbc)
 {
-	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
+	io->io_wbc = wbc;
 	io->io_bio = NULL;
 	io->io_end = NULL;
 }
@@ -377,6 +379,7 @@
 	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 	if (!bio)
 		return -ENOMEM;
+	wbc_init_bio(io->io_wbc, bio);
 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
 	bio->bi_end_io = ext4_end_bio;
@@ -405,6 +408,7 @@
 	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
 	if (ret != bh->b_size)
 		goto submit_and_retry;
+	wbc_account_io(io->io_wbc, page, bh->b_size);
 	io->io_next_block++;
 	return 0;
 }
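The page-io.c change makes ext4_io_submit carry the writeback_control
itself instead of a precomputed io_op: the WRITE vs WRITE_SYNC decision is
deferred to submission time, and the wbc stays available for the
cgroup-writeback hooks (wbc_init_bio(), wbc_account_io()). A toy sketch of
that restructuring, with stand-in types rather than kernel definitions:

#include <stdio.h>

enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };

struct wbc { enum sync_mode sync_mode; };	/* stand-in for writeback_control */
struct io_submit { struct wbc *io_wbc; };	/* was: int io_op */

static void submit(struct io_submit *io)
{
	/* decide the op when the bio is actually submitted */
	const char *op = io->io_wbc->sync_mode == WB_SYNC_ALL ?
			 "WRITE_SYNC" : "WRITE";

	printf("submit_bio(%s, bio)\n", op);
}

int main(void)
{
	struct wbc wbc = { WB_SYNC_ALL };
	struct io_submit io = { &wbc };

	submit(&io);
	return 0;
}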
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 58987b5..a63c7b0 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -60,6 +60,7 @@
 static struct mutex ext4_li_mtx;
 static struct ext4_features *ext4_feat;
 static int ext4_mballoc_ready;
+static struct ratelimit_state ext4_mount_msg_ratelimit;
 
 static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
 			     unsigned long journal_devnum);
@@ -84,7 +85,7 @@
 static void ext4_clear_request_list(void);
 static int ext4_reserve_clusters(struct ext4_sb_info *, ext4_fsblk_t);
 
-#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
+#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
 static struct file_system_type ext2_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "ext2",
@@ -100,7 +101,6 @@
 #endif
 
 
-#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
 static struct file_system_type ext3_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "ext3",
@@ -111,9 +111,6 @@
 MODULE_ALIAS_FS("ext3");
 MODULE_ALIAS("ext3");
 #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
-#else
-#define IS_EXT3_SB(sb) (0)
-#endif
 
 static int ext4_verify_csum_type(struct super_block *sb,
 				 struct ext4_super_block *es)
@@ -325,6 +322,22 @@
 	ext4_commit_super(sb, 1);
 }
 
+/*
+ * The del_gendisk() function uninitializes the disk-specific data
+ * structures, including the bdi structure, without telling anyone
+ * else.  Once this happens, any attempt to call mark_buffer_dirty()
+ * (for example, by ext4_commit_super) will cause a kernel OOPS.
+ * This is a kludge to prevent these oopses until we can put in a proper
+ * hook in del_gendisk() to inform the VFS and file system layers.
+ */
+static int block_device_ejected(struct super_block *sb)
+{
+	struct inode *bd_inode = sb->s_bdev->bd_inode;
+	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
+
+	return bdi->dev == NULL;
+}
+
 static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
 {
 	struct super_block		*sb = journal->j_private;
@@ -1394,9 +1407,9 @@
 	{Opt_stripe, 0, MOPT_GTE0},
 	{Opt_resuid, 0, MOPT_GTE0},
 	{Opt_resgid, 0, MOPT_GTE0},
-	{Opt_journal_dev, 0, MOPT_GTE0},
-	{Opt_journal_path, 0, MOPT_STRING},
-	{Opt_journal_ioprio, 0, MOPT_GTE0},
+	{Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
+	{Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
+	{Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
 	{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
 	{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
 	{Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
@@ -1763,10 +1776,10 @@
 	}
 
 	if (sbi->s_qf_names[USRQUOTA])
-		seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
+		seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
 
 	if (sbi->s_qf_names[GRPQUOTA])
-		seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
+		seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
 #endif
 }
 
@@ -3643,6 +3656,8 @@
 		}
 		if (test_opt(sb, DELALLOC))
 			clear_opt(sb, DELALLOC);
+	} else {
+		sb->s_iflags |= SB_I_CGROUPWB;
 	}
 
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
@@ -4275,9 +4290,10 @@
 				 "the device does not support discard");
 	}
 
-	ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
-		 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
-		 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
+	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
+		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
+			 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
+			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
 
 	if (es->s_error_count)
 		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
@@ -4617,7 +4633,7 @@
 	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
 	int error = 0;
 
-	if (!sbh)
+	if (!sbh || block_device_ejected(sb))
 		return error;
 	if (buffer_write_io_error(sbh)) {
 		/*
@@ -4665,7 +4681,8 @@
 	ext4_superblock_csum_set(sb);
 	mark_buffer_dirty(sbh);
 	if (sync) {
-		error = sync_dirty_buffer(sbh);
+		error = __sync_dirty_buffer(sbh,
+			test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC);
 		if (error)
 			return error;
 
@@ -4833,10 +4850,11 @@
 		error = jbd2_journal_flush(journal);
 		if (error < 0)
 			goto out;
+
+		/* Journal blocked and flushed, clear needs_recovery flag. */
+		EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
 	}
 
-	/* Journal blocked and flushed, clear needs_recovery flag. */
-	EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
 	error = ext4_commit_super(sb, 1);
 out:
 	if (journal)
@@ -4854,8 +4872,11 @@
 	if (sb->s_flags & MS_RDONLY)
 		return 0;
 
-	/* Reset the needs_recovery flag before the fs is unlocked. */
-	EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+	if (EXT4_SB(sb)->s_journal) {
+		/* Reset the needs_recovery flag before the fs is unlocked. */
+		EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+	}
+
 	ext4_commit_super(sb, 1);
 	return 0;
 }
@@ -5500,7 +5521,7 @@
 	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
 }
 
-#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
+#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
 static inline void register_as_ext2(void)
 {
 	int err = register_filesystem(&ext2_fs_type);
@@ -5530,7 +5551,6 @@
 static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
 #endif
 
-#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
 static inline void register_as_ext3(void)
 {
 	int err = register_filesystem(&ext3_fs_type);
@@ -5556,11 +5576,6 @@
 		return 0;
 	return 1;
 }
-#else
-static inline void register_as_ext3(void) { }
-static inline void unregister_as_ext3(void) { }
-static inline int ext3_feature_set_ok(struct super_block *sb) { return 0; }
-#endif
 
 static struct file_system_type ext4_fs_type = {
 	.owner		= THIS_MODULE,
@@ -5610,6 +5625,7 @@
 {
 	int i, err;
 
+	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
 	ext4_li_info = NULL;
 	mutex_init(&ext4_li_mtx);
 
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index c629762..b0a9dc9 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -45,7 +45,7 @@
 	default y
 	help
 	  Posix Access Control Lists (ACLs) support permissions for users and
-	  gourps beyond the owner/group/world scheme.
+	  groups beyond the owner/group/world scheme.
 
 	  To learn more about Access Control Lists, visit the POSIX ACLs for
 	  Linux website <http://acl.bestbits.at/>.
diff --git a/fs/f2fs/Makefile b/fs/f2fs/Makefile
index 396be1a..08e101e 100644
--- a/fs/f2fs/Makefile
+++ b/fs/f2fs/Makefile
@@ -2,6 +2,7 @@
 
 f2fs-y		:= dir.o file.o inode.o namei.o hash.o super.o inline.o
 f2fs-y		+= checkpoint.o gc.o data.o node.o segment.o recovery.o
+f2fs-y		+= shrinker.o extent_cache.o
 f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o
 f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
 f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index b70bbe1..c5a38e3 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -69,14 +69,24 @@
 
 	fio.page = page;
 
-	if (f2fs_submit_page_bio(&fio))
+	if (f2fs_submit_page_bio(&fio)) {
+		f2fs_put_page(page, 1);
 		goto repeat;
+	}
 
 	lock_page(page);
 	if (unlikely(page->mapping != mapping)) {
 		f2fs_put_page(page, 1);
 		goto repeat;
 	}
+
+	/*
+	 * if there is any IO error when accessing the device, make the
+	 * filesystem read-only and make sure we do not write a checkpoint
+	 * with a non-uptodate meta page.
+	 */
+	if (unlikely(!PageUptodate(page)))
+		f2fs_stop_checkpoint(sbi);
 out:
 	return page;
 }
@@ -326,26 +336,18 @@
 static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
 {
 	struct inode_management *im = &sbi->im[type];
-	struct ino_entry *e;
+	struct ino_entry *e, *tmp;
+
+	tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);
 retry:
-	if (radix_tree_preload(GFP_NOFS)) {
-		cond_resched();
-		goto retry;
-	}
+	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
 
 	spin_lock(&im->ino_lock);
-
 	e = radix_tree_lookup(&im->ino_root, ino);
 	if (!e) {
-		e = kmem_cache_alloc(ino_entry_slab, GFP_ATOMIC);
-		if (!e) {
-			spin_unlock(&im->ino_lock);
-			radix_tree_preload_end();
-			goto retry;
-		}
+		e = tmp;
 		if (radix_tree_insert(&im->ino_root, ino, e)) {
 			spin_unlock(&im->ino_lock);
-			kmem_cache_free(ino_entry_slab, e);
 			radix_tree_preload_end();
 			goto retry;
 		}
@@ -358,6 +360,9 @@
 	}
 	spin_unlock(&im->ino_lock);
 	radix_tree_preload_end();
+
+	if (e != tmp)
+		kmem_cache_free(ino_entry_slab, tmp);
 }
 
 static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
@@ -458,24 +463,34 @@
 	__remove_ino_entry(sbi, ino, ORPHAN_INO);
 }
 
-static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
+static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
 {
-	struct inode *inode = f2fs_iget(sbi->sb, ino);
-	f2fs_bug_on(sbi, IS_ERR(inode));
+	struct inode *inode;
+
+	inode = f2fs_iget(sbi->sb, ino);
+	if (IS_ERR(inode)) {
+		/*
+		 * there should be a bug that we can't find the entry
+		 * it is a bug if the inode behind an orphan entry
+		 * cannot be found; -ENOENT is caught by f2fs_bug_on().
+		f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
+		return PTR_ERR(inode);
+	}
+
 	clear_nlink(inode);
 
 	/* truncate all the data during iput */
 	iput(inode);
+	return 0;
 }
 
-void recover_orphan_inodes(struct f2fs_sb_info *sbi)
+int recover_orphan_inodes(struct f2fs_sb_info *sbi)
 {
 	block_t start_blk, orphan_blocks, i, j;
+	int err;
 
 	if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
-		return;
-
-	set_sbi_flag(sbi, SBI_POR_DOING);
+		return 0;
 
 	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
 	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);
@@ -489,14 +504,17 @@
 		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
 		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
 			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
-			recover_orphan_inode(sbi, ino);
+			err = recover_orphan_inode(sbi, ino);
+			if (err) {
+				f2fs_put_page(page, 1);
+				return err;
+			}
 		}
 		f2fs_put_page(page, 1);
 	}
 	/* clear Orphan Flag */
 	clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
-	clear_sbi_flag(sbi, SBI_POR_DOING);
-	return;
+	return 0;
 }
 
 static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
@@ -504,7 +522,7 @@
 	struct list_head *head;
 	struct f2fs_orphan_block *orphan_blk = NULL;
 	unsigned int nentries = 0;
-	unsigned short index;
+	unsigned short index = 1;
 	unsigned short orphan_blocks;
 	struct page *page = NULL;
 	struct ino_entry *orphan = NULL;
@@ -512,11 +530,6 @@
 
 	orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);
 
-	for (index = 0; index < orphan_blocks; index++)
-		grab_meta_page(sbi, start_blk + index);
-
-	index = 1;
-
 	/*
 	 * we don't need to do spin_lock(&im->ino_lock) here, since all the
 	 * orphan inode operations are covered under f2fs_lock_op().
@@ -527,12 +540,10 @@
 	/* loop over each orphan inode entry and write it into the journal block */
 	list_for_each_entry(orphan, head, list) {
 		if (!page) {
-			page = find_get_page(META_MAPPING(sbi), start_blk++);
-			f2fs_bug_on(sbi, !page);
+			page = grab_meta_page(sbi, start_blk++);
 			orphan_blk =
 				(struct f2fs_orphan_block *)page_address(page);
 			memset(orphan_blk, 0, sizeof(*orphan_blk));
-			f2fs_put_page(page, 0);
 		}
 
 		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);
@@ -704,7 +715,8 @@
 	struct inode_entry *new;
 	int ret = 0;
 
-	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode))
+	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
+			!S_ISLNK(inode->i_mode))
 		return;
 
 	if (!S_ISDIR(inode->i_mode)) {
@@ -892,12 +904,15 @@
 	__u32 crc32 = 0;
 	int i;
 	int cp_payload_blks = __cp_payload(sbi);
+	block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
+	bool invalidate = false;
 
 	/*
 	 * This avoids conducting wrong roll-forward operations and uses
 	 * metapages, so it should be called prior to sync_meta_pages below.
 	 */
-	discard_next_dnode(sbi, NEXT_FREE_BLKADDR(sbi, curseg));
+	if (discard_next_dnode(sbi, discard_blk))
+		invalidate = true;
 
 	/* Flush all the NAT/SIT pages */
 	while (get_pages(sbi, F2FS_DIRTY_META)) {
@@ -1026,6 +1041,14 @@
 	/* wait for previous submitted meta pages writeback */
 	wait_on_all_pages_writeback(sbi);
 
+	/*
+	 * invalidate the meta page that was used temporarily for zeroing
+	 * out the block at the end of the warm node chain.
+	 */
+	if (invalidate)
+		invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
+								discard_blk);
+
 	release_dirty_inode(sbi);
 
 	if (unlikely(f2fs_cp_error(sbi)))
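The __add_ino_entry() rework above allocates the ino_entry before taking
ino_lock (with a __GFP_NOFAIL radix-tree preload) and frees the
preallocation if another thread filled the slot first, instead of doing a
GFP_ATOMIC allocation under the spinlock. A user-space sketch of that
preallocate-then-insert pattern; the linked list is an illustrative
stand-in for the radix tree:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { int ino; struct entry *next; };

static struct entry *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *lookup(int ino)
{
	struct entry *e;

	for (e = head; e; e = e->next)
		if (e->ino == ino)
			return e;
	return NULL;
}

static void add_entry(int ino)
{
	/* allocate before taking the lock, as __add_ino_entry() now does */
	struct entry *tmp = malloc(sizeof(*tmp));
	struct entry *found;

	tmp->ino = ino;
	pthread_mutex_lock(&lock);
	found = lookup(ino);
	if (!found) {
		tmp->next = head;
		head = tmp;
	}
	pthread_mutex_unlock(&lock);
	if (found)		/* lost the race: discard the preallocation */
		free(tmp);
}

int main(void)
{
	add_entry(42);
	add_entry(42);		/* second call frees its preallocated entry */
	printf("entry for 42 %s\n", lookup(42) ? "present" : "missing");
	return 0;
}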
diff --git a/fs/f2fs/crypto_key.c b/fs/f2fs/crypto_key.c
index 95b8f93..9f77de2 100644
--- a/fs/f2fs/crypto_key.c
+++ b/fs/f2fs/crypto_key.c
@@ -92,8 +92,7 @@
 	if (!ci)
 		return;
 
-	if (ci->ci_keyring_key)
-		key_put(ci->ci_keyring_key);
+	key_put(ci->ci_keyring_key);
 	crypto_free_ablkcipher(ci->ci_ctfm);
 	kmem_cache_free(f2fs_crypt_info_cachep, ci);
 }
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index c414d49..a82abe9 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -14,6 +14,7 @@
 #include <linux/mpage.h>
 #include <linux/writeback.h>
 #include <linux/backing-dev.h>
+#include <linux/pagevec.h>
 #include <linux/blkdev.h>
 #include <linux/bio.h>
 #include <linux/prefetch.h>
@@ -26,9 +27,6 @@
 #include "trace.h"
 #include <trace/events/f2fs.h>
 
-static struct kmem_cache *extent_tree_slab;
-static struct kmem_cache *extent_node_slab;
-
 static void f2fs_read_end_io(struct bio *bio)
 {
 	struct bio_vec *bvec;
@@ -92,8 +90,7 @@
 {
 	struct bio *bio;
 
-	/* No failure on bio allocation */
-	bio = bio_alloc(GFP_NOIO, npages);
+	bio = f2fs_bio_alloc(npages);
 
 	bio->bi_bdev = sbi->sb->s_bdev;
 	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
@@ -158,7 +155,6 @@
 
 	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
 		bio_put(bio);
-		f2fs_put_page(page, 1);
 		return -EFAULT;
 	}
 
@@ -266,645 +262,17 @@
 	return err;
 }
 
-static bool lookup_extent_info(struct inode *inode, pgoff_t pgofs,
-							struct extent_info *ei)
+int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
 {
-	struct f2fs_inode_info *fi = F2FS_I(inode);
-	pgoff_t start_fofs, end_fofs;
-	block_t start_blkaddr;
-
-	read_lock(&fi->ext_lock);
-	if (fi->ext.len == 0) {
-		read_unlock(&fi->ext_lock);
-		return false;
-	}
-
-	stat_inc_total_hit(inode->i_sb);
-
-	start_fofs = fi->ext.fofs;
-	end_fofs = fi->ext.fofs + fi->ext.len - 1;
-	start_blkaddr = fi->ext.blk;
-
-	if (pgofs >= start_fofs && pgofs <= end_fofs) {
-		*ei = fi->ext;
-		stat_inc_read_hit(inode->i_sb);
-		read_unlock(&fi->ext_lock);
-		return true;
-	}
-	read_unlock(&fi->ext_lock);
-	return false;
-}
-
-static bool update_extent_info(struct inode *inode, pgoff_t fofs,
-								block_t blkaddr)
-{
-	struct f2fs_inode_info *fi = F2FS_I(inode);
-	pgoff_t start_fofs, end_fofs;
-	block_t start_blkaddr, end_blkaddr;
-	int need_update = true;
-
-	write_lock(&fi->ext_lock);
-
-	start_fofs = fi->ext.fofs;
-	end_fofs = fi->ext.fofs + fi->ext.len - 1;
-	start_blkaddr = fi->ext.blk;
-	end_blkaddr = fi->ext.blk + fi->ext.len - 1;
-
-	/* Drop and initialize the matched extent */
-	if (fi->ext.len == 1 && fofs == start_fofs)
-		fi->ext.len = 0;
-
-	/* Initial extent */
-	if (fi->ext.len == 0) {
-		if (blkaddr != NULL_ADDR) {
-			fi->ext.fofs = fofs;
-			fi->ext.blk = blkaddr;
-			fi->ext.len = 1;
-		}
-		goto end_update;
-	}
-
-	/* Front merge */
-	if (fofs == start_fofs - 1 && blkaddr == start_blkaddr - 1) {
-		fi->ext.fofs--;
-		fi->ext.blk--;
-		fi->ext.len++;
-		goto end_update;
-	}
-
-	/* Back merge */
-	if (fofs == end_fofs + 1 && blkaddr == end_blkaddr + 1) {
-		fi->ext.len++;
-		goto end_update;
-	}
-
-	/* Split the existing extent */
-	if (fi->ext.len > 1 &&
-		fofs >= start_fofs && fofs <= end_fofs) {
-		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
-			fi->ext.len = fofs - start_fofs;
-		} else {
-			fi->ext.fofs = fofs + 1;
-			fi->ext.blk = start_blkaddr + fofs - start_fofs + 1;
-			fi->ext.len -= fofs - start_fofs + 1;
-		}
-	} else {
-		need_update = false;
-	}
-
-	/* Finally, if the extent is very fragmented, let's drop the cache. */
-	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
-		fi->ext.len = 0;
-		set_inode_flag(fi, FI_NO_EXTENT);
-		need_update = true;
-	}
-end_update:
-	write_unlock(&fi->ext_lock);
-	return need_update;
-}
-
-static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
-				struct extent_tree *et, struct extent_info *ei,
-				struct rb_node *parent, struct rb_node **p)
-{
-	struct extent_node *en;
-
-	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
-	if (!en)
-		return NULL;
-
-	en->ei = *ei;
-	INIT_LIST_HEAD(&en->list);
-
-	rb_link_node(&en->rb_node, parent, p);
-	rb_insert_color(&en->rb_node, &et->root);
-	et->count++;
-	atomic_inc(&sbi->total_ext_node);
-	return en;
-}
-
-static void __detach_extent_node(struct f2fs_sb_info *sbi,
-				struct extent_tree *et, struct extent_node *en)
-{
-	rb_erase(&en->rb_node, &et->root);
-	et->count--;
-	atomic_dec(&sbi->total_ext_node);
-
-	if (et->cached_en == en)
-		et->cached_en = NULL;
-}
-
-static struct extent_tree *__find_extent_tree(struct f2fs_sb_info *sbi,
-							nid_t ino)
-{
-	struct extent_tree *et;
-
-	down_read(&sbi->extent_tree_lock);
-	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
-	if (!et) {
-		up_read(&sbi->extent_tree_lock);
-		return NULL;
-	}
-	atomic_inc(&et->refcount);
-	up_read(&sbi->extent_tree_lock);
-
-	return et;
-}
-
-static struct extent_tree *__grab_extent_tree(struct inode *inode)
-{
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et;
-	nid_t ino = inode->i_ino;
-
-	down_write(&sbi->extent_tree_lock);
-	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
-	if (!et) {
-		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
-		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
-		memset(et, 0, sizeof(struct extent_tree));
-		et->ino = ino;
-		et->root = RB_ROOT;
-		et->cached_en = NULL;
-		rwlock_init(&et->lock);
-		atomic_set(&et->refcount, 0);
-		et->count = 0;
-		sbi->total_ext_tree++;
-	}
-	atomic_inc(&et->refcount);
-	up_write(&sbi->extent_tree_lock);
-
-	return et;
-}
-
-static struct extent_node *__lookup_extent_tree(struct extent_tree *et,
-							unsigned int fofs)
-{
-	struct rb_node *node = et->root.rb_node;
-	struct extent_node *en;
-
-	if (et->cached_en) {
-		struct extent_info *cei = &et->cached_en->ei;
-
-		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
-			return et->cached_en;
-	}
-
-	while (node) {
-		en = rb_entry(node, struct extent_node, rb_node);
-
-		if (fofs < en->ei.fofs) {
-			node = node->rb_left;
-		} else if (fofs >= en->ei.fofs + en->ei.len) {
-			node = node->rb_right;
-		} else {
-			et->cached_en = en;
-			return en;
-		}
-	}
-	return NULL;
-}
-
-static struct extent_node *__try_back_merge(struct f2fs_sb_info *sbi,
-				struct extent_tree *et, struct extent_node *en)
-{
-	struct extent_node *prev;
-	struct rb_node *node;
-
-	node = rb_prev(&en->rb_node);
-	if (!node)
-		return NULL;
-
-	prev = rb_entry(node, struct extent_node, rb_node);
-	if (__is_back_mergeable(&en->ei, &prev->ei)) {
-		en->ei.fofs = prev->ei.fofs;
-		en->ei.blk = prev->ei.blk;
-		en->ei.len += prev->ei.len;
-		__detach_extent_node(sbi, et, prev);
-		return prev;
-	}
-	return NULL;
-}
-
-static struct extent_node *__try_front_merge(struct f2fs_sb_info *sbi,
-				struct extent_tree *et, struct extent_node *en)
-{
-	struct extent_node *next;
-	struct rb_node *node;
-
-	node = rb_next(&en->rb_node);
-	if (!node)
-		return NULL;
-
-	next = rb_entry(node, struct extent_node, rb_node);
-	if (__is_front_mergeable(&en->ei, &next->ei)) {
-		en->ei.len += next->ei.len;
-		__detach_extent_node(sbi, et, next);
-		return next;
-	}
-	return NULL;
-}
-
-static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
-				struct extent_tree *et, struct extent_info *ei,
-				struct extent_node **den)
-{
-	struct rb_node **p = &et->root.rb_node;
-	struct rb_node *parent = NULL;
-	struct extent_node *en;
-
-	while (*p) {
-		parent = *p;
-		en = rb_entry(parent, struct extent_node, rb_node);
-
-		if (ei->fofs < en->ei.fofs) {
-			if (__is_front_mergeable(ei, &en->ei)) {
-				f2fs_bug_on(sbi, !den);
-				en->ei.fofs = ei->fofs;
-				en->ei.blk = ei->blk;
-				en->ei.len += ei->len;
-				*den = __try_back_merge(sbi, et, en);
-				return en;
-			}
-			p = &(*p)->rb_left;
-		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
-			if (__is_back_mergeable(ei, &en->ei)) {
-				f2fs_bug_on(sbi, !den);
-				en->ei.len += ei->len;
-				*den = __try_front_merge(sbi, et, en);
-				return en;
-			}
-			p = &(*p)->rb_right;
-		} else {
-			f2fs_bug_on(sbi, 1);
-		}
-	}
-
-	return __attach_extent_node(sbi, et, ei, parent, p);
-}
-
-static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
-					struct extent_tree *et, bool free_all)
-{
-	struct rb_node *node, *next;
-	struct extent_node *en;
-	unsigned int count = et->count;
-
-	node = rb_first(&et->root);
-	while (node) {
-		next = rb_next(node);
-		en = rb_entry(node, struct extent_node, rb_node);
-
-		if (free_all) {
-			spin_lock(&sbi->extent_lock);
-			if (!list_empty(&en->list))
-				list_del_init(&en->list);
-			spin_unlock(&sbi->extent_lock);
-		}
-
-		if (free_all || list_empty(&en->list)) {
-			__detach_extent_node(sbi, et, en);
-			kmem_cache_free(extent_node_slab, en);
-		}
-		node = next;
-	}
-
-	return count - et->count;
-}
-
-static void f2fs_init_extent_tree(struct inode *inode,
-						struct f2fs_extent *i_ext)
-{
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et;
-	struct extent_node *en;
 	struct extent_info ei;
+	struct inode *inode = dn->inode;
 
-	if (le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
-		return;
-
-	et = __grab_extent_tree(inode);
-
-	write_lock(&et->lock);
-	if (et->count)
-		goto out;
-
-	set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
-		le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));
-
-	en = __insert_extent_tree(sbi, et, &ei, NULL);
-	if (en) {
-		et->cached_en = en;
-
-		spin_lock(&sbi->extent_lock);
-		list_add_tail(&en->list, &sbi->extent_list);
-		spin_unlock(&sbi->extent_lock);
-	}
-out:
-	write_unlock(&et->lock);
-	atomic_dec(&et->refcount);
-}
-
-static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
-							struct extent_info *ei)
-{
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et;
-	struct extent_node *en;
-
-	trace_f2fs_lookup_extent_tree_start(inode, pgofs);
-
-	et = __find_extent_tree(sbi, inode->i_ino);
-	if (!et)
-		return false;
-
-	read_lock(&et->lock);
-	en = __lookup_extent_tree(et, pgofs);
-	if (en) {
-		*ei = en->ei;
-		spin_lock(&sbi->extent_lock);
-		if (!list_empty(&en->list))
-			list_move_tail(&en->list, &sbi->extent_list);
-		spin_unlock(&sbi->extent_lock);
-		stat_inc_read_hit(sbi->sb);
-	}
-	stat_inc_total_hit(sbi->sb);
-	read_unlock(&et->lock);
-
-	trace_f2fs_lookup_extent_tree_end(inode, pgofs, en);
-
-	atomic_dec(&et->refcount);
-	return en ? true : false;
-}
-
-static void f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs,
-							block_t blkaddr)
-{
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et;
-	struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
-	struct extent_node *den = NULL;
-	struct extent_info ei, dei;
-	unsigned int endofs;
-
-	trace_f2fs_update_extent_tree(inode, fofs, blkaddr);
-
-	et = __grab_extent_tree(inode);
-
-	write_lock(&et->lock);
-
-	/* 1. lookup and remove existing extent info in cache */
-	en = __lookup_extent_tree(et, fofs);
-	if (!en)
-		goto update_extent;
-
-	dei = en->ei;
-	__detach_extent_node(sbi, et, en);
-
-	/* 2. if extent can be split more, split and insert the left part */
-	if (dei.len > 1) {
-		/*  insert left part of split extent into cache */
-		if (fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
-			set_extent_info(&ei, dei.fofs, dei.blk,
-							fofs - dei.fofs);
-			en1 = __insert_extent_tree(sbi, et, &ei, NULL);
-		}
-
-		/* insert right part of split extent into cache */
-		endofs = dei.fofs + dei.len - 1;
-		if (endofs - fofs >= F2FS_MIN_EXTENT_LEN) {
-			set_extent_info(&ei, fofs + 1,
-				fofs - dei.fofs + dei.blk, endofs - fofs);
-			en2 = __insert_extent_tree(sbi, et, &ei, NULL);
-		}
+	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+		dn->data_blkaddr = ei.blk + index - ei.fofs;
+		return 0;
 	}
 
-update_extent:
-	/* 3. update extent in extent cache */
-	if (blkaddr) {
-		set_extent_info(&ei, fofs, blkaddr, 1);
-		en3 = __insert_extent_tree(sbi, et, &ei, &den);
-	}
-
-	/* 4. update in global extent list */
-	spin_lock(&sbi->extent_lock);
-	if (en && !list_empty(&en->list))
-		list_del(&en->list);
-	/*
-	 * en1 and en2 split from en, they will become more and more smaller
-	 * fragments after splitting several times. So if the length is smaller
-	 * than F2FS_MIN_EXTENT_LEN, we will not add them into extent tree.
-	 */
-	if (en1)
-		list_add_tail(&en1->list, &sbi->extent_list);
-	if (en2)
-		list_add_tail(&en2->list, &sbi->extent_list);
-	if (en3) {
-		if (list_empty(&en3->list))
-			list_add_tail(&en3->list, &sbi->extent_list);
-		else
-			list_move_tail(&en3->list, &sbi->extent_list);
-	}
-	if (den && !list_empty(&den->list))
-		list_del(&den->list);
-	spin_unlock(&sbi->extent_lock);
-
-	/* 5. release extent node */
-	if (en)
-		kmem_cache_free(extent_node_slab, en);
-	if (den)
-		kmem_cache_free(extent_node_slab, den);
-
-	write_unlock(&et->lock);
-	atomic_dec(&et->refcount);
-}
-
-void f2fs_preserve_extent_tree(struct inode *inode)
-{
-	struct extent_tree *et;
-	struct extent_info *ext = &F2FS_I(inode)->ext;
-	bool sync = false;
-
-	if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
-		return;
-
-	et = __find_extent_tree(F2FS_I_SB(inode), inode->i_ino);
-	if (!et) {
-		if (ext->len) {
-			ext->len = 0;
-			update_inode_page(inode);
-		}
-		return;
-	}
-
-	read_lock(&et->lock);
-	if (et->count) {
-		struct extent_node *en;
-
-		if (et->cached_en) {
-			en = et->cached_en;
-		} else {
-			struct rb_node *node = rb_first(&et->root);
-
-			if (!node)
-				node = rb_last(&et->root);
-			en = rb_entry(node, struct extent_node, rb_node);
-		}
-
-		if (__is_extent_same(ext, &en->ei))
-			goto out;
-
-		*ext = en->ei;
-		sync = true;
-	} else if (ext->len) {
-		ext->len = 0;
-		sync = true;
-	}
-out:
-	read_unlock(&et->lock);
-	atomic_dec(&et->refcount);
-
-	if (sync)
-		update_inode_page(inode);
-}
-
-void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
-{
-	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
-	struct extent_node *en, *tmp;
-	unsigned long ino = F2FS_ROOT_INO(sbi);
-	struct radix_tree_iter iter;
-	void **slot;
-	unsigned int found;
-	unsigned int node_cnt = 0, tree_cnt = 0;
-
-	if (!test_opt(sbi, EXTENT_CACHE))
-		return;
-
-	if (available_free_memory(sbi, EXTENT_CACHE))
-		return;
-
-	spin_lock(&sbi->extent_lock);
-	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
-		if (!nr_shrink--)
-			break;
-		list_del_init(&en->list);
-	}
-	spin_unlock(&sbi->extent_lock);
-
-	down_read(&sbi->extent_tree_lock);
-	while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
-				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
-		unsigned i;
-
-		ino = treevec[found - 1]->ino + 1;
-		for (i = 0; i < found; i++) {
-			struct extent_tree *et = treevec[i];
-
-			atomic_inc(&et->refcount);
-			write_lock(&et->lock);
-			node_cnt += __free_extent_tree(sbi, et, false);
-			write_unlock(&et->lock);
-			atomic_dec(&et->refcount);
-		}
-	}
-	up_read(&sbi->extent_tree_lock);
-
-	down_write(&sbi->extent_tree_lock);
-	radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter,
-							F2FS_ROOT_INO(sbi)) {
-		struct extent_tree *et = (struct extent_tree *)*slot;
-
-		if (!atomic_read(&et->refcount) && !et->count) {
-			radix_tree_delete(&sbi->extent_tree_root, et->ino);
-			kmem_cache_free(extent_tree_slab, et);
-			sbi->total_ext_tree--;
-			tree_cnt++;
-		}
-	}
-	up_write(&sbi->extent_tree_lock);
-
-	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
-}
-
-void f2fs_destroy_extent_tree(struct inode *inode)
-{
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et;
-	unsigned int node_cnt = 0;
-
-	if (!test_opt(sbi, EXTENT_CACHE))
-		return;
-
-	et = __find_extent_tree(sbi, inode->i_ino);
-	if (!et)
-		goto out;
-
-	/* free all extent info belong to this extent tree */
-	write_lock(&et->lock);
-	node_cnt = __free_extent_tree(sbi, et, true);
-	write_unlock(&et->lock);
-
-	atomic_dec(&et->refcount);
-
-	/* try to find and delete extent tree entry in radix tree */
-	down_write(&sbi->extent_tree_lock);
-	et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
-	if (!et) {
-		up_write(&sbi->extent_tree_lock);
-		goto out;
-	}
-	f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
-	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
-	kmem_cache_free(extent_tree_slab, et);
-	sbi->total_ext_tree--;
-	up_write(&sbi->extent_tree_lock);
-out:
-	trace_f2fs_destroy_extent_tree(inode, node_cnt);
-	return;
-}
-
-void f2fs_init_extent_cache(struct inode *inode, struct f2fs_extent *i_ext)
-{
-	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
-		f2fs_init_extent_tree(inode, i_ext);
-
-	write_lock(&F2FS_I(inode)->ext_lock);
-	get_extent_info(&F2FS_I(inode)->ext, *i_ext);
-	write_unlock(&F2FS_I(inode)->ext_lock);
-}
-
-static bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
-							struct extent_info *ei)
-{
-	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
-		return false;
-
-	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
-		return f2fs_lookup_extent_tree(inode, pgofs, ei);
-
-	return lookup_extent_info(inode, pgofs, ei);
-}
-
-void f2fs_update_extent_cache(struct dnode_of_data *dn)
-{
-	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
-	pgoff_t fofs;
-
-	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
-
-	if (is_inode_flag_set(fi, FI_NO_EXTENT))
-		return;
-
-	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
-							dn->ofs_in_node;
-
-	if (test_opt(F2FS_I_SB(dn->inode), EXTENT_CACHE))
-		return f2fs_update_extent_tree(dn->inode, fofs,
-							dn->data_blkaddr);
-
-	if (update_extent_info(dn->inode, fofs, dn->data_blkaddr))
-		sync_inode_page(dn);
+	return f2fs_reserve_block(dn, index);
 }
 
 struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
@@ -935,15 +303,13 @@
 
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
 	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
-	if (err) {
-		f2fs_put_page(page, 1);
-		return ERR_PTR(err);
-	}
+	if (err)
+		goto put_err;
 	f2fs_put_dnode(&dn);
 
 	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
-		f2fs_put_page(page, 1);
-		return ERR_PTR(-ENOENT);
+		err = -ENOENT;
+		goto put_err;
 	}
 got_it:
 	if (PageUptodate(page)) {
@@ -968,8 +334,12 @@
 	fio.page = page;
 	err = f2fs_submit_page_bio(&fio);
 	if (err)
-		return ERR_PTR(err);
+		goto put_err;
 	return page;
+
+put_err:
+	f2fs_put_page(page, 1);
+	return ERR_PTR(err);
 }
 
 struct page *find_data_page(struct inode *inode, pgoff_t index)
@@ -1030,7 +400,8 @@
  *
  * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
  * f2fs_unlock_op().
- * Note that, ipage is set only by make_empty_dir.
+ * Note that ipage is set only by make_empty_dir, and if any error occurs,
+ * ipage should be released by this function.
  */
 struct page *get_new_data_page(struct inode *inode,
 		struct page *ipage, pgoff_t index, bool new_i_size)
@@ -1041,8 +412,14 @@
 	int err;
 repeat:
 	page = grab_cache_page(mapping, index);
-	if (!page)
+	if (!page) {
+		/*
+		 * before exiting, we should make sure ipage will be released
+		 * if any error occurs.
+		 */
+		f2fs_put_page(ipage, 1);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	set_new_dnode(&dn, inode, ipage, NULL, 0);
 	err = f2fs_reserve_block(&dn, index);
@@ -1107,8 +484,6 @@
 
 	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
 								&sum, seg);
-
-	/* direct IO doesn't use extent cache to maximize the performance */
 	set_data_blkaddr(dn);
 
 	/* update i_size */
@@ -1117,6 +492,9 @@
 	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
 		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));
 
+	/* direct IO doesn't use extent cache to maximize the performance */
+	f2fs_drop_largest_extent(dn->inode, fofs);
+
 	return 0;
 }
 
@@ -1183,7 +561,7 @@
  *     c. give the block addresses to blockdev
  */
 static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
-			int create, bool fiemap)
+						int create, int flag)
 {
 	unsigned int maxblocks = map->m_len;
 	struct dnode_of_data dn;
@@ -1217,8 +595,19 @@
 			err = 0;
 		goto unlock_out;
 	}
-	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
-		goto put_out;
+	if (dn.data_blkaddr == NEW_ADDR) {
+		if (flag == F2FS_GET_BLOCK_BMAP) {
+			err = -ENOENT;
+			goto put_out;
+		} else if (flag == F2FS_GET_BLOCK_READ ||
+				flag == F2FS_GET_BLOCK_DIO) {
+			goto put_out;
+		}
+		/*
+		 * if it is in fiemap call path (flag = F2FS_GET_BLOCK_FIEMAP),
+		 * mark it as a mapped and unwritten block.
+		 */
+	}
 
 	if (dn.data_blkaddr != NULL_ADDR) {
 		map->m_flags = F2FS_MAP_MAPPED;
@@ -1233,6 +622,8 @@
 		map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED;
 		map->m_pblk = dn.data_blkaddr;
 	} else {
+		if (flag == F2FS_GET_BLOCK_BMAP)
+			err = -ENOENT;
 		goto put_out;
 	}
 
@@ -1255,7 +646,9 @@
 				err = 0;
 			goto unlock_out;
 		}
-		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
+
+		if (dn.data_blkaddr == NEW_ADDR &&
+				flag != F2FS_GET_BLOCK_FIEMAP)
 			goto put_out;
 
 		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
@@ -1297,7 +690,7 @@
 }
 
 static int __get_data_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh, int create, bool fiemap)
+			struct buffer_head *bh, int create, int flag)
 {
 	struct f2fs_map_blocks map;
 	int ret;
@@ -1305,7 +698,7 @@
 	map.m_lblk = iblock;
 	map.m_len = bh->b_size >> inode->i_blkbits;
 
-	ret = f2fs_map_blocks(inode, &map, create, fiemap);
+	ret = f2fs_map_blocks(inode, &map, create, flag);
 	if (!ret) {
 		map_bh(bh, inode->i_sb, map.m_pblk);
 		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
@@ -1315,15 +708,23 @@
 }
 
 static int get_data_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh_result, int create)
+			struct buffer_head *bh_result, int create, int flag)
 {
-	return __get_data_block(inode, iblock, bh_result, create, false);
+	return __get_data_block(inode, iblock, bh_result, create, flag);
 }
 
-static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
+static int get_data_block_dio(struct inode *inode, sector_t iblock,
 			struct buffer_head *bh_result, int create)
 {
-	return __get_data_block(inode, iblock, bh_result, create, true);
+	return __get_data_block(inode, iblock, bh_result, create,
+						F2FS_GET_BLOCK_DIO);
+}
+
+static int get_data_block_bmap(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh_result, int create)
+{
+	return __get_data_block(inode, iblock, bh_result, create,
+						F2FS_GET_BLOCK_BMAP);
 }
 
 static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
@@ -1367,7 +768,8 @@
 	memset(&map_bh, 0, sizeof(struct buffer_head));
 	map_bh.b_size = len;
 
-	ret = get_data_block_fiemap(inode, start_blk, &map_bh, 0);
+	ret = get_data_block(inode, start_blk, &map_bh, 0,
+					F2FS_GET_BLOCK_FIEMAP);
 	if (ret)
 		goto out;
 
@@ -1770,6 +1172,137 @@
 	return ret;
 }
 
+/*
+ * This function was copied from write_cache_pages in mm/page-writeback.c.
+ * The major change is that it writes cold data pages in a separate step
+ * from warm/hot data pages.
+ */
+static int f2fs_write_cache_pages(struct address_space *mapping,
+			struct writeback_control *wbc, writepage_t writepage,
+			void *data)
+{
+	int ret = 0;
+	int done = 0;
+	struct pagevec pvec;
+	int nr_pages;
+	pgoff_t uninitialized_var(writeback_index);
+	pgoff_t index;
+	pgoff_t end;		/* Inclusive */
+	pgoff_t done_index;
+	int cycled;
+	int range_whole = 0;
+	int tag;
+	int step = 0;
+
+	pagevec_init(&pvec, 0);
+next:
+	if (wbc->range_cyclic) {
+		writeback_index = mapping->writeback_index; /* prev offset */
+		index = writeback_index;
+		if (index == 0)
+			cycled = 1;
+		else
+			cycled = 0;
+		end = -1;
+	} else {
+		index = wbc->range_start >> PAGE_CACHE_SHIFT;
+		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+			range_whole = 1;
+		cycled = 1; /* ignore range_cyclic tests */
+	}
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+		tag = PAGECACHE_TAG_TOWRITE;
+	else
+		tag = PAGECACHE_TAG_DIRTY;
+retry:
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+		tag_pages_for_writeback(mapping, index, end);
+	done_index = index;
+	while (!done && (index <= end)) {
+		int i;
+
+		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
+			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
+		if (nr_pages == 0)
+			break;
+
+		for (i = 0; i < nr_pages; i++) {
+			struct page *page = pvec.pages[i];
+
+			if (page->index > end) {
+				done = 1;
+				break;
+			}
+
+			done_index = page->index;
+
+			lock_page(page);
+
+			if (unlikely(page->mapping != mapping)) {
+continue_unlock:
+				unlock_page(page);
+				continue;
+			}
+
+			if (!PageDirty(page)) {
+				/* someone wrote it for us */
+				goto continue_unlock;
+			}
+
+			if (step == is_cold_data(page))
+				goto continue_unlock;
+
+			if (PageWriteback(page)) {
+				if (wbc->sync_mode != WB_SYNC_NONE)
+					f2fs_wait_on_page_writeback(page, DATA);
+				else
+					goto continue_unlock;
+			}
+
+			BUG_ON(PageWriteback(page));
+			if (!clear_page_dirty_for_io(page))
+				goto continue_unlock;
+
+			ret = (*writepage)(page, wbc, data);
+			if (unlikely(ret)) {
+				if (ret == AOP_WRITEPAGE_ACTIVATE) {
+					unlock_page(page);
+					ret = 0;
+				} else {
+					done_index = page->index + 1;
+					done = 1;
+					break;
+				}
+			}
+
+			if (--wbc->nr_to_write <= 0 &&
+			    wbc->sync_mode == WB_SYNC_NONE) {
+				done = 1;
+				break;
+			}
+		}
+		pagevec_release(&pvec);
+		cond_resched();
+	}
+
+	if (step < 1) {
+		step++;
+		goto next;
+	}
+
+	if (!cycled && !done) {
+		cycled = 1;
+		index = 0;
+		end = writeback_index - 1;
+		goto retry;
+	}
+	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
+		mapping->writeback_index = done_index;
+
+	return ret;
+}
+
 static int f2fs_write_data_pages(struct address_space *mapping,
 			    struct writeback_control *wbc)
 {
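
To illustrate the two-pass walk above: on each pass, pages whose
is_cold_data() value equals the current step are skipped, so each
temperature class is collected and submitted on its own pass. A minimal
userspace sketch of that filtering follows; struct toy_page and its
fields are illustrative stand-ins, not f2fs types.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the two-pass walk; 'cold' stands in for is_cold_data(). */
    struct toy_page { int index; bool cold; bool dirty; };

    static void write_dirty_pages(struct toy_page *pages, int n)
    {
            /* each pass submits only one temperature class, mirroring
             * the "step == is_cold_data(page)" skip above */
            for (int step = 0; step < 2; step++) {
                    for (int i = 0; i < n; i++) {
                            struct toy_page *p = &pages[i];

                            if (!p->dirty || step == (int)p->cold)
                                    continue; /* wrong class for this pass */
                            printf("step %d: writing page %d (%s)\n", step,
                                   p->index, p->cold ? "cold" : "warm");
                            p->dirty = false;
                    }
            }
    }

    int main(void)
    {
            struct toy_page pages[] = {
                    { 0, false, true }, { 1, true, true }, { 2, false, true },
            };

            write_dirty_pages(pages, 3);
            return 0;
    }
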
@@ -1785,6 +1318,10 @@
 	if (!mapping->a_ops->writepage)
 		return 0;
 
+	/* skip writing if there is no dirty page in this inode */
+	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
+		return 0;
+
 	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
 			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
 			available_free_memory(sbi, DIRTY_DENTS))
@@ -1800,12 +1337,11 @@
 		mutex_lock(&sbi->writepages);
 		locked = true;
 	}
-	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
+	ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
+	f2fs_submit_merged_bio(sbi, DATA, WRITE);
 	if (locked)
 		mutex_unlock(&sbi->writepages);
 
-	f2fs_submit_merged_bio(sbi, DATA, WRITE);
-
 	remove_dirty_dir_inode(inode);
 
 	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
@@ -1832,7 +1368,8 @@
 {
 	struct inode *inode = mapping->host;
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct page *page, *ipage;
+	struct page *page = NULL;
+	struct page *ipage;
 	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
 	struct dnode_of_data dn;
 	int err = 0;
@@ -1882,25 +1419,28 @@
 		if (err)
 			goto put_fail;
 	}
-	err = f2fs_reserve_block(&dn, index);
+
+	err = f2fs_get_block(&dn, index);
 	if (err)
 		goto put_fail;
 put_next:
 	f2fs_put_dnode(&dn);
 	f2fs_unlock_op(sbi);
 
-	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
-		return 0;
-
 	f2fs_wait_on_page_writeback(page, DATA);
 
+	if (len == PAGE_CACHE_SIZE)
+		goto out_update;
+	if (PageUptodate(page))
+		goto out_clear;
+
 	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
 		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
 		unsigned end = start + len;
 
 		/* Reading beyond i_size is simple: memset to zero */
 		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
-		goto out;
+		goto out_update;
 	}
 
 	if (dn.data_blkaddr == NEW_ADDR) {
@@ -1920,7 +1460,6 @@
 
 		lock_page(page);
 		if (unlikely(!PageUptodate(page))) {
-			f2fs_put_page(page, 1);
 			err = -EIO;
 			goto fail;
 		}
@@ -1932,14 +1471,13 @@
 		/* avoid symlink page */
 		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
 			err = f2fs_decrypt_one(inode, page);
-			if (err) {
-				f2fs_put_page(page, 1);
+			if (err)
 				goto fail;
-			}
 		}
 	}
-out:
+out_update:
 	SetPageUptodate(page);
+out_clear:
 	clear_cold_data(page);
 	return 0;
 
@@ -1947,8 +1485,8 @@
 	f2fs_put_dnode(&dn);
 unlock_fail:
 	f2fs_unlock_op(sbi);
-	f2fs_put_page(page, 1);
 fail:
+	f2fs_put_page(page, 1);
 	f2fs_write_failed(mapping, pos + len);
 	return err;
 }
@@ -1979,9 +1517,6 @@
 {
 	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
 
-	if (iov_iter_rw(iter) == READ)
-		return 0;
-
 	if (offset & blocksize_mask)
 		return -EINVAL;
 
@@ -2010,15 +1545,16 @@
 	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
 		return 0;
 
-	if (check_direct_IO(inode, iter, offset))
-		return 0;
+	err = check_direct_IO(inode, iter, offset);
+	if (err)
+		return err;
 
 	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
 
 	if (iov_iter_rw(iter) == WRITE)
 		__allocate_data_blocks(inode, offset, count);
 
-	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
+	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
 	if (err < 0 && iov_iter_rw(iter) == WRITE)
 		f2fs_write_failed(mapping, offset + count);
 
@@ -2045,6 +1581,11 @@
 		else
 			inode_dec_dirty_pages(inode);
 	}
+
+	/* This is an atomic written page, so keep Private */
+	if (IS_ATOMIC_WRITTEN_PAGE(page))
+		return;
+
 	ClearPagePrivate(page);
 }
 
@@ -2054,6 +1595,10 @@
 	if (PageDirty(page))
 		return 0;
 
+	/* This is an atomic written page, so keep Private */
+	if (IS_ATOMIC_WRITTEN_PAGE(page))
+		return 0;
+
 	ClearPagePrivate(page);
 	return 1;
 }
@@ -2068,8 +1613,15 @@
 	SetPageUptodate(page);
 
 	if (f2fs_is_atomic_file(inode)) {
-		register_inmem_page(inode, page);
-		return 1;
+		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
+			register_inmem_page(inode, page);
+			return 1;
+		}
+		/*
+		 * This page has already been registered, so we just
+		 * return here.
+		 */
+		return 0;
 	}
 
 	if (!PageDirty(page)) {
@@ -2090,38 +1642,7 @@
 		if (err)
 			return err;
 	}
-	return generic_block_bmap(mapping, block, get_data_block);
-}
-
-void init_extent_cache_info(struct f2fs_sb_info *sbi)
-{
-	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
-	init_rwsem(&sbi->extent_tree_lock);
-	INIT_LIST_HEAD(&sbi->extent_list);
-	spin_lock_init(&sbi->extent_lock);
-	sbi->total_ext_tree = 0;
-	atomic_set(&sbi->total_ext_node, 0);
-}
-
-int __init create_extent_cache(void)
-{
-	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
-			sizeof(struct extent_tree));
-	if (!extent_tree_slab)
-		return -ENOMEM;
-	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
-			sizeof(struct extent_node));
-	if (!extent_node_slab) {
-		kmem_cache_destroy(extent_tree_slab);
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-void destroy_extent_cache(void)
-{
-	kmem_cache_destroy(extent_node_slab);
-	kmem_cache_destroy(extent_tree_slab);
+	return generic_block_bmap(mapping, block, get_data_block_bmap);
 }
 
 const struct address_space_operations f2fs_dblock_aops = {
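
A note on the new flag argument threaded through this file: replacing the
old bool fiemap with an int flag lets each entry point (read, direct IO,
fiemap, bmap) get its own answer for a block that was reserved but never
written (the NEW_ADDR case). A compilable sketch of that dispatch follows;
the enum mirrors the F2FS_GET_BLOCK_* values added in f2fs.h, while the
return conventions of the toy helper are illustrative only.

    #include <errno.h>
    #include <stdio.h>

    /* mirrors F2FS_GET_BLOCK_READ/DIO/FIEMAP/BMAP from f2fs.h */
    enum { GET_BLOCK_READ, GET_BLOCK_DIO, GET_BLOCK_FIEMAP, GET_BLOCK_BMAP };

    /* toy dispatch for an allocated-but-unwritten block (NEW_ADDR) */
    static int handle_new_addr(int flag)
    {
            switch (flag) {
            case GET_BLOCK_BMAP:
                    return -ENOENT;  /* bmap: report a hole */
            case GET_BLOCK_READ:
            case GET_BLOCK_DIO:
                    return 0;        /* read/direct IO: nothing to map */
            case GET_BLOCK_FIEMAP:
                    return 1;        /* fiemap: mapped + unwritten */
            default:
                    return -EINVAL;
            }
    }

    int main(void)
    {
            printf("bmap:   %d\n", handle_new_addr(GET_BLOCK_BMAP));
            printf("fiemap: %d\n", handle_new_addr(GET_BLOCK_FIEMAP));
            return 0;
    }
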
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 75176e0..d013d84 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -33,8 +33,11 @@
 	int i;
 
 	/* validation check of the segment numbers */
-	si->hit_ext = sbi->read_hit_ext;
-	si->total_ext = sbi->total_hit_ext;
+	si->hit_largest = atomic_read(&sbi->read_hit_largest);
+	si->hit_cached = atomic_read(&sbi->read_hit_cached);
+	si->hit_rbtree = atomic_read(&sbi->read_hit_rbtree);
+	si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
+	si->total_ext = atomic_read(&sbi->total_hit_ext);
 	si->ext_tree = sbi->total_ext_tree;
 	si->ext_node = atomic_read(&sbi->total_ext_node);
 	si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
@@ -49,6 +52,7 @@
 	si->valid_count = valid_user_blocks(sbi);
 	si->valid_node_count = valid_node_count(sbi);
 	si->valid_inode_count = valid_inode_count(sbi);
+	si->inline_xattr = atomic_read(&sbi->inline_xattr);
 	si->inline_inode = atomic_read(&sbi->inline_inode);
 	si->inline_dir = atomic_read(&sbi->inline_dir);
 	si->utilization = utilization(sbi);
@@ -226,6 +230,8 @@
 		seq_printf(s, "Other: %u)\n  - Data: %u\n",
 			   si->valid_node_count - si->valid_inode_count,
 			   si->valid_count - si->valid_node_count);
+		seq_printf(s, "  - Inline_xattr Inode: %u\n",
+			   si->inline_xattr);
 		seq_printf(s, "  - Inline_data Inode: %u\n",
 			   si->inline_inode);
 		seq_printf(s, "  - Inline_dentry Inode: %u\n",
@@ -276,10 +282,16 @@
 				si->bg_data_blks);
 		seq_printf(s, "  - node blocks : %d (%d)\n", si->node_blks,
 				si->bg_node_blks);
-		seq_printf(s, "\nExtent Hit Ratio: %d / %d\n",
-			   si->hit_ext, si->total_ext);
-		seq_printf(s, "\nExtent Tree Count: %d\n", si->ext_tree);
-		seq_printf(s, "\nExtent Node Count: %d\n", si->ext_node);
+		seq_puts(s, "\nExtent Cache:\n");
+		seq_printf(s, "  - Hit Count: L1-1:%d L1-2:%d L2:%d\n",
+				si->hit_largest, si->hit_cached,
+				si->hit_rbtree);
+		seq_printf(s, "  - Hit Ratio: %d%% (%d / %d)\n",
+				!si->total_ext ? 0 :
+				(si->hit_total * 100) / si->total_ext,
+				si->hit_total, si->total_ext);
+		seq_printf(s, "  - Inner Struct Count: tree: %d, node: %d\n",
+				si->ext_tree, si->ext_node);
 		seq_puts(s, "\nBalancing F2FS Async:\n");
 		seq_printf(s, "  - inmem: %4d, wb: %4d\n",
 			   si->inmem_pages, si->wb_pages);
@@ -366,6 +378,12 @@
 	si->sbi = sbi;
 	sbi->stat_info = si;
 
+	atomic_set(&sbi->total_hit_ext, 0);
+	atomic_set(&sbi->read_hit_rbtree, 0);
+	atomic_set(&sbi->read_hit_largest, 0);
+	atomic_set(&sbi->read_hit_cached, 0);
+
+	atomic_set(&sbi->inline_xattr, 0);
 	atomic_set(&sbi->inline_inode, 0);
 	atomic_set(&sbi->inline_dir, 0);
 	atomic_set(&sbi->inplace_count, 0);
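
The counters behind this output are plain atomics, and the printed ratio
guards the division against a zero total, exactly as above. The same
arithmetic as a standalone C11 program; the counter names are borrowed
from debug.c, everything else is a toy harness.

    #include <stdatomic.h>
    #include <stdio.h>

    /* total lookups vs. per-level hits, as in struct f2fs_sb_info */
    static atomic_int total_ext, hit_largest, hit_cached, hit_rbtree;

    static void report(void)
    {
            int hits = atomic_load(&hit_largest) + atomic_load(&hit_cached) +
                       atomic_load(&hit_rbtree);
            int total = atomic_load(&total_ext);

            /* guard the division the same way debug.c does */
            printf("Hit Ratio: %d%% (%d / %d)\n",
                   !total ? 0 : (hits * 100) / total, hits, total);
    }

    int main(void)
    {
            atomic_fetch_add(&total_ext, 10);
            atomic_fetch_add(&hit_largest, 4);
            atomic_fetch_add(&hit_rbtree, 2);
            report();   /* prints: Hit Ratio: 60% (6 / 10) */
            return 0;
    }
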
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index a34ebd8..8f15fc1 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -718,8 +718,8 @@
 	if (inode)
 		f2fs_drop_nlink(dir, inode, NULL);
 
-	if (bit_pos == NR_DENTRY_IN_BLOCK) {
-		truncate_hole(dir, page->index, page->index + 1);
+	if (bit_pos == NR_DENTRY_IN_BLOCK &&
+			!truncate_hole(dir, page->index, page->index + 1)) {
 		clear_page_dirty_for_io(page);
 		ClearPagePrivate(page);
 		ClearPageUptodate(page);
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
new file mode 100644
index 0000000..997ac86
--- /dev/null
+++ b/fs/f2fs/extent_cache.c
@@ -0,0 +1,791 @@
+/*
+ * f2fs extent cache support
+ *
+ * Copyright (c) 2015 Motorola Mobility
+ * Copyright (c) 2015 Samsung Electronics
+ * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
+ *          Chao Yu <chao2.yu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+
+#include "f2fs.h"
+#include "node.h"
+#include <trace/events/f2fs.h>
+
+static struct kmem_cache *extent_tree_slab;
+static struct kmem_cache *extent_node_slab;
+
+static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, struct extent_info *ei,
+				struct rb_node *parent, struct rb_node **p)
+{
+	struct extent_node *en;
+
+	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
+	if (!en)
+		return NULL;
+
+	en->ei = *ei;
+	INIT_LIST_HEAD(&en->list);
+
+	rb_link_node(&en->rb_node, parent, p);
+	rb_insert_color(&en->rb_node, &et->root);
+	et->count++;
+	atomic_inc(&sbi->total_ext_node);
+	return en;
+}
+
+static void __detach_extent_node(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, struct extent_node *en)
+{
+	rb_erase(&en->rb_node, &et->root);
+	et->count--;
+	atomic_dec(&sbi->total_ext_node);
+
+	if (et->cached_en == en)
+		et->cached_en = NULL;
+}
+
+static struct extent_tree *__grab_extent_tree(struct inode *inode)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et;
+	nid_t ino = inode->i_ino;
+
+	down_write(&sbi->extent_tree_lock);
+	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
+	if (!et) {
+		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
+		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
+		memset(et, 0, sizeof(struct extent_tree));
+		et->ino = ino;
+		et->root = RB_ROOT;
+		et->cached_en = NULL;
+		rwlock_init(&et->lock);
+		atomic_set(&et->refcount, 0);
+		et->count = 0;
+		sbi->total_ext_tree++;
+	}
+	atomic_inc(&et->refcount);
+	up_write(&sbi->extent_tree_lock);
+
+	/* never freed until evict_inode */
+	F2FS_I(inode)->extent_tree = et;
+
+	return et;
+}
+
+static struct extent_node *__lookup_extent_tree(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, unsigned int fofs)
+{
+	struct rb_node *node = et->root.rb_node;
+	struct extent_node *en = et->cached_en;
+
+	if (en) {
+		struct extent_info *cei = &en->ei;
+
+		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs) {
+			stat_inc_cached_node_hit(sbi);
+			return en;
+		}
+	}
+
+	while (node) {
+		en = rb_entry(node, struct extent_node, rb_node);
+
+		if (fofs < en->ei.fofs) {
+			node = node->rb_left;
+		} else if (fofs >= en->ei.fofs + en->ei.len) {
+			node = node->rb_right;
+		} else {
+			stat_inc_rbtree_node_hit(sbi);
+			return en;
+		}
+	}
+	return NULL;
+}
+
+static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, struct extent_info *ei)
+{
+	struct rb_node **p = &et->root.rb_node;
+	struct extent_node *en;
+
+	en = __attach_extent_node(sbi, et, ei, NULL, p);
+	if (!en)
+		return NULL;
+
+	et->largest = en->ei;
+	et->cached_en = en;
+	return en;
+}
+
+static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
+					struct extent_tree *et, bool free_all)
+{
+	struct rb_node *node, *next;
+	struct extent_node *en;
+	unsigned int count = et->count;
+
+	node = rb_first(&et->root);
+	while (node) {
+		next = rb_next(node);
+		en = rb_entry(node, struct extent_node, rb_node);
+
+		if (free_all) {
+			spin_lock(&sbi->extent_lock);
+			if (!list_empty(&en->list))
+				list_del_init(&en->list);
+			spin_unlock(&sbi->extent_lock);
+		}
+
+		if (free_all || list_empty(&en->list)) {
+			__detach_extent_node(sbi, et, en);
+			kmem_cache_free(extent_node_slab, en);
+		}
+		node = next;
+	}
+
+	return count - et->count;
+}
+
+static void __drop_largest_extent(struct inode *inode, pgoff_t fofs)
+{
+	struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;
+
+	if (largest->fofs <= fofs && largest->fofs + largest->len > fofs)
+		largest->len = 0;
+}
+
+void f2fs_drop_largest_extent(struct inode *inode, pgoff_t fofs)
+{
+	if (!f2fs_may_extent_tree(inode))
+		return;
+
+	__drop_largest_extent(inode, fofs);
+}
+
+void f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et;
+	struct extent_node *en;
+	struct extent_info ei;
+
+	if (!f2fs_may_extent_tree(inode))
+		return;
+
+	et = __grab_extent_tree(inode);
+
+	if (!i_ext || le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
+		return;
+
+	set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
+		le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));
+
+	write_lock(&et->lock);
+	if (et->count)
+		goto out;
+
+	en = __init_extent_tree(sbi, et, &ei);
+	if (en) {
+		spin_lock(&sbi->extent_lock);
+		list_add_tail(&en->list, &sbi->extent_list);
+		spin_unlock(&sbi->extent_lock);
+	}
+out:
+	write_unlock(&et->lock);
+}
+
+static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
+							struct extent_info *ei)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	struct extent_node *en;
+	bool ret = false;
+
+	f2fs_bug_on(sbi, !et);
+
+	trace_f2fs_lookup_extent_tree_start(inode, pgofs);
+
+	read_lock(&et->lock);
+
+	if (et->largest.fofs <= pgofs &&
+			et->largest.fofs + et->largest.len > pgofs) {
+		*ei = et->largest;
+		ret = true;
+		stat_inc_largest_node_hit(sbi);
+		goto out;
+	}
+
+	en = __lookup_extent_tree(sbi, et, pgofs);
+	if (en) {
+		*ei = en->ei;
+		spin_lock(&sbi->extent_lock);
+		if (!list_empty(&en->list))
+			list_move_tail(&en->list, &sbi->extent_list);
+		et->cached_en = en;
+		spin_unlock(&sbi->extent_lock);
+		ret = true;
+	}
+out:
+	stat_inc_total_hit(sbi);
+	read_unlock(&et->lock);
+
+	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
+	return ret;
+}
+
+/*
+ * Look up the extent at @fofs; if hit, return the extent node.
+ * If not, return NULL and set:
+ * @prev_ex: extent before fofs
+ * @next_ex: extent after fofs
+ * @insert_p: insert point for a new extent at fofs
+ * @insert_parent: parent node of the insert point
+ * in order to simplify the insertion afterwards.
+ * The tree must stay unchanged between lookup and insertion.
+ */
+static struct extent_node *__lookup_extent_tree_ret(struct extent_tree *et,
+				unsigned int fofs,
+				struct extent_node **prev_ex,
+				struct extent_node **next_ex,
+				struct rb_node ***insert_p,
+				struct rb_node **insert_parent)
+{
+	struct rb_node **pnode = &et->root.rb_node;
+	struct rb_node *parent = NULL, *tmp_node;
+	struct extent_node *en = et->cached_en;
+
+	*insert_p = NULL;
+	*insert_parent = NULL;
+	*prev_ex = NULL;
+	*next_ex = NULL;
+
+	if (RB_EMPTY_ROOT(&et->root))
+		return NULL;
+
+	if (en) {
+		struct extent_info *cei = &en->ei;
+
+		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
+			goto lookup_neighbors;
+	}
+
+	while (*pnode) {
+		parent = *pnode;
+		en = rb_entry(*pnode, struct extent_node, rb_node);
+
+		if (fofs < en->ei.fofs)
+			pnode = &(*pnode)->rb_left;
+		else if (fofs >= en->ei.fofs + en->ei.len)
+			pnode = &(*pnode)->rb_right;
+		else
+			goto lookup_neighbors;
+	}
+
+	*insert_p = pnode;
+	*insert_parent = parent;
+
+	en = rb_entry(parent, struct extent_node, rb_node);
+	tmp_node = parent;
+	if (parent && fofs > en->ei.fofs)
+		tmp_node = rb_next(parent);
+	*next_ex = tmp_node ?
+		rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
+
+	tmp_node = parent;
+	if (parent && fofs < en->ei.fofs)
+		tmp_node = rb_prev(parent);
+	*prev_ex = tmp_node ?
+		rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
+	return NULL;
+
+lookup_neighbors:
+	if (fofs == en->ei.fofs) {
+		/* lookup prev node for merging backward later */
+		tmp_node = rb_prev(&en->rb_node);
+		*prev_ex = tmp_node ?
+			rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
+	}
+	if (fofs == en->ei.fofs + en->ei.len - 1) {
+		/* lookup next node for merging frontward later */
+		tmp_node = rb_next(&en->rb_node);
+		*next_ex = tmp_node ?
+			rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
+	}
+	return en;
+}
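
The point of handing back insert_p/insert_parent is that a failed lookup
already knows where the new node belongs, so the follow-up insertion can
skip a second descent, as long as the tree is untouched in between. The
same idea with a sorted array standing in for the rb-tree, as a
self-contained sketch:

    #include <stdio.h>

    /* on a miss, remember the insertion index (the insert_p analogue) */
    static int find(const int *a, int n, int key, int *insert_idx)
    {
            int lo = 0, hi = n;

            while (lo < hi) {
                    int mid = (lo + hi) / 2;

                    if (a[mid] == key)
                            return mid;
                    if (a[mid] < key)
                            lo = mid + 1;
                    else
                            hi = mid;
            }
            *insert_idx = lo;
            return -1;
    }

    int main(void)
    {
            int a[8] = { 1, 3, 5, 7 };
            int n = 4, idx;

            /* miss: reuse idx for the insert, no second search needed */
            if (find(a, n, 4, &idx) < 0) {
                    for (int i = n; i > idx; i--)
                            a[i] = a[i - 1];
                    a[idx] = 4;
                    n++;
            }
            for (int i = 0; i < n; i++)
                    printf("%d ", a[i]);   /* 1 3 4 5 7 */
            printf("\n");
            return 0;
    }
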
+
+static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, struct extent_info *ei,
+				struct extent_node **den,
+				struct extent_node *prev_ex,
+				struct extent_node *next_ex)
+{
+	struct extent_node *en = NULL;
+
+	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
+		prev_ex->ei.len += ei->len;
+		ei = &prev_ex->ei;
+		en = prev_ex;
+	}
+
+	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
+		if (en) {
+			__detach_extent_node(sbi, et, prev_ex);
+			*den = prev_ex;
+		}
+		next_ex->ei.fofs = ei->fofs;
+		next_ex->ei.blk = ei->blk;
+		next_ex->ei.len += ei->len;
+		en = next_ex;
+	}
+
+	if (en) {
+		if (en->ei.len > et->largest.len)
+			et->largest = en->ei;
+		et->cached_en = en;
+	}
+	return en;
+}
+
+static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, struct extent_info *ei,
+				struct rb_node **insert_p,
+				struct rb_node *insert_parent)
+{
+	struct rb_node **p = &et->root.rb_node;
+	struct rb_node *parent = NULL;
+	struct extent_node *en = NULL;
+
+	if (insert_p && insert_parent) {
+		parent = insert_parent;
+		p = insert_p;
+		goto do_insert;
+	}
+
+	while (*p) {
+		parent = *p;
+		en = rb_entry(parent, struct extent_node, rb_node);
+
+		if (ei->fofs < en->ei.fofs)
+			p = &(*p)->rb_left;
+		else if (ei->fofs >= en->ei.fofs + en->ei.len)
+			p = &(*p)->rb_right;
+		else
+			f2fs_bug_on(sbi, 1);
+	}
+do_insert:
+	en = __attach_extent_node(sbi, et, ei, parent, p);
+	if (!en)
+		return NULL;
+
+	if (en->ei.len > et->largest.len)
+		et->largest = en->ei;
+	et->cached_en = en;
+	return en;
+}
+
+unsigned int f2fs_update_extent_tree_range(struct inode *inode,
+				pgoff_t fofs, block_t blkaddr, unsigned int len)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
+	struct extent_node *prev_en = NULL, *next_en = NULL;
+	struct extent_info ei, dei, prev;
+	struct rb_node **insert_p = NULL, *insert_parent = NULL;
+	unsigned int end = fofs + len;
+	unsigned int pos = (unsigned int)fofs;
+
+	if (!et)
+		return false;
+
+	write_lock(&et->lock);
+
+	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT)) {
+		write_unlock(&et->lock);
+		return false;
+	}
+
+	prev = et->largest;
+	dei.len = 0;
+
+	/* we do not guarantee that the largest extent is cached all the time */
+	__drop_largest_extent(inode, fofs);
+
+	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
+	en = __lookup_extent_tree_ret(et, fofs, &prev_en, &next_en,
+					&insert_p, &insert_parent);
+	if (!en) {
+		if (next_en) {
+			en = next_en;
+			f2fs_bug_on(sbi, en->ei.fofs <= pos);
+			pos = en->ei.fofs;
+		} else {
+			/*
+			 * skip searching in the tree since there is no
+			 * larger extent node in the cache.
+			 */
+			goto update_extent;
+		}
+	}
+
+	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
+	while (en) {
+		struct rb_node *node;
+
+		if (pos >= end)
+			break;
+
+		dei = en->ei;
+		en1 = en2 = NULL;
+
+		node = rb_next(&en->rb_node);
+
+		/*
+		 * 2.1 there are four cases when we invalidate blkaddrs in an
+		 * extent node, |V: valid address, X: will be invalidated|
+		 */
+		/* case#1, invalidate right part of extent node |VVVVVXXXXX| */
+		if (pos > dei.fofs && end >= dei.fofs + dei.len) {
+			en->ei.len = pos - dei.fofs;
+
+			if (en->ei.len < F2FS_MIN_EXTENT_LEN) {
+				__detach_extent_node(sbi, et, en);
+				insert_p = NULL;
+				insert_parent = NULL;
+				goto update;
+			}
+
+			if (__is_extent_same(&dei, &et->largest))
+				et->largest = en->ei;
+			goto next;
+		}
+
+		/* case#2, invalidate left part of extent node |XXXXXVVVVV| */
+		if (pos <= dei.fofs && end < dei.fofs + dei.len) {
+			en->ei.fofs = end;
+			en->ei.blk += end - dei.fofs;
+			en->ei.len -= end - dei.fofs;
+
+			if (en->ei.len < F2FS_MIN_EXTENT_LEN) {
+				__detach_extent_node(sbi, et, en);
+				insert_p = NULL;
+				insert_parent = NULL;
+				goto update;
+			}
+
+			if (__is_extent_same(&dei, &et->largest))
+				et->largest = en->ei;
+			goto next;
+		}
+
+		__detach_extent_node(sbi, et, en);
+
+		/*
+		 * if we remove a node from the rb-tree, our parent node
+		 * pointer may point to the wrong place, so discard it.
+		 */
+		insert_p = NULL;
+		insert_parent = NULL;
+
+		/* case#3, invalidate entire extent node |XXXXXXXXXX| */
+		if (pos <= dei.fofs && end >= dei.fofs + dei.len) {
+			if (__is_extent_same(&dei, &et->largest))
+				et->largest.len = 0;
+			goto update;
+		}
+
+		/*
+		 * case#4, invalidate data in the middle of extent node
+		 * |VVVXXXXVVV|
+		 */
+		if (dei.len > F2FS_MIN_EXTENT_LEN) {
+			unsigned int endofs;
+
+			/*  insert left part of split extent into cache */
+			if (pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
+				set_extent_info(&ei, dei.fofs, dei.blk,
+							pos - dei.fofs);
+				en1 = __insert_extent_tree(sbi, et, &ei,
+								NULL, NULL);
+			}
+
+			/* insert right part of split extent into cache */
+			endofs = dei.fofs + dei.len;
+			if (endofs - end >= F2FS_MIN_EXTENT_LEN) {
+				set_extent_info(&ei, end,
+						end - dei.fofs + dei.blk,
+						endofs - end);
+				en2 = __insert_extent_tree(sbi, et, &ei,
+								NULL, NULL);
+			}
+		}
+update:
+		/* 2.2 update in global extent list */
+		spin_lock(&sbi->extent_lock);
+		if (en && !list_empty(&en->list))
+			list_del(&en->list);
+		if (en1)
+			list_add_tail(&en1->list, &sbi->extent_list);
+		if (en2)
+			list_add_tail(&en2->list, &sbi->extent_list);
+		spin_unlock(&sbi->extent_lock);
+
+		/* 2.3 release extent node */
+		if (en)
+			kmem_cache_free(extent_node_slab, en);
+next:
+		en = node ? rb_entry(node, struct extent_node, rb_node) : NULL;
+		next_en = en;
+		if (en)
+			pos = en->ei.fofs;
+	}
+
+update_extent:
+	/* 3. update extent in extent cache */
+	if (blkaddr) {
+		struct extent_node *den = NULL;
+
+		set_extent_info(&ei, fofs, blkaddr, len);
+		en3 = __try_merge_extent_node(sbi, et, &ei, &den,
+							prev_en, next_en);
+		if (!en3)
+			en3 = __insert_extent_tree(sbi, et, &ei,
+						insert_p, insert_parent);
+
+		/* give up the extent cache if splits and small updates happen */
+		if (dei.len >= 1 &&
+				prev.len < F2FS_MIN_EXTENT_LEN &&
+				et->largest.len < F2FS_MIN_EXTENT_LEN) {
+			et->largest.len = 0;
+			set_inode_flag(F2FS_I(inode), FI_NO_EXTENT);
+		}
+
+		spin_lock(&sbi->extent_lock);
+		if (en3) {
+			if (list_empty(&en3->list))
+				list_add_tail(&en3->list, &sbi->extent_list);
+			else
+				list_move_tail(&en3->list, &sbi->extent_list);
+		}
+		if (den && !list_empty(&den->list))
+			list_del(&den->list);
+		spin_unlock(&sbi->extent_lock);
+
+		if (den)
+			kmem_cache_free(extent_node_slab, den);
+	}
+
+	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
+		__free_extent_tree(sbi, et, true);
+
+	write_unlock(&et->lock);
+
+	return !__is_extent_same(&prev, &et->largest);
+}
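
The four invalidation cases in the loop above boil down to an interval
comparison between the dropped range [pos, end) and the extent
[fofs, fofs + len). A standalone classifier, assuming the two ranges
actually overlap (the loop only ever visits overlapping nodes):

    #include <stdio.h>

    /* classify how [pos, end) overlaps the extent [fofs, fofs + len),
     * mirroring cases #1-#4 above */
    static const char *overlap_case(unsigned int pos, unsigned int end,
                                    unsigned int fofs, unsigned int len)
    {
            unsigned int ext_end = fofs + len;

            if (pos > fofs && end >= ext_end)
                    return "case#1: cut right part  |VVVVVXXXXX|";
            if (pos <= fofs && end < ext_end)
                    return "case#2: cut left part   |XXXXXVVVVV|";
            if (pos <= fofs && end >= ext_end)
                    return "case#3: drop whole node |XXXXXXXXXX|";
            return "case#4: split the middle |VVVXXXXVVV|";
    }

    int main(void)
    {
            /* extent covers file offsets [100, 200) */
            printf("%s\n", overlap_case(150, 250, 100, 100)); /* case#1 */
            printf("%s\n", overlap_case(120, 150, 100, 100)); /* case#4 */
            return 0;
    }
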
+
+unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
+{
+	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
+	struct extent_node *en, *tmp;
+	unsigned long ino = F2FS_ROOT_INO(sbi);
+	struct radix_tree_root *root = &sbi->extent_tree_root;
+	unsigned int found;
+	unsigned int node_cnt = 0, tree_cnt = 0;
+	int remained;
+
+	if (!test_opt(sbi, EXTENT_CACHE))
+		return 0;
+
+	if (!down_write_trylock(&sbi->extent_tree_lock))
+		goto out;
+
+	/* 1. remove unreferenced extent tree */
+	while ((found = radix_tree_gang_lookup(root,
+				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
+		unsigned i;
+
+		ino = treevec[found - 1]->ino + 1;
+		for (i = 0; i < found; i++) {
+			struct extent_tree *et = treevec[i];
+
+			if (!atomic_read(&et->refcount)) {
+				write_lock(&et->lock);
+				node_cnt += __free_extent_tree(sbi, et, true);
+				write_unlock(&et->lock);
+
+				radix_tree_delete(root, et->ino);
+				kmem_cache_free(extent_tree_slab, et);
+				sbi->total_ext_tree--;
+				tree_cnt++;
+
+				if (node_cnt + tree_cnt >= nr_shrink)
+					goto unlock_out;
+			}
+		}
+	}
+	up_write(&sbi->extent_tree_lock);
+
+	/* 2. remove LRU extent entries */
+	if (!down_write_trylock(&sbi->extent_tree_lock))
+		goto out;
+
+	remained = nr_shrink - (node_cnt + tree_cnt);
+
+	spin_lock(&sbi->extent_lock);
+	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
+		if (!remained--)
+			break;
+		list_del_init(&en->list);
+	}
+	spin_unlock(&sbi->extent_lock);
+
+	while ((found = radix_tree_gang_lookup(root,
+				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
+		unsigned i;
+
+		ino = treevec[found - 1]->ino + 1;
+		for (i = 0; i < found; i++) {
+			struct extent_tree *et = treevec[i];
+
+			write_lock(&et->lock);
+			node_cnt += __free_extent_tree(sbi, et, false);
+			write_unlock(&et->lock);
+
+			if (node_cnt + tree_cnt >= nr_shrink)
+				break;
+		}
+	}
+unlock_out:
+	up_write(&sbi->extent_tree_lock);
+out:
+	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
+
+	return node_cnt + tree_cnt;
+}
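
So the shrinker reclaims in two phases: whole trees that nobody references
go first, and only then are individual nodes trimmed in LRU order, stopping
once nr_shrink victims are found. A toy model of that ordering; struct
toy_tree is a stand-in, not an f2fs type.

    #include <stdio.h>

    struct toy_tree { int refcount; int nodes; };

    static int shrink(struct toy_tree *trees, int n, int nr_shrink)
    {
            int freed = 0;

            /* phase 1: drop trees nobody references (nodes + tree itself) */
            for (int i = 0; i < n && freed < nr_shrink; i++) {
                    if (!trees[i].refcount) {
                            freed += trees[i].nodes + 1;
                            trees[i].nodes = 0;
                    }
            }
            /* phase 2: trim nodes from still-referenced trees */
            for (int i = 0; i < n && freed < nr_shrink; i++) {
                    while (trees[i].nodes && freed < nr_shrink) {
                            trees[i].nodes--;
                            freed++;
                    }
            }
            return freed;
    }

    int main(void)
    {
            struct toy_tree trees[] = { { 0, 3 }, { 2, 5 } };

            printf("reclaimed %d objects\n", shrink(trees, 2, 6)); /* 6 */
            return 0;
    }
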
+
+unsigned int f2fs_destroy_extent_node(struct inode *inode)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	unsigned int node_cnt = 0;
+
+	if (!et)
+		return 0;
+
+	write_lock(&et->lock);
+	node_cnt = __free_extent_tree(sbi, et, true);
+	write_unlock(&et->lock);
+
+	return node_cnt;
+}
+
+void f2fs_destroy_extent_tree(struct inode *inode)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	unsigned int node_cnt = 0;
+
+	if (!et)
+		return;
+
+	if (inode->i_nlink && !is_bad_inode(inode) && et->count) {
+		atomic_dec(&et->refcount);
+		return;
+	}
+
+	/* free all extent info belonging to this extent tree */
+	node_cnt = f2fs_destroy_extent_node(inode);
+
+	/* delete extent tree entry in radix tree */
+	down_write(&sbi->extent_tree_lock);
+	atomic_dec(&et->refcount);
+	f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
+	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
+	kmem_cache_free(extent_tree_slab, et);
+	sbi->total_ext_tree--;
+	up_write(&sbi->extent_tree_lock);
+
+	F2FS_I(inode)->extent_tree = NULL;
+
+	trace_f2fs_destroy_extent_tree(inode, node_cnt);
+}
+
+bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
+					struct extent_info *ei)
+{
+	if (!f2fs_may_extent_tree(inode))
+		return false;
+
+	return f2fs_lookup_extent_tree(inode, pgofs, ei);
+}
+
+void f2fs_update_extent_cache(struct dnode_of_data *dn)
+{
+	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
+	pgoff_t fofs;
+
+	if (!f2fs_may_extent_tree(dn->inode))
+		return;
+
+	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
+
+	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
+							dn->ofs_in_node;
+
+	if (f2fs_update_extent_tree_range(dn->inode, fofs, dn->data_blkaddr, 1))
+		sync_inode_page(dn);
+}
+
+void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
+				pgoff_t fofs, block_t blkaddr, unsigned int len)
+{
+	if (!f2fs_may_extent_tree(dn->inode))
+		return;
+
+	if (f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len))
+		sync_inode_page(dn);
+}
+
+void init_extent_cache_info(struct f2fs_sb_info *sbi)
+{
+	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
+	init_rwsem(&sbi->extent_tree_lock);
+	INIT_LIST_HEAD(&sbi->extent_list);
+	spin_lock_init(&sbi->extent_lock);
+	sbi->total_ext_tree = 0;
+	atomic_set(&sbi->total_ext_node, 0);
+}
+
+int __init create_extent_cache(void)
+{
+	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
+			sizeof(struct extent_tree));
+	if (!extent_tree_slab)
+		return -ENOMEM;
+	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
+			sizeof(struct extent_node));
+	if (!extent_node_slab) {
+		kmem_cache_destroy(extent_tree_slab);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void destroy_extent_cache(void)
+{
+	kmem_cache_destroy(extent_node_slab);
+	kmem_cache_destroy(extent_tree_slab);
+}
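
Taken together, a lookup can now hit at three levels, matching the
L1-1/L1-2/L2 counters printed by debug.c: the per-tree largest extent,
the recently used cached_en node, and finally the full rb-tree walk. A
userspace sketch of that hierarchy, with a linear scan standing in for
the rb-tree:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct extent { unsigned int fofs, blk, len; };

    static bool ext_covers(const struct extent *e, unsigned int pgofs)
    {
            return e->len && e->fofs <= pgofs && pgofs < e->fofs + e->len;
    }

    /* L1-1: largest extent, L1-2: recently used node, L2: tree walk */
    static const struct extent *lookup(const struct extent *largest,
                                       const struct extent *cached,
                                       const struct extent *tree, size_t n,
                                       unsigned int pgofs)
    {
            if (largest && ext_covers(largest, pgofs))
                    return largest;                 /* L1-1 hit */
            if (cached && ext_covers(cached, pgofs))
                    return cached;                  /* L1-2 hit */
            for (size_t i = 0; i < n; i++)          /* L2 walk */
                    if (ext_covers(&tree[i], pgofs))
                            return &tree[i];
            return NULL;                            /* miss */
    }

    int main(void)
    {
            struct extent largest = { 0, 1000, 64 };
            struct extent nodes[] = { { 100, 2000, 8 }, { 300, 3000, 16 } };
            const struct extent *e = lookup(&largest, NULL, nodes, 2, 305);

            if (e)
                    printf("pgofs 305 -> blk %u\n", e->blk + 305 - e->fofs);
            return 0;
    }
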
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index a8327ed..f1a90ff 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -19,6 +19,7 @@
 #include <linux/magic.h>
 #include <linux/kobject.h>
 #include <linux/sched.h>
+#include <linux/bio.h>
 
 #ifdef CONFIG_F2FS_CHECK_FS
 #define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
@@ -228,6 +229,7 @@
 #define F2FS_IOC_START_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 3)
 #define F2FS_IOC_RELEASE_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 4)
 #define F2FS_IOC_ABORT_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 5)
+#define F2FS_IOC_GARBAGE_COLLECT	_IO(F2FS_IOCTL_MAGIC, 6)
 
 #define F2FS_IOC_SET_ENCRYPTION_POLICY					\
 		_IOR('f', 19, struct f2fs_encryption_policy)
@@ -320,7 +322,7 @@
 					 */
 };
 
-#define F2FS_LINK_MAX		32000	/* maximum link count per file */
+#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */
 
 #define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */
 
@@ -349,6 +351,7 @@
 	nid_t ino;			/* inode number */
 	struct rb_root root;		/* root of extent info rb-tree */
 	struct extent_node *cached_en;	/* recently accessed extent node */
+	struct extent_info largest;	/* largest extent info */
 	rwlock_t lock;			/* protect extent info rb-tree */
 	atomic_t refcount;		/* reference count of rb-tree */
 	unsigned int count;		/* # of extent node in rb-tree*/
@@ -372,6 +375,12 @@
 	unsigned int m_flags;
 };
 
+/* for flag in get_data_block */
+#define F2FS_GET_BLOCK_READ		0
+#define F2FS_GET_BLOCK_DIO		1
+#define F2FS_GET_BLOCK_FIEMAP		2
+#define F2FS_GET_BLOCK_BMAP		3
+
 /*
  * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
  */
@@ -420,14 +429,13 @@
 	unsigned int clevel;		/* maximum level of given file name */
 	nid_t i_xattr_nid;		/* node id that contains xattrs */
 	unsigned long long xattr_ver;	/* cp version of xattr modification */
-	struct extent_info ext;		/* in-memory extent cache entry */
-	rwlock_t ext_lock;		/* rwlock for single extent cache */
 	struct inode_entry *dirty_dir;	/* the pointer of dirty dir */
 
-	struct radix_tree_root inmem_root;	/* radix tree for inmem pages */
 	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
 	struct mutex inmem_lock;	/* lock for inmemory pages */
 
+	struct extent_tree *extent_tree;	/* cached extent_tree entry */
+
 #ifdef CONFIG_F2FS_FS_ENCRYPTION
 	/* Encryption params */
 	struct f2fs_crypt_info *i_crypt_info;
@@ -779,7 +787,11 @@
 	unsigned int segment_count[2];		/* # of allocated segments */
 	unsigned int block_count[2];		/* # of allocated blocks */
 	atomic_t inplace_count;		/* # of inplace update */
-	int total_hit_ext, read_hit_ext;	/* extent cache hit ratio */
+	atomic_t total_hit_ext;			/* # of lookup extent cache */
+	atomic_t read_hit_rbtree;		/* # of hit rbtree extent node */
+	atomic_t read_hit_largest;		/* # of hit largest extent node */
+	atomic_t read_hit_cached;		/* # of hit cached extent node */
+	atomic_t inline_xattr;			/* # of inline_xattr inodes */
 	atomic_t inline_inode;			/* # of inline_data inodes */
 	atomic_t inline_dir;			/* # of inline_dentry inodes */
 	int bg_gc;				/* background gc calls */
@@ -791,6 +803,11 @@
 	/* For sysfs support */
 	struct kobject s_kobj;
 	struct completion s_kobj_unregister;
+
+	/* For shrinker support */
+	struct list_head s_list;
+	struct mutex umount_mutex;
+	unsigned int shrinker_run_no;
 };
 
 /*
@@ -1039,7 +1056,8 @@
 
 static inline void inode_dec_dirty_pages(struct inode *inode)
 {
-	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode))
+	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
+			!S_ISLNK(inode->i_mode))
 		return;
 
 	atomic_dec(&F2FS_I(inode)->dirty_pages);
@@ -1234,16 +1252,24 @@
 						gfp_t flags)
 {
 	void *entry;
-retry:
-	entry = kmem_cache_alloc(cachep, flags);
-	if (!entry) {
-		cond_resched();
-		goto retry;
-	}
 
+	entry = kmem_cache_alloc(cachep, flags);
+	if (!entry)
+		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
 	return entry;
 }
 
+static inline struct bio *f2fs_bio_alloc(int npages)
+{
+	struct bio *bio;
+
+	/* No failure on bio allocation */
+	bio = bio_alloc(GFP_NOIO, npages);
+	if (!bio)
+		bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
+	return bio;
+}
+
 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
 				unsigned long index, void *item)
 {
@@ -1342,6 +1368,7 @@
 	FI_INC_LINK,		/* need to increment i_nlink */
 	FI_ACL_MODE,		/* indicate acl mode */
 	FI_NO_ALLOC,		/* should not allocate any blocks */
+	FI_FREE_NID,		/* free allocated nid */
 	FI_UPDATE_DIR,		/* should update inode block for consistency */
 	FI_DELAY_IPUT,		/* used for the recovery */
 	FI_NO_EXTENT,		/* not to use the extent cache */
@@ -1541,6 +1568,17 @@
 	return false;
 }
 
+static inline bool f2fs_may_extent_tree(struct inode *inode)
+{
+	mode_t mode = inode->i_mode;
+
+	if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) ||
+			is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
+		return false;
+
+	return S_ISREG(mode);
+}
+
 #define get_inode_mode(i) \
 	((is_inode_flag_set(F2FS_I(i), FI_ACL_MODE)) ? \
 	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
@@ -1557,7 +1595,7 @@
 int f2fs_sync_file(struct file *, loff_t, loff_t, int);
 void truncate_data_blocks(struct dnode_of_data *);
 int truncate_blocks(struct inode *, u64, bool);
-void f2fs_truncate(struct inode *);
+int f2fs_truncate(struct inode *, bool);
 int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 int f2fs_setattr(struct dentry *, struct iattr *);
 int truncate_hole(struct inode *, pgoff_t, pgoff_t);
@@ -1649,7 +1687,7 @@
 int truncate_inode_blocks(struct inode *, pgoff_t);
 int truncate_xattr_node(struct inode *, struct page *);
 int wait_on_node_pages_writeback(struct f2fs_sb_info *, nid_t);
-void remove_inode_page(struct inode *);
+int remove_inode_page(struct inode *);
 struct page *new_inode_page(struct inode *);
 struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
 void ra_node_page(struct f2fs_sb_info *, nid_t);
@@ -1660,6 +1698,7 @@
 bool alloc_nid(struct f2fs_sb_info *, nid_t *);
 void alloc_nid_done(struct f2fs_sb_info *, nid_t);
 void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
+int try_to_free_nids(struct f2fs_sb_info *, int);
 void recover_inline_xattr(struct inode *, struct page *);
 void recover_xattr_data(struct inode *, struct page *, block_t);
 int recover_inode_page(struct f2fs_sb_info *, struct page *);
@@ -1675,7 +1714,7 @@
  * segment.c
  */
 void register_inmem_page(struct inode *, struct page *);
-void commit_inmem_pages(struct inode *, bool);
+int commit_inmem_pages(struct inode *, bool);
 void f2fs_balance_fs(struct f2fs_sb_info *);
 void f2fs_balance_fs_bg(struct f2fs_sb_info *);
 int f2fs_issue_flush(struct f2fs_sb_info *);
@@ -1685,7 +1724,7 @@
 void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
 void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
 void release_discard_addrs(struct f2fs_sb_info *);
-void discard_next_dnode(struct f2fs_sb_info *, block_t);
+bool discard_next_dnode(struct f2fs_sb_info *, block_t);
 int npages_for_summary_flush(struct f2fs_sb_info *, bool);
 void allocate_new_segments(struct f2fs_sb_info *);
 int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *);
@@ -1727,7 +1766,7 @@
 void release_orphan_inode(struct f2fs_sb_info *);
 void add_orphan_inode(struct f2fs_sb_info *, nid_t);
 void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
-void recover_orphan_inodes(struct f2fs_sb_info *);
+int recover_orphan_inodes(struct f2fs_sb_info *);
 int get_valid_checkpoint(struct f2fs_sb_info *);
 void update_dirty_page(struct inode *, struct page *);
 void add_dirty_dir_inode(struct inode *);
@@ -1746,21 +1785,14 @@
 void f2fs_submit_page_mbio(struct f2fs_io_info *);
 void set_data_blkaddr(struct dnode_of_data *);
 int reserve_new_block(struct dnode_of_data *);
+int f2fs_get_block(struct dnode_of_data *, pgoff_t);
 int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
-void f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
-void f2fs_destroy_extent_tree(struct inode *);
-void f2fs_init_extent_cache(struct inode *, struct f2fs_extent *);
-void f2fs_update_extent_cache(struct dnode_of_data *);
-void f2fs_preserve_extent_tree(struct inode *);
 struct page *get_read_data_page(struct inode *, pgoff_t, int);
 struct page *find_data_page(struct inode *, pgoff_t);
 struct page *get_lock_data_page(struct inode *, pgoff_t);
 struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
 int do_write_data_page(struct f2fs_io_info *);
 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64);
-void init_extent_cache_info(struct f2fs_sb_info *);
-int __init create_extent_cache(void);
-void destroy_extent_cache(void);
 void f2fs_invalidate_page(struct page *, unsigned int, unsigned int);
 int f2fs_release_page(struct page *, gfp_t);
 
@@ -1788,11 +1820,13 @@
 	struct f2fs_sb_info *sbi;
 	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
 	int main_area_segs, main_area_sections, main_area_zones;
-	int hit_ext, total_ext, ext_tree, ext_node;
+	int hit_largest, hit_cached, hit_rbtree, hit_total, total_ext;
+	int ext_tree, ext_node;
 	int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
 	int nats, dirty_nats, sits, dirty_sits, fnids;
 	int total_count, utilization;
-	int bg_gc, inline_inode, inline_dir, inmem_pages, wb_pages;
+	int bg_gc, inmem_pages, wb_pages;
+	int inline_xattr, inline_inode, inline_dir;
 	unsigned int valid_count, valid_node_count, valid_inode_count;
 	unsigned int bimodal, avg_vblocks;
 	int util_free, util_valid, util_invalid;
@@ -1823,8 +1857,20 @@
 #define stat_inc_bggc_count(sbi)	((sbi)->bg_gc++)
 #define stat_inc_dirty_dir(sbi)		((sbi)->n_dirty_dirs++)
 #define stat_dec_dirty_dir(sbi)		((sbi)->n_dirty_dirs--)
-#define stat_inc_total_hit(sb)		((F2FS_SB(sb))->total_hit_ext++)
-#define stat_inc_read_hit(sb)		((F2FS_SB(sb))->read_hit_ext++)
+#define stat_inc_total_hit(sbi)		(atomic_inc(&(sbi)->total_hit_ext))
+#define stat_inc_rbtree_node_hit(sbi)	(atomic_inc(&(sbi)->read_hit_rbtree))
+#define stat_inc_largest_node_hit(sbi)	(atomic_inc(&(sbi)->read_hit_largest))
+#define stat_inc_cached_node_hit(sbi)	(atomic_inc(&(sbi)->read_hit_cached))
+#define stat_inc_inline_xattr(inode)					\
+	do {								\
+		if (f2fs_has_inline_xattr(inode))			\
+			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
+	} while (0)
+#define stat_dec_inline_xattr(inode)					\
+	do {								\
+		if (f2fs_has_inline_xattr(inode))			\
+			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
+	} while (0)
 #define stat_inc_inline_inode(inode)					\
 	do {								\
 		if (f2fs_has_inline_data(inode))			\
@@ -1894,7 +1940,11 @@
 #define stat_inc_dirty_dir(sbi)
 #define stat_dec_dirty_dir(sbi)
 #define stat_inc_total_hit(sb)
-#define stat_inc_read_hit(sb)
+#define stat_inc_rbtree_node_hit(sb)
+#define stat_inc_largest_node_hit(sbi)
+#define stat_inc_cached_node_hit(sbi)
+#define stat_inc_inline_xattr(inode)
+#define stat_dec_inline_xattr(inode)
 #define stat_inc_inline_inode(inode)
 #define stat_dec_inline_inode(inode)
 #define stat_inc_inline_dir(inode)
@@ -1950,6 +2000,30 @@
 						struct f2fs_str *);
 
 /*
+ * shrinker.c
+ */
+unsigned long f2fs_shrink_count(struct shrinker *, struct shrink_control *);
+unsigned long f2fs_shrink_scan(struct shrinker *, struct shrink_control *);
+void f2fs_join_shrinker(struct f2fs_sb_info *);
+void f2fs_leave_shrinker(struct f2fs_sb_info *);
+
+/*
+ * extent_cache.c
+ */
+unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
+void f2fs_drop_largest_extent(struct inode *, pgoff_t);
+void f2fs_init_extent_tree(struct inode *, struct f2fs_extent *);
+unsigned int f2fs_destroy_extent_node(struct inode *);
+void f2fs_destroy_extent_tree(struct inode *);
+bool f2fs_lookup_extent_cache(struct inode *, pgoff_t, struct extent_info *);
+void f2fs_update_extent_cache(struct dnode_of_data *);
+void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
+						pgoff_t, block_t, unsigned int);
+void init_extent_cache_info(struct f2fs_sb_info *);
+int __init create_extent_cache(void);
+void destroy_extent_cache(void);
+
+/*
  * crypto support
  */
 static inline int f2fs_encrypted_inode(struct inode *inode)
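
f2fs_kmem_cache_alloc() and the new f2fs_bio_alloc() above drop the open-coded
retry loop in favour of a single fallback attempt with __GFP_NOFAIL, which may
block but never returns NULL. A minimal sketch of the idiom, using a
hypothetical helper that is not part of f2fs:

  #include <linux/slab.h>

  /* try the fast path first; only the fallback may block indefinitely,
   * and it never returns NULL */
  static inline void *alloc_retry_nofail(size_t size, gfp_t flags)
  {
  	void *p = kmalloc(size, flags);

  	if (!p)
  		p = kmalloc(size, flags | __GFP_NOFAIL);
  	return p;
  }
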
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index b0f38c3..8120f86 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -27,6 +27,7 @@
 #include "segment.h"
 #include "xattr.h"
 #include "acl.h"
+#include "gc.h"
 #include "trace.h"
 #include <trace/events/f2fs.h>
 
@@ -85,6 +86,8 @@
 mapped:
 	/* fill the page */
 	f2fs_wait_on_page_writeback(page, DATA);
+	/* if gced page is attached, don't write to cold segment */
+	clear_cold_data(page);
 out:
 	sb_end_pagefault(inode->i_sb);
 	return block_page_mkwrite_return(err);
@@ -203,8 +206,8 @@
 	}
 
 	/* if the inode is dirty, let's recover all the time */
-	if (!datasync && is_inode_flag_set(fi, FI_DIRTY_INODE)) {
-		update_inode_page(inode);
+	if (!datasync) {
+		f2fs_write_inode(inode, NULL);
 		goto go_write;
 	}
 
@@ -442,9 +445,9 @@
 
 int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 {
-	int nr_free = 0, ofs = dn->ofs_in_node;
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct f2fs_node *raw_node;
+	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
 	__le32 *addr;
 
 	raw_node = F2FS_NODE(dn->node_page);
@@ -457,14 +460,22 @@
 
 		dn->data_blkaddr = NULL_ADDR;
 		set_data_blkaddr(dn);
-		f2fs_update_extent_cache(dn);
 		invalidate_blocks(sbi, blkaddr);
 		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
 			clear_inode_flag(F2FS_I(dn->inode),
 						FI_FIRST_BLOCK_WRITTEN);
 		nr_free++;
 	}
+
 	if (nr_free) {
+		pgoff_t fofs;
+		/*
+		 * once we invalidate a valid blkaddr in range [ofs, ofs + count],
+		 * we invalidate all blkaddrs in the whole range.
+		 */
+		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
+						F2FS_I(dn->inode)) + ofs;
+		f2fs_update_extent_cache_range(dn, fofs, 0, len);
 		dec_valid_block_count(sbi, dn->inode, nr_free);
 		set_page_dirty(dn->node_page);
 		sync_inode_page(dn);
@@ -576,24 +587,30 @@
 	return err;
 }
 
-void f2fs_truncate(struct inode *inode)
+int f2fs_truncate(struct inode *inode, bool lock)
 {
+	int err;
+
 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 				S_ISLNK(inode->i_mode)))
-		return;
+		return 0;
 
 	trace_f2fs_truncate(inode);
 
 	/* we should check inline_data size */
 	if (f2fs_has_inline_data(inode) && !f2fs_may_inline_data(inode)) {
-		if (f2fs_convert_inline_inode(inode))
-			return;
+		err = f2fs_convert_inline_inode(inode);
+		if (err)
+			return err;
 	}
 
-	if (!truncate_blocks(inode, i_size_read(inode), true)) {
-		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-		mark_inode_dirty(inode);
-	}
+	err = truncate_blocks(inode, i_size_read(inode), lock);
+	if (err)
+		return err;
+
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	mark_inode_dirty(inode);
+	return 0;
 }
 
 int f2fs_getattr(struct vfsmount *mnt,
@@ -653,7 +670,9 @@
 
 		if (attr->ia_size <= i_size_read(inode)) {
 			truncate_setsize(inode, attr->ia_size);
-			f2fs_truncate(inode);
+			err = f2fs_truncate(inode, true);
+			if (err)
+				return err;
 			f2fs_balance_fs(F2FS_I_SB(inode));
 		} else {
 			/*
@@ -692,14 +711,14 @@
 	.fiemap		= f2fs_fiemap,
 };
 
-static void fill_zero(struct inode *inode, pgoff_t index,
+static int fill_zero(struct inode *inode, pgoff_t index,
 					loff_t start, loff_t len)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct page *page;
 
 	if (!len)
-		return;
+		return 0;
 
 	f2fs_balance_fs(sbi);
 
@@ -707,12 +726,14 @@
 	page = get_new_data_page(inode, NULL, index, false);
 	f2fs_unlock_op(sbi);
 
-	if (!IS_ERR(page)) {
-		f2fs_wait_on_page_writeback(page, DATA);
-		zero_user(page, start, len);
-		set_page_dirty(page);
-		f2fs_put_page(page, 1);
-	}
+	if (IS_ERR(page))
+		return PTR_ERR(page);
+
+	f2fs_wait_on_page_writeback(page, DATA);
+	zero_user(page, start, len);
+	set_page_dirty(page);
+	f2fs_put_page(page, 1);
+	return 0;
 }
 
 int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
@@ -760,14 +781,22 @@
 	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
 
 	if (pg_start == pg_end) {
-		fill_zero(inode, pg_start, off_start,
+		ret = fill_zero(inode, pg_start, off_start,
 						off_end - off_start);
+		if (ret)
+			return ret;
 	} else {
-		if (off_start)
-			fill_zero(inode, pg_start++, off_start,
-					PAGE_CACHE_SIZE - off_start);
-		if (off_end)
-			fill_zero(inode, pg_end, 0, off_end);
+		if (off_start) {
+			ret = fill_zero(inode, pg_start++, off_start,
+						PAGE_CACHE_SIZE - off_start);
+			if (ret)
+				return ret;
+		}
+		if (off_end) {
+			ret = fill_zero(inode, pg_end, 0, off_end);
+			if (ret)
+				return ret;
+		}
 
 		if (pg_start < pg_end) {
 			struct address_space *mapping = inode->i_mapping;
@@ -797,11 +826,11 @@
 	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
 	int ret = 0;
 
-	f2fs_lock_op(sbi);
-
 	for (; end < nrpages; start++, end++) {
 		block_t new_addr, old_addr;
 
+		f2fs_lock_op(sbi);
+
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
 		ret = get_dnode_of_data(&dn, end, LOOKUP_NODE_RA);
 		if (ret && ret != -ENOENT) {
@@ -817,13 +846,16 @@
 		if (new_addr == NULL_ADDR) {
 			set_new_dnode(&dn, inode, NULL, NULL, 0);
 			ret = get_dnode_of_data(&dn, start, LOOKUP_NODE_RA);
-			if (ret && ret != -ENOENT)
+			if (ret && ret != -ENOENT) {
 				goto out;
-			else if (ret == -ENOENT)
+			} else if (ret == -ENOENT) {
+				f2fs_unlock_op(sbi);
 				continue;
+			}
 
 			if (dn.data_blkaddr == NULL_ADDR) {
 				f2fs_put_dnode(&dn);
+				f2fs_unlock_op(sbi);
 				continue;
 			} else {
 				truncate_data_blocks_range(&dn, 1);
@@ -862,8 +894,9 @@
 
 			f2fs_put_dnode(&dn);
 		}
+		f2fs_unlock_op(sbi);
 	}
-	ret = 0;
+	return 0;
 out:
 	f2fs_unlock_op(sbi);
 	return ret;
@@ -885,6 +918,14 @@
 	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
 		return -EINVAL;
 
+	f2fs_balance_fs(F2FS_I_SB(inode));
+
+	if (f2fs_has_inline_data(inode)) {
+		ret = f2fs_convert_inline_inode(inode);
+		if (ret)
+			return ret;
+	}
+
 	pg_start = offset >> PAGE_CACHE_SHIFT;
 	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
 
@@ -946,14 +987,21 @@
 	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
 
 	if (pg_start == pg_end) {
-		fill_zero(inode, pg_start, off_start, off_end - off_start);
+		ret = fill_zero(inode, pg_start, off_start,
+						off_end - off_start);
+		if (ret)
+			return ret;
+
 		if (offset + len > new_size)
 			new_size = offset + len;
 		new_size = max_t(loff_t, new_size, offset + len);
 	} else {
 		if (off_start) {
-			fill_zero(inode, pg_start++, off_start,
-					PAGE_CACHE_SIZE - off_start);
+			ret = fill_zero(inode, pg_start++, off_start,
+						PAGE_CACHE_SIZE - off_start);
+			if (ret)
+				return ret;
+
 			new_size = max_t(loff_t, new_size,
 						pg_start << PAGE_CACHE_SHIFT);
 		}
@@ -995,7 +1043,10 @@
 		}
 
 		if (off_end) {
-			fill_zero(inode, pg_end, 0, off_end);
+			ret = fill_zero(inode, pg_end, 0, off_end);
+			if (ret)
+				goto out;
+
 			new_size = max_t(loff_t, new_size, offset + len);
 		}
 	}
@@ -1033,6 +1084,12 @@
 
 	f2fs_balance_fs(sbi);
 
+	if (f2fs_has_inline_data(inode)) {
+		ret = f2fs_convert_inline_inode(inode);
+		if (ret)
+			return ret;
+	}
+
 	ret = truncate_blocks(inode, i_size_read(inode), true);
 	if (ret)
 		return ret;
@@ -1302,6 +1359,7 @@
 static int f2fs_ioc_start_atomic_write(struct file *filp)
 {
 	struct inode *inode = file_inode(filp);
+	int ret;
 
 	if (!inode_owner_or_capable(inode))
 		return -EACCES;
@@ -1311,9 +1369,12 @@
 	if (f2fs_is_atomic_file(inode))
 		return 0;
 
-	set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+	ret = f2fs_convert_inline_inode(inode);
+	if (ret)
+		return ret;
 
-	return f2fs_convert_inline_inode(inode);
+	set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+	return 0;
 }
 
 static int f2fs_ioc_commit_atomic_write(struct file *filp)
@@ -1333,10 +1394,13 @@
 
 	if (f2fs_is_atomic_file(inode)) {
 		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
-		commit_inmem_pages(inode, false);
+		ret = commit_inmem_pages(inode, false);
+		if (ret)
+			goto err_out;
 	}
 
-	ret = f2fs_sync_file(filp, 0, LONG_MAX, 0);
+	ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0);
+err_out:
 	mnt_drop_write_file(filp);
 	return ret;
 }
@@ -1344,6 +1408,7 @@
 static int f2fs_ioc_start_volatile_write(struct file *filp)
 {
 	struct inode *inode = file_inode(filp);
+	int ret;
 
 	if (!inode_owner_or_capable(inode))
 		return -EACCES;
@@ -1351,9 +1416,12 @@
 	if (f2fs_is_volatile_file(inode))
 		return 0;
 
-	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
+	ret = f2fs_convert_inline_inode(inode);
+	if (ret)
+		return ret;
 
-	return f2fs_convert_inline_inode(inode);
+	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
+	return 0;
 }
 
 static int f2fs_ioc_release_volatile_write(struct file *filp)
@@ -1389,7 +1457,7 @@
 
 	if (f2fs_is_atomic_file(inode)) {
 		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
-		commit_inmem_pages(inode, false);
+		commit_inmem_pages(inode, true);
 	}
 
 	if (f2fs_is_volatile_file(inode))
@@ -1544,6 +1612,35 @@
 	return 0;
 }
 
+static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
+{
+	struct inode *inode = file_inode(filp);
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	__u32 i, count;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (get_user(count, (__u32 __user *)arg))
+		return -EFAULT;
+
+	if (!count || count > F2FS_BATCH_GC_MAX_NUM)
+		return -EINVAL;
+
+	for (i = 0; i < count; i++) {
+		if (!mutex_trylock(&sbi->gc_mutex))
+			break;
+
+		if (f2fs_gc(sbi))
+			break;
+	}
+
+	if (put_user(i, (__u32 __user *)arg))
+		return -EFAULT;
+
+	return 0;
+}
+
 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	switch (cmd) {
@@ -1573,6 +1670,8 @@
 		return f2fs_ioc_get_encryption_policy(filp, arg);
 	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
 		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
+	case F2FS_IOC_GARBAGE_COLLECT:
+		return f2fs_ioc_gc(filp, arg);
 	default:
 		return -ENOTTY;
 	}
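
The new F2FS_IOC_GARBAGE_COLLECT ioctl reads a __u32 pass count (rejected
unless it is 1..F2FS_BATCH_GC_MAX_NUM), runs f2fs_gc() up to that many times,
and writes back how many passes actually ran. A hypothetical userspace caller;
the request code below is an assumption for illustration and should really
come from the f2fs UAPI header:

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/types.h>

  #define F2FS_IOCTL_MAGIC		0xf5
  #define F2FS_IOC_GARBAGE_COLLECT	_IO(F2FS_IOCTL_MAGIC, 6)	/* assumed */

  int main(int argc, char **argv)
  {
  	__u32 count = 8;	/* must be 1..F2FS_BATCH_GC_MAX_NUM (16) */
  	int fd = open(argc > 1 ? argv[1] : "/mnt/f2fs", O_RDONLY);

  	if (fd < 0) {
  		perror("open");
  		return 1;
  	}
  	if (ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &count) < 0) {
  		perror("F2FS_IOC_GARBAGE_COLLECT");
  		close(fd);
  		return 1;
  	}
  	/* the kernel writes back the number of passes it completed */
  	printf("ran %u GC pass(es)\n", count);
  	close(fd);
  	return 0;
  }
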
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 22fb5ef..782b8e7 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -391,23 +391,27 @@
  * On validity, copy that node with cold status, otherwise (invalid node)
  * ignore that.
  */
-static void gc_node_segment(struct f2fs_sb_info *sbi,
+static int gc_node_segment(struct f2fs_sb_info *sbi,
 		struct f2fs_summary *sum, unsigned int segno, int gc_type)
 {
 	bool initial = true;
 	struct f2fs_summary *entry;
+	block_t start_addr;
 	int off;
 
+	start_addr = START_BLOCK(sbi, segno);
+
 next_step:
 	entry = sum;
 
 	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
 		nid_t nid = le32_to_cpu(entry->nid);
 		struct page *node_page;
+		struct node_info ni;
 
 		/* stop BG_GC if there is not enough free sections. */
 		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
-			return;
+			return 0;
 
 		if (check_valid_map(sbi, segno, off) == 0)
 			continue;
@@ -426,6 +430,12 @@
 			continue;
 		}
 
+		get_node_info(sbi, nid, &ni);
+		if (ni.blk_addr != start_addr + off) {
+			f2fs_put_page(node_page, 1);
+			continue;
+		}
+
 		/* set page dirty and write it */
 		if (gc_type == FG_GC) {
 			f2fs_wait_on_page_writeback(node_page, NODE);
@@ -451,13 +461,11 @@
 		};
 		sync_node_pages(sbi, 0, &wbc);
 
-		/*
-		 * In the case of FG_GC, it'd be better to reclaim this victim
-		 * completely.
-		 */
-		if (get_valid_blocks(sbi, segno, 1) != 0)
-			goto next_step;
+		/* return 1 only if FG_GC successfully reclaimed one */
+		if (get_valid_blocks(sbi, segno, 1) == 0)
+			return 1;
 	}
+	return 0;
 }
 
 /*
@@ -487,7 +495,7 @@
 	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
 }
 
-static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
 {
 	struct page *node_page;
@@ -500,13 +508,13 @@
 
 	node_page = get_node_page(sbi, nid);
 	if (IS_ERR(node_page))
-		return 0;
+		return false;
 
 	get_node_info(sbi, nid, dni);
 
 	if (sum->version != dni->version) {
 		f2fs_put_page(node_page, 1);
-		return 0;
+		return false;
 	}
 
 	*nofs = ofs_of_node(node_page);
@@ -514,8 +522,8 @@
 	f2fs_put_page(node_page, 1);
 
 	if (source_blkaddr != blkaddr)
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
 static void move_encrypted_block(struct inode *inode, block_t bidx)
@@ -552,7 +560,10 @@
 	fio.page = page;
 	fio.blk_addr = dn.data_blkaddr;
 
-	fio.encrypted_page = grab_cache_page(META_MAPPING(fio.sbi), fio.blk_addr);
+	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi),
+					fio.blk_addr,
+					FGP_LOCK|FGP_CREAT,
+					GFP_NOFS);
 	if (!fio.encrypted_page)
 		goto put_out;
 
@@ -636,7 +647,7 @@
  * If the parent node is not valid or the data block address is different,
  * the victim data block is ignored.
  */
-static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
 {
 	struct super_block *sb = sbi->sb;
@@ -659,7 +670,7 @@
 
 		/* stop BG_GC if there is not enough free sections. */
 		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
-			return;
+			return 0;
 
 		if (check_valid_map(sbi, segno, off) == 0)
 			continue;
@@ -670,7 +681,7 @@
 		}
 
 		/* Get an inode by ino with checking validity */
-		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
+		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
 			continue;
 
 		if (phase == 1) {
@@ -724,15 +735,11 @@
 	if (gc_type == FG_GC) {
 		f2fs_submit_merged_bio(sbi, DATA, WRITE);
 
-		/*
-		 * In the case of FG_GC, it'd be better to reclaim this victim
-		 * completely.
-		 */
-		if (get_valid_blocks(sbi, segno, 1) != 0) {
-			phase = 2;
-			goto next_step;
-		}
+		/* return 1 only if FG_GC successfully reclaimed one */
+		if (get_valid_blocks(sbi, segno, 1) == 0)
+			return 1;
 	}
+	return 0;
 }
 
 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
@@ -748,12 +755,13 @@
 	return ret;
 }
 
-static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
+static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
 				struct gc_inode_list *gc_list, int gc_type)
 {
 	struct page *sum_page;
 	struct f2fs_summary_block *sum;
 	struct blk_plug plug;
+	int nfree = 0;
 
 	/* read segment summary of victim */
 	sum_page = get_sum_page(sbi, segno);
@@ -773,10 +781,11 @@
 
 	switch (GET_SUM_TYPE((&sum->footer))) {
 	case SUM_TYPE_NODE:
-		gc_node_segment(sbi, sum->entries, segno, gc_type);
+		nfree = gc_node_segment(sbi, sum->entries, segno, gc_type);
 		break;
 	case SUM_TYPE_DATA:
-		gc_data_segment(sbi, sum->entries, gc_list, segno, gc_type);
+		nfree = gc_data_segment(sbi, sum->entries, gc_list,
+							segno, gc_type);
 		break;
 	}
 	blk_finish_plug(&plug);
@@ -785,11 +794,13 @@
 	stat_inc_call_count(sbi->stat_info);
 
 	f2fs_put_page(sum_page, 0);
+	return nfree;
 }
 
 int f2fs_gc(struct f2fs_sb_info *sbi)
 {
-	unsigned int segno, i;
+	unsigned int segno = NULL_SEGNO;
+	unsigned int i;
 	int gc_type = BG_GC;
 	int nfree = 0;
 	int ret = -1;
@@ -808,10 +819,11 @@
 
 	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
 		gc_type = FG_GC;
-		write_checkpoint(sbi, &cpc);
+		if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
+			write_checkpoint(sbi, &cpc);
 	}
 
-	if (!__get_victim(sbi, &segno, gc_type))
+	if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
 		goto stop;
 	ret = 0;
 
@@ -821,13 +833,10 @@
 								META_SSA);
 
 	for (i = 0; i < sbi->segs_per_sec; i++)
-		do_garbage_collect(sbi, segno + i, &gc_list, gc_type);
+		nfree += do_garbage_collect(sbi, segno + i, &gc_list, gc_type);
 
-	if (gc_type == FG_GC) {
+	if (gc_type == FG_GC)
 		sbi->cur_victim_sec = NULL_SEGNO;
-		nfree++;
-		WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
-	}
 
 	if (has_not_enough_free_secs(sbi, nfree))
 		goto gc_more;
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index b4a65be..c5a055b 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -19,6 +19,12 @@
 #define LIMIT_INVALID_BLOCK	40 /* percentage over total user space */
 #define LIMIT_FREE_BLOCK	40 /* percentage over invalid + free space */
 
+/*
+ * with this macro, we can control the max number of GC passes we run
+ * when the user triggers batch mode gc by ioctl.
+ */
+#define F2FS_BATCH_GC_MAX_NUM		16
+
 /* Search max. number of dirty segments to select a victim segment */
 #define DEF_MAX_VICTIM_SEARCH 4096 /* covers 8GB */
 
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index a13ffcc..3d143be 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -360,6 +360,10 @@
 	return 0;
 }
 
+/*
+ * NOTE: ipage is grabbed by the caller, but if any error occurs, we
+ * release ipage in this function.
+ */
 static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
 				struct f2fs_inline_dentry *inline_dentry)
 {
@@ -369,8 +373,10 @@
 	int err;
 
 	page = grab_cache_page(dir->i_mapping, 0);
-	if (!page)
+	if (!page) {
+		f2fs_put_page(ipage, 1);
 		return -ENOMEM;
+	}
 
 	set_new_dnode(&dn, dir, ipage, NULL, 0);
 	err = f2fs_reserve_block(&dn, 0);
@@ -378,13 +384,21 @@
 		goto out;
 
 	f2fs_wait_on_page_writeback(page, DATA);
-	zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
 
 	dentry_blk = kmap_atomic(page);
 
 	/* copy data from inline dentry block to new dentry block */
 	memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
 					INLINE_DENTRY_BITMAP_SIZE);
+	memset(dentry_blk->dentry_bitmap + INLINE_DENTRY_BITMAP_SIZE, 0,
+			SIZE_OF_DENTRY_BITMAP - INLINE_DENTRY_BITMAP_SIZE);
+	/*
+	 * we do not need to zero out the remainder of the dentry and
+	 * filename fields, since the bitmap marks their usage status;
+	 * likewise we can skip copying/zeroing the reserved space of the
+	 * dentry block, because it has not been used so far.
+	 */
 	memcpy(dentry_blk->dentry, inline_dentry->dentry,
 			sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
 	memcpy(dentry_blk->filename, inline_dentry->filename,
@@ -434,8 +448,9 @@
 						slots, NR_INLINE_DENTRY);
 	if (bit_pos >= NR_INLINE_DENTRY) {
 		err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
-		if (!err)
-			err = -EAGAIN;
+		if (err)
+			return err;
+		err = -EAGAIN;
 		goto out;
 	}
 
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 2550868..35aae65 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -12,7 +12,6 @@
 #include <linux/f2fs_fs.h>
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
-#include <linux/bitops.h>
 
 #include "f2fs.h"
 #include "node.h"
@@ -34,8 +33,8 @@
 		new_fl |= S_NOATIME;
 	if (flags & FS_DIRSYNC_FL)
 		new_fl |= S_DIRSYNC;
-	set_mask_bits(&inode->i_flags,
-			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl);
+	inode_set_flags(inode, new_fl,
+			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
 }
 
 static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
@@ -139,7 +138,7 @@
 	fi->i_pino = le32_to_cpu(ri->i_pino);
 	fi->i_dir_level = ri->i_dir_level;
 
-	f2fs_init_extent_cache(inode, &ri->i_ext);
+	f2fs_init_extent_tree(inode, &ri->i_ext);
 
 	get_inline_info(fi, ri);
 
@@ -155,6 +154,7 @@
 
 	f2fs_put_page(node_page, 1);
 
+	stat_inc_inline_xattr(inode);
 	stat_inc_inline_inode(inode);
 	stat_inc_inline_dir(inode);
 
@@ -237,10 +237,11 @@
 	ri->i_size = cpu_to_le64(i_size_read(inode));
 	ri->i_blocks = cpu_to_le64(inode->i_blocks);
 
-	read_lock(&F2FS_I(inode)->ext_lock);
-	set_raw_extent(&F2FS_I(inode)->ext, &ri->i_ext);
-	read_unlock(&F2FS_I(inode)->ext_lock);
-
+	if (F2FS_I(inode)->extent_tree)
+		set_raw_extent(&F2FS_I(inode)->extent_tree->largest,
+							&ri->i_ext);
+	else
+		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
 	set_raw_inline(F2FS_I(inode), ri);
 
 	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
@@ -314,7 +315,9 @@
 void f2fs_evict_inode(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+	struct f2fs_inode_info *fi = F2FS_I(inode);
+	nid_t xnid = fi->i_xattr_nid;
+	int err = 0;
 
 	/* some remaining atomic pages should be discarded */
 	if (f2fs_is_atomic_file(inode))
@@ -330,41 +333,62 @@
 	f2fs_bug_on(sbi, get_dirty_pages(inode));
 	remove_dirty_dir_inode(inode);
 
+	f2fs_destroy_extent_tree(inode);
+
 	if (inode->i_nlink || is_bad_inode(inode))
 		goto no_delete;
 
 	sb_start_intwrite(inode->i_sb);
-	set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
+	set_inode_flag(fi, FI_NO_ALLOC);
 	i_size_write(inode, 0);
 
 	if (F2FS_HAS_BLOCKS(inode))
-		f2fs_truncate(inode);
+		err = f2fs_truncate(inode, true);
 
-	f2fs_lock_op(sbi);
-	remove_inode_page(inode);
-	f2fs_unlock_op(sbi);
+	if (!err) {
+		f2fs_lock_op(sbi);
+		err = remove_inode_page(inode);
+		f2fs_unlock_op(sbi);
+	}
 
 	sb_end_intwrite(inode->i_sb);
 no_delete:
+	stat_dec_inline_xattr(inode);
 	stat_dec_inline_dir(inode);
 	stat_dec_inline_inode(inode);
 
-	/* update extent info in inode */
-	if (inode->i_nlink)
-		f2fs_preserve_extent_tree(inode);
-	f2fs_destroy_extent_tree(inode);
-
 	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
 	if (xnid)
 		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
-	if (is_inode_flag_set(F2FS_I(inode), FI_APPEND_WRITE))
+	if (is_inode_flag_set(fi, FI_APPEND_WRITE))
 		add_dirty_inode(sbi, inode->i_ino, APPEND_INO);
-	if (is_inode_flag_set(F2FS_I(inode), FI_UPDATE_WRITE))
+	if (is_inode_flag_set(fi, FI_UPDATE_WRITE))
 		add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
+	if (is_inode_flag_set(fi, FI_FREE_NID)) {
+		if (err && err != -ENOENT)
+			alloc_nid_done(sbi, inode->i_ino);
+		else
+			alloc_nid_failed(sbi, inode->i_ino);
+		clear_inode_flag(fi, FI_FREE_NID);
+	}
+
+	if (err && err != -ENOENT) {
+		if (!exist_written_data(sbi, inode->i_ino, ORPHAN_INO)) {
+			/*
+			 * we get here because we failed to release the
+			 * resources of this inode earlier; remind the user
+			 * to run fsck to fix it.
+			 */
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			f2fs_msg(sbi->sb, KERN_WARNING,
+				"inode (ino:%lu) resource leak, run fsck "
+				"to fix this issue!", inode->i_ino);
+		}
+	}
 out_clear:
 #ifdef CONFIG_F2FS_FS_ENCRYPTION
-	if (F2FS_I(inode)->i_crypt_info)
-		f2fs_free_encryption_info(inode, F2FS_I(inode)->i_crypt_info);
+	if (fi->i_crypt_info)
+		f2fs_free_encryption_info(inode, fi->i_crypt_info);
 #endif
 	clear_inode(inode);
 }
@@ -373,6 +397,7 @@
 void handle_failed_inode(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	int err = 0;
 
 	clear_nlink(inode);
 	make_bad_inode(inode);
@@ -380,13 +405,29 @@
 
 	i_size_write(inode, 0);
 	if (F2FS_HAS_BLOCKS(inode))
-		f2fs_truncate(inode);
+		err = f2fs_truncate(inode, false);
 
-	remove_inode_page(inode);
+	if (!err)
+		err = remove_inode_page(inode);
 
-	clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
-	clear_inode_flag(F2FS_I(inode), FI_INLINE_DENTRY);
-	alloc_nid_failed(sbi, inode->i_ino);
+	/*
+	 * if we skipped truncate_node in remove_inode_page because we
+	 * failed earlier, we need another way to release the resources of
+	 * this inode (e.g. valid block count, node block or nid). Here we
+	 * choose to add this inode to the orphan list, so that iput can
+	 * release it in the orphan recovery flow.
+	 *
+	 * Note: we should add the inode to the orphan list before
+	 * f2fs_unlock_op(), so we don't lose this orphan if a checkpoint
+	 * is followed by a sudden power-off.
+	 */
+	if (err && err != -ENOENT) {
+		err = acquire_orphan_inode(sbi);
+		if (!err)
+			add_orphan_inode(sbi, inode->i_ino);
+	}
+
+	set_inode_flag(F2FS_I(inode), FI_FREE_NID);
 	f2fs_unlock_op(sbi);
 
 	/* iput will drop the inode object */
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index fdbae21..a680bf3 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -53,7 +53,7 @@
 	if (err) {
 		err = -EINVAL;
 		nid_free = true;
-		goto out;
+		goto fail;
 	}
 
 	/* If the directory is encrypted, then we should encrypt the inode. */
@@ -65,6 +65,9 @@
 	if (f2fs_may_inline_dentry(inode))
 		set_inode_flag(F2FS_I(inode), FI_INLINE_DENTRY);
 
+	f2fs_init_extent_tree(inode, NULL);
+
+	stat_inc_inline_xattr(inode);
 	stat_inc_inline_inode(inode);
 	stat_inc_inline_dir(inode);
 
@@ -72,15 +75,12 @@
 	mark_inode_dirty(inode);
 	return inode;
 
-out:
-	clear_nlink(inode);
-	unlock_new_inode(inode);
 fail:
 	trace_f2fs_new_inode(inode, err);
 	make_bad_inode(inode);
-	iput(inode);
 	if (nid_free)
-		alloc_nid_failed(sbi, ino);
+		set_inode_flag(F2FS_I(inode), FI_FREE_NID);
+	iput(inode);
 	return ERR_PTR(err);
 }
 
@@ -89,7 +89,14 @@
 	size_t slen = strlen(s);
 	size_t sublen = strlen(sub);
 
-	if (sublen > slen)
+	/*
+	 * filename format of multimedia file should be defined as:
+	 * "filename + '.' + extension".
+	 */
+	if (slen < sublen + 2)
+		return 0;
+
+	if (s[slen - sublen - 1] != '.')
 		return 0;
 
 	return !strncasecmp(s + slen - sublen, sub, sublen);
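
is_multimedia_file() now requires at least one base-name character before the
dot, so a bare ".mp4" no longer counts as a multimedia file. The same check as
a standalone sketch:

  #include <string.h>
  #include <strings.h>

  /* returns 1 for "clip.mp4"; 0 for ".mp4", "mp4" and "clipmp4" */
  static int has_extension(const char *s, const char *ext)
  {
  	size_t slen = strlen(s), sublen = strlen(ext);

  	if (slen < sublen + 2 || s[slen - sublen - 1] != '.')
  		return 0;
  	return !strncasecmp(s + slen - sublen, ext, sublen);
  }
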
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 7dd63b7..27d1a74 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -159,7 +159,7 @@
 
 	head = radix_tree_lookup(&nm_i->nat_set_root, set);
 	if (!head) {
-		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);
+		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);
 
 		INIT_LIST_HEAD(&head->entry_list);
 		INIT_LIST_HEAD(&head->set_list);
@@ -246,7 +246,7 @@
 {
 	struct nat_entry *new;
 
-	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
+	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
 	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
 	memset(new, 0, sizeof(struct nat_entry));
 	nat_set_nid(new, nid);
@@ -306,6 +306,10 @@
 	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
 		unsigned char version = nat_get_version(e);
 		nat_set_version(e, inc_node_version(version));
+
+		/* in order to reuse the nid */
+		if (nm_i->next_scan_nid > ni->nid)
+			nm_i->next_scan_nid = ni->nid;
 	}
 
 	/* change address */
@@ -328,11 +332,11 @@
 int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
+	int nr = nr_shrink;
 
-	if (available_free_memory(sbi, NAT_ENTRIES))
+	if (!down_write_trylock(&nm_i->nat_tree_lock))
 		return 0;
 
-	down_write(&nm_i->nat_tree_lock);
 	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
 		struct nat_entry *ne;
 		ne = list_first_entry(&nm_i->nat_entries,
@@ -341,7 +345,7 @@
 		nr_shrink--;
 	}
 	up_write(&nm_i->nat_tree_lock);
-	return nr_shrink;
+	return nr - nr_shrink;
 }
 
 /*
@@ -898,17 +902,20 @@
  * Caller should grab and release a rwsem by calling f2fs_lock_op() and
  * f2fs_unlock_op().
  */
-void remove_inode_page(struct inode *inode)
+int remove_inode_page(struct inode *inode)
 {
 	struct dnode_of_data dn;
+	int err;
 
 	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
-	if (get_dnode_of_data(&dn, 0, LOOKUP_NODE))
-		return;
+	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
+	if (err)
+		return err;
 
-	if (truncate_xattr_node(inode, dn.inode_page)) {
+	err = truncate_xattr_node(inode, dn.inode_page);
+	if (err) {
 		f2fs_put_dnode(&dn);
-		return;
+		return err;
 	}
 
 	/* remove potential inline_data blocks */
@@ -922,6 +929,7 @@
 
 	/* will put inode & node pages */
 	truncate_node(&dn);
+	return 0;
 }
 
 struct page *new_inode_page(struct inode *inode)
@@ -991,8 +999,7 @@
 /*
  * Caller should do after getting the following values.
  * 0: f2fs_put_page(page, 0)
- * LOCKED_PAGE: f2fs_put_page(page, 1)
- * error: nothing
+ * LOCKED_PAGE or error: f2fs_put_page(page, 1)
  */
 static int read_node_page(struct page *page, int rw)
 {
@@ -1010,7 +1017,6 @@
 
 	if (unlikely(ni.blk_addr == NULL_ADDR)) {
 		ClearPageUptodate(page);
-		f2fs_put_page(page, 1);
 		return -ENOENT;
 	}
 
@@ -1041,10 +1047,7 @@
 		return;
 
 	err = read_node_page(apage, READA);
-	if (err == 0)
-		f2fs_put_page(apage, 0);
-	else if (err == LOCKED_PAGE)
-		f2fs_put_page(apage, 1);
+	f2fs_put_page(apage, err ? 1 : 0);
 }
 
 struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
@@ -1057,10 +1060,12 @@
 		return ERR_PTR(-ENOMEM);
 
 	err = read_node_page(page, READ_SYNC);
-	if (err < 0)
+	if (err < 0) {
+		f2fs_put_page(page, 1);
 		return ERR_PTR(err);
-	else if (err != LOCKED_PAGE)
+	} else if (err != LOCKED_PAGE) {
 		lock_page(page);
+	}
 
 	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
 		ClearPageUptodate(page);
@@ -1096,10 +1101,12 @@
 		return ERR_PTR(-ENOMEM);
 
 	err = read_node_page(page, READ_SYNC);
-	if (err < 0)
+	if (err < 0) {
+		f2fs_put_page(page, 1);
 		return ERR_PTR(err);
-	else if (err == LOCKED_PAGE)
+	} else if (err == LOCKED_PAGE) {
 		goto page_hit;
+	}
 
 	blk_start_plug(&plug);
 
@@ -1533,7 +1540,7 @@
 		if (unlikely(nid >= nm_i->max_nid))
 			nid = 0;
 
-		if (i++ == FREE_NID_PAGES)
+		if (++i >= FREE_NID_PAGES)
 			break;
 	}
 
@@ -1570,6 +1577,8 @@
 
 	/* We should not use stale free nids created by build_free_nids */
 	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
+		struct node_info ni;
+
 		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
 		list_for_each_entry(i, &nm_i->free_nid_list, list)
 			if (i->state == NID_NEW)
@@ -1580,6 +1589,13 @@
 		i->state = NID_ALLOC;
 		nm_i->fcnt--;
 		spin_unlock(&nm_i->free_nid_list_lock);
+
+		/* check whether the nid is already allocated */
+		get_node_info(sbi, *nid, &ni);
+		if (ni.blk_addr != NULL_ADDR) {
+			alloc_nid_done(sbi, *nid);
+			goto retry;
+		}
 		return true;
 	}
 	spin_unlock(&nm_i->free_nid_list_lock);
@@ -1636,6 +1652,32 @@
 		kmem_cache_free(free_nid_slab, i);
 }
 
+int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
+{
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
+	struct free_nid *i, *next;
+	int nr = nr_shrink;
+
+	if (!mutex_trylock(&nm_i->build_lock))
+		return 0;
+
+	spin_lock(&nm_i->free_nid_list_lock);
+	list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
+		if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK)
+			break;
+		if (i->state == NID_ALLOC)
+			continue;
+		__del_from_free_nid_list(nm_i, i);
+		kmem_cache_free(free_nid_slab, i);
+		nm_i->fcnt--;
+		nr_shrink--;
+	}
+	spin_unlock(&nm_i->free_nid_list_lock);
+	mutex_unlock(&nm_i->build_lock);
+
+	return nr - nr_shrink;
+}
+
 void recover_inline_xattr(struct inode *inode, struct page *page)
 {
 	void *src_addr, *dst_addr;
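
try_to_free_nats() and the new try_to_free_nids() both use trylock instead of
blocking: a shrinker can be entered from an allocation path that already holds
these locks, so backing off and reporting nothing freed is the safe choice. A
generic sketch of the pattern, with hypothetical cache types:

  #include <linux/list.h>
  #include <linux/mutex.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>

  struct my_entry {
  	struct list_head list;
  };

  struct my_cache {
  	struct mutex build_lock;	/* serializes cache refills */
  	spinlock_t lock;		/* guards @lru */
  	struct list_head lru;
  };

  /* back off instead of blocking; report how many entries were freed */
  static int shrink_cache(struct my_cache *c, int nr_shrink)
  {
  	int nr = nr_shrink;

  	if (!mutex_trylock(&c->build_lock))
  		return 0;	/* a refill is in flight; try again later */

  	spin_lock(&c->lock);
  	while (nr_shrink > 0 && !list_empty(&c->lru)) {
  		struct my_entry *e = list_first_entry(&c->lru,
  						struct my_entry, list);

  		list_del(&e->list);
  		kfree(e);
  		nr_shrink--;
  	}
  	spin_unlock(&c->lock);
  	mutex_unlock(&c->build_lock);

  	return nr - nr_shrink;
  }
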
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 24a8c1d..faec2ca 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -399,14 +399,35 @@
 	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
 	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
 
-	for (; start < end; start++) {
+	for (; start < end; start++, dn.ofs_in_node++) {
 		block_t src, dest;
 
 		src = datablock_addr(dn.node_page, dn.ofs_in_node);
 		dest = datablock_addr(page, dn.ofs_in_node);
 
-		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR &&
-			is_valid_blkaddr(sbi, dest, META_POR)) {
+		/* skip recovering if dest is the same as src */
+		if (src == dest)
+			continue;
+
+		/* dest is invalid, just invalidate src block */
+		if (dest == NULL_ADDR) {
+			truncate_data_blocks_range(&dn, 1);
+			continue;
+		}
+
+		/*
+		 * dest is reserved block, invalidate src block
+		 * and then reserve one new block in dnode page.
+		 */
+		if (dest == NEW_ADDR) {
+			truncate_data_blocks_range(&dn, 1);
+			err = reserve_new_block(&dn);
+			f2fs_bug_on(sbi, err);
+			continue;
+		}
+
+		/* dest is valid block, try to recover from src to dest */
+		if (is_valid_blkaddr(sbi, dest, META_POR)) {
 
 			if (src == NULL_ADDR) {
 				err = reserve_new_block(&dn);
@@ -424,7 +445,6 @@
 							ni.version, false);
 			recovered++;
 		}
-		dn.ofs_in_node++;
 	}
 
 	if (IS_INODE(dn.node_page))
@@ -525,14 +545,12 @@
 
 	INIT_LIST_HEAD(&inode_list);
 
-	/* step #1: find fsynced inode numbers */
-	set_sbi_flag(sbi, SBI_POR_DOING);
-
 	/* prevent checkpoint */
 	mutex_lock(&sbi->cp_mutex);
 
 	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
 
+	/* step #1: find fsynced inode numbers */
 	err = find_fsync_dnodes(sbi, &inode_list);
 	if (err)
 		goto out;
@@ -561,11 +579,20 @@
 
 	clear_sbi_flag(sbi, SBI_POR_DOING);
 	if (err) {
-		discard_next_dnode(sbi, blkaddr);
+		bool invalidate = false;
+
+		if (discard_next_dnode(sbi, blkaddr))
+			invalidate = true;
 
 		/* Flush all the NAT/SIT pages */
 		while (get_pages(sbi, F2FS_DIRTY_META))
 			sync_meta_pages(sbi, META, LONG_MAX);
+
+		/* invalidate temporary meta page */
+		if (invalidate)
+			invalidate_mapping_pages(META_MAPPING(sbi),
+							blkaddr, blkaddr);
+
 		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
 		mutex_unlock(&sbi->cp_mutex);
 	} else if (need_writecp) {
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 61b97f9..78e6d06 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -197,28 +197,20 @@
 {
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct inmem_pages *new;
-	int err;
 
-	SetPagePrivate(page);
 	f2fs_trace_pid(page);
 
+	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
+	SetPagePrivate(page);
+
 	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
 
 	/* add atomic page indices to the list */
 	new->page = page;
 	INIT_LIST_HEAD(&new->list);
-retry:
+
 	/* increase reference count with clean state */
 	mutex_lock(&fi->inmem_lock);
-	err = radix_tree_insert(&fi->inmem_root, page->index, new);
-	if (err == -EEXIST) {
-		mutex_unlock(&fi->inmem_lock);
-		kmem_cache_free(inmem_entry_slab, new);
-		return;
-	} else if (err) {
-		mutex_unlock(&fi->inmem_lock);
-		goto retry;
-	}
 	get_page(page);
 	list_add_tail(&new->list, &fi->inmem_pages);
 	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
@@ -227,7 +219,7 @@
 	trace_f2fs_register_inmem_page(page, INMEM);
 }
 
-void commit_inmem_pages(struct inode *inode, bool abort)
+int commit_inmem_pages(struct inode *inode, bool abort)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -239,6 +231,7 @@
 		.rw = WRITE_SYNC | REQ_PRIO,
 		.encrypted_page = NULL,
 	};
+	int err = 0;
 
 	/*
 	 * The abort is true only when f2fs_evict_inode is called.
@@ -254,8 +247,8 @@
 
 	mutex_lock(&fi->inmem_lock);
 	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
+		lock_page(cur->page);
 		if (!abort) {
-			lock_page(cur->page);
 			if (cur->page->mapping == inode->i_mapping) {
 				set_page_dirty(cur->page);
 				f2fs_wait_on_page_writeback(cur->page, DATA);
@@ -263,15 +256,20 @@
 					inode_dec_dirty_pages(inode);
 				trace_f2fs_commit_inmem_page(cur->page, INMEM);
 				fio.page = cur->page;
-				do_write_data_page(&fio);
+				err = do_write_data_page(&fio);
 				submit_bio = true;
+				if (err) {
+					unlock_page(cur->page);
+					break;
+				}
 			}
-			f2fs_put_page(cur->page, 1);
 		} else {
 			trace_f2fs_commit_inmem_page(cur->page, INMEM_DROP);
-			put_page(cur->page);
 		}
-		radix_tree_delete(&fi->inmem_root, cur->page->index);
+		set_page_private(cur->page, 0);
+		ClearPagePrivate(cur->page);
+		f2fs_put_page(cur->page, 1);
+
 		list_del(&cur->list);
 		kmem_cache_free(inmem_entry_slab, cur);
 		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
@@ -283,6 +281,7 @@
 		if (submit_bio)
 			f2fs_submit_merged_bio(sbi, DATA, WRITE);
 	}
+	return err;
 }
 
 /*
@@ -304,10 +303,18 @@
 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 {
 	/* try to shrink extent cache when there is not enough memory */
-	f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
+	if (!available_free_memory(sbi, EXTENT_CACHE))
+		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
 
-	/* check the # of cached NAT entries and prefree segments */
-	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
+	/* check the # of cached NAT entries */
+	if (!available_free_memory(sbi, NAT_ENTRIES))
+		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
+
+	if (!available_free_memory(sbi, FREE_NIDS))
+		try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES);
+
+	/* checkpoint is the only way to shrink partial cached entries */
+	if (!available_free_memory(sbi, NAT_ENTRIES) ||
 			excess_prefree_segs(sbi) ||
 			!available_free_memory(sbi, INO_ENTRIES))
 		f2fs_sync_fs(sbi->sb, true);
@@ -323,10 +330,12 @@
 		return 0;
 
 	if (!llist_empty(&fcc->issue_list)) {
-		struct bio *bio = bio_alloc(GFP_NOIO, 0);
+		struct bio *bio;
 		struct flush_cmd *cmd, *next;
 		int ret;
 
+		bio = f2fs_bio_alloc(0);
+
 		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
 		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
 
@@ -358,8 +367,15 @@
 	if (test_opt(sbi, NOBARRIER))
 		return 0;
 
-	if (!test_opt(sbi, FLUSH_MERGE))
-		return blkdev_issue_flush(sbi->sb->s_bdev, GFP_KERNEL, NULL);
+	if (!test_opt(sbi, FLUSH_MERGE)) {
+		struct bio *bio = f2fs_bio_alloc(0);
+		int ret;
+
+		bio->bi_bdev = sbi->sb->s_bdev;
+		ret = submit_bio_wait(WRITE_FLUSH, bio);
+		bio_put(bio);
+		return ret;
+	}
 
 	init_completion(&cmd.wait);
 
@@ -503,7 +519,7 @@
 	return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
 }
 
-void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
+bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
 {
 	int err = -ENOTSUPP;
 
@@ -513,13 +529,16 @@
 		unsigned int offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
 
 		if (f2fs_test_bit(offset, se->discard_map))
-			return;
+			return false;
 
 		err = f2fs_issue_discard(sbi, blkaddr, 1);
 	}
 
-	if (err)
+	if (err) {
 		update_meta_page(sbi, NULL, blkaddr);
+		return true;
+	}
+	return false;
 }
 
 static void __add_discard_entry(struct f2fs_sb_info *sbi,
@@ -1218,7 +1237,8 @@
 	mutex_lock(&sit_i->sentry_lock);
 
 	/* direct_io'ed data is aligned to the segment for better performance */
-	if (direct_io && curseg->next_blkoff)
+	if (direct_io && curseg->next_blkoff &&
+				!has_not_enough_free_secs(sbi, 0))
 		__allocate_new_segments(sbi, type);
 
 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
@@ -1733,7 +1753,7 @@
 static struct sit_entry_set *grab_sit_entry_set(void)
 {
 	struct sit_entry_set *ses =
-			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_ATOMIC);
+			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
 
 	ses->entry_cnt = 0;
 	INIT_LIST_HEAD(&ses->set_list);
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 79e7b87..b6e4ed1 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -177,6 +177,15 @@
 	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
 };
 
+/*
+ * this value is set in a page's private data to indicate that the
+ * page is atomically written and is on the inmem_pages list.
+ */
+#define ATOMIC_WRITTEN_PAGE		0x0000ffff
+
+#define IS_ATOMIC_WRITTEN_PAGE(page)			\
+		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
+
 struct inmem_pages {
 	struct list_head list;
 	struct page *page;
@@ -555,16 +564,15 @@
 	return curseg->next_blkoff;
 }
 
-#ifdef CONFIG_F2FS_CHECK_FS
 static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
 {
-	BUG_ON(segno > TOTAL_SEGS(sbi) - 1);
+	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
 }
 
 static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
 {
-	BUG_ON(blk_addr < SEG0_BLKADDR(sbi));
-	BUG_ON(blk_addr >= MAX_BLKADDR(sbi));
+	f2fs_bug_on(sbi, blk_addr < SEG0_BLKADDR(sbi)
+					|| blk_addr >= MAX_BLKADDR(sbi));
 }
 
 /*
@@ -573,16 +581,11 @@
 static inline void check_block_count(struct f2fs_sb_info *sbi,
 		int segno, struct f2fs_sit_entry *raw_sit)
 {
+#ifdef CONFIG_F2FS_CHECK_FS
 	bool is_valid  = test_bit_le(0, raw_sit->valid_map) ? true : false;
 	int valid_blocks = 0;
 	int cur_pos = 0, next_pos;
 
-	/* check segment usage */
-	BUG_ON(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg);
-
-	/* check boundary of a given segment number */
-	BUG_ON(segno > TOTAL_SEGS(sbi) - 1);
-
 	/* check bitmap with valid block count */
 	do {
 		if (is_valid) {
@@ -598,35 +601,11 @@
 		is_valid = !is_valid;
 	} while (cur_pos < sbi->blocks_per_seg);
 	BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
-}
-#else
-static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
-{
-	if (segno > TOTAL_SEGS(sbi) - 1)
-		set_sbi_flag(sbi, SBI_NEED_FSCK);
-}
-
-static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
-{
-	if (blk_addr < SEG0_BLKADDR(sbi) || blk_addr >= MAX_BLKADDR(sbi))
-		set_sbi_flag(sbi, SBI_NEED_FSCK);
-}
-
-/*
- * Summary block is always treated as an invalid block
- */
-static inline void check_block_count(struct f2fs_sb_info *sbi,
-		int segno, struct f2fs_sit_entry *raw_sit)
-{
-	/* check segment usage */
-	if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
-		set_sbi_flag(sbi, SBI_NEED_FSCK);
-
-	/* check boundary of a given segment number */
-	if (segno > TOTAL_SEGS(sbi) - 1)
-		set_sbi_flag(sbi, SBI_NEED_FSCK);
-}
 #endif
+	/* check segment usage, and check boundary of a given segment number */
+	f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
+					|| segno > TOTAL_SEGS(sbi) - 1);
+}
 
 static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
 						unsigned int start)
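
Tagging the page's private field with ATOMIC_WRITTEN_PAGE replaces the
per-inode radix tree that register_inmem_page() used to maintain: any path
that already holds the page can now recognize an in-flight atomic page without
a lookup. Hypothetical helpers mirroring the tag/untag pairing in segment.c:

  #include <linux/mm.h>
  #include <linux/page-flags.h>

  #define ATOMIC_WRITTEN_PAGE	0x0000ffff

  static inline void tag_atomic_page(struct page *page)
  {
  	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
  	SetPagePrivate(page);
  }

  static inline void untag_atomic_page(struct page *page)
  {
  	set_page_private(page, 0);
  	ClearPagePrivate(page);
  }

  static inline bool page_is_atomic_written(struct page *page)
  {
  	return PagePrivate(page) &&
  		page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE;
  }
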
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
new file mode 100644
index 0000000..da0d8e0
--- /dev/null
+++ b/fs/f2fs/shrinker.c
@@ -0,0 +1,139 @@
+/*
+ * f2fs shrinker support
+ *   the basic infra was copied from fs/ubifs/shrinker.c
+ *
+ * Copyright (c) 2015 Motorola Mobility
+ * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+
+#include "f2fs.h"
+
+static LIST_HEAD(f2fs_list);
+static DEFINE_SPINLOCK(f2fs_list_lock);
+static unsigned int shrinker_run_no;
+
+static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
+{
+	return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
+}
+
+static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
+{
+	if (NM_I(sbi)->fcnt > NAT_ENTRY_PER_BLOCK)
+		return NM_I(sbi)->fcnt - NAT_ENTRY_PER_BLOCK;
+	return 0;
+}
+
+static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
+{
+	return sbi->total_ext_tree + atomic_read(&sbi->total_ext_node);
+}
+
+unsigned long f2fs_shrink_count(struct shrinker *shrink,
+				struct shrink_control *sc)
+{
+	struct f2fs_sb_info *sbi;
+	struct list_head *p;
+	unsigned long count = 0;
+
+	spin_lock(&f2fs_list_lock);
+	p = f2fs_list.next;
+	while (p != &f2fs_list) {
+		sbi = list_entry(p, struct f2fs_sb_info, s_list);
+
+		/* stop f2fs_put_super */
+		if (!mutex_trylock(&sbi->umount_mutex)) {
+			p = p->next;
+			continue;
+		}
+		spin_unlock(&f2fs_list_lock);
+
+		/* count extent cache entries */
+		count += __count_extent_cache(sbi);
+
+		/* count clean nat cache entries */
+		count += __count_nat_entries(sbi);
+
+		/* count free nids cache entries */
+		count += __count_free_nids(sbi);
+
+		spin_lock(&f2fs_list_lock);
+		p = p->next;
+		mutex_unlock(&sbi->umount_mutex);
+	}
+	spin_unlock(&f2fs_list_lock);
+	return count;
+}
+
+unsigned long f2fs_shrink_scan(struct shrinker *shrink,
+				struct shrink_control *sc)
+{
+	unsigned long nr = sc->nr_to_scan;
+	struct f2fs_sb_info *sbi;
+	struct list_head *p;
+	unsigned int run_no;
+	unsigned long freed = 0;
+
+	spin_lock(&f2fs_list_lock);
+	do {
+		run_no = ++shrinker_run_no;
+	} while (run_no == 0);
+	p = f2fs_list.next;
+	while (p != &f2fs_list) {
+		sbi = list_entry(p, struct f2fs_sb_info, s_list);
+
+		if (sbi->shrinker_run_no == run_no)
+			break;
+
+		/* stop f2fs_put_super */
+		if (!mutex_trylock(&sbi->umount_mutex)) {
+			p = p->next;
+			continue;
+		}
+		spin_unlock(&f2fs_list_lock);
+
+		sbi->shrinker_run_no = run_no;
+
+		/* shrink extent cache entries */
+		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);
+
+		/* shrink clean nat cache entries */
+		if (freed < nr)
+			freed += try_to_free_nats(sbi, nr - freed);
+
+		/* shrink free nids cache entries */
+		if (freed < nr)
+			freed += try_to_free_nids(sbi, nr - freed);
+
+		spin_lock(&f2fs_list_lock);
+		p = p->next;
+		list_move_tail(&sbi->s_list, &f2fs_list);
+		mutex_unlock(&sbi->umount_mutex);
+		if (freed >= nr)
+			break;
+	}
+	spin_unlock(&f2fs_list_lock);
+	return freed;
+}
+
+void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
+{
+	spin_lock(&f2fs_list_lock);
+	list_add_tail(&sbi->s_list, &f2fs_list);
+	spin_unlock(&f2fs_list_lock);
+}
+
+void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
+{
+	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
+
+	spin_lock(&f2fs_list_lock);
+	list_del(&sbi->s_list);
+	spin_unlock(&f2fs_list_lock);
+}
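
f2fs_shrink_scan() stamps each visited sbi with the current pass number and
rotates it to the list tail, so successive scans spread reclaim across mounts
instead of always hammering the first one. The do/while skips zero so the
stamp never collides with a freshly initialized shrinker_run_no; as a
standalone sketch:

  /* wrap-safe pass counter: 0 is reserved for "never scanned" */
  static unsigned int next_run_no(unsigned int *counter)
  {
  	unsigned int run_no;

  	do {
  		run_no = ++(*counter);
  	} while (run_no == 0);
  	return run_no;
  }
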
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index a06b0b4..f794781 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -39,6 +39,13 @@
 static struct kmem_cache *f2fs_inode_cachep;
 static struct kset *f2fs_kset;
 
+/* f2fs-wide shrinker description */
+static struct shrinker f2fs_shrinker_info = {
+	.scan_objects = f2fs_shrink_scan,
+	.count_objects = f2fs_shrink_count,
+	.seeks = DEFAULT_SEEKS,
+};
+
 enum {
 	Opt_gc_background,
 	Opt_disable_roll_forward,
@@ -58,6 +65,7 @@
 	Opt_nobarrier,
 	Opt_fastboot,
 	Opt_extent_cache,
+	Opt_noextent_cache,
 	Opt_noinline_data,
 	Opt_err,
 };
@@ -81,6 +89,7 @@
 	{Opt_nobarrier, "nobarrier"},
 	{Opt_fastboot, "fastboot"},
 	{Opt_extent_cache, "extent_cache"},
+	{Opt_noextent_cache, "noextent_cache"},
 	{Opt_noinline_data, "noinline_data"},
 	{Opt_err, NULL},
 };
@@ -382,6 +391,9 @@
 		case Opt_extent_cache:
 			set_opt(sbi, EXTENT_CACHE);
 			break;
+		case Opt_noextent_cache:
+			clear_opt(sbi, EXTENT_CACHE);
+			break;
 		case Opt_noinline_data:
 			clear_opt(sbi, INLINE_DATA);
 			break;
@@ -410,9 +422,7 @@
 	atomic_set(&fi->dirty_pages, 0);
 	fi->i_current_depth = 1;
 	fi->i_advise = 0;
-	rwlock_init(&fi->ext_lock);
 	init_rwsem(&fi->i_sem);
-	INIT_RADIX_TREE(&fi->inmem_root, GFP_NOFS);
 	INIT_LIST_HEAD(&fi->inmem_pages);
 	mutex_init(&fi->inmem_lock);
 
@@ -441,17 +451,22 @@
 	 */
 	if (!inode_unhashed(inode) && inode->i_state & I_SYNC) {
 		if (!inode->i_nlink && !is_bad_inode(inode)) {
+			/* to avoid a simultaneous evict_inode call */
+			atomic_inc(&inode->i_count);
 			spin_unlock(&inode->i_lock);
 
 			/* some remaining atomic pages should be discarded */
 			if (f2fs_is_atomic_file(inode))
 				commit_inmem_pages(inode, true);
 
+			/* fi->extent_tree must remain for writepage */
+			f2fs_destroy_extent_node(inode);
+
 			sb_start_intwrite(inode->i_sb);
 			i_size_write(inode, 0);
 
 			if (F2FS_HAS_BLOCKS(inode))
-				f2fs_truncate(inode);
+				f2fs_truncate(inode, true);
 
 			sb_end_intwrite(inode->i_sb);
 
@@ -461,6 +476,7 @@
 					F2FS_I(inode)->i_crypt_info);
 #endif
 			spin_lock(&inode->i_lock);
+			atomic_dec(&inode->i_count);
 		}
 		return 0;
 	}
@@ -498,9 +514,11 @@
 	}
 	kobject_del(&sbi->s_kobj);
 
-	f2fs_destroy_stats(sbi);
 	stop_gc_thread(sbi);
 
+	/* prevent remaining shrinker jobs */
+	mutex_lock(&sbi->umount_mutex);
+
 	/*
 	 * We don't need to do checkpoint when superblock is clean.
 	 * But, the previous checkpoint was not done by umount, it needs to do
@@ -514,6 +532,9 @@
 		write_checkpoint(sbi, &cpc);
 	}
 
+	/* write_checkpoint can update stat information */
+	f2fs_destroy_stats(sbi);
+
 	/*
 	 * normally superblock is clean, so we need to release this.
 	 * In addition, EIO will skip do checkpoint, we need this as well.
@@ -521,6 +542,9 @@
 	release_dirty_inode(sbi);
 	release_discard_addrs(sbi);
 
+	f2fs_leave_shrinker(sbi);
+	mutex_unlock(&sbi->umount_mutex);
+
 	iput(sbi->node_inode);
 	iput(sbi->meta_inode);
 
@@ -647,6 +671,8 @@
 		seq_puts(seq, ",fastboot");
 	if (test_opt(sbi, EXTENT_CACHE))
 		seq_puts(seq, ",extent_cache");
+	else
+		seq_puts(seq, ",noextent_cache");
 	seq_printf(seq, ",active_logs=%u", sbi->active_logs);
 
 	return 0;
@@ -667,7 +693,7 @@
 		struct seg_entry *se = get_seg_entry(sbi, i);
 
 		if ((i % 10) == 0)
-			seq_printf(seq, "%-5d", i);
+			seq_printf(seq, "%-10d", i);
 		seq_printf(seq, "%d|%-3u", se->type,
 					get_valid_blocks(sbi, i, 1));
 		if ((i % 10) == 9 || i == (total_segs - 1))
@@ -699,6 +725,7 @@
 
 	set_opt(sbi, BG_GC);
 	set_opt(sbi, INLINE_DATA);
+	set_opt(sbi, EXTENT_CACHE);
 
 #ifdef CONFIG_F2FS_FS_XATTR
 	set_opt(sbi, XATTR_USER);
@@ -970,6 +997,9 @@
 
 	sbi->dir_level = DEF_DIR_LEVEL;
 	clear_sbi_flag(sbi, SBI_NEED_FSCK);
+
+	INIT_LIST_HEAD(&sbi->s_list);
+	mutex_init(&sbi->umount_mutex);
 }
 
 /*
@@ -1135,7 +1165,9 @@
 	mutex_init(&sbi->writepages);
 	mutex_init(&sbi->cp_mutex);
 	init_rwsem(&sbi->node_write);
-	clear_sbi_flag(sbi, SBI_POR_DOING);
+
+	/* disallow all the data/node/meta page writes */
+	set_sbi_flag(sbi, SBI_POR_DOING);
 	spin_lock_init(&sbi->stat_lock);
 
 	init_rwsem(&sbi->read_io.io_rwsem);
@@ -1212,8 +1244,12 @@
 		goto free_nm;
 	}
 
+	f2fs_join_shrinker(sbi);
+
 	/* if there are any orphan nodes, free them */
-	recover_orphan_inodes(sbi);
+	err = recover_orphan_inodes(sbi);
+	if (err)
+		goto free_node_inode;
 
 	/* read root inode and dentry */
 	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
@@ -1275,6 +1311,8 @@
 			goto free_kobj;
 		}
 	}
+	/* recover_fsync_data() cleared this already */
+	clear_sbi_flag(sbi, SBI_POR_DOING);
 
 	/*
 	 * If filesystem is not mounted as read-only then
@@ -1308,7 +1346,10 @@
 	dput(sb->s_root);
 	sb->s_root = NULL;
 free_node_inode:
+	mutex_lock(&sbi->umount_mutex);
+	f2fs_leave_shrinker(sbi);
 	iput(sbi->node_inode);
+	mutex_unlock(&sbi->umount_mutex);
 free_nm:
 	destroy_node_manager(sbi);
 free_sm:
@@ -1404,13 +1445,20 @@
 	err = f2fs_init_crypto();
 	if (err)
 		goto free_kset;
-	err = register_filesystem(&f2fs_fs_type);
+
+	err = register_shrinker(&f2fs_shrinker_info);
 	if (err)
 		goto free_crypto;
+
+	err = register_filesystem(&f2fs_fs_type);
+	if (err)
+		goto free_shrinker;
 	f2fs_create_root_stats();
 	f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
 	return 0;
 
+free_shrinker:
+	unregister_shrinker(&f2fs_shrinker_info);
 free_crypto:
 	f2fs_exit_crypto();
 free_kset:
@@ -1433,6 +1481,7 @@
 {
 	remove_proc_entry("fs/f2fs", NULL);
 	f2fs_destroy_root_stats();
+	unregister_shrinker(&f2fs_shrinker_info);
 	unregister_filesystem(&f2fs_fs_type);
 	f2fs_exit_crypto();
 	destroy_extent_cache();
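
default_options() now sets EXTENT_CACHE, so the extent cache is on by default
and the new noextent_cache option is the explicit off switch; show_options
prints whichever is in effect. A hypothetical mount(2) call disabling it
(device and mountpoint are placeholders):

  #include <sys/mount.h>

  /* placeholders: substitute the real device and mountpoint */
  static int mount_without_extent_cache(void)
  {
  	return mount("/dev/sdb1", "/mnt/f2fs", "f2fs", 0, "noextent_cache");
  }
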
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 07449b98..4de2286 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -499,9 +499,12 @@
 
 	len = strlen(name);
 
-	if (len > F2FS_NAME_LEN || size > MAX_VALUE_LEN(inode))
+	if (len > F2FS_NAME_LEN)
 		return -ERANGE;
 
+	if (size > MAX_VALUE_LEN(inode))
+		return -E2BIG;
+
 	base_addr = read_all_xattrs(inode, ipage);
 	if (!base_addr)
 		goto exit;
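
The xattr change splits the two failure modes: an over-long attribute name
still yields -ERANGE, while an over-size value now yields -E2BIG. A userspace
sketch telling them apart:

  #include <errno.h>
  #include <stdio.h>
  #include <sys/xattr.h>

  static void set_attr(const char *path, const char *name,
  		     const void *value, size_t size)
  {
  	if (setxattr(path, name, value, size, 0) < 0) {
  		if (errno == ERANGE)
  			fprintf(stderr, "attribute name too long\n");
  		else if (errno == E2BIG)
  			fprintf(stderr, "attribute value too large\n");
  		else
  			perror("setxattr");
  	}
  }
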
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 2982445..894fb01 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1334,11 +1334,11 @@
 	if (is_ancestor(root, sdp->sd_master_dir))
 		seq_puts(s, ",meta");
 	if (args->ar_lockproto[0])
-		seq_printf(s, ",lockproto=%s", args->ar_lockproto);
+		seq_show_option(s, "lockproto", args->ar_lockproto);
 	if (args->ar_locktable[0])
-		seq_printf(s, ",locktable=%s", args->ar_locktable);
+		seq_show_option(s, "locktable", args->ar_locktable);
 	if (args->ar_hostdata[0])
-		seq_printf(s, ",hostdata=%s", args->ar_hostdata);
+		seq_show_option(s, "hostdata", args->ar_hostdata);
 	if (args->ar_spectator)
 		seq_puts(s, ",spectator");
 	if (args->ar_localflocks)
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 55c03b9..4574fdd 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -136,9 +136,9 @@
 	struct hfs_sb_info *sbi = HFS_SB(root->d_sb);
 
 	if (sbi->s_creator != cpu_to_be32(0x3f3f3f3f))
-		seq_printf(seq, ",creator=%.4s", (char *)&sbi->s_creator);
+		seq_show_option_n(seq, "creator", (char *)&sbi->s_creator, 4);
 	if (sbi->s_type != cpu_to_be32(0x3f3f3f3f))
-		seq_printf(seq, ",type=%.4s", (char *)&sbi->s_type);
+		seq_show_option_n(seq, "type", (char *)&sbi->s_type, 4);
 	seq_printf(seq, ",uid=%u,gid=%u",
 			from_kuid_munged(&init_user_ns, sbi->s_uid),
 			from_kgid_munged(&init_user_ns, sbi->s_gid));
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index c90b72e..bb806e5 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -218,9 +218,9 @@
 	struct hfsplus_sb_info *sbi = HFSPLUS_SB(root->d_sb);
 
 	if (sbi->creator != HFSPLUS_DEF_CR_TYPE)
-		seq_printf(seq, ",creator=%.4s", (char *)&sbi->creator);
+		seq_show_option_n(seq, "creator", (char *)&sbi->creator, 4);
 	if (sbi->type != HFSPLUS_DEF_CR_TYPE)
-		seq_printf(seq, ",type=%.4s", (char *)&sbi->type);
+		seq_show_option_n(seq, "type", (char *)&sbi->type, 4);
 	seq_printf(seq, ",umask=%o,uid=%u,gid=%u", sbi->umask,
 			from_kuid_munged(&init_user_ns, sbi->uid),
 			from_kgid_munged(&init_user_ns, sbi->gid));
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 059597b..2ac99db 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -260,7 +260,7 @@
 	size_t offset = strlen(root_ino) + 1;
 
 	if (strlen(root_path) > offset)
-		seq_printf(seq, ",%s", root_path + offset);
+		seq_show_option(seq, root_path + offset, NULL);
 
 	if (append)
 		seq_puts(seq, ",append");
diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c
index 8057fe4..f626114 100644
--- a/fs/hpfs/buffer.c
+++ b/fs/hpfs/buffer.c
@@ -10,6 +10,30 @@
 #include <linux/blkdev.h>
 #include "hpfs_fn.h"
 
+secno hpfs_search_hotfix_map(struct super_block *s, secno sec)
+{
+	unsigned i;
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
+		if (sbi->hotfix_from[i] == sec) {
+			return sbi->hotfix_to[i];
+		}
+	}
+	return sec;
+}
+
+unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n)
+{
+	unsigned i;
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
+		if (sbi->hotfix_from[i] >= sec && sbi->hotfix_from[i] < sec + n) {
+			n = sbi->hotfix_from[i] - sec;
+		}
+	}
+	return n;
+}
+
 void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n)
 {
 	struct buffer_head *bh;
@@ -18,6 +42,9 @@
 	if (n <= 0 || unlikely(secno >= hpfs_sb(s)->sb_fs_size))
 		return;
 
+	if (unlikely(hpfs_search_hotfix_map_for_range(s, secno, n) != n))
+		return;
+
 	bh = sb_find_get_block(s, secno);
 	if (bh) {
 		if (buffer_uptodate(bh)) {
@@ -51,7 +78,7 @@
 
 	cond_resched();
 
-	*bhp = bh = sb_bread(s, secno);
+	*bhp = bh = sb_bread(s, hpfs_search_hotfix_map(s, secno));
 	if (bh != NULL)
 		return bh->b_data;
 	else {
@@ -71,7 +98,7 @@
 
 	cond_resched();
 
-	if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
+	if ((*bhp = bh = sb_getblk(s, hpfs_search_hotfix_map(s, secno))) != NULL) {
 		if (!buffer_uptodate(bh)) wait_on_buffer(bh);
 		set_buffer_uptodate(bh);
 		return bh->b_data;
@@ -99,10 +126,10 @@
 
 	hpfs_prefetch_sectors(s, secno, 4 + ahead);
 
-	if (!(qbh->bh[0] = sb_bread(s, secno + 0))) goto bail0;
-	if (!(qbh->bh[1] = sb_bread(s, secno + 1))) goto bail1;
-	if (!(qbh->bh[2] = sb_bread(s, secno + 2))) goto bail2;
-	if (!(qbh->bh[3] = sb_bread(s, secno + 3))) goto bail3;
+	if (!hpfs_map_sector(s, secno + 0, &qbh->bh[0], 0)) goto bail0;
+	if (!hpfs_map_sector(s, secno + 1, &qbh->bh[1], 0)) goto bail1;
+	if (!hpfs_map_sector(s, secno + 2, &qbh->bh[2], 0)) goto bail2;
+	if (!hpfs_map_sector(s, secno + 3, &qbh->bh[3], 0)) goto bail3;
 
 	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
 	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
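
Note: HPFS keeps a hotfix map of bad sectors that chkdsk has remapped to spare sectors. hpfs_search_hotfix_map() redirects a single sector through that map, and hpfs_search_hotfix_map_for_range() clips a multi-sector run at the first remapped sector so contiguous I/O never crosses a remapping (a clipped length of 0 means the starting sector itself is remapped). A sketch of consuming a run with these helpers, mirroring the hpfs_get_block logic in the next hunk; read_one()/read_many() are stand-ins, not functions from this diff:

	static void read_run(struct super_block *s, secno start, unsigned n)
	{
		while (n) {
			unsigned chunk = hpfs_search_hotfix_map_for_range(s, start, n);

			if (!chunk) {
				/* 'start' is hotfixed: read its replacement alone */
				read_one(s, hpfs_search_hotfix_map(s, start));
				chunk = 1;
			} else {
				/* contiguous, unmapped stretch */
				read_many(s, start, chunk);
			}
			start += chunk;
			n -= chunk;
		}
	}
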
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 7ca28d6..d3bcdd9 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -83,6 +83,11 @@
 	if (s) {
 		if (bh_result->b_size >> 9 < n_secs)
 			n_secs = bh_result->b_size >> 9;
+		n_secs = hpfs_search_hotfix_map_for_range(inode->i_sb, s, n_secs);
+		if (unlikely(!n_secs)) {
+			s = hpfs_search_hotfix_map(inode->i_sb, s);
+			n_secs = 1;
+		}
 		map_bh(bh_result, inode->i_sb, s);
 		bh_result->b_size = n_secs << 9;
 		goto ret_0;
@@ -101,7 +106,7 @@
 	inode->i_blocks++;
 	hpfs_i(inode)->mmu_private += 512;
 	set_buffer_new(bh_result);
-	map_bh(bh_result, inode->i_sb, s);
+	map_bh(bh_result, inode->i_sb, hpfs_search_hotfix_map(inode->i_sb, s));
 	ret_0:
 	r = 0;
 	ret_r:
@@ -181,7 +186,7 @@
 
 static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
 {
-	return generic_block_bmap(mapping,block,hpfs_get_block);
+	return generic_block_bmap(mapping, block, hpfs_get_block);
 }
 
 const struct address_space_operations hpfs_aops = {
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index c4867b5..975654a 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -88,6 +88,10 @@
 	unsigned sb_max_fwd_alloc;	/* max forward allocation */
 	int sb_timeshift;
 	struct rcu_head rcu;
+
+	unsigned n_hotfixes;
+	secno hotfix_from[256];
+	secno hotfix_to[256];
 };
 
 /* Four 512-byte buffers and the 2k block obtained by concatenating them */
@@ -217,6 +221,8 @@
 
 /* buffer.c */
 
+secno hpfs_search_hotfix_map(struct super_block *s, secno sec);
+unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n);
 void hpfs_prefetch_sectors(struct super_block *, unsigned, int);
 void *hpfs_map_sector(struct super_block *, unsigned, struct buffer_head **, int);
 void *hpfs_get_sector(struct super_block *, unsigned, struct buffer_head **);
@@ -285,6 +291,7 @@
 void hpfs_prefetch_bitmap(struct super_block *, unsigned);
 unsigned char *hpfs_load_code_page(struct super_block *, secno);
 __le32 *hpfs_load_bitmap_directory(struct super_block *, secno bmp);
+void hpfs_load_hotfix_map(struct super_block *s, struct hpfs_spare_block *spareblock);
 struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **);
 struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct buffer_head **);
 struct dnode *hpfs_map_dnode(struct super_block *s, dnode_secno, struct quad_buffer_head *);
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index 442770e..a69bbc1 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -130,6 +130,32 @@
 	return b;
 }
 
+void hpfs_load_hotfix_map(struct super_block *s, struct hpfs_spare_block *spareblock)
+{
+	struct quad_buffer_head qbh;
+	u32 *directory;
+	u32 n_hotfixes, n_used_hotfixes;
+	unsigned i;
+
+	n_hotfixes = le32_to_cpu(spareblock->n_spares);
+	n_used_hotfixes = le32_to_cpu(spareblock->n_spares_used);
+
+	if (n_hotfixes > 256 || n_used_hotfixes > n_hotfixes) {
+		hpfs_error(s, "invalid number of hotfixes: %u, used: %u", n_hotfixes, n_used_hotfixes);
+		return;
+	}
+	if (!(directory = hpfs_map_4sectors(s, le32_to_cpu(spareblock->hotfix_map), &qbh, 0))) {
+		hpfs_error(s, "can't load hotfix map");
+		return;
+	}
+	for (i = 0; i < n_used_hotfixes; i++) {
+		hpfs_sb(s)->hotfix_from[i] = le32_to_cpu(directory[i]);
+		hpfs_sb(s)->hotfix_to[i] = le32_to_cpu(directory[n_hotfixes + i]);
+	}
+	hpfs_sb(s)->n_hotfixes = n_used_hotfixes;
+	hpfs_brelse4(&qbh);
+}
+
 /*
  * Load fnode to memory
  */
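
Note: the loader reads the 2 KiB hotfix directory referenced by the spare block and splits it into the two parallel arrays used by the lookup helpers. The layout implied by the indexing above (inferred from this code, not quoted from an HPFS specification):

	/*
	 * directory[0 .. n_hotfixes-1]             bad sectors ("from")
	 * directory[n_hotfixes .. 2*n_hotfixes-1]  replacement sectors ("to")
	 *
	 * Entries are little-endian u32 sector numbers; only the first
	 * n_spares_used pairs are live, and the table is capped at 256.
	 */
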
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index a0872f2..9e92c9c 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -8,6 +8,17 @@
 #include <linux/sched.h>
 #include "hpfs_fn.h"
 
+static void hpfs_update_directory_times(struct inode *dir)
+{
+	time_t t = get_seconds();
+	if (t == dir->i_mtime.tv_sec &&
+	    t == dir->i_ctime.tv_sec)
+		return;
+	dir->i_mtime.tv_sec = dir->i_ctime.tv_sec = t;
+	dir->i_mtime.tv_nsec = dir->i_ctime.tv_nsec = 0;
+	hpfs_write_inode_nolock(dir);
+}
+
 static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	const unsigned char *name = dentry->d_name.name;
@@ -99,6 +110,7 @@
 		result->i_mode = mode | S_IFDIR;
 		hpfs_write_inode_nolock(result);
 	}
+	hpfs_update_directory_times(dir);
 	d_instantiate(dentry, result);
 	hpfs_unlock(dir->i_sb);
 	return 0;
@@ -187,6 +199,7 @@
 		result->i_mode = mode | S_IFREG;
 		hpfs_write_inode_nolock(result);
 	}
+	hpfs_update_directory_times(dir);
 	d_instantiate(dentry, result);
 	hpfs_unlock(dir->i_sb);
 	return 0;
@@ -262,6 +275,7 @@
 	insert_inode_hash(result);
 
 	hpfs_write_inode_nolock(result);
+	hpfs_update_directory_times(dir);
 	d_instantiate(dentry, result);
 	brelse(bh);
 	hpfs_unlock(dir->i_sb);
@@ -340,6 +354,7 @@
 	insert_inode_hash(result);
 
 	hpfs_write_inode_nolock(result);
+	hpfs_update_directory_times(dir);
 	d_instantiate(dentry, result);
 	hpfs_unlock(dir->i_sb);
 	return 0;
@@ -423,6 +438,8 @@
 out1:
 	hpfs_brelse4(&qbh);
 out:
+	if (!err)
+		hpfs_update_directory_times(dir);
 	hpfs_unlock(dir->i_sb);
 	return err;
 }
@@ -477,6 +494,8 @@
 out1:
 	hpfs_brelse4(&qbh);
 out:
+	if (!err)
+		hpfs_update_directory_times(dir);
 	hpfs_unlock(dir->i_sb);
 	return err;
 }
@@ -595,7 +614,7 @@
 		goto end1;
 	}
 
-	end:
+end:
 	hpfs_i(i)->i_parent_dir = new_dir->i_ino;
 	if (S_ISDIR(i->i_mode)) {
 		inc_nlink(new_dir);
@@ -610,6 +629,10 @@
 		brelse(bh);
 	}
 end1:
+	if (!err) {
+		hpfs_update_directory_times(old_dir);
+		hpfs_update_directory_times(new_dir);
+	}
 	hpfs_unlock(i->i_sb);
 	return err;
 }
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 68a9bed..a561591 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -628,6 +628,9 @@
 		goto bail4;
 	}
 
+	if (spareblock->n_spares_used)
+		hpfs_load_hotfix_map(s, spareblock);
+
 	/* Load bitmap directory */
 	if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps))))
 		goto bail4;
@@ -647,18 +650,6 @@
 		mark_buffer_dirty(bh2);
 	}
 
-	if (spareblock->hotfixes_used || spareblock->n_spares_used) {
-		if (errs >= 2) {
-			pr_err("Hotfixes not supported here, try chkdsk\n");
-			mark_dirty(s, 0);
-			goto bail4;
-		}
-		hpfs_error(s, "hotfixes not supported here, try chkdsk");
-		if (errs == 0)
-			pr_err("Proceeding, but your filesystem will be probably corrupted by this driver...\n");
-		else
-			pr_err("This driver may read bad files or crash when operating on disk with hotfixes.\n");
-	}
 	if (le32_to_cpu(spareblock->n_dnode_spares) != le32_to_cpu(spareblock->n_dnode_spares_free)) {
 		if (errs >= 2) {
 			pr_err("Spare dnodes used, try chkdsk\n");
diff --git a/fs/jbd/Kconfig b/fs/jbd/Kconfig
deleted file mode 100644
index 4e28bee..0000000
--- a/fs/jbd/Kconfig
+++ /dev/null
@@ -1,30 +0,0 @@
-config JBD
-	tristate
-	help
-	  This is a generic journalling layer for block devices.  It is
-	  currently used by the ext3 file system, but it could also be
-	  used to add journal support to other file systems or block
-	  devices such as RAID or LVM.
-
-	  If you are using the ext3 file system, you need to say Y here.
-	  If you are not using ext3 then you will probably want to say N.
-
-	  To compile this device as a module, choose M here: the module will be
-	  called jbd.  If you are compiling ext3 into the kernel, you
-	  cannot compile this code as a module.
-
-config JBD_DEBUG
-	bool "JBD (ext3) debugging support"
-	depends on JBD && DEBUG_FS
-	help
-	  If you are using the ext3 journaled file system (or potentially any
-	  other file system/device using JBD), this option allows you to
-	  enable debugging output while the system is running, in order to
-	  help track down any problems you are having.  By default the
-	  debugging output will be turned off.
-
-	  If you select Y here, then you will be able to turn on debugging
-	  with "echo N > /sys/kernel/debug/jbd/jbd-debug", where N is a
-	  number between 1 and 5, the higher the number, the more debugging
-	  output is generated.  To turn debugging off again, do
-	  "echo 0 > /sys/kernel/debug/jbd/jbd-debug".
diff --git a/fs/jbd/Makefile b/fs/jbd/Makefile
deleted file mode 100644
index 54aca48..0000000
--- a/fs/jbd/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for the linux journaling routines.
-#
-
-obj-$(CONFIG_JBD) += jbd.o
-
-jbd-objs := transaction.o commit.o recovery.o checkpoint.o revoke.o journal.o
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
deleted file mode 100644
index 08c0304..0000000
--- a/fs/jbd/checkpoint.c
+++ /dev/null
@@ -1,782 +0,0 @@
-/*
- * linux/fs/jbd/checkpoint.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
- *
- * Copyright 1999 Red Hat Software --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Checkpoint routines for the generic filesystem journaling code.
- * Part of the ext2fs journaling system.
- *
- * Checkpointing is the process of ensuring that a section of the log is
- * committed fully to disk, so that that portion of the log can be
- * reused.
- */
-
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <trace/events/jbd.h>
-
-/*
- * Unlink a buffer from a transaction checkpoint list.
- *
- * Called with j_list_lock held.
- */
-static inline void __buffer_unlink_first(struct journal_head *jh)
-{
-	transaction_t *transaction = jh->b_cp_transaction;
-
-	jh->b_cpnext->b_cpprev = jh->b_cpprev;
-	jh->b_cpprev->b_cpnext = jh->b_cpnext;
-	if (transaction->t_checkpoint_list == jh) {
-		transaction->t_checkpoint_list = jh->b_cpnext;
-		if (transaction->t_checkpoint_list == jh)
-			transaction->t_checkpoint_list = NULL;
-	}
-}
-
-/*
- * Unlink a buffer from a transaction checkpoint(io) list.
- *
- * Called with j_list_lock held.
- */
-static inline void __buffer_unlink(struct journal_head *jh)
-{
-	transaction_t *transaction = jh->b_cp_transaction;
-
-	__buffer_unlink_first(jh);
-	if (transaction->t_checkpoint_io_list == jh) {
-		transaction->t_checkpoint_io_list = jh->b_cpnext;
-		if (transaction->t_checkpoint_io_list == jh)
-			transaction->t_checkpoint_io_list = NULL;
-	}
-}
-
-/*
- * Move a buffer from the checkpoint list to the checkpoint io list
- *
- * Called with j_list_lock held
- */
-static inline void __buffer_relink_io(struct journal_head *jh)
-{
-	transaction_t *transaction = jh->b_cp_transaction;
-
-	__buffer_unlink_first(jh);
-
-	if (!transaction->t_checkpoint_io_list) {
-		jh->b_cpnext = jh->b_cpprev = jh;
-	} else {
-		jh->b_cpnext = transaction->t_checkpoint_io_list;
-		jh->b_cpprev = transaction->t_checkpoint_io_list->b_cpprev;
-		jh->b_cpprev->b_cpnext = jh;
-		jh->b_cpnext->b_cpprev = jh;
-	}
-	transaction->t_checkpoint_io_list = jh;
-}
-
-/*
- * Try to release a checkpointed buffer from its transaction.
- * Returns 1 if we released it and 2 if we also released the
- * whole transaction.
- *
- * Requires j_list_lock
- * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
- */
-static int __try_to_free_cp_buf(struct journal_head *jh)
-{
-	int ret = 0;
-	struct buffer_head *bh = jh2bh(jh);
-
-	if (jh->b_jlist == BJ_None && !buffer_locked(bh) &&
-	    !buffer_dirty(bh) && !buffer_write_io_error(bh)) {
-		/*
-		 * Get our reference so that bh cannot be freed before
-		 * we unlock it
-		 */
-		get_bh(bh);
-		JBUFFER_TRACE(jh, "remove from checkpoint list");
-		ret = __journal_remove_checkpoint(jh) + 1;
-		jbd_unlock_bh_state(bh);
-		BUFFER_TRACE(bh, "release");
-		__brelse(bh);
-	} else {
-		jbd_unlock_bh_state(bh);
-	}
-	return ret;
-}
-
-/*
- * __log_wait_for_space: wait until there is space in the journal.
- *
- * Called under j_state_lock *only*.  It will be unlocked if we have to wait
- * for a checkpoint to free up some space in the log.
- */
-void __log_wait_for_space(journal_t *journal)
-{
-	int nblocks, space_left;
-	assert_spin_locked(&journal->j_state_lock);
-
-	nblocks = jbd_space_needed(journal);
-	while (__log_space_left(journal) < nblocks) {
-		if (journal->j_flags & JFS_ABORT)
-			return;
-		spin_unlock(&journal->j_state_lock);
-		mutex_lock(&journal->j_checkpoint_mutex);
-
-		/*
-		 * Test again, another process may have checkpointed while we
-		 * were waiting for the checkpoint lock. If there are no
-		 * transactions ready to be checkpointed, try to recover
-		 * journal space by calling cleanup_journal_tail(), and if
-		 * that doesn't work, by waiting for the currently committing
-		 * transaction to complete.  If there is absolutely no way
-		 * to make progress, this is either a BUG or corrupted
-		 * filesystem, so abort the journal and leave a stack
-		 * trace for forensic evidence.
-		 */
-		spin_lock(&journal->j_state_lock);
-		spin_lock(&journal->j_list_lock);
-		nblocks = jbd_space_needed(journal);
-		space_left = __log_space_left(journal);
-		if (space_left < nblocks) {
-			int chkpt = journal->j_checkpoint_transactions != NULL;
-			tid_t tid = 0;
-
-			if (journal->j_committing_transaction)
-				tid = journal->j_committing_transaction->t_tid;
-			spin_unlock(&journal->j_list_lock);
-			spin_unlock(&journal->j_state_lock);
-			if (chkpt) {
-				log_do_checkpoint(journal);
-			} else if (cleanup_journal_tail(journal) == 0) {
-				/* We were able to recover space; yay! */
-				;
-			} else if (tid) {
-				log_wait_commit(journal, tid);
-			} else {
-				printk(KERN_ERR "%s: needed %d blocks and "
-				       "only had %d space available\n",
-				       __func__, nblocks, space_left);
-				printk(KERN_ERR "%s: no way to get more "
-				       "journal space\n", __func__);
-				WARN_ON(1);
-				journal_abort(journal, 0);
-			}
-			spin_lock(&journal->j_state_lock);
-		} else {
-			spin_unlock(&journal->j_list_lock);
-		}
-		mutex_unlock(&journal->j_checkpoint_mutex);
-	}
-}
-
-/*
- * We were unable to perform jbd_trylock_bh_state() inside j_list_lock.
- * The caller must restart a list walk.  Wait for someone else to run
- * jbd_unlock_bh_state().
- */
-static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh)
-	__releases(journal->j_list_lock)
-{
-	get_bh(bh);
-	spin_unlock(&journal->j_list_lock);
-	jbd_lock_bh_state(bh);
-	jbd_unlock_bh_state(bh);
-	put_bh(bh);
-}
-
-/*
- * Clean up transaction's list of buffers submitted for io.
- * We wait for any pending IO to complete and remove any clean
- * buffers. Note that we take the buffers in the opposite ordering
- * from the one in which they were submitted for IO.
- *
- * Return 0 on success, and return <0 if some buffers have failed
- * to be written out.
- *
- * Called with j_list_lock held.
- */
-static int __wait_cp_io(journal_t *journal, transaction_t *transaction)
-{
-	struct journal_head *jh;
-	struct buffer_head *bh;
-	tid_t this_tid;
-	int released = 0;
-	int ret = 0;
-
-	this_tid = transaction->t_tid;
-restart:
-	/* Did somebody clean up the transaction in the meanwhile? */
-	if (journal->j_checkpoint_transactions != transaction ||
-			transaction->t_tid != this_tid)
-		return ret;
-	while (!released && transaction->t_checkpoint_io_list) {
-		jh = transaction->t_checkpoint_io_list;
-		bh = jh2bh(jh);
-		if (!jbd_trylock_bh_state(bh)) {
-			jbd_sync_bh(journal, bh);
-			spin_lock(&journal->j_list_lock);
-			goto restart;
-		}
-		get_bh(bh);
-		if (buffer_locked(bh)) {
-			spin_unlock(&journal->j_list_lock);
-			jbd_unlock_bh_state(bh);
-			wait_on_buffer(bh);
-			/* the journal_head may have gone by now */
-			BUFFER_TRACE(bh, "brelse");
-			__brelse(bh);
-			spin_lock(&journal->j_list_lock);
-			goto restart;
-		}
-		if (unlikely(buffer_write_io_error(bh)))
-			ret = -EIO;
-
-		/*
-		 * Now in whatever state the buffer currently is, we know that
-		 * it has been written out and so we can drop it from the list
-		 */
-		released = __journal_remove_checkpoint(jh);
-		jbd_unlock_bh_state(bh);
-		__brelse(bh);
-	}
-
-	return ret;
-}
-
-#define NR_BATCH	64
-
-static void
-__flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
-{
-	int i;
-	struct blk_plug plug;
-
-	blk_start_plug(&plug);
-	for (i = 0; i < *batch_count; i++)
-		write_dirty_buffer(bhs[i], WRITE_SYNC);
-	blk_finish_plug(&plug);
-
-	for (i = 0; i < *batch_count; i++) {
-		struct buffer_head *bh = bhs[i];
-		clear_buffer_jwrite(bh);
-		BUFFER_TRACE(bh, "brelse");
-		__brelse(bh);
-	}
-	*batch_count = 0;
-}
-
-/*
- * Try to flush one buffer from the checkpoint list to disk.
- *
- * Return 1 if something happened which requires us to abort the current
- * scan of the checkpoint list.  Return <0 if the buffer has failed to
- * be written out.
- *
- * Called with j_list_lock held and drops it if 1 is returned
- * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
- */
-static int __process_buffer(journal_t *journal, struct journal_head *jh,
-			struct buffer_head **bhs, int *batch_count)
-{
-	struct buffer_head *bh = jh2bh(jh);
-	int ret = 0;
-
-	if (buffer_locked(bh)) {
-		get_bh(bh);
-		spin_unlock(&journal->j_list_lock);
-		jbd_unlock_bh_state(bh);
-		wait_on_buffer(bh);
-		/* the journal_head may have gone by now */
-		BUFFER_TRACE(bh, "brelse");
-		__brelse(bh);
-		ret = 1;
-	} else if (jh->b_transaction != NULL) {
-		transaction_t *t = jh->b_transaction;
-		tid_t tid = t->t_tid;
-
-		spin_unlock(&journal->j_list_lock);
-		jbd_unlock_bh_state(bh);
-		log_start_commit(journal, tid);
-		log_wait_commit(journal, tid);
-		ret = 1;
-	} else if (!buffer_dirty(bh)) {
-		ret = 1;
-		if (unlikely(buffer_write_io_error(bh)))
-			ret = -EIO;
-		get_bh(bh);
-		J_ASSERT_JH(jh, !buffer_jbddirty(bh));
-		BUFFER_TRACE(bh, "remove from checkpoint");
-		__journal_remove_checkpoint(jh);
-		spin_unlock(&journal->j_list_lock);
-		jbd_unlock_bh_state(bh);
-		__brelse(bh);
-	} else {
-		/*
-		 * Important: we are about to write the buffer, and
-		 * possibly block, while still holding the journal lock.
-		 * We cannot afford to let the transaction logic start
-		 * messing around with this buffer before we write it to
-		 * disk, as that would break recoverability.
-		 */
-		BUFFER_TRACE(bh, "queue");
-		get_bh(bh);
-		J_ASSERT_BH(bh, !buffer_jwrite(bh));
-		set_buffer_jwrite(bh);
-		bhs[*batch_count] = bh;
-		__buffer_relink_io(jh);
-		jbd_unlock_bh_state(bh);
-		(*batch_count)++;
-		if (*batch_count == NR_BATCH) {
-			spin_unlock(&journal->j_list_lock);
-			__flush_batch(journal, bhs, batch_count);
-			ret = 1;
-		}
-	}
-	return ret;
-}
-
-/*
- * Perform an actual checkpoint. We take the first transaction on the
- * list of transactions to be checkpointed and send all its buffers
- * to disk. We submit larger chunks of data at once.
- *
- * The journal should be locked before calling this function.
- * Called with j_checkpoint_mutex held.
- */
-int log_do_checkpoint(journal_t *journal)
-{
-	transaction_t *transaction;
-	tid_t this_tid;
-	int result;
-
-	jbd_debug(1, "Start checkpoint\n");
-
-	/*
-	 * First thing: if there are any transactions in the log which
-	 * don't need checkpointing, just eliminate them from the
-	 * journal straight away.
-	 */
-	result = cleanup_journal_tail(journal);
-	trace_jbd_checkpoint(journal, result);
-	jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
-	if (result <= 0)
-		return result;
-
-	/*
-	 * OK, we need to start writing disk blocks.  Take one transaction
-	 * and write it.
-	 */
-	result = 0;
-	spin_lock(&journal->j_list_lock);
-	if (!journal->j_checkpoint_transactions)
-		goto out;
-	transaction = journal->j_checkpoint_transactions;
-	this_tid = transaction->t_tid;
-restart:
-	/*
-	 * If someone cleaned up this transaction while we slept, we're
-	 * done (maybe it's a new transaction, but it fell at the same
-	 * address).
-	 */
-	if (journal->j_checkpoint_transactions == transaction &&
-			transaction->t_tid == this_tid) {
-		int batch_count = 0;
-		struct buffer_head *bhs[NR_BATCH];
-		struct journal_head *jh;
-		int retry = 0, err;
-
-		while (!retry && transaction->t_checkpoint_list) {
-			struct buffer_head *bh;
-
-			jh = transaction->t_checkpoint_list;
-			bh = jh2bh(jh);
-			if (!jbd_trylock_bh_state(bh)) {
-				jbd_sync_bh(journal, bh);
-				retry = 1;
-				break;
-			}
-			retry = __process_buffer(journal, jh, bhs, &batch_count);
-			if (retry < 0 && !result)
-				result = retry;
-			if (!retry && (need_resched() ||
-				spin_needbreak(&journal->j_list_lock))) {
-				spin_unlock(&journal->j_list_lock);
-				retry = 1;
-				break;
-			}
-		}
-
-		if (batch_count) {
-			if (!retry) {
-				spin_unlock(&journal->j_list_lock);
-				retry = 1;
-			}
-			__flush_batch(journal, bhs, &batch_count);
-		}
-
-		if (retry) {
-			spin_lock(&journal->j_list_lock);
-			goto restart;
-		}
-		/*
-		 * Now we have cleaned up the first transaction's checkpoint
-		 * list. Let's clean up the second one
-		 */
-		err = __wait_cp_io(journal, transaction);
-		if (!result)
-			result = err;
-	}
-out:
-	spin_unlock(&journal->j_list_lock);
-	if (result < 0)
-		journal_abort(journal, result);
-	else
-		result = cleanup_journal_tail(journal);
-
-	return (result < 0) ? result : 0;
-}
-
-/*
- * Check the list of checkpoint transactions for the journal to see if
- * we have already got rid of any since the last update of the log tail
- * in the journal superblock.  If so, we can instantly roll the
- * superblock forward to remove those transactions from the log.
- *
- * Return <0 on error, 0 on success, 1 if there was nothing to clean up.
- *
- * This is the only part of the journaling code which really needs to be
- * aware of transaction aborts.  Checkpointing involves writing to the
- * main filesystem area rather than to the journal, so it can proceed
- * even in abort state, but we must not update the super block if
- * checkpointing may have failed.  Otherwise, we would lose some metadata
- * buffers which should be written-back to the filesystem.
- */
-
-int cleanup_journal_tail(journal_t *journal)
-{
-	transaction_t * transaction;
-	tid_t		first_tid;
-	unsigned int	blocknr, freed;
-
-	if (is_journal_aborted(journal))
-		return 1;
-
-	/*
-	 * OK, work out the oldest transaction remaining in the log, and
-	 * the log block it starts at.
-	 *
-	 * If the log is now empty, we need to work out which is the
-	 * next transaction ID we will write, and where it will
-	 * start.
-	 */
-	spin_lock(&journal->j_state_lock);
-	spin_lock(&journal->j_list_lock);
-	transaction = journal->j_checkpoint_transactions;
-	if (transaction) {
-		first_tid = transaction->t_tid;
-		blocknr = transaction->t_log_start;
-	} else if ((transaction = journal->j_committing_transaction) != NULL) {
-		first_tid = transaction->t_tid;
-		blocknr = transaction->t_log_start;
-	} else if ((transaction = journal->j_running_transaction) != NULL) {
-		first_tid = transaction->t_tid;
-		blocknr = journal->j_head;
-	} else {
-		first_tid = journal->j_transaction_sequence;
-		blocknr = journal->j_head;
-	}
-	spin_unlock(&journal->j_list_lock);
-	J_ASSERT(blocknr != 0);
-
-	/* If the oldest pinned transaction is at the tail of the log
-           already then there's not much we can do right now. */
-	if (journal->j_tail_sequence == first_tid) {
-		spin_unlock(&journal->j_state_lock);
-		return 1;
-	}
-	spin_unlock(&journal->j_state_lock);
-
-	/*
-	 * We need to make sure that any blocks that were recently written out
-	 * --- perhaps by log_do_checkpoint() --- are flushed out before we
-	 * drop the transactions from the journal. Similarly we need to be sure
-	 * superblock makes it to disk before next transaction starts reusing
-	 * freed space (otherwise we could replay some blocks of the new
-	 * transaction thinking they belong to the old one). So we use
-	 * WRITE_FLUSH_FUA. It's unlikely this will be necessary, especially
-	 * with an appropriately sized journal, but we need this to guarantee
-	 * correctness.  Fortunately cleanup_journal_tail() doesn't get called
-	 * all that often.
-	 */
-	journal_update_sb_log_tail(journal, first_tid, blocknr,
-				   WRITE_FLUSH_FUA);
-
-	spin_lock(&journal->j_state_lock);
-	/* OK, update the superblock to recover the freed space.
-	 * Physical blocks come first: have we wrapped beyond the end of
-	 * the log?  */
-	freed = blocknr - journal->j_tail;
-	if (blocknr < journal->j_tail)
-		freed = freed + journal->j_last - journal->j_first;
-
-	trace_jbd_cleanup_journal_tail(journal, first_tid, blocknr, freed);
-	jbd_debug(1,
-		  "Cleaning journal tail from %d to %d (offset %u), "
-		  "freeing %u\n",
-		  journal->j_tail_sequence, first_tid, blocknr, freed);
-
-	journal->j_free += freed;
-	journal->j_tail_sequence = first_tid;
-	journal->j_tail = blocknr;
-	spin_unlock(&journal->j_state_lock);
-	return 0;
-}
-
-
-/* Checkpoint list management */
-
-/*
- * journal_clean_one_cp_list
- *
- * Find all the written-back checkpoint buffers in the given list and release
- * them.
- *
- * Called with j_list_lock held.
- * Returns number of buffers reaped (for debug)
- */
-
-static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
-{
-	struct journal_head *last_jh;
-	struct journal_head *next_jh = jh;
-	int ret, freed = 0;
-
-	*released = 0;
-	if (!jh)
-		return 0;
-
-	last_jh = jh->b_cpprev;
-	do {
-		jh = next_jh;
-		next_jh = jh->b_cpnext;
-		/* Use trylock because of the ranking */
-		if (jbd_trylock_bh_state(jh2bh(jh))) {
-			ret = __try_to_free_cp_buf(jh);
-			if (ret) {
-				freed++;
-				if (ret == 2) {
-					*released = 1;
-					return freed;
-				}
-			}
-		}
-		/*
-		 * This function only frees up some memory
- * if possible so we don't have an obligation
-		 * to finish processing. Bail out if preemption
-		 * requested:
-		 */
-		if (need_resched())
-			return freed;
-	} while (jh != last_jh);
-
-	return freed;
-}
-
-/*
- * journal_clean_checkpoint_list
- *
- * Find all the written-back checkpoint buffers in the journal and release them.
- *
- * Called with the journal locked.
- * Called with j_list_lock held.
- * Returns number of buffers reaped (for debug)
- */
-
-int __journal_clean_checkpoint_list(journal_t *journal)
-{
-	transaction_t *transaction, *last_transaction, *next_transaction;
-	int ret = 0;
-	int released;
-
-	transaction = journal->j_checkpoint_transactions;
-	if (!transaction)
-		goto out;
-
-	last_transaction = transaction->t_cpprev;
-	next_transaction = transaction;
-	do {
-		transaction = next_transaction;
-		next_transaction = transaction->t_cpnext;
-		ret += journal_clean_one_cp_list(transaction->
-				t_checkpoint_list, &released);
-		/*
-		 * This function only frees up some memory if possible so we
- * don't have an obligation to finish processing. Bail out if
-		 * preemption requested:
-		 */
-		if (need_resched())
-			goto out;
-		if (released)
-			continue;
-		/*
-		 * It is essential that we are as careful as in the case of
-		 * t_checkpoint_list with removing the buffer from the list as
-		 * we can possibly see not yet submitted buffers on io_list
-		 */
-		ret += journal_clean_one_cp_list(transaction->
-				t_checkpoint_io_list, &released);
-		if (need_resched())
-			goto out;
-	} while (transaction != last_transaction);
-out:
-	return ret;
-}
-
-/*
- * journal_remove_checkpoint: called after a buffer has been committed
- * to disk (either by being write-back flushed to disk, or being
- * committed to the log).
- *
- * We cannot safely clean a transaction out of the log until all of the
- * buffer updates committed in that transaction have safely been stored
- * elsewhere on disk.  To achieve this, all of the buffers in a
- * transaction need to be maintained on the transaction's checkpoint
- * lists until they have been rewritten, at which point this function is
- * called to remove the buffer from the existing transaction's
- * checkpoint lists.
- *
- * The function returns 1 if it frees the transaction, 0 otherwise.
- * The function can free jh and bh.
- *
- * This function is called with j_list_lock held.
- * This function is called with jbd_lock_bh_state(jh2bh(jh))
- */
-
-int __journal_remove_checkpoint(struct journal_head *jh)
-{
-	transaction_t *transaction;
-	journal_t *journal;
-	int ret = 0;
-
-	JBUFFER_TRACE(jh, "entry");
-
-	if ((transaction = jh->b_cp_transaction) == NULL) {
-		JBUFFER_TRACE(jh, "not on transaction");
-		goto out;
-	}
-	journal = transaction->t_journal;
-
-	JBUFFER_TRACE(jh, "removing from transaction");
-	__buffer_unlink(jh);
-	jh->b_cp_transaction = NULL;
-	journal_put_journal_head(jh);
-
-	if (transaction->t_checkpoint_list != NULL ||
-	    transaction->t_checkpoint_io_list != NULL)
-		goto out;
-
-	/*
-	 * There is one special case to worry about: if we have just pulled the
- * buffer off a running or committing transaction's checkpoint list,
-	 * then even if the checkpoint list is empty, the transaction obviously
-	 * cannot be dropped!
-	 *
-	 * The locking here around t_state is a bit sleazy.
-	 * See the comment at the end of journal_commit_transaction().
-	 */
-	if (transaction->t_state != T_FINISHED)
-		goto out;
-
-	/* OK, that was the last buffer for the transaction: we can now
-	   safely remove this transaction from the log */
-
-	__journal_drop_transaction(journal, transaction);
-
-	/* Just in case anybody was waiting for more transactions to be
-           checkpointed... */
-	wake_up(&journal->j_wait_logspace);
-	ret = 1;
-out:
-	return ret;
-}
-
-/*
- * journal_insert_checkpoint: put a committed buffer onto a checkpoint
- * list so that we know when it is safe to clean the transaction out of
- * the log.
- *
- * Called with the journal locked.
- * Called with j_list_lock held.
- */
-void __journal_insert_checkpoint(struct journal_head *jh,
-			       transaction_t *transaction)
-{
-	JBUFFER_TRACE(jh, "entry");
-	J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh)));
-	J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
-
-	/* Get reference for checkpointing transaction */
-	journal_grab_journal_head(jh2bh(jh));
-	jh->b_cp_transaction = transaction;
-
-	if (!transaction->t_checkpoint_list) {
-		jh->b_cpnext = jh->b_cpprev = jh;
-	} else {
-		jh->b_cpnext = transaction->t_checkpoint_list;
-		jh->b_cpprev = transaction->t_checkpoint_list->b_cpprev;
-		jh->b_cpprev->b_cpnext = jh;
-		jh->b_cpnext->b_cpprev = jh;
-	}
-	transaction->t_checkpoint_list = jh;
-}
-
-/*
- * We've finished with this transaction structure: adios...
- *
- * The transaction must have no links except for the checkpoint by this
- * point.
- *
- * Called with the journal locked.
- * Called with j_list_lock held.
- */
-
-void __journal_drop_transaction(journal_t *journal, transaction_t *transaction)
-{
-	assert_spin_locked(&journal->j_list_lock);
-	if (transaction->t_cpnext) {
-		transaction->t_cpnext->t_cpprev = transaction->t_cpprev;
-		transaction->t_cpprev->t_cpnext = transaction->t_cpnext;
-		if (journal->j_checkpoint_transactions == transaction)
-			journal->j_checkpoint_transactions =
-				transaction->t_cpnext;
-		if (journal->j_checkpoint_transactions == transaction)
-			journal->j_checkpoint_transactions = NULL;
-	}
-
-	J_ASSERT(transaction->t_state == T_FINISHED);
-	J_ASSERT(transaction->t_buffers == NULL);
-	J_ASSERT(transaction->t_sync_datalist == NULL);
-	J_ASSERT(transaction->t_forget == NULL);
-	J_ASSERT(transaction->t_iobuf_list == NULL);
-	J_ASSERT(transaction->t_shadow_list == NULL);
-	J_ASSERT(transaction->t_log_list == NULL);
-	J_ASSERT(transaction->t_checkpoint_list == NULL);
-	J_ASSERT(transaction->t_checkpoint_io_list == NULL);
-	J_ASSERT(transaction->t_updates == 0);
-	J_ASSERT(journal->j_committing_transaction != transaction);
-	J_ASSERT(journal->j_running_transaction != transaction);
-
-	trace_jbd_drop_transaction(journal, transaction);
-	jbd_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
-	kfree(transaction);
-}
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
deleted file mode 100644
index bb217dc..0000000
--- a/fs/jbd/commit.c
+++ /dev/null
@@ -1,1021 +0,0 @@
-/*
- * linux/fs/jbd/commit.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
- *
- * Copyright 1998 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Journal commit routines for the generic filesystem journaling code;
- * part of the ext2fs journaling system.
- */
-
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/bio.h>
-#include <linux/blkdev.h>
-#include <trace/events/jbd.h>
-
-/*
- * Default IO end handler for temporary BJ_IO buffer_heads.
- */
-static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
-{
-	BUFFER_TRACE(bh, "");
-	if (uptodate)
-		set_buffer_uptodate(bh);
-	else
-		clear_buffer_uptodate(bh);
-	unlock_buffer(bh);
-}
-
-/*
- * When an ext3-ordered file is truncated, it is possible that many pages are
- * not successfully freed, because they are attached to a committing transaction.
- * After the transaction commits, these pages are left on the LRU, with no
- * ->mapping, and with attached buffers.  These pages are trivially reclaimable
- * by the VM, but their apparent absence upsets the VM accounting, and it makes
- * the numbers in /proc/meminfo look odd.
- *
- * So here, we have a buffer which has just come off the forget list.  Look to
- * see if we can strip all buffers from the backing page.
- *
- * Called under journal->j_list_lock.  The caller provided us with a ref
- * against the buffer, and we drop that here.
- */
-static void release_buffer_page(struct buffer_head *bh)
-{
-	struct page *page;
-
-	if (buffer_dirty(bh))
-		goto nope;
-	if (atomic_read(&bh->b_count) != 1)
-		goto nope;
-	page = bh->b_page;
-	if (!page)
-		goto nope;
-	if (page->mapping)
-		goto nope;
-
-	/* OK, it's a truncated page */
-	if (!trylock_page(page))
-		goto nope;
-
-	page_cache_get(page);
-	__brelse(bh);
-	try_to_free_buffers(page);
-	unlock_page(page);
-	page_cache_release(page);
-	return;
-
-nope:
-	__brelse(bh);
-}
-
-/*
- * Decrement reference counter for data buffer. If it has been marked
- * 'BH_Freed', release it and the page to which it belongs if possible.
- */
-static void release_data_buffer(struct buffer_head *bh)
-{
-	if (buffer_freed(bh)) {
-		WARN_ON_ONCE(buffer_dirty(bh));
-		clear_buffer_freed(bh);
-		clear_buffer_mapped(bh);
-		clear_buffer_new(bh);
-		clear_buffer_req(bh);
-		bh->b_bdev = NULL;
-		release_buffer_page(bh);
-	} else
-		put_bh(bh);
-}
-
-/*
- * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
- * held.  For ranking reasons we must trylock.  If we lose, schedule away and
- * return 0.  j_list_lock is dropped in this case.
- */
-static int inverted_lock(journal_t *journal, struct buffer_head *bh)
-{
-	if (!jbd_trylock_bh_state(bh)) {
-		spin_unlock(&journal->j_list_lock);
-		schedule();
-		return 0;
-	}
-	return 1;
-}
-
-/* Done it all: now write the commit record.  We should have
- * cleaned up our previous buffers by now, so if we are in abort
- * mode we can now just skip the rest of the journal write
- * entirely.
- *
- * Returns 1 if the journal needs to be aborted or 0 on success
- */
-static int journal_write_commit_record(journal_t *journal,
-					transaction_t *commit_transaction)
-{
-	struct journal_head *descriptor;
-	struct buffer_head *bh;
-	journal_header_t *header;
-	int ret;
-
-	if (is_journal_aborted(journal))
-		return 0;
-
-	descriptor = journal_get_descriptor_buffer(journal);
-	if (!descriptor)
-		return 1;
-
-	bh = jh2bh(descriptor);
-
-	header = (journal_header_t *)(bh->b_data);
-	header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
-	header->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
-	header->h_sequence = cpu_to_be32(commit_transaction->t_tid);
-
-	JBUFFER_TRACE(descriptor, "write commit block");
-	set_buffer_dirty(bh);
-
-	if (journal->j_flags & JFS_BARRIER)
-		ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_FLUSH_FUA);
-	else
-		ret = sync_dirty_buffer(bh);
-
-	put_bh(bh);		/* One for getblk() */
-	journal_put_journal_head(descriptor);
-
-	return (ret == -EIO);
-}
-
-static void journal_do_submit_data(struct buffer_head **wbuf, int bufs,
-				   int write_op)
-{
-	int i;
-
-	for (i = 0; i < bufs; i++) {
-		wbuf[i]->b_end_io = end_buffer_write_sync;
-		/*
-		 * Here we write back pagecache data that may be mmaped. Since
-		 * we cannot afford to clean the page and set PageWriteback
-		 * here due to lock ordering (page lock ranks above transaction
-		 * start), the data can change while IO is in flight. Tell the
-		 * block layer it should bounce the bio pages if stable data
-		 * during write is required.
-		 *
-		 * We use up our safety reference in submit_bh().
-		 */
-		_submit_bh(write_op, wbuf[i], 1 << BIO_SNAP_STABLE);
-	}
-}
-
-/*
- *  Submit all the data buffers to disk
- */
-static int journal_submit_data_buffers(journal_t *journal,
-				       transaction_t *commit_transaction,
-				       int write_op)
-{
-	struct journal_head *jh;
-	struct buffer_head *bh;
-	int locked;
-	int bufs = 0;
-	struct buffer_head **wbuf = journal->j_wbuf;
-	int err = 0;
-
-	/*
-	 * Whenever we unlock the journal and sleep, things can get added
-	 * onto ->t_sync_datalist, so we have to keep looping back to
-	 * write_out_data until we *know* that the list is empty.
-	 *
-	 * Cleanup any flushed data buffers from the data list.  Even in
-	 * abort mode, we want to flush this out as soon as possible.
-	 */
-write_out_data:
-	cond_resched();
-	spin_lock(&journal->j_list_lock);
-
-	while (commit_transaction->t_sync_datalist) {
-		jh = commit_transaction->t_sync_datalist;
-		bh = jh2bh(jh);
-		locked = 0;
-
-		/* Get reference just to make sure buffer does not disappear
-		 * when we are forced to drop various locks */
-		get_bh(bh);
-		/* If the buffer is dirty, we need to submit IO and hence
-		 * we need the buffer lock. We try to lock the buffer without
-		 * blocking. If we fail, we need to drop j_list_lock and do
-		 * blocking lock_buffer().
-		 */
-		if (buffer_dirty(bh)) {
-			if (!trylock_buffer(bh)) {
-				BUFFER_TRACE(bh, "needs blocking lock");
-				spin_unlock(&journal->j_list_lock);
-				trace_jbd_do_submit_data(journal,
-						     commit_transaction);
-				/* Write out all data to prevent deadlocks */
-				journal_do_submit_data(wbuf, bufs, write_op);
-				bufs = 0;
-				lock_buffer(bh);
-				spin_lock(&journal->j_list_lock);
-			}
-			locked = 1;
-		}
-		/* We have to get bh_state lock. Again out of order, sigh. */
-		if (!inverted_lock(journal, bh)) {
-			jbd_lock_bh_state(bh);
-			spin_lock(&journal->j_list_lock);
-		}
-		/* Someone already cleaned up the buffer? */
-		if (!buffer_jbd(bh) || bh2jh(bh) != jh
-			|| jh->b_transaction != commit_transaction
-			|| jh->b_jlist != BJ_SyncData) {
-			jbd_unlock_bh_state(bh);
-			if (locked)
-				unlock_buffer(bh);
-			BUFFER_TRACE(bh, "already cleaned up");
-			release_data_buffer(bh);
-			continue;
-		}
-		if (locked && test_clear_buffer_dirty(bh)) {
-			BUFFER_TRACE(bh, "needs writeout, adding to array");
-			wbuf[bufs++] = bh;
-			__journal_file_buffer(jh, commit_transaction,
-						BJ_Locked);
-			jbd_unlock_bh_state(bh);
-			if (bufs == journal->j_wbufsize) {
-				spin_unlock(&journal->j_list_lock);
-				trace_jbd_do_submit_data(journal,
-						     commit_transaction);
-				journal_do_submit_data(wbuf, bufs, write_op);
-				bufs = 0;
-				goto write_out_data;
-			}
-		} else if (!locked && buffer_locked(bh)) {
-			__journal_file_buffer(jh, commit_transaction,
-						BJ_Locked);
-			jbd_unlock_bh_state(bh);
-			put_bh(bh);
-		} else {
-			BUFFER_TRACE(bh, "writeout complete: unfile");
-			if (unlikely(!buffer_uptodate(bh)))
-				err = -EIO;
-			__journal_unfile_buffer(jh);
-			jbd_unlock_bh_state(bh);
-			if (locked)
-				unlock_buffer(bh);
-			release_data_buffer(bh);
-		}
-
-		if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
-			spin_unlock(&journal->j_list_lock);
-			goto write_out_data;
-		}
-	}
-	spin_unlock(&journal->j_list_lock);
-	trace_jbd_do_submit_data(journal, commit_transaction);
-	journal_do_submit_data(wbuf, bufs, write_op);
-
-	return err;
-}
-
-/*
- * journal_commit_transaction
- *
- * The primary function for committing a transaction to the log.  This
- * function is called by the journal thread to begin a complete commit.
- */
-void journal_commit_transaction(journal_t *journal)
-{
-	transaction_t *commit_transaction;
-	struct journal_head *jh, *new_jh, *descriptor;
-	struct buffer_head **wbuf = journal->j_wbuf;
-	int bufs;
-	int flags;
-	int err;
-	unsigned int blocknr;
-	ktime_t start_time;
-	u64 commit_time;
-	char *tagp = NULL;
-	journal_header_t *header;
-	journal_block_tag_t *tag = NULL;
-	int space_left = 0;
-	int first_tag = 0;
-	int tag_flag;
-	int i;
-	struct blk_plug plug;
-	int write_op = WRITE;
-
-	/*
-	 * First job: lock down the current transaction and wait for
-	 * all outstanding updates to complete.
-	 */
-
-	/* Do we need to erase the effects of a prior journal_flush? */
-	if (journal->j_flags & JFS_FLUSHED) {
-		jbd_debug(3, "super block updated\n");
-		mutex_lock(&journal->j_checkpoint_mutex);
-		/*
-		 * We hold j_checkpoint_mutex so tail cannot change under us.
-		 * We don't need any special data guarantees for writing sb
-		 * since journal is empty and it is ok for write to be
-		 * flushed only with transaction commit.
-		 */
-		journal_update_sb_log_tail(journal, journal->j_tail_sequence,
-					   journal->j_tail, WRITE_SYNC);
-		mutex_unlock(&journal->j_checkpoint_mutex);
-	} else {
-		jbd_debug(3, "superblock not updated\n");
-	}
-
-	J_ASSERT(journal->j_running_transaction != NULL);
-	J_ASSERT(journal->j_committing_transaction == NULL);
-
-	commit_transaction = journal->j_running_transaction;
-
-	trace_jbd_start_commit(journal, commit_transaction);
-	jbd_debug(1, "JBD: starting commit of transaction %d\n",
-			commit_transaction->t_tid);
-
-	spin_lock(&journal->j_state_lock);
-	J_ASSERT(commit_transaction->t_state == T_RUNNING);
-	commit_transaction->t_state = T_LOCKED;
-
-	trace_jbd_commit_locking(journal, commit_transaction);
-	spin_lock(&commit_transaction->t_handle_lock);
-	while (commit_transaction->t_updates) {
-		DEFINE_WAIT(wait);
-
-		prepare_to_wait(&journal->j_wait_updates, &wait,
-					TASK_UNINTERRUPTIBLE);
-		if (commit_transaction->t_updates) {
-			spin_unlock(&commit_transaction->t_handle_lock);
-			spin_unlock(&journal->j_state_lock);
-			schedule();
-			spin_lock(&journal->j_state_lock);
-			spin_lock(&commit_transaction->t_handle_lock);
-		}
-		finish_wait(&journal->j_wait_updates, &wait);
-	}
-	spin_unlock(&commit_transaction->t_handle_lock);
-
-	J_ASSERT (commit_transaction->t_outstanding_credits <=
-			journal->j_max_transaction_buffers);
-
-	/*
-	 * First thing we are allowed to do is to discard any remaining
-	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
-	 * that there are no such buffers: if a large filesystem
-	 * operation like a truncate needs to split itself over multiple
-	 * transactions, then it may try to do a journal_restart() while
-	 * there are still BJ_Reserved buffers outstanding.  These must
-	 * be released cleanly from the current transaction.
-	 *
-	 * In this case, the filesystem must still reserve write access
-	 * again before modifying the buffer in the new transaction, but
-	 * we do not require it to remember exactly which old buffers it
-	 * has reserved.  This is consistent with the existing behaviour
-	 * that multiple journal_get_write_access() calls to the same
-	 * buffer are perfectly permissible.
-	 */
-	while (commit_transaction->t_reserved_list) {
-		jh = commit_transaction->t_reserved_list;
-		JBUFFER_TRACE(jh, "reserved, unused: refile");
-		/*
-		 * A journal_get_undo_access()+journal_release_buffer() may
-		 * leave undo-committed data.
-		 */
-		if (jh->b_committed_data) {
-			struct buffer_head *bh = jh2bh(jh);
-
-			jbd_lock_bh_state(bh);
-			jbd_free(jh->b_committed_data, bh->b_size);
-			jh->b_committed_data = NULL;
-			jbd_unlock_bh_state(bh);
-		}
-		journal_refile_buffer(journal, jh);
-	}
-
-	/*
-	 * Now try to drop any written-back buffers from the journal's
-	 * checkpoint lists.  We do this *before* commit because it potentially
-	 * frees some memory
-	 */
-	spin_lock(&journal->j_list_lock);
-	__journal_clean_checkpoint_list(journal);
-	spin_unlock(&journal->j_list_lock);
-
-	jbd_debug (3, "JBD: commit phase 1\n");
-
-	/*
-	 * Clear revoked flag to reflect there is no revoked buffers
-	 * in the next transaction which is going to be started.
-	 */
-	journal_clear_buffer_revoked_flags(journal);
-
-	/*
-	 * Switch to a new revoke table.
-	 */
-	journal_switch_revoke_table(journal);
-
-	trace_jbd_commit_flushing(journal, commit_transaction);
-	commit_transaction->t_state = T_FLUSH;
-	journal->j_committing_transaction = commit_transaction;
-	journal->j_running_transaction = NULL;
-	start_time = ktime_get();
-	commit_transaction->t_log_start = journal->j_head;
-	wake_up(&journal->j_wait_transaction_locked);
-	spin_unlock(&journal->j_state_lock);
-
-	jbd_debug (3, "JBD: commit phase 2\n");
-
-	if (tid_geq(journal->j_commit_waited, commit_transaction->t_tid))
-		write_op = WRITE_SYNC;
-
-	/*
-	 * Now start flushing things to disk, in the order they appear
-	 * on the transaction lists.  Data blocks go first.
-	 */
-	blk_start_plug(&plug);
-	err = journal_submit_data_buffers(journal, commit_transaction,
-					  write_op);
-	blk_finish_plug(&plug);
-
-	/*
-	 * Wait for all previously submitted IO to complete.
-	 */
-	spin_lock(&journal->j_list_lock);
-	while (commit_transaction->t_locked_list) {
-		struct buffer_head *bh;
-
-		jh = commit_transaction->t_locked_list->b_tprev;
-		bh = jh2bh(jh);
-		get_bh(bh);
-		if (buffer_locked(bh)) {
-			spin_unlock(&journal->j_list_lock);
-			wait_on_buffer(bh);
-			spin_lock(&journal->j_list_lock);
-		}
-		if (unlikely(!buffer_uptodate(bh))) {
-			if (!trylock_page(bh->b_page)) {
-				spin_unlock(&journal->j_list_lock);
-				lock_page(bh->b_page);
-				spin_lock(&journal->j_list_lock);
-			}
-			if (bh->b_page->mapping)
-				set_bit(AS_EIO, &bh->b_page->mapping->flags);
-
-			unlock_page(bh->b_page);
-			SetPageError(bh->b_page);
-			err = -EIO;
-		}
-		if (!inverted_lock(journal, bh)) {
-			put_bh(bh);
-			spin_lock(&journal->j_list_lock);
-			continue;
-		}
-		if (buffer_jbd(bh) && bh2jh(bh) == jh &&
-		    jh->b_transaction == commit_transaction &&
-		    jh->b_jlist == BJ_Locked)
-			__journal_unfile_buffer(jh);
-		jbd_unlock_bh_state(bh);
-		release_data_buffer(bh);
-		cond_resched_lock(&journal->j_list_lock);
-	}
-	spin_unlock(&journal->j_list_lock);
-
-	if (err) {
-		char b[BDEVNAME_SIZE];
-
-		printk(KERN_WARNING
-			"JBD: Detected IO errors while flushing file data "
-			"on %s\n", bdevname(journal->j_fs_dev, b));
-		if (journal->j_flags & JFS_ABORT_ON_SYNCDATA_ERR)
-			journal_abort(journal, err);
-		err = 0;
-	}
-
-	blk_start_plug(&plug);
-
-	journal_write_revoke_records(journal, commit_transaction, write_op);
-
-	/*
-	 * If we found any dirty or locked buffers, then we should have
-	 * looped back up to the write_out_data label.  If there weren't
-	 * any then journal_clean_data_list should have wiped the list
-	 * clean by now, so check that it is in fact empty.
-	 */
-	J_ASSERT (commit_transaction->t_sync_datalist == NULL);
-
-	jbd_debug (3, "JBD: commit phase 3\n");
-
-	/*
-	 * Way to go: we have now written out all of the data for a
-	 * transaction!  Now comes the tricky part: we need to write out
-	 * metadata.  Loop over the transaction's entire buffer list:
-	 */
-	spin_lock(&journal->j_state_lock);
-	commit_transaction->t_state = T_COMMIT;
-	spin_unlock(&journal->j_state_lock);
-
-	trace_jbd_commit_logging(journal, commit_transaction);
-	J_ASSERT(commit_transaction->t_nr_buffers <=
-		 commit_transaction->t_outstanding_credits);
-
-	descriptor = NULL;
-	bufs = 0;
-	while (commit_transaction->t_buffers) {
-
-		/* Find the next buffer to be journaled... */
-
-		jh = commit_transaction->t_buffers;
-
-		/* If we're in abort mode, we just un-journal the buffer and
-		   release it. */
-
-		if (is_journal_aborted(journal)) {
-			clear_buffer_jbddirty(jh2bh(jh));
-			JBUFFER_TRACE(jh, "journal is aborting: refile");
-			journal_refile_buffer(journal, jh);
-			/* If that was the last one, we need to clean up
-			 * any descriptor buffers which may have been
-			 * already allocated, even if we are now
-			 * aborting. */
-			if (!commit_transaction->t_buffers)
-				goto start_journal_io;
-			continue;
-		}
-
-		/* Make sure we have a descriptor block in which to
-		   record the metadata buffer. */
-
-		if (!descriptor) {
-			struct buffer_head *bh;
-
-			J_ASSERT (bufs == 0);
-
-			jbd_debug(4, "JBD: get descriptor\n");
-
-			descriptor = journal_get_descriptor_buffer(journal);
-			if (!descriptor) {
-				journal_abort(journal, -EIO);
-				continue;
-			}
-
-			bh = jh2bh(descriptor);
-			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
-				(unsigned long long)bh->b_blocknr, bh->b_data);
-			header = (journal_header_t *)&bh->b_data[0];
-			header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
-			header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
-			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);
-
-			tagp = &bh->b_data[sizeof(journal_header_t)];
-			space_left = bh->b_size - sizeof(journal_header_t);
-			first_tag = 1;
-			set_buffer_jwrite(bh);
-			set_buffer_dirty(bh);
-			wbuf[bufs++] = bh;
-
-			/* Record it so that we can wait for IO
-                           completion later */
-			BUFFER_TRACE(bh, "ph3: file as descriptor");
-			journal_file_buffer(descriptor, commit_transaction,
-					BJ_LogCtl);
-		}
-
-		/* Where is the buffer to be written? */
-
-		err = journal_next_log_block(journal, &blocknr);
-		/* If the block mapping failed, just abandon the buffer
-		   and repeat this loop: we'll fall into the
-		   refile-on-abort condition above. */
-		if (err) {
-			journal_abort(journal, err);
-			continue;
-		}
-
-		/*
-		 * start_this_handle() uses t_outstanding_credits to determine
-		 * the free space in the log, but this counter is changed
-		 * by journal_next_log_block() also.
-		 */
-		commit_transaction->t_outstanding_credits--;
-
-		/* Bump b_count to prevent truncate from stumbling over
-                   the shadowed buffer!  @@@ This can go if we ever get
-                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
-		get_bh(jh2bh(jh));
-
-		/* Make a temporary IO buffer with which to write it out
-                   (this will requeue both the metadata buffer and the
-                   temporary IO buffer). new_bh goes on BJ_IO*/
-
-		set_buffer_jwrite(jh2bh(jh));
-		/*
-		 * akpm: journal_write_metadata_buffer() sets
-		 * new_bh->b_transaction to commit_transaction.
-		 * We need to clean this up before we release new_bh
-		 * (which is of type BJ_IO)
-		 */
-		JBUFFER_TRACE(jh, "ph3: write metadata");
-		flags = journal_write_metadata_buffer(commit_transaction,
-						      jh, &new_jh, blocknr);
-		set_buffer_jwrite(jh2bh(new_jh));
-		wbuf[bufs++] = jh2bh(new_jh);
-
-		/* Record the new block's tag in the current descriptor
-                   buffer */
-
-		tag_flag = 0;
-		if (flags & 1)
-			tag_flag |= JFS_FLAG_ESCAPE;
-		if (!first_tag)
-			tag_flag |= JFS_FLAG_SAME_UUID;
-
-		tag = (journal_block_tag_t *) tagp;
-		tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
-		tag->t_flags = cpu_to_be32(tag_flag);
-		tagp += sizeof(journal_block_tag_t);
-		space_left -= sizeof(journal_block_tag_t);
-
-		if (first_tag) {
-			memcpy (tagp, journal->j_uuid, 16);
-			tagp += 16;
-			space_left -= 16;
-			first_tag = 0;
-		}
-
-		/* If there's no more to do, or if the descriptor is full,
-		   let the IO rip! */
-
-		if (bufs == journal->j_wbufsize ||
-		    commit_transaction->t_buffers == NULL ||
-		    space_left < sizeof(journal_block_tag_t) + 16) {
-
-			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);
-
-			/* Write an end-of-descriptor marker before
-                           submitting the IOs.  "tag" still points to
-                           the last tag we set up. */
-
-			tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);
-
-start_journal_io:
-			for (i = 0; i < bufs; i++) {
-				struct buffer_head *bh = wbuf[i];
-				lock_buffer(bh);
-				clear_buffer_dirty(bh);
-				set_buffer_uptodate(bh);
-				bh->b_end_io = journal_end_buffer_io_sync;
-				/*
-				 * In data=journal mode, here we can end up
-				 * writing pagecache data that might be
-				 * mmapped. Since we can't afford to clean the
-				 * page and set PageWriteback (see the comment
-				 * near the other use of _submit_bh()), the
-				 * data can change while the write is in
-				 * flight.  Tell the block layer to bounce the
-				 * bio pages if stable pages are required.
-				 */
-				_submit_bh(write_op, bh, 1 << BIO_SNAP_STABLE);
-			}
-			cond_resched();
-
-			/* Force a new descriptor to be generated next
-                           time round the loop. */
-			descriptor = NULL;
-			bufs = 0;
-		}
-	}
-
-	blk_finish_plug(&plug);
-
-	/* Lo and behold: we have just managed to send a transaction to
-           the log.  Before we can commit it, wait for the IO so far to
-           complete.  Control buffers being written are on the
-           transaction's t_log_list queue, and metadata buffers are on
-           the t_iobuf_list queue.
-
-	   Wait for the buffers in reverse order.  That way we are
-	   less likely to be woken up until all IOs have completed, and
-	   so we incur less scheduling load.
-	*/
-
-	jbd_debug(3, "JBD: commit phase 4\n");
-
-	/*
-	 * akpm: these are BJ_IO, and j_list_lock is not needed.
-	 * See __journal_try_to_free_buffer.
-	 */
-wait_for_iobuf:
-	while (commit_transaction->t_iobuf_list != NULL) {
-		struct buffer_head *bh;
-
-		jh = commit_transaction->t_iobuf_list->b_tprev;
-		bh = jh2bh(jh);
-		if (buffer_locked(bh)) {
-			wait_on_buffer(bh);
-			goto wait_for_iobuf;
-		}
-		if (cond_resched())
-			goto wait_for_iobuf;
-
-		if (unlikely(!buffer_uptodate(bh)))
-			err = -EIO;
-
-		clear_buffer_jwrite(bh);
-
-		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
-		journal_unfile_buffer(journal, jh);
-
-		/*
-		 * ->t_iobuf_list should contain only dummy buffer_heads
-		 * which were created by journal_write_metadata_buffer().
-		 */
-		BUFFER_TRACE(bh, "dumping temporary bh");
-		journal_put_journal_head(jh);
-		__brelse(bh);
-		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
-		free_buffer_head(bh);
-
-		/* We also have to unlock and free the corresponding
-                   shadowed buffer */
-		jh = commit_transaction->t_shadow_list->b_tprev;
-		bh = jh2bh(jh);
-		clear_buffer_jwrite(bh);
-		J_ASSERT_BH(bh, buffer_jbddirty(bh));
-
-		/* The metadata is now released for reuse, but we need
-                   to remember it against this transaction so that when
-                   we finally commit, we can do any checkpointing
-                   required. */
-		JBUFFER_TRACE(jh, "file as BJ_Forget");
-		journal_file_buffer(jh, commit_transaction, BJ_Forget);
-		/*
-		 * Wake up any transactions which were waiting for this
-		 * IO to complete. The barrier must be here so that changes
-		 * by journal_file_buffer() take effect before wake_up_bit()
-		 * does the waitqueue check.
-		 */
-		smp_mb();
-		wake_up_bit(&bh->b_state, BH_Unshadow);
-		JBUFFER_TRACE(jh, "brelse shadowed buffer");
-		__brelse(bh);
-	}
-
-	J_ASSERT (commit_transaction->t_shadow_list == NULL);
-
-	jbd_debug(3, "JBD: commit phase 5\n");
-
-	/* Here we wait for the revoke record and descriptor record buffers */
- wait_for_ctlbuf:
-	while (commit_transaction->t_log_list != NULL) {
-		struct buffer_head *bh;
-
-		jh = commit_transaction->t_log_list->b_tprev;
-		bh = jh2bh(jh);
-		if (buffer_locked(bh)) {
-			wait_on_buffer(bh);
-			goto wait_for_ctlbuf;
-		}
-		if (cond_resched())
-			goto wait_for_ctlbuf;
-
-		if (unlikely(!buffer_uptodate(bh)))
-			err = -EIO;
-
-		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
-		clear_buffer_jwrite(bh);
-		journal_unfile_buffer(journal, jh);
-		journal_put_journal_head(jh);
-		__brelse(bh);		/* One for getblk */
-		/* AKPM: bforget here */
-	}
-
-	if (err)
-		journal_abort(journal, err);
-
-	jbd_debug(3, "JBD: commit phase 6\n");
-
-	/* All metadata is written, now write commit record and do cleanup */
-	spin_lock(&journal->j_state_lock);
-	J_ASSERT(commit_transaction->t_state == T_COMMIT);
-	commit_transaction->t_state = T_COMMIT_RECORD;
-	spin_unlock(&journal->j_state_lock);
-
-	if (journal_write_commit_record(journal, commit_transaction))
-		err = -EIO;
-
-	if (err)
-		journal_abort(journal, err);
-
-	/* End of a transaction!  Finally, we can do checkpoint
-           processing: any buffers committed as a result of this
-           transaction can be removed from any checkpoint list they were on
-           before. */
-
-	jbd_debug(3, "JBD: commit phase 7\n");
-
-	J_ASSERT(commit_transaction->t_sync_datalist == NULL);
-	J_ASSERT(commit_transaction->t_buffers == NULL);
-	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
-	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
-	J_ASSERT(commit_transaction->t_shadow_list == NULL);
-	J_ASSERT(commit_transaction->t_log_list == NULL);
-
-restart_loop:
-	/*
-	 * As there are other places (journal_unmap_buffer()) adding buffers
-	 * to this list we have to be careful and hold the j_list_lock.
-	 */
-	spin_lock(&journal->j_list_lock);
-	while (commit_transaction->t_forget) {
-		transaction_t *cp_transaction;
-		struct buffer_head *bh;
-		int try_to_free = 0;
-
-		jh = commit_transaction->t_forget;
-		spin_unlock(&journal->j_list_lock);
-		bh = jh2bh(jh);
-		/*
-		 * Get a reference so that bh cannot be freed before we are
-		 * done with it.
-		 */
-		get_bh(bh);
-		jbd_lock_bh_state(bh);
-		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction ||
-			jh->b_transaction == journal->j_running_transaction);
-
-		/*
-		 * If there is undo-protected committed data against
-		 * this buffer, then we can remove it now.  If it is a
-		 * buffer needing such protection, the old frozen_data
-		 * field now points to a committed version of the
-		 * buffer, so rotate that field to the new committed
-		 * data.
-		 *
-		 * Otherwise, we can just throw away the frozen data now.
-		 */
-		if (jh->b_committed_data) {
-			jbd_free(jh->b_committed_data, bh->b_size);
-			jh->b_committed_data = NULL;
-			if (jh->b_frozen_data) {
-				jh->b_committed_data = jh->b_frozen_data;
-				jh->b_frozen_data = NULL;
-			}
-		} else if (jh->b_frozen_data) {
-			jbd_free(jh->b_frozen_data, bh->b_size);
-			jh->b_frozen_data = NULL;
-		}
-
-		spin_lock(&journal->j_list_lock);
-		cp_transaction = jh->b_cp_transaction;
-		if (cp_transaction) {
-			JBUFFER_TRACE(jh, "remove from old cp transaction");
-			__journal_remove_checkpoint(jh);
-		}
-
-		/* Only re-checkpoint the buffer_head if it is marked
-		 * dirty.  If the buffer was added to the BJ_Forget list
-		 * by journal_forget, it may no longer be dirty and
-		 * there's no point in keeping a checkpoint record for
-		 * it. */
-
-		/*
-		 * A buffer which has been freed while still being journaled by
-		 * a previous transaction.
-		 */
-		if (buffer_freed(bh)) {
-			/*
-			 * If the running transaction is the one containing
-			 * "add to orphan" operation (b_next_transaction !=
-			 * NULL), we have to wait for that transaction to
-			 * commit before we can really get rid of the buffer.
-			 * So just clear b_modified to not confuse transaction
-			 * credit accounting and refile the buffer to
-			 * BJ_Forget of the running transaction. If the just
-			 * committed transaction contains "add to orphan"
-			 * operation, we can completely invalidate the buffer
-			 * now. We are rather thorough in that since the
-			 * buffer may still be accessible when blocksize <
-			 * pagesize and it is attached to the last partial
-			 * page.
-			 */
-			jh->b_modified = 0;
-			if (!jh->b_next_transaction) {
-				clear_buffer_freed(bh);
-				clear_buffer_jbddirty(bh);
-				clear_buffer_mapped(bh);
-				clear_buffer_new(bh);
-				clear_buffer_req(bh);
-				bh->b_bdev = NULL;
-			}
-		}
-
-		if (buffer_jbddirty(bh)) {
-			JBUFFER_TRACE(jh, "add to new checkpointing trans");
-			__journal_insert_checkpoint(jh, commit_transaction);
-			if (is_journal_aborted(journal))
-				clear_buffer_jbddirty(bh);
-		} else {
-			J_ASSERT_BH(bh, !buffer_dirty(bh));
-			/*
-			 * The buffer on BJ_Forget list and not jbddirty means
-			 * it has been freed by this transaction and hence it
-			 * could not have been reallocated until this
-			 * transaction has committed. *BUT* it could be
-			 * reallocated once we have written all the data to
-			 * disk and before we process the buffer on BJ_Forget
-			 * list.
-			 */
-			if (!jh->b_next_transaction)
-				try_to_free = 1;
-		}
-		JBUFFER_TRACE(jh, "refile or unfile freed buffer");
-		__journal_refile_buffer(jh);
-		jbd_unlock_bh_state(bh);
-		if (try_to_free)
-			release_buffer_page(bh);
-		else
-			__brelse(bh);
-		cond_resched_lock(&journal->j_list_lock);
-	}
-	spin_unlock(&journal->j_list_lock);
-	/*
-	 * This is a bit sleazy.  We use j_list_lock to protect transition
-	 * of a transaction into T_FINISHED state and calling
-	 * __journal_drop_transaction(). Otherwise we could race with
-	 * other checkpointing code processing the transaction...
-	 */
-	spin_lock(&journal->j_state_lock);
-	spin_lock(&journal->j_list_lock);
-	/*
-	 * Now recheck if some buffers did not get attached to the transaction
-	 * while the lock was dropped...
-	 */
-	if (commit_transaction->t_forget) {
-		spin_unlock(&journal->j_list_lock);
-		spin_unlock(&journal->j_state_lock);
-		goto restart_loop;
-	}
-
-	/* Done with this transaction! */
-
-	jbd_debug(3, "JBD: commit phase 8\n");
-
-	J_ASSERT(commit_transaction->t_state == T_COMMIT_RECORD);
-
-	commit_transaction->t_state = T_FINISHED;
-	J_ASSERT(commit_transaction == journal->j_committing_transaction);
-	journal->j_commit_sequence = commit_transaction->t_tid;
-	journal->j_committing_transaction = NULL;
-	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
-
-	/*
-	 * weight the commit time higher than the average time so we don't
-	 * react too strongly to vast changes in commit time
-	 */
-	if (likely(journal->j_average_commit_time))
-		journal->j_average_commit_time = (commit_time*3 +
-				journal->j_average_commit_time) / 4;
-	else
-		journal->j_average_commit_time = commit_time;
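-
-	/*
-	 * Illustrative aside (not part of the original file): this is a
-	 * moving average weighting the newest sample 3:1.  With a
-	 * hypothetical previous average of 8ms and a commit_time of 16ms
-	 * (both kept in ns in reality):
-	 *
-	 *	new_avg = (16 * 3 + 8) / 4 = 14ms
-	 *
-	 * so the estimate tracks recent commit latency quickly while still
-	 * damping single outliers.
-	 */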
-
-	spin_unlock(&journal->j_state_lock);
-
-	if (commit_transaction->t_checkpoint_list == NULL &&
-	    commit_transaction->t_checkpoint_io_list == NULL) {
-		__journal_drop_transaction(journal, commit_transaction);
-	} else {
-		if (journal->j_checkpoint_transactions == NULL) {
-			journal->j_checkpoint_transactions = commit_transaction;
-			commit_transaction->t_cpnext = commit_transaction;
-			commit_transaction->t_cpprev = commit_transaction;
-		} else {
-			commit_transaction->t_cpnext =
-				journal->j_checkpoint_transactions;
-			commit_transaction->t_cpprev =
-				commit_transaction->t_cpnext->t_cpprev;
-			commit_transaction->t_cpnext->t_cpprev =
-				commit_transaction;
-			commit_transaction->t_cpprev->t_cpnext =
-				commit_transaction;
-		}
-	}
-	spin_unlock(&journal->j_list_lock);
-
-	trace_jbd_end_commit(journal, commit_transaction);
-	jbd_debug(1, "JBD: commit %d complete, head %d\n",
-		  journal->j_commit_sequence, journal->j_tail_sequence);
-
-	wake_up(&journal->j_wait_done_commit);
-}
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
deleted file mode 100644
index c46a79a..0000000
--- a/fs/jbd/journal.c
+++ /dev/null
@@ -1,2145 +0,0 @@
-/*
- * linux/fs/jbd/journal.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
- *
- * Copyright 1998 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Generic filesystem journal-writing code; part of the ext2fs
- * journaling system.
- *
- * This file manages journals: areas of disk reserved for logging
- * transactional updates.  This includes the kernel journaling thread
- * which is responsible for scheduling updates to the log.
- *
- * We do not actually manage the physical storage of the journal in this
- * file: that is left to a per-journal policy function, which allows us
- * to store the journal within a filesystem-specified area for ext2
- * journaling (ext2 can use a reserved inode for storing the log).
- */
-
-#include <linux/module.h>
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/freezer.h>
-#include <linux/pagemap.h>
-#include <linux/kthread.h>
-#include <linux/poison.h>
-#include <linux/proc_fs.h>
-#include <linux/debugfs.h>
-#include <linux/ratelimit.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/jbd.h>
-
-#include <asm/uaccess.h>
-#include <asm/page.h>
-
-EXPORT_SYMBOL(journal_start);
-EXPORT_SYMBOL(journal_restart);
-EXPORT_SYMBOL(journal_extend);
-EXPORT_SYMBOL(journal_stop);
-EXPORT_SYMBOL(journal_lock_updates);
-EXPORT_SYMBOL(journal_unlock_updates);
-EXPORT_SYMBOL(journal_get_write_access);
-EXPORT_SYMBOL(journal_get_create_access);
-EXPORT_SYMBOL(journal_get_undo_access);
-EXPORT_SYMBOL(journal_dirty_data);
-EXPORT_SYMBOL(journal_dirty_metadata);
-EXPORT_SYMBOL(journal_release_buffer);
-EXPORT_SYMBOL(journal_forget);
-#if 0
-EXPORT_SYMBOL(journal_sync_buffer);
-#endif
-EXPORT_SYMBOL(journal_flush);
-EXPORT_SYMBOL(journal_revoke);
-
-EXPORT_SYMBOL(journal_init_dev);
-EXPORT_SYMBOL(journal_init_inode);
-EXPORT_SYMBOL(journal_update_format);
-EXPORT_SYMBOL(journal_check_used_features);
-EXPORT_SYMBOL(journal_check_available_features);
-EXPORT_SYMBOL(journal_set_features);
-EXPORT_SYMBOL(journal_create);
-EXPORT_SYMBOL(journal_load);
-EXPORT_SYMBOL(journal_destroy);
-EXPORT_SYMBOL(journal_abort);
-EXPORT_SYMBOL(journal_errno);
-EXPORT_SYMBOL(journal_ack_err);
-EXPORT_SYMBOL(journal_clear_err);
-EXPORT_SYMBOL(log_wait_commit);
-EXPORT_SYMBOL(log_start_commit);
-EXPORT_SYMBOL(journal_start_commit);
-EXPORT_SYMBOL(journal_force_commit_nested);
-EXPORT_SYMBOL(journal_wipe);
-EXPORT_SYMBOL(journal_blocks_per_page);
-EXPORT_SYMBOL(journal_invalidatepage);
-EXPORT_SYMBOL(journal_try_to_free_buffers);
-EXPORT_SYMBOL(journal_force_commit);
-
-static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
-static void __journal_abort_soft (journal_t *journal, int errno);
-static const char *journal_dev_name(journal_t *journal, char *buffer);
-
-#ifdef CONFIG_JBD_DEBUG
-void __jbd_debug(int level, const char *file, const char *func,
-		 unsigned int line, const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-
-	if (level > journal_enable_debug)
-		return;
-	va_start(args, fmt);
-	vaf.fmt = fmt;
-	vaf.va = &args;
-	printk(KERN_DEBUG "%s: (%s, %u): %pV\n", file, func, line, &vaf);
-	va_end(args);
-}
-EXPORT_SYMBOL(__jbd_debug);
-#endif
-
-/*
- * Helper function used to manage commit timeouts
- */
-
-static void commit_timeout(unsigned long __data)
-{
-	struct task_struct * p = (struct task_struct *) __data;
-
-	wake_up_process(p);
-}
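-
-/*
- * Illustrative aside (not part of the original file): elsewhere in jbd the
- * timer initialised by kjournald() below is armed roughly as
- *
- *	mod_timer(&journal->j_commit_timer,
- *		  jiffies + journal->j_commit_interval);
- *
- * when a transaction starts, so even an otherwise idle journal commits
- * within the configured interval of the first update.
- */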
-
-/*
- * kjournald: The main thread function used to manage a logging device
- * journal.
- *
- * This kernel thread is responsible for two things:
- *
- * 1) COMMIT:  Every so often we need to commit the current state of the
- *    filesystem to disk.  The journal thread is responsible for writing
- *    all of the metadata buffers to disk.
- *
- * 2) CHECKPOINT: We cannot reuse a used section of the log file until all
- *    of the data in that part of the log has been rewritten elsewhere on
- *    the disk.  Flushing these old buffers to reclaim space in the log is
- *    known as checkpointing, and this thread is responsible for that job.
- */
-
-static int kjournald(void *arg)
-{
-	journal_t *journal = arg;
-	transaction_t *transaction;
-
-	/*
-	 * Set up an interval timer which can be used to trigger a commit wakeup
-	 * after the commit interval expires
-	 */
-	setup_timer(&journal->j_commit_timer, commit_timeout,
-			(unsigned long)current);
-
-	set_freezable();
-
-	/* Record that the journal thread is running */
-	journal->j_task = current;
-	wake_up(&journal->j_wait_done_commit);
-
-	printk(KERN_INFO "kjournald starting.  Commit interval %ld seconds\n",
-			journal->j_commit_interval / HZ);
-
-	/*
-	 * And now, wait forever for commit wakeup events.
-	 */
-	spin_lock(&journal->j_state_lock);
-
-loop:
-	if (journal->j_flags & JFS_UNMOUNT)
-		goto end_loop;
-
-	jbd_debug(1, "commit_sequence=%d, commit_request=%d\n",
-		journal->j_commit_sequence, journal->j_commit_request);
-
-	if (journal->j_commit_sequence != journal->j_commit_request) {
-		jbd_debug(1, "OK, requests differ\n");
-		spin_unlock(&journal->j_state_lock);
-		del_timer_sync(&journal->j_commit_timer);
-		journal_commit_transaction(journal);
-		spin_lock(&journal->j_state_lock);
-		goto loop;
-	}
-
-	wake_up(&journal->j_wait_done_commit);
-	if (freezing(current)) {
-		/*
-		 * The simpler the better. Flushing journal isn't a
-		 * good idea, because that depends on threads that may
-		 * be already stopped.
-		 */
-		jbd_debug(1, "Now suspending kjournald\n");
-		spin_unlock(&journal->j_state_lock);
-		try_to_freeze();
-		spin_lock(&journal->j_state_lock);
-	} else {
-		/*
-		 * We assume on resume that commits are already there,
-		 * so we don't sleep
-		 */
-		DEFINE_WAIT(wait);
-		int should_sleep = 1;
-
-		prepare_to_wait(&journal->j_wait_commit, &wait,
-				TASK_INTERRUPTIBLE);
-		if (journal->j_commit_sequence != journal->j_commit_request)
-			should_sleep = 0;
-		transaction = journal->j_running_transaction;
-		if (transaction && time_after_eq(jiffies,
-						transaction->t_expires))
-			should_sleep = 0;
-		if (journal->j_flags & JFS_UNMOUNT)
-			should_sleep = 0;
-		if (should_sleep) {
-			spin_unlock(&journal->j_state_lock);
-			schedule();
-			spin_lock(&journal->j_state_lock);
-		}
-		finish_wait(&journal->j_wait_commit, &wait);
-	}
-
-	jbd_debug(1, "kjournald wakes\n");
-
-	/*
-	 * Were we woken up by a commit wakeup event?
-	 */
-	transaction = journal->j_running_transaction;
-	if (transaction && time_after_eq(jiffies, transaction->t_expires)) {
-		journal->j_commit_request = transaction->t_tid;
-		jbd_debug(1, "woke because of timeout\n");
-	}
-	goto loop;
-
-end_loop:
-	spin_unlock(&journal->j_state_lock);
-	del_timer_sync(&journal->j_commit_timer);
-	journal->j_task = NULL;
-	wake_up(&journal->j_wait_done_commit);
-	jbd_debug(1, "Journal thread exiting.\n");
-	return 0;
-}
-
-static int journal_start_thread(journal_t *journal)
-{
-	struct task_struct *t;
-
-	t = kthread_run(kjournald, journal, "kjournald");
-	if (IS_ERR(t))
-		return PTR_ERR(t);
-
-	wait_event(journal->j_wait_done_commit, journal->j_task != NULL);
-	return 0;
-}
-
-static void journal_kill_thread(journal_t *journal)
-{
-	spin_lock(&journal->j_state_lock);
-	journal->j_flags |= JFS_UNMOUNT;
-
-	while (journal->j_task) {
-		wake_up(&journal->j_wait_commit);
-		spin_unlock(&journal->j_state_lock);
-		wait_event(journal->j_wait_done_commit,
-				journal->j_task == NULL);
-		spin_lock(&journal->j_state_lock);
-	}
-	spin_unlock(&journal->j_state_lock);
-}
-
-/*
- * journal_write_metadata_buffer: write a metadata buffer to the journal.
- *
- * Writes a metadata buffer to a given disk block.  The actual IO is not
- * performed but a new buffer_head is constructed which labels the data
- * to be written with the correct destination disk block.
- *
- * Any magic-number escaping which needs to be done will cause a
- * copy-out here.  If the buffer happens to start with the
- * JFS_MAGIC_NUMBER, then we can't write it to the log directly: the
- * magic number is only written to the log for descriptor blocks.  In
- * this case, we copy the data and replace the first word with 0, and we
- * return a result code which indicates that this buffer needs to be
- * marked as an escaped buffer in the corresponding log descriptor
- * block.  The missing word can then be restored when the block is read
- * during recovery.
- *
- * If the source buffer has already been modified by a new transaction
- * since we took the last commit snapshot, we use the frozen copy of
- * that data for IO.  If we end up using the existing buffer_head's data
- * for the write, then we *have* to lock the buffer to prevent anyone
- * else from using and possibly modifying it while the IO is in
- * progress.
- *
- * The journal_head for the buffer to be used for IO is returned via @jh_out.
- *
- * We assume that the journal has already been locked in this function.
- *
- * Return value:
- *  <0: Error
- * >=0: Finished OK
- *
- * On success:
- * Bit 0 set == escape performed on the data
- * Bit 1 set == buffer copy-out performed (kfree the data after IO)
- */
-
-int journal_write_metadata_buffer(transaction_t *transaction,
-				  struct journal_head  *jh_in,
-				  struct journal_head **jh_out,
-				  unsigned int blocknr)
-{
-	int need_copy_out = 0;
-	int done_copy_out = 0;
-	int do_escape = 0;
-	char *mapped_data;
-	struct buffer_head *new_bh;
-	struct journal_head *new_jh;
-	struct page *new_page;
-	unsigned int new_offset;
-	struct buffer_head *bh_in = jh2bh(jh_in);
-	journal_t *journal = transaction->t_journal;
-
-	/*
-	 * The buffer really shouldn't be locked: only the current committing
-	 * transaction is allowed to write it, so nobody else is allowed
-	 * to do any IO.
-	 *
-	 * akpm: except if we're journalling data, and write() output is
-	 * also part of a shared mapping, and another thread has
-	 * decided to launch a writepage() against this buffer.
-	 */
-	J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
-
-	new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
-	/* keep subsequent assertions sane */
-	atomic_set(&new_bh->b_count, 1);
-	new_jh = journal_add_journal_head(new_bh);	/* This sleeps */
-
-	/*
-	 * If a new transaction has already done a buffer copy-out, then
-	 * we use that version of the data for the commit.
-	 */
-	jbd_lock_bh_state(bh_in);
-repeat:
-	if (jh_in->b_frozen_data) {
-		done_copy_out = 1;
-		new_page = virt_to_page(jh_in->b_frozen_data);
-		new_offset = offset_in_page(jh_in->b_frozen_data);
-	} else {
-		new_page = jh2bh(jh_in)->b_page;
-		new_offset = offset_in_page(jh2bh(jh_in)->b_data);
-	}
-
-	mapped_data = kmap_atomic(new_page);
-	/*
-	 * Check for escaping
-	 */
-	if (*((__be32 *)(mapped_data + new_offset)) ==
-				cpu_to_be32(JFS_MAGIC_NUMBER)) {
-		need_copy_out = 1;
-		do_escape = 1;
-	}
-	kunmap_atomic(mapped_data);
-
-	/*
-	 * Do we need to do a data copy?
-	 */
-	if (need_copy_out && !done_copy_out) {
-		char *tmp;
-
-		jbd_unlock_bh_state(bh_in);
-		tmp = jbd_alloc(bh_in->b_size, GFP_NOFS);
-		jbd_lock_bh_state(bh_in);
-		if (jh_in->b_frozen_data) {
-			jbd_free(tmp, bh_in->b_size);
-			goto repeat;
-		}
-
-		jh_in->b_frozen_data = tmp;
-		mapped_data = kmap_atomic(new_page);
-		memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
-		kunmap_atomic(mapped_data);
-
-		new_page = virt_to_page(tmp);
-		new_offset = offset_in_page(tmp);
-		done_copy_out = 1;
-	}
-
-	/*
-	 * Did we need to do an escape?  Now we've done all the
-	 * copying, we can finally do so.
-	 */
-	if (do_escape) {
-		mapped_data = kmap_atomic(new_page);
-		*((unsigned int *)(mapped_data + new_offset)) = 0;
-		kunmap_atomic(mapped_data);
-	}
-
-	set_bh_page(new_bh, new_page, new_offset);
-	new_jh->b_transaction = NULL;
-	new_bh->b_size = jh2bh(jh_in)->b_size;
-	new_bh->b_bdev = transaction->t_journal->j_dev;
-	new_bh->b_blocknr = blocknr;
-	set_buffer_mapped(new_bh);
-	set_buffer_dirty(new_bh);
-
-	*jh_out = new_jh;
-
-	/*
-	 * The to-be-written buffer needs to get moved to the io queue,
-	 * and the original buffer whose contents we are shadowing or
-	 * copying is moved to the transaction's shadow queue.
-	 */
-	JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
-	spin_lock(&journal->j_list_lock);
-	__journal_file_buffer(jh_in, transaction, BJ_Shadow);
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh_in);
-
-	JBUFFER_TRACE(new_jh, "file as BJ_IO");
-	journal_file_buffer(new_jh, transaction, BJ_IO);
-
-	return do_escape | (done_copy_out << 1);
-}
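-
-/*
- * Illustrative aside (not part of the original file): a caller decodes the
- * return value of journal_write_metadata_buffer() roughly as
- *
- *	flags = journal_write_metadata_buffer(transaction, jh, &new_jh,
- *					      blocknr);
- *	if (flags & 1)
- *		tag_flag |= JFS_FLAG_ESCAPE;	(the data was escaped)
- *	if (flags & 2)
- *		the copied-out b_frozen_data must be freed after the IO
- *
- * which matches the tag-flag handling in the commit code.
- */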
-
-/*
- * Allocation code for the journal file.  Manage the space left in the
- * journal, so that we can begin checkpointing when appropriate.
- */
-
-/*
- * __log_space_left: Return the number of free blocks left in the journal.
- *
- * Called with the journal already locked.
- *
- * Called under j_state_lock
- */
-
-int __log_space_left(journal_t *journal)
-{
-	int left = journal->j_free;
-
-	assert_spin_locked(&journal->j_state_lock);
-
-	/*
-	 * Be pessimistic here about the number of those free blocks which
-	 * might be required for log descriptor control blocks.
-	 */
-
-#define MIN_LOG_RESERVED_BLOCKS 32 /* Allow for rounding errors */
-
-	left -= MIN_LOG_RESERVED_BLOCKS;
-
-	if (left <= 0)
-		return 0;
-	left -= (left >> 3);
-	return left;
-}
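-
-/*
- * Illustrative aside (not part of the original file): with a hypothetical
- * j_free of 1024 blocks the estimate above works out as
- *
- *	left = 1024 - 32 = 992		(descriptor/rounding reserve)
- *	left = 992 - (992 >> 3) = 868	(hold back a further eighth)
- *
- * so callers see roughly 85% of the raw free block count.
- */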
-
-/*
- * Called under j_state_lock.  Returns true if a transaction commit was started.
- */
-int __log_start_commit(journal_t *journal, tid_t target)
-{
-	/*
-	 * The only transaction we can possibly wait upon is the
-	 * currently running transaction (if it exists).  Otherwise,
-	 * the target tid must be an old one.
-	 */
-	if (journal->j_commit_request != target &&
-	    journal->j_running_transaction &&
-	    journal->j_running_transaction->t_tid == target) {
-		/*
-		 * We want a new commit: OK, mark the request and wakeup the
-		 * commit thread.  We do _not_ do the commit ourselves.
-		 */
-
-		journal->j_commit_request = target;
-		jbd_debug(1, "JBD: requesting commit %d/%d\n",
-			  journal->j_commit_request,
-			  journal->j_commit_sequence);
-		wake_up(&journal->j_wait_commit);
-		return 1;
-	} else if (!tid_geq(journal->j_commit_request, target))
-		/* This should never happen, but if it does, preserve
-		   the evidence before kjournald goes into a loop and
-		   increments j_commit_sequence beyond all recognition. */
-		WARN_ONCE(1, "jbd: bad log_start_commit: %u %u %u %u\n",
-		    journal->j_commit_request, journal->j_commit_sequence,
-		    target, journal->j_running_transaction ?
-		    journal->j_running_transaction->t_tid : 0);
-	return 0;
-}
-
-int log_start_commit(journal_t *journal, tid_t tid)
-{
-	int ret;
-
-	spin_lock(&journal->j_state_lock);
-	ret = __log_start_commit(journal, tid);
-	spin_unlock(&journal->j_state_lock);
-	return ret;
-}
-
-/*
- * Force and wait upon a commit if the calling process is not within
- * transaction.  This is used for forcing out undo-protected data which contains
- * bitmaps, when the fs is running out of space.
- *
- * We can only force the running transaction if we don't have an active handle;
- * otherwise, we will deadlock.
- *
- * Returns true if a transaction was started.
- */
-int journal_force_commit_nested(journal_t *journal)
-{
-	transaction_t *transaction = NULL;
-	tid_t tid;
-
-	spin_lock(&journal->j_state_lock);
-	if (journal->j_running_transaction && !current->journal_info) {
-		transaction = journal->j_running_transaction;
-		__log_start_commit(journal, transaction->t_tid);
-	} else if (journal->j_committing_transaction)
-		transaction = journal->j_committing_transaction;
-
-	if (!transaction) {
-		spin_unlock(&journal->j_state_lock);
-		return 0;	/* Nothing to retry */
-	}
-
-	tid = transaction->t_tid;
-	spin_unlock(&journal->j_state_lock);
-	log_wait_commit(journal, tid);
-	return 1;
-}
-
-/*
- * Start a commit of the current running transaction (if any).  Returns true
- * if a transaction is going to be committed (or is currently already
- * committing), and fills its tid in at *ptid
- */
-int journal_start_commit(journal_t *journal, tid_t *ptid)
-{
-	int ret = 0;
-
-	spin_lock(&journal->j_state_lock);
-	if (journal->j_running_transaction) {
-		tid_t tid = journal->j_running_transaction->t_tid;
-
-		__log_start_commit(journal, tid);
-		/* There's a running transaction and we've just made sure
-		 * its commit has been scheduled. */
-		if (ptid)
-			*ptid = tid;
-		ret = 1;
-	} else if (journal->j_committing_transaction) {
-		/*
-		 * If commit has been started, then we have to wait for
-		 * completion of that transaction.
-		 */
-		if (ptid)
-			*ptid = journal->j_committing_transaction->t_tid;
-		ret = 1;
-	}
-	spin_unlock(&journal->j_state_lock);
-	return ret;
-}
-
-/*
- * Wait for a specified commit to complete.
- * The caller may not hold the journal lock.
- */
-int log_wait_commit(journal_t *journal, tid_t tid)
-{
-	int err = 0;
-
-#ifdef CONFIG_JBD_DEBUG
-	spin_lock(&journal->j_state_lock);
-	if (!tid_geq(journal->j_commit_request, tid)) {
-		printk(KERN_ERR
-		       "%s: error: j_commit_request=%d, tid=%d\n",
-		       __func__, journal->j_commit_request, tid);
-	}
-	spin_unlock(&journal->j_state_lock);
-#endif
-	spin_lock(&journal->j_state_lock);
-	/*
-	 * Not running or committing trans? Must be already committed. This
-	 * saves us from waiting for a *long* time when tid overflows.
-	 */
-	if (!((journal->j_running_transaction &&
-	       journal->j_running_transaction->t_tid == tid) ||
-	      (journal->j_committing_transaction &&
-	       journal->j_committing_transaction->t_tid == tid)))
-		goto out_unlock;
-
-	if (!tid_geq(journal->j_commit_waited, tid))
-		journal->j_commit_waited = tid;
-	while (tid_gt(tid, journal->j_commit_sequence)) {
-		jbd_debug(1, "JBD: want %d, j_commit_sequence=%d\n",
-				  tid, journal->j_commit_sequence);
-		wake_up(&journal->j_wait_commit);
-		spin_unlock(&journal->j_state_lock);
-		wait_event(journal->j_wait_done_commit,
-				!tid_gt(tid, journal->j_commit_sequence));
-		spin_lock(&journal->j_state_lock);
-	}
-out_unlock:
-	spin_unlock(&journal->j_state_lock);
-
-	if (unlikely(is_journal_aborted(journal)))
-		err = -EIO;
-	return err;
-}
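-
-/*
- * Illustrative aside (not part of the original file): the tid_gt()/tid_geq()
- * helpers used above compare transaction IDs with wrap-safe serial-number
- * arithmetic, along the lines of
- *
- *	static inline int tid_gt(tid_t x, tid_t y)
- *	{
- *		int difference = (int)(x - y);
- *		return difference > 0;
- *	}
- *
- * which is why the explicit "already committed" check matters once the
- * tid counter has wrapped.
- */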
-
-/*
- * Return 1 if a given transaction has not yet sent a barrier request
- * connected with a transaction commit. If 0 is returned, the transaction
- * may or may not have sent the barrier. Used to avoid sending the barrier
- * twice in common cases.
- */
-int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
-{
-	int ret = 0;
-	transaction_t *commit_trans;
-
-	if (!(journal->j_flags & JFS_BARRIER))
-		return 0;
-	spin_lock(&journal->j_state_lock);
-	/* Transaction already committed? */
-	if (tid_geq(journal->j_commit_sequence, tid))
-		goto out;
-	/*
-	 * Transaction is being committed and we already proceeded to
-	 * writing commit record?
-	 */
-	commit_trans = journal->j_committing_transaction;
-	if (commit_trans && commit_trans->t_tid == tid &&
-	    commit_trans->t_state >= T_COMMIT_RECORD)
-		goto out;
-	ret = 1;
-out:
-	spin_unlock(&journal->j_state_lock);
-	return ret;
-}
-EXPORT_SYMBOL(journal_trans_will_send_data_barrier);
-
-/*
- * Log buffer allocation routines:
- */
-
-int journal_next_log_block(journal_t *journal, unsigned int *retp)
-{
-	unsigned int blocknr;
-
-	spin_lock(&journal->j_state_lock);
-	J_ASSERT(journal->j_free > 1);
-
-	blocknr = journal->j_head;
-	journal->j_head++;
-	journal->j_free--;
-	if (journal->j_head == journal->j_last)
-		journal->j_head = journal->j_first;
-	spin_unlock(&journal->j_state_lock);
-	return journal_bmap(journal, blocknr, retp);
-}
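-
-/*
- * Illustrative aside (not part of the original file): j_head walks the log
- * circularly, wrapping from j_last back to j_first.  With a hypothetical
- * j_first == 1 and j_last == 1024 the logical blocks handed out are
- *
- *	1, 2, ..., 1022, 1023, 1, 2, ...
- *
- * and journal_bmap() below maps each logical block to a physical one.
- */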
-
-/*
- * Conversion of logical to physical block numbers for the journal
- *
- * On external journals the journal blocks are identity-mapped, so
- * this is a no-op.  If needed, we can use j_blk_offset - everything is
- * ready.
- */
-int journal_bmap(journal_t *journal, unsigned int blocknr,
-		 unsigned int *retp)
-{
-	int err = 0;
-	unsigned int ret;
-
-	if (journal->j_inode) {
-		ret = bmap(journal->j_inode, blocknr);
-		if (ret)
-			*retp = ret;
-		else {
-			char b[BDEVNAME_SIZE];
-
-			printk(KERN_ALERT "%s: journal block not found "
-					"at offset %u on %s\n",
-				__func__,
-				blocknr,
-				bdevname(journal->j_dev, b));
-			err = -EIO;
-			__journal_abort_soft(journal, err);
-		}
-	} else {
-		*retp = blocknr; /* +journal->j_blk_offset */
-	}
-	return err;
-}
-
-/*
- * We play buffer_head aliasing tricks to write data/metadata blocks to
- * the journal without copying their contents, but for journal
- * descriptor blocks we do need to generate bona fide buffers.
- *
- * After the caller of journal_get_descriptor_buffer() has finished modifying
- * the buffer's contents they really should run flush_dcache_page(bh->b_page).
- * But we don't bother doing that, so there will be coherency problems with
- * mmaps of blockdevs which hold live JBD-controlled filesystems.
- */
-struct journal_head *journal_get_descriptor_buffer(journal_t *journal)
-{
-	struct buffer_head *bh;
-	unsigned int blocknr;
-	int err;
-
-	err = journal_next_log_block(journal, &blocknr);
-
-	if (err)
-		return NULL;
-
-	bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
-	if (!bh)
-		return NULL;
-	lock_buffer(bh);
-	memset(bh->b_data, 0, journal->j_blocksize);
-	set_buffer_uptodate(bh);
-	unlock_buffer(bh);
-	BUFFER_TRACE(bh, "return this buffer");
-	return journal_add_journal_head(bh);
-}
-
-/*
- * Management for journal control blocks: functions to create and
- * destroy journal_t structures, and to initialise and read existing
- * journal blocks from disk.  */
-
-/* First: create and setup a journal_t object in memory.  We initialise
- * very few fields yet: that has to wait until we have created the
- * journal structures from scratch, or loaded them from disk. */
-
-static journal_t * journal_init_common (void)
-{
-	journal_t *journal;
-	int err;
-
-	journal = kzalloc(sizeof(*journal), GFP_KERNEL);
-	if (!journal)
-		goto fail;
-
-	init_waitqueue_head(&journal->j_wait_transaction_locked);
-	init_waitqueue_head(&journal->j_wait_logspace);
-	init_waitqueue_head(&journal->j_wait_done_commit);
-	init_waitqueue_head(&journal->j_wait_checkpoint);
-	init_waitqueue_head(&journal->j_wait_commit);
-	init_waitqueue_head(&journal->j_wait_updates);
-	mutex_init(&journal->j_checkpoint_mutex);
-	spin_lock_init(&journal->j_revoke_lock);
-	spin_lock_init(&journal->j_list_lock);
-	spin_lock_init(&journal->j_state_lock);
-
-	journal->j_commit_interval = (HZ * JBD_DEFAULT_MAX_COMMIT_AGE);
-
-	/* The journal is marked for error until we succeed with recovery! */
-	journal->j_flags = JFS_ABORT;
-
-	/* Set up a default-sized revoke table for the new mount. */
-	err = journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
-	if (err) {
-		kfree(journal);
-		goto fail;
-	}
-	return journal;
-fail:
-	return NULL;
-}
-
-/* journal_init_dev and journal_init_inode:
- *
- * Create a journal structure assigned some fixed set of disk blocks to
- * the journal.  We don't actually touch those disk blocks yet, but we
- * need to set up all of the mapping information to tell the journaling
- * system where the journal blocks are.
- *
- */
-
-/**
- *  journal_t * journal_init_dev() - creates and initialises a journal structure
- *  @bdev: Block device on which to create the journal
- *  @fs_dev: Device which holds the journalled filesystem for this journal.
- *  @start: Block nr at which the journal starts.
- *  @len:  Length of the journal in blocks.
- *  @blocksize: blocksize of journalling device
- *
- *  Returns: a newly created journal_t *
- *
- *  journal_init_dev creates a journal which maps a fixed contiguous
- *  range of blocks on an arbitrary block device.
- *
- */
-journal_t * journal_init_dev(struct block_device *bdev,
-			struct block_device *fs_dev,
-			int start, int len, int blocksize)
-{
-	journal_t *journal = journal_init_common();
-	struct buffer_head *bh;
-	int n;
-
-	if (!journal)
-		return NULL;
-
-	/* journal descriptor can store up to n blocks -bzzz */
-	journal->j_blocksize = blocksize;
-	n = journal->j_blocksize / sizeof(journal_block_tag_t);
-	journal->j_wbufsize = n;
-	journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
-	if (!journal->j_wbuf) {
-		printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
-			__func__);
-		goto out_err;
-	}
-	journal->j_dev = bdev;
-	journal->j_fs_dev = fs_dev;
-	journal->j_blk_offset = start;
-	journal->j_maxlen = len;
-
-	bh = __getblk(journal->j_dev, start, journal->j_blocksize);
-	if (!bh) {
-		printk(KERN_ERR
-		       "%s: Cannot get buffer for journal superblock\n",
-		       __func__);
-		goto out_err;
-	}
-	journal->j_sb_buffer = bh;
-	journal->j_superblock = (journal_superblock_t *)bh->b_data;
-
-	return journal;
-out_err:
-	kfree(journal->j_wbuf);
-	kfree(journal);
-	return NULL;
-}
-
-/**
- *  journal_t * journal_init_inode () - creates a journal which maps to an inode.
- *  @inode: An inode to create the journal in
- *
- * journal_init_inode creates a journal which maps an on-disk inode as
- * the journal.  The inode must exist already, must support bmap() and
- * must have all data blocks preallocated.
- */
-journal_t * journal_init_inode (struct inode *inode)
-{
-	struct buffer_head *bh;
-	journal_t *journal = journal_init_common();
-	int err;
-	int n;
-	unsigned int blocknr;
-
-	if (!journal)
-		return NULL;
-
-	journal->j_dev = journal->j_fs_dev = inode->i_sb->s_bdev;
-	journal->j_inode = inode;
-	jbd_debug(1,
-		  "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
-		  journal, inode->i_sb->s_id, inode->i_ino,
-		  (long long) inode->i_size,
-		  inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
-
-	journal->j_maxlen = inode->i_size >> inode->i_sb->s_blocksize_bits;
-	journal->j_blocksize = inode->i_sb->s_blocksize;
-
-	/* journal descriptor can store up to n blocks -bzzz */
-	n = journal->j_blocksize / sizeof(journal_block_tag_t);
-	journal->j_wbufsize = n;
-	journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
-	if (!journal->j_wbuf) {
-		printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
-			__func__);
-		goto out_err;
-	}
-
-	err = journal_bmap(journal, 0, &blocknr);
-	/* If that failed, give up */
-	if (err) {
-		printk(KERN_ERR "%s: Cannot locate journal superblock\n",
-		       __func__);
-		goto out_err;
-	}
-
-	bh = getblk_unmovable(journal->j_dev, blocknr, journal->j_blocksize);
-	if (!bh) {
-		printk(KERN_ERR
-		       "%s: Cannot get buffer for journal superblock\n",
-		       __func__);
-		goto out_err;
-	}
-	journal->j_sb_buffer = bh;
-	journal->j_superblock = (journal_superblock_t *)bh->b_data;
-
-	return journal;
-out_err:
-	kfree(journal->j_wbuf);
-	kfree(journal);
-	return NULL;
-}
-
-/*
- * If the journal init or create aborts, we need to mark the journal
- * superblock as being NULL to prevent the journal destroy from writing
- * back a bogus superblock.
- */
-static void journal_fail_superblock (journal_t *journal)
-{
-	struct buffer_head *bh = journal->j_sb_buffer;
-	brelse(bh);
-	journal->j_sb_buffer = NULL;
-}
-
-/*
- * Given a journal_t structure, initialise the various fields for
- * startup of a new journaling session.  We use this both when creating
- * a journal, and after recovering an old journal to reset it for
- * subsequent use.
- */
-
-static int journal_reset(journal_t *journal)
-{
-	journal_superblock_t *sb = journal->j_superblock;
-	unsigned int first, last;
-
-	first = be32_to_cpu(sb->s_first);
-	last = be32_to_cpu(sb->s_maxlen);
-	if (first + JFS_MIN_JOURNAL_BLOCKS > last + 1) {
-		printk(KERN_ERR "JBD: Journal too short (blocks %u-%u).\n",
-		       first, last);
-		journal_fail_superblock(journal);
-		return -EINVAL;
-	}
-
-	journal->j_first = first;
-	journal->j_last = last;
-
-	journal->j_head = first;
-	journal->j_tail = first;
-	journal->j_free = last - first;
-
-	journal->j_tail_sequence = journal->j_transaction_sequence;
-	journal->j_commit_sequence = journal->j_transaction_sequence - 1;
-	journal->j_commit_request = journal->j_commit_sequence;
-
-	journal->j_max_transaction_buffers = journal->j_maxlen / 4;
-
-	/*
-	 * As a special case, if the on-disk copy is already marked as needing
-	 * no recovery (s_start == 0), then we can safely defer the superblock
-	 * update until the next commit by setting JFS_FLUSHED.  This avoids
-	 * attempting a write to a potential-readonly device.
-	 */
-	if (sb->s_start == 0) {
-		jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
-			"(start %u, seq %d, errno %d)\n",
-			journal->j_tail, journal->j_tail_sequence,
-			journal->j_errno);
-		journal->j_flags |= JFS_FLUSHED;
-	} else {
-		/* Lock here to make assertions happy... */
-		mutex_lock(&journal->j_checkpoint_mutex);
-		/*
-		 * Update log tail information. We use WRITE_FUA since new
-		 * transaction will start reusing journal space and so we
-		 * must make sure information about current log tail is on
-		 * disk before that.
-		 */
-		journal_update_sb_log_tail(journal,
-					   journal->j_tail_sequence,
-					   journal->j_tail,
-					   WRITE_FUA);
-		mutex_unlock(&journal->j_checkpoint_mutex);
-	}
-	return journal_start_thread(journal);
-}
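-
-/*
- * Illustrative aside (not part of the original file): for a hypothetical
- * 8192-block journal with s_first == 1, the reset above yields
-
- *	j_free = 8191
- *	j_max_transaction_buffers = 8192 / 4 = 2048
-
- * i.e. a single transaction may consume at most about a quarter of the log.
- */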
-
-/**
- * int journal_create() - Initialise the new journal file
- * @journal: Journal to create. This structure must have been initialised
- *
- * Given a journal_t structure which tells us which disk blocks we can
- * use, create a new journal superblock and initialise all of the
- * journal fields from scratch.
- **/
-int journal_create(journal_t *journal)
-{
-	unsigned int blocknr;
-	struct buffer_head *bh;
-	journal_superblock_t *sb;
-	int i, err;
-
-	if (journal->j_maxlen < JFS_MIN_JOURNAL_BLOCKS) {
-		printk (KERN_ERR "Journal length (%d blocks) too short.\n",
-			journal->j_maxlen);
-		journal_fail_superblock(journal);
-		return -EINVAL;
-	}
-
-	if (journal->j_inode == NULL) {
-		/*
-		 * We don't know what block to start at!
-		 */
-		printk(KERN_EMERG
-		       "%s: creation of journal on external device!\n",
-		       __func__);
-		BUG();
-	}
-
-	/* Zero out the entire journal on disk.  We cannot afford to
-	   have any blocks on disk beginning with JFS_MAGIC_NUMBER. */
-	jbd_debug(1, "JBD: Zeroing out journal blocks...\n");
-	for (i = 0; i < journal->j_maxlen; i++) {
-		err = journal_bmap(journal, i, &blocknr);
-		if (err)
-			return err;
-		bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
-		if (unlikely(!bh))
-			return -ENOMEM;
-		lock_buffer(bh);
-		memset (bh->b_data, 0, journal->j_blocksize);
-		BUFFER_TRACE(bh, "marking dirty");
-		mark_buffer_dirty(bh);
-		BUFFER_TRACE(bh, "marking uptodate");
-		set_buffer_uptodate(bh);
-		unlock_buffer(bh);
-		__brelse(bh);
-	}
-
-	sync_blockdev(journal->j_dev);
-	jbd_debug(1, "JBD: journal cleared.\n");
-
-	/* OK, fill in the initial static fields in the new superblock */
-	sb = journal->j_superblock;
-
-	sb->s_header.h_magic	 = cpu_to_be32(JFS_MAGIC_NUMBER);
-	sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2);
-
-	sb->s_blocksize	= cpu_to_be32(journal->j_blocksize);
-	sb->s_maxlen	= cpu_to_be32(journal->j_maxlen);
-	sb->s_first	= cpu_to_be32(1);
-
-	journal->j_transaction_sequence = 1;
-
-	journal->j_flags &= ~JFS_ABORT;
-	journal->j_format_version = 2;
-
-	return journal_reset(journal);
-}
-
-static void journal_write_superblock(journal_t *journal, int write_op)
-{
-	struct buffer_head *bh = journal->j_sb_buffer;
-	int ret;
-
-	trace_journal_write_superblock(journal, write_op);
-	if (!(journal->j_flags & JFS_BARRIER))
-		write_op &= ~(REQ_FUA | REQ_FLUSH);
-	lock_buffer(bh);
-	if (buffer_write_io_error(bh)) {
-		char b[BDEVNAME_SIZE];
-		/*
-		 * Oh, dear.  A previous attempt to write the journal
-		 * superblock failed.  This could happen because the
-		 * USB device was yanked out.  Or it could happen to
-		 * be a transient write error and maybe the block will
-		 * be remapped.  Nothing we can do but to retry the
-		 * write and hope for the best.
-		 */
-		printk(KERN_ERR "JBD: previous I/O error detected "
-		       "for journal superblock update for %s.\n",
-		       journal_dev_name(journal, b));
-		clear_buffer_write_io_error(bh);
-		set_buffer_uptodate(bh);
-	}
-
-	get_bh(bh);
-	bh->b_end_io = end_buffer_write_sync;
-	ret = submit_bh(write_op, bh);
-	wait_on_buffer(bh);
-	if (buffer_write_io_error(bh)) {
-		clear_buffer_write_io_error(bh);
-		set_buffer_uptodate(bh);
-		ret = -EIO;
-	}
-	if (ret) {
-		char b[BDEVNAME_SIZE];
-		printk(KERN_ERR "JBD: Error %d detected "
-		       "when updating journal superblock for %s.\n",
-		       ret, journal_dev_name(journal, b));
-	}
-}
-
-/**
- * journal_update_sb_log_tail() - Update log tail in journal sb on disk.
- * @journal: The journal to update.
- * @tail_tid: TID of the new transaction at the tail of the log
- * @tail_block: The first block of the transaction at the tail of the log
- * @write_op: The operation with which to write the journal sb
- *
- * Update a journal's superblock information about log tail and write it to
- * disk, waiting for the IO to complete.
- */
-void journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
-				unsigned int tail_block, int write_op)
-{
-	journal_superblock_t *sb = journal->j_superblock;
-
-	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
-	jbd_debug(1,"JBD: updating superblock (start %u, seq %u)\n",
-		  tail_block, tail_tid);
-
-	sb->s_sequence = cpu_to_be32(tail_tid);
-	sb->s_start    = cpu_to_be32(tail_block);
-
-	journal_write_superblock(journal, write_op);
-
-	/* Log is no longer empty */
-	spin_lock(&journal->j_state_lock);
-	WARN_ON(!sb->s_sequence);
-	journal->j_flags &= ~JFS_FLUSHED;
-	spin_unlock(&journal->j_state_lock);
-}
-
-/**
- * mark_journal_empty() - Mark the on-disk journal as empty.
- * @journal: The journal to update.
- *
- * Update a journal's dynamic superblock fields to show that the journal is
- * empty.  Write the updated superblock to disk, waiting for the IO to complete.
- */
-static void mark_journal_empty(journal_t *journal)
-{
-	journal_superblock_t *sb = journal->j_superblock;
-
-	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
-	spin_lock(&journal->j_state_lock);
-	/* Is it already empty? */
-	if (sb->s_start == 0) {
-		spin_unlock(&journal->j_state_lock);
-		return;
-	}
-	jbd_debug(1, "JBD: Marking journal as empty (seq %d)\n",
-        	  journal->j_tail_sequence);
-
-	sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
-	sb->s_start    = cpu_to_be32(0);
-	spin_unlock(&journal->j_state_lock);
-
-	journal_write_superblock(journal, WRITE_FUA);
-
-	spin_lock(&journal->j_state_lock);
-	/* Log is empty */
-	journal->j_flags |= JFS_FLUSHED;
-	spin_unlock(&journal->j_state_lock);
-}
-
-/**
- * journal_update_sb_errno() - Update error in the journal.
- * @journal: The journal to update.
- *
- * Update a journal's errno.  Write the updated superblock to disk, waiting
- * for the IO to complete.
- */
-static void journal_update_sb_errno(journal_t *journal)
-{
-	journal_superblock_t *sb = journal->j_superblock;
-
-	spin_lock(&journal->j_state_lock);
-	jbd_debug(1, "JBD: updating superblock error (errno %d)\n",
-        	  journal->j_errno);
-	sb->s_errno = cpu_to_be32(journal->j_errno);
-	spin_unlock(&journal->j_state_lock);
-
-	journal_write_superblock(journal, WRITE_SYNC);
-}
-
-/*
- * Read the superblock for a given journal, performing initial
- * validation of the format.
- */
-
-static int journal_get_superblock(journal_t *journal)
-{
-	struct buffer_head *bh;
-	journal_superblock_t *sb;
-	int err = -EIO;
-
-	bh = journal->j_sb_buffer;
-
-	J_ASSERT(bh != NULL);
-	if (!buffer_uptodate(bh)) {
-		ll_rw_block(READ, 1, &bh);
-		wait_on_buffer(bh);
-		if (!buffer_uptodate(bh)) {
-			printk (KERN_ERR
-				"JBD: IO error reading journal superblock\n");
-			goto out;
-		}
-	}
-
-	sb = journal->j_superblock;
-
-	err = -EINVAL;
-
-	if (sb->s_header.h_magic != cpu_to_be32(JFS_MAGIC_NUMBER) ||
-	    sb->s_blocksize != cpu_to_be32(journal->j_blocksize)) {
-		printk(KERN_WARNING "JBD: no valid journal superblock found\n");
-		goto out;
-	}
-
-	switch(be32_to_cpu(sb->s_header.h_blocktype)) {
-	case JFS_SUPERBLOCK_V1:
-		journal->j_format_version = 1;
-		break;
-	case JFS_SUPERBLOCK_V2:
-		journal->j_format_version = 2;
-		break;
-	default:
-		printk(KERN_WARNING "JBD: unrecognised superblock format ID\n");
-		goto out;
-	}
-
-	if (be32_to_cpu(sb->s_maxlen) < journal->j_maxlen)
-		journal->j_maxlen = be32_to_cpu(sb->s_maxlen);
-	else if (be32_to_cpu(sb->s_maxlen) > journal->j_maxlen) {
-		printk (KERN_WARNING "JBD: journal file too short\n");
-		goto out;
-	}
-
-	if (be32_to_cpu(sb->s_first) == 0 ||
-	    be32_to_cpu(sb->s_first) >= journal->j_maxlen) {
-		printk(KERN_WARNING
-			"JBD: Invalid start block of journal: %u\n",
-			be32_to_cpu(sb->s_first));
-		goto out;
-	}
-
-	return 0;
-
-out:
-	journal_fail_superblock(journal);
-	return err;
-}
-
-/*
- * Load the on-disk journal superblock and read the key fields into the
- * journal_t.
- */
-
-static int load_superblock(journal_t *journal)
-{
-	int err;
-	journal_superblock_t *sb;
-
-	err = journal_get_superblock(journal);
-	if (err)
-		return err;
-
-	sb = journal->j_superblock;
-
-	journal->j_tail_sequence = be32_to_cpu(sb->s_sequence);
-	journal->j_tail = be32_to_cpu(sb->s_start);
-	journal->j_first = be32_to_cpu(sb->s_first);
-	journal->j_last = be32_to_cpu(sb->s_maxlen);
-	journal->j_errno = be32_to_cpu(sb->s_errno);
-
-	return 0;
-}
-
-
-/**
- * int journal_load() - Read journal from disk.
- * @journal: Journal to act on.
- *
- * Given a journal_t structure which tells us which disk blocks contain
- * a journal, read the journal from disk to initialise the in-memory
- * structures.
- */
-int journal_load(journal_t *journal)
-{
-	int err;
-	journal_superblock_t *sb;
-
-	err = load_superblock(journal);
-	if (err)
-		return err;
-
-	sb = journal->j_superblock;
-	/* If this is a V2 superblock, then we have to check the
-	 * feature flags on it. */
-
-	if (journal->j_format_version >= 2) {
-		if ((sb->s_feature_ro_compat &
-		     ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) ||
-		    (sb->s_feature_incompat &
-		     ~cpu_to_be32(JFS_KNOWN_INCOMPAT_FEATURES))) {
-			printk (KERN_WARNING
-				"JBD: Unrecognised features on journal\n");
-			return -EINVAL;
-		}
-	}
-
-	/* Let the recovery code check whether it needs to recover any
-	 * data from the journal. */
-	if (journal_recover(journal))
-		goto recovery_error;
-
-	/* OK, we've finished with the dynamic journal bits:
-	 * reinitialise the dynamic contents of the superblock in memory
-	 * and reset them on disk. */
-	if (journal_reset(journal))
-		goto recovery_error;
-
-	journal->j_flags &= ~JFS_ABORT;
-	journal->j_flags |= JFS_LOADED;
-	return 0;
-
-recovery_error:
-	printk (KERN_WARNING "JBD: recovery failed\n");
-	return -EIO;
-}
-
-/**
- * void journal_destroy() - Release a journal_t structure.
- * @journal: Journal to act on.
- *
- * Release a journal_t structure once it is no longer in use by the
- * journaled object.
- * Return <0 if we couldn't clean up the journal.
- */
-int journal_destroy(journal_t *journal)
-{
-	int err = 0;
-
-	/* Wait for the commit thread to wake up and die. */
-	journal_kill_thread(journal);
-
-	/* Force a final log commit */
-	if (journal->j_running_transaction)
-		journal_commit_transaction(journal);
-
-	/* Force any old transactions to disk */
-
-	/* We cannot race with anybody but must keep assertions happy */
-	mutex_lock(&journal->j_checkpoint_mutex);
-	/* Totally anal locking here... */
-	spin_lock(&journal->j_list_lock);
-	while (journal->j_checkpoint_transactions != NULL) {
-		spin_unlock(&journal->j_list_lock);
-		log_do_checkpoint(journal);
-		spin_lock(&journal->j_list_lock);
-	}
-
-	J_ASSERT(journal->j_running_transaction == NULL);
-	J_ASSERT(journal->j_committing_transaction == NULL);
-	J_ASSERT(journal->j_checkpoint_transactions == NULL);
-	spin_unlock(&journal->j_list_lock);
-
-	if (journal->j_sb_buffer) {
-		if (!is_journal_aborted(journal)) {
-			journal->j_tail_sequence =
-				++journal->j_transaction_sequence;
-			mark_journal_empty(journal);
-		} else
-			err = -EIO;
-		brelse(journal->j_sb_buffer);
-	}
-	mutex_unlock(&journal->j_checkpoint_mutex);
-
-	iput(journal->j_inode);
-	if (journal->j_revoke)
-		journal_destroy_revoke(journal);
-	kfree(journal->j_wbuf);
-	kfree(journal);
-
-	return err;
-}
-
-
-/**
- * int journal_check_used_features () - Check if features specified are used.
- * @journal: Journal to check.
- * @compat: bitmask of compatible features
- * @ro: bitmask of features that force read-only mount
- * @incompat: bitmask of incompatible features
- *
- * Check whether the journal uses all of a given set of
- * features.  Return true (non-zero) if it does.
- **/
-
-int journal_check_used_features (journal_t *journal, unsigned long compat,
-				 unsigned long ro, unsigned long incompat)
-{
-	journal_superblock_t *sb;
-
-	if (!compat && !ro && !incompat)
-		return 1;
-	if (journal->j_format_version == 1)
-		return 0;
-
-	sb = journal->j_superblock;
-
-	if (((be32_to_cpu(sb->s_feature_compat) & compat) == compat) &&
-	    ((be32_to_cpu(sb->s_feature_ro_compat) & ro) == ro) &&
-	    ((be32_to_cpu(sb->s_feature_incompat) & incompat) == incompat))
-		return 1;
-
-	return 0;
-}
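-
-/*
- * Illustrative aside (not part of the original file): a filesystem asks
- * whether its journal already uses, say, the revoke feature roughly as
-
- *	if (journal_check_used_features(journal, 0, 0,
- *					JFS_FEATURE_INCOMPAT_REVOKE))
- *		... journal records revocations ...
- */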
-
-/**
- * int journal_check_available_features() - Check feature set in journalling layer
- * @journal: Journal to check.
- * @compat: bitmask of compatible features
- * @ro: bitmask of features that force read-only mount
- * @incompat: bitmask of incompatible features
- *
- * Check whether the journaling code supports the use of
- * all of a given set of features on this journal.  Return true
- * (non-zero) if it can. */
-
-int journal_check_available_features (journal_t *journal, unsigned long compat,
-				      unsigned long ro, unsigned long incompat)
-{
-	if (!compat && !ro && !incompat)
-		return 1;
-
-	/* We can support any known requested features iff the
-	 * superblock is in version 2.  Otherwise we fail to support any
-	 * extended sb features. */
-
-	if (journal->j_format_version != 2)
-		return 0;
-
-	if ((compat   & JFS_KNOWN_COMPAT_FEATURES) == compat &&
-	    (ro       & JFS_KNOWN_ROCOMPAT_FEATURES) == ro &&
-	    (incompat & JFS_KNOWN_INCOMPAT_FEATURES) == incompat)
-		return 1;
-
-	return 0;
-}
-
-/**
- * int journal_set_features () - Mark a given journal feature in the superblock
- * @journal: Journal to act on.
- * @compat: bitmask of compatible features
- * @ro: bitmask of features that force read-only mount
- * @incompat: bitmask of incompatible features
- *
- * Mark a given journal feature as present on the
- * superblock.  Returns true if the requested features could be set.
- *
- */
-
-int journal_set_features (journal_t *journal, unsigned long compat,
-			  unsigned long ro, unsigned long incompat)
-{
-	journal_superblock_t *sb;
-
-	if (journal_check_used_features(journal, compat, ro, incompat))
-		return 1;
-
-	if (!journal_check_available_features(journal, compat, ro, incompat))
-		return 0;
-
-	jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
-		  compat, ro, incompat);
-
-	sb = journal->j_superblock;
-
-	sb->s_feature_compat    |= cpu_to_be32(compat);
-	sb->s_feature_ro_compat |= cpu_to_be32(ro);
-	sb->s_feature_incompat  |= cpu_to_be32(incompat);
-
-	return 1;
-}
-
-
-/**
- * int journal_update_format () - Update on-disk journal structure.
- * @journal: Journal to act on.
- *
- * Given an initialised but unloaded journal struct, poke about in the
- * on-disk structure to update it to the most recent supported version.
- */
-int journal_update_format (journal_t *journal)
-{
-	journal_superblock_t *sb;
-	int err;
-
-	err = journal_get_superblock(journal);
-	if (err)
-		return err;
-
-	sb = journal->j_superblock;
-
-	switch (be32_to_cpu(sb->s_header.h_blocktype)) {
-	case JFS_SUPERBLOCK_V2:
-		return 0;
-	case JFS_SUPERBLOCK_V1:
-		return journal_convert_superblock_v1(journal, sb);
-	default:
-		break;
-	}
-	return -EINVAL;
-}
-
-static int journal_convert_superblock_v1(journal_t *journal,
-					 journal_superblock_t *sb)
-{
-	int offset, blocksize;
-	struct buffer_head *bh;
-
-	printk(KERN_WARNING
-		"JBD: Converting superblock from version 1 to 2.\n");
-
-	/* Pre-initialise new fields to zero */
-	offset = ((char *) &(sb->s_feature_compat)) - ((char *) sb);
-	blocksize = be32_to_cpu(sb->s_blocksize);
-	memset(&sb->s_feature_compat, 0, blocksize-offset);
-
-	sb->s_nr_users = cpu_to_be32(1);
-	sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2);
-	journal->j_format_version = 2;
-
-	bh = journal->j_sb_buffer;
-	BUFFER_TRACE(bh, "marking dirty");
-	mark_buffer_dirty(bh);
-	sync_dirty_buffer(bh);
-	return 0;
-}
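-
-/*
- * Illustrative aside (not part of the original file): the offset computed
- * above is just offsetof(journal_superblock_t, s_feature_compat), so the
- * memset() zeroes everything from the v2 feature flags to the end of the
- * superblock's disk block.
- */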
-
-
-/**
- * int journal_flush () - Flush journal
- * @journal: Journal to act on.
- *
- * Flush all data for a given journal to disk and empty the journal.
- * Filesystems can use this when remounting readonly to ensure that
- * recovery does not need to happen on remount.
- */
-
-int journal_flush(journal_t *journal)
-{
-	int err = 0;
-	transaction_t *transaction = NULL;
-
-	spin_lock(&journal->j_state_lock);
-
-	/* Force everything buffered to the log... */
-	if (journal->j_running_transaction) {
-		transaction = journal->j_running_transaction;
-		__log_start_commit(journal, transaction->t_tid);
-	} else if (journal->j_committing_transaction)
-		transaction = journal->j_committing_transaction;
-
-	/* Wait for the log commit to complete... */
-	if (transaction) {
-		tid_t tid = transaction->t_tid;
-
-		spin_unlock(&journal->j_state_lock);
-		log_wait_commit(journal, tid);
-	} else {
-		spin_unlock(&journal->j_state_lock);
-	}
-
-	/* ...and flush everything in the log out to disk. */
-	spin_lock(&journal->j_list_lock);
-	while (!err && journal->j_checkpoint_transactions != NULL) {
-		spin_unlock(&journal->j_list_lock);
-		mutex_lock(&journal->j_checkpoint_mutex);
-		err = log_do_checkpoint(journal);
-		mutex_unlock(&journal->j_checkpoint_mutex);
-		spin_lock(&journal->j_list_lock);
-	}
-	spin_unlock(&journal->j_list_lock);
-
-	if (is_journal_aborted(journal))
-		return -EIO;
-
-	mutex_lock(&journal->j_checkpoint_mutex);
-	cleanup_journal_tail(journal);
-
-	/* Finally, mark the journal as really needing no recovery.
-	 * This sets s_start==0 in the underlying superblock, which is
-	 * the magic code for a fully-recovered superblock.  Any future
-	 * commits of data to the journal will restore the current
-	 * s_start value. */
-	mark_journal_empty(journal);
-	mutex_unlock(&journal->j_checkpoint_mutex);
-	spin_lock(&journal->j_state_lock);
-	J_ASSERT(!journal->j_running_transaction);
-	J_ASSERT(!journal->j_committing_transaction);
-	J_ASSERT(!journal->j_checkpoint_transactions);
-	J_ASSERT(journal->j_head == journal->j_tail);
-	J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
-	spin_unlock(&journal->j_state_lock);
-	return 0;
-}
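
A hedged sketch of the intended call site, mirroring the remount-read-only use described above; the wrapper is illustrative, and only journal_flush() and its return convention come from this file:

	/* Illustrative remount-read-only path (kernel context, not standalone). */
	static int example_remount_ro(journal_t *journal)
	{
		int err = journal_flush(journal);	/* empty the log; s_start := 0 */

		if (err)
			return err;			/* -EIO if the journal aborted */
		/* From here a crash leaves a clean journal: mount-time recovery
		 * is a no-op until the next transaction commits. */
		return 0;
	}
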
-
-/**
- * int journal_wipe() - Wipe journal contents
- * @journal: Journal to act on.
- * @write: flag (see below)
- *
- * Wipe out all of the contents of a journal, safely.  This will produce
- * a warning if the journal contains any valid recovery information.
- * Must be called between journal_init_*() and journal_load().
- *
- * If 'write' is non-zero, then we wipe out the journal on disk; otherwise
- * we merely suppress recovery.
- */
-
-int journal_wipe(journal_t *journal, int write)
-{
-	int err = 0;
-
-	J_ASSERT (!(journal->j_flags & JFS_LOADED));
-
-	err = load_superblock(journal);
-	if (err)
-		return err;
-
-	if (!journal->j_tail)
-		goto no_recovery;
-
-	printk (KERN_WARNING "JBD: %s recovery information on journal\n",
-		write ? "Clearing" : "Ignoring");
-
-	err = journal_skip_recovery(journal);
-	if (write) {
-		/* Lock to make assertions happy... */
-		mutex_lock(&journal->j_checkpoint_mutex);
-		mark_journal_empty(journal);
-		mutex_unlock(&journal->j_checkpoint_mutex);
-	}
-
- no_recovery:
-	return err;
-}
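
And a companion sketch for the wipe path, as a "norecovery"-style mount might use it before loading the journal; again the wrapper is illustrative:

	/* Illustrative "ignore the journal" mount option (kernel context). */
	static int example_norecovery(journal_t *journal, int discard)
	{
		/* With discard != 0 the on-disk log is marked empty too;
		 * otherwise recovery is merely suppressed for this mount. */
		return journal_wipe(journal, discard);
	}
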
-
-/*
- * journal_dev_name: format a character string to describe on what
- * device this journal is present.
- */
-
-static const char *journal_dev_name(journal_t *journal, char *buffer)
-{
-	struct block_device *bdev;
-
-	if (journal->j_inode)
-		bdev = journal->j_inode->i_sb->s_bdev;
-	else
-		bdev = journal->j_dev;
-
-	return bdevname(bdev, buffer);
-}
-
-/*
- * Journal abort has very specific semantics, which are described in
- * detail with journal_abort() below.
- *
- * Two internal functions, which provide abort support to the jbd
- * layer itself, are here.
- */
-
-/*
- * Quick version for internal journal use (doesn't lock the journal).
- * Aborts hard --- we mark the abort as occurred, but do _nothing_ else,
- * and don't attempt to make any other journal updates.
- */
-static void __journal_abort_hard(journal_t *journal)
-{
-	transaction_t *transaction;
-	char b[BDEVNAME_SIZE];
-
-	if (journal->j_flags & JFS_ABORT)
-		return;
-
-	printk(KERN_ERR "Aborting journal on device %s.\n",
-		journal_dev_name(journal, b));
-
-	spin_lock(&journal->j_state_lock);
-	journal->j_flags |= JFS_ABORT;
-	transaction = journal->j_running_transaction;
-	if (transaction)
-		__log_start_commit(journal, transaction->t_tid);
-	spin_unlock(&journal->j_state_lock);
-}
-
-/* Soft abort: record the abort error status in the journal superblock,
- * but don't do any other IO. */
-static void __journal_abort_soft (journal_t *journal, int errno)
-{
-	if (journal->j_flags & JFS_ABORT)
-		return;
-
-	if (!journal->j_errno)
-		journal->j_errno = errno;
-
-	__journal_abort_hard(journal);
-
-	if (errno)
-		journal_update_sb_errno(journal);
-}
-
-/**
- * void journal_abort () - Shutdown the journal immediately.
- * @journal: the journal to shutdown.
- * @errno:   an error number to record in the journal indicating
- *           the reason for the shutdown.
- *
- * Perform a complete, immediate shutdown of the ENTIRE
- * journal (not of a single transaction).  This operation cannot be
- * undone without closing and reopening the journal.
- *
- * The journal_abort function is intended to support higher level error
- * recovery mechanisms such as the ext2/ext3 remount-readonly error
- * mode.
- *
- * Journal abort has very specific semantics.  Any existing dirty,
- * unjournaled buffers in the main filesystem will still be written to
- * disk by bdflush, but the journaling mechanism will be suspended
- * immediately and no further transaction commits will be honoured.
- *
- * Any dirty, journaled buffers will be written back to disk without
- * hitting the journal.  Atomicity cannot be guaranteed on an aborted
- * filesystem, but we _do_ attempt to leave as much data as possible
- * behind for fsck to use for cleanup.
- *
- * Any attempt to get a new transaction handle on a journal which is in
- * ABORT state will just result in an -EROFS error return.  A
- * journal_stop on an existing handle will return -EIO if we have
- * entered abort state during the update.
- *
- * Recursive transactions are not disturbed by journal abort until the
- * final journal_stop, which will receive the -EIO error.
- *
- * Finally, the journal_abort call allows the caller to supply an errno
- * which will be recorded (if possible) in the journal superblock.  This
- * allows a client to record failure conditions in the middle of a
- * transaction without having to complete the transaction to record the
- * failure to disk.  ext3_error, for example, now uses this
- * functionality.
- *
- * Errors which originate from within the journaling layer will NOT
- * supply an errno; a null errno implies that absolutely no further
- * writes are done to the journal (unless there are any already in
- * progress).
- *
- */
-
-void journal_abort(journal_t *journal, int errno)
-{
-	__journal_abort_soft(journal, errno);
-}
-
-/**
- * int journal_errno () - returns the journal's error state.
- * @journal: journal to examine.
- *
- * This is the errno number set with journal_abort(), the last
- * time the journal was mounted - if the journal was stopped
- * without calling abort this will be 0.
- *
- * If the journal has been aborted during this mount, -EROFS will
- * be returned.
- */
-int journal_errno(journal_t *journal)
-{
-	int err;
-
-	spin_lock(&journal->j_state_lock);
-	if (journal->j_flags & JFS_ABORT)
-		err = -EROFS;
-	else
-		err = journal->j_errno;
-	spin_unlock(&journal->j_state_lock);
-	return err;
-}
-
-/**
- * int journal_clear_err () - clears the journal's error state
- * @journal: journal to act on.
- *
- * An error must be cleared or Acked to take a FS out of readonly
- * mode.
- */
-int journal_clear_err(journal_t *journal)
-{
-	int err = 0;
-
-	spin_lock(&journal->j_state_lock);
-	if (journal->j_flags & JFS_ABORT)
-		err = -EROFS;
-	else
-		journal->j_errno = 0;
-	spin_unlock(&journal->j_state_lock);
-	return err;
-}
-
-/**
- * void journal_ack_err() - Ack journal err.
- * @journal: journal to act on.
- *
- * An error must be cleared or Acked to take a FS out of readonly
- * mode.
- */
-void journal_ack_err(journal_t *journal)
-{
-	spin_lock(&journal->j_state_lock);
-	if (journal->j_errno)
-		journal->j_flags |= JFS_ACK_ERR;
-	spin_unlock(&journal->j_state_lock);
-}
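
journal_errno(), journal_clear_err() and journal_ack_err() form a small state machine over j_errno and the JFS_ABORT/JFS_ACK_ERR flags; start_this_handle() (later in this patch) returns -EROFS for new handles while an error is set and unacknowledged. A userspace model of just that logic, with simplified names:

	#include <stdio.h>
	#include <errno.h>

	#define F_ABORT	0x1
	#define F_ACK	0x2

	struct jstate { int flags; int err; };		/* stand-in for journal_t */

	static int model_errno(struct jstate *j)
	{
		return (j->flags & F_ABORT) ? -EROFS : j->err;
	}

	static int model_clear_err(struct jstate *j)
	{
		if (j->flags & F_ABORT)
			return -EROFS;			/* an abort is not clearable */
		j->err = 0;
		return 0;
	}

	static void model_ack_err(struct jstate *j)
	{
		if (j->err)
			j->flags |= F_ACK;		/* new handles allowed again */
	}

	int main(void)
	{
		struct jstate j = { F_ABORT, -EIO };

		printf("errno=%d\n", model_errno(&j));
		printf("clear=%d\n", model_clear_err(&j));
		model_ack_err(&j);
		printf("acked=%d\n", !!(j.flags & F_ACK));
		return 0;
	}
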
-
-int journal_blocks_per_page(struct inode *inode)
-{
-	return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
-}
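
For example, assuming 4 KiB pages (a PAGE_CACHE_SHIFT of 12), the shift yields 4 blocks per page at a 1 KiB block size and 1 at 4 KiB:

	#include <stdio.h>

	int main(void)
	{
		int page_shift = 12;			/* 4 KiB pages (assumed) */
		int bits[] = { 10, 11, 12 };		/* 1K, 2K, 4K block sizes */

		for (int i = 0; i < 3; i++)
			printf("blocksize %4d -> %d blocks per page\n",
			       1 << bits[i], 1 << (page_shift - bits[i]));
		return 0;
	}
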
-
-/*
- * Journal_head storage management
- */
-static struct kmem_cache *journal_head_cache;
-#ifdef CONFIG_JBD_DEBUG
-static atomic_t nr_journal_heads = ATOMIC_INIT(0);
-#endif
-
-static int journal_init_journal_head_cache(void)
-{
-	int retval;
-
-	J_ASSERT(journal_head_cache == NULL);
-	journal_head_cache = kmem_cache_create("journal_head",
-				sizeof(struct journal_head),
-				0,		/* offset */
-				SLAB_TEMPORARY,	/* flags */
-				NULL);		/* ctor */
-	retval = 0;
-	if (!journal_head_cache) {
-		retval = -ENOMEM;
-		printk(KERN_EMERG "JBD: no memory for journal_head cache\n");
-	}
-	return retval;
-}
-
-static void journal_destroy_journal_head_cache(void)
-{
-	if (journal_head_cache) {
-		kmem_cache_destroy(journal_head_cache);
-		journal_head_cache = NULL;
-	}
-}
-
-/*
- * journal_head splicing and dicing
- */
-static struct journal_head *journal_alloc_journal_head(void)
-{
-	struct journal_head *ret;
-
-#ifdef CONFIG_JBD_DEBUG
-	atomic_inc(&nr_journal_heads);
-#endif
-	ret = kmem_cache_zalloc(journal_head_cache, GFP_NOFS);
-	if (ret == NULL) {
-		jbd_debug(1, "out of memory for journal_head\n");
-		printk_ratelimited(KERN_NOTICE "ENOMEM in %s, retrying.\n",
-				   __func__);
-
-		while (ret == NULL) {
-			yield();
-			ret = kmem_cache_zalloc(journal_head_cache, GFP_NOFS);
-		}
-	}
-	return ret;
-}
-
-static void journal_free_journal_head(struct journal_head *jh)
-{
-#ifdef CONFIG_JBD_DEBUG
-	atomic_dec(&nr_journal_heads);
-	memset(jh, JBD_POISON_FREE, sizeof(*jh));
-#endif
-	kmem_cache_free(journal_head_cache, jh);
-}
-
-/*
- * A journal_head is attached to a buffer_head whenever JBD has an
- * interest in the buffer.
- *
- * Whenever a buffer has an attached journal_head, its ->b_state:BH_JBD bit
- * is set.  This bit is tested in core kernel code where we need to take
- * JBD-specific actions.  Testing the zeroness of ->b_private is not reliable
- * there.
- *
- * When a buffer has its BH_JBD bit set, its ->b_count is elevated by one.
- *
- * When a buffer has its BH_JBD bit set it is immune from being released by
- * core kernel code, mainly via ->b_count.
- *
- * A journal_head is detached from its buffer_head when the journal_head's
- * b_jcount reaches zero. Running transaction (b_transaction) and checkpoint
- * transaction (b_cp_transaction) hold their references to b_jcount.
- *
- * Various places in the kernel want to attach a journal_head to a buffer_head
- * _before_ attaching the journal_head to a transaction.  To protect the
- * journal_head in this situation, journal_add_journal_head elevates the
- * journal_head's b_jcount refcount by one.  The caller must call
- * journal_put_journal_head() to undo this.
- *
- * So the typical usage would be:
- *
- *	(Attach a journal_head if needed.  Increments b_jcount)
- *	struct journal_head *jh = journal_add_journal_head(bh);
- *	...
- *      (Get another reference for transaction)
- *      journal_grab_journal_head(bh);
- *      jh->b_transaction = xxx;
- *      (Put original reference)
- *      journal_put_journal_head(jh);
- */
-
-/*
- * Give a buffer_head a journal_head.
- *
- * May sleep.
- */
-struct journal_head *journal_add_journal_head(struct buffer_head *bh)
-{
-	struct journal_head *jh;
-	struct journal_head *new_jh = NULL;
-
-repeat:
-	if (!buffer_jbd(bh))
-		new_jh = journal_alloc_journal_head();
-
-	jbd_lock_bh_journal_head(bh);
-	if (buffer_jbd(bh)) {
-		jh = bh2jh(bh);
-	} else {
-		J_ASSERT_BH(bh,
-			(atomic_read(&bh->b_count) > 0) ||
-			(bh->b_page && bh->b_page->mapping));
-
-		if (!new_jh) {
-			jbd_unlock_bh_journal_head(bh);
-			goto repeat;
-		}
-
-		jh = new_jh;
-		new_jh = NULL;		/* We consumed it */
-		set_buffer_jbd(bh);
-		bh->b_private = jh;
-		jh->b_bh = bh;
-		get_bh(bh);
-		BUFFER_TRACE(bh, "added journal_head");
-	}
-	jh->b_jcount++;
-	jbd_unlock_bh_journal_head(bh);
-	if (new_jh)
-		journal_free_journal_head(new_jh);
-	return bh->b_private;
-}
-
-/*
- * Grab a ref against this buffer_head's journal_head.  If it ended up not
- * having a journal_head, return NULL
- */
-struct journal_head *journal_grab_journal_head(struct buffer_head *bh)
-{
-	struct journal_head *jh = NULL;
-
-	jbd_lock_bh_journal_head(bh);
-	if (buffer_jbd(bh)) {
-		jh = bh2jh(bh);
-		jh->b_jcount++;
-	}
-	jbd_unlock_bh_journal_head(bh);
-	return jh;
-}
-
-static void __journal_remove_journal_head(struct buffer_head *bh)
-{
-	struct journal_head *jh = bh2jh(bh);
-
-	J_ASSERT_JH(jh, jh->b_jcount >= 0);
-	J_ASSERT_JH(jh, jh->b_transaction == NULL);
-	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
-	J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
-	J_ASSERT_JH(jh, jh->b_jlist == BJ_None);
-	J_ASSERT_BH(bh, buffer_jbd(bh));
-	J_ASSERT_BH(bh, jh2bh(jh) == bh);
-	BUFFER_TRACE(bh, "remove journal_head");
-	if (jh->b_frozen_data) {
-		printk(KERN_WARNING "%s: freeing b_frozen_data\n", __func__);
-		jbd_free(jh->b_frozen_data, bh->b_size);
-	}
-	if (jh->b_committed_data) {
-		printk(KERN_WARNING "%s: freeing b_committed_data\n", __func__);
-		jbd_free(jh->b_committed_data, bh->b_size);
-	}
-	bh->b_private = NULL;
-	jh->b_bh = NULL;	/* debug, really */
-	clear_buffer_jbd(bh);
-	journal_free_journal_head(jh);
-}
-
-/*
- * Drop a reference on the passed journal_head.  If it fell to zero then
- * release the journal_head from the buffer_head.
- */
-void journal_put_journal_head(struct journal_head *jh)
-{
-	struct buffer_head *bh = jh2bh(jh);
-
-	jbd_lock_bh_journal_head(bh);
-	J_ASSERT_JH(jh, jh->b_jcount > 0);
-	--jh->b_jcount;
-	if (!jh->b_jcount) {
-		__journal_remove_journal_head(bh);
-		jbd_unlock_bh_journal_head(bh);
-		__brelse(bh);
-	} else
-		jbd_unlock_bh_journal_head(bh);
-}
-
-/*
- * debugfs tunables
- */
-#ifdef CONFIG_JBD_DEBUG
-
-u8 journal_enable_debug __read_mostly;
-EXPORT_SYMBOL(journal_enable_debug);
-
-static struct dentry *jbd_debugfs_dir;
-static struct dentry *jbd_debug;
-
-static void __init jbd_create_debugfs_entry(void)
-{
-	jbd_debugfs_dir = debugfs_create_dir("jbd", NULL);
-	if (jbd_debugfs_dir)
-		jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO | S_IWUSR,
-					       jbd_debugfs_dir,
-					       &journal_enable_debug);
-}
-
-static void __exit jbd_remove_debugfs_entry(void)
-{
-	debugfs_remove(jbd_debug);
-	debugfs_remove(jbd_debugfs_dir);
-}
-
-#else
-
-static inline void jbd_create_debugfs_entry(void)
-{
-}
-
-static inline void jbd_remove_debugfs_entry(void)
-{
-}
-
-#endif
-
-struct kmem_cache *jbd_handle_cache;
-
-static int __init journal_init_handle_cache(void)
-{
-	jbd_handle_cache = kmem_cache_create("journal_handle",
-				sizeof(handle_t),
-				0,		/* offset */
-				SLAB_TEMPORARY,	/* flags */
-				NULL);		/* ctor */
-	if (jbd_handle_cache == NULL) {
-		printk(KERN_EMERG "JBD: failed to create handle cache\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-static void journal_destroy_handle_cache(void)
-{
-	if (jbd_handle_cache)
-		kmem_cache_destroy(jbd_handle_cache);
-}
-
-/*
- * Module startup and shutdown
- */
-
-static int __init journal_init_caches(void)
-{
-	int ret;
-
-	ret = journal_init_revoke_caches();
-	if (ret == 0)
-		ret = journal_init_journal_head_cache();
-	if (ret == 0)
-		ret = journal_init_handle_cache();
-	return ret;
-}
-
-static void journal_destroy_caches(void)
-{
-	journal_destroy_revoke_caches();
-	journal_destroy_journal_head_cache();
-	journal_destroy_handle_cache();
-}
-
-static int __init journal_init(void)
-{
-	int ret;
-
-	BUILD_BUG_ON(sizeof(struct journal_superblock_s) != 1024);
-
-	ret = journal_init_caches();
-	if (ret != 0)
-		journal_destroy_caches();
-	jbd_create_debugfs_entry();
-	return ret;
-}
-
-static void __exit journal_exit(void)
-{
-#ifdef CONFIG_JBD_DEBUG
-	int n = atomic_read(&nr_journal_heads);
-	if (n)
-		printk(KERN_ERR "JBD: leaked %d journal_heads!\n", n);
-#endif
-	jbd_remove_debugfs_entry();
-	journal_destroy_caches();
-}
-
-MODULE_LICENSE("GPL");
-module_init(journal_init);
-module_exit(journal_exit);
-
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
deleted file mode 100644
index a748fe2..0000000
--- a/fs/jbd/recovery.c
+++ /dev/null
@@ -1,594 +0,0 @@
-/*
- * linux/fs/jbd/recovery.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
- *
- * Copyright 1999-2000 Red Hat Software --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Journal recovery routines for the generic filesystem journaling code;
- * part of the ext2fs journaling system.
- */
-
-#ifndef __KERNEL__
-#include "jfs_user.h"
-#else
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/blkdev.h>
-#endif
-
-/*
- * Maintain information about the progress of the recovery job, so that
- * the different passes can carry information between them.
- */
-struct recovery_info
-{
-	tid_t		start_transaction;
-	tid_t		end_transaction;
-
-	int		nr_replays;
-	int		nr_revokes;
-	int		nr_revoke_hits;
-};
-
-enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY};
-static int do_one_pass(journal_t *journal,
-				struct recovery_info *info, enum passtype pass);
-static int scan_revoke_records(journal_t *, struct buffer_head *,
-				tid_t, struct recovery_info *);
-
-#ifdef __KERNEL__
-
-/* Release readahead buffers after use */
-static void journal_brelse_array(struct buffer_head *b[], int n)
-{
-	while (--n >= 0)
-		brelse (b[n]);
-}
-
-
-/*
- * When reading from the journal, we are going through the block device
- * layer directly and so there is no readahead being done for us.  We
- * need to implement any readahead ourselves if we want it to happen at
- * all.  Recovery is basically one long sequential read, so make sure we
- * do the IO in reasonably large chunks.
- *
- * This is not so critical that we need to be enormously clever about
- * the readahead size, though.  128K is a purely arbitrary, good-enough
- * fixed value.
- */
-
-#define MAXBUF 8
-static int do_readahead(journal_t *journal, unsigned int start)
-{
-	int err;
-	unsigned int max, nbufs, next;
-	unsigned int blocknr;
-	struct buffer_head *bh;
-
-	struct buffer_head * bufs[MAXBUF];
-
-	/* Do up to 128K of readahead */
-	max = start + (128 * 1024 / journal->j_blocksize);
-	if (max > journal->j_maxlen)
-		max = journal->j_maxlen;
-
-	/* Do the readahead itself.  We'll submit MAXBUF buffer_heads at
-	 * a time to the block device IO layer. */
-
-	nbufs = 0;
-
-	for (next = start; next < max; next++) {
-		err = journal_bmap(journal, next, &blocknr);
-
-		if (err) {
-			printk (KERN_ERR "JBD: bad block at offset %u\n",
-				next);
-			goto failed;
-		}
-
-		bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
-		if (!bh) {
-			err = -ENOMEM;
-			goto failed;
-		}
-
-		if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
-			bufs[nbufs++] = bh;
-			if (nbufs == MAXBUF) {
-				ll_rw_block(READ, nbufs, bufs);
-				journal_brelse_array(bufs, nbufs);
-				nbufs = 0;
-			}
-		} else
-			brelse(bh);
-	}
-
-	if (nbufs)
-		ll_rw_block(READ, nbufs, bufs);
-	err = 0;
-
-failed:
-	if (nbufs)
-		journal_brelse_array(bufs, nbufs);
-	return err;
-}
-
-#endif /* __KERNEL__ */
-
-
-/*
- * Read a block from the journal
- */
-
-static int jread(struct buffer_head **bhp, journal_t *journal,
-		 unsigned int offset)
-{
-	int err;
-	unsigned int blocknr;
-	struct buffer_head *bh;
-
-	*bhp = NULL;
-
-	if (offset >= journal->j_maxlen) {
-		printk(KERN_ERR "JBD: corrupted journal superblock\n");
-		return -EIO;
-	}
-
-	err = journal_bmap(journal, offset, &blocknr);
-
-	if (err) {
-		printk (KERN_ERR "JBD: bad block at offset %u\n",
-			offset);
-		return err;
-	}
-
-	bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
-	if (!bh)
-		return -ENOMEM;
-
-	if (!buffer_uptodate(bh)) {
-		/* If this is a brand new buffer, start readahead.
-                   Otherwise, we assume we are already reading it.  */
-		if (!buffer_req(bh))
-			do_readahead(journal, offset);
-		wait_on_buffer(bh);
-	}
-
-	if (!buffer_uptodate(bh)) {
-		printk (KERN_ERR "JBD: Failed to read block at offset %u\n",
-			offset);
-		brelse(bh);
-		return -EIO;
-	}
-
-	*bhp = bh;
-	return 0;
-}
-
-
-/*
- * Count the number of in-use tags in a journal descriptor block.
- */
-
-static int count_tags(struct buffer_head *bh, int size)
-{
-	char *			tagp;
-	journal_block_tag_t *	tag;
-	int			nr = 0;
-
-	tagp = &bh->b_data[sizeof(journal_header_t)];
-
-	while ((tagp - bh->b_data + sizeof(journal_block_tag_t)) <= size) {
-		tag = (journal_block_tag_t *) tagp;
-
-		nr++;
-		tagp += sizeof(journal_block_tag_t);
-		if (!(tag->t_flags & cpu_to_be32(JFS_FLAG_SAME_UUID)))
-			tagp += 16;
-
-		if (tag->t_flags & cpu_to_be32(JFS_FLAG_LAST_TAG))
-			break;
-	}
-
-	return nr;
-}
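
A standalone model of the same walk, assuming the jbd.h on-disk layout: a 12-byte journal_header_t, then 8-byte {blocknr, flags} tags, each followed by a 16-byte UUID unless JFS_FLAG_SAME_UUID is set (the flag values below match the JFS_FLAG_* definitions):

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>
	#include <arpa/inet.h>		/* htonl(), the userspace cpu_to_be32 */

	#define FLAG_SAME_UUID	2	/* JFS_FLAG_SAME_UUID */
	#define FLAG_LAST_TAG	8	/* JFS_FLAG_LAST_TAG */
	#define HDR_SIZE	12	/* sizeof(journal_header_t) */

	struct tag { uint32_t t_blocknr; uint32_t t_flags; };	/* 8 bytes */

	static int count_tags_demo(const unsigned char *buf, int size)
	{
		int off = HDR_SIZE, nr = 0;
		struct tag t;

		while (off + (int)sizeof(t) <= size) {
			memcpy(&t, buf + off, sizeof(t));
			nr++;
			off += sizeof(t);
			if (!(t.t_flags & htonl(FLAG_SAME_UUID)))
				off += 16;	/* a per-tag UUID follows */
			if (t.t_flags & htonl(FLAG_LAST_TAG))
				break;
		}
		return nr;
	}

	int main(void)
	{
		unsigned char block[1024] = { 0 };
		struct tag t1 = { htonl(100), 0 };	/* UUID follows this tag */
		struct tag t2 = { htonl(101), htonl(FLAG_SAME_UUID | FLAG_LAST_TAG) };

		memcpy(block + HDR_SIZE, &t1, sizeof(t1));
		memcpy(block + HDR_SIZE + sizeof(t1) + 16, &t2, sizeof(t2));
		printf("tags: %d\n", count_tags_demo(block, sizeof(block)));
		return 0;
	}
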
-
-
-/* Make sure we wrap around the log correctly! */
-#define wrap(journal, var)						\
-do {									\
-	if (var >= (journal)->j_last)					\
-		var -= ((journal)->j_last - (journal)->j_first);	\
-} while (0)
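
The traversal only ever steps forward by less than one full log length, so a single subtraction is enough to fold the position back into [j_first, j_last). A standalone check of the arithmetic:

	#include <stdio.h>

	/* Same arithmetic as wrap(): the log occupies blocks [first, last). */
	static unsigned int wrap_demo(unsigned int var,
				      unsigned int first, unsigned int last)
	{
		if (var >= last)
			var -= (last - first);
		return var;
	}

	int main(void)
	{
		unsigned int first = 1, last = 1024;

		printf("%u\n", wrap_demo(1023, first, last));	/* 1023: in range */
		printf("%u\n", wrap_demo(1024, first, last));	/* 1: wrapped */
		printf("%u\n", wrap_demo(1030, first, last));	/* 7: wrapped */
		return 0;
	}
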
-
-/**
- * journal_recover - recovers an on-disk journal
- * @journal: the journal to recover
- *
- * The primary function for recovering the log contents when mounting a
- * journaled device.
- *
- * Recovery is done in three passes.  In the first pass, we look for the
- * end of the log.  In the second, we assemble the list of revoke
- * blocks.  In the third and final pass, we replay any un-revoked blocks
- * in the log.
- */
-int journal_recover(journal_t *journal)
-{
-	int			err, err2;
-	journal_superblock_t *	sb;
-
-	struct recovery_info	info;
-
-	memset(&info, 0, sizeof(info));
-	sb = journal->j_superblock;
-
-	/*
-	 * The journal superblock's s_start field (the current log head)
-	 * is always zero if, and only if, the journal was cleanly
-	 * unmounted.
-	 */
-
-	if (!sb->s_start) {
-		jbd_debug(1, "No recovery required, last transaction %d\n",
-			  be32_to_cpu(sb->s_sequence));
-		journal->j_transaction_sequence = be32_to_cpu(sb->s_sequence) + 1;
-		return 0;
-	}
-
-	err = do_one_pass(journal, &info, PASS_SCAN);
-	if (!err)
-		err = do_one_pass(journal, &info, PASS_REVOKE);
-	if (!err)
-		err = do_one_pass(journal, &info, PASS_REPLAY);
-
-	jbd_debug(1, "JBD: recovery, exit status %d, "
-		  "recovered transactions %u to %u\n",
-		  err, info.start_transaction, info.end_transaction);
-	jbd_debug(1, "JBD: Replayed %d and revoked %d/%d blocks\n",
-		  info.nr_replays, info.nr_revoke_hits, info.nr_revokes);
-
-	/* Restart the log at the next transaction ID, thus invalidating
-	 * any existing commit records in the log. */
-	journal->j_transaction_sequence = ++info.end_transaction;
-
-	journal_clear_revoke(journal);
-	err2 = sync_blockdev(journal->j_fs_dev);
-	if (!err)
-		err = err2;
-	/* Flush disk caches to get replayed data on the permanent storage */
-	if (journal->j_flags & JFS_BARRIER) {
-		err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
-		if (!err)
-			err = err2;
-	}
-
-	return err;
-}
-
-/**
- * journal_skip_recovery - Start journal and wipe existing records
- * @journal: journal to startup
- *
- * Locate any valid recovery information from the journal and set up the
- * journal structures in memory to ignore it (presumably because the
- * caller has evidence that it is out of date).
- * This function doesn't appear to be exported.
- *
- * We perform one pass over the journal to allow us to tell the user how
- * much recovery information is being erased, and to let us initialise
- * the journal transaction sequence numbers to the next unused ID.
- */
-int journal_skip_recovery(journal_t *journal)
-{
-	int			err;
-	struct recovery_info	info;
-
-	memset (&info, 0, sizeof(info));
-
-	err = do_one_pass(journal, &info, PASS_SCAN);
-
-	if (err) {
-		printk(KERN_ERR "JBD: error %d scanning journal\n", err);
-		++journal->j_transaction_sequence;
-	} else {
-#ifdef CONFIG_JBD_DEBUG
-		int dropped = info.end_transaction -
-			      be32_to_cpu(journal->j_superblock->s_sequence);
-		jbd_debug(1,
-			  "JBD: ignoring %d transaction%s from the journal.\n",
-			  dropped, (dropped == 1) ? "" : "s");
-#endif
-		journal->j_transaction_sequence = ++info.end_transaction;
-	}
-
-	journal->j_tail = 0;
-	return err;
-}
-
-static int do_one_pass(journal_t *journal,
-			struct recovery_info *info, enum passtype pass)
-{
-	unsigned int		first_commit_ID, next_commit_ID;
-	unsigned int		next_log_block;
-	int			err, success = 0;
-	journal_superblock_t *	sb;
-	journal_header_t *	tmp;
-	struct buffer_head *	bh;
-	unsigned int		sequence;
-	int			blocktype;
-
-	/*
-	 * First thing is to establish what we expect to find in the log
-	 * (in terms of transaction IDs), and where (in terms of log
-	 * block offsets): query the superblock.
-	 */
-
-	sb = journal->j_superblock;
-	next_commit_ID = be32_to_cpu(sb->s_sequence);
-	next_log_block = be32_to_cpu(sb->s_start);
-
-	first_commit_ID = next_commit_ID;
-	if (pass == PASS_SCAN)
-		info->start_transaction = first_commit_ID;
-
-	jbd_debug(1, "Starting recovery pass %d\n", pass);
-
-	/*
-	 * Now we walk through the log, transaction by transaction,
-	 * making sure that each transaction has a commit block in the
-	 * expected place.  Each complete transaction gets replayed back
-	 * into the main filesystem.
-	 */
-
-	while (1) {
-		int			flags;
-		char *			tagp;
-		journal_block_tag_t *	tag;
-		struct buffer_head *	obh;
-		struct buffer_head *	nbh;
-
-		cond_resched();
-
-		/* If we already know where to stop the log traversal,
-		 * check right now that we haven't gone past the end of
-		 * the log. */
-
-		if (pass != PASS_SCAN)
-			if (tid_geq(next_commit_ID, info->end_transaction))
-				break;
-
-		jbd_debug(2, "Scanning for sequence ID %u at %u/%u\n",
-			  next_commit_ID, next_log_block, journal->j_last);
-
-		/* Skip over each chunk of the transaction looking for
-		 * either the next descriptor block or the final commit
-		 * record. */
-
-		jbd_debug(3, "JBD: checking block %u\n", next_log_block);
-		err = jread(&bh, journal, next_log_block);
-		if (err)
-			goto failed;
-
-		next_log_block++;
-		wrap(journal, next_log_block);
-
-		/* What kind of buffer is it?
-		 *
-		 * If it is a descriptor block, check that it has the
-		 * expected sequence number.  Otherwise, we're all done
-		 * here. */
-
-		tmp = (journal_header_t *)bh->b_data;
-
-		if (tmp->h_magic != cpu_to_be32(JFS_MAGIC_NUMBER)) {
-			brelse(bh);
-			break;
-		}
-
-		blocktype = be32_to_cpu(tmp->h_blocktype);
-		sequence = be32_to_cpu(tmp->h_sequence);
-		jbd_debug(3, "Found magic %d, sequence %d\n",
-			  blocktype, sequence);
-
-		if (sequence != next_commit_ID) {
-			brelse(bh);
-			break;
-		}
-
-		/* OK, we have a valid descriptor block which matches
-		 * all of the sequence number checks.  What are we going
-		 * to do with it?  That depends on the pass... */
-
-		switch(blocktype) {
-		case JFS_DESCRIPTOR_BLOCK:
-			/* If it is a valid descriptor block, replay it
-			 * in pass REPLAY; otherwise, just skip over the
-			 * blocks it describes. */
-			if (pass != PASS_REPLAY) {
-				next_log_block +=
-					count_tags(bh, journal->j_blocksize);
-				wrap(journal, next_log_block);
-				brelse(bh);
-				continue;
-			}
-
-			/* A descriptor block: we can now write all of
-			 * the data blocks.  Yay, useful work is finally
-			 * getting done here! */
-
-			tagp = &bh->b_data[sizeof(journal_header_t)];
-			while ((tagp - bh->b_data + sizeof(journal_block_tag_t))
-			       <= journal->j_blocksize) {
-				unsigned int io_block;
-
-				tag = (journal_block_tag_t *) tagp;
-				flags = be32_to_cpu(tag->t_flags);
-
-				io_block = next_log_block++;
-				wrap(journal, next_log_block);
-				err = jread(&obh, journal, io_block);
-				if (err) {
-					/* Recover what we can, but
-					 * report failure at the end. */
-					success = err;
-					printk (KERN_ERR
-						"JBD: IO error %d recovering "
-						"block %u in log\n",
-						err, io_block);
-				} else {
-					unsigned int blocknr;
-
-					J_ASSERT(obh != NULL);
-					blocknr = be32_to_cpu(tag->t_blocknr);
-
-					/* If the block has been
-					 * revoked, then we're all done
-					 * here. */
-					if (journal_test_revoke
-					    (journal, blocknr,
-					     next_commit_ID)) {
-						brelse(obh);
-						++info->nr_revoke_hits;
-						goto skip_write;
-					}
-
-					/* Find a buffer for the new
-					 * data being restored */
-					nbh = __getblk(journal->j_fs_dev,
-							blocknr,
-							journal->j_blocksize);
-					if (nbh == NULL) {
-						printk(KERN_ERR
-						       "JBD: Out of memory "
-						       "during recovery.\n");
-						err = -ENOMEM;
-						brelse(bh);
-						brelse(obh);
-						goto failed;
-					}
-
-					lock_buffer(nbh);
-					memcpy(nbh->b_data, obh->b_data,
-							journal->j_blocksize);
-					if (flags & JFS_FLAG_ESCAPE) {
-						*((__be32 *)nbh->b_data) =
-						cpu_to_be32(JFS_MAGIC_NUMBER);
-					}
-
-					BUFFER_TRACE(nbh, "marking dirty");
-					set_buffer_uptodate(nbh);
-					mark_buffer_dirty(nbh);
-					BUFFER_TRACE(nbh, "marking uptodate");
-					++info->nr_replays;
-					/* ll_rw_block(WRITE, 1, &nbh); */
-					unlock_buffer(nbh);
-					brelse(obh);
-					brelse(nbh);
-				}
-
-			skip_write:
-				tagp += sizeof(journal_block_tag_t);
-				if (!(flags & JFS_FLAG_SAME_UUID))
-					tagp += 16;
-
-				if (flags & JFS_FLAG_LAST_TAG)
-					break;
-			}
-
-			brelse(bh);
-			continue;
-
-		case JFS_COMMIT_BLOCK:
-			/* Found an expected commit block: not much to
-			 * do other than move on to the next sequence
-			 * number. */
-			brelse(bh);
-			next_commit_ID++;
-			continue;
-
-		case JFS_REVOKE_BLOCK:
-			/* If we aren't in the REVOKE pass, then we can
-			 * just skip over this block. */
-			if (pass != PASS_REVOKE) {
-				brelse(bh);
-				continue;
-			}
-
-			err = scan_revoke_records(journal, bh,
-						  next_commit_ID, info);
-			brelse(bh);
-			if (err)
-				goto failed;
-			continue;
-
-		default:
-			jbd_debug(3, "Unrecognised magic %d, end of scan.\n",
-				  blocktype);
-			brelse(bh);
-			goto done;
-		}
-	}
-
- done:
-	/*
-	 * We broke out of the log scan loop: either we came to the
-	 * known end of the log or we found an unexpected block in the
-	 * log.  If the latter happened, then we know that the "current"
-	 * transaction marks the end of the valid log.
-	 */
-
-	if (pass == PASS_SCAN)
-		info->end_transaction = next_commit_ID;
-	else {
-		/* It's really bad news if different passes end up at
-		 * different places (but possible due to IO errors). */
-		if (info->end_transaction != next_commit_ID) {
-			printk (KERN_ERR "JBD: recovery pass %d ended at "
-				"transaction %u, expected %u\n",
-				pass, next_commit_ID, info->end_transaction);
-			if (!success)
-				success = -EIO;
-		}
-	}
-
-	return success;
-
- failed:
-	return err;
-}
-
-
-/* Scan a revoke record, marking all blocks mentioned as revoked. */
-
-static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
-			       tid_t sequence, struct recovery_info *info)
-{
-	journal_revoke_header_t *header;
-	int offset, max;
-
-	header = (journal_revoke_header_t *) bh->b_data;
-	offset = sizeof(journal_revoke_header_t);
-	max = be32_to_cpu(header->r_count);
-
-	while (offset < max) {
-		unsigned int blocknr;
-		int err;
-
-		blocknr = be32_to_cpu(* ((__be32 *) (bh->b_data+offset)));
-		offset += 4;
-		err = journal_set_revoke(journal, blocknr, sequence);
-		if (err)
-			return err;
-		++info->nr_revokes;
-	}
-	return 0;
-}
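
A standalone model of that parse, assuming the journal_revoke_header_t layout implied by flush_descriptor() later in this patch: a 12-byte journal_header_t plus a be32 r_count holding the number of bytes used in the block, header included, with the revoked block numbers following as be32 values:

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	#define REVOKE_HDR 16		/* journal_header_t + be32 r_count */

	int main(void)
	{
		unsigned char block[64] = { 0 };
		uint32_t r_count = htonl(REVOKE_HDR + 2 * 4);
		uint32_t b1 = htonl(500), b2 = htonl(501);
		uint32_t raw, nr;

		/* Build a fake revoke block holding two revoked block numbers. */
		memcpy(block + 12, &r_count, 4);
		memcpy(block + REVOKE_HDR, &b1, 4);
		memcpy(block + REVOKE_HDR + 4, &b2, 4);

		/* Parse it the way scan_revoke_records() does. */
		memcpy(&raw, block + 12, 4);
		for (int off = REVOKE_HDR; off < (int)ntohl(raw); off += 4) {
			memcpy(&nr, block + off, 4);
			printf("revoked block %u\n", ntohl(nr));
		}
		return 0;
	}
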
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
deleted file mode 100644
index dcead63..0000000
--- a/fs/jbd/revoke.c
+++ /dev/null
@@ -1,733 +0,0 @@
-/*
- * linux/fs/jbd/revoke.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 2000
- *
- * Copyright 2000 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Journal revoke routines for the generic filesystem journaling code;
- * part of the ext2fs journaling system.
- *
- * Revoke is the mechanism used to prevent old log records for deleted
- * metadata from being replayed on top of newer data using the same
- * blocks.  The revoke mechanism is used in two separate places:
- *
- * + Commit: during commit we write the entire list of the current
- *   transaction's revoked blocks to the journal
- *
- * + Recovery: during recovery we record the transaction ID of all
- *   revoked blocks.  If there are multiple revoke records in the log
- *   for a single block, only the last one counts, and if there is a log
- *   entry for a block beyond the last revoke, then that log entry still
- *   gets replayed.
- *
- * We can get interactions between revokes and new log data within a
- * single transaction:
- *
- * Block is revoked and then journaled:
- *   The desired end result is the journaling of the new block, so we
- *   cancel the revoke before the transaction commits.
- *
- * Block is journaled and then revoked:
- *   The revoke must take precedence over the write of the block, so we
- *   need either to cancel the journal entry or to write the revoke
- *   later in the log than the log block.  In this case, we choose the
- *   latter: journaling a block cancels any revoke record for that block
- *   in the current transaction, so any revoke for that block in the
- *   transaction must have happened after the block was journaled and so
- *   the revoke must take precedence.
- *
- * Block is revoked and then written as data:
- *   The data write is allowed to succeed, but the revoke is _not_
- *   cancelled.  We still need to prevent old log records from
- *   overwriting the new data.  We don't even need to clear the revoke
- *   bit here.
- *
- * We cache the revoke status of a buffer in the current transaction in its
- * b_state bits.  As the name says, the revokevalid flag indicates that the
- * cached revoke status of a buffer is valid and we can rely on it.
- *
- * Revoke information on buffers is a tri-state value:
- *
- * RevokeValid clear:	no cached revoke status, need to look it up
- * RevokeValid set, Revoked clear:
- *			buffer has not been revoked, and cancel_revoke
- *			need do nothing.
- * RevokeValid set, Revoked set:
- *			buffer has been revoked.
- *
- * Locking rules:
- * We keep two hash tables of revoke records. One hashtable belongs to the
- * running transaction (is pointed to by journal->j_revoke), the other one
- * belongs to the committing transaction. Accesses to the second hash table
- * happen only from the kjournald and no other thread touches this table.  Also
- * journal_switch_revoke_table() which switches which hashtable belongs to the
- * running and which to the committing transaction is called only from
- * kjournald. Therefore we need no locks when accessing the hashtable belonging
- * to the committing transaction.
- *
- * All users operating on the hash table belonging to the running transaction
- * have a handle to the transaction. Therefore they are safe from kjournald
- * switching hash tables under them. For operations on the lists of entries in
- * the hash table j_revoke_lock is used.
- *
- * Finally, the replay code also uses the hash tables, but at that point no one
- * else can touch them (the filesystem isn't mounted yet) and hence no locking
- * is needed.
- */
-
-#ifndef __KERNEL__
-#include "jfs_user.h"
-#else
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/init.h>
-#include <linux/bio.h>
-#endif
-#include <linux/log2.h>
-#include <linux/hash.h>
-
-static struct kmem_cache *revoke_record_cache;
-static struct kmem_cache *revoke_table_cache;
-
-/* Each revoke record represents one single revoked block.  During
-   journal replay, this involves recording the transaction ID of the
-   last transaction to revoke this block. */
-
-struct jbd_revoke_record_s
-{
-	struct list_head  hash;
-	tid_t		  sequence;	/* Used for recovery only */
-	unsigned int	  blocknr;
-};
-
-
-/* The revoke table is just a simple hash table of revoke records. */
-struct jbd_revoke_table_s
-{
-	/* It is conceivable that we might want a larger hash table
-	 * for recovery.  Must be a power of two. */
-	int		  hash_size;
-	int		  hash_shift;
-	struct list_head *hash_table;
-};
-
-
-#ifdef __KERNEL__
-static void write_one_revoke_record(journal_t *, transaction_t *,
-				    struct journal_head **, int *,
-				    struct jbd_revoke_record_s *, int);
-static void flush_descriptor(journal_t *, struct journal_head *, int, int);
-#endif
-
-/* Utility functions to maintain the revoke table */
-
-static inline int hash(journal_t *journal, unsigned int block)
-{
-	struct jbd_revoke_table_s *table = journal->j_revoke;
-
-	return hash_32(block, table->hash_shift);
-}
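
hash_32() spreads block numbers across the table by multiplying with a golden-ratio prime and keeping the top hash_shift bits, so sequential metadata blocks land in different buckets. A standalone copy, assuming the kernel's pre-4.6 constant GOLDEN_RATIO_PRIME_32:

	#include <stdio.h>
	#include <stdint.h>

	#define GOLDEN_RATIO_PRIME_32 0x9e370001u	/* linux/hash.h, this era */

	static uint32_t hash_32(uint32_t val, unsigned int bits)
	{
		return (val * GOLDEN_RATIO_PRIME_32) >> (32 - bits);
	}

	int main(void)
	{
		unsigned int shift = 8;		/* hash_size 256 -> hash_shift 8 */

		for (uint32_t blk = 1000; blk < 1005; blk++)
			printf("block %u -> bucket %u\n", blk, hash_32(blk, shift));
		return 0;
	}
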
-
-static int insert_revoke_hash(journal_t *journal, unsigned int blocknr,
-			      tid_t seq)
-{
-	struct list_head *hash_list;
-	struct jbd_revoke_record_s *record;
-
-repeat:
-	record = kmem_cache_alloc(revoke_record_cache, GFP_NOFS);
-	if (!record)
-		goto oom;
-
-	record->sequence = seq;
-	record->blocknr = blocknr;
-	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
-	spin_lock(&journal->j_revoke_lock);
-	list_add(&record->hash, hash_list);
-	spin_unlock(&journal->j_revoke_lock);
-	return 0;
-
-oom:
-	if (!journal_oom_retry)
-		return -ENOMEM;
-	jbd_debug(1, "ENOMEM in %s, retrying\n", __func__);
-	yield();
-	goto repeat;
-}
-
-/* Find a revoke record in the journal's hash table. */
-
-static struct jbd_revoke_record_s *find_revoke_record(journal_t *journal,
-						      unsigned int blocknr)
-{
-	struct list_head *hash_list;
-	struct jbd_revoke_record_s *record;
-
-	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
-
-	spin_lock(&journal->j_revoke_lock);
-	record = (struct jbd_revoke_record_s *) hash_list->next;
-	while (&(record->hash) != hash_list) {
-		if (record->blocknr == blocknr) {
-			spin_unlock(&journal->j_revoke_lock);
-			return record;
-		}
-		record = (struct jbd_revoke_record_s *) record->hash.next;
-	}
-	spin_unlock(&journal->j_revoke_lock);
-	return NULL;
-}
-
-void journal_destroy_revoke_caches(void)
-{
-	if (revoke_record_cache) {
-		kmem_cache_destroy(revoke_record_cache);
-		revoke_record_cache = NULL;
-	}
-	if (revoke_table_cache) {
-		kmem_cache_destroy(revoke_table_cache);
-		revoke_table_cache = NULL;
-	}
-}
-
-int __init journal_init_revoke_caches(void)
-{
-	J_ASSERT(!revoke_record_cache);
-	J_ASSERT(!revoke_table_cache);
-
-	revoke_record_cache = kmem_cache_create("revoke_record",
-					   sizeof(struct jbd_revoke_record_s),
-					   0,
-					   SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
-					   NULL);
-	if (!revoke_record_cache)
-		goto record_cache_failure;
-
-	revoke_table_cache = kmem_cache_create("revoke_table",
-					   sizeof(struct jbd_revoke_table_s),
-					   0, SLAB_TEMPORARY, NULL);
-	if (!revoke_table_cache)
-		goto table_cache_failure;
-
-	return 0;
-
-table_cache_failure:
-	journal_destroy_revoke_caches();
-record_cache_failure:
-	return -ENOMEM;
-}
-
-static struct jbd_revoke_table_s *journal_init_revoke_table(int hash_size)
-{
-	int i;
-	struct jbd_revoke_table_s *table;
-
-	table = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
-	if (!table)
-		goto out;
-
-	table->hash_size = hash_size;
-	table->hash_shift = ilog2(hash_size);
-	table->hash_table =
-		kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
-	if (!table->hash_table) {
-		kmem_cache_free(revoke_table_cache, table);
-		table = NULL;
-		goto out;
-	}
-
-	for (i = 0; i < hash_size; i++)
-		INIT_LIST_HEAD(&table->hash_table[i]);
-
-out:
-	return table;
-}
-
-static void journal_destroy_revoke_table(struct jbd_revoke_table_s *table)
-{
-	int i;
-	struct list_head *hash_list;
-
-	for (i = 0; i < table->hash_size; i++) {
-		hash_list = &table->hash_table[i];
-		J_ASSERT(list_empty(hash_list));
-	}
-
-	kfree(table->hash_table);
-	kmem_cache_free(revoke_table_cache, table);
-}
-
-/* Initialise the revoke table for a given journal to a given size. */
-int journal_init_revoke(journal_t *journal, int hash_size)
-{
-	J_ASSERT(journal->j_revoke_table[0] == NULL);
-	J_ASSERT(is_power_of_2(hash_size));
-
-	journal->j_revoke_table[0] = journal_init_revoke_table(hash_size);
-	if (!journal->j_revoke_table[0])
-		goto fail0;
-
-	journal->j_revoke_table[1] = journal_init_revoke_table(hash_size);
-	if (!journal->j_revoke_table[1])
-		goto fail1;
-
-	journal->j_revoke = journal->j_revoke_table[1];
-
-	spin_lock_init(&journal->j_revoke_lock);
-
-	return 0;
-
-fail1:
-	journal_destroy_revoke_table(journal->j_revoke_table[0]);
-fail0:
-	return -ENOMEM;
-}
-
-/* Destroy a journal's revoke table.  The table must already be empty! */
-void journal_destroy_revoke(journal_t *journal)
-{
-	journal->j_revoke = NULL;
-	if (journal->j_revoke_table[0])
-		journal_destroy_revoke_table(journal->j_revoke_table[0]);
-	if (journal->j_revoke_table[1])
-		journal_destroy_revoke_table(journal->j_revoke_table[1]);
-}
-
-
-#ifdef __KERNEL__
-
-/*
- * journal_revoke: revoke a given buffer_head from the journal.  This
- * prevents the block from being replayed during recovery if we take a
- * crash after this current transaction commits.  Any subsequent
- * metadata writes of the buffer in this transaction cancel the
- * revoke.
- *
- * Note that this call may block --- it is up to the caller to make
- * sure that there are no further calls to journal_write_metadata
- * before the revoke is complete.  In ext3, this implies calling the
- * revoke before clearing the block bitmap when we are deleting
- * metadata.
- *
- * Revoke performs a journal_forget on any buffer_head passed in as a
- * parameter, but does _not_ forget the buffer_head if the bh was only
- * found implicitly.
- *
- * bh_in may not be a journalled buffer - it may have come off
- * the hash tables without an attached journal_head.
- *
- * If bh_in is non-zero, journal_revoke() will decrement its b_count
- * by one.
- */
-
-int journal_revoke(handle_t *handle, unsigned int blocknr,
-		   struct buffer_head *bh_in)
-{
-	struct buffer_head *bh = NULL;
-	journal_t *journal;
-	struct block_device *bdev;
-	int err;
-
-	might_sleep();
-	if (bh_in)
-		BUFFER_TRACE(bh_in, "enter");
-
-	journal = handle->h_transaction->t_journal;
-	if (!journal_set_features(journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)){
-		J_ASSERT (!"Cannot set revoke feature!");
-		return -EINVAL;
-	}
-
-	bdev = journal->j_fs_dev;
-	bh = bh_in;
-
-	if (!bh) {
-		bh = __find_get_block(bdev, blocknr, journal->j_blocksize);
-		if (bh)
-			BUFFER_TRACE(bh, "found on hash");
-	}
-#ifdef JBD_EXPENSIVE_CHECKING
-	else {
-		struct buffer_head *bh2;
-
-		/* If there is a different buffer_head lying around in
-		 * memory anywhere... */
-		bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize);
-		if (bh2) {
-			/* ... and it has RevokeValid status... */
-			if (bh2 != bh && buffer_revokevalid(bh2))
-				/* ...then it better be revoked too,
-				 * since it's illegal to create a revoke
-				 * record against a buffer_head which is
-				 * not marked revoked --- that would
-				 * risk missing a subsequent revoke
-				 * cancel. */
-				J_ASSERT_BH(bh2, buffer_revoked(bh2));
-			put_bh(bh2);
-		}
-	}
-#endif
-
-	/* We really ought not ever to revoke twice in a row without
-           first having the revoke cancelled: it's illegal to free a
-           block twice without allocating it in between! */
-	if (bh) {
-		if (!J_EXPECT_BH(bh, !buffer_revoked(bh),
-				 "inconsistent data on disk")) {
-			if (!bh_in)
-				brelse(bh);
-			return -EIO;
-		}
-		set_buffer_revoked(bh);
-		set_buffer_revokevalid(bh);
-		if (bh_in) {
-			BUFFER_TRACE(bh_in, "call journal_forget");
-			journal_forget(handle, bh_in);
-		} else {
-			BUFFER_TRACE(bh, "call brelse");
-			__brelse(bh);
-		}
-	}
-
-	jbd_debug(2, "insert revoke for block %u, bh_in=%p\n", blocknr, bh_in);
-	err = insert_revoke_hash(journal, blocknr,
-				handle->h_transaction->t_tid);
-	BUFFER_TRACE(bh_in, "exit");
-	return err;
-}
-
-/*
- * Cancel an outstanding revoke.  For use only internally by the
- * journaling code (called from journal_get_write_access).
- *
- * We trust buffer_revoked() on the buffer if the buffer is already
- * being journaled: if there is no revoke pending on the buffer, then we
- * don't do anything here.
- *
- * This would break if it were possible for a buffer to be revoked and
- * discarded, and then reallocated within the same transaction.  In such
- * a case we would have lost the revoked bit, but when we arrived here
- * the second time we would still have a pending revoke to cancel.  So,
- * do not trust the Revoked bit on buffers unless RevokeValid is also
- * set.
- */
-int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
-{
-	struct jbd_revoke_record_s *record;
-	journal_t *journal = handle->h_transaction->t_journal;
-	int need_cancel;
-	int did_revoke = 0;	/* akpm: debug */
-	struct buffer_head *bh = jh2bh(jh);
-
-	jbd_debug(4, "journal_head %p, cancelling revoke\n", jh);
-
-	/* Is the existing Revoke bit valid?  If so, we trust it, and
-	 * only perform the full cancel if the revoke bit is set.  If
-	 * not, we can't trust the revoke bit, and we need to do the
-	 * full search for a revoke record. */
-	if (test_set_buffer_revokevalid(bh)) {
-		need_cancel = test_clear_buffer_revoked(bh);
-	} else {
-		need_cancel = 1;
-		clear_buffer_revoked(bh);
-	}
-
-	if (need_cancel) {
-		record = find_revoke_record(journal, bh->b_blocknr);
-		if (record) {
-			jbd_debug(4, "cancelled existing revoke on "
-				  "blocknr %llu\n", (unsigned long long)bh->b_blocknr);
-			spin_lock(&journal->j_revoke_lock);
-			list_del(&record->hash);
-			spin_unlock(&journal->j_revoke_lock);
-			kmem_cache_free(revoke_record_cache, record);
-			did_revoke = 1;
-		}
-	}
-
-#ifdef JBD_EXPENSIVE_CHECKING
-	/* There better not be one left behind by now! */
-	record = find_revoke_record(journal, bh->b_blocknr);
-	J_ASSERT_JH(jh, record == NULL);
-#endif
-
-	/* Finally, have we just cleared revoke on an unhashed
-	 * buffer_head?  If so, we'd better make sure we clear the
-	 * revoked status on any hashed alias too, otherwise the revoke
-	 * state machine will get very upset later on. */
-	if (need_cancel) {
-		struct buffer_head *bh2;
-		bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
-		if (bh2) {
-			if (bh2 != bh)
-				clear_buffer_revoked(bh2);
-			__brelse(bh2);
-		}
-	}
-	return did_revoke;
-}
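
The RevokeValid/Revoked pair is the subtle part; a userspace model of just the bit logic above, with the hash-table search abstracted into the return value:

	#include <stdio.h>

	#define BIT_REVOKED	0x1
	#define BIT_REVOKEVALID	0x2

	/* Mirrors the decision in journal_cancel_revoke(): returns whether the
	 * full (hash lookup) cancel is needed and updates the cached bits. */
	static int need_full_cancel(unsigned int *state)
	{
		int need;

		if (*state & BIT_REVOKEVALID) {		/* cache is trustworthy */
			need = !!(*state & BIT_REVOKED);
			*state &= ~BIT_REVOKED;
		} else {				/* unknown: must search */
			need = 1;
			*state = BIT_REVOKEVALID;	/* now valid, not revoked */
		}
		return need;
	}

	int main(void)
	{
		unsigned int s;
		int need;

		s = 0;					/* RevokeValid clear */
		need = need_full_cancel(&s);
		printf("unknown -> cancel=%d state=%x\n", need, s);

		s = BIT_REVOKEVALID;			/* valid, not revoked */
		need = need_full_cancel(&s);
		printf("clean   -> cancel=%d state=%x\n", need, s);

		s = BIT_REVOKEVALID | BIT_REVOKED;	/* valid and revoked */
		need = need_full_cancel(&s);
		printf("revoked -> cancel=%d state=%x\n", need, s);
		return 0;
	}
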
-
-/*
- * journal_clear_revoked_flags clears revoked flag of buffers in
- * revoke table to reflect there is no revoked buffer in the next
- * transaction which is going to be started.
- */
-void journal_clear_buffer_revoked_flags(journal_t *journal)
-{
-	struct jbd_revoke_table_s *revoke = journal->j_revoke;
-	int i = 0;
-
-	for (i = 0; i < revoke->hash_size; i++) {
-		struct list_head *hash_list;
-		struct list_head *list_entry;
-		hash_list = &revoke->hash_table[i];
-
-		list_for_each(list_entry, hash_list) {
-			struct jbd_revoke_record_s *record;
-			struct buffer_head *bh;
-			record = (struct jbd_revoke_record_s *)list_entry;
-			bh = __find_get_block(journal->j_fs_dev,
-					      record->blocknr,
-					      journal->j_blocksize);
-			if (bh) {
-				clear_buffer_revoked(bh);
-				__brelse(bh);
-			}
-		}
-	}
-}
-
-/* journal_switch_revoke_table selects j_revoke for the next transaction;
- * we do not want to suspend any processing until all revokes are
- * written -bzzz
- */
-void journal_switch_revoke_table(journal_t *journal)
-{
-	int i;
-
-	if (journal->j_revoke == journal->j_revoke_table[0])
-		journal->j_revoke = journal->j_revoke_table[1];
-	else
-		journal->j_revoke = journal->j_revoke_table[0];
-
-	for (i = 0; i < journal->j_revoke->hash_size; i++)
-		INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
-}
-
-/*
- * Write revoke records to the journal for all entries in the current
- * revoke hash, deleting the entries as we go.
- */
-void journal_write_revoke_records(journal_t *journal,
-				  transaction_t *transaction, int write_op)
-{
-	struct journal_head *descriptor;
-	struct jbd_revoke_record_s *record;
-	struct jbd_revoke_table_s *revoke;
-	struct list_head *hash_list;
-	int i, offset, count;
-
-	descriptor = NULL;
-	offset = 0;
-	count = 0;
-
-	/* select revoke table for committing transaction */
-	revoke = journal->j_revoke == journal->j_revoke_table[0] ?
-		journal->j_revoke_table[1] : journal->j_revoke_table[0];
-
-	for (i = 0; i < revoke->hash_size; i++) {
-		hash_list = &revoke->hash_table[i];
-
-		while (!list_empty(hash_list)) {
-			record = (struct jbd_revoke_record_s *)
-				hash_list->next;
-			write_one_revoke_record(journal, transaction,
-						&descriptor, &offset,
-						record, write_op);
-			count++;
-			list_del(&record->hash);
-			kmem_cache_free(revoke_record_cache, record);
-		}
-	}
-	if (descriptor)
-		flush_descriptor(journal, descriptor, offset, write_op);
-	jbd_debug(1, "Wrote %d revoke records\n", count);
-}
-
-/*
- * Write out one revoke record.  We need to create a new descriptor
- * block if the old one is full or if we have not already created one.
- */
-
-static void write_one_revoke_record(journal_t *journal,
-				    transaction_t *transaction,
-				    struct journal_head **descriptorp,
-				    int *offsetp,
-				    struct jbd_revoke_record_s *record,
-				    int write_op)
-{
-	struct journal_head *descriptor;
-	int offset;
-	journal_header_t *header;
-
-	/* If we are already aborting, this all becomes a noop.  We
-           still need to go round the loop in
-           journal_write_revoke_records in order to free all of the
-           revoke records: only the IO to the journal is omitted. */
-	if (is_journal_aborted(journal))
-		return;
-
-	descriptor = *descriptorp;
-	offset = *offsetp;
-
-	/* Make sure we have a descriptor with space left for the record */
-	if (descriptor) {
-		if (offset == journal->j_blocksize) {
-			flush_descriptor(journal, descriptor, offset, write_op);
-			descriptor = NULL;
-		}
-	}
-
-	if (!descriptor) {
-		descriptor = journal_get_descriptor_buffer(journal);
-		if (!descriptor)
-			return;
-		header = (journal_header_t *) &jh2bh(descriptor)->b_data[0];
-		header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
-		header->h_blocktype = cpu_to_be32(JFS_REVOKE_BLOCK);
-		header->h_sequence  = cpu_to_be32(transaction->t_tid);
-
-		/* Record it so that we can wait for IO completion later */
-		JBUFFER_TRACE(descriptor, "file as BJ_LogCtl");
-		journal_file_buffer(descriptor, transaction, BJ_LogCtl);
-
-		offset = sizeof(journal_revoke_header_t);
-		*descriptorp = descriptor;
-	}
-
-	* ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) =
-		cpu_to_be32(record->blocknr);
-	offset += 4;
-	*offsetp = offset;
-}
-
-/*
- * Flush a revoke descriptor out to the journal.  If we are aborting,
- * this is a noop; otherwise we are generating a buffer which needs to
- * be waited for during commit, so it has to go onto the appropriate
- * journal buffer list.
- */
-
-static void flush_descriptor(journal_t *journal,
-			     struct journal_head *descriptor,
-			     int offset, int write_op)
-{
-	journal_revoke_header_t *header;
-	struct buffer_head *bh = jh2bh(descriptor);
-
-	if (is_journal_aborted(journal)) {
-		put_bh(bh);
-		return;
-	}
-
-	header = (journal_revoke_header_t *) jh2bh(descriptor)->b_data;
-	header->r_count = cpu_to_be32(offset);
-	set_buffer_jwrite(bh);
-	BUFFER_TRACE(bh, "write");
-	set_buffer_dirty(bh);
-	write_dirty_buffer(bh, write_op);
-}
-#endif
-
-/*
- * Revoke support for recovery.
- *
- * Recovery needs to be able to:
- *
- *  record all revoke records, including the tid of the latest instance
- *  of each revoke in the journal
- *
- *  check whether a given block in a given transaction should be replayed
- *  (ie. has not been revoked by a revoke record in that or a subsequent
- *  transaction)
- *
- *  empty the revoke table after recovery.
- */
-
-/*
- * First, setting revoke records.  We create a new revoke record for
- * every block ever revoked in the log as we scan it for recovery, and
- * we update the existing records if we find multiple revokes for a
- * single block.
- */
-
-int journal_set_revoke(journal_t *journal,
-		       unsigned int blocknr,
-		       tid_t sequence)
-{
-	struct jbd_revoke_record_s *record;
-
-	record = find_revoke_record(journal, blocknr);
-	if (record) {
-		/* If we have multiple occurrences, only record the
-		 * latest sequence number in the hashed record */
-		if (tid_gt(sequence, record->sequence))
-			record->sequence = sequence;
-		return 0;
-	}
-	return insert_revoke_hash(journal, blocknr, sequence);
-}
-
-/*
- * Test revoke records.  For a given block referenced in the log, has
- * that block been revoked?  A revoke record with a given transaction
- * sequence number revokes all blocks in that transaction and earlier
- * ones, but later transactions still need to be replayed.
- */
-
-int journal_test_revoke(journal_t *journal,
-			unsigned int blocknr,
-			tid_t sequence)
-{
-	struct jbd_revoke_record_s *record;
-
-	record = find_revoke_record(journal, blocknr);
-	if (!record)
-		return 0;
-	if (tid_gt(sequence, record->sequence))
-		return 0;
-	return 1;
-}
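
journal_test_revoke() leans on tid_gt(), whose serial-number comparison keeps working when the 32-bit transaction counter wraps; a standalone copy mirroring the jbd.h helper, showing the wraparound case:

	#include <stdio.h>
	#include <stdint.h>

	typedef uint32_t tid_t;

	/* Mirrors tid_gt() from jbd.h: compare sequence numbers modulo 2^32. */
	static int tid_gt(tid_t x, tid_t y)
	{
		int difference = (int)(x - y);
		return difference > 0;
	}

	int main(void)
	{
		printf("%d\n", tid_gt(10, 5));			/* 1 */
		printf("%d\n", tid_gt(5, 10));			/* 0 */
		/* 3 is "after" 0xfffffffe even though it is numerically smaller */
		printf("%d\n", tid_gt(3, 0xfffffffeu));		/* 1 */
		return 0;
	}
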
-
-/*
- * Finally, once recovery is over, we need to clear the revoke table so
- * that it can be reused by the running filesystem.
- */
-
-void journal_clear_revoke(journal_t *journal)
-{
-	int i;
-	struct list_head *hash_list;
-	struct jbd_revoke_record_s *record;
-	struct jbd_revoke_table_s *revoke;
-
-	revoke = journal->j_revoke;
-
-	for (i = 0; i < revoke->hash_size; i++) {
-		hash_list = &revoke->hash_table[i];
-		while (!list_empty(hash_list)) {
-			record = (struct jbd_revoke_record_s*) hash_list->next;
-			list_del(&record->hash);
-			kmem_cache_free(revoke_record_cache, record);
-		}
-	}
-}
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
deleted file mode 100644
index 1695ba8..0000000
--- a/fs/jbd/transaction.c
+++ /dev/null
@@ -1,2237 +0,0 @@
-/*
- * linux/fs/jbd/transaction.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
- *
- * Copyright 1998 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Generic filesystem transaction handling code; part of the ext2fs
- * journaling system.
- *
- * This file manages transactions (compound commits managed by the
- * journaling code) and handles (individual atomic operations by the
- * filesystem).
- */
-
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/hrtimer.h>
-
-static void __journal_temp_unlink_buffer(struct journal_head *jh);
-
-/*
- * get_transaction: obtain a new transaction_t object.
- *
- * Simply allocate and initialise a new transaction.  Create it in
- * RUNNING state and add it to the current journal (which should not
- * have an existing running transaction: we only make a new transaction
- * once we have started to commit the old one).
- *
- * Preconditions:
- *	The journal MUST be locked.  We don't perform atomic mallocs on the
- *	new transaction	and we can't block without protecting against other
- *	processes trying to touch the journal while it is in transition.
- *
- * Called under j_state_lock
- */
-
-static transaction_t *
-get_transaction(journal_t *journal, transaction_t *transaction)
-{
-	transaction->t_journal = journal;
-	transaction->t_state = T_RUNNING;
-	transaction->t_start_time = ktime_get();
-	transaction->t_tid = journal->j_transaction_sequence++;
-	transaction->t_expires = jiffies + journal->j_commit_interval;
-	spin_lock_init(&transaction->t_handle_lock);
-
-	/* Set up the commit timer for the new transaction. */
-	journal->j_commit_timer.expires =
-				round_jiffies_up(transaction->t_expires);
-	add_timer(&journal->j_commit_timer);
-
-	J_ASSERT(journal->j_running_transaction == NULL);
-	journal->j_running_transaction = transaction;
-
-	return transaction;
-}
-
-/*
- * Handle management.
- *
- * A handle_t is an object which represents a single atomic update to a
- * filesystem, and which tracks all of the modifications which form part
- * of that one update.
- */
-
-/*
- * start_this_handle: Given a handle, deal with any locking or stalling
- * needed to make sure that there is enough journal space for the handle
- * to begin.  Attach the handle to a transaction and set up the
- * transaction's buffer credits.
- */
-
-static int start_this_handle(journal_t *journal, handle_t *handle)
-{
-	transaction_t *transaction;
-	int needed;
-	int nblocks = handle->h_buffer_credits;
-	transaction_t *new_transaction = NULL;
-	int ret = 0;
-
-	if (nblocks > journal->j_max_transaction_buffers) {
-		printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
-		       current->comm, nblocks,
-		       journal->j_max_transaction_buffers);
-		ret = -ENOSPC;
-		goto out;
-	}
-
-alloc_transaction:
-	if (!journal->j_running_transaction) {
-		new_transaction = kzalloc(sizeof(*new_transaction),
-						GFP_NOFS|__GFP_NOFAIL);
-		if (!new_transaction) {
-			ret = -ENOMEM;
-			goto out;
-		}
-	}
-
-	jbd_debug(3, "New handle %p going live.\n", handle);
-
-repeat:
-
-	/*
-	 * We need to hold j_state_lock until t_updates has been incremented,
-	 * for proper journal barrier handling
-	 */
-	spin_lock(&journal->j_state_lock);
-repeat_locked:
-	if (is_journal_aborted(journal) ||
-	    (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
-		spin_unlock(&journal->j_state_lock);
-		ret = -EROFS;
-		goto out;
-	}
-
-	/* Wait on the journal's transaction barrier if necessary */
-	if (journal->j_barrier_count) {
-		spin_unlock(&journal->j_state_lock);
-		wait_event(journal->j_wait_transaction_locked,
-				journal->j_barrier_count == 0);
-		goto repeat;
-	}
-
-	if (!journal->j_running_transaction) {
-		if (!new_transaction) {
-			spin_unlock(&journal->j_state_lock);
-			goto alloc_transaction;
-		}
-		get_transaction(journal, new_transaction);
-		new_transaction = NULL;
-	}
-
-	transaction = journal->j_running_transaction;
-
-	/*
-	 * If the current transaction is locked down for commit, wait for the
-	 * lock to be released.
-	 */
-	if (transaction->t_state == T_LOCKED) {
-		DEFINE_WAIT(wait);
-
-		prepare_to_wait(&journal->j_wait_transaction_locked,
-					&wait, TASK_UNINTERRUPTIBLE);
-		spin_unlock(&journal->j_state_lock);
-		schedule();
-		finish_wait(&journal->j_wait_transaction_locked, &wait);
-		goto repeat;
-	}
-
-	/*
-	 * If there is not enough space left in the log to write all potential
-	 * buffers requested by this operation, we need to stall pending a log
-	 * checkpoint to free some more log space.
-	 */
-	spin_lock(&transaction->t_handle_lock);
-	needed = transaction->t_outstanding_credits + nblocks;
-
-	if (needed > journal->j_max_transaction_buffers) {
-		/*
-		 * If the current transaction is already too large, then start
-		 * to commit it: we can then go back and attach this handle to
-		 * a new transaction.
-		 */
-		DEFINE_WAIT(wait);
-
-		jbd_debug(2, "Handle %p starting new commit...\n", handle);
-		spin_unlock(&transaction->t_handle_lock);
-		prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
-				TASK_UNINTERRUPTIBLE);
-		__log_start_commit(journal, transaction->t_tid);
-		spin_unlock(&journal->j_state_lock);
-		schedule();
-		finish_wait(&journal->j_wait_transaction_locked, &wait);
-		goto repeat;
-	}
-
-	/*
-	 * The commit code assumes that it can get enough log space
-	 * without forcing a checkpoint.  This is *critical* for
-	 * correctness: a checkpoint of a buffer which is also
-	 * associated with a committing transaction creates a deadlock,
-	 * so commit simply cannot force through checkpoints.
-	 *
-	 * We must therefore ensure the necessary space in the journal
-	 * *before* starting to dirty potentially checkpointed buffers
-	 * in the new transaction.
-	 *
-	 * The worst part is, any transaction currently committing can
-	 * reduce the free space arbitrarily.  Be careful to account for
-	 * those buffers when checkpointing.
-	 */
-
-	/*
-	 * @@@ AKPM: This seems rather over-defensive.  We're giving commit
-	 * a _lot_ of headroom: 1/4 of the journal plus the size of
-	 * the committing transaction.  Really, we only need to give it
-	 * committing_transaction->t_outstanding_credits plus "enough" for
-	 * the log control blocks.
-	 * Also, this test is inconsistent with the matching one in
-	 * journal_extend().
-	 */
-	if (__log_space_left(journal) < jbd_space_needed(journal)) {
-		jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
-		spin_unlock(&transaction->t_handle_lock);
-		__log_wait_for_space(journal);
-		goto repeat_locked;
-	}
-
-	/* OK, account for the buffers that this operation expects to
-	 * use and add the handle to the running transaction. */
-
-	handle->h_transaction = transaction;
-	transaction->t_outstanding_credits += nblocks;
-	transaction->t_updates++;
-	transaction->t_handle_count++;
-	jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
-		  handle, nblocks, transaction->t_outstanding_credits,
-		  __log_space_left(journal));
-	spin_unlock(&transaction->t_handle_lock);
-	spin_unlock(&journal->j_state_lock);
-
-	lock_map_acquire(&handle->h_lockdep_map);
-out:
-	if (unlikely(new_transaction))		/* It's usually NULL */
-		kfree(new_transaction);
-	return ret;
-}
-
-static struct lock_class_key jbd_handle_key;
-
-/* Allocate a new handle.  This should probably be in a slab... */
-static handle_t *new_handle(int nblocks)
-{
-	handle_t *handle = jbd_alloc_handle(GFP_NOFS);
-	if (!handle)
-		return NULL;
-	handle->h_buffer_credits = nblocks;
-	handle->h_ref = 1;
-
-	lockdep_init_map(&handle->h_lockdep_map, "jbd_handle", &jbd_handle_key, 0);
-
-	return handle;
-}
-
-/**
- * handle_t *journal_start() - Obtain a new handle.
- * @journal: Journal to start transaction on.
- * @nblocks: number of block buffers we might modify
- *
- * We make sure that the transaction can guarantee at least nblocks of
- * modified buffers in the log.  We block until the log can guarantee
- * that much space.
- *
- * This function is visible to journal users (like ext3fs), so is not
- * called with the journal already locked.
- *
- * Return a pointer to a newly allocated handle, or an ERR_PTR() value
- * on failure.
- */
-handle_t *journal_start(journal_t *journal, int nblocks)
-{
-	handle_t *handle = journal_current_handle();
-	int err;
-
-	if (!journal)
-		return ERR_PTR(-EROFS);
-
-	if (handle) {
-		J_ASSERT(handle->h_transaction->t_journal == journal);
-		handle->h_ref++;
-		return handle;
-	}
-
-	handle = new_handle(nblocks);
-	if (!handle)
-		return ERR_PTR(-ENOMEM);
-
-	current->journal_info = handle;
-
-	err = start_this_handle(journal, handle);
-	if (err < 0) {
-		jbd_free_handle(handle);
-		current->journal_info = NULL;
-		handle = ERR_PTR(err);
-	}
-	return handle;
-}
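/*
 * Usage sketch (illustrative, not from the original file), assuming an
 * ext3-like caller: obtain a handle, do the work, balance with
 * journal_stop().  The function name and credit count are invented.
 * Note that a nested journal_start() on the same task only bumps h_ref,
 * so start/stop pairs must match.
 */
static int do_one_update(journal_t *journal)
{
	handle_t *handle = journal_start(journal, 1);	/* one buffer credit */

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	/* ... journal_get_write_access() / journal_dirty_metadata() ... */
	return journal_stop(handle);
}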
-
-/**
- * int journal_extend() - extend buffer credits.
- * @handle:  handle to 'extend'
- * @nblocks: nr blocks to try to extend by.
- *
- * Some transactions, such as large extends and truncates, can be done
- * atomically all at once or in several stages.  The operation requests
- * a credit for a number of buffer modifications in advance, but can
- * extend its credit if it needs more.
- *
- * journal_extend tries to give the running handle more buffer credits.
- * It does not guarantee the allocation; this is best-effort only.
- * The calling process MUST be able to deal cleanly with a failure to
- * extend here.
- *
- * Return 0 on success, non-zero on failure.
- *
- * A return code < 0 implies an error;
- * a return code > 0 implies normal transaction-full status.
- */
-int journal_extend(handle_t *handle, int nblocks)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	int result;
-	int wanted;
-
-	result = -EIO;
-	if (is_handle_aborted(handle))
-		goto out;
-
-	result = 1;
-
-	spin_lock(&journal->j_state_lock);
-
-	/* Don't extend a locked-down transaction! */
-	if (handle->h_transaction->t_state != T_RUNNING) {
-		jbd_debug(3, "denied handle %p %d blocks: "
-			  "transaction not running\n", handle, nblocks);
-		goto error_out;
-	}
-
-	spin_lock(&transaction->t_handle_lock);
-	wanted = transaction->t_outstanding_credits + nblocks;
-
-	if (wanted > journal->j_max_transaction_buffers) {
-		jbd_debug(3, "denied handle %p %d blocks: "
-			  "transaction too large\n", handle, nblocks);
-		goto unlock;
-	}
-
-	if (wanted > __log_space_left(journal)) {
-		jbd_debug(3, "denied handle %p %d blocks: "
-			  "insufficient log space\n", handle, nblocks);
-		goto unlock;
-	}
-
-	handle->h_buffer_credits += nblocks;
-	transaction->t_outstanding_credits += nblocks;
-	result = 0;
-
-	jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
-unlock:
-	spin_unlock(&transaction->t_handle_lock);
-error_out:
-	spin_unlock(&journal->j_state_lock);
-out:
-	return result;
-}
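/*
 * Sketch of the return-code convention above (err and nblocks are the
 * caller's; the error handling is illustrative): a positive return just
 * means the running transaction has no room, which callers typically
 * handle by falling back to journal_restart(), documented below; a
 * negative return is a real error.
 */
	err = journal_extend(handle, nblocks);
	if (err > 0)					/* transaction full */
		err = journal_restart(handle, nblocks);	/* commit + re-reserve */
	if (err < 0)
		goto fail;				/* e.g. aborted handle */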
-
-
-/**
- * int journal_restart() - restart a handle.
- * @handle:  handle to restart
- * @nblocks: nr credits requested
- *
- * Restart a handle for a multi-transaction filesystem
- * operation.
- *
- * If the journal_extend() call above fails to grant new buffer credits
- * to a running handle, a call to journal_restart will commit the
- * handle's transaction so far and reattach the handle to a new
- * transaction capable of guaranteeing the requested number of
- * credits.
- */
-
-int journal_restart(handle_t *handle, int nblocks)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	int ret;
-
-	/* If we've had an abort of any type, don't even think about
-	 * actually doing the restart! */
-	if (is_handle_aborted(handle))
-		return 0;
-
-	/*
-	 * First unlink the handle from its current transaction, and start the
-	 * commit on that.
-	 */
-	J_ASSERT(transaction->t_updates > 0);
-	J_ASSERT(journal_current_handle() == handle);
-
-	spin_lock(&journal->j_state_lock);
-	spin_lock(&transaction->t_handle_lock);
-	transaction->t_outstanding_credits -= handle->h_buffer_credits;
-	transaction->t_updates--;
-
-	if (!transaction->t_updates)
-		wake_up(&journal->j_wait_updates);
-	spin_unlock(&transaction->t_handle_lock);
-
-	jbd_debug(2, "restarting handle %p\n", handle);
-	__log_start_commit(journal, transaction->t_tid);
-	spin_unlock(&journal->j_state_lock);
-
-	lock_map_release(&handle->h_lockdep_map);
-	handle->h_buffer_credits = nblocks;
-	ret = start_this_handle(journal, handle);
-	return ret;
-}
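/*
 * Putting extend and restart together: a sketch of a long multi-step
 * operation (a big truncate, say).  more_to_do(), do_one_step() and
 * STEP_CREDITS are purely illustrative; err is the caller's.
 */
	while (more_to_do()) {
		if (handle->h_buffer_credits < STEP_CREDITS) {
			err = journal_extend(handle, STEP_CREDITS);
			if (err > 0)
				err = journal_restart(handle, STEP_CREDITS);
			if (err)
				return err;
		}
		do_one_step(handle);	/* dirties <= STEP_CREDITS buffers */
	}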
-
-
-/**
- * void journal_lock_updates () - establish a transaction barrier.
- * @journal:  Journal to establish a barrier on.
- *
- * This locks out any further updates from being started, and blocks until all
- * existing updates have completed, returning only once the journal is in a
- * quiescent state with no updates running.
- *
- * We do not use a simple mutex for synchronization because there are
- * syscalls which want to return with the filesystem locked, and that trips
- * up lockdep. Also, hibernate needs to lock the filesystem, but a locked
- * mutex would block hibernation. Since locking the filesystem is a rare
- * operation, we use a simple counter and a waitqueue for the locking.
- */
-void journal_lock_updates(journal_t *journal)
-{
-	DEFINE_WAIT(wait);
-
-wait:
-	/* Wait for previous locked operation to finish */
-	wait_event(journal->j_wait_transaction_locked,
-		   journal->j_barrier_count == 0);
-
-	spin_lock(&journal->j_state_lock);
-	/*
-	 * Check reliably under the lock whether we are the ones who win the
-	 * race and lock the journal.
-	 */
-	if (journal->j_barrier_count > 0) {
-		spin_unlock(&journal->j_state_lock);
-		goto wait;
-	}
-	++journal->j_barrier_count;
-
-	/* Wait until there are no running updates */
-	while (1) {
-		transaction_t *transaction = journal->j_running_transaction;
-
-		if (!transaction)
-			break;
-
-		spin_lock(&transaction->t_handle_lock);
-		if (!transaction->t_updates) {
-			spin_unlock(&transaction->t_handle_lock);
-			break;
-		}
-		prepare_to_wait(&journal->j_wait_updates, &wait,
-				TASK_UNINTERRUPTIBLE);
-		spin_unlock(&transaction->t_handle_lock);
-		spin_unlock(&journal->j_state_lock);
-		schedule();
-		finish_wait(&journal->j_wait_updates, &wait);
-		spin_lock(&journal->j_state_lock);
-	}
-	spin_unlock(&journal->j_state_lock);
-}
-
-/**
- * void journal_unlock_updates (journal_t* journal) - release barrier
- * @journal:  Journal to release the barrier on.
- *
- * Release a transaction barrier obtained with journal_lock_updates().
- */
-void journal_unlock_updates (journal_t *journal)
-{
-	J_ASSERT(journal->j_barrier_count != 0);
-
-	spin_lock(&journal->j_state_lock);
-	--journal->j_barrier_count;
-	spin_unlock(&journal->j_state_lock);
-	wake_up(&journal->j_wait_transaction_locked);
-}
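/*
 * Usage is a simple pair around whatever needs a quiescent journal (the
 * work in the middle is illustrative):
 */
	journal_lock_updates(journal);	/* blocks until no updates run */
	/* ... filesystem quiescent: e.g. snapshot or remount-ro work ... */
	journal_unlock_updates(journal);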
-
-static void warn_dirty_buffer(struct buffer_head *bh)
-{
-	char b[BDEVNAME_SIZE];
-
-	printk(KERN_WARNING
-	       "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
-	       "There's a risk of filesystem corruption in case of system "
-	       "crash.\n",
-	       bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
-}
-
-/*
- * If the buffer is already part of the current transaction, then there
- * is nothing we need to do.  If it is already part of a prior
- * transaction which we are still committing to disk, then we need to
- * make sure that we do not overwrite the old copy: we do copy-out to
- * preserve the copy going to disk.  We also account the buffer against
- * the handle's metadata buffer credits (unless the buffer is already
- * part of the transaction, that is).
- *
- */
-static int
-do_get_write_access(handle_t *handle, struct journal_head *jh,
-			int force_copy)
-{
-	struct buffer_head *bh;
-	transaction_t *transaction;
-	journal_t *journal;
-	int error;
-	char *frozen_buffer = NULL;
-	int need_copy = 0;
-
-	if (is_handle_aborted(handle))
-		return -EROFS;
-
-	transaction = handle->h_transaction;
-	journal = transaction->t_journal;
-
-	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
-
-	JBUFFER_TRACE(jh, "entry");
-repeat:
-	bh = jh2bh(jh);
-
-	/* @@@ Need to check for errors here at some point. */
-
-	lock_buffer(bh);
-	jbd_lock_bh_state(bh);
-
-	/* We now hold the buffer lock so it is safe to query the buffer
-	 * state.  Is the buffer dirty?
-	 *
-	 * If so, there are two possibilities.  The buffer may be
-	 * non-journaled, and undergoing a quite legitimate writeback.
-	 * Otherwise, it is journaled, and we don't expect dirty buffers
-	 * in that state (the buffers should be marked JBD_Dirty
-	 * instead.)  So either the IO is being done under our own
-	 * control and this is a bug, or it's a third party IO such as
-	 * dump(8) (which may leave the buffer scheduled for read ---
-	 * ie. locked but not dirty) or tune2fs (which may actually have
-	 * the buffer dirtied, ugh.)  */
-
-	if (buffer_dirty(bh)) {
-		/*
-		 * First question: is this buffer already part of the current
-		 * transaction or the existing committing transaction?
-		 */
-		if (jh->b_transaction) {
-			J_ASSERT_JH(jh,
-				jh->b_transaction == transaction ||
-				jh->b_transaction ==
-					journal->j_committing_transaction);
-			if (jh->b_next_transaction)
-				J_ASSERT_JH(jh, jh->b_next_transaction ==
-							transaction);
-			warn_dirty_buffer(bh);
-		}
-		/*
-		 * In any case we need to clean the dirty flag and we must
-		 * do it under the buffer lock to be sure we don't race
-		 * with running write-out.
-		 */
-		JBUFFER_TRACE(jh, "Journalling dirty buffer");
-		clear_buffer_dirty(bh);
-		set_buffer_jbddirty(bh);
-	}
-
-	unlock_buffer(bh);
-
-	error = -EROFS;
-	if (is_handle_aborted(handle)) {
-		jbd_unlock_bh_state(bh);
-		goto out;
-	}
-	error = 0;
-
-	/*
-	 * The buffer is already part of this transaction if b_transaction or
-	 * b_next_transaction points to it
-	 */
-	if (jh->b_transaction == transaction ||
-	    jh->b_next_transaction == transaction)
-		goto done;
-
-	/*
-	 * this is the first time this transaction is touching this buffer,
-	 * reset the modified flag
-	 */
-	jh->b_modified = 0;
-
-	/*
-	 * If there is already a copy-out version of this buffer, then we don't
-	 * need to make another one
-	 */
-	if (jh->b_frozen_data) {
-		JBUFFER_TRACE(jh, "has frozen data");
-		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
-		jh->b_next_transaction = transaction;
-		goto done;
-	}
-
-	/* Is there data here we need to preserve? */
-
-	if (jh->b_transaction && jh->b_transaction != transaction) {
-		JBUFFER_TRACE(jh, "owned by older transaction");
-		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
-		J_ASSERT_JH(jh, jh->b_transaction ==
-					journal->j_committing_transaction);
-
-		/* There is one case we have to be very careful about.
-		 * If the committing transaction is currently writing
-		 * this buffer out to disk and has NOT made a copy-out,
-		 * then we cannot modify the buffer contents at all
-		 * right now.  The essence of copy-out is that it is the
-		 * extra copy, not the primary copy, which gets
-		 * journaled.  If the primary copy is already going to
-		 * disk then we cannot do copy-out here. */
-
-		if (jh->b_jlist == BJ_Shadow) {
-			DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
-			wait_queue_head_t *wqh;
-
-			wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);
-
-			JBUFFER_TRACE(jh, "on shadow: sleep");
-			jbd_unlock_bh_state(bh);
-			/* commit wakes up all shadow buffers after IO */
-			for ( ; ; ) {
-				prepare_to_wait(wqh, &wait.wait,
-						TASK_UNINTERRUPTIBLE);
-				if (jh->b_jlist != BJ_Shadow)
-					break;
-				schedule();
-			}
-			finish_wait(wqh, &wait.wait);
-			goto repeat;
-		}
-
-		/* Only do the copy if the currently-owning transaction
-		 * still needs it.  If it is on the Forget list, the
-		 * committing transaction is past that stage.  The
-		 * buffer had better remain locked during the kmalloc,
-		 * but that should be true --- we hold the journal lock
-		 * still and the buffer is already on the BUF_JOURNAL
-		 * list so won't be flushed.
-		 *
-		 * Subtle point, though: if this is a get_undo_access,
-		 * then we will be relying on the frozen_data to contain
-		 * the new value of the committed_data record after the
-		 * transaction, so we HAVE to force the frozen_data copy
-		 * in that case. */
-
-		if (jh->b_jlist != BJ_Forget || force_copy) {
-			JBUFFER_TRACE(jh, "generate frozen data");
-			if (!frozen_buffer) {
-				JBUFFER_TRACE(jh, "allocate memory for buffer");
-				jbd_unlock_bh_state(bh);
-				frozen_buffer =
-					jbd_alloc(jh2bh(jh)->b_size,
-							 GFP_NOFS);
-				if (!frozen_buffer) {
-					printk(KERN_ERR
-					       "%s: OOM for frozen_buffer\n",
-					       __func__);
-					JBUFFER_TRACE(jh, "oom!");
-					error = -ENOMEM;
-					jbd_lock_bh_state(bh);
-					goto done;
-				}
-				goto repeat;
-			}
-			jh->b_frozen_data = frozen_buffer;
-			frozen_buffer = NULL;
-			need_copy = 1;
-		}
-		jh->b_next_transaction = transaction;
-	}
-
-
-	/*
-	 * Finally, if the buffer is not journaled right now, we need to make
-	 * sure it doesn't get written to disk before the caller actually
-	 * commits the new data
-	 */
-	if (!jh->b_transaction) {
-		JBUFFER_TRACE(jh, "no transaction");
-		J_ASSERT_JH(jh, !jh->b_next_transaction);
-		JBUFFER_TRACE(jh, "file as BJ_Reserved");
-		spin_lock(&journal->j_list_lock);
-		__journal_file_buffer(jh, transaction, BJ_Reserved);
-		spin_unlock(&journal->j_list_lock);
-	}
-
-done:
-	if (need_copy) {
-		struct page *page;
-		int offset;
-		char *source;
-
-		J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
-			    "Possible IO failure.\n");
-		page = jh2bh(jh)->b_page;
-		offset = offset_in_page(jh2bh(jh)->b_data);
-		source = kmap_atomic(page);
-		memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
-		kunmap_atomic(source);
-	}
-	jbd_unlock_bh_state(bh);
-
-	/*
-	 * If we are about to journal a buffer, then any revoke pending on it is
-	 * no longer valid
-	 */
-	journal_cancel_revoke(handle, jh);
-
-out:
-	if (unlikely(frozen_buffer))	/* It's usually NULL */
-		jbd_free(frozen_buffer, bh->b_size);
-
-	JBUFFER_TRACE(jh, "exit");
-	return error;
-}
-
-/**
- * int journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
- * @handle: transaction to add buffer modifications to
- * @bh:     bh to be used for metadata writes
- *
- * Returns an error code or 0 on success.
- *
- * In full data journalling mode the buffer may be of type BJ_AsyncData,
- * because we're write()ing a buffer which is also part of a shared mapping.
- */
-
-int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
-{
-	struct journal_head *jh = journal_add_journal_head(bh);
-	int rc;
-
-	/* We do not want to get caught playing with fields which the
-	 * log thread also manipulates.  Make sure that the buffer
-	 * completes any outstanding IO before proceeding. */
-	rc = do_get_write_access(handle, jh, 0);
-	journal_put_journal_head(jh);
-	return rc;
-}
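/*
 * The canonical caller pattern, sketched with an invented on-disk field
 * (err, handle and bh come from the caller): declare intent first, modify
 * the buffer only once access is granted (jbd may have made a frozen copy
 * by then), then file it as dirty metadata.
 */
	err = journal_get_write_access(handle, bh);
	if (err)
		goto out;
	/* safe to modify bh->b_data now */
	((__le32 *)bh->b_data)[0] = cpu_to_le32(new_value);	/* illustrative */
	err = journal_dirty_metadata(handle, bh);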
-
-
-/*
- * When the user wants to journal a newly created buffer_head
- * (ie. getblk() returned a new buffer and we are going to populate it
- * manually rather than reading off disk), then we need to keep the
- * buffer_head locked until it has been completely filled with new
- * data.  In this case, we should be able to make the assertion that
- * the bh is not already part of an existing transaction.
- *
- * The buffer should already be locked by the caller by this point.
- * There is no lock ranking violation: it was a newly created,
- * unlocked buffer beforehand. */
-
-/**
- * int journal_get_create_access () - notify intent to use newly created bh
- * @handle: transaction to add the new buffer to
- * @bh: new buffer.
- *
- * Call this if you create a new bh.
- */
-int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	struct journal_head *jh = journal_add_journal_head(bh);
-	int err;
-
-	jbd_debug(5, "journal_head %p\n", jh);
-	err = -EROFS;
-	if (is_handle_aborted(handle))
-		goto out;
-	err = 0;
-
-	JBUFFER_TRACE(jh, "entry");
-	/*
-	 * The buffer may already belong to this transaction due to pre-zeroing
-	 * in the filesystem's new_block code.  It may also be on the previous,
-	 * committing transaction's lists, but it HAS to be in Forget state in
-	 * that case: the transaction must have deleted the buffer for it to be
-	 * reused here.
-	 */
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-	J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
-		jh->b_transaction == NULL ||
-		(jh->b_transaction == journal->j_committing_transaction &&
-			  jh->b_jlist == BJ_Forget)));
-
-	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
-	J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
-
-	if (jh->b_transaction == NULL) {
-		/*
-		 * Previous journal_forget() could have left the buffer
-		 * with jbddirty bit set because it was being committed. When
-		 * the commit finished, we've filed the buffer for
-		 * checkpointing and marked it dirty. Now we are reallocating
-		 * the buffer so the transaction freeing it must have
-		 * committed and so it's safe to clear the dirty bit.
-		 */
-		clear_buffer_dirty(jh2bh(jh));
-
-		/* first access by this transaction */
-		jh->b_modified = 0;
-
-		JBUFFER_TRACE(jh, "file as BJ_Reserved");
-		__journal_file_buffer(jh, transaction, BJ_Reserved);
-	} else if (jh->b_transaction == journal->j_committing_transaction) {
-		/* first access by this transaction */
-		jh->b_modified = 0;
-
-		JBUFFER_TRACE(jh, "set next transaction");
-		jh->b_next_transaction = transaction;
-	}
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh);
-
-	/*
-	 * akpm: I added this.  ext3_alloc_branch can pick up new indirect
-	 * blocks which contain freed but then revoked metadata.  We need
-	 * to cancel the revoke in case we end up freeing it yet again
-	 * and then reallocating it as data - this would cause a second revoke,
-	 * which hits an assertion error.
-	 */
-	JBUFFER_TRACE(jh, "cancelling revoke");
-	journal_cancel_revoke(handle, jh);
-out:
-	journal_put_journal_head(jh);
-	return err;
-}
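/*
 * Sketch of the newly-created-buffer path described above.  sb_getblk(),
 * the block number and err are assumptions from the caller; the essential
 * point is that the buffer stays locked while it is being filled.
 */
	struct buffer_head *bh = sb_getblk(sb, blocknr);

	if (!bh)
		return -ENOMEM;
	lock_buffer(bh);
	err = journal_get_create_access(handle, bh);
	if (!err) {
		memset(bh->b_data, 0, bh->b_size);	/* populate manually */
		set_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	if (!err)
		err = journal_dirty_metadata(handle, bh);
	brelse(bh);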
-
-/**
- * int journal_get_undo_access() - Notify intent to modify metadata with non-rewindable consequences
- * @handle: transaction
- * @bh: buffer to undo
- *
- * Sometimes there is a need to distinguish between metadata which has
- * been committed to disk and that which has not.  The ext3fs code uses
- * this for freeing and allocating space, we have to make sure that we
- * do not reuse freed space until the deallocation has been committed,
- * since if we overwrote that space we would make the delete
- * un-rewindable in case of a crash.
- *
- * To deal with that, journal_get_undo_access requests write access to a
- * buffer for parts of non-rewindable operations such as delete
- * operations on the bitmaps.  The journaling code must keep a copy of
- * the buffer's contents prior to the undo_access call until such time
- * as we know that the buffer has definitely been committed to disk.
- *
- * We never need to know which transaction the committed data is part
- * of, buffers touched here are guaranteed to be dirtied later and so
- * will be committed to a new transaction in due course, at which point
- * we can discard the old committed data pointer.
- *
- * Returns error number or 0 on success.
- */
-int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
-{
-	int err;
-	struct journal_head *jh = journal_add_journal_head(bh);
-	char *committed_data = NULL;
-
-	JBUFFER_TRACE(jh, "entry");
-
-	/*
-	 * Do this first --- it can drop the journal lock, so we want to
-	 * make sure that obtaining the committed_data is done
-	 * atomically wrt. completion of any outstanding commits.
-	 */
-	err = do_get_write_access(handle, jh, 1);
-	if (err)
-		goto out;
-
-repeat:
-	if (!jh->b_committed_data) {
-		committed_data = jbd_alloc(jh2bh(jh)->b_size, GFP_NOFS);
-		if (!committed_data) {
-			printk(KERN_ERR "%s: No memory for committed data\n",
-				__func__);
-			err = -ENOMEM;
-			goto out;
-		}
-	}
-
-	jbd_lock_bh_state(bh);
-	if (!jh->b_committed_data) {
-		/* Copy out the current buffer contents into the
-		 * preserved, committed copy. */
-		JBUFFER_TRACE(jh, "generate b_committed data");
-		if (!committed_data) {
-			jbd_unlock_bh_state(bh);
-			goto repeat;
-		}
-
-		jh->b_committed_data = committed_data;
-		committed_data = NULL;
-		memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
-	}
-	jbd_unlock_bh_state(bh);
-out:
-	journal_put_journal_head(jh);
-	if (unlikely(committed_data))
-		jbd_free(committed_data, bh->b_size);
-	return err;
-}
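/*
 * Sketch of the intended use, modelled loosely on ext3's block-freeing
 * path (bitmap_bh, bit_nr and err are illustrative): request undo access
 * so jbd preserves the committed copy of the bitmap, then clear the
 * allocation bit.
 */
	err = journal_get_undo_access(handle, bitmap_bh);
	if (err)
		goto out;
	clear_bit(bit_nr, (unsigned long *)bitmap_bh->b_data);	/* free block */
	err = journal_dirty_metadata(handle, bitmap_bh);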
-
-/**
- * int journal_dirty_data() - mark a buffer as containing dirty data to be flushed
- * @handle: transaction
- * @bh: bufferhead to mark
- *
- * Description:
- * Mark a buffer as containing dirty data which needs to be flushed before
- * we can commit the current transaction.
- *
- * The buffer is placed on the transaction's data list and is marked as
- * belonging to the transaction.
- *
- * Returns error number or 0 on success.
- *
- * journal_dirty_data() can be called via page_launder->ext3_writepage
- * by kswapd.
- */
-int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
-{
-	journal_t *journal = handle->h_transaction->t_journal;
-	int need_brelse = 0;
-	struct journal_head *jh;
-	int ret = 0;
-
-	if (is_handle_aborted(handle))
-		return ret;
-
-	jh = journal_add_journal_head(bh);
-	JBUFFER_TRACE(jh, "entry");
-
-	/*
-	 * The buffer could *already* be dirty.  Writeout can start
-	 * at any time.
-	 */
-	jbd_debug(4, "jh: %p, tid:%d\n", jh, handle->h_transaction->t_tid);
-
-	/*
-	 * What if the buffer is already part of a running transaction?
-	 *
-	 * There are two cases:
-	 * 1) It is part of the current running transaction.  Refile it,
-	 *    just in case we have allocated it as metadata, deallocated
-	 *    it, then reallocated it as data.
-	 * 2) It is part of the previous, still-committing transaction.
-	 *    If all we want to do is to guarantee that the buffer will be
-	 *    written to disk before this new transaction commits, then
-	 *    being sure that the *previous* transaction has this same
-	 *    property is sufficient for us!  Just leave it on its old
-	 *    transaction.
-	 *
-	 * In case (2), the buffer must not already exist as metadata
-	 * --- that would violate write ordering (a transaction is free
-	 * to write its data at any point, even before the previous
-	 * committing transaction has committed).  The caller must
-	 * never, ever allow this to happen: there's nothing we can do
-	 * about it in this layer.
-	 */
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-
-	/* Now that we have bh_state locked, are we really still mapped? */
-	if (!buffer_mapped(bh)) {
-		JBUFFER_TRACE(jh, "unmapped buffer, bailing out");
-		goto no_journal;
-	}
-
-	if (jh->b_transaction) {
-		JBUFFER_TRACE(jh, "has transaction");
-		if (jh->b_transaction != handle->h_transaction) {
-			JBUFFER_TRACE(jh, "belongs to older transaction");
-			J_ASSERT_JH(jh, jh->b_transaction ==
-					journal->j_committing_transaction);
-
-			/* @@@ IS THIS TRUE  ? */
-			/*
-			 * Not any more.  Scenario: someone does a write()
-			 * in data=journal mode.  The buffer's transaction has
-			 * moved into commit.  Then someone does another
-			 * write() to the file.  We do the frozen data copyout
-			 * and set b_next_transaction to point to j_running_t.
-			 * And while we're in that state, someone does a
-			 * writepage() in an attempt to pageout the same area
-			 * of the file via a shared mapping.  At present that
-			 * calls journal_dirty_data(), and we get right here.
-			 * It may be too late to journal the data.  Simply
-			 * falling through to the next test will suffice: the
-			 * data will be dirty and will be checkpointed.  The
-			 * ordering comments in the next comment block still
-			 * apply.
-			 */
-			//J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
-
-			/*
-			 * If we're journalling data, and this buffer was
-			 * subject to a write(), it could be metadata, forget
-			 * or shadow against the committing transaction.  Now,
-			 * someone has dirtied the same darn page via a mapping
-			 * and it is being writepage()'d.
-			 * We *could* just steal the page from commit, with some
-			 * fancy locking there.  Instead, we just skip it -
-			 * don't tie the page's buffers to the new transaction
-			 * at all.
-			 * Implication: if we crash before the writepage() data
-			 * is written into the filesystem, recovery will replay
-			 * the write() data.
-			 */
-			if (jh->b_jlist != BJ_None &&
-					jh->b_jlist != BJ_SyncData &&
-					jh->b_jlist != BJ_Locked) {
-				JBUFFER_TRACE(jh, "Not stealing");
-				goto no_journal;
-			}
-
-			/*
-			 * This buffer may be undergoing writeout in commit.  We
-			 * can't return from here and let the caller dirty it
-			 * again because that can cause the write-out loop in
-			 * commit to never terminate.
-			 */
-			if (buffer_dirty(bh)) {
-				get_bh(bh);
-				spin_unlock(&journal->j_list_lock);
-				jbd_unlock_bh_state(bh);
-				need_brelse = 1;
-				sync_dirty_buffer(bh);
-				jbd_lock_bh_state(bh);
-				spin_lock(&journal->j_list_lock);
-				/* Since we dropped the lock... */
-				if (!buffer_mapped(bh)) {
-					JBUFFER_TRACE(jh, "buffer got unmapped");
-					goto no_journal;
-				}
-				/* The buffer may become locked again at any
-				   time if it is redirtied */
-			}
-
-			/*
-			 * We cannot remove the buffer with io error from the
-			 * committing transaction, because otherwise it would
-			 * miss the error and the commit would not abort.
-			 */
-			if (unlikely(!buffer_uptodate(bh))) {
-				ret = -EIO;
-				goto no_journal;
-			}
-			/* We might have slept so buffer could be refiled now */
-			if (jh->b_transaction != NULL &&
-			    jh->b_transaction != handle->h_transaction) {
-				JBUFFER_TRACE(jh, "unfile from commit");
-				__journal_temp_unlink_buffer(jh);
-				/* It still points to the committing
-				 * transaction; move it to this one so
-				 * that the refile assert checks are
-				 * happy. */
-				jh->b_transaction = handle->h_transaction;
-			}
-			/* The buffer will be refiled below */
-
-		}
-		/*
-		 * Special case --- the buffer might actually have been
-		 * allocated and then immediately deallocated in the previous,
-		 * committing transaction, so might still be left on that
-		 * transaction's metadata lists.
-		 */
-		if (jh->b_jlist != BJ_SyncData && jh->b_jlist != BJ_Locked) {
-			JBUFFER_TRACE(jh, "not on correct data list: unfile");
-			J_ASSERT_JH(jh, jh->b_jlist != BJ_Shadow);
-			JBUFFER_TRACE(jh, "file as data");
-			__journal_file_buffer(jh, handle->h_transaction,
-						BJ_SyncData);
-		}
-	} else {
-		JBUFFER_TRACE(jh, "not on a transaction");
-		__journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
-	}
-no_journal:
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh);
-	if (need_brelse) {
-		BUFFER_TRACE(bh, "brelse");
-		__brelse(bh);
-	}
-	JBUFFER_TRACE(jh, "exit");
-	journal_put_journal_head(jh);
-	return ret;
-}
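/*
 * In ordered mode an ext3-like writepage path would call this once per
 * data buffer on the page, roughly as below (the loop shape is an
 * assumption; err comes from the caller):
 */
	struct buffer_head *head = page_buffers(page), *bh = head;

	do {
		err = journal_dirty_data(handle, bh);
		if (err)
			break;
	} while ((bh = bh->b_this_page) != head);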
-
-/**
- * int journal_dirty_metadata() - mark a buffer as containing dirty metadata
- * @handle: transaction to add buffer to.
- * @bh: buffer to mark
- *
- * Mark dirty metadata which needs to be journaled as part of the current
- * transaction.
- *
- * The buffer is placed on the transaction's metadata list and is marked
- * as belonging to the transaction.
- *
- * Returns error number or 0 on success.
- *
- * Special care needs to be taken if the buffer already belongs to the
- * current committing transaction (in which case we should have frozen
- * data present for that commit).  In that case, we don't relink the
- * buffer: that only gets done when the old transaction finally
- * completes its commit.
- */
-int journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	struct journal_head *jh = bh2jh(bh);
-
-	jbd_debug(5, "journal_head %p\n", jh);
-	JBUFFER_TRACE(jh, "entry");
-	if (is_handle_aborted(handle))
-		goto out;
-
-	jbd_lock_bh_state(bh);
-
-	if (jh->b_modified == 0) {
-		/*
-		 * This buffer has been modified and is becoming part
-		 * of the transaction. This needs to be done
-		 * once per transaction -bzzz
-		 */
-		jh->b_modified = 1;
-		J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
-		handle->h_buffer_credits--;
-	}
-
-	/*
-	 * fastpath, to avoid expensive locking.  If this buffer is already
-	 * on the running transaction's metadata list there is nothing to do.
-	 * Nobody can take it off again because there is a handle open.
-	 * I _think_ we're OK here with SMP barriers - a mistaken decision will
-	 * result in this test being false, so we go in and take the locks.
-	 */
-	if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
-		JBUFFER_TRACE(jh, "fastpath");
-		J_ASSERT_JH(jh, jh->b_transaction ==
-					journal->j_running_transaction);
-		goto out_unlock_bh;
-	}
-
-	set_buffer_jbddirty(bh);
-
-	/*
-	 * Metadata already on the current transaction list doesn't
-	 * need to be filed.  Metadata on another transaction's list must
-	 * be committing, and will be refiled once the commit completes:
-	 * leave it alone for now.
-	 */
-	if (jh->b_transaction != transaction) {
-		JBUFFER_TRACE(jh, "already on other transaction");
-		J_ASSERT_JH(jh, jh->b_transaction ==
-					journal->j_committing_transaction);
-		J_ASSERT_JH(jh, jh->b_next_transaction == transaction);
-		/* And this case is illegal: we can't reuse another
-		 * transaction's data buffer, ever. */
-		goto out_unlock_bh;
-	}
-
-	/* That test should have eliminated the following case: */
-	J_ASSERT_JH(jh, jh->b_frozen_data == NULL);
-
-	JBUFFER_TRACE(jh, "file as BJ_Metadata");
-	spin_lock(&journal->j_list_lock);
-	__journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
-	spin_unlock(&journal->j_list_lock);
-out_unlock_bh:
-	jbd_unlock_bh_state(bh);
-out:
-	JBUFFER_TRACE(jh, "exit");
-	return 0;
-}
-
-/*
- * journal_release_buffer: undo a get_write_access without any buffer
- * updates, if the update decided in the end that it didn't need access.
- *
- */
-void
-journal_release_buffer(handle_t *handle, struct buffer_head *bh)
-{
-	BUFFER_TRACE(bh, "entry");
-}
-
-/**
- * int journal_forget() - bforget() for potentially-journaled buffers.
- * @handle: transaction handle
- * @bh:     bh to 'forget'
- *
- * We can only do the bforget if there are no commits pending against the
- * buffer.  If the buffer is dirty in the current running transaction we
- * can safely unlink it.
- *
- * bh may not be a journalled buffer at all - it may be a non-JBD
- * buffer which came off the hashtable.  Check for this.
- *
- * Decrements bh->b_count by one.
- *
- * Allow this call even if the handle has aborted --- it may be part of
- * the caller's cleanup after an abort.
- */
-int journal_forget (handle_t *handle, struct buffer_head *bh)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	struct journal_head *jh;
-	int drop_reserve = 0;
-	int err = 0;
-	int was_modified = 0;
-
-	BUFFER_TRACE(bh, "entry");
-
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-
-	if (!buffer_jbd(bh))
-		goto not_jbd;
-	jh = bh2jh(bh);
-
-	/* Critical error: attempting to delete a bitmap buffer, maybe?
-	 * Don't do any jbd operations, and return an error. */
-	if (!J_EXPECT_JH(jh, !jh->b_committed_data,
-			 "inconsistent data on disk")) {
-		err = -EIO;
-		goto not_jbd;
-	}
-
-	/* keep track of whether or not this transaction modified us */
-	was_modified = jh->b_modified;
-
-	/*
-	 * The buffer's going from the transaction, we must drop
-	 * all references -bzzz
-	 */
-	jh->b_modified = 0;
-
-	if (jh->b_transaction == handle->h_transaction) {
-		J_ASSERT_JH(jh, !jh->b_frozen_data);
-
-		/* If we are forgetting a buffer which is already part
-		 * of this transaction, then we can just drop it from
-		 * the transaction immediately. */
-		clear_buffer_dirty(bh);
-		clear_buffer_jbddirty(bh);
-
-		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
-
-		/*
-		 * we only want to drop a reference if this transaction
-		 * modified the buffer
-		 */
-		if (was_modified)
-			drop_reserve = 1;
-
-		/*
-		 * We are no longer going to journal this buffer.
-		 * However, the commit of this transaction is still
-		 * important to the buffer: the delete that we are now
-		 * processing might obsolete an old log entry, so by
-		 * committing, we can satisfy the buffer's checkpoint.
-		 *
-		 * So, if we have a checkpoint on the buffer, we should
-		 * now refile the buffer on our BJ_Forget list so that
-		 * we know to remove the checkpoint after we commit.
-		 */
-
-		if (jh->b_cp_transaction) {
-			__journal_temp_unlink_buffer(jh);
-			__journal_file_buffer(jh, transaction, BJ_Forget);
-		} else {
-			__journal_unfile_buffer(jh);
-			if (!buffer_jbd(bh)) {
-				spin_unlock(&journal->j_list_lock);
-				jbd_unlock_bh_state(bh);
-				__bforget(bh);
-				goto drop;
-			}
-		}
-	} else if (jh->b_transaction) {
-		J_ASSERT_JH(jh, (jh->b_transaction ==
-				 journal->j_committing_transaction));
-		/* However, if the buffer is still owned by a prior
-		 * (committing) transaction, we can't drop it yet... */
-		JBUFFER_TRACE(jh, "belongs to older transaction");
-		/* ... but we CAN drop it from the new transaction if we
-		 * have also modified it since the original commit. */
-
-		if (jh->b_next_transaction) {
-			J_ASSERT(jh->b_next_transaction == transaction);
-			jh->b_next_transaction = NULL;
-
-			/*
-			 * only drop a reference if this transaction modified
-			 * the buffer
-			 */
-			if (was_modified)
-				drop_reserve = 1;
-		}
-	}
-
-not_jbd:
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh);
-	__brelse(bh);
-drop:
-	if (drop_reserve) {
-		/* no need to reserve log space for this block -bzzz */
-		handle->h_buffer_credits++;
-	}
-	return err;
-}
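/*
 * Caller-side sketch: journal_forget() consumes one b_count reference,
 * so the usual pairing with brelse() does not apply here.
 */
	err = journal_forget(handle, bh);	/* drops our bh reference */
	/* no brelse(bh) here: journal_forget() already dropped it */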
-
-/**
- * int journal_stop() - complete a transaction
- * @handle: transaction to complete.
- *
- * All done for a particular handle.
- *
- * There is not much action needed here.  We just return any remaining
- * buffer credits to the transaction and remove the handle.  The only
- * complication is that we need to start a commit operation if the
- * filesystem is marked for synchronous update.
- *
- * journal_stop itself will not usually return an error, but it may
- * do so in unusual circumstances.  In particular, expect it to
- * return -EIO if a journal_abort has been executed since the
- * transaction began.
- */
-int journal_stop(handle_t *handle)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	int err;
-	pid_t pid;
-
-	J_ASSERT(journal_current_handle() == handle);
-
-	if (is_handle_aborted(handle))
-		err = -EIO;
-	else {
-		J_ASSERT(transaction->t_updates > 0);
-		err = 0;
-	}
-
-	if (--handle->h_ref > 0) {
-		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
-			  handle->h_ref);
-		return err;
-	}
-
-	jbd_debug(4, "Handle %p going down\n", handle);
-
-	/*
-	 * Implement synchronous transaction batching.  If the handle
-	 * was synchronous, don't force a commit immediately.  Let's
-	 * yield and let another thread piggyback onto this transaction.
-	 * Keep doing that while new threads continue to arrive.
-	 * It doesn't cost much - we're about to run a commit and sleep
-	 * on IO anyway.  Speeds up many-threaded, many-dir operations
-	 * by 30x or more...
-	 *
-	 * We try and optimize the sleep time against what the underlying disk
-	 * can do, instead of having a static sleep time.  This is useful for
-	 * the case where our storage is so fast that it is more optimal to go
-	 * ahead and force a flush and wait for the transaction to be committed
-	 * than it is to wait for an arbitrary amount of time for new writers to
-	 * join the transaction.  We achieve this by measuring how long it takes
-	 * to commit a transaction, and compare it with how long this
-	 * transaction has been running, and if run time < commit time then we
-	 * sleep for the delta and commit.  This greatly helps super fast disks
-	 * that would see slowdowns as more threads started doing fsyncs.
-	 *
-	 * But don't do this if this process was the most recent one to
-	 * perform a synchronous write.  We do this to detect the case where a
-	 * single process is doing a stream of sync writes.  No point in waiting
-	 * for joiners in that case.
-	 */
-	pid = current->pid;
-	if (handle->h_sync && journal->j_last_sync_writer != pid) {
-		u64 commit_time, trans_time;
-
-		journal->j_last_sync_writer = pid;
-
-		spin_lock(&journal->j_state_lock);
-		commit_time = journal->j_average_commit_time;
-		spin_unlock(&journal->j_state_lock);
-
-		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
-						   transaction->t_start_time));
-
-		commit_time = min_t(u64, commit_time,
-				    1000*jiffies_to_usecs(1));
-
-		if (trans_time < commit_time) {
-			ktime_t expires = ktime_add_ns(ktime_get(),
-						       commit_time);
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
-		}
-	}
-
-	current->journal_info = NULL;
-	spin_lock(&journal->j_state_lock);
-	spin_lock(&transaction->t_handle_lock);
-	transaction->t_outstanding_credits -= handle->h_buffer_credits;
-	transaction->t_updates--;
-	if (!transaction->t_updates) {
-		wake_up(&journal->j_wait_updates);
-		if (journal->j_barrier_count)
-			wake_up(&journal->j_wait_transaction_locked);
-	}
-
-	/*
-	 * If the handle is marked SYNC, we need to set another commit
-	 * going!  We also want to force a commit if the current
-	 * transaction is occupying too much of the log, or if the
-	 * transaction is too old now.
-	 */
-	if (handle->h_sync ||
-			transaction->t_outstanding_credits >
-				journal->j_max_transaction_buffers ||
-			time_after_eq(jiffies, transaction->t_expires)) {
-		/* Do this even for aborted journals: an abort still
-		 * completes the commit thread, it just doesn't write
-		 * anything to disk. */
-		tid_t tid = transaction->t_tid;
-
-		spin_unlock(&transaction->t_handle_lock);
-		jbd_debug(2, "transaction too old, requesting commit for "
-					"handle %p\n", handle);
-		/* This is non-blocking */
-		__log_start_commit(journal, transaction->t_tid);
-		spin_unlock(&journal->j_state_lock);
-
-		/*
-		 * Special case: JFS_SYNC synchronous updates require us
-		 * to wait for the commit to complete.
-		 */
-		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
-			err = log_wait_commit(journal, tid);
-	} else {
-		spin_unlock(&transaction->t_handle_lock);
-		spin_unlock(&journal->j_state_lock);
-	}
-
-	lock_map_release(&handle->h_lockdep_map);
-
-	jbd_free_handle(handle);
-	return err;
-}
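/*
 * For an fsync-style synchronous update the caller marks the handle
 * before stopping it, which makes journal_stop() kick the commit and
 * wait for it; journal_force_commit() below is exactly this pattern.
 */
	handle->h_sync = 1;		/* request a synchronous commit */
	err = journal_stop(handle);	/* may sleep in log_wait_commit() */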
-
-/**
- * int journal_force_commit() - force any uncommitted transactions
- * @journal: journal to force
- *
- * For synchronous operations: force any uncommitted transactions
- * to disk.  May seem kludgy, but it reuses all the handle batching
- * code in a very simple manner.
- */
-int journal_force_commit(journal_t *journal)
-{
-	handle_t *handle;
-	int ret;
-
-	handle = journal_start(journal, 1);
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-	} else {
-		handle->h_sync = 1;
-		ret = journal_stop(handle);
-	}
-	return ret;
-}
-
-/*
- *
- * List management code snippets: various functions for manipulating the
- * transaction buffer lists.
- *
- */
-
-/*
- * Append a buffer to a transaction list, given the transaction's list head
- * pointer.
- *
- * j_list_lock is held.
- *
- * jbd_lock_bh_state(jh2bh(jh)) is held.
- */
-
-static inline void
-__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
-{
-	if (!*list) {
-		jh->b_tnext = jh->b_tprev = jh;
-		*list = jh;
-	} else {
-		/* Insert at the tail of the list to preserve order */
-		struct journal_head *first = *list, *last = first->b_tprev;
-		jh->b_tprev = last;
-		jh->b_tnext = first;
-		last->b_tnext = first->b_tprev = jh;
-	}
-}
-
-/*
- * Remove a buffer from a transaction list, given the transaction's list
- * head pointer.
- *
- * Called with j_list_lock held, and the journal may not be locked.
- *
- * jbd_lock_bh_state(jh2bh(jh)) is held.
- */
-
-static inline void
-__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
-{
-	if (*list == jh) {
-		*list = jh->b_tnext;
-		if (*list == jh)
-			*list = NULL;
-	}
-	jh->b_tprev->b_tnext = jh->b_tnext;
-	jh->b_tnext->b_tprev = jh->b_tprev;
-}
-
-/*
- * Remove a buffer from the appropriate transaction list.
- *
- * Note that this function can *change* the value of
- * bh->b_transaction->t_sync_datalist, t_buffers, t_forget,
- * t_iobuf_list, t_shadow_list, t_log_list or t_reserved_list.  If the caller
- * is holding onto a copy of one of these pointers, it could go bad.
- * Generally the caller needs to re-read the pointer from the transaction_t.
- *
- * Called under j_list_lock.  The journal may not be locked.
- */
-static void __journal_temp_unlink_buffer(struct journal_head *jh)
-{
-	struct journal_head **list = NULL;
-	transaction_t *transaction;
-	struct buffer_head *bh = jh2bh(jh);
-
-	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
-	transaction = jh->b_transaction;
-	if (transaction)
-		assert_spin_locked(&transaction->t_journal->j_list_lock);
-
-	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
-	if (jh->b_jlist != BJ_None)
-		J_ASSERT_JH(jh, transaction != NULL);
-
-	switch (jh->b_jlist) {
-	case BJ_None:
-		return;
-	case BJ_SyncData:
-		list = &transaction->t_sync_datalist;
-		break;
-	case BJ_Metadata:
-		transaction->t_nr_buffers--;
-		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
-		list = &transaction->t_buffers;
-		break;
-	case BJ_Forget:
-		list = &transaction->t_forget;
-		break;
-	case BJ_IO:
-		list = &transaction->t_iobuf_list;
-		break;
-	case BJ_Shadow:
-		list = &transaction->t_shadow_list;
-		break;
-	case BJ_LogCtl:
-		list = &transaction->t_log_list;
-		break;
-	case BJ_Reserved:
-		list = &transaction->t_reserved_list;
-		break;
-	case BJ_Locked:
-		list = &transaction->t_locked_list;
-		break;
-	}
-
-	__blist_del_buffer(list, jh);
-	jh->b_jlist = BJ_None;
-	if (test_clear_buffer_jbddirty(bh))
-		mark_buffer_dirty(bh);	/* Expose it to the VM */
-}
-
-/*
- * Remove buffer from all transactions.
- *
- * Called with bh_state lock and j_list_lock
- *
- * jh and bh may be already freed when this function returns.
- */
-void __journal_unfile_buffer(struct journal_head *jh)
-{
-	__journal_temp_unlink_buffer(jh);
-	jh->b_transaction = NULL;
-	journal_put_journal_head(jh);
-}
-
-void journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
-{
-	struct buffer_head *bh = jh2bh(jh);
-
-	/* Get reference so that buffer cannot be freed before we unlock it */
-	get_bh(bh);
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-	__journal_unfile_buffer(jh);
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh);
-	__brelse(bh);
-}
-
-/*
- * Called from journal_try_to_free_buffers().
- *
- * Called under jbd_lock_bh_state(bh)
- */
-static void
-__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
-{
-	struct journal_head *jh;
-
-	jh = bh2jh(bh);
-
-	if (buffer_locked(bh) || buffer_dirty(bh))
-		goto out;
-
-	if (jh->b_next_transaction != NULL)
-		goto out;
-
-	spin_lock(&journal->j_list_lock);
-	if (jh->b_transaction != NULL && jh->b_cp_transaction == NULL) {
-		if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) {
-			/* A written-back ordered data buffer */
-			JBUFFER_TRACE(jh, "release data");
-			__journal_unfile_buffer(jh);
-		}
-	} else if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
-		/* written-back checkpointed metadata buffer */
-		if (jh->b_jlist == BJ_None) {
-			JBUFFER_TRACE(jh, "remove from checkpoint list");
-			__journal_remove_checkpoint(jh);
-		}
-	}
-	spin_unlock(&journal->j_list_lock);
-out:
-	return;
-}
-
-/**
- * int journal_try_to_free_buffers() - try to free page buffers.
- * @journal: journal for operation
- * @page: to try and free
- * @gfp_mask: we use the mask to detect how hard we should try to release
- * buffers. If __GFP_WAIT and __GFP_FS are set, we wait for the commit code
- * to release the buffers.
- *
- * For all the buffers on this page,
- * if they are fully written out ordered data, move them onto BUF_CLEAN
- * so try_to_free_buffers() can reap them.
- *
- * This function returns non-zero if we wish try_to_free_buffers()
- * to be called. We do this if the page is releasable by try_to_free_buffers().
- * We also do it if the page has locked or dirty buffers and the caller wants
- * us to perform sync or async writeout.
- *
- * This complicates JBD locking somewhat.  We aren't protected by the
- * BKL here.  We wish to remove the buffer from its committing or
- * running transaction's ->t_datalist via __journal_unfile_buffer.
- *
- * This may *change* the value of transaction_t->t_datalist, so anyone
- * who looks at t_datalist needs to lock against this function.
- *
- * Even worse, someone may be doing a journal_dirty_data on this
- * buffer.  So we need to lock against that.  journal_dirty_data()
- * will come out of the lock with the buffer dirty, which makes it
- * ineligible for release here.
- *
- * Who else is affected by this?  hmm...  Really the only contender
- * is do_get_write_access() - it could be looking at the buffer while
- * journal_try_to_free_buffer() is changing its state.  But that
- * cannot happen because we never reallocate freed data as metadata
- * while the data is part of a transaction.  Yes?
- *
- * Return 0 on failure, 1 on success
- */
-int journal_try_to_free_buffers(journal_t *journal,
-				struct page *page, gfp_t gfp_mask)
-{
-	struct buffer_head *head;
-	struct buffer_head *bh;
-	int ret = 0;
-
-	J_ASSERT(PageLocked(page));
-
-	head = page_buffers(page);
-	bh = head;
-	do {
-		struct journal_head *jh;
-
-		/*
-		 * We take our own ref against the journal_head here to avoid
-		 * having to add tons of locking around each instance of
-		 * journal_put_journal_head().
-		 */
-		jh = journal_grab_journal_head(bh);
-		if (!jh)
-			continue;
-
-		jbd_lock_bh_state(bh);
-		__journal_try_to_free_buffer(journal, bh);
-		journal_put_journal_head(jh);
-		jbd_unlock_bh_state(bh);
-		if (buffer_jbd(bh))
-			goto busy;
-	} while ((bh = bh->b_this_page) != head);
-
-	ret = try_to_free_buffers(page);
-
-busy:
-	return ret;
-}
-
-/*
- * This buffer is no longer needed.  If it is on an older transaction's
- * checkpoint list we need to record it on this transaction's forget list
- * to pin this buffer (and hence its checkpointing transaction) down until
- * this transaction commits.  If the buffer isn't on a checkpoint list, we
- * release it.
- * Returns non-zero if JBD no longer has an interest in the buffer.
- *
- * Called under j_list_lock.
- *
- * Called under jbd_lock_bh_state(bh).
- */
-static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
-{
-	int may_free = 1;
-	struct buffer_head *bh = jh2bh(jh);
-
-	if (jh->b_cp_transaction) {
-		JBUFFER_TRACE(jh, "on running+cp transaction");
-		__journal_temp_unlink_buffer(jh);
-		/*
-		 * We don't want to write the buffer anymore, clear the
-		 * bit so that we don't confuse checks in
-		 * __journal_file_buffer
-		 */
-		clear_buffer_dirty(bh);
-		__journal_file_buffer(jh, transaction, BJ_Forget);
-		may_free = 0;
-	} else {
-		JBUFFER_TRACE(jh, "on running transaction");
-		__journal_unfile_buffer(jh);
-	}
-	return may_free;
-}
-
-/*
- * journal_invalidatepage
- *
- * This code is tricky.  It has a number of cases to deal with.
- *
- * There are two invariants which this code relies on:
- *
- * i_size must be updated on disk before we start calling invalidatepage on the
- * data.
- *
- *  This is done in ext3 by defining an ext3_setattr method which
- *  updates i_size before truncate gets going.  By maintaining this
- *  invariant, we can be sure that it is safe to throw away any buffers
- *  attached to the current transaction: once the transaction commits,
- *  we know that the data will not be needed.
- *
- *  Note however that we can *not* throw away data belonging to the
- *  previous, committing transaction!
- *
- * Any disk blocks which *are* part of the previous, committing
- * transaction (and which therefore cannot be discarded immediately) are
- * not going to be reused in the new running transaction.
- *
- *  The bitmap committed_data images guarantee this: any block which is
- *  allocated in one transaction and removed in the next will be marked
- *  as in-use in the committed_data bitmap, so cannot be reused until
- *  the next transaction to delete the block commits.  This means that
- *  leaving committing buffers dirty is quite safe: the disk blocks
- *  cannot be reallocated to a different file and so buffer aliasing is
- *  not possible.
- *
- *
- * The above applies mainly to ordered data mode.  In writeback mode we
- * don't make guarantees about the order in which data hits disk --- in
- * particular we don't guarantee that new dirty data is flushed before
- * transaction commit --- so it is always safe just to discard data
- * immediately in that mode.  --sct
- */
-
-/*
- * The journal_unmap_buffer helper function returns zero if the buffer
- * concerned remains pinned as an anonymous buffer belonging to an older
- * transaction.
- *
- * We're outside-transaction here.  Either or both of j_running_transaction
- * and j_committing_transaction may be NULL.
- */
-static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
-				int partial_page)
-{
-	transaction_t *transaction;
-	struct journal_head *jh;
-	int may_free = 1;
-
-	BUFFER_TRACE(bh, "entry");
-
-retry:
-	/*
-	 * It is safe to proceed here without the j_list_lock because the
-	 * buffers cannot be stolen by try_to_free_buffers as long as we are
-	 * holding the page lock. --sct
-	 */
-
-	if (!buffer_jbd(bh))
-		goto zap_buffer_unlocked;
-
-	spin_lock(&journal->j_state_lock);
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-
-	jh = journal_grab_journal_head(bh);
-	if (!jh)
-		goto zap_buffer_no_jh;
-
-	/*
-	 * We cannot remove the buffer from checkpoint lists until the
-	 * transaction adding inode to orphan list (let's call it T)
-	 * is committed.  Otherwise if the transaction changing the
-	 * buffer would be cleaned from the journal before T is
-	 * committed, a crash will cause that the correct contents of
-	 * the buffer will be lost.  On the other hand we have to
-	 * clear the buffer dirty bit at latest at the moment when the
-	 * transaction marking the buffer as freed in the filesystem
-	 * structures is committed because from that moment on the
-	 * block can be reallocated and used by a different page.
-	 * Since the block hasn't been freed yet but the inode has
-	 * already been added to orphan list, it is safe for us to add
-	 * the buffer to BJ_Forget list of the newest transaction.
-	 *
-	 * Also we have to clear buffer_mapped flag of a truncated buffer
-	 * because the buffer_head may be attached to the page straddling
-	 * i_size (can happen only when blocksize < pagesize) and thus the
-	 * buffer_head can be reused when the file is extended again. So we end
-	 * up keeping around invalidated buffers attached to transactions'
-	 * BJ_Forget list just to stop checkpointing code from cleaning up
-	 * the transaction this buffer was modified in.
-	 */
-	transaction = jh->b_transaction;
-	if (transaction == NULL) {
-		/* First case: not on any transaction.  If it
-		 * has no checkpoint link, then we can zap it:
-		 * it's a writeback-mode buffer so we don't care
-		 * if it hits disk safely. */
-		if (!jh->b_cp_transaction) {
-			JBUFFER_TRACE(jh, "not on any transaction: zap");
-			goto zap_buffer;
-		}
-
-		if (!buffer_dirty(bh)) {
-			/* bdflush has written it.  We can drop it now */
-			goto zap_buffer;
-		}
-
-		/* OK, it must be in the journal but still not
-		 * written fully to disk: it's metadata or
-		 * journaled data... */
-
-		if (journal->j_running_transaction) {
-			/* ... and once the current transaction has
-			 * committed, the buffer won't be needed any
-			 * longer. */
-			JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
-			may_free = __dispose_buffer(jh,
-					journal->j_running_transaction);
-			goto zap_buffer;
-		} else {
-			/* There is no currently-running transaction. So the
-			 * orphan record which we wrote for this file must have
-			 * passed into commit.  We must attach this buffer to
-			 * the committing transaction, if it exists. */
-			if (journal->j_committing_transaction) {
-				JBUFFER_TRACE(jh, "give to committing trans");
-				may_free = __dispose_buffer(jh,
-					journal->j_committing_transaction);
-				goto zap_buffer;
-			} else {
-				/* The orphan record's transaction has
-				 * committed.  We can cleanse this buffer */
-				clear_buffer_jbddirty(bh);
-				goto zap_buffer;
-			}
-		}
-	} else if (transaction == journal->j_committing_transaction) {
-		JBUFFER_TRACE(jh, "on committing transaction");
-		if (jh->b_jlist == BJ_Locked) {
-			/*
-			 * The buffer is on the committing transaction's locked
-			 * list.  We have the buffer locked, so I/O has
-			 * completed.  So we can nail the buffer now.
-			 */
-			may_free = __dispose_buffer(jh, transaction);
-			goto zap_buffer;
-		}
-		/*
-		 * The buffer is committing, we simply cannot touch
-		 * it. If the page is straddling i_size we have to wait
-		 * for commit and try again.
-		 */
-		if (partial_page) {
-			tid_t tid = journal->j_committing_transaction->t_tid;
-
-			journal_put_journal_head(jh);
-			spin_unlock(&journal->j_list_lock);
-			jbd_unlock_bh_state(bh);
-			spin_unlock(&journal->j_state_lock);
-			unlock_buffer(bh);
-			log_wait_commit(journal, tid);
-			lock_buffer(bh);
-			goto retry;
-		}
-		/*
-		 * OK, buffer won't be reachable after truncate. We just set
-		 * j_next_transaction to the running transaction (if there is
-		 * one) and mark buffer as freed so that commit code knows it
-		 * should clear dirty bits when it is done with the buffer.
-		 */
-		set_buffer_freed(bh);
-		if (journal->j_running_transaction && buffer_jbddirty(bh))
-			jh->b_next_transaction = journal->j_running_transaction;
-		journal_put_journal_head(jh);
-		spin_unlock(&journal->j_list_lock);
-		jbd_unlock_bh_state(bh);
-		spin_unlock(&journal->j_state_lock);
-		return 0;
-	} else {
-		/* Good, the buffer belongs to the running transaction.
-		 * We are writing our own transaction's data, not any
-		 * previous one's, so it is safe to throw it away
-		 * (remember that we expect the filesystem to have set
-		 * i_size already for this truncate so recovery will not
-		 * expose the disk blocks we are discarding here.) */
-		J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
-		JBUFFER_TRACE(jh, "on running transaction");
-		may_free = __dispose_buffer(jh, transaction);
-	}
-
-zap_buffer:
-	/*
-	 * This is tricky. Although the buffer is truncated, it may be reused
-	 * if blocksize < pagesize and it is attached to the page straddling
-	 * EOF. Since the buffer might have been added to BJ_Forget list of the
-	 * running transaction, journal_get_write_access() won't clear
-	 * b_modified and credit accounting gets confused. So clear b_modified
-	 * here. */
-	jh->b_modified = 0;
-	journal_put_journal_head(jh);
-zap_buffer_no_jh:
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh);
-	spin_unlock(&journal->j_state_lock);
-zap_buffer_unlocked:
-	clear_buffer_dirty(bh);
-	J_ASSERT_BH(bh, !buffer_jbddirty(bh));
-	clear_buffer_mapped(bh);
-	clear_buffer_req(bh);
-	clear_buffer_new(bh);
-	bh->b_bdev = NULL;
-	return may_free;
-}
-
-/**
- * void journal_invalidatepage() - invalidate a journal page
- * @journal: journal to use for flush
- * @page:    page to flush
- * @offset:  offset of the range to invalidate
- * @length:  length of the range to invalidate
- *
- * Reap page buffers containing data in specified range in page.
- */
-void journal_invalidatepage(journal_t *journal,
-		      struct page *page,
-		      unsigned int offset,
-		      unsigned int length)
-{
-	struct buffer_head *head, *bh, *next;
-	unsigned int stop = offset + length;
-	unsigned int curr_off = 0;
-	int partial_page = (offset || length < PAGE_CACHE_SIZE);
-	int may_free = 1;
-
-	if (!PageLocked(page))
-		BUG();
-	if (!page_has_buffers(page))
-		return;
-
-	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
-
-	/* We will potentially be playing with lists other than just the
-	 * data lists (especially for journaled data mode), so be
-	 * cautious in our locking. */
-
-	head = bh = page_buffers(page);
-	do {
-		unsigned int next_off = curr_off + bh->b_size;
-		next = bh->b_this_page;
-
-		if (next_off > stop)
-			return;
-
-		if (offset <= curr_off) {
-			/* This block is wholly outside the truncation point */
-			lock_buffer(bh);
-			may_free &= journal_unmap_buffer(journal, bh,
-							 partial_page);
-			unlock_buffer(bh);
-		}
-		curr_off = next_off;
-		bh = next;
-
-	} while (bh != head);
-
-	if (!partial_page) {
-		if (may_free && try_to_free_buffers(page))
-			J_ASSERT(!page_has_buffers(page));
-	}
-}
-
-/*
- * File a buffer on the given transaction list.
- */
-void __journal_file_buffer(struct journal_head *jh,
-			transaction_t *transaction, int jlist)
-{
-	struct journal_head **list = NULL;
-	int was_dirty = 0;
-	struct buffer_head *bh = jh2bh(jh);
-
-	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
-	assert_spin_locked(&transaction->t_journal->j_list_lock);
-
-	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
-	J_ASSERT_JH(jh, jh->b_transaction == transaction ||
-				jh->b_transaction == NULL);
-
-	if (jh->b_transaction && jh->b_jlist == jlist)
-		return;
-
-	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
-	    jlist == BJ_Shadow || jlist == BJ_Forget) {
-		/*
-		 * For metadata buffers, we track dirty bit in buffer_jbddirty
-		 * instead of buffer_dirty. We should not see a dirty bit set
-		 * here because we clear it in do_get_write_access but e.g.
-		 * tune2fs can modify the sb and set the dirty bit at any time
-		 * so we try to gracefully handle that.
-		 */
-		if (buffer_dirty(bh))
-			warn_dirty_buffer(bh);
-		if (test_clear_buffer_dirty(bh) ||
-		    test_clear_buffer_jbddirty(bh))
-			was_dirty = 1;
-	}
-
-	if (jh->b_transaction)
-		__journal_temp_unlink_buffer(jh);
-	else
-		journal_grab_journal_head(bh);
-	jh->b_transaction = transaction;
-
-	switch (jlist) {
-	case BJ_None:
-		J_ASSERT_JH(jh, !jh->b_committed_data);
-		J_ASSERT_JH(jh, !jh->b_frozen_data);
-		return;
-	case BJ_SyncData:
-		list = &transaction->t_sync_datalist;
-		break;
-	case BJ_Metadata:
-		transaction->t_nr_buffers++;
-		list = &transaction->t_buffers;
-		break;
-	case BJ_Forget:
-		list = &transaction->t_forget;
-		break;
-	case BJ_IO:
-		list = &transaction->t_iobuf_list;
-		break;
-	case BJ_Shadow:
-		list = &transaction->t_shadow_list;
-		break;
-	case BJ_LogCtl:
-		list = &transaction->t_log_list;
-		break;
-	case BJ_Reserved:
-		list = &transaction->t_reserved_list;
-		break;
-	case BJ_Locked:
-		list =  &transaction->t_locked_list;
-		break;
-	}
-
-	__blist_add_buffer(list, jh);
-	jh->b_jlist = jlist;
-
-	if (was_dirty)
-		set_buffer_jbddirty(bh);
-}
-
-void journal_file_buffer(struct journal_head *jh,
-				transaction_t *transaction, int jlist)
-{
-	jbd_lock_bh_state(jh2bh(jh));
-	spin_lock(&transaction->t_journal->j_list_lock);
-	__journal_file_buffer(jh, transaction, jlist);
-	spin_unlock(&transaction->t_journal->j_list_lock);
-	jbd_unlock_bh_state(jh2bh(jh));
-}
-
-/*
- * Remove a buffer from its current buffer list in preparation for
- * dropping it from its current transaction entirely.  If the buffer has
- * already started to be used by a subsequent transaction, refile the
- * buffer on that transaction's metadata list.
- *
- * Called under j_list_lock
- * Called under jbd_lock_bh_state(jh2bh(jh))
- *
- * jh and bh may be already free when this function returns
- */
-void __journal_refile_buffer(struct journal_head *jh)
-{
-	int was_dirty, jlist;
-	struct buffer_head *bh = jh2bh(jh);
-
-	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
-	if (jh->b_transaction)
-		assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);
-
-	/* If the buffer is now unused, just drop it. */
-	if (jh->b_next_transaction == NULL) {
-		__journal_unfile_buffer(jh);
-		return;
-	}
-
-	/*
-	 * It has been modified by a later transaction: add it to the new
-	 * transaction's metadata list.
-	 */
-
-	was_dirty = test_clear_buffer_jbddirty(bh);
-	__journal_temp_unlink_buffer(jh);
-	/*
-	 * We set b_transaction here because b_next_transaction will inherit
-	 * our jh reference and thus __journal_file_buffer() must not take a
-	 * new one.
-	 */
-	jh->b_transaction = jh->b_next_transaction;
-	jh->b_next_transaction = NULL;
-	if (buffer_freed(bh))
-		jlist = BJ_Forget;
-	else if (jh->b_modified)
-		jlist = BJ_Metadata;
-	else
-		jlist = BJ_Reserved;
-	__journal_file_buffer(jh, jh->b_transaction, jlist);
-	J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
-
-	if (was_dirty)
-		set_buffer_jbddirty(bh);
-}
-
-/*
- * __journal_refile_buffer() with necessary locking added. We take our bh
- * reference so that we can safely unlock bh.
- *
- * The jh and bh may be freed by this call.
- */
-void journal_refile_buffer(journal_t *journal, struct journal_head *jh)
-{
-	struct buffer_head *bh = jh2bh(jh);
-
-	/* Get reference so that buffer cannot be freed before we unlock it */
-	get_bh(bh);
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-	__journal_refile_buffer(jh);
-	jbd_unlock_bh_state(bh);
-	spin_unlock(&journal->j_list_lock);
-	__brelse(bh);
-}
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 4227dc4..8c44654 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -417,12 +417,12 @@
  * journal_clean_one_cp_list
  *
  * Find all the written-back checkpoint buffers in the given list and
- * release them.
+ * release them. If 'destroy' is set, clean all buffers unconditionally.
  *
  * Called with j_list_lock held.
  * Returns 1 if we freed the transaction, 0 otherwise.
  */
-static int journal_clean_one_cp_list(struct journal_head *jh)
+static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
 {
 	struct journal_head *last_jh;
 	struct journal_head *next_jh = jh;
@@ -436,7 +436,10 @@
 	do {
 		jh = next_jh;
 		next_jh = jh->b_cpnext;
-		ret = __try_to_free_cp_buf(jh);
+		if (!destroy)
+			ret = __try_to_free_cp_buf(jh);
+		else
+			ret = __jbd2_journal_remove_checkpoint(jh) + 1;
 		if (!ret)
 			return freed;
 		if (ret == 2)
@@ -459,10 +462,11 @@
  * journal_clean_checkpoint_list
  *
  * Find all the written-back checkpoint buffers in the journal and release them.
+ * If 'destroy' is set, release all buffers unconditionally.
  *
  * Called with j_list_lock held.
  */
-void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
 {
 	transaction_t *transaction, *last_transaction, *next_transaction;
 	int ret;
@@ -476,7 +480,8 @@
 	do {
 		transaction = next_transaction;
 		next_transaction = transaction->t_cpnext;
-		ret = journal_clean_one_cp_list(transaction->t_checkpoint_list);
+		ret = journal_clean_one_cp_list(transaction->t_checkpoint_list,
+						destroy);
 		/*
 		 * This function only frees up some memory if possible so we
 		 * dont have an obligation to finish processing. Bail out if
@@ -492,7 +497,7 @@
 		 * we can possibly see not yet submitted buffers on io_list
 		 */
 		ret = journal_clean_one_cp_list(transaction->
-				t_checkpoint_io_list);
+				t_checkpoint_io_list, destroy);
 		if (need_resched())
 			return;
 		/*
@@ -506,6 +511,28 @@
 }
 
 /*
+ * Remove buffers from all checkpoint lists as the journal is aborted and we
+ * just need to free memory
+ */
+void jbd2_journal_destroy_checkpoint(journal_t *journal)
+{
+	/*
+	 * We loop because __jbd2_journal_clean_checkpoint_list() may abort
+	 * early when it needs to reschedule.
+	 */
+	while (1) {
+		spin_lock(&journal->j_list_lock);
+		if (!journal->j_checkpoint_transactions) {
+			spin_unlock(&journal->j_list_lock);
+			break;
+		}
+		__jbd2_journal_clean_checkpoint_list(journal, true);
+		spin_unlock(&journal->j_list_lock);
+		cond_resched();
+	}
+}
+
+/*
  * journal_remove_checkpoint: called after a buffer has been committed
  * to disk (either by being write-back flushed to disk, or being
  * committed to the log).
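
The retry loop added in jbd2_journal_destroy_checkpoint() above is a common kernel shape: never hold the spinlock across a reschedule point, and only stop once the list has been observed empty under the lock. A minimal user-space sketch of the same loop, with a pthread mutex and sched_yield() standing in for spin_lock() and cond_resched():

#include <pthread.h>
#include <sched.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list_head;

static void drain_all(void)
{
	for (;;) {
		pthread_mutex_lock(&list_lock);
		if (!list_head) {		/* seen empty under the lock: done */
			pthread_mutex_unlock(&list_lock);
			break;
		}
		struct node *n = list_head;	/* clean one entry per pass */
		list_head = n->next;
		pthread_mutex_unlock(&list_lock);
		free(n);
		sched_yield();			/* stand-in for cond_resched() */
	}
}
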
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index b73e021..362e5f6 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -510,7 +510,7 @@
 	 * frees some memory
 	 */
 	spin_lock(&journal->j_list_lock);
-	__jbd2_journal_clean_checkpoint_list(journal);
+	__jbd2_journal_clean_checkpoint_list(journal, false);
 	spin_unlock(&journal->j_list_lock);
 
 	jbd_debug(3, "JBD2: commit phase 1\n");
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 4ff3fad..8270fe9 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1456,7 +1456,7 @@
 	sb->s_errno    = cpu_to_be32(journal->j_errno);
 	read_unlock(&journal->j_state_lock);
 
-	jbd2_write_superblock(journal, WRITE_SYNC);
+	jbd2_write_superblock(journal, WRITE_FUA);
 }
 EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
 
@@ -1693,8 +1693,17 @@
 	while (journal->j_checkpoint_transactions != NULL) {
 		spin_unlock(&journal->j_list_lock);
 		mutex_lock(&journal->j_checkpoint_mutex);
-		jbd2_log_do_checkpoint(journal);
+		err = jbd2_log_do_checkpoint(journal);
 		mutex_unlock(&journal->j_checkpoint_mutex);
+		/*
+		 * If checkpointing failed, just free the buffers to avoid
+		 * looping forever
+		 */
+		if (err) {
+			jbd2_journal_destroy_checkpoint(journal);
+			spin_lock(&journal->j_list_lock);
+			break;
+		}
 		spin_lock(&journal->j_list_lock);
 	}
 
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index f3d0617..6b8338e 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -204,6 +204,20 @@
 		 * attach this handle to a new transaction.
 		 */
 		atomic_sub(total, &t->t_outstanding_credits);
+
+		/*
+		 * Is the number of reserved credits in the current transaction too
+		 * big to fit this handle? Wait until reserved credits are freed.
+		 */
+		if (atomic_read(&journal->j_reserved_credits) + total >
+		    journal->j_max_transaction_buffers) {
+			read_unlock(&journal->j_state_lock);
+			wait_event(journal->j_wait_reserved,
+				   atomic_read(&journal->j_reserved_credits) + total <=
+				   journal->j_max_transaction_buffers);
+			return 1;
+		}
+
 		wait_transaction_locked(journal);
 		return 1;
 	}
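
The added wait follows the standard wait_event() contract: the predicate is re-checked after every wakeup, and the function returns 1 so the caller restarts the whole credit request from scratch. A condition-variable sketch of the same shape (names here are illustrative, not the jbd2 API):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t room = PTHREAD_COND_INITIALIZER;
static int reserved_credits, max_credits;

static void wait_for_reserved_room(int total)
{
	pthread_mutex_lock(&lock);
	while (reserved_credits + total > max_credits)
		pthread_cond_wait(&room, &lock);	/* lock released while asleep */
	pthread_mutex_unlock(&lock);
}
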
@@ -262,20 +276,24 @@
 	int		rsv_blocks = 0;
 	unsigned long ts = jiffies;
 
-	/*
-	 * 1/2 of transaction can be reserved so we can practically handle
-	 * only 1/2 of maximum transaction size per operation
-	 */
-	if (WARN_ON(blocks > journal->j_max_transaction_buffers / 2)) {
-		printk(KERN_ERR "JBD2: %s wants too many credits (%d > %d)\n",
-		       current->comm, blocks,
-		       journal->j_max_transaction_buffers / 2);
-		return -ENOSPC;
-	}
-
 	if (handle->h_rsv_handle)
 		rsv_blocks = handle->h_rsv_handle->h_buffer_credits;
 
+	/*
+	 * Limit the number of reserved credits to 1/2 of maximum transaction
+	 * size and limit the number of total credits to not exceed maximum
+	 * transaction size per operation.
+	 */
+	if ((rsv_blocks > journal->j_max_transaction_buffers / 2) ||
+	    (rsv_blocks + blocks > journal->j_max_transaction_buffers)) {
+		printk(KERN_ERR "JBD2: %s wants too many credits "
+		       "credits:%d rsv_credits:%d max:%d\n",
+		       current->comm, blocks, rsv_blocks,
+		       journal->j_max_transaction_buffers);
+		WARN_ON(1);
+		return -ENOSPC;
+	}
+
 alloc_transaction:
 	if (!journal->j_running_transaction) {
 		/*
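
The new sanity check replaces the old blanket "blocks > max/2" limit with a two-part rule: reserved credits may take at most half the transaction, and the combined request must still fit in one transaction. Distilled into a hypothetical helper over plain ints:

#include <errno.h>

static int check_credit_request(int blocks, int rsv_blocks, int max)
{
	if (rsv_blocks > max / 2 || rsv_blocks + blocks > max)
		return -ENOSPC;		/* caller warns and gives up */
	return 0;
}
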
@@ -1280,8 +1298,6 @@
 	triggers->t_abort(triggers, jh2bh(jh));
 }
 
-
-
 /**
  * int jbd2_journal_dirty_metadata() -  mark a buffer as containing dirty metadata
  * @handle: transaction to add buffer to.
@@ -1314,12 +1330,41 @@
 
 	if (is_handle_aborted(handle))
 		return -EROFS;
-	journal = transaction->t_journal;
-	jh = jbd2_journal_grab_journal_head(bh);
-	if (!jh) {
+	if (!buffer_jbd(bh)) {
 		ret = -EUCLEAN;
 		goto out;
 	}
+	/*
+	 * We don't grab jh reference here since the buffer must be part
+	 * of the running transaction.
+	 */
+	jh = bh2jh(bh);
+	/*
+	 * This and the following assertions are unreliable since we may see jh
+	 * in an inconsistent state unless we grab the bh_state lock. But this is
+	 * crucial to catch bugs so let's do a reliable check until the
+	 * lockless handling is fully proven.
+	 */
+	if (jh->b_transaction != transaction &&
+	    jh->b_next_transaction != transaction) {
+		jbd_lock_bh_state(bh);
+		J_ASSERT_JH(jh, jh->b_transaction == transaction ||
+				jh->b_next_transaction == transaction);
+		jbd_unlock_bh_state(bh);
+	}
+	if (jh->b_modified == 1) {
+		/* If it's in our transaction it must be in BJ_Metadata list. */
+		if (jh->b_transaction == transaction &&
+		    jh->b_jlist != BJ_Metadata) {
+			jbd_lock_bh_state(bh);
+			J_ASSERT_JH(jh, jh->b_transaction != transaction ||
+					jh->b_jlist == BJ_Metadata);
+			jbd_unlock_bh_state(bh);
+		}
+		goto out;
+	}
+
+	journal = transaction->t_journal;
 	jbd_debug(5, "journal_head %p\n", jh);
 	JBUFFER_TRACE(jh, "entry");
 
@@ -1410,7 +1455,6 @@
 	spin_unlock(&journal->j_list_lock);
 out_unlock_bh:
 	jbd_unlock_bh_state(bh);
-	jbd2_journal_put_journal_head(jh);
 out:
 	JBUFFER_TRACE(jh, "exit");
 	return ret;
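
The jbd2_journal_dirty_metadata() rework above leans on an optimistic check-then-lock pattern: read the fields without the bh_state lock, and only take the lock to re-run the assertion reliably when the cheap unlocked read looks wrong. A reduced sketch with stand-in types (not kernel APIs):

#include <assert.h>
#include <pthread.h>

struct tracked {
	pthread_mutex_t lock;
	const void *owner;
};

static void assert_owner(struct tracked *t, const void *expected)
{
	if (t->owner != expected) {		/* unlocked read: may be stale */
		pthread_mutex_lock(&t->lock);
		assert(t->owner == expected);	/* reliable under the lock */
		pthread_mutex_unlock(&t->lock);
	}
}
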
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index b9dc23c..0e026a7 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -107,8 +107,11 @@
 	if (rc)
 		return rc;
 
-	if (is_quota_modification(inode, iattr))
-		dquot_initialize(inode);
+	if (is_quota_modification(inode, iattr)) {
+		rc = dquot_initialize(inode);
+		if (rc)
+			return rc;
+	}
 	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
 	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
 		rc = dquot_transfer(inode, iattr);
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index 6b0f816..cf7936f 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -109,7 +109,9 @@
 	/*
 	 * Allocate inode to quota.
 	 */
-	dquot_initialize(inode);
+	rc = dquot_initialize(inode);
+	if (rc)
+		goto fail_drop;
 	rc = dquot_alloc_inode(inode);
 	if (rc)
 		goto fail_drop;
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index a5ac97b..35976bd 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -86,7 +86,9 @@
 
 	jfs_info("jfs_create: dip:0x%p name:%pd", dip, dentry);
 
-	dquot_initialize(dip);
+	rc = dquot_initialize(dip);
+	if (rc)
+		goto out1;
 
 	/*
 	 * search parent directory for entry/freespace
@@ -218,7 +220,9 @@
 
 	jfs_info("jfs_mkdir: dip:0x%p name:%pd", dip, dentry);
 
-	dquot_initialize(dip);
+	rc = dquot_initialize(dip);
+	if (rc)
+		goto out1;
 
 	/*
 	 * search parent directory for entry/freespace
@@ -355,8 +359,12 @@
 	jfs_info("jfs_rmdir: dip:0x%p name:%pd", dip, dentry);
 
 	/* Init inode for quota operations. */
-	dquot_initialize(dip);
-	dquot_initialize(ip);
+	rc = dquot_initialize(dip);
+	if (rc)
+		goto out;
+	rc = dquot_initialize(ip);
+	if (rc)
+		goto out;
 
 	/* directory must be empty to be removed */
 	if (!dtEmpty(ip)) {
@@ -483,8 +491,12 @@
 	jfs_info("jfs_unlink: dip:0x%p name:%pd", dip, dentry);
 
 	/* Init inode for quota operations. */
-	dquot_initialize(dip);
-	dquot_initialize(ip);
+	rc = dquot_initialize(dip);
+	if (rc)
+		goto out;
+	rc = dquot_initialize(ip);
+	if (rc)
+		goto out;
 
 	if ((rc = get_UCSname(&dname, dentry)))
 		goto out;
@@ -799,7 +811,9 @@
 
 	jfs_info("jfs_link: %pd %pd", old_dentry, dentry);
 
-	dquot_initialize(dir);
+	rc = dquot_initialize(dir);
+	if (rc)
+		goto out;
 
 	tid = txBegin(ip->i_sb, 0);
 
@@ -810,7 +824,7 @@
 	 * scan parent directory for entry/freespace
 	 */
 	if ((rc = get_UCSname(&dname, dentry)))
-		goto out;
+		goto out_tx;
 
 	if ((rc = dtSearch(dir, &dname, &ino, &btstack, JFS_CREATE)))
 		goto free_dname;
@@ -842,12 +856,13 @@
       free_dname:
 	free_UCSname(&dname);
 
-      out:
+      out_tx:
 	txEnd(tid);
 
 	mutex_unlock(&JFS_IP(ip)->commit_mutex);
 	mutex_unlock(&JFS_IP(dir)->commit_mutex);
 
+      out:
 	jfs_info("jfs_link: rc:%d", rc);
 	return rc;
 }
@@ -891,7 +906,9 @@
 
 	jfs_info("jfs_symlink: dip:0x%p name:%s", dip, name);
 
-	dquot_initialize(dip);
+	rc = dquot_initialize(dip);
+	if (rc)
+		goto out1;
 
 	ssize = strlen(name) + 1;
 
@@ -1082,8 +1099,12 @@
 
 	jfs_info("jfs_rename: %pd %pd", old_dentry, new_dentry);
 
-	dquot_initialize(old_dir);
-	dquot_initialize(new_dir);
+	rc = dquot_initialize(old_dir);
+	if (rc)
+		goto out1;
+	rc = dquot_initialize(new_dir);
+	if (rc)
+		goto out1;
 
 	old_ip = d_inode(old_dentry);
 	new_ip = d_inode(new_dentry);
@@ -1130,7 +1151,9 @@
 	} else if (new_ip) {
 		IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL);
 		/* Init inode for quota operations. */
-		dquot_initialize(new_ip);
+		rc = dquot_initialize(new_ip);
+		if (rc)
+			goto out_unlock;
 	}
 
 	/*
@@ -1318,6 +1341,7 @@
 
 		clear_cflag(COMMIT_Stale, old_dir);
 	}
+      out_unlock:
 	if (new_ip && !S_ISDIR(new_ip->i_mode))
 		IWRITE_UNLOCK(new_ip);
       out3:
@@ -1353,7 +1377,9 @@
 
 	jfs_info("jfs_mknod: %pd", dentry);
 
-	dquot_initialize(dir);
+	rc = dquot_initialize(dir);
+	if (rc)
+		goto out;
 
 	if ((rc = get_UCSname(&dname, dentry)))
 		goto out;
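
With dquot_initialize() now able to fail, every jfs caller grows the unwind shape seen above; jfs_link() additionally splits its exit label so an early quota failure skips the transaction teardown it never started. A compact sketch of that label split, with all helpers stubbed out for illustration:

static int init_quota(void)  { return 0; }
static void begin_tx(void)   { }
static void end_tx(void)     { }
static int do_lookup(void)   { return 0; }
static int do_commit(void)   { return 0; }

static int link_op(void)
{
	int rc;

	rc = init_quota();	/* may now fail */
	if (rc)
		goto out;	/* no transaction started yet */

	begin_tx();
	rc = do_lookup();
	if (rc)
		goto out_tx;	/* unwind the transaction only */
	rc = do_commit();
out_tx:
	end_tx();
out:
	return rc;
}
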
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 55505cb..d678bcc 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -322,6 +322,11 @@
 	return error;
 }
 
+static struct svc_serv_ops lockd_sv_ops = {
+	.svo_shutdown		= svc_rpcb_cleanup,
+	.svo_enqueue_xprt	= svc_xprt_do_enqueue,
+};
+
 static struct svc_serv *lockd_create_svc(void)
 {
 	struct svc_serv *serv;
@@ -350,7 +355,7 @@
 		nlm_timeout = LOCKD_DFLT_TIMEO;
 	nlmsvc_timeout = nlm_timeout * HZ;
 
-	serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, svc_rpcb_cleanup);
+	serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, &lockd_sv_ops);
 	if (!serv) {
 		printk(KERN_WARNING "lockd_up: create service failed\n");
 		return ERR_PTR(-ENOMEM);
@@ -586,6 +591,7 @@
 
 	INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
 	INIT_LIST_HEAD(&ln->lockd_manager.list);
+	ln->lockd_manager.block_opens = false;
 	spin_lock_init(&ln->nsm_clnt_lock);
 	return 0;
 }
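
The svc_create() signature change above trades a single shutdown callback for an operations table, letting each RPC service carry several behaviours in one struct. A generic sketch of the pattern (the struct and names below are made up, not the sunrpc API):

struct service;

struct service_ops {
	void (*shutdown)(struct service *sv);
	void (*enqueue_xprt)(struct service *sv);
};

static void lockd_shutdown(struct service *sv)  { (void)sv; /* rpcbind cleanup */ }
static void default_enqueue(struct service *sv) { (void)sv; /* queue transport */ }

static const struct service_ops lockd_ops = {
	.shutdown     = lockd_shutdown,
	.enqueue_xprt = default_enqueue,
};

/* A creator would then take &lockd_ops instead of one function pointer. */
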
diff --git a/fs/locks.c b/fs/locks.c
index d3d558b..2a54c80 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1568,6 +1568,7 @@
  * 			    desired lease.
  * @dentry:	dentry to check
  * @arg:	type of lease that we're trying to acquire
+ * @flags:	current lock flags
  *
  * Check to see if there's an existing open fd on this file that would
  * conflict with the lease we're trying to set.
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 682529c..2c4a0b56 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -308,6 +308,10 @@
 	return ret;
 }
 
+static struct svc_serv_ops nfs_cb_sv_ops = {
+	.svo_enqueue_xprt	= svc_xprt_do_enqueue,
+};
+
 static struct svc_serv *nfs_callback_create_svc(int minorversion)
 {
 	struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
@@ -333,7 +337,7 @@
 		printk(KERN_WARNING "nfs_callback_create_svc: no kthread, %d users??\n",
 			cb_info->users);
 
-	serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
+	serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, &nfs_cb_sv_ops);
 	if (!serv) {
 		printk(KERN_ERR "nfs_callback_create_svc: create service failed\n");
 		return ERR_PTR(-ENOMEM);
diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c
index ae6e58e..fd8c9a5 100644
--- a/fs/nfs_common/grace.c
+++ b/fs/nfs_common/grace.c
@@ -63,14 +63,33 @@
  * lock reclaims.
  */
 int
-locks_in_grace(struct net *net)
+__state_in_grace(struct net *net, bool open)
 {
 	struct list_head *grace_list = net_generic(net, grace_net_id);
+	struct lock_manager *lm;
 
-	return !list_empty(grace_list);
+	if (!open)
+		return !list_empty(grace_list);
+
+	list_for_each_entry(lm, grace_list, list) {
+		if (lm->block_opens)
+			return true;
+	}
+	return false;
+}
+
+int locks_in_grace(struct net *net)
+{
+	return __state_in_grace(net, 0);
 }
 EXPORT_SYMBOL_GPL(locks_in_grace);
 
+int opens_in_grace(struct net *net)
+{
+	return __state_in_grace(net, 1);
+}
+EXPORT_SYMBOL_GPL(opens_in_grace);
+
 static int __net_init
 grace_init_net(struct net *net)
 {
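
__state_in_grace() above answers two distinct questions: any manager on the grace list blocks lock reclaims, but only managers that set block_opens (as nfsd now does when its grace period starts) also block opens. The same scan over a simplified singly linked list:

#include <stdbool.h>
#include <stddef.h>

struct lock_manager { bool block_opens; struct lock_manager *next; };

static bool locks_blocked(const struct lock_manager *grace_list)
{
	return grace_list != NULL;	/* any manager in grace counts */
}

static bool opens_blocked(const struct lock_manager *grace_list)
{
	for (const struct lock_manager *lm = grace_list; lm; lm = lm->next)
		if (lm->block_opens)
			return true;
	return false;
}
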
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index f79521a..b4d84b5 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1075,73 +1075,6 @@
 	return rv;
 }
 
-/* Iterator */
-
-static void *e_start(struct seq_file *m, loff_t *pos)
-	__acquires(((struct cache_detail *)m->private)->hash_lock)
-{
-	loff_t n = *pos;
-	unsigned hash, export;
-	struct cache_head *ch;
-	struct cache_detail *cd = m->private;
-	struct cache_head **export_table = cd->hash_table;
-
-	read_lock(&cd->hash_lock);
-	if (!n--)
-		return SEQ_START_TOKEN;
-	hash = n >> 32;
-	export = n & ((1LL<<32) - 1);
-
-	
-	for (ch=export_table[hash]; ch; ch=ch->next)
-		if (!export--)
-			return ch;
-	n &= ~((1LL<<32) - 1);
-	do {
-		hash++;
-		n += 1LL<<32;
-	} while(hash < EXPORT_HASHMAX && export_table[hash]==NULL);
-	if (hash >= EXPORT_HASHMAX)
-		return NULL;
-	*pos = n+1;
-	return export_table[hash];
-}
-
-static void *e_next(struct seq_file *m, void *p, loff_t *pos)
-{
-	struct cache_head *ch = p;
-	int hash = (*pos >> 32);
-	struct cache_detail *cd = m->private;
-	struct cache_head **export_table = cd->hash_table;
-
-	if (p == SEQ_START_TOKEN)
-		hash = 0;
-	else if (ch->next == NULL) {
-		hash++;
-		*pos += 1LL<<32;
-	} else {
-		++*pos;
-		return ch->next;
-	}
-	*pos &= ~((1LL<<32) - 1);
-	while (hash < EXPORT_HASHMAX && export_table[hash] == NULL) {
-		hash++;
-		*pos += 1LL<<32;
-	}
-	if (hash >= EXPORT_HASHMAX)
-		return NULL;
-	++*pos;
-	return export_table[hash];
-}
-
-static void e_stop(struct seq_file *m, void *p)
-	__releases(((struct cache_detail *)m->private)->hash_lock)
-{
-	struct cache_detail *cd = m->private;
-
-	read_unlock(&cd->hash_lock);
-}
-
 static struct flags {
 	int flag;
 	char *name[2];
@@ -1270,9 +1203,9 @@
 }
 
 const struct seq_operations nfs_exports_op = {
-	.start	= e_start,
-	.next	= e_next,
-	.stop	= e_stop,
+	.start	= cache_seq_start,
+	.next	= cache_seq_next,
+	.stop	= cache_seq_stop,
 	.show	= e_show,
 };
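
The removed e_start()/e_next() iterator packed a two-level cursor into the 64-bit seq_file position: hash bucket in the high 32 bits, entry index within the bucket in the low 32 bits. That bookkeeping now lives behind the generic cache_seq_* helpers; the old encoding, pulled out for clarity:

#include <stdint.h>

static uint64_t pos_pack(uint32_t hash, uint32_t entry)
{
	return ((uint64_t)hash << 32) | entry;
}

static void pos_unpack(uint64_t pos, uint32_t *hash, uint32_t *entry)
{
	*hash = (uint32_t)(pos >> 32);
	*entry = (uint32_t)(pos & 0xffffffffu);
}
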
 
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index 1f52bfc..2e31507 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -6,6 +6,7 @@
 
 #include <linux/sunrpc/cache.h>
 #include <uapi/linux/nfsd/export.h>
+#include <linux/nfs4.h>
 
 struct knfsd_fh;
 struct svc_fh;
diff --git a/fs/nfsd/idmap.h b/fs/nfsd/idmap.h
index a3f3490..23cc85d 100644
--- a/fs/nfsd/idmap.h
+++ b/fs/nfsd/idmap.h
@@ -37,9 +37,7 @@
 
 #include <linux/in.h>
 #include <linux/sunrpc/svc.h>
-
-/* XXX from linux/nfs_idmap.h */
-#define IDMAP_NAMESZ 128
+#include <linux/nfs_idmap.h>
 
 #ifdef CONFIG_NFSD_V4
 int nfsd_idmap_init(struct net *);
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index ea6749a..d8b16c2 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -110,6 +110,7 @@
 	unsigned int max_connections;
 
 	u32 clientid_counter;
+	u32 clverifier_counter;
 
 	struct svc_serv *nfsd_serv;
 };
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index d54701f..1580ea6 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -44,13 +44,13 @@
 
 	inode = d_inode(fh->fh_dentry);
 
-	if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
+	if (argp->mask & ~NFS_ACL_MASK)
 		RETURN_STATUS(nfserr_inval);
 	resp->mask = argp->mask;
 
 	nfserr = fh_getattr(fh, &resp->stat);
 	if (nfserr)
-		goto fail;
+		RETURN_STATUS(nfserr);
 
 	if (resp->mask & (NFS_ACL|NFS_ACLCNT)) {
 		acl = get_acl(inode, ACL_TYPE_ACCESS);
@@ -202,7 +202,7 @@
 	if (!p)
 		return 0;
 	argp->mask = ntohl(*p++);
-	if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT) ||
+	if (argp->mask & ~NFS_ACL_MASK ||
 	    !xdr_argsize_check(rqstp, p))
 		return 0;
 
@@ -293,9 +293,7 @@
 				  resp->acl_default,
 				  resp->mask & NFS_DFACL,
 				  NFS_ACL_DEFAULT);
-	if (n <= 0)
-		return 0;
-	return 1;
+	return (n > 0);
 }
 
 static int nfsaclsvc_encode_attrstatres(struct svc_rqst *rqstp, __be32 *p,
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
index 882b1a1..01df4cd 100644
--- a/fs/nfsd/nfs3acl.c
+++ b/fs/nfsd/nfs3acl.c
@@ -41,7 +41,7 @@
 
 	inode = d_inode(fh->fh_dentry);
 
-	if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
+	if (argp->mask & ~NFS_ACL_MASK)
 		RETURN_STATUS(nfserr_inval);
 	resp->mask = argp->mask;
 
@@ -148,7 +148,7 @@
 	if (!p)
 		return 0;
 	args->mask = ntohl(*p++);
-	if (args->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT) ||
+	if (args->mask & ~NFS_ACL_MASK ||
 	    !xdr_argsize_check(rqstp, p))
 		return 0;
 
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index eb5accf..6adabd6 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -34,8 +34,10 @@
  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <linux/fs.h>
 #include <linux/slab.h>
-#include <linux/nfs_fs.h>
+#include <linux/posix_acl.h>
+
 #include "nfsfh.h"
 #include "nfsd.h"
 #include "acl.h"
@@ -100,7 +102,7 @@
 /* We only map from NFSv4 to POSIX ACLs when setting ACLs, when we err on the
  * side of being more restrictive, so the mode bit mapping below is
  * pessimistic.  An optimistic version would be needed to handle DENY's,
- * but we espect to coalesce all ALLOWs and DENYs before mapping to mode
+ * but we expect to coalesce all ALLOWs and DENYs before mapping to mode
  * bits. */
 
 static void
@@ -458,7 +460,7 @@
 	state->empty = 1;
 	/*
 	 * In the worst case, each individual acl could be for a distinct
-	 * named user or group, but we don't no which, so we allocate
+	 * named user or group, but we don't know which, so we allocate
 	 * enough space for either:
 	 */
 	alloc = sizeof(struct posix_ace_state_array)
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index a492018..e7f50c4 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -435,12 +435,12 @@
 	 */
 	status = 0;
 out:
-	if (status)
-		nfsd4_mark_cb_fault(cb->cb_clp, status);
+	cb->cb_seq_status = status;
 	return status;
 out_overflow:
 	print_overflow_msg(__func__, xdr);
-	return -EIO;
+	status = -EIO;
+	goto out;
 }
 
 static int decode_cb_sequence4res(struct xdr_stream *xdr,
@@ -451,11 +451,10 @@
 	if (cb->cb_minorversion == 0)
 		return 0;
 
-	status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_status);
-	if (unlikely(status || cb->cb_status))
+	status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_seq_status);
+	if (unlikely(status || cb->cb_seq_status))
 		return status;
 
-	cb->cb_update_seq_nr = true;
 	return decode_cb_sequence4resok(xdr, cb);
 }
 
@@ -527,7 +526,7 @@
 
 	if (cb != NULL) {
 		status = decode_cb_sequence4res(xdr, cb);
-		if (unlikely(status || cb->cb_status))
+		if (unlikely(status || cb->cb_seq_status))
 			return status;
 	}
 
@@ -617,7 +616,7 @@
 
 	if (cb) {
 		status = decode_cb_sequence4res(xdr, cb);
-		if (unlikely(status || cb->cb_status))
+		if (unlikely(status || cb->cb_seq_status))
 			return status;
 	}
 	return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status);
@@ -876,7 +875,11 @@
 	u32 minorversion = clp->cl_minorversion;
 
 	cb->cb_minorversion = minorversion;
-	cb->cb_update_seq_nr = false;
+	/*
+	 * cb_seq_status is only set in decode_cb_sequence4res,
+	 * and so will remain 1 if an rpc level failure occurs.
+	 */
+	cb->cb_seq_status = 1;
 	cb->cb_status = 0;
 	if (minorversion) {
 		if (!nfsd41_cb_get_slot(clp, task))
@@ -885,6 +888,84 @@
 	rpc_call_start(task);
 }
 
+static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback *cb)
+{
+	struct nfs4_client *clp = cb->cb_clp;
+	struct nfsd4_session *session = clp->cl_cb_session;
+	bool ret = true;
+
+	if (!clp->cl_minorversion) {
+		/*
+		 * If the backchannel connection was shut down while this
+		 * task was queued, we need to resubmit it after setting up
+		 * a new backchannel connection.
+		 *
+		 * Note that if we lost our callback connection permanently
+		 * the submission code will error out, so we don't need to
+		 * handle that case here.
+		 */
+		if (task->tk_flags & RPC_TASK_KILLED)
+			goto need_restart;
+
+		return true;
+	}
+
+	switch (cb->cb_seq_status) {
+	case 0:
+		/*
+		 * No need for lock, access serialized in nfsd4_cb_prepare
+		 *
+		 * RFC5661 20.9.3
+		 * If CB_SEQUENCE returns an error, then the state of the slot
+		 * (sequence ID, cached reply) MUST NOT change.
+		 */
+		++session->se_cb_seq_nr;
+		break;
+	case -ESERVERFAULT:
+		++session->se_cb_seq_nr;
+	case 1:
+	case -NFS4ERR_BADSESSION:
+		nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
+		ret = false;
+		break;
+	case -NFS4ERR_DELAY:
+		if (!rpc_restart_call(task))
+			goto out;
+
+		rpc_delay(task, 2 * HZ);
+		return false;
+	case -NFS4ERR_BADSLOT:
+		goto retry_nowait;
+	case -NFS4ERR_SEQ_MISORDERED:
+		if (session->se_cb_seq_nr != 1) {
+			session->se_cb_seq_nr = 1;
+			goto retry_nowait;
+		}
+		break;
+	default:
+		dprintk("%s: unprocessed error %d\n", __func__,
+			cb->cb_seq_status);
+	}
+
+	clear_bit(0, &clp->cl_cb_slot_busy);
+	rpc_wake_up_next(&clp->cl_cb_waitq);
+	dprintk("%s: freed slot, new seqid=%d\n", __func__,
+		clp->cl_cb_session->se_cb_seq_nr);
+
+	if (task->tk_flags & RPC_TASK_KILLED)
+		goto need_restart;
+out:
+	return ret;
+retry_nowait:
+	if (rpc_restart_call_prepare(task))
+		ret = false;
+	goto out;
+need_restart:
+	task->tk_status = 0;
+	cb->cb_need_restart = true;
+	return false;
+}
+
 static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
 {
 	struct nfsd4_callback *cb = calldata;
@@ -893,37 +974,8 @@
 	dprintk("%s: minorversion=%d\n", __func__,
 		clp->cl_minorversion);
 
-	if (clp->cl_minorversion) {
-		/*
-		 * No need for lock, access serialized in nfsd4_cb_prepare
-		 *
-		 * RFC5661 20.9.3
-		 * If CB_SEQUENCE returns an error, then the state of the slot
-		 * (sequence ID, cached reply) MUST NOT change.
-		 */
-		if (cb->cb_update_seq_nr)
-			++clp->cl_cb_session->se_cb_seq_nr;
-
-		clear_bit(0, &clp->cl_cb_slot_busy);
-		rpc_wake_up_next(&clp->cl_cb_waitq);
-		dprintk("%s: freed slot, new seqid=%d\n", __func__,
-			clp->cl_cb_session->se_cb_seq_nr);
-	}
-
-	/*
-	 * If the backchannel connection was shut down while this
-	 * task was queued, we need to resubmit it after setting up
-	 * a new backchannel connection.
-	 *
-	 * Note that if we lost our callback connection permanently
-	 * the submission code will error out, so we don't need to
-	 * handle that case here.
-	 */
-	if (task->tk_flags & RPC_TASK_KILLED) {
-		task->tk_status = 0;
-		cb->cb_need_restart = true;
+	if (!nfsd4_cb_sequence_done(task, cb))
 		return;
-	}
 
 	if (cb->cb_status) {
 		WARN_ON_ONCE(task->tk_status);
@@ -1099,8 +1151,8 @@
 	cb->cb_msg.rpc_resp = cb;
 	cb->cb_ops = ops;
 	INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
+	cb->cb_seq_status = 1;
 	cb->cb_status = 0;
-	cb->cb_update_seq_nr = false;
 	cb->cb_need_restart = false;
 }
 
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index e1b3d3d..5b20577 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -59,9 +59,6 @@
  * that.
  */
 
-#define IDMAP_TYPE_USER  0
-#define IDMAP_TYPE_GROUP 1
-
 struct ent {
 	struct cache_head h;
 	int               type;		       /* User / Group */
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 90cfda7..4ce6b97 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -276,13 +276,13 @@
 			nfsd4_security_inode_setsecctx(*resfh, &open->op_label, open->op_bmval);
 
 		/*
-		 * Following rfc 3530 14.2.16, use the returned bitmask
-		 * to indicate which attributes we used to store the
-		 * verifier:
+		 * Following rfc 3530 14.2.16, and rfc 5661 18.16.4
+		 * use the returned bitmask to indicate which attributes
+		 * we used to store the verifier:
 		 */
-		if (open->op_createmode == NFS4_CREATE_EXCLUSIVE && status == 0)
-			open->op_bmval[1] = (FATTR4_WORD1_TIME_ACCESS |
-							FATTR4_WORD1_TIME_MODIFY);
+		if (nfsd_create_is_exclusive(open->op_createmode) && status == 0)
+			open->op_bmval[1] |= (FATTR4_WORD1_TIME_ACCESS |
+						FATTR4_WORD1_TIME_MODIFY);
 	} else
 		/*
 		 * Note this may exit with the parent still locked.
@@ -362,7 +362,6 @@
 {
 	__be32 status;
 	struct svc_fh *resfh = NULL;
-	struct nfsd4_compoundres *resp;
 	struct net *net = SVC_NET(rqstp);
 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
@@ -389,8 +388,7 @@
 		copy_clientid(&open->op_clientid, cstate->session);
 
 	/* check seqid for replay. set nfs4_owner */
-	resp = rqstp->rq_resp;
-	status = nfsd4_process_open1(&resp->cstate, open, nn);
+	status = nfsd4_process_open1(cstate, open, nn);
 	if (status == nfserr_replay_me) {
 		struct nfs4_replay *rp = &open->op_openowner->oo_owner.so_replay;
 		fh_put(&cstate->current_fh);
@@ -417,10 +415,10 @@
 	/* Openowner is now set, so sequence id will get bumped.  Now we need
 	 * these checks before we do any creates: */
 	status = nfserr_grace;
-	if (locks_in_grace(net) && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
+	if (opens_in_grace(net) && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
 		goto out;
 	status = nfserr_no_grace;
-	if (!locks_in_grace(net) && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
+	if (!opens_in_grace(net) && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
 		goto out;
 
 	switch (open->op_claim_type) {
@@ -829,7 +827,7 @@
 {
 	__be32 status;
 
-	if (locks_in_grace(SVC_NET(rqstp)))
+	if (opens_in_grace(SVC_NET(rqstp)))
 		return nfserr_grace;
 	status = nfsd_unlink(rqstp, &cstate->current_fh, 0,
 			     remove->rm_name, remove->rm_namelen);
@@ -848,7 +846,7 @@
 
 	if (!cstate->save_fh.fh_dentry)
 		return status;
-	if (locks_in_grace(SVC_NET(rqstp)) &&
+	if (opens_in_grace(SVC_NET(rqstp)) &&
 		!(cstate->save_fh.fh_export->ex_flags & NFSEXP_NOSUBTREECHECK))
 		return nfserr_grace;
 	status = nfsd_rename(rqstp, &cstate->save_fh, rename->rn_sname,
@@ -1364,10 +1362,6 @@
 		goto out;
 	}
 
-	nfserr = ops->proc_layoutcommit(inode, lcp);
-	if (nfserr)
-		goto out_put_stid;
-
 	if (new_size > i_size_read(inode)) {
 		lcp->lc_size_chg = 1;
 		lcp->lc_newsize = new_size;
@@ -1375,7 +1369,7 @@
 		lcp->lc_size_chg = 0;
 	}
 
-out_put_stid:
+	nfserr = ops->proc_layoutcommit(inode, lcp);
 	nfs4_put_stid(&ls->ls_stid);
 out:
 	return nfserr;
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index d88ea7b..e3d4709 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -272,6 +272,7 @@
 		.ctx.actor = nfsd4_build_namelist,
 		.names = LIST_HEAD_INIT(ctx.names)
 	};
+	struct name_list *entry, *tmp;
 	int status;
 
 	status = nfs4_save_creds(&original_cred);
@@ -286,9 +287,8 @@
 
 	status = iterate_dir(nn->rec_file, &ctx.ctx);
 	mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT);
-	while (!list_empty(&ctx.names)) {
-		struct name_list *entry;
-		entry = list_entry(ctx.names.next, struct name_list, list);
+
+	list_for_each_entry_safe(entry, tmp, &ctx.names, list) {
 		if (!status) {
 			struct dentry *dentry;
 			dentry = lookup_one_len(entry->name, dir, HEXDIR_LEN-1);
@@ -304,6 +304,12 @@
 	}
 	mutex_unlock(&d_inode(dir)->i_mutex);
 	nfs4_reset_creds(original_cred);
+
+	list_for_each_entry_safe(entry, tmp, &ctx.names, list) {
+		dprintk("NFSD: %s. Left entry %s\n", __func__, entry->name);
+		list_del(&entry->list);
+		kfree(entry);
+	}
 	return status;
 }
 
@@ -541,8 +547,7 @@
 
 	/* XXX: The legacy code won't work in a container */
 	if (net != &init_net) {
-		WARN(1, KERN_ERR "NFSD: attempt to initialize legacy client "
-			"tracking in a container!\n");
+		pr_warn("NFSD: attempt to initialize legacy client tracking in a container ignored.\n");
 		return -EINVAL;
 	}
 
@@ -1254,8 +1259,7 @@
 
 	/* XXX: The usermode helper is not working in a container yet. */
 	if (net != &init_net) {
-		WARN(1, KERN_ERR "NFSD: attempt to initialize umh client "
-			"tracking in a container!\n");
+		pr_warn("NFSD: attempt to initialize umh client tracking in a container ignored.\n");
 		return -EINVAL;
 	}
 
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 9520271..0f1d569 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -777,13 +777,16 @@
 	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
 }
 
-static void
+static bool
 unhash_delegation_locked(struct nfs4_delegation *dp)
 {
 	struct nfs4_file *fp = dp->dl_stid.sc_file;
 
 	lockdep_assert_held(&state_lock);
 
+	if (list_empty(&dp->dl_perfile))
+		return false;
+
 	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
 	/* Ensure that deleg break won't try to requeue it */
 	++dp->dl_time;
@@ -792,16 +795,21 @@
 	list_del_init(&dp->dl_recall_lru);
 	list_del_init(&dp->dl_perfile);
 	spin_unlock(&fp->fi_lock);
+	return true;
 }
 
 static void destroy_delegation(struct nfs4_delegation *dp)
 {
+	bool unhashed;
+
 	spin_lock(&state_lock);
-	unhash_delegation_locked(dp);
+	unhashed = unhash_delegation_locked(dp);
 	spin_unlock(&state_lock);
-	put_clnt_odstate(dp->dl_clnt_odstate);
-	nfs4_put_deleg_lease(dp->dl_stid.sc_file);
-	nfs4_put_stid(&dp->dl_stid);
+	if (unhashed) {
+		put_clnt_odstate(dp->dl_clnt_odstate);
+		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
+		nfs4_put_stid(&dp->dl_stid);
+	}
 }
 
 static void revoke_delegation(struct nfs4_delegation *dp)
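
The switch to a bool return from unhash_delegation_locked() (and the unhash_*_stateid helpers below) encodes ownership: only the caller whose unhash actually removed the object from its lists may drop the list's references, so two racing teardown paths cannot double-put. The convention in miniature, with locking reduced to comments:

#include <stdbool.h>

struct stid { bool hashed; int refcount; };

static bool unhash_locked(struct stid *s)
{
	if (!s->hashed)
		return false;	/* another path already unhashed it */
	s->hashed = false;
	return true;
}

static void destroy(struct stid *s)
{
	bool unhashed;

	/* spin_lock(&state_lock); */
	unhashed = unhash_locked(s);
	/* spin_unlock(&state_lock); */
	if (unhashed)
		s->refcount--;	/* drop the list's reference exactly once */
}
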
@@ -990,6 +998,12 @@
 	}
 }
 
+static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
+{
+	kfree(sop->so_owner.data);
+	sop->so_ops->so_free(sop);
+}
+
 static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
 {
 	struct nfs4_client *clp = sop->so_client;
@@ -1000,20 +1014,23 @@
 		return;
 	sop->so_ops->so_unhash(sop);
 	spin_unlock(&clp->cl_lock);
-	kfree(sop->so_owner.data);
-	sop->so_ops->so_free(sop);
+	nfs4_free_stateowner(sop);
 }
 
-static void unhash_ol_stateid(struct nfs4_ol_stateid *stp)
+static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
 {
 	struct nfs4_file *fp = stp->st_stid.sc_file;
 
 	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
 
+	if (list_empty(&stp->st_perfile))
+		return false;
+
 	spin_lock(&fp->fi_lock);
-	list_del(&stp->st_perfile);
+	list_del_init(&stp->st_perfile);
 	spin_unlock(&fp->fi_lock);
 	list_del(&stp->st_perstateowner);
+	return true;
 }
 
 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
@@ -1063,25 +1080,27 @@
 	list_add(&stp->st_locks, reaplist);
 }
 
-static void unhash_lock_stateid(struct nfs4_ol_stateid *stp)
+static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
 {
 	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
 
 	lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
 
 	list_del_init(&stp->st_locks);
-	unhash_ol_stateid(stp);
 	nfs4_unhash_stid(&stp->st_stid);
+	return unhash_ol_stateid(stp);
 }
 
 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
 {
 	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
+	bool unhashed;
 
 	spin_lock(&oo->oo_owner.so_client->cl_lock);
-	unhash_lock_stateid(stp);
+	unhashed = unhash_lock_stateid(stp);
 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
-	nfs4_put_stid(&stp->st_stid);
+	if (unhashed)
+		nfs4_put_stid(&stp->st_stid);
 }
 
 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
@@ -1129,7 +1148,7 @@
 	while (!list_empty(&lo->lo_owner.so_stateids)) {
 		stp = list_first_entry(&lo->lo_owner.so_stateids,
 				struct nfs4_ol_stateid, st_perstateowner);
-		unhash_lock_stateid(stp);
+		WARN_ON(!unhash_lock_stateid(stp));
 		put_ol_stateid_locked(stp, &reaplist);
 	}
 	spin_unlock(&clp->cl_lock);
@@ -1142,21 +1161,26 @@
 {
 	struct nfs4_ol_stateid *stp;
 
+	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
+
 	while (!list_empty(&open_stp->st_locks)) {
 		stp = list_entry(open_stp->st_locks.next,
 				struct nfs4_ol_stateid, st_locks);
-		unhash_lock_stateid(stp);
+		WARN_ON(!unhash_lock_stateid(stp));
 		put_ol_stateid_locked(stp, reaplist);
 	}
 }
 
-static void unhash_open_stateid(struct nfs4_ol_stateid *stp,
+static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
 				struct list_head *reaplist)
 {
+	bool unhashed;
+
 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
 
-	unhash_ol_stateid(stp);
+	unhashed = unhash_ol_stateid(stp);
 	release_open_stateid_locks(stp, reaplist);
+	return unhashed;
 }
 
 static void release_open_stateid(struct nfs4_ol_stateid *stp)
@@ -1164,8 +1188,8 @@
 	LIST_HEAD(reaplist);
 
 	spin_lock(&stp->st_stid.sc_client->cl_lock);
-	unhash_open_stateid(stp, &reaplist);
-	put_ol_stateid_locked(stp, &reaplist);
+	if (unhash_open_stateid(stp, &reaplist))
+		put_ol_stateid_locked(stp, &reaplist);
 	spin_unlock(&stp->st_stid.sc_client->cl_lock);
 	free_ol_stateid_reaplist(&reaplist);
 }
@@ -1210,8 +1234,8 @@
 	while (!list_empty(&oo->oo_owner.so_stateids)) {
 		stp = list_first_entry(&oo->oo_owner.so_stateids,
 				struct nfs4_ol_stateid, st_perstateowner);
-		unhash_open_stateid(stp, &reaplist);
-		put_ol_stateid_locked(stp, &reaplist);
+		if (unhash_open_stateid(stp, &reaplist))
+			put_ol_stateid_locked(stp, &reaplist);
 	}
 	spin_unlock(&clp->cl_lock);
 	free_ol_stateid_reaplist(&reaplist);
@@ -1714,7 +1738,7 @@
 	spin_lock(&state_lock);
 	while (!list_empty(&clp->cl_delegations)) {
 		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
-		unhash_delegation_locked(dp);
+		WARN_ON(!unhash_delegation_locked(dp));
 		list_add(&dp->dl_recall_lru, &reaplist);
 	}
 	spin_unlock(&state_lock);
@@ -1894,7 +1918,7 @@
 	 * __force to keep sparse happy
 	 */
 	verf[0] = (__force __be32)get_seconds();
-	verf[1] = (__force __be32)nn->clientid_counter;
+	verf[1] = (__force __be32)nn->clverifier_counter++;
 	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
 }
 
@@ -2241,6 +2265,9 @@
 	 * Also note we should probably be using this in 4.0 case too.
 	 */
 	return !list_empty(&clp->cl_openowners)
+#ifdef CONFIG_NFSD_PNFS
+		|| !list_empty(&clp->cl_lo_states)
+#endif
 		|| !list_empty(&clp->cl_delegations)
 		|| !list_empty(&clp->cl_sessions);
 }
@@ -2547,11 +2574,9 @@
 			goto out_free_conn;
 		cs_slot = &conf->cl_cs_slot;
 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
-		if (status == nfserr_replay_cache) {
-			status = nfsd4_replay_create_session(cr_ses, cs_slot);
-			goto out_free_conn;
-		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
-			status = nfserr_seq_misordered;
+		if (status) {
+			if (status == nfserr_replay_cache)
+				status = nfsd4_replay_create_session(cr_ses, cs_slot);
 			goto out_free_conn;
 		}
 	} else if (unconf) {
@@ -3041,10 +3066,11 @@
 	unconf = find_unconfirmed_client_by_name(&clname, nn);
 	if (unconf)
 		unhash_client_locked(unconf);
-	if (conf && same_verf(&conf->cl_verifier, &clverifier))
+	if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
 		/* case 1: probable callback update */
 		copy_clid(new, conf);
-	else /* case 4 (new client) or cases 2, 3 (client reboot): */
+		gen_confirm(new, nn);
+	} else /* case 4 (new client) or cases 2, 3 (client reboot): */
 		gen_clid(new, nn);
 	new->cl_minorversion = 0;
 	gen_callback(new, setclid, rqstp);
@@ -3085,10 +3111,11 @@
 	/*
 	 * We try hard to give out unique clientid's, so if we get an
 	 * attempt to confirm the same clientid with a different cred,
-	 * there's a bug somewhere.  Let's charitably assume it's our
-	 * bug.
+	 * the client may be buggy; this should never happen.
+	 *
+	 * Nevertheless, RFC 7530 recommends INUSE for this case:
 	 */
-	status = nfserr_serverfault;
+	status = nfserr_clid_inuse;
 	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
 		goto out;
 	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
@@ -3315,7 +3342,8 @@
 		hash_openowner(oo, clp, strhashval);
 		ret = oo;
 	} else
-		nfs4_free_openowner(&oo->oo_owner);
+		nfs4_free_stateowner(&oo->oo_owner);
+
 	spin_unlock(&clp->cl_lock);
 	return ret;
 }
@@ -3482,6 +3510,9 @@
 {
 	struct nfs4_delegation *dp = cb_to_delegation(cb);
 
+	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
+	        return 1;
+
 	switch (task->tk_status) {
 	case 0:
 		return 1;
@@ -3885,12 +3916,6 @@
 	return status;
 }
 
-static void
-nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
-{
-	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
-}
-
 /* Should we give out recallable state?: */
 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
 {
@@ -3923,7 +3948,7 @@
 static int nfs4_setlease(struct nfs4_delegation *dp)
 {
 	struct nfs4_file *fp = dp->dl_stid.sc_file;
-	struct file_lock *fl, *ret;
+	struct file_lock *fl;
 	struct file *filp;
 	int status = 0;
 
@@ -3934,10 +3959,10 @@
 	if (!filp) {
 		/* We should always have a readable file here */
 		WARN_ON_ONCE(1);
+		locks_free_lock(fl);
 		return -EBADF;
 	}
 	fl->fl_file = filp;
-	ret = fl;
 	status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
 	if (fl)
 		locks_free_lock(fl);
@@ -4063,7 +4088,8 @@
 		case NFS4_OPEN_CLAIM_FH:
 			/*
 			 * Let's not give out any delegations till everyone's
-			 * had the chance to reclaim theirs....
+			 * had the chance to reclaim theirs, *and* until
+			 * NLM locks have all been reclaimed:
 			 */
 			if (locks_in_grace(clp->net))
 				goto out_no_deleg;
@@ -4209,7 +4235,7 @@
 	if (fp)
 		put_nfs4_file(fp);
 	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
-		nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
+		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
 	/*
 	* To finish the open response, we just need to set the rflags.
 	*/
@@ -4338,14 +4364,12 @@
 	spin_lock(&state_lock);
 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
 		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
-		if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn)
-			continue;
 		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
 			t = dp->dl_time - cutoff;
 			new_timeo = min(new_timeo, t);
 			break;
 		}
-		unhash_delegation_locked(dp);
+		WARN_ON(!unhash_delegation_locked(dp));
 		list_add(&dp->dl_recall_lru, &reaplist);
 	}
 	spin_unlock(&state_lock);
@@ -4440,7 +4464,7 @@
 {
 	if (ONE_STATEID(stateid) && (flags & RD_STATE))
 		return nfs_ok;
-	else if (locks_in_grace(net)) {
+	else if (opens_in_grace(net)) {
 		/* Answer in remaining cases depends on existence of
 		 * conflicting state; so we must wait out the grace period. */
 		return nfserr_grace;
@@ -4459,7 +4483,7 @@
 static inline int
 grace_disallows_io(struct net *net, struct inode *inode)
 {
-	return locks_in_grace(net) && mandatory_lock(inode);
+	return opens_in_grace(net) && mandatory_lock(inode);
 }
 
 /* Returns true iff a is later than b: */
@@ -4751,7 +4775,7 @@
 		if (check_for_locks(stp->st_stid.sc_file,
 				    lockowner(stp->st_stateowner)))
 			break;
-		unhash_lock_stateid(stp);
+		WARN_ON(!unhash_lock_stateid(stp));
 		spin_unlock(&cl->cl_lock);
 		nfs4_put_stid(s);
 		ret = nfs_ok;
@@ -4967,20 +4991,23 @@
 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
 {
 	struct nfs4_client *clp = s->st_stid.sc_client;
+	bool unhashed;
 	LIST_HEAD(reaplist);
 
 	s->st_stid.sc_type = NFS4_CLOSED_STID;
 	spin_lock(&clp->cl_lock);
-	unhash_open_stateid(s, &reaplist);
+	unhashed = unhash_open_stateid(s, &reaplist);
 
 	if (clp->cl_minorversion) {
-		put_ol_stateid_locked(s, &reaplist);
+		if (unhashed)
+			put_ol_stateid_locked(s, &reaplist);
 		spin_unlock(&clp->cl_lock);
 		free_ol_stateid_reaplist(&reaplist);
 	} else {
 		spin_unlock(&clp->cl_lock);
 		free_ol_stateid_reaplist(&reaplist);
-		move_to_close_lru(s, clp->net);
+		if (unhashed)
+			move_to_close_lru(s, clp->net);
 	}
 }
 
@@ -5045,9 +5072,6 @@
 	return status;
 }
 
-
-#define LOFF_OVERFLOW(start, len)      ((u64)(len) > ~(u64)(start))
-
 static inline u64
 end_offset(u64 start, u64 len)
 {
@@ -5139,8 +5163,7 @@
 }
 
 static struct nfs4_lockowner *
-find_lockowner_str_locked(clientid_t *clid, struct xdr_netobj *owner,
-		struct nfs4_client *clp)
+find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
 {
 	unsigned int strhashval = ownerstr_hashval(owner);
 	struct nfs4_stateowner *so;
@@ -5158,13 +5181,12 @@
 }
 
 static struct nfs4_lockowner *
-find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner,
-		struct nfs4_client *clp)
+find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
 {
 	struct nfs4_lockowner *lo;
 
 	spin_lock(&clp->cl_lock);
-	lo = find_lockowner_str_locked(clid, owner, clp);
+	lo = find_lockowner_str_locked(clp, owner);
 	spin_unlock(&clp->cl_lock);
 	return lo;
 }
@@ -5208,14 +5230,14 @@
 	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
 	lo->lo_owner.so_ops = &lockowner_ops;
 	spin_lock(&clp->cl_lock);
-	ret = find_lockowner_str_locked(&clp->cl_clientid,
-			&lock->lk_new_owner, clp);
+	ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
 	if (ret == NULL) {
 		list_add(&lo->lo_owner.so_strhash,
 			 &clp->cl_ownerstr_hashtbl[strhashval]);
 		ret = lo;
 	} else
-		nfs4_free_lockowner(&lo->lo_owner);
+		nfs4_free_stateowner(&lo->lo_owner);
+
 	spin_unlock(&clp->cl_lock);
 	return ret;
 }
@@ -5298,8 +5320,8 @@
 static int
 check_lock_length(u64 offset, u64 length)
 {
-	return ((length == 0)  || ((length != NFS4_MAX_UINT64) &&
-	     LOFF_OVERFLOW(offset, length)));
+	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
+		(length > ~offset)));
 }
 
 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
@@ -5328,9 +5350,9 @@
 	struct nfs4_lockowner *lo;
 	unsigned int strhashval;
 
-	lo = find_lockowner_str(&cl->cl_clientid, &lock->v.new.owner, cl);
+	lo = find_lockowner_str(cl, &lock->lk_new_owner);
 	if (!lo) {
-		strhashval = ownerstr_hashval(&lock->v.new.owner);
+		strhashval = ownerstr_hashval(&lock->lk_new_owner);
 		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
 		if (lo == NULL)
 			return nfserr_jukebox;
@@ -5391,7 +5413,7 @@
 	if (lock->lk_is_new) {
 		if (nfsd4_has_session(cstate))
 			/* See rfc 5661 18.10.3: given clientid is ignored: */
-			memcpy(&lock->v.new.clientid,
+			memcpy(&lock->lk_new_clientid,
 				&cstate->session->se_client->cl_clientid,
 				sizeof(clientid_t));
 
@@ -5409,7 +5431,7 @@
 		open_sop = openowner(open_stp->st_stateowner);
 		status = nfserr_bad_stateid;
 		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
-						&lock->v.new.clientid))
+						&lock->lk_new_clientid))
 			goto out;
 		status = lookup_or_create_lock_state(cstate, open_stp, lock,
 							&lock_stp, &new);
@@ -5603,8 +5625,7 @@
 		goto out;
 	}
 
-	lo = find_lockowner_str(&lockt->lt_clientid, &lockt->lt_owner,
-				cstate->clp);
+	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
 	if (lo)
 		file_lock->fl_owner = (fl_owner_t)lo;
 	file_lock->fl_pid = current->tgid;
@@ -6019,7 +6040,7 @@
 
 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
 				    struct list_head *collect,
-				    void (*func)(struct nfs4_ol_stateid *))
+				    bool (*func)(struct nfs4_ol_stateid *))
 {
 	struct nfs4_openowner *oop;
 	struct nfs4_ol_stateid *stp, *st_next;
@@ -6033,9 +6054,9 @@
 			list_for_each_entry_safe(lst, lst_next,
 					&stp->st_locks, st_locks) {
 				if (func) {
-					func(lst);
-					nfsd_inject_add_lock_to_list(lst,
-								collect);
+					if (func(lst))
+						nfsd_inject_add_lock_to_list(lst,
+									collect);
 				}
 				++count;
 				/*
@@ -6305,7 +6326,7 @@
 				continue;
 
 			atomic_inc(&clp->cl_refcount);
-			unhash_delegation_locked(dp);
+			WARN_ON(!unhash_delegation_locked(dp));
 			list_add(&dp->dl_recall_lru, victims);
 		}
 		++count;
@@ -6584,6 +6605,7 @@
 		return ret;
 	nn->boot_time = get_seconds();
 	nn->grace_ended = false;
+	nn->nfsd4_manager.block_opens = true;
 	locks_start_grace(net, &nn->nfsd4_manager);
 	nfsd4_client_tracking_init(net);
 	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
@@ -6602,7 +6624,7 @@
 	ret = set_callback_cred();
 	if (ret)
 		return -ENOMEM;
-	laundry_wq = create_singlethread_workqueue("nfsd4");
+	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
 	if (laundry_wq == NULL) {
 		ret = -ENOMEM;
 		goto out_recovery;
@@ -6635,7 +6657,7 @@
 	spin_lock(&state_lock);
 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
 		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
-		unhash_delegation_locked(dp);
+		WARN_ON(!unhash_delegation_locked(dp));
 		list_add(&dp->dl_recall_lru, &reaplist);
 	}
 	spin_unlock(&state_lock);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 75e0563..51c9e9c 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2140,6 +2140,27 @@
 		return nfsd4_encode_user(xdr, rqstp, ace->who_uid);
 }
 
+static inline __be32
+nfsd4_encode_layout_type(struct xdr_stream *xdr, enum pnfs_layouttype layout_type)
+{
+	__be32 *p;
+
+	if (layout_type) {
+		p = xdr_reserve_space(xdr, 8);
+		if (!p)
+			return nfserr_resource;
+		*p++ = cpu_to_be32(1);
+		*p++ = cpu_to_be32(layout_type);
+	} else {
+		p = xdr_reserve_space(xdr, 4);
+		if (!p)
+			return nfserr_resource;
+		*p++ = cpu_to_be32(0);
+	}
+
+	return 0;
+}
+
 #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
 			      FATTR4_WORD0_RDATTR_ERROR)
 #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
@@ -2205,6 +2226,39 @@
 	return err;
 }
 
+static __be32
+nfsd4_encode_bitmap(struct xdr_stream *xdr, u32 bmval0, u32 bmval1, u32 bmval2)
+{
+	__be32 *p;
+
+	if (bmval2) {
+		p = xdr_reserve_space(xdr, 16);
+		if (!p)
+			goto out_resource;
+		*p++ = cpu_to_be32(3);
+		*p++ = cpu_to_be32(bmval0);
+		*p++ = cpu_to_be32(bmval1);
+		*p++ = cpu_to_be32(bmval2);
+	} else if (bmval1) {
+		p = xdr_reserve_space(xdr, 12);
+		if (!p)
+			goto out_resource;
+		*p++ = cpu_to_be32(2);
+		*p++ = cpu_to_be32(bmval0);
+		*p++ = cpu_to_be32(bmval1);
+	} else {
+		p = xdr_reserve_space(xdr, 8);
+		if (!p)
+			goto out_resource;
+		*p++ = cpu_to_be32(1);
+		*p++ = cpu_to_be32(bmval0);
+	}
+
+	return 0;
+out_resource:
+	return nfserr_resource;
+}
+
 /*
  * Note: @fhp can be NULL; in this case, we might have to compose the filehandle
  * ourselves.
@@ -2301,28 +2355,9 @@
 	}
 #endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
 
-	if (bmval2) {
-		p = xdr_reserve_space(xdr, 16);
-		if (!p)
-			goto out_resource;
-		*p++ = cpu_to_be32(3);
-		*p++ = cpu_to_be32(bmval0);
-		*p++ = cpu_to_be32(bmval1);
-		*p++ = cpu_to_be32(bmval2);
-	} else if (bmval1) {
-		p = xdr_reserve_space(xdr, 12);
-		if (!p)
-			goto out_resource;
-		*p++ = cpu_to_be32(2);
-		*p++ = cpu_to_be32(bmval0);
-		*p++ = cpu_to_be32(bmval1);
-	} else {
-		p = xdr_reserve_space(xdr, 8);
-		if (!p)
-			goto out_resource;
-		*p++ = cpu_to_be32(1);
-		*p++ = cpu_to_be32(bmval0);
-	}
+	status = nfsd4_encode_bitmap(xdr, bmval0, bmval1, bmval2);
+	if (status)
+		goto out;
 
 	attrlen_offset = xdr->buf->len;
 	p = xdr_reserve_space(xdr, 4);
@@ -2675,6 +2710,9 @@
 		*p++ = cpu_to_be32(stat.mtime.tv_nsec);
 	}
 	if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
+		struct kstat parent_stat;
+		u64 ino = stat.ino;
+
 		p = xdr_reserve_space(xdr, 8);
 		if (!p)
                 	goto out_resource;
@@ -2683,25 +2721,25 @@
 		 * and this is the root of a cross-mounted filesystem.
 		 */
 		if (ignore_crossmnt == 0 &&
-		    dentry == exp->ex_path.mnt->mnt_root)
-			get_parent_attributes(exp, &stat);
-		p = xdr_encode_hyper(p, stat.ino);
+		    dentry == exp->ex_path.mnt->mnt_root) {
+			err = get_parent_attributes(exp, &parent_stat);
+			if (err)
+				goto out_nfserr;
+			ino = parent_stat.ino;
+		}
+		p = xdr_encode_hyper(p, ino);
 	}
 #ifdef CONFIG_NFSD_PNFS
-	if ((bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) ||
-	    (bmval2 & FATTR4_WORD2_LAYOUT_TYPES)) {
-		if (exp->ex_layout_type) {
-			p = xdr_reserve_space(xdr, 8);
-			if (!p)
-				goto out_resource;
-			*p++ = cpu_to_be32(1);
-			*p++ = cpu_to_be32(exp->ex_layout_type);
-		} else {
-			p = xdr_reserve_space(xdr, 4);
-			if (!p)
-				goto out_resource;
-			*p++ = cpu_to_be32(0);
-		}
+	if (bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) {
+		status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type);
+		if (status)
+			goto out;
+	}
+
+	if (bmval2 & FATTR4_WORD2_LAYOUT_TYPES) {
+		status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type);
+		if (status)
+			goto out;
 	}
 
 	if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) {
@@ -2711,21 +2749,20 @@
 		*p++ = cpu_to_be32(stat.blksize);
 	}
 #endif /* CONFIG_NFSD_PNFS */
+	if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
+		status = nfsd4_encode_bitmap(xdr, NFSD_SUPPATTR_EXCLCREAT_WORD0,
+						  NFSD_SUPPATTR_EXCLCREAT_WORD1,
+						  NFSD_SUPPATTR_EXCLCREAT_WORD2);
+		if (status)
+			goto out;
+	}
+
 	if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
 		status = nfsd4_encode_security_label(xdr, rqstp, context,
 								contextlen);
 		if (status)
 			goto out;
 	}
-	if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
-		p = xdr_reserve_space(xdr, 16);
-		if (!p)
-			goto out_resource;
-		*p++ = cpu_to_be32(3);
-		*p++ = cpu_to_be32(NFSD_SUPPATTR_EXCLCREAT_WORD0);
-		*p++ = cpu_to_be32(NFSD_SUPPATTR_EXCLCREAT_WORD1);
-		*p++ = cpu_to_be32(NFSD_SUPPATTR_EXCLCREAT_WORD2);
-	}
 
 	attrlen = htonl(xdr->buf->len - attrlen_offset - 4);
 	write_bytes_to_xdr_buf(xdr->buf, attrlen_offset, &attrlen, 4);
@@ -3044,13 +3081,12 @@
 	__be32 *p;
 
 	if (!nfserr) {
-		p = xdr_reserve_space(xdr, 32);
+		p = xdr_reserve_space(xdr, 20);
 		if (!p)
 			return nfserr_resource;
-		p = encode_cinfo(p, &create->cr_cinfo);
-		*p++ = cpu_to_be32(2);
-		*p++ = cpu_to_be32(create->cr_bmval[0]);
-		*p++ = cpu_to_be32(create->cr_bmval[1]);
+		encode_cinfo(p, &create->cr_cinfo);
+		nfserr = nfsd4_encode_bitmap(xdr, create->cr_bmval[0],
+				create->cr_bmval[1], create->cr_bmval[2]);
 	}
 	return nfserr;
 }
@@ -3190,16 +3226,22 @@
 	nfserr = nfsd4_encode_stateid(xdr, &open->op_stateid);
 	if (nfserr)
 		goto out;
-	p = xdr_reserve_space(xdr, 40);
+	p = xdr_reserve_space(xdr, 24);
 	if (!p)
 		return nfserr_resource;
 	p = encode_cinfo(p, &open->op_cinfo);
 	*p++ = cpu_to_be32(open->op_rflags);
-	*p++ = cpu_to_be32(2);
-	*p++ = cpu_to_be32(open->op_bmval[0]);
-	*p++ = cpu_to_be32(open->op_bmval[1]);
-	*p++ = cpu_to_be32(open->op_delegate_type);
 
+	nfserr = nfsd4_encode_bitmap(xdr, open->op_bmval[0], open->op_bmval[1],
+					open->op_bmval[2]);
+	if (nfserr)
+		goto out;
+
+	p = xdr_reserve_space(xdr, 4);
+	if (!p)
+		return nfserr_resource;
+
+	*p++ = cpu_to_be32(open->op_delegate_type);
 	switch (open->op_delegate_type) {
 	case NFS4_OPEN_DELEGATE_NONE:
 		break;
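
Several encoders above now funnel through nfsd4_encode_bitmap(), which writes a word count followed by only the words up to the highest non-zero one. A userspace sketch of that trimming logic, with illustrative names and an arbitrary example bitmap (not values from this patch):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static size_t encode_bitmap(uint32_t *out, uint32_t b0, uint32_t b1,
			    uint32_t b2)
{
	size_t n = b2 ? 3 : (b1 ? 2 : 1);	/* highest word in use */
	uint32_t w[3] = { b0, b1, b2 };
	size_t i;

	out[0] = htonl(n);			/* word count first */
	for (i = 0; i < n; i++)
		out[i + 1] = htonl(w[i]);	/* then the live words */
	return n + 1;				/* 32-bit words written */
}

int main(void)
{
	uint32_t buf[4];
	size_t words = encode_bitmap(buf, 0x0018091a, 0x00b0a23a, 0);

	printf("encoded %zu words, count field %u\n", words, ntohl(buf[0]));
	return 0;
}

As in the kernel helper, at least one word is always emitted, and every word goes out in network byte order.
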
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 9277cc9..ad4e237 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -391,6 +391,14 @@
 	return ret;
 }
 
+static struct svc_serv_ops nfsd_thread_sv_ops = {
+	.svo_shutdown		= nfsd_last_thread,
+	.svo_function		= nfsd,
+	.svo_enqueue_xprt	= svc_xprt_do_enqueue,
+	.svo_setup		= svc_set_num_threads,
+	.svo_module		= THIS_MODULE,
+};
+
 int nfsd_create_serv(struct net *net)
 {
 	int error;
@@ -405,7 +413,7 @@
 		nfsd_max_blksize = nfsd_get_default_max_blksize();
 	nfsd_reset_versions();
 	nn->nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
-				      nfsd_last_thread, nfsd, THIS_MODULE);
+						&nfsd_thread_sv_ops);
 	if (nn->nfsd_serv == NULL)
 		return -ENOMEM;
 
@@ -500,8 +508,8 @@
 	/* apply the new numbers */
 	svc_get(nn->nfsd_serv);
 	for (i = 0; i < n; i++) {
-		err = svc_set_num_threads(nn->nfsd_serv, &nn->nfsd_serv->sv_pools[i],
-				    	  nthreads[i]);
+		err = nn->nfsd_serv->sv_ops->svo_setup(nn->nfsd_serv,
+				&nn->nfsd_serv->sv_pools[i], nthreads[i]);
 		if (err)
 			break;
 	}
@@ -540,7 +548,8 @@
 	error = nfsd_startup_net(nrservs, net);
 	if (error)
 		goto out_destroy;
-	error = svc_set_num_threads(nn->nfsd_serv, NULL, nrservs);
+	error = nn->nfsd_serv->sv_ops->svo_setup(nn->nfsd_serv,
+			NULL, nrservs);
 	if (error)
 		goto out_shutdown;
 	/* We are holding a reference to nn->nfsd_serv which
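
The nfssvc.c hunks replace direct calls such as svc_set_num_threads() with dispatch through a struct svc_serv_ops table, so the server carries its callbacks in one place. A small userspace analog of that ops-table indirection, all names illustrative:

#include <stdio.h>

struct serv;

struct serv_ops {
	void (*shutdown)(struct serv *s);
	int  (*setup)(struct serv *s, int nthreads);
};

struct serv {
	const struct serv_ops *ops;
	int nthreads;
};

static void demo_shutdown(struct serv *s) { s->nthreads = 0; }
static int demo_setup(struct serv *s, int n) { s->nthreads = n; return 0; }

static const struct serv_ops demo_ops = {
	.shutdown = demo_shutdown,
	.setup    = demo_setup,
};

int main(void)
{
	struct serv s = { .ops = &demo_ops };

	s.ops->setup(&s, 8);	/* mirrors serv->sv_ops->svo_setup(...) */
	printf("threads: %d\n", s.nthreads);
	s.ops->shutdown(&s);
	return 0;
}
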
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 4874ce5..583ffc1 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -67,8 +67,8 @@
 	struct rpc_message cb_msg;
 	struct nfsd4_callback_ops *cb_ops;
 	struct work_struct cb_work;
+	int cb_seq_status;
 	int cb_status;
-	bool cb_update_seq_nr;
 	bool cb_need_restart;
 };
 
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index b5e077a..45c0497 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1249,12 +1249,6 @@
 
 #ifdef CONFIG_NFSD_V3
 
-static inline int nfsd_create_is_exclusive(int createmode)
-{
-	return createmode == NFS3_CREATE_EXCLUSIVE
-	       || createmode == NFS4_CREATE_EXCLUSIVE4_1;
-}
-
 /*
  * NFSv3 and NFSv4 version of nfsd_create
  */
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 5be875e..fee2451 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -131,4 +131,10 @@
 	return nfserrno(vfs_getattr(&p, stat));
 }
 
+static inline int nfsd_create_is_exclusive(int createmode)
+{
+	return createmode == NFS3_CREATE_EXCLUSIVE
+	       || createmode == NFS4_CREATE_EXCLUSIVE4_1;
+}
+
 #endif /* LINUX_NFSD_VFS_H */
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 44523f4..6faaf71 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -154,6 +154,7 @@
 	struct dnotify_struct *dn;
 	struct dnotify_struct **prev;
 	struct inode *inode;
+	bool free = false;
 
 	inode = file_inode(filp);
 	if (!S_ISDIR(inode->i_mode))
@@ -182,11 +183,15 @@
 
 	/* nothing else could have found us thanks to the dnotify_groups
 	   mark_mutex */
-	if (dn_mark->dn == NULL)
-		fsnotify_destroy_mark_locked(fsn_mark, dnotify_group);
+	if (dn_mark->dn == NULL) {
+		fsnotify_detach_mark(fsn_mark);
+		free = true;
+	}
 
 	mutex_unlock(&dnotify_group->mark_mutex);
 
+	if (free)
+		fsnotify_free_mark(fsn_mark);
 	fsnotify_put_mark(fsn_mark);
 }
 
@@ -362,9 +367,10 @@
 	spin_unlock(&fsn_mark->lock);
 
 	if (destroy)
-		fsnotify_destroy_mark_locked(fsn_mark, dnotify_group);
-
+		fsnotify_detach_mark(fsn_mark);
 	mutex_unlock(&dnotify_group->mark_mutex);
+	if (destroy)
+		fsnotify_free_mark(fsn_mark);
 	fsnotify_put_mark(fsn_mark);
 out_err:
 	if (new_fsn_mark)
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index cf27550..8e8e6bc 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -529,8 +529,10 @@
 	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
 						 &destroy_mark);
 	if (destroy_mark)
-		fsnotify_destroy_mark_locked(fsn_mark, group);
+		fsnotify_detach_mark(fsn_mark);
 	mutex_unlock(&group->mark_mutex);
+	if (destroy_mark)
+		fsnotify_free_mark(fsn_mark);
 
 	fsnotify_put_mark(fsn_mark);
 	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
@@ -557,8 +559,10 @@
 	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
 						 &destroy_mark);
 	if (destroy_mark)
-		fsnotify_destroy_mark_locked(fsn_mark, group);
+		fsnotify_detach_mark(fsn_mark);
 	mutex_unlock(&group->mark_mutex);
+	if (destroy_mark)
+		fsnotify_free_mark(fsn_mark);
 
 	/* matches the fsnotify_find_inode_mark() */
 	fsnotify_put_mark(fsn_mark);
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index 58b7cdb..6b6f0d47 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -76,7 +76,8 @@
 	struct inotify_inode_mark *inode_mark;
 	struct inode *inode;
 
-	if (!(mark->flags & (FSNOTIFY_MARK_FLAG_ALIVE | FSNOTIFY_MARK_FLAG_INODE)))
+	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE) ||
+	    !(mark->flags & FSNOTIFY_MARK_FLAG_INODE))
 		return;
 
 	inode_mark = container_of(mark, struct inotify_inode_mark, fsn_mark);
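
The fdinfo.c fix is worth pausing on: !(flags & (A | B)) is true only when *both* bits are clear, so the old test let a mark through whenever either flag was set. The corrected form requires each flag individually. A runnable demonstration:

#include <stdio.h>

#define FLAG_ALIVE 0x1
#define FLAG_INODE 0x2

int main(void)
{
	unsigned flags = FLAG_ALIVE;	/* ALIVE set, INODE clear */

	if (!(flags & (FLAG_ALIVE | FLAG_INODE)))
		puts("buggy test: would skip this mark");
	else
		puts("buggy test: proceeds although INODE is clear");

	if (!(flags & FLAG_ALIVE) || !(flags & FLAG_INODE))
		puts("fixed test: correctly skips the mark");
	return 0;
}
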
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index dd3fb0b..db39de2 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -26,7 +26,6 @@
 
 #include <linux/fsnotify_backend.h>
 #include "fsnotify.h"
-#include "../mount.h"
 
 /*
  * Clear all of the marks on an inode when it is being evicted from core
@@ -205,6 +204,16 @@
 		mnt = NULL;
 
 	/*
+	 * Optimization: srcu_read_lock() has a memory barrier which can
+	 * be expensive.  It protects walking the *_fsnotify_marks lists.
+	 * However, if we do not walk the lists, we do not have to do
+	 * SRCU because we have no references to any objects and do not
+	 * need SRCU to keep them "alive".
+	 */
+	if (hlist_empty(&to_tell->i_fsnotify_marks) &&
+	    (!mnt || hlist_empty(&mnt->mnt_fsnotify_marks)))
+		return 0;
+	/*
 	 * if this is a modify event we may need to clear the ignored masks
 	 * otherwise return if neither the inode nor the vfsmount care about
 	 * this type of event.
diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
index 13a00be..b44c68a 100644
--- a/fs/notify/fsnotify.h
+++ b/fs/notify/fsnotify.h
@@ -6,6 +6,8 @@
 #include <linux/srcu.h>
 #include <linux/types.h>
 
+#include "../mount.h"
+
 /* destroy all events sitting in this groups notification queue */
 extern void fsnotify_flush_notify(struct fsnotify_group *group);
 
@@ -38,15 +40,22 @@
 extern void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark);
 /* inode specific destruction of a mark */
 extern void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark);
-/* Destroy all marks in the given list */
-extern void fsnotify_destroy_marks(struct list_head *to_free);
 /* Find mark belonging to given group in the list of marks */
 extern struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head,
 						struct fsnotify_group *group);
-/* run the list of all marks associated with inode and flag them to be freed */
-extern void fsnotify_clear_marks_by_inode(struct inode *inode);
-/* run the list of all marks associated with vfsmount and flag them to be freed */
-extern void fsnotify_clear_marks_by_mount(struct vfsmount *mnt);
+/* Destroy all marks in the given list protected by 'lock' */
+extern void fsnotify_destroy_marks(struct hlist_head *head, spinlock_t *lock);
+/* run the list of all marks associated with inode and destroy them */
+static inline void fsnotify_clear_marks_by_inode(struct inode *inode)
+{
+	fsnotify_destroy_marks(&inode->i_fsnotify_marks, &inode->i_lock);
+}
+/* run the list of all marks associated with vfsmount and destroy them */
+static inline void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
+{
+	fsnotify_destroy_marks(&real_mount(mnt)->mnt_fsnotify_marks,
+			       &mnt->mnt_root->d_lock);
+}
 /*
  * update the dentry->d_flags of all of inode's children to indicate if inode cares
  * about events that happen to its children.
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 3daf513..474a3ce 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -65,26 +65,6 @@
 }
 
 /*
- * Given an inode, destroy all of the marks associated with that inode.
- */
-void fsnotify_clear_marks_by_inode(struct inode *inode)
-{
-	struct fsnotify_mark *mark;
-	struct hlist_node *n;
-	LIST_HEAD(free_list);
-
-	spin_lock(&inode->i_lock);
-	hlist_for_each_entry_safe(mark, n, &inode->i_fsnotify_marks, obj_list) {
-		list_add(&mark->free_list, &free_list);
-		hlist_del_init_rcu(&mark->obj_list);
-		fsnotify_get_mark(mark);
-	}
-	spin_unlock(&inode->i_lock);
-
-	fsnotify_destroy_marks(&free_list);
-}
-
-/*
  * Given a group clear all of the inode marks associated with that group.
  */
 void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 39ddcaf..fc0df44 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -122,26 +122,27 @@
 }
 
 /*
- * Any time a mark is getting freed we end up here.
- * The caller had better be holding a reference to this mark so we don't actually
- * do the final put under the mark->lock
+ * Remove mark from inode / vfsmount list, group list, drop inode reference
+ * if we got one.
+ *
+ * Must be called with group->mark_mutex held.
  */
-void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
-				  struct fsnotify_group *group)
+void fsnotify_detach_mark(struct fsnotify_mark *mark)
 {
 	struct inode *inode = NULL;
+	struct fsnotify_group *group = mark->group;
 
 	BUG_ON(!mutex_is_locked(&group->mark_mutex));
 
 	spin_lock(&mark->lock);
 
 	/* something else already called this function on this mark */
-	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
+	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
 		spin_unlock(&mark->lock);
 		return;
 	}
 
-	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
+	mark->flags &= ~FSNOTIFY_MARK_FLAG_ATTACHED;
 
 	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
 		inode = mark->inode;
@@ -150,6 +151,12 @@
 		fsnotify_destroy_vfsmount_mark(mark);
 	else
 		BUG();
+	/*
+	 * Note that we didn't update flags telling whether inode cares about
+	 * what's happening with children. We update these flags from
+	 * __fsnotify_parent() lazily when next event happens on one of our
+	 * children.
+	 */
 
 	list_del_init(&mark->g_list);
 
@@ -157,18 +164,32 @@
 
 	if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
 		iput(inode);
-	/* release lock temporarily */
-	mutex_unlock(&group->mark_mutex);
+
+	atomic_dec(&group->num_marks);
+}
+
+/*
+ * Free fsnotify mark. The freeing is actually happening from a kthread which
+ * first waits for srcu period end. Caller must have a reference to the mark
+ * or be protected by fsnotify_mark_srcu.
+ */
+void fsnotify_free_mark(struct fsnotify_mark *mark)
+{
+	struct fsnotify_group *group = mark->group;
+
+	spin_lock(&mark->lock);
+	/* something else already called this function on this mark */
+	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
+		spin_unlock(&mark->lock);
+		return;
+	}
+	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
+	spin_unlock(&mark->lock);
 
 	spin_lock(&destroy_lock);
 	list_add(&mark->g_list, &destroy_list);
 	spin_unlock(&destroy_lock);
 	wake_up(&destroy_waitq);
-	/*
-	 * We don't necessarily have a ref on mark from caller so the above destroy
-	 * may have actually freed it, unless this group provides a 'freeing_mark'
-	 * function which must be holding a reference.
-	 */
 
 	/*
 	 * Some groups like to know that marks are being freed.  This is a
@@ -177,50 +198,45 @@
 	 */
 	if (group->ops->freeing_mark)
 		group->ops->freeing_mark(mark, group);
-
-	/*
-	 * __fsnotify_update_child_dentry_flags(inode);
-	 *
-	 * I really want to call that, but we can't, we have no idea if the inode
-	 * still exists the second we drop the mark->lock.
-	 *
-	 * The next time an event arrive to this inode from one of it's children
-	 * __fsnotify_parent will see that the inode doesn't care about it's
-	 * children and will update all of these flags then.  So really this
-	 * is just a lazy update (and could be a perf win...)
-	 */
-
-	atomic_dec(&group->num_marks);
-
-	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
 }
 
 void fsnotify_destroy_mark(struct fsnotify_mark *mark,
 			   struct fsnotify_group *group)
 {
 	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
-	fsnotify_destroy_mark_locked(mark, group);
+	fsnotify_detach_mark(mark);
 	mutex_unlock(&group->mark_mutex);
+	fsnotify_free_mark(mark);
 }
 
-/*
- * Destroy all marks in the given list. The marks must be already detached from
- * the original inode / vfsmount.
- */
-void fsnotify_destroy_marks(struct list_head *to_free)
+void fsnotify_destroy_marks(struct hlist_head *head, spinlock_t *lock)
 {
-	struct fsnotify_mark *mark, *lmark;
-	struct fsnotify_group *group;
+	struct fsnotify_mark *mark;
 
-	list_for_each_entry_safe(mark, lmark, to_free, free_list) {
-		spin_lock(&mark->lock);
-		fsnotify_get_group(mark->group);
-		group = mark->group;
-		spin_unlock(&mark->lock);
-
-		fsnotify_destroy_mark(mark, group);
+	while (1) {
+		/*
+		 * We have to be careful since we can race with e.g.
+		 * fsnotify_clear_marks_by_group() and once we drop 'lock',
+		 * mark can get removed from the obj_list and destroyed. But
+		 * we are holding mark reference so mark cannot be freed and
+		 * calling fsnotify_destroy_mark() more than once is fine.
+		 */
+		spin_lock(lock);
+		if (hlist_empty(head)) {
+			spin_unlock(lock);
+			break;
+		}
+		mark = hlist_entry(head->first, struct fsnotify_mark, obj_list);
+		/*
+		 * We don't update i_fsnotify_mask / mnt_fsnotify_mask here
+		 * since inode / mount is going away anyway. So just remove
+		 * mark from the list.
+		 */
+		hlist_del_init_rcu(&mark->obj_list);
+		fsnotify_get_mark(mark);
+		spin_unlock(lock);
+		fsnotify_destroy_mark(mark, mark->group);
 		fsnotify_put_mark(mark);
-		fsnotify_put_group(group);
 	}
 }
 
@@ -332,7 +348,7 @@
 	 * inode->i_lock
 	 */
 	spin_lock(&mark->lock);
-	mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE;
+	mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE | FSNOTIFY_MARK_FLAG_ATTACHED;
 
 	fsnotify_get_group(group);
 	mark->group = group;
@@ -438,8 +454,9 @@
 		}
 		mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
 		fsnotify_get_mark(mark);
-		fsnotify_destroy_mark_locked(mark, group);
+		fsnotify_detach_mark(mark);
 		mutex_unlock(&group->mark_mutex);
+		fsnotify_free_mark(mark);
 		fsnotify_put_mark(mark);
 	}
 }
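
The core change in mark.c splits the old fsnotify_destroy_mark_locked() into fsnotify_detach_mark(), which unlinks under the group's mark_mutex, and fsnotify_free_mark(), which queues the mark for destruction after an SRCU grace period and runs outside the mutex, eliminating the old unlock/relock dance. A userspace analog of that two-step teardown (names are illustrative; a plain free() stands in for the SRCU-deferred destruction):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mark {
	struct mark *next;
	int attached;
};

static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct mark *mark_list;

static void detach_mark(struct mark *m)
{
	/* caller holds mark_mutex, mirroring the kernel's assertion */
	struct mark **p = &mark_list;

	while (*p && *p != m)
		p = &(*p)->next;
	if (*p) {
		*p = m->next;
		m->attached = 0;	/* ~FSNOTIFY_MARK_FLAG_ATTACHED */
	}
}

static void free_mark(struct mark *m)
{
	/* the kernel queues this for after an SRCU grace period */
	free(m);
}

int main(void)
{
	struct mark *m = calloc(1, sizeof(*m));

	m->attached = 1;
	mark_list = m;

	pthread_mutex_lock(&mark_mutex);
	detach_mark(m);			/* fast path: under the mutex */
	pthread_mutex_unlock(&mark_mutex);
	free_mark(m);			/* slow path: outside the mutex */

	puts("detached under the lock, freed outside it");
	return 0;
}
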
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
index 326b148..a8fcab6 100644
--- a/fs/notify/vfsmount_mark.c
+++ b/fs/notify/vfsmount_mark.c
@@ -28,25 +28,6 @@
 
 #include <linux/fsnotify_backend.h>
 #include "fsnotify.h"
-#include "../mount.h"
-
-void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
-{
-	struct fsnotify_mark *mark;
-	struct hlist_node *n;
-	struct mount *m = real_mount(mnt);
-	LIST_HEAD(free_list);
-
-	spin_lock(&mnt->mnt_root->d_lock);
-	hlist_for_each_entry_safe(mark, n, &m->mnt_fsnotify_marks, obj_list) {
-		list_add(&mark->free_list, &free_list);
-		hlist_del_init_rcu(&mark->obj_list);
-		fsnotify_get_mark(mark);
-	}
-	spin_unlock(&mnt->mnt_root->d_lock);
-
-	fsnotify_destroy_marks(&free_list);
-}
 
 void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
 {
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index c1128bc..d1a8535 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -2204,17 +2204,12 @@
 	return true;
 #ifdef NTFS_RW
 iput_usnjrnl_err_out:
-	if (vol->usnjrnl_j_ino)
-		iput(vol->usnjrnl_j_ino);
-	if (vol->usnjrnl_max_ino)
-		iput(vol->usnjrnl_max_ino);
-	if (vol->usnjrnl_ino)
-		iput(vol->usnjrnl_ino);
+	iput(vol->usnjrnl_j_ino);
+	iput(vol->usnjrnl_max_ino);
+	iput(vol->usnjrnl_ino);
 iput_quota_err_out:
-	if (vol->quota_q_ino)
-		iput(vol->quota_q_ino);
-	if (vol->quota_ino)
-		iput(vol->quota_ino);
+	iput(vol->quota_q_ino);
+	iput(vol->quota_ino);
 	iput(vol->extend_ino);
 #endif /* NTFS_RW */
 iput_sec_err_out:
@@ -2223,8 +2218,7 @@
 	iput(vol->root_ino);
 iput_logfile_err_out:
 #ifdef NTFS_RW
-	if (vol->logfile_ino)
-		iput(vol->logfile_ino);
+	iput(vol->logfile_ino);
 iput_vol_err_out:
 #endif /* NTFS_RW */
 	iput(vol->vol_ino);
@@ -2254,8 +2248,7 @@
 	iput(vol->mftbmp_ino);
 iput_mirr_err_out:
 #ifdef NTFS_RW
-	if (vol->mftmirr_ino)
-		iput(vol->mftmirr_ino);
+	iput(vol->mftmirr_ino);
 #endif /* NTFS_RW */
 	return false;
 }
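
The NTFS cleanup relies on iput() being a no-op when passed NULL, the same contract free() and kfree() give, so every "if (x) iput(x);" guard collapses to a bare call. A trivial userspace illustration:

#include <stdlib.h>

int main(void)
{
	void *p = NULL;

	free(p);	/* defined as a no-op on NULL, like iput(NULL) */
	return 0;
}
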
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index c58a1bc..0cdf497 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -284,7 +284,19 @@
 
 int ocfs2_iop_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
-	return ocfs2_set_acl(NULL, inode, NULL, type, acl, NULL, NULL);
+	struct buffer_head *bh = NULL;
+	int status = 0;
+
+	status = ocfs2_inode_lock(inode, &bh, 1);
+	if (status < 0) {
+		if (status != -ENOENT)
+			mlog_errno(status);
+		return status;
+	}
+	status = ocfs2_set_acl(NULL, inode, bh, type, acl, NULL, NULL);
+	ocfs2_inode_unlock(inode, 1);
+	brelse(bh);
+	return status;
 }
 
 struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
@@ -292,19 +304,21 @@
 	struct ocfs2_super *osb;
 	struct buffer_head *di_bh = NULL;
 	struct posix_acl *acl;
-	int ret = -EAGAIN;
+	int ret;
 
 	osb = OCFS2_SB(inode->i_sb);
 	if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
 		return NULL;
-
-	ret = ocfs2_read_inode_block(inode, &di_bh);
-	if (ret < 0)
+	ret = ocfs2_inode_lock(inode, &di_bh, 0);
+	if (ret < 0) {
+		if (ret != -ENOENT)
+			mlog_errno(ret);
 		return ERR_PTR(ret);
+	}
 
 	acl = ocfs2_get_acl_nolock(inode, type, di_bh);
 
+	ocfs2_inode_unlock(inode, 0);
 	brelse(di_bh);
-
 	return acl;
 }
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 5997c00..86181d6 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -908,32 +908,30 @@
 	 */
 
 	if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
-		ocfs2_error(sb,
-			    "Extent block #%llu has bad signature %.*s",
-			    (unsigned long long)bh->b_blocknr, 7,
-			    eb->h_signature);
-		return -EINVAL;
+		rc = ocfs2_error(sb,
+				 "Extent block #%llu has bad signature %.*s\n",
+				 (unsigned long long)bh->b_blocknr, 7,
+				 eb->h_signature);
+		goto bail;
 	}
 
 	if (le64_to_cpu(eb->h_blkno) != bh->b_blocknr) {
-		ocfs2_error(sb,
-			    "Extent block #%llu has an invalid h_blkno "
-			    "of %llu",
-			    (unsigned long long)bh->b_blocknr,
-			    (unsigned long long)le64_to_cpu(eb->h_blkno));
-		return -EINVAL;
+		rc = ocfs2_error(sb,
+				 "Extent block #%llu has an invalid h_blkno of %llu\n",
+				 (unsigned long long)bh->b_blocknr,
+				 (unsigned long long)le64_to_cpu(eb->h_blkno));
+		goto bail;
 	}
 
 	if (le32_to_cpu(eb->h_fs_generation) != OCFS2_SB(sb)->fs_generation) {
-		ocfs2_error(sb,
-			    "Extent block #%llu has an invalid "
-			    "h_fs_generation of #%u",
-			    (unsigned long long)bh->b_blocknr,
-			    le32_to_cpu(eb->h_fs_generation));
-		return -EINVAL;
+		rc = ocfs2_error(sb,
+				 "Extent block #%llu has an invalid h_fs_generation of #%u\n",
+				 (unsigned long long)bh->b_blocknr,
+				 le32_to_cpu(eb->h_fs_generation));
+		goto bail;
 	}
-
-	return 0;
+bail:
+	return rc;
 }
 
 int ocfs2_read_extent_block(struct ocfs2_caching_info *ci, u64 eb_blkno,
@@ -1446,8 +1444,7 @@
 	while(le16_to_cpu(el->l_tree_depth) > 1) {
 		if (le16_to_cpu(el->l_next_free_rec) == 0) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-				    "Owner %llu has empty "
-				    "extent list (next_free_rec == 0)",
+				    "Owner %llu has empty extent list (next_free_rec == 0)\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
 			status = -EIO;
 			goto bail;
@@ -1456,9 +1453,7 @@
 		blkno = le64_to_cpu(el->l_recs[i].e_blkno);
 		if (!blkno) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-				    "Owner %llu has extent "
-				    "list where extent # %d has no physical "
-				    "block start",
+				    "Owner %llu has extent list where extent # %d has no physical block start\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), i);
 			status = -EIO;
 			goto bail;
@@ -1788,8 +1783,7 @@
 	while (el->l_tree_depth) {
 		if (le16_to_cpu(el->l_next_free_rec) == 0) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(ci),
-				    "Owner %llu has empty extent list at "
-				    "depth %u\n",
+				    "Owner %llu has empty extent list at depth %u\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(ci),
 				    le16_to_cpu(el->l_tree_depth));
 			ret = -EROFS;
@@ -1814,8 +1808,7 @@
 		blkno = le64_to_cpu(el->l_recs[i].e_blkno);
 		if (blkno == 0) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(ci),
-				    "Owner %llu has bad blkno in extent list "
-				    "at depth %u (index %d)\n",
+				    "Owner %llu has bad blkno in extent list at depth %u (index %d)\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(ci),
 				    le16_to_cpu(el->l_tree_depth), i);
 			ret = -EROFS;
@@ -1836,8 +1829,7 @@
 		if (le16_to_cpu(el->l_next_free_rec) >
 		    le16_to_cpu(el->l_count)) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(ci),
-				    "Owner %llu has bad count in extent list "
-				    "at block %llu (next free=%u, count=%u)\n",
+				    "Owner %llu has bad count in extent list at block %llu (next free=%u, count=%u)\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(ci),
 				    (unsigned long long)bh->b_blocknr,
 				    le16_to_cpu(el->l_next_free_rec),
@@ -2116,8 +2108,7 @@
 
 	if (left_el->l_next_free_rec != left_el->l_count) {
 		ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-			    "Inode %llu has non-full interior leaf node %llu"
-			    "(next free = %u)",
+			    "Inode %llu has non-full interior leaf node %llu (next free = %u)\n",
 			    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
 			    (unsigned long long)left_leaf_bh->b_blocknr,
 			    le16_to_cpu(left_el->l_next_free_rec));
@@ -2256,8 +2247,7 @@
 		 * If we got here, we never found a valid node where
 		 * the tree indicated one should be.
 		 */
-		ocfs2_error(sb,
-			    "Invalid extent tree at extent block %llu\n",
+		ocfs2_error(sb, "Invalid extent tree at extent block %llu\n",
 			    (unsigned long long)blkno);
 		ret = -EROFS;
 		goto out;
@@ -2872,8 +2862,7 @@
 		 * If we got here, we never found a valid node where
 		 * the tree indicated one should be.
 		 */
-		ocfs2_error(sb,
-			    "Invalid extent tree at extent block %llu\n",
+		ocfs2_error(sb, "Invalid extent tree at extent block %llu\n",
 			    (unsigned long long)blkno);
 		ret = -EROFS;
 		goto out;
@@ -3131,6 +3120,30 @@
 	return ret;
 }
 
+static int ocfs2_remove_rightmost_empty_extent(struct ocfs2_super *osb,
+				struct ocfs2_extent_tree *et,
+				struct ocfs2_path *path,
+				struct ocfs2_cached_dealloc_ctxt *dealloc)
+{
+	handle_t *handle;
+	int ret;
+	int credits = path->p_tree_depth * 2 + 1;
+
+	handle = ocfs2_start_trans(osb, credits);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		mlog_errno(ret);
+		return ret;
+	}
+
+	ret = ocfs2_remove_rightmost_path(handle, et, path, dealloc);
+	if (ret)
+		mlog_errno(ret);
+
+	ocfs2_commit_trans(osb, handle);
+	return ret;
+}
+
 /*
  * Left rotation of btree records.
  *
@@ -3200,7 +3213,7 @@
 		if (le16_to_cpu(el->l_next_free_rec) == 0) {
 			ret = -EIO;
 			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-				    "Owner %llu has empty extent block at %llu",
+				    "Owner %llu has empty extent block at %llu\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
 				    (unsigned long long)le64_to_cpu(eb->h_blkno));
 			goto out;
@@ -3930,7 +3943,7 @@
 		next_free = le16_to_cpu(el->l_next_free_rec);
 		if (next_free == 0) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-				    "Owner %llu has a bad extent list",
+				    "Owner %llu has a bad extent list\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
 			ret = -EIO;
 			return;
@@ -4355,10 +4368,7 @@
 				bh = path_leaf_bh(left_path);
 				eb = (struct ocfs2_extent_block *)bh->b_data;
 				ocfs2_error(sb,
-					    "Extent block #%llu has an "
-					    "invalid l_next_free_rec of "
-					    "%d.  It should have "
-					    "matched the l_count of %d",
+					    "Extent block #%llu has an invalid l_next_free_rec of %d.  It should have matched the l_count of %d\n",
 					    (unsigned long long)le64_to_cpu(eb->h_blkno),
 					    le16_to_cpu(new_el->l_next_free_rec),
 					    le16_to_cpu(new_el->l_count));
@@ -4413,8 +4423,7 @@
 				bh = path_leaf_bh(right_path);
 				eb = (struct ocfs2_extent_block *)bh->b_data;
 				ocfs2_error(sb,
-					    "Extent block #%llu has an "
-					    "invalid l_next_free_rec of %d",
+					    "Extent block #%llu has an invalid l_next_free_rec of %d\n",
 					    (unsigned long long)le64_to_cpu(eb->h_blkno),
 					    le16_to_cpu(new_el->l_next_free_rec));
 				status = -EINVAL;
@@ -4970,10 +4979,9 @@
 		split_index = ocfs2_search_extent_list(el, cpos);
 		if (split_index == -1) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-					"Owner %llu has an extent at cpos %u "
-					"which can no longer be found.\n",
-					(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
-					cpos);
+				    "Owner %llu has an extent at cpos %u which can no longer be found\n",
+				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+				    cpos);
 			ret = -EROFS;
 			goto out;
 		}
@@ -5158,10 +5166,9 @@
 	index = ocfs2_search_extent_list(el, cpos);
 	if (index == -1) {
 		ocfs2_error(sb,
-			    "Owner %llu has an extent at cpos %u which can no "
-			    "longer be found.\n",
-			     (unsigned long long)
-			     ocfs2_metadata_cache_owner(et->et_ci), cpos);
+			    "Owner %llu has an extent at cpos %u which can no longer be found\n",
+			    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
+			    cpos);
 		ret = -EROFS;
 		goto out;
 	}
@@ -5228,9 +5235,7 @@
 		cpos, len, phys);
 
 	if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) {
-		ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents "
-			    "that are being written to, but the feature bit "
-			    "is not set in the super block.",
+		ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents that are being written to, but the feature bit is not set in the super block\n",
 			    (unsigned long long)OCFS2_I(inode)->ip_blkno);
 		ret = -EROFS;
 		goto out;
@@ -5514,8 +5519,7 @@
 	index = ocfs2_search_extent_list(el, cpos);
 	if (index == -1) {
 		ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-			    "Owner %llu has an extent at cpos %u which can no "
-			    "longer be found.\n",
+			    "Owner %llu has an extent at cpos %u which can no longer be found\n",
 			    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
 			    cpos);
 		ret = -EROFS;
@@ -5580,7 +5584,7 @@
 		index = ocfs2_search_extent_list(el, cpos);
 		if (index == -1) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-				    "Owner %llu: split at cpos %u lost record.",
+				    "Owner %llu: split at cpos %u lost record\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
 				    cpos);
 			ret = -EROFS;
@@ -5596,8 +5600,7 @@
 			ocfs2_rec_clusters(el, rec);
 		if (rec_range != trunc_range) {
 			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
-				    "Owner %llu: error after split at cpos %u"
-				    "trunc len %u, existing record is (%u,%u)",
+				    "Owner %llu: error after split at cpos %u trunc len %u, existing record is (%u,%u)\n",
 				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
 				    cpos, len, le32_to_cpu(rec->e_cpos),
 				    ocfs2_rec_clusters(el, rec));
@@ -6175,7 +6178,7 @@
 		iput(tl_inode);
 	brelse(tl_bh);
 
-	if (status < 0 && (*tl_copy)) {
+	if (status < 0) {
 		kfree(*tl_copy);
 		*tl_copy = NULL;
 		mlog_errno(status);
@@ -7108,15 +7111,23 @@
 		 * to check it up here before changing the tree.
 		*/
 		if (root_el->l_tree_depth && rec->e_int_clusters == 0) {
-			ocfs2_error(inode->i_sb, "Inode %lu has an empty "
+			mlog(ML_ERROR, "Inode %lu has an empty "
 				    "extent record, depth %u\n", inode->i_ino,
 				    le16_to_cpu(root_el->l_tree_depth));
-			status = -EROFS;
-			goto bail;
+			status = ocfs2_remove_rightmost_empty_extent(osb,
+					&et, path, &dealloc);
+			if (status) {
+				mlog_errno(status);
+				goto bail;
+			}
+
+			ocfs2_reinit_path(path, 1);
+			goto start;
+		} else {
+			trunc_cpos = le32_to_cpu(rec->e_cpos);
+			trunc_len = 0;
+			blkno = 0;
 		}
-		trunc_cpos = le32_to_cpu(rec->e_cpos);
-		trunc_len = 0;
-		blkno = 0;
 	} else if (le32_to_cpu(rec->e_cpos) >= new_highest_cpos) {
 		/*
 		 * Truncate entire record.
@@ -7204,8 +7215,7 @@
 	    !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) ||
 	    !ocfs2_supports_inline_data(osb)) {
 		ocfs2_error(inode->i_sb,
-			    "Inline data flags for inode %llu don't agree! "
-			    "Disk: 0x%x, Memory: 0x%x, Superblock: 0x%x\n",
+			    "Inline data flags for inode %llu don't agree! Disk: 0x%x, Memory: 0x%x, Superblock: 0x%x\n",
 			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
 			    le16_to_cpu(di->i_dyn_features),
 			    OCFS2_I(inode)->ip_dyn_features,
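
Many of the ocfs2 hunks above lean on ocfs2_error() now returning an errno (a change made elsewhere in this series), which lets call sites collapse report-then-set-rc-then-goto into a single assignment. A userspace sketch of the same pattern; report_error() and its fixed -EROFS return are illustrative assumptions, not the kernel function:

#include <errno.h>
#include <stdarg.h>
#include <stdio.h>

/* Report the problem, then hand back the errno the caller should use. */
static int report_error(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	return -EROFS;
}

int main(void)
{
	int rc = 0;

	if (1 /* pretend validation failed */)
		rc = report_error("Extent block #%llu has bad signature\n",
				  123ULL);
	return rc ? 1 : 0;
}
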
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 0f5fd9d..64b11d9 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -227,7 +227,7 @@
 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
 
 	if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
-		ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag",
+		ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag\n",
 			    (unsigned long long)OCFS2_I(inode)->ip_blkno);
 		return -EROFS;
 	}
@@ -237,7 +237,7 @@
 	if (size > PAGE_CACHE_SIZE ||
 	    size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
 		ocfs2_error(inode->i_sb,
-			    "Inode %llu has with inline data has bad size: %Lu",
+			    "Inode %llu has with inline data has bad size: %Lu\n",
 			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
 			    (unsigned long long)size);
 		return -EROFS;
@@ -533,10 +533,14 @@
 
 	inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
 
+	down_read(&OCFS2_I(inode)->ip_alloc_sem);
+
 	/* This figures out the size of the next contiguous block, and
 	 * our logical offset */
 	ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
 					  &contig_blocks, &ext_flags);
+	up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
 	if (ret) {
 		mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
 		     (unsigned long long)iblock);
@@ -557,6 +561,8 @@
 
 		alloc_locked = 1;
 
+		down_write(&OCFS2_I(inode)->ip_alloc_sem);
+
 		/* fill hole, allocate blocks can't be larger than the size
 		 * of the hole */
 		clusters_to_alloc = ocfs2_clusters_for_bytes(inode->i_sb, len);
@@ -569,6 +575,7 @@
 		ret = ocfs2_extend_allocation(inode, cpos,
 				clusters_to_alloc, 0);
 		if (ret < 0) {
+			up_write(&OCFS2_I(inode)->ip_alloc_sem);
 			mlog_errno(ret);
 			goto bail;
 		}
@@ -576,11 +583,13 @@
 		ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
 				&contig_blocks, &ext_flags);
 		if (ret < 0) {
+			up_write(&OCFS2_I(inode)->ip_alloc_sem);
 			mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
 					(unsigned long long)iblock);
 			ret = -EIO;
 			goto bail;
 		}
+		up_write(&OCFS2_I(inode)->ip_alloc_sem);
 	}
 
 	/*
@@ -627,10 +636,13 @@
 		mutex_unlock(&OCFS2_I(inode)->ip_unaligned_aio);
 	}
 
-	ocfs2_iocb_clear_rw_locked(iocb);
+	/* Let rw unlock to be done later to protect append direct io write */
+	if (offset + bytes <= i_size_read(inode)) {
+		ocfs2_iocb_clear_rw_locked(iocb);
 
-	level = ocfs2_iocb_rw_locked_level(iocb);
-	ocfs2_rw_unlock(inode, level);
+		level = ocfs2_iocb_rw_locked_level(iocb);
+		ocfs2_rw_unlock(inode, level);
+	}
 }
 
 static int ocfs2_releasepage(struct page *page, gfp_t wait)
@@ -832,12 +844,17 @@
 
 		/* zeroing out the previously allocated cluster tail
 		 * that but not zeroed */
-		if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
+		if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
+			down_read(&OCFS2_I(inode)->ip_alloc_sem);
 			ret = ocfs2_direct_IO_zero_extend(osb, inode, offset,
 					zero_len_tail, cluster_align_tail);
-		else
+			up_read(&OCFS2_I(inode)->ip_alloc_sem);
+		} else {
+			down_write(&OCFS2_I(inode)->ip_alloc_sem);
 			ret = ocfs2_direct_IO_extend_no_holes(osb, inode,
 					offset);
+			up_write(&OCFS2_I(inode)->ip_alloc_sem);
+		}
 		if (ret < 0) {
 			mlog_errno(ret);
 			ocfs2_inode_unlock(inode, 1);
@@ -857,7 +874,8 @@
 	written = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
 				       offset, ocfs2_direct_IO_get_blocks,
 				       ocfs2_dio_end_io, NULL, 0);
-	if (unlikely(written < 0)) {
+	/* overwrite aio may return -EIOCBQUEUED, and it is not an error */
+	if ((written < 0) && (written != -EIOCBQUEUED)) {
 		loff_t i_size = i_size_read(inode);
 
 		if (offset + count > i_size) {
@@ -876,12 +894,14 @@
 
 					ocfs2_inode_unlock(inode, 1);
 					brelse(di_bh);
+					di_bh = NULL;
 					goto clean_orphan;
 				}
 			}
 
 			ocfs2_inode_unlock(inode, 1);
 			brelse(di_bh);
+			di_bh = NULL;
 
 			ret = jbd2_journal_force_commit(journal);
 			if (ret < 0)
@@ -936,10 +956,12 @@
 		if (tmp_ret < 0) {
 			ret = tmp_ret;
 			mlog_errno(ret);
+			brelse(di_bh);
 			goto out;
 		}
 
 		ocfs2_inode_unlock(inode, 1);
+		brelse(di_bh);
 
 		tmp_ret = jbd2_journal_force_commit(journal);
 		if (tmp_ret < 0) {
@@ -2185,10 +2207,7 @@
 		if (ret)
 			goto out_commit;
 	}
-	/*
-	 * We don't want this to fail in ocfs2_write_end(), so do it
-	 * here.
-	 */
+
 	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
 				      OCFS2_JOURNAL_ACCESS_WRITE);
 	if (ret) {
@@ -2345,7 +2364,7 @@
 			   loff_t pos, unsigned len, unsigned copied,
 			   struct page *page, void *fsdata)
 {
-	int i;
+	int i, ret;
 	unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1);
 	struct inode *inode = mapping->host;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
@@ -2354,6 +2373,14 @@
 	handle_t *handle = wc->w_handle;
 	struct page *tmppage;
 
+	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
+			OCFS2_JOURNAL_ACCESS_WRITE);
+	if (ret) {
+		copied = ret;
+		mlog_errno(ret);
+		goto out;
+	}
+
 	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
 		ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
 		goto out_write_size;
@@ -2409,6 +2436,7 @@
 	ocfs2_update_inode_fsync_trans(handle, inode, 1);
 	ocfs2_journal_dirty(handle, wc->w_di_bh);
 
+out:
 	/* unlock pages before dealloc since it needs acquiring j_trans_barrier
 	 * lock, or it will cause a deadlock since journal commit threads holds
 	 * this lock and will ask for the page lock when flushing the data.
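
The aops.c hunks wrap every extent-map lookup in ip_alloc_sem: shared (down_read) while resolving existing mappings, exclusive (down_write) while allocating to fill a hole. A userspace sketch of that reader/writer discipline, with a pthread rwlock standing in for the semaphore:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t alloc_sem = PTHREAD_RWLOCK_INITIALIZER;

static void lookup_extents(void)
{
	pthread_rwlock_rdlock(&alloc_sem);	/* down_read(ip_alloc_sem) */
	/* ... ocfs2_extent_map_get_blocks() equivalent ... */
	pthread_rwlock_unlock(&alloc_sem);
}

static void fill_hole(void)
{
	pthread_rwlock_wrlock(&alloc_sem);	/* down_write(ip_alloc_sem) */
	/* ... extend allocation, then re-look-up the mapping ... */
	pthread_rwlock_unlock(&alloc_sem);
}

int main(void)
{
	lookup_extents();
	fill_hole();
	puts("readers share the map; allocation excludes them");
	return 0;
}
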
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 1edcb14..fe50ded 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -316,6 +316,12 @@
 		bh = bhs[i];
 
 		if (!(flags & OCFS2_BH_READAHEAD)) {
+			if (status) {
+				/* Clear the rest of the buffers on error */
+				put_bh(bh);
+				bhs[i] = NULL;
+				continue;
+			}
 			/* We know this can't have changed as we hold the
 			 * owner sem. Avoid doing any work on the bh if the
 			 * journal has it. */
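
The buffer_head_io.c change makes the read path fail whole-batch: once status goes non-zero, each remaining buffer head is released and its slot cleared so callers never see partially read data. A runnable sketch of that error sweep, with malloc/free standing in for buffer-head references:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	enum { N = 4 };
	char *bufs[N];
	int status = 0, i;

	for (i = 0; i < N; i++)
		bufs[i] = malloc(16);

	for (i = 0; i < N; i++) {
		if (i == 2)
			status = -5;	/* simulate -EIO part-way through */
		if (status) {
			free(bufs[i]);	/* put_bh() equivalent */
			bufs[i] = NULL;	/* clear the slot for the caller */
			continue;
		}
		/* ... verify the successfully read bufs[i] ... */
	}

	printf("status %d, slot 3 is %s\n", status,
	       bufs[3] ? "live" : "NULL");
	for (i = 0; i < N; i++)
		free(bufs[i]);
	return 0;
}
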
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 140de3c..fa15deb 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -36,7 +36,7 @@
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/bitmap.h>
-
+#include <linux/ktime.h>
 #include "heartbeat.h"
 #include "tcp.h"
 #include "nodemanager.h"
@@ -1060,37 +1060,6 @@
 	return ret;
 }
 
-/* Subtract b from a, storing the result in a. a *must* have a larger
- * value than b. */
-static void o2hb_tv_subtract(struct timeval *a,
-			     struct timeval *b)
-{
-	/* just return 0 when a is after b */
-	if (a->tv_sec < b->tv_sec ||
-	    (a->tv_sec == b->tv_sec && a->tv_usec < b->tv_usec)) {
-		a->tv_sec = 0;
-		a->tv_usec = 0;
-		return;
-	}
-
-	a->tv_sec -= b->tv_sec;
-	a->tv_usec -= b->tv_usec;
-	while ( a->tv_usec < 0 ) {
-		a->tv_sec--;
-		a->tv_usec += 1000000;
-	}
-}
-
-static unsigned int o2hb_elapsed_msecs(struct timeval *start,
-				       struct timeval *end)
-{
-	struct timeval res = *end;
-
-	o2hb_tv_subtract(&res, start);
-
-	return res.tv_sec * 1000 + res.tv_usec / 1000;
-}
-
 /*
  * we ride the region ref that the region dir holds.  before the region
  * dir is removed and drops it ref it will wait to tear down this
@@ -1101,7 +1070,7 @@
 	int i, ret;
 	struct o2hb_region *reg = data;
 	struct o2hb_bio_wait_ctxt write_wc;
-	struct timeval before_hb, after_hb;
+	ktime_t before_hb, after_hb;
 	unsigned int elapsed_msec;
 
 	mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread running\n");
@@ -1118,18 +1087,18 @@
 		 * hr_timeout_ms between disk writes. On busy systems
 		 * this should result in a heartbeat which is less
 		 * likely to time itself out. */
-		do_gettimeofday(&before_hb);
+		before_hb = ktime_get_real();
 
 		ret = o2hb_do_disk_heartbeat(reg);
 
-		do_gettimeofday(&after_hb);
-		elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);
+		after_hb = ktime_get_real();
+
+		elapsed_msec = (unsigned int)
+				ktime_ms_delta(after_hb, before_hb);
 
 		mlog(ML_HEARTBEAT,
-		     "start = %lu.%lu, end = %lu.%lu, msec = %u, ret = %d\n",
-		     before_hb.tv_sec, (unsigned long) before_hb.tv_usec,
-		     after_hb.tv_sec, (unsigned long) after_hb.tv_usec,
-		     elapsed_msec, ret);
+		     "start = %lld, end = %lld, msec = %u, ret = %d\n",
+		     before_hb.tv64, after_hb.tv64, elapsed_msec, ret);
 
 		if (!kthread_should_stop() &&
 		    elapsed_msec < reg->hr_timeout_ms) {
@@ -1619,17 +1588,13 @@
 	struct o2hb_disk_slot *slot;
 
 	reg->hr_tmp_block = kmalloc(reg->hr_block_bytes, GFP_KERNEL);
-	if (reg->hr_tmp_block == NULL) {
-		mlog_errno(-ENOMEM);
+	if (reg->hr_tmp_block == NULL)
 		return -ENOMEM;
-	}
 
 	reg->hr_slots = kcalloc(reg->hr_blocks,
 				sizeof(struct o2hb_disk_slot), GFP_KERNEL);
-	if (reg->hr_slots == NULL) {
-		mlog_errno(-ENOMEM);
+	if (reg->hr_slots == NULL)
 		return -ENOMEM;
-	}
 
 	for(i = 0; i < reg->hr_blocks; i++) {
 		slot = &reg->hr_slots[i];
@@ -1645,17 +1610,13 @@
 
 	reg->hr_slot_data = kcalloc(reg->hr_num_pages, sizeof(struct page *),
 				    GFP_KERNEL);
-	if (!reg->hr_slot_data) {
-		mlog_errno(-ENOMEM);
+	if (!reg->hr_slot_data)
 		return -ENOMEM;
-	}
 
 	for(i = 0; i < reg->hr_num_pages; i++) {
 		page = alloc_page(GFP_KERNEL);
-		if (!page) {
-			mlog_errno(-ENOMEM);
+		if (!page)
 			return -ENOMEM;
-		}
 
 		reg->hr_slot_data[i] = page;
 
@@ -1687,10 +1648,8 @@
 	struct o2hb_disk_heartbeat_block *hb_block;
 
 	ret = o2hb_read_slots(reg, reg->hr_blocks);
-	if (ret) {
-		mlog_errno(ret);
+	if (ret)
 		goto out;
-	}
 
 	/* We only want to get an idea of the values initially in each
 	 * slot, so we do no verification - o2hb_check_slot will
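
Dropping o2hb_tv_subtract()/o2hb_elapsed_msecs() in favor of ktime works because ktime_t is a single signed nanosecond count, so the elapsed time becomes one ktime_ms_delta() call instead of hand-rolled tv_sec/tv_usec borrowing. The same arithmetic in a runnable userspace form, with clock_gettime() standing in for ktime_get_real():

#include <stdio.h>
#include <time.h>

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	long long before = now_ns();
	/* ... the disk heartbeat I/O would happen here ... */
	long long after = now_ns();

	/* one subtraction and one divide; no carry/borrow loop */
	printf("elapsed = %u msec\n",
	       (unsigned int)((after - before) / 1000000));
	return 0;
}
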
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 02878a8..ffecf89 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -480,33 +480,26 @@
 
 	trailer = ocfs2_trailer_from_bh(bh, dir->i_sb);
 	if (!OCFS2_IS_VALID_DIR_TRAILER(trailer)) {
-		rc = -EINVAL;
-		ocfs2_error(dir->i_sb,
-			    "Invalid dirblock #%llu: "
-			    "signature = %.*s\n",
-			    (unsigned long long)bh->b_blocknr, 7,
-			    trailer->db_signature);
+		rc = ocfs2_error(dir->i_sb,
+				 "Invalid dirblock #%llu: signature = %.*s\n",
+				 (unsigned long long)bh->b_blocknr, 7,
+				 trailer->db_signature);
 		goto out;
 	}
 	if (le64_to_cpu(trailer->db_blkno) != bh->b_blocknr) {
-		rc = -EINVAL;
-		ocfs2_error(dir->i_sb,
-			    "Directory block #%llu has an invalid "
-			    "db_blkno of %llu",
-			    (unsigned long long)bh->b_blocknr,
-			    (unsigned long long)le64_to_cpu(trailer->db_blkno));
+		rc = ocfs2_error(dir->i_sb,
+				 "Directory block #%llu has an invalid db_blkno of %llu\n",
+				 (unsigned long long)bh->b_blocknr,
+				 (unsigned long long)le64_to_cpu(trailer->db_blkno));
 		goto out;
 	}
 	if (le64_to_cpu(trailer->db_parent_dinode) !=
 	    OCFS2_I(dir)->ip_blkno) {
-		rc = -EINVAL;
-		ocfs2_error(dir->i_sb,
-			    "Directory block #%llu on dinode "
-			    "#%llu has an invalid parent_dinode "
-			    "of %llu",
-			    (unsigned long long)bh->b_blocknr,
-			    (unsigned long long)OCFS2_I(dir)->ip_blkno,
-			    (unsigned long long)le64_to_cpu(trailer->db_blkno));
+		rc = ocfs2_error(dir->i_sb,
+				 "Directory block #%llu on dinode #%llu has an invalid parent_dinode of %llu\n",
+				 (unsigned long long)bh->b_blocknr,
+				 (unsigned long long)OCFS2_I(dir)->ip_blkno,
+				 (unsigned long long)le64_to_cpu(trailer->db_blkno));
 		goto out;
 	}
 out:
@@ -604,14 +597,13 @@
 	}
 
 	if (!OCFS2_IS_VALID_DX_ROOT(dx_root)) {
-		ocfs2_error(sb,
-			    "Dir Index Root # %llu has bad signature %.*s",
-			    (unsigned long long)le64_to_cpu(dx_root->dr_blkno),
-			    7, dx_root->dr_signature);
-		return -EINVAL;
+		ret = ocfs2_error(sb,
+				  "Dir Index Root # %llu has bad signature %.*s\n",
+				  (unsigned long long)le64_to_cpu(dx_root->dr_blkno),
+				  7, dx_root->dr_signature);
 	}
 
-	return 0;
+	return ret;
 }
 
 static int ocfs2_read_dx_root(struct inode *dir, struct ocfs2_dinode *di,
@@ -648,12 +640,11 @@
 	}
 
 	if (!OCFS2_IS_VALID_DX_LEAF(dx_leaf)) {
-		ocfs2_error(sb, "Dir Index Leaf has bad signature %.*s",
-			    7, dx_leaf->dl_signature);
-		return -EROFS;
+		ret = ocfs2_error(sb, "Dir Index Leaf has bad signature %.*s\n",
+				  7, dx_leaf->dl_signature);
 	}
 
-	return 0;
+	return ret;
 }
 
 static int ocfs2_read_dx_leaf(struct inode *dir, u64 blkno,
@@ -812,11 +803,10 @@
 		el = &eb->h_list;
 
 		if (el->l_tree_depth) {
-			ocfs2_error(inode->i_sb,
-				    "Inode %lu has non zero tree depth in "
-				    "btree tree block %llu\n", inode->i_ino,
-				    (unsigned long long)eb_bh->b_blocknr);
-			ret = -EROFS;
+			ret = ocfs2_error(inode->i_sb,
+					  "Inode %lu has non zero tree depth in btree tree block %llu\n",
+					  inode->i_ino,
+					  (unsigned long long)eb_bh->b_blocknr);
 			goto out;
 		}
 	}
@@ -832,11 +822,11 @@
 	}
 
 	if (!found) {
-		ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
-			    "record (%u, %u, 0) in btree", inode->i_ino,
-			    le32_to_cpu(rec->e_cpos),
-			    ocfs2_rec_clusters(el, rec));
-		ret = -EROFS;
+		ret = ocfs2_error(inode->i_sb,
+				  "Inode %lu has bad extent record (%u, %u, 0) in btree\n",
+				  inode->i_ino,
+				  le32_to_cpu(rec->e_cpos),
+				  ocfs2_rec_clusters(el, rec));
 		goto out;
 	}
 
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 7df88a6..6918f30 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1465,39 +1465,46 @@
 	if (status == -ENOPROTOOPT) {
 		status = 0;
 		*response = JOIN_OK_NO_MAP;
-	} else if (packet.code == JOIN_DISALLOW ||
-		   packet.code == JOIN_OK_NO_MAP) {
-		*response = packet.code;
-	} else if (packet.code == JOIN_PROTOCOL_MISMATCH) {
-		mlog(ML_NOTICE,
-		     "This node requested DLM locking protocol %u.%u and "
-		     "filesystem locking protocol %u.%u.  At least one of "
-		     "the protocol versions on node %d is not compatible, "
-		     "disconnecting\n",
-		     dlm->dlm_locking_proto.pv_major,
-		     dlm->dlm_locking_proto.pv_minor,
-		     dlm->fs_locking_proto.pv_major,
-		     dlm->fs_locking_proto.pv_minor,
-		     node);
-		status = -EPROTO;
-		*response = packet.code;
-	} else if (packet.code == JOIN_OK) {
-		*response = packet.code;
-		/* Use the same locking protocol as the remote node */
-		dlm->dlm_locking_proto.pv_minor = packet.dlm_minor;
-		dlm->fs_locking_proto.pv_minor = packet.fs_minor;
-		mlog(0,
-		     "Node %d responds JOIN_OK with DLM locking protocol "
-		     "%u.%u and fs locking protocol %u.%u\n",
-		     node,
-		     dlm->dlm_locking_proto.pv_major,
-		     dlm->dlm_locking_proto.pv_minor,
-		     dlm->fs_locking_proto.pv_major,
-		     dlm->fs_locking_proto.pv_minor);
 	} else {
-		status = -EINVAL;
-		mlog(ML_ERROR, "invalid response %d from node %u\n",
-		     packet.code, node);
+		*response = packet.code;
+		switch (packet.code) {
+		case JOIN_DISALLOW:
+		case JOIN_OK_NO_MAP:
+			break;
+		case JOIN_PROTOCOL_MISMATCH:
+			mlog(ML_NOTICE,
+			     "This node requested DLM locking protocol %u.%u and "
+			     "filesystem locking protocol %u.%u.  At least one of "
+			     "the protocol versions on node %d is not compatible, "
+			     "disconnecting\n",
+			     dlm->dlm_locking_proto.pv_major,
+			     dlm->dlm_locking_proto.pv_minor,
+			     dlm->fs_locking_proto.pv_major,
+			     dlm->fs_locking_proto.pv_minor,
+			     node);
+			status = -EPROTO;
+			break;
+		case JOIN_OK:
+			/* Use the same locking protocol as the remote node */
+			dlm->dlm_locking_proto.pv_minor = packet.dlm_minor;
+			dlm->fs_locking_proto.pv_minor = packet.fs_minor;
+			mlog(0,
+			     "Node %d responds JOIN_OK with DLM locking protocol "
+			     "%u.%u and fs locking protocol %u.%u\n",
+			     node,
+			     dlm->dlm_locking_proto.pv_major,
+			     dlm->dlm_locking_proto.pv_minor,
+			     dlm->fs_locking_proto.pv_major,
+			     dlm->fs_locking_proto.pv_minor);
+			break;
+		default:
+			status = -EINVAL;
+			mlog(ML_ERROR, "invalid response %d from node %u\n",
+			     packet.code, node);
+			/* Reset response to JOIN_DISALLOW */
+			*response = JOIN_DISALLOW;
+			break;
+		}
 	}
 
 	mlog(0, "status %d, node %d response is %d\n", status, node,
@@ -1725,12 +1732,13 @@
 
 	o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
 			    dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
+	o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
+			    dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
+
 	status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_down);
 	if (status)
 		goto bail;
 
-	o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
-			    dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
 	status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_up);
 	if (status)
 		goto bail;
@@ -1845,8 +1853,6 @@
 					sizeof(struct dlm_exit_domain),
 					dlm_begin_exit_domain_handler,
 					dlm, NULL, &dlm->dlm_domain_handlers);
-	if (status)
-		goto bail;
 
 bail:
 	if (status)
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index fdf4b41..46b8b2b 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -498,16 +498,6 @@
 	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
 	     res->lockname.name);
 
-	spin_lock(&dlm->track_lock);
-	if (!list_empty(&res->tracking))
-		list_del_init(&res->tracking);
-	else {
-		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
-		     res->lockname.len, res->lockname.name);
-		dlm_print_one_lock_resource(res);
-	}
-	spin_unlock(&dlm->track_lock);
-
 	atomic_dec(&dlm->res_cur_count);
 
 	if (!hlist_unhashed(&res->hash_node) ||
@@ -795,8 +785,18 @@
 		dlm_lockres_grab_inflight_ref(dlm, tmpres);
 
 		spin_unlock(&tmpres->spinlock);
-		if (res)
+		if (res) {
+			spin_lock(&dlm->track_lock);
+			if (!list_empty(&res->tracking))
+				list_del_init(&res->tracking);
+			else
+				mlog(ML_ERROR, "Resource %.*s not "
+						"on the Tracking list\n",
+						res->lockname.len,
+						res->lockname.name);
+			spin_unlock(&dlm->track_lock);
 			dlm_lockres_put(res);
+		}
 		res = tmpres;
 		goto leave;
 	}
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index ce12e0b..d0e436d 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1776,7 +1776,7 @@
 				     struct dlm_migratable_lockres *mres)
 {
 	struct dlm_migratable_lock *ml;
-	struct list_head *queue, *iter;
+	struct list_head *queue;
 	struct list_head *tmpq = NULL;
 	struct dlm_lock *newlock = NULL;
 	struct dlm_lockstatus *lksb = NULL;
@@ -1821,9 +1821,7 @@
 			spin_lock(&res->spinlock);
 			for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
 				tmpq = dlm_list_idx_to_ptr(res, j);
-				list_for_each(iter, tmpq) {
-					lock = list_entry(iter,
-						  struct dlm_lock, list);
+				list_for_each_entry(lock, tmpq, list) {
 					if (lock->ml.cookie == ml->cookie)
 						break;
 					lock = NULL;
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 69aac6f..2e5e6d5 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -211,6 +211,16 @@
 
 	__dlm_unhash_lockres(dlm, res);
 
+	spin_lock(&dlm->track_lock);
+	if (!list_empty(&res->tracking))
+		list_del_init(&res->tracking);
+	else {
+		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
+				res->lockname.len, res->lockname.name);
+		__dlm_print_one_lock_resource(res);
+	}
+	spin_unlock(&dlm->track_lock);
+
 	/* lockres is not in the hash now.  drop the flag and wake up
 	 * any processes waiting in dlm_get_lock_resource. */
 	if (!master) {
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 23157e4..1c91103 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3035,8 +3035,6 @@
 	ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
 
 	osb->cconn = conn;
-
-	status = 0;
 bail:
 	if (status < 0) {
 		ocfs2_dlm_shutdown_debug(osb);
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 767370b..e4719e0 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -305,8 +305,8 @@
 
 	if (el->l_tree_depth) {
 		ocfs2_error(inode->i_sb,
-			    "Inode %lu has non zero tree depth in "
-			    "leaf block %llu\n", inode->i_ino,
+			    "Inode %lu has non zero tree depth in leaf block %llu\n",
+			    inode->i_ino,
 			    (unsigned long long)eb_bh->b_blocknr);
 		ret = -EROFS;
 		goto out;
@@ -441,8 +441,8 @@
 
 		if (el->l_tree_depth) {
 			ocfs2_error(inode->i_sb,
-				    "Inode %lu has non zero tree depth in "
-				    "leaf block %llu\n", inode->i_ino,
+				    "Inode %lu has non zero tree depth in leaf block %llu\n",
+				    inode->i_ino,
 				    (unsigned long long)eb_bh->b_blocknr);
 			ret = -EROFS;
 			goto out;
@@ -475,8 +475,9 @@
 	BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));
 
 	if (!rec->e_blkno) {
-		ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
-			    "record (%u, %u, 0)", inode->i_ino,
+		ocfs2_error(inode->i_sb,
+			    "Inode %lu has bad extent record (%u, %u, 0)\n",
+			    inode->i_ino,
 			    le32_to_cpu(rec->e_cpos),
 			    ocfs2_rec_clusters(el, rec));
 		ret = -EROFS;
@@ -564,8 +565,8 @@
 
 		if (el->l_tree_depth) {
 			ocfs2_error(inode->i_sb,
-				    "Inode %lu has non zero tree depth in "
-				    "xattr leaf block %llu\n", inode->i_ino,
+				    "Inode %lu has non zero tree depth in xattr leaf block %llu\n",
+				    inode->i_ino,
 				    (unsigned long long)eb_bh->b_blocknr);
 			ret = -EROFS;
 			goto out;
@@ -582,8 +583,9 @@
 		BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));
 
 		if (!rec->e_blkno) {
-			ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
-				    "record (%u, %u, 0) in xattr", inode->i_ino,
+			ocfs2_error(inode->i_sb,
+				    "Inode %lu has bad extent record (%u, %u, 0) in xattr\n",
+				    inode->i_ino,
 				    le32_to_cpu(rec->e_cpos),
 				    ocfs2_rec_clusters(el, rec));
 			ret = -EROFS;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 719f7f4..0e5b451 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -105,8 +105,11 @@
 			      file->f_path.dentry->d_name.len,
 			      file->f_path.dentry->d_name.name, mode);
 
-	if (file->f_mode & FMODE_WRITE)
-		dquot_initialize(inode);
+	if (file->f_mode & FMODE_WRITE) {
+		status = dquot_initialize(inode);
+		if (status)
+			goto leave;
+	}
 
 	spin_lock(&oi->ip_lock);
 
@@ -1127,6 +1130,7 @@
 int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
 {
 	int status = 0, size_change;
+	int inode_locked = 0;
 	struct inode *inode = d_inode(dentry);
 	struct super_block *sb = inode->i_sb;
 	struct ocfs2_super *osb = OCFS2_SB(sb);
@@ -1155,8 +1159,11 @@
 	if (status)
 		return status;
 
-	if (is_quota_modification(inode, attr))
-		dquot_initialize(inode);
+	if (is_quota_modification(inode, attr)) {
+		status = dquot_initialize(inode);
+		if (status)
+			return status;
+	}
 	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
 	if (size_change) {
 		status = ocfs2_rw_lock(inode, 1);
@@ -1172,6 +1179,7 @@
 			mlog_errno(status);
 		goto bail_unlock_rw;
 	}
+	inode_locked = 1;
 
 	if (size_change) {
 		status = inode_newsize_ok(inode, attr->ia_size);
@@ -1209,8 +1217,8 @@
 		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
 		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
 			transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
-			if (!transfer_to[USRQUOTA]) {
-				status = -ESRCH;
+			if (IS_ERR(transfer_to[USRQUOTA])) {
+				status = PTR_ERR(transfer_to[USRQUOTA]);
 				goto bail_unlock;
 			}
 		}
@@ -1218,8 +1226,8 @@
 		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
 		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
 			transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
-			if (!transfer_to[GRPQUOTA]) {
-				status = -ESRCH;
+			if (IS_ERR(transfer_to[GRPQUOTA])) {
+				status = PTR_ERR(transfer_to[GRPQUOTA]);
 				goto bail_unlock;
 			}
 		}
@@ -1252,7 +1260,10 @@
 bail_commit:
 	ocfs2_commit_trans(osb, handle);
 bail_unlock:
-	ocfs2_inode_unlock(inode, 1);
+	if (status) {
+		ocfs2_inode_unlock(inode, 1);
+		inode_locked = 0;
+	}
 bail_unlock_rw:
 	if (size_change)
 		ocfs2_rw_unlock(inode, 1);
@@ -1268,6 +1279,8 @@
 		if (status < 0)
 			mlog_errno(status);
 	}
+	if (inode_locked)
+		ocfs2_inode_unlock(inode, 1);
 
 	return status;
 }
@@ -2256,8 +2269,6 @@
 	ssize_t written = 0;
 	ssize_t ret;
 	size_t count = iov_iter_count(from), orig_count;
-	loff_t old_size;
-	u32 old_clusters;
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file_inode(file);
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
@@ -2265,6 +2276,8 @@
 			       OCFS2_MOUNT_COHERENCY_BUFFERED);
 	int unaligned_dio = 0;
 	int dropped_dio = 0;
+	int append_write = ((iocb->ki_pos + count) >=
+			i_size_read(inode) ? 1 : 0);
 
 	trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
 		(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -2284,8 +2297,9 @@
 	/*
 	 * Concurrent O_DIRECT writes are allowed with
 	 * mount_option "coherency=buffered".
+	 * For an append write we must take the rw lock EX.
 	 */
-	rw_level = (!direct_io || full_coherency);
+	rw_level = (!direct_io || full_coherency || append_write);
 
 	ret = ocfs2_rw_lock(inode, rw_level);
 	if (ret < 0) {
@@ -2358,13 +2372,6 @@
 		ocfs2_iocb_set_unaligned_aio(iocb);
 	}
 
-	/*
-	 * To later detect whether a journal commit for sync writes is
-	 * necessary, we sample i_size, and cluster count here.
-	 */
-	old_size = i_size_read(inode);
-	old_clusters = OCFS2_I(inode)->ip_clusters;
-
 	/* communicate with ocfs2_dio_end_io */
 	ocfs2_iocb_set_rw_locked(iocb, rw_level);
 
@@ -2372,6 +2379,20 @@
 	/* buffered aio wouldn't have proper lock coverage today */
 	BUG_ON(written == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
 
+	/*
+	 * Deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
+	 * function pointer which is called when o_direct io completes so that
+	 * it can unlock our rw lock.
+	 * Unfortunately there are error cases which call end_io and others
+	 * that don't, so we don't have to unlock the rw_lock if either an
+	 * async dio is going to do it in the future or an end_io after an
+	 * error has already done it.
+	 */
+	if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
+		rw_level = -1;
+		unaligned_dio = 0;
+	}
+
 	if (unlikely(written <= 0))
 		goto no_sync;
 
@@ -2396,21 +2417,7 @@
 	}
 
 no_sync:
-	/*
-	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
-	 * function pointer which is called when o_direct io completes so that
-	 * it can unlock our rw lock.
-	 * Unfortunately there are error cases which call end_io and others
-	 * that don't.  so we don't have to unlock the rw_lock if either an
-	 * async dio is going to do it in the future or an end_io after an
-	 * error has already done it.
-	 */
-	if ((ret == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
-		rw_level = -1;
-		unaligned_dio = 0;
-	}
-
-	if (unaligned_dio) {
+	if (unaligned_dio && ocfs2_iocb_is_unaligned_aio(iocb)) {
 		ocfs2_iocb_clear_unaligned_aio(iocb);
 		mutex_unlock(&OCFS2_I(inode)->ip_unaligned_aio);
 	}
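
The unlock-ownership rule in the relocated comment is the subtle part of this hunk: once a direct write has been submitted, exactly one party may drop the rw lock. A minimal sketch of the pattern the new placement enforces; ocfs2_rw_unlock() and ocfs2_iocb_is_rw_locked() are the real helpers, the reduced control flow around them is illustrative only:

	/* written = result of the buffered or direct write */
	if (written == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
		/*
		 * Either the async completion will unlock later, or an
		 * end_io run on an error path already has: disown it.
		 * Keyed off 'written' (not the later 'ret'), so the sync
		 * path below cannot unlock a lock it no longer owns.
		 */
		rw_level = -1;
		unaligned_dio = 0;
	}

	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);
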
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index b254416..8f87e05 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -971,6 +971,7 @@
 	int wipe, status;
 	sigset_t oldset;
 	struct buffer_head *di_bh = NULL;
+	struct ocfs2_dinode *di = NULL;
 
 	trace_ocfs2_delete_inode(inode->i_ino,
 				 (unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -1025,6 +1026,14 @@
 		goto bail_unlock_nfs_sync;
 	}
 
+	di = (struct ocfs2_dinode *)di_bh->b_data;
+	/* Skip inode deletion and wait for the dio orphan entry to be
+	 * recovered first */
+	if (unlikely(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL))) {
+		ocfs2_cleanup_delete_inode(inode, 0);
+		goto bail_unlock_inode;
+	}
+
 	/* Query the cluster. This will be the final decision made
 	 * before we go ahead and wipe the inode. */
 	status = ocfs2_query_inode_wipe(inode, di_bh, &wipe);
@@ -1191,17 +1200,19 @@
 int ocfs2_drop_inode(struct inode *inode)
 {
 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
-	int res;
 
 	trace_ocfs2_drop_inode((unsigned long long)oi->ip_blkno,
 				inode->i_nlink, oi->ip_flags);
 
-	if (oi->ip_flags & OCFS2_INODE_MAYBE_ORPHANED)
-		res = 1;
-	else
-		res = generic_drop_inode(inode);
+	assert_spin_locked(&inode->i_lock);
+	inode->i_state |= I_WILL_FREE;
+	spin_unlock(&inode->i_lock);
+	write_inode_now(inode, 1);
+	spin_lock(&inode->i_lock);
+	WARN_ON(inode->i_state & I_NEW);
+	inode->i_state &= ~I_WILL_FREE;
 
-	return res;
+	return 1;
 }
 
 /*
@@ -1350,32 +1361,32 @@
 	rc = -EINVAL;
 
 	if (!OCFS2_IS_VALID_DINODE(di)) {
-		ocfs2_error(sb, "Invalid dinode #%llu: signature = %.*s\n",
-			    (unsigned long long)bh->b_blocknr, 7,
-			    di->i_signature);
+		rc = ocfs2_error(sb, "Invalid dinode #%llu: signature = %.*s\n",
+				 (unsigned long long)bh->b_blocknr, 7,
+				 di->i_signature);
 		goto bail;
 	}
 
 	if (le64_to_cpu(di->i_blkno) != bh->b_blocknr) {
-		ocfs2_error(sb, "Invalid dinode #%llu: i_blkno is %llu\n",
-			    (unsigned long long)bh->b_blocknr,
-			    (unsigned long long)le64_to_cpu(di->i_blkno));
+		rc = ocfs2_error(sb, "Invalid dinode #%llu: i_blkno is %llu\n",
+				 (unsigned long long)bh->b_blocknr,
+				 (unsigned long long)le64_to_cpu(di->i_blkno));
 		goto bail;
 	}
 
 	if (!(di->i_flags & cpu_to_le32(OCFS2_VALID_FL))) {
-		ocfs2_error(sb,
-			    "Invalid dinode #%llu: OCFS2_VALID_FL not set\n",
-			    (unsigned long long)bh->b_blocknr);
+		rc = ocfs2_error(sb,
+				 "Invalid dinode #%llu: OCFS2_VALID_FL not set\n",
+				 (unsigned long long)bh->b_blocknr);
 		goto bail;
 	}
 
 	if (le32_to_cpu(di->i_fs_generation) !=
 	    OCFS2_SB(sb)->fs_generation) {
-		ocfs2_error(sb,
-			    "Invalid dinode #%llu: fs_generation is %u\n",
-			    (unsigned long long)bh->b_blocknr,
-			    le32_to_cpu(di->i_fs_generation));
+		rc = ocfs2_error(sb,
+				 "Invalid dinode #%llu: fs_generation is %u\n",
+				 (unsigned long long)bh->b_blocknr,
+				 le32_to_cpu(di->i_fs_generation));
 		goto bail;
 	}
 
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 5e86b24..ca3431e 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -81,8 +81,6 @@
 	tid_t i_sync_tid;
 	tid_t i_datasync_tid;
 
-	wait_queue_head_t append_dio_wq;
-
 	struct dquot *i_dquot[MAXQUOTAS];
 };
 
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 7c099f7..ff82b28 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -374,7 +374,7 @@
 		mlog_errno(PTR_ERR(handle));
 
 		if (is_journal_aborted(journal)) {
-			ocfs2_abort(osb->sb, "Detected aborted journal");
+			ocfs2_abort(osb->sb, "Detected aborted journal\n");
 			handle = ERR_PTR(-EROFS);
 		}
 	} else {
@@ -668,7 +668,23 @@
 		mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
 		mlog(ML_ERROR, "b_blocknr=%llu\n",
 		     (unsigned long long)bh->b_blocknr);
-		BUG();
+
+		lock_buffer(bh);
+		/*
+		 * A previous attempt to write this buffer head failed.
+		 * Nothing we can do but retry the write and hope for
+		 * the best.
+		 */
+		if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) {
+			clear_buffer_write_io_error(bh);
+			set_buffer_uptodate(bh);
+		}
+
+		if (!buffer_uptodate(bh)) {
+			unlock_buffer(bh);
+			return -EIO;
+		}
+		unlock_buffer(bh);
 	}
 
 	/* Set the current transaction information on the ci so
@@ -2170,6 +2186,7 @@
 		iter = oi->ip_next_orphan;
 		oi->ip_next_orphan = NULL;
 
+		mutex_lock(&inode->i_mutex);
 		ret = ocfs2_rw_lock(inode, 1);
 		if (ret < 0) {
 			mlog_errno(ret);
@@ -2193,7 +2210,9 @@
 			 * ocfs2_delete_inode. */
 			oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
 			spin_unlock(&oi->ip_lock);
-		} else if ((orphan_reco_type == ORPHAN_NEED_TRUNCATE) &&
+		}
+
+		if ((orphan_reco_type == ORPHAN_NEED_TRUNCATE) &&
 				(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL))) {
 			ret = ocfs2_truncate_file(inode, di_bh,
 					i_size_read(inode));
@@ -2206,17 +2225,16 @@
 			ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh, 0, 0);
 			if (ret)
 				mlog_errno(ret);
-
-			wake_up(&OCFS2_I(inode)->append_dio_wq);
 		} /* else if ORPHAN_NO_NEED_TRUNCATE, do nothing */
 unlock_inode:
 		ocfs2_inode_unlock(inode, 1);
+		brelse(di_bh);
+		di_bh = NULL;
 unlock_rw:
 		ocfs2_rw_unlock(inode, 1);
 next:
+		mutex_unlock(&inode->i_mutex);
 		iput(inode);
-		brelse(di_bh);
-		di_bh = NULL;
 		inode = iter;
 	}
 
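Besides the fixes, the recovery loop above now nests its locks consistently: i_mutex is taken around the cluster locks, and di_bh is released before the locks drop. Condensed, the per-inode ordering the hunk establishes looks like this sketch (real lock helpers, elided body):

	mutex_lock(&inode->i_mutex);		/* VFS serialization, outermost */
	ocfs2_rw_lock(inode, 1);		/* cluster rw lock, EX */
	ocfs2_inode_lock(inode, &di_bh, 1);	/* cluster inode lock, EX, innermost */

	/* ... truncate and/or drop the dio orphan entry ... */

	ocfs2_inode_unlock(inode, 1);
	brelse(di_bh);		/* released while still fully serialized */
	ocfs2_rw_unlock(inode, 1);
	mutex_unlock(&inode->i_mutex);
	iput(inode);
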
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 857bbbc..0a4457f 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -665,8 +665,7 @@
 #ifdef CONFIG_OCFS2_DEBUG_FS
 	if (le32_to_cpu(alloc->id1.bitmap1.i_used) !=
 	    ocfs2_local_alloc_count_bits(alloc)) {
-		ocfs2_error(osb->sb, "local alloc inode %llu says it has "
-			    "%u used bits, but a count shows %u",
+		ocfs2_error(osb->sb, "local alloc inode %llu says it has %u used bits, but a count shows %u\n",
 			    (unsigned long long)le64_to_cpu(alloc->i_blkno),
 			    le32_to_cpu(alloc->id1.bitmap1.i_used),
 			    ocfs2_local_alloc_count_bits(alloc));
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 56a768d..124471d 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -99,11 +99,9 @@
 
 	index = ocfs2_search_extent_list(el, cpos);
 	if (index == -1) {
-		ocfs2_error(inode->i_sb,
-			    "Inode %llu has an extent at cpos %u which can no "
-			    "longer be found.\n",
-			    (unsigned long long)ino, cpos);
-		ret = -EROFS;
+		ret = ocfs2_error(inode->i_sb,
+				  "Inode %llu has an extent at cpos %u which can no longer be found\n",
+				  (unsigned long long)ino, cpos);
 		goto out;
 	}
 
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 6e6abb9..b7dfac2 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -200,11 +200,12 @@
 static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
 {
 	struct inode *inode;
+	int status;
 
 	inode = new_inode(dir->i_sb);
 	if (!inode) {
 		mlog(ML_ERROR, "new_inode failed!\n");
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	/* populate as many fields early on as possible - many of
@@ -213,7 +214,10 @@
 	if (S_ISDIR(mode))
 		set_nlink(inode, 2);
 	inode_init_owner(inode, dir, mode);
-	dquot_initialize(inode);
+	status = dquot_initialize(inode);
+	if (status) {
+		iput(inode);	/* don't leak the inode new_inode() gave us */
+		return ERR_PTR(status);
+	}
+
 	return inode;
 }
 
@@ -264,7 +268,11 @@
 			  (unsigned long long)OCFS2_I(dir)->ip_blkno,
 			  (unsigned long)dev, mode);
 
-	dquot_initialize(dir);
+	status = dquot_initialize(dir);
+	if (status) {
+		mlog_errno(status);
+		return status;
+	}
 
 	/* get our super block */
 	osb = OCFS2_SB(dir->i_sb);
@@ -311,8 +319,9 @@
 	}
 
 	inode = ocfs2_get_init_inode(dir, mode);
-	if (!inode) {
-		status = -ENOMEM;
+	if (IS_ERR(inode)) {
+		status = PTR_ERR(inode);
+		inode = NULL;
 		mlog_errno(status);
 		goto leave;
 	}
@@ -708,7 +717,11 @@
 	if (S_ISDIR(inode->i_mode))
 		return -EPERM;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err) {
+		mlog_errno(err);
+		return err;
+	}
 
 	err = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
 			&parent_fe_bh, dir, 0);
@@ -896,7 +909,11 @@
 			   (unsigned long long)OCFS2_I(dir)->ip_blkno,
 			   (unsigned long long)OCFS2_I(inode)->ip_blkno);
 
-	dquot_initialize(dir);
+	status = dquot_initialize(dir);
+	if (status) {
+		mlog_errno(status);
+		return status;
+	}
 
 	BUG_ON(d_inode(dentry->d_parent) != dir);
 
@@ -1018,11 +1035,6 @@
 	if (handle)
 		ocfs2_commit_trans(osb, handle);
 
-	if (child_locked)
-		ocfs2_inode_unlock(inode, 1);
-
-	ocfs2_inode_unlock(dir, 1);
-
 	if (orphan_dir) {
 		/* This was locked for us in ocfs2_prepare_orphan_dir() */
 		ocfs2_inode_unlock(orphan_dir, 1);
@@ -1030,6 +1042,11 @@
 		iput(orphan_dir);
 	}
 
+	if (child_locked)
+		ocfs2_inode_unlock(inode, 1);
+
+	ocfs2_inode_unlock(dir, 1);
+
 	brelse(fe_bh);
 	brelse(parent_node_bh);
 
@@ -1230,8 +1247,16 @@
 			   old_dentry->d_name.len, old_dentry->d_name.name,
 			   new_dentry->d_name.len, new_dentry->d_name.name);
 
-	dquot_initialize(old_dir);
-	dquot_initialize(new_dir);
+	status = dquot_initialize(old_dir);
+	if (status) {
+		mlog_errno(status);
+		goto bail;
+	}
+	status = dquot_initialize(new_dir);
+	if (status) {
+		mlog_errno(status);
+		goto bail;
+	}
 
 	osb = OCFS2_SB(old_dir->i_sb);
 
@@ -1284,6 +1309,11 @@
 	}
 	parents_locked = 1;
 
+	if (!new_dir->i_nlink) {
+		status = -EACCES;
+		goto bail;
+	}
+
 	/* make sure both dirs have bhs
 	 * get an extra ref on old_dir_bh if old==new */
 	if (!new_dir_bh) {
@@ -1544,12 +1574,25 @@
 	status = ocfs2_find_entry(old_dentry->d_name.name,
 				  old_dentry->d_name.len, old_dir,
 				  &old_entry_lookup);
-	if (status)
+	if (status) {
+		if (!is_journal_aborted(osb->journal->j_journal)) {
+			ocfs2_error(osb->sb, "new entry %.*s is added, but old entry %.*s "
+					"is not deleted.",
+					new_dentry->d_name.len, new_dentry->d_name.name,
+					old_dentry->d_name.len, old_dentry->d_name.name);
+		}
 		goto bail;
+	}
 
 	status = ocfs2_delete_entry(handle, old_dir, &old_entry_lookup);
 	if (status < 0) {
 		mlog_errno(status);
+		if (!is_journal_aborted(osb->journal->j_journal)) {
+			ocfs2_error(osb->sb, "new entry %.*s is added, but old entry %.*s "
+					"is not deleted.",
+					new_dentry->d_name.len, new_dentry->d_name.name,
+					old_dentry->d_name.len, old_dentry->d_name.name);
+		}
 		goto bail;
 	}
 
@@ -1608,21 +1651,9 @@
 	ocfs2_dentry_move(old_dentry, new_dentry, old_dir, new_dir);
 	status = 0;
 bail:
-	if (rename_lock)
-		ocfs2_rename_unlock(osb);
-
 	if (handle)
 		ocfs2_commit_trans(osb, handle);
 
-	if (parents_locked)
-		ocfs2_double_unlock(old_dir, new_dir);
-
-	if (old_child_locked)
-		ocfs2_inode_unlock(old_inode, 1);
-
-	if (new_child_locked)
-		ocfs2_inode_unlock(new_inode, 1);
-
 	if (orphan_dir) {
 		/* This was locked for us in ocfs2_prepare_orphan_dir() */
 		ocfs2_inode_unlock(orphan_dir, 1);
@@ -1630,6 +1661,18 @@
 		iput(orphan_dir);
 	}
 
+	if (new_child_locked)
+		ocfs2_inode_unlock(new_inode, 1);
+
+	if (old_child_locked)
+		ocfs2_inode_unlock(old_inode, 1);
+
+	if (parents_locked)
+		ocfs2_double_unlock(old_dir, new_dir);
+
+	if (rename_lock)
+		ocfs2_rename_unlock(osb);
+
 	if (new_inode)
 		sync_mapping_buffers(old_inode->i_mapping);
 
@@ -1786,7 +1829,11 @@
 	trace_ocfs2_symlink_begin(dir, dentry, symname,
 				  dentry->d_name.len, dentry->d_name.name);
 
-	dquot_initialize(dir);
+	status = dquot_initialize(dir);
+	if (status) {
+		mlog_errno(status);
+		goto bail;
+	}
 
 	sb = dir->i_sb;
 	osb = OCFS2_SB(sb);
@@ -1831,8 +1878,9 @@
 	}
 
 	inode = ocfs2_get_init_inode(dir, S_IFLNK | S_IRWXUGO);
-	if (!inode) {
-		status = -ENOMEM;
+	if (IS_ERR(inode)) {
+		status = PTR_ERR(inode);
+		inode = NULL;
 		mlog_errno(status);
 		goto bail;
 	}
@@ -2485,8 +2533,9 @@
 	}
 
 	inode = ocfs2_get_init_inode(dir, mode);
-	if (!inode) {
-		status = -ENOMEM;
+	if (IS_ERR(inode)) {
+		status = PTR_ERR(inode);
+		inode = NULL;
 		mlog_errno(status);
 		goto leave;
 	}
@@ -2570,27 +2619,6 @@
 	return status;
 }
 
-static int ocfs2_dio_orphan_recovered(struct inode *inode)
-{
-	int ret;
-	struct buffer_head *di_bh = NULL;
-	struct ocfs2_dinode *di = NULL;
-
-	ret = ocfs2_inode_lock(inode, &di_bh, 1);
-	if (ret < 0) {
-		mlog_errno(ret);
-		return 0;
-	}
-
-	di = (struct ocfs2_dinode *) di_bh->b_data;
-	ret = !(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL));
-	ocfs2_inode_unlock(inode, 1);
-	brelse(di_bh);
-
-	return ret;
-}
-
-#define OCFS2_DIO_ORPHANED_FL_CHECK_INTERVAL 10000
 int ocfs2_add_inode_to_orphan(struct ocfs2_super *osb,
 	struct inode *inode)
 {
@@ -2602,7 +2630,6 @@
 	handle_t *handle = NULL;
 	struct ocfs2_dinode *di = NULL;
 
-restart:
 	status = ocfs2_inode_lock(inode, &di_bh, 1);
 	if (status < 0) {
 		mlog_errno(status);
@@ -2612,15 +2639,21 @@
 	di = (struct ocfs2_dinode *) di_bh->b_data;
 	/*
 	 * Another append dio crashed?
-	 * If so, wait for recovery first.
+	 * If so, manually recover it first.
 	 */
 	if (unlikely(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL))) {
-		ocfs2_inode_unlock(inode, 1);
-		brelse(di_bh);
-		wait_event_interruptible_timeout(OCFS2_I(inode)->append_dio_wq,
-				ocfs2_dio_orphan_recovered(inode),
-				msecs_to_jiffies(OCFS2_DIO_ORPHANED_FL_CHECK_INTERVAL));
-		goto restart;
+		status = ocfs2_truncate_file(inode, di_bh, i_size_read(inode));
+		if (status < 0) {
+			if (status != -ENOSPC)
+				mlog_errno(status);
+			goto bail_unlock_inode;
+		}
+
+		status = ocfs2_del_inode_from_orphan(osb, inode, di_bh, 0, 0);
+		if (status < 0) {
+			mlog_errno(status);
+			goto bail_unlock_inode;
+		}
 	}
 
 	status = ocfs2_prepare_orphan_dir(osb, &orphan_dir_inode,
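
ocfs2_get_init_inode() can now fail for two different reasons (inode allocation and quota initialization), so it reports which one via ERR_PTR() and every call site in this file converts from a NULL test to the same shape; resetting the pointer keeps the shared cleanup labels from ever seeing an error-valued inode. The converted idiom, sketched:

	inode = ocfs2_get_init_inode(dir, mode);
	if (IS_ERR(inode)) {
		status = PTR_ERR(inode);	/* -ENOMEM or a dquot error */
		inode = NULL;			/* keeps the 'leave:'/'bail:' paths safe */
		mlog_errno(status);
		goto leave;
	}
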
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 690ddc6..7a01262 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -286,6 +286,8 @@
 	OCFS2_MOUNT_HB_GLOBAL = 1 << 14, /* Global heartbeat */
 
 	OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT = 1 << 15,  /* Journal Async Commit */
+	OCFS2_MOUNT_ERRORS_CONT = 1 << 16, /* Return EIO to the calling process on error */
+	OCFS2_MOUNT_ERRORS_ROFS = 1 << 17, /* Change filesystem to read-only on error */
 };
 
 #define OCFS2_OSB_SOFT_RO	0x0001
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 3d0b63d..8a54fd8 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -138,8 +138,7 @@
 
 	if (i_size_read(inode) >> inode->i_sb->s_blocksize_bits <= v_block) {
 		ocfs2_error(inode->i_sb,
-			    "Quota file %llu is probably corrupted! Requested "
-			    "to read block %Lu but file has size only %Lu\n",
+			    "Quota file %llu is probably corrupted! Requested to read block %Lu but file has size only %Lu\n",
 			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
 			    (unsigned long long)v_block,
 			    (unsigned long long)i_size_read(inode));
@@ -499,8 +498,8 @@
 			dquot = dqget(sb,
 				      make_kqid(&init_user_ns, type,
 						le64_to_cpu(dqblk->dqb_id)));
-			if (!dquot) {
-				status = -EIO;
+			if (IS_ERR(dquot)) {
+				status = PTR_ERR(dquot);
 				mlog(ML_ERROR, "Failed to get quota structure "
 				     "for id %u, type %d. Cannot finish quota "
 				     "file recovery.\n",
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index b69dd14..e5d57cd 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -102,32 +102,30 @@
 
 
 	if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
-		ocfs2_error(sb,
-			    "Refcount block #%llu has bad signature %.*s",
-			    (unsigned long long)bh->b_blocknr, 7,
-			    rb->rf_signature);
-		return -EINVAL;
+		rc = ocfs2_error(sb,
+				 "Refcount block #%llu has bad signature %.*s\n",
+				 (unsigned long long)bh->b_blocknr, 7,
+				 rb->rf_signature);
+		goto out;
 	}
 
 	if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
-		ocfs2_error(sb,
-			    "Refcount block #%llu has an invalid rf_blkno "
-			    "of %llu",
-			    (unsigned long long)bh->b_blocknr,
-			    (unsigned long long)le64_to_cpu(rb->rf_blkno));
-		return -EINVAL;
+		rc = ocfs2_error(sb,
+				 "Refcount block #%llu has an invalid rf_blkno of %llu\n",
+				 (unsigned long long)bh->b_blocknr,
+				 (unsigned long long)le64_to_cpu(rb->rf_blkno));
+		goto out;
 	}
 
 	if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
-		ocfs2_error(sb,
-			    "Refcount block #%llu has an invalid "
-			    "rf_fs_generation of #%u",
-			    (unsigned long long)bh->b_blocknr,
-			    le32_to_cpu(rb->rf_fs_generation));
-		return -EINVAL;
+		rc = ocfs2_error(sb,
+				 "Refcount block #%llu has an invalid rf_fs_generation of #%u\n",
+				 (unsigned long long)bh->b_blocknr,
+				 le32_to_cpu(rb->rf_fs_generation));
+		goto out;
 	}
-
-	return 0;
+out:
+	return rc;
 }
 
 static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
@@ -1102,12 +1100,10 @@
 		el = &eb->h_list;
 
 		if (el->l_tree_depth) {
-			ocfs2_error(sb,
-			"refcount tree %llu has non zero tree "
-			"depth in leaf btree tree block %llu\n",
-			(unsigned long long)ocfs2_metadata_cache_owner(ci),
-			(unsigned long long)eb_bh->b_blocknr);
-			ret = -EROFS;
+			ret = ocfs2_error(sb,
+					  "refcount tree %llu has non zero tree depth in leaf btree tree block %llu\n",
+					  (unsigned long long)ocfs2_metadata_cache_owner(ci),
+					  (unsigned long long)eb_bh->b_blocknr);
 			goto out;
 		}
 	}
@@ -2359,10 +2355,8 @@
 					   cpos, len, phys);
 
 	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
-		ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
-			    "tree, but the feature bit is not set in the "
-			    "super block.", inode->i_ino);
-		ret = -EROFS;
+		ret = ocfs2_error(inode->i_sb, "Inode %lu want to use refcount tree, but the feature bit is not set in the super block\n",
+				  inode->i_ino);
 		goto out;
 	}
 
@@ -2545,10 +2539,8 @@
 	u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);
 
 	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
-		ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
-			    "tree, but the feature bit is not set in the "
-			    "super block.", inode->i_ino);
-		ret = -EROFS;
+		ret = ocfs2_error(inode->i_sb, "Inode %lu want to use refcount tree, but the feature bit is not set in the super block\n",
+				  inode->i_ino);
 		goto out;
 	}
 
@@ -2672,11 +2664,10 @@
 		el = &eb->h_list;
 
 		if (el->l_tree_depth) {
-			ocfs2_error(inode->i_sb,
-				    "Inode %lu has non zero tree depth in "
-				    "leaf block %llu\n", inode->i_ino,
-				    (unsigned long long)eb_bh->b_blocknr);
-			ret = -EROFS;
+			ret = ocfs2_error(inode->i_sb,
+					  "Inode %lu has non zero tree depth in leaf block %llu\n",
+					  inode->i_ino,
+					  (unsigned long long)eb_bh->b_blocknr);
 			goto out;
 		}
 	}
@@ -3106,11 +3097,9 @@
 
 	index = ocfs2_search_extent_list(el, cpos);
 	if (index == -1) {
-		ocfs2_error(sb,
-			    "Inode %llu has an extent at cpos %u which can no "
-			    "longer be found.\n",
-			    (unsigned long long)ino, cpos);
-		ret = -EROFS;
+		ret = ocfs2_error(sb,
+				  "Inode %llu has an extent at cpos %u which can no longer be found\n",
+				  (unsigned long long)ino, cpos);
 		goto out;
 	}
 
@@ -3376,10 +3365,8 @@
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 
 	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
-		ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
-			    "tree, but the feature bit is not set in the "
-			    "super block.", inode->i_ino);
-		return -EROFS;
+		return ocfs2_error(inode->i_sb, "Inode %lu want to use refcount tree, but the feature bit is not set in the super block\n",
+				   inode->i_ino);
 	}
 
 	ocfs2_init_dealloc_ctxt(&context->dealloc);
@@ -4419,8 +4406,9 @@
 	}
 
 	mutex_lock(&inode->i_mutex);
-	dquot_initialize(dir);
-	error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
+	error = dquot_initialize(dir);
+	if (!error)
+		error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
 	mutex_unlock(&inode->i_mutex);
 	if (!error)
 		fsnotify_create(dir, new_dentry);
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 4479029..d83d260 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -149,10 +149,8 @@
 	brelse(ac->ac_bh);
 	ac->ac_bh = NULL;
 	ac->ac_resv = NULL;
-	if (ac->ac_find_loc_priv) {
-		kfree(ac->ac_find_loc_priv);
-		ac->ac_find_loc_priv = NULL;
-	}
+	kfree(ac->ac_find_loc_priv);
+	ac->ac_find_loc_priv = NULL;
 }
 
 void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
@@ -167,12 +165,12 @@
 }
 
 #define do_error(fmt, ...)						\
-	do{								\
-		if (resize)					\
-			mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__);	\
-		else							\
-			ocfs2_error(sb, fmt, ##__VA_ARGS__);		\
-	} while (0)
+do {									\
+	if (resize)							\
+		mlog(ML_ERROR, fmt, ##__VA_ARGS__);			\
+	else								\
+		return ocfs2_error(sb, fmt, ##__VA_ARGS__);		\
+} while (0)
 
 static int ocfs2_validate_gd_self(struct super_block *sb,
 				  struct buffer_head *bh,
@@ -181,44 +179,35 @@
 	struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
 
 	if (!OCFS2_IS_VALID_GROUP_DESC(gd)) {
-		do_error("Group descriptor #%llu has bad signature %.*s",
+		do_error("Group descriptor #%llu has bad signature %.*s\n",
 			 (unsigned long long)bh->b_blocknr, 7,
 			 gd->bg_signature);
-		return -EINVAL;
 	}
 
 	if (le64_to_cpu(gd->bg_blkno) != bh->b_blocknr) {
-		do_error("Group descriptor #%llu has an invalid bg_blkno "
-			 "of %llu",
+		do_error("Group descriptor #%llu has an invalid bg_blkno of %llu\n",
 			 (unsigned long long)bh->b_blocknr,
 			 (unsigned long long)le64_to_cpu(gd->bg_blkno));
-		return -EINVAL;
 	}
 
 	if (le32_to_cpu(gd->bg_generation) != OCFS2_SB(sb)->fs_generation) {
-		do_error("Group descriptor #%llu has an invalid "
-			 "fs_generation of #%u",
+		do_error("Group descriptor #%llu has an invalid fs_generation of #%u\n",
 			 (unsigned long long)bh->b_blocknr,
 			 le32_to_cpu(gd->bg_generation));
-		return -EINVAL;
 	}
 
 	if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) {
-		do_error("Group descriptor #%llu has bit count %u but "
-			 "claims that %u are free",
+		do_error("Group descriptor #%llu has bit count %u but claims that %u are free\n",
 			 (unsigned long long)bh->b_blocknr,
 			 le16_to_cpu(gd->bg_bits),
 			 le16_to_cpu(gd->bg_free_bits_count));
-		return -EINVAL;
 	}
 
 	if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) {
-		do_error("Group descriptor #%llu has bit count %u but "
-			 "max bitmap bits of %u",
+		do_error("Group descriptor #%llu has bit count %u but max bitmap bits of %u\n",
 			 (unsigned long long)bh->b_blocknr,
 			 le16_to_cpu(gd->bg_bits),
 			 8 * le16_to_cpu(gd->bg_size));
-		return -EINVAL;
 	}
 
 	return 0;
@@ -233,20 +222,17 @@
 	struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
 
 	if (di->i_blkno != gd->bg_parent_dinode) {
-		do_error("Group descriptor #%llu has bad parent "
-			 "pointer (%llu, expected %llu)",
+		do_error("Group descriptor #%llu has bad parent pointer (%llu, expected %llu)\n",
 			 (unsigned long long)bh->b_blocknr,
 			 (unsigned long long)le64_to_cpu(gd->bg_parent_dinode),
 			 (unsigned long long)le64_to_cpu(di->i_blkno));
-		return -EINVAL;
 	}
 
 	max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc);
 	if (le16_to_cpu(gd->bg_bits) > max_bits) {
-		do_error("Group descriptor #%llu has bit count of %u",
+		do_error("Group descriptor #%llu has bit count of %u\n",
 			 (unsigned long long)bh->b_blocknr,
 			 le16_to_cpu(gd->bg_bits));
-		return -EINVAL;
 	}
 
 	/* In resize, we may meet the case bg_chain == cl_next_free_rec. */
@@ -254,10 +240,9 @@
 	     le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) ||
 	    ((le16_to_cpu(gd->bg_chain) ==
 	     le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) && !resize)) {
-		do_error("Group descriptor #%llu has bad chain %u",
+		do_error("Group descriptor #%llu has bad chain %u\n",
 			 (unsigned long long)bh->b_blocknr,
 			 le16_to_cpu(gd->bg_chain));
-		return -EINVAL;
 	}
 
 	return 0;
@@ -384,11 +369,10 @@
 	struct super_block * sb = alloc_inode->i_sb;
 
 	if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) {
-		ocfs2_error(alloc_inode->i_sb, "group block (%llu) != "
-			    "b_blocknr (%llu)",
-			    (unsigned long long)group_blkno,
-			    (unsigned long long) bg_bh->b_blocknr);
-		status = -EIO;
+		status = ocfs2_error(alloc_inode->i_sb,
+				     "group block (%llu) != b_blocknr (%llu)\n",
+				     (unsigned long long)group_blkno,
+				     (unsigned long long) bg_bh->b_blocknr);
 		goto bail;
 	}
 
@@ -834,9 +818,9 @@
 	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
 
 	if (!(fe->i_flags & cpu_to_le32(OCFS2_CHAIN_FL))) {
-		ocfs2_error(alloc_inode->i_sb, "Invalid chain allocator %llu",
-			    (unsigned long long)le64_to_cpu(fe->i_blkno));
-		status = -EIO;
+		status = ocfs2_error(alloc_inode->i_sb,
+				     "Invalid chain allocator %llu\n",
+				     (unsigned long long)le64_to_cpu(fe->i_blkno));
 		goto bail;
 	}
 
@@ -1370,12 +1354,11 @@
 
 	le16_add_cpu(&bg->bg_free_bits_count, -num_bits);
 	if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
-		ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit"
-			    " count %u but claims %u are freed. num_bits %d",
-			    (unsigned long long)le64_to_cpu(bg->bg_blkno),
-			    le16_to_cpu(bg->bg_bits),
-			    le16_to_cpu(bg->bg_free_bits_count), num_bits);
-		return -EROFS;
+		return ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit count %u but claims %u are freed. num_bits %d\n",
+				   (unsigned long long)le64_to_cpu(bg->bg_blkno),
+				   le16_to_cpu(bg->bg_bits),
+				   le16_to_cpu(bg->bg_free_bits_count),
+				   num_bits);
 	}
 	while(num_bits--)
 		ocfs2_set_bit(bit_off++, bitmap);
@@ -1905,13 +1888,11 @@
 
 	if (le32_to_cpu(fe->id1.bitmap1.i_used) >=
 	    le32_to_cpu(fe->id1.bitmap1.i_total)) {
-		ocfs2_error(ac->ac_inode->i_sb,
-			    "Chain allocator dinode %llu has %u used "
-			    "bits but only %u total.",
-			    (unsigned long long)le64_to_cpu(fe->i_blkno),
-			    le32_to_cpu(fe->id1.bitmap1.i_used),
-			    le32_to_cpu(fe->id1.bitmap1.i_total));
-		status = -EIO;
+		status = ocfs2_error(ac->ac_inode->i_sb,
+				     "Chain allocator dinode %llu has %u used bits but only %u total\n",
+				     (unsigned long long)le64_to_cpu(fe->i_blkno),
+				     le32_to_cpu(fe->id1.bitmap1.i_used),
+				     le32_to_cpu(fe->id1.bitmap1.i_total));
 		goto bail;
 	}
 
@@ -2429,12 +2410,11 @@
 	}
 	le16_add_cpu(&bg->bg_free_bits_count, num_bits);
 	if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
-		ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit"
-			    " count %u but claims %u are freed. num_bits %d",
-			    (unsigned long long)le64_to_cpu(bg->bg_blkno),
-			    le16_to_cpu(bg->bg_bits),
-			    le16_to_cpu(bg->bg_free_bits_count), num_bits);
-		return -EROFS;
+		return ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit count %u but claims %u are freed. num_bits %d\n",
+				   (unsigned long long)le64_to_cpu(bg->bg_blkno),
+				   le16_to_cpu(bg->bg_bits),
+				   le16_to_cpu(bg->bg_free_bits_count),
+				   num_bits);
 	}
 
 	if (undo_fn)
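
Note the behavior hidden in the reworked do_error(): in the non-resize case it now returns ocfs2_error()'s status straight out of the enclosing validator, which is why every explicit 'return -EINVAL;' disappears, while in the resize case it only logs and execution falls through to the next check. Expanded by hand, the first converted check reads roughly as follows:

	if (!OCFS2_IS_VALID_GROUP_DESC(gd)) {
		if (resize)
			mlog(ML_ERROR, "Group descriptor #%llu has bad signature %.*s\n",
			     (unsigned long long)bh->b_blocknr, 7,
			     gd->bg_signature);
		else
			return ocfs2_error(sb, "Group descriptor #%llu has bad signature %.*s\n",
					   (unsigned long long)bh->b_blocknr, 7,
					   gd->bg_signature);
	}
	/* resize: fall through; the validator can still return 0 */
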
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 403c566..2de4c8a 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -192,6 +192,7 @@
 	Opt_resv_level,
 	Opt_dir_resv_level,
 	Opt_journal_async_commit,
+	Opt_err_cont,
 	Opt_err,
 };
 
@@ -224,6 +225,7 @@
 	{Opt_resv_level, "resv_level=%u"},
 	{Opt_dir_resv_level, "dir_resv_level=%u"},
 	{Opt_journal_async_commit, "journal_async_commit"},
+	{Opt_err_cont, "errors=continue"},
 	{Opt_err, NULL}
 };
 
@@ -1330,10 +1332,19 @@
 			mopt->mount_opt |= OCFS2_MOUNT_NOINTR;
 			break;
 		case Opt_err_panic:
+			mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_CONT;
+			mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_ROFS;
 			mopt->mount_opt |= OCFS2_MOUNT_ERRORS_PANIC;
 			break;
 		case Opt_err_ro:
+			mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_CONT;
 			mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC;
+			mopt->mount_opt |= OCFS2_MOUNT_ERRORS_ROFS;
+			break;
+		case Opt_err_cont:
+			mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_ROFS;
+			mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC;
+			mopt->mount_opt |= OCFS2_MOUNT_ERRORS_CONT;
 			break;
 		case Opt_data_ordered:
 			mopt->mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK;
@@ -1530,6 +1541,8 @@
 
 	if (opts & OCFS2_MOUNT_ERRORS_PANIC)
 		seq_printf(s, ",errors=panic");
+	else if (opts & OCFS2_MOUNT_ERRORS_CONT)
+		seq_printf(s, ",errors=continue");
 	else
 		seq_printf(s, ",errors=remount-ro");
 
@@ -1550,8 +1563,8 @@
 		seq_printf(s, ",localflocks,");
 
 	if (osb->osb_cluster_stack[0])
-		seq_printf(s, ",cluster_stack=%.*s", OCFS2_STACK_LABEL_LEN,
-			   osb->osb_cluster_stack);
+		seq_show_option_n(s, "cluster_stack", osb->osb_cluster_stack,
+				  OCFS2_STACK_LABEL_LEN);
 	if (opts & OCFS2_MOUNT_USRQUOTA)
 		seq_printf(s, ",usrquota");
 	if (opts & OCFS2_MOUNT_GRPQUOTA)
@@ -1746,8 +1759,6 @@
 	ocfs2_lock_res_init_once(&oi->ip_inode_lockres);
 	ocfs2_lock_res_init_once(&oi->ip_open_lockres);
 
-	init_waitqueue_head(&oi->append_dio_wq);
-
 	ocfs2_metadata_cache_init(INODE_CACHE(&oi->vfs_inode),
 				  &ocfs2_inode_caching_ops);
 
@@ -2541,31 +2552,43 @@
 	memset(osb, 0, sizeof(struct ocfs2_super));
 }
 
-/* Put OCFS2 into a readonly state, or (if the user specifies it),
- * panic(). We do not support continue-on-error operation. */
-static void ocfs2_handle_error(struct super_block *sb)
+/* Depending on the mount option passed, perform one of the following:
+ * put OCFS2 into a readonly state (the default),
+ * return EIO so that only the calling process errs, or
+ * panic().
+ */
+static int ocfs2_handle_error(struct super_block *sb)
 {
 	struct ocfs2_super *osb = OCFS2_SB(sb);
-
-	if (osb->s_mount_opt & OCFS2_MOUNT_ERRORS_PANIC)
-		panic("OCFS2: (device %s): panic forced after error\n",
-		      sb->s_id);
+	int rv = 0;
 
 	ocfs2_set_osb_flag(osb, OCFS2_OSB_ERROR_FS);
+	pr_crit("On-disk corruption discovered. "
+		"Please run fsck.ocfs2 once the filesystem is unmounted.\n");
 
-	if (sb->s_flags & MS_RDONLY &&
-	    (ocfs2_is_soft_readonly(osb) ||
-	     ocfs2_is_hard_readonly(osb)))
-		return;
+	if (osb->s_mount_opt & OCFS2_MOUNT_ERRORS_PANIC) {
+		panic("OCFS2: (device %s): panic forced after error\n",
+		      sb->s_id);
+	} else if (osb->s_mount_opt & OCFS2_MOUNT_ERRORS_CONT) {
+		pr_crit("OCFS2: Returning error to the calling process.\n");
+		rv = -EIO;
+	} else { /* default option */
+		rv = -EROFS;
+		if (sb->s_flags & MS_RDONLY &&
+				(ocfs2_is_soft_readonly(osb) ||
+				 ocfs2_is_hard_readonly(osb)))
+			return rv;
 
-	printk(KERN_CRIT "File system is now read-only due to the potential "
-	       "of on-disk corruption. Please run fsck.ocfs2 once the file "
-	       "system is unmounted.\n");
-	sb->s_flags |= MS_RDONLY;
-	ocfs2_set_ro_flag(osb, 0);
+		pr_crit("OCFS2: File system is now read-only.\n");
+		sb->s_flags |= MS_RDONLY;
+		ocfs2_set_ro_flag(osb, 0);
+	}
+
+	return rv;
 }
 
-void __ocfs2_error(struct super_block *sb, const char *function,
+int __ocfs2_error(struct super_block *sb, const char *function,
 		  const char *fmt, ...)
 {
 	struct va_format vaf;
@@ -2577,12 +2600,12 @@
 
 	/* Not using mlog here because we want to show the actual
 	 * function the error came from. */
-	printk(KERN_CRIT "OCFS2: ERROR (device %s): %s: %pV\n",
+	printk(KERN_CRIT "OCFS2: ERROR (device %s): %s: %pV",
 	       sb->s_id, function, &vaf);
 
 	va_end(args);
 
-	ocfs2_handle_error(sb);
+	return ocfs2_handle_error(sb);
 }
 
 /* Handle critical errors. This is intentionally more drastic than
@@ -2599,7 +2622,7 @@
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
-	printk(KERN_CRIT "OCFS2: abort (device %s): %s: %pV\n",
+	printk(KERN_CRIT "OCFS2: abort (device %s): %s: %pV",
 	       sb->s_id, function, &vaf);
 
 	va_end(args);
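
Taken together with the parser changes above, which clear the other two bits before setting one, the errors= handling reduces to a small policy table. A condensed sketch of what ocfs2_handle_error() now returns (the full function also short-circuits when the filesystem is already read-only):

	if (osb->s_mount_opt & OCFS2_MOUNT_ERRORS_PANIC)
		panic("OCFS2: (device %s): panic forced after error\n", sb->s_id);
	else if (osb->s_mount_opt & OCFS2_MOUNT_ERRORS_CONT)
		rv = -EIO;	/* errors=continue: only the caller fails */
	else
		rv = -EROFS;	/* default: remount read-only */
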
diff --git a/fs/ocfs2/super.h b/fs/ocfs2/super.h
index 74ff74c..b477d0b 100644
--- a/fs/ocfs2/super.h
+++ b/fs/ocfs2/super.h
@@ -32,16 +32,18 @@
 				  int node_num);
 
 __printf(3, 4)
-void __ocfs2_error(struct super_block *sb, const char *function,
+int __ocfs2_error(struct super_block *sb, const char *function,
 		   const char *fmt, ...);
 
-#define ocfs2_error(sb, fmt, args...) __ocfs2_error(sb, __PRETTY_FUNCTION__, fmt, ##args)
+#define ocfs2_error(sb, fmt, ...)					\
+	__ocfs2_error(sb, __PRETTY_FUNCTION__, fmt, ##__VA_ARGS__)
 
 __printf(3, 4)
 void __ocfs2_abort(struct super_block *sb, const char *function,
 		   const char *fmt, ...);
 
-#define ocfs2_abort(sb, fmt, args...) __ocfs2_abort(sb, __PRETTY_FUNCTION__, fmt, ##args)
+#define ocfs2_abort(sb, fmt, ...)					\
+	__ocfs2_abort(sb, __PRETTY_FUNCTION__, fmt, ##__VA_ARGS__)
 
 /*
  * Void signal blockers, because in-kernel sigprocmask() only fails
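
With __ocfs2_error() returning the verdict of ocfs2_handle_error(), the dozens of call sites converted in this series collapse from log, set errno, goto into a single assignment, and the errno they propagate automatically tracks the mount policy (-EROFS by default, -EIO under errors=continue). The converted shape, as seen in the extent-map and refcount-tree hunks:

	if (el->l_tree_depth) {
		ret = ocfs2_error(inode->i_sb,
				  "Inode %lu has non zero tree depth in leaf block %llu\n",
				  inode->i_ino,
				  (unsigned long long)eb_bh->b_blocknr);
		goto out;
	}
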
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 889f379..ebfdea7 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -499,30 +499,24 @@
 	 */
 
 	if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) {
-		ocfs2_error(sb,
-			    "Extended attribute block #%llu has bad "
-			    "signature %.*s",
-			    (unsigned long long)bh->b_blocknr, 7,
-			    xb->xb_signature);
-		return -EINVAL;
+		return ocfs2_error(sb,
+				   "Extended attribute block #%llu has bad signature %.*s\n",
+				   (unsigned long long)bh->b_blocknr, 7,
+				   xb->xb_signature);
 	}
 
 	if (le64_to_cpu(xb->xb_blkno) != bh->b_blocknr) {
-		ocfs2_error(sb,
-			    "Extended attribute block #%llu has an "
-			    "invalid xb_blkno of %llu",
-			    (unsigned long long)bh->b_blocknr,
-			    (unsigned long long)le64_to_cpu(xb->xb_blkno));
-		return -EINVAL;
+		return ocfs2_error(sb,
+				   "Extended attribute block #%llu has an invalid xb_blkno of %llu\n",
+				   (unsigned long long)bh->b_blocknr,
+				   (unsigned long long)le64_to_cpu(xb->xb_blkno));
 	}
 
 	if (le32_to_cpu(xb->xb_fs_generation) != OCFS2_SB(sb)->fs_generation) {
-		ocfs2_error(sb,
-			    "Extended attribute block #%llu has an invalid "
-			    "xb_fs_generation of #%u",
-			    (unsigned long long)bh->b_blocknr,
-			    le32_to_cpu(xb->xb_fs_generation));
-		return -EINVAL;
+		return ocfs2_error(sb,
+				   "Extended attribute block #%llu has an invalid xb_fs_generation of #%u\n",
+				   (unsigned long long)bh->b_blocknr,
+				   le32_to_cpu(xb->xb_fs_generation));
 	}
 
 	return 0;
@@ -3694,11 +3688,10 @@
 		el = &eb->h_list;
 
 		if (el->l_tree_depth) {
-			ocfs2_error(inode->i_sb,
-				    "Inode %lu has non zero tree depth in "
-				    "xattr tree block %llu\n", inode->i_ino,
-				    (unsigned long long)eb_bh->b_blocknr);
-			ret = -EROFS;
+			ret = ocfs2_error(inode->i_sb,
+					  "Inode %lu has non zero tree depth in xattr tree block %llu\n",
+					  inode->i_ino,
+					  (unsigned long long)eb_bh->b_blocknr);
 			goto out;
 		}
 	}
@@ -3713,11 +3706,10 @@
 	}
 
 	if (!e_blkno) {
-		ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
-			    "record (%u, %u, 0) in xattr", inode->i_ino,
-			    le32_to_cpu(rec->e_cpos),
-			    ocfs2_rec_clusters(el, rec));
-		ret = -EROFS;
+		ret = ocfs2_error(inode->i_sb, "Inode %lu has bad extent record (%u, %u, 0) in xattr\n",
+				  inode->i_ino,
+				  le32_to_cpu(rec->e_cpos),
+				  ocfs2_rec_clusters(el, rec));
 		goto out;
 	}
 
@@ -7334,6 +7326,9 @@
 	const size_t prefix_len = XATTR_TRUSTED_PREFIX_LEN;
 	const size_t total_len = prefix_len + name_len + 1;
 
+	if (!capable(CAP_SYS_ADMIN))
+		return 0;
+
 	if (list && total_len <= list_size) {
 		memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len);
 		memcpy(list + prefix_len, name, name_len);
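
The capability check makes ocfs2's trusted.* namespace match other filesystems: for an unprivileged caller the list handler contributes nothing, so listxattr(2) silently omits those names instead of failing. An illustrative userspace consequence (not part of this patch):

	/* Without CAP_SYS_ADMIN, no "trusted.*" entries show up here. */
	char buf[1024];
	ssize_t n = listxattr("/mnt/ocfs2/file", buf, sizeof(buf));
	/* buf holds e.g. "user.foo\0security.bar\0"; actually reading a
	 * trusted.* name still requires CAP_SYS_ADMIN, as before. */
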
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 7466ff3..79073d6 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -588,10 +588,10 @@
 	struct super_block *sb = dentry->d_sb;
 	struct ovl_fs *ufs = sb->s_fs_info;
 
-	seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir);
+	seq_show_option(m, "lowerdir", ufs->config.lowerdir);
 	if (ufs->config.upperdir) {
-		seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
-		seq_printf(m, ",workdir=%s", ufs->config.workdir);
+		seq_show_option(m, "upperdir", ufs->config.upperdir);
+		seq_show_option(m, "workdir", ufs->config.workdir);
 	}
 	return 0;
 }
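
These seq_show_option() conversions (here and in the ocfs2 and reiserfs super.c hunks) are an escaping fix rather than a cleanup: printing a mount option value with a bare %s lets a crafted path forge extra fields in /proc/mounts, while seq_show_option() escapes the delimiter characters. A hypothetical hostile value shows the difference:

	/* lower layer mounted from a directory named "/tmp/a,upperdir=/evil" */
	seq_printf(m, ",lowerdir=%s", path);	/* old: emits a bogus upperdir= option */
	seq_show_option(m, "lowerdir", path);	/* new: the comma is emitted escaped */
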
diff --git a/fs/proc/array.c b/fs/proc/array.c
index ce065cf..f60f012 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -308,7 +308,8 @@
 static inline void task_cap(struct seq_file *m, struct task_struct *p)
 {
 	const struct cred *cred;
-	kernel_cap_t cap_inheritable, cap_permitted, cap_effective, cap_bset;
+	kernel_cap_t cap_inheritable, cap_permitted, cap_effective,
+			cap_bset, cap_ambient;
 
 	rcu_read_lock();
 	cred = __task_cred(p);
@@ -316,12 +317,14 @@
 	cap_permitted	= cred->cap_permitted;
 	cap_effective	= cred->cap_effective;
 	cap_bset	= cred->cap_bset;
+	cap_ambient	= cred->cap_ambient;
 	rcu_read_unlock();
 
 	render_cap_t(m, "CapInh:\t", &cap_inheritable);
 	render_cap_t(m, "CapPrm:\t", &cap_permitted);
 	render_cap_t(m, "CapEff:\t", &cap_effective);
 	render_cap_t(m, "CapBnd:\t", &cap_bset);
+	render_cap_t(m, "CapAmb:\t", &cap_ambient);
 }
 
 static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ca1e091..3b4d825 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -597,6 +597,8 @@
 		[ilog2(VM_HUGEPAGE)]	= "hg",
 		[ilog2(VM_NOHUGEPAGE)]	= "nh",
 		[ilog2(VM_MERGEABLE)]	= "mg",
+		[ilog2(VM_UFFD_MISSING)]= "um",
+		[ilog2(VM_UFFD_WP)]	= "uw",
 	};
 	size_t i;
 
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 20d1f74..fed66e2c9 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -247,7 +247,7 @@
 EXPORT_SYMBOL(dqstats);
 
 static qsize_t inode_get_rsv_space(struct inode *inode);
-static void __dquot_initialize(struct inode *inode, int type);
+static int __dquot_initialize(struct inode *inode, int type);
 
 static inline unsigned int
 hashfn(const struct super_block *sb, struct kqid qid)
@@ -832,16 +832,17 @@
 struct dquot *dqget(struct super_block *sb, struct kqid qid)
 {
 	unsigned int hashent = hashfn(sb, qid);
-	struct dquot *dquot = NULL, *empty = NULL;
+	struct dquot *dquot, *empty = NULL;
 
         if (!sb_has_quota_active(sb, qid.type))
-		return NULL;
+		return ERR_PTR(-ESRCH);
 we_slept:
 	spin_lock(&dq_list_lock);
 	spin_lock(&dq_state_lock);
 	if (!sb_has_quota_active(sb, qid.type)) {
 		spin_unlock(&dq_state_lock);
 		spin_unlock(&dq_list_lock);
+		dquot = ERR_PTR(-ESRCH);
 		goto out;
 	}
 	spin_unlock(&dq_state_lock);
@@ -876,11 +877,15 @@
 	 * already finished or it will be canceled due to dq_count > 1 test */
 	wait_on_dquot(dquot);
 	/* Read the dquot / allocate space in quota file */
-	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
-	    sb->dq_op->acquire_dquot(dquot) < 0) {
-		dqput(dquot);
-		dquot = NULL;
-		goto out;
+	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+		int err;
+
+		err = sb->dq_op->acquire_dquot(dquot);
+		if (err < 0) {
+			dqput(dquot);
+			dquot = ERR_PTR(err);
+			goto out;
+		}
 	}
 #ifdef CONFIG_QUOTA_DEBUG
 	BUG_ON(!dquot->dq_sb);	/* Has somebody invalidated entry under us? */
@@ -1390,15 +1395,16 @@
  * It is better to call this function outside of any transaction as it
  * might need a lot of space in journal for dquot structure allocation.
  */
-static void __dquot_initialize(struct inode *inode, int type)
+static int __dquot_initialize(struct inode *inode, int type)
 {
 	int cnt, init_needed = 0;
 	struct dquot **dquots, *got[MAXQUOTAS];
 	struct super_block *sb = inode->i_sb;
 	qsize_t rsv;
+	int ret = 0;
 
 	if (!dquot_active(inode))
-		return;
+		return 0;
 
 	dquots = i_dquot(inode);
 
@@ -1407,6 +1413,7 @@
 		struct kqid qid;
 		kprojid_t projid;
 		int rc;
+		struct dquot *dquot;
 
 		got[cnt] = NULL;
 		if (type != -1 && cnt != type)
@@ -1438,16 +1445,25 @@
 			qid = make_kqid_projid(projid);
 			break;
 		}
-		got[cnt] = dqget(sb, qid);
+		dquot = dqget(sb, qid);
+		if (IS_ERR(dquot)) {
+			/* We raced with somebody turning quotas off... */
+			if (PTR_ERR(dquot) != -ESRCH) {
+				ret = PTR_ERR(dquot);
+				goto out_put;
+			}
+			dquot = NULL;
+		}
+		got[cnt] = dquot;
 	}
 
 	/* All required i_dquot has been initialized */
 	if (!init_needed)
-		return;
+		return 0;
 
 	spin_lock(&dq_data_lock);
 	if (IS_NOQUOTA(inode))
-		goto out_err;
+		goto out_lock;
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		if (type != -1 && cnt != type)
 			continue;
@@ -1469,15 +1485,18 @@
 				dquot_resv_space(dquots[cnt], rsv);
 		}
 	}
-out_err:
+out_lock:
 	spin_unlock(&dq_data_lock);
+out_put:
 	/* Drop unused references */
 	dqput_all(got);
+
+	return ret;
 }
 
-void dquot_initialize(struct inode *inode)
+int dquot_initialize(struct inode *inode)
 {
-	__dquot_initialize(inode, -1);
+	return __dquot_initialize(inode, -1);
 }
 EXPORT_SYMBOL(dquot_initialize);
 
@@ -1961,18 +1980,37 @@
 int dquot_transfer(struct inode *inode, struct iattr *iattr)
 {
 	struct dquot *transfer_to[MAXQUOTAS] = {};
+	struct dquot *dquot;
 	struct super_block *sb = inode->i_sb;
 	int ret;
 
 	if (!dquot_active(inode))
 		return 0;
 
-	if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid))
-		transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(iattr->ia_uid));
-	if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))
-		transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(iattr->ia_gid));
-
+	if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) {
+		dquot = dqget(sb, make_kqid_uid(iattr->ia_uid));
+		if (IS_ERR(dquot)) {
+			if (PTR_ERR(dquot) != -ESRCH) {
+				ret = PTR_ERR(dquot);
+				goto out_put;
+			}
+			dquot = NULL;
+		}
+		transfer_to[USRQUOTA] = dquot;
+	}
+	if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)) {
+		dquot = dqget(sb, make_kqid_gid(iattr->ia_gid));
+		if (IS_ERR(dquot)) {
+			if (PTR_ERR(dquot) != -ESRCH) {
+				ret = PTR_ERR(dquot);
+				goto out_put;
+			}
+			dquot = NULL;
+		}
+		transfer_to[GRPQUOTA] = dquot;
+	}
 	ret = __dquot_transfer(inode, transfer_to);
+out_put:
 	dqput_all(transfer_to);
 	return ret;
 }
@@ -2518,8 +2556,8 @@
 	struct dquot *dquot;
 
 	dquot = dqget(sb, qid);
-	if (!dquot)
-		return -ESRCH;
+	if (IS_ERR(dquot))
+		return PTR_ERR(dquot);
 	do_get_dqblk(dquot, di);
 	dqput(dquot);
 
@@ -2631,8 +2669,8 @@
 	int rc;
 
 	dquot = dqget(sb, qid);
-	if (!dquot) {
-		rc = -ESRCH;
+	if (IS_ERR(dquot)) {
+		rc = PTR_ERR(dquot);
 		goto out;
 	}
 	rc = do_set_dqblk(dquot, di);
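
Two conventions change together in this file and ripple through every filesystem touched by the series: dqget() returns ERR_PTR() instead of NULL so that "quotas are off" (-ESRCH) can be told apart from a real failure such as an I/O error in ->acquire_dquot(), and dquot_initialize()/__dquot_initialize() propagate that failure instead of swallowing it. The caller pattern, sketched with an illustrative kqid:

	struct dquot *dquot = dqget(sb, make_kqid_uid(uid));

	if (IS_ERR(dquot)) {
		if (PTR_ERR(dquot) != -ESRCH)
			return PTR_ERR(dquot);	/* real error: propagate */
		dquot = NULL;			/* raced with quotaoff: not fatal */
	}
	/* ... use dquot, which may legitimately be NULL ... */
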
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 86ded73..3746367 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -141,9 +141,9 @@
 	if (tstate->flags & QCI_ROOT_SQUASH)
 		uinfo.dqi_flags |= DQF_ROOT_SQUASH;
 	uinfo.dqi_valid = IIF_ALL;
-	if (!ret && copy_to_user(addr, &uinfo, sizeof(uinfo)))
+	if (copy_to_user(addr, &uinfo, sizeof(uinfo)))
 		return -EFAULT;
-	return ret;
+	return 0;
 }
 
 static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index f6f2fba..3d8e7e6 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -3319,8 +3319,11 @@
 	/* must be turned off for recursive notify_change calls */
 	ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
 
-	if (is_quota_modification(inode, attr))
-		dquot_initialize(inode);
+	if (is_quota_modification(inode, attr)) {
+		error = dquot_initialize(inode);
+		if (error)
+			return error;
+	}
 	reiserfs_write_lock(inode->i_sb);
 	if (attr->ia_valid & ATTR_SIZE) {
 		/*
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index b55a074..5f1c9c2 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -613,8 +613,7 @@
 	 * we have to set uid and gid here
 	 */
 	inode_init_owner(inode, dir, mode);
-	dquot_initialize(inode);
-	return 0;
+	return dquot_initialize(inode);
 }
 
 static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
@@ -633,12 +632,18 @@
 	struct reiserfs_transaction_handle th;
 	struct reiserfs_security_handle security;
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 	if (!(inode = new_inode(dir->i_sb))) {
 		return -ENOMEM;
 	}
-	new_inode_init(inode, dir, mode);
+	retval = new_inode_init(inode, dir, mode);
+	if (retval) {
+		drop_new_inode(inode);
+		return retval;
+	}
 
 	jbegin_count += reiserfs_cache_default_acl(dir);
 	retval = reiserfs_security_init(dir, inode, &dentry->d_name, &security);
@@ -710,12 +715,18 @@
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 	if (!(inode = new_inode(dir->i_sb))) {
 		return -ENOMEM;
 	}
-	new_inode_init(inode, dir, mode);
+	retval = new_inode_init(inode, dir, mode);
+	if (retval) {
+		drop_new_inode(inode);
+		return retval;
+	}
 
 	jbegin_count += reiserfs_cache_default_acl(dir);
 	retval = reiserfs_security_init(dir, inode, &dentry->d_name, &security);
@@ -787,7 +798,9 @@
 	    2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) +
 		 REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb));
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
 	/*
@@ -800,7 +813,11 @@
 	if (!(inode = new_inode(dir->i_sb))) {
 		return -ENOMEM;
 	}
-	new_inode_init(inode, dir, mode);
+	retval = new_inode_init(inode, dir, mode);
+	if (retval) {
+		drop_new_inode(inode);
+		return retval;
+	}
 
 	jbegin_count += reiserfs_cache_default_acl(dir);
 	retval = reiserfs_security_init(dir, inode, &dentry->d_name, &security);
@@ -899,7 +916,9 @@
 	    JOURNAL_PER_BALANCE_CNT * 2 + 2 +
 	    4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 	reiserfs_write_lock(dir->i_sb);
 	retval = journal_begin(&th, dir->i_sb, jbegin_count);
@@ -985,7 +1004,9 @@
 	int jbegin_count;
 	unsigned long savelink;
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 	inode = d_inode(dentry);
 
@@ -1095,12 +1116,18 @@
 	    2 * (REISERFS_QUOTA_INIT_BLOCKS(parent_dir->i_sb) +
 		 REISERFS_QUOTA_TRANS_BLOCKS(parent_dir->i_sb));
 
-	dquot_initialize(parent_dir);
+	retval = dquot_initialize(parent_dir);
+	if (retval)
+		return retval;
 
 	if (!(inode = new_inode(parent_dir->i_sb))) {
 		return -ENOMEM;
 	}
-	new_inode_init(inode, parent_dir, mode);
+	retval = new_inode_init(inode, parent_dir, mode);
+	if (retval) {
+		drop_new_inode(inode);
+		return retval;
+	}
 
 	retval = reiserfs_security_init(parent_dir, inode, &dentry->d_name,
 					&security);
@@ -1184,7 +1211,9 @@
 	    JOURNAL_PER_BALANCE_CNT * 3 +
 	    2 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 	reiserfs_write_lock(dir->i_sb);
 	if (inode->i_nlink >= REISERFS_LINK_MAX) {
@@ -1308,8 +1337,12 @@
 	    JOURNAL_PER_BALANCE_CNT * 3 + 5 +
 	    4 * REISERFS_QUOTA_TRANS_BLOCKS(old_dir->i_sb);
 
-	dquot_initialize(old_dir);
-	dquot_initialize(new_dir);
+	retval = dquot_initialize(old_dir);
+	if (retval)
+		return retval;
+	retval = dquot_initialize(new_dir);
+	if (retval)
+		return retval;
 
 	old_inode = d_inode(old_dentry);
 	new_dentry_inode = d_inode(new_dentry);
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 0e4cf72..4a62fe8 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -714,18 +714,20 @@
 		seq_puts(seq, ",acl");
 
 	if (REISERFS_SB(s)->s_jdev)
-		seq_printf(seq, ",jdev=%s", REISERFS_SB(s)->s_jdev);
+		seq_show_option(seq, "jdev", REISERFS_SB(s)->s_jdev);
 
 	if (journal->j_max_commit_age != journal->j_default_max_commit_age)
 		seq_printf(seq, ",commit=%d", journal->j_max_commit_age);
 
 #ifdef CONFIG_QUOTA
 	if (REISERFS_SB(s)->s_qf_names[USRQUOTA])
-		seq_printf(seq, ",usrjquota=%s", REISERFS_SB(s)->s_qf_names[USRQUOTA]);
+		seq_show_option(seq, "usrjquota",
+				REISERFS_SB(s)->s_qf_names[USRQUOTA]);
 	else if (opts & (1 << REISERFS_USRQUOTA))
 		seq_puts(seq, ",usrquota");
 	if (REISERFS_SB(s)->s_qf_names[GRPQUOTA])
-		seq_printf(seq, ",grpjquota=%s", REISERFS_SB(s)->s_qf_names[GRPQUOTA]);
+		seq_show_option(seq, "grpjquota",
+				REISERFS_SB(s)->s_qf_names[GRPQUOTA]);
 	else if (opts & (1 << REISERFS_GRPQUOTA))
 		seq_puts(seq, ",grpquota");
 	if (REISERFS_SB(s)->s_jquota_fmt) {
diff --git a/fs/udf/super.c b/fs/udf/super.c
index b96f190..81155b9 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -2070,6 +2070,7 @@
 	struct udf_options uopt;
 	struct kernel_lb_addr rootdir, fileset;
 	struct udf_sb_info *sbi;
+	bool lvid_open = false;
 
 	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
 	uopt.uid = INVALID_UID;
@@ -2216,8 +2217,10 @@
 			 le16_to_cpu(ts.year), ts.month, ts.day,
 			 ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
 	}
-	if (!(sb->s_flags & MS_RDONLY))
+	if (!(sb->s_flags & MS_RDONLY)) {
 		udf_open_lvid(sb);
+		lvid_open = true;
+	}
 
 	/* Assign the root inode */
 	/* assign inodes by physical block number */
@@ -2248,7 +2251,7 @@
 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
 		unload_nls(sbi->s_nls_map);
 #endif
-	if (!(sb->s_flags & MS_RDONLY))
+	if (lvid_open)
 		udf_close_lvid(sb);
 	brelse(sbi->s_lvid_bh);
 	udf_sb_free_partitions(sb);
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
new file mode 100644
index 0000000..634e676
--- /dev/null
+++ b/fs/userfaultfd.c
@@ -0,0 +1,1330 @@
+/*
+ *  fs/userfaultfd.c
+ *
+ *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
+ *  Copyright (C) 2008-2009 Red Hat, Inc.
+ *  Copyright (C) 2015  Red Hat, Inc.
+ *
+ *  This work is licensed under the terms of the GNU GPL, version 2. See
+ *  the COPYING file in the top-level directory.
+ *
+ *  Some part derived from fs/eventfd.c (anon inode setup) and
+ *  mm/ksm.c (mm hashing).
+ */
+
+#include <linux/hashtable.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/file.h>
+#include <linux/bug.h>
+#include <linux/anon_inodes.h>
+#include <linux/syscalls.h>
+#include <linux/userfaultfd_k.h>
+#include <linux/mempolicy.h>
+#include <linux/ioctl.h>
+#include <linux/security.h>
+
+static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
+
+enum userfaultfd_state {
+	UFFD_STATE_WAIT_API,
+	UFFD_STATE_RUNNING,
+};
+
+/*
+ * Start with fault_pending_wqh and fault_wqh so they're more likely
+ * to be in the same cacheline.
+ */
+struct userfaultfd_ctx {
+	/* waitqueue head for the pending (i.e. not read) userfaults */
+	wait_queue_head_t fault_pending_wqh;
+	/* waitqueue head for the userfaults */
+	wait_queue_head_t fault_wqh;
+	/* waitqueue head for the pseudo fd to wakeup poll/read */
+	wait_queue_head_t fd_wqh;
+	/* a refile sequence protected by fault_pending_wqh lock */
+	struct seqcount refile_seq;
+	/* pseudo fd refcounting */
+	atomic_t refcount;
+	/* userfaultfd syscall flags */
+	unsigned int flags;
+	/* state machine */
+	enum userfaultfd_state state;
+	/* released */
+	bool released;
+	/* mm with one or more vmas attached to this userfaultfd_ctx */
+	struct mm_struct *mm;
+};
+
+struct userfaultfd_wait_queue {
+	struct uffd_msg msg;
+	wait_queue_t wq;
+	struct userfaultfd_ctx *ctx;
+};
+
+struct userfaultfd_wake_range {
+	unsigned long start;
+	unsigned long len;
+};
+
+static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
+				     int wake_flags, void *key)
+{
+	struct userfaultfd_wake_range *range = key;
+	int ret;
+	struct userfaultfd_wait_queue *uwq;
+	unsigned long start, len;
+
+	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
+	ret = 0;
+	/* len == 0 means wake all */
+	start = range->start;
+	len = range->len;
+	if (len && (start > uwq->msg.arg.pagefault.address ||
+		    start + len <= uwq->msg.arg.pagefault.address))
+		goto out;
+	ret = wake_up_state(wq->private, mode);
+	if (ret)
+		/*
+		 * Wake only once, autoremove behavior.
+		 *
+		 * After the effect of list_del_init is visible to the
+		 * other CPUs, the waitqueue may disappear from under
+		 * us, see the !list_empty_careful() in
+		 * handle_userfault(). try_to_wake_up() has an
+		 * implicit smp_mb__before_spinlock, and the
+		 * wq->private is read before calling the extern
+		 * function "wake_up_state" (which in turns calls
+		 * try_to_wake_up). While the spin_lock;spin_unlock;
+		 * wouldn't be enough, the smp_mb__before_spinlock is
+		 * enough to avoid an explicit smp_mb() here.
+		 */
+		list_del_init(&wq->task_list);
+out:
+	return ret;
+}
+
+/**
+ * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
+ * context.
+ * @ctx: [in] Pointer to the userfaultfd context.
+ *
+ * BUG()s if the reference count has already dropped to zero.
+ */
+static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
+{
+	if (!atomic_inc_not_zero(&ctx->refcount))
+		BUG();
+}
+
+/**
+ * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
+ * context.
+ * @ctx: [in] Pointer to userfaultfd context.
+ *
+ * The userfaultfd context reference must have been previously acquired
+ * with userfaultfd_ctx_get().
+ */
+static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
+{
+	if (atomic_dec_and_test(&ctx->refcount)) {
+		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
+		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
+		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
+		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
+		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
+		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
+		mmput(ctx->mm);
+		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
+	}
+}
+
+static inline void msg_init(struct uffd_msg *msg)
+{
+	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
+	/*
+	 * Must use memset to zero out the padding or kernel data is
+	 * leaked to userland.
+	 */
+	memset(msg, 0, sizeof(struct uffd_msg));
+}
+
+static inline struct uffd_msg userfault_msg(unsigned long address,
+					    unsigned int flags,
+					    unsigned long reason)
+{
+	struct uffd_msg msg;
+	msg_init(&msg);
+	msg.event = UFFD_EVENT_PAGEFAULT;
+	msg.arg.pagefault.address = address;
+	if (flags & FAULT_FLAG_WRITE)
+		/*
+		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WRITE was set in the
+		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WRITE
+		 * was not set in a UFFD_EVENT_PAGEFAULT, it means it
+		 * was a read fault; otherwise, if set, it means it's
+		 * a write fault.
+		 */
+		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
+	if (reason & VM_UFFD_WP)
+		/*
+		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
+		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WP was
+		 * not set in a UFFD_EVENT_PAGEFAULT, it means it was
+		 * a missing fault; otherwise, if set, it means it's a
+		 * write protect fault.
+		 */
+		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
+	return msg;
+}
+
+/*
+ * Re-verify that the pagetables still warrant a userfault after having
+ * registered into the fault_pending_wqh. This avoids userland having to
+ * UFFDIO_WAKE any userfault that has already been resolved, if
+ * userfaultfd_read and UFFDIO_COPY|ZEROPAGE are being run simultaneously
+ * on two different threads.
+ */
+static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
+					 unsigned long address,
+					 unsigned long flags,
+					 unsigned long reason)
+{
+	struct mm_struct *mm = ctx->mm;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd, _pmd;
+	pte_t *pte;
+	bool ret = true;
+
+	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
+		goto out;
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		goto out;
+	pmd = pmd_offset(pud, address);
+	/*
+	 * READ_ONCE must function as a barrier with narrower scope
+	 * and it must be equivalent to:
+	 *	_pmd = *pmd; barrier();
+	 *
+	 * This is to deal with the instability (as in
+	 * pmd_trans_unstable) of the pmd.
+	 */
+	_pmd = READ_ONCE(*pmd);
+	if (!pmd_present(_pmd))
+		goto out;
+
+	ret = false;
+	if (pmd_trans_huge(_pmd))
+		goto out;
+
+	/*
+	 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
+	 * and use the standard pte_offset_map() instead of parsing _pmd.
+	 */
+	pte = pte_offset_map(pmd, address);
+	/*
+	 * Lockless access: we're in a wait_event so it's ok if it
+	 * changes under us.
+	 */
+	if (pte_none(*pte))
+		ret = true;
+	pte_unmap(pte);
+
+out:
+	return ret;
+}
+
+/*
+ * The locking rules involved in returning VM_FAULT_RETRY depending on
+ * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
+ * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
 * recommendation in __lock_page_or_retry is no overstatement.
+ *
+ * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_sem must be released
+ * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
+ * not set.
+ *
+ * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
+ * set, VM_FAULT_RETRY can still be returned if and only if a fatal
+ * signal is pending, and the mmap_sem must be released before
+ * returning it.
+ */
+int handle_userfault(struct vm_area_struct *vma, unsigned long address,
+		     unsigned int flags, unsigned long reason)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	struct userfaultfd_ctx *ctx;
+	struct userfaultfd_wait_queue uwq;
+	int ret;
+	bool must_wait, return_to_userland;
+
+	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+
+	ret = VM_FAULT_SIGBUS;
+	ctx = vma->vm_userfaultfd_ctx.ctx;
+	if (!ctx)
+		goto out;
+
+	BUG_ON(ctx->mm != mm);
+
+	VM_BUG_ON(reason & ~(VM_UFFD_MISSING|VM_UFFD_WP));
+	VM_BUG_ON(!(reason & VM_UFFD_MISSING) ^ !!(reason & VM_UFFD_WP));
+
+	/*
+	 * If it's already released, don't get it. This avoids looping
+	 * in __get_user_pages if userfaultfd_release waits on the
+	 * caller of handle_userfault to release the mmap_sem.
+	 */
+	if (unlikely(ACCESS_ONCE(ctx->released)))
+		goto out;
+
+	/*
+	 * Check that we can return VM_FAULT_RETRY.
+	 *
+	 * NOTE: it should become possible to return VM_FAULT_RETRY
+	 * even if FAULT_FLAG_TRIED is set without leading to gup()
+	 * -EBUSY failures, if the userfaultfd is to be extended for
+	 * VM_UFFD_WP tracking and we intend to arm the userfault
+	 * without first stopping userland access to the memory. For
+	 * VM_UFFD_MISSING userfaults this is enough for now.
+	 */
+	if (unlikely(!(flags & FAULT_FLAG_ALLOW_RETRY))) {
+		/*
+		 * Validate the invariant that nowait must allow retry
+		 * to be sure not to return SIGBUS erroneously on
+		 * nowait invocations.
+		 */
+		BUG_ON(flags & FAULT_FLAG_RETRY_NOWAIT);
+#ifdef CONFIG_DEBUG_VM
+		if (printk_ratelimit()) {
+			printk(KERN_WARNING
+			       "FAULT_FLAG_ALLOW_RETRY missing %x\n", flags);
+			dump_stack();
+		}
+#endif
+		goto out;
+	}
+
+	/*
+	 * Handle nowait, not much to do other than tell it to retry
+	 * and wait.
+	 */
+	ret = VM_FAULT_RETRY;
+	if (flags & FAULT_FLAG_RETRY_NOWAIT)
+		goto out;
+
+	/* take the reference before dropping the mmap_sem */
+	userfaultfd_ctx_get(ctx);
+
+	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
+	uwq.wq.private = current;
+	uwq.msg = userfault_msg(address, flags, reason);
+	uwq.ctx = ctx;
+
+	return_to_userland = (flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
+		(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
+
+	spin_lock(&ctx->fault_pending_wqh.lock);
+	/*
+	 * After the __add_wait_queue the uwq is visible to userland
+	 * through poll/read().
+	 */
+	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
+	/*
+	 * The smp_mb() after __set_current_state prevents the reads
+	 * following the spin_unlock from happening before the list_add in
+	 * __add_wait_queue.
+	 */
+	set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
+			  TASK_KILLABLE);
+	spin_unlock(&ctx->fault_pending_wqh.lock);
+
+	must_wait = userfaultfd_must_wait(ctx, address, flags, reason);
+	up_read(&mm->mmap_sem);
+
+	if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
+		   (return_to_userland ? !signal_pending(current) :
+		    !fatal_signal_pending(current)))) {
+		wake_up_poll(&ctx->fd_wqh, POLLIN);
+		schedule();
+		ret |= VM_FAULT_MAJOR;
+	}
+
+	__set_current_state(TASK_RUNNING);
+
+	if (return_to_userland) {
+		if (signal_pending(current) &&
+		    !fatal_signal_pending(current)) {
+			/*
+			 * If we got a SIGSTOP or SIGCONT and this is
+			 * a normal userland page fault, just let
+			 * userland return so the signal will be
+			 * handled and gdb debugging works.  The page
+			 * fault code immediately after we return from
+			 * this function is going to release the
+			 * mmap_sem, without otherwise depending on it
+			 * (unlike gup would if we were not to return
+			 * VM_FAULT_RETRY).
+			 *
+			 * If a fatal signal is pending we still take
+			 * the streamlined VM_FAULT_RETRY failure path
+			 * and there's no need to retake the mmap_sem
+			 * in such case.
+			 */
+			down_read(&mm->mmap_sem);
+			ret = 0;
+		}
+	}
+
+	/*
+	 * Here we race with the list_del; list_add in
+	 * userfaultfd_ctx_read(); however, because we don't ever run
+	 * list_del_init() to refile across the two lists, the prev
+	 * and next pointers will never point to self. list_add also
+	 * would never let either of the two pointers point to
+	 * self. So list_empty_careful won't risk seeing both pointers
+	 * pointing to self at any time during the list refile. The
+	 * only case where list_del_init() is called is the full
+	 * removal in the wake function and there we don't re-list_add
+	 * and it's fine not to block on the spinlock. The uwq on this
+	 * kernel stack can be released after the list_del_init.
+	 */
+	if (!list_empty_careful(&uwq.wq.task_list)) {
+		spin_lock(&ctx->fault_pending_wqh.lock);
+		/*
+		 * No need of list_del_init(), the uwq on the stack
+		 * will be freed shortly anyway.
+		 */
+		list_del(&uwq.wq.task_list);
+		spin_unlock(&ctx->fault_pending_wqh.lock);
+	}
+
+	/*
+	 * ctx may go away after this if the userfault pseudo fd is
+	 * already released.
+	 */
+	userfaultfd_ctx_put(ctx);
+
+out:
+	return ret;
+}
+
+static int userfaultfd_release(struct inode *inode, struct file *file)
+{
+	struct userfaultfd_ctx *ctx = file->private_data;
+	struct mm_struct *mm = ctx->mm;
+	struct vm_area_struct *vma, *prev;
+	/* len == 0 means wake all */
+	struct userfaultfd_wake_range range = { .len = 0, };
+	unsigned long new_flags;
+
+	ACCESS_ONCE(ctx->released) = true;
+
+	/*
+	 * Flush page faults out of all CPUs. NOTE: all page faults
+	 * must be retried without returning VM_FAULT_SIGBUS if
+	 * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
+	 * changes while handle_userfault released the mmap_sem. So
+	 * it's critical that released is set to true (above), before
+	 * taking the mmap_sem for writing.
+	 */
+	down_write(&mm->mmap_sem);
+	prev = NULL;
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		cond_resched();
+		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
+		       !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
+		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
+			prev = vma;
+			continue;
+		}
+		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
+		prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
+				 new_flags, vma->anon_vma,
+				 vma->vm_file, vma->vm_pgoff,
+				 vma_policy(vma),
+				 NULL_VM_UFFD_CTX);
+		if (prev)
+			vma = prev;
+		else
+			prev = vma;
+		vma->vm_flags = new_flags;
+		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+	}
+	up_write(&mm->mmap_sem);
+
+	/*
+	 * Now that no new page faults can wait on this fault_*wqh, flush
+	 * the last page faults that may already be waiting on
+	 * the fault_*wqh.
+	 */
+	spin_lock(&ctx->fault_pending_wqh.lock);
+	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 0, &range);
+	__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, 0, &range);
+	spin_unlock(&ctx->fault_pending_wqh.lock);
+
+	wake_up_poll(&ctx->fd_wqh, POLLHUP);
+	userfaultfd_ctx_put(ctx);
+	return 0;
+}
+
+/* fault_pending_wqh.lock must be held by the caller */
+static inline struct userfaultfd_wait_queue *find_userfault(
+	struct userfaultfd_ctx *ctx)
+{
+	wait_queue_t *wq;
+	struct userfaultfd_wait_queue *uwq;
+
+	VM_BUG_ON(!spin_is_locked(&ctx->fault_pending_wqh.lock));
+
+	uwq = NULL;
+	if (!waitqueue_active(&ctx->fault_pending_wqh))
+		goto out;
+	/* walk in reverse to provide FIFO behavior to read userfaults */
+	wq = list_last_entry(&ctx->fault_pending_wqh.task_list,
+			     typeof(*wq), task_list);
+	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
+out:
+	return uwq;
+}
+
+static unsigned int userfaultfd_poll(struct file *file, poll_table *wait)
+{
+	struct userfaultfd_ctx *ctx = file->private_data;
+	unsigned int ret;
+
+	poll_wait(file, &ctx->fd_wqh, wait);
+
+	switch (ctx->state) {
+	case UFFD_STATE_WAIT_API:
+		return POLLERR;
+	case UFFD_STATE_RUNNING:
+		/*
+		 * poll() never guarantees that read won't block.
+		 * Userfaults can be woken before they're read().
+		 */
+		if (unlikely(!(file->f_flags & O_NONBLOCK)))
+			return POLLERR;
+		/*
+		 * Lockless access to see if there are pending faults.
+		 * __pollwait's last action is the add_wait_queue, but
+		 * the spin_unlock would allow the waitqueue_active to
+		 * be reordered before the actual list_add inside the
+		 * add_wait_queue critical section. So use a full
+		 * memory barrier to serialize the list_add write of
+		 * add_wait_queue() with the waitqueue_active read
+		 * below.
+		 */
+		ret = 0;
+		smp_mb();
+		if (waitqueue_active(&ctx->fault_pending_wqh))
+			ret = POLLIN;
+		return ret;
+	default:
+		BUG();
+	}
+}
+
+static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
+				    struct uffd_msg *msg)
+{
+	ssize_t ret;
+	DECLARE_WAITQUEUE(wait, current);
+	struct userfaultfd_wait_queue *uwq;
+
+	/* always take the fd_wqh lock before the fault_pending_wqh lock */
+	spin_lock(&ctx->fd_wqh.lock);
+	__add_wait_queue(&ctx->fd_wqh, &wait);
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		spin_lock(&ctx->fault_pending_wqh.lock);
+		uwq = find_userfault(ctx);
+		if (uwq) {
+			/*
+			 * Use a seqcount so wake_userfault() can
+			 * repeat its lockless check without missing
+			 * wakeups: during the refile both waitqueues
+			 * could momentarily appear empty if this is
+			 * the only userfault.
+			 */
+			write_seqcount_begin(&ctx->refile_seq);
+
+			/*
+			 * The fault_pending_wqh.lock prevents the uwq
+			 * from disappearing from under us.
+			 *
+			 * Refile this userfault from
+			 * fault_pending_wqh to fault_wqh, it's not
+			 * pending anymore after we read it.
+			 *
+			 * Use list_del() by hand (as
+			 * userfaultfd_wake_function also uses
+			 * list_del_init() by hand) to be sure nobody
+			 * changes __remove_wait_queue() to use
+			 * list_del_init() in turn breaking the
+			 * !list_empty_careful() check in
+			 * handle_userfault(). The uwq->wq.task_list
+			 * must never be empty at any time during the
+			 * refile, or the waitqueue could disappear
+			 * from under us. The "wait_queue_head_t"
+			 * parameter of __remove_wait_queue() is unused
+			 * anyway.
+			 */
+			list_del(&uwq->wq.task_list);
+			__add_wait_queue(&ctx->fault_wqh, &uwq->wq);
+
+			write_seqcount_end(&ctx->refile_seq);
+
+			/* careful to always initialize msg if ret == 0 */
+			*msg = uwq->msg;
+			spin_unlock(&ctx->fault_pending_wqh.lock);
+			ret = 0;
+			break;
+		}
+		spin_unlock(&ctx->fault_pending_wqh.lock);
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+		if (no_wait) {
+			ret = -EAGAIN;
+			break;
+		}
+		spin_unlock(&ctx->fd_wqh.lock);
+		schedule();
+		spin_lock(&ctx->fd_wqh.lock);
+	}
+	__remove_wait_queue(&ctx->fd_wqh, &wait);
+	__set_current_state(TASK_RUNNING);
+	spin_unlock(&ctx->fd_wqh.lock);
+
+	return ret;
+}
+
+static ssize_t userfaultfd_read(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct userfaultfd_ctx *ctx = file->private_data;
+	ssize_t _ret, ret = 0;
+	struct uffd_msg msg;
+	int no_wait = file->f_flags & O_NONBLOCK;
+
+	if (ctx->state == UFFD_STATE_WAIT_API)
+		return -EINVAL;
+
+	for (;;) {
+		if (count < sizeof(msg))
+			return ret ? ret : -EINVAL;
+		_ret = userfaultfd_ctx_read(ctx, no_wait, &msg);
+		if (_ret < 0)
+			return ret ? ret : _ret;
+		if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
+			return ret ? ret : -EFAULT;
+		ret += sizeof(msg);
+		buf += sizeof(msg);
+		count -= sizeof(msg);
+		/*
+		 * Allow reading more than one fault at a time, but only
+		 * block while waiting for the very first one.
+		 */
+		no_wait = O_NONBLOCK;
+	}
+}
+
+static void __wake_userfault(struct userfaultfd_ctx *ctx,
+			     struct userfaultfd_wake_range *range)
+{
+	unsigned long start, end;
+
+	start = range->start;
+	end = range->start + range->len;
+
+	spin_lock(&ctx->fault_pending_wqh.lock);
+	/* wake all in the range and autoremove */
+	if (waitqueue_active(&ctx->fault_pending_wqh))
+		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 0,
+				     range);
+	if (waitqueue_active(&ctx->fault_wqh))
+		__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, 0, range);
+	spin_unlock(&ctx->fault_pending_wqh.lock);
+}
+
+static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
+					   struct userfaultfd_wake_range *range)
+{
+	unsigned seq;
+	bool need_wakeup;
+
+	/*
+	 * To be sure waitqueue_active() is not reordered by the CPU
+	 * before the pagetable update, use an explicit SMP memory
+	 * barrier here. PT lock release and up_read(mmap_sem) only
+	 * have release semantics, which can still allow the
+	 * waitqueue_active() to be reordered before the pte update.
+	 */
+	smp_mb();
+
+	/*
+	 * Use waitqueue_active because it's very common to
+	 * change the address space atomically even if there are no
+	 * userfaults yet. So we take the spinlock only when we're
+	 * sure we have userfaults to wake.
+	 */
+	do {
+		seq = read_seqcount_begin(&ctx->refile_seq);
+		need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
+			waitqueue_active(&ctx->fault_wqh);
+		cond_resched();
+	} while (read_seqcount_retry(&ctx->refile_seq, seq));
+	if (need_wakeup)
+		__wake_userfault(ctx, range);
+}
+
+static __always_inline int validate_range(struct mm_struct *mm,
+					  __u64 start, __u64 len)
+{
+	__u64 task_size = mm->task_size;
+
+	if (start & ~PAGE_MASK)
+		return -EINVAL;
+	if (len & ~PAGE_MASK)
+		return -EINVAL;
+	if (!len)
+		return -EINVAL;
+	if (start < mmap_min_addr)
+		return -EINVAL;
+	if (start >= task_size)
+		return -EINVAL;
+	if (len > task_size - start)
+		return -EINVAL;
+	return 0;
+}
+
+static int userfaultfd_register(struct userfaultfd_ctx *ctx,
+				unsigned long arg)
+{
+	struct mm_struct *mm = ctx->mm;
+	struct vm_area_struct *vma, *prev, *cur;
+	int ret;
+	struct uffdio_register uffdio_register;
+	struct uffdio_register __user *user_uffdio_register;
+	unsigned long vm_flags, new_flags;
+	bool found;
+	unsigned long start, end, vma_end;
+
+	user_uffdio_register = (struct uffdio_register __user *) arg;
+
+	ret = -EFAULT;
+	if (copy_from_user(&uffdio_register, user_uffdio_register,
+			   sizeof(uffdio_register)-sizeof(__u64)))
+		goto out;
+
+	ret = -EINVAL;
+	if (!uffdio_register.mode)
+		goto out;
+	if (uffdio_register.mode & ~(UFFDIO_REGISTER_MODE_MISSING|
+				     UFFDIO_REGISTER_MODE_WP))
+		goto out;
+	vm_flags = 0;
+	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
+		vm_flags |= VM_UFFD_MISSING;
+	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
+		vm_flags |= VM_UFFD_WP;
+		/*
+		 * FIXME: remove the below error constraint by
+		 * implementing the wprotect tracking mode.
+		 */
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = validate_range(mm, uffdio_register.range.start,
+			     uffdio_register.range.len);
+	if (ret)
+		goto out;
+
+	start = uffdio_register.range.start;
+	end = start + uffdio_register.range.len;
+
+	down_write(&mm->mmap_sem);
+	vma = find_vma_prev(mm, start, &prev);
+
+	ret = -ENOMEM;
+	if (!vma)
+		goto out_unlock;
+
+	/* check that there's at least one vma in the range */
+	ret = -EINVAL;
+	if (vma->vm_start >= end)
+		goto out_unlock;
+
+	/*
+	 * Search for incompatible vmas.
+	 *
+	 * FIXME: this shall be relaxed later so that it doesn't fail
+	 * on tmpfs backed vmas (in addition to the current allowance
+	 * on anonymous vmas).
+	 */
+	found = false;
+	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
+		cond_resched();
+
+		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
+		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
+
+		/* check for incompatible vmas */
+		ret = -EINVAL;
+		if (cur->vm_ops)
+			goto out_unlock;
+
+		/*
+		 * Check that this vma isn't already owned by a
+		 * different userfaultfd. We can't allow more than one
+		 * userfaultfd to own a single vma simultaneously or we
+		 * wouldn't know which one to deliver the userfaults to.
+		 */
+		ret = -EBUSY;
+		if (cur->vm_userfaultfd_ctx.ctx &&
+		    cur->vm_userfaultfd_ctx.ctx != ctx)
+			goto out_unlock;
+
+		found = true;
+	}
+	BUG_ON(!found);
+
+	if (vma->vm_start < start)
+		prev = vma;
+
+	ret = 0;
+	do {
+		cond_resched();
+
+		BUG_ON(vma->vm_ops);
+		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
+		       vma->vm_userfaultfd_ctx.ctx != ctx);
+
+		/*
+		 * Nothing to do: this vma is already registered into this
+		 * userfaultfd and with the right tracking mode too.
+		 */
+		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
+		    (vma->vm_flags & vm_flags) == vm_flags)
+			goto skip;
+
+		if (vma->vm_start > start)
+			start = vma->vm_start;
+		vma_end = min(end, vma->vm_end);
+
+		new_flags = (vma->vm_flags & ~vm_flags) | vm_flags;
+		prev = vma_merge(mm, prev, start, vma_end, new_flags,
+				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
+				 vma_policy(vma),
+				 ((struct vm_userfaultfd_ctx){ ctx }));
+		if (prev) {
+			vma = prev;
+			goto next;
+		}
+		if (vma->vm_start < start) {
+			ret = split_vma(mm, vma, start, 1);
+			if (ret)
+				break;
+		}
+		if (vma->vm_end > end) {
+			ret = split_vma(mm, vma, end, 0);
+			if (ret)
+				break;
+		}
+	next:
+		/*
+		 * In the vma_merge() successful mprotect-like case 8:
+		 * the next vma was merged into the current one and
+		 * the current one has not been updated yet.
+		 */
+		vma->vm_flags = new_flags;
+		vma->vm_userfaultfd_ctx.ctx = ctx;
+
+	skip:
+		prev = vma;
+		start = vma->vm_end;
+		vma = vma->vm_next;
+	} while (vma && vma->vm_start < end);
+out_unlock:
+	up_write(&mm->mmap_sem);
+	if (!ret) {
+		/*
+		 * Now that we scanned all vmas we can already tell
+		 * userland which ioctl methods are guaranteed to
+		 * succeed on this range.
+		 */
+		if (put_user(UFFD_API_RANGE_IOCTLS,
+			     &user_uffdio_register->ioctls))
+			ret = -EFAULT;
+	}
+out:
+	return ret;
+}
+
+static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
+				  unsigned long arg)
+{
+	struct mm_struct *mm = ctx->mm;
+	struct vm_area_struct *vma, *prev, *cur;
+	int ret;
+	struct uffdio_range uffdio_unregister;
+	unsigned long new_flags;
+	bool found;
+	unsigned long start, end, vma_end;
+	const void __user *buf = (void __user *)arg;
+
+	ret = -EFAULT;
+	if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
+		goto out;
+
+	ret = validate_range(mm, uffdio_unregister.start,
+			     uffdio_unregister.len);
+	if (ret)
+		goto out;
+
+	start = uffdio_unregister.start;
+	end = start + uffdio_unregister.len;
+
+	down_write(&mm->mmap_sem);
+	vma = find_vma_prev(mm, start, &prev);
+
+	ret = -ENOMEM;
+	if (!vma)
+		goto out_unlock;
+
+	/* check that there's at least one vma in the range */
+	ret = -EINVAL;
+	if (vma->vm_start >= end)
+		goto out_unlock;
+
+	/*
+	 * Search for incompatible vmas.
+	 *
+	 * FIXME: this shall be relaxed later so that it doesn't fail
+	 * on tmpfs backed vmas (in addition to the current allowance
+	 * on anonymous vmas).
+	 */
+	found = false;
+	ret = -EINVAL;
+	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
+		cond_resched();
+
+		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
+		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
+
+		/*
+		 * Check for incompatible vmas. Not strictly required
+		 * here, as incompatible vmas cannot have a
+		 * userfaultfd_ctx registered on them, but this
+		 * provides stricter behavior so unregistration
+		 * errors are noticed.
+		 */
+		if (cur->vm_ops)
+			goto out_unlock;
+
+		found = true;
+	}
+	BUG_ON(!found);
+
+	if (vma->vm_start < start)
+		prev = vma;
+
+	ret = 0;
+	do {
+		cond_resched();
+
+		BUG_ON(vma->vm_ops);
+
+		/*
+		 * Nothing to do: this vma is not registered with any
+		 * userfaultfd, so there is nothing to unregister.
+		 */
+		if (!vma->vm_userfaultfd_ctx.ctx)
+			goto skip;
+
+		if (vma->vm_start > start)
+			start = vma->vm_start;
+		vma_end = min(end, vma->vm_end);
+
+		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
+		prev = vma_merge(mm, prev, start, vma_end, new_flags,
+				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
+				 vma_policy(vma),
+				 NULL_VM_UFFD_CTX);
+		if (prev) {
+			vma = prev;
+			goto next;
+		}
+		if (vma->vm_start < start) {
+			ret = split_vma(mm, vma, start, 1);
+			if (ret)
+				break;
+		}
+		if (vma->vm_end > end) {
+			ret = split_vma(mm, vma, end, 0);
+			if (ret)
+				break;
+		}
+	next:
+		/*
+		 * In the vma_merge() successful mprotect-like case 8:
+		 * the next vma was merged into the current one and
+		 * the current one has not been updated yet.
+		 */
+		vma->vm_flags = new_flags;
+		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+
+	skip:
+		prev = vma;
+		start = vma->vm_end;
+		vma = vma->vm_next;
+	} while (vma && vma->vm_start < end);
+out_unlock:
+	up_write(&mm->mmap_sem);
+out:
+	return ret;
+}
+
+/*
+ * userfaultfd_wake may be used in combination with the
 * UFFDIO_*_MODE_DONTWAKE modes to wake up userfaults in batches.
+ */
+static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
+			    unsigned long arg)
+{
+	int ret;
+	struct uffdio_range uffdio_wake;
+	struct userfaultfd_wake_range range;
+	const void __user *buf = (void __user *)arg;
+
+	ret = -EFAULT;
+	if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
+		goto out;
+
+	ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
+	if (ret)
+		goto out;
+
+	range.start = uffdio_wake.start;
+	range.len = uffdio_wake.len;
+
+	/*
+	 * len == 0 means wake all and we don't want to wake all here,
+	 * so check it again to be sure.
+	 */
+	VM_BUG_ON(!range.len);
+
+	wake_userfault(ctx, &range);
+	ret = 0;
+
+out:
+	return ret;
+}
+
+static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
+			    unsigned long arg)
+{
+	__s64 ret;
+	struct uffdio_copy uffdio_copy;
+	struct uffdio_copy __user *user_uffdio_copy;
+	struct userfaultfd_wake_range range;
+
+	user_uffdio_copy = (struct uffdio_copy __user *) arg;
+
+	ret = -EFAULT;
+	if (copy_from_user(&uffdio_copy, user_uffdio_copy,
+			   /* don't copy the last field, "copy" */
+			   sizeof(uffdio_copy)-sizeof(__s64)))
+		goto out;
+
+	ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
+	if (ret)
+		goto out;
+	/*
+	 * Double-check for wraparound just in case. copy_from_user()
+	 * will later check that uffdio_copy.src + uffdio_copy.len fits
+	 * in the userland range.
+	 */
+	ret = -EINVAL;
+	if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
+		goto out;
+	if (uffdio_copy.mode & ~UFFDIO_COPY_MODE_DONTWAKE)
+		goto out;
+
+	ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
+			   uffdio_copy.len);
+	if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
+		return -EFAULT;
+	if (ret < 0)
+		goto out;
+	BUG_ON(!ret);
+	/* len == 0 would wake all */
+	range.len = ret;
+	if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
+		range.start = uffdio_copy.dst;
+		wake_userfault(ctx, &range);
+	}
+	ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
+out:
+	return ret;
+}
+
+static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
+				unsigned long arg)
+{
+	__s64 ret;
+	struct uffdio_zeropage uffdio_zeropage;
+	struct uffdio_zeropage __user *user_uffdio_zeropage;
+	struct userfaultfd_wake_range range;
+
+	user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
+
+	ret = -EFAULT;
+	if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
+			   /* don't copy the last field, "zeropage" */
+			   sizeof(uffdio_zeropage)-sizeof(__s64)))
+		goto out;
+
+	ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
+			     uffdio_zeropage.range.len);
+	if (ret)
+		goto out;
+	ret = -EINVAL;
+	if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
+		goto out;
+
+	ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
+			     uffdio_zeropage.range.len);
+	if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
+		return -EFAULT;
+	if (ret < 0)
+		goto out;
+	/* len == 0 would wake all */
+	BUG_ON(!ret);
+	range.len = ret;
+	if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
+		range.start = uffdio_zeropage.range.start;
+		wake_userfault(ctx, &range);
+	}
+	ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
+out:
+	return ret;
+}
+
+/*
+ * Userland asks for a certain API version and we return which bits
+ * and ioctl commands are implemented in this kernel for that API
+ * version, or -EINVAL if it is unknown.
+ */
+static int userfaultfd_api(struct userfaultfd_ctx *ctx,
+			   unsigned long arg)
+{
+	struct uffdio_api uffdio_api;
+	void __user *buf = (void __user *)arg;
+	int ret;
+
+	ret = -EINVAL;
+	if (ctx->state != UFFD_STATE_WAIT_API)
+		goto out;
+	ret = -EFAULT;
+	if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
+		goto out;
+	if (uffdio_api.api != UFFD_API || uffdio_api.features) {
+		memset(&uffdio_api, 0, sizeof(uffdio_api));
+		if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
+			goto out;
+		ret = -EINVAL;
+		goto out;
+	}
+	uffdio_api.features = UFFD_API_FEATURES;
+	uffdio_api.ioctls = UFFD_API_IOCTLS;
+	ret = -EFAULT;
+	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
+		goto out;
+	ctx->state = UFFD_STATE_RUNNING;
+	ret = 0;
+out:
+	return ret;
+}
+
+static long userfaultfd_ioctl(struct file *file, unsigned cmd,
+			      unsigned long arg)
+{
+	int ret = -EINVAL;
+	struct userfaultfd_ctx *ctx = file->private_data;
+
+	if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
+		return -EINVAL;
+
+	switch (cmd) {
+	case UFFDIO_API:
+		ret = userfaultfd_api(ctx, arg);
+		break;
+	case UFFDIO_REGISTER:
+		ret = userfaultfd_register(ctx, arg);
+		break;
+	case UFFDIO_UNREGISTER:
+		ret = userfaultfd_unregister(ctx, arg);
+		break;
+	case UFFDIO_WAKE:
+		ret = userfaultfd_wake(ctx, arg);
+		break;
+	case UFFDIO_COPY:
+		ret = userfaultfd_copy(ctx, arg);
+		break;
+	case UFFDIO_ZEROPAGE:
+		ret = userfaultfd_zeropage(ctx, arg);
+		break;
+	}
+	return ret;
+}
+
+#ifdef CONFIG_PROC_FS
+static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
+{
+	struct userfaultfd_ctx *ctx = f->private_data;
+	wait_queue_t *wq;
+	struct userfaultfd_wait_queue *uwq;
+	unsigned long pending = 0, total = 0;
+
+	spin_lock(&ctx->fault_pending_wqh.lock);
+	list_for_each_entry(wq, &ctx->fault_pending_wqh.task_list, task_list) {
+		uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
+		pending++;
+		total++;
+	}
+	list_for_each_entry(wq, &ctx->fault_wqh.task_list, task_list) {
+		uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
+		total++;
+	}
+	spin_unlock(&ctx->fault_pending_wqh.lock);
+
+	/*
+	 * If more protocols are added, they will all be shown
+	 * separated by a space, like this:
+	 *	protocols: aa:... bb:...
+	 */
+	seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
+		   pending, total, UFFD_API, UFFD_API_FEATURES,
+		   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
+}
+#endif
+
+static const struct file_operations userfaultfd_fops = {
+#ifdef CONFIG_PROC_FS
+	.show_fdinfo	= userfaultfd_show_fdinfo,
+#endif
+	.release	= userfaultfd_release,
+	.poll		= userfaultfd_poll,
+	.read		= userfaultfd_read,
+	.unlocked_ioctl = userfaultfd_ioctl,
+	.compat_ioctl	= userfaultfd_ioctl,
+	.llseek		= noop_llseek,
+};
+
+static void init_once_userfaultfd_ctx(void *mem)
+{
+	struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
+
+	init_waitqueue_head(&ctx->fault_pending_wqh);
+	init_waitqueue_head(&ctx->fault_wqh);
+	init_waitqueue_head(&ctx->fd_wqh);
+	seqcount_init(&ctx->refile_seq);
+}
+
+/**
+ * userfaultfd_file_create - Creates a userfaultfd file pointer.
+ * @flags: Flags for the userfaultfd file.
+ *
+ * This function creates a userfaultfd file pointer, without installing
+ * it into the fd table. This is useful when the userfaultfd file is
+ * used during the initialization of data structures that require
+ * extra setup after the userfaultfd creation. So the userfaultfd
+ * creation is split into the file pointer creation phase, and the
+ * file descriptor installation phase.  In this way races with
+ * userspace closing the newly installed file descriptor can be
+ * avoided.  Returns a userfaultfd file pointer, or a proper error
+ * pointer.
+ */
+static struct file *userfaultfd_file_create(int flags)
+{
+	struct file *file;
+	struct userfaultfd_ctx *ctx;
+
+	BUG_ON(!current->mm);
+
+	/* Check the UFFD_* constants for consistency.  */
+	BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
+	BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
+
+	file = ERR_PTR(-EINVAL);
+	if (flags & ~UFFD_SHARED_FCNTL_FLAGS)
+		goto out;
+
+	file = ERR_PTR(-ENOMEM);
+	ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
+	if (!ctx)
+		goto out;
+
+	atomic_set(&ctx->refcount, 1);
+	ctx->flags = flags;
+	ctx->state = UFFD_STATE_WAIT_API;
+	ctx->released = false;
+	ctx->mm = current->mm;
+	/* prevent the mm struct from being freed */
+	atomic_inc(&ctx->mm->mm_users);
+
+	file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
+				  O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
+	if (IS_ERR(file))
+		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
+out:
+	return file;
+}
+
+SYSCALL_DEFINE1(userfaultfd, int, flags)
+{
+	int fd, error;
+	struct file *file;
+
+	error = get_unused_fd_flags(flags & UFFD_SHARED_FCNTL_FLAGS);
+	if (error < 0)
+		return error;
+	fd = error;
+
+	file = userfaultfd_file_create(flags);
+	if (IS_ERR(file)) {
+		error = PTR_ERR(file);
+		goto err_put_unused_fd;
+	}
+	fd_install(fd, file);
+
+	return fd;
+
+err_put_unused_fd:
+	put_unused_fd(fd);
+
+	return error;
+}
+
+static int __init userfaultfd_init(void)
+{
+	userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
+						sizeof(struct userfaultfd_ctx),
+						0,
+						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+						init_once_userfaultfd_ctx);
+	return 0;
+}
+__initcall(userfaultfd_init);
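
Note (editorial, not part of the patch): a minimal userspace sketch of the
intended API flow for the new syscall, assuming <linux/userfaultfd.h> is
installed and __NR_userfaultfd is wired up for the target architecture;
error checking is omitted. A real monitor runs the read/UFFDIO_COPY side
in a second thread while the first thread faults on the registered range.

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	/* Handshake: must be the first ioctl; moves the context from
	 * UFFD_STATE_WAIT_API to UFFD_STATE_RUNNING. */
	struct uffdio_api api = { .api = UFFD_API };
	ioctl(uffd, UFFDIO_API, &api);

	/* Register one anonymous page for missing-page tracking. */
	char *area = mmap(NULL, page, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)area, .len = page },
		.mode  = UFFDIO_REGISTER_MODE_MISSING,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	/* The monitor would poll() for POLLIN and read() a struct
	 * uffd_msg to learn the faulting address; UFFDIO_COPY then
	 * atomically fills the page and wakes the faulting thread. */
	static char src[65536];		/* covers any common page size */
	memset(src, 0xaa, sizeof(src));
	struct uffdio_copy copy = {
		.dst = (unsigned long)area,
		.src = (unsigned long)src,
		.len = page,
	};
	ioctl(uffd, UFFDIO_COPY, &copy);

	return 0;
}
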
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 1fb16562..bbd9b1f 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -511,9 +511,9 @@
 		seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);
 
 	if (mp->m_logname)
-		seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
+		seq_show_option(m, MNTOPT_LOGDEV, mp->m_logname);
 	if (mp->m_rtname)
-		seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);
+		seq_show_option(m, MNTOPT_RTDEV, mp->m_rtname);
 
 	if (mp->m_dalign > 0)
 		seq_printf(m, "," MNTOPT_SUNIT "=%d",
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index b7babf0..a94cbeb 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -23,236 +23,159 @@
 typedef atomic64_t atomic_long_t;
 
 #define ATOMIC_LONG_INIT(i)	ATOMIC64_INIT(i)
+#define ATOMIC_LONG_PFX(x)	atomic64 ## x
 
-static inline long atomic_long_read(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_read(v);
-}
-
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_set(v, i);
-}
-
-static inline void atomic_long_inc(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_inc(v);
-}
-
-static inline void atomic_long_dec(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_dec(v);
-}
-
-static inline void atomic_long_add(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_add(i, v);
-}
-
-static inline void atomic_long_sub(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_sub(i, v);
-}
-
-static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return atomic64_sub_and_test(i, v);
-}
-
-static inline int atomic_long_dec_and_test(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return atomic64_dec_and_test(v);
-}
-
-static inline int atomic_long_inc_and_test(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return atomic64_inc_and_test(v);
-}
-
-static inline int atomic_long_add_negative(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return atomic64_add_negative(i, v);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_add_return(i, v);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_sub_return(i, v);
-}
-
-static inline long atomic_long_inc_return(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_inc_return(v);
-}
-
-static inline long atomic_long_dec_return(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_dec_return(v);
-}
-
-static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_add_unless(v, a, u);
-}
-
-#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
-
-#define atomic_long_cmpxchg(l, old, new) \
-	(atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
-#define atomic_long_xchg(v, new) \
-	(atomic64_xchg((atomic64_t *)(v), (new)))
-
-#else  /*  BITS_PER_LONG == 64  */
+#else
 
 typedef atomic_t atomic_long_t;
 
 #define ATOMIC_LONG_INIT(i)	ATOMIC_INIT(i)
-static inline long atomic_long_read(atomic_long_t *l)
-{
-	atomic_t *v = (atomic_t *)l;
+#define ATOMIC_LONG_PFX(x)	atomic ## x
 
-	return (long)atomic_read(v);
+#endif
+
+#define ATOMIC_LONG_READ_OP(mo)						\
+static inline long atomic_long_read##mo(atomic_long_t *l)		\
+{									\
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
+									\
+	return (long)ATOMIC_LONG_PFX(_read##mo)(v);			\
 }
+ATOMIC_LONG_READ_OP()
+ATOMIC_LONG_READ_OP(_acquire)
 
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
-	atomic_t *v = (atomic_t *)l;
+#undef ATOMIC_LONG_READ_OP
 
-	atomic_set(v, i);
+#define ATOMIC_LONG_SET_OP(mo)						\
+static inline void atomic_long_set##mo(atomic_long_t *l, long i)	\
+{									\
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
+									\
+	ATOMIC_LONG_PFX(_set##mo)(v, i);				\
 }
+ATOMIC_LONG_SET_OP()
+ATOMIC_LONG_SET_OP(_release)
+
+#undef ATOMIC_LONG_SET_OP
+
+#define ATOMIC_LONG_ADD_SUB_OP(op, mo)					\
+static inline long							\
+atomic_long_##op##_return##mo(long i, atomic_long_t *l)			\
+{									\
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
+									\
+	return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v);		\
+}
+ATOMIC_LONG_ADD_SUB_OP(add,)
+ATOMIC_LONG_ADD_SUB_OP(add, _relaxed)
+ATOMIC_LONG_ADD_SUB_OP(add, _acquire)
+ATOMIC_LONG_ADD_SUB_OP(add, _release)
+ATOMIC_LONG_ADD_SUB_OP(sub,)
+ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed)
+ATOMIC_LONG_ADD_SUB_OP(sub, _acquire)
+ATOMIC_LONG_ADD_SUB_OP(sub, _release)
+
+#undef ATOMIC_LONG_ADD_SUB_OP
+
+#define atomic_long_cmpxchg_relaxed(l, old, new) \
+	(ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
+					   (old), (new)))
+#define atomic_long_cmpxchg_acquire(l, old, new) \
+	(ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
+					   (old), (new)))
+#define atomic_long_cmpxchg_release(l, old, new) \
+	(ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
+					   (old), (new)))
+#define atomic_long_cmpxchg(l, old, new) \
+	(ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
+
+#define atomic_long_xchg_relaxed(v, new) \
+	(ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg_acquire(v, new) \
+	(ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg_release(v, new) \
+	(ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg(v, new) \
+	(ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
 
 static inline void atomic_long_inc(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	atomic_inc(v);
+	ATOMIC_LONG_PFX(_inc)(v);
 }
 
 static inline void atomic_long_dec(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	atomic_dec(v);
+	ATOMIC_LONG_PFX(_dec)(v);
 }
 
 static inline void atomic_long_add(long i, atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	atomic_add(i, v);
+	ATOMIC_LONG_PFX(_add)(i, v);
 }
 
 static inline void atomic_long_sub(long i, atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	atomic_sub(i, v);
+	ATOMIC_LONG_PFX(_sub)(i, v);
 }
 
 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return atomic_sub_and_test(i, v);
+	return ATOMIC_LONG_PFX(_sub_and_test)(i, v);
 }
 
 static inline int atomic_long_dec_and_test(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return atomic_dec_and_test(v);
+	return ATOMIC_LONG_PFX(_dec_and_test)(v);
 }
 
 static inline int atomic_long_inc_and_test(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return atomic_inc_and_test(v);
+	return ATOMIC_LONG_PFX(_inc_and_test)(v);
 }
 
 static inline int atomic_long_add_negative(long i, atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return atomic_add_negative(i, v);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
-{
-	atomic_t *v = (atomic_t *)l;
-
-	return (long)atomic_add_return(i, v);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
-	atomic_t *v = (atomic_t *)l;
-
-	return (long)atomic_sub_return(i, v);
+	return ATOMIC_LONG_PFX(_add_negative)(i, v);
 }
 
 static inline long atomic_long_inc_return(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return (long)atomic_inc_return(v);
+	return (long)ATOMIC_LONG_PFX(_inc_return)(v);
 }
 
 static inline long atomic_long_dec_return(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return (long)atomic_dec_return(v);
+	return (long)ATOMIC_LONG_PFX(_dec_return)(v);
 }
 
 static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return (long)atomic_add_unless(v, a, u);
+	return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u);
 }
 
-#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
-
-#define atomic_long_cmpxchg(l, old, new) \
-	(atomic_cmpxchg((atomic_t *)(l), (old), (new)))
-#define atomic_long_xchg(v, new) \
-	(atomic_xchg((atomic_t *)(v), (new)))
-
-#endif  /*  BITS_PER_LONG == 64  */
+#define atomic_long_inc_not_zero(l) \
+	ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
 
 #endif  /*  _ASM_GENERIC_ATOMIC_LONG_H  */
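
Note (editorial, not part of the patch): the rewrite above collapses the
duplicated 32-bit and 64-bit wrappers into a single ATOMIC_LONG_PFX
token-pasting macro that resolves to atomic_* or atomic64_* depending on
BITS_PER_LONG, then stamps out the plain/_relaxed/_acquire/_release
variants from one generator. A standalone illustration of the same
preprocessor technique; every demo64_* name here is invented for the demo.

#include <stdio.h>

#define DEMO_PFX(x)	demo64 ## x

typedef struct { long v; } demo64_t;
typedef demo64_t demo_long_t;		/* stands in for atomic_long_t */

static long demo64_read(demo64_t *p) { return p->v; }
static long demo64_read_acquire(demo64_t *p) { return p->v; /* + fence */ }

/* One generator emits both the plain and the _acquire accessor. */
#define DEMO_READ_OP(mo)						\
static long demo_long_read##mo(demo_long_t *l)				\
{									\
	DEMO_PFX(_t) *v = (DEMO_PFX(_t) *)l;				\
									\
	return DEMO_PFX(_read##mo)(v);					\
}
DEMO_READ_OP()		/* expands to demo_long_read()         */
DEMO_READ_OP(_acquire)	/* expands to demo_long_read_acquire() */
#undef DEMO_READ_OP

int main(void)
{
	demo_long_t x = { 42 };

	printf("%ld %ld\n", demo_long_read(&x), demo_long_read_acquire(&x));
	return 0;
}
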
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 1973ad2..d4d7e33 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -98,15 +98,16 @@
 ATOMIC_OP_RETURN(sub, -)
 #endif
 
-#ifndef atomic_clear_mask
+#ifndef atomic_and
 ATOMIC_OP(and, &)
-#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
 #endif
 
-#ifndef atomic_set_mask
-#define CONFIG_ARCH_HAS_ATOMIC_OR
+#ifndef atomic_or
 ATOMIC_OP(or, |)
-#define atomic_set_mask(i, v)	atomic_or((i), (v))
+#endif
+
+#ifndef atomic_xor
+ATOMIC_OP(xor, ^)
 #endif
 
 #undef ATOMIC_OP_RETURN
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 30ad9c8..d48e78c 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -32,6 +32,10 @@
 ATOMIC64_OPS(add)
 ATOMIC64_OPS(sub)
 
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 55e3abc..b42afad 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -108,12 +108,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
 	___p1;								\
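
Note (editorial, not part of the patch): the hunk above only swaps
ACCESS_ONCE() for READ_ONCE()/WRITE_ONCE(); the acquire/release semantics
are unchanged. For readers new to the pairing, a userspace sketch using
C11 atomics as a stand-in for smp_store_release()/smp_load_acquire()
(compile with -pthread): the consumer's acquire load pairs with the
producer's release store, so the payload write is guaranteed visible.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;		/* plain data being published */
static atomic_int ready;	/* flag carrying the ordering */

static void *producer(void *arg)
{
	(void)arg;
	payload = 42;					/* A: write data */
	atomic_store_explicit(&ready, 1,
			      memory_order_release);	/* B: publish */
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&ready, memory_order_acquire))
		;	/* C: pairs with B */
	printf("%d\n", payload);	/* A is visible here: prints 42 */
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}
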
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 6383d54..54a8e65 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -36,39 +36,39 @@
 /*
  * External function declarations
  */
-extern void queue_read_lock_slowpath(struct qrwlock *lock);
-extern void queue_write_lock_slowpath(struct qrwlock *lock);
+extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts);
+extern void queued_write_lock_slowpath(struct qrwlock *lock);
 
 /**
- * queue_read_can_lock- would read_trylock() succeed?
 * queued_read_can_lock - would read_trylock() succeed?
  * @lock: Pointer to queue rwlock structure
  */
-static inline int queue_read_can_lock(struct qrwlock *lock)
+static inline int queued_read_can_lock(struct qrwlock *lock)
 {
 	return !(atomic_read(&lock->cnts) & _QW_WMASK);
 }
 
 /**
- * queue_write_can_lock- would write_trylock() succeed?
 * queued_write_can_lock - would write_trylock() succeed?
  * @lock: Pointer to queue rwlock structure
  */
-static inline int queue_write_can_lock(struct qrwlock *lock)
+static inline int queued_write_can_lock(struct qrwlock *lock)
 {
 	return !atomic_read(&lock->cnts);
 }
 
 /**
- * queue_read_trylock - try to acquire read lock of a queue rwlock
+ * queued_read_trylock - try to acquire read lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  * Return: 1 if lock acquired, 0 if failed
  */
-static inline int queue_read_trylock(struct qrwlock *lock)
+static inline int queued_read_trylock(struct qrwlock *lock)
 {
 	u32 cnts;
 
 	cnts = atomic_read(&lock->cnts);
 	if (likely(!(cnts & _QW_WMASK))) {
-		cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
+		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 		if (likely(!(cnts & _QW_WMASK)))
 			return 1;
 		atomic_sub(_QR_BIAS, &lock->cnts);
@@ -77,11 +77,11 @@
 }
 
 /**
- * queue_write_trylock - try to acquire write lock of a queue rwlock
+ * queued_write_trylock - try to acquire write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  * Return: 1 if lock acquired, 0 if failed
  */
-static inline int queue_write_trylock(struct qrwlock *lock)
+static inline int queued_write_trylock(struct qrwlock *lock)
 {
 	u32 cnts;
 
@@ -89,78 +89,70 @@
 	if (unlikely(cnts))
 		return 0;
 
-	return likely(atomic_cmpxchg(&lock->cnts,
-				     cnts, cnts | _QW_LOCKED) == cnts);
+	return likely(atomic_cmpxchg_acquire(&lock->cnts,
+					     cnts, cnts | _QW_LOCKED) == cnts);
 }
 /**
- * queue_read_lock - acquire read lock of a queue rwlock
+ * queued_read_lock - acquire read lock of a queue rwlock
  * @lock: Pointer to queue rwlock structure
  */
-static inline void queue_read_lock(struct qrwlock *lock)
+static inline void queued_read_lock(struct qrwlock *lock)
 {
 	u32 cnts;
 
-	cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
+	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 	if (likely(!(cnts & _QW_WMASK)))
 		return;
 
 	/* The slowpath will decrement the reader count, if necessary. */
-	queue_read_lock_slowpath(lock);
+	queued_read_lock_slowpath(lock, cnts);
 }
 
 /**
- * queue_write_lock - acquire write lock of a queue rwlock
+ * queued_write_lock - acquire write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
-static inline void queue_write_lock(struct qrwlock *lock)
+static inline void queued_write_lock(struct qrwlock *lock)
 {
 	/* Optimize for the unfair lock case where the fair flag is 0. */
-	if (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0)
+	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
 		return;
 
-	queue_write_lock_slowpath(lock);
+	queued_write_lock_slowpath(lock);
 }
 
 /**
- * queue_read_unlock - release read lock of a queue rwlock
+ * queued_read_unlock - release read lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
-static inline void queue_read_unlock(struct qrwlock *lock)
+static inline void queued_read_unlock(struct qrwlock *lock)
 {
 	/*
 	 * Atomically decrement the reader count
 	 */
-	smp_mb__before_atomic();
-	atomic_sub(_QR_BIAS, &lock->cnts);
+	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
 }
 
-#ifndef queue_write_unlock
 /**
- * queue_write_unlock - release write lock of a queue rwlock
+ * queued_write_unlock - release write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
-static inline void queue_write_unlock(struct qrwlock *lock)
+static inline void queued_write_unlock(struct qrwlock *lock)
 {
-	/*
-	 * If the writer field is atomic, it can be cleared directly.
-	 * Otherwise, an atomic subtraction will be used to clear it.
-	 */
-	smp_mb__before_atomic();
-	atomic_sub(_QW_LOCKED, &lock->cnts);
+	smp_store_release((u8 *)&lock->cnts, 0);
 }
-#endif
 
 /*
  * Remapping rwlock architecture specific functions to the corresponding
  * queue rwlock functions.
  */
-#define arch_read_can_lock(l)	queue_read_can_lock(l)
-#define arch_write_can_lock(l)	queue_write_can_lock(l)
-#define arch_read_lock(l)	queue_read_lock(l)
-#define arch_write_lock(l)	queue_write_lock(l)
-#define arch_read_trylock(l)	queue_read_trylock(l)
-#define arch_write_trylock(l)	queue_write_trylock(l)
-#define arch_read_unlock(l)	queue_read_unlock(l)
-#define arch_write_unlock(l)	queue_write_unlock(l)
+#define arch_read_can_lock(l)	queued_read_can_lock(l)
+#define arch_write_can_lock(l)	queued_write_can_lock(l)
+#define arch_read_lock(l)	queued_read_lock(l)
+#define arch_write_lock(l)	queued_write_lock(l)
+#define arch_read_trylock(l)	queued_read_trylock(l)
+#define arch_write_trylock(l)	queued_write_trylock(l)
+#define arch_read_unlock(l)	queued_read_unlock(l)
+#define arch_write_unlock(l)	queued_write_unlock(l)
 
 #endif /* __ASM_GENERIC_QRWLOCK_H */
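
Note (editorial, not part of the patch): the reader fastpath above
speculatively adds _QR_BIAS with acquire ordering and backs the count out
if a writer was observed. A standalone sketch of that shape using C11
atomics; the DEMO_QW_WMASK/DEMO_QR_BIAS values are illustrative stand-ins,
not a claim about the kernel's exact bit layout.

#include <stdatomic.h>
#include <stdbool.h>

#define DEMO_QW_WMASK	0x1ffU	/* writer + waiting-writer bits (assumed) */
#define DEMO_QR_BIAS	0x200U	/* one reader count unit (assumed) */

struct demo_qrwlock { atomic_uint cnts; };

static bool demo_read_trylock(struct demo_qrwlock *lock)
{
	unsigned int cnts = atomic_load_explicit(&lock->cnts,
						 memory_order_relaxed);
	if (cnts & DEMO_QW_WMASK)
		return false;
	/* Speculatively take a reader slot with acquire ordering ... */
	cnts = atomic_fetch_add_explicit(&lock->cnts, DEMO_QR_BIAS,
					 memory_order_acquire) + DEMO_QR_BIAS;
	if (!(cnts & DEMO_QW_WMASK))
		return true;
	/* ... and back out if a writer slipped in meanwhile. */
	atomic_fetch_sub_explicit(&lock->cnts, DEMO_QR_BIAS,
				  memory_order_relaxed);
	return false;
}

static void demo_read_unlock(struct demo_qrwlock *lock)
{
	/* Mirrors atomic_sub_return_release(_QR_BIAS, &lock->cnts). */
	atomic_fetch_sub_explicit(&lock->cnts, DEMO_QR_BIAS,
				  memory_order_release);
}

int main(void)
{
	struct demo_qrwlock lock = { 0 };

	if (demo_read_trylock(&lock))
		demo_read_unlock(&lock);
	return 0;
}
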
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 96670e7..35f99b6 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -25,14 +25,6 @@
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 
-static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
-					struct scatterlist *sg2)
-{
-	sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
-	sg1[num - 1].page_link &= ~0x02;
-	sg1[num - 1].page_link |= 0x01;
-}
-
 static inline void scatterwalk_crypto_chain(struct scatterlist *head,
 					    struct scatterlist *sg,
 					    int chain, int num)
@@ -43,7 +35,7 @@
 	}
 
 	if (sg)
-		scatterwalk_sg_chain(head, num, sg);
+		sg_chain(head, num, sg);
 	else
 		sg_mark_end(head);
 }
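
Note (editorial, not part of the patch): the removed scatterwalk_sg_chain()
open-coded what sg_chain() does, as its old body shows: the last entry of
the first table becomes a tagged pointer to the next table, with bit 0 set
(chain marker, the old |= 0x01) and bit 1 cleared (end marker, the old
&= ~0x02). A condensed standalone sketch of that pointer tagging; struct
demo_sg is a stand-in, not the kernel's struct scatterlist.

#include <stdio.h>

#define DEMO_SG_CHAIN	0x01UL	/* bit 0: entry links to another table */
#define DEMO_SG_END	0x02UL	/* bit 1: entry terminates the list */

struct demo_sg {
	unsigned long page_link;
	unsigned int len;
};

static void demo_sg_chain(struct demo_sg *sg1, int num, struct demo_sg *sg2)
{
	/* Reuse the last slot of sg1 as a tagged pointer to sg2. */
	sg1[num - 1].page_link = (unsigned long)sg2 | DEMO_SG_CHAIN;
	sg1[num - 1].page_link &= ~DEMO_SG_END;
	sg1[num - 1].len = 0;
}

int main(void)
{
	struct demo_sg a[2] = { { 0, 0 } }, b[2] = { { 0, 0 } };

	demo_sg_chain(a, 2, b);
	printf("link=%#lx chained=%lu\n",
	       a[1].page_link, a[1].page_link & DEMO_SG_CHAIN);
	return 0;
}
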
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index de13bfc..bae79f3 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -12,6 +12,8 @@
 
 #include <drm/drmP.h>
 
+struct dw_hdmi;
+
 enum {
 	DW_HDMI_RES_8,
 	DW_HDMI_RES_10,
@@ -59,4 +61,9 @@
 		 void *data, struct drm_encoder *encoder,
 		 struct resource *iores, int irq,
 		 const struct dw_hdmi_plat_data *plat_data);
+
+void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
+void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
+void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
+
 #endif /* __IMX_HDMI_H__ */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 5aa5197..8b5ce7c 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -137,17 +137,18 @@
 /*@{*/
 
 /* driver capabilities and requirements mask */
-#define DRIVER_USE_AGP     0x1
-#define DRIVER_PCI_DMA     0x8
-#define DRIVER_SG          0x10
-#define DRIVER_HAVE_DMA    0x20
-#define DRIVER_HAVE_IRQ    0x40
-#define DRIVER_IRQ_SHARED  0x80
-#define DRIVER_GEM         0x1000
-#define DRIVER_MODESET     0x2000
-#define DRIVER_PRIME       0x4000
-#define DRIVER_RENDER      0x8000
-#define DRIVER_ATOMIC      0x10000
+#define DRIVER_USE_AGP			0x1
+#define DRIVER_PCI_DMA			0x8
+#define DRIVER_SG			0x10
+#define DRIVER_HAVE_DMA			0x20
+#define DRIVER_HAVE_IRQ			0x40
+#define DRIVER_IRQ_SHARED		0x80
+#define DRIVER_GEM			0x1000
+#define DRIVER_MODESET			0x2000
+#define DRIVER_PRIME			0x4000
+#define DRIVER_RENDER			0x8000
+#define DRIVER_ATOMIC			0x10000
+#define DRIVER_KMS_LEGACY_CONTEXT	0x20000
 
 /***********************************************************************/
 /** \name Macros to make printk easier */
@@ -675,13 +676,12 @@
 
 	/* currently active master for this node. Protected by master_mutex */
 	struct drm_master *master;
-	struct drm_mode_group mode_group;
 };
 
 
 struct drm_pending_vblank_event {
 	struct drm_pending_event base;
-	int pipe;
+	unsigned int pipe;
 	struct drm_event_vblank event;
 };
 
@@ -700,7 +700,7 @@
 					/* for wraparound handling */
 	u32 last_wait;			/* Last vblank seqno waited per CRTC */
 	unsigned int inmodeset;		/* Display driver is setting mode */
-	int crtc;			/* crtc index */
+	unsigned int pipe;		/* crtc index */
 	bool enabled;			/* so we don't call enable more than
 					   once per disable */
 };
@@ -887,6 +887,7 @@
 /*@{*/
 
 				/* Driver support (drm_drv.h) */
+extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
 extern long drm_ioctl(struct file *filp,
 		      unsigned int cmd, unsigned long arg);
 extern long drm_compat_ioctl(struct file *filp,
@@ -920,34 +921,34 @@
 extern int drm_irq_install(struct drm_device *dev, int irq);
 extern int drm_irq_uninstall(struct drm_device *dev);
 
-extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
 extern int drm_wait_vblank(struct drm_device *dev, void *data,
 			   struct drm_file *filp);
-extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_vblank_count(struct drm_device *dev, int pipe);
 extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
 				     struct timeval *vblanktime);
-extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
-				     struct drm_pending_vblank_event *e);
+extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
+				  struct drm_pending_vblank_event *e);
 extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
 				       struct drm_pending_vblank_event *e);
-extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
+extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
 extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
-extern int drm_vblank_get(struct drm_device *dev, int crtc);
-extern void drm_vblank_put(struct drm_device *dev, int crtc);
+extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe);
 extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
 extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
-extern void drm_wait_one_vblank(struct drm_device *dev, int crtc);
+extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
 extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
-extern void drm_vblank_off(struct drm_device *dev, int crtc);
-extern void drm_vblank_on(struct drm_device *dev, int crtc);
+extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
 extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
 extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
 extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
 extern void drm_vblank_cleanup(struct drm_device *dev);
 
 extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
-						 int crtc, int *max_error,
+						 unsigned int pipe, int *max_error,
 						 struct timeval *vblank_time,
 						 unsigned flags,
 						 const struct drm_crtc *refcrtc,
@@ -968,8 +969,8 @@
 }
 
 /* Modesetting support */
-extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
-extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
+extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
 
 				/* Stub support (drm_stub.h) */
 extern struct drm_master *drm_master_get(struct drm_master *master);
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 8a3a913..e67aeac 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -166,7 +166,8 @@
 static inline bool
 drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
 {
-	return state->mode_changed || state->active_changed;
+	return state->mode_changed || state->active_changed ||
+	       state->connectors_changed;
 }
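
With connectors_changed folded in, drm_atomic_crtc_needs_modeset() now
also reports a full modeset when only the connector routing to a CRTC
changed. A hypothetical helper-based atomic_check showing the intended
use:

	#include <drm/drm_atomic.h>

	static int my_crtc_atomic_check(struct drm_crtc *crtc,
					struct drm_crtc_state *state)
	{
		if (drm_atomic_crtc_needs_modeset(state)) {
			/* full modeset path; now also taken when only
			 * the set of connectors on this CRTC changed */
		}
		return 0;
	}
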
 
 
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index cc1fee8..11266d1 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -87,8 +87,8 @@
 				struct drm_framebuffer *fb,
 				struct drm_pending_vblank_event *event,
 				uint32_t flags);
-void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
-				      int mode);
+int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
+				     int mode);
 
 /* default implementations for state handling */
 void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
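
The connector ->dpms hook and this helper now return an int, so a
failure in the atomic commit backing a DPMS change can propagate to
userspace. Wiring it up is unchanged apart from the type (sketch;
my_connector_funcs is hypothetical):

	static const struct drm_connector_funcs my_connector_funcs = {
		.dpms = drm_atomic_helper_connector_dpms,
		/* remaining callbacks elided for brevity */
	};
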
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 3b4d8a4..faaeff7 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -255,12 +255,13 @@
  * @crtc: backpointer to the CRTC
  * @enable: whether the CRTC should be enabled, gates all other state
  * @active: whether the CRTC is actively displaying (used for DPMS)
- * @mode_changed: for use by helpers and drivers when computing state updates
- * @active_changed: for use by helpers and drivers when computing state updates
+ * @planes_changed: planes on this crtc are updated
+ * @mode_changed: crtc_state->mode or crtc_state->enable has been changed
+ * @active_changed: crtc_state->active has been toggled.
+ * @connectors_changed: connectors to this crtc have been updated
  * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
  * @last_vblank_count: for helpers and drivers to capture the vblank of the
  * 	update to ensure framebuffer cleanup isn't done too early
- * @planes_changed: for use by helpers and drivers when computing state updates
  * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
  * @mode: current mode timings
  * @event: optional pointer to a DRM event to signal upon completion of the
@@ -283,6 +284,7 @@
 	bool planes_changed : 1;
 	bool mode_changed : 1;
 	bool active_changed : 1;
+	bool connectors_changed : 1;
 
 	/* attached planes bitmask:
 	 * WARNING: transitional helpers do not maintain plane_mask so
@@ -525,7 +527,7 @@
  * etc.
  */
 struct drm_connector_funcs {
-	void (*dpms)(struct drm_connector *connector, int mode);
+	int (*dpms)(struct drm_connector *connector, int mode);
 	void (*save)(struct drm_connector *connector);
 	void (*restore)(struct drm_connector *connector);
 	void (*reset)(struct drm_connector *connector);
@@ -861,7 +863,7 @@
 
 	uint32_t possible_crtcs;
 	uint32_t *format_types;
-	uint32_t format_count;
+	unsigned int format_count;
 	bool format_default;
 
 	struct drm_crtc *crtc;
@@ -1016,29 +1018,6 @@
 };
 
 /**
- * struct drm_mode_group - group of mode setting resources for potential sub-grouping
- * @num_crtcs: CRTC count
- * @num_encoders: encoder count
- * @num_connectors: connector count
- * @num_bridges: bridge count
- * @id_list: list of KMS object IDs in this group
- *
- * Currently this simply tracks the global mode setting state.  But in the
- * future it could allow groups of objects to be set aside into independent
- * control groups for use by different user level processes (e.g. two X servers
- * running simultaneously on different heads, each with their own mode
- * configuration and freedom of mode setting).
- */
-struct drm_mode_group {
-	uint32_t num_crtcs;
-	uint32_t num_encoders;
-	uint32_t num_connectors;
-
-	/* list of object IDs for this group */
-	uint32_t *id_list;
-};
-
-/**
  * struct drm_mode_config - Mode configuration control structure
  * @mutex: mutex protecting KMS related lists and structures
  * @connection_mutex: ww mutex protecting connector state and routing
@@ -1289,13 +1268,13 @@
 				    unsigned long possible_crtcs,
 				    const struct drm_plane_funcs *funcs,
 				    const uint32_t *formats,
-				    uint32_t format_count,
+				    unsigned int format_count,
 				    enum drm_plane_type type);
 extern int drm_plane_init(struct drm_device *dev,
 			  struct drm_plane *plane,
 			  unsigned long possible_crtcs,
 			  const struct drm_plane_funcs *funcs,
-			  const uint32_t *formats, uint32_t format_count,
+			  const uint32_t *formats, unsigned int format_count,
 			  bool is_primary);
 extern void drm_plane_cleanup(struct drm_plane *plane);
 extern unsigned int drm_plane_index(struct drm_plane *plane);
@@ -1322,9 +1301,6 @@
 extern void drm_fb_release(struct drm_file *file_priv);
 extern void drm_property_destroy_user_blobs(struct drm_device *dev,
                                             struct drm_file *file_priv);
-extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
-extern void drm_mode_group_destroy(struct drm_mode_group *group);
-extern void drm_reinit_primary_mode_group(struct drm_device *dev);
 extern bool drm_probe_ddc(struct i2c_adapter *adapter);
 extern struct edid *drm_get_edid(struct drm_connector *connector,
 				 struct i2c_adapter *adapter);
@@ -1577,8 +1553,45 @@
 }
 
 /* Plane list iterator for legacy (overlay only) planes. */
-#define drm_for_each_legacy_plane(plane, planelist) \
-	list_for_each_entry(plane, planelist, head) \
+#define drm_for_each_legacy_plane(plane, dev) \
+	list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \
 		if (plane->type == DRM_PLANE_TYPE_OVERLAY)
 
+#define drm_for_each_plane(plane, dev) \
+	list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
+
+#define drm_for_each_crtc(crtc, dev) \
+	list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+
+static inline void
+assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config)
+{
+	/*
+	 * The connector hotadd/remove code currently grabs both locks when
+	 * updating lists. Hence readers need only hold either of them to be
+	 * safe and the check amounts to
+	 *
+	 * WARN_ON(not_holding(A) && not_holding(B)).
+	 */
+	WARN_ON(!mutex_is_locked(&mode_config->mutex) &&
+		!drm_modeset_is_locked(&mode_config->connection_mutex));
+}
+
+#define drm_for_each_connector(connector, dev) \
+	for (assert_drm_connector_list_read_locked(&(dev)->mode_config),	\
+	     connector = list_first_entry(&(dev)->mode_config.connector_list,	\
+					  struct drm_connector, head);		\
+	     &connector->head != (&(dev)->mode_config.connector_list);		\
+	     connector = list_next_entry(connector, head))
+
+#define drm_for_each_encoder(encoder, dev) \
+	list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head)
+
+#define drm_for_each_fb(fb, dev) \
+	for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)),		\
+	     fb = list_first_entry(&(dev)->mode_config.fb_list,	\
+					  struct drm_framebuffer, head);	\
+	     &fb->head != (&(dev)->mode_config.fb_list);			\
+	     fb = list_next_entry(fb, head))
+
 #endif /* __DRM_CRTC_H__ */
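
The new iterators hide the mode_config list plumbing, and
drm_for_each_connector() additionally asserts that one of the two
connector-list locks is really held. A minimal sketch
(my_dump_connectors is hypothetical):

	#include <drm/drmP.h>
	#include <drm/drm_crtc.h>

	static void my_dump_connectors(struct drm_device *dev)
	{
		struct drm_connector *connector;

		mutex_lock(&dev->mode_config.mutex);
		drm_for_each_connector(connector, dev)
			DRM_INFO("connector: %s\n", connector->name);
		mutex_unlock(&dev->mode_config.mutex);
	}
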
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 918aa68..2a747a9 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -108,8 +108,10 @@
 	/* atomic helpers */
 	int (*atomic_check)(struct drm_crtc *crtc,
 			    struct drm_crtc_state *state);
-	void (*atomic_begin)(struct drm_crtc *crtc);
-	void (*atomic_flush)(struct drm_crtc *crtc);
+	void (*atomic_begin)(struct drm_crtc *crtc,
+			     struct drm_crtc_state *old_crtc_state);
+	void (*atomic_flush)(struct drm_crtc *crtc,
+			     struct drm_crtc_state *old_crtc_state);
 };
 
 /**
@@ -190,7 +192,7 @@
 extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
 extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 
-extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
+extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
 extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
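
The atomic_begin/atomic_flush CRTC hooks now receive the old CRTC
state, mirroring the plane helper hooks. A hypothetical stub showing
the new prototype:

	static void my_crtc_atomic_flush(struct drm_crtc *crtc,
					 struct drm_crtc_state *old_crtc_state)
	{
		/* old_crtc_state is the state being replaced, e.g. for
		 * completing an event armed in atomic_begin() */
	}
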
 
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2e86f64..499e9f6 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -420,7 +420,7 @@
 
 #define DP_TEST_SINK_MISC		    0x246
 # define DP_TEST_CRC_SUPPORTED		    (1 << 5)
-# define DP_TEST_COUNT_MASK		    0x7
+# define DP_TEST_COUNT_MASK		    0xf
 
 #define DP_TEST_RESPONSE		    0x260
 # define DP_TEST_ACK			    (1 << 0)
@@ -578,6 +578,7 @@
 u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
 					  int lane);
 
+#define DP_BRANCH_OUI_HEADER_SIZE	0xc
 #define DP_RECEIVER_CAP_SIZE		0xf
 #define EDP_PSR_RECEIVER_CAP_SIZE	2
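
The TEST_CRC_COUNT field of DP_TEST_SINK_MISC is four bits wide, so the
old 0x7 mask silently dropped its top bit. A sketch of reading the
counter with the corrected mask (read_crc_count is hypothetical):

	static int read_crc_count(struct drm_dp_aux *aux)
	{
		u8 misc;

		if (drm_dp_dpcd_readb(aux, DP_TEST_SINK_MISC, &misc) != 1)
			return -EIO;
		return misc & DP_TEST_COUNT_MASK;	/* bits 3:0 */
	}
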
 
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 0dfd94def..dbab462 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -122,6 +122,7 @@
 	bool delayed_hotplug;
 };
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
 			   const struct drm_fb_helper_funcs *funcs);
 int drm_fb_helper_init(struct drm_device *dev,
@@ -136,11 +137,38 @@
 			    struct fb_info *info);
 
 bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
+
+struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper);
 void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
 			    uint32_t fb_width, uint32_t fb_height);
 void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
 			    uint32_t depth);
 
+void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
+
+ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
+			       size_t count, loff_t *ppos);
+ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
+				size_t count, loff_t *ppos);
+
+void drm_fb_helper_sys_fillrect(struct fb_info *info,
+				const struct fb_fillrect *rect);
+void drm_fb_helper_sys_copyarea(struct fb_info *info,
+				const struct fb_copyarea *area);
+void drm_fb_helper_sys_imageblit(struct fb_info *info,
+				 const struct fb_image *image);
+
+void drm_fb_helper_cfb_fillrect(struct fb_info *info,
+				const struct fb_fillrect *rect);
+void drm_fb_helper_cfb_copyarea(struct fb_info *info,
+				const struct fb_copyarea *area);
+void drm_fb_helper_cfb_imageblit(struct fb_info *info,
+				 const struct fb_image *image);
+
+void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state);
+
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 
 int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
@@ -158,4 +186,188 @@
 int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 				       struct drm_connector *connector);
+#else
+static inline void drm_fb_helper_prepare(struct drm_device *dev,
+					struct drm_fb_helper *helper,
+					const struct drm_fb_helper_funcs *funcs)
+{
+}
+
+static inline int drm_fb_helper_init(struct drm_device *dev,
+		       struct drm_fb_helper *helper, int crtc_count,
+		       int max_conn)
+{
+	return 0;
+}
+
+static inline void drm_fb_helper_fini(struct drm_fb_helper *helper)
+{
+}
+
+static inline int drm_fb_helper_blank(int blank, struct fb_info *info)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+					    struct fb_info *info)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_set_par(struct fb_info *info)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+					  struct fb_info *info)
+{
+	return 0;
+}
+
+static inline bool
+drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+{
+	return true;
+}
+
+static inline struct fb_info *
+drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
+{
+	return NULL;
+}
+
+static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
+{
+}
+static inline void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper)
+{
+}
+
+static inline void drm_fb_helper_fill_var(struct fb_info *info,
+					  struct drm_fb_helper *fb_helper,
+					  uint32_t fb_width, uint32_t fb_height)
+{
+}
+
+static inline void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+					  uint32_t depth)
+{
+}
+
+static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap,
+					struct fb_info *info)
+{
+	return 0;
+}
+
+static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
+{
+}
+
+static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
+					     char __user *buf, size_t count,
+					     loff_t *ppos)
+{
+	return -ENODEV;
+}
+
+static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info,
+					      const char __user *buf,
+					      size_t count, loff_t *ppos)
+{
+	return -ENODEV;
+}
+
+static inline void drm_fb_helper_sys_fillrect(struct fb_info *info,
+					      const struct fb_fillrect *rect)
+{
+}
+
+static inline void drm_fb_helper_sys_copyarea(struct fb_info *info,
+					      const struct fb_copyarea *area)
+{
+}
+
+static inline void drm_fb_helper_sys_imageblit(struct fb_info *info,
+					       const struct fb_image *image)
+{
+}
+
+static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info,
+					      const struct fb_fillrect *rect)
+{
+}
+
+static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info,
+					      const struct fb_copyarea *area)
+{
+}
+
+static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info,
+					       const struct fb_image *image)
+{
+}
+
+static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
+					     int state)
+{
+}
+
+static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper,
+					       int bpp_sel)
+{
+	return 0;
+}
+
+static inline int
+drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_debug_enter(struct fb_info *info)
+{
+	return 0;
+}
+
+static inline int drm_fb_helper_debug_leave(struct fb_info *info)
+{
+	return 0;
+}
+
+static inline struct drm_display_mode *
+drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
+		       int width, int height)
+{
+	return NULL;
+}
+
+static inline struct drm_display_mode *
+drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+		      int width, int height)
+{
+	return NULL;
+}
+
+static inline int
+drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
+				struct drm_connector *connector)
+{
+	return 0;
+}
+
+static inline int
+drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+				   struct drm_connector *connector)
+{
+	return 0;
+}
+#endif
 #endif
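
With the CONFIG_DRM_FBDEV_EMULATION split, drivers can call the fb
helpers unconditionally; when the option is disabled the calls resolve
to the no-op/-ENODEV stubs above instead of needing #ifdefs at every
call site. For instance a suspend path stays unconditional (sketch):

	static void my_suspend_fbdev(struct drm_fb_helper *helper)
	{
		/* compiles to a no-op when CONFIG_DRM_FBDEV_EMULATION=n */
		drm_fb_helper_set_suspend(helper, 1);
	}
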
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 70595ff..5dd18bf 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -130,7 +130,6 @@
 struct drm_plane;
 
 void drm_modeset_lock_all(struct drm_device *dev);
-int __drm_modeset_lock_all(struct drm_device *dev, bool trylock);
 void drm_modeset_unlock_all(struct drm_device *dev);
 void drm_modeset_lock_crtc(struct drm_crtc *crtc,
 			   struct drm_plane *plane);
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 96e1628..dda401b 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -43,9 +43,8 @@
  * planes.
  */
 
-extern int drm_crtc_init(struct drm_device *dev,
-			 struct drm_crtc *crtc,
-			 const struct drm_crtc_funcs *funcs);
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+		  const struct drm_crtc_funcs *funcs);
 
 /**
  * drm_plane_helper_funcs - helper operations for CRTCs
@@ -79,26 +78,26 @@
 	plane->helper_private = funcs;
 }
 
-extern int drm_plane_helper_check_update(struct drm_plane *plane,
-					 struct drm_crtc *crtc,
-					 struct drm_framebuffer *fb,
-					 struct drm_rect *src,
-					 struct drm_rect *dest,
-					 const struct drm_rect *clip,
-					 int min_scale,
-					 int max_scale,
-					 bool can_position,
-					 bool can_update_disabled,
-					 bool *visible);
-extern int drm_primary_helper_update(struct drm_plane *plane,
-				     struct drm_crtc *crtc,
-				     struct drm_framebuffer *fb,
-				     int crtc_x, int crtc_y,
-				     unsigned int crtc_w, unsigned int crtc_h,
-				     uint32_t src_x, uint32_t src_y,
-				     uint32_t src_w, uint32_t src_h);
-extern int drm_primary_helper_disable(struct drm_plane *plane);
-extern void drm_primary_helper_destroy(struct drm_plane *plane);
+int drm_plane_helper_check_update(struct drm_plane *plane,
+				  struct drm_crtc *crtc,
+				  struct drm_framebuffer *fb,
+				  struct drm_rect *src,
+				  struct drm_rect *dest,
+				  const struct drm_rect *clip,
+				  int min_scale,
+				  int max_scale,
+				  bool can_position,
+				  bool can_update_disabled,
+				  bool *visible);
+int drm_primary_helper_update(struct drm_plane *plane,
+			      struct drm_crtc *crtc,
+			      struct drm_framebuffer *fb,
+			      int crtc_x, int crtc_y,
+			      unsigned int crtc_w, unsigned int crtc_h,
+			      uint32_t src_x, uint32_t src_y,
+			      uint32_t src_w, uint32_t src_h);
+int drm_primary_helper_disable(struct drm_plane *plane);
+void drm_primary_helper_destroy(struct drm_plane *plane);
 extern const struct drm_plane_funcs drm_primary_helper_funcs;
 
 int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index c9a8b64..b2d56dd 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -34,6 +34,17 @@
 		void (*codec_wake_override)(struct device *, bool enable);
 		int (*get_cdclk_freq)(struct device *);
 	} *ops;
+
+	const struct i915_audio_component_audio_ops {
+		void *audio_ptr;
+		/**
+		 * Called from the i915 driver to notify the HDA driver that
+		 * pin sense and/or ELD information has changed.
+		 * @audio_ptr:		HDA driver object
+		 * @port:		Which port has changed (PORTA / PORTB / PORTC etc)
+		 */
+		void (*pin_eld_notify)(void *audio_ptr, int port);
+	} *audio_ops;
 };
 
 #endif /* _I915_COMPONENT_H_ */
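
The new audio_ops member lets the HDA side register a callback so i915
can push pin sense/ELD changes instead of being polled. A sketch of the
audio-side registration (all my_* names are hypothetical; how the
struct is handed to i915 is left out):

	#include <drm/i915_component.h>

	struct my_hda {
		int dummy;	/* stand-in for real HDA driver state */
	};
	static struct my_hda hda;

	static void my_pin_eld_notify(void *audio_ptr, int port)
	{
		/* re-read pin sense and ELD for the port that changed */
	}

	static const struct i915_audio_component_audio_ops my_audio_ops = {
		.audio_ptr	= &hda,
		.pin_eld_notify	= my_pin_eld_notify,
	};
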
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index b08bdad..9e9bddaa5 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -3,8 +3,8 @@
 #ifndef _DRM_INTEL_GTT_H
 #define	_DRM_INTEL_GTT_H
 
-void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
-		   phys_addr_t *mappable_base, unsigned long *mappable_end);
+void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
+		   phys_addr_t *mappable_base, u64 *mappable_end);
 
 int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 		     struct agp_bridge_data *bridge);
diff --git a/include/dt-bindings/dma/axi-dmac.h b/include/dt-bindings/dma/axi-dmac.h
new file mode 100644
index 0000000..ad9e6ec
--- /dev/null
+++ b/include/dt-bindings/dma/axi-dmac.h
@@ -0,0 +1,48 @@
+/*
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __DT_BINDINGS_DMA_AXI_DMAC_H__
+#define __DT_BINDINGS_DMA_AXI_DMAC_H__
+
+#define AXI_DMAC_BUS_TYPE_AXI_MM		0
+#define AXI_DMAC_BUS_TYPE_AXI_STREAM	1
+#define AXI_DMAC_BUS_TYPE_FIFO			2
+
+#endif
diff --git a/include/dt-bindings/dma/jz4780-dma.h b/include/dt-bindings/dma/jz4780-dma.h
deleted file mode 100644
index df017fd..0000000
--- a/include/dt-bindings/dma/jz4780-dma.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef __DT_BINDINGS_DMA_JZ4780_DMA_H__
-#define __DT_BINDINGS_DMA_JZ4780_DMA_H__
-
-/*
- * Request type numbers for the JZ4780 DMA controller (written to the DRTn
- * register for the channel).
- */
-#define JZ4780_DMA_I2S1_TX	0x4
-#define JZ4780_DMA_I2S1_RX	0x5
-#define JZ4780_DMA_I2S0_TX	0x6
-#define JZ4780_DMA_I2S0_RX	0x7
-#define JZ4780_DMA_AUTO		0x8
-#define JZ4780_DMA_SADC_RX	0x9
-#define JZ4780_DMA_UART4_TX	0xc
-#define JZ4780_DMA_UART4_RX	0xd
-#define JZ4780_DMA_UART3_TX	0xe
-#define JZ4780_DMA_UART3_RX	0xf
-#define JZ4780_DMA_UART2_TX	0x10
-#define JZ4780_DMA_UART2_RX	0x11
-#define JZ4780_DMA_UART1_TX	0x12
-#define JZ4780_DMA_UART1_RX	0x13
-#define JZ4780_DMA_UART0_TX	0x14
-#define JZ4780_DMA_UART0_RX	0x15
-#define JZ4780_DMA_SSI0_TX	0x16
-#define JZ4780_DMA_SSI0_RX	0x17
-#define JZ4780_DMA_SSI1_TX	0x18
-#define JZ4780_DMA_SSI1_RX	0x19
-#define JZ4780_DMA_MSC0_TX	0x1a
-#define JZ4780_DMA_MSC0_RX	0x1b
-#define JZ4780_DMA_MSC1_TX	0x1c
-#define JZ4780_DMA_MSC1_RX	0x1d
-#define JZ4780_DMA_MSC2_TX	0x1e
-#define JZ4780_DMA_MSC2_RX	0x1f
-#define JZ4780_DMA_PCM0_TX	0x20
-#define JZ4780_DMA_PCM0_RX	0x21
-#define JZ4780_DMA_SMB0_TX	0x24
-#define JZ4780_DMA_SMB0_RX	0x25
-#define JZ4780_DMA_SMB1_TX	0x26
-#define JZ4780_DMA_SMB1_RX	0x27
-#define JZ4780_DMA_SMB2_TX	0x28
-#define JZ4780_DMA_SMB2_RX	0x29
-#define JZ4780_DMA_SMB3_TX	0x2a
-#define JZ4780_DMA_SMB3_RX	0x2b
-#define JZ4780_DMA_SMB4_TX	0x2c
-#define JZ4780_DMA_SMB4_RX	0x2d
-#define JZ4780_DMA_DES_TX	0x2e
-#define JZ4780_DMA_DES_RX	0x2f
-
-#endif /* __DT_BINDINGS_DMA_JZ4780_DMA_H__ */
diff --git a/include/dt-bindings/mfd/st-lpc.h b/include/dt-bindings/mfd/st-lpc.h
index e3e6c75..d05894a 100644
--- a/include/dt-bindings/mfd/st-lpc.h
+++ b/include/dt-bindings/mfd/st-lpc.h
@@ -11,5 +11,6 @@
 
 #define ST_LPC_MODE_RTC		0
 #define ST_LPC_MODE_WDT		1
+#define ST_LPC_MODE_CLKSRC	2
 
 #endif /* __DT_BINDINGS_ST_LPC_H__ */
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
index c102054..a15c170 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
@@ -7,6 +7,47 @@
 #define _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H
 
 /* power-source */
+
+/* Digital Input/Output: level [PM8058] */
+#define PM8058_MPP_VPH			0
+#define PM8058_MPP_S3			1
+#define PM8058_MPP_L2			2
+#define PM8058_MPP_L3			3
+
+/* Digital Input/Output: level [PM8901] */
+#define PM8901_MPP_MSMIO		0
+#define PM8901_MPP_DIG			1
+#define PM8901_MPP_L5			2
+#define PM8901_MPP_S4			3
+#define PM8901_MPP_VPH			4
+
+/* Digital Input/Output: level [PM8921] */
+#define PM8921_MPP_S4			1
+#define PM8921_MPP_L15			3
+#define PM8921_MPP_L17			4
+#define PM8921_MPP_VPH			7
+
+/* Digital Input/Output: level [PM8821] */
+#define PM8821_MPP_1P8			0
+#define PM8821_MPP_VPH			7
+
+/* Digital Input/Output: level [PM8018] */
+#define PM8018_MPP_L4			0
+#define PM8018_MPP_L14			1
+#define PM8018_MPP_S3			2
+#define PM8018_MPP_L6			3
+#define PM8018_MPP_L2			4
+#define PM8018_MPP_L5			5
+#define PM8018_MPP_VPH			7
+
+/* Digital Input/Output: level [PM8038] */
+#define PM8038_MPP_L20			0
+#define PM8038_MPP_L11			1
+#define PM8038_MPP_L5			2
+#define PM8038_MPP_L15			3
+#define PM8038_MPP_L17			4
+#define PM8038_MPP_VPH			7
+
 #define PM8841_MPP_VPH			0
 #define PM8841_MPP_S3			2
 
@@ -37,6 +78,16 @@
 #define PMIC_MPP_AMUX_ROUTE_ABUS3	6
 #define PMIC_MPP_AMUX_ROUTE_ABUS4	7
 
+/* Analog Output: level */
+#define PMIC_MPP_AOUT_LVL_1V25		0
+#define PMIC_MPP_AOUT_LVL_1V25_2	1
+#define PMIC_MPP_AOUT_LVL_0V625		2
+#define PMIC_MPP_AOUT_LVL_0V3125	3
+#define PMIC_MPP_AOUT_LVL_MPP		4
+#define PMIC_MPP_AOUT_LVL_ABUS1		5
+#define PMIC_MPP_AOUT_LVL_ABUS2		6
+#define PMIC_MPP_AOUT_LVL_ABUS3		7
+
 /* To be used with "function" */
 #define PMIC_MPP_FUNC_NORMAL		"normal"
 #define PMIC_MPP_FUNC_PAIRED		"paired"
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 5b08a85..00a5763 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,329 @@
 #ifndef _LINUX_ATOMIC_H
 #define _LINUX_ATOMIC_H
 #include <asm/atomic.h>
+#include <asm/barrier.h>
+
+/*
+ * Relaxed variants of xchg, cmpxchg and some atomic operations.
+ *
+ * We support four variants:
+ *
+ * - Fully ordered: The default implementation, no suffix required.
+ * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
+ * - Release: Provides RELEASE semantics, _release suffix.
+ * - Relaxed: No ordering guarantees, _relaxed suffix.
+ *
+ * For compound atomics performing both a load and a store, ACQUIRE
+ * semantics apply only to the load and RELEASE semantics only to the
+ * store portion of the operation. Note that a failed cmpxchg_acquire
+ * does -not- imply any memory ordering constraints.
+ *
+ * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
+ */
+
+#ifndef atomic_read_acquire
+#define  atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
+#endif
+
+#ifndef atomic_set_release
+#define  atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
+#endif
+
+/*
+ * The idea here is to build acquire/release variants by adding explicit
+ * barriers on top of the relaxed variant. In the case where the relaxed
+ * variant is already fully ordered, no additional barriers are needed.
+ */
+#define __atomic_op_acquire(op, args...)				\
+({									\
+	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
+	smp_mb__after_atomic();						\
+	__ret;								\
+})
+
+#define __atomic_op_release(op, args...)				\
+({									\
+	smp_mb__before_atomic();					\
+	op##_relaxed(args);						\
+})
+
+#define __atomic_op_fence(op, args...)					\
+({									\
+	typeof(op##_relaxed(args)) __ret;				\
+	smp_mb__before_atomic();					\
+	__ret = op##_relaxed(args);					\
+	smp_mb__after_atomic();						\
+	__ret;								\
+})
+
+/* atomic_add_return_relaxed */
+#ifndef atomic_add_return_relaxed
+#define  atomic_add_return_relaxed	atomic_add_return
+#define  atomic_add_return_acquire	atomic_add_return
+#define  atomic_add_return_release	atomic_add_return
+
+#else /* atomic_add_return_relaxed */
+
+#ifndef atomic_add_return_acquire
+#define  atomic_add_return_acquire(...)					\
+	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_add_return_release
+#define  atomic_add_return_release(...)					\
+	__atomic_op_release(atomic_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_add_return
+#define  atomic_add_return(...)						\
+	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic_add_return_relaxed */
+
+/* atomic_sub_return_relaxed */
+#ifndef atomic_sub_return_relaxed
+#define  atomic_sub_return_relaxed	atomic_sub_return
+#define  atomic_sub_return_acquire	atomic_sub_return
+#define  atomic_sub_return_release	atomic_sub_return
+
+#else /* atomic_sub_return_relaxed */
+
+#ifndef atomic_sub_return_acquire
+#define  atomic_sub_return_acquire(...)					\
+	__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_sub_return_release
+#define  atomic_sub_return_release(...)					\
+	__atomic_op_release(atomic_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_sub_return
+#define  atomic_sub_return(...)						\
+	__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic_sub_return_relaxed */
+
+/* atomic_xchg_relaxed */
+#ifndef atomic_xchg_relaxed
+#define  atomic_xchg_relaxed		atomic_xchg
+#define  atomic_xchg_acquire		atomic_xchg
+#define  atomic_xchg_release		atomic_xchg
+
+#else /* atomic_xchg_relaxed */
+
+#ifndef atomic_xchg_acquire
+#define  atomic_xchg_acquire(...)					\
+	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_xchg_release
+#define  atomic_xchg_release(...)					\
+	__atomic_op_release(atomic_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_xchg
+#define  atomic_xchg(...)						\
+	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic_xchg_relaxed */
+
+/* atomic_cmpxchg_relaxed */
+#ifndef atomic_cmpxchg_relaxed
+#define  atomic_cmpxchg_relaxed		atomic_cmpxchg
+#define  atomic_cmpxchg_acquire		atomic_cmpxchg
+#define  atomic_cmpxchg_release		atomic_cmpxchg
+
+#else /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic_cmpxchg_acquire
+#define  atomic_cmpxchg_acquire(...)					\
+	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_cmpxchg_release
+#define  atomic_cmpxchg_release(...)					\
+	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_cmpxchg
+#define  atomic_cmpxchg(...)						\
+	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic64_read_acquire
+#define  atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
+#endif
+
+#ifndef atomic64_set_release
+#define  atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
+#endif
+
+/* atomic64_add_return_relaxed */
+#ifndef atomic64_add_return_relaxed
+#define  atomic64_add_return_relaxed	atomic64_add_return
+#define  atomic64_add_return_acquire	atomic64_add_return
+#define  atomic64_add_return_release	atomic64_add_return
+
+#else /* atomic64_add_return_relaxed */
+
+#ifndef atomic64_add_return_acquire
+#define  atomic64_add_return_acquire(...)				\
+	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return_release
+#define  atomic64_add_return_release(...)				\
+	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return
+#define  atomic64_add_return(...)					\
+	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_add_return_relaxed */
+
+/* atomic64_sub_return_relaxed */
+#ifndef atomic64_sub_return_relaxed
+#define  atomic64_sub_return_relaxed	atomic64_sub_return
+#define  atomic64_sub_return_acquire	atomic64_sub_return
+#define  atomic64_sub_return_release	atomic64_sub_return
+
+#else /* atomic64_sub_return_relaxed */
+
+#ifndef atomic64_sub_return_acquire
+#define  atomic64_sub_return_acquire(...)				\
+	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return_release
+#define  atomic64_sub_return_release(...)				\
+	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return
+#define  atomic64_sub_return(...)					\
+	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_sub_return_relaxed */
+
+/* atomic64_xchg_relaxed */
+#ifndef atomic64_xchg_relaxed
+#define  atomic64_xchg_relaxed		atomic64_xchg
+#define  atomic64_xchg_acquire		atomic64_xchg
+#define  atomic64_xchg_release		atomic64_xchg
+
+#else /* atomic64_xchg_relaxed */
+
+#ifndef atomic64_xchg_acquire
+#define  atomic64_xchg_acquire(...)					\
+	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg_release
+#define  atomic64_xchg_release(...)					\
+	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg
+#define  atomic64_xchg(...)						\
+	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_xchg_relaxed */
+
+/* atomic64_cmpxchg_relaxed */
+#ifndef atomic64_cmpxchg_relaxed
+#define  atomic64_cmpxchg_relaxed	atomic64_cmpxchg
+#define  atomic64_cmpxchg_acquire	atomic64_cmpxchg
+#define  atomic64_cmpxchg_release	atomic64_cmpxchg
+
+#else /* atomic64_cmpxchg_relaxed */
+
+#ifndef atomic64_cmpxchg_acquire
+#define  atomic64_cmpxchg_acquire(...)					\
+	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg_release
+#define  atomic64_cmpxchg_release(...)					\
+	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg
+#define  atomic64_cmpxchg(...)						\
+	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_cmpxchg_relaxed */
+
+/* cmpxchg_relaxed */
+#ifndef cmpxchg_relaxed
+#define  cmpxchg_relaxed		cmpxchg
+#define  cmpxchg_acquire		cmpxchg
+#define  cmpxchg_release		cmpxchg
+
+#else /* cmpxchg_relaxed */
+
+#ifndef cmpxchg_acquire
+#define  cmpxchg_acquire(...)						\
+	__atomic_op_acquire(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg_release
+#define  cmpxchg_release(...)						\
+	__atomic_op_release(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg
+#define  cmpxchg(...)							\
+	__atomic_op_fence(cmpxchg, __VA_ARGS__)
+#endif
+#endif /* cmpxchg_relaxed */
+
+/* cmpxchg64_relaxed */
+#ifndef cmpxchg64_relaxed
+#define  cmpxchg64_relaxed		cmpxchg64
+#define  cmpxchg64_acquire		cmpxchg64
+#define  cmpxchg64_release		cmpxchg64
+
+#else /* cmpxchg64_relaxed */
+
+#ifndef cmpxchg64_acquire
+#define  cmpxchg64_acquire(...)						\
+	__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64_release
+#define  cmpxchg64_release(...)						\
+	__atomic_op_release(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64
+#define  cmpxchg64(...)							\
+	__atomic_op_fence(cmpxchg64, __VA_ARGS__)
+#endif
+#endif /* cmpxchg64_relaxed */
+
+/* xchg_relaxed */
+#ifndef xchg_relaxed
+#define  xchg_relaxed			xchg
+#define  xchg_acquire			xchg
+#define  xchg_release			xchg
+
+#else /* xchg_relaxed */
+
+#ifndef xchg_acquire
+#define  xchg_acquire(...)		__atomic_op_acquire(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg_release
+#define  xchg_release(...)		__atomic_op_release(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg
+#define  xchg(...)			__atomic_op_fence(xchg, __VA_ARGS__)
+#endif
+#endif /* xchg_relaxed */
 
 /**
  * atomic_add_unless - add unless the number is already a given value
@@ -28,6 +351,23 @@
 #define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
 #endif
 
+#ifndef atomic_andnot
+static inline void atomic_andnot(int i, atomic_t *v)
+{
+	atomic_and(~i, v);
+}
+#endif
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_andnot(mask, v);
+}
+
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_or(mask, v);
+}
+
 /**
  * atomic_inc_not_zero_hint - increment if not null
  * @v: pointer of type atomic_t
@@ -111,21 +451,16 @@
 }
 #endif
 
-#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
-static inline void atomic_or(int i, atomic_t *v)
-{
-	int old;
-	int new;
-
-	do {
-		old = atomic_read(v);
-		new = old | i;
-	} while (atomic_cmpxchg(v, old, new) != old);
-}
-#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
-
 #include <asm-generic/atomic-long.h>
 #ifdef CONFIG_GENERIC_ATOMIC64
 #include <asm-generic/atomic64.h>
 #endif
+
+#ifndef atomic64_andnot
+static inline void atomic64_andnot(long long i, atomic64_t *v)
+{
+	atomic64_and(~i, v);
+}
+#endif
+
 #endif /* _LINUX_ATOMIC_H */
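
The pattern throughout this header: an architecture supplies only the
_relaxed primitive and the _acquire/_release/fully-ordered forms are
synthesized from it with explicit barriers (or all four collapse to the
fully ordered op when no relaxed variant exists). A sketch of the new
variants in a publish/consume hand-off (struct msg is hypothetical):

	#include <linux/atomic.h>
	#include <linux/errno.h>

	struct msg {
		int payload;
		atomic_t ready;
	};

	static void publish(struct msg *m, int v)
	{
		m->payload = v;
		/* RELEASE: the payload store cannot pass this store */
		atomic_set_release(&m->ready, 1);
	}

	static int consume(struct msg *m)
	{
		/* ACQUIRE: later loads cannot pass this load */
		if (atomic_read_acquire(&m->ready))
			return m->payload;
		return -EAGAIN;
	}
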
diff --git a/include/linux/average.h b/include/linux/average.h
index c6028fd..d04aa58 100644
--- a/include/linux/average.h
+++ b/include/linux/average.h
@@ -3,28 +3,43 @@
 
 /* Exponentially weighted moving average (EWMA) */
 
-/* For more documentation see lib/average.c */
-
-struct ewma {
-	unsigned long internal;
-	unsigned long factor;
-	unsigned long weight;
-};
-
-extern void ewma_init(struct ewma *avg, unsigned long factor,
-		      unsigned long weight);
-
-extern struct ewma *ewma_add(struct ewma *avg, unsigned long val);
-
-/**
- * ewma_read() - Get average value
- * @avg: Average structure
- *
- * Returns the average value held in @avg.
- */
-static inline unsigned long ewma_read(const struct ewma *avg)
-{
-	return avg->internal >> avg->factor;
-}
+#define DECLARE_EWMA(name, _factor, _weight)				\
+	struct ewma_##name {						\
+		unsigned long internal;					\
+	};								\
+	static inline void ewma_##name##_init(struct ewma_##name *e)	\
+	{								\
+		BUILD_BUG_ON(!__builtin_constant_p(_factor));		\
+		BUILD_BUG_ON(!__builtin_constant_p(_weight));		\
+		BUILD_BUG_ON_NOT_POWER_OF_2(_factor);			\
+		BUILD_BUG_ON_NOT_POWER_OF_2(_weight);			\
+		e->internal = 0;					\
+	}								\
+	static inline unsigned long					\
+	ewma_##name##_read(struct ewma_##name *e)			\
+	{								\
+		BUILD_BUG_ON(!__builtin_constant_p(_factor));		\
+		BUILD_BUG_ON(!__builtin_constant_p(_weight));		\
+		BUILD_BUG_ON_NOT_POWER_OF_2(_factor);			\
+		BUILD_BUG_ON_NOT_POWER_OF_2(_weight);			\
+		return e->internal >> ilog2(_factor);			\
+	}								\
+	static inline void ewma_##name##_add(struct ewma_##name *e,	\
+					     unsigned long val)		\
+	{								\
+		unsigned long internal = ACCESS_ONCE(e->internal);	\
+		unsigned long weight = ilog2(_weight);			\
+		unsigned long factor = ilog2(_factor);			\
+									\
+		BUILD_BUG_ON(!__builtin_constant_p(_factor));		\
+		BUILD_BUG_ON(!__builtin_constant_p(_weight));		\
+		BUILD_BUG_ON_NOT_POWER_OF_2(_factor);			\
+		BUILD_BUG_ON_NOT_POWER_OF_2(_weight);			\
+									\
+		ACCESS_ONCE(e->internal) = internal ?			\
+			(((internal << weight) - internal) +		\
+				(val << factor)) >> weight :		\
+			(val << factor);				\
+	}
 
 #endif /* _LINUX_AVERAGE_H */
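
DECLARE_EWMA() replaces the old struct ewma with a per-user type whose
factor and weight are compile-time constants (enforced to be powers of
two), trading lib/average.c for inlined helpers. Usage sketch, assuming
a hypothetical RSSI average with factor 16 and weight 8:

	#include <linux/average.h>

	DECLARE_EWMA(rssi, 16, 8)	/* struct ewma_rssi + helpers */

	static struct ewma_rssi avg;

	static void rssi_setup(void)
	{
		ewma_rssi_init(&avg);
	}

	static void rssi_sample(unsigned long val)
	{
		ewma_rssi_add(&avg, val);
	}

	static unsigned long rssi_average(void)
	{
		return ewma_rssi_read(&avg);
	}
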
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
index 14eea94..ed3768f 100644
--- a/include/linux/basic_mmio_gpio.h
+++ b/include/linux/basic_mmio_gpio.h
@@ -75,5 +75,6 @@
 #define BGPIOF_UNREADABLE_REG_DIR	BIT(2) /* reg_dir is unreadable */
 #define BGPIOF_BIG_ENDIAN_BYTE_ORDER	BIT(3)
 #define BGPIOF_READ_OUTPUT_REG_SET     BIT(4) /* reg_set stores output value */
+#define BGPIOF_NO_OUTPUT		BIT(5) /* only input */
 
 #endif /* __BASIC_MMIO_GPIO_H */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 6cceedf..cf03843 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -640,7 +640,6 @@
 	spinlock_t gpio_lock;
 #ifdef CONFIG_BCMA_DRIVER_GPIO
 	struct gpio_chip gpio;
-	struct irq_domain *irq_domain;
 #endif
 };
 
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 4b7b4eb..e813013 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -118,9 +118,8 @@
 #define BIO_USER_MAPPED 4	/* contains user pages */
 #define BIO_NULL_MAPPED 5	/* contains invalid user pages */
 #define BIO_QUIET	6	/* Make BIO Quiet */
-#define BIO_SNAP_STABLE	7	/* bio data must be snapshotted during write */
-#define BIO_CHAIN	8	/* chained bio, ->bi_remaining in effect */
-#define BIO_REFFED	9	/* bio has elevated ->bi_cnt */
+#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
+#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
 
 /*
  * Flags starting here get preserved by bio_reset() - this includes
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 4383476..f57d7fe 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -10,6 +10,7 @@
 #include <uapi/linux/bpf.h>
 #include <linux/workqueue.h>
 #include <linux/file.h>
+#include <linux/perf_event.h>
 
 struct bpf_map;
 
@@ -24,6 +25,10 @@
 	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
 	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
 	int (*map_delete_elem)(struct bpf_map *map, void *key);
+
+	/* funcs called by prog_array and perf_event_array map */
+	void *(*map_fd_get_ptr) (struct bpf_map *map, int fd);
+	void (*map_fd_put_ptr) (void *ptr);
 };
 
 struct bpf_map {
@@ -142,13 +147,13 @@
 	bool owner_jited;
 	union {
 		char value[0] __aligned(8);
-		struct bpf_prog *prog[0] __aligned(8);
+		void *ptrs[0] __aligned(8);
 	};
 };
 #define MAX_TAIL_CALL_CNT 32
 
 u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
-void bpf_prog_array_map_clear(struct bpf_map *map);
+void bpf_fd_array_map_clear(struct bpf_map *map);
 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
 
@@ -185,6 +190,7 @@
 extern const struct bpf_func_proto bpf_map_update_elem_proto;
 extern const struct bpf_func_proto bpf_map_delete_elem_proto;
 
+extern const struct bpf_func_proto bpf_perf_event_read_proto;
 extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
 extern const struct bpf_func_proto bpf_tail_call_proto;
@@ -192,5 +198,7 @@
 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
 extern const struct bpf_func_proto bpf_get_current_comm_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
 
 #endif /* _LINUX_BPF_H */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index e08a6ae..c836eb2 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -252,7 +252,12 @@
 	({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
 
 #define WRITE_ONCE(x, val) \
-	({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+({							\
+	union { typeof(x) __val; char __c[1]; } __u =	\
+		{ .__val = (__force typeof(x)) (val) }; \
+	__write_once_size(&(x), __u.__c, sizeof(x));	\
+	__u.__val;					\
+})
 
 /**
  * READ_ONCE_CTRL - Read a value heading a control dependency
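
WRITE_ONCE() becomes a statement expression so the initializer can
carry a __force cast (quieting sparse for e.g. __rcu or
endian-annotated types) and, like READ_ONCE(), it now evaluates to the
stored value. The intended use is unchanged (sketch):

	#include <linux/compiler.h>

	static int shared_flag;

	static void writer(void)
	{
		/* single store the compiler may not tear or cache */
		WRITE_ONCE(shared_flag, 1);
	}

	static int reader(void)
	{
		/* single load, never refetched or torn */
		return READ_ONCE(shared_flag);
	}
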
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 8b6c083..8d70e13 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -137,6 +137,7 @@
 	kernel_cap_t	cap_permitted;	/* caps we're permitted */
 	kernel_cap_t	cap_effective;	/* caps we can actually use */
 	kernel_cap_t	cap_bset;	/* capability bounding set */
+	kernel_cap_t	cap_ambient;	/* Ambient capability set */
 #ifdef CONFIG_KEYS
 	unsigned char	jit_keyring;	/* default keyring to attach requested
 					 * keys to */
@@ -212,6 +213,13 @@
 }
 #endif
 
+static inline bool cap_ambient_invariant_ok(const struct cred *cred)
+{
+	return cap_issubset(cred->cap_ambient,
+			    cap_intersect(cred->cap_permitted,
+					  cred->cap_inheritable));
+}
+
 /**
  * get_new_cred - Get a reference on a new set of credentials
  * @cred: The new credentials to reference
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index e2f5eb4..7ea9184 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -66,6 +66,7 @@
 	DMA_XOR_VAL,
 	DMA_PQ_VAL,
 	DMA_MEMSET,
+	DMA_MEMSET_SG,
 	DMA_INTERRUPT,
 	DMA_SG,
 	DMA_PRIVATE,
@@ -183,6 +184,8 @@
  *  operation it continues the calculation with new sources
  * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
  *  on the result of this operation
+ * @DMA_CTRL_REUSE: client can reuse the descriptor and submit it again until
+ *  it is cleared or freed
  */
 enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
@@ -191,6 +194,7 @@
 	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
 	DMA_PREP_CONTINUE = (1 << 4),
 	DMA_PREP_FENCE = (1 << 5),
+	DMA_CTRL_REUSE = (1 << 6),
 };
 
 /**
@@ -400,6 +404,8 @@
  * @cmd_pause: true, if pause and thereby resume is supported
  * @cmd_terminate: true, if terminate cmd is supported
  * @residue_granularity: granularity of the reported transfer residue
+ * @descriptor_reuse: whether a descriptor can be reused by the client and
+ * resubmitted multiple times
  */
 struct dma_slave_caps {
 	u32 src_addr_widths;
@@ -408,6 +414,7 @@
 	bool cmd_pause;
 	bool cmd_terminate;
 	enum dma_residue_granularity residue_granularity;
+	bool descriptor_reuse;
 };
 
 static inline const char *dma_chan_name(struct dma_chan *chan)
@@ -467,6 +474,7 @@
 	dma_addr_t phys;
 	struct dma_chan *chan;
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
+	int (*desc_free)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
 	struct dmaengine_unmap_data *unmap;
@@ -585,6 +593,20 @@
 };
 
 /**
+ * enum dmaengine_alignment - defines alignment of the DMA async tx
+ * buffers
+ */
+enum dmaengine_alignment {
+	DMAENGINE_ALIGN_1_BYTE = 0,
+	DMAENGINE_ALIGN_2_BYTES = 1,
+	DMAENGINE_ALIGN_4_BYTES = 2,
+	DMAENGINE_ALIGN_8_BYTES = 3,
+	DMAENGINE_ALIGN_16_BYTES = 4,
+	DMAENGINE_ALIGN_32_BYTES = 5,
+	DMAENGINE_ALIGN_64_BYTES = 6,
+};
+
+/**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
  * @privatecnt: how many DMA channels are requested by dma_request_channel
@@ -616,6 +638,7 @@
  * @device_prep_dma_pq: prepares a pq operation
  * @device_prep_dma_pq_val: prepares a pqzero_sum operation
  * @device_prep_dma_memset: prepares a memset operation
+ * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
  * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
@@ -645,10 +668,10 @@
 	dma_cap_mask_t  cap_mask;
 	unsigned short max_xor;
 	unsigned short max_pq;
-	u8 copy_align;
-	u8 xor_align;
-	u8 pq_align;
-	u8 fill_align;
+	enum dmaengine_alignment copy_align;
+	enum dmaengine_alignment xor_align;
+	enum dmaengine_alignment pq_align;
+	enum dmaengine_alignment fill_align;
 	#define DMA_HAS_PQ_CONTINUE (1 << 15)
 
 	int dev_id;
@@ -682,6 +705,9 @@
 	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
 		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
 		unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
+		struct dma_chan *chan, struct scatterlist *sg,
+		unsigned int nents, int value, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
 		struct dma_chan *chan, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
@@ -833,7 +859,8 @@
 	return desc->tx_submit(desc);
 }
 
-static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
+static inline bool dmaengine_check_align(enum dmaengine_alignment align,
+					 size_t off1, size_t off2, size_t len)
 {
 	size_t mask;
 
@@ -1155,6 +1182,39 @@
 }
 #endif
 
+static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
+{
+	struct dma_slave_caps caps;
+
+	dma_get_slave_caps(tx->chan, &caps);
+
+	if (caps.descriptor_reuse) {
+		tx->flags |= DMA_CTRL_REUSE;
+		return 0;
+	} else {
+		return -EPERM;
+	}
+}
+
+static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
+{
+	tx->flags &= ~DMA_CTRL_REUSE;
+}
+
+static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
+{
+	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
+}
+
+static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
+{
+	/* this is supported for reusable desc, so check that */
+	if (dmaengine_desc_test_reuse(desc))
+		return desc->desc_free(desc);
+	else
+		return -EPERM;
+}
+
 /* --- DMA device --- */
 
 int dma_async_device_register(struct dma_device *device);
@@ -1169,7 +1229,7 @@
 static inline struct dma_chan
 *__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
 				  dma_filter_fn fn, void *fn_param,
-				  struct device *dev, char *name)
+				  struct device *dev, const char *name)
 {
 	struct dma_chan *chan;
 
@@ -1177,6 +1237,9 @@
 	if (chan)
 		return chan;
 
+	if (!fn || !fn_param)
+		return NULL;
+
 	return __dma_request_channel(mask, fn, fn_param);
 }
 #endif /* DMAENGINE_H */
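
Descriptor reuse lets a client resubmit one descriptor repeatedly
instead of re-preparing it per period; it must be requested explicitly
and only succeeds when the channel reports descriptor_reuse in its
slave caps. A sketch of the flow (reuse_example is hypothetical; desc
is any descriptor obtained from a prep call on chan):

	#include <linux/dmaengine.h>

	static int reuse_example(struct dma_chan *chan,
				 struct dma_async_tx_descriptor *desc)
	{
		if (dmaengine_desc_set_reuse(desc))
			return -EPERM;	/* channel can't reuse descs */

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);

		/* after completion the same desc may be submitted
		 * again; once done with it for good: */
		return dmaengine_desc_free(desc);
	}
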
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 9012f87..eb049c6 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -76,7 +76,7 @@
 
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 	return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
-		((a[2] ^ b[2]) & m)) == 0;
+		(__force int)((a[2] ^ b[2]) & m)) == 0;
 #else
 	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
 #endif
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 920408a..25c6324 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -417,15 +417,25 @@
 
 #define GET_DENTRY_SLOTS(x)	((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
 
-/* the number of dentry in a block */
-#define NR_DENTRY_IN_BLOCK	214
-
 /* MAX level for dir lookup */
 #define MAX_DIR_HASH_DEPTH	63
 
 /* MAX buckets in one level of dir */
 #define MAX_DIR_BUCKETS		(1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
 
+/*
+ * space utilization of regular dentry and inline dentry
+ *		regular dentry			inline dentry
+ * bitmap	1 * 27 = 27			1 * 23 = 23
+ * reserved	1 * 3 = 3			1 * 7 = 7
+ * dentry	11 * 214 = 2354			11 * 182 = 2002
+ * filename	8 * 214 = 1712			8 * 182 = 1456
+ * total	4096				3488
+ *
+ * Note: there is more reserved space in an inline dentry than in a regular
+ * dentry; when converting an inline dentry we should handle this carefully.
+ */
+#define NR_DENTRY_IN_BLOCK	214	/* the number of dentry in a block */
 #define SIZE_OF_DIR_ENTRY	11	/* by byte */
 #define SIZE_OF_DENTRY_BITMAP	((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
 					BITS_PER_BYTE)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 17724f6..fa2cab9 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -12,6 +12,7 @@
 #include <linux/linkage.h>
 #include <linux/printk.h>
 #include <linux/workqueue.h>
+#include <linux/sched.h>
 
 #include <asm/cacheflush.h>
 
@@ -354,6 +355,16 @@
 		   offsetof(struct bpf_prog, insns[proglen]));
 }
 
+static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
+{
+	/* When classic BPF programs have been loaded and the arch
+	 * does not have a classic BPF JIT (anymore), they have been
+	 * converted via bpf_migrate_filter() to eBPF and thus always
+	 * have an unspec program type.
+	 */
+	return prog->type == BPF_PROG_TYPE_UNSPEC;
+}
+
 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
 
 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
@@ -411,6 +422,7 @@
 
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 void bpf_int_jit_compile(struct bpf_prog *fp);
+bool bpf_helper_changes_skb_data(void *func);
 
 #ifdef CONFIG_BPF_JIT
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -427,8 +439,9 @@
 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
 				u32 pass, void *image)
 {
-	pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
-	       flen, proglen, pass, image);
+	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
+	       proglen, pass, image, current->comm, task_pid_nr(current));
+
 	if (image)
 		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
 			       16, 1, image, proglen, false);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index fbd780c..dc634a5 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -943,12 +943,18 @@
 
 struct lock_manager {
 	struct list_head list;
+	/*
+	 * NFSv4 and up also want opens blocked during the grace period;
+	 * NLM doesn't care:
+	 */
+	bool block_opens;
 };
 
 struct net;
 void locks_start_grace(struct net *, struct lock_manager *);
 void locks_end_grace(struct lock_manager *);
 int locks_in_grace(struct net *);
+int opens_in_grace(struct net *);
 
 /* that will die - we need it for nfs_lock_info */
 #include <linux/nfs_fs_i.h>
@@ -1612,7 +1618,6 @@
 	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
 	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
 	int (*mmap) (struct file *, struct vm_area_struct *);
-	int (*mremap)(struct file *, struct vm_area_struct *);
 	int (*open) (struct inode *, struct file *);
 	int (*flush) (struct file *, fl_owner_t id);
 	int (*release) (struct inode *, struct file *);
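
block_opens lets a lock manager demand that opens, not only lock
requests, be held off during its grace period, with opens_in_grace() as
the matching query. A sketch of how an NFSv4-style manager might use it
(names and the error value are illustrative):

	#include <linux/fs.h>

	static struct lock_manager my_grace_manager = {
		.block_opens = true,	/* NFSv4 semantics */
	};

	/* at startup: locks_start_grace(net, &my_grace_manager); */

	static int my_open_permitted(struct net *net)
	{
		if (opens_in_grace(net))
			return -EAGAIN;	/* retry once grace ends */
		return 0;
	}
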
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index bf0321e..0023088 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -841,9 +841,59 @@
 
 	u32 nand_stat;
 	wait_queue_head_t nand_wait;
+	bool little_endian;
 };
 
 extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
 
+static inline u32 ifc_in32(void __iomem *addr)
+{
+	u32 val;
+
+	if (fsl_ifc_ctrl_dev->little_endian)
+		val = ioread32(addr);
+	else
+		val = ioread32be(addr);
+
+	return val;
+}
+
+static inline u16 ifc_in16(void __iomem *addr)
+{
+	u16 val;
+
+	if (fsl_ifc_ctrl_dev->little_endian)
+		val = ioread16(addr);
+	else
+		val = ioread16be(addr);
+
+	return val;
+}
+
+static inline u8 ifc_in8(void __iomem *addr)
+{
+	return ioread8(addr);
+}
+
+static inline void ifc_out32(u32 val, void __iomem *addr)
+{
+	if (fsl_ifc_ctrl_dev->little_endian)
+		iowrite32(val, addr);
+	else
+		iowrite32be(val, addr);
+}
+
+static inline void ifc_out16(u16 val, void __iomem *addr)
+{
+	if (fsl_ifc_ctrl_dev->little_endian)
+		iowrite16(val, addr);
+	else
+		iowrite16be(val, addr);
+}
+
+static inline void ifc_out8(u8 val, void __iomem *addr)
+{
+	iowrite8(val, addr);
+}
 
 #endif /* __ASM_FSL_IFC_H */
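
The accessors select big- or little-endian MMIO from the controller's
little_endian flag, so IFC drivers stop hard-coding ioread32be() and
friends. Trivial sketch (the register pointer is hypothetical):

	#include <linux/fsl_ifc.h>

	static u32 read_and_clear(void __iomem *reg)
	{
		u32 stat = ifc_in32(reg);	/* BE or LE as configured */

		ifc_out32(stat, reg);		/* write-1-to-clear style */
		return stat;
	}
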
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 65a517d..e0727d7 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -195,40 +195,49 @@
 #define FSNOTIFY_EVENT_INODE	2
 
 /*
- * a mark is simply an object attached to an in core inode which allows an
+ * A mark is simply an object attached to an in core inode which allows an
  * fsnotify listener to indicate they are either no longer interested in events
  * of a type matching mask or only interested in those events.
  *
- * these are flushed when an inode is evicted from core and may be flushed
- * when the inode is modified (as seen by fsnotify_access).  Some fsnotify users
- * (such as dnotify) will flush these when the open fd is closed and not at
- * inode eviction or modification.
+ * These are flushed when an inode is evicted from core and may be flushed
+ * when the inode is modified (as seen by fsnotify_access).  Some fsnotify
+ * users (such as dnotify) will flush these when the open fd is closed and not
+ * at inode eviction or modification.
+ *
+ * Text in brackets is showing the lock(s) protecting modifications of a
+ * particular entry. obj_lock means either inode->i_lock or
+ * mnt->mnt_root->d_lock depending on the mark type.
  */
 struct fsnotify_mark {
-	__u32 mask;			/* mask this mark is for */
-	/* we hold ref for each i_list and g_list.  also one ref for each 'thing'
+	/* Mask this mark is for [mark->lock, group->mark_mutex] */
+	__u32 mask;
+	/* We hold one for presence in g_list. Also one ref for each 'thing'
 	 * in kernel that found and may be using this mark. */
-	atomic_t refcnt;		/* active things looking at this mark */
-	struct fsnotify_group *group;	/* group this mark is for */
-	struct list_head g_list;	/* list of marks by group->i_fsnotify_marks
-					 * Also reused for queueing mark into
-					 * destroy_list when it's waiting for
-					 * the end of SRCU period before it can
-					 * be freed */
-	spinlock_t lock;		/* protect group and inode */
-	struct hlist_node obj_list;	/* list of marks for inode / vfsmount */
-	struct list_head free_list;	/* tmp list used when freeing this mark */
-	union {
+	atomic_t refcnt;
+	/* Group this mark is for. Set on mark creation, stable until last ref
+	 * is dropped */
+	struct fsnotify_group *group;
+	/* List of marks by group->i_fsnotify_marks. Also reused for queueing
+	 * mark into destroy_list when it's waiting for the end of SRCU period
+	 * before it can be freed. [group->mark_mutex] */
+	struct list_head g_list;
+	/* Protects inode / mnt pointers, flags, masks */
+	spinlock_t lock;
+	/* List of marks for inode / vfsmount [obj_lock] */
+	struct hlist_node obj_list;
+	union {	/* Object pointer [mark->lock, group->mark_mutex] */
 		struct inode *inode;	/* inode this mark is associated with */
 		struct vfsmount *mnt;	/* vfsmount this mark is associated with */
 	};
-	__u32 ignored_mask;		/* events types to ignore */
+	/* Event types to ignore [mark->lock, group->mark_mutex] */
+	__u32 ignored_mask;
 #define FSNOTIFY_MARK_FLAG_INODE		0x01
 #define FSNOTIFY_MARK_FLAG_VFSMOUNT		0x02
 #define FSNOTIFY_MARK_FLAG_OBJECT_PINNED	0x04
 #define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY	0x08
 #define FSNOTIFY_MARK_FLAG_ALIVE		0x10
-	unsigned int flags;		/* vfsmount or inode mark? */
+#define FSNOTIFY_MARK_FLAG_ATTACHED		0x20
+	unsigned int flags;		/* flags [mark->lock] */
 	void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
 };
 
@@ -345,8 +354,10 @@
 /* given a group and a mark, flag mark to be freed when all references are dropped */
 extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
 				  struct fsnotify_group *group);
-extern void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
-					 struct fsnotify_group *group);
+/* detach mark from inode / mount list, group list, drop inode reference */
+extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
+/* free mark */
+extern void fsnotify_free_mark(struct fsnotify_mark *mark);
 /* run all the marks in a group, and clear all of the vfsmount marks */
 extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
 /* run all the marks in a group, and clear all of the inode marks */
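For callers, this splits what fsnotify_destroy_mark_locked() did into two
steps: detach under the group's mark_mutex, then free once the mutex is
dropped. A hedged sketch of the resulting pattern (the locking rule is assumed
from the old _locked contract):

	mutex_lock(&group->mark_mutex);
	fsnotify_detach_mark(mark);	/* off inode/mount list and group list */
	mutex_unlock(&group->mark_mutex);
	fsnotify_free_mark(mark);	/* freed after the SRCU grace period */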
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 5383bb1..7ff168d 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -59,6 +59,8 @@
 
 	genpool_algo_t algo;		/* allocation function */
 	void *data;
+
+	const char *name;
 };
 
 /*
@@ -118,8 +120,8 @@
 		unsigned long start, unsigned int nr, void *data);
 
 extern struct gen_pool *devm_gen_pool_create(struct device *dev,
-		int min_alloc_order, int nid);
-extern struct gen_pool *gen_pool_get(struct device *dev);
+		int min_alloc_order, int nid, const char *name);
+extern struct gen_pool *gen_pool_get(struct device *dev, const char *name);
 
 bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
 			size_t size);
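With pools now carried by name, one device can own several of them and
consumers select one explicitly; passing a NULL name keeps the old
one-pool-per-device lookup. A sketch, where "ocram" and the allocation order
are made-up values and error handling is simplified:

	#include <linux/genalloc.h>

	static int example_probe(struct device *dev)
	{
		struct gen_pool *pool;

		/* 2^5 = 32-byte minimum allocation granularity */
		pool = devm_gen_pool_create(dev, 5, NUMA_NO_NODE, "ocram");
		if (!pool)
			return -ENOMEM;

		/* consumers later find the pool by device + name */
		pool = gen_pool_get(dev, "ocram");
		return pool ? 0 : -ENODEV;
	}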
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index adac255..14cac67 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -47,17 +47,17 @@
 int gpiod_count(struct device *dev, const char *con_id);
 
 /* Acquire and dispose GPIOs */
-struct gpio_desc *__must_check __gpiod_get(struct device *dev,
+struct gpio_desc *__must_check gpiod_get(struct device *dev,
 					 const char *con_id,
 					 enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
 					       const char *con_id,
 					       unsigned int idx,
 					       enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
 						  const char *con_id,
 						  enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
 							const char *con_id,
 							unsigned int index,
 							enum gpiod_flags flags);
@@ -70,18 +70,18 @@
 void gpiod_put(struct gpio_desc *desc);
 void gpiod_put_array(struct gpio_descs *descs);
 
-struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
 					      const char *con_id,
 					      enum gpiod_flags flags);
-struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
 						    const char *con_id,
 						    unsigned int idx,
 						    enum gpiod_flags flags);
-struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
 						       const char *con_id,
 						       enum gpiod_flags flags);
 struct gpio_desc *__must_check
-__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
 			      unsigned int index, enum gpiod_flags flags);
 struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
 						     const char *con_id,
@@ -146,31 +146,31 @@
 	return 0;
 }
 
-static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
-						const char *con_id,
-						enum gpiod_flags flags)
+static inline struct gpio_desc *__must_check gpiod_get(struct device *dev,
+						       const char *con_id,
+						       enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
 }
 static inline struct gpio_desc *__must_check
-__gpiod_get_index(struct device *dev,
-		  const char *con_id,
-		  unsigned int idx,
-		  enum gpiod_flags flags)
+gpiod_get_index(struct device *dev,
+		const char *con_id,
+		unsigned int idx,
+		enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
 }
 
 static inline struct gpio_desc *__must_check
-__gpiod_get_optional(struct device *dev, const char *con_id,
-		     enum gpiod_flags flags)
+gpiod_get_optional(struct device *dev, const char *con_id,
+		   enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
 }
 
 static inline struct gpio_desc *__must_check
-__gpiod_get_index_optional(struct device *dev, const char *con_id,
-			   unsigned int index, enum gpiod_flags flags)
+gpiod_get_index_optional(struct device *dev, const char *con_id,
+			 unsigned int index, enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
 }
@@ -206,7 +206,7 @@
 }
 
 static inline struct gpio_desc *__must_check
-__devm_gpiod_get(struct device *dev,
+devm_gpiod_get(struct device *dev,
 		 const char *con_id,
 		 enum gpiod_flags flags)
 {
@@ -214,7 +214,7 @@
 }
 static inline
 struct gpio_desc *__must_check
-__devm_gpiod_get_index(struct device *dev,
+devm_gpiod_get_index(struct device *dev,
 		       const char *con_id,
 		       unsigned int idx,
 		       enum gpiod_flags flags)
@@ -223,14 +223,14 @@
 }
 
 static inline struct gpio_desc *__must_check
-__devm_gpiod_get_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_optional(struct device *dev, const char *con_id,
 			  enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
 }
 
 static inline struct gpio_desc *__must_check
-__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
 				unsigned int index, enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
@@ -424,42 +424,6 @@
 
 #endif /* CONFIG_GPIOLIB */
 
-/*
- * Vararg-hacks! This is done to transition the kernel to always pass
- * the options flags argument to the below functions. During a transition
- * phase these vararg macros make both old-and-newstyle code compile,
- * but when all calls to the elder API are removed, these should go away
- * and the __gpiod_get() etc functions above be renamed just gpiod_get()
- * etc.
- */
-#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
-#define gpiod_get(varargs...) __gpiod_get(varargs, GPIOD_ASIS)
-#define __gpiod_get_index(dev, con_id, index, flags, ...)		\
-	__gpiod_get_index(dev, con_id, index, flags)
-#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, GPIOD_ASIS)
-#define __gpiod_get_optional(dev, con_id, flags, ...)			\
-	__gpiod_get_optional(dev, con_id, flags)
-#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, GPIOD_ASIS)
-#define __gpiod_get_index_optional(dev, con_id, index, flags, ...)	\
-	__gpiod_get_index_optional(dev, con_id, index, flags)
-#define gpiod_get_index_optional(varargs...)				\
-	__gpiod_get_index_optional(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get(dev, con_id, flags, ...)			\
-	__devm_gpiod_get(dev, con_id, flags)
-#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_index(dev, con_id, index, flags, ...)		\
-	__devm_gpiod_get_index(dev, con_id, index, flags)
-#define devm_gpiod_get_index(varargs...)				\
-	__devm_gpiod_get_index(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_optional(dev, con_id, flags, ...)		\
-	__devm_gpiod_get_optional(dev, con_id, flags)
-#define devm_gpiod_get_optional(varargs...)				\
-	__devm_gpiod_get_optional(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...)	\
-	__devm_gpiod_get_index_optional(dev, con_id, index, flags)
-#define devm_gpiod_get_index_optional(varargs...)			\
-	__devm_gpiod_get_index_optional(varargs, GPIOD_ASIS)
-
 #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
 
 int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
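With the vararg transition macros removed above, the flags argument is now
mandatory at every call site. A sketch of the only accepted form; the "reset"
con_id and the pdev context are illustrative:

	struct gpio_desc *reset;

	reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);
	gpiod_set_value(reset, 1);	/* assert reset */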
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c8393cd..1aed31c 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -6,6 +6,7 @@
 #include <linux/irq.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
+#include <linux/lockdep.h>
 #include <linux/pinctrl/pinctrl.h>
 
 struct device;
@@ -64,6 +65,17 @@
  *	registers.
  * @irq_not_threaded: flag must be set if @can_sleep is set but the
  *	IRQs don't need to be threaded
+ * @irqchip: GPIO IRQ chip implementation, provided by GPIO driver
+ * @irqdomain: Interrupt translation domain; responsible for mapping
+ *	between GPIO hwirq number and linux irq number
+ * @irq_base: first linux IRQ number assigned to GPIO IRQ chip (deprecated)
+ * @irq_handler: the irq handler to use (often a predefined irq core function)
+ *	for GPIO IRQs, provided by GPIO driver
+ * @irq_default_type: default IRQ triggering type applied during GPIO driver
+ *	initialization, provided by GPIO driver
+ * @irq_parent: GPIO IRQ chip parent/bank linux irq number,
+ *	provided by GPIO driver
+ * @lock_key: per GPIO IRQ chip lockdep class
  *
  * A gpio_chip can help platforms abstract various sources of GPIOs so
 * they can all be accessed through a common programming interface.
@@ -126,6 +138,7 @@
 	irq_flow_handler_t	irq_handler;
 	unsigned int		irq_default_type;
 	int			irq_parent;
+	struct lock_class_key	*lock_key;
 #endif
 
 #if defined(CONFIG_OF_GPIO)
@@ -171,11 +184,25 @@
 		int parent_irq,
 		irq_flow_handler_t parent_handler);
 
-int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
-		struct irq_chip *irqchip,
-		unsigned int first_irq,
-		irq_flow_handler_t handler,
-		unsigned int type);
+int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+			  struct irq_chip *irqchip,
+			  unsigned int first_irq,
+			  irq_flow_handler_t handler,
+			  unsigned int type,
+			  struct lock_class_key *lock_key);
+
+#ifdef CONFIG_LOCKDEP
+#define gpiochip_irqchip_add(...)				\
+(								\
+	({							\
+		static struct lock_class_key _key;		\
+		_gpiochip_irqchip_add(__VA_ARGS__, &_key);	\
+	})							\
+)
+#else
+#define gpiochip_irqchip_add(...)				\
+	_gpiochip_irqchip_add(__VA_ARGS__, NULL)
+#endif
 
 #endif /* CONFIG_GPIOLIB_IRQCHIP */
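Existing callers need no change: under CONFIG_LOCKDEP the macro expands a
static lock_class_key at each call site and passes it to
_gpiochip_irqchip_add(), giving every GPIO irqchip its own lockdep class;
without lockdep it passes NULL. A call still looks like this ("mychip" is an
invented driver structure):

	ret = gpiochip_irqchip_add(&mychip->gc, &mychip_irq_chip, 0,
				   handle_simple_irq, IRQ_TYPE_NONE);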
 
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index e270614..c0d712d 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -57,5 +57,6 @@
 }
 
 void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
+void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
 
 #endif /* __LINUX_GPIO_MACHINE_H */
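The new remover makes board-file lookup tables symmetric with registration,
which matters for modular board code. A sketch; the chip label, pin number and
names are invented:

	static struct gpiod_lookup_table board_gpios = {
		.dev_id = "foo-device.0",
		.table = {
			GPIO_LOOKUP("gpiochip0", 3, "reset", GPIO_ACTIVE_LOW),
			{ },
		},
	};

	gpiod_add_lookup_table(&board_gpios);
	/* ... later, e.g. on module unload: */
	gpiod_remove_lookup_table(&board_gpios);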
diff --git a/include/linux/i2c/atmel_mxt_ts.h b/include/linux/i2c/atmel_mxt_ts.h
deleted file mode 100644
index 02bf6ea..0000000
--- a/include/linux/i2c/atmel_mxt_ts.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Atmel maXTouch Touchscreen driver
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef __LINUX_ATMEL_MXT_TS_H
-#define __LINUX_ATMEL_MXT_TS_H
-
-#include <linux/types.h>
-
-/* The platform data for the Atmel maXTouch touchscreen driver */
-struct mxt_platform_data {
-	unsigned long irqflags;
-	u8 t19_num_keys;
-	const unsigned int *t19_keymap;
-};
-
-#endif /* __LINUX_ATMEL_MXT_TS_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b9c7897..cfa906f 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2074,8 +2074,8 @@
 #define WLAN_EXT_CAPA5_TDLS_PROHIBITED	BIT(6)
 #define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED	BIT(7)
 
+#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED	BIT(5)
 #define WLAN_EXT_CAPA8_OPMODE_NOTIF	BIT(6)
-#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED	BIT(7)
 
 /* TDLS specific payload type in the LLC/SNAP header */
 #define WLAN_TDLS_SNAP_RFTYPE	0x2
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 193ad48..9084292 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -37,6 +37,7 @@
 	return (struct igmpv3_query *)skb_transport_header(skb);
 }
 
+extern int sysctl_igmp_llm_reports;
 extern int sysctl_igmp_max_memberships;
 extern int sysctl_igmp_max_msf;
 extern int sysctl_igmp_qrv;
diff --git a/include/linux/input/pixcir_ts.h b/include/linux/input/pixcir_ts.h
deleted file mode 100644
index 7bae83b..0000000
--- a/include/linux/input/pixcir_ts.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifndef	_PIXCIR_I2C_TS_H
-#define	_PIXCIR_I2C_TS_H
-
-/*
- * Register map
- */
-#define PIXCIR_REG_POWER_MODE	51
-#define PIXCIR_REG_INT_MODE	52
-
-/*
- * Power modes:
- * active: max scan speed
- * idle: lower scan speed with automatic transition to active on touch
- * halt: datasheet says sleep but this is more like halt as the chip
- *       clocks are cut and it can only be brought out of this mode
- *	 using the RESET pin.
- */
-enum pixcir_power_mode {
-	PIXCIR_POWER_ACTIVE,
-	PIXCIR_POWER_IDLE,
-	PIXCIR_POWER_HALT,
-};
-
-#define PIXCIR_POWER_MODE_MASK	0x03
-#define PIXCIR_POWER_ALLOW_IDLE (1UL << 2)
-
-/*
- * Interrupt modes:
- * periodical: interrupt is asserted periodically
- * diff coordinates: interrupt is asserted when coordinates change
- * level on touch: interrupt level asserted during touch
- * pulse on touch: interrupt pulse asserted during touch
- *
- */
-enum pixcir_int_mode {
-	PIXCIR_INT_PERIODICAL,
-	PIXCIR_INT_DIFF_COORD,
-	PIXCIR_INT_LEVEL_TOUCH,
-	PIXCIR_INT_PULSE_TOUCH,
-};
-
-#define PIXCIR_INT_MODE_MASK	0x03
-#define PIXCIR_INT_ENABLE	(1UL << 3)
-#define PIXCIR_INT_POL_HIGH	(1UL << 2)
-
-/**
- * struct pixcir_i2c_chip_data - chip related data
- * @max_fingers:	Max number of fingers reported simultaneously by h/w
- * @has_hw_ids:		Hardware supports finger tracking IDs
- *
- */
-struct pixcir_i2c_chip_data {
-	u8 max_fingers;
-	bool has_hw_ids;
-};
-
-struct pixcir_ts_platform_data {
-	int x_max;
-	int y_max;
-	int gpio_attb;		/* GPIO connected to ATTB line */
-	struct pixcir_i2c_chip_data chip;
-};
-
-#endif
diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h
index eecc9ea..c91e137 100644
--- a/include/linux/input/touchscreen.h
+++ b/include/linux/input/touchscreen.h
@@ -9,15 +9,8 @@
 #ifndef _TOUCHSCREEN_H
 #define _TOUCHSCREEN_H
 
-#include <linux/input.h>
+struct input_dev;
 
-#ifdef CONFIG_OF
-void touchscreen_parse_of_params(struct input_dev *dev, bool multitouch);
-#else
-static inline void touchscreen_parse_of_params(struct input_dev *dev,
-					       bool multitouch)
-{
-}
-#endif
+void touchscreen_parse_properties(struct input_dev *dev, bool multitouch);
 
 #endif
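Dropping the CONFIG_OF guard means the call is now unconditional and, going by
the rename, reads generic device properties rather than OF-only parameters. A
probe-time sketch with invented axis ranges:

	input_set_abs_params(input, ABS_X, 0, 4095, 0, 0);
	input_set_abs_params(input, ABS_Y, 0, 4095, 0, 0);
	touchscreen_parse_properties(input, false);	/* single-touch */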
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 82806c6..f1f32af 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -29,7 +29,9 @@
 	__s32		max_desync_factor;
 	__s32		max_addresses;
 	__s32		accept_ra_defrtr;
+	__s32		accept_ra_min_hop_limit;
 	__s32		accept_ra_pinfo;
+	__s32		ignore_routes_with_linkdown;
 #ifdef CONFIG_IPV6_ROUTER_PREF
 	__s32		accept_ra_rtr_pref;
 	__s32		rtr_probe_interval;
@@ -57,6 +59,7 @@
 		bool initialized;
 		struct in6_addr secret;
 	} stable_secret;
+	__s32		use_oif_addrs_only;
 	void		*sysctl;
 };
 
@@ -94,7 +97,6 @@
 struct inet6_skb_parm {
 	int			iif;
 	__be16			ra;
-	__u16			hop;
 	__u16			dst0;
 	__u16			srcrt;
 	__u16			dst1;
@@ -111,6 +113,7 @@
 #define IP6SKB_REROUTED		4
 #define IP6SKB_ROUTERALERT	8
 #define IP6SKB_FRAGMENTED      16
+#define IP6SKB_HOPBYHOP        32
 };
 
 #define IP6CB(skb)	((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 9b1ad37..4e68616 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -41,12 +41,20 @@
 
 /* Shared Global Counter */
 #define GIC_SH_COUNTER_31_00_OFS	0x0010
+/* 64-bit counter register for CM3 */
+#define GIC_SH_COUNTER_OFS		GIC_SH_COUNTER_31_00_OFS
 #define GIC_SH_COUNTER_63_32_OFS	0x0014
 #define GIC_SH_REVISIONID_OFS		0x0020
 
 /* Convert an interrupt number to a byte offset/bit for multi-word registers */
-#define GIC_INTR_OFS(intr)		(((intr) / 32) * 4)
-#define GIC_INTR_BIT(intr)		((intr) % 32)
+#define GIC_INTR_OFS(intr) ({				\
+	unsigned bits = mips_cm_is64 ? 64 : 32;		\
+	unsigned reg_idx = (intr) / bits;		\
+	unsigned reg_width = bits / 8;			\
+							\
+	reg_idx * reg_width;				\
+})
+#define GIC_INTR_BIT(intr)		((intr) % (mips_cm_is64 ? 64 : 32))
 
 /* Polarity : Reset Value is always 0 */
 #define GIC_SH_SET_POLARITY_OFS		0x0100
@@ -98,6 +106,8 @@
 #define GIC_VPE_WD_COUNT0_OFS		0x0094
 #define GIC_VPE_WD_INITIAL0_OFS		0x0098
 #define GIC_VPE_COMPARE_LO_OFS		0x00a0
+/* 64-bit Compare register on CM3 */
+#define GIC_VPE_COMPARE_OFS		GIC_VPE_COMPARE_LO_OFS
 #define GIC_VPE_COMPARE_HI_OFS		0x00a4
 
 #define GIC_VPE_EIC_SHADOW_SET_BASE_OFS	0x0100
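The reworked macros keep the one-interrupt-per-bit layout while letting CM3's
64-bit registers halve the number of register accesses. A worked example for
interrupt 45, derived directly from the macro bodies:

	/* 32-bit GIC:  bits = 32, so word 1 (offset 4), bit 13.
	 * 64-bit CM3:  bits = 64, so doubleword 0 (offset 0), bit 45. */
	unsigned off = GIC_SH_SET_POLARITY_OFS + GIC_INTR_OFS(45);
	unsigned bit = GIC_INTR_BIT(45);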
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
deleted file mode 100644
index d326152..0000000
--- a/include/linux/jbd.h
+++ /dev/null
@@ -1,1047 +0,0 @@
-/*
- * linux/include/linux/jbd.h
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>
- *
- * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Definitions for transaction data structures for the buffer cache
- * filesystem journaling support.
- */
-
-#ifndef _LINUX_JBD_H
-#define _LINUX_JBD_H
-
-/* Allow this file to be included directly into e2fsprogs */
-#ifndef __KERNEL__
-#include "jfs_compat.h"
-#define JFS_DEBUG
-#define jfs_debug jbd_debug
-#else
-
-#include <linux/types.h>
-#include <linux/buffer_head.h>
-#include <linux/journal-head.h>
-#include <linux/stddef.h>
-#include <linux/mutex.h>
-#include <linux/timer.h>
-#include <linux/lockdep.h>
-#include <linux/slab.h>
-
-#define journal_oom_retry 1
-
-/*
- * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds
- * certain classes of error which can occur due to failed IOs.  Under
- * normal use we want ext3 to continue after such errors, because
- * hardware _can_ fail, but for debugging purposes when running tests on
- * known-good hardware we may want to trap these errors.
- */
-#undef JBD_PARANOID_IOFAIL
-
-/*
- * The default maximum commit age, in seconds.
- */
-#define JBD_DEFAULT_MAX_COMMIT_AGE 5
-
-#ifdef CONFIG_JBD_DEBUG
-/*
- * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal
- * consistency checks.  By default we don't do this unless
- * CONFIG_JBD_DEBUG is on.
- */
-#define JBD_EXPENSIVE_CHECKING
-extern u8 journal_enable_debug;
-
-void __jbd_debug(int level, const char *file, const char *func,
-		 unsigned int line, const char *fmt, ...);
-
-#define jbd_debug(n, fmt, a...) \
-	__jbd_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
-#else
-#define jbd_debug(n, fmt, a...)    /**/
-#endif
-
-static inline void *jbd_alloc(size_t size, gfp_t flags)
-{
-	return (void *)__get_free_pages(flags, get_order(size));
-}
-
-static inline void jbd_free(void *ptr, size_t size)
-{
-	free_pages((unsigned long)ptr, get_order(size));
-}
-
-#define JFS_MIN_JOURNAL_BLOCKS 1024
-
-
-/**
- * typedef handle_t - The handle_t type represents a single atomic update being performed by some process.
- *
- * All filesystem modifications made by the process go
- * through this handle.  Recursive operations (such as quota operations)
- * are gathered into a single update.
- *
- * The buffer credits field is used to account for journaled buffers
- * being modified by the running process.  To ensure that there is
- * enough log space for all outstanding operations, we need to limit the
- * number of outstanding buffers possible at any time.  When the
- * operation completes, any buffer credits not used are credited back to
- * the transaction, so that at all times we know how many buffers the
- * outstanding updates on a transaction might possibly touch.
- *
- * This is an opaque datatype.
- **/
-typedef struct handle_s		handle_t;	/* Atomic operation type */
-
-
-/**
- * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem.
- *
- * journal_t is linked to from the fs superblock structure.
- *
- * We use the journal_t to keep track of all outstanding transaction
- * activity on the filesystem, and to manage the state of the log
- * writing process.
- *
- * This is an opaque datatype.
- **/
-typedef struct journal_s	journal_t;	/* Journal control structure */
-#endif
-
-/*
- * Internal structures used by the logging mechanism:
- */
-
-#define JFS_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
-
-/*
- * On-disk structures
- */
-
-/*
- * Descriptor block types:
- */
-
-#define JFS_DESCRIPTOR_BLOCK	1
-#define JFS_COMMIT_BLOCK	2
-#define JFS_SUPERBLOCK_V1	3
-#define JFS_SUPERBLOCK_V2	4
-#define JFS_REVOKE_BLOCK	5
-
-/*
- * Standard header for all descriptor blocks:
- */
-typedef struct journal_header_s
-{
-	__be32		h_magic;
-	__be32		h_blocktype;
-	__be32		h_sequence;
-} journal_header_t;
-
-
-/*
- * The block tag: used to describe a single buffer in the journal
- */
-typedef struct journal_block_tag_s
-{
-	__be32		t_blocknr;	/* The on-disk block number */
-	__be32		t_flags;	/* See below */
-} journal_block_tag_t;
-
-/*
- * The revoke descriptor: used on disk to describe a series of blocks to
- * be revoked from the log
- */
-typedef struct journal_revoke_header_s
-{
-	journal_header_t r_header;
-	__be32		 r_count;	/* Count of bytes used in the block */
-} journal_revoke_header_t;
-
-
-/* Definitions for the journal tag flags word: */
-#define JFS_FLAG_ESCAPE		1	/* on-disk block is escaped */
-#define JFS_FLAG_SAME_UUID	2	/* block has same uuid as previous */
-#define JFS_FLAG_DELETED	4	/* block deleted by this transaction */
-#define JFS_FLAG_LAST_TAG	8	/* last tag in this descriptor block */
-
-
-/*
- * The journal superblock.  All fields are in big-endian byte order.
- */
-typedef struct journal_superblock_s
-{
-/* 0x0000 */
-	journal_header_t s_header;
-
-/* 0x000C */
-	/* Static information describing the journal */
-	__be32	s_blocksize;		/* journal device blocksize */
-	__be32	s_maxlen;		/* total blocks in journal file */
-	__be32	s_first;		/* first block of log information */
-
-/* 0x0018 */
-	/* Dynamic information describing the current state of the log */
-	__be32	s_sequence;		/* first commit ID expected in log */
-	__be32	s_start;		/* blocknr of start of log */
-
-/* 0x0020 */
-	/* Error value, as set by journal_abort(). */
-	__be32	s_errno;
-
-/* 0x0024 */
-	/* Remaining fields are only valid in a version-2 superblock */
-	__be32	s_feature_compat;	/* compatible feature set */
-	__be32	s_feature_incompat;	/* incompatible feature set */
-	__be32	s_feature_ro_compat;	/* readonly-compatible feature set */
-/* 0x0030 */
-	__u8	s_uuid[16];		/* 128-bit uuid for journal */
-
-/* 0x0040 */
-	__be32	s_nr_users;		/* Nr of filesystems sharing log */
-
-	__be32	s_dynsuper;		/* Blocknr of dynamic superblock copy*/
-
-/* 0x0048 */
-	__be32	s_max_transaction;	/* Limit of journal blocks per trans.*/
-	__be32	s_max_trans_data;	/* Limit of data blocks per trans. */
-
-/* 0x0050 */
-	__u32	s_padding[44];
-
-/* 0x0100 */
-	__u8	s_users[16*48];		/* ids of all fs'es sharing the log */
-/* 0x0400 */
-} journal_superblock_t;
-
-#define JFS_HAS_COMPAT_FEATURE(j,mask)					\
-	((j)->j_format_version >= 2 &&					\
-	 ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
-#define JFS_HAS_RO_COMPAT_FEATURE(j,mask)				\
-	((j)->j_format_version >= 2 &&					\
-	 ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
-#define JFS_HAS_INCOMPAT_FEATURE(j,mask)				\
-	((j)->j_format_version >= 2 &&					\
-	 ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
-
-#define JFS_FEATURE_INCOMPAT_REVOKE	0x00000001
-
-/* Features known to this kernel version: */
-#define JFS_KNOWN_COMPAT_FEATURES	0
-#define JFS_KNOWN_ROCOMPAT_FEATURES	0
-#define JFS_KNOWN_INCOMPAT_FEATURES	JFS_FEATURE_INCOMPAT_REVOKE
-
-#ifdef __KERNEL__
-
-#include <linux/fs.h>
-#include <linux/sched.h>
-
-enum jbd_state_bits {
-	BH_JBD			/* Has an attached ext3 journal_head */
-	  = BH_PrivateStart,
-	BH_JWrite,		/* Being written to log (@@@ DEBUGGING) */
-	BH_Freed,		/* Has been freed (truncated) */
-	BH_Revoked,		/* Has been revoked from the log */
-	BH_RevokeValid,		/* Revoked flag is valid */
-	BH_JBDDirty,		/* Is dirty but journaled */
-	BH_State,		/* Pins most journal_head state */
-	BH_JournalHead,		/* Pins bh->b_private and jh->b_bh */
-	BH_Unshadow,		/* Dummy bit, for BJ_Shadow wakeup filtering */
-	BH_JBDPrivateStart,	/* First bit available for private use by FS */
-};
-
-BUFFER_FNS(JBD, jbd)
-BUFFER_FNS(JWrite, jwrite)
-BUFFER_FNS(JBDDirty, jbddirty)
-TAS_BUFFER_FNS(JBDDirty, jbddirty)
-BUFFER_FNS(Revoked, revoked)
-TAS_BUFFER_FNS(Revoked, revoked)
-BUFFER_FNS(RevokeValid, revokevalid)
-TAS_BUFFER_FNS(RevokeValid, revokevalid)
-BUFFER_FNS(Freed, freed)
-
-#include <linux/jbd_common.h>
-
-#define J_ASSERT(assert)	BUG_ON(!(assert))
-
-#define J_ASSERT_BH(bh, expr)	J_ASSERT(expr)
-#define J_ASSERT_JH(jh, expr)	J_ASSERT(expr)
-
-#if defined(JBD_PARANOID_IOFAIL)
-#define J_EXPECT(expr, why...)		J_ASSERT(expr)
-#define J_EXPECT_BH(bh, expr, why...)	J_ASSERT_BH(bh, expr)
-#define J_EXPECT_JH(jh, expr, why...)	J_ASSERT_JH(jh, expr)
-#else
-#define __journal_expect(expr, why...)					     \
-	({								     \
-		int val = (expr);					     \
-		if (!val) {						     \
-			printk(KERN_ERR					     \
-				"EXT3-fs unexpected failure: %s;\n",# expr); \
-			printk(KERN_ERR why "\n");			     \
-		}							     \
-		val;							     \
-	})
-#define J_EXPECT(expr, why...)		__journal_expect(expr, ## why)
-#define J_EXPECT_BH(bh, expr, why...)	__journal_expect(expr, ## why)
-#define J_EXPECT_JH(jh, expr, why...)	__journal_expect(expr, ## why)
-#endif
-
-struct jbd_revoke_table_s;
-
-/**
- * struct handle_s - this is the concrete type associated with handle_t.
- * @h_transaction: Which compound transaction is this update a part of?
- * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
- * @h_ref: Reference count on this handle
- * @h_err: Field for caller's use to track errors through large fs operations
- * @h_sync: flag for sync-on-close
- * @h_jdata: flag to force data journaling
- * @h_aborted: flag indicating fatal error on handle
- * @h_lockdep_map: lockdep info for debugging lock problems
- */
-struct handle_s
-{
-	/* Which compound transaction is this update a part of? */
-	transaction_t		*h_transaction;
-
-	/* Number of remaining buffers we are allowed to dirty: */
-	int			h_buffer_credits;
-
-	/* Reference count on this handle */
-	int			h_ref;
-
-	/* Field for caller's use to track errors through large fs */
-	/* operations */
-	int			h_err;
-
-	/* Flags [no locking] */
-	unsigned int	h_sync:		1;	/* sync-on-close */
-	unsigned int	h_jdata:	1;	/* force data journaling */
-	unsigned int	h_aborted:	1;	/* fatal error on handle */
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map	h_lockdep_map;
-#endif
-};
-
-
-/* The transaction_t type is the guts of the journaling mechanism.  It
- * tracks a compound transaction through its various states:
- *
- * RUNNING:	accepting new updates
- * LOCKED:	Updates still running but we don't accept new ones
- * RUNDOWN:	Updates are tidying up but have finished requesting
- *		new buffers to modify (state not used for now)
- * FLUSH:       All updates complete, but we are still writing to disk
- * COMMIT:      All data on disk, writing commit record
- * FINISHED:	We still have to keep the transaction for checkpointing.
- *
- * The transaction keeps track of all of the buffers modified by a
- * running transaction, and all of the buffers committed but not yet
- * flushed to home for finished transactions.
- */
-
-/*
- * Lock ranking:
- *
- *    j_list_lock
- *      ->jbd_lock_bh_journal_head()	(This is "innermost")
- *
- *    j_state_lock
- *    ->jbd_lock_bh_state()
- *
- *    jbd_lock_bh_state()
- *    ->j_list_lock
- *
- *    j_state_lock
- *    ->t_handle_lock
- *
- *    j_state_lock
- *    ->j_list_lock			(journal_unmap_buffer)
- *
- */
-
-struct transaction_s
-{
-	/* Pointer to the journal for this transaction. [no locking] */
-	journal_t		*t_journal;
-
-	/* Sequence number for this transaction [no locking] */
-	tid_t			t_tid;
-
-	/*
-	 * Transaction's current state
-	 * [no locking - only kjournald alters this]
-	 * [j_list_lock] guards transition of a transaction into T_FINISHED
-	 * state and subsequent call of __journal_drop_transaction()
-	 * FIXME: needs barriers
-	 * KLUDGE: [use j_state_lock]
-	 */
-	enum {
-		T_RUNNING,
-		T_LOCKED,
-		T_FLUSH,
-		T_COMMIT,
-		T_COMMIT_RECORD,
-		T_FINISHED
-	}			t_state;
-
-	/*
-	 * Where in the log does this transaction's commit start? [no locking]
-	 */
-	unsigned int		t_log_start;
-
-	/* Number of buffers on the t_buffers list [j_list_lock] */
-	int			t_nr_buffers;
-
-	/*
-	 * Doubly-linked circular list of all buffers reserved but not yet
-	 * modified by this transaction [j_list_lock]
-	 */
-	struct journal_head	*t_reserved_list;
-
-	/*
-	 * Doubly-linked circular list of all buffers under writeout during
-	 * commit [j_list_lock]
-	 */
-	struct journal_head	*t_locked_list;
-
-	/*
-	 * Doubly-linked circular list of all metadata buffers owned by this
-	 * transaction [j_list_lock]
-	 */
-	struct journal_head	*t_buffers;
-
-	/*
-	 * Doubly-linked circular list of all data buffers still to be
-	 * flushed before this transaction can be committed [j_list_lock]
-	 */
-	struct journal_head	*t_sync_datalist;
-
-	/*
-	 * Doubly-linked circular list of all forget buffers (superseded
-	 * buffers which we can un-checkpoint once this transaction commits)
-	 * [j_list_lock]
-	 */
-	struct journal_head	*t_forget;
-
-	/*
-	 * Doubly-linked circular list of all buffers still to be flushed before
-	 * this transaction can be checkpointed. [j_list_lock]
-	 */
-	struct journal_head	*t_checkpoint_list;
-
-	/*
-	 * Doubly-linked circular list of all buffers submitted for IO while
-	 * checkpointing. [j_list_lock]
-	 */
-	struct journal_head	*t_checkpoint_io_list;
-
-	/*
-	 * Doubly-linked circular list of temporary buffers currently undergoing
-	 * IO in the log [j_list_lock]
-	 */
-	struct journal_head	*t_iobuf_list;
-
-	/*
-	 * Doubly-linked circular list of metadata buffers being shadowed by log
-	 * IO.  The IO buffers on the iobuf list and the shadow buffers on this
-	 * list match each other one for one at all times. [j_list_lock]
-	 */
-	struct journal_head	*t_shadow_list;
-
-	/*
-	 * Doubly-linked circular list of control buffers being written to the
-	 * log. [j_list_lock]
-	 */
-	struct journal_head	*t_log_list;
-
-	/*
-	 * Protects info related to handles
-	 */
-	spinlock_t		t_handle_lock;
-
-	/*
-	 * Number of outstanding updates running on this transaction
-	 * [t_handle_lock]
-	 */
-	int			t_updates;
-
-	/*
-	 * Number of buffers reserved for use by all handles in this transaction
-	 * handle but not yet modified. [t_handle_lock]
-	 */
-	int			t_outstanding_credits;
-
-	/*
-	 * Forward and backward links for the circular list of all transactions
-	 * awaiting checkpoint. [j_list_lock]
-	 */
-	transaction_t		*t_cpnext, *t_cpprev;
-
-	/*
-	 * When will the transaction expire (become due for commit), in jiffies?
-	 * [no locking]
-	 */
-	unsigned long		t_expires;
-
-	/*
-	 * When this transaction started, in nanoseconds [no locking]
-	 */
-	ktime_t			t_start_time;
-
-	/*
-	 * How many handles used this transaction? [t_handle_lock]
-	 */
-	int t_handle_count;
-};
-
-/**
- * struct journal_s - this is the concrete type associated with journal_t.
- * @j_flags:  General journaling state flags
- * @j_errno:  Is there an outstanding uncleared error on the journal (from a
- *     prior abort)?
- * @j_sb_buffer: First part of superblock buffer
- * @j_superblock: Second part of superblock buffer
- * @j_format_version: Version of the superblock format
- * @j_state_lock: Protect the various scalars in the journal
- * @j_barrier_count:  Number of processes waiting to create a barrier lock
- * @j_running_transaction: The current running transaction..
- * @j_committing_transaction: the transaction we are pushing to disk
- * @j_checkpoint_transactions: a linked circular list of all transactions
- *  waiting for checkpointing
- * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
- *  to start committing, or for a barrier lock to be released
- * @j_wait_logspace: Wait queue for waiting for checkpointing to complete
- * @j_wait_done_commit: Wait queue for waiting for commit to complete
- * @j_wait_checkpoint:  Wait queue to trigger checkpointing
- * @j_wait_commit: Wait queue to trigger commit
- * @j_wait_updates: Wait queue to wait for updates to complete
- * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
- * @j_head: Journal head - identifies the first unused block in the journal
- * @j_tail: Journal tail - identifies the oldest still-used block in the
- *  journal.
- * @j_free: Journal free - how many free blocks are there in the journal?
- * @j_first: The block number of the first usable block
- * @j_last: The block number one beyond the last usable block
- * @j_dev: Device where we store the journal
- * @j_blocksize: blocksize for the location where we store the journal.
- * @j_blk_offset: starting block offset into the device where we store the
- *     journal
- * @j_fs_dev: Device which holds the client fs.  For internal journal this will
- *     be equal to j_dev
- * @j_maxlen: Total maximum capacity of the journal region on disk.
- * @j_list_lock: Protects the buffer lists and internal buffer state.
- * @j_inode: Optional inode where we store the journal.  If present, all journal
- *     block numbers are mapped into this inode via bmap().
- * @j_tail_sequence:  Sequence number of the oldest transaction in the log
- * @j_transaction_sequence: Sequence number of the next transaction to grant
- * @j_commit_sequence: Sequence number of the most recently committed
- *  transaction
- * @j_commit_request: Sequence number of the most recent transaction wanting
- *     commit
- * @j_commit_waited: Sequence number of the most recent transaction someone
- *     is waiting for to commit.
- * @j_uuid: Uuid of client object.
- * @j_task: Pointer to the current commit thread for this journal
- * @j_max_transaction_buffers:  Maximum number of metadata buffers to allow in a
- *     single compound commit transaction
- * @j_commit_interval: What is the maximum transaction lifetime before we begin
- *  a commit?
- * @j_commit_timer:  The timer used to wakeup the commit thread
- * @j_revoke_lock: Protect the revoke table
- * @j_revoke: The revoke table - maintains the list of revoked blocks in the
- *     current transaction.
- * @j_revoke_table: alternate revoke tables for j_revoke
- * @j_wbuf: array of buffer_heads for journal_commit_transaction
- * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
- *	number that will fit in j_blocksize
- * @j_last_sync_writer: most recent pid which did a synchronous write
- * @j_average_commit_time: the average amount of time in nanoseconds it
- *	takes to commit a transaction to the disk.
- * @j_private: An opaque pointer to fs-private information.
- */
-
-struct journal_s
-{
-	/* General journaling state flags [j_state_lock] */
-	unsigned long		j_flags;
-
-	/*
-	 * Is there an outstanding uncleared error on the journal (from a prior
-	 * abort)? [j_state_lock]
-	 */
-	int			j_errno;
-
-	/* The superblock buffer */
-	struct buffer_head	*j_sb_buffer;
-	journal_superblock_t	*j_superblock;
-
-	/* Version of the superblock format */
-	int			j_format_version;
-
-	/*
-	 * Protect the various scalars in the journal
-	 */
-	spinlock_t		j_state_lock;
-
-	/*
-	 * Number of processes waiting to create a barrier lock [j_state_lock]
-	 */
-	int			j_barrier_count;
-
-	/*
-	 * Transactions: The current running transaction...
-	 * [j_state_lock] [caller holding open handle]
-	 */
-	transaction_t		*j_running_transaction;
-
-	/*
-	 * the transaction we are pushing to disk
-	 * [j_state_lock] [caller holding open handle]
-	 */
-	transaction_t		*j_committing_transaction;
-
-	/*
-	 * ... and a linked circular list of all transactions waiting for
-	 * checkpointing. [j_list_lock]
-	 */
-	transaction_t		*j_checkpoint_transactions;
-
-	/*
-	 * Wait queue for waiting for a locked transaction to start committing,
-	 * or for a barrier lock to be released
-	 */
-	wait_queue_head_t	j_wait_transaction_locked;
-
-	/* Wait queue for waiting for checkpointing to complete */
-	wait_queue_head_t	j_wait_logspace;
-
-	/* Wait queue for waiting for commit to complete */
-	wait_queue_head_t	j_wait_done_commit;
-
-	/* Wait queue to trigger checkpointing */
-	wait_queue_head_t	j_wait_checkpoint;
-
-	/* Wait queue to trigger commit */
-	wait_queue_head_t	j_wait_commit;
-
-	/* Wait queue to wait for updates to complete */
-	wait_queue_head_t	j_wait_updates;
-
-	/* Semaphore for locking against concurrent checkpoints */
-	struct mutex		j_checkpoint_mutex;
-
-	/*
-	 * Journal head: identifies the first unused block in the journal.
-	 * [j_state_lock]
-	 */
-	unsigned int		j_head;
-
-	/*
-	 * Journal tail: identifies the oldest still-used block in the journal.
-	 * [j_state_lock]
-	 */
-	unsigned int		j_tail;
-
-	/*
-	 * Journal free: how many free blocks are there in the journal?
-	 * [j_state_lock]
-	 */
-	unsigned int		j_free;
-
-	/*
-	 * Journal start and end: the block numbers of the first usable block
-	 * and one beyond the last usable block in the journal. [j_state_lock]
-	 */
-	unsigned int		j_first;
-	unsigned int		j_last;
-
-	/*
-	 * Device, blocksize and starting block offset for the location where we
-	 * store the journal.
-	 */
-	struct block_device	*j_dev;
-	int			j_blocksize;
-	unsigned int		j_blk_offset;
-
-	/*
-	 * Device which holds the client fs.  For internal journal this will be
-	 * equal to j_dev.
-	 */
-	struct block_device	*j_fs_dev;
-
-	/* Total maximum capacity of the journal region on disk. */
-	unsigned int		j_maxlen;
-
-	/*
-	 * Protects the buffer lists and internal buffer state.
-	 */
-	spinlock_t		j_list_lock;
-
-	/* Optional inode where we store the journal.  If present, all */
-	/* journal block numbers are mapped into this inode via */
-	/* bmap(). */
-	struct inode		*j_inode;
-
-	/*
-	 * Sequence number of the oldest transaction in the log [j_state_lock]
-	 */
-	tid_t			j_tail_sequence;
-
-	/*
-	 * Sequence number of the next transaction to grant [j_state_lock]
-	 */
-	tid_t			j_transaction_sequence;
-
-	/*
-	 * Sequence number of the most recently committed transaction
-	 * [j_state_lock].
-	 */
-	tid_t			j_commit_sequence;
-
-	/*
-	 * Sequence number of the most recent transaction wanting commit
-	 * [j_state_lock]
-	 */
-	tid_t			j_commit_request;
-
-	/*
-	 * Sequence number of the most recent transaction someone is waiting
-	 * for to commit.
-	 * [j_state_lock]
-	 */
-	tid_t                   j_commit_waited;
-
-	/*
-	 * Journal uuid: identifies the object (filesystem, LVM volume etc)
-	 * backed by this journal.  This will eventually be replaced by an array
-	 * of uuids, allowing us to index multiple devices within a single
-	 * journal and to perform atomic updates across them.
-	 */
-	__u8			j_uuid[16];
-
-	/* Pointer to the current commit thread for this journal */
-	struct task_struct	*j_task;
-
-	/*
-	 * Maximum number of metadata buffers to allow in a single compound
-	 * commit transaction
-	 */
-	int			j_max_transaction_buffers;
-
-	/*
-	 * What is the maximum transaction lifetime before we begin a commit?
-	 */
-	unsigned long		j_commit_interval;
-
-	/* The timer used to wakeup the commit thread: */
-	struct timer_list	j_commit_timer;
-
-	/*
-	 * The revoke table: maintains the list of revoked blocks in the
-	 * current transaction.  [j_revoke_lock]
-	 */
-	spinlock_t		j_revoke_lock;
-	struct jbd_revoke_table_s *j_revoke;
-	struct jbd_revoke_table_s *j_revoke_table[2];
-
-	/*
-	 * array of bhs for journal_commit_transaction
-	 */
-	struct buffer_head	**j_wbuf;
-	int			j_wbufsize;
-
-	/*
-	 * this is the pid of the last person to run a synchronous operation
-	 * through the journal.
-	 */
-	pid_t			j_last_sync_writer;
-
-	/*
-	 * the average amount of time in nanoseconds it takes to commit a
-	 * transaction to the disk.  [j_state_lock]
-	 */
-	u64			j_average_commit_time;
-
-	/*
-	 * An opaque pointer to fs-private information.  ext3 puts its
-	 * superblock pointer here
-	 */
-	void *j_private;
-};
-
-/*
- * Journal flag definitions
- */
-#define JFS_UNMOUNT	0x001	/* Journal thread is being destroyed */
-#define JFS_ABORT	0x002	/* Journaling has been aborted for errors. */
-#define JFS_ACK_ERR	0x004	/* The errno in the sb has been acked */
-#define JFS_FLUSHED	0x008	/* The journal superblock has been flushed */
-#define JFS_LOADED	0x010	/* The journal superblock has been loaded */
-#define JFS_BARRIER	0x020	/* Use IDE barriers */
-#define JFS_ABORT_ON_SYNCDATA_ERR	0x040  /* Abort the journal on file
-						* data write error in ordered
-						* mode */
-
-/*
- * Function declarations for the journaling transaction and buffer
- * management
- */
-
-/* Filing buffers */
-extern void journal_unfile_buffer(journal_t *, struct journal_head *);
-extern void __journal_unfile_buffer(struct journal_head *);
-extern void __journal_refile_buffer(struct journal_head *);
-extern void journal_refile_buffer(journal_t *, struct journal_head *);
-extern void __journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_free_buffer(struct journal_head *bh);
-extern void journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_clean_data_list(transaction_t *transaction);
-
-/* Log buffer allocation */
-extern struct journal_head * journal_get_descriptor_buffer(journal_t *);
-int journal_next_log_block(journal_t *, unsigned int *);
-
-/* Commit management */
-extern void journal_commit_transaction(journal_t *);
-
-/* Checkpoint list management */
-int __journal_clean_checkpoint_list(journal_t *journal);
-int __journal_remove_checkpoint(struct journal_head *);
-void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
-
-/* Buffer IO */
-extern int
-journal_write_metadata_buffer(transaction_t	  *transaction,
-			      struct journal_head  *jh_in,
-			      struct journal_head **jh_out,
-			      unsigned int blocknr);
-
-/* Transaction locking */
-extern void		__wait_on_journal (journal_t *);
-
-/*
- * Journal locking.
- *
- * We need to lock the journal during transaction state changes so that nobody
- * ever tries to take a handle on the running transaction while we are in the
- * middle of moving it to the commit phase.  j_state_lock does this.
- *
- * Note that the locking is completely interrupt unsafe.  We never touch
- * journal structures from interrupts.
- */
-
-static inline handle_t *journal_current_handle(void)
-{
-	return current->journal_info;
-}
-
-/* The journaling code user interface:
- *
- * Create and destroy handles
- * Register buffer modifications against the current transaction.
- */
-
-extern handle_t *journal_start(journal_t *, int nblocks);
-extern int	 journal_restart (handle_t *, int nblocks);
-extern int	 journal_extend (handle_t *, int nblocks);
-extern int	 journal_get_write_access(handle_t *, struct buffer_head *);
-extern int	 journal_get_create_access (handle_t *, struct buffer_head *);
-extern int	 journal_get_undo_access(handle_t *, struct buffer_head *);
-extern int	 journal_dirty_data (handle_t *, struct buffer_head *);
-extern int	 journal_dirty_metadata (handle_t *, struct buffer_head *);
-extern void	 journal_release_buffer (handle_t *, struct buffer_head *);
-extern int	 journal_forget (handle_t *, struct buffer_head *);
-extern void	 journal_sync_buffer (struct buffer_head *);
-extern void	 journal_invalidatepage(journal_t *,
-				struct page *, unsigned int, unsigned int);
-extern int	 journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
-extern int	 journal_stop(handle_t *);
-extern int	 journal_flush (journal_t *);
-extern void	 journal_lock_updates (journal_t *);
-extern void	 journal_unlock_updates (journal_t *);
-
-extern journal_t * journal_init_dev(struct block_device *bdev,
-				struct block_device *fs_dev,
-				int start, int len, int bsize);
-extern journal_t * journal_init_inode (struct inode *);
-extern int	   journal_update_format (journal_t *);
-extern int	   journal_check_used_features
-		   (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int	   journal_check_available_features
-		   (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int	   journal_set_features
-		   (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int	   journal_create     (journal_t *);
-extern int	   journal_load       (journal_t *journal);
-extern int	   journal_destroy    (journal_t *);
-extern int	   journal_recover    (journal_t *journal);
-extern int	   journal_wipe       (journal_t *, int);
-extern int	   journal_skip_recovery	(journal_t *);
-extern void	   journal_update_sb_log_tail	(journal_t *, tid_t, unsigned int,
-						 int);
-extern void	   journal_abort      (journal_t *, int);
-extern int	   journal_errno      (journal_t *);
-extern void	   journal_ack_err    (journal_t *);
-extern int	   journal_clear_err  (journal_t *);
-extern int	   journal_bmap(journal_t *, unsigned int, unsigned int *);
-extern int	   journal_force_commit(journal_t *);
-
-/*
- * journal_head management
- */
-struct journal_head *journal_add_journal_head(struct buffer_head *bh);
-struct journal_head *journal_grab_journal_head(struct buffer_head *bh);
-void journal_put_journal_head(struct journal_head *jh);
-
-/*
- * handle management
- */
-extern struct kmem_cache *jbd_handle_cache;
-
-static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
-{
-	return kmem_cache_zalloc(jbd_handle_cache, gfp_flags);
-}
-
-static inline void jbd_free_handle(handle_t *handle)
-{
-	kmem_cache_free(jbd_handle_cache, handle);
-}
-
-/* Primary revoke support */
-#define JOURNAL_REVOKE_DEFAULT_HASH 256
-extern int	   journal_init_revoke(journal_t *, int);
-extern void	   journal_destroy_revoke_caches(void);
-extern int	   journal_init_revoke_caches(void);
-
-extern void	   journal_destroy_revoke(journal_t *);
-extern int	   journal_revoke (handle_t *,
-				unsigned int, struct buffer_head *);
-extern int	   journal_cancel_revoke(handle_t *, struct journal_head *);
-extern void	   journal_write_revoke_records(journal_t *,
-						transaction_t *, int);
-
-/* Recovery revoke support */
-extern int	journal_set_revoke(journal_t *, unsigned int, tid_t);
-extern int	journal_test_revoke(journal_t *, unsigned int, tid_t);
-extern void	journal_clear_revoke(journal_t *);
-extern void	journal_switch_revoke_table(journal_t *journal);
-extern void	journal_clear_buffer_revoked_flags(journal_t *journal);
-
-/*
- * The log thread user interface:
- *
- * Request space in the current transaction, and force transaction commit
- * transitions on demand.
- */
-
-int __log_space_left(journal_t *); /* Called with journal locked */
-int log_start_commit(journal_t *journal, tid_t tid);
-int __log_start_commit(journal_t *journal, tid_t tid);
-int journal_start_commit(journal_t *journal, tid_t *tid);
-int journal_force_commit_nested(journal_t *journal);
-int log_wait_commit(journal_t *journal, tid_t tid);
-int log_do_checkpoint(journal_t *journal);
-int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
-
-void __log_wait_for_space(journal_t *journal);
-extern void	__journal_drop_transaction(journal_t *, transaction_t *);
-extern int	cleanup_journal_tail(journal_t *);
-
-/*
- * is_journal_abort
- *
- * Simple test wrapper function to test the JFS_ABORT state flag.  This
- * bit, when set, indicates that we have had a fatal error somewhere,
- * either inside the journaling layer or indicated to us by the client
- * (e.g. ext3), and that we should not commit any further
- * transactions.
- */
-
-static inline int is_journal_aborted(journal_t *journal)
-{
-	return journal->j_flags & JFS_ABORT;
-}
-
-static inline int is_handle_aborted(handle_t *handle)
-{
-	if (handle->h_aborted)
-		return 1;
-	return is_journal_aborted(handle->h_transaction->t_journal);
-}
-
-static inline void journal_abort_handle(handle_t *handle)
-{
-	handle->h_aborted = 1;
-}
-
-#endif /* __KERNEL__   */
-
-/* Comparison functions for transaction IDs: perform comparisons using
- * modulo arithmetic so that they work over sequence number wraps. */
-
-static inline int tid_gt(tid_t x, tid_t y)
-{
-	int difference = (x - y);
-	return (difference > 0);
-}
-
-static inline int tid_geq(tid_t x, tid_t y)
-{
-	int difference = (x - y);
-	return (difference >= 0);
-}
-
-extern int journal_blocks_per_page(struct inode *inode);
-
-/*
- * Return the minimum number of blocks which must be free in the journal
- * before a new transaction may be started.  Must be called under j_state_lock.
- */
-static inline int jbd_space_needed(journal_t *journal)
-{
-	int nblocks = journal->j_max_transaction_buffers;
-	if (journal->j_committing_transaction)
-		nblocks += journal->j_committing_transaction->
-					t_outstanding_credits;
-	return nblocks;
-}
-
-/*
- * Definitions which augment the buffer_head layer
- */
-
-/* journaling buffer types */
-#define BJ_None		0	/* Not journaled */
-#define BJ_SyncData	1	/* Normal data: flush before commit */
-#define BJ_Metadata	2	/* Normal journaled metadata */
-#define BJ_Forget	3	/* Buffer superseded by this transaction */
-#define BJ_IO		4	/* Buffer is for temporary IO use */
-#define BJ_Shadow	5	/* Buffer contents being shadowed to the log */
-#define BJ_LogCtl	6	/* Buffer contains log descriptors */
-#define BJ_Reserved	7	/* Buffer is reserved for access by journal */
-#define BJ_Locked	8	/* Locked for I/O during commit */
-#define BJ_Types	9
-
-extern int jbd_blocks_per_page(struct inode *inode);
-
-#ifdef __KERNEL__
-
-#define buffer_trace_init(bh)	do {} while (0)
-#define print_buffer_fields(bh)	do {} while (0)
-#define print_buffer_trace(bh)	do {} while (0)
-#define BUFFER_TRACE(bh, info)	do {} while (0)
-#define BUFFER_TRACE2(bh, bh2, info)	do {} while (0)
-#define JBUFFER_TRACE(jh, info)	do {} while (0)
-
-#endif	/* __KERNEL__ */
-
-#endif	/* _LINUX_JBD_H */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index edb640a..df07e78 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -29,6 +29,7 @@
 #include <linux/mutex.h>
 #include <linux/timer.h>
 #include <linux/slab.h>
+#include <linux/bit_spinlock.h>
 #include <crypto/hash.h>
 #endif
 
@@ -336,7 +337,45 @@
 BUFFER_FNS(Shadow, shadow)
 BUFFER_FNS(Verified, verified)
 
-#include <linux/jbd_common.h>
+static inline struct buffer_head *jh2bh(struct journal_head *jh)
+{
+	return jh->b_bh;
+}
+
+static inline struct journal_head *bh2jh(struct buffer_head *bh)
+{
+	return bh->b_private;
+}
+
+static inline void jbd_lock_bh_state(struct buffer_head *bh)
+{
+	bit_spin_lock(BH_State, &bh->b_state);
+}
+
+static inline int jbd_trylock_bh_state(struct buffer_head *bh)
+{
+	return bit_spin_trylock(BH_State, &bh->b_state);
+}
+
+static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
+{
+	return bit_spin_is_locked(BH_State, &bh->b_state);
+}
+
+static inline void jbd_unlock_bh_state(struct buffer_head *bh)
+{
+	bit_spin_unlock(BH_State, &bh->b_state);
+}
+
+static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
+{
+	bit_spin_lock(BH_JournalHead, &bh->b_state);
+}
+
+static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
+{
+	bit_spin_unlock(BH_JournalHead, &bh->b_state);
+}
 
 #define J_ASSERT(assert)	BUG_ON(!(assert))
 
@@ -1042,8 +1081,9 @@
 extern void jbd2_journal_commit_transaction(journal_t *);
 
 /* Checkpoint list management */
-void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
 int __jbd2_journal_remove_checkpoint(struct journal_head *);
+void jbd2_journal_destroy_checkpoint(journal_t *journal);
 void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
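The helpers folded in above guard journal_head state with bit spinlocks in
bh->b_state rather than regular spinlocks. A sketch of the usual pattern in
jbd2 code:

	struct journal_head *jh = bh2jh(bh);

	jbd_lock_bh_state(bh);
	if (!jh->b_transaction) {
		/* journal_head state may be inspected or changed here */
	}
	jbd_unlock_bh_state(bh);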
 
 
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
deleted file mode 100644
index 3dc5343..0000000
--- a/include/linux/jbd_common.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef _LINUX_JBD_STATE_H
-#define _LINUX_JBD_STATE_H
-
-#include <linux/bit_spinlock.h>
-
-static inline struct buffer_head *jh2bh(struct journal_head *jh)
-{
-	return jh->b_bh;
-}
-
-static inline struct journal_head *bh2jh(struct buffer_head *bh)
-{
-	return bh->b_private;
-}
-
-static inline void jbd_lock_bh_state(struct buffer_head *bh)
-{
-	bit_spin_lock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_trylock_bh_state(struct buffer_head *bh)
-{
-	return bit_spin_trylock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
-{
-	return bit_spin_is_locked(BH_State, &bh->b_state);
-}
-
-static inline void jbd_unlock_bh_state(struct buffer_head *bh)
-{
-	bit_spin_unlock(BH_State, &bh->b_state);
-}
-
-static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
-{
-	bit_spin_lock(BH_JournalHead, &bh->b_state);
-}
-
-static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
-{
-	bit_spin_unlock(BH_JournalHead, &bh->b_state);
-}
-
-#endif
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index f4de473..7f653e8 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -7,17 +7,52 @@
  * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
  * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
  *
+ * DEPRECATED API:
+ *
+ * The use of 'struct static_key' directly is now DEPRECATED. In addition,
+ * static_key_{true,false}() is also DEPRECATED, i.e. DO NOT use the following:
+ *
+ * struct static_key false = STATIC_KEY_INIT_FALSE;
+ * struct static_key true = STATIC_KEY_INIT_TRUE;
+ * static_key_true()
+ * static_key_false()
+ *
+ * The updated API replacements are:
+ *
+ * DEFINE_STATIC_KEY_TRUE(key);
+ * DEFINE_STATIC_KEY_FALSE(key);
+ * static_branch_likely()
+ * static_branch_unlikely()
+ *
  * Jump labels provide an interface to generate dynamic branches using
- * self-modifying code. Assuming toolchain and architecture support, the result
- * of a "if (static_key_false(&key))" statement is an unconditional branch (which
- * defaults to false - and the true block is placed out of line).
+ * self-modifying code. Assuming toolchain and architecture support, if we
+ * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
+ * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
+ * (which defaults to false - and the true block is placed out of line).
+ * Similarly, we can define an initially true key via
+ * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
+ * "if (static_branch_unlikely(&key))", in which case we will generate an
+ * unconditional branch to the out-of-line true branch. Keys that are
+ * initially true or false can be using in both static_branch_unlikely()
+ * and static_branch_likely() statements.
  *
- * However at runtime we can change the branch target using
- * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
- * object, and for as long as there are references all branches referring to
- * that particular key will point to the (out of line) true block.
+ * At runtime we can change the branch target by setting the key
+ * to true via a call to static_branch_enable(), or false using
+ * static_branch_disable(). If the direction of the branch is switched by
+ * these calls then we run-time modify the branch target via a
+ * no-op -> jump or jump -> no-op conversion. For example, for an
+ * initially false key that is used in an "if (static_branch_unlikely(&key))"
+ * statement, setting the key to true requires us to patch in a jump
+ * to the out-of-line true branch.
  *
- * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions
+ * In addition to static_branch_{enable,disable}, we can also reference count
+ * the key or branch direction via static_branch_{inc,dec}. Thus,
+ * static_branch_inc() can be thought of as a 'make more true' and
+ * static_branch_dec() as a 'make more false'. The inc()/dec() interface
+ * is meant to be used exclusively for a given key; do not mix it with
+ * static_branch_{enable,disable}() on the same key.
+ *
+ * Since this relies on modifying code, the branch modifying functions
  * must be considered absolute slow paths (machine wide synchronization etc.).
  * OTOH, since the affected branches are unconditional, their runtime overhead
  * will be absolutely minimal, esp. in the default (off) case where the total
@@ -29,20 +64,10 @@
  * cause significant performance degradation. Struct static_key_deferred and
  * static_key_slow_dec_deferred() provide for this.
  *
- * Lacking toolchain and or architecture support, jump labels fall back to a simple
- * conditional branch.
+ * Lacking toolchain and/or architecture support, static keys fall back to a
+ * simple conditional branch.
  *
- * struct static_key my_key = STATIC_KEY_INIT_TRUE;
- *
- *   if (static_key_true(&my_key)) {
- *   }
- *
- * will result in the true case being in-line and starts the key with a single
- * reference. Mixing static_key_true() and static_key_false() on the same key is not
- * allowed.
- *
- * Not initializing the key (static data is initialized to 0s anyway) is the
- * same as using STATIC_KEY_INIT_FALSE.
+ * Additional babbling in: Documentation/static-keys.txt
  */
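For orientation, a minimal usage sketch of the API described above (the key
name and surrounding functions are illustrative only, not part of this patch):

	#include <linux/jump_label.h>

	/* hypothetical key; starts false, so the true block is out of line */
	static DEFINE_STATIC_KEY_FALSE(demo_key);

	static void demo_fast_path(void)
	{
		if (static_branch_unlikely(&demo_key)) {
			/* reached only once the key is enabled; the branch is
			 * patched live via a no-op -> jump conversion */
		}
	}

	static void demo_toggle(void)
	{
		static_branch_enable(&demo_key);	/* branch now taken */
		static_branch_disable(&demo_key);	/* back to a no-op */
	}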
 
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
@@ -86,8 +111,8 @@
 #ifndef __ASSEMBLY__
 
 enum jump_label_type {
-	JUMP_LABEL_DISABLE = 0,
-	JUMP_LABEL_ENABLE,
+	JUMP_LABEL_NOP = 0,
+	JUMP_LABEL_JMP,
 };
 
 struct module;
@@ -101,33 +126,18 @@
 
 #ifdef HAVE_JUMP_LABEL
 
-#define JUMP_LABEL_TYPE_FALSE_BRANCH	0UL
-#define JUMP_LABEL_TYPE_TRUE_BRANCH	1UL
-#define JUMP_LABEL_TYPE_MASK		1UL
-
-static
-inline struct jump_entry *jump_label_get_entries(struct static_key *key)
-{
-	return (struct jump_entry *)((unsigned long)key->entries
-						& ~JUMP_LABEL_TYPE_MASK);
-}
-
-static inline bool jump_label_get_branch_default(struct static_key *key)
-{
-	if (((unsigned long)key->entries & JUMP_LABEL_TYPE_MASK) ==
-	    JUMP_LABEL_TYPE_TRUE_BRANCH)
-		return true;
-	return false;
-}
+#define JUMP_TYPE_FALSE	0UL
+#define JUMP_TYPE_TRUE	1UL
+#define JUMP_TYPE_MASK	1UL
 
 static __always_inline bool static_key_false(struct static_key *key)
 {
-	return arch_static_branch(key);
+	return arch_static_branch(key, false);
 }
 
 static __always_inline bool static_key_true(struct static_key *key)
 {
-	return !static_key_false(key);
+	return !arch_static_branch(key, true);
 }
 
 extern struct jump_entry __start___jump_table[];
@@ -145,12 +155,12 @@
 extern void static_key_slow_dec(struct static_key *key);
 extern void jump_label_apply_nops(struct module *mod);
 
-#define STATIC_KEY_INIT_TRUE ((struct static_key)		\
+#define STATIC_KEY_INIT_TRUE					\
 	{ .enabled = ATOMIC_INIT(1),				\
-	  .entries = (void *)JUMP_LABEL_TYPE_TRUE_BRANCH })
-#define STATIC_KEY_INIT_FALSE ((struct static_key)		\
+	  .entries = (void *)JUMP_TYPE_TRUE }
+#define STATIC_KEY_INIT_FALSE					\
 	{ .enabled = ATOMIC_INIT(0),				\
-	  .entries = (void *)JUMP_LABEL_TYPE_FALSE_BRANCH })
+	  .entries = (void *)JUMP_TYPE_FALSE }
 
 #else  /* !HAVE_JUMP_LABEL */
 
@@ -198,10 +208,8 @@
 	return 0;
 }
 
-#define STATIC_KEY_INIT_TRUE ((struct static_key) \
-		{ .enabled = ATOMIC_INIT(1) })
-#define STATIC_KEY_INIT_FALSE ((struct static_key) \
-		{ .enabled = ATOMIC_INIT(0) })
+#define STATIC_KEY_INIT_TRUE	{ .enabled = ATOMIC_INIT(1) }
+#define STATIC_KEY_INIT_FALSE	{ .enabled = ATOMIC_INIT(0) }
 
 #endif	/* HAVE_JUMP_LABEL */
 
@@ -213,6 +221,157 @@
 	return static_key_count(key) > 0;
 }
 
+static inline void static_key_enable(struct static_key *key)
+{
+	int count = static_key_count(key);
+
+	WARN_ON_ONCE(count < 0 || count > 1);
+
+	if (!count)
+		static_key_slow_inc(key);
+}
+
+static inline void static_key_disable(struct static_key *key)
+{
+	int count = static_key_count(key);
+
+	WARN_ON_ONCE(count < 0 || count > 1);
+
+	if (count)
+		static_key_slow_dec(key);
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Two type wrappers around static_key, such that we can use compile time
+ * type differentiation to emit the right code.
+ *
+ * All the below code is macros in order to play type games.
+ */
+
+struct static_key_true {
+	struct static_key key;
+};
+
+struct static_key_false {
+	struct static_key key;
+};
+
+#define STATIC_KEY_TRUE_INIT  (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE,  }
+#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }
+
+#define DEFINE_STATIC_KEY_TRUE(name)	\
+	struct static_key_true name = STATIC_KEY_TRUE_INIT
+
+#define DEFINE_STATIC_KEY_FALSE(name)	\
+	struct static_key_false name = STATIC_KEY_FALSE_INIT
+
+#ifdef HAVE_JUMP_LABEL
+
+/*
+ * Combine the right initial value (type) with the right branch order
+ * to generate the desired result.
+ *
+ *
+ * type\branch|	likely (1)	      |	unlikely (0)
+ * -----------+-----------------------+------------------
+ *            |                       |
+ *  true (1)  |	   ...		      |	   ...
+ *            |    NOP		      |	   JMP L
+ *            |    <br-stmts>	      |	1: ...
+ *            |	L: ...		      |
+ *            |			      |
+ *            |			      |	L: <br-stmts>
+ *            |			      |	   jmp 1b
+ *            |                       |
+ * -----------+-----------------------+------------------
+ *            |                       |
+ *  false (0) |	   ...		      |	   ...
+ *            |    JMP L	      |	   NOP
+ *            |    <br-stmts>	      |	1: ...
+ *            |	L: ...		      |
+ *            |			      |
+ *            |			      |	L: <br-stmts>
+ *            |			      |	   jmp 1b
+ *            |                       |
+ * -----------+-----------------------+------------------
+ *
+ * The initial value is encoded in the LSB of static_key::entries,
+ * type: 0 = false, 1 = true.
+ *
+ * The branch type is encoded in the LSB of jump_entry::key,
+ * branch: 0 = unlikely, 1 = likely.
+ *
+ * This gives the following logic table:
+ *
+ *	enabled	type	branch	  instruction
+ * -----------------------------+-----------
+ *	0	0	0	| NOP
+ *	0	0	1	| JMP
+ *	0	1	0	| NOP
+ *	0	1	1	| JMP
+ *
+ *	1	0	0	| JMP
+ *	1	0	1	| NOP
+ *	1	1	0	| JMP
+ *	1	1	1	| NOP
+ *
+ * Which gives the following functions:
+ *
+ *   dynamic: instruction = enabled ^ branch
+ *   static:  instruction = type ^ branch
+ *
+ * See jump_label_type() / jump_label_init_type().
+ */
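Both formulas above reduce to a single XOR; a throwaway sketch of the
relation (illustrative, not kernel code):

	/* true = emit a JMP to the out-of-line block, false = emit a NOP */
	static inline bool dynamic_insn(bool enabled, bool branch)
	{
		return enabled ^ branch;	/* runtime direction */
	}

	static inline bool static_insn(bool type, bool branch)
	{
		return type ^ branch;		/* initial instruction at build time */
	}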
+
+extern bool ____wrong_branch_error(void);
+
+#define static_branch_likely(x)							\
+({										\
+	bool branch;								\
+	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
+		branch = !arch_static_branch(&(x)->key, true);			\
+	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
+		branch = !arch_static_branch_jump(&(x)->key, true);		\
+	else									\
+		branch = ____wrong_branch_error();				\
+	branch;									\
+})
+
+#define static_branch_unlikely(x)						\
+({										\
+	bool branch;								\
+	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
+		branch = arch_static_branch_jump(&(x)->key, false);		\
+	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
+		branch = arch_static_branch(&(x)->key, false);			\
+	else									\
+		branch = ____wrong_branch_error();				\
+	branch;									\
+})
+
+#else /* !HAVE_JUMP_LABEL */
+
+#define static_branch_likely(x)		likely(static_key_enabled(&(x)->key))
+#define static_branch_unlikely(x)	unlikely(static_key_enabled(&(x)->key))
+
+#endif /* HAVE_JUMP_LABEL */
+
+/*
+ * Advanced usage; refcount, branch is enabled when: count != 0
+ */
+
+#define static_branch_inc(x)		static_key_slow_inc(&(x)->key)
+#define static_branch_dec(x)		static_key_slow_dec(&(x)->key)
+
+/*
+ * Normal usage; boolean enable/disable.
+ */
+
+#define static_branch_enable(x)		static_key_enable(&(x)->key)
+#define static_branch_disable(x)	static_key_disable(&(x)->key)
+
 #endif	/* _LINUX_JUMP_LABEL_H */
 
 #endif /* __ASSEMBLY__ */
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 869b21d..e691b6a 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -11,7 +11,7 @@
 					   const char namefmt[], ...);
 
 #define kthread_create(threadfn, data, namefmt, arg...) \
-	kthread_create_on_node(threadfn, data, -1, namefmt, ##arg)
+	kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)
 
 
 struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
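The hunk above only swaps the magic -1 for NUMA_NO_NODE; callers are
unaffected. A minimal caller sketch for reference (the thread function and
its behaviour are made up):

	static int demo_thread_fn(void *data)
	{
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);
		return 0;
	}

	/* somewhere in init code */
	struct task_struct *task = kthread_create(demo_thread_fn, NULL, "demo-%d", 0);

	if (!IS_ERR(task))
		wake_up_process(task);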
diff --git a/include/linux/llist.h b/include/linux/llist.h
index fbf10a0..fd4ca0b 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -55,8 +55,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <linux/atomic.h>
 #include <linux/kernel.h>
-#include <asm/cmpxchg.h>
 
 struct llist_head {
 	struct llist_node *first;
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index 97cb283..8fcad63 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -60,60 +60,60 @@
 /* page 0 basic: slave addr 0x60 */
 
 #define PM800_STATUS_1			(0x01)
-#define PM800_ONKEY_STS1		(1 << 0)
-#define PM800_EXTON_STS1		(1 << 1)
-#define PM800_CHG_STS1			(1 << 2)
-#define PM800_BAT_STS1			(1 << 3)
-#define PM800_VBUS_STS1			(1 << 4)
-#define PM800_LDO_PGOOD_STS1	(1 << 5)
-#define PM800_BUCK_PGOOD_STS1	(1 << 6)
+#define PM800_ONKEY_STS1		BIT(0)
+#define PM800_EXTON_STS1		BIT(1)
+#define PM800_CHG_STS1			BIT(2)
+#define PM800_BAT_STS1			BIT(3)
+#define PM800_VBUS_STS1			BIT(4)
+#define PM800_LDO_PGOOD_STS1		BIT(5)
+#define PM800_BUCK_PGOOD_STS1		BIT(6)
 
 #define PM800_STATUS_2			(0x02)
-#define PM800_RTC_ALARM_STS2	(1 << 0)
+#define PM800_RTC_ALARM_STS2		BIT(0)
 
 /* Wakeup Registers */
-#define PM800_WAKEUP1		(0x0D)
+#define PM800_WAKEUP1			(0x0D)
 
-#define PM800_WAKEUP2		(0x0E)
-#define PM800_WAKEUP2_INV_INT		(1 << 0)
-#define PM800_WAKEUP2_INT_CLEAR		(1 << 1)
-#define PM800_WAKEUP2_INT_MASK		(1 << 2)
+#define PM800_WAKEUP2			(0x0E)
+#define PM800_WAKEUP2_INV_INT		BIT(0)
+#define PM800_WAKEUP2_INT_CLEAR		BIT(1)
+#define PM800_WAKEUP2_INT_MASK		BIT(2)
 
-#define PM800_POWER_UP_LOG	(0x10)
+#define PM800_POWER_UP_LOG		(0x10)
 
 /* Reference and low power registers */
 #define PM800_LOW_POWER1		(0x20)
 #define PM800_LOW_POWER2		(0x21)
-#define PM800_LOW_POWER_CONFIG3	(0x22)
-#define PM800_LOW_POWER_CONFIG4	(0x23)
+#define PM800_LOW_POWER_CONFIG3		(0x22)
+#define PM800_LOW_POWER_CONFIG4		(0x23)
 
 /* GPIO register */
 #define PM800_GPIO_0_1_CNTRL		(0x30)
-#define PM800_GPIO0_VAL				(1 << 0)
+#define PM800_GPIO0_VAL			BIT(0)
 #define PM800_GPIO0_GPIO_MODE(x)	(x << 1)
-#define PM800_GPIO1_VAL				(1 << 4)
+#define PM800_GPIO1_VAL			BIT(4)
 #define PM800_GPIO1_GPIO_MODE(x)	(x << 5)
 
 #define PM800_GPIO_2_3_CNTRL		(0x31)
-#define PM800_GPIO2_VAL				(1 << 0)
+#define PM800_GPIO2_VAL			BIT(0)
 #define PM800_GPIO2_GPIO_MODE(x)	(x << 1)
-#define PM800_GPIO3_VAL				(1 << 4)
+#define PM800_GPIO3_VAL			BIT(4)
 #define PM800_GPIO3_GPIO_MODE(x)	(x << 5)
 #define PM800_GPIO3_MODE_MASK		0x1F
 #define PM800_GPIO3_HEADSET_MODE	PM800_GPIO3_GPIO_MODE(6)
 
-#define PM800_GPIO_4_CNTRL			(0x32)
-#define PM800_GPIO4_VAL				(1 << 0)
+#define PM800_GPIO_4_CNTRL		(0x32)
+#define PM800_GPIO4_VAL			BIT(0)
 #define PM800_GPIO4_GPIO_MODE(x)	(x << 1)
 
 #define PM800_HEADSET_CNTRL		(0x38)
-#define PM800_HEADSET_DET_EN		(1 << 7)
-#define PM800_HSDET_SLP			(1 << 1)
+#define PM800_HEADSET_DET_EN		BIT(7)
+#define PM800_HSDET_SLP			BIT(1)
 /* PWM register */
-#define PM800_PWM1		(0x40)
-#define PM800_PWM2		(0x41)
-#define PM800_PWM3		(0x42)
-#define PM800_PWM4		(0x43)
+#define PM800_PWM1			(0x40)
+#define PM800_PWM2			(0x41)
+#define PM800_PWM3			(0x42)
+#define PM800_PWM4			(0x43)
 
 /* RTC Registers */
 #define PM800_RTC_CONTROL		(0xD0)
@@ -123,55 +123,55 @@
 #define PM800_RTC_MISC4			(0xE4)
 #define PM800_RTC_MISC5			(0xE7)
 /* bit definitions of RTC Register 1 (0xD0) */
-#define PM800_ALARM1_EN			(1 << 0)
-#define PM800_ALARM_WAKEUP		(1 << 4)
-#define PM800_ALARM			(1 << 5)
-#define PM800_RTC1_USE_XO		(1 << 7)
+#define PM800_ALARM1_EN			BIT(0)
+#define PM800_ALARM_WAKEUP		BIT(4)
+#define PM800_ALARM			BIT(5)
+#define PM800_RTC1_USE_XO		BIT(7)
 
 /* Regulator Control Registers: BUCK1,BUCK5,LDO1 have DVC */
 
 /* buck registers */
-#define PM800_SLEEP_BUCK1	(0x30)
+#define PM800_SLEEP_BUCK1		(0x30)
 
 /* BUCK Sleep Mode Register 1: BUCK[1..4] */
-#define PM800_BUCK_SLP1		(0x5A)
-#define PM800_BUCK1_SLP1_SHIFT	0
-#define PM800_BUCK1_SLP1_MASK	(0x3 << PM800_BUCK1_SLP1_SHIFT)
+#define PM800_BUCK_SLP1			(0x5A)
+#define PM800_BUCK1_SLP1_SHIFT		0
+#define PM800_BUCK1_SLP1_MASK		(0x3 << PM800_BUCK1_SLP1_SHIFT)
 
 /* page 2 GPADC: slave addr 0x02 */
 #define PM800_GPADC_MEAS_EN1		(0x01)
-#define PM800_MEAS_EN1_VBAT         (1 << 2)
+#define PM800_MEAS_EN1_VBAT		BIT(2)
 #define PM800_GPADC_MEAS_EN2		(0x02)
-#define PM800_MEAS_EN2_RFTMP        (1 << 0)
-#define PM800_MEAS_GP0_EN			(1 << 2)
-#define PM800_MEAS_GP1_EN			(1 << 3)
-#define PM800_MEAS_GP2_EN			(1 << 4)
-#define PM800_MEAS_GP3_EN			(1 << 5)
-#define PM800_MEAS_GP4_EN			(1 << 6)
+#define PM800_MEAS_EN2_RFTMP		BIT(0)
+#define PM800_MEAS_GP0_EN		BIT(2)
+#define PM800_MEAS_GP1_EN		BIT(3)
+#define PM800_MEAS_GP2_EN		BIT(4)
+#define PM800_MEAS_GP3_EN		BIT(5)
+#define PM800_MEAS_GP4_EN		BIT(6)
 
 #define PM800_GPADC_MISC_CONFIG1	(0x05)
 #define PM800_GPADC_MISC_CONFIG2	(0x06)
-#define PM800_GPADC_MISC_GPFSM_EN	(1 << 0)
+#define PM800_GPADC_MISC_GPFSM_EN	BIT(0)
 #define PM800_GPADC_SLOW_MODE(x)	(x << 3)
 
-#define PM800_GPADC_MISC_CONFIG3		(0x09)
-#define PM800_GPADC_MISC_CONFIG4		(0x0A)
+#define PM800_GPADC_MISC_CONFIG3	(0x09)
+#define PM800_GPADC_MISC_CONFIG4	(0x0A)
 
-#define PM800_GPADC_PREBIAS1			(0x0F)
+#define PM800_GPADC_PREBIAS1		(0x0F)
 #define PM800_GPADC0_GP_PREBIAS_TIME(x)	(x << 0)
-#define PM800_GPADC_PREBIAS2			(0x10)
+#define PM800_GPADC_PREBIAS2		(0x10)
 
-#define PM800_GP_BIAS_ENA1				(0x14)
-#define PM800_GPADC_GP_BIAS_EN0			(1 << 0)
-#define PM800_GPADC_GP_BIAS_EN1			(1 << 1)
-#define PM800_GPADC_GP_BIAS_EN2			(1 << 2)
-#define PM800_GPADC_GP_BIAS_EN3			(1 << 3)
+#define PM800_GP_BIAS_ENA1		(0x14)
+#define PM800_GPADC_GP_BIAS_EN0		BIT(0)
+#define PM800_GPADC_GP_BIAS_EN1		BIT(1)
+#define PM800_GPADC_GP_BIAS_EN2		BIT(2)
+#define PM800_GPADC_GP_BIAS_EN3		BIT(3)
 
 #define PM800_GP_BIAS_OUT1		(0x15)
-#define PM800_BIAS_OUT_GP0		(1 << 0)
-#define PM800_BIAS_OUT_GP1		(1 << 1)
-#define PM800_BIAS_OUT_GP2		(1 << 2)
-#define PM800_BIAS_OUT_GP3		(1 << 3)
+#define PM800_BIAS_OUT_GP0		BIT(0)
+#define PM800_BIAS_OUT_GP1		BIT(1)
+#define PM800_BIAS_OUT_GP2		BIT(2)
+#define PM800_BIAS_OUT_GP3		BIT(3)
 
 #define PM800_GPADC0_LOW_TH		0x20
 #define PM800_GPADC1_LOW_TH		0x21
@@ -222,37 +222,37 @@
 
 #define PM805_INT_STATUS1		(0x03)
 
-#define PM805_INT1_HP1_SHRT		(1 << 0)
-#define PM805_INT1_HP2_SHRT		(1 << 1)
-#define PM805_INT1_MIC_CONFLICT		(1 << 2)
-#define PM805_INT1_CLIP_FAULT		(1 << 3)
-#define PM805_INT1_LDO_OFF			(1 << 4)
-#define PM805_INT1_SRC_DPLL_LOCK	(1 << 5)
+#define PM805_INT1_HP1_SHRT		BIT(0)
+#define PM805_INT1_HP2_SHRT		BIT(1)
+#define PM805_INT1_MIC_CONFLICT		BIT(2)
+#define PM805_INT1_CLIP_FAULT		BIT(3)
+#define PM805_INT1_LDO_OFF		BIT(4)
+#define PM805_INT1_SRC_DPLL_LOCK	BIT(5)
 
 #define PM805_INT_STATUS2		(0x04)
 
-#define PM805_INT2_MIC_DET			(1 << 0)
-#define PM805_INT2_SHRT_BTN_DET		(1 << 1)
-#define PM805_INT2_VOLM_BTN_DET		(1 << 2)
-#define PM805_INT2_VOLP_BTN_DET		(1 << 3)
-#define PM805_INT2_RAW_PLL_FAULT	(1 << 4)
-#define PM805_INT2_FINE_PLL_FAULT	(1 << 5)
+#define PM805_INT2_MIC_DET		BIT(0)
+#define PM805_INT2_SHRT_BTN_DET		BIT(1)
+#define PM805_INT2_VOLM_BTN_DET		BIT(2)
+#define PM805_INT2_VOLP_BTN_DET		BIT(3)
+#define PM805_INT2_RAW_PLL_FAULT	BIT(4)
+#define PM805_INT2_FINE_PLL_FAULT	BIT(5)
 
 #define PM805_INT_MASK1			(0x05)
 #define PM805_INT_MASK2			(0x06)
-#define PM805_SHRT_BTN_DET		(1 << 1)
+#define PM805_SHRT_BTN_DET		BIT(1)
 
 /* number of status and int reg in a row */
 #define PM805_INT_REG_NUM		(2)
 
 #define PM805_MIC_DET1			(0x07)
-#define PM805_MIC_DET_EN_MIC_DET (1 << 0)
+#define PM805_MIC_DET_EN_MIC_DET	BIT(0)
 #define PM805_MIC_DET2			(0x08)
-#define PM805_MIC_DET_STATUS1	(0x09)
+#define PM805_MIC_DET_STATUS1		(0x09)
 
-#define PM805_MIC_DET_STATUS3	(0x0A)
-#define PM805_AUTO_SEQ_STATUS1	(0x0B)
-#define PM805_AUTO_SEQ_STATUS2	(0x0C)
+#define PM805_MIC_DET_STATUS3		(0x0A)
+#define PM805_AUTO_SEQ_STATUS1		(0x0B)
+#define PM805_AUTO_SEQ_STATUS2		(0x0C)
 
 #define PM805_ADC_SETTING1		(0x10)
 #define PM805_ADC_SETTING2		(0x11)
@@ -261,7 +261,7 @@
 #define PM805_ADC_GAIN2			(0x13)
 #define PM805_DMIC_SETTING		(0x15)
 #define PM805_DWS_SETTING		(0x16)
-#define PM805_MIC_CONFLICT_STS	(0x17)
+#define PM805_MIC_CONFLICT_STS		(0x17)
 
 #define PM805_PDM_SETTING1		(0x20)
 #define PM805_PDM_SETTING2		(0x21)
@@ -270,11 +270,11 @@
 #define PM805_PDM_CONTROL2		(0x24)
 #define PM805_PDM_CONTROL3		(0x25)
 
-#define PM805_HEADPHONE_SETTING			(0x26)
-#define PM805_HEADPHONE_GAIN_A2A		(0x27)
-#define PM805_HEADPHONE_SHORT_STATE		(0x28)
-#define PM805_EARPHONE_SETTING			(0x29)
-#define PM805_AUTO_SEQ_SETTING			(0x2A)
+#define PM805_HEADPHONE_SETTING		(0x26)
+#define PM805_HEADPHONE_GAIN_A2A	(0x27)
+#define PM805_HEADPHONE_SHORT_STATE	(0x28)
+#define PM805_EARPHONE_SETTING		(0x29)
+#define PM805_AUTO_SEQ_SETTING		(0x2A)
 
 struct pm80x_rtc_pdata {
 	int		vrtc;
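The (1 << n) to BIT(n) conversion throughout this file is mechanical; BIT()
is the kernel's standard helper from <linux/bitops.h>:

	#define BIT(nr)		(1UL << (nr))

	/* so PM800_ONKEY_STS1 now expands to (1UL << 0); the only semantic
	 * difference from the old (1 << 0) literal is the unsigned long type */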
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index 2f434f4..79e607e 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -25,6 +25,8 @@
 	WM5110 = 2,
 	WM8997 = 3,
 	WM8280 = 4,
+	WM8998 = 5,
+	WM1814 = 6,
 };
 
 #define ARIZONA_IRQ_GP1                    0
@@ -165,6 +167,7 @@
 
 int wm5110_patch(struct arizona *arizona);
 int wm8997_patch(struct arizona *arizona);
+int wm8998_patch(struct arizona *arizona);
 
 extern int arizona_of_get_named_gpio(struct arizona *arizona, const char *prop,
 				     bool mandatory);
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 43db4fa..1dc3858 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -101,7 +101,7 @@
	 * useful for systems where an I2S bus with multiple data
 	 * lines is mastered.
 	 */
-	int max_channels_clocked[ARIZONA_MAX_AIF];
+	unsigned int max_channels_clocked[ARIZONA_MAX_AIF];
 
 	/** GPIO5 is used for jack detection */
 	bool jd_gpio5;
@@ -125,22 +125,22 @@
 	unsigned int hpdet_channel;
 
 	/** Extra debounce timeout used during initial mic detection (ms) */
-	int micd_detect_debounce;
+	unsigned int micd_detect_debounce;
 
 	/** GPIO for mic detection polarity */
 	int micd_pol_gpio;
 
 	/** Mic detect ramp rate */
-	int micd_bias_start_time;
+	unsigned int micd_bias_start_time;
 
 	/** Mic detect sample rate */
-	int micd_rate;
+	unsigned int micd_rate;
 
 	/** Mic detect debounce level */
-	int micd_dbtime;
+	unsigned int micd_dbtime;
 
 	/** Mic detect timeout (ms) */
-	int micd_timeout;
+	unsigned int micd_timeout;
 
 	/** Force MICBIAS on for mic detect */
 	bool micd_force_micbias;
@@ -162,6 +162,8 @@
 	/**
 	 * Mode of input structures
 	 * One of the ARIZONA_INMODE_xxx values
+	 * wm5102/wm5110/wm8280/wm8997: [0]=IN1 [1]=IN2 [2]=IN3 [3]=IN4
+	 * wm8998: [0]=IN1A [1]=IN2A [2]=IN1B [3]=IN2B
 	 */
 	int inmode[ARIZONA_MAX_INPUT];
 
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index 3499d36..fdd70b3 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -39,6 +39,7 @@
 #define ARIZONA_PWM_DRIVE_3                      0x32
 #define ARIZONA_WAKE_CONTROL                     0x40
 #define ARIZONA_SEQUENCE_CONTROL                 0x41
+#define ARIZONA_SPARE_TRIGGERS                   0x42
 #define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1    0x61
 #define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2    0x62
 #define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3    0x63
@@ -139,6 +140,7 @@
 #define ARIZONA_MIC_DETECT_LEVEL_2		 0x2A7
 #define ARIZONA_MIC_DETECT_LEVEL_3		 0x2A8
 #define ARIZONA_MIC_DETECT_LEVEL_4		 0x2A9
+#define ARIZONA_MIC_DETECT_4                     0x2AB
 #define ARIZONA_MIC_NOISE_MIX_CONTROL_1          0x2C3
 #define ARIZONA_ISOLATION_CONTROL                0x2CB
 #define ARIZONA_JACK_DETECT_ANALOGUE             0x2D3
@@ -225,14 +227,18 @@
 #define ARIZONA_DAC_VOLUME_LIMIT_6R              0x43E
 #define ARIZONA_NOISE_GATE_SELECT_6R             0x43F
 #define ARIZONA_DRE_ENABLE                       0x440
+#define ARIZONA_DRE_CONTROL_1                    0x441
 #define ARIZONA_DRE_CONTROL_2                    0x442
 #define ARIZONA_DRE_CONTROL_3                    0x443
+#define ARIZONA_EDRE_ENABLE                      0x448
 #define ARIZONA_DAC_AEC_CONTROL_1                0x450
+#define ARIZONA_DAC_AEC_CONTROL_2                0x451
 #define ARIZONA_NOISE_GATE_CONTROL               0x458
 #define ARIZONA_PDM_SPK1_CTRL_1                  0x490
 #define ARIZONA_PDM_SPK1_CTRL_2                  0x491
 #define ARIZONA_PDM_SPK2_CTRL_1                  0x492
 #define ARIZONA_PDM_SPK2_CTRL_2                  0x493
+#define ARIZONA_HP_TEST_CTRL_13                  0x49A
 #define ARIZONA_HP1_SHORT_CIRCUIT_CTRL           0x4A0
 #define ARIZONA_HP2_SHORT_CIRCUIT_CTRL           0x4A1
 #define ARIZONA_HP3_SHORT_CIRCUIT_CTRL           0x4A2
@@ -310,6 +316,10 @@
 #define ARIZONA_AIF3_TX_ENABLES                  0x599
 #define ARIZONA_AIF3_RX_ENABLES                  0x59A
 #define ARIZONA_AIF3_FORCE_WRITE                 0x59B
+#define ARIZONA_SPD1_TX_CONTROL                  0x5C2
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_1         0x5C3
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_2         0x5C4
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_3         0x5C5
 #define ARIZONA_SLIMBUS_FRAMER_REF_GEAR          0x5E3
 #define ARIZONA_SLIMBUS_RATES_1                  0x5E5
 #define ARIZONA_SLIMBUS_RATES_2                  0x5E6
@@ -643,6 +653,10 @@
 #define ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME        0x7FD
 #define ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE        0x7FE
 #define ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME        0x7FF
+#define ARIZONA_SPDIFTX1MIX_INPUT_1_SOURCE       0x800
+#define ARIZONA_SPDIFTX1MIX_INPUT_1_VOLUME       0x801
+#define ARIZONA_SPDIFTX2MIX_INPUT_1_SOURCE       0x808
+#define ARIZONA_SPDIFTX2MIX_INPUT_1_VOLUME       0x809
 #define ARIZONA_EQ1MIX_INPUT_1_SOURCE            0x880
 #define ARIZONA_EQ1MIX_INPUT_1_VOLUME            0x881
 #define ARIZONA_EQ1MIX_INPUT_2_SOURCE            0x882
@@ -868,6 +882,7 @@
 #define ARIZONA_GPIO5_CTRL                       0xC04
 #define ARIZONA_IRQ_CTRL_1                       0xC0F
 #define ARIZONA_GPIO_DEBOUNCE_CONFIG             0xC10
+#define ARIZONA_GP_SWITCH_1                      0xC18
 #define ARIZONA_MISC_PAD_CTRL_1                  0xC20
 #define ARIZONA_MISC_PAD_CTRL_2                  0xC21
 #define ARIZONA_MISC_PAD_CTRL_3                  0xC22
@@ -1169,6 +1184,13 @@
 #define ARIZONA_DSP4_SCRATCH_1                   0x1441
 #define ARIZONA_DSP4_SCRATCH_2                   0x1442
 #define ARIZONA_DSP4_SCRATCH_3                   0x1443
+#define ARIZONA_FRF_COEFF_1                      0x1700
+#define ARIZONA_FRF_COEFF_2                      0x1701
+#define ARIZONA_FRF_COEFF_3                      0x1702
+#define ARIZONA_FRF_COEFF_4                      0x1703
+#define ARIZONA_V2_DAC_COMP_1                    0x1704
+#define ARIZONA_V2_DAC_COMP_2                    0x1705
+
 
 /*
  * Field Definitions.
@@ -1431,6 +1453,42 @@
 #define ARIZONA_WSEQ_ENA_JD2_RISE_WIDTH               1  /* WSEQ_ENA_JD2_RISE */
 
 /*
+ * R66 (0x42) - Spare Triggers
+ */
+#define ARIZONA_WS_TRG8                          0x0080  /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_MASK                     0x0080  /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_SHIFT                         7  /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_WIDTH                         1  /* WS_TRG8 */
+#define ARIZONA_WS_TRG7                          0x0040  /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_MASK                     0x0040  /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_SHIFT                         6  /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_WIDTH                         1  /* WS_TRG7 */
+#define ARIZONA_WS_TRG6                          0x0020  /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_MASK                     0x0020  /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_SHIFT                         5  /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_WIDTH                         1  /* WS_TRG6 */
+#define ARIZONA_WS_TRG5                          0x0010  /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_MASK                     0x0010  /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_SHIFT                         4  /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_WIDTH                         1  /* WS_TRG5 */
+#define ARIZONA_WS_TRG4                          0x0008  /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_MASK                     0x0008  /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_SHIFT                         3  /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_WIDTH                         1  /* WS_TRG4 */
+#define ARIZONA_WS_TRG3                          0x0004  /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_MASK                     0x0004  /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_SHIFT                         2  /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_WIDTH                         1  /* WS_TRG3 */
+#define ARIZONA_WS_TRG2                          0x0002  /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_MASK                     0x0002  /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_SHIFT                         1  /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_WIDTH                         1  /* WS_TRG2 */
+#define ARIZONA_WS_TRG1                          0x0001  /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_MASK                     0x0001  /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_SHIFT                         0  /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_WIDTH                         1  /* WS_TRG1 */
+
+/*
  * R97 (0x61) - Sample Rate Sequence Select 1
  */
 #define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_MASK 0x01FF  /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */
@@ -2325,6 +2383,9 @@
 #define ARIZONA_HP_IDAC_STEER_MASK               0x0004  /* HP_IDAC_STEER */
 #define ARIZONA_HP_IDAC_STEER_SHIFT                   2  /* HP_IDAC_STEER */
 #define ARIZONA_HP_IDAC_STEER_WIDTH                   1  /* HP_IDAC_STEER */
+#define WM8998_HP_RATE_MASK                      0x0006  /* HP_RATE - [2:1] */
+#define WM8998_HP_RATE_SHIFT                          1  /* HP_RATE - [2:1] */
+#define WM8998_HP_RATE_WIDTH                          2  /* HP_RATE - [2:1] */
 #define ARIZONA_HP_RATE                          0x0002  /* HP_RATE */
 #define ARIZONA_HP_RATE_MASK                     0x0002  /* HP_RATE */
 #define ARIZONA_HP_RATE_SHIFT                         1  /* HP_RATE */
@@ -2413,6 +2474,16 @@
 #define ARIZONA_MICD_STS_WIDTH                        1  /* MICD_STS */
 
 /*
+ * R683 (0x2AB) - Mic Detect 4
+ */
+#define ARIZONA_MICDET_ADCVAL_DIFF_MASK          0xFF00  /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_DIFF_SHIFT              8  /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_DIFF_WIDTH              8  /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_MASK               0x007F  /* MICDET_ADCVAL - [6:0] */
+#define ARIZONA_MICDET_ADCVAL_SHIFT                   0  /* MICDET_ADCVAL - [6:0] */
+#define ARIZONA_MICDET_ADCVAL_WIDTH                   7  /* MICDET_ADCVAL - [6:0] */
+
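A hedged sketch of how _MASK/_SHIFT pairs such as these are typically
consumed (the regmap call and variable names are illustrative):

	unsigned int val, adcval;

	regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_4, &val);
	adcval = (val & ARIZONA_MICDET_ADCVAL_MASK) >> ARIZONA_MICDET_ADCVAL_SHIFT;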
+/*
  * R707 (0x2C3) - Mic noise mix control 1
  */
 #define ARIZONA_MICMUTE_RATE_MASK                0x7800  /* MICMUTE_RATE - [14:11] */
@@ -2528,6 +2599,12 @@
 /*
  * R785 (0x311) - ADC Digital Volume 1L
  */
+#define ARIZONA_IN1L_SRC_MASK                    0x4000  /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_SHIFT                       14  /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_WIDTH                        1  /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_SE_MASK                 0x2000  /* IN1L_SRC - [13] */
+#define ARIZONA_IN1L_SRC_SE_SHIFT                    13  /* IN1L_SRC - [13] */
+#define ARIZONA_IN1L_SRC_SE_WIDTH                     1  /* IN1L_SRC - [13] */
 #define ARIZONA_IN_VU                            0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_MASK                       0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_SHIFT                           9  /* IN_VU */
@@ -2560,6 +2637,12 @@
 /*
  * R789 (0x315) - ADC Digital Volume 1R
  */
+#define ARIZONA_IN1R_SRC_MASK                    0x4000  /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_SHIFT                       14  /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_WIDTH                        1  /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_SE_MASK                 0x2000  /* IN1R_SRC - [13] */
+#define ARIZONA_IN1R_SRC_SE_SHIFT                    13  /* IN1R_SRC - [13] */
+#define ARIZONA_IN1R_SRC_SE_WIDTH                     1  /* IN1R_SRC - [13] */
 #define ARIZONA_IN_VU                            0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_MASK                       0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_SHIFT                           9  /* IN_VU */
@@ -2604,6 +2687,12 @@
 /*
  * R793 (0x319) - ADC Digital Volume 2L
  */
+#define ARIZONA_IN2L_SRC_MASK                    0x4000  /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_SHIFT                       14  /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_WIDTH                        1  /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_SE_MASK                 0x2000  /* IN2L_SRC - [13] */
+#define ARIZONA_IN2L_SRC_SE_SHIFT                    13  /* IN2L_SRC - [13] */
+#define ARIZONA_IN2L_SRC_SE_WIDTH                     1  /* IN2L_SRC - [13] */
 #define ARIZONA_IN_VU                            0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_MASK                       0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_SHIFT                           9  /* IN_VU */
@@ -3412,11 +3501,45 @@
 #define ARIZONA_DRE1L_ENA_WIDTH                       1  /* DRE1L_ENA */
 
 /*
+ * R1088 (0x440) - DRE Enable (WM8998)
+ */
+#define WM8998_DRE3L_ENA                          0x0020  /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_MASK                     0x0020  /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_SHIFT                         5  /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_WIDTH                         1  /* DRE3L_ENA */
+#define WM8998_DRE2L_ENA                          0x0008  /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_MASK                     0x0008  /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_SHIFT                         3  /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_WIDTH                         1  /* DRE2L_ENA */
+#define WM8998_DRE2R_ENA                          0x0004  /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_MASK                     0x0004  /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_SHIFT                         2  /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_WIDTH                         1  /* DRE2R_ENA */
+#define WM8998_DRE1L_ENA                          0x0002  /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_MASK                     0x0002  /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_SHIFT                         1  /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_WIDTH                         1  /* DRE1L_ENA */
+#define WM8998_DRE1R_ENA                          0x0001  /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_MASK                     0x0001  /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_SHIFT                         0  /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_WIDTH                         1  /* DRE1R_ENA */
+
+/*
+ * R1089 (0x441) - DRE Control 1
+ */
+#define ARIZONA_DRE_ENV_TC_FAST_MASK             0x0F00  /* DRE_ENV_TC_FAST - [11:8] */
+#define ARIZONA_DRE_ENV_TC_FAST_SHIFT                 8  /* DRE_ENV_TC_FAST - [11:8] */
+#define ARIZONA_DRE_ENV_TC_FAST_WIDTH                 4  /* DRE_ENV_TC_FAST - [11:8] */
+
+/*
  * R1090 (0x442) - DRE Control 2
  */
 #define ARIZONA_DRE_T_LOW_MASK                   0x3F00  /* DRE_T_LOW - [13:8] */
 #define ARIZONA_DRE_T_LOW_SHIFT                       8  /* DRE_T_LOW - [13:8] */
 #define ARIZONA_DRE_T_LOW_WIDTH                       6  /* DRE_T_LOW - [13:8] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_MASK          0x000F  /* DRE_ALOG_VOL_DELAY - [3:0] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_SHIFT              0  /* DRE_ALOG_VOL_DELAY - [3:0] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_WIDTH              4  /* DRE_ALOG_VOL_DELAY - [3:0] */
 
 /*
  * R1091 (0x443) - DRE Control 3
@@ -3428,6 +3551,49 @@
 #define ARIZONA_DRE_LOW_LEVEL_ABS_SHIFT               0  /* LOW_LEVEL_ABS - [3:0] */
 #define ARIZONA_DRE_LOW_LEVEL_ABS_WIDTH               4  /* LOW_LEVEL_ABS - [3:0] */
 
+/*
+ * R1096 (0x448) - EDRE Enable
+ */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA              0x0200  /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_MASK         0x0200  /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_SHIFT             9  /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_WIDTH             1  /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA              0x0100  /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_MASK         0x0100  /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_SHIFT             8  /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_WIDTH             1  /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA              0x0080  /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_MASK         0x0080  /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_SHIFT             7  /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_WIDTH             1  /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA              0x0040  /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_MASK         0x0040  /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_SHIFT             6  /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_WIDTH             1  /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA              0x0020  /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_MASK         0x0020  /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_SHIFT             5  /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_WIDTH             1  /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA              0x0010  /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_MASK         0x0010  /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_SHIFT             4  /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_WIDTH             1  /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA              0x0008  /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_MASK         0x0008  /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_SHIFT             3  /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_WIDTH             1  /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA              0x0004  /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_MASK         0x0004  /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_SHIFT             2  /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_WIDTH             1  /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA              0x0002  /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_MASK         0x0002  /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_SHIFT             1  /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_WIDTH             1  /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA              0x0001  /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_MASK         0x0001  /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_SHIFT             0  /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_WIDTH             1  /* EDRE_OUT1R_THR1_ENA */
+
 /*
  * R1104 (0x450) - DAC AEC Control 1
  */
@@ -4308,6 +4474,86 @@
 #define ARIZONA_AIF3_FRC_WR_WIDTH                     1  /* AIF3_FRC_WR */
 
 /*
+ * R1474 (0x5C2) - SPD1 TX Control
+ */
+#define ARIZONA_SPD1_VAL2                        0x2000  /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_MASK                   0x2000  /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_SHIFT                      13  /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_WIDTH                       1  /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL1                        0x1000  /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_MASK                   0x1000  /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_SHIFT                      12  /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_WIDTH                       1  /* SPD1_VAL1 */
+#define ARIZONA_SPD1_RATE_MASK                   0x00F0  /* SPD1_RATE */
+#define ARIZONA_SPD1_RATE_SHIFT                       4  /* SPD1_RATE */
+#define ARIZONA_SPD1_RATE_WIDTH                       4  /* SPD1_RATE */
+#define ARIZONA_SPD1_ENA                         0x0001  /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_MASK                    0x0001  /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_SHIFT                        0  /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_WIDTH                        1  /* SPD1_ENA */
+
+/*
+ * R1475 (0x5C3) - SPD1 TX Channel Status 1
+ */
+#define ARIZONA_SPD1_CATCODE_MASK                0xFF00  /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CATCODE_SHIFT                    8  /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CATCODE_WIDTH                    8  /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CHSTMODE_MASK               0x00C0  /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_CHSTMODE_SHIFT                   6  /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_CHSTMODE_WIDTH                   2  /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_PREEMPH_MASK                0x0038  /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_PREEMPH_SHIFT                    3  /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_PREEMPH_WIDTH                    3  /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_NOCOPY                      0x0004  /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_MASK                 0x0004  /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_SHIFT                     2  /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_WIDTH                     1  /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOAUDIO                     0x0002  /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_MASK                0x0002  /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_SHIFT                    1  /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_WIDTH                    1  /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_PRO                         0x0001  /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_MASK                    0x0001  /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_SHIFT                        0  /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_WIDTH                        1  /* SPD1_PRO */
+
+/*
+ * R1476 (0x5C4) - SPD1 TX Channel Status 2
+ */
+#define ARIZONA_SPD1_FREQ_MASK                   0xF000  /* SPD1_FREQ */
+#define ARIZONA_SPD1_FREQ_SHIFT                      12  /* SPD1_FREQ */
+#define ARIZONA_SPD1_FREQ_WIDTH                       4  /* SPD1_FREQ */
+#define ARIZONA_SPD1_CHNUM2_MASK                 0x0F00  /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM2_SHIFT                     8  /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM2_WIDTH                     4  /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM1_MASK                 0x00F0  /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_CHNUM1_SHIFT                     4  /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_CHNUM1_WIDTH                     4  /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_SRCNUM_MASK                 0x000F  /* SPD1_SRCNUM */
+#define ARIZONA_SPD1_SRCNUM_SHIFT                     0  /* SPD1_SRCNUM */
+#define ARIZONA_SPD1_SRCNUM_WIDTH                     4  /* SPD1_SRCNUM */
+
+/*
+ * R1477 (0x5C5) - SPD1 TX Channel Status 3
+ */
+#define ARIZONA_SPD1_ORGSAMP_MASK                 0x0F00  /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_ORGSAMP_SHIFT                     8  /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_ORGSAMP_WIDTH                     4  /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_TXWL_MASK                    0x00E0  /* SPD1_TXWL */
+#define ARIZONA_SPD1_TXWL_SHIFT                        5  /* SPD1_TXWL */
+#define ARIZONA_SPD1_TXWL_WIDTH                        3  /* SPD1_TXWL */
+#define ARIZONA_SPD1_MAXWL                        0x0010  /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_MASK                   0x0010  /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_SHIFT                       4  /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_WIDTH                       1  /* SPD1_MAXWL */
+#define ARIZONA_SPD1_CS31_30_MASK                 0x000C  /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CS31_30_SHIFT                     2  /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CS31_30_WIDTH                     2  /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CLKACU_MASK                  0x0003  /* SPD1_CLKACU */
+#define ARIZONA_SPD1_CLKACU_SHIFT                      0  /* SPD1_CLKACU */
+#define ARIZONA_SPD1_CLKACU_WIDTH                      2  /* SPD1_CLKACU */
+
+/*
  * R1507 (0x5E3) - SLIMbus Framer Ref Gear
  */
 #define ARIZONA_SLIMCLK_SRC                      0x0010  /* SLIMCLK_SRC */
@@ -4562,6 +4808,13 @@
 #define ARIZONA_GP_DBTIME_WIDTH                       4  /* GP_DBTIME - [15:12] */
 
 /*
+ * R3096 (0xC18) - GP Switch 1
+ */
+#define ARIZONA_SW1_MODE_MASK                    0x0003  /* SW1_MODE - [1:0] */
+#define ARIZONA_SW1_MODE_SHIFT                        0  /* SW1_MODE - [1:0] */
+#define ARIZONA_SW1_MODE_WIDTH                        2  /* SW1_MODE - [1:0] */
+
+/*
  * R3104 (0xC20) - Misc Pad Ctrl 1
  */
 #define ARIZONA_LDO1ENA_PD                       0x8000  /* LDO1ENA_PD */
@@ -6301,6 +6554,10 @@
 /*
  * R3366 (0xD26) - Interrupt Raw Status 8
  */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS            0x8000  /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_MASK       0x8000  /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_SHIFT          15  /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_WIDTH           1  /* SPDIF_OVERCLOCKED_STS */
 #define ARIZONA_AIF3_UNDERCLOCKED_STS            0x0400  /* AIF3_UNDERCLOCKED_STS */
 #define ARIZONA_AIF3_UNDERCLOCKED_STS_MASK       0x0400  /* AIF3_UNDERCLOCKED_STS */
 #define ARIZONA_AIF3_UNDERCLOCKED_STS_SHIFT          10  /* AIF3_UNDERCLOCKED_STS */
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index c2aa853..cc8ad1e 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -12,7 +12,8 @@
 #define __LINUX_MFD_AXP20X_H
 
 enum {
-	AXP202_ID = 0,
+	AXP152_ID = 0,
+	AXP202_ID,
 	AXP209_ID,
 	AXP221_ID,
 	AXP288_ID,
@@ -22,6 +23,24 @@
 #define AXP20X_DATACACHE(m)		(0x04 + (m))
 
 /* Power supply */
+#define AXP152_PWR_OP_MODE		0x01
+#define AXP152_LDO3456_DC1234_CTRL	0x12
+#define AXP152_ALDO_OP_MODE		0x13
+#define AXP152_LDO0_CTRL		0x15
+#define AXP152_DCDC2_V_OUT		0x23
+#define AXP152_DCDC2_V_SCAL		0x25
+#define AXP152_DCDC1_V_OUT		0x26
+#define AXP152_DCDC3_V_OUT		0x27
+#define AXP152_ALDO12_V_OUT		0x28
+#define AXP152_DLDO1_V_OUT		0x29
+#define AXP152_DLDO2_V_OUT		0x2a
+#define AXP152_DCDC4_V_OUT		0x2b
+#define AXP152_V_OFF			0x31
+#define AXP152_OFF_CTRL			0x32
+#define AXP152_PEK_KEY			0x36
+#define AXP152_DCDC_FREQ		0x37
+#define AXP152_DCDC_MODE		0x80
+
 #define AXP20X_PWR_INPUT_STATUS		0x00
 #define AXP20X_PWR_OP_MODE		0x01
 #define AXP20X_USB_OTG_STATUS		0x02
@@ -69,6 +88,13 @@
 #define AXP22X_CHRG_CTRL3		0x35
 
 /* Interrupt */
+#define AXP152_IRQ1_EN			0x40
+#define AXP152_IRQ2_EN			0x41
+#define AXP152_IRQ3_EN			0x42
+#define AXP152_IRQ1_STATE		0x48
+#define AXP152_IRQ2_STATE		0x49
+#define AXP152_IRQ3_STATE		0x4a
+
 #define AXP20X_IRQ1_EN			0x40
 #define AXP20X_IRQ2_EN			0x41
 #define AXP20X_IRQ3_EN			0x42
@@ -127,6 +153,19 @@
 #define AXP22X_PWREN_CTRL2		0x8d
 
 /* GPIO */
+#define AXP152_GPIO0_CTRL		0x90
+#define AXP152_GPIO1_CTRL		0x91
+#define AXP152_GPIO2_CTRL		0x92
+#define AXP152_GPIO3_CTRL		0x93
+#define AXP152_LDOGPIO2_V_OUT		0x96
+#define AXP152_GPIO_INPUT		0x97
+#define AXP152_PWM0_FREQ_X		0x98
+#define AXP152_PWM0_FREQ_Y		0x99
+#define AXP152_PWM0_DUTY_CYCLE		0x9a
+#define AXP152_PWM1_FREQ_X		0x9b
+#define AXP152_PWM1_FREQ_Y		0x9c
+#define AXP152_PWM1_DUTY_CYCLE		0x9d
+
 #define AXP20X_GPIO0_CTRL		0x90
 #define AXP20X_LDO5_V_OUT		0x91
 #define AXP20X_GPIO1_CTRL		0x92
@@ -151,6 +190,12 @@
 #define AXP20X_CC_CTRL			0xb8
 #define AXP20X_FG_RES			0xb9
 
+/* OCV */
+#define AXP20X_RDC_H			0xba
+#define AXP20X_RDC_L			0xbb
+#define AXP20X_OCV(m)			(0xc0 + (m))
+#define AXP20X_OCV_MAX			0xf
+
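For orientation: AXP20X_OCV(m) indexes the sixteen OCV calibration registers
at 0xc0..0xcf. A hypothetical read loop, assuming the usual regmap handle:

	unsigned int ocv[AXP20X_OCV_MAX + 1];
	int i;

	for (i = 0; i <= AXP20X_OCV_MAX; i++)
		regmap_read(axp20x->regmap, AXP20X_OCV(i), &ocv[i]);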
 /* AXP22X specific registers */
 #define AXP22X_BATLOW_THRES1		0xe6
 
@@ -218,6 +263,26 @@
 
 /* IRQs */
 enum {
+	AXP152_IRQ_LDO0IN_CONNECT = 1,
+	AXP152_IRQ_LDO0IN_REMOVAL,
+	AXP152_IRQ_ALDO0IN_CONNECT,
+	AXP152_IRQ_ALDO0IN_REMOVAL,
+	AXP152_IRQ_DCDC1_V_LOW,
+	AXP152_IRQ_DCDC2_V_LOW,
+	AXP152_IRQ_DCDC3_V_LOW,
+	AXP152_IRQ_DCDC4_V_LOW,
+	AXP152_IRQ_PEK_SHORT,
+	AXP152_IRQ_PEK_LONG,
+	AXP152_IRQ_TIMER,
+	AXP152_IRQ_PEK_RIS_EDGE,
+	AXP152_IRQ_PEK_FAL_EDGE,
+	AXP152_IRQ_GPIO3_INPUT,
+	AXP152_IRQ_GPIO2_INPUT,
+	AXP152_IRQ_GPIO1_INPUT,
+	AXP152_IRQ_GPIO0_INPUT,
+};
+
+enum {
 	AXP20X_IRQ_ACIN_OVER_V = 1,
 	AXP20X_IRQ_ACIN_PLUGIN,
 	AXP20X_IRQ_ACIN_REMOVAL,
diff --git a/include/linux/mfd/da9062/core.h b/include/linux/mfd/da9062/core.h
new file mode 100644
index 0000000..376ba84
--- /dev/null
+++ b/include/linux/mfd/da9062/core.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015  Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_DA9062_CORE_H__
+#define __MFD_DA9062_CORE_H__
+
+#include <linux/interrupt.h>
+#include <linux/mfd/da9062/registers.h>
+
+/* Interrupts */
+enum da9062_irqs {
+	/* IRQ A */
+	DA9062_IRQ_ONKEY,
+	DA9062_IRQ_ALARM,
+	DA9062_IRQ_TICK,
+	DA9062_IRQ_WDG_WARN,
+	DA9062_IRQ_SEQ_RDY,
+	/* IRQ B */
+	DA9062_IRQ_TEMP,
+	DA9062_IRQ_LDO_LIM,
+	DA9062_IRQ_DVC_RDY,
+	DA9062_IRQ_VDD_WARN,
+	/* IRQ C */
+	DA9062_IRQ_GPI0,
+	DA9062_IRQ_GPI1,
+	DA9062_IRQ_GPI2,
+	DA9062_IRQ_GPI3,
+	DA9062_IRQ_GPI4,
+
+	DA9062_NUM_IRQ,
+};
+
+struct da9062 {
+	struct device *dev;
+	struct regmap *regmap;
+	struct regmap_irq_chip_data *regmap_irq;
+};
+
+#endif /* __MFD_DA9062_CORE_H__ */
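A hedged sketch of how a driver might resolve one of these enum values to a
Linux interrupt (the handler, flags and name string are illustrative):

	struct da9062 *chip = dev_get_drvdata(dev);
	int virq, ret;

	virq = regmap_irq_get_virq(chip->regmap_irq, DA9062_IRQ_ONKEY);
	if (virq < 0)
		return virq;
	/* da9062_onkey_handler is a made-up threaded handler */
	ret = request_threaded_irq(virq, NULL, da9062_onkey_handler,
				   IRQF_ONESHOT, "da9062-onkey", chip);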
diff --git a/include/linux/mfd/da9062/registers.h b/include/linux/mfd/da9062/registers.h
new file mode 100644
index 0000000..97790d1
--- /dev/null
+++ b/include/linux/mfd/da9062/registers.h
@@ -0,0 +1,1108 @@
+/*
+ * registers.h - Register definitions for DA9062
+ * Copyright (C) 2015  Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DA9062_H__
+#define __DA9062_H__
+
+#define DA9062_PMIC_DEVICE_ID		0x62
+#define DA9062_PMIC_VARIANT_MRC_AA	0x01
+
+#define DA9062_I2C_PAGE_SEL_SHIFT	1
+
+/*
+ * Registers
+ */
+
+#define DA9062AA_PAGE_CON		0x000
+#define DA9062AA_STATUS_A		0x001
+#define DA9062AA_STATUS_B		0x002
+#define DA9062AA_STATUS_D		0x004
+#define DA9062AA_FAULT_LOG		0x005
+#define DA9062AA_EVENT_A		0x006
+#define DA9062AA_EVENT_B		0x007
+#define DA9062AA_EVENT_C		0x008
+#define DA9062AA_IRQ_MASK_A		0x00A
+#define DA9062AA_IRQ_MASK_B		0x00B
+#define DA9062AA_IRQ_MASK_C		0x00C
+#define DA9062AA_CONTROL_A		0x00E
+#define DA9062AA_CONTROL_B		0x00F
+#define DA9062AA_CONTROL_C		0x010
+#define DA9062AA_CONTROL_D		0x011
+#define DA9062AA_CONTROL_E		0x012
+#define DA9062AA_CONTROL_F		0x013
+#define DA9062AA_PD_DIS			0x014
+#define DA9062AA_GPIO_0_1		0x015
+#define DA9062AA_GPIO_2_3		0x016
+#define DA9062AA_GPIO_4			0x017
+#define DA9062AA_GPIO_WKUP_MODE		0x01C
+#define DA9062AA_GPIO_MODE0_4		0x01D
+#define DA9062AA_GPIO_OUT0_2		0x01E
+#define DA9062AA_GPIO_OUT3_4		0x01F
+#define DA9062AA_BUCK2_CONT		0x020
+#define DA9062AA_BUCK1_CONT		0x021
+#define DA9062AA_BUCK4_CONT		0x022
+#define DA9062AA_BUCK3_CONT		0x024
+#define DA9062AA_LDO1_CONT		0x026
+#define DA9062AA_LDO2_CONT		0x027
+#define DA9062AA_LDO3_CONT		0x028
+#define DA9062AA_LDO4_CONT		0x029
+#define DA9062AA_DVC_1			0x032
+#define DA9062AA_COUNT_S		0x040
+#define DA9062AA_COUNT_MI		0x041
+#define DA9062AA_COUNT_H		0x042
+#define DA9062AA_COUNT_D		0x043
+#define DA9062AA_COUNT_MO		0x044
+#define DA9062AA_COUNT_Y		0x045
+#define DA9062AA_ALARM_S		0x046
+#define DA9062AA_ALARM_MI		0x047
+#define DA9062AA_ALARM_H		0x048
+#define DA9062AA_ALARM_D		0x049
+#define DA9062AA_ALARM_MO		0x04A
+#define DA9062AA_ALARM_Y		0x04B
+#define DA9062AA_SECOND_A		0x04C
+#define DA9062AA_SECOND_B		0x04D
+#define DA9062AA_SECOND_C		0x04E
+#define DA9062AA_SECOND_D		0x04F
+#define DA9062AA_SEQ			0x081
+#define DA9062AA_SEQ_TIMER		0x082
+#define DA9062AA_ID_2_1			0x083
+#define DA9062AA_ID_4_3			0x084
+#define DA9062AA_ID_12_11		0x088
+#define DA9062AA_ID_14_13		0x089
+#define DA9062AA_ID_16_15		0x08A
+#define DA9062AA_ID_22_21		0x08D
+#define DA9062AA_ID_24_23		0x08E
+#define DA9062AA_ID_26_25		0x08F
+#define DA9062AA_ID_28_27		0x090
+#define DA9062AA_ID_30_29		0x091
+#define DA9062AA_ID_32_31		0x092
+#define DA9062AA_SEQ_A			0x095
+#define DA9062AA_SEQ_B			0x096
+#define DA9062AA_WAIT			0x097
+#define DA9062AA_EN_32K			0x098
+#define DA9062AA_RESET			0x099
+#define DA9062AA_BUCK_ILIM_A		0x09A
+#define DA9062AA_BUCK_ILIM_B		0x09B
+#define DA9062AA_BUCK_ILIM_C		0x09C
+#define DA9062AA_BUCK2_CFG		0x09D
+#define DA9062AA_BUCK1_CFG		0x09E
+#define DA9062AA_BUCK4_CFG		0x09F
+#define DA9062AA_BUCK3_CFG		0x0A0
+#define DA9062AA_VBUCK2_A		0x0A3
+#define DA9062AA_VBUCK1_A		0x0A4
+#define DA9062AA_VBUCK4_A		0x0A5
+#define DA9062AA_VBUCK3_A		0x0A7
+#define DA9062AA_VLDO1_A		0x0A9
+#define DA9062AA_VLDO2_A		0x0AA
+#define DA9062AA_VLDO3_A		0x0AB
+#define DA9062AA_VLDO4_A		0x0AC
+#define DA9062AA_VBUCK2_B		0x0B4
+#define DA9062AA_VBUCK1_B		0x0B5
+#define DA9062AA_VBUCK4_B		0x0B6
+#define DA9062AA_VBUCK3_B		0x0B8
+#define DA9062AA_VLDO1_B		0x0BA
+#define DA9062AA_VLDO2_B		0x0BB
+#define DA9062AA_VLDO3_B		0x0BC
+#define DA9062AA_VLDO4_B		0x0BD
+#define DA9062AA_BBAT_CONT		0x0C5
+#define DA9062AA_INTERFACE		0x105
+#define DA9062AA_CONFIG_A		0x106
+#define DA9062AA_CONFIG_B		0x107
+#define DA9062AA_CONFIG_C		0x108
+#define DA9062AA_CONFIG_D		0x109
+#define DA9062AA_CONFIG_E		0x10A
+#define DA9062AA_CONFIG_G		0x10C
+#define DA9062AA_CONFIG_H		0x10D
+#define DA9062AA_CONFIG_I		0x10E
+#define DA9062AA_CONFIG_J		0x10F
+#define DA9062AA_CONFIG_K		0x110
+#define DA9062AA_CONFIG_M		0x112
+#define DA9062AA_TRIM_CLDR		0x120
+#define DA9062AA_GP_ID_0		0x121
+#define DA9062AA_GP_ID_1		0x122
+#define DA9062AA_GP_ID_2		0x123
+#define DA9062AA_GP_ID_3		0x124
+#define DA9062AA_GP_ID_4		0x125
+#define DA9062AA_GP_ID_5		0x126
+#define DA9062AA_GP_ID_6		0x127
+#define DA9062AA_GP_ID_7		0x128
+#define DA9062AA_GP_ID_8		0x129
+#define DA9062AA_GP_ID_9		0x12A
+#define DA9062AA_GP_ID_10		0x12B
+#define DA9062AA_GP_ID_11		0x12C
+#define DA9062AA_GP_ID_12		0x12D
+#define DA9062AA_GP_ID_13		0x12E
+#define DA9062AA_GP_ID_14		0x12F
+#define DA9062AA_GP_ID_15		0x130
+#define DA9062AA_GP_ID_16		0x131
+#define DA9062AA_GP_ID_17		0x132
+#define DA9062AA_GP_ID_18		0x133
+#define DA9062AA_GP_ID_19		0x134
+#define DA9062AA_DEVICE_ID		0x181
+#define DA9062AA_VARIANT_ID		0x182
+#define DA9062AA_CUSTOMER_ID		0x183
+#define DA9062AA_CONFIG_ID		0x184
+
+/*
+ * Bit fields
+ */
+
+/* DA9062AA_PAGE_CON = 0x000 */
+#define DA9062AA_PAGE_SHIFT		0
+#define DA9062AA_PAGE_MASK		0x3f
+#define DA9062AA_WRITE_MODE_SHIFT	6
+#define DA9062AA_WRITE_MODE_MASK	BIT(6)
+#define DA9062AA_REVERT_SHIFT		7
+#define DA9062AA_REVERT_MASK		BIT(7)
+
+/* DA9062AA_STATUS_A = 0x001 */
+#define DA9062AA_NONKEY_SHIFT		0
+#define DA9062AA_NONKEY_MASK		0x01
+#define DA9062AA_DVC_BUSY_SHIFT		2
+#define DA9062AA_DVC_BUSY_MASK		BIT(2)
+
+/* DA9062AA_STATUS_B = 0x002 */
+#define DA9062AA_GPI0_SHIFT		0
+#define DA9062AA_GPI0_MASK		0x01
+#define DA9062AA_GPI1_SHIFT		1
+#define DA9062AA_GPI1_MASK		BIT(1)
+#define DA9062AA_GPI2_SHIFT		2
+#define DA9062AA_GPI2_MASK		BIT(2)
+#define DA9062AA_GPI3_SHIFT		3
+#define DA9062AA_GPI3_MASK		BIT(3)
+#define DA9062AA_GPI4_SHIFT		4
+#define DA9062AA_GPI4_MASK		BIT(4)
+
+/* DA9062AA_STATUS_D = 0x004 */
+#define DA9062AA_LDO1_ILIM_SHIFT	0
+#define DA9062AA_LDO1_ILIM_MASK		0x01
+#define DA9062AA_LDO2_ILIM_SHIFT	1
+#define DA9062AA_LDO2_ILIM_MASK		BIT(1)
+#define DA9062AA_LDO3_ILIM_SHIFT	2
+#define DA9062AA_LDO3_ILIM_MASK		BIT(2)
+#define DA9062AA_LDO4_ILIM_SHIFT	3
+#define DA9062AA_LDO4_ILIM_MASK		BIT(3)
+
+/* DA9062AA_FAULT_LOG = 0x005 */
+#define DA9062AA_TWD_ERROR_SHIFT	0
+#define DA9062AA_TWD_ERROR_MASK		0x01
+#define DA9062AA_POR_SHIFT		1
+#define DA9062AA_POR_MASK		BIT(1)
+#define DA9062AA_VDD_FAULT_SHIFT	2
+#define DA9062AA_VDD_FAULT_MASK		BIT(2)
+#define DA9062AA_VDD_START_SHIFT	3
+#define DA9062AA_VDD_START_MASK		BIT(3)
+#define DA9062AA_TEMP_CRIT_SHIFT	4
+#define DA9062AA_TEMP_CRIT_MASK		BIT(4)
+#define DA9062AA_KEY_RESET_SHIFT	5
+#define DA9062AA_KEY_RESET_MASK		BIT(5)
+#define DA9062AA_NSHUTDOWN_SHIFT	6
+#define DA9062AA_NSHUTDOWN_MASK		BIT(6)
+#define DA9062AA_WAIT_SHUT_SHIFT	7
+#define DA9062AA_WAIT_SHUT_MASK		BIT(7)
+
+/* DA9062AA_EVENT_A = 0x006 */
+#define DA9062AA_E_NONKEY_SHIFT		0
+#define DA9062AA_E_NONKEY_MASK		0x01
+#define DA9062AA_E_ALARM_SHIFT		1
+#define DA9062AA_E_ALARM_MASK		BIT(1)
+#define DA9062AA_E_TICK_SHIFT		2
+#define DA9062AA_E_TICK_MASK		BIT(2)
+#define DA9062AA_E_WDG_WARN_SHIFT	3
+#define DA9062AA_E_WDG_WARN_MASK	BIT(3)
+#define DA9062AA_E_SEQ_RDY_SHIFT	4
+#define DA9062AA_E_SEQ_RDY_MASK		BIT(4)
+#define DA9062AA_EVENTS_B_SHIFT		5
+#define DA9062AA_EVENTS_B_MASK		BIT(5)
+#define DA9062AA_EVENTS_C_SHIFT		6
+#define DA9062AA_EVENTS_C_MASK		BIT(6)
+
+/* DA9062AA_EVENT_B = 0x007 */
+#define DA9062AA_E_TEMP_SHIFT		1
+#define DA9062AA_E_TEMP_MASK		BIT(1)
+#define DA9062AA_E_LDO_LIM_SHIFT	3
+#define DA9062AA_E_LDO_LIM_MASK		BIT(3)
+#define DA9062AA_E_DVC_RDY_SHIFT	5
+#define DA9062AA_E_DVC_RDY_MASK		BIT(5)
+#define DA9062AA_E_VDD_WARN_SHIFT	7
+#define DA9062AA_E_VDD_WARN_MASK	BIT(7)
+
+/* DA9062AA_EVENT_C = 0x008 */
+#define DA9062AA_E_GPI0_SHIFT		0
+#define DA9062AA_E_GPI0_MASK		0x01
+#define DA9062AA_E_GPI1_SHIFT		1
+#define DA9062AA_E_GPI1_MASK		BIT(1)
+#define DA9062AA_E_GPI2_SHIFT		2
+#define DA9062AA_E_GPI2_MASK		BIT(2)
+#define DA9062AA_E_GPI3_SHIFT		3
+#define DA9062AA_E_GPI3_MASK		BIT(3)
+#define DA9062AA_E_GPI4_SHIFT		4
+#define DA9062AA_E_GPI4_MASK		BIT(4)
+
+/* DA9062AA_IRQ_MASK_A = 0x00A */
+#define DA9062AA_M_NONKEY_SHIFT		0
+#define DA9062AA_M_NONKEY_MASK		0x01
+#define DA9062AA_M_ALARM_SHIFT		1
+#define DA9062AA_M_ALARM_MASK		BIT(1)
+#define DA9062AA_M_TICK_SHIFT		2
+#define DA9062AA_M_TICK_MASK		BIT(2)
+#define DA9062AA_M_WDG_WARN_SHIFT	3
+#define DA9062AA_M_WDG_WARN_MASK	BIT(3)
+#define DA9062AA_M_SEQ_RDY_SHIFT	4
+#define DA9062AA_M_SEQ_RDY_MASK		BIT(4)
+
+/* DA9062AA_IRQ_MASK_B = 0x00B */
+#define DA9062AA_M_TEMP_SHIFT		1
+#define DA9062AA_M_TEMP_MASK		BIT(1)
+#define DA9062AA_M_LDO_LIM_SHIFT	3
+#define DA9062AA_M_LDO_LIM_MASK		BIT(3)
+#define DA9062AA_M_DVC_RDY_SHIFT	5
+#define DA9062AA_M_DVC_RDY_MASK		BIT(5)
+#define DA9062AA_M_VDD_WARN_SHIFT	7
+#define DA9062AA_M_VDD_WARN_MASK	BIT(7)
+
+/* DA9062AA_IRQ_MASK_C = 0x00C */
+#define DA9062AA_M_GPI0_SHIFT		0
+#define DA9062AA_M_GPI0_MASK		0x01
+#define DA9062AA_M_GPI1_SHIFT		1
+#define DA9062AA_M_GPI1_MASK		BIT(1)
+#define DA9062AA_M_GPI2_SHIFT		2
+#define DA9062AA_M_GPI2_MASK		BIT(2)
+#define DA9062AA_M_GPI3_SHIFT		3
+#define DA9062AA_M_GPI3_MASK		BIT(3)
+#define DA9062AA_M_GPI4_SHIFT		4
+#define DA9062AA_M_GPI4_MASK		BIT(4)
+
+/* DA9062AA_CONTROL_A = 0x00E */
+#define DA9062AA_SYSTEM_EN_SHIFT	0
+#define DA9062AA_SYSTEM_EN_MASK		0x01
+#define DA9062AA_POWER_EN_SHIFT		1
+#define DA9062AA_POWER_EN_MASK		BIT(1)
+#define DA9062AA_POWER1_EN_SHIFT	2
+#define DA9062AA_POWER1_EN_MASK		BIT(2)
+#define DA9062AA_STANDBY_SHIFT		3
+#define DA9062AA_STANDBY_MASK		BIT(3)
+#define DA9062AA_M_SYSTEM_EN_SHIFT	4
+#define DA9062AA_M_SYSTEM_EN_MASK	BIT(4)
+#define DA9062AA_M_POWER_EN_SHIFT	5
+#define DA9062AA_M_POWER_EN_MASK	BIT(5)
+#define DA9062AA_M_POWER1_EN_SHIFT	6
+#define DA9062AA_M_POWER1_EN_MASK	BIT(6)
+
+/* DA9062AA_CONTROL_B = 0x00F */
+#define DA9062AA_WATCHDOG_PD_SHIFT	1
+#define DA9062AA_WATCHDOG_PD_MASK	BIT(1)
+#define DA9062AA_FREEZE_EN_SHIFT	2
+#define DA9062AA_FREEZE_EN_MASK		BIT(2)
+#define DA9062AA_NRES_MODE_SHIFT	3
+#define DA9062AA_NRES_MODE_MASK		BIT(3)
+#define DA9062AA_NONKEY_LOCK_SHIFT	4
+#define DA9062AA_NONKEY_LOCK_MASK	BIT(4)
+#define DA9062AA_NFREEZE_SHIFT		5
+#define DA9062AA_NFREEZE_MASK		(0x03 << 5)
+#define DA9062AA_BUCK_SLOWSTART_SHIFT	7
+#define DA9062AA_BUCK_SLOWSTART_MASK	BIT(7)
+
+/* DA9062AA_CONTROL_C = 0x010 */
+#define DA9062AA_DEBOUNCING_SHIFT	0
+#define DA9062AA_DEBOUNCING_MASK	0x07
+#define DA9062AA_AUTO_BOOT_SHIFT	3
+#define DA9062AA_AUTO_BOOT_MASK		BIT(3)
+#define DA9062AA_OTPREAD_EN_SHIFT	4
+#define DA9062AA_OTPREAD_EN_MASK	BIT(4)
+#define DA9062AA_SLEW_RATE_SHIFT	5
+#define DA9062AA_SLEW_RATE_MASK		(0x03 << 5)
+#define DA9062AA_DEF_SUPPLY_SHIFT	7
+#define DA9062AA_DEF_SUPPLY_MASK	BIT(7)
+
+/* DA9062AA_CONTROL_D = 0x011 */
+#define DA9062AA_TWDSCALE_SHIFT		0
+#define DA9062AA_TWDSCALE_MASK		0x07
+
+/* DA9062AA_CONTROL_E = 0x012 */
+#define DA9062AA_RTC_MODE_PD_SHIFT	0
+#define DA9062AA_RTC_MODE_PD_MASK	0x01
+#define DA9062AA_RTC_MODE_SD_SHIFT	1
+#define DA9062AA_RTC_MODE_SD_MASK	BIT(1)
+#define DA9062AA_RTC_EN_SHIFT		2
+#define DA9062AA_RTC_EN_MASK		BIT(2)
+#define DA9062AA_V_LOCK_SHIFT		7
+#define DA9062AA_V_LOCK_MASK		BIT(7)
+
+/* DA9062AA_CONTROL_F = 0x013 */
+#define DA9062AA_WATCHDOG_SHIFT		0
+#define DA9062AA_WATCHDOG_MASK		0x01
+#define DA9062AA_SHUTDOWN_SHIFT		1
+#define DA9062AA_SHUTDOWN_MASK		BIT(1)
+#define DA9062AA_WAKE_UP_SHIFT		2
+#define DA9062AA_WAKE_UP_MASK		BIT(2)
+
+/* DA9062AA_PD_DIS = 0x014 */
+#define DA9062AA_GPI_DIS_SHIFT		0
+#define DA9062AA_GPI_DIS_MASK		0x01
+#define DA9062AA_PMIF_DIS_SHIFT		2
+#define DA9062AA_PMIF_DIS_MASK		BIT(2)
+#define DA9062AA_CLDR_PAUSE_SHIFT	4
+#define DA9062AA_CLDR_PAUSE_MASK	BIT(4)
+#define DA9062AA_BBAT_DIS_SHIFT		5
+#define DA9062AA_BBAT_DIS_MASK		BIT(5)
+#define DA9062AA_OUT32K_PAUSE_SHIFT	6
+#define DA9062AA_OUT32K_PAUSE_MASK	BIT(6)
+#define DA9062AA_PMCONT_DIS_SHIFT	7
+#define DA9062AA_PMCONT_DIS_MASK	BIT(7)
+
+/* DA9062AA_GPIO_0_1 = 0x015 */
+#define DA9062AA_GPIO0_PIN_SHIFT	0
+#define DA9062AA_GPIO0_PIN_MASK		0x03
+#define DA9062AA_GPIO0_TYPE_SHIFT	2
+#define DA9062AA_GPIO0_TYPE_MASK	BIT(2)
+#define DA9062AA_GPIO0_WEN_SHIFT	3
+#define DA9062AA_GPIO0_WEN_MASK		BIT(3)
+#define DA9062AA_GPIO1_PIN_SHIFT	4
+#define DA9062AA_GPIO1_PIN_MASK		(0x03 << 4)
+#define DA9062AA_GPIO1_TYPE_SHIFT	6
+#define DA9062AA_GPIO1_TYPE_MASK	BIT(6)
+#define DA9062AA_GPIO1_WEN_SHIFT	7
+#define DA9062AA_GPIO1_WEN_MASK		BIT(7)
+
+/* DA9062AA_GPIO_2_3 = 0x016 */
+#define DA9062AA_GPIO2_PIN_SHIFT	0
+#define DA9062AA_GPIO2_PIN_MASK		0x03
+#define DA9062AA_GPIO2_TYPE_SHIFT	2
+#define DA9062AA_GPIO2_TYPE_MASK	BIT(2)
+#define DA9062AA_GPIO2_WEN_SHIFT	3
+#define DA9062AA_GPIO2_WEN_MASK		BIT(3)
+#define DA9062AA_GPIO3_PIN_SHIFT	4
+#define DA9062AA_GPIO3_PIN_MASK		(0x03 << 4)
+#define DA9062AA_GPIO3_TYPE_SHIFT	6
+#define DA9062AA_GPIO3_TYPE_MASK	BIT(6)
+#define DA9062AA_GPIO3_WEN_SHIFT	7
+#define DA9062AA_GPIO3_WEN_MASK		BIT(7)
+
+/* DA9062AA_GPIO_4 = 0x017 */
+#define DA9062AA_GPIO4_PIN_SHIFT	0
+#define DA9062AA_GPIO4_PIN_MASK		0x03
+#define DA9062AA_GPIO4_TYPE_SHIFT	2
+#define DA9062AA_GPIO4_TYPE_MASK	BIT(2)
+#define DA9062AA_GPIO4_WEN_SHIFT	3
+#define DA9062AA_GPIO4_WEN_MASK		BIT(3)
+
+/* DA9062AA_GPIO_WKUP_MODE = 0x01C */
+#define DA9062AA_GPIO0_WKUP_MODE_SHIFT	0
+#define DA9062AA_GPIO0_WKUP_MODE_MASK	0x01
+#define DA9062AA_GPIO1_WKUP_MODE_SHIFT	1
+#define DA9062AA_GPIO1_WKUP_MODE_MASK	BIT(1)
+#define DA9062AA_GPIO2_WKUP_MODE_SHIFT	2
+#define DA9062AA_GPIO2_WKUP_MODE_MASK	BIT(2)
+#define DA9062AA_GPIO3_WKUP_MODE_SHIFT	3
+#define DA9062AA_GPIO3_WKUP_MODE_MASK	BIT(3)
+#define DA9062AA_GPIO4_WKUP_MODE_SHIFT	4
+#define DA9062AA_GPIO4_WKUP_MODE_MASK	BIT(4)
+
+/* DA9062AA_GPIO_MODE0_4 = 0x01D */
+#define DA9062AA_GPIO0_MODE_SHIFT	0
+#define DA9062AA_GPIO0_MODE_MASK	0x01
+#define DA9062AA_GPIO1_MODE_SHIFT	1
+#define DA9062AA_GPIO1_MODE_MASK	BIT(1)
+#define DA9062AA_GPIO2_MODE_SHIFT	2
+#define DA9062AA_GPIO2_MODE_MASK	BIT(2)
+#define DA9062AA_GPIO3_MODE_SHIFT	3
+#define DA9062AA_GPIO3_MODE_MASK	BIT(3)
+#define DA9062AA_GPIO4_MODE_SHIFT	4
+#define DA9062AA_GPIO4_MODE_MASK	BIT(4)
+
+/* DA9062AA_GPIO_OUT0_2 = 0x01E */
+#define DA9062AA_GPIO0_OUT_SHIFT	0
+#define DA9062AA_GPIO0_OUT_MASK		0x07
+#define DA9062AA_GPIO1_OUT_SHIFT	3
+#define DA9062AA_GPIO1_OUT_MASK		(0x07 << 3)
+#define DA9062AA_GPIO2_OUT_SHIFT	6
+#define DA9062AA_GPIO2_OUT_MASK		(0x03 << 6)
+
+/* DA9062AA_GPIO_OUT3_4 = 0x01F */
+#define DA9062AA_GPIO3_OUT_SHIFT	0
+#define DA9062AA_GPIO3_OUT_MASK		0x07
+#define DA9062AA_GPIO4_OUT_SHIFT	3
+#define DA9062AA_GPIO4_OUT_MASK		(0x03 << 3)
+
+/* DA9062AA_BUCK2_CONT = 0x020 */
+#define DA9062AA_BUCK2_EN_SHIFT		0
+#define DA9062AA_BUCK2_EN_MASK		0x01
+#define DA9062AA_BUCK2_GPI_SHIFT	1
+#define DA9062AA_BUCK2_GPI_MASK		(0x03 << 1)
+#define DA9062AA_BUCK2_CONF_SHIFT	3
+#define DA9062AA_BUCK2_CONF_MASK	BIT(3)
+#define DA9062AA_VBUCK2_GPI_SHIFT	5
+#define DA9062AA_VBUCK2_GPI_MASK	(0x03 << 5)
+
+/* DA9062AA_BUCK1_CONT = 0x021 */
+#define DA9062AA_BUCK1_EN_SHIFT		0
+#define DA9062AA_BUCK1_EN_MASK		0x01
+#define DA9062AA_BUCK1_GPI_SHIFT	1
+#define DA9062AA_BUCK1_GPI_MASK		(0x03 << 1)
+#define DA9062AA_BUCK1_CONF_SHIFT	3
+#define DA9062AA_BUCK1_CONF_MASK	BIT(3)
+#define DA9062AA_VBUCK1_GPI_SHIFT	5
+#define DA9062AA_VBUCK1_GPI_MASK	(0x03 << 5)
+
+/* DA9062AA_BUCK4_CONT = 0x022 */
+#define DA9062AA_BUCK4_EN_SHIFT		0
+#define DA9062AA_BUCK4_EN_MASK		0x01
+#define DA9062AA_BUCK4_GPI_SHIFT	1
+#define DA9062AA_BUCK4_GPI_MASK		(0x03 << 1)
+#define DA9062AA_BUCK4_CONF_SHIFT	3
+#define DA9062AA_BUCK4_CONF_MASK	BIT(3)
+#define DA9062AA_VBUCK4_GPI_SHIFT	5
+#define DA9062AA_VBUCK4_GPI_MASK	(0x03 << 5)
+
+/* DA9062AA_BUCK3_CONT = 0x024 */
+#define DA9062AA_BUCK3_EN_SHIFT		0
+#define DA9062AA_BUCK3_EN_MASK		0x01
+#define DA9062AA_BUCK3_GPI_SHIFT	1
+#define DA9062AA_BUCK3_GPI_MASK		(0x03 << 1)
+#define DA9062AA_BUCK3_CONF_SHIFT	3
+#define DA9062AA_BUCK3_CONF_MASK	BIT(3)
+#define DA9062AA_VBUCK3_GPI_SHIFT	5
+#define DA9062AA_VBUCK3_GPI_MASK	(0x03 << 5)
+
+/* DA9062AA_LDO1_CONT = 0x026 */
+#define DA9062AA_LDO1_EN_SHIFT		0
+#define DA9062AA_LDO1_EN_MASK		0x01
+#define DA9062AA_LDO1_GPI_SHIFT		1
+#define DA9062AA_LDO1_GPI_MASK		(0x03 << 1)
+#define DA9062AA_LDO1_PD_DIS_SHIFT	3
+#define DA9062AA_LDO1_PD_DIS_MASK	BIT(3)
+#define DA9062AA_VLDO1_GPI_SHIFT	5
+#define DA9062AA_VLDO1_GPI_MASK		(0x03 << 5)
+#define DA9062AA_LDO1_CONF_SHIFT	7
+#define DA9062AA_LDO1_CONF_MASK		BIT(7)
+
+/* DA9062AA_LDO2_CONT = 0x027 */
+#define DA9062AA_LDO2_EN_SHIFT		0
+#define DA9062AA_LDO2_EN_MASK		0x01
+#define DA9062AA_LDO2_GPI_SHIFT		1
+#define DA9062AA_LDO2_GPI_MASK		(0x03 << 1)
+#define DA9062AA_LDO2_PD_DIS_SHIFT	3
+#define DA9062AA_LDO2_PD_DIS_MASK	BIT(3)
+#define DA9062AA_VLDO2_GPI_SHIFT	5
+#define DA9062AA_VLDO2_GPI_MASK		(0x03 << 5)
+#define DA9062AA_LDO2_CONF_SHIFT	7
+#define DA9062AA_LDO2_CONF_MASK		BIT(7)
+
+/* DA9062AA_LDO3_CONT = 0x028 */
+#define DA9062AA_LDO3_EN_SHIFT		0
+#define DA9062AA_LDO3_EN_MASK		0x01
+#define DA9062AA_LDO3_GPI_SHIFT		1
+#define DA9062AA_LDO3_GPI_MASK		(0x03 << 1)
+#define DA9062AA_LDO3_PD_DIS_SHIFT	3
+#define DA9062AA_LDO3_PD_DIS_MASK	BIT(3)
+#define DA9062AA_VLDO3_GPI_SHIFT	5
+#define DA9062AA_VLDO3_GPI_MASK		(0x03 << 5)
+#define DA9062AA_LDO3_CONF_SHIFT	7
+#define DA9062AA_LDO3_CONF_MASK		BIT(7)
+
+/* DA9062AA_LDO4_CONT = 0x029 */
+#define DA9062AA_LDO4_EN_SHIFT		0
+#define DA9062AA_LDO4_EN_MASK		0x01
+#define DA9062AA_LDO4_GPI_SHIFT		1
+#define DA9062AA_LDO4_GPI_MASK		(0x03 << 1)
+#define DA9062AA_LDO4_PD_DIS_SHIFT	3
+#define DA9062AA_LDO4_PD_DIS_MASK	BIT(3)
+#define DA9062AA_VLDO4_GPI_SHIFT	5
+#define DA9062AA_VLDO4_GPI_MASK		(0x03 << 5)
+#define DA9062AA_LDO4_CONF_SHIFT	7
+#define DA9062AA_LDO4_CONF_MASK		BIT(7)
+
+/* DA9062AA_DVC_1 = 0x032 */
+#define DA9062AA_VBUCK1_SEL_SHIFT	0
+#define DA9062AA_VBUCK1_SEL_MASK	0x01
+#define DA9062AA_VBUCK2_SEL_SHIFT	1
+#define DA9062AA_VBUCK2_SEL_MASK	BIT(1)
+#define DA9062AA_VBUCK4_SEL_SHIFT	2
+#define DA9062AA_VBUCK4_SEL_MASK	BIT(2)
+#define DA9062AA_VBUCK3_SEL_SHIFT	3
+#define DA9062AA_VBUCK3_SEL_MASK	BIT(3)
+#define DA9062AA_VLDO1_SEL_SHIFT	4
+#define DA9062AA_VLDO1_SEL_MASK		BIT(4)
+#define DA9062AA_VLDO2_SEL_SHIFT	5
+#define DA9062AA_VLDO2_SEL_MASK		BIT(5)
+#define DA9062AA_VLDO3_SEL_SHIFT	6
+#define DA9062AA_VLDO3_SEL_MASK		BIT(6)
+#define DA9062AA_VLDO4_SEL_SHIFT	7
+#define DA9062AA_VLDO4_SEL_MASK		BIT(7)
+
+/* DA9062AA_COUNT_S = 0x040 */
+#define DA9062AA_COUNT_SEC_SHIFT	0
+#define DA9062AA_COUNT_SEC_MASK		0x3f
+#define DA9062AA_RTC_READ_SHIFT		7
+#define DA9062AA_RTC_READ_MASK		BIT(7)
+
+/* DA9062AA_COUNT_MI = 0x041 */
+#define DA9062AA_COUNT_MIN_SHIFT	0
+#define DA9062AA_COUNT_MIN_MASK		0x3f
+
+/* DA9062AA_COUNT_H = 0x042 */
+#define DA9062AA_COUNT_HOUR_SHIFT	0
+#define DA9062AA_COUNT_HOUR_MASK	0x1f
+
+/* DA9062AA_COUNT_D = 0x043 */
+#define DA9062AA_COUNT_DAY_SHIFT	0
+#define DA9062AA_COUNT_DAY_MASK		0x1f
+
+/* DA9062AA_COUNT_MO = 0x044 */
+#define DA9062AA_COUNT_MONTH_SHIFT	0
+#define DA9062AA_COUNT_MONTH_MASK	0x0f
+
+/* DA9062AA_COUNT_Y = 0x045 */
+#define DA9062AA_COUNT_YEAR_SHIFT	0
+#define DA9062AA_COUNT_YEAR_MASK	0x3f
+#define DA9062AA_MONITOR_SHIFT		6
+#define DA9062AA_MONITOR_MASK		BIT(6)
+
+/* DA9062AA_ALARM_S = 0x046 */
+#define DA9062AA_ALARM_SEC_SHIFT	0
+#define DA9062AA_ALARM_SEC_MASK		0x3f
+#define DA9062AA_ALARM_STATUS_SHIFT	6
+#define DA9062AA_ALARM_STATUS_MASK	(0x03 << 6)
+
+/* DA9062AA_ALARM_MI = 0x047 */
+#define DA9062AA_ALARM_MIN_SHIFT	0
+#define DA9062AA_ALARM_MIN_MASK		0x3f
+
+/* DA9062AA_ALARM_H = 0x048 */
+#define DA9062AA_ALARM_HOUR_SHIFT	0
+#define DA9062AA_ALARM_HOUR_MASK	0x1f
+
+/* DA9062AA_ALARM_D = 0x049 */
+#define DA9062AA_ALARM_DAY_SHIFT	0
+#define DA9062AA_ALARM_DAY_MASK		0x1f
+
+/* DA9062AA_ALARM_MO = 0x04A */
+#define DA9062AA_ALARM_MONTH_SHIFT	0
+#define DA9062AA_ALARM_MONTH_MASK	0x0f
+#define DA9062AA_TICK_TYPE_SHIFT	4
+#define DA9062AA_TICK_TYPE_MASK		BIT(4)
+#define DA9062AA_TICK_WAKE_SHIFT	5
+#define DA9062AA_TICK_WAKE_MASK		BIT(5)
+
+/* DA9062AA_ALARM_Y = 0x04B */
+#define DA9062AA_ALARM_YEAR_SHIFT	0
+#define DA9062AA_ALARM_YEAR_MASK	0x3f
+#define DA9062AA_ALARM_ON_SHIFT		6
+#define DA9062AA_ALARM_ON_MASK		BIT(6)
+#define DA9062AA_TICK_ON_SHIFT		7
+#define DA9062AA_TICK_ON_MASK		BIT(7)
+
+/* DA9062AA_SECOND_A = 0x04C */
+#define DA9062AA_SECONDS_A_SHIFT	0
+#define DA9062AA_SECONDS_A_MASK		0xff
+
+/* DA9062AA_SECOND_B = 0x04D */
+#define DA9062AA_SECONDS_B_SHIFT	0
+#define DA9062AA_SECONDS_B_MASK		0xff
+
+/* DA9062AA_SECOND_C = 0x04E */
+#define DA9062AA_SECONDS_C_SHIFT	0
+#define DA9062AA_SECONDS_C_MASK		0xff
+
+/* DA9062AA_SECOND_D = 0x04F */
+#define DA9062AA_SECONDS_D_SHIFT	0
+#define DA9062AA_SECONDS_D_MASK		0xff
+
+/* DA9062AA_SEQ = 0x081 */
+#define DA9062AA_SEQ_POINTER_SHIFT	0
+#define DA9062AA_SEQ_POINTER_MASK	0x0f
+#define DA9062AA_NXT_SEQ_START_SHIFT	4
+#define DA9062AA_NXT_SEQ_START_MASK	(0x0f << 4)
+
+/* DA9062AA_SEQ_TIMER = 0x082 */
+#define DA9062AA_SEQ_TIME_SHIFT		0
+#define DA9062AA_SEQ_TIME_MASK		0x0f
+#define DA9062AA_SEQ_DUMMY_SHIFT	4
+#define DA9062AA_SEQ_DUMMY_MASK		(0x0f << 4)
+
+/* DA9062AA_ID_2_1 = 0x083 */
+#define DA9062AA_LDO1_STEP_SHIFT	0
+#define DA9062AA_LDO1_STEP_MASK		0x0f
+#define DA9062AA_LDO2_STEP_SHIFT	4
+#define DA9062AA_LDO2_STEP_MASK		(0x0f << 4)
+
+/* DA9062AA_ID_4_3 = 0x084 */
+#define DA9062AA_LDO3_STEP_SHIFT	0
+#define DA9062AA_LDO3_STEP_MASK		0x0f
+#define DA9062AA_LDO4_STEP_SHIFT	4
+#define DA9062AA_LDO4_STEP_MASK		(0x0f << 4)
+
+/* DA9062AA_ID_12_11 = 0x088 */
+#define DA9062AA_PD_DIS_STEP_SHIFT	4
+#define DA9062AA_PD_DIS_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_14_13 = 0x089 */
+#define DA9062AA_BUCK1_STEP_SHIFT	0
+#define DA9062AA_BUCK1_STEP_MASK	0x0f
+#define DA9062AA_BUCK2_STEP_SHIFT	4
+#define DA9062AA_BUCK2_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_16_15 = 0x08A */
+#define DA9062AA_BUCK4_STEP_SHIFT	0
+#define DA9062AA_BUCK4_STEP_MASK	0x0f
+#define DA9062AA_BUCK3_STEP_SHIFT	4
+#define DA9062AA_BUCK3_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_22_21 = 0x08D */
+#define DA9062AA_GP_RISE1_STEP_SHIFT	0
+#define DA9062AA_GP_RISE1_STEP_MASK	0x0f
+#define DA9062AA_GP_FALL1_STEP_SHIFT	4
+#define DA9062AA_GP_FALL1_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_24_23 = 0x08E */
+#define DA9062AA_GP_RISE2_STEP_SHIFT	0
+#define DA9062AA_GP_RISE2_STEP_MASK	0x0f
+#define DA9062AA_GP_FALL2_STEP_SHIFT	4
+#define DA9062AA_GP_FALL2_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_26_25 = 0x08F */
+#define DA9062AA_GP_RISE3_STEP_SHIFT	0
+#define DA9062AA_GP_RISE3_STEP_MASK	0x0f
+#define DA9062AA_GP_FALL3_STEP_SHIFT	4
+#define DA9062AA_GP_FALL3_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_28_27 = 0x090 */
+#define DA9062AA_GP_RISE4_STEP_SHIFT	0
+#define DA9062AA_GP_RISE4_STEP_MASK	0x0f
+#define DA9062AA_GP_FALL4_STEP_SHIFT	4
+#define DA9062AA_GP_FALL4_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_30_29 = 0x091 */
+#define DA9062AA_GP_RISE5_STEP_SHIFT	0
+#define DA9062AA_GP_RISE5_STEP_MASK	0x0f
+#define DA9062AA_GP_FALL5_STEP_SHIFT	4
+#define DA9062AA_GP_FALL5_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_32_31 = 0x092 */
+#define DA9062AA_WAIT_STEP_SHIFT	0
+#define DA9062AA_WAIT_STEP_MASK		0x0f
+#define DA9062AA_EN32K_STEP_SHIFT	4
+#define DA9062AA_EN32K_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_SEQ_A = 0x095 */
+#define DA9062AA_SYSTEM_END_SHIFT	0
+#define DA9062AA_SYSTEM_END_MASK	0x0f
+#define DA9062AA_POWER_END_SHIFT	4
+#define DA9062AA_POWER_END_MASK		(0x0f << 4)
+
+/* DA9062AA_SEQ_B = 0x096 */
+#define DA9062AA_MAX_COUNT_SHIFT	0
+#define DA9062AA_MAX_COUNT_MASK		0x0f
+#define DA9062AA_PART_DOWN_SHIFT	4
+#define DA9062AA_PART_DOWN_MASK		(0x0f << 4)
+
+/* DA9062AA_WAIT = 0x097 */
+#define DA9062AA_WAIT_TIME_SHIFT	0
+#define DA9062AA_WAIT_TIME_MASK		0x0f
+#define DA9062AA_WAIT_MODE_SHIFT	4
+#define DA9062AA_WAIT_MODE_MASK		BIT(4)
+#define DA9062AA_TIME_OUT_SHIFT		5
+#define DA9062AA_TIME_OUT_MASK		BIT(5)
+#define DA9062AA_WAIT_DIR_SHIFT		6
+#define DA9062AA_WAIT_DIR_MASK		(0x03 << 6)
+
+/* DA9062AA_EN_32K = 0x098 */
+#define DA9062AA_STABILISATION_TIME_SHIFT	0
+#define DA9062AA_STABILISATION_TIME_MASK	0x07
+#define DA9062AA_CRYSTAL_SHIFT			3
+#define DA9062AA_CRYSTAL_MASK			BIT(3)
+#define DA9062AA_DELAY_MODE_SHIFT		4
+#define DA9062AA_DELAY_MODE_MASK		BIT(4)
+#define DA9062AA_OUT_CLOCK_SHIFT		5
+#define DA9062AA_OUT_CLOCK_MASK			BIT(5)
+#define DA9062AA_RTC_CLOCK_SHIFT		6
+#define DA9062AA_RTC_CLOCK_MASK			BIT(6)
+#define DA9062AA_EN_32KOUT_SHIFT		7
+#define DA9062AA_EN_32KOUT_MASK			BIT(7)
+
+/* DA9062AA_RESET = 0x099 */
+#define DA9062AA_RESET_TIMER_SHIFT	0
+#define DA9062AA_RESET_TIMER_MASK	0x3f
+#define DA9062AA_RESET_EVENT_SHIFT	6
+#define DA9062AA_RESET_EVENT_MASK	(0x03 << 6)
+
+/* DA9062AA_BUCK_ILIM_A = 0x09A */
+#define DA9062AA_BUCK3_ILIM_SHIFT	0
+#define DA9062AA_BUCK3_ILIM_MASK	0x0f
+
+/* DA9062AA_BUCK_ILIM_B = 0x09B */
+#define DA9062AA_BUCK4_ILIM_SHIFT	0
+#define DA9062AA_BUCK4_ILIM_MASK	0x0f
+
+/* DA9062AA_BUCK_ILIM_C = 0x09C */
+#define DA9062AA_BUCK1_ILIM_SHIFT	0
+#define DA9062AA_BUCK1_ILIM_MASK	0x0f
+#define DA9062AA_BUCK2_ILIM_SHIFT	4
+#define DA9062AA_BUCK2_ILIM_MASK	(0x0f << 4)
+
+/* DA9062AA_BUCK2_CFG = 0x09D */
+#define DA9062AA_BUCK2_PD_DIS_SHIFT	5
+#define DA9062AA_BUCK2_PD_DIS_MASK	BIT(5)
+#define DA9062AA_BUCK2_MODE_SHIFT	6
+#define DA9062AA_BUCK2_MODE_MASK	(0x03 << 6)
+
+/* DA9062AA_BUCK1_CFG = 0x09E */
+#define DA9062AA_BUCK1_PD_DIS_SHIFT	5
+#define DA9062AA_BUCK1_PD_DIS_MASK	BIT(5)
+#define DA9062AA_BUCK1_MODE_SHIFT	6
+#define DA9062AA_BUCK1_MODE_MASK	(0x03 << 6)
+
+/* DA9062AA_BUCK4_CFG = 0x09F */
+#define DA9062AA_BUCK4_VTTR_EN_SHIFT	3
+#define DA9062AA_BUCK4_VTTR_EN_MASK	BIT(3)
+#define DA9062AA_BUCK4_VTT_EN_SHIFT	4
+#define DA9062AA_BUCK4_VTT_EN_MASK	BIT(4)
+#define DA9062AA_BUCK4_PD_DIS_SHIFT	5
+#define DA9062AA_BUCK4_PD_DIS_MASK	BIT(5)
+#define DA9062AA_BUCK4_MODE_SHIFT	6
+#define DA9062AA_BUCK4_MODE_MASK	(0x03 << 6)
+
+/* DA9062AA_BUCK3_CFG = 0x0A0 */
+#define DA9062AA_BUCK3_PD_DIS_SHIFT	5
+#define DA9062AA_BUCK3_PD_DIS_MASK	BIT(5)
+#define DA9062AA_BUCK3_MODE_SHIFT	6
+#define DA9062AA_BUCK3_MODE_MASK	(0x03 << 6)
+
+/* DA9062AA_VBUCK2_A = 0x0A3 */
+#define DA9062AA_VBUCK2_A_SHIFT		0
+#define DA9062AA_VBUCK2_A_MASK		0x7f
+#define DA9062AA_BUCK2_SL_A_SHIFT	7
+#define DA9062AA_BUCK2_SL_A_MASK	BIT(7)
+
+/* DA9062AA_VBUCK1_A = 0x0A4 */
+#define DA9062AA_VBUCK1_A_SHIFT		0
+#define DA9062AA_VBUCK1_A_MASK		0x7f
+#define DA9062AA_BUCK1_SL_A_SHIFT	7
+#define DA9062AA_BUCK1_SL_A_MASK	BIT(7)
+
+/* DA9062AA_VBUCK4_A = 0x0A5 */
+#define DA9062AA_VBUCK4_A_SHIFT		0
+#define DA9062AA_VBUCK4_A_MASK		0x7f
+#define DA9062AA_BUCK4_SL_A_SHIFT	7
+#define DA9062AA_BUCK4_SL_A_MASK	BIT(7)
+
+/* DA9062AA_VBUCK3_A = 0x0A7 */
+#define DA9062AA_VBUCK3_A_SHIFT		0
+#define DA9062AA_VBUCK3_A_MASK		0x7f
+#define DA9062AA_BUCK3_SL_A_SHIFT	7
+#define DA9062AA_BUCK3_SL_A_MASK	BIT(7)
+
+/* DA9062AA_VLDO1_A = 0x0A9 */
+#define DA9062AA_VLDO1_A_SHIFT		0
+#define DA9062AA_VLDO1_A_MASK		0x3f
+#define DA9062AA_LDO1_SL_A_SHIFT	7
+#define DA9062AA_LDO1_SL_A_MASK		BIT(7)
+
+/* DA9062AA_VLDO2_A = 0x0AA */
+#define DA9062AA_VLDO2_A_SHIFT		0
+#define DA9062AA_VLDO2_A_MASK		0x3f
+#define DA9062AA_LDO2_SL_A_SHIFT	7
+#define DA9062AA_LDO2_SL_A_MASK		BIT(7)
+
+/* DA9062AA_VLDO3_A = 0x0AB */
+#define DA9062AA_VLDO3_A_SHIFT		0
+#define DA9062AA_VLDO3_A_MASK		0x3f
+#define DA9062AA_LDO3_SL_A_SHIFT	7
+#define DA9062AA_LDO3_SL_A_MASK		BIT(7)
+
+/* DA9062AA_VLDO4_A = 0x0AC */
+#define DA9062AA_VLDO4_A_SHIFT		0
+#define DA9062AA_VLDO4_A_MASK		0x3f
+#define DA9062AA_LDO4_SL_A_SHIFT	7
+#define DA9062AA_LDO4_SL_A_MASK		BIT(7)
+
+/* DA9062AA_VBUCK2_B = 0x0B4 */
+#define DA9062AA_VBUCK2_B_SHIFT		0
+#define DA9062AA_VBUCK2_B_MASK		0x7f
+#define DA9062AA_BUCK2_SL_B_SHIFT	7
+#define DA9062AA_BUCK2_SL_B_MASK	BIT(7)
+
+/* DA9062AA_VBUCK1_B = 0x0B5 */
+#define DA9062AA_VBUCK1_B_SHIFT		0
+#define DA9062AA_VBUCK1_B_MASK		0x7f
+#define DA9062AA_BUCK1_SL_B_SHIFT	7
+#define DA9062AA_BUCK1_SL_B_MASK	BIT(7)
+
+/* DA9062AA_VBUCK4_B = 0x0B6 */
+#define DA9062AA_VBUCK4_B_SHIFT		0
+#define DA9062AA_VBUCK4_B_MASK		0x7f
+#define DA9062AA_BUCK4_SL_B_SHIFT	7
+#define DA9062AA_BUCK4_SL_B_MASK	BIT(7)
+
+/* DA9062AA_VBUCK3_B = 0x0B8 */
+#define DA9062AA_VBUCK3_B_SHIFT		0
+#define DA9062AA_VBUCK3_B_MASK		0x7f
+#define DA9062AA_BUCK3_SL_B_SHIFT	7
+#define DA9062AA_BUCK3_SL_B_MASK	BIT(7)
+
+/* DA9062AA_VLDO1_B = 0x0BA */
+#define DA9062AA_VLDO1_B_SHIFT		0
+#define DA9062AA_VLDO1_B_MASK		0x3f
+#define DA9062AA_LDO1_SL_B_SHIFT	7
+#define DA9062AA_LDO1_SL_B_MASK		BIT(7)
+
+/* DA9062AA_VLDO2_B = 0x0BB */
+#define DA9062AA_VLDO2_B_SHIFT		0
+#define DA9062AA_VLDO2_B_MASK		0x3f
+#define DA9062AA_LDO2_SL_B_SHIFT	7
+#define DA9062AA_LDO2_SL_B_MASK		BIT(7)
+
+/* DA9062AA_VLDO3_B = 0x0BC */
+#define DA9062AA_VLDO3_B_SHIFT		0
+#define DA9062AA_VLDO3_B_MASK		0x3f
+#define DA9062AA_LDO3_SL_B_SHIFT	7
+#define DA9062AA_LDO3_SL_B_MASK		BIT(7)
+
+/* DA9062AA_VLDO4_B = 0x0BD */
+#define DA9062AA_VLDO4_B_SHIFT		0
+#define DA9062AA_VLDO4_B_MASK		0x3f
+#define DA9062AA_LDO4_SL_B_SHIFT	7
+#define DA9062AA_LDO4_SL_B_MASK		BIT(7)
+
+/* DA9062AA_BBAT_CONT = 0x0C5 */
+#define DA9062AA_BCHG_VSET_SHIFT	0
+#define DA9062AA_BCHG_VSET_MASK		0x0f
+#define DA9062AA_BCHG_ISET_SHIFT	4
+#define DA9062AA_BCHG_ISET_MASK		(0x0f << 4)
+
+/* DA9062AA_INTERFACE = 0x105 */
+#define DA9062AA_IF_BASE_ADDR_SHIFT	4
+#define DA9062AA_IF_BASE_ADDR_MASK	(0x0f << 4)
+
+/* DA9062AA_CONFIG_A = 0x106 */
+#define DA9062AA_PM_I_V_SHIFT		0
+#define DA9062AA_PM_I_V_MASK		0x01
+#define DA9062AA_PM_O_TYPE_SHIFT	2
+#define DA9062AA_PM_O_TYPE_MASK		BIT(2)
+#define DA9062AA_IRQ_TYPE_SHIFT		3
+#define DA9062AA_IRQ_TYPE_MASK		BIT(3)
+#define DA9062AA_PM_IF_V_SHIFT		4
+#define DA9062AA_PM_IF_V_MASK		BIT(4)
+#define DA9062AA_PM_IF_FMP_SHIFT	5
+#define DA9062AA_PM_IF_FMP_MASK		BIT(5)
+#define DA9062AA_PM_IF_HSM_SHIFT	6
+#define DA9062AA_PM_IF_HSM_MASK		BIT(6)
+
+/* DA9062AA_CONFIG_B = 0x107 */
+#define DA9062AA_VDD_FAULT_ADJ_SHIFT	0
+#define DA9062AA_VDD_FAULT_ADJ_MASK	0x0f
+#define DA9062AA_VDD_HYST_ADJ_SHIFT	4
+#define DA9062AA_VDD_HYST_ADJ_MASK	(0x07 << 4)
+
+/* DA9062AA_CONFIG_C = 0x108 */
+#define DA9062AA_BUCK_ACTV_DISCHRG_SHIFT	2
+#define DA9062AA_BUCK_ACTV_DISCHRG_MASK		BIT(2)
+#define DA9062AA_BUCK1_CLK_INV_SHIFT		3
+#define DA9062AA_BUCK1_CLK_INV_MASK		BIT(3)
+#define DA9062AA_BUCK4_CLK_INV_SHIFT		4
+#define DA9062AA_BUCK4_CLK_INV_MASK		BIT(4)
+#define DA9062AA_BUCK3_CLK_INV_SHIFT		6
+#define DA9062AA_BUCK3_CLK_INV_MASK		BIT(6)
+
+/* DA9062AA_CONFIG_D = 0x109 */
+#define DA9062AA_GPI_V_SHIFT		0
+#define DA9062AA_GPI_V_MASK		0x01
+#define DA9062AA_NIRQ_MODE_SHIFT	1
+#define DA9062AA_NIRQ_MODE_MASK		BIT(1)
+#define DA9062AA_SYSTEM_EN_RD_SHIFT	2
+#define DA9062AA_SYSTEM_EN_RD_MASK	BIT(2)
+#define DA9062AA_FORCE_RESET_SHIFT	5
+#define DA9062AA_FORCE_RESET_MASK	BIT(5)
+
+/* DA9062AA_CONFIG_E = 0x10A */
+#define DA9062AA_BUCK1_AUTO_SHIFT	0
+#define DA9062AA_BUCK1_AUTO_MASK	0x01
+#define DA9062AA_BUCK2_AUTO_SHIFT	1
+#define DA9062AA_BUCK2_AUTO_MASK	BIT(1)
+#define DA9062AA_BUCK4_AUTO_SHIFT	2
+#define DA9062AA_BUCK4_AUTO_MASK	BIT(2)
+#define DA9062AA_BUCK3_AUTO_SHIFT	4
+#define DA9062AA_BUCK3_AUTO_MASK	BIT(4)
+
+/* DA9062AA_CONFIG_G = 0x10C */
+#define DA9062AA_LDO1_AUTO_SHIFT	0
+#define DA9062AA_LDO1_AUTO_MASK		0x01
+#define DA9062AA_LDO2_AUTO_SHIFT	1
+#define DA9062AA_LDO2_AUTO_MASK		BIT(1)
+#define DA9062AA_LDO3_AUTO_SHIFT	2
+#define DA9062AA_LDO3_AUTO_MASK		BIT(2)
+#define DA9062AA_LDO4_AUTO_SHIFT	3
+#define DA9062AA_LDO4_AUTO_MASK		BIT(3)
+
+/* DA9062AA_CONFIG_H = 0x10D */
+#define DA9062AA_BUCK1_2_MERGE_SHIFT	3
+#define DA9062AA_BUCK1_2_MERGE_MASK	BIT(3)
+#define DA9062AA_BUCK2_OD_SHIFT		5
+#define DA9062AA_BUCK2_OD_MASK		BIT(5)
+#define DA9062AA_BUCK1_OD_SHIFT		6
+#define DA9062AA_BUCK1_OD_MASK		BIT(6)
+
+/* DA9062AA_CONFIG_I = 0x10E */
+#define DA9062AA_NONKEY_PIN_SHIFT	0
+#define DA9062AA_NONKEY_PIN_MASK	0x03
+#define DA9062AA_nONKEY_SD_SHIFT	2
+#define DA9062AA_nONKEY_SD_MASK		BIT(2)
+#define DA9062AA_WATCHDOG_SD_SHIFT	3
+#define DA9062AA_WATCHDOG_SD_MASK	BIT(3)
+#define DA9062AA_KEY_SD_MODE_SHIFT	4
+#define DA9062AA_KEY_SD_MODE_MASK	BIT(4)
+#define DA9062AA_HOST_SD_MODE_SHIFT	5
+#define DA9062AA_HOST_SD_MODE_MASK	BIT(5)
+#define DA9062AA_INT_SD_MODE_SHIFT	6
+#define DA9062AA_INT_SD_MODE_MASK	BIT(6)
+#define DA9062AA_LDO_SD_SHIFT		7
+#define DA9062AA_LDO_SD_MASK		BIT(7)
+
+/* DA9062AA_CONFIG_J = 0x10F */
+#define DA9062AA_KEY_DELAY_SHIFT	0
+#define DA9062AA_KEY_DELAY_MASK		0x03
+#define DA9062AA_SHUT_DELAY_SHIFT	2
+#define DA9062AA_SHUT_DELAY_MASK	(0x03 << 2)
+#define DA9062AA_RESET_DURATION_SHIFT	4
+#define DA9062AA_RESET_DURATION_MASK	(0x03 << 4)
+#define DA9062AA_TWOWIRE_TO_SHIFT	6
+#define DA9062AA_TWOWIRE_TO_MASK	BIT(6)
+#define DA9062AA_IF_RESET_SHIFT		7
+#define DA9062AA_IF_RESET_MASK		BIT(7)
+
+/* DA9062AA_CONFIG_K = 0x110 */
+#define DA9062AA_GPIO0_PUPD_SHIFT	0
+#define DA9062AA_GPIO0_PUPD_MASK	0x01
+#define DA9062AA_GPIO1_PUPD_SHIFT	1
+#define DA9062AA_GPIO1_PUPD_MASK	BIT(1)
+#define DA9062AA_GPIO2_PUPD_SHIFT	2
+#define DA9062AA_GPIO2_PUPD_MASK	BIT(2)
+#define DA9062AA_GPIO3_PUPD_SHIFT	3
+#define DA9062AA_GPIO3_PUPD_MASK	BIT(3)
+#define DA9062AA_GPIO4_PUPD_SHIFT	4
+#define DA9062AA_GPIO4_PUPD_MASK	BIT(4)
+
+/* DA9062AA_CONFIG_M = 0x112 */
+#define DA9062AA_NSHUTDOWN_PU_SHIFT	1
+#define DA9062AA_NSHUTDOWN_PU_MASK	BIT(1)
+#define DA9062AA_WDG_MODE_SHIFT		3
+#define DA9062AA_WDG_MODE_MASK		BIT(3)
+#define DA9062AA_OSC_FRQ_SHIFT		4
+#define DA9062AA_OSC_FRQ_MASK		(0x0f << 4)
+
+/* DA9062AA_TRIM_CLDR = 0x120 */
+#define DA9062AA_TRIM_CLDR_SHIFT	0
+#define DA9062AA_TRIM_CLDR_MASK		0xff
+
+/* DA9062AA_GP_ID_0 = 0x121 */
+#define DA9062AA_GP_0_SHIFT		0
+#define DA9062AA_GP_0_MASK		0xff
+
+/* DA9062AA_GP_ID_1 = 0x122 */
+#define DA9062AA_GP_1_SHIFT		0
+#define DA9062AA_GP_1_MASK		0xff
+
+/* DA9062AA_GP_ID_2 = 0x123 */
+#define DA9062AA_GP_2_SHIFT		0
+#define DA9062AA_GP_2_MASK		0xff
+
+/* DA9062AA_GP_ID_3 = 0x124 */
+#define DA9062AA_GP_3_SHIFT		0
+#define DA9062AA_GP_3_MASK		0xff
+
+/* DA9062AA_GP_ID_4 = 0x125 */
+#define DA9062AA_GP_4_SHIFT		0
+#define DA9062AA_GP_4_MASK		0xff
+
+/* DA9062AA_GP_ID_5 = 0x126 */
+#define DA9062AA_GP_5_SHIFT		0
+#define DA9062AA_GP_5_MASK		0xff
+
+/* DA9062AA_GP_ID_6 = 0x127 */
+#define DA9062AA_GP_6_SHIFT		0
+#define DA9062AA_GP_6_MASK		0xff
+
+/* DA9062AA_GP_ID_7 = 0x128 */
+#define DA9062AA_GP_7_SHIFT		0
+#define DA9062AA_GP_7_MASK		0xff
+
+/* DA9062AA_GP_ID_8 = 0x129 */
+#define DA9062AA_GP_8_SHIFT		0
+#define DA9062AA_GP_8_MASK		0xff
+
+/* DA9062AA_GP_ID_9 = 0x12A */
+#define DA9062AA_GP_9_SHIFT		0
+#define DA9062AA_GP_9_MASK		0xff
+
+/* DA9062AA_GP_ID_10 = 0x12B */
+#define DA9062AA_GP_10_SHIFT		0
+#define DA9062AA_GP_10_MASK		0xff
+
+/* DA9062AA_GP_ID_11 = 0x12C */
+#define DA9062AA_GP_11_SHIFT		0
+#define DA9062AA_GP_11_MASK		0xff
+
+/* DA9062AA_GP_ID_12 = 0x12D */
+#define DA9062AA_GP_12_SHIFT		0
+#define DA9062AA_GP_12_MASK		0xff
+
+/* DA9062AA_GP_ID_13 = 0x12E */
+#define DA9062AA_GP_13_SHIFT		0
+#define DA9062AA_GP_13_MASK		0xff
+
+/* DA9062AA_GP_ID_14 = 0x12F */
+#define DA9062AA_GP_14_SHIFT		0
+#define DA9062AA_GP_14_MASK		0xff
+
+/* DA9062AA_GP_ID_15 = 0x130 */
+#define DA9062AA_GP_15_SHIFT		0
+#define DA9062AA_GP_15_MASK		0xff
+
+/* DA9062AA_GP_ID_16 = 0x131 */
+#define DA9062AA_GP_16_SHIFT		0
+#define DA9062AA_GP_16_MASK		0xff
+
+/* DA9062AA_GP_ID_17 = 0x132 */
+#define DA9062AA_GP_17_SHIFT		0
+#define DA9062AA_GP_17_MASK		0xff
+
+/* DA9062AA_GP_ID_18 = 0x133 */
+#define DA9062AA_GP_18_SHIFT		0
+#define DA9062AA_GP_18_MASK		0xff
+
+/* DA9062AA_GP_ID_19 = 0x134 */
+#define DA9062AA_GP_19_SHIFT		0
+#define DA9062AA_GP_19_MASK		0xff
+
+/* DA9062AA_DEVICE_ID = 0x181 */
+#define DA9062AA_DEV_ID_SHIFT		0
+#define DA9062AA_DEV_ID_MASK		0xff
+
+/* DA9062AA_VARIANT_ID = 0x182 */
+#define DA9062AA_VRC_SHIFT		0
+#define DA9062AA_VRC_MASK		0x0f
+#define DA9062AA_MRC_SHIFT		4
+#define DA9062AA_MRC_MASK		(0x0f << 4)
+
+/* DA9062AA_CUSTOMER_ID = 0x183 */
+#define DA9062AA_CUST_ID_SHIFT		0
+#define DA9062AA_CUST_ID_MASK		0xff
+
+/* DA9062AA_CONFIG_ID = 0x184 */
+#define DA9062AA_CONFIG_REV_SHIFT	0
+#define DA9062AA_CONFIG_REV_MASK	0xff
+
+#endif /* __DA9062_H__ */
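
The register map above follows one convention throughout: each field gets a
*_SHIFT constant giving its bit offset and a *_MASK constant covering its
width at that offset, with multi-bit masks written pre-shifted (e.g.
(0x03 << 5)). A minimal sketch of the resulting access pattern, assuming a
valid regmap for the chip; the register address constants referenced in the
comments above (DA9062AA_CONTROL_C, DA9062AA_CONTROL_D) are defined earlier
in this header, and the two helper functions here are illustrative only:

#include <linux/regmap.h>

/* Write only the 3-bit TWDSCALE field; the mask keeps the rest of
 * CONTROL_D intact. */
static int da9062_set_twdscale(struct regmap *regmap, unsigned int scale)
{
	return regmap_update_bits(regmap, DA9062AA_CONTROL_D,
				  DA9062AA_TWDSCALE_MASK,
				  scale << DA9062AA_TWDSCALE_SHIFT);
}

/* Read back the 3-bit DEBOUNCING field (bits 2:0 of CONTROL_C). */
static int da9062_get_debouncing(struct regmap *regmap, unsigned int *out)
{
	unsigned int val;
	int ret;

	ret = regmap_read(regmap, DA9062AA_CONTROL_C, &val);
	if (ret)
		return ret;

	*out = (val & DA9062AA_DEBOUNCING_MASK) >> DA9062AA_DEBOUNCING_SHIFT;
	return 0;
}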
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index 79f4d82..621af82 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -51,6 +51,7 @@
 	DA9063_IRQ_COMP_1V2,
 	DA9063_IRQ_LDO_LIM,
 	DA9063_IRQ_REG_UVOV,
+	DA9063_IRQ_DVC_RDY,
 	DA9063_IRQ_VDD_MON,
 	DA9063_IRQ_WARN,
 	DA9063_IRQ_GPI0,
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
index 8feac78..2b300b4 100644
--- a/include/linux/mfd/lpc_ich.h
+++ b/include/linux/mfd/lpc_ich.h
@@ -20,12 +20,6 @@
 #ifndef LPC_ICH_H
 #define LPC_ICH_H
 
-/* Watchdog resources */
-#define ICH_RES_IO_TCO		0
-#define ICH_RES_IO_SMI		1
-#define ICH_RES_MEM_OFF		2
-#define ICH_RES_MEM_GCS_PMC	0
-
 /* GPIO resources */
 #define ICH_RES_GPIO	0
 #define ICH_RES_GPE0	1
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index cf5265b..45b8e8a 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -57,6 +57,7 @@
 	int irq;
 	struct irq_domain *irq_domain;
 	struct mutex irqlock;
+	u16 wake_mask[2];
 	u16 irq_masks_cur[2];
 	u16 irq_masks_cache[2];
 };
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index e7ecc12..09cebe5 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -88,7 +88,8 @@
 
 enum {
 	MLX4_CQE_L2_TUNNEL_IPOK		= 1 << 31,
-	MLX4_CQE_VLAN_PRESENT_MASK	= 1 << 29,
+	MLX4_CQE_CVLAN_PRESENT_MASK	= 1 << 29,
+	MLX4_CQE_SVLAN_PRESENT_MASK	= 1 << 30,
 	MLX4_CQE_L2_TUNNEL		= 1 << 27,
 	MLX4_CQE_L2_TUNNEL_CSUM		= 1 << 26,
 	MLX4_CQE_L2_TUNNEL_IPV4		= 1 << 25,
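
The old single MLX4_CQE_VLAN_PRESENT_MASK is split: bit 29 now reports a
customer tag (C-VLAN) and the new bit 30 a service tag (S-VLAN). A hedged
sketch of testing them, assuming the flags word has already been converted
from the CQE's big-endian on-wire layout (the helper name is illustrative):

static inline bool cqe_flags_have_vlan(u32 flags)
{
	/* True if the completion carries either kind of VLAN tag. */
	return flags & (MLX4_CQE_CVLAN_PRESENT_MASK |
			MLX4_CQE_SVLAN_PRESENT_MASK);
}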
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index fd13c1c..bcbf8c7 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -211,6 +211,8 @@
 	MLX4_DEV_CAP_FLAG2_ETS_CFG		= 1LL <<  26,
 	MLX4_DEV_CAP_FLAG2_PORT_BEACON		= 1LL <<  27,
 	MLX4_DEV_CAP_FLAG2_IGNORE_FCS		= 1LL <<  28,
+	MLX4_DEV_CAP_FLAG2_PHV_EN		= 1LL <<  29,
+	MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN	= 1LL <<  30,
 };
 
 enum {
@@ -581,6 +583,7 @@
 	u64			phys_port_id[MLX4_MAX_PORTS + 1];
 	int			tunnel_offload_mode;
 	u8			rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
+	u8			phv_bit[MLX4_MAX_PORTS + 1];
 	u8			alloc_res_qp_mask;
 	u32			dmfs_high_rate_qpn_base;
 	u32			dmfs_high_rate_qpn_range;
@@ -1332,6 +1335,8 @@
 int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
 			    u8 ignore_fcs_value);
 int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
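
set_phv_bit()/get_phv_bit() expose the new per-port PHV control, which is
only usable when the device advertises MLX4_DEV_CAP_FLAG2_PHV_EN. A hedged
usage sketch; only the symbols added in this hunk plus the existing
caps.flags2 field are assumed, and the wrapper itself is illustrative:

static int example_enable_phv(struct mlx4_dev *dev, u8 port)
{
	int phv, err;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN))
		return -EOPNOTSUPP;

	err = get_phv_bit(dev, port, &phv);
	if (err)
		return err;

	return phv ? 0 : set_phv_bit(dev, port, 1);
}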
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 6fed539..de45a51 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -272,7 +272,8 @@
 	MLX4_WQE_CTRL_SOLICITED		= 1 << 1,
 	MLX4_WQE_CTRL_IP_CSUM		= 1 << 4,
 	MLX4_WQE_CTRL_TCP_UDP_CSUM	= 1 << 5,
-	MLX4_WQE_CTRL_INS_VLAN		= 1 << 6,
+	MLX4_WQE_CTRL_INS_CVLAN		= 1 << 6,
+	MLX4_WQE_CTRL_INS_SVLAN		= 1 << 7,
 	MLX4_WQE_CTRL_STRONG_ORDER	= 1 << 7,
 	MLX4_WQE_CTRL_FORCE_LOOPBACK	= 1 << 0,
 };
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index b943cd9..250b1ff 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1182,6 +1182,16 @@
 	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
 };
 
+enum {
+	MLX5_IEEE_802_3_COUNTERS_GROUP	      = 0x0,
+	MLX5_RFC_2863_COUNTERS_GROUP	      = 0x1,
+	MLX5_RFC_2819_COUNTERS_GROUP	      = 0x2,
+	MLX5_RFC_3635_COUNTERS_GROUP	      = 0x3,
+	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
+	MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
+	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11
+};
+
 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
 {
 	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5722d88..8b6d6f2 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -103,6 +103,8 @@
 	MLX5_REG_PMTU		 = 0x5003,
 	MLX5_REG_PTYS		 = 0x5004,
 	MLX5_REG_PAOS		 = 0x5006,
+	MLX5_REG_PFCC            = 0x5007,
+	MLX5_REG_PPCNT		 = 0x5008,
 	MLX5_REG_PMAOS		 = 0x5012,
 	MLX5_REG_PUDE		 = 0x5009,
 	MLX5_REG_PMPE		 = 0x5010,
@@ -151,8 +153,8 @@
 };
 
 enum mlx5_port_status {
-	MLX5_PORT_UP        = 1 << 1,
-	MLX5_PORT_DOWN      = 1 << 2,
+	MLX5_PORT_UP        = 1,
+	MLX5_PORT_DOWN      = 2,
 };
 
 struct mlx5_uuar_info {
@@ -380,7 +382,7 @@
 	u32			index;
 	struct list_head	bf_list;
 	unsigned		free_bf_bmap;
-	void __iomem	       *wc_map;
+	void __iomem	       *bf_map;
 	void __iomem	       *map;
 };
 
@@ -435,6 +437,8 @@
 	struct mlx5_uuar_info	uuari;
 	MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
 
+	struct io_mapping	*bf_mapping;
+
 	/* pages stuff */
 	struct workqueue_struct *pg_wq;
 	struct rb_root		page_root;
@@ -463,6 +467,10 @@
 	/* end: mr staff */
 
 	/* start: alloc staff */
+	/* protect buffer allocation according to NUMA node */
+	struct mutex            alloc_mutex;
+	int                     numa_node;
+
 	struct mutex            pgdir_mutex;
 	struct list_head        pgdir_list;
 	/* end: alloc staff */
@@ -672,6 +680,8 @@
 void  __init mlx5_health_init(void);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+			struct mlx5_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
 struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -752,9 +762,10 @@
 			       u8 local_port);
 int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
 			int proto_mask);
-int mlx5_set_port_status(struct mlx5_core_dev *dev,
-			 enum mlx5_port_status status);
-int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+			       enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+				 enum mlx5_port_status *status);
 
 int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
 void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
@@ -764,6 +775,10 @@
 int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
 			      u8 *vl_hw_cap, u8 local_port);
 
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+			  u32 *rx_pause, u32 *tx_pause);
+
 int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
@@ -773,6 +788,8 @@
 int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
+		       int node);
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
 
 const char *mlx5_command_str(int command);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6d2f6fe..dd20974 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1936,9 +1936,9 @@
 };
 
 enum {
-	MLX5_TIRC_RX_HASH_FN_HASH_NONE           = 0x0,
-	MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8  = 0x1,
-	MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ       = 0x2,
+	MLX5_RX_HASH_FN_NONE           = 0x0,
+	MLX5_RX_HASH_FN_INVERTED_XOR8  = 0x1,
+	MLX5_RX_HASH_FN_TOEPLITZ       = 0x2,
 };
 
 enum {
@@ -4050,6 +4050,13 @@
 	struct mlx5_ifc_tisc_bits ctx;
 };
 
+struct mlx5_ifc_modify_tir_bitmask_bits {
+	u8         reserved[0x20];
+
+	u8         reserved1[0x1f];
+	u8         lro[0x1];
+};
+
 struct mlx5_ifc_modify_tir_out_bits {
 	u8         status[0x8];
 	u8         reserved_0[0x18];
@@ -4071,7 +4078,7 @@
 
 	u8         reserved_3[0x20];
 
-	u8         modify_bitmask[0x40];
+	struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
 
 	u8         reserved_4[0x40];
 
@@ -4116,6 +4123,13 @@
 	u8         reserved_1[0x40];
 };
 
+struct mlx5_ifc_rqt_bitmask_bits {
+	u8         reserved[0x20];
+
+	u8         reserved1[0x1f];
+	u8         rqn_list[0x1];
+};
+
 struct mlx5_ifc_modify_rqt_in_bits {
 	u8         opcode[0x10];
 	u8         reserved_0[0x10];
@@ -4128,7 +4142,7 @@
 
 	u8         reserved_3[0x20];
 
-	u8         modify_bitmask[0x40];
+	struct mlx5_ifc_rqt_bitmask_bits bitmask;
 
 	u8         reserved_4[0x40];
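
In mlx5_ifc.h each struct mirrors a firmware command layout bit-for-bit:
the bracketed sizes are bit widths, and fields are accessed through the
MLX5_SET()/MLX5_GET() helpers rather than by direct member access.
Replacing the opaque modify_bitmask[0x40] with named bitmask structs lets
callers select individual modify flags by field name. A hedged sketch
using the existing MLX5_ST_SZ_DW() sizing helper (the function wrapper is
illustrative; the filled buffer would then be sent as a MODIFY_TIR
command):

static void example_mark_lro_modified(void)
{
	u32 in[MLX5_ST_SZ_DW(modify_tir_in)] = {0};

	/* Mark only the TIR's LRO context field as being modified. */
	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
}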
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bf6f117..8b257c4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -124,8 +124,10 @@
 #define VM_MAYSHARE	0x00000080
 
 #define VM_GROWSDOWN	0x00000100	/* general info on the segment */
+#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
 #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
+#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */
 
 #define VM_LOCKED	0x00002000
 #define VM_IO           0x00004000	/* Memory mapped I/O or similar */
@@ -245,6 +247,7 @@
 struct vm_operations_struct {
 	void (*open)(struct vm_area_struct * area);
 	void (*close)(struct vm_area_struct * area);
+	int (*mremap)(struct vm_area_struct * area);
 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
 
@@ -1833,7 +1836,7 @@
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
 	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
-	struct mempolicy *);
+	struct mempolicy *, struct vm_userfaultfd_ctx);
 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
 extern int split_vma(struct mm_struct *,
 	struct vm_area_struct *, unsigned long addr, int new_below);
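
vma_merge() gains the VMA's userfaultfd context as a final by-value
argument so that only areas with matching contexts are merged (see the
struct vm_userfaultfd_ctx definition added to mm_types.h below). A hedged
fragment of an updated call site, with the surrounding variables following
the usual mm/ calling pattern:

	new_vma = vma_merge(mm, prev, addr, end, vma->vm_flags,
			    vma->anon_vma, vma->vm_file, pgoff,
			    vma_policy(vma), vma->vm_userfaultfd_ctx);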
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 1554957..c8d0a73 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -256,6 +256,16 @@
 						* this region */
 };
 
+#ifdef CONFIG_USERFAULTFD
+#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
+struct vm_userfaultfd_ctx {
+	struct userfaultfd_ctx *ctx;
+};
+#else /* CONFIG_USERFAULTFD */
+#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
+struct vm_userfaultfd_ctx {};
+#endif /* CONFIG_USERFAULTFD */
+
 /*
  * This struct defines a memory VMM memory area. There is one of these
  * per VM-area/task.  A VM area is any part of the process virtual memory
@@ -322,6 +332,7 @@
 #ifdef CONFIG_NUMA
 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
 #endif
+	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
 };
 
 struct core_thread {
@@ -543,6 +554,7 @@
 	TLB_REMOTE_SHOOTDOWN,
 	TLB_LOCAL_SHOOTDOWN,
 	TLB_LOCAL_MM_SHOOTDOWN,
+	TLB_REMOTE_SEND_IPI,
 	NR_TLB_FLUSH_REASONS,
 };
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 754c259..ac00e20 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -690,14 +690,6 @@
 #endif
 };
 
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-struct node_active_region {
-	unsigned long start_pfn;
-	unsigned long end_pfn;
-	int nid;
-};
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-
 #ifndef CONFIG_DISCONTIGMEM
 /* The array of struct pages - for discontigmem use pgdat->lmem_map */
 extern struct page *mem_map;
diff --git a/include/linux/mpls_iptunnel.h b/include/linux/mpls_iptunnel.h
new file mode 100644
index 0000000..ef29eb2
--- /dev/null
+++ b/include/linux/mpls_iptunnel.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_MPLS_IPTUNNEL_H
+#define _LINUX_MPLS_IPTUNNEL_H
+
+#include <uapi/linux/mpls_iptunnel.h>
+
+#endif  /* _LINUX_MPLS_IPTUNNEL_H */
diff --git a/include/linux/net.h b/include/linux/net.h
index 04aa068..049d4b0 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -239,8 +239,16 @@
 	net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
 #define net_info_ratelimited(fmt, ...)				\
 	net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
+#if defined(DEBUG)
 #define net_dbg_ratelimited(fmt, ...)				\
 	net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
+#else
+#define net_dbg_ratelimited(fmt, ...)				\
+	do {							\
+		if (0)						\
+			no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
+	} while (0)
+#endif
 
 bool __net_get_random_once(void *buf, int nbytes, bool *done,
 			   struct static_key *done_key);
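
With DEBUG unset, net_dbg_ratelimited() now compiles to nothing: the
if (0) branch is discarded, so no ratelimit state is allocated and the
arguments are never evaluated at run time, while no_printk() still forces
printf-style type checking of the format string. The same idiom in
isolation (the macro name is illustrative):

#define my_dbg(fmt, ...)						\
	do {								\
		if (0)							\
			no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
	} while (0)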
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e20979d..88a0069 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -766,6 +766,13 @@
 	unsigned char id_len;
 };
 
+static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
+					    struct netdev_phys_item_id *b)
+{
+	return a->id_len == b->id_len &&
+	       memcmp(a->id, b->id, a->id_len) == 0;
+}
+
 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
 				       struct sk_buff *skb);
 
@@ -1041,6 +1048,12 @@
  *	TX queue.
  * int (*ndo_get_iflink)(const struct net_device *dev);
  *	Called to get the iflink value of this device.
+ * int (*ndo_change_proto_down)(struct net_device *dev,
+ *				  bool proto_down);
+ *	This function is used to pass protocol port error state information
+ *	to the switch driver. The switch driver can react to the proto_down
+ *	by doing a phys down on the associated switch port.
+ *
  */
 struct net_device_ops {
 	int			(*ndo_init)(struct net_device *dev);
@@ -1211,6 +1224,8 @@
 						      int queue_index,
 						      u32 maxrate);
 	int			(*ndo_get_iflink)(const struct net_device *dev);
+	int			(*ndo_change_proto_down)(struct net_device *dev,
+							 bool proto_down);
 };
 
 /**
@@ -1225,13 +1240,8 @@
  *
  * @IFF_802_1Q_VLAN: 802.1Q VLAN device
  * @IFF_EBRIDGE: Ethernet bridging device
- * @IFF_SLAVE_INACTIVE: bonding slave not the curr. active
- * @IFF_MASTER_8023AD: bonding master, 802.3ad
- * @IFF_MASTER_ALB: bonding master, balance-alb
  * @IFF_BONDING: bonding master or slave
- * @IFF_SLAVE_NEEDARP: need ARPs for validation
  * @IFF_ISATAP: ISATAP interface (RFC4214)
- * @IFF_MASTER_ARPMON: bonding master, ARP mon in use
  * @IFF_WAN_HDLC: WAN HDLC device
  * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
  *	release skb->dst
@@ -1247,44 +1257,40 @@
  * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
  *	change when it's running
  * @IFF_MACVLAN: Macvlan device
+ * @IFF_VRF_MASTER: device is a VRF master
+ * @IFF_NO_QUEUE: device can run without qdisc attached
+ * @IFF_OPENVSWITCH: device is a Open vSwitch master
  */
 enum netdev_priv_flags {
 	IFF_802_1Q_VLAN			= 1<<0,
 	IFF_EBRIDGE			= 1<<1,
-	IFF_SLAVE_INACTIVE		= 1<<2,
-	IFF_MASTER_8023AD		= 1<<3,
-	IFF_MASTER_ALB			= 1<<4,
-	IFF_BONDING			= 1<<5,
-	IFF_SLAVE_NEEDARP		= 1<<6,
-	IFF_ISATAP			= 1<<7,
-	IFF_MASTER_ARPMON		= 1<<8,
-	IFF_WAN_HDLC			= 1<<9,
-	IFF_XMIT_DST_RELEASE		= 1<<10,
-	IFF_DONT_BRIDGE			= 1<<11,
-	IFF_DISABLE_NETPOLL		= 1<<12,
-	IFF_MACVLAN_PORT		= 1<<13,
-	IFF_BRIDGE_PORT			= 1<<14,
-	IFF_OVS_DATAPATH		= 1<<15,
-	IFF_TX_SKB_SHARING		= 1<<16,
-	IFF_UNICAST_FLT			= 1<<17,
-	IFF_TEAM_PORT			= 1<<18,
-	IFF_SUPP_NOFCS			= 1<<19,
-	IFF_LIVE_ADDR_CHANGE		= 1<<20,
-	IFF_MACVLAN			= 1<<21,
-	IFF_XMIT_DST_RELEASE_PERM	= 1<<22,
-	IFF_IPVLAN_MASTER		= 1<<23,
-	IFF_IPVLAN_SLAVE		= 1<<24,
+	IFF_BONDING			= 1<<2,
+	IFF_ISATAP			= 1<<3,
+	IFF_WAN_HDLC			= 1<<4,
+	IFF_XMIT_DST_RELEASE		= 1<<5,
+	IFF_DONT_BRIDGE			= 1<<6,
+	IFF_DISABLE_NETPOLL		= 1<<7,
+	IFF_MACVLAN_PORT		= 1<<8,
+	IFF_BRIDGE_PORT			= 1<<9,
+	IFF_OVS_DATAPATH		= 1<<10,
+	IFF_TX_SKB_SHARING		= 1<<11,
+	IFF_UNICAST_FLT			= 1<<12,
+	IFF_TEAM_PORT			= 1<<13,
+	IFF_SUPP_NOFCS			= 1<<14,
+	IFF_LIVE_ADDR_CHANGE		= 1<<15,
+	IFF_MACVLAN			= 1<<16,
+	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
+	IFF_IPVLAN_MASTER		= 1<<18,
+	IFF_IPVLAN_SLAVE		= 1<<19,
+	IFF_VRF_MASTER			= 1<<20,
+	IFF_NO_QUEUE			= 1<<21,
+	IFF_OPENVSWITCH			= 1<<22,
 };
 
 #define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
 #define IFF_EBRIDGE			IFF_EBRIDGE
-#define IFF_SLAVE_INACTIVE		IFF_SLAVE_INACTIVE
-#define IFF_MASTER_8023AD		IFF_MASTER_8023AD
-#define IFF_MASTER_ALB			IFF_MASTER_ALB
 #define IFF_BONDING			IFF_BONDING
-#define IFF_SLAVE_NEEDARP		IFF_SLAVE_NEEDARP
 #define IFF_ISATAP			IFF_ISATAP
-#define IFF_MASTER_ARPMON		IFF_MASTER_ARPMON
 #define IFF_WAN_HDLC			IFF_WAN_HDLC
 #define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
 #define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
@@ -1301,6 +1307,9 @@
 #define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
 #define IFF_IPVLAN_MASTER		IFF_IPVLAN_MASTER
 #define IFF_IPVLAN_SLAVE		IFF_IPVLAN_SLAVE
+#define IFF_VRF_MASTER			IFF_VRF_MASTER
+#define IFF_NO_QUEUE			IFF_NO_QUEUE
+#define IFF_OPENVSWITCH			IFF_OPENVSWITCH
 
 /**
  *	struct net_device - The DEVICE structure.
@@ -1417,6 +1426,7 @@
  *	@dn_ptr:	DECnet specific data
  *	@ip6_ptr:	IPv6 specific data
  *	@ax25_ptr:	AX.25 specific data
+ *	@vrf_ptr:	VRF specific data
  *	@ieee80211_ptr:	IEEE 802.11 specific data, assign before registering
  *
  *	@last_rx:	Time of last Rx
@@ -1448,6 +1458,8 @@
  *
  *	@xps_maps:	XXX: need comments on this one
  *
+ *	@offload_fwd_mark:	Offload device forwarding mark
+ *
  *	@trans_start:		Time (in jiffies) of last Tx
  *	@watchdog_timeo:	Represents the timeout that is used by
  *				the watchdog ( see dev_watchdog() )
@@ -1502,6 +1514,10 @@
  *
  *	@qdisc_tx_busylock:	XXX: need comments on this one
  *
+ *	@proto_down:	protocol port state information can be sent to the
+ *			switch driver and used to set the phys state of the
+ *			switch port.
+ *
  *	FIXME: cleanup struct net_device such that network protocol info
  *	moves out.
  */
@@ -1629,6 +1645,7 @@
 	struct dn_dev __rcu     *dn_ptr;
 	struct inet6_dev __rcu	*ip6_ptr;
 	void			*ax25_ptr;
+	struct net_vrf_dev __rcu *vrf_ptr;
 	struct wireless_dev	*ieee80211_ptr;
 	struct wpan_dev		*ieee802154_ptr;
 #if IS_ENABLED(CONFIG_MPLS_ROUTING)
@@ -1685,6 +1702,10 @@
 	struct xps_dev_maps __rcu *xps_maps;
 #endif
 
+#ifdef CONFIG_NET_SWITCHDEV
+	u32			offload_fwd_mark;
+#endif
+
 	/* These may be needed for future network-power-down code. */
 
 	/*
@@ -1762,6 +1783,7 @@
 #endif
 	struct phy_device *phydev;
 	struct lock_class_key *qdisc_tx_busylock;
+	bool proto_down;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
 
@@ -2093,6 +2115,13 @@
 	unsigned int flags_changed;
 };
 
+struct netdev_notifier_changeupper_info {
+	struct netdev_notifier_info info; /* must be first */
+	struct net_device *upper_dev; /* new upper dev */
+	bool master; /* is upper dev master */
+	bool linking; /* is the notification for link or unlink */
+};
+
 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
 					     struct net_device *dev)
 {
@@ -2277,8 +2306,7 @@
 
 static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
 {
-	return (NAPI_GRO_CB(skb)->gro_remcsum_start - skb_headroom(skb) ==
-		skb_gro_offset(skb));
+	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
 }
 
 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
@@ -2374,37 +2402,58 @@
 	grc->delta = 0;
 }
 
-static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
-					   int start, int offset,
-					   struct gro_remcsum *grc,
-					   bool nopartial)
+static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+					    unsigned int off, size_t hdrlen,
+					    int start, int offset,
+					    struct gro_remcsum *grc,
+					    bool nopartial)
 {
 	__wsum delta;
+	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
 
 	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
 
 	if (!nopartial) {
-		NAPI_GRO_CB(skb)->gro_remcsum_start =
-		    ((unsigned char *)ptr + start) - skb->head;
-		return;
+		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
+		return ptr;
 	}
 
-	delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
+	ptr = skb_gro_header_fast(skb, off);
+	if (skb_gro_header_hard(skb, off + plen)) {
+		ptr = skb_gro_header_slow(skb, off + plen, off);
+		if (!ptr)
+			return NULL;
+	}
+
+	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
+			       start, offset);
 
 	/* Adjust skb->csum since we changed the packet */
 	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
 
-	grc->offset = (ptr + offset) - (void *)skb->head;
+	grc->offset = off + hdrlen + offset;
 	grc->delta = delta;
+
+	return ptr;
 }
 
 static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
 					   struct gro_remcsum *grc)
 {
+	void *ptr;
+	size_t plen = grc->offset + sizeof(u16);
+
 	if (!grc->delta)
 		return;
 
-	remcsum_unadjust((__sum16 *)(skb->head + grc->offset), grc->delta);
+	ptr = skb_gro_header_fast(skb, grc->offset);
+	if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
+		ptr = skb_gro_header_slow(skb, plen, grc->offset);
+		if (!ptr)
+			return;
+	}
+
+	remcsum_unadjust((__sum16 *)ptr, grc->delta);
 }
 
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -2982,6 +3031,7 @@
 			 struct netdev_phys_item_id *ppid);
 int dev_get_phys_port_name(struct net_device *dev,
 			   char *name, size_t len);
+int dev_change_proto_down(struct net_device *dev, bool proto_down);
 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 				    struct netdev_queue *txq, int *ret);
@@ -3781,6 +3831,42 @@
 	return dev->priv_flags & IFF_SUPP_NOFCS;
 }
 
+static inline bool netif_is_vrf(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_VRF_MASTER;
+}
+
+static inline bool netif_is_bridge_master(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_EBRIDGE;
+}
+
+static inline bool netif_is_ovs_master(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_OPENVSWITCH;
+}
+
+static inline bool netif_index_is_vrf(struct net *net, int ifindex)
+{
+	bool rc = false;
+
+#if IS_ENABLED(CONFIG_NET_VRF)
+	struct net_device *dev;
+
+	if (ifindex == 0)
+		return false;
+
+	rcu_read_lock();
+
+	dev = dev_get_by_index_rcu(net, ifindex);
+	if (dev)
+		rc = netif_is_vrf(dev);
+
+	rcu_read_unlock();
+#endif
+	return rc;
+}
+
 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
 static inline void netif_keep_dst(struct net_device *dev)
 {
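
proto_down gives protocols a generic way to tell a switch driver that a
port has failed at the protocol level; a driver implementing
ndo_change_proto_down can then take the physical port down. A hedged
sketch of the calling side (the wrapper is illustrative, and RTNL is
assumed held as for the other dev_change_*() helpers):

static int example_report_port_error(struct net_device *dev, bool failed)
{
	/* Returns -EOPNOTSUPP when the driver does not implement
	 * ndo_change_proto_down. */
	return dev_change_proto_down(dev, failed);
}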
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 00050df..36a6525 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -11,6 +11,8 @@
 #include <linux/list.h>
 #include <linux/static_key.h>
 #include <linux/netfilter_defs.h>
+#include <linux/netdevice.h>
+#include <net/net_namespace.h>
 
 #ifdef CONFIG_NETFILTER
 static inline int NF_DROP_GETERR(int verdict)
@@ -118,6 +120,13 @@
 };
 
 /* Function to register/unregister hook points. */
+int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
+void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
+int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+			  unsigned int n);
+void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+			     unsigned int n);
+
 int nf_register_hook(struct nf_hook_ops *reg);
 void nf_unregister_hook(struct nf_hook_ops *reg);
 int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
@@ -128,33 +137,26 @@
 int nf_register_sockopt(struct nf_sockopt_ops *reg);
 void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
 
-extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-
 #ifdef HAVE_JUMP_LABEL
 extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 
-static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+static inline bool nf_hook_list_active(struct list_head *hook_list,
 				       u_int8_t pf, unsigned int hook)
 {
 	if (__builtin_constant_p(pf) &&
 	    __builtin_constant_p(hook))
 		return static_key_false(&nf_hooks_needed[pf][hook]);
 
-	return !list_empty(nf_hook_list);
+	return !list_empty(hook_list);
 }
 #else
-static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+static inline bool nf_hook_list_active(struct list_head *hook_list,
 				       u_int8_t pf, unsigned int hook)
 {
-	return !list_empty(nf_hook_list);
+	return !list_empty(hook_list);
 }
 #endif
 
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
-{
-	return nf_hook_list_active(&nf_hooks[pf][hook], pf, hook);
-}
-
 int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
 
 /**
@@ -172,10 +174,13 @@
 				 int (*okfn)(struct sock *, struct sk_buff *),
 				 int thresh)
 {
-	if (nf_hooks_active(pf, hook)) {
+	struct net *net = dev_net(indev ? indev : outdev);
+	struct list_head *hook_list = &net->nf.hooks[pf][hook];
+
+	if (nf_hook_list_active(hook_list, pf, hook)) {
 		struct nf_hook_state state;
 
-		nf_hook_state_init(&state, &nf_hooks[pf][hook], hook, thresh,
+		nf_hook_state_init(&state, hook_list, hook, thresh,
 				   pf, indev, outdev, sk, okfn);
 		return nf_hook_slow(skb, &state);
 	}
@@ -363,6 +368,8 @@
 #endif /*CONFIG_NETFILTER*/
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#include <linux/netfilter/nf_conntrack_zones_common.h>
+
 extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
 void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
 extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
@@ -385,4 +392,15 @@
 static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
 #endif
 
+/**
+ * nf_skb_duplicated - TEE target has sent a packet
+ *
+ * When an xtables target sends a packet, the OUTPUT and POSTROUTING
+ * hooks are traversed again, i.e. nft and xtables are invoked recursively.
+ *
+ * This is used by the xtables TEE target to prevent the duplicated skb from
+ * being duplicated again.
+ */
+DECLARE_PER_CPU(bool, nf_skb_duplicated);
+
 #endif /*__LINUX_NETFILTER_H*/
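
The new nf_register_net_hook() family attaches hooks to a single network
namespace's list (net->nf.hooks) instead of the old global nf_hooks array
removed above. A hedged sketch of per-namespace registration with the
nf_hookfn signature used at this point in the tree; the hook body,
protocol family, and priority are illustrative:

static unsigned int example_hook(const struct nf_hook_ops *ops,
				 struct sk_buff *skb,
				 const struct nf_hook_state *state)
{
	return NF_ACCEPT;	/* pass everything through */
}

static const struct nf_hook_ops example_ops = {
	.hook		= example_hook,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_PRE_ROUTING,
	.priority	= NF_IP_PRI_FIRST,
};

static int __net_init example_net_init(struct net *net)
{
	return nf_register_net_hook(net, &example_ops);
}

static void __net_exit example_net_exit(struct net *net)
{
	nf_unregister_net_hook(net, &example_ops);
}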
diff --git a/include/linux/netfilter/nf_conntrack_zones_common.h b/include/linux/netfilter/nf_conntrack_zones_common.h
new file mode 100644
index 0000000..5d7cf36
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_zones_common.h
@@ -0,0 +1,23 @@
+#ifndef _NF_CONNTRACK_ZONES_COMMON_H
+#define _NF_CONNTRACK_ZONES_COMMON_H
+
+#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h>
+
+#define NF_CT_DEFAULT_ZONE_ID	0
+
+#define NF_CT_ZONE_DIR_ORIG	(1 << IP_CT_DIR_ORIGINAL)
+#define NF_CT_ZONE_DIR_REPL	(1 << IP_CT_DIR_REPLY)
+
+#define NF_CT_DEFAULT_ZONE_DIR	(NF_CT_ZONE_DIR_ORIG | NF_CT_ZONE_DIR_REPL)
+
+#define NF_CT_FLAG_MARK		1
+
+struct nf_conntrack_zone {
+	u16	id;
+	u8	flags;
+	u8	dir;
+};
+
+extern const struct nf_conntrack_zone nf_ct_zone_dflt;
+
+#endif /* _NF_CONNTRACK_ZONES_COMMON_H */
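
A zone can now be confined to one conntrack direction through the dir
field; the defaults above cover both directions. A trivial hedged
initialization (the identifier and zone id are illustrative):

static const struct nf_conntrack_zone example_zone = {
	.id	= 1,			/* any non-default zone id */
	.dir	= NF_CT_ZONE_DIR_ORIG,	/* match originals only */
};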
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index 6ec9757..80ca889 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -2,6 +2,7 @@
 #define _NFNL_ACCT_H_
 
 #include <uapi/linux/netfilter/nfnetlink_acct.h>
+#include <net/net_namespace.h>
 
 enum {
 	NFACCT_NO_QUOTA		= -1,
@@ -11,7 +12,7 @@
 
 struct nf_acct;
 
-struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
 void nfnl_acct_put(struct nf_acct *acct);
 void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
 extern int nfnl_acct_overquota(const struct sk_buff *skb,
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 286098a..b006b71 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -3,6 +3,7 @@
 
 
 #include <linux/netdevice.h>
+#include <linux/static_key.h>
 #include <uapi/linux/netfilter/x_tables.h>
 
 /**
@@ -222,7 +223,6 @@
 	 * @stacksize jumps (number of user chains) can possibly be made.
 	 */
 	unsigned int stacksize;
-	unsigned int __percpu *stackptr;
 	void ***jumpstack;
 
 	unsigned char entries[0] __aligned(8);
@@ -281,6 +281,12 @@
  */
 DECLARE_PER_CPU(seqcount_t, xt_recseq);
 
+/* xt_tee_enabled - true if x_tables needs to handle reentrancy
+ *
+ * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
+ */
+extern struct static_key xt_tee_enabled;
+
 /**
  * xt_write_recseq_begin - start of a write section
  *
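
xt_tee_enabled is a static key: it is expected to be flipped with
static_key_slow_inc()/static_key_slow_dec() as TEE rules are added and
removed, so rulesets without -j TEE pay only a patched no-op branch on the
packet path. A hedged sketch of the fast-path test (the wrapper is
illustrative):

static bool example_need_tee_reentrancy(void)
{
	/* Compiles to a no-op branch until a TEE rule enables the key. */
	return static_key_false(&xt_tee_enabled);
}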
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 6d80fc6..2437b8a 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -17,9 +17,6 @@
 
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 
-#define BRNF_BRIDGED_DNAT		0x02
-#define BRNF_NF_BRIDGE_PREROUTING	0x08
-
 int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
 
 static inline void br_drop_fake_rtable(struct sk_buff *skb)
@@ -63,8 +60,17 @@
 {
 	return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
 }
+
+static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
+{
+	return skb->nf_bridge && skb->nf_bridge->in_prerouting;
+}
 #else
 #define br_drop_fake_rtable(skb)	        do { } while (0)
+static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
+{
+	return false;
+}
 #endif /* CONFIG_BRIDGE_NETFILTER */
 
 #endif
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 8b7d28f..7715746 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -9,15 +9,6 @@
 
 #include <uapi/linux/netfilter_ipv6.h>
 
-
-#ifdef CONFIG_NETFILTER
-int ip6_route_me_harder(struct sk_buff *skb);
-__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
-			unsigned int dataoff, u_int8_t protocol);
-
-int ipv6_netfilter_init(void);
-void ipv6_netfilter_fini(void);
-
 /*
  * Hook functions for ipv6 to allow xt_* modules to be built-in even
  * if IPv6 is a module.
@@ -30,6 +21,14 @@
 			int (*output)(struct sock *, struct sk_buff *));
 };
 
+#ifdef CONFIG_NETFILTER
+int ip6_route_me_harder(struct sk_buff *skb);
+__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+			unsigned int dataoff, u_int8_t protocol);
+
+int ipv6_netfilter_init(void);
+void ipv6_netfilter_fini(void);
+
 extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
 static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
 {
@@ -39,6 +38,7 @@
 #else /* CONFIG_NETFILTER */
 static inline int ipv6_netfilter_init(void) { return 0; }
 static inline void ipv6_netfilter_fini(void) { return; }
+static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) { return NULL; }
 #endif /* CONFIG_NETFILTER */
 
 #endif /*__LINUX_IP6_NETFILTER_H*/
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index f94da0e..a91adf6 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -27,9 +27,7 @@
 #if defined(CONFIG_HARDLOCKUP_DETECTOR)
 extern void hardlockup_detector_disable(void);
 #else
-static inline void hardlockup_detector_disable(void)
-{
-}
+static inline void hardlockup_detector_disable(void) {}
 #endif
 
 /*
@@ -80,6 +78,17 @@
 				void __user *, size_t *, loff_t *);
 extern int proc_watchdog_cpumask(struct ctl_table *, int,
 				 void __user *, size_t *, loff_t *);
+extern int lockup_detector_suspend(void);
+extern void lockup_detector_resume(void);
+#else
+static inline int lockup_detector_suspend(void)
+{
+	return 0;
+}
+
+static inline void lockup_detector_resume(void)
+{
+}
 #endif
 
 #ifdef CONFIG_HAVE_ACPI_APEI_NMI
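The new pair is meant to bracket code that would otherwise trip the lockup
detectors. A minimal sketch of the calling pattern (the surrounding function
is hypothetical):

  #include <linux/nmi.h>

  /* Hypothetical caller: quiesce the lockup detector around work that
   * legitimately stalls CPUs, then restore it.
   */
  static int do_stalling_work(void)
  {
  	int err = lockup_detector_suspend();

  	if (err)
  		return err;	/* detector threads could not be parked */

  	/* ... CPU-stalling work ... */

  	lockup_detector_resume();
  	return 0;
  }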
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index c0d94ed..b5812c3 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -28,18 +28,32 @@
 	__u32			cc;	/* Controller Configuration */
 	__u32			rsvd1;	/* Reserved */
 	__u32			csts;	/* Controller Status */
-	__u32			rsvd2;	/* Reserved */
+	__u32			nssr;	/* Subsystem Reset */
 	__u32			aqa;	/* Admin Queue Attributes */
 	__u64			asq;	/* Admin SQ Base Address */
 	__u64			acq;	/* Admin CQ Base Address */
+	__u32			cmbloc; /* Controller Memory Buffer Location */
+	__u32			cmbsz;  /* Controller Memory Buffer Size */
 };
 
 #define NVME_CAP_MQES(cap)	((cap) & 0xffff)
 #define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)
 #define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)
+#define NVME_CAP_NSSRC(cap)	(((cap) >> 36) & 0x1)
 #define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)
 #define NVME_CAP_MPSMAX(cap)	(((cap) >> 52) & 0xf)
 
+#define NVME_CMB_BIR(cmbloc)	((cmbloc) & 0x7)
+#define NVME_CMB_OFST(cmbloc)	(((cmbloc) >> 12) & 0xfffff)
+#define NVME_CMB_SZ(cmbsz)	(((cmbsz) >> 12) & 0xfffff)
+#define NVME_CMB_SZU(cmbsz)	(((cmbsz) >> 8) & 0xf)
+
+#define NVME_CMB_WDS(cmbsz)	((cmbsz) & 0x10)
+#define NVME_CMB_RDS(cmbsz)	((cmbsz) & 0x8)
+#define NVME_CMB_LISTS(cmbsz)	((cmbsz) & 0x4)
+#define NVME_CMB_CQS(cmbsz)	((cmbsz) & 0x2)
+#define NVME_CMB_SQS(cmbsz)	((cmbsz) & 0x1)
+
 enum {
 	NVME_CC_ENABLE		= 1 << 0,
 	NVME_CC_CSS_NVM		= 0 << 4,
@@ -55,6 +69,7 @@
 	NVME_CC_IOCQES		= 4 << 20,
 	NVME_CSTS_RDY		= 1 << 0,
 	NVME_CSTS_CFS		= 1 << 1,
+	NVME_CSTS_NSSRO		= 1 << 4,
 	NVME_CSTS_SHST_NORMAL	= 0 << 2,
 	NVME_CSTS_SHST_OCCUR	= 1 << 2,
 	NVME_CSTS_SHST_CMPLT	= 2 << 2,
@@ -97,9 +112,14 @@
 	char serial[20];
 	char model[40];
 	char firmware_rev[8];
+	bool subsystem;
 	u32 max_hw_sectors;
 	u32 stripe_size;
 	u32 page_size;
+	void __iomem *cmb;
+	dma_addr_t cmb_dma_addr;
+	u64 cmb_size;
+	u32 cmbsz;
 	u16 oncs;
 	u16 abort_limit;
 	u8 event_limit;
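As a worked example of the CMB macros: per the NVMe 1.2 register layout, CMBSZ
reports the buffer size in units of 4 KiB * 16^SZU, so a driver could compute
the byte size roughly as below (a sketch based on the spec, not a quote from
the driver):

  #include <linux/nvme.h>

  /* Sketch: SZU selects the granularity, 4 KiB << (4 * SZU). */
  static u64 nvme_cmb_size_bytes(u32 cmbsz)
  {
  	u64 unit = 1ULL << (12 + 4 * NVME_CMB_SZU(cmbsz));

  	return (u64)NVME_CMB_SZ(cmbsz) * unit;
  }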
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 69dbe31..f319182 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -54,7 +54,7 @@
 			      struct of_mm_gpio_chip *mm_gc);
 extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
 
-extern void of_gpiochip_add(struct gpio_chip *gc);
+extern int of_gpiochip_add(struct gpio_chip *gc);
 extern void of_gpiochip_remove(struct gpio_chip *gc);
 extern int of_gpio_simple_xlate(struct gpio_chip *gc,
 				const struct of_phandle_args *gpiospec,
@@ -76,7 +76,7 @@
 	return -ENOSYS;
 }
 
-static inline void of_gpiochip_add(struct gpio_chip *gc) { }
+static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
 static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
 
 #endif /* CONFIG_OF_GPIO */
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
new file mode 100644
index 0000000..bfa673b
--- /dev/null
+++ b/include/linux/perf/arm_pmu.h
@@ -0,0 +1,154 @@
+/*
+ *  linux/arch/arm/include/asm/pmu.h
+ *
+ *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ARM_PMU_H__
+#define __ARM_PMU_H__
+
+#include <linux/interrupt.h>
+#include <linux/perf_event.h>
+
+#include <asm/cputype.h>
+
+/*
+ * struct arm_pmu_platdata - ARM PMU platform data
+ *
+ * @handle_irq: an optional handler which will be called from the
+ *	interrupt and passed the address of the low level handler,
+ *	and can be used to implement any platform specific handling
+ *	before or after calling it.
+ */
+struct arm_pmu_platdata {
+	irqreturn_t (*handle_irq)(int irq, void *dev,
+				  irq_handler_t pmu_handler);
+};
+
+#ifdef CONFIG_ARM_PMU
+
+/*
+ * The ARMv7 CPU PMU supports up to 32 event counters.
+ */
+#define ARMPMU_MAX_HWEVENTS		32
+
+#define HW_OP_UNSUPPORTED		0xFFFF
+#define C(_x)				PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED		0xFFFF
+
+#define PERF_MAP_ALL_UNSUPPORTED					\
+	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
+
+#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
+[0 ... C(MAX) - 1] = {							\
+	[0 ... C(OP_MAX) - 1] = {					\
+		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
+	},								\
+}
+
+/* The events for a given PMU register set. */
+struct pmu_hw_events {
+	/*
+	 * The events that are active on the PMU for the given index.
+	 */
+	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];
+
+	/*
+	 * A 1 bit for an index indicates that the counter is being used for
+	 * an event. A 0 means that the counter can be used.
+	 */
+	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
+
+	/*
+	 * Hardware lock to serialize accesses to PMU registers. Needed for the
+	 * read/modify/write sequences.
+	 */
+	raw_spinlock_t		pmu_lock;
+
+	/*
+	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
+	 * already have to allocate this struct per cpu.
+	 */
+	struct arm_pmu		*percpu_pmu;
+};
+
+struct arm_pmu {
+	struct pmu	pmu;
+	cpumask_t	active_irqs;
+	cpumask_t	supported_cpus;
+	int		*irq_affinity;
+	char		*name;
+	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
+	void		(*enable)(struct perf_event *event);
+	void		(*disable)(struct perf_event *event);
+	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
+					 struct perf_event *event);
+	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
+					 struct perf_event *event);
+	int		(*set_event_filter)(struct hw_perf_event *evt,
+					    struct perf_event_attr *attr);
+	u32		(*read_counter)(struct perf_event *event);
+	void		(*write_counter)(struct perf_event *event, u32 val);
+	void		(*start)(struct arm_pmu *);
+	void		(*stop)(struct arm_pmu *);
+	void		(*reset)(void *);
+	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
+	void		(*free_irq)(struct arm_pmu *);
+	int		(*map_event)(struct perf_event *event);
+	int		num_events;
+	atomic_t	active_events;
+	struct mutex	reserve_mutex;
+	u64		max_period;
+	struct platform_device	*plat_device;
+	struct pmu_hw_events	__percpu *hw_events;
+	struct notifier_block	hotplug_nb;
+};
+
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+
+int armpmu_register(struct arm_pmu *armpmu, int type);
+
+u64 armpmu_event_update(struct perf_event *event);
+
+int armpmu_event_set_period(struct perf_event *event);
+
+int armpmu_map_event(struct perf_event *event,
+		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
+						[PERF_COUNT_HW_CACHE_OP_MAX]
+						[PERF_COUNT_HW_CACHE_RESULT_MAX],
+		     u32 raw_event_mask);
+
+struct pmu_probe_info {
+	unsigned int cpuid;
+	unsigned int mask;
+	int (*init)(struct arm_pmu *);
+};
+
+#define PMU_PROBE(_cpuid, _mask, _fn)	\
+{					\
+	.cpuid = (_cpuid),		\
+	.mask = (_mask),		\
+	.init = (_fn),			\
+}
+
+#define ARM_PMU_PROBE(_cpuid, _fn) \
+	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
+
+#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
+
+#define XSCALE_PMU_PROBE(_version, _fn) \
+	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
+
+int arm_pmu_device_probe(struct platform_device *pdev,
+			 const struct of_device_id *of_table,
+			 const struct pmu_probe_info *probe_table);
+
+#endif /* CONFIG_ARM_PMU */
+
+#endif /* __ARM_PMU_H__ */
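A hedged sketch of how a platform driver might feed a probe table to
arm_pmu_device_probe(); the init callback is a placeholder and the part
number is only an example:

  #include <linux/perf/arm_pmu.h>

  static int my_pmu_init(struct arm_pmu *pmu)
  {
  	return 0;	/* placeholder: fill in the arm_pmu callbacks here */
  }

  /* Match on the CPU part number; a zeroed entry terminates the table. */
  static const struct pmu_probe_info my_pmu_probe_table[] = {
  	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, my_pmu_init),
  	{ /* sentinel */ }
  };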
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2027809..092a0e8 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -641,6 +641,8 @@
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
 extern void perf_event_delayed_put(struct task_struct *task);
+extern struct perf_event *perf_event_get(unsigned int fd);
+extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
 extern void perf_event_print_debug(void);
 extern void perf_pmu_disable(struct pmu *pmu);
 extern void perf_pmu_enable(struct pmu *pmu);
@@ -659,6 +661,7 @@
 				void *context);
 extern void perf_pmu_migrate_context(struct pmu *pmu,
 				int src_cpu, int dst_cpu);
+extern u64 perf_event_read_local(struct perf_event *event);
 extern u64 perf_event_read_value(struct perf_event *event,
 				 u64 *enabled, u64 *running);
 
@@ -979,6 +982,12 @@
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
 static inline void perf_event_delayed_put(struct task_struct *task)	{ }
+static inline struct perf_event *perf_event_get(unsigned int fd)	{ return ERR_PTR(-EINVAL); }
+static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
+{
+	return ERR_PTR(-EINVAL);
+}
+static inline u64 perf_event_read_local(struct perf_event *event)	{ return -EINVAL; }
 static inline void perf_event_print_debug(void)				{ }
 static inline int perf_event_task_disable(void)				{ return -EINVAL; }
 static inline int perf_event_task_enable(void)				{ return -EINVAL; }
@@ -1011,6 +1020,7 @@
 static inline void perf_event_disable(struct perf_event *event)		{ }
 static inline int __perf_event_disable(void *info)			{ return -1; }
 static inline void perf_event_task_tick(void)				{ }
+static inline int perf_event_release_kernel(struct perf_event *event)	{ return 0; }
 #endif
 
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
diff --git a/include/linux/phy.h b/include/linux/phy.h
index a26c3f8..962387a 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -330,6 +330,7 @@
  * c45_ids: 802.3-c45 Device Identifiers if is_c45.
  * is_c45:  Set to true if this phy uses clause 45 addressing.
  * is_internal: Set to true if this phy is internal to a MAC.
+ * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
  * has_fixups: Set to true if this phy has fixups/quirks.
  * suspended: Set to true if this phy has been suspended successfully.
  * state: state of the PHY for management purposes
@@ -368,6 +369,7 @@
 	struct phy_c45_device_ids c45_ids;
 	bool is_c45;
 	bool is_internal;
+	bool is_pseudo_fixed_link;
 	bool has_fixups;
 	bool suspended;
 
@@ -424,6 +426,8 @@
 
 	struct net_device *attached_dev;
 
+	u8 mdix;
+
 	void (*adjust_link)(struct net_device *dev);
 };
 #define to_phy_device(d) container_of(d, struct phy_device, dev)
@@ -686,6 +690,16 @@
 {
 	return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
 		phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
+}
+
+/*
+ * phy_is_pseudo_fixed_link - Convenience function for testing if this
+ * PHY is the CPU port facing side of an Ethernet switch, or similar.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
+{
+	return phydev->is_pseudo_fixed_link;
 }
 
 /**
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index fe5732d..2400d2e 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -13,9 +13,11 @@
 
 #if IS_ENABLED(CONFIG_FIXED_PHY)
 extern int fixed_phy_add(unsigned int irq, int phy_id,
-			 struct fixed_phy_status *status);
+			 struct fixed_phy_status *status,
+			 int link_gpio);
 extern struct phy_device *fixed_phy_register(unsigned int irq,
 					     struct fixed_phy_status *status,
+					     int link_gpio,
 					     struct device_node *np);
 extern void fixed_phy_del(int phy_addr);
 extern int fixed_phy_set_link_update(struct phy_device *phydev,
@@ -26,12 +28,14 @@
 			   const struct fixed_phy_status *changed);
 #else
 static inline int fixed_phy_add(unsigned int irq, int phy_id,
-				struct fixed_phy_status *status)
+				struct fixed_phy_status *status,
+				int link_gpio)
 {
 	return -ENODEV;
 }
 static inline struct phy_device *fixed_phy_register(unsigned int irq,
 						struct fixed_phy_status *status,
+						int link_gpio,
 						struct device_node *np)
 {
 	return ERR_PTR(-ENODEV);
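Existing callers gain a link_gpio argument; passing -1 (an assumption based on
how GPIO-less callers are typically converted) means no GPIO tracks the link
state and @status alone decides it. A sketch:

  #include <linux/of.h>
  #include <linux/phy.h>
  #include <linux/phy_fixed.h>

  /* Sketch: register a fixed 1G full-duplex "PHY" with no link GPIO. */
  static struct phy_device *register_fixed_link(struct device_node *np)
  {
  	struct fixed_phy_status status = {
  		.link	= 1,
  		.speed	= 1000,
  		.duplex	= 1,
  	};

  	return fixed_phy_register(PHY_POLL, &status, -1, np);
  }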
diff --git a/include/linux/platform_data/atmel_mxt_ts.h b/include/linux/platform_data/atmel_mxt_ts.h
new file mode 100644
index 0000000..695035a
--- /dev/null
+++ b/include/linux/platform_data/atmel_mxt_ts.h
@@ -0,0 +1,31 @@
+/*
+ * Atmel maXTouch Touchscreen driver
+ *
+ * Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
+#define __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
+
+#include <linux/types.h>
+
+enum mxt_suspend_mode {
+	MXT_SUSPEND_DEEP_SLEEP	= 0,
+	MXT_SUSPEND_T9_CTRL	= 1,
+};
+
+/* The platform data for the Atmel maXTouch touchscreen driver */
+struct mxt_platform_data {
+	unsigned long irqflags;
+	u8 t19_num_keys;
+	const unsigned int *t19_keymap;
+	enum mxt_suspend_mode suspend_mode;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H */
diff --git a/include/linux/platform_data/gpio-em.h b/include/linux/platform_data/gpio-em.h
deleted file mode 100644
index 7c5a519..0000000
--- a/include/linux/platform_data/gpio-em.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __GPIO_EM_H__
-#define __GPIO_EM_H__
-
-struct gpio_em_config {
-	unsigned int gpio_base;
-	unsigned int irq_base;
-	unsigned int number_of_pins;
-	const char *pctl_name;
-};
-
-#endif /* __GPIO_EM_H__ */
diff --git a/include/linux/platform_data/itco_wdt.h b/include/linux/platform_data/itco_wdt.h
new file mode 100644
index 0000000..f16542c
--- /dev/null
+++ b/include/linux/platform_data/itco_wdt.h
@@ -0,0 +1,19 @@
+/*
+ * Platform data for the Intel TCO Watchdog
+ */
+
+#ifndef _ITCO_WDT_H_
+#define _ITCO_WDT_H_
+
+/* Watchdog resources */
+#define ICH_RES_IO_TCO		0
+#define ICH_RES_IO_SMI		1
+#define ICH_RES_MEM_OFF		2
+#define ICH_RES_MEM_GCS_PMC	0
+
+struct itco_wdt_platform_data {
+	char name[32];
+	unsigned int version;
+};
+
+#endif /* _ITCO_WDT_H_ */
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index 9c7fd1e..1b2ba24 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -136,7 +136,6 @@
 		Only valid when mode is PWM_BASED.
  * @size_program : total size of lp855x_rom_data
  * @rom_data : list of new eeprom/eprom registers
- * @supply : regulator that supplies 3V input
  */
 struct lp855x_platform_data {
 	const char *name;
@@ -145,7 +144,6 @@
 	unsigned int period_ns;
 	int size_program;
 	struct lp855x_rom_data *rom_data;
-	struct regulator *supply;
 };
 
 #endif
diff --git a/include/linux/platform_data/pixcir_i2c_ts.h b/include/linux/platform_data/pixcir_i2c_ts.h
new file mode 100644
index 0000000..646af6f
--- /dev/null
+++ b/include/linux/platform_data/pixcir_i2c_ts.h
@@ -0,0 +1,63 @@
+#ifndef	_PIXCIR_I2C_TS_H
+#define	_PIXCIR_I2C_TS_H
+
+/*
+ * Register map
+ */
+#define PIXCIR_REG_POWER_MODE	51
+#define PIXCIR_REG_INT_MODE	52
+
+/*
+ * Power modes:
+ * active: max scan speed
+ * idle: lower scan speed with automatic transition to active on touch
+ * halt: datasheet says sleep but this is more like halt as the chip
+ *       clocks are cut and it can only be brought out of this mode
+ *	 using the RESET pin.
+ */
+enum pixcir_power_mode {
+	PIXCIR_POWER_ACTIVE,
+	PIXCIR_POWER_IDLE,
+	PIXCIR_POWER_HALT,
+};
+
+#define PIXCIR_POWER_MODE_MASK	0x03
+#define PIXCIR_POWER_ALLOW_IDLE (1UL << 2)
+
+/*
+ * Interrupt modes:
+ * periodical: interrupt is asserted periodically
+ * diff coordinates: interrupt is asserted when coordinates change
+ * level on touch: interrupt level asserted during touch
+ * pulse on touch: interrupt pulse asserted during touch
+ *
+ */
+enum pixcir_int_mode {
+	PIXCIR_INT_PERIODICAL,
+	PIXCIR_INT_DIFF_COORD,
+	PIXCIR_INT_LEVEL_TOUCH,
+	PIXCIR_INT_PULSE_TOUCH,
+};
+
+#define PIXCIR_INT_MODE_MASK	0x03
+#define PIXCIR_INT_ENABLE	(1UL << 3)
+#define PIXCIR_INT_POL_HIGH	(1UL << 2)
+
+/**
+ * struct pixcir_i2c_chip_data - chip related data
+ * @max_fingers:	Max number of fingers reported simultaneously by h/w
+ * @has_hw_ids:		Hardware supports finger tracking IDs
+ *
+ */
+struct pixcir_i2c_chip_data {
+	u8 max_fingers;
+	bool has_hw_ids;
+};
+
+struct pixcir_ts_platform_data {
+	int x_max;
+	int y_max;
+	struct pixcir_i2c_chip_data chip;
+};
+
+#endif
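To make the bit layout concrete, a hedged sketch of composing the value for
PIXCIR_REG_INT_MODE from the definitions above (the helper is hypothetical):

  #include <linux/types.h>
  #include <linux/platform_data/pixcir_i2c_ts.h>

  /* Sketch: level-triggered, active-high, enabled interrupt mode. */
  static u8 pixcir_int_mode_value(void)
  {
  	u8 val = PIXCIR_INT_LEVEL_TOUCH & PIXCIR_INT_MODE_MASK;

  	val |= PIXCIR_INT_ENABLE;	/* bit 3: enable the interrupt */
  	val |= PIXCIR_INT_POL_HIGH;	/* bit 2: active-high polarity */
  	return val;
  }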
diff --git a/include/linux/platform_data/st_nci.h b/include/linux/platform_data/st_nci.h
deleted file mode 100644
index d9d400a..0000000
--- a/include/linux/platform_data/st_nci.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Driver include for ST NCI NFC chip family.
- *
- * Copyright (C) 2014-2015  STMicroelectronics SAS. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _ST_NCI_H_
-#define _ST_NCI_H_
-
-#define ST_NCI_DRIVER_NAME "st_nci"
-
-struct st_nci_nfc_platform_data {
-	unsigned int gpio_reset;
-	unsigned int irq_polarity;
-};
-
-#endif /* _ST_NCI_H_ */
diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h
index 0472ab2..7bdece8 100644
--- a/include/linux/platform_data/zforce_ts.h
+++ b/include/linux/platform_data/zforce_ts.h
@@ -16,9 +16,6 @@
 #define _LINUX_INPUT_ZFORCE_TS_H
 
 struct zforce_ts_platdata {
-	int gpio_int;
-	int gpio_rst;
-
 	unsigned int x_max;
 	unsigned int y_max;
 };
diff --git a/include/linux/property.h b/include/linux/property.h
index 76ebde9..a59c6ee 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -166,4 +166,8 @@
 
 bool device_dma_is_coherent(struct device *dev);
 
+int device_get_phy_mode(struct device *dev);
+
+void *device_get_mac_address(struct device *dev, char *addr, int alen);
+
 #endif /* _LINUX_PROPERTY_H_ */
diff --git a/include/linux/psci.h b/include/linux/psci.h
new file mode 100644
index 0000000..a682fcc
--- /dev/null
+++ b/include/linux/psci.h
@@ -0,0 +1,52 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#ifndef __LINUX_PSCI_H
+#define __LINUX_PSCI_H
+
+#include <linux/init.h>
+#include <linux/types.h>
+
+#define PSCI_POWER_STATE_TYPE_STANDBY		0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1
+
+bool psci_tos_resident_on(int cpu);
+
+struct psci_operations {
+	int (*cpu_suspend)(u32 state, unsigned long entry_point);
+	int (*cpu_off)(u32 state);
+	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+	int (*migrate)(unsigned long cpuid);
+	int (*affinity_info)(unsigned long target_affinity,
+			unsigned long lowest_affinity_level);
+	int (*migrate_info_type)(void);
+};
+
+extern struct psci_operations psci_ops;
+
+#if defined(CONFIG_ARM_PSCI_FW)
+int __init psci_dt_init(void);
+#else
+static inline int psci_dt_init(void) { return 0; }
+#endif
+
+#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI)
+int __init psci_acpi_init(void);
+bool __init acpi_psci_present(void);
+bool __init acpi_psci_use_hvc(void);
+#else
+static inline int psci_acpi_init(void) { return 0; }
+static inline bool acpi_psci_present(void) { return false; }
+#endif
+
+#endif /* __LINUX_PSCI_H */
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 77ca660..7a57c28 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -43,7 +43,7 @@
 void inode_sub_rsv_space(struct inode *inode, qsize_t number);
 void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
 
-void dquot_initialize(struct inode *inode);
+int dquot_initialize(struct inode *inode);
 void dquot_drop(struct inode *inode);
 struct dquot *dqget(struct super_block *sb, struct kqid qid);
 static inline struct dquot *dqgrab(struct dquot *dquot)
@@ -200,8 +200,9 @@
 	return 0;
 }
 
-static inline void dquot_initialize(struct inode *inode)
+static inline int dquot_initialize(struct inode *inode)
 {
+	return 0;
 }
 
 static inline void dquot_drop(struct inode *inode)
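Since dquot_initialize() can now fail, filesystem callers are expected to
propagate the error rather than continue; a hedged sketch of the updated
calling pattern (the function is illustrative, not from a real filesystem):

  #include <linux/quotaops.h>

  static int example_prepare_op(struct inode *dir)
  {
  	int err = dquot_initialize(dir);

  	if (err)
  		return err;	/* e.g. failure reading quota info */

  	/* ... proceed with the namespace operation ... */
  	return 0;
  }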
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 59c55ea..4a67590 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -50,6 +50,17 @@
 	unsigned int def;
 };
 
+/**
+ * struct reg_sequence - Register/value pairs for sequences of writes
+ *
+ * @reg: Register address.
+ * @def: Register value.
+ */
+struct reg_sequence {
+	unsigned int reg;
+	unsigned int def;
+};
+
 #ifdef CONFIG_REGMAP
 
 enum regmap_endian {
@@ -410,10 +421,10 @@
 		     const void *val, size_t val_len);
 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 			size_t val_count);
-int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
 			int num_regs);
 int regmap_multi_reg_write_bypassed(struct regmap *map,
-				    const struct reg_default *regs,
+				    const struct reg_sequence *regs,
 				    int num_regs);
 int regmap_raw_write_async(struct regmap *map, unsigned int reg,
 			   const void *val, size_t val_len);
@@ -424,6 +435,8 @@
 		     size_t val_count);
 int regmap_update_bits(struct regmap *map, unsigned int reg,
 		       unsigned int mask, unsigned int val);
+int regmap_write_bits(struct regmap *map, unsigned int reg,
+		       unsigned int mask, unsigned int val);
 int regmap_update_bits_async(struct regmap *map, unsigned int reg,
 			     unsigned int mask, unsigned int val);
 int regmap_update_bits_check(struct regmap *map, unsigned int reg,
@@ -450,7 +463,7 @@
 bool regmap_check_range_table(struct regmap *map, unsigned int reg,
 			      const struct regmap_access_table *table);
 
-int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
+int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
 			  int num_regs);
 int regmap_parse_val(struct regmap *map, const void *buf,
 				unsigned int *val);
@@ -503,6 +516,8 @@
 
 int regmap_fields_write(struct regmap_field *field, unsigned int id,
 			unsigned int val);
+int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
+			unsigned int val);
 int regmap_fields_read(struct regmap_field *field, unsigned int id,
 		       unsigned int *val);
 int regmap_fields_update_bits(struct regmap_field *field,  unsigned int id,
@@ -645,6 +660,13 @@
 	return -EINVAL;
 }
 
+static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
+				     unsigned int mask, unsigned int val)
+{
+	WARN_ONCE(1, "regmap API is disabled");
+	return -EINVAL;
+}
+
 static inline int regmap_update_bits_async(struct regmap *map,
 					   unsigned int reg,
 					   unsigned int mask, unsigned int val)
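A minimal sketch of the new reg_sequence type with regmap_multi_reg_write();
the register addresses and values are placeholders:

  #include <linux/kernel.h>
  #include <linux/regmap.h>

  /* Sketch: replay an init sequence in order. reg_sequence is for
   * ordered writes, while reg_default keeps describing cache defaults.
   */
  static int chip_apply_init_seq(struct regmap *map)
  {
  	static const struct reg_sequence init_seq[] = {
  		{ .reg = 0x00, .def = 0x01 },	/* hypothetical reset */
  		{ .reg = 0x10, .def = 0xa5 },	/* hypothetical config */
  	};

  	return regmap_multi_reg_write(map, init_seq, ARRAY_SIZE(init_seq));
  }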
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c89c53a..29446ae 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -89,6 +89,9 @@
 	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
 	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
 	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
+	TTU_BATCH_FLUSH = (1 << 11),	/* Batch TLB flushes where possible
+					 * and caller guarantees they will
+					 * do a final flush if necessary */
 };
 
 #ifdef CONFIG_MMU
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 9b1ef0c..556ec1e 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -161,10 +161,6 @@
 static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
 			    struct scatterlist *sgl)
 {
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-	BUG();
-#endif
-
 	/*
 	 * offset and length are unused for chain entry.  Clear them.
 	 */
@@ -251,6 +247,11 @@
 struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
 void sg_init_table(struct scatterlist *, unsigned int);
 void sg_init_one(struct scatterlist *, const void *, unsigned int);
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+	     const off_t skip, const int nb_splits,
+	     const size_t *split_sizes,
+	     struct scatterlist **out, int *out_mapped_nents,
+	     gfp_t gfp_mask);
 
 typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
 typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
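A hedged usage sketch for sg_split(): splitting one mapped scatterlist into
two pieces by byte count (the sizes and wrapper are illustrative):

  #include <linux/gfp.h>
  #include <linux/scatterlist.h>

  /* Sketch: split @in (@nents mapped entries) into two lists covering
   * the first 4 KiB and the following 8 KiB; @out and @out_nents must
   * each have room for two results.
   */
  static int split_in_two(struct scatterlist *in, int nents,
  			struct scatterlist **out, int *out_nents)
  {
  	static const size_t sizes[2] = { 4096, 8192 };

  	return sg_split(in, nents, 0, 2, sizes, out, out_nents, GFP_KERNEL);
  }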
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 119823d..a4ab9da 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1344,6 +1344,25 @@
 	perf_nr_task_contexts,
 };
 
+/* Track pages that require TLB flushes */
+struct tlbflush_unmap_batch {
+	/*
+	 * Each bit set is a CPU that potentially has a TLB entry for one of
+	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
+	 */
+	struct cpumask cpumask;
+
+	/* True if any bit in cpumask is set */
+	bool flush_required;
+
+	/*
+	 * If true then the PTE was dirty when unmapped. The entry must be
+	 * flushed before IO is initiated or a stale TLB entry potentially
+	 * allows an update without redirtying the page.
+	 */
+	bool writable;
+};
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	void *stack;
@@ -1700,6 +1719,10 @@
 	unsigned long numa_pages_migrated;
 #endif /* CONFIG_NUMA_BALANCING */
 
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+	struct tlbflush_unmap_batch tlb_ubc;
+#endif
+
 	struct rcu_head rcu;
 
 	/*
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 912a7c4..d4c7271 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -149,6 +149,41 @@
 #endif
 }
 
+/**
+ * seq_show_option - display a mount option with appropriate escapes.
+ * @m: the seq_file handle
+ * @name: the mount option name
+ * @value: the mount option's value, can be NULL
+ */
+static inline void seq_show_option(struct seq_file *m, const char *name,
+				   const char *value)
+{
+	seq_putc(m, ',');
+	seq_escape(m, name, ",= \t\n\\");
+	if (value) {
+		seq_putc(m, '=');
+		seq_escape(m, value, ", \t\n\\");
+	}
+}
+
+/**
+ * seq_show_option_n - display a mount option with appropriate escapes
+ *		       where @value must be a specific length.
+ * @m: the seq_file handle
+ * @name: the mount option name
+ * @value: the mount option name's value, cannot be NULL
+ * @length: the length of @value to display
+ *
+ * This is a macro since it uses @length to define the size of the
+ * stack buffer.
+ */
+#define seq_show_option_n(m, name, value, length) {	\
+	char val_buf[length + 1];			\
+	strncpy(val_buf, value, length);		\
+	val_buf[length] = '\0';				\
+	seq_show_option(m, name, val_buf);		\
+}
+
 #define SEQ_START_TOKEN ((void *)1)
 /*
  * Helpers for iteration over list_head-s in seq_files
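A sketch of the intended call site, a filesystem's .show_options handler; the
option name is invented for illustration:

  #include <linux/seq_file.h>

  /* Sketch: emit ",user=<value>" with escaping, so a crafted value
   * cannot inject fake mount options into /proc/mounts.
   */
  static void example_show_user(struct seq_file *m, const char *user)
  {
  	seq_show_option(m, "user", user);
  }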
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 9f779c7..df4ab5d 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -18,6 +18,8 @@
 #include <linux/mod_devicetable.h>
 #include <uapi/linux/serio.h>
 
+extern struct bus_type serio_bus;
+
 struct serio {
 	void *port_data;
 
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index dd0ba50..d927647 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -128,7 +128,10 @@
 #if IS_ENABLED(CONFIG_SH_DMAE_BASE)
 bool shdma_chan_filter(struct dma_chan *chan, void *arg);
 #else
-#define shdma_chan_filter NULL
+static inline bool shdma_chan_filter(struct dma_chan *chan, void *arg)
+{
+	return false;
+}
 #endif
 
 #endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9b88536..2738d35 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -37,6 +37,7 @@
 #include <net/flow_dissector.h>
 #include <linux/splice.h>
 #include <linux/in6.h>
+#include <net/flow.h>
 
 /* A. Checksumming of received packets by device.
  *
@@ -173,17 +174,24 @@
 		BRNF_PROTO_8021Q,
 		BRNF_PROTO_PPPOE
 	} orig_proto:8;
-	bool			pkt_otherhost;
+	u8			pkt_otherhost:1;
+	u8			in_prerouting:1;
+	u8			bridged_dnat:1;
 	__u16			frag_max_size;
-	unsigned int		mask;
 	struct net_device	*physindev;
 	union {
-		struct net_device *physoutdev;
-		char neigh_header[8];
-	};
-	union {
+		/* prerouting: detect dnat in orig/reply direction */
 		__be32          ipv4_daddr;
 		struct in6_addr ipv6_daddr;
+
+		/* after prerouting + nat detected: store original source
+		 * mac since neigh resolution overwrites it, only used while
+		 * skb is out in neigh layer.
+		 */
+		char neigh_header[8];
+
+		/* always valid & non-NULL from FORWARD on, for physdev match */
+		struct net_device *physoutdev;
 	};
 };
 #endif
@@ -506,6 +514,7 @@
  *	@no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
   *	@napi_id: id of the NAPI struct this skb came from
  *	@secmark: security marking
+ *	@offload_fwd_mark: fwding offload mark
  *	@mark: Generic packet mark
  *	@vlan_proto: vlan encapsulation protocol
  *	@vlan_tci: vlan tag control information
@@ -650,9 +659,15 @@
 		unsigned int	sender_cpu;
 	};
 #endif
+	union {
 #ifdef CONFIG_NETWORK_SECMARK
-	__u32			secmark;
+		__u32		secmark;
 #endif
+#ifdef CONFIG_NET_SWITCHDEV
+		__u32		offload_fwd_mark;
+#endif
+	};
+
 	union {
 		__u32		mark;
 		__u32		reserved_tailroom;
@@ -922,29 +937,6 @@
 	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
 };
 
-static inline void
-skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
-{
-	skb->l4_hash = (type == PKT_HASH_TYPE_L4);
-	skb->sw_hash = 0;
-	skb->hash = hash;
-}
-
-static inline __u32 skb_get_hash(struct sk_buff *skb)
-{
-	if (!skb->l4_hash && !skb->sw_hash)
-		__skb_get_hash(skb);
-
-	return skb->hash;
-}
-
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
-
-static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
-{
-	return skb->hash;
-}
-
 static inline void skb_clear_hash(struct sk_buff *skb)
 {
 	skb->hash = 0;
@@ -958,6 +950,120 @@
 		skb_clear_hash(skb);
 }
 
+static inline void
+__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
+{
+	skb->l4_hash = is_l4;
+	skb->sw_hash = is_sw;
+	skb->hash = hash;
+}
+
+static inline void
+skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
+{
+	/* Used by drivers to set hash from HW */
+	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
+}
+
+static inline void
+__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
+{
+	__skb_set_hash(skb, hash, true, is_l4);
+}
+
+void __skb_get_hash(struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+		   const struct flow_keys *keys, int hlen);
+__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+			    void *data, int hlen_proto);
+
+static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
+					int thoff, u8 ip_proto)
+{
+	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
+}
+
+void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
+			     const struct flow_dissector_key *key,
+			     unsigned int key_count);
+
+bool __skb_flow_dissect(const struct sk_buff *skb,
+			struct flow_dissector *flow_dissector,
+			void *target_container,
+			void *data, __be16 proto, int nhoff, int hlen,
+			unsigned int flags);
+
+static inline bool skb_flow_dissect(const struct sk_buff *skb,
+				    struct flow_dissector *flow_dissector,
+				    void *target_container, unsigned int flags)
+{
+	return __skb_flow_dissect(skb, flow_dissector, target_container,
+				  NULL, 0, 0, 0, flags);
+}
+
+static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
+					      struct flow_keys *flow,
+					      unsigned int flags)
+{
+	memset(flow, 0, sizeof(*flow));
+	return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
+				  NULL, 0, 0, 0, flags);
+}
+
+static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
+						  void *data, __be16 proto,
+						  int nhoff, int hlen,
+						  unsigned int flags)
+{
+	memset(flow, 0, sizeof(*flow));
+	return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
+				  data, proto, nhoff, hlen, flags);
+}
+
+static inline __u32 skb_get_hash(struct sk_buff *skb)
+{
+	if (!skb->l4_hash && !skb->sw_hash)
+		__skb_get_hash(skb);
+
+	return skb->hash;
+}
+
+__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);
+
+static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
+{
+	if (!skb->l4_hash && !skb->sw_hash) {
+		struct flow_keys keys;
+		__u32 hash = __get_hash_from_flowi6(fl6, &keys);
+
+		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+	}
+
+	return skb->hash;
+}
+
+__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);
+
+static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
+{
+	if (!skb->l4_hash && !skb->sw_hash) {
+		struct flow_keys keys;
+		__u32 hash = __get_hash_from_flowi4(fl4, &keys);
+
+		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+	}
+
+	return skb->hash;
+}
+
+__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+
+static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
+{
+	return skb->hash;
+}
+
 static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
 {
 	to->hash = from->hash;
@@ -1943,7 +2049,7 @@
 
 	if (skb_transport_header_was_set(skb))
 		return;
-	else if (skb_flow_dissect_flow_keys(skb, &keys))
+	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
 		skb_set_transport_header(skb, keys.control.thoff);
 	else
 		skb_set_transport_header(skb, offset_hint);
@@ -2667,12 +2773,6 @@
 	skb_shinfo(skb)->frag_list = NULL;
 }
 
-static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
-{
-	frag->next = skb_shinfo(skb)->frag_list;
-	skb_shinfo(skb)->frag_list = frag;
-}
-
 #define skb_walk_frags(skb, iter)	\
 	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
 
@@ -3464,5 +3564,6 @@
 			       skb_network_header(skb);
 	return hdr_len + skb_gso_transport_seglen(skb);
 }
+
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
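A hedged sketch of the reworked dissection entry points: filling a flow_keys
from an skb and deriving the flow hash (flags = 0 requests the default
dissection; the wrapper is illustrative):

  #include <linux/skbuff.h>

  static __u32 example_flow_hash(struct sk_buff *skb)
  {
  	struct flow_keys keys;

  	if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
  		return 0;		/* packet could not be dissected */

  	return skb_get_hash(skb);	/* cached after first computation */
  }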
diff --git a/include/linux/slab.h b/include/linux/slab.h
index a99f0e5..7e37d44 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -290,6 +290,16 @@
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
 void kmem_cache_free(struct kmem_cache *, void *);
 
+/*
+ * Bulk allocation and freeing operations. These are accelerated in an
+ * allocator specific way to avoid taking locks repeatedly or building
+ * metadata structures unnecessarily.
+ *
+ * Note that interrupts must be enabled when calling these functions.
+ */
+void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
+bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
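A hedged sketch of the bulk API's calling convention (the cache is a
placeholder; note the comment above requires interrupts to be enabled):

  #include <linux/errno.h>
  #include <linux/slab.h>

  /* Sketch: grab 16 objects in one call, release them in one call. */
  static int bulk_example(struct kmem_cache *cache)
  {
  	void *objs[16];

  	if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, 16, objs))
  		return -ENOMEM;	/* expected to leave nothing allocated */

  	/* ... use objs[0..15] ... */

  	kmem_cache_free_bulk(cache, 16, objs);
  	return 0;
  }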
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index da3c593..e6109a6 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -48,7 +48,16 @@
 	const char			*thread_comm;
 };
 
-int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
+int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
+					   const struct cpumask *cpumask);
+
+static inline int
+smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
+{
+	return smpboot_register_percpu_thread_cpumask(plug_thread,
+						      cpu_possible_mask);
+}
+
 void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
 int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
 					 const struct cpumask *);
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index c735f5c..eead8ab 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -119,30 +119,8 @@
 	int rx_fifo_size;
 	void (*fix_mac_speed)(void *priv, unsigned int speed);
 	void (*bus_setup)(void __iomem *ioaddr);
-	void *(*setup)(struct platform_device *pdev);
-	void (*free)(struct platform_device *pdev, void *priv);
 	int (*init)(struct platform_device *pdev, void *priv);
 	void (*exit)(struct platform_device *pdev, void *priv);
-	void *custom_cfg;
-	void *custom_data;
 	void *bsp_priv;
 };
-
-/* of_data for SoC glue layer device tree bindings */
-
-struct stmmac_of_data {
-	int has_gmac;
-	int enh_desc;
-	int tx_coe;
-	int rx_coe;
-	int bugged_jumbo;
-	int pmt;
-	int riwt_off;
-	void (*fix_mac_speed)(void *priv, unsigned int speed);
-	void (*bus_setup)(void __iomem *ioaddr);
-	void *(*setup)(struct platform_device *pdev);
-	void (*free)(struct platform_device *pdev, void *priv);
-	int (*init)(struct platform_device *pdev, void *priv);
-	void (*exit)(struct platform_device *pdev, void *priv);
-};
 #endif
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 437ddb6..03d3b4c 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -46,7 +46,7 @@
  * 
  */
 struct cache_head {
-	struct cache_head * next;
+	struct hlist_node	cache_list;
 	time_t		expiry_time;	/* After this time, don't use the data */
 	time_t		last_refresh;   /* If CACHE_PENDING, this is when upcall 
 					 * was sent, else this is when update was received
@@ -73,7 +73,7 @@
 struct cache_detail {
 	struct module *		owner;
 	int			hash_size;
-	struct cache_head **	hash_table;
+	struct hlist_head *	hash_table;
 	rwlock_t		hash_lock;
 
 	atomic_t		inuse; /* active user-space update or lookup */
@@ -224,6 +224,11 @@
 					umode_t, struct cache_detail *);
 extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
 
+/* Must store cache_detail in seq_file->private if using next three functions */
+extern void *cache_seq_start(struct seq_file *file, loff_t *pos);
+extern void *cache_seq_next(struct seq_file *file, void *p, loff_t *pos);
+extern void cache_seq_stop(struct seq_file *file, void *p);
+
 extern void qword_add(char **bpp, int *lp, char *str);
 extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
 extern int qword_get(char **bpp, char *dest, int bufsize);
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index fae6fb9..cc0fc71 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -19,11 +19,6 @@
 #include <linux/wait.h>
 #include <linux/mm.h>
 
-/*
- * This is the RPC server thread function prototype
- */
-typedef int		(*svc_thread_fn)(void *);
-
 /* statistics for svc_pool structures */
 struct svc_pool_stats {
 	atomic_long_t	packets;
@@ -54,6 +49,25 @@
 	unsigned long		sp_flags;
 } ____cacheline_aligned_in_smp;
 
+struct svc_serv;
+
+struct svc_serv_ops {
+	/* Callback to use when last thread exits. */
+	void		(*svo_shutdown)(struct svc_serv *, struct net *);
+
+	/* function for service threads to run */
+	int		(*svo_function)(void *);
+
+	/* queue up a transport for servicing */
+	void		(*svo_enqueue_xprt)(struct svc_xprt *);
+
+	/* set up thread (or whatever) execution context */
+	int		(*svo_setup)(struct svc_serv *, struct svc_pool *, int);
+
+	/* optional module to count when adding threads (pooled svcs only) */
+	struct module	*svo_module;
+};
+
 /*
  * RPC service.
  *
@@ -85,16 +99,7 @@
 
 	unsigned int		sv_nrpools;	/* number of thread pools */
 	struct svc_pool *	sv_pools;	/* array of thread pools */
-
-	void			(*sv_shutdown)(struct svc_serv *serv,
-					       struct net *net);
-						/* Callback to use when last thread
-						 * exits.
-						 */
-
-	struct module *		sv_module;	/* optional module to count when
-						 * adding threads */
-	svc_thread_fn		sv_function;	/* main function for threads */
+	struct svc_serv_ops	*sv_ops;	/* server operations */
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 	struct list_head	sv_cb_list;	/* queue for callback requests
 						 * that arrive over the same
@@ -423,19 +428,46 @@
 };
 
 /*
+ * Mode for mapping cpus to pools.
+ */
+enum {
+	SVC_POOL_AUTO = -1,	/* choose one of the others */
+	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
+				 * (legacy & UP mode) */
+	SVC_POOL_PERCPU,	/* one pool per cpu */
+	SVC_POOL_PERNODE	/* one pool per numa node */
+};
+
+struct svc_pool_map {
+	int count;			/* How many svc_servs use us */
+	int mode;			/* Note: int not enum to avoid
+					 * warnings about "enumeration value
+					 * not handled in switch" */
+	unsigned int npools;
+	unsigned int *pool_to;		/* maps pool id to cpu or node */
+	unsigned int *to_pool;		/* maps cpu or node to pool id */
+};
+
+extern struct svc_pool_map svc_pool_map;
+
+/*
  * Function prototypes.
  */
 int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
 void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
 int svc_bind(struct svc_serv *serv, struct net *net);
 struct svc_serv *svc_create(struct svc_program *, unsigned int,
-			    void (*shutdown)(struct svc_serv *, struct net *net));
+			    struct svc_serv_ops *);
+struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
+					struct svc_pool *pool, int node);
 struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
 					struct svc_pool *pool, int node);
+void		   svc_rqst_free(struct svc_rqst *);
 void		   svc_exit_thread(struct svc_rqst *);
+unsigned int	   svc_pool_map_get(void);
+void		   svc_pool_map_put(void);
 struct svc_serv *  svc_create_pooled(struct svc_program *, unsigned int,
-			void (*shutdown)(struct svc_serv *, struct net *net),
-			svc_thread_fn, struct module *);
+			struct svc_serv_ops *);
 int		   svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
 int		   svc_pool_stats_open(struct svc_serv *serv, struct file *file);
 void		   svc_destroy(struct svc_serv *);
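With the callbacks gathered into svc_serv_ops, a service now defines one ops
structure and hands it to svc_create_pooled() instead of passing the shutdown
callback, thread function and module separately; a sketch with placeholder
callbacks:

  #include <linux/module.h>
  #include <linux/sunrpc/svc.h>

  static void example_shutdown(struct svc_serv *serv, struct net *net)
  {
  	/* placeholder: release per-net resources */
  }

  static int example_thread_fn(void *data)
  {
  	return 0;	/* placeholder: the service thread loop */
  }

  static struct svc_serv_ops example_sv_ops = {
  	.svo_shutdown	= example_shutdown,
  	.svo_function	= example_thread_fn,
  	.svo_module	= THIS_MODULE,
  };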
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index cb94ee4..d5ee6d8 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -172,13 +172,6 @@
 #define RDMAXPRT_SQ_PENDING	2
 #define RDMAXPRT_CONN_PENDING	3
 
-#define RPCRDMA_MAX_SVC_SEGS	(64)	/* server max scatter/gather */
-#if RPCSVC_MAXPAYLOAD < (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
-#define RPCRDMA_MAXPAYLOAD	RPCSVC_MAXPAYLOAD
-#else
-#define RPCRDMA_MAXPAYLOAD	(RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT)
-#endif
-
 #define RPCRDMA_LISTEN_BACKLOG  10
 /* The default ORD value is based on two outstanding full-size writes with a
  * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ.  */
@@ -187,6 +180,8 @@
 #define RPCRDMA_MAX_REQUESTS    32
 #define RPCRDMA_MAX_REQ_SIZE    4096
 
+#define RPCSVC_MAXPAYLOAD_RDMA	RPCSVC_MAXPAYLOAD
+
 /* svc_rdma_marshal.c */
 extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
 extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
@@ -213,6 +208,8 @@
 
 /* svc_rdma_sendto.c */
 extern int svc_rdma_sendto(struct svc_rqst *);
+extern struct rpcrdma_read_chunk *
+	svc_rdma_get_read_chunk(struct rpcrdma_msg *);
 
 /* svc_rdma_transport.c */
 extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
@@ -225,7 +222,6 @@
 extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
 extern struct svc_rdma_req_map *svc_rdma_get_req_map(void);
 extern void svc_rdma_put_req_map(struct svc_rdma_req_map *);
-extern int svc_rdma_fastreg(struct svcxprt_rdma *, struct svc_rdma_fastreg_mr *);
 extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *);
 extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
 			      struct svc_rdma_fastreg_mr *);
@@ -238,83 +234,4 @@
 extern int svc_rdma_init(void);
 extern void svc_rdma_cleanup(void);
 
-/*
- * Returns the address of the first read chunk or <nul> if no read chunk is
- * present
- */
-static inline struct rpcrdma_read_chunk *
-svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
-{
-	struct rpcrdma_read_chunk *ch =
-		(struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
-
-	if (ch->rc_discrim == 0)
-		return NULL;
-
-	return ch;
-}
-
-/*
- * Returns the address of the first read write array element or <nul> if no
- * write array list is present
- */
-static inline struct rpcrdma_write_array *
-svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
-{
-	if (rmsgp->rm_body.rm_chunks[0] != 0
-	    || rmsgp->rm_body.rm_chunks[1] == 0)
-		return NULL;
-
-	return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
-}
-
-/*
- * Returns the address of the first reply array element or <nul> if no
- * reply array is present
- */
-static inline struct rpcrdma_write_array *
-svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
-{
-	struct rpcrdma_read_chunk *rch;
-	struct rpcrdma_write_array *wr_ary;
-	struct rpcrdma_write_array *rp_ary;
-
-	/* XXX: Need to fix when reply list may occur with read-list and/or
-	 * write list */
-	if (rmsgp->rm_body.rm_chunks[0] != 0 ||
-	    rmsgp->rm_body.rm_chunks[1] != 0)
-		return NULL;
-
-	rch = svc_rdma_get_read_chunk(rmsgp);
-	if (rch) {
-		while (rch->rc_discrim)
-			rch++;
-
-		/* The reply list follows an empty write array located
-		 * at 'rc_position' here. The reply array is at rc_target.
-		 */
-		rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
-
-		goto found_it;
-	}
-
-	wr_ary = svc_rdma_get_write_array(rmsgp);
-	if (wr_ary) {
-		rp_ary = (struct rpcrdma_write_array *)
-			&wr_ary->
-			wc_array[ntohl(wr_ary->wc_nchunks)].wc_target.rs_length;
-
-		goto found_it;
-	}
-
-	/* No read list, no write list */
-	rp_ary = (struct rpcrdma_write_array *)
-		&rmsgp->rm_body.rm_chunks[2];
-
- found_it:
-	if (rp_ary->wc_discrim == 0)
-		return NULL;
-
-	return rp_ary;
-}
 #endif
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 79f6f8f..78512cf 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -116,6 +116,7 @@
 		      struct svc_serv *);
 int	svc_create_xprt(struct svc_serv *, const char *, struct net *,
 			const int, const unsigned short, int);
+void	svc_xprt_do_enqueue(struct svc_xprt *xprt);
 void	svc_xprt_enqueue(struct svc_xprt *xprt);
 void	svc_xprt_put(struct svc_xprt *xprt);
 void	svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index b45c45b..0800131 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -810,6 +810,7 @@
 asmlinkage long sys_eventfd(unsigned int count);
 asmlinkage long sys_eventfd2(unsigned int count, int flags);
 asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags);
+asmlinkage long sys_userfaultfd(int flags);
 asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);
 asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int);
 asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ae572c1..d6f2c2c 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -129,4 +129,6 @@
 extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
 extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
 
+extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
+
 #endif		/* __LINUX_UACCESS_H__ */
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
new file mode 100644
index 0000000..587480a
--- /dev/null
+++ b/include/linux/userfaultfd_k.h
@@ -0,0 +1,85 @@
+/*
+ *  include/linux/userfaultfd_k.h
+ *
+ *  Copyright (C) 2015  Red Hat, Inc.
+ *
+ */
+
+#ifndef _LINUX_USERFAULTFD_K_H
+#define _LINUX_USERFAULTFD_K_H
+
+#ifdef CONFIG_USERFAULTFD
+
+#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */
+
+#include <linux/fcntl.h>
+
+/*
+ * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
+ * new flags, since they might collide with O_* ones. We want
+ * to re-use O_* flags that couldn't possibly have a meaning
+ * from userfaultfd, in order to leave a free define-space for
+ * shared O_* flags.
+ */
+#define UFFD_CLOEXEC O_CLOEXEC
+#define UFFD_NONBLOCK O_NONBLOCK
+
+#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
+#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
+
+extern int handle_userfault(struct vm_area_struct *vma, unsigned long address,
+			    unsigned int flags, unsigned long reason);
+
+extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
+			    unsigned long src_start, unsigned long len);
+extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
+			      unsigned long dst_start,
+			      unsigned long len);
+
+/* mm helpers */
+static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
+					struct vm_userfaultfd_ctx vm_ctx)
+{
+	return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
+}
+
+static inline bool userfaultfd_missing(struct vm_area_struct *vma)
+{
+	return vma->vm_flags & VM_UFFD_MISSING;
+}
+
+static inline bool userfaultfd_armed(struct vm_area_struct *vma)
+{
+	return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP);
+}
+
+#else /* CONFIG_USERFAULTFD */
+
+/* mm helpers */
+static inline int handle_userfault(struct vm_area_struct *vma,
+				   unsigned long address,
+				   unsigned int flags,
+				   unsigned long reason)
+{
+	return VM_FAULT_SIGBUS;
+}
+
+static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
+					struct vm_userfaultfd_ctx vm_ctx)
+{
+	return true;
+}
+
+static inline bool userfaultfd_missing(struct vm_area_struct *vma)
+{
+	return false;
+}
+
+static inline bool userfaultfd_armed(struct vm_area_struct *vma)
+{
+	return false;
+}
+
+#endif /* CONFIG_USERFAULTFD */
+
+#endif /* _LINUX_USERFAULTFD_K_H */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 1e1bf9f..d3d0772 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -147,7 +147,8 @@
 
 typedef int wait_bit_action_f(struct wait_bit_key *);
 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr,
+			  void *key);
 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
@@ -179,7 +180,7 @@
 #define wake_up_poll(x, m)						\
 	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
 #define wake_up_locked_poll(x, m)					\
-	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
+	__wake_up_locked_key((x), TASK_NORMAL, 1, (void *) (m))
 #define wake_up_interruptible_poll(x, m)				\
 	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
 #define wake_up_interruptible_sync_poll(x, m)				\
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index f47fead..d74a0e9 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -140,12 +140,4 @@
 extern int watchdog_register_device(struct watchdog_device *);
 extern void watchdog_unregister_device(struct watchdog_device *);
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-void watchdog_nmi_disable_all(void);
-void watchdog_nmi_enable_all(void);
-#else
-static inline void watchdog_nmi_disable_all(void) {}
-static inline void watchdog_nmi_enable_all(void) {}
-#endif
-
 #endif  /* ifndef _LINUX_WATCHDOG_H */
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 7a6c1d6..f2ffe5b 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -200,4 +200,14 @@
 ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
 			   loff_t *off);
 
+/*
+ * For EEH, a driver may want to assert that a PERST will reload the same image
+ * from flash into the FPGA.
+ *
+ * This is a property of the entire adapter, not a single AFU, so drivers
+ * should set this property with care!
+ */
+void cxl_perst_reloads_same_image(struct cxl_afu *afu,
+				  bool perst_reloads_same_image);
+
 #endif /* _MISC_CXL_H */
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index dc03d77..a2f59ec 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -197,6 +197,27 @@
 #define LOWPAN_NHC_UDP_CS_P_11	0xF3 /* source & dest = 0xF0B + 4bit inline */
 #define LOWPAN_NHC_UDP_CS_C	0x04 /* checksum elided */
 
+#define LOWPAN_PRIV_SIZE(llpriv_size)	\
+	(sizeof(struct lowpan_priv) + llpriv_size)
+
+enum lowpan_lltypes {
+	LOWPAN_LLTYPE_BTLE,
+	LOWPAN_LLTYPE_IEEE802154,
+};
+
+struct lowpan_priv {
+	enum lowpan_lltypes lltype;
+
+	/* must be last */
+	u8 priv[0] __aligned(sizeof(void *));
+};
+
+static inline
+struct lowpan_priv *lowpan_priv(const struct net_device *dev)
+{
+	return netdev_priv(dev);
+}
+
 #ifdef DEBUG
 /* print data in line */
 static inline void raw_dump_inline(const char *caller, char *msg,
@@ -372,6 +393,8 @@
 	return skb->len + uncomp_header - ret;
 }
 
+void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype);
+
 int
 lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
 			 const u8 *saddr, const u8 saddr_type,
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 931738b..9d446f13 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -21,6 +21,8 @@
 	struct gnet_stats_rate_est64	tcfc_rate_est;
 	spinlock_t			tcfc_lock;
 	struct rcu_head			tcfc_rcu;
+	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+	struct gnet_stats_queue __percpu *cpu_qstats;
 };
 #define tcf_head	common.tcfc_head
 #define tcf_index	common.tcfc_index
@@ -68,6 +70,17 @@
 	kfree(hf->htab);
 }
 
+/* Update lastuse only if needed, to avoid dirtying a cache line.
+ * We use a temp variable to avoid fetching jiffies twice.
+ */
+static inline void tcf_lastuse_update(struct tcf_t *tm)
+{
+	unsigned long now = jiffies;
+
+	if (tm->lastuse != now)
+		tm->lastuse = now;
+}
+
 #ifdef CONFIG_NET_CLS_ACT
 
 #define ACT_P_CREATED 1
@@ -98,11 +111,10 @@
 };
 
 int tcf_hash_search(struct tc_action *a, u32 index);
-void tcf_hash_destroy(struct tc_action *a);
 u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
 int tcf_hash_check(u32 index, struct tc_action *a, int bind);
 int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
-		    int size, int bind);
+		    int size, int bind, bool cpustats);
 void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
 void tcf_hash_insert(struct tc_action *a);
 
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index def59d3..0c3ac5a 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -158,8 +158,8 @@
 				 const struct in6_addr *addr);
 	int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
 				 const struct in6_addr *addr);
-	int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst,
-				struct flowi6 *fl6);
+	int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
+			       struct dst_entry **dst, struct flowi6 *fl6);
 	void (*udpv6_encap_enable)(void);
 	void (*ndisc_send_na)(struct net_device *dev, struct neighbour *neigh,
 			      const struct in6_addr *daddr,
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 3bd618d..9e1a59e 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -512,9 +512,11 @@
 		HCI_AUTO_CONN_DIRECT,
 		HCI_AUTO_CONN_ALWAYS,
 		HCI_AUTO_CONN_LINK_LOSS,
+		HCI_AUTO_CONN_EXPLICIT,
 	} auto_connect;
 
 	struct hci_conn *conn;
+	bool explicit_connect;
 };
 
 extern struct list_head hci_dev_list;
@@ -639,6 +641,7 @@
 	HCI_CONN_DROP,
 	HCI_CONN_PARAM_REMOVAL_PEND,
 	HCI_CONN_NEW_LINK_KEY,
+	HCI_CONN_SCANNING,
 };
 
 static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -808,6 +811,26 @@
 	return NULL;
 }
 
+static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
+{
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct hci_conn  *c;
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(c, &h->list, list) {
+		if (c->type == LE_LINK && c->state == BT_CONNECT &&
+		    !test_bit(HCI_CONN_SCANNING, &c->flags)) {
+			rcu_read_unlock();
+			return c;
+		}
+	}
+
+	rcu_read_unlock();
+
+	return NULL;
+}
+
 int hci_disconnect(struct hci_conn *conn, __u8 reason);
 bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
 void hci_sco_setup(struct hci_conn *conn, __u8 status);
@@ -823,6 +846,9 @@
 void hci_chan_list_flush(struct hci_conn *conn);
 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
 
+struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+				     u8 dst_type, u8 sec_level,
+				     u16 conn_timeout, u8 role);
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 				u8 dst_type, u8 sec_level, u16 conn_timeout,
 				u8 role);
@@ -988,6 +1014,9 @@
 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
 						  bdaddr_t *addr,
 						  u8 addr_type);
+struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
+						    bdaddr_t *addr,
+						    u8 addr_type);
 
 void hci_uuids_clear(struct hci_dev *hdev);
 
@@ -1297,7 +1326,7 @@
 	if (max >= to_multiplier * 8)
 		return -EINVAL;
 
-	max_latency = (to_multiplier * 8 / max) - 1;
+	max_latency = (to_multiplier * 4 / max) - 1;
 	if (latency > 499 || latency > max_latency)
 		return -EINVAL;
 
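
For context: hci_lookup_le_connect() above is the canonical RCU walk over
the connection hash; it drops rcu_read_lock() before returning, so callers
are expected to hold hdev->lock to keep the result stable. A hedged sketch
of a caller (function name and disconnect reason are hypothetical):

    /* Hypothetical: tear down the one outstanding LE connect attempt
     * that is past the passive-scanning stage (HCI_CONN_SCANNING clear).
     */
    static void sample_cancel_le_connect(struct hci_dev *hdev)
    {
            struct hci_conn *conn;

            hci_dev_lock(hdev);

            conn = hci_lookup_le_connect(hdev);
            if (conn)
                    hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);

            hci_dev_unlock(hdev);
    }
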
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 2239a37..c98afc0 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -55,6 +55,8 @@
 #define L2CAP_INFO_TIMEOUT		msecs_to_jiffies(4000)
 #define L2CAP_MOVE_TIMEOUT		msecs_to_jiffies(4000)
 #define L2CAP_MOVE_ERTX_TIMEOUT		msecs_to_jiffies(60000)
+#define L2CAP_WAIT_ACK_POLL_PERIOD	msecs_to_jiffies(200)
+#define L2CAP_WAIT_ACK_TIMEOUT		msecs_to_jiffies(10000)
 
 #define L2CAP_A2MP_DEFAULT_MTU		670
 
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index c28aca2..1797235 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -66,6 +66,7 @@
 	BOND_OPT_AD_ACTOR_SYS_PRIO,
 	BOND_OPT_AD_ACTOR_SYSTEM,
 	BOND_OPT_AD_USER_PORT_KEY,
+	BOND_OPT_NUM_PEER_NOTIF_ALIAS,
 	BOND_OPT_LAST
 };
 
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 883fe1e..f0889a2 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -2369,8 +2369,7 @@
  *	method returns 0.)
  *
  * @mgmt_frame_register: Notify driver that a management frame type was
- *	registered. Note that this callback may not sleep, and cannot run
- *	concurrently with itself.
+ *	registered. The callback is allowed to sleep.
  *
  * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device.
  *	Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 290a9a6..76b1ffa 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -34,6 +34,8 @@
 							   int type);
 	void	(*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
 					       struct net_device *dev);
+	int	(*suspend)(struct wpan_phy *wpan_phy);
+	int	(*resume)(struct wpan_phy *wpan_phy);
 	int	(*add_virtual_intf)(struct wpan_phy *wpan_phy,
 				    const char *name,
 				    unsigned char name_assign_type,
@@ -61,6 +63,8 @@
 					 s8 max_frame_retries);
 	int	(*set_lbt_mode)(struct wpan_phy *wpan_phy,
 				struct wpan_dev *wpan_dev, bool mode);
+	int	(*set_ackreq_default)(struct wpan_phy *wpan_phy,
+				      struct wpan_dev *wpan_dev, bool ackreq);
 };
 
 static inline bool
@@ -171,6 +175,9 @@
 	struct list_head list;
 	struct net_device *netdev;
 
+	/* lowpan interface, set when the wpan_dev belongs to a lowpan_dev */
+	struct net_device *lowpan_dev;
+
 	u32 identifier;
 
 	/* MAC PIB */
@@ -191,6 +198,9 @@
 	bool lbt;
 
 	bool promiscuous_mode;
+
+	/* fallback for acknowledgment bit setting */
+	bool ackreq;
 };
 
 #define to_phy(_dev)	container_of(_dev, struct wpan_phy, dev)
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 2d1d73c..9fcaedf 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -140,14 +140,16 @@
 
 struct sk_buff;
 void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
-			      __be32 from, __be32 to, int pseudohdr);
+			      __be32 from, __be32 to, bool pseudohdr);
 void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
 			       const __be32 *from, const __be32 *to,
-			       int pseudohdr);
+			       bool pseudohdr);
+void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
+				     __wsum diff, bool pseudohdr);
 
 static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
 					    __be16 from, __be16 to,
-					    int pseudohdr)
+					    bool pseudohdr)
 {
 	inet_proto_csum_replace4(sum, skb, (__force __be32)from,
 				 (__force __be32)to, pseudohdr);
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index c15d394..ccd6d8b 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -49,9 +49,38 @@
 	if (classid != sk->sk_classid)
 		sk->sk_classid = classid;
 }
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+	u32 classid = task_cls_state(current)->classid;
+
+	/* Due to the nature of the classifier it is required to ignore all
+	 * packets originating from softirq context as accessing `current'
+	 * would lead to false results.
+	 *
+	 * This test assumes that all callers of dev_queue_xmit() explicitly
+	 * disable bh. Knowing this, it is possible to detect softirq based
+	 * calls by looking at the number of nested bh disable calls because
+	 * softirqs always disable bh.
+	 */
+	if (in_serving_softirq()) {
+		/* If there is an sk_classid we'll use that. */
+		if (!skb->sk)
+			return 0;
+
+		classid = skb->sk->sk_classid;
+	}
+
+	return classid;
+}
 #else /* !CONFIG_CGROUP_NET_CLASSID */
 static inline void sock_update_classid(struct sock *sk)
 {
 }
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+	return 0;
+}
 #endif /* CONFIG_CGROUP_NET_CLASSID */
 #endif  /* _NET_CLS_CGROUP_H */
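
For context: task_get_classid() above centralizes the softirq check that
classifiers previously open-coded, falling back to skb->sk->sk_classid
when `current' is meaningless. A minimal sketch of a classify hook using
it (the surrounding classifier is hypothetical):

    /* Hypothetical classify() hook: 0 means "no classid known here". */
    static int sample_classify(struct sk_buff *skb,
                               const struct tcf_proto *tp,
                               struct tcf_result *res)
    {
            u32 classid = task_get_classid(skb);

            if (!classid)
                    return -1;      /* no match, let the next filter try */

            res->classid = classid;
            res->class = 0;
            return 0;
    }
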
diff --git a/include/net/dsa.h b/include/net/dsa.h
index fbca63b..b34d812 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -171,6 +171,11 @@
 	return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
 }
 
+static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
+{
+	return !!((ds->dsa_port_mask) & (1 << p));
+}
+
 static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
 {
 	return ds->phys_port_mask & (1 << p) && ds->ports[p];
@@ -296,12 +301,28 @@
 				     u32 br_port_mask);
 	int	(*port_stp_update)(struct dsa_switch *ds, int port,
 				   u8 state);
-	int	(*fdb_add)(struct dsa_switch *ds, int port,
-			   const unsigned char *addr, u16 vid);
-	int	(*fdb_del)(struct dsa_switch *ds, int port,
-			   const unsigned char *addr, u16 vid);
-	int	(*fdb_getnext)(struct dsa_switch *ds, int port,
-			       unsigned char *addr, bool *is_static);
+
+	/*
+	 * VLAN support
+	 */
+	int	(*port_pvid_get)(struct dsa_switch *ds, int port, u16 *pvid);
+	int	(*port_pvid_set)(struct dsa_switch *ds, int port, u16 pvid);
+	int	(*port_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
+				 bool untagged);
+	int	(*port_vlan_del)(struct dsa_switch *ds, int port, u16 vid);
+	int	(*vlan_getnext)(struct dsa_switch *ds, u16 *vid,
+				unsigned long *ports, unsigned long *untagged);
+
+	/*
+	 * Forwarding database
+	 */
+	int	(*port_fdb_add)(struct dsa_switch *ds, int port,
+				const unsigned char *addr, u16 vid);
+	int	(*port_fdb_del)(struct dsa_switch *ds, int port,
+				const unsigned char *addr, u16 vid);
+	int	(*port_fdb_getnext)(struct dsa_switch *ds, int port,
+				    unsigned char *addr, u16 *vid,
+				    bool *is_static);
 };
 
 void register_switch_driver(struct dsa_switch_driver *type);
diff --git a/include/net/dst.h b/include/net/dst.h
index 2bc73f8a..9261d92 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -57,6 +57,7 @@
 #define DST_FAKE_RTABLE		0x0040
 #define DST_XFRM_TUNNEL		0x0080
 #define DST_XFRM_QUEUE		0x0100
+#define DST_METADATA		0x0200
 
 	unsigned short		pending_confirm;
 
@@ -83,12 +84,13 @@
 	__u32			__pad2;
 #endif
 
+#ifdef CONFIG_64BIT
+	struct lwtunnel_state   *lwtstate;
 	/*
 	 * Align __refcnt to a 64 bytes alignment
 	 * (L1_CACHE_SIZE would be too much)
 	 */
-#ifdef CONFIG_64BIT
-	long			__pad_to_align_refcnt[2];
+	long			__pad_to_align_refcnt[1];
 #endif
 	/*
 	 * __refcnt wants to be on a different cache line from
@@ -97,6 +99,9 @@
 	atomic_t		__refcnt;	/* client references	*/
 	int			__use;
 	unsigned long		lastuse;
+#ifndef CONFIG_64BIT
+	struct lwtunnel_state   *lwtstate;
+#endif
 	union {
 		struct dst_entry	*next;
 		struct rtable __rcu	*rt_next;
@@ -202,6 +207,12 @@
 		p[metric-1] = val;
 }
 
+/* Kernel-internal feature bits that are unallocated in user space. */
+#define DST_FEATURE_ECN_CA	(1 << 31)
+
+#define DST_FEATURE_MASK	(DST_FEATURE_ECN_CA)
+#define DST_FEATURE_ECN_MASK	(DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN)
+
 static inline u32
 dst_feature(const struct dst_entry *dst, u32 feature)
 {
@@ -284,13 +295,18 @@
 	}
 }
 
-static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
+static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
 {
-	nskb->_skb_refdst = oskb->_skb_refdst;
+	nskb->_skb_refdst = refdst;
 	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
 		dst_clone(skb_dst(nskb));
 }
 
+static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
+{
+	__skb_dst_copy(nskb, oskb->_skb_refdst);
+}
+
 /**
  * skb_dst_force - makes sure skb dst is refcounted
  * @skb: buffer
@@ -356,6 +372,9 @@
 }
 void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
 		int initial_obsolete, unsigned short flags);
+void dst_init(struct dst_entry *dst, struct dst_ops *ops,
+	      struct net_device *dev, int initial_ref, int initial_obsolete,
+	      unsigned short flags);
 void __dst_free(struct dst_entry *dst);
 struct dst_entry *dst_destroy(struct dst_entry *dst);
 
@@ -457,7 +476,7 @@
 	return dst;
 }
 
-void dst_init(void);
+void dst_subsys_init(void);
 
 /* Flags for xfrm_lookup flags argument. */
 enum {
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
new file mode 100644
index 0000000..af9d538
--- /dev/null
+++ b/include/net/dst_metadata.h
@@ -0,0 +1,108 @@
+#ifndef __NET_DST_METADATA_H
+#define __NET_DST_METADATA_H 1
+
+#include <linux/skbuff.h>
+#include <net/ip_tunnels.h>
+#include <net/dst.h>
+
+struct metadata_dst {
+	struct dst_entry		dst;
+	union {
+		struct ip_tunnel_info	tun_info;
+	} u;
+};
+
+static inline struct metadata_dst *skb_metadata_dst(struct sk_buff *skb)
+{
+	struct metadata_dst *md_dst = (struct metadata_dst *) skb_dst(skb);
+
+	if (md_dst && md_dst->dst.flags & DST_METADATA)
+		return md_dst;
+
+	return NULL;
+}
+
+static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb)
+{
+	struct metadata_dst *md_dst = skb_metadata_dst(skb);
+	struct dst_entry *dst;
+
+	if (md_dst)
+		return &md_dst->u.tun_info;
+
+	dst = skb_dst(skb);
+	if (dst && dst->lwtstate)
+		return lwt_tun_info(dst->lwtstate);
+
+	return NULL;
+}
+
+static inline bool skb_valid_dst(const struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+
+	return dst && !(dst->flags & DST_METADATA);
+}
+
+struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
+struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
+
+static inline struct metadata_dst *tun_rx_dst(int md_size)
+{
+	struct metadata_dst *tun_dst;
+
+	tun_dst = metadata_dst_alloc(md_size, GFP_ATOMIC);
+	if (!tun_dst)
+		return NULL;
+
+	tun_dst->u.tun_info.options_len = 0;
+	tun_dst->u.tun_info.mode = 0;
+	return tun_dst;
+}
+
+static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
+						 __be16 flags,
+						 __be64 tunnel_id,
+						 int md_size)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+	struct metadata_dst *tun_dst;
+
+	tun_dst = tun_rx_dst(md_size);
+	if (!tun_dst)
+		return NULL;
+
+	ip_tunnel_key_init(&tun_dst->u.tun_info.key,
+			   iph->saddr, iph->daddr, iph->tos, iph->ttl,
+			   0, 0, tunnel_id, flags);
+	return tun_dst;
+}
+
+static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
+						 __be16 flags,
+						 __be64 tunnel_id,
+						 int md_size)
+{
+	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	struct metadata_dst *tun_dst;
+	struct ip_tunnel_info *info;
+
+	tun_dst = tun_rx_dst(md_size);
+	if (!tun_dst)
+		return NULL;
+
+	info = &tun_dst->u.tun_info;
+	info->mode = IP_TUNNEL_INFO_IPV6;
+	info->key.tun_flags = flags;
+	info->key.tun_id = tunnel_id;
+	info->key.tp_src = 0;
+	info->key.tp_dst = 0;
+
+	info->key.u.ipv6.src = ip6h->saddr;
+	info->key.u.ipv6.dst = ip6h->daddr;
+	info->key.tos = ipv6_get_dsfield(ip6h);
+	info->key.ttl = ip6h->hop_limit;
+	return tun_dst;
+}
+
+#endif /* __NET_DST_METADATA_H */
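
For context: the point of this new header is that tunnel metadata rides on
the skb's dst instead of in driver-private state. A hedged sketch of both
directions (error handling trimmed, function names hypothetical):

    /* Receive: record the outer IPv4 header in a metadata dst. */
    static int sample_tunnel_rcv(struct sk_buff *skb, __be16 flags,
                                 __be64 tun_id)
    {
            struct metadata_dst *tun_dst;

            tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0 /* no opts */);
            if (!tun_dst)
                    return -ENOMEM;

            skb_dst_set(skb, &tun_dst->dst);
            return 0;
    }

    /* Transmit: read back whatever an earlier layer attached. */
    static void sample_tunnel_xmit(struct sk_buff *skb)
    {
            struct ip_tunnel_info *info = skb_tunnel_info(skb);

            if (info && (info->mode & IP_TUNNEL_INFO_TX)) {
                    /* build the outer header from info->key ... */
            }
    }
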
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 903a55e..4e8f804 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -19,6 +19,7 @@
 	u8			action;
 	/* 3 bytes hole, try to use */
 	u32			target;
+	__be64			tun_id;
 	struct fib_rule __rcu	*ctarget;
 	struct net		*fr_net;
 
diff --git a/include/net/flow.h b/include/net/flow.h
index 8109a15..acd6a09 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -10,6 +10,7 @@
 #include <linux/socket.h>
 #include <linux/in6.h>
 #include <linux/atomic.h>
+#include <net/flow_dissector.h>
 
 /*
  * ifindex generation is per-net namespace, and loopback is
@@ -19,6 +20,10 @@
 
 #define LOOPBACK_IFINDEX	1
 
+struct flowi_tunnel {
+	__be64			tun_id;
+};
+
 struct flowi_common {
 	int	flowic_oif;
 	int	flowic_iif;
@@ -29,7 +34,9 @@
 	__u8	flowic_flags;
 #define FLOWI_FLAG_ANYSRC		0x01
 #define FLOWI_FLAG_KNOWN_NH		0x02
+#define FLOWI_FLAG_VRFSRC		0x04
 	__u32	flowic_secid;
+	struct flowi_tunnel flowic_tun_key;
 };
 
 union flowi_uli {
@@ -66,6 +73,7 @@
 #define flowi4_proto		__fl_common.flowic_proto
 #define flowi4_flags		__fl_common.flowic_flags
 #define flowi4_secid		__fl_common.flowic_secid
+#define flowi4_tun_key		__fl_common.flowic_tun_key
 
 	/* (saddr,daddr) must be grouped, same order as in IP header */
 	__be32			saddr;
@@ -95,6 +103,7 @@
 	fl4->flowi4_proto = proto;
 	fl4->flowi4_flags = flags;
 	fl4->flowi4_secid = 0;
+	fl4->flowi4_tun_key.tun_id = 0;
 	fl4->daddr = daddr;
 	fl4->saddr = saddr;
 	fl4->fl4_dport = dport;
@@ -122,6 +131,7 @@
 #define flowi6_proto		__fl_common.flowic_proto
 #define flowi6_flags		__fl_common.flowic_flags
 #define flowi6_secid		__fl_common.flowic_secid
+#define flowi6_tun_key		__fl_common.flowic_tun_key
 	struct in6_addr		daddr;
 	struct in6_addr		saddr;
 	__be32			flowlabel;
@@ -165,6 +175,7 @@
 #define flowi_proto	u.__fl_common.flowic_proto
 #define flowi_flags	u.__fl_common.flowic_flags
 #define flowi_secid	u.__fl_common.flowic_secid
+#define flowi_tun_key	u.__fl_common.flowic_tun_key
 } __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
@@ -233,4 +244,22 @@
 void flow_cache_flush_deferred(struct net *net);
 extern atomic_t flow_cache_genid;
 
+__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys);
+
+static inline __u32 get_hash_from_flowi6(const struct flowi6 *fl6)
+{
+	struct flow_keys keys;
+
+	return __get_hash_from_flowi6(fl6, &keys);
+}
+
+__u32 __get_hash_from_flowi4(const struct flowi4 *fl4, struct flow_keys *keys);
+
+static inline __u32 get_hash_from_flowi4(const struct flowi4 *fl4)
+{
+	struct flow_keys keys;
+
+	return __get_hash_from_flowi4(fl4, &keys);
+}
+
 #endif
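
For context: flowic_tun_key threads a tunnel id into the flow key so that
route lookups (and the FIB-rule tun_id match added below) can see it. A
minimal hedged sketch of a lookup keyed by tunnel id:

    /* Hypothetical: route an encapsulated packet so that FIB rules can
     * match on the VNI/tunnel id of the flow.
     */
    static struct rtable *sample_route_by_tun_id(struct net *net,
                                                 __be32 daddr, __be64 tun_id)
    {
            struct flowi4 fl4;

            memset(&fl4, 0, sizeof(fl4));
            fl4.daddr = daddr;
            fl4.flowi4_tun_key.tun_id = tun_id;

            return ip_route_output_key(net, &fl4);
    }
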
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 1a8c224..8c8548c 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -2,7 +2,6 @@
 #define _NET_FLOW_DISSECTOR_H
 
 #include <linux/types.h>
-#include <linux/skbuff.h>
 #include <linux/in6.h>
 #include <uapi/linux/if_ether.h>
 
@@ -13,8 +12,13 @@
 struct flow_dissector_key_control {
 	u16	thoff;
 	u16	addr_type;
+	u32	flags;
 };
 
+#define FLOW_DIS_IS_FRAGMENT	BIT(0)
+#define FLOW_DIS_FIRST_FRAG	BIT(1)
+#define FLOW_DIS_ENCAPSULATION	BIT(2)
+
 /**
  * struct flow_dissector_key_basic:
  * @thoff: Transport header offset
@@ -123,6 +127,11 @@
 	FLOW_DISSECTOR_KEY_MAX,
 };
 
+#define FLOW_DISSECTOR_F_PARSE_1ST_FRAG		BIT(0)
+#define FLOW_DISSECTOR_F_STOP_AT_L3		BIT(1)
+#define FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL	BIT(2)
+#define FLOW_DISSECTOR_F_STOP_AT_ENCAP		BIT(3)
+
 struct flow_dissector_key {
 	enum flow_dissector_key_id key_id;
 	size_t offset; /* offset of struct flow_dissector_key_*
@@ -134,23 +143,6 @@
 	unsigned short int offset[FLOW_DISSECTOR_KEY_MAX];
 };
 
-void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
-			     const struct flow_dissector_key *key,
-			     unsigned int key_count);
-
-bool __skb_flow_dissect(const struct sk_buff *skb,
-			struct flow_dissector *flow_dissector,
-			void *target_container,
-			void *data, __be16 proto, int nhoff, int hlen);
-
-static inline bool skb_flow_dissect(const struct sk_buff *skb,
-				    struct flow_dissector *flow_dissector,
-				    void *target_container)
-{
-	return __skb_flow_dissect(skb, flow_dissector, target_container,
-				  NULL, 0, 0, 0);
-}
-
 struct flow_keys {
 	struct flow_dissector_key_control control;
 #define FLOW_KEYS_HASH_START_FIELD basic
@@ -170,38 +162,6 @@
 extern struct flow_dissector flow_keys_dissector;
 extern struct flow_dissector flow_keys_buf_dissector;
 
-static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
-					      struct flow_keys *flow)
-{
-	memset(flow, 0, sizeof(*flow));
-	return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
-				  NULL, 0, 0, 0);
-}
-
-static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
-						  void *data, __be16 proto,
-						  int nhoff, int hlen)
-{
-	memset(flow, 0, sizeof(*flow));
-	return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
-				  data, proto, nhoff, hlen);
-}
-
-__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
-			    void *data, int hlen_proto);
-
-static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
-					int thoff, u8 ip_proto)
-{
-	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
-}
-
-u32 flow_hash_from_keys(struct flow_keys *keys);
-void __skb_get_hash(struct sk_buff *skb);
-u32 skb_get_poff(const struct sk_buff *skb);
-u32 __skb_get_poff(const struct sk_buff *skb, void *data,
-		   const struct flow_keys *keys, int hlen);
-
 /* struct flow_keys_digest:
  *
  * This structure is used to hold a digest of the full flow keys. This is a
@@ -217,4 +177,11 @@
 void make_flow_keys_digest(struct flow_keys_digest *digest,
 			   const struct flow_keys *flow);
 
+static inline bool flow_keys_have_l4(struct flow_keys *keys)
+{
+	return (keys->ports.ports || keys->tags.flow_label);
+}
+
+u32 flow_hash_from_keys(struct flow_keys *keys);
+
 #endif
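
For context: flow_keys_have_l4() above answers "does this dissected flow
carry per-flow entropy" (L4 ports or an IPv6 flow label), which is the
usual gate before trusting flow_hash_from_keys() for load balancing. A
short hedged sketch:

    /* Hypothetical: fall back when dissection found no L4 entropy. */
    static u32 sample_flow_hash(struct flow_keys *keys, u32 fallback)
    {
            if (!flow_keys_have_l4(keys))
                    return fallback;        /* L3-only: too coarse */

            return flow_hash_from_keys(keys);
    }
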
diff --git a/include/net/geneve.h b/include/net/geneve.h
index 2a0543a..3106ed6 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -62,40 +62,9 @@
 	struct geneve_opt options[];
 };
 
-static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
-{
-	return (struct genevehdr *)(udp_hdr(skb) + 1);
-}
-
 #ifdef CONFIG_INET
-struct geneve_sock;
-
-typedef void (geneve_rcv_t)(struct geneve_sock *gs, struct sk_buff *skb);
-
-struct geneve_sock {
-	struct list_head	list;
-	geneve_rcv_t		*rcv;
-	void			*rcv_data;
-	struct socket		*sock;
-	struct rcu_head		rcu;
-	int			refcnt;
-	struct udp_offload	udp_offloads;
-};
-
-#define GENEVE_VER 0
-#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
-
-struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
-				    geneve_rcv_t *rcv, void *data,
-				    bool no_share, bool ipv6);
-
-void geneve_sock_release(struct geneve_sock *vs);
-
-int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
-		    struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
-		    __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
-		    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
-		    bool csum, bool xnet);
+struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
+					u8 name_assign_type, u16 dst_port);
 #endif /* ifdef CONFIG_INET */
 
 #endif /* ifdef __NET_GENEVE_H */
diff --git a/include/net/gre.h b/include/net/gre.h
index b531820..97eafdc 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -4,6 +4,12 @@
 #include <linux/skbuff.h>
 #include <net/ip_tunnels.h>
 
+struct gre_base_hdr {
+	__be16 flags;
+	__be16 protocol;
+};
+#define GRE_HEADER_SECTION 4
+
 #define GREPROTO_CISCO		0
 #define GREPROTO_PPTP		1
 #define GREPROTO_MAX		2
@@ -14,91 +20,9 @@
 	void (*err_handler)(struct sk_buff *skb, u32 info);
 };
 
-struct gre_base_hdr {
-	__be16 flags;
-	__be16 protocol;
-};
-#define GRE_HEADER_SECTION 4
-
 int gre_add_protocol(const struct gre_protocol *proto, u8 version);
 int gre_del_protocol(const struct gre_protocol *proto, u8 version);
 
-struct gre_cisco_protocol {
-	int (*handler)(struct sk_buff *skb, const struct tnl_ptk_info *tpi);
-	int (*err_handler)(struct sk_buff *skb, u32 info,
-			   const struct tnl_ptk_info *tpi);
-	u8 priority;
-};
-
-int gre_cisco_register(struct gre_cisco_protocol *proto);
-int gre_cisco_unregister(struct gre_cisco_protocol *proto);
-
-void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
-		      int hdr_len);
-
-static inline struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
-						  bool csum)
-{
-	return iptunnel_handle_offloads(skb, csum,
-					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
-}
-
-
-static inline int ip_gre_calc_hlen(__be16 o_flags)
-{
-	int addend = 4;
-
-	if (o_flags&TUNNEL_CSUM)
-		addend += 4;
-	if (o_flags&TUNNEL_KEY)
-		addend += 4;
-	if (o_flags&TUNNEL_SEQ)
-		addend += 4;
-	return addend;
-}
-
-static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
-{
-	__be16 tflags = 0;
-
-	if (flags & GRE_CSUM)
-		tflags |= TUNNEL_CSUM;
-	if (flags & GRE_ROUTING)
-		tflags |= TUNNEL_ROUTING;
-	if (flags & GRE_KEY)
-		tflags |= TUNNEL_KEY;
-	if (flags & GRE_SEQ)
-		tflags |= TUNNEL_SEQ;
-	if (flags & GRE_STRICT)
-		tflags |= TUNNEL_STRICT;
-	if (flags & GRE_REC)
-		tflags |= TUNNEL_REC;
-	if (flags & GRE_VERSION)
-		tflags |= TUNNEL_VERSION;
-
-	return tflags;
-}
-
-static inline __be16 tnl_flags_to_gre_flags(__be16 tflags)
-{
-	__be16 flags = 0;
-
-	if (tflags & TUNNEL_CSUM)
-		flags |= GRE_CSUM;
-	if (tflags & TUNNEL_ROUTING)
-		flags |= GRE_ROUTING;
-	if (tflags & TUNNEL_KEY)
-		flags |= GRE_KEY;
-	if (tflags & TUNNEL_SEQ)
-		flags |= GRE_SEQ;
-	if (tflags & TUNNEL_STRICT)
-		flags |= GRE_STRICT;
-	if (tflags & TUNNEL_REC)
-		flags |= GRE_REC;
-	if (tflags & TUNNEL_VERSION)
-		flags |= GRE_VERSION;
-
-	return flags;
-}
-
+struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
+				       u8 name_assign_type);
 #endif
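
For context: with the cisco-style helpers moved out of this header, the
remaining public API is version demultiplexing. A hedged sketch of
registering a handler (handler body and version slot are illustrative):

    static int sample_gre_rcv(struct sk_buff *skb)
    {
            /* ... parse the gre_base_hdr, deliver or drop ... */
            kfree_skb(skb);
            return 0;
    }

    static const struct gre_protocol sample_proto = {
            .handler = sample_gre_rcv,
    };

    static int __init sample_init(void)
    {
            /* One handler per GRE version slot (GREPROTO_CISCO here). */
            return gre_add_protocol(&sample_proto, GREPROTO_CISCO);
    }
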
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
index 0f712c0..cf6c745 100644
--- a/include/net/gro_cells.h
+++ b/include/net/gro_cells.h
@@ -32,37 +32,28 @@
 		return;
 	}
 
-	/* We run in BH context */
-	spin_lock(&cell->napi_skbs.lock);
-
 	__skb_queue_tail(&cell->napi_skbs, skb);
 	if (skb_queue_len(&cell->napi_skbs) == 1)
 		napi_schedule(&cell->napi);
-
-	spin_unlock(&cell->napi_skbs.lock);
 }
 
-/* called unser BH context */
+/* called under BH context */
 static inline int gro_cell_poll(struct napi_struct *napi, int budget)
 {
 	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
 	struct sk_buff *skb;
 	int work_done = 0;
 
-	spin_lock(&cell->napi_skbs.lock);
 	while (work_done < budget) {
 		skb = __skb_dequeue(&cell->napi_skbs);
 		if (!skb)
 			break;
-		spin_unlock(&cell->napi_skbs.lock);
 		napi_gro_receive(napi, skb);
 		work_done++;
-		spin_lock(&cell->napi_skbs.lock);
 	}
 
 	if (work_done < budget)
-		napi_complete(napi);
-	spin_unlock(&cell->napi_skbs.lock);
+		napi_complete_done(napi, work_done);
 	return work_done;
 }
 
@@ -77,7 +68,7 @@
 	for_each_possible_cpu(i) {
 		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
 
-		skb_queue_head_init(&cell->napi_skbs);
+		__skb_queue_head_init(&cell->napi_skbs);
 		netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
 		napi_enable(&cell->napi);
 	}
@@ -92,8 +83,9 @@
 		return;
 	for_each_possible_cpu(i) {
 		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
+
 		netif_napi_del(&cell->napi);
-		skb_queue_purge(&cell->napi_skbs);
+		__skb_queue_purge(&cell->napi_skbs);
 	}
 	free_percpu(gcells->cells);
 	gcells->cells = NULL;
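
For context: the hunk above can drop the queue spinlock because both
gro_cells_receive() and gro_cell_poll() run in BH context on the owning
CPU, so each per-cpu queue has a single producer and consumer, and
napi_complete_done() feeds the work count into NAPI/GRO accounting. The
usual tunnel-driver wiring looks roughly like this (sketch; the driver
structure is hypothetical):

    struct sample_priv {
            struct gro_cells gro_cells;
    };

    static int sample_dev_init(struct net_device *dev)
    {
            struct sample_priv *priv = netdev_priv(dev);

            return gro_cells_init(&priv->gro_cells, dev);
    }

    /* rx handler, called in BH context */
    static void sample_dev_rx(struct net_device *dev, struct sk_buff *skb)
    {
            struct sample_priv *priv = netdev_priv(dev);

            skb->dev = dev;
            gro_cells_receive(&priv->gro_cells, skb);
    }
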
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index b73c88a..b07d126 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -205,8 +205,8 @@
 
 void inet_hashinfo_init(struct inet_hashinfo *h);
 
-int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
-int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw);
+void __inet_hash_nolisten(struct sock *sk, struct sock *osk);
+void __inet_hash(struct sock *sk, struct sock *osk);
 void inet_hash(struct sock *sk);
 void inet_unhash(struct sock *sk);
 
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 360c480..879d6e5 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -100,10 +100,8 @@
 void inet_twsk_free(struct inet_timewait_sock *tw);
 void inet_twsk_put(struct inet_timewait_sock *tw);
 
-int inet_twsk_unhash(struct inet_timewait_sock *tw);
-
-int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
-			  struct inet_hashinfo *hashinfo);
+void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+			   struct inet_hashinfo *hashinfo);
 
 struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
 					   struct inet_timewait_death_row *dr,
@@ -113,7 +111,7 @@
 			   struct inet_hashinfo *hashinfo);
 
 void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
-void inet_twsk_deschedule(struct inet_timewait_sock *tw);
+void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
 
 void inet_twsk_purge(struct inet_hashinfo *hashinfo,
 		     struct inet_timewait_death_row *twdr, int family);
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index d5332dd..4a6009d 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -15,16 +15,20 @@
 #include <net/ipv6.h>
 #include <linux/atomic.h>
 
-struct inetpeer_addr_base {
-	union {
-		__be32			a4;
-		__be32			a6[4];
-		struct in6_addr		in6;
-	};
+/* IPv4 address key for cache lookups */
+struct ipv4_addr_key {
+	__be32	addr;
+	int	vif;
 };
 
+#define INETPEER_MAXKEYSZ   (sizeof(struct in6_addr) / sizeof(u32))
+
 struct inetpeer_addr {
-	struct inetpeer_addr_base	addr;
+	union {
+		struct ipv4_addr_key	a4;
+		struct in6_addr		a6;
+		u32			key[INETPEER_MAXKEYSZ];
+	};
 	__u16				family;
 };
 
@@ -65,69 +69,33 @@
 	int			total;
 };
 
-#define INETPEER_BASE_BIT	0x1UL
-
-static inline struct inet_peer *inetpeer_ptr(unsigned long val)
-{
-	BUG_ON(val & INETPEER_BASE_BIT);
-	return (struct inet_peer *) val;
-}
-
-static inline struct inet_peer_base *inetpeer_base_ptr(unsigned long val)
-{
-	if (!(val & INETPEER_BASE_BIT))
-		return NULL;
-	val &= ~INETPEER_BASE_BIT;
-	return (struct inet_peer_base *) val;
-}
-
-static inline bool inetpeer_ptr_is_peer(unsigned long val)
-{
-	return !(val & INETPEER_BASE_BIT);
-}
-
-static inline void __inetpeer_ptr_set_peer(unsigned long *val, struct inet_peer *peer)
-{
-	/* This implicitly clears INETPEER_BASE_BIT */
-	*val = (unsigned long) peer;
-}
-
-static inline bool inetpeer_ptr_set_peer(unsigned long *ptr, struct inet_peer *peer)
-{
-	unsigned long val = (unsigned long) peer;
-	unsigned long orig = *ptr;
-
-	if (!(orig & INETPEER_BASE_BIT) ||
-	    cmpxchg(ptr, orig, val) != orig)
-		return false;
-	return true;
-}
-
-static inline void inetpeer_init_ptr(unsigned long *ptr, struct inet_peer_base *base)
-{
-	*ptr = (unsigned long) base | INETPEER_BASE_BIT;
-}
-
-static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from)
-{
-	unsigned long val = *from;
-
-	*to = val;
-	if (inetpeer_ptr_is_peer(val)) {
-		struct inet_peer *peer = inetpeer_ptr(val);
-		atomic_inc(&peer->refcnt);
-	}
-}
-
 void inet_peer_base_init(struct inet_peer_base *);
 
 void inet_initpeers(void) __init;
 
 #define INETPEER_METRICS_NEW	(~(u32) 0)
 
-static inline bool inet_metrics_new(const struct inet_peer *p)
+static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
 {
-	return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
+	iaddr->a4.addr = ip;
+	iaddr->family = AF_INET;
+}
+
+static inline __be32 inetpeer_get_addr_v4(struct inetpeer_addr *iaddr)
+{
+	return iaddr->a4.addr;
+}
+
+static inline void inetpeer_set_addr_v6(struct inetpeer_addr *iaddr,
+					struct in6_addr *in6)
+{
+	iaddr->a6 = *in6;
+	iaddr->family = AF_INET6;
+}
+
+static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
+{
+	return &iaddr->a6;
 }
 
 /* can be called with or without local BH being disabled */
@@ -137,11 +105,12 @@
 
 static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
 						__be32 v4daddr,
-						int create)
+						int vif, int create)
 {
 	struct inetpeer_addr daddr;
 
-	daddr.addr.a4 = v4daddr;
+	daddr.a4.addr = v4daddr;
+	daddr.a4.vif = vif;
 	daddr.family = AF_INET;
 	return inet_getpeer(base, &daddr, create);
 }
@@ -152,23 +121,36 @@
 {
 	struct inetpeer_addr daddr;
 
-	daddr.addr.in6 = *v6daddr;
+	daddr.a6 = *v6daddr;
 	daddr.family = AF_INET6;
 	return inet_getpeer(base, &daddr, create);
 }
 
+static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
+				    const struct inetpeer_addr *b)
+{
+	int i, n;
+
+	if (a->family == AF_INET)
+		n = sizeof(a->a4) / sizeof(u32);
+	else
+		n = sizeof(a->a6) / sizeof(u32);
+
+	for (i = 0; i < n; i++) {
+		if (a->key[i] == b->key[i])
+			continue;
+		if (a->key[i] < b->key[i])
+			return -1;
+		return 1;
+	}
+
+	return 0;
+}
+
 /* can be called from BH context or outside */
 void inet_putpeer(struct inet_peer *p);
 bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
 
 void inetpeer_invalidate_tree(struct inet_peer_base *);
 
-/*
- * temporary check to make sure we dont access rid, tcp_ts,
- * tcp_ts_stamp if no refcount is taken on inet_peer
- */
-static inline void inet_peer_refcheck(const struct inet_peer *p)
-{
-	WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
-}
 #endif /* _NET_INETPEER_H */
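
For context: the reworked key makes the IPv4 case an (address, vif) pair
so peer state such as ICMP rate limiting can be kept per L3 domain, and
inetpeer_addr_cmp() gives the lookup tree a total order over the raw key
words. A hedged usage sketch (helper name hypothetical, create semantics
as in inet_getpeer()):

    /* Hypothetical: rate-limit something per (daddr, vif). */
    static bool sample_allow(struct inet_peer_base *base, __be32 daddr,
                             int vif, int timeout)
    {
            struct inet_peer *peer;
            bool ok = true;

            peer = inet_getpeer_v4(base, daddr, vif, 1 /* create */);
            if (peer) {
                    ok = inet_peer_xrlim_allow(peer, timeout);
                    inet_putpeer(peer);
            }
            return ok;
    }
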
diff --git a/include/net/ip.h b/include/net/ip.h
index d5fe9f2..9b9ca28 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -202,10 +202,20 @@
 #define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
 #define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
 
+u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
 unsigned long snmp_fold_field(void __percpu *mib, int offt);
 #if BITS_PER_LONG==32
+u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
+			 size_t syncp_offset);
 u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
 #else
+static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
+					size_t syncp_offset)
+{
+	return snmp_get_cpu_field(mib, cpu, offct);
+}
+
 static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
 {
 	return snmp_fold_field(mib, offt);
@@ -370,22 +380,6 @@
 	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 }
 
-static inline void inet_set_txhash(struct sock *sk)
-{
-	struct inet_sock *inet = inet_sk(sk);
-	struct flow_keys keys;
-
-	memset(&keys, 0, sizeof(keys));
-
-	keys.addrs.v4addrs.src = inet->inet_saddr;
-	keys.addrs.v4addrs.dst = inet->inet_daddr;
-	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
-	keys.ports.src = inet->inet_sport;
-	keys.ports.dst = inet->inet_dport;
-
-	sk->sk_txhash = flow_hash_from_keys(&keys);
-}
-
 static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
 {
 	const struct iphdr *iph = skb_gro_network_header(skb);
@@ -474,6 +468,11 @@
 
 #endif
 
+static inline unsigned int ipv4_addr_hash(__be32 ip)
+{
+	return (__force unsigned int) ip;
+}
+
 bool ip_call_ra_chain(struct sk_buff *skb);
 
 /*
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 3b76849..063d304 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -51,6 +51,8 @@
 	struct nlattr	*fc_mp;
 
 	struct nl_info	fc_nlinfo;
+	struct nlattr	*fc_encap;
+	u16		fc_encap_type;
 };
 
 struct fib6_node {
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 5fa643b..a37d043 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -44,7 +44,9 @@
 	u32			fc_flow;
 	u32			fc_nlflags;
 	struct nl_info		fc_nlinfo;
- };
+	struct nlattr		*fc_encap;
+	u16			fc_encap_type;
+};
 
 struct fib_info;
 struct rtable;
@@ -89,6 +91,7 @@
 	struct rtable __rcu * __percpu *nh_pcpu_rth_output;
 	struct rtable __rcu	*nh_rth_input;
 	struct fnhe_hash_bucket	__rcu *nh_exceptions;
+	struct lwtunnel_state	*nh_lwtstate;
 };
 
 /*
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index d8214cb..9a6a3ba 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -4,14 +4,15 @@
 #include <linux/if_tunnel.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
+#include <linux/socket.h>
 #include <linux/types.h>
 #include <linux/u64_stats_sync.h>
 #include <net/dsfield.h>
 #include <net/gro_cells.h>
 #include <net/inet_ecn.h>
-#include <net/ip.h>
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
+#include <net/lwtunnel.h>
 
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
@@ -22,6 +23,44 @@
 /* Keep error state on tunnel for 30 sec */
 #define IPTUNNEL_ERR_TIMEO	(30*HZ)
 
+/* Used to memset ip_tunnel padding. */
+#define IP_TUNNEL_KEY_SIZE	offsetofend(struct ip_tunnel_key, tp_dst)
+
+/* Used to memset ipv4 address padding. */
+#define IP_TUNNEL_KEY_IPV4_PAD	offsetofend(struct ip_tunnel_key, u.ipv4.dst)
+#define IP_TUNNEL_KEY_IPV4_PAD_LEN				\
+	(FIELD_SIZEOF(struct ip_tunnel_key, u) -		\
+	 FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))
+
+struct ip_tunnel_key {
+	__be64			tun_id;
+	union {
+		struct {
+			__be32	src;
+			__be32	dst;
+		} ipv4;
+		struct {
+			struct in6_addr src;
+			struct in6_addr dst;
+		} ipv6;
+	} u;
+	__be16			tun_flags;
+	u8			tos;		/* TOS for IPv4, TC for IPv6 */
+	u8			ttl;		/* TTL for IPv4, HL for IPv6 */
+	__be16			tp_src;
+	__be16			tp_dst;
+};
+
+/* Flags for ip_tunnel_info mode. */
+#define IP_TUNNEL_INFO_TX	0x01	/* represents tx tunnel parameters */
+#define IP_TUNNEL_INFO_IPV6	0x02	/* key contains IPv6 addresses */
+
+struct ip_tunnel_info {
+	struct ip_tunnel_key	key;
+	u8			options_len;
+	u8			mode;
+};
+
 /* 6rd prefix/relay information */
 #ifdef CONFIG_IPV6_SIT_6RD
 struct ip_tunnel_6rd_parm {
@@ -33,8 +72,8 @@
 #endif
 
 struct ip_tunnel_encap {
-	__u16			type;
-	__u16			flags;
+	u16			type;
+	u16			flags;
 	__be16			sport;
 	__be16			dport;
 };
@@ -51,6 +90,8 @@
 	__be32				 saddr;
 };
 
+struct metadata_dst;
+
 struct ip_tunnel {
 	struct ip_tunnel __rcu	*next;
 	struct hlist_node hash_node;
@@ -62,8 +103,8 @@
 					 * arrived */
 
 	/* These four fields used only by GRE */
-	__u32		i_seqno;	/* The last seen seqno	*/
-	__u32		o_seqno;	/* The last output seqno */
+	u32		i_seqno;	/* The last seen seqno	*/
+	u32		o_seqno;	/* The last output seqno */
 	int		tun_hlen;	/* Precalculated header length */
 	int		mlink;
 
@@ -84,6 +125,7 @@
 	unsigned int		prl_count;	/* # of entries in PRL */
 	int			ip_tnl_net_id;
 	struct gro_cells	gro_cells;
+	bool			collect_md;
 };
 
 #define TUNNEL_CSUM		__cpu_to_be16(0x01)
@@ -118,6 +160,7 @@
 struct ip_tunnel_net {
 	struct net_device *fb_tunnel_dev;
 	struct hlist_head tunnels[IP_TNL_HASH_SIZE];
+	struct ip_tunnel __rcu *collect_md_tun;
 };
 
 struct ip_tunnel_encap_ops {
@@ -136,6 +179,40 @@
 int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
 			    unsigned int num);
 
+static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
+				      __be32 saddr, __be32 daddr,
+				      u8 tos, u8 ttl,
+				      __be16 tp_src, __be16 tp_dst,
+				      __be64 tun_id, __be16 tun_flags)
+{
+	key->tun_id = tun_id;
+	key->u.ipv4.src = saddr;
+	key->u.ipv4.dst = daddr;
+	memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
+	       0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
+	key->tos = tos;
+	key->ttl = ttl;
+	key->tun_flags = tun_flags;
+
+	/* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
+	 * the upper tunnel are used.
+	 * E.g.: GRE over IPsec, the tp_src and tp_dst are zero.
+	 */
+	key->tp_src = tp_src;
+	key->tp_dst = tp_dst;
+
+	/* Clear struct padding. */
+	if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
+		memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
+		       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
+}
+
+static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
+					       *tun_info)
+{
+	return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
+}
+
 #ifdef CONFIG_INET
 
 int ip_tunnel_init(struct net_device *dev);
@@ -163,7 +240,8 @@
 				   __be32 key);
 
 int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
-		  const struct tnl_ptk_info *tpi, bool log_ecn_error);
+		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+		  bool log_ecn_error);
 int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
 			 struct ip_tunnel_parm *p);
 int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
@@ -196,8 +274,8 @@
 
 int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
 int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
-		  __be32 src, __be32 dst, __u8 proto,
-		  __u8 tos, __u8 ttl, __be16 df, bool xnet);
+		  __be32 src, __be32 dst, u8 proto,
+		  u8 tos, u8 ttl, __be16 df, bool xnet);
 
 struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
 					 int gso_type_mask);
@@ -221,6 +299,57 @@
 	}
 }
 
+static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
+{
+	return info + 1;
+}
+
+static inline void ip_tunnel_info_opts_get(void *to,
+					   const struct ip_tunnel_info *info)
+{
+	memcpy(to, info + 1, info->options_len);
+}
+
+static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
+					   const void *from, int len)
+{
+	memcpy(ip_tunnel_info_opts(info), from, len);
+	info->options_len = len;
+}
+
+static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
+{
+	return (struct ip_tunnel_info *)lwtstate->data;
+}
+
+extern struct static_key ip_tunnel_metadata_cnt;
+
+/* Returns > 0 if metadata should be collected */
+static inline int ip_tunnel_collect_metadata(void)
+{
+	return static_key_false(&ip_tunnel_metadata_cnt);
+}
+
+void __init ip_tunnel_core_init(void);
+
+void ip_tunnel_need_metadata(void);
+void ip_tunnel_unneed_metadata(void);
+
+#else /* CONFIG_INET */
+
+static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
+{
+	return NULL;
+}
+
+static inline void ip_tunnel_need_metadata(void)
+{
+}
+
+static inline void ip_tunnel_unneed_metadata(void)
+{
+}
+
 #endif /* CONFIG_INET */
 
 #endif /* __NET_IP_TUNNELS_H */
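
For context: option bytes live directly behind struct ip_tunnel_info in
memory, which is why ip_tunnel_info_opts() is simply `info + 1`; the
space must have been reserved at allocation time. A minimal hedged sketch
(geneve-style option TLVs assumed):

    /* Hypothetical rx path: stash received option TLVs behind the info.
     * The metadata dst must have been allocated with room for them,
     * e.g. metadata_dst_alloc(optslen, GFP_ATOMIC).
     */
    static void sample_set_opts(struct ip_tunnel_info *info,
                                const void *tlvs, int len)
    {
            ip_tunnel_info_opts_set(info, tlvs, len);
    }

    /* Hypothetical tx path: read them back when building the header. */
    static void sample_get_opts(const struct ip_tunnel_info *info, void *buf)
    {
            if (info->options_len)
                    ip_tunnel_info_opts_get(buf, info);
    }
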
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 4e3731e..9b9ca87 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -846,6 +846,17 @@
 /* How much time to keep dests in trash */
 #define IP_VS_DEST_TRASH_PERIOD		(120 * HZ)
 
+struct ipvs_sync_daemon_cfg {
+	union nf_inet_addr	mcast_group;
+	int			syncid;
+	u16			sync_maxlen;
+	u16			mcast_port;
+	u8			mcast_af;
+	u8			mcast_ttl;
+	/* multicast interface name */
+	char			mcast_ifn[IP_VS_IFNAME_MAXLEN];
+};
+
 /* IPVS in network namespace */
 struct netns_ipvs {
 	int			gen;		/* Generation */
@@ -961,15 +972,10 @@
 	spinlock_t		sync_buff_lock;
 	struct task_struct	**backup_threads;
 	int			threads_mask;
-	int			send_mesg_maxlen;
-	int			recv_mesg_maxlen;
 	volatile int		sync_state;
-	volatile int		master_syncid;
-	volatile int		backup_syncid;
 	struct mutex		sync_mutex;
-	/* multicast interface name */
-	char			master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-	char			backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+	struct ipvs_sync_daemon_cfg	mcfg;	/* Master Configuration */
+	struct ipvs_sync_daemon_cfg	bcfg;	/* Backup Configuration */
 	/* net name space ptr */
 	struct net		*net;            /* Needed by timer routines */
 	/* Number of heterogeneous destinations, needed because heterogeneous
@@ -1408,7 +1414,8 @@
 /* IPVS sync daemon data and function prototypes
  * (from ip_vs_sync.c)
  */
-int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid);
+int start_sync_thread(struct net *net, struct ipvs_sync_daemon_cfg *cfg,
+		      int state);
 int stop_sync_thread(struct net *net, int state);
 void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
 
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 82dbdb0..711cca4 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -707,54 +707,69 @@
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static inline void ip6_set_txhash(struct sock *sk)
-{
-	struct inet_sock *inet = inet_sk(sk);
-	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct flow_keys keys;
 
-	memset(&keys, 0, sizeof(keys));
+/* Sysctl settings for net.ipv6.auto_flowlabels */
+#define IP6_AUTO_FLOW_LABEL_OFF		0
+#define IP6_AUTO_FLOW_LABEL_OPTOUT	1
+#define IP6_AUTO_FLOW_LABEL_OPTIN	2
+#define IP6_AUTO_FLOW_LABEL_FORCED	3
 
-	memcpy(&keys.addrs.v6addrs.src, &np->saddr,
-	       sizeof(keys.addrs.v6addrs.src));
-	memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr,
-	       sizeof(keys.addrs.v6addrs.dst));
-	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
-	keys.ports.src = inet->inet_sport;
-	keys.ports.dst = inet->inet_dport;
+#define IP6_AUTO_FLOW_LABEL_MAX		IP6_AUTO_FLOW_LABEL_FORCED
 
-	sk->sk_txhash = flow_hash_from_keys(&keys);
-}
+#define IP6_DEFAULT_AUTO_FLOW_LABELS	IP6_AUTO_FLOW_LABEL_OPTOUT
 
 static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
-					__be32 flowlabel, bool autolabel)
+					__be32 flowlabel, bool autolabel,
+					struct flowi6 *fl6)
 {
-	if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
-		u32 hash;
+	u32 hash;
 
-		hash = skb_get_hash(skb);
+	if (flowlabel ||
+	    net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
+	    (!autolabel &&
+	     net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED))
+		return flowlabel;
 
-		/* Since this is being sent on the wire obfuscate hash a bit
-		 * to minimize possbility that any useful information to an
-		 * attacker is leaked. Only lower 20 bits are relevant.
-		 */
-		hash ^= hash >> 12;
+	hash = skb_get_hash_flowi6(skb, fl6);
 
-		flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+	/* Since this is being sent on the wire obfuscate hash a bit
+	 * to minimize possibility that any useful information to an
+	 * attacker is leaked. Only lower 20 bits are relevant.
+	 */
+	hash = rol32(hash, 16);
 
-		if (net->ipv6.sysctl.flowlabel_state_ranges)
-			flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
-	}
+	flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+
+	if (net->ipv6.sysctl.flowlabel_state_ranges)
+		flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
 
 	return flowlabel;
 }
+
+static inline int ip6_default_np_autolabel(struct net *net)
+{
+	switch (net->ipv6.sysctl.auto_flowlabels) {
+	case IP6_AUTO_FLOW_LABEL_OFF:
+	case IP6_AUTO_FLOW_LABEL_OPTIN:
+	default:
+		return 0;
+	case IP6_AUTO_FLOW_LABEL_OPTOUT:
+	case IP6_AUTO_FLOW_LABEL_FORCED:
+		return 1;
+	}
+}
 #else
 static inline void ip6_set_txhash(struct sock *sk) { }
 static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
-					__be32 flowlabel, bool autolabel)
+					__be32 flowlabel, bool autolabel,
+					struct flowi6 *fl6)
 {
 	return flowlabel;
 }
+static inline int ip6_default_np_autolabel(struct net *net)
+{
+	return 0;
+}
 #endif
 
 
@@ -832,7 +847,8 @@
 			      &inet6_sk(sk)->cork);
 }
 
-int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6);
+int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
+		   struct flowi6 *fl6);
 struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
 				      const struct in6_addr *final_dst);
 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
new file mode 100644
index 0000000..fce0e35
--- /dev/null
+++ b/include/net/lwtunnel.h
@@ -0,0 +1,175 @@
+#ifndef __NET_LWTUNNEL_H
+#define __NET_LWTUNNEL_H 1
+
+#include <linux/lwtunnel.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/route.h>
+
+#define LWTUNNEL_HASH_BITS   7
+#define LWTUNNEL_HASH_SIZE   (1 << LWTUNNEL_HASH_BITS)
+
+/* lw tunnel state flags */
+#define LWTUNNEL_STATE_OUTPUT_REDIRECT	BIT(0)
+#define LWTUNNEL_STATE_INPUT_REDIRECT	BIT(1)
+
+struct lwtunnel_state {
+	__u16		type;
+	__u16		flags;
+	atomic_t	refcnt;
+	int		(*orig_output)(struct sock *sk, struct sk_buff *skb);
+	int		(*orig_input)(struct sk_buff *);
+	int             len;
+	__u8            data[0];
+};
+
+struct lwtunnel_encap_ops {
+	int (*build_state)(struct net_device *dev, struct nlattr *encap,
+			   unsigned int family, const void *cfg,
+			   struct lwtunnel_state **ts);
+	int (*output)(struct sock *sk, struct sk_buff *skb);
+	int (*input)(struct sk_buff *skb);
+	int (*fill_encap)(struct sk_buff *skb,
+			  struct lwtunnel_state *lwtstate);
+	int (*get_encap_size)(struct lwtunnel_state *lwtstate);
+	int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
+};
+
+#ifdef CONFIG_LWTUNNEL
+static inline void lwtstate_free(struct lwtunnel_state *lws)
+{
+	kfree(lws);
+}
+
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
+{
+	if (lws)
+		atomic_inc(&lws->refcnt);
+
+	return lws;
+}
+
+static inline void lwtstate_put(struct lwtunnel_state *lws)
+{
+	if (!lws)
+		return;
+
+	if (atomic_dec_and_test(&lws->refcnt))
+		lwtstate_free(lws);
+}
+
+static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
+{
+	if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_OUTPUT_REDIRECT))
+		return true;
+
+	return false;
+}
+
+static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
+{
+	if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_INPUT_REDIRECT))
+		return true;
+
+	return false;
+}
+int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+			   unsigned int num);
+int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+			   unsigned int num);
+int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+			 struct nlattr *encap,
+			 unsigned int family, const void *cfg,
+			 struct lwtunnel_state **lws);
+int lwtunnel_fill_encap(struct sk_buff *skb,
+			struct lwtunnel_state *lwtstate);
+int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate);
+struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
+int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
+int lwtunnel_output(struct sock *sk, struct sk_buff *skb);
+int lwtunnel_input(struct sk_buff *skb);
+
+#else
+
+static inline void lwtstate_free(struct lwtunnel_state *lws)
+{
+}
+
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
+{
+	return lws;
+}
+
+static inline void lwtstate_put(struct lwtunnel_state *lws)
+{
+}
+
+static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
+{
+	return false;
+}
+
+static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
+{
+	return false;
+}
+
+static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+					 unsigned int num)
+{
+	return -EOPNOTSUPP;
+
+}
+
+static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+					 unsigned int num)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+				       struct nlattr *encap,
+				       unsigned int family, const void *cfg,
+				       struct lwtunnel_state **lws)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_fill_encap(struct sk_buff *skb,
+				      struct lwtunnel_state *lwtstate)
+{
+	return 0;
+}
+
+static inline int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
+{
+	return 0;
+}
+
+static inline struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len)
+{
+	return NULL;
+}
+
+static inline int lwtunnel_cmp_encap(struct lwtunnel_state *a,
+				     struct lwtunnel_state *b)
+{
+	return 0;
+}
+
+static inline int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_input(struct sk_buff *skb)
+{
+	return -EOPNOTSUPP;
+}
+
+#endif
+
+#endif /* __NET_LWTUNNEL_H */
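
For context: an encapsulation module plugs in by registering a
lwtunnel_encap_ops for its encap type; build_state parses the netlink
attribute into a lwtunnel_state, and setting LWTUNNEL_STATE_OUTPUT_REDIRECT
makes the stack divert dst_output through the module's hook. A hedged
sketch (the encap type constant and the ops bodies are assumptions, not
from this series):

    static int sample_output(struct sock *sk, struct sk_buff *skb)
    {
            /* push the encap header here, then hand the skb back to
             * the original output path saved by the stack.
             */
            return skb_dst(skb)->lwtstate->orig_output(sk, skb);
    }

    static int sample_build_state(struct net_device *dev,
                                  struct nlattr *encap,
                                  unsigned int family, const void *cfg,
                                  struct lwtunnel_state **ts)
    {
            struct lwtunnel_state *s = lwtunnel_state_alloc(0);

            if (!s)
                    return -ENOMEM;

            s->type = LWTUNNEL_ENCAP_IP;    /* assumed type constant */
            s->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
            *ts = s;
            return 0;
    }

    static const struct lwtunnel_encap_ops sample_ops = {
            .build_state = sample_build_state,
            .output      = sample_output,
    };
    /* at module init: lwtunnel_encap_add_ops(&sample_ops, LWTUNNEL_ENCAP_IP); */
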
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 6b1077c..e3314e5 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -973,6 +973,10 @@
  * @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame.
  *	If this flag is set, the stack cannot do any replay detection
  *	hence the driver or hardware will have to do that.
+ * @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this
+ *	flag indicates that the PN was verified for replay protection.
+ *	Note that this flag is currently only supported when the frame
+ *	is also decrypted (i.e. @RX_FLAG_DECRYPTED must be set)
  * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on
  *	the frame.
  * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
@@ -997,9 +1001,6 @@
  * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
  *	number (@ampdu_reference) must be populated and be a distinct number for
  *	each A-MPDU
- * @RX_FLAG_AMPDU_REPORT_ZEROLEN: driver reports 0-length subframes
- * @RX_FLAG_AMPDU_IS_ZEROLEN: This is a zero-length subframe, for
- *	monitoring purposes only
  * @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all
  *	subframes of a single A-MPDU
  * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
@@ -1039,8 +1040,8 @@
 	RX_FLAG_NO_SIGNAL_VAL		= BIT(12),
 	RX_FLAG_HT_GF			= BIT(13),
 	RX_FLAG_AMPDU_DETAILS		= BIT(14),
-	RX_FLAG_AMPDU_REPORT_ZEROLEN	= BIT(15),
-	RX_FLAG_AMPDU_IS_ZEROLEN	= BIT(16),
+	RX_FLAG_PN_VALIDATED		= BIT(15),
+	/* bit 16 free */
 	RX_FLAG_AMPDU_LAST_KNOWN	= BIT(17),
 	RX_FLAG_AMPDU_IS_LAST		= BIT(18),
 	RX_FLAG_AMPDU_DELIM_CRC_ERROR	= BIT(19),
@@ -1491,8 +1492,10 @@
  * 	- Temporal Authenticator Rx MIC Key (64 bits)
  * @icv_len: The ICV length for this key type
  * @iv_len: The IV length for this key type
+ * @drv_priv: pointer for driver use
  */
 struct ieee80211_key_conf {
+	void *drv_priv;
 	atomic64_t tx_pn;
 	u32 cipher;
 	u8 icv_len;
@@ -1675,7 +1678,6 @@
  * @tdls: indicates whether the STA is a TDLS peer
  * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
  *	valid if the STA is a TDLS peer in the first place.
- * @mfp: indicates whether the STA uses management frame protection or not.
  * @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
  */
 struct ieee80211_sta {
@@ -1693,7 +1695,6 @@
 	struct ieee80211_sta_rates __rcu *rates;
 	bool tdls;
 	bool tdls_initiator;
-	bool mfp;
 
 	struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
 
@@ -1888,6 +1889,9 @@
  * @IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
  *	in one command, mac80211 doesn't have to run separate scans per band.
  *
+ * @IEEE80211_HW_TDLS_WIDER_BW: The device/driver supports wider bandwidth
+ *	than the BSS bandwidth for a TDLS link on the base channel.
+ *
  * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
  */
 enum ieee80211_hw_flags {
@@ -1920,6 +1924,7 @@
 	IEEE80211_HW_CHANCTX_STA_CSA,
 	IEEE80211_HW_SUPPORTS_CLONED_SKBS,
 	IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
+	IEEE80211_HW_TDLS_WIDER_BW,
 
 	/* keep last, obviously */
 	NUM_IEEE80211_HW_FLAGS
@@ -3696,20 +3701,28 @@
 void ieee80211_restart_hw(struct ieee80211_hw *hw);
 
 /**
- * ieee80211_napi_add - initialize mac80211 NAPI context
- * @hw: the hardware to initialize the NAPI context on
- * @napi: the NAPI context to initialize
- * @napi_dev: dummy NAPI netdevice, here to not waste the space if the
- *	driver doesn't use NAPI
- * @poll: poll function
- * @weight: default weight
+ * ieee80211_rx_napi - receive frame from NAPI context
  *
- * See also netif_napi_add().
+ * Use this function to hand received frames to mac80211. The receive
+ * buffer in @skb must start with an IEEE 802.11 header. In case a
+ * paged @skb is used, the driver is recommended to put the ieee80211
+ * header of the frame on the linear part of the @skb to avoid memory
+ * allocation and/or memcpy by the stack.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other. Calls to
+ * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
+ * mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ *
+ * This function must be called with BHs disabled.
+ *
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac80211 after this call
+ * @napi: the NAPI context
  */
-void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
-			struct net_device *napi_dev,
-			int (*poll)(struct napi_struct *, int),
-			int weight);
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
+		       struct napi_struct *napi);
 
 /**
  * ieee80211_rx - receive frame
@@ -3731,7 +3744,10 @@
  * @hw: the hardware this frame came in on
  * @skb: the buffer to receive, owned by mac80211 after this call
  */
-void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb);
+static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	ieee80211_rx_napi(hw, skb, NULL);
+}
 
 /**
  * ieee80211_rx_irqsafe - receive frame
@@ -4315,19 +4331,6 @@
 			    struct sk_buff *skb, u8 *p2k);
 
 /**
- * ieee80211_aes_cmac_calculate_k1_k2 - calculate the AES-CMAC sub keys
- *
- * This function computes the two AES-CMAC sub-keys, based on the
- * previously installed master key.
- *
- * @keyconf: the parameter passed with the set key
- * @k1: a buffer to be filled with the 1st sub-key
- * @k2: a buffer to be filled with the 2nd sub-key
- */
-void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
-					u8 *k1, u8 *k2);
-
-/**
  * ieee80211_get_key_tx_seq - get key TX sequence counter
  *
  * @keyconf: the parameter passed with the set key
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index f534a46..b7f9961 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -321,23 +321,6 @@
 void ieee802154_unregister_hw(struct ieee802154_hw *hw);
 
 /**
- * ieee802154_rx - receive frame
- *
- * Use this function to hand received frames to mac802154. The receive
- * buffer in @skb must start with an IEEE 802.15.4 header. In case of a
- * paged @skb is used, the driver is recommended to put the ieee802154
- * header of the frame on the linear part of the @skb to avoid memory
- * allocation and/or memcpy by the stack.
- *
- * This function may not be called in IRQ context. Calls to this function
- * for a single hardware must be synchronized against each other.
- *
- * @hw: the hardware this frame came in on
- * @skb: the buffer to receive, owned by mac802154 after this call
- */
-void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb);
-
-/**
  * ieee802154_rx_irqsafe - receive frame
  *
  * Like ieee802154_rx() but can be called in IRQ context
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
new file mode 100644
index 0000000..4757997
--- /dev/null
+++ b/include/net/mpls_iptunnel.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2015 Cumulus Networks, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _NET_MPLS_IPTUNNEL_H
+#define _NET_MPLS_IPTUNNEL_H 1
+
+#define MAX_NEW_LABELS 2
+
+struct mpls_iptunnel_encap {
+	u32	label[MAX_NEW_LABELS];
+	u32	labels;
+};
+
+static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
+{
+	return (struct mpls_iptunnel_encap *)lwtstate->data;
+}
+
+#endif
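
A sketch of reading the encap back out of a route's lwtunnel state
(assumes the state was installed by the MPLS encap code;
print_mpls_labels() is illustrative only):

static void print_mpls_labels(const struct rtable *rt)
{
	struct lwtunnel_state *lwtstate = rt->dst.lwtstate;
	struct mpls_iptunnel_encap *tun_encap;
	u32 i;

	if (!lwtstate || lwtstate->type != LWTUNNEL_ENCAP_MPLS)
		return;

	tun_encap = mpls_lwtunnel_encap(lwtstate);
	for (i = 0; i < tun_encap->labels; i++)
		pr_info("label[%u] = %u\n", i, tun_encap->label[i]);
}
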
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index b3a7751..aba5695 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -182,7 +182,8 @@
 
 void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
 		   const struct in6_addr *solicit,
-		   const struct in6_addr *daddr, const struct in6_addr *saddr);
+		   const struct in6_addr *daddr, const struct in6_addr *saddr,
+		   struct sk_buff *oskb);
 
 void ndisc_send_rs(struct net_device *dev,
 		   const struct in6_addr *saddr, const struct in6_addr *daddr);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index bd33e66..8b68384 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -125,6 +125,7 @@
 	unsigned long forced_gc_runs;	/* number of forced GC runs */
 
 	unsigned long unres_discards;	/* number of unresolved drops */
+	unsigned long table_fulls;      /* times even gc couldn't help */
 };
 
 #define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index e951453..2dcea63 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -118,6 +118,9 @@
 #endif
 	struct sock		*nfnl;
 	struct sock		*nfnl_stash;
+#if IS_ENABLED(CONFIG_NETFILTER_NETLINK_ACCT)
+	struct list_head        nfnl_acct_list;
+#endif
 #endif
 #ifdef CONFIG_WEXT_CORE
 	struct sk_buff_head	wext_nlevents;
diff --git a/include/net/netfilter/ipv4/nf_dup_ipv4.h b/include/net/netfilter/ipv4/nf_dup_ipv4.h
new file mode 100644
index 0000000..42008f1
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_dup_ipv4.h
@@ -0,0 +1,7 @@
+#ifndef _NF_DUP_IPV4_H_
+#define _NF_DUP_IPV4_H_
+
+void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum,
+		 const struct in_addr *gw, int oif);
+
+#endif /* _NF_DUP_IPV4_H_ */
diff --git a/include/net/netfilter/ipv6/nf_dup_ipv6.h b/include/net/netfilter/ipv6/nf_dup_ipv6.h
new file mode 100644
index 0000000..ed6bd66
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_dup_ipv6.h
@@ -0,0 +1,7 @@
+#ifndef _NF_DUP_IPV6_H_
+#define _NF_DUP_IPV6_H_
+
+void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum,
+		 const struct in6_addr *gw, int oif);
+
+#endif /* _NF_DUP_IPV6_H_ */
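
A sketch of how a packet-duplication target might call the new helpers
(mydup_tginfo is a hypothetical target config, not part of this patch):

struct mydup_tginfo {		/* hypothetical config blob */
	struct in_addr	gw;	/* gateway for the duplicate */
	int		oif;	/* optional output interface */
};

static unsigned int mydup_tg4(struct sk_buff *skb,
			      const struct xt_action_param *par)
{
	const struct mydup_tginfo *info = par->targinfo;

	/* clone the packet and route the copy towards info->gw;
	 * the original continues through the ruleset untouched
	 */
	nf_dup_ipv4(skb, par->hooknum, &info->gw, info->oif);
	return XT_CONTINUE;
}
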
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 37cd391..f5e23c6d 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -250,8 +250,12 @@
 void nf_ct_iterate_cleanup(struct net *net,
 			   int (*iter)(struct nf_conn *i, void *data),
 			   void *data, u32 portid, int report);
+
+struct nf_conntrack_zone;
+
 void nf_conntrack_free(struct nf_conn *ct);
-struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+				   const struct nf_conntrack_zone *zone,
 				   const struct nf_conntrack_tuple *orig,
 				   const struct nf_conntrack_tuple *repl,
 				   gfp_t gfp);
@@ -291,7 +295,9 @@
 extern unsigned int nf_conntrack_hash_rnd;
 void init_nf_conntrack_hash_rnd(void);
 
-struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags);
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+				 const struct nf_conntrack_zone *zone,
+				 gfp_t flags);
 
 #define NF_CT_STAT_INC(net, count)	  __this_cpu_inc((net)->ct.stat->count)
 #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index f2f0fa3..c03f9c4 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -52,7 +52,8 @@
 
 /* Find a connection corresponding to a tuple. */
 struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, u16 zone,
+nf_conntrack_find_get(struct net *net,
+		      const struct nf_conntrack_zone *zone,
 		      const struct nf_conntrack_tuple *tuple);
 
 int __nf_conntrack_confirm(struct sk_buff *skb);
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 3f3aecb..dce56f0 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -4,7 +4,9 @@
 
 #ifndef _NF_CONNTRACK_EXPECT_H
 #define _NF_CONNTRACK_EXPECT_H
+
 #include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_zones.h>
 
 extern unsigned int nf_ct_expect_hsize;
 extern unsigned int nf_ct_expect_max;
@@ -76,15 +78,18 @@
 void nf_conntrack_expect_fini(void);
 
 struct nf_conntrack_expect *
-__nf_ct_expect_find(struct net *net, u16 zone,
+__nf_ct_expect_find(struct net *net,
+		    const struct nf_conntrack_zone *zone,
 		    const struct nf_conntrack_tuple *tuple);
 
 struct nf_conntrack_expect *
-nf_ct_expect_find_get(struct net *net, u16 zone,
+nf_ct_expect_find_get(struct net *net,
+		      const struct nf_conntrack_zone *zone,
 		      const struct nf_conntrack_tuple *tuple);
 
 struct nf_conntrack_expect *
-nf_ct_find_expectation(struct net *net, u16 zone,
+nf_ct_find_expectation(struct net *net,
+		       const struct nf_conntrack_zone *zone,
 		       const struct nf_conntrack_tuple *tuple);
 
 void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index dec6336..7e2b1d0 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -54,7 +54,11 @@
 #ifdef CONFIG_NF_CONNTRACK_LABELS
 int nf_conntrack_labels_init(void);
 void nf_conntrack_labels_fini(void);
+int nf_connlabels_get(struct net *net, unsigned int n_bits);
+void nf_connlabels_put(struct net *net);
 #else
 static inline int nf_conntrack_labels_init(void) { return 0; }
 static inline void nf_conntrack_labels_fini(void) {}
+static inline int nf_connlabels_get(struct net *net, unsigned int n_bits) { return 0; }
+static inline void nf_connlabels_put(struct net *net) {}
 #endif
diff --git a/include/net/netfilter/nf_conntrack_zones.h b/include/net/netfilter/nf_conntrack_zones.h
index 034efe8..4e32512 100644
--- a/include/net/netfilter/nf_conntrack_zones.h
+++ b/include/net/netfilter/nf_conntrack_zones.h
@@ -1,25 +1,89 @@
 #ifndef _NF_CONNTRACK_ZONES_H
 #define _NF_CONNTRACK_ZONES_H
 
-#define NF_CT_DEFAULT_ZONE	0
+#include <linux/netfilter/nf_conntrack_zones_common.h>
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #include <net/netfilter/nf_conntrack_extend.h>
 
-struct nf_conntrack_zone {
-	u16	id;
-};
+static inline const struct nf_conntrack_zone *
+nf_ct_zone(const struct nf_conn *ct)
+{
+	const struct nf_conntrack_zone *nf_ct_zone = NULL;
 
-static inline u16 nf_ct_zone(const struct nf_conn *ct)
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+	nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
+#endif
+	return nf_ct_zone ? nf_ct_zone : &nf_ct_zone_dflt;
+}
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags)
+{
+	zone->id = id;
+	zone->flags = flags;
+	zone->dir = dir;
+
+	return zone;
+}
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone_tmpl(const struct nf_conn *tmpl, const struct sk_buff *skb,
+		struct nf_conntrack_zone *tmp)
+{
+	const struct nf_conntrack_zone *zone;
+
+	if (!tmpl)
+		return &nf_ct_zone_dflt;
+
+	zone = nf_ct_zone(tmpl);
+	if (zone->flags & NF_CT_FLAG_MARK)
+		zone = nf_ct_zone_init(tmp, skb->mark, zone->dir, 0);
+
+	return zone;
+}
+
+static inline int nf_ct_zone_add(struct nf_conn *ct, gfp_t flags,
+				 const struct nf_conntrack_zone *info)
 {
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 	struct nf_conntrack_zone *nf_ct_zone;
-	nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
-	if (nf_ct_zone)
-		return nf_ct_zone->id;
+
+	nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, flags);
+	if (!nf_ct_zone)
+		return -ENOMEM;
+
+	nf_ct_zone_init(nf_ct_zone, info->id, info->dir,
+			info->flags);
 #endif
-	return NF_CT_DEFAULT_ZONE;
+	return 0;
 }
 
-#endif /* CONFIG_NF_CONNTRACK || CONFIG_NF_CONNTRACK_MODULE */
+static inline bool nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone,
+					  enum ip_conntrack_dir dir)
+{
+	return zone->dir & (1 << dir);
+}
+
+static inline u16 nf_ct_zone_id(const struct nf_conntrack_zone *zone,
+				enum ip_conntrack_dir dir)
+{
+	return nf_ct_zone_matches_dir(zone, dir) ?
+	       zone->id : NF_CT_DEFAULT_ZONE_ID;
+}
+
+static inline bool nf_ct_zone_equal(const struct nf_conn *a,
+				    const struct nf_conntrack_zone *b,
+				    enum ip_conntrack_dir dir)
+{
+	return nf_ct_zone_id(nf_ct_zone(a), dir) ==
+	       nf_ct_zone_id(b, dir);
+}
+
+static inline bool nf_ct_zone_equal_any(const struct nf_conn *a,
+					const struct nf_conntrack_zone *b)
+{
+	return nf_ct_zone(a)->id == b->id;
+}
+#endif /* IS_ENABLED(CONFIG_NF_CONNTRACK) */
 #endif /* _NF_CONNTRACK_ZONES_H */
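
A sketch of the new zone API from a caller's perspective (the zone id
and direction below are made-up values for illustration):

static bool example_zone_match(const struct nf_conn *ct)
{
	struct nf_conntrack_zone zone;

	/* zone 100, matching the original direction only */
	nf_ct_zone_init(&zone, 100, 1 << IP_CT_DIR_ORIGINAL, 0);

	/* compares the per-direction zone ids, honouring zone->dir */
	return nf_ct_zone_equal(ct, &zone, IP_CT_DIR_ORIGINAL);
}
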
diff --git a/include/net/netfilter/nft_dup.h b/include/net/netfilter/nft_dup.h
new file mode 100644
index 0000000..6b84cf6
--- /dev/null
+++ b/include/net/netfilter/nft_dup.h
@@ -0,0 +1,9 @@
+#ifndef _NFT_DUP_H_
+#define _NFT_DUP_H_
+
+struct nft_dup_inet {
+	enum nft_registers	sreg_addr:8;
+	enum nft_registers	sreg_dev:8;
+};
+
+#endif /* _NFT_DUP_H_ */
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 8d93544..c0368db 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -31,6 +31,7 @@
 	int auto_flowlabels;
 	int icmpv6_time;
 	int anycast_src_echo_reply;
+	int ip_nonlocal_bind;
 	int fwmark_reflect;
 	int idgen_retries;
 	int idgen_delay;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 532e4ba..38aa498 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -14,5 +14,6 @@
 #ifdef CONFIG_SYSCTL
 	struct ctl_table_header *nf_log_dir_header;
 #endif
+	struct list_head hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 };
 #endif
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 01fc8c5..d0d0f1e 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -79,6 +79,7 @@
 	int   (*close)(struct nci_dev *ndev);
 	int   (*send)(struct nci_dev *ndev, struct sk_buff *skb);
 	int   (*setup)(struct nci_dev *ndev);
+	int   (*post_setup)(struct nci_dev *ndev);
 	int   (*fw_download)(struct nci_dev *ndev, const char *firmware_name);
 	__u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol);
 	int   (*discover_se)(struct nci_dev *ndev);
@@ -277,6 +278,8 @@
 			    unsigned long opt),
 		unsigned long opt, __u32 timeout);
 int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload);
+int nci_core_reset(struct nci_dev *ndev);
+int nci_core_init(struct nci_dev *ndev);
 
 int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
 int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val);
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index f9e58ae..30afc9a 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -203,6 +203,7 @@
 	int n_vendor_cmds;
 
 	struct nfc_ops *ops;
+	struct genl_info *cur_cmd_info;
 };
 #define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
 
@@ -318,4 +319,45 @@
 	return 0;
 }
 
+struct sk_buff *__nfc_alloc_vendor_cmd_reply_skb(struct nfc_dev *dev,
+						 enum nfc_attrs attr,
+						 u32 oui, u32 subcmd,
+						 int approxlen);
+int nfc_vendor_cmd_reply(struct sk_buff *skb);
+
+/**
+ * nfc_vendor_cmd_alloc_reply_skb - allocate vendor command reply
+ * @dev: nfc device
+ * @oui: vendor oui
+ * @subcmd: vendor subcommand
+ * @approxlen: an upper bound of the length of the data that will
+ *      be put into the skb
+ *
+ * This function allocates and pre-fills an skb for a reply to
+ * a vendor command. Since it is intended for a reply, calling
+ * it outside of a vendor command's doit() operation is invalid.
+ *
+ * The returned skb is pre-filled with some identifying data in
+ * a way that any data that is put into the skb (with skb_put(),
+ * nla_put() or similar) will end up being within the
+ * %NFC_ATTR_VENDOR_DATA attribute, so all that needs to be done
+ * with the skb is adding data for the corresponding userspace tool
+ * which can then read that data out of the vendor data attribute.
+ * You must not modify the skb in any other way.
+ *
+ * When done, call nfc_vendor_cmd_reply() with the skb and return
+ * its error code as the result of the doit() operation.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+nfc_vendor_cmd_alloc_reply_skb(struct nfc_dev *dev,
+				u32 oui, u32 subcmd, int approxlen)
+{
+	return __nfc_alloc_vendor_cmd_reply_skb(dev,
+						NFC_ATTR_VENDOR_DATA,
+						oui,
+						subcmd, approxlen);
+}
+
 #endif /* __NET_NFC_H */
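
A sketch of a vendor command doit() following the documented reply
protocol (the MYVENDOR_* constants and the attribute id are
hypothetical):

static int myvendor_get_fw_info(struct nfc_dev *dev, void *data,
				size_t data_len)
{
	struct sk_buff *skb;

	skb = nfc_vendor_cmd_alloc_reply_skb(dev, MYVENDOR_OUI,
					     MYVENDOR_SUBCMD_FW_INFO, 16);
	if (!skb)
		return -ENOMEM;

	/* anything added here ends up inside %NFC_ATTR_VENDOR_DATA */
	if (nla_put_u32(skb, MYVENDOR_ATTR_FW_VERSION, 0x0102)) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return nfc_vendor_cmd_reply(skb);
}
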
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index b0ab530..cf2713d 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -52,6 +52,8 @@
 
 	NL802154_CMD_SET_LBT_MODE,
 
+	NL802154_CMD_SET_ACKREQ_DEFAULT,
+
 	/* add new commands above here */
 
 	/* used to define NL802154_CMD_MAX below */
@@ -104,6 +106,8 @@
 
 	NL802154_ATTR_SUPPORTED_COMMANDS,
 
+	NL802154_ATTR_ACKREQ_DEFAULT,
+
 	/* add attributes here, update the policy in nl802154.c */
 
 	__NL802154_ATTR_AFTER_LAST,
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 2342bf1..401038d 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -110,10 +110,8 @@
 		__qdisc_run(q);
 }
 
-int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
-		       struct tcf_result *res);
 int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
-		struct tcf_result *res);
+		struct tcf_result *res, bool compat_mode);
 
 static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
 {
diff --git a/include/net/route.h b/include/net/route.h
index fe22d03..cc61cb9 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -188,8 +188,12 @@
 void ip_rt_send_redirect(struct sk_buff *skb);
 
 unsigned int inet_addr_type(struct net *net, __be32 addr);
+unsigned int inet_addr_type_table(struct net *net, __be32 addr, u32 tb_id);
 unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
 				__be32 addr);
+unsigned int inet_addr_type_dev_table(struct net *net,
+				      const struct net_device *dev,
+				      __be32 addr);
 void ip_rt_multicast_event(struct in_device *);
 int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
 void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
@@ -250,6 +254,9 @@
 	if (inet_sk(sk)->transparent)
 		flow_flags |= FLOWI_FLAG_ANYSRC;
 
+	if (netif_index_is_vrf(sock_net(sk), oif))
+		flow_flags |= FLOWI_FLAG_VRFSRC;
+
 	flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
 			   protocol, flow_flags, dst, src, dport, sport);
 }
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 343d922..18fdb98 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -141,6 +141,7 @@
 				    unsigned char name_assign_type,
 				    const struct rtnl_link_ops *ops,
 				    struct nlattr *tb[]);
+int rtnl_delete_link(struct net_device *dev);
 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
 
 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 2738f6f..444faa8 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -340,6 +340,7 @@
 extern struct Qdisc_ops noop_qdisc_ops;
 extern struct Qdisc_ops pfifo_fast_ops;
 extern struct Qdisc_ops mq_qdisc_ops;
+extern struct Qdisc_ops noqueue_qdisc_ops;
 extern const struct Qdisc_ops *default_qdisc_ops;
 
 struct Qdisc_class_common {
@@ -513,17 +514,20 @@
 	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
 }
 
-static inline void qdisc_bstats_update_cpu(struct Qdisc *sch,
-					   const struct sk_buff *skb)
+static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
+				     const struct sk_buff *skb)
 {
-	struct gnet_stats_basic_cpu *bstats =
-				this_cpu_ptr(sch->cpu_bstats);
-
 	u64_stats_update_begin(&bstats->syncp);
 	bstats_update(&bstats->bstats, skb);
 	u64_stats_update_end(&bstats->syncp);
 }
 
+static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
+					   const struct sk_buff *skb)
+{
+	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
+}
+
 static inline void qdisc_bstats_update(struct Qdisc *sch,
 				       const struct sk_buff *skb)
 {
@@ -547,16 +551,24 @@
 	sch->qstats.drops += count;
 }
 
-static inline void qdisc_qstats_drop(struct Qdisc *sch)
+static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
 {
-	sch->qstats.drops++;
+	qstats->drops++;
 }
 
-static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch)
+static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
 {
-	struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats);
+	qstats->overlimits++;
+}
 
-	qstats->drops++;
+static inline void qdisc_qstats_drop(struct Qdisc *sch)
+{
+	qstats_drop_inc(&sch->qstats);
+}
+
+static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
+{
+	qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats));
 }
 
 static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
diff --git a/include/net/sock.h b/include/net/sock.h
index f21f070..43c6abc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -429,7 +429,9 @@
 	void			*sk_security;
 #endif
 	__u32			sk_mark;
+#ifdef CONFIG_CGROUP_NET_CLASSID
 	u32			sk_classid;
+#endif
 	struct cg_proto		*sk_cgrp;
 	void			(*sk_state_change)(struct sock *sk);
 	void			(*sk_data_ready)(struct sock *sk);
@@ -1685,6 +1687,20 @@
 kuid_t sock_i_uid(struct sock *sk);
 unsigned long sock_i_ino(struct sock *sk);
 
+static inline void sk_set_txhash(struct sock *sk)
+{
+	sk->sk_txhash = prandom_u32();
+
+	if (unlikely(!sk->sk_txhash))
+		sk->sk_txhash = 1;
+}
+
+static inline void sk_rethink_txhash(struct sock *sk)
+{
+	if (sk->sk_txhash)
+		sk_set_txhash(sk);
+}
+
 static inline struct dst_entry *
 __sk_dst_get(struct sock *sk)
 {
@@ -1709,6 +1725,8 @@
 {
 	struct dst_entry *ndst, *dst = __sk_dst_get(sk);
 
+	sk_rethink_txhash(sk);
+
 	if (dst && dst->ops->negative_advice) {
 		ndst = dst->ops->negative_advice(dst);
 
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d5671f1..319baab 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -72,6 +72,7 @@
 		struct switchdev_obj_fdb {		/* PORT_FDB */
 			const unsigned char *addr;
 			u16 vid;
+			u16 ndm_state;
 		} fdb;
 	} u;
 };
@@ -157,6 +158,9 @@
 int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
 			    struct net_device *dev,
 			    struct net_device *filter_dev, int idx);
+void switchdev_port_fwd_mark_set(struct net_device *dev,
+				 struct net_device *group_dev,
+				 bool joining);
 
 #else
 
@@ -271,6 +275,12 @@
 	return -EOPNOTSUPP;
 }
 
+static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
+					       struct net_device *group_dev,
+					       bool joining)
+{
+}
+
 #endif
 
 #endif /* _LINUX_SWITCHDEV_H_ */
diff --git a/include/net/tc_act/tc_bpf.h b/include/net/tc_act/tc_bpf.h
index a152e98..958d69c 100644
--- a/include/net/tc_act/tc_bpf.h
+++ b/include/net/tc_act/tc_bpf.h
@@ -15,7 +15,7 @@
 
 struct tcf_bpf {
 	struct tcf_common	common;
-	struct bpf_prog		*filter;
+	struct bpf_prog __rcu	*filter;
 	union {
 		u32		bpf_fd;
 		u16		bpf_num_ops;
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index 9fc9b57..592a6bc 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -6,9 +6,10 @@
 struct tcf_gact {
 	struct tcf_common	common;
 #ifdef CONFIG_GACT_PROB
-        u16			tcfg_ptype;
-        u16			tcfg_pval;
-        int			tcfg_paction;
+	u16			tcfg_ptype;
+	u16			tcfg_pval;
+	int			tcfg_paction;
+	atomic_t		packets;
 #endif
 };
 #define to_gact(a) \
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index 4dd77a1..dae96ba 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -8,7 +8,7 @@
 	int			tcfm_eaction;
 	int			tcfm_ifindex;
 	int			tcfm_ok_push;
-	struct net_device	*tcfm_dev;
+	struct net_device __rcu	*tcfm_dev;
 	struct list_head	tcfm_list;
 };
 #define to_mirred(a) \
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 950cfec..0cab28c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -281,6 +281,8 @@
 extern int sysctl_tcp_min_tso_segs;
 extern int sysctl_tcp_autocorking;
 extern int sysctl_tcp_invalid_ratelimit;
+extern int sysctl_tcp_pacing_ss_ratio;
+extern int sysctl_tcp_pacing_ca_ratio;
 
 extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
@@ -886,7 +888,7 @@
 extern struct tcp_congestion_ops tcp_reno;
 
 struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
-u32 tcp_ca_get_key_by_name(const char *name);
+u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
 #ifdef CONFIG_INET
 char *tcp_ca_get_name_by_key(u32 key, char *buffer);
 #else
@@ -989,6 +991,11 @@
 
 #define TCP_INFINITE_SSTHRESH	0x7fffffff
 
+static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
+{
+	return tp->snd_cwnd < tp->snd_ssthresh;
+}
+
 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
 {
 	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
@@ -1065,7 +1072,7 @@
 	const struct tcp_sock *tp = tcp_sk(sk);
 
 	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
+	if (tcp_in_slow_start(tp))
 		return tp->snd_cwnd < 2 * tp->max_packets_out;
 
 	return tp->is_cwnd_limited;
@@ -1160,6 +1167,19 @@
 }
 
 u32 tcp_default_init_rwnd(u32 mss);
+void tcp_cwnd_restart(struct sock *sk, s32 delta);
+
+static inline void tcp_slow_start_after_idle_check(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	s32 delta;
+
+	if (!sysctl_tcp_slow_start_after_idle || tp->packets_out)
+		return;
+	delta = tcp_time_stamp - tp->lsndtime;
+	if (delta > inet_csk(sk)->icsk_rto)
+		tcp_cwnd_restart(sk, delta);
+}
 
 /* Determine a window scaling and initial window to offer. */
 void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
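
A sketch of how a congestion-control module might use the new
tcp_in_slow_start() helper, modeled on the reno-style cong_avoid
pattern (not code from this patch):

static void example_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp)) {
		/* exponential growth until cwnd reaches ssthresh */
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* additive increase beyond ssthresh */
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}
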
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index 68f0eca..1a47946 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -33,9 +33,6 @@
 
 static inline void twsk_destructor(struct sock *sk)
 {
-	BUG_ON(sk == NULL);
-	BUG_ON(sk->sk_prot == NULL);
-	BUG_ON(sk->sk_prot->twsk_prot == NULL);
 	if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
 		sk->sk_prot->twsk_prot->twsk_destructor(sk);
 }
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index c491c12..cb2f89f 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -31,7 +31,8 @@
 	__be16			peer_udp_port;
 	unsigned int		use_udp_checksums:1,
 				use_udp6_tx_checksums:1,
-				use_udp6_rx_checksums:1;
+				use_udp6_rx_checksums:1,
+				ipv6_v6only:1;
 };
 
 int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
@@ -93,6 +94,10 @@
 
 void udp_tunnel_sock_release(struct socket *sock);
 
+struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
+				    __be16 flags, __be64 tunnel_id,
+				    int md_size);
+
 static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
 							 bool udp_csum)
 {
diff --git a/include/net/vrf.h b/include/net/vrf.h
new file mode 100644
index 0000000..593e609
--- /dev/null
+++ b/include/net/vrf.h
@@ -0,0 +1,178 @@
+/*
+ * include/net/vrf.h - adds vrf dev structure definitions
+ * Copyright (c) 2015 Cumulus Networks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_NET_VRF_H
+#define __LINUX_NET_VRF_H
+
+struct net_vrf_dev {
+	struct rcu_head		rcu;
+	int                     ifindex; /* ifindex of master dev */
+	u32                     tb_id;   /* table id for VRF */
+};
+
+struct slave {
+	struct list_head	list;
+	struct net_device	*dev;
+};
+
+struct slave_queue {
+	struct list_head	all_slaves;
+};
+
+struct net_vrf {
+	struct slave_queue	queue;
+	struct rtable           *rth;
+	u32			tb_id;
+};
+
+
+#if IS_ENABLED(CONFIG_NET_VRF)
+/* called with rcu_read_lock() */
+static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
+{
+	struct net_vrf_dev *vrf_ptr;
+	int ifindex = 0;
+
+	if (!dev)
+		return 0;
+
+	if (netif_is_vrf(dev)) {
+		ifindex = dev->ifindex;
+	} else {
+		vrf_ptr = rcu_dereference(dev->vrf_ptr);
+		if (vrf_ptr)
+			ifindex = vrf_ptr->ifindex;
+	}
+
+	return ifindex;
+}
+
+static inline int vrf_master_ifindex(const struct net_device *dev)
+{
+	int ifindex;
+
+	rcu_read_lock();
+	ifindex = vrf_master_ifindex_rcu(dev);
+	rcu_read_unlock();
+
+	return ifindex;
+}
+
+/* called with rcu_read_lock */
+static inline u32 vrf_dev_table_rcu(const struct net_device *dev)
+{
+	u32 tb_id = 0;
+
+	if (dev) {
+		struct net_vrf_dev *vrf_ptr;
+
+		vrf_ptr = rcu_dereference(dev->vrf_ptr);
+		if (vrf_ptr)
+			tb_id = vrf_ptr->tb_id;
+	}
+	return tb_id;
+}
+
+static inline u32 vrf_dev_table(const struct net_device *dev)
+{
+	u32 tb_id;
+
+	rcu_read_lock();
+	tb_id = vrf_dev_table_rcu(dev);
+	rcu_read_unlock();
+
+	return tb_id;
+}
+
+static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex)
+{
+	struct net_device *dev;
+	u32 tb_id = 0;
+
+	if (!ifindex)
+		return 0;
+
+	rcu_read_lock();
+
+	dev = dev_get_by_index_rcu(net, ifindex);
+	if (dev)
+		tb_id = vrf_dev_table_rcu(dev);
+
+	rcu_read_unlock();
+
+	return tb_id;
+}
+
+/* called with rtnl */
+static inline u32 vrf_dev_table_rtnl(const struct net_device *dev)
+{
+	u32 tb_id = 0;
+
+	if (dev) {
+		struct net_vrf_dev *vrf_ptr;
+
+		vrf_ptr = rtnl_dereference(dev->vrf_ptr);
+		if (vrf_ptr)
+			tb_id = vrf_ptr->tb_id;
+	}
+	return tb_id;
+}
+
+/* caller has already checked netif_is_vrf(dev) */
+static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
+{
+	struct rtable *rth = ERR_PTR(-ENETUNREACH);
+	struct net_vrf *vrf = netdev_priv(dev);
+
+	if (vrf) {
+		rth = vrf->rth;
+		atomic_inc(&rth->dst.__refcnt);
+	}
+	return rth;
+}
+
+#else
+static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
+{
+	return 0;
+}
+
+static inline int vrf_master_ifindex(const struct net_device *dev)
+{
+	return 0;
+}
+
+static inline u32 vrf_dev_table_rcu(const struct net_device *dev)
+{
+	return 0;
+}
+
+static inline u32 vrf_dev_table(const struct net_device *dev)
+{
+	return 0;
+}
+
+static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex)
+{
+	return 0;
+}
+
+static inline u32 vrf_dev_table_rtnl(const struct net_device *dev)
+{
+	return 0;
+}
+
+static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
+{
+	return ERR_PTR(-ENETUNREACH);
+}
+#endif
+
+#endif /* __LINUX_NET_VRF_H */
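
A sketch of how a lookup path might use the table helpers (illustrative
only; falls back to the main table when the device has no VRF binding):

static u32 example_pick_table(const struct net_device *dev)
{
	u32 tb_id = vrf_dev_table(dev);	/* 0 when not enslaved to a VRF */

	return tb_id ? tb_id : RT_TABLE_MAIN;
}
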
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 0082b5d..480a319 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -7,6 +7,7 @@
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/udp.h>
+#include <net/dst_metadata.h>
 
 #define VNI_HASH_BITS	10
 #define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
@@ -94,20 +95,18 @@
 #define VXLAN_VNI_MASK  (VXLAN_VID_MASK << 8)
 #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
 
+#define VNI_HASH_BITS	10
+#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
+#define FDB_HASH_BITS	8
+#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
+
 struct vxlan_metadata {
-	__be32		vni;
 	u32		gbp;
 };
 
-struct vxlan_sock;
-typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb,
-			   struct vxlan_metadata *md);
-
 /* per UDP socket information */
 struct vxlan_sock {
 	struct hlist_node hlist;
-	vxlan_rcv_t	 *rcv;
-	void		 *data;
 	struct work_struct del_work;
 	struct socket	 *sock;
 	struct rcu_head	  rcu;
@@ -117,6 +116,58 @@
 	u32		  flags;
 };
 
+union vxlan_addr {
+	struct sockaddr_in sin;
+	struct sockaddr_in6 sin6;
+	struct sockaddr sa;
+};
+
+struct vxlan_rdst {
+	union vxlan_addr	 remote_ip;
+	__be16			 remote_port;
+	u32			 remote_vni;
+	u32			 remote_ifindex;
+	struct list_head	 list;
+	struct rcu_head		 rcu;
+};
+
+struct vxlan_config {
+	union vxlan_addr	remote_ip;
+	union vxlan_addr	saddr;
+	u32			vni;
+	int			remote_ifindex;
+	int			mtu;
+	__be16			dst_port;
+	__u16			port_min;
+	__u16			port_max;
+	__u8			tos;
+	__u8			ttl;
+	u32			flags;
+	unsigned long		age_interval;
+	unsigned int		addrmax;
+	bool			no_share;
+};
+
+/* Pseudo network device */
+struct vxlan_dev {
+	struct hlist_node hlist;	/* vni hash table */
+	struct list_head  next;		/* vxlan's per namespace list */
+	struct vxlan_sock *vn_sock;	/* listening socket */
+	struct net_device *dev;
+	struct net	  *net;		/* netns for packet i/o */
+	struct vxlan_rdst default_dst;	/* default destination */
+	u32		  flags;	/* VXLAN_F_* in vxlan.h */
+
+	struct timer_list age_timer;
+	spinlock_t	  hash_lock;
+	unsigned int	  addrcnt;
+	struct gro_cells  gro_cells;
+
+	struct vxlan_config	cfg;
+
+	struct hlist_head fdb_head[FDB_HASH_SIZE];
+};
+
 #define VXLAN_F_LEARN			0x01
 #define VXLAN_F_PROXY			0x02
 #define VXLAN_F_RSC			0x04
@@ -130,6 +181,7 @@
 #define VXLAN_F_REMCSUM_RX		0x400
 #define VXLAN_F_GBP			0x800
 #define VXLAN_F_REMCSUM_NOPARTIAL	0x1000
+#define VXLAN_F_COLLECT_METADATA	0x2000
 
 /* Flags that are used in the receive path. These flags must match in
  * order for a socket to be shareable
@@ -137,18 +189,16 @@
 #define VXLAN_F_RCV_FLAGS		(VXLAN_F_GBP |			\
 					 VXLAN_F_UDP_ZERO_CSUM6_RX |	\
 					 VXLAN_F_REMCSUM_RX |		\
-					 VXLAN_F_REMCSUM_NOPARTIAL)
+					 VXLAN_F_REMCSUM_NOPARTIAL |	\
+					 VXLAN_F_COLLECT_METADATA)
 
-struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
-				  vxlan_rcv_t *rcv, void *data,
-				  bool no_share, u32 flags);
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+				    u8 name_assign_type, struct vxlan_config *conf);
 
-void vxlan_sock_release(struct vxlan_sock *vs);
-
-int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
-		   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
-		   __be16 src_port, __be16 dst_port, struct vxlan_metadata *md,
-		   bool xnet, u32 vxflags);
+static inline __be16 vxlan_dev_dst_port(struct vxlan_dev *vxlan)
+{
+	return inet_sk(vxlan->vn_sock->sock->sk)->inet_sport;
+}
 
 static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
 						     netdev_features_t features)
@@ -191,4 +241,10 @@
 {
 }
 #endif
+
+static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
+{
+	return vs->sock->sk->sk_family;
+}
+
 #endif
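
A sketch of creating a metadata-collecting VXLAN device with the new
API (the device name and use of the IANA port are made up for the
example):

static struct net_device *example_vxlan_create(struct net *net)
{
	struct vxlan_config conf;

	memset(&conf, 0, sizeof(conf));
	conf.flags = VXLAN_F_COLLECT_METADATA;	/* per-packet tunnel info */
	conf.dst_port = htons(4789);		/* IANA VXLAN port */

	return vxlan_dev_create(net, "vxlan-example", NET_NAME_USER, &conf);
}
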
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index f0ee97e..312e3fe 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -285,10 +285,13 @@
 	unsigned short		family;
 	struct dst_ops		*dst_ops;
 	void			(*garbage_collect)(struct net *net);
-	struct dst_entry	*(*dst_lookup)(struct net *net, int tos,
+	struct dst_entry	*(*dst_lookup)(struct net *net,
+					       int tos, int oif,
 					       const xfrm_address_t *saddr,
 					       const xfrm_address_t *daddr);
-	int			(*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
+	int			(*get_saddr)(struct net *net, int oif,
+					     xfrm_address_t *saddr,
+					     xfrm_address_t *daddr);
 	void			(*decode_session)(struct sk_buff *skb,
 						  struct flowi *fl,
 						  int reverse);
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 0e9d75b..74bc8547 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -584,6 +584,8 @@
 void snd_ac97_suspend(struct snd_ac97 *ac97);
 void snd_ac97_resume(struct snd_ac97 *ac97);
 #endif
+int snd_ac97_reset(struct snd_ac97 *ac97, bool try_warm, unsigned int id,
+	unsigned int id_mask);
 
 /* quirk types */
 enum {
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index adb5ba5..930b41e 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -4,14 +4,17 @@
 #ifndef __SOUND_HDA_I915_H
 #define __SOUND_HDA_I915_H
 
+#include <drm/i915_component.h>
+
 #ifdef CONFIG_SND_HDA_I915
 int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
 int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
 int snd_hdac_get_display_clk(struct hdac_bus *bus);
 int snd_hdac_i915_init(struct hdac_bus *bus);
 int snd_hdac_i915_exit(struct hdac_bus *bus);
+int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *);
 #else
-static int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
+static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
 {
 	return 0;
 }
@@ -31,6 +34,10 @@
 {
 	return 0;
 }
+static inline int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *ops)
+{
+	return -ENODEV;
+}
 #endif
 
 #endif /* __SOUND_HDA_I915_H */
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index ae995e5..2ae8812 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -160,6 +160,10 @@
 #define AZX_SPB_BASE			0x08
 /* Interval used to calculate the iterating register offset */
 #define AZX_SPB_INTERVAL		0x08
+/* SPIB base */
+#define AZX_SPB_SPIB			0x00
+/* SPIB MAXFIFO base */
+#define AZX_SPB_MAXFIFO			0x04
 
 /* registers of Global Time Synchronization Capability Structure */
 #define AZX_GTS_CAP_ID			0x1
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 4caf1fd..49bc836 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -119,6 +119,7 @@
 void snd_hdac_device_unregister(struct hdac_device *codec);
 
 int snd_hdac_refresh_widgets(struct hdac_device *codec);
+int snd_hdac_refresh_widget_sysfs(struct hdac_device *codec);
 
 unsigned int snd_hdac_make_cmd(struct hdac_device *codec, hda_nid_t nid,
 			       unsigned int verb, unsigned int parm);
@@ -164,15 +165,15 @@
 }
 
 #ifdef CONFIG_PM
-void snd_hdac_power_up(struct hdac_device *codec);
-void snd_hdac_power_down(struct hdac_device *codec);
-void snd_hdac_power_up_pm(struct hdac_device *codec);
-void snd_hdac_power_down_pm(struct hdac_device *codec);
+int snd_hdac_power_up(struct hdac_device *codec);
+int snd_hdac_power_down(struct hdac_device *codec);
+int snd_hdac_power_up_pm(struct hdac_device *codec);
+int snd_hdac_power_down_pm(struct hdac_device *codec);
 #else
-static inline void snd_hdac_power_up(struct hdac_device *codec) {}
-static inline void snd_hdac_power_down(struct hdac_device *codec) {}
-static inline void snd_hdac_power_up_pm(struct hdac_device *codec) {}
-static inline void snd_hdac_power_down_pm(struct hdac_device *codec) {}
+static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; }
+static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; }
+static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; }
+static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; }
 #endif
 
 /*
@@ -437,6 +438,8 @@
 struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
 					   struct snd_pcm_substream *substream);
 void snd_hdac_stream_release(struct hdac_stream *azx_dev);
+struct hdac_stream *snd_hdac_get_stream(struct hdac_bus *bus,
+					int dir, int stream_tag);
 
 int snd_hdac_stream_setup(struct hdac_stream *azx_dev);
 void snd_hdac_stream_cleanup(struct hdac_stream *azx_dev);
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 0f89df1..94210dc 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -34,6 +34,7 @@
 void snd_hdac_ext_bus_exit(struct hdac_ext_bus *sbus);
 int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *sbus, int addr);
 void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev);
+void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus);
 
 #define ebus_to_hbus(ebus)	(&(ebus)->bus)
 #define hbus_to_ebus(_bus) \
@@ -62,6 +63,8 @@
  * @hstream: hdac_stream
  * @pphc_addr: processing pipe host stream pointer
  * @pplc_addr: processing pipe link stream pointer
+ * @spib_addr: software position in buffer (SPIB) stream pointer
+ * @fifo_addr: software position max fifo stream pointer
  * @decoupled: stream host and link is decoupled
  * @link_locked: link is locked
  * @link_prepared: link is prepared
@@ -73,6 +76,9 @@
 	void __iomem *pphc_addr;
 	void __iomem *pplc_addr;
 
+	void __iomem *spib_addr;
+	void __iomem *fifo_addr;
+
 	bool decoupled:1;
 	bool link_locked:1;
 	bool link_prepared;
@@ -99,6 +105,11 @@
 				struct hdac_ext_stream *azx_dev, bool decouple);
 void snd_hdac_ext_stop_streams(struct hdac_ext_bus *sbus);
 
+int snd_hdac_ext_stream_set_spib(struct hdac_ext_bus *ebus,
+				 struct hdac_ext_stream *stream, u32 value);
+int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_ext_bus *ebus,
+				 struct hdac_ext_stream *stream);
+
 void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *hstream);
 void snd_hdac_ext_link_stream_clear(struct hdac_ext_stream *hstream);
 void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *hstream);
@@ -115,6 +126,7 @@
 
 int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link);
 int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link);
+int snd_hdac_ext_bus_link_power_down_all(struct hdac_ext_bus *ebus);
 void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link,
 				 int stream);
 void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
@@ -129,4 +141,63 @@
 	writew(((readw(addr + reg) & ~(mask)) | (val)), \
 		addr + reg)
 
+
+struct hdac_ext_device;
+
+/* ops common to all codec drivers */
+struct hdac_ext_codec_ops {
+	int (*build_controls)(struct hdac_ext_device *dev);
+	int (*init)(struct hdac_ext_device *dev);
+	void (*free)(struct hdac_ext_device *dev);
+};
+
+struct hda_dai_map {
+	char *dai_name;
+	hda_nid_t nid;
+	u32	maxbps;
+};
+
+#define HDA_MAX_NIDS 16
+
+/**
+ * struct hdac_ext_device - HDAC Ext device
+ *
+ * @hdac: hdac core device
+ * @nid_list: the dai map which matches the dai-name with the nid
+ * @map_cur_idx: the idx in use in dai_map
+ * @ops: the hda codec ops common to all codec drivers
+ * @private_data: private data, for asoc contains asoc codec object
+ */
+struct hdac_ext_device {
+	struct hdac_device hdac;
+	struct hdac_ext_bus *ebus;
+
+	/* soc-dai to nid map */
+	struct hda_dai_map nid_list[HDA_MAX_NIDS];
+	unsigned int map_cur_idx;
+
+	/* codec ops */
+	struct hdac_ext_codec_ops ops;
+
+	void *private_data;
+};
+
+#define to_ehdac_device(dev) (container_of((dev), \
+				 struct hdac_ext_device, hdac))
+/*
+ * HD-audio codec base driver
+ */
+struct hdac_ext_driver {
+	struct hdac_driver hdac;
+
+	int	(*probe)(struct hdac_ext_device *dev);
+	int	(*remove)(struct hdac_ext_device *dev);
+	void	(*shutdown)(struct hdac_ext_device *dev);
+};
+
+int snd_hda_ext_driver_register(struct hdac_ext_driver *drv);
+void snd_hda_ext_driver_unregister(struct hdac_ext_driver *drv);
+
+#define to_ehdac_driver(_drv) container_of(_drv, struct hdac_ext_driver, hdac)
+
 #endif /* __SOUND_HDAUDIO_EXT_H */
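
A minimal sketch of registering against the new ext codec driver core
(driver name and probe body are placeholders):

static int my_codec_probe(struct hdac_ext_device *edev)
{
	/* fill edev->ops and edev->nid_list, register DAIs, ... */
	return 0;
}

static struct hdac_ext_driver my_codec_driver = {
	.hdac = {
		.driver = {
			.name = "my-ext-codec",	/* placeholder */
		},
	},
	.probe = my_codec_probe,
};

module_driver(my_codec_driver, snd_hda_ext_driver_register,
	      snd_hda_ext_driver_unregister);
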
diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h
index 4cecd0c..bb7b2eb 100644
--- a/include/sound/rcar_snd.h
+++ b/include/sound/rcar_snd.h
@@ -61,6 +61,14 @@
 /*
  * flags
  */
+struct rsnd_ctu_platform_info {
+	u32 flags;
+};
+
+struct rsnd_mix_platform_info {
+	u32 flags;
+};
+
 struct rsnd_dvc_platform_info {
 	u32 flags;
 };
@@ -68,6 +76,8 @@
 struct rsnd_dai_path_info {
 	struct rsnd_ssi_platform_info *ssi;
 	struct rsnd_src_platform_info *src;
+	struct rsnd_ctu_platform_info *ctu;
+	struct rsnd_mix_platform_info *mix;
 	struct rsnd_dvc_platform_info *dvc;
 };
 
@@ -93,6 +103,10 @@
 	int ssi_info_nr;
 	struct rsnd_src_platform_info *src_info;
 	int src_info_nr;
+	struct rsnd_ctu_platform_info *ctu_info;
+	int ctu_info_nr;
+	struct rsnd_mix_platform_info *mix_info;
+	int mix_info_nr;
 	struct rsnd_dvc_platform_info *dvc_info;
 	int dvc_info_nr;
 	struct rsnd_dai_platform_info *dai_info;
diff --git a/include/sound/rt298.h b/include/sound/rt298.h
new file mode 100644
index 0000000..7fffeaa
--- /dev/null
+++ b/include/sound/rt298.h
@@ -0,0 +1,20 @@
+/*
+ * linux/sound/rt298.h -- Platform data for RT298
+ *
+ * Copyright 2013 Realtek Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_RT298_H
+#define __LINUX_SND_RT298_H
+
+struct rt298_platform_data {
+	bool cbj_en; /* combo jack enable */
+	bool gpio2_en; /* GPIO2 enable */
+	bool suspend_power_off; /* power is off during suspend */
+};
+
+#endif
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 37d95a8..5abba03 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -397,6 +397,7 @@
 			    const struct snd_soc_dapm_route *route, int num);
 int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm,
 			     const struct snd_soc_dapm_route *route, int num);
+void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
 
 /* dapm events */
 void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
@@ -511,9 +512,18 @@
 struct snd_soc_dapm_path {
 	const char *name;
 
-	/* source (input) and sink (output) widgets */
-	struct snd_soc_dapm_widget *source;
-	struct snd_soc_dapm_widget *sink;
+	/*
+	 * source (input) and sink (output) widgets
+	 * The union is for convenience, since it is a lot nicer to type
+	 * p->source, rather than p->node[SND_SOC_DAPM_DIR_IN]
+	 */
+	union {
+		struct {
+			struct snd_soc_dapm_widget *source;
+			struct snd_soc_dapm_widget *sink;
+		};
+		struct snd_soc_dapm_widget *node[2];
+	};
 
 	/* status */
 	u32 connect:1;	/* source and sink widgets are connected */
@@ -524,8 +534,7 @@
 	int (*connected)(struct snd_soc_dapm_widget *source,
 			 struct snd_soc_dapm_widget *sink);
 
-	struct list_head list_source;
-	struct list_head list_sink;
+	struct list_head list_node[2];
 	struct list_head list_kcontrol;
 	struct list_head list;
 };
@@ -559,8 +568,7 @@
 	unsigned char new_power:1;		/* power from this run */
 	unsigned char power_checked:1;		/* power checked this run */
 	unsigned char is_supply:1;		/* Widget is a supply type widget */
-	unsigned char is_sink:1;		/* Widget is a sink type widget */
-	unsigned char is_source:1;		/* Widget is a source type widget */
+	unsigned char is_ep:2;			/* Widget is an endpoint type widget */
 	int subseq;				/* sort within widget type */
 
 	int (*power_check)(struct snd_soc_dapm_widget *w);
@@ -575,16 +583,14 @@
 	struct snd_kcontrol **kcontrols;
 	struct snd_soc_dobj dobj;
 
-	/* widget input and outputs */
-	struct list_head sources;
-	struct list_head sinks;
+	/* widget input and output edges */
+	struct list_head edges[2];
 
 	/* used during DAPM updates */
 	struct list_head work_list;
 	struct list_head power_list;
 	struct list_head dirty;
-	int inputs;
-	int outputs;
+	int endpoints[2];
 
 	struct clk *clk;
 };
@@ -672,4 +678,58 @@
 	return dapm->bias_level;
 }
 
+enum snd_soc_dapm_direction {
+	SND_SOC_DAPM_DIR_IN,
+	SND_SOC_DAPM_DIR_OUT
+};
+
+#define SND_SOC_DAPM_DIR_TO_EP(x) BIT(x)
+
+#define SND_SOC_DAPM_EP_SOURCE SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_IN)
+#define SND_SOC_DAPM_EP_SINK SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_OUT)
+
+/**
+ * snd_soc_dapm_widget_for_each_path - Iterates over all paths in the
+ *   specified direction of a widget
+ * @w: The widget
+ * @dir: Whether to iterate over the paths where the specified widget is the
+ *       incoming or outgoing widgets
+ * @p: The path iterator variable
+ */
+#define snd_soc_dapm_widget_for_each_path(w, dir, p) \
+	list_for_each_entry(p, &w->edges[dir], list_node[dir])
+
+/**
+ * snd_soc_dapm_widget_for_each_path_safe - Iterates over all paths in the
+ *   specified direction of a widget
+ * @w: The widget
+ * @dir: Whether to iterate over the paths where the specified widget is the
+ *       incoming or outgoing widgets
+ * @p: The path iterator variable
+ * @next_p: Temporary storage for the next path
+ *
+ *  This macro works like snd_soc_dapm_widget_for_each_path, except that
+ *  it is safe to remove the current path from the list while iterating
+ */
+#define snd_soc_dapm_widget_for_each_path_safe(w, dir, p, next_p) \
+	list_for_each_entry_safe(p, next_p, &w->edges[dir], list_node[dir])
+
+/**
+ * snd_soc_dapm_widget_for_each_sink_path - Iterates over all paths leaving a
+ *  widget
+ * @w: The widget
+ * @p: The path iterator variable
+ */
+#define snd_soc_dapm_widget_for_each_sink_path(w, p) \
+	snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_IN, p)
+
+/**
+ * snd_soc_dapm_widget_for_each_source_path - Iterates over all paths leading to
+ *  a widget
+ * @w: The widget
+ * @p: The path iterator variable
+ */
+#define snd_soc_dapm_widget_for_each_source_path(w, p) \
+	snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_OUT, p)
+
 #endif
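
A sketch of the new direction-based iterators, replacing open-coded
walks of the old sources/sinks lists (the function itself is
illustrative):

static int example_count_connected_sinks(struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	int n = 0;

	/* walk every path leaving @w; p->connect marks active paths */
	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (p->connect)
			n++;
	}

	return n;
}
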
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index 427bc41..086cd7f 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -89,6 +89,13 @@
 		struct snd_ctl_elem_info *uinfo);
 };
 
+/* Bytes ext operations, for TLV byte controls */
+struct snd_soc_tplg_bytes_ext_ops {
+	u32 id;
+	int (*get)(unsigned int __user *bytes, unsigned int size);
+	int (*put)(const unsigned int __user *bytes, unsigned int size);
+};
+
 /*
  * DAPM widget event handlers - used to map handlers onto widgets.
  */
@@ -136,9 +143,13 @@
 	int (*manifest)(struct snd_soc_component *,
 		struct snd_soc_tplg_manifest *);
 
-	/* bespoke kcontrol handlers available for binding */
+	/* vendor specific kcontrol handlers available for binding */
 	const struct snd_soc_tplg_kcontrol_ops *io_ops;
 	int io_ops_count;
+
+	/* vendor specific bytes ext handlers available for binding */
+	const struct snd_soc_tplg_bytes_ext_ops *bytes_ext_ops;
+	int bytes_ext_ops_count;
 };
 
 #ifdef CONFIG_SND_SOC_TOPOLOGY
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 93df8bf..884e728 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -526,7 +526,8 @@
 
 #ifdef CONFIG_SND_SOC_AC97_BUS
 struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec);
-struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec);
+struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
+	unsigned int id, unsigned int id_mask);
 void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
 
 int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
@@ -619,6 +620,7 @@
  * @pin:    name of the pin to update
  * @mask:   bits to check for in reported jack status
  * @invert: if non-zero then pin is enabled when status is not reported
+ * @list:   internal list entry
  */
 struct snd_soc_jack_pin {
 	struct list_head list;
@@ -635,7 +637,7 @@
  * @jack_type: type of jack that is expected for this voltage
  * @debounce_time: debounce_time for jack, codec driver should wait for this
  *		duration before reading the adc for voltages
- * @:list: list container
+ * @list:   internal list entry
  */
 struct snd_soc_jack_zone {
 	unsigned int min_mv;
@@ -651,12 +653,12 @@
  * @gpio:         legacy gpio number
  * @idx:          gpio descriptor index within the function of the GPIO
  *                consumer device
- * @gpiod_dev     GPIO consumer device
+ * @gpiod_dev:    GPIO consumer device
  * @name:         gpio name. Also as connection ID for the GPIO consumer
  *                device function name lookup
  * @report:       value to report when jack detected
  * @invert:       report presence in low state
- * @debouce_time: debouce time in ms
+ * @debounce_time: debounce time in ms
  * @wake:	  enable as wake source
  * @jack_status_check: callback function which overrides the detection
  *		       to provide more complex checks (eg, reading an
@@ -672,11 +674,13 @@
 	int debounce_time;
 	bool wake;
 
+	/* private: */
 	struct snd_soc_jack *jack;
 	struct delayed_work work;
 	struct gpio_desc *desc;
 
 	void *data;
+	/* public: */
 	int (*jack_status_check)(void *data);
 };
 
@@ -758,7 +762,6 @@
 
 	unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
 	unsigned int registered_as_component:1;
-	unsigned int probed:1;
 
 	struct list_head list;
 
@@ -792,7 +795,6 @@
 
 	/* Don't use these, use snd_soc_component_get_dapm() */
 	struct snd_soc_dapm_context dapm;
-	struct snd_soc_dapm_context *dapm_ptr;
 
 	const struct snd_kcontrol_new *controls;
 	unsigned int num_controls;
@@ -832,9 +834,6 @@
 	/* component */
 	struct snd_soc_component component;
 
-	/* Don't access this directly, use snd_soc_codec_get_dapm() */
-	struct snd_soc_dapm_context dapm;
-
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *debugfs_reg;
 #endif
@@ -1277,7 +1276,7 @@
 static inline struct snd_soc_codec *snd_soc_dapm_to_codec(
 	struct snd_soc_dapm_context *dapm)
 {
-	return container_of(dapm, struct snd_soc_codec, dapm);
+	return snd_soc_component_to_codec(snd_soc_dapm_to_component(dapm));
 }
 
 /**
@@ -1302,7 +1301,7 @@
 static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
 	struct snd_soc_component *component)
 {
-	return component->dapm_ptr;
+	return &component->dapm;
 }
 
 /**
@@ -1314,12 +1313,12 @@
 static inline struct snd_soc_dapm_context *snd_soc_codec_get_dapm(
 	struct snd_soc_codec *codec)
 {
-	return &codec->dapm;
+	return snd_soc_component_get_dapm(&codec->component);
 }
 
 /**
  * snd_soc_dapm_init_bias_level() - Initialize CODEC DAPM bias level
- * @dapm: The CODEC for which to initialize the DAPM bias level
+ * @codec: The CODEC for which to initialize the DAPM bias level
  * @level: The DAPM level to initialize to
  *
  * Initializes the CODEC DAPM bias level. See snd_soc_dapm_init_bias_level().
@@ -1604,6 +1603,10 @@
 int snd_soc_of_parse_tdm_slot(struct device_node *np,
 			      unsigned int *slots,
 			      unsigned int *slot_width);
+void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+				   struct snd_soc_codec_conf *codec_conf,
+				   struct device_node *of_node,
+				   const char *propname);
 int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
 				   const char *propname);
 unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 88cf39d..317a1ed 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -8,6 +8,7 @@
 #include <linux/tracepoint.h>
 
 #define DAPM_DIRECT "(direct)"
+#define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
 
 struct snd_soc_jack;
 struct snd_soc_codec;
@@ -152,62 +153,38 @@
 		  (int)__entry->path_checks, (int)__entry->neighbour_checks)
 );
 
-TRACE_EVENT(snd_soc_dapm_output_path,
+TRACE_EVENT(snd_soc_dapm_path,
 
 	TP_PROTO(struct snd_soc_dapm_widget *widget,
+		enum snd_soc_dapm_direction dir,
 		struct snd_soc_dapm_path *path),
 
-	TP_ARGS(widget, path),
+	TP_ARGS(widget, dir, path),
 
 	TP_STRUCT__entry(
 		__string(	wname,	widget->name		)
 		__string(	pname,	path->name ? path->name : DAPM_DIRECT)
-		__string(	psname,	path->sink->name	)
-		__field(	int,	path_sink		)
+		__string(	pnname,	path->node[dir]->name	)
+		__field(	int,	path_node		)
 		__field(	int,	path_connect		)
+		__field(	int,	path_dir		)
 	),
 
 	TP_fast_assign(
 		__assign_str(wname, widget->name);
 		__assign_str(pname, path->name ? path->name : DAPM_DIRECT);
-		__assign_str(psname, path->sink->name);
+		__assign_str(pnname, path->node[dir]->name);
 		__entry->path_connect = path->connect;
-		__entry->path_sink = (long)path->sink;
+		__entry->path_node = (long)path->node[dir];
+		__entry->path_dir = dir;
 	),
 
-	TP_printk("%c%s -> %s -> %s",
-		(int) __entry->path_sink &&
+	TP_printk("%c%s %s %s %s %s",
+		(int) __entry->path_node &&
 		(int) __entry->path_connect ? '*' : ' ',
-		__get_str(wname), __get_str(pname), __get_str(psname))
-);
-
-TRACE_EVENT(snd_soc_dapm_input_path,
-
-	TP_PROTO(struct snd_soc_dapm_widget *widget,
-		struct snd_soc_dapm_path *path),
-
-	TP_ARGS(widget, path),
-
-	TP_STRUCT__entry(
-		__string(	wname,	widget->name		)
-		__string(	pname,	path->name ? path->name : DAPM_DIRECT)
-		__string(	psname,	path->source->name	)
-		__field(	int,	path_source		)
-		__field(	int,	path_connect		)
-	),
-
-	TP_fast_assign(
-		__assign_str(wname, widget->name);
-		__assign_str(pname, path->name ? path->name : DAPM_DIRECT);
-		__assign_str(psname, path->source->name);
-		__entry->path_connect = path->connect;
-		__entry->path_source = (long)path->source;
-	),
-
-	TP_printk("%c%s <- %s <- %s",
-		(int) __entry->path_source &&
-		(int) __entry->path_connect ? '*' : ' ',
-		__get_str(wname), __get_str(pname), __get_str(psname))
+		__get_str(wname), DAPM_ARROW(__entry->path_dir),
+		__get_str(pname), DAPM_ARROW(__entry->path_dir),
+		__get_str(pnname))
 );
 
 TRACE_EVENT(snd_soc_dapm_connected,
diff --git a/include/trace/events/ext3.h b/include/trace/events/ext3.h
deleted file mode 100644
index fc733d2..0000000
--- a/include/trace/events/ext3.h
+++ /dev/null
@@ -1,866 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM ext3
-
-#if !defined(_TRACE_EXT3_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_EXT3_H
-
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(ext3_free_inode,
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	umode_t, mode			)
-		__field(	uid_t,	uid			)
-		__field(	gid_t,	gid			)
-		__field(	blkcnt_t, blocks		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->mode	= inode->i_mode;
-		__entry->uid	= i_uid_read(inode);
-		__entry->gid	= i_gid_read(inode);
-		__entry->blocks	= inode->i_blocks;
-	),
-
-	TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->mode, __entry->uid, __entry->gid,
-		  (unsigned long) __entry->blocks)
-);
-
-TRACE_EVENT(ext3_request_inode,
-	TP_PROTO(struct inode *dir, int mode),
-
-	TP_ARGS(dir, mode),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	dir			)
-		__field(	umode_t, mode			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= dir->i_sb->s_dev;
-		__entry->dir	= dir->i_ino;
-		__entry->mode	= mode;
-	),
-
-	TP_printk("dev %d,%d dir %lu mode 0%o",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->dir, __entry->mode)
-);
-
-TRACE_EVENT(ext3_allocate_inode,
-	TP_PROTO(struct inode *inode, struct inode *dir, int mode),
-
-	TP_ARGS(inode, dir, mode),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	ino_t,	dir			)
-		__field(	umode_t, mode			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->dir	= dir->i_ino;
-		__entry->mode	= mode;
-	),
-
-	TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long) __entry->dir, __entry->mode)
-);
-
-TRACE_EVENT(ext3_evict_inode,
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	int,	nlink			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->nlink	= inode->i_nlink;
-	),
-
-	TP_printk("dev %d,%d ino %lu nlink %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, __entry->nlink)
-);
-
-TRACE_EVENT(ext3_drop_inode,
-	TP_PROTO(struct inode *inode, int drop),
-
-	TP_ARGS(inode, drop),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	int,	drop			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->drop	= drop;
-	),
-
-	TP_printk("dev %d,%d ino %lu drop %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, __entry->drop)
-);
-
-TRACE_EVENT(ext3_mark_inode_dirty,
-	TP_PROTO(struct inode *inode, unsigned long IP),
-
-	TP_ARGS(inode, IP),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(unsigned long,	ip			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->ip	= IP;
-	),
-
-	TP_printk("dev %d,%d ino %lu caller %pS",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, (void *)__entry->ip)
-);
-
-TRACE_EVENT(ext3_write_begin,
-	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
-		 unsigned int flags),
-
-	TP_ARGS(inode, pos, len, flags),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	loff_t,	pos			)
-		__field(	unsigned int, len		)
-		__field(	unsigned int, flags		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->pos	= pos;
-		__entry->len	= len;
-		__entry->flags	= flags;
-	),
-
-	TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long long) __entry->pos, __entry->len,
-		  __entry->flags)
-);
-
-DECLARE_EVENT_CLASS(ext3__write_end,
-	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
-			unsigned int copied),
-
-	TP_ARGS(inode, pos, len, copied),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	loff_t,	pos			)
-		__field(	unsigned int, len		)
-		__field(	unsigned int, copied		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->pos	= pos;
-		__entry->len	= len;
-		__entry->copied	= copied;
-	),
-
-	TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long long) __entry->pos, __entry->len,
-		  __entry->copied)
-);
-
-DEFINE_EVENT(ext3__write_end, ext3_ordered_write_end,
-
-	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
-		 unsigned int copied),
-
-	TP_ARGS(inode, pos, len, copied)
-);
-
-DEFINE_EVENT(ext3__write_end, ext3_writeback_write_end,
-
-	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
-		 unsigned int copied),
-
-	TP_ARGS(inode, pos, len, copied)
-);
-
-DEFINE_EVENT(ext3__write_end, ext3_journalled_write_end,
-
-	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
-		 unsigned int copied),
-
-	TP_ARGS(inode, pos, len, copied)
-);
-
-DECLARE_EVENT_CLASS(ext3__page_op,
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	pgoff_t, index			)
-
-	),
-
-	TP_fast_assign(
-		__entry->index	= page->index;
-		__entry->ino	= page->mapping->host->i_ino;
-		__entry->dev	= page->mapping->host->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu page_index %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, __entry->index)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_ordered_writepage,
-
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_writeback_writepage,
-
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_journalled_writepage,
-
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_readpage,
-
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_releasepage,
-
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page)
-);
-
-TRACE_EVENT(ext3_invalidatepage,
-	TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
-
-	TP_ARGS(page, offset, length),
-
-	TP_STRUCT__entry(
-		__field(	pgoff_t, index			)
-		__field(	unsigned int, offset		)
-		__field(	unsigned int, length		)
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-
-	),
-
-	TP_fast_assign(
-		__entry->index	= page->index;
-		__entry->offset	= offset;
-		__entry->length	= length;
-		__entry->ino	= page->mapping->host->i_ino;
-		__entry->dev	= page->mapping->host->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->index, __entry->offset, __entry->length)
-);
-
-TRACE_EVENT(ext3_discard_blocks,
-	TP_PROTO(struct super_block *sb, unsigned long blk,
-			unsigned long count),
-
-	TP_ARGS(sb, blk, count),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,		dev		)
-		__field(	unsigned long,	blk		)
-		__field(	unsigned long,	count		)
-
-	),
-
-	TP_fast_assign(
-		__entry->dev	= sb->s_dev;
-		__entry->blk	= blk;
-		__entry->count	= count;
-	),
-
-	TP_printk("dev %d,%d blk %lu count %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->blk, __entry->count)
-);
-
-TRACE_EVENT(ext3_request_blocks,
-	TP_PROTO(struct inode *inode, unsigned long goal,
-		 unsigned long count),
-
-	TP_ARGS(inode, goal, count),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	unsigned long, count		)
-		__field(	unsigned long,	goal		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->count	= count;
-		__entry->goal	= goal;
-	),
-
-	TP_printk("dev %d,%d ino %lu count %lu goal %lu ",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->count, __entry->goal)
-);
-
-TRACE_EVENT(ext3_allocate_blocks,
-	TP_PROTO(struct inode *inode, unsigned long goal,
-		 unsigned long count, unsigned long block),
-
-	TP_ARGS(inode, goal, count, block),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	unsigned long,	block		)
-		__field(	unsigned long, count		)
-		__field(	unsigned long,	goal		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->block	= block;
-		__entry->count	= count;
-		__entry->goal	= goal;
-	),
-
-	TP_printk("dev %d,%d ino %lu count %lu block %lu goal %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		   __entry->count, __entry->block,
-		  __entry->goal)
-);
-
-TRACE_EVENT(ext3_free_blocks,
-	TP_PROTO(struct inode *inode, unsigned long block,
-		 unsigned long count),
-
-	TP_ARGS(inode, block, count),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	umode_t, mode			)
-		__field(	unsigned long,	block		)
-		__field(	unsigned long,	count		)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= inode->i_sb->s_dev;
-		__entry->ino		= inode->i_ino;
-		__entry->mode		= inode->i_mode;
-		__entry->block		= block;
-		__entry->count		= count;
-	),
-
-	TP_printk("dev %d,%d ino %lu mode 0%o block %lu count %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->mode, __entry->block, __entry->count)
-);
-
-TRACE_EVENT(ext3_sync_file_enter,
-	TP_PROTO(struct file *file, int datasync),
-
-	TP_ARGS(file, datasync),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	ino_t,	parent			)
-		__field(	int,	datasync		)
-	),
-
-	TP_fast_assign(
-		struct dentry *dentry = file->f_path.dentry;
-
-		__entry->dev		= d_inode(dentry)->i_sb->s_dev;
-		__entry->ino		= d_inode(dentry)->i_ino;
-		__entry->datasync	= datasync;
-		__entry->parent		= d_inode(dentry->d_parent)->i_ino;
-	),
-
-	TP_printk("dev %d,%d ino %lu parent %ld datasync %d ",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long) __entry->parent, __entry->datasync)
-);
-
-TRACE_EVENT(ext3_sync_file_exit,
-	TP_PROTO(struct inode *inode, int ret),
-
-	TP_ARGS(inode, ret),
-
-	TP_STRUCT__entry(
-		__field(	int,	ret			)
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-	),
-
-	TP_fast_assign(
-		__entry->ret		= ret;
-		__entry->ino		= inode->i_ino;
-		__entry->dev		= inode->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu ret %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->ret)
-);
-
-TRACE_EVENT(ext3_sync_fs,
-	TP_PROTO(struct super_block *sb, int wait),
-
-	TP_ARGS(sb, wait),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	wait			)
-
-	),
-
-	TP_fast_assign(
-		__entry->dev	= sb->s_dev;
-		__entry->wait	= wait;
-	),
-
-	TP_printk("dev %d,%d wait %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->wait)
-);
-
-TRACE_EVENT(ext3_rsv_window_add,
-	TP_PROTO(struct super_block *sb,
-		 struct ext3_reserve_window_node *rsv_node),
-
-	TP_ARGS(sb, rsv_node),
-
-	TP_STRUCT__entry(
-		__field(	unsigned long,	start		)
-		__field(	unsigned long,	end		)
-		__field(	dev_t,	dev			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= sb->s_dev;
-		__entry->start	= rsv_node->rsv_window._rsv_start;
-		__entry->end	= rsv_node->rsv_window._rsv_end;
-	),
-
-	TP_printk("dev %d,%d start %lu end %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->start, __entry->end)
-);
-
-TRACE_EVENT(ext3_discard_reservation,
-	TP_PROTO(struct inode *inode,
-		 struct ext3_reserve_window_node *rsv_node),
-
-	TP_ARGS(inode, rsv_node),
-
-	TP_STRUCT__entry(
-		__field(	unsigned long,	start		)
-		__field(	unsigned long,	end		)
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-	),
-
-	TP_fast_assign(
-		__entry->start	= rsv_node->rsv_window._rsv_start;
-		__entry->end	= rsv_node->rsv_window._rsv_end;
-		__entry->ino	= inode->i_ino;
-		__entry->dev	= inode->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu start %lu end %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long)__entry->ino, __entry->start,
-		  __entry->end)
-);
-
-TRACE_EVENT(ext3_alloc_new_reservation,
-	TP_PROTO(struct super_block *sb, unsigned long goal),
-
-	TP_ARGS(sb, goal),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	unsigned long,	goal		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= sb->s_dev;
-		__entry->goal	= goal;
-	),
-
-	TP_printk("dev %d,%d goal %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->goal)
-);
-
-TRACE_EVENT(ext3_reserved,
-	TP_PROTO(struct super_block *sb, unsigned long block,
-		 struct ext3_reserve_window_node *rsv_node),
-
-	TP_ARGS(sb, block, rsv_node),
-
-	TP_STRUCT__entry(
-		__field(	unsigned long,	block		)
-		__field(	unsigned long,	start		)
-		__field(	unsigned long,	end		)
-		__field(	dev_t,	dev			)
-	),
-
-	TP_fast_assign(
-		__entry->block	= block;
-		__entry->start	= rsv_node->rsv_window._rsv_start;
-		__entry->end	= rsv_node->rsv_window._rsv_end;
-		__entry->dev	= sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d block %lu, start %lu end %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->block, __entry->start, __entry->end)
-);
-
-TRACE_EVENT(ext3_forget,
-	TP_PROTO(struct inode *inode, int is_metadata, unsigned long block),
-
-	TP_ARGS(inode, is_metadata, block),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	umode_t, mode			)
-		__field(	int,	is_metadata		)
-		__field(	unsigned long,	block		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->mode	= inode->i_mode;
-		__entry->is_metadata = is_metadata;
-		__entry->block	= block;
-	),
-
-	TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->mode, __entry->is_metadata, __entry->block)
-);
-
-TRACE_EVENT(ext3_read_block_bitmap,
-	TP_PROTO(struct super_block *sb, unsigned int group),
-
-	TP_ARGS(sb, group),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	__u32,	group			)
-
-	),
-
-	TP_fast_assign(
-		__entry->dev	= sb->s_dev;
-		__entry->group	= group;
-	),
-
-	TP_printk("dev %d,%d group %u",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->group)
-);
-
-TRACE_EVENT(ext3_direct_IO_enter,
-	TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
-
-	TP_ARGS(inode, offset, len, rw),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-		__field(	loff_t,	pos			)
-		__field(	unsigned long,	len		)
-		__field(	int,	rw			)
-	),
-
-	TP_fast_assign(
-		__entry->ino	= inode->i_ino;
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->pos	= offset;
-		__entry->len	= len;
-		__entry->rw	= rw;
-	),
-
-	TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long long) __entry->pos, __entry->len,
-		  __entry->rw)
-);
-
-TRACE_EVENT(ext3_direct_IO_exit,
-	TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
-		 int rw, int ret),
-
-	TP_ARGS(inode, offset, len, rw, ret),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-		__field(	loff_t,	pos			)
-		__field(	unsigned long,	len		)
-		__field(	int,	rw			)
-		__field(	int,	ret			)
-	),
-
-	TP_fast_assign(
-		__entry->ino	= inode->i_ino;
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->pos	= offset;
-		__entry->len	= len;
-		__entry->rw	= rw;
-		__entry->ret	= ret;
-	),
-
-	TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long long) __entry->pos, __entry->len,
-		  __entry->rw, __entry->ret)
-);
-
-TRACE_EVENT(ext3_unlink_enter,
-	TP_PROTO(struct inode *parent, struct dentry *dentry),
-
-	TP_ARGS(parent, dentry),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,	parent			)
-		__field(	ino_t,	ino			)
-		__field(	loff_t,	size			)
-		__field(	dev_t,	dev			)
-	),
-
-	TP_fast_assign(
-		__entry->parent		= parent->i_ino;
-		__entry->ino		= d_inode(dentry)->i_ino;
-		__entry->size		= d_inode(dentry)->i_size;
-		__entry->dev		= d_inode(dentry)->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu size %lld parent %ld",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long long)__entry->size,
-		  (unsigned long) __entry->parent)
-);
-
-TRACE_EVENT(ext3_unlink_exit,
-	TP_PROTO(struct dentry *dentry, int ret),
-
-	TP_ARGS(dentry, ret),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-		__field(	int,	ret			)
-	),
-
-	TP_fast_assign(
-		__entry->ino		= d_inode(dentry)->i_ino;
-		__entry->dev		= d_inode(dentry)->i_sb->s_dev;
-		__entry->ret		= ret;
-	),
-
-	TP_printk("dev %d,%d ino %lu ret %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->ret)
-);
-
-DECLARE_EVENT_CLASS(ext3__truncate,
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,		ino		)
-		__field(	dev_t,		dev		)
-		__field(	blkcnt_t,	blocks		)
-	),
-
-	TP_fast_assign(
-		__entry->ino    = inode->i_ino;
-		__entry->dev    = inode->i_sb->s_dev;
-		__entry->blocks	= inode->i_blocks;
-	),
-
-	TP_printk("dev %d,%d ino %lu blocks %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, (unsigned long) __entry->blocks)
-);
-
-DEFINE_EVENT(ext3__truncate, ext3_truncate_enter,
-
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode)
-);
-
-DEFINE_EVENT(ext3__truncate, ext3_truncate_exit,
-
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode)
-);
-
-TRACE_EVENT(ext3_get_blocks_enter,
-	TP_PROTO(struct inode *inode, unsigned long lblk,
-		 unsigned long len, int create),
-
-	TP_ARGS(inode, lblk, len, create),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,		ino		)
-		__field(	dev_t,		dev		)
-		__field(	unsigned long,	lblk		)
-		__field(	unsigned long,	len		)
-		__field(	int,		create		)
-	),
-
-	TP_fast_assign(
-		__entry->ino    = inode->i_ino;
-		__entry->dev    = inode->i_sb->s_dev;
-		__entry->lblk	= lblk;
-		__entry->len	= len;
-		__entry->create	= create;
-	),
-
-	TP_printk("dev %d,%d ino %lu lblk %lu len %lu create %u",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->lblk, __entry->len, __entry->create)
-);
-
-TRACE_EVENT(ext3_get_blocks_exit,
-	TP_PROTO(struct inode *inode, unsigned long lblk,
-		 unsigned long pblk, unsigned long len, int ret),
-
-	TP_ARGS(inode, lblk, pblk, len, ret),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,		ino		)
-		__field(	dev_t,		dev		)
-		__field(	unsigned long,	lblk		)
-		__field(	unsigned long,	pblk		)
-		__field(	unsigned long,	len		)
-		__field(	int,		ret		)
-	),
-
-	TP_fast_assign(
-		__entry->ino    = inode->i_ino;
-		__entry->dev    = inode->i_sb->s_dev;
-		__entry->lblk	= lblk;
-		__entry->pblk	= pblk;
-		__entry->len	= len;
-		__entry->ret	= ret;
-	),
-
-	TP_printk("dev %d,%d ino %lu lblk %lu pblk %lu len %lu ret %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		   __entry->lblk, __entry->pblk,
-		  __entry->len, __entry->ret)
-);
-
-TRACE_EVENT(ext3_load_inode,
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,	ino		)
-		__field(	dev_t,	dev		)
-	),
-
-	TP_fast_assign(
-		__entry->ino		= inode->i_ino;
-		__entry->dev		= inode->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino)
-);
-
-#endif /* _TRACE_EXT3_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 04856a2..a019465 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -1099,11 +1099,11 @@
 TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
 
 	TP_PROTO(struct inode *inode, unsigned int pgofs,
-						struct extent_node *en),
+						struct extent_info *ei),
 
-	TP_ARGS(inode, pgofs, en),
+	TP_ARGS(inode, pgofs, ei),
 
-	TP_CONDITION(en),
+	TP_CONDITION(ei),
 
 	TP_STRUCT__entry(
 		__field(dev_t,	dev)
@@ -1118,9 +1118,9 @@
 		__entry->dev = inode->i_sb->s_dev;
 		__entry->ino = inode->i_ino;
 		__entry->pgofs = pgofs;
-		__entry->fofs = en->ei.fofs;
-		__entry->blk = en->ei.blk;
-		__entry->len = en->ei.len;
+		__entry->fofs = ei->fofs;
+		__entry->blk = ei->blk;
+		__entry->len = ei->len;
 	),
 
 	TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
new file mode 100644
index 0000000..833cfcb
--- /dev/null
+++ b/include/trace/events/fib.h
@@ -0,0 +1,113 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fib
+
+#if !defined(_TRACE_FIB_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FIB_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net/ip_fib.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(fib_table_lookup,
+
+	TP_PROTO(u32 tb_id, const struct flowi4 *flp),
+
+	TP_ARGS(tb_id, flp),
+
+	TP_STRUCT__entry(
+		__field(	u32,	tb_id		)
+		__field(	int,	oif		)
+		__field(	int,	iif		)
+		__field(	__u8,	tos		)
+		__field(	__u8,	scope		)
+		__field(	__u8,	flags		)
+		__array(	__u8,	src,	4	)
+		__array(	__u8,	dst,	4	)
+	),
+
+	TP_fast_assign(
+		__be32 *p32;
+
+		__entry->tb_id = tb_id;
+		__entry->oif = flp->flowi4_oif;
+		__entry->iif = flp->flowi4_iif;
+		__entry->tos = flp->flowi4_tos;
+		__entry->scope = flp->flowi4_scope;
+		__entry->flags = flp->flowi4_flags;
+
+		p32 = (__be32 *) __entry->src;
+		*p32 = flp->saddr;
+
+		p32 = (__be32 *) __entry->dst;
+		*p32 = flp->daddr;
+	),
+
+	TP_printk("table %u oif %d iif %d src %pI4 dst %pI4 tos %d scope %d flags %x",
+		  __entry->tb_id, __entry->oif, __entry->iif,
+		  __entry->src, __entry->dst, __entry->tos, __entry->scope,
+		  __entry->flags)
+);
+
+TRACE_EVENT(fib_table_lookup_nh,
+
+	TP_PROTO(const struct fib_nh *nh),
+
+	TP_ARGS(nh),
+
+	TP_STRUCT__entry(
+		__string(	name,	nh->nh_dev->name)
+		__field(	int,	oif		)
+		__array(	__u8,	src,	4	)
+	),
+
+	TP_fast_assign(
+		__be32 *p32 = (__be32 *) __entry->src;
+
+		__assign_str(name, nh->nh_dev ? nh->nh_dev->name : "not set");
+		__entry->oif = nh->nh_oif;
+		*p32 = nh->nh_saddr;
+	),
+
+	TP_printk("nexthop dev %s oif %d src %pI4",
+		  __get_str(name), __entry->oif, __entry->src)
+);
+
+TRACE_EVENT(fib_validate_source,
+
+	TP_PROTO(const struct net_device *dev, const struct flowi4 *flp),
+
+	TP_ARGS(dev, flp),
+
+	TP_STRUCT__entry(
+		__string(	name,	dev->name	)
+		__field(	int,	oif		)
+		__field(	int,	iif		)
+		__field(	__u8,	tos		)
+		__array(	__u8,	src,	4	)
+		__array(	__u8,	dst,	4	)
+	),
+
+	TP_fast_assign(
+		__be32 *p32;
+
+		__assign_str(name, dev ? dev->name : "not set");
+		__entry->oif = flp->flowi4_oif;
+		__entry->iif = flp->flowi4_iif;
+		__entry->tos = flp->flowi4_tos;
+
+		p32 = (__be32 *) __entry->src;
+		*p32 = flp->saddr;
+
+		p32 = (__be32 *) __entry->dst;
+		*p32 = flp->daddr;
+	),
+
+	TP_printk("dev %s oif %d iif %d tos %d src %pI4 dst %pI4",
+		  __get_str(name), __entry->oif, __entry->iif, __entry->tos,
+		  __entry->src, __entry->dst)
+);
+#endif /* _TRACE_FIB_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
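
Note: the new fib tracepoints capture IPv4 addresses as __u8[4] arrays
filled through a __be32 pointer, so %pI4 can print straight from the ring
buffer without dereferencing anything at TP_printk() time. A userspace
sketch of the same layout trick (illustrative only, not kernel code):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
    unsigned char src[4];
    uint32_t saddr = htonl(0xC0A80001);   /* 192.168.0.1 in network order */

    memcpy(src, &saddr, sizeof(src));     /* same trick as the tracepoint */
    printf("src %u.%u.%u.%u\n", src[0], src[1], src[2], src[3]);
    return 0;
}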
diff --git a/include/trace/events/jbd.h b/include/trace/events/jbd.h
deleted file mode 100644
index da6f259..0000000
--- a/include/trace/events/jbd.h
+++ /dev/null
@@ -1,194 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM jbd
-
-#if !defined(_TRACE_JBD_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_JBD_H
-
-#include <linux/jbd.h>
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(jbd_checkpoint,
-
-	TP_PROTO(journal_t *journal, int result),
-
-	TP_ARGS(journal, result),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	result			)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->result		= result;
-	),
-
-	TP_printk("dev %d,%d result %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->result)
-);
-
-DECLARE_EVENT_CLASS(jbd_commit,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	transaction		)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->transaction	= commit_transaction->t_tid;
-	),
-
-	TP_printk("dev %d,%d transaction %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_start_commit,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_commit_locking,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_commit_flushing,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_commit_logging,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction)
-);
-
-TRACE_EVENT(jbd_drop_transaction,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	transaction		)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->transaction	= commit_transaction->t_tid;
-	),
-
-	TP_printk("dev %d,%d transaction %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->transaction)
-);
-
-TRACE_EVENT(jbd_end_commit,
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	transaction		)
-		__field(	int,	head			)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->transaction	= commit_transaction->t_tid;
-		__entry->head		= journal->j_tail_sequence;
-	),
-
-	TP_printk("dev %d,%d transaction %d head %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->transaction, __entry->head)
-);
-
-TRACE_EVENT(jbd_do_submit_data,
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	transaction		)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->transaction	= commit_transaction->t_tid;
-	),
-
-	TP_printk("dev %d,%d transaction %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		   __entry->transaction)
-);
-
-TRACE_EVENT(jbd_cleanup_journal_tail,
-
-	TP_PROTO(journal_t *journal, tid_t first_tid,
-		 unsigned long block_nr, unsigned long freed),
-
-	TP_ARGS(journal, first_tid, block_nr, freed),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	tid_t,	tail_sequence		)
-		__field(	tid_t,	first_tid		)
-		__field(unsigned long,	block_nr		)
-		__field(unsigned long,	freed			)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->tail_sequence	= journal->j_tail_sequence;
-		__entry->first_tid	= first_tid;
-		__entry->block_nr	= block_nr;
-		__entry->freed		= freed;
-	),
-
-	TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->tail_sequence, __entry->first_tid,
-		  __entry->block_nr, __entry->freed)
-);
-
-TRACE_EVENT(journal_write_superblock,
-	TP_PROTO(journal_t *journal, int write_op),
-
-	TP_ARGS(journal, write_op),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	write_op		)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->write_op	= write_op;
-	),
-
-	TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
-		  MINOR(__entry->dev), __entry->write_op)
-);
-
-#endif /* _TRACE_JBD_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index fd1a02c..003dca9 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -529,18 +529,21 @@
 
 	TP_STRUCT__entry(
 		__field(struct svc_xprt *, xprt)
-		__field(struct svc_rqst *, rqst)
+		__field_struct(struct sockaddr_storage, ss)
+		__field(int, pid)
+		__field(unsigned long, flags)
 	),
 
 	TP_fast_assign(
 		__entry->xprt = xprt;
-		__entry->rqst = rqst;
+		xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
+		__entry->pid = rqst ? rqst->rq_task->pid : 0;
+		__entry->flags = xprt ? xprt->xpt_flags : 0;
 	),
 
 	TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
-		(struct sockaddr *)&__entry->xprt->xpt_remote,
-		__entry->rqst ? __entry->rqst->rq_task->pid : 0,
-		show_svc_xprt_flags(__entry->xprt->xpt_flags))
+		(struct sockaddr *)&__entry->ss,
+		__entry->pid, show_svc_xprt_flags(__entry->flags))
 );
 
 TRACE_EVENT(svc_xprt_dequeue,
@@ -589,16 +592,20 @@
 	TP_STRUCT__entry(
 		__field(struct svc_xprt *, xprt)
 		__field(int, len)
+		__field_struct(struct sockaddr_storage, ss)
+		__field(unsigned long, flags)
 	),
 
 	TP_fast_assign(
 		__entry->xprt = xprt;
+		xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
 		__entry->len = len;
+		__entry->flags = xprt ? xprt->xpt_flags : 0;
 	),
 
 	TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
-		(struct sockaddr *)&__entry->xprt->xpt_remote, __entry->len,
-		show_svc_xprt_flags(__entry->xprt->xpt_flags))
+		(struct sockaddr *)&__entry->ss,
+		__entry->len, show_svc_xprt_flags(__entry->flags))
 );
 #endif /* _TRACE_SUNRPC_H */
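
Note: this hunk closes a use-after-free window. TP_printk() runs when the
ring buffer is read, possibly long after the event fired, so the old code
dereferenced xprt/rqst pointers that might be stale by then. The fix
snapshots the remote address, pid, and flags into the entry at
TP_fast_assign() time. A simplified standalone sketch of the pattern (the
types and names are stand-ins, not the real sunrpc structures):

#include <stdio.h>
#include <string.h>

struct xprt { unsigned long flags; char remote[32]; };

struct trace_record {
    const struct xprt *xprt;  /* kept only as an opaque cookie */
    unsigned long flags;      /* snapshot, safe to read later */
    char remote[32];          /* snapshot of the remote address */
};

static void record_event(struct trace_record *rec, const struct xprt *xprt)
{
    rec->xprt = xprt;
    rec->flags = xprt ? xprt->flags : 0;
    if (xprt)
        memcpy(rec->remote, xprt->remote, sizeof(rec->remote));
    else
        memset(rec->remote, 0, sizeof(rec->remote));
}

int main(void)
{
    struct xprt x = { .flags = 0x5, .remote = "192.0.2.1:2049" };
    struct trace_record rec;

    record_event(&rec, &x);
    memset(&x, 0, sizeof(x));  /* the original may be freed or reused... */
    printf("xprt=%p addr=%s flags=%#lx\n",
           (void *)rec.xprt, rec.remote, rec.flags);  /* ...snapshot survives */
    return 0;
}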
 
diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h
index 4250f36..bc8815f 100644
--- a/include/trace/events/tlb.h
+++ b/include/trace/events/tlb.h
@@ -11,7 +11,8 @@
 	EM(  TLB_FLUSH_ON_TASK_SWITCH,	"flush on task switch" )	\
 	EM(  TLB_REMOTE_SHOOTDOWN,	"remote shootdown" )		\
 	EM(  TLB_LOCAL_SHOOTDOWN,	"local shootdown" )		\
-	EMe( TLB_LOCAL_MM_SHOOTDOWN,	"local mm shootdown" )
+	EM(  TLB_LOCAL_MM_SHOOTDOWN,	"local mm shootdown" )		\
+	EMe( TLB_REMOTE_SEND_IPI,	"remote ipi send" )
 
 /*
  * First define the enums in TLB_FLUSH_REASON to be exported to userspace
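
Note: EM()/EMe() is the usual trace-header X-macro: a single list expands
once into enum symbols and again into symbol/string pairs (and, in the
kernel, into TRACE_DEFINE_ENUM() exports). A standalone illustration of the
double expansion, with simplified names:

#include <stdio.h>

#define TLB_FLUSH_REASON                          \
    EM(TASK_SWITCH,      "flush on task switch")  \
    EM(REMOTE_SHOOTDOWN, "remote shootdown")      \
    EMe(REMOTE_SEND_IPI, "remote ipi send")

/* First expansion: the enum itself. */
#define EM(a, b)  TLB_##a,
#define EMe(a, b) TLB_##a
enum tlb_flush_reason { TLB_FLUSH_REASON };
#undef EM
#undef EMe

/* Second expansion: a value/string table for printing. */
#define EM(a, b)  { TLB_##a, b },
#define EMe(a, b) { TLB_##a, b }
static const struct { int val; const char *str; } tlb_names[] = { TLB_FLUSH_REASON };
#undef EM
#undef EMe

int main(void)
{
    unsigned int i;

    for (i = 0; i < sizeof(tlb_names) / sizeof(tlb_names[0]); i++)
        printf("%d -> %s\n", tlb_names[i].val, tlb_names[i].str);
    return 0;
}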
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 2f295cd..8c5e8b9 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -34,6 +34,13 @@
 /* color index */
 #define DRM_FORMAT_C8		fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
 
+/* 8 bpp Red */
+#define DRM_FORMAT_R8		fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
+
+/* 16 bpp RG */
+#define DRM_FORMAT_RG88		fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
+#define DRM_FORMAT_GR88		fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
+
 /* 8 bpp RGB */
 #define DRM_FORMAT_RGB332	fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
 #define DRM_FORMAT_BGR233	fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
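
Note: the new R8/RG88/GR88 tokens use the same fourcc_code() packing as
every other format. A quick standalone check of what DRM_FORMAT_R8 expands
to (the macro body is copied from drm_fourcc.h for illustration):

#include <stdio.h>
#include <stdint.h>

#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
                                 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

int main(void)
{
    printf("DRM_FORMAT_R8 = 0x%08x\n", fourcc_code('R', '8', ' ', ' '));
    /* Prints 0x20203852; the value is arithmetic, so it is the same on
     * any host endianness. */
    return 0;
}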
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index db809b7..dbd16a2 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -354,9 +354,15 @@
 #define I915_PARAM_REVISION              32
 #define I915_PARAM_SUBSLICE_TOTAL	 33
 #define I915_PARAM_EU_TOTAL		 34
+#define I915_PARAM_HAS_GPU_RESET	 35
+#define I915_PARAM_HAS_RESOURCE_STREAMER 36
 
 typedef struct drm_i915_getparam {
-	int param;
+	s32 param;
+	/*
+	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
+	 * compat32 code. Don't repeat this mistake.
+	 */
 	int __user *value;
 } drm_i915_getparam_t;
 
@@ -764,7 +770,12 @@
 #define I915_EXEC_BSD_RING1		(1<<13)
 #define I915_EXEC_BSD_RING2		(2<<13)
 
-#define __I915_EXEC_UNKNOWN_FLAGS -(1<<15)
+/** Tell the kernel that the batchbuffer is processed by
+ *  the resource streamer.
+ */
+#define I915_EXEC_RESOURCE_STREAMER     (1<<15)
+
+#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1)
 
 #define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1114,6 +1125,7 @@
 	__u32 size;
 	__u64 param;
 #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
 	__u64 value;
 };
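
Note: __I915_EXEC_UNKNOWN_FLAGS keeps its old trick: in two's complement,
negating a power of two yields a mask of that bit and everything above it,
so bumping the operand from (1<<15) to (I915_EXEC_RESOURCE_STREAMER<<1)
removes exactly the newly claimed bit 15 from the unknown-flag mask. A
two-line standalone check:

#include <stdio.h>

int main(void)
{
    unsigned int streamer = 1u << 15;           /* I915_EXEC_RESOURCE_STREAMER */
    unsigned int unknown  = -(streamer << 1);   /* bits 16..31 */

    printf("unknown flags mask = 0x%08x\n", unknown);  /* 0xffff0000 */
    return 0;
}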
 
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index c472bedb..05b2049 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -64,6 +64,7 @@
 #define DRM_VMW_GB_SURFACE_CREATE    23
 #define DRM_VMW_GB_SURFACE_REF       24
 #define DRM_VMW_SYNCCPU              25
+#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
 
 /*************************************************************************/
 /**
@@ -88,6 +89,8 @@
 #define DRM_VMW_PARAM_3D_CAPS_SIZE     8
 #define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
 #define DRM_VMW_PARAM_MAX_MOB_SIZE     10
+#define DRM_VMW_PARAM_SCREEN_TARGET    11
+#define DRM_VMW_PARAM_DX               12
 
 /**
  * enum drm_vmw_handle_type - handle type for ref ioctls
@@ -296,7 +299,7 @@
  * Argument to the DRM_VMW_EXECBUF Ioctl.
  */
 
-#define DRM_VMW_EXECBUF_VERSION 1
+#define DRM_VMW_EXECBUF_VERSION 2
 
 struct drm_vmw_execbuf_arg {
 	uint64_t commands;
@@ -305,6 +308,8 @@
 	uint64_t fence_rep;
 	uint32_t version;
 	uint32_t flags;
+	uint32_t context_handle;
+	uint32_t pad64;
 };
 
 /**
@@ -825,7 +830,6 @@
 enum drm_vmw_shader_type {
 	drm_vmw_shader_type_vs = 0,
 	drm_vmw_shader_type_ps,
-	drm_vmw_shader_type_gs
 };
 
 
@@ -907,6 +911,8 @@
  * @buffer_handle     Buffer handle of backup buffer. SVGA3D_INVALID_ID
  *                    if none.
  * @base_size         Size of the base mip level for all faces.
+ * @array_size        Must be zero for non-DX hardware, and if non-zero
+ *                    svga3d_flags must have the proper bind flags set.
  *
  * Input argument to the  DRM_VMW_GB_SURFACE_CREATE Ioctl.
  * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
@@ -919,7 +925,7 @@
 	uint32_t multisample_count;
 	uint32_t autogen_filter;
 	uint32_t buffer_handle;
-	uint32_t pad64;
+	uint32_t array_size;
 	struct drm_vmw_size base_size;
 };
 
@@ -1059,4 +1065,28 @@
 	uint32_t pad64;
 };
 
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
+ *
+ * Allocates a device unique context id, and queues a create context command
+ * for the host. Does not wait for host completion.
+ */
+enum drm_vmw_extended_context {
+	drm_vmw_context_legacy,
+	drm_vmw_context_dx
+};
+
+/**
+ * union drm_vmw_extended_context_arg
+ *
+ * @req: Context type.
+ * @rep: Context identifier.
+ *
+ * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
+ */
+union drm_vmw_extended_context_arg {
+	enum drm_vmw_extended_context req;
+	struct drm_vmw_context_arg rep;
+};
 #endif
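
Note: union drm_vmw_extended_context_arg follows the common in/out ioctl
convention: userspace fills the request member, and the driver overwrites
the same storage with the reply. A simplified standalone sketch of that
round trip (the types are stand-ins, not the real driver interface):

#include <stdio.h>
#include <string.h>

enum ctx_type { CTX_LEGACY, CTX_DX };
struct ctx_rep { unsigned int handle; };

union ctx_arg {
    enum ctx_type req;   /* in: what the caller asks for */
    struct ctx_rep rep;  /* out: what the kernel hands back */
};

/* Stand-in for the ioctl handler: consumes req, then overwrites it. */
static void fake_ioctl(union ctx_arg *arg)
{
    unsigned int handle = (arg->req == CTX_DX) ? 0x1001 : 0x1000;

    memset(arg, 0, sizeof(*arg));
    arg->rep.handle = handle;
}

int main(void)
{
    union ctx_arg arg = { .req = CTX_DX };

    fake_ioctl(&arg);
    printf("context handle = 0x%x\n", arg.rep.handle);
    return 0;
}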
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 1ff9942..70ff1d9 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -243,6 +243,7 @@
 header-y += llc.h
 header-y += loop.h
 header-y += lp.h
+header-y += lwtunnel.h
 header-y += magic.h
 header-y += major.h
 header-y += map_to_7segment.h
@@ -455,3 +456,4 @@
 header-y += xilinx-v4l2-controls.h
 header-y += zorro.h
 header-y += zorro_ids.h
+header-y += userfaultfd.h
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index d3475e1..1f977dd 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -382,6 +382,9 @@
 #define AUDIT_ARCH_SHEL64	(EM_SH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_SPARC	(EM_SPARC)
 #define AUDIT_ARCH_SPARC64	(EM_SPARCV9|__AUDIT_ARCH_64BIT)
+#define AUDIT_ARCH_TILEGX	(EM_TILEGX|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_TILEGX32	(EM_TILEGX|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_TILEPRO	(EM_TILEPRO|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 
 #define AUDIT_PERM_EXEC		1
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 29ef6f9..92a48e2 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -114,6 +114,7 @@
 	BPF_MAP_TYPE_HASH,
 	BPF_MAP_TYPE_ARRAY,
 	BPF_MAP_TYPE_PROG_ARRAY,
+	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 };
 
 enum bpf_prog_type {
@@ -249,6 +250,28 @@
 	 * Return: 0 on success
 	 */
 	BPF_FUNC_get_current_comm,
+
+	/**
+	 * bpf_get_cgroup_classid(skb) - retrieve a proc's classid
+	 * @skb: pointer to skb
+	 * Return: classid if != 0
+	 */
+	BPF_FUNC_get_cgroup_classid,
+	BPF_FUNC_skb_vlan_push, /* bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) */
+	BPF_FUNC_skb_vlan_pop,  /* bpf_skb_vlan_pop(skb) */
+
+	/**
+	 * bpf_skb_[gs]et_tunnel_key(skb, key, size, flags)
+	 * retrieve or populate tunnel metadata
+	 * @skb: pointer to skb
+	 * @key: pointer to 'struct bpf_tunnel_key'
+	 * @size: size of 'struct bpf_tunnel_key'
+	 * @flags: room for future extensions
+	 * Return: 0 on success
+	 */
+	BPF_FUNC_skb_get_tunnel_key,
+	BPF_FUNC_skb_set_tunnel_key,
+	BPF_FUNC_perf_event_read,	/* u64 bpf_perf_event_read(&map, index) */
 	__BPF_FUNC_MAX_ID,
 };
 
@@ -269,6 +292,12 @@
 	__u32 ifindex;
 	__u32 tc_index;
 	__u32 cb[5];
+	__u32 hash;
+};
+
+struct bpf_tunnel_key {
+	__u32 tunnel_id;
+	__u32 remote_ipv4;
 };
 
 #endif /* _UAPI__LINUX_BPF_H__ */
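
Note: a rough sketch of how a classifier program might consume the new
tunnel-key helpers, in the style of the era's samples/bpf code (the helper
is declared by casting its BPF_FUNC_* id, as those samples do; treat the
build setup and section wiring as assumptions, not part of this patch):

#include <linux/bpf.h>

static int (*bpf_skb_get_tunnel_key)(void *ctx, struct bpf_tunnel_key *key,
                                     int size, int flags) =
    (void *) BPF_FUNC_skb_get_tunnel_key;

int handle_ingress(struct __sk_buff *skb)
{
    struct bpf_tunnel_key key = {};

    /* Pulls tunnel_id/remote_ipv4 out of the skb's encap metadata. */
    if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0) < 0)
        return 0;               /* no tunnel metadata attached */

    return key.tunnel_id == 42; /* e.g. match one VNI */
}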
diff --git a/include/uapi/linux/dlm_device.h b/include/uapi/linux/dlm_device.h
index 3060783..df56c8f 100644
--- a/include/uapi/linux/dlm_device.h
+++ b/include/uapi/linux/dlm_device.h
@@ -26,7 +26,7 @@
 /* Version of the device interface */
 #define DLM_DEVICE_VERSION_MAJOR 6
 #define DLM_DEVICE_VERSION_MINOR 0
-#define DLM_DEVICE_VERSION_PATCH 1
+#define DLM_DEVICE_VERSION_PATCH 2
 
 /* struct passed to the lock write */
 struct dlm_lock_params {
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 061aca3..d34611e 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@
 #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	32
+#define DM_VERSION_MINOR	33
 #define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2015-6-26)"
+#define DM_VERSION_EXTRA	"-ioctl (2015-8-18)"
 
 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index b088296..3429a3b 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -38,6 +38,8 @@
 #define EM_ALTERA_NIOS2	113	/* Altera Nios II soft-core processor */
 #define EM_TI_C6000	140	/* TI C6X DSPs */
 #define EM_AARCH64	183	/* ARM 64 bit */
+#define EM_TILEPRO	188	/* Tilera TILEPro */
+#define EM_TILEGX	191	/* Tilera TILE-Gx */
 #define EM_FRV		0x5441	/* Fujitsu FR-V */
 #define EM_AVR32	0x18ad	/* Atmel AVR32 */
 
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index cd67aec..cd16291 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1093,6 +1093,11 @@
  * the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
  * respectively.  For example, if the device supports HWTSTAMP_TX_ON,
  * then (1 << HWTSTAMP_TX_ON) in 'tx_types' will be set.
+ *
+ * Drivers should only report the filters they actually support without
+ * upscaling in the SIOCSHWTSTAMP ioctl. If the SIOCSHWTSTAMP request for
+ * HWTSTAMP_FILTER_V1_SYNC is supported by HWTSTAMP_FILTER_V1_EVENT, then the
+ * driver should only report HWTSTAMP_FILTER_V1_EVENT in this op.
  */
 struct ethtool_ts_info {
 	__u32	cmd;
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 2b82d7e..96161b8 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -43,7 +43,7 @@
 	FRA_UNUSED5,
 	FRA_FWMARK,	/* mark */
 	FRA_FLOW,	/* flow/class id */
-	FRA_UNUSED6,
+	FRA_TUN_ID,
 	FRA_SUPPRESS_IFGROUP,
 	FRA_SUPPRESS_PREFIXLEN,
 	FRA_TABLE,	/* Extended table id */
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index eaaea62..3635b77 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -182,6 +182,7 @@
 #define MDB_TEMPORARY 0
 #define MDB_PERMANENT 1
 	__u8 state;
+	__u16 vid;
 	struct {
 		union {
 			__be32	ip4;
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 2c7e8e3..3a5f263 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -148,6 +148,7 @@
 	IFLA_PHYS_SWITCH_ID,
 	IFLA_LINK_NETNSID,
 	IFLA_PHYS_PORT_NAME,
+	IFLA_PROTO_DOWN,
 	__IFLA_MAX
 };
 
@@ -229,6 +230,8 @@
 	IFLA_BR_AGEING_TIME,
 	IFLA_BR_STP_STATE,
 	IFLA_BR_PRIORITY,
+	IFLA_BR_VLAN_FILTERING,
+	IFLA_BR_VLAN_PROTOCOL,
 	__IFLA_BR_MAX,
 };
 
@@ -339,6 +342,15 @@
 
 #define MACVLAN_FLAG_NOPROMISC	1
 
+/* VRF section */
+enum {
+	IFLA_VRF_UNSPEC,
+	IFLA_VRF_TABLE,
+	__IFLA_VRF_MAX
+};
+
+#define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1)
+
 /* IPVLAN section */
 enum {
 	IFLA_IPVLAN_UNSPEC,
@@ -381,6 +393,7 @@
 	IFLA_VXLAN_REMCSUM_RX,
 	IFLA_VXLAN_GBP,
 	IFLA_VXLAN_REMCSUM_NOPARTIAL,
+	IFLA_VXLAN_COLLECT_METADATA,
 	__IFLA_VXLAN_MAX
 };
 #define IFLA_VXLAN_MAX	(__IFLA_VXLAN_MAX - 1)
@@ -397,6 +410,8 @@
 	IFLA_GENEVE_REMOTE,
 	IFLA_GENEVE_TTL,
 	IFLA_GENEVE_TOS,
+	IFLA_GENEVE_PORT,	/* destination port */
+	IFLA_GENEVE_COLLECT_METADATA,
 	__IFLA_GENEVE_MAX
 };
 #define IFLA_GENEVE_MAX	(__IFLA_GENEVE_MAX - 1)
@@ -431,6 +446,7 @@
 	IFLA_BOND_AD_ACTOR_SYS_PRIO,
 	IFLA_BOND_AD_USER_PORT_KEY,
 	IFLA_BOND_AD_ACTOR_SYSTEM,
+	IFLA_BOND_TLB_DYNAMIC_LB,
 	__IFLA_BOND_MAX,
 };
 
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index d3d715f8c..9e7edfd 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -55,6 +55,7 @@
 #define PACKET_TX_HAS_OFF		19
 #define PACKET_QDISC_BYPASS		20
 #define PACKET_ROLLOVER_STATS		21
+#define PACKET_FANOUT_DATA		22
 
 #define PACKET_FANOUT_HASH		0
 #define PACKET_FANOUT_LB		1
@@ -62,6 +63,8 @@
 #define PACKET_FANOUT_ROLLOVER		3
 #define PACKET_FANOUT_RND		4
 #define PACKET_FANOUT_QM		5
+#define PACKET_FANOUT_CBPF		6
+#define PACKET_FANOUT_EBPF		7
 #define PACKET_FANOUT_FLAG_ROLLOVER	0x1000
 #define PACKET_FANOUT_FLAG_DEFRAG	0x8000
 
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index bd3cc11..af4de90 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -112,6 +112,7 @@
 	IFLA_GRE_ENCAP_FLAGS,
 	IFLA_GRE_ENCAP_SPORT,
 	IFLA_GRE_ENCAP_DPORT,
+	IFLA_GRE_COLLECT_METADATA,
 	__IFLA_GRE_MAX,
 };
 
diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h
new file mode 100644
index 0000000..7ed9e67
--- /dev/null
+++ b/include/uapi/linux/ila.h
@@ -0,0 +1,15 @@
+/* ila.h - ILA Interface */
+
+#ifndef _UAPI_LINUX_ILA_H
+#define _UAPI_LINUX_ILA_H
+
+enum {
+	ILA_ATTR_UNSPEC,
+	ILA_ATTR_LOCATOR,			/* u64 */
+
+	__ILA_ATTR_MAX,
+};
+
+#define ILA_ATTR_MAX		(__ILA_ATTR_MAX - 1)
+
+#endif /* _UAPI_LINUX_ILA_H */
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index 3199243..391395c 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -406,6 +406,11 @@
 	IPVS_DAEMON_ATTR_STATE,		/* sync daemon state (master/backup) */
 	IPVS_DAEMON_ATTR_MCAST_IFN,	/* multicast interface name */
 	IPVS_DAEMON_ATTR_SYNC_ID,	/* SyncID we belong to */
+	IPVS_DAEMON_ATTR_SYNC_MAXLEN,	/* UDP Payload Size */
+	IPVS_DAEMON_ATTR_MCAST_GROUP,	/* IPv4 Multicast Address */
+	IPVS_DAEMON_ATTR_MCAST_GROUP6,	/* IPv6 Multicast Address */
+	IPVS_DAEMON_ATTR_MCAST_PORT,	/* Multicast Port (base) */
+	IPVS_DAEMON_ATTR_MCAST_TTL,	/* Multicast TTL */
 	__IPVS_DAEMON_ATTR_MAX,
 };
 
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 5efa54a..38b4fef 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -171,6 +171,9 @@
 	DEVCONF_USE_OPTIMISTIC,
 	DEVCONF_ACCEPT_RA_MTU,
 	DEVCONF_STABLE_SECRET,
+	DEVCONF_USE_OIF_ADDRS_ONLY,
+	DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
+	DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
 	DEVCONF_MAX
 };
 
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
new file mode 100644
index 0000000..34141a5
--- /dev/null
+++ b/include/uapi/linux/lwtunnel.h
@@ -0,0 +1,47 @@
+#ifndef _UAPI_LWTUNNEL_H_
+#define _UAPI_LWTUNNEL_H_
+
+#include <linux/types.h>
+
+enum lwtunnel_encap_types {
+	LWTUNNEL_ENCAP_NONE,
+	LWTUNNEL_ENCAP_MPLS,
+	LWTUNNEL_ENCAP_IP,
+	LWTUNNEL_ENCAP_ILA,
+	LWTUNNEL_ENCAP_IP6,
+	__LWTUNNEL_ENCAP_MAX,
+};
+
+#define LWTUNNEL_ENCAP_MAX (__LWTUNNEL_ENCAP_MAX - 1)
+
+enum lwtunnel_ip_t {
+	LWTUNNEL_IP_UNSPEC,
+	LWTUNNEL_IP_ID,
+	LWTUNNEL_IP_DST,
+	LWTUNNEL_IP_SRC,
+	LWTUNNEL_IP_TTL,
+	LWTUNNEL_IP_TOS,
+	LWTUNNEL_IP_SPORT,
+	LWTUNNEL_IP_DPORT,
+	LWTUNNEL_IP_FLAGS,
+	__LWTUNNEL_IP_MAX,
+};
+
+#define LWTUNNEL_IP_MAX (__LWTUNNEL_IP_MAX - 1)
+
+enum lwtunnel_ip6_t {
+	LWTUNNEL_IP6_UNSPEC,
+	LWTUNNEL_IP6_ID,
+	LWTUNNEL_IP6_DST,
+	LWTUNNEL_IP6_SRC,
+	LWTUNNEL_IP6_HOPLIMIT,
+	LWTUNNEL_IP6_TC,
+	LWTUNNEL_IP6_SPORT,
+	LWTUNNEL_IP6_DPORT,
+	LWTUNNEL_IP6_FLAGS,
+	__LWTUNNEL_IP6_MAX,
+};
+
+#define LWTUNNEL_IP6_MAX (__LWTUNNEL_IP6_MAX - 1)
+
+#endif /* _UAPI_LWTUNNEL_H_ */
diff --git a/include/uapi/linux/mpls.h b/include/uapi/linux/mpls.h
index 139d4dd..24a6cb1 100644
--- a/include/uapi/linux/mpls.h
+++ b/include/uapi/linux/mpls.h
@@ -41,4 +41,6 @@
 #define MPLS_LABEL_OAMALERT		14 /* RFC3429 */
 #define MPLS_LABEL_EXTENSION		15 /* RFC7274 */
 
+#define MPLS_LABEL_FIRST_UNRESERVED	16 /* RFC3032 */
+
 #endif /* _UAPI_MPLS_H */
diff --git a/include/uapi/linux/mpls_iptunnel.h b/include/uapi/linux/mpls_iptunnel.h
new file mode 100644
index 0000000..d80a049
--- /dev/null
+++ b/include/uapi/linux/mpls_iptunnel.h
@@ -0,0 +1,28 @@
+/*
+ *	mpls tunnel api
+ *
+ *	Authors:
+ *		Roopa Prabhu <roopa@cumulusnetworks.com>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_MPLS_IPTUNNEL_H
+#define _UAPI_LINUX_MPLS_IPTUNNEL_H
+
+/* MPLS tunnel attributes
+ * [RTA_ENCAP] = {
+ *     [MPLS_IPTUNNEL_DST]
+ * }
+ */
+enum {
+	MPLS_IPTUNNEL_UNSPEC,
+	MPLS_IPTUNNEL_DST,
+	__MPLS_IPTUNNEL_MAX,
+};
+#define MPLS_IPTUNNEL_MAX (__MPLS_IPTUNNEL_MAX - 1)
+
+#endif /* _UAPI_LINUX_MPLS_IPTUNNEL_H */
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 2e35c61..788655b 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -106,6 +106,7 @@
 	__u64		ndts_rcv_probes_ucast;
 	__u64		ndts_periodic_gc_runs;
 	__u64		ndts_forced_gc_runs;
+	__u64		ndts_table_fulls;
 };
 
 enum {
diff --git a/include/uapi/linux/netfilter/nf_conntrack_sctp.h b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
index ceeefe6..ed4e776 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
@@ -13,6 +13,8 @@
 	SCTP_CONNTRACK_SHUTDOWN_SENT,
 	SCTP_CONNTRACK_SHUTDOWN_RECD,
 	SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
+	SCTP_CONNTRACK_HEARTBEAT_SENT,
+	SCTP_CONNTRACK_HEARTBEAT_ACKED,
 	SCTP_CONNTRACK_MAX
 };
 
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index a99e6a9..d8c8a7c 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -756,16 +756,25 @@
 };
 #define NFTA_CT_MAX		(__NFTA_CT_MAX - 1)
 
+enum nft_limit_type {
+	NFT_LIMIT_PKTS,
+	NFT_LIMIT_PKT_BYTES
+};
+
 /**
  * enum nft_limit_attributes - nf_tables limit expression netlink attributes
  *
  * @NFTA_LIMIT_RATE: refill rate (NLA_U64)
  * @NFTA_LIMIT_UNIT: refill unit (NLA_U64)
+ * @NFTA_LIMIT_BURST: burst (NLA_U32)
+ * @NFTA_LIMIT_TYPE: type of limit (NLA_U32: enum nft_limit_type)
  */
 enum nft_limit_attributes {
 	NFTA_LIMIT_UNSPEC,
 	NFTA_LIMIT_RATE,
 	NFTA_LIMIT_UNIT,
+	NFTA_LIMIT_BURST,
+	NFTA_LIMIT_TYPE,
 	__NFTA_LIMIT_MAX
 };
 #define NFTA_LIMIT_MAX		(__NFTA_LIMIT_MAX - 1)
@@ -936,6 +945,20 @@
 #define NFTA_REDIR_MAX		(__NFTA_REDIR_MAX - 1)
 
 /**
+ * enum nft_dup_attributes - nf_tables dup expression netlink attributes
+ *
+ * @NFTA_DUP_SREG_ADDR: source register of address (NLA_U32: nft_registers)
+ * @NFTA_DUP_SREG_DEV: source register of output interface (NLA_U32: nft_register)
+ */
+enum nft_dup_attributes {
+	NFTA_DUP_UNSPEC,
+	NFTA_DUP_SREG_ADDR,
+	NFTA_DUP_SREG_DEV,
+	__NFTA_DUP_MAX
+};
+#define NFTA_DUP_MAX		(__NFTA_DUP_MAX - 1)
+
+/**
  * enum nft_gen_attributes - nf_tables ruleset generation attributes
  *
  * @NFTA_GEN_ID: Ruleset generation ID (NLA_U32)
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index acad6c5..c1a4e144 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -61,6 +61,7 @@
 	CTA_TUPLE_UNSPEC,
 	CTA_TUPLE_IP,
 	CTA_TUPLE_PROTO,
+	CTA_TUPLE_ZONE,
 	__CTA_TUPLE_MAX
 };
 #define CTA_TUPLE_MAX (__CTA_TUPLE_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
index 1ab0b97..f2c10dc 100644
--- a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
+++ b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
@@ -92,6 +92,8 @@
 	CTA_TIMEOUT_SCTP_SHUTDOWN_SENT,
 	CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
 	CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
+	CTA_TIMEOUT_SCTP_HEARTBEAT_SENT,
+	CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED,
 	__CTA_TIMEOUT_SCTP_MAX
 };
 #define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
diff --git a/include/uapi/linux/netfilter/xt_CT.h b/include/uapi/linux/netfilter/xt_CT.h
index 5a688c1..9e52041 100644
--- a/include/uapi/linux/netfilter/xt_CT.h
+++ b/include/uapi/linux/netfilter/xt_CT.h
@@ -6,7 +6,13 @@
 enum {
 	XT_CT_NOTRACK		= 1 << 0,
 	XT_CT_NOTRACK_ALIAS	= 1 << 1,
-	XT_CT_MASK		= XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS,
+	XT_CT_ZONE_DIR_ORIG	= 1 << 2,
+	XT_CT_ZONE_DIR_REPL	= 1 << 3,
+	XT_CT_ZONE_MARK		= 1 << 4,
+
+	XT_CT_MASK		= XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS |
+				  XT_CT_ZONE_DIR_ORIG | XT_CT_ZONE_DIR_REPL |
+				  XT_CT_ZONE_MARK,
 };
 
 struct xt_ct_target_info {
diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h b/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h
index 205ed62..cd2e940 100644
--- a/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h
+++ b/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h
@@ -10,7 +10,9 @@
 	IP6T_ICMP6_ADDR_UNREACH,
 	IP6T_ICMP6_PORT_UNREACH,
 	IP6T_ICMP6_ECHOREPLY,
-	IP6T_TCP_RESET
+	IP6T_TCP_RESET,
+	IP6T_ICMP6_POLICY_FAIL,
+	IP6T_ICMP6_REJECT_ROUTE
 };
 
 struct ip6t_reject_info {
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index cf6a65c..6f3fe16 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -110,6 +110,7 @@
 #define NETLINK_TX_RING			7
 #define NETLINK_LISTEN_ALL_NSID		8
 #define NETLINK_LIST_MEMBERSHIPS	9
+#define NETLINK_CAP_ACK			10
 
 struct nl_pktinfo {
 	__u32	group;
diff --git a/include/uapi/linux/nfsacl.h b/include/uapi/linux/nfsacl.h
index 9bb9771..5527266 100644
--- a/include/uapi/linux/nfsacl.h
+++ b/include/uapi/linux/nfsacl.h
@@ -22,6 +22,7 @@
 #define NFS_ACLCNT		0x0002
 #define NFS_DFACL		0x0004
 #define NFS_DFACLCNT		0x0008
+#define NFS_ACL_MASK		0x000f
 
 /* Flag for Default ACL entries */
 #define NFS_ACL_DEFAULT		0x1000
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index 732b32e..8864194 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -584,5 +584,6 @@
 #define NVME_IOCTL_SUBMIT_IO	_IOW('N', 0x42, struct nvme_user_io)
 #define NVME_IOCTL_IO_CMD	_IOWR('N', 0x43, struct nvme_passthru_cmd)
 #define NVME_IOCTL_RESET	_IO('N', 0x44)
+#define NVME_IOCTL_SUBSYS_RESET	_IO('N', 0x45)
 
 #endif /* _UAPI_LINUX_NVME_H */
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 1dab776..32e07d8 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -164,6 +164,9 @@
  * %OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute, which is sent only if the
  * output port is actually a tunnel port. Contains the output tunnel key
  * extracted from the packet as nested %OVS_TUNNEL_KEY_ATTR_* attributes.
+ * @OVS_PACKET_ATTR_MRU: Present for an %OVS_PACKET_CMD_ACTION and
+ * %OVS_PACKET_ATTR_USERSPACE action to specify the maximum received fragment
+ * size.
  *
  * These attributes follow the &struct ovs_header within the Generic Netlink
  * payload for %OVS_PACKET_* commands.
@@ -180,6 +183,7 @@
 	OVS_PACKET_ATTR_UNUSED2,
 	OVS_PACKET_ATTR_PROBE,      /* Packet operation is a feature probe,
 				       error logging should be suppressed. */
+	OVS_PACKET_ATTR_MRU,	    /* Maximum received IP fragment size. */
 	__OVS_PACKET_ATTR_MAX
 };
 
@@ -319,9 +323,13 @@
 	OVS_KEY_ATTR_MPLS,      /* array of struct ovs_key_mpls.
 				 * The implementation may restrict
 				 * the accepted length of the array. */
+	OVS_KEY_ATTR_CT_STATE,	/* u8 bitmask of OVS_CS_F_* */
+	OVS_KEY_ATTR_CT_ZONE,	/* u16 connection tracking zone. */
+	OVS_KEY_ATTR_CT_MARK,	/* u32 connection tracking mark */
+	OVS_KEY_ATTR_CT_LABEL,	/* 16-octet connection tracking label */
 
 #ifdef __KERNEL__
-	OVS_KEY_ATTR_TUNNEL_INFO,  /* struct ovs_tunnel_info */
+	OVS_KEY_ATTR_TUNNEL_INFO,  /* struct ip_tunnel_info */
 #endif
 	__OVS_KEY_ATTR_MAX
 };
@@ -431,6 +439,20 @@
 	__u8	nd_tll[ETH_ALEN];
 };
 
+#define OVS_CT_LABEL_LEN	16
+struct ovs_key_ct_label {
+	__u8	ct_label[OVS_CT_LABEL_LEN];
+};
+
+/* OVS_KEY_ATTR_CT_STATE flags */
+#define OVS_CS_F_NEW               0x01 /* Beginning of a new connection. */
+#define OVS_CS_F_ESTABLISHED       0x02 /* Part of an existing connection. */
+#define OVS_CS_F_RELATED           0x04 /* Related to an established
+					 * connection. */
+#define OVS_CS_F_INVALID           0x20 /* Could not track connection. */
+#define OVS_CS_F_REPLY_DIR         0x40 /* Flow is in the reply direction. */
+#define OVS_CS_F_TRACKED           0x80 /* Conntrack has occurred. */
+
 /**
  * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
  * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow
@@ -595,6 +617,39 @@
 };
 
 /**
+ * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action.
+ * @OVS_CT_ATTR_FLAGS: u32 connection tracking flags.
+ * @OVS_CT_ATTR_ZONE: u16 connection tracking zone.
+ * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the
+ * mask, the corresponding bit in the value is copied to the connection
+ * tracking mark field in the connection.
+ * @OVS_CT_ATTR_LABEL: %OVS_CT_LABEL_LEN value followed by %OVS_CT_LABEL_LEN
+ * mask. For each bit set in the mask, the corresponding bit in the value is
+ * copied to the connection tracking label field in the connection.
+ * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG.
+ */
+enum ovs_ct_attr {
+	OVS_CT_ATTR_UNSPEC,
+	OVS_CT_ATTR_FLAGS,      /* u8 bitmask of OVS_CT_F_*. */
+	OVS_CT_ATTR_ZONE,       /* u16 zone id. */
+	OVS_CT_ATTR_MARK,       /* mark to associate with this connection. */
+	OVS_CT_ATTR_LABEL,      /* label to associate with this connection. */
+	OVS_CT_ATTR_HELPER,     /* netlink helper to assist detection of
+				   related connections. */
+	__OVS_CT_ATTR_MAX
+};
+
+#define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1)
+
+/*
+ * OVS_CT_ATTR_FLAGS flags - bitmask of %OVS_CT_F_*
+ * @OVS_CT_F_COMMIT: Commits the flow to the conntrack table. This allows
+ * future packets for the same connection to be identified as 'established'
+ * or 'related'.
+ */
+#define OVS_CT_F_COMMIT		0x01
+
+/**
  * enum ovs_action_attr - Action types.
  *
  * @OVS_ACTION_ATTR_OUTPUT: Output packet to port.
@@ -623,6 +678,8 @@
  * indicate the new packet contents. This could potentially still be
  * %ETH_P_MPLS if the resulting MPLS label stack is not empty.  If there
  * is no MPLS label stack, as determined by ethertype, no action is taken.
+ * @OVS_ACTION_ATTR_CT: Track the connection. Populate the conntrack-related
+ * entries in the flow key.
  *
  * Only a single header can be set with a single %OVS_ACTION_ATTR_SET.  Not all
  * fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -648,6 +705,7 @@
 				       * data immediately followed by a mask.
 				       * The data must be zero for the unmasked
 				       * bits. */
+	OVS_ACTION_ATTR_CT,           /* One nested OVS_CT_ATTR_* . */
 
 	__OVS_ACTION_ATTR_MAX,	      /* Nothing past this will be accepted
 				       * from userspace. */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 31891d9..a8d0759 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -190,4 +190,11 @@
 # define PR_FP_MODE_FR		(1 << 0)	/* 64b FP registers */
 # define PR_FP_MODE_FRE		(1 << 1)	/* 32b compatibility */
 
+/* Control the ambient capability set */
+#define PR_CAP_AMBIENT			47
+# define PR_CAP_AMBIENT_IS_SET		1
+# define PR_CAP_AMBIENT_RAISE		2
+# define PR_CAP_AMBIENT_LOWER		3
+# define PR_CAP_AMBIENT_CLEAR_ALL	4
+
 #endif /* _LINUX_PRCTL_H */
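
Note: a minimal userspace sketch of the new ambient-capability control
(assumes a kernel with this patch; the raise fails with EPERM unless the
capability is already in the thread's permitted and inheritable sets):

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/capability.h>

#ifndef PR_CAP_AMBIENT          /* libc headers may predate this patch */
#define PR_CAP_AMBIENT          47
#define PR_CAP_AMBIENT_IS_SET   1
#define PR_CAP_AMBIENT_RAISE    2
#endif

int main(void)
{
    if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE,
              CAP_NET_BIND_SERVICE, 0, 0))
        perror("PR_CAP_AMBIENT_RAISE");
    else
        printf("ambient CAP_NET_BIND_SERVICE: %d\n",
               prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET,
                     CAP_NET_BIND_SERVICE, 0, 0));
    return 0;
}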
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index fdd8f07..7020247 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -308,6 +308,8 @@
 	RTA_VIA,
 	RTA_NEWDST,
 	RTA_PREF,
+	RTA_ENCAP_TYPE,
+	RTA_ENCAP,
 	__RTA_MAX
 };
 
@@ -416,10 +418,13 @@
 
 #define RTAX_MAX (__RTAX_MAX - 1)
 
-#define RTAX_FEATURE_ECN	0x00000001
-#define RTAX_FEATURE_SACK	0x00000002
-#define RTAX_FEATURE_TIMESTAMP	0x00000004
-#define RTAX_FEATURE_ALLFRAG	0x00000008
+#define RTAX_FEATURE_ECN	(1 << 0)
+#define RTAX_FEATURE_SACK	(1 << 1)
+#define RTAX_FEATURE_TIMESTAMP	(1 << 2)
+#define RTAX_FEATURE_ALLFRAG	(1 << 3)
+
+#define RTAX_FEATURE_MASK	(RTAX_FEATURE_ECN | RTAX_FEATURE_SACK | \
+				 RTAX_FEATURE_TIMESTAMP | RTAX_FEATURE_ALLFRAG)
 
 struct rta_session {
 	__u8	proto;
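
Switching the RTAX_FEATURE_* definitions to shift form and adding RTAX_FEATURE_MASK gives one authoritative definition of the valid bits, so validating a user-supplied features word reduces to a single AND. A sketch of that intended use (not a quote of kernel code):

    #include <stdbool.h>
    #include <linux/types.h>
    #include <linux/rtnetlink.h>

    /* Reject any RTAX_FEATURES value carrying bits outside the mask. */
    static bool rtax_features_valid(__u32 features)
    {
        return (features & ~RTAX_FEATURE_MASK) == 0;
    }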
diff --git a/include/uapi/linux/securebits.h b/include/uapi/linux/securebits.h
index 985aac9..35ac35c 100644
--- a/include/uapi/linux/securebits.h
+++ b/include/uapi/linux/securebits.h
@@ -43,9 +43,18 @@
 #define SECBIT_KEEP_CAPS	(issecure_mask(SECURE_KEEP_CAPS))
 #define SECBIT_KEEP_CAPS_LOCKED (issecure_mask(SECURE_KEEP_CAPS_LOCKED))
 
+/* When set, a process cannot add new capabilities to its ambient set. */
+#define SECURE_NO_CAP_AMBIENT_RAISE		6
+#define SECURE_NO_CAP_AMBIENT_RAISE_LOCKED	7  /* make bit-6 immutable */
+
+#define SECBIT_NO_CAP_AMBIENT_RAISE (issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE))
+#define SECBIT_NO_CAP_AMBIENT_RAISE_LOCKED \
+			(issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE_LOCKED))
+
 #define SECURE_ALL_BITS		(issecure_mask(SECURE_NOROOT) | \
 				 issecure_mask(SECURE_NO_SETUID_FIXUP) | \
-				 issecure_mask(SECURE_KEEP_CAPS))
+				 issecure_mask(SECURE_KEEP_CAPS) | \
+				 issecure_mask(SECURE_NO_CAP_AMBIENT_RAISE))
 #define SECURE_ALL_LOCKS	(SECURE_ALL_BITS << 1)
 
 #endif /* _UAPI_LINUX_SECUREBITS_H */
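
A sketch of how a sandboxing launcher might use the new bits: forbid future ambient raises and lock that decision so neither this process nor its descendants can undo it (flipping securebits requires CAP_SETPCAP):

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/securebits.h>

    int main(void)
    {
        long bits = prctl(PR_GET_SECUREBITS, 0, 0, 0, 0);

        if (bits < 0) {
            perror("PR_GET_SECUREBITS");
            return 1;
        }
        bits |= SECBIT_NO_CAP_AMBIENT_RAISE |
                SECBIT_NO_CAP_AMBIENT_RAISE_LOCKED;
        if (prctl(PR_SET_SECUREBITS, bits, 0, 0, 0) < 0) {
            perror("PR_SET_SECUREBITS");
            return 1;
        }
        return 0;
    }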
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index eee8968..25a9ad8 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -278,6 +278,8 @@
 	LINUX_MIB_TCPACKSKIPPEDCHALLENGE,	/* TCPACKSkippedChallenge */
 	LINUX_MIB_TCPWINPROBE,			/* TCPWinProbe */
 	LINUX_MIB_TCPKEEPALIVE,			/* TCPKeepAlive */
+	LINUX_MIB_TCPMTUPFAIL,			/* TCPMTUPFail */
+	LINUX_MIB_TCPMTUPSUCCESS,		/* TCPMTUPSuccess */
 	__LINUX_MIB_MAX
 };
 
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
new file mode 100644
index 0000000..df0e09b
--- /dev/null
+++ b/include/uapi/linux/userfaultfd.h
@@ -0,0 +1,169 @@
+/*
+ *  include/linux/userfaultfd.h
+ *
+ *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
+ *  Copyright (C) 2015  Red Hat, Inc.
+ *
+ */
+
+#ifndef _LINUX_USERFAULTFD_H
+#define _LINUX_USERFAULTFD_H
+
+#include <linux/types.h>
+
+#include <linux/compiler.h>
+
+#define UFFD_API ((__u64)0xAA)
+/*
+ * After implementing the respective features it will become:
+ * #define UFFD_API_FEATURES (UFFD_FEATURE_PAGEFAULT_FLAG_WP | \
+ *			      UFFD_FEATURE_EVENT_FORK)
+ */
+#define UFFD_API_FEATURES (0)
+#define UFFD_API_IOCTLS				\
+	((__u64)1 << _UFFDIO_REGISTER |		\
+	 (__u64)1 << _UFFDIO_UNREGISTER |	\
+	 (__u64)1 << _UFFDIO_API)
+#define UFFD_API_RANGE_IOCTLS			\
+	((__u64)1 << _UFFDIO_WAKE |		\
+	 (__u64)1 << _UFFDIO_COPY |		\
+	 (__u64)1 << _UFFDIO_ZEROPAGE)
+
+/*
+ * Valid ioctl command number range with this API is from 0x00 to
+ * 0x3F.  UFFDIO_API is the fixed number, everything else can be
+ * changed by implementing a different UFFD_API. If sticking to the
+ * same UFFD_API, more ioctls can be added and userland will learn
+ * which ioctls the running kernel implements through the ioctl
+ * command bitmask written by UFFDIO_API.
+ */
+#define _UFFDIO_REGISTER		(0x00)
+#define _UFFDIO_UNREGISTER		(0x01)
+#define _UFFDIO_WAKE			(0x02)
+#define _UFFDIO_COPY			(0x03)
+#define _UFFDIO_ZEROPAGE		(0x04)
+#define _UFFDIO_API			(0x3F)
+
+/* userfaultfd ioctl ids */
+#define UFFDIO 0xAA
+#define UFFDIO_API		_IOWR(UFFDIO, _UFFDIO_API,	\
+				      struct uffdio_api)
+#define UFFDIO_REGISTER		_IOWR(UFFDIO, _UFFDIO_REGISTER, \
+				      struct uffdio_register)
+#define UFFDIO_UNREGISTER	_IOR(UFFDIO, _UFFDIO_UNREGISTER,	\
+				     struct uffdio_range)
+#define UFFDIO_WAKE		_IOR(UFFDIO, _UFFDIO_WAKE,	\
+				     struct uffdio_range)
+#define UFFDIO_COPY		_IOWR(UFFDIO, _UFFDIO_COPY,	\
+				      struct uffdio_copy)
+#define UFFDIO_ZEROPAGE		_IOWR(UFFDIO, _UFFDIO_ZEROPAGE,	\
+				      struct uffdio_zeropage)
+
+/* read() structure */
+struct uffd_msg {
+	__u8	event;
+
+	__u8	reserved1;
+	__u16	reserved2;
+	__u32	reserved3;
+
+	union {
+		struct {
+			__u64	flags;
+			__u64	address;
+		} pagefault;
+
+		struct {
+			/* unused reserved fields */
+			__u64	reserved1;
+			__u64	reserved2;
+			__u64	reserved3;
+		} reserved;
+	} arg;
+} __packed;
+
+/*
+ * Start at 0x12 and not at 0 to be more strict against bugs.
+ */
+#define UFFD_EVENT_PAGEFAULT	0x12
+#if 0 /* not available yet */
+#define UFFD_EVENT_FORK		0x13
+#endif
+
+/* flags for UFFD_EVENT_PAGEFAULT */
+#define UFFD_PAGEFAULT_FLAG_WRITE	(1<<0)	/* If this was a write fault */
+#define UFFD_PAGEFAULT_FLAG_WP		(1<<1)	/* If reason is VM_UFFD_WP */
+
+struct uffdio_api {
+	/* userland asks for an API number and the features to enable */
+	__u64 api;
+	/*
+	 * Kernel answers below with all the available features for
+	 * the API, this notifies userland of which events and/or
+	 * which flags for each event are enabled in the current
+	 * kernel.
+	 *
+	 * Note: UFFD_EVENT_PAGEFAULT and UFFD_PAGEFAULT_FLAG_WRITE
+	 * are to be considered implicitly always enabled in all kernels as
+	 * long as the uffdio_api.api requested matches UFFD_API.
+	 */
+#if 0 /* not available yet */
+#define UFFD_FEATURE_PAGEFAULT_FLAG_WP		(1<<0)
+#define UFFD_FEATURE_EVENT_FORK			(1<<1)
+#endif
+	__u64 features;
+
+	__u64 ioctls;
+};
+
+struct uffdio_range {
+	__u64 start;
+	__u64 len;
+};
+
+struct uffdio_register {
+	struct uffdio_range range;
+#define UFFDIO_REGISTER_MODE_MISSING	((__u64)1<<0)
+#define UFFDIO_REGISTER_MODE_WP		((__u64)1<<1)
+	__u64 mode;
+
+	/*
+	 * kernel answers which ioctl commands are available for the
+	 * range, keep at the end as the last 8 bytes aren't read.
+	 */
+	__u64 ioctls;
+};
+
+struct uffdio_copy {
+	__u64 dst;
+	__u64 src;
+	__u64 len;
+	/*
+	 * There will be a wrprotection flag later that allows mapping
+	 * pages wrprotected on the fly. Such a flag will be available
+	 * only if the wrprotection ioctls are implemented for the
+	 * range, according to uffdio_register.ioctls.
+	 */
+#define UFFDIO_COPY_MODE_DONTWAKE		((__u64)1<<0)
+	__u64 mode;
+
+	/*
+	 * "copy" is written by the ioctl and must be at the end: the
+	 * copy_from_user will not read the last 8 bytes.
+	 */
+	__s64 copy;
+};
+
+struct uffdio_zeropage {
+	struct uffdio_range range;
+#define UFFDIO_ZEROPAGE_MODE_DONTWAKE		((__u64)1<<0)
+	__u64 mode;
+
+	/*
+	 * "zeropage" is written by the ioctl and must be at the end:
+	 * the copy_from_user will not read the last 8 bytes.
+	 */
+	__s64 zeropage;
+};
+
+#endif /* _LINUX_USERFAULTFD_H */
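
A condensed sketch of the calling sequence this header defines: create the fd, negotiate UFFD_API, register a region for missing-page faults, then resolve a fault by copying a page in. Error handling is trimmed, and __NR_userfaultfd must exist for the target architecture:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <linux/userfaultfd.h>

    static void serve_one_fault(int uffd, void *src_page, long page_size)
    {
        struct uffd_msg msg;
        struct uffdio_copy copy = {};

        read(uffd, &msg, sizeof(msg));   /* blocks until a fault arrives */
        if (msg.event != UFFD_EVENT_PAGEFAULT)
            return;

        copy.dst = msg.arg.pagefault.address & ~((__u64)page_size - 1);
        copy.src = (unsigned long)src_page;
        copy.len = page_size;
        copy.mode = 0;                   /* wake the faulting thread */
        ioctl(uffd, UFFDIO_COPY, &copy);
    }

    int main(void)
    {
        long page_size = sysconf(_SC_PAGESIZE);
        struct uffdio_api api = { .api = UFFD_API };
        struct uffdio_register reg = {};
        int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
        void *area = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        ioctl(uffd, UFFDIO_API, &api);   /* handshake; api.ioctls now valid */

        reg.range.start = (unsigned long)area;
        reg.range.len = page_size;
        reg.mode = UFFDIO_REGISTER_MODE_MISSING;
        ioctl(uffd, UFFDIO_REGISTER, &reg);

        /* A real user faults from another thread; see serve_one_fault(). */
        return 0;
    }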
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
index 99a8ca1..1e889aa 100644
--- a/include/uapi/misc/cxl.h
+++ b/include/uapi/misc/cxl.h
@@ -29,8 +29,10 @@
 
 #define CXL_START_WORK_AMR		0x0000000000000001ULL
 #define CXL_START_WORK_NUM_IRQS		0x0000000000000002ULL
+#define CXL_START_WORK_ERR_FF		0x0000000000000004ULL
 #define CXL_START_WORK_ALL		(CXL_START_WORK_AMR |\
-					 CXL_START_WORK_NUM_IRQS)
+					 CXL_START_WORK_NUM_IRQS |\
+					 CXL_START_WORK_ERR_FF)
 
 
 /* Possible modes that an afu can be in */
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index 0530e5a..d8fc96e 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -296,6 +296,7 @@
 
 /* Video buffer addresses */
 #define VIDW_BUF_START(_buff)			(0xA0 + ((_buff) * 8))
+#define VIDW_BUF_START_S(_buff)			(0x40A0 + ((_buff) * 8))
 #define VIDW_BUF_START1(_buff)			(0xA4 + ((_buff) * 8))
 #define VIDW_BUF_END(_buff)			(0xD0 + ((_buff) * 8))
 #define VIDW_BUF_END1(_buff)			(0xD4 + ((_buff) * 8))
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index 70054cc..252ffd4 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -156,7 +156,9 @@
 /* Types of xen_netif_extra_info descriptors. */
 #define XEN_NETIF_EXTRA_TYPE_NONE	(0)  /* Never used - invalid */
 #define XEN_NETIF_EXTRA_TYPE_GSO	(1)  /* u.gso */
-#define XEN_NETIF_EXTRA_TYPE_MAX	(2)
+#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD	(2)  /* u.mcast */
+#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL	(3)  /* u.mcast */
+#define XEN_NETIF_EXTRA_TYPE_MAX	(4)
 
 /* xen_netif_extra_info flags. */
 #define _XEN_NETIF_EXTRA_FLAG_MORE	(0)
@@ -201,6 +203,10 @@
 			uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
 		} gso;
 
+		struct {
+			uint8_t addr[6]; /* Address to add/remove. */
+		} mcast;
+
 		uint16_t pad[3];
 	} u;
 };
diff --git a/init/Kconfig b/init/Kconfig
index bb9b4dd..2c0e50e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -883,6 +883,16 @@
 	bool
 
 #
+# For architectures that prefer to flush all TLBs after a number of pages
+# are unmapped instead of sending one IPI per page to flush. The architecture
+# must provide guarantees on what happens if a clean TLB cache entry is
+# written after the unmap. Details are in mm/rmap.c near the check for
+# should_defer_flush. The architecture should also consider if the full flush
+# and the refill costs are offset by the savings of sending fewer IPIs.
+config ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+	bool
+
+#
 # For architectures that know their GCC __int128 support is sound
 #
 config ARCH_SUPPORTS_INT128
@@ -1576,6 +1586,14 @@
 	  applications use these syscalls, you can disable this option to save
 	  space.
 
+config USERFAULTFD
+	bool "Enable userfaultfd() system call"
+	select ANON_INODES
+	depends on MMU
+	help
+	  Enable the userfaultfd() system call, which allows userland to
+	  intercept and handle its own page faults.
+
 config PCI_QUIRKS
 	default y
 	bool "Enable PCI quirk workarounds" if EXPERT
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index cb31229..29ace10 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -150,15 +150,15 @@
 }
 late_initcall(register_array_map);
 
-static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
+static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
 {
-	/* only bpf_prog file descriptors can be stored in prog_array map */
+	/* only file descriptors can be stored in this type of map */
 	if (attr->value_size != sizeof(u32))
 		return ERR_PTR(-EINVAL);
 	return array_map_alloc(attr);
 }
 
-static void prog_array_map_free(struct bpf_map *map)
+static void fd_array_map_free(struct bpf_map *map)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	int i;
@@ -167,21 +167,21 @@
 
 	/* make sure it's empty */
 	for (i = 0; i < array->map.max_entries; i++)
-		BUG_ON(array->prog[i] != NULL);
+		BUG_ON(array->ptrs[i] != NULL);
 	kvfree(array);
 }
 
-static void *prog_array_map_lookup_elem(struct bpf_map *map, void *key)
+static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
 {
 	return NULL;
 }
 
 /* only called from syscall */
-static int prog_array_map_update_elem(struct bpf_map *map, void *key,
-				      void *value, u64 map_flags)
+static int fd_array_map_update_elem(struct bpf_map *map, void *key,
+				    void *value, u64 map_flags)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
-	struct bpf_prog *prog, *old_prog;
+	void *new_ptr, *old_ptr;
 	u32 index = *(u32 *)key, ufd;
 
 	if (map_flags != BPF_ANY)
@@ -191,57 +191,75 @@
 		return -E2BIG;
 
 	ufd = *(u32 *)value;
-	prog = bpf_prog_get(ufd);
-	if (IS_ERR(prog))
-		return PTR_ERR(prog);
+	new_ptr = map->ops->map_fd_get_ptr(map, ufd);
+	if (IS_ERR(new_ptr))
+		return PTR_ERR(new_ptr);
 
-	if (!bpf_prog_array_compatible(array, prog)) {
-		bpf_prog_put(prog);
-		return -EINVAL;
-	}
-
-	old_prog = xchg(array->prog + index, prog);
-	if (old_prog)
-		bpf_prog_put_rcu(old_prog);
+	old_ptr = xchg(array->ptrs + index, new_ptr);
+	if (old_ptr)
+		map->ops->map_fd_put_ptr(old_ptr);
 
 	return 0;
 }
 
-static int prog_array_map_delete_elem(struct bpf_map *map, void *key)
+static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
-	struct bpf_prog *old_prog;
+	void *old_ptr;
 	u32 index = *(u32 *)key;
 
 	if (index >= array->map.max_entries)
 		return -E2BIG;
 
-	old_prog = xchg(array->prog + index, NULL);
-	if (old_prog) {
-		bpf_prog_put_rcu(old_prog);
+	old_ptr = xchg(array->ptrs + index, NULL);
+	if (old_ptr) {
+		map->ops->map_fd_put_ptr(old_ptr);
 		return 0;
 	} else {
 		return -ENOENT;
 	}
 }
 
+static void *prog_fd_array_get_ptr(struct bpf_map *map, int fd)
+{
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	struct bpf_prog *prog = bpf_prog_get(fd);
+	if (IS_ERR(prog))
+		return prog;
+
+	if (!bpf_prog_array_compatible(array, prog)) {
+		bpf_prog_put(prog);
+		return ERR_PTR(-EINVAL);
+	}
+	return prog;
+}
+
+static void prog_fd_array_put_ptr(void *ptr)
+{
+	struct bpf_prog *prog = ptr;
+
+	bpf_prog_put_rcu(prog);
+}
+
 /* decrement refcnt of all bpf_progs that are stored in this map */
-void bpf_prog_array_map_clear(struct bpf_map *map)
+void bpf_fd_array_map_clear(struct bpf_map *map)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	int i;
 
 	for (i = 0; i < array->map.max_entries; i++)
-		prog_array_map_delete_elem(map, &i);
+		fd_array_map_delete_elem(map, &i);
 }
 
 static const struct bpf_map_ops prog_array_ops = {
-	.map_alloc = prog_array_map_alloc,
-	.map_free = prog_array_map_free,
+	.map_alloc = fd_array_map_alloc,
+	.map_free = fd_array_map_free,
 	.map_get_next_key = array_map_get_next_key,
-	.map_lookup_elem = prog_array_map_lookup_elem,
-	.map_update_elem = prog_array_map_update_elem,
-	.map_delete_elem = prog_array_map_delete_elem,
+	.map_lookup_elem = fd_array_map_lookup_elem,
+	.map_update_elem = fd_array_map_update_elem,
+	.map_delete_elem = fd_array_map_delete_elem,
+	.map_fd_get_ptr = prog_fd_array_get_ptr,
+	.map_fd_put_ptr = prog_fd_array_put_ptr,
 };
 
 static struct bpf_map_type_list prog_array_type __read_mostly = {
@@ -255,3 +273,60 @@
 	return 0;
 }
 late_initcall(register_prog_array_map);
+
+static void perf_event_array_map_free(struct bpf_map *map)
+{
+	bpf_fd_array_map_clear(map);
+	fd_array_map_free(map);
+}
+
+static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd)
+{
+	struct perf_event *event;
+	const struct perf_event_attr *attr;
+
+	event = perf_event_get(fd);
+	if (IS_ERR(event))
+		return event;
+
+	attr = perf_event_attrs(event);
+	if (IS_ERR(attr))
+		return (void *)attr;
+
+	if (attr->type != PERF_TYPE_RAW &&
+	    attr->type != PERF_TYPE_HARDWARE) {
+		perf_event_release_kernel(event);
+		return ERR_PTR(-EINVAL);
+	}
+	return event;
+}
+
+static void perf_event_fd_array_put_ptr(void *ptr)
+{
+	struct perf_event *event = ptr;
+
+	perf_event_release_kernel(event);
+}
+
+static const struct bpf_map_ops perf_event_array_ops = {
+	.map_alloc = fd_array_map_alloc,
+	.map_free = perf_event_array_map_free,
+	.map_get_next_key = array_map_get_next_key,
+	.map_lookup_elem = fd_array_map_lookup_elem,
+	.map_update_elem = fd_array_map_update_elem,
+	.map_delete_elem = fd_array_map_delete_elem,
+	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
+	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
+};
+
+static struct bpf_map_type_list perf_event_array_type __read_mostly = {
+	.ops = &perf_event_array_ops,
+	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+};
+
+static int __init register_perf_event_array_map(void)
+{
+	bpf_register_map_type(&perf_event_array_type);
+	return 0;
+}
+late_initcall(register_perf_event_array_map);
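
From userspace the new map type is driven the same way as prog arrays: create a BPF_MAP_TYPE_PERF_EVENT_ARRAY whose values are u32 file descriptors, then store a perf event fd at an index with BPF_MAP_UPDATE_ELEM (the kernel takes its own reference via perf_event_get()). A sketch assuming uapi headers that carry this patch and a raw bpf(2) wrapper:

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    static long sys_bpf(int cmd, union bpf_attr *attr)
    {
        return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
    }

    /* Create the fd-array map; one slot per CPU is the usual shape. */
    static int create_event_map(unsigned int nr_cpus)
    {
        union bpf_attr attr = {
            .map_type    = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
            .key_size    = sizeof(__u32),
            .value_size  = sizeof(__u32), /* enforced by fd_array_map_alloc() */
            .max_entries = nr_cpus,
        };
        return sys_bpf(BPF_MAP_CREATE, &attr);
    }

    /* Store perf_fd at @slot; only RAW/HARDWARE events are accepted. */
    static int set_event(int map_fd, __u32 slot, __u32 perf_fd)
    {
        union bpf_attr attr = {
            .map_fd = (__u32)map_fd,
            .key    = (__u64)(unsigned long)&slot,
            .value  = (__u64)(unsigned long)&perf_fd,
            .flags  = BPF_ANY,
        };
        return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr);
    }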
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index c5bedc8..67c380c 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -177,6 +177,7 @@
 {
 	return 0;
 }
+EXPORT_SYMBOL_GPL(__bpf_call_base);
 
 /**
  *	__bpf_prog_run - run eBPF program on a given context
@@ -449,11 +450,15 @@
 
 		tail_call_cnt++;
 
-		prog = READ_ONCE(array->prog[index]);
+		prog = READ_ONCE(array->ptrs[index]);
 		if (unlikely(!prog))
 			goto out;
 
-		ARG1 = BPF_R1;
+		/* ARG1 at this point is guaranteed to point to CTX from
+		 * the verifier side due to the fact that the tail call is
+		 * handled like a helper, that is, bpf_tail_call_proto,
+		 * where arg1_type is ARG_PTR_TO_CTX.
+		 */
 		insn = prog->insnsi;
 		goto select_insn;
 out:
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index a1b14d1..dc9b464 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -72,7 +72,7 @@
 		/* prog_array stores refcnt-ed bpf_prog pointers
 		 * release them all when user space closes prog_array_fd
 		 */
-		bpf_prog_array_map_clear(map);
+		bpf_fd_array_map_clear(map);
 
 	bpf_map_put(map);
 	return 0;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 039d866..ed12e38 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -238,6 +238,14 @@
 	[CONST_IMM]		= "imm",
 };
 
+static const struct {
+	int map_type;
+	int func_id;
+} func_limit[] = {
+	{BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
+	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
+};
+
 static void print_verifier_state(struct verifier_env *env)
 {
 	enum bpf_reg_type t;
@@ -648,6 +656,9 @@
 	struct verifier_state *state = &env->cur_state;
 	int size, err = 0;
 
+	if (state->regs[regno].type == PTR_TO_STACK)
+		off += state->regs[regno].imm;
+
 	size = bpf_size_to_bytes(bpf_size);
 	if (size < 0)
 		return size;
@@ -667,7 +678,8 @@
 		if (!err && t == BPF_READ && value_regno >= 0)
 			mark_reg_unknown_value(state->regs, value_regno);
 
-	} else if (state->regs[regno].type == FRAME_PTR) {
+	} else if (state->regs[regno].type == FRAME_PTR ||
+		   state->regs[regno].type == PTR_TO_STACK) {
 		if (off >= 0 || off < -MAX_BPF_STACK) {
 			verbose("invalid stack off=%d size=%d\n", off, size);
 			return -EACCES;
@@ -833,6 +845,28 @@
 	return err;
 }
 
+static int check_map_func_compatibility(struct bpf_map *map, int func_id)
+{
+	bool bool_map, bool_func;
+	int i;
+
+	if (!map)
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
+		bool_map = (map->map_type == func_limit[i].map_type);
+		bool_func = (func_id == func_limit[i].func_id);
+		/* Only a matching map & func pair may continue; don't
+		 * allow any other map type to be passed into the
+		 * special func.
+		 */
+		if (bool_map != bool_func)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int check_call(struct verifier_env *env, int func_id)
 {
 	struct verifier_state *state = &env->cur_state;
@@ -908,21 +942,9 @@
 		return -EINVAL;
 	}
 
-	if (map && map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
-	    func_id != BPF_FUNC_tail_call)
-		/* prog_array map type needs extra care:
-		 * only allow to pass it into bpf_tail_call() for now.
-		 * bpf_map_delete_elem() can be allowed in the future,
-		 * while bpf_map_update_elem() must only be done via syscall
-		 */
-		return -EINVAL;
-
-	if (func_id == BPF_FUNC_tail_call &&
-	    map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
-		/* don't allow any other map type to be passed into
-		 * bpf_tail_call()
-		 */
-		return -EINVAL;
+	err = check_map_func_compatibility(map, func_id);
+	if (err)
+		return err;
 
 	return 0;
 }
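
The func_limit[] table above replaces two open-coded special cases with data, and the `bool_map != bool_func` comparison rejects exactly the mixed cases. A standalone restatement of the rule (illustration only, not kernel code):

    #include <stdbool.h>

    /* For each (map_type, func_id) rule:
     *   both match     -> allowed (the dedicated pairing)
     *   neither match  -> allowed (this rule does not apply)
     *   exactly one    -> rejected (-EINVAL in the verifier)
     */
    static bool pair_ok(bool map_matches, bool func_matches)
    {
        return map_matches == func_matches;
    }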
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index f3f5cd5..a8538e4 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1342,7 +1342,7 @@
 	if (root != &cgrp_dfl_root)
 		for_each_subsys(ss, ssid)
 			if (root->subsys_mask & (1 << ssid))
-				seq_printf(seq, ",%s", ss->legacy_name);
+				seq_show_option(seq, ss->name, NULL);
 	if (root->flags & CGRP_ROOT_NOPREFIX)
 		seq_puts(seq, ",noprefix");
 	if (root->flags & CGRP_ROOT_XATTR)
@@ -1350,13 +1350,14 @@
 
 	spin_lock(&release_agent_path_lock);
 	if (strlen(root->release_agent_path))
-		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
+		seq_show_option(seq, "release_agent",
+				root->release_agent_path);
 	spin_unlock(&release_agent_path_lock);
 
 	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
 		seq_puts(seq, ",clone_children");
 	if (strlen(root->name))
-		seq_printf(seq, ",name=%s", root->name);
+		seq_show_option(seq, "name", root->name);
 	return 0;
 }
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ae16867..e818389 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3222,6 +3222,59 @@
 	return __perf_event_count(event);
 }
 
+/*
+ * NMI-safe method to read a local event, that is, an event that:
+ *   - is either for the current task, or for this CPU
+ *   - does not have inherit set, since inherited task events
+ *     are not local and cannot be read atomically
+ *   - does not have a pmu::count method
+ */
+u64 perf_event_read_local(struct perf_event *event)
+{
+	unsigned long flags;
+	u64 val;
+
+	/*
+	 * Disabling interrupts avoids all counter scheduling (context
+	 * switches, timer based rotation and IPIs).
+	 */
+	local_irq_save(flags);
+
+	/* If this is a per-task event, it must be for current */
+	WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
+		     event->hw.target != current);
+
+	/* If this is a per-CPU event, it must be for this CPU */
+	WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
+		     event->cpu != smp_processor_id());
+
+	/*
+	 * It must not be an event with inherit set, we cannot read
+	 * all child counters from atomic context.
+	 */
+	WARN_ON_ONCE(event->attr.inherit);
+
+	/*
+	 * It must not have a pmu::count method, those are not
+	 * NMI safe.
+	 */
+	WARN_ON_ONCE(event->pmu->count);
+
+	/*
+	 * If the event is currently on this CPU, it's either a per-task
+	 * event or local to this CPU. Furthermore, it means it's ACTIVE
+	 * (otherwise oncpu == -1).
+	 */
+	if (event->oncpu == smp_processor_id())
+		event->pmu->read(event);
+
+	val = local64_read(&event->count);
+	local_irq_restore(flags);
+
+	return val;
+}
+
 static u64 perf_event_read(struct perf_event *event)
 {
 	/*
@@ -8718,6 +8771,31 @@
 		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
 }
 
+struct perf_event *perf_event_get(unsigned int fd)
+{
+	int err;
+	struct fd f;
+	struct perf_event *event;
+
+	err = perf_fget_light(fd, &f);
+	if (err)
+		return ERR_PTR(err);
+
+	event = f.file->private_data;
+	atomic_long_inc(&event->refcount);
+	fdput(f);
+
+	return event;
+}
+
+const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
+{
+	if (!event)
+		return ERR_PTR(-EINVAL);
+
+	return &event->attr;
+}
+
 /*
 * inherit an event from parent task to child task:
  */
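
The WARN_ON_ONCE checks in perf_event_read_local() spell out its contract. A hedged sketch of a caller honoring it, pulling an event out of an fd-array slot in the style of a bpf_perf_event_read()-like helper (illustrative only; it assumes the ptrs[] layout from the arraymap changes above):

    #include <linux/bpf.h>
    #include <linux/perf_event.h>

    /* Illustrative: read the counter stored at @index of an fd-array map. */
    static u64 example_read_event(struct bpf_map *map, u32 index)
    {
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct perf_event *event;

        if (index >= array->map.max_entries)
            return (u64)-EINVAL;

        event = READ_ONCE(array->ptrs[index]);
        if (!event)
            return (u64)-ENOENT;

        /* Safe only because update_elem admitted nothing but RAW/HARDWARE
         * events; perf_event_read_local() WARNs on non-local events.
         */
        return perf_event_read_local(event);
    }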
diff --git a/kernel/fork.c b/kernel/fork.c
index 03aa2e6..7d5f0f1 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -454,8 +454,9 @@
 		tmp->vm_mm = mm;
 		if (anon_vma_fork(tmp, mpnt))
 			goto fail_nomem_anon_vma_fork;
-		tmp->vm_flags &= ~VM_LOCKED;
+		tmp->vm_flags &= ~(VM_LOCKED|VM_UFFD_MISSING|VM_UFFD_WP);
 		tmp->vm_next = tmp->vm_prev = NULL;
+		tmp->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
 		file = tmp->vm_file;
 		if (file) {
 			struct inode *inode = file_inode(file);
diff --git a/kernel/futex.c b/kernel/futex.c
index c4a182f..6e443ef 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -64,6 +64,7 @@
 #include <linux/hugetlb.h>
 #include <linux/freezer.h>
 #include <linux/bootmem.h>
+#include <linux/fault-inject.h>
 
 #include <asm/futex.h>
 
@@ -258,6 +259,66 @@
 
 static struct futex_hash_bucket *futex_queues;
 
+/*
+ * Fault injections for futexes.
+ */
+#ifdef CONFIG_FAIL_FUTEX
+
+static struct {
+	struct fault_attr attr;
+
+	u32 ignore_private;
+} fail_futex = {
+	.attr = FAULT_ATTR_INITIALIZER,
+	.ignore_private = 0,
+};
+
+static int __init setup_fail_futex(char *str)
+{
+	return setup_fault_attr(&fail_futex.attr, str);
+}
+__setup("fail_futex=", setup_fail_futex);
+
+static bool should_fail_futex(bool fshared)
+{
+	if (fail_futex.ignore_private && !fshared)
+		return false;
+
+	return should_fail(&fail_futex.attr, 1);
+}
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int __init fail_futex_debugfs(void)
+{
+	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+	struct dentry *dir;
+
+	dir = fault_create_debugfs_attr("fail_futex", NULL,
+					&fail_futex.attr);
+	if (IS_ERR(dir))
+		return PTR_ERR(dir);
+
+	if (!debugfs_create_bool("ignore-private", mode, dir,
+				 &fail_futex.ignore_private)) {
+		debugfs_remove_recursive(dir);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+late_initcall(fail_futex_debugfs);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+#else
+static inline bool should_fail_futex(bool fshared)
+{
+	return false;
+}
+#endif /* CONFIG_FAIL_FUTEX */
+
 static inline void futex_get_mm(union futex_key *key)
 {
 	atomic_inc(&key->private.mm->mm_count);
@@ -413,6 +474,9 @@
 	if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
 		return -EFAULT;
 
+	if (unlikely(should_fail_futex(fshared)))
+		return -EFAULT;
+
 	/*
 	 * PROCESS_PRIVATE futexes are fast.
 	 * As the mm cannot disappear under us and the 'key' only needs
@@ -428,6 +492,10 @@
 	}
 
 again:
+	/* Ignore any VERIFY_READ mapping (futex common case) */
+	if (unlikely(should_fail_futex(fshared)))
+		return -EFAULT;
+
 	err = get_user_pages_fast(address, 1, 1, &page);
 	/*
 	 * If write access is not required (eg. FUTEX_WAIT), try
@@ -516,7 +584,7 @@
 		 * A RO anonymous page will never change and thus doesn't make
 		 * sense for futex operations.
 		 */
-		if (ro) {
+		if (unlikely(should_fail_futex(fshared)) || ro) {
 			err = -EFAULT;
 			goto out;
 		}
@@ -974,6 +1042,9 @@
 {
 	u32 uninitialized_var(curval);
 
+	if (unlikely(should_fail_futex(true)))
+		return -EFAULT;
+
 	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
 		return -EFAULT;
 
@@ -1015,12 +1086,18 @@
 	if (get_futex_value_locked(&uval, uaddr))
 		return -EFAULT;
 
+	if (unlikely(should_fail_futex(true)))
+		return -EFAULT;
+
 	/*
 	 * Detect deadlocks.
 	 */
 	if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
 		return -EDEADLK;
 
+	if ((unlikely(should_fail_futex(true))))
+		return -EDEADLK;
+
 	/*
 	 * Lookup existing state first. If it exists, try to attach to
 	 * its pi_state.
@@ -1155,6 +1232,9 @@
 	 */
 	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
 
+	if (unlikely(should_fail_futex(true)))
+		ret = -EFAULT;
+
 	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
 		ret = -EFAULT;
 	else if (curval != uval)
@@ -1457,6 +1537,9 @@
 	if (get_futex_value_locked(&curval, pifutex))
 		return -EFAULT;
 
+	if (unlikely(should_fail_futex(true)))
+		return -EFAULT;
+
 	/*
 	 * Find the top_waiter and determine if there are additional waiters.
 	 * If the caller intends to requeue more than 1 waiter to pifutex,
@@ -2268,8 +2351,11 @@
 /*
  * Userspace tried a 0 -> TID atomic transition of the futex value
  * and failed. The kernel side here does the whole locking operation:
- * if there are waiters then it will block, it does PI, etc. (Due to
- * races the kernel might see a 0 value of the futex too.)
+ * if there are waiters then it will block as a consequence of relying
+ * on rt-mutexes; it does PI, etc. (Due to races the kernel might see
+ * a 0 value of the futex too.)
+ *
+ * Also serves as futex trylock_pi()'ing, with the corresponding semantics.
  */
 static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
 			 ktime_t *time, int trylock)
@@ -2300,6 +2386,10 @@
 
 	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
 	if (unlikely(ret)) {
+		/*
+		 * Atomic work succeeded and we got the lock,
+		 * or failed. Either way, we do _not_ block.
+		 */
 		switch (ret) {
 		case 1:
 			/* We got the lock. */
@@ -2530,7 +2620,7 @@
  * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
  * @uaddr:	the futex we initially wait on (non-pi)
  * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
- * 		the same type, no requeueing from private to shared, etc.
+ *		the same type, no requeueing from private to shared, etc.
  * @val:	the expected value of uaddr
  * @abs_time:	absolute timeout
  * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
@@ -3005,6 +3095,8 @@
 	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
 		      cmd == FUTEX_WAIT_BITSET ||
 		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
+		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
+			return -EFAULT;
 		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
 			return -EFAULT;
 		if (!timespec_valid(&ts))
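
With CONFIG_FAIL_FUTEX and CONFIG_FAULT_INJECTION_DEBUG_FS enabled, the new ignore-private knob sits under /sys/kernel/debug/fail_futex/ next to the generic fault_attr files (probability, interval, times). A small C harness might configure a run like this (a sketch; it assumes debugfs is mounted at the usual path):

    #include <stdio.h>

    static int write_knob(const char *name, const char *val)
    {
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/kernel/debug/fail_futex/%s", name);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fputs(val, f);
        fclose(f);
        return 0;
    }

    int main(void)
    {
        write_knob("probability", "10");    /* fail ~10% of hook sites */
        write_knob("times", "-1");          /* no cap on injected faults */
        write_knob("ignore-private", "1");  /* only shared futexes fail */
        return 0;
    }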
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 52ebaca..f7dd15d 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -54,7 +54,7 @@
 	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
 }
 
-static void jump_label_update(struct static_key *key, int enable);
+static void jump_label_update(struct static_key *key);
 
 void static_key_slow_inc(struct static_key *key)
 {
@@ -63,13 +63,8 @@
 		return;
 
 	jump_label_lock();
-	if (atomic_read(&key->enabled) == 0) {
-		if (!jump_label_get_branch_default(key))
-			jump_label_update(key, JUMP_LABEL_ENABLE);
-		else
-			jump_label_update(key, JUMP_LABEL_DISABLE);
-	}
-	atomic_inc(&key->enabled);
+	if (atomic_inc_return(&key->enabled) == 1)
+		jump_label_update(key);
 	jump_label_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
@@ -87,10 +82,7 @@
 		atomic_inc(&key->enabled);
 		schedule_delayed_work(work, rate_limit);
 	} else {
-		if (!jump_label_get_branch_default(key))
-			jump_label_update(key, JUMP_LABEL_DISABLE);
-		else
-			jump_label_update(key, JUMP_LABEL_ENABLE);
+		jump_label_update(key);
 	}
 	jump_label_unlock();
 }
@@ -149,7 +141,7 @@
 	return 0;
 }
 
-/* 
+/*
  * Update code which is definitely not currently executing.
  * Architectures which need heavyweight synchronization to modify
  * running code can override this to make the non-live update case
@@ -158,37 +150,54 @@
 void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
 					    enum jump_label_type type)
 {
-	arch_jump_label_transform(entry, type);	
+	arch_jump_label_transform(entry, type);
+}
+
+static inline struct jump_entry *static_key_entries(struct static_key *key)
+{
+	return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
+}
+
+static inline bool static_key_type(struct static_key *key)
+{
+	return (unsigned long)key->entries & JUMP_TYPE_MASK;
+}
+
+static inline struct static_key *jump_entry_key(struct jump_entry *entry)
+{
+	return (struct static_key *)((unsigned long)entry->key & ~1UL);
+}
+
+static bool jump_entry_branch(struct jump_entry *entry)
+{
+	return (unsigned long)entry->key & 1UL;
+}
+
+static enum jump_label_type jump_label_type(struct jump_entry *entry)
+{
+	struct static_key *key = jump_entry_key(entry);
+	bool enabled = static_key_enabled(key);
+	bool branch = jump_entry_branch(entry);
+
+	/* See the comment in linux/jump_label.h */
+	return enabled ^ branch;
 }
 
 static void __jump_label_update(struct static_key *key,
 				struct jump_entry *entry,
-				struct jump_entry *stop, int enable)
+				struct jump_entry *stop)
 {
-	for (; (entry < stop) &&
-	      (entry->key == (jump_label_t)(unsigned long)key);
-	      entry++) {
+	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
 		/*
 		 * entry->code set to 0 invalidates module init text sections
 		 * kernel_text_address() verifies we are not in core kernel
 		 * init code, see jump_label_invalidate_module_init().
 		 */
 		if (entry->code && kernel_text_address(entry->code))
-			arch_jump_label_transform(entry, enable);
+			arch_jump_label_transform(entry, jump_label_type(entry));
 	}
 }
 
-static enum jump_label_type jump_label_type(struct static_key *key)
-{
-	bool true_branch = jump_label_get_branch_default(key);
-	bool state = static_key_enabled(key);
-
-	if ((!true_branch && state) || (true_branch && !state))
-		return JUMP_LABEL_ENABLE;
-
-	return JUMP_LABEL_DISABLE;
-}
-
 void __init jump_label_init(void)
 {
 	struct jump_entry *iter_start = __start___jump_table;
@@ -202,8 +211,11 @@
 	for (iter = iter_start; iter < iter_stop; iter++) {
 		struct static_key *iterk;
 
-		iterk = (struct static_key *)(unsigned long)iter->key;
-		arch_jump_label_transform_static(iter, jump_label_type(iterk));
+		/* rewrite NOPs */
+		if (jump_label_type(iter) == JUMP_LABEL_NOP)
+			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
+
+		iterk = jump_entry_key(iter);
 		if (iterk == key)
 			continue;
 
@@ -222,6 +234,16 @@
 
 #ifdef CONFIG_MODULES
 
+static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
+{
+	struct static_key *key = jump_entry_key(entry);
+	bool type = static_key_type(key);
+	bool branch = jump_entry_branch(entry);
+
+	/* See the comment in linux/jump_label.h */
+	return type ^ branch;
+}
+
 struct static_key_mod {
 	struct static_key_mod *next;
 	struct jump_entry *entries;
@@ -243,17 +265,15 @@
 				start, end);
 }
 
-static void __jump_label_mod_update(struct static_key *key, int enable)
+static void __jump_label_mod_update(struct static_key *key)
 {
-	struct static_key_mod *mod = key->next;
+	struct static_key_mod *mod;
 
-	while (mod) {
+	for (mod = key->next; mod; mod = mod->next) {
 		struct module *m = mod->mod;
 
 		__jump_label_update(key, mod->entries,
-				    m->jump_entries + m->num_jump_entries,
-				    enable);
-		mod = mod->next;
+				    m->jump_entries + m->num_jump_entries);
 	}
 }
 
@@ -276,7 +296,9 @@
 		return;
 
 	for (iter = iter_start; iter < iter_stop; iter++) {
-		arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
+		/* Only write NOPs for arch_branch_static(). */
+		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
+			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
 	}
 }
 
@@ -297,7 +319,7 @@
 	for (iter = iter_start; iter < iter_stop; iter++) {
 		struct static_key *iterk;
 
-		iterk = (struct static_key *)(unsigned long)iter->key;
+		iterk = jump_entry_key(iter);
 		if (iterk == key)
 			continue;
 
@@ -318,8 +340,9 @@
 		jlm->next = key->next;
 		key->next = jlm;
 
-		if (jump_label_type(key) == JUMP_LABEL_ENABLE)
-			__jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
+		/* Only update if we've changed from our initial state */
+		if (jump_label_type(iter) != jump_label_init_type(iter))
+			__jump_label_update(key, iter, iter_stop);
 	}
 
 	return 0;
@@ -334,10 +357,10 @@
 	struct static_key_mod *jlm, **prev;
 
 	for (iter = iter_start; iter < iter_stop; iter++) {
-		if (iter->key == (jump_label_t)(unsigned long)key)
+		if (jump_entry_key(iter) == key)
 			continue;
 
-		key = (struct static_key *)(unsigned long)iter->key;
+		key = jump_entry_key(iter);
 
 		if (within_module(iter->key, mod))
 			continue;
@@ -439,14 +462,14 @@
 	return ret;
 }
 
-static void jump_label_update(struct static_key *key, int enable)
+static void jump_label_update(struct static_key *key)
 {
 	struct jump_entry *stop = __stop___jump_table;
-	struct jump_entry *entry = jump_label_get_entries(key);
+	struct jump_entry *entry = static_key_entries(key);
 #ifdef CONFIG_MODULES
 	struct module *mod;
 
-	__jump_label_mod_update(key, enable);
+	__jump_label_mod_update(key);
 
 	preempt_disable();
 	mod = __module_address((unsigned long)key);
@@ -456,7 +479,44 @@
 #endif
 	/* if there are no users, entry can be NULL */
 	if (entry)
-		__jump_label_update(key, entry, stop, enable);
+		__jump_label_update(key, entry, stop);
 }
 
-#endif
+#ifdef CONFIG_STATIC_KEYS_SELFTEST
+static DEFINE_STATIC_KEY_TRUE(sk_true);
+static DEFINE_STATIC_KEY_FALSE(sk_false);
+
+static __init int jump_label_test(void)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		WARN_ON(static_key_enabled(&sk_true.key) != true);
+		WARN_ON(static_key_enabled(&sk_false.key) != false);
+
+		WARN_ON(!static_branch_likely(&sk_true));
+		WARN_ON(!static_branch_unlikely(&sk_true));
+		WARN_ON(static_branch_likely(&sk_false));
+		WARN_ON(static_branch_unlikely(&sk_false));
+
+		static_branch_disable(&sk_true);
+		static_branch_enable(&sk_false);
+
+		WARN_ON(static_key_enabled(&sk_true.key) == true);
+		WARN_ON(static_key_enabled(&sk_false.key) == false);
+
+		WARN_ON(static_branch_likely(&sk_true));
+		WARN_ON(static_branch_unlikely(&sk_true));
+		WARN_ON(!static_branch_likely(&sk_false));
+		WARN_ON(!static_branch_unlikely(&sk_false));
+
+		static_branch_enable(&sk_true);
+		static_branch_disable(&sk_false);
+	}
+
+	return 0;
+}
+late_initcall(jump_label_test);
+#endif /* STATIC_KEYS_SELFTEST */
+
+#endif /* HAVE_JUMP_LABEL */
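
The selftest doubles as documentation for the reworked API: the key's default now lives in its type, and each branch site picks its layout with static_branch_likely()/_unlikely(). A typical consumer, using a hypothetical tracing flag, might look like:

    #include <linux/jump_label.h>
    #include <linux/types.h>

    /* Default off: the branch below compiles to a NOP until enabled. */
    static DEFINE_STATIC_KEY_FALSE(example_tracing_on);

    void example_hot_path(void)
    {
        if (static_branch_unlikely(&example_tracing_on)) {
            /* slow path, reachable only after static_branch_enable() */
        }
        /* fast path continues without a conditional load */
    }

    void example_set_tracing(bool on)
    {
        if (on)
            static_branch_enable(&example_tracing_on);
        else
            static_branch_disable(&example_tracing_on);
    }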
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 490924c..9ff173d 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -248,15 +248,16 @@
  * kthread_create_on_node - create a kthread.
  * @threadfn: the function to run until signal_pending(current).
  * @data: data ptr for @threadfn.
- * @node: memory node number.
+ * @node: task and thread structures for the thread are allocated on this node
  * @namefmt: printf-style name for the thread.
  *
  * Description: This helper function creates and names a kernel
  * thread.  The thread will be stopped: use wake_up_process() to start
- * it.  See also kthread_run().
+ * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
+ * is affine to all CPUs.
  *
  * If thread is going to be bound on a particular cpu, give its node
- * in @node, to get NUMA affinity for kthread stack, or else give -1.
+ * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
  * When woken, the thread will run @threadfn() with @data as its
  * argument. @threadfn() can either call do_exit() directly if it is a
  * standalone thread for which no one will call kthread_stop(), or
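
The reworded kernel-doc pairs naturally with the canonical call pattern: pass NUMA_NO_NODE unless the thread will be bound to a CPU, in which case that CPU's node keeps the stack local. A sketch (the example_* names are illustrative):

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/topology.h>

    static int example_thread_fn(void *data)
    {
        while (!kthread_should_stop()) {
            set_current_state(TASK_INTERRUPTIBLE);
            schedule();                 /* park until woken or stopped */
        }
        return 0;
    }

    /* Bind a worker to @cpu, allocating its stack on that CPU's node. */
    static struct task_struct *example_start_worker(unsigned int cpu)
    {
        struct task_struct *tsk;

        tsk = kthread_create_on_node(example_thread_fn, NULL,
                                     cpu_to_node(cpu), "example/%u", cpu);
        if (!IS_ERR(tsk)) {
            kthread_bind(tsk, cpu);
            wake_up_process(tsk);
        }
        return tsk;
    }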
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 7dd5c99..3694204 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -20,7 +20,6 @@
 obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
 obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
-obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index 6c5da483..f17a3e3 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -55,27 +55,29 @@
 {
 	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
 		cpu_relax_lowlatency();
-		cnts = smp_load_acquire((u32 *)&lock->cnts);
+		cnts = atomic_read_acquire(&lock->cnts);
 	}
 }
 
 /**
- * queue_read_lock_slowpath - acquire read lock of a queue rwlock
+ * queued_read_lock_slowpath - acquire read lock of a queue rwlock
  * @lock: Pointer to queue rwlock structure
+ * @cnts: Current qrwlock lock value
  */
-void queue_read_lock_slowpath(struct qrwlock *lock)
+void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
 {
-	u32 cnts;
-
 	/*
 	 * Readers come here when they cannot get the lock without waiting
 	 */
 	if (unlikely(in_interrupt())) {
 		/*
-		 * Readers in interrupt context will spin until the lock is
-		 * available without waiting in the queue.
+		 * Readers in interrupt context will get the lock immediately
+		 * if the writer is just waiting (not holding the lock yet).
+		 * The rspin_until_writer_unlock() function returns immediately
+		 * in this case. Otherwise, they will spin (with ACQUIRE
+		 * semantics) until the lock is available without waiting in
+		 * the queue.
 		 */
-		cnts = smp_load_acquire((u32 *)&lock->cnts);
 		rspin_until_writer_unlock(lock, cnts);
 		return;
 	}
@@ -87,16 +89,11 @@
 	arch_spin_lock(&lock->lock);
 
 	/*
-	 * At the head of the wait queue now, wait until the writer state
-	 * goes to 0 and then try to increment the reader count and get
-	 * the lock. It is possible that an incoming writer may steal the
-	 * lock in the interim, so it is necessary to check the writer byte
-	 * to make sure that the write lock isn't taken.
+	 * The ACQUIRE semantics of the following spinning code ensure
+	 * that accesses can't leak upwards out of our subsequent critical
+	 * section in the case that the lock is currently held for write.
 	 */
-	while (atomic_read(&lock->cnts) & _QW_WMASK)
-		cpu_relax_lowlatency();
-
-	cnts = atomic_add_return(_QR_BIAS, &lock->cnts) - _QR_BIAS;
+	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts) - _QR_BIAS;
 	rspin_until_writer_unlock(lock, cnts);
 
 	/*
@@ -104,13 +101,13 @@
 	 */
 	arch_spin_unlock(&lock->lock);
 }
-EXPORT_SYMBOL(queue_read_lock_slowpath);
+EXPORT_SYMBOL(queued_read_lock_slowpath);
 
 /**
- * queue_write_lock_slowpath - acquire write lock of a queue rwlock
+ * queued_write_lock_slowpath - acquire write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
-void queue_write_lock_slowpath(struct qrwlock *lock)
+void queued_write_lock_slowpath(struct qrwlock *lock)
 {
 	u32 cnts;
 
@@ -119,7 +116,7 @@
 
 	/* Try to acquire the lock directly if no reader is present */
 	if (!atomic_read(&lock->cnts) &&
-	    (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0))
+	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
 		goto unlock;
 
 	/*
@@ -130,7 +127,7 @@
 		struct __qrwlock *l = (struct __qrwlock *)lock;
 
 		if (!READ_ONCE(l->wmode) &&
-		   (cmpxchg(&l->wmode, 0, _QW_WAITING) == 0))
+		   (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
 			break;
 
 		cpu_relax_lowlatency();
@@ -140,8 +137,8 @@
 	for (;;) {
 		cnts = atomic_read(&lock->cnts);
 		if ((cnts == _QW_WAITING) &&
-		    (atomic_cmpxchg(&lock->cnts, _QW_WAITING,
-				    _QW_LOCKED) == _QW_WAITING))
+		    (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING,
+					    _QW_LOCKED) == _QW_WAITING))
 			break;
 
 		cpu_relax_lowlatency();
@@ -149,4 +146,4 @@
 unlock:
 	arch_spin_unlock(&lock->lock);
 }
-EXPORT_SYMBOL(queue_write_lock_slowpath);
+EXPORT_SYMBOL(queued_write_lock_slowpath);
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 38c4920..337c881 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -239,8 +239,8 @@
 
 static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
 static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
-static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
-
+static __always_inline void __pv_kick_node(struct qspinlock *lock,
+					   struct mcs_spinlock *node) { }
 static __always_inline void __pv_wait_head(struct qspinlock *lock,
 					   struct mcs_spinlock *node) { }
 
@@ -440,7 +440,7 @@
 		cpu_relax();
 
 	arch_mcs_spin_unlock_contended(&next->locked);
-	pv_kick_node(next);
+	pv_kick_node(lock, next);
 
 release:
 	/*
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index df19ae4..c8e6e9a 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -22,9 +22,14 @@
 
 #define _Q_SLOW_VAL	(3U << _Q_LOCKED_OFFSET)
 
+/*
+ * Queue node uses: vcpu_running & vcpu_halted.
+ * Queue head uses: vcpu_running & vcpu_hashed.
+ */
 enum vcpu_state {
 	vcpu_running = 0,
-	vcpu_halted,
+	vcpu_halted,		/* Used only in pv_wait_node */
+	vcpu_hashed,		/* = pv_hash'ed + vcpu_halted */
 };
 
 struct pv_node {
@@ -153,7 +158,8 @@
 
 /*
  * Wait for node->locked to become true, halt the vcpu after a short spin.
- * pv_kick_node() is used to wake the vcpu again.
+ * pv_kick_node() is used to set _Q_SLOW_VAL and fill in the hash table
+ * on its behalf.
  */
 static void pv_wait_node(struct mcs_spinlock *node)
 {
@@ -172,9 +178,9 @@
 		 *
 		 * [S] pn->state = vcpu_halted	  [S] next->locked = 1
 		 *     MB			      MB
-		 * [L] pn->locked		[RmW] pn->state = vcpu_running
+		 * [L] pn->locked		[RmW] pn->state = vcpu_hashed
 		 *
-		 * Matches the xchg() from pv_kick_node().
+		 * Matches the cmpxchg() from pv_kick_node().
 		 */
 		smp_store_mb(pn->state, vcpu_halted);
 
@@ -182,9 +188,10 @@
 			pv_wait(&pn->state, vcpu_halted);
 
 		/*
-		 * Reset the vCPU state to avoid unncessary CPU kicking
+		 * If pv_kick_node() changed us to vcpu_hashed, retain that value
+		 * so that pv_wait_head() knows to not also try to hash this lock.
 		 */
-		WRITE_ONCE(pn->state, vcpu_running);
+		cmpxchg(&pn->state, vcpu_halted, vcpu_running);
 
 		/*
 		 * If the locked flag is still not set after wakeup, it is a
@@ -194,6 +201,7 @@
 		 * MCS lock will be released soon.
 		 */
 	}
+
 	/*
 	 * By now our node->locked should be 1 and our caller will not actually
 	 * spin-wait for it. We do however rely on our caller to do a
@@ -202,24 +210,35 @@
 }
 
 /*
- * Called after setting next->locked = 1, used to wake those stuck in
- * pv_wait_node().
+ * Called after setting next->locked = 1 when we're the lock owner.
+ *
+ * Instead of waking the waiters stuck in pv_wait_node(), advance their
+ * state such that they're waiting in pv_wait_head(); this avoids a
+ * wake/sleep cycle.
  */
-static void pv_kick_node(struct mcs_spinlock *node)
+static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 {
 	struct pv_node *pn = (struct pv_node *)node;
+	struct __qspinlock *l = (void *)lock;
 
 	/*
-	 * Note that because node->locked is already set, this actual
-	 * mcs_spinlock entry could be re-used already.
+	 * If the vCPU is indeed halted, advance its state to match that of
+	 * pv_wait_node(). If OTOH this fails, the vCPU was running and will
+	 * observe its next->locked value and advance itself.
 	 *
-	 * This should be fine however, kicking people for no reason is
-	 * harmless.
-	 *
-	 * See the comment in pv_wait_node().
+	 * Matches with smp_store_mb() and cmpxchg() in pv_wait_node()
 	 */
-	if (xchg(&pn->state, vcpu_running) == vcpu_halted)
-		pv_kick(pn->cpu);
+	if (cmpxchg(&pn->state, vcpu_halted, vcpu_hashed) != vcpu_halted)
+		return;
+
+	/*
+	 * Put the lock into the hash table and set the _Q_SLOW_VAL.
+	 *
+	 * As this is the same vCPU that will check the _Q_SLOW_VAL value and
+	 * the hash table later on at unlock time, no atomic instruction is
+	 * needed.
+	 */
+	WRITE_ONCE(l->locked, _Q_SLOW_VAL);
+	(void)pv_hash(lock, pn);
 }
 
 /*
@@ -233,6 +252,13 @@
 	struct qspinlock **lp = NULL;
 	int loop;
 
+	/*
+	 * If pv_kick_node() already advanced our state, we don't need to
+	 * insert ourselves into the hash table anymore.
+	 */
+	if (READ_ONCE(pn->state) == vcpu_hashed)
+		lp = (struct qspinlock **)1;
+
 	for (;;) {
 		for (loop = SPIN_THRESHOLD; loop; loop--) {
 			if (!READ_ONCE(l->locked))
@@ -240,17 +266,22 @@
 			cpu_relax();
 		}
 
-		WRITE_ONCE(pn->state, vcpu_halted);
 		if (!lp) { /* ONCE */
+			WRITE_ONCE(pn->state, vcpu_hashed);
 			lp = pv_hash(lock, pn);
+
 			/*
-			 * lp must be set before setting _Q_SLOW_VAL
+			 * We must hash before setting _Q_SLOW_VAL, such that
+			 * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
+			 * we'll be sure to be able to observe our hash entry.
 			 *
-			 * [S] lp = lock                [RmW] l = l->locked = 0
-			 *     MB                             MB
-			 * [S] l->locked = _Q_SLOW_VAL  [L]   lp
+			 *   [S] pn->state
+			 *   [S] <hash>                 [Rmw] l->locked == _Q_SLOW_VAL
+			 *       MB                           RMB
+			 * [RmW] l->locked = _Q_SLOW_VAL  [L] <unhash>
+			 *                                [L] pn->state
 			 *
-			 * Matches the cmpxchg() in __pv_queued_spin_unlock().
+			 * Matches the smp_rmb() in __pv_queued_spin_unlock().
 			 */
 			if (!cmpxchg(&l->locked, _Q_LOCKED_VAL, _Q_SLOW_VAL)) {
 				/*
@@ -287,24 +318,34 @@
 {
 	struct __qspinlock *l = (void *)lock;
 	struct pv_node *node;
-	u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
+	u8 locked;
 
 	/*
 	 * We must not unlock if SLOW, because in that case we must first
 	 * unhash. Otherwise it would be possible to have multiple @lock
 	 * entries, which would be BAD.
 	 */
-	if (likely(lockval == _Q_LOCKED_VAL))
+	locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
+	if (likely(locked == _Q_LOCKED_VAL))
 		return;
 
-	if (unlikely(lockval != _Q_SLOW_VAL)) {
-		if (debug_locks_silent)
-			return;
-		WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
+	if (unlikely(locked != _Q_SLOW_VAL)) {
+		WARN(!debug_locks_silent,
+		     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
+		     (unsigned long)lock, atomic_read(&lock->val));
 		return;
 	}
 
 	/*
+	 * A failed cmpxchg doesn't provide any memory-ordering guarantees,
+	 * so we need a barrier to order the read of the node data in
+	 * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
+	 *
+	 * Matches the cmpxchg() in pv_wait_head() setting _Q_SLOW_VAL.
+	 */
+	smp_rmb();
+
+	/*
 	 * Since the above failed to release, this must be the SLOW path.
 	 * Therefore start by looking up the blocked node and unhashing it.
 	 */
@@ -319,8 +360,11 @@
 	/*
 	 * At this point the memory pointed at by lock can be freed/reused,
 	 * however we can still use the pv_node to kick the CPU.
+	 * The other vCPU may not really be halted, but kicking an active
+	 * vCPU is harmless other than the additional latency in completing
+	 * the unlock.
 	 */
-	if (READ_ONCE(node->state) == vcpu_halted)
+	if (READ_ONCE(node->state) == vcpu_hashed)
 		pv_kick(node->cpu);
 }
 /*
diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
deleted file mode 100644
index 1d96dd0..0000000
--- a/kernel/locking/rtmutex-tester.c
+++ /dev/null
@@ -1,420 +0,0 @@
-/*
- * RT-Mutex-tester: scriptable tester for rt mutexes
- *
- * started by Thomas Gleixner:
- *
- *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
- *
- */
-#include <linux/device.h>
-#include <linux/kthread.h>
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/sched/rt.h>
-#include <linux/spinlock.h>
-#include <linux/timer.h>
-#include <linux/freezer.h>
-#include <linux/stat.h>
-
-#include "rtmutex.h"
-
-#define MAX_RT_TEST_THREADS	8
-#define MAX_RT_TEST_MUTEXES	8
-
-static spinlock_t rttest_lock;
-static atomic_t rttest_event;
-
-struct test_thread_data {
-	int			opcode;
-	int			opdata;
-	int			mutexes[MAX_RT_TEST_MUTEXES];
-	int			event;
-	struct device		dev;
-};
-
-static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
-static struct task_struct *threads[MAX_RT_TEST_THREADS];
-static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];
-
-enum test_opcodes {
-	RTTEST_NOP = 0,
-	RTTEST_SCHEDOT,		/* 1 Sched other, data = nice */
-	RTTEST_SCHEDRT,		/* 2 Sched fifo, data = prio */
-	RTTEST_LOCK,		/* 3 Lock uninterruptible, data = lockindex */
-	RTTEST_LOCKNOWAIT,	/* 4 Lock uninterruptible no wait in wakeup, data = lockindex */
-	RTTEST_LOCKINT,		/* 5 Lock interruptible, data = lockindex */
-	RTTEST_LOCKINTNOWAIT,	/* 6 Lock interruptible no wait in wakeup, data = lockindex */
-	RTTEST_LOCKCONT,	/* 7 Continue locking after the wakeup delay */
-	RTTEST_UNLOCK,		/* 8 Unlock, data = lockindex */
-	/* 9, 10 - reserved for BKL commemoration */
-	RTTEST_SIGNAL = 11,	/* 11 Signal other test thread, data = thread id */
-	RTTEST_RESETEVENT = 98,	/* 98 Reset event counter */
-	RTTEST_RESET = 99,	/* 99 Reset all pending operations */
-};
-
-static int handle_op(struct test_thread_data *td, int lockwakeup)
-{
-	int i, id, ret = -EINVAL;
-
-	switch(td->opcode) {
-
-	case RTTEST_NOP:
-		return 0;
-
-	case RTTEST_LOCKCONT:
-		td->mutexes[td->opdata] = 1;
-		td->event = atomic_add_return(1, &rttest_event);
-		return 0;
-
-	case RTTEST_RESET:
-		for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
-			if (td->mutexes[i] == 4) {
-				rt_mutex_unlock(&mutexes[i]);
-				td->mutexes[i] = 0;
-			}
-		}
-		return 0;
-
-	case RTTEST_RESETEVENT:
-		atomic_set(&rttest_event, 0);
-		return 0;
-
-	default:
-		if (lockwakeup)
-			return ret;
-	}
-
-	switch(td->opcode) {
-
-	case RTTEST_LOCK:
-	case RTTEST_LOCKNOWAIT:
-		id = td->opdata;
-		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
-			return ret;
-
-		td->mutexes[id] = 1;
-		td->event = atomic_add_return(1, &rttest_event);
-		rt_mutex_lock(&mutexes[id]);
-		td->event = atomic_add_return(1, &rttest_event);
-		td->mutexes[id] = 4;
-		return 0;
-
-	case RTTEST_LOCKINT:
-	case RTTEST_LOCKINTNOWAIT:
-		id = td->opdata;
-		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
-			return ret;
-
-		td->mutexes[id] = 1;
-		td->event = atomic_add_return(1, &rttest_event);
-		ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
-		td->event = atomic_add_return(1, &rttest_event);
-		td->mutexes[id] = ret ? 0 : 4;
-		return ret ? -EINTR : 0;
-
-	case RTTEST_UNLOCK:
-		id = td->opdata;
-		if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
-			return ret;
-
-		td->event = atomic_add_return(1, &rttest_event);
-		rt_mutex_unlock(&mutexes[id]);
-		td->event = atomic_add_return(1, &rttest_event);
-		td->mutexes[id] = 0;
-		return 0;
-
-	default:
-		break;
-	}
-	return ret;
-}
-
-/*
- * Schedule replacement for rtsem_down(). Only called for threads with
- * PF_MUTEX_TESTER set.
- *
- * This allows us to have finegrained control over the event flow.
- *
- */
-void schedule_rt_mutex_test(struct rt_mutex *mutex)
-{
-	int tid, op, dat;
-	struct test_thread_data *td;
-
-	/* We have to lookup the task */
-	for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) {
-		if (threads[tid] == current)
-			break;
-	}
-
-	BUG_ON(tid == MAX_RT_TEST_THREADS);
-
-	td = &thread_data[tid];
-
-	op = td->opcode;
-	dat = td->opdata;
-
-	switch (op) {
-	case RTTEST_LOCK:
-	case RTTEST_LOCKINT:
-	case RTTEST_LOCKNOWAIT:
-	case RTTEST_LOCKINTNOWAIT:
-		if (mutex != &mutexes[dat])
-			break;
-
-		if (td->mutexes[dat] != 1)
-			break;
-
-		td->mutexes[dat] = 2;
-		td->event = atomic_add_return(1, &rttest_event);
-		break;
-
-	default:
-		break;
-	}
-
-	schedule();
-
-
-	switch (op) {
-	case RTTEST_LOCK:
-	case RTTEST_LOCKINT:
-		if (mutex != &mutexes[dat])
-			return;
-
-		if (td->mutexes[dat] != 2)
-			return;
-
-		td->mutexes[dat] = 3;
-		td->event = atomic_add_return(1, &rttest_event);
-		break;
-
-	case RTTEST_LOCKNOWAIT:
-	case RTTEST_LOCKINTNOWAIT:
-		if (mutex != &mutexes[dat])
-			return;
-
-		if (td->mutexes[dat] != 2)
-			return;
-
-		td->mutexes[dat] = 1;
-		td->event = atomic_add_return(1, &rttest_event);
-		return;
-
-	default:
-		return;
-	}
-
-	td->opcode = 0;
-
-	for (;;) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		if (td->opcode > 0) {
-			int ret;
-
-			set_current_state(TASK_RUNNING);
-			ret = handle_op(td, 1);
-			set_current_state(TASK_INTERRUPTIBLE);
-			if (td->opcode == RTTEST_LOCKCONT)
-				break;
-			td->opcode = ret;
-		}
-
-		/* Wait for the next command to be executed */
-		schedule();
-	}
-
-	/* Restore previous command and data */
-	td->opcode = op;
-	td->opdata = dat;
-}
-
-static int test_func(void *data)
-{
-	struct test_thread_data *td = data;
-	int ret;
-
-	current->flags |= PF_MUTEX_TESTER;
-	set_freezable();
-	allow_signal(SIGHUP);
-
-	for(;;) {
-
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		if (td->opcode > 0) {
-			set_current_state(TASK_RUNNING);
-			ret = handle_op(td, 0);
-			set_current_state(TASK_INTERRUPTIBLE);
-			td->opcode = ret;
-		}
-
-		/* Wait for the next command to be executed */
-		schedule();
-		try_to_freeze();
-
-		if (signal_pending(current))
-			flush_signals(current);
-
-		if(kthread_should_stop())
-			break;
-	}
-	return 0;
-}
-
-/**
- * sysfs_test_command - interface for test commands
- * @dev:	thread reference
- * @buf:	command for actual step
- * @count:	length of buffer
- *
- * command syntax:
- *
- * opcode:data
- */
-static ssize_t sysfs_test_command(struct device *dev, struct device_attribute *attr,
-				  const char *buf, size_t count)
-{
-	struct sched_param schedpar;
-	struct test_thread_data *td;
-	char cmdbuf[32];
-	int op, dat, tid, ret;
-
-	td = container_of(dev, struct test_thread_data, dev);
-	tid = td->dev.id;
-
-	/* strings from sysfs write are not 0 terminated! */
-	if (count >= sizeof(cmdbuf))
-		return -EINVAL;
-
-	/* strip of \n: */
-	if (buf[count-1] == '\n')
-		count--;
-	if (count < 1)
-		return -EINVAL;
-
-	memcpy(cmdbuf, buf, count);
-	cmdbuf[count] = 0;
-
-	if (sscanf(cmdbuf, "%d:%d", &op, &dat) != 2)
-		return -EINVAL;
-
-	switch (op) {
-	case RTTEST_SCHEDOT:
-		schedpar.sched_priority = 0;
-		ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar);
-		if (ret)
-			return ret;
-		set_user_nice(current, 0);
-		break;
-
-	case RTTEST_SCHEDRT:
-		schedpar.sched_priority = dat;
-		ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar);
-		if (ret)
-			return ret;
-		break;
-
-	case RTTEST_SIGNAL:
-		send_sig(SIGHUP, threads[tid], 0);
-		break;
-
-	default:
-		if (td->opcode > 0)
-			return -EBUSY;
-		td->opdata = dat;
-		td->opcode = op;
-		wake_up_process(threads[tid]);
-	}
-
-	return count;
-}
-
-/**
- * sysfs_test_status - sysfs interface for rt tester
- * @dev:	thread to query
- * @buf:	char buffer to be filled with thread status info
- */
-static ssize_t sysfs_test_status(struct device *dev, struct device_attribute *attr,
-				 char *buf)
-{
-	struct test_thread_data *td;
-	struct task_struct *tsk;
-	char *curr = buf;
-	int i;
-
-	td = container_of(dev, struct test_thread_data, dev);
-	tsk = threads[td->dev.id];
-
-	spin_lock(&rttest_lock);
-
-	curr += sprintf(curr,
-		"O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, M:",
-		td->opcode, td->event, tsk->state,
-			(MAX_RT_PRIO - 1) - tsk->prio,
-			(MAX_RT_PRIO - 1) - tsk->normal_prio,
-		tsk->pi_blocked_on);
-
-	for (i = MAX_RT_TEST_MUTEXES - 1; i >=0 ; i--)
-		curr += sprintf(curr, "%d", td->mutexes[i]);
-
-	spin_unlock(&rttest_lock);
-
-	curr += sprintf(curr, ", T: %p, R: %p\n", tsk,
-			mutexes[td->dev.id].owner);
-
-	return curr - buf;
-}
-
-static DEVICE_ATTR(status, S_IRUSR, sysfs_test_status, NULL);
-static DEVICE_ATTR(command, S_IWUSR, NULL, sysfs_test_command);
-
-static struct bus_type rttest_subsys = {
-	.name = "rttest",
-	.dev_name = "rttest",
-};
-
-static int init_test_thread(int id)
-{
-	thread_data[id].dev.bus = &rttest_subsys;
-	thread_data[id].dev.id = id;
-
-	threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id);
-	if (IS_ERR(threads[id]))
-		return PTR_ERR(threads[id]);
-
-	return device_register(&thread_data[id].dev);
-}
-
-static int init_rttest(void)
-{
-	int ret, i;
-
-	spin_lock_init(&rttest_lock);
-
-	for (i = 0; i < MAX_RT_TEST_MUTEXES; i++)
-		rt_mutex_init(&mutexes[i]);
-
-	ret = subsys_system_register(&rttest_subsys, NULL);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < MAX_RT_TEST_THREADS; i++) {
-		ret = init_test_thread(i);
-		if (ret)
-			break;
-		ret = device_create_file(&thread_data[i].dev, &dev_attr_status);
-		if (ret)
-			break;
-		ret = device_create_file(&thread_data[i].dev, &dev_attr_command);
-		if (ret)
-			break;
-	}
-
-	printk("Initializing RT-Tester: %s\n", ret ? "Failed" : "OK" );
-
-	return ret;
-}
-
-device_initcall(init_rttest);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 5674b07..7781d80 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1120,7 +1120,7 @@
 
 		debug_rt_mutex_print_deadlock(waiter);
 
-		schedule_rt_mutex(lock);
+		schedule();
 
 		raw_spin_lock(&lock->wait_lock);
 		set_current_state(state);
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 7844f8f..4f5f83c 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -15,28 +15,6 @@
 #include <linux/rtmutex.h>
 
 /*
- * The rtmutex in kernel tester is independent of rtmutex debugging. We
- * call schedule_rt_mutex_test() instead of schedule() for the tasks which
- * belong to the tester. That way we can delay the wakeup path of those
- * threads to provoke lock stealing and testing of  complex boosting scenarios.
- */
-#ifdef CONFIG_RT_MUTEX_TESTER
-
-extern void schedule_rt_mutex_test(struct rt_mutex *lock);
-
-#define schedule_rt_mutex(_lock)				\
-  do {								\
-	if (!(current->flags & PF_MUTEX_TESTER))		\
-		schedule();					\
-	else							\
-		schedule_rt_mutex_test(_lock);			\
-  } while (0)
-
-#else
-# define schedule_rt_mutex(_lock)			schedule()
-#endif
-
-/*
  * This is the control structure for tasks blocked on a rt_mutex,
 * which is allocated on the kernel stack of the blocked task.
  *
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d8420c2..3595403 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -164,14 +164,12 @@
 
 static void sched_feat_disable(int i)
 {
-	if (static_key_enabled(&sched_feat_keys[i]))
-		static_key_slow_dec(&sched_feat_keys[i]);
+	static_key_disable(&sched_feat_keys[i]);
 }
 
 static void sched_feat_enable(int i)
 {
-	if (!static_key_enabled(&sched_feat_keys[i]))
-		static_key_slow_inc(&sched_feat_keys[i]);
+	static_key_enable(&sched_feat_keys[i]);
 }
 #else
 static void sched_feat_disable(int i) { };
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 052e026..272d932 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -106,9 +106,10 @@
 }
 EXPORT_SYMBOL_GPL(__wake_up_locked);
 
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr,
+			  void *key)
 {
-	__wake_up_common(q, mode, 1, 0, key);
+	__wake_up_common(q, mode, nr, 0, key);
 }
 EXPORT_SYMBOL_GPL(__wake_up_locked_key);
 
@@ -283,7 +284,7 @@
 	if (!list_empty(&wait->task_list))
 		list_del_init(&wait->task_list);
 	else if (waitqueue_active(q))
-		__wake_up_locked_key(q, mode, key);
+		__wake_up_locked_key(q, mode, 1, key);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL(abort_exclusive_wait);
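
The extra nr argument lets a caller that already holds the queue lock bound
how many exclusive waiters are woken. A minimal sketch, assuming a
hypothetical wait queue pointer q and saved irq flags:

	spin_lock_irqsave(&q->lock, flags);
	/* wake at most two exclusive waiters; a NULL key matches all */
	__wake_up_locked_key(q, TASK_NORMAL, 2, NULL);
	spin_unlock_irqrestore(&q->lock, flags);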
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 7c434c3..a818cbc 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -113,7 +113,8 @@
 		if (kthread_should_stop()) {
 			__set_current_state(TASK_RUNNING);
 			preempt_enable();
-			if (ht->cleanup)
+			/* cleanup must mirror setup */
+			if (ht->cleanup && td->status != HP_THREAD_NONE)
 				ht->cleanup(td->cpu, cpu_online(td->cpu));
 			kfree(td);
 			return 0;
@@ -259,15 +260,6 @@
 {
 	unsigned int cpu;
 
-	/* Unpark any threads that were voluntarily parked. */
-	for_each_cpu_not(cpu, ht->cpumask) {
-		if (cpu_online(cpu)) {
-			struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
-			if (tsk)
-				kthread_unpark(tsk);
-		}
-	}
-
 	/* We need to destroy also the parked threads of offline cpus */
 	for_each_possible_cpu(cpu) {
 		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
@@ -281,19 +273,22 @@
 }
 
 /**
- * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
+ * smpboot_register_percpu_thread_cpumask - Register a per_cpu thread related
+ * 					    to hotplug
  * @plug_thread:	Hotplug thread descriptor
+ * @cpumask:		The cpumask where threads run
  *
  * Creates and starts the threads on all online cpus.
  */
-int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
+int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
+					   const struct cpumask *cpumask)
 {
 	unsigned int cpu;
 	int ret = 0;
 
 	if (!alloc_cpumask_var(&plug_thread->cpumask, GFP_KERNEL))
 		return -ENOMEM;
-	cpumask_copy(plug_thread->cpumask, cpu_possible_mask);
+	cpumask_copy(plug_thread->cpumask, cpumask);
 
 	get_online_cpus();
 	mutex_lock(&smpboot_threads_lock);
@@ -301,9 +296,11 @@
 		ret = __smpboot_create_thread(plug_thread, cpu);
 		if (ret) {
 			smpboot_destroy_threads(plug_thread);
+			free_cpumask_var(plug_thread->cpumask);
 			goto out;
 		}
-		smpboot_unpark_thread(plug_thread, cpu);
+		if (cpumask_test_cpu(cpu, cpumask))
+			smpboot_unpark_thread(plug_thread, cpu);
 	}
 	list_add(&plug_thread->list, &hotplug_threads);
 out:
@@ -311,7 +308,7 @@
 	put_online_cpus();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
+EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread_cpumask);
 
 /**
  * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
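
With the cpumask variant, threads are still created for every possible CPU,
but only those inside the mask are unparked. A sketch of a call, mirroring
the watchdog conversion later in this series:

	err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
						     &watchdog_cpumask);
	if (err)
		pr_err("Failed to create watchdog threads, disabled\n");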
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index ca7d84f..03c3875 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -219,6 +219,7 @@
 cond_syscall(sys_eventfd);
 cond_syscall(sys_eventfd2);
 cond_syscall(sys_memfd_create);
+cond_syscall(sys_userfaultfd);
 
 /* performance counters: */
 cond_syscall(sys_perf_event_open);
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 8727032..53fa971 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -18,6 +18,8 @@
  * This is like the signal handler which runs in kernel mode, but it doesn't
  * try to wake up the @task.
  *
+ * Note: there is no ordering guarantee on works queued here.
+ *
  * RETURNS:
  * 0 if succeeds or -ESRCH.
  */
@@ -108,16 +110,6 @@
 		raw_spin_unlock_wait(&task->pi_lock);
 		smp_mb();
 
-		/* Reverse the list to run the works in fifo order */
-		head = NULL;
-		do {
-			next = work->next;
-			work->next = head;
-			head = work;
-			work = next;
-		} while (work);
-
-		work = head;
 		do {
 			next = work->next;
 			work->func(work);
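
With the reversal loop removed, callbacks now run in the reverse of their
queueing order: task_work_add() pushes at the head of the singly linked list
and task_work_run() consumes from the head. A sketch with hypothetical
callback_head entries:

	task_work_add(task, &work_a, true);	/* queued first */
	task_work_add(task, &work_b, true);	/* queued second */
	/* task_work_run() now calls work_b's func before work_a's */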
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 88a041a..0fe96c7 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -81,13 +81,16 @@
 
 /*
  * limited trace_printk()
- * only %d %u %x %ld %lu %lx %lld %llu %llx %p conversion specifiers allowed
+ * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
  */
 static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
 {
 	char *fmt = (char *) (long) r1;
+	bool str_seen = false;
 	int mod[3] = {};
 	int fmt_cnt = 0;
+	u64 unsafe_addr;
+	char buf[64];
 	int i;
 
 	/*
@@ -114,12 +117,37 @@
 		if (fmt[i] == 'l') {
 			mod[fmt_cnt]++;
 			i++;
-		} else if (fmt[i] == 'p') {
+		} else if (fmt[i] == 'p' || fmt[i] == 's') {
 			mod[fmt_cnt]++;
 			i++;
 			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
 				return -EINVAL;
 			fmt_cnt++;
+			if (fmt[i - 1] == 's') {
+				if (str_seen)
+					/* allow only one '%s' per fmt string */
+					return -EINVAL;
+				str_seen = true;
+
+				switch (fmt_cnt) {
+				case 1:
+					unsafe_addr = r3;
+					r3 = (long) buf;
+					break;
+				case 2:
+					unsafe_addr = r4;
+					r4 = (long) buf;
+					break;
+				case 3:
+					unsafe_addr = r5;
+					r5 = (long) buf;
+					break;
+				}
+				buf[0] = 0;
+				strncpy_from_unsafe(buf,
+						    (void *) (long) unsafe_addr,
+						    sizeof(buf));
+			}
 			continue;
 		}
 
@@ -158,6 +186,35 @@
 	return &bpf_trace_printk_proto;
 }
 
+static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
+{
+	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	struct perf_event *event;
+
+	if (unlikely(index >= array->map.max_entries))
+		return -E2BIG;
+
+	event = (struct perf_event *)array->ptrs[index];
+	if (!event)
+		return -ENOENT;
+
+	/*
+	 * The return value alone does not tell whether the read
+	 * succeeded; that has to be judged elsewhere, e.g. by the
+	 * eBPF program consuming it.
+	 */
+	return perf_event_read_local(event);
+}
+
+const struct bpf_func_proto bpf_perf_event_read_proto = {
+	.func		= bpf_perf_event_read,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
 {
 	switch (func_id) {
@@ -183,6 +240,8 @@
 		return bpf_get_trace_printk_proto();
 	case BPF_FUNC_get_smp_processor_id:
 		return &bpf_get_smp_processor_id_proto;
+	case BPF_FUNC_perf_event_read:
+		return &bpf_perf_event_read_proto;
 	default:
 		return NULL;
 	}
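
From the program side the helper indexes a perf event array map like any
other array; a sketch assuming a hypothetical BPF_MAP_TYPE_PERF_EVENT_ARRAY
map named counters with one slot per CPU:

	/* read this CPU's counter; negative values indicate errors */
	u64 count = bpf_perf_event_read(&counters,
					bpf_get_smp_processor_id());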
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index b7d0cdd..c995644 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -165,11 +165,9 @@
 static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
 					    void *addr, void *dest)
 {
-	long ret;
 	int maxlen = get_rloc_len(*(u32 *)dest);
 	u8 *dst = get_rloc_data(dest);
-	u8 *src = addr;
-	mm_segment_t old_fs = get_fs();
+	long ret;
 
 	if (!maxlen)
 		return;
@@ -178,23 +176,13 @@
 	 * Try to get string again, since the string can be changed while
 	 * probing.
 	 */
-	set_fs(KERNEL_DS);
-	pagefault_disable();
-
-	do
-		ret = __copy_from_user_inatomic(dst++, src++, 1);
-	while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
-
-	dst[-1] = '\0';
-	pagefault_enable();
-	set_fs(old_fs);
+	ret = strncpy_from_unsafe(dst, addr, maxlen);
 
 	if (ret < 0) {	/* Failed to fetch string */
-		((u8 *)get_rloc_data(dest))[0] = '\0';
+		dst[0] = '\0';
 		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
 	} else {
-		*(u32 *)dest = make_data_rloc(src - (u8 *)addr,
-					      get_rloc_offs(*(u32 *)dest));
+		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
 	}
 }
 NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index f65a0a0..88fefa6 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -39,6 +39,7 @@
 	cred->cap_inheritable = CAP_EMPTY_SET;
 	cred->cap_permitted = CAP_FULL_SET;
 	cred->cap_effective = CAP_FULL_SET;
+	cred->cap_ambient = CAP_EMPTY_SET;
 	cred->cap_bset = CAP_FULL_SET;
 #ifdef CONFIG_KEYS
 	key_put(cred->request_key_auth);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index a6ffa43..64ed1c3 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -24,6 +24,7 @@
 #include <asm/irq_regs.h>
 #include <linux/kvm_para.h>
 #include <linux/perf_event.h>
+#include <linux/kthread.h>
 
 /*
  * The run state of the lockup detectors is controlled by the content of the
@@ -66,7 +67,26 @@
 #define for_each_watchdog_cpu(cpu) \
 	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
 
+/*
+ * The 'watchdog_running' variable is set to 1 when the watchdog threads
+ * are registered/started and is set to 0 when the watchdog threads are
+ * unregistered/stopped, so it is an indicator of whether the threads exist.
+ */
 static int __read_mostly watchdog_running;
+/*
+ * If a subsystem has a need to deactivate the watchdog temporarily, it
+ * can use the suspend/resume interface to achieve this. The content of
+ * the 'watchdog_suspended' variable reflects this state. Existing threads
+ * are parked/unparked by the lockup_detector_{suspend|resume} functions
+ * (see comment blocks pertaining to those functions for further details).
+ *
+ * 'watchdog_suspended' also prevents threads from being registered/started
+ * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
+ * of 'watchdog_running' cannot change while the watchdog is deactivated
+ * temporarily (see related code in 'proc' handlers).
+ */
+static int __read_mostly watchdog_suspended;
+
 static u64 __read_mostly sample_period;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -613,46 +633,9 @@
 	}
 }
 
-void watchdog_nmi_enable_all(void)
-{
-	int cpu;
-
-	mutex_lock(&watchdog_proc_mutex);
-
-	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
-		goto unlock;
-
-	get_online_cpus();
-	for_each_watchdog_cpu(cpu)
-		watchdog_nmi_enable(cpu);
-	put_online_cpus();
-
-unlock:
-	mutex_unlock(&watchdog_proc_mutex);
-}
-
-void watchdog_nmi_disable_all(void)
-{
-	int cpu;
-
-	mutex_lock(&watchdog_proc_mutex);
-
-	if (!watchdog_running)
-		goto unlock;
-
-	get_online_cpus();
-	for_each_watchdog_cpu(cpu)
-		watchdog_nmi_disable(cpu);
-	put_online_cpus();
-
-unlock:
-	mutex_unlock(&watchdog_proc_mutex);
-}
 #else
 static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
 static void watchdog_nmi_disable(unsigned int cpu) { return; }
-void watchdog_nmi_enable_all(void) {}
-void watchdog_nmi_disable_all(void) {}
 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
 static struct smp_hotplug_thread watchdog_threads = {
@@ -666,62 +649,102 @@
 	.unpark			= watchdog_enable,
 };
 
-static void restart_watchdog_hrtimer(void *info)
+/*
+ * park all watchdog threads that are specified in 'watchdog_cpumask'
+ */
+static int watchdog_park_threads(void)
 {
-	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
-	int ret;
+	int cpu, ret = 0;
 
-	/*
-	 * No need to cancel and restart hrtimer if it is currently executing
-	 * because it will reprogram itself with the new period now.
-	 * We should never see it unqueued here because we are running per-cpu
-	 * with interrupts disabled.
-	 */
-	ret = hrtimer_try_to_cancel(hrtimer);
-	if (ret == 1)
-		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
-				HRTIMER_MODE_REL_PINNED);
+	get_online_cpus();
+	for_each_watchdog_cpu(cpu) {
+		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
+		if (ret)
+			break;
+	}
+	if (ret) {
+		for_each_watchdog_cpu(cpu)
+			kthread_unpark(per_cpu(softlockup_watchdog, cpu));
+	}
+	put_online_cpus();
+
+	return ret;
 }
 
-static void update_watchdog(int cpu)
-{
-	/*
-	 * Make sure that perf event counter will adopt to a new
-	 * sampling period. Updating the sampling period directly would
-	 * be much nicer but we do not have an API for that now so
-	 * let's use a big hammer.
-	 * Hrtimer will adopt the new period on the next tick but this
-	 * might be late already so we have to restart the timer as well.
-	 */
-	watchdog_nmi_disable(cpu);
-	smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
-	watchdog_nmi_enable(cpu);
-}
-
-static void update_watchdog_all_cpus(void)
+/*
+ * unpark all watchdog threads that are specified in 'watchdog_cpumask'
+ */
+static void watchdog_unpark_threads(void)
 {
 	int cpu;
 
 	get_online_cpus();
 	for_each_watchdog_cpu(cpu)
-		update_watchdog(cpu);
+		kthread_unpark(per_cpu(softlockup_watchdog, cpu));
 	put_online_cpus();
 }
 
+/*
+ * Suspend the hard and soft lockup detector by parking the watchdog threads.
+ */
+int lockup_detector_suspend(void)
+{
+	int ret = 0;
+
+	mutex_lock(&watchdog_proc_mutex);
+	/*
+	 * Multiple suspend requests can be active in parallel (counted by
+	 * the 'watchdog_suspended' variable). If the watchdog threads are
+	 * running, the first caller takes care that they will be parked.
+	 * The state of 'watchdog_running' cannot change while a suspend
+	 * request is active (see related code in 'proc' handlers).
+	 */
+	if (watchdog_running && !watchdog_suspended)
+		ret = watchdog_park_threads();
+
+	if (ret == 0)
+		watchdog_suspended++;
+
+	mutex_unlock(&watchdog_proc_mutex);
+
+	return ret;
+}
+
+/*
+ * Resume the hard and soft lockup detector by unparking the watchdog threads.
+ */
+void lockup_detector_resume(void)
+{
+	mutex_lock(&watchdog_proc_mutex);
+
+	watchdog_suspended--;
+	/*
+	 * The watchdog threads are unparked if they were previously running
+	 * and if there is no more active suspend request.
+	 */
+	if (watchdog_running && !watchdog_suspended)
+		watchdog_unpark_threads();
+
+	mutex_unlock(&watchdog_proc_mutex);
+}
+
+static void update_watchdog_all_cpus(void)
+{
+	watchdog_park_threads();
+	watchdog_unpark_threads();
+}
+
 static int watchdog_enable_all_cpus(void)
 {
 	int err = 0;
 
 	if (!watchdog_running) {
-		err = smpboot_register_percpu_thread(&watchdog_threads);
+		err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
+							     &watchdog_cpumask);
 		if (err)
 			pr_err("Failed to create watchdog threads, disabled\n");
-		else {
-			if (smpboot_update_cpumask_percpu_thread(
-				    &watchdog_threads, &watchdog_cpumask))
-				pr_err("Failed to set cpumask for watchdog threads\n");
+		else
 			watchdog_running = 1;
-		}
 	} else {
 		/*
 		 * Enable/disable the lockup detectors or
@@ -787,6 +810,12 @@
 
 	mutex_lock(&watchdog_proc_mutex);
 
+	if (watchdog_suspended) {
+		/* no parameter changes allowed while watchdog is suspended */
+		err = -EAGAIN;
+		goto out;
+	}
+
 	/*
 	 * If the parameter is being read return the state of the corresponding
 	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
@@ -872,6 +901,12 @@
 
 	mutex_lock(&watchdog_proc_mutex);
 
+	if (watchdog_suspended) {
+		/* no parameter changes allowed while watchdog is suspended */
+		err = -EAGAIN;
+		goto out;
+	}
+
 	old = ACCESS_ONCE(watchdog_thresh);
 	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 
@@ -903,6 +938,13 @@
 	int err;
 
 	mutex_lock(&watchdog_proc_mutex);
+
+	if (watchdog_suspended) {
+		/* no parameter changes allowed while watchdog is suspended */
+		err = -EAGAIN;
+		goto out;
+	}
+
 	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
 	if (!err && write) {
 		/* Remove impossible cpus to keep sysctl output cleaner. */
@@ -920,6 +962,7 @@
 				pr_err("cpumask update failed\n");
 		}
 	}
+out:
 	mutex_unlock(&watchdog_proc_mutex);
 	return err;
 }
@@ -932,10 +975,8 @@
 
 #ifdef CONFIG_NO_HZ_FULL
 	if (tick_nohz_full_enabled()) {
-		if (!cpumask_empty(tick_nohz_full_mask))
-			pr_info("Disabling watchdog on nohz_full cores by default\n");
-		cpumask_andnot(&watchdog_cpumask, cpu_possible_mask,
-			       tick_nohz_full_mask);
+		pr_info("Disabling watchdog on nohz_full cores by default\n");
+		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
 	} else
 		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
 #else
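
A sketch of how another subsystem is expected to use the new suspend/resume
pair; the long-running operation is hypothetical. Requests nest, and a zero
return means the watchdog threads are parked:

	if (!lockup_detector_suspend()) {
		do_long_nmi_blind_operation();	/* hypothetical */
		lockup_detector_resume();
	}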
diff --git a/lib/Kconfig b/lib/Kconfig
index 3a2ef67..a165552 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -460,16 +460,6 @@
 config LRU_CACHE
 	tristate
 
-config AVERAGE
-	bool "Averaging functions"
-	help
-	  This option is provided for the case where no in-kernel-tree
-	  modules require averaging functions, but a module built outside
-	  the kernel tree does. Such modules that use library averaging
-	  functions require Y here.
-
-	  If unsure, say N.
-
 config CLZ_TAB
 	bool
 
@@ -521,6 +511,13 @@
 
 source "lib/fonts/Kconfig"
 
+config SG_SPLIT
+	def_bool n
+	help
+	 Provides a helper to split scatterlists into chunks, each chunk being
+	 a scatterlist. This should be selected by a driver or an API which
+	 wishes to split a scatterlist among multiple DMA channels.
+
 #
 # sg chaining option
 #
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3e0b662..ab76b99 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -916,12 +916,6 @@
 	 This allows rt mutex semantics violations and rt mutex related
 	 deadlocks (lockups) to be detected and reported automatically.
 
-config RT_MUTEX_TESTER
-	bool "Built-in scriptable tester for rt-mutexes"
-	depends on DEBUG_KERNEL && RT_MUTEXES && BROKEN
-	help
-	  This option enables a rt-mutex tester.
-
 config DEBUG_SPINLOCK
 	bool "Spinlock and rw-lock debugging: basic checks"
 	depends on DEBUG_KERNEL
@@ -1528,6 +1522,13 @@
 	  and to test how the mmc host driver handles retries from
 	  the block device.
 
+config FAIL_FUTEX
+	bool "Fault-injection capability for futexes"
+	select DEBUG_FS
+	depends on FAULT_INJECTION && FUTEX
+	help
+	  Provide fault-injection capability for futexes.
+
 config FAULT_INJECTION_DEBUG_FS
 	bool "Debugfs entries for fault-injection capabilities"
 	depends on FAULT_INJECTION && SYSFS && DEBUG_FS
@@ -1826,6 +1827,15 @@
 	        memtest=17, mean do 17 test patterns.
 	  If you are unsure how to answer this question, answer N.
 
+config TEST_STATIC_KEYS
+	tristate "Test static keys"
+	default n
+	depends on m
+	help
+	  Test the static key interfaces.
+
+	  If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
diff --git a/lib/Makefile b/lib/Makefile
index 6897b52..f01c558 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -39,6 +39,8 @@
 obj-$(CONFIG_TEST_LKM) += test_module.o
 obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
 obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
+obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
+obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -138,8 +140,6 @@
 
 obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
 
-obj-$(CONFIG_AVERAGE) += average.o
-
 obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
 
 obj-$(CONFIG_CORDIC) += cordic.o
@@ -160,6 +160,7 @@
 
 obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
 
+obj-$(CONFIG_SG_SPLIT) += sg_split.o
 obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
 
 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 1298c05e..2886eba 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -102,6 +102,9 @@
 
 ATOMIC64_OPS(add, +=)
 ATOMIC64_OPS(sub, -=)
+ATOMIC64_OP(and, &=)
+ATOMIC64_OP(or, |=)
+ATOMIC64_OP(xor, ^=)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 0211d30..83c33a5b 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -16,8 +16,39 @@
 #include <linux/kernel.h>
 #include <linux/atomic.h>
 
+#define TEST(bit, op, c_op, val)				\
+do {								\
+	atomic##bit##_set(&v, v0);				\
+	r = v0;							\
+	atomic##bit##_##op(val, &v);				\
+	r c_op val;						\
+	WARN(atomic##bit##_read(&v) != r, "%Lx != %Lx\n",	\
+		(unsigned long long)atomic##bit##_read(&v),	\
+		(unsigned long long)r);				\
+} while (0)
+
+static __init void test_atomic(void)
+{
+	int v0 = 0xaaa31337;
+	int v1 = 0xdeadbeef;
+	int onestwos = 0x11112222;
+	int one = 1;
+
+	atomic_t v;
+	int r;
+
+	TEST(, add, +=, onestwos);
+	TEST(, add, +=, -one);
+	TEST(, sub, -=, onestwos);
+	TEST(, sub, -=, -one);
+	TEST(, or, |=, v1);
+	TEST(, and, &=, v1);
+	TEST(, xor, ^=, v1);
+	TEST(, andnot, &= ~, v1);
+}
+
 #define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
-static __init int test_atomic64(void)
+static __init void test_atomic64(void)
 {
 	long long v0 = 0xaaa31337c001d00dLL;
 	long long v1 = 0xdeadbeefdeafcafeLL;
@@ -34,15 +65,14 @@
 	BUG_ON(v.counter != r);
 	BUG_ON(atomic64_read(&v) != r);
 
-	INIT(v0);
-	atomic64_add(onestwos, &v);
-	r += onestwos;
-	BUG_ON(v.counter != r);
-
-	INIT(v0);
-	atomic64_add(-one, &v);
-	r += -one;
-	BUG_ON(v.counter != r);
+	TEST(64, add, +=, onestwos);
+	TEST(64, add, +=, -one);
+	TEST(64, sub, -=, onestwos);
+	TEST(64, sub, -=, -one);
+	TEST(64, or, |=, v1);
+	TEST(64, and, &=, v1);
+	TEST(64, xor, ^=, v1);
+	TEST(64, andnot, &= ~, v1);
 
 	INIT(v0);
 	r += onestwos;
@@ -55,16 +85,6 @@
 	BUG_ON(v.counter != r);
 
 	INIT(v0);
-	atomic64_sub(onestwos, &v);
-	r -= onestwos;
-	BUG_ON(v.counter != r);
-
-	INIT(v0);
-	atomic64_sub(-one, &v);
-	r -= -one;
-	BUG_ON(v.counter != r);
-
-	INIT(v0);
 	r -= onestwos;
 	BUG_ON(atomic64_sub_return(onestwos, &v) != r);
 	BUG_ON(v.counter != r);
@@ -147,6 +167,12 @@
 	BUG_ON(!atomic64_inc_not_zero(&v));
 	r += one;
 	BUG_ON(v.counter != r);
+}
+
+static __init int test_atomics(void)
+{
+	test_atomic();
+	test_atomic64();
 
 #ifdef CONFIG_X86
 	pr_info("passed for %s platform %s CX8 and %s SSE\n",
@@ -166,4 +192,4 @@
 	return 0;
 }
 
-core_initcall(test_atomic64);
+core_initcall(test_atomics);
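
For reference, TEST(64, add, +=, onestwos) expands to essentially the
open-coded sequence it replaces:

	atomic64_set(&v, v0);
	r = v0;
	atomic64_add(onestwos, &v);
	r += onestwos;
	WARN(atomic64_read(&v) != r, "%Lx != %Lx\n",
	     (unsigned long long)atomic64_read(&v),
	     (unsigned long long)r);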
diff --git a/lib/average.c b/lib/average.c
deleted file mode 100644
index 114d1be..0000000
--- a/lib/average.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * lib/average.c
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2.  See the file COPYING for more details.
- */
-
-#include <linux/export.h>
-#include <linux/average.h>
-#include <linux/kernel.h>
-#include <linux/bug.h>
-#include <linux/log2.h>
-
-/**
- * DOC: Exponentially Weighted Moving Average (EWMA)
- *
- * These are generic functions for calculating Exponentially Weighted Moving
- * Averages (EWMA). We keep a structure with the EWMA parameters and a scaled
- * up internal representation of the average value to prevent rounding errors.
- * The factor for scaling up and the exponential weight (or decay rate) have to
- * be specified thru the init fuction. The structure should not be accessed
- * directly but only thru the helper functions.
- */
-
-/**
- * ewma_init() - Initialize EWMA parameters
- * @avg: Average structure
- * @factor: Factor to use for the scaled up internal value. The maximum value
- *	of averages can be ULONG_MAX/(factor*weight). For performance reasons
- *	factor has to be a power of 2.
- * @weight: Exponential weight, or decay rate. This defines how fast the
- *	influence of older values decreases. For performance reasons weight has
- *	to be a power of 2.
- *
- * Initialize the EWMA parameters for a given struct ewma @avg.
- */
-void ewma_init(struct ewma *avg, unsigned long factor, unsigned long weight)
-{
-	WARN_ON(!is_power_of_2(weight) || !is_power_of_2(factor));
-
-	avg->weight = ilog2(weight);
-	avg->factor = ilog2(factor);
-	avg->internal = 0;
-}
-EXPORT_SYMBOL(ewma_init);
-
-/**
- * ewma_add() - Exponentially weighted moving average (EWMA)
- * @avg: Average structure
- * @val: Current value
- *
- * Add a sample to the average.
- */
-struct ewma *ewma_add(struct ewma *avg, unsigned long val)
-{
-	unsigned long internal = ACCESS_ONCE(avg->internal);
-
-	ACCESS_ONCE(avg->internal) = internal ?
-		(((internal << avg->weight) - internal) +
-			(val << avg->factor)) >> avg->weight :
-		(val << avg->factor);
-	return avg;
-}
-EXPORT_SYMBOL(ewma_add);
diff --git a/lib/genalloc.c b/lib/genalloc.c
index daf0afb..116a166 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -160,6 +160,7 @@
 		pool->min_alloc_order = min_alloc_order;
 		pool->algo = gen_pool_first_fit;
 		pool->data = NULL;
+		pool->name = NULL;
 	}
 	return pool;
 }
@@ -252,8 +253,8 @@
 
 		kfree(chunk);
 	}
+	kfree_const(pool->name);
 	kfree(pool);
-	return;
 }
 EXPORT_SYMBOL(gen_pool_destroy);
 
@@ -570,54 +571,89 @@
 	gen_pool_destroy(*(struct gen_pool **)res);
 }
 
+static int devm_gen_pool_match(struct device *dev, void *res, void *data)
+{
+	struct gen_pool **p = res;
+
+	/* NULL data matches only a pool without an assigned name */
+	if (!data && !(*p)->name)
+		return 1;
+
+	if (!data || !(*p)->name)
+		return 0;
+
+	return !strcmp((*p)->name, data);
+}
+
+/**
+ * gen_pool_get - Obtain the gen_pool (if any) for a device
+ * @dev: device to retrieve the gen_pool from
+ * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
+ *
+ * Returns the gen_pool for the device if one is present, or NULL.
+ */
+struct gen_pool *gen_pool_get(struct device *dev, const char *name)
+{
+	struct gen_pool **p;
+
+	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
+			(void *)name);
+	if (!p)
+		return NULL;
+	return *p;
+}
+EXPORT_SYMBOL_GPL(gen_pool_get);
+
 /**
  * devm_gen_pool_create - managed gen_pool_create
  * @dev: device that provides the gen_pool
  * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
- * @nid: node id of the node the pool structure should be allocated on, or -1
+ * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
+ * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
  *
  * Create a new special memory pool that can be used to manage special purpose
  * memory not managed by the regular kmalloc/kfree interface. The pool will be
  * automatically destroyed by the device management code.
  */
 struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
-		int nid)
+				      int nid, const char *name)
 {
 	struct gen_pool **ptr, *pool;
+	const char *pool_name = NULL;
+
+	/* Check that the genpool to be created is uniquely addressed on the device */
+	if (gen_pool_get(dev, name))
+		return ERR_PTR(-EINVAL);
+
+	if (name) {
+		pool_name = kstrdup_const(name, GFP_KERNEL);
+		if (!pool_name)
+			return ERR_PTR(-ENOMEM);
+	}
 
 	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
 	if (!ptr)
-		return NULL;
+		goto free_pool_name;
 
 	pool = gen_pool_create(min_alloc_order, nid);
-	if (pool) {
-		*ptr = pool;
-		devres_add(dev, ptr);
-	} else {
-		devres_free(ptr);
-	}
+	if (!pool)
+		goto free_devres;
+
+	*ptr = pool;
+	pool->name = pool_name;
+	devres_add(dev, ptr);
 
 	return pool;
+
+free_devres:
+	devres_free(ptr);
+free_pool_name:
+	kfree_const(pool_name);
+
+	return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL(devm_gen_pool_create);
 
-/**
- * gen_pool_get - Obtain the gen_pool (if any) for a device
- * @dev: device to retrieve the gen_pool from
- *
- * Returns the gen_pool for the device if one is present, or NULL.
- */
-struct gen_pool *gen_pool_get(struct device *dev)
-{
-	struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
-					NULL);
-
-	if (!p)
-		return NULL;
-	return *p;
-}
-EXPORT_SYMBOL_GPL(gen_pool_get);
-
 #ifdef CONFIG_OF
 /**
  * of_gen_pool_get - find a pool by phandle property
@@ -633,16 +669,30 @@
 	const char *propname, int index)
 {
 	struct platform_device *pdev;
-	struct device_node *np_pool;
+	struct device_node *np_pool, *parent;
+	const char *name = NULL;
+	struct gen_pool *pool = NULL;
 
 	np_pool = of_parse_phandle(np, propname, index);
 	if (!np_pool)
 		return NULL;
+
 	pdev = of_find_device_by_node(np_pool);
+	if (!pdev) {
+		/* Check if named gen_pool is created by parent node device */
+		parent = of_get_parent(np_pool);
+		pdev = of_find_device_by_node(parent);
+		of_node_put(parent);
+
+		of_property_read_string(np_pool, "label", &name);
+		if (!name)
+			name = np_pool->name;
+	}
+	if (pdev)
+		pool = gen_pool_get(&pdev->dev, name);
 	of_node_put(np_pool);
-	if (!pdev)
-		return NULL;
-	return gen_pool_get(&pdev->dev);
+
+	return pool;
 }
 EXPORT_SYMBOL_GPL(of_gen_pool_get);
 #endif /* CONFIG_OF */
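
With names attached, a single device can own several pools and look them up
individually; a sketch with hypothetical pool names:

	struct gen_pool *rx, *tx, *found;

	/* devm_gen_pool_create() now returns ERR_PTR() on failure */
	rx = devm_gen_pool_create(dev, 4, NUMA_NO_NODE, "rx-pool");
	tx = devm_gen_pool_create(dev, 4, NUMA_NO_NODE, "tx-pool");
	/* a NULL name would match only a pool created without one */
	found = gen_pool_get(dev, "rx-pool");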
diff --git a/lib/lockref.c b/lib/lockref.c
index 494994b..5a92189 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -4,14 +4,6 @@
 #if USE_CMPXCHG_LOCKREF
 
 /*
- * Allow weakly-ordered memory architectures to provide barrier-less
- * cmpxchg semantics for lockref updates.
- */
-#ifndef cmpxchg64_relaxed
-# define cmpxchg64_relaxed cmpxchg64
-#endif
-
-/*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
  */
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index d105a9f..bafa993 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -105,16 +105,12 @@
  **/
 struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
 {
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-	struct scatterlist *ret = &sgl[nents - 1];
-#else
 	struct scatterlist *sg, *ret = NULL;
 	unsigned int i;
 
 	for_each_sg(sgl, sg, nents, i)
 		ret = sg;
 
-#endif
 #ifdef CONFIG_DEBUG_SG
 	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
 	BUG_ON(!sg_is_last(ret));
diff --git a/lib/sg_split.c b/lib/sg_split.c
new file mode 100644
index 0000000..b063410
--- /dev/null
+++ b/lib/sg_split.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015 Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * Scatterlist splitting helpers.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+struct sg_splitter {
+	struct scatterlist *in_sg0;
+	int nents;
+	off_t skip_sg0;
+	unsigned int length_last_sg;
+
+	struct scatterlist *out_sg;
+};
+
+static int sg_calculate_split(struct scatterlist *in, int nents, int nb_splits,
+			      off_t skip, const size_t *sizes,
+			      struct sg_splitter *splitters, bool mapped)
+{
+	int i;
+	unsigned int sglen;
+	size_t size = sizes[0], len;
+	struct sg_splitter *curr = splitters;
+	struct scatterlist *sg;
+
+	for (i = 0; i < nb_splits; i++) {
+		splitters[i].in_sg0 = NULL;
+		splitters[i].nents = 0;
+	}
+
+	for_each_sg(in, sg, nents, i) {
+		sglen = mapped ? sg_dma_len(sg) : sg->length;
+		if (skip > sglen) {
+			skip -= sglen;
+			continue;
+		}
+
+		len = min_t(size_t, size, sglen - skip);
+		if (!curr->in_sg0) {
+			curr->in_sg0 = sg;
+			curr->skip_sg0 = skip;
+		}
+		size -= len;
+		curr->nents++;
+		curr->length_last_sg = len;
+
+		while (!size && (skip + len < sglen) && (--nb_splits > 0)) {
+			curr++;
+			size = *(++sizes);
+			skip += len;
+			len = min_t(size_t, size, sglen - skip);
+
+			curr->in_sg0 = sg;
+			curr->skip_sg0 = skip;
+			curr->nents = 1;
+			curr->length_last_sg = len;
+			size -= len;
+		}
+		skip = 0;
+
+		if (!size && --nb_splits > 0) {
+			curr++;
+			size = *(++sizes);
+		}
+
+		if (!nb_splits)
+			break;
+	}
+
+	return (size || !splitters[0].in_sg0) ? -EINVAL : 0;
+}
+
+static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits)
+{
+	int i, j;
+	struct scatterlist *in_sg, *out_sg;
+	struct sg_splitter *split;
+
+	for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+		in_sg = split->in_sg0;
+		out_sg = split->out_sg;
+		for (j = 0; j < split->nents; j++, out_sg++) {
+			*out_sg = *in_sg;
+			if (!j) {
+				out_sg->offset += split->skip_sg0;
+				out_sg->length -= split->skip_sg0;
+			} else {
+				out_sg->offset = 0;
+			}
+			sg_dma_address(out_sg) = 0;
+			sg_dma_len(out_sg) = 0;
+			in_sg = sg_next(in_sg);
+		}
+		out_sg[-1].length = split->length_last_sg;
+		sg_mark_end(out_sg - 1);
+	}
+}
+
+static void sg_split_mapped(struct sg_splitter *splitters, const int nb_splits)
+{
+	int i, j;
+	struct scatterlist *in_sg, *out_sg;
+	struct sg_splitter *split;
+
+	for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+		in_sg = split->in_sg0;
+		out_sg = split->out_sg;
+		for (j = 0; j < split->nents; j++, out_sg++) {
+			sg_dma_address(out_sg) = sg_dma_address(in_sg);
+			sg_dma_len(out_sg) = sg_dma_len(in_sg);
+			if (!j) {
+				sg_dma_address(out_sg) += split->skip_sg0;
+				sg_dma_len(out_sg) -= split->skip_sg0;
+			}
+			in_sg = sg_next(in_sg);
+		}
+		sg_dma_len(--out_sg) = split->length_last_sg;
+	}
+}
+
+/**
+ * sg_split - split a scatterlist into several scatterlists
+ * @in: the input sg list
+ * @in_mapped_nents: the result of a dma_map_sg(in, ...), or 0 if not mapped.
+ * @skip: the number of bytes to skip in the input sg list
+ * @nb_splits: the number of desired sg outputs
+ * @split_sizes: the respective size of each output sg list in bytes
+ * @out: an array where to store the allocated output sg lists
+ * @out_mapped_nents: the number of mapped sg entries in each resulting sg
+ *                    list. May be NULL if the sglist is not mapped
+ *                    (in_mapped_nents = 0)
+ * @gfp_mask: the allocation flag
+ *
+ * This function splits the input sg list into nb_splits sg lists, which are
+ * allocated and stored into out.
+ * The @in list is split into:
+ *  - @out[0], which covers bytes [@skip .. @skip + @split_sizes[0] - 1] of @in
+ *  - @out[1], which covers bytes [@skip + @split_sizes[0] ..
+ *                                 @skip + @split_sizes[0] + @split_sizes[1] - 1]
+ * and so on.
+ * It is the caller's duty to kfree() the out array members.
+ *
+ * Returns 0 upon success, or a negative error code
+ */
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+	     const off_t skip, const int nb_splits,
+	     const size_t *split_sizes,
+	     struct scatterlist **out, int *out_mapped_nents,
+	     gfp_t gfp_mask)
+{
+	int i, ret;
+	struct sg_splitter *splitters;
+
+	splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask);
+	if (!splitters)
+		return -ENOMEM;
+
+	ret = sg_calculate_split(in, sg_nents(in), nb_splits, skip, split_sizes,
+			   splitters, false);
+	if (ret < 0)
+		goto err;
+
+	ret = -ENOMEM;
+	for (i = 0; i < nb_splits; i++) {
+		splitters[i].out_sg = kmalloc_array(splitters[i].nents,
+						    sizeof(struct scatterlist),
+						    gfp_mask);
+		if (!splitters[i].out_sg)
+			goto err;
+	}
+
+	/*
+	 * The order of these 3 calls is important and should be kept.
+	 */
+	sg_split_phys(splitters, nb_splits);
+	ret = sg_calculate_split(in, in_mapped_nents, nb_splits, skip,
+				 split_sizes, splitters, true);
+	if (ret < 0)
+		goto err;
+	sg_split_mapped(splitters, nb_splits);
+
+	for (i = 0; i < nb_splits; i++) {
+		out[i] = splitters[i].out_sg;
+		if (out_mapped_nents)
+			out_mapped_nents[i] = splitters[i].nents;
+	}
+
+	kfree(splitters);
+	return 0;
+
+err:
+	for (i = 0; i < nb_splits; i++)
+		kfree(splitters[i].out_sg);
+	kfree(splitters);
+	return ret;
+}
+EXPORT_SYMBOL(sg_split);
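
A sketch of a caller splitting an already DMA-mapped list into two 1 KiB
chunks; dev, sgl and nents are hypothetical:

	size_t sizes[2] = { 1024, 1024 };
	struct scatterlist *out[2];
	int out_nents[2], mapped, ret;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	ret = sg_split(sgl, mapped, 0, 2, sizes, out, out_nents, GFP_KERNEL);
	if (!ret) {
		/* use out[0]/out[1], then free them as the kernel-doc says */
		kfree(out[0]);
		kfree(out[1]);
	}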
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 7f58c73..d137739 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -18,10 +18,12 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/filter.h>
+#include <linux/bpf.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/random.h>
+#include <linux/highmem.h>
 
 /* General test specific settings */
 #define MAX_SUBTESTS	3
@@ -55,6 +57,7 @@
 /* Flags that can be passed to test cases */
 #define FLAG_NO_DATA		BIT(0)
 #define FLAG_EXPECTED_FAIL	BIT(1)
+#define FLAG_SKB_FRAG		BIT(2)
 
 enum {
 	CLASSIC  = BIT(6),	/* Old BPF instructions only. */
@@ -80,6 +83,7 @@
 		__u32 result;
 	} test[MAX_SUBTESTS];
 	int (*fill_helper)(struct bpf_test *self);
+	__u8 frag_data[MAX_DATA];
 };
 
 /* Large test cases need separate allocation and fill handler. */
@@ -355,6 +359,81 @@
 	return __bpf_fill_ja(self, 12, 9);
 }
 
+static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS;
+	struct sock_filter *insn;
+	int i;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	for (i = 0; i < len - 1; i += 2) {
+		insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
+		insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+					 SKF_AD_OFF + SKF_AD_CPU);
+	}
+
+	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
+#define PUSH_CNT 68
+/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
+static int bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS;
+	struct bpf_insn *insn;
+	int i = 0, j, k = 0;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	insn[i++] = BPF_MOV64_REG(R6, R1);
+loop:
+	for (j = 0; j < PUSH_CNT; j++) {
+		insn[i++] = BPF_LD_ABS(BPF_B, 0);
+		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
+		i++;
+		insn[i++] = BPF_MOV64_REG(R1, R6);
+		insn[i++] = BPF_MOV64_IMM(R2, 1);
+		insn[i++] = BPF_MOV64_IMM(R3, 2);
+		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+					 bpf_skb_vlan_push_proto.func - __bpf_call_base);
+		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
+		i++;
+	}
+
+	for (j = 0; j < PUSH_CNT; j++) {
+		insn[i++] = BPF_LD_ABS(BPF_B, 0);
+		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
+		i++;
+		insn[i++] = BPF_MOV64_REG(R1, R6);
+		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+					 bpf_skb_vlan_pop_proto.func - __bpf_call_base);
+		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
+		i++;
+	}
+	if (++k < 5)
+		goto loop;
+
+	for (; i < len - 1; i++)
+		insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xbef);
+
+	insn[len - 1] = BPF_EXIT_INSN();
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
 static struct bpf_test tests[] = {
 	{
 		"TAX",
@@ -3674,6 +3753,9 @@
 		.u.insns_int = {
 			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
 			BPF_ENDIAN(BPF_FROM_BE, R0, 32),
+			BPF_ALU64_REG(BPF_MOV, R1, R0),
+			BPF_ALU64_IMM(BPF_RSH, R1, 32),
+			BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
@@ -3708,6 +3790,9 @@
 		.u.insns_int = {
 			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
 			BPF_ENDIAN(BPF_FROM_LE, R0, 32),
+			BPF_ALU64_REG(BPF_MOV, R1, R0),
+			BPF_ALU64_IMM(BPF_RSH, R1, 32),
+			BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
@@ -4392,6 +4477,618 @@
 		{ { 0, 0xababcbac } },
 		.fill_helper = bpf_fill_maxinsns11,
 	},
+	{
+		"BPF_MAXINSNS: ld_abs+get_processor_id",
+		{ },
+		CLASSIC,
+		{ },
+		{ { 1, 0xbee } },
+		.fill_helper = bpf_fill_ld_abs_get_processor_id,
+	},
+	{
+		"BPF_MAXINSNS: ld_abs+vlan_push/pop",
+		{ },
+		INTERNAL,
+		{ 0x34 },
+		{ { 1, 0xbef } },
+		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
+	},
+	/*
+	 * LD_IND / LD_ABS on fragmented SKBs
+	 */
+	{
+		"LD_IND byte frag",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ },
+		{ {0x40, 0x42} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
+	},
+	{
+		"LD_IND halfword frag",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x4),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ },
+		{ {0x40, 0x4344} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
+	},
+	{
+		"LD_IND word frag",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x8),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ },
+		{ {0x40, 0x21071983} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
+	},
+	{
+		"LD_IND halfword mixed head/frag",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ [0x3e] = 0x25, [0x3f] = 0x05, },
+		{ {0x40, 0x0519} },
+		.frag_data = { 0x19, 0x82 },
+	},
+	{
+		"LD_IND word mixed head/frag",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ [0x3e] = 0x25, [0x3f] = 0x05, },
+		{ {0x40, 0x25051982} },
+		.frag_data = { 0x19, 0x82 },
+	},
+	{
+		"LD_ABS byte frag",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x40),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ },
+		{ {0x40, 0x42} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
+	},
+	{
+		"LD_ABS halfword frag",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x44),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ },
+		{ {0x40, 0x4344} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
+	},
+	{
+		"LD_ABS word frag",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x48),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ },
+		{ {0x40, 0x21071983} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
+	},
+	{
+		"LD_ABS halfword mixed head/frag",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ [0x3e] = 0x25, [0x3f] = 0x05, },
+		{ {0x40, 0x0519} },
+		.frag_data = { 0x19, 0x82 },
+	},
+	{
+		"LD_ABS word mixed head/frag",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3e),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ [0x3e] = 0x25, [0x3f] = 0x05, },
+		{ {0x40, 0x25051982} },
+		.frag_data = { 0x19, 0x82 },
+	},
+	/*
+	 * LD_IND / LD_ABS on non fragmented SKBs
+	 */
+	{
+		/*
+		 * this tests that the JIT/interpreter correctly resets X
+		 * before using it in an LD_IND instruction.
+		 */
+		"LD_IND byte default X",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x1] = 0x42 },
+		{ {0x40, 0x42 } },
+	},
+	{
+		"LD_IND byte positive offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x82 } },
+	},
+	{
+		"LD_IND byte negative offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x05 } },
+	},
+	{
+		"LD_IND halfword positive offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x2),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+		},
+		{ {0x40, 0xdd88 } },
+	},
+	{
+		"LD_IND halfword negative offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x2),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+		},
+		{ {0x40, 0xbb66 } },
+	},
+	{
+		"LD_IND halfword unaligned",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+		},
+		{ {0x40, 0x66cc } },
+	},
+	{
+		"LD_IND word positive offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x4),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xee99ffaa } },
+	},
+	{
+		"LD_IND word negative offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x4),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xaa55bb66 } },
+	},
+	{
+		"LD_IND word unaligned (addr & 3 == 2)",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xbb66cc77 } },
+	},
+	{
+		"LD_IND word unaligned (addr & 3 == 1)",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0x55bb66cc } },
+	},
+	{
+		"LD_IND word unaligned (addr & 3 == 3)",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0x66cc77dd } },
+	},
+	{
+		"LD_ABS byte",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xcc } },
+	},
+	{
+		"LD_ABS halfword",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xdd88 } },
+	},
+	{
+		"LD_ABS halfword unaligned",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x25),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0x99ff } },
+	},
+	{
+		"LD_ABS word",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xaa55bb66 } },
+	},
+	{
+		"LD_ABS word unaligned (addr & 3 == 2)",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x22),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xdd88ee99 } },
+	},
+	{
+		"LD_ABS word unaligned (addr & 3 == 1)",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x21),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0x77dd88ee } },
+	},
+	{
+		"LD_ABS word unaligned (addr & 3 == 3)",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x23),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0x88ee99ff } },
+	},
+	/*
+	 * verify that the interpreter or JIT correctly sets A and X
+	 * to 0.
+	 */
+	{
+		"ADD default X",
+		.u.insns = {
+			/*
+			 * A = 0x42
+			 * A = A + X
+			 * ret A
+			 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x42 } },
+	},
+	{
+		"ADD default A",
+		.u.insns = {
+			/*
+			 * A = A + 0x42
+			 * ret A
+			 */
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0x42),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x42 } },
+	},
+	{
+		"SUB default X",
+		.u.insns = {
+			/*
+			 * A = 0x66
+			 * A = A - X
+			 * ret A
+			 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0x66),
+			BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x66 } },
+	},
+	{
+		"SUB default A",
+		.u.insns = {
+			/*
+			 * A = A - -0x66
+			 * ret A
+			 */
+			BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, -0x66),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x66 } },
+	},
+	{
+		"MUL default X",
+		.u.insns = {
+			/*
+			 * A = 0x42
+			 * A = A * X
+			 * ret A
+			 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+			BPF_STMT(BPF_ALU | BPF_MUL | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x0 } },
+	},
+	{
+		"MUL default A",
+		.u.insns = {
+			/*
+			 * A = A * 0x66
+			 * ret A
+			 */
+			BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 0x66),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x0 } },
+	},
+	{
+		"DIV default X",
+		.u.insns = {
+			/*
+			 * A = 0x42
+			 * A = A / X ; this halts the filter execution if X is 0
+			 * ret 0x42
+			 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+			BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_K, 0x42),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x0 } },
+	},
+	{
+		"DIV default A",
+		.u.insns = {
+			/*
+			 * A = A / 1
+			 * ret A
+			 */
+			BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x0 } },
+	},
+	{
+		"JMP EQ default A",
+		.u.insns = {
+			/*
+			 * cmp A, 0x0, 0, 1
+			 * ret 0x42
+			 * ret 0x66
+			 */
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0, 0, 1),
+			BPF_STMT(BPF_RET | BPF_K, 0x42),
+			BPF_STMT(BPF_RET | BPF_K, 0x66),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x42 } },
+	},
+	{
+		"JMP EQ default X",
+		.u.insns = {
+			/*
+			 * A = 0x0
+			 * cmp A, X, 0, 1
+			 * ret 0x42
+			 * ret 0x66
+			 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0x0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0x0, 0, 1),
+			BPF_STMT(BPF_RET | BPF_K, 0x42),
+			BPF_STMT(BPF_RET | BPF_K, 0x66),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x42 } },
+	},
 };
 
 static struct net_device dev;
@@ -4427,6 +5124,9 @@
 
 static void *generate_test_data(struct bpf_test *test, int sub)
 {
+	struct sk_buff *skb;
+	struct page *page;
+
 	if (test->aux & FLAG_NO_DATA)
 		return NULL;
 
@@ -4434,7 +5134,38 @@
 	 * subtests generate skbs of different sizes based on
 	 * the same data.
 	 */
-	return populate_skb(test->data, test->test[sub].data_size);
+	skb = populate_skb(test->data, test->test[sub].data_size);
+	if (!skb)
+		return NULL;
+
+	if (test->aux & FLAG_SKB_FRAG) {
+		/*
+		 * when the test requires a fragmented skb, add a
+		 * single fragment to the skb, filled with
+		 * test->frag_data.
+		 */
+		void *ptr;
+
+		page = alloc_page(GFP_KERNEL);
+
+		if (!page)
+			goto err_kfree_skb;
+
+		ptr = kmap(page);
+		if (!ptr)
+			goto err_free_page;
+		memcpy(ptr, test->frag_data, MAX_DATA);
+		kunmap(page);
+		skb_add_rx_frag(skb, 0, page, 0, MAX_DATA, MAX_DATA);
+	}
+
+	return skb;
+
+err_free_page:
+	__free_page(page);
+err_kfree_skb:
+	kfree_skb(skb);
+	return NULL;
 }
 
 static void release_test_data(const struct bpf_test *test, void *data)
@@ -4515,6 +5246,8 @@
 		}
 
 		fp->len = flen;
+		/* Type doesn't really matter here as long as it's not unspec. */
+		fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
 		memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
 
 		bpf_prog_select_runtime(fp);
@@ -4545,14 +5278,14 @@
 	u64 start, finish;
 	int ret = 0, i;
 
-	start = ktime_to_us(ktime_get());
+	start = ktime_get_ns();
 
 	for (i = 0; i < runs; i++)
 		ret = BPF_PROG_RUN(fp, data);
 
-	finish = ktime_to_us(ktime_get());
+	finish = ktime_get_ns();
 
-	*duration = (finish - start) * 1000ULL;
+	*duration = finish - start;
 	do_div(*duration, runs);
 
 	return ret;
@@ -4572,6 +5305,11 @@
 			break;
 
 		data = generate_test_data(test, i);
+		if (!data && !(test->aux & FLAG_NO_DATA)) {
+			pr_cont("data generation failed ");
+			err_cnt++;
+			break;
+		}
 		ret = __run_one(fp, data, runs, &duration);
 		release_test_data(test, data);
 
@@ -4587,10 +5325,73 @@
 	return err_cnt;
 }
 
+static char test_name[64];
+module_param_string(test_name, test_name, sizeof(test_name), 0);
+
+static int test_id = -1;
+module_param(test_id, int, 0);
+
+static int test_range[2] = { 0, ARRAY_SIZE(tests) - 1 };
+module_param_array(test_range, int, NULL, 0);
+
+static __init int find_test_index(const char *test_name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tests); i++) {
+		if (!strcmp(tests[i].descr, test_name))
+			return i;
+	}
+	return -1;
+}
+
 static __init int prepare_bpf_tests(void)
 {
 	int i;
 
+	if (test_id >= 0) {
+		/*
+		 * if a test_id was specified, use test_range to
+		 * cover only that test.
+		 */
+		if (test_id >= ARRAY_SIZE(tests)) {
+			pr_err("test_bpf: invalid test_id specified.\n");
+			return -EINVAL;
+		}
+
+		test_range[0] = test_id;
+		test_range[1] = test_id;
+	} else if (*test_name) {
+		/*
+		 * if a test_name was specified, find it and set up
+		 * test_range to cover only that test.
+		 */
+		int idx = find_test_index(test_name);
+
+		if (idx < 0) {
+			pr_err("test_bpf: no test named '%s' found.\n",
+			       test_name);
+			return -EINVAL;
+		}
+		test_range[0] = idx;
+		test_range[1] = idx;
+	} else {
+		/*
+		 * check that the supplied test_range is valid.
+		 */
+		if (test_range[0] >= ARRAY_SIZE(tests) ||
+		    test_range[1] >= ARRAY_SIZE(tests) ||
+		    test_range[0] < 0 || test_range[1] < 0) {
+			pr_err("test_bpf: test_range is out of bounds.\n");
+			return -EINVAL;
+		}
+
+		if (test_range[1] < test_range[0]) {
+			pr_err("test_bpf: test_range ends before it starts.\n");
+			return -EINVAL;
+		}
+	}
+
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
 		if (tests[i].fill_helper &&
 		    tests[i].fill_helper(&tests[i]) < 0)
@@ -4610,6 +5411,11 @@
 	}
 }
 
+static bool exclude_test(int test_id)
+{
+	return test_id < test_range[0] || test_id > test_range[1];
+}
+
 static __init int test_bpf(void)
 {
 	int i, err_cnt = 0, pass_cnt = 0;
@@ -4619,6 +5425,9 @@
 		struct bpf_prog *fp;
 		int err;
 
+		if (exclude_test(i))
+			continue;
+
 		pr_info("#%d %s ", i, tests[i].descr);
 
 		fp = generate_filter(i, &err);
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index c90777e..8c1ad1c 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -16,10 +16,14 @@
 #include <linux/init.h>
 #include <linux/jhash.h>
 #include <linux/kernel.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/rhashtable.h>
+#include <linux/semaphore.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
 
 #define MAX_ENTRIES	1000000
 #define TEST_INSERT_FAIL INT_MAX
@@ -44,11 +48,21 @@
 module_param(size, int, 0);
 MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)");
 
+static int tcount = 10;
+module_param(tcount, int, 0);
+MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)");
+
 struct test_obj {
 	int			value;
 	struct rhash_head	node;
 };
 
+struct thread_data {
+	int id;
+	struct task_struct *task;
+	struct test_obj *objs;
+};
+
 static struct test_obj array[MAX_ENTRIES];
 
 static struct rhashtable_params test_rht_params = {
@@ -59,6 +73,9 @@
 	.nulls_base = (3U << RHT_BASE_SHIFT),
 };
 
+static struct semaphore prestart_sem;
+static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
+
 static int __init test_rht_lookup(struct rhashtable *ht)
 {
 	unsigned int i;
@@ -87,6 +104,8 @@
 				return -EINVAL;
 			}
 		}
+
+		cond_resched_rcu();
 	}
 
 	return 0;
@@ -160,6 +179,8 @@
 		} else if (err) {
 			return err;
 		}
+
+		cond_resched();
 	}
 
 	if (insert_fails)
@@ -183,6 +204,8 @@
 
 			rhashtable_remove_fast(ht, &obj->node, test_rht_params);
 		}
+
+		cond_resched();
 	}
 
 	end = ktime_get_ns();
@@ -193,10 +216,97 @@
 
 static struct rhashtable ht;
 
+static int thread_lookup_test(struct thread_data *tdata)
+{
+	int i, err = 0;
+
+	for (i = 0; i < entries; i++) {
+		struct test_obj *obj;
+		int key = (tdata->id << 16) | i;
+
+		obj = rhashtable_lookup_fast(&ht, &key, test_rht_params);
+		if (obj && (tdata->objs[i].value == TEST_INSERT_FAIL)) {
+			pr_err("  found unexpected object %d\n", key);
+			err++;
+		} else if (!obj && (tdata->objs[i].value != TEST_INSERT_FAIL)) {
+			pr_err("  object %d not found!\n", key);
+			err++;
+		} else if (obj && (obj->value != key)) {
+			pr_err("  wrong object returned (got %d, expected %d)\n",
+			       obj->value, key);
+			err++;
+		}
+	}
+	return err;
+}
+
+static int threadfunc(void *data)
+{
+	int i, step, err = 0, insert_fails = 0;
+	struct thread_data *tdata = data;
+
+	up(&prestart_sem);
+	if (down_interruptible(&startup_sem))
+		pr_err("  thread[%d]: down_interruptible failed\n", tdata->id);
+
+	for (i = 0; i < entries; i++) {
+		tdata->objs[i].value = (tdata->id << 16) | i;
+		err = rhashtable_insert_fast(&ht, &tdata->objs[i].node,
+		                             test_rht_params);
+		if (err == -ENOMEM || err == -EBUSY) {
+			tdata->objs[i].value = TEST_INSERT_FAIL;
+			insert_fails++;
+		} else if (err) {
+			pr_err("  thread[%d]: rhashtable_insert_fast failed\n",
+			       tdata->id);
+			goto out;
+		}
+	}
+	if (insert_fails)
+		pr_info("  thread[%d]: %d insert failures\n",
+		        tdata->id, insert_fails);
+
+	err = thread_lookup_test(tdata);
+	if (err) {
+		pr_err("  thread[%d]: rhashtable_lookup_test failed\n",
+		       tdata->id);
+		goto out;
+	}
+
+	for (step = 10; step > 0; step--) {
+		for (i = 0; i < entries; i += step) {
+			if (tdata->objs[i].value == TEST_INSERT_FAIL)
+				continue;
+			err = rhashtable_remove_fast(&ht, &tdata->objs[i].node,
+			                             test_rht_params);
+			if (err) {
+				pr_err("  thread[%d]: rhashtable_remove_fast failed\n",
+				       tdata->id);
+				goto out;
+			}
+			tdata->objs[i].value = TEST_INSERT_FAIL;
+		}
+		err = thread_lookup_test(tdata);
+		if (err) {
+			pr_err("  thread[%d]: rhashtable_lookup_test (2) failed\n",
+			       tdata->id);
+			goto out;
+		}
+	}
+out:
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+	}
+	return err;
+}
+
 static int __init test_rht_init(void)
 {
-	int i, err;
+	int i, err, started_threads = 0, failed_threads = 0;
 	u64 total_time = 0;
+	struct thread_data *tdata;
+	struct test_obj *objs;
 
 	entries = min(entries, MAX_ENTRIES);
 
@@ -232,6 +342,57 @@
 	do_div(total_time, runs);
 	pr_info("Average test time: %llu\n", total_time);
 
+	if (!tcount)
+		return 0;
+
+	pr_info("Testing concurrent rhashtable access from %d threads\n",
+	        tcount);
+	sema_init(&prestart_sem, 1 - tcount);
+	tdata = vzalloc(tcount * sizeof(struct thread_data));
+	if (!tdata)
+		return -ENOMEM;
+	objs  = vzalloc(tcount * entries * sizeof(struct test_obj));
+	if (!objs) {
+		vfree(tdata);
+		return -ENOMEM;
+	}
+
+	err = rhashtable_init(&ht, &test_rht_params);
+	if (err < 0) {
+		pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+			err);
+		vfree(tdata);
+		vfree(objs);
+		return -EINVAL;
+	}
+	for (i = 0; i < tcount; i++) {
+		tdata[i].id = i;
+		tdata[i].objs = objs + i * entries;
+		tdata[i].task = kthread_run(threadfunc, &tdata[i],
+		                            "rhashtable_thread[%d]", i);
+		if (IS_ERR(tdata[i].task))
+			pr_err(" kthread_run failed for thread %d\n", i);
+		else
+			started_threads++;
+	}
+	if (down_interruptible(&prestart_sem))
+		pr_err("  down_interruptible failed\n");
+	for (i = 0; i < tcount; i++)
+		up(&startup_sem);
+	for (i = 0; i < tcount; i++) {
+		if (IS_ERR(tdata[i].task))
+			continue;
+		if ((err = kthread_stop(tdata[i].task))) {
+			pr_warn("Test failed: thread %d returned: %d\n",
+			        i, err);
+			failed_threads++;
+		}
+	}
+	pr_info("Started %d threads, %d failed\n",
+	        started_threads, failed_threads);
+	rhashtable_destroy(&ht);
+	vfree(tdata);
+	vfree(objs);
 	return 0;
 }
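The two semaphores in this test form a start gate: prestart_sem is initialised to 1 - tcount, so the parent's down() can only complete once every worker has called up(), after which startup_sem is raised tcount times to release all workers at once. A minimal standalone sketch of the pattern follows; the names and the worker body are illustrative, not part of the patch.

	#include <linux/kthread.h>
	#include <linux/semaphore.h>

	static struct semaphore ready_sem; /* parent blocks here until all workers arrive */
	static struct semaphore go_sem = __SEMAPHORE_INITIALIZER(go_sem, 0);

	static int gate_worker(void *data)
	{
		up(&ready_sem);				/* count this worker in */
		if (down_interruptible(&go_sem))	/* wait for the gate to open */
			return -EINTR;
		/* concurrent work starts here for all workers at roughly the same time */
		return 0;
	}

	static void open_start_gate(int nthreads)
	{
		int i;

		/* down() only succeeds after nthreads up() calls: count starts at 1 - nthreads */
		sema_init(&ready_sem, 1 - nthreads);
		for (i = 0; i < nthreads; i++)
			kthread_run(gate_worker, NULL, "gate_worker[%d]", i);
		down(&ready_sem);		/* every worker has checked in */
		for (i = 0; i < nthreads; i++)
			up(&go_sem);		/* release them together */
	}
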
 
diff --git a/lib/test_static_key_base.c b/lib/test_static_key_base.c
new file mode 100644
index 0000000..729447a
--- /dev/null
+++ b/lib/test_static_key_base.c
@@ -0,0 +1,68 @@
+/*
+ * Kernel module for testing static keys.
+ *
+ * Copyright 2015 Akamai Technologies Inc. All Rights Reserved
+ *
+ * Authors:
+ *      Jason Baron       <jbaron@akamai.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/jump_label.h>
+
+/* old keys */
+struct static_key base_old_true_key = STATIC_KEY_INIT_TRUE;
+EXPORT_SYMBOL_GPL(base_old_true_key);
+struct static_key base_inv_old_true_key = STATIC_KEY_INIT_TRUE;
+EXPORT_SYMBOL_GPL(base_inv_old_true_key);
+struct static_key base_old_false_key = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(base_old_false_key);
+struct static_key base_inv_old_false_key = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(base_inv_old_false_key);
+
+/* new keys */
+DEFINE_STATIC_KEY_TRUE(base_true_key);
+EXPORT_SYMBOL_GPL(base_true_key);
+DEFINE_STATIC_KEY_TRUE(base_inv_true_key);
+EXPORT_SYMBOL_GPL(base_inv_true_key);
+DEFINE_STATIC_KEY_FALSE(base_false_key);
+EXPORT_SYMBOL_GPL(base_false_key);
+DEFINE_STATIC_KEY_FALSE(base_inv_false_key);
+EXPORT_SYMBOL_GPL(base_inv_false_key);
+
+static void invert_key(struct static_key *key)
+{
+	if (static_key_enabled(key))
+		static_key_disable(key);
+	else
+		static_key_enable(key);
+}
+
+static int __init test_static_key_base_init(void)
+{
+	invert_key(&base_inv_old_true_key);
+	invert_key(&base_inv_old_false_key);
+	invert_key(&base_inv_true_key.key);
+	invert_key(&base_inv_false_key.key);
+
+	return 0;
+}
+
+static void __exit test_static_key_base_exit(void)
+{
+}
+
+module_init(test_static_key_base_init);
+module_exit(test_static_key_base_exit);
+
+MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_static_keys.c b/lib/test_static_keys.c
new file mode 100644
index 0000000..c61b299
--- /dev/null
+++ b/lib/test_static_keys.c
@@ -0,0 +1,225 @@
+/*
+ * Kernel module for testing static keys.
+ *
+ * Copyright 2015 Akamai Technologies Inc. All Rights Reserved
+ *
+ * Authors:
+ *      Jason Baron       <jbaron@akamai.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/jump_label.h>
+
+/* old keys */
+struct static_key old_true_key	= STATIC_KEY_INIT_TRUE;
+struct static_key old_false_key	= STATIC_KEY_INIT_FALSE;
+
+/* new api */
+DEFINE_STATIC_KEY_TRUE(true_key);
+DEFINE_STATIC_KEY_FALSE(false_key);
+
+/* external */
+extern struct static_key base_old_true_key;
+extern struct static_key base_inv_old_true_key;
+extern struct static_key base_old_false_key;
+extern struct static_key base_inv_old_false_key;
+
+/* new api */
+extern struct static_key_true base_true_key;
+extern struct static_key_true base_inv_true_key;
+extern struct static_key_false base_false_key;
+extern struct static_key_false base_inv_false_key;
+
+
+struct test_key {
+	bool			init_state;
+	struct static_key	*key;
+	bool			(*test_key)(void);
+};
+
+#define test_key_func(key, branch) \
+	({bool func(void) { return branch(key); } func;	})
+
+static void invert_key(struct static_key *key)
+{
+	if (static_key_enabled(key))
+		static_key_disable(key);
+	else
+		static_key_enable(key);
+}
+
+static void invert_keys(struct test_key *keys, int size)
+{
+	struct static_key *previous = NULL;
+	int i;
+
+	for (i = 0; i < size; i++) {
+		if (previous != keys[i].key) {
+			invert_key(keys[i].key);
+			previous = keys[i].key;
+		}
+	}
+}
+
+static int verify_keys(struct test_key *keys, int size, bool invert)
+{
+	int i;
+	bool ret, init;
+
+	for (i = 0; i < size; i++) {
+		ret = static_key_enabled(keys[i].key);
+		init = keys[i].init_state;
+		if (ret != (invert ? !init : init))
+			return -EINVAL;
+		ret = keys[i].test_key();
+		if (static_key_enabled(keys[i].key)) {
+			if (!ret)
+				return -EINVAL;
+		} else {
+			if (ret)
+				return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int __init test_static_key_init(void)
+{
+	int ret;
+	int size;
+
+	struct test_key static_key_tests[] = {
+		/* internal keys - old keys */
+		{
+			.init_state	= true,
+			.key		= &old_true_key,
+			.test_key	= test_key_func(&old_true_key, static_key_true),
+		},
+		{
+			.init_state	= false,
+			.key		= &old_false_key,
+			.test_key	= test_key_func(&old_false_key, static_key_false),
+		},
+		/* internal keys - new keys */
+		{
+			.init_state	= true,
+			.key		= &true_key.key,
+			.test_key	= test_key_func(&true_key, static_branch_likely),
+		},
+		{
+			.init_state	= true,
+			.key		= &true_key.key,
+			.test_key	= test_key_func(&true_key, static_branch_unlikely),
+		},
+		{
+			.init_state	= false,
+			.key		= &false_key.key,
+			.test_key	= test_key_func(&false_key, static_branch_likely),
+		},
+		{
+			.init_state	= false,
+			.key		= &false_key.key,
+			.test_key	= test_key_func(&false_key, static_branch_unlikely),
+		},
+		/* external keys - old keys */
+		{
+			.init_state	= true,
+			.key		= &base_old_true_key,
+			.test_key	= test_key_func(&base_old_true_key, static_key_true),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_inv_old_true_key,
+			.test_key	= test_key_func(&base_inv_old_true_key, static_key_true),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_old_false_key,
+			.test_key	= test_key_func(&base_old_false_key, static_key_false),
+		},
+		{
+			.init_state	= true,
+			.key		= &base_inv_old_false_key,
+			.test_key	= test_key_func(&base_inv_old_false_key, static_key_false),
+		},
+		/* external keys - new keys */
+		{
+			.init_state	= true,
+			.key		= &base_true_key.key,
+			.test_key	= test_key_func(&base_true_key, static_branch_likely),
+		},
+		{
+			.init_state	= true,
+			.key		= &base_true_key.key,
+			.test_key	= test_key_func(&base_true_key, static_branch_unlikely),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_inv_true_key.key,
+			.test_key	= test_key_func(&base_inv_true_key, static_branch_likely),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_inv_true_key.key,
+			.test_key	= test_key_func(&base_inv_true_key, static_branch_unlikely),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_false_key.key,
+			.test_key	= test_key_func(&base_false_key, static_branch_likely),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_false_key.key,
+			.test_key	= test_key_func(&base_false_key, static_branch_unlikely),
+		},
+		{
+			.init_state	= true,
+			.key		= &base_inv_false_key.key,
+			.test_key	= test_key_func(&base_inv_false_key, static_branch_likely),
+		},
+		{
+			.init_state	= true,
+			.key		= &base_inv_false_key.key,
+			.test_key	= test_key_func(&base_inv_false_key, static_branch_unlikely),
+		},
+	};
+
+	size = ARRAY_SIZE(static_key_tests);
+
+	ret = verify_keys(static_key_tests, size, false);
+	if (ret)
+		goto out;
+
+	invert_keys(static_key_tests, size);
+	ret = verify_keys(static_key_tests, size, true);
+	if (ret)
+		goto out;
+
+	invert_keys(static_key_tests, size);
+	ret = verify_keys(static_key_tests, size, false);
+	if (ret)
+		goto out;
+	return 0;
+out:
+	return ret;
+}
+
+static void __exit test_static_key_exit(void)
+{
+}
+
+module_init(test_static_key_init);
+module_exit(test_static_key_exit);
+
+MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>");
+MODULE_LICENSE("GPL");
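
For reference, a minimal sketch of how a consumer of the new DEFINE_STATIC_KEY_* API exercised by this test is expected to look; the key name and call sites are hypothetical, and static_branch_enable()/static_branch_disable() are assumed from the same jump_label rework this series targets.

	#include <linux/jump_label.h>
	#include <linux/printk.h>

	static DEFINE_STATIC_KEY_FALSE(my_feature_key); /* hypothetical key, default off */

	static void fast_path(void)
	{
		/* Compiles down to a patched NOP/JMP rather than a load-and-test */
		if (static_branch_unlikely(&my_feature_key))
			pr_info("feature path taken\n");
	}

	static void set_feature(bool on)
	{
		if (on)
			static_branch_enable(&my_feature_key);
		else
			static_branch_disable(&my_feature_key);
	}
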
diff --git a/mm/Kconfig b/mm/Kconfig
index e79de2b..d4e6495 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -299,15 +299,9 @@
 # On the 'tile' arch, USB OHCI needs the bounce pool since tilegx will often
 # have more than 4GB of memory, but we don't currently use the IOTLB to present
 # a 32-bit address to OHCI.  So we need to use a bounce pool instead.
-#
-# We also use the bounce pool to provide stable page writes for jbd.  jbd
-# initiates buffer writeback without locking the page or setting PG_writeback,
-# and fixing that behavior (a second time; jbd2 doesn't have this problem) is
-# a major rework effort.  Instead, use the bounce buffer to snapshot pages
-# (until jbd goes away).  The only jbd user is ext3.
 config NEED_BOUNCE_POOL
 	bool
-	default y if (TILE && USB_OHCI_HCD) || (BLK_DEV_INTEGRITY && JBD)
+	default y if TILE && USB_OHCI_HCD
 
 config NR_QUICK
 	int
diff --git a/mm/Makefile b/mm/Makefile
index 98c4eae..b424d5e 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -78,3 +78,4 @@
 obj-$(CONFIG_MEMORY_BALLOON) += balloon_compaction.o
 obj-$(CONFIG_PAGE_EXTENSION) += page_ext.o
 obj-$(CONFIG_CMA_DEBUGFS) += cma_debug.o
+obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
diff --git a/mm/dmapool.c b/mm/dmapool.c
index fd5fe43..59d10d1 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -242,7 +242,7 @@
 	return page;
 }
 
-static inline int is_page_busy(struct dma_page *page)
+static inline bool is_page_busy(struct dma_page *page)
 {
 	return page->in_use != 0;
 }
diff --git a/mm/gup.c b/mm/gup.c
index 6297f6b..a798293 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -12,7 +12,9 @@
 #include <linux/sched.h>
 #include <linux/rwsem.h>
 #include <linux/hugetlb.h>
+
 #include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 
 #include "internal.h"
 
@@ -32,6 +34,30 @@
 	return NULL;
 }
 
+static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
+		pte_t *pte, unsigned int flags)
+{
+	/* No page to get reference */
+	if (flags & FOLL_GET)
+		return -EFAULT;
+
+	if (flags & FOLL_TOUCH) {
+		pte_t entry = *pte;
+
+		if (flags & FOLL_WRITE)
+			entry = pte_mkdirty(entry);
+		entry = pte_mkyoung(entry);
+
+		if (!pte_same(*pte, entry)) {
+			set_pte_at(vma->vm_mm, address, pte, entry);
+			update_mmu_cache(vma, address, pte);
+		}
+	}
+
+	/* Proper page table entry exists, but no corresponding struct page */
+	return -EEXIST;
+}
+
 static struct page *follow_page_pte(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmd, unsigned int flags)
 {
@@ -73,10 +99,21 @@
 
 	page = vm_normal_page(vma, address, pte);
 	if (unlikely(!page)) {
-		if ((flags & FOLL_DUMP) ||
-		    !is_zero_pfn(pte_pfn(pte)))
-			goto bad_page;
-		page = pte_page(pte);
+		if (flags & FOLL_DUMP) {
+			/* Avoid special (like zero) pages in core dumps */
+			page = ERR_PTR(-EFAULT);
+			goto out;
+		}
+
+		if (is_zero_pfn(pte_pfn(pte))) {
+			page = pte_page(pte);
+		} else {
+			int ret;
+
+			ret = follow_pfn_pte(vma, address, ptep, flags);
+			page = ERR_PTR(ret);
+			goto out;
+		}
 	}
 
 	if (flags & FOLL_GET)
@@ -114,12 +151,9 @@
 			unlock_page(page);
 		}
 	}
+out:
 	pte_unmap_unlock(ptep, ptl);
 	return page;
-bad_page:
-	pte_unmap_unlock(ptep, ptl);
-	return ERR_PTR(-EFAULT);
-
 no_page:
 	pte_unmap_unlock(ptep, ptl);
 	if (!pte_none(pte))
@@ -489,9 +523,15 @@
 				goto next_page;
 			}
 			BUG();
-		}
-		if (IS_ERR(page))
+		} else if (PTR_ERR(page) == -EEXIST) {
+			/*
+			 * Proper page table entry exists, but no corresponding
+			 * struct page.
+			 */
+			goto next_page;
+		} else if (IS_ERR(page)) {
 			return i ? i : PTR_ERR(page);
+		}
 		if (pages) {
 			pages[i] = page;
 			flush_anon_page(vma, page, start);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 097c7a4..279a818 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -23,6 +23,7 @@
 #include <linux/pagemap.h>
 #include <linux/migrate.h>
 #include <linux/hashtable.h>
+#include <linux/userfaultfd_k.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -716,21 +717,27 @@
 
 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 					struct vm_area_struct *vma,
-					unsigned long haddr, pmd_t *pmd,
-					struct page *page, gfp_t gfp)
+					unsigned long address, pmd_t *pmd,
+					struct page *page, gfp_t gfp,
+					unsigned int flags)
 {
 	struct mem_cgroup *memcg;
 	pgtable_t pgtable;
 	spinlock_t *ptl;
+	unsigned long haddr = address & HPAGE_PMD_MASK;
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-	if (mem_cgroup_try_charge(page, mm, gfp, &memcg))
-		return VM_FAULT_OOM;
+	if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) {
+		put_page(page);
+		count_vm_event(THP_FAULT_FALLBACK);
+		return VM_FAULT_FALLBACK;
+	}
 
 	pgtable = pte_alloc_one(mm, haddr);
 	if (unlikely(!pgtable)) {
 		mem_cgroup_cancel_charge(page, memcg);
+		put_page(page);
 		return VM_FAULT_OOM;
 	}
 
@@ -750,6 +757,21 @@
 		pte_free(mm, pgtable);
 	} else {
 		pmd_t entry;
+
+		/* Deliver the page fault to userland */
+		if (userfaultfd_missing(vma)) {
+			int ret;
+
+			spin_unlock(ptl);
+			mem_cgroup_cancel_charge(page, memcg);
+			put_page(page);
+			pte_free(mm, pgtable);
+			ret = handle_userfault(vma, address, flags,
+					       VM_UFFD_MISSING);
+			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
+			return ret;
+		}
+
 		entry = mk_huge_pmd(page, vma->vm_page_prot);
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 		page_add_new_anon_rmap(page, vma, haddr);
@@ -760,6 +782,7 @@
 		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
 		atomic_long_inc(&mm->nr_ptes);
 		spin_unlock(ptl);
+		count_vm_event(THP_FAULT_ALLOC);
 	}
 
 	return 0;
@@ -771,19 +794,16 @@
 }
 
 /* Caller must hold page table lock. */
-static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
 		struct page *zero_page)
 {
 	pmd_t entry;
-	if (!pmd_none(*pmd))
-		return false;
 	entry = mk_pmd(zero_page, vma->vm_page_prot);
 	entry = pmd_mkhuge(entry);
 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 	set_pmd_at(mm, haddr, pmd, entry);
 	atomic_long_inc(&mm->nr_ptes);
-	return true;
 }
 
 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -806,6 +826,7 @@
 		pgtable_t pgtable;
 		struct page *zero_page;
 		bool set;
+		int ret;
 		pgtable = pte_alloc_one(mm, haddr);
 		if (unlikely(!pgtable))
 			return VM_FAULT_OOM;
@@ -816,14 +837,28 @@
 			return VM_FAULT_FALLBACK;
 		}
 		ptl = pmd_lock(mm, pmd);
-		set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
-				zero_page);
-		spin_unlock(ptl);
+		ret = 0;
+		set = false;
+		if (pmd_none(*pmd)) {
+			if (userfaultfd_missing(vma)) {
+				spin_unlock(ptl);
+				ret = handle_userfault(vma, address, flags,
+						       VM_UFFD_MISSING);
+				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
+			} else {
+				set_huge_zero_page(pgtable, mm, vma,
+						   haddr, pmd,
+						   zero_page);
+				spin_unlock(ptl);
+				set = true;
+			}
+		} else
+			spin_unlock(ptl);
 		if (!set) {
 			pte_free(mm, pgtable);
 			put_huge_zero_page();
 		}
-		return 0;
+		return ret;
 	}
 	gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
 	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
@@ -831,14 +866,8 @@
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
-	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page, gfp))) {
-		put_page(page);
-		count_vm_event(THP_FAULT_FALLBACK);
-		return VM_FAULT_FALLBACK;
-	}
-
-	count_vm_event(THP_FAULT_ALLOC);
-	return 0;
+	return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp,
+					    flags);
 }
 
 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
@@ -873,16 +902,14 @@
 	 */
 	if (is_huge_zero_pmd(pmd)) {
 		struct page *zero_page;
-		bool set;
 		/*
 		 * get_huge_zero_page() will never allocate a new page here,
 		 * since we already have a zero page to copy. It just takes a
 		 * reference.
 		 */
 		zero_page = get_huge_zero_page();
-		set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
+		set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
 				zero_page);
-		BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
 		ret = 0;
 		goto out_unlock;
 	}
@@ -2133,7 +2160,8 @@
 	     _pte++, address += PAGE_SIZE) {
 		pte_t pteval = *_pte;
 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
-			if (++none_or_zero <= khugepaged_max_ptes_none)
+			if (!userfaultfd_armed(vma) &&
+			    ++none_or_zero <= khugepaged_max_ptes_none)
 				continue;
 			else
 				goto out;
@@ -2586,7 +2614,8 @@
 	     _pte++, _address += PAGE_SIZE) {
 		pte_t pteval = *_pte;
 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
-			if (++none_or_zero <= khugepaged_max_ptes_none)
+			if (!userfaultfd_armed(vma) &&
+			    ++none_or_zero <= khugepaged_max_ptes_none)
 				continue;
 			else
 				goto out_unmap;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a8c3087..51ae41d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -616,7 +616,7 @@
 }
 
 /* Returns true if the VMA has associated reserve pages */
-static int vma_has_reserves(struct vm_area_struct *vma, long chg)
+static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
 {
 	if (vma->vm_flags & VM_NORESERVE) {
 		/*
@@ -629,23 +629,23 @@
 		 * properly, so add work-around here.
 		 */
 		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
-			return 1;
+			return true;
 		else
-			return 0;
+			return false;
 	}
 
 	/* Shared mappings always use reserves */
 	if (vma->vm_flags & VM_MAYSHARE)
-		return 1;
+		return true;
 
 	/*
 	 * Only the process that called mmap() has reserves for
 	 * private mappings.
 	 */
 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
-		return 1;
+		return true;
 
-	return 0;
+	return false;
 }
 
 static void enqueue_huge_page(struct hstate *h, struct page *page)
@@ -3779,7 +3779,7 @@
 	return saddr;
 }
 
-static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
 {
 	unsigned long base = addr & PUD_MASK;
 	unsigned long end = base + PUD_SIZE;
@@ -3789,8 +3789,8 @@
 	 */
 	if (vma->vm_flags & VM_MAYSHARE &&
 	    vma->vm_start <= base && end <= vma->vm_end)
-		return 1;
-	return 0;
+		return true;
+	return false;
 }
 
 /*
diff --git a/mm/internal.h b/mm/internal.h
index 36b23f1..1195dd2 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -426,4 +426,19 @@
 #define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
 #define ALLOC_FAIR		0x100 /* fair zone allocation */
 
+enum ttu_flags;
+struct tlbflush_unmap_batch;
+
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+void try_to_unmap_flush(void);
+void try_to_unmap_flush_dirty(void);
+#else
+static inline void try_to_unmap_flush(void)
+{
+}
+static inline void try_to_unmap_flush_dirty(void)
+{
+}
+
+#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
 #endif	/* __MM_INTERNAL_H */
diff --git a/mm/maccess.c b/mm/maccess.c
index d53adf9..34fe247 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -60,3 +60,44 @@
 	return ret ? -EFAULT : 0;
 }
 EXPORT_SYMBOL_GPL(probe_kernel_write);
+
+/**
+ * strncpy_from_unsafe: - Copy a NUL-terminated string from an unsafe address.
+ * @dst:   Destination address, in kernel space.  This buffer must be at
+ *         least @count bytes long.
+ * @unsafe_addr: Unsafe source address.
+ * @count: Maximum number of bytes to copy, including the trailing NUL.
+ *
+ * Copies a NUL-terminated string from unsafe address to kernel buffer.
+ *
+ * On success, returns the length of the string INCLUDING the trailing NUL.
+ *
+ * If access fails, returns -EFAULT (some data may have been copied
+ * and the trailing NUL added).
+ *
+ * If @count is smaller than the length of the string, copies @count-1 bytes,
+ * sets the last byte of @dst buffer to NUL and returns @count.
+ */
+long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
+{
+	mm_segment_t old_fs = get_fs();
+	const void *src = unsafe_addr;
+	long ret;
+
+	if (unlikely(count <= 0))
+		return 0;
+
+	set_fs(KERNEL_DS);
+	pagefault_disable();
+
+	do {
+		ret = __copy_from_user_inatomic(dst++,
+						(const void __user __force *)src++, 1);
+	} while (dst[-1] && ret == 0 && src - unsafe_addr < count);
+
+	dst[-1] = '\0';
+	pagefault_enable();
+	set_fs(old_fs);
+
+	return ret < 0 ? ret : src - unsafe_addr;
+}
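
A short usage sketch for the new helper, e.g. from a tracing-style context where the source address is not guaranteed to be mapped; the buffer size and caller are illustrative only.

	#include <linux/uaccess.h>
	#include <linux/printk.h>

	static void dump_unsafe_string(const void *unsafe_ptr)
	{
		char buf[64];
		long len;

		/* Safe even if unsafe_ptr faults: the fault is caught, not oopsed on */
		len = strncpy_from_unsafe(buf, unsafe_ptr, sizeof(buf));
		if (len < 0)
			pr_warn("string copy faulted\n");
		else
			pr_info("copied %ld bytes: %s\n", len, buf);
	}
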
diff --git a/mm/madvise.c b/mm/madvise.c
index 64bb8a2..ce3a422 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -103,7 +103,8 @@
 
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
-				vma->vm_file, pgoff, vma_policy(vma));
+			  vma->vm_file, pgoff, vma_policy(vma),
+			  vma->vm_userfaultfd_ctx);
 	if (*prev) {
 		vma = *prev;
 		goto success;
@@ -385,7 +386,7 @@
 	}
 }
 
-static int
+static bool
 madvise_behavior_valid(int behavior)
 {
 	switch (behavior) {
@@ -407,10 +408,10 @@
 #endif
 	case MADV_DONTDUMP:
 	case MADV_DODUMP:
-		return 1;
+		return true;
 
 	default:
-		return 0;
+		return false;
 	}
 }
 
diff --git a/mm/memblock.c b/mm/memblock.c
index 87108e7..95ce68c 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -566,6 +566,9 @@
 		 * area, insert that portion.
 		 */
 		if (rbase > base) {
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+			WARN_ON(nid != memblock_get_region_node(rgn));
+#endif
 			nr_new++;
 			if (insert)
 				memblock_insert_region(type, i++, base,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index acb93c5..1af0575 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5965,7 +5965,13 @@
 	if (!mem_cgroup_is_root(memcg))
 		page_counter_uncharge(&memcg->memory, 1);
 
-	/* Caller disabled preemption with mapping->tree_lock */
+	/*
+	 * Interrupts should be disabled here because the caller holds the
+	 * mapping->tree_lock, which is taken with interrupts off. That matters
+	 * because it is the only synchronisation we have for updating the
+	 * per-CPU variables.
+	 */
+	VM_BUG_ON(!irqs_disabled());
 	mem_cgroup_charge_statistics(memcg, page, -1);
 	memcg_check_events(memcg, page);
 }
diff --git a/mm/memory.c b/mm/memory.c
index 388dcf9..bb04d8f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -61,6 +61,7 @@
 #include <linux/string.h>
 #include <linux/dma-debug.h>
 #include <linux/debugfs.h>
+#include <linux/userfaultfd_k.h>
 
 #include <asm/io.h>
 #include <asm/pgalloc.h>
@@ -180,22 +181,22 @@
 
 #ifdef HAVE_GENERIC_MMU_GATHER
 
-static int tlb_next_batch(struct mmu_gather *tlb)
+static bool tlb_next_batch(struct mmu_gather *tlb)
 {
 	struct mmu_gather_batch *batch;
 
 	batch = tlb->active;
 	if (batch->next) {
 		tlb->active = batch->next;
-		return 1;
+		return true;
 	}
 
 	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
-		return 0;
+		return false;
 
 	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
 	if (!batch)
-		return 0;
+		return false;
 
 	tlb->batch_count++;
 	batch->next = NULL;
@@ -205,7 +206,7 @@
 	tlb->active->next = batch;
 	tlb->active = batch;
 
-	return 1;
+	return true;
 }
 
 /* tlb_gather_mmu
@@ -2685,6 +2686,12 @@
 		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 		if (!pte_none(*page_table))
 			goto unlock;
+		/* Deliver the page fault to userland, check inside PT lock */
+		if (userfaultfd_missing(vma)) {
+			pte_unmap_unlock(page_table, ptl);
+			return handle_userfault(vma, address, flags,
+						VM_UFFD_MISSING);
+		}
 		goto setpte;
 	}
 
@@ -2713,6 +2720,15 @@
 	if (!pte_none(*page_table))
 		goto release;
 
+	/* Deliver the page fault to userland, check inside PT lock */
+	if (userfaultfd_missing(vma)) {
+		pte_unmap_unlock(page_table, ptl);
+		mem_cgroup_cancel_charge(page, memcg);
+		page_cache_release(page);
+		return handle_userfault(vma, address, flags,
+					VM_UFFD_MISSING);
+	}
+
 	inc_mm_counter_fast(mm, MM_ANONPAGES);
 	page_add_new_anon_rmap(page, vma, address);
 	mem_cgroup_commit_charge(page, memcg, false);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6da82bc..8fd97da 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1248,6 +1248,14 @@
 
 	mem_hotplug_begin();
 
+	/*
+	 * Add new range to memblock so that when hotadd_new_pgdat() is called
+	 * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
+	 * this new range and calculate total pages correctly.  The range will
+	 * be removed at hot-remove time.
+	 */
+	memblock_add_node(start, size, nid);
+
 	new_node = !node_online(nid);
 	if (new_node) {
 		pgdat = hotadd_new_pgdat(nid, start);
@@ -1277,7 +1285,6 @@
 
 	/* create new memmap entry */
 	firmware_map_add_hotplug(start, start + size, "System RAM");
-	memblock_add_node(start, size, nid);
 
 	goto out;
 
@@ -1286,6 +1293,7 @@
 	if (new_pgdat)
 		rollback_node_hotadd(nid, pgdat);
 	release_memory_resource(res);
+	memblock_remove(start, size);
 
 out:
 	mem_hotplug_done();
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 99d4c1d..a7f1e0d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -722,8 +722,8 @@
 		pgoff = vma->vm_pgoff +
 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
-				  vma->anon_vma, vma->vm_file, pgoff,
-				  new_pol);
+				 vma->anon_vma, vma->vm_file, pgoff,
+				 new_pol, vma->vm_userfaultfd_ctx);
 		if (prev) {
 			vma = prev;
 			next = vma->vm_next;
diff --git a/mm/migrate.c b/mm/migrate.c
index eb42671..5c08cab 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1226,7 +1226,9 @@
 		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
 			goto set_status;
 
-		page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);
+		/* FOLL_DUMP to ignore special (like zero) pages */
+		page = follow_page(vma, pp->addr,
+				FOLL_GET | FOLL_SPLIT | FOLL_DUMP);
 
 		err = PTR_ERR(page);
 		if (IS_ERR(page))
@@ -1236,10 +1238,6 @@
 		if (!page)
 			goto set_status;
 
-		/* Use PageReserved to check for zero page */
-		if (PageReserved(page))
-			goto put_and_set;
-
 		pp->page = page;
 		err = page_to_nid(page);
 
@@ -1396,18 +1394,14 @@
 		if (!vma || addr < vma->vm_start)
 			goto set_status;
 
-		page = follow_page(vma, addr, 0);
+		/* FOLL_DUMP to ignore special (like zero) pages */
+		page = follow_page(vma, addr, FOLL_DUMP);
 
 		err = PTR_ERR(page);
 		if (IS_ERR(page))
 			goto set_status;
 
-		err = -ENOENT;
-		/* Use PageReserved to check for zero page */
-		if (!page || PageReserved(page))
-			goto set_status;
-
-		err = page_to_nid(page);
+		err = page ? page_to_nid(page) : -ENOENT;
 set_status:
 		*status = err;
 
diff --git a/mm/mlock.c b/mm/mlock.c
index 6fd2cf1..25936680 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -510,7 +510,8 @@
 
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
-			  vma->vm_file, pgoff, vma_policy(vma));
+			  vma->vm_file, pgoff, vma_policy(vma),
+			  vma->vm_userfaultfd_ctx);
 	if (*prev) {
 		vma = *prev;
 		goto success;
diff --git a/mm/mmap.c b/mm/mmap.c
index f126923..82db4fc 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -41,6 +41,7 @@
 #include <linux/notifier.h>
 #include <linux/memory.h>
 #include <linux/printk.h>
+#include <linux/userfaultfd_k.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -919,7 +920,8 @@
  * per-vma resources, so we don't attempt to merge those.
  */
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
-			struct file *file, unsigned long vm_flags)
+				struct file *file, unsigned long vm_flags,
+				struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
 {
 	/*
 	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
@@ -935,6 +937,8 @@
 		return 0;
 	if (vma->vm_ops && vma->vm_ops->close)
 		return 0;
+	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
+		return 0;
 	return 1;
 }
 
@@ -965,9 +969,11 @@
  */
 static int
 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
-	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+		     struct anon_vma *anon_vma, struct file *file,
+		     pgoff_t vm_pgoff,
+		     struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
 {
-	if (is_mergeable_vma(vma, file, vm_flags) &&
+	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
 		if (vma->vm_pgoff == vm_pgoff)
 			return 1;
@@ -984,9 +990,11 @@
  */
 static int
 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
-	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
+		    struct anon_vma *anon_vma, struct file *file,
+		    pgoff_t vm_pgoff,
+		    struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
 {
-	if (is_mergeable_vma(vma, file, vm_flags) &&
+	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
 		pgoff_t vm_pglen;
 		vm_pglen = vma_pages(vma);
@@ -1029,7 +1037,8 @@
 			struct vm_area_struct *prev, unsigned long addr,
 			unsigned long end, unsigned long vm_flags,
 			struct anon_vma *anon_vma, struct file *file,
-			pgoff_t pgoff, struct mempolicy *policy)
+			pgoff_t pgoff, struct mempolicy *policy,
+			struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
 {
 	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
 	struct vm_area_struct *area, *next;
@@ -1056,14 +1065,17 @@
 	if (prev && prev->vm_end == addr &&
 			mpol_equal(vma_policy(prev), policy) &&
 			can_vma_merge_after(prev, vm_flags,
-						anon_vma, file, pgoff)) {
+					    anon_vma, file, pgoff,
+					    vm_userfaultfd_ctx)) {
 		/*
 		 * OK, it can.  Can we now merge in the successor as well?
 		 */
 		if (next && end == next->vm_start &&
 				mpol_equal(policy, vma_policy(next)) &&
 				can_vma_merge_before(next, vm_flags,
-					anon_vma, file, pgoff+pglen) &&
+						     anon_vma, file,
+						     pgoff+pglen,
+						     vm_userfaultfd_ctx) &&
 				is_mergeable_anon_vma(prev->anon_vma,
 						      next->anon_vma, NULL)) {
 							/* cases 1, 6 */
@@ -1084,7 +1096,8 @@
 	if (next && end == next->vm_start &&
 			mpol_equal(policy, vma_policy(next)) &&
 			can_vma_merge_before(next, vm_flags,
-					anon_vma, file, pgoff+pglen)) {
+					     anon_vma, file, pgoff+pglen,
+					     vm_userfaultfd_ctx)) {
 		if (prev && addr < prev->vm_end)	/* case 4 */
 			err = vma_adjust(prev, prev->vm_start,
 				addr, prev->vm_pgoff, NULL);
@@ -1570,8 +1583,8 @@
 	/*
 	 * Can we just expand an old mapping?
 	 */
-	vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff,
-			NULL);
+	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
+			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
 	if (vma)
 		goto out;
 
@@ -2757,7 +2770,7 @@
 
 	/* Can we just expand an old private anonymous mapping? */
 	vma = vma_merge(mm, prev, addr, addr + len, flags,
-					NULL, NULL, pgoff, NULL);
+			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
 	if (vma)
 		goto out;
 
@@ -2913,7 +2926,8 @@
 	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
 		return NULL;	/* should never get here */
 	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
-			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
+			    vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
+			    vma->vm_userfaultfd_ctx);
 	if (new_vma) {
 		/*
 		 * Source vma may have been merged into new_vma
diff --git a/mm/mprotect.c b/mm/mprotect.c
index e7d6f11..ef5be8e 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -292,7 +292,8 @@
 	 */
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*pprev = vma_merge(mm, *pprev, start, end, newflags,
-			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
+			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
+			   vma->vm_userfaultfd_ctx);
 	if (*pprev) {
 		vma = *pprev;
 		goto success;
diff --git a/mm/mremap.c b/mm/mremap.c
index a7c93ec..5a71cce 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -276,6 +276,12 @@
 	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
 				     need_rmap_locks);
 	if (moved_len < old_len) {
+		err = -ENOMEM;
+	} else if (vma->vm_ops && vma->vm_ops->mremap) {
+		err = vma->vm_ops->mremap(new_vma);
+	}
+
+	if (unlikely(err)) {
 		/*
 		 * On error, move entries back from new area to old,
 		 * which will succeed since page tables still there,
@@ -286,16 +292,8 @@
 		vma = new_vma;
 		old_len = new_len;
 		old_addr = new_addr;
-		new_addr = -ENOMEM;
+		new_addr = err;
 	} else {
-		if (vma->vm_file && vma->vm_file->f_op->mremap) {
-			err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
-			if (err < 0) {
-				move_page_tables(new_vma, new_addr, vma,
-						 old_addr, moved_len, true);
-				return err;
-			}
-		}
 		arch_remap(mm, old_addr, old_addr + old_len,
 			   new_addr, new_addr + new_len);
 	}
@@ -348,6 +346,7 @@
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma = find_vma(mm, addr);
+	unsigned long pgoff;
 
 	if (!vma || vma->vm_start > addr)
 		return ERR_PTR(-EFAULT);
@@ -359,17 +358,17 @@
 	if (old_len > vma->vm_end - addr)
 		return ERR_PTR(-EFAULT);
 
-	/* Need to be careful about a growing mapping */
-	if (new_len > old_len) {
-		unsigned long pgoff;
+	if (new_len == old_len)
+		return vma;
 
-		if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
-			return ERR_PTR(-EFAULT);
-		pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
-		pgoff += vma->vm_pgoff;
-		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
-			return ERR_PTR(-EINVAL);
-	}
+	/* Need to be careful about a growing mapping */
+	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
+	pgoff += vma->vm_pgoff;
+	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
+		return ERR_PTR(-EINVAL);
+
+	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
+		return ERR_PTR(-EFAULT);
 
 	if (vma->vm_flags & VM_LOCKED) {
 		unsigned long locked, lock_limit;
@@ -408,13 +407,8 @@
 	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
 		goto out;
 
-	/* Check if the location we're moving into overlaps the
-	 * old location at all, and fail if it does.
-	 */
-	if ((new_addr <= addr) && (new_addr+new_len) > addr)
-		goto out;
-
-	if ((addr <= new_addr) && (addr+old_len) > new_addr)
+	/* Ensure the old/new locations do not overlap */
+	if (addr + old_len > new_addr && new_addr + new_len > addr)
 		goto out;
 
 	ret = do_munmap(mm, new_addr, new_len);
@@ -580,8 +574,10 @@
 		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
 	}
 out:
-	if (ret & ~PAGE_MASK)
+	if (ret & ~PAGE_MASK) {
 		vm_unacct_memory(charged);
+		locked = 0;
+	}
 	up_write(&current->mm->mmap_sem);
 	if (locked && new_len > old_len)
 		mm_populate(new_addr + old_len, new_len - old_len);
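
The rewritten overlap check above (in what is mremap_to() in this file) collapses the two asymmetric comparisons into the canonical test for two half-open intervals: [addr, addr+old_len) and [new_addr, new_addr+new_len) intersect exactly when each range starts before the other ends. As a standalone sketch:

	/*
	 * Half-open interval overlap: [a, a+a_len) and [b, b+b_len) intersect
	 * iff a starts before b ends and b starts before a ends.
	 */
	static inline bool ranges_overlap(unsigned long a, unsigned long a_len,
					  unsigned long b, unsigned long b_len)
	{
		return a + a_len > b && b + b_len > a;
	}
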
diff --git a/mm/rmap.c b/mm/rmap.c
index 171b687..0db38e7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -62,6 +62,8 @@
 
 #include <asm/tlbflush.h>
 
+#include <trace/events/tlb.h>
+
 #include "internal.h"
 
 static struct kmem_cache *anon_vma_cachep;
@@ -583,6 +585,107 @@
 	return address;
 }
 
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+static void percpu_flush_tlb_batch_pages(void *data)
+{
+	/*
+	 * All TLB entries are flushed on the assumption that it is
+	 * cheaper to flush all TLBs and let them be refilled than
+	 * flushing individual PFNs. Note that we do not track mm's
+	 * to flush as that might simply be multiple full TLB flushes
+	 * for no gain.
+	 */
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+	flush_tlb_local();
+}
+
+/*
+ * Flush TLB entries for recently unmapped pages from remote CPUs. If a PTE
+ * was dirty when it was unmapped, it is important that it is flushed before
+ * any IO is initiated on the page, to prevent lost writes. Similarly, it
+ * must be flushed before freeing to prevent data leakage.
+ */
+void try_to_unmap_flush(void)
+{
+	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+	int cpu;
+
+	if (!tlb_ubc->flush_required)
+		return;
+
+	cpu = get_cpu();
+
+	trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
+
+	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
+		percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
+
+	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
+		smp_call_function_many(&tlb_ubc->cpumask,
+			percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
+	}
+	cpumask_clear(&tlb_ubc->cpumask);
+	tlb_ubc->flush_required = false;
+	tlb_ubc->writable = false;
+	put_cpu();
+}
+
+/* Flush iff there are potentially writable TLB entries that can race with IO */
+void try_to_unmap_flush_dirty(void)
+{
+	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+
+	if (tlb_ubc->writable)
+		try_to_unmap_flush();
+}
+
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
+		struct page *page, bool writable)
+{
+	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+
+	cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
+	tlb_ubc->flush_required = true;
+
+	/*
+	 * If the PTE was dirty then it's best to assume it's writable. The
+	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
+	 * before the page is queued for IO.
+	 */
+	if (writable)
+		tlb_ubc->writable = true;
+}
+
+/*
+ * Returns true if the TLB flush should be deferred to the end of a batch of
+ * unmap operations to reduce IPIs.
+ */
+static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
+{
+	bool should_defer = false;
+
+	if (!(flags & TTU_BATCH_FLUSH))
+		return false;
+
+	/* If remote CPUs need to be flushed then defer the flush so it is batched */
+	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
+		should_defer = true;
+	put_cpu();
+
+	return should_defer;
+}
+#else
+static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
+		struct page *page, bool writable)
+{
+}
+
+static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
+{
+	return false;
+}
+#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
+
 /*
  * At what user virtual address is page expected in vma?
  * Caller should check the page is actually part of the vma.
@@ -1220,7 +1323,20 @@
 
 	/* Nuke the page table entry. */
 	flush_cache_page(vma, address, page_to_pfn(page));
-	pteval = ptep_clear_flush(vma, address, pte);
+	if (should_defer_flush(mm, flags)) {
+		/*
+		 * We clear the PTE but do not flush so potentially a remote
+		 * CPU could still be writing to the page. If the entry was
+		 * previously clean then the architecture must guarantee that
+		 * a clear->dirty transition on a cached TLB entry is written
+		 * through and traps if the PTE is unmapped.
+		 */
+		pteval = ptep_get_and_clear(mm, address, pte);
+
+		set_tlb_ubc_flush_pending(mm, page, pte_dirty(pteval));
+	} else {
+		pteval = ptep_clear_flush(vma, address, pte);
+	}
 
 	/* Move the dirty bit to the physical page now the pte is gone. */
 	if (pte_dirty(pteval))
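
The deferral machinery above is driven from the page reclaim path; that call site is outside this diff, so the following is only an illustrative sketch of the intended flow, with a hypothetical helper name.

	#include <linux/rmap.h>

	/* Hypothetical reclaim-side batch: defer the TLB IPIs, then flush once. */
	static void unmap_page_batch(struct page **pages, int nr)
	{
		int i;

		for (i = 0; i < nr; i++)
			/* TTU_BATCH_FLUSH routes through set_tlb_ubc_flush_pending() */
			try_to_unmap(pages[i], TTU_UNMAP | TTU_BATCH_FLUSH);

		/* One burst of IPIs flushes everything queued by the loop above */
		try_to_unmap_flush();
	}
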
diff --git a/mm/slab.c b/mm/slab.c
index bbd0b47..60c9369 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3416,6 +3416,19 @@
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+{
+	__kmem_cache_free_bulk(s, size, p);
+}
+EXPORT_SYMBOL(kmem_cache_free_bulk);
+
+bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+								void **p)
+{
+	return __kmem_cache_alloc_bulk(s, flags, size, p);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_bulk);
+
 #ifdef CONFIG_TRACING
 void *
 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
diff --git a/mm/slab.h b/mm/slab.h
index 8da63e4..a3a967d 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -163,6 +163,15 @@
 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		       size_t count, loff_t *ppos);
 
+/*
+ * Generic implementation of bulk operations.
+ * These are useful for situations in which the allocator cannot
+ * perform optimizations. In that case segments of the object list
+ * may be allocated or freed using these operations.
+ */
+void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
+bool __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+
 #ifdef CONFIG_MEMCG_KMEM
 /*
  * Iterate over all memcg caches of the given root cache. The caller must hold
@@ -321,7 +330,7 @@
 		return cachep;
 
 	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
-	       __func__, cachep->name, s->name);
+	       __func__, s->name, cachep->name);
 	WARN_ON_ONCE(1);
 	return s;
 }
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8683110..c26829f 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -104,6 +104,29 @@
 }
 #endif
 
+void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
+{
+	size_t i;
+
+	for (i = 0; i < nr; i++)
+		kmem_cache_free(s, p[i]);
+}
+
+bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
+								void **p)
+{
+	size_t i;
+
+	for (i = 0; i < nr; i++) {
+		void *x = p[i] = kmem_cache_alloc(s, flags);
+		if (!x) {
+			__kmem_cache_free_bulk(s, i, p);
+			return false;
+		}
+	}
+	return true;
+}
+
 #ifdef CONFIG_MEMCG_KMEM
 void slab_init_memcg_params(struct kmem_cache *s)
 {
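
A minimal usage sketch for the new bulk interface; the cache pointer and batch size are illustrative. As the SLUB implementation further below notes, interrupts must be enabled when calling these.

	#include <linux/kernel.h>
	#include <linux/slab.h>

	static int bulk_example(struct kmem_cache *cachep)
	{
		void *objs[16];

		if (!kmem_cache_alloc_bulk(cachep, GFP_KERNEL, ARRAY_SIZE(objs), objs))
			return -ENOMEM;

		/* ... use the objects ... */

		kmem_cache_free_bulk(cachep, ARRAY_SIZE(objs), objs);
		return 0;
	}
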
diff --git a/mm/slob.c b/mm/slob.c
index 4765f65..165bbd3 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -611,6 +611,19 @@
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+{
+	__kmem_cache_free_bulk(s, size, p);
+}
+EXPORT_SYMBOL(kmem_cache_free_bulk);
+
+bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+								void **p)
+{
+	return __kmem_cache_alloc_bulk(s, flags, size, p);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_bulk);
+
 int __kmem_cache_shutdown(struct kmem_cache *c)
 {
 	/* No way to check for remaining objects */
diff --git a/mm/slub.c b/mm/slub.c
index f68c0e5..084184e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1306,6 +1306,17 @@
 	kasan_slab_free(s, x);
 }
 
+static void setup_object(struct kmem_cache *s, struct page *page,
+				void *object)
+{
+	setup_object_debug(s, page, object);
+	if (unlikely(s->ctor)) {
+		kasan_unpoison_object_data(s, object);
+		s->ctor(object);
+		kasan_poison_object_data(s, object);
+	}
+}
+
 /*
  * Slab allocation and freeing
  */
@@ -1336,6 +1347,8 @@
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
+	void *start, *p;
+	int idx, order;
 
 	flags &= gfp_allowed_mask;
 
@@ -1349,6 +1362,8 @@
 	 * so we fall-back to the minimum order allocation.
 	 */
 	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
+	if ((alloc_gfp & __GFP_WAIT) && oo_order(oo) > oo_order(s->min))
+		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_WAIT;
 
 	page = alloc_slab_page(s, alloc_gfp, node, oo);
 	if (unlikely(!page)) {
@@ -1359,13 +1374,13 @@
 		 * Try a lower order alloc if possible
 		 */
 		page = alloc_slab_page(s, alloc_gfp, node, oo);
-
-		if (page)
-			stat(s, ORDER_FALLBACK);
+		if (unlikely(!page))
+			goto out;
+		stat(s, ORDER_FALLBACK);
 	}
 
-	if (kmemcheck_enabled && page
-		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
+	if (kmemcheck_enabled &&
+	    !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
 		int pages = 1 << oo_order(oo);
 
 		kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
@@ -1380,51 +1395,9 @@
 			kmemcheck_mark_unallocated_pages(page, pages);
 	}
 
-	if (flags & __GFP_WAIT)
-		local_irq_disable();
-	if (!page)
-		return NULL;
-
 	page->objects = oo_objects(oo);
-	mod_zone_page_state(page_zone(page),
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		1 << oo_order(oo));
-
-	return page;
-}
-
-static void setup_object(struct kmem_cache *s, struct page *page,
-				void *object)
-{
-	setup_object_debug(s, page, object);
-	if (unlikely(s->ctor)) {
-		kasan_unpoison_object_data(s, object);
-		s->ctor(object);
-		kasan_poison_object_data(s, object);
-	}
-}
-
-static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
-{
-	struct page *page;
-	void *start;
-	void *p;
-	int order;
-	int idx;
-
-	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
-		BUG();
-	}
-
-	page = allocate_slab(s,
-		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-	if (!page)
-		goto out;
 
 	order = compound_order(page);
-	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page_is_pfmemalloc(page))
@@ -1448,10 +1421,34 @@
 	page->freelist = start;
 	page->inuse = page->objects;
 	page->frozen = 1;
+
 out:
+	if (flags & __GFP_WAIT)
+		local_irq_disable();
+	if (!page)
+		return NULL;
+
+	mod_zone_page_state(page_zone(page),
+		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
+		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+		1 << oo_order(oo));
+
+	inc_slabs_node(s, page_to_nid(page), page->objects);
+
 	return page;
 }
 
+static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+{
+	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
+		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
+		BUG();
+	}
+
+	return allocate_slab(s,
+		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
+}
+
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
 	int order = compound_order(page);
@@ -2712,7 +2709,7 @@
 	 * Determine the currently cpus per cpu slab.
 	 * The cpu may change afterward. However that does not matter since
 	 * data is retrieved via this pointer. If we are on the same cpu
-	 * during the cmpxchg then the free will succedd.
+	 * during the cmpxchg then the free will succeed.
 	 */
 	do {
 		tid = this_cpu_read(s->cpu_slab->tid);
@@ -2750,6 +2747,113 @@
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
+/* Note that interrupts must be enabled when calling this function. */
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+{
+	struct kmem_cache_cpu *c;
+	struct page *page;
+	int i;
+
+	local_irq_disable();
+	c = this_cpu_ptr(s->cpu_slab);
+
+	for (i = 0; i < size; i++) {
+		void *object = p[i];
+
+		BUG_ON(!object);
+		/* kmem cache debug support */
+		s = cache_from_obj(s, object);
+		if (unlikely(!s))
+			goto exit;
+		slab_free_hook(s, object);
+
+		page = virt_to_head_page(object);
+
+		if (c->page == page) {
+			/* Fastpath: local CPU free */
+			set_freepointer(s, object, c->freelist);
+			c->freelist = object;
+		} else {
+			c->tid = next_tid(c->tid);
+			local_irq_enable();
+			/* Slowpath: pays the locked cmpxchg_double_slab overhead */
+			__slab_free(s, page, object, _RET_IP_);
+			local_irq_disable();
+			c = this_cpu_ptr(s->cpu_slab);
+		}
+	}
+exit:
+	c->tid = next_tid(c->tid);
+	local_irq_enable();
+}
+EXPORT_SYMBOL(kmem_cache_free_bulk);
+
+/* Note that interrupts must be enabled when calling this function. */
+bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+			   void **p)
+{
+	struct kmem_cache_cpu *c;
+	int i;
+
+	/*
+	 * Drain objects in the per-cpu slab, while disabling local
+	 * IRQs, which protects against PREEMPT and interrupt
+	 * handlers invoking the normal fastpath.
+	 */
+	local_irq_disable();
+	c = this_cpu_ptr(s->cpu_slab);
+
+	for (i = 0; i < size; i++) {
+		void *object = c->freelist;
+
+		if (unlikely(!object)) {
+			local_irq_enable();
+			/*
+			 * Invoking the slow path likely has the side-effect
+			 * of re-populating the per-CPU c->freelist
+			 */
+			p[i] = __slab_alloc(s, flags, NUMA_NO_NODE,
+					    _RET_IP_, c);
+			if (unlikely(!p[i])) {
+				__kmem_cache_free_bulk(s, i, p);
+				return false;
+			}
+			local_irq_disable();
+			c = this_cpu_ptr(s->cpu_slab);
+			continue; /* goto for-loop */
+		}
+
+		/* kmem_cache debug support */
+		s = slab_pre_alloc_hook(s, flags);
+		if (unlikely(!s)) {
+			__kmem_cache_free_bulk(s, i, p);
+			c->tid = next_tid(c->tid);
+			local_irq_enable();
+			return false;
+		}
+
+		c->freelist = get_freepointer(s, object);
+		p[i] = object;
+
+		/* kmem_cache debug support */
+		slab_post_alloc_hook(s, flags, object);
+	}
+	c->tid = next_tid(c->tid);
+	local_irq_enable();
+
+	/* Clear memory outside IRQ disabled fastpath loop */
+	if (unlikely(flags & __GFP_ZERO)) {
+		int j;
+
+		for (j = 0; j < i; j++)
+			memset(p[j], 0, s->object_size);
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(kmem_cache_alloc_bulk);
+
+
 /*
  * Object placement in a slab is made very easy because we always start at
  * offset 0. If we tune the size of the object to the alignment then we can
@@ -5181,7 +5285,7 @@
 	s->kobj.kset = cache_kset(s);
 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
 	if (err)
-		goto out_put_kobj;
+		goto out;
 
 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
 	if (err)
@@ -5208,8 +5312,6 @@
 	return err;
 out_del_kobj:
 	kobject_del(&s->kobj);
-out_put_kobj:
-	kobject_put(&s->kobj);
 	goto out;
 }
 
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
new file mode 100644
index 0000000..77fee93
--- /dev/null
+++ b/mm/userfaultfd.c
@@ -0,0 +1,308 @@
+/*
+ *  mm/userfaultfd.c
+ *
+ *  Copyright (C) 2015  Red Hat, Inc.
+ *
+ *  This work is licensed under the terms of the GNU GPL, version 2. See
+ *  the COPYING file in the top-level directory.
+ */
+
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/userfaultfd_k.h>
+#include <linux/mmu_notifier.h>
+#include <asm/tlbflush.h>
+#include "internal.h"
+
+static int mcopy_atomic_pte(struct mm_struct *dst_mm,
+			    pmd_t *dst_pmd,
+			    struct vm_area_struct *dst_vma,
+			    unsigned long dst_addr,
+			    unsigned long src_addr,
+			    struct page **pagep)
+{
+	struct mem_cgroup *memcg;
+	pte_t _dst_pte, *dst_pte;
+	spinlock_t *ptl;
+	void *page_kaddr;
+	int ret;
+	struct page *page;
+
+	if (!*pagep) {
+		ret = -ENOMEM;
+		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
+		if (!page)
+			goto out;
+
+		page_kaddr = kmap_atomic(page);
+		ret = copy_from_user(page_kaddr,
+				     (const void __user *) src_addr,
+				     PAGE_SIZE);
+		kunmap_atomic(page_kaddr);
+
+		/* fallback to copy_from_user outside mmap_sem */
+		if (unlikely(ret)) {
+			ret = -EFAULT;
+			*pagep = page;
+			/* don't free the page */
+			goto out;
+		}
+	} else {
+		page = *pagep;
+		*pagep = NULL;
+	}
+
+	/*
+	 * The memory barrier inside __SetPageUptodate makes sure that
+	 * preceding stores to the page contents become visible before
+	 * the set_pte_at() write.
+	 */
+	__SetPageUptodate(page);
+
+	ret = -ENOMEM;
+	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg))
+		goto out_release;
+
+	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
+	if (dst_vma->vm_flags & VM_WRITE)
+		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
+
+	ret = -EEXIST;
+	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+	if (!pte_none(*dst_pte))
+		goto out_release_uncharge_unlock;
+
+	inc_mm_counter(dst_mm, MM_ANONPAGES);
+	page_add_new_anon_rmap(page, dst_vma, dst_addr);
+	mem_cgroup_commit_charge(page, memcg, false);
+	lru_cache_add_active_or_unevictable(page, dst_vma);
+
+	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+
+	/* No need to invalidate - it was non-present before */
+	update_mmu_cache(dst_vma, dst_addr, dst_pte);
+
+	pte_unmap_unlock(dst_pte, ptl);
+	ret = 0;
+out:
+	return ret;
+out_release_uncharge_unlock:
+	pte_unmap_unlock(dst_pte, ptl);
+	mem_cgroup_cancel_charge(page, memcg);
+out_release:
+	page_cache_release(page);
+	goto out;
+}
+
+static int mfill_zeropage_pte(struct mm_struct *dst_mm,
+			      pmd_t *dst_pmd,
+			      struct vm_area_struct *dst_vma,
+			      unsigned long dst_addr)
+{
+	pte_t _dst_pte, *dst_pte;
+	spinlock_t *ptl;
+	int ret;
+
+	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
+					 dst_vma->vm_page_prot));
+	ret = -EEXIST;
+	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+	if (!pte_none(*dst_pte))
+		goto out_unlock;
+	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+	/* No need to invalidate - it was non-present before */
+	update_mmu_cache(dst_vma, dst_addr, dst_pte);
+	ret = 0;
+out_unlock:
+	pte_unmap_unlock(dst_pte, ptl);
+	return ret;
+}
+
+static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd = NULL;
+
+	pgd = pgd_offset(mm, address);
+	pud = pud_alloc(mm, pgd, address);
+	if (pud)
+		/*
+		 * Note that we don't get here only when the pmd is
+		 * missing: the *pmd may already be established and in
+		 * turn may even be a trans_huge_pmd.
+		 */
+		pmd = pmd_alloc(mm, pud, address);
+	return pmd;
+}
+
+static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
+					      unsigned long dst_start,
+					      unsigned long src_start,
+					      unsigned long len,
+					      bool zeropage)
+{
+	struct vm_area_struct *dst_vma;
+	ssize_t err;
+	pmd_t *dst_pmd;
+	unsigned long src_addr, dst_addr;
+	long copied;
+	struct page *page;
+
+	/*
+	 * Sanitize the command parameters:
+	 */
+	BUG_ON(dst_start & ~PAGE_MASK);
+	BUG_ON(len & ~PAGE_MASK);
+
+	/* Does the address range wrap, or is the span zero-sized? */
+	BUG_ON(src_start + len <= src_start);
+	BUG_ON(dst_start + len <= dst_start);
+
+	src_addr = src_start;
+	dst_addr = dst_start;
+	copied = 0;
+	page = NULL;
+retry:
+	down_read(&dst_mm->mmap_sem);
+
+	/*
+	 * Make sure the vma is not shared, and that the dst range is
+	 * both valid and fully within a single existing vma.
+	 */
+	err = -EINVAL;
+	dst_vma = find_vma(dst_mm, dst_start);
+	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
+		goto out_unlock;
+	if (dst_start < dst_vma->vm_start ||
+	    dst_start + len > dst_vma->vm_end)
+		goto out_unlock;
+
+	/*
+	 * Be strict and only allow __mcopy_atomic on userfaultfd
+	 * registered ranges to prevent userland errors going
+	 * unnoticed. As far as the VM consistency is concerned, it
+	 * would be perfectly safe to remove this check, but there's
+	 * no useful usage for __mcopy_atomic outside of userfaultfd
+	 * registered ranges. This is after all why these are ioctls
+	 * belonging to the userfaultfd and not syscalls.
+	 */
+	if (!dst_vma->vm_userfaultfd_ctx.ctx)
+		goto out_unlock;
+
+	/*
+	 * FIXME: only allow copying on anonymous vmas, tmpfs should
+	 * be added.
+	 */
+	if (dst_vma->vm_ops)
+		goto out_unlock;
+
+	/*
+	 * Ensure the dst_vma has an anon_vma, or this page
+	 * would get a NULL anon_vma when moved into the
+	 * dst_vma.
+	 */
+	err = -ENOMEM;
+	if (unlikely(anon_vma_prepare(dst_vma)))
+		goto out_unlock;
+
+	while (src_addr < src_start + len) {
+		pmd_t dst_pmdval;
+
+		BUG_ON(dst_addr >= dst_start + len);
+
+		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
+		if (unlikely(!dst_pmd)) {
+			err = -ENOMEM;
+			break;
+		}
+
+		dst_pmdval = pmd_read_atomic(dst_pmd);
+		/*
+		 * If the dst_pmd is mapped as THP don't
+		 * override it and just be strict.
+		 */
+		if (unlikely(pmd_trans_huge(dst_pmdval))) {
+			err = -EEXIST;
+			break;
+		}
+		if (unlikely(pmd_none(dst_pmdval)) &&
+		    unlikely(__pte_alloc(dst_mm, dst_vma, dst_pmd,
+					 dst_addr))) {
+			err = -ENOMEM;
+			break;
+		}
+		/* If a huge pmd materialized from under us, fail */
+		if (unlikely(pmd_trans_huge(*dst_pmd))) {
+			err = -EFAULT;
+			break;
+		}
+
+		BUG_ON(pmd_none(*dst_pmd));
+		BUG_ON(pmd_trans_huge(*dst_pmd));
+
+		if (!zeropage)
+			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
+					       dst_addr, src_addr, &page);
+		else
+			err = mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma,
+						 dst_addr);
+
+		cond_resched();
+
+		if (unlikely(err == -EFAULT)) {
+			void *page_kaddr;
+
+			up_read(&dst_mm->mmap_sem);
+			BUG_ON(!page);
+
+			page_kaddr = kmap(page);
+			err = copy_from_user(page_kaddr,
+					     (const void __user *) src_addr,
+					     PAGE_SIZE);
+			kunmap(page);
+			if (unlikely(err)) {
+				err = -EFAULT;
+				goto out;
+			}
+			goto retry;
+		} else
+			BUG_ON(page);
+
+		if (!err) {
+			dst_addr += PAGE_SIZE;
+			src_addr += PAGE_SIZE;
+			copied += PAGE_SIZE;
+
+			if (fatal_signal_pending(current))
+				err = -EINTR;
+		}
+		if (err)
+			break;
+	}
+
+out_unlock:
+	up_read(&dst_mm->mmap_sem);
+out:
+	if (page)
+		page_cache_release(page);
+	BUG_ON(copied < 0);
+	BUG_ON(err > 0);
+	BUG_ON(!copied && !err);
+	return copied ? copied : err;
+}
+
+ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
+		     unsigned long src_start, unsigned long len)
+{
+	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false);
+}
+
+ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
+		       unsigned long len)
+{
+	return __mcopy_atomic(dst_mm, start, 0, len, true);
+}
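
These two entry points are driven by the userfaultfd ioctls (UFFDIO_COPY and
UFFDIO_ZEROPAGE). A hedged userspace sketch of the copy path, assuming uffd,
fault_addr, local_page and page_size come from an earlier userfaultfd()
registration done elsewhere:

	struct uffdio_copy copy = {
		.dst = fault_addr & ~(page_size - 1),	/* page-aligned dest */
		.src = (unsigned long)local_page,	/* source in our address space */
		.len = page_size,
		.mode = 0,
	};

	/* Resolves the fault by ending up in mcopy_atomic() above. */
	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
		perror("UFFDIO_COPY");

If the source page itself faults, the kernel side above retries the
copy_from_user() outside mmap_sem, which is what the -EFAULT handling in
__mcopy_atomic() implements.
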
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8286938..b113903 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1057,7 +1057,8 @@
 		 * processes. Try to unmap it here.
 		 */
 		if (page_mapped(page) && mapping) {
-			switch (try_to_unmap(page, ttu_flags)) {
+			switch (try_to_unmap(page,
+					ttu_flags|TTU_BATCH_FLUSH)) {
 			case SWAP_FAIL:
 				goto activate_locked;
 			case SWAP_AGAIN:
@@ -1097,7 +1098,12 @@
 			if (!sc->may_writepage)
 				goto keep_locked;
 
-			/* Page is dirty, try to write it out here */
+			/*
+			 * Page is dirty. Flush the TLB if a writable entry
+			 * potentially exists to avoid CPU writes after IO
+			 * starts and then write it out here.
+			 */
+			try_to_unmap_flush_dirty();
 			switch (pageout(page, mapping, sc)) {
 			case PAGE_KEEP:
 				goto keep_locked;
@@ -1208,6 +1214,7 @@
 	}
 
 	mem_cgroup_uncharge_list(&free_pages);
+	try_to_unmap_flush();
 	free_hot_cold_page_list(&free_pages, true);
 
 	list_splice(&ret_pages, page_list);
@@ -2151,6 +2158,23 @@
 	}
 }
 
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+static void init_tlb_ubc(void)
+{
+	/*
+	 * This deliberately does not clear the cpumask as it's expensive
+	 * and unnecessary. If there happens to be stale data in there then
+	 * the first SWAP_CLUSTER_MAX pages will send an unnecessary IPI,
+	 * after which the mask is cleared.
+	 */
+	current->tlb_ubc.flush_required = false;
+}
+#else
+static inline void init_tlb_ubc(void)
+{
+}
+#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
+
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
@@ -2185,6 +2209,8 @@
 	scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
 			 sc->priority == DEF_PRIORITY);
 
+	init_tlb_ubc();
+
 	blk_start_plug(&plug);
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
 					nr[LRU_INACTIVE_FILE]) {
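
In short, TTU_BATCH_FLUSH makes try_to_unmap() record the pending TLB state
instead of flushing per page, and try_to_unmap_flush() issues one flush for
the whole batch. A simplified sketch of the pattern (not the exact kernel
flow):

	/* Unmap a batch of pages, deferring the TLB flush/IPIs. */
	list_for_each_entry(page, page_list, lru)
		try_to_unmap(page, ttu_flags | TTU_BATCH_FLUSH);

	/* One batched flush instead of one IPI per page. */
	try_to_unmap_flush();
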
diff --git a/net/6lowpan/Makefile b/net/6lowpan/Makefile
index eb8baa7..c6ffc55 100644
--- a/net/6lowpan/Makefile
+++ b/net/6lowpan/Makefile
@@ -1,6 +1,6 @@
 obj-$(CONFIG_6LOWPAN) += 6lowpan.o
 
-6lowpan-y := iphc.o nhc.o
+6lowpan-y := core.o iphc.o nhc.o
 
 #rfc6282 nhcs
 obj-$(CONFIG_6LOWPAN_NHC_DEST) += nhc_dest.o
diff --git a/net/6lowpan/core.c b/net/6lowpan/core.c
new file mode 100644
index 0000000..ae1896f
--- /dev/null
+++ b/net/6lowpan/core.c
@@ -0,0 +1,40 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Authors:
+ * (C) 2015 Pengutronix, Alexander Aring <aar@pengutronix.de>
+ */
+
+#include <linux/module.h>
+
+#include <net/6lowpan.h>
+
+void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype)
+{
+	lowpan_priv(dev)->lltype = lltype;
+}
+EXPORT_SYMBOL(lowpan_netdev_setup);
+
+static int __init lowpan_module_init(void)
+{
+	request_module_nowait("ipv6");
+
+	request_module_nowait("nhc_dest");
+	request_module_nowait("nhc_fragment");
+	request_module_nowait("nhc_hop");
+	request_module_nowait("nhc_ipv6");
+	request_module_nowait("nhc_mobility");
+	request_module_nowait("nhc_routing");
+	request_module_nowait("nhc_udp");
+
+	return 0;
+}
+module_init(lowpan_module_init);
+
+MODULE_LICENSE("GPL");
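
A lower-layer 6LoWPAN driver would now be expected to tag its net_device via
the new helper; a hedged sketch, assuming the 802.15.4 value from enum
lowpan_lltypes:

	static void my_lowpan_setup(struct net_device *dev)
	{
		/* Record which link layer sits below this lowpan device. */
		lowpan_netdev_setup(dev, LOWPAN_LLTYPE_IEEE802154);
	}
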
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 94a375c..1e0071f 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -48,7 +48,6 @@
 
 #include <linux/bitops.h>
 #include <linux/if_arp.h>
-#include <linux/module.h>
 #include <linux/netdevice.h>
 #include <net/6lowpan.h>
 #include <net/ipv6.h>
@@ -284,7 +283,7 @@
 		if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
 			return -EINVAL;
 
-		hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
+		hdr.flow_lbl[0] = (tmp & 0x0F) | ((tmp >> 2) & 0x30);
 		memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
 		skb_pull(skb, 2);
 		break;
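
The fix matters because lowpan_fetch_skb() has already consumed the byte into
tmp, so skb->data[0] points at the *next* inline byte. A sketch of the byte
being decoded here, assuming the RFC 6282 TF=01 inline layout:

	/*
	 * tmp: | ECN (2 bits) | reserved (2 bits) | flow label 19-16 (4 bits) |
	 *
	 * (tmp & 0x0F)        -> flow label bits 19-16
	 * ((tmp >> 2) & 0x30) -> ECN moved into bits 5-4 of flow_lbl[0]
	 */
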
@@ -610,19 +609,3 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(lowpan_header_compress);
-
-static int __init lowpan_module_init(void)
-{
-	request_module_nowait("nhc_dest");
-	request_module_nowait("nhc_fragment");
-	request_module_nowait("nhc_hop");
-	request_module_nowait("nhc_ipv6");
-	request_module_nowait("nhc_mobility");
-	request_module_nowait("nhc_routing");
-	request_module_nowait("nhc_udp");
-
-	return 0;
-}
-module_init(lowpan_module_init);
-
-MODULE_LICENSE("GPL");
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 01d7ba8..fded865 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -791,10 +791,9 @@
 {
 	ether_setup(dev);
 
-	dev->priv_flags		|= IFF_802_1Q_VLAN;
+	dev->priv_flags		|= IFF_802_1Q_VLAN | IFF_NO_QUEUE;
 	dev->priv_flags		&= ~IFF_TX_SKB_SHARING;
 	netif_keep_dst(dev);
-	dev->tx_queue_len	= 0;
 
 	dev->netdev_ops		= &vlan_netdev_ops;
 	dev->destructor		= vlan_dev_free;
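
IFF_NO_QUEUE supersedes the old idiom of zeroing tx_queue_len to mark a purely
software device as queueless; the flag is set once at setup time:

	dev->priv_flags |= IFF_NO_QUEUE;	/* replaces dev->tx_queue_len = 0 */
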
diff --git a/net/Kconfig b/net/Kconfig
index 57a7c5a..7021c1b 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -374,6 +374,13 @@
 source "net/ceph/Kconfig"
 source "net/nfc/Kconfig"
 
+config LWTUNNEL
+	bool "Network light weight tunnels"
+	---help---
+	  This feature provides an infrastructure to support light weight
+	  tunnels like mpls. There is no netdevice associated with a light
+	  weight tunnel endpoint. Tunnel encapsulation parameters are stored
+	  with light weight tunnel state associated with fib routes.
 
 endif   # if NET
 
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index cc78538..aa0047c 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -802,13 +802,10 @@
 			   (brdev->payload == p_bridged) ? "bridged" : "routed",
 			   brvcc->copies_failed, brvcc->copies_needed);
 #ifdef CONFIG_ATM_BR2684_IPFILTER
-#define b1(var, byte)	((u8 *) &brvcc->filter.var)[byte]
-#define bs(var)		b1(var, 0), b1(var, 1), b1(var, 2), b1(var, 3)
 		if (brvcc->filter.netmask != 0)
-			seq_printf(seq, "    filter=%d.%d.%d.%d/"
-				   "%d.%d.%d.%d\n", bs(prefix), bs(netmask));
-#undef bs
-#undef b1
+			seq_printf(seq, "    filter=%pI4/%pI4\n",
+				   &brvcc->filter.prefix,
+				   &brvcc->filter.netmask);
 #endif /* CONFIG_ATM_BR2684_IPFILTER */
 	}
 	return 0;
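
%pI4 is the kernel printf extension for printing a big-endian IPv4 address
through a pointer, which is what replaces the old byte-slicing macros here.
For illustration:

	__be32 addr = htonl(0xc0a80001);	/* 192.168.0.1 */

	pr_info("filter addr %pI4\n", &addr);	/* prints "filter addr 192.168.0.1" */
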
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 753383c..912d9c3 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -77,8 +77,7 @@
  * @lq_index: index to store the value at
  * @value: value to store in the ring buffer
  */
-static void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
-				   uint8_t value)
+static void batadv_ring_buffer_set(u8 lq_recv[], u8 *lq_index, u8 value)
 {
 	lq_recv[*lq_index] = value;
 	*lq_index = (*lq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE;
@@ -91,12 +90,12 @@
  *
  * Returns computed average value.
  */
-static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
+static u8 batadv_ring_buffer_avg(const u8 lq_recv[])
 {
-	const uint8_t *ptr;
-	uint16_t count = 0;
-	uint16_t i = 0;
-	uint16_t sum = 0;
+	const u8 *ptr;
+	u16 count = 0;
+	u16 i = 0;
+	u16 sum = 0;
 
 	ptr = lq_recv;
 
@@ -113,7 +112,7 @@
 	if (count == 0)
 		return 0;
 
-	return (uint8_t)(sum / count);
+	return (u8)(sum / count);
 }
 
 /**
@@ -155,14 +154,14 @@
 	kfree(orig_node->bat_iv.bcast_own);
 	orig_node->bat_iv.bcast_own = data_ptr;
 
-	data_ptr = kmalloc_array(max_if_num, sizeof(uint8_t), GFP_ATOMIC);
+	data_ptr = kmalloc_array(max_if_num, sizeof(u8), GFP_ATOMIC);
 	if (!data_ptr) {
 		kfree(orig_node->bat_iv.bcast_own);
 		goto unlock;
 	}
 
 	memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
-	       (max_if_num - 1) * sizeof(uint8_t));
+	       (max_if_num - 1) * sizeof(u8));
 	kfree(orig_node->bat_iv.bcast_own_sum);
 	orig_node->bat_iv.bcast_own_sum = data_ptr;
 
@@ -215,19 +214,19 @@
 	if (max_if_num == 0)
 		goto free_own_sum;
 
-	data_ptr = kmalloc_array(max_if_num, sizeof(uint8_t), GFP_ATOMIC);
+	data_ptr = kmalloc_array(max_if_num, sizeof(u8), GFP_ATOMIC);
 	if (!data_ptr) {
 		kfree(orig_node->bat_iv.bcast_own);
 		goto unlock;
 	}
 
 	memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
-	       del_if_num * sizeof(uint8_t));
+	       del_if_num * sizeof(u8));
 
-	if_offset = (del_if_num + 1) * sizeof(uint8_t);
-	memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
+	if_offset = (del_if_num + 1) * sizeof(u8);
+	memcpy((char *)data_ptr + del_if_num * sizeof(u8),
 	       orig_node->bat_iv.bcast_own_sum + if_offset,
-	       (max_if_num - del_if_num) * sizeof(uint8_t));
+	       (max_if_num - del_if_num) * sizeof(u8));
 
 free_own_sum:
 	kfree(orig_node->bat_iv.bcast_own_sum);
@@ -250,7 +249,7 @@
  * If the object does not exist it is created and initialised.
  */
 static struct batadv_orig_node *
-batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const uint8_t *addr)
+batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
 {
 	struct batadv_orig_node *orig_node;
 	int size, hash_added;
@@ -270,7 +269,7 @@
 	if (!orig_node->bat_iv.bcast_own)
 		goto free_orig_node;
 
-	size = bat_priv->num_ifaces * sizeof(uint8_t);
+	size = bat_priv->num_ifaces * sizeof(u8);
 	orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC);
 	if (!orig_node->bat_iv.bcast_own_sum)
 		goto free_orig_node;
@@ -293,43 +292,17 @@
 
 static struct batadv_neigh_node *
 batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
-			const uint8_t *neigh_addr,
+			const u8 *neigh_addr,
 			struct batadv_orig_node *orig_node,
 			struct batadv_orig_node *orig_neigh)
 {
-	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
-	struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
+	struct batadv_neigh_node *neigh_node;
 
-	neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node);
+	neigh_node = batadv_neigh_node_new(orig_node, hard_iface, neigh_addr);
 	if (!neigh_node)
 		goto out;
 
-	if (!atomic_inc_not_zero(&hard_iface->refcount)) {
-		kfree(neigh_node);
-		neigh_node = NULL;
-		goto out;
-	}
-
 	neigh_node->orig_node = orig_neigh;
-	neigh_node->if_incoming = hard_iface;
-
-	spin_lock_bh(&orig_node->neigh_list_lock);
-	tmp_neigh_node = batadv_neigh_node_get(orig_node, hard_iface,
-					       neigh_addr);
-	if (!tmp_neigh_node) {
-		hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
-	} else {
-		kfree(neigh_node);
-		batadv_hardif_free_ref(hard_iface);
-		neigh_node = tmp_neigh_node;
-	}
-	spin_unlock_bh(&orig_node->neigh_list_lock);
-
-	if (!tmp_neigh_node)
-		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-			   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
-			   neigh_addr, orig_node->orig,
-			   hard_iface->net_dev->name);
 
 out:
 	return neigh_node;
@@ -339,7 +312,7 @@
 {
 	struct batadv_ogm_packet *batadv_ogm_packet;
 	unsigned char *ogm_buff;
-	uint32_t random_seqno;
+	u32 random_seqno;
 
 	/* randomize initial seqno to avoid collision */
 	get_random_bytes(&random_seqno, sizeof(random_seqno));
@@ -411,8 +384,7 @@
 }
 
 /* apply hop penalty for a normal link */
-static uint8_t batadv_hop_penalty(uint8_t tq,
-				  const struct batadv_priv *bat_priv)
+static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
 {
 	int hop_penalty = atomic_read(&bat_priv->hop_penalty);
 	int new_tq;
@@ -442,11 +414,11 @@
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	const char *fwd_str;
-	uint8_t packet_num;
-	int16_t buff_pos;
+	u8 packet_num;
+	s16 buff_pos;
 	struct batadv_ogm_packet *batadv_ogm_packet;
 	struct sk_buff *skb;
-	uint8_t *packet_pos;
+	u8 *packet_pos;
 
 	if (hard_iface->if_status != BATADV_IF_ACTIVE)
 		return;
@@ -837,7 +809,7 @@
 				  struct batadv_hard_iface *if_outgoing)
 {
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-	uint16_t tvlv_len;
+	u16 tvlv_len;
 
 	if (batadv_ogm_packet->ttl <= 1) {
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
@@ -896,9 +868,9 @@
 	struct hlist_head *head;
 	struct batadv_orig_node *orig_node;
 	unsigned long *word;
-	uint32_t i;
+	u32 i;
 	size_t word_index;
-	uint8_t *w;
+	u8 *w;
 	int if_num;
 
 	for (i = 0; i < hash->size; i++) {
@@ -927,8 +899,8 @@
 	struct batadv_ogm_packet *batadv_ogm_packet;
 	struct batadv_hard_iface *primary_if, *tmp_hard_iface;
 	int *ogm_buff_len = &hard_iface->bat_iv.ogm_buff_len;
-	uint32_t seqno;
-	uint16_t tvlv_len = 0;
+	u32 seqno;
+	u16 tvlv_len = 0;
 	unsigned long send_time;
 
 	primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -947,7 +919,7 @@
 	batadv_ogm_packet->tvlv_len = htons(tvlv_len);
 
 	/* change sequence number to network order */
-	seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
+	seqno = (u32)atomic_read(&hard_iface->bat_iv.ogm_seqno);
 	batadv_ogm_packet->seqno = htonl(seqno);
 	atomic_inc(&hard_iface->bat_iv.ogm_seqno);
 
@@ -970,7 +942,7 @@
 	rcu_read_lock();
 	list_for_each_entry_rcu(tmp_hard_iface, &batadv_hardif_list, list) {
 		if (tmp_hard_iface->soft_iface != hard_iface->soft_iface)
-				continue;
+			continue;
 		batadv_iv_ogm_queue_add(bat_priv, *ogm_buff,
 					*ogm_buff_len, hard_iface,
 					tmp_hard_iface, 1, send_time);
@@ -1006,13 +978,14 @@
 {
 	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
 	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
-	struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
+	struct batadv_neigh_node *neigh_node = NULL;
+	struct batadv_neigh_node *tmp_neigh_node = NULL;
 	struct batadv_neigh_node *router = NULL;
 	struct batadv_orig_node *orig_node_tmp;
 	int if_num;
-	uint8_t sum_orig, sum_neigh;
-	uint8_t *neigh_addr;
-	uint8_t tq_avg;
+	u8 sum_orig, sum_neigh;
+	u8 *neigh_addr;
+	u8 tq_avg;
 
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 		   "update_originator(): Searching and updating originator entry of received packet\n");
@@ -1164,8 +1137,8 @@
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
 	struct batadv_neigh_ifinfo *neigh_ifinfo;
-	uint8_t total_count;
-	uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
+	u8 total_count;
+	u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
 	unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
 	int tq_asym_penalty, inv_asym_penalty, if_num, ret = 0;
 	unsigned int combined_tq;
@@ -1311,13 +1284,13 @@
 	struct batadv_neigh_node *neigh_node;
 	struct batadv_neigh_ifinfo *neigh_ifinfo;
 	int is_dup;
-	int32_t seq_diff;
+	s32 seq_diff;
 	int need_update = 0;
 	int set_mark;
 	enum batadv_dup_status ret = BATADV_NO_DUP;
-	uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
-	uint8_t *neigh_addr;
-	uint8_t packet_count;
+	u32 seqno = ntohl(batadv_ogm_packet->seqno);
+	u8 *neigh_addr;
+	u8 packet_count;
 	unsigned long *bitmap;
 
 	orig_node = batadv_iv_ogm_orig_get(bat_priv, batadv_ogm_packet->orig);
@@ -1406,7 +1379,8 @@
 				struct batadv_hard_iface *if_outgoing)
 {
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-	struct batadv_neigh_node *router = NULL, *router_router = NULL;
+	struct batadv_neigh_node *router = NULL;
+	struct batadv_neigh_node *router_router = NULL;
 	struct batadv_orig_node *orig_neigh_node;
 	struct batadv_orig_ifinfo *orig_ifinfo;
 	struct batadv_neigh_node *orig_neigh_router = NULL;
@@ -1418,7 +1392,7 @@
 	bool sameseq, similar_ttl;
 	struct sk_buff *skb_priv;
 	struct ethhdr *ethhdr;
-	uint8_t *prev_sender;
+	u8 *prev_sender;
 	int is_bidirect;
 
 	/* create a private copy of the skb, as some functions change tq value
@@ -1600,7 +1574,7 @@
 	struct batadv_orig_node *orig_neigh_node, *orig_node;
 	struct batadv_hard_iface *hard_iface;
 	struct batadv_ogm_packet *ogm_packet;
-	uint32_t if_incoming_seqno;
+	u32 if_incoming_seqno;
 	bool has_directlink_flag;
 	struct ethhdr *ethhdr;
 	bool is_my_oldorig = false;
@@ -1673,9 +1647,9 @@
 	if (is_my_orig) {
 		unsigned long *word;
 		int offset;
-		int32_t bit_pos;
-		int16_t if_num;
-		uint8_t *weight;
+		s32 bit_pos;
+		s16 if_num;
+		u8 *weight;
 
 		orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
 							 ethhdr->h_source);
@@ -1751,7 +1725,7 @@
 {
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct batadv_ogm_packet *ogm_packet;
-	uint8_t *packet_pos;
+	u8 *packet_pos;
 	int ogm_offset;
 	bool ret;
 
@@ -1835,7 +1809,7 @@
 	unsigned long last_seen_jiffies;
 	struct hlist_head *head;
 	int batman_count = 0;
-	uint32_t i;
+	u32 i;
 
 	seq_printf(seq, "  %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
 		   "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
@@ -1903,7 +1877,7 @@
 				   struct batadv_hard_iface *if_outgoing2)
 {
 	struct batadv_neigh_ifinfo *neigh1_ifinfo, *neigh2_ifinfo;
-	uint8_t tq1, tq2;
+	u8 tq1, tq2;
 	int diff;
 
 	neigh1_ifinfo = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
@@ -1945,7 +1919,7 @@
 			   struct batadv_hard_iface *if_outgoing2)
 {
 	struct batadv_neigh_ifinfo *neigh1_ifinfo, *neigh2_ifinfo;
-	uint8_t tq1, tq2;
+	u8 tq1, tq2;
 	bool ret;
 
 	neigh1_ifinfo = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index cf68c32..25cbc36 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -21,7 +21,7 @@
 #include <linux/bitmap.h>
 
 /* shift the packet array by n places. */
-static void batadv_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
+static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n)
 {
 	if (n <= 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE)
 		return;
@@ -35,8 +35,8 @@
  *  1 if the window was moved (either new or very old)
  *  0 if the window was not moved/shifted.
  */
-int batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
-			  int32_t seq_num_diff, int set_mark)
+int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
+			  int set_mark)
 {
 	struct batadv_priv *bat_priv = priv;
 
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 0c24562..0226b22 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -28,9 +28,9 @@
  * and curr_seqno is within range of last_seqno. Otherwise returns 0.
  */
 static inline int batadv_test_bit(const unsigned long *seq_bits,
-				  uint32_t last_seqno, uint32_t curr_seqno)
+				  u32 last_seqno, u32 curr_seqno)
 {
-	int32_t diff;
+	s32 diff;
 
 	diff = last_seqno - curr_seqno;
 	if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE)
@@ -39,7 +39,7 @@
 }
 
 /* turn corresponding bit on, so we can remember that we got the packet */
-static inline void batadv_set_bit(unsigned long *seq_bits, int32_t n)
+static inline void batadv_set_bit(unsigned long *seq_bits, s32 n)
 {
 	/* if too old, just drop it */
 	if (n < 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE)
@@ -51,7 +51,7 @@
 /* receive and process one packet, returns 1 if received seq_num is considered
  * new, 0 if old
  */
-int batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
-			  int32_t seq_num_diff, int set_mark);
+int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
+			  int set_mark);
 
 #endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index ba06092..191a702 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -51,7 +51,7 @@
 #include "packet.h"
 #include "translation-table.h"
 
-static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
+static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
 
 static void batadv_bla_periodic_work(struct work_struct *work);
 static void
@@ -59,10 +59,10 @@
 			 struct batadv_bla_backbone_gw *backbone_gw);
 
 /* return the index of the claim */
-static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
+static inline u32 batadv_choose_claim(const void *data, u32 size)
 {
 	struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
-	uint32_t hash = 0;
+	u32 hash = 0;
 
 	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
 	hash = jhash(&claim->vid, sizeof(claim->vid), hash);
@@ -71,11 +71,10 @@
 }
 
 /* return the index of the backbone gateway */
-static inline uint32_t batadv_choose_backbone_gw(const void *data,
-						 uint32_t size)
+static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
 {
 	const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
-	uint32_t hash = 0;
+	u32 hash = 0;
 
 	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
 	hash = jhash(&claim->vid, sizeof(claim->vid), hash);
@@ -89,7 +88,8 @@
 {
 	const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
 					 hash_entry);
-	const struct batadv_bla_backbone_gw *gw1 = data1, *gw2 = data2;
+	const struct batadv_bla_backbone_gw *gw1 = data1;
+	const struct batadv_bla_backbone_gw *gw2 = data2;
 
 	if (!batadv_compare_eth(gw1->orig, gw2->orig))
 		return 0;
@@ -106,7 +106,8 @@
 {
 	const void *data1 = container_of(node, struct batadv_bla_claim,
 					 hash_entry);
-	const struct batadv_bla_claim *cl1 = data1, *cl2 = data2;
+	const struct batadv_bla_claim *cl1 = data1;
+	const struct batadv_bla_claim *cl2 = data2;
 
 	if (!batadv_compare_eth(cl1->addr, cl2->addr))
 		return 0;
@@ -192,8 +193,8 @@
  * Returns the backbone gateway if found or NULL otherwise.
  */
 static struct batadv_bla_backbone_gw *
-batadv_backbone_hash_find(struct batadv_priv *bat_priv,
-			  uint8_t *addr, unsigned short vid)
+batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
+			  unsigned short vid)
 {
 	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
 	struct hlist_head *head;
@@ -269,14 +270,14 @@
  * @vid: the VLAN ID
  * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
  */
-static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
+static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
 				  unsigned short vid, int claimtype)
 {
 	struct sk_buff *skb;
 	struct ethhdr *ethhdr;
 	struct batadv_hard_iface *primary_if;
 	struct net_device *soft_iface;
-	uint8_t *hw_src;
+	u8 *hw_src;
 	struct batadv_bla_claim_dst local_claim_dest;
 	__be32 zeroip = 0;
 
@@ -304,13 +305,13 @@
 			  * with XX   = claim type
 			  * and YY:YY = group id
 			  */
-			 (uint8_t *)&local_claim_dest);
+			 (u8 *)&local_claim_dest);
 
 	if (!skb)
 		goto out;
 
 	ethhdr = (struct ethhdr *)skb->data;
-	hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
+	hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
 
 	/* now we pretend that the client would have sent this ... */
 	switch (claimtype) {
@@ -383,7 +384,7 @@
  * be found.
  */
 static struct batadv_bla_backbone_gw *
-batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
+batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
 			   unsigned short vid, bool own_backbone)
 {
 	struct batadv_bla_backbone_gw *entry;
@@ -552,7 +553,7 @@
 static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
 				     struct batadv_bla_backbone_gw *backbone_gw)
 {
-	uint8_t mac[ETH_ALEN];
+	u8 mac[ETH_ALEN];
 	__be16 crc;
 
 	memcpy(mac, batadv_announce_mac, 4);
@@ -571,7 +572,7 @@
  * @backbone_gw: the backbone gateway which claims it
  */
 static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
-				 const uint8_t *mac, const unsigned short vid,
+				 const u8 *mac, const unsigned short vid,
 				 struct batadv_bla_backbone_gw *backbone_gw)
 {
 	struct batadv_bla_claim *claim;
@@ -635,7 +636,7 @@
  * given mac address and vid.
  */
 static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
-				 const uint8_t *mac, const unsigned short vid)
+				 const u8 *mac, const unsigned short vid)
 {
 	struct batadv_bla_claim search_claim, *claim;
 
@@ -659,12 +660,11 @@
 }
 
 /* check for ANNOUNCE frame, return 1 if handled */
-static int batadv_handle_announce(struct batadv_priv *bat_priv,
-				  uint8_t *an_addr, uint8_t *backbone_addr,
-				  unsigned short vid)
+static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
+				  u8 *backbone_addr, unsigned short vid)
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
-	uint16_t crc;
+	u16 crc;
 
 	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
 		return 0;
@@ -708,8 +708,8 @@
 /* check for REQUEST frame, return 1 if handled */
 static int batadv_handle_request(struct batadv_priv *bat_priv,
 				 struct batadv_hard_iface *primary_if,
-				 uint8_t *backbone_addr,
-				 struct ethhdr *ethhdr, unsigned short vid)
+				 u8 *backbone_addr, struct ethhdr *ethhdr,
+				 unsigned short vid)
 {
 	/* check for REQUEST frame */
 	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
@@ -732,8 +732,8 @@
 /* check for UNCLAIM frame, return 1 if handled */
 static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
 				 struct batadv_hard_iface *primary_if,
-				 uint8_t *backbone_addr,
-				 uint8_t *claim_addr, unsigned short vid)
+				 u8 *backbone_addr, u8 *claim_addr,
+				 unsigned short vid)
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
 
@@ -761,7 +761,7 @@
 /* check for CLAIM frame, return 1 if handled */
 static int batadv_handle_claim(struct batadv_priv *bat_priv,
 			       struct batadv_hard_iface *primary_if,
-			       uint8_t *backbone_addr, uint8_t *claim_addr,
+			       u8 *backbone_addr, u8 *claim_addr,
 			       unsigned short vid)
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
@@ -805,10 +805,10 @@
  */
 static int batadv_check_claim_group(struct batadv_priv *bat_priv,
 				    struct batadv_hard_iface *primary_if,
-				    uint8_t *hw_src, uint8_t *hw_dst,
+				    u8 *hw_src, u8 *hw_dst,
 				    struct ethhdr *ethhdr)
 {
-	uint8_t *backbone_addr;
+	u8 *backbone_addr;
 	struct batadv_orig_node *orig_node;
 	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
 
@@ -877,7 +877,7 @@
 				    struct sk_buff *skb)
 {
 	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
-	uint8_t *hw_src, *hw_dst;
+	u8 *hw_src, *hw_dst;
 	struct vlan_hdr *vhdr, vhdr_buf;
 	struct ethhdr *ethhdr;
 	struct arphdr *arphdr;
@@ -923,7 +923,7 @@
 
 	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
 	ethhdr = eth_hdr(skb);
-	arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);
+	arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);
 
 	/* Check whether the ARP frame carries a valid
 	 * IP information
@@ -937,7 +937,7 @@
 	if (arphdr->ar_pln != 4)
 		return 0;
 
-	hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
+	hw_src = (u8 *)arphdr + sizeof(struct arphdr);
 	hw_dst = hw_src + ETH_ALEN + 4;
 	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
 	bla_dst_own = &bat_priv->bla.claim_dest;
@@ -1238,9 +1238,9 @@
 int batadv_bla_init(struct batadv_priv *bat_priv)
 {
 	int i;
-	uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
+	u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
 	struct batadv_hard_iface *primary_if;
-	uint16_t crc;
+	u16 crc;
 	unsigned long entrytime;
 
 	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
@@ -1368,7 +1368,7 @@
  *
  * Returns true if orig is a backbone for this vid, false otherwise.
  */
-bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
+bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
 				    unsigned short vid)
 {
 	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
@@ -1647,9 +1647,9 @@
 	struct batadv_bla_claim *claim;
 	struct batadv_hard_iface *primary_if;
 	struct hlist_head *head;
-	uint32_t i;
+	u32 i;
 	bool is_own;
-	uint8_t *primary_addr;
+	u8 *primary_addr;
 
 	primary_if = batadv_seq_print_text_primary_if_get(seq);
 	if (!primary_if)
@@ -1692,9 +1692,9 @@
 	struct batadv_hard_iface *primary_if;
 	struct hlist_head *head;
 	int secs, msecs;
-	uint32_t i;
+	u32 i;
 	bool is_own;
-	uint8_t *primary_addr;
+	u8 *primary_addr;
 
 	primary_if = batadv_seq_print_text_primary_if_get(seq);
 	if (!primary_if)
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 0282690..025152b 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -22,9 +22,6 @@
 
 #include <linux/types.h>
 
-struct batadv_hard_iface;
-struct batadv_orig_node;
-struct batadv_priv;
 struct seq_file;
 struct sk_buff;
 
@@ -38,7 +35,7 @@
 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
 					     void *offset);
-bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
+bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
 				    unsigned short vid);
 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
 				   struct sk_buff *skb);
@@ -84,8 +81,7 @@
 }
 
 static inline bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
-						  uint8_t *orig,
-						  unsigned short vid)
+						  u8 *orig, unsigned short vid)
 {
 	return false;
 }
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
index 187acdc..80ab8d6 100644
--- a/net/batman-adv/debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -22,7 +22,6 @@
 
 #include <linux/kconfig.h>
 
-struct batadv_hard_iface;
 struct net_device;
 
 #define BATADV_DEBUGFS_SUBDIR "batman_adv"
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 6d0b471..83bc1aa 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -19,6 +19,7 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/bitops.h>
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
@@ -101,7 +102,7 @@
 	struct batadv_dat_entry *dat_entry;
 	struct hlist_node *node_tmp;
 	struct hlist_head *head;
-	uint32_t i;
+	u32 i;
 
 	if (!bat_priv->dat.hash)
 		return;
@@ -167,11 +168,11 @@
  *
  * Returns the value of the hw_src field in the ARP packet.
  */
-static uint8_t *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
+static u8 *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
 {
-	uint8_t *addr;
+	u8 *addr;
 
-	addr = (uint8_t *)(skb->data + hdr_size);
+	addr = (u8 *)(skb->data + hdr_size);
 	addr += ETH_HLEN + sizeof(struct arphdr);
 
 	return addr;
@@ -196,7 +197,7 @@
  *
  * Returns the value of the hw_dst field in the ARP packet.
  */
-static uint8_t *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
+static u8 *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
 {
 	return batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN + 4;
 }
@@ -220,12 +221,12 @@
  *
  * Returns the selected index in the hash table for the given data.
  */
-static uint32_t batadv_hash_dat(const void *data, uint32_t size)
+static u32 batadv_hash_dat(const void *data, u32 size)
 {
-	uint32_t hash = 0;
+	u32 hash = 0;
 	const struct batadv_dat_entry *dat = data;
 	const unsigned char *key;
-	uint32_t i;
+	u32 i;
 
 	key = (const unsigned char *)&dat->ip;
 	for (i = 0; i < sizeof(dat->ip); i++) {
@@ -264,7 +265,7 @@
 	struct hlist_head *head;
 	struct batadv_dat_entry to_find, *dat_entry, *dat_entry_tmp = NULL;
 	struct batadv_hashtable *hash = bat_priv->dat.hash;
-	uint32_t index;
+	u32 index;
 
 	if (!hash)
 		return NULL;
@@ -299,7 +300,7 @@
  * @vid: VLAN identifier
  */
 static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
-				 uint8_t *mac_addr, unsigned short vid)
+				 u8 *mac_addr, unsigned short vid)
 {
 	struct batadv_dat_entry *dat_entry;
 	int hash_added;
@@ -356,11 +357,11 @@
  * @msg: message to print together with the debugging information
  */
 static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
-			   uint16_t type, int hdr_size, char *msg)
+			   u16 type, int hdr_size, char *msg)
 {
 	struct batadv_unicast_4addr_packet *unicast_4addr_packet;
 	struct batadv_bcast_packet *bcast_pkt;
-	uint8_t *orig_addr;
+	u8 *orig_addr;
 	__be32 ip_src, ip_dst;
 
 	if (msg)
@@ -423,7 +424,7 @@
 #else
 
 static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
-			   uint16_t type, int hdr_size, char *msg)
+			   u16 type, int hdr_size, char *msg)
 {
 }
 
@@ -453,7 +454,7 @@
 	int j;
 
 	/* check if orig node candidate is running DAT */
-	if (!(candidate->capabilities & BATADV_ORIG_CAPA_HAS_DAT))
+	if (!test_bit(BATADV_ORIG_CAPA_HAS_DAT, &candidate->capabilities))
 		goto out;
 
 	/* Check if this node has already been selected... */
@@ -496,7 +497,8 @@
 					 int select, batadv_dat_addr_t ip_key,
 					 batadv_dat_addr_t *last_max)
 {
-	batadv_dat_addr_t max = 0, tmp_max = 0;
+	batadv_dat_addr_t max = 0;
+	batadv_dat_addr_t tmp_max = 0;
 	struct batadv_orig_node *orig_node, *max_orig_node = NULL;
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
 	struct hlist_head *head;
@@ -708,14 +710,13 @@
  */
 static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 					   struct batadv_orig_node *orig,
-					   uint8_t flags,
-					   void *tvlv_value,
-					   uint16_t tvlv_value_len)
+					   u8 flags,
+					   void *tvlv_value, u16 tvlv_value_len)
 {
 	if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
-		orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_DAT;
+		clear_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
 	else
-		orig->capabilities |= BATADV_ORIG_CAPA_HAS_DAT;
+		set_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
 }
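
Moving from open-coded flag masks on capabilities to the <linux/bitops.h>
helpers makes each update atomic with respect to concurrent readers. The
general pattern, shown in isolation (this presumes capabilities is now an
unsigned long bit field indexed by the BATADV_ORIG_CAPA_* values rather than
a mask of pre-shifted flags):

	unsigned long caps = 0;

	set_bit(BATADV_ORIG_CAPA_HAS_DAT, &caps);	/* atomic set */
	if (test_bit(BATADV_ORIG_CAPA_HAS_DAT, &caps))
		clear_bit(BATADV_ORIG_CAPA_HAS_DAT, &caps);	/* atomic clear */
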
 
 /**
@@ -786,7 +787,7 @@
 	struct hlist_head *head;
 	unsigned long last_seen_jiffies;
 	int last_seen_msecs, last_seen_secs, last_seen_mins;
-	uint32_t i;
+	u32 i;
 
 	primary_if = batadv_seq_print_text_primary_if_get(seq);
 	if (!primary_if)
@@ -829,14 +830,14 @@
  *
  * Returns the ARP type if the skb contains a valid ARP packet, 0 otherwise.
  */
-static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
-				    struct sk_buff *skb, int hdr_size)
+static u16 batadv_arp_get_type(struct batadv_priv *bat_priv,
+			       struct sk_buff *skb, int hdr_size)
 {
 	struct arphdr *arphdr;
 	struct ethhdr *ethhdr;
 	__be32 ip_src, ip_dst;
-	uint8_t *hw_src, *hw_dst;
-	uint16_t type = 0;
+	u8 *hw_src, *hw_dst;
+	u16 type = 0;
 
 	/* pull the ethernet header */
 	if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN)))
@@ -933,9 +934,9 @@
 bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
 					   struct sk_buff *skb)
 {
-	uint16_t type = 0;
+	u16 type = 0;
 	__be32 ip_dst, ip_src;
-	uint8_t *hw_src;
+	u8 *hw_src;
 	bool ret = false;
 	struct batadv_dat_entry *dat_entry = NULL;
 	struct sk_buff *skb_new;
@@ -1021,9 +1022,9 @@
 bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
 					   struct sk_buff *skb, int hdr_size)
 {
-	uint16_t type;
+	u16 type;
 	__be32 ip_src, ip_dst;
-	uint8_t *hw_src;
+	u8 *hw_src;
 	struct sk_buff *skb_new;
 	struct batadv_dat_entry *dat_entry = NULL;
 	bool ret = false;
@@ -1099,9 +1100,9 @@
 void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
 					 struct sk_buff *skb)
 {
-	uint16_t type;
+	u16 type;
 	__be32 ip_src, ip_dst;
-	uint8_t *hw_src, *hw_dst;
+	u8 *hw_src, *hw_dst;
 	int hdr_size = 0;
 	unsigned short vid;
 
@@ -1145,9 +1146,9 @@
 bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
 					 struct sk_buff *skb, int hdr_size)
 {
-	uint16_t type;
+	u16 type;
 	__be32 ip_src, ip_dst;
-	uint8_t *hw_src, *hw_dst;
+	u8 *hw_src, *hw_dst;
 	bool dropped = false;
 	unsigned short vid;
 
@@ -1201,7 +1202,7 @@
 bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
 				      struct batadv_forw_packet *forw_packet)
 {
-	uint16_t type;
+	u16 type;
 	__be32 ip_dst;
 	struct batadv_dat_entry *dat_entry = NULL;
 	bool ret = false;
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
index 3181507..26d4a52 100644
--- a/net/batman-adv/distributed-arp-table.h
+++ b/net/batman-adv/distributed-arp-table.h
@@ -54,7 +54,7 @@
 static inline void
 batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node)
 {
-	uint32_t addr;
+	u32 addr;
 
 	addr = batadv_choose_orig(orig_node->orig, BATADV_DAT_ADDR_MAX);
 	orig_node->dat_addr = (batadv_dat_addr_t)addr;
@@ -69,7 +69,7 @@
 batadv_dat_init_own_addr(struct batadv_priv *bat_priv,
 			 struct batadv_hard_iface *primary_if)
 {
-	uint32_t addr;
+	u32 addr;
 
 	addr = batadv_choose_orig(primary_if->net_dev->dev_addr,
 				  BATADV_DAT_ADDR_MAX);
@@ -89,7 +89,7 @@
  * Updates the ethtool statistics for the received packet if it is a DAT subtype
  */
 static inline void batadv_dat_inc_counter(struct batadv_priv *bat_priv,
-					  uint8_t subtype)
+					  u8 subtype)
 {
 	switch (subtype) {
 	case BATADV_P_DAT_DHT_GET:
@@ -169,7 +169,7 @@
 }
 
 static inline void batadv_dat_inc_counter(struct batadv_priv *bat_priv,
-					  uint8_t subtype)
+					  u8 subtype)
 {
 }
 
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index c0f0d01..700c96c 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -25,6 +25,7 @@
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
+#include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/pkt_sched.h>
 #include <linux/skbuff.h>
@@ -66,7 +67,7 @@
 			    bool (*check_cb)(struct batadv_frag_table_entry *))
 {
 	struct batadv_frag_table_entry *chain;
-	uint8_t i;
+	u8 i;
 
 	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
 		chain = &orig_node->fragments[i];
@@ -110,8 +111,10 @@
  * without searching for the right position.
  */
 static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
-				   uint16_t seqno)
+				   u16 seqno)
 {
+	lockdep_assert_held(&chain->lock);
+
 	if (chain->seqno == seqno)
 		return false;
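
lockdep_assert_held() turns the implicit locking contract into a runtime
check under CONFIG_LOCKDEP (and compiles away otherwise), so callers are
expected to look like:

	spin_lock_bh(&chain->lock);
	batadv_frag_init_chain(chain, seqno);	/* assertion satisfied */
	spin_unlock_bh(&chain->lock);
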
 
@@ -145,8 +148,8 @@
 	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
 	struct batadv_frag_list_entry *frag_entry_last = NULL;
 	struct batadv_frag_packet *frag_packet;
-	uint8_t bucket;
-	uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
+	u8 bucket;
+	u16 seqno, hdr_size = sizeof(struct batadv_frag_packet);
 	bool ret = false;
 
 	/* Linearize packet to avoid linearizing 16 packets in a row when doing
@@ -351,7 +354,7 @@
 	struct batadv_orig_node *orig_node_dst = NULL;
 	struct batadv_neigh_node *neigh_node = NULL;
 	struct batadv_frag_packet *packet;
-	uint16_t total_size;
+	u16 total_size;
 	bool ret = false;
 
 	packet = (struct batadv_frag_packet *)skb->data;
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index cffa92d..e6c8382 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -27,7 +27,6 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
-#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/netdevice.h>
@@ -153,20 +152,14 @@
 	struct batadv_neigh_node *router;
 	struct batadv_neigh_ifinfo *router_ifinfo;
 	struct batadv_gw_node *gw_node, *curr_gw = NULL;
-	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
-	uint32_t gw_divisor;
-	uint8_t max_tq = 0;
-	uint8_t tq_avg;
+	u64 max_gw_factor = 0;
+	u64 tmp_gw_factor = 0;
+	u8 max_tq = 0;
+	u8 tq_avg;
 	struct batadv_orig_node *orig_node;
 
-	gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
-	gw_divisor *= 64;
-
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
-		if (gw_node->deleted)
-			continue;
-
 		orig_node = gw_node->orig_node;
 		router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
 		if (!router)
@@ -187,7 +180,7 @@
 			tmp_gw_factor = tq_avg * tq_avg;
 			tmp_gw_factor *= gw_node->bandwidth_down;
 			tmp_gw_factor *= 100 * 100;
-			tmp_gw_factor /= gw_divisor;
+			tmp_gw_factor >>= 18;
 
 			if ((tmp_gw_factor > max_gw_factor) ||
 			    ((tmp_gw_factor == max_gw_factor) &&
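
For reference on the shift (assuming BATADV_TQ_LOCAL_WINDOW_SIZE is 64, as
elsewhere in this codebase), the removed divisor works out to a power of two:

	gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE * 64
	           = 64 * 64 * 64
	           = 262144
	           = 2^18

so tmp_gw_factor >>= 18 performs the same division, while widening the factor
to u64 avoids the overflow the old u32 arithmetic risked: tq_avg^2 * 100 * 100
alone can approach 2^30 before bandwidth_down is even multiplied in.
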
@@ -267,7 +260,8 @@
 
 void batadv_gw_election(struct batadv_priv *bat_priv)
 {
-	struct batadv_gw_node *curr_gw = NULL, *next_gw = NULL;
+	struct batadv_gw_node *curr_gw = NULL;
+	struct batadv_gw_node *next_gw = NULL;
 	struct batadv_neigh_node *router = NULL;
 	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
 	char gw_addr[18] = { '\0' };
@@ -351,8 +345,9 @@
 	struct batadv_neigh_ifinfo *router_orig_tq = NULL;
 	struct batadv_neigh_ifinfo *router_gw_tq = NULL;
 	struct batadv_orig_node *curr_gw_orig;
-	struct batadv_neigh_node *router_gw = NULL, *router_orig = NULL;
-	uint8_t gw_tq_avg, orig_tq_avg;
+	struct batadv_neigh_node *router_gw = NULL;
+	struct batadv_neigh_node *router_orig = NULL;
+	u8 gw_tq_avg, orig_tq_avg;
 
 	curr_gw_orig = batadv_gw_get_selected_orig(bat_priv);
 	if (!curr_gw_orig)
@@ -474,9 +469,6 @@
 		if (gw_node_tmp->orig_node != orig_node)
 			continue;
 
-		if (gw_node_tmp->deleted)
-			continue;
-
 		if (!atomic_inc_not_zero(&gw_node_tmp->refcount))
 			continue;
 
@@ -526,9 +518,7 @@
 	gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
 	gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
 
-	gw_node->deleted = 0;
 	if (ntohl(gateway->bandwidth_down) == 0) {
-		gw_node->deleted = jiffies;
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 			   "Gateway %pM removed from gateway list\n",
 			   orig_node->orig);
@@ -536,14 +526,21 @@
 		/* Note: We don't need a NULL check here, since curr_gw never
 		 * gets dereferenced.
 		 */
+		spin_lock_bh(&bat_priv->gw.list_lock);
+		hlist_del_init_rcu(&gw_node->list);
+		spin_unlock_bh(&bat_priv->gw.list_lock);
+
+		batadv_gw_node_free_ref(gw_node);
+
 		curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 		if (gw_node == curr_gw)
 			batadv_gw_reselect(bat_priv);
+
+		if (curr_gw)
+			batadv_gw_node_free_ref(curr_gw);
 	}
 
 out:
-	if (curr_gw)
-		batadv_gw_node_free_ref(curr_gw);
 	if (gw_node)
 		batadv_gw_node_free_ref(gw_node);
 }
@@ -559,39 +556,18 @@
 	batadv_gw_node_update(bat_priv, orig_node, &gateway);
 }
 
-void batadv_gw_node_purge(struct batadv_priv *bat_priv)
+void batadv_gw_node_free(struct batadv_priv *bat_priv)
 {
-	struct batadv_gw_node *gw_node, *curr_gw;
+	struct batadv_gw_node *gw_node;
 	struct hlist_node *node_tmp;
-	unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT);
-	int do_reselect = 0;
-
-	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
 	spin_lock_bh(&bat_priv->gw.list_lock);
-
 	hlist_for_each_entry_safe(gw_node, node_tmp,
 				  &bat_priv->gw.list, list) {
-		if (((!gw_node->deleted) ||
-		     (time_before(jiffies, gw_node->deleted + timeout))) &&
-		    atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
-			continue;
-
-		if (curr_gw == gw_node)
-			do_reselect = 1;
-
-		hlist_del_rcu(&gw_node->list);
+		hlist_del_init_rcu(&gw_node->list);
 		batadv_gw_node_free_ref(gw_node);
 	}
-
 	spin_unlock_bh(&bat_priv->gw.list_lock);
-
-	/* gw_reselect() needs to acquire the gw_list_lock */
-	if (do_reselect)
-		batadv_gw_reselect(bat_priv);
-
-	if (curr_gw)
-		batadv_gw_node_free_ref(curr_gw);
 }
 
 /* fails if orig_node has no router */
@@ -655,9 +631,6 @@
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
-		if (gw_node->deleted)
-			continue;
-
 		/* fails if orig_node has no router */
 		if (batadv_write_buffer_text(bat_priv, seq, gw_node) < 0)
 			continue;
@@ -692,7 +665,7 @@
  */
 enum batadv_dhcp_recipient
 batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
-			     uint8_t *chaddr)
+			     u8 *chaddr)
 {
 	enum batadv_dhcp_recipient ret = BATADV_DHCP_NO;
 	struct ethhdr *ethhdr;
@@ -702,7 +675,7 @@
 	struct vlan_ethhdr *vhdr;
 	int chaddr_offset;
 	__be16 proto;
-	uint8_t *p;
+	u8 *p;
 
 	/* check for ethernet header */
 	if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
@@ -812,13 +785,15 @@
 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
 			    struct sk_buff *skb)
 {
-	struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
+	struct batadv_neigh_node *neigh_curr = NULL;
+	struct batadv_neigh_node *neigh_old = NULL;
 	struct batadv_orig_node *orig_dst_node = NULL;
-	struct batadv_gw_node *gw_node = NULL, *curr_gw = NULL;
+	struct batadv_gw_node *gw_node = NULL;
+	struct batadv_gw_node *curr_gw = NULL;
 	struct batadv_neigh_ifinfo *curr_ifinfo, *old_ifinfo;
 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
 	bool out_of_range = false;
-	uint8_t curr_tq_avg;
+	u8 curr_tq_avg;
 	unsigned short vid;
 
 	vid = batadv_get_vid(skb, 0);
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 89565b4..fa95277 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -38,11 +38,11 @@
 			   struct batadv_tvlv_gateway_data *gateway);
 void batadv_gw_node_delete(struct batadv_priv *bat_priv,
 			   struct batadv_orig_node *orig_node);
-void batadv_gw_node_purge(struct batadv_priv *bat_priv);
+void batadv_gw_node_free(struct batadv_priv *bat_priv);
 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
 enum batadv_dhcp_recipient
 batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
-			     uint8_t *chaddr);
+			     u8 *chaddr);
 
 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 39cf44c..0cb5e6b 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -19,8 +19,10 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/errno.h>
 #include <linux/byteorder/generic.h>
 #include <linux/kernel.h>
+#include <linux/math64.h>
 #include <linux/netdevice.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
@@ -39,11 +41,11 @@
  * Returns false on parse error and true otherwise.
  */
 static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
-				      uint32_t *down, uint32_t *up)
+				      u32 *down, u32 *up)
 {
 	enum batadv_bandwidth_units bw_unit_type = BATADV_BW_UNIT_KBIT;
 	char *slash_ptr, *tmp_ptr;
-	long ldown, lup;
+	u64 ldown, lup;
 	int ret;
 
 	slash_ptr = strchr(buff, '/');
@@ -61,7 +63,7 @@
 			*tmp_ptr = '\0';
 	}
 
-	ret = kstrtol(buff, 10, &ldown);
+	ret = kstrtou64(buff, 10, &ldown);
 	if (ret) {
 		batadv_err(net_dev,
 			   "Download speed of gateway mode invalid: %s\n",
@@ -71,14 +73,31 @@
 
 	switch (bw_unit_type) {
 	case BATADV_BW_UNIT_MBIT:
-		*down = ldown * 10;
+		/* prevent overflow */
+		if (U64_MAX / 10 < ldown) {
+			batadv_err(net_dev,
+				   "Download speed of gateway mode too large: %s\n",
+				   buff);
+			return false;
+		}
+
+		ldown *= 10;
 		break;
 	case BATADV_BW_UNIT_KBIT:
 	default:
-		*down = ldown / 100;
+		ldown = div_u64(ldown, 100);
 		break;
 	}
 
+	if (U32_MAX < ldown) {
+		batadv_err(net_dev,
+			   "Download speed of gateway mode too large: %s\n",
+			   buff);
+		return false;
+	}
+
+	*down = ldown;
+
 	/* we also got some upload info */
 	if (slash_ptr) {
 		bw_unit_type = BATADV_BW_UNIT_KBIT;
@@ -94,7 +113,7 @@
 				*tmp_ptr = '\0';
 		}
 
-		ret = kstrtol(slash_ptr + 1, 10, &lup);
+		ret = kstrtou64(slash_ptr + 1, 10, &lup);
 		if (ret) {
 			batadv_err(net_dev,
 				   "Upload speed of gateway mode invalid: %s\n",
@@ -104,13 +123,30 @@
 
 		switch (bw_unit_type) {
 		case BATADV_BW_UNIT_MBIT:
-			*up = lup * 10;
+			/* prevent overflow */
+			if (U64_MAX / 10 < lup) {
+				batadv_err(net_dev,
+					   "Upload speed of gateway mode too large: %s\n",
+					   slash_ptr + 1);
+				return false;
+			}
+
+			lup *= 10;
 			break;
 		case BATADV_BW_UNIT_KBIT:
 		default:
-			*up = lup / 100;
+			lup = div_u64(lup, 100);
 			break;
 		}
+
+		if (U32_MAX < lup) {
+			batadv_err(net_dev,
+				   "Upload speed of gateway mode too large: %s\n",
+				   slash_ptr + 1);
+			return false;
+		}
+
+		*up = lup;
 	}
 
 	return true;
@@ -124,7 +160,7 @@
 void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
 {
 	struct batadv_tvlv_gateway_data gw;
-	uint32_t down, up;
+	u32 down, up;
 	char gw_mode;
 
 	gw_mode = atomic_read(&bat_priv->gw_mode);
@@ -149,7 +185,10 @@
 				size_t count)
 {
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
-	uint32_t down_curr, up_curr, down_new = 0, up_new = 0;
+	u32 down_curr;
+	u32 up_curr;
+	u32 down_new = 0;
+	u32 up_new = 0;
 	bool ret;
 
 	down_curr = (unsigned int)atomic_read(&bat_priv->gw.bandwidth_down);
@@ -157,7 +196,7 @@
 
 	ret = batadv_parse_gw_bandwidth(net_dev, buff, &down_new, &up_new);
 	if (!ret)
-		goto end;
+		return -EINVAL;
 
 	if (!down_new)
 		down_new = 1;
@@ -181,7 +220,6 @@
 	atomic_set(&bat_priv->gw.bandwidth_up, up_new);
 	batadv_gw_tvlv_container_update(bat_priv);
 
-end:
 	return count;
 }
 
@@ -195,9 +233,8 @@
  */
 static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig,
-					  uint8_t flags,
-					  void *tvlv_value,
-					  uint16_t tvlv_value_len)
+					  u8 flags,
+					  void *tvlv_value, u16 tvlv_value_len)
 {
 	struct batadv_tvlv_gateway_data gateway, *gateway_ptr;
 
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index bd5c812..ab893e3 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -22,7 +22,6 @@
 
 #include <linux/types.h>
 
-struct batadv_priv;
 struct net_device;
 
 enum batadv_gw_modes {
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index f4a15d2..f11345e 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -252,6 +252,44 @@
 	rcu_read_unlock();
 }
 
+/**
+ * batadv_hardif_recalc_extra_skbroom - Recalculate skbuff extra head/tailroom

+ * @soft_iface: netdev struct of the mesh interface
+ */
+static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface)
+{
+	const struct batadv_hard_iface *hard_iface;
+	unsigned short lower_header_len = ETH_HLEN;
+	unsigned short lower_headroom = 0;
+	unsigned short lower_tailroom = 0;
+	unsigned short needed_headroom;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+		if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
+			continue;
+
+		if (hard_iface->soft_iface != soft_iface)
+			continue;
+
+		lower_header_len = max_t(unsigned short, lower_header_len,
+					 hard_iface->net_dev->hard_header_len);
+
+		lower_headroom = max_t(unsigned short, lower_headroom,
+				       hard_iface->net_dev->needed_headroom);
+
+		lower_tailroom = max_t(unsigned short, lower_tailroom,
+				       hard_iface->net_dev->needed_tailroom);
+	}
+	rcu_read_unlock();
+
+	needed_headroom = lower_headroom + (lower_header_len - ETH_HLEN);
+	needed_headroom += batadv_max_header_len();
+
+	soft_iface->needed_headroom = needed_headroom;
+	soft_iface->needed_tailroom = lower_tailroom;
+}
+
 int batadv_hardif_min_mtu(struct net_device *soft_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -474,6 +512,8 @@
 			   "Not using interface %s (retrying later): interface not active\n",
 			   hard_iface->net_dev->name);
 
+	batadv_hardif_recalc_extra_skbroom(soft_iface);
+
 	/* begin scheduling originator messages on that interface */
 	batadv_schedule_bat_ogm(hard_iface);
 
@@ -528,6 +568,9 @@
 	batadv_purge_outstanding_packets(bat_priv, hard_iface);
 	dev_put(hard_iface->soft_iface);
 
+	netdev_upper_dev_unlink(hard_iface->net_dev, hard_iface->soft_iface);
+	batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface);
+
 	/* nobody uses this interface anymore */
 	if (!bat_priv->num_ifaces) {
 		batadv_gw_check_client_stop(bat_priv);
@@ -536,7 +579,6 @@
 			batadv_softif_destroy_sysfs(hard_iface->soft_iface);
 	}
 
-	netdev_upper_dev_unlink(hard_iface->net_dev, hard_iface->soft_iface);
 	hard_iface->soft_iface = NULL;
 	batadv_hardif_free_ref(hard_iface);
 
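
batadv_hardif_recalc_extra_skbroom() above takes the maximum
hard_header_len, needed_headroom and needed_tailroom over all active
slave interfaces and then adds the mesh encapsulation overhead on top. A
standalone sketch of that aggregation with made-up lower devices; the
BATADV_MAX_HEADER_LEN value below is an assumed stand-in for what
batadv_max_header_len() would return:

#include <stdio.h>

#define ETH_HLEN 14
#define BATADV_MAX_HEADER_LEN 24	/* assumed stand-in, see above */

struct lower_dev {
	unsigned short hard_header_len;
	unsigned short needed_headroom;
	unsigned short needed_tailroom;
	int in_use;
};

static unsigned short max_us(unsigned short a, unsigned short b)
{
	return a > b ? a : b;
}

int main(void)
{
	struct lower_dev lowers[] = {
		{ 14, 0, 0, 1 },	/* plain Ethernet device */
		{ 18, 32, 4, 1 },	/* e.g. a VLAN/tunnel device */
		{ 14, 64, 0, 0 },	/* not in use: ignored */
	};
	unsigned short header_len = ETH_HLEN, headroom = 0, tailroom = 0;
	unsigned short needed_headroom;
	size_t i;

	for (i = 0; i < sizeof(lowers) / sizeof(lowers[0]); i++) {
		if (!lowers[i].in_use)
			continue;
		header_len = max_us(header_len, lowers[i].hard_header_len);
		headroom = max_us(headroom, lowers[i].needed_headroom);
		tailroom = max_us(tailroom, lowers[i].needed_tailroom);
	}

	/* anything beyond a plain Ethernet header is extra headroom */
	needed_headroom = headroom + (header_len - ETH_HLEN);
	needed_headroom += BATADV_MAX_HEADER_LEN;

	printf("needed_headroom=%u needed_tailroom=%u\n",
	       needed_headroom, tailroom);
	return 0;
}
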
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index e89f314..2ea6a18 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -25,7 +25,7 @@
 /* clears the hash */
 static void batadv_hash_init(struct batadv_hashtable *hash)
 {
-	uint32_t i;
+	u32 i;
 
 	for (i = 0; i < hash->size; i++) {
 		INIT_HLIST_HEAD(&hash->table[i]);
@@ -42,7 +42,7 @@
 }
 
 /* allocates and clears the hash */
-struct batadv_hashtable *batadv_hash_new(uint32_t size)
+struct batadv_hashtable *batadv_hash_new(u32 size)
 {
 	struct batadv_hashtable *hash;
 
@@ -73,7 +73,7 @@
 void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
 				struct lock_class_key *key)
 {
-	uint32_t i;
+	u32 i;
 
 	for (i = 0; i < hash->size; i++)
 		lockdep_set_class(&hash->list_locks[i], key);
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 5065f50..3776262 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -39,17 +39,17 @@
  * based on the key in the data of the first
  * argument and the size the second
  */
-typedef uint32_t (*batadv_hashdata_choose_cb)(const void *, uint32_t);
+typedef u32 (*batadv_hashdata_choose_cb)(const void *, u32);
 typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *);
 
 struct batadv_hashtable {
 	struct hlist_head *table;   /* the hashtable itself with the buckets */
 	spinlock_t *list_locks;     /* spinlock for each hash list entry */
-	uint32_t size;		    /* size of hashtable */
+	u32 size;		    /* size of hashtable */
 };
 
 /* allocates and clears the hash */
-struct batadv_hashtable *batadv_hash_new(uint32_t size);
+struct batadv_hashtable *batadv_hash_new(u32 size);
 
 /* set class key for all locks */
 void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
@@ -69,7 +69,7 @@
 	struct hlist_head *head;
 	struct hlist_node *node, *node_tmp;
 	spinlock_t *list_lock; /* spinlock to protect write access */
-	uint32_t i;
+	u32 i;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -105,7 +105,7 @@
 				  const void *data,
 				  struct hlist_node *data_node)
 {
-	uint32_t index;
+	u32 index;
 	int ret = -1;
 	struct hlist_head *head;
 	struct hlist_node *node;
@@ -149,7 +149,7 @@
 				       batadv_hashdata_choose_cb choose,
 				       void *data)
 {
-	uint32_t index;
+	u32 index;
 	struct hlist_node *node;
 	struct hlist_head *head;
 	void *data_save = NULL;
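
The batadv_hashdata_choose_cb contract is unchanged by this type
cleanup: map arbitrary key data to a bucket index in [0, size). A
userspace sketch of such a callback keyed on a MAC address; a trivial
FNV-1a hash stands in for the kernel's jhash():

#include <stdint.h>
#include <stdio.h>

typedef uint32_t (*hashdata_choose_cb)(const void *data, uint32_t size);

static uint32_t choose_mac(const void *data, uint32_t size)
{
	const uint8_t *mac = data;
	uint32_t hash = 2166136261u;
	int i;

	for (i = 0; i < 6; i++) {	/* ETH_ALEN bytes */
		hash ^= mac[i];
		hash *= 16777619u;
	}
	return hash % size;
}

int main(void)
{
	uint8_t mac[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
	hashdata_choose_cb choose = choose_mac;

	printf("bucket: %u of 1024\n", choose(mac, 1024));
	return 0;
}
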
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 07061bc..bcabb5e 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -183,7 +183,7 @@
 	struct batadv_orig_node *orig_node = NULL;
 	struct batadv_neigh_node *neigh_node = NULL;
 	size_t packet_len = sizeof(struct batadv_icmp_packet);
-	uint8_t *addr;
+	u8 *addr;
 
 	if (len < sizeof(struct batadv_icmp_header)) {
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -337,8 +337,8 @@
 }
 
 /**
- * batadv_socket_receive_packet - schedule an icmp packet to be sent to userspace
- *  on an icmp socket.
+ * batadv_socket_receive_packet - schedule an icmp packet to be sent to
+ *  userspace on an icmp socket.
  * @socket_client: the socket this packet belongs to
  * @icmph: pointer to the header of the icmp packet
  * @icmp_len: total length of the icmp packet
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index 7de7fce..e937143 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -23,7 +23,6 @@
 #include <linux/types.h>
 
 struct batadv_icmp_header;
-struct batadv_priv;
 
 #define BATADV_ICMP_SOCKET "socket"
 
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 8457097..d7f17c1 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -30,6 +30,7 @@
 #include <linux/ipv6.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/lockdep.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/netdevice.h>
@@ -148,7 +149,7 @@
 	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
 #endif
 	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
-	INIT_LIST_HEAD(&bat_priv->tt.req_list);
+	INIT_HLIST_HEAD(&bat_priv->tt.req_list);
 	INIT_LIST_HEAD(&bat_priv->tt.roam_list);
 #ifdef CONFIG_BATMAN_ADV_MCAST
 	INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
@@ -198,7 +199,7 @@
 
 	batadv_purge_outstanding_packets(bat_priv, NULL);
 
-	batadv_gw_node_purge(bat_priv);
+	batadv_gw_node_free(bat_priv);
 	batadv_nc_mesh_free(bat_priv);
 	batadv_dat_free(bat_priv);
 	batadv_bla_free(bat_priv);
@@ -234,7 +235,7 @@
  *
  * Returns 'true' if the mac address was found, false otherwise.
  */
-bool batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
+bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
 {
 	const struct batadv_hard_iface *hard_iface;
 	bool is_my_mac = false;
@@ -387,7 +388,7 @@
 	struct batadv_priv *bat_priv;
 	struct batadv_ogm_packet *batadv_ogm_packet;
 	struct batadv_hard_iface *hard_iface;
-	uint8_t idx;
+	u8 idx;
 	int ret;
 
 	hard_iface = container_of(ptype, struct batadv_hard_iface,
@@ -496,7 +497,7 @@
 }
 
 int
-batadv_recv_handler_register(uint8_t packet_type,
+batadv_recv_handler_register(u8 packet_type,
 			     int (*recv_handler)(struct sk_buff *,
 						 struct batadv_hard_iface *))
 {
@@ -512,7 +513,7 @@
 	return 0;
 }
 
-void batadv_recv_handler_unregister(uint8_t packet_type)
+void batadv_recv_handler_unregister(u8 packet_type)
 {
 	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
 }
@@ -583,7 +584,7 @@
 	seq_puts(seq, "Available routing algorithms:\n");
 
 	hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
-		seq_printf(seq, "%s\n", bat_algo_ops->name);
+		seq_printf(seq, " * %s\n", bat_algo_ops->name);
 	}
 
 	return 0;
@@ -642,8 +643,7 @@
  * Returns tvlv handler if found or NULL otherwise.
  */
 static struct batadv_tvlv_handler
-*batadv_tvlv_handler_get(struct batadv_priv *bat_priv,
-			 uint8_t type, uint8_t version)
+*batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
 {
 	struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
 
@@ -691,8 +691,7 @@
  * Returns tvlv container if found or NULL otherwise.
  */
 static struct batadv_tvlv_container
-*batadv_tvlv_container_get(struct batadv_priv *bat_priv,
-			   uint8_t type, uint8_t version)
+*batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
 {
 	struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
 
@@ -723,10 +722,10 @@
  *
  * Returns size of all currently registered tvlv containers in bytes.
  */
-static uint16_t batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
+static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
 {
 	struct batadv_tvlv_container *tvlv;
-	uint16_t tvlv_len = 0;
+	u16 tvlv_len = 0;
 
 	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
 		tvlv_len += sizeof(struct batadv_tvlv_hdr);
@@ -739,13 +738,17 @@
 /**
  * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
  *  list
+ * @bat_priv: the bat priv with all the soft interface information
  * @tvlv: the to be removed tvlv container
  *
  * Has to be called with the appropriate locks being acquired
  * (tvlv.container_list_lock).
  */
-static void batadv_tvlv_container_remove(struct batadv_tvlv_container *tvlv)
+static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
+					 struct batadv_tvlv_container *tvlv)
 {
+	lockdep_assert_held(&bat_priv->tvlv.container_list_lock);
+
 	if (!tvlv)
 		return;
 
@@ -764,13 +767,13 @@
  * @version: tvlv container type to unregister
  */
 void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
-				      uint8_t type, uint8_t version)
+				      u8 type, u8 version)
 {
 	struct batadv_tvlv_container *tvlv;
 
 	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
 	tvlv = batadv_tvlv_container_get(bat_priv, type, version);
-	batadv_tvlv_container_remove(tvlv);
+	batadv_tvlv_container_remove(bat_priv, tvlv);
 	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
 }
 
@@ -787,8 +790,8 @@
  * content is going to replace the old one.
  */
 void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
-				    uint8_t type, uint8_t version,
-				    void *tvlv_value, uint16_t tvlv_value_len)
+				    u8 type, u8 version,
+				    void *tvlv_value, u16 tvlv_value_len)
 {
 	struct batadv_tvlv_container *tvlv_old, *tvlv_new;
 
@@ -809,7 +812,7 @@
 
 	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
 	tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
-	batadv_tvlv_container_remove(tvlv_old);
+	batadv_tvlv_container_remove(bat_priv, tvlv_old);
 	hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
 	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
 }
@@ -861,14 +864,13 @@
  *
  * Returns size of all appended tvlv containers in bytes.
  */
-uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
-					  unsigned char **packet_buff,
-					  int *packet_buff_len,
-					  int packet_min_len)
+u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
+				     unsigned char **packet_buff,
+				     int *packet_buff_len, int packet_min_len)
 {
 	struct batadv_tvlv_container *tvlv;
 	struct batadv_tvlv_hdr *tvlv_hdr;
-	uint16_t tvlv_value_len;
+	u16 tvlv_value_len;
 	void *tvlv_value;
 	bool ret;
 
@@ -893,7 +895,7 @@
 		tvlv_hdr->len = tvlv->tvlv_hdr.len;
 		tvlv_value = tvlv_hdr + 1;
 		memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
-		tvlv_value = (uint8_t *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
+		tvlv_value = (u8 *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
 	}
 
 end:
@@ -920,8 +922,8 @@
 				    struct batadv_tvlv_handler *tvlv_handler,
 				    bool ogm_source,
 				    struct batadv_orig_node *orig_node,
-				    uint8_t *src, uint8_t *dst,
-				    void *tvlv_value, uint16_t tvlv_value_len)
+				    u8 *src, u8 *dst,
+				    void *tvlv_value, u16 tvlv_value_len)
 {
 	if (!tvlv_handler)
 		return NET_RX_SUCCESS;
@@ -972,13 +974,13 @@
 int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
 				   bool ogm_source,
 				   struct batadv_orig_node *orig_node,
-				   uint8_t *src, uint8_t *dst,
-				   void *tvlv_value, uint16_t tvlv_value_len)
+				   u8 *src, u8 *dst,
+				   void *tvlv_value, u16 tvlv_value_len)
 {
 	struct batadv_tvlv_handler *tvlv_handler;
 	struct batadv_tvlv_hdr *tvlv_hdr;
-	uint16_t tvlv_value_cont_len;
-	uint8_t cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
+	u16 tvlv_value_cont_len;
+	u8 cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
 	int ret = NET_RX_SUCCESS;
 
 	while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
@@ -1000,7 +1002,7 @@
 						tvlv_value_cont_len);
 		if (tvlv_handler)
 			batadv_tvlv_handler_free_ref(tvlv_handler);
-		tvlv_value = (uint8_t *)tvlv_value + tvlv_value_cont_len;
+		tvlv_value = (u8 *)tvlv_value + tvlv_value_cont_len;
 		tvlv_value_len -= tvlv_value_cont_len;
 	}
 
@@ -1034,7 +1036,7 @@
 			     struct batadv_orig_node *orig_node)
 {
 	void *tvlv_value;
-	uint16_t tvlv_value_len;
+	u16 tvlv_value_len;
 
 	if (!batadv_ogm_packet)
 		return;
@@ -1066,14 +1068,14 @@
 void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
 				  void (*optr)(struct batadv_priv *bat_priv,
 					       struct batadv_orig_node *orig,
-					       uint8_t flags,
+					       u8 flags,
 					       void *tvlv_value,
-					       uint16_t tvlv_value_len),
+					       u16 tvlv_value_len),
 				  int (*uptr)(struct batadv_priv *bat_priv,
-					      uint8_t *src, uint8_t *dst,
+					      u8 *src, u8 *dst,
 					      void *tvlv_value,
-					      uint16_t tvlv_value_len),
-				  uint8_t type, uint8_t version, uint8_t flags)
+					      u16 tvlv_value_len),
+				  u8 type, u8 version, u8 flags)
 {
 	struct batadv_tvlv_handler *tvlv_handler;
 
@@ -1108,7 +1110,7 @@
  * @version: tvlv handler version to be unregistered
  */
 void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
-				    uint8_t type, uint8_t version)
+				    u8 type, u8 version)
 {
 	struct batadv_tvlv_handler *tvlv_handler;
 
@@ -1134,9 +1136,9 @@
  * @tvlv_value: tvlv content
  * @tvlv_value_len: tvlv content length
  */
-void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
-			      uint8_t *dst, uint8_t type, uint8_t version,
-			      void *tvlv_value, uint16_t tvlv_value_len)
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
+			      u8 *dst, u8 type, u8 version,
+			      void *tvlv_value, u16 tvlv_value_len)
 {
 	struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
 	struct batadv_tvlv_hdr *tvlv_hdr;
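
batadv_tvlv_container_list_size() above reserves one batadv_tvlv_hdr
plus ntohs(len) payload bytes per registered container, which is exactly
the room batadv_tvlv_container_ogm_append() later fills behind the OGM.
A userspace sketch of that accounting, with mirrored stand-in types:

#include <arpa/inet.h>	/* htons/ntohs */
#include <stdint.h>
#include <stdio.h>

struct tvlv_hdr {
	uint8_t type;
	uint8_t version;
	uint16_t len;	/* big endian on the wire, like __be16 */
};

struct tvlv_container {
	struct tvlv_hdr hdr;
	/* ntohs(hdr.len) payload bytes follow in the real structure */
};

int main(void)
{
	struct tvlv_container containers[] = {
		{ { 1, 1, htons(4) } },	/* e.g. a gateway announcement */
		{ { 2, 1, htons(1) } },	/* e.g. a multicast flags byte */
	};
	uint16_t tvlv_len = 0;
	size_t i;

	for (i = 0; i < sizeof(containers) / sizeof(containers[0]); i++) {
		tvlv_len += sizeof(struct tvlv_hdr);
		tvlv_len += ntohs(containers[i].hdr.len);
	}

	printf("space to reserve behind the OGM: %u bytes\n", tvlv_len);
	return 0;
}
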
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 41d27c7..ebd8af0 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -24,7 +24,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2015.1"
+#define BATADV_SOURCE_VERSION "2015.2"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -193,7 +193,7 @@
 
 int batadv_mesh_init(struct net_device *soft_iface);
 void batadv_mesh_free(struct net_device *soft_iface);
-bool batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
+bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr);
 struct batadv_hard_iface *
 batadv_seq_print_text_primary_if_get(struct seq_file *seq);
 int batadv_max_header_len(void);
@@ -202,10 +202,10 @@
 			   struct packet_type *ptype,
 			   struct net_device *orig_dev);
 int
-batadv_recv_handler_register(uint8_t packet_type,
+batadv_recv_handler_register(u8 packet_type,
 			     int (*recv_handler)(struct sk_buff *,
 						 struct batadv_hard_iface *));
-void batadv_recv_handler_unregister(uint8_t packet_type);
+void batadv_recv_handler_unregister(u8 packet_type);
 int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
 int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
@@ -304,7 +304,7 @@
  * they handle overflows/underflows and can correctly check for a
  * predecessor/successor unless the variable sequence number has grown by
 * more than 2**(bitwidth(x)-1)-1.
- * This means that for a uint8_t with the maximum value 255, it would think:
+ * This means that for a u8 with the maximum value 255, it would think:
  *  - when adding nothing - it is neither a predecessor nor a successor
  *  - before adding more than 127 to the starting value - it is a predecessor,
  *  - when adding 128 - it is neither a predecessor nor a successor,
@@ -327,10 +327,9 @@
 #define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
 
 /* Sum and return the cpu-local counters for index 'idx' */
-static inline uint64_t batadv_sum_counter(struct batadv_priv *bat_priv,
-					  size_t idx)
+static inline u64 batadv_sum_counter(struct batadv_priv *bat_priv, size_t idx)
 {
-	uint64_t *counters, sum = 0;
+	u64 *counters, sum = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
@@ -348,39 +347,38 @@
 #define BATADV_SKB_CB(__skb)       ((struct batadv_skb_cb *)&((__skb)->cb[0]))
 
 void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
-				    uint8_t type, uint8_t version,
-				    void *tvlv_value, uint16_t tvlv_value_len);
-uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
-					  unsigned char **packet_buff,
-					  int *packet_buff_len,
-					  int packet_min_len);
+				    u8 type, u8 version,
+				    void *tvlv_value, u16 tvlv_value_len);
+u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
+				     unsigned char **packet_buff,
+				     int *packet_buff_len, int packet_min_len);
 void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
 			     struct batadv_ogm_packet *batadv_ogm_packet,
 			     struct batadv_orig_node *orig_node);
 void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
-				      uint8_t type, uint8_t version);
+				      u8 type, u8 version);
 
 void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
 				  void (*optr)(struct batadv_priv *bat_priv,
 					       struct batadv_orig_node *orig,
-					       uint8_t flags,
+					       u8 flags,
 					       void *tvlv_value,
-					       uint16_t tvlv_value_len),
+					       u16 tvlv_value_len),
 				  int (*uptr)(struct batadv_priv *bat_priv,
-					      uint8_t *src, uint8_t *dst,
+					      u8 *src, u8 *dst,
 					      void *tvlv_value,
-					      uint16_t tvlv_value_len),
-				  uint8_t type, uint8_t version, uint8_t flags);
+					      u16 tvlv_value_len),
+				  u8 type, u8 version, u8 flags);
 void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
-				    uint8_t type, uint8_t version);
+				    u8 type, u8 version);
 int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
 				   bool ogm_source,
 				   struct batadv_orig_node *orig_node,
-				   uint8_t *src, uint8_t *dst,
-				   void *tvlv_buff, uint16_t tvlv_buff_len);
-void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
-			      uint8_t *dst, uint8_t type, uint8_t version,
-			      void *tvlv_value, uint16_t tvlv_value_len);
+				   u8 *src, u8 *dst,
+				   void *tvlv_buff, u16 tvlv_buff_len);
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
+			      u8 *dst, u8 type, u8 version,
+			      void *tvlv_value, u16 tvlv_value_len);
 unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len);
 bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid);
 
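
The wrap-around discussion in the header above relies on the usual
trick of interpreting the unsigned difference of two sequence numbers as
a signed value of the same width. A sketch of that idea for u8 sequence
numbers (a simplified form, not a verbatim copy of the
batadv_seq_before/after macros):

#include <stdint.h>
#include <stdio.h>

static int seq_after_u8(uint8_t x, uint8_t y)
{
	return (int8_t)(x - y) > 0;	/* x newer than y */
}

int main(void)
{
	printf("5 after 250?  %d\n", seq_after_u8(5, 250));   /* 1: wrapped */
	printf("250 after 5?  %d\n", seq_after_u8(250, 5));   /* 0 */
	printf("133 after 5?  %d\n", seq_after_u8(133, 5));   /* 0: +128 is
							       * neither */
	return 0;
}
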
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 7aa480b..eb76386 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -19,6 +19,8 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
@@ -29,6 +31,7 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/list.h>
+#include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
@@ -87,7 +90,7 @@
  * Returns true if the given address is already in the given list.
  * Otherwise returns false.
  */
-static bool batadv_mcast_mla_is_duplicate(uint8_t *mcast_addr,
+static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
 					  struct hlist_head *mcast_list)
 {
 	struct batadv_hw_addr *mcast_entry;
@@ -101,15 +104,19 @@
 
 /**
  * batadv_mcast_mla_list_free - free a list of multicast addresses
+ * @bat_priv: the bat priv with all the soft interface information
  * @mcast_list: the list to free
  *
  * Removes and frees all items in the given mcast_list.
  */
-static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
+static void batadv_mcast_mla_list_free(struct batadv_priv *bat_priv,
+				       struct hlist_head *mcast_list)
 {
 	struct batadv_hw_addr *mcast_entry;
 	struct hlist_node *tmp;
 
+	lockdep_assert_held(&bat_priv->tt.commit_lock);
+
 	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
 		hlist_del(&mcast_entry->list);
 		kfree(mcast_entry);
@@ -132,6 +139,8 @@
 	struct batadv_hw_addr *mcast_entry;
 	struct hlist_node *tmp;
 
+	lockdep_assert_held(&bat_priv->tt.commit_lock);
+
 	hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
 				  list) {
 		if (mcast_list &&
@@ -162,6 +171,8 @@
 	struct batadv_hw_addr *mcast_entry;
 	struct hlist_node *tmp;
 
+	lockdep_assert_held(&bat_priv->tt.commit_lock);
+
 	if (!mcast_list)
 		return;
 
@@ -266,7 +277,7 @@
 	batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
 
 out:
-	batadv_mcast_mla_list_free(&mcast_list);
+	batadv_mcast_mla_list_free(bat_priv, &mcast_list);
 }
 
 /**
@@ -588,19 +599,28 @@
  *
  * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
  * orig, has toggled then this method updates counter and list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
  */
 static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
 					     struct batadv_orig_node *orig,
-					     uint8_t mcast_flags)
+					     u8 mcast_flags)
 {
+	struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
+	struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;
+
+	lockdep_assert_held(&orig->mcast_handler_lock);
+
 	/* switched from flag unset to set */
 	if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
 	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
 		atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);
 
 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-		hlist_add_head_rcu(&orig->mcast_want_all_unsnoopables_node,
-				   &bat_priv->mcast.want_all_unsnoopables_list);
+		/* flag checks above + mcast_handler_lock prevent this */
+		WARN_ON(!hlist_unhashed(node));
+
+		hlist_add_head_rcu(node, head);
 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
 	/* switched from flag set to unset */
 	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
@@ -608,7 +628,10 @@
 		atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);
 
 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-		hlist_del_rcu(&orig->mcast_want_all_unsnoopables_node);
+		/* flag checks above + mcast_handler_lock prevent this */
+		WARN_ON(hlist_unhashed(node));
+
+		hlist_del_init_rcu(node);
 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
 	}
 }
@@ -621,19 +644,28 @@
  *
  * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
  * toggled then this method updates counter and list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
  */
 static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig,
-					  uint8_t mcast_flags)
+					  u8 mcast_flags)
 {
+	struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
+	struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;
+
+	lockdep_assert_held(&orig->mcast_handler_lock);
+
 	/* switched from flag unset to set */
 	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
 	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
 		atomic_inc(&bat_priv->mcast.num_want_all_ipv4);
 
 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-		hlist_add_head_rcu(&orig->mcast_want_all_ipv4_node,
-				   &bat_priv->mcast.want_all_ipv4_list);
+		/* flag checks above + mcast_handler_lock prevent this */
+		WARN_ON(!hlist_unhashed(node));
+
+		hlist_add_head_rcu(node, head);
 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
 	/* switched from flag set to unset */
 	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
@@ -641,7 +673,10 @@
 		atomic_dec(&bat_priv->mcast.num_want_all_ipv4);
 
 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-		hlist_del_rcu(&orig->mcast_want_all_ipv4_node);
+		/* flag checks above + mcast_handler_lock prevent this */
+		WARN_ON(hlist_unhashed(node));
+
+		hlist_del_init_rcu(node);
 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
 	}
 }
@@ -654,19 +689,28 @@
  *
  * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
  * toggled then this method updates counter and list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
  */
 static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig,
-					  uint8_t mcast_flags)
+					  u8 mcast_flags)
 {
+	struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
+	struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;
+
+	lockdep_assert_held(&orig->mcast_handler_lock);
+
 	/* switched from flag unset to set */
 	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
 	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
 		atomic_inc(&bat_priv->mcast.num_want_all_ipv6);
 
 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-		hlist_add_head_rcu(&orig->mcast_want_all_ipv6_node,
-				   &bat_priv->mcast.want_all_ipv6_list);
+		/* flag checks above + mcast_handler_lock prevent this */
+		WARN_ON(!hlist_unhashed(node));
+
+		hlist_add_head_rcu(node, head);
 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
 	/* switched from flag set to unset */
 	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
@@ -674,7 +718,10 @@
 		atomic_dec(&bat_priv->mcast.num_want_all_ipv6);
 
 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-		hlist_del_rcu(&orig->mcast_want_all_ipv6_node);
+		/* flag checks above + mcast_handler_lock prevent this */
+		WARN_ON(hlist_unhashed(node));
+
+		hlist_del_init_rcu(node);
 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
 	}
 }
@@ -689,47 +736,50 @@
  */
 static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 					     struct batadv_orig_node *orig,
-					     uint8_t flags,
+					     u8 flags,
 					     void *tvlv_value,
-					     uint16_t tvlv_value_len)
+					     u16 tvlv_value_len)
 {
 	bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
-	uint8_t mcast_flags = BATADV_NO_FLAGS;
+	u8 mcast_flags = BATADV_NO_FLAGS;
 	bool orig_initialized;
 
-	orig_initialized = orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST;
+	if (orig_mcast_enabled && tvlv_value &&
+	    (tvlv_value_len >= sizeof(mcast_flags)))
+		mcast_flags = *(u8 *)tvlv_value;
+
+	spin_lock_bh(&orig->mcast_handler_lock);
+	orig_initialized = test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
+				    &orig->capa_initialized);
 
 	/* If mcast support is turned on decrease the disabled mcast node
 	 * counter only if we had increased it for this node before. If this
 	 * is a completely new orig_node no need to decrease the counter.
 	 */
 	if (orig_mcast_enabled &&
-	    !(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST)) {
+	    !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
 		if (orig_initialized)
 			atomic_dec(&bat_priv->mcast.num_disabled);
-		orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST;
+		set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
 	/* If mcast support is being switched off or if this is an initial
 	 * OGM without mcast support then increase the disabled mcast
 	 * node counter.
 	 */
 	} else if (!orig_mcast_enabled &&
-		   (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST ||
+		   (test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) ||
 		    !orig_initialized)) {
 		atomic_inc(&bat_priv->mcast.num_disabled);
-		orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST;
+		clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
 	}
 
-	orig->capa_initialized |= BATADV_ORIG_CAPA_HAS_MCAST;
-
-	if (orig_mcast_enabled && tvlv_value &&
-	    (tvlv_value_len >= sizeof(mcast_flags)))
-		mcast_flags = *(uint8_t *)tvlv_value;
+	set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);
 
 	batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
 	batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
 	batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
 
 	orig->mcast_flags = mcast_flags;
+	spin_unlock_bh(&orig->mcast_handler_lock);
 }
 
 /**
@@ -763,11 +813,15 @@
 {
 	struct batadv_priv *bat_priv = orig->bat_priv;
 
-	if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) &&
-	    orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST)
+	spin_lock_bh(&orig->mcast_handler_lock);
+
+	if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) &&
+	    test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized))
 		atomic_dec(&bat_priv->mcast.num_disabled);
 
 	batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
 	batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
 	batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
+
+	spin_unlock_bh(&orig->mcast_handler_lock);
 }
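
The WARN_ON(hlist_unhashed(...)) checks and the switch to
hlist_del_init_rcu() above depend on "not on any list" being a testable
node state. A userspace sketch of that hlist convention, where
pprev == NULL marks an unhashed node and deletion re-initialises it:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
	struct node **pprev;	/* NULL <=> unhashed, like hlist */
};

static int unhashed(const struct node *n) { return n->pprev == NULL; }

static void add_head(struct node *n, struct node **head)
{
	assert(unhashed(n));	/* mirrors WARN_ON(!hlist_unhashed()) */
	n->next = *head;
	if (*head)
		(*head)->pprev = &n->next;
	*head = n;
	n->pprev = head;
}

static void del_init(struct node *n)
{
	assert(!unhashed(n));	/* mirrors WARN_ON(hlist_unhashed()) */
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
	n->next = NULL;
	n->pprev = NULL;	/* back to the "unhashed" state */
}

int main(void)
{
	struct node a = { NULL, NULL }, *head = NULL;

	add_head(&a, &head);
	del_init(&a);
	add_head(&a, &head);	/* safe again after del_init */
	printf("ok\n");
	return 0;
}
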
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
index beb6e56..8f3cb04 100644
--- a/net/batman-adv/multicast.h
+++ b/net/batman-adv/multicast.h
@@ -20,8 +20,6 @@
 
 #include "main.h"
 
-struct batadv_orig_node;
-struct batadv_priv;
 struct sk_buff;
 
 /**
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index f0a50f3..f5276be 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -19,6 +19,7 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/bitops.h>
 #include <linux/byteorder/generic.h>
 #include <linux/compiler.h>
 #include <linux/debugfs.h>
@@ -129,14 +130,13 @@
  */
 static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig,
-					  uint8_t flags,
-					  void *tvlv_value,
-					  uint16_t tvlv_value_len)
+					  u8 flags,
+					  void *tvlv_value, u16 tvlv_value_len)
 {
 	if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
-		orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_NC;
+		clear_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
 	else
-		orig->capabilities |= BATADV_ORIG_CAPA_HAS_NC;
+		set_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
 }
 
 /**
@@ -381,7 +381,7 @@
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
 	struct hlist_head *head;
 	struct batadv_orig_node *orig_node;
-	uint32_t i;
+	u32 i;
 
 	if (!hash)
 		return;
@@ -417,7 +417,7 @@
 	struct hlist_node *node_tmp;
 	struct batadv_nc_path *nc_path;
 	spinlock_t *lock; /* Protects lists in hash */
-	uint32_t i;
+	u32 i;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -477,10 +477,10 @@
  *
  * Returns the selected index in the hash table for the given data.
  */
-static uint32_t batadv_nc_hash_choose(const void *data, uint32_t size)
+static u32 batadv_nc_hash_choose(const void *data, u32 size)
 {
 	const struct batadv_nc_path *nc_path = data;
-	uint32_t hash = 0;
+	u32 hash = 0;
 
 	hash = jhash(&nc_path->prev_hop, sizeof(nc_path->prev_hop), hash);
 	hash = jhash(&nc_path->next_hop, sizeof(nc_path->next_hop), hash);
@@ -586,6 +586,8 @@
 	unsigned long timeout = bat_priv->nc.max_buffer_time;
 	bool res = false;
 
+	lockdep_assert_held(&nc_path->packet_list_lock);
+
 	/* Packets are added to tail, so the remaining packets did not time
 	 * out and we can stop processing the current queue
 	 */
@@ -622,6 +624,8 @@
 {
 	unsigned long timeout = bat_priv->nc.max_fwd_delay;
 
+	lockdep_assert_held(&nc_path->packet_list_lock);
+
 	/* Packets are added to tail, so the remaining packets did not time
 	 * out and we can stop processing the current queue
 	 */
@@ -743,8 +747,8 @@
 				    struct batadv_ogm_packet *ogm_packet)
 {
 	struct batadv_orig_ifinfo *orig_ifinfo;
-	uint32_t last_real_seqno;
-	uint8_t last_ttl;
+	u32 last_real_seqno;
+	u8 last_ttl;
 
 	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, BATADV_IF_DEFAULT);
 	if (!orig_ifinfo)
@@ -872,8 +876,8 @@
 }
 
 /**
- * batadv_nc_update_nc_node - updates stored incoming and outgoing nc node structs
- *  (best called on incoming OGMs)
+ * batadv_nc_update_nc_node - updates stored incoming and outgoing nc node
+ *  structs (best called on incoming OGMs)
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node originating the ogm packet
  * @orig_neigh_node: neighboring orig node from which we received the ogm packet
@@ -887,14 +891,15 @@
 			      struct batadv_ogm_packet *ogm_packet,
 			      int is_single_hop_neigh)
 {
-	struct batadv_nc_node *in_nc_node = NULL, *out_nc_node = NULL;
+	struct batadv_nc_node *in_nc_node = NULL;
+	struct batadv_nc_node *out_nc_node = NULL;
 
 	/* Check if network coding is enabled */
 	if (!atomic_read(&bat_priv->network_coding))
 		goto out;
 
 	/* check if orig node is network coding enabled */
-	if (!(orig_node->capabilities & BATADV_ORIG_CAPA_HAS_NC))
+	if (!test_bit(BATADV_ORIG_CAPA_HAS_NC, &orig_node->capabilities))
 		goto out;
 
 	/* accept ogms from 'good' neighbors and single hop neighbors */
@@ -937,8 +942,8 @@
  */
 static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
 						 struct batadv_hashtable *hash,
-						 uint8_t *src,
-						 uint8_t *dst)
+						 u8 *src,
+						 u8 *dst)
 {
 	int hash_added;
 	struct batadv_nc_path *nc_path, nc_path_key;
@@ -990,9 +995,9 @@
  *  selection of a receiver with slightly lower TQ than the other
  * @tq: to be weighted tq value
  */
-static uint8_t batadv_nc_random_weight_tq(uint8_t tq)
+static u8 batadv_nc_random_weight_tq(u8 tq)
 {
-	uint8_t rand_val, rand_tq;
+	u8 rand_val, rand_tq;
 
 	get_random_bytes(&rand_val, sizeof(rand_val));
 
@@ -1037,7 +1042,7 @@
 				   struct batadv_nc_packet *nc_packet,
 				   struct batadv_neigh_node *neigh_node)
 {
-	uint8_t tq_weighted_neigh, tq_weighted_coding, tq_tmp;
+	u8 tq_weighted_neigh, tq_weighted_coding, tq_tmp;
 	struct sk_buff *skb_dest, *skb_src;
 	struct batadv_unicast_packet *packet1;
 	struct batadv_unicast_packet *packet2;
@@ -1046,7 +1051,7 @@
 	struct batadv_neigh_node *router_coding = NULL;
 	struct batadv_neigh_ifinfo *router_neigh_ifinfo = NULL;
 	struct batadv_neigh_ifinfo *router_coding_ifinfo = NULL;
-	uint8_t *first_source, *first_dest, *second_source, *second_dest;
+	u8 *first_source, *first_dest, *second_source, *second_dest;
 	__be32 packet_id1, packet_id2;
 	size_t count;
 	bool res = false;
@@ -1230,8 +1235,7 @@
  *
  * Returns true if coding of a decoded packet is allowed.
  */
-static bool batadv_nc_skb_coding_possible(struct sk_buff *skb,
-					  uint8_t *dst, uint8_t *src)
+static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, u8 *dst, u8 *src)
 {
 	if (BATADV_SKB_CB(skb)->decoded && !batadv_compare_eth(dst, src))
 		return false;
@@ -1254,7 +1258,7 @@
 		      struct batadv_nc_node *in_nc_node,
 		      struct batadv_nc_node *out_nc_node,
 		      struct sk_buff *skb,
-		      uint8_t *eth_dst)
+		      u8 *eth_dst)
 {
 	struct batadv_nc_path *nc_path, nc_path_key;
 	struct batadv_nc_packet *nc_packet_out = NULL;
@@ -1320,8 +1324,8 @@
 static struct batadv_nc_packet *
 batadv_nc_skb_src_search(struct batadv_priv *bat_priv,
 			 struct sk_buff *skb,
-			 uint8_t *eth_dst,
-			 uint8_t *eth_src,
+			 u8 *eth_dst,
+			 u8 *eth_src,
 			 struct batadv_nc_node *in_nc_node)
 {
 	struct batadv_orig_node *orig_node;
@@ -1361,7 +1365,7 @@
  */
 static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
 					      struct sk_buff *skb,
-					      uint8_t *eth_dst_new)
+					      u8 *eth_dst_new)
 {
 	struct ethhdr *ethhdr;
 
@@ -1637,7 +1641,7 @@
 	struct batadv_unicast_packet *unicast_packet;
 	struct batadv_coded_packet coded_packet_tmp;
 	struct ethhdr *ethhdr, ethhdr_tmp;
-	uint8_t *orig_dest, ttl, ttvn;
+	u8 *orig_dest, ttl, ttvn;
 	unsigned int coding_len;
 	int err;
 
@@ -1729,7 +1733,7 @@
 	struct batadv_hashtable *hash = bat_priv->nc.decoding_hash;
 	struct batadv_nc_packet *tmp_nc_packet, *nc_packet = NULL;
 	struct batadv_nc_path *nc_path, nc_path_key;
-	uint8_t *dest, *source;
+	u8 *dest, *source;
 	__be32 packet_id;
 	int index;
 
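
batadv_nc_random_weight_tq() above draws one random byte to weight a TQ
value for receiver selection. A plausible userspace sketch of such a
weighting, assuming the estimated loss (TQ_MAX_VALUE - tq) is scaled by
rand/255; TQ_MAX_VALUE mirrors BATADV_TQ_MAX_VALUE (255), a 16-bit
intermediate avoids truncation, and rand() stands in for
get_random_bytes():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TQ_MAX_VALUE 255

static uint8_t random_weight_tq(uint8_t tq, uint8_t rand_val)
{
	/* randomise the estimated packet loss (TQ_MAX_VALUE - tq) */
	uint16_t rand_loss = (uint16_t)rand_val * (TQ_MAX_VALUE - tq);

	/* normalise the randomised loss back to 0..255 */
	rand_loss /= TQ_MAX_VALUE;

	/* convert back to a (randomised) TQ value */
	return TQ_MAX_VALUE - (uint8_t)rand_loss;
}

int main(void)
{
	uint8_t tq = 200;

	printf("weighted tq: %u\n", random_weight_tq(tq, rand() & 0xff));
	printf("rand=0   -> %u (no loss added)\n", random_weight_tq(tq, 0));
	printf("rand=255 -> %u (full estimated loss)\n",
	       random_weight_tq(tq, 255));
	return 0;
}
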
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index 5b79aa8..8f6d4ad 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
@@ -22,11 +22,7 @@
 
 #include <linux/types.h>
 
-struct batadv_nc_node;
-struct batadv_neigh_node;
 struct batadv_ogm_packet;
-struct batadv_orig_node;
-struct batadv_priv;
 struct net_device;
 struct seq_file;
 struct sk_buff;
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 018b749..7486df9 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -26,6 +26,7 @@
 #include <linux/list.h>
 #include <linux/lockdep.h>
 #include <linux/netdevice.h>
+#include <linux/rculist.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
@@ -70,7 +71,7 @@
 	struct batadv_orig_node_vlan *vlan = NULL, *tmp;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
+	hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
 		if (tmp->vid != vid)
 			continue;
 
@@ -118,7 +119,7 @@
 	atomic_set(&vlan->refcount, 2);
 	vlan->vid = vid;
 
-	list_add_rcu(&vlan->list, &orig_node->vlan_list);
+	hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);
 
 out:
 	spin_unlock_bh(&orig_node->vlan_list_lock);
@@ -442,41 +443,6 @@
 }
 
 /**
- * batadv_neigh_node_new - create and init a new neigh_node object
- * @hard_iface: the interface where the neighbour is connected to
- * @neigh_addr: the mac address of the neighbour interface
- * @orig_node: originator object representing the neighbour
- *
- * Allocates a new neigh_node object and initialises all the generic fields.
- * Returns the new object or NULL on failure.
- */
-struct batadv_neigh_node *
-batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
-		      const uint8_t *neigh_addr,
-		      struct batadv_orig_node *orig_node)
-{
-	struct batadv_neigh_node *neigh_node;
-
-	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
-	if (!neigh_node)
-		goto out;
-
-	INIT_HLIST_NODE(&neigh_node->list);
-	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
-	spin_lock_init(&neigh_node->ifinfo_lock);
-
-	ether_addr_copy(neigh_node->addr, neigh_addr);
-	neigh_node->if_incoming = hard_iface;
-	neigh_node->orig_node = orig_node;
-
-	/* extra reference for return */
-	atomic_set(&neigh_node->refcount, 2);
-
-out:
-	return neigh_node;
-}
-
-/**
  * batadv_neigh_node_get - retrieve a neighbour from the list
  * @orig_node: originator which the neighbour belongs to
  * @hard_iface: the interface where this neighbour is connected to
@@ -486,10 +452,10 @@
  * which is connected through the provided hard interface.
  * Returns NULL if the neighbour is not found.
  */
-struct batadv_neigh_node *
+static struct batadv_neigh_node *
 batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
 		      const struct batadv_hard_iface *hard_iface,
-		      const uint8_t *addr)
+		      const u8 *addr)
 {
 	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
 
@@ -513,6 +479,59 @@
 }
 
 /**
+ * batadv_neigh_node_new - create and init a new neigh_node object
+ * @orig_node: originator object representing the neighbour
+ * @hard_iface: the interface where the neighbour is connected to
+ * @neigh_addr: the mac address of the neighbour interface
+ *
+ * Allocates a new neigh_node object and initialises all the generic fields.
+ * Returns the new object or NULL on failure.
+ */
+struct batadv_neigh_node *
+batadv_neigh_node_new(struct batadv_orig_node *orig_node,
+		      struct batadv_hard_iface *hard_iface,
+		      const u8 *neigh_addr)
+{
+	struct batadv_neigh_node *neigh_node;
+
+	neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
+	if (neigh_node)
+		goto out;
+
+	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
+	if (!neigh_node)
+		goto out;
+
+	if (!atomic_inc_not_zero(&hard_iface->refcount)) {
+		kfree(neigh_node);
+		neigh_node = NULL;
+		goto out;
+	}
+
+	INIT_HLIST_NODE(&neigh_node->list);
+	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
+	spin_lock_init(&neigh_node->ifinfo_lock);
+
+	ether_addr_copy(neigh_node->addr, neigh_addr);
+	neigh_node->if_incoming = hard_iface;
+	neigh_node->orig_node = orig_node;
+
+	/* extra reference for return */
+	atomic_set(&neigh_node->refcount, 2);
+
+	spin_lock_bh(&orig_node->neigh_list_lock);
+	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+	spin_unlock_bh(&orig_node->neigh_list_lock);
+
+	batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
+		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
+		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);
+
+out:
+	return neigh_node;
+}
+
+/**
  * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
  * @rcu: rcu pointer of the orig_ifinfo object
  */
@@ -624,7 +643,7 @@
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* spinlock to protect write access */
 	struct batadv_orig_node *orig_node;
-	uint32_t i;
+	u32 i;
 
 	if (!hash)
 		return;
@@ -659,7 +678,7 @@
  * Returns the newly created object or NULL on failure.
  */
 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
-					      const uint8_t *addr)
+					      const u8 *addr)
 {
 	struct batadv_orig_node *orig_node;
 	struct batadv_orig_node_vlan *vlan;
@@ -674,7 +693,7 @@
 		return NULL;
 
 	INIT_HLIST_HEAD(&orig_node->neigh_list);
-	INIT_LIST_HEAD(&orig_node->vlan_list);
+	INIT_HLIST_HEAD(&orig_node->vlan_list);
 	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
 	spin_lock_init(&orig_node->bcast_seqno_lock);
 	spin_lock_init(&orig_node->neigh_list_lock);
@@ -696,8 +715,13 @@
 	orig_node->last_seen = jiffies;
 	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
 	orig_node->bcast_seqno_reset = reset_time;
+
 #ifdef CONFIG_BATMAN_ADV_MCAST
 	orig_node->mcast_flags = BATADV_NO_FLAGS;
+	INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
+	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
+	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
+	spin_lock_init(&orig_node->mcast_handler_lock);
 #endif
 
 	/* create a vlan object for the "untagged" LAN */
@@ -976,7 +1000,7 @@
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* spinlock to protect write access */
 	struct batadv_orig_node *orig_node;
-	uint32_t i;
+	u32 i;
 
 	if (!hash)
 		return;
@@ -1005,7 +1029,6 @@
 		spin_unlock_bh(list_lock);
 	}
 
-	batadv_gw_node_purge(bat_priv);
 	batadv_gw_election(bat_priv);
 }
 
@@ -1110,7 +1133,7 @@
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
 	struct hlist_head *head;
 	struct batadv_orig_node *orig_node;
-	uint32_t i;
+	u32 i;
 	int ret;
 
 	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
@@ -1147,7 +1170,7 @@
 	struct batadv_hard_iface *hard_iface_tmp;
 	struct batadv_orig_node *orig_node;
 	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
-	uint32_t i;
+	u32 i;
 	int ret;
 
 	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
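
The reworked batadv_neigh_node_new() above pins its incoming interface
with atomic_inc_not_zero(), i.e. it takes a reference only while the
object is still alive. A userspace sketch of those semantics using C11
atomics (iface_get() is a hypothetical name):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct iface {
	atomic_int refcount;
};

static bool iface_get(struct iface *hard_iface)
{
	int old = atomic_load(&hard_iface->refcount);

	/* equivalent of the kernel's atomic_inc_not_zero() */
	while (old != 0) {
		if (atomic_compare_exchange_weak(&hard_iface->refcount,
						 &old, old + 1))
			return true;
	}
	return false;	/* object already dying, caller must bail out */
}

int main(void)
{
	struct iface live = { 1 }, dead = { 0 };

	printf("live: %s\n", iface_get(&live) ? "got ref" : "refused");
	printf("dead: %s\n", iface_get(&dead) ? "got ref" : "refused");
	return 0;
}
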
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 79734d3..fa18f9b 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -40,15 +40,11 @@
 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
 void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
-					      const uint8_t *addr);
+					      const u8 *addr);
 struct batadv_neigh_node *
-batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
-		      const struct batadv_hard_iface *hard_iface,
-		      const uint8_t *addr);
-struct batadv_neigh_node *
-batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
-		      const uint8_t *neigh_addr,
-		      struct batadv_orig_node *orig_node);
+batadv_neigh_node_new(struct batadv_orig_node *orig_node,
+		      struct batadv_hard_iface *hard_iface,
+		      const u8 *neigh_addr);
 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node);
 struct batadv_neigh_node *
 batadv_orig_router_get(struct batadv_orig_node *orig_node,
@@ -86,9 +82,9 @@
 /* hashfunction to choose an entry in a hash table of given size
  * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
  */
-static inline uint32_t batadv_choose_orig(const void *data, uint32_t size)
+static inline u32 batadv_choose_orig(const void *data, u32 size)
 {
-	uint32_t hash = 0;
+	u32 hash = 0;
 
 	hash = jhash(data, ETH_ALEN, hash);
 	return hash % size;
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 9e747c0..11f996b 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -197,8 +197,8 @@
  * transport the claim type and the group id
  */
 struct batadv_bla_claim_dst {
-	uint8_t magic[3];	/* FF:43:05 */
-	uint8_t type;		/* bla_claimframe */
+	u8     magic[3];	/* FF:43:05 */
+	u8     type;		/* bla_claimframe */
 	__be16 group;		/* group id */
 };
 
@@ -213,16 +213,16 @@
  * @tvlv_len: length of tvlv data following the ogm header
  */
 struct batadv_ogm_packet {
-	uint8_t  packet_type;
-	uint8_t  version;
-	uint8_t  ttl;
-	uint8_t  flags;
-	__be32   seqno;
-	uint8_t  orig[ETH_ALEN];
-	uint8_t  prev_sender[ETH_ALEN];
-	uint8_t  reserved;
-	uint8_t  tq;
-	__be16   tvlv_len;
+	u8     packet_type;
+	u8     version;
+	u8     ttl;
+	u8     flags;
+	__be32 seqno;
+	u8     orig[ETH_ALEN];
+	u8     prev_sender[ETH_ALEN];
+	u8     reserved;
+	u8     tq;
+	__be16 tvlv_len;
 	/* __packed is not needed as the struct size is divisible by 4,
 	 * and the largest data type in this struct has a size of 4.
 	 */
@@ -246,14 +246,14 @@
  * members are padded the same way as they are in real packets.
  */
 struct batadv_icmp_header {
-	uint8_t  packet_type;
-	uint8_t  version;
-	uint8_t  ttl;
-	uint8_t  msg_type; /* see ICMP message types above */
-	uint8_t  dst[ETH_ALEN];
-	uint8_t  orig[ETH_ALEN];
-	uint8_t  uid;
-	uint8_t  align[3];
+	u8 packet_type;
+	u8 version;
+	u8 ttl;
+	u8 msg_type; /* see ICMP message types above */
+	u8 dst[ETH_ALEN];
+	u8 orig[ETH_ALEN];
+	u8 uid;
+	u8 align[3];
 };
 
 /**
@@ -269,15 +269,15 @@
  * @seqno: ICMP sequence number
  */
 struct batadv_icmp_packet {
-	uint8_t  packet_type;
-	uint8_t  version;
-	uint8_t  ttl;
-	uint8_t  msg_type; /* see ICMP message types above */
-	uint8_t  dst[ETH_ALEN];
-	uint8_t  orig[ETH_ALEN];
-	uint8_t  uid;
-	uint8_t  reserved;
-	__be16   seqno;
+	u8     packet_type;
+	u8     version;
+	u8     ttl;
+	u8     msg_type; /* see ICMP message types above */
+	u8     dst[ETH_ALEN];
+	u8     orig[ETH_ALEN];
+	u8     uid;
+	u8     reserved;
+	__be16 seqno;
 };
 
 #define BATADV_RR_LEN 16
@@ -296,16 +296,16 @@
  * @rr: route record array
  */
 struct batadv_icmp_packet_rr {
-	uint8_t  packet_type;
-	uint8_t  version;
-	uint8_t  ttl;
-	uint8_t  msg_type; /* see ICMP message types above */
-	uint8_t  dst[ETH_ALEN];
-	uint8_t  orig[ETH_ALEN];
-	uint8_t  uid;
-	uint8_t  rr_cur;
-	__be16   seqno;
-	uint8_t  rr[BATADV_RR_LEN][ETH_ALEN];
+	u8     packet_type;
+	u8     version;
+	u8     ttl;
+	u8     msg_type; /* see ICMP message types above */
+	u8     dst[ETH_ALEN];
+	u8     orig[ETH_ALEN];
+	u8     uid;
+	u8     rr_cur;
+	__be16 seqno;
+	u8     rr[BATADV_RR_LEN][ETH_ALEN];
 };
 
 #define BATADV_ICMP_MAX_PACKET_SIZE	sizeof(struct batadv_icmp_packet_rr)
@@ -331,11 +331,11 @@
  * @dest: originator destination of the unicast packet
  */
 struct batadv_unicast_packet {
-	uint8_t  packet_type;
-	uint8_t  version;
-	uint8_t  ttl;
-	uint8_t  ttvn; /* destination translation table version number */
-	uint8_t  dest[ETH_ALEN];
+	u8 packet_type;
+	u8 version;
+	u8 ttl;
+	u8 ttvn; /* destination translation table version number */
+	u8 dest[ETH_ALEN];
 	/* "4 bytes boundary + 2 bytes" long to make the payload after the
 	 * following ethernet header again 4 bytes boundary aligned
 	 */
@@ -349,9 +349,9 @@
  */
 struct batadv_unicast_4addr_packet {
 	struct batadv_unicast_packet u;
-	uint8_t src[ETH_ALEN];
-	uint8_t subtype;
-	uint8_t reserved;
+	u8 src[ETH_ALEN];
+	u8 subtype;
+	u8 reserved;
 	/* "4 bytes boundary + 2 bytes" long to make the payload after the
 	 * following ethernet header again 4 bytes boundary aligned
 	 */
@@ -370,22 +370,22 @@
  * @total_size: size of the merged packet
  */
 struct batadv_frag_packet {
-	uint8_t packet_type;
-	uint8_t version;  /* batman version field */
-	uint8_t ttl;
+	u8     packet_type;
+	u8     version;  /* batman version field */
+	u8     ttl;
 #if defined(__BIG_ENDIAN_BITFIELD)
-	uint8_t no:4;
-	uint8_t reserved:4;
+	u8     no:4;
+	u8     reserved:4;
 #elif defined(__LITTLE_ENDIAN_BITFIELD)
-	uint8_t reserved:4;
-	uint8_t no:4;
+	u8     reserved:4;
+	u8     no:4;
 #else
 #error "unknown bitfield endianness"
 #endif
-	uint8_t dest[ETH_ALEN];
-	uint8_t orig[ETH_ALEN];
-	__be16  seqno;
-	__be16  total_size;
+	u8     dest[ETH_ALEN];
+	u8     orig[ETH_ALEN];
+	__be16 seqno;
+	__be16 total_size;
 };
 
 /**
@@ -398,12 +398,12 @@
  * @orig: originator of the broadcast packet
  */
 struct batadv_bcast_packet {
-	uint8_t  packet_type;
-	uint8_t  version;  /* batman version field */
-	uint8_t  ttl;
-	uint8_t  reserved;
-	__be32   seqno;
-	uint8_t  orig[ETH_ALEN];
+	u8     packet_type;
+	u8     version;  /* batman version field */
+	u8     ttl;
+	u8     reserved;
+	__be32 seqno;
+	u8     orig[ETH_ALEN];
 	/* "4 bytes boundary + 2 bytes" long to make the payload after the
 	 * following ethernet header again 4 bytes boundary aligned
 	 */
@@ -428,21 +428,21 @@
  * @coded_len: length of network coded part of the payload
  */
 struct batadv_coded_packet {
-	uint8_t  packet_type;
-	uint8_t  version;  /* batman version field */
-	uint8_t  ttl;
-	uint8_t  first_ttvn;
-	/* uint8_t  first_dest[ETH_ALEN]; - saved in mac header destination */
-	uint8_t  first_source[ETH_ALEN];
-	uint8_t  first_orig_dest[ETH_ALEN];
-	__be32   first_crc;
-	uint8_t  second_ttl;
-	uint8_t  second_ttvn;
-	uint8_t  second_dest[ETH_ALEN];
-	uint8_t  second_source[ETH_ALEN];
-	uint8_t  second_orig_dest[ETH_ALEN];
-	__be32   second_crc;
-	__be16   coded_len;
+	u8     packet_type;
+	u8     version;  /* batman version field */
+	u8     ttl;
+	u8     first_ttvn;
+	/* u8  first_dest[ETH_ALEN]; - saved in mac header destination */
+	u8     first_source[ETH_ALEN];
+	u8     first_orig_dest[ETH_ALEN];
+	__be32 first_crc;
+	u8     second_ttl;
+	u8     second_ttvn;
+	u8     second_dest[ETH_ALEN];
+	u8     second_source[ETH_ALEN];
+	u8     second_orig_dest[ETH_ALEN];
+	__be32 second_crc;
+	__be16 coded_len;
 };
 
 #pragma pack()
@@ -459,14 +459,14 @@
  * @align: 2 bytes to align the header to a 4 byte boundary
  */
 struct batadv_unicast_tvlv_packet {
-	uint8_t  packet_type;
-	uint8_t  version;  /* batman version field */
-	uint8_t  ttl;
-	uint8_t  reserved;
-	uint8_t  dst[ETH_ALEN];
-	uint8_t  src[ETH_ALEN];
-	__be16   tvlv_len;
-	uint16_t align;
+	u8     packet_type;
+	u8     version;  /* batman version field */
+	u8     ttl;
+	u8     reserved;
+	u8     dst[ETH_ALEN];
+	u8     src[ETH_ALEN];
+	__be16 tvlv_len;
+	u16    align;
 };
 
 /**
@@ -476,9 +476,9 @@
  * @len: tvlv container length
  */
 struct batadv_tvlv_hdr {
-	uint8_t type;
-	uint8_t version;
-	__be16  len;
+	u8     type;
+	u8     version;
+	__be16 len;
 };
 
 /**
@@ -500,9 +500,9 @@
  *  one batadv_tvlv_tt_vlan_data object per announced vlan
  */
 struct batadv_tvlv_tt_data {
-	uint8_t flags;
-	uint8_t ttvn;
-	__be16  num_vlan;
+	u8     flags;
+	u8     ttvn;
+	__be16 num_vlan;
 };
 
 /**
@@ -513,9 +513,9 @@
  * @reserved: unused, useful for alignment purposes
  */
 struct batadv_tvlv_tt_vlan_data {
-	__be32	crc;
-	__be16	vid;
-	uint16_t reserved;
+	__be32 crc;
+	__be16 vid;
+	u16    reserved;
 };
 
 /**
@@ -527,9 +527,9 @@
  * @vid: VLAN identifier
  */
 struct batadv_tvlv_tt_change {
-	uint8_t flags;
-	uint8_t reserved[3];
-	uint8_t addr[ETH_ALEN];
+	u8     flags;
+	u8     reserved[3];
+	u8     addr[ETH_ALEN];
 	__be16 vid;
 };
 
@@ -539,7 +539,7 @@
  * @vid: VLAN identifier
  */
 struct batadv_tvlv_roam_adv {
-	uint8_t  client[ETH_ALEN];
+	u8     client[ETH_ALEN];
 	__be16 vid;
 };
 
@@ -549,8 +549,8 @@
  * @reserved: reserved field
  */
 struct batadv_tvlv_mcast_data {
-	uint8_t	flags;
-	uint8_t reserved[3];
+	u8 flags;
+	u8 reserved[3];
 };
 
 #endif /* _NET_BATMAN_ADV_PACKET_H_ */
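
The "__packed is not needed" comments above can be checked mechanically:
mirror the struct with fixed-width types and assert its size at compile
time. The sketch below mirrors batadv_ogm_packet with host-order
stand-ins for the __be fields; it is an illustration, not the
authoritative wire format:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

struct ogm_packet {
	uint8_t  packet_type;
	uint8_t  version;
	uint8_t  ttl;
	uint8_t  flags;
	uint32_t seqno;			/* __be32 on the wire */
	uint8_t  orig[ETH_ALEN];
	uint8_t  prev_sender[ETH_ALEN];
	uint8_t  reserved;
	uint8_t  tq;
	uint16_t tvlv_len;		/* __be16 on the wire */
};

/* size 24, divisible by 4, so the compiler inserts no padding */
static_assert(sizeof(struct ogm_packet) == 24, "unexpected OGM size");
static_assert(sizeof(struct ogm_packet) % 4 == 0, "OGM not 4-byte aligned");

int main(void)
{
	printf("sizeof(struct ogm_packet) = %zu\n",
	       sizeof(struct ogm_packet));
	return 0;
}
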
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index c360c0c..8d990b0 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -145,7 +145,7 @@
  *  0 if the packet is to be accepted
  *  1 if the packet is to be ignored.
  */
-int batadv_window_protected(struct batadv_priv *bat_priv, int32_t seq_num_diff,
+int batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
 			    unsigned long *last_reset)
 {
 	if (seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE ||
@@ -653,19 +653,19 @@
 static bool
 batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
 			      struct batadv_unicast_packet *unicast_packet,
-			      uint8_t *dst_addr, unsigned short vid)
+			      u8 *dst_addr, unsigned short vid)
 {
 	struct batadv_orig_node *orig_node = NULL;
 	struct batadv_hard_iface *primary_if = NULL;
 	bool ret = false;
-	uint8_t *orig_addr, orig_ttvn;
+	u8 *orig_addr, orig_ttvn;
 
 	if (batadv_is_my_client(bat_priv, dst_addr, vid)) {
 		primary_if = batadv_primary_if_get_selected(bat_priv);
 		if (!primary_if)
 			goto out;
 		orig_addr = primary_if->net_dev->dev_addr;
-		orig_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+		orig_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
 	} else {
 		orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr,
 						     vid);
@@ -676,7 +676,7 @@
 			goto out;
 
 		orig_addr = orig_node->orig;
-		orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+		orig_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
 	}
 
 	/* update the packet header */
@@ -698,7 +698,7 @@
 	struct batadv_unicast_packet *unicast_packet;
 	struct batadv_hard_iface *primary_if;
 	struct batadv_orig_node *orig_node;
-	uint8_t curr_ttvn, old_ttvn;
+	u8 curr_ttvn, old_ttvn;
 	struct ethhdr *ethhdr;
 	unsigned short vid;
 	int is_old_ttvn;
@@ -740,7 +740,7 @@
 	 * value is used later to check if the node which sent (or re-routed
 	 * last time) the packet had an updated information or not
 	 */
-	curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+	curr_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
 	if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
 		orig_node = batadv_orig_hash_find(bat_priv,
 						  unicast_packet->dest);
@@ -751,7 +751,7 @@
 		if (!orig_node)
 			return 0;
 
-		curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+		curr_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
 		batadv_orig_node_free_ref(orig_node);
 	}
 
@@ -833,7 +833,7 @@
 	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 	struct batadv_unicast_packet *unicast_packet;
 	struct batadv_unicast_4addr_packet *unicast_4addr_packet;
-	uint8_t *orig_addr;
+	u8 *orig_addr;
 	struct batadv_orig_node *orig_node = NULL;
 	int check, hdr_size = sizeof(*unicast_packet);
 	bool is4addr;
@@ -904,7 +904,7 @@
 	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 	struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
 	unsigned char *tvlv_buff;
-	uint16_t tvlv_buff_len;
+	u16 tvlv_buff_len;
 	int hdr_size = sizeof(*unicast_tvlv_packet);
 	int ret = NET_RX_DROP;
 
@@ -1007,8 +1007,8 @@
 	struct ethhdr *ethhdr;
 	int hdr_size = sizeof(*bcast_packet);
 	int ret = NET_RX_DROP;
-	int32_t seq_diff;
-	uint32_t seqno;
+	s32 seq_diff;
+	u32 seqno;
 
 	/* drop packet if it has not necessary minimum size */
 	if (unlikely(!pskb_may_pull(skb, hdr_size)))
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 6bc29d3..204bbe4 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -22,10 +22,6 @@
 
 #include <linux/types.h>
 
-struct batadv_hard_iface;
-struct batadv_neigh_node;
-struct batadv_orig_node;
-struct batadv_priv;
 struct sk_buff;
 
 bool batadv_check_management_packet(struct sk_buff *skb,
@@ -55,7 +51,7 @@
 batadv_find_router(struct batadv_priv *bat_priv,
 		   struct batadv_orig_node *orig_node,
 		   struct batadv_hard_iface *recv_if);
-int batadv_window_protected(struct batadv_priv *bat_priv, int32_t seq_num_diff,
+int batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
 			    unsigned long *last_reset);
 
 #endif /* _NET_BATMAN_ADV_ROUTING_H_ */
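
The batman-adv hunks in this series are dominated by a mechanical, tree-wide switch from the C99 uintN_t/intN_t typedefs to the kernel's native u8/u16/u32/s16/s32 types, the spelling checkpatch prefers for kernel-internal fields; on-wire fields keep their endian-annotated __be16/__be32 types so sparse can still flag missing byte-order conversions. A minimal sketch of the convention (the struct and helper are illustrative, not taken from this patch):

#include <linux/types.h>
#include <linux/if_ether.h>

struct example_hdr {
	u8     ttl;               /* kernel-internal: plain u8 */
	u8     client[ETH_ALEN];
	__be16 vid;               /* on-wire: stays endian-annotated */
	s32    seq_diff;          /* signed window arithmetic */
};

static inline u16 example_vid(const struct example_hdr *hdr)
{
	return be16_to_cpu(hdr->vid);	/* convert at the boundary */
}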
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 0a01992..f664324 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -54,7 +54,7 @@
  */
 int batadv_send_skb_packet(struct sk_buff *skb,
 			   struct batadv_hard_iface *hard_iface,
-			   const uint8_t *dst_addr)
+			   const u8 *dst_addr)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct ethhdr *ethhdr;
@@ -172,7 +172,7 @@
 				  struct batadv_orig_node *orig_node)
 {
 	struct batadv_unicast_packet *unicast_packet;
-	uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);
 
 	if (batadv_skb_head_push(skb, hdr_size) < 0)
 		return false;
@@ -343,12 +343,12 @@
  */
 int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
 				   struct sk_buff *skb, int packet_type,
-				   int packet_subtype, uint8_t *dst_hint,
+				   int packet_subtype, u8 *dst_hint,
 				   unsigned short vid)
 {
 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
 	struct batadv_orig_node *orig_node;
-	uint8_t *src, *dst;
+	u8 *src, *dst;
 
 	src = ethhdr->h_source;
 	dst = ethhdr->h_dest;
@@ -616,7 +616,8 @@
 		 * we delete only packets belonging to the given interface
 		 */
 		if ((hard_iface) &&
-		    (forw_packet->if_incoming != hard_iface))
+		    (forw_packet->if_incoming != hard_iface) &&
+		    (forw_packet->if_outgoing != hard_iface))
 			continue;
 
 		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
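
This send.c hunk widens the purge filter in the forwarding-packet cleanup: a queued broadcast is now cancelled when the interface going away is either its incoming or its outgoing interface, instead of only the former, so nothing stays scheduled for transmission on an interface that is being deactivated. Restated as a self-contained predicate (a sketch; the helper name is hypothetical and the struct definitions are assumed from batman-adv's types.h):

#include <linux/types.h>

/* keep a queued packet only while neither of its interfaces matches
 * the one being purged; hard_iface == NULL means "purge everything"
 */
static bool example_keep_queued(const struct batadv_forw_packet *fp,
				const struct batadv_hard_iface *hard_iface)
{
	return hard_iface &&
	       fp->if_incoming != hard_iface &&
	       fp->if_outgoing != hard_iface;
}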
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 0536835f..82059f2 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -25,15 +25,12 @@
 
 #include "packet.h"
 
-struct batadv_hard_iface;
-struct batadv_orig_node;
-struct batadv_priv;
 struct sk_buff;
 struct work_struct;
 
 int batadv_send_skb_packet(struct sk_buff *skb,
 			   struct batadv_hard_iface *hard_iface,
-			   const uint8_t *dst_addr);
+			   const u8 *dst_addr);
 int batadv_send_skb_to_orig(struct sk_buff *skb,
 			    struct batadv_orig_node *orig_node,
 			    struct batadv_hard_iface *recv_if);
@@ -56,7 +53,7 @@
 			    unsigned short vid);
 int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
 				   struct sk_buff *skb, int packet_type,
-				   int packet_subtype, uint8_t *dst_hint,
+				   int packet_subtype, u8 *dst_hint,
 				   unsigned short vid);
 int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
 			   unsigned short vid);
@@ -75,7 +72,7 @@
  * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
  */
 static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
-					 struct sk_buff *skb, uint8_t *dst_hint,
+					 struct sk_buff *skb, u8 *dst_hint,
 					 unsigned short vid)
 {
 	return batadv_send_skb_via_tt_generic(bat_priv, skb, BATADV_UNICAST, 0,
@@ -100,7 +97,7 @@
 static inline int batadv_send_skb_via_tt_4addr(struct batadv_priv *bat_priv,
 					       struct sk_buff *skb,
 					       int packet_subtype,
-					       uint8_t *dst_hint,
+					       u8 *dst_hint,
 					       unsigned short vid)
 {
 	return batadv_send_skb_via_tt_generic(bat_priv, skb,
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index a2fc843..ac4d08d 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -131,7 +131,7 @@
 	struct batadv_priv *bat_priv = netdev_priv(dev);
 	struct batadv_softif_vlan *vlan;
 	struct sockaddr *addr = p;
-	uint8_t old_addr[ETH_ALEN];
+	u8 old_addr[ETH_ALEN];
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
@@ -186,22 +186,23 @@
 	struct batadv_hard_iface *primary_if = NULL;
 	struct batadv_bcast_packet *bcast_packet;
 	__be16 ethertype = htons(ETH_P_BATMAN);
-	static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
-						   0x00, 0x00};
-	static const uint8_t ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
-						    0x00, 0x00};
+	static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
+					      0x00, 0x00};
+	static const u8 ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
+					       0x00, 0x00};
 	enum batadv_dhcp_recipient dhcp_rcp = BATADV_DHCP_NO;
-	uint8_t *dst_hint = NULL, chaddr[ETH_ALEN];
+	u8 *dst_hint = NULL, chaddr[ETH_ALEN];
 	struct vlan_ethhdr *vhdr;
 	unsigned int header_len = 0;
 	int data_len = skb->len, ret;
 	unsigned long brd_delay = 1;
 	bool do_bcast = false, client_added;
 	unsigned short vid;
-	uint32_t seqno;
+	u32 seqno;
 	int gw_mode;
 	enum batadv_forw_mode forw_mode;
 	struct batadv_orig_node *mcast_single_orig = NULL;
+	int network_offset = ETH_HLEN;
 
 	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
 		goto dropped;
@@ -214,14 +215,18 @@
 	case ETH_P_8021Q:
 		vhdr = vlan_eth_hdr(skb);
 
-		if (vhdr->h_vlan_encapsulated_proto != ethertype)
+		if (vhdr->h_vlan_encapsulated_proto != ethertype) {
+			network_offset += VLAN_HLEN;
 			break;
+		}
 
 		/* fall through */
 	case ETH_P_BATMAN:
 		goto dropped;
 	}
 
+	skb_set_network_header(skb, network_offset);
+
 	if (batadv_bla_tx(bat_priv, skb, vid))
 		goto dropped;
 
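The hunk above makes the transmit path record where the payload's network header sits before batman-adv pushes its own encapsulation: ETH_HLEN into the frame normally, plus VLAN_HLEN when an 802.1Q tag is present. A standalone sketch of the offset computation (hypothetical helper; in the patch the VLAN branch only runs when the encapsulated protocol is not ETH_P_BATMAN):

#include <linux/if_ether.h>	/* ETH_HLEN, ETH_P_8021Q */
#include <linux/if_vlan.h>	/* VLAN_HLEN */
#include <linux/skbuff.h>

static void example_set_network_header(struct sk_buff *skb, __be16 proto)
{
	int network_offset = ETH_HLEN;

	/* an 802.1Q tag inserts 4 extra bytes before the payload */
	if (proto == htons(ETH_P_8021Q))
		network_offset += VLAN_HLEN;

	skb_set_network_header(skb, network_offset);
}
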
@@ -745,9 +750,9 @@
 static int batadv_softif_init_late(struct net_device *dev)
 {
 	struct batadv_priv *bat_priv;
-	uint32_t random_seqno;
+	u32 random_seqno;
 	int ret;
-	size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM;
+	size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;
 
 	batadv_set_lockdep_class(dev);
 
@@ -758,7 +763,7 @@
 	/* batadv_interface_stats() needs to be available as soon as
 	 * register_netdevice() has been called
 	 */
-	bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
+	bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(u64));
 	if (!bat_priv->bat_counters)
 		return -ENOMEM;
 
@@ -936,14 +941,12 @@
 	dev->netdev_ops = &batadv_netdev_ops;
 	dev->destructor = batadv_softif_free;
 	dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 
 	/* can't call min_mtu, because the needed variables
 	 * have not been initialized yet
 	 */
 	dev->mtu = ETH_DATA_LEN;
-	/* reserve more space in the skbuff for our header */
-	dev->hard_header_len = batadv_max_header_len();
 
 	/* generate random address */
 	eth_hw_addr_random(dev);
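
Two setup details change here: the soft interface now sets IFF_NO_QUEUE instead of forcing tx_queue_len to 0, the idiom introduced in this cycle for virtual devices that cannot be flow-controlled and need no qdisc, and the hard_header_len over-reservation is dropped. A hedged sketch of the setup idiom (example_setup is illustrative):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void example_setup(struct net_device *dev)
{
	/* virtual device: no transmit queue, skip the default qdisc */
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->mtu = ETH_DATA_LEN;
	eth_hw_addr_random(dev);	/* random but valid unicast MAC */
}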
@@ -1112,8 +1115,7 @@
 #endif
 };
 
-static void batadv_get_strings(struct net_device *dev, uint32_t stringset,
-			       uint8_t *data)
+static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
 	if (stringset == ETH_SS_STATS)
 		memcpy(data, batadv_counters_strings,
@@ -1121,8 +1123,7 @@
 }
 
 static void batadv_get_ethtool_stats(struct net_device *dev,
-				     struct ethtool_stats *stats,
-				     uint64_t *data)
+				     struct ethtool_stats *stats, u64 *data)
 {
 	struct batadv_priv *bat_priv = netdev_priv(dev);
 	int i;
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 578e8a6..8e82176 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -22,10 +22,6 @@
 
 #include <net/rtnetlink.h>
 
-struct batadv_hard_iface;
-struct batadv_orig_node;
-struct batadv_priv;
-struct batadv_softif_vlan;
 struct net_device;
 struct sk_buff;
 
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index d6a312a..9de3c88 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -457,7 +457,7 @@
 				     struct attribute *attr, char *buff)
 {
 	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
-	uint32_t down, up;
+	u32 down, up;
 
 	down = atomic_read(&bat_priv->gw.bandwidth_down);
 	up = atomic_read(&bat_priv->gw.bandwidth_up);
@@ -512,7 +512,7 @@
 {
 	struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
-	uint32_t mark, mask;
+	u32 mark, mask;
 	char *mask_ptr;
 
 	/* parse the mask if it has been specified, otherwise assume the mask is
diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h
index 2294583..6197442 100644
--- a/net/batman-adv/sysfs.h
+++ b/net/batman-adv/sysfs.h
@@ -23,8 +23,6 @@
 #include <linux/sysfs.h>
 #include <linux/types.h>
 
-struct batadv_priv;
-struct batadv_softif_vlan;
 struct kobject;
 struct net_device;
 
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 5809b39..4228b10 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -19,6 +19,7 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/bitops.h>
 #include <linux/bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/compiler.h>
@@ -55,7 +56,7 @@
 static struct lock_class_key batadv_tt_local_hash_lock_class_key;
 static struct lock_class_key batadv_tt_global_hash_lock_class_key;
 
-static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+static void batadv_send_roam_adv(struct batadv_priv *bat_priv, u8 *client,
 				 unsigned short vid,
 				 struct batadv_orig_node *orig_node);
 static void batadv_tt_purge(struct work_struct *work);
@@ -84,10 +85,10 @@
  * Returns the hash index where the object represented by 'data' should be
  * stored at.
  */
-static inline uint32_t batadv_choose_tt(const void *data, uint32_t size)
+static inline u32 batadv_choose_tt(const void *data, u32 size)
 {
 	struct batadv_tt_common_entry *tt;
-	uint32_t hash = 0;
+	u32 hash = 0;
 
 	tt = (struct batadv_tt_common_entry *)data;
 	hash = jhash(&tt->addr, ETH_ALEN, hash);
@@ -106,12 +107,12 @@
  * found, NULL otherwise.
  */
 static struct batadv_tt_common_entry *
-batadv_tt_hash_find(struct batadv_hashtable *hash, const uint8_t *addr,
+batadv_tt_hash_find(struct batadv_hashtable *hash, const u8 *addr,
 		    unsigned short vid)
 {
 	struct hlist_head *head;
 	struct batadv_tt_common_entry to_search, *tt, *tt_tmp = NULL;
-	uint32_t index;
+	u32 index;
 
 	if (!hash)
 		return NULL;
@@ -151,7 +152,7 @@
  * found, NULL otherwise.
  */
 static struct batadv_tt_local_entry *
-batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const uint8_t *addr,
+batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
 			  unsigned short vid)
 {
 	struct batadv_tt_common_entry *tt_common_entry;
@@ -176,7 +177,7 @@
  * is found, NULL otherwise.
  */
 static struct batadv_tt_global_entry *
-batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const uint8_t *addr,
+batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
 			   unsigned short vid)
 {
 	struct batadv_tt_common_entry *tt_common_entry;
@@ -222,7 +223,7 @@
  * (excluding ourself).
  */
 int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
-				const uint8_t *addr, unsigned short vid)
+				const u8 *addr, unsigned short vid)
 {
 	struct batadv_tt_global_entry *tt_global_entry;
 	int count;
@@ -314,7 +315,7 @@
 
 	if (atomic_add_return(v, &vlan->tt.num_entries) == 0) {
 		spin_lock_bh(&orig_node->vlan_list_lock);
-		list_del_rcu(&vlan->list);
+		hlist_del_init_rcu(&vlan->list);
 		spin_unlock_bh(&orig_node->vlan_list_lock);
 		batadv_orig_node_vlan_free_ref(vlan);
 	}
@@ -363,11 +364,11 @@
  */
 static void batadv_tt_local_event(struct batadv_priv *bat_priv,
 				  struct batadv_tt_local_entry *tt_local_entry,
-				  uint8_t event_flags)
+				  u8 event_flags)
 {
 	struct batadv_tt_change_node *tt_change_node, *entry, *safe;
 	struct batadv_tt_common_entry *common = &tt_local_entry->common;
-	uint8_t flags = common->flags | event_flags;
+	u8 flags = common->flags | event_flags;
 	bool event_removed = false;
 	bool del_op_requested, del_op_entry;
 
@@ -447,7 +448,7 @@
  *
  * Returns the number of entries.
  */
-static uint16_t batadv_tt_entries(uint16_t tt_len)
+static u16 batadv_tt_entries(u16 tt_len)
 {
 	return tt_len / batadv_tt_len(1);
 }
@@ -461,7 +462,8 @@
  */
 static int batadv_tt_local_table_transmit_size(struct batadv_priv *bat_priv)
 {
-	uint16_t num_vlan = 0, tt_local_entries = 0;
+	u16 num_vlan = 0;
+	u16 tt_local_entries = 0;
 	struct batadv_softif_vlan *vlan;
 	int hdr_size;
 
@@ -524,8 +526,8 @@
  *
  * Returns true if the client was successfully added, false otherwise.
  */
-bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
-			 unsigned short vid, int ifindex, uint32_t mark)
+bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
+			 unsigned short vid, int ifindex, u32 mark)
 {
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	struct batadv_tt_local_entry *tt_local;
@@ -535,9 +537,10 @@
 	struct hlist_head *head;
 	struct batadv_tt_orig_list_entry *orig_entry;
 	int hash_added, table_size, packet_size_max;
-	bool ret = false, roamed_back = false;
-	uint8_t remote_flags;
-	uint32_t match_mark;
+	bool ret = false;
+	bool roamed_back = false;
+	u8 remote_flags;
+	u32 match_mark;
 
 	if (ifindex != BATADV_NULL_IFINDEX)
 		in_dev = dev_get_by_index(&init_net, ifindex);
@@ -604,7 +607,7 @@
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
 		   addr, BATADV_PRINT_VID(vid),
-		   (uint8_t)atomic_read(&bat_priv->tt.vn));
+		   (u8)atomic_read(&bat_priv->tt.vn));
 
 	ether_addr_copy(tt_local->common.addr, addr);
 	/* The local entry has to be marked as NEW to avoid to send it in
@@ -723,19 +726,22 @@
  *
  * Return the size of the allocated buffer or 0 in case of failure.
  */
-static uint16_t
+static u16
 batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
 				   struct batadv_tvlv_tt_data **tt_data,
 				   struct batadv_tvlv_tt_change **tt_change,
-				   int32_t *tt_len)
+				   s32 *tt_len)
 {
-	uint16_t num_vlan = 0, num_entries = 0, change_offset, tvlv_len;
+	u16 num_vlan = 0;
+	u16 num_entries = 0;
+	u16 change_offset;
+	u16 tvlv_len;
 	struct batadv_tvlv_tt_vlan_data *tt_vlan;
 	struct batadv_orig_node_vlan *vlan;
-	uint8_t *tt_change_ptr;
+	u8 *tt_change_ptr;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+	hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
 		num_vlan++;
 		num_entries += atomic_read(&vlan->tt.num_entries);
 	}
@@ -761,14 +767,14 @@
 	(*tt_data)->num_vlan = htons(num_vlan);
 
 	tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
-	list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+	hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
 		tt_vlan->vid = htons(vlan->vid);
 		tt_vlan->crc = htonl(vlan->tt.crc);
 
 		tt_vlan++;
 	}
 
-	tt_change_ptr = (uint8_t *)*tt_data + change_offset;
+	tt_change_ptr = (u8 *)*tt_data + change_offset;
 	*tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
 
 out:
@@ -794,16 +800,18 @@
  *
  * Return the size of the allocated buffer or 0 in case of failure.
  */
-static uint16_t
+static u16
 batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
 				  struct batadv_tvlv_tt_data **tt_data,
 				  struct batadv_tvlv_tt_change **tt_change,
-				  int32_t *tt_len)
+				  s32 *tt_len)
 {
 	struct batadv_tvlv_tt_vlan_data *tt_vlan;
 	struct batadv_softif_vlan *vlan;
-	uint16_t num_vlan = 0, num_entries = 0, tvlv_len;
-	uint8_t *tt_change_ptr;
+	u16 num_vlan = 0;
+	u16 num_entries = 0;
+	u16 tvlv_len;
+	u8 *tt_change_ptr;
 	int change_offset;
 
 	rcu_read_lock();
@@ -840,7 +848,7 @@
 		tt_vlan++;
 	}
 
-	tt_change_ptr = (uint8_t *)*tt_data + change_offset;
+	tt_change_ptr = (u8 *)*tt_data + change_offset;
 	*tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
 
 out:
@@ -859,8 +867,9 @@
 	struct batadv_tvlv_tt_data *tt_data;
 	struct batadv_tvlv_tt_change *tt_change;
 	int tt_diff_len, tt_change_len = 0;
-	int tt_diff_entries_num = 0, tt_diff_entries_count = 0;
-	uint16_t tvlv_len;
+	int tt_diff_entries_num = 0;
+	int tt_diff_entries_count = 0;
+	u16 tvlv_len;
 
 	tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
 	tt_diff_len = batadv_tt_len(tt_diff_entries_num);
@@ -934,12 +943,12 @@
 	struct batadv_softif_vlan *vlan;
 	struct hlist_head *head;
 	unsigned short vid;
-	uint32_t i;
+	u32 i;
 	int last_seen_secs;
 	int last_seen_msecs;
 	unsigned long last_seen_jiffies;
 	bool no_purge;
-	uint16_t np_flag = BATADV_TT_CLIENT_NOPURGE;
+	u16 np_flag = BATADV_TT_CLIENT_NOPURGE;
 
 	primary_if = batadv_seq_print_text_primary_if_get(seq);
 	if (!primary_if)
@@ -947,7 +956,7 @@
 
 	seq_printf(seq,
 		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
-		   net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
+		   net_dev->name, (u8)atomic_read(&bat_priv->tt.vn));
 	seq_printf(seq, "       %-13s  %s %-8s %-9s (%-10s)\n", "Client", "VID",
 		   "Flags", "Last seen", "CRC");
 
@@ -1007,7 +1016,7 @@
 static void
 batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
 			    struct batadv_tt_local_entry *tt_local_entry,
-			    uint16_t flags, const char *message)
+			    u16 flags, const char *message)
 {
 	batadv_tt_local_event(bat_priv, tt_local_entry, flags);
 
@@ -1033,12 +1042,12 @@
  *
  * Returns the flags assigned to the local entry before being deleted
  */
-uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
-				const uint8_t *addr, unsigned short vid,
-				const char *message, bool roaming)
+u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
+			   unsigned short vid, const char *message,
+			   bool roaming)
 {
 	struct batadv_tt_local_entry *tt_local_entry;
-	uint16_t flags, curr_flags = BATADV_NO_FLAGS;
+	u16 flags, curr_flags = BATADV_NO_FLAGS;
 	struct batadv_softif_vlan *vlan;
 	void *tt_entry_exists;
 
@@ -1141,7 +1150,7 @@
 	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
-	uint32_t i;
+	u32 i;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -1162,7 +1171,7 @@
 	struct batadv_softif_vlan *vlan;
 	struct hlist_node *node_tmp;
 	struct hlist_head *head;
-	uint32_t i;
+	u32 i;
 
 	if (!bat_priv->tt.local_hash)
 		return;
@@ -1337,15 +1346,14 @@
 static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
 				 struct batadv_orig_node *orig_node,
 				 const unsigned char *tt_addr,
-				 unsigned short vid, uint16_t flags,
-				 uint8_t ttvn)
+				 unsigned short vid, u16 flags, u8 ttvn)
 {
 	struct batadv_tt_global_entry *tt_global_entry;
 	struct batadv_tt_local_entry *tt_local_entry;
 	bool ret = false;
 	int hash_added;
 	struct batadv_tt_common_entry *common;
-	uint16_t local_flags;
+	u16 local_flags;
 
 	/* ignore global entries from backbone nodes */
 	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid))
@@ -1542,8 +1550,8 @@
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_orig_node_vlan *vlan;
 	struct hlist_head *head;
-	uint8_t last_ttvn;
-	uint16_t flags;
+	u8 last_ttvn;
+	u16 flags;
 
 	tt_common_entry = &tt_global_entry->common;
 	flags = tt_common_entry->flags;
@@ -1617,7 +1625,7 @@
 	struct batadv_tt_global_entry *tt_global;
 	struct batadv_hard_iface *primary_if;
 	struct hlist_head *head;
-	uint32_t i;
+	u32 i;
 
 	primary_if = batadv_seq_print_text_primary_if_get(seq);
 	if (!primary_if)
@@ -1650,20 +1658,28 @@
 }
 
 /**
- * batadv_tt_global_del_orig_entry - remove and free an orig_entry
+ * _batadv_tt_global_del_orig_entry - remove and free an orig_entry
  * @tt_global_entry: the global entry to remove the orig_entry from
  * @orig_entry: the orig entry to remove and free
  *
  * Remove an orig_entry from its list in the given tt_global_entry and
  * free this orig_entry afterwards.
+ *
+ * Caller must hold tt_global_entry->list_lock and ensure orig_entry->list is
+ * part of a list.
  */
 static void
-batadv_tt_global_del_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
-				struct batadv_tt_orig_list_entry *orig_entry)
+_batadv_tt_global_del_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
+				 struct batadv_tt_orig_list_entry *orig_entry)
 {
+	lockdep_assert_held(&tt_global_entry->list_lock);
+
 	batadv_tt_global_size_dec(orig_entry->orig_node,
 				  tt_global_entry->common.vid);
 	atomic_dec(&tt_global_entry->orig_list_count);
+	/* requires holding tt_global_entry->list_lock and orig_entry->list
+	 * being part of a list
+	 */
 	hlist_del_rcu(&orig_entry->list);
 	batadv_tt_orig_list_entry_free_ref(orig_entry);
 }
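
Prefixing the helper with an underscore and adding lockdep_assert_held() makes its locking contract checkable: with lockdep enabled, calling it without tt_global_entry->list_lock held now produces a warning instead of silent RCU-list corruption. The general locked-caller/assert pattern, as a minimal sketch under assumed names:

#include <linux/lockdep.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>

struct example_obj {
	spinlock_t lock;		/* protects node */
	struct hlist_node node;
};

/* caller must hold obj->lock */
static void __example_del(struct example_obj *obj)
{
	lockdep_assert_held(&obj->lock);	/* compiles away w/o lockdep */
	hlist_del_rcu(&obj->node);
}

static void example_del(struct example_obj *obj)
{
	spin_lock_bh(&obj->lock);
	__example_del(obj);
	spin_unlock_bh(&obj->lock);
}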
@@ -1679,7 +1695,7 @@
 	spin_lock_bh(&tt_global_entry->list_lock);
 	head = &tt_global_entry->orig_list;
 	hlist_for_each_entry_safe(orig_entry, safe, head, list)
-		batadv_tt_global_del_orig_entry(tt_global_entry, orig_entry);
+		_batadv_tt_global_del_orig_entry(tt_global_entry, orig_entry);
 	spin_unlock_bh(&tt_global_entry->list_lock);
 }
 
@@ -1714,8 +1730,8 @@
 				   orig_node->orig,
 				   tt_global_entry->common.addr,
 				   BATADV_PRINT_VID(vid), message);
-			batadv_tt_global_del_orig_entry(tt_global_entry,
-							orig_entry);
+			_batadv_tt_global_del_orig_entry(tt_global_entry,
+							 orig_entry);
 		}
 	}
 	spin_unlock_bh(&tt_global_entry->list_lock);
@@ -1837,12 +1853,12 @@
  */
 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
 			       struct batadv_orig_node *orig_node,
-			       int32_t match_vid,
+			       s32 match_vid,
 			       const char *message)
 {
 	struct batadv_tt_global_entry *tt_global;
 	struct batadv_tt_common_entry *tt_common_entry;
-	uint32_t i;
+	u32 i;
 	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
 	struct hlist_node *safe;
 	struct hlist_head *head;
@@ -1882,7 +1898,7 @@
 		}
 		spin_unlock_bh(list_lock);
 	}
-	orig_node->capa_initialized &= ~BATADV_ORIG_CAPA_HAS_TT;
+	clear_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
 }
 
 static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
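
clear_bit() here replaces an open-coded AND-NOT on what used to be a u8 bitmask: capa_initialized (and capabilities) become unsigned long so the generic atomic bitops apply, and the matching types.h hunk later in this diff turns the enum values from BIT(n) masks into plain bit numbers. That makes concurrent flag updates from different contexts safe. A minimal sketch of the converted pattern (names are illustrative):

#include <linux/bitops.h>
#include <linux/types.h>

enum example_capa {
	EXAMPLE_CAPA_HAS_TT,		/* bit number, not a BIT(n) mask */
};

struct example_orig {
	unsigned long capa_initialized;	/* machine word, bitop-safe */
};

static void example_set_tt(struct example_orig *orig, bool init)
{
	if (init)
		set_bit(EXAMPLE_CAPA_HAS_TT, &orig->capa_initialized);
	else
		clear_bit(EXAMPLE_CAPA_HAS_TT, &orig->capa_initialized);
}

static bool example_has_tt(const struct example_orig *orig)
{
	return test_bit(EXAMPLE_CAPA_HAS_TT, &orig->capa_initialized);
}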
@@ -1913,7 +1929,7 @@
 	struct hlist_head *head;
 	struct hlist_node *node_tmp;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
-	uint32_t i;
+	u32 i;
 	char *msg = NULL;
 	struct batadv_tt_common_entry *tt_common;
 	struct batadv_tt_global_entry *tt_global;
@@ -1954,7 +1970,7 @@
 	struct batadv_tt_global_entry *tt_global;
 	struct hlist_node *node_tmp;
 	struct hlist_head *head;
-	uint32_t i;
+	u32 i;
 
 	if (!bat_priv->tt.global_hash)
 		return;
@@ -2015,8 +2031,8 @@
  * If the two clients are AP isolated the function returns NULL.
  */
 struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
-						  const uint8_t *src,
-						  const uint8_t *addr,
+						  const u8 *src,
+						  const u8 *addr,
 						  unsigned short vid)
 {
 	struct batadv_tt_local_entry *tt_local_entry = NULL;
@@ -2084,16 +2100,16 @@
  *
  * Returns the checksum of the global table of a given originator.
  */
-static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
-				     struct batadv_orig_node *orig_node,
-				     unsigned short vid)
+static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
+				struct batadv_orig_node *orig_node,
+				unsigned short vid)
 {
 	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
 	struct batadv_tt_common_entry *tt_common;
 	struct batadv_tt_global_entry *tt_global;
 	struct hlist_head *head;
-	uint32_t i, crc_tmp, crc = 0;
-	uint8_t flags;
+	u32 i, crc_tmp, crc = 0;
+	u8 flags;
 	__be16 tmp_vid;
 
 	for (i = 0; i < hash->size; i++) {
@@ -2161,14 +2177,14 @@
  *
  * Returns the checksum of the local table
  */
-static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
-				    unsigned short vid)
+static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
+			       unsigned short vid)
 {
 	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
 	struct batadv_tt_common_entry *tt_common;
 	struct hlist_head *head;
-	uint32_t i, crc_tmp, crc = 0;
-	uint8_t flags;
+	u32 i, crc_tmp, crc = 0;
+	u8 flags;
 	__be16 tmp_vid;
 
 	for (i = 0; i < hash->size; i++) {
@@ -2210,12 +2226,13 @@
 
 static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
 {
-	struct batadv_tt_req_node *node, *safe;
+	struct batadv_tt_req_node *node;
+	struct hlist_node *safe;
 
 	spin_lock_bh(&bat_priv->tt.req_list_lock);
 
-	list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
-		list_del(&node->list);
+	hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
+		hlist_del_init(&node->list);
 		kfree(node);
 	}
 
@@ -2225,7 +2242,7 @@
 static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
 				       struct batadv_orig_node *orig_node,
 				       const void *tt_buff,
-				       uint16_t tt_buff_len)
+				       u16 tt_buff_len)
 {
 	/* Replace the old buffer only if I received something in the
 	 * last OGM (the OGM could carry no changes)
@@ -2245,30 +2262,36 @@
 
 static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
 {
-	struct batadv_tt_req_node *node, *safe;
+	struct batadv_tt_req_node *node;
+	struct hlist_node *safe;
 
 	spin_lock_bh(&bat_priv->tt.req_list_lock);
-	list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
+	hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
 		if (batadv_has_timed_out(node->issued_at,
 					 BATADV_TT_REQUEST_TIMEOUT)) {
-			list_del(&node->list);
+			hlist_del_init(&node->list);
 			kfree(node);
 		}
 	}
 	spin_unlock_bh(&bat_priv->tt.req_list_lock);
 }
 
-/* returns the pointer to the new tt_req_node struct if no request
- * has already been issued for this orig_node, NULL otherwise
+/**
+ * batadv_tt_req_node_new - search and possibly create a tt_req_node object
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node this request is being issued for
+ *
+ * Returns the pointer to the new tt_req_node struct if no request
+ * has already been issued for this orig_node, NULL otherwise.
  */
 static struct batadv_tt_req_node *
-batadv_new_tt_req_node(struct batadv_priv *bat_priv,
+batadv_tt_req_node_new(struct batadv_priv *bat_priv,
 		       struct batadv_orig_node *orig_node)
 {
 	struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
 
 	spin_lock_bh(&bat_priv->tt.req_list_lock);
-	list_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
+	hlist_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
 		if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
 		    !batadv_has_timed_out(tt_req_node_tmp->issued_at,
 					  BATADV_TT_REQUEST_TIMEOUT))
@@ -2282,7 +2305,7 @@
 	ether_addr_copy(tt_req_node->addr, orig_node->orig);
 	tt_req_node->issued_at = jiffies;
 
-	list_add(&tt_req_node->list, &bat_priv->tt.req_list);
+	hlist_add_head(&tt_req_node->list, &bat_priv->tt.req_list);
 unlock:
 	spin_unlock_bh(&bat_priv->tt.req_list_lock);
 	return tt_req_node;
@@ -2334,15 +2357,15 @@
  */
 static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
 				    struct batadv_hashtable *hash,
-				    void *tvlv_buff, uint16_t tt_len,
+				    void *tvlv_buff, u16 tt_len,
 				    int (*valid_cb)(const void *, const void *),
 				    void *cb_data)
 {
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tvlv_tt_change *tt_change;
 	struct hlist_head *head;
-	uint16_t tt_tot, tt_num_entries = 0;
-	uint32_t i;
+	u16 tt_tot, tt_num_entries = 0;
+	u32 i;
 
 	tt_tot = batadv_tt_entries(tt_len);
 	tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff;
@@ -2384,11 +2407,11 @@
  */
 static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
 				       struct batadv_tvlv_tt_vlan_data *tt_vlan,
-				       uint16_t num_vlan)
+				       u16 num_vlan)
 {
 	struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp;
 	struct batadv_orig_node_vlan *vlan;
-	uint32_t crc;
+	u32 crc;
 	int i;
 
 	/* check if each received CRC matches the locally stored one */
@@ -2443,11 +2466,11 @@
 					struct batadv_orig_node *orig_node)
 {
 	struct batadv_orig_node_vlan *vlan;
-	uint32_t crc;
+	u32 crc;
 
 	/* recompute the global CRC for each VLAN */
 	rcu_read_lock();
-	list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+	hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
 		/* if orig_node is a backbone node for this VLAN, don't compute
 		 * the CRC as we ignore all the global entries over it
 		 */
@@ -2473,9 +2496,9 @@
  */
 static int batadv_send_tt_request(struct batadv_priv *bat_priv,
 				  struct batadv_orig_node *dst_orig_node,
-				  uint8_t ttvn,
+				  u8 ttvn,
 				  struct batadv_tvlv_tt_vlan_data *tt_vlan,
-				  uint16_t num_vlan, bool full_table)
+				  u16 num_vlan, bool full_table)
 {
 	struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
 	struct batadv_tt_req_node *tt_req_node = NULL;
@@ -2491,7 +2514,7 @@
 	/* The new tt_req will be issued only if I'm not waiting for a
 	 * reply from the same orig_node yet
 	 */
-	tt_req_node = batadv_new_tt_req_node(bat_priv, dst_orig_node);
+	tt_req_node = batadv_tt_req_node_new(bat_priv, dst_orig_node);
 	if (!tt_req_node)
 		goto out;
 
@@ -2533,7 +2556,8 @@
 		batadv_hardif_free_ref(primary_if);
 	if (ret && tt_req_node) {
 		spin_lock_bh(&bat_priv->tt.req_list_lock);
-		list_del(&tt_req_node->list);
+		/* hlist_del_init() verifies tt_req_node still is in the list */
+		hlist_del_init(&tt_req_node->list);
 		spin_unlock_bh(&bat_priv->tt.req_list_lock);
 		kfree(tt_req_node);
 	}
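
The request-list conversion from list_head to hlist_head pairs every removal with hlist_del_init(), and the comment in this hunk hints at why: this error path can reach a tt_req_node that batadv_tt_req_purge() already unlinked, and hlist_del_init() is a no-op on an unhashed node where a bare hlist_del() would chase poisoned pointers. A condensed sketch of the idiom (generic list API, hypothetical caller):

#include <linux/list.h>
#include <linux/spinlock.h>

static void example_remove(struct hlist_node *n, spinlock_t *lock)
{
	spin_lock_bh(lock);
	/* safe even if another path already unlinked n: an unhashed
	 * node makes hlist_del_init() a no-op, while plain hlist_del()
	 * would dereference LIST_POISON pointers
	 */
	hlist_del_init(n);
	spin_unlock_bh(lock);
}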
@@ -2553,7 +2577,7 @@
  */
 static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
 					  struct batadv_tvlv_tt_data *tt_data,
-					  uint8_t *req_src, uint8_t *req_dst)
+					  u8 *req_src, u8 *req_dst)
 {
 	struct batadv_orig_node *req_dst_orig_node;
 	struct batadv_orig_node *res_dst_orig_node = NULL;
@@ -2561,9 +2585,9 @@
 	struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
 	struct batadv_tvlv_tt_vlan_data *tt_vlan;
 	bool ret = false, full_table;
-	uint8_t orig_ttvn, req_ttvn;
-	uint16_t tvlv_len;
-	int32_t tt_len;
+	u8 orig_ttvn, req_ttvn;
+	u16 tvlv_len;
+	s32 tt_len;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
@@ -2579,7 +2603,7 @@
 	if (!res_dst_orig_node)
 		goto out;
 
-	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
+	orig_ttvn = (u8)atomic_read(&req_dst_orig_node->last_ttvn);
 	req_ttvn = tt_data->ttvn;
 
 	tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
@@ -2685,16 +2709,16 @@
  */
 static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
 				       struct batadv_tvlv_tt_data *tt_data,
-				       uint8_t *req_src)
+				       u8 *req_src)
 {
 	struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
 	struct batadv_hard_iface *primary_if = NULL;
 	struct batadv_tvlv_tt_change *tt_change;
 	struct batadv_orig_node *orig_node;
-	uint8_t my_ttvn, req_ttvn;
-	uint16_t tvlv_len;
+	u8 my_ttvn, req_ttvn;
+	u16 tvlv_len;
 	bool full_table;
-	int32_t tt_len;
+	s32 tt_len;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
@@ -2703,7 +2727,7 @@
 
 	spin_lock_bh(&bat_priv->tt.commit_lock);
 
-	my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+	my_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
 	req_ttvn = tt_data->ttvn;
 
 	orig_node = batadv_orig_hash_find(bat_priv, req_src);
@@ -2742,7 +2766,7 @@
 		       bat_priv->tt.last_changeset_len);
 		spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 	} else {
-		req_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+		req_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
 
 		/* allocate the tvlv, put the tt_data and all the tt_vlan_data
 		 * in the initial part
@@ -2803,7 +2827,7 @@
  */
 static bool batadv_send_tt_response(struct batadv_priv *bat_priv,
 				    struct batadv_tvlv_tt_data *tt_data,
-				    uint8_t *req_src, uint8_t *req_dst)
+				    u8 *req_src, u8 *req_dst)
 {
 	if (batadv_is_my_mac(bat_priv, req_dst))
 		return batadv_send_my_tt_response(bat_priv, tt_data, req_src);
@@ -2814,7 +2838,7 @@
 static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
 				      struct batadv_orig_node *orig_node,
 				      struct batadv_tvlv_tt_change *tt_change,
-				      uint16_t tt_num_changes, uint8_t ttvn)
+				      u16 tt_num_changes, u8 ttvn)
 {
 	int i;
 	int roams;
@@ -2841,13 +2865,13 @@
 				return;
 		}
 	}
-	orig_node->capa_initialized |= BATADV_ORIG_CAPA_HAS_TT;
+	set_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
 }
 
 static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
 				  struct batadv_tvlv_tt_change *tt_change,
-				  uint8_t ttvn, uint8_t *resp_src,
-				  uint16_t num_entries)
+				  u8 ttvn, u8 *resp_src,
+				  u16 num_entries)
 {
 	struct batadv_orig_node *orig_node;
 
@@ -2877,7 +2901,7 @@
 
 static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
 				     struct batadv_orig_node *orig_node,
-				     uint16_t tt_num_changes, uint8_t ttvn,
+				     u16 tt_num_changes, u8 ttvn,
 				     struct batadv_tvlv_tt_change *tt_change)
 {
 	_batadv_tt_update_changes(bat_priv, orig_node, tt_change,
@@ -2896,7 +2920,7 @@
  *
  * Returns true if the client is served by this node, false otherwise.
  */
-bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr,
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const u8 *addr,
 			 unsigned short vid)
 {
 	struct batadv_tt_local_entry *tt_local_entry;
@@ -2927,13 +2951,14 @@
  */
 static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
 				      struct batadv_tvlv_tt_data *tt_data,
-				      uint8_t *resp_src, uint16_t num_entries)
+				      u8 *resp_src, u16 num_entries)
 {
-	struct batadv_tt_req_node *node, *safe;
+	struct batadv_tt_req_node *node;
+	struct hlist_node *safe;
 	struct batadv_orig_node *orig_node = NULL;
 	struct batadv_tvlv_tt_change *tt_change;
-	uint8_t *tvlv_ptr = (uint8_t *)tt_data;
-	uint16_t change_offset;
+	u8 *tvlv_ptr = (u8 *)tt_data;
+	u16 change_offset;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
@@ -2967,10 +2992,10 @@
 
 	/* Delete the tt_req_node from pending tt_requests list */
 	spin_lock_bh(&bat_priv->tt.req_list_lock);
-	list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
+	hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
 		if (!batadv_compare_eth(node->addr, resp_src))
 			continue;
-		list_del(&node->list);
+		hlist_del_init(&node->list);
 		kfree(node);
 	}
 
@@ -3016,8 +3041,7 @@
  *
  * returns true if the ROAMING_ADV can be sent, false otherwise
  */
-static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
-				       uint8_t *client)
+static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv, u8 *client)
 {
 	struct batadv_tt_roam_node *tt_roam_node;
 	bool ret = false;
@@ -3072,7 +3096,7 @@
  * for this particular roamed client has to be forwarded to the sender of the
  * roaming message.
  */
-static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+static void batadv_send_roam_adv(struct batadv_priv *bat_priv, u8 *client,
 				 unsigned short vid,
 				 struct batadv_orig_node *orig_node)
 {
@@ -3150,14 +3174,14 @@
  * @enable: whether to set or unset the flag
  * @count: whether to increase the TT size by the number of changed entries
  */
-static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv,
-				      uint16_t flags, bool enable, bool count)
+static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv, u16 flags,
+				      bool enable, bool count)
 {
 	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
 	struct batadv_tt_common_entry *tt_common_entry;
-	uint16_t changed_num = 0;
+	u16 changed_num = 0;
 	struct hlist_head *head;
-	uint32_t i;
+	u32 i;
 
 	if (!hash)
 		return;
@@ -3199,7 +3223,7 @@
 	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
-	uint32_t i;
+	u32 i;
 
 	if (!hash)
 		return;
@@ -3247,6 +3271,8 @@
  */
 static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
 {
+	lockdep_assert_held(&bat_priv->tt.commit_lock);
+
 	/* Update multicast addresses in local translation table */
 	batadv_mcast_mla_update(bat_priv);
 
@@ -3265,7 +3291,7 @@
 	atomic_inc(&bat_priv->tt.vn);
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Local changes committed, updating to ttvn %u\n",
-		   (uint8_t)atomic_read(&bat_priv->tt.vn));
+		   (u8)atomic_read(&bat_priv->tt.vn));
 
 	/* reset the sending counter */
 	atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
@@ -3284,8 +3310,8 @@
 	spin_unlock_bh(&bat_priv->tt.commit_lock);
 }
 
-bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
-			   uint8_t *dst, unsigned short vid)
+bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst,
+			   unsigned short vid)
 {
 	struct batadv_tt_local_entry *tt_local_entry = NULL;
 	struct batadv_tt_global_entry *tt_global_entry = NULL;
@@ -3333,17 +3359,18 @@
  */
 static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
 				  struct batadv_orig_node *orig_node,
-				  const void *tt_buff, uint16_t tt_num_vlan,
+				  const void *tt_buff, u16 tt_num_vlan,
 				  struct batadv_tvlv_tt_change *tt_change,
-				  uint16_t tt_num_changes, uint8_t ttvn)
+				  u16 tt_num_changes, u8 ttvn)
 {
-	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+	u8 orig_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
 	struct batadv_tvlv_tt_vlan_data *tt_vlan;
 	bool full_table = true;
 	bool has_tt_init;
 
 	tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff;
-	has_tt_init = orig_node->capa_initialized & BATADV_ORIG_CAPA_HAS_TT;
+	has_tt_init = test_bit(BATADV_ORIG_CAPA_HAS_TT,
+			       &orig_node->capa_initialized);
 
 	/* orig table not initialised AND first diff is in the OGM OR the ttvn
 	 * increased by one -> we can apply the attached changes
@@ -3415,7 +3442,7 @@
  * deleted later by a DEL or because of timeout
  */
 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
-					uint8_t *addr, unsigned short vid)
+					u8 *addr, unsigned short vid)
 {
 	struct batadv_tt_global_entry *tt_global_entry;
 	bool ret = false;
@@ -3441,7 +3468,7 @@
  * to keep the latter consistent with the node TTVN
  */
 bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
-				       uint8_t *addr, unsigned short vid)
+				       u8 *addr, unsigned short vid)
 {
 	struct batadv_tt_local_entry *tt_local_entry;
 	bool ret = false;
@@ -3527,13 +3554,13 @@
  */
 static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig,
-					  uint8_t flags, void *tvlv_value,
-					  uint16_t tvlv_value_len)
+					  u8 flags, void *tvlv_value,
+					  u16 tvlv_value_len)
 {
 	struct batadv_tvlv_tt_vlan_data *tt_vlan;
 	struct batadv_tvlv_tt_change *tt_change;
 	struct batadv_tvlv_tt_data *tt_data;
-	uint16_t num_entries, num_vlan;
+	u16 num_entries, num_vlan;
 
 	if (tvlv_value_len < sizeof(*tt_data))
 		return;
@@ -3569,12 +3596,12 @@
  * otherwise.
  */
 static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
-					     uint8_t *src, uint8_t *dst,
+					     u8 *src, u8 *dst,
 					     void *tvlv_value,
-					     uint16_t tvlv_value_len)
+					     u16 tvlv_value_len)
 {
 	struct batadv_tvlv_tt_data *tt_data;
-	uint16_t tt_vlan_len, tt_num_entries;
+	u16 tt_vlan_len, tt_num_entries;
 	char tt_flag;
 	bool ret;
 
@@ -3650,9 +3677,9 @@
  * otherwise.
  */
 static int batadv_roam_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
-					       uint8_t *src, uint8_t *dst,
+					       u8 *src, u8 *dst,
 					       void *tvlv_value,
-					       uint16_t tvlv_value_len)
+					       u16 tvlv_value_len)
 {
 	struct batadv_tvlv_roam_adv *roaming_adv;
 	struct batadv_orig_node *orig_node = NULL;
@@ -3734,7 +3761,7 @@
  * otherwise
  */
 bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
-				  const uint8_t *addr, unsigned short vid)
+				  const u8 *addr, unsigned short vid)
 {
 	struct batadv_tt_global_entry *tt;
 	bool ret;
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 6acc25d..abd8e11 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -22,44 +22,41 @@
 
 #include <linux/types.h>
 
-struct batadv_orig_node;
-struct batadv_priv;
 struct net_device;
 struct seq_file;
 
 int batadv_tt_init(struct batadv_priv *bat_priv);
-bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
-			 unsigned short vid, int ifindex, uint32_t mark);
-uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
-				const uint8_t *addr, unsigned short vid,
-				const char *message, bool roaming);
+bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
+			 unsigned short vid, int ifindex, u32 mark);
+u16 batadv_tt_local_remove(struct batadv_priv *bat_priv,
+			   const u8 *addr, unsigned short vid,
+			   const char *message, bool roaming);
 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset);
 int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset);
 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
 			       struct batadv_orig_node *orig_node,
-			       int32_t match_vid, const char *message);
+			       s32 match_vid, const char *message);
 int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
-				const uint8_t *addr, unsigned short vid);
+				const u8 *addr, unsigned short vid);
 struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
-						  const uint8_t *src,
-						  const uint8_t *addr,
+						  const u8 *src, const u8 *addr,
 						  unsigned short vid);
 void batadv_tt_free(struct batadv_priv *bat_priv);
-bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr,
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const u8 *addr,
 			 unsigned short vid);
-bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
-			   uint8_t *dst, unsigned short vid);
+bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst,
+			   unsigned short vid);
 void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv);
 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
-					uint8_t *addr, unsigned short vid);
+					u8 *addr, unsigned short vid);
 bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
-				       uint8_t *addr, unsigned short vid);
+				       u8 *addr, unsigned short vid);
 void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface);
 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig_node,
 					  const unsigned char *addr,
 					  unsigned short vid);
 bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
-				  const uint8_t *addr, unsigned short vid);
+				  const u8 *addr, unsigned short vid);
 
 #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 67d6348..d260efd 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -44,7 +44,7 @@
  *
  * *Please be careful: batadv_dat_addr_t must be UNSIGNED*
  */
-#define batadv_dat_addr_t uint16_t
+#define batadv_dat_addr_t u16
 
 #endif /* CONFIG_BATMAN_ADV_DAT */
 
@@ -103,10 +103,10 @@
  */
 struct batadv_hard_iface {
 	struct list_head list;
-	int16_t if_num;
+	s16 if_num;
 	char if_status;
 	struct net_device *net_dev;
-	uint8_t num_bcasts;
+	u8 num_bcasts;
 	struct kobject *hardif_obj;
 	atomic_t refcount;
 	struct packet_type batman_adv_ptype;
@@ -132,8 +132,8 @@
 	struct hlist_node list;
 	struct batadv_hard_iface *if_outgoing;
 	struct batadv_neigh_node __rcu *router; /* rcu protected pointer */
-	uint32_t last_real_seqno;
-	uint8_t last_ttl;
+	u32 last_real_seqno;
+	u8 last_ttl;
 	unsigned long batman_seqno_reset;
 	atomic_t refcount;
 	struct rcu_head rcu;
@@ -152,9 +152,9 @@
 	struct hlist_head head;
 	spinlock_t lock; /* protects head */
 	unsigned long timestamp;
-	uint16_t seqno;
-	uint16_t size;
-	uint16_t total_size;
+	u16 seqno;
+	u16 size;
+	u16 total_size;
 };
 
 /**
@@ -166,7 +166,7 @@
 struct batadv_frag_list_entry {
 	struct hlist_node list;
 	struct sk_buff *skb;
-	uint8_t no;
+	u8 no;
 };
 
 /**
@@ -175,7 +175,7 @@
  * @num_entries: number of TT entries for this VLAN
  */
 struct batadv_vlan_tt {
-	uint32_t crc;
+	u32 crc;
 	atomic_t num_entries;
 };
 
@@ -190,7 +190,7 @@
 struct batadv_orig_node_vlan {
 	unsigned short vid;
 	struct batadv_vlan_tt tt;
-	struct list_head list;
+	struct hlist_node list;
 	atomic_t refcount;
 	struct rcu_head rcu;
 };
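
batadv_orig_node_vlan switches its linkage from list_head to hlist_node (the owning vlan_list becomes an hlist_head a few hunks below), matching the hlist_for_each_entry_rcu()/hlist_del_init_rcu() calls converted earlier in the translation-table code. A hedged sketch of the reader/writer pairing for such an RCU hlist (names assumed):

#include <linux/rculist.h>

struct example_vlan {
	unsigned short vid;
	struct hlist_node list;
	struct rcu_head rcu;
};

/* writer: caller holds the list's spinlock */
static void example_vlan_add(struct hlist_head *head, struct example_vlan *v)
{
	hlist_add_head_rcu(&v->list, head);
}

/* reader: caller holds rcu_read_lock() */
static unsigned int example_vlan_count(struct hlist_head *head)
{
	struct example_vlan *v;
	unsigned int n = 0;

	hlist_for_each_entry_rcu(v, head, list)
		n++;
	return n;
}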
@@ -206,7 +206,7 @@
  */
 struct batadv_orig_bat_iv {
 	unsigned long *bcast_own;
-	uint8_t *bcast_own_sum;
+	u8 *bcast_own_sum;
 	/* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
 	 * neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
 	 */
@@ -221,6 +221,7 @@
  * @batadv_dat_addr_t:  address of the orig node in the distributed hash
  * @last_seen: time when last packet from this node was received
  * @bcast_seqno_reset: time when the broadcast seqno window was reset
+ * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
  * @mcast_flags: multicast flags announced by the orig node
  * @mcast_want_all_unsnoop_node: a list node for the
  *  mcast.want_all_unsnoopables list
@@ -259,7 +260,7 @@
  * @bat_iv: B.A.T.M.A.N. IV private structure
  */
 struct batadv_orig_node {
-	uint8_t orig[ETH_ALEN];
+	u8 orig[ETH_ALEN];
 	struct hlist_head ifinfo_list;
 	struct batadv_orig_ifinfo *last_bonding_candidate;
 #ifdef CONFIG_BATMAN_ADV_DAT
@@ -268,21 +269,23 @@
 	unsigned long last_seen;
 	unsigned long bcast_seqno_reset;
 #ifdef CONFIG_BATMAN_ADV_MCAST
-	uint8_t mcast_flags;
+	/* synchronizes mcast tvlv specific orig changes */
+	spinlock_t mcast_handler_lock;
+	u8 mcast_flags;
 	struct hlist_node mcast_want_all_unsnoopables_node;
 	struct hlist_node mcast_want_all_ipv4_node;
 	struct hlist_node mcast_want_all_ipv6_node;
 #endif
-	uint8_t capabilities;
-	uint8_t capa_initialized;
+	unsigned long capabilities;
+	unsigned long capa_initialized;
 	atomic_t last_ttvn;
 	unsigned char *tt_buff;
-	int16_t tt_buff_len;
+	s16 tt_buff_len;
 	spinlock_t tt_buff_lock; /* protects tt_buff & tt_buff_len */
 	/* prevents from changing the table while reading it */
 	spinlock_t tt_lock;
 	DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
-	uint32_t last_bcast_seqno;
+	u32 last_bcast_seqno;
 	struct hlist_head neigh_list;
 	/* neigh_list_lock protects: neigh_list and router */
 	spinlock_t neigh_list_lock;
@@ -299,7 +302,7 @@
 	spinlock_t out_coding_list_lock; /* Protects out_coding_list */
 #endif
 	struct batadv_frag_table_entry fragments[BATADV_FRAG_BUFFER_COUNT];
-	struct list_head vlan_list;
+	struct hlist_head vlan_list;
 	spinlock_t vlan_list_lock; /* protects vlan_list */
 	struct batadv_orig_bat_iv bat_iv;
 };
@@ -313,10 +316,10 @@
  *  (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
  */
 enum batadv_orig_capabilities {
-	BATADV_ORIG_CAPA_HAS_DAT = BIT(0),
-	BATADV_ORIG_CAPA_HAS_NC = BIT(1),
-	BATADV_ORIG_CAPA_HAS_TT = BIT(2),
-	BATADV_ORIG_CAPA_HAS_MCAST = BIT(3),
+	BATADV_ORIG_CAPA_HAS_DAT,
+	BATADV_ORIG_CAPA_HAS_NC,
+	BATADV_ORIG_CAPA_HAS_TT,
+	BATADV_ORIG_CAPA_HAS_MCAST,
 };
 
 /**
@@ -325,16 +328,14 @@
  * @orig_node: pointer to corresponding orig node
  * @bandwidth_down: advertised uplink download bandwidth
  * @bandwidth_up: advertised uplink upload bandwidth
- * @deleted: this struct is scheduled for deletion
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_gw_node {
 	struct hlist_node list;
 	struct batadv_orig_node *orig_node;
-	uint32_t bandwidth_down;
-	uint32_t bandwidth_up;
-	unsigned long deleted;
+	u32 bandwidth_down;
+	u32 bandwidth_up;
 	atomic_t refcount;
 	struct rcu_head rcu;
 };
@@ -355,7 +356,7 @@
 struct batadv_neigh_node {
 	struct hlist_node list;
 	struct batadv_orig_node *orig_node;
-	uint8_t addr[ETH_ALEN];
+	u8 addr[ETH_ALEN];
 	struct hlist_head ifinfo_list;
 	spinlock_t ifinfo_lock;	/* protects ifinfo_list and its members */
 	struct batadv_hard_iface *if_incoming;
@@ -375,11 +376,11 @@
  * @real_packet_count: counted result of real_bits
  */
 struct batadv_neigh_ifinfo_bat_iv {
-	uint8_t tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
-	uint8_t tq_index;
-	uint8_t tq_avg;
+	u8 tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
+	u8 tq_index;
+	u8 tq_avg;
 	DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
-	uint8_t real_packet_count;
+	u8 real_packet_count;
 };
 
 /**
@@ -395,7 +396,7 @@
 	struct hlist_node list;
 	struct batadv_hard_iface *if_outgoing;
 	struct batadv_neigh_ifinfo_bat_iv bat_iv;
-	uint8_t last_ttl;
+	u8 last_ttl;
 	atomic_t refcount;
 	struct rcu_head rcu;
 };
@@ -408,7 +409,7 @@
  */
 #ifdef CONFIG_BATMAN_ADV_BLA
 struct batadv_bcast_duplist_entry {
-	uint8_t orig[ETH_ALEN];
+	u8 orig[ETH_ALEN];
 	__be32 crc;
 	unsigned long entrytime;
 };
@@ -534,13 +535,13 @@
 	struct list_head changes_list;
 	struct batadv_hashtable *local_hash;
 	struct batadv_hashtable *global_hash;
-	struct list_head req_list;
+	struct hlist_head req_list;
 	struct list_head roam_list;
 	spinlock_t changes_list_lock; /* protects changes */
 	spinlock_t req_list_lock; /* protects req_list */
 	spinlock_t roam_list_lock; /* protects roam_list */
 	unsigned char *last_changeset;
-	int16_t last_changeset_len;
+	s16 last_changeset_len;
 	/* protects last_changeset & last_changeset_len */
 	spinlock_t last_changeset_lock;
 	/* prevents from executing a commit while reading the table */
@@ -660,7 +661,7 @@
 	struct hlist_head want_all_unsnoopables_list;
 	struct hlist_head want_all_ipv4_list;
 	struct hlist_head want_all_ipv6_list;
-	uint8_t flags;
+	u8 flags;
 	bool enabled;
 	atomic_t num_disabled;
 	atomic_t num_want_all_unsnoopables;
@@ -778,7 +779,7 @@
 	atomic_t mesh_state;
 	struct net_device *soft_iface;
 	struct net_device_stats stats;
-	uint64_t __percpu *bat_counters; /* Per cpu counters */
+	u64 __percpu *bat_counters; /* Per cpu counters */
 	atomic_t aggregated_ogms;
 	atomic_t bonding;
 	atomic_t fragmentation;
@@ -800,8 +801,8 @@
 #ifdef CONFIG_BATMAN_ADV_DEBUG
 	atomic_t log_level;
 #endif
-	uint32_t isolation_mark;
-	uint32_t isolation_mark_mask;
+	u32 isolation_mark;
+	u32 isolation_mark_mask;
 	atomic_t bcast_seqno;
 	atomic_t bcast_queue_left;
 	atomic_t batman_queue_left;
@@ -867,7 +868,7 @@
 struct batadv_socket_packet {
 	struct list_head list;
 	size_t icmp_len;
-	uint8_t icmp_packet[BATADV_ICMP_MAX_PACKET_SIZE];
+	u8 icmp_packet[BATADV_ICMP_MAX_PACKET_SIZE];
 };
 
 /**
@@ -888,14 +889,14 @@
  */
 #ifdef CONFIG_BATMAN_ADV_BLA
 struct batadv_bla_backbone_gw {
-	uint8_t orig[ETH_ALEN];
+	u8 orig[ETH_ALEN];
 	unsigned short vid;
 	struct hlist_node hash_entry;
 	struct batadv_priv *bat_priv;
 	unsigned long lasttime;
 	atomic_t wait_periods;
 	atomic_t request_sent;
-	uint16_t crc;
+	u16 crc;
 	atomic_t refcount;
 	struct rcu_head rcu;
 };
@@ -911,7 +912,7 @@
  * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_bla_claim {
-	uint8_t addr[ETH_ALEN];
+	u8 addr[ETH_ALEN];
 	unsigned short vid;
 	struct batadv_bla_backbone_gw *backbone_gw;
 	unsigned long lasttime;
@@ -933,10 +934,10 @@
  * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tt_common_entry {
-	uint8_t addr[ETH_ALEN];
+	u8 addr[ETH_ALEN];
 	unsigned short vid;
 	struct hlist_node hash_entry;
-	uint16_t flags;
+	u16 flags;
 	unsigned long added_at;
 	atomic_t refcount;
 	struct rcu_head rcu;
@@ -978,7 +979,7 @@
  */
 struct batadv_tt_orig_list_entry {
 	struct batadv_orig_node *orig_node;
-	uint8_t ttvn;
+	u8 ttvn;
 	struct hlist_node list;
 	atomic_t refcount;
 	struct rcu_head rcu;
@@ -1001,9 +1002,9 @@
  * @list: list node for batadv_priv_tt::req_list
  */
 struct batadv_tt_req_node {
-	uint8_t addr[ETH_ALEN];
+	u8 addr[ETH_ALEN];
 	unsigned long issued_at;
-	struct list_head list;
+	struct hlist_node list;
 };
 
 /**
@@ -1015,7 +1016,7 @@
  * @list: list node for batadv_priv_tt::roam_list
  */
 struct batadv_tt_roam_node {
-	uint8_t addr[ETH_ALEN];
+	u8 addr[ETH_ALEN];
 	atomic_t counter;
 	unsigned long first_time;
 	struct list_head list;
@@ -1032,7 +1033,7 @@
  */
 struct batadv_nc_node {
 	struct list_head list;
-	uint8_t addr[ETH_ALEN];
+	u8 addr[ETH_ALEN];
 	atomic_t refcount;
 	struct rcu_head rcu;
 	struct batadv_orig_node *orig_node;
@@ -1056,8 +1057,8 @@
 	atomic_t refcount;
 	struct list_head packet_list;
 	spinlock_t packet_list_lock; /* Protects packet_list */
-	uint8_t next_hop[ETH_ALEN];
-	uint8_t prev_hop[ETH_ALEN];
+	u8 next_hop[ETH_ALEN];
+	u8 prev_hop[ETH_ALEN];
 	unsigned long last_valid;
 };
 
@@ -1109,11 +1110,11 @@
 struct batadv_forw_packet {
 	struct hlist_node list;
 	unsigned long send_time;
-	uint8_t own;
+	u8 own;
 	struct sk_buff *skb;
-	uint16_t packet_len;
-	uint32_t direct_link_flags;
-	uint8_t num_packets;
+	u16 packet_len;
+	u32 direct_link_flags;
+	u8 num_packets;
 	struct delayed_work delayed_work;
 	struct batadv_hard_iface *if_incoming;
 	struct batadv_hard_iface *if_outgoing;
@@ -1188,7 +1189,7 @@
  */
 struct batadv_dat_entry {
 	__be32 ip;
-	uint8_t mac_addr[ETH_ALEN];
+	u8 mac_addr[ETH_ALEN];
 	unsigned short vid;
 	unsigned long last_update;
 	struct hlist_node hash_entry;
@@ -1250,14 +1251,13 @@
 	struct hlist_node list;
 	void (*ogm_handler)(struct batadv_priv *bat_priv,
 			    struct batadv_orig_node *orig,
-			    uint8_t flags,
-			    void *tvlv_value, uint16_t tvlv_value_len);
+			    u8 flags, void *tvlv_value, u16 tvlv_value_len);
 	int (*unicast_handler)(struct batadv_priv *bat_priv,
-			       uint8_t *src, uint8_t *dst,
-			       void *tvlv_value, uint16_t tvlv_value_len);
-	uint8_t type;
-	uint8_t version;
-	uint8_t flags;
+			       u8 *src, u8 *dst,
+			       void *tvlv_value, u16 tvlv_value_len);
+	u8 type;
+	u8 version;
+	u8 flags;
 	atomic_t refcount;
 	struct rcu_head rcu;
 };
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 2fb7b30..131e79c 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -85,7 +85,7 @@
 
 static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
 {
-	return netdev_priv(netdev);
+	return (struct lowpan_dev *)lowpan_priv(netdev)->priv;
 }
 
 static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
@@ -848,8 +848,9 @@
 	struct net_device *netdev;
 	int err = 0;
 
-	netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE,
-			      NET_NAME_UNKNOWN, netdev_setup);
+	netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev)),
+			      IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
+			      netdev_setup);
 	if (!netdev)
 		return -ENOMEM;
 
@@ -859,19 +860,7 @@
 	SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
 	SET_NETDEV_DEVTYPE(netdev, &bt_type);
 
-	err = register_netdev(netdev);
-	if (err < 0) {
-		BT_INFO("register_netdev failed %d", err);
-		free_netdev(netdev);
-		goto out;
-	}
-
-	BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
-	       netdev->ifindex, &chan->dst, chan->dst_type,
-	       &chan->src, chan->src_type);
-	set_bit(__LINK_STATE_PRESENT, &netdev->state);
-
-	*dev = netdev_priv(netdev);
+	*dev = lowpan_dev(netdev);
 	(*dev)->netdev = netdev;
 	(*dev)->hdev = chan->conn->hcon->hdev;
 	INIT_LIST_HEAD(&(*dev)->peers);
@@ -881,6 +870,23 @@
 	list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
 	spin_unlock(&devices_lock);
 
+	lowpan_netdev_setup(netdev, LOWPAN_LLTYPE_BTLE);
+
+	err = register_netdev(netdev);
+	if (err < 0) {
+		BT_INFO("register_netdev failed %d", err);
+		spin_lock(&devices_lock);
+		list_del_rcu(&(*dev)->list);
+		spin_unlock(&devices_lock);
+		free_netdev(netdev);
+		goto out;
+	}
+
+	BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
+	       netdev->ifindex, &chan->dst, chan->dst_type,
+	       &chan->src, chan->src_type);
+	set_bit(__LINK_STATE_PRESENT, &netdev->state);
+
 	return 0;
 
 out:
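
This 6lowpan.c rework does two things: it moves the driver onto the shared 6lowpan core, so the netdev private area is now layered, with the generic lowpan core data first and the Bluetooth-specific struct lowpan_dev behind it; and it defers register_netdev() until after the device is on bt_6lowpan_devices, unwinding the list insertion if registration fails. The layering that the new lowpan_dev() accessor relies on looks roughly like this (a sketch of the core's scheme, not copied from this patch):

/* generic core data sits at netdev_priv(dev); the link-layer
 * driver's private struct follows it in the same allocation
 */
struct lowpan_priv {
	enum lowpan_lltypes lltype;
	u8 priv[0] __aligned(sizeof(void *));	/* driver area starts here */
};

#define LOWPAN_PRIV_SIZE(llpriv_size) \
	(sizeof(struct lowpan_priv) + (llpriv_size))

static inline struct lowpan_priv *lowpan_priv(const struct net_device *dev)
{
	return netdev_priv(dev);
}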
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index b8c794b..95d1a66 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -53,6 +53,11 @@
 
 source "net/bluetooth/hidp/Kconfig"
 
+config BT_HS
+	bool "Bluetooth High Speed (HS) features"
+	depends on BT_BREDR
+	default y
+
 config BT_LE
 	bool "Bluetooth Low Energy (LE) features"
 	depends on BT
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 29c12ae..2b15ae8 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -13,9 +13,10 @@
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
 	hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \
-	a2mp.o amp.o ecc.o hci_request.o mgmt_util.o
+	ecc.o hci_request.o mgmt_util.o
 
 bluetooth-$(CONFIG_BT_BREDR) += sco.o
+bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
 bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o
 bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
 
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 5a04eb1..5f123c3 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -16,6 +16,7 @@
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
 
+#include "hci_request.h"
 #include "a2mp.h"
 #include "amp.h"
 
@@ -286,11 +287,21 @@
 	return 0;
 }
 
+static void read_local_amp_info_complete(struct hci_dev *hdev, u8 status,
+					 u16 opcode)
+{
+	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+	a2mp_send_getinfo_rsp(hdev);
+}
+
 static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
 			    struct a2mp_cmd *hdr)
 {
 	struct a2mp_info_req *req  = (void *) skb->data;
 	struct hci_dev *hdev;
+	struct hci_request hreq;
+	int err = 0;
 
 	if (le16_to_cpu(hdr->len) < sizeof(*req))
 		return -EINVAL;
@@ -311,7 +322,11 @@
 	}
 
 	set_bit(READ_LOC_AMP_INFO, &mgr->state);
-	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+	hci_req_init(&hreq, hdev);
+	hci_req_add(&hreq, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+	err = hci_req_run(&hreq, read_local_amp_info_complete);
+	if (err < 0)
+		a2mp_send_getinfo_rsp(hdev);
 
 done:
 	if (hdev)
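
The hunk above is the recurring conversion in this series: a fire-and-forget
hci_send_cmd() becomes a hci_request with a completion callback, and the
failure path answers immediately when the request cannot even be queued.
A minimal sketch of the pattern (my_complete/send_it/handle_failure are
hypothetical names):

	static void my_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	{
		/* called once the controller has answered the command */
	}

	static void send_it(struct hci_dev *hdev)
	{
		struct hci_request req;
		int err;

		hci_req_init(&req, hdev);
		hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
		err = hci_req_run(&req, my_complete);
		if (err < 0)
			handle_failure(hdev);	/* never queued: respond now */
	}
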
diff --git a/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h
index 296f665..a4ff3ea 100644
--- a/net/bluetooth/a2mp.h
+++ b/net/bluetooth/a2mp.h
@@ -130,10 +130,29 @@
 #define A2MP_STATUS_SECURITY_VIOLATION		0x06
 
 struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr);
+
+#if IS_ENABLED(CONFIG_BT_HS)
 int amp_mgr_put(struct amp_mgr *mgr);
 struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
 				       struct sk_buff *skb);
 void a2mp_discover_amp(struct l2cap_chan *chan);
+#else
+static inline int amp_mgr_put(struct amp_mgr *mgr)
+{
+	return 0;
+}
+
+static inline struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
+						     struct sk_buff *skb)
+{
+	return NULL;
+}
+
+static inline void a2mp_discover_amp(struct l2cap_chan *chan)
+{
+}
+#endif
+
 void a2mp_send_getinfo_rsp(struct hci_dev *hdev);
 void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status);
 void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status);
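
With a2mp.o now built only for CONFIG_BT_HS=y, the header keeps callers
compiling by pairing each real prototype with an inline no-op fallback, as
shown in the hunk above. A minimal sketch of the idiom (foo/struct bar are
hypothetical):

	#if IS_ENABLED(CONFIG_FOO)
	int foo(struct bar *b);			/* implemented in foo.c */
	#else
	static inline int foo(struct bar *b)
	{
		return 0;	/* benign default when compiled out */
	}
	#endif
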
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index ee016f0..e32f341 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -16,6 +16,7 @@
 #include <net/bluetooth/hci_core.h>
 #include <crypto/hash.h>
 
+#include "hci_request.h"
 #include "a2mp.h"
 #include "amp.h"
 
@@ -220,10 +221,49 @@
 	return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data);
 }
 
+static void read_local_amp_assoc_complete(struct hci_dev *hdev, u8 status,
+					  u16 opcode, struct sk_buff *skb)
+{
+	struct hci_rp_read_local_amp_assoc *rp = (void *)skb->data;
+	struct amp_assoc *assoc = &hdev->loc_assoc;
+	size_t rem_len, frag_len;
+
+	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+	if (rp->status)
+		goto send_rsp;
+
+	frag_len = skb->len - sizeof(*rp);
+	rem_len = __le16_to_cpu(rp->rem_len);
+
+	if (rem_len > frag_len) {
+		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
+
+		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
+		assoc->offset += frag_len;
+
+		/* Read other fragments */
+		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
+
+		return;
+	}
+
+	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
+	assoc->len = assoc->offset + rem_len;
+	assoc->offset = 0;
+
+send_rsp:
+	/* Send A2MP Rsp when all fragments are received */
+	a2mp_send_getampassoc_rsp(hdev, rp->status);
+	a2mp_send_create_phy_link_req(hdev, rp->status);
+}
+
 void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
 {
 	struct hci_cp_read_local_amp_assoc cp;
 	struct amp_assoc *loc_assoc = &hdev->loc_assoc;
+	struct hci_request req;
+	int err = 0;
 
 	BT_DBG("%s handle %d", hdev->name, phy_handle);
 
@@ -231,12 +271,18 @@
 	cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
 	cp.len_so_far = cpu_to_le16(loc_assoc->offset);
 
-	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+	hci_req_init(&req, hdev);
+	hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+	err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+	if (err < 0)
+		a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
 }
 
 void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
 {
 	struct hci_cp_read_local_amp_assoc cp;
+	struct hci_request req;
+	int err = 0;
 
 	memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc));
 	memset(&cp, 0, sizeof(cp));
@@ -244,7 +290,11 @@
 	cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
 
 	set_bit(READ_LOC_AMP_ASSOC, &mgr->state);
-	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+	hci_req_init(&req, hdev);
+	hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+	err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+	if (err < 0)
+		a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
 }
 
 void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
@@ -252,6 +302,8 @@
 {
 	struct hci_cp_read_local_amp_assoc cp;
 	struct amp_mgr *mgr = hcon->amp_mgr;
+	struct hci_request req;
+	int err = 0;
 
 	cp.phy_handle = hcon->handle;
 	cp.len_so_far = cpu_to_le16(0);
@@ -260,7 +312,25 @@
 	set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state);
 
 	/* Read Local AMP Assoc final link information data */
-	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+	hci_req_init(&req, hdev);
+	hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+	err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+	if (err < 0)
+		a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
+}
+
+static void write_remote_amp_assoc_complete(struct hci_dev *hdev, u8 status,
+					    u16 opcode, struct sk_buff *skb)
+{
+	struct hci_rp_write_remote_amp_assoc *rp = (void *)skb->data;
+
+	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
+	       hdev->name, rp->status, rp->phy_handle);
+
+	if (rp->status)
+		return;
+
+	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
 }
 
 /* Write AMP Assoc data fragments, returns true with last fragment written*/
@@ -270,6 +340,7 @@
 	struct hci_cp_write_remote_amp_assoc *cp;
 	struct amp_mgr *mgr = hcon->amp_mgr;
 	struct amp_ctrl *ctrl;
+	struct hci_request req;
 	u16 frag_len, len;
 
 	ctrl = amp_ctrl_lookup(mgr, hcon->remote_id);
@@ -307,7 +378,9 @@
 
 	amp_ctrl_put(ctrl);
 
-	hci_send_cmd(hdev, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
+	hci_req_init(&req, hdev);
+	hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
+	hci_req_run_skb(&req, write_remote_amp_assoc_complete);
 
 	kfree(cp);
 
@@ -344,10 +417,37 @@
 	amp_write_rem_assoc_frag(hdev, hcon);
 }
 
+static void create_phylink_complete(struct hci_dev *hdev, u8 status,
+				    u16 opcode)
+{
+	struct hci_cp_create_phy_link *cp;
+
+	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
+	if (!cp)
+		return;
+
+	hci_dev_lock(hdev);
+
+	if (status) {
+		struct hci_conn *hcon;
+
+		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
+		if (hcon)
+			hci_conn_del(hcon);
+	} else {
+		amp_write_remote_assoc(hdev, cp->phy_handle);
+	}
+
+	hci_dev_unlock(hdev);
+}
+
 void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
 			struct hci_conn *hcon)
 {
 	struct hci_cp_create_phy_link cp;
+	struct hci_request req;
 
 	cp.phy_handle = hcon->handle;
 
@@ -360,13 +460,33 @@
 		return;
 	}
 
-	hci_send_cmd(hdev, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
+	hci_req_init(&req, hdev);
+	hci_req_add(&req, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
+	hci_req_run(&req, create_phylink_complete);
+}
+
+static void accept_phylink_complete(struct hci_dev *hdev, u8 status,
+				    u16 opcode)
+{
+	struct hci_cp_accept_phy_link *cp;
+
+	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+	if (status)
+		return;
+
+	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
+	if (!cp)
+		return;
+
+	amp_write_remote_assoc(hdev, cp->phy_handle);
 }
 
 void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
 			struct hci_conn *hcon)
 {
 	struct hci_cp_accept_phy_link cp;
+	struct hci_request req;
 
 	cp.phy_handle = hcon->handle;
 
@@ -379,7 +499,9 @@
 		return;
 	}
 
-	hci_send_cmd(hdev, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
+	hci_req_init(&req, hdev);
+	hci_req_add(&req, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
+	hci_req_run(&req, accept_phylink_complete);
 }
 
 void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
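
amp.c uses hci_req_run_skb() rather than hci_req_run() because its callbacks
need the command-complete parameters: the callback also receives the response
skb and can parse it directly, as read_local_amp_assoc_complete() does above.
A minimal sketch of such a callback (rsp_complete is a hypothetical name;
response layout as in the hunk above):

	static void rsp_complete(struct hci_dev *hdev, u8 status, u16 opcode,
				 struct sk_buff *skb)
	{
		struct hci_rp_read_local_amp_assoc *rp = (void *)skb->data;

		if (rp->status)
			return;
		/* rp->frag / rp->rem_len carry the next assoc fragment */
	}
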
diff --git a/net/bluetooth/amp.h b/net/bluetooth/amp.h
index 7ea3db7..8848f81 100644
--- a/net/bluetooth/amp.h
+++ b/net/bluetooth/amp.h
@@ -44,6 +44,20 @@
 			struct hci_conn *hcon);
 void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
 			struct hci_conn *hcon);
+
+#if IS_ENABLED(CONFIG_BT_HS)
+void amp_create_logical_link(struct l2cap_chan *chan);
+void amp_disconnect_logical_link(struct hci_chan *hchan);
+#else
+static inline void amp_create_logical_link(struct l2cap_chan *chan)
+{
+}
+
+static inline void amp_disconnect_logical_link(struct hci_chan *hchan)
+{
+}
+#endif
+
 void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle);
 void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle);
 void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon);
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index b0c6c6a..9a503387 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -100,9 +100,9 @@
 static struct cmtp_application *cmtp_application_get(struct cmtp_session *session, int pattern, __u16 value)
 {
 	struct cmtp_application *app;
-	struct list_head *p, *n;
+	struct list_head *p;
 
-	list_for_each_safe(p, n, &session->applications) {
+	list_for_each(p, &session->applications) {
 		app = list_entry(p, struct cmtp_application, list);
 		switch (pattern) {
 		case CMTP_MSGNUM:
@@ -511,13 +511,13 @@
 	struct capi_ctr *ctrl = m->private;
 	struct cmtp_session *session = ctrl->driverdata;
 	struct cmtp_application *app;
-	struct list_head *p, *n;
+	struct list_head *p;
 
 	seq_printf(m, "%s\n\n", cmtp_procinfo(ctrl));
 	seq_printf(m, "addr %s\n", session->name);
 	seq_printf(m, "ctrl %d\n", session->num);
 
-	list_for_each_safe(p, n, &session->applications) {
+	list_for_each(p, &session->applications) {
 		app = list_entry(p, struct cmtp_application, list);
 		seq_printf(m, "appl %d -> %d\n", app->appl, app->mapping);
 	}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 2c48bf0..b4548c73 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -64,6 +64,48 @@
 	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
 }
 
+/* This function requires the caller holds hdev->lock */
+static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
+{
+	struct hci_conn_params *params;
+	struct smp_irk *irk;
+	bdaddr_t *bdaddr;
+	u8 bdaddr_type;
+
+	bdaddr = &conn->dst;
+	bdaddr_type = conn->dst_type;
+
+	/* Check if we need to convert to identity address */
+	irk = hci_get_irk(conn->hdev, bdaddr, bdaddr_type);
+	if (irk) {
+		bdaddr = &irk->bdaddr;
+		bdaddr_type = irk->addr_type;
+	}
+
+	params = hci_explicit_connect_lookup(conn->hdev, bdaddr, bdaddr_type);
+	if (!params)
+		return;
+
+	/* The connection attempt was doing a scan for a new RPA and is
+	 * still in the scan phase. If the params are not associated with
+	 * any other autoconnect action, remove them completely; otherwise
+	 * just unmark them as waiting for a connection by clearing the
+	 * explicit_connect field.
+	 */
+	if (params->auto_connect == HCI_AUTO_CONN_EXPLICIT)
+		hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type);
+	else
+		params->explicit_connect = false;
+}
+
+/* This function requires the caller holds hdev->lock */
+static void hci_connect_le_scan_remove(struct hci_conn *conn)
+{
+	hci_connect_le_scan_cleanup(conn);
+
+	hci_conn_hash_del(conn->hdev, conn);
+	hci_update_background_scan(conn->hdev);
+}
+
 static void hci_acl_create_connection(struct hci_conn *conn)
 {
 	struct hci_dev *hdev = conn->hdev;
@@ -340,8 +382,12 @@
 		if (conn->out) {
 			if (conn->type == ACL_LINK)
 				hci_acl_create_connection_cancel(conn);
-			else if (conn->type == LE_LINK)
-				hci_le_create_connection_cancel(conn);
+			else if (conn->type == LE_LINK) {
+				if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+					hci_connect_le_scan_remove(conn);
+				else
+					hci_le_create_connection_cancel(conn);
+			}
 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
 			hci_reject_sco(conn);
 		}
@@ -637,15 +683,18 @@
 {
 	struct hci_conn *conn;
 
-	if (status == 0)
-		return;
+	hci_dev_lock(hdev);
+
+	conn = hci_lookup_le_connect(hdev);
+
+	if (!status) {
+		hci_connect_le_scan_cleanup(conn);
+		goto done;
+	}
 
 	BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
 	       status);
 
-	hci_dev_lock(hdev);
-
-	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
 	if (!conn)
 		goto done;
 
@@ -685,6 +734,7 @@
 	hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
 
 	conn->state = BT_CONNECT;
+	clear_bit(HCI_CONN_SCANNING, &conn->flags);
 }
 
 static void hci_req_directed_advertising(struct hci_request *req,
@@ -728,7 +778,7 @@
 				u8 role)
 {
 	struct hci_conn_params *params;
-	struct hci_conn *conn;
+	struct hci_conn *conn, *conn_unfinished;
 	struct smp_irk *irk;
 	struct hci_request req;
 	int err;
@@ -751,26 +801,29 @@
 	 * and return the object found.
 	 */
 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+	conn_unfinished = NULL;
 	if (conn) {
-		conn->pending_sec_level = sec_level;
-		goto done;
+		if (conn->state == BT_CONNECT &&
+		    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
+			BT_DBG("will continue unfinished conn %pMR", dst);
+			conn_unfinished = conn;
+		} else {
+			if (conn->pending_sec_level < sec_level)
+				conn->pending_sec_level = sec_level;
+			goto done;
+		}
 	}
 
 	/* Since the controller supports only one LE connection attempt at a
 	 * time, we return -EBUSY if there is any connection attempt running.
 	 */
-	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-	if (conn)
+	if (hci_lookup_le_connect(hdev))
 		return ERR_PTR(-EBUSY);
 
 	/* When given an identity address with existing identity
 	 * resolving key, the connection needs to be established
 	 * to a resolvable random address.
 	 *
-	 * This uses the cached random resolvable address from
-	 * a previous scan. When no cached address is available,
-	 * try connecting to the identity address instead.
-	 *
 	 * Storing the resolvable random address is required here
 	 * to handle connection failures. The address will later
 	 * be resolved back into the original identity address
@@ -782,15 +835,23 @@
 		dst_type = ADDR_LE_DEV_RANDOM;
 	}
 
-	conn = hci_conn_add(hdev, LE_LINK, dst, role);
+	if (conn_unfinished) {
+		conn = conn_unfinished;
+		bacpy(&conn->dst, dst);
+	} else {
+		conn = hci_conn_add(hdev, LE_LINK, dst, role);
+	}
+
 	if (!conn)
 		return ERR_PTR(-ENOMEM);
 
 	conn->dst_type = dst_type;
 	conn->sec_level = BT_SECURITY_LOW;
-	conn->pending_sec_level = sec_level;
 	conn->conn_timeout = conn_timeout;
 
+	if (!conn_unfinished)
+		conn->pending_sec_level = sec_level;
+
 	hci_req_init(&req, hdev);
 
 	/* Disable advertising if we're active. For master role
@@ -855,6 +916,144 @@
 	}
 
 done:
+	/* If this is a continuation of a connect started by
+	 * hci_connect_le_scan, hci_conn_hold was already called and
+	 * calling it again would mess up the reference counter.
+	 */
+	if (!conn_unfinished)
+		hci_conn_hold(conn);
+
+	return conn;
+}
+
+static void hci_connect_le_scan_complete(struct hci_dev *hdev, u8 status,
+					 u16 opcode)
+{
+	struct hci_conn *conn;
+
+	if (!status)
+		return;
+
+	BT_ERR("Failed to add device to auto conn whitelist: status 0x%2.2x",
+	       status);
+
+	hci_dev_lock(hdev);
+
+	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+	if (conn)
+		hci_le_conn_failed(conn, status);
+
+	hci_dev_unlock(hdev);
+}
+
+static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
+{
+	struct hci_conn *conn;
+
+	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+	if (!conn)
+		return false;
+
+	if (conn->dst_type != type)
+		return false;
+
+	if (conn->state != BT_CONNECTED)
+		return false;
+
+	return true;
+}
+
+/* This function requires the caller holds hdev->lock */
+static int hci_explicit_conn_params_set(struct hci_request *req,
+					bdaddr_t *addr, u8 addr_type)
+{
+	struct hci_dev *hdev = req->hdev;
+	struct hci_conn_params *params;
+
+	if (is_connected(hdev, addr, addr_type))
+		return -EISCONN;
+
+	params = hci_conn_params_add(hdev, addr, addr_type);
+	if (!params)
+		return -EIO;
+
+	/* If we created new params, or existing params were marked as disabled,
+	 * mark them to be used just once to connect.
+	 */
+	if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
+		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+		list_del_init(&params->action);
+		list_add(&params->action, &hdev->pend_le_conns);
+	}
+
+	params->explicit_connect = true;
+	__hci_update_background_scan(req);
+
+	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
+	       params->auto_connect);
+
+	return 0;
+}
+
+/* This function requires the caller holds hdev->lock */
+struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+				     u8 dst_type, u8 sec_level,
+				     u16 conn_timeout, u8 role)
+{
+	struct hci_conn *conn;
+	struct hci_request req;
+	int err;
+
+	/* Let's make sure that LE is enabled. */
+	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
+		if (lmp_le_capable(hdev))
+			return ERR_PTR(-ECONNREFUSED);
+
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+
+	/* Some devices send ATT messages as soon as the physical link is
+	 * established. To be able to handle these ATT messages, the user-
+	 * space first establishes the connection and then starts the pairing
+	 * process.
+	 *
+	 * So if a hci_conn object already exists for the following connection
+	 * attempt, we simply update pending_sec_level and auth_type fields
+	 * and return the object found.
+	 */
+	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+	if (conn) {
+		if (conn->pending_sec_level < sec_level)
+			conn->pending_sec_level = sec_level;
+		goto done;
+	}
+
+	BT_DBG("requesting refresh of dst_addr");
+
+	conn = hci_conn_add(hdev, LE_LINK, dst, role);
+	if (!conn)
+		return ERR_PTR(-ENOMEM);
+
+	hci_req_init(&req, hdev);
+
+	if (hci_explicit_conn_params_set(&req, dst, dst_type) < 0) {
+		hci_conn_del(conn);
+		return ERR_PTR(-EBUSY);
+	}
+
+	conn->state = BT_CONNECT;
+	set_bit(HCI_CONN_SCANNING, &conn->flags);
+
+	err = hci_req_run(&req, hci_connect_le_scan_complete);
+	if (err && err != -ENODATA) {
+		hci_conn_del(conn);
+		return ERR_PTR(err);
+	}
+
+	conn->dst_type = dst_type;
+	conn->sec_level = BT_SECURITY_LOW;
+	conn->pending_sec_level = sec_level;
+	conn->conn_timeout = conn_timeout;
+
+done:
 	hci_conn_hold(conn);
 	return conn;
 }
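
hci_connect_le_scan() is the new entry point for outgoing LE connections: it
parks the hci_conn in BT_CONNECT with HCI_CONN_SCANNING set and lets the
passive background scan trigger the real LE Create Connection once the device
is seen advertising. A minimal usage sketch, mirroring the l2cap_core.c and
mgmt.c call sites later in this patch (caller must hold hdev->lock, which the
lock/unlock pair below supplies):

	struct hci_conn *hcon;

	hci_dev_lock(hdev);
	hcon = hci_connect_le_scan(hdev, dst, dst_type, BT_SECURITY_LOW,
				   HCI_LE_CONN_TIMEOUT, HCI_ROLE_MASTER);
	hci_dev_unlock(hdev);
	if (IS_ERR(hcon))
		return PTR_ERR(hcon);
	/* hcon stays in BT_CONNECT with HCI_CONN_SCANNING set until the
	 * advertising-report path issues the actual connect request.
	 */
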
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 2f8fb33..adcbc74 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2822,10 +2822,6 @@
 {
 	struct hci_conn_params *params;
 
-	/* The conn params list only contains identity addresses */
-	if (!hci_is_identity_address(addr, addr_type))
-		return NULL;
-
 	list_for_each_entry(params, &hdev->le_conn_params, list) {
 		if (bacmp(&params->addr, addr) == 0 &&
 		    params->addr_type == addr_type) {
@@ -2842,10 +2838,6 @@
 {
 	struct hci_conn_params *param;
 
-	/* The list only contains identity addresses */
-	if (!hci_is_identity_address(addr, addr_type))
-		return NULL;
-
 	list_for_each_entry(param, list, action) {
 		if (bacmp(&param->addr, addr) == 0 &&
 		    param->addr_type == addr_type)
@@ -2856,14 +2848,35 @@
 }
 
 /* This function requires the caller holds hdev->lock */
+struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
+						    bdaddr_t *addr,
+						    u8 addr_type)
+{
+	struct hci_conn_params *param;
+
+	list_for_each_entry(param, &hdev->pend_le_conns, action) {
+		if (bacmp(&param->addr, addr) == 0 &&
+		    param->addr_type == addr_type &&
+		    param->explicit_connect)
+			return param;
+	}
+
+	list_for_each_entry(param, &hdev->pend_le_reports, action) {
+		if (bacmp(&param->addr, addr) == 0 &&
+		    param->addr_type == addr_type &&
+		    param->explicit_connect)
+			return param;
+	}
+
+	return NULL;
+}
+
+/* This function requires the caller holds hdev->lock */
 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
 					    bdaddr_t *addr, u8 addr_type)
 {
 	struct hci_conn_params *params;
 
-	if (!hci_is_identity_address(addr, addr_type))
-		return NULL;
-
 	params = hci_conn_params_lookup(hdev, addr, addr_type);
 	if (params)
 		return params;
@@ -2927,6 +2940,15 @@
 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
 			continue;
+
+		/* If trying to establish a one-time connection to a disabled
+		 * device, leave the params, but mark them as just once.
+		 */
+		if (params->explicit_connect) {
+			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+			continue;
+		}
+
 		list_del(&params->list);
 		kfree(params);
 	}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 32363c2..1860418 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -823,7 +823,7 @@
 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 
 	if (rp->status)
-		goto a2mp_rsp;
+		return;
 
 	hdev->amp_status = rp->amp_status;
 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
@@ -835,46 +835,6 @@
 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
-
-a2mp_rsp:
-	a2mp_send_getinfo_rsp(hdev);
-}
-
-static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
-					struct sk_buff *skb)
-{
-	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
-	struct amp_assoc *assoc = &hdev->loc_assoc;
-	size_t rem_len, frag_len;
-
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
-
-	if (rp->status)
-		goto a2mp_rsp;
-
-	frag_len = skb->len - sizeof(*rp);
-	rem_len = __le16_to_cpu(rp->rem_len);
-
-	if (rem_len > frag_len) {
-		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
-
-		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
-		assoc->offset += frag_len;
-
-		/* Read other fragments */
-		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
-
-		return;
-	}
-
-	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
-	assoc->len = assoc->offset + rem_len;
-	assoc->offset = 0;
-
-a2mp_rsp:
-	/* Send A2MP Rsp when all fragments are received */
-	a2mp_send_getampassoc_rsp(hdev, rp->status);
-	a2mp_send_create_phy_link_req(hdev, rp->status);
 }
 
 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
@@ -1099,7 +1059,7 @@
 
 		hci_dev_set_flag(hdev, HCI_LE_ADV);
 
-		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+		conn = hci_lookup_le_connect(hdev);
 		if (conn)
 			queue_delayed_work(hdev->workqueue,
 					   &conn->le_conn_timeout,
@@ -1409,20 +1369,6 @@
 	hci_dev_unlock(hdev);
 }
 
-static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
-					  struct sk_buff *skb)
-{
-	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
-
-	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
-	       hdev->name, rp->status, rp->phy_handle);
-
-	if (rp->status)
-		return;
-
-	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
-}
-
 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_rp_read_rssi *rp = (void *) skb->data;
@@ -1944,47 +1890,6 @@
 	hci_dev_unlock(hdev);
 }
 
-static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
-{
-	struct hci_cp_create_phy_link *cp;
-
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
-	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
-	if (!cp)
-		return;
-
-	hci_dev_lock(hdev);
-
-	if (status) {
-		struct hci_conn *hcon;
-
-		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
-		if (hcon)
-			hci_conn_del(hcon);
-	} else {
-		amp_write_remote_assoc(hdev, cp->phy_handle);
-	}
-
-	hci_dev_unlock(hdev);
-}
-
-static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
-{
-	struct hci_cp_accept_phy_link *cp;
-
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
-	if (status)
-		return;
-
-	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
-	if (!cp)
-		return;
-
-	amp_write_remote_assoc(hdev, cp->phy_handle);
-}
-
 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
 {
 	struct hci_cp_le_create_conn *cp;
@@ -2998,10 +2903,6 @@
 		hci_cc_read_clock(hdev, skb);
 		break;
 
-	case HCI_OP_READ_LOCAL_AMP_ASSOC:
-		hci_cc_read_local_amp_assoc(hdev, skb);
-		break;
-
 	case HCI_OP_READ_INQ_RSP_TX_POWER:
 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
 		break;
@@ -3106,10 +3007,6 @@
 		hci_cc_set_adv_param(hdev, skb);
 		break;
 
-	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
-		hci_cc_write_remote_amp_assoc(hdev, skb);
-		break;
-
 	case HCI_OP_READ_RSSI:
 		hci_cc_read_rssi(hdev, skb);
 		break;
@@ -3193,14 +3090,6 @@
 		hci_cs_setup_sync_conn(hdev, ev->status);
 		break;
 
-	case HCI_OP_CREATE_PHY_LINK:
-		hci_cs_create_phylink(hdev, ev->status);
-		break;
-
-	case HCI_OP_ACCEPT_PHY_LINK:
-		hci_cs_accept_phylink(hdev, ev->status);
-		break;
-
 	case HCI_OP_SNIFF_MODE:
 		hci_cs_sniff_mode(hdev, ev->status);
 		break;
@@ -3837,17 +3726,25 @@
 		if (ev->link_type == ESCO_LINK)
 			goto unlock;
 
+		/* When the link type in the event indicates SCO connection
+		 * and lookup of the connection object fails, then check
+		 * if an eSCO connection object exists.
+		 *
+		 * The core limits the synchronous connections to either
+		 * SCO or eSCO. The eSCO connection is preferred and
+		 * attempted first; until it is successfully established,
+		 * the link type will be hinted as eSCO.
+		 */
 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
 		if (!conn)
 			goto unlock;
-
-		conn->type = SCO_LINK;
 	}
 
 	switch (ev->status) {
 	case 0x00:
 		conn->handle = __le16_to_cpu(ev->handle);
 		conn->state  = BT_CONNECTED;
+		conn->type   = ev->link_type;
 
 		hci_debugfs_create_conn(conn);
 		hci_conn_add_sysfs(conn);
@@ -4399,6 +4296,23 @@
 	hci_dev_unlock(hdev);
 }
 
+#if IS_ENABLED(CONFIG_BT_HS)
+static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_ev_channel_selected *ev = (void *)skb->data;
+	struct hci_conn *hcon;
+
+	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
+
+	skb_pull(skb, sizeof(*ev));
+
+	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+	if (!hcon)
+		return;
+
+	amp_read_loc_assoc_final_data(hdev, hcon);
+}
+
 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
 				      struct sk_buff *skb)
 {
@@ -4522,6 +4436,7 @@
 
 	hci_dev_unlock(hdev);
 }
+#endif
 
 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
@@ -4540,7 +4455,7 @@
 	 */
 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
 
-	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+	conn = hci_lookup_le_connect(hdev);
 	if (!conn) {
 		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
 		if (!conn) {
@@ -4733,42 +4648,49 @@
 	/* If we're not connectable only connect devices that we have in
 	 * our pend_le_conns list.
 	 */
-	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
-					   addr, addr_type);
+	params = hci_explicit_connect_lookup(hdev, addr, addr_type);
+
 	if (!params)
 		return NULL;
 
-	switch (params->auto_connect) {
-	case HCI_AUTO_CONN_DIRECT:
-		/* Only devices advertising with ADV_DIRECT_IND are
-		 * triggering a connection attempt. This is allowing
-		 * incoming connections from slave devices.
-		 */
-		if (adv_type != LE_ADV_DIRECT_IND)
+	if (!params->explicit_connect) {
+		switch (params->auto_connect) {
+		case HCI_AUTO_CONN_DIRECT:
+			/* Only devices advertising with ADV_DIRECT_IND are
+			 * triggering a connection attempt. This is allowing
+			 * incoming connections from slave devices.
+			 */
+			if (adv_type != LE_ADV_DIRECT_IND)
+				return NULL;
+			break;
+		case HCI_AUTO_CONN_ALWAYS:
+			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
+			 * are triggering a connection attempt. This means
+			 * that incoming connections from slave devices are
+			 * accepted and also outgoing connections to slave
+			 * devices are established when found.
+			 */
+			break;
+		default:
 			return NULL;
-		break;
-	case HCI_AUTO_CONN_ALWAYS:
-		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
-		 * are triggering a connection attempt. This means
-		 * that incoming connectioms from slave device are
-		 * accepted and also outgoing connections to slave
-		 * devices are established when found.
-		 */
-		break;
-	default:
-		return NULL;
+		}
 	}
 
 	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
 			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
 	if (!IS_ERR(conn)) {
-		/* Store the pointer since we don't really have any
+		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
+		 * by the higher layer that tried to connect; if not,
+		 * store the pointer since we don't really have any
 		 * other owner of the object besides the params that
 		 * triggered it. This way we can abort the connection if
 		 * the parameters get removed and keep the reference
 		 * count consistent once the connection is established.
 		 */
-		params->conn = hci_conn_get(conn);
+
+		if (!params->explicit_connect)
+			params->conn = hci_conn_get(conn);
+
 		return conn;
 	}
 
@@ -5206,22 +5128,6 @@
 	}
 }
 
-static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
-{
-	struct hci_ev_channel_selected *ev = (void *) skb->data;
-	struct hci_conn *hcon;
-
-	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
-
-	skb_pull(skb, sizeof(*ev));
-
-	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
-	if (!hcon)
-		return;
-
-	amp_read_loc_assoc_final_data(hdev, hcon);
-}
-
 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
 				 u8 event, struct sk_buff *skb)
 {
@@ -5442,14 +5348,15 @@
 		hci_le_meta_evt(hdev, skb);
 		break;
 
-	case HCI_EV_CHANNEL_SELECTED:
-		hci_chan_selected_evt(hdev, skb);
-		break;
-
 	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
 		hci_remote_oob_data_request_evt(hdev, skb);
 		break;
 
+#if IS_ENABLED(CONFIG_BT_HS)
+	case HCI_EV_CHANNEL_SELECTED:
+		hci_chan_selected_evt(hdev, skb);
+		break;
+
 	case HCI_EV_PHY_LINK_COMPLETE:
 		hci_phy_link_complete_evt(hdev, skb);
 		break;
@@ -5465,6 +5372,7 @@
 	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
 		hci_disconn_phylink_complete_evt(hdev, skb);
 		break;
+#endif
 
 	case HCI_EV_NUM_COMP_BLOCKS:
 		hci_num_comp_blocks_evt(hdev, skb);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index d6025d6..b736922 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -317,7 +317,7 @@
 	 * address be updated at the next cycle.
 	 */
 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
-	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+	    hci_lookup_le_connect(hdev)) {
 		BT_DBG("Deferring random address update");
 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
 		return;
@@ -479,7 +479,6 @@
 void __hci_update_background_scan(struct hci_request *req)
 {
 	struct hci_dev *hdev = req->hdev;
-	struct hci_conn *conn;
 
 	if (!test_bit(HCI_UP, &hdev->flags) ||
 	    test_bit(HCI_INIT, &hdev->flags) ||
@@ -529,8 +528,7 @@
 		 * since some controllers are not able to scan and connect at
 		 * the same time.
 		 */
-		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-		if (conn)
+		if (hci_lookup_le_connect(hdev))
 			return;
 
 		/* If controller is currently scanning, we stop it to ensure we
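
Several hunks in this patch replace the open-coded
hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT) with
hci_lookup_le_connect(). The helper itself lands in hci_core.h outside this
excerpt; presumably it is the old lookup narrowed to connections that are
genuinely awaiting the controller, i.e. not still in the scanning phase.
A hedged sketch of what such a helper would look like:

	static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
	{
		struct hci_conn_hash *h = &hdev->conn_hash;
		struct hci_conn *c;

		rcu_read_lock();
		list_for_each_entry_rcu(c, &h->list, list) {
			if (c->type == LE_LINK && c->state == BT_CONNECT &&
			    !test_bit(HCI_CONN_SCANNING, &c->flags)) {
				rcu_read_unlock();
				return c;
			}
		}
		rcu_read_unlock();

		return NULL;
	}
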
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 45fffa4..7c65ee2 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7113,8 +7113,10 @@
 		else
 			role = HCI_ROLE_MASTER;
 
-		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
-				      HCI_LE_CONN_TIMEOUT, role);
+		hcon = hci_connect_le_scan(hdev, dst, dst_type,
+					   chan->sec_level,
+					   HCI_LE_CONN_TIMEOUT,
+					   role);
 	} else {
 		u8 auth_type = l2cap_get_auth_type(chan);
 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 2442877..586b3d5 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1054,18 +1054,23 @@
 	sock_put(sk);
 }
 
-static int __l2cap_wait_ack(struct sock *sk)
+static int __l2cap_wait_ack(struct sock *sk, struct l2cap_chan *chan)
 {
-	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	DECLARE_WAITQUEUE(wait, current);
 	int err = 0;
-	int timeo = HZ/5;
+	int timeo = L2CAP_WAIT_ACK_POLL_PERIOD;
+	/* Timeout to prevent infinite loop */
+	unsigned long timeout = jiffies + L2CAP_WAIT_ACK_TIMEOUT;
 
 	add_wait_queue(sk_sleep(sk), &wait);
 	set_current_state(TASK_INTERRUPTIBLE);
-	while (chan->unacked_frames > 0 && chan->conn) {
+	do {
+		BT_DBG("Waiting for %d ACKs, timeout %04d ms",
+		       chan->unacked_frames, time_after(jiffies, timeout) ? 0 :
+		       jiffies_to_msecs(timeout - jiffies));
+
 		if (!timeo)
-			timeo = HZ/5;
+			timeo = L2CAP_WAIT_ACK_POLL_PERIOD;
 
 		if (signal_pending(current)) {
 			err = sock_intr_errno(timeo);
@@ -1080,7 +1085,15 @@
 		err = sock_error(sk);
 		if (err)
 			break;
-	}
+
+		if (time_after(jiffies, timeout)) {
+			err = -ENOLINK;
+			break;
+		}
+
+	} while (chan->unacked_frames > 0 &&
+		 chan->state == BT_CONNECTED);
+
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(sk), &wait);
 	return err;
@@ -1098,7 +1111,12 @@
 	if (!sk)
 		return 0;
 
+	/* prevent sk structure from being freed whilst unlocked */
+	sock_hold(sk);
+
 	chan = l2cap_pi(sk)->chan;
+	/* prevent chan structure from being freed whilst unlocked */
+	l2cap_chan_hold(chan);
 	conn = chan->conn;
 
 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
@@ -1110,8 +1128,10 @@
 	lock_sock(sk);
 
 	if (!sk->sk_shutdown) {
-		if (chan->mode == L2CAP_MODE_ERTM)
-			err = __l2cap_wait_ack(sk);
+		if (chan->mode == L2CAP_MODE_ERTM &&
+		    chan->unacked_frames > 0 &&
+		    chan->state == BT_CONNECTED)
+			err = __l2cap_wait_ack(sk, chan);
 
 		sk->sk_shutdown = SHUTDOWN_MASK;
 
@@ -1134,6 +1154,11 @@
 	if (conn)
 		mutex_unlock(&conn->chan_lock);
 
+	l2cap_chan_put(chan);
+	sock_put(sk);
+
+	BT_DBG("err: %d", err);
+
 	return err;
 }
 
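The rewritten __l2cap_wait_ack() bounds what used to be an open-ended wait:
it still polls for outstanding ACKs, but now also gives up with -ENOLINK once
a jiffies deadline passes. A minimal sketch of the bounded-wait idiom
(deadline and poll-period values are hypothetical; done() stands in for the
real exit condition):

	unsigned long timeout = jiffies + msecs_to_jiffies(10000);
	int err = 0;

	set_current_state(TASK_INTERRUPTIBLE);
	do {
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
		schedule_timeout(msecs_to_jiffies(200));	/* poll period */
		set_current_state(TASK_INTERRUPTIBLE);
		if (time_after(jiffies, timeout)) {
			err = -ENOLINK;		/* deadline passed: give up */
			break;
		}
	} while (!done());
	set_current_state(TASK_RUNNING);
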
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 92720f3..ccaf5a4 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -3564,9 +3564,10 @@
 		 */
 		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
 
-		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
-				      sec_level, HCI_LE_CONN_TIMEOUT,
-				      HCI_ROLE_MASTER);
+		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
+					   addr_type, sec_level,
+					   HCI_LE_CONN_TIMEOUT,
+					   HCI_ROLE_MASTER);
 	}
 
 	if (IS_ERR(conn)) {
@@ -4210,7 +4211,7 @@
 		/* Don't let discovery abort an outgoing connection attempt
 		 * that's using directed advertising.
 		 */
-		if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+		if (hci_lookup_le_connect(hdev)) {
 			*status = MGMT_STATUS_REJECTED;
 			return false;
 		}
@@ -6107,6 +6108,12 @@
 	switch (auto_connect) {
 	case HCI_AUTO_CONN_DISABLED:
 	case HCI_AUTO_CONN_LINK_LOSS:
+		/* If auto connect is being disabled when we're trying to
+		 * connect to the device, keep connecting.
+		 */
+		if (params->explicit_connect)
+			list_add(&params->action, &hdev->pend_le_conns);
+
 		__hci_update_background_scan(req);
 		break;
 	case HCI_AUTO_CONN_REPORT:
@@ -6226,6 +6233,17 @@
 	else
 		auto_conn = HCI_AUTO_CONN_REPORT;
 
+	/* Kernel internally uses conn_params with resolvable private
+	 * address, but Add Device allows only identity addresses.
+	 * Make sure it is enforced before calling
+	 * hci_conn_params_lookup.
+	 */
+	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
+		err = cmd->cmd_complete(cmd, MGMT_STATUS_INVALID_PARAMS);
+		mgmt_pending_remove(cmd);
+		goto unlock;
+	}
+
 	/* If the connection parameters don't exist for this device,
 	 * they will be created and configured with defaults.
 	 */
@@ -6340,6 +6358,18 @@
 		else
 			addr_type = ADDR_LE_DEV_RANDOM;
 
+		/* Kernel internally uses conn_params with resolvable private
+		 * address, but Remove Device allows only identity addresses.
+		 * Make sure it is enforced before calling
+		 * hci_conn_params_lookup.
+		 */
+		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
+			err = cmd->cmd_complete(cmd,
+						MGMT_STATUS_INVALID_PARAMS);
+			mgmt_pending_remove(cmd);
+			goto unlock;
+		}
+
 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
 						addr_type);
 		if (!params) {
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 688a040..f315c8d 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -154,13 +154,13 @@
 	sock_set_flag(sk, SOCK_ZAPPED);
 }
 
-static int sco_conn_del(struct hci_conn *hcon, int err)
+static void sco_conn_del(struct hci_conn *hcon, int err)
 {
 	struct sco_conn *conn = hcon->sco_data;
 	struct sock *sk;
 
 	if (!conn)
-		return 0;
+		return;
 
 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
 
@@ -179,7 +179,6 @@
 
 	hcon->sco_data = NULL;
 	kfree(conn);
-	return 0;
 }
 
 static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 4ff77a1..6ed2feb 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -339,6 +339,7 @@
 	.ndo_bridge_getlink	 = br_getlink,
 	.ndo_bridge_setlink	 = br_setlink,
 	.ndo_bridge_dellink	 = br_dellink,
+	.ndo_features_check	 = passthru_features_check,
 };
 
 static void br_dev_free(struct net_device *dev)
@@ -364,8 +365,7 @@
 	dev->destructor = br_dev_free;
 	dev->ethtool_ops = &br_ethtool_ops;
 	SET_NETDEV_DEVTYPE(dev, &br_type);
-	dev->tx_queue_len = 0;
-	dev->priv_flags = IFF_EBRIDGE;
+	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
 
 	dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index a538cb1..45e4757 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -281,6 +281,7 @@
 	br_fdb_delete_by_port(br, NULL, 0, 1);
 
 	br_vlan_flush(br);
+	br_multicast_dev_del(br);
 	del_timer_sync(&br->gc_timer);
 
 	br_sysfs_delbr(br->dev);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index c943219..d747275 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -85,6 +85,7 @@
 					memset(&e, 0, sizeof(e));
 					e.ifindex = port->dev->ifindex;
 					e.state = p->state;
+					e.vid = p->addr.vid;
 					if (p->addr.proto == htons(ETH_P_IP))
 						e.addr.u.ip4 = p->addr.u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
@@ -230,7 +231,7 @@
 }
 
 void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
-		   struct br_ip *group, int type)
+		   struct br_ip *group, int type, u8 state)
 {
 	struct br_mdb_entry entry;
 
@@ -241,9 +242,78 @@
 #if IS_ENABLED(CONFIG_IPV6)
 	entry.addr.u.ip6 = group->u.ip6;
 #endif
+	entry.state = state;
+	entry.vid = group->vid;
 	__br_mdb_notify(dev, &entry, type);
 }
 
+static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
+				   struct net_device *dev,
+				   int ifindex, u32 pid,
+				   u32 seq, int type, unsigned int flags)
+{
+	struct br_port_msg *bpm;
+	struct nlmsghdr *nlh;
+	struct nlattr *nest;
+
+	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	bpm = nlmsg_data(nlh);
+	memset(bpm, 0, sizeof(*bpm));
+	bpm->family = AF_BRIDGE;
+	bpm->ifindex = dev->ifindex;
+	nest = nla_nest_start(skb, MDBA_ROUTER);
+	if (!nest)
+		goto cancel;
+
+	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
+		goto end;
+
+	nla_nest_end(skb, nest);
+	nlmsg_end(skb, nlh);
+	return 0;
+
+end:
+	nla_nest_end(skb, nest);
+cancel:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static inline size_t rtnl_rtr_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct br_port_msg))
+		+ nla_total_size(sizeof(__u32));
+}
+
+void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
+		   int type)
+{
+	struct net *net = dev_net(dev);
+	struct sk_buff *skb;
+	int err = -ENOBUFS;
+	int ifindex;
+
+	ifindex = port ? port->dev->ifindex : 0;
+	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
+	if (!skb)
+		goto errout;
+
+	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
+	if (err < 0) {
+		kfree_skb(skb);
+		goto errout;
+	}
+
+	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
+	return;
+
+errout:
+	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
+}
+
 static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
 {
 	if (entry->ifindex == 0)
@@ -263,6 +333,8 @@
 		return false;
 	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
 		return false;
+	if (entry->vid >= VLAN_VID_MASK)
+		return false;
 
 	return true;
 }
@@ -374,6 +446,7 @@
 		return -EINVAL;
 
 	memset(&ip, 0, sizeof(ip));
+	ip.vid = entry->vid;
 	ip.proto = entry->addr.proto;
 	if (ip.proto == htons(ETH_P_IP))
 		ip.u.ip4 = entry->addr.u.ip4;
@@ -391,8 +464,11 @@
 static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	struct net *net = sock_net(skb->sk);
+	unsigned short vid = VLAN_N_VID;
+	struct net_device *dev, *pdev;
 	struct br_mdb_entry *entry;
-	struct net_device *dev;
+	struct net_bridge_port *p;
+	struct net_port_vlans *pv;
 	struct net_bridge *br;
 	int err;
 
@@ -402,9 +478,32 @@
 
 	br = netdev_priv(dev);
 
-	err = __br_mdb_add(net, br, entry);
-	if (!err)
-		__br_mdb_notify(dev, entry, RTM_NEWMDB);
+	/* If vlan filtering is enabled and no VLAN is specified,
+	 * install the mdb entry on all vlans configured on the port.
+	 */
+	pdev = __dev_get_by_index(net, entry->ifindex);
+	if (!pdev)
+		return -ENODEV;
+
+	p = br_port_get_rtnl(pdev);
+	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+		return -EINVAL;
+
+	pv = nbp_get_vlan_info(p);
+	if (br_vlan_enabled(br) && pv && entry->vid == 0) {
+		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+			entry->vid = vid;
+			err = __br_mdb_add(net, br, entry);
+			if (err)
+				break;
+			__br_mdb_notify(dev, entry, RTM_NEWMDB);
+		}
+	} else {
+		err = __br_mdb_add(net, br, entry);
+		if (!err)
+			__br_mdb_notify(dev, entry, RTM_NEWMDB);
+	}
+
 	return err;
 }
 
@@ -421,6 +520,7 @@
 		return -EINVAL;
 
 	memset(&ip, 0, sizeof(ip));
+	ip.vid = entry->vid;
 	ip.proto = entry->addr.proto;
 	if (ip.proto == htons(ETH_P_IP))
 		ip.u.ip4 = entry->addr.u.ip4;
@@ -465,8 +565,12 @@
 
 static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
-	struct net_device *dev;
+	struct net *net = sock_net(skb->sk);
+	unsigned short vid = VLAN_N_VID;
+	struct net_device *dev, *pdev;
 	struct br_mdb_entry *entry;
+	struct net_bridge_port *p;
+	struct net_port_vlans *pv;
 	struct net_bridge *br;
 	int err;
 
@@ -476,9 +580,31 @@
 
 	br = netdev_priv(dev);
 
-	err = __br_mdb_del(br, entry);
-	if (!err)
-		__br_mdb_notify(dev, entry, RTM_DELMDB);
+	/* If vlan filtering is enabled and no VLAN is specified,
+	 * delete the mdb entry on all vlans configured on the port.
+	 */
+	pdev = __dev_get_by_index(net, entry->ifindex);
+	if (!pdev)
+		return -ENODEV;
+
+	p = br_port_get_rtnl(pdev);
+	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+		return -EINVAL;
+
+	pv = nbp_get_vlan_info(p);
+	if (br_vlan_enabled(br) && pv && entry->vid == 0) {
+		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+			entry->vid = vid;
+			err = __br_mdb_del(br, entry);
+			if (!err)
+				__br_mdb_notify(dev, entry, RTM_DELMDB);
+		}
+	} else {
+		err = __br_mdb_del(br, entry);
+		if (!err)
+			__br_mdb_notify(dev, entry, RTM_DELMDB);
+	}
+
 	return err;
 }
 
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 1285eaf..66efdc2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -283,6 +283,8 @@
 		rcu_assign_pointer(*pp, p->next);
 		hlist_del_init(&p->mglist);
 		del_timer(&p->timer);
+		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
+			      p->state);
 		call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
 		if (!mp->ports && !mp->mglist &&
@@ -704,7 +706,7 @@
 	if (unlikely(!p))
 		goto err;
 	rcu_assign_pointer(*pp, p);
-	br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
+	br_mdb_notify(br->dev, port, group, RTM_NEWMDB, MDB_TEMPORARY);
 
 found:
 	mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -764,6 +766,7 @@
 		goto out;
 
 	hlist_del_init_rcu(&port->rlist);
+	br_rtr_notify(br->dev, port, RTM_DELMDB);
 
 out:
 	spin_unlock(&br->multicast_lock);
@@ -924,6 +927,15 @@
 
 void br_multicast_del_port(struct net_bridge_port *port)
 {
+	struct net_bridge *br = port->br;
+	struct net_bridge_port_group *pg;
+	struct hlist_node *n;
+
+	/* Take care of the remaining groups, only perm ones should be left */
+	spin_lock_bh(&br->multicast_lock);
+	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
+		br_multicast_del_pg(br, pg);
+	spin_unlock_bh(&br->multicast_lock);
 	del_timer_sync(&port->multicast_router_timer);
 }
 
@@ -963,10 +975,13 @@
 
 	spin_lock(&br->multicast_lock);
 	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
-		br_multicast_del_pg(br, pg);
+		if (pg->state == MDB_TEMPORARY)
+			br_multicast_del_pg(br, pg);
 
-	if (!hlist_unhashed(&port->rlist))
+	if (!hlist_unhashed(&port->rlist)) {
 		hlist_del_init_rcu(&port->rlist);
+		br_rtr_notify(br->dev, port, RTM_DELMDB);
+	}
 	del_timer(&port->multicast_router_timer);
 	del_timer(&port->ip4_own_query.timer);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1204,6 +1219,7 @@
 		hlist_add_behind_rcu(&port->rlist, slot);
 	else
 		hlist_add_head_rcu(&port->rlist, &br->router_list);
+	br_rtr_notify(br->dev, port, RTM_NEWMDB);
 }
 
 static void br_multicast_mark_router(struct net_bridge *br,
@@ -1437,7 +1453,8 @@
 			hlist_del_init(&p->mglist);
 			del_timer(&p->timer);
 			call_rcu_bh(&p->rcu, br_multicast_free_pg);
-			br_mdb_notify(br->dev, port, group, RTM_DELMDB);
+			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
+				      p->state);
 
 			if (!mp->ports && !mp->mglist &&
 			    netif_running(br->dev))
@@ -1754,12 +1771,6 @@
 
 void br_multicast_stop(struct net_bridge *br)
 {
-	struct net_bridge_mdb_htable *mdb;
-	struct net_bridge_mdb_entry *mp;
-	struct hlist_node *n;
-	u32 ver;
-	int i;
-
 	del_timer_sync(&br->multicast_router_timer);
 	del_timer_sync(&br->ip4_other_query.timer);
 	del_timer_sync(&br->ip4_own_query.timer);
@@ -1767,6 +1778,15 @@
 	del_timer_sync(&br->ip6_other_query.timer);
 	del_timer_sync(&br->ip6_own_query.timer);
 #endif
+}
+
+void br_multicast_dev_del(struct net_bridge *br)
+{
+	struct net_bridge_mdb_htable *mdb;
+	struct net_bridge_mdb_entry *mp;
+	struct hlist_node *n;
+	u32 ver;
+	int i;
 
 	spin_lock_bh(&br->multicast_lock);
 	mdb = mlock_dereference(br->mdb, br);
@@ -1834,8 +1854,10 @@
 		p->multicast_router = val;
 		err = 0;
 
-		if (val < 2 && !hlist_unhashed(&p->rlist))
+		if (val < 2 && !hlist_unhashed(&p->rlist)) {
 			hlist_del_init_rcu(&p->rlist);
+			br_rtr_notify(br->dev, p, RTM_DELMDB);
+		}
 
 		if (val == 1)
 			break;
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index c8b9bcf..0a6f095 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -49,9 +49,9 @@
 static int brnf_call_iptables __read_mostly = 1;
 static int brnf_call_ip6tables __read_mostly = 1;
 static int brnf_call_arptables __read_mostly = 1;
-static int brnf_filter_vlan_tagged __read_mostly = 0;
-static int brnf_filter_pppoe_tagged __read_mostly = 0;
-static int brnf_pass_vlan_indev __read_mostly = 0;
+static int brnf_filter_vlan_tagged __read_mostly;
+static int brnf_filter_pppoe_tagged __read_mostly;
+static int brnf_pass_vlan_indev __read_mostly;
 #else
 #define brnf_call_iptables 1
 #define brnf_call_ip6tables 1
@@ -284,7 +284,7 @@
 							 nf_bridge->neigh_header,
 							 ETH_HLEN-ETH_ALEN);
 			/* tell br_dev_xmit to continue with forwarding */
-			nf_bridge->mask |= BRNF_BRIDGED_DNAT;
+			nf_bridge->bridged_dnat = 1;
 			/* FIXME Need to refragment */
 			ret = neigh->output(neigh, skb);
 		}
@@ -356,7 +356,7 @@
 		skb->pkt_type = PACKET_OTHERHOST;
 		nf_bridge->pkt_otherhost = false;
 	}
-	nf_bridge->mask &= ~BRNF_NF_BRIDGE_PREROUTING;
+	nf_bridge->in_prerouting = 0;
 	if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
 		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
 			struct in_device *in_dev = __in_dev_get_rcu(dev);
@@ -444,7 +444,7 @@
 		nf_bridge->pkt_otherhost = true;
 	}
 
-	nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
+	nf_bridge->in_prerouting = 1;
 	nf_bridge->physindev = skb->dev;
 	skb->dev = brnf_get_logical_dev(skb, skb->dev);
 
@@ -850,10 +850,8 @@
 				   struct sk_buff *skb,
 				   const struct nf_hook_state *state)
 {
-	if (skb->nf_bridge &&
-	    !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
+	if (skb->nf_bridge && !skb->nf_bridge->in_prerouting)
 		return NF_STOP;
-	}
 
 	return NF_ACCEPT;
 }
@@ -872,7 +870,7 @@
 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
 
 	skb_pull(skb, ETH_HLEN);
-	nf_bridge->mask &= ~BRNF_BRIDGED_DNAT;
+	nf_bridge->bridged_dnat = 0;
 
 	BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));
 
@@ -887,7 +885,7 @@
 
 static int br_nf_dev_xmit(struct sk_buff *skb)
 {
-	if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
+	if (skb->nf_bridge && skb->nf_bridge->bridged_dnat) {
 		br_nf_pre_routing_finish_bridge_slow(skb);
 		return 1;
 	}
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 13b7d1e..77383bf 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -174,7 +174,7 @@
 		skb->pkt_type = PACKET_OTHERHOST;
 		nf_bridge->pkt_otherhost = false;
 	}
-	nf_bridge->mask &= ~BRNF_NF_BRIDGE_PREROUTING;
+	nf_bridge->in_prerouting = 0;
 	if (br_nf_ipv6_daddr_was_changed(skb, nf_bridge)) {
 		skb_dst_drop(skb);
 		v6ops->route_input(skb);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 4d74a06..af5e187 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -166,8 +166,6 @@
 			    sizeof(vinfo), &vinfo))
 			goto nla_put_failure;
 
-		vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
-
 		vinfo.vid = vid_end;
 		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
 		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
@@ -675,6 +673,21 @@
 			return -EADDRNOTAVAIL;
 	}
 
+	if (!data)
+		return 0;
+
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	if (data[IFLA_BR_VLAN_PROTOCOL]) {
+		switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
+		case htons(ETH_P_8021Q):
+		case htons(ETH_P_8021AD):
+			break;
+		default:
+			return -EPROTONOSUPPORT;
+		}
+	}
+#endif
+
 	return 0;
 }
 
@@ -730,6 +743,8 @@
 	[IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
 	[IFLA_BR_STP_STATE] = { .type = NLA_U32 },
 	[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
+	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
+	[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
 };
 
 static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
@@ -777,6 +792,24 @@
 		br_stp_set_bridge_priority(br, priority);
 	}
 
+	if (data[IFLA_BR_VLAN_FILTERING]) {
+		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);
+
+		err = __br_vlan_filter_toggle(br, vlan_filter);
+		if (err)
+			return err;
+	}
+
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	if (data[IFLA_BR_VLAN_PROTOCOL]) {
+		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);
+
+		err = __br_vlan_set_proto(br, vlan_proto);
+		if (err)
+			return err;
+	}
+#endif
+
 	return 0;
 }
 
@@ -788,6 +821,10 @@
 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_AGEING_TIME */
 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_STP_STATE */
 	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_PRIORITY */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_VLAN_FILTERING */
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
+#endif
 	       0;
 }
 
@@ -800,15 +837,22 @@
 	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
 	u32 stp_enabled = br->stp_enabled;
 	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
+	u8 vlan_enabled = br_vlan_enabled(br);
 
 	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
 	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
 	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
 	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
 	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
-	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority))
+	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
+	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled))
 		return -EMSGSIZE;
 
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto))
+		return -EMSGSIZE;
+#endif
+
 	return 0;
 }
 
@@ -839,7 +883,7 @@
 	.kind			= "bridge",
 	.priv_size		= sizeof(struct net_bridge),
 	.setup			= br_dev_setup,
-	.maxtype		= IFLA_BRPORT_MAX,
+	.maxtype		= IFLA_BR_MAX,
 	.policy			= br_policy,
 	.validate		= br_validate,
 	.newlink		= br_dev_newlink,
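
The IFLA_BR_VLAN_FILTERING/IFLA_BR_VLAN_PROTOCOL additions follow the usual
rtnetlink shape: declare the attribute type in the policy table, reject bad
values in .validate, apply good ones in .changelink, and account for them in
the get_size/fill_info pair. A minimal sketch with a hypothetical
IFLA_FOO_MTU attribute (IFLA_FOO_* and foo_* names are illustrative only):

	static const struct nla_policy foo_policy[IFLA_FOO_MAX + 1] = {
		[IFLA_FOO_MTU] = { .type = NLA_U32 },
	};

	static int foo_changelink(struct net_device *dev, struct nlattr *tb[],
				  struct nlattr *data[])
	{
		if (data && data[IFLA_FOO_MTU])
			dev->mtu = nla_get_u32(data[IFLA_FOO_MTU]);

		return 0;
	}
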
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 8b21146..213baf7 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -95,15 +95,15 @@
 	struct hlist_node		hlist;
 	struct net_bridge_port		*dst;
 
-	struct rcu_head			rcu;
 	unsigned long			updated;
 	unsigned long			used;
 	mac_addr			addr;
+	__u16				vlan_id;
 	unsigned char			is_local:1,
 					is_static:1,
 					added_by_user:1,
 					added_by_external_learn:1;
-	__u16				vlan_id;
+	struct rcu_head			rcu;
 };
 
 struct net_bridge_port_group {
@@ -466,6 +466,7 @@
 void br_multicast_init(struct net_bridge *br);
 void br_multicast_open(struct net_bridge *br);
 void br_multicast_stop(struct net_bridge *br);
+void br_multicast_dev_del(struct net_bridge *br);
 void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
 			  struct sk_buff *skb);
 void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
@@ -488,7 +489,9 @@
 void br_mdb_init(void);
 void br_mdb_uninit(void);
 void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
-		   struct br_ip *group, int type);
+		   struct br_ip *group, int type, u8 state);
+void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
+		   int type);
 
 #define mlock_dereference(X, br) \
 	rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
@@ -565,6 +568,10 @@
 {
 }
 
+static inline void br_multicast_dev_del(struct net_bridge *br)
+{
+}
+
 static inline void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
 					struct sk_buff *skb)
 {
@@ -607,7 +614,9 @@
 void br_vlan_flush(struct net_bridge *br);
 bool br_vlan_find(struct net_bridge *br, u16 vid);
 void br_recalculate_fwd_mask(struct net_bridge *br);
+int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
+int __br_vlan_set_proto(struct net_bridge *br, __be16 proto);
 int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
 int br_vlan_init(struct net_bridge *br);
 int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val);
@@ -764,6 +773,12 @@
 {
 	return 0;
 }
+
+static inline int __br_vlan_filter_toggle(struct net_bridge *br,
+					  unsigned long val)
+{
+	return -EOPNOTSUPP;
+}
 #endif
 
 struct nf_br_ops {
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 0d41f81..3cd8cc9 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -468,41 +468,40 @@
 					      ~(1u << br->group_addr[5]);
 }
 
-int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
+int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 {
-	if (!rtnl_trylock())
-		return restart_syscall();
-
 	if (br->vlan_enabled == val)
-		goto unlock;
+		return 0;
 
 	br->vlan_enabled = val;
 	br_manage_promisc(br);
 	recalculate_group_addr(br);
 	br_recalculate_fwd_mask(br);
 
-unlock:
-	rtnl_unlock();
 	return 0;
 }
 
-int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
+int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
+{
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+	__br_vlan_filter_toggle(br, val);
+	rtnl_unlock();
+
+	return 0;
+}
+
+int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
 {
 	int err = 0;
 	struct net_bridge_port *p;
 	struct net_port_vlans *pv;
-	__be16 proto, oldproto;
+	__be16 oldproto;
 	u16 vid, errvid;
 
-	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
-		return -EPROTONOSUPPORT;
-
-	if (!rtnl_trylock())
-		return restart_syscall();
-
-	proto = htons(val);
 	if (br->vlan_proto == proto)
-		goto unlock;
+		return 0;
 
 	/* Add VLANs for the new proto to the device filter. */
 	list_for_each_entry(p, &br->port_list, list) {
@@ -533,9 +532,7 @@
 			vlan_vid_del(p->dev, oldproto, vid);
 	}
 
-unlock:
-	rtnl_unlock();
-	return err;
+	return 0;
 
 err_filt:
 	errvid = vid;
@@ -551,7 +548,23 @@
 			vlan_vid_del(p->dev, proto, vid);
 	}
 
-	goto unlock;
+	return err;
+}
+
+int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
+{
+	int err;
+
+	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
+		return -EPROTONOSUPPORT;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+	err = __br_vlan_set_proto(br, htons(val));
+	rtnl_unlock();
+
+	return err;
 }
 
 static bool vlan_default_pvid(struct net_port_vlans *pv, u16 vid)
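
The split above separates the rtnl-acquiring wrappers from the __-prefixed workers so that callers already holding rtnl (such as a bridge netlink changelink handler elsewhere in this series) can reuse the logic without relocking. A minimal sketch of such a caller; the handler name is illustrative:

/* Illustrative only: a handler already running under rtnl (e.g. a
 * bridge changelink implementation) calls the lockless worker directly
 * instead of the rtnl_trylock() wrapper.
 */
static int example_bridge_changelink(struct net_bridge *br,
				     struct nlattr *data[])
{
	ASSERT_RTNL();

	if (data[IFLA_BR_VLAN_FILTERING]) {
		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);
		int err = __br_vlan_filter_toggle(br, vlan_filter);

		if (err)
			return err;
	}

	return 0;
}
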
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 18ca4b2..48b6b01 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -176,7 +176,7 @@
 	return 0;
 }
 
-static inline __pure
+static inline
 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
 {
 	return (void *)entry + entry->next_offset;
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index edbca46..d730a0f 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -177,7 +177,7 @@
 	skb->protocol = htons(ETH_P_CAIF);
 
 	/* Check if we need to handle xoff */
-	if (likely(caifd->netdev->tx_queue_len == 0))
+	if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
 		goto noxoff;
 
 	if (unlikely(caifd->xoff))
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index f30329f..69a4d30 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -517,8 +517,11 @@
 	struct ceph_options *opt = client->options;
 	size_t pos = m->count;
 
-	if (opt->name)
-		seq_printf(m, "name=%s,", opt->name);
+	if (opt->name) {
+		seq_puts(m, "name=");
+		seq_escape(m, opt->name, ", \t\n\\");
+		seq_putc(m, ',');
+	}
 	if (opt->key)
 		seq_puts(m, "secret=<hidden>,");
 
diff --git a/net/core/Makefile b/net/core/Makefile
index fec0856..086b01f 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -23,3 +23,4 @@
 obj-$(CONFIG_NET_PTP_CLASSIFY) += ptp_classifier.o
 obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
 obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
+obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
diff --git a/net/core/dev.c b/net/core/dev.c
index a8e4dd4..877c848 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3061,6 +3061,16 @@
 	else
 		skb_dst_force(skb);
 
+#ifdef CONFIG_NET_SWITCHDEV
+	/* Don't forward if offload device already forwarded */
+	if (skb->offload_fwd_mark &&
+	    skb->offload_fwd_mark == dev->offload_fwd_mark) {
+		consume_skb(skb);
+		rc = NET_XMIT_SUCCESS;
+		goto out;
+	}
+#endif
+
 	txq = netdev_pick_tx(dev, skb, accel_priv);
 	q = rcu_dereference_bh(txq->qdisc);
 
@@ -3645,15 +3655,15 @@
 
 	qdisc_skb_cb(skb)->pkt_len = skb->len;
 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
-	qdisc_bstats_update_cpu(cl->q, skb);
+	qdisc_bstats_cpu_update(cl->q, skb);
 
-	switch (tc_classify(skb, cl, &cl_res)) {
+	switch (tc_classify(skb, cl, &cl_res, false)) {
 	case TC_ACT_OK:
 	case TC_ACT_RECLASSIFY:
 		skb->tc_index = TC_H_MIN(cl_res.classid);
 		break;
 	case TC_ACT_SHOT:
-		qdisc_qstats_drop_cpu(cl->q);
+		qdisc_qstats_cpu_drop(cl->q);
 	case TC_ACT_STOLEN:
 	case TC_ACT_QUEUED:
 		kfree_skb(skb);
@@ -4985,7 +4995,7 @@
  * Gets the next netdev_adjacent->private from the dev's lower neighbour
  * list, starting from iter position. The caller must hold either hold the
  * RTNL lock or its own locking that guarantees that the neighbour lower
- * list will remain unchainged.
+ * list will remain unchanged.
  */
 void *netdev_lower_get_next_private(struct net_device *dev,
 				    struct list_head **iter)
@@ -5040,7 +5050,7 @@
  * Gets the next netdev_adjacent from the dev's lower neighbour
  * list, starting from iter position. The caller must hold RTNL lock or
  * its own locking that guarantees that the neighbour lower
- * list will remain unchainged.
+ * list will remain unchanged.
  */
 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
 {
@@ -5301,6 +5311,7 @@
 				   struct net_device *upper_dev, bool master,
 				   void *private)
 {
+	struct netdev_notifier_changeupper_info changeupper_info;
 	struct netdev_adjacent *i, *j, *to_i, *to_j;
 	int ret = 0;
 
@@ -5319,6 +5330,10 @@
 	if (master && netdev_master_upper_dev_get(dev))
 		return -EBUSY;
 
+	changeupper_info.upper_dev = upper_dev;
+	changeupper_info.master = master;
+	changeupper_info.linking = true;
+
 	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
 						   master);
 	if (ret)
@@ -5357,7 +5372,8 @@
 			goto rollback_lower_mesh;
 	}
 
-	call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
+	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
+				      &changeupper_info.info);
 	return 0;
 
 rollback_lower_mesh:
@@ -5452,9 +5468,14 @@
 void netdev_upper_dev_unlink(struct net_device *dev,
 			     struct net_device *upper_dev)
 {
+	struct netdev_notifier_changeupper_info changeupper_info;
 	struct netdev_adjacent *i, *j;
 	ASSERT_RTNL();
 
+	changeupper_info.upper_dev = upper_dev;
+	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
+	changeupper_info.linking = false;
+
 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
 
 	/* Here is the tricky part. We must remove all dev's lower
@@ -5474,7 +5495,8 @@
 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
 		__netdev_adjacent_dev_unlink(dev, i->dev);
 
-	call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
+	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
+				      &changeupper_info.info);
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
@@ -6075,6 +6097,26 @@
 EXPORT_SYMBOL(dev_get_phys_port_name);
 
 /**
+ *	dev_change_proto_down - update protocol port state information
+ *	@dev: device
+ *	@proto_down: new value
+ *
+ *	This info can be used by switch drivers to set the physical state of
+ *	the port.
+ */
+int dev_change_proto_down(struct net_device *dev, bool proto_down)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	if (!ops->ndo_change_proto_down)
+		return -EOPNOTSUPP;
+	if (!netif_device_present(dev))
+		return -ENODEV;
+	return ops->ndo_change_proto_down(dev, proto_down);
+}
+EXPORT_SYMBOL(dev_change_proto_down);
+
+/**
  *	dev_new_index	-	allocate an ifindex
  *	@net: the applicable net namespace
  *
@@ -6967,6 +7009,9 @@
 	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
 	setup(dev);
 
+	if (!dev->tx_queue_len)
+		dev->priv_flags |= IFF_NO_QUEUE;
+
 	dev->num_tx_queues = txqs;
 	dev->real_num_tx_queues = txqs;
 	if (netif_alloc_netdev_queues(dev))
@@ -7639,7 +7684,7 @@
 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 
 	hotcpu_notifier(dev_cpu_callback, 0);
-	dst_init();
+	dst_subsys_init();
 	rc = 0;
 out:
 	return rc;
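
dev_change_proto_down() above only dispatches to the driver; a driver opts in by implementing the new ndo_change_proto_down hook. A minimal sketch, assuming hypothetical example_port and example_hw_set_link_state driver internals:

/* Illustrative only: a switchdev-style driver mirroring proto_down to
 * hardware. struct example_port and example_hw_set_link_state are
 * hypothetical driver internals.
 */
static int example_ndo_change_proto_down(struct net_device *dev,
					 bool proto_down)
{
	struct example_port *port = netdev_priv(dev);

	port->proto_down = proto_down;
	/* Take the physical link down while a protocol holds the port down. */
	return example_hw_set_link_state(port, !proto_down);
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_change_proto_down	= example_ndo_change_proto_down,
};
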
diff --git a/net/core/dst.c b/net/core/dst.c
index 002144be..0771c8c 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -20,8 +20,10 @@
 #include <net/net_namespace.h>
 #include <linux/sched.h>
 #include <linux/prefetch.h>
+#include <net/lwtunnel.h>
 
 #include <net/dst.h>
+#include <net/dst_metadata.h>
 
 /*
  * Theory of operations:
@@ -158,19 +160,10 @@
 	[RTAX_MAX] = 0xdeadbeef,
 };
 
-
-void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
-		int initial_ref, int initial_obsolete, unsigned short flags)
+void dst_init(struct dst_entry *dst, struct dst_ops *ops,
+	      struct net_device *dev, int initial_ref, int initial_obsolete,
+	      unsigned short flags)
 {
-	struct dst_entry *dst;
-
-	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
-		if (ops->gc(ops))
-			return NULL;
-	}
-	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
-	if (!dst)
-		return NULL;
 	dst->child = NULL;
 	dst->dev = dev;
 	if (dev)
@@ -192,6 +185,7 @@
 #ifdef CONFIG_IP_ROUTE_CLASSID
 	dst->tclassid = 0;
 #endif
+	dst->lwtstate = NULL;
 	atomic_set(&dst->__refcnt, initial_ref);
 	dst->__use = 0;
 	dst->lastuse = jiffies;
@@ -200,6 +194,25 @@
 	dst->next = NULL;
 	if (!(flags & DST_NOCOUNT))
 		dst_entries_add(ops, 1);
+}
+EXPORT_SYMBOL(dst_init);
+
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
+		int initial_ref, int initial_obsolete, unsigned short flags)
+{
+	struct dst_entry *dst;
+
+	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
+		if (ops->gc(ops))
+			return NULL;
+	}
+
+	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
+	if (!dst)
+		return NULL;
+
+	dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);
+
 	return dst;
 }
 EXPORT_SYMBOL(dst_alloc);
@@ -248,7 +261,13 @@
 		dst->ops->destroy(dst);
 	if (dst->dev)
 		dev_put(dst->dev);
-	kmem_cache_free(dst->ops->kmem_cachep, dst);
+
+	lwtstate_put(dst->lwtstate);
+
+	if (dst->flags & DST_METADATA)
+		kfree(dst);
+	else
+		kmem_cache_free(dst->ops->kmem_cachep, dst);
 
 	dst = child;
 	if (dst) {
@@ -329,6 +348,69 @@
 }
 EXPORT_SYMBOL(__dst_destroy_metrics_generic);
 
+static struct dst_ops md_dst_ops = {
+	.family =		AF_UNSPEC,
+};
+
+static int dst_md_discard_sk(struct sock *sk, struct sk_buff *skb)
+{
+	WARN_ONCE(1, "Attempting to call output on metadata dst\n");
+	kfree_skb(skb);
+	return 0;
+}
+
+static int dst_md_discard(struct sk_buff *skb)
+{
+	WARN_ONCE(1, "Attempting to call input on metadata dst\n");
+	kfree_skb(skb);
+	return 0;
+}
+
+static void __metadata_dst_init(struct metadata_dst *md_dst, u8 optslen)
+{
+	struct dst_entry *dst;
+
+	dst = &md_dst->dst;
+	dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
+		 DST_METADATA | DST_NOCACHE | DST_NOCOUNT);
+
+	dst->input = dst_md_discard;
+	dst->output = dst_md_discard_sk;
+
+	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
+}
+
+struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags)
+{
+	struct metadata_dst *md_dst;
+
+	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
+	if (!md_dst)
+		return NULL;
+
+	__metadata_dst_init(md_dst, optslen);
+
+	return md_dst;
+}
+EXPORT_SYMBOL_GPL(metadata_dst_alloc);
+
+struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags)
+{
+	int cpu;
+	struct metadata_dst __percpu *md_dst;
+
+	md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
+				    __alignof__(struct metadata_dst), flags);
+	if (!md_dst)
+		return NULL;
+
+	for_each_possible_cpu(cpu)
+		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), optslen);
+
+	return md_dst;
+}
+EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);
+
 /* Dirty hack. We did it in 2.2 (in __dst_free),
  * we have _very_ good reasons not to repeat
  * this mistake in 2.3, but we have no choice
@@ -393,7 +475,7 @@
 	.priority = -10, /* must be called after other network notifiers */
 };
 
-void __init dst_init(void)
+void __init dst_subsys_init(void)
 {
 	register_netdevice_notifier(&dst_dev_notifier);
 }
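
For context, a producer of the new metadata dsts attaches one to an skb roughly as follows; this mirrors the bpf_skb_set_tunnel_key helper added to net/core/filter.c in this same series, with illustrative names:

/* Illustrative only: attach transmit-side tunnel metadata to an skb
 * using the allocator introduced above.
 */
static int example_attach_tunnel_metadata(struct sk_buff *skb,
					  __be64 tun_id, __be32 daddr)
{
	struct metadata_dst *md = metadata_dst_alloc(0, GFP_ATOMIC);
	struct ip_tunnel_info *info;

	if (!md)
		return -ENOMEM;

	info = &md->u.tun_info;
	info->mode = IP_TUNNEL_INFO_TX;
	info->key.tun_flags = TUNNEL_KEY;
	info->key.tun_id = tun_id;
	info->key.u.ipv4.dst = daddr;

	skb_dst_drop(skb);
	skb_dst_set(skb, &md->dst);

	return 0;
}
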
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 9a12668..ae8306e 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -16,6 +16,7 @@
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <net/fib_rules.h>
+#include <net/ip_tunnels.h>
 
 int fib_default_rule_add(struct fib_rules_ops *ops,
 			 u32 pref, u32 table, u32 flags)
@@ -186,6 +187,9 @@
 	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
 		goto out;
 
+	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
+		goto out;
+
 	ret = ops->match(rule, fl, flags);
 out:
 	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
@@ -330,6 +334,9 @@
 	if (tb[FRA_FWMASK])
 		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);
 
+	if (tb[FRA_TUN_ID])
+		rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);
+
 	rule->action = frh->action;
 	rule->flags = frh->flags;
 	rule->table = frh_get_table(frh, tb);
@@ -407,6 +414,9 @@
 	if (unresolved)
 		ops->unresolved_rules++;
 
+	if (rule->tun_id)
+		ip_tunnel_need_metadata();
+
 	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
 	flush_route_cache(ops);
 	rules_ops_put(ops);
@@ -473,6 +483,10 @@
 		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
 			continue;
 
+		if (tb[FRA_TUN_ID] &&
+		    (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
+			continue;
+
 		if (!ops->compare(rule, frh, tb))
 			continue;
 
@@ -487,6 +501,9 @@
 				goto errout;
 		}
 
+		if (rule->tun_id)
+			ip_tunnel_unneed_metadata();
+
 		list_del_rcu(&rule->list);
 
 		if (rule->action == FR_ACT_GOTO) {
@@ -535,7 +552,8 @@
 			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
 			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
 			 + nla_total_size(4) /* FRA_FWMARK */
-			 + nla_total_size(4); /* FRA_FWMASK */
+			 + nla_total_size(4) /* FRA_FWMASK */
+			 + nla_total_size(8); /* FRA_TUN_ID */
 
 	if (ops->nlmsg_payload)
 		payload += ops->nlmsg_payload(rule);
@@ -591,7 +609,9 @@
 	    ((rule->mark_mask || rule->mark) &&
 	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
 	    (rule->target &&
-	     nla_put_u32(skb, FRA_GOTO, rule->target)))
+	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
+	    (rule->tun_id &&
+	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id)))
 		goto nla_put_failure;
 
 	if (rule->suppress_ifgroup != -1) {
diff --git a/net/core/filter.c b/net/core/filter.c
index be3098f..13079f0 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -47,6 +47,8 @@
 #include <linux/if_vlan.h>
 #include <linux/bpf.h>
 #include <net/sch_generic.h>
+#include <net/cls_cgroup.h>
+#include <net/dst_metadata.h>
 
 /**
  *	sk_filter - run a packet through a socket filter
@@ -1122,6 +1124,7 @@
 	*pfp = fp;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
 
 void bpf_prog_destroy(struct bpf_prog *fp)
 {
@@ -1346,7 +1349,7 @@
 static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 {
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags);
+	bool is_pseudo = !!BPF_IS_PSEUDO_HEADER(flags);
 	int offset = (int) r2;
 	__sum16 sum, *ptr;
 
@@ -1424,6 +1427,139 @@
 	.arg3_type      = ARG_ANYTHING,
 };
 
+static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	return task_get_classid((struct sk_buff *) (unsigned long) r1);
+}
+
+static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
+	.func           = bpf_get_cgroup_classid,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+};
+
+static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *) (long) r1;
+	__be16 vlan_proto = (__force __be16) r2;
+
+	if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
+		     vlan_proto != htons(ETH_P_8021AD)))
+		vlan_proto = htons(ETH_P_8021Q);
+
+	return skb_vlan_push(skb, vlan_proto, vlan_tci);
+}
+
+const struct bpf_func_proto bpf_skb_vlan_push_proto = {
+	.func           = bpf_skb_vlan_push,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+	.arg2_type      = ARG_ANYTHING,
+	.arg3_type      = ARG_ANYTHING,
+};
+EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
+
+static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *) (long) r1;
+
+	return skb_vlan_pop(skb);
+}
+
+const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
+	.func           = bpf_skb_vlan_pop,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+};
+EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);
+
+bool bpf_helper_changes_skb_data(void *func)
+{
+	if (func == bpf_skb_vlan_push)
+		return true;
+	if (func == bpf_skb_vlan_pop)
+		return true;
+	return false;
+}
+
+static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *) (long) r1;
+	struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
+	struct ip_tunnel_info *info = skb_tunnel_info(skb);
+
+	if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags || !info))
+		return -EINVAL;
+	if (ip_tunnel_info_af(info) != AF_INET)
+		return -EINVAL;
+
+	to->tunnel_id = be64_to_cpu(info->key.tun_id);
+	to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
+
+	return 0;
+}
+
+const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
+	.func		= bpf_skb_get_tunnel_key,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_PTR_TO_STACK,
+	.arg3_type	= ARG_CONST_STACK_SIZE,
+	.arg4_type	= ARG_ANYTHING,
+};
+
+static struct metadata_dst __percpu *md_dst;
+
+static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *) (long) r1;
+	struct bpf_tunnel_key *from = (struct bpf_tunnel_key *) (long) r2;
+	struct metadata_dst *md = this_cpu_ptr(md_dst);
+	struct ip_tunnel_info *info;
+
+	if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags))
+		return -EINVAL;
+
+	skb_dst_drop(skb);
+	dst_hold((struct dst_entry *) md);
+	skb_dst_set(skb, (struct dst_entry *) md);
+
+	info = &md->u.tun_info;
+	info->mode = IP_TUNNEL_INFO_TX;
+	info->key.tun_flags = TUNNEL_KEY;
+	info->key.tun_id = cpu_to_be64(from->tunnel_id);
+	info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
+
+	return 0;
+}
+
+const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
+	.func		= bpf_skb_set_tunnel_key,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_PTR_TO_STACK,
+	.arg3_type	= ARG_CONST_STACK_SIZE,
+	.arg4_type	= ARG_ANYTHING,
+};
+
+static const struct bpf_func_proto *bpf_get_skb_set_tunnel_key_proto(void)
+{
+	if (!md_dst) {
+		/* A race is not possible here, since this is called from
+		 * the verifier, which holds the verifier mutex.
+		 */
+		md_dst = metadata_dst_alloc_percpu(0, GFP_KERNEL);
+		if (!md_dst)
+			return NULL;
+	}
+	return &bpf_skb_set_tunnel_key_proto;
+}
+
 static const struct bpf_func_proto *
 sk_filter_func_proto(enum bpf_func_id func_id)
 {
@@ -1461,6 +1597,16 @@
 		return &bpf_l4_csum_replace_proto;
 	case BPF_FUNC_clone_redirect:
 		return &bpf_clone_redirect_proto;
+	case BPF_FUNC_get_cgroup_classid:
+		return &bpf_get_cgroup_classid_proto;
+	case BPF_FUNC_skb_vlan_push:
+		return &bpf_skb_vlan_push_proto;
+	case BPF_FUNC_skb_vlan_pop:
+		return &bpf_skb_vlan_pop_proto;
+	case BPF_FUNC_skb_get_tunnel_key:
+		return &bpf_skb_get_tunnel_key_proto;
+	case BPF_FUNC_skb_set_tunnel_key:
+		return bpf_get_skb_set_tunnel_key_proto();
 	default:
 		return sk_filter_func_proto(func_id);
 	}
@@ -1569,6 +1715,13 @@
 				      offsetof(struct net_device, ifindex));
 		break;
 
+	case offsetof(struct __sk_buff, hash):
+		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+
+		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+				      offsetof(struct sk_buff, hash));
+		break;
+
 	case offsetof(struct __sk_buff, mark):
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
 
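
On the program side, the new classifier helpers are exercised roughly as in this restricted-C sketch; the __section macro and helper declarations follow samples/bpf conventions and are assumptions here:

/* Illustrative only (restricted eBPF C): push a VLAN tag, then set
 * transmit tunnel metadata via the helpers wired up above. Helper
 * declarations as in samples/bpf.
 */
__section("action")
int example_tc_prog(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {
		.tunnel_id   = 42,
		.remote_ipv4 = 0x0a000001,	/* 10.0.0.1, host byte order */
	};

	bpf_skb_vlan_push(skb, __constant_htons(0x8100 /* ETH_P_8021Q */), 100);
	bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);

	return 0;
}
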
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 2a834c6..d79699c 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -19,14 +19,14 @@
 #include <net/flow_dissector.h>
 #include <scsi/fc/fc_fcoe.h>
 
-static bool skb_flow_dissector_uses_key(struct flow_dissector *flow_dissector,
-					enum flow_dissector_key_id key_id)
+static bool dissector_uses_key(const struct flow_dissector *flow_dissector,
+			       enum flow_dissector_key_id key_id)
 {
 	return flow_dissector->used_keys & (1 << key_id);
 }
 
-static void skb_flow_dissector_set_key(struct flow_dissector *flow_dissector,
-				       enum flow_dissector_key_id key_id)
+static void dissector_set_key(struct flow_dissector *flow_dissector,
+			      enum flow_dissector_key_id key_id)
 {
 	flow_dissector->used_keys |= (1 << key_id);
 }
@@ -51,20 +51,20 @@
 		 * boundaries of unsigned short.
 		 */
 		BUG_ON(key->offset > USHRT_MAX);
-		BUG_ON(skb_flow_dissector_uses_key(flow_dissector,
-						   key->key_id));
+		BUG_ON(dissector_uses_key(flow_dissector,
+					  key->key_id));
 
-		skb_flow_dissector_set_key(flow_dissector, key->key_id);
+		dissector_set_key(flow_dissector, key->key_id);
 		flow_dissector->offset[key->key_id] = key->offset;
 	}
 
 	/* Ensure that the dissector always includes control and basic key.
 	 * That way we are able to avoid handling lack of these in fast path.
 	 */
-	BUG_ON(!skb_flow_dissector_uses_key(flow_dissector,
-					    FLOW_DISSECTOR_KEY_CONTROL));
-	BUG_ON(!skb_flow_dissector_uses_key(flow_dissector,
-					    FLOW_DISSECTOR_KEY_BASIC));
+	BUG_ON(!dissector_uses_key(flow_dissector,
+				   FLOW_DISSECTOR_KEY_CONTROL));
+	BUG_ON(!dissector_uses_key(flow_dissector,
+				   FLOW_DISSECTOR_KEY_BASIC));
 }
 EXPORT_SYMBOL(skb_flow_dissector_init);
 
@@ -121,7 +121,8 @@
 bool __skb_flow_dissect(const struct sk_buff *skb,
 			struct flow_dissector *flow_dissector,
 			void *target_container,
-			void *data, __be16 proto, int nhoff, int hlen)
+			void *data, __be16 proto, int nhoff, int hlen,
+			unsigned int flags)
 {
 	struct flow_dissector_key_control *key_control;
 	struct flow_dissector_key_basic *key_basic;
@@ -130,6 +131,7 @@
 	struct flow_dissector_key_tags *key_tags;
 	struct flow_dissector_key_keyid *key_keyid;
 	u8 ip_proto = 0;
+	bool ret = false;
 
 	if (!data) {
 		data = skb->data;
@@ -152,8 +154,8 @@
 					      FLOW_DISSECTOR_KEY_BASIC,
 					      target_container);
 
-	if (skb_flow_dissector_uses_key(flow_dissector,
-					FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+	if (dissector_uses_key(flow_dissector,
+			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
 		struct ethhdr *eth = eth_hdr(skb);
 		struct flow_dissector_key_eth_addrs *key_eth_addrs;
 
@@ -171,15 +173,13 @@
 ip:
 		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
 		if (!iph || iph->ihl < 5)
-			return false;
+			goto out_bad;
 		nhoff += iph->ihl * 4;
 
 		ip_proto = iph->protocol;
-		if (ip_is_fragment(iph))
-			ip_proto = 0;
 
-		if (!skb_flow_dissector_uses_key(flow_dissector,
-						 FLOW_DISSECTOR_KEY_IPV4_ADDRS))
+		if (!dissector_uses_key(flow_dissector,
+					FLOW_DISSECTOR_KEY_IPV4_ADDRS))
 			break;
 
 		key_addrs = skb_flow_dissector_target(flow_dissector,
@@ -187,6 +187,22 @@
 		memcpy(&key_addrs->v4addrs, &iph->saddr,
 		       sizeof(key_addrs->v4addrs));
 		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+
+		if (ip_is_fragment(iph)) {
+			key_control->flags |= FLOW_DIS_IS_FRAGMENT;
+
+			if (iph->frag_off & htons(IP_OFFSET)) {
+				goto out_good;
+			} else {
+				key_control->flags |= FLOW_DIS_FIRST_FRAG;
+				if (!(flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG))
+					goto out_good;
+			}
+		}
+
+		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
+			goto out_good;
+
 		break;
 	}
 	case htons(ETH_P_IPV6): {
@@ -197,13 +213,13 @@
 ipv6:
 		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
 		if (!iph)
-			return false;
+			goto out_bad;
 
 		ip_proto = iph->nexthdr;
 		nhoff += sizeof(struct ipv6hdr);
 
-		if (skb_flow_dissector_uses_key(flow_dissector,
-						FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+		if (dissector_uses_key(flow_dissector,
+				       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
 			struct flow_dissector_key_ipv6_addrs *key_ipv6_addrs;
 
 			key_ipv6_addrs = skb_flow_dissector_target(flow_dissector,
@@ -216,15 +232,20 @@
 
 		flow_label = ip6_flowlabel(iph);
 		if (flow_label) {
-			if (skb_flow_dissector_uses_key(flow_dissector,
-				FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
+			if (dissector_uses_key(flow_dissector,
+					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
 				key_tags = skb_flow_dissector_target(flow_dissector,
 								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
 								     target_container);
 				key_tags->flow_label = ntohl(flow_label);
 			}
+			if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
+				goto out_good;
 		}
 
+		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
+			goto out_good;
+
 		break;
 	}
 	case htons(ETH_P_8021AD):
@@ -234,10 +255,10 @@
 
 		vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan);
 		if (!vlan)
-			return false;
+			goto out_bad;
 
-		if (skb_flow_dissector_uses_key(flow_dissector,
-						FLOW_DISSECTOR_KEY_VLANID)) {
+		if (dissector_uses_key(flow_dissector,
+				       FLOW_DISSECTOR_KEY_VLANID)) {
 			key_tags = skb_flow_dissector_target(flow_dissector,
 							     FLOW_DISSECTOR_KEY_VLANID,
 							     target_container);
@@ -256,7 +277,7 @@
 		} *hdr, _hdr;
 		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
 		if (!hdr)
-			return false;
+			goto out_bad;
 		proto = hdr->proto;
 		nhoff += PPPOE_SES_HLEN;
 		switch (proto) {
@@ -265,7 +286,7 @@
 		case htons(PPP_IPV6):
 			goto ipv6;
 		default:
-			return false;
+			goto out_bad;
 		}
 	}
 	case htons(ETH_P_TIPC): {
@@ -275,19 +296,17 @@
 		} *hdr, _hdr;
 		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
 		if (!hdr)
-			return false;
-		key_basic->n_proto = proto;
-		key_control->thoff = (u16)nhoff;
+			goto out_bad;
 
-		if (skb_flow_dissector_uses_key(flow_dissector,
-						FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
+		if (dissector_uses_key(flow_dissector,
+				       FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
 			key_addrs = skb_flow_dissector_target(flow_dissector,
 							      FLOW_DISSECTOR_KEY_TIPC_ADDRS,
 							      target_container);
 			key_addrs->tipcaddrs.srcnode = hdr->srcnode;
 			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
 		}
-		return true;
+		goto out_good;
 	}
 
 	case htons(ETH_P_MPLS_UC):
@@ -297,12 +316,12 @@
 		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
 					   hlen, &_hdr);
 		if (!hdr)
-			return false;
+			goto out_bad;
 
 		if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
 		     MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
-			if (skb_flow_dissector_uses_key(flow_dissector,
-							FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
+			if (dissector_uses_key(flow_dissector,
+					       FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
 				key_keyid = skb_flow_dissector_target(flow_dissector,
 								      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
 								      target_container);
@@ -310,21 +329,17 @@
 					htonl(MPLS_LS_LABEL_MASK);
 			}
 
-			key_basic->n_proto = proto;
-			key_basic->ip_proto = ip_proto;
-			key_control->thoff = (u16)nhoff;
-
-			return true;
+			goto out_good;
 		}
 
-		return true;
+		goto out_good;
 	}
 
 	case htons(ETH_P_FCOE):
 		key_control->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
 		/* fall through */
 	default:
-		return false;
+		goto out_bad;
 	}
 
 ip_proto_again:
@@ -337,7 +352,7 @@
 
 		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
 		if (!hdr)
-			return false;
+			goto out_bad;
 		/*
 		 * Only look inside GRE if version zero and no
 		 * routing
@@ -357,10 +372,10 @@
 						     data, hlen, &_keyid);
 
 			if (!keyid)
-				return false;
+				goto out_bad;
 
-			if (skb_flow_dissector_uses_key(flow_dissector,
-							FLOW_DISSECTOR_KEY_GRE_KEYID)) {
+			if (dissector_uses_key(flow_dissector,
+					       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
 				key_keyid = skb_flow_dissector_target(flow_dissector,
 								      FLOW_DISSECTOR_KEY_GRE_KEYID,
 								      target_container);
@@ -378,10 +393,15 @@
 						   sizeof(_eth),
 						   data, hlen, &_eth);
 			if (!eth)
-				return false;
+				goto out_bad;
 			proto = eth->h_proto;
 			nhoff += sizeof(*eth);
 		}
+
+		key_control->flags |= FLOW_DIS_ENCAPSULATION;
+		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
+			goto out_good;
+
 		goto again;
 	}
 	case NEXTHDR_HOP:
@@ -395,18 +415,53 @@
 		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
 					      data, hlen, &_opthdr);
 		if (!opthdr)
-			return false;
+			goto out_bad;
 
 		ip_proto = opthdr[0];
 		nhoff += (opthdr[1] + 1) << 3;
 
 		goto ip_proto_again;
 	}
+	case NEXTHDR_FRAGMENT: {
+		struct frag_hdr _fh, *fh;
+
+		if (proto != htons(ETH_P_IPV6))
+			break;
+
+		fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
+					  data, hlen, &_fh);
+
+		if (!fh)
+			goto out_bad;
+
+		key_control->flags |= FLOW_DIS_IS_FRAGMENT;
+
+		nhoff += sizeof(_fh);
+
+		if (!(fh->frag_off & htons(IP6_OFFSET))) {
+			key_control->flags |= FLOW_DIS_FIRST_FRAG;
+			if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
+				ip_proto = fh->nexthdr;
+				goto ip_proto_again;
+			}
+		}
+		goto out_good;
+	}
 	case IPPROTO_IPIP:
 		proto = htons(ETH_P_IP);
+
+		key_control->flags |= FLOW_DIS_ENCAPSULATION;
+		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
+			goto out_good;
+
 		goto ip;
 	case IPPROTO_IPV6:
 		proto = htons(ETH_P_IPV6);
+
+		key_control->flags |= FLOW_DIS_ENCAPSULATION;
+		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
+			goto out_good;
+
 		goto ipv6;
 	case IPPROTO_MPLS:
 		proto = htons(ETH_P_MPLS_UC);
@@ -415,12 +470,8 @@
 		break;
 	}
 
-	key_basic->n_proto = proto;
-	key_basic->ip_proto = ip_proto;
-	key_control->thoff = (u16)nhoff;
-
-	if (skb_flow_dissector_uses_key(flow_dissector,
-					FLOW_DISSECTOR_KEY_PORTS)) {
+	if (dissector_uses_key(flow_dissector,
+			       FLOW_DISSECTOR_KEY_PORTS)) {
 		key_ports = skb_flow_dissector_target(flow_dissector,
 						      FLOW_DISSECTOR_KEY_PORTS,
 						      target_container);
@@ -428,7 +479,15 @@
 							data, hlen);
 	}
 
-	return true;
+out_good:
+	ret = true;
+
+out_bad:
+	key_basic->n_proto = proto;
+	key_basic->ip_proto = ip_proto;
+	key_control->thoff = (u16)nhoff;
+
+	return ret;
 }
 EXPORT_SYMBOL(__skb_flow_dissect);
 
@@ -438,18 +497,21 @@
 	net_get_random_once(&hashrnd, sizeof(hashrnd));
 }
 
-static __always_inline u32 __flow_hash_words(u32 *words, u32 length, u32 keyval)
+static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
+					     u32 keyval)
 {
 	return jhash2(words, length, keyval);
 }
 
-static inline void *flow_keys_hash_start(struct flow_keys *flow)
+static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
 {
+	const void *p = flow;
+
 	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
-	return (void *)flow + FLOW_KEYS_HASH_OFFSET;
+	return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
 }
 
-static inline size_t flow_keys_hash_length(struct flow_keys *flow)
+static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
 {
 	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
 	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
@@ -539,7 +601,7 @@
 
 	__flow_hash_consistentify(keys);
 
-	hash = __flow_hash_words((u32 *)flow_keys_hash_start(keys),
+	hash = __flow_hash_words(flow_keys_hash_start(keys),
 				 flow_keys_hash_length(keys), keyval);
 	if (!hash)
 		hash = 1;
@@ -557,8 +619,8 @@
 static inline u32 ___skb_get_hash(const struct sk_buff *skb,
 				  struct flow_keys *keys, u32 keyval)
 {
-	if (!skb_flow_dissect_flow_keys(skb, keys))
-		return 0;
+	skb_flow_dissect_flow_keys(skb, keys,
+				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
 	return __flow_hash_from_keys(keys, keyval);
 }
@@ -602,17 +664,11 @@
 void __skb_get_hash(struct sk_buff *skb)
 {
 	struct flow_keys keys;
-	u32 hash;
 
 	__flow_hash_secret_init();
 
-	hash = ___skb_get_hash(skb, &keys, hashrnd);
-	if (!hash)
-		return;
-	if (keys.ports.ports)
-		skb->l4_hash = 1;
-	skb->sw_hash = 1;
-	skb->hash = hash;
+	__skb_set_sw_hash(skb, ___skb_get_hash(skb, &keys, hashrnd),
+			  flow_keys_have_l4(&keys));
 }
 EXPORT_SYMBOL(__skb_get_hash);
 
@@ -624,6 +680,51 @@
 }
 EXPORT_SYMBOL(skb_get_hash_perturb);
 
+__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
+{
+	struct flow_keys keys;
+
+	memset(&keys, 0, sizeof(keys));
+
+	memcpy(&keys.addrs.v6addrs.src, &fl6->saddr,
+	       sizeof(keys.addrs.v6addrs.src));
+	memcpy(&keys.addrs.v6addrs.dst, &fl6->daddr,
+	       sizeof(keys.addrs.v6addrs.dst));
+	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+	keys.ports.src = fl6->fl6_sport;
+	keys.ports.dst = fl6->fl6_dport;
+	keys.keyid.keyid = fl6->fl6_gre_key;
+	keys.tags.flow_label = (__force u32)fl6->flowlabel;
+	keys.basic.ip_proto = fl6->flowi6_proto;
+
+	__skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
+			  flow_keys_have_l4(&keys));
+
+	return skb->hash;
+}
+EXPORT_SYMBOL(__skb_get_hash_flowi6);
+
+__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
+{
+	struct flow_keys keys;
+
+	memset(&keys, 0, sizeof(keys));
+
+	keys.addrs.v4addrs.src = fl4->saddr;
+	keys.addrs.v4addrs.dst = fl4->daddr;
+	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+	keys.ports.src = fl4->fl4_sport;
+	keys.ports.dst = fl4->fl4_dport;
+	keys.keyid.keyid = fl4->fl4_gre_key;
+	keys.basic.ip_proto = fl4->flowi4_proto;
+
+	__skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
+			  flow_keys_have_l4(&keys));
+
+	return skb->hash;
+}
+EXPORT_SYMBOL(__skb_get_hash_flowi4);
+
 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
 		   const struct flow_keys *keys, int hlen)
 {
@@ -683,12 +784,47 @@
 {
 	struct flow_keys keys;
 
-	if (!skb_flow_dissect_flow_keys(skb, &keys))
+	if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
 		return 0;
 
 	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
 }
 
+__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
+{
+	memset(keys, 0, sizeof(*keys));
+
+	memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
+	    sizeof(keys->addrs.v6addrs.src));
+	memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
+	    sizeof(keys->addrs.v6addrs.dst));
+	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+	keys->ports.src = fl6->fl6_sport;
+	keys->ports.dst = fl6->fl6_dport;
+	keys->keyid.keyid = fl6->fl6_gre_key;
+	keys->tags.flow_label = (__force u32)fl6->flowlabel;
+	keys->basic.ip_proto = fl6->flowi6_proto;
+
+	return flow_hash_from_keys(keys);
+}
+EXPORT_SYMBOL(__get_hash_from_flowi6);
+
+__u32 __get_hash_from_flowi4(const struct flowi4 *fl4, struct flow_keys *keys)
+{
+	memset(keys, 0, sizeof(*keys));
+
+	keys->addrs.v4addrs.src = fl4->saddr;
+	keys->addrs.v4addrs.dst = fl4->daddr;
+	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+	keys->ports.src = fl4->fl4_sport;
+	keys->ports.dst = fl4->fl4_dport;
+	keys->keyid.keyid = fl4->fl4_gre_key;
+	keys->basic.ip_proto = fl4->flowi4_proto;
+
+	return flow_hash_from_keys(keys);
+}
+EXPORT_SYMBOL(__get_hash_from_flowi4);
+
 static const struct flow_dissector_key flow_keys_dissector_keys[] = {
 	{
 		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
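
The new flags argument lets callers bound how deep the dissection goes; a minimal sketch of a caller that stops at L3 and inspects the new fragment bits:

/* Illustrative only: stop dissection at L3 and test the control-flags
 * bits introduced above.
 */
static bool example_is_first_fragment(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect_flow_keys(skb, &keys,
					FLOW_DISSECTOR_F_STOP_AT_L3))
		return false;

	return (keys.control.flags & FLOW_DIS_IS_FRAGMENT) &&
	       (keys.control.flags & FLOW_DIS_FIRST_FRAG);
}
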
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
new file mode 100644
index 0000000..dfb1a9c
--- /dev/null
+++ b/net/core/lwtunnel.c
@@ -0,0 +1,249 @@
+/*
+ * lwtunnel	Infrastructure for lightweight tunnels like MPLS
+ *
+ * Authors:	Roopa Prabhu, <roopa@cumulusnetworks.com>
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/lwtunnel.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/err.h>
+
+#include <net/lwtunnel.h>
+#include <net/rtnetlink.h>
+#include <net/ip6_fib.h>
+
+struct lwtunnel_state *lwtunnel_state_alloc(int encap_len)
+{
+	struct lwtunnel_state *lws;
+
+	lws = kzalloc(sizeof(*lws) + encap_len, GFP_ATOMIC);
+
+	return lws;
+}
+EXPORT_SYMBOL(lwtunnel_state_alloc);
+
+static const struct lwtunnel_encap_ops __rcu *
+		lwtun_encaps[LWTUNNEL_ENCAP_MAX + 1] __read_mostly;
+
+int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *ops,
+			   unsigned int num)
+{
+	if (num > LWTUNNEL_ENCAP_MAX)
+		return -ERANGE;
+
+	return !cmpxchg((const struct lwtunnel_encap_ops **)
+			&lwtun_encaps[num],
+			NULL, ops) ? 0 : -1;
+}
+EXPORT_SYMBOL(lwtunnel_encap_add_ops);
+
+int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops,
+			   unsigned int encap_type)
+{
+	int ret;
+
+	if (encap_type == LWTUNNEL_ENCAP_NONE ||
+	    encap_type > LWTUNNEL_ENCAP_MAX)
+		return -ERANGE;
+
+	ret = (cmpxchg((const struct lwtunnel_encap_ops **)
+		       &lwtun_encaps[encap_type],
+		       ops, NULL) == ops) ? 0 : -1;
+
+	synchronize_net();
+
+	return ret;
+}
+EXPORT_SYMBOL(lwtunnel_encap_del_ops);
+
+int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+			 struct nlattr *encap, unsigned int family,
+			 const void *cfg, struct lwtunnel_state **lws)
+{
+	const struct lwtunnel_encap_ops *ops;
+	int ret = -EINVAL;
+
+	if (encap_type == LWTUNNEL_ENCAP_NONE ||
+	    encap_type > LWTUNNEL_ENCAP_MAX)
+		return ret;
+
+	ret = -EOPNOTSUPP;
+	rcu_read_lock();
+	ops = rcu_dereference(lwtun_encaps[encap_type]);
+	if (likely(ops && ops->build_state))
+		ret = ops->build_state(dev, encap, family, cfg, lws);
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(lwtunnel_build_state);
+
+int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate)
+{
+	const struct lwtunnel_encap_ops *ops;
+	struct nlattr *nest;
+	int ret = -EINVAL;
+
+	if (!lwtstate)
+		return 0;
+
+	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
+		return 0;
+
+	ret = -EOPNOTSUPP;
+	nest = nla_nest_start(skb, RTA_ENCAP);
+	rcu_read_lock();
+	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+	if (likely(ops && ops->fill_encap))
+		ret = ops->fill_encap(skb, lwtstate);
+	rcu_read_unlock();
+
+	if (ret)
+		goto nla_put_failure;
+	nla_nest_end(skb, nest);
+	ret = nla_put_u16(skb, RTA_ENCAP_TYPE, lwtstate->type);
+	if (ret)
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nest);
+
+	return (ret == -EOPNOTSUPP ? 0 : ret);
+}
+EXPORT_SYMBOL(lwtunnel_fill_encap);
+
+int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
+{
+	const struct lwtunnel_encap_ops *ops;
+	int ret = 0;
+
+	if (!lwtstate)
+		return 0;
+
+	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
+		return 0;
+
+	rcu_read_lock();
+	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+	if (likely(ops && ops->get_encap_size))
+		ret = nla_total_size(ops->get_encap_size(lwtstate));
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(lwtunnel_get_encap_size);
+
+int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+	const struct lwtunnel_encap_ops *ops;
+	int ret = 0;
+
+	if (!a && !b)
+		return 0;
+
+	if (!a || !b)
+		return 1;
+
+	if (a->type != b->type)
+		return 1;
+
+	if (a->type == LWTUNNEL_ENCAP_NONE ||
+	    a->type > LWTUNNEL_ENCAP_MAX)
+		return 0;
+
+	rcu_read_lock();
+	ops = rcu_dereference(lwtun_encaps[a->type]);
+	if (likely(ops && ops->cmp_encap))
+		ret = ops->cmp_encap(a, b);
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(lwtunnel_cmp_encap);
+
+int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	const struct lwtunnel_encap_ops *ops;
+	struct lwtunnel_state *lwtstate;
+	int ret = -EINVAL;
+
+	if (!dst)
+		goto drop;
+	lwtstate = dst->lwtstate;
+
+	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
+		return 0;
+
+	ret = -EOPNOTSUPP;
+	rcu_read_lock();
+	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+	if (likely(ops && ops->output))
+		ret = ops->output(sk, skb);
+	rcu_read_unlock();
+
+	if (ret == -EOPNOTSUPP)
+		goto drop;
+
+	return ret;
+
+drop:
+	kfree_skb(skb);
+
+	return ret;
+}
+EXPORT_SYMBOL(lwtunnel_output);
+
+int lwtunnel_input(struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	const struct lwtunnel_encap_ops *ops;
+	struct lwtunnel_state *lwtstate;
+	int ret = -EINVAL;
+
+	if (!dst)
+		goto drop;
+	lwtstate = dst->lwtstate;
+
+	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
+		return 0;
+
+	ret = -EOPNOTSUPP;
+	rcu_read_lock();
+	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+	if (likely(ops && ops->input))
+		ret = ops->input(skb);
+	rcu_read_unlock();
+
+	if (ret == -EOPNOTSUPP)
+		goto drop;
+
+	return ret;
+
+drop:
+	kfree_skb(skb);
+
+	return ret;
+}
+EXPORT_SYMBOL(lwtunnel_input);
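
An encapsulation module plugs into this core by registering a lwtunnel_encap_ops; the ops fields below are exactly those dispatched above, while the example_* handlers and the encap-type constant are illustrative:

/* Illustrative only: registration pattern for a new encap type. The
 * example_* handlers (prototypes matching the ops used above) are
 * assumed to be defined elsewhere.
 */
static const struct lwtunnel_encap_ops example_encap_ops = {
	.build_state	= example_build_state,
	.fill_encap	= example_fill_encap,
	.get_encap_size	= example_get_encap_size,
	.cmp_encap	= example_cmp_encap,
	.output		= example_output,
	.input		= example_input,
};

static int __init example_encap_init(void)
{
	return lwtunnel_encap_add_ops(&example_encap_ops,
				      LWTUNNEL_ENCAP_IP);
}

static void __exit example_encap_exit(void)
{
	lwtunnel_encap_del_ops(&example_encap_ops, LWTUNNEL_ENCAP_IP);
}
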
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 84195da..2b515ba 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -274,8 +274,12 @@
 	    (entries >= tbl->gc_thresh2 &&
 	     time_after(now, tbl->last_flush + 5 * HZ))) {
 		if (!neigh_forced_gc(tbl) &&
-		    entries >= tbl->gc_thresh3)
+		    entries >= tbl->gc_thresh3) {
+			net_info_ratelimited("%s: neighbor table overflow!\n",
+					     tbl->id);
+			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
 			goto out_entries;
+		}
 	}
 
 	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
@@ -1849,6 +1853,7 @@
 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
+			ndst.ndts_table_fulls		+= st->table_fulls;
 		}
 
 		if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
@@ -2717,12 +2722,12 @@
 	struct neigh_statistics *st = v;
 
 	if (v == SEQ_START_TOKEN) {
-		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards\n");
+		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
 		return 0;
 	}
 
 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
-			"%08lx %08lx  %08lx %08lx %08lx\n",
+			"%08lx %08lx  %08lx %08lx %08lx %08lx\n",
 		   atomic_read(&tbl->entries),
 
 		   st->allocs,
@@ -2739,7 +2744,8 @@
 
 		   st->periodic_gc_runs,
 		   st->forced_gc_runs,
-		   st->unres_discards
+		   st->unres_discards,
+		   st->table_fulls
 		   );
 
 	return 0;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 18b34d7..b279077 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -404,6 +404,19 @@
 NETDEVICE_SHOW(group, fmt_dec);
 static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);
 
+static int change_proto_down(struct net_device *dev, unsigned long proto_down)
+{
+	return dev_change_proto_down(dev, (bool) proto_down);
+}
+
+static ssize_t proto_down_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t len)
+{
+	return netdev_store(dev, attr, buf, len, change_proto_down);
+}
+NETDEVICE_SHOW_RW(proto_down, fmt_dec);
+
 static ssize_t phys_port_id_show(struct device *dev,
 				 struct device_attribute *attr, char *buf)
 {
@@ -501,6 +514,7 @@
 	&dev_attr_phys_port_id.attr,
 	&dev_attr_phys_port_name.attr,
 	&dev_attr_phys_switch_id.attr,
+	&dev_attr_proto_down.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(net_class);
@@ -675,7 +689,7 @@
 	struct rps_map *old_map, *map;
 	cpumask_var_t mask;
 	int err, cpu, i;
-	static DEFINE_SPINLOCK(rps_map_lock);
+	static DEFINE_MUTEX(rps_map_mutex);
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
@@ -708,18 +722,21 @@
 		map = NULL;
 	}
 
-	spin_lock(&rps_map_lock);
+	mutex_lock(&rps_map_mutex);
 	old_map = rcu_dereference_protected(queue->rps_map,
-					    lockdep_is_held(&rps_map_lock));
+					    mutex_is_locked(&rps_map_mutex));
 	rcu_assign_pointer(queue->rps_map, map);
-	spin_unlock(&rps_map_lock);
 
 	if (map)
 		static_key_slow_inc(&rps_needed);
-	if (old_map) {
-		kfree_rcu(old_map, rcu);
+	if (old_map)
 		static_key_slow_dec(&rps_needed);
-	}
+
+	mutex_unlock(&rps_map_mutex);
+
+	if (old_map)
+		kfree_rcu(old_map, rcu);
+
 	free_cpumask_var(mask);
 	return len;
 }
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index ba3c012..adef015 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -31,6 +31,7 @@
 #include <trace/events/napi.h>
 #include <trace/events/sock.h>
 #include <trace/events/udp.h>
+#include <trace/events/fib.h>
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb);
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c126a87..6aa3db8 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -380,6 +380,8 @@
 	static atomic_t ip_ident;
 	struct ipv6hdr *ip6h;
 
+	WARN_ON_ONCE(!irqs_disabled());
+
 	udp_len = len + sizeof(*udph);
 	if (np->ipv6)
 		ip_len = udp_len + sizeof(*ip6h);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1cbd209..de8d5cc 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -273,7 +273,6 @@
 
 	/* runtime counters relating to clone_skb */
 
-	__u64 allocated_skbs;
 	__u32 clone_count;
 	int last_ok;		/* Was last skb sent?
 				 * Or a failed transmit of some sort?
@@ -2279,7 +2278,7 @@
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
 {
-	pkt_dev->pkt_overhead = 0;
+	pkt_dev->pkt_overhead = LL_RESERVED_SPACE(pkt_dev->odev);
 	pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
 	pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
 	pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
@@ -2788,6 +2787,8 @@
 	} else {
 		 skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
 	}
+	if (likely(skb))
+		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 
 	return skb;
 }
@@ -3397,7 +3397,6 @@
 			return;
 		}
 		pkt_dev->last_pkt_size = pkt_dev->skb->len;
-		pkt_dev->allocated_skbs++;
 		pkt_dev->clone_count = 0;	/* reset counter */
 	}
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index dc004b1..a466821 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -678,6 +678,12 @@
 					continue;
 				if (nla_put_string(skb, i + 1, name))
 					goto nla_put_failure;
+			} else if (i == RTAX_FEATURES - 1) {
+				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
+
+				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
+				if (nla_put_u32(skb, i + 1, user_features))
+					goto nla_put_failure;
 			} else {
 				if (nla_put_u32(skb, i + 1, metrics[i]))
 					goto nla_put_failure;
@@ -896,7 +902,9 @@
 	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
 	       + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
-	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN); /* IFLA_PHYS_SWITCH_ID */
+	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
+	       + nla_total_size(1); /* IFLA_PROTO_DOWN */
+
 }
 
 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
@@ -1082,7 +1090,8 @@
 	    (dev->ifalias &&
 	     nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
 	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
-			atomic_read(&dev->carrier_changes)))
+			atomic_read(&dev->carrier_changes)) ||
+	    nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
 		goto nla_put_failure;
 
 	if (1) {
@@ -1319,6 +1328,7 @@
 	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },  /* ignored */
 	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
 	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
+	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
 };
 
 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -1861,6 +1871,14 @@
 	}
 	err = 0;
 
+	if (tb[IFLA_PROTO_DOWN]) {
+		err = dev_change_proto_down(dev,
+					    nla_get_u8(tb[IFLA_PROTO_DOWN]));
+		if (err)
+			goto errout;
+		status |= DO_SETLINK_NOTIFY;
+	}
+
 errout:
 	if (status & DO_SETLINK_MODIFIED) {
 		if (status & DO_SETLINK_NOTIFY)
@@ -1951,16 +1969,30 @@
 	return 0;
 }
 
+int rtnl_delete_link(struct net_device *dev)
+{
+	const struct rtnl_link_ops *ops;
+	LIST_HEAD(list_kill);
+
+	ops = dev->rtnl_link_ops;
+	if (!ops || !ops->dellink)
+		return -EOPNOTSUPP;
+
+	ops->dellink(dev, &list_kill);
+	unregister_netdevice_many(&list_kill);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rtnl_delete_link);
+
 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	struct net *net = sock_net(skb->sk);
-	const struct rtnl_link_ops *ops;
 	struct net_device *dev;
 	struct ifinfomsg *ifm;
 	char ifname[IFNAMSIZ];
 	struct nlattr *tb[IFLA_MAX+1];
 	int err;
-	LIST_HEAD(list_kill);
 
 	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
 	if (err < 0)
@@ -1982,13 +2014,7 @@
 	if (!dev)
 		return -ENODEV;
 
-	ops = dev->rtnl_link_ops;
-	if (!ops || !ops->dellink)
-		return -EOPNOTSUPP;
-
-	ops->dellink(dev, &list_kill);
-	unregister_netdevice_many(&list_kill);
-	return 0;
+	return rtnl_delete_link(dev);
 }
 
 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7b84330..dad4dd3 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -392,7 +392,7 @@
 /**
  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
  *	@dev: network device to receive on
- *	@length: length to allocate
+ *	@len: length to allocate
  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
  *
  *	Allocate a new &sk_buff and assign it a usage count of one. The
@@ -461,7 +461,7 @@
 /**
  *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
  *	@napi: napi instance this buffer was allocated for
- *	@length: length to allocate
+ *	@len: length to allocate
  *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
  *
  *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
diff --git a/net/core/sock.c b/net/core/sock.c
index 193901d..ca2984a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2078,7 +2078,7 @@
 EXPORT_SYMBOL(__sk_mem_schedule);
 
 /**
- *	__sk_reclaim - reclaim memory_allocated
+ *	__sk_mem_reclaim - reclaim memory_allocated
  *	@sk: socket
  *	@amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
  */
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index d79866c..817622f 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -90,6 +90,9 @@
 		goto out;
 
 	fprog = filter->prog->orig_prog;
+	if (!fprog)
+		goto out;
+
 	flen = bpf_classic_proglen(fprog);
 
 	attr = nla_reserve(skb, attrtype, flen);
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 43d3dd6..42689d5 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -60,11 +60,15 @@
 	struct phy_device *phydev;
 	unsigned int type;
 
+	if (!skb->dev || !skb->dev->phydev || !skb->dev->phydev->drv)
+		return false;
+
 	if (skb_headroom(skb) < ETH_HLEN)
 		return false;
+
 	__skb_push(skb, ETH_HLEN);
 
-	type = classify(skb);
+	type = ptp_classify_raw(skb);
 
 	__skb_pull(skb, ETH_HLEN);
 
diff --git a/net/core/utils.c b/net/core/utils.c
index a7732a0..3dffce9 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -301,7 +301,7 @@
 EXPORT_SYMBOL(in6_pton);
 
 void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
-			      __be32 from, __be32 to, int pseudohdr)
+			      __be32 from, __be32 to, bool pseudohdr)
 {
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
 		csum_replace4(sum, from, to);
@@ -318,7 +318,7 @@
 
 void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
 			       const __be32 *from, const __be32 *to,
-			       int pseudohdr)
+			       bool pseudohdr)
 {
 	__be32 diff[] = {
 		~from[0], ~from[1], ~from[2], ~from[3],
@@ -336,6 +336,19 @@
 }
 EXPORT_SYMBOL(inet_proto_csum_replace16);
 
+void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
+				     __wsum diff, bool pseudohdr)
+{
+	if (skb->ip_summed != CHECKSUM_PARTIAL) {
+		*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
+		if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
+			skb->csum = ~csum_add(diff, ~skb->csum);
+	} else if (pseudohdr) {
+		*sum = ~csum_fold(csum_add(diff, csum_unfold(*sum)));
+	}
+}
+EXPORT_SYMBOL(inet_proto_csum_replace_by_diff);
+
 struct __net_random_once_work {
 	struct work_struct work;
 	struct static_key *key;
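
The new _by_diff variant folds an arbitrary precomputed checksum delta into an L4 checksum, for callers that rewrite regions larger than the fixed-size from/to helpers cover. A hedged sketch, with illustrative parameters:

/* Illustrative only: fold the checksum delta of an arbitrary-length
 * rewrite into a TCP checksum; pass pseudohdr = true when the bytes
 * being rewritten are covered by the pseudo-header.
 */
static void example_csum_patch(struct sk_buff *skb, struct tcphdr *th,
			       const void *from, const void *to, int len)
{
	__wsum diff = csum_sub(csum_partial(to, len, 0),
			       csum_partial(from, len, 0));

	inet_proto_csum_replace_by_diff(&th->check, skb, diff, true);
}
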
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index b445d49..76e380076 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -176,6 +176,43 @@
 #endif /* CONFIG_NET_DSA_HWMON */
 
 /* basic switch operations **************************************************/
+static int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct net_device *master)
+{
+	struct dsa_chip_data *cd = ds->pd;
+	struct device_node *port_dn;
+	struct phy_device *phydev;
+	int ret, port, mode;
+
+	for (port = 0; port < DSA_MAX_PORTS; port++) {
+		if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
+			continue;
+
+		port_dn = cd->port_dn[port];
+		if (of_phy_is_fixed_link(port_dn)) {
+			ret = of_phy_register_fixed_link(port_dn);
+			if (ret) {
+				netdev_err(master,
+					   "failed to register fixed PHY\n");
+				return ret;
+			}
+			phydev = of_phy_find_device(port_dn);
+			if (!phydev)
+				return -ENODEV;
+
+			mode = of_get_phy_mode(port_dn);
+			if (mode < 0)
+				mode = PHY_INTERFACE_MODE_NA;
+			phydev->interface = mode;
+
+			genphy_config_init(phydev);
+			genphy_read_status(phydev);
+			if (ds->drv->adjust_link)
+				ds->drv->adjust_link(ds, port, phydev);
+		}
+	}
+	return 0;
+}
+
 static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
 {
 	struct dsa_switch_driver *drv = ds->drv;
@@ -297,6 +332,14 @@
 		}
 	}
 
+	/* Perform configuration of the CPU and DSA ports */
+	ret = dsa_cpu_dsa_setup(ds, dst->master_netdev);
+	if (ret < 0) {
+		netdev_err(dst->master_netdev, "[%d] : can't configure CPU and DSA ports\n",
+			   index);
+		ret = 0;
+	}
+
 #ifdef CONFIG_NET_DSA_HWMON
 	/* If the switch provides a temperature sensor,
 	 * register with hardware monitoring subsystem.
@@ -554,6 +597,31 @@
 	return 0;
 }
 
+static int dsa_of_probe_links(struct dsa_platform_data *pd,
+			      struct dsa_chip_data *cd,
+			      int chip_index, int port_index,
+			      struct device_node *port,
+			      const char *port_name)
+{
+	struct device_node *link;
+	int link_index;
+	int ret;
+
+	for (link_index = 0;; link_index++) {
+		link = of_parse_phandle(port, "link", link_index);
+		if (!link)
+			break;
+
+		if (!strcmp(port_name, "dsa") && pd->nr_chips > 1) {
+			ret = dsa_of_setup_routing_table(pd, cd, chip_index,
+							 port_index, link);
+			if (ret)
+				return ret;
+		}
+	}
+	return 0;
+}
+
 static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
 {
 	int i;
@@ -573,8 +641,8 @@
 static int dsa_of_probe(struct device *dev)
 {
 	struct device_node *np = dev->of_node;
-	struct device_node *child, *mdio, *ethernet, *port, *link;
-	struct mii_bus *mdio_bus;
+	struct device_node *child, *mdio, *ethernet, *port;
+	struct mii_bus *mdio_bus, *mdio_bus_switch;
 	struct net_device *ethernet_dev;
 	struct dsa_platform_data *pd;
 	struct dsa_chip_data *cd;
@@ -636,6 +704,16 @@
 		if (!of_property_read_u32(child, "eeprom-length", &eeprom_len))
 			cd->eeprom_len = eeprom_len;
 
+		mdio = of_parse_phandle(child, "mii-bus", 0);
+		if (mdio) {
+			mdio_bus_switch = of_mdio_find_bus(mdio);
+			if (!mdio_bus_switch) {
+				ret = -EPROBE_DEFER;
+				goto out_free_chip;
+			}
+			cd->host_dev = &mdio_bus_switch->dev;
+		}
+
 		for_each_available_child_of_node(child, port) {
 			port_reg = of_get_property(port, "reg", NULL);
 			if (!port_reg)
@@ -658,15 +736,10 @@
 				goto out_free_chip;
 			}
 
-			link = of_parse_phandle(port, "link", 0);
-
-			if (!strcmp(port_name, "dsa") && link &&
-					pd->nr_chips > 1) {
-				ret = dsa_of_setup_routing_table(pd, cd,
-						chip_index, port_index, link);
-				if (ret)
-					goto out_free_chip;
-			}
+			ret = dsa_of_probe_links(pd, cd, chip_index,
+						 port_index, port, port_name);
+			if (ret)
+				goto out_free_chip;
 
 		}
 	}
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index d5f1f9b..311796c8 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -13,9 +13,10 @@
 
 #include <linux/phy.h>
 #include <linux/netdevice.h>
+#include <linux/netpoll.h>
 
 struct dsa_device_ops {
-	netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
+	struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
 	int (*rcv)(struct sk_buff *skb, struct net_device *dev,
 		   struct packet_type *pt, struct net_device *orig_dev);
 };
@@ -26,7 +27,7 @@
 	 * switch port.
 	 */
 	struct net_device	*dev;
-	netdev_tx_t		(*xmit)(struct sk_buff *skb,
+	struct sk_buff *	(*xmit)(struct sk_buff *skb,
 					struct net_device *dev);
 
 	/*
@@ -47,6 +48,9 @@
 	int			old_duplex;
 
 	struct net_device	*bridge_dev;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	struct netpoll		*netpoll;
+#endif
 };
 
 /* dsa.c */
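
Under the reworked tagging ops above, a tagger's xmit now returns the tagged skb (or NULL after freeing it on error) and leaves the actual transmit to the slave path. A minimal sketch with an illustrative 4-byte tag inserted after the MAC addresses:

/* Illustrative only: tagger inserting a 4-byte header between the MAC
 * addresses and the EtherType under the new struct sk_buff * return
 * convention. The tag layout is hypothetical.
 */
static struct sk_buff *example_tag_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	u8 *tag;

	if (skb_cow_head(skb, 4) < 0) {
		kfree_skb(skb);
		return NULL;
	}

	tag = skb_push(skb, 4);
	memmove(tag, tag + 4, 2 * ETH_ALEN);	/* keep the MACs in front */
	tag += 2 * ETH_ALEN;
	tag[0] = 0;				/* hypothetical opcode */
	tag[1] = p->port;			/* egress switch port */
	tag[2] = 0;
	tag[3] = 0;

	return skb;
}
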
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 35c47dd..cce9738 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -18,6 +18,7 @@
 #include <net/rtnetlink.h>
 #include <net/switchdev.h>
 #include <linux/if_bridge.h>
+#include <linux/netpoll.h>
 #include "dsa_priv.h"
 
 /* slave mii_bus handling ***************************************************/
@@ -199,103 +200,212 @@
 	return 0;
 }
 
-static int dsa_slave_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
-			     struct net_device *dev,
-			     const unsigned char *addr, u16 vid, u16 nlm_flags)
+static int dsa_bridge_check_vlan_range(struct dsa_switch *ds,
+				       const struct net_device *bridge,
+				       u16 vid_begin, u16 vid_end)
 {
-	struct dsa_slave_priv *p = netdev_priv(dev);
-	struct dsa_switch *ds = p->parent;
-	int ret = -EOPNOTSUPP;
+	struct dsa_slave_priv *p;
+	struct net_device *dev, *vlan_br;
+	DECLARE_BITMAP(members, DSA_MAX_PORTS);
+	DECLARE_BITMAP(untagged, DSA_MAX_PORTS);
+	u16 vid;
+	int member, err;
 
-	if (ds->drv->fdb_add)
-		ret = ds->drv->fdb_add(ds, p->port, addr, vid);
+	if (!ds->drv->vlan_getnext || !vid_begin)
+		return -EOPNOTSUPP;
 
-	return ret;
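+	/* Start one below the requested range: vlan_getnext is expected to
+	 * return the first configured VID greater than the value passed in.
+	 */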
+	vid = vid_begin - 1;
+
+	do {
+		err = ds->drv->vlan_getnext(ds, &vid, members, untagged);
+		if (err)
+			break;
+
+		if (vid > vid_end)
+			break;
+
+		member = find_first_bit(members, DSA_MAX_PORTS);
+		if (member == DSA_MAX_PORTS)
+			continue;
+
+		dev = ds->ports[member];
+		p = netdev_priv(dev);
+		vlan_br = p->bridge_dev;
+		if (vlan_br == bridge)
+			continue;
+
+		netdev_dbg(vlan_br, "hardware VLAN %d already in use\n", vid);
+		return -EOPNOTSUPP;
+	} while (vid < vid_end);
+
+	return err == -ENOENT ? 0 : err;
 }
 
-static int dsa_slave_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
-			     struct net_device *dev,
-			     const unsigned char *addr, u16 vid)
+static int dsa_slave_port_vlan_add(struct net_device *dev,
+				   struct switchdev_obj *obj)
 {
+	struct switchdev_obj_vlan *vlan = &obj->u.vlan;
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
-	int ret = -EOPNOTSUPP;
+	u16 vid;
+	int err;
 
-	if (ds->drv->fdb_del)
-		ret = ds->drv->fdb_del(ds, p->port, addr, vid);
+	switch (obj->trans) {
+	case SWITCHDEV_TRANS_PREPARE:
+		if (!ds->drv->port_vlan_add || !ds->drv->port_pvid_set)
+			return -EOPNOTSUPP;
 
-	return ret;
-}
+		/* If the requested port doesn't belong to the same bridge as
+	 * the VLAN members, fall back to software VLAN (hopefully).
+		 */
+		err = dsa_bridge_check_vlan_range(ds, p->bridge_dev,
+						  vlan->vid_begin,
+						  vlan->vid_end);
+		if (err)
+			return err;
+		break;
+	case SWITCHDEV_TRANS_COMMIT:
+		for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+			err = ds->drv->port_vlan_add(ds, p->port, vid,
+						     vlan->flags &
+						     BRIDGE_VLAN_INFO_UNTAGGED);
+			if (!err && vlan->flags & BRIDGE_VLAN_INFO_PVID)
+				err = ds->drv->port_pvid_set(ds, p->port, vid);
+			if (err)
+				return err;
+		}
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
 
-static int dsa_slave_fill_info(struct net_device *dev, struct sk_buff *skb,
-			       const unsigned char *addr, u16 vid,
-			       bool is_static,
-			       u32 portid, u32 seq, int type,
-			       unsigned int flags)
-{
-	struct nlmsghdr *nlh;
-	struct ndmsg *ndm;
-
-	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
-	if (!nlh)
-		return -EMSGSIZE;
-
-	ndm = nlmsg_data(nlh);
-	ndm->ndm_family	 = AF_BRIDGE;
-	ndm->ndm_pad1    = 0;
-	ndm->ndm_pad2    = 0;
-	ndm->ndm_flags	 = NTF_EXT_LEARNED;
-	ndm->ndm_type	 = 0;
-	ndm->ndm_ifindex = dev->ifindex;
-	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;
-
-	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
-		goto nla_put_failure;
-
-	if (vid && nla_put_u16(skb, NDA_VLAN, vid))
-		goto nla_put_failure;
-
-	nlmsg_end(skb, nlh);
 	return 0;
-
-nla_put_failure:
-	nlmsg_cancel(skb, nlh);
-	return -EMSGSIZE;
 }
 
-/* Dump information about entries, in response to GETNEIGH */
-static int dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
-			      struct net_device *dev,
-			      struct net_device *filter_dev, int idx)
+static int dsa_slave_port_vlan_del(struct net_device *dev,
+				   struct switchdev_obj *obj)
+{
+	struct switchdev_obj_vlan *vlan = &obj->u.vlan;
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+	u16 vid;
+	int err;
+
+	if (!ds->drv->port_vlan_del)
+		return -EOPNOTSUPP;
+
+	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+		err = ds->drv->port_vlan_del(ds, p->port, vid);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int dsa_slave_port_vlan_dump(struct net_device *dev,
+				    struct switchdev_obj *obj)
+{
+	struct switchdev_obj_vlan *vlan = &obj->u.vlan;
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+	DECLARE_BITMAP(members, DSA_MAX_PORTS);
+	DECLARE_BITMAP(untagged, DSA_MAX_PORTS);
+	u16 pvid, vid = 0;
+	int err;
+
+	if (!ds->drv->vlan_getnext || !ds->drv->port_pvid_get)
+		return -EOPNOTSUPP;
+
+	err = ds->drv->port_pvid_get(ds, p->port, &pvid);
+	if (err)
+		return err;
+
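+	/* Walk every configured VID; vlan_getnext is expected to return
+	 * -ENOENT once past the last entry, which ends the dump cleanly.
+	 */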
+	for (;;) {
+		err = ds->drv->vlan_getnext(ds, &vid, members, untagged);
+		if (err)
+			break;
+
+		if (!test_bit(p->port, members))
+			continue;
+
+		memset(vlan, 0, sizeof(*vlan));
+		vlan->vid_begin = vlan->vid_end = vid;
+
+		if (vid == pvid)
+			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+
+		if (test_bit(p->port, untagged))
+			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+
+		err = obj->cb(dev, obj);
+		if (err)
+			break;
+	}
+
+	return err == -ENOENT ? 0 : err;
+}
+
+static int dsa_slave_port_fdb_add(struct net_device *dev,
+				  struct switchdev_obj *obj)
+{
+	struct switchdev_obj_fdb *fdb = &obj->u.fdb;
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+	int ret = -EOPNOTSUPP;
+
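+	/* The prepare phase only verifies the driver implements the op;
+	 * the actual programming happens in the commit phase.
+	 */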
+	if (obj->trans == SWITCHDEV_TRANS_PREPARE)
+		ret = ds->drv->port_fdb_add ? 0 : -EOPNOTSUPP;
+	else if (obj->trans == SWITCHDEV_TRANS_COMMIT)
+		ret = ds->drv->port_fdb_add(ds, p->port, fdb->addr, fdb->vid);
+
+	return ret;
+}
+
+static int dsa_slave_port_fdb_del(struct net_device *dev,
+				  struct switchdev_obj *obj)
+{
+	struct switchdev_obj_fdb *fdb = &obj->u.fdb;
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+	int ret = -EOPNOTSUPP;
+
+	if (ds->drv->port_fdb_del)
+		ret = ds->drv->port_fdb_del(ds, p->port, fdb->addr, fdb->vid);
+
+	return ret;
+}
+
+static int dsa_slave_port_fdb_dump(struct net_device *dev,
+				   struct switchdev_obj *obj)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
 	unsigned char addr[ETH_ALEN] = { 0 };
+	u16 vid = 0;
 	int ret;
 
-	if (!ds->drv->fdb_getnext)
+	if (!ds->drv->port_fdb_getnext)
 		return -EOPNOTSUPP;
 
-	for (; ; idx++) {
+	for (;;) {
 		bool is_static;
 
-		ret = ds->drv->fdb_getnext(ds, p->port, addr, &is_static);
+		ret = ds->drv->port_fdb_getnext(ds, p->port, addr, &vid,
+						&is_static);
 		if (ret < 0)
 			break;
 
-		if (idx < cb->args[0])
-			continue;
+		obj->u.fdb.addr = addr;
+		obj->u.fdb.vid = vid;
+		obj->u.fdb.ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
 
-		ret = dsa_slave_fill_info(dev, skb, addr, 0,
-					  is_static,
-					  NETLINK_CB(cb->skb).portid,
-					  cb->nlh->nlmsg_seq,
-					  RTM_NEWNEIGH, NLM_F_MULTI);
+		ret = obj->cb(dev, obj);
 		if (ret < 0)
 			break;
 	}
 
-	return idx;
+	return ret == -ENOENT ? 0 : ret;
 }
 
 static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -363,6 +473,71 @@
 	return ret;
 }
 
+static int dsa_slave_port_obj_add(struct net_device *dev,
+				  struct switchdev_obj *obj)
+{
+	int err;
+
+	/* For the prepare phase, ensure the full set of changes is feasible in
+	 * one go in order to signal a failure properly. If an operation is not
+	 * supported, return -EOPNOTSUPP.
+	 */
+
+	switch (obj->id) {
+	case SWITCHDEV_OBJ_PORT_FDB:
+		err = dsa_slave_port_fdb_add(dev, obj);
+		break;
+	case SWITCHDEV_OBJ_PORT_VLAN:
+		err = dsa_slave_port_vlan_add(dev, obj);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static int dsa_slave_port_obj_del(struct net_device *dev,
+				  struct switchdev_obj *obj)
+{
+	int err;
+
+	switch (obj->id) {
+	case SWITCHDEV_OBJ_PORT_FDB:
+		err = dsa_slave_port_fdb_del(dev, obj);
+		break;
+	case SWITCHDEV_OBJ_PORT_VLAN:
+		err = dsa_slave_port_vlan_del(dev, obj);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static int dsa_slave_port_obj_dump(struct net_device *dev,
+				   struct switchdev_obj *obj)
+{
+	int err;
+
+	switch (obj->id) {
+	case SWITCHDEV_OBJ_PORT_FDB:
+		err = dsa_slave_port_fdb_dump(dev, obj);
+		break;
+	case SWITCHDEV_OBJ_PORT_VLAN:
+		err = dsa_slave_port_vlan_dump(dev, obj);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
 static int dsa_slave_bridge_port_join(struct net_device *dev,
 				      struct net_device *br)
 {
@@ -418,24 +593,53 @@
 	return 0;
 }
 
+static inline netdev_tx_t dsa_netpoll_send_skb(struct dsa_slave_priv *p,
+					       struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	if (p->netpoll)
+		netpoll_send_skb(p->netpoll, skb);
+#else
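+	/* netpoll_tx_running() is always false without
+	 * CONFIG_NET_POLL_CONTROLLER, so this should be unreachable.
+	 */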
+	BUG();
+#endif
+	return NETDEV_TX_OK;
+}
+
 static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct sk_buff *nskb;
 
-	return p->xmit(skb, dev);
-}
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
 
-static netdev_tx_t dsa_slave_notag_xmit(struct sk_buff *skb,
-					struct net_device *dev)
-{
-	struct dsa_slave_priv *p = netdev_priv(dev);
+	/* Transmit function may have to reallocate the original SKB */
+	nskb = p->xmit(skb, dev);
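+	/* A NULL return means the tagger consumed (freed) the SKB. */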
+	if (!nskb)
+		return NETDEV_TX_OK;
 
-	skb->dev = p->parent->dst->master_netdev;
-	dev_queue_xmit(skb);
+	/* An SKB sent via netpoll still needs to be mangled with the
+	 * protocol-specific tag to be successfully transmitted
+	 */
+	if (unlikely(netpoll_tx_running(dev)))
+		return dsa_netpoll_send_skb(p, nskb);
+
+	/* Queue the SKB for transmission on the parent interface, but
+	 * do not modify its EtherType
+	 */
+	nskb->dev = p->parent->dst->master_netdev;
+	dev_queue_xmit(nskb);
 
 	return NETDEV_TX_OK;
 }
 
+static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
+					    struct net_device *dev)
+{
+	/* Just return the original SKB */
+	return skb;
+}
+
 
 /* ethtool operations *******************************************************/
 static int
@@ -665,6 +869,49 @@
 	return ret;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static int dsa_slave_netpoll_setup(struct net_device *dev,
+				   struct netpoll_info *ni)
+{
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+	struct net_device *master = ds->dst->master_netdev;
+	struct netpoll *netpoll;
+	int err = 0;
+
+	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+	if (!netpoll)
+		return -ENOMEM;
+
+	err = __netpoll_setup(netpoll, master);
+	if (err) {
+		kfree(netpoll);
+		goto out;
+	}
+
+	p->netpoll = netpoll;
+out:
+	return err;
+}
+
+static void dsa_slave_netpoll_cleanup(struct net_device *dev)
+{
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct netpoll *netpoll = p->netpoll;
+
+	if (!netpoll)
+		return;
+
+	p->netpoll = NULL;
+
+	__netpoll_free_async(netpoll);
+}
+
+static void dsa_slave_poll_controller(struct net_device *dev)
+{
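+	/* RX polling is handled by the master interface; there is
+	 * nothing to poll on the slave itself.
+	 */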
+}
+#endif
+
 static const struct ethtool_ops dsa_slave_ethtool_ops = {
 	.get_settings		= dsa_slave_get_settings,
 	.set_settings		= dsa_slave_set_settings,
@@ -692,16 +939,27 @@
 	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
 	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
 	.ndo_set_mac_address	= dsa_slave_set_mac_address,
-	.ndo_fdb_add		= dsa_slave_fdb_add,
-	.ndo_fdb_del		= dsa_slave_fdb_del,
-	.ndo_fdb_dump		= dsa_slave_fdb_dump,
+	.ndo_fdb_add		= switchdev_port_fdb_add,
+	.ndo_fdb_del		= switchdev_port_fdb_del,
+	.ndo_fdb_dump		= switchdev_port_fdb_dump,
 	.ndo_do_ioctl		= dsa_slave_ioctl,
 	.ndo_get_iflink		= dsa_slave_get_iflink,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
+	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
+	.ndo_poll_controller	= dsa_slave_poll_controller,
+#endif
+	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
+	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
+	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
 };
 
 static const struct switchdev_ops dsa_slave_switchdev_ops = {
 	.switchdev_port_attr_get	= dsa_slave_port_attr_get,
 	.switchdev_port_attr_set	= dsa_slave_port_attr_set,
+	.switchdev_port_obj_add		= dsa_slave_port_obj_add,
+	.switchdev_port_obj_del		= dsa_slave_port_obj_del,
+	.switchdev_port_obj_dump	= dsa_slave_port_obj_dump,
 };
 
 static void dsa_slave_adjust_link(struct net_device *dev)
@@ -889,7 +1147,7 @@
 	slave_dev->features = master->vlan_features;
 	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
 	eth_hw_addr_inherit(slave_dev, master);
-	slave_dev->tx_queue_len = 0;
+	slave_dev->priv_flags |= IFF_NO_QUEUE;
 	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
 	slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
 
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index 83d3572..e2aadb7 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -58,14 +58,11 @@
 #define BRCM_EG_TC_MASK		0x7
 #define BRCM_EG_PID_MASK	0x1f
 
-static netdev_tx_t brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	u8 *brcm_tag;
 
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
-
 	if (skb_cow_head(skb, BRCM_TAG_LEN) < 0)
 		goto out_free;
 
@@ -87,17 +84,11 @@
 		brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
 	brcm_tag[3] = (1 << p->port) & BRCM_IG_DSTMAP1_MASK;
 
-	/* Queue the SKB for transmission on the parent interface, but
-	 * do not modify its EtherType
-	 */
-	skb->dev = p->parent->dst->master_netdev;
-	dev_queue_xmit(skb);
-
-	return NETDEV_TX_OK;
+	return skb;
 
 out_free:
 	kfree_skb(skb);
-	return NETDEV_TX_OK;
+	return NULL;
 }
 
 static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 2dab270..aa780e4 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -15,14 +15,11 @@
 
 #define DSA_HLEN	4
 
-static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	u8 *dsa_header;
 
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
-
 	/*
 	 * Convert the outermost 802.1q tag to a DSA tag for tagged
 	 * packets, or insert a DSA tag between the addresses and
@@ -63,14 +60,11 @@
 		dsa_header[3] = 0x00;
 	}
 
-	skb->dev = p->parent->dst->master_netdev;
-	dev_queue_xmit(skb);
-
-	return NETDEV_TX_OK;
+	return skb;
 
 out_free:
 	kfree_skb(skb);
-	return NETDEV_TX_OK;
+	return NULL;
 }
 
 static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 9aeda59..2288c80 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -16,14 +16,11 @@
 #define DSA_HLEN	4
 #define EDSA_HLEN	8
 
-static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	u8 *edsa_header;
 
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
-
 	/*
 	 * Convert the outermost 802.1q tag to a DSA tag and prepend
 	 * a DSA ethertype field if the packet is tagged, or insert
@@ -76,14 +73,11 @@
 		edsa_header[7] = 0x00;
 	}
 
-	skb->dev = p->parent->dst->master_netdev;
-	dev_queue_xmit(skb);
-
-	return NETDEV_TX_OK;
+	return skb;
 
 out_free:
 	kfree_skb(skb);
-	return NETDEV_TX_OK;
+	return NULL;
 }
 
 static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index e268f9d..d25efc9 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -13,16 +13,13 @@
 #include <linux/slab.h>
 #include "dsa_priv.h"
 
-static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct sk_buff *nskb;
 	int padlen;
 	u8 *trailer;
 
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
-
 	/*
 	 * We have to make sure that the trailer ends up as the very
 	 * last 4 bytes of the packet.  This means that we have to pad
@@ -36,7 +33,7 @@
 	nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
 	if (nskb == NULL) {
 		kfree_skb(skb);
-		return NETDEV_TX_OK;
+		return NULL;
 	}
 	skb_reserve(nskb, NET_IP_ALIGN);
 
@@ -57,10 +54,7 @@
 	trailer[2] = 0x10;
 	trailer[3] = 0x00;
 
-	nskb->dev = p->parent->dst->master_netdev;
-	dev_queue_xmit(nskb);
-
-	return NETDEV_TX_OK;
+	return nskb;
 }
 
 static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 77e0f0e..d850fdc 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -114,7 +114,7 @@
 EXPORT_SYMBOL(eth_header);
 
 /**
- * eth_get_headlen - determine the the length of header for an ethernet frame
+ * eth_get_headlen - determine the length of header for an ethernet frame
  * @data: pointer to start of frame
  * @len: total length of frame
  *
@@ -132,7 +132,7 @@
 
 	/* parse any remaining L2/L3 headers, check for L4 */
 	if (!skb_flow_dissect_flow_keys_buf(&keys, data, eth->h_proto,
-					    sizeof(*eth), len))
+					    sizeof(*eth), len, 0))
 		return max_t(u32, keys.control.thoff, sizeof(*eth));
 
 	/* parse for any L4 headers */
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 44d2746..35a9788 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -392,7 +392,7 @@
 	dev->header_ops = &hsr_header_ops;
 	dev->netdev_ops = &hsr_device_ops;
 	SET_NETDEV_DEVTYPE(dev, &hsr_type);
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 
 	dev->destructor = hsr_dev_destroy;
 
diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
index e50f69d..ea339fa9 100644
--- a/net/ieee802154/6lowpan/6lowpan_i.h
+++ b/net/ieee802154/6lowpan/6lowpan_i.h
@@ -5,6 +5,7 @@
 
 #include <net/ieee802154_netdev.h>
 #include <net/inet_frag.h>
+#include <net/6lowpan.h>
 
 struct lowpan_create_arg {
 	u16 tag;
@@ -37,26 +38,18 @@
 	}
 }
 
-struct lowpan_dev_record {
-	struct net_device *ldev;
-	struct list_head list;
-};
-
 /* private device info */
 struct lowpan_dev_info {
 	struct net_device	*real_dev; /* real WPAN device ptr */
-	struct mutex		dev_list_mtx; /* mutex for list ops */
 	u16			fragment_tag;
 };
 
 static inline struct
 lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
 {
-	return netdev_priv(dev);
+	return (struct lowpan_dev_info *)lowpan_priv(dev)->priv;
 }
 
-extern struct list_head lowpan_devices;
-
 int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
 void lowpan_net_frag_exit(void);
 int lowpan_net_frag_init(void);
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index f20a387..953b1c4 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -52,8 +52,7 @@
 
 #include "6lowpan_i.h"
 
-LIST_HEAD(lowpan_devices);
-static int lowpan_open_count;
+static int open_count;
 
 static struct header_ops lowpan_header_ops = {
 	.create	= lowpan_header_create,
@@ -91,7 +90,7 @@
 	dev->hard_header_len	= 2 + 1 + 20 + 14;
 	dev->needed_tailroom	= 2; /* FCS */
 	dev->mtu		= IPV6_MIN_MTU;
-	dev->tx_queue_len	= 0;
+	dev->priv_flags		|= IFF_NO_QUEUE;
 	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
 	dev->watchdog_timeo	= 0;
 
@@ -114,7 +113,6 @@
 			  struct nlattr *tb[], struct nlattr *data[])
 {
 	struct net_device *real_dev;
-	struct lowpan_dev_record *entry;
 	int ret;
 
 	ASSERT_RTNL();
@@ -133,67 +131,52 @@
 		return -EINVAL;
 	}
 
-	lowpan_dev_info(dev)->real_dev = real_dev;
-	mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
-
-	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry) {
+	if (real_dev->ieee802154_ptr->lowpan_dev) {
 		dev_put(real_dev);
-		lowpan_dev_info(dev)->real_dev = NULL;
-		return -ENOMEM;
+		return -EBUSY;
 	}
 
-	entry->ldev = dev;
-
+	lowpan_dev_info(dev)->real_dev = real_dev;
 	/* Set the lowpan hardware address to the wpan hardware address. */
 	memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
 
-	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
-	INIT_LIST_HEAD(&entry->list);
-	list_add_tail(&entry->list, &lowpan_devices);
-	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+	lowpan_netdev_setup(dev, LOWPAN_LLTYPE_IEEE802154);
 
 	ret = register_netdevice(dev);
-	if (ret >= 0) {
-		if (!lowpan_open_count)
-			lowpan_rx_init();
-		lowpan_open_count++;
+	if (ret < 0) {
+		dev_put(real_dev);
+		return ret;
 	}
 
-	return ret;
+	real_dev->ieee802154_ptr->lowpan_dev = dev;
+	if (!open_count)
+		lowpan_rx_init();
+
+	open_count++;
+
+	return 0;
 }
 
 static void lowpan_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
 	struct net_device *real_dev = lowpan_dev->real_dev;
-	struct lowpan_dev_record *entry, *tmp;
 
 	ASSERT_RTNL();
 
-	lowpan_open_count--;
-	if (!lowpan_open_count)
+	open_count--;
+
+	if (!open_count)
 		lowpan_rx_exit();
 
-	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
-	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
-		if (entry->ldev == dev) {
-			list_del(&entry->list);
-			kfree(entry);
-		}
-	}
-	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
-
-	mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
-
-	unregister_netdevice_queue(dev, head);
-
+	real_dev->ieee802154_ptr->lowpan_dev = NULL;
+	unregister_netdevice(dev);
 	dev_put(real_dev);
 }
 
 static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
 	.kind		= "lowpan",
-	.priv_size	= sizeof(struct lowpan_dev_info),
+	.priv_size	= LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev_info)),
 	.setup		= lowpan_setup,
 	.newlink	= lowpan_newlink,
 	.dellink	= lowpan_dellink,
@@ -214,19 +197,21 @@
 			       unsigned long event, void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-	LIST_HEAD(del_list);
-	struct lowpan_dev_record *entry, *tmp;
 
 	if (dev->type != ARPHRD_IEEE802154)
 		goto out;
 
-	if (event == NETDEV_UNREGISTER) {
-		list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
-			if (lowpan_dev_info(entry->ldev)->real_dev == dev)
-				lowpan_dellink(entry->ldev, &del_list);
-		}
-
-		unregister_netdevice_many(&del_list);
+	switch (event) {
+	case NETDEV_UNREGISTER:
+		/* If the wpan interface is being unregistered, also
+		 * delete any lowpan interface that belongs to it.
+		 */
+		if (dev->ieee802154_ptr && dev->ieee802154_ptr->lowpan_dev)
+			lowpan_dellink(dev->ieee802154_ptr->lowpan_dev, NULL);
+		break;
+	default:
+		break;
 	}
 
 out:
diff --git a/net/ieee802154/6lowpan/rx.c b/net/ieee802154/6lowpan/rx.c
index 4be1d28..12e1020 100644
--- a/net/ieee802154/6lowpan/rx.c
+++ b/net/ieee802154/6lowpan/rx.c
@@ -15,36 +15,14 @@
 
 #include "6lowpan_i.h"
 
-static int lowpan_give_skb_to_devices(struct sk_buff *skb,
-				      struct net_device *dev)
+static int lowpan_give_skb_to_device(struct sk_buff *skb,
+				     struct net_device *dev)
 {
-	struct lowpan_dev_record *entry;
-	struct sk_buff *skb_cp;
-	int stat = NET_RX_SUCCESS;
-
+	skb->dev = dev->ieee802154_ptr->lowpan_dev;
 	skb->protocol = htons(ETH_P_IPV6);
 	skb->pkt_type = PACKET_HOST;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(entry, &lowpan_devices, list)
-		if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
-			skb_cp = skb_copy(skb, GFP_ATOMIC);
-			if (!skb_cp) {
-				kfree_skb(skb);
-				rcu_read_unlock();
-				return NET_RX_DROP;
-			}
-
-			skb_cp->dev = entry->ldev;
-			stat = netif_rx(skb_cp);
-			if (stat == NET_RX_DROP)
-				break;
-		}
-	rcu_read_unlock();
-
-	consume_skb(skb);
-
-	return stat;
+	return netif_rx(skb);
 }
 
 static int
@@ -89,6 +67,10 @@
 	struct ieee802154_hdr hdr;
 	int ret;
 
+	if (dev->type != ARPHRD_IEEE802154 ||
+	    !dev->ieee802154_ptr->lowpan_dev)
+		goto drop;
+
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (!skb)
 		goto drop;
@@ -99,9 +81,6 @@
 	if (skb->pkt_type == PACKET_OTHERHOST)
 		goto drop_skb;
 
-	if (dev->type != ARPHRD_IEEE802154)
-		goto drop_skb;
-
 	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
 		goto drop_skb;
 
@@ -109,7 +88,7 @@
 	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
 		/* Pull off the 1-byte of 6lowpan header. */
 		skb_pull(skb, 1);
-		return lowpan_give_skb_to_devices(skb, NULL);
+		return lowpan_give_skb_to_device(skb, dev);
 	} else {
 		switch (skb->data[0] & 0xe0) {
 		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
@@ -117,7 +96,7 @@
 			if (ret < 0)
 				goto drop_skb;
 
-			return lowpan_give_skb_to_devices(skb, NULL);
+			return lowpan_give_skb_to_device(skb, dev);
 		case LOWPAN_DISPATCH_FRAG1:	/* first fragment header */
 			ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
 			if (ret == 1) {
@@ -125,7 +104,7 @@
 				if (ret < 0)
 					goto drop_skb;
 
-				return lowpan_give_skb_to_devices(skb, NULL);
+				return lowpan_give_skb_to_device(skb, dev);
 			} else if (ret == -1) {
 				return NET_RX_DROP;
 			} else {
@@ -138,7 +117,7 @@
 				if (ret < 0)
 					goto drop_skb;
 
-				return lowpan_give_skb_to_devices(skb, NULL);
+				return lowpan_give_skb_to_device(skb, dev);
 			} else if (ret == -1) {
 				return NET_RX_DROP;
 			} else {
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index 2597abb..f6263fc 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -112,7 +112,7 @@
 
 	frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
 	if (IS_ERR(frag))
-		return -PTR_ERR(frag);
+		return PTR_ERR(frag);
 
 	memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
 	memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
@@ -224,7 +224,7 @@
 	} else {
 		da.mode = IEEE802154_ADDR_LONG;
 		da.extended_addr = ieee802154_devaddr_from_raw(daddr);
-		cb->ackreq = wpan_dev->frame_retries >= 0;
+		cb->ackreq = wpan_dev->ackreq;
 	}
 
 	return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index 68f2401..3f89c0a 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -230,6 +230,8 @@
 	[NL802154_ATTR_WPAN_PHY_CAPS] = { .type = NLA_NESTED },
 
 	[NL802154_ATTR_SUPPORTED_COMMANDS] = { .type = NLA_NESTED },
+
+	[NL802154_ATTR_ACKREQ_DEFAULT] = { .type = NLA_U8 },
 };
 
 /* message building helper */
@@ -458,6 +460,7 @@
 	CMD(set_max_csma_backoffs, SET_MAX_CSMA_BACKOFFS);
 	CMD(set_max_frame_retries, SET_MAX_FRAME_RETRIES);
 	CMD(set_lbt_mode, SET_LBT_MODE);
+	CMD(set_ackreq_default, SET_ACKREQ_DEFAULT);
 
 	if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER)
 		CMD(set_tx_power, SET_TX_POWER);
@@ -656,6 +659,10 @@
 	if (nla_put_u8(msg, NL802154_ATTR_LBT_MODE, wpan_dev->lbt))
 		goto nla_put_failure;
 
+	/* ackreq default behaviour */
+	if (nla_put_u8(msg, NL802154_ATTR_ACKREQ_DEFAULT, wpan_dev->ackreq))
+		goto nla_put_failure;
+
 	genlmsg_end(msg, hdr);
 	return 0;
 
@@ -1027,7 +1034,7 @@
 	struct cfg802154_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
 	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
-	bool mode;
+	int mode;
 
 	if (netif_running(dev))
 		return -EBUSY;
@@ -1035,13 +1042,39 @@
 	if (!info->attrs[NL802154_ATTR_LBT_MODE])
 		return -EINVAL;
 
-	mode = !!nla_get_u8(info->attrs[NL802154_ATTR_LBT_MODE]);
+	mode = nla_get_u8(info->attrs[NL802154_ATTR_LBT_MODE]);
+
+	if (mode != 0 && mode != 1)
+		return -EINVAL;
+
 	if (!wpan_phy_supported_bool(mode, rdev->wpan_phy.supported.lbt))
 		return -EINVAL;
 
 	return rdev_set_lbt_mode(rdev, wpan_dev, mode);
 }
 
+static int
+nl802154_set_ackreq_default(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	int ackreq;
+
+	if (netif_running(dev))
+		return -EBUSY;
+
+	if (!info->attrs[NL802154_ATTR_ACKREQ_DEFAULT])
+		return -EINVAL;
+
+	ackreq = nla_get_u8(info->attrs[NL802154_ATTR_ACKREQ_DEFAULT]);
+
+	if (ackreq != 0 && ackreq != 1)
+		return -EINVAL;
+
+	return rdev_set_ackreq_default(rdev, wpan_dev, ackreq);
+}
+
 #define NL802154_FLAG_NEED_WPAN_PHY	0x01
 #define NL802154_FLAG_NEED_NETDEV	0x02
 #define NL802154_FLAG_NEED_RTNL		0x04
@@ -1248,6 +1281,14 @@
 		.internal_flags = NL802154_FLAG_NEED_NETDEV |
 				  NL802154_FLAG_NEED_RTNL,
 	},
+	{
+		.cmd = NL802154_CMD_SET_ACKREQ_DEFAULT,
+		.doit = nl802154_set_ackreq_default,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
 };
 
 /* initialisation/exit functions */
diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h
index b2155a1..03b3575 100644
--- a/net/ieee802154/rdev-ops.h
+++ b/net/ieee802154/rdev-ops.h
@@ -24,6 +24,26 @@
 }
 
 static inline int
+rdev_suspend(struct cfg802154_registered_device *rdev)
+{
+	int ret;
+	trace_802154_rdev_suspend(&rdev->wpan_phy);
+	ret = rdev->ops->suspend(&rdev->wpan_phy);
+	trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+	return ret;
+}
+
+static inline int
+rdev_resume(struct cfg802154_registered_device *rdev)
+{
+	int ret;
+	trace_802154_rdev_resume(&rdev->wpan_phy);
+	ret = rdev->ops->resume(&rdev->wpan_phy);
+	trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+	return ret;
+}
+
+static inline int
 rdev_add_virtual_intf(struct cfg802154_registered_device *rdev, char *name,
 		      unsigned char name_assign_type,
 		      enum nl802154_iftype type, __le64 extended_addr)
@@ -175,4 +195,17 @@
 	return ret;
 }
 
+static inline int
+rdev_set_ackreq_default(struct cfg802154_registered_device *rdev,
+			struct wpan_dev *wpan_dev, bool ackreq)
+{
+	int ret;
+
+	trace_802154_rdev_set_ackreq_default(&rdev->wpan_phy, wpan_dev,
+					     ackreq);
+	ret = rdev->ops->set_ackreq_default(&rdev->wpan_phy, wpan_dev, ackreq);
+	trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+	return ret;
+}
+
 #endif /* __CFG802154_RDEV_OPS */
diff --git a/net/ieee802154/sysfs.c b/net/ieee802154/sysfs.c
index 133b4280..bd88525 100644
--- a/net/ieee802154/sysfs.c
+++ b/net/ieee802154/sysfs.c
@@ -14,11 +14,13 @@
  */
 
 #include <linux/device.h>
+#include <linux/rtnetlink.h>
 
 #include <net/cfg802154.h>
 
 #include "core.h"
 #include "sysfs.h"
+#include "rdev-ops.h"
 
 static inline struct cfg802154_registered_device *
 dev_to_rdev(struct device *dev)
@@ -62,10 +64,46 @@
 };
 ATTRIBUTE_GROUPS(pmib);
 
+#ifdef CONFIG_PM_SLEEP
+static int wpan_phy_suspend(struct device *dev)
+{
+	struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
+	int ret = 0;
+
+	if (rdev->ops->suspend) {
+		rtnl_lock();
+		ret = rdev_suspend(rdev);
+		rtnl_unlock();
+	}
+
+	return ret;
+}
+
+static int wpan_phy_resume(struct device *dev)
+{
+	struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
+	int ret = 0;
+
+	if (rdev->ops->resume) {
+		rtnl_lock();
+		ret = rdev_resume(rdev);
+		rtnl_unlock();
+	}
+
+	return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(wpan_phy_pm_ops, wpan_phy_suspend, wpan_phy_resume);
+#define WPAN_PHY_PM_OPS (&wpan_phy_pm_ops)
+#else
+#define WPAN_PHY_PM_OPS NULL
+#endif
+
 struct class wpan_phy_class = {
 	.name = "ieee802154",
 	.dev_release = wpan_phy_release,
 	.dev_groups = pmib_groups,
+	.pm = WPAN_PHY_PM_OPS,
 };
 
 int wpan_phy_sysfs_init(void)
diff --git a/net/ieee802154/trace.h b/net/ieee802154/trace.h
index 9b5f0eb..9a471e4 100644
--- a/net/ieee802154/trace.h
+++ b/net/ieee802154/trace.h
@@ -40,6 +40,28 @@
  *			rdev->ops traces		     *
  *************************************************************/
 
+DECLARE_EVENT_CLASS(wpan_phy_only_evt,
+	TP_PROTO(struct wpan_phy *wpan_phy),
+	TP_ARGS(wpan_phy),
+	TP_STRUCT__entry(
+		WPAN_PHY_ENTRY
+	),
+	TP_fast_assign(
+		WPAN_PHY_ASSIGN;
+	),
+	TP_printk(WPAN_PHY_PR_FMT, WPAN_PHY_PR_ARG)
+);
+
+DEFINE_EVENT(wpan_phy_only_evt, 802154_rdev_suspend,
+	TP_PROTO(struct wpan_phy *wpan_phy),
+	TP_ARGS(wpan_phy)
+);
+
+DEFINE_EVENT(wpan_phy_only_evt, 802154_rdev_resume,
+	TP_PROTO(struct wpan_phy *wpan_phy),
+	TP_ARGS(wpan_phy)
+);
+
 TRACE_EVENT(802154_rdev_add_virtual_intf,
 	TP_PROTO(struct wpan_phy *wpan_phy, char *name,
 		 enum nl802154_iftype type, __le64 extended_addr),
@@ -253,6 +275,25 @@
 		WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->mode))
 );
 
+TRACE_EVENT(802154_rdev_set_ackreq_default,
+	TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+		 bool ackreq),
+	TP_ARGS(wpan_phy, wpan_dev, ackreq),
+	TP_STRUCT__entry(
+		WPAN_PHY_ENTRY
+		WPAN_DEV_ENTRY
+		__field(bool, ackreq)
+	),
+	TP_fast_assign(
+		WPAN_PHY_ASSIGN;
+		WPAN_DEV_ASSIGN;
+		__entry->ackreq = ackreq;
+	),
+	TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
+		", ackreq default: %s", WPAN_PHY_PR_ARG,
+		WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->ackreq))
+);
+
 TRACE_EVENT(802154_rdev_return_int,
 	TP_PROTO(struct wpan_phy *wpan_phy, int ret),
 	TP_ARGS(wpan_phy, ret),
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 6fb3c90..416dfa0 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -331,20 +331,6 @@
 	  When this option is enabled IP tunnels can be configured to use
 	  FOU or GUE encapsulation.
 
-config GENEVE_CORE
-	tristate "Generic Network Virtualization Encapsulation library"
-	depends on INET
-	select NET_UDP_TUNNEL
-	---help---
-	This allows one to create Geneve virtual interfaces that provide
-	Layer 2 Networks over Layer 3 Networks. Geneve is often used
-	to tunnel virtual network infrastructure in virtualized environments.
-	For more information see:
-	  http://tools.ietf.org/html/draft-gross-geneve-01
-
-	  To compile this driver as a module, choose M here: the module
-
-
 config INET_AH
 	tristate "IP: AH transformation"
 	select XFRM_ALGO
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index efc43f3..89aacb6 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -57,7 +57,6 @@
 obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
 obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o
 obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
-obj-$(CONFIG_GENEVE_CORE) += geneve_core.o
 
 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
 		      xfrm4_output.o xfrm4_protocol.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9532ee8..1d0c3ad 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -112,12 +112,14 @@
 #include <net/raw.h>
 #include <net/icmp.h>
 #include <net/inet_common.h>
+#include <net/ip_tunnels.h>
 #include <net/xfrm.h>
 #include <net/net_namespace.h>
 #include <net/secure_seq.h>
 #ifdef CONFIG_IP_MROUTE
 #include <linux/mroute.h>
 #endif
+#include <net/vrf.h>
 
 
 /* The inetsw table contains everything that inet_create needs to
@@ -426,6 +428,7 @@
 	struct net *net = sock_net(sk);
 	unsigned short snum;
 	int chk_addr_ret;
+	u32 tb_id = RT_TABLE_LOCAL;
 	int err;
 
 	/* If the socket has its own bind function then use it. (RAW) */
@@ -447,7 +450,8 @@
 			goto out;
 	}
 
-	chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
+	tb_id = vrf_dev_table_ifindex(net, sk->sk_bound_dev_if) ? : tb_id;
+	chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
 
 	/* Not specified by any standard per-se, however it breaks too
 	 * many applications when removed.  It is unfortunate since
@@ -1448,38 +1452,51 @@
 }
 EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
 
+u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
+{
+	return  *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
+}
+EXPORT_SYMBOL_GPL(snmp_get_cpu_field);
+
 unsigned long snmp_fold_field(void __percpu *mib, int offt)
 {
 	unsigned long res = 0;
 	int i;
 
 	for_each_possible_cpu(i)
-		res += *(((unsigned long *) per_cpu_ptr(mib, i)) + offt);
+		res += snmp_get_cpu_field(mib, i, offt);
 	return res;
 }
 EXPORT_SYMBOL_GPL(snmp_fold_field);
 
 #if BITS_PER_LONG==32
 
+u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
+			 size_t syncp_offset)
+{
+	void *bhptr;
+	struct u64_stats_sync *syncp;
+	u64 v;
+	unsigned int start;
+
+	bhptr = per_cpu_ptr(mib, cpu);
+	syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
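+	/* Retry the snapshot if a writer updated the counters mid-read. */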
+	do {
+		start = u64_stats_fetch_begin_irq(syncp);
+		v = *(((u64 *)bhptr) + offt);
+	} while (u64_stats_fetch_retry_irq(syncp, start));
+
+	return v;
+}
+EXPORT_SYMBOL_GPL(snmp_get_cpu_field64);
+
 u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
 {
 	u64 res = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		void *bhptr;
-		struct u64_stats_sync *syncp;
-		u64 v;
-		unsigned int start;
-
-		bhptr = per_cpu_ptr(mib, cpu);
-		syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
-		do {
-			start = u64_stats_fetch_begin_irq(syncp);
-			v = *(((u64 *) bhptr) + offt);
-		} while (u64_stats_fetch_retry_irq(syncp, start));
-
-		res += v;
+		res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
 	}
 	return res;
 }
@@ -1780,6 +1797,8 @@
 
 	dev_add_pack(&ip_packet_type);
 
+	ip_tunnel_core_init();
+
 	rc = 0;
 out:
 	return rc;
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index ac9a32e..f2a7102 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -360,8 +360,10 @@
 
 	work_iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl +
 				ahp->icv_trunc_len + seqhi_len);
-	if (!work_iph)
+	if (!work_iph) {
+		err = -ENOMEM;
 		goto out;
+	}
 
 	seqhi = (__be32 *)((char *)work_iph + ihl);
 	auth_data = ah_tmp_auth(seqhi, seqhi_len);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 6c8b1fb..30409b7 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -233,7 +233,7 @@
 		return -EINVAL;
 	}
 
-	neigh->type = inet_addr_type(dev_net(dev), addr);
+	neigh->type = inet_addr_type_dev_table(dev_net(dev), dev, addr);
 
 	parms = in_dev->arp_parms;
 	__neigh_parms_put(neigh->parms);
@@ -291,6 +291,40 @@
 	kfree_skb(skb);
 }
 
+/* Create and send an arp packet. */
+static void arp_send_dst(int type, int ptype, __be32 dest_ip,
+			 struct net_device *dev, __be32 src_ip,
+			 const unsigned char *dest_hw,
+			 const unsigned char *src_hw,
+			 const unsigned char *target_hw, struct sk_buff *oskb)
+{
+	struct sk_buff *skb;
+
+	/* No ARP on this interface. */
+	if (dev->flags & IFF_NOARP)
+		return;
+
+	skb = arp_create(type, ptype, dest_ip, dev, src_ip,
+			 dest_hw, src_hw, target_hw);
+	if (!skb)
+		return;
+
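+	/* Carry the dst from the SKB that triggered resolution, e.g. so
+	 * any tunnel metadata attached to it is preserved on the ARP packet.
+	 */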
+	if (oskb)
+		skb_dst_copy(skb, oskb);
+
+	arp_xmit(skb);
+}
+
+void arp_send(int type, int ptype, __be32 dest_ip,
+	      struct net_device *dev, __be32 src_ip,
+	      const unsigned char *dest_hw, const unsigned char *src_hw,
+	      const unsigned char *target_hw)
+{
+	arp_send_dst(type, ptype, dest_ip, dev, src_ip, dest_hw, src_hw,
+		     target_hw, NULL);
+}
+EXPORT_SYMBOL(arp_send);
+
 static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
 {
 	__be32 saddr = 0;
@@ -309,7 +343,7 @@
 	switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
 	default:
 	case 0:		/* By default announce any local IP */
-		if (skb && inet_addr_type(dev_net(dev),
+		if (skb && inet_addr_type_dev_table(dev_net(dev), dev,
 					  ip_hdr(skb)->saddr) == RTN_LOCAL)
 			saddr = ip_hdr(skb)->saddr;
 		break;
@@ -317,7 +351,8 @@
 		if (!skb)
 			break;
 		saddr = ip_hdr(skb)->saddr;
-		if (inet_addr_type(dev_net(dev), saddr) == RTN_LOCAL) {
+		if (inet_addr_type_dev_table(dev_net(dev), dev,
+					     saddr) == RTN_LOCAL) {
 			/* saddr should be known to target */
 			if (inet_addr_onlink(in_dev, target, saddr))
 				break;
@@ -346,8 +381,9 @@
 		}
 	}
 
-	arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
-		 dst_hw, dev->dev_addr, NULL);
+	arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
+		     dst_hw, dev->dev_addr, NULL,
+		     dev->priv_flags & IFF_XMIT_DST_RELEASE ? NULL : skb);
 }
 
 static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
@@ -597,32 +633,6 @@
 EXPORT_SYMBOL(arp_xmit);
 
 /*
- *	Create and send an arp packet.
- */
-void arp_send(int type, int ptype, __be32 dest_ip,
-	      struct net_device *dev, __be32 src_ip,
-	      const unsigned char *dest_hw, const unsigned char *src_hw,
-	      const unsigned char *target_hw)
-{
-	struct sk_buff *skb;
-
-	/*
-	 *	No arp on this interface.
-	 */
-
-	if (dev->flags&IFF_NOARP)
-		return;
-
-	skb = arp_create(type, ptype, dest_ip, dev, src_ip,
-			 dest_hw, src_hw, target_hw);
-	if (!skb)
-		return;
-
-	arp_xmit(skb);
-}
-EXPORT_SYMBOL(arp_send);
-
-/*
  *	Process an arp request.
  */
 
@@ -742,7 +752,7 @@
 	/* Special case: IPv4 duplicate address detection packet (RFC2131) */
 	if (sip == 0) {
 		if (arp->ar_op == htons(ARPOP_REQUEST) &&
-		    inet_addr_type(net, tip) == RTN_LOCAL &&
+		    inet_addr_type_dev_table(net, dev, tip) == RTN_LOCAL &&
 		    !arp_ignore(in_dev, sip, tip))
 			arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
 				 dev->dev_addr, sha);
@@ -802,16 +812,18 @@
 	n = __neigh_lookup(&arp_tbl, &sip, dev, 0);
 
 	if (IN_DEV_ARP_ACCEPT(in_dev)) {
+		unsigned int addr_type = inet_addr_type_dev_table(net, dev, sip);
+
 		/* Unsolicited ARP is not accepted by default.
 		   It is possible that this option should be enabled for some
 		   devices (strip is a candidate)
 		 */
 		is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip &&
-			  inet_addr_type(net, sip) == RTN_UNICAST;
+			  addr_type == RTN_UNICAST;
 
 		if (!n &&
 		    ((arp->ar_op == htons(ARPOP_REPLY)  &&
-		      inet_addr_type(net, sip) == RTN_UNICAST) || is_garp))
+				addr_type == RTN_UNICAST) || is_garp))
 			n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
 	}
 
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 574fad9..f915abf 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -74,7 +74,7 @@
 	inet->inet_daddr = fl4->daddr;
 	inet->inet_dport = usin->sin_port;
 	sk->sk_state = TCP_ESTABLISHED;
-	inet_set_txhash(sk);
+	sk_set_txhash(sk);
 	inet->inet_id = jiffies;
 
 	sk_dst_set(sk, &rt->dst);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 6bbc549..6fcbd21 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -45,6 +45,8 @@
 #include <net/ip_fib.h>
 #include <net/rtnetlink.h>
 #include <net/xfrm.h>
+#include <net/vrf.h>
+#include <trace/events/fib.h>
 
 #ifndef CONFIG_IP_MULTIPLE_TABLES
 
@@ -211,12 +213,12 @@
  */
 static inline unsigned int __inet_dev_addr_type(struct net *net,
 						const struct net_device *dev,
-						__be32 addr)
+						__be32 addr, u32 tb_id)
 {
 	struct flowi4		fl4 = { .daddr = addr };
 	struct fib_result	res;
 	unsigned int ret = RTN_BROADCAST;
-	struct fib_table *local_table;
+	struct fib_table *table;
 
 	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
 		return RTN_BROADCAST;
@@ -225,10 +227,10 @@
 
 	rcu_read_lock();
 
-	local_table = fib_get_table(net, RT_TABLE_LOCAL);
-	if (local_table) {
+	table = fib_get_table(net, tb_id);
+	if (table) {
 		ret = RTN_UNICAST;
-		if (!fib_table_lookup(local_table, &fl4, &res, FIB_LOOKUP_NOREF)) {
+		if (!fib_table_lookup(table, &fl4, &res, FIB_LOOKUP_NOREF)) {
 			if (!dev || dev == res.fi->fib_dev)
 				ret = res.type;
 		}
@@ -238,19 +240,40 @@
 	return ret;
 }
 
+unsigned int inet_addr_type_table(struct net *net, __be32 addr, u32 tb_id)
+{
+	return __inet_dev_addr_type(net, NULL, addr, tb_id);
+}
+EXPORT_SYMBOL(inet_addr_type_table);
+
 unsigned int inet_addr_type(struct net *net, __be32 addr)
 {
-	return __inet_dev_addr_type(net, NULL, addr);
+	return __inet_dev_addr_type(net, NULL, addr, RT_TABLE_LOCAL);
 }
 EXPORT_SYMBOL(inet_addr_type);
 
 unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
 				__be32 addr)
 {
-	return __inet_dev_addr_type(net, dev, addr);
+	u32 rt_table = vrf_dev_table(dev) ? : RT_TABLE_LOCAL;
+
+	return __inet_dev_addr_type(net, dev, addr, rt_table);
 }
 EXPORT_SYMBOL(inet_dev_addr_type);
 
+/* inet_addr_type with dev == NULL but using the table from a dev
+ * if one is associated
+ */
+unsigned int inet_addr_type_dev_table(struct net *net,
+				      const struct net_device *dev,
+				      __be32 addr)
+{
+	u32 rt_table = vrf_dev_table(dev) ? : RT_TABLE_LOCAL;
+
+	return __inet_dev_addr_type(net, NULL, addr, rt_table);
+}
+EXPORT_SYMBOL(inet_addr_type_dev_table);
+
 __be32 fib_compute_spec_dst(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
@@ -280,6 +303,7 @@
 		fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
 		fl4.flowi4_scope = scope;
 		fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
+		fl4.flowi4_tun_key.tun_id = 0;
 		if (!fib_lookup(net, &fl4, &res, 0))
 			return FIB_RES_PREFSRC(net, res);
 	} else {
@@ -308,16 +332,21 @@
 	bool dev_match;
 
 	fl4.flowi4_oif = 0;
-	fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
+	fl4.flowi4_iif = vrf_master_ifindex_rcu(dev);
+	if (!fl4.flowi4_iif)
+		fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
 	fl4.daddr = src;
 	fl4.saddr = dst;
 	fl4.flowi4_tos = tos;
 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+	fl4.flowi4_tun_key.tun_id = 0;
 
 	no_addr = idev->ifa_list == NULL;
 
 	fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
 
+	trace_fib_validate_source(dev, &fl4);
+
 	net = dev_net(dev);
 	if (fib_lookup(net, &fl4, &res, 0))
 		goto last_resort;
@@ -337,6 +366,9 @@
 		if (nh->nh_dev == dev) {
 			dev_match = true;
 			break;
+		} else if (vrf_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) {
+			dev_match = true;
+			break;
 		}
 	}
 #else
@@ -494,9 +526,12 @@
 
 	addr = sk_extract_addr(&rt->rt_gateway);
 	if (rt->rt_gateway.sa_family == AF_INET && addr) {
+		unsigned int addr_type;
+
 		cfg->fc_gw = addr;
+		addr_type = inet_addr_type_table(net, addr, cfg->fc_table);
 		if (rt->rt_flags & RTF_GATEWAY &&
-		    inet_addr_type(net, addr) == RTN_UNICAST)
+		    addr_type == RTN_UNICAST)
 			cfg->fc_scope = RT_SCOPE_UNIVERSE;
 	}
 
@@ -591,6 +626,8 @@
 	[RTA_METRICS]		= { .type = NLA_NESTED },
 	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
 	[RTA_FLOW]		= { .type = NLA_U32 },
+	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
+	[RTA_ENCAP]		= { .type = NLA_NESTED },
 };
 
 static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
@@ -656,6 +693,12 @@
 		case RTA_TABLE:
 			cfg->fc_table = nla_get_u32(attr);
 			break;
+		case RTA_ENCAP:
+			cfg->fc_encap = attr;
+			break;
+		case RTA_ENCAP_TYPE:
+			cfg->fc_encap_type = nla_get_u16(attr);
+			break;
 		}
 	}
 
@@ -760,6 +803,7 @@
 static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
 {
 	struct net *net = dev_net(ifa->ifa_dev->dev);
+	u32 tb_id = vrf_dev_table_rtnl(ifa->ifa_dev->dev);
 	struct fib_table *tb;
 	struct fib_config cfg = {
 		.fc_protocol = RTPROT_KERNEL,
@@ -774,11 +818,10 @@
 		},
 	};
 
-	if (type == RTN_UNICAST)
-		tb = fib_new_table(net, RT_TABLE_MAIN);
-	else
-		tb = fib_new_table(net, RT_TABLE_LOCAL);
+	if (!tb_id)
+		tb_id = (type == RTN_UNICAST) ? RT_TABLE_MAIN : RT_TABLE_LOCAL;
 
+	tb = fib_new_table(net, tb_id);
 	if (!tb)
 		return;
 
@@ -960,11 +1003,14 @@
 			fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim);
 	}
 	if (!(ok & LOCAL_OK)) {
+		unsigned int addr_type;
+
 		fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim);
 
 		/* Check that this local address finally disappeared. */
-		if (gone &&
-		    inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) {
+		addr_type = inet_addr_type_dev_table(dev_net(dev), dev,
+						     ifa->ifa_local);
+		if (gone && addr_type != RTN_LOCAL) {
 			/* And the last, but not the least thing.
 			 * We must flush stray FIB entries.
 			 *
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 3a06586..064bd3c 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -42,6 +42,7 @@
 #include <net/ip_fib.h>
 #include <net/netlink.h>
 #include <net/nexthop.h>
+#include <net/lwtunnel.h>
 
 #include "fib_lookup.h"
 
@@ -208,6 +209,7 @@
 	change_nexthops(fi) {
 		if (nexthop_nh->nh_dev)
 			dev_put(nexthop_nh->nh_dev);
+		lwtstate_put(nexthop_nh->nh_lwtstate);
 		free_nh_exceptions(nexthop_nh);
 		rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
 		rt_fibinfo_free(&nexthop_nh->nh_rth_input);
@@ -266,6 +268,7 @@
 #ifdef CONFIG_IP_ROUTE_CLASSID
 		    nh->nh_tclassid != onh->nh_tclassid ||
 #endif
+		    lwtunnel_cmp_encap(nh->nh_lwtstate, onh->nh_lwtstate) ||
 		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_COMPARE_MASK))
 			return -1;
 		onh++;
@@ -366,6 +369,7 @@
 	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
 
 	if (fi->fib_nhs) {
+		size_t nh_encapsize = 0;
 		/* Also handles the special case fib_nhs == 1 */
 
 		/* each nexthop is packed in an attribute */
@@ -374,8 +378,21 @@
 		/* may contain flow and gateway attribute */
 		nhsize += 2 * nla_total_size(4);
 
+		/* grab encap info */
+		for_nexthops(fi) {
+			if (nh->nh_lwtstate) {
+				/* RTA_ENCAP_TYPE */
+				nh_encapsize += lwtunnel_get_encap_size(
+						nh->nh_lwtstate);
+				/* RTA_ENCAP */
+				nh_encapsize +=  nla_total_size(2);
+			}
+		} endfor_nexthops(fi);
+
 		/* all nexthops are packed in a nested attribute */
-		payload += nla_total_size(fi->fib_nhs * nhsize);
+		payload += nla_total_size((fi->fib_nhs * nhsize) +
+					  nh_encapsize);
+
 	}
 
 	return payload;
@@ -421,13 +438,15 @@
 	if (n) {
 		state = n->nud_state;
 		neigh_release(n);
+	} else {
+		return 0;
 	}
 	if (state == NUD_REACHABLE)
 		return 0;
 	if ((state & NUD_VALID) && order != dflt)
 		return 0;
 	if ((state & NUD_VALID) ||
-	    (*last_idx < 0 && order > dflt)) {
+	    (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
 		*last_resort = fi;
 		*last_idx = order;
 	}
@@ -452,6 +471,9 @@
 static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
 		       int remaining, struct fib_config *cfg)
 {
+	struct net *net = cfg->fc_nlinfo.nl_net;
+	int ret;
+
 	change_nexthops(fi) {
 		int attrlen;
 
@@ -475,18 +497,70 @@
 			if (nexthop_nh->nh_tclassid)
 				fi->fib_net->ipv4.fib_num_tclassid_users++;
 #endif
+			nla = nla_find(attrs, attrlen, RTA_ENCAP);
+			if (nla) {
+				struct lwtunnel_state *lwtstate;
+				struct net_device *dev = NULL;
+				struct nlattr *nla_entype;
+
+				nla_entype = nla_find(attrs, attrlen,
+						      RTA_ENCAP_TYPE);
+				if (!nla_entype)
+					goto err_inval;
+				if (cfg->fc_oif)
+					dev = __dev_get_by_index(net, cfg->fc_oif);
+				ret = lwtunnel_build_state(dev, nla_get_u16(
+							   nla_entype),
+							   nla,  AF_INET, cfg,
+							   &lwtstate);
+				if (ret)
+					goto errout;
+				nexthop_nh->nh_lwtstate =
+					lwtstate_get(lwtstate);
+			}
 		}
 
 		rtnh = rtnh_next(rtnh, &remaining);
 	} endfor_nexthops(fi);
 
 	return 0;
+
+err_inval:
+	ret = -EINVAL;
+
+errout:
+	return ret;
 }
 
 #endif
 
+static int fib_encap_match(struct net *net, u16 encap_type,
+			   struct nlattr *encap,
+			   int oif, const struct fib_nh *nh,
+			   const struct fib_config *cfg)
+{
+	struct lwtunnel_state *lwtstate;
+	struct net_device *dev = NULL;
+	int ret, result = 0;
+
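+	/* Build a transient lwtunnel state from the request and compare it
+	 * against the state attached to the existing nexthop.
+	 */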
+	if (encap_type == LWTUNNEL_ENCAP_NONE)
+		return 0;
+
+	if (oif)
+		dev = __dev_get_by_index(net, oif);
+	ret = lwtunnel_build_state(dev, encap_type, encap,
+				   AF_INET, cfg, &lwtstate);
+	if (!ret) {
+		result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
+		lwtstate_free(lwtstate);
+	}
+
+	return result;
+}
+
 int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
 {
+	struct net *net = cfg->fc_nlinfo.nl_net;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	struct rtnexthop *rtnh;
 	int remaining;
@@ -496,6 +570,12 @@
 		return 1;
 
 	if (cfg->fc_oif || cfg->fc_gw) {
+		if (cfg->fc_encap) {
+			if (fib_encap_match(net, cfg->fc_encap_type,
+					    cfg->fc_encap, cfg->fc_oif,
+					    fi->fib_nh, cfg))
+				return 1;
+		}
 		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
 		    (!cfg->fc_gw  || cfg->fc_gw == fi->fib_nh->nh_gw))
 			return 0;
@@ -585,7 +665,7 @@
 static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
 			struct fib_nh *nh)
 {
-	int err;
+	int err = 0;
 	struct net *net;
 	struct net_device *dev;
 
@@ -594,16 +674,18 @@
 		struct fib_result res;
 
 		if (nh->nh_flags & RTNH_F_ONLINK) {
+			unsigned int addr_type;
 
 			if (cfg->fc_scope >= RT_SCOPE_LINK)
 				return -EINVAL;
-			if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
-				return -EINVAL;
 			dev = __dev_get_by_index(net, nh->nh_oif);
 			if (!dev)
 				return -ENODEV;
 			if (!(dev->flags & IFF_UP))
 				return -ENETDOWN;
+			addr_type = inet_addr_type_dev_table(net, dev, nh->nh_gw);
+			if (addr_type != RTN_UNICAST)
+				return -EINVAL;
 			if (!netif_carrier_ok(dev))
 				nh->nh_flags |= RTNH_F_LINKDOWN;
 			nh->nh_dev = dev;
@@ -613,6 +695,7 @@
 		}
 		rcu_read_lock();
 		{
+			struct fib_table *tbl = NULL;
 			struct flowi4 fl4 = {
 				.daddr = nh->nh_gw,
 				.flowi4_scope = cfg->fc_scope + 1,
@@ -623,8 +706,24 @@
 			/* It is not necessary, but requires a bit of thinking */
 			if (fl4.flowi4_scope < RT_SCOPE_LINK)
 				fl4.flowi4_scope = RT_SCOPE_LINK;
-			err = fib_lookup(net, &fl4, &res,
-					 FIB_LOOKUP_IGNORE_LINKSTATE);
+
+			if (cfg->fc_table)
+				tbl = fib_get_table(net, cfg->fc_table);
+
+			if (tbl)
+				err = fib_table_lookup(tbl, &fl4, &res,
+						       FIB_LOOKUP_IGNORE_LINKSTATE |
+						       FIB_LOOKUP_NOREF);
+
+			/* On error, or if no table was given, do a full lookup.
+			 * This is needed, for example, when nexthops are in the
+			 * local table rather than the given table.
+			 */
+			if (!tbl || err) {
+				err = fib_lookup(net, &fl4, &res,
+						 FIB_LOOKUP_IGNORE_LINKSTATE);
+			}
+
 			if (err) {
 				rcu_read_unlock();
 				return err;
@@ -760,6 +859,67 @@
 	return nh->nh_saddr;
 }
 
+static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
+{
+	if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
+	    fib_prefsrc != cfg->fc_dst) {
+		u32 tb_id = cfg->fc_table;
+
+		if (tb_id == RT_TABLE_MAIN)
+			tb_id = RT_TABLE_LOCAL;
+
+		if (inet_addr_type_table(cfg->fc_nlinfo.nl_net,
+					 fib_prefsrc, tb_id) != RTN_LOCAL) {
+			return false;
+		}
+	}
+	return true;
+}
+
+static int
+fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
+{
+	bool ecn_ca = false;
+	struct nlattr *nla;
+	int remaining;
+
+	if (!cfg->fc_mx)
+		return 0;
+
+	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
+		int type = nla_type(nla);
+		u32 val;
+
+		if (!type)
+			continue;
+		if (type > RTAX_MAX)
+			return -EINVAL;
+
+		if (type == RTAX_CC_ALGO) {
+			char tmp[TCP_CA_NAME_MAX];
+
+			nla_strlcpy(tmp, nla, sizeof(tmp));
+			val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
+			if (val == TCP_CA_UNSPEC)
+				return -EINVAL;
+		} else {
+			val = nla_get_u32(nla);
+		}
+		if (type == RTAX_ADVMSS && val > 65535 - 40)
+			val = 65535 - 40;
+		if (type == RTAX_MTU && val > 65535 - 15)
+			val = 65535 - 15;
+		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
+			return -EINVAL;
+		fi->fib_metrics[type - 1] = val;
+	}
+
+	if (ecn_ca)
+		fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
+
+	return 0;
+}
+
 struct fib_info *fib_create_info(struct fib_config *cfg)
 {
 	int err;
@@ -832,36 +992,9 @@
 			goto failure;
 	} endfor_nexthops(fi)
 
-	if (cfg->fc_mx) {
-		struct nlattr *nla;
-		int remaining;
-
-		nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
-			int type = nla_type(nla);
-
-			if (type) {
-				u32 val;
-
-				if (type > RTAX_MAX)
-					goto err_inval;
-				if (type == RTAX_CC_ALGO) {
-					char tmp[TCP_CA_NAME_MAX];
-
-					nla_strlcpy(tmp, nla, sizeof(tmp));
-					val = tcp_ca_get_key_by_name(tmp);
-					if (val == TCP_CA_UNSPEC)
-						goto err_inval;
-				} else {
-					val = nla_get_u32(nla);
-				}
-				if (type == RTAX_ADVMSS && val > 65535 - 40)
-					val = 65535 - 40;
-				if (type == RTAX_MTU && val > 65535 - 15)
-					val = 65535 - 15;
-				fi->fib_metrics[type - 1] = val;
-			}
-		}
-	}
+	err = fib_convert_metrics(fi, cfg);
+	if (err)
+		goto failure;
 
 	if (cfg->fc_mp) {
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -882,6 +1015,22 @@
 	} else {
 		struct fib_nh *nh = fi->fib_nh;
 
+		if (cfg->fc_encap) {
+			struct lwtunnel_state *lwtstate;
+			struct net_device *dev = NULL;
+
+			if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE)
+				goto err_inval;
+			if (cfg->fc_oif)
+				dev = __dev_get_by_index(net, cfg->fc_oif);
+			err = lwtunnel_build_state(dev, cfg->fc_encap_type,
+						   cfg->fc_encap, AF_INET, cfg,
+						   &lwtstate);
+			if (err)
+				goto failure;
+
+			nh->nh_lwtstate = lwtstate_get(lwtstate);
+		}
 		nh->nh_oif = cfg->fc_oif;
 		nh->nh_gw = cfg->fc_gw;
 		nh->nh_flags = cfg->fc_flags;
@@ -940,12 +1089,8 @@
 			fi->fib_flags |= RTNH_F_LINKDOWN;
 	}
 
-	if (fi->fib_prefsrc) {
-		if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
-		    fi->fib_prefsrc != cfg->fc_dst)
-			if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)
-				goto err_inval;
-	}
+	if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc))
+		goto err_inval;
 
 	change_nexthops(fi) {
 		fib_info_update_nh_saddr(net, nexthop_nh);
@@ -1055,6 +1200,8 @@
 		    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
 			goto nla_put_failure;
 #endif
+		if (fi->fib_nh->nh_lwtstate)
+			lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate);
 	}
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	if (fi->fib_nhs > 1) {
@@ -1090,6 +1237,8 @@
 			    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
 				goto nla_put_failure;
 #endif
+			if (nh->nh_lwtstate)
+				lwtunnel_fill_encap(skb, nh->nh_lwtstate);
 			/* length of rtnetlink header + attributes */
 			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
 		} endfor_nexthops(fi);
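
The fib_convert_metrics() helper factored out above keeps the old clamping rules: RTAX_ADVMSS is capped at 65535 - 40 (maximum IP packet size minus 40 bytes of IPv4+TCP headers) and RTAX_MTU at 65535 - 15. A minimal userspace sketch of that clamping, for illustration only (the function names are ours, not kernel API):

#include <stdint.h>
#include <stdio.h>

static uint32_t clamp_advmss(uint32_t val)
{
	/* 40 bytes = minimal IPv4 + TCP header overhead */
	return val > 65535 - 40 ? 65535 - 40 : val;
}

static uint32_t clamp_mtu(uint32_t val)
{
	return val > 65535 - 15 ? 65535 - 15 : val;
}

int main(void)
{
	printf("advmss 70000 -> %u\n", clamp_advmss(70000)); /* 65495 */
	printf("mtu    70000 -> %u\n", clamp_mtu(70000));    /* 65520 */
	return 0;
}
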
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index b0c6258..26d6ffb 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -81,6 +81,7 @@
 #include <net/sock.h>
 #include <net/ip_fib.h>
 #include <net/switchdev.h>
+#include <trace/events/fib.h>
 #include "fib_lookup.h"
 
 #define MAX_STAT_DEPTH 32
@@ -1278,6 +1279,8 @@
 	unsigned long index;
 	t_key cindex;
 
+	trace_fib_table_lookup(tb->tb_id, flp);
+
 	pn = t->kv;
 	cindex = 0;
 
@@ -1423,8 +1426,11 @@
 			    nh->nh_flags & RTNH_F_LINKDOWN &&
 			    !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
 				continue;
-			if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
-				continue;
+			if (!(flp->flowi4_flags & FLOWI_FLAG_VRFSRC)) {
+				if (flp->flowi4_oif &&
+				    flp->flowi4_oif != nh->nh_oif)
+					continue;
+			}
 
 			if (!(fib_flags & FIB_LOOKUP_NOREF))
 				atomic_inc(&fi->fib_clntref);
@@ -1439,6 +1445,8 @@
 #ifdef CONFIG_IP_FIB_TRIE_STATS
 			this_cpu_inc(stats->semantic_match_passed);
 #endif
+			trace_fib_table_lookup_nh(nh);
+
 			return err;
 		}
 	}
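
The fib_trie.c hunk above relaxes the output-interface match: when FLOWI_FLAG_VRFSRC is set, a nexthop is no longer rejected merely because flowi4_oif differs. The predicate, restated as a standalone sketch (the flag value and names here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

#define FLOWI_FLAG_VRFSRC 0x04 /* illustrative value */

static bool nh_oif_matches(unsigned int flow_flags, int flow_oif, int nh_oif)
{
	if (flow_flags & FLOWI_FLAG_VRFSRC)
		return true;            /* VRF source: skip the oif check */
	return !flow_oif || flow_oif == nh_oif;
}

int main(void)
{
	printf("%d\n", nh_oif_matches(0, 3, 5));                 /* 0: mismatch */
	printf("%d\n", nh_oif_matches(FLOWI_FLAG_VRFSRC, 3, 5)); /* 1: bypass */
	return 0;
}
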
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 34968cd..e0fcbbb 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -79,7 +79,11 @@
 	__be16 *pd = data;
 	size_t start = ntohs(pd[0]);
 	size_t offset = ntohs(pd[1]);
-	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
+	size_t plen = sizeof(struct udphdr) + hdrlen +
+	    max_t(size_t, offset + sizeof(u16), start);
+
+	if (skb->remcsum_offload)
+		return guehdr;
 
 	if (!pskb_may_pull(skb, plen))
 		return NULL;
@@ -221,29 +225,21 @@
 
 static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
 				      struct guehdr *guehdr, void *data,
-				      size_t hdrlen, u8 ipproto,
-				      struct gro_remcsum *grc, bool nopartial)
+				      size_t hdrlen, struct gro_remcsum *grc,
+				      bool nopartial)
 {
 	__be16 *pd = data;
 	size_t start = ntohs(pd[0]);
 	size_t offset = ntohs(pd[1]);
-	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
 
 	if (skb->remcsum_offload)
-		return NULL;
+		return guehdr;
 
 	if (!NAPI_GRO_CB(skb)->csum_valid)
 		return NULL;
 
-	/* Pull checksum that will be written */
-	if (skb_gro_header_hard(skb, off + plen)) {
-		guehdr = skb_gro_header_slow(skb, off + plen, off);
-		if (!guehdr)
-			return NULL;
-	}
-
-	skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen,
-				start, offset, grc, nopartial);
+	guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
+					 start, offset, grc, nopartial);
 
 	skb->remcsum_offload = 1;
 
@@ -307,10 +303,10 @@
 
 		if (flags & GUE_PFLAG_REMCSUM) {
 			guehdr = gue_gro_remcsum(skb, off, guehdr,
-						 data + doffset, hdrlen,
-						 guehdr->proto_ctype, &grc,
+						 data + doffset, hdrlen, &grc,
 						 !!(fou->flags &
 						    FOU_F_REMCSUM_NOPARTIAL));
+
 			if (!guehdr)
 				goto out;
 
@@ -351,7 +347,7 @@
 	rcu_read_lock();
 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
 	ops = rcu_dereference(offloads[guehdr->proto_ctype]);
-	if (WARN_ON(!ops || !ops->callbacks.gro_receive))
+	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
 		goto out_unlock;
 
 	pp = ops->callbacks.gro_receive(head, skb);
@@ -570,7 +566,7 @@
 	if (info->attrs[FOU_ATTR_AF]) {
 		u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);
 
-		if (family != AF_INET && family != AF_INET6)
+		if (family != AF_INET)
 			return -EINVAL;
 
 		cfg->udp_config.family = family;
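
The fou.c fix above accounts for the UDP header when computing how much of the packet must be linear before the remote checksum offload fields can be touched. The corrected length computation, as a standalone sketch (structure and names are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct udphdr_sketch { uint16_t source, dest, len, check; }; /* 8 bytes */

static size_t remcsum_pull_len(size_t hdrlen, size_t start, size_t offset)
{
	size_t tail = offset + sizeof(uint16_t);

	/* Must be able to reach both the checksum start and the 16-bit
	 * checksum field itself, past the UDP and GUE headers. */
	return sizeof(struct udphdr_sketch) + hdrlen +
	       (tail > start ? tail : start);
}

int main(void)
{
	printf("%zu\n", remcsum_pull_len(8, 20, 16)); /* 8 + 8 + 20 = 36 */
	return 0;
}
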
diff --git a/net/ipv4/geneve_core.c b/net/ipv4/geneve_core.c
deleted file mode 100644
index 311a4ba..0000000
--- a/net/ipv4/geneve_core.c
+++ /dev/null
@@ -1,447 +0,0 @@
-/*
- * Geneve: Generic Network Virtualization Encapsulation
- *
- * Copyright (c) 2014 Nicira, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/list.h>
-#include <linux/netdevice.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/igmp.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-#include <linux/ethtool.h>
-#include <linux/mutex.h>
-#include <net/arp.h>
-#include <net/ndisc.h>
-#include <net/ip.h>
-#include <net/ip_tunnels.h>
-#include <net/icmp.h>
-#include <net/udp.h>
-#include <net/rtnetlink.h>
-#include <net/route.h>
-#include <net/dsfield.h>
-#include <net/inet_ecn.h>
-#include <net/net_namespace.h>
-#include <net/netns/generic.h>
-#include <net/geneve.h>
-#include <net/protocol.h>
-#include <net/udp_tunnel.h>
-#if IS_ENABLED(CONFIG_IPV6)
-#include <net/ipv6.h>
-#include <net/addrconf.h>
-#include <net/ip6_tunnel.h>
-#include <net/ip6_checksum.h>
-#endif
-
-/* Protects sock_list and refcounts. */
-static DEFINE_MUTEX(geneve_mutex);
-
-/* per-network namespace private data for this module */
-struct geneve_net {
-	struct list_head	sock_list;
-};
-
-static int geneve_net_id;
-
-static struct geneve_sock *geneve_find_sock(struct net *net,
-					    sa_family_t family, __be16 port)
-{
-	struct geneve_net *gn = net_generic(net, geneve_net_id);
-	struct geneve_sock *gs;
-
-	list_for_each_entry(gs, &gn->sock_list, list) {
-		if (inet_sk(gs->sock->sk)->inet_sport == port &&
-		    inet_sk(gs->sock->sk)->sk.sk_family == family)
-			return gs;
-	}
-
-	return NULL;
-}
-
-static void geneve_build_header(struct genevehdr *geneveh,
-				__be16 tun_flags, u8 vni[3],
-				u8 options_len, u8 *options)
-{
-	geneveh->ver = GENEVE_VER;
-	geneveh->opt_len = options_len / 4;
-	geneveh->oam = !!(tun_flags & TUNNEL_OAM);
-	geneveh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
-	geneveh->rsvd1 = 0;
-	memcpy(geneveh->vni, vni, 3);
-	geneveh->proto_type = htons(ETH_P_TEB);
-	geneveh->rsvd2 = 0;
-
-	memcpy(geneveh->options, options, options_len);
-}
-
-/* Transmit a fully formatted Geneve frame.
- *
- * When calling this function. The skb->data should point
- * to the geneve header which is fully formed.
- *
- * This function will add other UDP tunnel headers.
- */
-int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
-		    struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
-		    __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
-		    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
-		    bool csum, bool xnet)
-{
-	struct genevehdr *gnvh;
-	int min_headroom;
-	int err;
-
-	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
-			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
-			+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
-
-	err = skb_cow_head(skb, min_headroom);
-	if (unlikely(err)) {
-		kfree_skb(skb);
-		return err;
-	}
-
-	skb = vlan_hwaccel_push_inside(skb);
-	if (unlikely(!skb))
-		return -ENOMEM;
-
-	skb = udp_tunnel_handle_offloads(skb, csum);
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
-
-	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
-	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
-
-	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
-
-	return udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, src, dst,
-				   tos, ttl, df, src_port, dst_port, xnet,
-				   !csum);
-}
-EXPORT_SYMBOL_GPL(geneve_xmit_skb);
-
-static int geneve_hlen(struct genevehdr *gh)
-{
-	return sizeof(*gh) + gh->opt_len * 4;
-}
-
-static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
-					   struct sk_buff *skb,
-					   struct udp_offload *uoff)
-{
-	struct sk_buff *p, **pp = NULL;
-	struct genevehdr *gh, *gh2;
-	unsigned int hlen, gh_len, off_gnv;
-	const struct packet_offload *ptype;
-	__be16 type;
-	int flush = 1;
-
-	off_gnv = skb_gro_offset(skb);
-	hlen = off_gnv + sizeof(*gh);
-	gh = skb_gro_header_fast(skb, off_gnv);
-	if (skb_gro_header_hard(skb, hlen)) {
-		gh = skb_gro_header_slow(skb, hlen, off_gnv);
-		if (unlikely(!gh))
-			goto out;
-	}
-
-	if (gh->ver != GENEVE_VER || gh->oam)
-		goto out;
-	gh_len = geneve_hlen(gh);
-
-	hlen = off_gnv + gh_len;
-	if (skb_gro_header_hard(skb, hlen)) {
-		gh = skb_gro_header_slow(skb, hlen, off_gnv);
-		if (unlikely(!gh))
-			goto out;
-	}
-
-	flush = 0;
-
-	for (p = *head; p; p = p->next) {
-		if (!NAPI_GRO_CB(p)->same_flow)
-			continue;
-
-		gh2 = (struct genevehdr *)(p->data + off_gnv);
-		if (gh->opt_len != gh2->opt_len ||
-		    memcmp(gh, gh2, gh_len)) {
-			NAPI_GRO_CB(p)->same_flow = 0;
-			continue;
-		}
-	}
-
-	type = gh->proto_type;
-
-	rcu_read_lock();
-	ptype = gro_find_receive_by_type(type);
-	if (!ptype) {
-		flush = 1;
-		goto out_unlock;
-	}
-
-	skb_gro_pull(skb, gh_len);
-	skb_gro_postpull_rcsum(skb, gh, gh_len);
-	pp = ptype->callbacks.gro_receive(head, skb);
-
-out_unlock:
-	rcu_read_unlock();
-out:
-	NAPI_GRO_CB(skb)->flush |= flush;
-
-	return pp;
-}
-
-static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
-			       struct udp_offload *uoff)
-{
-	struct genevehdr *gh;
-	struct packet_offload *ptype;
-	__be16 type;
-	int gh_len;
-	int err = -ENOSYS;
-
-	udp_tunnel_gro_complete(skb, nhoff);
-
-	gh = (struct genevehdr *)(skb->data + nhoff);
-	gh_len = geneve_hlen(gh);
-	type = gh->proto_type;
-
-	rcu_read_lock();
-	ptype = gro_find_complete_by_type(type);
-	if (ptype)
-		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
-
-	rcu_read_unlock();
-	return err;
-}
-
-static void geneve_notify_add_rx_port(struct geneve_sock *gs)
-{
-	struct sock *sk = gs->sock->sk;
-	sa_family_t sa_family = sk->sk_family;
-	int err;
-
-	if (sa_family == AF_INET) {
-		err = udp_add_offload(&gs->udp_offloads);
-		if (err)
-			pr_warn("geneve: udp_add_offload failed with status %d\n",
-				err);
-	}
-}
-
-static void geneve_notify_del_rx_port(struct geneve_sock *gs)
-{
-	struct sock *sk = gs->sock->sk;
-	sa_family_t sa_family = sk->sk_family;
-
-	if (sa_family == AF_INET)
-		udp_del_offload(&gs->udp_offloads);
-}
-
-/* Callback from net/ipv4/udp.c to receive packets */
-static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
-{
-	struct genevehdr *geneveh;
-	struct geneve_sock *gs;
-	int opts_len;
-
-	/* Need Geneve and inner Ethernet header to be present */
-	if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
-		goto error;
-
-	/* Return packets with reserved bits set */
-	geneveh = geneve_hdr(skb);
-
-	if (unlikely(geneveh->ver != GENEVE_VER))
-		goto error;
-
-	if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
-		goto error;
-
-	opts_len = geneveh->opt_len * 4;
-	if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
-				 htons(ETH_P_TEB)))
-		goto drop;
-
-	gs = rcu_dereference_sk_user_data(sk);
-	if (!gs)
-		goto drop;
-
-	gs->rcv(gs, skb);
-	return 0;
-
-drop:
-	/* Consume bad packet */
-	kfree_skb(skb);
-	return 0;
-
-error:
-	/* Let the UDP layer deal with the skb */
-	return 1;
-}
-
-static struct socket *geneve_create_sock(struct net *net, bool ipv6,
-					 __be16 port)
-{
-	struct socket *sock;
-	struct udp_port_cfg udp_conf;
-	int err;
-
-	memset(&udp_conf, 0, sizeof(udp_conf));
-
-	if (ipv6) {
-		udp_conf.family = AF_INET6;
-	} else {
-		udp_conf.family = AF_INET;
-		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
-	}
-
-	udp_conf.local_udp_port = port;
-
-	/* Open UDP socket */
-	err = udp_sock_create(net, &udp_conf, &sock);
-	if (err < 0)
-		return ERR_PTR(err);
-
-	return sock;
-}
-
-/* Create new listen socket if needed */
-static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
-						geneve_rcv_t *rcv, void *data,
-						bool ipv6)
-{
-	struct geneve_net *gn = net_generic(net, geneve_net_id);
-	struct geneve_sock *gs;
-	struct socket *sock;
-	struct udp_tunnel_sock_cfg tunnel_cfg;
-
-	gs = kzalloc(sizeof(*gs), GFP_KERNEL);
-	if (!gs)
-		return ERR_PTR(-ENOMEM);
-
-	sock = geneve_create_sock(net, ipv6, port);
-	if (IS_ERR(sock)) {
-		kfree(gs);
-		return ERR_CAST(sock);
-	}
-
-	gs->sock = sock;
-	gs->refcnt = 1;
-	gs->rcv = rcv;
-	gs->rcv_data = data;
-
-	/* Initialize the geneve udp offloads structure */
-	gs->udp_offloads.port = port;
-	gs->udp_offloads.callbacks.gro_receive  = geneve_gro_receive;
-	gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
-	geneve_notify_add_rx_port(gs);
-
-	/* Mark socket as an encapsulation socket */
-	tunnel_cfg.sk_user_data = gs;
-	tunnel_cfg.encap_type = 1;
-	tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
-	tunnel_cfg.encap_destroy = NULL;
-	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
-
-	list_add(&gs->list, &gn->sock_list);
-
-	return gs;
-}
-
-struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
-				    geneve_rcv_t *rcv, void *data,
-				    bool no_share, bool ipv6)
-{
-	struct geneve_sock *gs;
-
-	mutex_lock(&geneve_mutex);
-
-	gs = geneve_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
-	if (gs) {
-		if (!no_share && gs->rcv == rcv)
-			gs->refcnt++;
-		else
-			gs = ERR_PTR(-EBUSY);
-	} else {
-		gs = geneve_socket_create(net, port, rcv, data, ipv6);
-	}
-
-	mutex_unlock(&geneve_mutex);
-
-	return gs;
-}
-EXPORT_SYMBOL_GPL(geneve_sock_add);
-
-void geneve_sock_release(struct geneve_sock *gs)
-{
-	mutex_lock(&geneve_mutex);
-
-	if (--gs->refcnt)
-		goto unlock;
-
-	list_del(&gs->list);
-	geneve_notify_del_rx_port(gs);
-	udp_tunnel_sock_release(gs->sock);
-	kfree_rcu(gs, rcu);
-
-unlock:
-	mutex_unlock(&geneve_mutex);
-}
-EXPORT_SYMBOL_GPL(geneve_sock_release);
-
-static __net_init int geneve_init_net(struct net *net)
-{
-	struct geneve_net *gn = net_generic(net, geneve_net_id);
-
-	INIT_LIST_HEAD(&gn->sock_list);
-
-	return 0;
-}
-
-static struct pernet_operations geneve_net_ops = {
-	.init = geneve_init_net,
-	.id   = &geneve_net_id,
-	.size = sizeof(struct geneve_net),
-};
-
-static int __init geneve_init_module(void)
-{
-	int rc;
-
-	rc = register_pernet_subsys(&geneve_net_ops);
-	if (rc)
-		return rc;
-
-	pr_info("Geneve core logic\n");
-
-	return 0;
-}
-module_init(geneve_init_module);
-
-static void __exit geneve_cleanup_module(void)
-{
-	unregister_pernet_subsys(&geneve_net_ops);
-}
-module_exit(geneve_cleanup_module);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jesse Gross <jesse@nicira.com>");
-MODULE_DESCRIPTION("Driver library for GENEVE encapsulated traffic");
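
The removed geneve_build_header() wrote the fixed 8-byte Geneve base header (2-bit version, 6-bit option length in 4-byte words, OAM/critical flags, 16-bit protocol, 24-bit VNI, reserved byte) followed by the options. For reference, a byte-level sketch of that layout, independent of the kernel structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_P_TEB 0x6558 /* transparent Ethernet bridging */

static void geneve_pack(uint8_t *buf, uint8_t opt_words, int oam, int crit,
			const uint8_t vni[3])
{
	buf[0] = (uint8_t)((0 /* ver */ << 6) | (opt_words & 0x3f));
	buf[1] = (uint8_t)((!!oam << 7) | (!!crit << 6)); /* low 6 bits rsvd */
	buf[2] = ETH_P_TEB >> 8;
	buf[3] = ETH_P_TEB & 0xff;
	memcpy(&buf[4], vni, 3);
	buf[7] = 0; /* reserved */
}

int main(void)
{
	uint8_t hdr[8], vni[3] = { 0x00, 0x00, 0x2a };
	int i;

	geneve_pack(hdr, 0, 0, 0, vni);
	for (i = 0; i < 8; i++)
		printf("%02x ", hdr[i]);
	printf("\n"); /* 00 00 65 58 00 00 2a 00 */
	return 0;
}
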
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 4a7b5b2..d9c552a 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -31,7 +31,6 @@
 #include <net/xfrm.h>
 
 static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
-static struct gre_cisco_protocol __rcu *gre_cisco_proto_list[GRE_IP_PROTO_MAX];
 
 int gre_add_protocol(const struct gre_protocol *proto, u8 version)
 {
@@ -61,197 +60,6 @@
 }
 EXPORT_SYMBOL_GPL(gre_del_protocol);
 
-void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
-		      int hdr_len)
-{
-	struct gre_base_hdr *greh;
-
-	skb_push(skb, hdr_len);
-
-	skb_reset_transport_header(skb);
-	greh = (struct gre_base_hdr *)skb->data;
-	greh->flags = tnl_flags_to_gre_flags(tpi->flags);
-	greh->protocol = tpi->proto;
-
-	if (tpi->flags&(TUNNEL_KEY|TUNNEL_CSUM|TUNNEL_SEQ)) {
-		__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
-
-		if (tpi->flags&TUNNEL_SEQ) {
-			*ptr = tpi->seq;
-			ptr--;
-		}
-		if (tpi->flags&TUNNEL_KEY) {
-			*ptr = tpi->key;
-			ptr--;
-		}
-		if (tpi->flags&TUNNEL_CSUM &&
-		    !(skb_shinfo(skb)->gso_type &
-		      (SKB_GSO_GRE|SKB_GSO_GRE_CSUM))) {
-			*ptr = 0;
-			*(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
-								 skb->len, 0));
-		}
-	}
-}
-EXPORT_SYMBOL_GPL(gre_build_header);
-
-static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
-			    bool *csum_err)
-{
-	const struct gre_base_hdr *greh;
-	__be32 *options;
-	int hdr_len;
-
-	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
-		return -EINVAL;
-
-	greh = (struct gre_base_hdr *)skb_transport_header(skb);
-	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
-		return -EINVAL;
-
-	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
-	hdr_len = ip_gre_calc_hlen(tpi->flags);
-
-	if (!pskb_may_pull(skb, hdr_len))
-		return -EINVAL;
-
-	greh = (struct gre_base_hdr *)skb_transport_header(skb);
-	tpi->proto = greh->protocol;
-
-	options = (__be32 *)(greh + 1);
-	if (greh->flags & GRE_CSUM) {
-		if (skb_checksum_simple_validate(skb)) {
-			*csum_err = true;
-			return -EINVAL;
-		}
-
-		skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
-					 null_compute_pseudo);
-
-		options++;
-	}
-
-	if (greh->flags & GRE_KEY) {
-		tpi->key = *options;
-		options++;
-	} else
-		tpi->key = 0;
-
-	if (unlikely(greh->flags & GRE_SEQ)) {
-		tpi->seq = *options;
-		options++;
-	} else
-		tpi->seq = 0;
-
-	/* WCCP version 1 and 2 protocol decoding.
-	 * - Change protocol to IP
-	 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
-	 */
-	if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
-		tpi->proto = htons(ETH_P_IP);
-		if ((*(u8 *)options & 0xF0) != 0x40) {
-			hdr_len += 4;
-			if (!pskb_may_pull(skb, hdr_len))
-				return -EINVAL;
-		}
-	}
-
-	return iptunnel_pull_header(skb, hdr_len, tpi->proto);
-}
-
-static int gre_cisco_rcv(struct sk_buff *skb)
-{
-	struct tnl_ptk_info tpi;
-	int i;
-	bool csum_err = false;
-
-#ifdef CONFIG_NET_IPGRE_BROADCAST
-	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
-		/* Looped back packet, drop it! */
-		if (rt_is_output_route(skb_rtable(skb)))
-			goto drop;
-	}
-#endif
-
-	if (parse_gre_header(skb, &tpi, &csum_err) < 0)
-		goto drop;
-
-	rcu_read_lock();
-	for (i = 0; i < GRE_IP_PROTO_MAX; i++) {
-		struct gre_cisco_protocol *proto;
-		int ret;
-
-		proto = rcu_dereference(gre_cisco_proto_list[i]);
-		if (!proto)
-			continue;
-		ret = proto->handler(skb, &tpi);
-		if (ret == PACKET_RCVD) {
-			rcu_read_unlock();
-			return 0;
-		}
-	}
-	rcu_read_unlock();
-
-	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
-drop:
-	kfree_skb(skb);
-	return 0;
-}
-
-static void gre_cisco_err(struct sk_buff *skb, u32 info)
-{
-	/* All the routers (except for Linux) return only
-	 * 8 bytes of packet payload. It means, that precise relaying of
-	 * ICMP in the real Internet is absolutely infeasible.
-	 *
-	 * Moreover, Cisco "wise men" put GRE key to the third word
-	 * in GRE header. It makes impossible maintaining even soft
-	 * state for keyed
-	 * GRE tunnels with enabled checksum. Tell them "thank you".
-	 *
-	 * Well, I wonder, rfc1812 was written by Cisco employee,
-	 * what the hell these idiots break standards established
-	 * by themselves???
-	 */
-
-	const int type = icmp_hdr(skb)->type;
-	const int code = icmp_hdr(skb)->code;
-	struct tnl_ptk_info tpi;
-	bool csum_err = false;
-	int i;
-
-	if (parse_gre_header(skb, &tpi, &csum_err)) {
-		if (!csum_err)		/* ignore csum errors. */
-			return;
-	}
-
-	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
-		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-				skb->dev->ifindex, 0, IPPROTO_GRE, 0);
-		return;
-	}
-	if (type == ICMP_REDIRECT) {
-		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
-				IPPROTO_GRE, 0);
-		return;
-	}
-
-	rcu_read_lock();
-	for (i = 0; i < GRE_IP_PROTO_MAX; i++) {
-		struct gre_cisco_protocol *proto;
-
-		proto = rcu_dereference(gre_cisco_proto_list[i]);
-		if (!proto)
-			continue;
-
-		if (proto->err_handler(skb, info, &tpi) == PACKET_RCVD)
-			goto out;
-
-	}
-out:
-	rcu_read_unlock();
-}
-
 static int gre_rcv(struct sk_buff *skb)
 {
 	const struct gre_protocol *proto;
@@ -302,60 +110,19 @@
 	.netns_ok    = 1,
 };
 
-static const struct gre_protocol ipgre_protocol = {
-	.handler     = gre_cisco_rcv,
-	.err_handler = gre_cisco_err,
-};
-
-int gre_cisco_register(struct gre_cisco_protocol *newp)
-{
-	struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **)
-					    &gre_cisco_proto_list[newp->priority];
-
-	return (cmpxchg(proto, NULL, newp) == NULL) ? 0 : -EBUSY;
-}
-EXPORT_SYMBOL_GPL(gre_cisco_register);
-
-int gre_cisco_unregister(struct gre_cisco_protocol *del_proto)
-{
-	struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **)
-					    &gre_cisco_proto_list[del_proto->priority];
-	int ret;
-
-	ret = (cmpxchg(proto, del_proto, NULL) == del_proto) ? 0 : -EINVAL;
-
-	if (ret)
-		return ret;
-
-	synchronize_net();
-	return 0;
-}
-EXPORT_SYMBOL_GPL(gre_cisco_unregister);
-
 static int __init gre_init(void)
 {
 	pr_info("GRE over IPv4 demultiplexor driver\n");
 
 	if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
 		pr_err("can't add protocol\n");
-		goto err;
+		return -EAGAIN;
 	}
-
-	if (gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) {
-		pr_info("%s: can't add ipgre handler\n", __func__);
-		goto err_gre;
-	}
-
 	return 0;
-err_gre:
-	inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
-err:
-	return -EAGAIN;
 }
 
 static void __exit gre_exit(void)
 {
-	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
 	inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
 }
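
The removed gre_cisco_register() claimed a slot in a fixed array with a single cmpxchg(), making registration lock-free and first-wins. The same pattern in portable C11, as a sketch (the -16 value mirrors the kernel's -EBUSY convention):

#include <stdatomic.h>
#include <stdio.h>

#define EBUSY_ERR (-16) /* mirrors kernel -EBUSY */

static _Atomic(void *) proto_slot;

static int slot_register(void *newp)
{
	void *expected = NULL;

	/* First caller wins; everyone else gets -EBUSY. */
	return atomic_compare_exchange_strong(&proto_slot, &expected, newp)
		? 0 : EBUSY_ERR;
}

int main(void)
{
	int a, b;

	a = slot_register(&a);
	b = slot_register(&b);
	printf("%d %d\n", a, b); /* 0 -16 */
	return 0;
}
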
 
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index f5203fb..79fe05b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -96,6 +96,7 @@
 #include <net/xfrm.h>
 #include <net/inet_common.h>
 #include <net/ip_fib.h>
+#include <net/vrf.h>
 
 /*
  *	Build xmit assembly blocks
@@ -308,9 +309,10 @@
 
 	rc = false;
 	if (icmp_global_allow()) {
+		int vif = vrf_master_ifindex(dst->dev);
 		struct inet_peer *peer;
 
-		peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1);
+		peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
 		rc = inet_peer_xrlim_allow(peer,
 					   net->ipv4.sysctl_icmp_ratelimit);
 		if (peer)
@@ -425,6 +427,7 @@
 	fl4.flowi4_mark = mark;
 	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
 	fl4.flowi4_proto = IPPROTO_ICMP;
+	fl4.flowi4_oif = vrf_master_ifindex(skb->dev) ? : skb->dev->ifindex;
 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_key(net, &fl4);
 	if (IS_ERR(rt))
@@ -458,6 +461,8 @@
 	fl4->flowi4_proto = IPPROTO_ICMP;
 	fl4->fl4_icmp_type = type;
 	fl4->fl4_icmp_code = code;
+	fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev) ? : skb_in->dev->ifindex;
+
 	security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
 	rt = __ip_route_output_key(net, fl4);
 	if (IS_ERR(rt))
@@ -480,7 +485,8 @@
 	if (err)
 		goto relookup_failed;
 
-	if (inet_addr_type(net, fl4_dec.saddr) == RTN_LOCAL) {
+	if (inet_addr_type_dev_table(net, skb_in->dev,
+				     fl4_dec.saddr) == RTN_LOCAL) {
 		rt2 = __ip_route_output_key(net, &fl4_dec);
 		if (IS_ERR(rt2))
 			err = PTR_ERR(rt2);
@@ -496,6 +502,7 @@
 		}
 		/* Ugh! */
 		orefdst = skb_in->_skb_refdst; /* save old refdst */
+		skb_dst_set(skb_in, NULL);
 		err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
 				     RT_TOS(tos), rt2->dst.dev);
 
@@ -828,7 +835,7 @@
 	 */
 
 	if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
-	    inet_addr_type(net, iph->daddr) == RTN_BROADCAST) {
+	    inet_addr_type_dev_table(net, skb->dev, iph->daddr) == RTN_BROADCAST) {
 		net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
 				     &ip_hdr(skb)->saddr,
 				     icmph->type, icmph->code,
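
Several hunks above use GCC's a ?: b shorthand to prefer the VRF master's ifindex and fall back to the receiving device, e.g. vrf_master_ifindex(skb->dev) ? : skb->dev->ifindex. Spelled out as portable C:

#include <stdio.h>

static int pick_oif(int vrf_master_ifindex, int dev_ifindex)
{
	/* Equivalent of the GNU extension: vrf_master_ifindex ?: dev_ifindex */
	return vrf_master_ifindex ? vrf_master_ifindex : dev_ifindex;
}

int main(void)
{
	printf("%d\n", pick_oif(0, 3)); /* 3: not enslaved, use the device */
	printf("%d\n", pick_oif(7, 3)); /* 7: enslaved to VRF master 7 */
	return 0;
}
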
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 9fdfd9d..d38b8b6 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -110,6 +110,9 @@
 #define IP_MAX_MEMBERSHIPS	20
 #define IP_MAX_MSF		10
 
+/* IGMP reports for link-local multicast groups are enabled by default */
+int sysctl_igmp_llm_reports __read_mostly = 1;
+
 #ifdef CONFIG_IP_MULTICAST
 /* Parameter names and values are taken from igmp-v2-06 draft */
 
@@ -437,6 +440,8 @@
 
 	if (pmc->multiaddr == IGMP_ALL_HOSTS)
 		return skb;
+	if (ipv4_is_local_multicast(pmc->multiaddr) && !sysctl_igmp_llm_reports)
+		return skb;
 
 	isquery = type == IGMPV3_MODE_IS_INCLUDE ||
 		  type == IGMPV3_MODE_IS_EXCLUDE;
@@ -545,6 +550,9 @@
 		for_each_pmc_rcu(in_dev, pmc) {
 			if (pmc->multiaddr == IGMP_ALL_HOSTS)
 				continue;
+			if (ipv4_is_local_multicast(pmc->multiaddr) &&
+			     !sysctl_igmp_llm_reports)
+				continue;
 			spin_lock_bh(&pmc->lock);
 			if (pmc->sfcount[MCAST_EXCLUDE])
 				type = IGMPV3_MODE_IS_EXCLUDE;
@@ -678,7 +686,11 @@
 
 	if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
 		return igmpv3_send_report(in_dev, pmc);
-	else if (type == IGMP_HOST_LEAVE_MESSAGE)
+
+	if (ipv4_is_local_multicast(group) && !sysctl_igmp_llm_reports)
+		return 0;
+
+	if (type == IGMP_HOST_LEAVE_MESSAGE)
 		dst = IGMP_ALL_ROUTER;
 	else
 		dst = group;
@@ -851,6 +863,8 @@
 
 	if (group == IGMP_ALL_HOSTS)
 		return false;
+	if (ipv4_is_local_multicast(group) && !sysctl_igmp_llm_reports)
+		return false;
 
 	rcu_read_lock();
 	for_each_pmc_rcu(in_dev, im) {
@@ -957,6 +971,9 @@
 			continue;
 		if (im->multiaddr == IGMP_ALL_HOSTS)
 			continue;
+		if (ipv4_is_local_multicast(im->multiaddr) &&
+		    !sysctl_igmp_llm_reports)
+			continue;
 		spin_lock_bh(&im->lock);
 		if (im->tm_running)
 			im->gsquery = im->gsquery && mark;
@@ -1181,6 +1198,8 @@
 #ifdef CONFIG_IP_MULTICAST
 	if (im->multiaddr == IGMP_ALL_HOSTS)
 		return;
+	if (ipv4_is_local_multicast(im->multiaddr) && !sysctl_igmp_llm_reports)
+		return;
 
 	reporter = im->reporter;
 	igmp_stop_timer(im);
@@ -1213,6 +1232,8 @@
 #ifdef CONFIG_IP_MULTICAST
 	if (im->multiaddr == IGMP_ALL_HOSTS)
 		return;
+	if (ipv4_is_local_multicast(im->multiaddr) && !sysctl_igmp_llm_reports)
+		return;
 
 	if (in_dev->dead)
 		return;
@@ -1518,6 +1539,9 @@
 	for_each_pmc_rtnl(in_dev, im) {
 		if (im->multiaddr == IGMP_ALL_HOSTS)
 			continue;
+		if (ipv4_is_local_multicast(im->multiaddr) &&
+		    !sysctl_igmp_llm_reports)
+			continue;
 
 		/* a failover is happening and switches
 		 * must be notified immediately
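
The igmp.c changes gate reports for link-local groups behind the new sysctl_igmp_llm_reports knob (default on). Link-local here means 224.0.0.0/24, which ipv4_is_local_multicast() tests by masking the top 24 bits. A standalone check in host byte order:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Host-byte-order equivalent of ipv4_is_local_multicast(): 224.0.0.0/24 */
static bool is_local_multicast(uint32_t addr)
{
	return (addr & 0xffffff00u) == 0xe0000000u;
}

int main(void)
{
	printf("%d\n", is_local_multicast(0xe00000fbu)); /* 224.0.0.251 (mDNS): 1 */
	printf("%d\n", is_local_multicast(0xe00101fbu)); /* 224.1.1.251: 0 */
	return 0;
}
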
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 0cb9165..8912019 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -343,7 +343,6 @@
 	struct sock *sk2;
 	const struct hlist_nulls_node *node;
 	struct inet_timewait_sock *tw = NULL;
-	int twrefcnt = 0;
 
 	spin_lock(lock);
 
@@ -371,21 +370,17 @@
 	WARN_ON(!sk_unhashed(sk));
 	__sk_nulls_add_node_rcu(sk, &head->chain);
 	if (tw) {
-		twrefcnt = inet_twsk_unhash(tw);
+		sk_nulls_del_node_init_rcu((struct sock *)tw);
 		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
 	}
 	spin_unlock(lock);
-	if (twrefcnt)
-		inet_twsk_put(tw);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 
 	if (twp) {
 		*twp = tw;
 	} else if (tw) {
 		/* Silly. Should hash-dance instead... */
-		inet_twsk_deschedule(tw);
-
-		inet_twsk_put(tw);
+		inet_twsk_deschedule_put(tw);
 	}
 	return 0;
 
@@ -403,13 +398,12 @@
 					  inet->inet_dport);
 }
 
-int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
+void __inet_hash_nolisten(struct sock *sk, struct sock *osk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
 	struct hlist_nulls_head *list;
 	struct inet_ehash_bucket *head;
 	spinlock_t *lock;
-	int twrefcnt = 0;
 
 	WARN_ON(!sk_unhashed(sk));
 
@@ -420,23 +414,22 @@
 
 	spin_lock(lock);
 	__sk_nulls_add_node_rcu(sk, list);
-	if (tw) {
-		WARN_ON(sk->sk_hash != tw->tw_hash);
-		twrefcnt = inet_twsk_unhash(tw);
+	if (osk) {
+		WARN_ON(sk->sk_hash != osk->sk_hash);
+		sk_nulls_del_node_init_rcu(osk);
 	}
 	spin_unlock(lock);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-	return twrefcnt;
 }
 EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
 
-int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw)
+void __inet_hash(struct sock *sk, struct sock *osk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
 	struct inet_listen_hashbucket *ilb;
 
 	if (sk->sk_state != TCP_LISTEN)
-		return __inet_hash_nolisten(sk, tw);
+		return __inet_hash_nolisten(sk, osk);
 
 	WARN_ON(!sk_unhashed(sk));
 	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
@@ -445,7 +438,6 @@
 	__sk_nulls_add_node_rcu(sk, &ilb->head);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	spin_unlock(&ilb->lock);
-	return 0;
 }
 EXPORT_SYMBOL(__inet_hash);
 
@@ -492,7 +484,6 @@
 	struct inet_bind_bucket *tb;
 	int ret;
 	struct net *net = sock_net(sk);
-	int twrefcnt = 1;
 
 	if (!snum) {
 		int i, remaining, low, high, port;
@@ -560,19 +551,14 @@
 		inet_bind_hash(sk, tb, port);
 		if (sk_unhashed(sk)) {
 			inet_sk(sk)->inet_sport = htons(port);
-			twrefcnt += __inet_hash_nolisten(sk, tw);
+			__inet_hash_nolisten(sk, (struct sock *)tw);
 		}
 		if (tw)
-			twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
+			inet_twsk_bind_unhash(tw, hinfo);
 		spin_unlock(&head->lock);
 
-		if (tw) {
-			inet_twsk_deschedule(tw);
-			while (twrefcnt) {
-				twrefcnt--;
-				inet_twsk_put(tw);
-			}
-		}
+		if (tw)
+			inet_twsk_deschedule_put(tw);
 
 		ret = 0;
 		goto out;
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 2ffbd16..ae22cc2 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -18,28 +18,6 @@
 
 
 /**
- *	inet_twsk_unhash - unhash a timewait socket from established hash
- *	@tw: timewait socket
- *
- *	unhash a timewait socket from established hash, if hashed.
- *	ehash lock must be held by caller.
- *	Returns 1 if caller should call inet_twsk_put() after lock release.
- */
-int inet_twsk_unhash(struct inet_timewait_sock *tw)
-{
-	if (hlist_nulls_unhashed(&tw->tw_node))
-		return 0;
-
-	hlist_nulls_del_rcu(&tw->tw_node);
-	sk_nulls_node_init(&tw->tw_node);
-	/*
-	 * We cannot call inet_twsk_put() ourself under lock,
-	 * caller must call it for us.
-	 */
-	return 1;
-}
-
-/**
  *	inet_twsk_bind_unhash - unhash a timewait socket from bind hash
  *	@tw: timewait socket
  *	@hashinfo: hashinfo pointer
@@ -48,35 +26,29 @@
  *	bind hash lock must be held by caller.
  *	A reference on @tw is dropped here; the caller no longer needs
  *	to call inet_twsk_put() after lock release.
  */
-int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
 			  struct inet_hashinfo *hashinfo)
 {
 	struct inet_bind_bucket *tb = tw->tw_tb;
 
 	if (!tb)
-		return 0;
+		return;
 
 	__hlist_del(&tw->tw_bind_node);
 	tw->tw_tb = NULL;
 	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
-	/*
-	 * We cannot call inet_twsk_put() ourself under lock,
-	 * caller must call it for us.
-	 */
-	return 1;
+	__sock_put((struct sock *)tw);
 }
 
 /* Must be called with locally disabled BHs. */
 static void inet_twsk_kill(struct inet_timewait_sock *tw)
 {
 	struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
-	struct inet_bind_hashbucket *bhead;
-	int refcnt;
-	/* Unlink from established hashes. */
 	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
+	struct inet_bind_hashbucket *bhead;
 
 	spin_lock(lock);
-	refcnt = inet_twsk_unhash(tw);
+	sk_nulls_del_node_init_rcu((struct sock *)tw);
 	spin_unlock(lock);
 
 	/* Disassociate with bind bucket. */
@@ -84,11 +56,9 @@
 			hashinfo->bhash_size)];
 
 	spin_lock(&bhead->lock);
-	refcnt += inet_twsk_bind_unhash(tw, hashinfo);
+	inet_twsk_bind_unhash(tw, hashinfo);
 	spin_unlock(&bhead->lock);
 
-	BUG_ON(refcnt >= atomic_read(&tw->tw_refcnt));
-	atomic_sub(refcnt, &tw->tw_refcnt);
 	atomic_dec(&tw->tw_dr->tw_count);
 	inet_twsk_put(tw);
 }
@@ -235,13 +205,17 @@
  * tcp_input.c to verify this.
  */
 
-/* This is for handling early-kills of TIME_WAIT sockets. */
-void inet_twsk_deschedule(struct inet_timewait_sock *tw)
+/* This is for handling early-kills of TIME_WAIT sockets.
+ * Warning: this consumes a reference.
+ * The caller must not access tw afterwards.
+ */
+void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
 {
 	if (del_timer_sync(&tw->tw_timer))
 		inet_twsk_kill(tw);
+	inet_twsk_put(tw);
 }
-EXPORT_SYMBOL(inet_twsk_deschedule);
+EXPORT_SYMBOL(inet_twsk_deschedule_put);
 
 void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
 {
@@ -311,9 +285,8 @@
 
 			rcu_read_unlock();
 			local_bh_disable();
-			inet_twsk_deschedule(tw);
+			inet_twsk_deschedule_put(tw);
 			local_bh_enable();
-			inet_twsk_put(tw);
 			goto restart_rcu;
 		}
 		/* If the nulls value we got at the end of this lookup is
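
inet_twsk_deschedule_put() folds the old deschedule + put pair into one call that consumes the caller's reference; after it returns, the caller must treat tw as gone. The same "cancel, then drop the last reference you hold" shape in plain C11, as a sketch (the kernel version cancels a real timer; timer_armed stands in for it):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
	atomic_bool timer_armed; /* stands in for the pending timewait timer */
};

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);
}

/* Analogue of inet_twsk_deschedule_put(): if we actually cancelled the
 * pending timer, drop the reference the timer held; then consume the
 * caller's reference. The caller must not touch o afterwards. */
static void obj_deschedule_put(struct obj *o)
{
	bool armed = true;

	if (atomic_compare_exchange_strong(&o->timer_armed, &armed, false))
		obj_put(o);
	obj_put(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcnt, 2);         /* caller + timer */
	atomic_init(&o->timer_armed, true);
	obj_deschedule_put(o);              /* both refs dropped, o freed */
	return 0;
}
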
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 241afd7..86fa458 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -157,22 +157,6 @@
 	INIT_DEFERRABLE_WORK(&gc_work, inetpeer_gc_worker);
 }
 
-static int addr_compare(const struct inetpeer_addr *a,
-			const struct inetpeer_addr *b)
-{
-	int i, n = (a->family == AF_INET ? 1 : 4);
-
-	for (i = 0; i < n; i++) {
-		if (a->addr.a6[i] == b->addr.a6[i])
-			continue;
-		if ((__force u32)a->addr.a6[i] < (__force u32)b->addr.a6[i])
-			return -1;
-		return 1;
-	}
-
-	return 0;
-}
-
 #define rcu_deref_locked(X, BASE)				\
 	rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))
 
@@ -188,7 +172,7 @@
 	*stackptr++ = &_base->root;				\
 	for (u = rcu_deref_locked(_base->root, _base);		\
 	     u != peer_avl_empty;) {				\
-		int cmp = addr_compare(_daddr, &u->daddr);	\
+		int cmp = inetpeer_addr_cmp(_daddr, &u->daddr);	\
 		if (cmp == 0)					\
 			break;					\
 		if (cmp == -1)					\
@@ -215,7 +199,7 @@
 	int count = 0;
 
 	while (u != peer_avl_empty) {
-		int cmp = addr_compare(daddr, &u->daddr);
+		int cmp = inetpeer_addr_cmp(daddr, &u->daddr);
 		if (cmp == 0) {
 			/* Before taking a reference, check if this entry was
 			 * deleted (refcnt=-1)
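
The open-coded addr_compare() removed above walked the address as one 32-bit word for IPv4 or four for IPv6 and returned -1/0/1 in network-byte-order lexicographic order; the tree walk now calls the shared inetpeer_addr_cmp() instead. The comparison logic, restated as a sketch:

#include <stdint.h>
#include <stdio.h>

/* Lexicographic compare of n 32-bit words (network byte order assumed),
 * as the removed addr_compare() did: n = 1 for IPv4, 4 for IPv6. */
static int addr_cmp(const uint32_t *a, const uint32_t *b, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (a[i] == b[i])
			continue;
		return a[i] < b[i] ? -1 : 1;
	}
	return 0;
}

int main(void)
{
	uint32_t x[4] = { 1, 2, 3, 4 }, y[4] = { 1, 2, 9, 0 };

	printf("%d\n", addr_cmp(x, y, 4)); /* -1: differs at word 2 */
	return 0;
}
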
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 921138f..fa7f153 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -48,6 +48,7 @@
 #include <linux/inet.h>
 #include <linux/netfilter_ipv4.h>
 #include <net/inet_ecn.h>
+#include <net/vrf.h>
 
 /* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
  * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
@@ -77,6 +78,7 @@
 	u8		ecn; /* RFC3168 support */
 	u16		max_df_size; /* largest frag with DF set seen */
 	int             iif;
+	int             vif;   /* VRF device index */
 	unsigned int    rid;
 	struct inet_peer *peer;
 };
@@ -99,6 +101,7 @@
 struct ip4_create_arg {
 	struct iphdr *iph;
 	u32 user;
+	int vif;
 };
 
 static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
@@ -127,7 +130,8 @@
 		qp->saddr == arg->iph->saddr &&
 		qp->daddr == arg->iph->daddr &&
 		qp->protocol == arg->iph->protocol &&
-		qp->user == arg->user;
+		qp->user == arg->user &&
+		qp->vif == arg->vif;
 }
 
 static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
@@ -144,9 +148,11 @@
 	qp->ecn = ip4_frag_ecn(arg->iph->tos);
 	qp->saddr = arg->iph->saddr;
 	qp->daddr = arg->iph->daddr;
+	qp->vif = arg->vif;
 	qp->user = arg->user;
 	qp->peer = sysctl_ipfrag_max_dist ?
-		inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL;
+		inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, arg->vif, 1) :
+		NULL;
 }
 
 static void ip4_frag_free(struct inet_frag_queue *q)
@@ -244,7 +250,8 @@
 /* Find the correct entry in the "incomplete datagrams" queue for
  * this IP datagram, and create new one, if nothing is found.
  */
-static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
+static struct ipq *ip_find(struct net *net, struct iphdr *iph,
+			   u32 user, int vif)
 {
 	struct inet_frag_queue *q;
 	struct ip4_create_arg arg;
@@ -252,6 +259,7 @@
 
 	arg.iph = iph;
 	arg.user = user;
+	arg.vif = vif;
 
 	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 
@@ -522,7 +530,6 @@
 	int len;
 	int ihlen;
 	int err;
-	int sum_truesize;
 	u8 ecn;
 
 	ipq_kill(qp);
@@ -590,32 +597,19 @@
 		add_frag_mem_limit(qp->q.net, clone->truesize);
 	}
 
+	skb_shinfo(head)->frag_list = head->next;
 	skb_push(head, head->data - skb_network_header(head));
 
-	sum_truesize = head->truesize;
-	for (fp = head->next; fp;) {
-		bool headstolen;
-		int delta;
-		struct sk_buff *next = fp->next;
-
-		sum_truesize += fp->truesize;
+	for (fp = head->next; fp; fp = fp->next) {
+		head->data_len += fp->len;
+		head->len += fp->len;
 		if (head->ip_summed != fp->ip_summed)
 			head->ip_summed = CHECKSUM_NONE;
 		else if (head->ip_summed == CHECKSUM_COMPLETE)
 			head->csum = csum_add(head->csum, fp->csum);
-
-		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
-			kfree_skb_partial(fp, headstolen);
-		} else {
-			if (!skb_shinfo(head)->frag_list)
-				skb_shinfo(head)->frag_list = fp;
-			head->data_len += fp->len;
-			head->len += fp->len;
-			head->truesize += fp->truesize;
-		}
-		fp = next;
+		head->truesize += fp->truesize;
 	}
-	sub_frag_mem_limit(qp->q.net, sum_truesize);
+	sub_frag_mem_limit(qp->q.net, head->truesize);
 
 	head->next = NULL;
 	head->dev = dev;
@@ -662,14 +656,15 @@
 /* Process an incoming IP datagram fragment. */
 int ip_defrag(struct sk_buff *skb, u32 user)
 {
+	struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
+	int vif = vrf_master_ifindex_rcu(dev);
+	struct net *net = dev_net(dev);
 	struct ipq *qp;
-	struct net *net;
 
-	net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
 
 	/* Lookup (or create) queue header */
-	qp = ip_find(net, ip_hdr(skb), user);
+	qp = ip_find(net, ip_hdr(skb), user, vif);
 	if (qp) {
 		int ret;
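
ip_defrag() now keys fragment queues on the VRF device index as well, so identical (id, saddr, daddr, protocol, user) tuples arriving in different VRFs reassemble separately. The extended match key, sketched with illustrative types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct frag_key {
	uint16_t id;
	uint32_t saddr, daddr;
	uint8_t  protocol;
	uint32_t user;
	int      vif; /* VRF master ifindex, 0 when not enslaved */
};

static bool frag_match(const struct frag_key *q, const struct frag_key *a)
{
	return q->id == a->id && q->saddr == a->saddr &&
	       q->daddr == a->daddr && q->protocol == a->protocol &&
	       q->user == a->user && q->vif == a->vif;
}

int main(void)
{
	struct frag_key a = { 1, 0x0a000001, 0x0a000002, 17, 0, 0 };
	struct frag_key b = a;

	b.vif = 4; /* same tuple, different VRF: distinct queue */
	printf("%d\n", frag_match(&a, &b)); /* 0 */
	return 0;
}
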
 
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 5fd7064..bd0679d 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -25,6 +25,7 @@
 #include <linux/udp.h>
 #include <linux/if_arp.h>
 #include <linux/mroute.h>
+#include <linux/if_vlan.h>
 #include <linux/init.h>
 #include <linux/in6.h>
 #include <linux/inetdevice.h>
@@ -47,6 +48,7 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
 #include <net/gre.h>
+#include <net/dst_metadata.h>
 
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
@@ -121,8 +123,127 @@
 static int ipgre_net_id __read_mostly;
 static int gre_tap_net_id __read_mostly;
 
-static int ipgre_err(struct sk_buff *skb, u32 info,
-		     const struct tnl_ptk_info *tpi)
+static int ip_gre_calc_hlen(__be16 o_flags)
+{
+	int addend = 4;
+
+	if (o_flags & TUNNEL_CSUM)
+		addend += 4;
+	if (o_flags & TUNNEL_KEY)
+		addend += 4;
+	if (o_flags & TUNNEL_SEQ)
+		addend += 4;
+	return addend;
+}
+
+static __be16 gre_flags_to_tnl_flags(__be16 flags)
+{
+	__be16 tflags = 0;
+
+	if (flags & GRE_CSUM)
+		tflags |= TUNNEL_CSUM;
+	if (flags & GRE_ROUTING)
+		tflags |= TUNNEL_ROUTING;
+	if (flags & GRE_KEY)
+		tflags |= TUNNEL_KEY;
+	if (flags & GRE_SEQ)
+		tflags |= TUNNEL_SEQ;
+	if (flags & GRE_STRICT)
+		tflags |= TUNNEL_STRICT;
+	if (flags & GRE_REC)
+		tflags |= TUNNEL_REC;
+	if (flags & GRE_VERSION)
+		tflags |= TUNNEL_VERSION;
+
+	return tflags;
+}
+
+static __be16 tnl_flags_to_gre_flags(__be16 tflags)
+{
+	__be16 flags = 0;
+
+	if (tflags & TUNNEL_CSUM)
+		flags |= GRE_CSUM;
+	if (tflags & TUNNEL_ROUTING)
+		flags |= GRE_ROUTING;
+	if (tflags & TUNNEL_KEY)
+		flags |= GRE_KEY;
+	if (tflags & TUNNEL_SEQ)
+		flags |= GRE_SEQ;
+	if (tflags & TUNNEL_STRICT)
+		flags |= GRE_STRICT;
+	if (tflags & TUNNEL_REC)
+		flags |= GRE_REC;
+	if (tflags & TUNNEL_VERSION)
+		flags |= GRE_VERSION;
+
+	return flags;
+}
+
+static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+			    bool *csum_err)
+{
+	const struct gre_base_hdr *greh;
+	__be32 *options;
+	int hdr_len;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
+		return -EINVAL;
+
+	greh = (struct gre_base_hdr *)skb_transport_header(skb);
+	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
+		return -EINVAL;
+
+	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
+	hdr_len = ip_gre_calc_hlen(tpi->flags);
+
+	if (!pskb_may_pull(skb, hdr_len))
+		return -EINVAL;
+
+	greh = (struct gre_base_hdr *)skb_transport_header(skb);
+	tpi->proto = greh->protocol;
+
+	options = (__be32 *)(greh + 1);
+	if (greh->flags & GRE_CSUM) {
+		if (skb_checksum_simple_validate(skb)) {
+			*csum_err = true;
+			return -EINVAL;
+		}
+
+		skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
+					 null_compute_pseudo);
+		options++;
+	}
+
+	if (greh->flags & GRE_KEY) {
+		tpi->key = *options;
+		options++;
+	} else {
+		tpi->key = 0;
+	}
+	if (unlikely(greh->flags & GRE_SEQ)) {
+		tpi->seq = *options;
+		options++;
+	} else {
+		tpi->seq = 0;
+	}
+	/* WCCP version 1 and 2 protocol decoding.
+	 * - Change protocol to IP
+ * - When dealing with WCCPv2, skip extra 4 bytes in GRE header
+	 */
+	if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
+		tpi->proto = htons(ETH_P_IP);
+		if ((*(u8 *)options & 0xF0) != 0x40) {
+			hdr_len += 4;
+			if (!pskb_may_pull(skb, hdr_len))
+				return -EINVAL;
+		}
+	}
+	return iptunnel_pull_header(skb, hdr_len, tpi->proto);
+}
+
+static void ipgre_err(struct sk_buff *skb, u32 info,
+		      const struct tnl_ptk_info *tpi)
 {
 
 	/* All the routers (except for Linux) return only
@@ -148,14 +269,14 @@
 	switch (type) {
 	default:
 	case ICMP_PARAMETERPROB:
-		return PACKET_RCVD;
+		return;
 
 	case ICMP_DEST_UNREACH:
 		switch (code) {
 		case ICMP_SR_FAILED:
 		case ICMP_PORT_UNREACH:
 			/* Impossible event. */
-			return PACKET_RCVD;
+			return;
 		default:
 			/* All others are translated to HOST_UNREACH.
 			   rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -164,9 +285,10 @@
 			break;
 		}
 		break;
+
 	case ICMP_TIME_EXCEEDED:
 		if (code != ICMP_EXC_TTL)
-			return PACKET_RCVD;
+			return;
 		break;
 
 	case ICMP_REDIRECT:
@@ -183,26 +305,85 @@
 			     iph->daddr, iph->saddr, tpi->key);
 
 	if (!t)
-		return PACKET_REJECT;
+		return;
 
 	if (t->parms.iph.daddr == 0 ||
 	    ipv4_is_multicast(t->parms.iph.daddr))
-		return PACKET_RCVD;
+		return;
 
 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
-		return PACKET_RCVD;
+		return;
 
 	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
 		t->err_count++;
 	else
 		t->err_count = 1;
 	t->err_time = jiffies;
-	return PACKET_RCVD;
+}
+
+static void gre_err(struct sk_buff *skb, u32 info)
+{
+	/* All the routers (except for Linux) return only
+	 * 8 bytes of packet payload. It means, that precise relaying of
+	 * ICMP in the real Internet is absolutely infeasible.
+	 *
+	 * Moreover, Cisco "wise men" put GRE key to the third word
+	 * in GRE header. It makes impossible maintaining even soft
+	 * state for keyed
+	 * GRE tunnels with enabled checksum. Tell them "thank you".
+	 *
+	 * Well, I wonder, rfc1812 was written by Cisco employee,
+	 * what the hell these idiots break standards established
+	 * by themselves???
+	 */
+
+	const int type = icmp_hdr(skb)->type;
+	const int code = icmp_hdr(skb)->code;
+	struct tnl_ptk_info tpi;
+	bool csum_err = false;
+
+	if (parse_gre_header(skb, &tpi, &csum_err)) {
+		if (!csum_err)		/* ignore csum errors. */
+			return;
+	}
+
+	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
+		return;
+	}
+	if (type == ICMP_REDIRECT) {
+		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
+			      IPPROTO_GRE, 0);
+		return;
+	}
+
+	ipgre_err(skb, info, &tpi);
+}
+
+static __be64 key_to_tunnel_id(__be32 key)
+{
+#ifdef __BIG_ENDIAN
+	return (__force __be64)((__force u32)key);
+#else
+	return (__force __be64)((__force u64)key << 32);
+#endif
+}
+
+/* Returns the least-significant 32 bits of a __be64. */
+static __be32 tunnel_id_to_key(__be64 x)
+{
+#ifdef __BIG_ENDIAN
+	return (__force __be32)x;
+#else
+	return (__force __be32)((__force u64)x >> 32);
+#endif
 }
 
 static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
 {
 	struct net *net = dev_net(skb->dev);
+	struct metadata_dst *tun_dst = NULL;
 	struct ip_tunnel_net *itn;
 	const struct iphdr *iph;
 	struct ip_tunnel *tunnel;
@@ -218,40 +399,184 @@
 
 	if (tunnel) {
 		skb_pop_mac_header(skb);
-		ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error);
+		if (tunnel->collect_md) {
+			__be16 flags;
+			__be64 tun_id;
+
+			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
+			tun_id = key_to_tunnel_id(tpi->key);
+			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
+			if (!tun_dst)
+				return PACKET_REJECT;
+		}
+
+		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 		return PACKET_RCVD;
 	}
 	return PACKET_REJECT;
 }
 
+static int gre_rcv(struct sk_buff *skb)
+{
+	struct tnl_ptk_info tpi;
+	bool csum_err = false;
+
+#ifdef CONFIG_NET_IPGRE_BROADCAST
+	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
+		/* Looped back packet, drop it! */
+		if (rt_is_output_route(skb_rtable(skb)))
+			goto drop;
+	}
+#endif
+
+	if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+		goto drop;
+
+	if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
+		return 0;
+
+	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+drop:
+	kfree_skb(skb);
+	return 0;
+}
+
+static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags,
+			 __be16 proto, __be32 key, __be32 seq)
+{
+	struct gre_base_hdr *greh;
+
+	skb_push(skb, hdr_len);
+
+	skb_reset_transport_header(skb);
+	greh = (struct gre_base_hdr *)skb->data;
+	greh->flags = tnl_flags_to_gre_flags(flags);
+	greh->protocol = proto;
+
+	if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
+		__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
+
+		if (flags & TUNNEL_SEQ) {
+			*ptr = seq;
+			ptr--;
+		}
+		if (flags & TUNNEL_KEY) {
+			*ptr = key;
+			ptr--;
+		}
+		if (flags & TUNNEL_CSUM &&
+		    !(skb_shinfo(skb)->gso_type &
+		      (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
+			*ptr = 0;
+			*(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
+								 skb->len, 0));
+		}
+	}
+}
+
 static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 		       const struct iphdr *tnl_params,
 		       __be16 proto)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
-	struct tnl_ptk_info tpi;
 
-	tpi.flags = tunnel->parms.o_flags;
-	tpi.proto = proto;
-	tpi.key = tunnel->parms.o_key;
 	if (tunnel->parms.o_flags & TUNNEL_SEQ)
 		tunnel->o_seqno++;
-	tpi.seq = htonl(tunnel->o_seqno);
 
 	/* Push GRE header. */
-	gre_build_header(skb, &tpi, tunnel->tun_hlen);
+	build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
+		     proto, tunnel->parms.o_key, htonl(tunnel->o_seqno));
 
-	skb_set_inner_protocol(skb, tpi.proto);
-
+	skb_set_inner_protocol(skb, proto);
 	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 }
 
+static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
+					   bool csum)
+{
+	return iptunnel_handle_offloads(skb, csum,
+					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
+}
+
+static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ip_tunnel_info *tun_info;
+	struct net *net = dev_net(dev);
+	const struct ip_tunnel_key *key;
+	struct flowi4 fl;
+	struct rtable *rt;
+	int min_headroom;
+	int tunnel_hlen;
+	__be16 df, flags;
+	int err;
+
+	tun_info = skb_tunnel_info(skb);
+	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+		     ip_tunnel_info_af(tun_info) != AF_INET))
+		goto err_free_skb;
+
+	key = &tun_info->key;
+	memset(&fl, 0, sizeof(fl));
+	fl.daddr = key->u.ipv4.dst;
+	fl.saddr = key->u.ipv4.src;
+	fl.flowi4_tos = RT_TOS(key->tos);
+	fl.flowi4_mark = skb->mark;
+	fl.flowi4_proto = IPPROTO_GRE;
+
+	rt = ip_route_output_key(net, &fl);
+	if (IS_ERR(rt))
+		goto err_free_skb;
+
+	tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);
+
+	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+			+ tunnel_hlen + sizeof(struct iphdr);
+	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
+		int head_delta = SKB_DATA_ALIGN(min_headroom -
+						skb_headroom(skb) +
+						16);
+		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
+				       0, GFP_ATOMIC);
+		if (unlikely(err))
+			goto err_free_rt;
+	}
+
+	/* Push Tunnel header. */
+	skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
+	if (IS_ERR(skb)) {
+		skb = NULL;
+		goto err_free_rt;
+	}
+
+	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
+	build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
+		     tunnel_id_to_key(tun_info->key.tun_id), 0);
+
+	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+	err = iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
+			    key->u.ipv4.dst, IPPROTO_GRE,
+			    key->tos, key->ttl, df, false);
+	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+	return;
+
+err_free_rt:
+	ip_rt_put(rt);
+err_free_skb:
+	kfree_skb(skb);
+	dev->stats.tx_dropped++;
+}
+
 static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
 			      struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	const struct iphdr *tnl_params;
 
+	if (tunnel->collect_md) {
+		gre_fb_xmit(skb, dev);
+		return NETDEV_TX_OK;
+	}
+
 	if (dev->header_ops) {
 		/* Need space for new headers */
 		if (skb_cow_head(skb, dev->needed_headroom -
@@ -277,7 +602,6 @@
 		goto out;
 
 	__gre_xmit(skb, dev, tnl_params, skb->protocol);
-
 	return NETDEV_TX_OK;
 
 free_skb:
@@ -292,6 +616,11 @@
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 
+	if (tunnel->collect_md) {
+		gre_fb_xmit(skb, dev);
+		return NETDEV_TX_OK;
+	}
+
 	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
 	if (IS_ERR(skb))
 		goto out;
@@ -300,7 +629,6 @@
 		goto free_skb;
 
 	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
-
 	return NETDEV_TX_OK;
 
 free_skb:
@@ -530,10 +858,9 @@
 	return ip_tunnel_init(dev);
 }
 
-static struct gre_cisco_protocol ipgre_protocol = {
-	.handler        = ipgre_rcv,
-	.err_handler    = ipgre_err,
-	.priority       = 0,
+static const struct gre_protocol ipgre_protocol = {
+	.handler     = gre_rcv,
+	.err_handler = gre_err,
 };
 
 static int __net_init ipgre_init_net(struct net *net)
@@ -596,8 +923,10 @@
 	return ipgre_tunnel_validate(tb, data);
 }
 
-static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[],
-			       struct ip_tunnel_parm *parms)
+static void ipgre_netlink_parms(struct net_device *dev,
+				struct nlattr *data[],
+				struct nlattr *tb[],
+				struct ip_tunnel_parm *parms)
 {
 	memset(parms, 0, sizeof(*parms));
 
@@ -635,6 +964,12 @@
 
 	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
 		parms->iph.frag_off = htons(IP_DF);
+
+	if (data[IFLA_GRE_COLLECT_METADATA]) {
+		struct ip_tunnel *t = netdev_priv(dev);
+
+		t->collect_md = true;
+	}
 }
 
 /* This function returns true when ENCAP attributes are present in the nl msg */
@@ -712,7 +1047,7 @@
 			return err;
 	}
 
-	ipgre_netlink_parms(data, tb, &p);
+	ipgre_netlink_parms(dev, data, tb, &p);
 	return ip_tunnel_newlink(dev, tb, &p);
 }
 
@@ -730,7 +1065,7 @@
 			return err;
 	}
 
-	ipgre_netlink_parms(data, tb, &p);
+	ipgre_netlink_parms(dev, data, tb, &p);
 	return ip_tunnel_changelink(dev, tb, &p);
 }
 
@@ -765,6 +1100,8 @@
 		nla_total_size(2) +
 		/* IFLA_GRE_ENCAP_DPORT */
 		nla_total_size(2) +
+		/* IFLA_GRE_COLLECT_METADATA */
+		nla_total_size(0) +
 		0;
 }
 
@@ -796,6 +1133,11 @@
 			t->encap.flags))
 		goto nla_put_failure;
 
+	if (t->collect_md) {
+		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
+			goto nla_put_failure;
+	}
+
 	return 0;
 
 nla_put_failure:
@@ -817,6 +1159,7 @@
 	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
 	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
 	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
+	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
 };
 
 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
@@ -849,9 +1192,38 @@
 	.get_link_net	= ip_tunnel_get_link_net,
 };
 
+struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
+					u8 name_assign_type)
+{
+	struct nlattr *tb[IFLA_MAX + 1];
+	struct net_device *dev;
+	struct ip_tunnel *t;
+	int err;
+
+	memset(&tb, 0, sizeof(tb));
+
+	dev = rtnl_create_link(net, name, name_assign_type,
+			       &ipgre_tap_ops, tb);
+	if (IS_ERR(dev))
+		return dev;
+
+	/* Configure flow based GRE device. */
+	t = netdev_priv(dev);
+	t->collect_md = true;
+
+	err = ipgre_newlink(net, dev, tb, NULL);
+	if (err < 0)
+		goto out;
+	return dev;
+out:
+	free_netdev(dev);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
+
 static int __net_init ipgre_tap_init_net(struct net *net)
 {
-	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, NULL);
+	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
 }
 
 static void __net_exit ipgre_tap_exit_net(struct net *net)
@@ -881,7 +1253,7 @@
 	if (err < 0)
 		goto pnet_tap_failed;
 
-	err = gre_cisco_register(&ipgre_protocol);
+	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
 	if (err < 0) {
 		pr_info("%s: can't add protocol\n", __func__);
 		goto add_proto_failed;
@@ -900,7 +1272,7 @@
 tap_ops_failed:
 	rtnl_link_unregister(&ipgre_link_ops);
 rtnl_link_failed:
-	gre_cisco_unregister(&ipgre_protocol);
+	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
 add_proto_failed:
 	unregister_pernet_device(&ipgre_tap_net_ops);
 pnet_tap_failed:
@@ -912,7 +1284,7 @@
 {
 	rtnl_link_unregister(&ipgre_tap_ops);
 	rtnl_link_unregister(&ipgre_link_ops);
-	gre_cisco_unregister(&ipgre_protocol);
+	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
 	unregister_pernet_device(&ipgre_tap_net_ops);
 	unregister_pernet_device(&ipgre_net_ops);
 }
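
key_to_tunnel_id() and tunnel_id_to_key() above place the 32-bit GRE key so that the 64-bit tunnel id, read in network byte order, numerically equals the key regardless of host endianness. A userspace equivalent, assuming the glibc/BSD <endian.h> macros:

#include <endian.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Map a big-endian 32-bit GRE key to a big-endian 64-bit tunnel id so
 * that the id, read as a network-order number, equals the key. */
static uint64_t key_to_tunnel_id(uint32_t key_be)
{
#if __BYTE_ORDER == __BIG_ENDIAN
	return key_be;
#else
	return (uint64_t)key_be << 32;
#endif
}

static uint32_t tunnel_id_to_key(uint64_t id_be)
{
#if __BYTE_ORDER == __BIG_ENDIAN
	return (uint32_t)id_be;
#else
	return (uint32_t)(id_be >> 32);
#endif
}

int main(void)
{
	uint32_t key = htobe32(0x2a);
	uint64_t id = key_to_tunnel_id(key);

	printf("round trip ok: %d\n", tunnel_id_to_key(id) == key); /* 1 */
	printf("id as number: %" PRIu64 "\n", be64toh(id));         /* 42 */
	return 0;
}
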
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 2db4c87..f4fc8a7 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -146,6 +146,7 @@
 #include <net/xfrm.h>
 #include <linux/mroute.h>
 #include <linux/netlink.h>
+#include <net/dst_metadata.h>
 
 /*
  *	Process Router Attention IP option (RFC 2113)
@@ -331,7 +332,7 @@
 	 *	Initialise the virtual path cache for the packet. It describes
 	 *	how the packet travels inside Linux networking.
 	 */
-	if (!skb_dst(skb)) {
+	if (!skb_valid_dst(skb)) {
 		int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
 					       iph->tos, skb->dev);
 		if (unlikely(err)) {
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 6bf89a6..0138fad 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1542,6 +1542,7 @@
 	struct net *net = sock_net(sk);
 	struct sk_buff *nskb;
 	int err;
+	int oif;
 
 	if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
 		return;
@@ -1559,7 +1560,11 @@
 			daddr = replyopts.opt.opt.faddr;
 	}
 
-	flowi4_init_output(&fl4, arg->bound_dev_if,
+	oif = arg->bound_dev_if;
+	if (!oif && netif_index_is_vrf(net, skb->skb_iif))
+		oif = skb->skb_iif;
+
+	flowi4_init_output(&fl4, oif,
 			   IP4_REPLY_MARK(net, skb->mark),
 			   RT_TOS(arg->tos),
 			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 626d9e5..cbb51f3 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -230,10 +230,13 @@
 	if (cand)
 		return cand;
 
+	t = rcu_dereference(itn->collect_md_tun);
+	if (t)
+		return t;
+
 	if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
 		return netdev_priv(itn->fb_tunnel_dev);
 
-
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
@@ -261,11 +264,15 @@
 {
 	struct hlist_head *head = ip_bucket(itn, &t->parms);
 
+	if (t->collect_md)
+		rcu_assign_pointer(itn->collect_md_tun, t);
 	hlist_add_head_rcu(&t->hash_node, head);
 }
 
-static void ip_tunnel_del(struct ip_tunnel *t)
+static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
 {
+	if (t->collect_md)
+		rcu_assign_pointer(itn->collect_md_tun, NULL);
 	hlist_del_init_rcu(&t->hash_node);
 }
 
@@ -419,7 +426,8 @@
 }
 
 int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
-		  const struct tnl_ptk_info *tpi, bool log_ecn_error)
+		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+		  bool log_ecn_error)
 {
 	struct pcpu_sw_netstats *tstats;
 	const struct iphdr *iph = ip_hdr(skb);
@@ -478,6 +486,9 @@
 		skb->dev = tunnel->dev;
 	}
 
+	if (tun_dst)
+		skb_dst_set(skb, (struct dst_entry *)tun_dst);
+
 	gro_cells_receive(&tunnel->gro_cells, skb);
 	return 0;
 
@@ -806,7 +817,7 @@
 			     struct ip_tunnel_parm *p,
 			     bool set_mtu)
 {
-	ip_tunnel_del(t);
+	ip_tunnel_del(itn, t);
 	t->parms.iph.saddr = p->iph.saddr;
 	t->parms.iph.daddr = p->iph.daddr;
 	t->parms.i_key = p->i_key;
@@ -967,7 +978,7 @@
 	itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);
 
 	if (itn->fb_tunnel_dev != dev) {
-		ip_tunnel_del(netdev_priv(dev));
+		ip_tunnel_del(itn, netdev_priv(dev));
 		unregister_netdevice_queue(dev, head);
 	}
 }
@@ -1072,8 +1083,13 @@
 	nt = netdev_priv(dev);
 	itn = net_generic(net, nt->ip_tnl_net_id);
 
-	if (ip_tunnel_find(itn, p, dev->type))
-		return -EEXIST;
+	if (nt->collect_md) {
+		if (rtnl_dereference(itn->collect_md_tun))
+			return -EEXIST;
+	} else {
+		if (ip_tunnel_find(itn, p, dev->type))
+			return -EEXIST;
+	}
 
 	nt->net = net;
 	nt->parms = *p;
@@ -1089,7 +1105,6 @@
 		dev->mtu = mtu;
 
 	ip_tunnel_add(itn, nt);
-
 out:
 	return err;
 }
@@ -1163,6 +1178,10 @@
 	iph->version		= 4;
 	iph->ihl		= 5;
 
+	if (tunnel->collect_md) {
+		dev->features |= NETIF_F_NETNS_LOCAL;
+		netif_keep_dst(dev);
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_init);
@@ -1176,7 +1195,7 @@
 	itn = net_generic(net, tunnel->ip_tnl_net_id);
 	/* fb_tunnel_dev will be unregistered in net-exit call. */
 	if (itn->fb_tunnel_dev != dev)
-		ip_tunnel_del(netdev_priv(dev));
+		ip_tunnel_del(itn, netdev_priv(dev));
 
 	ip_tunnel_dst_reset_all(tunnel);
 }
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 6a51a71..29ed6c5 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -32,6 +32,7 @@
 #include <linux/etherdevice.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
+#include <linux/static_key.h>
 
 #include <net/ip.h>
 #include <net/icmp.h>
@@ -190,3 +191,232 @@
 	return tot;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
+
+static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
+	[LWTUNNEL_IP_ID]	= { .type = NLA_U64 },
+	[LWTUNNEL_IP_DST]	= { .type = NLA_U32 },
+	[LWTUNNEL_IP_SRC]	= { .type = NLA_U32 },
+	[LWTUNNEL_IP_TTL]	= { .type = NLA_U8 },
+	[LWTUNNEL_IP_TOS]	= { .type = NLA_U8 },
+	[LWTUNNEL_IP_SPORT]	= { .type = NLA_U16 },
+	[LWTUNNEL_IP_DPORT]	= { .type = NLA_U16 },
+	[LWTUNNEL_IP_FLAGS]	= { .type = NLA_U16 },
+};
+
+static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
+			      unsigned int family, const void *cfg,
+			      struct lwtunnel_state **ts)
+{
+	struct ip_tunnel_info *tun_info;
+	struct lwtunnel_state *new_state;
+	struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
+	int err;
+
+	err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy);
+	if (err < 0)
+		return err;
+
+	new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+	if (!new_state)
+		return -ENOMEM;
+
+	new_state->type = LWTUNNEL_ENCAP_IP;
+
+	tun_info = lwt_tun_info(new_state);
+
+	if (tb[LWTUNNEL_IP_ID])
+		tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP_ID]);
+
+	if (tb[LWTUNNEL_IP_DST])
+		tun_info->key.u.ipv4.dst = nla_get_be32(tb[LWTUNNEL_IP_DST]);
+
+	if (tb[LWTUNNEL_IP_SRC])
+		tun_info->key.u.ipv4.src = nla_get_be32(tb[LWTUNNEL_IP_SRC]);
+
+	if (tb[LWTUNNEL_IP_TTL])
+		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);
+
+	if (tb[LWTUNNEL_IP_TOS])
+		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
+
+	if (tb[LWTUNNEL_IP_SPORT])
+		tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP_SPORT]);
+
+	if (tb[LWTUNNEL_IP_DPORT])
+		tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP_DPORT]);
+
+	if (tb[LWTUNNEL_IP_FLAGS])
+		tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]);
+
+	tun_info->mode = IP_TUNNEL_INFO_TX;
+	tun_info->options_len = 0;
+
+	*ts = new_state;
+
+	return 0;
+}
+
+static int ip_tun_fill_encap_info(struct sk_buff *skb,
+				  struct lwtunnel_state *lwtstate)
+{
+	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
+
+	if (nla_put_u64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) ||
+	    nla_put_be32(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
+	    nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
+	    nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
+	    nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
+	    nla_put_u16(skb, LWTUNNEL_IP_SPORT, tun_info->key.tp_src) ||
+	    nla_put_u16(skb, LWTUNNEL_IP_DPORT, tun_info->key.tp_dst) ||
+	    nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+	return nla_total_size(8)	/* LWTUNNEL_IP_ID */
+		+ nla_total_size(4)	/* LWTUNNEL_IP_DST */
+		+ nla_total_size(4)	/* LWTUNNEL_IP_SRC */
+		+ nla_total_size(1)	/* LWTUNNEL_IP_TOS */
+		+ nla_total_size(1)	/* LWTUNNEL_IP_TTL */
+		+ nla_total_size(2)	/* LWTUNNEL_IP_SPORT */
+		+ nla_total_size(2)	/* LWTUNNEL_IP_DPORT */
+		+ nla_total_size(2);	/* LWTUNNEL_IP_FLAGS */
+}
+
+static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+	return memcmp(lwt_tun_info(a), lwt_tun_info(b),
+		      sizeof(struct ip_tunnel_info));
+}
+
+static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
+	.build_state = ip_tun_build_state,
+	.fill_encap = ip_tun_fill_encap_info,
+	.get_encap_size = ip_tun_encap_nlsize,
+	.cmp_encap = ip_tun_cmp_encap,
+};
+
+static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
+	[LWTUNNEL_IP6_ID]		= { .type = NLA_U64 },
+	[LWTUNNEL_IP6_DST]		= { .len = sizeof(struct in6_addr) },
+	[LWTUNNEL_IP6_SRC]		= { .len = sizeof(struct in6_addr) },
+	[LWTUNNEL_IP6_HOPLIMIT]		= { .type = NLA_U8 },
+	[LWTUNNEL_IP6_TC]		= { .type = NLA_U8 },
+	[LWTUNNEL_IP6_SPORT]		= { .type = NLA_U16 },
+	[LWTUNNEL_IP6_DPORT]		= { .type = NLA_U16 },
+	[LWTUNNEL_IP6_FLAGS]		= { .type = NLA_U16 },
+};
+
+static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr,
+			       unsigned int family, const void *cfg,
+			       struct lwtunnel_state **ts)
+{
+	struct ip_tunnel_info *tun_info;
+	struct lwtunnel_state *new_state;
+	struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
+	int err;
+
+	err = nla_parse_nested(tb, LWTUNNEL_IP6_MAX, attr, ip6_tun_policy);
+	if (err < 0)
+		return err;
+
+	new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+	if (!new_state)
+		return -ENOMEM;
+
+	new_state->type = LWTUNNEL_ENCAP_IP6;
+
+	tun_info = lwt_tun_info(new_state);
+
+	if (tb[LWTUNNEL_IP6_ID])
+		tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP6_ID]);
+
+	if (tb[LWTUNNEL_IP6_DST])
+		tun_info->key.u.ipv6.dst = nla_get_in6_addr(tb[LWTUNNEL_IP6_DST]);
+
+	if (tb[LWTUNNEL_IP6_SRC])
+		tun_info->key.u.ipv6.src = nla_get_in6_addr(tb[LWTUNNEL_IP6_SRC]);
+
+	if (tb[LWTUNNEL_IP6_HOPLIMIT])
+		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP6_HOPLIMIT]);
+
+	if (tb[LWTUNNEL_IP6_TC])
+		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
+
+	if (tb[LWTUNNEL_IP6_SPORT])
+		tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP6_SPORT]);
+
+	if (tb[LWTUNNEL_IP6_DPORT])
+		tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP6_DPORT]);
+
+	if (tb[LWTUNNEL_IP6_FLAGS])
+		tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP6_FLAGS]);
+
+	tun_info->mode = IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6;
+	tun_info->options_len = 0;
+
+	*ts = new_state;
+
+	return 0;
+}
+
+static int ip6_tun_fill_encap_info(struct sk_buff *skb,
+				   struct lwtunnel_state *lwtstate)
+{
+	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
+
+	if (nla_put_u64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) ||
+	    nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
+	    nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
+	    nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) ||
+	    nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) ||
+	    nla_put_u16(skb, LWTUNNEL_IP6_SPORT, tun_info->key.tp_src) ||
+	    nla_put_u16(skb, LWTUNNEL_IP6_DPORT, tun_info->key.tp_dst) ||
+	    nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+	return nla_total_size(8)	/* LWTUNNEL_IP6_ID */
+		+ nla_total_size(16)	/* LWTUNNEL_IP6_DST */
+		+ nla_total_size(16)	/* LWTUNNEL_IP6_SRC */
+		+ nla_total_size(1)	/* LWTUNNEL_IP6_HOPLIMIT */
+		+ nla_total_size(1)	/* LWTUNNEL_IP6_TC */
+		+ nla_total_size(2)	/* LWTUNNEL_IP6_SPORT */
+		+ nla_total_size(2)	/* LWTUNNEL_IP6_DPORT */
+		+ nla_total_size(2);	/* LWTUNNEL_IP6_FLAGS */
+}
+
+static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
+	.build_state = ip6_tun_build_state,
+	.fill_encap = ip6_tun_fill_encap_info,
+	.get_encap_size = ip6_tun_encap_nlsize,
+	.cmp_encap = ip_tun_cmp_encap,
+};
+
+void __init ip_tunnel_core_init(void)
+{
+	lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
+	lwtunnel_encap_add_ops(&ip6_tun_lwt_ops, LWTUNNEL_ENCAP_IP6);
+}
+
+struct static_key ip_tunnel_metadata_cnt = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL(ip_tunnel_metadata_cnt);
+
+void ip_tunnel_need_metadata(void)
+{
+	static_key_slow_inc(&ip_tunnel_metadata_cnt);
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_need_metadata);
+
+void ip_tunnel_unneed_metadata(void)
+{
+	static_key_slow_dec(&ip_tunnel_metadata_cnt);
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);
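
ip_tun_encap_nlsize() precomputes the worst-case dump payload from nla_total_size() terms, one per attribute emitted by ip_tun_fill_encap_info(). The arithmetic behind those calls is ordinary netlink header-plus-alignment accounting; a standalone sketch restating it outside the kernel headers (NLA_HDRLEN is 4 bytes and attributes are 4-byte aligned in the standard netlink layout):

#include <stdio.h>

#define NLA_ALIGNTO	4
#define NLA_ALIGN(n)	(((n) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	NLA_ALIGN(4)	/* struct nlattr: u16 len + u16 type */

static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	/* u64 id + be32 dst + be32 src + u8 tos + u8 ttl
	 * + u16 sport + u16 dport + u16 flags */
	int sz = nla_total_size(8) + nla_total_size(4) + nla_total_size(4)
	       + nla_total_size(1) + nla_total_size(1) + nla_total_size(2)
	       + nla_total_size(2) + nla_total_size(2);

	printf("LWTUNNEL_IP state dump needs %d bytes\n", sz);	/* 68 */
	return 0;
}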
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 8e7328c..ed4ef09 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -94,7 +94,7 @@
 /* Define the timeout for waiting for a DHCP/BOOTP/RARP reply */
 #define CONF_OPEN_RETRIES 	2	/* (Re)open devices twice */
 #define CONF_SEND_RETRIES 	6	/* Send six requests per open */
-#define CONF_INTER_TIMEOUT	(HZ/2)	/* Inter-device timeout: 1/2 second */
+#define CONF_INTER_TIMEOUT	(HZ)	/* Inter-device timeout: 1 second */
 #define CONF_BASE_TIMEOUT	(HZ*2)	/* Initial timeout: 2 seconds */
 #define CONF_TIMEOUT_RANDOM	(HZ)	/* Maximum amount of randomization */
 #define CONF_TIMEOUT_MULT	*7/4	/* Rate of timeout growth */
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 254238d..f34c31d 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -198,7 +198,7 @@
 			goto drop;
 		if (iptunnel_pull_header(skb, 0, tpi.proto))
 			goto drop;
-		return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
+		return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, log_ecn_error);
 	}
 
 	return -1;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 2199a5d..690d27d 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -58,6 +58,12 @@
 	default NFT_REJECT
 	tristate
 
+config NFT_DUP_IPV4
+	tristate "IPv4 nf_tables packet duplication support"
+	select NF_DUP_IPV4
+	help
+	  This module enables IPv4 packet duplication support for nf_tables.
+
 endif # NF_TABLES_IPV4
 
 config NF_TABLES_ARP
@@ -67,6 +73,12 @@
 
 endif # NF_TABLES
 
+config NF_DUP_IPV4
+	tristate "Netfilter IPv4 packet duplication to alternate destination"
+	help
+	  This option enables the nf_dup_ipv4 core, which duplicates an IPv4
+	  packet and reroutes the copy to another destination.
+
 config NF_LOG_ARP
 	tristate "ARP packet logging"
 	default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 7fe6c70..87b073d 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -41,6 +41,7 @@
 obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
 obj-$(CONFIG_NFT_MASQ_IPV4) += nft_masq_ipv4.o
 obj-$(CONFIG_NFT_REDIR_IPV4) += nft_redir_ipv4.o
+obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
 obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
 
 # generic IP tables 
@@ -70,3 +71,5 @@
 
 # just filtering instance of ARP tables for now
 obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o
+
+obj-$(CONFIG_NF_DUP_IPV4) += nf_dup_ipv4.o
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 92305a1..8f87fc3 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -240,7 +240,7 @@
 	return (struct arpt_entry *)(base + offset);
 }
 
-static inline __pure
+static inline
 struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
 {
 	return (void *)entry + entry->next_offset;
@@ -280,6 +280,9 @@
 	table_base = private->entries;
 	jumpstack  = (struct arpt_entry **)private->jumpstack[cpu];
 
+	/* No TEE support for arptables, so no need to switch to alternate
+	 * stack.  All targets that reenter must return absolute verdicts.
+	 */
 	e = get_entry(table_base, private->hook_entry[hook]);
 
 	acpar.in      = state->in;
@@ -325,11 +328,6 @@
 			}
 			if (table_base + v
 			    != arpt_next_entry(e)) {
-
-				if (stackidx >= private->stacksize) {
-					verdict = NF_DROP;
-					break;
-				}
 				jumpstack[stackidx++] = e;
 			}
 
@@ -337,9 +335,6 @@
 			continue;
 		}
 
-		/* Targets which reenter must return
-		 * abs. verdicts
-		 */
 		acpar.target   = t->u.kernel.target;
 		acpar.targinfo = t->data;
 		verdict = t->u.kernel.target->target(skb, &acpar);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 6c72fbb..b0a86e7 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -276,7 +276,7 @@
 }
 #endif
 
-static inline __pure
+static inline
 struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
 {
 	return (void *)entry + entry->next_offset;
@@ -296,12 +296,13 @@
 	const char *indev, *outdev;
 	const void *table_base;
 	struct ipt_entry *e, **jumpstack;
-	unsigned int *stackptr, origptr, cpu;
+	unsigned int stackidx, cpu;
 	const struct xt_table_info *private;
 	struct xt_action_param acpar;
 	unsigned int addend;
 
 	/* Initialization */
+	stackidx = 0;
 	ip = ip_hdr(skb);
 	indev = state->in ? state->in->name : nulldevname;
 	outdev = state->out ? state->out->name : nulldevname;
@@ -331,13 +332,21 @@
 	smp_read_barrier_depends();
 	table_base = private->entries;
 	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
-	stackptr   = per_cpu_ptr(private->stackptr, cpu);
-	origptr    = *stackptr;
+
+	/* Switch to alternate jumpstack if we're being invoked via TEE.
+	 * TEE issues XT_CONTINUE verdict on original skb so we must not
+	 * clobber the jumpstack.
+	 *
+	 * For recursion via REJECT or SYNPROXY the stack will be clobbered
+	 * but it is no problem since absolute verdict is issued by these.
+	 */
+	if (static_key_false(&xt_tee_enabled))
+		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
 
 	e = get_entry(table_base, private->hook_entry[hook]);
 
-	pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
-		 table->name, hook, origptr,
+	pr_debug("Entering %s(hook %u), UF %p\n",
+		 table->name, hook,
 		 get_entry(table_base, private->underflow[hook]));
 
 	do {
@@ -383,28 +392,24 @@
 					verdict = (unsigned int)(-v) - 1;
 					break;
 				}
-				if (*stackptr <= origptr) {
+				if (stackidx == 0) {
 					e = get_entry(table_base,
 					    private->underflow[hook]);
 					pr_debug("Underflow (this is normal) "
 						 "to %p\n", e);
 				} else {
-					e = jumpstack[--*stackptr];
+					e = jumpstack[--stackidx];
 					pr_debug("Pulled %p out from pos %u\n",
-						 e, *stackptr);
+						 e, stackidx);
 					e = ipt_next_entry(e);
 				}
 				continue;
 			}
 			if (table_base + v != ipt_next_entry(e) &&
 			    !(e->ip.flags & IPT_F_GOTO)) {
-				if (*stackptr >= private->stacksize) {
-					verdict = NF_DROP;
-					break;
-				}
-				jumpstack[(*stackptr)++] = e;
+				jumpstack[stackidx++] = e;
 				pr_debug("Pushed %p into pos %u\n",
-					 e, *stackptr - 1);
+					 e, stackidx - 1);
 			}
 
 			e = get_entry(table_base, v);
@@ -423,9 +428,8 @@
 			/* Verdict */
 			break;
 	} while (!acpar.hotdrop);
-	pr_debug("Exiting %s; resetting sp from %u to %u\n",
-		 __func__, *stackptr, origptr);
-	*stackptr = origptr;
+	pr_debug("Exiting %s; sp at %u\n", __func__, stackidx);
+
  	xt_write_recseq_end(addend);
  	local_bh_enable();
 
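
The comment in the hunk above states the invariant precisely: TEE re-enters ipt_do_table for the duplicated skb while an XT_CONTINUE verdict is still pending on the original, so the nested traversal must not clobber the caller's jumpstack; REJECT/SYNPROXY recursion is harmless because those targets return absolute verdicts. A toy restatement of the stack selection, with per-CPU state modeled as plain globals and a single CPU shown (all names here are illustrative):

#include <stdbool.h>

#define STACKSIZE 16

struct rule_entry;

static struct rule_entry *jumpstack_mem[2 * STACKSIZE];	/* one CPU's region */
static bool skb_duplicated;				/* per-CPU in the kernel */

static struct rule_entry **pick_jumpstack(void)
{
	struct rule_entry **jumpstack = jumpstack_mem;

	/* A TEE-triggered nested traversal sees the flag set and works
	 * on the upper half, leaving the outer traversal's half intact. */
	if (skb_duplicated)
		jumpstack += STACKSIZE;
	return jumpstack;
}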
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index 4bf3dc4..2707652 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -72,7 +72,7 @@
 		tcph->cwr = einfo->proto.tcp.cwr;
 
 	inet_proto_csum_replace2(&tcph->check, skb,
-				 oldval, ((__be16 *)tcph)[6], 0);
+				 oldval, ((__be16 *)tcph)[6], false);
 	return true;
 }
 
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 30ad955..8a2caaf 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -280,7 +280,7 @@
 		return -EINVAL;
 	}
 
-	h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
+	h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
 	if (h) {
 		struct sockaddr_in sin;
 		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 80d5554..cdde3ec 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -134,9 +134,11 @@
 	struct nf_conntrack_tuple innertuple, origtuple;
 	const struct nf_conntrack_l4proto *innerproto;
 	const struct nf_conntrack_tuple_hash *h;
-	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+	const struct nf_conntrack_zone *zone;
+	struct nf_conntrack_zone tmp;
 
 	NF_CT_ASSERT(skb->nfct == NULL);
+	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
 
 	/* Are they talking about one of our connections? */
 	if (!nf_ct_get_tuplepr(skb,
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index c88b7d4..9306ec4 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -43,22 +43,22 @@
 static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
 					      struct sk_buff *skb)
 {
-	u16 zone = NF_CT_DEFAULT_ZONE;
-
+	u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-	if (skb->nfct)
-		zone = nf_ct_zone((struct nf_conn *)skb->nfct);
-#endif
+	if (skb->nfct) {
+		enum ip_conntrack_info ctinfo;
+		const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	if (skb->nf_bridge &&
-	    skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
-		return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
+		zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
+	}
 #endif
+	if (nf_bridge_in_prerouting(skb))
+		return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id;
+
 	if (hooknum == NF_INET_PRE_ROUTING)
-		return IP_DEFRAG_CONNTRACK_IN + zone;
+		return IP_DEFRAG_CONNTRACK_IN + zone_id;
 	else
-		return IP_DEFRAG_CONNTRACK_OUT + zone;
+		return IP_DEFRAG_CONNTRACK_OUT + zone_id;
 }
 
 static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
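
With zones now carrying per-direction ids, the reworked helper folds the zone id into the defrag "user" key, so fragments from different conntrack zones never share a reassembly queue. The key construction is plain base-plus-offset arithmetic; a minimal restatement (the base values below are illustrative, not the kernel's enum):

enum defrag_base {
	DEFRAG_CONNTRACK_IN  = 1,	/* illustrative bases, spaced so */
	DEFRAG_CONNTRACK_OUT = 65537,	/* zone ids cannot collide */
};

/* One distinct reassembly key per (hook direction, zone id) pair. */
static unsigned int defrag_user(enum defrag_base base, unsigned short zone_id)
{
	return (unsigned int)base + zone_id;
}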
diff --git a/net/ipv4/netfilter/nf_dup_ipv4.c b/net/ipv4/netfilter/nf_dup_ipv4.c
new file mode 100644
index 0000000..2d79e6e
--- /dev/null
+++ b/net/ipv4/netfilter/nf_dup_ipv4.c
@@ -0,0 +1,121 @@
+/*
+ * (C) 2007 by Sebastian Claßen <sebastian.classen@freenet.ag>
+ * (C) 2007-2010 by Jan Engelhardt <jengelh@medozas.de>
+ *
+ * Extracted from xt_TEE.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 or later, as
+ * published by the Free Software Foundation.
+ */
+#include <linux/ip.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/route.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter.h>
+#include <net/checksum.h>
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+static struct net *pick_net(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+	const struct dst_entry *dst;
+
+	if (skb->dev != NULL)
+		return dev_net(skb->dev);
+	dst = skb_dst(skb);
+	if (dst != NULL && dst->dev != NULL)
+		return dev_net(dst->dev);
+#endif
+	return &init_net;
+}
+
+static bool nf_dup_ipv4_route(struct sk_buff *skb, const struct in_addr *gw,
+			      int oif)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+	struct net *net = pick_net(skb);
+	struct rtable *rt;
+	struct flowi4 fl4;
+
+	memset(&fl4, 0, sizeof(fl4));
+	if (oif != -1)
+		fl4.flowi4_oif = oif;
+
+	fl4.daddr = gw->s_addr;
+	fl4.flowi4_tos = RT_TOS(iph->tos);
+	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+	fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
+	rt = ip_route_output_key(net, &fl4);
+	if (IS_ERR(rt))
+		return false;
+
+	skb_dst_drop(skb);
+	skb_dst_set(skb, &rt->dst);
+	skb->dev      = rt->dst.dev;
+	skb->protocol = htons(ETH_P_IP);
+
+	return true;
+}
+
+void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum,
+		 const struct in_addr *gw, int oif)
+{
+	struct iphdr *iph;
+
+	if (this_cpu_read(nf_skb_duplicated))
+		return;
+	/*
+	 * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
+	 * the original skb, which should continue on its way as if nothing had
+	 * happened. The copy should be independently delivered to the gateway.
+	 */
+	skb = pskb_copy(skb, GFP_ATOMIC);
+	if (skb == NULL)
+		return;
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+	/* Avoid counting cloned packets towards the original connection. */
+	nf_conntrack_put(skb->nfct);
+	skb->nfct     = &nf_ct_untracked_get()->ct_general;
+	skb->nfctinfo = IP_CT_NEW;
+	nf_conntrack_get(skb->nfct);
+#endif
+	/*
+	 * If we are in PREROUTING/INPUT, the checksum must be recalculated
+	 * since the length could have changed as a result of defragmentation.
+	 *
+	 * We also decrease the TTL to mitigate potential loops between two
+	 * hosts.
+	 *
+	 * Set %IP_DF so that the original source is notified of a potentially
+	 * decreased MTU on the clone route. IPv6 does this too.
+	 */
+	iph = ip_hdr(skb);
+	iph->frag_off |= htons(IP_DF);
+	if (hooknum == NF_INET_PRE_ROUTING ||
+	    hooknum == NF_INET_LOCAL_IN)
+		--iph->ttl;
+	ip_send_check(iph);
+
+	if (nf_dup_ipv4_route(skb, gw, oif)) {
+		__this_cpu_write(nf_skb_duplicated, true);
+		ip_local_out(skb);
+		__this_cpu_write(nf_skb_duplicated, false);
+	} else {
+		kfree_skb(skb);
+	}
+}
+EXPORT_SYMBOL_GPL(nf_dup_ipv4);
+
+MODULE_AUTHOR("Sebastian Claßen <sebastian.classen@freenet.ag>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_DESCRIPTION("nf_dup_ipv4: Duplicate IPv4 packet");
+MODULE_LICENSE("GPL");
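
The this_cpu_read(nf_skb_duplicated) test on entry is what keeps duplication from cascading: the copy emitted through ip_local_out() traverses the hooks while the flag is set and is therefore never duplicated again. A thread-local sketch of that guard (thread-local storage stands in for per-CPU data, and dup_and_send() is a placeholder for the route-and-transmit step):

#include <stdbool.h>

static _Thread_local bool skb_duplicated;

static void dup_and_send(void)
{
	/* clone the packet, route the clone, transmit it */
}

static void nf_dup_sketch(void)
{
	if (skb_duplicated)
		return;			/* already inside a duplication */

	skb_duplicated = true;		/* hooks seen by the copy skip dup */
	dup_and_send();
	skb_duplicated = false;
}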
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index e59cc05..22f4579 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -120,7 +120,7 @@
 		oldip = iph->daddr;
 		newip = t->dst.u3.ip;
 	}
-	inet_proto_csum_replace4(check, skb, oldip, newip, 1);
+	inet_proto_csum_replace4(check, skb, oldip, newip, true);
 }
 
 static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
@@ -151,7 +151,7 @@
 		}
 	} else
 		inet_proto_csum_replace2(check, skb,
-					 htons(oldlen), htons(datalen), 1);
+					 htons(oldlen), htons(datalen), true);
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c
index 4557b4a..7b98baa 100644
--- a/net/ipv4/netfilter/nf_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c
@@ -67,7 +67,7 @@
 
 	hdr = (struct icmphdr *)(skb->data + hdroff);
 	inet_proto_csum_replace2(&hdr->checksum, skb,
-				 hdr->un.echo.id, tuple->src.u.icmp.id, 0);
+				 hdr->un.echo.id, tuple->src.u.icmp.id, false);
 	hdr->un.echo.id = tuple->src.u.icmp.id;
 	return true;
 }
diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c
new file mode 100644
index 0000000..b45932d
--- /dev/null
+++ b/net/ipv4/netfilter/nft_dup_ipv4.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2015 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+
+struct nft_dup_ipv4 {
+	enum nft_registers	sreg_addr:8;
+	enum nft_registers	sreg_dev:8;
+};
+
+static void nft_dup_ipv4_eval(const struct nft_expr *expr,
+			      struct nft_regs *regs,
+			      const struct nft_pktinfo *pkt)
+{
+	struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+	struct in_addr gw = {
+		.s_addr = (__force __be32)regs->data[priv->sreg_addr],
+	};
+	int oif = regs->data[priv->sreg_dev];
+
+	nf_dup_ipv4(pkt->skb, pkt->ops->hooknum, &gw, oif);
+}
+
+static int nft_dup_ipv4_init(const struct nft_ctx *ctx,
+			     const struct nft_expr *expr,
+			     const struct nlattr * const tb[])
+{
+	struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+	int err;
+
+	if (tb[NFTA_DUP_SREG_ADDR] == NULL)
+		return -EINVAL;
+
+	priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
+	err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in_addr));
+	if (err < 0)
+		return err;
+
+	if (tb[NFTA_DUP_SREG_DEV] != NULL) {
+		priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+		return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+	}
+	return 0;
+}
+
+static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+
+	if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) ||
+	    nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_dup_ipv4_type;
+static const struct nft_expr_ops nft_dup_ipv4_ops = {
+	.type		= &nft_dup_ipv4_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_dup_ipv4)),
+	.eval		= nft_dup_ipv4_eval,
+	.init		= nft_dup_ipv4_init,
+	.dump		= nft_dup_ipv4_dump,
+};
+
+static const struct nla_policy nft_dup_ipv4_policy[NFTA_DUP_MAX + 1] = {
+	[NFTA_DUP_SREG_ADDR]	= { .type = NLA_U32 },
+	[NFTA_DUP_SREG_DEV]	= { .type = NLA_U32 },
+};
+
+static struct nft_expr_type nft_dup_ipv4_type __read_mostly = {
+	.family		= NFPROTO_IPV4,
+	.name		= "dup",
+	.ops		= &nft_dup_ipv4_ops,
+	.policy		= nft_dup_ipv4_policy,
+	.maxattr	= NFTA_DUP_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_dup_ipv4_module_init(void)
+{
+	return nft_register_expr(&nft_dup_ipv4_type);
+}
+
+static void __exit nft_dup_ipv4_module_exit(void)
+{
+	nft_unregister_expr(&nft_dup_ipv4_type);
+}
+
+module_init(nft_dup_ipv4_module_init);
+module_exit(nft_dup_ipv4_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "dup");
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 05ff44b..e89094a 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -363,7 +363,8 @@
 						    scoped);
 		rcu_read_unlock();
 
-		if (!(isk->freebind || isk->transparent || has_addr ||
+		if (!(net->ipv6.sysctl.ip_nonlocal_bind ||
+		      isk->freebind || isk->transparent || has_addr ||
 		      addr_type == IPV6_ADDR_ANY))
 			return -EADDRNOTAVAIL;
 
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index da5d483..3abd9d7 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -300,6 +300,8 @@
 	SNMP_MIB_ITEM("TCPACKSkippedChallenge", LINUX_MIB_TCPACKSKIPPEDCHALLENGE),
 	SNMP_MIB_ITEM("TCPWinProbe", LINUX_MIB_TCPWINPROBE),
 	SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE),
+	SNMP_MIB_ITEM("TCPMTUPFail", LINUX_MIB_TCPMTUPFAIL),
+	SNMP_MIB_ITEM("TCPMTUPSuccess", LINUX_MIB_TCPMTUPSUCCESS),
 	SNMP_MIB_SENTINEL
 };
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e681b85..5f4a556 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -91,6 +91,7 @@
 #include <linux/slab.h>
 #include <linux/jhash.h>
 #include <net/dst.h>
+#include <net/dst_metadata.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
 #include <net/ip.h>
@@ -102,6 +103,7 @@
 #include <net/tcp.h>
 #include <net/icmp.h>
 #include <net/xfrm.h>
+#include <net/lwtunnel.h>
 #include <net/netevent.h>
 #include <net/rtnetlink.h>
 #ifdef CONFIG_SYSCTL
@@ -109,6 +111,8 @@
 #include <linux/kmemleak.h>
 #endif
 #include <net/secure_seq.h>
+#include <net/ip_tunnels.h>
+#include <net/vrf.h>
 
 #define RT_FL_TOS(oldflp4) \
 	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
@@ -834,6 +838,7 @@
 	struct inet_peer *peer;
 	struct net *net;
 	int log_martians;
+	int vif;
 
 	rcu_read_lock();
 	in_dev = __in_dev_get_rcu(rt->dst.dev);
@@ -842,10 +847,11 @@
 		return;
 	}
 	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
+	vif = vrf_master_ifindex_rcu(rt->dst.dev);
 	rcu_read_unlock();
 
 	net = dev_net(rt->dst.dev);
-	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
+	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
 	if (!peer) {
 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
 			  rt_nexthop(rt, ip_hdr(skb)->daddr));
@@ -934,7 +940,8 @@
 		break;
 	}
 
-	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
+	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
+			       vrf_master_ifindex(skb->dev), 1);
 
 	send = true;
 	if (peer) {
@@ -1403,6 +1410,7 @@
 #ifdef CONFIG_IP_ROUTE_CLASSID
 		rt->dst.tclassid = nh->nh_tclassid;
 #endif
+		rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
 		if (unlikely(fnhe))
 			cached = rt_bind_exception(rt, fnhe, daddr);
 		else if (!(rt->dst.flags & DST_NOCACHE))
@@ -1546,7 +1554,6 @@
 	struct rtable *rth;
 	int err;
 	struct in_device *out_dev;
-	unsigned int flags = 0;
 	bool do_cache;
 	u32 itag = 0;
 
@@ -1610,7 +1617,7 @@
 	}
 
 	rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
-	rth->rt_flags = flags;
+	rth->rt_flags = 0;
 	rth->rt_type = res->type;
 	rth->rt_is_input = 1;
 	rth->rt_iif 	= 0;
@@ -1624,6 +1631,14 @@
 	rth->dst.output = ip_output;
 
 	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
+	if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
+		rth->dst.lwtstate->orig_output = rth->dst.output;
+		rth->dst.output = lwtunnel_output;
+	}
+	if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
+		rth->dst.lwtstate->orig_input = rth->dst.input;
+		rth->dst.input = lwtunnel_input;
+	}
 	skb_dst_set(skb, &rth->dst);
 out:
 	err = 0;
@@ -1662,6 +1677,7 @@
 {
 	struct fib_result res;
 	struct in_device *in_dev = __in_dev_get_rcu(dev);
+	struct ip_tunnel_info *tun_info;
 	struct flowi4	fl4;
 	unsigned int	flags = 0;
 	u32		itag = 0;
@@ -1679,6 +1695,13 @@
 	   by fib_lookup.
 	 */
 
+	tun_info = skb_tunnel_info(skb);
+	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
+		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
+	else
+		fl4.flowi4_tun_key.tun_id = 0;
+	skb_dst_drop(skb);
+
 	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
 		goto martian_source;
 
@@ -1710,7 +1733,7 @@
 	 *	Now we are ready to route packet.
 	 */
 	fl4.flowi4_oif = 0;
-	fl4.flowi4_iif = dev->ifindex;
+	fl4.flowi4_iif = vrf_master_ifindex_rcu(dev) ? : dev->ifindex;
 	fl4.flowi4_mark = skb->mark;
 	fl4.flowi4_tos = tos;
 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
@@ -1792,6 +1815,7 @@
 	rth->rt_gateway	= 0;
 	rth->rt_uses_gateway = 0;
 	INIT_LIST_HEAD(&rth->rt_uncached);
+
 	RT_CACHE_STAT_INC(in_slow_tot);
 	if (res.type == RTN_UNREACHABLE) {
 		rth->dst.input= ip_error;
@@ -1981,7 +2005,6 @@
 	rth->rt_gateway = 0;
 	rth->rt_uses_gateway = 0;
 	INIT_LIST_HEAD(&rth->rt_uncached);
-
 	RT_CACHE_STAT_INC(out_slow_tot);
 
 	if (flags & RTCF_LOCAL)
@@ -2004,6 +2027,8 @@
 	}
 
 	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
+	if (lwtunnel_output_redirect(rth->dst.lwtstate))
+		rth->dst.output = lwtunnel_output;
 
 	return rth;
 }
@@ -2110,6 +2135,11 @@
 				fl4->saddr = inet_select_addr(dev_out, 0,
 							      RT_SCOPE_HOST);
 		}
+		if (netif_is_vrf(dev_out) &&
+		    !(fl4->flowi4_flags & FLOWI_FLAG_VRFSRC)) {
+			rth = vrf_dev_get_rth(dev_out);
+			goto out;
+		}
 	}
 
 	if (!fl4->daddr) {
@@ -2261,7 +2291,6 @@
 		rt->rt_uses_gateway = ort->rt_uses_gateway;
 
 		INIT_LIST_HEAD(&rt->rt_uncached);
-
 		dst_free(new);
 	}
 
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 0330ab2..894da3a 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -29,6 +29,7 @@
 static int zero;
 static int one = 1;
 static int four = 4;
+static int thousand = 1000;
 static int gso_max_segs = GSO_MAX_SEGS;
 static int tcp_retr1_max = 255;
 static int ip_local_port_range_min[] = { 1, 1 };
@@ -712,6 +713,24 @@
 		.extra2		= &gso_max_segs,
 	},
 	{
+		.procname	= "tcp_pacing_ss_ratio",
+		.data		= &sysctl_tcp_pacing_ss_ratio,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &thousand,
+	},
+	{
+		.procname	= "tcp_pacing_ca_ratio",
+		.data		= &sysctl_tcp_pacing_ca_ratio,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &thousand,
+	},
+	{
 		.procname	= "tcp_autocorking",
 		.data		= &sysctl_tcp_autocorking,
 		.maxlen		= sizeof(int),
@@ -910,6 +929,13 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "igmp_link_local_mcast_reports",
+		.data		= &sysctl_igmp_llm_reports,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
 	{ }
 };
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 45534a5..b8b8fa1 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -627,6 +627,8 @@
 	sk_mem_charge(sk, skb->truesize);
 	if (tp->nonagle & TCP_NAGLE_PUSH)
 		tp->nonagle &= ~TCP_NAGLE_PUSH;
+
+	tcp_slow_start_after_idle_check(sk);
 }
 
 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index c037644..fd1405d 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -146,7 +146,7 @@
 	if (!tcp_is_cwnd_limited(sk))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
+	if (tcp_in_slow_start(tp))
 		tcp_slow_start(tp, acked);
 	else {
 		bictcp_update(ca, tp->snd_cwnd);
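
This hunk, and the matching ones in tcp_cdg, tcp_cong, tcp_cubic, tcp_highspeed, tcp_htcp, tcp_hybla, tcp_illinois, tcp_scalable, tcp_vegas, tcp_veno and tcp_metrics below, replace open-coded cwnd/ssthresh comparisons with a shared predicate. As best I can infer from the call sites and the new tcp_slow_start() clamp, the helper uses a strict comparison; a sketch over a pared-down socket view, with that assumption flagged:

#include <stdbool.h>

struct tcp_sock_view {
	unsigned int snd_cwnd;
	unsigned int snd_ssthresh;
};

/* Assumed definition: strict '<', consistent with the clamp in
 * tcp_slow_start() where cwnd stops exactly at ssthresh. */
static inline bool tcp_in_slow_start(const struct tcp_sock_view *tp)
{
	return tp->snd_cwnd < tp->snd_ssthresh;
}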
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
index 8c6fd3d..167b6a3 100644
--- a/net/ipv4/tcp_cdg.c
+++ b/net/ipv4/tcp_cdg.c
@@ -264,7 +264,7 @@
 	u32 prior_snd_cwnd;
 	u32 incr;
 
-	if (tp->snd_cwnd < tp->snd_ssthresh && hystart_detect)
+	if (tcp_in_slow_start(tp) && hystart_detect)
 		tcp_cdg_hystart_update(sk);
 
 	if (after(ack, ca->rtt_seq) && ca->rtt.v64) {
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 84be008..93c4dc3 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -114,16 +114,19 @@
 }
 EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
 
-u32 tcp_ca_get_key_by_name(const char *name)
+u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca)
 {
 	const struct tcp_congestion_ops *ca;
-	u32 key;
+	u32 key = TCP_CA_UNSPEC;
 
 	might_sleep();
 
 	rcu_read_lock();
 	ca = __tcp_ca_find_autoload(name);
-	key = ca ? ca->key : TCP_CA_UNSPEC;
+	if (ca) {
+		key = ca->key;
+		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
+	}
 	rcu_read_unlock();
 
 	return key;
@@ -365,10 +368,8 @@
  */
 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
 {
-	u32 cwnd = tp->snd_cwnd + acked;
+	u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);
 
-	if (cwnd > tp->snd_ssthresh)
-		cwnd = tp->snd_ssthresh + 1;
 	acked -= cwnd - tp->snd_cwnd;
 	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
 
@@ -413,7 +414,7 @@
 		return;
 
 	/* In "safe" area, increase. */
-	if (tp->snd_cwnd <= tp->snd_ssthresh) {
+	if (tcp_in_slow_start(tp)) {
 		acked = tcp_slow_start(tp, acked);
 		if (!acked)
 			return;
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 06d3d66..28011fb 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -320,7 +320,7 @@
 	if (!tcp_is_cwnd_limited(sk))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh) {
+	if (tcp_in_slow_start(tp)) {
 		if (hystart && after(ack, ca->end_seq))
 			bictcp_hystart_reset(sk);
 		acked = tcp_slow_start(tp, acked);
@@ -439,7 +439,7 @@
 		ca->delay_min = delay;
 
 	/* hystart triggers when cwnd is larger than some threshold */
-	if (hystart && tp->snd_cwnd <= tp->snd_ssthresh &&
+	if (hystart && tcp_in_slow_start(tp) &&
 	    tp->snd_cwnd >= hystart_low_window)
 		hystart_update(sk, delay);
 }
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 882c08a..db78424 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -116,7 +116,7 @@
 	if (!tcp_is_cwnd_limited(sk))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
+	if (tcp_in_slow_start(tp))
 		tcp_slow_start(tp, acked);
 	else {
 		/* Update AIMD parameters.
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 58469ff..82f0d9e 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -236,7 +236,7 @@
 	if (!tcp_is_cwnd_limited(sk))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
+	if (tcp_in_slow_start(tp))
 		tcp_slow_start(tp, acked);
 	else {
 		/* In dangerous area, increase slowly.
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index f963b27..083831e 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -112,7 +112,7 @@
 
 	rho_fractions = ca->rho_3ls - (ca->rho << 3);
 
-	if (tp->snd_cwnd < tp->snd_ssthresh) {
+	if (tcp_in_slow_start(tp)) {
 		/*
 		 * slow start
 		 *      INC = 2^RHO - 1
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index f71002e..2ab9bbb 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -268,7 +268,7 @@
 		return;
 
 	/* In slow start */
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
+	if (tcp_in_slow_start(tp))
 		tcp_slow_start(tp, acked);
 
 	else {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 728f5b3..a8f515b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -109,6 +109,7 @@
 #define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
 #define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
 #define FLAG_ECE		0x40 /* ECE in this ACK				*/
+#define FLAG_LOST_RETRANS	0x80 /* This ACK marks some retransmission lost */
 #define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
 #define FLAG_ORIG_SACK_ACKED	0x200 /* Never retransmitted data are (s)acked	*/
 #define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
@@ -196,11 +197,13 @@
  * and the session is not interactive.
  */
 
-static inline bool tcp_in_quickack_mode(const struct sock *sk)
+static bool tcp_in_quickack_mode(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
+	const struct dst_entry *dst = __sk_dst_get(sk);
 
-	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
+	return (dst && dst_metric(dst, RTAX_QUICKACK)) ||
+		(icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong);
 }
 
 static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
@@ -750,13 +753,29 @@
  * TCP pacing, to smooth the burst on large writes when packets
  * in flight is significantly lower than cwnd (or rwin)
  */
+int sysctl_tcp_pacing_ss_ratio __read_mostly = 200;
+int sysctl_tcp_pacing_ca_ratio __read_mostly = 120;
+
 static void tcp_update_pacing_rate(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	u64 rate;
 
 	/* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
-	rate = (u64)tp->mss_cache * 2 * (USEC_PER_SEC << 3);
+	rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3);
+
+	/* current rate is (cwnd * mss) / srtt
+	 * In Slow Start [1], set sk_pacing_rate to 200 % of the current rate.
+	 * In Congestion Avoidance phase, set it to 120 % of the current rate.
+	 *
+	 * [1] : Normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh)
+	 *	 If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching
+	 *	 end of slow start and should slow down.
+	 */
+	if (tp->snd_cwnd < tp->snd_ssthresh / 2)
+		rate *= sysctl_tcp_pacing_ss_ratio;
+	else
+		rate *= sysctl_tcp_pacing_ca_ratio;
 
 	rate *= max(tp->snd_cwnd, tp->packets_out);
 
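
The ratio selection implements the policy in the comment: pace at 200% of the current rate while well inside slow start, and at 120% once cwnd passes half of ssthresh, using the tcp_pacing_ss_ratio and tcp_pacing_ca_ratio sysctls registered in sysctl_net_ipv4.c earlier in this patch. A standalone sketch of the rate term as far as this hunk shows it (the final division by srtt happens outside these lines):

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC 1000000ULL

int main(void)
{
	unsigned int mss = 1448, snd_cwnd = 10, ssthresh = 40;
	unsigned int ss_ratio = 200, ca_ratio = 120;	/* sysctl defaults */
	uint64_t rate = (uint64_t)mss * ((USEC_PER_SEC / 100) << 3);

	/* percentage scale: the /100 base makes the ratio a percent */
	rate *= (snd_cwnd < ssthresh / 2) ? ss_ratio : ca_ratio;
	rate *= snd_cwnd;		/* assume cwnd >= packets_out here */

	printf("pre-srtt rate term: %llu\n", (unsigned long long)rate);
	return 0;
}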
@@ -1037,7 +1056,7 @@
  * highest SACK block). Also calculate the lowest snd_nxt among the remaining
  * retransmitted skbs to avoid some costly processing per ACKs.
  */
-static void tcp_mark_lost_retrans(struct sock *sk)
+static void tcp_mark_lost_retrans(struct sock *sk, int *flag)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -1078,7 +1097,7 @@
 		if (after(received_upto, ack_seq)) {
 			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 			tp->retrans_out -= tcp_skb_pcount(skb);
-
+			*flag |= FLAG_LOST_RETRANS;
 			tcp_skb_mark_lost_uncond_verify(tp, skb);
 			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
 		} else {
@@ -1818,7 +1837,7 @@
 	    ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
 		tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);
 
-	tcp_mark_lost_retrans(sk);
+	tcp_mark_lost_retrans(sk, &state->flag);
 	tcp_verify_left_out(tp);
 out:
 
@@ -2474,15 +2493,14 @@
 	return false;
 }
 
-/* The cwnd reduction in CWR and Recovery use the PRR algorithm
- * https://datatracker.ietf.org/doc/draft-ietf-tcpm-proportional-rate-reduction/
+/* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937.
  * It computes the number of packets to send (sndcnt) based on packets newly
  * delivered:
  *   1) If the packets in flight is larger than ssthresh, PRR spreads the
  *	cwnd reductions across a full RTT.
- *   2) If packets in flight is lower than ssthresh (such as due to excess
- *	losses and/or application stalls), do not perform any further cwnd
- *	reductions, but instead slow start up to ssthresh.
+ *   2) Otherwise PRR uses packet conservation to send as much as delivered.
+ *      But when the retransmits are acked without further losses, PRR
+ *      slow starts cwnd up to ssthresh to speed up the recovery.
  */
 static void tcp_init_cwnd_reduction(struct sock *sk)
 {
@@ -2499,7 +2517,7 @@
 }
 
 static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
-			       int fast_rexmit)
+			       int fast_rexmit, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int sndcnt = 0;
@@ -2508,16 +2526,18 @@
 				 (tp->packets_out - tp->sacked_out);
 
 	tp->prr_delivered += newly_acked_sacked;
-	if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
+	if (delta < 0) {
 		u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
 			       tp->prior_cwnd - 1;
 		sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
-	} else {
+	} else if ((flag & FLAG_RETRANS_DATA_ACKED) &&
+		   !(flag & FLAG_LOST_RETRANS)) {
 		sndcnt = min_t(int, delta,
 			       max_t(int, tp->prr_delivered - tp->prr_out,
 				     newly_acked_sacked) + 1);
+	} else {
+		sndcnt = min(delta, newly_acked_sacked);
 	}
-
 	sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
 	tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
 }
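
Restated outside the kernel, the three branches above are: proportional reduction while packets in flight still exceed ssthresh, a delivered-bounded slow start back toward ssthresh when retransmissions are acked without further loss, and plain packet conservation otherwise, with the fast-retransmit floor applied last. A self-contained sketch whose parameter names mirror the fields in the hunk:

#include <stdint.h>

static int imax(int a, int b) { return a > b ? a : b; }
static int imin(int a, int b) { return a < b ? a : b; }

static int prr_sndcnt(int ssthresh, int in_flight, int prior_cwnd,
		      int prr_delivered, int prr_out,
		      int newly_acked_sacked,
		      int retrans_acked, int retrans_lost, int fast_rexmit)
{
	int delta = ssthresh - in_flight;
	int sndcnt;

	if (delta < 0) {
		/* Flight still above ssthresh: reduce proportionally. */
		uint64_t dividend = (uint64_t)ssthresh * prr_delivered
				    + prior_cwnd - 1;
		sndcnt = (int)(dividend / prior_cwnd) - prr_out;
	} else if (retrans_acked && !retrans_lost) {
		/* Retransmits acked cleanly: grow toward ssthresh. */
		sndcnt = imin(delta, imax(prr_delivered - prr_out,
					  newly_acked_sacked) + 1);
	} else {
		/* Packet conservation: send only what was delivered. */
		sndcnt = imin(delta, newly_acked_sacked);
	}
	return imax(sndcnt, fast_rexmit ? 1 : 0);
}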
@@ -2578,7 +2598,7 @@
 	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
 		tcp_try_keep_open(sk);
 	} else {
-		tcp_cwnd_reduction(sk, prior_unsacked, 0);
+		tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
 	}
 }
 
@@ -2588,6 +2608,7 @@
 
 	icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
 	icsk->icsk_mtup.probe_size = 0;
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
 }
 
 static void tcp_mtup_probe_success(struct sock *sk)
@@ -2607,6 +2628,7 @@
 	icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
 	icsk->icsk_mtup.probe_size = 0;
 	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
 }
 
 /* Do a simple retransmit without using the backoff mechanisms in
@@ -2675,7 +2697,7 @@
 	tp->prior_ssthresh = 0;
 	tcp_init_undo(tp);
 
-	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+	if (!tcp_in_cwnd_reduction(sk)) {
 		if (!ece_ack)
 			tp->prior_ssthresh = tcp_current_ssthresh(sk);
 		tcp_init_cwnd_reduction(sk);
@@ -2735,7 +2757,7 @@
 
 /* Undo during fast recovery after partial ACK. */
 static bool tcp_try_undo_partial(struct sock *sk, const int acked,
-				 const int prior_unsacked)
+				 const int prior_unsacked, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2751,7 +2773,7 @@
 		 * mark more packets lost or retransmit more.
 		 */
 		if (tp->retrans_out) {
-			tcp_cwnd_reduction(sk, prior_unsacked, 0);
+			tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
 			return true;
 		}
 
@@ -2838,7 +2860,7 @@
 			if (tcp_is_reno(tp) && is_dupack)
 				tcp_add_reno_sack(sk);
 		} else {
-			if (tcp_try_undo_partial(sk, acked, prior_unsacked))
+			if (tcp_try_undo_partial(sk, acked, prior_unsacked, flag))
 				return;
 			/* Partial ACK arrived. Force fast retransmit. */
 			do_lost = tcp_is_reno(tp) ||
@@ -2851,9 +2873,10 @@
 		break;
 	case TCP_CA_Loss:
 		tcp_process_loss(sk, flag, is_dupack);
-		if (icsk->icsk_ca_state != TCP_CA_Open)
+		if (icsk->icsk_ca_state != TCP_CA_Open &&
+		    !(flag & FLAG_LOST_RETRANS))
 			return;
-		/* Fall through to processing in Open state. */
+		/* Change state if cwnd is undone or retransmits are lost */
 	default:
 		if (tcp_is_reno(tp)) {
 			if (flag & FLAG_SND_UNA_ADVANCED)
@@ -2888,7 +2911,7 @@
 
 	if (do_lost)
 		tcp_update_scoreboard(sk, fast_rexmit);
-	tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit);
+	tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit, flag);
 	tcp_xmit_retransmit_queue(sk);
 }
 
@@ -3325,6 +3348,9 @@
 			tp->pred_flags = 0;
 			tcp_fast_path_check(sk);
 
+			if (tcp_send_head(sk))
+				tcp_slow_start_after_idle_check(sk);
+
 			if (nwin > tp->max_window) {
 				tp->max_window = nwin;
 				tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
@@ -3562,10 +3588,6 @@
 				    &sack_state);
 	acked -= tp->packets_out;
 
-	/* Advance cwnd if state allows */
-	if (tcp_may_raise_cwnd(sk, flag))
-		tcp_cong_avoid(sk, ack, acked);
-
 	if (tcp_ack_is_dubious(sk, flag)) {
 		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
 		tcp_fastretrans_alert(sk, acked, prior_unsacked,
@@ -3574,6 +3596,10 @@
 	if (tp->tlp_high_seq)
 		tcp_process_tlp_ack(sk, ack, flag);
 
+	/* Advance cwnd if state allows */
+	if (tcp_may_raise_cwnd(sk, flag))
+		tcp_cong_avoid(sk, ack, acked);
+
 	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
 		struct dst_entry *dst = __sk_dst_get(sk);
 		if (dst)
@@ -3947,7 +3973,6 @@
 static void tcp_fin(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	const struct dst_entry *dst;
 
 	inet_csk_schedule_ack(sk);
 
@@ -3959,9 +3984,7 @@
 	case TCP_ESTABLISHED:
 		/* Move to CLOSE_WAIT */
 		tcp_set_state(sk, TCP_CLOSE_WAIT);
-		dst = __sk_dst_get(sk);
-		if (!dst || !dst_metric(dst, RTAX_QUICKACK))
-			inet_csk(sk)->icsk_ack.pingpong = 1;
+		inet_csk(sk)->icsk_ack.pingpong = 1;
 		break;
 
 	case TCP_CLOSE_WAIT:
@@ -5980,14 +6003,17 @@
 	const struct net *net = sock_net(listen_sk);
 	bool th_ecn = th->ece && th->cwr;
 	bool ect, ecn_ok;
+	u32 ecn_ok_dst;
 
 	if (!th_ecn)
 		return;
 
 	ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);
-	ecn_ok = net->ipv4.sysctl_tcp_ecn || dst_feature(dst, RTAX_FEATURE_ECN);
+	ecn_ok_dst = dst_feature(dst, DST_FEATURE_ECN_MASK);
+	ecn_ok = net->ipv4.sysctl_tcp_ecn || ecn_ok_dst;
 
-	if ((!ect && ecn_ok) || tcp_ca_needs_ecn(listen_sk))
+	if ((!ect && ecn_ok) || tcp_ca_needs_ecn(listen_sk) ||
+	    (ecn_ok_dst & DST_FEATURE_ECN_CA))
 		inet_rsk(req)->ecn_ok = 1;
 }
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0ea2e1c..93898e0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -222,7 +222,7 @@
 	if (err)
 		goto failure;
 
-	inet_set_txhash(sk);
+	sk_set_txhash(sk);
 
 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
 			       inet->inet_sport, inet->inet_dport, sk);
@@ -1277,7 +1277,7 @@
 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
 	newinet->rcv_tos      = ip_hdr(skb)->tos;
 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
-	inet_set_txhash(newsk);
+	sk_set_txhash(newsk);
 	if (inet_opt)
 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
 	newinet->inet_id = newtp->write_seq ^ jiffies;
@@ -1683,8 +1683,7 @@
 							iph->daddr, th->dest,
 							inet_iif(skb));
 		if (sk2) {
-			inet_twsk_deschedule(inet_twsk(sk));
-			inet_twsk_put(inet_twsk(sk));
+			inet_twsk_deschedule_put(inet_twsk(sk));
 			sk = sk2;
 			goto process;
 		}
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index a51d63a..c8cbc2b 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -81,11 +81,7 @@
 static bool addr_same(const struct inetpeer_addr *a,
 		      const struct inetpeer_addr *b)
 {
-	if (a->family != b->family)
-		return false;
-	if (a->family == AF_INET)
-		return a->addr.a4 == b->addr.a4;
-	return ipv6_addr_equal(&a->addr.in6, &b->addr.in6);
+	return inetpeer_addr_cmp(a, b) == 0;
 }
 
 struct tcpm_hash_bucket {
@@ -247,14 +243,14 @@
 	daddr.family = req->rsk_ops->family;
 	switch (daddr.family) {
 	case AF_INET:
-		saddr.addr.a4 = inet_rsk(req)->ir_loc_addr;
-		daddr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
-		hash = (__force unsigned int) daddr.addr.a4;
+		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
+		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
+		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
 		break;
 #if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
-		saddr.addr.in6 = inet_rsk(req)->ir_v6_loc_addr;
-		daddr.addr.in6 = inet_rsk(req)->ir_v6_rmt_addr;
+		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
+		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
 		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
 		break;
 #endif
@@ -285,25 +281,19 @@
 	struct net *net;
 
 	if (tw->tw_family == AF_INET) {
-		saddr.family = AF_INET;
-		saddr.addr.a4 = tw->tw_rcv_saddr;
-		daddr.family = AF_INET;
-		daddr.addr.a4 = tw->tw_daddr;
-		hash = (__force unsigned int) daddr.addr.a4;
+		inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
+		inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
+		hash = ipv4_addr_hash(tw->tw_daddr);
 	}
 #if IS_ENABLED(CONFIG_IPV6)
 	else if (tw->tw_family == AF_INET6) {
 		if (ipv6_addr_v4mapped(&tw->tw_v6_daddr)) {
-			saddr.family = AF_INET;
-			saddr.addr.a4 = tw->tw_rcv_saddr;
-			daddr.family = AF_INET;
-			daddr.addr.a4 = tw->tw_daddr;
-			hash = (__force unsigned int) daddr.addr.a4;
+			inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
+			inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
+			hash = ipv4_addr_hash(tw->tw_daddr);
 		} else {
-			saddr.family = AF_INET6;
-			saddr.addr.in6 = tw->tw_v6_rcv_saddr;
-			daddr.family = AF_INET6;
-			daddr.addr.in6 = tw->tw_v6_daddr;
+			inetpeer_set_addr_v6(&saddr, &tw->tw_v6_rcv_saddr);
+			inetpeer_set_addr_v6(&daddr, &tw->tw_v6_daddr);
 			hash = ipv6_addr_hash(&tw->tw_v6_daddr);
 		}
 	}
@@ -335,25 +325,19 @@
 	struct net *net;
 
 	if (sk->sk_family == AF_INET) {
-		saddr.family = AF_INET;
-		saddr.addr.a4 = inet_sk(sk)->inet_saddr;
-		daddr.family = AF_INET;
-		daddr.addr.a4 = inet_sk(sk)->inet_daddr;
-		hash = (__force unsigned int) daddr.addr.a4;
+		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
+		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
+		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
 	}
 #if IS_ENABLED(CONFIG_IPV6)
 	else if (sk->sk_family == AF_INET6) {
 		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
-			saddr.family = AF_INET;
-			saddr.addr.a4 = inet_sk(sk)->inet_saddr;
-			daddr.family = AF_INET;
-			daddr.addr.a4 = inet_sk(sk)->inet_daddr;
-			hash = (__force unsigned int) daddr.addr.a4;
+			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
+			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
+			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
 		} else {
-			saddr.family = AF_INET6;
-			saddr.addr.in6 = sk->sk_v6_rcv_saddr;
-			daddr.family = AF_INET6;
-			daddr.addr.in6 = sk->sk_v6_daddr;
+			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
+			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
 			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
 		}
 	}
@@ -461,7 +445,7 @@
 				tcp_metric_set(tm, TCP_METRIC_CWND,
 					       tp->snd_cwnd);
 		}
-	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
+	} else if (!tcp_in_slow_start(tp) &&
 		   icsk->icsk_ca_state == TCP_CA_Open) {
 		/* Cong. avoidance phase, cwnd is reliable. */
 		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
@@ -796,18 +780,18 @@
 	switch (tm->tcpm_daddr.family) {
 	case AF_INET:
 		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
-				    tm->tcpm_daddr.addr.a4) < 0)
+				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
 			goto nla_put_failure;
 		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
-				    tm->tcpm_saddr.addr.a4) < 0)
+				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
 			goto nla_put_failure;
 		break;
 	case AF_INET6:
 		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
-				     &tm->tcpm_daddr.addr.in6) < 0)
+				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
 			goto nla_put_failure;
 		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
-				     &tm->tcpm_saddr.addr.in6) < 0)
+				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
 			goto nla_put_failure;
 		break;
 	default:
@@ -956,20 +940,21 @@
 
 	a = info->attrs[v4];
 	if (a) {
-		addr->family = AF_INET;
-		addr->addr.a4 = nla_get_in_addr(a);
+		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
 		if (hash)
-			*hash = (__force unsigned int) addr->addr.a4;
+			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
 		return 0;
 	}
 	a = info->attrs[v6];
 	if (a) {
+		struct in6_addr in6;
+
 		if (nla_len(a) != sizeof(struct in6_addr))
 			return -EINVAL;
-		addr->family = AF_INET6;
-		addr->addr.in6 = nla_get_in6_addr(a);
+		in6 = nla_get_in6_addr(a);
+		inetpeer_set_addr_v6(addr, &in6);
 		if (hash)
-			*hash = ipv6_addr_hash(&addr->addr.in6);
+			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
 		return 0;
 	}
 	return optional ? 1 : -EAFNOSUPPORT;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 4bc00cb..6d8795b 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -147,8 +147,7 @@
 		if (!th->fin ||
 		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
 kill_with_rst:
-			inet_twsk_deschedule(tw);
-			inet_twsk_put(tw);
+			inet_twsk_deschedule_put(tw);
 			return TCP_TW_RST;
 		}
 
@@ -198,8 +197,7 @@
 			 */
 			if (sysctl_tcp_rfc1337 == 0) {
 kill:
-				inet_twsk_deschedule(tw);
-				inet_twsk_put(tw);
+				inet_twsk_deschedule_put(tw);
 				return TCP_TW_SUCCESS;
 			}
 		}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b1c218d..1188e4f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -137,12 +137,12 @@
 }
 
 /* RFC2861. Reset CWND after an idle period longer than RTO to "restart window".
- * This is the first part of cwnd validation mechanism. */
-static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
+ * This is the first part of the cwnd validation mechanism.
+ */
+void tcp_cwnd_restart(struct sock *sk, s32 delta)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	s32 delta = tcp_time_stamp - tp->lsndtime;
-	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
+	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
 	u32 cwnd = tp->snd_cwnd;
 
 	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
@@ -163,20 +163,14 @@
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	const u32 now = tcp_time_stamp;
-	const struct dst_entry *dst = __sk_dst_get(sk);
-
-	if (sysctl_tcp_slow_start_after_idle &&
-	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
-		tcp_cwnd_restart(sk, __sk_dst_get(sk));
 
 	tp->lsndtime = now;
 
 	/* If it is a reply for ato after last received
 	 * packet, enter pingpong mode.
 	 */
-	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato &&
-	    (!dst || !dst_metric(dst, RTAX_QUICKACK)))
-			icsk->icsk_ack.pingpong = 1;
+	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+		icsk->icsk_ack.pingpong = 1;
 }
 
 /* Account for an ACK we sent. */
@@ -1776,7 +1770,7 @@
 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
 		goto send_now;
 
-	if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_CWR)))
+	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
 		goto send_now;
 
 	/* Avoid bursty behavior by allowing defer
@@ -2151,7 +2145,7 @@
 		tcp_cwnd_validate(sk, is_cwnd_limited);
 		return false;
 	}
-	return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
+	return !tp->packets_out && tcp_send_head(sk);
 }
 
 bool tcp_schedule_loss_probe(struct sock *sk)
@@ -2228,7 +2222,7 @@
 	return false;
 }
 
-/* When probe timeout (PTO) fires, send a new segment if one exists, else
+/* When probe timeout (PTO) fires, try to send a new segment if possible, else
  * retransmit the last segment.
  */
 void tcp_send_loss_probe(struct sock *sk)
@@ -2237,11 +2231,19 @@
 	struct sk_buff *skb;
 	int pcount;
 	int mss = tcp_current_mss(sk);
-	int err = -1;
 
-	if (tcp_send_head(sk)) {
-		err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
-		goto rearm_timer;
+	skb = tcp_send_head(sk);
+	if (skb) {
+		if (tcp_snd_wnd_test(tp, skb, mss)) {
+			pcount = tp->packets_out;
+			tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
+			if (tp->packets_out > pcount)
+				goto probe_sent;
+			goto rearm_timer;
+		}
+		skb = tcp_write_queue_prev(sk, skb);
+	} else {
+		skb = tcp_write_queue_tail(sk);
 	}
 
 	/* At most one outstanding TLP retransmission. */
@@ -2249,7 +2251,6 @@
 		goto rearm_timer;
 
 	/* Retransmit last segment. */
-	skb = tcp_write_queue_tail(sk);
 	if (WARN_ON(!skb))
 		goto rearm_timer;
 
@@ -2264,26 +2265,24 @@
 		if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss,
 					  GFP_ATOMIC)))
 			goto rearm_timer;
-		skb = tcp_write_queue_tail(sk);
+		skb = tcp_write_queue_next(sk, skb);
 	}
 
 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
 		goto rearm_timer;
 
-	err = __tcp_retransmit_skb(sk, skb);
+	if (__tcp_retransmit_skb(sk, skb))
+		goto rearm_timer;
 
 	/* Record snd_nxt for loss detection. */
-	if (likely(!err))
-		tp->tlp_high_seq = tp->snd_nxt;
+	tp->tlp_high_seq = tp->snd_nxt;
 
+probe_sent:
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
+	/* Reset so that tcp_rearm_rto() will restart the timer from now */
+	inet_csk(sk)->icsk_pending = 0;
 rearm_timer:
-	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-				  inet_csk(sk)->icsk_rto,
-				  TCP_RTO_MAX);
-
-	if (likely(!err))
-		NET_INC_STATS_BH(sock_net(sk),
-				 LINUX_MIB_TCPLOSSPROBES);
+	tcp_rearm_rto(sk);
 }
 
 /* Push out any pending frames which were held back due to
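
The tcp_send_loss_probe() rework above changes when a probe counts as sent: new data goes out only if it passes the receive-window test, the last in-flight segment is retransmitted otherwise, and LINUX_MIB_TCPLOSSPROBES plus the icsk_pending reset now happen only on an actual transmission. A condensed, illustrative sketch of the resulting flow (error paths and the tcp_fragment() handling omitted):

	skb = tcp_send_head(sk);
	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
		/* New data fits in the receive window: try to send it. */
		pcount = tp->packets_out;
		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
		sent = tp->packets_out > pcount;
	} else {
		/* Fall back to retransmitting the last segment in flight. */
		skb = skb ? tcp_write_queue_prev(sk, skb)
			  : tcp_write_queue_tail(sk);
		sent = skb && __tcp_retransmit_skb(sk, skb) == 0;
	}
	if (sent) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
		inet_csk(sk)->icsk_pending = 0;	/* RTO restarts from now */
	}
	tcp_rearm_rto(sk);

The closing tcp_rearm_rto() also replaces the old open-coded inet_csk_reset_xmit_timer(..., icsk_rto, TCP_RTO_MAX), so the retransmission timer is armed the same way as everywhere else in the stack.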
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 333bcb2..bf5ea9e 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -22,7 +22,7 @@
 	if (!tcp_is_cwnd_limited(sk))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
+	if (tcp_in_slow_start(tp))
 		tcp_slow_start(tp, acked);
 	else
 		tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
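
This hunk, like the tcp_vegas.c and tcp_veno.c ones below, replaces the open-coded `tp->snd_cwnd <= tp->snd_ssthresh` test with the new tcp_in_slow_start() helper. The helper in include/net/tcp.h reads essentially as follows; note the strict comparison, so slow start is considered over once cwnd has reached ssthresh:

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tp->snd_cwnd < tp->snd_ssthresh;
}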
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 5b752f5..7149ebc 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -649,4 +649,3 @@
 	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
 				  &tcp_keepalive_timer);
 }
-EXPORT_SYMBOL(tcp_init_xmit_timers);
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index a6cea1d..13951c4 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -225,7 +225,7 @@
 			 */
 			diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
 
-			if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) {
+			if (diff > gamma && tcp_in_slow_start(tp)) {
 				/* Going too fast. Time to slow down
 				 * and switch to congestion avoidance.
 				 */
@@ -240,7 +240,7 @@
 				tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
 				tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
 
-			} else if (tp->snd_cwnd <= tp->snd_ssthresh) {
+			} else if (tcp_in_slow_start(tp)) {
 				/* Slow start.  */
 				tcp_slow_start(tp, acked);
 			} else {
@@ -281,7 +281,7 @@
 		vegas->minRTT = 0x7fffffff;
 	}
 	/* Use normal slow start */
-	else if (tp->snd_cwnd <= tp->snd_ssthresh)
+	else if (tcp_in_slow_start(tp))
 		tcp_slow_start(tp, acked);
 }
 
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 112151e..0d094b9 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -150,7 +150,7 @@
 
 		veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;
 
-		if (tp->snd_cwnd <= tp->snd_ssthresh) {
+		if (tcp_in_slow_start(tp)) {
 			/* Slow start.  */
 			tcp_slow_start(tp, acked);
 		} else {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1b8c5ba..c0a15e7 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1013,11 +1013,31 @@
 
 	if (!rt) {
 		struct net *net = sock_net(sk);
+		__u8 flow_flags = inet_sk_flowi_flags(sk);
 
 		fl4 = &fl4_stack;
+
+		/* Unconnected socket. If the output device is enslaved to a
+		 * VRF device, look up the source address from the VRF table.
+		 * This mimics the behavior of ip_route_connect{_init}.
+		 */
+		if (netif_index_is_vrf(net, ipc.oif)) {
+			flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
+					   RT_SCOPE_UNIVERSE, sk->sk_protocol,
+					   (flow_flags | FLOWI_FLAG_VRFSRC),
+					   faddr, saddr, dport,
+					   inet->inet_sport);
+
+			rt = ip_route_output_flow(net, fl4, sk);
+			if (!IS_ERR(rt)) {
+				saddr = fl4->saddr;
+				ip_rt_put(rt);
+			}
+		}
+
 		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
 				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
-				   inet_sk_flowi_flags(sk),
+				   flow_flags,
 				   faddr, saddr, dport, inet->inet_sport);
 
 		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
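
For an unconnected UDP socket the source address is chosen during this send-time route lookup, so when ipc.oif names a VRF master device the VRF's own table has to be consulted first. The hunk above does that with a throwaway FLOWI_FLAG_VRFSRC lookup whose only purpose is to learn saddr; reduced to its shape (illustrative, not a drop-in):

	if (netif_index_is_vrf(net, ipc.oif)) {
		rt = ip_route_output_flow(net, fl4, sk); /* VRF-scoped */
		if (!IS_ERR(rt)) {
			saddr = fl4->saddr;	/* keep the VRF source */
			ip_rt_put(rt);		/* drop the probe route */
		}
	}
	/* ... the regular lookup then runs with the updated saddr. */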
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index 933ea90..aba4286 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -4,9 +4,10 @@
 #include <linux/udp.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <net/dst_metadata.h>
+#include <net/net_namespace.h>
 #include <net/udp.h>
 #include <net/udp_tunnel.h>
-#include <net/net_namespace.h>
 
 int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
 		     struct socket **sockp)
@@ -103,4 +104,26 @@
 }
 EXPORT_SYMBOL_GPL(udp_tunnel_sock_release);
 
+struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb,  unsigned short family,
+				    __be16 flags, __be64 tunnel_id, int md_size)
+{
+	struct metadata_dst *tun_dst;
+	struct ip_tunnel_info *info;
+
+	if (family == AF_INET)
+		tun_dst = ip_tun_rx_dst(skb, flags, tunnel_id, md_size);
+	else
+		tun_dst = ipv6_tun_rx_dst(skb, flags, tunnel_id, md_size);
+	if (!tun_dst)
+		return NULL;
+
+	info = &tun_dst->u.tun_info;
+	info->key.tp_src = udp_hdr(skb)->source;
+	info->key.tp_dst = udp_hdr(skb)->dest;
+	if (udp_hdr(skb)->check)
+		info->key.tun_flags |= TUNNEL_CSUM;
+	return tun_dst;
+}
+EXPORT_SYMBOL_GPL(udp_tun_rx_dst);
+
 MODULE_LICENSE("GPL");
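
udp_tun_rx_dst() gives UDP-based tunnels one shared way to build receive-side metadata dsts, recording the outer UDP ports and whether the outer checksum was present. A hypothetical receive path would use it roughly like this (the TUNNEL_KEY flag and the zero metadata size are assumptions for the example):

	struct metadata_dst *tun_dst;

	tun_dst = udp_tun_rx_dst(skb, AF_INET, TUNNEL_KEY, tunnel_id, 0);
	if (!tun_dst)
		goto drop;
	/* Attach the metadata so upper layers (e.g. a collect_md
	 * device) can inspect the outer header information.
	 */
	skb_dst_set(skb, &tun_dst->dst);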
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index bff6974..bb919b2 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -15,11 +15,12 @@
 #include <net/dst.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
+#include <net/vrf.h>
 
 static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
 
 static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
-					    int tos,
+					    int tos, int oif,
 					    const xfrm_address_t *saddr,
 					    const xfrm_address_t *daddr)
 {
@@ -28,6 +29,7 @@
 	memset(fl4, 0, sizeof(*fl4));
 	fl4->daddr = daddr->a4;
 	fl4->flowi4_tos = tos;
+	fl4->flowi4_oif = oif;
 	if (saddr)
 		fl4->saddr = saddr->a4;
 
@@ -38,22 +40,22 @@
 	return ERR_CAST(rt);
 }
 
-static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
+static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif,
 					  const xfrm_address_t *saddr,
 					  const xfrm_address_t *daddr)
 {
 	struct flowi4 fl4;
 
-	return __xfrm4_dst_lookup(net, &fl4, tos, saddr, daddr);
+	return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr);
 }
 
-static int xfrm4_get_saddr(struct net *net,
+static int xfrm4_get_saddr(struct net *net, int oif,
 			   xfrm_address_t *saddr, xfrm_address_t *daddr)
 {
 	struct dst_entry *dst;
 	struct flowi4 fl4;
 
-	dst = __xfrm4_dst_lookup(net, &fl4, 0, NULL, daddr);
+	dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr);
 	if (IS_ERR(dst))
 		return -EHOSTUNREACH;
 
@@ -106,8 +108,10 @@
 	struct flowi4 *fl4 = &fl->u.ip4;
 	int oif = 0;
 
-	if (skb_dst(skb))
-		oif = skb_dst(skb)->dev->ifindex;
+	if (skb_dst(skb)) {
+		oif = vrf_master_ifindex(skb_dst(skb)->dev) ?
+			: skb_dst(skb)->dev->ifindex;
+	}
 
 	memset(fl4, 0, sizeof(struct flowi4));
 	fl4->flowi4_mark = skb->mark;
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 438a73a..983bb99 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -5,16 +5,15 @@
 #   IPv6 as module will cause a CRASH if you try to unload it
 menuconfig IPV6
 	tristate "The IPv6 protocol"
-	default m
+	default y
 	---help---
-	  This is complemental support for the IP version 6.
-	  You will still be able to do traditional IPv4 networking as well.
+	  Support for IP version 6 (IPv6).
 
 	  For general information about IPv6, see
 	  <https://en.wikipedia.org/wiki/IPv6>.
-	  For Linux IPv6 development information, see <http://www.linux-ipv6.org>.
-	  For specific information about IPv6 under Linux, read the HOWTO at
-	  <http://www.bieringer.de/linux/IPv6/>.
+	  For specific information about IPv6 under Linux, see
+	  Documentation/networking/ipv6.txt and read the HOWTO at
+	  <http://www.tldp.org/HOWTO/Linux+IPv6-HOWTO/>
 
 	  To compile this protocol support as a module, choose M here: the 
 	  module will be called ipv6.
@@ -93,6 +92,25 @@
 
 	  If unsure, say N.
 
+config IPV6_ILA
+	tristate "IPv6: Identifier Locator Addressing (ILA)"
+	select LWTUNNEL
+	---help---
+	  Support for IPv6 Identifier Locator Addressing (ILA).
+
+	  ILA is a mechanism to do network virtualization without
+	  encapsulation. The basic concept of ILA is that we split an
+	  IPv6 address into a 64 bit locator and 64 bit identifier. The
+	  identifier is the identity of an entity in communication
+	  ("who") and the locator expresses the location of the
+	  entity ("where").
+
+	  ILA can be configured using the "encap ila" option of the
+	  "ip -6 route" command. ILA is described in
+	  https://tools.ietf.org/html/draft-herbert-nvo3-ila-00.
+
+	  If unsure, say N.
+
 config INET6_XFRM_TUNNEL
 	tristate
 	select INET6_TUNNEL
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 0f3f199..2c900c7 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -34,6 +34,7 @@
 obj-$(CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION) += xfrm6_mode_ro.o
 obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o
 obj-$(CONFIG_IPV6_MIP6) += mip6.o
+obj-$(CONFIG_IPV6_ILA) += ila.o
 obj-$(CONFIG_NETFILTER)	+= netfilter/
 
 obj-$(CONFIG_IPV6_VTI) += ip6_vti.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 21c2c81..99c0f2b 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -195,6 +195,7 @@
 	.max_addresses		= IPV6_MAX_ADDRESSES,
 	.accept_ra_defrtr	= 1,
 	.accept_ra_from_local	= 0,
+	.accept_ra_min_hop_limit= 1,
 	.accept_ra_pinfo	= 1,
 #ifdef CONFIG_IPV6_ROUTER_PREF
 	.accept_ra_rtr_pref	= 1,
@@ -211,7 +212,9 @@
 	.accept_ra_mtu		= 1,
 	.stable_secret		= {
 		.initialized = false,
-	}
+	},
+	.use_oif_addrs_only	= 0,
+	.ignore_routes_with_linkdown = 0,
 };
 
 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -236,6 +239,7 @@
 	.max_addresses		= IPV6_MAX_ADDRESSES,
 	.accept_ra_defrtr	= 1,
 	.accept_ra_from_local	= 0,
+	.accept_ra_min_hop_limit= 1,
 	.accept_ra_pinfo	= 1,
 #ifdef CONFIG_IPV6_ROUTER_PREF
 	.accept_ra_rtr_pref	= 1,
@@ -253,6 +257,8 @@
 	.stable_secret		= {
 		.initialized = false,
 	},
+	.use_oif_addrs_only	= 0,
+	.ignore_routes_with_linkdown = 0,
 };
 
 /* Check if a valid qdisc is available */
@@ -468,6 +474,9 @@
 	if (type == -1 || type == NETCONFA_PROXY_NEIGH)
 		size += nla_total_size(4);
 
+	if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
+		size += nla_total_size(4);
+
 	return size;
 }
 
@@ -504,6 +513,11 @@
 	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
 		goto nla_put_failure;
 
+	if ((type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
+	    nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+			devconf->ignore_routes_with_linkdown) < 0)
+		goto nla_put_failure;
+
 	nlmsg_end(skb, nlh);
 	return 0;
 
@@ -540,6 +554,7 @@
 	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
 	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
 	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
+	[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]	= { .len = sizeof(int) },
 };
 
 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
@@ -762,6 +777,63 @@
 		rt6_purge_dflt_routers(net);
 	return 1;
 }
+
+static void addrconf_linkdown_change(struct net *net, __s32 newf)
+{
+	struct net_device *dev;
+	struct inet6_dev *idev;
+
+	for_each_netdev(net, dev) {
+		idev = __in6_dev_get(dev);
+		if (idev) {
+			int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
+
+			idev->cnf.ignore_routes_with_linkdown = newf;
+			if (changed)
+				inet6_netconf_notify_devconf(dev_net(dev),
+							     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+							     dev->ifindex,
+							     &idev->cnf);
+		}
+	}
+}
+
+static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
+{
+	struct net *net;
+	int old;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+	net = (struct net *)table->extra2;
+	old = *p;
+	*p = newf;
+
+	if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
+		if ((!newf) ^ (!old))
+			inet6_netconf_notify_devconf(net,
+						     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+						     NETCONFA_IFINDEX_DEFAULT,
+						     net->ipv6.devconf_dflt);
+		rtnl_unlock();
+		return 0;
+	}
+
+	if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
+		net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
+		addrconf_linkdown_change(net, newf);
+		if ((!newf) ^ (!old))
+			inet6_netconf_notify_devconf(net,
+						     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+						     NETCONFA_IFINDEX_ALL,
+						     net->ipv6.devconf_all);
+	}
+	rtnl_unlock();
+
+	return 1;
+}
+
 #endif
 
 /* Nobody refers to this ifaddr, destroy it */
@@ -1358,15 +1430,96 @@
 	return ret;
 }
 
+static int __ipv6_dev_get_saddr(struct net *net,
+				struct ipv6_saddr_dst *dst,
+				struct inet6_dev *idev,
+				struct ipv6_saddr_score *scores,
+				int hiscore_idx)
+{
+	struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
+
+	read_lock_bh(&idev->lock);
+	list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
+		int i;
+
+		/*
+		 * - Tentative Address (RFC2462 section 5.4)
+		 *  - A tentative address is not considered
+		 *    "assigned to an interface" in the traditional
+		 *    sense, unless it is also flagged as optimistic.
+		 * - Candidate Source Address (section 4)
+		 *  - In any case, anycast addresses, multicast
+		 *    addresses, and the unspecified address MUST
+		 *    NOT be included in a candidate set.
+		 */
+		if ((score->ifa->flags & IFA_F_TENTATIVE) &&
+		    (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
+			continue;
+
+		score->addr_type = __ipv6_addr_type(&score->ifa->addr);
+
+		if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
+			     score->addr_type & IPV6_ADDR_MULTICAST)) {
+			net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
+					    idev->dev->name);
+			continue;
+		}
+
+		score->rule = -1;
+		bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
+
+		for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
+			int minihiscore, miniscore;
+
+			minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
+			miniscore = ipv6_get_saddr_eval(net, score, dst, i);
+
+			if (minihiscore > miniscore) {
+				if (i == IPV6_SADDR_RULE_SCOPE &&
+				    score->scopedist > 0) {
+					/*
+					 * special case:
+					 * each remaining entry
+					 * has too small (not enough)
+					 * scope, because ifa entries
+					 * are sorted by their scope
+					 * values.
+					 */
+					goto out;
+				}
+				break;
+			} else if (minihiscore < miniscore) {
+				if (hiscore->ifa)
+					in6_ifa_put(hiscore->ifa);
+
+				in6_ifa_hold(score->ifa);
+
+				swap(hiscore, score);
+				hiscore_idx = 1 - hiscore_idx;
+
+				/* restore our iterator */
+				score->ifa = hiscore->ifa;
+
+				break;
+			}
+		}
+	}
+out:
+	read_unlock_bh(&idev->lock);
+	return hiscore_idx;
+}
+
 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
 		       const struct in6_addr *daddr, unsigned int prefs,
 		       struct in6_addr *saddr)
 {
-	struct ipv6_saddr_score scores[2],
-				*score = &scores[0], *hiscore = &scores[1];
+	struct ipv6_saddr_score scores[2], *hiscore;
 	struct ipv6_saddr_dst dst;
+	struct inet6_dev *idev;
 	struct net_device *dev;
 	int dst_type;
+	bool use_oif_addr = false;
+	int hiscore_idx = 0;
 
 	dst_type = __ipv6_addr_type(daddr);
 	dst.addr = daddr;
@@ -1375,105 +1528,50 @@
 	dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
 	dst.prefs = prefs;
 
-	hiscore->rule = -1;
-	hiscore->ifa = NULL;
+	scores[hiscore_idx].rule = -1;
+	scores[hiscore_idx].ifa = NULL;
 
 	rcu_read_lock();
 
-	for_each_netdev_rcu(net, dev) {
-		struct inet6_dev *idev;
-
-		/* Candidate Source Address (section 4)
-		 *  - multicast and link-local destination address,
-		 *    the set of candidate source address MUST only
-		 *    include addresses assigned to interfaces
-		 *    belonging to the same link as the outgoing
-		 *    interface.
-		 * (- For site-local destination addresses, the
-		 *    set of candidate source addresses MUST only
-		 *    include addresses assigned to interfaces
-		 *    belonging to the same site as the outgoing
-		 *    interface.)
-		 */
-		if (((dst_type & IPV6_ADDR_MULTICAST) ||
-		     dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL) &&
-		    dst.ifindex && dev->ifindex != dst.ifindex)
-			continue;
-
-		idev = __in6_dev_get(dev);
-		if (!idev)
-			continue;
-
-		read_lock_bh(&idev->lock);
-		list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
-			int i;
-
-			/*
-			 * - Tentative Address (RFC2462 section 5.4)
-			 *  - A tentative address is not considered
-			 *    "assigned to an interface" in the traditional
-			 *    sense, unless it is also flagged as optimistic.
-			 * - Candidate Source Address (section 4)
-			 *  - In any case, anycast addresses, multicast
-			 *    addresses, and the unspecified address MUST
-			 *    NOT be included in a candidate set.
-			 */
-			if ((score->ifa->flags & IFA_F_TENTATIVE) &&
-			    (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
-				continue;
-
-			score->addr_type = __ipv6_addr_type(&score->ifa->addr);
-
-			if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
-				     score->addr_type & IPV6_ADDR_MULTICAST)) {
-				net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
-						    dev->name);
-				continue;
-			}
-
-			score->rule = -1;
-			bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
-
-			for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
-				int minihiscore, miniscore;
-
-				minihiscore = ipv6_get_saddr_eval(net, hiscore, &dst, i);
-				miniscore = ipv6_get_saddr_eval(net, score, &dst, i);
-
-				if (minihiscore > miniscore) {
-					if (i == IPV6_SADDR_RULE_SCOPE &&
-					    score->scopedist > 0) {
-						/*
-						 * special case:
-						 * each remaining entry
-						 * has too small (not enough)
-						 * scope, because ifa entries
-						 * are sorted by their scope
-						 * values.
-						 */
-						goto try_nextdev;
-					}
-					break;
-				} else if (minihiscore < miniscore) {
-					if (hiscore->ifa)
-						in6_ifa_put(hiscore->ifa);
-
-					in6_ifa_hold(score->ifa);
-
-					swap(hiscore, score);
-
-					/* restore our iterator */
-					score->ifa = hiscore->ifa;
-
-					break;
-				}
-			}
+	/* Candidate Source Address (section 4)
+	 *  - multicast and link-local destination address,
+	 *    the set of candidate source address MUST only
+	 *    include addresses assigned to interfaces
+	 *    belonging to the same link as the outgoing
+	 *    interface.
+	 * (- For site-local destination addresses, the
+	 *    set of candidate source addresses MUST only
+	 *    include addresses assigned to interfaces
+	 *    belonging to the same site as the outgoing
+	 *    interface.)
+	 *  - "It is RECOMMENDED that the candidate source addresses
+	 *    be the set of unicast addresses assigned to the
+	 *    interface that will be used to send to the destination
+	 *    (the 'outgoing' interface)." (RFC 6724)
+	 */
+	if (dst_dev) {
+		idev = __in6_dev_get(dst_dev);
+		if ((dst_type & IPV6_ADDR_MULTICAST) ||
+		    dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
+		    (idev && idev->cnf.use_oif_addrs_only)) {
+			use_oif_addr = true;
 		}
-try_nextdev:
-		read_unlock_bh(&idev->lock);
+	}
+
+	if (use_oif_addr) {
+		if (idev)
+			hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
+	} else {
+		for_each_netdev_rcu(net, dev) {
+			idev = __in6_dev_get(dev);
+			if (!idev)
+				continue;
+			hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
+		}
 	}
 	rcu_read_unlock();
 
+	hiscore = &scores[hiscore_idx];
 	if (!hiscore->ifa)
 		return -EADDRNOTAVAIL;
 
@@ -3558,7 +3656,7 @@
 
 	/* send a neighbour solicitation for our addr */
 	addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
-	ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any);
+	ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any, NULL);
 out:
 	in6_ifa_put(ifp);
 	rtnl_unlock();
@@ -4560,6 +4658,7 @@
 	array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
 	array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
 	array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
+	array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
 	array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
 #ifdef CONFIG_IPV6_ROUTER_PREF
 	array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
@@ -4585,7 +4684,9 @@
 	array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
 	array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
 	array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
+	array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
 	/* we omit DEVCONF_STABLE_SECRET for now */
+	array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
 }
 
 static inline size_t inet6_ifla6_size(void)
@@ -4605,6 +4706,7 @@
 	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
 	       + nla_total_size(4) /* IFLA_MTU */
 	       + nla_total_size(4) /* IFLA_LINK */
+	       + nla_total_size(1) /* IFLA_OPERSTATE */
 	       + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
 }
 
@@ -4624,18 +4726,24 @@
 }
 
 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
-				      int items, int bytes, size_t syncpoff)
+					int bytes, size_t syncpoff)
 {
-	int i;
-	int pad = bytes - sizeof(u64) * items;
+	int i, c;
+	u64 buff[IPSTATS_MIB_MAX];
+	int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
+
 	BUG_ON(pad < 0);
 
-	/* Use put_unaligned() because stats may not be aligned for u64. */
-	put_unaligned(items, &stats[0]);
-	for (i = 1; i < items; i++)
-		put_unaligned(snmp_fold_field64(mib, i, syncpoff), &stats[i]);
+	memset(buff, 0, sizeof(buff));
+	buff[0] = IPSTATS_MIB_MAX;
 
-	memset(&stats[items], 0, pad);
+	for_each_possible_cpu(c) {
+		for (i = 1; i < IPSTATS_MIB_MAX; i++)
+			buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
+	}
+
+	memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
+	memset(&stats[IPSTATS_MIB_MAX], 0, pad);
 }
 
 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
@@ -4643,8 +4751,8 @@
 {
 	switch (attrtype) {
 	case IFLA_INET6_STATS:
-		__snmp6_fill_stats64(stats, idev->stats.ipv6,
-				     IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp));
+		__snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
+				     offsetof(struct ipstats_mib, syncp));
 		break;
 	case IFLA_INET6_ICMP6STATS:
 		__snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, ICMP6_MIB_MAX, bytes);
@@ -4861,7 +4969,9 @@
 	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
 	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
 	    (dev->ifindex != dev_get_iflink(dev) &&
-	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
+	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
+	    nla_put_u8(skb, IFLA_OPERSTATE,
+		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
 		goto nla_put_failure;
 	protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
 	if (!protoinfo)
@@ -5306,6 +5416,34 @@
 	return err;
 }
 
+static
+int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
+						int write,
+						void __user *buffer,
+						size_t *lenp,
+						loff_t *ppos)
+{
+	int *valp = ctl->data;
+	int val = *valp;
+	loff_t pos = *ppos;
+	struct ctl_table lctl;
+	int ret;
+
+	/* ctl->data points to idev->cnf.ignore_routes_with_linkdown
+	 * we should not modify it until we get the rtnl lock.
+	 */
+	lctl = *ctl;
+	lctl.data = &val;
+
+	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
+
+	if (write)
+		ret = addrconf_fixup_linkdown(ctl, valp, val);
+	if (ret)
+		*ppos = pos;
+	return ret;
+}
+
 static struct addrconf_sysctl_table
 {
 	struct ctl_table_header *sysctl_header;
@@ -5456,6 +5594,13 @@
 			.proc_handler	= proc_dointvec,
 		},
 		{
+			.procname	= "accept_ra_min_hop_limit",
+			.data		= &ipv6_devconf.accept_ra_min_hop_limit,
+			.maxlen		= sizeof(int),
+			.mode		= 0644,
+			.proc_handler	= proc_dointvec,
+		},
+		{
 			.procname	= "accept_ra_pinfo",
 			.data		= &ipv6_devconf.accept_ra_pinfo,
 			.maxlen		= sizeof(int),
@@ -5585,6 +5730,20 @@
 			.proc_handler	= addrconf_sysctl_stable_secret,
 		},
 		{
+			.procname       = "use_oif_addrs_only",
+			.data           = &ipv6_devconf.use_oif_addrs_only,
+			.maxlen         = sizeof(int),
+			.mode           = 0644,
+			.proc_handler   = proc_dointvec,
+		},
+		{
+			.procname	= "ignore_routes_with_linkdown",
+			.data		= &ipv6_devconf.ignore_routes_with_linkdown,
+			.maxlen		= sizeof(int),
+			.mode		= 0644,
+			.proc_handler	= addrconf_sysctl_ignore_routes_with_linkdown,
+		},
+		{
 			/* sentinel */
 		}
 	},
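
The addrconf.c changes above bundle several features: the ignore_routes_with_linkdown knob (with netconf dump/notify plumbing), use_oif_addrs_only, accept_ra_min_hop_limit, IFLA_OPERSTATE in inet6 link dumps, a per-cpu rework of the SNMP stats fold, and a refactor of source address selection into __ipv6_dev_get_saddr() so that, per RFC 6724's recommendation, the candidate set can be restricted to the outgoing interface. The new sysctls propagate the usual way: writing "all" rewrites the default and every existing device, writing "default" only shapes devices created later. A self-contained userspace sketch of flipping the per-device knob, assuming the standard proc path:

#include <stdio.h>

/* Hypothetical helper: equivalent to
 * echo 1 > /proc/sys/net/ipv6/conf/<dev>/ignore_routes_with_linkdown
 */
static int set_linkdown_policy(const char *dev, int val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/proc/sys/net/ipv6/conf/%s/ignore_routes_with_linkdown",
		 dev);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", val);
	return fclose(f);
}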
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index ca09bf4..bfa941f 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -107,7 +107,16 @@
 }
 EXPORT_SYMBOL(inet6addr_notifier_call_chain);
 
-const struct ipv6_stub *ipv6_stub __read_mostly;
+static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1,
+					struct dst_entry **u2,
+					struct flowi6 *u3)
+{
+	return -EAFNOSUPPORT;
+}
+
+const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
+	.ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup,
+};
 EXPORT_SYMBOL_GPL(ipv6_stub);
 
 /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
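
Pointing ipv6_stub at a struct whose ipv6_dst_lookup fails with -EAFNOSUPPORT, rather than leaving the pointer NULL, lets address-family-agnostic callers such as tunnel drivers call straight through it; when IPv6 is unavailable the lookup simply fails. Calling side, sketched:

	err = ipv6_stub->ipv6_dst_lookup(net, sk, &dst, &fl6);
	if (err)	/* -EAFNOSUPPORT when IPv6 is not loaded */
		return err;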
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 7de52b6..44bb66b 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -197,6 +197,7 @@
 	np->mcast_hops	= IPV6_DEFAULT_MCASTHOPS;
 	np->mc_loop	= 1;
 	np->pmtudisc	= IPV6_PMTUDISC_WANT;
+	np->autoflowlabel = ip6_default_np_autolabel(sock_net(sk));
 	sk->sk_ipv6only	= net->ipv6.sysctl.bindv6only;
 
 	/* Init the ipv4 part of the socket since we can have sockets
@@ -342,7 +343,8 @@
 			 */
 			v4addr = LOOPBACK4_IPV6;
 			if (!(addr_type & IPV6_ADDR_MULTICAST))	{
-				if (!(inet->freebind || inet->transparent) &&
+				if (!net->ipv6.sysctl.ip_nonlocal_bind &&
+				    !(inet->freebind || inet->transparent) &&
 				    !ipv6_chk_addr(net, &addr->sin6_addr,
 						   dev, 0)) {
 					err = -EADDRNOTAVAIL;
@@ -679,8 +681,8 @@
 	const struct ipv6_pinfo *np = inet6_sk(sk);
 
 	if (np->rxopt.all) {
-		if ((opt->hop && (np->rxopt.bits.hopopts ||
-				  np->rxopt.bits.ohopopts)) ||
+		if (((opt->flags & IP6SKB_HOPBYHOP) &&
+		     (np->rxopt.bits.hopopts || np->rxopt.bits.ohopopts)) ||
 		    (ip6_flowinfo((struct ipv6hdr *) skb_network_header(skb)) &&
 		     np->rxopt.bits.rxflow) ||
 		    (opt->srcrt && (np->rxopt.bits.srcrt ||
@@ -766,10 +768,10 @@
 	net->ipv6.sysctl.bindv6only = 0;
 	net->ipv6.sysctl.icmpv6_time = 1*HZ;
 	net->ipv6.sysctl.flowlabel_consistency = 1;
-	net->ipv6.sysctl.auto_flowlabels = 0;
+	net->ipv6.sysctl.auto_flowlabels = IP6_DEFAULT_AUTO_FLOW_LABELS;
 	net->ipv6.sysctl.idgen_retries = 3;
 	net->ipv6.sysctl.idgen_delay = 1 * HZ;
-	net->ipv6.sysctl.flowlabel_state_ranges = 1;
+	net->ipv6.sysctl.flowlabel_state_ranges = 0;
 	atomic_set(&net->ipv6.fib6_sernum, 1);
 
 	err = ipv6_init_mibs(net);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ed7d4e3f..0630a4d5 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -577,8 +577,10 @@
 
 	work_iph = ah_alloc_tmp(ahash, nfrags + sglists, hdr_len +
 				ahp->icv_trunc_len + seqhi_len);
-	if (!work_iph)
+	if (!work_iph) {
+		err = -ENOMEM;
 		goto out;
+	}
 
 	auth_data = ah_tmp_auth((u8 *)work_iph, hdr_len);
 	seqhi = (__be32 *)(auth_data + ahp->icv_trunc_len);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index b10a889..9aadd57 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -199,7 +199,7 @@
 		      NULL);
 
 	sk->sk_state = TCP_ESTABLISHED;
-	ip6_set_txhash(sk);
+	sk_set_txhash(sk);
 out:
 	fl6_sock_release(flowlabel);
 	return err;
@@ -568,8 +568,8 @@
 	}
 
 	/* HbH is allowed only once */
-	if (np->rxopt.bits.hopopts && opt->hop) {
-		u8 *ptr = nh + opt->hop;
+	if (np->rxopt.bits.hopopts && (opt->flags & IP6SKB_HOPBYHOP)) {
+		u8 *ptr = nh + sizeof(struct ipv6hdr);
 		put_cmsg(msg, SOL_IPV6, IPV6_HOPOPTS, (ptr[1]+1)<<3, ptr);
 	}
 
@@ -630,8 +630,8 @@
 		int hlim = ipv6_hdr(skb)->hop_limit;
 		put_cmsg(msg, SOL_IPV6, IPV6_2292HOPLIMIT, sizeof(hlim), &hlim);
 	}
-	if (np->rxopt.bits.ohopopts && opt->hop) {
-		u8 *ptr = nh + opt->hop;
+	if (np->rxopt.bits.ohopopts && (opt->flags & IP6SKB_HOPBYHOP)) {
+		u8 *ptr = nh + sizeof(struct ipv6hdr);
 		put_cmsg(msg, SOL_IPV6, IPV6_2292HOPOPTS, (ptr[1]+1)<<3, ptr);
 	}
 	if (np->rxopt.bits.odstopts && opt->dst0) {
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index a7bbbe4..ce203b0 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -632,7 +632,7 @@
 		return -1;
 	}
 
-	opt->hop = sizeof(struct ipv6hdr);
+	opt->flags |= IP6SKB_HOPBYHOP;
 	if (ip6_parse_tlv(tlvprochopopt_lst, skb)) {
 		skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
 		opt = IP6CB(skb);
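
The datagram.c and exthdrs.c hunks above replace the stored hop-by-hop offset (opt->hop) with the IP6SKB_HOPBYHOP flag. A Hop-by-Hop Options header, when present, must immediately follow the fixed IPv6 header (RFC 2460, section 4.1), so the offset is a constant and only presence needs to be recorded; readers then use a fixed position, as in this sketch:

	if (opt->flags & IP6SKB_HOPBYHOP) {
		/* HbH always sits right after the 40-byte fixed header. */
		u8 *ptr = skb_network_header(skb) + sizeof(struct ipv6hdr);

		put_cmsg(msg, SOL_IPV6, IPV6_HOPOPTS, (ptr[1] + 1) << 3, ptr);
	}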
diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c
index 447a7fb..f5e2ba1 100644
--- a/net/ipv6/exthdrs_offload.c
+++ b/net/ipv6/exthdrs_offload.c
@@ -36,6 +36,6 @@
 	return ret;
 
 out_rt:
-	inet_del_offload(&rthdr_offload, IPPROTO_ROUTING);
+	inet6_del_offload(&rthdr_offload, IPPROTO_ROUTING);
 	goto out;
 }
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 713d743..6c2b213 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -329,7 +329,7 @@
 	struct flowi6 fl2;
 	int err;
 
-	err = ip6_dst_lookup(sk, &dst, fl6);
+	err = ip6_dst_lookup(net, sk, &dst, fl6);
 	if (err)
 		return ERR_PTR(err);
 
@@ -361,7 +361,7 @@
 	if (err)
 		goto relookup_failed;
 
-	err = ip6_dst_lookup(sk, &dst2, &fl2);
+	err = ip6_dst_lookup(net, sk, &dst2, &fl2);
 	if (err)
 		goto relookup_failed;
 
@@ -591,7 +591,7 @@
 	else if (!fl6.flowi6_oif)
 		fl6.flowi6_oif = np->ucast_oif;
 
-	err = ip6_dst_lookup(sk, &dst, &fl6);
+	err = ip6_dst_lookup(net, sk, &dst, &fl6);
 	if (err)
 		goto out;
 	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
diff --git a/net/ipv6/ila.c b/net/ipv6/ila.c
new file mode 100644
index 0000000..678d2df
--- /dev/null
+++ b/net/ipv6/ila.c
@@ -0,0 +1,229 @@
+#include <linux/errno.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/types.h>
+#include <net/checksum.h>
+#include <net/ip.h>
+#include <net/ip6_fib.h>
+#include <net/lwtunnel.h>
+#include <net/protocol.h>
+#include <uapi/linux/ila.h>
+
+struct ila_params {
+	__be64 locator;
+	__be64 locator_match;
+	__wsum csum_diff;
+};
+
+static inline struct ila_params *ila_params_lwtunnel(
+	struct lwtunnel_state *lwstate)
+{
+	return (struct ila_params *)lwstate->data;
+}
+
+static inline __wsum compute_csum_diff8(const __be32 *from, const __be32 *to)
+{
+	__be32 diff[] = {
+		~from[0], ~from[1], to[0], to[1],
+	};
+
+	return csum_partial(diff, sizeof(diff), 0);
+}
+
+static inline __wsum get_csum_diff(struct ipv6hdr *ip6h, struct ila_params *p)
+{
+	if (*(__be64 *)&ip6h->daddr == p->locator_match)
+		return p->csum_diff;
+	else
+		return compute_csum_diff8((__be32 *)&ip6h->daddr,
+					  (__be32 *)&p->locator);
+}
+
+static void update_ipv6_locator(struct sk_buff *skb, struct ila_params *p)
+{
+	__wsum diff;
+	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	size_t nhoff = sizeof(struct ipv6hdr);
+
+	/* First update checksum */
+	switch (ip6h->nexthdr) {
+	case NEXTHDR_TCP:
+		if (likely(pskb_may_pull(skb, nhoff + sizeof(struct tcphdr)))) {
+			struct tcphdr *th = (struct tcphdr *)
+					(skb_network_header(skb) + nhoff);
+
+			diff = get_csum_diff(ip6h, p);
+			inet_proto_csum_replace_by_diff(&th->check, skb,
+							diff, true);
+		}
+		break;
+	case NEXTHDR_UDP:
+		if (likely(pskb_may_pull(skb, nhoff + sizeof(struct udphdr)))) {
+			struct udphdr *uh = (struct udphdr *)
+					(skb_network_header(skb) + nhoff);
+
+			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+				diff = get_csum_diff(ip6h, p);
+				inet_proto_csum_replace_by_diff(&uh->check, skb,
+								diff, true);
+				if (!uh->check)
+					uh->check = CSUM_MANGLED_0;
+			}
+		}
+		break;
+	case NEXTHDR_ICMP:
+		if (likely(pskb_may_pull(skb,
+					 nhoff + sizeof(struct icmp6hdr)))) {
+			struct icmp6hdr *ih = (struct icmp6hdr *)
+					(skb_network_header(skb) + nhoff);
+
+			diff = get_csum_diff(ip6h, p);
+			inet_proto_csum_replace_by_diff(&ih->icmp6_cksum, skb,
+							diff, true);
+		}
+		break;
+	}
+
+	/* Now change destination address */
+	*(__be64 *)&ip6h->daddr = p->locator;
+}
+
+static int ila_output(struct sock *sk, struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+
+	if (skb->protocol != htons(ETH_P_IPV6))
+		goto drop;
+
+	update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+
+	return dst->lwtstate->orig_output(sk, skb);
+
+drop:
+	kfree_skb(skb);
+	return -EINVAL;
+}
+
+static int ila_input(struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+
+	if (skb->protocol != htons(ETH_P_IPV6))
+		goto drop;
+
+	update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+
+	return dst->lwtstate->orig_input(skb);
+
+drop:
+	kfree_skb(skb);
+	return -EINVAL;
+}
+
+static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
+	[ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
+};
+
+static int ila_build_state(struct net_device *dev, struct nlattr *nla,
+			   unsigned int family, const void *cfg,
+			   struct lwtunnel_state **ts)
+{
+	struct ila_params *p;
+	struct nlattr *tb[ILA_ATTR_MAX + 1];
+	size_t encap_len = sizeof(*p);
+	struct lwtunnel_state *newts;
+	const struct fib6_config *cfg6 = cfg;
+	int ret;
+
+	if (family != AF_INET6)
+		return -EINVAL;
+
+	ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla,
+			       ila_nl_policy);
+	if (ret < 0)
+		return ret;
+
+	if (!tb[ILA_ATTR_LOCATOR])
+		return -EINVAL;
+
+	newts = lwtunnel_state_alloc(encap_len);
+	if (!newts)
+		return -ENOMEM;
+
+	newts->len = encap_len;
+	p = ila_params_lwtunnel(newts);
+
+	p->locator = (__force __be64)nla_get_u64(tb[ILA_ATTR_LOCATOR]);
+
+	if (cfg6->fc_dst_len > sizeof(__be64)) {
+		/* Precompute checksum difference for translation since we
+		 * know both the old locator and the new one.
+		 */
+		p->locator_match = *(__be64 *)&cfg6->fc_dst;
+		p->csum_diff = compute_csum_diff8(
+			(__be32 *)&p->locator_match, (__be32 *)&p->locator);
+	}
+
+	newts->type = LWTUNNEL_ENCAP_ILA;
+	newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT |
+			LWTUNNEL_STATE_INPUT_REDIRECT;
+
+	*ts = newts;
+
+	return 0;
+}
+
+static int ila_fill_encap_info(struct sk_buff *skb,
+			       struct lwtunnel_state *lwtstate)
+{
+	struct ila_params *p = ila_params_lwtunnel(lwtstate);
+
+	if (nla_put_u64(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+	/* No encapsulation overhead */
+	return 0;
+}
+
+static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+	struct ila_params *a_p = ila_params_lwtunnel(a);
+	struct ila_params *b_p = ila_params_lwtunnel(b);
+
+	return (a_p->locator != b_p->locator);
+}
+
+static const struct lwtunnel_encap_ops ila_encap_ops = {
+	.build_state = ila_build_state,
+	.output = ila_output,
+	.input = ila_input,
+	.fill_encap = ila_fill_encap_info,
+	.get_encap_size = ila_encap_nlsize,
+	.cmp_encap = ila_encap_cmp,
+};
+
+static int __init ila_init(void)
+{
+	return lwtunnel_encap_add_ops(&ila_encap_ops, LWTUNNEL_ENCAP_ILA);
+}
+
+static void __exit ila_fini(void)
+{
+	lwtunnel_encap_del_ops(&ila_encap_ops, LWTUNNEL_ENCAP_ILA);
+}
+
+module_init(ila_init);
+module_exit(ila_fini);
+MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>");
+MODULE_LICENSE("GPL");
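
A property worth noting in ila.c above: the locator rewrite is checksum-neutral for the transport layer. Overwriting the top 64 bits of the destination address would invalidate TCP/UDP/ICMPv6 checksums (they cover the pseudo-header), so instead of recomputing them, compute_csum_diff8() folds the old and new 8-byte values into one difference that inet_proto_csum_replace_by_diff() applies to the existing checksum; when the route prefix pins the old locator, ila_build_state() precomputes that difference once. The same incremental-update arithmetic (RFC 1624) in standalone form, for illustration:

#include <stdint.h>

/* Fold a 32-bit accumulator into 16 bits of ones'-complement sum. */
static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* RFC 1624: HC' = ~(~HC + ~m + m'), shown here for one 16-bit word;
 * the kernel does the same over the 8-byte locator.
 */
static uint16_t csum_update16(uint16_t check, uint16_t old, uint16_t new_w)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old;
	sum += new_w;
	return csum_fold(sum);
}

As the Kconfig help text notes, routes pick this up through the lightweight tunnel infrastructure ("encap ila" on ip -6 route).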
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b4fd96d..6ac8dad 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -207,7 +207,6 @@
 	struct sock *sk2;
 	const struct hlist_nulls_node *node;
 	struct inet_timewait_sock *tw = NULL;
-	int twrefcnt = 0;
 
 	spin_lock(lock);
 
@@ -234,21 +233,17 @@
 	WARN_ON(!sk_unhashed(sk));
 	__sk_nulls_add_node_rcu(sk, &head->chain);
 	if (tw) {
-		twrefcnt = inet_twsk_unhash(tw);
+		sk_nulls_del_node_init_rcu((struct sock *)tw);
 		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
 	}
 	spin_unlock(lock);
-	if (twrefcnt)
-		inet_twsk_put(tw);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 
 	if (twp) {
 		*twp = tw;
 	} else if (tw) {
 		/* Silly. Should hash-dance instead... */
-		inet_twsk_deschedule(tw);
-
-		inet_twsk_put(tw);
+		inet_twsk_deschedule_put(tw);
 	}
 	return 0;
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 548c623..418d982 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -32,6 +32,7 @@
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 #include <net/addrconf.h>
+#include <net/lwtunnel.h>
 
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 69f4f68..4038c69 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -729,7 +729,7 @@
 	 */
 	ipv6h = ipv6_hdr(skb);
 	ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
-		     ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
+		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
 	ipv6h->hop_limit = tunnel->parms.hop_limit;
 	ipv6h->nexthdr = proto;
 	ipv6h->saddr = fl6->saddr;
@@ -1183,7 +1183,8 @@
 
 	ip6_flow_hdr(ipv6h, 0,
 		     ip6_make_flowlabel(dev_net(dev), skb,
-					t->fl.u.ip6.flowlabel, false));
+					t->fl.u.ip6.flowlabel, true,
+					&t->fl.u.ip6));
 	ipv6h->hop_limit = t->parms.hop_limit;
 	ipv6h->nexthdr = NEXTHDR_GRE;
 	ipv6h->saddr = t->parms.laddr;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 57990c9..adba03a 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -45,6 +45,7 @@
 #include <net/addrconf.h>
 #include <net/xfrm.h>
 #include <net/inet_ecn.h>
+#include <net/dst_metadata.h>
 
 int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb)
 {
@@ -55,7 +56,7 @@
 		if (ipprot && ipprot->early_demux)
 			ipprot->early_demux(skb);
 	}
-	if (!skb_dst(skb))
+	if (!skb_valid_dst(skb))
 		ip6_route_input(skb);
 
 	return dst_input(skb);
@@ -98,7 +99,7 @@
 	 * arrived via the sending interface (ethX), because of the
 	 * nature of scoping architecture. --yoshfuji
 	 */
-	IP6CB(skb)->iif = skb_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
+	IP6CB(skb)->iif = skb_valid_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
 
 	if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
 		goto err;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d5f7716..26ea479 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -207,7 +207,7 @@
 		hlimit = ip6_dst_hoplimit(dst);
 
 	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
-						     np->autoflowlabel));
+						     np->autoflowlabel, fl6));
 
 	hdr->payload_len = htons(seg_len);
 	hdr->nexthdr = proto;
@@ -881,10 +881,9 @@
 	return dst;
 }
 
-static int ip6_dst_lookup_tail(struct sock *sk,
+static int ip6_dst_lookup_tail(struct net *net, struct sock *sk,
 			       struct dst_entry **dst, struct flowi6 *fl6)
 {
-	struct net *net = sock_net(sk);
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 	struct neighbour *n;
 	struct rt6_info *rt;
@@ -994,10 +993,11 @@
  *
  *	It returns zero on success, or a standard errno code on error.
  */
-int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
+int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
+		   struct flowi6 *fl6)
 {
 	*dst = NULL;
-	return ip6_dst_lookup_tail(sk, dst, fl6);
+	return ip6_dst_lookup_tail(net, sk, dst, fl6);
 }
 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
 
@@ -1018,11 +1018,13 @@
 	struct dst_entry *dst = NULL;
 	int err;
 
-	err = ip6_dst_lookup_tail(sk, &dst, fl6);
+	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
 	if (err)
 		return ERR_PTR(err);
 	if (final_dst)
 		fl6->daddr = *final_dst;
+	if (!fl6->flowi6_oif)
+		fl6->flowi6_oif = dst->dev->ifindex;
 
 	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
 }
@@ -1050,7 +1052,7 @@
 
 	dst = ip6_sk_dst_check(sk, dst, fl6);
 
-	err = ip6_dst_lookup_tail(sk, &dst, fl6);
+	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
 	if (err)
 		return ERR_PTR(err);
 	if (final_dst)
@@ -1647,7 +1649,7 @@
 
 	ip6_flow_hdr(hdr, v6_cork->tclass,
 		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
-					np->autoflowlabel));
+					np->autoflowlabel, fl6));
 	hdr->hop_limit = v6_cork->hop_limit;
 	hdr->nexthdr = proto;
 	hdr->saddr = fl6->saddr;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2e67b66..b0ab420 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1095,7 +1095,7 @@
 	skb_reset_network_header(skb);
 	ipv6h = ipv6_hdr(skb);
 	ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
-		     ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
+		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
 	ipv6h->hop_limit = t->parms.hop_limit;
 	ipv6h->nexthdr = proto;
 	ipv6h->saddr = fl6->saddr;
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index e1a1136..14dacf1 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -23,6 +23,15 @@
 	if (err < 0)
 		goto error;
 
+	if (cfg->ipv6_v6only) {
+		int val = 1;
+
+		err = kernel_setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY,
+					(char *) &val, sizeof(val));
+		if (err < 0)
+			goto error;
+	}
+
 	udp6_addr.sin6_family = AF_INET6;
 	memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6,
 	       sizeof(udp6_addr.sin6_addr));
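
Setting IPV6_V6ONLY on the kernel-created socket keeps a v6 tunnel endpoint from also accepting IPv4-mapped traffic. The kernel_setsockopt() call above is the in-kernel twin of the familiar userspace idiom, sketched:

	int val = 1;

	/* Refuse IPv4-mapped addresses on this IPv6 socket. */
	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &val, sizeof(val));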
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index c53331c..64a7135 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -553,7 +553,8 @@
 
 void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
 		   const struct in6_addr *solicit,
-		   const struct in6_addr *daddr, const struct in6_addr *saddr)
+		   const struct in6_addr *daddr, const struct in6_addr *saddr,
+		   struct sk_buff *oskb)
 {
 	struct sk_buff *skb;
 	struct in6_addr addr_buf;
@@ -589,6 +590,9 @@
 		ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
 				       dev->dev_addr);
 
+	if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE) && oskb)
+		skb_dst_copy(skb, oskb);
+
 	ndisc_send_skb(skb, daddr, saddr);
 }
 
@@ -675,12 +679,12 @@
 				  "%s: trying to ucast probe in NUD_INVALID: %pI6\n",
 				  __func__, target);
 		}
-		ndisc_send_ns(dev, neigh, target, target, saddr);
+		ndisc_send_ns(dev, neigh, target, target, saddr, skb);
 	} else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) {
 		neigh_app_ns(neigh);
 	} else {
 		addrconf_addr_solict_mult(target, &mcaddr);
-		ndisc_send_ns(dev, NULL, target, &mcaddr, saddr);
+		ndisc_send_ns(dev, NULL, target, &mcaddr, saddr, skb);
 	}
 }
 
@@ -1074,6 +1078,8 @@
 	struct ndisc_options ndopts;
 	int optlen;
 	unsigned int pref = 0;
+	__u32 old_if_flags;
+	bool send_ifinfo_notify = false;
 
 	__u8 *opt = (__u8 *)(ra_msg + 1);
 
@@ -1144,6 +1150,7 @@
 	 * Remember the managed/otherconf flags from most recently
 	 * received RA message (RFC 2462) -- yoshfuji
 	 */
+	old_if_flags = in6_dev->if_flags;
 	in6_dev->if_flags = (in6_dev->if_flags & ~(IF_RA_MANAGED |
 				IF_RA_OTHERCONF)) |
 				(ra_msg->icmph.icmp6_addrconf_managed ?
@@ -1151,6 +1158,9 @@
 				(ra_msg->icmph.icmp6_addrconf_other ?
 					IF_RA_OTHERCONF : 0);
 
+	if (old_if_flags != in6_dev->if_flags)
+		send_ifinfo_notify = true;
+
 	if (!in6_dev->cnf.accept_ra_defrtr) {
 		ND_PRINTK(2, info,
 			  "RA: %s, defrtr is false for dev: %s\n",
@@ -1225,18 +1235,16 @@
 
 	if (rt)
 		rt6_set_expires(rt, jiffies + (HZ * lifetime));
-	if (ra_msg->icmph.icmp6_hop_limit) {
-		/* Only set hop_limit on the interface if it is higher than
-		 * the current hop_limit.
-		 */
-		if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
+	if (in6_dev->cnf.accept_ra_min_hop_limit < 256 &&
+	    ra_msg->icmph.icmp6_hop_limit) {
+		if (in6_dev->cnf.accept_ra_min_hop_limit <= ra_msg->icmph.icmp6_hop_limit) {
 			in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+			if (rt)
+				dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
+					       ra_msg->icmph.icmp6_hop_limit);
 		} else {
-			ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
+			ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than minimum\n");
 		}
-		if (rt)
-			dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
-				       ra_msg->icmph.icmp6_hop_limit);
 	}
 
 skip_defrtr:
@@ -1254,7 +1262,7 @@
 				rtime = HZ/10;
 			NEIGH_VAR_SET(in6_dev->nd_parms, RETRANS_TIME, rtime);
 			in6_dev->tstamp = jiffies;
-			inet6_ifinfo_notify(RTM_NEWLINK, in6_dev);
+			send_ifinfo_notify = true;
 		}
 
 		rtime = ntohl(ra_msg->reachable_time);
@@ -1271,11 +1279,17 @@
 					      GC_STALETIME, 3 * rtime);
 				in6_dev->nd_parms->reachable_time = neigh_rand_reach_time(rtime);
 				in6_dev->tstamp = jiffies;
-				inet6_ifinfo_notify(RTM_NEWLINK, in6_dev);
+				send_ifinfo_notify = true;
 			}
 		}
 	}
 
+	/*
+	 *	Send a notification if the RA changed managed/otherconf flags or timer settings
+	 */
+	if (send_ifinfo_notify)
+		inet6_ifinfo_notify(RTM_NEWLINK, in6_dev);
+
 skip_linkparms:
 
 	/*
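
Two behavioral changes in the RA path above: ifinfo notifications are coalesced into one inet6_ifinfo_notify() that fires only when the managed/otherconf flags or ND timer values actually changed, and hop-limit acceptance is gated by the new accept_ra_min_hop_limit setting. The acceptance rule restated as a sketch (a minimum of 256 or more disables acceptance outright, since the 8-bit RA field can never reach it):

	if (cnf->accept_ra_min_hop_limit < 256 && ra_hop_limit) {
		if (cnf->accept_ra_min_hop_limit <= ra_hop_limit)
			cnf->hop_limit = ra_hop_limit;	/* accept */
		else	/* log and ignore the too-small value */
			ND_PRINTK(2, warn, "RA: hop_limit below minimum\n");
	}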
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index b552cf0..96833e4 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -47,9 +47,21 @@
 	default NFT_REJECT
 	tristate
 
+config NFT_DUP_IPV6
+	tristate "IPv6 nf_tables packet duplication support"
+	select NF_DUP_IPV6
+	help
+	  This module enables IPv6 packet duplication support for nf_tables.
+
 endif # NF_TABLES_IPV6
 endif # NF_TABLES
 
+config NF_DUP_IPV6
+	tristate "Netfilter IPv6 packet duplication to alternate destination"
+	help
+	  This option enables the nf_dup_ipv6 core, which duplicates an IPv6
+	  packet to be rerouted to another destination.
+
 config NF_REJECT_IPV6
 	tristate "IPv6 packet rejection"
 	default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index c36e0a5..b4f7d0b 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -30,6 +30,8 @@
 # reject
 obj-$(CONFIG_NF_REJECT_IPV6) += nf_reject_ipv6.o
 
+obj-$(CONFIG_NF_DUP_IPV6) += nf_dup_ipv6.o
+
 # nf_tables
 obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
 obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
@@ -37,6 +39,7 @@
 obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o
 obj-$(CONFIG_NFT_MASQ_IPV6) += nft_masq_ipv6.o
 obj-$(CONFIG_NFT_REDIR_IPV6) += nft_redir_ipv6.o
+obj-$(CONFIG_NFT_DUP_IPV6) += nft_dup_ipv6.o
 
 # matches
 obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 3c35ced..0771991 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -305,7 +305,7 @@
 }
 #endif
 
-static inline __pure struct ip6t_entry *
+static inline struct ip6t_entry *
 ip6t_next_entry(const struct ip6t_entry *entry)
 {
 	return (void *)entry + entry->next_offset;
@@ -324,12 +324,13 @@
 	const char *indev, *outdev;
 	const void *table_base;
 	struct ip6t_entry *e, **jumpstack;
-	unsigned int *stackptr, origptr, cpu;
+	unsigned int stackidx, cpu;
 	const struct xt_table_info *private;
 	struct xt_action_param acpar;
 	unsigned int addend;
 
 	/* Initialization */
+	stackidx = 0;
 	indev = state->in ? state->in->name : nulldevname;
 	outdev = state->out ? state->out->name : nulldevname;
 	/* We handle fragments by dealing with the first fragment as
@@ -357,8 +358,16 @@
 	cpu        = smp_processor_id();
 	table_base = private->entries;
 	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
-	stackptr   = per_cpu_ptr(private->stackptr, cpu);
-	origptr    = *stackptr;
+
+	/* Switch to alternate jumpstack if we're being invoked via TEE.
+	 * TEE issues XT_CONTINUE verdict on original skb so we must not
+	 * clobber the jumpstack.
+	 *
+	 * For recursion via REJECT or SYNPROXY the stack will be clobbered
+	 * but that is no problem since those targets issue an absolute verdict.
+	 */
+	if (static_key_false(&xt_tee_enabled))
+		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
 
 	e = get_entry(table_base, private->hook_entry[hook]);
 
@@ -406,20 +415,16 @@
 					verdict = (unsigned int)(-v) - 1;
 					break;
 				}
-				if (*stackptr <= origptr)
+				if (stackidx == 0)
 					e = get_entry(table_base,
 					    private->underflow[hook]);
 				else
-					e = ip6t_next_entry(jumpstack[--*stackptr]);
+					e = ip6t_next_entry(jumpstack[--stackidx]);
 				continue;
 			}
 			if (table_base + v != ip6t_next_entry(e) &&
 			    !(e->ipv6.flags & IP6T_F_GOTO)) {
-				if (*stackptr >= private->stacksize) {
-					verdict = NF_DROP;
-					break;
-				}
-				jumpstack[(*stackptr)++] = e;
+				jumpstack[stackidx++] = e;
 			}
 
 			e = get_entry(table_base, v);
@@ -437,8 +442,6 @@
 			break;
 	} while (!acpar.hotdrop);
 
-	*stackptr = origptr;
-
  	xt_write_recseq_end(addend);
  	local_bh_enable();
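
The traversal rework above drops the per-cpu stackptr (stackidx is a plain local now) and the NF_DROP-on-overflow branch, and handles re-entry through TEE by doubling each CPU's jumpstack: the duplicate skb, processed while nf_skb_duplicated is set, uses the second half so it cannot clobber the original traversal's state. Conceptually:

	/*
	 * private->jumpstack[cpu]:
	 *
	 *   [ 0 .. stacksize-1 ]  normal traversal on this CPU
	 *   [ 0 .. stacksize-1 ]  TEE'd duplicate on the same CPU
	 */
	if (static_key_false(&xt_tee_enabled))
		jumpstack += private->stacksize *
			     __this_cpu_read(nf_skb_duplicated);

REJECT and SYNPROXY may still recurse onto the first stack, but since they terminate with an absolute verdict the clobbered entries are never popped.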
 
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 12331ef..0ed841a 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -35,14 +35,12 @@
 MODULE_DESCRIPTION("Xtables: packet \"rejection\" target for IPv6");
 MODULE_LICENSE("GPL");
 
-
 static unsigned int
 reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct ip6t_reject_info *reject = par->targinfo;
 	struct net *net = dev_net((par->in != NULL) ? par->in : par->out);
 
-	pr_debug("%s: medium point\n", __func__);
 	switch (reject->with) {
 	case IP6T_ICMP6_NO_ROUTE:
 		nf_send_unreach6(net, skb, ICMPV6_NOROUTE, par->hooknum);
@@ -65,8 +63,11 @@
 	case IP6T_TCP_RESET:
 		nf_send_reset6(net, skb, par->hooknum);
 		break;
-	default:
-		net_info_ratelimited("case %u not handled yet\n", reject->with);
+	case IP6T_ICMP6_POLICY_FAIL:
+		nf_send_unreach6(net, skb, ICMPV6_POLICY_FAIL, par->hooknum);
+		break;
+	case IP6T_ICMP6_REJECT_ROUTE:
+		nf_send_unreach6(net, skb, ICMPV6_REJECT_ROUTE, par->hooknum);
 		break;
 	}
 
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index ebbb754..1e4bf99 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -237,7 +237,7 @@
 	nth->ack_seq	= th->ack_seq;
 	tcp_flag_word(nth) = TCP_FLAG_ACK;
 	nth->doff	= tcp_hdr_size / 4;
-	nth->window	= ntohs(htons(th->window) >> opts->wscale);
+	nth->window	= htons(ntohs(th->window) >> opts->wscale);
 	nth->check	= 0;
 	nth->urg_ptr	= 0;
 
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 4ba0c34..7302900 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -251,7 +251,7 @@
 	if (*len < 0 || (unsigned int) *len < sizeof(sin6))
 		return -EINVAL;
 
-	h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
+	h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
 	if (!h) {
 		pr_debug("IP6T_SO_ORIGINAL_DST: Can't find %pI6c/%u-%pI6c/%u.\n",
 			 &tuple.src.u3.ip6, ntohs(tuple.src.u.tcp.port),
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 90388d6..0e6fae1 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -150,7 +150,7 @@
 	struct nf_conntrack_tuple intuple, origtuple;
 	const struct nf_conntrack_tuple_hash *h;
 	const struct nf_conntrack_l4proto *inproto;
-	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+	struct nf_conntrack_zone tmp;
 
 	NF_CT_ASSERT(skb->nfct == NULL);
 
@@ -177,7 +177,8 @@
 
 	*ctinfo = IP_CT_RELATED;
 
-	h = nf_conntrack_find_get(net, zone, &intuple);
+	h = nf_conntrack_find_get(net, nf_ct_zone_tmpl(tmpl, skb, &tmp),
+				  &intuple);
 	if (!h) {
 		pr_debug("icmpv6_error: no match\n");
 		return -NF_ACCEPT;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 6d02498..701cd2b 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -633,6 +633,7 @@
 	kfree_skb(clone);
 	return skb;
 }
+EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
 
 void nf_ct_frag6_consume_orig(struct sk_buff *skb)
 {
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index a45db0b..6d9c0b3 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -33,23 +33,22 @@
 static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
 						struct sk_buff *skb)
 {
-	u16 zone = NF_CT_DEFAULT_ZONE;
-
+	u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-	if (skb->nfct)
-		zone = nf_ct_zone((struct nf_conn *)skb->nfct);
-#endif
+	if (skb->nfct) {
+		enum ip_conntrack_info ctinfo;
+		const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	if (skb->nf_bridge &&
-	    skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
-		return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
+		zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
+	}
 #endif
+	if (nf_bridge_in_prerouting(skb))
+		return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id;
+
 	if (hooknum == NF_INET_PRE_ROUTING)
-		return IP6_DEFRAG_CONNTRACK_IN + zone;
+		return IP6_DEFRAG_CONNTRACK_IN + zone_id;
 	else
-		return IP6_DEFRAG_CONNTRACK_OUT + zone;
-
+		return IP6_DEFRAG_CONNTRACK_OUT + zone_id;
 }
 
 static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c
new file mode 100644
index 0000000..c8ab626
--- /dev/null
+++ b/net/ipv6/netfilter/nf_dup_ipv6.c
@@ -0,0 +1,97 @@
+/*
+ * (C) 2007 by Sebastian Claßen <sebastian.classen@freenet.ag>
+ * (C) 2007-2010 by Jan Engelhardt <jengelh@medozas.de>
+ *
+ * Extracted from xt_TEE.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 or later, as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+static struct net *pick_net(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+	const struct dst_entry *dst;
+
+	if (skb->dev != NULL)
+		return dev_net(skb->dev);
+	dst = skb_dst(skb);
+	if (dst != NULL && dst->dev != NULL)
+		return dev_net(dst->dev);
+#endif
+	return &init_net;
+}
+
+static bool nf_dup_ipv6_route(struct sk_buff *skb, const struct in6_addr *gw,
+			      int oif)
+{
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
+	struct net *net = pick_net(skb);
+	struct dst_entry *dst;
+	struct flowi6 fl6;
+
+	memset(&fl6, 0, sizeof(fl6));
+	if (oif != -1)
+		fl6.flowi6_oif = oif;
+
+	fl6.daddr = *gw;
+	fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) |
+			(iph->flow_lbl[1] << 8) | iph->flow_lbl[2]);
+	dst = ip6_route_output(net, NULL, &fl6);
+	if (dst->error) {
+		dst_release(dst);
+		return false;
+	}
+	skb_dst_drop(skb);
+	skb_dst_set(skb, dst);
+	skb->dev      = dst->dev;
+	skb->protocol = htons(ETH_P_IPV6);
+
+	return true;
+}
+
+void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum,
+		 const struct in6_addr *gw, int oif)
+{
+	if (this_cpu_read(nf_skb_duplicated))
+		return;
+	skb = pskb_copy(skb, GFP_ATOMIC);
+	if (skb == NULL)
+		return;
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+	nf_conntrack_put(skb->nfct);
+	skb->nfct     = &nf_ct_untracked_get()->ct_general;
+	skb->nfctinfo = IP_CT_NEW;
+	nf_conntrack_get(skb->nfct);
+#endif
+	if (hooknum == NF_INET_PRE_ROUTING ||
+	    hooknum == NF_INET_LOCAL_IN) {
+		struct ipv6hdr *iph = ipv6_hdr(skb);
+		--iph->hop_limit;
+	}
+	if (nf_dup_ipv6_route(skb, gw, oif)) {
+		__this_cpu_write(nf_skb_duplicated, true);
+		ip6_local_out(skb);
+		__this_cpu_write(nf_skb_duplicated, false);
+	} else {
+		kfree_skb(skb);
+	}
+}
+EXPORT_SYMBOL_GPL(nf_dup_ipv6);
+
+MODULE_AUTHOR("Sebastian Claßen <sebastian.classen@freenet.ag>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_DESCRIPTION("nf_dup_ipv6: IPv6 packet duplication");
+MODULE_LICENSE("GPL");
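
nf_dup_ipv6() avoids duplication loops with the per-cpu nf_skb_duplicated flag: it bails out when the flag is already set and raises it only around ip6_local_out() on the clone, so rules that match the duplicate cannot duplicate it again. A hypothetical target built on the helper (the struct and names here are for illustration only):

/* Hypothetical target info; the real xt_TEE carries an interface name
 * and resolves it separately, this sketch just carries an ifindex.
 */
struct tee6_info {
	struct in6_addr gw;
	int oif;
};

static unsigned int
tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct tee6_info *info = par->targinfo;

	/* The clone is rerouted toward info->gw; the original skb
	 * continues unmodified.
	 */
	nf_dup_ipv6(skb, par->hooknum, &info->gw, info->oif);
	return XT_CONTINUE;
}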
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index e76900e..70fbaed 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -124,7 +124,7 @@
 		newip = &t->dst.u3.in6;
 	}
 	inet_proto_csum_replace16(check, skb, oldip->s6_addr32,
-				  newip->s6_addr32, 1);
+				  newip->s6_addr32, true);
 }
 
 static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
@@ -155,7 +155,7 @@
 		}
 	} else
 		inet_proto_csum_replace2(check, skb,
-					 htons(oldlen), htons(datalen), 1);
+					 htons(oldlen), htons(datalen), true);
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
diff --git a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
index 2205e8e..57593b00 100644
--- a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
@@ -73,7 +73,7 @@
 	    hdr->icmp6_type == ICMPV6_ECHO_REPLY) {
 		inet_proto_csum_replace2(&hdr->icmp6_cksum, skb,
 					 hdr->icmp6_identifier,
-					 tuple->src.u.icmp.id, 0);
+					 tuple->src.u.icmp.id, false);
 		hdr->icmp6_identifier = tuple->src.u.icmp.id;
 	}
 	return true;
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c
new file mode 100644
index 0000000..0eaa4f6
--- /dev/null
+++ b/net/ipv6/netfilter/nft_dup_ipv6.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2015 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
+
+struct nft_dup_ipv6 {
+	enum nft_registers	sreg_addr:8;
+	enum nft_registers	sreg_dev:8;
+};
+
+static void nft_dup_ipv6_eval(const struct nft_expr *expr,
+			      struct nft_regs *regs,
+			      const struct nft_pktinfo *pkt)
+{
+	struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+	struct in6_addr *gw = (struct in6_addr *)&regs->data[priv->sreg_addr];
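+	/* sreg_dev is optional; -1 means no output device was given */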
+	int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1;
+
+	nf_dup_ipv6(pkt->skb, pkt->ops->hooknum, gw, oif);
+}
+
+static int nft_dup_ipv6_init(const struct nft_ctx *ctx,
+			     const struct nft_expr *expr,
+			     const struct nlattr * const tb[])
+{
+	struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+	int err;
+
+	if (tb[NFTA_DUP_SREG_ADDR] == NULL)
+		return -EINVAL;
+
+	priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
+	err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in6_addr));
+	if (err < 0)
+		return err;
+
+	if (tb[NFTA_DUP_SREG_DEV] != NULL) {
+		priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+		return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+	}
+	return 0;
+}
+
+static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+
+	if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr))
+		goto nla_put_failure;
+	if (priv->sreg_dev &&
+	    nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_dup_ipv6_type;
+static const struct nft_expr_ops nft_dup_ipv6_ops = {
+	.type		= &nft_dup_ipv6_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_dup_ipv6)),
+	.eval		= nft_dup_ipv6_eval,
+	.init		= nft_dup_ipv6_init,
+	.dump		= nft_dup_ipv6_dump,
+};
+
+static const struct nla_policy nft_dup_ipv6_policy[NFTA_DUP_MAX + 1] = {
+	[NFTA_DUP_SREG_ADDR]	= { .type = NLA_U32 },
+	[NFTA_DUP_SREG_DEV]	= { .type = NLA_U32 },
+};
+
+static struct nft_expr_type nft_dup_ipv6_type __read_mostly = {
+	.family		= NFPROTO_IPV6,
+	.name		= "dup",
+	.ops		= &nft_dup_ipv6_ops,
+	.policy		= nft_dup_ipv6_policy,
+	.maxattr	= NFTA_DUP_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_dup_ipv6_module_init(void)
+{
+	return nft_register_expr(&nft_dup_ipv6_type);
+}
+
+static void __exit nft_dup_ipv6_module_exit(void)
+{
+	nft_unregister_expr(&nft_dup_ipv6_type);
+}
+
+module_init(nft_dup_ipv6_module_init);
+module_exit(nft_dup_ipv6_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "dup");
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ca4700c..fdbada156 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -295,7 +295,8 @@
 		 * unspecified and mapped address have a v4 equivalent.
 		 */
 		v4addr = LOOPBACK4_IPV6;
-		if (!(addr_type & IPV6_ADDR_MULTICAST))	{
+		if (!(addr_type & IPV6_ADDR_MULTICAST) &&
+		    !sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) {
 			err = -EADDRNOTAVAIL;
 			if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
 					   dev, 0)) {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index d155864..f45cac6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -54,10 +54,13 @@
 #include <net/tcp.h>
 #include <linux/rtnetlink.h>
 #include <net/dst.h>
+#include <net/dst_metadata.h>
 #include <net/xfrm.h>
 #include <net/netevent.h>
 #include <net/netlink.h>
 #include <net/nexthop.h>
+#include <net/lwtunnel.h>
+#include <net/ip_tunnels.h>
 
 #include <asm/uaccess.h>
 
@@ -535,13 +538,14 @@
 		container_of(w, struct __rt6_probe_work, work);
 
 	addrconf_addr_solict_mult(&work->target, &mcaddr);
-	ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
+	ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL, NULL);
 	dev_put(work->dev);
 	kfree(work);
 }
 
 static void rt6_probe(struct rt6_info *rt)
 {
+	struct __rt6_probe_work *work;
 	struct neighbour *neigh;
 	/*
 	 * Okay, this does not seem to be appropriate
@@ -556,34 +560,33 @@
 	rcu_read_lock_bh();
 	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
 	if (neigh) {
-		write_lock(&neigh->lock);
 		if (neigh->nud_state & NUD_VALID)
 			goto out;
-	}
 
-	if (!neigh ||
-	    time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
-		struct __rt6_probe_work *work;
-
-		work = kmalloc(sizeof(*work), GFP_ATOMIC);
-
-		if (neigh && work)
-			__neigh_set_probe_once(neigh);
-
-		if (neigh)
-			write_unlock(&neigh->lock);
-
-		if (work) {
-			INIT_WORK(&work->work, rt6_probe_deferred);
-			work->target = rt->rt6i_gateway;
-			dev_hold(rt->dst.dev);
-			work->dev = rt->dst.dev;
-			schedule_work(&work->work);
+		work = NULL;
+		write_lock(&neigh->lock);
+		if (!(neigh->nud_state & NUD_VALID) &&
+		    time_after(jiffies,
+			       neigh->updated +
+			       rt->rt6i_idev->cnf.rtr_probe_interval)) {
+			work = kmalloc(sizeof(*work), GFP_ATOMIC);
+			if (work)
+				__neigh_set_probe_once(neigh);
 		}
-	} else {
-out:
 		write_unlock(&neigh->lock);
+	} else {
+		work = kmalloc(sizeof(*work), GFP_ATOMIC);
 	}
+
+	if (work) {
+		INIT_WORK(&work->work, rt6_probe_deferred);
+		work->target = rt->rt6i_gateway;
+		dev_hold(rt->dst.dev);
+		work->dev = rt->dst.dev;
+		schedule_work(&work->work);
+	}
+
+out:
 	rcu_read_unlock_bh();
 }
 #else
@@ -662,6 +665,12 @@
 {
 	int m;
 	bool match_do_rr = false;
+	struct inet6_dev *idev = rt->rt6i_idev;
+	struct net_device *dev = rt->dst.dev;
+
+	if (dev && !netif_carrier_ok(dev) &&
+	    idev->cnf.ignore_routes_with_linkdown)
+		goto out;
 
 	if (rt6_check_expired(rt))
 		goto out;
@@ -1154,6 +1163,7 @@
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct net *net = dev_net(skb->dev);
 	int flags = RT6_LOOKUP_F_HAS_SADDR;
+	struct ip_tunnel_info *tun_info;
 	struct flowi6 fl6 = {
 		.flowi6_iif = skb->dev->ifindex,
 		.daddr = iph->daddr,
@@ -1163,6 +1173,10 @@
 		.flowi6_proto = iph->nexthdr,
 	};
 
+	tun_info = skb_tunnel_info(skb);
+	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
+		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
+	skb_dst_drop(skb);
 	skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
 }
 
@@ -1684,6 +1698,7 @@
 static int ip6_convert_metrics(struct mx6_config *mxc,
 			       const struct fib6_config *cfg)
 {
+	bool ecn_ca = false;
 	struct nlattr *nla;
 	int remaining;
 	u32 *mp;
@@ -1697,30 +1712,36 @@
 
 	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
 		int type = nla_type(nla);
+		u32 val;
 
-		if (type) {
-			u32 val;
+		if (!type)
+			continue;
+		if (unlikely(type > RTAX_MAX))
+			goto err;
 
-			if (unlikely(type > RTAX_MAX))
+		if (type == RTAX_CC_ALGO) {
+			char tmp[TCP_CA_NAME_MAX];
+
+			nla_strlcpy(tmp, nla, sizeof(tmp));
+			val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
+			if (val == TCP_CA_UNSPEC)
 				goto err;
-			if (type == RTAX_CC_ALGO) {
-				char tmp[TCP_CA_NAME_MAX];
-
-				nla_strlcpy(tmp, nla, sizeof(tmp));
-				val = tcp_ca_get_key_by_name(tmp);
-				if (val == TCP_CA_UNSPEC)
-					goto err;
-			} else {
-				val = nla_get_u32(nla);
-			}
-
-			mp[type - 1] = val;
-			__set_bit(type - 1, mxc->mx_valid);
+		} else {
+			val = nla_get_u32(nla);
 		}
+		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
+			goto err;
+
+		mp[type - 1] = val;
+		__set_bit(type - 1, mxc->mx_valid);
+	}
+
+	if (ecn_ca) {
+		__set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
+		mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
 	}
 
 	mxc->mx = mp;
-
 	return 0;
  err:
 	kfree(mp);
@@ -1801,6 +1822,25 @@
 
 	rt->dst.output = ip6_output;
 
+	if (cfg->fc_encap) {
+		struct lwtunnel_state *lwtstate;
+
+		err = lwtunnel_build_state(dev, cfg->fc_encap_type,
+					   cfg->fc_encap, AF_INET6, cfg,
+					   &lwtstate);
+		if (err)
+			goto out;
+		rt->dst.lwtstate = lwtstate_get(lwtstate);
+		if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
+			rt->dst.lwtstate->orig_output = rt->dst.output;
+			rt->dst.output = lwtunnel_output;
+		}
+		if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
+			rt->dst.lwtstate->orig_input = rt->dst.input;
+			rt->dst.input = lwtunnel_input;
+		}
+	}
+
 	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
 	rt->rt6i_dst.plen = cfg->fc_dst_len;
 	if (rt->rt6i_dst.plen == 128)
@@ -2180,6 +2220,7 @@
 #endif
 	rt->rt6i_prefsrc = ort->rt6i_prefsrc;
 	rt->rt6i_table = ort->rt6i_table;
+	rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
 }
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
@@ -2628,6 +2669,8 @@
 	[RTA_METRICS]           = { .type = NLA_NESTED },
 	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
 	[RTA_PREF]              = { .type = NLA_U8 },
+	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
+	[RTA_ENCAP]		= { .type = NLA_NESTED },
 };
 
 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -2722,6 +2765,12 @@
 		cfg->fc_flags |= RTF_PREF(pref);
 	}
 
+	if (tb[RTA_ENCAP])
+		cfg->fc_encap = tb[RTA_ENCAP];
+
+	if (tb[RTA_ENCAP_TYPE])
+		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
+
 	err = 0;
 errout:
 	return err;
@@ -2754,6 +2803,10 @@
 				r_cfg.fc_gateway = nla_get_in6_addr(nla);
 				r_cfg.fc_flags |= RTF_GATEWAY;
 			}
+			r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
+			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
+			if (nla)
+				r_cfg.fc_encap_type = nla_get_u16(nla);
 		}
 		err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
 		if (err) {
@@ -2816,7 +2869,7 @@
 		return ip6_route_add(&cfg);
 }
 
-static inline size_t rt6_nlmsg_size(void)
+static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
 {
 	return NLMSG_ALIGN(sizeof(struct rtmsg))
 	       + nla_total_size(16) /* RTA_SRC */
@@ -2830,7 +2883,8 @@
 	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
 	       + nla_total_size(sizeof(struct rta_cacheinfo))
 	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
-	       + nla_total_size(1); /* RTA_PREF */
+	       + nla_total_size(1) /* RTA_PREF */
+	       + lwtunnel_get_encap_size(rt->dst.lwtstate);
 }
 
 static int rt6_fill_node(struct net *net,
@@ -2891,6 +2945,11 @@
 	else
 		rtm->rtm_type = RTN_UNICAST;
 	rtm->rtm_flags = 0;
+	if (!netif_carrier_ok(rt->dst.dev)) {
+		rtm->rtm_flags |= RTNH_F_LINKDOWN;
+		if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
+			rtm->rtm_flags |= RTNH_F_DEAD;
+	}
 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
 	rtm->rtm_protocol = rt->rt6i_protocol;
 	if (rt->rt6i_flags & RTF_DYNAMIC)
@@ -2978,6 +3037,8 @@
 	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
 		goto nla_put_failure;
 
+	if (lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
+		goto nla_put_failure;
 	nlmsg_end(skb, nlh);
 	return 0;
 
@@ -3104,7 +3165,7 @@
 	err = -ENOBUFS;
 	seq = info->nlh ? info->nlh->nlmsg_seq : 0;
 
-	skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
+	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
 	if (!skb)
 		goto errout;
 
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index ac35a28..94428fd 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -742,7 +742,7 @@
 			goto drop;
 		if (iptunnel_pull_header(skb, 0, tpi.proto))
 			goto drop;
-		return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
+		return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, log_ecn_error);
 	}
 
 	return 1;
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 4e705ad..45243bb 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -17,6 +17,9 @@
 #include <net/inet_frag.h>
 
 static int one = 1;
+static int auto_flowlabels_min;
+static int auto_flowlabels_max = IP6_AUTO_FLOW_LABEL_MAX;
+
 
 static struct ctl_table ipv6_table_template[] = {
 	{
@@ -45,7 +48,9 @@
 		.data		= &init_net.ipv6.sysctl.auto_flowlabels,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &auto_flowlabels_min,
+		.extra2		= &auto_flowlabels_max
 	},
 	{
 		.procname	= "fwmark_reflect",
@@ -75,6 +80,13 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
+	{
+		.procname	= "ip_nonlocal_bind",
+		.data		= &init_net.ipv6.sysctl.ip_nonlocal_bind,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
 	{ }
 };
 
@@ -117,6 +129,7 @@
 	ipv6_table[5].data = &net->ipv6.sysctl.idgen_retries;
 	ipv6_table[6].data = &net->ipv6.sysctl.idgen_delay;
 	ipv6_table[7].data = &net->ipv6.sysctl.flowlabel_state_ranges;
+	ipv6_table[8].data = &net->ipv6.sysctl.ip_nonlocal_bind;
 
 	ipv6_route_table = ipv6_route_sysctl_init(net);
 	if (!ipv6_route_table)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7a6cea5..97d9314 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -276,7 +276,7 @@
 	if (err)
 		goto late_failure;
 
-	ip6_set_txhash(sk);
+	sk_set_txhash(sk);
 
 	if (!tp->write_seq && likely(!tp->repair))
 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
@@ -1090,7 +1090,7 @@
 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
 	newsk->sk_bound_dev_if = ireq->ir_iif;
 
-	ip6_set_txhash(newsk);
+	sk_set_txhash(newsk);
 
 	/* Now IPv6 options...
 
@@ -1481,8 +1481,7 @@
 					    ntohs(th->dest), tcp_v6_iif(skb));
 		if (sk2) {
 			struct inet_timewait_sock *tw = inet_twsk(sk);
-			inet_twsk_deschedule(tw);
-			inet_twsk_put(tw);
+			inet_twsk_deschedule_put(tw);
 			sk = sk2;
 			tcp_v6_restore_cb(skb);
 			goto process;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e51fc3e..0aba654 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1496,7 +1496,8 @@
 	return udp_proc_register(net, &udp6_seq_afinfo);
 }
 
-void udp6_proc_exit(struct net *net) {
+void udp6_proc_exit(struct net *net)
+{
 	udp_proc_unregister(net, &udp6_seq_afinfo);
 }
 #endif /* CONFIG_PROC_FS */
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 901ef6f..f7fbdba 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -20,10 +20,9 @@
 
 static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
 {
-	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
 	struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
 
-	if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
+	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
 		IP6_ECN_set_ce(inner_iph);
 }
 
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index ed0583c1..30caa28 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -20,13 +20,14 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
+#include <net/vrf.h>
 #if IS_ENABLED(CONFIG_IPV6_MIP6)
 #include <net/mip6.h>
 #endif
 
 static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
 
-static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
+static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
 					  const xfrm_address_t *saddr,
 					  const xfrm_address_t *daddr)
 {
@@ -35,6 +36,7 @@
 	int err;
 
 	memset(&fl6, 0, sizeof(fl6));
+	fl6.flowi6_oif = oif;
 	memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
 	if (saddr)
 		memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
@@ -50,13 +52,13 @@
 	return dst;
 }
 
-static int xfrm6_get_saddr(struct net *net,
+static int xfrm6_get_saddr(struct net *net, int oif,
 			   xfrm_address_t *saddr, xfrm_address_t *daddr)
 {
 	struct dst_entry *dst;
 	struct net_device *dev;
 
-	dst = xfrm6_dst_lookup(net, 0, NULL, daddr);
+	dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr);
 	if (IS_ERR(dst))
 		return -EHOSTUNREACH;
 
@@ -130,8 +132,10 @@
 
 	nexthdr = nh[nhoff];
 
-	if (skb_dst(skb))
-		oif = skb_dst(skb)->dev->ifindex;
+	if (skb_dst(skb)) {
+		oif = vrf_master_ifindex(skb_dst(skb)->dev) ?
+			: skb_dst(skb)->dev->ifindex;
+	}
 
 	memset(fl6, 0, sizeof(struct flowi6));
 	fl6->flowi6_mark = skb->mark;
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 086de49..3891cbd 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -7,7 +7,6 @@
 	select CRYPTO_CCM
 	select CRYPTO_GCM
 	select CRC32
-	select AVERAGE
 	---help---
 	  This option enables the hardware independent IEEE 802.11
 	  networking stack.
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 3275f01..783e891 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -3,6 +3,7 @@
 # mac80211 objects
 mac80211-y := \
 	main.o status.o \
+	driver-ops.o \
 	sta_info.o \
 	wep.o \
 	wpa.o \
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index 4192806..bdf0790 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -145,20 +145,3 @@
 {
 	crypto_free_cipher(tfm);
 }
-
-void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
-					u8 *k1, u8 *k2)
-{
-	u8 l[AES_BLOCK_SIZE] = {};
-	struct ieee80211_key *key =
-		container_of(keyconf, struct ieee80211_key, conf);
-
-	crypto_cipher_encrypt_one(key->u.aes_cmac.tfm, l, l);
-
-	memcpy(k1, l, AES_BLOCK_SIZE);
-	gf_mulx(k1);
-
-	memcpy(k2, k1, AES_BLOCK_SIZE);
-	gf_mulx(k2);
-}
-EXPORT_SYMBOL(ieee80211_aes_cmac_calculate_k1_k2);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index bf7023f..685ec13 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1019,6 +1019,65 @@
 	return 0;
 }
 
+static void sta_apply_mesh_params(struct ieee80211_local *local,
+				  struct sta_info *sta,
+				  struct station_parameters *params)
+{
+#ifdef CONFIG_MAC80211_MESH
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	u32 changed = 0;
+
+	if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) {
+		switch (params->plink_state) {
+		case NL80211_PLINK_ESTAB:
+			if (sta->mesh->plink_state != NL80211_PLINK_ESTAB)
+				changed = mesh_plink_inc_estab_count(sdata);
+			sta->mesh->plink_state = params->plink_state;
+
+			ieee80211_mps_sta_status_update(sta);
+			changed |= ieee80211_mps_set_sta_local_pm(sta,
+				      sdata->u.mesh.mshcfg.power_mode);
+			break;
+		case NL80211_PLINK_LISTEN:
+		case NL80211_PLINK_BLOCKED:
+		case NL80211_PLINK_OPN_SNT:
+		case NL80211_PLINK_OPN_RCVD:
+		case NL80211_PLINK_CNF_RCVD:
+		case NL80211_PLINK_HOLDING:
+			if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+				changed = mesh_plink_dec_estab_count(sdata);
+			sta->mesh->plink_state = params->plink_state;
+
+			ieee80211_mps_sta_status_update(sta);
+			changed |= ieee80211_mps_set_sta_local_pm(sta,
+					NL80211_MESH_POWER_UNKNOWN);
+			break;
+		default:
+			/*  nothing  */
+			break;
+		}
+	}
+
+	switch (params->plink_action) {
+	case NL80211_PLINK_ACTION_NO_ACTION:
+		/* nothing */
+		break;
+	case NL80211_PLINK_ACTION_OPEN:
+		changed |= mesh_plink_open(sta);
+		break;
+	case NL80211_PLINK_ACTION_BLOCK:
+		changed |= mesh_plink_block(sta);
+		break;
+	}
+
+	if (params->local_pm)
+		changed |= ieee80211_mps_set_sta_local_pm(sta,
+							  params->local_pm);
+
+	ieee80211_mbss_info_change_notify(sdata, changed);
+#endif
+}
+
 static int sta_apply_parameters(struct ieee80211_local *local,
 				struct sta_info *sta,
 				struct station_parameters *params)
@@ -1076,7 +1135,6 @@
 	}
 
 	if (mask & BIT(NL80211_STA_FLAG_MFP)) {
-		sta->sta.mfp = !!(set & BIT(NL80211_STA_FLAG_MFP));
 		if (set & BIT(NL80211_STA_FLAG_MFP))
 			set_sta_flag(sta, WLAN_STA_MFP);
 		else
@@ -1097,6 +1155,12 @@
 	    params->ext_capab[3] & WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)
 		set_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH);
 
+	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
+	    ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
+	    params->ext_capab_len >= 8 &&
+	    params->ext_capab[7] & WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED)
+		set_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW);
+
 	if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) {
 		sta->sta.uapsd_queues = params->uapsd_queues;
 		sta->sta.max_sp = params->max_sp;
@@ -1144,62 +1208,8 @@
 					      band, false);
 	}
 
-	if (ieee80211_vif_is_mesh(&sdata->vif)) {
-#ifdef CONFIG_MAC80211_MESH
-		u32 changed = 0;
-
-		if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) {
-			switch (params->plink_state) {
-			case NL80211_PLINK_ESTAB:
-				if (sta->plink_state != NL80211_PLINK_ESTAB)
-					changed = mesh_plink_inc_estab_count(
-							sdata);
-				sta->plink_state = params->plink_state;
-
-				ieee80211_mps_sta_status_update(sta);
-				changed |= ieee80211_mps_set_sta_local_pm(sta,
-					      sdata->u.mesh.mshcfg.power_mode);
-				break;
-			case NL80211_PLINK_LISTEN:
-			case NL80211_PLINK_BLOCKED:
-			case NL80211_PLINK_OPN_SNT:
-			case NL80211_PLINK_OPN_RCVD:
-			case NL80211_PLINK_CNF_RCVD:
-			case NL80211_PLINK_HOLDING:
-				if (sta->plink_state == NL80211_PLINK_ESTAB)
-					changed = mesh_plink_dec_estab_count(
-							sdata);
-				sta->plink_state = params->plink_state;
-
-				ieee80211_mps_sta_status_update(sta);
-				changed |= ieee80211_mps_set_sta_local_pm(sta,
-						NL80211_MESH_POWER_UNKNOWN);
-				break;
-			default:
-				/*  nothing  */
-				break;
-			}
-		}
-
-		switch (params->plink_action) {
-		case NL80211_PLINK_ACTION_NO_ACTION:
-			/* nothing */
-			break;
-		case NL80211_PLINK_ACTION_OPEN:
-			changed |= mesh_plink_open(sta);
-			break;
-		case NL80211_PLINK_ACTION_BLOCK:
-			changed |= mesh_plink_block(sta);
-			break;
-		}
-
-		if (params->local_pm)
-			changed |=
-			      ieee80211_mps_set_sta_local_pm(sta,
-							     params->local_pm);
-		ieee80211_mbss_info_change_notify(sdata, changed);
-#endif
-	}
+	if (ieee80211_vif_is_mesh(&sdata->vif))
+		sta_apply_mesh_params(local, sta, params);
 
 	/* set the STA state after all sta info from usermode has been set */
 	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
@@ -2358,6 +2368,8 @@
 	const u8 *ap;
 	enum ieee80211_smps_mode old_req;
 	int err;
+	struct sta_info *sta;
+	bool tdls_peer_found = false;
 
 	lockdep_assert_held(&sdata->wdev.mtx);
 
@@ -2382,11 +2394,22 @@
 
 	ap = sdata->u.mgd.associated->bssid;
 
+	rcu_read_lock();
+	list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+		if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+		    !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+			continue;
+
+		tdls_peer_found = true;
+		break;
+	}
+	rcu_read_unlock();
+
 	if (smps_mode == IEEE80211_SMPS_AUTOMATIC) {
-		if (sdata->u.mgd.powersave)
-			smps_mode = IEEE80211_SMPS_DYNAMIC;
-		else
+		if (tdls_peer_found || !sdata->u.mgd.powersave)
 			smps_mode = IEEE80211_SMPS_OFF;
+		else
+			smps_mode = IEEE80211_SMPS_DYNAMIC;
 	}
 
 	/* send SM PS frame to AP */
@@ -2394,6 +2417,8 @@
 					 ap, ap);
 	if (err)
 		sdata->u.mgd.req_smps = old_req;
+	else if (smps_mode != IEEE80211_SMPS_OFF && tdls_peer_found)
+		ieee80211_teardown_tdls_peers(sdata);
 
 	return err;
 }
@@ -2479,16 +2504,26 @@
 		sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
 		memcpy(sdata->rc_rateidx_mcs_mask[i], mask->control[i].ht_mcs,
 		       sizeof(mask->control[i].ht_mcs));
+		memcpy(sdata->rc_rateidx_vht_mcs_mask[i],
+		       mask->control[i].vht_mcs,
+		       sizeof(mask->control[i].vht_mcs));
 
 		sdata->rc_has_mcs_mask[i] = false;
+		sdata->rc_has_vht_mcs_mask[i] = false;
 		if (!sband)
 			continue;
 
-		for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++)
-			if (~sdata->rc_rateidx_mcs_mask[i][j]) {
+		for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) {
+			if (~sdata->rc_rateidx_mcs_mask[i][j])
 				sdata->rc_has_mcs_mask[i] = true;
+
+			if (~sdata->rc_rateidx_vht_mcs_mask[i][j])
+				sdata->rc_has_vht_mcs_mask[i] = true;
+
+			if (sdata->rc_has_mcs_mask[i] &&
+			    sdata->rc_has_vht_mcs_mask[i])
 				break;
-			}
+		}
 	}
 
 	return 0;
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index f01c18a..1d1b9b7 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -190,7 +190,7 @@
 	return NULL;
 }
 
-static enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
+enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
 {
 	switch (sta->bandwidth) {
 	case IEEE80211_STA_RX_BW_20:
@@ -264,9 +264,17 @@
 		case NL80211_IFTYPE_AP_VLAN:
 			width = ieee80211_get_max_required_bw(sdata);
 			break;
+		case NL80211_IFTYPE_STATION:
+			/*
+			 * The ap's sta->bandwidth is not set yet at this
+			 * point, so take the width from the chandef, but
+			 * account also for TDLS peers
+			 */
+			width = max(vif->bss_conf.chandef.width,
+				    ieee80211_get_max_required_bw(sdata));
+			break;
 		case NL80211_IFTYPE_P2P_DEVICE:
 			continue;
-		case NL80211_IFTYPE_STATION:
 		case NL80211_IFTYPE_ADHOC:
 		case NL80211_IFTYPE_WDS:
 		case NL80211_IFTYPE_MESH_POINT:
@@ -554,12 +562,13 @@
 	kfree_rcu(ctx, rcu_head);
 }
 
-static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
-					      struct ieee80211_chanctx *ctx)
+void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
+				       struct ieee80211_chanctx *ctx)
 {
 	struct ieee80211_chanctx_conf *conf = &ctx->conf;
 	struct ieee80211_sub_if_data *sdata;
 	const struct cfg80211_chan_def *compat = NULL;
+	struct sta_info *sta;
 
 	lockdep_assert_held(&local->chanctx_mtx);
 
@@ -581,6 +590,20 @@
 		if (WARN_ON_ONCE(!compat))
 			break;
 	}
+
+	/* TDLS peers can sometimes affect the chandef width */
+	list_for_each_entry_rcu(sta, &local->sta_list, list) {
+		if (!sta->uploaded ||
+		    !test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) ||
+		    !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
+		    !sta->tdls_chandef.chan)
+			continue;
+
+		compat = cfg80211_chandef_compatible(&sta->tdls_chandef,
+						     compat);
+		if (WARN_ON_ONCE(!compat))
+			break;
+	}
 	rcu_read_unlock();
 
 	if (!compat)
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 3ea8b7d..ced6bf3 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -122,6 +122,7 @@
 	FLAG(CHANCTX_STA_CSA),
 	FLAG(SUPPORTS_CLONED_SKBS),
 	FLAG(SINGLE_SCAN_ON_ALL_BANDS),
+	FLAG(TDLS_WIDER_BW),
 
 	/* keep last for the build bug below */
 	(void *)0x1
@@ -277,7 +278,6 @@
 	DEBUGFS_STATS_ADD(rx_handlers_queued);
 	DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc);
 	DEBUGFS_STATS_ADD(rx_handlers_drop_defrag);
-	DEBUGFS_STATS_ADD(rx_handlers_drop_short);
 	DEBUGFS_STATS_ADD(tx_expand_skb_head);
 	DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned);
 	DEBUGFS_STATS_ADD(rx_expand_skb_head_defrag);
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index e82bf1e..702ca12 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -57,7 +57,6 @@
 KEY_CONF_FILE(keyidx, D);
 KEY_CONF_FILE(hw_key_idx, D);
 KEY_FILE(flags, X);
-KEY_FILE(tx_rx_count, D);
 KEY_READ(ifindex, sdata->name, "%s\n");
 KEY_OPS(ifindex);
 
@@ -310,7 +309,6 @@
 	DEBUGFS_ADD(flags);
 	DEBUGFS_ADD(keyidx);
 	DEBUGFS_ADD(hw_key_idx);
-	DEBUGFS_ADD(tx_rx_count);
 	DEBUGFS_ADD(algorithm);
 	DEBUGFS_ADD(tx_spec);
 	DEBUGFS_ADD(rx_spec);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index c09c013..1021e87 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -186,6 +186,38 @@
 IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz,
 		  rc_rateidx_mcs_mask[IEEE80211_BAND_5GHZ], HEXARRAY);
 
+static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_2ghz(
+				const struct ieee80211_sub_if_data *sdata,
+				char *buf, int buflen)
+{
+	int i, len = 0;
+	const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_2GHZ];
+
+	for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+		len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
+	len += scnprintf(buf + len, buflen - len, "\n");
+
+	return len;
+}
+
+IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_2ghz);
+
+static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_5ghz(
+				const struct ieee80211_sub_if_data *sdata,
+				char *buf, int buflen)
+{
+	int i, len = 0;
+	const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_5GHZ];
+
+	for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+		len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
+	len += scnprintf(buf + len, buflen - len, "\n");
+
+	return len;
+}
+
+IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_5ghz);
+
 IEEE80211_IF_FILE(flags, flags, HEX);
 IEEE80211_IF_FILE(state, state, LHEX);
 IEEE80211_IF_FILE(txpower, vif.bss_conf.txpower, DEC);
@@ -565,6 +597,8 @@
 	DEBUGFS_ADD(rc_rateidx_mask_5ghz);
 	DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
 	DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
+	DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_2ghz);
+	DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
 	DEBUGFS_ADD(hw_queues);
 }
 
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
new file mode 100644
index 0000000..267c3b1
--- /dev/null
+++ b/net/mac80211/driver-ops.c
@@ -0,0 +1,45 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <net/mac80211.h>
+#include "ieee80211_i.h"
+#include "trace.h"
+#include "driver-ops.h"
+
+__must_check
+int drv_sta_state(struct ieee80211_local *local,
+		  struct ieee80211_sub_if_data *sdata,
+		  struct sta_info *sta,
+		  enum ieee80211_sta_state old_state,
+		  enum ieee80211_sta_state new_state)
+{
+	int ret = 0;
+
+	might_sleep();
+
+	sdata = get_bss_sdata(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
+
+	trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
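+	/* Drivers that do not implement sta_state get the transition
+	 * emulated via sta_add/sta_remove: AUTH->ASSOC maps to
+	 * drv_sta_add() and ASSOC->AUTH to drv_sta_remove().
+	 */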
+	if (local->ops->sta_state) {
+		ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta,
+					    old_state, new_state);
+	} else if (old_state == IEEE80211_STA_AUTH &&
+		   new_state == IEEE80211_STA_ASSOC) {
+		ret = drv_sta_add(local, sdata, &sta->sta);
+		if (ret == 0)
+			sta->uploaded = true;
+	} else if (old_state == IEEE80211_STA_ASSOC &&
+		   new_state == IEEE80211_STA_AUTH) {
+		drv_sta_remove(local, sdata, &sta->sta);
+	}
+	trace_drv_return_int(local, ret);
+	return ret;
+}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 32a2e70..02d9133 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -573,37 +573,12 @@
 	trace_drv_return_void(local);
 }
 
-static inline __must_check
+__must_check
 int drv_sta_state(struct ieee80211_local *local,
 		  struct ieee80211_sub_if_data *sdata,
 		  struct sta_info *sta,
 		  enum ieee80211_sta_state old_state,
-		  enum ieee80211_sta_state new_state)
-{
-	int ret = 0;
-
-	might_sleep();
-
-	sdata = get_bss_sdata(sdata);
-	if (!check_sdata_in_driver(sdata))
-		return -EIO;
-
-	trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
-	if (local->ops->sta_state) {
-		ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta,
-					    old_state, new_state);
-	} else if (old_state == IEEE80211_STA_AUTH &&
-		   new_state == IEEE80211_STA_ASSOC) {
-		ret = drv_sta_add(local, sdata, &sta->sta);
-		if (ret == 0)
-			sta->uploaded = true;
-	} else if (old_state == IEEE80211_STA_ASSOC &&
-		   new_state == IEEE80211_STA_AUTH) {
-		drv_sta_remove(local, sdata, &sta->sta);
-	}
-	trace_drv_return_int(local, ret);
-	return ret;
-}
+		  enum ieee80211_sta_state new_state);
 
 static inline void drv_sta_rc_update(struct ieee80211_local *local,
 				     struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index b12f615..6e52659 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -84,13 +84,13 @@
 #define IEEE80211_DEAUTH_FRAME_LEN	(24 /* hdr */ + 2 /* reason */)
 
 struct ieee80211_fragment_entry {
-	unsigned long first_frag_time;
-	unsigned int seq;
-	unsigned int rx_queue;
-	unsigned int last_frag;
-	unsigned int extra_len;
 	struct sk_buff_head skb_list;
-	int ccmp; /* Whether fragments were encrypted with CCMP */
+	unsigned long first_frag_time;
+	u16 seq;
+	u16 extra_len;
+	u16 last_frag;
+	u8 rx_queue;
+	bool ccmp; /* Whether fragments were encrypted with CCMP */
 	u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
 };
 
@@ -181,7 +181,6 @@
 
 /**
  * enum ieee80211_packet_rx_flags - packet RX flags
- * @IEEE80211_RX_FRAGMENTED: fragmented frame
  * @IEEE80211_RX_AMSDU: a-MSDU packet
  * @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed
  * @IEEE80211_RX_DEFERRED_RELEASE: frame was subjected to receive reordering
@@ -190,7 +189,6 @@
  * @rx_flags field of &struct ieee80211_rx_status.
  */
 enum ieee80211_packet_rx_flags {
-	IEEE80211_RX_FRAGMENTED			= BIT(2),
 	IEEE80211_RX_AMSDU			= BIT(3),
 	IEEE80211_RX_MALFORMED_ACTION_FRM	= BIT(4),
 	IEEE80211_RX_DEFERRED_RELEASE		= BIT(5),
@@ -202,8 +200,6 @@
  * @IEEE80211_RX_CMNTR: received on cooked monitor already
  * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
  *	to cfg80211_report_obss_beacon().
- * @IEEE80211_RX_REORDER_TIMER: this frame is released by the
- *	reorder buffer timeout timer, not the normal RX path
  *
  * These flags are used across handling multiple interfaces
  * for a single frame.
@@ -211,10 +207,10 @@
 enum ieee80211_rx_flags {
 	IEEE80211_RX_CMNTR		= BIT(0),
 	IEEE80211_RX_BEACON_REPORTED	= BIT(1),
-	IEEE80211_RX_REORDER_TIMER	= BIT(2),
 };
 
 struct ieee80211_rx_data {
+	struct napi_struct *napi;
 	struct sk_buff *skb;
 	struct ieee80211_local *local;
 	struct ieee80211_sub_if_data *sdata;
@@ -725,6 +721,7 @@
  *	back to wireless media and to the local net stack.
  * @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume.
  * @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver
+ * @IEEE80211_SDATA_MU_MIMO_OWNER: indicates interface owns MU-MIMO capability
  */
 enum ieee80211_sub_if_data_flags {
 	IEEE80211_SDATA_ALLMULTI		= BIT(0),
@@ -732,6 +729,7 @@
 	IEEE80211_SDATA_DONT_BRIDGE_PACKETS	= BIT(3),
 	IEEE80211_SDATA_DISCONNECT_RESUME	= BIT(4),
 	IEEE80211_SDATA_IN_DRIVER		= BIT(5),
+	IEEE80211_SDATA_MU_MIMO_OWNER		= BIT(6),
 };
 
 /**
@@ -903,6 +901,9 @@
 	bool rc_has_mcs_mask[IEEE80211_NUM_BANDS];
 	u8  rc_rateidx_mcs_mask[IEEE80211_NUM_BANDS][IEEE80211_HT_MCS_MASK_LEN];
 
+	bool rc_has_vht_mcs_mask[IEEE80211_NUM_BANDS];
+	u16 rc_rateidx_vht_mcs_mask[IEEE80211_NUM_BANDS][NL80211_VHT_NSS_MAX];
+
 	union {
 		struct ieee80211_if_ap ap;
 		struct ieee80211_if_wds wds;
@@ -1010,7 +1011,6 @@
 	IEEE80211_SDATA_QUEUE_AGG_STOP		= 2,
 	IEEE80211_SDATA_QUEUE_RX_AGG_START	= 3,
 	IEEE80211_SDATA_QUEUE_RX_AGG_STOP	= 4,
-	IEEE80211_SDATA_QUEUE_TDLS_CHSW		= 5,
 };
 
 enum {
@@ -1286,7 +1286,6 @@
 	unsigned int rx_handlers_queued;
 	unsigned int rx_handlers_drop_nullfunc;
 	unsigned int rx_handlers_drop_defrag;
-	unsigned int rx_handlers_drop_short;
 	unsigned int tx_expand_skb_head;
 	unsigned int tx_expand_skb_head_cloned;
 	unsigned int rx_expand_skb_head_defrag;
@@ -1348,14 +1347,16 @@
 
 	struct ieee80211_sub_if_data __rcu *p2p_sdata;
 
-	struct napi_struct *napi;
-
 	/* virtual monitor interface */
 	struct ieee80211_sub_if_data __rcu *monitor_sdata;
 	struct cfg80211_chan_def monitor_chandef;
 
 	/* extended capabilities provided by mac80211 */
 	u8 ext_capa[8];
+
+	/* TDLS channel switch */
+	struct work_struct tdls_chsw_work;
+	struct sk_buff_head skb_queue_tdls_chsw;
 };
 
 static inline struct ieee80211_sub_if_data *
@@ -1715,6 +1716,8 @@
 				 enum ieee80211_band band, bool nss_only);
 void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
 				      struct ieee80211_sta_vht_cap *vht_cap);
+void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
+				     u16 vht_mask[NL80211_VHT_NSS_MAX]);
 
 /* Spectrum management */
 void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
@@ -1763,8 +1766,6 @@
 
 /* utility functions/constants */
 extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
-u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
-			enum nl80211_iftype type);
 int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
 			     int rate, int erp, int short_preamble,
 			     int shift);
@@ -2042,6 +2043,9 @@
 				 enum ieee80211_chanctx_mode chanmode,
 				 u8 radar_detect);
 int ieee80211_max_num_channels(struct ieee80211_local *local);
+enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta);
+void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
+				       struct ieee80211_chanctx *ctx);
 
 /* TDLS */
 int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
@@ -2058,8 +2062,8 @@
 void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
 					  struct net_device *dev,
 					  const u8 *addr);
-void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
-					   struct sk_buff *skb);
+void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
+void ieee80211_tdls_chsw_work(struct work_struct *wk);
 
 extern const struct ethtool_ops ieee80211_ethtool_ops;
 
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 553ac6d..6964fc6 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1242,8 +1242,6 @@
 							WLAN_BACK_RECIPIENT, 0,
 							false);
 			mutex_unlock(&local->sta_mtx);
-		} else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_TDLS_CHSW) {
-			ieee80211_process_tdls_channel_switch(sdata, skb);
 		} else if (ieee80211_is_action(mgmt->frame_control) &&
 			   mgmt->u.action.category == WLAN_CATEGORY_BACK) {
 			int len = skb->len;
@@ -1790,13 +1788,23 @@
 		sband = local->hw.wiphy->bands[i];
 		sdata->rc_rateidx_mask[i] =
 			sband ? (1 << sband->n_bitrates) - 1 : 0;
-		if (sband)
+		if (sband) {
+			__le16 cap;
+			u16 *vht_rate_mask;
+
 			memcpy(sdata->rc_rateidx_mcs_mask[i],
 			       sband->ht_cap.mcs.rx_mask,
 			       sizeof(sdata->rc_rateidx_mcs_mask[i]));
-		else
+
+			cap = sband->vht_cap.vht_mcs.rx_mcs_map;
+			vht_rate_mask = sdata->rc_rateidx_vht_mcs_mask[i];
+			ieee80211_get_vht_mask_from_cap(cap, vht_rate_mask);
+		} else {
 			memset(sdata->rc_rateidx_mcs_mask[i], 0,
 			       sizeof(sdata->rc_rateidx_mcs_mask[i]));
+			memset(sdata->rc_rateidx_vht_mcs_mask[i], 0,
+			       sizeof(sdata->rc_rateidx_vht_mcs_mask[i]));
+		}
 	}
 
 	ieee80211_set_default_queues(sdata);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index b22df3a..44388d6 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -336,7 +336,6 @@
 			ieee80211_check_fast_xmit(sta);
 		} else {
 			rcu_assign_pointer(sta->gtk[idx], new);
-			sta->gtk_idx = idx;
 		}
 	} else {
 		defunikey = old &&
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 3f4f9ea..9951ef0 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -115,9 +115,6 @@
 		} gen;
 	} u;
 
-	/* number of times this key has been used */
-	int tx_rx_count;
-
 #ifdef CONFIG_MAC80211_DEBUGFS
 	struct {
 		struct dentry *stalink;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 3c63468..ff79a13 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -629,6 +629,8 @@
 	INIT_WORK(&local->sched_scan_stopped_work,
 		  ieee80211_sched_scan_stopped_work);
 
+	INIT_WORK(&local->tdls_chsw_work, ieee80211_tdls_chsw_work);
+
 	spin_lock_init(&local->ack_status_lock);
 	idr_init(&local->ack_status_frames);
 
@@ -645,6 +647,7 @@
 
 	skb_queue_head_init(&local->skb_queue);
 	skb_queue_head_init(&local->skb_queue_unreliable);
+	skb_queue_head_init(&local->skb_queue_tdls_chsw);
 
 	ieee80211_alloc_led_names(local);
 
@@ -1132,18 +1135,6 @@
 }
 EXPORT_SYMBOL(ieee80211_register_hw);
 
-void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
-			struct net_device *napi_dev,
-			int (*poll)(struct napi_struct *, int),
-			int weight)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-
-	netif_napi_add(napi_dev, napi, poll, weight);
-	local->napi = napi;
-}
-EXPORT_SYMBOL_GPL(ieee80211_napi_add);
-
 void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
@@ -1173,6 +1164,7 @@
 
 	cancel_work_sync(&local->restart_work);
 	cancel_work_sync(&local->reconfig_filter);
+	cancel_work_sync(&local->tdls_chsw_work);
 	flush_work(&local->sched_scan_stopped_work);
 
 	ieee80211_clear_tx_pending(local);
@@ -1183,6 +1175,7 @@
 		wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
 	skb_queue_purge(&local->skb_queue);
 	skb_queue_purge(&local->skb_queue_unreliable);
+	skb_queue_purge(&local->skb_queue_tdls_chsw);
 
 	destroy_workqueue(local->workqueue);
 	wiphy_unregister(local->hw.wiphy);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 817098a..e06a5ca 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -158,7 +158,7 @@
 	changed = mesh_accept_plinks_update(sdata);
 	if (!sdata->u.mesh.user_mpm) {
 		changed |= mesh_plink_deactivate(sta);
-		del_timer_sync(&sta->plink_timer);
+		del_timer_sync(&sta->mesh->plink_timer);
 	}
 
 	if (changed)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 085edc1..d80e0a4 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -19,15 +19,6 @@
 
 #define MAX_PREQ_QUEUE_LEN	64
 
-/* Destination only */
-#define MP_F_DO	0x1
-/* Reply and forward */
-#define MP_F_RF	0x2
-/* Unknown Sequence Number */
-#define MP_F_USN    0x01
-/* Reason code Present */
-#define MP_F_RCODE  0x02
-
 static void mesh_queue_preq(struct mesh_path *, u8);
 
 static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
@@ -79,6 +70,12 @@
 #define MSEC_TO_TU(x) (x*1000/1024)
 #define SN_GT(x, y) ((s32)(y - x) < 0)
 #define SN_LT(x, y) ((s32)(x - y) < 0)
+#define MAX_SANE_SN_DELTA 32
+
+static inline u32 SN_DELTA(u32 x, u32 y)
+{
+	return x >= y ? x - y : y - x;
+}
 
 #define net_traversal_jiffies(s) \
 	msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
@@ -279,15 +276,10 @@
 	*pos++ = ttl;
 	/* number of destinations */
 	*pos++ = 1;
-	/*
-	 * flags bit, bit 1 is unset if we know the sequence number and
-	 * bit 2 is set if we have a reason code
+	/* Flags field has AE bit only as defined in
+	 * sec 8.4.2.117 IEEE802.11-2012
 	 */
 	*pos = 0;
-	if (!target_sn)
-		*pos |= MP_F_USN;
-	if (target_rcode)
-		*pos |= MP_F_RCODE;
 	pos++;
 	memcpy(pos, target, ETH_ALEN);
 	pos += ETH_ALEN;
@@ -316,8 +308,9 @@
 	failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
 
 	/* moving average, scaled to 100 */
-	sta->fail_avg = ((80 * sta->fail_avg + 5) / 100 + 20 * failed);
-	if (sta->fail_avg > 95)
+	sta->mesh->fail_avg =
+		((80 * sta->mesh->fail_avg + 5) / 100 + 20 * failed);
+	if (sta->mesh->fail_avg > 95)
 		mesh_plink_broken(sta);
 }
 
@@ -333,7 +326,7 @@
 	u32 tx_time, estimated_retx;
 	u64 result;
 
-	if (sta->fail_avg >= 100)
+	if (sta->mesh->fail_avg >= 100)
 		return MAX_METRIC;
 
 	sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
@@ -341,7 +334,7 @@
 	if (WARN_ON(!rate))
 		return MAX_METRIC;
 
-	err = (sta->fail_avg << ARITH_SHIFT) / 100;
+	err = (sta->mesh->fail_avg << ARITH_SHIFT) / 100;
 
 	/* bitrate is in units of 100 Kbps, while we need rate in units of
 	 * 1Mbps. This will be corrected on tx_time computation.
@@ -441,6 +434,26 @@
 					process = false;
 					fresh_info = false;
 				}
+			} else if (!(mpath->flags & MESH_PATH_ACTIVE)) {
+				bool have_sn, newer_sn, bounced;
+
+				have_sn = mpath->flags & MESH_PATH_SN_VALID;
+				newer_sn = have_sn && SN_GT(orig_sn, mpath->sn);
+				bounced = have_sn &&
+					  (SN_DELTA(orig_sn, mpath->sn) >
+							MAX_SANE_SN_DELTA);
+
+				if (!have_sn || newer_sn) {
+					/* if SN is newer than what we had
+					 * then we can take it */;
+				} else if (bounced) {
+					/* if SN is way different than what
+					 * we had then assume the other side
+					 * rebooted or restarted */;
+				} else {
+					process = false;
+					fresh_info = false;
+				}
 			}
 		} else {
 			mpath = mesh_path_add(sdata, orig_addr);
@@ -570,15 +583,13 @@
 					SN_LT(mpath->sn, target_sn)) {
 				mpath->sn = target_sn;
 				mpath->flags |= MESH_PATH_SN_VALID;
-			} else if ((!(target_flags & MP_F_DO)) &&
+			} else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) &&
 					(mpath->flags & MESH_PATH_ACTIVE)) {
 				reply = true;
 				target_metric = mpath->metric;
 				target_sn = mpath->sn;
-				if (target_flags & MP_F_RF)
-					target_flags |= MP_F_DO;
-				else
-					forward = false;
+				/* Case E2 of sec 13.10.9.3 IEEE 802.11-2012*/
+				target_flags |= IEEE80211_PREQ_TO_FLAG;
 			}
 		}
 		rcu_read_unlock();
@@ -736,9 +747,12 @@
 		if (mpath->flags & MESH_PATH_ACTIVE &&
 		    ether_addr_equal(ta, sta->sta.addr) &&
 		    (!(mpath->flags & MESH_PATH_SN_VALID) ||
-		    SN_GT(target_sn, mpath->sn))) {
+		    SN_GT(target_sn, mpath->sn)  || target_sn == 0)) {
 			mpath->flags &= ~MESH_PATH_ACTIVE;
-			mpath->sn = target_sn;
+			if (target_sn != 0)
+				mpath->sn = target_sn;
+			else
+				mpath->sn += 1;
 			spin_unlock_bh(&mpath->state_lock);
 			if (!ifmsh->mshcfg.dot11MeshForwarding)
 				goto endperr;
@@ -862,7 +876,7 @@
 
 	rcu_read_lock();
 	sta = sta_info_get(sdata, mgmt->sa);
-	if (!sta || sta->plink_state != NL80211_PLINK_ESTAB) {
+	if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
 		rcu_read_unlock();
 		return;
 	}
@@ -974,7 +988,7 @@
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct mesh_preq_queue *preq_node;
 	struct mesh_path *mpath;
-	u8 ttl, target_flags;
+	u8 ttl, target_flags = 0;
 	const u8 *da;
 	u32 lifetime;
 
@@ -1033,9 +1047,9 @@
 	}
 
 	if (preq_node->flags & PREQ_Q_F_REFRESH)
-		target_flags = MP_F_DO;
+		target_flags |= IEEE80211_PREQ_TO_FLAG;
 	else
-		target_flags = MP_F_RF;
+		target_flags &= ~IEEE80211_PREQ_TO_FLAG;
 
 	spin_unlock_bh(&mpath->state_lock);
 	da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
@@ -1176,7 +1190,9 @@
 		spin_unlock_bh(&mpath->state_lock);
 		mesh_queue_preq(mpath, 0);
 	} else {
-		mpath->flags = 0;
+		mpath->flags &= ~(MESH_PATH_RESOLVING |
+				  MESH_PATH_RESOLVED |
+				  MESH_PATH_REQ_QUEUED);
 		mpath->exp_time = jiffies;
 		spin_unlock_bh(&mpath->state_lock);
 		if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 3b59099..5838464 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -13,10 +13,11 @@
 #include "rate.h"
 #include "mesh.h"
 
+#define PLINK_CNF_AID(mgmt) ((mgmt)->u.action.u.self_prot.variable + 2)
 #define PLINK_GET_LLID(p) (p + 2)
 #define PLINK_GET_PLID(p) (p + 4)
 
-#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
+#define mod_plink_timer(s, t) (mod_timer(&s->mesh->plink_timer, \
 				jiffies + msecs_to_jiffies(t)))
 
 enum plink_event {
@@ -53,18 +54,13 @@
 	[CLS_IGNR] = "CLS_IGNR"
 };
 
-static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
-			       enum ieee80211_self_protected_actioncode action,
-			       u8 *da, u16 llid, u16 plid, u16 reason);
-
-
 /* We only need a valid sta if user configured a minimum rssi_threshold. */
 static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata,
 				 struct sta_info *sta)
 {
 	s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold;
 	return rssi_threshold == 0 ||
-	       (sta && (s8) -ewma_read(&sta->avg_signal) > rssi_threshold);
+	       (sta && (s8) -ewma_signal_read(&sta->avg_signal) > rssi_threshold);
 }
 
 /**
@@ -72,14 +68,14 @@
  *
  * @sta: mesh peer link to restart
  *
- * Locking: this function must be called holding sta->plink_lock
+ * Locking: this function must be called holding sta->mesh->plink_lock
  */
 static inline void mesh_plink_fsm_restart(struct sta_info *sta)
 {
-	lockdep_assert_held(&sta->plink_lock);
-	sta->plink_state = NL80211_PLINK_LISTEN;
-	sta->llid = sta->plid = sta->reason = 0;
-	sta->plink_retries = 0;
+	lockdep_assert_held(&sta->mesh->plink_lock);
+	sta->mesh->plink_state = NL80211_PLINK_LISTEN;
+	sta->mesh->llid = sta->mesh->plid = sta->mesh->reason = 0;
+	sta->mesh->plink_retries = 0;
 }
 
 /*
@@ -119,7 +115,7 @@
 	rcu_read_lock();
 	list_for_each_entry_rcu(sta, &local->sta_list, list) {
 		if (sdata != sta->sdata ||
-		    sta->plink_state != NL80211_PLINK_ESTAB)
+		    sta->mesh->plink_state != NL80211_PLINK_ESTAB)
 			continue;
 
 		short_slot = false;
@@ -169,7 +165,7 @@
 	rcu_read_lock();
 	list_for_each_entry_rcu(sta, &local->sta_list, list) {
 		if (sdata != sta->sdata ||
-		    sta->plink_state != NL80211_PLINK_ESTAB)
+		    sta->mesh->plink_state != NL80211_PLINK_ESTAB)
 			continue;
 
 		if (sta->sta.bandwidth > IEEE80211_STA_RX_BW_20)
@@ -204,59 +200,8 @@
 	return BSS_CHANGED_HT;
 }
 
-/**
- * __mesh_plink_deactivate - deactivate mesh peer link
- *
- * @sta: mesh peer link to deactivate
- *
- * All mesh paths with this peer as next hop will be flushed
- * Returns beacon changed flag if the beacon content changed.
- *
- * Locking: the caller must hold sta->plink_lock
- */
-static u32 __mesh_plink_deactivate(struct sta_info *sta)
-{
-	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	u32 changed = 0;
-
-	lockdep_assert_held(&sta->plink_lock);
-
-	if (sta->plink_state == NL80211_PLINK_ESTAB)
-		changed = mesh_plink_dec_estab_count(sdata);
-	sta->plink_state = NL80211_PLINK_BLOCKED;
-	mesh_path_flush_by_nexthop(sta);
-
-	ieee80211_mps_sta_status_update(sta);
-	changed |= ieee80211_mps_set_sta_local_pm(sta,
-			NL80211_MESH_POWER_UNKNOWN);
-
-	return changed;
-}
-
-/**
- * mesh_plink_deactivate - deactivate mesh peer link
- *
- * @sta: mesh peer link to deactivate
- *
- * All mesh paths with this peer as next hop will be flushed
- */
-u32 mesh_plink_deactivate(struct sta_info *sta)
-{
-	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	u32 changed;
-
-	spin_lock_bh(&sta->plink_lock);
-	changed = __mesh_plink_deactivate(sta);
-	sta->reason = WLAN_REASON_MESH_PEER_CANCELED;
-	mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
-			    sta->sta.addr, sta->llid, sta->plid,
-			    sta->reason);
-	spin_unlock_bh(&sta->plink_lock);
-
-	return changed;
-}
-
 static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
+			       struct sta_info *sta,
 			       enum ieee80211_self_protected_actioncode action,
 			       u8 *da, u16 llid, u16 plid, u16 reason)
 {
@@ -306,7 +251,7 @@
 		if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
 			/* AID */
 			pos = skb_put(skb, 2);
-			put_unaligned_le16(plid, pos);
+			put_unaligned_le16(sta->sta.aid, pos);
 		}
 		if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
 		    ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
@@ -375,6 +320,58 @@
 	return err;
 }
 
+/**
+ * __mesh_plink_deactivate - deactivate mesh peer link
+ *
+ * @sta: mesh peer link to deactivate
+ *
+ * All mesh paths with this peer as next hop will be flushed
+ * Returns beacon changed flag if the beacon content changed.
+ *
+ * Locking: the caller must hold sta->mesh->plink_lock
+ */
+static u32 __mesh_plink_deactivate(struct sta_info *sta)
+{
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	u32 changed = 0;
+
+	lockdep_assert_held(&sta->mesh->plink_lock);
+
+	if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+		changed = mesh_plink_dec_estab_count(sdata);
+	sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
+	mesh_path_flush_by_nexthop(sta);
+
+	ieee80211_mps_sta_status_update(sta);
+	changed |= ieee80211_mps_set_sta_local_pm(sta,
+			NL80211_MESH_POWER_UNKNOWN);
+
+	return changed;
+}
+
+/**
+ * mesh_plink_deactivate - deactivate mesh peer link
+ *
+ * @sta: mesh peer link to deactivate
+ *
+ * All mesh paths with this peer as next hop will be flushed
+ */
+u32 mesh_plink_deactivate(struct sta_info *sta)
+{
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	u32 changed;
+
+	spin_lock_bh(&sta->mesh->plink_lock);
+	changed = __mesh_plink_deactivate(sta);
+	sta->mesh->reason = WLAN_REASON_MESH_PEER_CANCELED;
+	mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_CLOSE,
+			    sta->sta.addr, sta->mesh->llid, sta->mesh->plid,
+			    sta->mesh->reason);
+	spin_unlock_bh(&sta->mesh->plink_lock);
+
+	return changed;
+}
+
 static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
 			       struct sta_info *sta,
 			       struct ieee802_11_elems *elems, bool insert)
@@ -388,13 +385,14 @@
 	sband = local->hw.wiphy->bands[band];
 	rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
 
-	spin_lock_bh(&sta->plink_lock);
+	spin_lock_bh(&sta->mesh->plink_lock);
 	sta->last_rx = jiffies;
 
 	/* rates and capabilities don't change during peering */
-	if (sta->plink_state == NL80211_PLINK_ESTAB && sta->processed_beacon)
+	if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
+	    sta->mesh->processed_beacon)
 		goto out;
-	sta->processed_beacon = true;
+	sta->mesh->processed_beacon = true;
 
 	if (sta->sta.supp_rates[band] != rates)
 		changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
@@ -421,23 +419,57 @@
 	else
 		rate_control_rate_update(local, sband, sta, changed);
 out:
-	spin_unlock_bh(&sta->plink_lock);
+	spin_unlock_bh(&sta->mesh->plink_lock);
+}
+
+static int mesh_allocate_aid(struct ieee80211_sub_if_data *sdata)
+{
+	struct sta_info *sta;
+	unsigned long *aid_map;
+	int aid;
+
+	aid_map = kcalloc(BITS_TO_LONGS(IEEE80211_MAX_AID + 1),
+			  sizeof(*aid_map), GFP_KERNEL);
+	if (!aid_map)
+		return -ENOMEM;
+
+	/* reserve aid 0 for mcast indication */
+	__set_bit(0, aid_map);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sta, &sdata->local->sta_list, list)
+		__set_bit(sta->sta.aid, aid_map);
+	rcu_read_unlock();
+
+	aid = find_first_zero_bit(aid_map, IEEE80211_MAX_AID + 1);
+	kfree(aid_map);
+
+	if (aid > IEEE80211_MAX_AID)
+		return -ENOBUFS;
+
+	return aid;
 }
 
 static struct sta_info *
 __mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr)
 {
 	struct sta_info *sta;
+	int aid;
 
 	if (sdata->local->num_sta >= MESH_MAX_PLINKS)
 		return NULL;
 
+	aid = mesh_allocate_aid(sdata);
+	if (aid < 0)
+		return NULL;
+
 	sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
 	if (!sta)
 		return NULL;
 
-	sta->plink_state = NL80211_PLINK_LISTEN;
+	sta->mesh->plink_state = NL80211_PLINK_LISTEN;
 	sta->sta.wme = true;
+	sta->sta.aid = aid;
 
 	sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
 	sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
@@ -524,7 +556,7 @@
 		goto out;
 
 	if (mesh_peer_accepts_plinks(elems) &&
-	    sta->plink_state == NL80211_PLINK_LISTEN &&
+	    sta->mesh->plink_state == NL80211_PLINK_LISTEN &&
 	    sdata->u.mesh.accepting_plinks &&
 	    sdata->u.mesh.mshcfg.auto_open_plinks &&
 	    rssi_threshold_check(sdata, sta))
@@ -554,52 +586,52 @@
 	if (sta->sdata->local->quiescing)
 		return;
 
-	spin_lock_bh(&sta->plink_lock);
+	spin_lock_bh(&sta->mesh->plink_lock);
 
 	/* If a timer fires just before a state transition on another CPU,
 	 * we may have already extended the timeout and changed state by the
 	 * time we've acquired the lock and arrived here.  In that case,
 	 * skip this timer and wait for the new one.
 	 */
-	if (time_before(jiffies, sta->plink_timer.expires)) {
+	if (time_before(jiffies, sta->mesh->plink_timer.expires)) {
 		mpl_dbg(sta->sdata,
 			"Ignoring timer for %pM in state %s (timer adjusted)",
-			sta->sta.addr, mplstates[sta->plink_state]);
-		spin_unlock_bh(&sta->plink_lock);
+			sta->sta.addr, mplstates[sta->mesh->plink_state]);
+		spin_unlock_bh(&sta->mesh->plink_lock);
 		return;
 	}
 
 	/* del_timer() and handler may race when entering these states */
-	if (sta->plink_state == NL80211_PLINK_LISTEN ||
-	    sta->plink_state == NL80211_PLINK_ESTAB) {
+	if (sta->mesh->plink_state == NL80211_PLINK_LISTEN ||
+	    sta->mesh->plink_state == NL80211_PLINK_ESTAB) {
 		mpl_dbg(sta->sdata,
 			"Ignoring timer for %pM in state %s (timer deleted)",
-			sta->sta.addr, mplstates[sta->plink_state]);
-		spin_unlock_bh(&sta->plink_lock);
+			sta->sta.addr, mplstates[sta->mesh->plink_state]);
+		spin_unlock_bh(&sta->mesh->plink_lock);
 		return;
 	}
 
 	mpl_dbg(sta->sdata,
 		"Mesh plink timer for %pM fired on state %s\n",
-		sta->sta.addr, mplstates[sta->plink_state]);
+		sta->sta.addr, mplstates[sta->mesh->plink_state]);
 	sdata = sta->sdata;
 	mshcfg = &sdata->u.mesh.mshcfg;
 
-	switch (sta->plink_state) {
+	switch (sta->mesh->plink_state) {
 	case NL80211_PLINK_OPN_RCVD:
 	case NL80211_PLINK_OPN_SNT:
 		/* retry timer */
-		if (sta->plink_retries < mshcfg->dot11MeshMaxRetries) {
+		if (sta->mesh->plink_retries < mshcfg->dot11MeshMaxRetries) {
 			u32 rand;
 			mpl_dbg(sta->sdata,
 				"Mesh plink for %pM (retry, timeout): %d %d\n",
-				sta->sta.addr, sta->plink_retries,
-				sta->plink_timeout);
+				sta->sta.addr, sta->mesh->plink_retries,
+				sta->mesh->plink_timeout);
 			get_random_bytes(&rand, sizeof(u32));
-			sta->plink_timeout = sta->plink_timeout +
-					     rand % sta->plink_timeout;
-			++sta->plink_retries;
-			mod_plink_timer(sta, sta->plink_timeout);
+			sta->mesh->plink_timeout = sta->mesh->plink_timeout +
+					     rand % sta->mesh->plink_timeout;
+			++sta->mesh->plink_retries;
+			mod_plink_timer(sta, sta->mesh->plink_timeout);
 			action = WLAN_SP_MESH_PEERING_OPEN;
 			break;
 		}
@@ -609,31 +641,31 @@
 		/* confirm timer */
 		if (!reason)
 			reason = WLAN_REASON_MESH_CONFIRM_TIMEOUT;
-		sta->plink_state = NL80211_PLINK_HOLDING;
+		sta->mesh->plink_state = NL80211_PLINK_HOLDING;
 		mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
 		action = WLAN_SP_MESH_PEERING_CLOSE;
 		break;
 	case NL80211_PLINK_HOLDING:
 		/* holding timer */
-		del_timer(&sta->plink_timer);
+		del_timer(&sta->mesh->plink_timer);
 		mesh_plink_fsm_restart(sta);
 		break;
 	default:
 		break;
 	}
-	spin_unlock_bh(&sta->plink_lock);
+	spin_unlock_bh(&sta->mesh->plink_lock);
 	if (action)
-		mesh_plink_frame_tx(sdata, action, sta->sta.addr,
-				    sta->llid, sta->plid, reason);
+		mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr,
+				    sta->mesh->llid, sta->mesh->plid, reason);
 }
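
[Editor's note: on each retry the handler above extends the timeout by rand % timeout, i.e. by a factor drawn from [1, 2), so the backoff grows roughly geometrically up to dot11MeshMaxRetries. A standalone sketch of that growth; the 40 ms seed is illustrative.]

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int timeout = 40;	/* ms; illustrative initial retry timeout */

	srand(1);
	for (int retry = 1; retry <= 4; retry++) {
		timeout += rand() % timeout;	/* grows by a factor in [1, 2) */
		printf("retry %d: next timeout %u ms\n", retry, timeout);
	}
	return 0;
}
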
 
 static inline void mesh_plink_timer_set(struct sta_info *sta, u32 timeout)
 {
-	sta->plink_timer.expires = jiffies + msecs_to_jiffies(timeout);
-	sta->plink_timer.data = (unsigned long) sta;
-	sta->plink_timer.function = mesh_plink_timer;
-	sta->plink_timeout = timeout;
-	add_timer(&sta->plink_timer);
+	sta->mesh->plink_timer.expires = jiffies + msecs_to_jiffies(timeout);
+	sta->mesh->plink_timer.data = (unsigned long) sta;
+	sta->mesh->plink_timer.function = mesh_plink_timer;
+	sta->mesh->plink_timeout = timeout;
+	add_timer(&sta->mesh->plink_timer);
 }
 
 static bool llid_in_use(struct ieee80211_sub_if_data *sdata,
@@ -645,7 +677,7 @@
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(sta, &local->sta_list, list) {
-		if (!memcmp(&sta->llid, &llid, sizeof(llid))) {
+		if (!memcmp(&sta->mesh->llid, &llid, sizeof(llid))) {
 			in_use = true;
 			break;
 		}
@@ -661,8 +693,6 @@
 
 	do {
 		get_random_bytes(&llid, sizeof(llid));
-		/* for mesh PS we still only have the AID range for TIM bits */
-		llid = (llid % IEEE80211_MAX_AID) + 1;
 	} while (llid_in_use(sdata, llid));
 
 	return llid;
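
[Editor's note: with the clamp removed, the local link ID is drawn from the full 16-bit space - buffered-frame indication now keys off the peer-assigned AID (see the PLINK_CNF_AID handling below) rather than the llid. A standalone sketch of the same rejection-sampling loop; in_use() is a stand-in for llid_in_use().]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static bool in_use(uint16_t llid)
{
	return llid == 0;	/* pretend only llid 0 is already taken */
}

/* keep drawing random 16-bit IDs until one is unused */
static uint16_t new_llid(void)
{
	uint16_t llid;

	do {
		llid = (uint16_t)rand();	/* full 16-bit space, as above */
	} while (in_use(llid));

	return llid;
}

int main(void)
{
	printf("llid: %u\n", new_llid());
	return 0;
}
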
@@ -676,16 +706,16 @@
 	if (!test_sta_flag(sta, WLAN_STA_AUTH))
 		return 0;
 
-	spin_lock_bh(&sta->plink_lock);
-	sta->llid = mesh_get_new_llid(sdata);
-	if (sta->plink_state != NL80211_PLINK_LISTEN &&
-	    sta->plink_state != NL80211_PLINK_BLOCKED) {
-		spin_unlock_bh(&sta->plink_lock);
+	spin_lock_bh(&sta->mesh->plink_lock);
+	sta->mesh->llid = mesh_get_new_llid(sdata);
+	if (sta->mesh->plink_state != NL80211_PLINK_LISTEN &&
+	    sta->mesh->plink_state != NL80211_PLINK_BLOCKED) {
+		spin_unlock_bh(&sta->mesh->plink_lock);
 		return 0;
 	}
-	sta->plink_state = NL80211_PLINK_OPN_SNT;
+	sta->mesh->plink_state = NL80211_PLINK_OPN_SNT;
 	mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout);
-	spin_unlock_bh(&sta->plink_lock);
+	spin_unlock_bh(&sta->mesh->plink_lock);
 	mpl_dbg(sdata,
 		"Mesh plink: starting establishment with %pM\n",
 		sta->sta.addr);
@@ -693,8 +723,8 @@
 	/* set the non-peer mode to active during peering */
 	changed = ieee80211_mps_local_status_update(sdata);
 
-	mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
-			    sta->sta.addr, sta->llid, 0, 0);
+	mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_OPEN,
+			    sta->sta.addr, sta->mesh->llid, 0, 0);
 	return changed;
 }
 
@@ -702,10 +732,10 @@
 {
 	u32 changed;
 
-	spin_lock_bh(&sta->plink_lock);
+	spin_lock_bh(&sta->mesh->plink_lock);
 	changed = __mesh_plink_deactivate(sta);
-	sta->plink_state = NL80211_PLINK_BLOCKED;
-	spin_unlock_bh(&sta->plink_lock);
+	sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
+	spin_unlock_bh(&sta->mesh->plink_lock);
 
 	return changed;
 }
@@ -715,12 +745,11 @@
 			     enum plink_event event)
 {
 	struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
-
 	u16 reason = (event == CLS_ACPT) ?
 		     WLAN_REASON_MESH_CLOSE : WLAN_REASON_MESH_CONFIG;
 
-	sta->reason = reason;
-	sta->plink_state = NL80211_PLINK_HOLDING;
+	sta->mesh->reason = reason;
+	sta->mesh->plink_state = NL80211_PLINK_HOLDING;
 	mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
 }
 
@@ -730,8 +759,8 @@
 	struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
 	u32 changed = 0;
 
-	del_timer(&sta->plink_timer);
-	sta->plink_state = NL80211_PLINK_ESTAB;
+	del_timer(&sta->mesh->plink_timer);
+	sta->mesh->plink_state = NL80211_PLINK_ESTAB;
 	changed |= mesh_plink_inc_estab_count(sdata);
 	changed |= mesh_set_ht_prot_mode(sdata);
 	changed |= mesh_set_short_slot_time(sdata);
@@ -758,18 +787,18 @@
 	u32 changed = 0;
 
 	mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr,
-		mplstates[sta->plink_state], mplevents[event]);
+		mplstates[sta->mesh->plink_state], mplevents[event]);
 
-	spin_lock_bh(&sta->plink_lock);
-	switch (sta->plink_state) {
+	spin_lock_bh(&sta->mesh->plink_lock);
+	switch (sta->mesh->plink_state) {
 	case NL80211_PLINK_LISTEN:
 		switch (event) {
 		case CLS_ACPT:
 			mesh_plink_fsm_restart(sta);
 			break;
 		case OPN_ACPT:
-			sta->plink_state = NL80211_PLINK_OPN_RCVD;
-			sta->llid = mesh_get_new_llid(sdata);
+			sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD;
+			sta->mesh->llid = mesh_get_new_llid(sdata);
 			mesh_plink_timer_set(sta,
 					     mshcfg->dot11MeshRetryTimeout);
 
@@ -791,11 +820,11 @@
 			break;
 		case OPN_ACPT:
 			/* retry timer is left untouched */
-			sta->plink_state = NL80211_PLINK_OPN_RCVD;
+			sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD;
 			action = WLAN_SP_MESH_PEERING_CONFIRM;
 			break;
 		case CNF_ACPT:
-			sta->plink_state = NL80211_PLINK_CNF_RCVD;
+			sta->mesh->plink_state = NL80211_PLINK_CNF_RCVD;
 			mod_plink_timer(sta, mshcfg->dot11MeshConfirmTimeout);
 			break;
 		default:
@@ -855,7 +884,7 @@
 	case NL80211_PLINK_HOLDING:
 		switch (event) {
 		case CLS_ACPT:
-			del_timer(&sta->plink_timer);
+			del_timer(&sta->mesh->plink_timer);
 			mesh_plink_fsm_restart(sta);
 			break;
 		case OPN_ACPT:
@@ -874,17 +903,18 @@
 		 */
 		break;
 	}
-	spin_unlock_bh(&sta->plink_lock);
+	spin_unlock_bh(&sta->mesh->plink_lock);
 	if (action) {
-		mesh_plink_frame_tx(sdata, action, sta->sta.addr,
-				    sta->llid, sta->plid, sta->reason);
+		mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr,
+				    sta->mesh->llid, sta->mesh->plid,
+				    sta->mesh->reason);
 
 		/* also send confirm in open case */
 		if (action == WLAN_SP_MESH_PEERING_OPEN) {
-			mesh_plink_frame_tx(sdata,
+			mesh_plink_frame_tx(sdata, sta,
 					    WLAN_SP_MESH_PEERING_CONFIRM,
-					    sta->sta.addr, sta->llid,
-					    sta->plid, 0);
+					    sta->sta.addr, sta->mesh->llid,
+					    sta->mesh->plid, 0);
 		}
 	}
 
@@ -939,7 +969,7 @@
 			mpl_dbg(sdata, "Mesh plink: Action frame from non-authed peer\n");
 			goto out;
 		}
-		if (sta->plink_state == NL80211_PLINK_BLOCKED)
+		if (sta->mesh->plink_state == NL80211_PLINK_BLOCKED)
 			goto out;
 	}
 
@@ -954,7 +984,7 @@
 		if (!matches_local)
 			event = OPN_RJCT;
 		if (!mesh_plink_free_count(sdata) ||
-		    (sta->plid && sta->plid != plid))
+		    (sta->mesh->plid && sta->mesh->plid != plid))
 			event = OPN_IGNR;
 		else
 			event = OPN_ACPT;
@@ -963,14 +993,14 @@
 		if (!matches_local)
 			event = CNF_RJCT;
 		if (!mesh_plink_free_count(sdata) ||
-		    sta->llid != llid ||
-		    (sta->plid && sta->plid != plid))
+		    sta->mesh->llid != llid ||
+		    (sta->mesh->plid && sta->mesh->plid != plid))
 			event = CNF_IGNR;
 		else
 			event = CNF_ACPT;
 		break;
 	case WLAN_SP_MESH_PEERING_CLOSE:
-		if (sta->plink_state == NL80211_PLINK_ESTAB)
+		if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
 			/* Do not check for llid or plid. This does not
 			 * follow the standard but since multiple plinks
 			 * per sta are not supported, it is necessary in
@@ -981,9 +1011,9 @@
 			 * restarted.
 			 */
 			event = CLS_ACPT;
-		else if (sta->plid != plid)
+		else if (sta->mesh->plid != plid)
 			event = CLS_IGNR;
-		else if (ie_len == 8 && sta->llid != llid)
+		else if (ie_len == 8 && sta->mesh->llid != llid)
 			event = CLS_IGNR;
 		else
 			event = CLS_ACPT;
@@ -1070,9 +1100,9 @@
 			mpl_dbg(sdata, "Mesh plink: failed to init peer!\n");
 			goto unlock_rcu;
 		}
-		sta->plid = plid;
+		sta->mesh->plid = plid;
 	} else if (!sta && event == OPN_RJCT) {
-		mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+		mesh_plink_frame_tx(sdata, NULL, WLAN_SP_MESH_PEERING_CLOSE,
 				    mgmt->sa, 0, plid,
 				    WLAN_REASON_MESH_CONFIG);
 		goto unlock_rcu;
@@ -1081,9 +1111,13 @@
 		goto unlock_rcu;
 	}
 
-	/* 802.11-2012 13.3.7.2 - update plid on CNF if not set */
-	if (!sta->plid && event == CNF_ACPT)
-		sta->plid = plid;
+	if (event == CNF_ACPT) {
+		/* 802.11-2012 13.3.7.2 - update plid on CNF if not set */
+		if (!sta->mesh->plid)
+			sta->mesh->plid = plid;
+
+		sta->mesh->aid = get_unaligned_le16(PLINK_CNF_AID(mgmt));
+	}
 
 	changed |= mesh_plink_fsm(sdata, sta, event);
 
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
index ad8b377..90a268a 100644
--- a/net/mac80211/mesh_ps.c
+++ b/net/mac80211/mesh_ps.c
@@ -92,16 +92,16 @@
 		if (sdata != sta->sdata)
 			continue;
 
-		switch (sta->plink_state) {
+		switch (sta->mesh->plink_state) {
 		case NL80211_PLINK_OPN_SNT:
 		case NL80211_PLINK_OPN_RCVD:
 		case NL80211_PLINK_CNF_RCVD:
 			peering = true;
 			break;
 		case NL80211_PLINK_ESTAB:
-			if (sta->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP)
+			if (sta->mesh->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP)
 				light_sleep_cnt++;
-			else if (sta->local_pm == NL80211_MESH_POWER_DEEP_SLEEP)
+			else if (sta->mesh->local_pm == NL80211_MESH_POWER_DEEP_SLEEP)
 				deep_sleep_cnt++;
 			break;
 		default:
@@ -153,19 +153,19 @@
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 
-	if (sta->local_pm == pm)
+	if (sta->mesh->local_pm == pm)
 		return 0;
 
 	mps_dbg(sdata, "local STA operates in mode %d with %pM\n",
 		pm, sta->sta.addr);
 
-	sta->local_pm = pm;
+	sta->mesh->local_pm = pm;
 
 	/*
 	 * announce peer-specific power mode transition
 	 * (see IEEE802.11-2012 13.14.3.2 and 13.14.3.3)
 	 */
-	if (sta->plink_state == NL80211_PLINK_ESTAB)
+	if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
 		mps_qos_null_tx(sta);
 
 	return ieee80211_mps_local_status_update(sdata);
@@ -197,8 +197,8 @@
 
 	if (is_unicast_ether_addr(hdr->addr1) &&
 	    ieee80211_is_data_qos(hdr->frame_control) &&
-	    sta->plink_state == NL80211_PLINK_ESTAB)
-		pm = sta->local_pm;
+	    sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+		pm = sta->mesh->local_pm;
 	else
 		pm = sdata->u.mesh.nonpeer_pm;
 
@@ -241,16 +241,16 @@
 	 * use peer-specific power mode if peering is established and the
 	 * peer's power mode is known
 	 */
-	if (sta->plink_state == NL80211_PLINK_ESTAB &&
-	    sta->peer_pm != NL80211_MESH_POWER_UNKNOWN)
-		pm = sta->peer_pm;
+	if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
+	    sta->mesh->peer_pm != NL80211_MESH_POWER_UNKNOWN)
+		pm = sta->mesh->peer_pm;
 	else
-		pm = sta->nonpeer_pm;
+		pm = sta->mesh->nonpeer_pm;
 
 	do_buffer = (pm != NL80211_MESH_POWER_ACTIVE);
 
 	/* clear the MPSP flags for non-peers or active STA */
-	if (sta->plink_state != NL80211_PLINK_ESTAB) {
+	if (sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
 		clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
 		clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
 	} else if (!do_buffer) {
@@ -296,13 +296,13 @@
 		pm = NL80211_MESH_POWER_ACTIVE;
 	}
 
-	if (sta->peer_pm == pm)
+	if (sta->mesh->peer_pm == pm)
 		return;
 
 	mps_dbg(sta->sdata, "STA %pM enters mode %d\n",
 		sta->sta.addr, pm);
 
-	sta->peer_pm = pm;
+	sta->mesh->peer_pm = pm;
 
 	ieee80211_mps_sta_status_update(sta);
 }
@@ -317,13 +317,13 @@
 	else
 		pm = NL80211_MESH_POWER_ACTIVE;
 
-	if (sta->nonpeer_pm == pm)
+	if (sta->mesh->nonpeer_pm == pm)
 		return;
 
 	mps_dbg(sta->sdata, "STA %pM sets non-peer mode to %d\n",
 		sta->sta.addr, pm);
 
-	sta->nonpeer_pm = pm;
+	sta->mesh->nonpeer_pm = pm;
 
 	ieee80211_mps_sta_status_update(sta);
 }
@@ -552,7 +552,7 @@
 	} else {
 		if (eosp)
 			clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
-		else if (sta->local_pm != NL80211_MESH_POWER_ACTIVE)
+		else if (sta->mesh->local_pm != NL80211_MESH_POWER_ACTIVE)
 			set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
 
 		if (rspi && !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER))
@@ -577,9 +577,9 @@
 	int ac, buffer_local = 0;
 	bool has_buffered = false;
 
-	if (sta->plink_state == NL80211_PLINK_ESTAB)
+	if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
 		has_buffered = ieee80211_check_tim(elems->tim, elems->tim_len,
-						   sta->llid);
+						   sta->mesh->aid);
 
 	if (has_buffered)
 		mps_dbg(sta->sdata, "%pM indicates buffered frames\n",
@@ -598,7 +598,7 @@
 	if (!has_buffered && !buffer_local)
 		return;
 
-	if (sta->plink_state == NL80211_PLINK_ESTAB)
+	if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
 		mpsp_trigger_send(sta, has_buffered, !buffer_local);
 	else
 		mps_frame_deliver(sta, 1);
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index 09625d6..64bc22a 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -127,14 +127,14 @@
 
 	/* Timing offset calculation (see 13.13.2.2.2) */
 	t_t = le64_to_cpu(mgmt->u.beacon.timestamp);
-	sta->t_offset = t_t - t_r;
+	sta->mesh->t_offset = t_t - t_r;
 
 	if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
-		s64 t_clockdrift = sta->t_offset_setpoint - sta->t_offset;
+		s64 t_clockdrift = sta->mesh->t_offset_setpoint - sta->mesh->t_offset;
 		msync_dbg(sdata,
-			  "STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld\n",
-			  sta->sta.addr, (long long) sta->t_offset,
-			  (long long) sta->t_offset_setpoint,
+			  "STA %pM : t_offset=%lld, t_offset_setpoint=%lld, t_clockdrift=%lld\n",
+			  sta->sta.addr, (long long) sta->mesh->t_offset,
+			  (long long) sta->mesh->t_offset_setpoint,
 			  (long long) t_clockdrift);
 
 		if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT ||
@@ -152,12 +152,12 @@
 			ifmsh->sync_offset_clockdrift_max = t_clockdrift;
 		spin_unlock_bh(&ifmsh->sync_offset_lock);
 	} else {
-		sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN;
+		sta->mesh->t_offset_setpoint = sta->mesh->t_offset - TOFFSET_SET_MARGIN;
 		set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
 		msync_dbg(sdata,
-			  "STA %pM : offset was invalid, sta->t_offset=%lld\n",
+			  "STA %pM : offset was invalid, t_offset=%lld\n",
 			  sta->sta.addr,
-			  (long long) sta->t_offset);
+			  (long long) sta->mesh->t_offset);
 	}
 
 no_sync:
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 9b2cc27..705ef1d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -6,6 +6,7 @@
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright (C) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -538,11 +539,16 @@
 	ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
 }
 
+/* This function determines the VHT capability flags for the association
+ * and builds the IE.
+ * Note: the function may set the owner of the MU-MIMO capability.
+ */
 static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
 				 struct sk_buff *skb,
 				 struct ieee80211_supported_band *sband,
 				 struct ieee80211_vht_cap *ap_vht_cap)
 {
+	struct ieee80211_local *local = sdata->local;
 	u8 *pos;
 	u32 cap;
 	struct ieee80211_sta_vht_cap vht_cap;
@@ -576,7 +582,34 @@
 	 */
 	if (!(ap_vht_cap->vht_cap_info &
 			cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)))
-		cap &= ~IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+		cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+			 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+	else if (!(ap_vht_cap->vht_cap_info &
+			cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
+		cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+	/*
+	 * If some other vif is using the MU-MIMO capability we cannot
+	 * associate using MU-MIMO - this will lead to contradictions in the
+	 * group-id mechanism.
+	 * Ownership is established at association request time, in order to
+	 * avoid simultaneous MU-MIMO associations.
+	 */
+	if (cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) {
+		bool disable_mu_mimo = false;
+		struct ieee80211_sub_if_data *other;
+
+		list_for_each_entry_rcu(other, &local->interfaces, list) {
+			if (other->flags & IEEE80211_SDATA_MU_MIMO_OWNER) {
+				disable_mu_mimo = true;
+				break;
+			}
+		}
+		if (disable_mu_mimo)
+			cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+		else
+			sdata->flags |= IEEE80211_SDATA_MU_MIMO_OWNER;
+	}
 
 	mask = IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
 
@@ -1096,24 +1129,6 @@
 	ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work);
 }
 
-static void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
-{
-	struct sta_info *sta;
-	u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
-		if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
-		    !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
-			continue;
-
-		ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
-					    NL80211_TDLS_TEARDOWN, reason,
-					    GFP_ATOMIC);
-	}
-	rcu_read_unlock();
-}
-
 static void
 ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 				 u64 timestamp, u32 device_timestamp,
@@ -2076,6 +2091,7 @@
 	memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
 	memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa));
 	memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask));
+	sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER;
 
 	sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
 
@@ -2538,6 +2554,7 @@
 		eth_zero_addr(sdata->u.mgd.bssid);
 		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
 		sdata->u.mgd.flags = 0;
+		sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER;
 		mutex_lock(&sdata->local->mtx);
 		ieee80211_vif_release_channel(sdata);
 		mutex_unlock(&sdata->local->mtx);
@@ -3034,12 +3051,8 @@
 
 	rate_control_rate_init(sta);
 
-	if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) {
+	if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
 		set_sta_flag(sta, WLAN_STA_MFP);
-		sta->sta.mfp = true;
-	} else {
-		sta->sta.mfp = false;
-	}
 
 	sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS;
 
diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c
index 358d5f9..573b81a 100644
--- a/net/mac80211/ocb.c
+++ b/net/mac80211/ocb.c
@@ -179,7 +179,7 @@
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
-	u32 changed = BSS_CHANGED_OCB;
+	u32 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID;
 	int err;
 
 	if (ifocb->joined == true)
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index fda33f9..9857693 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -29,6 +29,65 @@
 MODULE_PARM_DESC(ieee80211_default_rc_algo,
 		 "Default rate control algorithm for mac80211 to use");
 
+void rate_control_rate_init(struct sta_info *sta)
+{
+	struct ieee80211_local *local = sta->sdata->local;
+	struct rate_control_ref *ref = sta->rate_ctrl;
+	struct ieee80211_sta *ista = &sta->sta;
+	void *priv_sta = sta->rate_ctrl_priv;
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+
+	ieee80211_sta_set_rx_nss(sta);
+
+	if (!ref)
+		return;
+
+	rcu_read_lock();
+
+	chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+	if (WARN_ON(!chanctx_conf)) {
+		rcu_read_unlock();
+		return;
+	}
+
+	sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
+
+	spin_lock_bh(&sta->rate_ctrl_lock);
+	ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
+			    priv_sta);
+	spin_unlock_bh(&sta->rate_ctrl_lock);
+	rcu_read_unlock();
+	set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
+}
+
+void rate_control_rate_update(struct ieee80211_local *local,
+				    struct ieee80211_supported_band *sband,
+				    struct sta_info *sta, u32 changed)
+{
+	struct rate_control_ref *ref = local->rate_ctrl;
+	struct ieee80211_sta *ista = &sta->sta;
+	void *priv_sta = sta->rate_ctrl_priv;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+
+	if (ref && ref->ops->rate_update) {
+		rcu_read_lock();
+
+		chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+		if (WARN_ON(!chanctx_conf)) {
+			rcu_read_unlock();
+			return;
+		}
+
+		spin_lock_bh(&sta->rate_ctrl_lock);
+		ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
+				      ista, priv_sta, changed);
+		spin_unlock_bh(&sta->rate_ctrl_lock);
+		rcu_read_unlock();
+	}
+	drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
+}
+
 int ieee80211_rate_control_register(const struct rate_control_ops *ops)
 {
 	struct rate_control_alg *alg;
@@ -294,39 +353,37 @@
 }
 EXPORT_SYMBOL(rate_control_send_low);
 
-static bool rate_idx_match_legacy_mask(struct ieee80211_tx_rate *rate,
-				       int n_bitrates, u32 mask)
+static bool rate_idx_match_legacy_mask(s8 *rate_idx, int n_bitrates, u32 mask)
 {
 	int j;
 
 	/* See whether the selected rate or anything below it is allowed. */
-	for (j = rate->idx; j >= 0; j--) {
+	for (j = *rate_idx; j >= 0; j--) {
 		if (mask & (1 << j)) {
 			/* Okay, found a suitable rate. Use it. */
-			rate->idx = j;
+			*rate_idx = j;
 			return true;
 		}
 	}
 
 	/* Try to find a higher rate that would be allowed */
-	for (j = rate->idx + 1; j < n_bitrates; j++) {
+	for (j = *rate_idx + 1; j < n_bitrates; j++) {
 		if (mask & (1 << j)) {
 			/* Okay, found a suitable rate. Use it. */
-			rate->idx = j;
+			*rate_idx = j;
 			return true;
 		}
 	}
 	return false;
 }
 
-static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate,
-				    u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+static bool rate_idx_match_mcs_mask(s8 *rate_idx, u8 *mcs_mask)
 {
 	int i, j;
 	int ridx, rbit;
 
-	ridx = rate->idx / 8;
-	rbit = rate->idx % 8;
+	ridx = *rate_idx / 8;
+	rbit = *rate_idx % 8;
 
 	/* sanity check */
 	if (ridx < 0 || ridx >= IEEE80211_HT_MCS_MASK_LEN)
@@ -336,20 +393,20 @@
 	for (i = ridx; i >= 0; i--) {
 		for (j = rbit; j >= 0; j--)
 			if (mcs_mask[i] & BIT(j)) {
-				rate->idx = i * 8 + j;
+				*rate_idx = i * 8 + j;
 				return true;
 			}
 		rbit = 7;
 	}
 
 	/* Try to find a higher rate that would be allowed */
-	ridx = (rate->idx + 1) / 8;
-	rbit = (rate->idx + 1) % 8;
+	ridx = (*rate_idx + 1) / 8;
+	rbit = (*rate_idx + 1) % 8;
 
 	for (i = ridx; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
 		for (j = rbit; j < 8; j++)
 			if (mcs_mask[i] & BIT(j)) {
-				rate->idx = i * 8 + j;
+				*rate_idx = i * 8 + j;
 				return true;
 			}
 		rbit = 0;
@@ -357,37 +414,93 @@
 	return false;
 }
 
+static bool rate_idx_match_vht_mcs_mask(s8 *rate_idx, u16 *vht_mask)
+{
+	int i, j;
+	int ridx, rbit;
 
+	ridx = *rate_idx >> 4;
+	rbit = *rate_idx & 0xf;
 
-static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
+	if (ridx < 0 || ridx >= NL80211_VHT_NSS_MAX)
+		return false;
+
+	/* See whether the selected rate or anything below it is allowed. */
+	for (i = ridx; i >= 0; i--) {
+		for (j = rbit; j >= 0; j--) {
+			if (vht_mask[i] & BIT(j)) {
+				*rate_idx = (i << 4) | j;
+				return true;
+			}
+		}
+		rbit = 15;
+	}
+
+	/* Try to find a higher rate that would be allowed */
+	ridx = (*rate_idx + 1) >> 4;
+	rbit = (*rate_idx + 1) & 0xf;
+
+	for (i = ridx; i < NL80211_VHT_NSS_MAX; i++) {
+		for (j = rbit; j < 16; j++) {
+			if (vht_mask[i] & BIT(j)) {
+				*rate_idx = (i << 4) | j;
+				return true;
+			}
+		}
+		rbit = 0;
+	}
+	return false;
+}
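
[Editor's note: the walker above relies on the VHT rate-index encoding of (NSS - 1) in the high nibble and MCS in the low nibble of rate_idx. A quick standalone check of that packing; the helper names are illustrative.]

#include <stdio.h>

/* rate_idx layout used above: high nibble = NSS - 1, low nibble = MCS */
static int pack_vht(int nss, int mcs)	{ return ((nss - 1) << 4) | mcs; }
static int vht_nss(int idx)		{ return (idx >> 4) + 1; }
static int vht_mcs(int idx)		{ return idx & 0xf; }

int main(void)
{
	int idx = pack_vht(2, 7);	/* NSS 2, MCS 7 */

	printf("idx=%d nss=%d mcs=%d\n", idx, vht_nss(idx), vht_mcs(idx));
	/* prints: idx=23 nss=2 mcs=7 */
	return 0;
}
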
+
+static void rate_idx_match_mask(s8 *rate_idx, u16 *rate_flags,
 				struct ieee80211_supported_band *sband,
 				enum nl80211_chan_width chan_width,
 				u32 mask,
-				u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+				u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN],
+				u16 vht_mask[NL80211_VHT_NSS_MAX])
 {
-	struct ieee80211_tx_rate alt_rate;
+	if (*rate_flags & IEEE80211_TX_RC_VHT_MCS) {
+		/* handle VHT rates */
+		if (rate_idx_match_vht_mcs_mask(rate_idx, vht_mask))
+			return;
 
-	/* handle HT rates */
-	if (rate->flags & IEEE80211_TX_RC_MCS) {
-		if (rate_idx_match_mcs_mask(rate, mcs_mask))
+		*rate_idx = 0;
+		/* keep protection flags */
+		*rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+				IEEE80211_TX_RC_USE_CTS_PROTECT |
+				IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
+
+		*rate_flags |= IEEE80211_TX_RC_MCS;
+		if (chan_width == NL80211_CHAN_WIDTH_40)
+			*rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+
+		if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
 			return;
 
 		/* also try the legacy rates. */
-		alt_rate.idx = 0;
-		/* keep protection flags */
-		alt_rate.flags = rate->flags &
-				 (IEEE80211_TX_RC_USE_RTS_CTS |
-				  IEEE80211_TX_RC_USE_CTS_PROTECT |
-				  IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
-		alt_rate.count = rate->count;
-		if (rate_idx_match_legacy_mask(&alt_rate,
-					       sband->n_bitrates, mask)) {
-			*rate = alt_rate;
+		*rate_flags &= ~(IEEE80211_TX_RC_MCS |
+				 IEEE80211_TX_RC_40_MHZ_WIDTH);
+		if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+					       mask))
 			return;
-		}
-	} else if (!(rate->flags & IEEE80211_TX_RC_VHT_MCS)) {
+	} else if (*rate_flags & IEEE80211_TX_RC_MCS) {
+		/* handle HT rates */
+		if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
+			return;
+
+		/* also try the legacy rates. */
+		*rate_idx = 0;
+		/* keep protection flags */
+		*rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+				IEEE80211_TX_RC_USE_CTS_PROTECT |
+				IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
+		if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+					       mask))
+			return;
+	} else {
 		/* handle legacy rates */
-		if (rate_idx_match_legacy_mask(rate, sband->n_bitrates, mask))
+		if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+					       mask))
 			return;
 
 		/* if HT BSS, and we handle a data frame, also try HT rates */
@@ -400,23 +513,19 @@
 			break;
 		}
 
-		alt_rate.idx = 0;
+		*rate_idx = 0;
 		/* keep protection flags */
-		alt_rate.flags = rate->flags &
-				 (IEEE80211_TX_RC_USE_RTS_CTS |
-				  IEEE80211_TX_RC_USE_CTS_PROTECT |
-				  IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
-		alt_rate.count = rate->count;
+		*rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+				IEEE80211_TX_RC_USE_CTS_PROTECT |
+				IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
 
-		alt_rate.flags |= IEEE80211_TX_RC_MCS;
+		*rate_flags |= IEEE80211_TX_RC_MCS;
 
 		if (chan_width == NL80211_CHAN_WIDTH_40)
-			alt_rate.flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+			*rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
 
-		if (rate_idx_match_mcs_mask(&alt_rate, mcs_mask)) {
-			*rate = alt_rate;
+		if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
 			return;
-		}
 	}
 
 	/*
@@ -569,18 +678,92 @@
 	}
 }
 
+static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata,
+				  struct ieee80211_supported_band *sband,
+				  struct ieee80211_sta *sta, u32 *mask,
+				  u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN],
+				  u16 vht_mask[NL80211_VHT_NSS_MAX])
+{
+	u32 i, flags;
+
+	*mask = sdata->rc_rateidx_mask[sband->band];
+	flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+	for (i = 0; i < sband->n_bitrates; i++) {
+		if ((flags & sband->bitrates[i].flags) != flags)
+			*mask &= ~BIT(i);
+	}
+
+	if (*mask == (1 << sband->n_bitrates) - 1 &&
+	    !sdata->rc_has_mcs_mask[sband->band] &&
+	    !sdata->rc_has_vht_mcs_mask[sband->band])
+		return false;
+
+	if (sdata->rc_has_mcs_mask[sband->band])
+		memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[sband->band],
+		       IEEE80211_HT_MCS_MASK_LEN);
+	else
+		memset(mcs_mask, 0xff, IEEE80211_HT_MCS_MASK_LEN);
+
+	if (sdata->rc_has_vht_mcs_mask[sband->band])
+		memcpy(vht_mask, sdata->rc_rateidx_vht_mcs_mask[sband->band],
+		       sizeof(u16) * NL80211_VHT_NSS_MAX);
+	else
+		memset(vht_mask, 0xff, sizeof(u16) * NL80211_VHT_NSS_MAX);
+
+	if (sta) {
+		__le16 sta_vht_cap;
+		u16 sta_vht_mask[NL80211_VHT_NSS_MAX];
+
+		/* Filter out rates that the STA does not support */
+		*mask &= sta->supp_rates[sband->band];
+		for (i = 0; i < sizeof(mcs_mask); i++)
+			mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i];
+
+		sta_vht_cap = sta->vht_cap.vht_mcs.rx_mcs_map;
+		ieee80211_get_vht_mask_from_cap(sta_vht_cap, sta_vht_mask);
+		for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+			vht_mask[i] &= sta_vht_mask[i];
+	}
+
+	return true;
+}
+
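
[Editor's note: rate_control_cap_mask() starts from the configured mask, bails out early when everything is allowed, and otherwise intersects with the STA's capabilities. As an aside, sizeof(mcs_mask) on the array parameter evaluates to the pointer size rather than IEEE80211_HT_MCS_MASK_LEN, so that loop stays in bounds but covers fewer bytes than the full mask. A trivial standalone sketch of the legacy-mask intersection; the mask values are illustrative.]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* as in rate_control_cap_mask(): configured mask ANDed with what
	 * the STA actually supports */
	uint32_t cfg_mask = 0x0ff0;	/* illustrative user-configured rates */
	uint32_t sta_supp = 0x0f0f;	/* illustrative STA-supported rates */
	uint32_t mask = cfg_mask & sta_supp;

	printf("effective mask: 0x%04x\n", mask);	/* prints 0x0f00 */
	return 0;
}
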
+static void
+rate_control_apply_mask_ratetbl(struct sta_info *sta,
+				struct ieee80211_supported_band *sband,
+				struct ieee80211_sta_rates *rates)
+{
+	int i;
+	u32 mask;
+	u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
+	u16 vht_mask[NL80211_VHT_NSS_MAX];
+	enum nl80211_chan_width chan_width;
+
+	if (!rate_control_cap_mask(sta->sdata, sband, &sta->sta, &mask,
+				   mcs_mask, vht_mask))
+		return;
+
+	chan_width = sta->sdata->vif.bss_conf.chandef.width;
+	for (i = 0; i < IEEE80211_TX_RATE_TABLE_SIZE; i++) {
+		if (rates->rate[i].idx < 0)
+			break;
+
+		rate_idx_match_mask(&rates->rate[i].idx, &rates->rate[i].flags,
+				    sband, chan_width, mask, mcs_mask,
+				    vht_mask);
+	}
+}
+
 static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
 				    struct ieee80211_sta *sta,
 				    struct ieee80211_supported_band *sband,
-				    struct ieee80211_tx_info *info,
 				    struct ieee80211_tx_rate *rates,
 				    int max_rates)
 {
 	enum nl80211_chan_width chan_width;
 	u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
-	bool has_mcs_mask;
 	u32 mask;
-	u32 rate_flags;
+	u16 rate_flags, vht_mask[NL80211_VHT_NSS_MAX];
 	int i;
 
 	/*
@@ -588,30 +771,10 @@
 	 * default mask (allow all rates) is used to save some processing for
 	 * the common case.
 	 */
-	mask = sdata->rc_rateidx_mask[info->band];
-	has_mcs_mask = sdata->rc_has_mcs_mask[info->band];
-	rate_flags =
-		ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
-	for (i = 0; i < sband->n_bitrates; i++)
-		if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
-			mask &= ~BIT(i);
-
-	if (mask == (1 << sband->n_bitrates) - 1 && !has_mcs_mask)
+	if (!rate_control_cap_mask(sdata, sband, sta, &mask, mcs_mask,
+				   vht_mask))
 		return;
 
-	if (has_mcs_mask)
-		memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[info->band],
-		       sizeof(mcs_mask));
-	else
-		memset(mcs_mask, 0xff, sizeof(mcs_mask));
-
-	if (sta) {
-		/* Filter out rates that the STA does not support */
-		mask &= sta->supp_rates[info->band];
-		for (i = 0; i < sizeof(mcs_mask); i++)
-			mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i];
-	}
-
 	/*
 	 * Make sure the rate index selected for each TX rate is
 	 * included in the configured mask and change the rate indexes
@@ -623,8 +786,10 @@
 		if (rates[i].idx < 0)
 			break;
 
-		rate_idx_match_mask(&rates[i], sband, chan_width, mask,
-				    mcs_mask);
+		rate_flags = rates[i].flags;
+		rate_idx_match_mask(&rates[i].idx, &rate_flags, sband,
+				    chan_width, mask, mcs_mask, vht_mask);
+		rates[i].flags = rate_flags;
 	}
 }
 
@@ -648,7 +813,7 @@
 	sband = sdata->local->hw.wiphy->bands[info->band];
 
 	if (ieee80211_is_data(hdr->frame_control))
-		rate_control_apply_mask(sdata, sta, sband, info, dest, max_rates);
+		rate_control_apply_mask(sdata, sta, sband, dest, max_rates);
 
 	if (dest[0].idx < 0)
 		__rate_control_send_low(&sdata->local->hw, sband, sta, info,
@@ -705,7 +870,10 @@
 {
 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
 	struct ieee80211_sta_rates *old;
+	struct ieee80211_supported_band *sband;
 
+	sband = hw->wiphy->bands[ieee80211_get_sdata_band(sta->sdata)];
+	rate_control_apply_mask_ratetbl(sta, sband, rates);
 	/*
 	 * mac80211 guarantees that this function will not be called
 	 * concurrently, so the following RCU access is safe, even without
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 25c9be5..624fe5b 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -71,64 +71,10 @@
 	spin_unlock_bh(&sta->rate_ctrl_lock);
 }
 
-static inline void rate_control_rate_init(struct sta_info *sta)
-{
-	struct ieee80211_local *local = sta->sdata->local;
-	struct rate_control_ref *ref = sta->rate_ctrl;
-	struct ieee80211_sta *ista = &sta->sta;
-	void *priv_sta = sta->rate_ctrl_priv;
-	struct ieee80211_supported_band *sband;
-	struct ieee80211_chanctx_conf *chanctx_conf;
-
-	ieee80211_sta_set_rx_nss(sta);
-
-	if (!ref)
-		return;
-
-	rcu_read_lock();
-
-	chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
-	if (WARN_ON(!chanctx_conf)) {
-		rcu_read_unlock();
-		return;
-	}
-
-	sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
-
-	spin_lock_bh(&sta->rate_ctrl_lock);
-	ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
-			    priv_sta);
-	spin_unlock_bh(&sta->rate_ctrl_lock);
-	rcu_read_unlock();
-	set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
-}
-
-static inline void rate_control_rate_update(struct ieee80211_local *local,
+void rate_control_rate_init(struct sta_info *sta);
+void rate_control_rate_update(struct ieee80211_local *local,
 				    struct ieee80211_supported_band *sband,
-				    struct sta_info *sta, u32 changed)
-{
-	struct rate_control_ref *ref = local->rate_ctrl;
-	struct ieee80211_sta *ista = &sta->sta;
-	void *priv_sta = sta->rate_ctrl_priv;
-	struct ieee80211_chanctx_conf *chanctx_conf;
-
-	if (ref && ref->ops->rate_update) {
-		rcu_read_lock();
-
-		chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
-		if (WARN_ON(!chanctx_conf)) {
-			rcu_read_unlock();
-			return;
-		}
-
-		spin_lock_bh(&sta->rate_ctrl_lock);
-		ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
-				      ista, priv_sta, changed);
-		spin_unlock_bh(&sta->rate_ctrl_lock);
-		rcu_read_unlock();
-	}
-	drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
-}
+				    struct sta_info *sta, u32 changed);
 
 static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
 					   struct sta_info *sta, gfp_t gfp)
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 543b672..3928dbd 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -867,7 +867,13 @@
 	else
 		idx = index % MCS_GROUP_RATES + (group->streams - 1) * 8;
 
-	if (offset > 0) {
+	/* enable RTS/CTS if needed:
+	 *  - if station is in dynamic SMPS (and streams > 1)
+	 *  - for fallback rates, to increase chances of getting through
+	 */
+	if (offset > 0 ||
+	    (mi->sta->smps_mode == IEEE80211_SMPS_DYNAMIC &&
+	     group->streams > 1)) {
 		ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
 		flags |= IEEE80211_TX_RC_USE_RTS_CTS;
 	}
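
[Editor's note: per the comment, RTS/CTS is wanted in two independent cases - hence offset > 0 || (...) - fallback rates, and multi-stream rates towards a peer in dynamic SMPS, where the RTS wakes its second RX chain. A standalone sketch of the predicate, with illustrative stand-ins for the kernel enums.]

#include <stdbool.h>
#include <stdio.h>

enum smps { SMPS_OFF, SMPS_DYNAMIC };	/* stand-in for ieee80211_smps_mode */

static bool want_rts(int offset, enum smps mode, int streams)
{
	/* fallback slot, or multi-stream rate towards a dynamic-SMPS peer */
	return offset > 0 || (mode == SMPS_DYNAMIC && streams > 1);
}

int main(void)
{
	printf("%d\n", want_rts(0, SMPS_DYNAMIC, 2));	/* 1: wake 2nd chain */
	printf("%d\n", want_rts(1, SMPS_OFF, 1));	/* 1: fallback rate */
	printf("%d\n", want_rts(0, SMPS_OFF, 2));	/* 0 */
	return 0;
}
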
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5dae166..5bc0b88 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -42,6 +42,51 @@
 	u64_stats_update_end(&tstats->syncp);
 }
 
+static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
+			       enum nl80211_iftype type)
+{
+	__le16 fc = hdr->frame_control;
+
+	if (ieee80211_is_data(fc)) {
+		if (len < 24) /* drop incorrect hdr len (data) */
+			return NULL;
+
+		if (ieee80211_has_a4(fc))
+			return NULL;
+		if (ieee80211_has_tods(fc))
+			return hdr->addr1;
+		if (ieee80211_has_fromds(fc))
+			return hdr->addr2;
+
+		return hdr->addr3;
+	}
+
+	if (ieee80211_is_mgmt(fc)) {
+		if (len < 24) /* drop incorrect hdr len (mgmt) */
+			return NULL;
+		return hdr->addr3;
+	}
+
+	if (ieee80211_is_ctl(fc)) {
+		if (ieee80211_is_pspoll(fc))
+			return hdr->addr1;
+
+		if (ieee80211_is_back_req(fc)) {
+			switch (type) {
+			case NL80211_IFTYPE_STATION:
+				return hdr->addr2;
+			case NL80211_IFTYPE_AP:
+			case NL80211_IFTYPE_AP_VLAN:
+				return hdr->addr1;
+			default:
+				break; /* fall through to the return */
+			}
+		}
+	}
+
+	return NULL;
+}
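
[Editor's note: for data frames, the helper above picks the BSSID out of the 802.11 header based on the ToDS/FromDS bits. A standalone sketch of that mapping.]

#include <stdio.h>

/* data-frame BSSID location by DS bits, matching the logic above */
static const char *bssid_field(int tods, int fromds)
{
	if (tods && fromds)
		return "none (4-address frame)";
	if (tods)
		return "addr1 (RA)";
	if (fromds)
		return "addr2 (TA)";
	return "addr3";
}

int main(void)
{
	for (int tods = 0; tods <= 1; tods++)
		for (int fromds = 0; fromds <= 1; fromds++)
			printf("ToDS=%d FromDS=%d -> %s\n",
			       tods, fromds, bssid_field(tods, fromds));
	return 0;
}
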
+
 /*
  * monitor mode reception
  *
@@ -77,8 +122,7 @@
 	hdr = (void *)(skb->data + rtap_vendor_space);
 
 	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
-			    RX_FLAG_FAILED_PLCP_CRC |
-			    RX_FLAG_AMPDU_IS_ZEROLEN))
+			    RX_FLAG_FAILED_PLCP_CRC))
 		return true;
 
 	if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
@@ -346,10 +390,6 @@
 			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
 		put_unaligned_le32(status->ampdu_reference, pos);
 		pos += 4;
-		if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN)
-			flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN;
-		if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN)
-			flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN;
 		if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
 			flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
 		if (status->flag & RX_FLAG_AMPDU_IS_LAST)
@@ -1093,11 +1133,6 @@
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
 
-	if (unlikely(rx->skb->len < 16)) {
-		I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
-		return RX_DROP_MONITOR;
-	}
-
 	/* Drop disallowed frame classes based on STA auth/assoc state;
 	 * IEEE 802.11, Chap 5.5.
 	 *
@@ -1240,22 +1275,22 @@
 	ieee80211_sta_ps_deliver_wakeup(sta);
 }
 
-int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
+int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
 {
-	struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
+	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
 	bool in_ps;
 
-	WARN_ON(!ieee80211_hw_check(&sta_inf->local->hw, AP_LINK_PS));
+	WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
 
 	/* Don't let the same PS state be set twice */
-	in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
+	in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
 	if ((start && in_ps) || (!start && !in_ps))
 		return -EINVAL;
 
 	if (start)
-		sta_ps_start(sta_inf);
+		sta_ps_start(sta);
 	else
-		sta_ps_end(sta_inf);
+		sta_ps_end(sta);
 
 	return 0;
 }
@@ -1393,7 +1428,7 @@
 	sta->rx_bytes += rx->skb->len;
 	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
 		sta->last_signal = status->signal;
-		ewma_add(&sta->avg_signal, -status->signal);
+		ewma_signal_add(&sta->avg_signal, -status->signal);
 	}
 
 	if (status->chains) {
@@ -1405,7 +1440,7 @@
 				continue;
 
 			sta->chain_signal_last[i] = signal;
-			ewma_add(&sta->chain_signal_avg[i], -signal);
+			ewma_signal_add(&sta->chain_signal_avg[i], -signal);
 		}
 	}
 
@@ -1647,7 +1682,6 @@
 		if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
 			return RX_DROP_MONITOR;
 
-		rx->key->tx_rx_count++;
 		/* TODO: add threshold stuff again */
 	} else {
 		return RX_DROP_MONITOR;
@@ -1883,7 +1917,6 @@
 
 	/* Complete frame has been reassembled - process it now */
 	status = IEEE80211_SKB_RXCB(rx->skb);
-	status->rx_flags |= IEEE80211_RX_FRAGMENTED;
 
  out:
 	ieee80211_led_rx(rx->local);
@@ -2108,9 +2141,8 @@
 		/* deliver to local stack */
 		skb->protocol = eth_type_trans(skb, dev);
 		memset(skb->cb, 0, sizeof(skb->cb));
-		if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) &&
-		    rx->local->napi)
-			napi_gro_receive(rx->local->napi, skb);
+		if (rx->napi)
+			napi_gro_receive(rx->napi, skb);
 		else
 			netif_receive_skb(skb);
 	}
@@ -2378,9 +2410,8 @@
 		    tf->category == WLAN_CATEGORY_TDLS &&
 		    (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
 		     tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
-			rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TDLS_CHSW;
-			skb_queue_tail(&sdata->skb_queue, rx->skb);
-			ieee80211_queue_work(&rx->local->hw, &sdata->work);
+			skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
+			schedule_work(&local->tdls_chsw_work);
 			if (rx->sta)
 				rx->sta->rx_packets++;
 
@@ -3004,7 +3035,6 @@
 	return RX_QUEUED;
 }
 
-/* TODO: use IEEE80211_RX_FRAGMENTED */
 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
 					struct ieee80211_rate *rate)
 {
@@ -3216,7 +3246,7 @@
 		/* This is OK -- must be QoS data frame */
 		.security_idx = tid,
 		.seqno_idx = tid,
-		.flags = IEEE80211_RX_REORDER_TIMER,
+		.napi = NULL, /* must be NULL to not have races */
 	};
 	struct tid_ampdu_rx *tid_agg_rx;
 
@@ -3286,7 +3316,7 @@
 	case NL80211_IFTYPE_OCB:
 		if (!bssid)
 			return false;
-		if (ieee80211_is_beacon(hdr->frame_control))
+		if (!ieee80211_is_data_present(hdr->frame_control))
 			return false;
 		if (!is_broadcast_ether_addr(bssid))
 			return false;
@@ -3393,7 +3423,8 @@
  * be called with rcu_read_lock protection.
  */
 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
-					 struct sk_buff *skb)
+					 struct sk_buff *skb,
+					 struct napi_struct *napi)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_sub_if_data *sdata;
@@ -3409,6 +3440,7 @@
 	memset(&rx, 0, sizeof(rx));
 	rx.skb = skb;
 	rx.local = local;
+	rx.napi = napi;
 
 	if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
 		I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
@@ -3510,7 +3542,8 @@
  * This is the receive path handler. It is called by a low level driver when an
  * 802.11 MPDU is received from the hardware.
  */
-void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
+		       struct napi_struct *napi)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_rate *rate = NULL;
@@ -3609,7 +3642,7 @@
 	ieee80211_tpt_led_trig_rx(local,
 			((struct ieee80211_hdr *)skb->data)->frame_control,
 			skb->len);
-	__ieee80211_rx_handle_packet(hw, skb);
+	__ieee80211_rx_handle_packet(hw, skb, napi);
 
 	rcu_read_unlock();
 
@@ -3617,7 +3650,7 @@
  drop:
 	kfree_skb(skb);
 }
-EXPORT_SYMBOL(ieee80211_rx);
+EXPORT_SYMBOL(ieee80211_rx_napi);
 
 /* This is a version of the rx handler that can be called from hard irq
  * context. Post the skb on the queue and schedule the tasklet */
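
[Editor's note: with the NAPI context threaded through to the RX path (enabling GRO via napi_gro_receive()), a driver would hand frames in from its poll routine roughly as in this hypothetical sketch. struct mydrv and mydrv_dequeue_rx() are illustrative; only the ieee80211_rx_napi() call reflects the signature added above.]

/* hypothetical driver NAPI poll routine (not from this patch set) */
static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	struct mydrv *dev = container_of(napi, struct mydrv, napi);
	int done = 0;

	while (done < budget) {
		struct sk_buff *skb = mydrv_dequeue_rx(dev);

		if (!skb)
			break;
		/* hand the frame to mac80211 with NAPI context for GRO */
		ieee80211_rx_napi(dev->hw, skb, napi);
		done++;
	}

	if (done < budget)
		napi_complete(napi);
	return done;
}
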
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 666ddac..64f1936 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -68,7 +68,7 @@
 	.nelem_hint = 3, /* start small */
 	.automatic_shrinking = true,
 	.head_offset = offsetof(struct sta_info, hash_node),
-	.key_offset = offsetof(struct sta_info, sta.addr),
+	.key_offset = offsetof(struct sta_info, addr),
 	.key_len = ETH_ALEN,
 	.hashfn = sta_addr_hash,
 	.max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
@@ -249,6 +249,9 @@
 	if (sta->sta.txq[0])
 		kfree(to_txq_info(sta->sta.txq[0]));
 	kfree(rcu_dereference_raw(sta->sta.rates));
+#ifdef CONFIG_MAC80211_MESH
+	kfree(sta->mesh);
+#endif
 	kfree(sta);
 }
 
@@ -313,13 +316,19 @@
 	INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
 	mutex_init(&sta->ampdu_mlme.mtx);
 #ifdef CONFIG_MAC80211_MESH
-	spin_lock_init(&sta->plink_lock);
-	if (ieee80211_vif_is_mesh(&sdata->vif) &&
-	    !sdata->u.mesh.user_mpm)
-		init_timer(&sta->plink_timer);
-	sta->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
+	if (ieee80211_vif_is_mesh(&sdata->vif)) {
+		sta->mesh = kzalloc(sizeof(*sta->mesh), gfp);
+		if (!sta->mesh)
+			goto free;
+		spin_lock_init(&sta->mesh->plink_lock);
+		if (ieee80211_vif_is_mesh(&sdata->vif) &&
+		    !sdata->u.mesh.user_mpm)
+			init_timer(&sta->mesh->plink_timer);
+		sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
+	}
 #endif
 
+	memcpy(sta->addr, addr, ETH_ALEN);
 	memcpy(sta->sta.addr, addr, ETH_ALEN);
 	sta->local = local;
 	sta->sdata = sdata;
@@ -332,9 +341,9 @@
 
 	ktime_get_ts(&uptime);
 	sta->last_connected = uptime.tv_sec;
-	ewma_init(&sta->avg_signal, 1024, 8);
+	ewma_signal_init(&sta->avg_signal);
 	for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++)
-		ewma_init(&sta->chain_signal_avg[i], 1024, 8);
+		ewma_signal_init(&sta->chain_signal_avg[i]);
 
 	if (local->ops->wake_tx_queue) {
 		void *txq_data;
@@ -405,6 +414,9 @@
 	if (sta->sta.txq[0])
 		kfree(to_txq_info(sta->sta.txq[0]));
 free:
+#ifdef CONFIG_MAC80211_MESH
+	kfree(sta->mesh);
+#endif
 	kfree(sta);
 	return NULL;
 }
@@ -623,7 +635,7 @@
 	bool indicate_tim = false;
 	u8 ignore_for_tim = sta->sta.uapsd_queues;
 	int ac;
-	u16 id;
+	u16 id = sta->sta.aid;
 
 	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
 	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
@@ -631,12 +643,9 @@
 			return;
 
 		ps = &sta->sdata->bss->ps;
-		id = sta->sta.aid;
 #ifdef CONFIG_MAC80211_MESH
 	} else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
 		ps = &sta->sdata->u.mesh.ps;
-		/* TIM map only for 1 <= PLID <= IEEE80211_MAX_AID */
-		id = sta->plid % (IEEE80211_MAX_AID + 1);
 #endif
 	} else {
 		return;
@@ -1887,7 +1896,8 @@
 		}
 
 		if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) {
-			sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
+			sinfo->signal_avg =
+				(s8) -ewma_signal_read(&sta->avg_signal);
 			sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
 		}
 	}
@@ -1902,7 +1912,7 @@
 		for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
 			sinfo->chain_signal[i] = sta->chain_signal_last[i];
 			sinfo->chain_signal_avg[i] =
-				(s8) -ewma_read(&sta->chain_signal_avg[i]);
+				(s8) -ewma_signal_read(&sta->chain_signal_avg[i]);
 		}
 	}
 
@@ -1956,16 +1966,16 @@
 				 BIT(NL80211_STA_INFO_PEER_PM) |
 				 BIT(NL80211_STA_INFO_NONPEER_PM);
 
-		sinfo->llid = sta->llid;
-		sinfo->plid = sta->plid;
-		sinfo->plink_state = sta->plink_state;
+		sinfo->llid = sta->mesh->llid;
+		sinfo->plid = sta->mesh->plid;
+		sinfo->plink_state = sta->mesh->plink_state;
 		if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
 			sinfo->filled |= BIT(NL80211_STA_INFO_T_OFFSET);
-			sinfo->t_offset = sta->t_offset;
+			sinfo->t_offset = sta->mesh->t_offset;
 		}
-		sinfo->local_pm = sta->local_pm;
-		sinfo->peer_pm = sta->peer_pm;
-		sinfo->nonpeer_pm = sta->nonpeer_pm;
+		sinfo->local_pm = sta->mesh->local_pm;
+		sinfo->peer_pm = sta->mesh->peer_pm;
+		sinfo->nonpeer_pm = sta->mesh->nonpeer_pm;
 #endif
 	}
 
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 226f8ca4..b087c71 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -53,6 +53,8 @@
  * @WLAN_STA_TDLS_CHAN_SWITCH: This TDLS peer supports TDLS channel-switching
  * @WLAN_STA_TDLS_OFF_CHANNEL: The local STA is currently off-channel with this
  *	TDLS peer
+ * @WLAN_STA_TDLS_WIDER_BW: This TDLS peer supports working with a wider
+ *	bandwidth on the BSS base channel.
  * @WLAN_STA_UAPSD: Station requested unscheduled SP while driver was
  *	keeping station in power-save mode, reply when the driver
  *	unblocks the station.
@@ -84,6 +86,7 @@
 	WLAN_STA_TDLS_INITIATOR,
 	WLAN_STA_TDLS_CHAN_SWITCH,
 	WLAN_STA_TDLS_OFF_CHANNEL,
+	WLAN_STA_TDLS_WIDER_BW,
 	WLAN_STA_UAPSD,
 	WLAN_STA_SP,
 	WLAN_STA_4ADDR_EVENT,
@@ -270,6 +273,56 @@
 };
 
 /**
+ * struct mesh_sta - mesh STA information
+ * @plink_lock: serialize access to plink fields
+ * @llid: Local link ID
+ * @plid: Peer link ID
+ * @aid: local aid supplied by peer
+ * @reason: Cancel reason on PLINK_HOLDING state
+ * @plink_retries: Retries in establishment
+ * @plink_state: peer link state
+ * @plink_timeout: timeout of peer link
+ * @plink_timer: peer link watch timer
+ * @t_offset: timing offset relative to this host
+ * @t_offset_setpoint: reference timing offset of this sta to be used when
+ * 	calculating clockdrift
+ * @local_pm: local link-specific power save mode
+ * @peer_pm: peer-specific power save mode towards local STA
+ * @nonpeer_pm: STA power save mode towards non-peer neighbors
+ * @processed_beacon: set to true after peer rates and capabilities are
+ *	processed
+ * @fail_avg: moving percentage of failed MSDUs
+ */
+struct mesh_sta {
+	struct timer_list plink_timer;
+
+	s64 t_offset;
+	s64 t_offset_setpoint;
+
+	spinlock_t plink_lock;
+	u16 llid;
+	u16 plid;
+	u16 aid;
+	u16 reason;
+	u8 plink_retries;
+
+	bool processed_beacon;
+
+	enum nl80211_plink_state plink_state;
+	u32 plink_timeout;
+
+	/* mesh power save */
+	enum nl80211_mesh_power_mode local_pm;
+	enum nl80211_mesh_power_mode peer_pm;
+	enum nl80211_mesh_power_mode nonpeer_pm;
+
+	/* moving percentage of failed MSDUs */
+	unsigned int fail_avg;
+};
+
+DECLARE_EWMA(signal, 1024, 8)
+
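
[Editor's note: DECLARE_EWMA(signal, 1024, 8) generates the ewma_signal_* helpers used in the RX path: values are kept scaled by 1024 and each new sample contributes with weight 1/8. A plain-C model of that update rule - an approximation of the kernel macro, for intuition only.]

#include <stdio.h>

#define FACTOR	1024	/* fixed-point scale, as in DECLARE_EWMA(signal, 1024, 8) */
#define WEIGHT	8	/* reciprocal weight of each new sample */

static unsigned long ewma_step(unsigned long internal, unsigned long val)
{
	if (!internal)
		return val * FACTOR;	/* first sample seeds the average */
	return (internal * (WEIGHT - 1) + val * FACTOR) / WEIGHT;
}

int main(void)
{
	unsigned long avg = 0;

	/* the RX path feeds -signal, so values are positive dBm magnitudes */
	for (int i = 0; i < 5; i++)
		avg = ewma_step(avg, 60);
	avg = ewma_step(avg, 90);	/* one much weaker frame */
	printf("avg ~ %lu\n", avg / FACTOR);	/* ~63: smoothed, not 90 */
	return 0;
}
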
+/**
  * struct sta_info - STA information
  *
  * This structure collects information about a station that
@@ -278,12 +331,13 @@
  * @list: global linked list entry
  * @free_list: list entry for keeping track of stations to free
  * @hash_node: hash node for rhashtable
+ * @addr: station's MAC address - duplicated from public part to
+ *	let the hash table work with just a single cacheline
  * @local: pointer to the global information
  * @sdata: virtual interface this station belongs to
  * @ptk: peer keys negotiated with this station, if any
  * @ptk_idx: last installed peer key index
  * @gtk: group keys negotiated with this station, if any
- * @gtk_idx: last installed group key index
  * @rate_ctrl: rate control algorithm reference
  * @rate_ctrl_lock: spinlock used to protect rate control data
  *	(data inside the algorithm, so serializes calls there)
@@ -318,30 +372,17 @@
  * @last_signal: signal of last received frame from this STA
  * @avg_signal: moving average of signal of received frames from this STA
  * @last_ack_signal: signal of last received Ack frame from this STA
- * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue)
+ * @last_seq_ctrl: last received seq/frag number from this STA (per TID
+ *	plus one for non-QoS frames)
  * @tx_filtered_count: number of frames the hardware filtered for this STA
  * @tx_retry_failed: number of frames that failed retry
  * @tx_retry_count: total number of retries for frames to this STA
- * @fail_avg: moving percentage of failed MSDUs
  * @tx_packets: number of RX/TX MSDUs
  * @tx_bytes: number of bytes transmitted to this STA
  * @tid_seq: per-TID sequence numbers for sending to this STA
  * @ampdu_mlme: A-MPDU state machine state
  * @timer_to_tid: identity mapping to ID timers
- * @plink_lock: serialize access to plink fields
- * @llid: Local link ID
- * @plid: Peer link ID
- * @reason: Cancel reason on PLINK_HOLDING state
- * @plink_retries: Retries in establishment
- * @plink_state: peer link state
- * @plink_timeout: timeout of peer link
- * @plink_timer: peer link watch timer
- * @t_offset: timing offset relative to this host
- * @t_offset_setpoint: reference timing offset of this sta to be used when
- * 	calculating clockdrift
- * @local_pm: local link-specific power save mode
- * @peer_pm: peer-specific power save mode towards local STA
- * @nonpeer_pm: STA power save mode towards non-peer neighbors
+ * @mesh: mesh STA information
  * @debugfs: debug filesystem info
  * @dead: set to true when sta is unlinked
  * @uploaded: set to true when sta is uploaded to the driver
@@ -369,19 +410,19 @@
  * @rx_msdu: MSDUs received from this station, using IEEE80211_NUM_TID
  *	entry for non-QoS frames
  * @fast_tx: TX fastpath information
- * @processed_beacon: set to true after peer rates and capabilities are
- *	processed
+ * @tdls_chandef: a TDLS peer can have a wider chandef that is compatible
+ *	with the BSS one.
  */
 struct sta_info {
 	/* General information, mostly static */
 	struct list_head list, free_list;
 	struct rcu_head rcu_head;
 	struct rhash_head hash_node;
+	u8 addr[ETH_ALEN];
 	struct ieee80211_local *local;
 	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
 	struct ieee80211_key __rcu *ptk[NUM_DEFAULT_KEYS];
-	u8 gtk_idx;
 	u8 ptk_idx;
 	struct rate_control_ref *rate_ctrl;
 	void *rate_ctrl_priv;
@@ -390,6 +431,10 @@
 
 	struct ieee80211_fast_tx __rcu *fast_tx;
 
+#ifdef CONFIG_MAC80211_MESH
+	struct mesh_sta *mesh;
+#endif
+
 	struct work_struct drv_deliver_wk;
 
 	u16 listen_interval;
@@ -419,12 +464,12 @@
 	unsigned long rx_fragments;
 	unsigned long rx_dropped;
 	int last_signal;
-	struct ewma avg_signal;
+	struct ewma_signal avg_signal;
 	int last_ack_signal;
 
 	u8 chains;
 	s8 chain_signal_last[IEEE80211_MAX_CHAINS];
-	struct ewma chain_signal_avg[IEEE80211_MAX_CHAINS];
+	struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS];
 
 	/* Plus 1 for non-QoS frames */
 	__le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1];
@@ -432,8 +477,6 @@
 	/* Updated from TX status path only, no locking requirements */
 	unsigned long tx_filtered_count;
 	unsigned long tx_retry_failed, tx_retry_count;
-	/* moving percentage of failed MSDUs */
-	unsigned int fail_avg;
 
 	/* Updated from TX path only, no locking requirements */
 	u64 tx_packets[IEEE80211_NUM_ACS];
@@ -455,29 +498,6 @@
 	struct sta_ampdu_mlme ampdu_mlme;
 	u8 timer_to_tid[IEEE80211_NUM_TIDS];
 
-#ifdef CONFIG_MAC80211_MESH
-	/*
-	 * Mesh peer link attributes, protected by plink_lock.
-	 * TODO: move to a sub-structure that is referenced with pointer?
-	 */
-	spinlock_t plink_lock;
-	u16 llid;
-	u16 plid;
-	u16 reason;
-	u8 plink_retries;
-	enum nl80211_plink_state plink_state;
-	u32 plink_timeout;
-	struct timer_list plink_timer;
-
-	s64 t_offset;
-	s64 t_offset_setpoint;
-	/* mesh power save */
-	enum nl80211_mesh_power_mode local_pm;
-	enum nl80211_mesh_power_mode peer_pm;
-	enum nl80211_mesh_power_mode nonpeer_pm;
-	bool processed_beacon;
-#endif
-
 #ifdef CONFIG_MAC80211_DEBUGFS
 	struct sta_info_debugfsdentries {
 		struct dentry *dir;
@@ -498,6 +518,8 @@
 
 	u8 reserved_tid;
 
+	struct cfg80211_chan_def tdls_chandef;
+
 	/* keep last! */
 	struct ieee80211_sta sta;
 };
@@ -505,7 +527,7 @@
 static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta)
 {
 #ifdef CONFIG_MAC80211_MESH
-	return sta->plink_state;
+	return sta->mesh->plink_state;
 #endif
 	return NL80211_PLINK_LISTEN;
 }
@@ -608,7 +630,7 @@
 			       _sta_bucket_idx(tbl, _addr),		\
 			       hash_node)				\
 	/* compare address and run code only if it matches */		\
-	if (ether_addr_equal(_sta->sta.addr, (_addr)))
+	if (ether_addr_equal(_sta->addr, (_addr)))
 
 /*
  * Get STA info by index, BROKEN!
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 45628f3..8ba5832 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -515,7 +515,7 @@
 
 		if (!sdata) {
 			skb->dev = NULL;
-		} else if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) {
+		} else {
 			unsigned int hdr_size =
 				ieee80211_hdrlen(hdr->frame_control);
 
@@ -529,9 +529,6 @@
 				ieee80211_mgd_conn_tx_status(sdata,
 							     hdr->frame_control,
 							     acked);
-		} else {
-			/* we assign ack frame ID for the others */
-			WARN_ON(1);
 		}
 
 		rcu_read_unlock();
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 8db6e29..aee701a 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -4,6 +4,7 @@
  * Copyright 2006-2010	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2014, Intel Corporation
  * Copyright 2014  Intel Mobile Communications GmbH
+ * Copyright 2015  Intel Deutschland GmbH
  *
  * This file is GPLv2 as found in COPYING.
  */
@@ -11,6 +12,7 @@
 #include <linux/ieee80211.h>
 #include <linux/log2.h>
 #include <net/cfg80211.h>
+#include <linux/rtnetlink.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
 
@@ -35,20 +37,28 @@
 	mutex_unlock(&local->mtx);
 }
 
-static void ieee80211_tdls_add_ext_capab(struct ieee80211_local *local,
+static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata,
 					 struct sk_buff *skb)
 {
-	u8 *pos = (void *)skb_put(skb, 7);
+	struct ieee80211_local *local = sdata->local;
 	bool chan_switch = local->hw.wiphy->features &
 			   NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+	bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW);
+	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+	struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+	bool vht = sband && sband->vht_cap.vht_supported;
+	u8 *pos = (void *)skb_put(skb, 10);
 
 	*pos++ = WLAN_EID_EXT_CAPABILITY;
-	*pos++ = 5; /* len */
+	*pos++ = 8; /* len */
 	*pos++ = 0x0;
 	*pos++ = 0x0;
 	*pos++ = 0x0;
 	*pos++ = chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0;
 	*pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
+	*pos++ = 0;
+	*pos++ = 0;
+	*pos++ = (vht && wider_band) ? WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED : 0;
 }
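
[Editor's note: the extended-capabilities IE grows from 5 to 8 capability octets so that octet 8 can carry the TDLS wider-bandwidth bit. A standalone sketch of the resulting 10-byte element; the bit values mirror the WLAN_EXT_CAPA* definitions but treat them here as illustrative.]

#include <stdint.h>
#include <stdio.h>

#define EXT_CAPA4_TDLS_CHAN_SWITCH	0x40	/* octet 4 */
#define EXT_CAPA5_TDLS_ENABLED		0x20	/* octet 5 */
#define EXT_CAPA8_TDLS_WIDE_BW		0x20	/* octet 8 */

int main(void)
{
	/* 2-byte IE header + 8 capability octets = the 10 bytes put above */
	uint8_t ie[10] = { 127 /* WLAN_EID_EXT_CAPABILITY */, 8 /* len */ };

	ie[2 + 3] = EXT_CAPA4_TDLS_CHAN_SWITCH;
	ie[2 + 4] = EXT_CAPA5_TDLS_ENABLED;
	ie[2 + 7] = EXT_CAPA8_TDLS_WIDE_BW;	/* only if VHT + wider-BW HW */

	for (int i = 0; i < 10; i++)
		printf("%02x ", ie[i]);
	printf("\n");
	return 0;
}
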
 
 static u8
@@ -284,6 +294,60 @@
 }
 
 static void
+ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
+				   struct sta_info *sta)
+{
+	/* IEEE802.11ac-2013 Table E-4 */
+	u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
+	struct cfg80211_chan_def uc = sta->tdls_chandef;
+	enum nl80211_chan_width max_width = ieee80211_get_sta_bw(&sta->sta);
+	int i;
+
+	/* only support upgrading non-narrow channels up to 80MHz */
+	if (max_width == NL80211_CHAN_WIDTH_5 ||
+	    max_width == NL80211_CHAN_WIDTH_10)
+		return;
+
+	if (max_width > NL80211_CHAN_WIDTH_80)
+		max_width = NL80211_CHAN_WIDTH_80;
+
+	if (uc.width == max_width)
+		return;
+	/*
+	 * Channel usage constraints in the IEEE802.11ac-2013 specification only
+	 * allow expanding a 20MHz channel to 80MHz in a single way. In
+	 * addition, there are no 40MHz allowed channels that are not part of
+	 * the allowed 80MHz range in the 5GHz spectrum (the relevant one here).
+	 */
+	for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++)
+		if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) {
+			uc.center_freq1 = centers_80mhz[i];
+			uc.width = NL80211_CHAN_WIDTH_80;
+			break;
+		}
+
+	if (!uc.center_freq1)
+		return;
+
+	/* proceed to downgrade the chandef until usable or the same */
+	while (uc.width > max_width &&
+	       !cfg80211_reg_can_beacon(sdata->local->hw.wiphy,
+					&uc, sdata->wdev.iftype))
+		ieee80211_chandef_downgrade(&uc);
+
+	if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
+		tdls_dbg(sdata, "TDLS ch width upgraded %d -> %d\n",
+			 sta->tdls_chandef.width, uc.width);
+
+		/*
+		 * the station is not yet authorized when BW upgrade is done,
+		 * locking is not required
+		 */
+		sta->tdls_chandef = uc;
+	}
+}
+
+static void
 ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
 				   struct sk_buff *skb, const u8 *peer,
 				   u8 action_code, bool initiator,
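To see how the abs(center_freq - centers_80mhz[i]) <= 30 test above buckets a 20/40 MHz primary channel into its containing 80 MHz block, here is a hedged standalone rendering of the lookup (the Table E-4 centers are copied from the hunk; the helper name is ours):

    #include <stdio.h>
    #include <stdlib.h>

    /* IEEE 802.11ac-2013 Table E-4, as in the patch above */
    static const int centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };

    static int center_80mhz_for(int freq)
    {
        /* an 80 MHz block spans its center +/- 40 MHz, so every 20 MHz
         * primary inside it lies within 30 MHz of the center: 5180, 5200,
         * 5220 and 5240 are all <= 30 MHz from 5210
         */
        for (size_t i = 0; i < sizeof(centers_80mhz) / sizeof(centers_80mhz[0]); i++)
            if (abs(freq - centers_80mhz[i]) <= 30)
                return centers_80mhz[i];
        return 0; /* no containing 80 MHz block */
    }

    int main(void)
    {
        printf("ch 36 (5180 MHz) -> %d\n", center_80mhz_for(5180));  /* 5210 */
        printf("ch 149 (5745 MHz) -> %d\n", center_80mhz_for(5745)); /* 5775 */
        return 0;
    }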
@@ -320,7 +384,7 @@
 		offset = noffset;
 	}
 
-	ieee80211_tdls_add_ext_capab(local, skb);
+	ieee80211_tdls_add_ext_capab(sdata, skb);
 
 	/* add the QoS element if we support it */
 	if (local->hw.queues >= IEEE80211_NUM_ACS &&
@@ -350,15 +414,17 @@
 		offset = noffset;
 	}
 
-	rcu_read_lock();
+	mutex_lock(&local->sta_mtx);
 
 	/* we should have the peer STA if we're already responding */
 	if (action_code == WLAN_TDLS_SETUP_RESPONSE) {
 		sta = sta_info_get(sdata, peer);
 		if (WARN_ON_ONCE(!sta)) {
-			rcu_read_unlock();
+			mutex_unlock(&local->sta_mtx);
 			return;
 		}
+
+		sta->tdls_chandef = sdata->vif.bss_conf.chandef;
 	}
 
 	ieee80211_tdls_add_oper_classes(sdata, skb);
@@ -384,10 +450,6 @@
 		ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
 	} else if (action_code == WLAN_TDLS_SETUP_RESPONSE &&
 		   ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) {
-		/* disable SMPS in TDLS responder */
-		sta->sta.ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED
-					<< IEEE80211_HT_CAP_SM_PS_SHIFT;
-
 		/* the peer caps are already intersected with our own */
 		memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap));
 
@@ -448,9 +510,16 @@
 
 		pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
 		ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap);
+
+		/*
+		 * if both peers support WIDER_BW, we can expand the chandef to
+		 * a wider compatible one, up to 80MHz
+		 */
+		if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+			ieee80211_tdls_chandef_vht_upgrade(sdata, sta);
 	}
 
-	rcu_read_unlock();
+	mutex_unlock(&local->sta_mtx);
 
 	/* add any remaining IEs */
 	if (extra_ies_len) {
@@ -474,15 +543,17 @@
 	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
 	u8 *pos;
 
-	rcu_read_lock();
+	mutex_lock(&local->sta_mtx);
 
 	sta = sta_info_get(sdata, peer);
 	ap_sta = sta_info_get(sdata, ifmgd->bssid);
 	if (WARN_ON_ONCE(!sta || !ap_sta)) {
-		rcu_read_unlock();
+		mutex_unlock(&local->sta_mtx);
 		return;
 	}
 
+	sta->tdls_chandef = sdata->vif.bss_conf.chandef;
+
 	/* add any custom IEs that go before the QoS IE */
 	if (extra_ies_len) {
 		static const u8 before_qos[] = {
@@ -530,12 +601,19 @@
 
 	/* only include VHT-operation if not on the 2.4GHz band */
 	if (band != IEEE80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) {
+		/*
+		 * if both peers support WIDER_BW, we can expand the chandef to
+		 * a wider compatible one, up to 80MHz
+		 */
+		if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+			ieee80211_tdls_chandef_vht_upgrade(sdata, sta);
+
 		pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
 		ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap,
-					    &sdata->vif.bss_conf.chandef);
+					    &sta->tdls_chandef);
 	}
 
-	rcu_read_unlock();
+	mutex_unlock(&local->sta_mtx);
 
 	/* add any remaining IEs */
 	if (extra_ies_len) {
@@ -784,7 +862,7 @@
 			       max(sizeof(struct ieee80211_mgmt),
 				   sizeof(struct ieee80211_tdls_data)) +
 			       50 + /* supported rates */
-			       7 + /* ext capab */
+			       10 + /* ext capab */
 			       26 + /* max(WMM-info, WMM-param) */
 			       2 + max(sizeof(struct ieee80211_ht_cap),
 				       sizeof(struct ieee80211_ht_operation)) +
@@ -983,8 +1061,17 @@
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
+	enum ieee80211_smps_mode smps_mode = sdata->u.mgd.driver_smps_mode;
 	int ret;
 
+	/* don't support setup with forced SMPS mode that's not off */
+	if (smps_mode != IEEE80211_SMPS_AUTOMATIC &&
+	    smps_mode != IEEE80211_SMPS_OFF) {
+		tdls_dbg(sdata, "Aborting TDLS setup due to SMPS mode %d\n",
+			 smps_mode);
+		return -ENOTSUPP;
+	}
+
 	mutex_lock(&local->mtx);
 
 	/* we don't support concurrent TDLS peer setups */
@@ -1146,6 +1233,22 @@
 	return ret;
 }
 
+static void ieee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_chanctx_conf *conf;
+	struct ieee80211_chanctx *ctx;
+
+	mutex_lock(&local->chanctx_mtx);
+	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+					 lockdep_is_held(&local->chanctx_mtx));
+	if (conf) {
+		ctx = container_of(conf, struct ieee80211_chanctx, conf);
+		ieee80211_recalc_chanctx_chantype(local, ctx);
+	}
+	mutex_unlock(&local->chanctx_mtx);
+}
+
 int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
 			const u8 *peer, enum nl80211_tdls_operation oper)
 {
@@ -1182,6 +1285,8 @@
 			break;
 		}
 
+		ieee80211_tdls_recalc_chanctx(sdata);
+
 		rcu_read_lock();
 		sta = sta_info_get(sdata, peer);
 		if (!sta) {
@@ -1213,6 +1318,7 @@
 		ieee80211_flush_queues(local, sdata, false);
 
 		ret = sta_info_destroy_addr(sdata, peer);
+	ieee80211_tdls_recalc_chanctx(sdata);
 		break;
 	default:
 		ret = -ENOTSUPP;
@@ -1224,6 +1330,10 @@
 		eth_zero_addr(sdata->u.mgd.tdls_peer);
 	}
 
+	if (ret == 0)
+		ieee80211_queue_work(&sdata->local->hw,
+				     &sdata->u.mgd.request_smps_work);
+
 	mutex_unlock(&local->mtx);
 	return ret;
 }
@@ -1627,6 +1737,31 @@
 		return -EINVAL;
 	}
 
+	if (!elems.sec_chan_offs) {
+		chan_type = NL80211_CHAN_HT20;
+	} else {
+		switch (elems.sec_chan_offs->sec_chan_offs) {
+		case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+			chan_type = NL80211_CHAN_HT40PLUS;
+			break;
+		case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+			chan_type = NL80211_CHAN_HT40MINUS;
+			break;
+		default:
+			chan_type = NL80211_CHAN_HT20;
+			break;
+		}
+	}
+
+	cfg80211_chandef_create(&chandef, chan, chan_type);
+
+	/* we will be active on the TDLS link */
+	if (!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &chandef,
+					   sdata->wdev.iftype)) {
+		tdls_dbg(sdata, "TDLS chan switch to forbidden channel\n");
+		return -EINVAL;
+	}
+
 	mutex_lock(&local->sta_mtx);
 	sta = sta_info_get(sdata, tf->sa);
 	if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) {
@@ -1647,27 +1782,15 @@
 		goto out;
 	}
 
-	if (!sta->sta.ht_cap.ht_supported) {
-		chan_type = NL80211_CHAN_NO_HT;
-	} else if (!elems.sec_chan_offs) {
-		chan_type = NL80211_CHAN_HT20;
-	} else {
-		switch (elems.sec_chan_offs->sec_chan_offs) {
-		case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-			chan_type = NL80211_CHAN_HT40PLUS;
-			break;
-		case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-			chan_type = NL80211_CHAN_HT40MINUS;
-			break;
-		default:
-			chan_type = NL80211_CHAN_HT20;
-			break;
-		}
+	/* peer should have known better */
+	if (!sta->sta.ht_cap.ht_supported && elems.sec_chan_offs &&
+	    elems.sec_chan_offs->sec_chan_offs) {
+		tdls_dbg(sdata, "TDLS chan switch - wide chan unsupported\n");
+		ret = -ENOTSUPP;
+		goto out;
 	}
 
-	cfg80211_chandef_create(&chandef, chan, chan_type);
 	params.chandef = &chandef;
-
 	params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time);
 	params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout);
 
@@ -1691,12 +1814,15 @@
 	return ret;
 }
 
-void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
-					   struct sk_buff *skb)
+static void
+ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
+				      struct sk_buff *skb)
 {
 	struct ieee80211_tdls_data *tf = (void *)skb->data;
 	struct wiphy *wiphy = sdata->local->hw.wiphy;
 
+	ASSERT_RTNL();
+
 	/* make sure the driver supports it */
 	if (!(wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH))
 		return;
@@ -1720,3 +1846,47 @@
 		return;
 	}
 }
+
+void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
+{
+	struct sta_info *sta;
+	u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+		if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+		    !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+			continue;
+
+		ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
+					    NL80211_TDLS_TEARDOWN, reason,
+					    GFP_ATOMIC);
+	}
+	rcu_read_unlock();
+}
+
+void ieee80211_tdls_chsw_work(struct work_struct *wk)
+{
+	struct ieee80211_local *local =
+		container_of(wk, struct ieee80211_local, tdls_chsw_work);
+	struct ieee80211_sub_if_data *sdata;
+	struct sk_buff *skb;
+	struct ieee80211_tdls_data *tf;
+
+	rtnl_lock();
+	while ((skb = skb_dequeue(&local->skb_queue_tdls_chsw))) {
+		tf = (struct ieee80211_tdls_data *)skb->data;
+		list_for_each_entry(sdata, &local->interfaces, list) {
+			if (!ieee80211_sdata_running(sdata) ||
+			    sdata->vif.type != NL80211_IFTYPE_STATION ||
+			    !ether_addr_equal(tf->da, sdata->vif.addr))
+				continue;
+
+			ieee80211_process_tdls_channel_switch(sdata, skb);
+			break;
+		}
+
+		kfree_skb(skb);
+	}
+	rtnl_unlock();
+}
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index b823350..84e0e8c 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -311,9 +311,6 @@
 	if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
 		return TX_CONTINUE;
 
-	if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
-		return TX_CONTINUE;
-
 	if (tx->flags & IEEE80211_TX_PS_BUFFERED)
 		return TX_CONTINUE;
 
@@ -610,7 +607,6 @@
 	if (tx->key) {
 		bool skip_hw = false;
 
-		tx->key->tx_rx_count++;
 		/* TODO: add threshold stuff again */
 
 		switch (tx->key->conf.cipher) {
@@ -690,7 +686,8 @@
 
 	txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
 		    tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
-		    tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
+		    tx->sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+		    tx->sdata->vif.type == NL80211_IFTYPE_OCB);
 
 	/* set up RTS protection if desired */
 	if (len > tx->local->hw.wiphy->rts_threshold) {
@@ -2777,7 +2774,11 @@
 		sdata->sequence_number += 0x10;
 	}
 
-	sta->tx_msdu[tid]++;
+	if (skb_shinfo(skb)->gso_size)
+		sta->tx_msdu[tid] +=
+			DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
+	else
+		sta->tx_msdu[tid]++;
 
 	info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
 
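The accounting change above counts a GSO aggregate as the number of MSDUs it will be segmented into on the air, rounding up, instead of counting it as one. A worked instance of the arithmetic, with illustrative numbers:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int skb_len = 65160;   /* hypothetical GSO super-frame */
        unsigned int gso_size = 1448;   /* typical TCP MSS */

        /* 65160 / 1448 = 45 exactly; 65161 bytes would round up to 46 */
        printf("%u MSDUs\n", DIV_ROUND_UP(skb_len, gso_size));
        return 0;
    }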
@@ -3213,6 +3214,16 @@
 	rcu_read_unlock();
 }
 
+static u8 __ieee80211_csa_update_counter(struct beacon_data *beacon)
+{
+	beacon->csa_current_counter--;
+
+	/* the counter should never reach 0 */
+	WARN_ON_ONCE(!beacon->csa_current_counter);
+
+	return beacon->csa_current_counter;
+}
+
 u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif)
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
@@ -3231,11 +3242,7 @@
 	if (!beacon)
 		goto unlock;
 
-	beacon->csa_current_counter--;
-
-	/* the counter should never reach 0 */
-	WARN_ON_ONCE(!beacon->csa_current_counter);
-	count = beacon->csa_current_counter;
+	count = __ieee80211_csa_update_counter(beacon);
 
 unlock:
 	rcu_read_unlock();
@@ -3335,7 +3342,7 @@
 		if (beacon) {
 			if (beacon->csa_counter_offsets[0]) {
 				if (!is_template)
-					ieee80211_csa_update_counter(vif);
+					__ieee80211_csa_update_counter(beacon);
 
 				ieee80211_set_csa(sdata, beacon);
 			}
@@ -3381,7 +3388,7 @@
 
 		if (beacon->csa_counter_offsets[0]) {
 			if (!is_template)
-				ieee80211_csa_update_counter(vif);
+				__ieee80211_csa_update_counter(beacon);
 
 			ieee80211_set_csa(sdata, beacon);
 		}
@@ -3411,7 +3418,7 @@
 				 * for now we leave it consistent with overall
 				 * mac80211's behavior.
 				 */
-				ieee80211_csa_update_counter(vif);
+				__ieee80211_csa_update_counter(beacon);
 
 			ieee80211_set_csa(sdata, beacon);
 		}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 43e5aad..1104421 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -47,55 +47,6 @@
 }
 EXPORT_SYMBOL(wiphy_to_ieee80211_hw);
 
-u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
-			enum nl80211_iftype type)
-{
-	__le16 fc = hdr->frame_control;
-
-	 /* drop ACK/CTS frames and incorrect hdr len (ctrl) */
-	if (len < 16)
-		return NULL;
-
-	if (ieee80211_is_data(fc)) {
-		if (len < 24) /* drop incorrect hdr len (data) */
-			return NULL;
-
-		if (ieee80211_has_a4(fc))
-			return NULL;
-		if (ieee80211_has_tods(fc))
-			return hdr->addr1;
-		if (ieee80211_has_fromds(fc))
-			return hdr->addr2;
-
-		return hdr->addr3;
-	}
-
-	if (ieee80211_is_mgmt(fc)) {
-		if (len < 24) /* drop incorrect hdr len (mgmt) */
-			return NULL;
-		return hdr->addr3;
-	}
-
-	if (ieee80211_is_ctl(fc)) {
-		if (ieee80211_is_pspoll(fc))
-			return hdr->addr1;
-
-		if (ieee80211_is_back_req(fc)) {
-			switch (type) {
-			case NL80211_IFTYPE_STATION:
-				return hdr->addr2;
-			case NL80211_IFTYPE_AP:
-			case NL80211_IFTYPE_AP_VLAN:
-				return hdr->addr1;
-			default:
-				break; /* fall through to the return */
-			}
-		}
-	}
-
-	return NULL;
-}
-
 void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
 {
 	struct sk_buff *skb;
@@ -752,7 +703,12 @@
 
 struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif)
 {
-	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	struct ieee80211_sub_if_data *sdata;
+
+	if (!vif)
+		return NULL;
+
+	sdata = vif_to_sdata(vif);
 
 	if (!ieee80211_sdata_running(sdata) ||
 	    !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
@@ -1709,6 +1665,7 @@
 	local->resuming = false;
 	local->suspended = false;
 	local->started = false;
+	local->in_reconfig = false;
 
 	/* scheduled scan clearly can't be running any more, but tell
 	 * cfg80211 and clear local state
@@ -1759,16 +1716,24 @@
 	struct ieee80211_sub_if_data *sched_scan_sdata;
 	struct cfg80211_sched_scan_request *sched_scan_req;
 	bool sched_scan_stopped = false;
+	bool suspended = local->suspended;
 
 	/* nothing to do if HW shouldn't run */
 	if (!local->open_count)
 		goto wake_up;
 
 #ifdef CONFIG_PM
-	if (local->suspended)
+	if (suspended)
 		local->resuming = true;
 
 	if (local->wowlan) {
+		/*
+		 * In the wowlan case, both mac80211 and the device
+		 * are functional when the resume op is called, so
+		 * clear local->suspended so the device could operate
+		 * normally (e.g. pass rx frames).
+		 */
+		local->suspended = false;
 		res = drv_resume(local);
 		local->wowlan = false;
 		if (res < 0) {
@@ -1781,8 +1746,10 @@
 		/*
 		 * res is 1, which means the driver requested
 		 * to go through a regular reset on wakeup.
+		 * restore local->suspended in this case.
 		 */
 		reconfig_due_to_wowlan = true;
+		local->suspended = true;
 	}
 #endif
 
@@ -1794,7 +1761,7 @@
 	 */
 	res = drv_start(local);
 	if (res) {
-		if (local->suspended)
+		if (suspended)
 			WARN(1, "Hardware became unavailable upon resume. This could be a software issue prior to suspend or a hardware issue.\n");
 		else
 			WARN(1, "Hardware became unavailable during restart.\n");
@@ -2088,10 +2055,10 @@
 	 * If this is for hw restart things are still running.
 	 * We may want to change that later, however.
 	 */
-	if (local->open_count && (!local->suspended || reconfig_due_to_wowlan))
+	if (local->open_count && (!suspended || reconfig_due_to_wowlan))
 		drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
 
-	if (!local->suspended)
+	if (!suspended)
 		return 0;
 
 #ifdef CONFIG_PM
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 80694d5..834ccdb 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -308,11 +308,15 @@
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	enum ieee80211_sta_rx_bandwidth bw;
+	enum nl80211_chan_width bss_width = sdata->vif.bss_conf.chandef.width;
 
-	bw = ieee80211_chan_width_to_rx_bw(sdata->vif.bss_conf.chandef.width);
-	bw = min(bw, ieee80211_sta_cap_rx_bw(sta));
+	bw = ieee80211_sta_cap_rx_bw(sta);
 	bw = min(bw, sta->cur_max_bandwidth);
 
+	/* do not cap the BW of TDLS WIDER_BW peers by the bss */
+	if (!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+		bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
+
 	return bw;
 }
 
@@ -422,3 +426,29 @@
 	if (changed > 0)
 		rate_control_rate_update(local, sband, sta, changed);
 }
+
+void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
+				     u16 vht_mask[NL80211_VHT_NSS_MAX])
+{
+	int i;
+	u16 mask, cap = le16_to_cpu(vht_cap);
+
+	for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+		mask = (cap >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
+		switch (mask) {
+		case IEEE80211_VHT_MCS_SUPPORT_0_7:
+			vht_mask[i] = 0x00FF;
+			break;
+		case IEEE80211_VHT_MCS_SUPPORT_0_8:
+			vht_mask[i] = 0x01FF;
+			break;
+		case IEEE80211_VHT_MCS_SUPPORT_0_9:
+			vht_mask[i] = 0x03FF;
+			break;
+		case IEEE80211_VHT_MCS_NOT_SUPPORTED:
+		default:
+			vht_mask[i] = 0;
+			break;
+		}
+	}
+}
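ieee80211_get_vht_mask_from_cap() above unpacks the 16-bit VHT MCS map, two bits per spatial stream. A hedged standalone decode, using a map value (0xfffa) that is typical for a 2x2 device: streams 1 and 2 advertise MCS 0-9, streams 3-8 are unsupported:

    #include <stdint.h>
    #include <stdio.h>

    #define NSS_MAX           8
    #define MCS_NOT_SUPPORTED 3   /* the 2-bit field values 0..3, as above */

    static void vht_mask_from_cap(uint16_t cap, uint16_t mask[NSS_MAX])
    {
        for (int i = 0; i < NSS_MAX; i++) {
            switch ((cap >> (i * 2)) & MCS_NOT_SUPPORTED) {
            case 0: mask[i] = 0x00FF; break;  /* MCS 0-7 */
            case 1: mask[i] = 0x01FF; break;  /* MCS 0-8 */
            case 2: mask[i] = 0x03FF; break;  /* MCS 0-9 */
            default: mask[i] = 0; break;      /* stream unsupported */
            }
        }
    }

    int main(void)
    {
        uint16_t mask[NSS_MAX];

        vht_mask_from_cap(0xfffa, mask);
        for (int i = 0; i < NSS_MAX; i++)
            printf("NSS%d: 0x%04x\n", i + 1, mask[i]);
        return 0;
    }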
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 943f760..feb547d 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -516,30 +516,33 @@
 			return RX_DROP_UNUSABLE;
 	}
 
-	ccmp_hdr2pn(pn, skb->data + hdrlen);
+	if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
+		ccmp_hdr2pn(pn, skb->data + hdrlen);
 
-	queue = rx->security_idx;
+		queue = rx->security_idx;
 
-	if (memcmp(pn, key->u.ccmp.rx_pn[queue], IEEE80211_CCMP_PN_LEN) <= 0) {
-		key->u.ccmp.replays++;
-		return RX_DROP_UNUSABLE;
-	}
-
-	if (!(status->flag & RX_FLAG_DECRYPTED)) {
-		u8 aad[2 * AES_BLOCK_SIZE];
-		u8 b_0[AES_BLOCK_SIZE];
-		/* hardware didn't decrypt/verify MIC */
-		ccmp_special_blocks(skb, pn, b_0, aad);
-
-		if (ieee80211_aes_ccm_decrypt(
-			    key->u.ccmp.tfm, b_0, aad,
-			    skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
-			    data_len,
-			    skb->data + skb->len - mic_len, mic_len))
+		if (memcmp(pn, key->u.ccmp.rx_pn[queue],
+			   IEEE80211_CCMP_PN_LEN) <= 0) {
+			key->u.ccmp.replays++;
 			return RX_DROP_UNUSABLE;
-	}
+		}
 
-	memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
+		if (!(status->flag & RX_FLAG_DECRYPTED)) {
+			u8 aad[2 * AES_BLOCK_SIZE];
+			u8 b_0[AES_BLOCK_SIZE];
+			/* hardware didn't decrypt/verify MIC */
+			ccmp_special_blocks(skb, pn, b_0, aad);
+
+			if (ieee80211_aes_ccm_decrypt(
+				    key->u.ccmp.tfm, b_0, aad,
+				    skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
+				    data_len,
+				    skb->data + skb->len - mic_len, mic_len))
+				return RX_DROP_UNUSABLE;
+		}
+
+		memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
+	}
 
 	/* Remove CCMP header and MIC */
 	if (pskb_trim(skb, skb->len - mic_len))
@@ -739,30 +742,34 @@
 			return RX_DROP_UNUSABLE;
 	}
 
-	gcmp_hdr2pn(pn, skb->data + hdrlen);
+	if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
+		gcmp_hdr2pn(pn, skb->data + hdrlen);
 
-	queue = rx->security_idx;
+		queue = rx->security_idx;
 
-	if (memcmp(pn, key->u.gcmp.rx_pn[queue], IEEE80211_GCMP_PN_LEN) <= 0) {
-		key->u.gcmp.replays++;
-		return RX_DROP_UNUSABLE;
-	}
-
-	if (!(status->flag & RX_FLAG_DECRYPTED)) {
-		u8 aad[2 * AES_BLOCK_SIZE];
-		u8 j_0[AES_BLOCK_SIZE];
-		/* hardware didn't decrypt/verify MIC */
-		gcmp_special_blocks(skb, pn, j_0, aad);
-
-		if (ieee80211_aes_gcm_decrypt(
-			    key->u.gcmp.tfm, j_0, aad,
-			    skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
-			    data_len,
-			    skb->data + skb->len - IEEE80211_GCMP_MIC_LEN))
+		if (memcmp(pn, key->u.gcmp.rx_pn[queue],
+			   IEEE80211_GCMP_PN_LEN) <= 0) {
+			key->u.gcmp.replays++;
 			return RX_DROP_UNUSABLE;
-	}
+		}
 
-	memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
+		if (!(status->flag & RX_FLAG_DECRYPTED)) {
+			u8 aad[2 * AES_BLOCK_SIZE];
+			u8 j_0[AES_BLOCK_SIZE];
+			/* hardware didn't decrypt/verify MIC */
+			gcmp_special_blocks(skb, pn, j_0, aad);
+
+			if (ieee80211_aes_gcm_decrypt(
+				    key->u.gcmp.tfm, j_0, aad,
+				    skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
+				    data_len,
+				    skb->data + skb->len -
+				    IEEE80211_GCMP_MIC_LEN))
+				return RX_DROP_UNUSABLE;
+		}
+
+		memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
+	}
 
 	/* Remove GCMP header and MIC */
 	if (pskb_trim(skb, skb->len - IEEE80211_GCMP_MIC_LEN))
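Both hunks above gate the replay check on RX_FLAG_PN_VALIDATED so that hardware which has already verified the packet number can skip the software check. The check itself depends on the 48-bit PN being stored most-significant byte first, which is what lets a plain memcmp() order two PNs correctly. A hedged sketch of that property (the byte order of ccmp_hdr2pn() is an assumption here):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PN_LEN 6   /* CCMP/GCMP packet numbers are 48 bits */

    /* store a 48-bit counter big-endian, as ccmp_hdr2pn() is assumed to */
    static void pn_store_be(uint8_t pn[PN_LEN], uint64_t ctr)
    {
        for (int i = 0; i < PN_LEN; i++)
            pn[i] = ctr >> (8 * (PN_LEN - 1 - i));
    }

    int main(void)
    {
        uint8_t last[PN_LEN], rx[PN_LEN];

        pn_store_be(last, 0x0100000000ULL);  /* last accepted PN */
        pn_store_be(rx,   0x00ffffffffULL);  /* smaller, despite larger low bytes */

        /* memcmp() <= 0 means stale or replayed, exactly as in the patch */
        printf("replay: %s\n", memcmp(rx, last, PN_LEN) <= 0 ? "yes" : "no");
        return 0;
    }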
diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c
index 317c466..c865ebb 100644
--- a/net/mac802154/cfg.c
+++ b/net/mac802154/cfg.c
@@ -44,6 +44,49 @@
 	ieee802154_if_remove(sdata);
 }
 
+#ifdef CONFIG_PM
+static int ieee802154_suspend(struct wpan_phy *wpan_phy)
+{
+	struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+
+	if (!local->open_count)
+		goto suspend;
+
+	ieee802154_stop_queue(&local->hw);
+	synchronize_net();
+
+	/* stop hardware - this must stop RX */
+	ieee802154_stop_device(local);
+
+suspend:
+	local->suspended = true;
+	return 0;
+}
+
+static int ieee802154_resume(struct wpan_phy *wpan_phy)
+{
+	struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+	int ret;
+
+	/* nothing to do if HW shouldn't run */
+	if (!local->open_count)
+		goto wake_up;
+
+	/* restart hardware */
+	ret = drv_start(local);
+	if (ret)
+		return ret;
+
+wake_up:
+	ieee802154_wake_queue(&local->hw);
+	local->suspended = false;
+	return 0;
+}
+#else
+#define ieee802154_suspend NULL
+#define ieee802154_resume NULL
+#endif
+
 static int
 ieee802154_add_iface(struct wpan_phy *phy, const char *name,
 		     unsigned char name_assign_type,
@@ -145,13 +188,18 @@
 ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
 		      __le16 pan_id)
 {
+	int ret;
+
 	ASSERT_RTNL();
 
 	if (wpan_dev->pan_id == pan_id)
 		return 0;
 
-	wpan_dev->pan_id = pan_id;
-	return 0;
+	ret = mac802154_wpan_update_llsec(wpan_dev->netdev);
+	if (!ret)
+		wpan_dev->pan_id = pan_id;
+
+	return ret;
 }
 
 static int
@@ -161,10 +209,6 @@
 {
 	ASSERT_RTNL();
 
-	if (wpan_dev->min_be == min_be &&
-	    wpan_dev->max_be == max_be)
-		return 0;
-
 	wpan_dev->min_be = min_be;
 	wpan_dev->max_be = max_be;
 	return 0;
@@ -176,9 +220,6 @@
 {
 	ASSERT_RTNL();
 
-	if (wpan_dev->short_addr == short_addr)
-		return 0;
-
 	wpan_dev->short_addr = short_addr;
 	return 0;
 }
@@ -190,9 +231,6 @@
 {
 	ASSERT_RTNL();
 
-	if (wpan_dev->csma_retries == max_csma_backoffs)
-		return 0;
-
 	wpan_dev->csma_retries = max_csma_backoffs;
 	return 0;
 }
@@ -204,9 +242,6 @@
 {
 	ASSERT_RTNL();
 
-	if (wpan_dev->frame_retries == max_frame_retries)
-		return 0;
-
 	wpan_dev->frame_retries = max_frame_retries;
 	return 0;
 }
@@ -217,16 +252,25 @@
 {
 	ASSERT_RTNL();
 
-	if (wpan_dev->lbt == mode)
-		return 0;
-
 	wpan_dev->lbt = mode;
 	return 0;
 }
 
+static int
+ieee802154_set_ackreq_default(struct wpan_phy *wpan_phy,
+			      struct wpan_dev *wpan_dev, bool ackreq)
+{
+	ASSERT_RTNL();
+
+	wpan_dev->ackreq = ackreq;
+	return 0;
+}
+
 const struct cfg802154_ops mac802154_config_ops = {
 	.add_virtual_intf_deprecated = ieee802154_add_iface_deprecated,
 	.del_virtual_intf_deprecated = ieee802154_del_iface_deprecated,
+	.suspend = ieee802154_suspend,
+	.resume = ieee802154_resume,
 	.add_virtual_intf = ieee802154_add_iface,
 	.del_virtual_intf = ieee802154_del_iface,
 	.set_channel = ieee802154_set_channel,
@@ -239,4 +283,5 @@
 	.set_max_csma_backoffs = ieee802154_set_max_csma_backoffs,
 	.set_max_frame_retries = ieee802154_set_max_frame_retries,
 	.set_lbt_mode = ieee802154_set_lbt_mode,
+	.set_ackreq_default = ieee802154_set_ackreq_default,
 };
diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h
index 34755d5..56ccffa 100644
--- a/net/mac802154/ieee802154_i.h
+++ b/net/mac802154/ieee802154_i.h
@@ -56,9 +56,13 @@
 	struct hrtimer ifs_timer;
 
 	bool started;
+	bool suspended;
 
 	struct tasklet_struct tasklet;
 	struct sk_buff_head skb_queue;
+
+	struct sk_buff *tx_skb;
+	struct work_struct tx_work;
 };
 
 enum {
@@ -94,8 +98,6 @@
 	struct mac802154_llsec sec;
 };
 
-#define MAC802154_CHAN_NONE		0xff /* No channel is assigned */
-
 /* utility functions/constants */
 extern const void *const mac802154_wpan_phy_privid; /*  for wpan_phy privid */
 
@@ -125,6 +127,8 @@
 
 extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
 
+void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb);
+void ieee802154_xmit_worker(struct work_struct *work);
 netdev_tx_t
 ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t
@@ -167,6 +171,8 @@
 			 struct ieee802154_llsec_table **t);
 void mac802154_unlock_table(struct net_device *dev);
 
+int mac802154_wpan_update_llsec(struct net_device *dev);
+
 /* interface handling */
 int ieee802154_iface_init(void);
 void ieee802154_iface_exit(void);
@@ -176,5 +182,6 @@
 		  unsigned char name_assign_type, enum nl802154_iftype type,
 		  __le64 extended_addr);
 void ieee802154_remove_interfaces(struct ieee802154_local *local);
+void ieee802154_stop_device(struct ieee802154_local *local);
 
 #endif /* __IEEE802154_I_H */
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 8b69824..ed26952 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -30,7 +30,7 @@
 #include "ieee802154_i.h"
 #include "driver-ops.h"
 
-static int mac802154_wpan_update_llsec(struct net_device *dev)
+int mac802154_wpan_update_llsec(struct net_device *dev)
 {
 	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
 	struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
@@ -125,6 +125,14 @@
 	if (netif_running(dev))
 		return -EBUSY;
 
+	/* the lowpan interface needs to be down to update
+	 * the SLAAC address after ifup
+	 */
+	if (sdata->wpan_dev.lowpan_dev) {
+		if (netif_running(sdata->wpan_dev.lowpan_dev))
+			return -EBUSY;
+	}
+
 	ieee802154_be64_to_le64(&extended_addr, addr->sa_data);
 	if (!ieee802154_is_valid_extended_unicast_addr(extended_addr))
 		return -EINVAL;
@@ -132,6 +140,13 @@
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 	sdata->wpan_dev.extended_addr = extended_addr;
 
+	/* update the lowpan interface MAC address when
+	 * the wpan MAC has been changed
+	 */
+	if (sdata->wpan_dev.lowpan_dev)
+		memcpy(sdata->wpan_dev.lowpan_dev->dev_addr, dev->dev_addr,
+		       dev->addr_len);
+
 	return mac802154_wpan_update_llsec(dev);
 }
 
@@ -314,11 +329,8 @@
 
 	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
 
-	if (!local->open_count) {
-		flush_workqueue(local->workqueue);
-		hrtimer_cancel(&local->ifs_timer);
-		drv_stop(local);
-	}
+	if (!local->open_count)
+		ieee802154_stop_device(local);
 
 	return 0;
 }
@@ -471,6 +483,7 @@
 		       enum nl802154_iftype type)
 {
 	struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+	int ret;
 	u8 tmp;
 
 	/* set some type-dependent values */
@@ -485,8 +498,7 @@
 	wpan_dev->min_be = 3;
 	wpan_dev->max_be = 5;
 	wpan_dev->csma_retries = 4;
-	/* for compatibility, actual default is 3 */
-	wpan_dev->frame_retries = -1;
+	wpan_dev->frame_retries = 3;
 
 	wpan_dev->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
 	wpan_dev->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
@@ -505,6 +517,10 @@
 		mutex_init(&sdata->sec_mtx);
 
 		mac802154_llsec_init(&sdata->sec);
+		ret = mac802154_wpan_update_llsec(sdata->dev);
+		if (ret < 0)
+			return ret;
+
 		break;
 	case NL802154_IFTYPE_MONITOR:
 		sdata->dev->destructor = free_netdev;
diff --git a/net/mac802154/main.c b/net/mac802154/main.c
index 356b346..e8cab5b 100644
--- a/net/mac802154/main.c
+++ b/net/mac802154/main.c
@@ -40,7 +40,7 @@
 			 * netstack.
 			 */
 			skb->pkt_type = 0;
-			ieee802154_rx(&local->hw, skb);
+			ieee802154_rx(local, skb);
 			break;
 		default:
 			WARN(1, "mac802154: Packet is of unknown type %d\n",
@@ -58,11 +58,9 @@
 	struct ieee802154_local *local;
 	size_t priv_size;
 
-	if (!ops || !(ops->xmit_async || ops->xmit_sync) || !ops->ed ||
-	    !ops->start || !ops->stop || !ops->set_channel) {
-		pr_err("undefined IEEE802.15.4 device operations\n");
+	if (WARN_ON(!ops || !(ops->xmit_async || ops->xmit_sync) || !ops->ed ||
+		    !ops->start || !ops->stop || !ops->set_channel))
 		return NULL;
-	}
 
 	/* Ensure 32-byte alignment of our private data and hw private data.
 	 * We use the wpan_phy priv data for both our ieee802154_local and for
@@ -107,11 +105,13 @@
 
 	skb_queue_head_init(&local->skb_queue);
 
+	INIT_WORK(&local->tx_work, ieee802154_xmit_worker);
+
 	/* init supported flags with 802.15.4 default ranges */
 	phy->supported.max_minbe = 8;
 	phy->supported.min_maxbe = 3;
 	phy->supported.max_maxbe = 8;
-	phy->supported.min_frame_retries = -1;
+	phy->supported.min_frame_retries = 0;
 	phy->supported.max_frame_retries = 7;
 	phy->supported.max_csma_backoffs = 5;
 	phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE;
@@ -177,11 +177,8 @@
 	}
 
 	if (!(hw->flags & IEEE802154_HW_FRAME_RETRIES)) {
-		/* TODO should be 3, but our default value is -1 which means
-		 * no ARET handling.
-		 */
-		local->phy->supported.min_frame_retries = -1;
-		local->phy->supported.max_frame_retries = -1;
+		local->phy->supported.min_frame_retries = 3;
+		local->phy->supported.max_frame_retries = 3;
 	}
 
 	if (hw->flags & IEEE802154_HW_PROMISCUOUS)
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index d93ad2d..d1c33c1 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -246,13 +246,15 @@
 	}
 }
 
-void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb)
+void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb)
 {
-	struct ieee802154_local *local = hw_to_local(hw);
 	u16 crc;
 
 	WARN_ON_ONCE(softirq_count() == 0);
 
+	if (local->suspended)
+		goto drop;
+
 	/* TODO: When a transceiver omits the checksum here, we
 	 * add our own calculated one. This is currently an ugly
 	 * solution because the monitor needs a crc here.
@@ -273,8 +275,7 @@
 		crc = crc_ccitt(0, skb->data, skb->len);
 		if (crc) {
 			rcu_read_unlock();
-			kfree_skb(skb);
-			return;
+			goto drop;
 		}
 	}
 	/* remove crc */
@@ -283,8 +284,11 @@
 	__ieee802154_rx_handle_packet(local, skb);
 
 	rcu_read_unlock();
+
+	return;
+drop:
+	kfree_skb(skb);
 }
-EXPORT_SYMBOL(ieee802154_rx);
 
 void
 ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi)
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index c62e956..7ed4391 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -30,23 +30,11 @@
 #include "ieee802154_i.h"
 #include "driver-ops.h"
 
-/* IEEE 802.15.4 transceivers can sleep during the xmit session, so process
- * packets through the workqueue.
- */
-struct ieee802154_xmit_cb {
-	struct sk_buff *skb;
-	struct work_struct work;
-	struct ieee802154_local *local;
-};
-
-static struct ieee802154_xmit_cb ieee802154_xmit_cb;
-
-static void ieee802154_xmit_worker(struct work_struct *work)
+void ieee802154_xmit_worker(struct work_struct *work)
 {
-	struct ieee802154_xmit_cb *cb =
-		container_of(work, struct ieee802154_xmit_cb, work);
-	struct ieee802154_local *local = cb->local;
-	struct sk_buff *skb = cb->skb;
+	struct ieee802154_local *local =
+		container_of(work, struct ieee802154_local, tx_work);
+	struct sk_buff *skb = local->tx_skb;
 	struct net_device *dev = skb->dev;
 	int res;
 
@@ -106,11 +94,8 @@
 		dev->stats.tx_packets++;
 		dev->stats.tx_bytes += skb->len;
 	} else {
-		INIT_WORK(&ieee802154_xmit_cb.work, ieee802154_xmit_worker);
-		ieee802154_xmit_cb.skb = skb;
-		ieee802154_xmit_cb.local = local;
-
-		queue_work(local->workqueue, &ieee802154_xmit_cb.work);
+		local->tx_skb = skb;
+		queue_work(local->workqueue, &local->tx_work);
 	}
 
 	return NETDEV_TX_OK;
diff --git a/net/mac802154/util.c b/net/mac802154/util.c
index 583435f..f9fd095 100644
--- a/net/mac802154/util.c
+++ b/net/mac802154/util.c
@@ -14,6 +14,7 @@
  */
 
 #include "ieee802154_i.h"
+#include "driver-ops.h"
 
 /* privid for wpan_phys to determine whether they belong to us or not */
 const void *const mac802154_wpan_phy_privid = &mac802154_wpan_phy_privid;
@@ -92,3 +93,10 @@
 	dev_consume_skb_any(skb);
 }
 EXPORT_SYMBOL(ieee802154_xmit_complete);
+
+void ieee802154_stop_device(struct ieee802154_local *local)
+{
+	flush_workqueue(local->workqueue);
+	hrtimer_cancel(&local->ifs_timer);
+	drv_stop(local);
+}
diff --git a/net/mpls/Kconfig b/net/mpls/Kconfig
index 17bde79..5c467ef 100644
--- a/net/mpls/Kconfig
+++ b/net/mpls/Kconfig
@@ -24,7 +24,13 @@
 
 config MPLS_ROUTING
 	tristate "MPLS: routing support"
-	help
+	---help---
 	 Add support for forwarding of mpls packets.
 
+config MPLS_IPTUNNEL
+	tristate "MPLS: IP over MPLS tunnel support"
+	depends on LWTUNNEL && MPLS_ROUTING
+	---help---
+	 Add support for tunneling IP packets over MPLS.
+
 endif # MPLS
diff --git a/net/mpls/Makefile b/net/mpls/Makefile
index 65bbe68..9ca9236 100644
--- a/net/mpls/Makefile
+++ b/net/mpls/Makefile
@@ -3,5 +3,6 @@
 #
 obj-$(CONFIG_NET_MPLS_GSO) += mpls_gso.o
 obj-$(CONFIG_MPLS_ROUTING) += mpls_router.o
+obj-$(CONFIG_MPLS_IPTUNNEL) += mpls_iptunnel.o
 
 mpls_router-y := af_mpls.o
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 1f93a59..bb185a2 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -15,6 +15,10 @@
 #include <net/ip_fib.h>
 #include <net/netevent.h>
 #include <net/netns/generic.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#endif
 #include "internal.h"
 
 #define LABEL_NOT_SPECIFIED (1<<20)
@@ -23,11 +27,23 @@
 /* This maximum ha length is copied from the definition of struct neighbour */
 #define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long)))
 
+enum mpls_payload_type {
+	MPT_UNSPEC, /* IPv4 or IPv6 */
+	MPT_IPV4 = 4,
+	MPT_IPV6 = 6,
+
+	/* Other types not implemented:
+	 *  - Pseudo-wire with or without control word (RFC4385)
+	 *  - GAL (RFC5586)
+	 */
+};
+
 struct mpls_route { /* next hop label forwarding entry */
 	struct net_device __rcu *rt_dev;
 	struct rcu_head		rt_rcu;
 	u32			rt_label[MAX_NEW_LABELS];
 	u8			rt_protocol; /* routing protocol that set this entry */
+	u8                      rt_payload_type;
 	u8			rt_labels;
 	u8			rt_via_alen;
 	u8			rt_via_table;
@@ -58,10 +74,11 @@
 	return rcu_dereference_rtnl(dev->mpls_ptr);
 }
 
-static bool mpls_output_possible(const struct net_device *dev)
+bool mpls_output_possible(const struct net_device *dev)
 {
 	return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
 }
+EXPORT_SYMBOL_GPL(mpls_output_possible);
 
 static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
 {
@@ -69,13 +86,14 @@
 	return rt->rt_labels * sizeof(struct mpls_shim_hdr);
 }
 
-static unsigned int mpls_dev_mtu(const struct net_device *dev)
+unsigned int mpls_dev_mtu(const struct net_device *dev)
 {
 	/* The amount of data the layer 2 frame can hold */
 	return dev->mtu;
 }
+EXPORT_SYMBOL_GPL(mpls_dev_mtu);
 
-static bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 {
 	if (skb->len <= mtu)
 		return false;
@@ -85,20 +103,13 @@
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
 
 static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
 			struct mpls_entry_decoded dec)
 {
-	/* RFC4385 and RFC5586 encode other packets in mpls such that
-	 * they don't conflict with the ip version number, making
-	 * decoding by examining the ip version correct in everything
-	 * except for the strangest cases.
-	 *
-	 * The strange cases if we choose to support them will require
-	 * manual configuration.
-	 */
-	struct iphdr *hdr4;
-	bool success = true;
+	enum mpls_payload_type payload_type;
+	bool success = false;
 
 	/* The IPv4 code below accesses through the IPv4 header
 	 * checksum, which is 12 bytes into the packet.
@@ -113,23 +124,32 @@
 	if (!pskb_may_pull(skb, 12))
 		return false;
 
-	/* Use ip_hdr to find the ip protocol version */
-	hdr4 = ip_hdr(skb);
-	if (hdr4->version == 4) {
+	payload_type = rt->rt_payload_type;
+	if (payload_type == MPT_UNSPEC)
+		payload_type = ip_hdr(skb)->version;
+
+	switch (payload_type) {
+	case MPT_IPV4: {
+		struct iphdr *hdr4 = ip_hdr(skb);
 		skb->protocol = htons(ETH_P_IP);
 		csum_replace2(&hdr4->check,
 			      htons(hdr4->ttl << 8),
 			      htons(dec.ttl << 8));
 		hdr4->ttl = dec.ttl;
+		success = true;
+		break;
 	}
-	else if (hdr4->version == 6) {
+	case MPT_IPV6: {
 		struct ipv6hdr *hdr6 = ipv6_hdr(skb);
 		skb->protocol = htons(ETH_P_IPV6);
 		hdr6->hop_limit = dec.ttl;
+		success = true;
+		break;
 	}
-	else
-		/* version 0 and version 1 are used by pseudo wires */
-		success = false;
+	case MPT_UNSPEC:
+		break;
+	}
+
 	return success;
 }
 
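The MPT_IPV4 arm above rewrites the TTL and patches the IPv4 header checksum incrementally with csum_replace2() rather than recomputing it. The identity behind that helper is RFC 1624's HC' = ~(~HC + ~m + m'); a hedged userspace rendering follows (our own function name, not the kernel's signature):

    #include <stdint.h>
    #include <stdio.h>

    /* fold-and-complement update of a 16-bit ones'-complement checksum
     * when one checksummed word changes from old to new (RFC 1624, eqn 3)
     */
    static uint16_t csum_update16(uint16_t check, uint16_t old, uint16_t new)
    {
        uint32_t sum = (uint16_t)~check + (uint16_t)~old + new;

        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        /* ttl 64 -> 63; only the high byte of the ttl/protocol word
         * changes, so the protocol byte can be passed as zero in both
         * the old and new words, as the patch does with htons(ttl << 8)
         */
        uint16_t check = 0xb1e6;   /* hypothetical header checksum */

        printf("0x%04x -> 0x%04x\n", check, csum_update16(check, 64 << 8, 63 << 8));
        return 0;
    }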
@@ -248,16 +268,17 @@
 };
 
 struct mpls_route_config {
-	u32		rc_protocol;
-	u32		rc_ifindex;
-	u16		rc_via_table;
-	u16		rc_via_alen;
-	u8		rc_via[MAX_VIA_ALEN];
-	u32		rc_label;
-	u32		rc_output_labels;
-	u32		rc_output_label[MAX_NEW_LABELS];
-	u32		rc_nlflags;
-	struct nl_info	rc_nlinfo;
+	u32			rc_protocol;
+	u32			rc_ifindex;
+	u16			rc_via_table;
+	u16			rc_via_alen;
+	u8			rc_via[MAX_VIA_ALEN];
+	u32			rc_label;
+	u32			rc_output_labels;
+	u32			rc_output_label[MAX_NEW_LABELS];
+	u32			rc_nlflags;
+	enum mpls_payload_type	rc_payload_type;
+	struct nl_info		rc_nlinfo;
 };
 
 static struct mpls_route *mpls_rt_alloc(size_t alen)
@@ -286,7 +307,7 @@
 	struct mpls_route *rt = new ? new : old;
 	unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0;
 	/* Ignore reserved labels for now */
-	if (rt && (index >= 16))
+	if (rt && (index >= MPLS_LABEL_FIRST_UNRESERVED))
 		rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
 }
 
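The hunks in this file replace the bare constant 16 with MPLS_LABEL_FIRST_UNRESERVED. For context, labels 0-15 are reserved by IANA; a sketch of the well-known values, believed to match include/uapi/linux/mpls.h but worth verifying there:

    /* IANA-reserved MPLS label values (assumed, verify against the uapi header) */
    enum {
        MPLS_LABEL_IPV4NULL         = 0,   /* RFC 3032 */
        MPLS_LABEL_RTALERT          = 1,   /* RFC 3032 */
        MPLS_LABEL_IPV6NULL         = 2,   /* RFC 3032 */
        MPLS_LABEL_IMPLNULL         = 3,   /* RFC 3032 */
        MPLS_LABEL_ENTROPY          = 7,   /* RFC 6790 */
        MPLS_LABEL_GAL              = 13,  /* RFC 5586 */
        MPLS_LABEL_OAMALERT         = 14,  /* RFC 3429 */
        MPLS_LABEL_EXTENSION        = 15,  /* RFC 7274 */
        MPLS_LABEL_FIRST_UNRESERVED = 16,  /* RFC 3032 */
    };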
@@ -320,13 +341,96 @@
 
 	platform_label = rtnl_dereference(net->mpls.platform_label);
 	platform_labels = net->mpls.platform_labels;
-	for (index = 16; index < platform_labels; index++) {
+	for (index = MPLS_LABEL_FIRST_UNRESERVED; index < platform_labels;
+	     index++) {
 		if (!rtnl_dereference(platform_label[index]))
 			return index;
 	}
 	return LABEL_NOT_SPECIFIED;
 }
 
+#if IS_ENABLED(CONFIG_INET)
+static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+{
+	struct net_device *dev;
+	struct rtable *rt;
+	struct in_addr daddr;
+
+	memcpy(&daddr, addr, sizeof(struct in_addr));
+	rt = ip_route_output(net, daddr.s_addr, 0, 0, 0);
+	if (IS_ERR(rt))
+		return ERR_CAST(rt);
+
+	dev = rt->dst.dev;
+	dev_hold(dev);
+
+	ip_rt_put(rt);
+
+	return dev;
+}
+#else
+static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+{
+	return ERR_PTR(-EAFNOSUPPORT);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+{
+	struct net_device *dev;
+	struct dst_entry *dst;
+	struct flowi6 fl6;
+	int err;
+
+	if (!ipv6_stub)
+		return ERR_PTR(-EAFNOSUPPORT);
+
+	memset(&fl6, 0, sizeof(fl6));
+	memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
+	err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
+	if (err)
+		return ERR_PTR(err);
+
+	dev = dst->dev;
+	dev_hold(dev);
+	dst_release(dst);
+
+	return dev;
+}
+#else
+static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+{
+	return ERR_PTR(-EAFNOSUPPORT);
+}
+#endif
+
+static struct net_device *find_outdev(struct net *net,
+				      struct mpls_route_config *cfg)
+{
+	struct net_device *dev = NULL;
+
+	if (!cfg->rc_ifindex) {
+		switch (cfg->rc_via_table) {
+		case NEIGH_ARP_TABLE:
+			dev = inet_fib_lookup_dev(net, cfg->rc_via);
+			break;
+		case NEIGH_ND_TABLE:
+			dev = inet6_fib_lookup_dev(net, cfg->rc_via);
+			break;
+		case NEIGH_LINK_TABLE:
+			break;
+		}
+	} else {
+		dev = dev_get_by_index(net, cfg->rc_ifindex);
+	}
+
+	if (!dev)
+		return ERR_PTR(-ENODEV);
+
+	return dev;
+}
+
 static int mpls_route_add(struct mpls_route_config *cfg)
 {
 	struct mpls_route __rcu **platform_label;
@@ -345,8 +449,8 @@
 		index = find_free_label(net);
 	}
 
-	/* The first 16 labels are reserved, and may not be set */
-	if (index < 16)
+	/* Reserved labels may not be set */
+	if (index < MPLS_LABEL_FIRST_UNRESERVED)
 		goto errout;
 
 	/* The full 20 bit range may not be supported. */
@@ -357,10 +461,12 @@
 	if (cfg->rc_output_labels > MAX_NEW_LABELS)
 		goto errout;
 
-	err = -ENODEV;
-	dev = dev_get_by_index(net, cfg->rc_ifindex);
-	if (!dev)
+	dev = find_outdev(net, cfg);
+	if (IS_ERR(dev)) {
+		err = PTR_ERR(dev);
+		dev = NULL;
 		goto errout;
+	}
 
 	/* Ensure this is a supported device */
 	err = -EINVAL;
@@ -401,6 +507,7 @@
 		rt->rt_label[i] = cfg->rc_output_label[i];
 	rt->rt_protocol = cfg->rc_protocol;
 	RCU_INIT_POINTER(rt->rt_dev, dev);
+	rt->rt_payload_type = cfg->rc_payload_type;
 	rt->rt_via_table = cfg->rc_via_table;
 	memcpy(rt->rt_via, cfg->rc_via, cfg->rc_via_alen);
 
@@ -423,8 +530,8 @@
 
 	index = cfg->rc_label;
 
-	/* The first 16 labels are reserved, and may not be removed */
-	if (index < 16)
+	/* Reserved labels may not be removed */
+	if (index < MPLS_LABEL_FIRST_UNRESERVED)
 		goto errout;
 
 	/* The full 20 bit range may not be supported */
@@ -626,6 +733,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(nla_put_labels);
 
 int nla_get_labels(const struct nlattr *nla,
 		   u32 max_labels, u32 *labels, u32 label[])
@@ -671,6 +779,7 @@
 	*labels = nla_labels;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(nla_get_labels);
 
 static int rtm_to_route_config(struct sk_buff *skb,  struct nlmsghdr *nlh,
 			       struct mpls_route_config *cfg)
@@ -740,8 +849,8 @@
 					   &cfg->rc_label))
 				goto errout;
 
-			/* The first 16 labels are reserved, and may not be set */
-			if (cfg->rc_label < 16)
+			/* Reserved labels may not be set */
+			if (cfg->rc_label < MPLS_LABEL_FIRST_UNRESERVED)
 				goto errout;
 
 			break;
@@ -866,8 +975,8 @@
 	ASSERT_RTNL();
 
 	index = cb->args[0];
-	if (index < 16)
-		index = 16;
+	if (index < MPLS_LABEL_FIRST_UNRESERVED)
+		index = MPLS_LABEL_FIRST_UNRESERVED;
 
 	platform_label = rtnl_dereference(net->mpls.platform_label);
 	platform_labels = net->mpls.platform_labels;
@@ -953,6 +1062,7 @@
 			goto nort0;
 		RCU_INIT_POINTER(rt0->rt_dev, lo);
 		rt0->rt_protocol = RTPROT_KERNEL;
+		rt0->rt_payload_type = MPT_IPV4;
 		rt0->rt_via_table = NEIGH_LINK_TABLE;
 		memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len);
 	}
@@ -963,6 +1073,7 @@
 			goto nort2;
 		RCU_INIT_POINTER(rt2->rt_dev, lo);
 		rt2->rt_protocol = RTPROT_KERNEL;
+		rt2->rt_payload_type = MPT_IPV6;
 		rt2->rt_via_table = NEIGH_LINK_TABLE;
 		memcpy(rt2->rt_via, lo->dev_addr, lo->addr_len);
 	}
@@ -1066,8 +1177,10 @@
 
 	table[0].data = net;
 	net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
-	if (net->mpls.ctl == NULL)
+	if (net->mpls.ctl == NULL) {
+		kfree(table);
 		return -ENOMEM;
+	}
 
 	return 0;
 }
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 8cabeb5..2681a4b 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -50,7 +50,12 @@
 	return result;
 }
 
-int nla_put_labels(struct sk_buff *skb, int attrtype,  u8 labels, const u32 label[]);
-int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels, u32 label[]);
+int nla_put_labels(struct sk_buff *skb, int attrtype,  u8 labels,
+		   const u32 label[]);
+int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels,
+		   u32 label[]);
+bool mpls_output_possible(const struct net_device *dev);
+unsigned int mpls_dev_mtu(const struct net_device *dev);
+bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
 
 #endif /* MPLS_INTERNAL_H */
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
new file mode 100644
index 0000000..21e70bc
--- /dev/null
+++ b/net/mpls/mpls_iptunnel.c
@@ -0,0 +1,231 @@
+/*
+ * mpls tunnels	An implementation of MPLS tunnels using the lightweight
+ *		tunnel infrastructure
+ *
+ * Authors:	Roopa Prabhu, <roopa@cumulusnetworks.com>
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/net.h>
+#include <linux/module.h>
+#include <linux/mpls.h>
+#include <linux/vmalloc.h>
+#include <net/ip.h>
+#include <net/dst.h>
+#include <net/lwtunnel.h>
+#include <net/netevent.h>
+#include <net/netns/generic.h>
+#include <net/ip6_fib.h>
+#include <net/route.h>
+#include <net/mpls_iptunnel.h>
+#include <linux/mpls_iptunnel.h>
+#include "internal.h"
+
+static const struct nla_policy mpls_iptunnel_policy[MPLS_IPTUNNEL_MAX + 1] = {
+	[MPLS_IPTUNNEL_DST]	= { .type = NLA_U32 },
+};
+
+static unsigned int mpls_encap_size(struct mpls_iptunnel_encap *en)
+{
+	/* The size of the layer 2.5 labels to be added for this route */
+	return en->labels * sizeof(struct mpls_shim_hdr);
+}
+
+int mpls_output(struct sock *sk, struct sk_buff *skb)
+{
+	struct mpls_iptunnel_encap *tun_encap_info;
+	struct mpls_shim_hdr *hdr;
+	struct net_device *out_dev;
+	unsigned int hh_len;
+	unsigned int new_header_size;
+	unsigned int mtu;
+	struct dst_entry *dst = skb_dst(skb);
+	struct rtable *rt = NULL;
+	struct rt6_info *rt6 = NULL;
+	int err = 0;
+	bool bos;
+	int i;
+	unsigned int ttl;
+
+	/* Obtain the ttl */
+	if (skb->protocol == htons(ETH_P_IP)) {
+		ttl = ip_hdr(skb)->ttl;
+		rt = (struct rtable *)dst;
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		ttl = ipv6_hdr(skb)->hop_limit;
+		rt6 = (struct rt6_info *)dst;
+	} else {
+		goto drop;
+	}
+
+	skb_orphan(skb);
+
+	/* Find the output device */
+	out_dev = dst->dev;
+	if (!mpls_output_possible(out_dev) ||
+	    !dst->lwtstate || skb_warn_if_lro(skb))
+		goto drop;
+
+	skb_forward_csum(skb);
+
+	tun_encap_info = mpls_lwtunnel_encap(dst->lwtstate);
+
+	/* Verify the destination can hold the packet */
+	new_header_size = mpls_encap_size(tun_encap_info);
+	mtu = mpls_dev_mtu(out_dev);
+	if (mpls_pkt_too_big(skb, mtu - new_header_size))
+		goto drop;
+
+	hh_len = LL_RESERVED_SPACE(out_dev);
+	if (!out_dev->header_ops)
+		hh_len = 0;
+
+	/* Ensure there is enough space for the headers in the skb */
+	if (skb_cow(skb, hh_len + new_header_size))
+		goto drop;
+
+	skb_push(skb, new_header_size);
+	skb_reset_network_header(skb);
+
+	skb->dev = out_dev;
+	skb->protocol = htons(ETH_P_MPLS_UC);
+
+	/* Push the new labels */
+	hdr = mpls_hdr(skb);
+	bos = true;
+	for (i = tun_encap_info->labels - 1; i >= 0; i--) {
+		hdr[i] = mpls_entry_encode(tun_encap_info->label[i],
+					   ttl, 0, bos);
+		bos = false;
+	}
+
+	if (rt)
+		err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway,
+				 skb);
+	else if (rt6)
+		err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway,
+				 skb);
+	if (err)
+		net_dbg_ratelimited("%s: packet transmission failed: %d\n",
+				    __func__, err);
+
+	return 0;
+
+drop:
+	kfree_skb(skb);
+	return -EINVAL;
+}
+
+static int mpls_build_state(struct net_device *dev, struct nlattr *nla,
+			    unsigned int family, const void *cfg,
+			    struct lwtunnel_state **ts)
+{
+	struct mpls_iptunnel_encap *tun_encap_info;
+	struct nlattr *tb[MPLS_IPTUNNEL_MAX + 1];
+	struct lwtunnel_state *newts;
+	int tun_encap_info_len;
+	int ret;
+
+	ret = nla_parse_nested(tb, MPLS_IPTUNNEL_MAX, nla,
+			       mpls_iptunnel_policy);
+	if (ret < 0)
+		return ret;
+
+	if (!tb[MPLS_IPTUNNEL_DST])
+		return -EINVAL;
+
+	tun_encap_info_len = sizeof(*tun_encap_info);
+
+	newts = lwtunnel_state_alloc(tun_encap_info_len);
+	if (!newts)
+		return -ENOMEM;
+
+	newts->len = tun_encap_info_len;
+	tun_encap_info = mpls_lwtunnel_encap(newts);
+	ret = nla_get_labels(tb[MPLS_IPTUNNEL_DST], MAX_NEW_LABELS,
+			     &tun_encap_info->labels, tun_encap_info->label);
+	if (ret)
+		goto errout;
+	newts->type = LWTUNNEL_ENCAP_MPLS;
+	newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
+
+	*ts = newts;
+
+	return 0;
+
+errout:
+	kfree(newts);
+	*ts = NULL;
+
+	return ret;
+}
+
+static int mpls_fill_encap_info(struct sk_buff *skb,
+				struct lwtunnel_state *lwtstate)
+{
+	struct mpls_iptunnel_encap *tun_encap_info;
+
+	tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+
+	if (nla_put_labels(skb, MPLS_IPTUNNEL_DST, tun_encap_info->labels,
+			   tun_encap_info->label))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static int mpls_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+	struct mpls_iptunnel_encap *tun_encap_info;
+
+	tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+
+	return nla_total_size(tun_encap_info->labels * 4);
+}
+
+static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+	struct mpls_iptunnel_encap *a_hdr = mpls_lwtunnel_encap(a);
+	struct mpls_iptunnel_encap *b_hdr = mpls_lwtunnel_encap(b);
+	int l;
+
+	if (a_hdr->labels != b_hdr->labels)
+		return 1;
+
+	for (l = 0; l < MAX_NEW_LABELS; l++)
+		if (a_hdr->label[l] != b_hdr->label[l])
+			return 1;
+	return 0;
+}
+
+static const struct lwtunnel_encap_ops mpls_iptun_ops = {
+	.build_state = mpls_build_state,
+	.output = mpls_output,
+	.fill_encap = mpls_fill_encap_info,
+	.get_encap_size = mpls_encap_nlsize,
+	.cmp_encap = mpls_encap_cmp,
+};
+
+static int __init mpls_iptunnel_init(void)
+{
+	return lwtunnel_encap_add_ops(&mpls_iptun_ops, LWTUNNEL_ENCAP_MPLS);
+}
+module_init(mpls_iptunnel_init);
+
+static void __exit mpls_iptunnel_exit(void)
+{
+	lwtunnel_encap_del_ops(&mpls_iptun_ops, LWTUNNEL_ENCAP_MPLS);
+}
+module_exit(mpls_iptunnel_exit);
+
+MODULE_DESCRIPTION("MultiProtocol Label Switching IP Tunnels");
+MODULE_LICENSE("GPL v2");
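The label-pushing loop in mpls_output() above builds the stack from the innermost entry outward and sets the S (bottom-of-stack) bit only on the last label. A hedged standalone rendering of the 32-bit shim layout (label:20, TC:3, S:1, TTL:8; our own function name, the kernel's mpls_entry_encode() returns a struct):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>   /* htonl, ntohl */

    /* shim = label[31:12] | tc[11:9] | s[8] | ttl[7:0], big-endian on the wire */
    static uint32_t mpls_shim_encode(uint32_t label, unsigned int ttl,
                                     unsigned int tc, int bos)
    {
        return htonl((label << 12) | (tc << 9) | ((!!bos) << 8) | ttl);
    }

    int main(void)
    {
        /* a two-label stack {200, 300}: the outer entry (200) goes first
         * and only the inner entry (300) carries S=1
         */
        uint32_t stack[2] = {
            mpls_shim_encode(200, 64, 0, 0),
            mpls_shim_encode(300, 64, 0, 1),
        };

        printf("outer %08x inner %08x\n", ntohl(stack[0]), ntohl(stack[1]));
        return 0;
    }

With the module loaded, an iproute2 route of the shape "ip route add <prefix> encap mpls 200/300 via <nexthop>" would be the expected way to exercise this path (command form assumed, not taken from this patch).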
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 6eae69a..3e1b4ab 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -867,6 +867,8 @@
 	depends on NETFILTER_ADVANCED
 	depends on IPV6 || IPV6=n
 	depends on !NF_CONNTRACK || NF_CONNTRACK
+	select NF_DUP_IPV4
+	select NF_DUP_IPV6 if IP6_NF_IPTABLES
 	---help---
 	This option adds a "TEE" target with which a packet can be cloned and
 	this clone be rerouted to another nexthop.
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index a0e5497..8e47f81 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -34,6 +34,9 @@
 const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
 EXPORT_SYMBOL_GPL(nf_ipv6_ops);
 
+DEFINE_PER_CPU(bool, nf_skb_duplicated);
+EXPORT_SYMBOL_GPL(nf_skb_duplicated);
+
 int nf_register_afinfo(const struct nf_afinfo *afinfo)
 {
 	mutex_lock(&afinfo_mutex);
@@ -52,9 +55,6 @@
 }
 EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
 
-struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
-EXPORT_SYMBOL(nf_hooks);
-
 #ifdef HAVE_JUMP_LABEL
 struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 EXPORT_SYMBOL(nf_hooks_needed);
@@ -62,63 +62,166 @@
 
 static DEFINE_MUTEX(nf_hook_mutex);
 
-int nf_register_hook(struct nf_hook_ops *reg)
+static struct list_head *nf_find_hook_list(struct net *net,
+					   const struct nf_hook_ops *reg)
 {
-	struct list_head *nf_hook_list;
+	struct list_head *hook_list = NULL;
+
+	if (reg->pf != NFPROTO_NETDEV)
+		hook_list = &net->nf.hooks[reg->pf][reg->hooknum];
+	else if (reg->hooknum == NF_NETDEV_INGRESS) {
+#ifdef CONFIG_NETFILTER_INGRESS
+		if (reg->dev && dev_net(reg->dev) == net)
+			hook_list = &reg->dev->nf_hooks_ingress;
+#endif
+	}
+	return hook_list;
+}
+
+struct nf_hook_entry {
+	const struct nf_hook_ops	*orig_ops;
+	struct nf_hook_ops		ops;
+};
+
+int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
+{
+	struct list_head *hook_list;
+	struct nf_hook_entry *entry;
 	struct nf_hook_ops *elem;
 
-	mutex_lock(&nf_hook_mutex);
-	switch (reg->pf) {
-	case NFPROTO_NETDEV:
-#ifdef CONFIG_NETFILTER_INGRESS
-		if (reg->hooknum == NF_NETDEV_INGRESS) {
-			BUG_ON(reg->dev == NULL);
-			nf_hook_list = &reg->dev->nf_hooks_ingress;
-			net_inc_ingress_queue();
-			break;
-		}
-#endif
-		/* Fall through. */
-	default:
-		nf_hook_list = &nf_hooks[reg->pf][reg->hooknum];
-		break;
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->orig_ops	= reg;
+	entry->ops	= *reg;
+
+	hook_list = nf_find_hook_list(net, reg);
+	if (!hook_list) {
+		kfree(entry);
+		return -ENOENT;
 	}
 
-	list_for_each_entry(elem, nf_hook_list, list) {
+	mutex_lock(&nf_hook_mutex);
+	list_for_each_entry(elem, hook_list, list) {
 		if (reg->priority < elem->priority)
 			break;
 	}
-	list_add_rcu(&reg->list, elem->list.prev);
+	list_add_rcu(&entry->ops.list, elem->list.prev);
 	mutex_unlock(&nf_hook_mutex);
+#ifdef CONFIG_NETFILTER_INGRESS
+	if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
+		net_inc_ingress_queue();
+#endif
 #ifdef HAVE_JUMP_LABEL
 	static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
 	return 0;
 }
-EXPORT_SYMBOL(nf_register_hook);
+EXPORT_SYMBOL(nf_register_net_hook);
 
-void nf_unregister_hook(struct nf_hook_ops *reg)
+void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
 {
+	struct list_head *hook_list;
+	struct nf_hook_entry *entry;
+	struct nf_hook_ops *elem;
+
+	hook_list = nf_find_hook_list(net, reg);
+	if (!hook_list)
+		return;
+
 	mutex_lock(&nf_hook_mutex);
-	list_del_rcu(&reg->list);
-	mutex_unlock(&nf_hook_mutex);
-	switch (reg->pf) {
-	case NFPROTO_NETDEV:
-#ifdef CONFIG_NETFILTER_INGRESS
-		if (reg->hooknum == NF_NETDEV_INGRESS) {
-			net_dec_ingress_queue();
+	list_for_each_entry(elem, hook_list, list) {
+		entry = container_of(elem, struct nf_hook_entry, ops);
+		if (entry->orig_ops == reg) {
+			list_del_rcu(&entry->ops.list);
 			break;
 		}
-		break;
-#endif
-	default:
-		break;
 	}
+	mutex_unlock(&nf_hook_mutex);
+	if (&elem->list == hook_list) {
+		WARN(1, "nf_unregister_net_hook: hook not found!\n");
+		return;
+	}
+#ifdef CONFIG_NETFILTER_INGRESS
+	if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
+		net_dec_ingress_queue();
+#endif
 #ifdef HAVE_JUMP_LABEL
 	static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
 	synchronize_net();
-	nf_queue_nf_hook_drop(reg);
+	nf_queue_nf_hook_drop(net, &entry->ops);
+	kfree(entry);
+}
+EXPORT_SYMBOL(nf_unregister_net_hook);
+
+int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+			  unsigned int n)
+{
+	unsigned int i;
+	int err = 0;
+
+	for (i = 0; i < n; i++) {
+		err = nf_register_net_hook(net, &reg[i]);
+		if (err)
+			goto err;
+	}
+	return err;
+
+err:
+	if (i > 0)
+		nf_unregister_net_hooks(net, reg, i);
+	return err;
+}
+EXPORT_SYMBOL(nf_register_net_hooks);
+
+void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+			     unsigned int n)
+{
+	while (n-- > 0)
+		nf_unregister_net_hook(net, &reg[n]);
+}
+EXPORT_SYMBOL(nf_unregister_net_hooks);
+
+static LIST_HEAD(nf_hook_list);
+
+int nf_register_hook(struct nf_hook_ops *reg)
+{
+	struct net *net, *last;
+	int ret;
+
+	rtnl_lock();
+	for_each_net(net) {
+		ret = nf_register_net_hook(net, reg);
+		if (ret && ret != -ENOENT)
+			goto rollback;
+	}
+	list_add_tail(&reg->list, &nf_hook_list);
+	rtnl_unlock();
+
+	return 0;
+rollback:
+	last = net;
+	for_each_net(net) {
+		if (net == last)
+			break;
+		nf_unregister_net_hook(net, reg);
+	}
+	rtnl_unlock();
+	return ret;
+}
+EXPORT_SYMBOL(nf_register_hook);
+
+void nf_unregister_hook(struct nf_hook_ops *reg)
+{
+	struct net *net;
+
+	rtnl_lock();
+	list_del(&reg->list);
+	for_each_net(net)
+		nf_unregister_net_hook(net, reg);
+	rtnl_unlock();
 }
 EXPORT_SYMBOL(nf_unregister_hook);
 
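With the move to per-namespace hook lists, nf_register_hook() above survives only as a compatibility wrapper that walks every namespace under RTNL and rolls back on failure; new code is expected to pair nf_register_net_hooks() with pernet operations. A minimal sketch of that pattern, assuming a hypothetical my_hook_fn with the nf_hookfn signature:

    static struct nf_hook_ops my_hook_ops[] = {
        {
            .hook     = my_hook_fn,          /* hypothetical */
            .pf       = NFPROTO_IPV4,
            .hooknum  = NF_INET_LOCAL_IN,
            .priority = NF_IP_PRI_FILTER,
        },
    };

    static int __net_init my_net_init(struct net *net)
    {
        return nf_register_net_hooks(net, my_hook_ops,
                                     ARRAY_SIZE(my_hook_ops));
    }

    static void __net_exit my_net_exit(struct net *net)
    {
        nf_unregister_net_hooks(net, my_hook_ops,
                                ARRAY_SIZE(my_hook_ops));
    }

    static struct pernet_operations my_net_ops = {
        .init = my_net_init,
        .exit = my_net_exit,
    };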
@@ -285,9 +388,12 @@
 struct nfq_ct_hook __rcu *nfq_ct_hook __read_mostly;
 EXPORT_SYMBOL_GPL(nfq_ct_hook);
 
-struct nfq_ct_nat_hook __rcu *nfq_ct_nat_hook __read_mostly;
-EXPORT_SYMBOL_GPL(nfq_ct_nat_hook);
-
+/* Built-in default zone used e.g. by modules. */
+const struct nf_conntrack_zone nf_ct_zone_dflt = {
+	.id	= NF_CT_DEFAULT_ZONE_ID,
+	.dir	= NF_CT_DEFAULT_ZONE_DIR,
+};
+EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
 #endif /* CONFIG_NF_CONNTRACK */
 
 #ifdef CONFIG_NF_NAT_NEEDED
@@ -295,8 +401,46 @@
 EXPORT_SYMBOL(nf_nat_decode_session_hook);
 #endif
 
+static int nf_register_hook_list(struct net *net)
+{
+	struct nf_hook_ops *elem;
+	int ret;
+
+	rtnl_lock();
+	list_for_each_entry(elem, &nf_hook_list, list) {
+		ret = nf_register_net_hook(net, elem);
+		if (ret && ret != -ENOENT)
+			goto out_undo;
+	}
+	rtnl_unlock();
+	return 0;
+
+out_undo:
+	list_for_each_entry_continue_reverse(elem, &nf_hook_list, list)
+		nf_unregister_net_hook(net, elem);
+	rtnl_unlock();
+	return ret;
+}
+
+static void nf_unregister_hook_list(struct net *net)
+{
+	struct nf_hook_ops *elem;
+
+	rtnl_lock();
+	list_for_each_entry(elem, &nf_hook_list, list)
+		nf_unregister_net_hook(net, elem);
+	rtnl_unlock();
+}
+
 static int __net_init netfilter_net_init(struct net *net)
 {
+	int i, h, ret;
+
+	for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
+		for (h = 0; h < NF_MAX_HOOKS; h++)
+			INIT_LIST_HEAD(&net->nf.hooks[i][h]);
+	}
+
 #ifdef CONFIG_PROC_FS
 	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
 						net->proc_net);
@@ -307,11 +451,16 @@
 		return -ENOMEM;
 	}
 #endif
-	return 0;
+	ret = nf_register_hook_list(net);
+	if (ret)
+		remove_proc_entry("netfilter", net->proc_net);
+
+	return ret;
 }
 
 static void __net_exit netfilter_net_exit(struct net *net)
 {
+	nf_unregister_hook_list(net);
 	remove_proc_entry("netfilter", net->proc_net);
 }
 
@@ -322,12 +471,7 @@
 
 int __init netfilter_init(void)
 {
-	int i, h, ret;
-
-	for (i = 0; i < ARRAY_SIZE(nf_hooks); i++) {
-		for (h = 0; h < NF_MAX_HOOKS; h++)
-			INIT_LIST_HEAD(&nf_hooks[i][h]);
-	}
+	int ret;
 
 	ret = register_pernet_subsys(&netfilter_net_ops);
 	if (ret < 0)
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 3b6929d..b32fb0d 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -162,6 +162,17 @@
 	  If you want to compile it in kernel, say Y. To compile it as a
 	  module, choose M here. If unsure, say N.
 
+config	IP_VS_OVF
+	tristate "weighted overflow scheduling"
+	---help---
+	  The weighted overflow scheduling algorithm directs network
+	  connections to the server with the highest weight that is
+	  currently available and overflows to the next when active
+	  connections exceed the node's weight.
+
+	  If you want to compile it in kernel, say Y. To compile it as a
+	  module, choose M here. If unsure, say N.
+
 config	IP_VS_LBLC
 	tristate "locality-based least-connection scheduling"
 	---help---
diff --git a/net/netfilter/ipvs/Makefile b/net/netfilter/ipvs/Makefile
index 38b2723..67f3f43 100644
--- a/net/netfilter/ipvs/Makefile
+++ b/net/netfilter/ipvs/Makefile
@@ -27,6 +27,7 @@
 obj-$(CONFIG_IP_VS_LC) += ip_vs_lc.o
 obj-$(CONFIG_IP_VS_WLC) += ip_vs_wlc.o
 obj-$(CONFIG_IP_VS_FO) += ip_vs_fo.o
+obj-$(CONFIG_IP_VS_OVF) += ip_vs_ovf.o
 obj-$(CONFIG_IP_VS_LBLC) += ip_vs_lblc.o
 obj-$(CONFIG_IP_VS_LBLCR) += ip_vs_lblcr.o
 obj-$(CONFIG_IP_VS_DH) += ip_vs_dh.o
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 24c5542..1a23e91 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2335,13 +2335,23 @@
 	    cmd == IP_VS_SO_SET_STOPDAEMON) {
 		struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
 
-		mutex_lock(&ipvs->sync_mutex);
-		if (cmd == IP_VS_SO_SET_STARTDAEMON)
-			ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
-						dm->syncid);
-		else
+		if (cmd == IP_VS_SO_SET_STARTDAEMON) {
+			struct ipvs_sync_daemon_cfg cfg;
+
+			memset(&cfg, 0, sizeof(cfg));
+			strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
+				sizeof(cfg.mcast_ifn));
+			cfg.syncid = dm->syncid;
+			rtnl_lock();
+			mutex_lock(&ipvs->sync_mutex);
+			ret = start_sync_thread(net, &cfg, dm->state);
+			mutex_unlock(&ipvs->sync_mutex);
+			rtnl_unlock();
+		} else {
+			mutex_lock(&ipvs->sync_mutex);
 			ret = stop_sync_thread(net, dm->state);
-		mutex_unlock(&ipvs->sync_mutex);
+			mutex_unlock(&ipvs->sync_mutex);
+		}
 		goto out_dec;
 	}
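
The reshuffled locking above is deliberate: start_sync_thread() now resolves the multicast interface with __dev_get_by_name(), which must run under RTNL, so both entry points (this sockopt path and the netlink path further down) take rtnl_lock strictly outside sync_mutex. The shared order, as a sketch:

rtnl_lock();				/* needed for __dev_get_by_name() */
mutex_lock(&ipvs->sync_mutex);
ret = start_sync_thread(net, &cfg, dm->state);
mutex_unlock(&ipvs->sync_mutex);
rtnl_unlock();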
 
@@ -2645,15 +2655,15 @@
 		mutex_lock(&ipvs->sync_mutex);
 		if (ipvs->sync_state & IP_VS_STATE_MASTER) {
 			d[0].state = IP_VS_STATE_MASTER;
-			strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
+			strlcpy(d[0].mcast_ifn, ipvs->mcfg.mcast_ifn,
 				sizeof(d[0].mcast_ifn));
-			d[0].syncid = ipvs->master_syncid;
+			d[0].syncid = ipvs->mcfg.syncid;
 		}
 		if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
 			d[1].state = IP_VS_STATE_BACKUP;
-			strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
+			strlcpy(d[1].mcast_ifn, ipvs->bcfg.mcast_ifn,
 				sizeof(d[1].mcast_ifn));
-			d[1].syncid = ipvs->backup_syncid;
+			d[1].syncid = ipvs->bcfg.syncid;
 		}
 		if (copy_to_user(user, &d, sizeof(d)) != 0)
 			ret = -EFAULT;
@@ -2808,6 +2818,11 @@
 	[IPVS_DAEMON_ATTR_MCAST_IFN]	= { .type = NLA_NUL_STRING,
 					    .len = IP_VS_IFNAME_MAXLEN },
 	[IPVS_DAEMON_ATTR_SYNC_ID]	= { .type = NLA_U32 },
+	[IPVS_DAEMON_ATTR_SYNC_MAXLEN]	= { .type = NLA_U16 },
+	[IPVS_DAEMON_ATTR_MCAST_GROUP]	= { .type = NLA_U32 },
+	[IPVS_DAEMON_ATTR_MCAST_GROUP6]	= { .len = sizeof(struct in6_addr) },
+	[IPVS_DAEMON_ATTR_MCAST_PORT]	= { .type = NLA_U16 },
+	[IPVS_DAEMON_ATTR_MCAST_TTL]	= { .type = NLA_U8 },
 };
 
 /* Policy used for attributes in nested attribute IPVS_CMD_ATTR_SERVICE */
@@ -3266,7 +3281,7 @@
 }
 
 static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __u32 state,
-				  const char *mcast_ifn, __u32 syncid)
+				  struct ipvs_sync_daemon_cfg *c)
 {
 	struct nlattr *nl_daemon;
 
@@ -3275,9 +3290,23 @@
 		return -EMSGSIZE;
 
 	if (nla_put_u32(skb, IPVS_DAEMON_ATTR_STATE, state) ||
-	    nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn) ||
-	    nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid))
+	    nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, c->mcast_ifn) ||
+	    nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, c->syncid) ||
+	    nla_put_u16(skb, IPVS_DAEMON_ATTR_SYNC_MAXLEN, c->sync_maxlen) ||
+	    nla_put_u16(skb, IPVS_DAEMON_ATTR_MCAST_PORT, c->mcast_port) ||
+	    nla_put_u8(skb, IPVS_DAEMON_ATTR_MCAST_TTL, c->mcast_ttl))
 		goto nla_put_failure;
+#ifdef CONFIG_IP_VS_IPV6
+	if (c->mcast_af == AF_INET6) {
+		if (nla_put_in6_addr(skb, IPVS_DAEMON_ATTR_MCAST_GROUP6,
+				     &c->mcast_group.in6))
+			goto nla_put_failure;
+	} else
+#endif
+		if (c->mcast_af == AF_INET &&
+		    nla_put_in_addr(skb, IPVS_DAEMON_ATTR_MCAST_GROUP,
+				    c->mcast_group.ip))
+			goto nla_put_failure;
 	nla_nest_end(skb, nl_daemon);
 
 	return 0;
@@ -3288,7 +3317,7 @@
 }
 
 static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __u32 state,
-				  const char *mcast_ifn, __u32 syncid,
+				  struct ipvs_sync_daemon_cfg *c,
 				  struct netlink_callback *cb)
 {
 	void *hdr;
@@ -3298,7 +3327,7 @@
 	if (!hdr)
 		return -EMSGSIZE;
 
-	if (ip_vs_genl_fill_daemon(skb, state, mcast_ifn, syncid))
+	if (ip_vs_genl_fill_daemon(skb, state, c))
 		goto nla_put_failure;
 
 	genlmsg_end(skb, hdr);
@@ -3318,8 +3347,7 @@
 	mutex_lock(&ipvs->sync_mutex);
 	if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
 		if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
-					   ipvs->master_mcast_ifn,
-					   ipvs->master_syncid, cb) < 0)
+					   &ipvs->mcfg, cb) < 0)
 			goto nla_put_failure;
 
 		cb->args[0] = 1;
@@ -3327,8 +3355,7 @@
 
 	if ((ipvs->sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
 		if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP,
-					   ipvs->backup_mcast_ifn,
-					   ipvs->backup_syncid, cb) < 0)
+					   &ipvs->bcfg, cb) < 0)
 			goto nla_put_failure;
 
 		cb->args[1] = 1;
@@ -3342,30 +3369,83 @@
 
 static int ip_vs_genl_new_daemon(struct net *net, struct nlattr **attrs)
 {
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct ipvs_sync_daemon_cfg c;
+	struct nlattr *a;
+	int ret;
+
+	memset(&c, 0, sizeof(c));
 	if (!(attrs[IPVS_DAEMON_ATTR_STATE] &&
 	      attrs[IPVS_DAEMON_ATTR_MCAST_IFN] &&
 	      attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
 		return -EINVAL;
+	strlcpy(c.mcast_ifn, nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
+		sizeof(c.mcast_ifn));
+	c.syncid = nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]);
+
+	a = attrs[IPVS_DAEMON_ATTR_SYNC_MAXLEN];
+	if (a)
+		c.sync_maxlen = nla_get_u16(a);
+
+	a = attrs[IPVS_DAEMON_ATTR_MCAST_GROUP];
+	if (a) {
+		c.mcast_af = AF_INET;
+		c.mcast_group.ip = nla_get_in_addr(a);
+		if (!ipv4_is_multicast(c.mcast_group.ip))
+			return -EINVAL;
+	} else {
+		a = attrs[IPVS_DAEMON_ATTR_MCAST_GROUP6];
+		if (a) {
+#ifdef CONFIG_IP_VS_IPV6
+			int addr_type;
+
+			c.mcast_af = AF_INET6;
+			c.mcast_group.in6 = nla_get_in6_addr(a);
+			addr_type = ipv6_addr_type(&c.mcast_group.in6);
+			if (!(addr_type & IPV6_ADDR_MULTICAST))
+				return -EINVAL;
+#else
+			return -EAFNOSUPPORT;
+#endif
+		}
+	}
+
+	a = attrs[IPVS_DAEMON_ATTR_MCAST_PORT];
+	if (a)
+		c.mcast_port = nla_get_u16(a);
+
+	a = attrs[IPVS_DAEMON_ATTR_MCAST_TTL];
+	if (a)
+		c.mcast_ttl = nla_get_u8(a);
 
 	/* The synchronization protocol is incompatible with mixed family
 	 * services
 	 */
-	if (net_ipvs(net)->mixed_address_family_dests > 0)
+	if (ipvs->mixed_address_family_dests > 0)
 		return -EINVAL;
 
-	return start_sync_thread(net,
-				 nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
-				 nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
-				 nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]));
+	rtnl_lock();
+	mutex_lock(&ipvs->sync_mutex);
+	ret = start_sync_thread(net, &c,
+				nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+	mutex_unlock(&ipvs->sync_mutex);
+	rtnl_unlock();
+	return ret;
 }
 
 static int ip_vs_genl_del_daemon(struct net *net, struct nlattr **attrs)
 {
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	int ret;
+
 	if (!attrs[IPVS_DAEMON_ATTR_STATE])
 		return -EINVAL;
 
-	return stop_sync_thread(net,
-				nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+	mutex_lock(&ipvs->sync_mutex);
+	ret = stop_sync_thread(net,
+			       nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+	mutex_unlock(&ipvs->sync_mutex);
+	return ret;
 }
 
 static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
@@ -3389,7 +3469,7 @@
 
 static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info)
 {
-	int ret = 0, cmd;
+	int ret = -EINVAL, cmd;
 	struct net *net;
 	struct netns_ipvs *ipvs;
 
@@ -3400,22 +3480,19 @@
 	if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) {
 		struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1];
 
-		mutex_lock(&ipvs->sync_mutex);
 		if (!info->attrs[IPVS_CMD_ATTR_DAEMON] ||
 		    nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX,
 				     info->attrs[IPVS_CMD_ATTR_DAEMON],
-				     ip_vs_daemon_policy)) {
-			ret = -EINVAL;
+				     ip_vs_daemon_policy))
 			goto out;
-		}
 
 		if (cmd == IPVS_CMD_NEW_DAEMON)
 			ret = ip_vs_genl_new_daemon(net, daemon_attrs);
 		else
 			ret = ip_vs_genl_del_daemon(net, daemon_attrs);
-out:
-		mutex_unlock(&ipvs->sync_mutex);
 	}
+
+out:
 	return ret;
 }
 
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index 5882bbf..1361845 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -274,7 +274,7 @@
 		" for conn " FMT_CONN "\n",
 		__func__, ARG_TUPLE(&tuple), ARG_CONN(cp));
 
-	h = nf_conntrack_find_get(ip_vs_conn_net(cp), NF_CT_DEFAULT_ZONE,
+	h = nf_conntrack_find_get(ip_vs_conn_net(cp), &nf_ct_zone_dflt,
 				  &tuple);
 	if (h) {
 		ct = nf_ct_tuplehash_to_ctrack(h);
diff --git a/net/netfilter/ipvs/ip_vs_ovf.c b/net/netfilter/ipvs/ip_vs_ovf.c
new file mode 100644
index 0000000..f7d62c3
--- /dev/null
+++ b/net/netfilter/ipvs/ip_vs_ovf.c
@@ -0,0 +1,86 @@
+/*
+ * IPVS:        Overflow-Connection Scheduling module
+ *
+ * Authors:     Raducu Deaconu <rhadoo_io@yahoo.com>
+ *
+ *              This program is free software; you can redistribute it and/or
+ *              modify it under the terms of the GNU General Public License
+ *              as published by the Free Software Foundation; either version
+ *              2 of the License, or (at your option) any later version.
+ *
+ * Scheduler implements "overflow" load balancing according to the number
+ * of active connections: it keeps all connections to the node with the
+ * highest weight and overflows to the next node if the number of
+ * connections exceeds the node's weight.
+ * Note that this scheduler might not be suitable for UDP because it only
+ * uses active connections.
+ *
+ */
+
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <net/ip_vs.h>
+
+/* OVF Connection scheduling */
+static struct ip_vs_dest *
+ip_vs_ovf_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
+		   struct ip_vs_iphdr *iph)
+{
+	struct ip_vs_dest *dest, *h = NULL;
+	int hw = 0, w;
+
+	IP_VS_DBG(6, "ip_vs_ovf_schedule(): Scheduling...\n");
+	/* select the node with the highest weight, go to next in line if
+	 * active connections exceed its weight
+	 */
+	list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
+		w = atomic_read(&dest->weight);
+		if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||
+		    atomic_read(&dest->activeconns) > w ||
+		    w == 0)
+			continue;
+		if (!h || w > hw) {
+			h = dest;
+			hw = w;
+		}
+	}
+
+	if (h) {
+		IP_VS_DBG_BUF(6, "OVF: server %s:%u active %d w %d\n",
+			      IP_VS_DBG_ADDR(h->af, &h->addr),
+			      ntohs(h->port),
+			      atomic_read(&h->activeconns),
+			      atomic_read(&h->weight));
+		return h;
+	}
+
+	ip_vs_scheduler_err(svc, "no destination available");
+	return NULL;
+}
+
+static struct ip_vs_scheduler ip_vs_ovf_scheduler = {
+	.name =			"ovf",
+	.refcnt =		ATOMIC_INIT(0),
+	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_ovf_scheduler.n_list),
+	.schedule =		ip_vs_ovf_schedule,
+};
+
+static int __init ip_vs_ovf_init(void)
+{
+	return register_ip_vs_scheduler(&ip_vs_ovf_scheduler);
+}
+
+static void __exit ip_vs_ovf_cleanup(void)
+{
+	unregister_ip_vs_scheduler(&ip_vs_ovf_scheduler);
+	synchronize_rcu();
+}
+
+module_init(ip_vs_ovf_init);
+module_exit(ip_vs_ovf_cleanup);
+MODULE_LICENSE("GPL");
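
To make the selection rule concrete, here is a standalone userspace rendering of the same loop -- a sketch, not kernel code. Server A is already past its weight, so the pick falls through to the next-heaviest server that still has headroom:

#include <stdio.h>

struct dest { const char *name; int weight; int active; };

static const struct dest *ovf_pick(const struct dest *d, int n)
{
	const struct dest *best = NULL;
	int i, best_w = 0;

	for (i = 0; i < n; i++) {
		int w = d[i].weight;

		if (w == 0 || d[i].active > w)
			continue;		/* disabled or overflowed */
		if (!best || w > best_w) {
			best = &d[i];
			best_w = w;
		}
	}
	return best;
}

int main(void)
{
	struct dest pool[] = {
		{ "A", 3, 4 },	/* active > weight: overflowed */
		{ "B", 2, 1 },
		{ "C", 1, 0 },
	};
	const struct dest *d = ovf_pick(pool, 3);

	printf("picked %s\n", d ? d->name : "none");	/* picked B */
	return 0;
}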
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 7e81416..a2ff7d7 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -137,7 +137,7 @@
 
 void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
 {
-	if (scheduler && scheduler->module)
+	if (scheduler)
 		module_put(scheduler->module);
 }
 
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index d99ad93e..43f1409 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -262,6 +262,11 @@
 	/* ip_vs_sync_conn entries start here */
 };
 
+union ipvs_sockaddr {
+	struct sockaddr_in	in;
+	struct sockaddr_in6	in6;
+};
+
 struct ip_vs_sync_buff {
 	struct list_head        list;
 	unsigned long           firstuse;
@@ -320,26 +325,28 @@
  * Create a new sync buffer for Version 1 proto.
  */
 static inline struct ip_vs_sync_buff *
-ip_vs_sync_buff_create(struct netns_ipvs *ipvs)
+ip_vs_sync_buff_create(struct netns_ipvs *ipvs, unsigned int len)
 {
 	struct ip_vs_sync_buff *sb;
 
 	if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
 		return NULL;
 
-	sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+	len = max_t(unsigned int, len + sizeof(struct ip_vs_sync_mesg),
+		    ipvs->mcfg.sync_maxlen);
+	sb->mesg = kmalloc(len, GFP_ATOMIC);
 	if (!sb->mesg) {
 		kfree(sb);
 		return NULL;
 	}
 	sb->mesg->reserved = 0;  /* old nr_conns i.e. must be zero now */
 	sb->mesg->version = SYNC_PROTO_VER;
-	sb->mesg->syncid = ipvs->master_syncid;
+	sb->mesg->syncid = ipvs->mcfg.syncid;
 	sb->mesg->size = htons(sizeof(struct ip_vs_sync_mesg));
 	sb->mesg->nr_conns = 0;
 	sb->mesg->spare = 0;
 	sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg);
-	sb->end = (unsigned char *)sb->mesg + ipvs->send_mesg_maxlen;
+	sb->end = (unsigned char *)sb->mesg + len;
 
 	sb->firstuse = jiffies;
 	return sb;
@@ -402,7 +409,7 @@
  * Create a new sync buffer for Version 0 proto.
  */
 static inline struct ip_vs_sync_buff *
-ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs)
+ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs, unsigned int len)
 {
 	struct ip_vs_sync_buff *sb;
 	struct ip_vs_sync_mesg_v0 *mesg;
@@ -410,17 +417,19 @@
 	if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
 		return NULL;
 
-	sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+	len = max_t(unsigned int, len + sizeof(struct ip_vs_sync_mesg_v0),
+		    ipvs->mcfg.sync_maxlen);
+	sb->mesg = kmalloc(len, GFP_ATOMIC);
 	if (!sb->mesg) {
 		kfree(sb);
 		return NULL;
 	}
 	mesg = (struct ip_vs_sync_mesg_v0 *)sb->mesg;
 	mesg->nr_conns = 0;
-	mesg->syncid = ipvs->master_syncid;
+	mesg->syncid = ipvs->mcfg.syncid;
 	mesg->size = htons(sizeof(struct ip_vs_sync_mesg_v0));
 	sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0);
-	sb->end = (unsigned char *)mesg + ipvs->send_mesg_maxlen;
+	sb->end = (unsigned char *)mesg + len;
 	sb->firstuse = jiffies;
 	return sb;
 }
@@ -533,7 +542,7 @@
 	struct ip_vs_sync_buff *buff;
 	struct ipvs_master_sync_state *ms;
 	int id;
-	int len;
+	unsigned int len;
 
 	if (unlikely(cp->af != AF_INET))
 		return;
@@ -553,17 +562,19 @@
 	id = select_master_thread_id(ipvs, cp);
 	ms = &ipvs->ms[id];
 	buff = ms->sync_buff;
+	len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
+		SIMPLE_CONN_SIZE;
 	if (buff) {
 		m = (struct ip_vs_sync_mesg_v0 *) buff->mesg;
 		/* Send buffer if it is for v1 */
-		if (!m->nr_conns) {
+		if (buff->head + len > buff->end || !m->nr_conns) {
 			sb_queue_tail(ipvs, ms);
 			ms->sync_buff = NULL;
 			buff = NULL;
 		}
 	}
 	if (!buff) {
-		buff = ip_vs_sync_buff_create_v0(ipvs);
+		buff = ip_vs_sync_buff_create_v0(ipvs, len);
 		if (!buff) {
 			spin_unlock_bh(&ipvs->sync_buff_lock);
 			pr_err("ip_vs_sync_buff_create failed.\n");
@@ -572,8 +583,6 @@
 		ms->sync_buff = buff;
 	}
 
-	len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
-		SIMPLE_CONN_SIZE;
 	m = (struct ip_vs_sync_mesg_v0 *) buff->mesg;
 	s = (struct ip_vs_sync_conn_v0 *) buff->head;
 
@@ -597,12 +606,6 @@
 	m->nr_conns++;
 	m->size = htons(ntohs(m->size) + len);
 	buff->head += len;
-
-	/* check if there is a space for next one */
-	if (buff->head + FULL_CONN_SIZE > buff->end) {
-		sb_queue_tail(ipvs, ms);
-		ms->sync_buff = NULL;
-	}
 	spin_unlock_bh(&ipvs->sync_buff_lock);
 
 	/* synchronize its controller if it has */
@@ -694,7 +697,7 @@
 	}
 
 	if (!buff) {
-		buff = ip_vs_sync_buff_create(ipvs);
+		buff = ip_vs_sync_buff_create(ipvs, len);
 		if (!buff) {
 			spin_unlock_bh(&ipvs->sync_buff_lock);
 			pr_err("ip_vs_sync_buff_create failed.\n");
@@ -1219,7 +1222,7 @@
 		return;
 	}
 	/* SyncID sanity check */
-	if (ipvs->backup_syncid != 0 && m2->syncid != ipvs->backup_syncid) {
+	if (ipvs->bcfg.syncid != 0 && m2->syncid != ipvs->bcfg.syncid) {
 		IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid);
 		return;
 	}
@@ -1303,6 +1306,14 @@
 	/* setsockopt(sock, SOL_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop)); */
 	lock_sock(sk);
 	inet->mc_loop = loop ? 1 : 0;
+#ifdef CONFIG_IP_VS_IPV6
+	if (sk->sk_family == AF_INET6) {
+		struct ipv6_pinfo *np = inet6_sk(sk);
+
+		/* IPV6_MULTICAST_LOOP */
+		np->mc_loop = loop ? 1 : 0;
+	}
+#endif
 	release_sock(sk);
 }
 
@@ -1316,6 +1327,33 @@
 	/* setsockopt(sock, SOL_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl)); */
 	lock_sock(sk);
 	inet->mc_ttl = ttl;
+#ifdef CONFIG_IP_VS_IPV6
+	if (sk->sk_family == AF_INET6) {
+		struct ipv6_pinfo *np = inet6_sk(sk);
+
+		/* IPV6_MULTICAST_HOPS */
+		np->mcast_hops = ttl;
+	}
+#endif
+	release_sock(sk);
+}
+
+/* Control fragmentation of messages */
+static void set_mcast_pmtudisc(struct sock *sk, int val)
+{
+	struct inet_sock *inet = inet_sk(sk);
+
+	/* setsockopt(sock, SOL_IP, IP_MTU_DISCOVER, &val, sizeof(val)); */
+	lock_sock(sk);
+	inet->pmtudisc = val;
+#ifdef CONFIG_IP_VS_IPV6
+	if (sk->sk_family == AF_INET6) {
+		struct ipv6_pinfo *np = inet6_sk(sk);
+
+		/* IPV6_MTU_DISCOVER */
+		np->pmtudisc = val;
+	}
+#endif
 	release_sock(sk);
 }
 
@@ -1338,44 +1376,15 @@
 	lock_sock(sk);
 	inet->mc_index = dev->ifindex;
 	/*  inet->mc_addr  = 0; */
-	release_sock(sk);
+#ifdef CONFIG_IP_VS_IPV6
+	if (sk->sk_family == AF_INET6) {
+		struct ipv6_pinfo *np = inet6_sk(sk);
 
-	return 0;
-}
-
-
-/*
- *	Set the maximum length of sync message according to the
- *	specified interface's MTU.
- */
-static int set_sync_mesg_maxlen(struct net *net, int sync_state)
-{
-	struct netns_ipvs *ipvs = net_ipvs(net);
-	struct net_device *dev;
-	int num;
-
-	if (sync_state == IP_VS_STATE_MASTER) {
-		dev = __dev_get_by_name(net, ipvs->master_mcast_ifn);
-		if (!dev)
-			return -ENODEV;
-
-		num = (dev->mtu - sizeof(struct iphdr) -
-		       sizeof(struct udphdr) -
-		       SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE;
-		ipvs->send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
-			SIMPLE_CONN_SIZE * min(num, MAX_CONNS_PER_SYNCBUFF);
-		IP_VS_DBG(7, "setting the maximum length of sync sending "
-			  "message %d.\n", ipvs->send_mesg_maxlen);
-	} else if (sync_state == IP_VS_STATE_BACKUP) {
-		dev = __dev_get_by_name(net, ipvs->backup_mcast_ifn);
-		if (!dev)
-			return -ENODEV;
-
-		ipvs->recv_mesg_maxlen = dev->mtu -
-			sizeof(struct iphdr) - sizeof(struct udphdr);
-		IP_VS_DBG(7, "setting the maximum length of sync receiving "
-			  "message %d.\n", ipvs->recv_mesg_maxlen);
+		/* IPV6_MULTICAST_IF */
+		np->mcast_oif = dev->ifindex;
 	}
+#endif
+	release_sock(sk);
 
 	return 0;
 }
@@ -1405,15 +1414,34 @@
 
 	mreq.imr_ifindex = dev->ifindex;
 
-	rtnl_lock();
 	lock_sock(sk);
 	ret = ip_mc_join_group(sk, &mreq);
 	release_sock(sk);
-	rtnl_unlock();
 
 	return ret;
 }
 
+#ifdef CONFIG_IP_VS_IPV6
+static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
+			     char *ifname)
+{
+	struct net *net = sock_net(sk);
+	struct net_device *dev;
+	int ret;
+
+	dev = __dev_get_by_name(net, ifname);
+	if (!dev)
+		return -ENODEV;
+	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
+		return -EINVAL;
+
+	lock_sock(sk);
+	ret = ipv6_sock_mc_join(sk, dev->ifindex, addr);
+	release_sock(sk);
+
+	return ret;
+}
+#endif
 
 static int bind_mcastif_addr(struct socket *sock, char *ifname)
 {
@@ -1442,6 +1470,26 @@
 	return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin));
 }
 
+static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
+			       struct ipvs_sync_daemon_cfg *c, int id)
+{
+	if (AF_INET6 == c->mcast_af) {
+		sa->in6 = (struct sockaddr_in6) {
+			.sin6_family = AF_INET6,
+			.sin6_port = htons(c->mcast_port + id),
+		};
+		sa->in6.sin6_addr = c->mcast_group.in6;
+		*salen = sizeof(sa->in6);
+	} else {
+		sa->in = (struct sockaddr_in) {
+			.sin_family = AF_INET,
+			.sin_port = htons(c->mcast_port + id),
+		};
+		sa->in.sin_addr = c->mcast_group.in;
+		*salen = sizeof(sa->in);
+	}
+}
+
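
For a fully defaulted setup the result is easy to state: start_sync_thread() below falls back to IP_VS_SYNC_GROUP (224.0.0.81) and IP_VS_SYNC_PORT (8848), and per-thread sockets just offset the port by the sync thread id. A runnable userspace sketch of the IPv4 branch for thread id 1:

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	struct sockaddr_in sa = {
		.sin_family = AF_INET,
		.sin_port   = htons(8848 + 1),	/* IP_VS_SYNC_PORT + id */
	};

	inet_pton(AF_INET, "224.0.0.81", &sa.sin_addr);	/* IP_VS_SYNC_GROUP */
	printf("%s:%u\n", inet_ntoa(sa.sin_addr), ntohs(sa.sin_port));
	return 0;
}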
 /*
  *      Set up sending multicast socket over UDP
  */
@@ -1449,40 +1497,43 @@
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 	/* multicast addr */
-	struct sockaddr_in mcast_addr = {
-		.sin_family		= AF_INET,
-		.sin_port		= cpu_to_be16(IP_VS_SYNC_PORT + id),
-		.sin_addr.s_addr	= cpu_to_be32(IP_VS_SYNC_GROUP),
-	};
+	union ipvs_sockaddr mcast_addr;
 	struct socket *sock;
-	int result;
+	int result, salen;
 
 	/* First create a socket */
-	result = sock_create_kern(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+	result = sock_create_kern(net, ipvs->mcfg.mcast_af, SOCK_DGRAM,
+				  IPPROTO_UDP, &sock);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
 		return ERR_PTR(result);
 	}
-	result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn);
+	result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn);
 	if (result < 0) {
 		pr_err("Error setting outbound mcast interface\n");
 		goto error;
 	}
 
 	set_mcast_loop(sock->sk, 0);
-	set_mcast_ttl(sock->sk, 1);
+	set_mcast_ttl(sock->sk, ipvs->mcfg.mcast_ttl);
+	/* Allow fragmentation if MTU changes */
+	set_mcast_pmtudisc(sock->sk, IP_PMTUDISC_DONT);
 	result = sysctl_sync_sock_size(ipvs);
 	if (result > 0)
 		set_sock_size(sock->sk, 1, result);
 
-	result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn);
+	if (AF_INET == ipvs->mcfg.mcast_af)
+		result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn);
+	else
+		result = 0;
 	if (result < 0) {
 		pr_err("Error binding address of the mcast interface\n");
 		goto error;
 	}
 
+	get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->mcfg, id);
 	result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr,
-			sizeof(struct sockaddr), 0);
+				    salen, 0);
 	if (result < 0) {
 		pr_err("Error connecting to the multicast addr\n");
 		goto error;
@@ -1503,16 +1554,13 @@
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 	/* multicast addr */
-	struct sockaddr_in mcast_addr = {
-		.sin_family		= AF_INET,
-		.sin_port		= cpu_to_be16(IP_VS_SYNC_PORT + id),
-		.sin_addr.s_addr	= cpu_to_be32(IP_VS_SYNC_GROUP),
-	};
+	union ipvs_sockaddr mcast_addr;
 	struct socket *sock;
-	int result;
+	int result, salen;
 
 	/* First create a socket */
-	result = sock_create_kern(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+	result = sock_create_kern(net, ipvs->bcfg.mcast_af, SOCK_DGRAM,
+				  IPPROTO_UDP, &sock);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
 		return ERR_PTR(result);
@@ -1523,17 +1571,22 @@
 	if (result > 0)
 		set_sock_size(sock->sk, 0, result);
 
-	result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr,
-			sizeof(struct sockaddr));
+	get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
+	result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
 	if (result < 0) {
 		pr_err("Error binding to the multicast addr\n");
 		goto error;
 	}
 
 	/* join the multicast group */
-	result = join_mcast_group(sock->sk,
-			(struct in_addr *) &mcast_addr.sin_addr,
-			ipvs->backup_mcast_ifn);
+#ifdef CONFIG_IP_VS_IPV6
+	if (ipvs->bcfg.mcast_af == AF_INET6)
+		result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
+					   ipvs->bcfg.mcast_ifn);
+	else
+#endif
+		result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
+					  ipvs->bcfg.mcast_ifn);
 	if (result < 0) {
 		pr_err("Error joining to the multicast group\n");
 		goto error;
@@ -1641,7 +1694,7 @@
 
 	pr_info("sync thread started: state = MASTER, mcast_ifn = %s, "
 		"syncid = %d, id = %d\n",
-		ipvs->master_mcast_ifn, ipvs->master_syncid, tinfo->id);
+		ipvs->mcfg.mcast_ifn, ipvs->mcfg.syncid, tinfo->id);
 
 	for (;;) {
 		sb = next_sync_buff(ipvs, ms);
@@ -1695,7 +1748,7 @@
 
 	pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
 		"syncid = %d, id = %d\n",
-		ipvs->backup_mcast_ifn, ipvs->backup_syncid, tinfo->id);
+		ipvs->bcfg.mcast_ifn, ipvs->bcfg.syncid, tinfo->id);
 
 	while (!kthread_should_stop()) {
 		wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
@@ -1705,7 +1758,7 @@
 		/* do we have data now? */
 		while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
 			len = ip_vs_receive(tinfo->sock, tinfo->buf,
-					ipvs->recv_mesg_maxlen);
+					ipvs->bcfg.sync_maxlen);
 			if (len <= 0) {
 				if (len != -EAGAIN)
 					pr_err("receiving message error\n");
@@ -1725,16 +1778,19 @@
 }
 
 
-int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
+int start_sync_thread(struct net *net, struct ipvs_sync_daemon_cfg *c,
+		      int state)
 {
 	struct ip_vs_sync_thread_data *tinfo;
 	struct task_struct **array = NULL, *task;
 	struct socket *sock;
 	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct net_device *dev;
 	char *name;
 	int (*threadfn)(void *data);
-	int id, count;
+	int id, count, hlen;
 	int result = -ENOMEM;
+	u16 mtu, min_mtu;
 
 	IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
 	IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
@@ -1746,22 +1802,46 @@
 	} else
 		count = ipvs->threads_mask + 1;
 
+	if (c->mcast_af == AF_UNSPEC) {
+		c->mcast_af = AF_INET;
+		c->mcast_group.ip = cpu_to_be32(IP_VS_SYNC_GROUP);
+	}
+	if (!c->mcast_port)
+		c->mcast_port = IP_VS_SYNC_PORT;
+	if (!c->mcast_ttl)
+		c->mcast_ttl = 1;
+
+	dev = __dev_get_by_name(net, c->mcast_ifn);
+	if (!dev) {
+		pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
+		return -ENODEV;
+	}
+	hlen = (AF_INET6 == c->mcast_af) ?
+	       sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
+	       sizeof(struct iphdr) + sizeof(struct udphdr);
+	mtu = (state == IP_VS_STATE_BACKUP) ?
+		  clamp(dev->mtu, 1500U, 65535U) : 1500U;
+	min_mtu = (state == IP_VS_STATE_BACKUP) ? 1024 : 1;
+
+	if (c->sync_maxlen)
+		c->sync_maxlen = clamp_t(unsigned int,
+					 c->sync_maxlen, min_mtu,
+					 65535 - hlen);
+	else
+		c->sync_maxlen = mtu - hlen;
+
 	if (state == IP_VS_STATE_MASTER) {
 		if (ipvs->ms)
 			return -EEXIST;
 
-		strlcpy(ipvs->master_mcast_ifn, mcast_ifn,
-			sizeof(ipvs->master_mcast_ifn));
-		ipvs->master_syncid = syncid;
+		ipvs->mcfg = *c;
 		name = "ipvs-m:%d:%d";
 		threadfn = sync_thread_master;
 	} else if (state == IP_VS_STATE_BACKUP) {
 		if (ipvs->backup_threads)
 			return -EEXIST;
 
-		strlcpy(ipvs->backup_mcast_ifn, mcast_ifn,
-			sizeof(ipvs->backup_mcast_ifn));
-		ipvs->backup_syncid = syncid;
+		ipvs->bcfg = *c;
 		name = "ipvs-b:%d:%d";
 		threadfn = sync_thread_backup;
 	} else {
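
The clamping above is easy to sanity-check by hand; a tiny standalone sketch of the usual IPv4 master case (no user-supplied sync_maxlen, dev->mtu at the default 1500):

#include <stdio.h>

int main(void)
{
	unsigned int mtu  = 1500;	/* master side is pinned to 1500 */
	unsigned int hlen = 20 + 8;	/* IPv4: iphdr + udphdr */
	unsigned int sync_maxlen = mtu - hlen;

	printf("sync_maxlen = %u\n", sync_maxlen);	/* 1472 */
	return 0;
}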
@@ -1789,7 +1869,6 @@
 		if (!array)
 			goto out;
 	}
-	set_sync_mesg_maxlen(net, state);
 
 	tinfo = NULL;
 	for (id = 0; id < count; id++) {
@@ -1807,7 +1886,7 @@
 		tinfo->net = net;
 		tinfo->sock = sock;
 		if (state == IP_VS_STATE_BACKUP) {
-			tinfo->buf = kmalloc(ipvs->recv_mesg_maxlen,
+			tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
 					     GFP_KERNEL);
 			if (!tinfo->buf)
 				goto outtinfo;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3c20d02..eedf049 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -126,7 +126,7 @@
 unsigned int nf_conntrack_hash_rnd __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
 
-static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
+static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple)
 {
 	unsigned int n;
 
@@ -135,7 +135,7 @@
 	 * three bytes manually.
 	 */
 	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
-	return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
+	return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^
 		      (((__force __u16)tuple->dst.u.all << 16) |
 		      tuple->dst.protonum));
 }
@@ -151,15 +151,15 @@
 }
 
 static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
-				  u16 zone, unsigned int size)
+				  unsigned int size)
 {
-	return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
+	return __hash_bucket(hash_conntrack_raw(tuple), size);
 }
 
-static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
+static inline u_int32_t hash_conntrack(const struct net *net,
 				       const struct nf_conntrack_tuple *tuple)
 {
-	return __hash_conntrack(tuple, zone, net->ct.htable_size);
+	return __hash_conntrack(tuple, net->ct.htable_size);
 }
 
 bool
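
One consequence worth spelling out: with the zone gone from the hash, identical tuples in different zones now land in the same bucket and are distinguished only at compare time -- which is exactly why nf_ct_key_equal below grows a zone test. A toy model with invented types:

/* Toy model (not kernel code): the hash ignores the zone, equality
 * does not, so cross-zone twins share a bucket but never match. */
struct toy_entry { unsigned int tuple_hash; int zone_id; };

static int toy_key_equal(const struct toy_entry *e,
			 unsigned int tuple_hash, int zone_id)
{
	return e->tuple_hash == tuple_hash && e->zone_id == zone_id;
}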
@@ -288,7 +288,9 @@
 }
 
 /* Released via destroy_conntrack() */
-struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+				 const struct nf_conntrack_zone *zone,
+				 gfp_t flags)
 {
 	struct nf_conn *tmpl;
 
@@ -299,24 +301,15 @@
 	tmpl->status = IPS_TEMPLATE;
 	write_pnet(&tmpl->ct_net, net);
 
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-	if (zone) {
-		struct nf_conntrack_zone *nf_ct_zone;
+	if (nf_ct_zone_add(tmpl, flags, zone) < 0)
+		goto out_free;
 
-		nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
-		if (!nf_ct_zone)
-			goto out_free;
-		nf_ct_zone->id = zone;
-	}
-#endif
 	atomic_set(&tmpl->ct_general.use, 0);
 
 	return tmpl;
-#ifdef CONFIG_NF_CONNTRACK_ZONES
 out_free:
 	kfree(tmpl);
 	return NULL;
-#endif
 }
 EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
 
@@ -373,7 +366,6 @@
 {
 	struct net *net = nf_ct_net(ct);
 	unsigned int hash, reply_hash;
-	u16 zone = nf_ct_zone(ct);
 	unsigned int sequence;
 
 	nf_ct_helper_destroy(ct);
@@ -381,9 +373,9 @@
 	local_bh_disable();
 	do {
 		sequence = read_seqcount_begin(&net->ct.generation);
-		hash = hash_conntrack(net, zone,
+		hash = hash_conntrack(net,
 				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-		reply_hash = hash_conntrack(net, zone,
+		reply_hash = hash_conntrack(net,
 					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
@@ -431,8 +423,8 @@
 
 static inline bool
 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
-			const struct nf_conntrack_tuple *tuple,
-			u16 zone)
+		const struct nf_conntrack_tuple *tuple,
+		const struct nf_conntrack_zone *zone)
 {
 	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 
@@ -440,8 +432,8 @@
 	 * so we need to check that the conntrack is confirmed
 	 */
 	return nf_ct_tuple_equal(tuple, &h->tuple) &&
-		nf_ct_zone(ct) == zone &&
-		nf_ct_is_confirmed(ct);
+	       nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
+	       nf_ct_is_confirmed(ct);
 }
 
 /*
@@ -450,7 +442,7 @@
  *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
  */
 static struct nf_conntrack_tuple_hash *
-____nf_conntrack_find(struct net *net, u16 zone,
+____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
 		      const struct nf_conntrack_tuple *tuple, u32 hash)
 {
 	struct nf_conntrack_tuple_hash *h;
@@ -486,7 +478,7 @@
 
 /* Find a connection corresponding to a tuple. */
 static struct nf_conntrack_tuple_hash *
-__nf_conntrack_find_get(struct net *net, u16 zone,
+__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
 			const struct nf_conntrack_tuple *tuple, u32 hash)
 {
 	struct nf_conntrack_tuple_hash *h;
@@ -513,11 +505,11 @@
 }
 
 struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, u16 zone,
+nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
 		      const struct nf_conntrack_tuple *tuple)
 {
 	return __nf_conntrack_find_get(net, zone, tuple,
-				       hash_conntrack_raw(tuple, zone));
+				       hash_conntrack_raw(tuple));
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
 
@@ -536,11 +528,11 @@
 int
 nf_conntrack_hash_check_insert(struct nf_conn *ct)
 {
+	const struct nf_conntrack_zone *zone;
 	struct net *net = nf_ct_net(ct);
 	unsigned int hash, reply_hash;
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_node *n;
-	u16 zone;
 	unsigned int sequence;
 
 	zone = nf_ct_zone(ct);
@@ -548,9 +540,9 @@
 	local_bh_disable();
 	do {
 		sequence = read_seqcount_begin(&net->ct.generation);
-		hash = hash_conntrack(net, zone,
+		hash = hash_conntrack(net,
 				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-		reply_hash = hash_conntrack(net, zone,
+		reply_hash = hash_conntrack(net,
 					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
@@ -558,12 +550,14 @@
 	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
 				      &h->tuple) &&
-		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+				     NF_CT_DIRECTION(h)))
 			goto out;
 	hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
 				      &h->tuple) &&
-		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+				     NF_CT_DIRECTION(h)))
 			goto out;
 
 	add_timer(&ct->timeout);
@@ -588,6 +582,7 @@
 int
 __nf_conntrack_confirm(struct sk_buff *skb)
 {
+	const struct nf_conntrack_zone *zone;
 	unsigned int hash, reply_hash;
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
@@ -596,7 +591,6 @@
 	struct hlist_nulls_node *n;
 	enum ip_conntrack_info ctinfo;
 	struct net *net;
-	u16 zone;
 	unsigned int sequence;
 
 	ct = nf_ct_get(skb, &ctinfo);
@@ -617,7 +611,7 @@
 		/* reuse the hash saved before */
 		hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
 		hash = hash_bucket(hash, net);
-		reply_hash = hash_conntrack(net, zone,
+		reply_hash = hash_conntrack(net,
 					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
 	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
@@ -649,12 +643,14 @@
 	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
 				      &h->tuple) &&
-		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+				     NF_CT_DIRECTION(h)))
 			goto out;
 	hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
 				      &h->tuple) &&
-		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+				     NF_CT_DIRECTION(h)))
 			goto out;
 
 	/* Timer relative to confirmation time, not original
@@ -707,11 +703,14 @@
 			 const struct nf_conn *ignored_conntrack)
 {
 	struct net *net = nf_ct_net(ignored_conntrack);
+	const struct nf_conntrack_zone *zone;
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_node *n;
 	struct nf_conn *ct;
-	u16 zone = nf_ct_zone(ignored_conntrack);
-	unsigned int hash = hash_conntrack(net, zone, tuple);
+	unsigned int hash;
+
+	zone = nf_ct_zone(ignored_conntrack);
+	hash = hash_conntrack(net, tuple);
 
 	/* Disable BHs the entire time since we need to disable them at
 	 * least once for the stats anyway.
@@ -721,7 +720,7 @@
 		ct = nf_ct_tuplehash_to_ctrack(h);
 		if (ct != ignored_conntrack &&
 		    nf_ct_tuple_equal(tuple, &h->tuple) &&
-		    nf_ct_zone(ct) == zone) {
+		    nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) {
 			NF_CT_STAT_INC(net, found);
 			rcu_read_unlock_bh();
 			return 1;
@@ -810,7 +809,8 @@
 }
 
 static struct nf_conn *
-__nf_conntrack_alloc(struct net *net, u16 zone,
+__nf_conntrack_alloc(struct net *net,
+		     const struct nf_conntrack_zone *zone,
 		     const struct nf_conntrack_tuple *orig,
 		     const struct nf_conntrack_tuple *repl,
 		     gfp_t gfp, u32 hash)
@@ -820,7 +820,7 @@
 	if (unlikely(!nf_conntrack_hash_rnd)) {
 		init_nf_conntrack_hash_rnd();
 		/* recompute the hash as nf_conntrack_hash_rnd is initialized */
-		hash = hash_conntrack_raw(orig, zone);
+		hash = hash_conntrack_raw(orig);
 	}
 
 	/* We don't want any race condition at early drop stage */
@@ -840,10 +840,9 @@
 	 * SLAB_DESTROY_BY_RCU.
 	 */
 	ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
-	if (ct == NULL) {
-		atomic_dec(&net->ct.count);
-		return ERR_PTR(-ENOMEM);
-	}
+	if (ct == NULL)
+		goto out;
+
 	spin_lock_init(&ct->lock);
 	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
 	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
@@ -857,31 +856,24 @@
 	memset(&ct->__nfct_init_offset[0], 0,
 	       offsetof(struct nf_conn, proto) -
 	       offsetof(struct nf_conn, __nfct_init_offset[0]));
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-	if (zone) {
-		struct nf_conntrack_zone *nf_ct_zone;
 
-		nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
-		if (!nf_ct_zone)
-			goto out_free;
-		nf_ct_zone->id = zone;
-	}
-#endif
+	if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0)
+		goto out_free;
+
 	/* Because we use RCU lookups, we set ct_general.use to zero before
 	 * this is inserted in any list.
 	 */
 	atomic_set(&ct->ct_general.use, 0);
 	return ct;
-
-#ifdef CONFIG_NF_CONNTRACK_ZONES
 out_free:
-	atomic_dec(&net->ct.count);
 	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
+out:
+	atomic_dec(&net->ct.count);
 	return ERR_PTR(-ENOMEM);
-#endif
 }
 
-struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+				   const struct nf_conntrack_zone *zone,
 				   const struct nf_conntrack_tuple *orig,
 				   const struct nf_conntrack_tuple *repl,
 				   gfp_t gfp)
@@ -923,8 +915,9 @@
 	struct nf_conntrack_tuple repl_tuple;
 	struct nf_conntrack_ecache *ecache;
 	struct nf_conntrack_expect *exp = NULL;
-	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+	const struct nf_conntrack_zone *zone;
 	struct nf_conn_timeout *timeout_ext;
+	struct nf_conntrack_zone tmp;
 	unsigned int *timeouts;
 
 	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
@@ -932,6 +925,7 @@
 		return NULL;
 	}
 
+	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
 	ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
 				  hash);
 	if (IS_ERR(ct))
@@ -1026,10 +1020,11 @@
 		  int *set_reply,
 		  enum ip_conntrack_info *ctinfo)
 {
+	const struct nf_conntrack_zone *zone;
 	struct nf_conntrack_tuple tuple;
 	struct nf_conntrack_tuple_hash *h;
+	struct nf_conntrack_zone tmp;
 	struct nf_conn *ct;
-	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
 	u32 hash;
 
 	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
@@ -1040,7 +1035,8 @@
 	}
 
 	/* look for tuple match */
-	hash = hash_conntrack_raw(&tuple, zone);
+	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
+	hash = hash_conntrack_raw(&tuple);
 	h = __nf_conntrack_find_get(net, zone, &tuple, hash);
 	if (!h) {
 		h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
@@ -1596,8 +1592,7 @@
 					struct nf_conntrack_tuple_hash, hnnode);
 			ct = nf_ct_tuplehash_to_ctrack(h);
 			hlist_nulls_del_rcu(&h->hnnode);
-			bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
-						  hashsize);
+			bucket = __hash_conntrack(&h->tuple, hashsize);
 			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
 		}
 	}
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index b45a422..acf5c7b 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -88,7 +88,8 @@
 }
 
 struct nf_conntrack_expect *
-__nf_ct_expect_find(struct net *net, u16 zone,
+__nf_ct_expect_find(struct net *net,
+		    const struct nf_conntrack_zone *zone,
 		    const struct nf_conntrack_tuple *tuple)
 {
 	struct nf_conntrack_expect *i;
@@ -100,7 +101,7 @@
 	h = nf_ct_expect_dst_hash(tuple);
 	hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
 		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
-		    nf_ct_zone(i->master) == zone)
+		    nf_ct_zone_equal_any(i->master, zone))
 			return i;
 	}
 	return NULL;
@@ -109,7 +110,8 @@
 
 /* Just find an expectation corresponding to a tuple. */
 struct nf_conntrack_expect *
-nf_ct_expect_find_get(struct net *net, u16 zone,
+nf_ct_expect_find_get(struct net *net,
+		      const struct nf_conntrack_zone *zone,
 		      const struct nf_conntrack_tuple *tuple)
 {
 	struct nf_conntrack_expect *i;
@@ -127,7 +129,8 @@
 /* If an expectation for this connection is found, it gets deleted from
  * the global list, then returned. */
 struct nf_conntrack_expect *
-nf_ct_find_expectation(struct net *net, u16 zone,
+nf_ct_find_expectation(struct net *net,
+		       const struct nf_conntrack_zone *zone,
 		       const struct nf_conntrack_tuple *tuple)
 {
 	struct nf_conntrack_expect *i, *exp = NULL;
@@ -140,7 +143,7 @@
 	hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
 		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
 		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
-		    nf_ct_zone(i->master) == zone) {
+		    nf_ct_zone_equal_any(i->master, zone)) {
 			exp = i;
 			break;
 		}
@@ -220,16 +223,16 @@
 	}
 
 	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
-	       nf_ct_zone(a->master) == nf_ct_zone(b->master);
+	       nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
 }
 
 static inline int expect_matches(const struct nf_conntrack_expect *a,
 				 const struct nf_conntrack_expect *b)
 {
 	return a->master == b->master && a->class == b->class &&
-		nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
-		nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
-		nf_ct_zone(a->master) == nf_ct_zone(b->master);
+	       nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
+	       nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
+	       nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
 }
 
 /* Generally a bad idea to call this: could have matched already. */
diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c
index bb53f12..3ce5c31 100644
--- a/net/netfilter/nf_conntrack_labels.c
+++ b/net/netfilter/nf_conntrack_labels.c
@@ -14,6 +14,8 @@
 #include <net/netfilter/nf_conntrack_ecache.h>
 #include <net/netfilter/nf_conntrack_labels.h>
 
+static spinlock_t nf_connlabels_lock;
+
 static unsigned int label_bits(const struct nf_conn_labels *l)
 {
 	unsigned int longs = l->words;
@@ -48,7 +50,6 @@
 }
 EXPORT_SYMBOL_GPL(nf_connlabel_set);
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 static void replace_u32(u32 *address, u32 mask, u32 new)
 {
 	u32 old, tmp;
@@ -89,7 +90,35 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nf_connlabels_replace);
-#endif
+
+int nf_connlabels_get(struct net *net, unsigned int n_bits)
+{
+	size_t words;
+
+	if (n_bits > (NF_CT_LABELS_MAX_SIZE * BITS_PER_BYTE))
+		return -ERANGE;
+
+	words = BITS_TO_LONGS(n_bits);
+
+	spin_lock(&nf_connlabels_lock);
+	net->ct.labels_used++;
+	if (words > net->ct.label_words)
+		net->ct.label_words = words;
+	spin_unlock(&nf_connlabels_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nf_connlabels_get);
+
+void nf_connlabels_put(struct net *net)
+{
+	spin_lock(&nf_connlabels_lock);
+	net->ct.labels_used--;
+	if (net->ct.labels_used == 0)
+		net->ct.label_words = 0;
+	spin_unlock(&nf_connlabels_lock);
+}
+EXPORT_SYMBOL_GPL(nf_connlabels_put);
 
 static struct nf_ct_ext_type labels_extend __read_mostly = {
 	.len    = sizeof(struct nf_conn_labels),
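
The get/put pair above turns label space into a per-netns refcount that grows to the largest request. A hypothetical consumer as a sketch -- the pernet callbacks and the 128-bit request are illustrative (128 is the NF_CT_LABELS_MAX_SIZE * BITS_PER_BYTE ceiling enforced above):

static int __net_init my_labels_init(struct net *net)
{
	return nf_connlabels_get(net, 128);	/* >128 bits => -ERANGE */
}

static void __net_exit my_labels_exit(struct net *net)
{
	nf_connlabels_put(net);
}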
@@ -99,6 +128,7 @@
 
 int nf_conntrack_labels_init(void)
 {
+	spin_lock_init(&nf_connlabels_lock);
 	return nf_ct_extend_register(&labels_extend);
 }
 
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6b8b0ab..94a6654 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -128,6 +128,20 @@
 }
 
 static inline int
+ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
+		       const struct nf_conntrack_zone *zone, int dir)
+{
+	if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
+		return 0;
+	if (nla_put_be16(skb, attrtype, htons(zone->id)))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static inline int
 ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
 {
 	if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
@@ -458,6 +472,7 @@
 ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
 		    struct nf_conn *ct)
 {
+	const struct nf_conntrack_zone *zone;
 	struct nlmsghdr *nlh;
 	struct nfgenmsg *nfmsg;
 	struct nlattr *nest_parms;
@@ -473,11 +488,16 @@
 	nfmsg->version      = NFNETLINK_V0;
 	nfmsg->res_id	    = 0;
 
+	zone = nf_ct_zone(ct);
+
 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
 	if (!nest_parms)
 		goto nla_put_failure;
 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
 		goto nla_put_failure;
+	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+				   NF_CT_ZONE_DIR_ORIG) < 0)
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 
 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
@@ -485,10 +505,13 @@
 		goto nla_put_failure;
 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
 		goto nla_put_failure;
+	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+				   NF_CT_ZONE_DIR_REPL) < 0)
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 
-	if (nf_ct_zone(ct) &&
-	    nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+				   NF_CT_DEFAULT_ZONE_DIR) < 0)
 		goto nla_put_failure;
 
 	if (ctnetlink_dump_status(skb, ct) < 0 ||
@@ -598,7 +621,7 @@
 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
 #endif
 #ifdef CONFIG_NF_CONNTRACK_ZONES
-	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
+	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
 #endif
 	       + ctnetlink_proto_size(ct)
 	       + ctnetlink_label_size(ct)
@@ -609,6 +632,7 @@
 static int
 ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
 {
+	const struct nf_conntrack_zone *zone;
 	struct net *net;
 	struct nlmsghdr *nlh;
 	struct nfgenmsg *nfmsg;
@@ -655,11 +679,16 @@
 	nfmsg->res_id	= 0;
 
 	rcu_read_lock();
+	zone = nf_ct_zone(ct);
+
 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
 	if (!nest_parms)
 		goto nla_put_failure;
 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
 		goto nla_put_failure;
+	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+				   NF_CT_ZONE_DIR_ORIG) < 0)
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 
 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
@@ -667,10 +696,13 @@
 		goto nla_put_failure;
 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
 		goto nla_put_failure;
+	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+				   NF_CT_ZONE_DIR_REPL) < 0)
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 
-	if (nf_ct_zone(ct) &&
-	    nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+				   NF_CT_DEFAULT_ZONE_DIR) < 0)
 		goto nla_put_failure;
 
 	if (ctnetlink_dump_id(skb, ct) < 0)
@@ -920,15 +952,54 @@
 	return ret;
 }
 
+static int
+ctnetlink_parse_zone(const struct nlattr *attr,
+		     struct nf_conntrack_zone *zone)
+{
+	nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
+			NF_CT_DEFAULT_ZONE_DIR, 0);
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+	if (attr)
+		zone->id = ntohs(nla_get_be16(attr));
+#else
+	if (attr)
+		return -EOPNOTSUPP;
+#endif
+	return 0;
+}
+
+static int
+ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type,
+			   struct nf_conntrack_zone *zone)
+{
+	int ret;
+
+	if (zone->id != NF_CT_DEFAULT_ZONE_ID)
+		return -EINVAL;
+
+	ret = ctnetlink_parse_zone(attr, zone);
+	if (ret < 0)
+		return ret;
+
+	if (type == CTA_TUPLE_REPLY)
+		zone->dir = NF_CT_ZONE_DIR_REPL;
+	else
+		zone->dir = NF_CT_ZONE_DIR_ORIG;
+
+	return 0;
+}
+
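
Taken together, the two parsers enforce one zone source per message: CTA_ZONE marks the whole conntrack, while CTA_TUPLE_ZONE inside CTA_TUPLE_ORIG or CTA_TUPLE_REPLY marks a single direction, and mixing them (or setting a second zone once one is in place) fails with -EINVAL. Sketched as hypothetical message shapes:

/* Accepted (sketch):
 *   CTA_ZONE = 7                              whole entry in zone 7
 *   CTA_TUPLE_ORIG { ..., CTA_TUPLE_ZONE = 7 }
 *                                             zone 7, original dir only
 * Rejected with -EINVAL (sketch):
 *   CTA_ZONE = 7 together with any CTA_TUPLE_ZONE
 */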
 static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
 	[CTA_TUPLE_IP]		= { .type = NLA_NESTED },
 	[CTA_TUPLE_PROTO]	= { .type = NLA_NESTED },
+	[CTA_TUPLE_ZONE]	= { .type = NLA_U16 },
 };
 
 static int
 ctnetlink_parse_tuple(const struct nlattr * const cda[],
 		      struct nf_conntrack_tuple *tuple,
-		      enum ctattr_type type, u_int8_t l3num)
+		      enum ctattr_type type, u_int8_t l3num,
+		      struct nf_conntrack_zone *zone)
 {
 	struct nlattr *tb[CTA_TUPLE_MAX+1];
 	int err;
@@ -955,6 +1026,16 @@
 	if (err < 0)
 		return err;
 
+	if (tb[CTA_TUPLE_ZONE]) {
+		if (!zone)
+			return -EINVAL;
+
+		err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
+						 type, zone);
+		if (err < 0)
+			return err;
+	}
+
 	/* orig and expect tuples get DIR_ORIGINAL */
 	if (type == CTA_TUPLE_REPLY)
 		tuple->dst.dir = IP_CT_DIR_REPLY;
@@ -964,21 +1045,6 @@
 	return 0;
 }
 
-static int
-ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
-{
-	if (attr)
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-		*zone = ntohs(nla_get_be16(attr));
-#else
-		return -EOPNOTSUPP;
-#endif
-	else
-		*zone = 0;
-
-	return 0;
-}
-
 static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
 	[CTA_HELP_NAME]		= { .type = NLA_NUL_STRING,
 				    .len = NF_CT_HELPER_NAME_LEN - 1 },
@@ -1058,7 +1124,7 @@
 	struct nf_conn *ct;
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u_int8_t u3 = nfmsg->nfgen_family;
-	u16 zone;
+	struct nf_conntrack_zone zone;
 	int err;
 
 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
@@ -1066,9 +1132,11 @@
 		return err;
 
 	if (cda[CTA_TUPLE_ORIG])
-		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
+		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
+					    u3, &zone);
 	else if (cda[CTA_TUPLE_REPLY])
-		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
+		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
+					    u3, &zone);
 	else {
 		return ctnetlink_flush_conntrack(net, cda,
 						 NETLINK_CB(skb).portid,
@@ -1078,7 +1146,7 @@
 	if (err < 0)
 		return err;
 
-	h = nf_conntrack_find_get(net, zone, &tuple);
+	h = nf_conntrack_find_get(net, &zone, &tuple);
 	if (!h)
 		return -ENOENT;
 
@@ -1112,7 +1180,7 @@
 	struct sk_buff *skb2 = NULL;
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u_int8_t u3 = nfmsg->nfgen_family;
-	u16 zone;
+	struct nf_conntrack_zone zone;
 	int err;
 
 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -1138,16 +1206,18 @@
 		return err;
 
 	if (cda[CTA_TUPLE_ORIG])
-		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
+		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
+					    u3, &zone);
 	else if (cda[CTA_TUPLE_REPLY])
-		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
+		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
+					    u3, &zone);
 	else
 		return -EINVAL;
 
 	if (err < 0)
 		return err;
 
-	h = nf_conntrack_find_get(net, zone, &tuple);
+	h = nf_conntrack_find_get(net, &zone, &tuple);
 	if (!h)
 		return -ENOENT;
 
@@ -1645,7 +1715,8 @@
 }
 
 static struct nf_conn *
-ctnetlink_create_conntrack(struct net *net, u16 zone,
+ctnetlink_create_conntrack(struct net *net,
+			   const struct nf_conntrack_zone *zone,
 			   const struct nlattr * const cda[],
 			   struct nf_conntrack_tuple *otuple,
 			   struct nf_conntrack_tuple *rtuple,
@@ -1761,7 +1832,8 @@
 		struct nf_conntrack_tuple_hash *master_h;
 		struct nf_conn *master_ct;
 
-		err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
+		err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER,
+					    u3, NULL);
 		if (err < 0)
 			goto err2;
 
@@ -1804,7 +1876,7 @@
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	struct nf_conn *ct;
 	u_int8_t u3 = nfmsg->nfgen_family;
-	u16 zone;
+	struct nf_conntrack_zone zone;
 	int err;
 
 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
@@ -1812,21 +1884,23 @@
 		return err;
 
 	if (cda[CTA_TUPLE_ORIG]) {
-		err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
+		err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG,
+					    u3, &zone);
 		if (err < 0)
 			return err;
 	}
 
 	if (cda[CTA_TUPLE_REPLY]) {
-		err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
+		err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY,
+					    u3, &zone);
 		if (err < 0)
 			return err;
 	}
 
 	if (cda[CTA_TUPLE_ORIG])
-		h = nf_conntrack_find_get(net, zone, &otuple);
+		h = nf_conntrack_find_get(net, &zone, &otuple);
 	else if (cda[CTA_TUPLE_REPLY])
-		h = nf_conntrack_find_get(net, zone, &rtuple);
+		h = nf_conntrack_find_get(net, &zone, &rtuple);
 
 	if (h == NULL) {
 		err = -ENOENT;
@@ -1836,7 +1910,7 @@
 			if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
 				return -EINVAL;
 
-			ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
+			ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
 							&rtuple, u3);
 			if (IS_ERR(ct))
 				return PTR_ERR(ct);
@@ -2082,7 +2156,7 @@
 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
 #endif
 #ifdef CONFIG_NF_CONNTRACK_ZONES
-	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
+	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
 #endif
 	       + ctnetlink_proto_size(ct)
 	       ;
@@ -2091,14 +2165,20 @@
 static int
 ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
 {
+	const struct nf_conntrack_zone *zone;
 	struct nlattr *nest_parms;
 
 	rcu_read_lock();
+	zone = nf_ct_zone(ct);
+
 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
 	if (!nest_parms)
 		goto nla_put_failure;
 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
 		goto nla_put_failure;
+	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+				   NF_CT_ZONE_DIR_ORIG) < 0)
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 
 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
@@ -2106,12 +2186,14 @@
 		goto nla_put_failure;
 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
 		goto nla_put_failure;
+	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+				   NF_CT_ZONE_DIR_REPL) < 0)
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 
-	if (nf_ct_zone(ct)) {
-		if (nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
-			goto nla_put_failure;
-	}
+	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+				   NF_CT_DEFAULT_ZONE_DIR) < 0)
+		goto nla_put_failure;
 
 	if (ctnetlink_dump_id(skb, ct) < 0)
 		goto nla_put_failure;
@@ -2218,12 +2300,12 @@
 	int err;
 
 	err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
-				    nf_ct_l3num(ct));
+				    nf_ct_l3num(ct), NULL);
 	if (err < 0)
 		return err;
 
 	return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
-				     nf_ct_l3num(ct));
+				     nf_ct_l3num(ct), NULL);
 }
 
 static int
@@ -2612,23 +2694,22 @@
 	struct nf_conntrack_tuple tuple;
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
-	u16 zone = 0;
+	struct nf_conntrack_zone zone;
 	struct netlink_dump_control c = {
 		.dump = ctnetlink_exp_ct_dump_table,
 		.done = ctnetlink_exp_done,
 	};
 
-	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
+	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
+				    u3, NULL);
 	if (err < 0)
 		return err;
 
-	if (cda[CTA_EXPECT_ZONE]) {
-		err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
-		if (err < 0)
-			return err;
-	}
+	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
+	if (err < 0)
+		return err;
 
-	h = nf_conntrack_find_get(net, zone, &tuple);
+	h = nf_conntrack_find_get(net, &zone, &tuple);
 	if (!h)
 		return -ENOENT;
 
@@ -2652,7 +2733,7 @@
 	struct sk_buff *skb2;
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u_int8_t u3 = nfmsg->nfgen_family;
-	u16 zone;
+	struct nf_conntrack_zone zone;
 	int err;
 
 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -2672,16 +2753,18 @@
 		return err;
 
 	if (cda[CTA_EXPECT_TUPLE])
-		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+					    u3, NULL);
 	else if (cda[CTA_EXPECT_MASTER])
-		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
+		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
+					    u3, NULL);
 	else
 		return -EINVAL;
 
 	if (err < 0)
 		return err;
 
-	exp = nf_ct_expect_find_get(net, zone, &tuple);
+	exp = nf_ct_expect_find_get(net, &zone, &tuple);
 	if (!exp)
 		return -ENOENT;
 
@@ -2732,8 +2815,8 @@
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	struct hlist_node *next;
 	u_int8_t u3 = nfmsg->nfgen_family;
+	struct nf_conntrack_zone zone;
 	unsigned int i;
-	u16 zone;
 	int err;
 
 	if (cda[CTA_EXPECT_TUPLE]) {
@@ -2742,12 +2825,13 @@
 		if (err < 0)
 			return err;
 
-		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+					    u3, NULL);
 		if (err < 0)
 			return err;
 
 		/* bump usage count to 2 */
-		exp = nf_ct_expect_find_get(net, zone, &tuple);
+		exp = nf_ct_expect_find_get(net, &zone, &tuple);
 		if (!exp)
 			return -ENOENT;
 
@@ -2849,7 +2933,8 @@
 		return -EINVAL;
 
 	err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
-					&nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
+				    &nat_tuple, CTA_EXPECT_NAT_TUPLE,
+				    u3, NULL);
 	if (err < 0)
 		return err;
 
@@ -2937,7 +3022,8 @@
 }
 
 static int
-ctnetlink_create_expect(struct net *net, u16 zone,
+ctnetlink_create_expect(struct net *net,
+			const struct nf_conntrack_zone *zone,
 			const struct nlattr * const cda[],
 			u_int8_t u3, u32 portid, int report)
 {
@@ -2949,13 +3035,16 @@
 	int err;
 
 	/* caller guarantees that those three CTA_EXPECT_* exist */
-	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+				    u3, NULL);
 	if (err < 0)
 		return err;
-	err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
+	err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK,
+				    u3, NULL);
 	if (err < 0)
 		return err;
-	err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
+	err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER,
+				    u3, NULL);
 	if (err < 0)
 		return err;
 
@@ -3011,7 +3100,7 @@
 	struct nf_conntrack_expect *exp;
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u_int8_t u3 = nfmsg->nfgen_family;
-	u16 zone;
+	struct nf_conntrack_zone zone;
 	int err;
 
 	if (!cda[CTA_EXPECT_TUPLE]
@@ -3023,19 +3112,18 @@
 	if (err < 0)
 		return err;
 
-	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+				    u3, NULL);
 	if (err < 0)
 		return err;
 
 	spin_lock_bh(&nf_conntrack_expect_lock);
-	exp = __nf_ct_expect_find(net, zone, &tuple);
-
+	exp = __nf_ct_expect_find(net, &zone, &tuple);
 	if (!exp) {
 		spin_unlock_bh(&nf_conntrack_expect_lock);
 		err = -ENOENT;
 		if (nlh->nlmsg_flags & NLM_F_CREATE) {
-			err = ctnetlink_create_expect(net, zone, cda,
-						      u3,
+			err = ctnetlink_create_expect(net, &zone, cda, u3,
 						      NETLINK_CB(skb).portid,
 						      nlmsg_report(nlh));
 		}
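
With this conversion ctnetlink carries a full struct nf_conntrack_zone
instead of a bare u16 id, which allows a zone to be scoped to a single
direction of a flow. A minimal userspace sketch of the direction-aware
comparison this enables (field names and the fall-back-to-zone-0 rule
are simplified assumptions, not the kernel definitions):

#include <stdbool.h>
#include <stdio.h>

enum { ZONE_DIR_ORIG = 1, ZONE_DIR_REPL = 2,
       ZONE_DIR_BOTH = ZONE_DIR_ORIG | ZONE_DIR_REPL };

struct zone {
	unsigned short id;
	unsigned char dir;	/* direction(s) the id applies to */
};

/* effective id for a lookup direction: the default zone (0) is used
 * whenever the zone does not cover that direction */
static unsigned short zone_id(const struct zone *z, int reply_dir)
{
	int bit = reply_dir ? ZONE_DIR_REPL : ZONE_DIR_ORIG;

	return (z->dir & bit) ? z->id : 0;
}

static bool zone_equal(const struct zone *a, const struct zone *b,
		       int reply_dir)
{
	return zone_id(a, reply_dir) == zone_id(b, reply_dir);
}

int main(void)
{
	struct zone orig_only = { .id = 7, .dir = ZONE_DIR_ORIG };
	struct zone both      = { .id = 7, .dir = ZONE_DIR_BOTH };

	printf("orig dir equal: %d\n", zone_equal(&orig_only, &both, 0)); /* 1 */
	printf("repl dir equal: %d\n", zone_equal(&orig_only, &both, 1)); /* 0 */
	return 0;
}
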
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 825c3e3..5588c7a 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -143,13 +143,14 @@
 				  const struct nf_conntrack_tuple *t)
 {
 	const struct nf_conntrack_tuple_hash *h;
+	const struct nf_conntrack_zone *zone;
 	struct nf_conntrack_expect *exp;
 	struct nf_conn *sibling;
-	u16 zone = nf_ct_zone(ct);
 
 	pr_debug("trying to timeout ct or exp for tuple ");
 	nf_ct_dump_tuple(t);
 
+	zone = nf_ct_zone(ct);
 	h = nf_conntrack_find_get(net, zone, t);
 	if (h)  {
 		sibling = nf_ct_tuplehash_to_ctrack(h);
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index b45da90..6719773 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -42,6 +42,8 @@
 	"SHUTDOWN_SENT",
 	"SHUTDOWN_RECD",
 	"SHUTDOWN_ACK_SENT",
+	"HEARTBEAT_SENT",
+	"HEARTBEAT_ACKED",
 };
 
 #define SECS  * HZ
@@ -57,6 +59,8 @@
 	[SCTP_CONNTRACK_SHUTDOWN_SENT]		= 300 SECS / 1000,
 	[SCTP_CONNTRACK_SHUTDOWN_RECD]		= 300 SECS / 1000,
 	[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT]	= 3 SECS,
+	[SCTP_CONNTRACK_HEARTBEAT_SENT]		= 30 SECS,
+	[SCTP_CONNTRACK_HEARTBEAT_ACKED]	= 210 SECS,
 };
 
 #define sNO SCTP_CONNTRACK_NONE
@@ -67,6 +71,8 @@
 #define	sSS SCTP_CONNTRACK_SHUTDOWN_SENT
 #define	sSR SCTP_CONNTRACK_SHUTDOWN_RECD
 #define	sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
+#define	sHS SCTP_CONNTRACK_HEARTBEAT_SENT
+#define	sHA SCTP_CONNTRACK_HEARTBEAT_ACKED
 #define	sIV SCTP_CONNTRACK_MAX
 
 /*
@@ -88,6 +94,10 @@
 		    to that of the SHUTDOWN chunk.
 CLOSED            - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
 		    the SHUTDOWN chunk. Connection is closed.
+HEARTBEAT_SENT    - We have seen a HEARTBEAT in a new flow.
+HEARTBEAT_ACKED   - We have seen a HEARTBEAT-ACK in the direction opposite to
+		    that of the HEARTBEAT chunk. Secondary connection is
+		    established.
 */
 
 /* TODO
@@ -97,36 +107,40 @@
  - Check the error type in the reply dir before transitioning from
 cookie echoed to closed.
  - Sec 5.2.4 of RFC 2960
- - Multi Homing support.
+ - Full Multi Homing support.
 */
 
 /* SCTP conntrack state transitions */
-static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
+static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
 	{
 /*	ORIGINAL	*/
-/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */
-/* init         */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA},
-/* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},
-/* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
-/* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA},
-/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA},
-/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't have Stale cookie*/
-/* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA},/* 5.2.4 - Big TODO */
-/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in orig dir */
-/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL}
+/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
+/* init         */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
+/* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},
+/* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+/* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS},
+/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA, sHA},
+/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't have Stale cookie */
+/* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* 5.2.4 - Big TODO */
+/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't come in orig dir */
+/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL, sHA},
+/* heartbeat    */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
+/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA}
 	},
 	{
 /*	REPLY	*/
-/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */
-/* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* INIT in sCL Big TODO */
-/* init_ack     */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},
-/* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
-/* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA},
-/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA},
-/* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA},
-/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in reply dir */
-/* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA},
-/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL}
+/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
+/* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */
+/* init_ack     */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
+/* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL},
+/* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR},
+/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA},
+/* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV, sHA},
+/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* Can't come in reply dir */
+/* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV, sHA},
+/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV, sHA},
+/* heartbeat    */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
+/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA}
 	}
 };
 
@@ -278,9 +292,16 @@
 		pr_debug("SCTP_CID_SHUTDOWN_COMPLETE\n");
 		i = 8;
 		break;
+	case SCTP_CID_HEARTBEAT:
+		pr_debug("SCTP_CID_HEARTBEAT");
+		i = 9;
+		break;
+	case SCTP_CID_HEARTBEAT_ACK:
+		pr_debug("SCTP_CID_HEARTBEAT_ACK");
+		i = 10;
+		break;
 	default:
-		/* Other chunks like DATA, SACK, HEARTBEAT and
-		its ACK do not cause a change in state */
+		/* Other chunks like DATA or SACK do not change the state */
 		pr_debug("Unknown chunk type, Will stay in %s\n",
 			 sctp_conntrack_names[cur_state]);
 		return cur_state;
@@ -329,6 +350,8 @@
 	    !test_bit(SCTP_CID_COOKIE_ECHO, map) &&
 	    !test_bit(SCTP_CID_ABORT, map) &&
 	    !test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
+	    !test_bit(SCTP_CID_HEARTBEAT, map) &&
+	    !test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
 	    sh->vtag != ct->proto.sctp.vtag[dir]) {
 		pr_debug("Verification tag check failed\n");
 		goto out;
@@ -357,6 +380,16 @@
 			/* Sec 8.5.1 (D) */
 			if (sh->vtag != ct->proto.sctp.vtag[dir])
 				goto out_unlock;
+		} else if (sch->type == SCTP_CID_HEARTBEAT ||
+			   sch->type == SCTP_CID_HEARTBEAT_ACK) {
+			if (ct->proto.sctp.vtag[dir] == 0) {
+				pr_debug("Setting vtag %x for dir %d\n",
+					 sh->vtag, dir);
+				ct->proto.sctp.vtag[dir] = sh->vtag;
+			} else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
+				pr_debug("Verification tag check failed\n");
+				goto out_unlock;
+			}
 		}
 
 		old_state = ct->proto.sctp.state;
@@ -466,6 +499,10 @@
 				/* Sec 8.5.1 (A) */
 				return false;
 			}
+		} else if (sch->type == SCTP_CID_HEARTBEAT) {
+			pr_debug("Setting vtag %x for secondary conntrack\n",
+				 sh->vtag);
+			ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
 		}
 		/* If it is a shutdown ack OOTB packet, we expect a return
 		   shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
@@ -610,6 +647,8 @@
 	[CTA_TIMEOUT_SCTP_SHUTDOWN_SENT]	= { .type = NLA_U32 },
 	[CTA_TIMEOUT_SCTP_SHUTDOWN_RECD]	= { .type = NLA_U32 },
 	[CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT]	= { .type = NLA_U32 },
+	[CTA_TIMEOUT_SCTP_HEARTBEAT_SENT]	= { .type = NLA_U32 },
+	[CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED]	= { .type = NLA_U32 },
 };
 #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
 
@@ -658,6 +697,18 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
 	},
+	{
+		.procname	= "nf_conntrack_sctp_timeout_heartbeat_sent",
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_jiffies,
+	},
+	{
+		.procname	= "nf_conntrack_sctp_timeout_heartbeat_acked",
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_jiffies,
+	},
 	{ }
 };
 
@@ -730,6 +781,8 @@
 	pn->ctl_table[4].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT];
 	pn->ctl_table[5].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD];
 	pn->ctl_table[6].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT];
+	pn->ctl_table[7].data = &sn->timeouts[SCTP_CONNTRACK_HEARTBEAT_SENT];
+	pn->ctl_table[8].data = &sn->timeouts[SCTP_CONNTRACK_HEARTBEAT_ACKED];
 #endif
 	return 0;
 }
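
The tracker consumes the table above as new_state =
sctp_conntracks[dir][chunk_index][old_state]. A standalone excerpt
showing how a HEARTBEAT-ACK seen in the reply direction promotes a
HEARTBEAT_SENT flow to HEARTBEAT_ACKED (only the two reply-direction
heartbeat rows from the hunk above are reproduced; the rest of the
machine is omitted):

#include <stdio.h>

enum { sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sMAX,
       sIV = sMAX };

/* rows 9 (heartbeat) and 10 (heartbeat_ack) of the REPLY table */
static const unsigned char reply_heartbeat[2][sMAX] = {
/* heartbeat    */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA},
};

int main(void)
{
	int old_state = sHS;	/* we sent a HEARTBEAT on a new path */
	int new_state = reply_heartbeat[1][old_state];	/* HB-ACK replied */

	printf("sHS + reply HEARTBEAT-ACK -> %s\n",
	       new_state == sHA ? "sHA (secondary path established)"
				: "unexpected");
	return 0;
}
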
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index ce3e840c8..dff0f0c 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -103,9 +103,9 @@
 			 ntohl(sack->end_seq), ntohl(new_end_seq));
 
 		inet_proto_csum_replace4(&tcph->check, skb,
-					 sack->start_seq, new_start_seq, 0);
+					 sack->start_seq, new_start_seq, false);
 		inet_proto_csum_replace4(&tcph->check, skb,
-					 sack->end_seq, new_end_seq, 0);
+					 sack->end_seq, new_end_seq, false);
 		sack->start_seq = new_start_seq;
 		sack->end_seq = new_end_seq;
 		sackoff += sizeof(*sack);
@@ -193,8 +193,9 @@
 	newseq = htonl(ntohl(tcph->seq) + seqoff);
 	newack = htonl(ntohl(tcph->ack_seq) - ackoff);
 
-	inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
-	inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
+	inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, false);
+	inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack,
+				 false);
 
 	pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
 		 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
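
The 0 -> false changes here and in the NAT protocol helpers below track
an API change that made the last parameter of
inet_proto_csum_replace{2,4}() a bool ("pseudohdr", true when the
rewritten field is part of the pseudo-header). The helper itself is an
RFC 1624 incremental checksum update; a self-contained illustration of
that arithmetic (not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* HC' = ~(~HC + ~m + m'), RFC 1624 eqn. 3, for one 16-bit word */
static uint16_t csum_replace2(uint16_t check, uint16_t old, uint16_t newv)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old;
	sum += newv;
	return (uint16_t)~csum_fold(sum);
}

int main(void)
{
	uint16_t check = 0xbeef;	/* checksum covering word 0x1234 */
	uint16_t updated = csum_replace2(check, 0x1234, 0x5678);

	/* reversing the edit must restore the original checksum */
	printf("restored: %s\n",
	       csum_replace2(updated, 0x5678, 0x1234) == check ? "yes" : "no");
	return 0;
}
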
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index fc823fa..1fb3cac 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -140,6 +140,35 @@
 }
 #endif
 
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+static void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
+			 int dir)
+{
+	const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
+
+	if (zone->dir != dir)
+		return;
+	switch (zone->dir) {
+	case NF_CT_DEFAULT_ZONE_DIR:
+		seq_printf(s, "zone=%u ", zone->id);
+		break;
+	case NF_CT_ZONE_DIR_ORIG:
+		seq_printf(s, "zone-orig=%u ", zone->id);
+		break;
+	case NF_CT_ZONE_DIR_REPL:
+		seq_printf(s, "zone-reply=%u ", zone->id);
+		break;
+	default:
+		break;
+	}
+}
+#else
+static inline void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
+				int dir)
+{
+}
+#endif
+
 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
 static void ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
 {
@@ -202,6 +231,8 @@
 	print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
 		    l3proto, l4proto);
 
+	ct_show_zone(s, ct, NF_CT_ZONE_DIR_ORIG);
+
 	if (seq_has_overflowed(s))
 		goto release;
 
@@ -214,6 +245,8 @@
 	print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
 		    l3proto, l4proto);
 
+	ct_show_zone(s, ct, NF_CT_ZONE_DIR_REPL);
+
 	if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
 		goto release;
 
@@ -228,11 +261,7 @@
 #endif
 
 	ct_show_secctx(s, ct);
-
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-	seq_printf(s, "zone=%u ", nf_ct_zone(ct));
-#endif
-
+	ct_show_zone(s, ct, NF_CT_DEFAULT_ZONE_DIR);
 	ct_show_delta_time(s, ct);
 
 	seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use));
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 3992106..0655225 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -19,7 +19,7 @@
 /* nf_queue.c */
 int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem,
 	     struct nf_hook_state *state, unsigned int queuenum);
-void nf_queue_nf_hook_drop(struct nf_hook_ops *ops);
+void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops);
 int __init netfilter_queue_init(void);
 
 /* nf_log.c */
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 4e0b478..5113dfd 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -118,14 +118,13 @@
 
 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
-hash_by_src(const struct net *net, u16 zone,
-	    const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
 {
 	unsigned int hash;
 
 	/* Original src, to ensure we map it consistently if poss. */
 	hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
-		      tuple->dst.protonum ^ zone ^ nf_conntrack_hash_rnd);
+		      tuple->dst.protonum ^ nf_conntrack_hash_rnd);
 
 	return reciprocal_scale(hash, net->ct.nat_htable_size);
 }
@@ -185,20 +184,22 @@
 
 /* Only called for SRC manip */
 static int
-find_appropriate_src(struct net *net, u16 zone,
+find_appropriate_src(struct net *net,
+		     const struct nf_conntrack_zone *zone,
 		     const struct nf_nat_l3proto *l3proto,
 		     const struct nf_nat_l4proto *l4proto,
 		     const struct nf_conntrack_tuple *tuple,
 		     struct nf_conntrack_tuple *result,
 		     const struct nf_nat_range *range)
 {
-	unsigned int h = hash_by_src(net, zone, tuple);
+	unsigned int h = hash_by_src(net, tuple);
 	const struct nf_conn_nat *nat;
 	const struct nf_conn *ct;
 
 	hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) {
 		ct = nat->ct;
-		if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
+		if (same_src(ct, tuple) &&
+		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
 			/* Copy source part from reply tuple. */
 			nf_ct_invert_tuplepr(result,
 				       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
@@ -218,7 +219,8 @@
  * the ip with the lowest src-ip/dst-ip/proto usage.
  */
 static void
-find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
+find_best_ips_proto(const struct nf_conntrack_zone *zone,
+		    struct nf_conntrack_tuple *tuple,
 		    const struct nf_nat_range *range,
 		    const struct nf_conn *ct,
 		    enum nf_nat_manip_type maniptype)
@@ -258,7 +260,7 @@
 	 */
 	j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
 		   range->flags & NF_NAT_RANGE_PERSISTENT ?
-			0 : (__force u32)tuple->dst.u3.all[max] ^ zone);
+			0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);
 
 	full_range = false;
 	for (i = 0; i <= max; i++) {
@@ -297,10 +299,12 @@
 		 struct nf_conn *ct,
 		 enum nf_nat_manip_type maniptype)
 {
+	const struct nf_conntrack_zone *zone;
 	const struct nf_nat_l3proto *l3proto;
 	const struct nf_nat_l4proto *l4proto;
 	struct net *net = nf_ct_net(ct);
-	u16 zone = nf_ct_zone(ct);
+
+	zone = nf_ct_zone(ct);
 
 	rcu_read_lock();
 	l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
@@ -420,7 +424,7 @@
 	if (maniptype == NF_NAT_MANIP_SRC) {
 		unsigned int srchash;
 
-		srchash = hash_by_src(net, nf_ct_zone(ct),
+		srchash = hash_by_src(net,
 				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 		spin_lock_bh(&nf_nat_lock);
 		/* nf_conntrack_alter_reply might re-allocate extension area */
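
hash_by_src() no longer folds the zone id into the hash; with
direction-aware zones, equality is checked during the bucket walk via
nf_ct_zone_equal() instead. The bucket index itself still comes from
reciprocal_scale(), which maps a 32-bit hash onto [0, size) with a
multiply and a shift rather than a modulo. A userspace sketch of that
mapping:

#include <stdint.h>
#include <stdio.h>

static inline uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	uint32_t htable_size = 4096;
	uint32_t hashes[] = { 0x00000000u, 0x80000000u, 0xffffffffu };

	for (unsigned int i = 0; i < 3; i++)
		printf("hash 0x%08x -> bucket %u\n", hashes[i],
		       reciprocal_scale(hashes[i], htable_size));
	/* prints buckets 0, 2048 and 4095 */
	return 0;
}
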
diff --git a/net/netfilter/nf_nat_proto_dccp.c b/net/netfilter/nf_nat_proto_dccp.c
index b8067b5..15c47b2 100644
--- a/net/netfilter/nf_nat_proto_dccp.c
+++ b/net/netfilter/nf_nat_proto_dccp.c
@@ -69,7 +69,7 @@
 	l3proto->csum_update(skb, iphdroff, &hdr->dccph_checksum,
 			     tuple, maniptype);
 	inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
-				 0);
+				 false);
 	return true;
 }
 
diff --git a/net/netfilter/nf_nat_proto_tcp.c b/net/netfilter/nf_nat_proto_tcp.c
index 37f5505..4f8820f 100644
--- a/net/netfilter/nf_nat_proto_tcp.c
+++ b/net/netfilter/nf_nat_proto_tcp.c
@@ -70,7 +70,7 @@
 		return true;
 
 	l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
-	inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0);
+	inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, false);
 	return true;
 }
 
diff --git a/net/netfilter/nf_nat_proto_udp.c b/net/netfilter/nf_nat_proto_udp.c
index b0ede2f..b1e6272 100644
--- a/net/netfilter/nf_nat_proto_udp.c
+++ b/net/netfilter/nf_nat_proto_udp.c
@@ -57,7 +57,7 @@
 		l3proto->csum_update(skb, iphdroff, &hdr->check,
 				     tuple, maniptype);
 		inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
-					 0);
+					 false);
 		if (!hdr->check)
 			hdr->check = CSUM_MANGLED_0;
 	}
diff --git a/net/netfilter/nf_nat_proto_udplite.c b/net/netfilter/nf_nat_proto_udplite.c
index 368f14e..58340c97 100644
--- a/net/netfilter/nf_nat_proto_udplite.c
+++ b/net/netfilter/nf_nat_proto_udplite.c
@@ -56,7 +56,7 @@
 	}
 
 	l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
-	inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 0);
+	inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, false);
 	if (!hdr->check)
 		hdr->check = CSUM_MANGLED_0;
 
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 8a8b2ab..96777f9 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -105,21 +105,15 @@
 }
 EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
 
-void nf_queue_nf_hook_drop(struct nf_hook_ops *ops)
+void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
 {
 	const struct nf_queue_handler *qh;
-	struct net *net;
 
-	rtnl_lock();
 	rcu_read_lock();
 	qh = rcu_dereference(queue_handler);
-	if (qh) {
-		for_each_net(net) {
-			qh->nf_hook_drop(net, ops);
-		}
-	}
+	if (qh)
+		qh->nf_hook_drop(net, ops);
 	rcu_read_unlock();
-	rtnl_unlock();
 }
 
 /*
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index d7f1685..888b955 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -17,10 +17,12 @@
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_tcpudp.h>
 #include <linux/netfilter/xt_SYNPROXY.h>
+
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_conntrack_seqadj.h>
 #include <net/netfilter/nf_conntrack_synproxy.h>
+#include <net/netfilter/nf_conntrack_zones.h>
 
 int synproxy_net_id;
 EXPORT_SYMBOL_GPL(synproxy_net_id);
@@ -186,7 +188,7 @@
 				    const struct nf_conn_synproxy *synproxy)
 {
 	unsigned int optoff, optend;
-	u32 *ptr, old;
+	__be32 *ptr, old;
 
 	if (synproxy->tsoff == 0)
 		return 1;
@@ -214,18 +216,18 @@
 			if (op[0] == TCPOPT_TIMESTAMP &&
 			    op[1] == TCPOLEN_TIMESTAMP) {
 				if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
-					ptr = (u32 *)&op[2];
+					ptr = (__be32 *)&op[2];
 					old = *ptr;
 					*ptr = htonl(ntohl(*ptr) -
 						     synproxy->tsoff);
 				} else {
-					ptr = (u32 *)&op[6];
+					ptr = (__be32 *)&op[6];
 					old = *ptr;
 					*ptr = htonl(ntohl(*ptr) +
 						     synproxy->tsoff);
 				}
 				inet_proto_csum_replace4(&th->check, skb,
-							 old, *ptr, 0);
+							 old, *ptr, false);
 				return 1;
 			}
 			optoff += op[1];
@@ -352,7 +354,7 @@
 	struct nf_conn *ct;
 	int err = -ENOMEM;
 
-	ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
+	ct = nf_ct_tmpl_alloc(net, &nf_ct_zone_dflt, GFP_KERNEL);
 	if (!ct)
 		goto err1;
 
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index cfe6368..4a41eb9 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -130,20 +130,24 @@
 int nft_register_basechain(struct nft_base_chain *basechain,
 			   unsigned int hook_nops)
 {
+	struct net *net = read_pnet(&basechain->pnet);
+
 	if (basechain->flags & NFT_BASECHAIN_DISABLED)
 		return 0;
 
-	return nf_register_hooks(basechain->ops, hook_nops);
+	return nf_register_net_hooks(net, basechain->ops, hook_nops);
 }
 EXPORT_SYMBOL_GPL(nft_register_basechain);
 
 void nft_unregister_basechain(struct nft_base_chain *basechain,
 			      unsigned int hook_nops)
 {
+	struct net *net = read_pnet(&basechain->pnet);
+
 	if (basechain->flags & NFT_BASECHAIN_DISABLED)
 		return;
 
-	nf_unregister_hooks(basechain->ops, hook_nops);
+	nf_unregister_net_hooks(net, basechain->ops, hook_nops);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_basechain);
 
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index f77bad4..05d0b03 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -114,7 +114,6 @@
 nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
 {
 	const struct nft_chain *chain = ops->priv, *basechain = chain;
-	const struct net *chain_net = read_pnet(&nft_base_chain(basechain)->pnet);
 	const struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
 	const struct nft_rule *rule;
 	const struct nft_expr *expr, *last;
@@ -125,10 +124,6 @@
 	int rulenum;
 	unsigned int gencursor = nft_genmask_cur(net);
 
-	/* Ignore chains that are not for the current network namespace */
-	if (!net_eq(net, chain_net))
-		return NF_ACCEPT;
-
 do_chain:
 	rulenum = 0;
 	rule = list_entry(&chain->rules, struct nft_rule, list);
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index c18af2f..fefbf5f 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -27,8 +27,6 @@
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_DESCRIPTION("nfacct: Extended Netfilter accounting infrastructure");
 
-static LIST_HEAD(nfnl_acct_list);
-
 struct nf_acct {
 	atomic64_t		pkts;
 	atomic64_t		bytes;
@@ -53,6 +51,7 @@
 	     const struct nlmsghdr *nlh, const struct nlattr * const tb[])
 {
 	struct nf_acct *nfacct, *matching = NULL;
+	struct net *net = sock_net(nfnl);
 	char *acct_name;
 	unsigned int size = 0;
 	u32 flags = 0;
@@ -64,7 +63,7 @@
 	if (strlen(acct_name) == 0)
 		return -EINVAL;
 
-	list_for_each_entry(nfacct, &nfnl_acct_list, head) {
+	list_for_each_entry(nfacct, &net->nfnl_acct_list, head) {
 		if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
 			continue;
 
@@ -124,7 +123,7 @@
 			     be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
 	}
 	atomic_set(&nfacct->refcnt, 1);
-	list_add_tail_rcu(&nfacct->head, &nfnl_acct_list);
+	list_add_tail_rcu(&nfacct->head, &net->nfnl_acct_list);
 	return 0;
 }
 
@@ -185,6 +184,7 @@
 static int
 nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+	struct net *net = sock_net(skb->sk);
 	struct nf_acct *cur, *last;
 	const struct nfacct_filter *filter = cb->data;
 
@@ -196,7 +196,7 @@
 		cb->args[1] = 0;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+	list_for_each_entry_rcu(cur, &net->nfnl_acct_list, head) {
 		if (last) {
 			if (cur != last)
 				continue;
@@ -257,6 +257,7 @@
 nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
 	     const struct nlmsghdr *nlh, const struct nlattr * const tb[])
 {
+	struct net *net = sock_net(nfnl);
 	int ret = -ENOENT;
 	struct nf_acct *cur;
 	char *acct_name;
@@ -283,7 +284,7 @@
 		return -EINVAL;
 	acct_name = nla_data(tb[NFACCT_NAME]);
 
-	list_for_each_entry(cur, &nfnl_acct_list, head) {
+	list_for_each_entry(cur, &net->nfnl_acct_list, head) {
 		struct sk_buff *skb2;
 
 		if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX)!= 0)
@@ -336,19 +337,20 @@
 nfnl_acct_del(struct sock *nfnl, struct sk_buff *skb,
 	     const struct nlmsghdr *nlh, const struct nlattr * const tb[])
 {
+	struct net *net = sock_net(nfnl);
 	char *acct_name;
 	struct nf_acct *cur;
 	int ret = -ENOENT;
 
 	if (!tb[NFACCT_NAME]) {
-		list_for_each_entry(cur, &nfnl_acct_list, head)
+		list_for_each_entry(cur, &net->nfnl_acct_list, head)
 			nfnl_acct_try_del(cur);
 
 		return 0;
 	}
 	acct_name = nla_data(tb[NFACCT_NAME]);
 
-	list_for_each_entry(cur, &nfnl_acct_list, head) {
+	list_for_each_entry(cur, &net->nfnl_acct_list, head) {
 		if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
 			continue;
 
@@ -394,12 +396,12 @@
 
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ACCT);
 
-struct nf_acct *nfnl_acct_find_get(const char *acct_name)
+struct nf_acct *nfnl_acct_find_get(struct net *net, const char *acct_name)
 {
 	struct nf_acct *cur, *acct = NULL;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+	list_for_each_entry_rcu(cur, &net->nfnl_acct_list, head) {
 		if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX)!= 0)
 			continue;
 
@@ -422,7 +424,9 @@
 
 void nfnl_acct_put(struct nf_acct *acct)
 {
-	atomic_dec(&acct->refcnt);
+	if (atomic_dec_and_test(&acct->refcnt))
+		kfree_rcu(acct, rcu_head);
+
 	module_put(THIS_MODULE);
 }
 EXPORT_SYMBOL_GPL(nfnl_acct_put);
@@ -478,34 +482,59 @@
 }
 EXPORT_SYMBOL_GPL(nfnl_acct_overquota);
 
+static int __net_init nfnl_acct_net_init(struct net *net)
+{
+	INIT_LIST_HEAD(&net->nfnl_acct_list);
+
+	return 0;
+}
+
+static void __net_exit nfnl_acct_net_exit(struct net *net)
+{
+	struct nf_acct *cur, *tmp;
+
+	list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head) {
+		list_del_rcu(&cur->head);
+
+		if (atomic_dec_and_test(&cur->refcnt))
+			kfree_rcu(cur, rcu_head);
+	}
+}
+
+static struct pernet_operations nfnl_acct_ops = {
+	.init	= nfnl_acct_net_init,
+	.exit	= nfnl_acct_net_exit,
+};
+
 static int __init nfnl_acct_init(void)
 {
 	int ret;
 
+	ret = register_pernet_subsys(&nfnl_acct_ops);
+	if (ret < 0) {
+		pr_err("nfnl_acct_init: failed to register pernet ops\n");
+		goto err_out;
+	}
+
 	pr_info("nfnl_acct: registering with nfnetlink.\n");
 	ret = nfnetlink_subsys_register(&nfnl_acct_subsys);
 	if (ret < 0) {
 		pr_err("nfnl_acct_init: cannot register with nfnetlink.\n");
-		goto err_out;
+		goto cleanup_pernet;
 	}
 	return 0;
+
+cleanup_pernet:
+	unregister_pernet_subsys(&nfnl_acct_ops);
 err_out:
 	return ret;
 }
 
 static void __exit nfnl_acct_exit(void)
 {
-	struct nf_acct *cur, *tmp;
-
 	pr_info("nfnl_acct: unregistering from nfnetlink.\n");
 	nfnetlink_subsys_unregister(&nfnl_acct_subsys);
-
-	list_for_each_entry_safe(cur, tmp, &nfnl_acct_list, head) {
-		list_del_rcu(&cur->head);
-		/* We are sure that our objects have no clients at this point,
-		 * it's safe to release them all without checking refcnt. */
-		kfree_rcu(cur, rcu_head);
-	}
+	unregister_pernet_subsys(&nfnl_acct_ops);
 }
 
 module_init(nfnl_acct_init);
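
The nfnl_acct_put() change above replaces a bare atomic_dec() with the
dec-and-test-then-free idiom, so whoever drops the last reference frees
the object (with kfree_rcu() deferring the free past in-flight RCU
readers). A portable sketch of the idiom using C11 atomics, with plain
free() standing in for kfree_rcu():

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
};

static struct obj *obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refcnt, 1);
	return o;
}

static void obj_put(struct obj *o)
{
	/* fetch_sub returns the old value: 1 means we were last */
	if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
		printf("last reference dropped, freeing\n");
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcnt, 1);	/* creation reference */
	obj_get(o);			/* a user takes a reference */
	obj_put(o);			/* user drops it: object stays */
	obj_put(o);			/* creator drops it: object freed */
	return 0;
}
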
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index 1759123..1067fb4 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -18,39 +18,59 @@
 #include <net/netfilter/nf_tables.h>
 
 struct nft_counter {
-	seqlock_t	lock;
 	u64		bytes;
 	u64		packets;
 };
 
+struct nft_counter_percpu {
+	struct nft_counter	counter;
+	struct u64_stats_sync	syncp;
+};
+
+struct nft_counter_percpu_priv {
+	struct nft_counter_percpu __percpu *counter;
+};
+
 static void nft_counter_eval(const struct nft_expr *expr,
 			     struct nft_regs *regs,
 			     const struct nft_pktinfo *pkt)
 {
-	struct nft_counter *priv = nft_expr_priv(expr);
+	struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+	struct nft_counter_percpu *this_cpu;
 
-	write_seqlock_bh(&priv->lock);
-	priv->bytes += pkt->skb->len;
-	priv->packets++;
-	write_sequnlock_bh(&priv->lock);
+	local_bh_disable();
+	this_cpu = this_cpu_ptr(priv->counter);
+	u64_stats_update_begin(&this_cpu->syncp);
+	this_cpu->counter.bytes += pkt->skb->len;
+	this_cpu->counter.packets++;
+	u64_stats_update_end(&this_cpu->syncp);
+	local_bh_enable();
 }
 
 static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
-	struct nft_counter *priv = nft_expr_priv(expr);
+	struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+	struct nft_counter_percpu *cpu_stats;
+	struct nft_counter total;
+	u64 bytes, packets;
 	unsigned int seq;
-	u64 bytes;
-	u64 packets;
+	int cpu;
 
-	do {
-		seq = read_seqbegin(&priv->lock);
-		bytes	= priv->bytes;
-		packets	= priv->packets;
-	} while (read_seqretry(&priv->lock, seq));
+	memset(&total, 0, sizeof(total));
+	for_each_possible_cpu(cpu) {
+		cpu_stats = per_cpu_ptr(priv->counter, cpu);
+		do {
+			seq	= u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+			bytes	= cpu_stats->counter.bytes;
+			packets	= cpu_stats->counter.packets;
+		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
 
-	if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(bytes)))
-		goto nla_put_failure;
-	if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(packets)))
+		total.packets += packets;
+		total.bytes += bytes;
+	}
+
+	if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) ||
+	    nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets)))
 		goto nla_put_failure;
 	return 0;
 
@@ -67,23 +87,44 @@
 			    const struct nft_expr *expr,
 			    const struct nlattr * const tb[])
 {
-	struct nft_counter *priv = nft_expr_priv(expr);
+	struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+	struct nft_counter_percpu __percpu *cpu_stats;
+	struct nft_counter_percpu *this_cpu;
 
-	if (tb[NFTA_COUNTER_PACKETS])
-	        priv->packets = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
-	if (tb[NFTA_COUNTER_BYTES])
-		priv->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+	cpu_stats = netdev_alloc_pcpu_stats(struct nft_counter_percpu);
+	if (cpu_stats == NULL)
+		return -ENOMEM;
 
-	seqlock_init(&priv->lock);
+	preempt_disable();
+	this_cpu = this_cpu_ptr(cpu_stats);
+	if (tb[NFTA_COUNTER_PACKETS]) {
+		this_cpu->counter.packets =
+			be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+	}
+	if (tb[NFTA_COUNTER_BYTES]) {
+		this_cpu->counter.bytes =
+			be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+	}
+	preempt_enable();
+	priv->counter = cpu_stats;
 	return 0;
 }
 
+static void nft_counter_destroy(const struct nft_ctx *ctx,
+				const struct nft_expr *expr)
+{
+	struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+
+	free_percpu(priv->counter);
+}
+
 static struct nft_expr_type nft_counter_type;
 static const struct nft_expr_ops nft_counter_ops = {
 	.type		= &nft_counter_type,
-	.size		= NFT_EXPR_SIZE(sizeof(struct nft_counter)),
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_counter_percpu_priv)),
 	.eval		= nft_counter_eval,
 	.init		= nft_counter_init,
+	.destroy	= nft_counter_destroy,
 	.dump		= nft_counter_dump,
 };
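
The conversion trades one globally contended seqlock for a counter pair
per cpu: the packet path only touches its local pair, and the dump path
folds a snapshot of every cpu's pair into a total. A single-threaded
sketch of the aggregation step (the u64_stats_sync begin/retry loop,
which guards against torn 64-bit reads on 32-bit SMP, is elided):

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

struct counter {
	uint64_t bytes;
	uint64_t packets;
};

static struct counter percpu[NR_CPUS];

/* hot path: touch only the local cpu's counters, no shared lock */
static void count_packet(int cpu, unsigned int len)
{
	percpu[cpu].bytes += len;
	percpu[cpu].packets++;
}

/* dump path: fold all per-cpu counters into one total */
static struct counter fold(void)
{
	struct counter total = { 0, 0 };

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		total.bytes += percpu[cpu].bytes;
		total.packets += percpu[cpu].packets;
	}
	return total;
}

int main(void)
{
	count_packet(0, 1500);
	count_packet(3, 40);

	struct counter t = fold();

	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)t.packets, (unsigned long long)t.bytes);
	return 0;
}
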
 
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index 435c1cc..5d67938 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -20,63 +20,79 @@
 static DEFINE_SPINLOCK(limit_lock);
 
 struct nft_limit {
+	u64		last;
 	u64		tokens;
+	u64		tokens_max;
 	u64		rate;
-	u64		unit;
-	unsigned long	stamp;
+	u64		nsecs;
+	u32		burst;
 };
 
-static void nft_limit_eval(const struct nft_expr *expr,
-			   struct nft_regs *regs,
-			   const struct nft_pktinfo *pkt)
+static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
 {
-	struct nft_limit *priv = nft_expr_priv(expr);
+	u64 now, tokens;
+	s64 delta;
 
 	spin_lock_bh(&limit_lock);
-	if (time_after_eq(jiffies, priv->stamp)) {
-		priv->tokens = priv->rate;
-		priv->stamp = jiffies + priv->unit * HZ;
-	}
+	now = ktime_get_ns();
+	tokens = limit->tokens + now - limit->last;
+	if (tokens > limit->tokens_max)
+		tokens = limit->tokens_max;
 
-	if (priv->tokens >= 1) {
-		priv->tokens--;
+	limit->last = now;
+	delta = tokens - cost;
+	if (delta >= 0) {
+		limit->tokens = delta;
 		spin_unlock_bh(&limit_lock);
-		return;
+		return false;
 	}
+	limit->tokens = tokens;
 	spin_unlock_bh(&limit_lock);
-
-	regs->verdict.code = NFT_BREAK;
+	return true;
 }
 
-static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
-	[NFTA_LIMIT_RATE]	= { .type = NLA_U64 },
-	[NFTA_LIMIT_UNIT]	= { .type = NLA_U64 },
-};
-
-static int nft_limit_init(const struct nft_ctx *ctx,
-			  const struct nft_expr *expr,
+static int nft_limit_init(struct nft_limit *limit,
 			  const struct nlattr * const tb[])
 {
-	struct nft_limit *priv = nft_expr_priv(expr);
+	u64 unit;
 
 	if (tb[NFTA_LIMIT_RATE] == NULL ||
 	    tb[NFTA_LIMIT_UNIT] == NULL)
 		return -EINVAL;
 
-	priv->rate   = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
-	priv->unit   = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
-	priv->stamp  = jiffies + priv->unit * HZ;
-	priv->tokens = priv->rate;
+	limit->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
+	unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
+	limit->nsecs = unit * NSEC_PER_SEC;
+	if (limit->rate == 0 || limit->nsecs < unit)
+		return -EOVERFLOW;
+	limit->tokens = limit->tokens_max = limit->nsecs;
+
+	if (tb[NFTA_LIMIT_BURST]) {
+		u64 rate;
+
+		limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
+
+		rate = limit->rate + limit->burst;
+		if (rate < limit->rate)
+			return -EOVERFLOW;
+
+		limit->rate = rate;
+	}
+	limit->last = ktime_get_ns();
+
 	return 0;
 }
 
-static int nft_limit_dump(struct sk_buff *skb, const struct nft_expr *expr)
+static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit *limit,
+			  enum nft_limit_type type)
 {
-	const struct nft_limit *priv = nft_expr_priv(expr);
+	u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC);
+	u64 rate = limit->rate - limit->burst;
 
-	if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(priv->rate)))
-		goto nla_put_failure;
-	if (nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(priv->unit)))
+	if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate)) ||
+	    nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs)) ||
+	    nla_put_be32(skb, NFTA_LIMIT_BURST, htonl(limit->burst)) ||
+	    nla_put_be32(skb, NFTA_LIMIT_TYPE, htonl(type)))
 		goto nla_put_failure;
 	return 0;
 
@@ -84,18 +100,114 @@
 	return -1;
 }
 
+struct nft_limit_pkts {
+	struct nft_limit	limit;
+	u64			cost;
+};
+
+static void nft_limit_pkts_eval(const struct nft_expr *expr,
+				struct nft_regs *regs,
+				const struct nft_pktinfo *pkt)
+{
+	struct nft_limit_pkts *priv = nft_expr_priv(expr);
+
+	if (nft_limit_eval(&priv->limit, priv->cost))
+		regs->verdict.code = NFT_BREAK;
+}
+
+static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
+	[NFTA_LIMIT_RATE]	= { .type = NLA_U64 },
+	[NFTA_LIMIT_UNIT]	= { .type = NLA_U64 },
+	[NFTA_LIMIT_BURST]	= { .type = NLA_U32 },
+	[NFTA_LIMIT_TYPE]	= { .type = NLA_U32 },
+};
+
+static int nft_limit_pkts_init(const struct nft_ctx *ctx,
+			       const struct nft_expr *expr,
+			       const struct nlattr * const tb[])
+{
+	struct nft_limit_pkts *priv = nft_expr_priv(expr);
+	int err;
+
+	err = nft_limit_init(&priv->limit, tb);
+	if (err < 0)
+		return err;
+
+	priv->cost = div_u64(priv->limit.nsecs, priv->limit.rate);
+	return 0;
+}
+
+static int nft_limit_pkts_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_limit_pkts *priv = nft_expr_priv(expr);
+
+	return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS);
+}
+
 static struct nft_expr_type nft_limit_type;
-static const struct nft_expr_ops nft_limit_ops = {
+static const struct nft_expr_ops nft_limit_pkts_ops = {
+	.type		= &nft_limit_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_limit_pkts)),
+	.eval		= nft_limit_pkts_eval,
+	.init		= nft_limit_pkts_init,
+	.dump		= nft_limit_pkts_dump,
+};
+
+static void nft_limit_pkt_bytes_eval(const struct nft_expr *expr,
+				     struct nft_regs *regs,
+				     const struct nft_pktinfo *pkt)
+{
+	struct nft_limit *priv = nft_expr_priv(expr);
+	u64 cost = div_u64(priv->nsecs * pkt->skb->len, priv->rate);
+
+	if (nft_limit_eval(priv, cost))
+		regs->verdict.code = NFT_BREAK;
+}
+
+static int nft_limit_pkt_bytes_init(const struct nft_ctx *ctx,
+				    const struct nft_expr *expr,
+				    const struct nlattr * const tb[])
+{
+	struct nft_limit *priv = nft_expr_priv(expr);
+
+	return nft_limit_init(priv, tb);
+}
+
+static int nft_limit_pkt_bytes_dump(struct sk_buff *skb,
+				    const struct nft_expr *expr)
+{
+	const struct nft_limit *priv = nft_expr_priv(expr);
+
+	return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES);
+}
+
+static const struct nft_expr_ops nft_limit_pkt_bytes_ops = {
 	.type		= &nft_limit_type,
 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_limit)),
-	.eval		= nft_limit_eval,
-	.init		= nft_limit_init,
-	.dump		= nft_limit_dump,
+	.eval		= nft_limit_pkt_bytes_eval,
+	.init		= nft_limit_pkt_bytes_init,
+	.dump		= nft_limit_pkt_bytes_dump,
 };
 
+static const struct nft_expr_ops *
+nft_limit_select_ops(const struct nft_ctx *ctx,
+		     const struct nlattr * const tb[])
+{
+	if (tb[NFTA_LIMIT_TYPE] == NULL)
+		return &nft_limit_pkts_ops;
+
+	switch (ntohl(nla_get_be32(tb[NFTA_LIMIT_TYPE]))) {
+	case NFT_LIMIT_PKTS:
+		return &nft_limit_pkts_ops;
+	case NFT_LIMIT_PKT_BYTES:
+		return &nft_limit_pkt_bytes_ops;
+	}
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
 static struct nft_expr_type nft_limit_type __read_mostly = {
 	.name		= "limit",
-	.ops		= &nft_limit_ops,
+	.select_ops	= nft_limit_select_ops,
 	.policy		= nft_limit_policy,
 	.maxattr	= NFTA_LIMIT_MAX,
 	.flags		= NFT_EXPR_STATEFUL,
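
The rewritten limit expression is a token bucket kept in nanoseconds:
each unit costs nsecs/rate ns of credit, elapsed time refills the
bucket, and credit is capped at tokens_max. A compressed, runnable
userspace sketch of the same arithmetic (burst handling elided, the
clock read replaced by a parameter):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct limit {
	uint64_t last;		/* timestamp of last evaluation (ns) */
	uint64_t tokens;	/* remaining credit, in ns */
	uint64_t tokens_max;
	uint64_t rate;		/* this many units... */
	uint64_t nsecs;		/* ...per this many ns */
};

/* returns 1 when the packet exceeds the limit (NFT_BREAK above) */
static int limit_eval(struct limit *l, uint64_t cost, uint64_t now)
{
	uint64_t tokens = l->tokens + (now - l->last);
	int64_t delta;

	if (tokens > l->tokens_max)
		tokens = l->tokens_max;

	l->last = now;
	delta = (int64_t)(tokens - cost);
	if (delta >= 0) {
		l->tokens = delta;
		return 0;	/* within the limit */
	}
	l->tokens = tokens;
	return 1;
}

int main(void)
{
	/* 10 packets per second; one packet costs nsecs/rate ns */
	struct limit l = {
		.rate = 10, .nsecs = NSEC_PER_SEC,
		.tokens = NSEC_PER_SEC, .tokens_max = NSEC_PER_SEC,
	};
	uint64_t cost = l.nsecs / l.rate;
	int dropped = 0;

	/* a burst of 20 packets arriving at the same instant */
	for (int i = 0; i < 20; i++)
		dropped += limit_eval(&l, cost, 0);

	printf("dropped %d of 20 (expect 10)\n", dropped);
	return 0;
}
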
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 52561e1..cb2f13e 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -166,11 +166,13 @@
 			goto err;
 		*dest = out->group;
 		break;
+#ifdef CONFIG_CGROUP_NET_CLASSID
 	case NFT_META_CGROUP:
 		if (skb->sk == NULL || !sk_fullsock(skb->sk))
 			goto err;
 		*dest = skb->sk->sk_classid;
 		break;
+#endif
 	default:
 		WARN_ON(1);
 		goto err;
@@ -246,7 +248,9 @@
 	case NFT_META_CPU:
 	case NFT_META_IIFGROUP:
 	case NFT_META_OIFGROUP:
+#ifdef CONFIG_CGROUP_NET_CLASSID
 	case NFT_META_CGROUP:
+#endif
 		len = sizeof(u32);
 		break;
 	case NFT_META_IIFNAME:
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 94fb3b2..09b4b07 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/if_vlan.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/netlink.h>
@@ -17,6 +18,53 @@
 #include <net/netfilter/nf_tables_core.h>
 #include <net/netfilter/nf_tables.h>
 
+/* add the vlan header into the user buffer if the tag was removed by offloads */
+static bool
+nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
+{
+	int mac_off = skb_mac_header(skb) - skb->data;
+	u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
+	struct vlan_ethhdr veth;
+
+	vlanh = (u8 *) &veth;
+	if (offset < ETH_HLEN) {
+		u8 ethlen = min_t(u8, len, ETH_HLEN - offset);
+
+		if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
+			return false;
+
+		veth.h_vlan_proto = skb->vlan_proto;
+
+		memcpy(dst_u8, vlanh + offset, ethlen);
+
+		len -= ethlen;
+		if (len == 0)
+			return true;
+
+		dst_u8 += ethlen;
+		offset = ETH_HLEN;
+	} else if (offset >= VLAN_ETH_HLEN) {
+		offset -= VLAN_HLEN;
+		goto skip;
+	}
+
+	veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
+	veth.h_vlan_encapsulated_proto = skb->protocol;
+
+	vlanh += offset;
+
+	vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
+	memcpy(dst_u8, vlanh, vlan_len);
+
+	len -= vlan_len;
+	if (!len)
+		return true;
+
+	dst_u8 += vlan_len;
+ skip:
+	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
+}
+
 static void nft_payload_eval(const struct nft_expr *expr,
 			     struct nft_regs *regs,
 			     const struct nft_pktinfo *pkt)
@@ -26,10 +74,18 @@
 	u32 *dest = &regs->data[priv->dreg];
 	int offset;
 
+	dest[priv->len / NFT_REG32_SIZE] = 0;
 	switch (priv->base) {
 	case NFT_PAYLOAD_LL_HEADER:
 		if (!skb_mac_header_was_set(skb))
 			goto err;
+
+		if (skb_vlan_tag_present(skb)) {
+			if (!nft_payload_copy_vlan(dest, skb,
+						   priv->offset, priv->len))
+				goto err;
+			return;
+		}
 		offset = skb_mac_header(skb) - skb->data;
 		break;
 	case NFT_PAYLOAD_NETWORK_HEADER:
@@ -43,7 +99,6 @@
 	}
 	offset += priv->offset;
 
-	dest[priv->len / NFT_REG32_SIZE] = 0;
 	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
 		goto err;
 	return;
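
nft_payload_copy_vlan() serves link-layer reads when hardware has
already stripped the 802.1Q tag into skb metadata: it reconstructs the
18-byte vlan_ethhdr that would have appeared on the wire and copies the
requested window out of it. A userspace sketch of just the rebuild step
(fixed example values; not the kernel helper):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct vlan_ethhdr {			/* on-wire layout, 18 bytes */
	uint8_t  h_dest[6];
	uint8_t  h_source[6];
	uint16_t h_vlan_proto;		/* 0x8100, network order */
	uint16_t h_vlan_TCI;		/* the stripped tag */
	uint16_t h_vlan_encapsulated_proto;
} __attribute__((packed));

int main(void)
{
	/* what the driver delivered: a plain 14-byte Ethernet header */
	uint8_t frame[14] = {
		1, 2, 3, 4, 5, 6,	/* dst MAC */
		7, 8, 9, 10, 11, 12,	/* src MAC */
		0x08, 0x00,		/* ETH_P_IP */
	};
	uint16_t stripped_tci = 100;	/* vlan id 100, from metadata */
	struct vlan_ethhdr veth;

	/* splice the tag back in between the MACs and the ethertype */
	memcpy(&veth, frame, 12);
	veth.h_vlan_proto = htons(0x8100);
	veth.h_vlan_TCI = htons(stripped_tci);
	veth.h_vlan_encapsulated_proto = htons(0x0800);

	printf("rebuilt header is %zu bytes\n", sizeof(veth));	/* 18 */
	return 0;
}
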
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index d324fe7..9b42b5e 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -67,9 +67,6 @@
 	[NFPROTO_IPV6]   = "ip6",
 };
 
-/* Allow this many total (re)entries. */
-static const unsigned int xt_jumpstack_multiplier = 2;
-
 /* Registration hooks for targets. */
 int xt_register_target(struct xt_target *target)
 {
@@ -688,8 +685,6 @@
 		kvfree(info->jumpstack);
 	}
 
-	free_percpu(info->stackptr);
-
 	kvfree(info);
 }
 EXPORT_SYMBOL(xt_free_table_info);
@@ -732,15 +727,14 @@
 DEFINE_PER_CPU(seqcount_t, xt_recseq);
 EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
 
+struct static_key xt_tee_enabled __read_mostly;
+EXPORT_SYMBOL_GPL(xt_tee_enabled);
+
 static int xt_jumpstack_alloc(struct xt_table_info *i)
 {
 	unsigned int size;
 	int cpu;
 
-	i->stackptr = alloc_percpu(unsigned int);
-	if (i->stackptr == NULL)
-		return -ENOMEM;
-
 	size = sizeof(void **) * nr_cpu_ids;
 	if (size > PAGE_SIZE)
 		i->jumpstack = vzalloc(size);
@@ -749,8 +743,21 @@
 	if (i->jumpstack == NULL)
 		return -ENOMEM;
 
-	i->stacksize *= xt_jumpstack_multiplier;
-	size = sizeof(void *) * i->stacksize;
+	/* ruleset without jumps -- no stack needed */
+	if (i->stacksize == 0)
+		return 0;
+
+	/* The jumpstack needs to be able to record two full callchains: one
+	 * from the first ruleset traversal, plus one table reentrancy via
+	 * -j TEE, without clobbering the callchain that brought us to the
+	 * TEE target.
+	 *
+	 * This is done by allocating two jumpstacks per cpu; on reentry,
+	 * the upper half of the stack is used.
+	 *
+	 * See the jumpstack setup in ipt_do_table() for more details.
+	 */
+	size = sizeof(void *) * i->stacksize * 2u;
 	for_each_possible_cpu(cpu) {
 		if (size > PAGE_SIZE)
 			i->jumpstack[cpu] = vmalloc_node(size,
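
A toy illustration of the split described in the comment above: one
buffer holds two stacks, and a traversal triggered by the duplicated
packet offsets into the upper half so it cannot clobber the outer
callchain. (In this series the real selection happens in
ipt_do_table(), keyed off the per-cpu nf_skb_duplicated flag that
nf_dup_ipv4() sets and gated by the xt_tee_enabled static key added
above; the sketch below only mimics the indexing.)

#include <stdio.h>

#define STACKSIZE 4

static const char *jumpstack[STACKSIZE * 2];	/* one buffer, two stacks */

static void run_table(int duplicated)
{
	/* the duplicated packet's traversal starts at the upper half */
	unsigned int base = duplicated ? STACKSIZE : 0;
	unsigned int sp = 0;

	jumpstack[base + sp++] = "outer return target";
	if (!duplicated)
		run_table(1);		/* the TEE'd copy re-enters */
	jumpstack[base + --sp] = NULL;
}

int main(void)
{
	run_table(0);
	printf("outer and inner callchains used disjoint halves\n");
	return 0;
}
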
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 43ddeee..8e52489 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -181,9 +181,23 @@
 #endif
 }
 
+static u16 xt_ct_flags_to_dir(const struct xt_ct_target_info_v1 *info)
+{
+	switch (info->flags & (XT_CT_ZONE_DIR_ORIG |
+			       XT_CT_ZONE_DIR_REPL)) {
+	case XT_CT_ZONE_DIR_ORIG:
+		return NF_CT_ZONE_DIR_ORIG;
+	case XT_CT_ZONE_DIR_REPL:
+		return NF_CT_ZONE_DIR_REPL;
+	default:
+		return NF_CT_DEFAULT_ZONE_DIR;
+	}
+}
+
 static int xt_ct_tg_check(const struct xt_tgchk_param *par,
 			  struct xt_ct_target_info_v1 *info)
 {
+	struct nf_conntrack_zone zone;
 	struct nf_conn *ct;
 	int ret = -EOPNOTSUPP;
 
@@ -193,7 +207,9 @@
 	}
 
 #ifndef CONFIG_NF_CONNTRACK_ZONES
-	if (info->zone)
+	if (info->zone || info->flags & (XT_CT_ZONE_DIR_ORIG |
+					 XT_CT_ZONE_DIR_REPL |
+					 XT_CT_ZONE_MARK))
 		goto err1;
 #endif
 
@@ -201,7 +217,13 @@
 	if (ret < 0)
 		goto err1;
 
-	ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
+	memset(&zone, 0, sizeof(zone));
+	zone.id = info->zone;
+	zone.dir = xt_ct_flags_to_dir(info);
+	if (info->flags & XT_CT_ZONE_MARK)
+		zone.flags |= NF_CT_FLAG_MARK;
+
+	ct = nf_ct_tmpl_alloc(par->net, &zone, GFP_KERNEL);
 	if (!ct) {
 		ret = -ENOMEM;
 		goto err2;
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 8c3190e..8c02501 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -144,7 +144,7 @@
 
 			inet_proto_csum_replace2(&tcph->check, skb,
 						 htons(oldmss), htons(newmss),
-						 0);
+						 false);
 			return 0;
 		}
 	}
@@ -185,18 +185,18 @@
 	memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
 
 	inet_proto_csum_replace2(&tcph->check, skb,
-				 htons(len), htons(len + TCPOLEN_MSS), 1);
+				 htons(len), htons(len + TCPOLEN_MSS), true);
 	opt[0] = TCPOPT_MSS;
 	opt[1] = TCPOLEN_MSS;
 	opt[2] = (newmss & 0xff00) >> 8;
 	opt[3] = newmss & 0x00ff;
 
-	inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), 0);
+	inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), false);
 
 	oldval = ((__be16 *)tcph)[6];
 	tcph->doff += TCPOLEN_MSS/4;
 	inet_proto_csum_replace2(&tcph->check, skb,
-				 oldval, ((__be16 *)tcph)[6], 0);
+				 oldval, ((__be16 *)tcph)[6], false);
 	return TCPOLEN_MSS;
 }
 
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index 625fa1d..eb92bff 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -80,7 +80,7 @@
 				n <<= 8;
 			}
 			inet_proto_csum_replace2(&tcph->check, skb, htons(o),
-						 htons(n), 0);
+						 htons(n), false);
 		}
 		memset(opt + i, TCPOPT_NOP, optl);
 	}
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index a747eb4..fd980aa 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -10,26 +10,15 @@
  *	modify it under the terms of the GNU General Public License
  *	version 2 or later, as published by the Free Software Foundation.
  */
-#include <linux/ip.h>
 #include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/route.h>
 #include <linux/skbuff.h>
-#include <linux/notifier.h>
-#include <net/checksum.h>
-#include <net/icmp.h>
-#include <net/ip.h>
-#include <net/ipv6.h>
-#include <net/ip6_route.h>
-#include <net/route.h>
+#include <linux/route.h>
 #include <linux/netfilter/x_tables.h>
+#include <net/route.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
 #include <linux/netfilter/xt_TEE.h>
 
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-#	define WITH_CONNTRACK 1
-#	include <net/netfilter/nf_conntrack.h>
-#endif
-
 struct xt_tee_priv {
 	struct notifier_block	notifier;
 	struct xt_tee_tginfo	*tginfo;
@@ -37,163 +26,25 @@
 };
 
 static const union nf_inet_addr tee_zero_address;
-static DEFINE_PER_CPU(bool, tee_active);
-
-static struct net *pick_net(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_NS
-	const struct dst_entry *dst;
-
-	if (skb->dev != NULL)
-		return dev_net(skb->dev);
-	dst = skb_dst(skb);
-	if (dst != NULL && dst->dev != NULL)
-		return dev_net(dst->dev);
-#endif
-	return &init_net;
-}
-
-static bool
-tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
-{
-	const struct iphdr *iph = ip_hdr(skb);
-	struct net *net = pick_net(skb);
-	struct rtable *rt;
-	struct flowi4 fl4;
-
-	memset(&fl4, 0, sizeof(fl4));
-	if (info->priv) {
-		if (info->priv->oif == -1)
-			return false;
-		fl4.flowi4_oif = info->priv->oif;
-	}
-	fl4.daddr = info->gw.ip;
-	fl4.flowi4_tos = RT_TOS(iph->tos);
-	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
-	fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
-	rt = ip_route_output_key(net, &fl4);
-	if (IS_ERR(rt))
-		return false;
-
-	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->dst);
-	skb->dev      = rt->dst.dev;
-	skb->protocol = htons(ETH_P_IP);
-	return true;
-}
 
 static unsigned int
 tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_tee_tginfo *info = par->targinfo;
-	struct iphdr *iph;
 
-	if (__this_cpu_read(tee_active))
-		return XT_CONTINUE;
-	/*
-	 * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
-	 * the original skb, which should continue on its way as if nothing has
-	 * happened. The copy should be independently delivered to the TEE
-	 * --gateway.
-	 */
-	skb = pskb_copy(skb, GFP_ATOMIC);
-	if (skb == NULL)
-		return XT_CONTINUE;
+	nf_dup_ipv4(skb, par->hooknum, &info->gw.in, info->priv->oif);
 
-#ifdef WITH_CONNTRACK
-	/* Avoid counting cloned packets towards the original connection. */
-	nf_conntrack_put(skb->nfct);
-	skb->nfct     = &nf_ct_untracked_get()->ct_general;
-	skb->nfctinfo = IP_CT_NEW;
-	nf_conntrack_get(skb->nfct);
-#endif
-	/*
-	 * If we are in PREROUTING/INPUT, the checksum must be recalculated
-	 * since the length could have changed as a result of defragmentation.
-	 *
-	 * We also decrease the TTL to mitigate potential TEE loops
-	 * between two hosts.
-	 *
-	 * Set %IP_DF so that the original source is notified of a potentially
-	 * decreased MTU on the clone route. IPv6 does this too.
-	 */
-	iph = ip_hdr(skb);
-	iph->frag_off |= htons(IP_DF);
-	if (par->hooknum == NF_INET_PRE_ROUTING ||
-	    par->hooknum == NF_INET_LOCAL_IN)
-		--iph->ttl;
-	ip_send_check(iph);
-
-	if (tee_tg_route4(skb, info)) {
-		__this_cpu_write(tee_active, true);
-		ip_local_out(skb);
-		__this_cpu_write(tee_active, false);
-	} else {
-		kfree_skb(skb);
-	}
 	return XT_CONTINUE;
 }
 
-#if IS_ENABLED(CONFIG_IPV6)
-static bool
-tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
-{
-	const struct ipv6hdr *iph = ipv6_hdr(skb);
-	struct net *net = pick_net(skb);
-	struct dst_entry *dst;
-	struct flowi6 fl6;
-
-	memset(&fl6, 0, sizeof(fl6));
-	if (info->priv) {
-		if (info->priv->oif == -1)
-			return false;
-		fl6.flowi6_oif = info->priv->oif;
-	}
-	fl6.daddr = info->gw.in6;
-	fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
-			   (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
-	fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
-	dst = ip6_route_output(net, NULL, &fl6);
-	if (dst->error) {
-		dst_release(dst);
-		return false;
-	}
-	skb_dst_drop(skb);
-	skb_dst_set(skb, dst);
-	skb->dev      = dst->dev;
-	skb->protocol = htons(ETH_P_IPV6);
-	return true;
-}
-
+#if IS_ENABLED(CONFIG_NF_DUP_IPV6)
 static unsigned int
 tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_tee_tginfo *info = par->targinfo;
 
-	if (__this_cpu_read(tee_active))
-		return XT_CONTINUE;
-	skb = pskb_copy(skb, GFP_ATOMIC);
-	if (skb == NULL)
-		return XT_CONTINUE;
+	nf_dup_ipv6(skb, par->hooknum, &info->gw.in6, info->priv->oif);
 
-#ifdef WITH_CONNTRACK
-	nf_conntrack_put(skb->nfct);
-	skb->nfct     = &nf_ct_untracked_get()->ct_general;
-	skb->nfctinfo = IP_CT_NEW;
-	nf_conntrack_get(skb->nfct);
-#endif
-	if (par->hooknum == NF_INET_PRE_ROUTING ||
-	    par->hooknum == NF_INET_LOCAL_IN) {
-		struct ipv6hdr *iph = ipv6_hdr(skb);
-		--iph->hop_limit;
-	}
-	if (tee_tg_route6(skb, info)) {
-		__this_cpu_write(tee_active, true);
-		ip6_local_out(skb);
-		__this_cpu_write(tee_active, false);
-	} else {
-		kfree_skb(skb);
-	}
 	return XT_CONTINUE;
 }
 #endif
@@ -252,6 +103,7 @@
 	} else
 		info->priv = NULL;
 
+	static_key_slow_inc(&xt_tee_enabled);
 	return 0;
 }
 
@@ -263,6 +115,7 @@
 		unregister_netdevice_notifier(&info->priv->notifier);
 		kfree(info->priv);
 	}
+	static_key_slow_dec(&xt_tee_enabled);
 }
 
 static struct xt_target tee_tg_reg[] __read_mostly = {
@@ -276,7 +129,7 @@
 		.destroy    = tee_tg_destroy,
 		.me         = THIS_MODULE,
 	},
-#if IS_ENABLED(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_NF_DUP_IPV6)
 	{
 		.name       = "TEE",
 		.revision   = 1,
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index cca96ce..d0c96c5 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -272,8 +272,7 @@
 					    hp->source, lport ? lport : hp->dest,
 					    skb->dev, NFT_LOOKUP_LISTENER);
 		if (sk2) {
-			inet_twsk_deschedule(inet_twsk(sk));
-			inet_twsk_put(inet_twsk(sk));
+			inet_twsk_deschedule_put(inet_twsk(sk));
 			sk = sk2;
 		}
 	}
@@ -437,8 +436,7 @@
 					    tgi->lport ? tgi->lport : hp->dest,
 					    skb->dev, NFT_LOOKUP_LISTENER);
 		if (sk2) {
-			inet_twsk_deschedule(inet_twsk(sk));
-			inet_twsk_put(inet_twsk(sk));
+			inet_twsk_deschedule_put(inet_twsk(sk));
 			sk = sk2;
 		}
 	}
diff --git a/net/netfilter/xt_connlabel.c b/net/netfilter/xt_connlabel.c
index 9f8719d..bb9cbeb 100644
--- a/net/netfilter/xt_connlabel.c
+++ b/net/netfilter/xt_connlabel.c
@@ -42,10 +42,6 @@
 			    XT_CONNLABEL_OP_SET;
 	struct xt_connlabel_mtinfo *info = par->matchinfo;
 	int ret;
-	size_t words;
-
-	if (info->bit > XT_CONNLABEL_MAXBIT)
-		return -ERANGE;
 
 	if (info->options & ~options) {
 		pr_err("Unknown options in mask %x\n", info->options);
@@ -59,19 +55,15 @@
 		return ret;
 	}
 
-	par->net->ct.labels_used++;
-	words = BITS_TO_LONGS(info->bit+1);
-	if (words > par->net->ct.label_words)
-		par->net->ct.label_words = words;
-
+	ret = nf_connlabels_get(par->net, info->bit + 1);
+	if (ret < 0)
+		nf_ct_l3proto_module_put(par->family);
 	return ret;
 }
 
 static void connlabel_mt_destroy(const struct xt_mtdtor_param *par)
 {
-	par->net->ct.labels_used--;
-	if (par->net->ct.labels_used == 0)
-		par->net->ct.label_words = 0;
+	nf_connlabels_put(par->net);
 	nf_ct_l3proto_module_put(par->family);
 }
 
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 29ba621..075d89d 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -134,7 +134,7 @@
 static unsigned int check_hlist(struct net *net,
 				struct hlist_head *head,
 				const struct nf_conntrack_tuple *tuple,
-				u16 zone,
+				const struct nf_conntrack_zone *zone,
 				bool *addit)
 {
 	const struct nf_conntrack_tuple_hash *found;
@@ -201,7 +201,7 @@
 count_tree(struct net *net, struct rb_root *root,
 	   const struct nf_conntrack_tuple *tuple,
 	   const union nf_inet_addr *addr, const union nf_inet_addr *mask,
-	   u8 family, u16 zone)
+	   u8 family, const struct nf_conntrack_zone *zone)
 {
 	struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
 	struct rb_node **rbnode, *parent;
@@ -290,7 +290,8 @@
 		      const struct nf_conntrack_tuple *tuple,
 		      const union nf_inet_addr *addr,
 		      const union nf_inet_addr *mask,
-		      u_int8_t family, u16 zone)
+		      u_int8_t family,
+		      const struct nf_conntrack_zone *zone)
 {
 	struct rb_root *root;
 	int count;
@@ -321,10 +322,10 @@
 	union nf_inet_addr addr;
 	struct nf_conntrack_tuple tuple;
 	const struct nf_conntrack_tuple *tuple_ptr = &tuple;
+	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
 	enum ip_conntrack_info ctinfo;
 	const struct nf_conn *ct;
 	unsigned int connections;
-	u16 zone = NF_CT_DEFAULT_ZONE;
 
 	ct = nf_ct_get(skb, &ctinfo);
 	if (ct != NULL) {
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
index 8c646ed..3048a7e 100644
--- a/net/netfilter/xt_nfacct.c
+++ b/net/netfilter/xt_nfacct.c
@@ -37,7 +37,7 @@
 	struct xt_nfacct_match_info *info = par->matchinfo;
 	struct nf_acct *nfacct;
 
-	nfacct = nfnl_acct_find_get(info->name);
+	nfacct = nfnl_acct_find_get(par->net, info->name);
 	if (nfacct == NULL) {
 		pr_info("xt_nfacct: accounting object with name `%s' "
 			"does not exists\n", info->name);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a774985..50889be 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -84,6 +84,7 @@
 #define NETLINK_F_BROADCAST_SEND_ERROR	0x4
 #define NETLINK_F_RECV_NO_ENOBUFS	0x8
 #define NETLINK_F_LISTEN_ALL_NSID	0x10
+#define NETLINK_F_CAP_ACK		0x20
 
 static inline int netlink_is_kernel(struct sock *sk)
 {
@@ -593,16 +594,6 @@
 	return netlink_lookup_frame(ring, ring->head, status);
 }
 
-static struct nl_mmap_hdr *
-netlink_previous_frame(const struct netlink_ring *ring,
-		       enum nl_mmap_status status)
-{
-	unsigned int prev;
-
-	prev = ring->head ? ring->head - 1 : ring->frame_max;
-	return netlink_lookup_frame(ring, prev, status);
-}
-
 static void netlink_increment_head(struct netlink_ring *ring)
 {
 	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
@@ -610,11 +601,11 @@
 
 static void netlink_forward_ring(struct netlink_ring *ring)
 {
-	unsigned int head = ring->head, pos = head;
+	unsigned int head = ring->head;
 	const struct nl_mmap_hdr *hdr;
 
 	do {
-		hdr = __netlink_lookup_frame(ring, pos);
+		hdr = __netlink_lookup_frame(ring, ring->head);
 		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
 			break;
 		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
@@ -623,6 +614,21 @@
 	} while (ring->head != head);
 }
 
+static bool netlink_has_valid_frame(struct netlink_ring *ring)
+{
+	unsigned int head = ring->head, pos = head;
+	const struct nl_mmap_hdr *hdr;
+
+	do {
+		hdr = __netlink_lookup_frame(ring, pos);
+		if (hdr->nm_status == NL_MMAP_STATUS_VALID)
+			return true;
+		pos = pos != 0 ? pos - 1 : ring->frame_max;
+	} while (pos != head);
+
+	return false;
+}
+
 static bool netlink_dump_space(struct netlink_sock *nlk)
 {
 	struct netlink_ring *ring = &nlk->rx_ring;
@@ -670,8 +676,7 @@
 
 	spin_lock_bh(&sk->sk_receive_queue.lock);
 	if (nlk->rx_ring.pg_vec) {
-		netlink_forward_ring(&nlk->rx_ring);
-		if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
+		if (netlink_has_valid_frame(&nlk->rx_ring))
 			mask |= POLLIN | POLLRDNORM;
 	}
 	spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -2258,6 +2263,13 @@
 			nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
 		err = 0;
 		break;
+	case NETLINK_CAP_ACK:
+		if (val)
+			nlk->flags |= NETLINK_F_CAP_ACK;
+		else
+			nlk->flags &= ~NETLINK_F_CAP_ACK;
+		err = 0;
+		break;
 	default:
 		err = -ENOPROTOOPT;
 	}
@@ -2332,6 +2344,16 @@
 		netlink_table_ungrab();
 		break;
 	}
+	case NETLINK_CAP_ACK:
+		if (len < sizeof(int))
+			return -EINVAL;
+		len = sizeof(int);
+		val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
+		if (put_user(len, optlen) ||
+		    put_user(val, optval))
+			return -EFAULT;
+		err = 0;
+		break;
 	default:
 		err = -ENOPROTOOPT;
 	}
@@ -2873,9 +2895,12 @@
 	struct nlmsghdr *rep;
 	struct nlmsgerr *errmsg;
 	size_t payload = sizeof(*errmsg);
+	struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
 
-	/* error messages get the original request appened */
-	if (err)
+	/* Error messages get the original request appended, unless the user
+	 * requests to cap the error message.
+	 */
+	if (!(nlk->flags & NETLINK_F_CAP_ACK) && err)
 		payload += nlmsg_len(nlh);
 
 	skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
@@ -2898,7 +2923,7 @@
 			  NLMSG_ERROR, payload, 0);
 	errmsg = nlmsg_data(rep);
 	errmsg->error = err;
-	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
+	memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
 	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
 }
 EXPORT_SYMBOL(netlink_ack);
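
A minimal userspace sketch of opting in to the new capped ACKs; SOL_NETLINK and NETLINK_CAP_ACK come from the standard socket and netlink headers, and nlfd is assumed to be an open netlink socket:

	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <stdio.h>

	static int enable_cap_ack(int nlfd)
	{
		int one = 1;

		/* NLMSG_ERROR replies will now carry only the request's
		 * nlmsghdr instead of echoing the whole payload back. */
		if (setsockopt(nlfd, SOL_NETLINK, NETLINK_CAP_ACK,
			       &one, sizeof(one)) < 0) {
			perror("setsockopt(NETLINK_CAP_ACK)");
			return -1;
		}
		return 0;
	}
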
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 95af2d2..943889b 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -351,6 +351,20 @@
 }
 EXPORT_SYMBOL(nci_prop_cmd);
 
+int nci_core_reset(struct nci_dev *ndev)
+{
+	return __nci_request(ndev, nci_reset_req, 0,
+			     msecs_to_jiffies(NCI_RESET_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_core_reset);
+
+int nci_core_init(struct nci_dev *ndev)
+{
+	return __nci_request(ndev, nci_init_req, 0,
+			     msecs_to_jiffies(NCI_INIT_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_core_init);
+
 static int nci_open_device(struct nci_dev *ndev)
 {
 	int rc = 0;
@@ -388,6 +402,10 @@
 				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
 	}
 
+	if (!rc && ndev->ops->post_setup)
+		rc = ndev->ops->post_setup(ndev);
+
 	if (!rc) {
 		rc = __nci_request(ndev, nci_init_complete_req, 0,
 				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
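
The new post_setup hook runs after the core RESET/INIT handshake, and nci_core_reset()/nci_core_init() are exported above precisely so a driver can redo that handshake once vendor-specific configuration has taken effect. A hypothetical driver callback (the acme_* name is illustrative):

	static int acme_nci_post_setup(struct nci_dev *ndev)
	{
		int rc;

		/* On this hypothetical chip, vendor settings applied during
		 * setup() only take effect after another RESET/INIT cycle. */
		rc = nci_core_reset(ndev);
		if (rc)
			return rc;

		return nci_core_init(ndev);
	}
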
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index af002df..609f922 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -233,7 +233,7 @@
 	r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
 			msecs_to_jiffies(NCI_DATA_TIMEOUT));
 
-	if (r == NCI_STATUS_OK)
+	if (r == NCI_STATUS_OK && skb)
 		*skb = conn_info->rx_skb;
 
 	return r;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index f85f37e..853172c 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -63,6 +63,8 @@
 	[NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING,
 				     .len = NFC_FIRMWARE_NAME_MAXSIZE },
 	[NFC_ATTR_SE_APDU] = { .type = NLA_BINARY },
+	[NFC_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
 };
 
 static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
@@ -1503,7 +1505,7 @@
 	u32 dev_idx, vid, subcmd;
 	u8 *data;
 	size_t data_len;
-	int i;
+	int i, err;
 
 	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
 	    !info->attrs[NFC_ATTR_VENDOR_ID] ||
@@ -1518,12 +1520,13 @@
 	if (!dev || !dev->vendor_cmds || !dev->n_vendor_cmds)
 		return -ENODEV;
 
-	data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]);
-	if (data) {
+	if (info->attrs[NFC_ATTR_VENDOR_DATA]) {
+		data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]);
 		data_len = nla_len(info->attrs[NFC_ATTR_VENDOR_DATA]);
 		if (data_len == 0)
 			return -EINVAL;
 	} else {
+		data = NULL;
 		data_len = 0;
 	}
 
@@ -1533,12 +1536,92 @@
 		if (cmd->vendor_id != vid || cmd->subcmd != subcmd)
 			continue;
 
-		return cmd->doit(dev, data, data_len);
+		dev->cur_cmd_info = info;
+		err = cmd->doit(dev, data, data_len);
+		dev->cur_cmd_info = NULL;
+		return err;
 	}
 
 	return -EOPNOTSUPP;
 }
 
+/* message building helper */
+static inline void *nfc_hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
+				int flags, u8 cmd)
+{
+	/* since there is no private header, just add the generic one */
+	return genlmsg_put(skb, portid, seq, &nfc_genl_family, flags, cmd);
+}
+
+static struct sk_buff *
+__nfc_alloc_vendor_cmd_skb(struct nfc_dev *dev, int approxlen,
+			   u32 portid, u32 seq,
+			   enum nfc_attrs attr,
+			   u32 oui, u32 subcmd, gfp_t gfp)
+{
+	struct sk_buff *skb;
+	void *hdr;
+
+	skb = nlmsg_new(approxlen + 100, gfp);
+	if (!skb)
+		return NULL;
+
+	hdr = nfc_hdr_put(skb, portid, seq, 0, NFC_CMD_VENDOR);
+	if (!hdr) {
+		kfree_skb(skb);
+		return NULL;
+	}
+
+	if (nla_put_u32(skb, NFC_ATTR_DEVICE_INDEX, dev->idx))
+		goto nla_put_failure;
+	if (nla_put_u32(skb, NFC_ATTR_VENDOR_ID, oui))
+		goto nla_put_failure;
+	if (nla_put_u32(skb, NFC_ATTR_VENDOR_SUBCMD, subcmd))
+		goto nla_put_failure;
+
+	((void **)skb->cb)[0] = dev;
+	((void **)skb->cb)[1] = hdr;
+
+	return skb;
+
+nla_put_failure:
+	kfree_skb(skb);
+	return NULL;
+}
+
+struct sk_buff *__nfc_alloc_vendor_cmd_reply_skb(struct nfc_dev *dev,
+						 enum nfc_attrs attr,
+						 u32 oui, u32 subcmd,
+						 int approxlen)
+{
+	if (WARN_ON(!dev->cur_cmd_info))
+		return NULL;
+
+	return __nfc_alloc_vendor_cmd_skb(dev, approxlen,
+					  dev->cur_cmd_info->snd_portid,
+					  dev->cur_cmd_info->snd_seq, attr,
+					  oui, subcmd, GFP_KERNEL);
+}
+EXPORT_SYMBOL(__nfc_alloc_vendor_cmd_reply_skb);
+
+int nfc_vendor_cmd_reply(struct sk_buff *skb)
+{
+	struct nfc_dev *dev = ((void **)skb->cb)[0];
+	void *hdr = ((void **)skb->cb)[1];
+
+	/* clear CB data for netlink core to own from now on */
+	memset(skb->cb, 0, sizeof(skb->cb));
+
+	if (WARN_ON(!dev->cur_cmd_info)) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	genlmsg_end(skb, hdr);
+	return genlmsg_reply(skb, dev->cur_cmd_info);
+}
+EXPORT_SYMBOL(nfc_vendor_cmd_reply);
+
 static const struct genl_ops nfc_genl_ops[] = {
 	{
 		.cmd = NFC_CMD_GET_DEVICE,
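
A hypothetical vendor command handler showing how the helpers above pair up: the doit() allocates a reply skb bound to the in-flight request via cur_cmd_info, fills NFC_ATTR_VENDOR_DATA, and hands it back with nfc_vendor_cmd_reply(). All acme_* identifiers are illustrative:

	#define ACME_VENDOR_OUI		0x123456	/* hypothetical */
	#define ACME_SUBCMD_FW_INFO	0x01		/* hypothetical */

	static int acme_vendor_get_fw_info(struct nfc_dev *dev, void *data,
					   size_t data_len)
	{
		u8 fw_info[4] = { 0x01, 0x00, 0x00, 0x00 };	/* placeholder */
		struct sk_buff *msg;

		msg = __nfc_alloc_vendor_cmd_reply_skb(dev, NFC_ATTR_VENDOR_DATA,
						       ACME_VENDOR_OUI,
						       ACME_SUBCMD_FW_INFO,
						       sizeof(fw_info));
		if (!msg)
			return -ENOMEM;

		if (nla_put(msg, NFC_ATTR_VENDOR_DATA, sizeof(fw_info), fw_info)) {
			kfree_skb(msg);
			return -EMSGSIZE;
		}

		return nfc_vendor_cmd_reply(msg);
	}
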
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 1584040..af7cdef 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -31,10 +31,21 @@
 
 	  If unsure, say N.
 
+config OPENVSWITCH_CONNTRACK
+	bool "Open vSwitch conntrack action support"
+	depends on OPENVSWITCH
+	depends on NF_CONNTRACK
+	default OPENVSWITCH
+	---help---
+	  If you say Y here, then the Open vSwitch module will be able to pass
+	  packets through conntrack.
+
+	  Say N to exclude this support and reduce the binary size.
+
 config OPENVSWITCH_GRE
 	tristate "Open vSwitch GRE tunneling support"
 	depends on OPENVSWITCH
-	depends on NET_IPGRE_DEMUX
+	depends on NET_IPGRE
 	default OPENVSWITCH
 	---help---
 	  If you say Y here, then the Open vSwitch will be able to create GRE
@@ -59,7 +70,7 @@
 config OPENVSWITCH_GENEVE
 	tristate "Open vSwitch Geneve tunneling support"
 	depends on OPENVSWITCH
-	depends on GENEVE_CORE
+	depends on GENEVE
 	default OPENVSWITCH
 	---help---
 	  If you say Y here, then the Open vSwitch will be able to create geneve vport.
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
index 91b9478..5b5913b 100644
--- a/net/openvswitch/Makefile
+++ b/net/openvswitch/Makefile
@@ -15,6 +15,8 @@
 	vport-internal_dev.o \
 	vport-netdev.o
 
+openvswitch-$(CONFIG_OPENVSWITCH_CONNTRACK) += conntrack.o
+
+obj-$(CONFIG_OPENVSWITCH_VXLAN)+= vport-vxlan.o
 obj-$(CONFIG_OPENVSWITCH_GENEVE)+= vport-geneve.o
-obj-$(CONFIG_OPENVSWITCH_VXLAN)	+= vport-vxlan.o
 obj-$(CONFIG_OPENVSWITCH_GRE)	+= vport-gre.o
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index ee34f47..315f533 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -22,6 +22,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/openvswitch.h>
+#include <linux/netfilter_ipv6.h>
 #include <linux/sctp.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
@@ -29,8 +30,10 @@
 #include <linux/if_arp.h>
 #include <linux/if_vlan.h>
 
+#include <net/dst.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <net/ip6_fib.h>
 #include <net/checksum.h>
 #include <net/dsfield.h>
 #include <net/mpls.h>
@@ -38,6 +41,7 @@
 
 #include "datapath.h"
 #include "flow.h"
+#include "conntrack.h"
 #include "vport.h"
 
 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
@@ -52,6 +56,20 @@
 	struct sw_flow_key pkt_key;
 };
 
+#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
+struct ovs_frag_data {
+	unsigned long dst;
+	struct vport *vport;
+	struct ovs_skb_cb cb;
+	__be16 inner_protocol;
+	__u16 vlan_tci;
+	__be16 vlan_proto;
+	unsigned int l2_len;
+	u8 l2_data[MAX_L2_LEN];
+};
+
+static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
+
 #define DEFERRED_ACTION_FIFO_SIZE 10
 struct action_fifo {
 	int head;
@@ -185,10 +203,6 @@
 	return 0;
 }
 
-/* 'KEY' must not have any bits set outside of the 'MASK' */
-#define MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
-#define SET_MASKED(OLD, KEY, MASK) ((OLD) = MASKED(OLD, KEY, MASK))
-
 static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
 		    const __be32 *mpls_lse, const __be32 *mask)
 {
@@ -201,7 +215,7 @@
 		return err;
 
 	stack = (__be32 *)skb_mpls_header(skb);
-	lse = MASKED(*stack, *mpls_lse, *mask);
+	lse = OVS_MASKED(*stack, *mpls_lse, *mask);
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
 		__be32 diff[] = { ~(*stack), lse };
 
@@ -244,9 +258,9 @@
 	const u16 *src = (const u16 *)src_;
 	const u16 *mask = (const u16 *)mask_;
 
-	SET_MASKED(dst[0], src[0], mask[0]);
-	SET_MASKED(dst[1], src[1], mask[1]);
-	SET_MASKED(dst[2], src[2], mask[2]);
+	OVS_SET_MASKED(dst[0], src[0], mask[0]);
+	OVS_SET_MASKED(dst[1], src[1], mask[1]);
+	OVS_SET_MASKED(dst[2], src[2], mask[2]);
 }
 
 static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
@@ -284,14 +298,14 @@
 	if (nh->protocol == IPPROTO_TCP) {
 		if (likely(transport_len >= sizeof(struct tcphdr)))
 			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
-						 addr, new_addr, 1);
+						 addr, new_addr, true);
 	} else if (nh->protocol == IPPROTO_UDP) {
 		if (likely(transport_len >= sizeof(struct udphdr))) {
 			struct udphdr *uh = udp_hdr(skb);
 
 			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 				inet_proto_csum_replace4(&uh->check, skb,
-							 addr, new_addr, 1);
+							 addr, new_addr, true);
 				if (!uh->check)
 					uh->check = CSUM_MANGLED_0;
 			}
@@ -316,14 +330,14 @@
 	if (l4_proto == NEXTHDR_TCP) {
 		if (likely(transport_len >= sizeof(struct tcphdr)))
 			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
-						  addr, new_addr, 1);
+						  addr, new_addr, true);
 	} else if (l4_proto == NEXTHDR_UDP) {
 		if (likely(transport_len >= sizeof(struct udphdr))) {
 			struct udphdr *uh = udp_hdr(skb);
 
 			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 				inet_proto_csum_replace16(&uh->check, skb,
-							  addr, new_addr, 1);
+							  addr, new_addr, true);
 				if (!uh->check)
 					uh->check = CSUM_MANGLED_0;
 			}
@@ -331,17 +345,17 @@
 	} else if (l4_proto == NEXTHDR_ICMP) {
 		if (likely(transport_len >= sizeof(struct icmp6hdr)))
 			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
-						  skb, addr, new_addr, 1);
+						  skb, addr, new_addr, true);
 	}
 }
 
 static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
 			   const __be32 mask[4], __be32 masked[4])
 {
-	masked[0] = MASKED(old[0], addr[0], mask[0]);
-	masked[1] = MASKED(old[1], addr[1], mask[1]);
-	masked[2] = MASKED(old[2], addr[2], mask[2]);
-	masked[3] = MASKED(old[3], addr[3], mask[3]);
+	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
+	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
+	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
+	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
 }
 
 static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
@@ -358,15 +372,15 @@
 static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
 {
 	/* Bits 21-24 are always unmasked, so this retains their values. */
-	SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
-	SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
-	SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
+	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
+	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
+	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
 }
 
 static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
 		       u8 mask)
 {
-	new_ttl = MASKED(nh->ttl, new_ttl, mask);
+	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);
 
 	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
 	nh->ttl = new_ttl;
@@ -392,7 +406,7 @@
 	 * makes sense to check if the value actually changed.
 	 */
 	if (mask->ipv4_src) {
-		new_addr = MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
+		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
 
 		if (unlikely(new_addr != nh->saddr)) {
 			set_ip_addr(skb, nh, &nh->saddr, new_addr);
@@ -400,7 +414,7 @@
 		}
 	}
 	if (mask->ipv4_dst) {
-		new_addr = MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
+		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
 
 		if (unlikely(new_addr != nh->daddr)) {
 			set_ip_addr(skb, nh, &nh->daddr, new_addr);
@@ -488,7 +502,8 @@
 		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
 	}
 	if (mask->ipv6_hlimit) {
-		SET_MASKED(nh->hop_limit, key->ipv6_hlimit, mask->ipv6_hlimit);
+		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
+			       mask->ipv6_hlimit);
 		flow_key->ip.ttl = nh->hop_limit;
 	}
 	return 0;
@@ -498,7 +513,7 @@
 static void set_tp_port(struct sk_buff *skb, __be16 *port,
 			__be16 new_port, __sum16 *check)
 {
-	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
+	inet_proto_csum_replace2(check, skb, *port, new_port, false);
 	*port = new_port;
 }
 
@@ -517,8 +532,8 @@
 
 	uh = udp_hdr(skb);
 	/* Either of the masks is non-zero, so do not bother checking them. */
-	src = MASKED(uh->source, key->udp_src, mask->udp_src);
-	dst = MASKED(uh->dest, key->udp_dst, mask->udp_dst);
+	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
+	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);
 
 	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
 		if (likely(src != uh->source)) {
@@ -558,12 +573,12 @@
 		return err;
 
 	th = tcp_hdr(skb);
-	src = MASKED(th->source, key->tcp_src, mask->tcp_src);
+	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
 	if (likely(src != th->source)) {
 		set_tp_port(skb, &th->source, src, &th->check);
 		flow_key->tp.src = src;
 	}
-	dst = MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
+	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
 	if (likely(dst != th->dest)) {
 		set_tp_port(skb, &th->dest, dst, &th->check);
 		flow_key->tp.dst = dst;
@@ -590,8 +605,8 @@
 	old_csum = sh->checksum;
 	old_correct_csum = sctp_compute_cksum(skb, sctphoff);
 
-	sh->source = MASKED(sh->source, key->sctp_src, mask->sctp_src);
-	sh->dest = MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
+	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
+	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
 
 	new_csum = sctp_compute_cksum(skb, sctphoff);
 
@@ -605,27 +620,159 @@
 	return 0;
 }
 
-static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
+static int ovs_vport_output(struct sock *sock, struct sk_buff *skb)
+{
+	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
+	struct vport *vport = data->vport;
+
+	if (skb_cow_head(skb, data->l2_len) < 0) {
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	__skb_dst_copy(skb, data->dst);
+	*OVS_CB(skb) = data->cb;
+	skb->inner_protocol = data->inner_protocol;
+	skb->vlan_tci = data->vlan_tci;
+	skb->vlan_proto = data->vlan_proto;
+
+	/* Reconstruct the MAC header. */
+	skb_push(skb, data->l2_len);
+	memcpy(skb->data, &data->l2_data, data->l2_len);
+	ovs_skb_postpush_rcsum(skb, skb->data, data->l2_len);
+	skb_reset_mac_header(skb);
+
+	ovs_vport_send(vport, skb);
+	return 0;
+}
+
+static unsigned int
+ovs_dst_get_mtu(const struct dst_entry *dst)
+{
+	return dst->dev->mtu;
+}
+
+static struct dst_ops ovs_dst_ops = {
+	.family = AF_UNSPEC,
+	.mtu = ovs_dst_get_mtu,
+};
+
+/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
+ * ovs_vport_output(), which is called once per fragmented packet.
+ */
+static void prepare_frag(struct vport *vport, struct sk_buff *skb)
+{
+	unsigned int hlen = skb_network_offset(skb);
+	struct ovs_frag_data *data;
+
+	data = this_cpu_ptr(&ovs_frag_data_storage);
+	data->dst = skb->_skb_refdst;
+	data->vport = vport;
+	data->cb = *OVS_CB(skb);
+	data->inner_protocol = skb->inner_protocol;
+	data->vlan_tci = skb->vlan_tci;
+	data->vlan_proto = skb->vlan_proto;
+	data->l2_len = hlen;
+	memcpy(&data->l2_data, skb->data, hlen);
+
+	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+	skb_pull(skb, hlen);
+}
+
+static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
+			 __be16 ethertype)
+{
+	if (skb_network_offset(skb) > MAX_L2_LEN) {
+		OVS_NLERR(1, "L2 header too long to fragment");
+		return;
+	}
+
+	if (ethertype == htons(ETH_P_IP)) {
+		struct dst_entry ovs_dst;
+		unsigned long orig_dst;
+
+		prepare_frag(vport, skb);
+		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
+			 DST_OBSOLETE_NONE, DST_NOCOUNT);
+		ovs_dst.dev = vport->dev;
+
+		orig_dst = skb->_skb_refdst;
+		skb_dst_set_noref(skb, &ovs_dst);
+		IPCB(skb)->frag_max_size = mru;
+
+		ip_do_fragment(skb->sk, skb, ovs_vport_output);
+		refdst_drop(orig_dst);
+	} else if (ethertype == htons(ETH_P_IPV6)) {
+		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
+		unsigned long orig_dst;
+		struct rt6_info ovs_rt;
+
+		if (!v6ops) {
+			kfree_skb(skb);
+			return;
+		}
+
+		prepare_frag(vport, skb);
+		memset(&ovs_rt, 0, sizeof(ovs_rt));
+		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
+			 DST_OBSOLETE_NONE, DST_NOCOUNT);
+		ovs_rt.dst.dev = vport->dev;
+
+		orig_dst = skb->_skb_refdst;
+		skb_dst_set_noref(skb, &ovs_rt.dst);
+		IP6CB(skb)->frag_max_size = mru;
+
+		v6ops->fragment(skb->sk, skb, ovs_vport_output);
+		refdst_drop(orig_dst);
+	} else {
+		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
+			  ovs_vport_name(vport), ntohs(ethertype), mru,
+			  vport->dev->mtu);
+		kfree_skb(skb);
+	}
+}
+
+static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
+		      struct sw_flow_key *key)
 {
 	struct vport *vport = ovs_vport_rcu(dp, out_port);
 
-	if (likely(vport))
-		ovs_vport_send(vport, skb);
-	else
+	if (likely(vport)) {
+		u16 mru = OVS_CB(skb)->mru;
+
+		if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
+			ovs_vport_send(vport, skb);
+		} else if (mru <= vport->dev->mtu) {
+			__be16 ethertype = key->eth.type;
+
+			if (!is_flow_key_valid(key)) {
+				if (eth_p_mpls(skb->protocol))
+					ethertype = skb->inner_protocol;
+				else
+					ethertype = vlan_get_protocol(skb);
+			}
+
+			ovs_fragment(vport, skb, mru, ethertype);
+		} else {
+			kfree_skb(skb);
+		}
+	} else {
 		kfree_skb(skb);
+	}
 }
 
 static int output_userspace(struct datapath *dp, struct sk_buff *skb,
 			    struct sw_flow_key *key, const struct nlattr *attr,
 			    const struct nlattr *actions, int actions_len)
 {
-	struct ovs_tunnel_info info;
+	struct ip_tunnel_info info;
 	struct dp_upcall_info upcall;
 	const struct nlattr *a;
 	int rem;
 
 	memset(&upcall, 0, sizeof(upcall));
 	upcall.cmd = OVS_PACKET_CMD_ACTION;
+	upcall.mru = OVS_CB(skb)->mru;
 
 	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
 		 a = nla_next(a, &rem)) {
@@ -646,11 +793,13 @@
 			if (vport) {
 				int err;
 
+				upcall.egress_tun_info = &info;
 				err = ovs_vport_get_egress_tun_info(vport, skb,
-								    &info);
-				if (!err)
-					upcall.egress_tun_info = &info;
+								    &upcall);
+				if (err)
+					upcall.egress_tun_info = NULL;
 			}
+
 			break;
 		}
 
@@ -677,9 +826,12 @@
 
 	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
 		 a = nla_next(a, &rem)) {
+		u32 probability;
+
 		switch (nla_type(a)) {
 		case OVS_SAMPLE_ATTR_PROBABILITY:
-			if (prandom_u32() >= nla_get_u32(a))
+			probability = nla_get_u32(a);
+			if (!probability || prandom_u32() > probability)
 				return 0;
 			break;
 
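
The rewritten check also pins down the edge cases: a probability of 0 is now an explicit early return, and UINT32_MAX always samples (the old `prandom_u32() >= probability` comparison could still skip one packet in 2^32). A standalone sketch of the resulting predicate:

	/* Returns true when the packet should be sampled. */
	static bool should_sample(u32 probability)
	{
		return probability && prandom_u32() <= probability;
	}
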
@@ -741,7 +893,11 @@
 {
 	/* Only tunnel set execution is supported without a mask. */
 	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
-		OVS_CB(skb)->egress_tun_info = nla_data(a);
+		struct ovs_tunnel_info *tun = nla_data(a);
+
+		skb_dst_drop(skb);
+		dst_hold((struct dst_entry *)tun->tun_dst);
+		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
 		return 0;
 	}
 
@@ -759,12 +915,13 @@
 
 	switch (nla_type(a)) {
 	case OVS_KEY_ATTR_PRIORITY:
-		SET_MASKED(skb->priority, nla_get_u32(a), *get_mask(a, u32 *));
+		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
+			       *get_mask(a, u32 *));
 		flow_key->phy.priority = skb->priority;
 		break;
 
 	case OVS_KEY_ATTR_SKB_MARK:
-		SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
+		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
 		flow_key->phy.skb_mark = skb->mark;
 		break;
 
@@ -807,6 +964,13 @@
 		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
 								    __be32 *));
 		break;
+
+	case OVS_KEY_ATTR_CT_STATE:
+	case OVS_KEY_ATTR_CT_ZONE:
+	case OVS_KEY_ATTR_CT_MARK:
+	case OVS_KEY_ATTR_CT_LABEL:
+		err = -EINVAL;
+		break;
 	}
 
 	return err;
@@ -876,7 +1040,7 @@
 			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);
 
 			if (out_skb)
-				do_output(dp, out_skb, prev_port);
+				do_output(dp, out_skb, prev_port, key);
 
 			prev_port = -1;
 		}
@@ -933,6 +1097,15 @@
 		case OVS_ACTION_ATTR_SAMPLE:
 			err = sample(dp, skb, key, a, attr, len);
 			break;
+
+		case OVS_ACTION_ATTR_CT:
+			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
+					     nla_data(a));
+
+			/* Hide stolen IP fragments from user space. */
+			if (err == -EINPROGRESS)
+				return 0;
+			break;
 		}
 
 		if (unlikely(err)) {
@@ -942,7 +1115,7 @@
 	}
 
 	if (prev_port != -1)
-		do_output(dp, skb, prev_port);
+		do_output(dp, skb, prev_port, key);
 	else
 		consume_skb(skb);
 
@@ -984,7 +1157,6 @@
 	int err;
 
 	this_cpu_inc(exec_actions_level);
-	OVS_CB(skb)->egress_tun_info = NULL;
 	err = do_execute_actions(dp, skb, key,
 				 acts->actions, acts->actions_len);
 
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
new file mode 100644
index 0000000..e8e524a
--- /dev/null
+++ b/net/openvswitch/conntrack.c
@@ -0,0 +1,755 @@
+/*
+ * Copyright (c) 2015 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/openvswitch.h>
+#include <net/ip.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+
+#include "datapath.h"
+#include "conntrack.h"
+#include "flow.h"
+#include "flow_netlink.h"
+
+struct ovs_ct_len_tbl {
+	size_t maxlen;
+	size_t minlen;
+};
+
+/* Metadata mark for masked write to conntrack mark */
+struct md_mark {
+	u32 value;
+	u32 mask;
+};
+
+/* Metadata label for masked write to conntrack label. */
+struct md_label {
+	struct ovs_key_ct_label value;
+	struct ovs_key_ct_label mask;
+};
+
+/* Conntrack action context for execution. */
+struct ovs_conntrack_info {
+	struct nf_conntrack_helper *helper;
+	struct nf_conntrack_zone zone;
+	struct nf_conn *ct;
+	u32 flags;
+	u16 family;
+	struct md_mark mark;
+	struct md_label label;
+};
+
+static u16 key_to_nfproto(const struct sw_flow_key *key)
+{
+	switch (ntohs(key->eth.type)) {
+	case ETH_P_IP:
+		return NFPROTO_IPV4;
+	case ETH_P_IPV6:
+		return NFPROTO_IPV6;
+	default:
+		return NFPROTO_UNSPEC;
+	}
+}
+
+/* Map SKB connection state into the values used by flow definition. */
+static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo)
+{
+	u8 ct_state = OVS_CS_F_TRACKED;
+
+	switch (ctinfo) {
+	case IP_CT_ESTABLISHED_REPLY:
+	case IP_CT_RELATED_REPLY:
+	case IP_CT_NEW_REPLY:
+		ct_state |= OVS_CS_F_REPLY_DIR;
+		break;
+	default:
+		break;
+	}
+
+	switch (ctinfo) {
+	case IP_CT_ESTABLISHED:
+	case IP_CT_ESTABLISHED_REPLY:
+		ct_state |= OVS_CS_F_ESTABLISHED;
+		break;
+	case IP_CT_RELATED:
+	case IP_CT_RELATED_REPLY:
+		ct_state |= OVS_CS_F_RELATED;
+		break;
+	case IP_CT_NEW:
+	case IP_CT_NEW_REPLY:
+		ct_state |= OVS_CS_F_NEW;
+		break;
+	default:
+		break;
+	}
+
+	return ct_state;
+}
+
+static u32 ovs_ct_get_mark(const struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
+	return ct ? ct->mark : 0;
+#else
+	return 0;
+#endif
+}
+
+static void ovs_ct_get_label(const struct nf_conn *ct,
+			     struct ovs_key_ct_label *label)
+{
+	struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;
+
+	if (cl) {
+		size_t len = cl->words * sizeof(long);
+
+		if (len > OVS_CT_LABEL_LEN)
+			len = OVS_CT_LABEL_LEN;
+		else if (len < OVS_CT_LABEL_LEN)
+			memset(label, 0, OVS_CT_LABEL_LEN);
+		memcpy(label, cl->bits, len);
+	} else {
+		memset(label, 0, OVS_CT_LABEL_LEN);
+	}
+}
+
+static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
+				const struct nf_conntrack_zone *zone,
+				const struct nf_conn *ct)
+{
+	key->ct.state = state;
+	key->ct.zone = zone->id;
+	key->ct.mark = ovs_ct_get_mark(ct);
+	ovs_ct_get_label(ct, &key->ct.label);
+}
+
+/* Update 'key' based on skb->nfct. If 'post_ct' is true, then OVS has
+ * previously sent the packet to conntrack via the ct action.
+ */
+static void ovs_ct_update_key(const struct sk_buff *skb,
+			      struct sw_flow_key *key, bool post_ct)
+{
+	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct;
+	u8 state = 0;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (ct) {
+		state = ovs_ct_get_state(ctinfo);
+		if (ct->master)
+			state |= OVS_CS_F_RELATED;
+		zone = nf_ct_zone(ct);
+	} else if (post_ct) {
+		state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID;
+	}
+	__ovs_ct_update_key(key, state, zone, ct);
+}
+
+void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
+{
+	ovs_ct_update_key(skb, key, false);
+}
+
+int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb)
+{
+	if (nla_put_u8(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state))
+		return -EMSGSIZE;
+
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
+	    nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, key->ct.zone))
+		return -EMSGSIZE;
+
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
+	    nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, key->ct.mark))
+		return -EMSGSIZE;
+
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+	    nla_put(skb, OVS_KEY_ATTR_CT_LABEL, sizeof(key->ct.label),
+		    &key->ct.label))
+		return -EMSGSIZE;
+
+	return 0;
+}
+
+static int ovs_ct_set_mark(struct sk_buff *skb, struct sw_flow_key *key,
+			   u32 ct_mark, u32 mask)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct;
+	u32 new_mark;
+
+	/* The connection could be invalid, in which case set_mark is a no-op. */
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct)
+		return 0;
+
+	new_mark = ct_mark | (ct->mark & ~(mask));
+	if (ct->mark != new_mark) {
+		ct->mark = new_mark;
+		nf_conntrack_event_cache(IPCT_MARK, ct);
+		key->ct.mark = new_mark;
+	}
+
+	return 0;
+#else
+	return -ENOTSUPP;
+#endif
+}
+
+static int ovs_ct_set_label(struct sk_buff *skb, struct sw_flow_key *key,
+			    const struct ovs_key_ct_label *label,
+			    const struct ovs_key_ct_label *mask)
+{
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn_labels *cl;
+	struct nf_conn *ct;
+	int err;
+
+	if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS))
+		return -ENOTSUPP;
+
+	/* The connection could be invalid, in which case set_label is a no-op. */
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct)
+		return 0;
+
+	cl = nf_ct_labels_find(ct);
+	if (!cl) {
+		nf_ct_labels_ext_add(ct);
+		cl = nf_ct_labels_find(ct);
+	}
+	if (!cl || cl->words * sizeof(long) < OVS_CT_LABEL_LEN)
+		return -ENOSPC;
+
+	err = nf_connlabels_replace(ct, (u32 *)label, (u32 *)mask,
+				    OVS_CT_LABEL_LEN / sizeof(u32));
+	if (err)
+		return err;
+
+	ovs_ct_get_label(ct, &key->ct.label);
+	return 0;
+}
+
+/* 'skb' should already be pulled to nh_ofs. */
+static int ovs_ct_helper(struct sk_buff *skb, u16 proto)
+{
+	const struct nf_conntrack_helper *helper;
+	const struct nf_conn_help *help;
+	enum ip_conntrack_info ctinfo;
+	unsigned int protoff;
+	struct nf_conn *ct;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct || ctinfo == IP_CT_RELATED_REPLY)
+		return NF_ACCEPT;
+
+	help = nfct_help(ct);
+	if (!help)
+		return NF_ACCEPT;
+
+	helper = rcu_dereference(help->helper);
+	if (!helper)
+		return NF_ACCEPT;
+
+	switch (proto) {
+	case NFPROTO_IPV4:
+		protoff = ip_hdrlen(skb);
+		break;
+	case NFPROTO_IPV6: {
+		u8 nexthdr = ipv6_hdr(skb)->nexthdr;
+		__be16 frag_off;
+
+		protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
+					   &nexthdr, &frag_off);
+		if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
+			pr_debug("proto header not found\n");
+			return NF_ACCEPT;
+		}
+		break;
+	}
+	default:
+		WARN_ONCE(1, "helper invoked on non-IP family!");
+		return NF_DROP;
+	}
+
+	return helper->help(skb, protoff, ct, ctinfo);
+}
+
+static int handle_fragments(struct net *net, struct sw_flow_key *key,
+			    u16 zone, struct sk_buff *skb)
+{
+	struct ovs_skb_cb ovs_cb = *OVS_CB(skb);
+
+	if (key->eth.type == htons(ETH_P_IP)) {
+		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
+		int err;
+
+		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+		err = ip_defrag(skb, user);
+		if (err)
+			return err;
+
+		ovs_cb.mru = IPCB(skb)->frag_max_size;
+	} else if (key->eth.type == htons(ETH_P_IPV6)) {
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
+		struct sk_buff *reasm;
+
+		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
+		reasm = nf_ct_frag6_gather(skb, user);
+		if (!reasm)
+			return -EINPROGRESS;
+
+		if (skb == reasm)
+			return -EINVAL;
+
+		key->ip.proto = ipv6_hdr(reasm)->nexthdr;
+		skb_morph(skb, reasm);
+		consume_skb(reasm);
+		ovs_cb.mru = IP6CB(skb)->frag_max_size;
+#else
+		return -EPFNOSUPPORT;
+#endif
+	} else {
+		return -EPFNOSUPPORT;
+	}
+
+	key->ip.frag = OVS_FRAG_TYPE_NONE;
+	skb_clear_hash(skb);
+	skb->ignore_df = 1;
+	*OVS_CB(skb) = ovs_cb;
+
+	return 0;
+}
+
+static struct nf_conntrack_expect *
+ovs_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone,
+		   u16 proto, const struct sk_buff *skb)
+{
+	struct nf_conntrack_tuple tuple;
+
+	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, &tuple))
+		return NULL;
+	return __nf_ct_expect_find(net, zone, &tuple);
+}
+
+/* Determine whether skb->nfct is equal to the result of conntrack lookup. */
+static bool skb_nfct_cached(const struct net *net, const struct sk_buff *skb,
+			    const struct ovs_conntrack_info *info)
+{
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct)
+		return false;
+	if (!net_eq(net, read_pnet(&ct->ct_net)))
+		return false;
+	if (!nf_ct_zone_equal_any(info->ct, nf_ct_zone(ct)))
+		return false;
+	if (info->helper) {
+		struct nf_conn_help *help;
+
+		help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
+		if (help && rcu_access_pointer(help->helper) != info->helper)
+			return false;
+	}
+
+	return true;
+}
+
+static int __ovs_ct_lookup(struct net *net, const struct sw_flow_key *key,
+			   const struct ovs_conntrack_info *info,
+			   struct sk_buff *skb)
+{
+	/* If we are recirculating packets to match on conntrack fields and
+	 * committing with a separate conntrack action, then we don't need to
+	 * actually run the packet through conntrack twice unless it's for a
+	 * different zone.
+	 */
+	if (!skb_nfct_cached(net, skb, info)) {
+		struct nf_conn *tmpl = info->ct;
+
+		/* Associate skb with specified zone. */
+		if (tmpl) {
+			if (skb->nfct)
+				nf_conntrack_put(skb->nfct);
+			nf_conntrack_get(&tmpl->ct_general);
+			skb->nfct = &tmpl->ct_general;
+			skb->nfctinfo = IP_CT_NEW;
+		}
+
+		if (nf_conntrack_in(net, info->family, NF_INET_PRE_ROUTING,
+				    skb) != NF_ACCEPT)
+			return -ENOENT;
+
+		if (ovs_ct_helper(skb, info->family) != NF_ACCEPT) {
+			WARN_ONCE(1, "helper rejected packet");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* Lookup connection and read fields into key. */
+static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
+			 const struct ovs_conntrack_info *info,
+			 struct sk_buff *skb)
+{
+	struct nf_conntrack_expect *exp;
+
+	exp = ovs_ct_expect_find(net, &info->zone, info->family, skb);
+	if (exp) {
+		u8 state;
+
+		state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED;
+		__ovs_ct_update_key(key, state, &info->zone, exp->master);
+	} else {
+		int err;
+
+		err = __ovs_ct_lookup(net, key, info, skb);
+		if (err)
+			return err;
+
+		ovs_ct_update_key(skb, key, true);
+	}
+
+	return 0;
+}
+
+/* Lookup connection and confirm if unconfirmed. */
+static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
+			 const struct ovs_conntrack_info *info,
+			 struct sk_buff *skb)
+{
+	u8 state;
+	int err;
+
+	state = key->ct.state;
+	if (key->ct.zone == info->zone.id &&
+	    ((state & OVS_CS_F_TRACKED) && !(state & OVS_CS_F_NEW))) {
+		/* Previous lookup has shown that this connection is already
+		 * tracked and committed. Skip committing.
+		 */
+		return 0;
+	}
+
+	err = __ovs_ct_lookup(net, key, info, skb);
+	if (err)
+		return err;
+	if (nf_conntrack_confirm(skb) != NF_ACCEPT)
+		return -EINVAL;
+
+	ovs_ct_update_key(skb, key, true);
+
+	return 0;
+}
+
+static bool label_nonzero(const struct ovs_key_ct_label *label)
+{
+	size_t i;
+
+	for (i = 0; i < sizeof(*label); i++)
+		if (label->ct_label[i])
+			return true;
+
+	return false;
+}
+
+int ovs_ct_execute(struct net *net, struct sk_buff *skb,
+		   struct sw_flow_key *key,
+		   const struct ovs_conntrack_info *info)
+{
+	int nh_ofs;
+	int err;
+
+	/* The conntrack module expects to be working at L3. */
+	nh_ofs = skb_network_offset(skb);
+	skb_pull(skb, nh_ofs);
+
+	if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
+		err = handle_fragments(net, key, info->zone.id, skb);
+		if (err)
+			return err;
+	}
+
+	if (info->flags & OVS_CT_F_COMMIT)
+		err = ovs_ct_commit(net, key, info, skb);
+	else
+		err = ovs_ct_lookup(net, key, info, skb);
+	if (err)
+		goto err;
+
+	if (info->mark.mask) {
+		err = ovs_ct_set_mark(skb, key, info->mark.value,
+				      info->mark.mask);
+		if (err)
+			goto err;
+	}
+	if (label_nonzero(&info->label.mask))
+		err = ovs_ct_set_label(skb, key, &info->label.value,
+				       &info->label.mask);
+err:
+	skb_push(skb, nh_ofs);
+	return err;
+}
+
+static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
+			     const struct sw_flow_key *key, bool log)
+{
+	struct nf_conntrack_helper *helper;
+	struct nf_conn_help *help;
+
+	helper = nf_conntrack_helper_try_module_get(name, info->family,
+						    key->ip.proto);
+	if (!helper) {
+		OVS_NLERR(log, "Unknown helper \"%s\"", name);
+		return -EINVAL;
+	}
+
+	help = nf_ct_helper_ext_add(info->ct, helper, GFP_KERNEL);
+	if (!help) {
+		module_put(helper->me);
+		return -ENOMEM;
+	}
+
+	rcu_assign_pointer(help->helper, helper);
+	info->helper = helper;
+	return 0;
+}
+
+static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
+	[OVS_CT_ATTR_FLAGS]	= { .minlen = sizeof(u32),
+				    .maxlen = sizeof(u32) },
+	[OVS_CT_ATTR_ZONE]	= { .minlen = sizeof(u16),
+				    .maxlen = sizeof(u16) },
+	[OVS_CT_ATTR_MARK]	= { .minlen = sizeof(struct md_mark),
+				    .maxlen = sizeof(struct md_mark) },
+	[OVS_CT_ATTR_LABEL]	= { .minlen = sizeof(struct md_label),
+				    .maxlen = sizeof(struct md_label) },
+	[OVS_CT_ATTR_HELPER]	= { .minlen = 1,
+				    .maxlen = NF_CT_HELPER_NAME_LEN }
+};
+
+static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
+		    const char **helper, bool log)
+{
+	struct nlattr *a;
+	int rem;
+
+	nla_for_each_nested(a, attr, rem) {
+		int type = nla_type(a);
+		int maxlen = ovs_ct_attr_lens[type].maxlen;
+		int minlen = ovs_ct_attr_lens[type].minlen;
+
+		if (type > OVS_CT_ATTR_MAX) {
+			OVS_NLERR(log,
+				  "Unknown conntrack attr (type=%d, max=%d)",
+				  type, OVS_CT_ATTR_MAX);
+			return -EINVAL;
+		}
+		if (nla_len(a) < minlen || nla_len(a) > maxlen) {
+			OVS_NLERR(log,
+				  "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
+				  type, nla_len(a), maxlen);
+			return -EINVAL;
+		}
+
+		switch (type) {
+		case OVS_CT_ATTR_FLAGS:
+			info->flags = nla_get_u32(a);
+			break;
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+		case OVS_CT_ATTR_ZONE:
+			info->zone.id = nla_get_u16(a);
+			break;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_MARK
+		case OVS_CT_ATTR_MARK: {
+			struct md_mark *mark = nla_data(a);
+
+			info->mark = *mark;
+			break;
+		}
+#endif
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+		case OVS_CT_ATTR_LABEL: {
+			struct md_label *label = nla_data(a);
+
+			info->label = *label;
+			break;
+		}
+#endif
+		case OVS_CT_ATTR_HELPER:
+			*helper = nla_data(a);
+			if (!memchr(*helper, '\0', nla_len(a))) {
+				OVS_NLERR(log, "Invalid conntrack helper");
+				return -EINVAL;
+			}
+			break;
+		default:
+			OVS_NLERR(log, "Unknown conntrack attr (%d)",
+				  type);
+			return -EINVAL;
+		}
+	}
+
+	if (rem > 0) {
+		OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr)
+{
+	if (attr == OVS_KEY_ATTR_CT_STATE)
+		return true;
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
+	    attr == OVS_KEY_ATTR_CT_ZONE)
+		return true;
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
+	    attr == OVS_KEY_ATTR_CT_MARK)
+		return true;
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+	    attr == OVS_KEY_ATTR_CT_LABEL) {
+		struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+
+		return ovs_net->xt_label;
+	}
+
+	return false;
+}
+
+int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
+		       const struct sw_flow_key *key,
+		       struct sw_flow_actions **sfa,  bool log)
+{
+	struct ovs_conntrack_info ct_info;
+	const char *helper = NULL;
+	u16 family;
+	int err;
+
+	family = key_to_nfproto(key);
+	if (family == NFPROTO_UNSPEC) {
+		OVS_NLERR(log, "ct family unspecified");
+		return -EINVAL;
+	}
+
+	memset(&ct_info, 0, sizeof(ct_info));
+	ct_info.family = family;
+
+	nf_ct_zone_init(&ct_info.zone, NF_CT_DEFAULT_ZONE_ID,
+			NF_CT_DEFAULT_ZONE_DIR, 0);
+
+	err = parse_ct(attr, &ct_info, &helper, log);
+	if (err)
+		return err;
+
+	/* Set up template for tracking connections in specific zones. */
+	ct_info.ct = nf_ct_tmpl_alloc(net, &ct_info.zone, GFP_KERNEL);
+	if (!ct_info.ct) {
+		OVS_NLERR(log, "Failed to allocate conntrack template");
+		return -ENOMEM;
+	}
+	if (helper) {
+		err = ovs_ct_add_helper(&ct_info, helper, key, log);
+		if (err)
+			goto err_free_ct;
+	}
+
+	err = ovs_nla_add_action(sfa, OVS_ACTION_ATTR_CT, &ct_info,
+				 sizeof(ct_info), log);
+	if (err)
+		goto err_free_ct;
+
+	__set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
+	nf_conntrack_get(&ct_info.ct->ct_general);
+	return 0;
+err_free_ct:
+	nf_conntrack_free(ct_info.ct);
+	return err;
+}
+
+int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
+			  struct sk_buff *skb)
+{
+	struct nlattr *start;
+
+	start = nla_nest_start(skb, OVS_ACTION_ATTR_CT);
+	if (!start)
+		return -EMSGSIZE;
+
+	if (nla_put_u32(skb, OVS_CT_ATTR_FLAGS, ct_info->flags))
+		return -EMSGSIZE;
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
+	    nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
+		return -EMSGSIZE;
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
+	    nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
+		    &ct_info->mark))
+		return -EMSGSIZE;
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+	    nla_put(skb, OVS_CT_ATTR_LABEL, sizeof(ct_info->label),
+		    &ct_info->label))
+		return -EMSGSIZE;
+	if (ct_info->helper) {
+		if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
+				   ct_info->helper->name))
+			return -EMSGSIZE;
+	}
+
+	nla_nest_end(skb, start);
+
+	return 0;
+}
+
+void ovs_ct_free_action(const struct nlattr *a)
+{
+	struct ovs_conntrack_info *ct_info = nla_data(a);
+
+	if (ct_info->helper)
+		module_put(ct_info->helper->me);
+	if (ct_info->ct)
+		nf_ct_put(ct_info->ct);
+}
+
+void ovs_ct_init(struct net *net)
+{
+	unsigned int n_bits = sizeof(struct ovs_key_ct_label) * BITS_PER_BYTE;
+	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+
+	if (nf_connlabels_get(net, n_bits)) {
+		ovs_net->xt_label = false;
+		OVS_NLERR(true, "Failed to set connlabel length");
+	} else {
+		ovs_net->xt_label = true;
+	}
+}
+
+void ovs_ct_exit(struct net *net)
+{
+	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+
+	if (ovs_net->xt_label)
+		nf_connlabels_put(net);
+}
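
To make the ctinfo-to-ct_state mapping in ovs_ct_get_state() concrete, a small self-check sketch (it calls the static helper, so it would have to live inside this file):

	static void ct_state_demo(void)
	{
		u8 st = ovs_ct_get_state(IP_CT_ESTABLISHED_REPLY);

		/* A reply packet of an established connection is tracked,
		 * in the reply direction, and established. */
		WARN_ON(st != (OVS_CS_F_TRACKED | OVS_CS_F_REPLY_DIR |
			       OVS_CS_F_ESTABLISHED));
	}
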
diff --git a/net/openvswitch/conntrack.h b/net/openvswitch/conntrack.h
new file mode 100644
index 0000000..3cb3066
--- /dev/null
+++ b/net/openvswitch/conntrack.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef OVS_CONNTRACK_H
+#define OVS_CONNTRACK_H 1
+
+#include "flow.h"
+
+struct ovs_conntrack_info;
+enum ovs_key_attr;
+
+#if defined(CONFIG_OPENVSWITCH_CONNTRACK)
+void ovs_ct_init(struct net *);
+void ovs_ct_exit(struct net *);
+bool ovs_ct_verify(struct net *, enum ovs_key_attr attr);
+int ovs_ct_copy_action(struct net *, const struct nlattr *,
+		       const struct sw_flow_key *, struct sw_flow_actions **,
+		       bool log);
+int ovs_ct_action_to_attr(const struct ovs_conntrack_info *, struct sk_buff *);
+
+int ovs_ct_execute(struct net *, struct sk_buff *, struct sw_flow_key *,
+		   const struct ovs_conntrack_info *);
+
+void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key);
+int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb);
+void ovs_ct_free_action(const struct nlattr *a);
+#else
+#include <linux/errno.h>
+
+static inline void ovs_ct_init(struct net *net) { }
+
+static inline void ovs_ct_exit(struct net *net) { }
+
+static inline bool ovs_ct_verify(struct net *net, int attr)
+{
+	return false;
+}
+
+static inline int ovs_ct_copy_action(struct net *net, const struct nlattr *nla,
+				     const struct sw_flow_key *key,
+				     struct sw_flow_actions **acts, bool log)
+{
+	return -ENOTSUPP;
+}
+
+static inline int ovs_ct_action_to_attr(const struct ovs_conntrack_info *info,
+					struct sk_buff *skb)
+{
+	return -ENOTSUPP;
+}
+
+static inline int ovs_ct_execute(struct net *net, struct sk_buff *skb,
+				 struct sw_flow_key *key,
+				 const struct ovs_conntrack_info *info)
+{
+	return -ENOTSUPP;
+}
+
+static inline void ovs_ct_fill_key(const struct sk_buff *skb,
+				   struct sw_flow_key *key)
+{
+	key->ct.state = 0;
+	key->ct.zone = 0;
+	key->ct.mark = 0;
+	memset(&key->ct.label, 0, sizeof(key->ct.label));
+}
+
+static inline int ovs_ct_put_key(const struct sw_flow_key *key,
+				 struct sk_buff *skb)
+{
+	return 0;
+}
+
+static inline void ovs_ct_free_action(const struct nlattr *a) { }
+#endif
+#endif /* ovs_conntrack.h */
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index ff8c4a4..6fbd2de 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -176,7 +176,7 @@
 const char *ovs_dp_name(const struct datapath *dp)
 {
 	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
-	return vport->ops->get_name(vport);
+	return ovs_vport_name(vport);
 }
 
 static int get_dpifindex(const struct datapath *dp)
@@ -188,7 +188,7 @@
 
 	local = ovs_vport_rcu(dp, OVSP_LOCAL);
 	if (local)
-		ifindex = netdev_vport_priv(local)->dev->ifindex;
+		ifindex = local->dev->ifindex;
 	else
 		ifindex = 0;
 
@@ -275,6 +275,7 @@
 		memset(&upcall, 0, sizeof(upcall));
 		upcall.cmd = OVS_PACKET_CMD_MISS;
 		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
+		upcall.mru = OVS_CB(skb)->mru;
 		error = ovs_dp_upcall(dp, skb, key, &upcall);
 		if (unlikely(error))
 			kfree_skb(skb);
@@ -400,9 +401,23 @@
 	if (upcall_info->actions_len)
 		size += nla_total_size(upcall_info->actions_len);
 
+	/* OVS_PACKET_ATTR_MRU */
+	if (upcall_info->mru)
+		size += nla_total_size(sizeof(upcall_info->mru));
+
 	return size;
 }
 
+static void pad_packet(struct datapath *dp, struct sk_buff *skb)
+{
+	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
+		size_t plen = NLA_ALIGN(skb->len) - skb->len;
+
+		if (plen > 0)
+			memset(skb_put(skb, plen), 0, plen);
+	}
+}
+
 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 				  const struct sw_flow_key *key,
 				  const struct dp_upcall_info *upcall_info)
@@ -476,7 +491,8 @@
 	if (upcall_info->egress_tun_info) {
 		nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
 		err = ovs_nla_put_egress_tunnel_key(user_skb,
-						    upcall_info->egress_tun_info);
+						    upcall_info->egress_tun_info,
+						    upcall_info->egress_tun_opts);
 		BUG_ON(err);
 		nla_nest_end(user_skb, nla);
 	}
@@ -492,6 +508,16 @@
 			nla_nest_cancel(user_skb, nla);
 	}
 
+	/* Add OVS_PACKET_ATTR_MRU */
+	if (upcall_info->mru) {
+		if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
+				upcall_info->mru)) {
+			err = -ENOBUFS;
+			goto out;
+		}
+		pad_packet(dp, user_skb);
+	}
+
 	/* Only reserve room for attribute header, packet data is added
 	 * in skb_zerocopy() */
 	if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
@@ -505,12 +531,7 @@
 		goto out;
 
 	/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
-	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
-		size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;
-
-		if (plen > 0)
-			memset(skb_put(user_skb, plen), 0, plen);
-	}
+	pad_packet(dp, user_skb);
 
 	((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
 
@@ -527,6 +548,7 @@
 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 {
 	struct ovs_header *ovs_header = info->userhdr;
+	struct net *net = sock_net(skb->sk);
 	struct nlattr **a = info->attrs;
 	struct sw_flow_actions *acts;
 	struct sk_buff *packet;
@@ -535,6 +557,7 @@
 	struct datapath *dp;
 	struct ethhdr *eth;
 	struct vport *input_vport;
+	u16 mru = 0;
 	int len;
 	int err;
 	bool log = !a[OVS_PACKET_ATTR_PROBE];
@@ -564,29 +587,35 @@
 	else
 		packet->protocol = htons(ETH_P_802_2);
 
+	/* Set packet's mru */
+	if (a[OVS_PACKET_ATTR_MRU]) {
+		mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
+		packet->ignore_df = 1;
+	}
+	OVS_CB(packet)->mru = mru;
+
 	/* Build an sw_flow for sending this packet. */
 	flow = ovs_flow_alloc();
 	err = PTR_ERR(flow);
 	if (IS_ERR(flow))
 		goto err_kfree_skb;
 
-	err = ovs_flow_key_extract_userspace(a[OVS_PACKET_ATTR_KEY], packet,
-					     &flow->key, log);
+	err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
+					     packet, &flow->key, log);
 	if (err)
 		goto err_flow_free;
 
-	err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
+	err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
 				   &flow->key, &acts, log);
 	if (err)
 		goto err_flow_free;
 
 	rcu_assign_pointer(flow->sf_acts, acts);
-	OVS_CB(packet)->egress_tun_info = NULL;
 	packet->priority = flow->key.phy.priority;
 	packet->mark = flow->key.phy.skb_mark;
 
 	rcu_read_lock();
-	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
+	dp = get_dp_rcu(net, ovs_header->dp_ifindex);
 	err = -ENODEV;
 	if (!dp)
 		goto err_unlock;
@@ -598,6 +627,7 @@
 	if (!input_vport)
 		goto err_unlock;
 
+	packet->dev = input_vport->dev;
 	OVS_CB(packet)->input_vport = input_vport;
 	sf_acts = rcu_dereference(flow->sf_acts);
 
@@ -624,6 +654,7 @@
 	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
 	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
 	[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
+	[OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
 };
 
 static const struct genl_ops dp_packet_genl_ops[] = {
@@ -713,7 +744,7 @@
 
 	/* OVS_FLOW_ATTR_ACTIONS */
 	if (should_fill_actions(ufid_flags))
-		len += nla_total_size(acts->actions_len);
+		len += nla_total_size(acts->orig_len);
 
 	return len
 		+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
@@ -880,6 +911,7 @@
 
 static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
 {
+	struct net *net = sock_net(skb->sk);
 	struct nlattr **a = info->attrs;
 	struct ovs_header *ovs_header = info->userhdr;
 	struct sw_flow *flow = NULL, *new_flow;
@@ -915,7 +947,7 @@
 
 	/* Extract key. */
 	ovs_match_init(&match, &key, &mask);
-	error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
+	error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
 				  a[OVS_FLOW_ATTR_MASK], log);
 	if (error)
 		goto err_kfree_flow;
@@ -929,8 +961,8 @@
 		goto err_kfree_flow;
 
 	/* Validate actions. */
-	error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
-				     &acts, log);
+	error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
+				     &new_flow->key, &acts, log);
 	if (error) {
 		OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
 		goto err_kfree_flow;
@@ -944,7 +976,7 @@
 	}
 
 	ovs_lock();
-	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+	dp = get_dp(net, ovs_header->dp_ifindex);
 	if (unlikely(!dp)) {
 		error = -ENODEV;
 		goto err_unlock_ovs;
@@ -1018,7 +1050,7 @@
 		}
 		ovs_unlock();
 
-		ovs_nla_free_flow_actions(old_acts);
+		ovs_nla_free_flow_actions_rcu(old_acts);
 		ovs_flow_free(new_flow, false);
 	}
 
@@ -1030,7 +1062,7 @@
 	ovs_unlock();
 	kfree_skb(reply);
 err_kfree_acts:
-	kfree(acts);
+	ovs_nla_free_flow_actions(acts);
 err_kfree_flow:
 	ovs_flow_free(new_flow, false);
 error:
@@ -1038,7 +1070,8 @@
 }
 
 /* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
-static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
+static struct sw_flow_actions *get_flow_actions(struct net *net,
+						const struct nlattr *a,
 						const struct sw_flow_key *key,
 						const struct sw_flow_mask *mask,
 						bool log)
@@ -1048,7 +1081,7 @@
 	int error;
 
 	ovs_flow_mask_key(&masked_key, key, mask);
-	error = ovs_nla_copy_actions(a, &masked_key, &acts, log);
+	error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
 	if (error) {
 		OVS_NLERR(log,
 			  "Actions may not be safe on all matching packets");
@@ -1060,6 +1093,7 @@
 
 static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
 {
+	struct net *net = sock_net(skb->sk);
 	struct nlattr **a = info->attrs;
 	struct ovs_header *ovs_header = info->userhdr;
 	struct sw_flow_key key;
@@ -1084,15 +1118,15 @@
 
 	ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
 	ovs_match_init(&match, &key, &mask);
-	error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
+	error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
 				  a[OVS_FLOW_ATTR_MASK], log);
 	if (error)
 		goto error;
 
 	/* Validate actions. */
 	if (a[OVS_FLOW_ATTR_ACTIONS]) {
-		acts = get_flow_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, &mask,
-					log);
+		acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], &key,
+					&mask, log);
 		if (IS_ERR(acts)) {
 			error = PTR_ERR(acts);
 			goto error;
@@ -1108,7 +1142,7 @@
 	}
 
 	ovs_lock();
-	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+	dp = get_dp(net, ovs_header->dp_ifindex);
 	if (unlikely(!dp)) {
 		error = -ENODEV;
 		goto err_unlock_ovs;
@@ -1157,7 +1191,7 @@
 	if (reply)
 		ovs_notify(&dp_flow_genl_family, reply, info);
 	if (old_acts)
-		ovs_nla_free_flow_actions(old_acts);
+		ovs_nla_free_flow_actions_rcu(old_acts);
 
 	return 0;
 
@@ -1165,7 +1199,7 @@
 	ovs_unlock();
 	kfree_skb(reply);
 err_kfree_acts:
-	kfree(acts);
+	ovs_nla_free_flow_actions(acts);
 error:
 	return error;
 }
@@ -1174,6 +1208,7 @@
 {
 	struct nlattr **a = info->attrs;
 	struct ovs_header *ovs_header = info->userhdr;
+	struct net *net = sock_net(skb->sk);
 	struct sw_flow_key key;
 	struct sk_buff *reply;
 	struct sw_flow *flow;
@@ -1188,7 +1223,7 @@
 	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
 	if (a[OVS_FLOW_ATTR_KEY]) {
 		ovs_match_init(&match, &key, NULL);
-		err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL,
+		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
 					log);
 	} else if (!ufid_present) {
 		OVS_NLERR(log,
@@ -1232,6 +1267,7 @@
 {
 	struct nlattr **a = info->attrs;
 	struct ovs_header *ovs_header = info->userhdr;
+	struct net *net = sock_net(skb->sk);
 	struct sw_flow_key key;
 	struct sk_buff *reply;
 	struct sw_flow *flow = NULL;
@@ -1246,8 +1282,8 @@
 	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
 	if (a[OVS_FLOW_ATTR_KEY]) {
 		ovs_match_init(&match, &key, NULL);
-		err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL,
-					log);
+		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
+					NULL, log);
 		if (unlikely(err))
 			return err;
 	}
@@ -1800,7 +1836,7 @@
 	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
 	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
 	    nla_put_string(skb, OVS_VPORT_ATTR_NAME,
-			   vport->ops->get_name(vport)))
+			   ovs_vport_name(vport)))
 		goto nla_put_failure;
 
 	ovs_vport_get_stats(vport, &vport_stats);
@@ -2203,6 +2239,7 @@
 
 	INIT_LIST_HEAD(&ovs_net->dps);
 	INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
+	ovs_ct_init(net);
 	return 0;
 }
 
@@ -2219,13 +2256,10 @@
 			struct vport *vport;
 
 			hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
-				struct netdev_vport *netdev_vport;
-
 				if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
 					continue;
 
-				netdev_vport = netdev_vport_priv(vport);
-				if (dev_net(netdev_vport->dev) == dnet)
+				if (dev_net(vport->dev) == dnet)
 					list_add(&vport->detach_list, head);
 			}
 		}
@@ -2240,6 +2274,7 @@
 	struct net *net;
 	LIST_HEAD(head);
 
+	ovs_ct_exit(dnet);
 	ovs_lock();
 	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
 		__dp_destroy(dp);
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index cd691e9..f88038a 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -25,10 +25,11 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/u64_stats_sync.h>
+#include <net/ip_tunnels.h>
 
+#include "conntrack.h"
 #include "flow.h"
 #include "flow_table.h"
-#include "vport.h"
 
 #define DP_MAX_PORTS           USHRT_MAX
 #define DP_VPORT_HASH_BUCKETS  1024
@@ -92,14 +93,14 @@
 
 /**
  * struct ovs_skb_cb - OVS data in skb CB
- * @egress_tun_key: Tunnel information about this packet on egress path.
- * NULL if the packet is not being tunneled.
  * @input_vport: The original vport packet came in on. This value is cached
  * when a packet is received by OVS.
+ * @mru: The maximum received fragment size; 0 if the packet is not
+ * fragmented.
  */
 struct ovs_skb_cb {
-	struct ovs_tunnel_info  *egress_tun_info;
 	struct vport		*input_vport;
+	u16			mru;
 };
 #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
 
@@ -112,14 +113,17 @@
  * then no packet is sent and the packet is accounted in the datapath's @n_lost
  * counter.
  * @egress_tun_info: If nonnull, becomes %OVS_PACKET_ATTR_EGRESS_TUN_KEY.
+ * @mru: If not zero, the maximum received IP fragment size.
  */
 struct dp_upcall_info {
-	const struct ovs_tunnel_info *egress_tun_info;
+	struct ip_tunnel_info *egress_tun_info;
+	const void *egress_tun_opts;
 	const struct nlattr *userdata;
 	const struct nlattr *actions;
 	int actions_len;
 	u32 portid;
 	u8 cmd;
+	u16 mru;
 };
 
 /**
@@ -130,7 +134,9 @@
 struct ovs_net {
 	struct list_head dps;
 	struct work_struct dp_notify_work;
-	struct vport_net vport_net;
+
+	/* Module reference for configuring conntrack. */
+	bool xt_label;
 };
 
 extern int ovs_net_id;
@@ -199,6 +205,10 @@
 int action_fifos_init(void);
 void action_fifos_exit(void);
 
+/* 'KEY' must not have any bits set outside of the 'MASK' */
+#define OVS_MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
+#define OVS_SET_MASKED(OLD, KEY, MASK) ((OLD) = OVS_MASKED(OLD, KEY, MASK))
+
 #define OVS_NLERR(logging_allowed, fmt, ...)			\
 do {								\
 	if (logging_allowed && net_ratelimit())			\
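The OVS_MASKED() helper introduced above merges a masked update into an
existing value: bits covered by MASK are taken from KEY and all other bits
are preserved from OLD, which is why KEY must not have bits set outside
MASK. A minimal worked example with hypothetical values (not part of this
patch):

	u16 old = 0xABCD;
	u16 out = OVS_MASKED(old, 0x0012, 0x00FF);
	/* out == 0x0012 | (0xABCD & ~0x00FF) == 0xAB12:
	 * the low byte comes from KEY, the high byte is kept from OLD.
	 */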
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index 2c631fe..a7a80a6 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -58,13 +58,10 @@
 			struct hlist_node *n;
 
 			hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {
-				struct netdev_vport *netdev_vport;
-
 				if (vport->ops->type != OVS_VPORT_TYPE_NETDEV)
 					continue;
 
-				netdev_vport = netdev_vport_priv(vport);
-				if (!(netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH))
+				if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH))
 					dp_detach_port_notify(vport);
 			}
 		}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index bc7b0ab..c8db44a 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -46,9 +46,11 @@
 #include <net/mpls.h>
 #include <net/ndisc.h>
 
+#include "conntrack.h"
 #include "datapath.h"
 #include "flow.h"
 #include "flow_netlink.h"
+#include "vport.h"
 
 u64 ovs_flow_used_time(unsigned long flow_jiffies)
 {
@@ -271,8 +273,6 @@
 	key->ipv6.addr.dst = nh->daddr;
 
 	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
-	if (unlikely(payload_ofs < 0))
-		return -EINVAL;
 
 	if (frag_off) {
 		if (frag_off & htons(~0x7))
@@ -283,6 +283,13 @@
 		key->ip.frag = OVS_FRAG_TYPE_NONE;
 	}
 
+	/* Delayed handling of error in ipv6_skip_exthdr() as it
+	 * always sets frag_off to a valid value which may be
+	 * used to set key->ip.frag above.
+	 */
+	if (unlikely(payload_ofs < 0))
+		return -EPROTO;
+
 	nh_len = payload_ofs - nh_ofs;
 	skb_set_transport_header(skb, nh_ofs + nh_len);
 	key->ip.proto = nexthdr;
@@ -622,12 +629,16 @@
 
 		nh_len = parse_ipv6hdr(skb, key);
 		if (unlikely(nh_len < 0)) {
-			memset(&key->ip, 0, sizeof(key->ip));
-			memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
-			if (nh_len == -EINVAL) {
+			switch (nh_len) {
+			case -EINVAL:
+				memset(&key->ip, 0, sizeof(key->ip));
+				memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
+				/* fall-through */
+			case -EPROTO:
 				skb->transport_header = skb->network_header;
 				error = 0;
-			} else {
+				break;
+			default:
 				error = nh_len;
 			}
 			return error;
@@ -682,19 +693,22 @@
 	return key_extract(skb, key);
 }
 
-int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
+int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
 			 struct sk_buff *skb, struct sw_flow_key *key)
 {
 	/* Extract metadata from packet. */
 	if (tun_info) {
-		memcpy(&key->tun_key, &tun_info->tunnel, sizeof(key->tun_key));
+		if (ip_tunnel_info_af(tun_info) != AF_INET)
+			return -EINVAL;
+		memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));
 
-		if (tun_info->options) {
+		if (tun_info->options_len) {
 			BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
 						   8)) - 1
 					> sizeof(key->tun_opts));
-			memcpy(TUN_METADATA_OPTS(key, tun_info->options_len),
-			       tun_info->options, tun_info->options_len);
+
+			ip_tunnel_info_opts_get(TUN_METADATA_OPTS(key, tun_info->options_len),
+						tun_info);
 			key->tun_opts_len = tun_info->options_len;
 		} else {
 			key->tun_opts_len = 0;
@@ -707,13 +721,14 @@
 	key->phy.priority = skb->priority;
 	key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
 	key->phy.skb_mark = skb->mark;
+	ovs_ct_fill_key(skb, key);
 	key->ovs_flow_hash = 0;
 	key->recirc_id = 0;
 
 	return key_extract(skb, key);
 }
 
-int ovs_flow_key_extract_userspace(const struct nlattr *attr,
+int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
 				   struct sk_buff *skb,
 				   struct sw_flow_key *key, bool log)
 {
@@ -722,7 +737,7 @@
 	memset(key, 0, OVS_SW_FLOW_KEY_METADATA_SIZE);
 
 	/* Extract metadata from netlink attributes. */
-	err = ovs_nla_get_flow_metadata(attr, key, log);
+	err = ovs_nla_get_flow_metadata(net, attr, key, log);
 	if (err)
 		return err;
 
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index a076e44..fe527d2 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -32,31 +32,11 @@
 #include <linux/time.h>
 #include <linux/flex_array.h>
 #include <net/inet_ecn.h>
+#include <net/ip_tunnels.h>
+#include <net/dst_metadata.h>
 
 struct sk_buff;
 
-/* Used to memset ovs_key_ipv4_tunnel padding. */
-#define OVS_TUNNEL_KEY_SIZE					\
-	(offsetof(struct ovs_key_ipv4_tunnel, tp_dst) +		\
-	 FIELD_SIZEOF(struct ovs_key_ipv4_tunnel, tp_dst))
-
-struct ovs_key_ipv4_tunnel {
-	__be64 tun_id;
-	__be32 ipv4_src;
-	__be32 ipv4_dst;
-	__be16 tun_flags;
-	u8   ipv4_tos;
-	u8   ipv4_ttl;
-	__be16 tp_src;
-	__be16 tp_dst;
-} __packed __aligned(4); /* Minimize padding. */
-
-struct ovs_tunnel_info {
-	struct ovs_key_ipv4_tunnel tunnel;
-	const void *options;
-	u8 options_len;
-};
-
 /* Store options at the end of the array if they are less than the
  * maximum size. This allows us to get the benefits of variable length
  * matching for small options.
@@ -66,54 +46,9 @@
 #define TUN_METADATA_OPTS(flow_key, opt_len) \
 	((void *)((flow_key)->tun_opts + TUN_METADATA_OFFSET(opt_len)))
 
-static inline void __ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
-					    __be32 saddr, __be32 daddr,
-					    u8 tos, u8 ttl,
-					    __be16 tp_src,
-					    __be16 tp_dst,
-					    __be64 tun_id,
-					    __be16 tun_flags,
-					    const void *opts,
-					    u8 opts_len)
-{
-	tun_info->tunnel.tun_id = tun_id;
-	tun_info->tunnel.ipv4_src = saddr;
-	tun_info->tunnel.ipv4_dst = daddr;
-	tun_info->tunnel.ipv4_tos = tos;
-	tun_info->tunnel.ipv4_ttl = ttl;
-	tun_info->tunnel.tun_flags = tun_flags;
-
-	/* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
-	 * the upper tunnel are used.
-	 * E.g: GRE over IPSEC, the tp_src and tp_port are zero.
-	 */
-	tun_info->tunnel.tp_src = tp_src;
-	tun_info->tunnel.tp_dst = tp_dst;
-
-	/* Clear struct padding. */
-	if (sizeof(tun_info->tunnel) != OVS_TUNNEL_KEY_SIZE)
-		memset((unsigned char *)&tun_info->tunnel + OVS_TUNNEL_KEY_SIZE,
-		       0, sizeof(tun_info->tunnel) - OVS_TUNNEL_KEY_SIZE);
-
-	tun_info->options = opts;
-	tun_info->options_len = opts_len;
-}
-
-static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
-					  const struct iphdr *iph,
-					  __be16 tp_src,
-					  __be16 tp_dst,
-					  __be64 tun_id,
-					  __be16 tun_flags,
-					  const void *opts,
-					  u8 opts_len)
-{
-	__ovs_flow_tun_info_init(tun_info, iph->saddr, iph->daddr,
-				 iph->tos, iph->ttl,
-				 tp_src, tp_dst,
-				 tun_id, tun_flags,
-				 opts, opts_len);
-}
+struct ovs_tunnel_info {
+	struct metadata_dst	*tun_dst;
+};
 
 #define OVS_SW_FLOW_KEY_METADATA_SIZE			\
 	(offsetof(struct sw_flow_key, recirc_id) +	\
@@ -122,7 +57,7 @@
 struct sw_flow_key {
 	u8 tun_opts[255];
 	u8 tun_opts_len;
-	struct ovs_key_ipv4_tunnel tun_key;  /* Encapsulating tunnel key. */
+	struct ip_tunnel_key tun_key;	/* Encapsulating tunnel key. */
 	struct {
 		u32	priority;	/* Packet QoS priority. */
 		u32	skb_mark;	/* SKB mark. */
@@ -176,6 +111,14 @@
 			} nd;
 		} ipv6;
 	};
+	struct {
+		/* Connection tracking fields. */
+		u16 zone;
+		u32 mark;
+		u8 state;
+		struct ovs_key_ct_label label;
+	} ct;
+
 } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
 
 struct sw_flow_key_range {
@@ -209,6 +152,7 @@
 
 struct sw_flow_actions {
 	struct rcu_head rcu;
+	size_t orig_len;	/* Size of the netlink actions from flow_cmd_new */
 	u32 actions_len;
 	struct nlattr actions[];
 };
@@ -273,11 +217,11 @@
 u64 ovs_flow_used_time(unsigned long flow_jiffies);
 
 int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
-int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
+int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
 			 struct sk_buff *skb,
 			 struct sw_flow_key *key);
 /* Extract key from packet coming from userspace. */
-int ovs_flow_key_extract_userspace(const struct nlattr *attr,
+int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
 				   struct sk_buff *skb,
 				   struct sw_flow_key *key, bool log);
 
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 624e41c..c92d6a2 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -47,9 +47,9 @@
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 #include <net/mpls.h>
+#include <net/vxlan.h>
 
 #include "flow_netlink.h"
-#include "vport-vxlan.h"
 
 struct ovs_len_tbl {
 	int len;
@@ -281,7 +281,7 @@
 	/* Whenever adding new OVS_KEY_ FIELDS, we should consider
 	 * updating this function.
 	 */
-	BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 22);
+	BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 26);
 
 	return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
 		+ nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
@@ -290,6 +290,10 @@
 		+ nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
 		+ nla_total_size(4)   /* OVS_KEY_ATTR_DP_HASH */
 		+ nla_total_size(4)   /* OVS_KEY_ATTR_RECIRC_ID */
+		+ nla_total_size(1)   /* OVS_KEY_ATTR_CT_STATE */
+		+ nla_total_size(2)   /* OVS_KEY_ATTR_CT_ZONE */
+		+ nla_total_size(4)   /* OVS_KEY_ATTR_CT_MARK */
+		+ nla_total_size(16)  /* OVS_KEY_ATTR_CT_LABEL */
 		+ nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
 		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
 		+ nla_total_size(4)   /* OVS_KEY_ATTR_VLAN */
@@ -339,6 +343,10 @@
 	[OVS_KEY_ATTR_TUNNEL]	 = { .len = OVS_ATTR_NESTED,
 				     .next = ovs_tunnel_key_lens, },
 	[OVS_KEY_ATTR_MPLS]	 = { .len = sizeof(struct ovs_key_mpls) },
+	[OVS_KEY_ATTR_CT_STATE]	 = { .len = sizeof(u8) },
+	[OVS_KEY_ATTR_CT_ZONE]	 = { .len = sizeof(u16) },
+	[OVS_KEY_ATTR_CT_MARK]	 = { .len = sizeof(u32) },
+	[OVS_KEY_ATTR_CT_LABEL]	 = { .len = sizeof(struct ovs_key_ct_label) },
 };
 
 static bool is_all_zero(const u8 *fp, size_t size)
@@ -475,7 +483,7 @@
 {
 	struct nlattr *tb[OVS_VXLAN_EXT_MAX+1];
 	unsigned long opt_key_offset;
-	struct ovs_vxlan_opts opts;
+	struct vxlan_metadata opts;
 	int err;
 
 	BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
@@ -534,19 +542,19 @@
 			tun_flags |= TUNNEL_KEY;
 			break;
 		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
-			SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
+			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
 					nla_get_in_addr(a), is_mask);
 			break;
 		case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
-			SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
+			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
 					nla_get_in_addr(a), is_mask);
 			break;
 		case OVS_TUNNEL_KEY_ATTR_TOS:
-			SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
+			SW_FLOW_KEY_PUT(match, tun_key.tos,
 					nla_get_u8(a), is_mask);
 			break;
 		case OVS_TUNNEL_KEY_ATTR_TTL:
-			SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
+			SW_FLOW_KEY_PUT(match, tun_key.ttl,
 					nla_get_u8(a), is_mask);
 			ttl = true;
 			break;
@@ -609,7 +617,7 @@
 	}
 
 	if (!is_mask) {
-		if (!match->key->tun_key.ipv4_dst) {
+		if (!match->key->tun_key.u.ipv4.dst) {
 			OVS_NLERR(log, "IPv4 tunnel dst address is zero");
 			return -EINVAL;
 		}
@@ -626,7 +634,7 @@
 static int vxlan_opt_to_nlattr(struct sk_buff *skb,
 			       const void *tun_opts, int swkey_tun_opts_len)
 {
-	const struct ovs_vxlan_opts *opts = tun_opts;
+	const struct vxlan_metadata *opts = tun_opts;
 	struct nlattr *nla;
 
 	nla = nla_nest_start(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
@@ -641,24 +649,24 @@
 }
 
 static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
-				const struct ovs_key_ipv4_tunnel *output,
+				const struct ip_tunnel_key *output,
 				const void *tun_opts, int swkey_tun_opts_len)
 {
 	if (output->tun_flags & TUNNEL_KEY &&
 	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
 		return -EMSGSIZE;
-	if (output->ipv4_src &&
+	if (output->u.ipv4.src &&
 	    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
-			    output->ipv4_src))
+			    output->u.ipv4.src))
 		return -EMSGSIZE;
-	if (output->ipv4_dst &&
+	if (output->u.ipv4.dst &&
 	    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
-			    output->ipv4_dst))
+			    output->u.ipv4.dst))
 		return -EMSGSIZE;
-	if (output->ipv4_tos &&
-	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
+	if (output->tos &&
+	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
 		return -EMSGSIZE;
-	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
+	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
 		return -EMSGSIZE;
 	if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
 	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
@@ -689,7 +697,7 @@
 }
 
 static int ipv4_tun_to_nlattr(struct sk_buff *skb,
-			      const struct ovs_key_ipv4_tunnel *output,
+			      const struct ip_tunnel_key *output,
 			      const void *tun_opts, int swkey_tun_opts_len)
 {
 	struct nlattr *nla;
@@ -708,16 +716,17 @@
 }
 
 int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb,
-				  const struct ovs_tunnel_info *egress_tun_info)
+				  const struct ip_tunnel_info *egress_tun_info,
+				  const void *egress_tun_opts)
 {
-	return __ipv4_tun_to_nlattr(skb, &egress_tun_info->tunnel,
-				    egress_tun_info->options,
+	return __ipv4_tun_to_nlattr(skb, &egress_tun_info->key,
+				    egress_tun_opts,
 				    egress_tun_info->options_len);
 }
 
-static int metadata_from_nlattrs(struct sw_flow_match *match,  u64 *attrs,
-				 const struct nlattr **a, bool is_mask,
-				 bool log)
+static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
+				 u64 *attrs, const struct nlattr **a,
+				 bool is_mask, bool log)
 {
 	if (*attrs & (1 << OVS_KEY_ATTR_DP_HASH)) {
 		u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]);
@@ -768,16 +777,47 @@
 			return -EINVAL;
 		*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
 	}
+
+	if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) &&
+	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
+		u8 ct_state = nla_get_u8(a[OVS_KEY_ATTR_CT_STATE]);
+
+		SW_FLOW_KEY_PUT(match, ct.state, ct_state, is_mask);
+		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE);
+	}
+	if (*attrs & (1 << OVS_KEY_ATTR_CT_ZONE) &&
+	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_ZONE)) {
+		u16 ct_zone = nla_get_u16(a[OVS_KEY_ATTR_CT_ZONE]);
+
+		SW_FLOW_KEY_PUT(match, ct.zone, ct_zone, is_mask);
+		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ZONE);
+	}
+	if (*attrs & (1 << OVS_KEY_ATTR_CT_MARK) &&
+	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_MARK)) {
+		u32 mark = nla_get_u32(a[OVS_KEY_ATTR_CT_MARK]);
+
+		SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask);
+		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK);
+	}
+	if (*attrs & (1 << OVS_KEY_ATTR_CT_LABEL) &&
+	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABEL)) {
+		const struct ovs_key_ct_label *cl;
+
+		cl = nla_data(a[OVS_KEY_ATTR_CT_LABEL]);
+		SW_FLOW_KEY_MEMCPY(match, ct.label, cl->ct_label,
+				   sizeof(*cl), is_mask);
+		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABEL);
+	}
 	return 0;
 }
 
-static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
-				const struct nlattr **a, bool is_mask,
-				bool log)
+static int ovs_key_from_nlattrs(struct net *net, struct sw_flow_match *match,
+				u64 attrs, const struct nlattr **a,
+				bool is_mask, bool log)
 {
 	int err;
 
-	err = metadata_from_nlattrs(match, &attrs, a, is_mask, log);
+	err = metadata_from_nlattrs(net, match, &attrs, a, is_mask, log);
 	if (err)
 		return err;
 
@@ -1029,6 +1069,7 @@
  * mask. In case the 'mask' is NULL, the flow is treated as exact match
  * flow. Otherwise, it is treated as a wildcarded flow, except the mask
  * does not include any don't care bit.
+ * @net: Used to determine per-namespace field support.
  * @match: receives the extracted flow match information.
  * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence. The fields should be those of the packet that triggered the creation
@@ -1039,7 +1080,7 @@
  * probing for feature compatibility this should be passed in as false to
  * suppress unnecessary error logging.
  */
-int ovs_nla_get_match(struct sw_flow_match *match,
+int ovs_nla_get_match(struct net *net, struct sw_flow_match *match,
 		      const struct nlattr *nla_key,
 		      const struct nlattr *nla_mask,
 		      bool log)
@@ -1089,7 +1130,7 @@
 		}
 	}
 
-	err = ovs_key_from_nlattrs(match, key_attrs, a, false, log);
+	err = ovs_key_from_nlattrs(net, match, key_attrs, a, false, log);
 	if (err)
 		return err;
 
@@ -1116,7 +1157,7 @@
 			/* The userspace does not send tunnel attributes that
 			 * are 0, but we should not wildcard them nonetheless.
 			 */
-			if (match->key->tun_key.ipv4_dst)
+			if (match->key->tun_key.u.ipv4.dst)
 				SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
 							 0xff, true);
 
@@ -1169,7 +1210,8 @@
 			}
 		}
 
-		err = ovs_key_from_nlattrs(match, mask_attrs, a, true, log);
+		err = ovs_key_from_nlattrs(net, match, mask_attrs, a, true,
+					   log);
 		if (err)
 			goto free_newmask;
 	}
@@ -1250,7 +1292,7 @@
  * extracted from the packet itself.
  */
 
-int ovs_nla_get_flow_metadata(const struct nlattr *attr,
+int ovs_nla_get_flow_metadata(struct net *net, const struct nlattr *attr,
 			      struct sw_flow_key *key,
 			      bool log)
 {
@@ -1266,9 +1308,10 @@
 	memset(&match, 0, sizeof(match));
 	match.key = key;
 
+	memset(&key->ct, 0, sizeof(key->ct));
 	key->phy.in_port = DP_MAX_PORTS;
 
-	return metadata_from_nlattrs(&match, &attrs, a, false, log);
+	return metadata_from_nlattrs(net, &match, &attrs, a, false, log);
 }
 
 static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
@@ -1287,7 +1330,7 @@
 	if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
 		goto nla_put_failure;
 
-	if ((swkey->tun_key.ipv4_dst || is_mask)) {
+	if ((swkey->tun_key.u.ipv4.dst || is_mask)) {
 		const void *opts = NULL;
 
 		if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
@@ -1314,6 +1357,9 @@
 	if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
 		goto nla_put_failure;
 
+	if (ovs_ct_put_key(output, skb))
+		goto nla_put_failure;
+
 	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
 	if (!nla)
 		goto nla_put_failure;
@@ -1548,11 +1594,51 @@
 	return sfa;
 }
 
-/* Schedules 'sf_acts' to be freed after the next RCU grace period.
- * The caller must hold rcu_read_lock for this to be sensible. */
+static void ovs_nla_free_set_action(const struct nlattr *a)
+{
+	const struct nlattr *ovs_key = nla_data(a);
+	struct ovs_tunnel_info *ovs_tun;
+
+	switch (nla_type(ovs_key)) {
+	case OVS_KEY_ATTR_TUNNEL_INFO:
+		ovs_tun = nla_data(ovs_key);
+		dst_release((struct dst_entry *)ovs_tun->tun_dst);
+		break;
+	}
+}
+
 void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
 {
-	kfree_rcu(sf_acts, rcu);
+	const struct nlattr *a;
+	int rem;
+
+	if (!sf_acts)
+		return;
+
+	nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) {
+		switch (nla_type(a)) {
+		case OVS_ACTION_ATTR_SET:
+			ovs_nla_free_set_action(a);
+			break;
+		case OVS_ACTION_ATTR_CT:
+			ovs_ct_free_action(a);
+			break;
+		}
+	}
+
+	kfree(sf_acts);
+}
+
+static void __ovs_nla_free_flow_actions(struct rcu_head *head)
+{
+	ovs_nla_free_flow_actions(container_of(head, struct sw_flow_actions, rcu));
+}
+
+/* Schedules 'sf_acts' to be freed after the next RCU grace period.
+ * The caller must hold rcu_read_lock for this to be sensible. */
+void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *sf_acts)
+{
+	call_rcu(&sf_acts->rcu, __ovs_nla_free_flow_actions);
 }
 
 static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
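Because a set-tunnel action now owns a refcounted metadata_dst (and a
conntrack action owns template state), the action buffer can no longer be
freed with a flat kfree_rcu(): the attribute list has to be walked so that
embedded references are dropped first, which is what the call_rcu()
callback above does. The general shape of this pattern, as a sketch with
hypothetical names:

	struct obj {
		struct rcu_head rcu;
		/* ... members holding references ... */
	};

	static void obj_free_cb(struct rcu_head *head)
	{
		struct obj *o = container_of(head, struct obj, rcu);

		/* drop embedded references here, then free the buffer */
		kfree(o);
	}

	/* instead of kfree_rcu(o, rcu): */
	call_rcu(&o->rcu, obj_free_cb);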
@@ -1582,6 +1668,7 @@
 
 	memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
 	acts->actions_len = (*sfa)->actions_len;
+	acts->orig_len = (*sfa)->orig_len;
 	kfree(*sfa);
 	*sfa = acts;
 
@@ -1609,8 +1696,8 @@
 	return a;
 }
 
-static int add_action(struct sw_flow_actions **sfa, int attrtype,
-		      void *data, int len, bool log)
+int ovs_nla_add_action(struct sw_flow_actions **sfa, int attrtype, void *data,
+		       int len, bool log)
 {
 	struct nlattr *a;
 
@@ -1625,7 +1712,7 @@
 	int used = (*sfa)->actions_len;
 	int err;
 
-	err = add_action(sfa, attrtype, NULL, 0, log);
+	err = ovs_nla_add_action(sfa, attrtype, NULL, 0, log);
 	if (err)
 		return err;
 
@@ -1641,12 +1728,12 @@
 	a->nla_len = sfa->actions_len - st_offset;
 }
 
-static int __ovs_nla_copy_actions(const struct nlattr *attr,
+static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
 				  const struct sw_flow_key *key,
 				  int depth, struct sw_flow_actions **sfa,
 				  __be16 eth_type, __be16 vlan_tci, bool log);
 
-static int validate_and_copy_sample(const struct nlattr *attr,
+static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
 				    const struct sw_flow_key *key, int depth,
 				    struct sw_flow_actions **sfa,
 				    __be16 eth_type, __be16 vlan_tci, bool log)
@@ -1678,15 +1765,15 @@
 	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log);
 	if (start < 0)
 		return start;
-	err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY,
-			 nla_data(probability), sizeof(u32), log);
+	err = ovs_nla_add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY,
+				 nla_data(probability), sizeof(u32), log);
 	if (err)
 		return err;
 	st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS, log);
 	if (st_acts < 0)
 		return st_acts;
 
-	err = __ovs_nla_copy_actions(actions, key, depth + 1, sfa,
+	err = __ovs_nla_copy_actions(net, actions, key, depth + 1, sfa,
 				     eth_type, vlan_tci, log);
 	if (err)
 		return err;
@@ -1746,7 +1833,9 @@
 {
 	struct sw_flow_match match;
 	struct sw_flow_key key;
-	struct ovs_tunnel_info *tun_info;
+	struct metadata_dst *tun_dst;
+	struct ip_tunnel_info *tun_info;
+	struct ovs_tunnel_info *ovs_tun;
 	struct nlattr *a;
 	int err = 0, start, opts_type;
 
@@ -1771,27 +1860,31 @@
 	if (start < 0)
 		return start;
 
+	tun_dst = metadata_dst_alloc(key.tun_opts_len, GFP_KERNEL);
+	if (!tun_dst)
+		return -ENOMEM;
+
 	a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
-			 sizeof(*tun_info) + key.tun_opts_len, log);
-	if (IS_ERR(a))
+			 sizeof(*ovs_tun), log);
+	if (IS_ERR(a)) {
+		dst_release((struct dst_entry *)tun_dst);
 		return PTR_ERR(a);
-
-	tun_info = nla_data(a);
-	tun_info->tunnel = key.tun_key;
-	tun_info->options_len = key.tun_opts_len;
-
-	if (tun_info->options_len) {
-		/* We need to store the options in the action itself since
-		 * everything else will go away after flow setup. We can append
-		 * it to tun_info and then point there.
-		 */
-		memcpy((tun_info + 1),
-		       TUN_METADATA_OPTS(&key, key.tun_opts_len), key.tun_opts_len);
-		tun_info->options = (tun_info + 1);
-	} else {
-		tun_info->options = NULL;
 	}
 
+	ovs_tun = nla_data(a);
+	ovs_tun->tun_dst = tun_dst;
+
+	tun_info = &tun_dst->u.tun_info;
+	tun_info->mode = IP_TUNNEL_INFO_TX;
+	tun_info->key = key.tun_key;
+
+	/* We need to store the options in the action itself since
+	 * everything else will go away after flow setup. We can append
+	 * it to tun_info and then point there.
+	 */
+	ip_tunnel_info_opts_set(tun_info,
+				TUN_METADATA_OPTS(&key, key.tun_opts_len),
+				key.tun_opts_len);
 	add_nested_action_end(*sfa, start);
 
 	return err;
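metadata_dst_alloc() hands back the tunnel dst with one reference held, and
that reference is owned by the OVS_KEY_ATTR_TUNNEL_INFO action from here
on: it is dropped with dst_release() on the error path above, and otherwise
when the action list is torn down via ovs_nla_free_set_action(). A
compressed view of the lifecycle (sketch; names as in the patch):

	tun_dst = metadata_dst_alloc(key.tun_opts_len, GFP_KERNEL); /* ref = 1 */
	ovs_tun->tun_dst = tun_dst;	/* ownership moves into the action */
	/* ... later, on error or when the flow's actions are freed ... */
	dst_release((struct dst_entry *)tun_dst);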
@@ -1843,6 +1936,8 @@
 
 	case OVS_KEY_ATTR_PRIORITY:
 	case OVS_KEY_ATTR_SKB_MARK:
+	case OVS_KEY_ATTR_CT_MARK:
+	case OVS_KEY_ATTR_CT_LABEL:
 	case OVS_KEY_ATTR_ETHERNET:
 		break;
 
@@ -2008,7 +2103,7 @@
 	return 0;
 }
 
-static int __ovs_nla_copy_actions(const struct nlattr *attr,
+static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
 				  const struct sw_flow_key *key,
 				  int depth, struct sw_flow_actions **sfa,
 				  __be16 eth_type, __be16 vlan_tci, bool log)
@@ -2032,7 +2127,8 @@
 			[OVS_ACTION_ATTR_SET] = (u32)-1,
 			[OVS_ACTION_ATTR_SET_MASKED] = (u32)-1,
 			[OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
-			[OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash)
+			[OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash),
+			[OVS_ACTION_ATTR_CT] = (u32)-1,
 		};
 		const struct ovs_action_push_vlan *vlan;
 		int type = nla_type(a);
@@ -2139,13 +2235,20 @@
 			break;
 
 		case OVS_ACTION_ATTR_SAMPLE:
-			err = validate_and_copy_sample(a, key, depth, sfa,
+			err = validate_and_copy_sample(net, a, key, depth, sfa,
 						       eth_type, vlan_tci, log);
 			if (err)
 				return err;
 			skip_copy = true;
 			break;
 
+		case OVS_ACTION_ATTR_CT:
+			err = ovs_ct_copy_action(net, a, key, sfa, log);
+			if (err)
+				return err;
+			skip_copy = true;
+			break;
+
 		default:
 			OVS_NLERR(log, "Unknown Action type %d", type);
 			return -EINVAL;
@@ -2164,7 +2267,7 @@
 }
 
 /* 'key' must be the masked key. */
-int ovs_nla_copy_actions(const struct nlattr *attr,
+int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
 			 const struct sw_flow_key *key,
 			 struct sw_flow_actions **sfa, bool log)
 {
@@ -2174,10 +2277,11 @@
 	if (IS_ERR(*sfa))
 		return PTR_ERR(*sfa);
 
-	err = __ovs_nla_copy_actions(attr, key, 0, sfa, key->eth.type,
+	(*sfa)->orig_len = nla_len(attr);
+	err = __ovs_nla_copy_actions(net, attr, key, 0, sfa, key->eth.type,
 				     key->eth.tci, log);
 	if (err)
-		kfree(*sfa);
+		ovs_nla_free_flow_actions(*sfa);
 
 	return err;
 }
@@ -2227,15 +2331,16 @@
 
 	switch (key_type) {
 	case OVS_KEY_ATTR_TUNNEL_INFO: {
-		struct ovs_tunnel_info *tun_info = nla_data(ovs_key);
+		struct ovs_tunnel_info *ovs_tun = nla_data(ovs_key);
+		struct ip_tunnel_info *tun_info = &ovs_tun->tun_dst->u.tun_info;
 
 		start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
 		if (!start)
 			return -EMSGSIZE;
 
-		err = ipv4_tun_to_nlattr(skb, &tun_info->tunnel,
+		err = ipv4_tun_to_nlattr(skb, &tun_info->key,
 					 tun_info->options_len ?
-						tun_info->options : NULL,
+					     ip_tunnel_info_opts(tun_info) : NULL,
 					 tun_info->options_len);
 		if (err)
 			return err;
@@ -2298,6 +2403,13 @@
 			if (err)
 				return err;
 			break;
+
+		case OVS_ACTION_ATTR_CT:
+			err = ovs_ct_action_to_attr(nla_data(a), skb);
+			if (err)
+				return err;
+			break;
+
 		default:
 			if (nla_put(skb, type, nla_len(a), nla_data(a)))
 				return -EMSGSIZE;
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 5c3d75b..6ca3f0b 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -45,29 +45,34 @@
 
 int ovs_nla_put_key(const struct sw_flow_key *, const struct sw_flow_key *,
 		    int attr, bool is_mask, struct sk_buff *);
-int ovs_nla_get_flow_metadata(const struct nlattr *, struct sw_flow_key *,
-			      bool log);
+int ovs_nla_get_flow_metadata(struct net *, const struct nlattr *,
+			      struct sw_flow_key *, bool log);
 
 int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb);
 int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb);
 int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb);
 
-int ovs_nla_get_match(struct sw_flow_match *, const struct nlattr *key,
-		      const struct nlattr *mask, bool log);
+int ovs_nla_get_match(struct net *, struct sw_flow_match *,
+		      const struct nlattr *key, const struct nlattr *mask,
+		      bool log);
 int ovs_nla_put_egress_tunnel_key(struct sk_buff *,
-				  const struct ovs_tunnel_info *);
+				  const struct ip_tunnel_info *,
+				  const void *egress_tun_opts);
 
 bool ovs_nla_get_ufid(struct sw_flow_id *, const struct nlattr *, bool log);
 int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
 			   const struct sw_flow_key *key, bool log);
 u32 ovs_nla_get_ufid_flags(const struct nlattr *attr);
 
-int ovs_nla_copy_actions(const struct nlattr *attr,
+int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
 			 const struct sw_flow_key *key,
 			 struct sw_flow_actions **sfa, bool log);
+int ovs_nla_add_action(struct sw_flow_actions **sfa, int attrtype,
+		       void *data, int len, bool log);
 int ovs_nla_put_actions(const struct nlattr *attr,
 			int len, struct sk_buff *skb);
 
 void ovs_nla_free_flow_actions(struct sw_flow_actions *);
+void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *);
 
 #endif /* flow_netlink.h */
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 6552394..d22d8e9 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -18,6 +18,7 @@
 
 #include "flow.h"
 #include "datapath.h"
+#include "flow_netlink.h"
 #include <linux/uaccess.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -143,7 +144,8 @@
 
 	if (ovs_identifier_is_key(&flow->id))
 		kfree(flow->id.unmasked_key);
-	kfree((struct sw_flow_actions __force *)flow->sf_acts);
+	if (flow->sf_acts)
+		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
 	for_each_node(node)
 		if (flow->stats[node])
 			kmem_cache_free(flow_stats_cache,
@@ -424,7 +426,7 @@
 
 static int flow_key_start(const struct sw_flow_key *key)
 {
-	if (key->tun_key.ipv4_dst)
+	if (key->tun_key.u.ipv4.dst)
 		return 0;
 	else
 		return rounddown(offsetof(struct sw_flow_key, phy),
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 208c576..2735e9c 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -26,96 +26,42 @@
 
 #include "datapath.h"
 #include "vport.h"
+#include "vport-netdev.h"
 
 static struct vport_ops ovs_geneve_vport_ops;
-
 /**
  * struct geneve_port - Keeps track of open UDP ports
- * @gs: The socket created for this port number.
- * @name: vport name.
+ * @port_no: destination port number.
  */
 struct geneve_port {
-	struct geneve_sock *gs;
-	char name[IFNAMSIZ];
+	u16 port_no;
 };
 
-static LIST_HEAD(geneve_ports);
-
 static inline struct geneve_port *geneve_vport(const struct vport *vport)
 {
 	return vport_priv(vport);
 }
 
-/* Convert 64 bit tunnel ID to 24 bit VNI. */
-static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
-{
-#ifdef __BIG_ENDIAN
-	vni[0] = (__force __u8)(tun_id >> 16);
-	vni[1] = (__force __u8)(tun_id >> 8);
-	vni[2] = (__force __u8)tun_id;
-#else
-	vni[0] = (__force __u8)((__force u64)tun_id >> 40);
-	vni[1] = (__force __u8)((__force u64)tun_id >> 48);
-	vni[2] = (__force __u8)((__force u64)tun_id >> 56);
-#endif
-}
-
-/* Convert 24 bit VNI to 64 bit tunnel ID. */
-static __be64 vni_to_tunnel_id(const __u8 *vni)
-{
-#ifdef __BIG_ENDIAN
-	return (vni[0] << 16) | (vni[1] << 8) | vni[2];
-#else
-	return (__force __be64)(((__force u64)vni[0] << 40) |
-				((__force u64)vni[1] << 48) |
-				((__force u64)vni[2] << 56));
-#endif
-}
-
-static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
-{
-	struct vport *vport = gs->rcv_data;
-	struct genevehdr *geneveh = geneve_hdr(skb);
-	int opts_len;
-	struct ovs_tunnel_info tun_info;
-	__be64 key;
-	__be16 flags;
-
-	opts_len = geneveh->opt_len * 4;
-
-	flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
-		(udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0) |
-		(geneveh->oam ? TUNNEL_OAM : 0) |
-		(geneveh->critical ? TUNNEL_CRIT_OPT : 0);
-
-	key = vni_to_tunnel_id(geneveh->vni);
-
-	ovs_flow_tun_info_init(&tun_info, ip_hdr(skb),
-			       udp_hdr(skb)->source, udp_hdr(skb)->dest,
-			       key, flags,
-			       geneveh->options, opts_len);
-
-	ovs_vport_receive(vport, skb, &tun_info);
-}
-
 static int geneve_get_options(const struct vport *vport,
 			      struct sk_buff *skb)
 {
 	struct geneve_port *geneve_port = geneve_vport(vport);
-	struct inet_sock *sk = inet_sk(geneve_port->gs->sock->sk);
 
-	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(sk->inet_sport)))
+	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, geneve_port->port_no))
 		return -EMSGSIZE;
 	return 0;
 }
 
-static void geneve_tnl_destroy(struct vport *vport)
+static int geneve_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
+				      struct dp_upcall_info *upcall)
 {
 	struct geneve_port *geneve_port = geneve_vport(vport);
+	struct net *net = ovs_dp_get_net(vport->dp);
+	__be16 dport = htons(geneve_port->port_no);
+	__be16 sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
 
-	geneve_sock_release(geneve_port->gs);
-
-	ovs_vport_deferred_free(vport);
+	return ovs_tunnel_get_egress_info(upcall, ovs_dp_get_net(vport->dp),
+					  skb, IPPROTO_UDP, sport, dport);
 }
 
 static struct vport *geneve_tnl_create(const struct vport_parms *parms)
@@ -123,11 +69,11 @@
 	struct net *net = ovs_dp_get_net(parms->dp);
 	struct nlattr *options = parms->options;
 	struct geneve_port *geneve_port;
-	struct geneve_sock *gs;
+	struct net_device *dev;
 	struct vport *vport;
 	struct nlattr *a;
-	int err;
 	u16 dst_port;
+	int err;
 
 	if (!options) {
 		err = -EINVAL;
@@ -149,104 +95,40 @@
 		return vport;
 
 	geneve_port = geneve_vport(vport);
-	strncpy(geneve_port->name, parms->name, IFNAMSIZ);
+	geneve_port->port_no = dst_port;
 
-	gs = geneve_sock_add(net, htons(dst_port), geneve_rcv, vport, true, 0);
-	if (IS_ERR(gs)) {
+	rtnl_lock();
+	dev = geneve_dev_create_fb(net, parms->name, NET_NAME_USER, dst_port);
+	if (IS_ERR(dev)) {
+		rtnl_unlock();
 		ovs_vport_free(vport);
-		return (void *)gs;
+		return ERR_CAST(dev);
 	}
-	geneve_port->gs = gs;
 
+	dev_change_flags(dev, dev->flags | IFF_UP);
+	rtnl_unlock();
 	return vport;
 error:
 	return ERR_PTR(err);
 }
 
-static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
+static struct vport *geneve_create(const struct vport_parms *parms)
 {
-	const struct ovs_key_ipv4_tunnel *tun_key;
-	struct ovs_tunnel_info *tun_info;
-	struct net *net = ovs_dp_get_net(vport->dp);
-	struct geneve_port *geneve_port = geneve_vport(vport);
-	__be16 dport = inet_sk(geneve_port->gs->sock->sk)->inet_sport;
-	__be16 sport;
-	struct rtable *rt;
-	struct flowi4 fl;
-	u8 vni[3], opts_len, *opts;
-	__be16 df;
-	int err;
+	struct vport *vport;
 
-	tun_info = OVS_CB(skb)->egress_tun_info;
-	if (unlikely(!tun_info)) {
-		err = -EINVAL;
-		goto error;
-	}
+	vport = geneve_tnl_create(parms);
+	if (IS_ERR(vport))
+		return vport;
 
-	tun_key = &tun_info->tunnel;
-	rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
-	if (IS_ERR(rt)) {
-		err = PTR_ERR(rt);
-		goto error;
-	}
-
-	df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
-	sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
-	tunnel_id_to_vni(tun_key->tun_id, vni);
-	skb->ignore_df = 1;
-
-	if (tun_key->tun_flags & TUNNEL_GENEVE_OPT) {
-		opts = (u8 *)tun_info->options;
-		opts_len = tun_info->options_len;
-	} else {
-		opts = NULL;
-		opts_len = 0;
-	}
-
-	err = geneve_xmit_skb(geneve_port->gs, rt, skb, fl.saddr,
-			      tun_key->ipv4_dst, tun_key->ipv4_tos,
-			      tun_key->ipv4_ttl, df, sport, dport,
-			      tun_key->tun_flags, vni, opts_len, opts,
-			      !!(tun_key->tun_flags & TUNNEL_CSUM), false);
-	if (err < 0)
-		ip_rt_put(rt);
-	return err;
-
-error:
-	kfree_skb(skb);
-	return err;
-}
-
-static const char *geneve_get_name(const struct vport *vport)
-{
-	struct geneve_port *geneve_port = geneve_vport(vport);
-
-	return geneve_port->name;
-}
-
-static int geneve_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-				      struct ovs_tunnel_info *egress_tun_info)
-{
-	struct geneve_port *geneve_port = geneve_vport(vport);
-	struct net *net = ovs_dp_get_net(vport->dp);
-	__be16 dport = inet_sk(geneve_port->gs->sock->sk)->inet_sport;
-	__be16 sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
-
-	/* Get tp_src and tp_dst, refert to geneve_build_header().
-	 */
-	return ovs_tunnel_get_egress_info(egress_tun_info,
-					  ovs_dp_get_net(vport->dp),
-					  OVS_CB(skb)->egress_tun_info,
-					  IPPROTO_UDP, skb->mark, sport, dport);
+	return ovs_netdev_link(vport, parms->name);
 }
 
 static struct vport_ops ovs_geneve_vport_ops = {
 	.type		= OVS_VPORT_TYPE_GENEVE,
-	.create		= geneve_tnl_create,
-	.destroy	= geneve_tnl_destroy,
-	.get_name	= geneve_get_name,
+	.create		= geneve_create,
+	.destroy	= ovs_netdev_tunnel_destroy,
 	.get_options	= geneve_get_options,
-	.send		= geneve_tnl_send,
+	.send		= ovs_netdev_send,
 	.owner          = THIS_MODULE,
 	.get_egress_tun_info	= geneve_get_egress_tun_info,
 };
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index f17ac96..4d24481 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -45,253 +45,58 @@
 
 #include "datapath.h"
 #include "vport.h"
+#include "vport-netdev.h"
 
 static struct vport_ops ovs_gre_vport_ops;
 
-/* Returns the least-significant 32 bits of a __be64. */
-static __be32 be64_get_low32(__be64 x)
+static struct vport *gre_tnl_create(const struct vport_parms *parms)
 {
-#ifdef __BIG_ENDIAN
-	return (__force __be32)x;
-#else
-	return (__force __be32)((__force u64)x >> 32);
-#endif
-}
-
-static __be16 filter_tnl_flags(__be16 flags)
-{
-	return flags & (TUNNEL_CSUM | TUNNEL_KEY);
-}
-
-static struct sk_buff *__build_header(struct sk_buff *skb,
-				      int tunnel_hlen)
-{
-	struct tnl_ptk_info tpi;
-	const struct ovs_key_ipv4_tunnel *tun_key;
-
-	tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
-
-	skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
-	if (IS_ERR(skb))
-		return skb;
-
-	tpi.flags = filter_tnl_flags(tun_key->tun_flags);
-	tpi.proto = htons(ETH_P_TEB);
-	tpi.key = be64_get_low32(tun_key->tun_id);
-	tpi.seq = 0;
-	gre_build_header(skb, &tpi, tunnel_hlen);
-
-	return skb;
-}
-
-static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
-{
-#ifdef __BIG_ENDIAN
-	return (__force __be64)((__force u64)seq << 32 | (__force u32)key);
-#else
-	return (__force __be64)((__force u64)key << 32 | (__force u32)seq);
-#endif
-}
-
-/* Called with rcu_read_lock and BH disabled. */
-static int gre_rcv(struct sk_buff *skb,
-		   const struct tnl_ptk_info *tpi)
-{
-	struct ovs_tunnel_info tun_info;
-	struct ovs_net *ovs_net;
-	struct vport *vport;
-	__be64 key;
-
-	ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
-	vport = rcu_dereference(ovs_net->vport_net.gre_vport);
-	if (unlikely(!vport))
-		return PACKET_REJECT;
-
-	key = key_to_tunnel_id(tpi->key, tpi->seq);
-	ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), 0, 0, key,
-			       filter_tnl_flags(tpi->flags), NULL, 0);
-
-	ovs_vport_receive(vport, skb, &tun_info);
-	return PACKET_RCVD;
-}
-
-/* Called with rcu_read_lock and BH disabled. */
-static int gre_err(struct sk_buff *skb, u32 info,
-		   const struct tnl_ptk_info *tpi)
-{
-	struct ovs_net *ovs_net;
+	struct net *net = ovs_dp_get_net(parms->dp);
+	struct net_device *dev;
 	struct vport *vport;
 
-	ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
-	vport = rcu_dereference(ovs_net->vport_net.gre_vport);
+	vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms);
+	if (IS_ERR(vport))
+		return vport;
 
-	if (unlikely(!vport))
-		return PACKET_REJECT;
-	else
-		return PACKET_RCVD;
-}
-
-static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
-{
-	struct net *net = ovs_dp_get_net(vport->dp);
-	const struct ovs_key_ipv4_tunnel *tun_key;
-	struct flowi4 fl;
-	struct rtable *rt;
-	int min_headroom;
-	int tunnel_hlen;
-	__be16 df;
-	int err;
-
-	if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
-		err = -EINVAL;
-		goto err_free_skb;
+	rtnl_lock();
+	dev = gretap_fb_dev_create(net, parms->name, NET_NAME_USER);
+	if (IS_ERR(dev)) {
+		rtnl_unlock();
+		ovs_vport_free(vport);
+		return ERR_CAST(dev);
 	}
 
-	tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
-	rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_GRE);
-	if (IS_ERR(rt)) {
-		err = PTR_ERR(rt);
-		goto err_free_skb;
-	}
+	dev_change_flags(dev, dev->flags | IFF_UP);
+	rtnl_unlock();
 
-	tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags);
-
-	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
-			+ tunnel_hlen + sizeof(struct iphdr)
-			+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
-	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
-		int head_delta = SKB_DATA_ALIGN(min_headroom -
-						skb_headroom(skb) +
-						16);
-		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
-					0, GFP_ATOMIC);
-		if (unlikely(err))
-			goto err_free_rt;
-	}
-
-	skb = vlan_hwaccel_push_inside(skb);
-	if (unlikely(!skb)) {
-		err = -ENOMEM;
-		goto err_free_rt;
-	}
-
-	/* Push Tunnel header. */
-	skb = __build_header(skb, tunnel_hlen);
-	if (IS_ERR(skb)) {
-		err = PTR_ERR(skb);
-		skb = NULL;
-		goto err_free_rt;
-	}
-
-	df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
-		htons(IP_DF) : 0;
-
-	skb->ignore_df = 1;
-
-	return iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
-			     tun_key->ipv4_dst, IPPROTO_GRE,
-			     tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false);
-err_free_rt:
-	ip_rt_put(rt);
-err_free_skb:
-	kfree_skb(skb);
-	return err;
-}
-
-static struct gre_cisco_protocol gre_protocol = {
-	.handler        = gre_rcv,
-	.err_handler    = gre_err,
-	.priority       = 1,
-};
-
-static int gre_ports;
-static int gre_init(void)
-{
-	int err;
-
-	gre_ports++;
-	if (gre_ports > 1)
-		return 0;
-
-	err = gre_cisco_register(&gre_protocol);
-	if (err)
-		pr_warn("cannot register gre protocol handler\n");
-
-	return err;
-}
-
-static void gre_exit(void)
-{
-	gre_ports--;
-	if (gre_ports > 0)
-		return;
-
-	gre_cisco_unregister(&gre_protocol);
-}
-
-static const char *gre_get_name(const struct vport *vport)
-{
-	return vport_priv(vport);
+	return vport;
 }
 
 static struct vport *gre_create(const struct vport_parms *parms)
 {
-	struct net *net = ovs_dp_get_net(parms->dp);
-	struct ovs_net *ovs_net;
 	struct vport *vport;
-	int err;
 
-	err = gre_init();
-	if (err)
-		return ERR_PTR(err);
-
-	ovs_net = net_generic(net, ovs_net_id);
-	if (ovsl_dereference(ovs_net->vport_net.gre_vport)) {
-		vport = ERR_PTR(-EEXIST);
-		goto error;
-	}
-
-	vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre_vport_ops, parms);
+	vport = gre_tnl_create(parms);
 	if (IS_ERR(vport))
-		goto error;
+		return vport;
 
-	strncpy(vport_priv(vport), parms->name, IFNAMSIZ);
-	rcu_assign_pointer(ovs_net->vport_net.gre_vport, vport);
-	return vport;
-
-error:
-	gre_exit();
-	return vport;
-}
-
-static void gre_tnl_destroy(struct vport *vport)
-{
-	struct net *net = ovs_dp_get_net(vport->dp);
-	struct ovs_net *ovs_net;
-
-	ovs_net = net_generic(net, ovs_net_id);
-
-	RCU_INIT_POINTER(ovs_net->vport_net.gre_vport, NULL);
-	ovs_vport_deferred_free(vport);
-	gre_exit();
+	return ovs_netdev_link(vport, parms->name);
 }
 
 static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-				   struct ovs_tunnel_info *egress_tun_info)
+				   struct dp_upcall_info *upcall)
 {
-	return ovs_tunnel_get_egress_info(egress_tun_info,
-					  ovs_dp_get_net(vport->dp),
-					  OVS_CB(skb)->egress_tun_info,
-					  IPPROTO_GRE, skb->mark, 0, 0);
+	return ovs_tunnel_get_egress_info(upcall, ovs_dp_get_net(vport->dp),
+					  skb, IPPROTO_GRE, 0, 0);
 }
 
 static struct vport_ops ovs_gre_vport_ops = {
 	.type		= OVS_VPORT_TYPE_GRE,
 	.create		= gre_create,
-	.destroy	= gre_tnl_destroy,
-	.get_name	= gre_get_name,
-	.send		= gre_tnl_send,
+	.send		= ovs_netdev_send,
 	.get_egress_tun_info	= gre_get_egress_tun_info,
+	.destroy	= ovs_netdev_tunnel_destroy,
 	.owner		= THIS_MODULE,
 };
 
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 6a55f71..388b8a6 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -43,35 +43,26 @@
 	return netdev_priv(netdev);
 }
 
-/* This function is only called by the kernel network layer.*/
-static struct rtnl_link_stats64 *internal_dev_get_stats(struct net_device *netdev,
-							struct rtnl_link_stats64 *stats)
-{
-	struct vport *vport = ovs_internal_dev_get_vport(netdev);
-	struct ovs_vport_stats vport_stats;
-
-	ovs_vport_get_stats(vport, &vport_stats);
-
-	/* The tx and rx stats need to be swapped because the
-	 * switch and host OS have opposite perspectives. */
-	stats->rx_packets	= vport_stats.tx_packets;
-	stats->tx_packets	= vport_stats.rx_packets;
-	stats->rx_bytes		= vport_stats.tx_bytes;
-	stats->tx_bytes		= vport_stats.rx_bytes;
-	stats->rx_errors	= vport_stats.tx_errors;
-	stats->tx_errors	= vport_stats.rx_errors;
-	stats->rx_dropped	= vport_stats.tx_dropped;
-	stats->tx_dropped	= vport_stats.rx_dropped;
-
-	return stats;
-}
-
 /* Called with rcu_read_lock_bh. */
 static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
+	int len, err;
+
+	len = skb->len;
 	rcu_read_lock();
-	ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
+	err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
 	rcu_read_unlock();
+
+	if (likely(!err)) {
+		struct pcpu_sw_netstats *tstats = this_cpu_ptr(netdev->tstats);
+
+		u64_stats_update_begin(&tstats->syncp);
+		tstats->tx_bytes += len;
+		tstats->tx_packets++;
+		u64_stats_update_end(&tstats->syncp);
+	} else {
+		netdev->stats.tx_errors++;
+	}
 	return 0;
 }
 
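The counters updated here follow the standard pcpu_sw_netstats scheme:
writers bracket updates with u64_stats_update_begin()/end(), and readers
retry until they observe a consistent snapshot. A sketch of the matching
reader side, assuming a local 'tstats' pointer obtained from
netdev->tstats for one CPU:

	unsigned int start;
	u64 bytes, packets;

	do {
		start = u64_stats_fetch_begin_irq(&tstats->syncp);
		bytes = tstats->tx_bytes;
		packets = tstats->tx_packets;
	} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));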
@@ -121,7 +112,6 @@
 	.ndo_start_xmit = internal_dev_xmit,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_change_mtu = internal_dev_change_mtu,
-	.ndo_get_stats64 = internal_dev_get_stats,
 };
 
 static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
@@ -135,7 +125,7 @@
 	netdev->netdev_ops = &internal_dev_netdev_ops;
 
 	netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
-	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH;
 	netdev->destructor = internal_dev_destructor;
 	netdev->ethtool_ops = &internal_dev_ethtool_ops;
 	netdev->rtnl_link_ops = &internal_dev_link_ops;
@@ -156,49 +146,44 @@
 static struct vport *internal_dev_create(const struct vport_parms *parms)
 {
 	struct vport *vport;
-	struct netdev_vport *netdev_vport;
 	struct internal_dev *internal_dev;
 	int err;
 
-	vport = ovs_vport_alloc(sizeof(struct netdev_vport),
-				&ovs_internal_vport_ops, parms);
+	vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
 	if (IS_ERR(vport)) {
 		err = PTR_ERR(vport);
 		goto error;
 	}
 
-	netdev_vport = netdev_vport_priv(vport);
-
-	netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev),
-					 parms->name, NET_NAME_UNKNOWN,
-					 do_setup);
-	if (!netdev_vport->dev) {
+	vport->dev = alloc_netdev(sizeof(struct internal_dev),
+				  parms->name, NET_NAME_UNKNOWN, do_setup);
+	if (!vport->dev) {
 		err = -ENOMEM;
 		goto error_free_vport;
 	}
 
-	dev_net_set(netdev_vport->dev, ovs_dp_get_net(vport->dp));
-	internal_dev = internal_dev_priv(netdev_vport->dev);
+	dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
+	internal_dev = internal_dev_priv(vport->dev);
 	internal_dev->vport = vport;
 
 	/* Restrict bridge port to current netns. */
 	if (vport->port_no == OVSP_LOCAL)
-		netdev_vport->dev->features |= NETIF_F_NETNS_LOCAL;
+		vport->dev->features |= NETIF_F_NETNS_LOCAL;
 
 	rtnl_lock();
-	err = register_netdevice(netdev_vport->dev);
+	err = register_netdevice(vport->dev);
 	if (err)
 		goto error_free_netdev;
 
-	dev_set_promiscuity(netdev_vport->dev, 1);
+	dev_set_promiscuity(vport->dev, 1);
 	rtnl_unlock();
-	netif_start_queue(netdev_vport->dev);
+	netif_start_queue(vport->dev);
 
 	return vport;
 
 error_free_netdev:
 	rtnl_unlock();
-	free_netdev(netdev_vport->dev);
+	free_netdev(vport->dev);
 error_free_vport:
 	ovs_vport_free(vport);
 error:
@@ -207,30 +192,27 @@
 
 static void internal_dev_destroy(struct vport *vport)
 {
-	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-
-	netif_stop_queue(netdev_vport->dev);
+	netif_stop_queue(vport->dev);
 	rtnl_lock();
-	dev_set_promiscuity(netdev_vport->dev, -1);
+	dev_set_promiscuity(vport->dev, -1);
 
 	/* unregister_netdevice() waits for an RCU grace period. */
-	unregister_netdevice(netdev_vport->dev);
+	unregister_netdevice(vport->dev);
 
 	rtnl_unlock();
 }
 
-static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
+static void internal_dev_recv(struct vport *vport, struct sk_buff *skb)
 {
-	struct net_device *netdev = netdev_vport_priv(vport)->dev;
-	int len;
+	struct net_device *netdev = vport->dev;
+	struct pcpu_sw_netstats *stats;
 
 	if (unlikely(!(netdev->flags & IFF_UP))) {
 		kfree_skb(skb);
-		return 0;
+		netdev->stats.rx_dropped++;
+		return;
 	}
 
-	len = skb->len;
-
 	skb_dst_drop(skb);
 	nf_reset(skb);
 	secpath_reset(skb);
@@ -240,16 +222,19 @@
 	skb->protocol = eth_type_trans(skb, netdev);
 	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
-	netif_rx(skb);
+	stats = this_cpu_ptr(netdev->tstats);
+	u64_stats_update_begin(&stats->syncp);
+	stats->rx_packets++;
+	stats->rx_bytes += skb->len;
+	u64_stats_update_end(&stats->syncp);
 
-	return len;
+	netif_rx(skb);
 }
 
 static struct vport_ops ovs_internal_vport_ops = {
 	.type		= OVS_VPORT_TYPE_INTERNAL,
 	.create		= internal_dev_create,
 	.destroy	= internal_dev_destroy,
-	.get_name	= ovs_netdev_get_name,
 	.send		= internal_dev_recv,
 };
 
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 33e6d6e..f7e8dcc 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -26,18 +26,24 @@
 #include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
 #include <linux/openvswitch.h>
+#include <linux/export.h>
 
-#include <net/llc.h>
+#include <net/ip_tunnels.h>
+#include <net/rtnetlink.h>
 
 #include "datapath.h"
+#include "vport.h"
 #include "vport-internal_dev.h"
 #include "vport-netdev.h"
 
 static struct vport_ops ovs_netdev_vport_ops;
 
 /* Must be called with rcu_read_lock. */
-static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
+static void netdev_port_receive(struct sk_buff *skb)
 {
+	struct vport *vport;
+
+	vport = ovs_netdev_get_vport(skb->dev);
 	if (unlikely(!vport))
 		goto error;
 
@@ -53,10 +59,8 @@
 
 	skb_push(skb, ETH_HLEN);
 	ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
-
-	ovs_vport_receive(vport, skb, NULL);
+	ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
 	return;
-
 error:
 	kfree_skb(skb);
 }
@@ -65,15 +69,11 @@
 static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
 {
 	struct sk_buff *skb = *pskb;
-	struct vport *vport;
 
 	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
 		return RX_HANDLER_PASS;
 
-	vport = ovs_netdev_get_vport(skb->dev);
-
-	netdev_port_receive(vport, skb);
-
+	netdev_port_receive(skb);
 	return RX_HANDLER_CONSUMED;
 }
 
@@ -83,105 +83,112 @@
 
 	local = ovs_vport_ovsl(dp, OVSP_LOCAL);
 	BUG_ON(!local);
-	return netdev_vport_priv(local)->dev;
+	return local->dev;
 }
 
-static struct vport *netdev_create(const struct vport_parms *parms)
+struct vport *ovs_netdev_link(struct vport *vport, const char *name)
 {
-	struct vport *vport;
-	struct netdev_vport *netdev_vport;
 	int err;
 
-	vport = ovs_vport_alloc(sizeof(struct netdev_vport),
-				&ovs_netdev_vport_ops, parms);
-	if (IS_ERR(vport)) {
-		err = PTR_ERR(vport);
-		goto error;
-	}
-
-	netdev_vport = netdev_vport_priv(vport);
-
-	netdev_vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), parms->name);
-	if (!netdev_vport->dev) {
+	vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), name);
+	if (!vport->dev) {
 		err = -ENODEV;
 		goto error_free_vport;
 	}
 
-	if (netdev_vport->dev->flags & IFF_LOOPBACK ||
-	    netdev_vport->dev->type != ARPHRD_ETHER ||
-	    ovs_is_internal_dev(netdev_vport->dev)) {
+	if (vport->dev->flags & IFF_LOOPBACK ||
+	    vport->dev->type != ARPHRD_ETHER ||
+	    ovs_is_internal_dev(vport->dev)) {
 		err = -EINVAL;
 		goto error_put;
 	}
 
 	rtnl_lock();
-	err = netdev_master_upper_dev_link(netdev_vport->dev,
+	err = netdev_master_upper_dev_link(vport->dev,
 					   get_dpdev(vport->dp));
 	if (err)
 		goto error_unlock;
 
-	err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
+	err = netdev_rx_handler_register(vport->dev, netdev_frame_hook,
 					 vport);
 	if (err)
 		goto error_master_upper_dev_unlink;
 
-	dev_disable_lro(netdev_vport->dev);
-	dev_set_promiscuity(netdev_vport->dev, 1);
-	netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
+	dev_disable_lro(vport->dev);
+	dev_set_promiscuity(vport->dev, 1);
+	vport->dev->priv_flags |= IFF_OVS_DATAPATH;
 	rtnl_unlock();
 
 	return vport;
 
 error_master_upper_dev_unlink:
-	netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
+	netdev_upper_dev_unlink(vport->dev, get_dpdev(vport->dp));
 error_unlock:
 	rtnl_unlock();
 error_put:
-	dev_put(netdev_vport->dev);
+	dev_put(vport->dev);
 error_free_vport:
 	ovs_vport_free(vport);
-error:
 	return ERR_PTR(err);
 }
+EXPORT_SYMBOL_GPL(ovs_netdev_link);
 
-static void free_port_rcu(struct rcu_head *rcu)
+static struct vport *netdev_create(const struct vport_parms *parms)
 {
-	struct netdev_vport *netdev_vport = container_of(rcu,
-					struct netdev_vport, rcu);
+	struct vport *vport;
 
-	dev_put(netdev_vport->dev);
-	ovs_vport_free(vport_from_priv(netdev_vport));
+	vport = ovs_vport_alloc(0, &ovs_netdev_vport_ops, parms);
+	if (IS_ERR(vport))
+		return vport;
+
+	return ovs_netdev_link(vport, parms->name);
+}
+
+static void vport_netdev_free(struct rcu_head *rcu)
+{
+	struct vport *vport = container_of(rcu, struct vport, rcu);
+
+	if (vport->dev)
+		dev_put(vport->dev);
+	ovs_vport_free(vport);
 }
 
 void ovs_netdev_detach_dev(struct vport *vport)
 {
-	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-
 	ASSERT_RTNL();
-	netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
-	netdev_rx_handler_unregister(netdev_vport->dev);
-	netdev_upper_dev_unlink(netdev_vport->dev,
-				netdev_master_upper_dev_get(netdev_vport->dev));
-	dev_set_promiscuity(netdev_vport->dev, -1);
+	vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
+	netdev_rx_handler_unregister(vport->dev);
+	netdev_upper_dev_unlink(vport->dev,
+				netdev_master_upper_dev_get(vport->dev));
+	dev_set_promiscuity(vport->dev, -1);
 }
+EXPORT_SYMBOL_GPL(ovs_netdev_detach_dev);
 
 static void netdev_destroy(struct vport *vport)
 {
-	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-
 	rtnl_lock();
-	if (netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH)
+	if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
 		ovs_netdev_detach_dev(vport);
 	rtnl_unlock();
 
-	call_rcu(&netdev_vport->rcu, free_port_rcu);
+	call_rcu(&vport->rcu, vport_netdev_free);
 }
 
-const char *ovs_netdev_get_name(const struct vport *vport)
+void ovs_netdev_tunnel_destroy(struct vport *vport)
 {
-	const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-	return netdev_vport->dev->name;
+	rtnl_lock();
+	if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
+		ovs_netdev_detach_dev(vport);
+
+	/* Early release so we can unregister the device */
+	dev_put(vport->dev);
+	rtnl_delete_link(vport->dev);
+	vport->dev = NULL;
+	rtnl_unlock();
+
+	call_rcu(&vport->rcu, vport_netdev_free);
 }
+EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);
 
 static unsigned int packet_length(const struct sk_buff *skb)
 {
@@ -193,29 +200,26 @@
 	return length;
 }
 
-static int netdev_send(struct vport *vport, struct sk_buff *skb)
+void ovs_netdev_send(struct vport *vport, struct sk_buff *skb)
 {
-	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-	int mtu = netdev_vport->dev->mtu;
-	int len;
+	int mtu = vport->dev->mtu;
 
 	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
 		net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
-				     netdev_vport->dev->name,
+				     vport->dev->name,
 				     packet_length(skb), mtu);
+		vport->dev->stats.tx_errors++;
 		goto drop;
 	}
 
-	skb->dev = netdev_vport->dev;
-	len = skb->len;
+	skb->dev = vport->dev;
 	dev_queue_xmit(skb);
-
-	return len;
+	return;
 
 drop:
 	kfree_skb(skb);
-	return 0;
 }
+EXPORT_SYMBOL_GPL(ovs_netdev_send);
 
 /* Returns null if this device is not attached to a datapath. */
 struct vport *ovs_netdev_get_vport(struct net_device *dev)
@@ -231,8 +235,7 @@
 	.type		= OVS_VPORT_TYPE_NETDEV,
 	.create		= netdev_create,
 	.destroy	= netdev_destroy,
-	.get_name	= ovs_netdev_get_name,
-	.send		= netdev_send,
+	.send		= ovs_netdev_send,
 };
 
 int __init ovs_netdev_init(void)
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index 6f7038e..bf22fce 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -26,22 +26,12 @@
 
 struct vport *ovs_netdev_get_vport(struct net_device *dev);
 
-struct netdev_vport {
-	struct rcu_head rcu;
-
-	struct net_device *dev;
-};
-
-static inline struct netdev_vport *
-netdev_vport_priv(const struct vport *vport)
-{
-	return vport_priv(vport);
-}
-
-const char *ovs_netdev_get_name(const struct vport *);
+struct vport *ovs_netdev_link(struct vport *vport, const char *name);
+void ovs_netdev_send(struct vport *vport, struct sk_buff *skb);
 void ovs_netdev_detach_dev(struct vport *);
 
 int __init ovs_netdev_init(void);
 void ovs_netdev_exit(void);
 
+void ovs_netdev_tunnel_destroy(struct vport *vport);
 #endif /* vport_netdev.h */
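
With struct netdev_vport gone, the exported trio ovs_netdev_link(), ovs_netdev_send() and ovs_netdev_tunnel_destroy() becomes the whole reuse surface for netdev-backed tunnel vports. A minimal sketch of how a tunnel type composes them (the foo_* names are illustrative, not part of this patch):

	static struct vport_ops ovs_foo_vport_ops;

	static struct vport *foo_create(const struct vport_parms *parms)
	{
		struct vport *vport;

		/* no private area: struct vport now embeds the dev pointer */
		vport = ovs_vport_alloc(0, &ovs_foo_vport_ops, parms);
		if (IS_ERR(vport))
			return vport;

		/* create the backing net_device here, then attach it */
		return ovs_netdev_link(vport, parms->name);
	}

	static struct vport_ops ovs_foo_vport_ops = {
		.type		= OVS_VPORT_TYPE_VXLAN,	/* placeholder */
		.create		= foo_create,
		.destroy	= ovs_netdev_tunnel_destroy,
		.send		= ovs_netdev_send,
	};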
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 6d39766..c11413d 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -17,94 +17,37 @@
  * 02110-1301, USA
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/net.h>
-#include <linux/rculist.h>
-#include <linux/udp.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/openvswitch.h>
 #include <linux/module.h>
-
-#include <net/icmp.h>
-#include <net/ip.h>
 #include <net/udp.h>
 #include <net/ip_tunnels.h>
 #include <net/rtnetlink.h>
-#include <net/route.h>
-#include <net/dsfield.h>
-#include <net/inet_ecn.h>
-#include <net/net_namespace.h>
-#include <net/netns/generic.h>
 #include <net/vxlan.h>
 
 #include "datapath.h"
 #include "vport.h"
-#include "vport-vxlan.h"
+#include "vport-netdev.h"
 
-/**
- * struct vxlan_port - Keeps track of open UDP ports
- * @vs: vxlan_sock created for the port.
- * @name: vport name.
- */
-struct vxlan_port {
-	struct vxlan_sock *vs;
-	char name[IFNAMSIZ];
-	u32 exts; /* VXLAN_F_* in <net/vxlan.h> */
-};
-
-static struct vport_ops ovs_vxlan_vport_ops;
-
-static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
-{
-	return vport_priv(vport);
-}
-
-/* Called with rcu_read_lock and BH disabled. */
-static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
-		      struct vxlan_metadata *md)
-{
-	struct ovs_tunnel_info tun_info;
-	struct vxlan_port *vxlan_port;
-	struct vport *vport = vs->data;
-	struct iphdr *iph;
-	struct ovs_vxlan_opts opts = {
-		.gbp = md->gbp,
-	};
-	__be64 key;
-	__be16 flags;
-
-	flags = TUNNEL_KEY | (udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0);
-	vxlan_port = vxlan_vport(vport);
-	if (vxlan_port->exts & VXLAN_F_GBP && md->gbp)
-		flags |= TUNNEL_VXLAN_OPT;
-
-	/* Save outer tunnel values */
-	iph = ip_hdr(skb);
-	key = cpu_to_be64(ntohl(md->vni) >> 8);
-	ovs_flow_tun_info_init(&tun_info, iph,
-			       udp_hdr(skb)->source, udp_hdr(skb)->dest,
-			       key, flags, &opts, sizeof(opts));
-
-	ovs_vport_receive(vport, skb, &tun_info);
-}
+static struct vport_ops ovs_vxlan_netdev_vport_ops;
 
 static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
 {
-	struct vxlan_port *vxlan_port = vxlan_vport(vport);
-	__be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+	struct vxlan_dev *vxlan = netdev_priv(vport->dev);
+	__be16 dst_port = vxlan->cfg.dst_port;
 
 	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
 		return -EMSGSIZE;
 
-	if (vxlan_port->exts) {
+	if (vxlan->flags & VXLAN_F_GBP) {
 		struct nlattr *exts;
 
 		exts = nla_nest_start(skb, OVS_TUNNEL_ATTR_EXTENSION);
 		if (!exts)
 			return -EMSGSIZE;
 
-		if (vxlan_port->exts & VXLAN_F_GBP &&
+		if (vxlan->flags & VXLAN_F_GBP &&
 		    nla_put_flag(skb, OVS_VXLAN_EXT_GBP))
 			return -EMSGSIZE;
 
@@ -114,23 +57,14 @@
 	return 0;
 }
 
-static void vxlan_tnl_destroy(struct vport *vport)
-{
-	struct vxlan_port *vxlan_port = vxlan_vport(vport);
-
-	vxlan_sock_release(vxlan_port->vs);
-
-	ovs_vport_deferred_free(vport);
-}
-
-static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX+1] = {
+static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX + 1] = {
 	[OVS_VXLAN_EXT_GBP]	= { .type = NLA_FLAG, },
 };
 
-static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr)
+static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr,
+				struct vxlan_config *conf)
 {
-	struct nlattr *exts[OVS_VXLAN_EXT_MAX+1];
-	struct vxlan_port *vxlan_port;
+	struct nlattr *exts[OVS_VXLAN_EXT_MAX + 1];
 	int err;
 
 	if (nla_len(attr) < sizeof(struct nlattr))
@@ -140,10 +74,8 @@
 	if (err < 0)
 		return err;
 
-	vxlan_port = vxlan_vport(vport);
-
 	if (exts[OVS_VXLAN_EXT_GBP])
-		vxlan_port->exts |= VXLAN_F_GBP;
+		conf->flags |= VXLAN_F_GBP;
 
 	return 0;
 }
@@ -152,128 +84,74 @@
 {
 	struct net *net = ovs_dp_get_net(parms->dp);
 	struct nlattr *options = parms->options;
-	struct vxlan_port *vxlan_port;
-	struct vxlan_sock *vs;
+	struct net_device *dev;
 	struct vport *vport;
 	struct nlattr *a;
-	u16 dst_port;
 	int err;
+	struct vxlan_config conf = {
+		.no_share = true,
+		.flags = VXLAN_F_COLLECT_METADATA,
+	};
 
 	if (!options) {
 		err = -EINVAL;
 		goto error;
 	}
+
 	a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
 	if (a && nla_len(a) == sizeof(u16)) {
-		dst_port = nla_get_u16(a);
+		conf.dst_port = htons(nla_get_u16(a));
 	} else {
 		/* Require destination port from userspace. */
 		err = -EINVAL;
 		goto error;
 	}
 
-	vport = ovs_vport_alloc(sizeof(struct vxlan_port),
-				&ovs_vxlan_vport_ops, parms);
+	vport = ovs_vport_alloc(0, &ovs_vxlan_netdev_vport_ops, parms);
 	if (IS_ERR(vport))
 		return vport;
 
-	vxlan_port = vxlan_vport(vport);
-	strncpy(vxlan_port->name, parms->name, IFNAMSIZ);
-
 	a = nla_find_nested(options, OVS_TUNNEL_ATTR_EXTENSION);
 	if (a) {
-		err = vxlan_configure_exts(vport, a);
+		err = vxlan_configure_exts(vport, a, &conf);
 		if (err) {
 			ovs_vport_free(vport);
 			goto error;
 		}
 	}
 
-	vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true,
-			    vxlan_port->exts);
-	if (IS_ERR(vs)) {
+	rtnl_lock();
+	dev = vxlan_dev_create(net, parms->name, NET_NAME_USER, &conf);
+	if (IS_ERR(dev)) {
+		rtnl_unlock();
 		ovs_vport_free(vport);
-		return (void *)vs;
+		return ERR_CAST(dev);
 	}
-	vxlan_port->vs = vs;
 
+	dev_change_flags(dev, dev->flags | IFF_UP);
+	rtnl_unlock();
 	return vport;
-
 error:
 	return ERR_PTR(err);
 }
 
-static int vxlan_ext_gbp(struct sk_buff *skb)
+static struct vport *vxlan_create(const struct vport_parms *parms)
 {
-	const struct ovs_tunnel_info *tun_info;
-	const struct ovs_vxlan_opts *opts;
+	struct vport *vport;
 
-	tun_info = OVS_CB(skb)->egress_tun_info;
-	opts = tun_info->options;
+	vport = vxlan_tnl_create(parms);
+	if (IS_ERR(vport))
+		return vport;
 
-	if (tun_info->tunnel.tun_flags & TUNNEL_VXLAN_OPT &&
-	    tun_info->options_len >= sizeof(*opts))
-		return opts->gbp;
-	else
-		return 0;
-}
-
-static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
-{
-	struct net *net = ovs_dp_get_net(vport->dp);
-	struct vxlan_port *vxlan_port = vxlan_vport(vport);
-	struct sock *sk = vxlan_port->vs->sock->sk;
-	__be16 dst_port = inet_sk(sk)->inet_sport;
-	const struct ovs_key_ipv4_tunnel *tun_key;
-	struct vxlan_metadata md = {0};
-	struct rtable *rt;
-	struct flowi4 fl;
-	__be16 src_port;
-	__be16 df;
-	int err;
-	u32 vxflags;
-
-	if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
-		err = -EINVAL;
-		goto error;
-	}
-
-	tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
-	rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
-	if (IS_ERR(rt)) {
-		err = PTR_ERR(rt);
-		goto error;
-	}
-
-	df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
-		htons(IP_DF) : 0;
-
-	skb->ignore_df = 1;
-
-	src_port = udp_flow_src_port(net, skb, 0, 0, true);
-	md.vni = htonl(be64_to_cpu(tun_key->tun_id) << 8);
-	md.gbp = vxlan_ext_gbp(skb);
-	vxflags = vxlan_port->exts |
-		      (tun_key->tun_flags & TUNNEL_CSUM ? VXLAN_F_UDP_CSUM : 0);
-
-	err = vxlan_xmit_skb(rt, sk, skb, fl.saddr, tun_key->ipv4_dst,
-			     tun_key->ipv4_tos, tun_key->ipv4_ttl, df,
-			     src_port, dst_port,
-			     &md, false, vxflags);
-	if (err < 0)
-		ip_rt_put(rt);
-	return err;
-error:
-	kfree_skb(skb);
-	return err;
+	return ovs_netdev_link(vport, parms->name);
 }
 
 static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-				     struct ovs_tunnel_info *egress_tun_info)
+				     struct dp_upcall_info *upcall)
 {
+	struct vxlan_dev *vxlan = netdev_priv(vport->dev);
 	struct net *net = ovs_dp_get_net(vport->dp);
-	struct vxlan_port *vxlan_port = vxlan_vport(vport);
-	__be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+	__be16 dst_port = vxlan_dev_dst_port(vxlan);
 	__be16 src_port;
 	int port_min;
 	int port_max;
@@ -281,37 +159,28 @@
 	inet_get_local_port_range(net, &port_min, &port_max);
 	src_port = udp_flow_src_port(net, skb, 0, 0, true);
 
-	return ovs_tunnel_get_egress_info(egress_tun_info, net,
-					  OVS_CB(skb)->egress_tun_info,
-					  IPPROTO_UDP, skb->mark,
+	return ovs_tunnel_get_egress_info(upcall, net,
+					  skb, IPPROTO_UDP,
 					  src_port, dst_port);
 }
 
-static const char *vxlan_get_name(const struct vport *vport)
-{
-	struct vxlan_port *vxlan_port = vxlan_vport(vport);
-	return vxlan_port->name;
-}
-
-static struct vport_ops ovs_vxlan_vport_ops = {
-	.type		= OVS_VPORT_TYPE_VXLAN,
-	.create		= vxlan_tnl_create,
-	.destroy	= vxlan_tnl_destroy,
-	.get_name	= vxlan_get_name,
-	.get_options	= vxlan_get_options,
-	.send		= vxlan_tnl_send,
+static struct vport_ops ovs_vxlan_netdev_vport_ops = {
+	.type			= OVS_VPORT_TYPE_VXLAN,
+	.create			= vxlan_create,
+	.destroy		= ovs_netdev_tunnel_destroy,
+	.get_options		= vxlan_get_options,
+	.send			= ovs_netdev_send,
 	.get_egress_tun_info	= vxlan_get_egress_tun_info,
-	.owner		= THIS_MODULE,
 };
 
 static int __init ovs_vxlan_tnl_init(void)
 {
-	return ovs_vport_ops_register(&ovs_vxlan_vport_ops);
+	return ovs_vport_ops_register(&ovs_vxlan_netdev_vport_ops);
 }
 
 static void __exit ovs_vxlan_tnl_exit(void)
 {
-	ovs_vport_ops_unregister(&ovs_vxlan_vport_ops);
+	ovs_vport_ops_unregister(&ovs_vxlan_netdev_vport_ops);
 }
 
 module_init(ovs_vxlan_tnl_init);
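
The switch to a VXLAN_F_COLLECT_METADATA device is what allows the per-port vxlan_rcv() callback above to be deleted: the outer header now rides on the skb as a metadata dst and is read back generically. A minimal read-side sketch, assuming an IPv4 underlay:

	#include <net/dst_metadata.h>

	static void inspect_tunnel_md(struct sk_buff *skb)
	{
		const struct ip_tunnel_info *info = skb_tunnel_info(skb);

		/* no metadata dst: the skb did not arrive on a
		 * collect-metadata tunnel device
		 */
		if (!info || ip_tunnel_info_af(info) != AF_INET)
			return;

		pr_debug("tun_id %llu from %pI4\n",
			 (unsigned long long)be64_to_cpu(info->key.tun_id),
			 &info->key.u.ipv4.src);
	}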
diff --git a/net/openvswitch/vport-vxlan.h b/net/openvswitch/vport-vxlan.h
deleted file mode 100644
index 4b08233e..0000000
--- a/net/openvswitch/vport-vxlan.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef VPORT_VXLAN_H
-#define VPORT_VXLAN_H 1
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-struct ovs_vxlan_opts {
-	__u32 gbp;
-};
-
-#endif
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 067a3ff..dc81dc6 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -34,9 +34,6 @@
 #include "vport.h"
 #include "vport-internal_dev.h"
 
-static void ovs_vport_record_error(struct vport *,
-				   enum vport_err_type err_type);
-
 static LIST_HEAD(vport_ops_list);
 
 /* Protected by RCU read lock for reading, ovs_mutex for writing. */
@@ -113,7 +110,7 @@
 	struct vport *vport;
 
 	hlist_for_each_entry_rcu(vport, bucket, hash_node)
-		if (!strcmp(name, vport->ops->get_name(vport)) &&
+		if (!strcmp(name, ovs_vport_name(vport)) &&
 		    net_eq(ovs_dp_get_net(vport->dp), net))
 			return vport;
 
@@ -157,12 +154,6 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	vport->percpu_stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
-	if (!vport->percpu_stats) {
-		kfree(vport);
-		return ERR_PTR(-ENOMEM);
-	}
-
 	return vport;
 }
 EXPORT_SYMBOL_GPL(ovs_vport_alloc);
@@ -183,7 +174,6 @@
 	 * it is safe to use raw dereference.
 	 */
 	kfree(rcu_dereference_raw(vport->upcall_portids));
-	free_percpu(vport->percpu_stats);
 	kfree(vport);
 }
 EXPORT_SYMBOL_GPL(ovs_vport_free);
@@ -226,7 +216,7 @@
 		}
 
 		bucket = hash_bucket(ovs_dp_get_net(vport->dp),
-				     vport->ops->get_name(vport));
+				     ovs_vport_name(vport));
 		hlist_add_head_rcu(&vport->hash_node, bucket);
 		return vport;
 	}
@@ -290,30 +280,24 @@
  */
 void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
 {
+	struct net_device *dev = vport->dev;
 	int i;
 
 	memset(stats, 0, sizeof(*stats));
+	stats->rx_errors  = dev->stats.rx_errors;
+	stats->tx_errors  = dev->stats.tx_errors;
+	stats->tx_dropped = dev->stats.tx_dropped;
+	stats->rx_dropped = dev->stats.rx_dropped;
 
-	/* We potentially have 2 sources of stats that need to be combined:
-	 * those we have collected (split into err_stats and percpu_stats) from
-	 * set_stats() and device error stats from netdev->get_stats() (for
-	 * errors that happen  downstream and therefore aren't reported through
-	 * our vport_record_error() function).
-	 * Stats from first source are reported by ovs (OVS_VPORT_ATTR_STATS).
-	 * netdev-stats can be directly read over netlink-ioctl.
-	 */
-
-	stats->rx_errors  = atomic_long_read(&vport->err_stats.rx_errors);
-	stats->tx_errors  = atomic_long_read(&vport->err_stats.tx_errors);
-	stats->tx_dropped = atomic_long_read(&vport->err_stats.tx_dropped);
-	stats->rx_dropped = atomic_long_read(&vport->err_stats.rx_dropped);
+	stats->rx_dropped += atomic_long_read(&dev->rx_dropped);
+	stats->tx_dropped += atomic_long_read(&dev->tx_dropped);
 
 	for_each_possible_cpu(i) {
 		const struct pcpu_sw_netstats *percpu_stats;
 		struct pcpu_sw_netstats local_stats;
 		unsigned int start;
 
-		percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
+		percpu_stats = per_cpu_ptr(dev->tstats, i);
 
 		do {
 			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
@@ -468,94 +452,25 @@
  * Must be called with rcu_read_lock.  The packet cannot be shared and
  * skb->data should point to the Ethernet header.
  */
-void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
-		       const struct ovs_tunnel_info *tun_info)
+int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
+		      const struct ip_tunnel_info *tun_info)
 {
-	struct pcpu_sw_netstats *stats;
 	struct sw_flow_key key;
 	int error;
 
-	stats = this_cpu_ptr(vport->percpu_stats);
-	u64_stats_update_begin(&stats->syncp);
-	stats->rx_packets++;
-	stats->rx_bytes += skb->len +
-			   (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
-	u64_stats_update_end(&stats->syncp);
-
 	OVS_CB(skb)->input_vport = vport;
-	OVS_CB(skb)->egress_tun_info = NULL;
+	OVS_CB(skb)->mru = 0;
 	/* Extract flow from 'skb' into 'key'. */
 	error = ovs_flow_key_extract(tun_info, skb, &key);
 	if (unlikely(error)) {
 		kfree_skb(skb);
-		return;
+		return error;
 	}
 	ovs_dp_process_packet(skb, &key);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(ovs_vport_receive);
 
-/**
- *	ovs_vport_send - send a packet on a device
- *
- * @vport: vport on which to send the packet
- * @skb: skb to send
- *
- * Sends the given packet and returns the length of data sent.  Either ovs
- * lock or rcu_read_lock must be held.
- */
-int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
-{
-	int sent = vport->ops->send(vport, skb);
-
-	if (likely(sent > 0)) {
-		struct pcpu_sw_netstats *stats;
-
-		stats = this_cpu_ptr(vport->percpu_stats);
-
-		u64_stats_update_begin(&stats->syncp);
-		stats->tx_packets++;
-		stats->tx_bytes += sent;
-		u64_stats_update_end(&stats->syncp);
-	} else if (sent < 0) {
-		ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
-	} else {
-		ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
-	}
-	return sent;
-}
-
-/**
- *	ovs_vport_record_error - indicate device error to generic stats layer
- *
- * @vport: vport that encountered the error
- * @err_type: one of enum vport_err_type types to indicate the error type
- *
- * If using the vport generic stats layer indicate that an error of the given
- * type has occurred.
- */
-static void ovs_vport_record_error(struct vport *vport,
-				   enum vport_err_type err_type)
-{
-	switch (err_type) {
-	case VPORT_E_RX_DROPPED:
-		atomic_long_inc(&vport->err_stats.rx_dropped);
-		break;
-
-	case VPORT_E_RX_ERROR:
-		atomic_long_inc(&vport->err_stats.rx_errors);
-		break;
-
-	case VPORT_E_TX_DROPPED:
-		atomic_long_inc(&vport->err_stats.tx_dropped);
-		break;
-
-	case VPORT_E_TX_ERROR:
-		atomic_long_inc(&vport->err_stats.tx_errors);
-		break;
-	}
-
-}
-
 static void free_vport_rcu(struct rcu_head *rcu)
 {
 	struct vport *vport = container_of(rcu, struct vport, rcu);
@@ -572,22 +487,26 @@
 }
 EXPORT_SYMBOL_GPL(ovs_vport_deferred_free);
 
-int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
+int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
 			       struct net *net,
-			       const struct ovs_tunnel_info *tun_info,
+			       struct sk_buff *skb,
 			       u8 ipproto,
-			       u32 skb_mark,
 			       __be16 tp_src,
 			       __be16 tp_dst)
 {
-	const struct ovs_key_ipv4_tunnel *tun_key;
+	struct ip_tunnel_info *egress_tun_info = upcall->egress_tun_info;
+	const struct ip_tunnel_info *tun_info = skb_tunnel_info(skb);
+	const struct ip_tunnel_key *tun_key;
+	u32 skb_mark = skb->mark;
 	struct rtable *rt;
 	struct flowi4 fl;
 
 	if (unlikely(!tun_info))
 		return -EINVAL;
+	if (ip_tunnel_info_af(tun_info) != AF_INET)
+		return -EINVAL;
 
-	tun_key = &tun_info->tunnel;
+	tun_key = &tun_info->key;
 
 	/* Route lookup to get source IP address.
 	 * The process may need to be changed if the corresponding process
@@ -602,26 +521,26 @@
 	/* Generate egress_tun_info based on tun_info,
 	 * saddr, tp_src and tp_dst
 	 */
-	__ovs_flow_tun_info_init(egress_tun_info,
-				 fl.saddr, tun_key->ipv4_dst,
-				 tun_key->ipv4_tos,
-				 tun_key->ipv4_ttl,
-				 tp_src, tp_dst,
-				 tun_key->tun_id,
-				 tun_key->tun_flags,
-				 tun_info->options,
-				 tun_info->options_len);
-
+	ip_tunnel_key_init(&egress_tun_info->key,
+			   fl.saddr, tun_key->u.ipv4.dst,
+			   tun_key->tos,
+			   tun_key->ttl,
+			   tp_src, tp_dst,
+			   tun_key->tun_id,
+			   tun_key->tun_flags);
+	egress_tun_info->options_len = tun_info->options_len;
+	egress_tun_info->mode = tun_info->mode;
+	upcall->egress_tun_opts = ip_tunnel_info_opts(egress_tun_info);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info);
 
 int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-				  struct ovs_tunnel_info *info)
+				  struct dp_upcall_info *upcall)
 {
 	/* get_egress_tun_info() is only implemented on tunnel ports. */
 	if (unlikely(!vport->ops->get_egress_tun_info))
 		return -EINVAL;
 
-	return vport->ops->get_egress_tun_info(vport, skb, info);
+	return vport->ops->get_egress_tun_info(vport, skb, upcall);
 }
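
The per-CPU half of the stats hunk above is cut off by the diff context right after u64_stats_fetch_begin_irq(). The remainder follows the standard u64_stats retry idiom against dev->tstats; reconstructed here as a sketch of the elided tail:

		do {
			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

		stats->rx_bytes		+= local_stats.rx_bytes;
		stats->rx_packets	+= local_stats.rx_packets;
		stats->tx_bytes		+= local_stats.tx_bytes;
		stats->tx_packets	+= local_stats.tx_packets;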
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index bc85331..a413f3a 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -27,6 +27,7 @@
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/u64_stats_sync.h>
+#include <net/route.h>
 
 #include "datapath.h"
 
@@ -35,10 +36,6 @@
 
 /* The following definitions are for users of the vport subsystem: */
 
-struct vport_net {
-	struct vport __rcu *gre_vport;
-};
-
 int ovs_vport_init(void);
 void ovs_vport_exit(void);
 
@@ -56,26 +53,16 @@
 int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *);
 u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
 
-int ovs_vport_send(struct vport *, struct sk_buff *);
-
-int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
+int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
 			       struct net *net,
-			       const struct ovs_tunnel_info *tun_info,
+			       struct sk_buff *,
 			       u8 ipproto,
-			       u32 skb_mark,
 			       __be16 tp_src,
 			       __be16 tp_dst);
+
 int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-				  struct ovs_tunnel_info *info);
+				  struct dp_upcall_info *upcall);
 
-/* The following definitions are for implementers of vport devices: */
-
-struct vport_err_stats {
-	atomic_long_t rx_dropped;
-	atomic_long_t rx_errors;
-	atomic_long_t tx_dropped;
-	atomic_long_t tx_errors;
-};
 /**
  * struct vport_portids - array of netlink portids of a vport.
  *                        must be protected by rcu.
@@ -101,12 +88,10 @@
  * @hash_node: Element in @dev_table hash table in vport.c.
  * @dp_hash_node: Element in @datapath->ports hash table in datapath.c.
  * @ops: Class structure.
- * @percpu_stats: Points to per-CPU statistics used and maintained by vport
- * @err_stats: Points to error statistics used and maintained by vport
  * @detach_list: list used for detaching vport in net-exit call.
  */
 struct vport {
-	struct rcu_head rcu;
+	struct net_device *dev;
 	struct datapath	*dp;
 	struct vport_portids __rcu *upcall_portids;
 	u16 port_no;
@@ -115,10 +100,8 @@
 	struct hlist_node dp_hash_node;
 	const struct vport_ops *ops;
 
-	struct pcpu_sw_netstats __percpu *percpu_stats;
-
-	struct vport_err_stats err_stats;
 	struct list_head detach_list;
+	struct rcu_head rcu;
 };
 
 /**
@@ -155,8 +138,6 @@
  * @get_options: Appends vport-specific attributes for the configuration of an
  * existing vport to a &struct sk_buff.  May be %NULL for a vport that does not
  * have any configuration.
- * @get_name: Get the device's name.
- * @send: Send a packet on the device.  Returns the length of the packet sent,
+ * @send: Send a packet on the device.
- * zero for dropped packets or negative for error.
  * @get_egress_tun_info: Get the egress tunnel 5-tuple and other info for
  * a packet.
@@ -171,24 +153,14 @@
 	int (*set_options)(struct vport *, struct nlattr *);
 	int (*get_options)(const struct vport *, struct sk_buff *);
 
-	/* Called with rcu_read_lock or ovs_mutex. */
-	const char *(*get_name)(const struct vport *);
-
-	int (*send)(struct vport *, struct sk_buff *);
+	void (*send)(struct vport *, struct sk_buff *);
 	int (*get_egress_tun_info)(struct vport *, struct sk_buff *,
-				   struct ovs_tunnel_info *);
+				   struct dp_upcall_info *upcall);
 
 	struct module *owner;
 	struct list_head list;
 };
 
-enum vport_err_type {
-	VPORT_E_RX_DROPPED,
-	VPORT_E_RX_ERROR,
-	VPORT_E_TX_DROPPED,
-	VPORT_E_TX_ERROR,
-};
-
 struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *,
 			      const struct vport_parms *);
 void ovs_vport_free(struct vport *);
@@ -225,8 +197,8 @@
 	return (struct vport *)((u8 *)priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
 }
 
-void ovs_vport_receive(struct vport *, struct sk_buff *,
-		       const struct ovs_tunnel_info *);
+int ovs_vport_receive(struct vport *, struct sk_buff *,
+		      const struct ip_tunnel_info *);
 
 static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
 				      const void *start, unsigned int len)
@@ -235,11 +207,16 @@
 		skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
 }
 
+static inline const char *ovs_vport_name(struct vport *vport)
+{
+	return vport->dev->name;
+}
+
 int ovs_vport_ops_register(struct vport_ops *ops);
 void ovs_vport_ops_unregister(struct vport_ops *ops);
 
 static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
-						     const struct ovs_key_ipv4_tunnel *key,
+						     const struct ip_tunnel_key *key,
 						     u32 mark,
 						     struct flowi4 *fl,
 						     u8 protocol)
@@ -247,13 +224,19 @@
 	struct rtable *rt;
 
 	memset(fl, 0, sizeof(*fl));
-	fl->daddr = key->ipv4_dst;
-	fl->saddr = key->ipv4_src;
-	fl->flowi4_tos = RT_TOS(key->ipv4_tos);
+	fl->daddr = key->u.ipv4.dst;
+	fl->saddr = key->u.ipv4.src;
+	fl->flowi4_tos = RT_TOS(key->tos);
 	fl->flowi4_mark = mark;
 	fl->flowi4_proto = protocol;
 
 	rt = ip_route_output_key(net, fl);
 	return rt;
 }
+
+static inline void ovs_vport_send(struct vport *vport, struct sk_buff *skb)
+{
+	vport->ops->send(vport, skb);
+}
+
 #endif /* vport.h */
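
With vport->percpu_stats and err_stats gone, the backing net_device is the single source of truth: ovs_vport_get_stats() reads dev->stats plus dev->tstats, so whichever path receives packets must update tstats itself. The standard update-side idiom, sketched for reference (general kernel pattern, not a quote from this patch):

	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);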
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ed458b3..7b8e39a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -92,6 +92,7 @@
 #ifdef CONFIG_INET
 #include <net/inet_common.h>
 #endif
+#include <linux/bpf.h>
 
 #include "internal.h"
 
@@ -518,13 +519,11 @@
 }
 
 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
-		int tx_ring,
 		struct sk_buff_head *rb_queue)
 {
 	struct tpacket_kbdq_core *pkc;
 
-	pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
-			GET_PBDQC_FROM_RB(&po->rx_ring);
+	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 
 	spin_lock_bh(&rb_queue->lock);
 	pkc->delete_blk_timer = 1;
@@ -1412,6 +1411,22 @@
 	return skb_get_queue_mapping(skb) % num;
 }
 
+static unsigned int fanout_demux_bpf(struct packet_fanout *f,
+				     struct sk_buff *skb,
+				     unsigned int num)
+{
+	struct bpf_prog *prog;
+	unsigned int ret = 0;
+
+	rcu_read_lock();
+	prog = rcu_dereference(f->bpf_prog);
+	if (prog)
+		ret = BPF_PROG_RUN(prog, skb) % num;
+	rcu_read_unlock();
+
+	return ret;
+}
+
 static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
 {
 	return f->flags & (flag >> 8);
@@ -1456,6 +1471,10 @@
 	case PACKET_FANOUT_ROLLOVER:
 		idx = fanout_demux_rollover(f, skb, 0, false, num);
 		break;
+	case PACKET_FANOUT_CBPF:
+	case PACKET_FANOUT_EBPF:
+		idx = fanout_demux_bpf(f, skb, num);
+		break;
 	}
 
 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
@@ -1504,6 +1523,103 @@
 	return false;
 }
 
+static void fanout_init_data(struct packet_fanout *f)
+{
+	switch (f->type) {
+	case PACKET_FANOUT_LB:
+		atomic_set(&f->rr_cur, 0);
+		break;
+	case PACKET_FANOUT_CBPF:
+	case PACKET_FANOUT_EBPF:
+		RCU_INIT_POINTER(f->bpf_prog, NULL);
+		break;
+	}
+}
+
+static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
+{
+	struct bpf_prog *old;
+
+	spin_lock(&f->lock);
+	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
+	rcu_assign_pointer(f->bpf_prog, new);
+	spin_unlock(&f->lock);
+
+	if (old) {
+		synchronize_net();
+		bpf_prog_destroy(old);
+	}
+}
+
+static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
+				unsigned int len)
+{
+	struct bpf_prog *new;
+	struct sock_fprog fprog;
+	int ret;
+
+	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
+		return -EPERM;
+	if (len != sizeof(fprog))
+		return -EINVAL;
+	if (copy_from_user(&fprog, data, len))
+		return -EFAULT;
+
+	ret = bpf_prog_create_from_user(&new, &fprog, NULL);
+	if (ret)
+		return ret;
+
+	__fanout_set_data_bpf(po->fanout, new);
+	return 0;
+}
+
+static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
+				unsigned int len)
+{
+	struct bpf_prog *new;
+	u32 fd;
+
+	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
+		return -EPERM;
+	if (len != sizeof(fd))
+		return -EINVAL;
+	if (copy_from_user(&fd, data, len))
+		return -EFAULT;
+
+	new = bpf_prog_get(fd);
+	if (IS_ERR(new))
+		return PTR_ERR(new);
+	if (new->type != BPF_PROG_TYPE_SOCKET_FILTER) {
+		bpf_prog_put(new);
+		return -EINVAL;
+	}
+
+	__fanout_set_data_bpf(po->fanout, new);
+	return 0;
+}
+
+static int fanout_set_data(struct packet_sock *po, char __user *data,
+			   unsigned int len)
+{
+	switch (po->fanout->type) {
+	case PACKET_FANOUT_CBPF:
+		return fanout_set_data_cbpf(po, data, len);
+	case PACKET_FANOUT_EBPF:
+		return fanout_set_data_ebpf(po, data, len);
+	default:
+		return -EINVAL;
+	};
+}
+
+static void fanout_release_data(struct packet_fanout *f)
+{
+	switch (f->type) {
+	case PACKET_FANOUT_CBPF:
+	case PACKET_FANOUT_EBPF:
+		__fanout_set_data_bpf(f, NULL);
+	}
+}
+
 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 {
 	struct packet_sock *po = pkt_sk(sk);
@@ -1521,6 +1637,8 @@
 	case PACKET_FANOUT_CPU:
 	case PACKET_FANOUT_RND:
 	case PACKET_FANOUT_QM:
+	case PACKET_FANOUT_CBPF:
+	case PACKET_FANOUT_EBPF:
 		break;
 	default:
 		return -EINVAL;
@@ -1563,10 +1681,10 @@
 		match->id = id;
 		match->type = type;
 		match->flags = flags;
-		atomic_set(&match->rr_cur, 0);
 		INIT_LIST_HEAD(&match->list);
 		spin_lock_init(&match->lock);
 		atomic_set(&match->sk_ref, 0);
+		fanout_init_data(match);
 		match->prot_hook.type = po->prot_hook.type;
 		match->prot_hook.dev = po->prot_hook.dev;
 		match->prot_hook.func = packet_rcv_fanout;
@@ -1612,6 +1730,7 @@
 	if (atomic_dec_and_test(&f->sk_ref)) {
 		list_del(&f->list);
 		dev_remove_pack(&f->prot_hook);
+		fanout_release_data(f);
 		kfree(f);
 	}
 	mutex_unlock(&fanout_mutex);
@@ -3531,6 +3650,13 @@
 
 		return fanout_add(sk, val & 0xffff, val >> 16);
 	}
+	case PACKET_FANOUT_DATA:
+	{
+		if (!po->fanout)
+			return -EINVAL;
+
+		return fanout_set_data(po, optval, optlen);
+	}
 	case PACKET_TX_HAS_OFF:
 	{
 		unsigned int val;
@@ -4043,7 +4169,7 @@
 	if (closing && (po->tp_version > TPACKET_V2)) {
 		/* Because we don't support block-based V3 on tx-ring */
 		if (!tx_ring)
-			prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
+			prb_shutdown_retire_blk_timer(po, rb_queue);
 	}
 	release_sock(sk);
 
diff --git a/net/packet/internal.h b/net/packet/internal.h
index e20b3e8..9ee4631 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -79,7 +79,10 @@
 	u16			id;
 	u8			type;
 	u8			flags;
-	atomic_t		rr_cur;
+	union {
+		atomic_t		rr_cur;
+		struct bpf_prog __rcu	*bpf_prog;
+	};
 	struct list_head	list;
 	struct sock		*arr[PACKET_FANOUT_MAX];
 	spinlock_t		lock;
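
From userspace the two new modes are driven in two steps: join a group with PACKET_FANOUT_CBPF or PACKET_FANOUT_EBPF in the low 16 bits of the PACKET_FANOUT value, then install the program through the new PACKET_FANOUT_DATA option. A minimal classic-BPF sketch; the offset 23 assumes Ethernet + IPv4 and is purely illustrative:

	#include <linux/filter.h>
	#include <linux/if_packet.h>
	#include <sys/socket.h>

	static int install_fanout_cbpf(int fd)
	{
		struct sock_filter code[] = {
			/* A = IPv4 protocol byte; the kernel takes the
			 * program's return value modulo the group size */
			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
			BPF_STMT(BPF_RET | BPF_A, 0),
		};
		struct sock_fprog prog = {
			.len	= sizeof(code) / sizeof(code[0]),
			.filter	= code,
		};

		return setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
				  &prog, sizeof(prog));
	}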
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 896834c..a2f28a6 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -438,6 +438,14 @@
 	.sendpage =	sock_no_sendpage,
 };
 
+static void rds_sock_destruct(struct sock *sk)
+{
+	struct rds_sock *rs = rds_sk_to_rs(sk);
+
+	WARN_ON((&rs->rs_item != rs->rs_item.next ||
+		 &rs->rs_item != rs->rs_item.prev));
+}
+
 static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
 {
 	struct rds_sock *rs;
@@ -445,6 +453,7 @@
 	sock_init_data(sock, sk);
 	sock->ops		= &rds_proto_ops;
 	sk->sk_protocol		= protocol;
+	sk->sk_destruct		= rds_sock_destruct;
 
 	rs = rds_sk_to_rs(sk);
 	spin_lock_init(&rs->rs_lock);
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 4ebd29c..dd666fb 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -185,7 +185,8 @@
 		ret = 0;
 		goto out;
 	}
-	trans = rds_trans_get_preferred(sin->sin_addr.s_addr);
+	trans = rds_trans_get_preferred(sock_net(sock->sk),
+					sin->sin_addr.s_addr);
 	if (!trans) {
 		ret = -EADDRNOTAVAIL;
 		rds_remove_bound(rs);
diff --git a/net/rds/connection.c b/net/rds/connection.c
index da6da57..a50e652 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -117,7 +117,8 @@
  * For now they are not garbage collected once they're created.  They
  * are torn down as the module is removed, if ever.
  */
-static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
+static struct rds_connection *__rds_conn_create(struct net *net,
+						__be32 laddr, __be32 faddr,
 				       struct rds_transport *trans, gfp_t gfp,
 				       int is_outgoing)
 {
@@ -157,6 +158,7 @@
 	conn->c_faddr = faddr;
 	spin_lock_init(&conn->c_lock);
 	conn->c_next_tx_seq = 1;
+	rds_conn_net_set(conn, net);
 
 	init_waitqueue_head(&conn->c_waitq);
 	INIT_LIST_HEAD(&conn->c_send_queue);
@@ -174,7 +176,7 @@
 	 * can bind to the destination address then we'd rather the messages
 	 * flow through loopback rather than either transport.
 	 */
-	loop_trans = rds_trans_get_preferred(faddr);
+	loop_trans = rds_trans_get_preferred(net, faddr);
 	if (loop_trans) {
 		rds_trans_put(loop_trans);
 		conn->c_loopback = 1;
@@ -260,17 +262,19 @@
 	return conn;
 }
 
-struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create(struct net *net,
+				       __be32 laddr, __be32 faddr,
 				       struct rds_transport *trans, gfp_t gfp)
 {
-	return __rds_conn_create(laddr, faddr, trans, gfp, 0);
+	return __rds_conn_create(net, laddr, faddr, trans, gfp, 0);
 }
 EXPORT_SYMBOL_GPL(rds_conn_create);
 
-struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create_outgoing(struct net *net,
+						__be32 laddr, __be32 faddr,
 				       struct rds_transport *trans, gfp_t gfp)
 {
-	return __rds_conn_create(laddr, faddr, trans, gfp, 1);
+	return __rds_conn_create(net, laddr, faddr, trans, gfp, 1);
 }
 EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
 
@@ -297,6 +301,8 @@
 
 		wait_event(conn->c_waitq,
 			   !test_bit(RDS_IN_XMIT, &conn->c_flags));
+		wait_event(conn->c_waitq,
+			   !test_bit(RDS_RECV_REFILL, &conn->c_flags));
 
 		conn->c_trans->conn_shutdown(conn);
 		rds_conn_reset(conn);
diff --git a/net/rds/ib.c b/net/rds/ib.c
index ba2dffe..d020fad 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -317,7 +317,7 @@
  * allowed to influence which paths have priority.  We could call userspace
  * asserting this policy "routing".
  */
-static int rds_ib_laddr_check(__be32 addr)
+static int rds_ib_laddr_check(struct net *net, __be32 addr)
 {
 	int ret;
 	struct rdma_cm_id *cm_id;
@@ -366,6 +366,7 @@
 	rds_ib_sysctl_exit();
 	rds_ib_recv_exit();
 	rds_trans_unregister(&rds_ib_transport);
+	rds_ib_fmr_exit();
 }
 
 struct rds_transport rds_ib_transport = {
@@ -401,10 +402,14 @@
 
 	INIT_LIST_HEAD(&rds_ib_devices);
 
-	ret = ib_register_client(&rds_ib_client);
+	ret = rds_ib_fmr_init();
 	if (ret)
 		goto out;
 
+	ret = ib_register_client(&rds_ib_client);
+	if (ret)
+		goto out_fmr_exit;
+
 	ret = rds_ib_sysctl_init();
 	if (ret)
 		goto out_ibreg;
@@ -427,6 +432,8 @@
 	rds_ib_sysctl_exit();
 out_ibreg:
 	rds_ib_unregister_client();
+out_fmr_exit:
+	rds_ib_fmr_exit();
 out:
 	return ret;
 }
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 86d88ec..9fc95e3 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -313,6 +313,8 @@
 void rds_ib_sync_mr(void *trans_private, int dir);
 void rds_ib_free_mr(void *trans_private, int invalidate);
 void rds_ib_flush_mrs(void);
+int rds_ib_fmr_init(void);
+void rds_ib_fmr_exit(void);
 
 /* ib_recv.c */
 int rds_ib_recv_init(void);
@@ -320,7 +322,7 @@
 int rds_ib_recv(struct rds_connection *conn);
 int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
 void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
-void rds_ib_recv_refill(struct rds_connection *conn, int prefill);
+void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
 void rds_ib_inc_free(struct rds_incoming *inc);
 int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
 void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 0da2a45..d150bb4 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -135,7 +135,7 @@
 	rds_ib_recv_init_ring(ic);
 	/* Post receive buffers - as a side effect, this will update
 	 * the posted credit count. */
-	rds_ib_recv_refill(conn, 1);
+	rds_ib_recv_refill(conn, 1, GFP_KERNEL);
 
 	/* Tune RNR behavior */
 	rds_ib_tune_rnr(ic, &qp_attr);
@@ -448,8 +448,9 @@
 		 (unsigned long long)be64_to_cpu(lguid),
 		 (unsigned long long)be64_to_cpu(fguid));
 
-	conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_ib_transport,
-			       GFP_KERNEL);
+	/* RDS/IB is not currently netns aware, thus init_net */
+	conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
+			       &rds_ib_transport, GFP_KERNEL);
 	if (IS_ERR(conn)) {
 		rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
 		conn = NULL;
@@ -639,6 +640,15 @@
 			   (atomic_read(&ic->i_signaled_sends) == 0));
 		tasklet_kill(&ic->i_recv_tasklet);
 
+		/* first destroy the ib state that generates callbacks */
+		if (ic->i_cm_id->qp)
+			rdma_destroy_qp(ic->i_cm_id);
+		if (ic->i_send_cq)
+			ib_destroy_cq(ic->i_send_cq);
+		if (ic->i_recv_cq)
+			ib_destroy_cq(ic->i_recv_cq);
+
+		/* then free the resources that ib callbacks use */
 		if (ic->i_send_hdrs)
 			ib_dma_free_coherent(dev,
 					   ic->i_send_ring.w_nr *
@@ -662,12 +672,6 @@
 		if (ic->i_recvs)
 			rds_ib_recv_clear_ring(ic);
 
-		if (ic->i_cm_id->qp)
-			rdma_destroy_qp(ic->i_cm_id);
-		if (ic->i_send_cq)
-			ib_destroy_cq(ic->i_send_cq);
-		if (ic->i_recv_cq)
-			ib_destroy_cq(ic->i_recv_cq);
 		rdma_destroy_id(ic->i_cm_id);
 
 		/*
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 657ba9f..251d1ce 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -83,6 +83,25 @@
 	struct ib_fmr_attr	fmr_attr;
 };
 
+struct workqueue_struct *rds_ib_fmr_wq;
+
+int rds_ib_fmr_init(void)
+{
+	rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
+	if (!rds_ib_fmr_wq)
+		return -ENOMEM;
+	return 0;
+}
+
+/* By the time this is called, all the IB devices should have been torn down and
+ * had their pools freed.  As each pool is freed its work struct is waited on,
+ * so the pool flushing work queue should be idle by the time we get here.
+ */
+void rds_ib_fmr_exit(void)
+{
+	destroy_workqueue(rds_ib_fmr_wq);
+}
+
 static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
 static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
 static void rds_ib_mr_pool_flush_worker(struct work_struct *work);
@@ -151,12 +170,17 @@
 	struct rds_ib_device *rds_ibdev_old;
 
 	rds_ibdev_old = rds_ib_get_device(ipaddr);
-	if (rds_ibdev_old) {
+	if (!rds_ibdev_old)
+		return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
+
+	if (rds_ibdev_old != rds_ibdev) {
 		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
 		rds_ib_dev_put(rds_ibdev_old);
+		return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
 	}
+	rds_ib_dev_put(rds_ibdev_old);
 
-	return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
+	return 0;
 }
 
 void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
@@ -336,8 +360,6 @@
 		goto out_no_cigar;
 	}
 
-	memset(ibmr, 0, sizeof(*ibmr));
-
 	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
 			(IB_ACCESS_LOCAL_WRITE |
 			 IB_ACCESS_REMOTE_READ |
@@ -485,7 +507,7 @@
 
 			/* FIXME we need a way to tell a r/w MR
 			 * from a r/o MR */
-			BUG_ON(irqs_disabled());
+			WARN_ON(!page->mapping && irqs_disabled());
 			set_page_dirty(page);
 			put_page(page);
 		}
@@ -523,11 +545,13 @@
 /*
  * given an llist of mrs, put them all into the list_head for more processing
  */
-static void llist_append_to_list(struct llist_head *llist, struct list_head *list)
+static unsigned int llist_append_to_list(struct llist_head *llist,
+					 struct list_head *list)
 {
 	struct rds_ib_mr *ibmr;
 	struct llist_node *node;
 	struct llist_node *next;
+	unsigned int count = 0;
 
 	node = llist_del_all(llist);
 	while (node) {
@@ -535,7 +559,9 @@
 		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
 		list_add_tail(&ibmr->unmap_list, list);
 		node = next;
+		count++;
 	}
+	return count;
 }
 
 /*
@@ -576,7 +602,7 @@
 	LIST_HEAD(unmap_list);
 	LIST_HEAD(fmr_list);
 	unsigned long unpinned = 0;
-	unsigned int nfreed = 0, ncleaned = 0, free_goal;
+	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;
 	int ret = 0;
 
 	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);
@@ -618,8 +644,8 @@
 	/* Get the list of all MRs to be dropped. Ordering matters -
 	 * we want to put drop_list ahead of free_list.
 	 */
-	llist_append_to_list(&pool->drop_list, &unmap_list);
-	llist_append_to_list(&pool->free_list, &unmap_list);
+	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
+	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
 	if (free_all)
 		llist_append_to_list(&pool->clean_list, &unmap_list);
 
@@ -647,7 +673,6 @@
 			kfree(ibmr);
 			nfreed++;
 		}
-		ncleaned++;
 	}
 
 	if (!list_empty(&unmap_list)) {
@@ -673,7 +698,7 @@
 	}
 
 	atomic_sub(unpinned, &pool->free_pinned);
-	atomic_sub(ncleaned, &pool->dirty_count);
+	atomic_sub(dirty_to_clean, &pool->dirty_count);
 	atomic_sub(nfreed, &pool->item_count);
 
 out:
@@ -710,16 +735,18 @@
 
 	/* If we've pinned too many pages, request a flush */
 	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
-	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-		schedule_delayed_work(&pool->flush_worker, 10);
+	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
+		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
 
 	if (invalidate) {
 		if (likely(!in_interrupt())) {
 			rds_ib_flush_mr_pool(pool, 0, NULL);
 		} else {
 			/* We get here if the user created a MR marked
-			 * as use_once and invalidate at the same time. */
-			schedule_delayed_work(&pool->flush_worker, 10);
+			 * as use_once and invalidate at the same time.
+			 */
+			queue_delayed_work(rds_ib_fmr_wq,
+					   &pool->flush_worker, 10);
 		}
 	}
 
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index cac5b45..6bbe620 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -297,7 +297,7 @@
 }
 
 static int rds_ib_recv_refill_one(struct rds_connection *conn,
-				  struct rds_ib_recv_work *recv, int prefill)
+				  struct rds_ib_recv_work *recv, gfp_t gfp)
 {
 	struct rds_ib_connection *ic = conn->c_transport_data;
 	struct ib_sge *sge;
@@ -305,7 +305,7 @@
 	gfp_t slab_mask = GFP_NOWAIT;
 	gfp_t page_mask = GFP_NOWAIT;
 
-	if (prefill) {
+	if (gfp & __GFP_WAIT) {
 		slab_mask = GFP_KERNEL;
 		page_mask = GFP_HIGHUSER;
 	}
@@ -347,6 +347,24 @@
 	return ret;
 }
 
+static int acquire_refill(struct rds_connection *conn)
+{
+	return test_and_set_bit(RDS_RECV_REFILL, &conn->c_flags) == 0;
+}
+
+static void release_refill(struct rds_connection *conn)
+{
+	clear_bit(RDS_RECV_REFILL, &conn->c_flags);
+
+	/* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
+	 * hot path and finding waiters is very rare.  We don't want to walk
+	 * the system-wide hashed waitqueue buckets in the fast path only to
+	 * almost never find waiters.
+	 */
+	if (waitqueue_active(&conn->c_waitq))
+		wake_up_all(&conn->c_waitq);
+}
+
 /*
  * This tries to allocate and post unused work requests after making sure that
  * they have all the allocations they need to queue received fragments into
@@ -354,15 +372,23 @@
  *
  * -1 is returned if posting fails due to temporary resource exhaustion.
  */
-void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
+void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
 {
 	struct rds_ib_connection *ic = conn->c_transport_data;
 	struct rds_ib_recv_work *recv;
 	struct ib_recv_wr *failed_wr;
 	unsigned int posted = 0;
 	int ret = 0;
+	bool can_wait = !!(gfp & __GFP_WAIT);
 	u32 pos;
 
+	/* the goal here is to just make sure that someone, somewhere
+	 * is posting buffers.  If we can't get the refill lock,
+	 * let them do their thing
+	 */
+	if (!acquire_refill(conn))
+		return;
+
 	while ((prefill || rds_conn_up(conn)) &&
 	       rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
 		if (pos >= ic->i_recv_ring.w_nr) {
@@ -372,7 +398,7 @@
 		}
 
 		recv = &ic->i_recvs[pos];
-		ret = rds_ib_recv_refill_one(conn, recv, prefill);
+		ret = rds_ib_recv_refill_one(conn, recv, gfp);
 		if (ret) {
 			break;
 		}
@@ -402,6 +428,24 @@
 
 	if (ret)
 		rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
+
+	release_refill(conn);
+
+	/* if we're called from the softirq handler, we'll be GFP_NOWAIT.
+	 * in this case the ring being low is going to lead to more interrupts
+	 * and we can safely let the softirq code take care of it unless the
+	 * ring is completely empty.
+	 *
+	 * if we're called from krdsd, we'll be GFP_KERNEL.  In this case
+	 * we might have raced with the softirq code while we had the refill
+	 * lock held.  Use rds_ib_ring_low() instead of ring_empty to decide
+	 * if we should requeue.
+	 */
+	if (rds_conn_up(conn) &&
+	    ((can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
+	    rds_ib_ring_empty(&ic->i_recv_ring))) {
+		queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
+	}
 }
 
 /*
@@ -982,10 +1026,17 @@
 		}
 
 		/*
-		 * It's very important that we only free this ring entry if we've truly
-		 * freed the resources allocated to the entry.  The refilling path can
-		 * leak if we don't.
+		 * rds_ib_process_recv() doesn't always consume the frag, and
+		 * we might not have called it at all if the wc didn't indicate
+		 * success. We already unmapped the frag's pages, though, and
+		 * the following rds_ib_ring_free() call tells the refill path
+		 * that it will not find an allocated frag here. Make sure we
+		 * keep that promise by freeing a frag that's still on the ring.
 		 */
+		if (recv->r_frag) {
+			rds_ib_frag_free(ic, recv->r_frag);
+			recv->r_frag = NULL;
+		}
 		rds_ib_ring_free(&ic->i_recv_ring, 1);
 	}
 }
@@ -1016,7 +1067,7 @@
 		rds_ib_stats_inc(s_ib_rx_ring_empty);
 
 	if (rds_ib_ring_low(&ic->i_recv_ring))
-		rds_ib_recv_refill(conn, 0);
+		rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
 }
 
 int rds_ib_recv(struct rds_connection *conn)
@@ -1025,8 +1076,10 @@
 	int ret = 0;
 
 	rdsdebug("conn %p\n", conn);
-	if (rds_conn_up(conn))
+	if (rds_conn_up(conn)) {
 		rds_ib_attempt_ack(ic);
+		rds_ib_recv_refill(conn, 0, GFP_KERNEL);
+	}
 
 	return ret;
 }
@@ -1049,9 +1102,10 @@
 	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
 					sizeof(struct rds_page_frag),
 					0, SLAB_HWCACHE_ALIGN, NULL);
-	if (!rds_ib_frag_slab)
+	if (!rds_ib_frag_slab) {
 		kmem_cache_destroy(rds_ib_incoming_slab);
-	else
+		rds_ib_incoming_slab = NULL;
+	} else
 		ret = 0;
 out:
 	return ret;
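
The acquire_refill()/release_refill() pair above is a plain bit try-lock: a context that loses the race simply skips the refill, and conn_shutdown() waits on c_waitq for the bit to clear. Distilled to its skeleton (post_recv_buffers() is a hypothetical stand-in for the posting loop):

	if (test_and_set_bit(RDS_RECV_REFILL, &conn->c_flags))
		return;				/* someone else is refilling */

	post_recv_buffers(conn, gfp);		/* hypothetical helper */

	clear_bit(RDS_RECV_REFILL, &conn->c_flags);
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);	/* lets conn_shutdown() proceed */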
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 5d0a704..c576ebe 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -709,6 +709,11 @@
 	if (scat == &rm->data.op_sg[rm->data.op_count]) {
 		prev->s_op = ic->i_data_op;
 		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
+		if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED)) {
+			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
+			prev->s_wr.send_flags |= IB_SEND_SIGNALED;
+			nr_sig++;
+		}
 		ic->i_data_op = NULL;
 	}
 
diff --git a/net/rds/iw.c b/net/rds/iw.c
index 5899356..5d5a9d2 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -218,7 +218,7 @@
  * allowed to influence which paths have priority.  We could call userspace
  * asserting this policy "routing".
  */
-static int rds_iw_laddr_check(__be32 addr)
+static int rds_iw_laddr_check(struct net *net, __be32 addr)
 {
 	int ret;
 	struct rdma_cm_id *cm_id;
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index 8f486fa..a6553a6 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -398,8 +398,9 @@
 		 &dp->dp_saddr, &dp->dp_daddr,
 		 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version));
 
-	conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_iw_transport,
-			       GFP_KERNEL);
+	/* RDS/IW is not currently netns aware, thus init_net */
+	conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
+			       &rds_iw_transport, GFP_KERNEL);
 	if (IS_ERR(conn)) {
 		rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
 		conn = NULL;
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 40084d8..4c93bad 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -435,9 +435,10 @@
 
 	/* If the MR was marked as invalidate, this will
 	 * trigger an async flush. */
-	if (zot_me)
+	if (zot_me) {
 		rds_destroy_mr(mr);
-	rds_mr_put(mr);
+		rds_mr_put(mr);
+	}
 }
 
 void rds_rdma_free_op(struct rm_rdma_op *ro)
@@ -451,7 +452,7 @@
 		 * is the case for a RDMA_READ which copies from remote
 		 * to local memory */
 		if (!ro->op_write) {
-			BUG_ON(irqs_disabled());
+			WARN_ON(!page->mapping && irqs_disabled());
 			set_page_dirty(page);
 		}
 		put_page(page);
@@ -658,6 +659,8 @@
 		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
 		if (ret < 0)
 			goto out;
+		else
+			ret = 0;
 
 		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
 			 nr_bytes, nr, iov->bytes, iov->addr);
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 2082408..b9b40af 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -34,6 +34,7 @@
 #include <rdma/rdma_cm.h>
 
 #include "rdma_transport.h"
+#include "ib.h"
 
 static struct rdma_cm_id *rds_rdma_listen_id;
 
@@ -82,8 +83,18 @@
 		break;
 
 	case RDMA_CM_EVENT_ROUTE_RESOLVED:
-		/* XXX worry about racing with listen acceptance */
-		ret = trans->cm_initiate_connect(cm_id);
+		/* Connection could have been dropped, so make sure the
+		 * cm_id is valid before proceeding
+		 */
+		if (conn) {
+			struct rds_ib_connection *ibic;
+
+			ibic = conn->c_transport_data;
+			if (ibic && ibic->i_cm_id == cm_id)
+				ret = trans->cm_initiate_connect(cm_id);
+			else
+				rds_conn_drop(conn);
+		}
 		break;
 
 	case RDMA_CM_EVENT_ESTABLISHED:
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 2260c1e4..afb4048 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -80,6 +80,7 @@
 #define RDS_LL_SEND_FULL	0
 #define RDS_RECONNECT_PENDING	1
 #define RDS_IN_XMIT		2
+#define RDS_RECV_REFILL		3
 
 struct rds_connection {
 	struct hlist_node	c_hash_node;
@@ -128,8 +129,21 @@
 
 	/* Protocol version */
 	unsigned int		c_version;
+	possible_net_t		c_net;
 };
 
+static inline
+struct net *rds_conn_net(struct rds_connection *conn)
+{
+	return read_pnet(&conn->c_net);
+}
+
+static inline
+void rds_conn_net_set(struct rds_connection *conn, struct net *net)
+{
+	write_pnet(&conn->c_net, net);
+}
+
 #define RDS_FLAG_CONG_BITMAP	0x01
 #define RDS_FLAG_ACK_REQUIRED	0x02
 #define RDS_FLAG_RETRANSMITTED	0x04
@@ -417,7 +431,7 @@
 	unsigned int		t_prefer_loopback:1;
 	unsigned int		t_type;
 
-	int (*laddr_check)(__be32 addr);
+	int (*laddr_check)(struct net *net, __be32 addr);
 	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
 	void (*conn_free)(void *data);
 	int (*conn_connect)(struct rds_connection *conn);
@@ -608,9 +622,11 @@
 /* conn.c */
 int rds_conn_init(void);
 void rds_conn_exit(void);
-struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create(struct net *net,
+				       __be32 laddr, __be32 faddr,
 				       struct rds_transport *trans, gfp_t gfp);
-struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create_outgoing(struct net *net,
+						__be32 laddr, __be32 faddr,
 			       struct rds_transport *trans, gfp_t gfp);
 void rds_conn_shutdown(struct rds_connection *conn);
 void rds_conn_destroy(struct rds_connection *conn);
@@ -795,7 +811,7 @@
 /* transport.c */
 int rds_trans_register(struct rds_transport *trans);
 void rds_trans_unregister(struct rds_transport *trans);
-struct rds_transport *rds_trans_get_preferred(__be32 addr);
+struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
 void rds_trans_put(struct rds_transport *trans);
 unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
 				       unsigned int avail);
diff --git a/net/rds/send.c b/net/rds/send.c
index e9430f5..4df61a5 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -282,26 +282,34 @@
 		/* The transport either sends the whole rdma or none of it */
 		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
 			rm->m_final_op = &rm->rdma;
+			/* The transport owns the mapped memory for now.
+			 * You can't unmap it while it's on the send queue
+			 */
+			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
-			if (ret)
+			if (ret) {
+				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
+				wake_up_interruptible(&rm->m_flush_wait);
 				break;
+			}
 			conn->c_xmit_rdma_sent = 1;
 
-			/* The transport owns the mapped memory for now.
-			 * You can't unmap it while it's on the send queue */
-			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 		}
 
 		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
 			rm->m_final_op = &rm->atomic;
+			/* The transport owns the mapped memory for now.
+			 * You can't unmap it while it's on the send queue
+			 */
+			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
-			if (ret)
+			if (ret) {
+				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
+				wake_up_interruptible(&rm->m_flush_wait);
 				break;
+			}
 			conn->c_xmit_atomic_sent = 1;
 
-			/* The transport owns the mapped memory for now.
-			 * You can't unmap it while it's on the send queue */
-			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 		}
 
 		/*
@@ -411,7 +419,8 @@
 	 */
 	if (ret == 0) {
 		smp_mb();
-		if (!list_empty(&conn->c_send_queue) &&
+		if ((test_bit(0, &conn->c_map_queued) ||
+		     !list_empty(&conn->c_send_queue)) &&
 		    send_gen == conn->c_send_gen) {
 			rds_stats_inc(s_send_lock_queue_raced);
 			goto restart;
@@ -769,8 +778,22 @@
 	while (!list_empty(&list)) {
 		rm = list_entry(list.next, struct rds_message, m_sock_item);
 		list_del_init(&rm->m_sock_item);
-
 		rds_message_wait(rm);
+
+		/* just in case the code above skipped this message
+		 * because RDS_MSG_ON_CONN wasn't set, run it again here.
+		 * Taking m_rs_lock is the only thing that keeps us
+		 * from racing with ack processing.
+		 */
+		spin_lock_irqsave(&rm->m_rs_lock, flags);
+
+		spin_lock(&rs->rs_lock);
+		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
+		spin_unlock(&rs->rs_lock);
+
+		rm->m_rs = NULL;
+		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
+
 		rds_message_put(rm);
 	}
 }
@@ -992,6 +1015,11 @@
 		goto out;
 	}
 
+	if (payload_len > rds_sk_sndbuf(rs)) {
+		ret = -EMSGSIZE;
+		goto out;
+	}
+
 	/* size of rm including all sgs */
 	ret = rds_rm_size(msg, payload_len);
 	if (ret < 0)
@@ -1023,7 +1051,8 @@
 	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
 		conn = rs->rs_conn;
 	else {
-		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
+		conn = rds_conn_create_outgoing(sock_net(sock->sk),
+						rs->rs_bound_addr, daddr,
 					rs->rs_transport,
 					sock->sk->sk_allocation);
 		if (IS_ERR(conn)) {
@@ -1063,11 +1092,7 @@
 	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
 				  dport, &queued)) {
 		rds_stats_inc(s_send_queue_full);
-		/* XXX make sure this is reasonable */
-		if (payload_len > rds_sk_sndbuf(rs)) {
-			ret = -EMSGSIZE;
-			goto out;
-		}
+
 		if (nonblock) {
 			ret = -EAGAIN;
 			goto out;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index edac9ef..c42b60b 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -35,6 +35,8 @@
 #include <linux/in.h>
 #include <linux/module.h>
 #include <net/tcp.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #include "rds.h"
 #include "tcp.h"
@@ -189,9 +192,9 @@
 	spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
 }
 
-static int rds_tcp_laddr_check(__be32 addr)
+static int rds_tcp_laddr_check(struct net *net, __be32 addr)
 {
-	if (inet_addr_type(&init_net, addr) == RTN_LOCAL)
+	if (inet_addr_type(net, addr) == RTN_LOCAL)
 		return 0;
 	return -EADDRNOTAVAIL;
 }
@@ -250,16 +253,7 @@
 	}
 }
 
-static void rds_tcp_exit(void)
-{
-	rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
-	rds_tcp_listen_stop();
-	rds_tcp_destroy_conns();
-	rds_trans_unregister(&rds_tcp_transport);
-	rds_tcp_recv_exit();
-	kmem_cache_destroy(rds_tcp_conn_slab);
-}
-module_exit(rds_tcp_exit);
+static void rds_tcp_exit(void);
 
 struct rds_transport rds_tcp_transport = {
 	.laddr_check		= rds_tcp_laddr_check,
@@ -281,6 +275,136 @@
 	.t_prefer_loopback	= 1,
 };
 
+static int rds_tcp_netid;
+
+/* per-network namespace private data for this module */
+struct rds_tcp_net {
+	struct socket *rds_tcp_listen_sock;
+	struct work_struct rds_tcp_accept_w;
+};
+
+static void rds_tcp_accept_worker(struct work_struct *work)
+{
+	struct rds_tcp_net *rtn = container_of(work,
+					       struct rds_tcp_net,
+					       rds_tcp_accept_w);
+
+	while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0)
+		cond_resched();
+}
+
+void rds_tcp_accept_work(struct sock *sk)
+{
+	struct net *net = sock_net(sk);
+	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+	queue_work(rds_wq, &rtn->rds_tcp_accept_w);
+}
+
+static __net_init int rds_tcp_init_net(struct net *net)
+{
+	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+	rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net);
+	if (!rtn->rds_tcp_listen_sock) {
+		pr_warn("could not set up listen sock\n");
+		return -EAFNOSUPPORT;
+	}
+	INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
+	return 0;
+}
+
+static void __net_exit rds_tcp_exit_net(struct net *net)
+{
+	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+	/* If rds_tcp_exit_net() is called as a result of netns deletion,
+	 * the rds_tcp_kill_sock() device notifier would already have cleaned
+	 * up the listen socket, thus there is no work to do in this function.
+	 *
+	 * If rds_tcp_exit_net() is called as a result of module unload,
+	 * i.e., due to rds_tcp_exit() -> unregister_pernet_subsys(), then
+	 * we do need to clean up the listen socket here.
+	 */
+	if (rtn->rds_tcp_listen_sock) {
+		rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
+		rtn->rds_tcp_listen_sock = NULL;
+		flush_work(&rtn->rds_tcp_accept_w);
+	}
+}
+
+static struct pernet_operations rds_tcp_net_ops = {
+	.init = rds_tcp_init_net,
+	.exit = rds_tcp_exit_net,
+	.id = &rds_tcp_netid,
+	.size = sizeof(struct rds_tcp_net),
+};
+
+static void rds_tcp_kill_sock(struct net *net)
+{
+	struct rds_tcp_connection *tc, *_tc;
+	struct sock *sk;
+	LIST_HEAD(tmp_list);
+	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+	rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
+	rtn->rds_tcp_listen_sock = NULL;
+	flush_work(&rtn->rds_tcp_accept_w);
+	spin_lock_irq(&rds_tcp_conn_lock);
+	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
+		struct net *c_net = read_pnet(&tc->conn->c_net);
+
+		if (net != c_net || !tc->t_sock)
+			continue;
+		list_move_tail(&tc->t_tcp_node, &tmp_list);
+	}
+	spin_unlock_irq(&rds_tcp_conn_lock);
+	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
+		sk = tc->t_sock->sk;
+		sk->sk_prot->disconnect(sk, 0);
+		tcp_done(sk);
+		if (tc->conn->c_passive)
+			rds_conn_destroy(tc->conn->c_passive);
+		rds_conn_destroy(tc->conn);
+	}
+}
+
+static int rds_tcp_dev_event(struct notifier_block *this,
+			     unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	/* rds-tcp registers as a pernet subsys, so the ->exit will only
+	 * get invoked after network activity has quiesced. We need to
+	 * clean up all sockets to quiesce network activity, and use
+	 * the unregistration of the per-net loopback device as a trigger
+	 * to start that cleanup.
+	 */
+	if (event == NETDEV_UNREGISTER_FINAL &&
+	    dev->ifindex == LOOPBACK_IFINDEX)
+		rds_tcp_kill_sock(dev_net(dev));
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block rds_tcp_dev_notifier = {
+	.notifier_call        = rds_tcp_dev_event,
+	.priority = -10, /* must be called after other network notifiers */
+};
+
+static void rds_tcp_exit(void)
+{
+	rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
+	unregister_pernet_subsys(&rds_tcp_net_ops);
+	if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
+		pr_warn("could not unregister rds_tcp_dev_notifier\n");
+	rds_tcp_destroy_conns();
+	rds_trans_unregister(&rds_tcp_transport);
+	rds_tcp_recv_exit();
+	kmem_cache_destroy(rds_tcp_conn_slab);
+}
+module_exit(rds_tcp_exit);
+
 static int rds_tcp_init(void)
 {
 	int ret;
@@ -293,6 +416,16 @@
 		goto out;
 	}
 
+	ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
+	if (ret) {
+		pr_warn("could not register rds_tcp_dev_notifier\n");
+		goto out;
+	}
+
+	ret = register_pernet_subsys(&rds_tcp_net_ops);
+	if (ret)
+		goto out_slab;
+
 	ret = rds_tcp_recv_init();
 	if (ret)
 		goto out_slab;
@@ -301,19 +434,14 @@
 	if (ret)
 		goto out_recv;
 
-	ret = rds_tcp_listen_init();
-	if (ret)
-		goto out_register;
-
 	rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
 
 	goto out;
 
-out_register:
-	rds_trans_unregister(&rds_tcp_transport);
 out_recv:
 	rds_tcp_recv_exit();
 out_slab:
+	unregister_pernet_subsys(&rds_tcp_net_ops);
 	kmem_cache_destroy(rds_tcp_conn_slab);
 out:
 	return ret;
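
The listen socket moving from a file-static into struct rds_tcp_net is the standard pernet pattern: declare the per-namespace payload size in pernet_operations, let the core allocate one instance per namespace, and fetch it with net_generic(). A minimal sketch of the skeleton, with hypothetical names (my_net, my_net_ops):

#include <net/net_namespace.h>
#include <net/netns/generic.h>

static int my_netid;

struct my_net {
	struct socket *listen_sock;	/* one listener per namespace */
};

static __net_init int my_init_net(struct net *net)
{
	struct my_net *mn = net_generic(net, my_netid);

	mn->listen_sock = NULL;		/* set up per-netns state here */
	return 0;
}

static void __net_exit my_exit_net(struct net *net)
{
	struct my_net *mn = net_generic(net, my_netid);

	/* tear down per-netns state; runs once per dying namespace */
	(void)mn;
}

static struct pernet_operations my_net_ops = {
	.init = my_init_net,
	.exit = my_exit_net,
	.id   = &my_netid,		/* core fills in the generic-array slot */
	.size = sizeof(struct my_net),	/* allocated per namespace */
};

/* module init:  register_pernet_subsys(&my_net_ops);
 * module exit:  unregister_pernet_subsys(&my_net_ops);
 */
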
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 0dbdd37..64f873c 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -52,6 +52,7 @@
 u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
 u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
 extern struct rds_transport rds_tcp_transport;
+void rds_tcp_accept_work(struct sock *sk);
 
 /* tcp_connect.c */
 int rds_tcp_conn_connect(struct rds_connection *conn);
@@ -59,9 +60,11 @@
 void rds_tcp_state_change(struct sock *sk);
 
 /* tcp_listen.c */
-int rds_tcp_listen_init(void);
-void rds_tcp_listen_stop(void);
+struct socket *rds_tcp_listen_init(struct net *);
+void rds_tcp_listen_stop(struct socket *);
 void rds_tcp_listen_data_ready(struct sock *sk);
+int rds_tcp_accept_one(struct socket *sock);
+int rds_tcp_keepalive(struct socket *sock);
 
 /* tcp_recv.c */
 int rds_tcp_recv_init(void);
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 973109c7..5cb1687 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -79,7 +79,8 @@
 	struct sockaddr_in src, dest;
 	int ret;
 
-	ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+	ret = sock_create_kern(rds_conn_net(conn), PF_INET,
+			       SOCK_STREAM, IPPROTO_TCP, &sock);
 	if (ret < 0)
 		goto out;
 
@@ -111,10 +112,12 @@
 	rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret);
 	if (ret == -EINPROGRESS)
 		ret = 0;
-	if (ret == 0)
+	if (ret == 0) {
+		rds_tcp_keepalive(sock);
 		sock = NULL;
-	else
+	} else {
 		rds_tcp_restore_callbacks(sock, conn->c_transport_data);
+	}
 
 out:
 	if (sock)
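
Note the switch from sock_create() to sock_create_kern() with an explicit struct net: kernel sockets created the old way were implicitly pinned to init_net, which is exactly what multi-namespace RDS-TCP has to avoid. The idiom, as used on both the connect and accept paths:

	struct socket *sock = NULL;
	int ret;

	/* net is rds_conn_net(conn) on the active side and
	 * sock_net(listen_sock->sk) on the passive side.
	 */
	ret = sock_create_kern(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret < 0)
		return ret;
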
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 0da49e3..444d78d 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -38,14 +38,7 @@
 #include "rds.h"
 #include "tcp.h"
 
-/*
- * cheesy, but simple..
- */
-static void rds_tcp_accept_worker(struct work_struct *work);
-static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker);
-static struct socket *rds_tcp_listen_sock;
-
-static int rds_tcp_keepalive(struct socket *sock)
+int rds_tcp_keepalive(struct socket *sock)
 {
 	/* values below based on xs_udp_default_timeout */
 	int keepidle = 5; /* send a probe 'keepidle' secs after last data */
@@ -77,7 +70,7 @@
 	return ret;
 }
 
-static int rds_tcp_accept_one(struct socket *sock)
+int rds_tcp_accept_one(struct socket *sock)
 {
 	struct socket *new_sock = NULL;
 	struct rds_connection *conn;
@@ -85,8 +78,9 @@
 	struct inet_sock *inet;
 	struct rds_tcp_connection *rs_tcp;
 
-	ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
-			       sock->sk->sk_protocol, &new_sock);
+	ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
+			       sock->sk->sk_type, sock->sk->sk_protocol,
+			       &new_sock);
 	if (ret)
 		goto out;
 
@@ -108,7 +102,8 @@
 		 &inet->inet_saddr, ntohs(inet->inet_sport),
 		 &inet->inet_daddr, ntohs(inet->inet_dport));
 
-	conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr,
+	conn = rds_conn_create(sock_net(sock->sk),
+			       inet->inet_saddr, inet->inet_daddr,
 			       &rds_tcp_transport, GFP_KERNEL);
 	if (IS_ERR(conn)) {
 		ret = PTR_ERR(conn);
@@ -148,12 +143,6 @@
 	return ret;
 }
 
-static void rds_tcp_accept_worker(struct work_struct *work)
-{
-	while (rds_tcp_accept_one(rds_tcp_listen_sock) == 0)
-		cond_resched();
-}
-
 void rds_tcp_listen_data_ready(struct sock *sk)
 {
 	void (*ready)(struct sock *sk);
@@ -174,20 +163,20 @@
 	 * socket
 	 */
 	if (sk->sk_state == TCP_LISTEN)
-		queue_work(rds_wq, &rds_tcp_listen_work);
+		rds_tcp_accept_work(sk);
 
 out:
 	read_unlock(&sk->sk_callback_lock);
 	ready(sk);
 }
 
-int rds_tcp_listen_init(void)
+struct socket *rds_tcp_listen_init(struct net *net)
 {
 	struct sockaddr_in sin;
 	struct socket *sock = NULL;
 	int ret;
 
-	ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+	ret = sock_create_kern(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
 	if (ret < 0)
 		goto out;
 
@@ -211,17 +200,15 @@
 	if (ret < 0)
 		goto out;
 
-	rds_tcp_listen_sock = sock;
-	sock = NULL;
+	return sock;
 out:
 	if (sock)
 		sock_release(sock);
-	return ret;
+	return NULL;
 }
 
-void rds_tcp_listen_stop(void)
+void rds_tcp_listen_stop(struct socket *sock)
 {
-	struct socket *sock = rds_tcp_listen_sock;
 	struct sock *sk;
 
 	if (!sock)
@@ -242,5 +229,4 @@
 	/* wait for accepts to stop and close the socket */
 	flush_workqueue(rds_wq);
 	sock_release(sock);
-	rds_tcp_listen_sock = NULL;
 }
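
rds_tcp_keepalive() loses its static so the connect path can apply the same probe settings as accepted sockets. For orientation, the equivalent userspace configuration of those knobs looks like the sketch below; only keepidle = 5 appears in the patch context, the other values are illustrative.

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int set_tcp_keepalive(int fd)
{
	int on = 1;
	int keepidle = 5;	/* probe 5s after last data (from the patch) */
	int keepintvl = 5;	/* hypothetical: 5s between probes */
	int keepcnt = 5;	/* hypothetical: give up after 5 probes */

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE,
		       &keepidle, sizeof(keepidle)) < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL,
		       &keepintvl, sizeof(keepintvl)) < 0)
		return -1;
	return setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT,
			  &keepcnt, sizeof(keepcnt));
}
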
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 83498e1..f3afd1d 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -77,7 +77,7 @@
 		module_put(trans->t_owner);
 }
 
-struct rds_transport *rds_trans_get_preferred(__be32 addr)
+struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr)
 {
 	struct rds_transport *ret = NULL;
 	struct rds_transport *trans;
@@ -90,7 +90,7 @@
 	for (i = 0; i < RDS_TRANS_COUNT; i++) {
 		trans = transports[i];
 
-		if (trans && (trans->laddr_check(addr) == 0) &&
+		if (trans && (trans->laddr_check(net, addr) == 0) &&
 		    (!trans->t_owner || try_module_get(trans->t_owner))) {
 			ret = trans;
 			break;
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 4c10e7e..598d374 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -36,7 +36,8 @@
 
 config RFKILL_GPIO
 	tristate "GPIO RFKILL driver"
-	depends on RFKILL && GPIOLIB
+	depends on RFKILL
+	depends on GPIOLIB || COMPILE_TEST
 	default n
 	help
 	  If you say yes here you get support of a generic gpio RFKILL
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index d5d58d9..9312722 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -164,7 +164,6 @@
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id rfkill_acpi_match[] = {
 	{ "BCM2E1A", RFKILL_TYPE_BLUETOOTH },
-	{ "BCM2E39", RFKILL_TYPE_BLUETOOTH },
 	{ "BCM2E3D", RFKILL_TYPE_BLUETOOTH },
 	{ "BCM2E40", RFKILL_TYPE_BLUETOOTH },
 	{ "BCM2E64", RFKILL_TYPE_BLUETOOTH },
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 43ec926..06e7c4a 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -27,7 +27,16 @@
 #include <net/act_api.h>
 #include <net/netlink.h>
 
-void tcf_hash_destroy(struct tc_action *a)
+static void free_tcf(struct rcu_head *head)
+{
+	struct tcf_common *p = container_of(head, struct tcf_common, tcfc_rcu);
+
+	free_percpu(p->cpu_bstats);
+	free_percpu(p->cpu_qstats);
+	kfree(p);
+}
+
+static void tcf_hash_destroy(struct tc_action *a)
 {
 	struct tcf_common *p = a->priv;
 	struct tcf_hashinfo *hinfo = a->ops->hinfo;
@@ -41,9 +50,8 @@
 	 * gen_estimator est_timer() might access p->tcfc_lock
 	 * or bstats, wait an RCU grace period before freeing p
 	 */
-	kfree_rcu(p, tcfc_rcu);
+	call_rcu(&p->tcfc_rcu, free_tcf);
 }
-EXPORT_SYMBOL(tcf_hash_destroy);
 
 int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
 {
@@ -231,15 +239,16 @@
 	if (est)
 		gen_kill_estimator(&pc->tcfc_bstats,
 				   &pc->tcfc_rate_est);
-	kfree_rcu(pc, tcfc_rcu);
+	call_rcu(&pc->tcfc_rcu, free_tcf);
 }
 EXPORT_SYMBOL(tcf_hash_cleanup);
 
 int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
-		    int size, int bind)
+		    int size, int bind, bool cpustats)
 {
 	struct tcf_hashinfo *hinfo = a->ops->hinfo;
 	struct tcf_common *p = kzalloc(size, GFP_KERNEL);
+	int err = -ENOMEM;
 
 	if (unlikely(!p))
 		return -ENOMEM;
@@ -247,18 +256,32 @@
 	if (bind)
 		p->tcfc_bindcnt = 1;
 
+	if (cpustats) {
+		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+		if (!p->cpu_bstats) {
+err1:
+			kfree(p);
+			return err;
+		}
+		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+		if (!p->cpu_qstats) {
+err2:
+			free_percpu(p->cpu_bstats);
+			goto err1;
+		}
+	}
 	spin_lock_init(&p->tcfc_lock);
 	INIT_HLIST_NODE(&p->tcfc_head);
 	p->tcfc_index = index ? index : tcf_hash_new_index(hinfo);
 	p->tcfc_tm.install = jiffies;
 	p->tcfc_tm.lastuse = jiffies;
 	if (est) {
-		int err = gen_new_estimator(&p->tcfc_bstats, NULL,
-					    &p->tcfc_rate_est,
-					    &p->tcfc_lock, est);
+		err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
+					&p->tcfc_rate_est,
+					&p->tcfc_lock, est);
 		if (err) {
-			kfree(p);
-			return err;
+			free_percpu(p->cpu_qstats);
+			goto err2;
 		}
 	}
 
@@ -616,10 +639,10 @@
 	if (err < 0)
 		goto errout;
 
-	if (gnet_stats_copy_basic(&d, NULL, &p->tcfc_bstats) < 0 ||
+	if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
 	    gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
 				     &p->tcfc_rate_est) < 0 ||
-	    gnet_stats_copy_queue(&d, NULL,
+	    gnet_stats_copy_queue(&d, p->cpu_qstats,
 				  &p->tcfc_qstats,
 				  p->tcfc_qstats.qlen) < 0)
 		goto errout;
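
The move from kfree_rcu() to call_rcu() in act_api.c is forced by the new per-cpu counters: kfree_rcu() can only free the object itself, whereas the grace-period callback now has to release the per-cpu allocations first. The shape of the pattern, as a sketch with a hypothetical my_obj:

#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	u64 __percpu *stats;	/* per-cpu allocation owned by my_obj */
	struct rcu_head rcu;
};

static void my_obj_free_rcu(struct rcu_head *head)
{
	struct my_obj *o = container_of(head, struct my_obj, rcu);

	/* kfree_rcu() could not do this part, hence call_rcu() */
	free_percpu(o->stats);
	kfree(o);
}

static void my_obj_release(struct my_obj *o)
{
	/* defer the whole teardown until current readers are done */
	call_rcu(&o->rcu, my_obj_free_rcu);
}
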
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index d0edeb7..559bfa0 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -37,25 +37,24 @@
 		   struct tcf_result *res)
 {
 	struct tcf_bpf *prog = act->priv;
+	struct bpf_prog *filter;
 	int action, filter_res;
 	bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;
 
 	if (unlikely(!skb_mac_header_was_set(skb)))
 		return TC_ACT_UNSPEC;
 
-	spin_lock(&prog->tcf_lock);
+	tcf_lastuse_update(&prog->tcf_tm);
+	bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
 
-	prog->tcf_tm.lastuse = jiffies;
-	bstats_update(&prog->tcf_bstats, skb);
-
-	/* Needed here for accessing maps. */
 	rcu_read_lock();
+	filter = rcu_dereference(prog->filter);
 	if (at_ingress) {
 		__skb_push(skb, skb->mac_len);
-		filter_res = BPF_PROG_RUN(prog->filter, skb);
+		filter_res = BPF_PROG_RUN(filter, skb);
 		__skb_pull(skb, skb->mac_len);
 	} else {
-		filter_res = BPF_PROG_RUN(prog->filter, skb);
+		filter_res = BPF_PROG_RUN(filter, skb);
 	}
 	rcu_read_unlock();
 
@@ -77,7 +76,7 @@
 		break;
 	case TC_ACT_SHOT:
 		action = filter_res;
-		prog->tcf_qstats.drops++;
+		qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
 		break;
 	case TC_ACT_UNSPEC:
 		action = prog->tcf_action;
@@ -87,7 +86,6 @@
 		break;
 	}
 
-	spin_unlock(&prog->tcf_lock);
 	return action;
 }
 
@@ -263,7 +261,10 @@
 				  struct tcf_bpf_cfg *cfg)
 {
 	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
-	cfg->filter = prog->filter;
+	/* updates to prog->filter are prevented, since this runs either
+	 * with the rtnl lock held or during final cleanup in the rcu callback
+	 */
+	cfg->filter = rcu_dereference_protected(prog->filter, 1);
 
 	cfg->bpf_ops = prog->bpf_ops;
 	cfg->bpf_name = prog->bpf_name;
@@ -278,7 +279,7 @@
 	struct tc_act_bpf *parm;
 	struct tcf_bpf *prog;
 	bool is_bpf, is_ebpf;
-	int ret;
+	int ret, res = 0;
 
 	if (!nla)
 		return -EINVAL;
@@ -287,45 +288,47 @@
 	if (ret < 0)
 		return ret;
 
-	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
-	is_ebpf = tb[TCA_ACT_BPF_FD];
-
-	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) ||
-	    !tb[TCA_ACT_BPF_PARMS])
+	if (!tb[TCA_ACT_BPF_PARMS])
 		return -EINVAL;
 
 	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
 
+	if (!tcf_hash_check(parm->index, act, bind)) {
+		ret = tcf_hash_create(parm->index, est, act,
+				      sizeof(*prog), bind, true);
+		if (ret < 0)
+			return ret;
+
+		res = ACT_P_CREATED;
+	} else {
+		/* Don't override defaults. */
+		if (bind)
+			return 0;
+
+		tcf_hash_release(act, bind);
+		if (!replace)
+			return -EEXIST;
+	}
+
+	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
+	is_ebpf = tb[TCA_ACT_BPF_FD];
+
+	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	memset(&cfg, 0, sizeof(cfg));
 
 	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
 		       tcf_bpf_init_from_efd(tb, &cfg);
 	if (ret < 0)
-		return ret;
-
-	if (!tcf_hash_check(parm->index, act, bind)) {
-		ret = tcf_hash_create(parm->index, est, act,
-				      sizeof(*prog), bind);
-		if (ret < 0)
-			goto destroy_fp;
-
-		ret = ACT_P_CREATED;
-	} else {
-		/* Don't override defaults. */
-		if (bind)
-			goto destroy_fp;
-
-		tcf_hash_release(act, bind);
-		if (!replace) {
-			ret = -EEXIST;
-			goto destroy_fp;
-		}
-	}
+		goto out;
 
 	prog = to_bpf(act);
-	spin_lock_bh(&prog->tcf_lock);
+	ASSERT_RTNL();
 
-	if (ret != ACT_P_CREATED)
+	if (res != ACT_P_CREATED)
 		tcf_bpf_prog_fill_cfg(prog, &old);
 
 	prog->bpf_ops = cfg.bpf_ops;
@@ -337,19 +340,21 @@
 		prog->bpf_fd = cfg.bpf_fd;
 
 	prog->tcf_action = parm->action;
-	prog->filter = cfg.filter;
+	rcu_assign_pointer(prog->filter, cfg.filter);
 
-	spin_unlock_bh(&prog->tcf_lock);
-
-	if (ret == ACT_P_CREATED)
+	if (res == ACT_P_CREATED) {
 		tcf_hash_insert(act);
-	else
+	} else {
+		/* make sure the program being replaced is no longer executing */
+		synchronize_rcu();
 		tcf_bpf_cfg_cleanup(&old);
+	}
 
-	return ret;
+	return res;
+out:
+	if (res == ACT_P_CREATED)
+		tcf_hash_cleanup(act, est);
 
-destroy_fp:
-	tcf_bpf_cfg_cleanup(&cfg);
 	return ret;
 }
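
The act_bpf conversion is the canonical RCU pointer swap: the per-packet path reads prog->filter under rcu_read_lock() with no spinlock, while the RTNL-protected update path publishes the new program with rcu_assign_pointer() and waits a grace period before destroying the old one. Boiled down, with run_filter() and destroy_filter() as hypothetical stand-ins:

	/* Reader (per packet, lockless): */
	rcu_read_lock();
	filter = rcu_dereference(prog->filter);
	res = run_filter(filter, skb);		/* hypothetical helper */
	rcu_read_unlock();

	/* Writer (under RTNL): */
	old = rcu_dereference_protected(prog->filter, 1);
	rcu_assign_pointer(prog->filter, new);
	synchronize_rcu();	/* no CPU still runs 'old' after this */
	destroy_filter(old);	/* hypothetical helper */
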
 
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 295d14b..5019a47 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -37,6 +37,7 @@
 	struct nf_conntrack_tuple tuple;
 	enum ip_conntrack_info ctinfo;
 	struct tcf_connmark_info *ca = a->priv;
+	struct nf_conntrack_zone zone;
 	struct nf_conn *c;
 	int proto;
 
@@ -70,7 +71,10 @@
 			       proto, &tuple))
 		goto out;
 
-	thash = nf_conntrack_find_get(dev_net(skb->dev), ca->zone, &tuple);
+	zone.id = ca->zone;
+	zone.dir = NF_CT_DEFAULT_ZONE_DIR;
+
+	thash = nf_conntrack_find_get(dev_net(skb->dev), &zone, &tuple);
 	if (!thash)
 		goto out;
 
@@ -108,7 +112,8 @@
 	parm = nla_data(tb[TCA_CONNMARK_PARMS]);
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*ci), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*ci),
+				      bind, false);
 		if (ret)
 			return ret;
 
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 4cd5cf1..b07c535 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -62,7 +62,8 @@
 	parm = nla_data(tb[TCA_CSUM_PARMS]);
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+				      bind, false);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 7fffc22..5c1b051 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -28,14 +28,18 @@
 #ifdef CONFIG_GACT_PROB
 static int gact_net_rand(struct tcf_gact *gact)
 {
-	if (!gact->tcfg_pval || prandom_u32() % gact->tcfg_pval)
+	smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
+	if (prandom_u32() % gact->tcfg_pval)
 		return gact->tcf_action;
 	return gact->tcfg_paction;
 }
 
 static int gact_determ(struct tcf_gact *gact)
 {
-	if (!gact->tcfg_pval || gact->tcf_bstats.packets % gact->tcfg_pval)
+	u32 pack = atomic_inc_return(&gact->packets);
+
+	smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
+	if (pack % gact->tcfg_pval)
 		return gact->tcf_action;
 	return gact->tcfg_paction;
 }
@@ -85,7 +89,8 @@
 #endif
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*gact), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*gact),
+				      bind, true);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
@@ -99,16 +104,19 @@
 
 	gact = to_gact(a);
 
-	spin_lock_bh(&gact->tcf_lock);
+	ASSERT_RTNL();
 	gact->tcf_action = parm->action;
 #ifdef CONFIG_GACT_PROB
 	if (p_parm) {
 		gact->tcfg_paction = p_parm->paction;
-		gact->tcfg_pval    = p_parm->pval;
+		gact->tcfg_pval    = max_t(u16, 1, p_parm->pval);
+		/* Make sure tcfg_pval is written before tcfg_ptype
+		 * coupled with smp_rmb() in gact_net_rand() & gact_determ()
+		 */
+		smp_wmb();
 		gact->tcfg_ptype   = p_parm->ptype;
 	}
 #endif
-	spin_unlock_bh(&gact->tcf_lock);
 	if (ret == ACT_P_CREATED)
 		tcf_hash_insert(a);
 	return ret;
@@ -118,23 +126,21 @@
 		    struct tcf_result *res)
 {
 	struct tcf_gact *gact = a->priv;
-	int action = TC_ACT_SHOT;
+	int action = READ_ONCE(gact->tcf_action);
 
-	spin_lock(&gact->tcf_lock);
 #ifdef CONFIG_GACT_PROB
-	if (gact->tcfg_ptype)
-		action = gact_rand[gact->tcfg_ptype](gact);
-	else
-		action = gact->tcf_action;
-#else
-	action = gact->tcf_action;
+	{
+	u32 ptype = READ_ONCE(gact->tcfg_ptype);
+
+	if (ptype)
+		action = gact_rand[ptype](gact);
+	}
 #endif
-	gact->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	gact->tcf_bstats.packets++;
+	bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), skb);
 	if (action == TC_ACT_SHOT)
-		gact->tcf_qstats.drops++;
-	gact->tcf_tm.lastuse = jiffies;
-	spin_unlock(&gact->tcf_lock);
+		qstats_drop_inc(this_cpu_ptr(gact->common.cpu_qstats));
+
+	tcf_lastuse_update(&gact->tcf_tm);
 
 	return action;
 }
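
The gact barriers pair up as a classic publish sequence: the writer stores the payload (tcfg_pval, forced to at least 1 so the modulo is safe), issues smp_wmb(), then stores the flag (tcfg_ptype); lockless readers mirror it with READ_ONCE() plus smp_rmb(). In isolation, with the reader-side smp_rmb() inlined here for clarity:

	/* Writer side, tcf_gact_init(), under RTNL: */
	gact->tcfg_pval = max_t(u16, 1, p_parm->pval);	/* payload, never 0 */
	smp_wmb();				/* order pval before ptype */
	gact->tcfg_ptype = p_parm->ptype;	/* publish the flag last */

	/* Reader side, lockless fast path: */
	u32 ptype = READ_ONCE(gact->tcfg_ptype);

	if (ptype) {
		smp_rmb();	/* pairs with the smp_wmb() above */
		/* tcfg_pval is guaranteed >= 1 here, so '%' is safe */
		action = gact_rand[ptype](gact);
	}
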
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index cbc8dd7..99c9cc1 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -114,7 +114,7 @@
 		index = nla_get_u32(tb[TCA_IPT_INDEX]);
 
 	if (!tcf_hash_check(index, a, bind) ) {
-		ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind);
+		ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind, false);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 2685450..2d1be4a 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -35,9 +35,11 @@
 static void tcf_mirred_release(struct tc_action *a, int bind)
 {
 	struct tcf_mirred *m = to_mirred(a);
+	struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1);
+
 	list_del(&m->tcfm_list);
-	if (m->tcfm_dev)
-		dev_put(m->tcfm_dev);
+	if (dev)
+		dev_put(dev);
 }
 
 static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -93,7 +95,8 @@
 	if (!tcf_hash_check(parm->index, a, bind)) {
 		if (dev == NULL)
 			return -EINVAL;
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*m), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*m),
+				      bind, true);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
@@ -107,18 +110,18 @@
 	}
 	m = to_mirred(a);
 
-	spin_lock_bh(&m->tcf_lock);
+	ASSERT_RTNL();
 	m->tcf_action = parm->action;
 	m->tcfm_eaction = parm->eaction;
 	if (dev != NULL) {
 		m->tcfm_ifindex = parm->ifindex;
 		if (ret != ACT_P_CREATED)
-			dev_put(m->tcfm_dev);
+			dev_put(rcu_dereference_protected(m->tcfm_dev, 1));
 		dev_hold(dev);
-		m->tcfm_dev = dev;
+		rcu_assign_pointer(m->tcfm_dev, dev);
 		m->tcfm_ok_push = ok_push;
 	}
-	spin_unlock_bh(&m->tcf_lock);
+
 	if (ret == ACT_P_CREATED) {
 		list_add(&m->tcfm_list, &mirred_list);
 		tcf_hash_insert(a);
@@ -133,20 +136,22 @@
 	struct tcf_mirred *m = a->priv;
 	struct net_device *dev;
 	struct sk_buff *skb2;
+	int retval, err;
 	u32 at;
-	int retval, err = 1;
 
-	spin_lock(&m->tcf_lock);
-	m->tcf_tm.lastuse = jiffies;
-	bstats_update(&m->tcf_bstats, skb);
+	tcf_lastuse_update(&m->tcf_tm);
 
-	dev = m->tcfm_dev;
-	if (!dev) {
-		printk_once(KERN_NOTICE "tc mirred: target device is gone\n");
+	bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
+
+	rcu_read_lock();
+	retval = READ_ONCE(m->tcf_action);
+	dev = rcu_dereference(m->tcfm_dev);
+	if (unlikely(!dev)) {
+		pr_notice_once("tc mirred: target device is gone\n");
 		goto out;
 	}
 
-	if (!(dev->flags & IFF_UP)) {
+	if (unlikely(!(dev->flags & IFF_UP))) {
 		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
 				       dev->name);
 		goto out;
@@ -154,7 +159,7 @@
 
 	at = G_TC_AT(skb->tc_verd);
 	skb2 = skb_clone(skb, GFP_ATOMIC);
-	if (skb2 == NULL)
+	if (!skb2)
 		goto out;
 
 	if (!(at & AT_EGRESS)) {
@@ -170,16 +175,13 @@
 	skb2->dev = dev;
 	err = dev_queue_xmit(skb2);
 
-out:
 	if (err) {
-		m->tcf_qstats.overlimits++;
+out:
+		qstats_overlimit_inc(this_cpu_ptr(m->common.cpu_qstats));
 		if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
 			retval = TC_ACT_SHOT;
-		else
-			retval = m->tcf_action;
-	} else
-		retval = m->tcf_action;
-	spin_unlock(&m->tcf_lock);
+	}
+	rcu_read_unlock();
 
 	return retval;
 }
@@ -218,14 +220,16 @@
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct tcf_mirred *m;
 
+	ASSERT_RTNL();
 	if (event == NETDEV_UNREGISTER)
 		list_for_each_entry(m, &mirred_list, tcfm_list) {
-			spin_lock_bh(&m->tcf_lock);
-			if (m->tcfm_dev == dev) {
+			if (rcu_access_pointer(m->tcfm_dev) == dev) {
 				dev_put(dev);
-				m->tcfm_dev = NULL;
+				/* Note: no rcu grace period necessary, as
+				 * net_device objects are already rcu protected.
+				 */
+				RCU_INIT_POINTER(m->tcfm_dev, NULL);
 			}
-			spin_unlock_bh(&m->tcf_lock);
 		}
 
 	return NOTIFY_DONE;
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 270a030..b7c4ead 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -55,7 +55,8 @@
 	parm = nla_data(tb[TCA_NAT_PARMS]);
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+				      bind, false);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
@@ -161,7 +162,8 @@
 			goto drop;
 
 		tcph = (void *)(skb_network_header(skb) + ihl);
-		inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, 1);
+		inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr,
+					 true);
 		break;
 	}
 	case IPPROTO_UDP:
@@ -177,7 +179,7 @@
 		udph = (void *)(skb_network_header(skb) + ihl);
 		if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 			inet_proto_csum_replace4(&udph->check, skb, addr,
-						 new_addr, 1);
+						 new_addr, true);
 			if (!udph->check)
 				udph->check = CSUM_MANGLED_0;
 		}
@@ -230,7 +232,7 @@
 			iph->saddr = new_addr;
 
 		inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
-					 0);
+					 false);
 		break;
 	}
 	default:
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index ff8b466..e38a770 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -57,7 +57,8 @@
 	if (!tcf_hash_check(parm->index, a, bind)) {
 		if (!parm->nkeys)
 			return -EINVAL;
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+				      bind, false);
 		if (ret)
 			return ret;
 		p = to_pedit(a);
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 6a8d948..d6b708d 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -103,7 +103,8 @@
 	defdata = nla_data(tb[TCA_DEF_DATA]);
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*d),
+				      bind, false);
 		if (ret)
 			return ret;
 
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index fcfeeaf..6751b5f 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -99,7 +99,8 @@
 	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*d),
+				      bind, false);
 		if (ret)
 			return ret;
 
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index d735ecf..796785e 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -116,7 +116,8 @@
 	action = parm->v_action;
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*v), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*v),
+				      bind, false);
 		if (ret)
 			return ret;
 
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index ea611b21..4c85bd3 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -30,35 +30,16 @@
 			       struct tcf_result *res)
 {
 	struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
-	u32 classid;
-
-	classid = task_cls_state(current)->classid;
-
-	/*
-	 * Due to the nature of the classifier it is required to ignore all
-	 * packets originating from softirq context as accessing `current'
-	 * would lead to false results.
-	 *
-	 * This test assumes that all callers of dev_queue_xmit() explicitely
-	 * disable bh. Knowing this, it is possible to detect softirq based
-	 * calls by looking at the number of nested bh disable calls because
-	 * softirqs always disables bh.
-	 */
-	if (in_serving_softirq()) {
-		/* If there is an sk_classid we'll use that. */
-		if (!skb->sk)
-			return -1;
-		classid = skb->sk->sk_classid;
-	}
+	u32 classid = task_get_classid(skb);
 
 	if (!classid)
 		return -1;
-
 	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
 		return -1;
 
 	res->classid = classid;
 	res->class = 0;
+
 	return tcf_exts_exec(skb, &head->exts, res);
 }
 
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index bb2a0f5..536838b 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -301,7 +301,7 @@
 
 		keymask = f->keymask;
 		if (keymask & FLOW_KEYS_NEEDED)
-			skb_flow_dissect_flow_keys(skb, &flow_keys);
+			skb_flow_dissect_flow_keys(skb, &flow_keys, 0);
 
 		for (n = 0; n < f->nkeys; n++) {
 			key = ffs(keymask) - 1;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 2f3d03f..5769294 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -129,7 +129,7 @@
 	 * so do it rather here.
 	 */
 	skb_key.basic.n_proto = skb->protocol;
-	skb_flow_dissect(skb, &head->dissector, &skb_key);
+	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);
 
 	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);
 
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 02fa827..f9c9fc0 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -283,12 +283,22 @@
 	return -ENOBUFS;
 }
 
-static void
-rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
+static void rsvp_delete_filter_rcu(struct rcu_head *head)
+{
+	struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu);
+
+	tcf_exts_destroy(&f->exts);
+	kfree(f);
+}
+
+static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
 {
 	tcf_unbind_filter(tp, &f->res);
-	tcf_exts_destroy(&f->exts);
-	kfree_rcu(f, rcu);
+	/* all classifiers are required to call tcf_exts_destroy() after an
+	 * rcu grace period, since converted-to-rcu actions rely on that
+	 * in their cleanup() callback
+	 */
+	call_rcu(&f->rcu, rsvp_delete_filter_rcu);
 }
 
 static bool rsvp_destroy(struct tcf_proto *tp, bool force)
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index a557dba..944c8ff 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -27,6 +27,7 @@
 struct tcindex_filter_result {
 	struct tcf_exts		exts;
 	struct tcf_result	res;
+	struct rcu_head		rcu;
 };
 
 struct tcindex_filter {
@@ -133,8 +134,23 @@
 	return 0;
 }
 
-static int
-tcindex_delete(struct tcf_proto *tp, unsigned long arg)
+static void tcindex_destroy_rexts(struct rcu_head *head)
+{
+	struct tcindex_filter_result *r;
+
+	r = container_of(head, struct tcindex_filter_result, rcu);
+	tcf_exts_destroy(&r->exts);
+}
+
+static void tcindex_destroy_fexts(struct rcu_head *head)
+{
+	struct tcindex_filter *f = container_of(head, struct tcindex_filter, rcu);
+
+	tcf_exts_destroy(&f->result.exts);
+	kfree(f);
+}
+
+static int tcindex_delete(struct tcf_proto *tp, unsigned long arg)
 {
 	struct tcindex_data *p = rtnl_dereference(tp->root);
 	struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
@@ -162,9 +178,14 @@
 		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
 	}
 	tcf_unbind_filter(tp, &r->res);
-	tcf_exts_destroy(&r->exts);
+	/* all classifiers are required to call tcf_exts_destroy() after an
+	 * rcu grace period, since converted-to-rcu actions rely on that
+	 * in their cleanup() callback
+	 */
 	if (f)
-		kfree_rcu(f, rcu);
+		call_rcu(&f->rcu, tcindex_destroy_fexts);
+	else
+		call_rcu(&r->rcu, tcindex_destroy_rexts);
 	return 0;
 }
 
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index f06aa01..f43c8f3 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1806,51 +1806,45 @@
  * to this qdisc, (optionally) tests for protocol and asks
  * specific classifiers.
  */
-int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
-		       struct tcf_result *res)
+int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+		struct tcf_result *res, bool compat_mode)
 {
 	__be16 protocol = tc_skb_protocol(skb);
-	int err;
+#ifdef CONFIG_NET_CLS_ACT
+	const struct tcf_proto *old_tp = tp;
+	int limit = 0;
 
+reclassify:
+#endif
 	for (; tp; tp = rcu_dereference_bh(tp->next)) {
+		int err;
+
 		if (tp->protocol != protocol &&
 		    tp->protocol != htons(ETH_P_ALL))
 			continue;
-		err = tp->classify(skb, tp, res);
 
+		err = tp->classify(skb, tp, res);
+#ifdef CONFIG_NET_CLS_ACT
+		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode))
+			goto reset;
+#endif
 		if (err >= 0)
 			return err;
 	}
+
 	return -1;
-}
-EXPORT_SYMBOL(tc_classify_compat);
-
-int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
-		struct tcf_result *res)
-{
-	int err = 0;
 #ifdef CONFIG_NET_CLS_ACT
-	const struct tcf_proto *otp = tp;
-	int limit = 0;
-reclassify:
-#endif
-
-	err = tc_classify_compat(skb, tp, res);
-#ifdef CONFIG_NET_CLS_ACT
-	if (err == TC_ACT_RECLASSIFY) {
-		tp = otp;
-
-		if (unlikely(limit++ >= MAX_REC_LOOP)) {
-			net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
-					       tp->q->ops->id,
-					       tp->prio & 0xffff,
-					       ntohs(tp->protocol));
-			return TC_ACT_SHOT;
-		}
-		goto reclassify;
+reset:
+	if (unlikely(limit++ >= MAX_REC_LOOP)) {
+		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
+				       tp->q->ops->id, tp->prio & 0xffff,
+				       ntohs(tp->protocol));
+		return TC_ACT_SHOT;
 	}
+
+	tp = old_tp;
+	goto reclassify;
 #endif
-	return err;
 }
 EXPORT_SYMBOL(tc_classify);
 
@@ -1947,6 +1941,7 @@
 	register_qdisc(&bfifo_qdisc_ops);
 	register_qdisc(&pfifo_head_drop_qdisc_ops);
 	register_qdisc(&mq_qdisc_ops);
+	register_qdisc(&noqueue_qdisc_ops);
 
 	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
 	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
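
With tc_classify_compat() folded into tc_classify(), the old split survives as the compat_mode argument; each caller chooses whether TC_ACT_RECLASSIFY is bounced back to it or handled by the bounded restart loop:

	/* former tc_classify_compat(): RECLASSIFY returned to the caller */
	result = tc_classify(skb, fl, &res, true);

	/* full mode: RECLASSIFY restarts the walk from the head of the
	 * chain, capped at MAX_REC_LOOP restarts before TC_ACT_SHOT
	 */
	result = tc_classify(skb, fl, &res, false);
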
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index e3e2cc5..1911af3 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -375,7 +375,7 @@
 		list_for_each_entry(flow, &p->flows, list) {
 			fl = rcu_dereference_bh(flow->filter_list);
 			if (fl) {
-				result = tc_classify_compat(skb, fl, &res);
+				result = tc_classify(skb, fl, &res, true);
 				if (result < 0)
 					continue;
 				flow = (struct atm_flow_data *)res.class;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index beeb75f..c538d9e 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -240,7 +240,7 @@
 		/*
 		 * Step 2+n. Apply classifier.
 		 */
-		result = tc_classify_compat(skb, fl, &res);
+		result = tc_classify(skb, fl, &res, true);
 		if (!fl || result < 0)
 			goto fallback;
 
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 6a783af..02bfd3d 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -170,13 +170,13 @@
 
 	if (!choke_skb_cb(skb1)->keys_valid) {
 		choke_skb_cb(skb1)->keys_valid = 1;
-		skb_flow_dissect_flow_keys(skb1, &temp);
+		skb_flow_dissect_flow_keys(skb1, &temp, 0);
 		make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
 	}
 
 	if (!choke_skb_cb(skb2)->keys_valid) {
 		choke_skb_cb(skb2)->keys_valid = 1;
-		skb_flow_dissect_flow_keys(skb2, &temp);
+		skb_flow_dissect_flow_keys(skb2, &temp, 0);
 		make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
 	}
 
@@ -201,7 +201,7 @@
 	int result;
 
 	fl = rcu_dereference_bh(q->filter_list);
-	result = tc_classify(skb, fl, &res);
+	result = tc_classify(skb, fl, &res, false);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 3387060..f26bdea 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -331,7 +331,7 @@
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	fl = rcu_dereference_bh(q->filter_list);
-	result = tc_classify(skb, fl, &res);
+	result = tc_classify(skb, fl, &res, false);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 66700a6..c4d45fd 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -230,7 +230,7 @@
 	else {
 		struct tcf_result res;
 		struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
-		int result = tc_classify(skb, fl, &res);
+		int result = tc_classify(skb, fl, &res, false);
 
 		pr_debug("result %d class 0x%04x\n", result, res.classid);
 
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 2e2398c..2177eac 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -54,7 +54,7 @@
 	bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
 
 	if (opt == NULL) {
-		u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
+		u32 limit = qdisc_dev(sch)->tx_queue_len;
 
 		if (is_bfifo)
 			limit *= psched_mtu(qdisc_dev(sch));
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index a9ba030..4c834e9 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -92,7 +92,7 @@
 		return fq_codel_hash(q, skb) + 1;
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-	result = tc_classify(skb, filter, &res);
+	result = tc_classify(skb, filter, &res, false);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 6efca30..cb5d4ad 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -416,33 +416,25 @@
 };
 EXPORT_SYMBOL(noop_qdisc);
 
-static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
+static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt)
+{
+	/* register_qdisc() assigns a default of noop_enqueue if unset,
+	 * but __dev_queue_xmit() only treats a qdisc as noqueue
+	 * if this is NULL - so clear it here. */
+	qdisc->enqueue = NULL;
+	return 0;
+}
+
+struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
 	.id		=	"noqueue",
 	.priv_size	=	0,
+	.init		=	noqueue_init,
 	.enqueue	=	noop_enqueue,
 	.dequeue	=	noop_dequeue,
 	.peek		=	noop_dequeue,
 	.owner		=	THIS_MODULE,
 };
 
-static struct Qdisc noqueue_qdisc;
-static struct netdev_queue noqueue_netdev_queue = {
-	.qdisc		=	&noqueue_qdisc,
-	.qdisc_sleeping	=	&noqueue_qdisc,
-};
-
-static struct Qdisc noqueue_qdisc = {
-	.enqueue	=	NULL,
-	.dequeue	=	noop_dequeue,
-	.flags		=	TCQ_F_BUILTIN,
-	.ops		=	&noqueue_qdisc_ops,
-	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
-	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
-	.dev_queue	=	&noqueue_netdev_queue,
-	.busylock	=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.busylock),
-};
-
-
 static const u8 prio2band[TC_PRIO_MAX + 1] = {
 	1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
 };
@@ -733,18 +725,19 @@
 				     struct netdev_queue *dev_queue,
 				     void *_unused)
 {
-	struct Qdisc *qdisc = &noqueue_qdisc;
+	struct Qdisc *qdisc;
+	const struct Qdisc_ops *ops = default_qdisc_ops;
 
-	if (dev->tx_queue_len) {
-		qdisc = qdisc_create_dflt(dev_queue,
-					  default_qdisc_ops, TC_H_ROOT);
-		if (!qdisc) {
-			netdev_info(dev, "activation failed\n");
-			return;
-		}
-		if (!netif_is_multiqueue(dev))
-			qdisc->flags |= TCQ_F_ONETXQUEUE;
+	if (dev->priv_flags & IFF_NO_QUEUE)
+		ops = &noqueue_qdisc_ops;
+
+	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT);
+	if (!qdisc) {
+		netdev_info(dev, "activation failed\n");
+		return;
 	}
+	if (!netif_is_multiqueue(dev))
+		qdisc->flags |= TCQ_F_ONETXQUEUE;
 	dev_queue->qdisc_sleeping = qdisc;
 }
 
@@ -755,7 +748,8 @@
 
 	txq = netdev_get_tx_queue(dev, 0);
 
-	if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
+	if (!netif_is_multiqueue(dev) ||
+	    dev->priv_flags & IFF_NO_QUEUE) {
 		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
 		dev->qdisc = txq->qdisc_sleeping;
 		atomic_inc(&dev->qdisc->refcnt);
@@ -779,7 +773,7 @@
 		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
 
 	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
-	if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
+	if (need_watchdog_p) {
 		dev_queue->trans_start = 0;
 		*need_watchdog_p = 1;
 	}
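
noqueue used to be a hand-built static Qdisc; it is now an ordinary Qdisc_ops whose init() clears ->enqueue, because the transmit path uses a NULL enqueue pointer as the "send directly, never queue" sentinel. A simplified sketch of the test in __dev_queue_xmit(); xmit_direct() is a stand-in, not a real kernel symbol:

	q = rcu_dereference_bh(txq->qdisc);
	if (q->enqueue)
		rc = q->enqueue(skb, q);	/* real qdisc: queue the packet */
	else
		rc = xmit_direct(skb, dev);	/* noqueue: straight to the driver */
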
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index abb9f2f..8010510 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -512,11 +512,9 @@
 
 	if (tb[TCA_GRED_LIMIT])
 		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
-	else {
-		u32 qlen = qdisc_dev(sch)->tx_queue_len ? : 1;
-
-		sch->limit = qlen * psched_mtu(qdisc_dev(sch));
-	}
+	else
+		sch->limit = qdisc_dev(sch)->tx_queue_len
+		             * psched_mtu(qdisc_dev(sch));
 
 	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
 }
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index e6c7416..b7ebe2c 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1165,7 +1165,7 @@
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	head = &q->root;
 	tcf = rcu_dereference_bh(q->root.filter_list);
-	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
+	while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_QUEUED:
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index f1acb0f..15ccd7f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -229,7 +229,7 @@
 	}
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
+	while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_QUEUED:
@@ -1048,11 +1048,9 @@
 
 	if (tb[TCA_HTB_DIRECT_QLEN])
 		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
-	else {
+	else
 		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
-		if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
-			q->direct_qlen = 2;
-	}
+
 	if ((q->rate2quantum = gopt->rate2quantum) < 1)
 		q->rate2quantum = 1;
 	q->defcls = gopt->defcls;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 42dd218..4e904ca 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -46,7 +46,7 @@
 	int err;
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-	err = tc_classify(skb, fl, &res);
+	err = tc_classify(skb, fl, &res, false);
 #ifdef CONFIG_NET_CLS_ACT
 	switch (err) {
 	case TC_ACT_STOLEN:
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index ade9445..5abfe44 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -130,12 +130,8 @@
 	q->unplug_indefinite = false;
 
 	if (opt == NULL) {
-		/* We will set a default limit of 100 pkts (~150kB)
-		 * in case tx_queue_len is not available. The
-		 * default value is completely arbitrary.
-		 */
-		u32 pkt_limit = qdisc_dev(sch)->tx_queue_len ? : 100;
-		q->limit = pkt_limit * psched_mtu(qdisc_dev(sch));
+		q->limit = qdisc_dev(sch)->tx_queue_len
+		           * psched_mtu(qdisc_dev(sch));
 	} else {
 		struct tc_plug_qopt *ctl = nla_data(opt);
 
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 8e5cd34..ba6487f 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -42,7 +42,7 @@
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	if (TC_H_MAJ(skb->priority) != sch->handle) {
 		fl = rcu_dereference_bh(q->filter_list);
-		err = tc_classify(skb, fl, &res);
+		err = tc_classify(skb, fl, &res, false);
 #ifdef CONFIG_NET_CLS_ACT
 		switch (err) {
 		case TC_ACT_STOLEN:
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index b8d73bc..3dc3a6e 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -186,7 +186,6 @@
 
 	u64			oldV, V;	/* Precise virtual times. */
 	struct qfq_aggregate	*in_serv_agg;   /* Aggregate being served. */
-	u32			num_active_agg; /* Num. of active aggregates */
 	u32			wsum;		/* weight sum */
 	u32			iwsum;		/* inverse weight sum */
 
@@ -718,7 +717,7 @@
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	fl = rcu_dereference_bh(q->filter_list);
-	result = tc_classify(skb, fl, &res);
+	result = tc_classify(skb, fl, &res, false);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 4b81519..5bbb633 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -258,7 +258,7 @@
 	struct tcf_result res;
 	int result;
 
-	result = tc_classify(skb, fl, &res);
+	result = tc_classify(skb, fl, &res, false);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
@@ -502,7 +502,7 @@
 
 	limit = ctl->limit;
 	if (limit == 0)
-		limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
+		limit = qdisc_dev(sch)->tx_queue_len;
 
 	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
 	if (IS_ERR(child))
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 52f75a5..3abab53 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -179,7 +179,7 @@
 		return sfq_hash(q, skb) + 1;
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-	result = tc_classify(skb, fl, &res);
+	result = tc_classify(skb, fl, &res, false);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 59e8035..4345790 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -487,23 +487,35 @@
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+		struct net_device *odev;
+
 		if (!laddr->valid)
 			continue;
-		if ((laddr->state == SCTP_ADDR_SRC) &&
-		    (AF_INET == laddr->a.sa.sa_family)) {
-			fl4->fl4_sport = laddr->a.v4.sin_port;
-			flowi4_update_output(fl4,
-					     asoc->base.sk->sk_bound_dev_if,
-					     RT_CONN_FLAGS(asoc->base.sk),
-					     daddr->v4.sin_addr.s_addr,
-					     laddr->a.v4.sin_addr.s_addr);
+		if (laddr->state != SCTP_ADDR_SRC ||
+		    AF_INET != laddr->a.sa.sa_family)
+			continue;
 
-			rt = ip_route_output_key(sock_net(sk), fl4);
-			if (!IS_ERR(rt)) {
-				dst = &rt->dst;
-				goto out_unlock;
-			}
-		}
+		fl4->fl4_sport = laddr->a.v4.sin_port;
+		flowi4_update_output(fl4,
+				     asoc->base.sk->sk_bound_dev_if,
+				     RT_CONN_FLAGS(asoc->base.sk),
+				     daddr->v4.sin_addr.s_addr,
+				     laddr->a.v4.sin_addr.s_addr);
+
+		rt = ip_route_output_key(sock_net(sk), fl4);
+		if (IS_ERR(rt))
+			continue;
+
+		/* Ensure the src address belongs to the output
+		 * interface.
+		 */
+		odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
+				     false);
+		if (!odev || odev->ifindex != fl4->flowi4_oif)
+			continue;
+
+		dst = &rt->dst;
+		break;
 	}
 
 out_unlock:
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index a655ddc..7954c52 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3090,8 +3090,19 @@
 			sctp_assoc_set_primary(asoc, asconf->transport);
 			sctp_assoc_del_nonprimary_peers(asoc,
 							asconf->transport);
-		} else
-			sctp_assoc_del_peer(asoc, &addr);
+			return SCTP_ERROR_NO_ERROR;
+		}
+
+		/* If the address is not part of the association, an
+		 * ASCONF-ACK with an Error Cause Indication Parameter
+		 * carrying the cause Unresolvable Address should
+		 * be sent.
+		 */
+		peer = sctp_assoc_lookup_paddr(asoc, &addr);
+		if (!peer)
+			return SCTP_ERROR_DNS_FAILED;
+
+		sctp_assoc_rm_peer(asoc, peer);
 		break;
 	case SCTP_PARAM_SET_PRIMARY:
 		/* ADDIP Section 4.2.4
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 85e6f03..35df126 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -954,7 +954,7 @@
 		t = list_entry(pos, struct sctp_transport, transports);
 		if (!sctp_cmp_addr_exact(&t->ipaddr,
 					 &asoc->peer.primary_addr)) {
-			sctp_assoc_del_peer(asoc, &t->ipaddr);
+			sctp_assoc_rm_peer(asoc, t);
 		}
 	}
 }
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 3ee27b7..d7eaa73 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -853,7 +853,7 @@
 
 /*
  * Respond to a normal COOKIE ACK chunk.
- * We are the side that is being asked for an association.
+ * We are the side that is asking for an association.
  *
  * RFC 2960 5.1 Normal Establishment of an Association
  *
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 2928aff..4a2340a 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -44,7 +44,7 @@
 static void cache_init(struct cache_head *h)
 {
 	time_t now = seconds_since_boot();
-	h->next = NULL;
+	INIT_HLIST_NODE(&h->cache_list);
 	h->flags = 0;
 	kref_init(&h->ref);
 	h->expiry_time = now + CACHE_NEW_EXPIRY;
@@ -54,15 +54,14 @@
 struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
 				       struct cache_head *key, int hash)
 {
-	struct cache_head **head,  **hp;
-	struct cache_head *new = NULL, *freeme = NULL;
+	struct cache_head *new = NULL, *freeme = NULL, *tmp = NULL;
+	struct hlist_head *head;
 
 	head = &detail->hash_table[hash];
 
 	read_lock(&detail->hash_lock);
 
-	for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
-		struct cache_head *tmp = *hp;
+	hlist_for_each_entry(tmp, head, cache_list) {
 		if (detail->match(tmp, key)) {
 			if (cache_is_expired(detail, tmp))
 				/* This entry is expired, we will discard it. */
@@ -88,12 +87,10 @@
 	write_lock(&detail->hash_lock);
 
 	/* check if entry appeared while we slept */
-	for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
-		struct cache_head *tmp = *hp;
+	hlist_for_each_entry(tmp, head, cache_list) {
 		if (detail->match(tmp, key)) {
 			if (cache_is_expired(detail, tmp)) {
-				*hp = tmp->next;
-				tmp->next = NULL;
+				hlist_del_init(&tmp->cache_list);
 				detail->entries --;
 				freeme = tmp;
 				break;
@@ -104,8 +101,8 @@
 			return tmp;
 		}
 	}
-	new->next = *head;
-	*head = new;
+
+	hlist_add_head(&new->cache_list, head);
 	detail->entries++;
 	cache_get(new);
 	write_unlock(&detail->hash_lock);
@@ -143,7 +140,6 @@
 	 * If 'old' is not VALID, we update it directly,
 	 * otherwise we need to replace it
 	 */
-	struct cache_head **head;
 	struct cache_head *tmp;
 
 	if (!test_bit(CACHE_VALID, &old->flags)) {
@@ -168,15 +164,13 @@
 	}
 	cache_init(tmp);
 	detail->init(tmp, old);
-	head = &detail->hash_table[hash];
 
 	write_lock(&detail->hash_lock);
 	if (test_bit(CACHE_NEGATIVE, &new->flags))
 		set_bit(CACHE_NEGATIVE, &tmp->flags);
 	else
 		detail->update(tmp, new);
-	tmp->next = *head;
-	*head = tmp;
+	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
 	detail->entries++;
 	cache_get(tmp);
 	cache_fresh_locked(tmp, new->expiry_time);
@@ -416,28 +410,29 @@
 	/* find a non-empty bucket in the table */
 	while (current_detail &&
 	       current_index < current_detail->hash_size &&
-	       current_detail->hash_table[current_index] == NULL)
+	       hlist_empty(&current_detail->hash_table[current_index]))
 		current_index++;
 
 	/* find a cleanable entry in the bucket and clean it, or set to next bucket */
 
 	if (current_detail && current_index < current_detail->hash_size) {
-		struct cache_head *ch, **cp;
+		struct cache_head *ch = NULL;
 		struct cache_detail *d;
+		struct hlist_head *head;
+		struct hlist_node *tmp;
 
 		write_lock(&current_detail->hash_lock);
 
 		/* Ok, now to clean this strand */
 
-		cp = & current_detail->hash_table[current_index];
-		for (ch = *cp ; ch ; cp = & ch->next, ch = *cp) {
+		head = &current_detail->hash_table[current_index];
+		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
 			if (current_detail->nextcheck > ch->expiry_time)
 				current_detail->nextcheck = ch->expiry_time+1;
 			if (!cache_is_expired(current_detail, ch))
 				continue;
 
-			*cp = ch->next;
-			ch->next = NULL;
+			hlist_del_init(&ch->cache_list);
 			current_detail->entries--;
 			rv = 1;
 			break;
@@ -1270,18 +1265,13 @@
  * get a header, then pass each real item in the cache
  */
 
-struct handle {
-	struct cache_detail *cd;
-};
-
-static void *c_start(struct seq_file *m, loff_t *pos)
+void *cache_seq_start(struct seq_file *m, loff_t *pos)
 	__acquires(cd->hash_lock)
 {
 	loff_t n = *pos;
 	unsigned int hash, entry;
 	struct cache_head *ch;
-	struct cache_detail *cd = ((struct handle*)m->private)->cd;
-
+	struct cache_detail *cd = m->private;
 
 	read_lock(&cd->hash_lock);
 	if (!n--)
@@ -1289,7 +1279,7 @@
 	hash = n >> 32;
 	entry = n & ((1LL<<32) - 1);
 
-	for (ch=cd->hash_table[hash]; ch; ch=ch->next)
+	hlist_for_each_entry(ch, &cd->hash_table[hash], cache_list)
 		if (!entry--)
 			return ch;
 	n &= ~((1LL<<32) - 1);
@@ -1297,51 +1287,57 @@
 		hash++;
 		n += 1LL<<32;
 	} while(hash < cd->hash_size &&
-		cd->hash_table[hash]==NULL);
+		hlist_empty(&cd->hash_table[hash]));
 	if (hash >= cd->hash_size)
 		return NULL;
 	*pos = n+1;
-	return cd->hash_table[hash];
+	return hlist_entry_safe(cd->hash_table[hash].first,
+				struct cache_head, cache_list);
 }
+EXPORT_SYMBOL_GPL(cache_seq_start);
 
-static void *c_next(struct seq_file *m, void *p, loff_t *pos)
+void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
 {
 	struct cache_head *ch = p;
 	int hash = (*pos >> 32);
-	struct cache_detail *cd = ((struct handle*)m->private)->cd;
+	struct cache_detail *cd = m->private;
 
 	if (p == SEQ_START_TOKEN)
 		hash = 0;
-	else if (ch->next == NULL) {
+	else if (ch->cache_list.next == NULL) {
 		hash++;
 		*pos += 1LL<<32;
 	} else {
 		++*pos;
-		return ch->next;
+		return hlist_entry_safe(ch->cache_list.next,
+					struct cache_head, cache_list);
 	}
 	*pos &= ~((1LL<<32) - 1);
 	while (hash < cd->hash_size &&
-	       cd->hash_table[hash] == NULL) {
+	       hlist_empty(&cd->hash_table[hash])) {
 		hash++;
 		*pos += 1LL<<32;
 	}
 	if (hash >= cd->hash_size)
 		return NULL;
 	++*pos;
-	return cd->hash_table[hash];
+	return hlist_entry_safe(cd->hash_table[hash].first,
+				struct cache_head, cache_list);
 }
+EXPORT_SYMBOL_GPL(cache_seq_next);
 
-static void c_stop(struct seq_file *m, void *p)
+void cache_seq_stop(struct seq_file *m, void *p)
 	__releases(cd->hash_lock)
 {
-	struct cache_detail *cd = ((struct handle*)m->private)->cd;
+	struct cache_detail *cd = m->private;
 	read_unlock(&cd->hash_lock);
 }
+EXPORT_SYMBOL_GPL(cache_seq_stop);
 
 static int c_show(struct seq_file *m, void *p)
 {
 	struct cache_head *cp = p;
-	struct cache_detail *cd = ((struct handle*)m->private)->cd;
+	struct cache_detail *cd = m->private;
 
 	if (p == SEQ_START_TOKEN)
 		return cd->cache_show(m, cd, NULL);
@@ -1364,33 +1360,36 @@
 }
 
 static const struct seq_operations cache_content_op = {
-	.start	= c_start,
-	.next	= c_next,
-	.stop	= c_stop,
+	.start	= cache_seq_start,
+	.next	= cache_seq_next,
+	.stop	= cache_seq_stop,
 	.show	= c_show,
 };
 
 static int content_open(struct inode *inode, struct file *file,
 			struct cache_detail *cd)
 {
-	struct handle *han;
+	struct seq_file *seq;
+	int err;
 
 	if (!cd || !try_module_get(cd->owner))
 		return -EACCES;
-	han = __seq_open_private(file, &cache_content_op, sizeof(*han));
-	if (han == NULL) {
+
+	err = seq_open(file, &cache_content_op);
+	if (err) {
 		module_put(cd->owner);
-		return -ENOMEM;
+		return err;
 	}
 
-	han->cd = cd;
+	seq = file->private_data;
+	seq->private = cd;
 	return 0;
 }
 
 static int content_release(struct inode *inode, struct file *file,
 		struct cache_detail *cd)
 {
-	int ret = seq_release_private(inode, file);
+	int ret = seq_release(inode, file);
 	module_put(cd->owner);
 	return ret;
 }
@@ -1665,17 +1664,21 @@
 struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
 {
 	struct cache_detail *cd;
+	int i;
 
 	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
 	if (cd == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	cd->hash_table = kzalloc(cd->hash_size * sizeof(struct cache_head *),
+	cd->hash_table = kzalloc(cd->hash_size * sizeof(struct hlist_head),
 				 GFP_KERNEL);
 	if (cd->hash_table == NULL) {
 		kfree(cd);
 		return ERR_PTR(-ENOMEM);
 	}
+
+	for (i = 0; i < cd->hash_size; i++)
+		INIT_HLIST_HEAD(&cd->hash_table[i]);
 	cd->net = net;
 	return cd;
 }
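
The sunrpc cache conversion from hand-rolled "struct cache_head *next" chains to hlist buys O(1) unlinking: every node carries a pprev back-pointer to whatever points at it, so deletion needs no walk and none of the old "**hp" bookkeeping. A freestanding miniature (not the kernel's <linux/list.h>, just its shape):

#include <stddef.h>

struct hnode { struct hnode *next, **pprev; };
struct hhead { struct hnode *first; };

static void mini_hlist_add_head(struct hnode *n, struct hhead *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void mini_hlist_del_init(struct hnode *n)
{
	/* pprev points at whatever pointed at us: no list walk needed.
	 * Assumes n is currently on a list.
	 */
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
	n->next = NULL;
	n->pprev = NULL;
}
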
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 337ca85..b140c09 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -297,7 +297,7 @@
 	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 	ret = atomic_dec_and_test(&task->tk_count);
 	if (waitqueue_active(wq))
-		__wake_up_locked_key(wq, TASK_NORMAL, &k);
+		__wake_up_locked_key(wq, TASK_NORMAL, 1, &k);
 	spin_unlock_irqrestore(&wq->lock, flags);
 	return ret;
 }
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 5a16d8d..a8f579d 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -34,36 +34,19 @@
 
 static void svc_unregister(const struct svc_serv *serv, struct net *net);
 
-#define svc_serv_is_pooled(serv)    ((serv)->sv_function)
+#define svc_serv_is_pooled(serv)    ((serv)->sv_ops->svo_function)
 
-/*
- * Mode for mapping cpus to pools.
- */
-enum {
-	SVC_POOL_AUTO = -1,	/* choose one of the others */
-	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
-				 * (legacy & UP mode) */
-	SVC_POOL_PERCPU,	/* one pool per cpu */
-	SVC_POOL_PERNODE	/* one pool per numa node */
-};
 #define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL
 
 /*
  * Structure for mapping cpus to pools and vice versa.
  * Setup once during sunrpc initialisation.
  */
-static struct svc_pool_map {
-	int count;			/* How many svc_servs use us */
-	int mode;			/* Note: int not enum to avoid
-					 * warnings about "enumeration value
-					 * not handled in switch" */
-	unsigned int npools;
-	unsigned int *pool_to;		/* maps pool id to cpu or node */
-	unsigned int *to_pool;		/* maps cpu or node to pool id */
-} svc_pool_map = {
-	.count = 0,
+struct svc_pool_map svc_pool_map = {
 	.mode = SVC_POOL_DEFAULT
 };
+EXPORT_SYMBOL_GPL(svc_pool_map);
+
 static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
 
 static int
@@ -236,7 +219,7 @@
  * vice versa).  Initialise the map if we're the first user.
  * Returns the number of pools.
  */
-static unsigned int
+unsigned int
 svc_pool_map_get(void)
 {
 	struct svc_pool_map *m = &svc_pool_map;
@@ -271,7 +254,7 @@
 	mutex_unlock(&svc_pool_map_mutex);
 	return m->npools;
 }
-
+EXPORT_SYMBOL_GPL(svc_pool_map_get);
 
 /*
  * Drop a reference to the global map of cpus to pools.
@@ -280,7 +263,7 @@
  * mode using the pool_mode module option without
  * rebooting or re-loading sunrpc.ko.
  */
-static void
+void
 svc_pool_map_put(void)
 {
 	struct svc_pool_map *m = &svc_pool_map;
@@ -297,7 +280,7 @@
 
 	mutex_unlock(&svc_pool_map_mutex);
 }
-
+EXPORT_SYMBOL_GPL(svc_pool_map_put);
 
 static int svc_pool_map_get_node(unsigned int pidx)
 {
@@ -423,7 +406,7 @@
  */
 static struct svc_serv *
 __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
-	     void (*shutdown)(struct svc_serv *serv, struct net *net))
+	     struct svc_serv_ops *ops)
 {
 	struct svc_serv	*serv;
 	unsigned int vers;
@@ -440,7 +423,7 @@
 		bufsize = RPCSVC_MAXPAYLOAD;
 	serv->sv_max_payload = bufsize? bufsize : 4096;
 	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
-	serv->sv_shutdown  = shutdown;
+	serv->sv_ops = ops;
 	xdrsize = 0;
 	while (prog) {
 		prog->pg_lovers = prog->pg_nvers-1;
@@ -486,26 +469,22 @@
 
 struct svc_serv *
 svc_create(struct svc_program *prog, unsigned int bufsize,
-	   void (*shutdown)(struct svc_serv *serv, struct net *net))
+	   struct svc_serv_ops *ops)
 {
-	return __svc_create(prog, bufsize, /*npools*/1, shutdown);
+	return __svc_create(prog, bufsize, /*npools*/1, ops);
 }
 EXPORT_SYMBOL_GPL(svc_create);
 
 struct svc_serv *
 svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
-		  void (*shutdown)(struct svc_serv *serv, struct net *net),
-		  svc_thread_fn func, struct module *mod)
+		  struct svc_serv_ops *ops)
 {
 	struct svc_serv *serv;
 	unsigned int npools = svc_pool_map_get();
 
-	serv = __svc_create(prog, bufsize, npools, shutdown);
+	serv = __svc_create(prog, bufsize, npools, ops);
 	if (!serv)
 		goto out_err;
-
-	serv->sv_function = func;
-	serv->sv_module = mod;
 	return serv;
 out_err:
 	svc_pool_map_put();
@@ -517,8 +496,8 @@
 {
 	svc_close_net(serv, net);
 
-	if (serv->sv_shutdown)
-		serv->sv_shutdown(serv, net);
+	if (serv->sv_ops->svo_shutdown)
+		serv->sv_ops->svo_shutdown(serv, net);
 }
 EXPORT_SYMBOL_GPL(svc_shutdown_net);
 
@@ -604,40 +583,52 @@
 }
 
 struct svc_rqst *
-svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
+svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
 {
 	struct svc_rqst	*rqstp;
 
 	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
 	if (!rqstp)
-		goto out_enomem;
+		return rqstp;
 
-	serv->sv_nrthreads++;
 	__set_bit(RQ_BUSY, &rqstp->rq_flags);
 	spin_lock_init(&rqstp->rq_lock);
 	rqstp->rq_server = serv;
 	rqstp->rq_pool = pool;
+
+	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
+	if (!rqstp->rq_argp)
+		goto out_enomem;
+
+	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
+	if (!rqstp->rq_resp)
+		goto out_enomem;
+
+	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
+		goto out_enomem;
+
+	return rqstp;
+out_enomem:
+	svc_rqst_free(rqstp);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(svc_rqst_alloc);
+
+struct svc_rqst *
+svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
+{
+	struct svc_rqst	*rqstp;
+
+	rqstp = svc_rqst_alloc(serv, pool, node);
+	if (!rqstp)
+		return ERR_PTR(-ENOMEM);
+
+	serv->sv_nrthreads++;
 	spin_lock_bh(&pool->sp_lock);
 	pool->sp_nrthreads++;
 	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
 	spin_unlock_bh(&pool->sp_lock);
-
-	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
-	if (!rqstp->rq_argp)
-		goto out_thread;
-
-	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
-	if (!rqstp->rq_resp)
-		goto out_thread;
-
-	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
-		goto out_thread;
-
 	return rqstp;
-out_thread:
-	svc_exit_thread(rqstp);
-out_enomem:
-	return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL_GPL(svc_prepare_thread);
 
@@ -739,12 +730,12 @@
 			break;
 		}
 
-		__module_get(serv->sv_module);
-		task = kthread_create_on_node(serv->sv_function, rqstp,
+		__module_get(serv->sv_ops->svo_module);
+		task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
 					      node, "%s", serv->sv_name);
 		if (IS_ERR(task)) {
 			error = PTR_ERR(task);
-			module_put(serv->sv_module);
+			module_put(serv->sv_ops->svo_module);
 			svc_exit_thread(rqstp);
 			break;
 		}
@@ -772,15 +763,21 @@
  * mutex" for the service.
  */
 void
-svc_exit_thread(struct svc_rqst *rqstp)
+svc_rqst_free(struct svc_rqst *rqstp)
 {
-	struct svc_serv	*serv = rqstp->rq_server;
-	struct svc_pool	*pool = rqstp->rq_pool;
-
 	svc_release_buffer(rqstp);
 	kfree(rqstp->rq_resp);
 	kfree(rqstp->rq_argp);
 	kfree(rqstp->rq_auth_data);
+	kfree_rcu(rqstp, rq_rcu_head);
+}
+EXPORT_SYMBOL_GPL(svc_rqst_free);
+
+void
+svc_exit_thread(struct svc_rqst *rqstp)
+{
+	struct svc_serv	*serv = rqstp->rq_server;
+	struct svc_pool	*pool = rqstp->rq_pool;
 
 	spin_lock_bh(&pool->sp_lock);
 	pool->sp_nrthreads--;
@@ -788,7 +785,7 @@
 		list_del_rcu(&rqstp->rq_all);
 	spin_unlock_bh(&pool->sp_lock);
 
-	kfree_rcu(rqstp, rq_rcu_head);
+	svc_rqst_free(rqstp);
 
 	/* Release the server */
 	if (serv)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 163ac45..a6cbb21 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -24,7 +24,6 @@
 static struct cache_deferred_req *svc_defer(struct cache_req *req);
 static void svc_age_temp_xprts(unsigned long closure);
 static void svc_delete_xprt(struct svc_xprt *xprt);
-static void svc_xprt_do_enqueue(struct svc_xprt *xprt);
 
 /* apparently the "standard" is that clients close
  * idle connections after 5 minutes, servers after
@@ -225,12 +224,12 @@
 	}
 
 	/* As soon as we clear busy, the xprt could be closed and
-	 * 'put', so we need a reference to call svc_xprt_do_enqueue with:
+	 * 'put', so we need a reference to call svo_enqueue_xprt with:
 	 */
 	svc_xprt_get(xprt);
 	smp_mb__before_atomic();
 	clear_bit(XPT_BUSY, &xprt->xpt_flags);
-	svc_xprt_do_enqueue(xprt);
+	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
 	svc_xprt_put(xprt);
 }
 
@@ -320,7 +319,7 @@
 	return false;
 }
 
-static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
+void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 {
 	struct svc_pool *pool;
 	struct svc_rqst	*rqstp = NULL;
@@ -402,6 +401,7 @@
 out:
 	trace_svc_xprt_do_enqueue(xprt, rqstp);
 }
+EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);
 
 /*
  * Queue up a transport with data pending. If there are idle nfsd
@@ -412,7 +412,7 @@
 {
 	if (test_bit(XPT_BUSY, &xprt->xpt_flags))
 		return;
-	svc_xprt_do_enqueue(xprt);
+	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
 }
 EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
 
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index d25cd43..1dfae83 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -136,6 +136,79 @@
 	return dma_addr;
 }
 
+/* Returns the address of the first read chunk or NULL if no read chunk
+ * is present
+ */
+struct rpcrdma_read_chunk *
+svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
+{
+	struct rpcrdma_read_chunk *ch =
+		(struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
+
+	if (ch->rc_discrim == xdr_zero)
+		return NULL;
+	return ch;
+}
+
+/* Returns the address of the first write array element or NULL
+ * if no write array list is present
+ */
+static struct rpcrdma_write_array *
+svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
+{
+	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
+	    rmsgp->rm_body.rm_chunks[1] == xdr_zero)
+		return NULL;
+	return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
+}
+
+/* Returns the address of the first reply array element or NULL if no
+ * reply array is present
+ */
+static struct rpcrdma_write_array *
+svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
+{
+	struct rpcrdma_read_chunk *rch;
+	struct rpcrdma_write_array *wr_ary;
+	struct rpcrdma_write_array *rp_ary;
+
+	/* XXX: Need to fix when reply chunk may occur with read list
+	 *	and/or write list.
+	 */
+	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
+	    rmsgp->rm_body.rm_chunks[1] != xdr_zero)
+		return NULL;
+
+	rch = svc_rdma_get_read_chunk(rmsgp);
+	if (rch) {
+		while (rch->rc_discrim != xdr_zero)
+			rch++;
+
+		/* The reply chunk follows an empty write array located
+		 * at 'rc_position' here. The reply array is at rc_target.
+		 */
+		rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
+		goto found_it;
+	}
+
+	wr_ary = svc_rdma_get_write_array(rmsgp);
+	if (wr_ary) {
+		int chunk = be32_to_cpu(wr_ary->wc_nchunks);
+
+		rp_ary = (struct rpcrdma_write_array *)
+			 &wr_ary->wc_array[chunk].wc_target.rs_length;
+		goto found_it;
+	}
+
+	/* No read list, no write list */
+	rp_ary = (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[2];
+
+ found_it:
+	if (rp_ary->wc_discrim == xdr_zero)
+		return NULL;
+	return rp_ary;
+}
+
 /* Assumptions:
  * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
  */
@@ -384,6 +457,7 @@
 		      int byte_count)
 {
 	struct ib_send_wr send_wr;
+	u32 xdr_off;
 	int sge_no;
 	int sge_bytes;
 	int page_no;
@@ -418,8 +492,8 @@
 	ctxt->direction = DMA_TO_DEVICE;
 
 	/* Map the payload indicated by 'byte_count' */
+	xdr_off = 0;
 	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
-		int xdr_off = 0;
 		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
 		byte_count -= sge_bytes;
 		ctxt->sge[sge_no].addr =
@@ -457,6 +531,13 @@
 	}
 	rqstp->rq_next_page = rqstp->rq_respages + 1;
 
+	/* The loop above bumps sc_dma_used for each sge. The
+	 * xdr_buf.tail gets a separate sge, but resides in the
+	 * same page as xdr_buf.head. Don't count it twice.
+	 */
+	if (sge_no > ctxt->count)
+		atomic_dec(&rdma->sc_dma_used);
+
 	if (sge_no > rdma->sc_max_sge) {
 		pr_err("svcrdma: Too many sges (%d)\n", sge_no);
 		goto err;
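
The three helpers added above all walk the chunk lists at the front of an RPC-over-RDMA header, where each list is terminated by an xdr_zero discriminator word. A hedged sketch of the same walk, counting read-list entries (rpcrdma_count_read_chunks() is hypothetical; it reuses svc_rdma_get_read_chunk() from this diff):

static int rpcrdma_count_read_chunks(struct rpcrdma_msg *rmsgp)
{
	struct rpcrdma_read_chunk *ch = svc_rdma_get_read_chunk(rmsgp);
	int n = 0;

	if (!ch)
		return 0;
	while (ch->rc_discrim != xdr_zero) {	/* the list ends at a zero word */
		n++;
		ch++;
	}
	return n;
}
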
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 6b36279..21e4036 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -91,7 +91,7 @@
 	.xcl_name = "rdma",
 	.xcl_owner = THIS_MODULE,
 	.xcl_ops = &svc_rdma_ops,
-	.xcl_max_payload = RPCRDMA_MAXPAYLOAD,
+	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
 	.xcl_ident = XPRT_TRANSPORT_RDMA,
 };
 
@@ -659,6 +659,7 @@
 		if (xprt) {
 			set_bit(XPT_CLOSE, &xprt->xpt_flags);
 			svc_xprt_enqueue(xprt);
+			svc_xprt_put(xprt);
 		}
 		break;
 	default:
@@ -1201,40 +1202,6 @@
 	return 1;
 }
 
-/*
- * Attempt to register the kvec representing the RPC memory with the
- * device.
- *
- * Returns:
- *  NULL : The device does not support fastreg or there were no more
- *         fastreg mr.
- *  frmr : The kvec register request was successfully posted.
- *    <0 : An error was encountered attempting to register the kvec.
- */
-int svc_rdma_fastreg(struct svcxprt_rdma *xprt,
-		     struct svc_rdma_fastreg_mr *frmr)
-{
-	struct ib_send_wr fastreg_wr;
-	u8 key;
-
-	/* Bump the key */
-	key = (u8)(frmr->mr->lkey & 0x000000FF);
-	ib_update_fast_reg_key(frmr->mr, ++key);
-
-	/* Prepare FASTREG WR */
-	memset(&fastreg_wr, 0, sizeof fastreg_wr);
-	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
-	fastreg_wr.send_flags = IB_SEND_SIGNALED;
-	fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
-	fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
-	fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
-	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
-	fastreg_wr.wr.fast_reg.length = frmr->map_len;
-	fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
-	fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
-	return svc_rdma_send(xprt, &fastreg_wr);
-}
-
 int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
 {
 	struct ib_send_wr *bad_wr, *n_wr;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index f49dd8b..e718d09 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -51,7 +51,6 @@
 #include <linux/sunrpc/clnt.h> 		/* rpc_xprt */
 #include <linux/sunrpc/rpc_rdma.h> 	/* RPC/RDMA protocol */
 #include <linux/sunrpc/xprtrdma.h> 	/* xprt parameters */
-#include <linux/sunrpc/svc.h>		/* RPCSVC_MAXPAYLOAD */
 
 #define RDMA_RESOLVE_TIMEOUT	(5000)	/* 5 seconds */
 #define RDMA_CONNECT_RETRY_MAX	(2)	/* retries if no listener backlog */
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 9f2add3..16c1c43 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -810,7 +810,7 @@
 	ndm->ndm_flags   = NTF_SELF;
 	ndm->ndm_type    = 0;
 	ndm->ndm_ifindex = dev->ifindex;
-	ndm->ndm_state   = NUD_REACHABLE;
+	ndm->ndm_state   = obj->u.fdb.ndm_state;
 
 	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, obj->u.fdb.addr))
 		goto nla_put_failure;
@@ -910,13 +910,9 @@
 		if (switchdev_port_attr_get(dev, &attr))
 			return NULL;
 
-		if (nhsel > 0) {
-			if (prev_attr.u.ppid.id_len != attr.u.ppid.id_len)
+		if (nhsel > 0 &&
+		    !netdev_phys_item_id_same(&prev_attr.u.ppid, &attr.u.ppid))
 				return NULL;
-			if (memcmp(prev_attr.u.ppid.id, attr.u.ppid.id,
-				   attr.u.ppid.id_len))
-				return NULL;
-		}
 
 		prev_attr = attr;
 	}
@@ -1043,3 +1039,106 @@
 	fi->fib_net->ipv4.fib_offload_disabled = true;
 }
 EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
+
+static bool switchdev_port_same_parent_id(struct net_device *a,
+					  struct net_device *b)
+{
+	struct switchdev_attr a_attr = {
+		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+		.flags = SWITCHDEV_F_NO_RECURSE,
+	};
+	struct switchdev_attr b_attr = {
+		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+		.flags = SWITCHDEV_F_NO_RECURSE,
+	};
+
+	if (switchdev_port_attr_get(a, &a_attr) ||
+	    switchdev_port_attr_get(b, &b_attr))
+		return false;
+
+	return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
+}
+
+static u32 switchdev_port_fwd_mark_get(struct net_device *dev,
+				       struct net_device *group_dev)
+{
+	struct net_device *lower_dev;
+	struct list_head *iter;
+
+	netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
+		if (lower_dev == dev)
+			continue;
+		if (switchdev_port_same_parent_id(dev, lower_dev))
+			return lower_dev->offload_fwd_mark;
+		return switchdev_port_fwd_mark_get(dev, lower_dev);
+	}
+
+	return dev->ifindex;
+}
+
+static void switchdev_port_fwd_mark_reset(struct net_device *group_dev,
+					  u32 old_mark, u32 *reset_mark)
+{
+	struct net_device *lower_dev;
+	struct list_head *iter;
+
+	netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
+		if (lower_dev->offload_fwd_mark == old_mark) {
+			if (!*reset_mark)
+				*reset_mark = lower_dev->ifindex;
+			lower_dev->offload_fwd_mark = *reset_mark;
+		}
+		switchdev_port_fwd_mark_reset(lower_dev, old_mark, reset_mark);
+	}
+}
+
+/**
+ *	switchdev_port_fwd_mark_set - Set port offload forwarding mark
+ *
+ *	@dev: port device
+ *	@group_dev: containing device
+ *	@joining: true if dev is joining group; false if leaving group
+ *
+ *	An ungrouped port's offload mark is just its ifindex.  A grouped
+ *	port's (member of a bridge, for example) offload mark is the ifindex
+ *	of one of the ports in the group with the same parent (switch) ID.
+ *	Ports on the same device in the same group will have the same mark.
+ *
+ *	Example:
+ *
+ *		br0		ifindex=9
+ *		  sw1p1		ifindex=2	mark=2
+ *		  sw1p2		ifindex=3	mark=2
+ *		  sw2p1		ifindex=4	mark=5
+ *		  sw2p2		ifindex=5	mark=5
+ *
+ *	If sw2p2 leaves the bridge, we'll have:
+ *
+ *		br0		ifindex=9
+ *		  sw1p1		ifindex=2	mark=2
+ *		  sw1p2		ifindex=3	mark=2
+ *		  sw2p1		ifindex=4	mark=4
+ *		sw2p2		ifindex=5	mark=5
+ */
+void switchdev_port_fwd_mark_set(struct net_device *dev,
+				 struct net_device *group_dev,
+				 bool joining)
+{
+	u32 mark = dev->ifindex;
+	u32 reset_mark = 0;
+
+	if (group_dev && joining) {
+		mark = switchdev_port_fwd_mark_get(dev, group_dev);
+	} else if (group_dev && !joining) {
+		if (dev->offload_fwd_mark == mark)
+			/* Ohoh, this port was the mark reference port,
+			 * but it's leaving the group, so reset the
+			 * mark for the remaining ports in the group.
+			 */
+			switchdev_port_fwd_mark_reset(group_dev, mark,
+						      &reset_mark);
+	}
+
+	dev->offload_fwd_mark = mark;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_fwd_mark_set);
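
The kernel-doc above gives the marking scheme itself; what follows is a hedged sketch of the call sites a switch driver would add in its bridge join/leave paths (function names hypothetical):

/* Joining: inherit the mark of a same-switch sibling already in the
 * bridge, or fall back to this port's own ifindex.
 */
static int my_port_bridge_join(struct net_device *dev, struct net_device *br)
{
	switchdev_port_fwd_mark_set(dev, br, true);
	return 0;
}

/* Leaving: revert to ifindex; siblings are re-marked if this port
 * was the group's reference port.
 */
static void my_port_bridge_leave(struct net_device *dev, struct net_device *br)
{
	switchdev_port_fwd_mark_set(dev, br, false);
}
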
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index a816382..8b010c9 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -316,6 +316,29 @@
 	}
 }
 
+void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
+{
+	u16 last = msg_last_bcast(hdr);
+	int mtyp = msg_type(hdr);
+
+	if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
+		return;
+	if (mtyp == STATE_MSG) {
+		tipc_bclink_update_link_state(n, last);
+		return;
+	}
+	/* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
+	 * and transfer synch info in LINK_PROTOCOL messages.
+	 */
+	if (tipc_node_is_up(n))
+		return;
+	if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
+		return;
+	n->bclink.last_sent = last;
+	n->bclink.last_in = last;
+	n->bclink.oos_state = 0;
+}
+
 /**
  * bclink_peek_nack - monitor retransmission requests sent by other nodes
  *
@@ -358,10 +381,9 @@
 
 	/* Prepare clone of message for local node */
 	skb = tipc_msg_reassemble(list);
-	if (unlikely(!skb)) {
-		__skb_queue_purge(list);
+	if (unlikely(!skb))
 		return -EHOSTUNREACH;
-	}
+
 	/* Broadcast to all nodes */
 	if (likely(bclink)) {
 		tipc_bclink_lock(net);
@@ -413,7 +435,7 @@
 	 * all nodes in the cluster don't ACK at the same time
 	 */
 	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
-		tipc_link_proto_xmit(node->active_links[node->addr & 1],
+		tipc_link_proto_xmit(node_active_link(node, node->addr),
 				     STATE_MSG, 0, 0, 0, 0);
 		tn->bcl->stats.sent_acks++;
 	}
@@ -925,7 +947,6 @@
 	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
 	bcl->bearer_id = MAX_BEARERS;
 	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
-	bcl->state = WORKING_WORKING;
 	bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
 	msg_set_prevnode(bcl->pmsg, tn->own_addr);
 	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 3c290a48..d74c69b 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -133,5 +133,6 @@
 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
 int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]);
 void tipc_bclink_input(struct net *net);
+void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *msg);
 
 #endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 00bc0e6..ce9f7bf 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -343,7 +343,7 @@
 static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
 {
 	pr_info("Resetting bearer <%s>\n", b_ptr->name);
-	tipc_link_delete_list(net, b_ptr->identity);
+	tipc_node_delete_links(net, b_ptr->identity);
 	tipc_disc_reset(net, b_ptr);
 	return 0;
 }
@@ -361,7 +361,7 @@
 	pr_info("Disabling bearer <%s>\n", b_ptr->name);
 	b_ptr->media->disable_media(b_ptr);
 
-	tipc_link_delete_list(net, b_ptr->identity);
+	tipc_node_delete_links(net, b_ptr->identity);
 	if (b_ptr->link_req)
 		tipc_disc_delete(b_ptr->link_req);
 
@@ -470,6 +470,32 @@
 	rcu_read_unlock();
 }
 
+/* tipc_bearer_xmit() - send buffer to destination over bearer
+ */
+void tipc_bearer_xmit(struct net *net, u32 bearer_id,
+		      struct sk_buff_head *xmitq,
+		      struct tipc_media_addr *dst)
+{
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+	struct tipc_bearer *b;
+	struct sk_buff *skb, *tmp;
+
+	if (skb_queue_empty(xmitq))
+		return;
+
+	rcu_read_lock();
+	b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+	if (likely(b)) {
+		skb_queue_walk_safe(xmitq, skb, tmp) {
+			__skb_dequeue(xmitq);
+			b->media->send_msg(net, skb, b, dst);
+			/* Until we remove cloning in tipc_l2_send_msg(): */
+			kfree_skb(skb);
+		}
+	}
+	rcu_read_unlock();
+}
+
 /**
  * tipc_l2_rcv_msg - handle incoming TIPC message from an interface
  * @buf: the received packet
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index dc714d9..6426f24 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -217,5 +217,8 @@
 void tipc_bearer_stop(struct net *net);
 void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
 		      struct tipc_media_addr *dest);
+void tipc_bearer_xmit(struct net *net, u32 bearer_id,
+		      struct sk_buff_head *xmitq,
+		      struct tipc_media_addr *dst);
 
 #endif	/* _TIPC_BEARER_H */
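
tipc_bearer_xmit() completes the new two-phase send path: packets are first queued onto a local list under the node lock, then handed to the bearer in one batch after the lock is dropped. A hedged sketch of the intended caller pattern (the link and node pointers are obtained elsewhere):

struct sk_buff_head xmitq;
int rc;

__skb_queue_head_init(&xmitq);
tipc_node_lock(n);
rc = tipc_link_xmit(l, &list, &xmitq);	/* queue only, no bearer access */
tipc_node_unlock(n);
tipc_bearer_xmit(net, l->bearer_id, &xmitq, l->media_addr);
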
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 0fcf133..b96b41e 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -109,6 +109,11 @@
 	atomic_t subscription_count;
 };
 
+static inline struct tipc_net *tipc_net(struct net *net)
+{
+	return net_generic(net, tipc_net_id);
+}
+
 static inline u16 mod(u16 x)
 {
 	return x & 0xffffu;
@@ -129,6 +134,11 @@
 	return less_eq(left, right) && (mod(right) != mod(left));
 }
 
+static inline int in_range(u16 val, u16 min, u16 max)
+{
+	return !less(val, min) && !more(val, max);
+}
+
 #ifdef CONFIG_SYSCTL
 int tipc_register_sysctl(void);
 void tipc_unregister_sysctl(void);
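
in_range() composes the existing less()/more() helpers, which compare 16-bit sequence numbers modulo 2^16 so ordering survives wraparound: y is "after" x exactly when (u16)(y - x) < 32768. A standalone sketch with worked values (plain C that mirrors the helpers rather than including core.h):

#include <assert.h>
#include <stdint.h>

static int less_eq(uint16_t l, uint16_t r) { return (uint16_t)(r - l) < 32768; }
static int less(uint16_t l, uint16_t r)    { return less_eq(l, r) && l != r; }
static int more(uint16_t l, uint16_t r)    { return less(r, l); }
static int in_range(uint16_t v, uint16_t min, uint16_t max)
{
	return !less(v, min) && !more(v, max);
}

int main(void)
{
	assert(less(65534, 2));		/* 2 is ahead of 65534, across the wrap */
	assert(in_range(1, 65530, 10));	/* window 65530..10 wraps through 0 */
	assert(!in_range(100, 65530, 10));
	return 0;
}
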
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 967e292..d14e0a4 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -35,7 +35,7 @@
  */
 
 #include "core.h"
-#include "link.h"
+#include "node.h"
 #include "discover.h"
 
 /* min delay during bearer start up */
@@ -120,30 +120,24 @@
  * @buf: buffer containing message
  * @bearer: bearer that message arrived on
  */
-void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
+void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
 		   struct tipc_bearer *bearer)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct tipc_node *node;
-	struct tipc_link *link;
 	struct tipc_media_addr maddr;
-	struct sk_buff *rbuf;
-	struct tipc_msg *msg = buf_msg(buf);
-	u32 ddom = msg_dest_domain(msg);
-	u32 onode = msg_prevnode(msg);
-	u32 net_id = msg_bc_netid(msg);
-	u32 mtyp = msg_type(msg);
-	u32 signature = msg_node_sig(msg);
-	u16 caps = msg_node_capabilities(msg);
-	bool addr_match = false;
-	bool sign_match = false;
-	bool link_up = false;
-	bool accept_addr = false;
-	bool accept_sign = false;
+	struct sk_buff *rskb;
+	struct tipc_msg *hdr = buf_msg(skb);
+	u32 ddom = msg_dest_domain(hdr);
+	u32 onode = msg_prevnode(hdr);
+	u32 net_id = msg_bc_netid(hdr);
+	u32 mtyp = msg_type(hdr);
+	u32 signature = msg_node_sig(hdr);
+	u16 caps = msg_node_capabilities(hdr);
 	bool respond = false;
+	bool dupl_addr = false;
 
-	bearer->media->msg2addr(bearer, &maddr, msg_media_addr(msg));
-	kfree_skb(buf);
+	bearer->media->msg2addr(bearer, &maddr, msg_media_addr(hdr));
+	kfree_skb(skb);
 
 	/* Ensure message from node is valid and communication is permitted */
 	if (net_id != tn->net_id)
@@ -165,102 +159,20 @@
 	if (!tipc_in_scope(bearer->domain, onode))
 		return;
 
-	node = tipc_node_create(net, onode);
-	if (!node)
-		return;
-	tipc_node_lock(node);
-	node->capabilities = caps;
-	link = node->links[bearer->identity];
-
-	/* Prepare to validate requesting node's signature and media address */
-	sign_match = (signature == node->signature);
-	addr_match = link && !memcmp(&link->media_addr, &maddr, sizeof(maddr));
-	link_up = link && tipc_link_is_up(link);
-
-
-	/* These three flags give us eight permutations: */
-
-	if (sign_match && addr_match && link_up) {
-		/* All is fine. Do nothing. */
-	} else if (sign_match && addr_match && !link_up) {
-		/* Respond. The link will come up in due time */
-		respond = true;
-	} else if (sign_match && !addr_match && link_up) {
-		/* Peer has changed i/f address without rebooting.
-		 * If so, the link will reset soon, and the next
-		 * discovery will be accepted. So we can ignore it.
-		 * It may also be a cloned or malicious peer having
-		 * chosen the same node address and signature as an
-		 * existing one.
-		 * Ignore requests until the link goes down, if ever.
-		 */
+	tipc_node_check_dest(net, onode, bearer, caps, signature,
+			     &maddr, &respond, &dupl_addr);
+	if (dupl_addr)
 		disc_dupl_alert(bearer, onode, &maddr);
-	} else if (sign_match && !addr_match && !link_up) {
-		/* Peer link has changed i/f address without rebooting.
-		 * It may also be a cloned or malicious peer; we can't
-		 * distinguish between the two.
-		 * The signature is correct, so we must accept.
-		 */
-		accept_addr = true;
-		respond = true;
-	} else if (!sign_match && addr_match && link_up) {
-		/* Peer node rebooted. Two possibilities:
-		 *  - Delayed re-discovery; this link endpoint has already
-		 *    reset and re-established contact with the peer, before
-		 *    receiving a discovery message from that node.
-		 *    (The peer happened to receive one from this node first).
-		 *  - The peer came back so fast that our side has not
-		 *    discovered it yet. Probing from this side will soon
-		 *    reset the link, since there can be no working link
-		 *    endpoint at the peer end, and the link will re-establish.
-		 *  Accept the signature, since it comes from a known peer.
-		 */
-		accept_sign = true;
-	} else if (!sign_match && addr_match && !link_up) {
-		/*  The peer node has rebooted.
-		 *  Accept signature, since it is a known peer.
-		 */
-		accept_sign = true;
-		respond = true;
-	} else if (!sign_match && !addr_match && link_up) {
-		/* Peer rebooted with new address, or a new/duplicate peer.
-		 * Ignore until the link goes down, if ever.
-		 */
-		disc_dupl_alert(bearer, onode, &maddr);
-	} else if (!sign_match && !addr_match && !link_up) {
-		/* Peer rebooted with new address, or it is a new peer.
-		 * Accept signature and address.
-		*/
-		accept_sign = true;
-		accept_addr = true;
-		respond = true;
-	}
-
-	if (accept_sign)
-		node->signature = signature;
-
-	if (accept_addr) {
-		if (!link)
-			link = tipc_link_create(node, bearer, &maddr);
-		if (link) {
-			memcpy(&link->media_addr, &maddr, sizeof(maddr));
-			tipc_link_reset(link);
-		} else {
-			respond = false;
-		}
-	}
 
 	/* Send response, if necessary */
 	if (respond && (mtyp == DSC_REQ_MSG)) {
-		rbuf = tipc_buf_acquire(MAX_H_SIZE);
-		if (rbuf) {
-			tipc_disc_init_msg(net, rbuf, DSC_RESP_MSG, bearer);
-			tipc_bearer_send(net, bearer->identity, rbuf, &maddr);
-			kfree_skb(rbuf);
+		rskb = tipc_buf_acquire(MAX_H_SIZE);
+		if (rskb) {
+			tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
+			tipc_bearer_send(net, bearer->identity, rskb, &maddr);
+			kfree_skb(rskb);
 		}
 	}
-	tipc_node_unlock(node);
-	tipc_node_put(node);
 }
 
 /**
diff --git a/net/tipc/link.c b/net/tipc/link.c
index eaa9fe5..75db07c 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -48,9 +48,8 @@
 /*
  * Error message prefixes
  */
-static const char *link_co_err = "Link changeover error, ";
+static const char *link_co_err = "Link tunneling error, ";
 static const char *link_rst_msg = "Resetting link ";
-static const char *link_unk_evt = "Unknown link event ";
 
 static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
 	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
@@ -77,256 +76,413 @@
 };
 
 /*
+ * Interval between NACKs when packets arrive out of order
+ */
+#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
+/*
  * Out-of-range value for link session numbers
  */
-#define INVALID_SESSION 0x10000
+#define WILDCARD_SESSION 0x10000
 
-/*
- * Link state events:
+/* Link FSM states:
  */
-#define  STARTING_EVT    856384768	/* link processing trigger */
-#define  TRAFFIC_MSG_EVT 560815u	/* rx'd ??? */
-#define  SILENCE_EVT     560817u	/* timer dicovered silence from peer */
+enum {
+	LINK_ESTABLISHED     = 0xe,
+	LINK_ESTABLISHING    = 0xe  << 4,
+	LINK_RESET           = 0x1  << 8,
+	LINK_RESETTING       = 0x2  << 12,
+	LINK_PEER_RESET      = 0xd  << 16,
+	LINK_FAILINGOVER     = 0xf  << 20,
+	LINK_SYNCHING        = 0xc  << 24
+};
 
-/*
- * State value stored in 'failover_pkts'
+/* Link FSM state checking routines
  */
-#define FIRST_FAILOVER 0xffffu
+static int link_is_up(struct tipc_link *l)
+{
+	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
+}
 
-static void link_handle_out_of_seq_msg(struct tipc_link *link,
-				       struct sk_buff *skb);
-static void tipc_link_proto_rcv(struct tipc_link *link,
-				struct sk_buff *skb);
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
-static void link_state_event(struct tipc_link *l_ptr, u32 event);
+static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+			       struct sk_buff_head *xmitq);
+static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
+				      u16 rcvgap, int tolerance, int priority,
+				      struct sk_buff_head *xmitq);
 static void link_reset_statistics(struct tipc_link *l_ptr);
 static void link_print(struct tipc_link *l_ptr, const char *str);
-static void tipc_link_sync_xmit(struct tipc_link *l);
 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
-static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
-static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
-static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
-static void link_set_timer(struct tipc_link *link, unsigned long time);
-/*
- *  Simple link routines
- */
-static unsigned int align(unsigned int i)
-{
-	return (i + 3) & ~3u;
-}
-
-static void tipc_link_release(struct kref *kref)
-{
-	kfree(container_of(kref, struct tipc_link, ref));
-}
-
-static void tipc_link_get(struct tipc_link *l_ptr)
-{
-	kref_get(&l_ptr->ref);
-}
-
-static void tipc_link_put(struct tipc_link *l_ptr)
-{
-	kref_put(&l_ptr->ref, tipc_link_release);
-}
-
-static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
-{
-	if (l->owner->active_links[0] != l)
-		return l->owner->active_links[0];
-	return l->owner->active_links[1];
-}
 
 /*
  *  Simple non-static link routines (i.e. referenced outside this file)
  */
-int tipc_link_is_up(struct tipc_link *l_ptr)
+bool tipc_link_is_up(struct tipc_link *l)
 {
-	if (!l_ptr)
-		return 0;
-	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
+	return link_is_up(l);
 }
 
-int tipc_link_is_active(struct tipc_link *l_ptr)
+bool tipc_link_is_reset(struct tipc_link *l)
 {
-	return	(l_ptr->owner->active_links[0] == l_ptr) ||
-		(l_ptr->owner->active_links[1] == l_ptr);
+	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
 }
 
-/**
- * link_timeout - handle expiration of link timer
- * @l_ptr: pointer to link
- */
-static void link_timeout(unsigned long data)
+bool tipc_link_is_synching(struct tipc_link *l)
 {
-	struct tipc_link *l_ptr = (struct tipc_link *)data;
-	struct sk_buff *skb;
-
-	tipc_node_lock(l_ptr->owner);
-
-	/* update counters used in statistical profiling of send traffic */
-	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
-	l_ptr->stats.queue_sz_counts++;
-
-	skb = skb_peek(&l_ptr->transmq);
-	if (skb) {
-		struct tipc_msg *msg = buf_msg(skb);
-		u32 length = msg_size(msg);
-
-		if ((msg_user(msg) == MSG_FRAGMENTER) &&
-		    (msg_type(msg) == FIRST_FRAGMENT)) {
-			length = msg_size(msg_get_wrapped(msg));
-		}
-		if (length) {
-			l_ptr->stats.msg_lengths_total += length;
-			l_ptr->stats.msg_length_counts++;
-			if (length <= 64)
-				l_ptr->stats.msg_length_profile[0]++;
-			else if (length <= 256)
-				l_ptr->stats.msg_length_profile[1]++;
-			else if (length <= 1024)
-				l_ptr->stats.msg_length_profile[2]++;
-			else if (length <= 4096)
-				l_ptr->stats.msg_length_profile[3]++;
-			else if (length <= 16384)
-				l_ptr->stats.msg_length_profile[4]++;
-			else if (length <= 32768)
-				l_ptr->stats.msg_length_profile[5]++;
-			else
-				l_ptr->stats.msg_length_profile[6]++;
-		}
-	}
-
-	/* do all other link processing performed on a periodic basis */
-	if (l_ptr->silent_intv_cnt || tipc_bclink_acks_missing(l_ptr->owner))
-		link_state_event(l_ptr, SILENCE_EVT);
-	l_ptr->silent_intv_cnt++;
-	if (skb_queue_len(&l_ptr->backlogq))
-		tipc_link_push_packets(l_ptr);
-	link_set_timer(l_ptr, l_ptr->keepalive_intv);
-	tipc_node_unlock(l_ptr->owner);
-	tipc_link_put(l_ptr);
+	return l->state == LINK_SYNCHING;
 }
 
-static void link_set_timer(struct tipc_link *link, unsigned long time)
+bool tipc_link_is_failingover(struct tipc_link *l)
 {
-	if (!mod_timer(&link->timer, jiffies + time))
-		tipc_link_get(link);
+	return l->state == LINK_FAILINGOVER;
+}
+
+bool tipc_link_is_blocked(struct tipc_link *l)
+{
+	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
+}
+
+int tipc_link_is_active(struct tipc_link *l)
+{
+	struct tipc_node *n = l->owner;
+
+	return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
+}
+
+static u32 link_own_addr(struct tipc_link *l)
+{
+	return msg_prevnode(l->pmsg);
 }
 
 /**
  * tipc_link_create - create a new link
- * @n_ptr: pointer to associated node
- * @b_ptr: pointer to associated bearer
- * @media_addr: media address to use when sending messages over link
+ * @n: pointer to associated node
+ * @b: pointer to associated bearer
+ * @ownnode: identity of own node
+ * @peer: identity of peer node
+ * @maddr: media address to be used
+ * @inputq: queue to put messages ready for delivery
+ * @namedq: queue to put binding table update messages ready for delivery
+ * @link: return value, pointer to put the created link
  *
- * Returns pointer to link.
+ * Returns true if link was created, otherwise false
  */
-struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
-				   struct tipc_bearer *b_ptr,
-				   const struct tipc_media_addr *media_addr)
+bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
+		      u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
+		      struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+		      struct tipc_link **link)
 {
-	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
-	struct tipc_link *l_ptr;
-	struct tipc_msg *msg;
+	struct tipc_link *l;
+	struct tipc_msg *hdr;
 	char *if_name;
-	char addr_string[16];
-	u32 peer = n_ptr->addr;
 
-	if (n_ptr->link_cnt >= MAX_BEARERS) {
-		tipc_addr_string_fill(addr_string, n_ptr->addr);
-		pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
-		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
-		return NULL;
-	}
+	l = kzalloc(sizeof(*l), GFP_ATOMIC);
+	if (!l)
+		return false;
+	*link = l;
 
-	if (n_ptr->links[b_ptr->identity]) {
-		tipc_addr_string_fill(addr_string, n_ptr->addr);
-		pr_err("Attempt to establish second link on <%s> to %s\n",
-		       b_ptr->name, addr_string);
-		return NULL;
-	}
+	/* Note: peer i/f name is completed by reset/activate message */
+	if_name = strchr(b->name, ':') + 1;
+	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
+		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
+		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
 
-	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
-	if (!l_ptr) {
-		pr_warn("Link creation failed, no memory\n");
-		return NULL;
-	}
-	kref_init(&l_ptr->ref);
-	l_ptr->addr = peer;
-	if_name = strchr(b_ptr->name, ':') + 1;
-	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
-		tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
-		tipc_node(tn->own_addr),
-		if_name,
-		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
-		/* note: peer i/f name is updated by reset/activate message */
-	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
-	l_ptr->owner = n_ptr;
-	l_ptr->peer_session = INVALID_SESSION;
-	l_ptr->bearer_id = b_ptr->identity;
-	link_set_supervision_props(l_ptr, b_ptr->tolerance);
-	l_ptr->state = RESET_UNKNOWN;
+	l->addr = peer;
+	l->media_addr = maddr;
+	l->owner = n;
+	l->peer_session = WILDCARD_SESSION;
+	l->bearer_id = b->identity;
+	l->tolerance = b->tolerance;
+	l->net_plane = b->net_plane;
+	l->advertised_mtu = b->mtu;
+	l->mtu = b->mtu;
+	l->priority = b->priority;
+	tipc_link_set_queue_limits(l, b->window);
+	l->inputq = inputq;
+	l->namedq = namedq;
+	l->state = LINK_RESETTING;
+	l->pmsg = (struct tipc_msg *)&l->proto_msg;
+	hdr = l->pmsg;
+	tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
+	msg_set_size(hdr, sizeof(l->proto_msg));
+	msg_set_session(hdr, session);
+	msg_set_bearer_id(hdr, l->bearer_id);
+	strcpy((char *)msg_data(hdr), if_name);
+	__skb_queue_head_init(&l->transmq);
+	__skb_queue_head_init(&l->backlogq);
+	__skb_queue_head_init(&l->deferdq);
+	skb_queue_head_init(&l->wakeupq);
+	skb_queue_head_init(l->inputq);
+	return true;
+}
 
-	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
-	msg = l_ptr->pmsg;
-	tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
-		      l_ptr->addr);
-	msg_set_size(msg, sizeof(l_ptr->proto_msg));
-	msg_set_session(msg, (tn->random & 0xffff));
-	msg_set_bearer_id(msg, b_ptr->identity);
-	strcpy((char *)msg_data(msg), if_name);
-	l_ptr->net_plane = b_ptr->net_plane;
-	l_ptr->advertised_mtu = b_ptr->mtu;
-	l_ptr->mtu = l_ptr->advertised_mtu;
-	l_ptr->priority = b_ptr->priority;
-	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
-	l_ptr->snd_nxt = 1;
-	__skb_queue_head_init(&l_ptr->transmq);
-	__skb_queue_head_init(&l_ptr->backlogq);
-	__skb_queue_head_init(&l_ptr->deferdq);
-	skb_queue_head_init(&l_ptr->wakeupq);
-	skb_queue_head_init(&l_ptr->inputq);
-	skb_queue_head_init(&l_ptr->namedq);
-	link_reset_statistics(l_ptr);
-	tipc_node_attach_link(n_ptr, l_ptr);
-	setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
-	link_state_event(l_ptr, STARTING_EVT);
+/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
+ *
+ * Give a newly added peer node the sequence number where it should
+ * start receiving and acking broadcast packets.
+ */
+void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
+				    struct sk_buff_head *xmitq)
+{
+	struct sk_buff *skb;
+	struct sk_buff_head list;
+	u16 last_sent;
 
-	return l_ptr;
+	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
+			      0, l->addr, link_own_addr(l), 0, 0, 0);
+	if (!skb)
+		return;
+	last_sent = tipc_bclink_get_last_sent(l->owner->net);
+	msg_set_last_bcast(buf_msg(skb), last_sent);
+	__skb_queue_head_init(&list);
+	__skb_queue_tail(&list, skb);
+	tipc_link_xmit(l, &list, xmitq);
 }
 
 /**
- * tipc_link_delete - Delete a link
- * @l: link to be deleted
+ * tipc_link_fsm_evt - link finite state machine
+ * @l: pointer to link
+ * @evt: state machine event to be processed
  */
-void tipc_link_delete(struct tipc_link *l)
+int tipc_link_fsm_evt(struct tipc_link *l, int evt)
 {
-	tipc_link_reset(l);
-	if (del_timer(&l->timer))
-		tipc_link_put(l);
-	l->flags |= LINK_STOPPED;
-	/* Delete link now, or when timer is finished: */
-	tipc_link_reset_fragments(l);
-	tipc_node_detach_link(l->owner, l);
-	tipc_link_put(l);
+	int rc = 0;
+
+	switch (l->state) {
+	case LINK_RESETTING:
+		switch (evt) {
+		case LINK_PEER_RESET_EVT:
+			l->state = LINK_PEER_RESET;
+			break;
+		case LINK_RESET_EVT:
+			l->state = LINK_RESET;
+			break;
+		case LINK_FAILURE_EVT:
+		case LINK_FAILOVER_BEGIN_EVT:
+		case LINK_ESTABLISH_EVT:
+		case LINK_FAILOVER_END_EVT:
+		case LINK_SYNCH_BEGIN_EVT:
+		case LINK_SYNCH_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case LINK_RESET:
+		switch (evt) {
+		case LINK_PEER_RESET_EVT:
+			l->state = LINK_ESTABLISHING;
+			break;
+		case LINK_FAILOVER_BEGIN_EVT:
+			l->state = LINK_FAILINGOVER;
+			/* fall through */
+		case LINK_FAILURE_EVT:
+		case LINK_RESET_EVT:
+		case LINK_ESTABLISH_EVT:
+		case LINK_FAILOVER_END_EVT:
+			break;
+		case LINK_SYNCH_BEGIN_EVT:
+		case LINK_SYNCH_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case LINK_PEER_RESET:
+		switch (evt) {
+		case LINK_RESET_EVT:
+			l->state = LINK_ESTABLISHING;
+			break;
+		case LINK_PEER_RESET_EVT:
+		case LINK_ESTABLISH_EVT:
+		case LINK_FAILURE_EVT:
+			break;
+		case LINK_SYNCH_BEGIN_EVT:
+		case LINK_SYNCH_END_EVT:
+		case LINK_FAILOVER_BEGIN_EVT:
+		case LINK_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case LINK_FAILINGOVER:
+		switch (evt) {
+		case LINK_FAILOVER_END_EVT:
+			l->state = LINK_RESET;
+			break;
+		case LINK_PEER_RESET_EVT:
+		case LINK_RESET_EVT:
+		case LINK_ESTABLISH_EVT:
+		case LINK_FAILURE_EVT:
+			break;
+		case LINK_FAILOVER_BEGIN_EVT:
+		case LINK_SYNCH_BEGIN_EVT:
+		case LINK_SYNCH_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case LINK_ESTABLISHING:
+		switch (evt) {
+		case LINK_ESTABLISH_EVT:
+			l->state = LINK_ESTABLISHED;
+			rc |= TIPC_LINK_UP_EVT;
+			break;
+		case LINK_FAILOVER_BEGIN_EVT:
+			l->state = LINK_FAILINGOVER;
+			break;
+		case LINK_PEER_RESET_EVT:
+		case LINK_RESET_EVT:
+		case LINK_FAILURE_EVT:
+		case LINK_SYNCH_BEGIN_EVT:
+		case LINK_FAILOVER_END_EVT:
+			break;
+		case LINK_SYNCH_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case LINK_ESTABLISHED:
+		switch (evt) {
+		case LINK_PEER_RESET_EVT:
+			l->state = LINK_PEER_RESET;
+			rc |= TIPC_LINK_DOWN_EVT;
+			break;
+		case LINK_FAILURE_EVT:
+			l->state = LINK_RESETTING;
+			rc |= TIPC_LINK_DOWN_EVT;
+			break;
+		case LINK_RESET_EVT:
+			l->state = LINK_RESET;
+			break;
+		case LINK_ESTABLISH_EVT:
+		case LINK_SYNCH_END_EVT:
+			break;
+		case LINK_SYNCH_BEGIN_EVT:
+			l->state = LINK_SYNCHING;
+			break;
+		case LINK_FAILOVER_BEGIN_EVT:
+		case LINK_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case LINK_SYNCHING:
+		switch (evt) {
+		case LINK_PEER_RESET_EVT:
+			l->state = LINK_PEER_RESET;
+			rc |= TIPC_LINK_DOWN_EVT;
+			break;
+		case LINK_FAILURE_EVT:
+			l->state = LINK_RESETTING;
+			rc |= TIPC_LINK_DOWN_EVT;
+			break;
+		case LINK_RESET_EVT:
+			l->state = LINK_RESET;
+			break;
+		case LINK_ESTABLISH_EVT:
+		case LINK_SYNCH_BEGIN_EVT:
+			break;
+		case LINK_SYNCH_END_EVT:
+			l->state = LINK_ESTABLISHED;
+			break;
+		case LINK_FAILOVER_BEGIN_EVT:
+		case LINK_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	default:
+		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
+	}
+	return rc;
+illegal_evt:
+	pr_err("Illegal FSM event %x in state %x on link %s\n",
+	       evt, l->state, l->name);
+	return rc;
 }
 
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
+/* link_profile_stats - update statistical profiling of traffic
+ */
+static void link_profile_stats(struct tipc_link *l)
 {
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct tipc_link *link;
-	struct tipc_node *node;
+	struct sk_buff *skb;
+	struct tipc_msg *msg;
+	int length;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(node, &tn->node_list, list) {
-		tipc_node_lock(node);
-		link = node->links[bearer_id];
-		if (link)
-			tipc_link_delete(link);
-		tipc_node_unlock(node);
+	/* Update counters used in statistical profiling of send traffic */
+	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
+	l->stats.queue_sz_counts++;
+
+	skb = skb_peek(&l->transmq);
+	if (!skb)
+		return;
+	msg = buf_msg(skb);
+	length = msg_size(msg);
+
+	if (msg_user(msg) == MSG_FRAGMENTER) {
+		if (msg_type(msg) != FIRST_FRAGMENT)
+			return;
+		length = msg_size(msg_get_wrapped(msg));
 	}
-	rcu_read_unlock();
+	l->stats.msg_lengths_total += length;
+	l->stats.msg_length_counts++;
+	if (length <= 64)
+		l->stats.msg_length_profile[0]++;
+	else if (length <= 256)
+		l->stats.msg_length_profile[1]++;
+	else if (length <= 1024)
+		l->stats.msg_length_profile[2]++;
+	else if (length <= 4096)
+		l->stats.msg_length_profile[3]++;
+	else if (length <= 16384)
+		l->stats.msg_length_profile[4]++;
+	else if (length <= 32768)
+		l->stats.msg_length_profile[5]++;
+	else
+		l->stats.msg_length_profile[6]++;
+}
+
+/* tipc_link_timeout - perform periodic task as instructed from node timeout
+ */
+int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
+{
+	int rc = 0;
+	int mtyp = STATE_MSG;
+	bool xmit = false;
+	bool prb = false;
+
+	link_profile_stats(l);
+
+	switch (l->state) {
+	case LINK_ESTABLISHED:
+	case LINK_SYNCHING:
+		if (!l->silent_intv_cnt) {
+			if (tipc_bclink_acks_missing(l->owner))
+				xmit = true;
+		} else if (l->silent_intv_cnt <= l->abort_limit) {
+			xmit = true;
+			prb = true;
+		} else {
+			rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+		}
+		l->silent_intv_cnt++;
+		break;
+	case LINK_RESET:
+		xmit = true;
+		mtyp = RESET_MSG;
+		break;
+	case LINK_ESTABLISHING:
+		xmit = true;
+		mtyp = ACTIVATE_MSG;
+		break;
+	case LINK_PEER_RESET:
+	case LINK_RESETTING:
+	case LINK_FAILINGOVER:
+		break;
+	default:
+		break;
+	}
+
+	if (xmit)
+		tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);
+
+	return rc;
 }
 
 /**
@@ -334,7 +490,7 @@
  * @link: congested link
  * @list: message that was attempted sent
  * Create pseudo msg to send back to user when congestion abates
- * Only consumes message if there is an error
+ * Does not consume buffer list
  */
 static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
 {
@@ -347,8 +503,7 @@
 	/* This really cannot happen...  */
 	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
 		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
-		tipc_link_reset(link);
-		goto err;
+		return -ENOBUFS;
 	}
 	/* Non-blocking sender: */
 	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
@@ -358,15 +513,12 @@
 	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
 			      addr, addr, oport, 0, 0);
 	if (!skb)
-		goto err;
+		return -ENOBUFS;
 	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
 	TIPC_SKB_CB(skb)->chain_imp = imp;
 	skb_queue_tail(&link->wakeupq, skb);
 	link->stats.link_congs++;
 	return -ELINKCONG;
-err:
-	__skb_queue_purge(list);
-	return -ENOBUFS;
 }
 
 /**
@@ -388,9 +540,7 @@
 		if ((pnd[imp] + l->backlog[imp].len) >= lim)
 			break;
 		skb_unlink(skb, &l->wakeupq);
-		skb_queue_tail(&l->inputq, skb);
-		l->owner->inputq = &l->inputq;
-		l->owner->action_flags |= TIPC_MSG_EVT;
+		skb_queue_tail(l->inputq, skb);
 	}
 }
 
@@ -426,208 +576,36 @@
 	tipc_link_reset_fragments(l_ptr);
 }
 
-void tipc_link_reset(struct tipc_link *l_ptr)
+void tipc_link_reset(struct tipc_link *l)
 {
-	u32 prev_state = l_ptr->state;
-	int was_active_link = tipc_link_is_active(l_ptr);
-	struct tipc_node *owner = l_ptr->owner;
-	struct tipc_link *pl = tipc_parallel_link(l_ptr);
-
-	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
+	tipc_link_fsm_evt(l, LINK_RESET_EVT);
 
 	/* Link is down, accept any session */
-	l_ptr->peer_session = INVALID_SESSION;
+	l->peer_session = WILDCARD_SESSION;
+
+	/* If peer is up, it only accepts an incremented session number */
+	msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);
 
 	/* Prepare for renewed mtu size negotiation */
-	l_ptr->mtu = l_ptr->advertised_mtu;
+	l->mtu = l->advertised_mtu;
 
-	l_ptr->state = RESET_UNKNOWN;
+	/* Clean up all queues: */
+	__skb_queue_purge(&l->transmq);
+	__skb_queue_purge(&l->deferdq);
+	skb_queue_splice_init(&l->wakeupq, l->inputq);
 
-	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
-		return;
-
-	tipc_node_link_down(l_ptr->owner, l_ptr);
-	tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
-
-	if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
-		l_ptr->flags |= LINK_FAILINGOVER;
-		l_ptr->failover_checkpt = l_ptr->rcv_nxt;
-		pl->failover_pkts = FIRST_FAILOVER;
-		pl->failover_checkpt = l_ptr->rcv_nxt;
-		pl->failover_skb = l_ptr->reasm_buf;
-	} else {
-		kfree_skb(l_ptr->reasm_buf);
-	}
-	/* Clean up all queues, except inputq: */
-	__skb_queue_purge(&l_ptr->transmq);
-	__skb_queue_purge(&l_ptr->deferdq);
-	if (!owner->inputq)
-		owner->inputq = &l_ptr->inputq;
-	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
-	if (!skb_queue_empty(owner->inputq))
-		owner->action_flags |= TIPC_MSG_EVT;
-	tipc_link_purge_backlog(l_ptr);
-	l_ptr->reasm_buf = NULL;
-	l_ptr->rcv_unacked = 0;
-	l_ptr->snd_nxt = 1;
-	l_ptr->silent_intv_cnt = 0;
-	l_ptr->stale_count = 0;
-	link_reset_statistics(l_ptr);
-}
-
-static void link_activate(struct tipc_link *link)
-{
-	struct tipc_node *node = link->owner;
-
-	link->rcv_nxt = 1;
-	link->stats.recv_info = 1;
-	link->silent_intv_cnt = 0;
-	tipc_node_link_up(node, link);
-	tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
-}
-
-/**
- * link_state_event - link finite state machine
- * @l_ptr: pointer to link
- * @event: state machine event to process
- */
-static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
-{
-	struct tipc_link *other;
-	unsigned long timer_intv = l_ptr->keepalive_intv;
-
-	if (l_ptr->flags & LINK_STOPPED)
-		return;
-
-	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
-		return;		/* Not yet. */
-
-	if (l_ptr->flags & LINK_FAILINGOVER)
-		return;
-
-	switch (l_ptr->state) {
-	case WORKING_WORKING:
-		switch (event) {
-		case TRAFFIC_MSG_EVT:
-		case ACTIVATE_MSG:
-			l_ptr->silent_intv_cnt = 0;
-			break;
-		case SILENCE_EVT:
-			if (!l_ptr->silent_intv_cnt) {
-				if (tipc_bclink_acks_missing(l_ptr->owner))
-					tipc_link_proto_xmit(l_ptr, STATE_MSG,
-							     0, 0, 0, 0);
-				break;
-			}
-			l_ptr->state = WORKING_UNKNOWN;
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-			break;
-		case RESET_MSG:
-			pr_debug("%s<%s>, requested by peer\n",
-				 link_rst_msg, l_ptr->name);
-			tipc_link_reset(l_ptr);
-			l_ptr->state = RESET_RESET;
-			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-					     0, 0, 0, 0);
-			break;
-		default:
-			pr_debug("%s%u in WW state\n", link_unk_evt, event);
-		}
-		break;
-	case WORKING_UNKNOWN:
-		switch (event) {
-		case TRAFFIC_MSG_EVT:
-		case ACTIVATE_MSG:
-			l_ptr->state = WORKING_WORKING;
-			l_ptr->silent_intv_cnt = 0;
-			break;
-		case RESET_MSG:
-			pr_debug("%s<%s>, requested by peer while probing\n",
-				 link_rst_msg, l_ptr->name);
-			tipc_link_reset(l_ptr);
-			l_ptr->state = RESET_RESET;
-			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-					     0, 0, 0, 0);
-			break;
-		case SILENCE_EVT:
-			if (!l_ptr->silent_intv_cnt) {
-				l_ptr->state = WORKING_WORKING;
-				if (tipc_bclink_acks_missing(l_ptr->owner))
-					tipc_link_proto_xmit(l_ptr, STATE_MSG,
-							     0, 0, 0, 0);
-			} else if (l_ptr->silent_intv_cnt <
-				   l_ptr->abort_limit) {
-				tipc_link_proto_xmit(l_ptr, STATE_MSG,
-						     1, 0, 0, 0);
-			} else {	/* Link has failed */
-				pr_debug("%s<%s>, peer not responding\n",
-					 link_rst_msg, l_ptr->name);
-				tipc_link_reset(l_ptr);
-				l_ptr->state = RESET_UNKNOWN;
-				tipc_link_proto_xmit(l_ptr, RESET_MSG,
-						     0, 0, 0, 0);
-			}
-			break;
-		default:
-			pr_err("%s%u in WU state\n", link_unk_evt, event);
-		}
-		break;
-	case RESET_UNKNOWN:
-		switch (event) {
-		case TRAFFIC_MSG_EVT:
-			break;
-		case ACTIVATE_MSG:
-			other = l_ptr->owner->active_links[0];
-			if (other && link_working_unknown(other))
-				break;
-			l_ptr->state = WORKING_WORKING;
-			link_activate(l_ptr);
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-			if (l_ptr->owner->working_links == 1)
-				tipc_link_sync_xmit(l_ptr);
-			break;
-		case RESET_MSG:
-			l_ptr->state = RESET_RESET;
-			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-					     1, 0, 0, 0);
-			break;
-		case STARTING_EVT:
-			l_ptr->flags |= LINK_STARTED;
-			link_set_timer(l_ptr, timer_intv);
-			break;
-		case SILENCE_EVT:
-			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
-			break;
-		default:
-			pr_err("%s%u in RU state\n", link_unk_evt, event);
-		}
-		break;
-	case RESET_RESET:
-		switch (event) {
-		case TRAFFIC_MSG_EVT:
-		case ACTIVATE_MSG:
-			other = l_ptr->owner->active_links[0];
-			if (other && link_working_unknown(other))
-				break;
-			l_ptr->state = WORKING_WORKING;
-			link_activate(l_ptr);
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-			if (l_ptr->owner->working_links == 1)
-				tipc_link_sync_xmit(l_ptr);
-			break;
-		case RESET_MSG:
-			break;
-		case SILENCE_EVT:
-			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-					     0, 0, 0, 0);
-			break;
-		default:
-			pr_err("%s%u in RR state\n", link_unk_evt, event);
-		}
-		break;
-	default:
-		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
-	}
+	tipc_link_purge_backlog(l);
+	kfree_skb(l->reasm_buf);
+	kfree_skb(l->failover_reasm_skb);
+	l->reasm_buf = NULL;
+	l->failover_reasm_skb = NULL;
+	l->rcv_unacked = 0;
+	l->snd_nxt = 1;
+	l->rcv_nxt = 1;
+	l->silent_intv_cnt = 0;
+	l->stats.recv_info = 0;
+	l->stale_count = 0;
+	link_reset_statistics(l);
 }
 
 /**
@@ -635,8 +613,7 @@
  * @link: link to use
  * @list: chain of buffers containing message
  *
- * Consumes the buffer chain, except when returning -ELINKCONG,
- * since the caller then may want to make more send attempts.
+ * Consumes the buffer chain, except when returning an error code.
  * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
  * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
  */
@@ -650,7 +627,7 @@
 	u16 ack = mod(link->rcv_nxt - 1);
 	u16 seqno = link->snd_nxt;
 	u16 bc_last_in = link->owner->bclink.last_in;
-	struct tipc_media_addr *addr = &link->media_addr;
+	struct tipc_media_addr *addr = link->media_addr;
 	struct sk_buff_head *transmq = &link->transmq;
 	struct sk_buff_head *backlogq = &link->backlogq;
 	struct sk_buff *skb, *bskb;
@@ -660,10 +637,9 @@
 		if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
 			return link_schedule_user(link, list);
 	}
-	if (unlikely(msg_size(msg) > mtu)) {
-		__skb_queue_purge(list);
+	if (unlikely(msg_size(msg) > mtu))
 		return -EMSGSIZE;
-	}
+
 	/* Prepare each packet for sending, and add to relevant queue: */
 	while (skb_queue_len(list)) {
 		skb = skb_peek(list);
@@ -700,101 +676,76 @@
 	return 0;
 }
 
-static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
-{
-	skb_queue_head_init(list);
-	__skb_queue_tail(list, skb);
-}
-
-static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
-{
-	struct sk_buff_head head;
-
-	skb2list(skb, &head);
-	return __tipc_link_xmit(link->owner->net, link, &head);
-}
-
-/* tipc_link_xmit_skb(): send single buffer to destination
- * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
- * messages, which will not be rejected
- * The only exception is datagram messages rerouted after secondary
- * lookup, which are rare and safe to dispose of anyway.
- * TODO: Return real return value, and let callers use
- * tipc_wait_for_sendpkt() where applicable
- */
-int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
-		       u32 selector)
-{
-	struct sk_buff_head head;
-	int rc;
-
-	skb2list(skb, &head);
-	rc = tipc_link_xmit(net, &head, dnode, selector);
-	if (rc == -ELINKCONG)
-		kfree_skb(skb);
-	return 0;
-}
-
 /**
- * tipc_link_xmit() is the general link level function for message sending
- * @net: the applicable net namespace
+ * tipc_link_xmit(): enqueue buffer list according to queue situation
+ * @link: link to use
  * @list: chain of buffers containing message
- * @dsz: amount of user data to be sent
- * @dnode: address of destination node
- * @selector: a number used for deterministic link selection
- * Consumes the buffer chain, except when returning -ELINKCONG
- * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
- */
-int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
-		   u32 selector)
-{
-	struct tipc_link *link = NULL;
-	struct tipc_node *node;
-	int rc = -EHOSTUNREACH;
-
-	node = tipc_node_find(net, dnode);
-	if (node) {
-		tipc_node_lock(node);
-		link = node->active_links[selector & 1];
-		if (link)
-			rc = __tipc_link_xmit(net, link, list);
-		tipc_node_unlock(node);
-		tipc_node_put(node);
-	}
-	if (link)
-		return rc;
-
-	if (likely(in_own_node(net, dnode))) {
-		tipc_sk_rcv(net, list);
-		return 0;
-	}
-
-	__skb_queue_purge(list);
-	return rc;
-}
-
-/*
- * tipc_link_sync_xmit - synchronize broadcast link endpoints.
+ * @xmitq: returned list of packets to be sent by caller
  *
- * Give a newly added peer node the sequence number where it should
- * start receiving and acking broadcast packets.
- *
- * Called with node locked
+ * Consumes the buffer chain, except when returning -ELINKCONG,
+ * since the caller then may want to make more send attempts.
+ * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
+ * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
  */
-static void tipc_link_sync_xmit(struct tipc_link *link)
+int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
+		   struct sk_buff_head *xmitq)
 {
-	struct sk_buff *skb;
-	struct tipc_msg *msg;
+	struct tipc_msg *hdr = buf_msg(skb_peek(list));
+	unsigned int maxwin = l->window;
+	unsigned int i, imp = msg_importance(hdr);
+	unsigned int mtu = l->mtu;
+	u16 ack = l->rcv_nxt - 1;
+	u16 seqno = l->snd_nxt;
+	u16 bc_last_in = l->owner->bclink.last_in;
+	struct sk_buff_head *transmq = &l->transmq;
+	struct sk_buff_head *backlogq = &l->backlogq;
+	struct sk_buff *skb, *_skb, *bskb;
 
-	skb = tipc_buf_acquire(INT_H_SIZE);
-	if (!skb)
-		return;
+	/* Match msg importance against this and all higher backlog limits: */
+	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
+		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
+			return link_schedule_user(l, list);
+	}
+	if (unlikely(msg_size(hdr) > mtu))
+		return -EMSGSIZE;
 
-	msg = buf_msg(skb);
-	tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
-		      INT_H_SIZE, link->addr);
-	msg_set_last_bcast(msg, link->owner->bclink.acked);
-	__tipc_link_xmit_skb(link, skb);
+	/* Prepare each packet for sending, and add to relevant queue: */
+	while (skb_queue_len(list)) {
+		skb = skb_peek(list);
+		hdr = buf_msg(skb);
+		msg_set_seqno(hdr, seqno);
+		msg_set_ack(hdr, ack);
+		msg_set_bcast_ack(hdr, bc_last_in);
+
+		if (likely(skb_queue_len(transmq) < maxwin)) {
+			_skb = skb_clone(skb, GFP_ATOMIC);
+			if (!_skb)
+				return -ENOBUFS;
+			__skb_dequeue(list);
+			__skb_queue_tail(transmq, skb);
+			__skb_queue_tail(xmitq, _skb);
+			l->rcv_unacked = 0;
+			seqno++;
+			continue;
+		}
+		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
+			kfree_skb(__skb_dequeue(list));
+			l->stats.sent_bundled++;
+			continue;
+		}
+		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
+			kfree_skb(__skb_dequeue(list));
+			__skb_queue_tail(backlogq, bskb);
+			l->backlog[msg_importance(buf_msg(bskb))].len++;
+			l->stats.sent_bundled++;
+			l->stats.sent_bundles++;
+			continue;
+		}
+		l->backlog[imp].len += skb_queue_len(list);
+		skb_queue_splice_tail_init(list, backlogq);
+	}
+	l->snd_nxt = seqno;
+	return 0;
 }
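/* A minimal caller sketch (not part of this patch): with the new API the
 * link only fills @xmitq, and draining it to the bearer is the caller's
 * job. The helper name is hypothetical; the real dispatch lives in node.c.
 */
static int example_link_send(struct net *net, struct tipc_link *l,
			     struct sk_buff_head *list)
{
	struct sk_buff_head xmitq;
	struct sk_buff *skb;
	int rc;

	__skb_queue_head_init(&xmitq);
	rc = tipc_link_xmit(l, list, &xmitq);	/* enqueue, bundle or backlog */
	while ((skb = __skb_dequeue(&xmitq)))
		tipc_bearer_send(net, l->bearer_id, skb, l->media_addr);
	return rc;
}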
 
 /*
@@ -842,29 +793,37 @@
 		link->rcv_unacked = 0;
 		__skb_queue_tail(&link->transmq, skb);
 		tipc_bearer_send(link->owner->net, link->bearer_id,
-				 skb, &link->media_addr);
+				 skb, link->media_addr);
 	}
 	link->snd_nxt = seqno;
 }
 
-void tipc_link_reset_all(struct tipc_node *node)
+void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
-	char addr_string[16];
-	u32 i;
+	struct sk_buff *skb, *_skb;
+	struct tipc_msg *hdr;
+	u16 seqno = l->snd_nxt;
+	u16 ack = l->rcv_nxt - 1;
 
-	tipc_node_lock(node);
-
-	pr_warn("Resetting all links to %s\n",
-		tipc_addr_string_fill(addr_string, node->addr));
-
-	for (i = 0; i < MAX_BEARERS; i++) {
-		if (node->links[i]) {
-			link_print(node->links[i], "Resetting link\n");
-			tipc_link_reset(node->links[i]);
-		}
+	while (skb_queue_len(&l->transmq) < l->window) {
+		skb = skb_peek(&l->backlogq);
+		if (!skb)
+			break;
+		_skb = skb_clone(skb, GFP_ATOMIC);
+		if (!_skb)
+			break;
+		__skb_dequeue(&l->backlogq);
+		hdr = buf_msg(skb);
+		l->backlog[msg_importance(hdr)].len--;
+		__skb_queue_tail(&l->transmq, skb);
+		__skb_queue_tail(xmitq, _skb);
+		msg_set_ack(hdr, ack);
+		msg_set_seqno(hdr, seqno);
+		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+		l->rcv_unacked = 0;
+		seqno++;
 	}
-
-	tipc_node_unlock(node);
+	l->snd_nxt = seqno;
 }
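/* Illustrative post-condition of the loop above (ignoring skb_clone()
 * failure): after advancing, either the backlog is drained or the send
 * window is full, e.g.:
 *
 *	WARN_ON(!skb_queue_empty(&l->backlogq) &&
 *		skb_queue_len(&l->transmq) < l->window);
 */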
 
 static void link_retransmit_failure(struct tipc_link *l_ptr,
@@ -877,9 +836,12 @@
 
 	if (l_ptr->addr) {
 		/* Handle failure on standard link */
-		link_print(l_ptr, "Resetting link\n");
-		tipc_link_reset(l_ptr);
-
+		link_print(l_ptr, "Resetting link ");
+		pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
+			msg_user(msg), msg_type(msg), msg_size(msg),
+			msg_errcode(msg));
+		pr_info("sqno %u, prev: %x, src: %x\n",
+			msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
 	} else {
 		/* Handle failure on broadcast link */
 		struct tipc_node *n_ptr;
@@ -934,191 +896,45 @@
 		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
 		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
 		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
-				 &l_ptr->media_addr);
+				 l_ptr->media_addr);
 		retransmits--;
 		l_ptr->stats.retransmitted++;
 	}
 }
 
-/* link_synch(): check if all packets arrived before the synch
- *               point have been consumed
- * Returns true if the parallel links are synched, otherwise false
- */
-static bool link_synch(struct tipc_link *l)
+static int tipc_link_retransm(struct tipc_link *l, int retransm,
+			      struct sk_buff_head *xmitq)
 {
-	unsigned int post_synch;
-	struct tipc_link *pl;
+	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
+	struct tipc_msg *hdr;
 
-	pl  = tipc_parallel_link(l);
-	if (pl == l)
-		goto synched;
+	if (!skb)
+		return 0;
 
-	/* Was last pre-synch packet added to input queue ? */
-	if (less_eq(pl->rcv_nxt, l->synch_point))
-		return false;
-
-	/* Is it still in the input queue ? */
-	post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
-	if (skb_queue_len(&pl->inputq) > post_synch)
-		return false;
-synched:
-	l->flags &= ~LINK_SYNCHING;
-	return true;
-}
-
-static void link_retrieve_defq(struct tipc_link *link,
-			       struct sk_buff_head *list)
-{
-	u16 seq_no;
-
-	if (skb_queue_empty(&link->deferdq))
-		return;
-
-	seq_no = buf_seqno(skb_peek(&link->deferdq));
-	if (seq_no == link->rcv_nxt)
-		skb_queue_splice_tail_init(&link->deferdq, list);
-}
-
-/**
- * tipc_rcv - process TIPC packets/messages arriving from off-node
- * @net: the applicable net namespace
- * @skb: TIPC packet
- * @b_ptr: pointer to bearer message arrived on
- *
- * Invoked with no locks held.  Bearer pointer must point to a valid bearer
- * structure (i.e. cannot be NULL), but bearer can be inactive.
- */
-void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
-{
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct sk_buff_head head;
-	struct tipc_node *n_ptr;
-	struct tipc_link *l_ptr;
-	struct sk_buff *skb1, *tmp;
-	struct tipc_msg *msg;
-	u16 seq_no;
-	u16 ackd;
-	u32 released;
-
-	skb2list(skb, &head);
-
-	while ((skb = __skb_dequeue(&head))) {
-		/* Ensure message is well-formed */
-		if (unlikely(!tipc_msg_validate(skb)))
-			goto discard;
-
-		/* Handle arrival of a non-unicast link message */
-		msg = buf_msg(skb);
-		if (unlikely(msg_non_seq(msg))) {
-			if (msg_user(msg) ==  LINK_CONFIG)
-				tipc_disc_rcv(net, skb, b_ptr);
-			else
-				tipc_bclink_rcv(net, skb);
-			continue;
-		}
-
-		/* Discard unicast link messages destined for another node */
-		if (unlikely(!msg_short(msg) &&
-			     (msg_destnode(msg) != tn->own_addr)))
-			goto discard;
-
-		/* Locate neighboring node that sent message */
-		n_ptr = tipc_node_find(net, msg_prevnode(msg));
-		if (unlikely(!n_ptr))
-			goto discard;
-
-		tipc_node_lock(n_ptr);
-		/* Locate unicast link endpoint that should handle message */
-		l_ptr = n_ptr->links[b_ptr->identity];
-		if (unlikely(!l_ptr))
-			goto unlock;
-
-		/* Verify that communication with node is currently allowed */
-		if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
-		    msg_user(msg) == LINK_PROTOCOL &&
-		    (msg_type(msg) == RESET_MSG ||
-		    msg_type(msg) == ACTIVATE_MSG) &&
-		    !msg_redundant_link(msg))
-			n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
-
-		if (tipc_node_blocked(n_ptr))
-			goto unlock;
-
-		/* Validate message sequence number info */
-		seq_no = msg_seqno(msg);
-		ackd = msg_ack(msg);
-
-		/* Release acked messages */
-		if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
-			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
-
-		released = 0;
-		skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
-			if (more(buf_seqno(skb1), ackd))
-				break;
-			 __skb_unlink(skb1, &l_ptr->transmq);
-			 kfree_skb(skb1);
-			 released = 1;
-		}
-
-		/* Try sending any messages link endpoint has pending */
-		if (unlikely(skb_queue_len(&l_ptr->backlogq)))
-			tipc_link_push_packets(l_ptr);
-
-		if (released && !skb_queue_empty(&l_ptr->wakeupq))
-			link_prepare_wakeup(l_ptr);
-
-		/* Process the incoming packet */
-		if (unlikely(!link_working_working(l_ptr))) {
-			if (msg_user(msg) == LINK_PROTOCOL) {
-				tipc_link_proto_rcv(l_ptr, skb);
-				link_retrieve_defq(l_ptr, &head);
-				skb = NULL;
-				goto unlock;
-			}
-
-			/* Traffic message. Conditionally activate link */
-			link_state_event(l_ptr, TRAFFIC_MSG_EVT);
-
-			if (link_working_working(l_ptr)) {
-				/* Re-insert buffer in front of queue */
-				__skb_queue_head(&head, skb);
-				skb = NULL;
-				goto unlock;
-			}
-			goto unlock;
-		}
-
-		/* Link is now in state WORKING_WORKING */
-		if (unlikely(seq_no != l_ptr->rcv_nxt)) {
-			link_handle_out_of_seq_msg(l_ptr, skb);
-			link_retrieve_defq(l_ptr, &head);
-			skb = NULL;
-			goto unlock;
-		}
-		l_ptr->silent_intv_cnt = 0;
-
-		/* Synchronize with parallel link if applicable */
-		if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
-			if (!link_synch(l_ptr))
-				goto unlock;
-		}
-		l_ptr->rcv_nxt++;
-		if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
-			link_retrieve_defq(l_ptr, &head);
-		if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
-			l_ptr->stats.sent_acks++;
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
-		}
-		tipc_link_input(l_ptr, skb);
-		skb = NULL;
-unlock:
-		tipc_node_unlock(n_ptr);
-		tipc_node_put(n_ptr);
-discard:
-		if (unlikely(skb))
-			kfree_skb(skb);
+	/* Detect repeated retransmit failures on same packet */
+	if (likely(l->last_retransm != buf_seqno(skb))) {
+		l->last_retransm = buf_seqno(skb);
+		l->stale_count = 1;
+	} else if (++l->stale_count > 100) {
+		link_retransmit_failure(l, skb);
+		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 	}
+	skb_queue_walk(&l->transmq, skb) {
+		if (!retransm)
+			return 0;
+		hdr = buf_msg(skb);
+		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
+		if (!_skb)
+			return 0;
+		hdr = buf_msg(_skb);
+		msg_set_ack(hdr, l->rcv_nxt - 1);
+		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+		_skb->priority = TC_PRIO_CONTROL;
+		__skb_queue_tail(xmitq, _skb);
+		retransm--;
+		l->stats.retransmitted++;
+	}
+	return 0;
 }
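/* Illustrative call site, mirroring tipc_link_proto_rcv() further down:
 * a peer NACK carries the sequence gap, which bounds how many packets
 * from the head of transmq are copied onto @xmitq for retransmission.
 *
 *	if (msg_seq_gap(hdr))
 *		rc = tipc_link_retransm(l, msg_seq_gap(hdr), xmitq);
 */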
 
 /* tipc_data_input - deliver data and name distr msgs to upper layer
@@ -1126,29 +942,22 @@
  * Consumes buffer if message is of right type
  * Node lock must be held
  */
-static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
+static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb,
+			    struct sk_buff_head *inputq)
 {
 	struct tipc_node *node = link->owner;
-	struct tipc_msg *msg = buf_msg(skb);
-	u32 dport = msg_destport(msg);
 
-	switch (msg_user(msg)) {
+	switch (msg_user(buf_msg(skb))) {
 	case TIPC_LOW_IMPORTANCE:
 	case TIPC_MEDIUM_IMPORTANCE:
 	case TIPC_HIGH_IMPORTANCE:
 	case TIPC_CRITICAL_IMPORTANCE:
 	case CONN_MANAGER:
-		if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
-			node->inputq = &link->inputq;
-			node->action_flags |= TIPC_MSG_EVT;
-		}
+		__skb_queue_tail(inputq, skb);
 		return true;
 	case NAME_DISTRIBUTOR:
 		node->bclink.recv_permitted = true;
-		node->namedq = &link->namedq;
-		skb_queue_tail(&link->namedq, skb);
-		if (skb_queue_len(&link->namedq) == 1)
-			node->action_flags |= TIPC_NAMED_MSG_EVT;
+		skb_queue_tail(link->namedq, skb);
 		return true;
 	case MSG_BUNDLER:
 	case TUNNEL_PROTOCOL:
@@ -1165,54 +974,160 @@
 /* tipc_link_input - process packet that has passed link protocol check
  *
  * Consumes buffer
- * Node lock must be held
  */
-static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
+static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
+			   struct sk_buff_head *inputq)
 {
-	struct tipc_node *node = link->owner;
-	struct tipc_msg *msg = buf_msg(skb);
+	struct tipc_node *node = l->owner;
+	struct tipc_msg *hdr = buf_msg(skb);
+	struct sk_buff **reasm_skb = &l->reasm_buf;
 	struct sk_buff *iskb;
+	int usr = msg_user(hdr);
+	int rc = 0;
 	int pos = 0;
+	int ipos = 0;
 
-	if (likely(tipc_data_input(link, skb)))
-		return;
-
-	switch (msg_user(msg)) {
-	case TUNNEL_PROTOCOL:
-		if (msg_dup(msg)) {
-			link->flags |= LINK_SYNCHING;
-			link->synch_point = msg_seqno(msg_get_wrapped(msg));
-			kfree_skb(skb);
-			break;
+	if (unlikely(usr == TUNNEL_PROTOCOL)) {
+		if (msg_type(hdr) == SYNCH_MSG) {
+			__skb_queue_purge(&l->deferdq);
+			goto drop;
 		}
-		if (!tipc_link_failover_rcv(link, &skb))
-			break;
-		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
-			tipc_data_input(link, skb);
-			break;
-		}
-	case MSG_BUNDLER:
-		link->stats.recv_bundles++;
-		link->stats.recv_bundled += msg_msgcnt(msg);
+		if (!tipc_msg_extract(skb, &iskb, &ipos))
+			return rc;
+		kfree_skb(skb);
+		skb = iskb;
+		hdr = buf_msg(skb);
+		if (less(msg_seqno(hdr), l->drop_point))
+			goto drop;
+		if (tipc_data_input(l, skb, inputq))
+			return rc;
+		usr = msg_user(hdr);
+		reasm_skb = &l->failover_reasm_skb;
+	}
 
+	if (usr == MSG_BUNDLER) {
+		l->stats.recv_bundles++;
+		l->stats.recv_bundled += msg_msgcnt(hdr);
 		while (tipc_msg_extract(skb, &iskb, &pos))
-			tipc_data_input(link, iskb);
-		break;
-	case MSG_FRAGMENTER:
-		link->stats.recv_fragments++;
-		if (tipc_buf_append(&link->reasm_buf, &skb)) {
-			link->stats.recv_fragmented++;
-			tipc_data_input(link, skb);
-		} else if (!link->reasm_buf) {
-			tipc_link_reset(link);
+			tipc_data_input(l, iskb, inputq);
+		return 0;
+	} else if (usr == MSG_FRAGMENTER) {
+		l->stats.recv_fragments++;
+		if (tipc_buf_append(reasm_skb, &skb)) {
+			l->stats.recv_fragmented++;
+			tipc_data_input(l, skb, inputq);
+		} else if (!*reasm_skb) {
+			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 		}
-		break;
-	case BCAST_PROTOCOL:
+		return 0;
+	} else if (usr == BCAST_PROTOCOL) {
 		tipc_link_sync_rcv(node, skb);
-		break;
-	default:
-		break;
-	};
+		return 0;
+	}
+drop:
+	kfree_skb(skb);
+	return 0;
+}
+
+static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
+{
+	bool released = false;
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(&l->transmq, skb, tmp) {
+		if (more(buf_seqno(skb), acked))
+			break;
+		__skb_unlink(skb, &l->transmq);
+		kfree_skb(skb);
+		released = true;
+	}
+	return released;
+}
+
+/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
+ * @l: the link that should handle the message
+ * @skb: TIPC packet
+ * @xmitq: queue to place packets to be sent after this call
+ */
+int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
+		  struct sk_buff_head *xmitq)
+{
+	struct sk_buff_head *arrvq = &l->deferdq;
+	struct sk_buff_head tmpq;
+	struct tipc_msg *hdr;
+	u16 seqno, rcv_nxt;
+	int rc = 0;
+
+	__skb_queue_head_init(&tmpq);
+
+	if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
+		if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
+			tipc_link_build_proto_msg(l, STATE_MSG, 0,
+						  0, 0, 0, xmitq);
+		return rc;
+	}
+
+	while ((skb = skb_peek(arrvq))) {
+		hdr = buf_msg(skb);
+
+		/* Verify and update link state */
+		if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
+			__skb_dequeue(arrvq);
+			rc = tipc_link_proto_rcv(l, skb, xmitq);
+			continue;
+		}
+
+		if (unlikely(!link_is_up(l))) {
+			rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+			if (!link_is_up(l)) {
+				kfree_skb(__skb_dequeue(arrvq));
+				goto exit;
+			}
+		}
+
+		l->silent_intv_cnt = 0;
+
+		/* Forward queues and wake up waiting users */
+		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
+			tipc_link_advance_backlog(l, xmitq);
+			if (unlikely(!skb_queue_empty(&l->wakeupq)))
+				link_prepare_wakeup(l);
+		}
+
+		/* Defer reception if there is a gap in the sequence */
+		seqno = msg_seqno(hdr);
+		rcv_nxt = l->rcv_nxt;
+		if (unlikely(less(rcv_nxt, seqno))) {
+			l->stats.deferred_recv++;
+			goto exit;
+		}
+
+		__skb_dequeue(arrvq);
+
+		/* Drop if packet already received */
+		if (unlikely(more(rcv_nxt, seqno))) {
+			l->stats.duplicates++;
+			kfree_skb(skb);
+			goto exit;
+		}
+
+		/* Packet can be delivered */
+		l->rcv_nxt++;
+		l->stats.recv_info++;
+		if (unlikely(!tipc_data_input(l, skb, &tmpq)))
+			rc = tipc_link_input(l, skb, &tmpq);
+
+		/* Ack at regular intervals */
+		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
+			l->rcv_unacked = 0;
+			l->stats.sent_acks++;
+			tipc_link_build_proto_msg(l, STATE_MSG,
+						  0, 0, 0, 0, xmitq);
+		}
+	}
+exit:
+	tipc_skb_queue_splice_tail(&tmpq, l->inputq);
+	return rc;
 }
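/* A minimal receive-side sketch (assumed caller, normally in node.c):
 * packets from the bearer are fed to the link, and any protocol or
 * retransmitted packets the link produced are sent out right away.
 */
static void example_link_recv(struct net *net, struct tipc_link *l,
			      struct sk_buff *skb)
{
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	tipc_link_rcv(l, skb, &xmitq);
	while ((skb = __skb_dequeue(&xmitq)))
		tipc_bearer_send(net, l->bearer_id, skb, l->media_addr);
}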
 
 /**
@@ -1255,458 +1170,250 @@
 }
 
 /*
- * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
- */
-static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
-				       struct sk_buff *buf)
-{
-	u32 seq_no = buf_seqno(buf);
-
-	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
-		tipc_link_proto_rcv(l_ptr, buf);
-		return;
-	}
-
-	/* Record OOS packet arrival */
-	l_ptr->silent_intv_cnt = 0;
-
-	/*
-	 * Discard packet if a duplicate; otherwise add it to deferred queue
-	 * and notify peer of gap as per protocol specification
-	 */
-	if (less(seq_no, l_ptr->rcv_nxt)) {
-		l_ptr->stats.duplicates++;
-		kfree_skb(buf);
-		return;
-	}
-
-	if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
-		l_ptr->stats.deferred_recv++;
-		if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
-	} else {
-		l_ptr->stats.duplicates++;
-	}
-}
-
-/*
  * Send protocol message to the other endpoint.
  */
-void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
+void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
 			  u32 gap, u32 tolerance, u32 priority)
 {
-	struct sk_buff *buf = NULL;
-	struct tipc_msg *msg = l_ptr->pmsg;
-	u32 msg_size = sizeof(l_ptr->proto_msg);
-	int r_flag;
-	u16 last_rcv;
+	struct sk_buff *skb = NULL;
+	struct sk_buff_head xmitq;
 
-	/* Don't send protocol message during link failover */
-	if (l_ptr->flags & LINK_FAILINGOVER)
+	__skb_queue_head_init(&xmitq);
+	tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
+				  tolerance, priority, &xmitq);
+	skb = __skb_dequeue(&xmitq);
+	if (!skb)
 		return;
-
-	/* Abort non-RESET send if communication with node is prohibited */
-	if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
-		return;
-
-	/* Create protocol message with "out-of-sequence" sequence number */
-	msg_set_type(msg, msg_typ);
-	msg_set_net_plane(msg, l_ptr->net_plane);
-	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-	msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
-
-	if (msg_typ == STATE_MSG) {
-		u16 next_sent = l_ptr->snd_nxt;
-
-		if (!tipc_link_is_up(l_ptr))
-			return;
-		msg_set_next_sent(msg, next_sent);
-		if (!skb_queue_empty(&l_ptr->deferdq)) {
-			last_rcv = buf_seqno(skb_peek(&l_ptr->deferdq));
-			gap = mod(last_rcv - l_ptr->rcv_nxt);
-		}
-		msg_set_seq_gap(msg, gap);
-		if (gap)
-			l_ptr->stats.sent_nacks++;
-		msg_set_link_tolerance(msg, tolerance);
-		msg_set_linkprio(msg, priority);
-		msg_set_max_pkt(msg, l_ptr->mtu);
-		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
-		msg_set_probe(msg, probe_msg != 0);
-		if (probe_msg)
-			l_ptr->stats.sent_probes++;
-		l_ptr->stats.sent_states++;
-	} else {		/* RESET_MSG or ACTIVATE_MSG */
-		msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1));
-		msg_set_seq_gap(msg, 0);
-		msg_set_next_sent(msg, 1);
-		msg_set_probe(msg, 0);
-		msg_set_link_tolerance(msg, l_ptr->tolerance);
-		msg_set_linkprio(msg, l_ptr->priority);
-		msg_set_max_pkt(msg, l_ptr->advertised_mtu);
-	}
-
-	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
-	msg_set_redundant_link(msg, r_flag);
-	msg_set_linkprio(msg, l_ptr->priority);
-	msg_set_size(msg, msg_size);
-
-	msg_set_seqno(msg, mod(l_ptr->snd_nxt + (0xffff / 2)));
-
-	buf = tipc_buf_acquire(msg_size);
-	if (!buf)
-		return;
-
-	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
-	buf->priority = TC_PRIO_CONTROL;
-	tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
-			 &l_ptr->media_addr);
-	l_ptr->rcv_unacked = 0;
-	kfree_skb(buf);
+	tipc_bearer_send(l->owner->net, l->bearer_id, skb, l->media_addr);
+	l->rcv_unacked = 0;
+	kfree_skb(skb);
 }
 
-/*
- * Receive protocol message :
- * Note that network plane id propagates through the network, and may
- * change at any time. The node with lowest address rules
+/* tipc_link_build_proto_msg: prepare link protocol message for transmission
  */
-static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
-				struct sk_buff *buf)
+static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
+				      u16 rcvgap, int tolerance, int priority,
+				      struct sk_buff_head *xmitq)
 {
-	u32 rec_gap = 0;
-	u32 msg_tol;
-	struct tipc_msg *msg = buf_msg(buf);
+	struct sk_buff *skb = NULL;
+	struct tipc_msg *hdr = l->pmsg;
+	u16 snd_nxt = l->snd_nxt;
+	u16 rcv_nxt = l->rcv_nxt;
+	u16 rcv_last = rcv_nxt - 1;
+	int node_up = l->owner->bclink.recv_permitted;
 
-	if (l_ptr->flags & LINK_FAILINGOVER)
+	/* Don't send protocol message during reset or link failover */
+	if (tipc_link_is_blocked(l))
+		return;
+
+	msg_set_type(hdr, mtyp);
+	msg_set_net_plane(hdr, l->net_plane);
+	msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+	msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
+	msg_set_link_tolerance(hdr, tolerance);
+	msg_set_linkprio(hdr, priority);
+	msg_set_redundant_link(hdr, node_up);
+	msg_set_seq_gap(hdr, 0);
+
+	/* Compatibility: created msg must not be in sequence with pkt flow */
+	msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);
+
+	if (mtyp == STATE_MSG) {
+		if (!tipc_link_is_up(l))
+			return;
+		msg_set_next_sent(hdr, snd_nxt);
+
+		/* Override rcvgap if there are packets in deferred queue */
+		if (!skb_queue_empty(&l->deferdq))
+			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
+		if (rcvgap) {
+			msg_set_seq_gap(hdr, rcvgap);
+			l->stats.sent_nacks++;
+		}
+		msg_set_ack(hdr, rcv_last);
+		msg_set_probe(hdr, probe);
+		if (probe)
+			l->stats.sent_probes++;
+		l->stats.sent_states++;
+	} else {
+		/* RESET_MSG or ACTIVATE_MSG */
+		msg_set_max_pkt(hdr, l->advertised_mtu);
+		msg_set_ack(hdr, l->rcv_nxt - 1);
+		msg_set_next_sent(hdr, 1);
+	}
+	skb = tipc_buf_acquire(msg_size(hdr));
+	if (!skb)
+		return;
+	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
+	skb->priority = TC_PRIO_CONTROL;
+	__skb_queue_tail(xmitq, skb);
+}
+
+/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
+ * with contents of the link's transmit and backlog queues.
+ */
+void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
+			   int mtyp, struct sk_buff_head *xmitq)
+{
+	struct sk_buff *skb, *tnlskb;
+	struct tipc_msg *hdr, tnlhdr;
+	struct sk_buff_head *queue = &l->transmq;
+	struct sk_buff_head tmpxq, tnlq;
+	u16 pktlen, pktcnt, seqno = l->snd_nxt;
+
+	if (!tnl)
+		return;
+
+	skb_queue_head_init(&tnlq);
+	skb_queue_head_init(&tmpxq);
+
+	/* At least one packet required for safe algorithm => add dummy */
+	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+			      BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
+			      0, 0, TIPC_ERR_NO_PORT);
+	if (!skb) {
+		pr_warn("%sunable to create tunnel packet\n", link_co_err);
+		return;
+	}
+	skb_queue_tail(&tnlq, skb);
+	tipc_link_xmit(l, &tnlq, &tmpxq);
+	__skb_queue_purge(&tmpxq);
+
+	/* Initialize reusable tunnel packet header */
+	tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
+		      mtyp, INT_H_SIZE, l->addr);
+	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
+	msg_set_msgcnt(&tnlhdr, pktcnt);
+	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
+tnl:
+	/* Wrap each packet into a tunnel packet */
+	skb_queue_walk(queue, skb) {
+		hdr = buf_msg(skb);
+		if (queue == &l->backlogq)
+			msg_set_seqno(hdr, seqno++);
+		pktlen = msg_size(hdr);
+		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
+		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
+		if (!tnlskb) {
+			pr_warn("%sunable to send packet\n", link_co_err);
+			return;
+		}
+		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
+		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
+		__skb_queue_tail(&tnlq, tnlskb);
+	}
+	if (queue != &l->backlogq) {
+		queue = &l->backlogq;
+		goto tnl;
+	}
+
+	tipc_link_xmit(tnl, &tnlq, xmitq);
+
+	if (mtyp == FAILOVER_MSG) {
+		tnl->drop_point = l->rcv_nxt;
+		tnl->failover_reasm_skb = l->reasm_buf;
+		l->reasm_buf = NULL;
+	}
+}
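/* Illustrative failover sequence (assumed caller, the node FSM): when a
 * link goes down while a parallel link is still up, the failed link's
 * transmq and backlogq contents are wrapped and re-sent via the tunnel:
 *
 *	__skb_queue_head_init(&xmitq);
 *	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, &xmitq);
 *	... drain xmitq to the tunnel link's bearer as usual ...
 */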
+
+/* tipc_link_proto_rcv(): receive link-level protocol message:
+ * Note that network plane id propagates through the network, and may
+ * change at any time. The node with lowest numerical id determines
+ * network plane
+ */
+static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+			       struct sk_buff_head *xmitq)
+{
+	struct tipc_msg *hdr = buf_msg(skb);
+	u16 rcvgap = 0;
+	u16 nacked_gap = msg_seq_gap(hdr);
+	u16 peers_snd_nxt = msg_next_sent(hdr);
+	u16 peers_tol = msg_link_tolerance(hdr);
+	u16 peers_prio = msg_linkprio(hdr);
+	u16 rcv_nxt = l->rcv_nxt;
+	char *if_name;
+	int rc = 0;
+
+	if (tipc_link_is_blocked(l))
 		goto exit;
 
-	if (l_ptr->net_plane != msg_net_plane(msg))
-		if (link_own_addr(l_ptr) > msg_prevnode(msg))
-			l_ptr->net_plane = msg_net_plane(msg);
+	if (link_own_addr(l) > msg_prevnode(hdr))
+		l->net_plane = msg_net_plane(hdr);
 
-	switch (msg_type(msg)) {
-
+	switch (msg_type(hdr)) {
 	case RESET_MSG:
-		if (!link_working_unknown(l_ptr) &&
-		    (l_ptr->peer_session != INVALID_SESSION)) {
-			if (less_eq(msg_session(msg), l_ptr->peer_session))
-				break; /* duplicate or old reset: ignore */
-		}
 
-		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
-				link_working_unknown(l_ptr))) {
-			/*
-			 * peer has lost contact -- don't allow peer's links
-			 * to reactivate before we recognize loss & clean up
-			 */
-			l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
-		}
-
-		link_state_event(l_ptr, RESET_MSG);
-
+		/* Ignore duplicate RESET with old session number */
+		if ((less_eq(msg_session(hdr), l->peer_session)) &&
+		    (l->peer_session != WILDCARD_SESSION))
+			break;
 		/* fall thru' */
+
 	case ACTIVATE_MSG:
-		/* Update link settings according other endpoint's values */
-		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
 
-		msg_tol = msg_link_tolerance(msg);
-		if (msg_tol > l_ptr->tolerance)
-			link_set_supervision_props(l_ptr, msg_tol);
+		/* Complete own link name with peer's interface name */
+		if_name = strrchr(l->name, ':') + 1;
+		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
+			break;
+		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
+			break;
+		strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);
 
-		if (msg_linkprio(msg) > l_ptr->priority)
-			l_ptr->priority = msg_linkprio(msg);
+		/* Update own tolerance if peer indicates a non-zero value */
+		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+			l->tolerance = peers_tol;
 
-		if (l_ptr->mtu > msg_max_pkt(msg))
-			l_ptr->mtu = msg_max_pkt(msg);
+		/* Update own priority if peer's priority is higher */
+		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
+			l->priority = peers_prio;
 
-		/* Synchronize broadcast link info, if not done previously */
-		if (!tipc_node_is_up(l_ptr->owner)) {
-			l_ptr->owner->bclink.last_sent =
-				l_ptr->owner->bclink.last_in =
-				msg_last_bcast(msg);
-			l_ptr->owner->bclink.oos_state = 0;
+		if (msg_type(hdr) == RESET_MSG) {
+			rc |= tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
+		} else if (!link_is_up(l)) {
+			tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
+			rc |= tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
 		}
-
-		l_ptr->peer_session = msg_session(msg);
-		l_ptr->peer_bearer_id = msg_bearer_id(msg);
-
-		if (msg_type(msg) == ACTIVATE_MSG)
-			link_state_event(l_ptr, ACTIVATE_MSG);
+		l->peer_session = msg_session(hdr);
+		l->peer_bearer_id = msg_bearer_id(hdr);
+		if (l->mtu > msg_max_pkt(hdr))
+			l->mtu = msg_max_pkt(hdr);
 		break;
+
 	case STATE_MSG:
 
-		msg_tol = msg_link_tolerance(msg);
-		if (msg_tol)
-			link_set_supervision_props(l_ptr, msg_tol);
+		/* Update own tolerance if peer indicates a non-zero value */
+		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+			l->tolerance = peers_tol;
 
-		if (msg_linkprio(msg) &&
-		    (msg_linkprio(msg) != l_ptr->priority)) {
-			pr_debug("%s<%s>, priority change %u->%u\n",
-				 link_rst_msg, l_ptr->name,
-				 l_ptr->priority, msg_linkprio(msg));
-			l_ptr->priority = msg_linkprio(msg);
-			tipc_link_reset(l_ptr); /* Enforce change to take effect */
-			break;
-		}
-
-		/* Record reception; force mismatch at next timeout: */
-		l_ptr->silent_intv_cnt = 0;
-
-		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
-		l_ptr->stats.recv_states++;
-		if (link_reset_unknown(l_ptr))
+		l->silent_intv_cnt = 0;
+		l->stats.recv_states++;
+		if (msg_probe(hdr))
+			l->stats.recv_probes++;
+		rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+		if (!link_is_up(l))
 			break;
 
-		if (less_eq(l_ptr->rcv_nxt, msg_next_sent(msg)))
-			rec_gap = mod(msg_next_sent(msg) - l_ptr->rcv_nxt);
+		/* Send NACK if peer has sent pkts we haven't received yet */
+		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
+			rcvgap = peers_snd_nxt - l->rcv_nxt;
+		if (rcvgap || (msg_probe(hdr)))
+			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
+						  0, 0, xmitq);
+		tipc_link_release_pkts(l, msg_ack(hdr));
 
-		if (msg_probe(msg))
-			l_ptr->stats.recv_probes++;
-
-		/* Protocol message before retransmits, reduce loss risk */
-		if (l_ptr->owner->bclink.recv_permitted)
-			tipc_bclink_update_link_state(l_ptr->owner,
-						      msg_last_bcast(msg));
-
-		if (rec_gap || (msg_probe(msg))) {
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0,
-					     rec_gap, 0, 0);
+		/* If NACK, retransmit will now start at right position */
+		if (nacked_gap) {
+			rc = tipc_link_retransm(l, nacked_gap, xmitq);
+			l->stats.recv_nacks++;
 		}
-		if (msg_seq_gap(msg)) {
-			l_ptr->stats.recv_nacks++;
-			tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq),
-					     msg_seq_gap(msg));
-		}
-		break;
+
+		tipc_link_advance_backlog(l, xmitq);
+		if (unlikely(!skb_queue_empty(&l->wakeupq)))
+			link_prepare_wakeup(l);
 	}
 exit:
-	kfree_skb(buf);
-}
-
-
-/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
- * a different bearer. Owner node is locked.
- */
-static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
-				  struct tipc_msg *tunnel_hdr,
-				  struct tipc_msg *msg,
-				  u32 selector)
-{
-	struct tipc_link *tunnel;
-	struct sk_buff *skb;
-	u32 length = msg_size(msg);
-
-	tunnel = l_ptr->owner->active_links[selector & 1];
-	if (!tipc_link_is_up(tunnel)) {
-		pr_warn("%stunnel link no longer available\n", link_co_err);
-		return;
-	}
-	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
-	skb = tipc_buf_acquire(length + INT_H_SIZE);
-	if (!skb) {
-		pr_warn("%sunable to send tunnel msg\n", link_co_err);
-		return;
-	}
-	skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
-	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
-	__tipc_link_xmit_skb(tunnel, skb);
-}
-
-
-/* tipc_link_failover_send_queue(): A link has gone down, but a second
- * link is still active. We can do failover. Tunnel the failing link's
- * whole send queue via the remaining link. This way, we don't lose
- * any packets, and sequence order is preserved for subsequent traffic
- * sent over the remaining link. Owner node is locked.
- */
-void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
-{
-	int msgcount;
-	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
-	struct tipc_msg tunnel_hdr;
-	struct sk_buff *skb;
-	int split_bundles;
-
-	if (!tunnel)
-		return;
-
-	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
-		      FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
-
-	skb_queue_walk(&l_ptr->backlogq, skb) {
-		msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
-		l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
-	}
-	skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
-	tipc_link_purge_backlog(l_ptr);
-	msgcount = skb_queue_len(&l_ptr->transmq);
-	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
-	msg_set_msgcnt(&tunnel_hdr, msgcount);
-
-	if (skb_queue_empty(&l_ptr->transmq)) {
-		skb = tipc_buf_acquire(INT_H_SIZE);
-		if (skb) {
-			skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
-			msg_set_size(&tunnel_hdr, INT_H_SIZE);
-			__tipc_link_xmit_skb(tunnel, skb);
-		} else {
-			pr_warn("%sunable to send changeover msg\n",
-				link_co_err);
-		}
-		return;
-	}
-
-	split_bundles = (l_ptr->owner->active_links[0] !=
-			 l_ptr->owner->active_links[1]);
-
-	skb_queue_walk(&l_ptr->transmq, skb) {
-		struct tipc_msg *msg = buf_msg(skb);
-
-		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
-			struct tipc_msg *m = msg_get_wrapped(msg);
-			unchar *pos = (unchar *)m;
-
-			msgcount = msg_msgcnt(msg);
-			while (msgcount--) {
-				msg_set_seqno(m, msg_seqno(msg));
-				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
-						      msg_link_selector(m));
-				pos += align(msg_size(m));
-				m = (struct tipc_msg *)pos;
-			}
-		} else {
-			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
-					      msg_link_selector(msg));
-		}
-	}
-}
-
-/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
- * duplicate of the first link's send queue via the new link. This way, we
- * are guaranteed that currently queued packets from a socket are delivered
- * before future traffic from the same socket, even if this is using the
- * new link. The last arriving copy of each duplicate packet is dropped at
- * the receiving end by the regular protocol check, so packet cardinality
- * and sequence order is preserved per sender/receiver socket pair.
- * Owner node is locked.
- */
-void tipc_link_dup_queue_xmit(struct tipc_link *link,
-			      struct tipc_link *tnl)
-{
-	struct sk_buff *skb;
-	struct tipc_msg tnl_hdr;
-	struct sk_buff_head *queue = &link->transmq;
-	int mcnt;
-	u16 seqno;
-
-	tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
-		      SYNCH_MSG, INT_H_SIZE, link->addr);
-	mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
-	msg_set_msgcnt(&tnl_hdr, mcnt);
-	msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);
-
-tunnel_queue:
-	skb_queue_walk(queue, skb) {
-		struct sk_buff *outskb;
-		struct tipc_msg *msg = buf_msg(skb);
-		u32 len = msg_size(msg);
-
-		msg_set_ack(msg, mod(link->rcv_nxt - 1));
-		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
-		msg_set_size(&tnl_hdr, len + INT_H_SIZE);
-		outskb = tipc_buf_acquire(len + INT_H_SIZE);
-		if (outskb == NULL) {
-			pr_warn("%sunable to send duplicate msg\n",
-				link_co_err);
-			return;
-		}
-		skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
-		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
-					       skb->data, len);
-		__tipc_link_xmit_skb(tnl, outskb);
-		if (!tipc_link_is_up(link))
-			return;
-	}
-	if (queue == &link->backlogq)
-		return;
-	seqno = link->snd_nxt;
-	skb_queue_walk(&link->backlogq, skb) {
-		msg_set_seqno(buf_msg(skb), seqno);
-		seqno = mod(seqno + 1);
-	}
-	queue = &link->backlogq;
-	goto tunnel_queue;
-}
-
-/*  tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
- *  Owner node is locked.
- */
-static bool tipc_link_failover_rcv(struct tipc_link *link,
-				   struct sk_buff **skb)
-{
-	struct tipc_msg *msg = buf_msg(*skb);
-	struct sk_buff *iskb = NULL;
-	struct tipc_link *pl = NULL;
-	int bearer_id = msg_bearer_id(msg);
-	int pos = 0;
-
-	if (msg_type(msg) != FAILOVER_MSG) {
-		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
-		goto exit;
-	}
-	if (bearer_id >= MAX_BEARERS)
-		goto exit;
-
-	if (bearer_id == link->bearer_id)
-		goto exit;
-
-	pl = link->owner->links[bearer_id];
-	if (pl && tipc_link_is_up(pl))
-		tipc_link_reset(pl);
-
-	if (link->failover_pkts == FIRST_FAILOVER)
-		link->failover_pkts = msg_msgcnt(msg);
-
-	/* Should we expect an inner packet? */
-	if (!link->failover_pkts)
-		goto exit;
-
-	if (!tipc_msg_extract(*skb, &iskb, &pos)) {
-		pr_warn("%sno inner failover pkt\n", link_co_err);
-		*skb = NULL;
-		goto exit;
-	}
-	link->failover_pkts--;
-	*skb = NULL;
-
-	/* Was this packet already delivered? */
-	if (less(buf_seqno(iskb), link->failover_checkpt)) {
-		kfree_skb(iskb);
-		iskb = NULL;
-		goto exit;
-	}
-	if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
-		link->stats.recv_fragments++;
-		tipc_buf_append(&link->failover_skb, &iskb);
-	}
-exit:
-	if (!link->failover_pkts && pl)
-		pl->flags &= ~LINK_FAILINGOVER;
-	kfree_skb(*skb);
-	*skb = iskb;
-	return *skb;
-}
-
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
-{
-	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
-
-	if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
-		return;
-
-	l_ptr->tolerance = tol;
-	l_ptr->keepalive_intv = msecs_to_jiffies(intv);
-	l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->keepalive_intv));
+	kfree_skb(skb);
+	return rc;
 }
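/* Worked example for the STATE_MSG path above: with rcv_nxt = 10 and
 * peers_snd_nxt = 15, five packets are missing, so rcvgap = 5 and a
 * STATE_MSG carrying that gap is queued on @xmitq as a NACK. On the
 * peer side, nacked_gap = 5 then drives tipc_link_retransm().
 */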
 
 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
@@ -1743,7 +1450,7 @@
 	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
 		tipc_node_lock(n_ptr);
 		for (i = 0; i < MAX_BEARERS; i++) {
-			l_ptr = n_ptr->links[i];
+			l_ptr = n_ptr->links[i].link;
 			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
 				*bearer_id = i;
 				found_node = n_ptr;
@@ -1770,27 +1477,16 @@
 	l_ptr->stats.recv_info = l_ptr->rcv_nxt;
 }
 
-static void link_print(struct tipc_link *l_ptr, const char *str)
+static void link_print(struct tipc_link *l, const char *str)
 {
-	struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
-	struct tipc_bearer *b_ptr;
+	struct sk_buff *hskb = skb_peek(&l->transmq);
+	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
+	u16 tail = l->snd_nxt - 1;
 
-	rcu_read_lock();
-	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
-	if (b_ptr)
-		pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
-	rcu_read_unlock();
-
-	if (link_working_unknown(l_ptr))
-		pr_cont(":WU\n");
-	else if (link_reset_reset(l_ptr))
-		pr_cont(":RR\n");
-	else if (link_reset_unknown(l_ptr))
-		pr_cont(":RU\n");
-	else if (link_working_working(l_ptr))
-		pr_cont(":WW\n");
-	else
-		pr_cont("\n");
+	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
+	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
+		skb_queue_len(&l->transmq), head, tail,
+		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
 }
 
 /* Parse and validate nested (link) properties valid for media, bearer and link
@@ -1865,7 +1561,7 @@
 
 	tipc_node_lock(node);
 
-	link = node->links[bearer_id];
+	link = node->links[bearer_id].link;
 	if (!link) {
 		res = -EINVAL;
 		goto out;
@@ -1885,7 +1581,7 @@
 			u32 tol;
 
 			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
-			link_set_supervision_props(link, tol);
+			link->tolerance = tol;
 			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
 		}
 		if (props[TIPC_NLA_PROP_PRIO]) {
@@ -2055,10 +1751,11 @@
 	for (i = *prev_link; i < MAX_BEARERS; i++) {
 		*prev_link = i;
 
-		if (!node->links[i])
+		if (!node->links[i].link)
 			continue;
 
-		err = __tipc_nl_add_link(net, msg, node->links[i], NLM_F_MULTI);
+		err = __tipc_nl_add_link(net, msg,
+					 node->links[i].link, NLM_F_MULTI);
 		if (err)
 			return err;
 	}
@@ -2172,7 +1869,7 @@
 			return -EINVAL;
 
 		tipc_node_lock(node);
-		link = node->links[bearer_id];
+		link = node->links[bearer_id].link;
 		if (!link) {
 			tipc_node_unlock(node);
 			nlmsg_free(msg.skb);
@@ -2227,7 +1924,7 @@
 
 	tipc_node_lock(node);
 
-	link = node->links[bearer_id];
+	link = node->links[bearer_id].link;
 	if (!link) {
 		tipc_node_unlock(node);
 		return -EINVAL;
diff --git a/net/tipc/link.h b/net/tipc/link.h
index ae0a0ea..39ff8b6 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -49,19 +49,25 @@
  */
 #define INVALID_LINK_SEQ 0x10000
 
-/* Link working states
+/* Link FSM events:
  */
-#define WORKING_WORKING 560810u
-#define WORKING_UNKNOWN 560811u
-#define RESET_UNKNOWN   560812u
-#define RESET_RESET     560813u
+enum {
+	LINK_ESTABLISH_EVT       = 0xec1ab1e,
+	LINK_PEER_RESET_EVT      = 0x9eed0e,
+	LINK_FAILURE_EVT         = 0xfa110e,
+	LINK_RESET_EVT           = 0x10ca1d0e,
+	LINK_FAILOVER_BEGIN_EVT  = 0xfa110bee,
+	LINK_FAILOVER_END_EVT    = 0xfa110ede,
+	LINK_SYNCH_BEGIN_EVT     = 0xc1ccbee,
+	LINK_SYNCH_END_EVT       = 0xc1ccede
+};
 
-/* Link endpoint execution states
+/* Events returned from link at packet reception or at timeout
  */
-#define LINK_STARTED     0x0001
-#define LINK_STOPPED     0x0002
-#define LINK_SYNCHING    0x0004
-#define LINK_FAILINGOVER 0x0008
+enum {
+	TIPC_LINK_UP_EVT       = 1,
+	TIPC_LINK_DOWN_EVT     = (1 << 1)
+};
 
 /* Starting value for maximum packet size negotiation on unicast links
  * (unless bearer MTU is less)
@@ -106,7 +112,6 @@
  * @timer: link timer
  * @owner: pointer to peer node
  * @refcnt: reference counter for permanent references (owner node & timer)
- * @flags: execution state flags for link endpoint instance
  * @peer_session: link session # being used by peer end of link
  * @peer_bearer_id: bearer id used by link's peer endpoint
  * @bearer_id: local bearer id used by link
@@ -143,20 +148,17 @@
 struct tipc_link {
 	u32 addr;
 	char name[TIPC_MAX_LINK_NAME];
-	struct tipc_media_addr media_addr;
-	struct timer_list timer;
+	struct tipc_media_addr *media_addr;
 	struct tipc_node *owner;
-	struct kref ref;
 
 	/* Management and link supervision data */
-	unsigned int flags;
 	u32 peer_session;
 	u32 peer_bearer_id;
 	u32 bearer_id;
 	u32 tolerance;
 	unsigned long keepalive_intv;
 	u32 abort_limit;
-	int state;
+	u32 state;
 	u32 silent_intv_cnt;
 	struct {
 		unchar hdr[INT_H_SIZE];
@@ -165,12 +167,10 @@
 	struct tipc_msg *pmsg;
 	u32 priority;
 	char net_plane;
-	u16 synch_point;
 
-	/* Failover */
-	u16 failover_pkts;
-	u16 failover_checkpt;
-	struct sk_buff *failover_skb;
+	/* Failover/synch */
+	u16 drop_point;
+	struct sk_buff *failover_reasm_skb;
 
 	/* Max packet negotiation */
 	u16 mtu;
@@ -192,8 +192,8 @@
 	u16 rcv_nxt;
 	u32 rcv_unacked;
 	struct sk_buff_head deferdq;
-	struct sk_buff_head inputq;
-	struct sk_buff_head namedq;
+	struct sk_buff_head *inputq;
+	struct sk_buff_head *namedq;
 
 	/* Congestion handling */
 	struct sk_buff_head wakeupq;
@@ -205,28 +205,29 @@
 	struct tipc_stats stats;
 };
 
-struct tipc_port;
-
-struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
-			      struct tipc_bearer *b_ptr,
-			      const struct tipc_media_addr *media_addr);
-void tipc_link_delete(struct tipc_link *link);
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id);
-void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
-void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
+bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
+		      u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
+		      struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+		      struct tipc_link **link);
+void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
+			   int mtyp, struct sk_buff_head *xmitq);
+void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
+				    struct sk_buff_head *xmitq);
+int tipc_link_fsm_evt(struct tipc_link *l, int evt);
 void tipc_link_reset_fragments(struct tipc_link *l_ptr);
-int tipc_link_is_up(struct tipc_link *l_ptr);
+bool tipc_link_is_up(struct tipc_link *l);
+bool tipc_link_is_reset(struct tipc_link *l);
+bool tipc_link_is_synching(struct tipc_link *l);
+bool tipc_link_is_failingover(struct tipc_link *l);
+bool tipc_link_is_blocked(struct tipc_link *l);
 int tipc_link_is_active(struct tipc_link *l_ptr);
 void tipc_link_purge_queues(struct tipc_link *l_ptr);
 void tipc_link_purge_backlog(struct tipc_link *l);
-void tipc_link_reset_all(struct tipc_node *node);
 void tipc_link_reset(struct tipc_link *l_ptr);
-int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
-		       u32 selector);
-int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
-		   u32 selector);
 int __tipc_link_xmit(struct net *net, struct tipc_link *link,
 		     struct sk_buff_head *list);
+int tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list,
+		   struct sk_buff_head *xmitq);
 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
 			  u32 gap, u32 tolerance, u32 priority);
 void tipc_link_push_packets(struct tipc_link *l_ptr);
@@ -242,34 +243,8 @@
 int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
-void link_prepare_wakeup(struct tipc_link *l);
-
-static inline u32 link_own_addr(struct tipc_link *l)
-{
-	return msg_prevnode(l->pmsg);
-}
-
-/*
- * Link status checking routines
- */
-static inline int link_working_working(struct tipc_link *l_ptr)
-{
-	return l_ptr->state == WORKING_WORKING;
-}
-
-static inline int link_working_unknown(struct tipc_link *l_ptr)
-{
-	return l_ptr->state == WORKING_UNKNOWN;
-}
-
-static inline int link_reset_unknown(struct tipc_link *l_ptr)
-{
-	return l_ptr->state == RESET_UNKNOWN;
-}
-
-static inline int link_reset_reset(struct tipc_link *l_ptr)
-{
-	return l_ptr->state == RESET_RESET;
-}
+int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq);
+int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
+		  struct sk_buff_head *xmitq);
 
 #endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 08b4cc7..562c926 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -463,60 +463,72 @@
 
 /**
  * tipc_msg_reverse(): swap source and destination addresses and add error code
- * @buf:  buffer containing message to be reversed
- * @dnode: return value: node where to send message after reversal
- * @err:  error code to be set in message
- * Consumes buffer if failure
+ * @own_node: originating node id for reversed message
+ * @skb:  buffer containing message to be reversed; may be replaced.
+ * @err:  error code to be set in message, if any
+ * Consumes buffer on failure
  * Returns true if success, otherwise false
  */
-bool tipc_msg_reverse(u32 own_addr,  struct sk_buff *buf, u32 *dnode,
-		      int err)
+bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
 {
-	struct tipc_msg *msg = buf_msg(buf);
+	struct sk_buff *_skb = *skb;
+	struct tipc_msg *hdr = buf_msg(_skb);
 	struct tipc_msg ohdr;
-	uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);
+	int dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
 
-	if (skb_linearize(buf))
+	if (skb_linearize(_skb))
 		goto exit;
-	msg = buf_msg(buf);
-	if (msg_dest_droppable(msg))
+	hdr = buf_msg(_skb);
+	if (msg_dest_droppable(hdr))
 		goto exit;
-	if (msg_errcode(msg))
+	if (msg_errcode(hdr))
 		goto exit;
-	memcpy(&ohdr, msg, msg_hdr_sz(msg));
-	msg_set_errcode(msg, err);
-	msg_set_origport(msg, msg_destport(&ohdr));
-	msg_set_destport(msg, msg_origport(&ohdr));
-	msg_set_prevnode(msg, own_addr);
-	if (!msg_short(msg)) {
-		msg_set_orignode(msg, msg_destnode(&ohdr));
-		msg_set_destnode(msg, msg_orignode(&ohdr));
+
+	/* Take a copy of original header before altering message */
+	memcpy(&ohdr, hdr, msg_hdr_sz(hdr));
+
+	/* Never return SHORT header; expand by replacing buffer if necessary */
+	if (msg_short(hdr)) {
+		*skb = tipc_buf_acquire(BASIC_H_SIZE + dlen);
+		if (!*skb)
+			goto exit;
+		memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
+		kfree_skb(_skb);
+		_skb = *skb;
+		hdr = buf_msg(_skb);
+		memcpy(hdr, &ohdr, BASIC_H_SIZE);
+		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
 	}
-	msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
-	skb_trim(buf, msg_size(msg));
-	skb_orphan(buf);
-	*dnode = msg_orignode(&ohdr);
+
+	/* Now reverse the concerned fields */
+	msg_set_errcode(hdr, err);
+	msg_set_origport(hdr, msg_destport(&ohdr));
+	msg_set_destport(hdr, msg_origport(&ohdr));
+	msg_set_destnode(hdr, msg_prevnode(&ohdr));
+	msg_set_prevnode(hdr, own_node);
+	msg_set_orignode(hdr, own_node);
+	msg_set_size(hdr, msg_hdr_sz(hdr) + dlen);
+	skb_trim(_skb, msg_size(hdr));
+	skb_orphan(_skb);
 	return true;
 exit:
-	kfree_skb(buf);
-	*dnode = 0;
+	kfree_skb(_skb);
+	*skb = NULL;
 	return false;
 }
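/* A minimal rejection sketch (assumed caller, e.g. the socket layer):
 * reverse the header, possibly replacing the buffer for SHORT headers,
 * and bounce the message back towards its origin. The selector value
 * here is a placeholder.
 */
static void example_reject(struct net *net, struct sk_buff *skb, int err)
{
	if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
		return;	/* buffer already consumed */
	tipc_node_xmit_skb(net, skb, msg_destnode(buf_msg(skb)), 0);
}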
 
 /**
  * tipc_msg_lookup_dest(): try to find new destination for named message
  * @skb: the buffer containing the message.
- * @dnode: return value: next-hop node, if destination found
- * @err: return value: error code to use, if message to be rejected
+ * @err: error code to be used by caller if lookup fails
  * Does not consume buffer
  * Returns true if a destination is found, false otherwise
  */
-bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
-			  u32 *dnode, int *err)
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
 {
 	struct tipc_msg *msg = buf_msg(skb);
-	u32 dport;
-	u32 own_addr = tipc_own_addr(net);
+	u32 dport, dnode;
+	u32 onode = tipc_own_addr(net);
 
 	if (!msg_isdata(msg))
 		return false;
@@ -529,15 +541,15 @@
 		return false;
 	if (msg_reroute_cnt(msg))
 		return false;
-	*dnode = addr_domain(net, msg_lookup_scope(msg));
+	dnode = addr_domain(net, msg_lookup_scope(msg));
 	dport = tipc_nametbl_translate(net, msg_nametype(msg),
-				       msg_nameinst(msg), dnode);
+				       msg_nameinst(msg), &dnode);
 	if (!dport)
 		return false;
 	msg_incr_reroute_cnt(msg);
-	if (*dnode != own_addr)
-		msg_set_prevnode(msg, own_addr);
-	msg_set_destnode(msg, *dnode);
+	if (dnode != onode)
+		msg_set_prevnode(msg, onode);
+	msg_set_destnode(msg, dnode);
 	msg_set_destport(msg, dport);
 	*err = TIPC_OK;
 	return true;
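/* Illustrative secondary-lookup flow (assumed caller in the socket
 * receive path): try to re-route a named message first, and only if
 * that fails reverse it with the suggested error code:
 *
 *	int err = TIPC_ERR_NO_NAME;
 *	if (tipc_msg_lookup_dest(net, skb, &err))
 *		return forward(skb);	// hypothetical helper
 *	if (tipc_msg_reverse(onode, &skb, err))
 *		return bounce(skb);	// hypothetical helper
 */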
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 19c45fb..a82c584 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -38,6 +38,7 @@
 #define _TIPC_MSG_H
 
 #include <linux/tipc.h>
+#include "core.h"
 
 /*
  * Constants and routines used to read and write TIPC payload message headers
@@ -109,7 +110,6 @@
 	struct sk_buff *tail;
 	bool validated;
 	bool wakeup_pending;
-	bool bundling;
 	u16 chain_sz;
 	u16 chain_imp;
 };
@@ -558,15 +558,6 @@
 	msg_set_bits(m, 1, 15, 0x1fff, n);
 }
 
-static inline bool msg_dup(struct tipc_msg *m)
-{
-	if (likely(msg_user(m) != TUNNEL_PROTOCOL))
-		return false;
-	if (msg_type(m) != SYNCH_MSG)
-		return false;
-	return true;
-}
-
 /*
  * Word 2
  */
@@ -620,12 +611,12 @@
 }
 
 
-static inline u32 msg_next_sent(struct tipc_msg *m)
+static inline u16 msg_next_sent(struct tipc_msg *m)
 {
 	return msg_bits(m, 4, 0, 0xffff);
 }
 
-static inline void msg_set_next_sent(struct tipc_msg *m, u32 n)
+static inline void msg_set_next_sent(struct tipc_msg *m, u16 n)
 {
 	msg_set_bits(m, 4, 0, 0xffff, n);
 }
@@ -658,12 +649,12 @@
 /*
  * Word 5
  */
-static inline u32 msg_session(struct tipc_msg *m)
+static inline u16 msg_session(struct tipc_msg *m)
 {
 	return msg_bits(m, 5, 16, 0xffff);
 }
 
-static inline void msg_set_session(struct tipc_msg *m, u32 n)
+static inline void msg_set_session(struct tipc_msg *m, u16 n)
 {
 	msg_set_bits(m, 5, 16, 0xffff, n);
 }
@@ -726,12 +717,12 @@
 /*
  * Word 9
  */
-static inline u32 msg_msgcnt(struct tipc_msg *m)
+static inline u16 msg_msgcnt(struct tipc_msg *m)
 {
 	return msg_bits(m, 9, 16, 0xffff);
 }
 
-static inline void msg_set_msgcnt(struct tipc_msg *m, u32 n)
+static inline void msg_set_msgcnt(struct tipc_msg *m, u16 n)
 {
 	msg_set_bits(m, 9, 16, 0xffff, n);
 }
@@ -766,10 +757,25 @@
 	msg_set_bits(m, 9, 0, 0xffff, n);
 }
 
+static inline bool msg_peer_link_is_up(struct tipc_msg *m)
+{
+	if (likely(msg_user(m) != LINK_PROTOCOL))
+		return true;
+	if (msg_type(m) == STATE_MSG)
+		return true;
+	return false;
+}
+
+static inline bool msg_peer_node_is_up(struct tipc_msg *m)
+{
+	if (msg_peer_link_is_up(m))
+		return true;
+	return msg_redundant_link(m);
+}
+
 struct sk_buff *tipc_buf_acquire(u32 size);
 bool tipc_msg_validate(struct sk_buff *skb);
-bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
-		      int err);
+bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
 void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
 		   u32 hsize, u32 destnode);
 struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
@@ -782,8 +788,7 @@
 bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
 int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
 		   int offset, int dsz, int mtu, struct sk_buff_head *list);
-bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, u32 *dnode,
-			  int *err);
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
 struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
 
 static inline u16 buf_seqno(struct sk_buff *skb)
@@ -857,26 +862,65 @@
 	return skb;
 }
 
-/* tipc_skb_queue_tail(): add buffer to tail of list;
+/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
  * @list: list to be appended to
- * @skb: buffer to append. Always appended
- * @dport: the destination port of the buffer
- * returns true if dport differs from previous destination
+ * @skb: buffer to add
+ * Returns true if the queue should be processed further, otherwise false
  */
-static inline bool tipc_skb_queue_tail(struct sk_buff_head *list,
-				       struct sk_buff *skb, u32 dport)
+static inline bool __tipc_skb_queue_sorted(struct sk_buff_head *list,
+					   struct sk_buff *skb)
 {
-	struct sk_buff *_skb = NULL;
-	bool rv = false;
+	struct sk_buff *_skb, *tmp;
+	struct tipc_msg *hdr = buf_msg(skb);
+	u16 seqno = msg_seqno(hdr);
+
+	if (skb_queue_empty(list) || (msg_user(hdr) == LINK_PROTOCOL)) {
+		__skb_queue_head(list, skb);
+		return true;
+	}
+	if (likely(less(seqno, buf_seqno(skb_peek(list))))) {
+		__skb_queue_head(list, skb);
+		return true;
+	}
+	if (!more(seqno, buf_seqno(skb_peek_tail(list)))) {
+		skb_queue_walk_safe(list, _skb, tmp) {
+			if (likely(less(seqno, buf_seqno(_skb)))) {
+				__skb_queue_before(list, _skb, skb);
+				return true;
+			}
+		}
+	}
+	__skb_queue_tail(list, skb);
+	return false;
+}
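/* Worked example, with the list holding seqnos {6, 8}:
 *	skb 5 is queued at the head and returns true;
 *	skb 7 is inserted between 6 and 8 and returns true;
 *	skb 9 is appended at the tail and returns false, telling the
 *	caller (tipc_link_rcv) that a gap remains and a NACK may be due.
 */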
+
+/* tipc_skb_queue_splice_tail - append an skb list to lock protected list
+ * @list: the new list to append. Not lock protected
+ * @head: target list. Lock protected.
+ */
+static inline void tipc_skb_queue_splice_tail(struct sk_buff_head *list,
+					      struct sk_buff_head *head)
+{
+	spin_lock_bh(&head->lock);
+	skb_queue_splice_tail(list, head);
+	spin_unlock_bh(&head->lock);
+}
+
+/* tipc_skb_queue_splice_tail_init - merge two lock protected skb lists
+ * @list: the new list to add. Lock protected. Will be reinitialized
+ * @head: target list. Lock protected.
+ */
+static inline void tipc_skb_queue_splice_tail_init(struct sk_buff_head *list,
+						   struct sk_buff_head *head)
+{
+	struct sk_buff_head tmp;
+
+	__skb_queue_head_init(&tmp);
 
 	spin_lock_bh(&list->lock);
-	_skb = skb_peek_tail(list);
-	if (!_skb || (msg_destport(buf_msg(_skb)) != dport) ||
-	    (skb_queue_len(list) > 32))
-		rv = true;
-	__skb_queue_tail(list, skb);
+	skb_queue_splice_tail_init(list, &tmp);
 	spin_unlock_bh(&list->lock);
-	return rv;
+	tipc_skb_queue_splice_tail(&tmp, head);
 }
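/* Design note: the bounce through the temporary queue means the two
 * queue spinlocks are taken one at a time, never nested, so no lock
 * ordering between @list and @head has to be established.
 */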
 
 #endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 41e7b7e..e6018b7 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -96,13 +96,13 @@
 		dnode = node->addr;
 		if (in_own_node(net, dnode))
 			continue;
-		if (!tipc_node_active_links(node))
+		if (!tipc_node_is_up(node))
 			continue;
 		oskb = pskb_copy(skb, GFP_ATOMIC);
 		if (!oskb)
 			break;
 		msg_set_destnode(buf_msg(oskb), dnode);
-		tipc_link_xmit_skb(net, oskb, dnode, dnode);
+		tipc_node_xmit_skb(net, oskb, dnode, dnode);
 	}
 	rcu_read_unlock();
 
@@ -223,7 +223,7 @@
 			 &tn->nametbl->publ_list[TIPC_ZONE_SCOPE]);
 	rcu_read_unlock();
 
-	tipc_link_xmit(net, &head, dnode, dnode);
+	tipc_node_xmit(net, &head, dnode, dnode);
 }
 
 static void tipc_publ_subscribe(struct net *net, struct publication *publ,
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 53e0fee..1eadc95 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -1114,7 +1114,7 @@
 	}
 
 	len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
-	if (TLV_GET_LEN(msg.req) && !TLV_OK(msg.req, len)) {
+	if (len && !TLV_OK(msg.req, len)) {
 		msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
 		err = -EOPNOTSUPP;
 		goto send;
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 0b1d61a..703875f 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -40,10 +40,42 @@
 #include "name_distr.h"
 #include "socket.h"
 #include "bcast.h"
+#include "discover.h"
 
-static void node_lost_contact(struct tipc_node *n_ptr);
+/* Node FSM states and events */
+enum {
+	SELF_DOWN_PEER_DOWN    = 0xdd,
+	SELF_UP_PEER_UP        = 0xaa,
+	SELF_DOWN_PEER_LEAVING = 0xd1,
+	SELF_UP_PEER_COMING    = 0xac,
+	SELF_COMING_PEER_UP    = 0xca,
+	SELF_LEAVING_PEER_DOWN = 0x1d,
+	NODE_FAILINGOVER       = 0xf0,
+	NODE_SYNCHING          = 0xcc
+};
+
+enum {
+	SELF_ESTABL_CONTACT_EVT = 0xece,
+	SELF_LOST_CONTACT_EVT   = 0x1ce,
+	PEER_ESTABL_CONTACT_EVT = 0x9ece,
+	PEER_LOST_CONTACT_EVT   = 0x91ce,
+	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
+	NODE_FAILOVER_END_EVT   = 0xfee,
+	NODE_SYNCH_BEGIN_EVT    = 0xcbe,
+	NODE_SYNCH_END_EVT      = 0xcee
+};
+
+static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
+				  struct sk_buff_head *xmitq,
+				  struct tipc_media_addr **maddr);
+static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
+				bool delete);
+static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
 static void node_established_contact(struct tipc_node *n_ptr);
 static void tipc_node_delete(struct tipc_node *node);
+static void tipc_node_timeout(unsigned long data);
+static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
 
 struct tipc_sock_conn {
 	u32 port;
@@ -110,7 +142,7 @@
 	return NULL;
 }
 
-struct tipc_node *tipc_node_create(struct net *net, u32 addr)
+struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_node *n_ptr, *temp_node;
@@ -126,12 +158,14 @@
 	}
 	n_ptr->addr = addr;
 	n_ptr->net = net;
+	n_ptr->capabilities = capabilities;
 	kref_init(&n_ptr->kref);
 	spin_lock_init(&n_ptr->lock);
 	INIT_HLIST_NODE(&n_ptr->hash);
 	INIT_LIST_HEAD(&n_ptr->list);
 	INIT_LIST_HEAD(&n_ptr->publ_list);
 	INIT_LIST_HEAD(&n_ptr->conn_sks);
+	skb_queue_head_init(&n_ptr->bclink.namedq);
 	__skb_queue_head_init(&n_ptr->bclink.deferdq);
 	hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
 	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
@@ -139,14 +173,32 @@
 			break;
 	}
 	list_add_tail_rcu(&n_ptr->list, &temp_node->list);
-	n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
+	n_ptr->state = SELF_DOWN_PEER_LEAVING;
 	n_ptr->signature = INVALID_NODE_SIG;
+	n_ptr->active_links[0] = INVALID_BEARER_ID;
+	n_ptr->active_links[1] = INVALID_BEARER_ID;
 	tipc_node_get(n_ptr);
+	setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
+	n_ptr->keepalive_intv = U32_MAX;
 exit:
 	spin_unlock_bh(&tn->node_list_lock);
 	return n_ptr;
 }
 
+static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
+{
+	unsigned long tol = l->tolerance;
+	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
+	unsigned long keepalive_intv = msecs_to_jiffies(intv);
+
+	/* Link with lowest tolerance determines timer interval */
+	if (keepalive_intv < n->keepalive_intv)
+		n->keepalive_intv = keepalive_intv;
+
+	/* Ensure link's abort limit corresponds to current interval */
+	l->abort_limit = l->tolerance / jiffies_to_msecs(n->keepalive_intv);
+}
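
A worked example of the interval math above, illustration only (values in milliseconds, the msecs_to_jiffies() conversion elided):

    unsigned long tol = 4000;                              /* link tolerance */
    unsigned long intv = (tol / 4 > 500) ? 500 : tol / 4;  /* capped: 500 ms */
    unsigned long abort_limit = tol / intv;                /* 8 missed probes */

    /* With tol = 1500: intv = 375 ms, abort_limit = 4. When a node has
     * several links, the node timer runs at the smallest interval of any
     * of them, and each link's abort_limit is rescaled to match.
     */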
+
 static void tipc_node_delete(struct tipc_node *node)
 {
 	list_del_rcu(&node->list);
@@ -160,8 +212,11 @@
 	struct tipc_node *node, *t_node;
 
 	spin_lock_bh(&tn->node_list_lock);
-	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
+	list_for_each_entry_safe(node, t_node, &tn->node_list, list) {
+		if (del_timer(&node->timer))
+			tipc_node_put(node);
 		tipc_node_put(node);
+	}
 	spin_unlock_bh(&tn->node_list_lock);
 }
 
@@ -219,158 +274,551 @@
 	tipc_node_put(node);
 }
 
+/* tipc_node_timeout - handle expiration of node timer
+ */
+static void tipc_node_timeout(unsigned long data)
+{
+	struct tipc_node *n = (struct tipc_node *)data;
+	struct tipc_link_entry *le;
+	struct sk_buff_head xmitq;
+	int bearer_id;
+	int rc = 0;
+
+	__skb_queue_head_init(&xmitq);
+
+	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+		tipc_node_lock(n);
+		le = &n->links[bearer_id];
+		if (le->link) {
+			/* Link tolerance may change asynchronously: */
+			tipc_node_calculate_timer(n, le->link);
+			rc = tipc_link_timeout(le->link, &xmitq);
+		}
+		tipc_node_unlock(n);
+		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
+		if (rc & TIPC_LINK_DOWN_EVT)
+			tipc_node_link_down(n, bearer_id, false);
+	}
+	if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
+		tipc_node_get(n);
+	tipc_node_put(n);
+}
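
On the timer reference counting above: each armed timer pins one node reference. mod_timer() returns 0 when no timer was pending, so re-arming from the expired handler takes a fresh reference, the unconditional tipc_node_put() releases the one the expired timer held, and tipc_node_stop() earlier in this file drops the outstanding reference with del_timer() when it deactivates a pending timer.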
+
+/**
+ * __tipc_node_link_up - handle addition of link
+ * Node lock must be held by caller
+ * Link becomes active (alone or shared) or standby, depending on its priority.
+ */
+static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
+				struct sk_buff_head *xmitq)
+{
+	int *slot0 = &n->active_links[0];
+	int *slot1 = &n->active_links[1];
+	struct tipc_link *ol = node_active_link(n, 0);
+	struct tipc_link *nl = n->links[bearer_id].link;
+
+	if (!nl || !tipc_link_is_up(nl))
+		return;
+
+	n->working_links++;
+	n->action_flags |= TIPC_NOTIFY_LINK_UP;
+	n->link_id = nl->peer_bearer_id << 16 | bearer_id;
+
+	/* Leave room for tunnel header when returning 'mtu' to users: */
+	n->links[bearer_id].mtu = nl->mtu - INT_H_SIZE;
+
+	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
+
+	pr_debug("Established link <%s> on network plane %c\n",
+		 nl->name, nl->net_plane);
+
+	/* First link? => give it both slots */
+	if (!ol) {
+		*slot0 = bearer_id;
+		*slot1 = bearer_id;
+		tipc_link_build_bcast_sync_msg(nl, xmitq);
+		node_established_contact(n);
+		return;
+	}
+
+	/* Second link => redistribute slots */
+	if (nl->priority > ol->priority) {
+		pr_debug("Old link <%s> becomes standby\n", ol->name);
+		*slot0 = bearer_id;
+		*slot1 = bearer_id;
+	} else if (nl->priority == ol->priority) {
+		*slot0 = bearer_id;
+	} else {
+		pr_debug("New link <%s> is standby\n", nl->name);
+	}
+
+	/* Prepare synchronization with first link */
+	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
+}
+
 /**
  * tipc_node_link_up - handle addition of link
  *
  * Link becomes active (alone or shared) or standby, depending on its priority.
  */
-void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
+			      struct sk_buff_head *xmitq)
 {
-	struct tipc_link **active = &n_ptr->active_links[0];
-
-	n_ptr->working_links++;
-	n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP;
-	n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
-
-	pr_debug("Established link <%s> on network plane %c\n",
-		 l_ptr->name, l_ptr->net_plane);
-
-	if (!active[0]) {
-		active[0] = active[1] = l_ptr;
-		node_established_contact(n_ptr);
-		goto exit;
-	}
-	if (l_ptr->priority < active[0]->priority) {
-		pr_debug("New link <%s> becomes standby\n", l_ptr->name);
-		goto exit;
-	}
-	tipc_link_dup_queue_xmit(active[0], l_ptr);
-	if (l_ptr->priority == active[0]->priority) {
-		active[0] = l_ptr;
-		goto exit;
-	}
-	pr_debug("Old link <%s> becomes standby\n", active[0]->name);
-	if (active[1] != active[0])
-		pr_debug("Old link <%s> becomes standby\n", active[1]->name);
-	active[0] = active[1] = l_ptr;
-exit:
-	/* Leave room for changeover header when returning 'mtu' to users: */
-	n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
-	n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
+	tipc_node_lock(n);
+	__tipc_node_link_up(n, bearer_id, xmitq);
+	tipc_node_unlock(n);
 }
 
 /**
- * node_select_active_links - select active link
+ * __tipc_node_link_down - handle loss of link
  */
-static void node_select_active_links(struct tipc_node *n_ptr)
+static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
+				  struct sk_buff_head *xmitq,
+				  struct tipc_media_addr **maddr)
 {
-	struct tipc_link **active = &n_ptr->active_links[0];
-	u32 i;
-	u32 highest_prio = 0;
+	struct tipc_link_entry *le = &n->links[*bearer_id];
+	int *slot0 = &n->active_links[0];
+	int *slot1 = &n->active_links[1];
+	int i, highest = 0;
+	struct tipc_link *l, *_l, *tnl;
 
-	active[0] = active[1] = NULL;
-
-	for (i = 0; i < MAX_BEARERS; i++) {
-		struct tipc_link *l_ptr = n_ptr->links[i];
-
-		if (!l_ptr || !tipc_link_is_up(l_ptr) ||
-		    (l_ptr->priority < highest_prio))
-			continue;
-
-		if (l_ptr->priority > highest_prio) {
-			highest_prio = l_ptr->priority;
-			active[0] = active[1] = l_ptr;
-		} else {
-			active[1] = l_ptr;
-		}
-	}
-}
-
-/**
- * tipc_node_link_down - handle loss of link
- */
-void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
-{
-	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
-	struct tipc_link **active;
-
-	n_ptr->working_links--;
-	n_ptr->action_flags |= TIPC_NOTIFY_LINK_DOWN;
-	n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
-
-	if (!tipc_link_is_active(l_ptr)) {
-		pr_debug("Lost standby link <%s> on network plane %c\n",
-			 l_ptr->name, l_ptr->net_plane);
+	l = n->links[*bearer_id].link;
+	if (!l || tipc_link_is_reset(l))
 		return;
-	}
+
+	n->working_links--;
+	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
+	n->link_id = l->peer_bearer_id << 16 | *bearer_id;
+
+	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
+
 	pr_debug("Lost link <%s> on network plane %c\n",
-		 l_ptr->name, l_ptr->net_plane);
+		 l->name, l->net_plane);
 
-	active = &n_ptr->active_links[0];
-	if (active[0] == l_ptr)
-		active[0] = active[1];
-	if (active[1] == l_ptr)
-		active[1] = active[0];
-	if (active[0] == l_ptr)
-		node_select_active_links(n_ptr);
-	if (tipc_node_is_up(n_ptr))
-		tipc_link_failover_send_queue(l_ptr);
-	else
-		node_lost_contact(n_ptr);
+	/* Select new active link if any available */
+	*slot0 = INVALID_BEARER_ID;
+	*slot1 = INVALID_BEARER_ID;
+	for (i = 0; i < MAX_BEARERS; i++) {
+		_l = n->links[i].link;
+		if (!_l || !tipc_link_is_up(_l))
+			continue;
+		if (_l == l)
+			continue;
+		if (_l->priority < highest)
+			continue;
+		if (_l->priority > highest) {
+			highest = _l->priority;
+			*slot0 = i;
+			*slot1 = i;
+			continue;
+		}
+		*slot1 = i;
+	}
 
-	/* Leave room for changeover header when returning 'mtu' to users: */
-	if (active[0]) {
-		n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
-		n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
+	if (!tipc_node_is_up(n)) {
+		tipc_link_reset(l);
+		node_lost_contact(n, &le->inputq);
 		return;
 	}
-	/* Loopback link went down? No fragmentation needed from now on. */
-	if (n_ptr->addr == tn->own_addr) {
-		n_ptr->act_mtus[0] = MAX_MSG_SIZE;
-		n_ptr->act_mtus[1] = MAX_MSG_SIZE;
+
+	/* There is still a working link => initiate failover */
+	tnl = node_active_link(n, 0);
+	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
+	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
+	n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1);
+	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
+	tipc_link_reset(l);
+	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
+	*maddr = &n->links[tnl->bearer_id].maddr;
+	*bearer_id = tnl->bearer_id;
+}
+
+static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
+{
+	struct tipc_link_entry *le = &n->links[bearer_id];
+	struct tipc_media_addr *maddr;
+	struct sk_buff_head xmitq;
+
+	__skb_queue_head_init(&xmitq);
+
+	tipc_node_lock(n);
+	__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
+	if (delete && le->link) {
+		kfree(le->link);
+		le->link = NULL;
+		n->link_cnt--;
 	}
+	tipc_node_unlock(n);
+
+	tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
+	tipc_sk_rcv(n->net, &le->inputq);
 }
 
-int tipc_node_active_links(struct tipc_node *n_ptr)
+bool tipc_node_is_up(struct tipc_node *n)
 {
-	return n_ptr->active_links[0] != NULL;
+	return n->active_links[0] != INVALID_BEARER_ID;
 }
 
-int tipc_node_is_up(struct tipc_node *n_ptr)
+void tipc_node_check_dest(struct net *net, u32 onode,
+			  struct tipc_bearer *b,
+			  u16 capabilities, u32 signature,
+			  struct tipc_media_addr *maddr,
+			  bool *respond, bool *dupl_addr)
 {
-	return tipc_node_active_links(n_ptr);
+	struct tipc_node *n;
+	struct tipc_link *l;
+	struct tipc_link_entry *le;
+	bool addr_match = false;
+	bool sign_match = false;
+	bool link_up = false;
+	bool accept_addr = false;
+	bool reset = true;
+
+	*dupl_addr = false;
+	*respond = false;
+
+	n = tipc_node_create(net, onode, capabilities);
+	if (!n)
+		return;
+
+	tipc_node_lock(n);
+
+	le = &n->links[b->identity];
+
+	/* Prepare to validate requesting node's signature and media address */
+	l = le->link;
+	link_up = l && tipc_link_is_up(l);
+	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
+	sign_match = (signature == n->signature);
+
+	/* These three flags give us eight permutations: */
+
+	if (sign_match && addr_match && link_up) {
+		/* All is fine. Do nothing. */
+		reset = false;
+	} else if (sign_match && addr_match && !link_up) {
+		/* Respond. The link will come up in due time */
+		*respond = true;
+	} else if (sign_match && !addr_match && link_up) {
+		/* Peer has changed i/f address without rebooting.
+		 * If so, the link will reset soon, and the next
+		 * discovery will be accepted. So we can ignore it.
+		 * It may also be a cloned or malicious peer having
+		 * chosen the same node address and signature as an
+		 * existing one.
+		 * Ignore requests until the link goes down, if ever.
+		 */
+		*dupl_addr = true;
+	} else if (sign_match && !addr_match && !link_up) {
+		/* Peer link has changed i/f address without rebooting.
+		 * It may also be a cloned or malicious peer; we can't
+		 * distinguish between the two.
+		 * The signature is correct, so we must accept.
+		 */
+		accept_addr = true;
+		*respond = true;
+	} else if (!sign_match && addr_match && link_up) {
+		/* Peer node rebooted. Two possibilities:
+		 *  - Delayed re-discovery; this link endpoint has already
+		 *    reset and re-established contact with the peer, before
+		 *    receiving a discovery message from that node.
+		 *    (The peer happened to receive one from this node first).
+		 *  - The peer came back so fast that our side has not
+		 *    discovered it yet. Probing from this side will soon
+		 *    reset the link, since there can be no working link
+		 *    endpoint at the peer end, and the link will re-establish.
+		 *  Accept the signature, since it comes from a known peer.
+		 */
+		n->signature = signature;
+	} else if (!sign_match && addr_match && !link_up) {
+		/*  The peer node has rebooted.
+		 *  Accept signature, since it is a known peer.
+		 */
+		n->signature = signature;
+		*respond = true;
+	} else if (!sign_match && !addr_match && link_up) {
+		/* Peer rebooted with new address, or a new/duplicate peer.
+		 * Ignore until the link goes down, if ever.
+		 */
+		*dupl_addr = true;
+	} else if (!sign_match && !addr_match && !link_up) {
+		/* Peer rebooted with new address, or it is a new peer.
+		 * Accept signature and address.
+		 */
+		n->signature = signature;
+		accept_addr = true;
+		*respond = true;
+	}
+
+	if (!accept_addr)
+		goto exit;
+
+	/* Now create new link if not already existing */
+	if (!l) {
+		if (n->link_cnt == 2) {
+			pr_warn("Cannot establish 3rd link to %x\n", n->addr);
+			goto exit;
+		}
+		if (!tipc_link_create(n, b, mod(tipc_net(net)->random),
+				      tipc_own_addr(net), onode, &le->maddr,
+				      &le->inputq, &n->bclink.namedq, &l)) {
+			*respond = false;
+			goto exit;
+		}
+		tipc_link_reset(l);
+		if (n->state == NODE_FAILINGOVER)
+			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+		le->link = l;
+		n->link_cnt++;
+		tipc_node_calculate_timer(n, l);
+		if (n->link_cnt == 1)
+			if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
+				tipc_node_get(n);
+	}
+	memcpy(&le->maddr, maddr, sizeof(*maddr));
+exit:
+	tipc_node_unlock(n);
+	if (reset)
+		tipc_node_link_down(n, b->identity, false);
+	tipc_node_put(n);
 }
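
For quick reference, the eight (sign_match, addr_match, link_up) permutations handled above collapse to the table below, read directly from the branches; every row except the first also leaves reset = true, so tipc_node_link_down() is called on exit:

    sign   addr   link_up   action
     ok     ok      up      nothing (reset = false)
     ok     ok     down     respond
     ok    diff     up      flag dupl_addr; ignore until link goes down
     ok    diff    down     accept new address, respond
    diff    ok      up      adopt new signature
    diff    ok     down     adopt new signature, respond
    diff   diff     up      flag dupl_addr; ignore until link goes down
    diff   diff    down     adopt signature, accept address, respond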
 
-void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+void tipc_node_delete_links(struct net *net, int bearer_id)
 {
-	n_ptr->links[l_ptr->bearer_id] = l_ptr;
-	n_ptr->link_cnt++;
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+	struct tipc_node *n;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(n, &tn->node_list, list) {
+		tipc_node_link_down(n, bearer_id, true);
+	}
+	rcu_read_unlock();
 }
 
-void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+static void tipc_node_reset_links(struct tipc_node *n)
 {
+	char addr_string[16];
 	int i;
 
+	pr_warn("Resetting all links to %s\n",
+		tipc_addr_string_fill(addr_string, n->addr));
+
 	for (i = 0; i < MAX_BEARERS; i++) {
-		if (l_ptr != n_ptr->links[i])
-			continue;
-		n_ptr->links[i] = NULL;
-		n_ptr->link_cnt--;
+		tipc_node_link_down(n, i, false);
 	}
 }
 
+/* tipc_node_fsm_evt - node finite state machine
+ * Determines when contact is allowed with peer node
+ */
+static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
+{
+	int state = n->state;
+
+	switch (state) {
+	case SELF_DOWN_PEER_DOWN:
+		switch (evt) {
+		case SELF_ESTABL_CONTACT_EVT:
+			state = SELF_UP_PEER_COMING;
+			break;
+		case PEER_ESTABL_CONTACT_EVT:
+			state = SELF_COMING_PEER_UP;
+			break;
+		case SELF_LOST_CONTACT_EVT:
+		case PEER_LOST_CONTACT_EVT:
+			break;
+		case NODE_SYNCH_END_EVT:
+		case NODE_SYNCH_BEGIN_EVT:
+		case NODE_FAILOVER_BEGIN_EVT:
+		case NODE_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case SELF_UP_PEER_UP:
+		switch (evt) {
+		case SELF_LOST_CONTACT_EVT:
+			state = SELF_DOWN_PEER_LEAVING;
+			break;
+		case PEER_LOST_CONTACT_EVT:
+			state = SELF_LEAVING_PEER_DOWN;
+			break;
+		case NODE_SYNCH_BEGIN_EVT:
+			state = NODE_SYNCHING;
+			break;
+		case NODE_FAILOVER_BEGIN_EVT:
+			state = NODE_FAILINGOVER;
+			break;
+		case SELF_ESTABL_CONTACT_EVT:
+		case PEER_ESTABL_CONTACT_EVT:
+		case NODE_SYNCH_END_EVT:
+		case NODE_FAILOVER_END_EVT:
+			break;
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case SELF_DOWN_PEER_LEAVING:
+		switch (evt) {
+		case PEER_LOST_CONTACT_EVT:
+			state = SELF_DOWN_PEER_DOWN;
+			break;
+		case SELF_ESTABL_CONTACT_EVT:
+		case PEER_ESTABL_CONTACT_EVT:
+		case SELF_LOST_CONTACT_EVT:
+			break;
+		case NODE_SYNCH_END_EVT:
+		case NODE_SYNCH_BEGIN_EVT:
+		case NODE_FAILOVER_BEGIN_EVT:
+		case NODE_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case SELF_UP_PEER_COMING:
+		switch (evt) {
+		case PEER_ESTABL_CONTACT_EVT:
+			state = SELF_UP_PEER_UP;
+			break;
+		case SELF_LOST_CONTACT_EVT:
+			state = SELF_DOWN_PEER_LEAVING;
+			break;
+		case SELF_ESTABL_CONTACT_EVT:
+		case PEER_LOST_CONTACT_EVT:
+			break;
+		case NODE_SYNCH_END_EVT:
+		case NODE_SYNCH_BEGIN_EVT:
+		case NODE_FAILOVER_BEGIN_EVT:
+		case NODE_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case SELF_COMING_PEER_UP:
+		switch (evt) {
+		case SELF_ESTABL_CONTACT_EVT:
+			state = SELF_UP_PEER_UP;
+			break;
+		case PEER_LOST_CONTACT_EVT:
+			state = SELF_LEAVING_PEER_DOWN;
+			break;
+		case SELF_LOST_CONTACT_EVT:
+		case PEER_ESTABL_CONTACT_EVT:
+			break;
+		case NODE_SYNCH_END_EVT:
+		case NODE_SYNCH_BEGIN_EVT:
+		case NODE_FAILOVER_BEGIN_EVT:
+		case NODE_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case SELF_LEAVING_PEER_DOWN:
+		switch (evt) {
+		case SELF_LOST_CONTACT_EVT:
+			state = SELF_DOWN_PEER_DOWN;
+			break;
+		case SELF_ESTABL_CONTACT_EVT:
+		case PEER_ESTABL_CONTACT_EVT:
+		case PEER_LOST_CONTACT_EVT:
+			break;
+		case NODE_SYNCH_END_EVT:
+		case NODE_SYNCH_BEGIN_EVT:
+		case NODE_FAILOVER_BEGIN_EVT:
+		case NODE_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case NODE_FAILINGOVER:
+		switch (evt) {
+		case SELF_LOST_CONTACT_EVT:
+			state = SELF_DOWN_PEER_LEAVING;
+			break;
+		case PEER_LOST_CONTACT_EVT:
+			state = SELF_LEAVING_PEER_DOWN;
+			break;
+		case NODE_FAILOVER_END_EVT:
+			state = SELF_UP_PEER_UP;
+			break;
+		case NODE_FAILOVER_BEGIN_EVT:
+		case SELF_ESTABL_CONTACT_EVT:
+		case PEER_ESTABL_CONTACT_EVT:
+			break;
+		case NODE_SYNCH_BEGIN_EVT:
+		case NODE_SYNCH_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case NODE_SYNCHING:
+		switch (evt) {
+		case SELF_LOST_CONTACT_EVT:
+			state = SELF_DOWN_PEER_LEAVING;
+			break;
+		case PEER_LOST_CONTACT_EVT:
+			state = SELF_LEAVING_PEER_DOWN;
+			break;
+		case NODE_SYNCH_END_EVT:
+			state = SELF_UP_PEER_UP;
+			break;
+		case NODE_FAILOVER_BEGIN_EVT:
+			state = NODE_FAILINGOVER;
+			break;
+		case NODE_SYNCH_BEGIN_EVT:
+		case SELF_ESTABL_CONTACT_EVT:
+		case PEER_ESTABL_CONTACT_EVT:
+			break;
+		case NODE_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	default:
+		pr_err("Unknown node fsm state %x\n", state);
+		break;
+	}
+	n->state = state;
+	return;
+
+illegal_evt:
+	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
+}
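
A typical bring-up, read off the transition tables above:

    SELF_DOWN_PEER_DOWN  --SELF_ESTABL_CONTACT_EVT-->  SELF_UP_PEER_COMING
    SELF_UP_PEER_COMING  --PEER_ESTABL_CONTACT_EVT-->  SELF_UP_PEER_UP
    SELF_UP_PEER_UP      --NODE_FAILOVER_BEGIN_EVT-->  NODE_FAILINGOVER
    NODE_FAILINGOVER     --NODE_FAILOVER_END_EVT---->  SELF_UP_PEER_UP

Events listed as fall-through cases (e.g. a repeated SELF_ESTABL_CONTACT_EVT in SELF_UP_PEER_COMING) are deliberately ignored, while anything reaching the default labels is logged as illegal.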
+
+bool tipc_node_filter_pkt(struct tipc_node *n, struct tipc_msg *hdr)
+{
+	int state = n->state;
+
+	if (likely(state == SELF_UP_PEER_UP))
+		return true;
+
+	if (state == SELF_LEAVING_PEER_DOWN)
+		return false;
+
+	if (state == SELF_DOWN_PEER_LEAVING) {
+		if (msg_peer_node_is_up(hdr))
+			return false;
+	}
+
+	return true;
+}
+
 static void node_established_contact(struct tipc_node *n_ptr)
 {
+	tipc_node_fsm_evt(n_ptr, SELF_ESTABL_CONTACT_EVT);
 	n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
 	n_ptr->bclink.oos_state = 0;
 	n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net);
 	tipc_bclink_add_node(n_ptr->net, n_ptr->addr);
 }
 
-static void node_lost_contact(struct tipc_node *n_ptr)
+static void node_lost_contact(struct tipc_node *n_ptr,
+			      struct sk_buff_head *inputq)
 {
 	char addr_string[16];
 	struct tipc_sock_conn *conn, *safe;
+	struct tipc_link *l;
 	struct list_head *conns = &n_ptr->conn_sks;
 	struct sk_buff *skb;
 	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
@@ -396,21 +844,13 @@
 
 	/* Abort any ongoing link failover */
 	for (i = 0; i < MAX_BEARERS; i++) {
-		struct tipc_link *l_ptr = n_ptr->links[i];
-		if (!l_ptr)
-			continue;
-		l_ptr->flags &= ~LINK_FAILINGOVER;
-		l_ptr->failover_checkpt = 0;
-		l_ptr->failover_pkts = 0;
-		kfree_skb(l_ptr->failover_skb);
-		l_ptr->failover_skb = NULL;
-		tipc_link_reset_fragments(l_ptr);
+		l = n_ptr->links[i].link;
+		if (l)
+			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
 	}
 
-	n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
-
 	/* Prevent re-contact with node until cleanup is done */
-	n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN;
+	tipc_node_fsm_evt(n_ptr, SELF_LOST_CONTACT_EVT);
 
 	/* Notify publications from this node */
 	n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN;
@@ -421,10 +861,8 @@
 				      SHORT_H_SIZE, 0, tn->own_addr,
 				      conn->peer_node, conn->port,
 				      conn->peer_port, TIPC_ERR_NO_NODE);
-		if (likely(skb)) {
-			skb_queue_tail(n_ptr->inputq, skb);
-			n_ptr->action_flags |= TIPC_MSG_EVT;
-		}
+		if (likely(skb))
+			skb_queue_tail(inputq, skb);
 		list_del(&conn->list);
 		kfree(conn);
 	}
@@ -453,7 +891,7 @@
 		goto exit;
 
 	tipc_node_lock(node);
-	link = node->links[bearer_id];
+	link = node->links[bearer_id].link;
 	if (link) {
 		strncpy(linkname, link->name, len);
 		err = 0;
@@ -471,27 +909,20 @@
 	u32 flags = node->action_flags;
 	u32 link_id = 0;
 	struct list_head *publ_list;
-	struct sk_buff_head *inputq = node->inputq;
-	struct sk_buff_head *namedq;
 
-	if (likely(!flags || (flags == TIPC_MSG_EVT))) {
-		node->action_flags = 0;
+	if (likely(!flags)) {
 		spin_unlock_bh(&node->lock);
-		if (flags == TIPC_MSG_EVT)
-			tipc_sk_rcv(net, inputq);
 		return;
 	}
 
 	addr = node->addr;
 	link_id = node->link_id;
-	namedq = node->namedq;
 	publ_list = &node->publ_list;
 
-	node->action_flags &= ~(TIPC_MSG_EVT |
-				TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
+	node->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
 				TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
 				TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
-				TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET);
+				TIPC_BCAST_RESET);
 
 	spin_unlock_bh(&node->lock);
 
@@ -512,17 +943,11 @@
 		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
 				      link_id, addr);
 
-	if (flags & TIPC_MSG_EVT)
-		tipc_sk_rcv(net, inputq);
-
-	if (flags & TIPC_NAMED_MSG_EVT)
-		tipc_named_rcv(net, namedq);
-
 	if (flags & TIPC_BCAST_MSG_EVT)
 		tipc_bclink_input(net);
 
 	if (flags & TIPC_BCAST_RESET)
-		tipc_link_reset_all(node);
+		tipc_node_reset_links(node);
 }
 
 /* Caller should hold node lock for the passed node */
@@ -559,6 +984,290 @@
 	return -EMSGSIZE;
 }
 
+static struct tipc_link *tipc_node_select_link(struct tipc_node *n, int sel,
+					       int *bearer_id,
+					       struct tipc_media_addr **maddr)
+{
+	int id = n->active_links[sel & 1];
+
+	if (unlikely(id < 0))
+		return NULL;
+
+	*bearer_id = id;
+	*maddr = &n->links[id].maddr;
+	return n->links[id].link;
+}
+
+/**
+ * tipc_node_xmit() is the general link level function for message sending
+ * @net: the applicable net namespace
+ * @list: chain of buffers containing message
+ * @dnode: address of destination node
+ * @selector: a number used for deterministic link selection
+ * Consumes the buffer chain, except when returning -ELINKCONG
+ * Returns 0 on success, otherwise errno: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE
+ */
+int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
+		   u32 dnode, int selector)
+{
+	struct tipc_link *l = NULL;
+	struct tipc_node *n;
+	struct sk_buff_head xmitq;
+	struct tipc_media_addr *maddr;
+	int bearer_id;
+	int rc = -EHOSTUNREACH;
+
+	__skb_queue_head_init(&xmitq);
+	n = tipc_node_find(net, dnode);
+	if (likely(n)) {
+		tipc_node_lock(n);
+		l = tipc_node_select_link(n, selector, &bearer_id, &maddr);
+		if (likely(l))
+			rc = tipc_link_xmit(l, list, &xmitq);
+		tipc_node_unlock(n);
+		if (unlikely(rc == -ENOBUFS))
+			tipc_node_link_down(n, bearer_id, false);
+		tipc_node_put(n);
+	}
+	if (likely(!rc)) {
+		tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
+		return 0;
+	}
+	if (likely(in_own_node(net, dnode))) {
+		tipc_sk_rcv(net, list);
+		return 0;
+	}
+	return rc;
+}
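
The selection above is deterministic rather than round-robin: tipc_node_select_link() indexes active_links[sel & 1], and the socket code passes its port id as selector, so each port sticks to one of at most two active links, giving load sharing across equal-priority links without reordering any single flow. A minimal model, illustration only (names hypothetical):

    int active_links[2] = { 1, 2 };  /* two equal-priority bearers active */

    static int pick_bearer(int selector)
    {
            return active_links[selector & 1];
    }

    /* pick_bearer(42) -> 1 for every message from port 42;
     * pick_bearer(43) -> 2; a single flow never alternates links.
     */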
+
+/* tipc_node_xmit_skb(): send single buffer to destination
+ * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
+ * messages, which will not be rejected
+ * The only exception is datagram messages rerouted after secondary
+ * lookup, which are rare and safe to dispose of anyway.
+ * TODO: Return real return value, and let callers use
+ * tipc_wait_for_sendpkt() where applicable
+ */
+int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
+		       u32 selector)
+{
+	struct sk_buff_head head;
+	int rc;
+
+	skb_queue_head_init(&head);
+	__skb_queue_tail(&head, skb);
+	rc = tipc_node_xmit(net, &head, dnode, selector);
+	if (rc == -ELINKCONG)
+		kfree_skb(skb);
+	return 0;
+}
+
+/**
+ * tipc_node_check_state - check and if necessary update node state
+ * @skb: TIPC packet
+ * @bearer_id: identity of bearer delivering the packet
+ * Returns true if state is ok, otherwise consumes buffer and returns false
+ */
+static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
+				  int bearer_id, struct sk_buff_head *xmitq)
+{
+	struct tipc_msg *hdr = buf_msg(skb);
+	int usr = msg_user(hdr);
+	int mtyp = msg_type(hdr);
+	u16 oseqno = msg_seqno(hdr);
+	u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
+	u16 exp_pkts = msg_msgcnt(hdr);
+	u16 rcv_nxt, syncpt, dlv_nxt;
+	int state = n->state;
+	struct tipc_link *l, *tnl, *pl = NULL;
+	struct tipc_media_addr *maddr;
+	int i, pb_id;
+
+	l = n->links[bearer_id].link;
+	if (!l)
+		return false;
+	rcv_nxt = l->rcv_nxt;
+
+	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
+		return true;
+
+	/* Find parallel link, if any */
+	for (i = 0; i < MAX_BEARERS; i++) {
+		if ((i != bearer_id) && n->links[i].link) {
+			pl = n->links[i].link;
+			break;
+		}
+	}
+
+	/* Update node accessibility if applicable */
+	if (state == SELF_UP_PEER_COMING) {
+		if (!tipc_link_is_up(l))
+			return true;
+		if (!msg_peer_link_is_up(hdr))
+			return true;
+		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
+	}
+
+	if (state == SELF_DOWN_PEER_LEAVING) {
+		if (msg_peer_node_is_up(hdr))
+			return false;
+		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
+	}
+
+	/* Ignore duplicate packets */
+	if (less(oseqno, rcv_nxt))
+		return true;
+
+	/* Initiate or update failover mode if applicable */
+	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
+		syncpt = oseqno + exp_pkts - 1;
+		if (pl && tipc_link_is_up(pl)) {
+			pb_id = pl->bearer_id;
+			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
+			tipc_skb_queue_splice_tail_init(pl->inputq, l->inputq);
+		}
+		/* If pkts arrive out of order, use lowest calculated syncpt */
+		if (less(syncpt, n->sync_point))
+			n->sync_point = syncpt;
+	}
+
+	/* Open parallel link when tunnel link reaches synch point */
+	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
+		if (!more(rcv_nxt, n->sync_point))
+			return true;
+		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
+		if (pl)
+			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
+		return true;
+	}
+
+	/* No synching needed if only one link */
+	if (!pl || !tipc_link_is_up(pl))
+		return true;
+
+	/* Initiate or update synch mode if applicable */
+	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) {
+		syncpt = iseqno + exp_pkts - 1;
+		if (!tipc_link_is_up(l)) {
+			tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+			__tipc_node_link_up(n, bearer_id, xmitq);
+		}
+		if (n->state == SELF_UP_PEER_UP) {
+			n->sync_point = syncpt;
+			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
+			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
+		}
+		if (less(syncpt, n->sync_point))
+			n->sync_point = syncpt;
+	}
+
+	/* Open tunnel link when parallel link reaches synch point */
+	if ((n->state == NODE_SYNCHING) && tipc_link_is_synching(l)) {
+		if (tipc_link_is_synching(l)) {
+			tnl = l;
+		} else {
+			tnl = pl;
+			pl = l;
+		}
+		dlv_nxt = pl->rcv_nxt - mod(skb_queue_len(pl->inputq));
+		if (more(dlv_nxt, n->sync_point)) {
+			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
+			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
+			return true;
+		}
+		if (l == pl)
+			return true;
+		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
+			return true;
+		if (usr == LINK_PROTOCOL)
+			return true;
+		return false;
+	}
+	return true;
+}
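
A worked example of the failover arithmetic above: a FAILOVER_MSG with oseqno = 100 and msg_msgcnt = 5 yields syncpt = 100 + 5 - 1 = 104. The node then stays in NODE_FAILINGOVER until the tunnel link's rcv_nxt moves past 104, i.e. all five tunneled packets have been delivered, at which point NODE_FAILOVER_END_EVT fires and the parallel link is reopened.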
+
+/**
+ * tipc_rcv - process TIPC packets/messages arriving from off-node
+ * @net: the applicable net namespace
+ * @skb: TIPC packet
+ * @bearer: pointer to bearer message arrived on
+ *
+ * Invoked with no locks held. Bearer pointer must point to a valid bearer
+ * structure (i.e. cannot be NULL), but bearer can be inactive.
+ */
+void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
+{
+	struct sk_buff_head xmitq;
+	struct tipc_node *n;
+	struct tipc_msg *hdr = buf_msg(skb);
+	int usr = msg_user(hdr);
+	int bearer_id = b->identity;
+	struct tipc_link_entry *le;
+	int rc = 0;
+
+	__skb_queue_head_init(&xmitq);
+
+	/* Ensure message is well-formed */
+	if (unlikely(!tipc_msg_validate(skb)))
+		goto discard;
+
+	/* Handle arrival of a non-unicast link packet */
+	if (unlikely(msg_non_seq(hdr))) {
+		if (usr == LINK_CONFIG)
+			tipc_disc_rcv(net, skb, b);
+		else
+			tipc_bclink_rcv(net, skb);
+		return;
+	}
+
+	/* Locate neighboring node that sent packet */
+	n = tipc_node_find(net, msg_prevnode(hdr));
+	if (unlikely(!n))
+		goto discard;
+	le = &n->links[bearer_id];
+
+	tipc_node_lock(n);
+
+	/* Is reception permitted at the moment? */
+	if (!tipc_node_filter_pkt(n, hdr))
+		goto unlock;
+
+	if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
+		tipc_bclink_sync_state(n, hdr);
+
+	/* Release acked broadcast packets */
+	if (unlikely(n->bclink.acked != msg_bcast_ack(hdr)))
+		tipc_bclink_acknowledge(n, msg_bcast_ack(hdr));
+
+	/* Check and if necessary update node state */
+	if (likely(tipc_node_check_state(n, skb, bearer_id, &xmitq))) {
+		rc = tipc_link_rcv(le->link, skb, &xmitq);
+		skb = NULL;
+	}
+unlock:
+	tipc_node_unlock(n);
+
+	if (unlikely(rc & TIPC_LINK_UP_EVT))
+		tipc_node_link_up(n, bearer_id, &xmitq);
+
+	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
+		tipc_node_link_down(n, bearer_id, false);
+
+	if (unlikely(!skb_queue_empty(&n->bclink.namedq)))
+		tipc_named_rcv(net, &n->bclink.namedq);
+
+	if (!skb_queue_empty(&le->inputq))
+		tipc_sk_rcv(net, &le->inputq);
+
+	if (!skb_queue_empty(&xmitq))
+		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+
+	tipc_node_put(n);
+discard:
+	kfree_skb(skb);
+}
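
Note the ordering at the end of tipc_rcv(): tipc_node_link_up() and tipc_node_link_down() both take the node lock themselves, so the TIPC_LINK_UP_EVT and TIPC_LINK_DOWN_EVT bits returned by tipc_link_rcv() can only be acted on after the unlock above; tipc_node_timeout() follows the same pattern when tipc_link_timeout() reports a link down.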
+
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	int err;
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 5a834cf..344b3e7 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -45,23 +45,19 @@
 /* Out-of-range value for node signature */
 #define INVALID_NODE_SIG	0x10000
 
+#define INVALID_BEARER_ID -1
+
 /* Flags used to take different actions according to flag type
- * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
- * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
  * TIPC_NOTIFY_NODE_DOWN: notify node is down
  * TIPC_NOTIFY_NODE_UP: notify node is up
  * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
  */
 enum {
-	TIPC_MSG_EVT                    = 1,
-	TIPC_WAIT_PEER_LINKS_DOWN	= (1 << 1),
-	TIPC_WAIT_OWN_LINKS_DOWN	= (1 << 2),
 	TIPC_NOTIFY_NODE_DOWN		= (1 << 3),
 	TIPC_NOTIFY_NODE_UP		= (1 << 4),
 	TIPC_WAKEUP_BCAST_USERS		= (1 << 5),
 	TIPC_NOTIFY_LINK_UP		= (1 << 6),
 	TIPC_NOTIFY_LINK_DOWN		= (1 << 7),
-	TIPC_NAMED_MSG_EVT		= (1 << 8),
 	TIPC_BCAST_MSG_EVT		= (1 << 9),
 	TIPC_BCAST_RESET		= (1 << 10)
 };
@@ -85,10 +81,17 @@
 	u32 deferred_size;
 	struct sk_buff_head deferdq;
 	struct sk_buff *reasm_buf;
-	int inputq_map;
+	struct sk_buff_head namedq;
 	bool recv_permitted;
 };
 
+struct tipc_link_entry {
+	struct tipc_link *link;
+	u32 mtu;
+	struct sk_buff_head inputq;
+	struct tipc_media_addr maddr;
+};
+
 /**
  * struct tipc_node - TIPC node structure
  * @addr: network address of node
@@ -98,11 +101,12 @@
  * @hash: links to adjacent nodes in unsorted hash chain
  * @inputq: pointer to input queue containing messages for msg event
  * @namedq: pointer to name table input queue with name table messages
- * @curr_link: the link holding the node lock, if any
- * @active_links: pointers to active links to node
- * @links: pointers to all links to node
+ * @active_links: bearer ids of active links, used as index into links[] array
+ * @links: array containing references to all links to node
  * @action_flags: bit mask of different types of node actions
  * @bclink: broadcast-related info
+ * @state: connectivity state vs peer node
+ * @sync_point: sequence number where synch/failover is finished
  * @list: links to adjacent nodes in sorted list of cluster's nodes
  * @working_links: number of working links to node (both active and standby)
  * @link_cnt: number of links to node
@@ -118,14 +122,13 @@
 	spinlock_t lock;
 	struct net *net;
 	struct hlist_node hash;
-	struct sk_buff_head *inputq;
-	struct sk_buff_head *namedq;
-	struct tipc_link *active_links[2];
-	u32 act_mtus[2];
-	struct tipc_link *links[MAX_BEARERS];
+	int active_links[2];
+	struct tipc_link_entry links[MAX_BEARERS];
 	int action_flags;
 	struct tipc_node_bclink bclink;
 	struct list_head list;
+	int state;
+	u16 sync_point;
 	int link_cnt;
 	u16 working_links;
 	u16 capabilities;
@@ -133,25 +136,32 @@
 	u32 link_id;
 	struct list_head publ_list;
 	struct list_head conn_sks;
+	unsigned long keepalive_intv;
+	struct timer_list timer;
 	struct rcu_head rcu;
 };
 
 struct tipc_node *tipc_node_find(struct net *net, u32 addr);
 void tipc_node_put(struct tipc_node *node);
-struct tipc_node *tipc_node_create(struct net *net, u32 addr);
 void tipc_node_stop(struct net *net);
+void tipc_node_check_dest(struct net *net, u32 onode,
+			  struct tipc_bearer *bearer,
+			  u16 capabilities, u32 signature,
+			  struct tipc_media_addr *maddr,
+			  bool *respond, bool *dupl_addr);
+void tipc_node_delete_links(struct net *net, int bearer_id);
 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
 void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-int tipc_node_active_links(struct tipc_node *n_ptr);
-int tipc_node_is_up(struct tipc_node *n_ptr);
+bool tipc_node_is_up(struct tipc_node *n);
 int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node,
 			   char *linkname, size_t len);
 void tipc_node_unlock(struct tipc_node *node);
+int tipc_node_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
+		   int selector);
+int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
+		       u32 selector);
 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
-
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
 static inline void tipc_node_lock(struct tipc_node *node)
@@ -159,26 +169,30 @@
 	spin_lock_bh(&node->lock);
 }
 
-static inline bool tipc_node_blocked(struct tipc_node *node)
+static inline struct tipc_link *node_active_link(struct tipc_node *n, int sel)
 {
-	return (node->action_flags & (TIPC_WAIT_PEER_LINKS_DOWN |
-		TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
+	int bearer_id = n->active_links[sel & 1];
+
+	if (unlikely(bearer_id == INVALID_BEARER_ID))
+		return NULL;
+
+	return n->links[bearer_id].link;
 }
 
-static inline uint tipc_node_get_mtu(struct net *net, u32 addr, u32 selector)
+static inline unsigned int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
 {
-	struct tipc_node *node;
-	u32 mtu;
+	struct tipc_node *n;
+	int bearer_id;
+	unsigned int mtu = MAX_MSG_SIZE;
 
-	node = tipc_node_find(net, addr);
+	n = tipc_node_find(net, addr);
+	if (unlikely(!n))
+		return mtu;
 
-	if (likely(node)) {
-		mtu = node->act_mtus[selector & 1];
-		tipc_node_put(node);
-	} else {
-		mtu = MAX_MSG_SIZE;
-	}
-
+	bearer_id = n->active_links[sel & 1];
+	if (likely(bearer_id != INVALID_BEARER_ID))
+		mtu = n->links[bearer_id].mtu;
+	tipc_node_put(n);
 	return mtu;
 }
 
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3a7567f..1060d52 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -248,6 +248,22 @@
 	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
 }
 
+/* tipc_sk_respond(): send response message back to sender
+ */
+static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
+{
+	u32 selector;
+	u32 dnode;
+	u32 onode = tipc_own_addr(sock_net(sk));
+
+	if (!tipc_msg_reverse(onode, &skb, err))
+		return;
+
+	dnode = msg_destnode(buf_msg(skb));
+	selector = msg_origport(buf_msg(skb));
+	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
+}
+
 /**
  * tsk_rej_rx_queue - reject all buffers in socket receive queue
  *
@@ -256,13 +272,9 @@
 static void tsk_rej_rx_queue(struct sock *sk)
 {
 	struct sk_buff *skb;
-	u32 dnode;
-	u32 own_node = tsk_own_node(tipc_sk(sk));
 
-	while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
-		if (tipc_msg_reverse(own_node, skb, &dnode, TIPC_ERR_NO_PORT))
-			tipc_link_xmit_skb(sock_net(sk), skb, dnode, 0);
-	}
+	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
+		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
 }
 
 /* tsk_peer_msg - verify if message was sent by connected port's peer
@@ -441,9 +453,7 @@
 				tsk->connected = 0;
 				tipc_node_remove_conn(net, dnode, tsk->portid);
 			}
-			if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
-					     TIPC_ERR_NO_PORT))
-				tipc_link_xmit_skb(net, skb, dnode, 0);
+			tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
 		}
 	}
 
@@ -456,7 +466,7 @@
 				      tsk_own_node(tsk), tsk_peer_port(tsk),
 				      tsk->portid, TIPC_ERR_NO_PORT);
 		if (skb)
-			tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
 		tipc_node_remove_conn(net, dnode, tsk->portid);
 	}
 
@@ -686,21 +696,22 @@
 
 	do {
 		rc = tipc_bclink_xmit(net, pktchain);
-		if (likely(rc >= 0)) {
-			rc = dsz;
-			break;
+		if (likely(!rc))
+			return dsz;
+
+		if (rc == -ELINKCONG) {
+			tsk->link_cong = 1;
+			rc = tipc_wait_for_sndmsg(sock, &timeo);
+			if (!rc)
+				continue;
 		}
+		__skb_queue_purge(pktchain);
 		if (rc == -EMSGSIZE) {
 			msg->msg_iter = save;
 			goto new_mtu;
 		}
-		if (rc != -ELINKCONG)
-			break;
-		tipc_sk(sk)->link_cong = 1;
-		rc = tipc_wait_for_sndmsg(sock, &timeo);
-		if (rc)
-			__skb_queue_purge(pktchain);
-	} while (!rc);
+		break;
+	} while (1);
 	return rc;
 }
 
@@ -763,35 +774,35 @@
 /**
  * tipc_sk_proto_rcv - receive a connection mng protocol message
  * @tsk: receiving socket
- * @skb: pointer to message buffer. Set to NULL if buffer is consumed.
+ * @skb: pointer to message buffer.
  */
-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff **skb)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
 {
-	struct tipc_msg *msg = buf_msg(*skb);
+	struct sock *sk = &tsk->sk;
+	struct tipc_msg *hdr = buf_msg(skb);
+	int mtyp = msg_type(hdr);
 	int conn_cong;
-	u32 dnode;
-	u32 own_node = tsk_own_node(tsk);
+
 	/* Ignore if connection cannot be validated: */
-	if (!tsk_peer_msg(tsk, msg))
+	if (!tsk_peer_msg(tsk, hdr))
 		goto exit;
 
 	tsk->probing_state = TIPC_CONN_OK;
 
-	if (msg_type(msg) == CONN_ACK) {
+	if (mtyp == CONN_PROBE) {
+		msg_set_type(hdr, CONN_PROBE_REPLY);
+		tipc_sk_respond(sk, skb, TIPC_OK);
+		return;
+	} else if (mtyp == CONN_ACK) {
 		conn_cong = tsk_conn_cong(tsk);
-		tsk->sent_unacked -= msg_msgcnt(msg);
+		tsk->sent_unacked -= msg_msgcnt(hdr);
 		if (conn_cong)
-			tsk->sk.sk_write_space(&tsk->sk);
-	} else if (msg_type(msg) == CONN_PROBE) {
-		if (tipc_msg_reverse(own_node, *skb, &dnode, TIPC_OK)) {
-			msg_set_type(msg, CONN_PROBE_REPLY);
-			return;
-		}
+			sk->sk_write_space(sk);
+	} else if (mtyp != CONN_PROBE_REPLY) {
+		pr_warn("Received unknown CONN_PROTO msg\n");
 	}
-	/* Do nothing if msg_type() == CONN_PROBE_REPLY */
 exit:
-	kfree_skb(*skb);
-	*skb = NULL;
+	kfree_skb(skb);
 }
 
 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
@@ -924,24 +935,25 @@
 	do {
 		skb = skb_peek(pktchain);
 		TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
-		rc = tipc_link_xmit(net, pktchain, dnode, tsk->portid);
-		if (likely(rc >= 0)) {
+		rc = tipc_node_xmit(net, pktchain, dnode, tsk->portid);
+		if (likely(!rc)) {
 			if (sock->state != SS_READY)
 				sock->state = SS_CONNECTING;
-			rc = dsz;
-			break;
+			return dsz;
 		}
+		if (rc == -ELINKCONG) {
+			tsk->link_cong = 1;
+			rc = tipc_wait_for_sndmsg(sock, &timeo);
+			if (!rc)
+				continue;
+		}
+		__skb_queue_purge(pktchain);
 		if (rc == -EMSGSIZE) {
 			m->msg_iter = save;
 			goto new_mtu;
 		}
-		if (rc != -ELINKCONG)
-			break;
-		tsk->link_cong = 1;
-		rc = tipc_wait_for_sndmsg(sock, &timeo);
-		if (rc)
-			__skb_queue_purge(pktchain);
-	} while (!rc);
+		break;
+	} while (1);
 
 	return rc;
 }
@@ -1043,15 +1055,16 @@
 		return rc;
 	do {
 		if (likely(!tsk_conn_cong(tsk))) {
-			rc = tipc_link_xmit(net, pktchain, dnode, portid);
+			rc = tipc_node_xmit(net, pktchain, dnode, portid);
 			if (likely(!rc)) {
 				tsk->sent_unacked++;
 				sent += send;
 				if (sent == dsz)
-					break;
+					return dsz;
 				goto next;
 			}
 			if (rc == -EMSGSIZE) {
+				__skb_queue_purge(pktchain);
 				tsk->max_pkt = tipc_node_get_mtu(net, dnode,
 								 portid);
 				m->msg_iter = save;
@@ -1059,13 +1072,13 @@
 			}
 			if (rc != -ELINKCONG)
 				break;
+
 			tsk->link_cong = 1;
 		}
 		rc = tipc_wait_for_sndpkt(sock, &timeo);
-		if (rc)
-			__skb_queue_purge(pktchain);
 	} while (!rc);
 
+	__skb_queue_purge(pktchain);
 	return sent ? sent : rc;
 }
 
@@ -1221,7 +1234,7 @@
 		return;
 	msg = buf_msg(skb);
 	msg_set_msgcnt(msg, ack);
-	tipc_link_xmit_skb(net, skb, dnode, msg_link_selector(msg));
+	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
 }
 
 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
@@ -1507,82 +1520,81 @@
  * @tsk: TIPC socket
- * @skb: pointer to message buffer. Set to NULL if buffer is consumed
+ * @skb: pointer to message buffer
  *
- * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
+ * Returns true if everything ok, false otherwise
  */
-static int filter_connect(struct tipc_sock *tsk, struct sk_buff **skb)
+static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
 {
 	struct sock *sk = &tsk->sk;
 	struct net *net = sock_net(sk);
 	struct socket *sock = sk->sk_socket;
-	struct tipc_msg *msg = buf_msg(*skb);
-	int retval = -TIPC_ERR_NO_PORT;
+	struct tipc_msg *hdr = buf_msg(skb);
 
-	if (msg_mcast(msg))
-		return retval;
+	if (unlikely(msg_mcast(hdr)))
+		return false;
 
 	switch ((int)sock->state) {
 	case SS_CONNECTED:
+
 		/* Accept only connection-based messages sent by peer */
-		if (tsk_peer_msg(tsk, msg)) {
-			if (unlikely(msg_errcode(msg))) {
-				sock->state = SS_DISCONNECTING;
-				tsk->connected = 0;
-				/* let timer expire on it's own */
-				tipc_node_remove_conn(net, tsk_peer_node(tsk),
-						      tsk->portid);
-			}
-			retval = TIPC_OK;
+		if (unlikely(!tsk_peer_msg(tsk, hdr)))
+			return false;
+
+		if (unlikely(msg_errcode(hdr))) {
+			sock->state = SS_DISCONNECTING;
+			tsk->connected = 0;
+			/* Let timer expire on its own */
+			tipc_node_remove_conn(net, tsk_peer_node(tsk),
+					      tsk->portid);
 		}
-		break;
+		return true;
+
 	case SS_CONNECTING:
+
 		/* Accept only ACK or NACK message */
+		if (unlikely(!msg_connected(hdr)))
+			return false;
 
-		if (unlikely(!msg_connected(msg)))
-			break;
-
-		if (unlikely(msg_errcode(msg))) {
+		if (unlikely(msg_errcode(hdr))) {
 			sock->state = SS_DISCONNECTING;
 			sk->sk_err = ECONNREFUSED;
-			retval = TIPC_OK;
-			break;
+			return true;
 		}
 
-		if (unlikely(msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)) {
+		if (unlikely(!msg_isdata(hdr))) {
 			sock->state = SS_DISCONNECTING;
 			sk->sk_err = EINVAL;
-			retval = TIPC_OK;
-			break;
+			return true;
 		}
 
-		tipc_sk_finish_conn(tsk, msg_origport(msg), msg_orignode(msg));
-		msg_set_importance(&tsk->phdr, msg_importance(msg));
+		tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
+		msg_set_importance(&tsk->phdr, msg_importance(hdr));
 		sock->state = SS_CONNECTED;
 
-		/* If an incoming message is an 'ACK-', it should be
-		 * discarded here because it doesn't contain useful
-		 * data. In addition, we should try to wake up
-		 * connect() routine if sleeping.
-		 */
-		if (msg_data_sz(msg) == 0) {
-			kfree_skb(*skb);
-			*skb = NULL;
-			if (waitqueue_active(sk_sleep(sk)))
-				wake_up_interruptible(sk_sleep(sk));
-		}
-		retval = TIPC_OK;
-		break;
+		/* If 'ACK+' message, add to socket receive queue */
+		if (msg_data_sz(hdr))
+			return true;
+
+		/* If empty 'ACK-' message, wake up sleeping connect() */
+		if (waitqueue_active(sk_sleep(sk)))
+			wake_up_interruptible(sk_sleep(sk));
+
+		/* 'ACK-' message is neither accepted nor rejected: */
+		msg_set_dest_droppable(hdr, 1);
+		return false;
+
 	case SS_LISTENING:
 	case SS_UNCONNECTED:
+
 		/* Accept only SYN message */
-		if (!msg_connected(msg) && !(msg_errcode(msg)))
-			retval = TIPC_OK;
+		if (!msg_connected(hdr) && !(msg_errcode(hdr)))
+			return true;
 		break;
 	case SS_DISCONNECTING:
 		break;
 	default:
 		pr_err("Unknown socket state %u\n", sock->state);
 	}
-	return retval;
+	return false;
 }
 
 /**
@@ -1617,61 +1629,70 @@
 /**
  * filter_rcv - validate incoming message
  * @sk: socket
- * @skb: pointer to message. Set to NULL if buffer is consumed.
+ * @skb: pointer to message.
  *
  * Enqueues message on receive queue if acceptable; optionally handles
  * disconnect indication for a connected socket.
  *
  * Called with socket lock already taken
  *
- * Returns 0 (TIPC_OK) if message was ok, -TIPC error code if rejected
+ * Returns true if message was added to socket receive queue, otherwise false
  */
-static int filter_rcv(struct sock *sk, struct sk_buff **skb)
+static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct socket *sock = sk->sk_socket;
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_msg *msg = buf_msg(*skb);
-	unsigned int limit = rcvbuf_limit(sk, *skb);
-	int rc = TIPC_OK;
+	struct tipc_msg *hdr = buf_msg(skb);
+	unsigned int limit = rcvbuf_limit(sk, skb);
+	int err = TIPC_OK;
+	int usr = msg_user(hdr);
 
-	if (unlikely(msg_user(msg) == CONN_MANAGER)) {
+	if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
 		tipc_sk_proto_rcv(tsk, skb);
-		return TIPC_OK;
+		return false;
 	}
 
-	if (unlikely(msg_user(msg) == SOCK_WAKEUP)) {
-		kfree_skb(*skb);
+	if (unlikely(usr == SOCK_WAKEUP)) {
+		kfree_skb(skb);
 		tsk->link_cong = 0;
 		sk->sk_write_space(sk);
-		*skb = NULL;
-		return TIPC_OK;
+		return false;
 	}
 
-	/* Reject message if it is wrong sort of message for socket */
-	if (msg_type(msg) > TIPC_DIRECT_MSG)
-		return -TIPC_ERR_NO_PORT;
+	/* Drop if illegal message type */
+	if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
+		kfree_skb(skb);
+		return false;
+	}
 
-	if (sock->state == SS_READY) {
-		if (msg_connected(msg))
-			return -TIPC_ERR_NO_PORT;
-	} else {
-		rc = filter_connect(tsk, skb);
-		if (rc != TIPC_OK || !*skb)
-			return rc;
+	/* Reject if wrong message type for current socket state */
+	if (unlikely(sock->state == SS_READY)) {
+		if (msg_connected(hdr)) {
+			err = TIPC_ERR_NO_PORT;
+			goto reject;
+		}
+	} else if (unlikely(!filter_connect(tsk, skb))) {
+		err = TIPC_ERR_NO_PORT;
+		goto reject;
 	}
 
 	/* Reject message if there isn't room to queue it */
-	if (sk_rmem_alloc_get(sk) + (*skb)->truesize >= limit)
-		return -TIPC_ERR_OVERLOAD;
+	if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
+		err = TIPC_ERR_OVERLOAD;
+		goto reject;
+	}
 
 	/* Enqueue message */
-	TIPC_SKB_CB(*skb)->handle = NULL;
-	__skb_queue_tail(&sk->sk_receive_queue, *skb);
-	skb_set_owner_r(*skb, sk);
+	TIPC_SKB_CB(skb)->handle = NULL;
+	__skb_queue_tail(&sk->sk_receive_queue, skb);
+	skb_set_owner_r(skb, sk);
 
 	sk->sk_data_ready(sk);
-	*skb = NULL;
-	return TIPC_OK;
+	return true;
+
+reject:
+	tipc_sk_respond(sk, skb, err);
+	return false;
 }
 
 /**
@@ -1685,22 +1706,10 @@
  */
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
-	int err;
-	atomic_t *dcnt;
-	u32 dnode;
-	struct tipc_sock *tsk = tipc_sk(sk);
-	struct net *net = sock_net(sk);
-	uint truesize = skb->truesize;
+	unsigned int truesize = skb->truesize;
 
-	err = filter_rcv(sk, &skb);
-	if (likely(!skb)) {
-		dcnt = &tsk->dupl_rcvcnt;
-		if (atomic_read(dcnt) < TIPC_CONN_OVERLOAD_LIMIT)
-			atomic_add(truesize, dcnt);
-		return 0;
-	}
-	if (!err || tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode, -err))
-		tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+	if (likely(filter_rcv(sk, skb)))
+		atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
 	return 0;
 }
 
@@ -1710,45 +1719,43 @@
  * @inputq: list of incoming buffers with potentially different destinations
  * @sk: socket where the buffers should be enqueued
  * @dport: port number for the socket
- * @_skb: returned buffer to be forwarded or rejected, if applicable
  *
  * Caller must hold socket lock
- *
- * Returns TIPC_OK if all buffers enqueued, otherwise -TIPC_ERR_OVERLOAD
- * or -TIPC_ERR_NO_PORT
  */
-static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
-			   u32 dport, struct sk_buff **_skb)
+static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
+			    u32 dport)
 {
 	unsigned int lim;
 	atomic_t *dcnt;
-	int err;
 	struct sk_buff *skb;
 	unsigned long time_limit = jiffies + 2;
 
 	while (skb_queue_len(inputq)) {
 		if (unlikely(time_after_eq(jiffies, time_limit)))
-			return TIPC_OK;
+			return;
+
 		skb = tipc_skb_dequeue(inputq, dport);
 		if (unlikely(!skb))
-			return TIPC_OK;
+			return;
+
+		/* Add message directly to receive queue if possible */
 		if (!sock_owned_by_user(sk)) {
-			err = filter_rcv(sk, &skb);
-			if (likely(!skb))
-				continue;
-			*_skb = skb;
-			return err;
+			filter_rcv(sk, skb);
+			continue;
 		}
+
+		/* Try backlog, compensating for double-counted bytes */
 		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
 		if (sk->sk_backlog.len)
 			atomic_set(dcnt, 0);
 		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
 		if (likely(!sk_add_backlog(sk, skb, lim)))
 			continue;
-		*_skb = skb;
-		return -TIPC_ERR_OVERLOAD;
+
+		/* Overload => reject message back to sender */
+		tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
+		break;
 	}
-	return TIPC_OK;
 }
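
On the "double-counted bytes" above, as read from filter_rcv() and tipc_backlog_rcv(): when the socket is user-owned, accepted buffers go via the backlog, and tipc_backlog_rcv() adds their truesize to dupl_rcvcnt once filter_rcv() moves them to the receive queue, where the same bytes are charged again against sk_rmem_alloc. Widening the sk_add_backlog() limit by dupl_rcvcnt keeps that transient duplication from spuriously triggering TIPC_ERR_OVERLOAD rejections.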
 
 /**
@@ -1756,49 +1763,46 @@
  * @inputq: buffer list containing the buffers
  * Consumes all buffers in list until inputq is empty
  * Note: may be called in multiple threads referring to the same queue
- * Returns 0 if last buffer was accepted, otherwise -EHOSTUNREACH
- * Only node local calls check the return value, sending single-buffer queues
  */
-int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
+void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
 {
 	u32 dnode, dport = 0;
 	int err;
-	struct sk_buff *skb;
 	struct tipc_sock *tsk;
-	struct tipc_net *tn;
 	struct sock *sk;
+	struct sk_buff *skb;
 
 	while (skb_queue_len(inputq)) {
-		err = -TIPC_ERR_NO_PORT;
-		skb = NULL;
 		dport = tipc_skb_peek_port(inputq, dport);
 		tsk = tipc_sk_lookup(net, dport);
+
 		if (likely(tsk)) {
 			sk = &tsk->sk;
 			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
-				err = tipc_sk_enqueue(inputq, sk, dport, &skb);
+				tipc_sk_enqueue(inputq, sk, dport);
 				spin_unlock_bh(&sk->sk_lock.slock);
-				dport = 0;
 			}
 			sock_put(sk);
-		} else {
-			skb = tipc_skb_dequeue(inputq, dport);
-		}
-		if (likely(!skb))
 			continue;
-		if (tipc_msg_lookup_dest(net, skb, &dnode, &err))
-			goto xmit;
-		if (!err) {
-			dnode = msg_destnode(buf_msg(skb));
-			goto xmit;
 		}
-		tn = net_generic(net, tipc_net_id);
-		if (!tipc_msg_reverse(tn->own_addr, skb, &dnode, -err))
+
+		/* No destination socket => dequeue skb if still there */
+		skb = tipc_skb_dequeue(inputq, dport);
+		if (!skb)
+			return;
+
+		/* Try secondary lookup if unresolved named message */
+		err = TIPC_ERR_NO_PORT;
+		if (tipc_msg_lookup_dest(net, skb, &err))
+			goto xmit;
+
+		/* Prepare for message rejection */
+		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
 			continue;
 xmit:
-		tipc_link_xmit_skb(net, skb, dnode, dport);
+		dnode = msg_destnode(buf_msg(skb));
+		tipc_node_xmit_skb(net, skb, dnode, dport);
 	}
-	return err ? -EHOSTUNREACH : 0;
 }
 
 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
@@ -2067,7 +2071,10 @@
 	struct net *net = sock_net(sk);
 	struct tipc_sock *tsk = tipc_sk(sk);
 	struct sk_buff *skb;
-	u32 dnode;
+	u32 dnode = tsk_peer_node(tsk);
+	u32 dport = tsk_peer_port(tsk);
+	u32 onode = tipc_own_addr(net);
+	u32 oport = tsk->portid;
 	int res;
 
 	if (how != SHUT_RDWR)
@@ -2080,6 +2087,8 @@
 	case SS_CONNECTED:
 
 restart:
+		dnode = tsk_peer_node(tsk);
+
 		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
 		skb = __skb_dequeue(&sk->sk_receive_queue);
 		if (skb) {
@@ -2087,19 +2096,13 @@
 				kfree_skb(skb);
 				goto restart;
 			}
-			if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
-					     TIPC_CONN_SHUTDOWN))
-				tipc_link_xmit_skb(net, skb, dnode,
-						   tsk->portid);
+			tipc_sk_respond(sk, skb, TIPC_CONN_SHUTDOWN);
 		} else {
-			dnode = tsk_peer_node(tsk);
-
 			skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
 					      TIPC_CONN_MSG, SHORT_H_SIZE,
-					      0, dnode, tsk_own_node(tsk),
-					      tsk_peer_port(tsk),
-					      tsk->portid, TIPC_CONN_SHUTDOWN);
-			tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+					      0, dnode, onode, dport, oport,
+					      TIPC_CONN_SHUTDOWN);
+			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
 		}
 		tsk->connected = 0;
 		sock->state = SS_DISCONNECTING;
@@ -2161,7 +2164,7 @@
 	}
 	bh_unlock_sock(sk);
 	if (skb)
-		tipc_link_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
+		tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
 exit:
 	sock_put(sk);
 }
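
The tipc_sk_rcv() rewrite above stops propagating an error code to the caller: a backlog overload or an unresolvable destination now causes the message to be turned around and rejected to its sender. A minimal sketch of that shape, reusing the helpers visible in the diff (tipc_sk_respond, tipc_msg_reverse, tipc_node_xmit_skb); sock_accepts() and deliver() are hypothetical stand-ins for the enqueue path:

	/* Illustrative sketch only -- not the actual kernel function. */
	static void rcv_one(struct net *net, struct sock *sk,
			    struct sk_buff *skb, u32 dport)
	{
		int err = TIPC_ERR_NO_PORT;

		if (sk && sock_accepts(sk, skb)) {	/* hypothetical */
			deliver(sk, skb);		/* hypothetical */
			return;
		}
		/* No receiver: reverse the header and bounce the message
		 * back, instead of returning an error the caller of
		 * tipc_sk_rcv() could not act on anyway.
		 */
		if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
			tipc_node_xmit_skb(net, skb,
					   msg_destnode(buf_msg(skb)), dport);
	}
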
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index bf65513..4241f22 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -44,7 +44,7 @@
 				  SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
 int tipc_socket_init(void);
 void tipc_socket_stop(void);
-int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
+void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
 		       struct sk_buff_head *inputq);
 void tipc_sk_reinit(struct net *net);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 66deebc..c170d31 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -194,7 +194,8 @@
 			.saddr = src->ipv6,
 			.flowi6_proto = IPPROTO_UDP
 		};
-		err = ipv6_stub->ipv6_dst_lookup(ub->ubsock->sk, &ndst, &fl6);
+		err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, &ndst,
+						 &fl6);
 		if (err)
 			goto tx_error;
 		ttl = ip6_dst_hoplimit(ndst);
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index 7d73054..477364a 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -135,8 +135,7 @@
  * @state: New state of the RF kill switch. %WIMAX_RF_ON radio on,
  *     %WIMAX_RF_OFF radio off.
  *
- * Reports changes in the software RF switch state to the the WiMAX
- * stack.
+ * Reports changes in the software RF switch state to the WiMAX stack.
  *
  * The main use is during initialization, so the driver can query the
  * device for its current software radio kill switch state and feed it
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 2a0bbd2..3893409 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -407,6 +407,9 @@
 	INIT_LIST_HEAD(&rdev->bss_list);
 	INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
 	INIT_WORK(&rdev->sched_scan_results_wk, __cfg80211_sched_scan_results);
+	INIT_LIST_HEAD(&rdev->mlme_unreg);
+	spin_lock_init(&rdev->mlme_unreg_lock);
+	INIT_WORK(&rdev->mlme_unreg_wk, cfg80211_mlme_unreg_wk);
 	INIT_DELAYED_WORK(&rdev->dfs_update_channels_wk,
 			  cfg80211_dfs_channels_update_work);
 #ifdef CONFIG_CFG80211_WEXT
@@ -802,6 +805,7 @@
 	cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
 	flush_work(&rdev->destroy_work);
 	flush_work(&rdev->sched_scan_stop_wk);
+	flush_work(&rdev->mlme_unreg_wk);
 
 #ifdef CONFIG_PM
 	if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
@@ -855,6 +859,7 @@
 
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_P2P_DEVICE:
+		cfg80211_mlme_purge_registrations(wdev);
 		cfg80211_stop_p2p_device(rdev, wdev);
 		break;
 	default:
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 311eef2..b9d5bc8 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -59,6 +59,10 @@
 	struct list_head beacon_registrations;
 	spinlock_t beacon_registrations_lock;
 
+	struct list_head mlme_unreg;
+	spinlock_t mlme_unreg_lock;
+	struct work_struct mlme_unreg_wk;
+
 	/* protected by RTNL only */
 	int num_running_ifaces;
 	int num_running_monitor_ifaces;
@@ -348,6 +352,7 @@
 int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
 				u16 frame_type, const u8 *match_data,
 				int match_len);
+void cfg80211_mlme_unreg_wk(struct work_struct *wk);
 void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid);
 void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev);
 int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 7aae329..fb44fa3 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -2,6 +2,7 @@
  * cfg80211 MLME SAP interface
  *
  * Copyright (c) 2009, Jouni Malinen <j@w1.fi>
+ * Copyright (c) 2015		Intel Deutschland GmbH
  */
 
 #include <linux/kernel.h>
@@ -389,6 +390,7 @@
 
 struct cfg80211_mgmt_registration {
 	struct list_head list;
+	struct wireless_dev *wdev;
 
 	u32 nlportid;
 
@@ -399,6 +401,46 @@
 	u8 match[];
 };
 
+static void
+cfg80211_process_mlme_unregistrations(struct cfg80211_registered_device *rdev)
+{
+	struct cfg80211_mgmt_registration *reg;
+
+	ASSERT_RTNL();
+
+	spin_lock_bh(&rdev->mlme_unreg_lock);
+	while ((reg = list_first_entry_or_null(&rdev->mlme_unreg,
+					       struct cfg80211_mgmt_registration,
+					       list))) {
+		list_del(&reg->list);
+		spin_unlock_bh(&rdev->mlme_unreg_lock);
+
+		if (rdev->ops->mgmt_frame_register) {
+			u16 frame_type = le16_to_cpu(reg->frame_type);
+
+			rdev_mgmt_frame_register(rdev, reg->wdev,
+						 frame_type, false);
+		}
+
+		kfree(reg);
+
+		spin_lock_bh(&rdev->mlme_unreg_lock);
+	}
+	spin_unlock_bh(&rdev->mlme_unreg_lock);
+}
+
+void cfg80211_mlme_unreg_wk(struct work_struct *wk)
+{
+	struct cfg80211_registered_device *rdev;
+
+	rdev = container_of(wk, struct cfg80211_registered_device,
+			    mlme_unreg_wk);
+
+	rtnl_lock();
+	cfg80211_process_mlme_unregistrations(rdev);
+	rtnl_unlock();
+}
+
 int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
 				u16 frame_type, const u8 *match_data,
 				int match_len)
@@ -449,11 +491,18 @@
 	nreg->match_len = match_len;
 	nreg->nlportid = snd_portid;
 	nreg->frame_type = cpu_to_le16(frame_type);
+	nreg->wdev = wdev;
 	list_add(&nreg->list, &wdev->mgmt_registrations);
+	spin_unlock_bh(&wdev->mgmt_registrations_lock);
+
+	/* process all unregistrations to avoid driver confusion */
+	cfg80211_process_mlme_unregistrations(rdev);
 
 	if (rdev->ops->mgmt_frame_register)
 		rdev_mgmt_frame_register(rdev, wdev, frame_type, true);
 
+	return 0;
+
  out:
 	spin_unlock_bh(&wdev->mgmt_registrations_lock);
 
@@ -472,15 +521,12 @@
 		if (reg->nlportid != nlportid)
 			continue;
 
-		if (rdev->ops->mgmt_frame_register) {
-			u16 frame_type = le16_to_cpu(reg->frame_type);
-
-			rdev_mgmt_frame_register(rdev, wdev,
-						 frame_type, false);
-		}
-
 		list_del(&reg->list);
-		kfree(reg);
+		spin_lock(&rdev->mlme_unreg_lock);
+		list_add_tail(&reg->list, &rdev->mlme_unreg);
+		spin_unlock(&rdev->mlme_unreg_lock);
+
+		schedule_work(&rdev->mlme_unreg_wk);
 	}
 
 	spin_unlock_bh(&wdev->mgmt_registrations_lock);
@@ -496,16 +542,15 @@
 
 void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
 {
-	struct cfg80211_mgmt_registration *reg, *tmp;
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
 	spin_lock_bh(&wdev->mgmt_registrations_lock);
-
-	list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
-		list_del(&reg->list);
-		kfree(reg);
-	}
-
+	spin_lock(&rdev->mlme_unreg_lock);
+	list_splice_tail_init(&wdev->mgmt_registrations, &rdev->mlme_unreg);
+	spin_unlock(&rdev->mlme_unreg_lock);
 	spin_unlock_bh(&wdev->mgmt_registrations_lock);
+
+	cfg80211_process_mlme_unregistrations(rdev);
 }
 
 int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
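
The deferred unregistration machinery above exists because the driver's mgmt_frame_register callback may sleep (rdev-ops.h below now asserts might_sleep()), while cfg80211_mlme_unregister_socket runs under a spinlock. Registrations are therefore parked on rdev->mlme_unreg and the driver call happens later from a work item holding the RTNL. A self-contained sketch of the same defer-to-work pattern, using only stock list/workqueue APIs (names here are illustrative, not cfg80211's):

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct deferred {
		struct list_head list;
	};

	static LIST_HEAD(pending);
	static DEFINE_SPINLOCK(pending_lock);

	static void process_wk(struct work_struct *wk)
	{
		struct deferred *d;

		spin_lock_bh(&pending_lock);
		while ((d = list_first_entry_or_null(&pending,
						     struct deferred, list))) {
			list_del(&d->list);
			/* drop the lock: the callback may sleep */
			spin_unlock_bh(&pending_lock);
			sleeping_driver_callback(d);	/* hypothetical */
			kfree(d);
			spin_lock_bh(&pending_lock);
		}
		spin_unlock_bh(&pending_lock);
	}
	static DECLARE_WORK(process_work, process_wk);

	/* Callable from atomic context: queue the item, kick the worker */
	static void defer(struct deferred *d)
	{
		spin_lock(&pending_lock);
		list_add_tail(&d->list, &pending);
		spin_unlock(&pending_lock);
		schedule_work(&process_work);
	}
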
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 76b4157..5d8748b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2321,6 +2321,7 @@
 			rdev->wiphy.frag_threshold = old_frag_threshold;
 			rdev->wiphy.rts_threshold = old_rts_threshold;
 			rdev->wiphy.coverage_class = old_coverage_class;
+			return result;
 		}
 	}
 	return 0;
@@ -7390,7 +7391,8 @@
 	int err;
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB)
 		return -EOPNOTSUPP;
 
 	if (!rdev->ops->set_mcast_rate)
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index c6e83a7..c23516d 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -733,6 +733,8 @@
 rdev_mgmt_frame_register(struct cfg80211_registered_device *rdev,
 			 struct wireless_dev *wdev, u16 frame_type, bool reg)
 {
+	might_sleep();
+
 	trace_rdev_mgmt_frame_register(&rdev->wiphy, wdev , frame_type, reg);
 	rdev->ops->mgmt_frame_register(&rdev->wiphy, wdev , frame_type, reg);
 	trace_rdev_return_void(&rdev->wiphy);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index aa2d754..b144485 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1004,7 +1004,7 @@
 
 static const struct ieee80211_reg_rule *
 freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
-		   const struct ieee80211_regdomain *regd)
+		   const struct ieee80211_regdomain *regd, u32 bw)
 {
 	int i;
 	bool band_rule_found = false;
@@ -1028,7 +1028,7 @@
 		if (!band_rule_found)
 			band_rule_found = freq_in_rule_band(fr, center_freq);
 
-		bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
+		bw_fits = reg_does_bw_fit(fr, center_freq, bw);
 
 		if (band_rule_found && bw_fits)
 			return rr;
@@ -1040,14 +1040,26 @@
 	return ERR_PTR(-EINVAL);
 }
 
+const struct ieee80211_reg_rule *__freq_reg_info(struct wiphy *wiphy,
+						 u32 center_freq, u32 min_bw)
+{
+	const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy);
+	const struct ieee80211_reg_rule *reg_rule = NULL;
+	u32 bw;
+
+	for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) {
+		reg_rule = freq_reg_info_regd(wiphy, center_freq, regd, bw);
+		if (!IS_ERR(reg_rule))
+			return reg_rule;
+	}
+
+	return reg_rule;
+}
+
 const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
 					       u32 center_freq)
 {
-	const struct ieee80211_regdomain *regd;
-
-	regd = reg_get_regdomain(wiphy);
-
-	return freq_reg_info_regd(wiphy, center_freq, regd);
+	return __freq_reg_info(wiphy, center_freq, MHZ_TO_KHZ(20));
 }
 EXPORT_SYMBOL(freq_reg_info);
 
@@ -1176,8 +1188,20 @@
 	if (reg_rule->flags & NL80211_RRF_AUTO_BW)
 		max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
+	/* If we get a reg_rule we can assume that at least 5 MHz fits */
+	if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+			     MHZ_TO_KHZ(10)))
+		bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+	if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+			     MHZ_TO_KHZ(20)))
+		bw_flags |= IEEE80211_CHAN_NO_20MHZ;
+
+	if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+		bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+	if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+		bw_flags |= IEEE80211_CHAN_NO_20MHZ;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-		bw_flags = IEEE80211_CHAN_NO_HT40;
+		bw_flags |= IEEE80211_CHAN_NO_HT40;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(80))
 		bw_flags |= IEEE80211_CHAN_NO_80MHZ;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1695,9 +1719,15 @@
 	const struct ieee80211_power_rule *power_rule = NULL;
 	const struct ieee80211_freq_range *freq_range = NULL;
 	u32 max_bandwidth_khz;
+	u32 bw;
 
-	reg_rule = freq_reg_info_regd(wiphy, MHZ_TO_KHZ(chan->center_freq),
-				      regd);
+	for (bw = MHZ_TO_KHZ(20); bw >= MHZ_TO_KHZ(5); bw = bw / 2) {
+		reg_rule = freq_reg_info_regd(wiphy,
+					      MHZ_TO_KHZ(chan->center_freq),
+					      regd, bw);
+		if (!IS_ERR(reg_rule))
+			break;
+	}
 
 	if (IS_ERR(reg_rule)) {
 		REG_DBG_PRINT("Disabling freq %d MHz as custom regd has no rule that fits it\n",
@@ -1721,8 +1751,20 @@
 	if (reg_rule->flags & NL80211_RRF_AUTO_BW)
 		max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
+	/* If we get a reg_rule we can assume that at least 5 MHz fits */
+	if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+			     MHZ_TO_KHZ(10)))
+		bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+	if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+			     MHZ_TO_KHZ(20)))
+		bw_flags |= IEEE80211_CHAN_NO_20MHZ;
+
+	if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+		bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+	if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+		bw_flags |= IEEE80211_CHAN_NO_20MHZ;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-		bw_flags = IEEE80211_CHAN_NO_HT40;
+		bw_flags |= IEEE80211_CHAN_NO_HT40;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(80))
 		bw_flags |= IEEE80211_CHAN_NO_80MHZ;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -2079,10 +2121,7 @@
 		reg_process_hint_core(reg_request);
 		return;
 	case NL80211_REGDOM_SET_BY_USER:
-		treatment = reg_process_hint_user(reg_request);
-		if (treatment == REG_REQ_IGNORE ||
-		    treatment == REG_REQ_ALREADY_SET)
-			return;
+		reg_process_hint_user(reg_request);
 		return;
 	case NL80211_REGDOM_SET_BY_DRIVER:
 		if (!wiphy)
@@ -2099,7 +2138,9 @@
 		goto out_free;
 	}
 
-	/* This is required so that the orig_* parameters are saved */
+	/* This is required so that the orig_* parameters are saved.
+	 * NOTE: treatment must be set for any case that reaches here!
+	 */
 	if (treatment == REG_REQ_ALREADY_SET && wiphy &&
 	    wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
 		wiphy_update_regulatory(wiphy, reg_request->initiator);
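
Net effect of the reg.c changes: freq_reg_info() used to ask only whether 20 MHz fits a rule, so channels limited to narrower operation found no rule at all. The new __freq_reg_info() retries with halved bandwidths, and the NO_10MHZ/NO_20MHZ flags are then derived from what actually fits. The core of the fallback, condensed from the diff (illustrative, not a drop-in):

	const struct ieee80211_reg_rule *rule = ERR_PTR(-EINVAL);
	u32 bw;

	/* Probe 20, 10, then 5 MHz; the widest bandwidth that fits wins. */
	for (bw = MHZ_TO_KHZ(20); bw >= MHZ_TO_KHZ(5); bw /= 2) {
		rule = freq_reg_info_regd(wiphy, center_freq, regd, bw);
		if (!IS_ERR(rule))
			break;
	}
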
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 18cead7..94af3d0 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -115,7 +115,8 @@
 	rcu_read_unlock();
 }
 
-static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
+static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
+						  int tos, int oif,
 						  const xfrm_address_t *saddr,
 						  const xfrm_address_t *daddr,
 						  int family)
@@ -127,14 +128,15 @@
 	if (unlikely(afinfo == NULL))
 		return ERR_PTR(-EAFNOSUPPORT);
 
-	dst = afinfo->dst_lookup(net, tos, saddr, daddr);
+	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr);
 
 	xfrm_policy_put_afinfo(afinfo);
 
 	return dst;
 }
 
-static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
+static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
+						int tos, int oif,
 						xfrm_address_t *prev_saddr,
 						xfrm_address_t *prev_daddr,
 						int family)
@@ -153,7 +155,7 @@
 		daddr = x->coaddr;
 	}
 
-	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
+	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family);
 
 	if (!IS_ERR(dst)) {
 		if (prev_saddr != saddr)
@@ -1373,15 +1375,15 @@
 }
 
 static int
-xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
-	       unsigned short family)
+xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
+	       xfrm_address_t *remote, unsigned short family)
 {
 	int err;
 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
 
 	if (unlikely(afinfo == NULL))
 		return -EINVAL;
-	err = afinfo->get_saddr(net, local, remote);
+	err = afinfo->get_saddr(net, oif, local, remote);
 	xfrm_policy_put_afinfo(afinfo);
 	return err;
 }
@@ -1410,7 +1412,9 @@
 			remote = &tmpl->id.daddr;
 			local = &tmpl->saddr;
 			if (xfrm_addr_any(local, tmpl->encap_family)) {
-				error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
+				error = xfrm_get_saddr(net, fl->flowi_oif,
+						       &tmp, remote,
+						       tmpl->encap_family);
 				if (error)
 					goto fail;
 				local = &tmp;
@@ -1690,8 +1694,8 @@
 
 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
 			family = xfrm[i]->props.family;
-			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
-					      family);
+			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
+					      &saddr, &daddr, family);
 			err = PTR_ERR(dst);
 			if (IS_ERR(dst))
 				goto put_states;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index bd16c6c..a8de9e3 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -925,12 +925,10 @@
 			return err;
 
 		if (attrs[XFRMA_ADDRESS_FILTER]) {
-			filter = kmalloc(sizeof(*filter), GFP_KERNEL);
+			filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
+					 sizeof(*filter), GFP_KERNEL);
 			if (filter == NULL)
 				return -ENOMEM;
-
-			memcpy(filter, nla_data(attrs[XFRMA_ADDRESS_FILTER]),
-			       sizeof(*filter));
 		}
 
 		if (attrs[XFRMA_PROTO])
@@ -2048,7 +2046,7 @@
 		xfrm_audit_policy_delete(xp, 1, true);
 	} else {
 		// reset the timers here?
-		WARN(1, "Dont know what to do with soft policy expire\n");
+		WARN(1, "Don't know what to do with soft policy expire\n");
 	}
 	km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
 
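
The xfrm_user.c hunk above is the standard kmemdup() conversion: allocate-then-copy collapses into one call with identical failure semantics. Side by side (src stands in for the netlink attribute data):

	/* before */
	filter = kmalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;
	memcpy(filter, src, sizeof(*filter));

	/* after -- same behaviour, one call */
	filter = kmemdup(src, sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;
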
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 4450fed..63e7d50 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -12,6 +12,7 @@
 hostprogs-y += tracex3
 hostprogs-y += tracex4
 hostprogs-y += tracex5
+hostprogs-y += tracex6
 hostprogs-y += lathist
 
 test_verifier-objs := test_verifier.o libbpf.o
@@ -25,6 +26,7 @@
 tracex3-objs := bpf_load.o libbpf.o tracex3_user.o
 tracex4-objs := bpf_load.o libbpf.o tracex4_user.o
 tracex5-objs := bpf_load.o libbpf.o tracex5_user.o
+tracex6-objs := bpf_load.o libbpf.o tracex6_user.o
 lathist-objs := bpf_load.o libbpf.o lathist_user.o
 
 # Tell kbuild to always build the programs
@@ -37,6 +39,7 @@
 always += tracex3_kern.o
 always += tracex4_kern.o
 always += tracex5_kern.o
+always += tracex6_kern.o
 always += tcbpf1_kern.o
 always += lathist_kern.o
 
@@ -51,6 +54,7 @@
 HOSTLOADLIBES_tracex3 += -lelf
 HOSTLOADLIBES_tracex4 += -lelf -lrt
 HOSTLOADLIBES_tracex5 += -lelf
+HOSTLOADLIBES_tracex6 += -lelf
 HOSTLOADLIBES_lathist += -lelf
 
 # point this to your LLVM backend with bpf support
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index bdf1c16..3a44d3a 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -31,6 +31,8 @@
 	(void *) BPF_FUNC_get_current_uid_gid;
 static int (*bpf_get_current_comm)(void *buf, int buf_size) =
 	(void *) BPF_FUNC_get_current_comm;
+static int (*bpf_perf_event_read)(void *map, int index) =
+	(void *) BPF_FUNC_perf_event_read;
 
 /* llvm builtin functions that eBPF C program may use to
  * emit BPF_LD_ABS and BPF_LD_IND instructions
@@ -60,4 +62,29 @@
 static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
 	(void *) BPF_FUNC_l4_csum_replace;
 
+#if defined(__x86_64__)
+
+#define PT_REGS_PARM1(x) ((x)->di)
+#define PT_REGS_PARM2(x) ((x)->si)
+#define PT_REGS_PARM3(x) ((x)->dx)
+#define PT_REGS_PARM4(x) ((x)->cx)
+#define PT_REGS_PARM5(x) ((x)->r8)
+#define PT_REGS_RET(x) ((x)->sp)
+#define PT_REGS_FP(x) ((x)->bp)
+#define PT_REGS_RC(x) ((x)->ax)
+#define PT_REGS_SP(x) ((x)->sp)
+
+#elif defined(__s390x__)
+
+#define PT_REGS_PARM1(x) ((x)->gprs[2])
+#define PT_REGS_PARM2(x) ((x)->gprs[3])
+#define PT_REGS_PARM3(x) ((x)->gprs[4])
+#define PT_REGS_PARM4(x) ((x)->gprs[5])
+#define PT_REGS_PARM5(x) ((x)->gprs[6])
+#define PT_REGS_RET(x) ((x)->gprs[14])
+#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
+#define PT_REGS_RC(x) ((x)->gprs[2])
+#define PT_REGS_SP(x) ((x)->gprs[15])
+
+#endif
 #endif
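
These PT_REGS_* macros let the tracing samples stop hard-coding the x86-64 pt_regs layout (ctx->di, ctx->ax, ...), so the same source now builds for x86-64 and s390x; the tracex*_kern.c hunks below are the mechanical conversion. A hypothetical kprobe handler showing the before/after:

	SEC("kprobe/kfree_skb")
	int bpf_prog(struct pt_regs *ctx)
	{
		/* old, x86-64 only:
		 *   struct sk_buff *skb = (struct sk_buff *) ctx->di;
		 */
		struct sk_buff *skb = (struct sk_buff *) PT_REGS_PARM1(ctx);

		/* ... inspect skb fields via bpf_probe_read() ... */
		return 0;
	}
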
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index 6936059..ee0f110 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -822,6 +822,65 @@
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
+	{
+		"PTR_TO_STACK store/load",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
+	{
+		"PTR_TO_STACK store/load - bad alignment on off",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "misaligned access off -6 size 8",
+	},
+	{
+		"PTR_TO_STACK store/load - bad alignment on reg",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "misaligned access off -2 size 8",
+	},
+	{
+		"PTR_TO_STACK store/load - out of bounds low",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid stack off=-79992 size=8",
+	},
+	{
+		"PTR_TO_STACK store/load - out of bounds high",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid stack off=0 size=8",
+	},
 };
 
 static int probe_filter_length(struct bpf_insn *fp)
diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c
index 3162046..3f450a8 100644
--- a/samples/bpf/tracex1_kern.c
+++ b/samples/bpf/tracex1_kern.c
@@ -29,7 +29,7 @@
 	int len;
 
 	/* non-portable! works for the given kernel only */
-	skb = (struct sk_buff *) ctx->di;
+	skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
 
 	dev = _(skb->dev);
 
diff --git a/samples/bpf/tracex2_kern.c b/samples/bpf/tracex2_kern.c
index dc50f4f..b32367c 100644
--- a/samples/bpf/tracex2_kern.c
+++ b/samples/bpf/tracex2_kern.c
@@ -27,10 +27,10 @@
 	long init_val = 1;
 	long *value;
 
-	/* x64 specific: read ip of kfree_skb caller.
+	/* x64/s390x specific: read ip of kfree_skb caller.
 	 * non-portable version of __builtin_return_address(0)
 	 */
-	bpf_probe_read(&loc, sizeof(loc), (void *)ctx->sp);
+	bpf_probe_read(&loc, sizeof(loc), (void *)PT_REGS_RET(ctx));
 
 	value = bpf_map_lookup_elem(&my_map, &loc);
 	if (value)
@@ -79,7 +79,7 @@
 SEC("kprobe/sys_write")
 int bpf_prog3(struct pt_regs *ctx)
 {
-	long write_size = ctx->dx; /* arg3 */
+	long write_size = PT_REGS_PARM3(ctx);
 	long init_val = 1;
 	long *value;
 	struct hist_key key = {};
diff --git a/samples/bpf/tracex3_kern.c b/samples/bpf/tracex3_kern.c
index 255ff27..bf337fb 100644
--- a/samples/bpf/tracex3_kern.c
+++ b/samples/bpf/tracex3_kern.c
@@ -23,7 +23,7 @@
 SEC("kprobe/blk_mq_start_request")
 int bpf_prog1(struct pt_regs *ctx)
 {
-	long rq = ctx->di;
+	long rq = PT_REGS_PARM1(ctx);
 	u64 val = bpf_ktime_get_ns();
 
 	bpf_map_update_elem(&my_map, &rq, &val, BPF_ANY);
@@ -51,7 +51,7 @@
 SEC("kprobe/blk_update_request")
 int bpf_prog2(struct pt_regs *ctx)
 {
-	long rq = ctx->di;
+	long rq = PT_REGS_PARM1(ctx);
 	u64 *value, l, base;
 	u32 index;
 
diff --git a/samples/bpf/tracex4_kern.c b/samples/bpf/tracex4_kern.c
index 126b805..ac46714 100644
--- a/samples/bpf/tracex4_kern.c
+++ b/samples/bpf/tracex4_kern.c
@@ -27,7 +27,7 @@
 SEC("kprobe/kmem_cache_free")
 int bpf_prog1(struct pt_regs *ctx)
 {
-	long ptr = ctx->si;
+	long ptr = PT_REGS_PARM2(ctx);
 
 	bpf_map_delete_elem(&my_map, &ptr);
 	return 0;
@@ -36,11 +36,11 @@
 SEC("kretprobe/kmem_cache_alloc_node")
 int bpf_prog2(struct pt_regs *ctx)
 {
-	long ptr = ctx->ax;
+	long ptr = PT_REGS_RC(ctx);
 	long ip = 0;
 
 	/* get ip address of kmem_cache_alloc_node() caller */
-	bpf_probe_read(&ip, sizeof(ip), (void *)(ctx->bp + sizeof(ip)));
+	bpf_probe_read(&ip, sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip)));
 
 	struct pair v = {
 		.val = bpf_ktime_get_ns(),
diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5_kern.c
index b71fe07..b3f4295 100644
--- a/samples/bpf/tracex5_kern.c
+++ b/samples/bpf/tracex5_kern.c
@@ -24,7 +24,7 @@
 {
 	struct seccomp_data sd = {};
 
-	bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+	bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
 
 	/* dispatch into next BPF program depending on syscall number */
 	bpf_tail_call(ctx, &progs, sd.nr);
@@ -42,7 +42,7 @@
 {
 	struct seccomp_data sd = {};
 
-	bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+	bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
 	if (sd.args[2] == 512) {
 		char fmt[] = "write(fd=%d, buf=%p, size=%d)\n";
 		bpf_trace_printk(fmt, sizeof(fmt),
@@ -55,7 +55,7 @@
 {
 	struct seccomp_data sd = {};
 
-	bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+	bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
 	if (sd.args[2] > 128 && sd.args[2] <= 1024) {
 		char fmt[] = "read(fd=%d, buf=%p, size=%d)\n";
 		bpf_trace_printk(fmt, sizeof(fmt),
diff --git a/samples/bpf/tracex6_kern.c b/samples/bpf/tracex6_kern.c
new file mode 100644
index 0000000..be479c4
--- /dev/null
+++ b/samples/bpf/tracex6_kern.c
@@ -0,0 +1,27 @@
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") my_map = {
+	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+	.key_size = sizeof(int),
+	.value_size = sizeof(u32),
+	.max_entries = 32,
+};
+
+SEC("kprobe/sys_write")
+int bpf_prog1(struct pt_regs *ctx)
+{
+	u64 count;
+	u32 key = bpf_get_smp_processor_id();
+	char fmt[] = "CPU-%d   %llu\n";
+
+	count = bpf_perf_event_read(&my_map, key);
+	bpf_trace_printk(fmt, sizeof(fmt), key, count);
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex6_user.c b/samples/bpf/tracex6_user.c
new file mode 100644
index 0000000..8ea4976
--- /dev/null
+++ b/samples/bpf/tracex6_user.c
@@ -0,0 +1,72 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sys/ioctl.h>
+#include <linux/perf_event.h>
+#include <linux/bpf.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+#define SAMPLE_PERIOD  0x7fffffffffffffffULL
+
+static void test_bpf_perf_event(void)
+{
+	int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+	int *pmu_fd = malloc(nr_cpus * sizeof(int));
+	int status, i;
+
+	struct perf_event_attr attr_insn_pmu = {
+		.freq = 0,
+		.sample_period = SAMPLE_PERIOD,
+		.inherit = 0,
+		.type = PERF_TYPE_HARDWARE,
+		.read_format = 0,
+		.sample_type = 0,
+		.config = 0, /* PMU: cycles */
+	};
+
+	for (i = 0; i < nr_cpus; i++) {
+		pmu_fd[i] = perf_event_open(&attr_insn_pmu, -1/*pid*/, i/*cpu*/, -1/*group_fd*/, 0);
+		if (pmu_fd[i] < 0) {
+			printf("event syscall failed\n");
+			goto exit;
+		}
+
+		bpf_update_elem(map_fd[0], &i, &pmu_fd[i], BPF_ANY);
+		ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
+	}
+
+	status = system("ls > /dev/null");
+	if (status)
+		goto exit;
+	status = system("sleep 2");
+	if (status)
+		goto exit;
+
+exit:
+	for (i = 0; i < nr_cpus; i++)
+		close(pmu_fd[i]);
+	close(map_fd[0]);
+	free(pmu_fd);
+}
+
+int main(int argc, char **argv)
+{
+	char filename[256];
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+	if (load_bpf_file(filename)) {
+		printf("%s", bpf_log_buf);
+		return 1;
+	}
+
+	test_bpf_perf_event();
+	read_trace_pipe();
+
+	return 0;
+}
diff --git a/scripts/Lindent b/scripts/Lindent
index 9c4b3e2..6d889de 100755
--- a/scripts/Lindent
+++ b/scripts/Lindent
@@ -1,6 +1,9 @@
 #!/bin/sh
 PARAM="-npro -kr -i8 -ts8 -sob -l80 -ss -ncs -cp1"
 RES=`indent --version`
+if [ "$RES" = "" ]; then
+	exit 1
+fi
 V1=`echo $RES | cut -d' ' -f3 | cut -d'.' -f1`
 V2=`echo $RES | cut -d' ' -f3 | cut -d'.' -f2`
 V3=`echo $RES | cut -d' ' -f3 | cut -d'.' -f3`
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index 515c4c0..00d6d53c 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -14,11 +14,14 @@
 
 parse_symbol() {
 	# The structure of symbol at this point is:
-	#   [name]+[offset]/[total length]
+	#   ([name]+[offset]/[total length])
 	#
 	# For example:
 	#   do_basic_setup+0x9c/0xbf
 
+	# Remove the enclosing parentheses
+	symbol=${symbol#\(}
+	symbol=${symbol%\)}
 
 	# Strip the symbol name so that we could look it up
 	local name=${symbol%+*}
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index a7bf5f6..9a08fb5 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -469,7 +469,7 @@
     } else {
 #	print STDERR "other section '$name' = '$contents'\n";
 	if (defined($sections{$name}) && ($sections{$name} ne "")) {
-		print STDERR "Error(${file}:$.): duplicate section name '$name'\n";
+		print STDERR "${file}:$.: error: duplicate section name '$name'\n";
 		++$errors;
 	}
 	$sections{$name} = $contents;
@@ -1820,7 +1820,7 @@
 			   });
     }
     else {
-	print STDERR "Error(${file}:$.): Cannot parse struct or union!\n";
+	print STDERR "${file}:$.: error: Cannot parse struct or union!\n";
 	++$errors;
     }
 }
@@ -1841,7 +1841,7 @@
 	    push @parameterlist, $arg;
 	    if (!$parameterdescs{$arg}) {
 		$parameterdescs{$arg} = $undescribed;
-		print STDERR "Warning(${file}:$.): Enum value '$arg' ".
+		print STDERR "${file}:$.: warning: Enum value '$arg' ".
 		    "not described in enum '$declaration_name'\n";
 	    }
 
@@ -1859,7 +1859,7 @@
 			   });
     }
     else {
-	print STDERR "Error(${file}:$.): Cannot parse enum!\n";
+	print STDERR "${file}:$.: error: Cannot parse enum!\n";
 	++$errors;
     }
 }
@@ -1887,7 +1887,7 @@
 			   });
     }
     else {
-	print STDERR "Error(${file}:$.): Cannot parse typedef!\n";
+	print STDERR "${file}:$.: error: Cannot parse typedef!\n";
 	++$errors;
     }
 }
@@ -2019,11 +2019,11 @@
 	    $parameterdescs{$param_name} = $undescribed;
 
 	    if (($type eq 'function') || ($type eq 'enum')) {
-		print STDERR "Warning(${file}:$.): Function parameter ".
+		print STDERR "${file}:$.: warning: Function parameter ".
 		    "or member '$param' not " .
 		    "described in '$declaration_name'\n";
 	    }
-	    print STDERR "Warning(${file}:$.):" .
+	    print STDERR "${file}:$.: warning:" .
 			 " No description found for parameter '$param'\n";
 	    ++$warnings;
 	}
@@ -2074,14 +2074,14 @@
 		}
 		if ($err) {
 			if ($decl_type eq "function") {
-				print STDERR "Warning(${file}:$.): " .
+				print STDERR "${file}:$.: warning: " .
 					"Excess function parameter " .
 					"'$sects[$sx]' " .
 					"description in '$decl_name'\n";
 				++$warnings;
 			} else {
 				if ($nested !~ m/\Q$sects[$sx]\E/) {
-				    print STDERR "Warning(${file}:$.): " .
+				    print STDERR "${file}:$.: warning: " .
 					"Excess struct/union/enum/typedef member " .
 					"'$sects[$sx]' " .
 					"description in '$decl_name'\n";
@@ -2107,7 +2107,7 @@
 
         if (!defined($sections{$section_return}) ||
             $sections{$section_return} eq "") {
-                print STDERR "Warning(${file}:$.): " .
+                print STDERR "${file}:$.: warning: " .
                         "No description found for return value of " .
                         "'$declaration_name'\n";
                 ++$warnings;
@@ -2186,7 +2186,7 @@
 
 	create_parameterlist($args, ',', $file);
     } else {
-	print STDERR "Warning(${file}:$.): cannot understand function prototype: '$prototype'\n";
+	print STDERR "${file}:$.: warning: cannot understand function prototype: '$prototype'\n";
 	return;
     }
 
@@ -2251,7 +2251,7 @@
 		$tracepointargs = $1;
 	}
 	if (($tracepointname eq 0) || ($tracepointargs eq 0)) {
-		print STDERR "Warning(${file}:$.): Unrecognized tracepoint format: \n".
+		print STDERR "${file}:$.: warning: Unrecognized tracepoint format: \n".
 			     "$prototype\n";
 	} else {
 		$prototype = "static inline void trace_$tracepointname($tracepointargs)";
@@ -2450,7 +2450,7 @@
 		}
 
 		if (($declaration_purpose eq "") && $verbose) {
-			print STDERR "Warning(${file}:$.): missing initial short description on line:\n";
+			print STDERR "${file}:$.: warning: missing initial short description on line:\n";
 			print STDERR $_;
 			++$warnings;
 		}
@@ -2468,10 +2468,10 @@
 		}
 
 		if ($verbose) {
-		    print STDERR "Info(${file}:$.): Scanning doc for $identifier\n";
+		    print STDERR "${file}:$.: info: Scanning doc for $identifier\n";
 		}
 	    } else {
-		print STDERR "Warning(${file}:$.): Cannot understand $_ on line $.",
+		print STDERR "${file}:$.: warning: Cannot understand $_ on line $.",
 		" - I thought it was a doc line\n";
 		++$warnings;
 		$state = 0;
@@ -2483,7 +2483,7 @@
 
 		if (($contents ne "") && ($contents ne "\n")) {
 		    if (!$in_doc_sect && $verbose) {
-			print STDERR "Warning(${file}:$.): contents before sections\n";
+			print STDERR "${file}:$.: warning: contents before sections\n";
 			++$warnings;
 		    }
 		    dump_section($file, $section, xml_escape($contents));
@@ -2509,7 +2509,7 @@
 		}
 		# look for doc_com + <text> + doc_end:
 		if ($_ =~ m'\s*\*\s*[a-zA-Z_0-9:\.]+\*/') {
-		    print STDERR "Warning(${file}:$.): suspicious ending line: $_";
+		    print STDERR "${file}:$.: warning: suspicious ending line: $_";
 		    ++$warnings;
 		}
 
@@ -2539,7 +2539,7 @@
 		}
 	    } else {
 		# i dont know - bad line?  ignore.
-		print STDERR "Warning(${file}:$.): bad line: $_";
+		print STDERR "${file}:$.: warning: bad line: $_";
 		++$warnings;
 	    }
 	} elsif ($state == 5) { # scanning for split parameters
@@ -2631,7 +2631,7 @@
 	}
     }
     if ($initial_section_counter == $section_counter) {
-	print STDERR "Warning(${file}): no structured comments found\n";
+	print STDERR "${file}:1: warning: no structured comments found\n";
 	if (($function_only == 1) && ($show_not_found == 1)) {
 	    print STDERR "    Was looking for '$_'.\n" for keys %function_table;
 	}
diff --git a/scripts/rt-tester/check-all.sh b/scripts/rt-tester/check-all.sh
deleted file mode 100644
index 6b5c83b..0000000
--- a/scripts/rt-tester/check-all.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-function testit ()
-{
- printf "%-30s: " $1
- ./rt-tester.py $1 | grep Pass
-}
-
-testit t2-l1-2rt-sameprio.tst
-testit t2-l1-pi.tst
-testit t2-l1-signal.tst
-#testit t2-l2-2rt-deadlock.tst
-testit t3-l1-pi-1rt.tst
-testit t3-l1-pi-2rt.tst
-testit t3-l1-pi-3rt.tst
-testit t3-l1-pi-signal.tst
-testit t3-l1-pi-steal.tst
-testit t3-l2-pi.tst
-testit t4-l2-pi-deboost.tst
-testit t5-l4-pi-boost-deboost.tst
-testit t5-l4-pi-boost-deboost-setsched.tst
diff --git a/scripts/rt-tester/rt-tester.py b/scripts/rt-tester/rt-tester.py
deleted file mode 100755
index 6d916c2..0000000
--- a/scripts/rt-tester/rt-tester.py
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/usr/bin/python
-#
-# rt-mutex tester
-#
-# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-import os
-import sys
-import getopt
-import shutil
-import string
-
-# Globals
-quiet = 0
-test = 0
-comments = 0
-
-sysfsprefix = "/sys/devices/system/rttest/rttest"
-statusfile = "/status"
-commandfile = "/command"
-
-# Command opcodes
-cmd_opcodes = {
-    "schedother"    : "1",
-    "schedfifo"     : "2",
-    "lock"          : "3",
-    "locknowait"    : "4",
-    "lockint"       : "5",
-    "lockintnowait" : "6",
-    "lockcont"      : "7",
-    "unlock"        : "8",
-    "signal"        : "11",
-    "resetevent"    : "98",
-    "reset"         : "99",
-    }
-
-test_opcodes = {
-    "prioeq"        : ["P" , "eq" , None],
-    "priolt"        : ["P" , "lt" , None],
-    "priogt"        : ["P" , "gt" , None],
-    "nprioeq"       : ["N" , "eq" , None],
-    "npriolt"       : ["N" , "lt" , None],
-    "npriogt"       : ["N" , "gt" , None],
-    "unlocked"      : ["M" , "eq" , 0],
-    "trylock"       : ["M" , "eq" , 1],
-    "blocked"       : ["M" , "eq" , 2],
-    "blockedwake"   : ["M" , "eq" , 3],
-    "locked"        : ["M" , "eq" , 4],
-    "opcodeeq"      : ["O" , "eq" , None],
-    "opcodelt"      : ["O" , "lt" , None],
-    "opcodegt"      : ["O" , "gt" , None],
-    "eventeq"       : ["E" , "eq" , None],
-    "eventlt"       : ["E" , "lt" , None],
-    "eventgt"       : ["E" , "gt" , None],
-    }
-
-# Print usage information
-def usage():
-    print "rt-tester.py <-c -h -q -t> <testfile>"
-    print " -c    display comments after first command"
-    print " -h    help"
-    print " -q    quiet mode"
-    print " -t    test mode (syntax check)"
-    print " testfile: read test specification from testfile"
-    print " otherwise from stdin"
-    return
-
-# Print progress when not in quiet mode
-def progress(str):
-    if not quiet:
-        print str
-
-# Analyse a status value
-def analyse(val, top, arg):
-
-    intval = int(val)
-
-    if top[0] == "M":
-        intval = intval / (10 ** int(arg))
-	intval = intval % 10
-        argval = top[2]
-    elif top[0] == "O":
-        argval = int(cmd_opcodes.get(arg, arg))
-    else:
-        argval = int(arg)
-
-    # progress("%d %s %d" %(intval, top[1], argval))
-
-    if top[1] == "eq" and intval == argval:
-	return 1
-    if top[1] == "lt" and intval < argval:
-        return 1
-    if top[1] == "gt" and intval > argval:
-	return 1
-    return 0
-
-# Parse the commandline
-try:
-    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
-except getopt.GetoptError, ex:
-    usage()
-    sys.exit(1)
-
-# Parse commandline options
-for option, value in options:
-    if option == "-c":
-        comments = 1
-    elif option == "-q":
-        quiet = 1
-    elif option == "-t":
-        test = 1
-    elif option == '-h':
-        usage()
-        sys.exit(0)
-
-# Select the input source
-if arguments:
-    try:
-        fd = open(arguments[0])
-    except Exception,ex:
-        sys.stderr.write("File not found %s\n" %(arguments[0]))
-        sys.exit(1)
-else:
-    fd = sys.stdin
-
-linenr = 0
-
-# Read the test patterns
-while 1:
-
-    linenr = linenr + 1
-    line = fd.readline()
-    if not len(line):
-        break
-
-    line = line.strip()
-    parts = line.split(":")
-
-    if not parts or len(parts) < 1:
-        continue
-
-    if len(parts[0]) == 0:
-        continue
-
-    if parts[0].startswith("#"):
-	if comments > 1:
-	    progress(line)
-	continue
-
-    if comments == 1:
-	comments = 2
-
-    progress(line)
-
-    cmd = parts[0].strip().lower()
-    opc = parts[1].strip().lower()
-    tid = parts[2].strip()
-    dat = parts[3].strip()
-
-    try:
-        # Test or wait for a status value
-        if cmd == "t" or cmd == "w":
-            testop = test_opcodes[opc]
-
-            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
-            if test:
-		print fname
-                continue
-
-            while 1:
-                query = 1
-                fsta = open(fname, 'r')
-                status = fsta.readline().strip()
-                fsta.close()
-                stat = status.split(",")
-                for s in stat:
-		    s = s.strip()
-                    if s.startswith(testop[0]):
-                        # Separate status value
-                        val = s[2:].strip()
-                        query = analyse(val, testop, dat)
-                        break
-                if query or cmd == "t":
-                    break
-
-            progress("   " + status)
-
-            if not query:
-                sys.stderr.write("Test failed in line %d\n" %(linenr))
-		sys.exit(1)
-
-        # Issue a command to the tester
-        elif cmd == "c":
-            cmdnr = cmd_opcodes[opc]
-            # Build command string and sys filename
-            cmdstr = "%s:%s" %(cmdnr, dat)
-            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
-            if test:
-		print fname
-                continue
-            fcmd = open(fname, 'w')
-            fcmd.write(cmdstr)
-            fcmd.close()
-
-    except Exception,ex:
-    	sys.stderr.write(str(ex))
-        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
-        if not test:
-            fd.close()
-            sys.exit(1)
-
-# Normal exit pass
-print "Pass"
-sys.exit(0)
diff --git a/scripts/rt-tester/t2-l1-2rt-sameprio.tst b/scripts/rt-tester/t2-l1-2rt-sameprio.tst
deleted file mode 100644
index 3710c8b..0000000
--- a/scripts/rt-tester/t2-l1-2rt-sameprio.tst
+++ /dev/null
@@ -1,94 +0,0 @@
-#
-# RT-Mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	0
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 2 threads 1 lock
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedfifo:		0: 	80
-C: schedfifo:		1: 	80
-
-# T0 lock L0
-C: locknowait:		0: 	0
-C: locknowait:		1:	0
-W: locked:		0: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	80
-
-# T0 unlock L0
-C: unlock:		0: 	0
-W: locked:		1: 	0
-
-# Verify T0
-W: unlocked:		0: 	0
-T: prioeq:		0: 	80
-
-# Unlock
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
-
-# T1,T0 lock L0
-C: locknowait:		1: 	0
-C: locknowait:		0:	0
-W: locked:		1: 	0
-W: blocked:		0: 	0
-T: prioeq:		1: 	80
-
-# T1 unlock L0
-C: unlock:		1: 	0
-W: locked:		0: 	0
-
-# Verify T1
-W: unlocked:		1: 	0
-T: prioeq:		1: 	80
-
-# Unlock and exit
-C: unlock:		0: 	0
-W: unlocked:		0: 	0
-
diff --git a/scripts/rt-tester/t2-l1-pi.tst b/scripts/rt-tester/t2-l1-pi.tst
deleted file mode 100644
index b4cc959..0000000
--- a/scripts/rt-tester/t2-l1-pi.tst
+++ /dev/null
@@ -1,77 +0,0 @@
-#
-# RT-Mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	0
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 2 threads 1 lock with priority inversion
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedfifo:		1: 	80
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	80
-
-# T0 unlock L0
-C: unlock:		0: 	0
-W: locked:		1: 	0
-
-# Verify T1
-W: unlocked:		0: 	0
-T: priolt:		0: 	1
-
-# Unlock and exit
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
-
diff --git a/scripts/rt-tester/t2-l1-signal.tst b/scripts/rt-tester/t2-l1-signal.tst
deleted file mode 100644
index 1b57376..0000000
--- a/scripts/rt-tester/t2-l1-signal.tst
+++ /dev/null
@@ -1,72 +0,0 @@
-#
-# RT-Mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	0
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 2 threads 1 lock with priority inversion
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedother:		1: 	0
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: lockintnowait:	1: 	0
-W: blocked:		1: 	0
-
-# Interrupt T1
-C: signal:		1:	0
-W: unlocked:		1: 	0
-T: opcodeeq:		1:	-4
-
-# Unlock and exit
-C: unlock:		0: 	0
-W: unlocked:		0: 	0
diff --git a/scripts/rt-tester/t2-l2-2rt-deadlock.tst b/scripts/rt-tester/t2-l2-2rt-deadlock.tst
deleted file mode 100644
index 68b1062..0000000
--- a/scripts/rt-tester/t2-l2-2rt-deadlock.tst
+++ /dev/null
@@ -1,84 +0,0 @@
-#
-# RT-Mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	0
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 2 threads 2 lock
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedfifo:		0: 	80
-C: schedfifo:		1: 	80
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L1
-C: locknowait:		1:	1
-W: locked:		1: 	1
-
-# T0 lock L1
-C: lockintnowait:	0: 	1
-W: blocked:		0: 	1
-
-# T1 lock L0
-C: lockintnowait:	1: 	0
-W: blocked:		1: 	0
-
-# Make deadlock go away
-C: signal:		1:	0
-W: unlocked:		1:	0
-C: signal:		0:	0
-W: unlocked:		0:	1
-
-# Unlock and exit
-C: unlock:		0: 	0
-W: unlocked:		0: 	0
-C: unlock:		1: 	1
-W: unlocked:		1: 	1
-
diff --git a/scripts/rt-tester/t3-l1-pi-1rt.tst b/scripts/rt-tester/t3-l1-pi-1rt.tst
deleted file mode 100644
index 8e6c8b1..0000000
--- a/scripts/rt-tester/t3-l1-pi-1rt.tst
+++ /dev/null
@@ -1,87 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 3 threads 1 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedother:		1: 	0
-C: schedfifo:		2: 	82
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: priolt:		0: 	1
-
-# T2 lock L0
-C: locknowait:		2: 	0
-W: blocked:		2: 	0
-T: prioeq:		0: 	82
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T2 got the lock
-W: locked:		2: 	0
-W: unlocked:		0:	0
-T: priolt:		0:	1
-
-# T2 unlock L0
-C: unlock:		2: 	0
-
-W: unlocked:		2: 	0
-W: locked:		1: 	0
-
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
diff --git a/scripts/rt-tester/t3-l1-pi-2rt.tst b/scripts/rt-tester/t3-l1-pi-2rt.tst
deleted file mode 100644
index 69c2212..0000000
--- a/scripts/rt-tester/t3-l1-pi-2rt.tst
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 3 threads 1 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedfifo:		1: 	81
-C: schedfifo:		2: 	82
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	81
-
-# T2 lock L0
-C: locknowait:		2: 	0
-W: blocked:		2: 	0
-T: prioeq:		0: 	82
-T: prioeq:		1:	81
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T2 got the lock
-W: locked:		2: 	0
-W: unlocked:		0:	0
-T: priolt:		0:	1
-
-# T2 unlock L0
-C: unlock:		2: 	0
-
-W: unlocked:		2: 	0
-W: locked:		1: 	0
-
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
diff --git a/scripts/rt-tester/t3-l1-pi-3rt.tst b/scripts/rt-tester/t3-l1-pi-3rt.tst
deleted file mode 100644
index 9b0f1eb..0000000
--- a/scripts/rt-tester/t3-l1-pi-3rt.tst
+++ /dev/null
@@ -1,87 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 3 threads 1 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedfifo:		0: 	80
-C: schedfifo:		1: 	81
-C: schedfifo:		2: 	82
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	81
-
-# T2 lock L0
-C: locknowait:		2: 	0
-W: blocked:		2: 	0
-T: prioeq:		0: 	82
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T2 got the lock
-W: locked:		2: 	0
-W: unlocked:		0:	0
-T: prioeq:		0:	80
-
-# T2 unlock L0
-C: unlock:		2: 	0
-
-W: locked:		1: 	0
-W: unlocked:		2: 	0
-
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
diff --git a/scripts/rt-tester/t3-l1-pi-signal.tst b/scripts/rt-tester/t3-l1-pi-signal.tst
deleted file mode 100644
index 39ec74a..0000000
--- a/scripts/rt-tester/t3-l1-pi-signal.tst
+++ /dev/null
@@ -1,93 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-# Reset event counter
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set priorities
-C: schedother:		0: 	0
-C: schedfifo:		1: 	80
-C: schedfifo:		2: 	81
-
-# T0 lock L0
-C: lock:		0:	0
-W: locked:		0: 	0
-
-# T1 lock L0, no wait in the wakeup path
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0:	80
-T: prioeq:		1:	80
-
-# T2 lock L0 interruptible, no wait in the wakeup path
-C: lockintnowait:	2:	0
-W: blocked:		2: 	0
-T: prioeq:		0:	81
-T: prioeq:		1:	80
-
-# Interrupt T2
-C: signal:		2:	2
-W: unlocked:		2:	0
-T: prioeq:		1:	80
-T: prioeq:		0:	80
-
-T: locked:		0:	0
-T: blocked:		1:	0
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T1 has locked L0 and exit
-W: locked:		1:	0
-W: unlocked:		0: 	0
-T: priolt:		0:	1
-
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
-
-
-
diff --git a/scripts/rt-tester/t3-l1-pi-steal.tst b/scripts/rt-tester/t3-l1-pi-steal.tst
deleted file mode 100644
index e03db7e..0000000
--- a/scripts/rt-tester/t3-l1-pi-steal.tst
+++ /dev/null
@@ -1,91 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 3 threads 1 lock PI steal pending ownership
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedfifo:		1: 	80
-C: schedfifo:		2: 	81
-
-# T0 lock L0
-C: lock:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: lock:		1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	80
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T1 is in the wakeup loop
-W: blockedwake:		1: 	0
-T: priolt:		0: 	1
-
-# T2 lock L0
-C: lock:		2: 	0
-# T1 leave wakeup loop
-C: lockcont:		1: 	0
-
-# T2 must have the lock and T1 must be blocked
-W: locked:		2: 	0
-W: blocked:		1: 	0
-
-# T2 unlock L0
-C: unlock:		2: 	0
-
-# Wait until T1 is in the wakeup loop and let it run
-W: blockedwake:		1: 	0
-C: lockcont:		1: 	0
-W: locked:		1: 	0
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
diff --git a/scripts/rt-tester/t3-l2-pi.tst b/scripts/rt-tester/t3-l2-pi.tst
deleted file mode 100644
index 7b59100..0000000
--- a/scripts/rt-tester/t3-l2-pi.tst
+++ /dev/null
@@ -1,87 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 3 threads 2 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedother:		1: 	0
-C: schedfifo:		2: 	82
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: priolt:		0: 	1
-
-# T2 lock L0
-C: locknowait:		2: 	0
-W: blocked:		2: 	0
-T: prioeq:		0: 	82
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T2 got the lock
-W: locked:		2: 	0
-W: unlocked:		0:	0
-T: priolt:		0:	1
-
-# T2 unlock L0
-C: unlock:		2: 	0
-
-W: unlocked:		2: 	0
-W: locked:		1: 	0
-
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
diff --git a/scripts/rt-tester/t4-l2-pi-deboost.tst b/scripts/rt-tester/t4-l2-pi-deboost.tst
deleted file mode 100644
index 2f0e049..0000000
--- a/scripts/rt-tester/t4-l2-pi-deboost.tst
+++ /dev/null
@@ -1,118 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 4 threads 2 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedother:		1: 	0
-C: schedfifo:		2: 	82
-C: schedfifo:		3: 	83
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L1
-C: locknowait:		1: 	1
-W: locked:		1: 	1
-
-# T3 lock L0
-C: lockintnowait:	3: 	0
-W: blocked:		3: 	0
-T: prioeq:		0: 	83
-
-# T0 lock L1
-C: lock:		0: 	1
-W: blocked:		0: 	1
-T: prioeq:		1: 	83
-
-# T1 unlock L1
-C: unlock:		1:	1
-
-# Wait until T0 is in the wakeup code
-W: blockedwake:		0:	1
-
-# Verify that T1 is unboosted
-W: unlocked:		1: 	1
-T: priolt:		1: 	1
-
-# T2 lock L1 (T0 is boosted and pending owner !)
-C: locknowait:		2:	1
-W: blocked:		2: 	1
-T: prioeq:		0: 	83
-
-# Interrupt T3 and wait until T3 returned
-C: signal:		3:	0
-W: unlocked:		3:	0
-
-# Verify prio of T0 (still pending owner,
-# but T2 is enqueued due to the previous boost by T3
-T: prioeq:		0:	82
-
-# Let T0 continue
-C: lockcont:		0:	1
-W: locked:		0:	1
-
-# Unlock L1 and let T2 get L1
-C: unlock:		0:	1
-W: locked:		2:	1
-
-# Verify that T0 is unboosted
-W: unlocked:		0:	1
-T: priolt:		0:	1
-
-# Unlock everything and exit
-C: unlock:		2:	1
-W: unlocked:		2:	1
-
-C: unlock:		0:	0
-W: unlocked:		0:	0
-
diff --git a/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst b/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst
deleted file mode 100644
index 04f4034..0000000
--- a/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst
+++ /dev/null
@@ -1,178 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 5 threads 4 lock PI - modify priority of blocked threads
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedfifo:		1: 	81
-C: schedfifo:		2: 	82
-C: schedfifo:		3: 	83
-C: schedfifo:		4: 	84
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L1
-C: locknowait:		1: 	1
-W: locked:		1: 	1
-
-# T1 lock L0
-C: lockintnowait:	1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	81
-
-# T2 lock L2
-C: locknowait:		2: 	2
-W: locked:		2: 	2
-
-# T2 lock L1
-C: lockintnowait:	2: 	1
-W: blocked:		2: 	1
-T: prioeq:		0: 	82
-T: prioeq:		1:	82
-
-# T3 lock L3
-C: locknowait:		3: 	3
-W: locked:		3: 	3
-
-# T3 lock L2
-C: lockintnowait:	3: 	2
-W: blocked:		3: 	2
-T: prioeq:		0: 	83
-T: prioeq:		1:	83
-T: prioeq:		2:	83
-
-# T4 lock L3
-C: lockintnowait:	4:	3
-W: blocked:		4: 	3
-T: prioeq:		0: 	84
-T: prioeq:		1:	84
-T: prioeq:		2:	84
-T: prioeq:		3:	84
-
-# Reduce prio of T4
-C: schedfifo:		4: 	80
-T: prioeq:		0: 	83
-T: prioeq:		1:	83
-T: prioeq:		2:	83
-T: prioeq:		3:	83
-T: prioeq:		4:	80
-
-# Increase prio of T4
-C: schedfifo:		4: 	84
-T: prioeq:		0: 	84
-T: prioeq:		1:	84
-T: prioeq:		2:	84
-T: prioeq:		3:	84
-T: prioeq:		4:	84
-
-# Reduce prio of T3
-C: schedfifo:		3: 	80
-T: prioeq:		0: 	84
-T: prioeq:		1:	84
-T: prioeq:		2:	84
-T: prioeq:		3:	84
-T: prioeq:		4:	84
-
-# Increase prio of T3
-C: schedfifo:		3: 	85
-T: prioeq:		0: 	85
-T: prioeq:		1:	85
-T: prioeq:		2:	85
-T: prioeq:		3:	85
-T: prioeq:		4:	84
-
-# Reduce prio of T3
-C: schedfifo:		3: 	83
-T: prioeq:		0: 	84
-T: prioeq:		1:	84
-T: prioeq:		2:	84
-T: prioeq:		3:	84
-T: prioeq:		4:	84
-
-# Signal T4
-C: signal:		4: 	0
-W: unlocked:		4: 	3
-T: prioeq:		0: 	83
-T: prioeq:		1:	83
-T: prioeq:		2:	83
-T: prioeq:		3:	83
-
-# Signal T3
-C: signal:		3: 	0
-W: unlocked:		3: 	2
-T: prioeq:		0: 	82
-T: prioeq:		1:	82
-T: prioeq:		2:	82
-
-# Signal T2
-C: signal:		2: 	0
-W: unlocked:		2: 	1
-T: prioeq:		0: 	81
-T: prioeq:		1:	81
-
-# Signal T1
-C: signal:		1: 	0
-W: unlocked:		1: 	0
-T: priolt:		0: 	1
-
-# Unlock and exit
-C: unlock:		3:	3
-C: unlock:		2:	2
-C: unlock:		1:	1
-C: unlock:		0:	0
-
-W: unlocked:		3:	3
-W: unlocked:		2:	2
-W: unlocked:		1:	1
-W: unlocked:		0:	0
-
diff --git a/scripts/rt-tester/t5-l4-pi-boost-deboost.tst b/scripts/rt-tester/t5-l4-pi-boost-deboost.tst
deleted file mode 100644
index a48a6ee..0000000
--- a/scripts/rt-tester/t5-l4-pi-boost-deboost.tst
+++ /dev/null
@@ -1,138 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 5 threads 4 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedfifo:		1: 	81
-C: schedfifo:		2: 	82
-C: schedfifo:		3: 	83
-C: schedfifo:		4: 	84
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L1
-C: locknowait:		1: 	1
-W: locked:		1: 	1
-
-# T1 lock L0
-C: lockintnowait:	1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	81
-
-# T2 lock L2
-C: locknowait:		2: 	2
-W: locked:		2: 	2
-
-# T2 lock L1
-C: lockintnowait:	2: 	1
-W: blocked:		2: 	1
-T: prioeq:		0: 	82
-T: prioeq:		1:	82
-
-# T3 lock L3
-C: locknowait:		3: 	3
-W: locked:		3: 	3
-
-# T3 lock L2
-C: lockintnowait:	3: 	2
-W: blocked:		3: 	2
-T: prioeq:		0: 	83
-T: prioeq:		1:	83
-T: prioeq:		2:	83
-
-# T4 lock L3
-C: lockintnowait:	4:	3
-W: blocked:		4: 	3
-T: prioeq:		0: 	84
-T: prioeq:		1:	84
-T: prioeq:		2:	84
-T: prioeq:		3:	84
-
-# Signal T4
-C: signal:		4: 	0
-W: unlocked:		4: 	3
-T: prioeq:		0: 	83
-T: prioeq:		1:	83
-T: prioeq:		2:	83
-T: prioeq:		3:	83
-
-# Signal T3
-C: signal:		3: 	0
-W: unlocked:		3: 	2
-T: prioeq:		0: 	82
-T: prioeq:		1:	82
-T: prioeq:		2:	82
-
-# Signal T2
-C: signal:		2: 	0
-W: unlocked:		2: 	1
-T: prioeq:		0: 	81
-T: prioeq:		1:	81
-
-# Signal T1
-C: signal:		1: 	0
-W: unlocked:		1: 	0
-T: priolt:		0: 	1
-
-# Unlock and exit
-C: unlock:		3:	3
-C: unlock:		2:	2
-C: unlock:		1:	1
-C: unlock:		0:	0
-
-W: unlocked:		3:	3
-W: unlocked:		2:	2
-W: unlocked:		1:	1
-W: unlocked:		0:	0
-
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index bb8e4d0..946caf3 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -32,6 +32,7 @@
 accout||account
 accquire||acquire
 accquired||acquired
+accross||across
 acessable||accessible
 acess||access
 achitecture||architecture
@@ -100,8 +101,10 @@
 appropriatly||appropriately
 approriate||appropriate
 approriately||appropriately
+apropriate||appropriate
 aquainted||acquainted
 aquired||acquired
+aquisition||acquisition
 arbitary||arbitrary
 architechture||architecture
 arguement||argument
@@ -111,6 +114,8 @@
 arraival||arrival
 artifical||artificial
 artillary||artillery
+asign||assign
+assertation||assertion
 assiged||assigned
 assigment||assignment
 assigments||assignments
@@ -136,6 +141,7 @@
 automatized||automated
 automatizes||automates
 autonymous||autonomous
+auxillary||auxiliary
 auxilliary||auxiliary
 avaiable||available
 avaible||available
@@ -187,6 +193,7 @@
 carefuly||carefully
 cariage||carriage
 catagory||category
+cehck||check
 challange||challenge
 challanges||challenges
 chanell||channel
@@ -199,6 +206,8 @@
 charater||character
 charaters||characters
 charcter||character
+chcek||check
+chck||check
 checksuming||checksumming
 childern||children
 childs||children
@@ -231,6 +240,8 @@
 compatable||compatible
 compatibiliy||compatibility
 compatibilty||compatibility
+compatiblity||compatibility
+competion||completion
 compilant||compliant
 compleatly||completely
 completly||completely
@@ -291,6 +302,7 @@
 definate||definite
 definately||definitely
 defintion||definition
+defintions||definitions
 defualt||default
 defult||default
 deivce||device
@@ -306,6 +318,7 @@
 depreacte||deprecate
 desactivate||deactivate
 desciptors||descriptors
+descripton||description
 descrition||description
 descritptor||descriptor
 desctiptor||descriptor
@@ -327,6 +340,7 @@
 deviece||device
 diable||disable
 dictionnary||dictionary
+didnt||didn't
 diferent||different
 differrence||difference
 difinition||definition
@@ -344,6 +358,7 @@
 documantation||documentation
 documentaion||documentation
 documment||document
+doesnt||doesn't
 dorp||drop
 dosen||doesn
 downlad||download
@@ -450,11 +465,13 @@
 grahpical||graphical
 grapic||graphic
 guage||gauge
+guarenteed||guaranteed
 guarentee||guarantee
 halfs||halves
 hander||handler
 handfull||handful
 hanled||handled
+happend||happened
 harware||hardware
 heirarchically||hierarchically
 helpfull||helpful
@@ -512,6 +529,7 @@
 initilization||initialization
 initilize||initialize
 inofficial||unofficial
+insititute||institute
 instal||install
 inteface||interface
 integreated||integrated
@@ -546,6 +564,7 @@
 invokation||invocation
 invokations||invocations
 irrelevent||irrelevant
+isnt||isn't
 isssue||issue
 itslef||itself
 jave||java
@@ -558,6 +577,7 @@
 langauge||language
 langugage||language
 lauch||launch
+layed||laid
 leightweight||lightweight
 lengh||length
 lenght||length
@@ -714,6 +734,7 @@
 preceed||precede
 precendence||precedence
 precission||precision
+preemptable||preemptible
 prefered||preferred
 prefferably||preferably
 premption||preemption
@@ -744,6 +765,7 @@
 programm||program
 programms||programs
 progresss||progress
+promiscous||promiscuous
 promps||prompts
 pronnounced||pronounced
 prononciation||pronunciation
@@ -817,6 +839,7 @@
 resizeable||resizable
 resouces||resources
 resoures||resources
+responce||response
 ressizes||resizes
 ressource||resource
 ressources||resources
@@ -869,6 +892,7 @@
 settting||setting
 shotdown||shutdown
 shoud||should
+shouldnt||shouldn't
 shoule||should
 shrinked||shrunk
 siginificantly||significantly
@@ -913,9 +937,11 @@
 struc||struct
 structres||structures
 stuct||struct
+stucture||structure
 sturcture||structure
 subdirectoires||subdirectories
 suble||subtle
+substract||subtract
 succesfully||successfully
 succesful||successful
 successfull||successful
@@ -987,6 +1013,7 @@
 unexpeted||unexpected
 unfortunatelly||unfortunately
 unifiy||unify
+unintialized||uninitialized
 unknonw||unknown
 unknow||unknown
 unkown||unknown
@@ -1027,7 +1054,9 @@
 visiters||visitors
 vitual||virtual
 wating||waiting
+wether||whether
 whataver||whatever
+whcih||which
 whenver||whenever
 wheter||whether
 whe||when
diff --git a/security/commoncap.c b/security/commoncap.c
index d103f5a4..1832cf7 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -267,6 +267,16 @@
 	new->cap_effective   = *effective;
 	new->cap_inheritable = *inheritable;
 	new->cap_permitted   = *permitted;
+
+	/*
+	 * Mask off ambient bits that are no longer both permitted and
+	 * inheritable.
+	 */
+	new->cap_ambient = cap_intersect(new->cap_ambient,
+					 cap_intersect(*permitted,
+						       *inheritable));
+	if (WARN_ON(!cap_ambient_invariant_ok(new)))
+		return -EINVAL;
 	return 0;
 }
 
@@ -347,6 +357,7 @@
 
 		/*
 		 * pP' = (X & fP) | (pI & fI)
+		 * The addition of pA' is handled later.
 		 */
 		new->cap_permitted.cap[i] =
 			(new->cap_bset.cap[i] & permitted) |
@@ -474,10 +485,13 @@
 {
 	const struct cred *old = current_cred();
 	struct cred *new = bprm->cred;
-	bool effective, has_cap = false;
+	bool effective, has_cap = false, is_setid;
 	int ret;
 	kuid_t root_uid;
 
+	if (WARN_ON(!cap_ambient_invariant_ok(old)))
+		return -EPERM;
+
 	effective = false;
 	ret = get_file_caps(bprm, &effective, &has_cap);
 	if (ret < 0)
@@ -522,8 +536,9 @@
 	 *
 	 * In addition, if NO_NEW_PRIVS, then ensure we get no new privs.
 	 */
-	if ((!uid_eq(new->euid, old->uid) ||
-	     !gid_eq(new->egid, old->gid) ||
+	is_setid = !uid_eq(new->euid, old->uid) || !gid_eq(new->egid, old->gid);
+
+	if ((is_setid ||
 	     !cap_issubset(new->cap_permitted, old->cap_permitted)) &&
 	    bprm->unsafe & ~LSM_UNSAFE_PTRACE_CAP) {
 		/* downgrade; they get no more than they had, and maybe less */
@@ -539,10 +554,28 @@
 	new->suid = new->fsuid = new->euid;
 	new->sgid = new->fsgid = new->egid;
 
+	/* File caps or setid cancels ambient. */
+	if (has_cap || is_setid)
+		cap_clear(new->cap_ambient);
+
+	/*
+	 * Now that we've computed pA', update pP' to give:
+	 *   pP' = (X & fP) | (pI & fI) | pA'
+	 */
+	new->cap_permitted = cap_combine(new->cap_permitted, new->cap_ambient);
+
+	/*
+	 * Set pE' = (fE ? pP' : pA').  Because pA' is zero if fE is set,
+	 * this is the same as pE' = (fE ? pP' : 0) | pA'.
+	 */
 	if (effective)
 		new->cap_effective = new->cap_permitted;
 	else
-		cap_clear(new->cap_effective);
+		new->cap_effective = new->cap_ambient;
+
+	if (WARN_ON(!cap_ambient_invariant_ok(new)))
+		return -EPERM;
+
 	bprm->cap_effective = effective;
 
 	/*
@@ -557,7 +590,7 @@
 	 * Number 1 above might fail if you don't have a full bset, but I think
 	 * that is interesting information to audit.
 	 */
-	if (!cap_isclear(new->cap_effective)) {
+	if (!cap_issubset(new->cap_effective, new->cap_ambient)) {
 		if (!cap_issubset(CAP_FULL_SET, new->cap_effective) ||
 		    !uid_eq(new->euid, root_uid) || !uid_eq(new->uid, root_uid) ||
 		    issecure(SECURE_NOROOT)) {
@@ -568,6 +601,10 @@
 	}
 
 	new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
+
+	if (WARN_ON(!cap_ambient_invariant_ok(new)))
+		return -EPERM;
+
 	return 0;
 }
 
@@ -589,7 +626,7 @@
 	if (!uid_eq(cred->uid, root_uid)) {
 		if (bprm->cap_effective)
 			return 1;
-		if (!cap_isclear(cred->cap_permitted))
+		if (!cap_issubset(cred->cap_permitted, cred->cap_ambient))
 			return 1;
 	}
 
@@ -691,10 +728,18 @@
 	     uid_eq(old->suid, root_uid)) &&
 	    (!uid_eq(new->uid, root_uid) &&
 	     !uid_eq(new->euid, root_uid) &&
-	     !uid_eq(new->suid, root_uid)) &&
-	    !issecure(SECURE_KEEP_CAPS)) {
-		cap_clear(new->cap_permitted);
-		cap_clear(new->cap_effective);
+	     !uid_eq(new->suid, root_uid))) {
+		if (!issecure(SECURE_KEEP_CAPS)) {
+			cap_clear(new->cap_permitted);
+			cap_clear(new->cap_effective);
+		}
+
+		/*
+		 * Pre-ambient programs expect setresuid to nonroot followed
+		 * by exec to drop capabilities.  We should make sure that
+		 * this remains the case.
+		 */
+		cap_clear(new->cap_ambient);
 	}
 	if (uid_eq(old->euid, root_uid) && !uid_eq(new->euid, root_uid))
 		cap_clear(new->cap_effective);
@@ -924,6 +969,44 @@
 			new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
 		return commit_creds(new);
 
+	case PR_CAP_AMBIENT:
+		if (arg2 == PR_CAP_AMBIENT_CLEAR_ALL) {
+			if (arg3 | arg4 | arg5)
+				return -EINVAL;
+
+			new = prepare_creds();
+			if (!new)
+				return -ENOMEM;
+			cap_clear(new->cap_ambient);
+			return commit_creds(new);
+		}
+
+		if (((!cap_valid(arg3)) | arg4 | arg5))
+			return -EINVAL;
+
+		if (arg2 == PR_CAP_AMBIENT_IS_SET) {
+			return !!cap_raised(current_cred()->cap_ambient, arg3);
+		} else if (arg2 != PR_CAP_AMBIENT_RAISE &&
+			   arg2 != PR_CAP_AMBIENT_LOWER) {
+			return -EINVAL;
+		} else {
+			if (arg2 == PR_CAP_AMBIENT_RAISE &&
+			    (!cap_raised(current_cred()->cap_permitted, arg3) ||
+			     !cap_raised(current_cred()->cap_inheritable,
+					 arg3) ||
+			     issecure(SECURE_NO_CAP_AMBIENT_RAISE)))
+				return -EPERM;
+
+			new = prepare_creds();
+			if (!new)
+				return -ENOMEM;
+			if (arg2 == PR_CAP_AMBIENT_RAISE)
+				cap_raise(new->cap_ambient, arg3);
+			else
+				cap_lower(new->cap_ambient, arg3);
+			return commit_creds(new);
+		}
+
 	default:
 		/* No functionality available - continue with default */
 		return -ENOSYS;
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index bd536cb..43b4cdd 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -848,6 +848,7 @@
 	new->cap_inheritable	= old->cap_inheritable;
 	new->cap_permitted	= old->cap_permitted;
 	new->cap_effective	= old->cap_effective;
+	new->cap_ambient	= old->cap_ambient;
 	new->cap_bset		= old->cap_bset;
 
 	new->jit_keyring	= old->jit_keyring;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 564079c..cdf4c58 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1100,7 +1100,7 @@
 		seq_puts(m, prefix);
 		if (has_comma)
 			seq_putc(m, '\"');
-		seq_puts(m, opts->mnt_opts[i]);
+		seq_escape(m, opts->mnt_opts[i], "\"\n\\");
 		if (has_comma)
 			seq_putc(m, '\"');
 	}
diff --git a/sound/ac97_bus.c b/sound/ac97_bus.c
index 2b50cbe..52e4bc5 100644
--- a/sound/ac97_bus.c
+++ b/sound/ac97_bus.c
@@ -18,6 +18,68 @@
 #include <sound/ac97_codec.h>
 
 /*
+ * snd_ac97_check_id() - Reads and checks the vendor ID of the device
+ * @ac97: The AC97 device to check
+ * @id: The ID to compare to
+ * @id_mask: Mask that is applied to the device ID before comparing to @id
+ *
+ * If @id is 0 this function returns true if the read device vendor ID is
+ * a valid ID. If @id is non-zero this function returns true if @id
+ * matches the read vendor ID. Otherwise the function returns false.
+ */
+static bool snd_ac97_check_id(struct snd_ac97 *ac97, unsigned int id,
+	unsigned int id_mask)
+{
+	ac97->id = ac97->bus->ops->read(ac97, AC97_VENDOR_ID1) << 16;
+	ac97->id |= ac97->bus->ops->read(ac97, AC97_VENDOR_ID2);
+
+	if (ac97->id == 0x0 || ac97->id == 0xffffffff)
+		return false;
+
+	if (id != 0 && id != (ac97->id & id_mask))
+		return false;
+
+	return true;
+}
+
+/**
+ * snd_ac97_reset() - Reset AC'97 device
+ * @ac97: The AC'97 device to reset
+ * @try_warm: Try a warm reset first
+ * @id: Expected device vendor ID
+ * @id_mask: Mask that is applied to the device ID before comparing to @id
+ *
+ * This function resets the AC'97 device. If @try_warm is true the function
+ * first performs a warm reset. If the warm reset is successful the function
+ * returns 1. Otherwise, or if @try_warm is false, the function issues a cold
+ * reset followed by a warm reset. If this is successful the function returns 0,
+ * otherwise a negative error code. If @id is 0 any valid device ID will be
+ * accepted, otherwise only the ID that matches @id and @id_mask is accepted.
+ */
+int snd_ac97_reset(struct snd_ac97 *ac97, bool try_warm, unsigned int id,
+	unsigned int id_mask)
+{
+	struct snd_ac97_bus_ops *ops = ac97->bus->ops;
+
+	if (try_warm && ops->warm_reset) {
+		ops->warm_reset(ac97);
+		if (snd_ac97_check_id(ac97, id, id_mask))
+			return 1;
+	}
+
+	if (ops->reset)
+		ops->reset(ac97);
+	if (ops->warm_reset)
+		ops->warm_reset(ac97);
+
+	if (snd_ac97_check_id(ac97, id, id_mask))
+		return 0;
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(snd_ac97_reset);
+
+/*
  * Let drivers decide whether they want to support given codec from their
  * probe method. Drivers have direct access to the struct snd_ac97
  * structure and may  decide based on the id field amongst other things.
@@ -27,35 +89,9 @@
 	return 1;
 }
 
-#ifdef CONFIG_PM
-static int ac97_bus_suspend(struct device *dev, pm_message_t state)
-{
-	int ret = 0;
-
-	if (dev->driver && dev->driver->suspend)
-		ret = dev->driver->suspend(dev, state);
-
-	return ret;
-}
-
-static int ac97_bus_resume(struct device *dev)
-{
-	int ret = 0;
-
-	if (dev->driver && dev->driver->resume)
-		ret = dev->driver->resume(dev);
-
-	return ret;
-}
-#endif /* CONFIG_PM */
-
 struct bus_type ac97_bus_type = {
 	.name		= "ac97",
 	.match		= ac97_bus_match,
-#ifdef CONFIG_PM
-	.suspend	= ac97_bus_suspend,
-	.resume		= ac97_bus_resume,
-#endif /* CONFIG_PM */
 };
 
 static int __init ac97_bus_init(void)
diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
index 23c371ec..a04edff 100644
--- a/sound/aoa/codecs/onyx.c
+++ b/sound/aoa/codecs/onyx.c
@@ -1050,7 +1050,6 @@
 static struct i2c_driver onyx_driver = {
 	.driver = {
 		.name = "aoa_codec_onyx",
-		.owner = THIS_MODULE,
 	},
 	.probe = onyx_i2c_probe,
 	.remove = onyx_i2c_remove,
diff --git a/sound/aoa/codecs/tas.c b/sound/aoa/codecs/tas.c
index 364c7c4..78ed1ff 100644
--- a/sound/aoa/codecs/tas.c
+++ b/sound/aoa/codecs/tas.c
@@ -939,7 +939,6 @@
 static struct i2c_driver tas_driver = {
 	.driver = {
 		.name = "aoa_codec_tas",
-		.owner = THIS_MODULE,
 	},
 	.probe = tas_i2c_probe,
 	.remove = tas_i2c_remove,
diff --git a/sound/aoa/fabrics/layout.c b/sound/aoa/fabrics/layout.c
index 9dc5806..8f71f7e 100644
--- a/sound/aoa/fabrics/layout.c
+++ b/sound/aoa/fabrics/layout.c
@@ -1120,10 +1120,10 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int aoa_fabric_layout_suspend(struct soundbus_dev *sdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int aoa_fabric_layout_suspend(struct device *dev)
 {
-	struct layout_dev *ldev = dev_get_drvdata(&sdev->ofdev.dev);
+	struct layout_dev *ldev = dev_get_drvdata(dev);
 
 	if (ldev->gpio.methods && ldev->gpio.methods->all_amps_off)
 		ldev->gpio.methods->all_amps_off(&ldev->gpio);
@@ -1131,15 +1131,19 @@
 	return 0;
 }
 
-static int aoa_fabric_layout_resume(struct soundbus_dev *sdev)
+static int aoa_fabric_layout_resume(struct device *dev)
 {
-	struct layout_dev *ldev = dev_get_drvdata(&sdev->ofdev.dev);
+	struct layout_dev *ldev = dev_get_drvdata(dev);
 
 	if (ldev->gpio.methods && ldev->gpio.methods->all_amps_restore)
 		ldev->gpio.methods->all_amps_restore(&ldev->gpio);
 
 	return 0;
 }
+
+static SIMPLE_DEV_PM_OPS(aoa_fabric_layout_pm_ops,
+	aoa_fabric_layout_suspend, aoa_fabric_layout_resume);
+
 #endif
 
 static struct soundbus_driver aoa_soundbus_driver = {
@@ -1147,12 +1151,11 @@
 	.owner = THIS_MODULE,
 	.probe = aoa_fabric_layout_probe,
 	.remove = aoa_fabric_layout_remove,
-#ifdef CONFIG_PM
-	.suspend = aoa_fabric_layout_suspend,
-	.resume = aoa_fabric_layout_resume,
-#endif
 	.driver = {
 		.owner = THIS_MODULE,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &aoa_fabric_layout_pm_ops,
+#endif
 	}
 };
 
diff --git a/sound/aoa/soundbus/core.c b/sound/aoa/soundbus/core.c
index 3edf736..70bcaa7 100644
--- a/sound/aoa/soundbus/core.c
+++ b/sound/aoa/soundbus/core.c
@@ -126,30 +126,6 @@
 		drv->shutdown(soundbus_dev);
 }
 
-#ifdef CONFIG_PM
-
-static int soundbus_device_suspend(struct device *dev, pm_message_t state)
-{
-	struct soundbus_dev * soundbus_dev = to_soundbus_device(dev);
-	struct soundbus_driver * drv = to_soundbus_driver(dev->driver);
-
-	if (dev->driver && drv->suspend)
-		return drv->suspend(soundbus_dev, state);
-	return 0;
-}
-
-static int soundbus_device_resume(struct device * dev)
-{
-	struct soundbus_dev * soundbus_dev = to_soundbus_device(dev);
-	struct soundbus_driver * drv = to_soundbus_driver(dev->driver);
-
-	if (dev->driver && drv->resume)
-		return drv->resume(soundbus_dev);
-	return 0;
-}
-
-#endif /* CONFIG_PM */
-
 /* soundbus_dev_attrs is declared in sysfs.c */
 ATTRIBUTE_GROUPS(soundbus_dev);
 static struct bus_type soundbus_bus_type = {
@@ -158,10 +134,6 @@
 	.uevent		= soundbus_uevent,
 	.remove		= soundbus_device_remove,
 	.shutdown	= soundbus_device_shutdown,
-#ifdef CONFIG_PM
-	.suspend	= soundbus_device_suspend,
-	.resume		= soundbus_device_resume,
-#endif
 	.dev_groups	= soundbus_dev_groups,
 };
 
diff --git a/sound/aoa/soundbus/soundbus.h b/sound/aoa/soundbus/soundbus.h
index 21e756c..ae40224 100644
--- a/sound/aoa/soundbus/soundbus.h
+++ b/sound/aoa/soundbus/soundbus.h
@@ -188,8 +188,6 @@
 	int	(*probe)(struct soundbus_dev* dev);
 	int	(*remove)(struct soundbus_dev* dev);
 
-	int	(*suspend)(struct soundbus_dev* dev, pm_message_t state);
-	int	(*resume)(struct soundbus_dev* dev);
 	int	(*shutdown)(struct soundbus_dev* dev);
 
 	struct device_driver driver;
diff --git a/sound/firewire/bebob/bebob_pcm.c b/sound/firewire/bebob/bebob_pcm.c
index 7a2c1f5..c0f018a 100644
--- a/sound/firewire/bebob/bebob_pcm.c
+++ b/sound/firewire/bebob/bebob_pcm.c
@@ -211,26 +211,38 @@
 		      struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_bebob *bebob = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
 		atomic_inc(&bebob->substreams_counter);
 	amdtp_stream_set_pcm_format(&bebob->tx_stream,
 				    params_format(hw_params));
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+
+	return 0;
 }
 static int
 pcm_playback_hw_params(struct snd_pcm_substream *substream,
 		       struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_bebob *bebob = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
 		atomic_inc(&bebob->substreams_counter);
 	amdtp_stream_set_pcm_format(&bebob->rx_stream,
 				    params_format(hw_params));
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+
+	return 0;
 }
 
 static int
diff --git a/sound/firewire/dice/dice-pcm.c b/sound/firewire/dice/dice-pcm.c
index f7771451..4e67b1d 100644
--- a/sound/firewire/dice/dice-pcm.c
+++ b/sound/firewire/dice/dice-pcm.c
@@ -230,6 +230,12 @@
 			     struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_dice *dice = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
 		mutex_lock(&dice->mutex);
@@ -240,13 +246,18 @@
 	amdtp_stream_set_pcm_format(&dice->tx_stream,
 				    params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 static int playback_hw_params(struct snd_pcm_substream *substream,
 			      struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_dice *dice = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
 		mutex_lock(&dice->mutex);
@@ -257,8 +268,7 @@
 	amdtp_stream_set_pcm_format(&dice->rx_stream,
 				    params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 
 static int capture_hw_free(struct snd_pcm_substream *substream)
diff --git a/sound/firewire/fireworks/fireworks_pcm.c b/sound/firewire/fireworks/fireworks_pcm.c
index 8a34753..c30b2ff 100644
--- a/sound/firewire/fireworks/fireworks_pcm.c
+++ b/sound/firewire/fireworks/fireworks_pcm.c
@@ -244,25 +244,35 @@
 				 struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_efw *efw = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
 		atomic_inc(&efw->capture_substreams);
 	amdtp_stream_set_pcm_format(&efw->tx_stream, params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
 				  struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_efw *efw = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
 		atomic_inc(&efw->playback_substreams);
 	amdtp_stream_set_pcm_format(&efw->rx_stream, params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 
 static int pcm_capture_hw_free(struct snd_pcm_substream *substream)
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
index 67ade07..9c73930 100644
--- a/sound/firewire/oxfw/oxfw-pcm.c
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -231,7 +231,12 @@
 				 struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_oxfw *oxfw = substream->private_data;
+	int err;
 
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
 		mutex_lock(&oxfw->mutex);
@@ -241,13 +246,18 @@
 
 	amdtp_stream_set_pcm_format(&oxfw->tx_stream, params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
 				  struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_oxfw *oxfw = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
 		mutex_lock(&oxfw->mutex);
@@ -257,8 +267,7 @@
 
 	amdtp_stream_set_pcm_format(&oxfw->rx_stream, params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 
 static int pcm_capture_hw_free(struct snd_pcm_substream *substream)
diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c
index 873d40f..77ad5b9 100644
--- a/sound/firewire/oxfw/oxfw-stream.c
+++ b/sound/firewire/oxfw/oxfw-stream.c
@@ -512,12 +512,11 @@
 	if (err < 0)
 		goto end;
 
-	formats[eid] = kmalloc(*len, GFP_KERNEL);
+	formats[eid] = kmemdup(buf, *len, GFP_KERNEL);
 	if (formats[eid] == NULL) {
 		err = -ENOMEM;
 		goto end;
 	}
-	memcpy(formats[eid], buf, *len);
 
 	/* apply the format for each available sampling rate */
 	for (i = 0; i < ARRAY_SIZE(oxfw_rate_table); i++) {
@@ -531,12 +530,11 @@
 			continue;
 
 		eid++;
-		formats[eid] = kmalloc(*len, GFP_KERNEL);
+		formats[eid] = kmemdup(buf, *len, GFP_KERNEL);
 		if (formats[eid] == NULL) {
 			err = -ENOMEM;
 			goto end;
 		}
-		memcpy(formats[eid], buf, *len);
 		formats[eid][2] = avc_stream_rate_table[i];
 	}
 
@@ -594,12 +592,11 @@
 		if (err < 0)
 			break;
 
-		formats[eid] = kmalloc(len, GFP_KERNEL);
+		formats[eid] = kmemdup(buf, len, GFP_KERNEL);
 		if (formats[eid] == NULL) {
 			err = -ENOMEM;
 			break;
 		}
-		memcpy(formats[eid], buf, len);
 
 		/* get next entry */
 		len = AVC_GENERIC_FRAME_MAXIMUM_BYTES;
diff --git a/sound/hda/ext/hdac_ext_bus.c b/sound/hda/ext/hdac_ext_bus.c
index 0aa5d9e..4449d1a9 100644
--- a/sound/hda/ext/hdac_ext_bus.c
+++ b/sound/hda/ext/hdac_ext_bus.c
@@ -125,7 +125,7 @@
 }
 
 /**
- * snd_hdac_ext_device_init - initialize the HDA extended codec base device
+ * snd_hdac_ext_bus_device_init - initialize the HDA extended codec base device
  * @ebus: hdac extended bus to attach to
  * @addr: codec address
  *
@@ -133,14 +133,16 @@
  */
 int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *ebus, int addr)
 {
+	struct hdac_ext_device *edev;
 	struct hdac_device *hdev = NULL;
 	struct hdac_bus *bus = ebus_to_hbus(ebus);
 	char name[15];
 	int ret;
 
-	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
-	if (!hdev)
+	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+	if (!edev)
 		return -ENOMEM;
+	hdev = &edev->hdac;
 
 	snprintf(name, sizeof(name), "ehdaudio%dD%d", ebus->idx, addr);
 
@@ -158,6 +160,7 @@
 		snd_hdac_ext_bus_device_exit(hdev);
 		return ret;
 	}
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_init);
@@ -168,7 +171,94 @@
  */
 void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev)
 {
+	struct hdac_ext_device *edev = to_ehdac_device(hdev);
+
 	snd_hdac_device_exit(hdev);
-	kfree(hdev);
+	kfree(edev);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_exit);
+
+/**
+ * snd_hdac_ext_bus_device_remove - remove HD-audio extended codec base devices
+ *
+ * @ebus: HD-audio extended bus
+ */
+void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus)
+{
+	struct hdac_device *codec, *__codec;
+	/*
+	 * We need to remove all the codec device objects created in
+	 * snd_hdac_ext_bus_device_init().
+	 */
+	list_for_each_entry_safe(codec, __codec, &ebus->bus.codec_list, list) {
+		snd_hdac_device_unregister(codec);
+		put_device(&codec->dev);
+	}
+}
+EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_remove);
+#define dev_to_hdac(dev) (container_of((dev), \
+			struct hdac_device, dev))
+
+static inline struct hdac_ext_driver *get_edrv(struct device *dev)
+{
+	struct hdac_driver *hdrv = drv_to_hdac_driver(dev->driver);
+	struct hdac_ext_driver *edrv = to_ehdac_driver(hdrv);
+
+	return edrv;
+}
+
+static inline struct hdac_ext_device *get_edev(struct device *dev)
+{
+	struct hdac_device *hdev = dev_to_hdac_dev(dev);
+	struct hdac_ext_device *edev = to_ehdac_device(hdev);
+
+	return edev;
+}
+
+static int hda_ext_drv_probe(struct device *dev)
+{
+	return (get_edrv(dev))->probe(get_edev(dev));
+}
+
+static int hdac_ext_drv_remove(struct device *dev)
+{
+	return (get_edrv(dev))->remove(get_edev(dev));
+}
+
+static void hdac_ext_drv_shutdown(struct device *dev)
+{
+	return (get_edrv(dev))->shutdown(get_edev(dev));
+}
+
+/**
+ * snd_hda_ext_driver_register - register a driver for ext hda devices
+ *
+ * @drv: ext hda driver structure
+ */
+int snd_hda_ext_driver_register(struct hdac_ext_driver *drv)
+{
+	drv->hdac.type = HDA_DEV_ASOC;
+	drv->hdac.driver.bus = &snd_hda_bus_type;
+	/* we use default match */
+
+	if (drv->probe)
+		drv->hdac.driver.probe = hda_ext_drv_probe;
+	if (drv->remove)
+		drv->hdac.driver.remove = hdac_ext_drv_remove;
+	if (drv->shutdown)
+		drv->hdac.driver.shutdown = hdac_ext_drv_shutdown;
+
+	return driver_register(&drv->hdac.driver);
+}
+EXPORT_SYMBOL_GPL(snd_hda_ext_driver_register);
+
+/**
+ * snd_hda_ext_driver_unregister - unregister a driver for ext hda devices
+ *
+ * @drv: ext hda driver structure
+ */
+void snd_hda_ext_driver_unregister(struct hdac_ext_driver *drv)
+{
+	driver_unregister(&drv->hdac.driver);
+}
+EXPORT_SYMBOL_GPL(snd_hda_ext_driver_unregister);
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index 358f161..63215b1 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -177,8 +177,8 @@
 		hlink->bus = bus;
 		hlink->ml_addr = ebus->mlcap + AZX_ML_BASE +
 					(AZX_ML_INTERVAL * idx);
-		hlink->lcaps  = snd_hdac_chip_readl(bus, ML_LCAP);
-		hlink->lsdiid = snd_hdac_chip_readw(bus, ML_LSDIID);
+		hlink->lcaps  = readl(hlink->ml_addr + AZX_REG_ML_LCAP);
+		hlink->lsdiid = readw(hlink->ml_addr + AZX_REG_ML_LSDIID);
 
 		list_add_tail(&hlink->list, &ebus->hlink_list);
 	}
@@ -243,7 +243,7 @@
 	timeout = 50;
 
 	do {
-		val = snd_hdac_chip_readl(link->bus, ML_LCTL);
+		val = readl(link->ml_addr + AZX_REG_ML_LCTL);
 		if (enable) {
 			if (((val & mask) >> AZX_MLCTL_CPA))
 				return 0;
@@ -263,7 +263,7 @@
  */
 int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link)
 {
-	snd_hdac_chip_updatel(link->bus, ML_LCTL, 0, AZX_MLCTL_SPA);
+	snd_hdac_updatel(link->ml_addr, AZX_REG_ML_LCTL, 0, AZX_MLCTL_SPA);
 
 	return check_hdac_link_power_active(link, true);
 }
@@ -275,8 +275,28 @@
  */
 int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link)
 {
-	snd_hdac_chip_updatel(link->bus, ML_LCTL, AZX_MLCTL_SPA, 0);
+	snd_hdac_updatel(link->ml_addr, AZX_REG_ML_LCTL, AZX_MLCTL_SPA, 0);
 
 	return check_hdac_link_power_active(link, false);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_down);
+
+/**
+ * snd_hdac_ext_bus_link_power_down_all - power down all HDA links
+ * @ebus: HD-audio extended bus
+ */
+int snd_hdac_ext_bus_link_power_down_all(struct hdac_ext_bus *ebus)
+{
+	struct hdac_ext_link *hlink = NULL;
+	int ret;
+
+	list_for_each_entry(hlink, &ebus->hlink_list, list) {
+		snd_hdac_updatel(hlink->ml_addr, AZX_REG_ML_LCTL, AZX_MLCTL_SPA, 0);
+		ret = check_hdac_link_power_active(hlink, false);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_down_all);
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index 3de47dd..33ba77d 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -49,6 +49,16 @@
 				AZX_PPLC_INTERVAL * idx;
 	}
 
+	if (ebus->spbcap) {
+		stream->spib_addr = ebus->spbcap + AZX_SPB_BASE +
+					AZX_SPB_INTERVAL * idx +
+					AZX_SPB_SPIB;
+
+		stream->fifo_addr = ebus->spbcap + AZX_SPB_BASE +
+					AZX_SPB_INTERVAL * idx +
+					AZX_SPB_MAXFIFO;
+	}
+
 	stream->decoupled = false;
 	snd_hdac_stream_init(bus, &stream->hstream, idx, direction, tag);
 }
@@ -281,17 +291,12 @@
 	struct hdac_ext_stream *res = NULL;
 	struct hdac_stream *stream = NULL;
 	struct hdac_bus *hbus = &ebus->bus;
-	int key;
 
 	if (!ebus->ppcap) {
 		dev_err(hbus->dev, "stream type not supported\n");
 		return NULL;
 	}
 
-	/* make a non-zero unique key for the substream */
-	key = (substream->pcm->device << 16) | (substream->number << 2) |
-			(substream->stream + 1);
-
 	list_for_each_entry(stream, &hbus->stream_list, list) {
 		struct hdac_ext_stream *hstream = container_of(stream,
 						struct hdac_ext_stream,
@@ -310,7 +315,6 @@
 		spin_lock_irq(&hbus->reg_lock);
 		res->hstream.opened = 1;
 		res->hstream.running = 0;
-		res->hstream.assigned_key = key;
 		res->hstream.substream = substream;
 		spin_unlock_irq(&hbus->reg_lock);
 	}
@@ -423,7 +427,7 @@
 
 	mask |= (1 << index);
 
-	register_mask = snd_hdac_chip_readl(bus, SPB_SPBFCCTL);
+	register_mask = readl(ebus->spbcap + AZX_REG_SPB_SPBFCCTL);
 
 	mask |= register_mask;
 
@@ -435,6 +439,50 @@
 EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_spbcap_enable);
 
 /**
+ * snd_hdac_ext_stream_set_spib - sets the spib value of a stream
+ * @ebus: HD-audio ext core bus
+ * @stream: hdac_ext_stream
+ * @value: spib value to set
+ */
+int snd_hdac_ext_stream_set_spib(struct hdac_ext_bus *ebus,
+				 struct hdac_ext_stream *stream, u32 value)
+{
+	struct hdac_bus *bus = &ebus->bus;
+
+	if (!ebus->spbcap) {
+		dev_err(bus->dev, "Address of SPB capability is NULL\n");
+		return -EINVAL;
+	}
+
+	writel(value, stream->spib_addr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_set_spib);
+
+/**
+ * snd_hdac_ext_stream_get_spbmaxfifo - gets the spib maxfifo value of a stream
+ * @ebus: HD-audio ext core bus
+ * @stream: hdac_ext_stream
+ *
+ * Return maxfifo for the stream
+ */
+int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_ext_bus *ebus,
+				 struct hdac_ext_stream *stream)
+{
+	struct hdac_bus *bus = &ebus->bus;
+
+	if (!ebus->spbcap) {
+		dev_err(bus->dev, "Address of SPB capability is NULL\n");
+		return -EINVAL;
+	}
+
+	return readl(stream->fifo_addr);
+}
+EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_get_spbmaxfifo);
+
+
+/**
  * snd_hdac_ext_stop_streams - stop all stream if running
  * @ebus: HD-audio ext core bus
  */
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index cdee710..db96042 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -372,6 +372,36 @@
 }
 EXPORT_SYMBOL_GPL(snd_hdac_refresh_widgets);
 
+/**
+ * snd_hdac_refresh_widget_sysfs - Reset the codec widgets and reinit the
+ * codec sysfs
+ * @codec: the codec object
+ *
+ * First we need to remove the sysfs entries, then refresh the widgets,
+ * and lastly recreate them.
+ */
+int snd_hdac_refresh_widget_sysfs(struct hdac_device *codec)
+{
+	int ret;
+
+	if (device_is_registered(&codec->dev))
+		hda_widget_sysfs_exit(codec);
+	ret = snd_hdac_refresh_widgets(codec);
+	if (ret) {
+		dev_err(&codec->dev, "failed to refresh widget: %d\n", ret);
+		return ret;
+	}
+	if (device_is_registered(&codec->dev)) {
+		ret = hda_widget_sysfs_init(codec);
+		if (ret) {
+			dev_err(&codec->dev, "failed to init sysfs: %d\n", ret);
+			return ret;
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_refresh_widget_sysfs);
+
 /* return CONNLIST_LEN parameter of the given widget */
 static unsigned int get_num_conns(struct hdac_device *codec, hda_nid_t nid)
 {
@@ -501,23 +531,27 @@
  * This function calls the runtime PM helper to power up the given codec.
  * Unlike snd_hdac_power_up_pm(), you should call this only for the code
  * path that isn't included in PM path.  Otherwise it gets stuck.
+ *
+ * Returns zero if successful, or a negative error code.
  */
-void snd_hdac_power_up(struct hdac_device *codec)
+int snd_hdac_power_up(struct hdac_device *codec)
 {
-	pm_runtime_get_sync(&codec->dev);
+	return pm_runtime_get_sync(&codec->dev);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_power_up);
 
 /**
  * snd_hdac_power_down - power down the codec
  * @codec: the codec object
+ *
+ * Returns zero if successful, or a negative error code.
  */
-void snd_hdac_power_down(struct hdac_device *codec)
+int snd_hdac_power_down(struct hdac_device *codec)
 {
 	struct device *dev = &codec->dev;
 
 	pm_runtime_mark_last_busy(dev);
-	pm_runtime_put_autosuspend(dev);
+	return pm_runtime_put_autosuspend(dev);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_power_down);
 
@@ -529,11 +563,14 @@
  * which may be called by PM suspend/resume again.  OTOH, if a power-up
  * call must wake up the sleeper (e.g. in a kctl callback), use
  * snd_hdac_power_up() instead.
+ *
+ * Returns zero if successful, or a negative error code.
  */
-void snd_hdac_power_up_pm(struct hdac_device *codec)
+int snd_hdac_power_up_pm(struct hdac_device *codec)
 {
 	if (!atomic_inc_not_zero(&codec->in_pm))
-		snd_hdac_power_up(codec);
+		return snd_hdac_power_up(codec);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);
 
@@ -543,11 +580,14 @@
  *
  * Like snd_hdac_power_up_pm(), this function is used in a recursive
  * code path like init code which may be called by PM suspend/resume again.
+ *
+ * Returns zero if successful, or a negative error code.
  */
-void snd_hdac_power_down_pm(struct hdac_device *codec)
+int snd_hdac_power_down_pm(struct hdac_device *codec)
 {
 	if (atomic_dec_if_positive(&codec->in_pm) < 0)
-		snd_hdac_power_down(codec);
+		return snd_hdac_power_down(codec);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_power_down_pm);
 #endif
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 5676b84..55c3df4 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -134,6 +134,16 @@
 	return !strcmp(dev->driver->name, "i915");
 }
 
+int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops)
+{
+	if (WARN_ON(!hdac_acomp))
+		return -ENODEV;
+
+	hdac_acomp->audio_ops = aops;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_i915_register_notifier);
+
 int snd_hdac_i915_init(struct hdac_bus *bus)
 {
 	struct component_match *match = NULL;
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
index 1eabcdf..b0ed870 100644
--- a/sound/hda/hdac_regmap.c
+++ b/sound/hda/hdac_regmap.c
@@ -410,8 +410,9 @@
 
 	err = reg_raw_write(codec, reg, val);
 	if (err == -EAGAIN) {
-		snd_hdac_power_up_pm(codec);
-		err = reg_raw_write(codec, reg, val);
+		err = snd_hdac_power_up_pm(codec);
+		if (!err)
+			err = reg_raw_write(codec, reg, val);
 		snd_hdac_power_down_pm(codec);
 	}
 	return err;
@@ -442,8 +443,9 @@
 
 	err = reg_raw_read(codec, reg, val);
 	if (err == -EAGAIN) {
-		snd_hdac_power_up_pm(codec);
-		err = reg_raw_read(codec, reg, val);
+		err = snd_hdac_power_up_pm(codec);
+		if (!err)
+			err = reg_raw_read(codec, reg, val);
 		snd_hdac_power_down_pm(codec);
 	}
 	return err;
diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
index 4c15d0a..8981159 100644
--- a/sound/hda/hdac_stream.c
+++ b/sound/hda/hdac_stream.c
@@ -286,6 +286,28 @@
 }
 EXPORT_SYMBOL_GPL(snd_hdac_stream_release);
 
+/**
+ * snd_hdac_get_stream - return hdac_stream based on stream_tag and
+ * direction
+ *
+ * @bus: HD-audio core bus
+ * @dir: direction for the stream to be found
+ * @stream_tag: stream tag for stream to be found
+ */
+struct hdac_stream *snd_hdac_get_stream(struct hdac_bus *bus,
+					int dir, int stream_tag)
+{
+	struct hdac_stream *s;
+
+	list_for_each_entry(s, &bus->stream_list, list) {
+		if (s->direction == dir && s->stream_tag == stream_tag)
+			return s;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_get_stream);
+
 /*
  * set up a BDL entry
  */
diff --git a/sound/hda/hdac_sysfs.c b/sound/hda/hdac_sysfs.c
index 0a6ce3b..c71142d 100644
--- a/sound/hda/hdac_sysfs.c
+++ b/sound/hda/hdac_sysfs.c
@@ -321,8 +321,7 @@
 			free_widget_node(*p, &widget_node_group);
 		kfree(tree->nodes);
 	}
-	if (tree->root)
-		kobject_put(tree->root);
+	kobject_put(tree->root);
 	kfree(tree);
 	codec->widgets = NULL;
 }
@@ -391,6 +390,9 @@
 {
 	int err;
 
+	if (codec->widgets)
+		return 0; /* already created */
+
 	err = widget_tree_create(codec);
 	if (err < 0) {
 		widget_tree_free(codec);
diff --git a/sound/pci/echoaudio/darla20_dsp.c b/sound/pci/echoaudio/darla20_dsp.c
index febee5b..320837b 100644
--- a/sound/pci/echoaudio/darla20_dsp.c
+++ b/sound/pci/echoaudio/darla20_dsp.c
@@ -44,18 +44,18 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_DARLA20_DSP;
 	chip->spdif_status = GD_SPDIF_STATUS_UNDEF;
 	chip->clock_state = GD_CLOCK_UNDEF;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/darla24_dsp.c b/sound/pci/echoaudio/darla24_dsp.c
index 7b4f6fd..8736b5e 100644
--- a/sound/pci/echoaudio/darla24_dsp.c
+++ b/sound/pci/echoaudio/darla24_dsp.c
@@ -44,17 +44,17 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_DARLA24_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL |
 		ECHO_CLOCK_BIT_ESYNC;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/echo3g_dsp.c b/sound/pci/echoaudio/echo3g_dsp.c
index ae11ce1..6deb80c 100644
--- a/sound/pci/echoaudio/echo3g_dsp.c
+++ b/sound/pci/echoaudio/echo3g_dsp.c
@@ -59,8 +59,8 @@
 		cpu_to_le32((E3G_MAGIC_NUMBER / 48000) - 2);
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
-	chip->has_midi = TRUE;
+	chip->bad_board = true;
+	chip->has_midi = true;
 	chip->dsp_code_to_load = FW_ECHO3G_DSP;
 
 	/* Load the DSP code and the ASIC on the PCI card and get
@@ -78,8 +78,8 @@
 		chip->px_analog_in = chip->bx_analog_in = 14;
 		chip->px_digital_in = chip->bx_digital_in = 16;
 		chip->px_num = chip->bx_num = 24;
-		chip->has_phantom_power = TRUE;
-		chip->hasnt_input_nominal_level = TRUE;
+		chip->has_phantom_power = true;
+		chip->hasnt_input_nominal_level = true;
 	} else if (err == E3G_LAYLA3G_BOX_TYPE) {
 		chip->input_clock_types =	ECHO_CLOCK_BIT_INTERNAL |
 						ECHO_CLOCK_BIT_SPDIF |
@@ -106,10 +106,10 @@
 static int set_mixer_defaults(struct echoaudio *chip)
 {
 	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
-	chip->professional_spdif = FALSE;
-	chip->non_audio_spdif = FALSE;
-	chip->bad_board = FALSE;
-	chip->phantom_power = FALSE;
+	chip->professional_spdif = false;
+	chip->non_audio_spdif = false;
+	chip->bad_board = false;
+	chip->phantom_power = false;
 	return init_line_levels(chip);
 }
 
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 862db9a..1cb85ae 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -2245,7 +2245,7 @@
 
 #ifdef ECHOCARD_HAS_MIDI
 	if (chip->midi_input_enabled)
-		enable_midi_input(chip, TRUE);
+		enable_midi_input(chip, true);
 	if (chip->midi_out)
 		snd_echo_midi_output_trigger(chip->midi_out, 1);
 #endif
diff --git a/sound/pci/echoaudio/echoaudio.h b/sound/pci/echoaudio/echoaudio.h
index 3251522..152ce15 100644
--- a/sound/pci/echoaudio/echoaudio.h
+++ b/sound/pci/echoaudio/echoaudio.h
@@ -153,9 +153,6 @@
 #define _ECHOAUDIO_H_
 
 
-#define TRUE 1
-#define FALSE 0
-
 #include "echoaudio_dsp.h"
 
 
@@ -378,8 +375,8 @@
 					 */
 	u8 output_clock;		/* Layla20 only */
 	char meters_enabled;		/* VU-meters status */
-	char asic_loaded;		/* Set TRUE when ASIC loaded */
-	char bad_board;			/* Set TRUE if DSP won't load */
+	char asic_loaded;		/* Set true when ASIC loaded */
+	char bad_board;			/* Set true if DSP won't load */
 	char professional_spdif;	/* 0 = consumer; 1 = professional */
 	char non_audio_spdif;		/* 3G - only */
 	char digital_in_automute;	/* Gina24, Layla24, Mona - only */
diff --git a/sound/pci/echoaudio/echoaudio_3g.c b/sound/pci/echoaudio/echoaudio_3g.c
index 2fa66dc..22c786b 100644
--- a/sound/pci/echoaudio/echoaudio_3g.c
+++ b/sound/pci/echoaudio/echoaudio_3g.c
@@ -41,7 +41,7 @@
 		return -EIO;
 
 	chip->comm_page->ext_box_status = cpu_to_le32(E3G_ASIC_NOT_LOADED);
-	chip->asic_loaded = FALSE;
+	chip->asic_loaded = false;
 	clear_handshake(chip);
 	send_vector(chip, DSP_VC_TEST_ASIC);
 
@@ -55,7 +55,7 @@
 	if (box_status == E3G_ASIC_NOT_LOADED)
 		return -ENODEV;
 
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	return box_status & E3G_BOX_TYPE_MASK;
 }
 
@@ -243,7 +243,7 @@
 	 * 48 kHz, internal clock, S/PDIF RCA mode */
 	if (box_type >= 0) {
 		err = write_control_reg(chip, E3G_48KHZ,
-					E3G_FREQ_REG_DEFAULT, TRUE);
+					E3G_FREQ_REG_DEFAULT, true);
 		if (err < 0)
 			return err;
 	}
@@ -378,16 +378,16 @@
 	int err, incompatible_clock;
 
 	/* Set clock to "internal" if it's not compatible with the new mode */
-	incompatible_clock = FALSE;
+	incompatible_clock = false;
 	switch (mode) {
 	case DIGITAL_MODE_SPDIF_OPTICAL:
 	case DIGITAL_MODE_SPDIF_RCA:
 		if (chip->input_clock == ECHO_CLOCK_ADAT)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	case DIGITAL_MODE_ADAT:
 		if (chip->input_clock == ECHO_CLOCK_SPDIF)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	default:
 		dev_err(chip->card->dev,
diff --git a/sound/pci/echoaudio/echoaudio_dsp.c b/sound/pci/echoaudio/echoaudio_dsp.c
index 1a9427a..15aae2f 100644
--- a/sound/pci/echoaudio/echoaudio_dsp.c
+++ b/sound/pci/echoaudio/echoaudio_dsp.c
@@ -103,8 +103,8 @@
 		cond_resched();
 	}
 
-	chip->bad_board = TRUE;		/* Set TRUE until DSP re-loaded */
-	dev_dbg(chip->card->dev, "write_dsp: Set bad_board to TRUE\n");
+	chip->bad_board = true;		/* Set true until DSP re-loaded */
+	dev_dbg(chip->card->dev, "write_dsp: Set bad_board to true\n");
 	return -EIO;
 }
 
@@ -126,8 +126,8 @@
 		cond_resched();
 	}
 
-	chip->bad_board = TRUE;		/* Set TRUE until DSP re-loaded */
-	dev_err(chip->card->dev, "read_dsp: Set bad_board to TRUE\n");
+	chip->bad_board = true;		/* Set true until DSP re-loaded */
+	dev_err(chip->card->dev, "read_dsp: Set bad_board to true\n");
 	return -EIO;
 }
 
@@ -166,7 +166,7 @@
 /* This card has no ASIC, just return ok */
 static inline int check_asic_status(struct echoaudio *chip)
 {
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	return 0;
 }
 
@@ -341,11 +341,11 @@
 		dev_warn(chip->card->dev, "DSP is already loaded!\n");
 		return 0;
 	}
-	chip->bad_board = TRUE;		/* Set TRUE until DSP loaded */
+	chip->bad_board = true;		/* Set true until DSP loaded */
 	chip->dsp_code = NULL;		/* Current DSP code not loaded */
-	chip->asic_loaded = FALSE;	/* Loading the DSP code will reset the ASIC */
+	chip->asic_loaded = false;	/* Loading the DSP code will reset the ASIC */
 
-	dev_dbg(chip->card->dev, "load_dsp: Set bad_board to TRUE\n");
+	dev_dbg(chip->card->dev, "load_dsp: Set bad_board to true\n");
 
 	/* If this board requires a resident loader, install it. */
 #ifdef DSP_56361
@@ -471,7 +471,7 @@
 			}
 
 			chip->dsp_code = code;		/* Show which DSP code loaded */
-			chip->bad_board = FALSE;	/* DSP OK */
+			chip->bad_board = false;	/* DSP OK */
 			return 0;
 		}
 		udelay(100);
@@ -951,10 +951,10 @@
 	/* Stops all active pipes (just to be sure) */
 	stop_transport(chip, chip->active_mask);
 
-	set_meters_on(chip, FALSE);
+	set_meters_on(chip, false);
 
 #ifdef ECHOCARD_HAS_MIDI
-	enable_midi_input(chip, FALSE);
+	enable_midi_input(chip, false);
 #endif
 
 	/* Go to sleep */
@@ -981,9 +981,9 @@
 
 	/* Init all the basic stuff */
 	chip->card_name = ECHOCARD_NAME;
-	chip->bad_board = TRUE;	/* Set TRUE until DSP loaded */
+	chip->bad_board = true;	/* Set true until DSP loaded */
 	chip->dsp_code = NULL;	/* Current DSP code not loaded */
-	chip->asic_loaded = FALSE;
+	chip->asic_loaded = false;
 	memset(chip->comm_page, 0, sizeof(struct comm_page));
 
 	/* Init the comm page */
diff --git a/sound/pci/echoaudio/echoaudio_gml.c b/sound/pci/echoaudio/echoaudio_gml.c
index 23a0994..834b39e 100644
--- a/sound/pci/echoaudio/echoaudio_gml.c
+++ b/sound/pci/echoaudio/echoaudio_gml.c
@@ -48,7 +48,7 @@
 	if (read_dsp(chip, &asic_status) < 0) {
 		dev_err(chip->card->dev,
 			"check_asic_status: failed on read_dsp\n");
-		chip->asic_loaded = FALSE;
+		chip->asic_loaded = false;
 		return -EIO;
 	}
 
@@ -192,7 +192,7 @@
 		}
 	}
 
-	if ((err = write_control_reg(chip, control_reg, FALSE)))
+	if ((err = write_control_reg(chip, control_reg, false)))
 		return err;
 	chip->professional_spdif = prof;
 	dev_dbg(chip->card->dev, "set_professional_spdif to %s\n",
diff --git a/sound/pci/echoaudio/gina20.c b/sound/pci/echoaudio/gina20.c
index 4fa32a2..67bd0c9 100644
--- a/sound/pci/echoaudio/gina20.c
+++ b/sound/pci/echoaudio/gina20.c
@@ -23,7 +23,7 @@
 #define ECHOCARD_HAS_INPUT_GAIN
 #define ECHOCARD_HAS_DIGITAL_IO
 #define ECHOCARD_HAS_EXTERNAL_CLOCK
-#define ECHOCARD_HAS_ADAT	FALSE
+#define ECHOCARD_HAS_ADAT	false
 
 /* Pipe indexes */
 #define PX_ANALOG_OUT	0	/* 8 */
diff --git a/sound/pci/echoaudio/gina20_dsp.c b/sound/pci/echoaudio/gina20_dsp.c
index 5dafe92..b237757 100644
--- a/sound/pci/echoaudio/gina20_dsp.c
+++ b/sound/pci/echoaudio/gina20_dsp.c
@@ -48,19 +48,19 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_GINA20_DSP;
 	chip->spdif_status = GD_SPDIF_STATUS_UNDEF;
 	chip->clock_state = GD_CLOCK_UNDEF;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL |
 		ECHO_CLOCK_BIT_SPDIF;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
@@ -69,7 +69,7 @@
 
 static int set_mixer_defaults(struct echoaudio *chip)
 {
-	chip->professional_spdif = FALSE;
+	chip->professional_spdif = false;
 	return init_line_levels(chip);
 }
 
diff --git a/sound/pci/echoaudio/gina24_dsp.c b/sound/pci/echoaudio/gina24_dsp.c
index 6971766..8eff2b4 100644
--- a/sound/pci/echoaudio/gina24_dsp.c
+++ b/sound/pci/echoaudio/gina24_dsp.c
@@ -52,7 +52,7 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->input_clock_types =
 		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
 		ECHO_CLOCK_BIT_ESYNC | ECHO_CLOCK_BIT_ESYNC96 |
@@ -76,7 +76,7 @@
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
@@ -86,8 +86,8 @@
 static int set_mixer_defaults(struct echoaudio *chip)
 {
 	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
-	chip->professional_spdif = FALSE;
-	chip->digital_in_automute = TRUE;
+	chip->professional_spdif = false;
+	chip->digital_in_automute = true;
 	return init_line_levels(chip);
 }
 
@@ -152,7 +152,7 @@
 	   48 kHz, internal clock, S/PDIF RCA mode */
 	if (!err) {
 		control_reg = GML_CONVERTER_ENABLE | GML_48KHZ;
-		err = write_control_reg(chip, control_reg, TRUE);
+		err = write_control_reg(chip, control_reg, true);
 	}
 	return err;
 }
@@ -226,7 +226,7 @@
 	chip->sample_rate = rate;
 	dev_dbg(chip->card->dev, "set_sample_rate: %d clock %d\n", rate, clock);
 
-	return write_control_reg(chip, control_reg, FALSE);
+	return write_control_reg(chip, control_reg, false);
 }
 
 
@@ -274,7 +274,7 @@
 	}
 
 	chip->input_clock = clock;
-	return write_control_reg(chip, control_reg, TRUE);
+	return write_control_reg(chip, control_reg, true);
 }
 
 
@@ -285,17 +285,17 @@
 	int err, incompatible_clock;
 
 	/* Set clock to "internal" if it's not compatible with the new mode */
-	incompatible_clock = FALSE;
+	incompatible_clock = false;
 	switch (mode) {
 	case DIGITAL_MODE_SPDIF_OPTICAL:
 	case DIGITAL_MODE_SPDIF_CDROM:
 	case DIGITAL_MODE_SPDIF_RCA:
 		if (chip->input_clock == ECHO_CLOCK_ADAT)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	case DIGITAL_MODE_ADAT:
 		if (chip->input_clock == ECHO_CLOCK_SPDIF)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	default:
 		dev_err(chip->card->dev,
@@ -333,7 +333,7 @@
 		break;
 	}
 
-	err = write_control_reg(chip, control_reg, TRUE);
+	err = write_control_reg(chip, control_reg, true);
 	spin_unlock_irq(&chip->lock);
 	if (err < 0)
 		return err;
diff --git a/sound/pci/echoaudio/indigo_dsp.c b/sound/pci/echoaudio/indigo_dsp.c
index 54edd67..c97dc83 100644
--- a/sound/pci/echoaudio/indigo_dsp.c
+++ b/sound/pci/echoaudio/indigo_dsp.c
@@ -49,16 +49,16 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_INDIGO_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/indigodj_dsp.c b/sound/pci/echoaudio/indigodj_dsp.c
index 2cf5cc0..2428b35 100644
--- a/sound/pci/echoaudio/indigodj_dsp.c
+++ b/sound/pci/echoaudio/indigodj_dsp.c
@@ -49,16 +49,16 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_INDIGO_DJ_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/indigodjx_dsp.c b/sound/pci/echoaudio/indigodjx_dsp.c
index 5252863..5fbd4a3 100644
--- a/sound/pci/echoaudio/indigodjx_dsp.c
+++ b/sound/pci/echoaudio/indigodjx_dsp.c
@@ -47,17 +47,17 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_INDIGO_DJX_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	err = load_firmware(chip);
 	if (err < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/indigoio_dsp.c b/sound/pci/echoaudio/indigoio_dsp.c
index 4e81787..79b68ba 100644
--- a/sound/pci/echoaudio/indigoio_dsp.c
+++ b/sound/pci/echoaudio/indigoio_dsp.c
@@ -49,16 +49,16 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_INDIGO_IO_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/indigoiox_dsp.c b/sound/pci/echoaudio/indigoiox_dsp.c
index 6de3f9b..1ae394e 100644
--- a/sound/pci/echoaudio/indigoiox_dsp.c
+++ b/sound/pci/echoaudio/indigoiox_dsp.c
@@ -47,17 +47,17 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_INDIGO_IOX_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	err = load_firmware(chip);
 	if (err < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/layla20.c b/sound/pci/echoaudio/layla20.c
index 12e5d21..fc8468d 100644
--- a/sound/pci/echoaudio/layla20.c
+++ b/sound/pci/echoaudio/layla20.c
@@ -26,7 +26,7 @@
 #define ECHOCARD_HAS_SUPER_INTERLEAVE
 #define ECHOCARD_HAS_DIGITAL_IO
 #define ECHOCARD_HAS_EXTERNAL_CLOCK
-#define ECHOCARD_HAS_ADAT	FALSE
+#define ECHOCARD_HAS_ADAT	false
 #define ECHOCARD_HAS_OUTPUT_CLOCK_SWITCH
 #define ECHOCARD_HAS_MIDI
 
diff --git a/sound/pci/echoaudio/layla20_dsp.c b/sound/pci/echoaudio/layla20_dsp.c
index f2024a3..5e5b6e2 100644
--- a/sound/pci/echoaudio/layla20_dsp.c
+++ b/sound/pci/echoaudio/layla20_dsp.c
@@ -51,8 +51,8 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
-	chip->has_midi = TRUE;
+	chip->bad_board = true;
+	chip->has_midi = true;
 	chip->dsp_code_to_load = FW_LAYLA20_DSP;
 	chip->input_clock_types =
 		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
@@ -62,7 +62,7 @@
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
@@ -71,7 +71,7 @@
 
 static int set_mixer_defaults(struct echoaudio *chip)
 {
-	chip->professional_spdif = FALSE;
+	chip->professional_spdif = false;
 	return init_line_levels(chip);
 }
 
@@ -113,7 +113,7 @@
 	u32 asic_status;
 	int goodcnt, i;
 
-	chip->asic_loaded = FALSE;
+	chip->asic_loaded = false;
 	for (i = goodcnt = 0; i < 5; i++) {
 		send_vector(chip, DSP_VC_TEST_ASIC);
 
@@ -127,7 +127,7 @@
 
 		if (asic_status == ASIC_ALREADY_LOADED) {
 			if (++goodcnt == 3) {
-				chip->asic_loaded = TRUE;
+				chip->asic_loaded = true;
 				return 0;
 			}
 		}
diff --git a/sound/pci/echoaudio/layla24_dsp.c b/sound/pci/echoaudio/layla24_dsp.c
index 4f11e81f..df28e51 100644
--- a/sound/pci/echoaudio/layla24_dsp.c
+++ b/sound/pci/echoaudio/layla24_dsp.c
@@ -51,8 +51,8 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
-	chip->has_midi = TRUE;
+	chip->bad_board = true;
+	chip->has_midi = true;
 	chip->dsp_code_to_load = FW_LAYLA24_DSP;
 	chip->input_clock_types =
 		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
@@ -64,7 +64,7 @@
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	if ((err = init_line_levels(chip)) < 0)
 		return err;
@@ -77,8 +77,8 @@
 static int set_mixer_defaults(struct echoaudio *chip)
 {
 	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
-	chip->professional_spdif = FALSE;
-	chip->digital_in_automute = TRUE;
+	chip->professional_spdif = false;
+	chip->digital_in_automute = true;
 	return init_line_levels(chip);
 }
 
@@ -135,7 +135,7 @@
 	err = load_asic_generic(chip, DSP_FNC_LOAD_LAYLA24_EXTERNAL_ASIC,
 				FW_LAYLA24_2S_ASIC);
 	if (err < 0)
-		return FALSE;
+		return false;
 
 	/* Now give the external ASIC a little time to set up */
 	mdelay(10);
@@ -147,7 +147,7 @@
 	   48 kHz, internal clock, S/PDIF RCA mode */
 	if (!err)
 		err = write_control_reg(chip, GML_CONVERTER_ENABLE | GML_48KHZ,
-					TRUE);
+					true);
 	
 	return err;
 }
@@ -241,7 +241,7 @@
 	dev_dbg(chip->card->dev,
 		"set_sample_rate: %d clock %d\n", rate, control_reg);
 
-	return write_control_reg(chip, control_reg, FALSE);
+	return write_control_reg(chip, control_reg, false);
 }
 
 
@@ -287,7 +287,7 @@
 	}
 
 	chip->input_clock = clock;
-	return write_control_reg(chip, control_reg, TRUE);
+	return write_control_reg(chip, control_reg, true);
 }
 
 
@@ -334,17 +334,17 @@
 	short asic;
 
 	/* Set clock to "internal" if it's not compatible with the new mode */
-	incompatible_clock = FALSE;
+	incompatible_clock = false;
 	switch (mode) {
 	case DIGITAL_MODE_SPDIF_OPTICAL:
 	case DIGITAL_MODE_SPDIF_RCA:
 		if (chip->input_clock == ECHO_CLOCK_ADAT)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		asic = FW_LAYLA24_2S_ASIC;
 		break;
 	case DIGITAL_MODE_ADAT:
 		if (chip->input_clock == ECHO_CLOCK_SPDIF)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		asic = FW_LAYLA24_2A_ASIC;
 		break;
 	default:
@@ -383,7 +383,7 @@
 		break;
 	}
 
-	err = write_control_reg(chip, control_reg, TRUE);
+	err = write_control_reg(chip, control_reg, true);
 	spin_unlock_irq(&chip->lock);
 	if (err < 0)
 		return err;
diff --git a/sound/pci/echoaudio/mia.c b/sound/pci/echoaudio/mia.c
index 2f7562f..62b5240 100644
--- a/sound/pci/echoaudio/mia.c
+++ b/sound/pci/echoaudio/mia.c
@@ -26,7 +26,7 @@
 #define ECHOCARD_HAS_VMIXER
 #define ECHOCARD_HAS_DIGITAL_IO
 #define ECHOCARD_HAS_EXTERNAL_CLOCK
-#define ECHOCARD_HAS_ADAT	FALSE
+#define ECHOCARD_HAS_ADAT	false
 #define ECHOCARD_HAS_STEREO_BIG_ENDIAN32
 #define ECHOCARD_HAS_MIDI
 #define ECHOCARD_HAS_LINE_OUT_GAIN
diff --git a/sound/pci/echoaudio/mia_dsp.c b/sound/pci/echoaudio/mia_dsp.c
index fdad079..8f612a0 100644
--- a/sound/pci/echoaudio/mia_dsp.c
+++ b/sound/pci/echoaudio/mia_dsp.c
@@ -52,19 +52,19 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_MIA_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	if ((subdevice_id & 0x0000f) == MIA_MIDI_REV)
-		chip->has_midi = TRUE;
+		chip->has_midi = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL |
 		ECHO_CLOCK_BIT_SPDIF;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/mona_dsp.c b/sound/pci/echoaudio/mona_dsp.c
index 843c7a5..dce9e57 100644
--- a/sound/pci/echoaudio/mona_dsp.c
+++ b/sound/pci/echoaudio/mona_dsp.c
@@ -52,7 +52,7 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->input_clock_types =
 		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
 		ECHO_CLOCK_BIT_WORD | ECHO_CLOCK_BIT_ADAT;
@@ -69,7 +69,7 @@
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
@@ -79,8 +79,8 @@
 static int set_mixer_defaults(struct echoaudio *chip)
 {
 	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
-	chip->professional_spdif = FALSE;
-	chip->digital_in_automute = TRUE;
+	chip->professional_spdif = false;
+	chip->digital_in_automute = true;
 	return init_line_levels(chip);
 }
 
@@ -148,7 +148,7 @@
 	   48 kHz, internal clock, S/PDIF RCA mode */
 	if (!err) {
 		control_reg = GML_CONVERTER_ENABLE | GML_48KHZ;
-		err = write_control_reg(chip, control_reg, TRUE);
+		err = write_control_reg(chip, control_reg, true);
 	}
 
 	return err;
@@ -356,7 +356,7 @@
 	}
 
 	chip->input_clock = clock;
-	return write_control_reg(chip, control_reg, TRUE);
+	return write_control_reg(chip, control_reg, true);
 }
 
 
@@ -367,16 +367,16 @@
 	int err, incompatible_clock;
 
 	/* Set clock to "internal" if it's not compatible with the new mode */
-	incompatible_clock = FALSE;
+	incompatible_clock = false;
 	switch (mode) {
 	case DIGITAL_MODE_SPDIF_OPTICAL:
 	case DIGITAL_MODE_SPDIF_RCA:
 		if (chip->input_clock == ECHO_CLOCK_ADAT)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	case DIGITAL_MODE_ADAT:
 		if (chip->input_clock == ECHO_CLOCK_SPDIF)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	default:
 		dev_err(chip->card->dev,
@@ -415,7 +415,7 @@
 		break;
 	}
 
-	err = write_control_reg(chip, control_reg, FALSE);
+	err = write_control_reg(chip, control_reg, false);
 	spin_unlock_irq(&chip->lock);
 	if (err < 0)
 		return err;
diff --git a/sound/pci/emu10k1/emumixer.c b/sound/pci/emu10k1/emumixer.c
index 55e5716..076b117 100644
--- a/sound/pci/emu10k1/emumixer.c
+++ b/sound/pci/emu10k1/emumixer.c
@@ -1741,7 +1741,7 @@
 static struct snd_kcontrol_new snd_audigy_capture_boost =
 {
 	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
-	.name =		"Analog Capture Boost",
+	.name =		"Mic Extra Boost",
 	.info =		snd_audigy_capture_boost_info,
 	.get =		snd_audigy_capture_boost_get,
 	.put =		snd_audigy_capture_boost_put
@@ -1819,8 +1819,6 @@
 		 * the Philips ADC for 24bit capture */
 		"PCM Playback Switch",
 		"PCM Playback Volume",
-		"Master Mono Playback Switch",
-		"Master Mono Playback Volume",
 		"Master Playback Switch",
 		"Master Playback Volume",
 		"PCM Out Path & Mute",
@@ -1830,10 +1828,16 @@
 		"Capture Switch",
 		"Capture Volume",
 		"Mic Select",
+		"Headphone Playback Switch",
+		"Headphone Playback Volume",
+		"3D Control - Center",
+		"3D Control - Depth",
+		"3D Control - Switch",
 		"Video Playback Switch",
 		"Video Playback Volume",
 		"Mic Playback Switch",
 		"Mic Playback Volume",
+		"External Amplifier",
 		NULL
 	};
 	static char *audigy_rename_ctls[] = {
@@ -1842,6 +1846,8 @@
 		/* "Wave Capture Volume", "PCM Capture Volume", */
 		"Wave Master Playback Volume", "Master Playback Volume",
 		"AMic Playback Volume", "Mic Playback Volume",
+		"Master Mono Playback Switch", "Phone Output Playback Switch",
+		"Master Mono Playback Volume", "Phone Output Playback Volume",
 		NULL
 	};
 	static char *audigy_rename_ctls_i2c_adc[] = {
@@ -1867,8 +1873,6 @@
 		 * the Philips ADC for 24bit capture */
 		"PCM Playback Switch",
 		"PCM Playback Volume",
-		"Master Mono Playback Switch",
-		"Master Mono Playback Volume",
 		"Capture Source",
 		"Capture Switch",
 		"Capture Volume",
@@ -1900,7 +1904,8 @@
 		"Aux Playback Volume", "Aux Capture Volume",
 		"Video Playback Switch", "Video Capture Switch",
 		"Video Playback Volume", "Video Capture Volume",
-
+		"Master Mono Playback Switch", "Phone Output Playback Switch",
+		"Master Mono Playback Volume", "Phone Output Playback Volume",
 		NULL
 	};
 
@@ -1935,6 +1940,9 @@
 			snd_ac97_write_cache(emu->ac97, AC97_MASTER, 0x0000);
 			/* set capture source to mic */
 			snd_ac97_write_cache(emu->ac97, AC97_REC_SEL, 0x0000);
+			/* set mono output (TAD) to mic */
+			snd_ac97_update_bits(emu->ac97, AC97_GENERAL_PURPOSE,
+				0x0200, 0x0200);
 			if (emu->card_capabilities->adc_1361t)
 				c = audigy_remove_ctls_1361t_adc;
 			else 
@@ -1996,11 +2004,6 @@
 		rename_ctl(card, "Analog Mix Capture Volume", "Line2 Capture Volume");
 		rename_ctl(card, "Aux2 Capture Volume", "Line3 Capture Volume");
 		rename_ctl(card, "Mic Capture Volume", "Unknown1 Capture Volume");
-		remove_ctl(card, "Headphone Playback Switch");
-		remove_ctl(card, "Headphone Playback Volume");
-		remove_ctl(card, "3D Control - Center");
-		remove_ctl(card, "3D Control - Depth");
-		remove_ctl(card, "3D Control - Switch");
 	}
 	if ((kctl = emu->ctl_send_routing = snd_ctl_new1(&snd_emu10k1_send_routing_control, emu)) == NULL)
 		return -ENOMEM;
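
The emu10k1 mixer changes rename the Audigy boost control to "Mic
Extra Boost", stop deleting the AC'97 headphone and 3D controls, remap
the Master Mono controls to "Phone Output", and route the mono/TAD
output from the mic by setting bit 9 of AC97_GENERAL_PURPOSE.
snd_ac97_update_bits() is essentially a locked read-modify-write on a
codec register; a simplified sketch of what it performs (not the
actual implementation, which also goes through the register cache):

static int ac97_update_bits_sketch(struct snd_ac97 *ac97, unsigned short reg,
				   unsigned short mask, unsigned short value)
{
	unsigned short old = snd_ac97_read(ac97, reg);
	unsigned short new = (old & ~mask) | (value & mask);

	if (new == old)
		return 0;		/* nothing to do */
	snd_ac97_write(ac97, reg, new);
	return 1;			/* register changed */
}
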
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 03b7399..7f57a14 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -887,11 +887,32 @@
 static bool pin_config_match(struct hda_codec *codec,
 			     const struct hda_pintbl *pins)
 {
-	for (; pins->nid; pins++) {
-		u32 def_conf = snd_hda_codec_get_pincfg(codec, pins->nid);
-		if (pins->val != def_conf)
+	int i;
+
+	for (i = 0; i < codec->init_pins.used; i++) {
+		struct hda_pincfg *pin = snd_array_elem(&codec->init_pins, i);
+		hda_nid_t nid = pin->nid;
+		u32 cfg = pin->cfg;
+		const struct hda_pintbl *t_pins;
+		int found;
+
+		t_pins = pins;
+		found = 0;
+		for (; t_pins->nid; t_pins++) {
+			if (t_pins->nid == nid) {
+				found = 1;
+				if (t_pins->val == cfg)
+					break;
+				else if ((cfg & 0xf0000000) == 0x40000000 && (t_pins->val & 0xf0000000) == 0x40000000)
+					break;
+				else
+					return false;
+			}
+		}
+		if (!found && (cfg & 0xf0000000) != 0x40000000)
 			return false;
 	}
+
 	return true;
 }
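
The rewritten pin_config_match() walks the codec's actual init pins
instead of only the quirk table: a table entry must either match the
pin's default config exactly, or both values must fall in the
0x4xxxxxxx "no physical connection" class, and any codec pin missing
from the table must itself be unconnected.  The wildcard rule in
isolation:

/* A pin default whose top nibble is 0x4 marks a pin with no physical
 * connection, so two such values match regardless of the low bits: */
static bool pin_defaults_match(u32 a, u32 b)
{
	if (a == b)
		return true;
	return (a & 0xf0000000) == 0x40000000 &&
	       (b & 0xf0000000) == 0x40000000;
}
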
 
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index d1a2cb6..37f43a1 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -53,76 +53,6 @@
 #define codec_has_clkstop(codec) \
 	((codec)->core.power_caps & AC_PWRST_CLKSTOP)
 
-/**
- * snd_hda_get_jack_location - Give a location string of the jack
- * @cfg: pin default config value
- *
- * Parse the pin default config value and returns the string of the
- * jack location, e.g. "Rear", "Front", etc.
- */
-const char *snd_hda_get_jack_location(u32 cfg)
-{
-	static char *bases[7] = {
-		"N/A", "Rear", "Front", "Left", "Right", "Top", "Bottom",
-	};
-	static unsigned char specials_idx[] = {
-		0x07, 0x08,
-		0x17, 0x18, 0x19,
-		0x37, 0x38
-	};
-	static char *specials[] = {
-		"Rear Panel", "Drive Bar",
-		"Riser", "HDMI", "ATAPI",
-		"Mobile-In", "Mobile-Out"
-	};
-	int i;
-	cfg = (cfg & AC_DEFCFG_LOCATION) >> AC_DEFCFG_LOCATION_SHIFT;
-	if ((cfg & 0x0f) < 7)
-		return bases[cfg & 0x0f];
-	for (i = 0; i < ARRAY_SIZE(specials_idx); i++) {
-		if (cfg == specials_idx[i])
-			return specials[i];
-	}
-	return "UNKNOWN";
-}
-EXPORT_SYMBOL_GPL(snd_hda_get_jack_location);
-
-/**
- * snd_hda_get_jack_connectivity - Give a connectivity string of the jack
- * @cfg: pin default config value
- *
- * Parse the pin default config value and returns the string of the
- * jack connectivity, i.e. external or internal connection.
- */
-const char *snd_hda_get_jack_connectivity(u32 cfg)
-{
-	static char *jack_locations[4] = { "Ext", "Int", "Sep", "Oth" };
-
-	return jack_locations[(cfg >> (AC_DEFCFG_LOCATION_SHIFT + 4)) & 3];
-}
-EXPORT_SYMBOL_GPL(snd_hda_get_jack_connectivity);
-
-/**
- * snd_hda_get_jack_type - Give a type string of the jack
- * @cfg: pin default config value
- *
- * Parse the pin default config value and returns the string of the
- * jack type, i.e. the purpose of the jack, such as Line-Out or CD.
- */
-const char *snd_hda_get_jack_type(u32 cfg)
-{
-	static char *jack_types[16] = {
-		"Line Out", "Speaker", "HP Out", "CD",
-		"SPDIF Out", "Digital Out", "Modem Line", "Modem Hand",
-		"Line In", "Aux", "Mic", "Telephony",
-		"SPDIF In", "Digital In", "Reserved", "Other"
-	};
-
-	return jack_types[(cfg & AC_DEFCFG_DEVICE)
-				>> AC_DEFCFG_DEVICE_SHIFT];
-}
-EXPORT_SYMBOL_GPL(snd_hda_get_jack_type);
-
 /*
  * Send and receive a verb - passed to exec_verb override for hdac_device
  */
@@ -975,7 +905,7 @@
 	if (codec->bus->modelname) {
 		codec->modelname = kstrdup(codec->bus->modelname, GFP_KERNEL);
 		if (!codec->modelname) {
-			err = -ENODEV;
+			err = -ENOMEM;
 			goto error;
 		}
 	}
@@ -1025,7 +955,7 @@
 	hda_nid_t fg;
 	int err;
 
-	err = snd_hdac_refresh_widgets(&codec->core);
+	err = snd_hdac_refresh_widget_sysfs(&codec->core);
 	if (err < 0)
 		return err;
 
@@ -3172,8 +3102,7 @@
 			struct snd_pcm_chmap *chmap;
 			const struct snd_pcm_chmap_elem *elem;
 
-			if (!pcm || !pcm->pcm || pcm->own_chmap ||
-			    !hinfo->substreams)
+			if (!pcm->pcm || pcm->own_chmap || !hinfo->substreams)
 				continue;
 			elem = hinfo->chmap ? hinfo->chmap : snd_pcm_std_chmaps;
 			err = snd_pcm_add_chmap_ctls(pcm->pcm, str, elem,
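
Two small fixes ride along in hda_codec.c: a failed kstrdup() now
reports -ENOMEM rather than -ENODEV, and the widget refresh follows
the hdac-core rename to snd_hdac_refresh_widget_sysfs().  The
corrected error path, reduced to its essence:

	codec->modelname = kstrdup(codec->bus->modelname, GFP_KERNEL);
	if (!codec->modelname) {
		err = -ENOMEM;	/* kstrdup() fails only on allocation failure */
		goto error;
	}
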
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 12837ab..2970413 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -469,13 +469,6 @@
 }
 
 /*
- * get widget information
- */
-const char *snd_hda_get_jack_connectivity(u32 cfg);
-const char *snd_hda_get_jack_type(u32 cfg);
-const char *snd_hda_get_jack_location(u32 cfg);
-
-/*
  * power saving
  */
 #define snd_hda_power_up(codec)		snd_hdac_power_up(&(codec)->core)
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index c746cd9..563984d 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -42,7 +42,7 @@
 	CEA_EDID_VER_RESERVED	= 4,
 };
 
-static char *cea_speaker_allocation_names[] = {
+static const char * const cea_speaker_allocation_names[] = {
 	/*  0 */ "FL/FR",
 	/*  1 */ "LFE",
 	/*  2 */ "FC",
@@ -56,7 +56,7 @@
 	/* 10 */ "FCH",
 };
 
-static char *eld_connection_type_names[4] = {
+static const char * const eld_connection_type_names[4] = {
 	"HDMI",
 	"DisplayPort",
 	"2-reserved",
@@ -94,7 +94,7 @@
 	AUDIO_CODING_XTYPE_FIRST_RESERVED	= 4,
 };
 
-static char *cea_audio_coding_type_names[] = {
+static const char * const cea_audio_coding_type_names[] = {
 	/*  0 */ "undefined",
 	/*  1 */ "LPCM",
 	/*  2 */ "AC-3",
@@ -482,14 +482,14 @@
 	struct parsed_hdmi_eld *e = &eld->info;
 	char buf[SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE];
 	int i;
-	static char *eld_version_names[32] = {
+	static const char * const eld_version_names[32] = {
 		"reserved",
 		"reserved",
 		"CEA-861D or below",
 		[3 ... 30] = "reserved",
 		[31] = "partial"
 	};
-	static char *cea_edid_version_names[8] = {
+	static const char * const cea_edid_version_names[8] = {
 		"no CEA EDID Timing Extension block present",
 		"CEA-861",
 		"CEA-861-A",
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index baaf7ed0..033aa84 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -36,24 +36,9 @@
 #define param_read(codec, nid, parm) \
 	snd_hdac_read_parm_uncached(&(codec)->core, nid, parm)
 
-static char *bits_names(unsigned int bits, char *names[], int size)
-{
-	int i, n;
-	static char buf[128];
-
-	for (i = 0, n = 0; i < size; i++) {
-		if (bits & (1U<<i) && names[i])
-			n += snprintf(buf + n, sizeof(buf) - n, " %s",
-				      names[i]);
-	}
-	buf[n] = '\0';
-
-	return buf;
-}
-
 static const char *get_wid_type_name(unsigned int wid_value)
 {
-	static char *names[16] = {
+	static const char * const names[16] = {
 		[AC_WID_AUD_OUT] = "Audio Output",
 		[AC_WID_AUD_IN] = "Audio Input",
 		[AC_WID_AUD_MIX] = "Audio Mixer",
@@ -241,7 +226,7 @@
 
 static const char *get_jack_connection(u32 cfg)
 {
-	static char *names[16] = {
+	static const char * const names[16] = {
 		"Unknown", "1/8", "1/4", "ATAPI",
 		"RCA", "Optical","Digital", "Analog",
 		"DIN", "XLR", "RJ11", "Comb",
@@ -256,7 +241,7 @@
 
 static const char *get_jack_color(u32 cfg)
 {
-	static char *names[16] = {
+	static const char * const names[16] = {
 		"Unknown", "Black", "Grey", "Blue",
 		"Green", "Red", "Orange", "Yellow",
 		"Purple", "Pink", NULL, NULL,
@@ -269,11 +254,74 @@
 		return "UNKNOWN";
 }
 
+/*
+ * Parse the pin default config value and returns the string of the
+ * jack location, e.g. "Rear", "Front", etc.
+ */
+static const char *get_jack_location(u32 cfg)
+{
+	static const char * const bases[7] = {
+		"N/A", "Rear", "Front", "Left", "Right", "Top", "Bottom",
+	};
+	static const unsigned char specials_idx[] = {
+		0x07, 0x08,
+		0x17, 0x18, 0x19,
+		0x37, 0x38
+	};
+	static const char * const specials[] = {
+		"Rear Panel", "Drive Bar",
+		"Riser", "HDMI", "ATAPI",
+		"Mobile-In", "Mobile-Out"
+	};
+	int i;
+
+	cfg = (cfg & AC_DEFCFG_LOCATION) >> AC_DEFCFG_LOCATION_SHIFT;
+	if ((cfg & 0x0f) < 7)
+		return bases[cfg & 0x0f];
+	for (i = 0; i < ARRAY_SIZE(specials_idx); i++) {
+		if (cfg == specials_idx[i])
+			return specials[i];
+	}
+	return "UNKNOWN";
+}
+
+/*
+ * Parse the pin default config value and returns the string of the
+ * jack connectivity, i.e. external or internal connection.
+ */
+static const char *get_jack_connectivity(u32 cfg)
+{
+	static const char * const jack_locations[4] = {
+		"Ext", "Int", "Sep", "Oth"
+	};
+
+	return jack_locations[(cfg >> (AC_DEFCFG_LOCATION_SHIFT + 4)) & 3];
+}
+
+/*
+ * Parse the pin default config value and returns the string of the
+ * jack type, i.e. the purpose of the jack, such as Line-Out or CD.
+ */
+static const char *get_jack_type(u32 cfg)
+{
+	static const char * const jack_types[16] = {
+		"Line Out", "Speaker", "HP Out", "CD",
+		"SPDIF Out", "Digital Out", "Modem Line", "Modem Hand",
+		"Line In", "Aux", "Mic", "Telephony",
+		"SPDIF In", "Digital In", "Reserved", "Other"
+	};
+
+	return jack_types[(cfg & AC_DEFCFG_DEVICE)
+				>> AC_DEFCFG_DEVICE_SHIFT];
+}
+
 static void print_pin_caps(struct snd_info_buffer *buffer,
 			   struct hda_codec *codec, hda_nid_t nid,
 			   int *supports_vref)
 {
-	static char *jack_conns[4] = { "Jack", "N/A", "Fixed", "Both" };
+	static const char * const jack_conns[4] = {
+		"Jack", "N/A", "Fixed", "Both"
+	};
 	unsigned int caps, val;
 
 	caps = param_read(codec, nid, AC_PAR_PIN_CAP);
@@ -340,9 +388,9 @@
 	caps = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONFIG_DEFAULT, 0);
 	snd_iprintf(buffer, "  Pin Default 0x%08x: [%s] %s at %s %s\n", caps,
 		    jack_conns[(caps & AC_DEFCFG_PORT_CONN) >> AC_DEFCFG_PORT_CONN_SHIFT],
-		    snd_hda_get_jack_type(caps),
-		    snd_hda_get_jack_connectivity(caps),
-		    snd_hda_get_jack_location(caps));
+		    get_jack_type(caps),
+		    get_jack_connectivity(caps),
+		    get_jack_location(caps));
 	snd_iprintf(buffer, "    Conn = %s, Color = %s\n",
 		    get_jack_connection(caps),
 		    get_jack_color(caps));
@@ -478,7 +526,7 @@
 static void print_power_state(struct snd_info_buffer *buffer,
 			      struct hda_codec *codec, hda_nid_t nid)
 {
-	static char *names[] = {
+	static const char * const names[] = {
 		[ilog2(AC_PWRST_D0SUP)]		= "D0",
 		[ilog2(AC_PWRST_D1SUP)]		= "D1",
 		[ilog2(AC_PWRST_D2SUP)]		= "D2",
@@ -492,9 +540,16 @@
 	int sup = param_read(codec, nid, AC_PAR_POWER_STATE);
 	int pwr = snd_hda_codec_read(codec, nid, 0,
 				     AC_VERB_GET_POWER_STATE, 0);
-	if (sup != -1)
-		snd_iprintf(buffer, "  Power states: %s\n",
-			    bits_names(sup, names, ARRAY_SIZE(names)));
+	if (sup != -1) {
+		int i;
+
+		snd_iprintf(buffer, "  Power states: ");
+		for (i = 0; i < ARRAY_SIZE(names); i++) {
+			if (sup & (1U << i))
+				snd_iprintf(buffer, " %s", names[i]);
+		}
+		snd_iprintf(buffer, "\n");
+	}
 
 	snd_iprintf(buffer, "  Power: setting=%s, actual=%s",
 		    get_pwr_state(pwr & AC_PWRST_SETTING),
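
hda_proc.c drops the bits_names() helper, which formatted into a
function-local static buffer (shared state, so concurrent reads of the
proc file could interleave output), and prints each supported power
state directly.  The string tables also become const-correct; the
distinction the new declarations encode:

/* "const char * const" places both the pointer array and the strings
 * it points at in read-only data; with plain "char *names[]" the
 * pointers themselves would stay writable: */
static const char * const names[] = { "D0", "D1", "D2", "D3" };
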
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 0f039ab..186792f 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -763,6 +763,20 @@
 	QUIRK_ALIENWARE,
 };
 
+static const struct hda_pintbl alienware_pincfgs[] = {
+	{ 0x0b, 0x90170110 }, /* Builtin Speaker */
+	{ 0x0c, 0x411111f0 }, /* N/A */
+	{ 0x0d, 0x411111f0 }, /* N/A */
+	{ 0x0e, 0x411111f0 }, /* N/A */
+	{ 0x0f, 0x0321101f }, /* HP */
+	{ 0x10, 0x411111f0 }, /* Headset?  disabled for now */
+	{ 0x11, 0x03a11021 }, /* Mic */
+	{ 0x12, 0xd5a30140 }, /* Builtin Mic */
+	{ 0x13, 0x411111f0 }, /* N/A */
+	{ 0x18, 0x411111f0 }, /* N/A */
+	{}
+};
+
 static const struct snd_pci_quirk ca0132_quirks[] = {
 	SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15", QUIRK_ALIENWARE),
 	{}
@@ -3147,7 +3161,7 @@
 	auto_jack = spec->vnode_lswitch[VNID_HP_ASEL - VNODE_START_NID];
 
 	if (auto_jack)
-		jack_present = snd_hda_jack_detect(codec, spec->out_pins[1]);
+		jack_present = snd_hda_jack_detect(codec, spec->unsol_tag_hp);
 	else
 		jack_present =
 			spec->vnode_lswitch[VNID_HP_SEL - VNODE_START_NID];
@@ -3309,7 +3323,7 @@
 	auto_jack = spec->vnode_lswitch[VNID_AMIC1_ASEL - VNODE_START_NID];
 
 	if (auto_jack)
-		jack_present = snd_hda_jack_detect(codec, spec->input_pins[0]);
+		jack_present = snd_hda_jack_detect(codec, spec->unsol_tag_amic1);
 	else
 		jack_present =
 			spec->vnode_lswitch[VNID_AMIC1_SEL - VNODE_START_NID];
@@ -4617,37 +4631,54 @@
 	spec->multiout.num_dacs = 3;
 	spec->multiout.max_channels = 2;
 
-	spec->num_outputs = 2;
-	spec->out_pins[0] = 0x0b; /* speaker out */
 	if (spec->quirk == QUIRK_ALIENWARE) {
 		codec_dbg(codec, "ca0132_config: QUIRK_ALIENWARE applied.\n");
+		snd_hda_apply_pincfgs(codec, alienware_pincfgs);
+
+		spec->num_outputs = 2;
+		spec->out_pins[0] = 0x0b; /* speaker out */
 		spec->out_pins[1] = 0x0f;
-	} else{
+		spec->shared_out_nid = 0x2;
+		spec->unsol_tag_hp = 0x0f;
+
+		spec->adcs[0] = 0x7; /* digital mic / analog mic1 */
+		spec->adcs[1] = 0x8; /* analog mic2 */
+		spec->adcs[2] = 0xa; /* what u hear */
+
+		spec->num_inputs = 3;
+		spec->input_pins[0] = 0x12;
+		spec->input_pins[1] = 0x11;
+		spec->input_pins[2] = 0x13;
+		spec->shared_mic_nid = 0x7;
+		spec->unsol_tag_amic1 = 0x11;
+	} else {
+		spec->num_outputs = 2;
+		spec->out_pins[0] = 0x0b; /* speaker out */
 		spec->out_pins[1] = 0x10; /* headphone out */
+		spec->shared_out_nid = 0x2;
+		spec->unsol_tag_hp = spec->out_pins[1];
+
+		spec->adcs[0] = 0x7; /* digital mic / analog mic1 */
+		spec->adcs[1] = 0x8; /* analog mic2 */
+		spec->adcs[2] = 0xa; /* what u hear */
+
+		spec->num_inputs = 3;
+		spec->input_pins[0] = 0x12;
+		spec->input_pins[1] = 0x11;
+		spec->input_pins[2] = 0x13;
+		spec->shared_mic_nid = 0x7;
+		spec->unsol_tag_amic1 = spec->input_pins[0];
+
+		/* SPDIF I/O */
+		spec->dig_out = 0x05;
+		spec->multiout.dig_out_nid = spec->dig_out;
+		cfg->dig_out_pins[0] = 0x0c;
+		cfg->dig_outs = 1;
+		cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
+		spec->dig_in = 0x09;
+		cfg->dig_in_pin = 0x0e;
+		cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
 	}
-	spec->shared_out_nid = 0x2;
-	spec->unsol_tag_hp = spec->out_pins[1];
-
-	spec->adcs[0] = 0x7; /* digital mic / analog mic1 */
-	spec->adcs[1] = 0x8; /* analog mic2 */
-	spec->adcs[2] = 0xa; /* what u hear */
-
-	spec->num_inputs = 3;
-	spec->input_pins[0] = 0x12;
-	spec->input_pins[1] = 0x11;
-	spec->input_pins[2] = 0x13;
-	spec->shared_mic_nid = 0x7;
-	spec->unsol_tag_amic1 = spec->input_pins[0];
-
-	/* SPDIF I/O */
-	spec->dig_out = 0x05;
-	spec->multiout.dig_out_nid = spec->dig_out;
-	cfg->dig_out_pins[0] = 0x0c;
-	cfg->dig_outs = 1;
-	cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
-	spec->dig_in = 0x09;
-	cfg->dig_in_pin = 0x0e;
-	cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
 }
 
 static int ca0132_prepare_verbs(struct hda_codec *codec)
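
patch_ca0132.c stops assuming the Alienware machines share the desktop
card's wiring: the quirk now applies its own pin defaults via
snd_hda_apply_pincfgs() and sets up its outputs, inputs and
unsolicited-event tags separately, leaving the S/PDIF setup to the
non-quirk path.  The general shape of a pin-override table and its
application (values are the Alienware ones quoted above; the empty
entry terminates the table):

static const struct hda_pintbl pins[] = {
	{ 0x0b, 0x90170110 },	/* builtin speaker */
	{ 0x0f, 0x0321101f },	/* headphone jack */
	{}
};

snd_hda_apply_pincfgs(codec, pins);
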
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index a97db5f..acbfbe08 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -37,6 +37,8 @@
 #include <sound/jack.h>
 #include <sound/asoundef.h>
 #include <sound/tlv.h>
+#include <sound/hdaudio.h>
+#include <sound/hda_i915.h>
 #include "hda_codec.h"
 #include "hda_local.h"
 #include "hda_jack.h"
@@ -144,6 +146,9 @@
 	 */
 	struct hda_multi_out multiout;
 	struct hda_pcm_stream pcm_playback;
+
+	/* i915/powerwell (Haswell+/Valleyview+) specific */
+	struct i915_audio_component_audio_ops i915_audio_ops;
 };
 
 
@@ -2191,6 +2196,9 @@
 	struct hdmi_spec *spec = codec->spec;
 	int pin_idx;
 
+	if (is_haswell_plus(codec) || is_valleyview_plus(codec))
+		snd_hdac_i915_register_notifier(NULL);
+
 	for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
 		struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
 
@@ -2316,6 +2324,14 @@
 	snd_hda_codec_set_power_to_all(codec, fg, power_state);
 }
 
+static void intel_pin_eld_notify(void *audio_ptr, int port)
+{
+	struct hda_codec *codec = audio_ptr;
+	int pin_nid = port + 0x04;
+
+	check_presence_and_report(codec, pin_nid);
+}
+
 static int patch_generic_hdmi(struct hda_codec *codec)
 {
 	struct hdmi_spec *spec;
@@ -2342,8 +2358,12 @@
 	if (is_valleyview_plus(codec) || is_skylake(codec))
 		codec->core.link_power_control = 1;
 
-	if (is_haswell_plus(codec) || is_valleyview_plus(codec))
+	if (is_haswell_plus(codec) || is_valleyview_plus(codec)) {
 		codec->depop_delay = 0;
+		spec->i915_audio_ops.audio_ptr = codec;
+		spec->i915_audio_ops.pin_eld_notify = intel_pin_eld_notify;
+		snd_hdac_i915_register_notifier(&spec->i915_audio_ops);
+	}
 
 	if (hdmi_parse_codec(codec) < 0) {
 		codec->spec = NULL;
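
For Haswell+/Valleyview+ the HDMI codec now registers with the i915
audio component, so the GPU driver can push ELD/hotplug notifications
instead of the codec having to poll.  The pairing is symmetric, and
the notifier maps a display port to a pin NID as port + 0x04:

/* probe: hand the codec to i915 and hook per-port ELD notification */
spec->i915_audio_ops.audio_ptr = codec;
spec->i915_audio_ops.pin_eld_notify = intel_pin_eld_notify;
snd_hdac_i915_register_notifier(&spec->i915_audio_ops);

/* remove: unhook before the codec goes away */
snd_hdac_i915_register_notifier(NULL);
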
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 374ea53..4e6b090 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5389,400 +5389,202 @@
 	{}
 };
 
-#define ALC255_STANDARD_PINS \
-	{0x18, 0x411111f0}, \
-	{0x19, 0x411111f0}, \
-	{0x1a, 0x411111f0}, \
-	{0x1b, 0x411111f0}, \
-	{0x1e, 0x411111f0}
-
 #define ALC256_STANDARD_PINS \
 	{0x12, 0x90a60140}, \
 	{0x14, 0x90170110}, \
-	{0x19, 0x411111f0}, \
-	{0x1a, 0x411111f0}, \
-	{0x1b, 0x411111f0}, \
 	{0x21, 0x02211020}
 
 #define ALC282_STANDARD_PINS \
-	{0x14, 0x90170110}, \
-	{0x18, 0x411111f0}, \
-	{0x1a, 0x411111f0}, \
-	{0x1b, 0x411111f0}, \
-	{0x1e, 0x411111f0}
-
-#define ALC288_STANDARD_PINS \
-	{0x17, 0x411111f0}, \
-	{0x18, 0x411111f0}, \
-	{0x19, 0x411111f0}, \
-	{0x1a, 0x411111f0}, \
-	{0x1e, 0x411111f0}
+	{0x14, 0x90170110}
 
 #define ALC290_STANDARD_PINS \
-	{0x12, 0x99a30130}, \
-	{0x13, 0x40000000}, \
-	{0x16, 0x411111f0}, \
-	{0x17, 0x411111f0}, \
-	{0x19, 0x411111f0}, \
-	{0x1b, 0x411111f0}, \
-	{0x1e, 0x411111f0}
+	{0x12, 0x99a30130}
 
 #define ALC292_STANDARD_PINS \
 	{0x14, 0x90170110}, \
-	{0x15, 0x0221401f}, \
-	{0x1a, 0x411111f0}, \
-	{0x1b, 0x411111f0}, \
-	{0x1d, 0x40700001}
+	{0x15, 0x0221401f}
 
 #define ALC298_STANDARD_PINS \
-	{0x18, 0x411111f0}, \
-	{0x19, 0x411111f0}, \
-	{0x1a, 0x411111f0}, \
-	{0x1e, 0x411111f0}, \
-	{0x1f, 0x411111f0}
+	{0x12, 0x90a60130}, \
+	{0x21, 0x03211020}
 
 static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
-		{0x12, 0x40300000},
 		{0x14, 0x90170110},
-		{0x17, 0x411111f0},
-		{0x1d, 0x40538029},
 		{0x21, 0x02211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60140},
 		{0x14, 0x90170110},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170120},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211030}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		{0x12, 0x40000000},
 		{0x14, 0x90170130},
-		{0x17, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
 		{0x1b, 0x01014020},
-		{0x1d, 0x4054c029},
-		{0x1e, 0x411111f0},
 		{0x21, 0x0221103f}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		{0x12, 0x40000000},
 		{0x14, 0x90170150},
-		{0x17, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
 		{0x1b, 0x02011020},
-		{0x1d, 0x4054c029},
-		{0x1e, 0x411111f0},
 		{0x21, 0x0221105f}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		{0x12, 0x40000000},
 		{0x14, 0x90170110},
-		{0x17, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
 		{0x1b, 0x01014020},
-		{0x1d, 0x4054c029},
-		{0x1e, 0x411111f0},
 		{0x21, 0x0221101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170120},
 		{0x17, 0x90170140},
-		{0x18, 0x40000000},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x41163b05},
-		{0x1e, 0x411111f0},
 		{0x21, 0x0321102f}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170130},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211040}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170140},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211050}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60170},
 		{0x14, 0x90170120},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211030}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60170},
 		{0x14, 0x90170130},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211040}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60170},
 		{0x14, 0x90170140},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211050}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5548", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60180},
 		{0x14, 0x90170130},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211040}),
 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170120},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211030}),
 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC256_STANDARD_PINS,
-		{0x13, 0x40000000},
-		{0x1d, 0x40700001},
-		{0x1e, 0x411111f0}),
-	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC256_STANDARD_PINS,
-		{0x13, 0x411111f0},
-		{0x1d, 0x40700001},
-		{0x1e, 0x411111f0}),
-	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC256_STANDARD_PINS,
-		{0x13, 0x411111f0},
-		{0x1d, 0x4077992d},
-		{0x1e, 0x411111ff}),
+		ALC256_STANDARD_PINS),
 	SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
 		{0x12, 0x90a60130},
-		{0x13, 0x40000000},
 		{0x14, 0x90170110},
 		{0x15, 0x0421101f},
-		{0x16, 0x411111f0},
-		{0x17, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x04a11020},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x40748605},
-		{0x1e, 0x411111f0}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED,
 		{0x12, 0x90a60140},
-		{0x13, 0x40000000},
 		{0x14, 0x90170110},
 		{0x15, 0x0421101f},
-		{0x16, 0x411111f0},
-		{0x17, 0x411111f0},
 		{0x18, 0x02811030},
-		{0x19, 0x411111f0},
 		{0x1a, 0x04a1103f},
-		{0x1b, 0x02011020},
-		{0x1d, 0x40700001},
-		{0x1e, 0x411111f0}),
+		{0x1b, 0x02011020}),
 	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP 15 Touchsmart", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x99a30130},
-		{0x17, 0x40000000},
 		{0x19, 0x03a11020},
-		{0x1d, 0x40f41905},
 		{0x21, 0x0321101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x99a30130},
-		{0x17, 0x40020008},
 		{0x19, 0x03a11020},
-		{0x1d, 0x40e00001},
 		{0x21, 0x03211040}),
 	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x99a30130},
-		{0x17, 0x40000000},
 		{0x19, 0x03a11030},
-		{0x1d, 0x40e00001},
 		{0x21, 0x03211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x99a30130},
-		{0x17, 0x40000000},
-		{0x19, 0x03a11030},
-		{0x1d, 0x40f00001},
-		{0x21, 0x03211020}),
-	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
-		ALC282_STANDARD_PINS,
-		{0x12, 0x99a30130},
-		{0x17, 0x40000000},
 		{0x19, 0x04a11020},
-		{0x1d, 0x40f00001},
 		{0x21, 0x0421101f}),
-	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
-		ALC282_STANDARD_PINS,
-		{0x12, 0x99a30130},
-		{0x17, 0x40000000},
-		{0x19, 0x03a11030},
-		{0x1d, 0x40f00001},
-		{0x21, 0x04211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x90a60140},
-		{0x17, 0x40000000},
 		{0x19, 0x04a11030},
-		{0x1d, 0x40f00001},
 		{0x21, 0x04211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x90a60130},
-		{0x17, 0x40020008},
-		{0x19, 0x411111f0},
-		{0x1d, 0x40e00001},
 		{0x21, 0x0321101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170120},
-		{0x17, 0x40000000},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x40700001},
-		{0x1e, 0x411111f0},
 		{0x21, 0x02211030}),
 	SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x90a60130},
-		{0x17, 0x40020008},
 		{0x19, 0x03a11020},
-		{0x1d, 0x40e00001},
 		{0x21, 0x0321101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL_XPS_13_GPIO6,
-		ALC288_STANDARD_PINS,
 		{0x12, 0x90a60120},
-		{0x13, 0x40000000},
 		{0x14, 0x90170110},
-		{0x1d, 0x4076832d},
 		{0x21, 0x0321101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
-		{0x14, 0x411111f0},
 		{0x15, 0x04211040},
 		{0x18, 0x90170112},
-		{0x1a, 0x04a11020},
-		{0x1d, 0x4075812d}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
-		{0x14, 0x411111f0},
 		{0x15, 0x04211040},
 		{0x18, 0x90170110},
-		{0x1a, 0x04a11020},
-		{0x1d, 0x4075812d}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
-		{0x14, 0x411111f0},
 		{0x15, 0x0421101f},
-		{0x18, 0x411111f0},
-		{0x1a, 0x04a11020},
-		{0x1d, 0x4075812d}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
-		{0x14, 0x411111f0},
 		{0x15, 0x04211020},
-		{0x18, 0x411111f0},
-		{0x1a, 0x04a11040},
-		{0x1d, 0x4076a12d}),
+		{0x1a, 0x04a11040}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
 		{0x14, 0x90170110},
 		{0x15, 0x04211020},
-		{0x18, 0x411111f0},
-		{0x1a, 0x04a11040},
-		{0x1d, 0x4076a12d}),
+		{0x1a, 0x04a11040}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
 		{0x14, 0x90170110},
 		{0x15, 0x04211020},
-		{0x18, 0x411111f0},
-		{0x1a, 0x04a11020},
-		{0x1d, 0x4076a12d}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
 		{0x14, 0x90170110},
 		{0x15, 0x0421101f},
-		{0x18, 0x411111f0},
-		{0x1a, 0x04a11020},
-		{0x1d, 0x4075812d}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
 		ALC292_STANDARD_PINS,
 		{0x12, 0x90a60140},
-		{0x13, 0x411111f0},
 		{0x16, 0x01014020},
-		{0x18, 0x411111f0},
-		{0x19, 0x01a19030},
-		{0x1e, 0x411111f0}),
+		{0x19, 0x01a19030}),
 	SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
 		ALC292_STANDARD_PINS,
 		{0x12, 0x90a60140},
-		{0x13, 0x411111f0},
 		{0x16, 0x01014020},
 		{0x18, 0x02a19031},
-		{0x19, 0x01a1903e},
-		{0x1e, 0x411111f0}),
+		{0x19, 0x01a1903e}),
 	SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
 		ALC292_STANDARD_PINS,
-		{0x12, 0x90a60140},
-		{0x13, 0x411111f0},
-		{0x16, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1e, 0x411111f0}),
+		{0x12, 0x90a60140}),
 	SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC292_STANDARD_PINS,
-		{0x12, 0x40000000},
 		{0x13, 0x90a60140},
 		{0x16, 0x21014020},
-		{0x18, 0x411111f0},
-		{0x19, 0x21a19030},
-		{0x1e, 0x411111f0}),
+		{0x19, 0x21a19030}),
 	SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC292_STANDARD_PINS,
-		{0x12, 0x40000000},
-		{0x13, 0x90a60140},
-		{0x16, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1e, 0x411111f0}),
-	SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC292_STANDARD_PINS,
-		{0x12, 0x40000000},
-		{0x13, 0x90a60140},
-		{0x16, 0x21014020},
-		{0x18, 0x411111f0},
-		{0x19, 0x21a19030},
-		{0x1e, 0x411111ff}),
+		{0x13, 0x90a60140}),
 	SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC298_STANDARD_PINS,
-		{0x12, 0x90a60130},
-		{0x13, 0x40000000},
-		{0x14, 0x411111f0},
-		{0x17, 0x90170140},
-		{0x1d, 0x4068a36d},
-		{0x21, 0x03211020}),
+		{0x17, 0x90170110}),
+	SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+		ALC298_STANDARD_PINS,
+		{0x17, 0x90170140}),
+	SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+		ALC298_STANDARD_PINS,
+		{0x17, 0x90170150}),
 	{}
 };
 
@@ -6675,77 +6477,33 @@
 
 static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
 	SND_HDA_PIN_QUIRK(0x10ec0662, 0x1028, "Dell", ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
-		{0x12, 0x4004c000},
 		{0x14, 0x01014010},
-		{0x15, 0x411111f0},
-		{0x16, 0x411111f0},
 		{0x18, 0x01a19020},
-		{0x19, 0x411111f0},
 		{0x1a, 0x0181302f},
-		{0x1b, 0x0221401f},
-		{0x1c, 0x411111f0},
-		{0x1d, 0x4054c601},
-		{0x1e, 0x411111f0}),
+		{0x1b, 0x0221401f}),
 	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
 		{0x12, 0x99a30130},
 		{0x14, 0x90170110},
 		{0x15, 0x0321101f},
-		{0x16, 0x03011020},
-		{0x18, 0x40000008},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x41000001},
-		{0x1e, 0x411111f0},
-		{0x1f, 0x411111f0}),
+		{0x16, 0x03011020}),
 	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
 		{0x12, 0x99a30140},
 		{0x14, 0x90170110},
 		{0x15, 0x0321101f},
-		{0x16, 0x03011020},
-		{0x18, 0x40000008},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x41000001},
-		{0x1e, 0x411111f0},
-		{0x1f, 0x411111f0}),
+		{0x16, 0x03011020}),
 	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
 		{0x12, 0x99a30150},
 		{0x14, 0x90170110},
 		{0x15, 0x0321101f},
-		{0x16, 0x03011020},
-		{0x18, 0x40000008},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x41000001},
-		{0x1e, 0x411111f0},
-		{0x1f, 0x411111f0}),
+		{0x16, 0x03011020}),
 	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
-		{0x12, 0x411111f0},
 		{0x14, 0x90170110},
 		{0x15, 0x0321101f},
-		{0x16, 0x03011020},
-		{0x18, 0x40000008},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x41000001},
-		{0x1e, 0x411111f0},
-		{0x1f, 0x411111f0}),
+		{0x16, 0x03011020}),
 	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell XPS 15", ALC668_FIXUP_AUTO_MUTE,
 		{0x12, 0x90a60130},
 		{0x14, 0x90170110},
-		{0x15, 0x0321101f},
-		{0x16, 0x40000000},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x40d6832d},
-		{0x1e, 0x411111f0},
-		{0x1f, 0x411111f0}),
+		{0x15, 0x0321101f}),
 	{}
 };
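
The mass deletions in alc269_pin_fixup_tbl and alc662_pin_fixup_tbl
are what the pin_config_match() rewrite earlier in this series buys:
quirk entries no longer have to enumerate every unconnected
(0x411111f0) or vendor-private (0x4xxxxxxx) pin default, only the pins
that actually identify the machine.  A trimmed entry reduces to:

SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
	{0x14, 0x90170110},	/* builtin speaker */
	{0x21, 0x02211020}),	/* headphone jack */
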
 
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index c19e021..9bba275 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -1526,7 +1526,7 @@
 
 static int snd_hdsp_create_midi (struct snd_card *card, struct hdsp *hdsp, int id)
 {
-	char buf[32];
+	char buf[40];
 
 	hdsp->midi[id].id = id;
 	hdsp->midi[id].rmidi = NULL;
@@ -1537,7 +1537,7 @@
 	hdsp->midi[id].pending = 0;
 	spin_lock_init (&hdsp->midi[id].lock);
 
-	sprintf (buf, "%s MIDI %d", card->shortname, id+1);
+	snprintf(buf, sizeof(buf), "%s MIDI %d", card->shortname, id + 1);
 	if (snd_rawmidi_new (card, buf, id, 1, 1, &hdsp->midi[id].rmidi) < 0)
 		return -1;
 
@@ -2806,7 +2806,8 @@
 	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
 
 	offset = ucontrol->id.index - 1;
-	snd_BUG_ON(offset < 0);
+	if (snd_BUG_ON(offset < 0))
+		return -EINVAL;
 
 	switch (hdsp->io_type) {
 	case Digiface:
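
Two independent fixes land in hdsp.c: card->shortname can itself be up
to 31 characters, so "%s MIDI %d" no longer fit the old char buf[32];
the larger buffer plus snprintf() truncates instead of overrunning the
stack.  And since snd_BUG_ON() evaluates to its condition, it can gate
an early return rather than merely complain:

	char buf[40];

	snprintf(buf, sizeof(buf), "%s MIDI %d", card->shortname, id + 1);

	/* evaluates to the condition; also warns under CONFIG_SND_DEBUG */
	if (snd_BUG_ON(offset < 0))
		return -EINVAL;
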
diff --git a/sound/ppc/keywest.c b/sound/ppc/keywest.c
index 6120a06..4373615 100644
--- a/sound/ppc/keywest.c
+++ b/sound/ppc/keywest.c
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
+#include <linux/module.h>
 #include <sound/core.h>
 #include "pmac.h"
 
@@ -101,6 +102,7 @@
 	{ "keywest", 0 },		/* instantiated by us if needed */
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, keywest_i2c_id);
 
 static struct i2c_driver keywest_driver = {
 	.driver = {
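
The new MODULE_DEVICE_TABLE() (together with the <linux/module.h>
include it needs) exports the i2c id table as module aliases, so udev
can autoload the driver when a matching device turns up; the OF table
added to bcm2835-i2s further down serves the same purpose.  The
general pattern, with a hypothetical driver name:

static const struct i2c_device_id foo_id[] = {
	{ "foo-codec", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);
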
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 1d651b8..225bfda 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -57,6 +57,7 @@
 source "sound/soc/sh/Kconfig"
 source "sound/soc/sirf/Kconfig"
 source "sound/soc/spear/Kconfig"
+source "sound/soc/sti/Kconfig"
 source "sound/soc/tegra/Kconfig"
 source "sound/soc/txx9/Kconfig"
 source "sound/soc/ux500/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 669648b..134aca1 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -39,6 +39,7 @@
 obj-$(CONFIG_SND_SOC)	+= sh/
 obj-$(CONFIG_SND_SOC)	+= sirf/
 obj-$(CONFIG_SND_SOC)	+= spear/
+obj-$(CONFIG_SND_SOC)	+= sti/
 obj-$(CONFIG_SND_SOC)	+= tegra/
 obj-$(CONFIG_SND_SOC)	+= txx9/
 obj-$(CONFIG_SND_SOC)	+= ux500/
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 841d059..ba8def5 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -290,7 +290,7 @@
 	int dir, dir_mask;
 	int ret;
 
-	pr_debug("atmel_ssc_startup: SSC_SR=0x%u\n",
+	pr_debug("atmel_ssc_startup: SSC_SR=0x%x\n",
 		ssc_readl(ssc_p->ssc->regs, SR));
 
 	/* Enable PMC peripheral clock for this SSC */
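
The old format string printed a decimal value after a literal "0x"
prefix, so the debug line would show something like "SSC_SR=0x3085"
for decimal 3085.  "0x%x" prints the status register in hexadecimal as
intended:

	pr_debug("atmel_ssc_startup: SSC_SR=0x%x\n",
		 ssc_readl(ssc_p->ssc->regs, SR));
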
diff --git a/sound/soc/au1x/dbdma2.c b/sound/soc/au1x/dbdma2.c
index dd94fea..5741c0a 100644
--- a/sound/soc/au1x/dbdma2.c
+++ b/sound/soc/au1x/dbdma2.c
@@ -344,14 +344,8 @@
 
 	platform_set_drvdata(pdev, dmadata);
 
-	return snd_soc_register_platform(&pdev->dev, &au1xpsc_soc_platform);
-}
-
-static int au1xpsc_pcm_drvremove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev,
+					      &au1xpsc_soc_platform);
 }
 
 static struct platform_driver au1xpsc_pcm_driver = {
@@ -359,7 +353,6 @@
 		.name	= "au1xpsc-pcm",
 	},
 	.probe		= au1xpsc_pcm_drvprobe,
-	.remove		= au1xpsc_pcm_drvremove,
 };
 
 module_platform_driver(au1xpsc_pcm_driver);
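
This is the first of several conversions in this pull to
devm_snd_soc_register_platform(): the managed registration is undone
automatically when the driver detaches, so the explicit .remove
callback and its unregister call simply disappear (the same pattern
repeats in au1x/dma.c and the Blackfin PCM drivers below).  The
resulting skeleton, with hypothetical names:

static int foo_pcm_probe(struct platform_device *pdev)
{
	return devm_snd_soc_register_platform(&pdev->dev, &foo_soc_platform);
}

static struct platform_driver foo_pcm_driver = {
	.driver	= { .name = "foo-pcm" },
	.probe	= foo_pcm_probe,
	/* no .remove needed: devm tears the registration down */
};
module_platform_driver(foo_pcm_driver);
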
diff --git a/sound/soc/au1x/dma.c b/sound/soc/au1x/dma.c
index 24cc7f4..fcf5a9a 100644
--- a/sound/soc/au1x/dma.c
+++ b/sound/soc/au1x/dma.c
@@ -312,14 +312,8 @@
 
 	platform_set_drvdata(pdev, ctx);
 
-	return snd_soc_register_platform(&pdev->dev, &alchemy_pcm_soc_platform);
-}
-
-static int alchemy_pcm_drvremove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev,
+					      &alchemy_pcm_soc_platform);
 }
 
 static struct platform_driver alchemy_pcmdma_driver = {
@@ -327,7 +321,6 @@
 		.name	= "alchemy-pcm-dma",
 	},
 	.probe		= alchemy_pcm_drvprobe,
-	.remove		= alchemy_pcm_drvremove,
 };
 
 module_platform_driver(alchemy_pcmdma_driver);
diff --git a/sound/soc/au1x/psc-i2s.c b/sound/soc/au1x/psc-i2s.c
index e742ef6..38e853a 100644
--- a/sound/soc/au1x/psc-i2s.c
+++ b/sound/soc/au1x/psc-i2s.c
@@ -305,19 +305,9 @@
 		return -ENOMEM;
 
 	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!iores)
-		return -ENODEV;
-
-	ret = -EBUSY;
-	if (!devm_request_mem_region(&pdev->dev, iores->start,
-				     resource_size(iores),
-				     pdev->name))
-		return -EBUSY;
-
-	wd->mmio = devm_ioremap(&pdev->dev, iores->start,
-				resource_size(iores));
-	if (!wd->mmio)
-		return -EBUSY;
+	wd->mmio = devm_ioremap_resource(&pdev->dev, iores);
+	if (IS_ERR(wd->mmio))
+		return PTR_ERR(wd->mmio);
 
 	dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
 	if (!dmares)
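
devm_ioremap_resource() folds the NULL-resource check,
request_mem_region() and ioremap() into one managed call that returns
an ERR_PTR on failure, which is why the hand-rolled sequence above
collapses to:

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	wd->mmio = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(wd->mmio))
		return PTR_ERR(wd->mmio);
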
diff --git a/sound/soc/bcm/bcm2835-i2s.c b/sound/soc/bcm/bcm2835-i2s.c
index 03fa1cb..8c435be 100644
--- a/sound/soc/bcm/bcm2835-i2s.c
+++ b/sound/soc/bcm/bcm2835-i2s.c
@@ -862,6 +862,8 @@
 	{},
 };
 
+MODULE_DEVICE_TABLE(of, bcm2835_i2s_of_match);
+
 static struct platform_driver bcm2835_i2s_driver = {
 	.probe		= bcm2835_i2s_probe,
 	.driver		= {
diff --git a/sound/soc/blackfin/bf5xx-ac97-pcm.c b/sound/soc/blackfin/bf5xx-ac97-pcm.c
index 238913e..02ad260 100644
--- a/sound/soc/blackfin/bf5xx-ac97-pcm.c
+++ b/sound/soc/blackfin/bf5xx-ac97-pcm.c
@@ -450,13 +450,8 @@
 
 static int bf5xx_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &bf5xx_ac97_soc_platform);
-}
-
-static int bf5xx_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev,
+					      &bf5xx_ac97_soc_platform);
 }
 
 static struct platform_driver bf5xx_pcm_driver = {
@@ -465,7 +460,6 @@
 	},
 
 	.probe = bf5xx_soc_platform_probe,
-	.remove = bf5xx_soc_platform_remove,
 };
 
 module_platform_driver(bf5xx_pcm_driver);
diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
index d95477a..6cba211d 100644
--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
+++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
@@ -342,13 +342,8 @@
 
 static int bfin_i2s_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &bf5xx_i2s_soc_platform);
-}
-
-static int bfin_i2s_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev,
+					      &bf5xx_i2s_soc_platform);
 }
 
 static struct platform_driver bfin_i2s_pcm_driver = {
@@ -357,7 +352,6 @@
 	},
 
 	.probe = bfin_i2s_soc_platform_probe,
-	.remove = bfin_i2s_soc_platform_remove,
 };
 
 module_platform_driver(bfin_i2s_pcm_driver);
diff --git a/sound/soc/blackfin/bfin-eval-adau1x61.c b/sound/soc/blackfin/bfin-eval-adau1x61.c
index 4229f76..fddfe00c 100644
--- a/sound/soc/blackfin/bfin-eval-adau1x61.c
+++ b/sound/soc/blackfin/bfin-eval-adau1x61.c
@@ -108,6 +108,7 @@
 
 static struct snd_soc_card bfin_eval_adau1x61 = {
 	.name = "bfin-eval-adau1x61",
+	.owner = THIS_MODULE,
 	.driver_name = "eval-adau1x61",
 	.dai_link = &bfin_eval_adau1x61_dai,
 	.num_links = 1,
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 38b3dad..e8bed6b 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -156,33 +156,29 @@
 static const DECLARE_TLV_DB_SCALE(adc_tlv, -900, 300, 0);
 
 /* {-23, -17, -13.5, -11, -9, -6, -3, 0}dB */
-static const unsigned int mic_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(mic_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(-2300, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(-1700, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(-1350, 0, 0),
 	3, 3, TLV_DB_SCALE_ITEM(-1100, 0, 0),
-	4, 7, TLV_DB_SCALE_ITEM(-900, 300, 0),
-};
+	4, 7, TLV_DB_SCALE_ITEM(-900, 300, 0)
+);
 
 /* {0, 0, 0, -6, 0, 6, 12, 18}dB */
-static const unsigned int aux_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(aux_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(0, 0, 0),
-	3, 7, TLV_DB_SCALE_ITEM(-600, 600, 0),
-};
+	3, 7, TLV_DB_SCALE_ITEM(-600, 600, 0)
+);
 
 /* {-16, -13, -10, -7, -5.2, -3.3, -2.2, 0}dB, mute instead of -16dB */
-static const unsigned int out_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(out_tlv,
 	0, 3, TLV_DB_SCALE_ITEM(-1600, 300, 1),
 	4, 4, TLV_DB_SCALE_ITEM(-520, 0, 0),
 	5, 5, TLV_DB_SCALE_ITEM(-330, 0, 0),
-	6, 7, TLV_DB_SCALE_ITEM(-220, 220, 0),
-};
+	6, 7, TLV_DB_SCALE_ITEM(-220, 220, 0)
+);
 
-static const unsigned int st_tlv[] = {
-	TLV_DB_RANGE_HEAD(8),
+static const DECLARE_TLV_DB_RANGE(st_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(-12041, 602, 0),
 	2, 3, TLV_DB_SCALE_ITEM(-11087, 250, 0),
 	4, 5, TLV_DB_SCALE_ITEM(-10643, 158, 0),
@@ -190,8 +186,8 @@
 	8, 9, TLV_DB_SCALE_ITEM(-10133, 92, 0),
 	10, 13, TLV_DB_SCALE_ITEM(-9958, 70, 0),
 	14, 17, TLV_DB_SCALE_ITEM(-9689, 53, 0),
-	18, 271, TLV_DB_SCALE_ITEM(-9484, 37, 0),
-};
+	18, 271, TLV_DB_SCALE_ITEM(-9484, 37, 0)
+);
 
 /* Sidetone Gain = M * 2^(-5-N) */
 struct st_gain {
@@ -1028,10 +1024,8 @@
 
 	if (dir == PM860X_CLK_DIR_OUT)
 		pm860x->dir = PM860X_CLK_DIR_OUT;
-	else {
-		pm860x->dir = PM860X_CLK_DIR_IN;
+	else	/* Slave mode is not supported */
 		return -EINVAL;
-	}
 
 	return 0;
 }
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index efaafce..0c9733e 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -53,6 +53,7 @@
 	select SND_SOC_CS4271_I2C if I2C
 	select SND_SOC_CS4271_SPI if SPI_MASTER
 	select SND_SOC_CS42XX8_I2C if I2C
+	select SND_SOC_CS4349 if I2C
 	select SND_SOC_CX20442 if TTY
 	select SND_SOC_DA7210 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_DA7213 if I2C
@@ -62,6 +63,8 @@
 	select SND_SOC_BT_SCO
 	select SND_SOC_ES8328_SPI if SPI_MASTER
 	select SND_SOC_ES8328_I2C if I2C
+	select SND_SOC_GTM601
+	select SND_SOC_ICS43432
 	select SND_SOC_ISABELLE if I2C
 	select SND_SOC_JZ4740_CODEC
 	select SND_SOC_LM4857 if I2C
@@ -83,6 +86,7 @@
 	select SND_SOC_PCM512x_I2C if I2C
 	select SND_SOC_PCM512x_SPI if SPI_MASTER
 	select SND_SOC_RT286 if I2C
+	select SND_SOC_RT298 if I2C
 	select SND_SOC_RT5631 if I2C
 	select SND_SOC_RT5640 if I2C
 	select SND_SOC_RT5645 if I2C
@@ -102,6 +106,7 @@
 	select SND_SOC_STA350 if I2C
 	select SND_SOC_STA529 if I2C
 	select SND_SOC_STAC9766 if SND_SOC_AC97_BUS
+	select SND_SOC_STI_SAS
 	select SND_SOC_TAS2552 if I2C
 	select SND_SOC_TAS5086 if I2C
 	select SND_SOC_TAS571X if I2C
@@ -403,6 +408,11 @@
 	select SND_SOC_CS42XX8
 	select REGMAP_I2C
 
+# Cirrus Logic CS4349 HiFi DAC
+config SND_SOC_CS4349
+	tristate "Cirrus Logic CS4349 CODEC"
+	depends on I2C
+
 config SND_SOC_CX20442
 	tristate
 	depends on TTY
@@ -446,6 +456,12 @@
 	tristate
 	select SND_SOC_ES8328
 
+config SND_SOC_GTM601
+	tristate "GTM601 UMTS modem audio codec"
+
+config SND_SOC_ICS43432
+	tristate
+
 config SND_SOC_ISABELLE
         tristate
 
@@ -512,12 +528,18 @@
 config SND_SOC_RL6347A
 	tristate
 	default y if SND_SOC_RT286=y
+	default y if SND_SOC_RT298=y
 	default m if SND_SOC_RT286=m
+	default m if SND_SOC_RT298=m
 
 config SND_SOC_RT286
 	tristate
 	depends on I2C
 
+config SND_SOC_RT298
+	tristate
+	depends on I2C
+
 config SND_SOC_RT5631
 	tristate "Realtek ALC5631/RT5631 CODEC"
 	depends on I2C
@@ -610,6 +632,9 @@
 config SND_SOC_STAC9766
 	tristate
 
+config SND_SOC_STI_SAS
+	tristate "codec Audio support for STI SAS codec"
+
 config SND_SOC_TAS2552
 	tristate "Texas Instruments TAS2552 Mono Audio amplifier"
 	depends on I2C
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index cf160d9..4a32077 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -45,6 +45,7 @@
 snd-soc-cs4271-spi-objs := cs4271-spi.o
 snd-soc-cs42xx8-objs := cs42xx8.o
 snd-soc-cs42xx8-i2c-objs := cs42xx8-i2c.o
+snd-soc-cs4349-objs := cs4349.o
 snd-soc-cx20442-objs := cx20442.o
 snd-soc-da7210-objs := da7210.o
 snd-soc-da7213-objs := da7213.o
@@ -55,6 +56,8 @@
 snd-soc-es8328-objs := es8328.o
 snd-soc-es8328-i2c-objs := es8328-i2c.o
 snd-soc-es8328-spi-objs := es8328-spi.o
+snd-soc-gtm601-objs := gtm601.o
+snd-soc-ics43432-objs := ics43432.o
 snd-soc-isabelle-objs := isabelle.o
 snd-soc-jz4740-codec-objs := jz4740.o
 snd-soc-l3-objs := l3.o
@@ -79,6 +82,7 @@
 snd-soc-rl6231-objs := rl6231.o
 snd-soc-rl6347a-objs := rl6347a.o
 snd-soc-rt286-objs := rt286.o
+snd-soc-rt298-objs := rt298.o
 snd-soc-rt5631-objs := rt5631.o
 snd-soc-rt5640-objs := rt5640.o
 snd-soc-rt5645-objs := rt5645.o
@@ -106,6 +110,7 @@
 snd-soc-sta350-objs := sta350.o
 snd-soc-sta529-objs := sta529.o
 snd-soc-stac9766-objs := stac9766.o
+snd-soc-sti-sas-objs := sti-sas.o
 snd-soc-tas5086-objs := tas5086.o
 snd-soc-tas571x-objs := tas571x.o
 snd-soc-tfa9879-objs := tfa9879.o
@@ -232,6 +237,7 @@
 obj-$(CONFIG_SND_SOC_CS4271_SPI)	+= snd-soc-cs4271-spi.o
 obj-$(CONFIG_SND_SOC_CS42XX8)	+= snd-soc-cs42xx8.o
 obj-$(CONFIG_SND_SOC_CS42XX8_I2C) += snd-soc-cs42xx8-i2c.o
+obj-$(CONFIG_SND_SOC_CS4349)	+= snd-soc-cs4349.o
 obj-$(CONFIG_SND_SOC_CX20442)	+= snd-soc-cx20442.o
 obj-$(CONFIG_SND_SOC_DA7210)	+= snd-soc-da7210.o
 obj-$(CONFIG_SND_SOC_DA7213)	+= snd-soc-da7213.o
@@ -242,6 +248,8 @@
 obj-$(CONFIG_SND_SOC_ES8328)	+= snd-soc-es8328.o
 obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o
 obj-$(CONFIG_SND_SOC_ES8328_SPI)+= snd-soc-es8328-spi.o
+obj-$(CONFIG_SND_SOC_GTM601)    += snd-soc-gtm601.o
+obj-$(CONFIG_SND_SOC_ICS43432)	+= snd-soc-ics43432.o
 obj-$(CONFIG_SND_SOC_ISABELLE)	+= snd-soc-isabelle.o
 obj-$(CONFIG_SND_SOC_JZ4740_CODEC)	+= snd-soc-jz4740-codec.o
 obj-$(CONFIG_SND_SOC_L3)	+= snd-soc-l3.o
@@ -266,6 +274,7 @@
 obj-$(CONFIG_SND_SOC_RL6231)	+= snd-soc-rl6231.o
 obj-$(CONFIG_SND_SOC_RL6347A)	+= snd-soc-rl6347a.o
 obj-$(CONFIG_SND_SOC_RT286)	+= snd-soc-rt286.o
+obj-$(CONFIG_SND_SOC_RT298)	+= snd-soc-rt298.o
 obj-$(CONFIG_SND_SOC_RT5631)	+= snd-soc-rt5631.o
 obj-$(CONFIG_SND_SOC_RT5640)	+= snd-soc-rt5640.o
 obj-$(CONFIG_SND_SOC_RT5645)	+= snd-soc-rt5645.o
@@ -289,6 +298,7 @@
 obj-$(CONFIG_SND_SOC_STA350)   += snd-soc-sta350.o
 obj-$(CONFIG_SND_SOC_STA529)   += snd-soc-sta529.o
 obj-$(CONFIG_SND_SOC_STAC9766)	+= snd-soc-stac9766.o
+obj-$(CONFIG_SND_SOC_STI_SAS)	+= snd-soc-sti-sas.o
 obj-$(CONFIG_SND_SOC_TAS2552)	+= snd-soc-tas2552.o
 obj-$(CONFIG_SND_SOC_TAS5086)	+= snd-soc-tas5086.o
 obj-$(CONFIG_SND_SOC_TAS571X)	+= snd-soc-tas571x.o
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index c7d243d..affb192 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -1335,11 +1335,10 @@
 static DECLARE_TLV_DB_SCALE(hs_ear_dig_gain_tlv, -100, 100, 1);
 /* -1dB = Mute */
 
-static const unsigned int hs_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(hs_gain_tlv,
 	0, 3, TLV_DB_SCALE_ITEM(-3200, 400, 0),
-	4, 15, TLV_DB_SCALE_ITEM(-1800, 200, 0),
-};
+	4, 15, TLV_DB_SCALE_ITEM(-1800, 200, 0)
+);
 
 static DECLARE_TLV_DB_SCALE(mic_gain_tlv, 0, 100, 0);
 
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 3cc69a6..9ef20db 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -202,19 +202,21 @@
 		.formats = SND_SOC_STD_AC97_FMTS, },
 };
 
+#define AD1980_VENDOR_ID 0x41445300
+#define AD1980_VENDOR_MASK 0xffffff00
+
 static int ad1980_reset(struct snd_soc_codec *codec, int try_warm)
 {
 	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
 	unsigned int retry_cnt = 0;
+	int ret;
 
 	do {
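+		/*
+		 * Reset the link (warm reset first when available, cold
+		 * reset otherwise) and verify the vendor ID to confirm
+		 * the codec responded.
+		 */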
-		if (try_warm && soc_ac97_ops->warm_reset) {
-			soc_ac97_ops->warm_reset(ac97);
-			if (snd_soc_read(codec, AC97_RESET) == 0x0090)
-				return 1;
-		}
+		ret = snd_ac97_reset(ac97, true, AD1980_VENDOR_ID,
+			AD1980_VENDOR_MASK);
+		if (ret >= 0)
+			return 0;
 
-		soc_ac97_ops->reset(ac97);
 		/*
 		 * Set the 16-slot bit in register 74h, then every slot will have only
 		 * 16 bits. This command is sent out in 20bit mode, in which
@@ -223,8 +225,6 @@
 		 */
 		snd_soc_write(codec, AC97_AD_SERIAL_CFG, 0x9900);
 
-		if (snd_soc_read(codec, AC97_RESET)  == 0x0090)
-			return 0;
 	} while (retry_cnt++ < 10);
 
 	dev_err(codec->dev, "Failed to reset: AC97 link error\n");
@@ -240,7 +240,7 @@
 	u16 vendor_id2;
 	u16 ext_status;
 
-	ac97 = snd_soc_new_ac97_codec(codec);
+	ac97 = snd_soc_new_ac97_codec(codec, 0, 0);
 	if (IS_ERR(ac97)) {
 		ret = PTR_ERR(ac97);
 		dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
@@ -260,22 +260,10 @@
 	if (ret < 0)
 		goto reset_err;
 
-	/* Read out vendor ID to make sure it is ad1980 */
-	if (snd_soc_read(codec, AC97_VENDOR_ID1) != 0x4144) {
-		ret = -ENODEV;
-		goto reset_err;
-	}
-
 	vendor_id2 = snd_soc_read(codec, AC97_VENDOR_ID2);
-
-	if (vendor_id2 != 0x5370) {
-		if (vendor_id2 != 0x5374) {
-			ret = -ENODEV;
-			goto reset_err;
-		} else {
-			dev_warn(codec->dev,
-				"Found AD1981 - only 2/2 IN/OUT Channels supported\n");
-		}
+	if (vendor_id2 == 0x5374) {
+		dev_warn(codec->dev,
+			"Found AD1981 - only 2/2 IN/OUT Channels supported\n");
 	}
 
 	/* unmute captures and playbacks volume */
diff --git a/sound/soc/codecs/adau1373.c b/sound/soc/codecs/adau1373.c
index a431602..fe1353a 100644
--- a/sound/soc/codecs/adau1373.c
+++ b/sound/soc/codecs/adau1373.c
@@ -320,13 +320,12 @@
 	{ ADAU1373_DIGEN,		0x00 },
 };
 
-static const unsigned int adau1373_out_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(adau1373_out_tlv,
 	0, 7, TLV_DB_SCALE_ITEM(-7900, 400, 1),
 	8, 15, TLV_DB_SCALE_ITEM(-4700, 300, 0),
 	16, 23, TLV_DB_SCALE_ITEM(-2300, 200, 0),
-	24, 31, TLV_DB_SCALE_ITEM(-700, 100, 0),
-};
+	24, 31, TLV_DB_SCALE_ITEM(-700, 100, 0)
+);
 
 static const DECLARE_TLV_DB_MINMAX(adau1373_digital_tlv, -9563, 0);
 static const DECLARE_TLV_DB_SCALE(adau1373_in_pga_tlv, -1300, 100, 1);
@@ -381,12 +380,11 @@
 	"158Hz", "232Hz", "347Hz", "520Hz",
 };
 
-static const unsigned int adau1373_bass_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(adau1373_bass_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-600, 600, 1),
 	3, 4, TLV_DB_SCALE_ITEM(950, 250, 0),
-	5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0),
-};
+	5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0)
+);
 
 static SOC_ENUM_SINGLE_DECL(adau1373_bass_lpf_cutoff_enum,
 	ADAU1373_BASS1, 5, adau1373_bass_lpf_cutoff_text);
@@ -414,11 +412,10 @@
 static SOC_ENUM_SINGLE_DECL(adau1373_3d_cutoff_enum,
 	ADAU1373_3D_CTRL1, 0, adau1373_3d_cutoff_text);
 
-static const unsigned int adau1373_3d_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(adau1373_3d_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
-	1, 7, TLV_DB_LINEAR_ITEM(-1800, -120),
-};
+	1, 7, TLV_DB_LINEAR_ITEM(-1800, -120)
+);
 
 static const char *adau1373_lr_mux_text[] = {
 	"Mute",
@@ -1534,7 +1531,6 @@
 static struct i2c_driver adau1373_i2c_driver = {
 	.driver = {
 		.name = "adau1373",
-		.owner = THIS_MODULE,
 	},
 	.probe = adau1373_i2c_probe,
 	.remove = adau1373_i2c_remove,
diff --git a/sound/soc/codecs/adau1701.c b/sound/soc/codecs/adau1701.c
index ff7f846..de53c0d 100644
--- a/sound/soc/codecs/adau1701.c
+++ b/sound/soc/codecs/adau1701.c
@@ -915,7 +915,6 @@
 static struct i2c_driver adau1701_i2c_driver = {
 	.driver = {
 		.name	= "adau1701",
-		.owner	= THIS_MODULE,
 		.of_match_table	= of_match_ptr(adau1701_dt_ids),
 	},
 	.probe		= adau1701_i2c_probe,
diff --git a/sound/soc/codecs/adau1761-i2c.c b/sound/soc/codecs/adau1761-i2c.c
index 862796d..348ccb1 100644
--- a/sound/soc/codecs/adau1761-i2c.c
+++ b/sound/soc/codecs/adau1761-i2c.c
@@ -47,7 +47,6 @@
 static struct i2c_driver adau1761_i2c_driver = {
 	.driver = {
 		.name = "adau1761",
-		.owner = THIS_MODULE,
 	},
 	.probe = adau1761_i2c_probe,
 	.remove = adau1761_i2c_remove,
diff --git a/sound/soc/codecs/adau1781-i2c.c b/sound/soc/codecs/adau1781-i2c.c
index 2ce4362..0e32bba 100644
--- a/sound/soc/codecs/adau1781-i2c.c
+++ b/sound/soc/codecs/adau1781-i2c.c
@@ -45,7 +45,6 @@
 static struct i2c_driver adau1781_i2c_driver = {
 	.driver = {
 		.name = "adau1781",
-		.owner = THIS_MODULE,
 	},
 	.probe = adau1781_i2c_probe,
 	.remove = adau1781_i2c_remove,
diff --git a/sound/soc/codecs/adau1977-i2c.c b/sound/soc/codecs/adau1977-i2c.c
index 9700e8c..21e7394 100644
--- a/sound/soc/codecs/adau1977-i2c.c
+++ b/sound/soc/codecs/adau1977-i2c.c
@@ -46,7 +46,6 @@
 static struct i2c_driver adau1977_i2c_driver = {
 	.driver = {
 		.name = "adau1977",
-		.owner = THIS_MODULE,
 	},
 	.probe = adau1977_i2c_probe,
 	.remove = adau1977_i2c_remove,
diff --git a/sound/soc/codecs/adav803.c b/sound/soc/codecs/adav803.c
index 66d9fce..52881fa 100644
--- a/sound/soc/codecs/adav803.c
+++ b/sound/soc/codecs/adav803.c
@@ -36,7 +36,6 @@
 static struct i2c_driver adav803_driver = {
 	.driver = {
 		.name = "adav803",
-		.owner = THIS_MODULE,
 	},
 	.probe = adav803_probe,
 	.remove = adav803_remove,
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
index 36d8425..198c924 100644
--- a/sound/soc/codecs/adav80x.c
+++ b/sound/soc/codecs/adav80x.c
@@ -113,7 +113,7 @@
 
 #define ADAV80X_PLL_OUTE_SYSCLKPD(x)		BIT(2 - (x))
 
-static struct reg_default adav80x_reg_defaults[] = {
+static const struct reg_default adav80x_reg_defaults[] = {
 	{ ADAV80X_PLAYBACK_CTRL,	0x01 },
 	{ ADAV80X_AUX_IN_CTRL,		0x01 },
 	{ ADAV80X_REC_CTRL,		0x02 },
@@ -865,7 +865,6 @@
 	.val_bits = 8,
 	.pad_bits = 1,
 	.reg_bits = 7,
-	.read_flag_mask = 0x01,
 
 	.max_register = ADAV80X_PLL_OUTE,
 
diff --git a/sound/soc/codecs/ak4535.c b/sound/soc/codecs/ak4535.c
index 8670861..54428c6 100644
--- a/sound/soc/codecs/ak4535.c
+++ b/sound/soc/codecs/ak4535.c
@@ -444,7 +444,6 @@
 static struct i2c_driver ak4535_i2c_driver = {
 	.driver = {
 		.name = "ak4535",
-		.owner = THIS_MODULE,
 	},
 	.probe =    ak4535_i2c_probe,
 	.remove =   ak4535_i2c_remove,
diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c
index 2d0ff45..b14176f 100644
--- a/sound/soc/codecs/ak4641.c
+++ b/sound/soc/codecs/ak4641.c
@@ -609,7 +609,6 @@
 static struct i2c_driver ak4641_i2c_driver = {
 	.driver = {
 		.name = "ak4641",
-		.owner = THIS_MODULE,
 	},
 	.probe =    ak4641_i2c_probe,
 	.remove =   ak4641_i2c_remove,
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 7c0f6552..4a90143 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -64,12 +64,15 @@
 #define FIL1_0		0x1c
 #define FIL1_1		0x1d
 #define FIL1_2		0x1e
-#define FIL1_3		0x1f
+#define FIL1_3		0x1f	/* The maximum valid register for ak4642 */
 #define PW_MGMT4	0x20
 #define MD_CTL5		0x21
 #define LO_MS		0x22
 #define HP_MS		0x23
-#define SPK_MS		0x24
+#define SPK_MS		0x24	/* The maximum valid register for ak4643 */
+#define EQ_FBEQAB	0x25
+#define EQ_FBEQCD	0x26
+#define EQ_FBEQE	0x27	/* The maximum valid register for ak4648 */
 
 /* PW_MGMT1*/
 #define PMVCM		(1 << 6) /* VCOM Power Management */
@@ -241,7 +244,7 @@
 /*
  * ak4642 register cache
  */
-static const struct reg_default ak4642_reg[] = {
+static const struct reg_default ak4643_reg[] = {
 	{  0, 0x00 }, {  1, 0x00 }, {  2, 0x01 }, {  3, 0x00 },
 	{  4, 0x02 }, {  5, 0x00 }, {  6, 0x00 }, {  7, 0x00 },
 	{  8, 0xe1 }, {  9, 0xe1 }, { 10, 0x18 }, { 11, 0x00 },
@@ -254,6 +257,14 @@
 	{ 36, 0x00 },
 };
 
+/*
+ * The default settings for the 0x0 ~ 0x1f registers are the same for
+ * ak4642 and ak4643, so the ak4643 reg_defaults are reused for ak4642.
+ * The registers valid for ak4642, 0x0 ~ 0x1f, are a subset of the
+ * ak4643 ones, hence NUM_AK4642_REG_DEFAULTS below.
+ */
+#define ak4642_reg ak4643_reg
+#define NUM_AK4642_REG_DEFAULTS	(FIL1_3 + 1)
+
 static const struct reg_default ak4648_reg[] = {
 	{  0, 0x00 }, {  1, 0x00 }, {  2, 0x01 }, {  3, 0x00 },
 	{  4, 0x02 }, {  5, 0x00 }, {  6, 0x00 }, {  7, 0x00 },
@@ -535,15 +546,23 @@
 static const struct regmap_config ak4642_regmap = {
 	.reg_bits		= 8,
 	.val_bits		= 8,
-	.max_register		= ARRAY_SIZE(ak4642_reg) + 1,
+	.max_register		= FIL1_3,
 	.reg_defaults		= ak4642_reg,
-	.num_reg_defaults	= ARRAY_SIZE(ak4642_reg),
+	.num_reg_defaults	= NUM_AK4642_REG_DEFAULTS,
+};
+
+static const struct regmap_config ak4643_regmap = {
+	.reg_bits		= 8,
+	.val_bits		= 8,
+	.max_register		= SPK_MS,
+	.reg_defaults		= ak4643_reg,
+	.num_reg_defaults	= ARRAY_SIZE(ak4643_reg),
 };
 
 static const struct regmap_config ak4648_regmap = {
 	.reg_bits		= 8,
 	.val_bits		= 8,
-	.max_register		= ARRAY_SIZE(ak4648_reg) + 1,
+	.max_register		= EQ_FBEQE,
 	.reg_defaults		= ak4648_reg,
 	.num_reg_defaults	= ARRAY_SIZE(ak4648_reg),
 };
@@ -553,7 +572,7 @@
 };
 
 static const struct ak4642_drvdata ak4643_drvdata = {
-	.regmap_config = &ak4642_regmap,
+	.regmap_config = &ak4643_regmap,
 };
 
 static const struct ak4642_drvdata ak4648_drvdata = {
@@ -626,7 +645,6 @@
 static struct i2c_driver ak4642_i2c_driver = {
 	.driver = {
 		.name = "ak4642-codec",
-		.owner = THIS_MODULE,
 		.of_match_table = ak4642_of_match,
 	},
 	.probe		= ak4642_i2c_probe,
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c
index 0e59063..c73a9f6 100644
--- a/sound/soc/codecs/ak4671.c
+++ b/sound/soc/codecs/ak4671.c
@@ -663,7 +663,6 @@
 static struct i2c_driver ak4671_i2c_driver = {
 	.driver = {
 		.name = "ak4671-codec",
-		.owner = THIS_MODULE,
 	},
 	.probe = ak4671_i2c_probe,
 	.remove = ak4671_i2c_remove,
diff --git a/sound/soc/codecs/alc5623.c b/sound/soc/codecs/alc5623.c
index 0fc24e0..d2e3a3e 100644
--- a/sound/soc/codecs/alc5623.c
+++ b/sound/soc/codecs/alc5623.c
@@ -82,12 +82,11 @@
 static const DECLARE_TLV_DB_SCALE(vol_tlv, -3450, 150, 0);
 static const DECLARE_TLV_DB_SCALE(hp_tlv, -4650, 150, 0);
 static const DECLARE_TLV_DB_SCALE(adc_rec_tlv, -1650, 150, 0);
-static const unsigned int boost_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(boost_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
-	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
-};
+	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(dig_tlv, 0, 600, 0);
 
 static const struct snd_kcontrol_new alc5621_vol_snd_controls[] = {
@@ -1085,7 +1084,6 @@
 static struct i2c_driver alc5623_i2c_driver = {
 	.driver = {
 		.name = "alc562x-codec",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(alc5623_of_match),
 	},
 	.probe = alc5623_i2c_probe,
diff --git a/sound/soc/codecs/alc5632.c b/sound/soc/codecs/alc5632.c
index 607a63b..4d3ba33 100644
--- a/sound/soc/codecs/alc5632.c
+++ b/sound/soc/codecs/alc5632.c
@@ -35,7 +35,7 @@
 /*
  * ALC5632 register cache
  */
-static struct reg_default  alc5632_reg_defaults[] = {
+static const struct reg_default alc5632_reg_defaults[] = {
 	{   2, 0x8080 },	/* R2   - Speaker Output Volume */
 	{   4, 0x8080 },	/* R4   - Headphone Output Volume */
 	{   6, 0x8080 },	/* R6   - AUXOUT Volume */
@@ -146,11 +146,10 @@
 static const DECLARE_TLV_DB_SCALE(hp_tlv, -4650, 150, 0);
 /* -16.5db min scale, 1.5db steps, no mute */
 static const DECLARE_TLV_DB_SCALE(adc_rec_tlv, -1650, 150, 0);
-static const unsigned int boost_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(boost_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
-	1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0),
-};
+	1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0)
+);
 /* 0db min scale, 6 db steps, no mute */
 static const DECLARE_TLV_DB_SCALE(dig_tlv, 0, 600, 0);
 /* 0db min scalem 0.75db steps, no mute */
@@ -1183,7 +1182,6 @@
 static struct i2c_driver alc5632_i2c_driver = {
 	.driver = {
 		.name = "alc5632",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(alc5632_of_match),
 	},
 	.probe = alc5632_i2c_probe,
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 802e05e..8a2221a 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -1366,7 +1366,7 @@
 {
 	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
 	struct arizona *arizona = priv->arizona;
-	struct reg_default dac_comp[] = {
+	struct reg_sequence dac_comp[] = {
 		{ 0x80, 0x3 },
 		{ ARIZONA_DAC_COMP_1, 0 },
 		{ ARIZONA_DAC_COMP_2, 0 },
@@ -1504,7 +1504,7 @@
 	else
 		rates = &arizona_48k_bclk_rates[0];
 
-	wl = snd_pcm_format_width(params_format(params));
+	wl = params_width(params);
 
 	if (tdm_slots) {
 		arizona_aif_dbg(dai, "Configuring for %d %d bit TDM slots\n",
@@ -1756,17 +1756,6 @@
 }
 EXPORT_SYMBOL_GPL(arizona_init_dai);
 
-static irqreturn_t arizona_fll_clock_ok(int irq, void *data)
-{
-	struct arizona_fll *fll = data;
-
-	arizona_fll_dbg(fll, "clock OK\n");
-
-	complete(&fll->ok);
-
-	return IRQ_HANDLED;
-}
-
 static struct {
 	unsigned int min;
 	unsigned int max;
@@ -2048,17 +2037,18 @@
 static int arizona_enable_fll(struct arizona_fll *fll)
 {
 	struct arizona *arizona = fll->arizona;
-	unsigned long time_left;
 	bool use_sync = false;
 	int already_enabled = arizona_is_enabled_fll(fll);
 	struct arizona_fll_cfg cfg;
+	int i;
+	unsigned int val;
 
 	if (already_enabled < 0)
 		return already_enabled;
 
 	if (already_enabled) {
 		/* Facilitate smooth refclk across the transition */
-		regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x7,
+		regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x9,
 					 ARIZONA_FLL1_GAIN_MASK, 0);
 		regmap_update_bits_async(fll->arizona->regmap, fll->base + 1,
 					 ARIZONA_FLL1_FREERUN,
@@ -2110,9 +2100,6 @@
 	if (!already_enabled)
 		pm_runtime_get(arizona->dev);
 
-	/* Clear any pending completions */
-	try_wait_for_completion(&fll->ok);
-
 	regmap_update_bits_async(arizona->regmap, fll->base + 1,
 				 ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA);
 	if (use_sync)
@@ -2124,10 +2111,24 @@
 		regmap_update_bits_async(arizona->regmap, fll->base + 1,
 					 ARIZONA_FLL1_FREERUN, 0);
 
-	time_left = wait_for_completion_timeout(&fll->ok,
-					  msecs_to_jiffies(250));
-	if (time_left == 0)
+	arizona_fll_dbg(fll, "Waiting for FLL lock...\n");
+	val = 0;
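+	/*
+	 * Poll the raw interrupt status for the lock bit, backing off
+	 * from roughly 200-400us steps to 20ms steps (about 200ms total).
+	 */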
+	for (i = 0; i < 15; i++) {
+		if (i < 5)
+			usleep_range(200, 400);
+		else
+			msleep(20);
+
+		regmap_read(arizona->regmap,
+			    ARIZONA_INTERRUPT_RAW_STATUS_5,
+			    &val);
+		if (val & (ARIZONA_FLL1_CLOCK_OK_STS << (fll->id - 1)))
+			break;
+	}
+	if (i == 15)
 		arizona_fll_warn(fll, "Timed out waiting for lock\n");
+	else
+		arizona_fll_dbg(fll, "FLL locked (%d polls)\n", i);
 
 	return 0;
 }
@@ -2212,11 +2213,8 @@
 int arizona_init_fll(struct arizona *arizona, int id, int base, int lock_irq,
 		     int ok_irq, struct arizona_fll *fll)
 {
-	int ret;
 	unsigned int val;
 
-	init_completion(&fll->ok);
-
 	fll->id = id;
 	fll->base = base;
 	fll->arizona = arizona;
@@ -2238,13 +2236,6 @@
 	snprintf(fll->clock_ok_name, sizeof(fll->clock_ok_name),
 		 "FLL%d clock OK", id);
 
-	ret = arizona_request_irq(arizona, ok_irq, fll->clock_ok_name,
-				  arizona_fll_clock_ok, fll);
-	if (ret != 0) {
-		dev_err(arizona->dev, "Failed to get FLL%d clock OK IRQ: %d\n",
-			id, ret);
-	}
-
 	regmap_update_bits(arizona->regmap, fll->base + 1,
 			   ARIZONA_FLL1_FREERUN, 0);
 
@@ -2313,6 +2304,82 @@
 };
 EXPORT_SYMBOL_GPL(arizona_adsp2_rate_controls);
 
+static bool arizona_eq_filter_unstable(bool mode, __be16 _a, __be16 _b)
+{
+	s16 a = be16_to_cpu(_a);
+	s16 b = be16_to_cpu(_b);
+
+	if (!mode) {
+		return abs(a) >= 4096;
+	} else {
+		if (abs(b) >= 4096)
+			return true;
+
+		return (abs((a << 16) / (4096 - b)) >= 4096 << 4);
+	}
+}
+
+int arizona_eq_coeff_put(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+	struct soc_bytes *params = (void *)kcontrol->private_value;
+	unsigned int val;
+	__be16 *data;
+	int len;
+	int ret;
+
+	len = params->num_regs * regmap_get_val_bytes(arizona->regmap);
+
+	data = kmemdup(ucontrol->value.bytes.data, len, GFP_KERNEL | GFP_DMA);
+	if (!data)
+		return -ENOMEM;
+
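+	/*
+	 * Userspace may only choose the mode bit of the first coefficient
+	 * register; every other bit of that register is taken from the
+	 * current hardware value read back below.
+	 */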
+	data[0] &= cpu_to_be16(ARIZONA_EQ1_B1_MODE);
+
+	if (arizona_eq_filter_unstable(!!data[0], data[1], data[2]) ||
+	    arizona_eq_filter_unstable(true, data[4], data[5]) ||
+	    arizona_eq_filter_unstable(true, data[8], data[9]) ||
+	    arizona_eq_filter_unstable(true, data[12], data[13]) ||
+	    arizona_eq_filter_unstable(false, data[16], data[17])) {
+		dev_err(arizona->dev, "Rejecting unstable EQ coefficients\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = regmap_read(arizona->regmap, params->base, &val);
+	if (ret != 0)
+		goto out;
+
+	val &= ~ARIZONA_EQ1_B1_MODE;
+	data[0] |= cpu_to_be16(val);
+
+	ret = regmap_raw_write(arizona->regmap, params->base, data, len);
+
+out:
+	kfree(data);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(arizona_eq_coeff_put);
+
+int arizona_lhpf_coeff_put(struct snd_kcontrol *kcontrol,
+			   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+	__be16 *data = (__be16 *)ucontrol->value.bytes.data;
+	s16 val = be16_to_cpu(*data);
+
+	if (abs(val) >= 4096) {
+		dev_err(arizona->dev, "Rejecting unstable LHPF coefficients\n");
+		return -EINVAL;
+	}
+
+	return snd_soc_bytes_put(kcontrol, ucontrol);
+}
+EXPORT_SYMBOL_GPL(arizona_lhpf_coeff_put);
+
 MODULE_DESCRIPTION("ASoC Wolfson Arizona class device support");
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
 MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index 43deb04..ada0a41 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -194,6 +194,20 @@
 	ARIZONA_MIXER_ROUTES(name " Preloader", name "L"), \
 	ARIZONA_MIXER_ROUTES(name " Preloader", name "R")
 
+#define ARIZONA_EQ_CONTROL(xname, xbase)                      \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname,   \
+	.info = snd_soc_bytes_info, .get = snd_soc_bytes_get, \
+	.put = arizona_eq_coeff_put, .private_value =         \
+	((unsigned long)&(struct soc_bytes) { .base = xbase,  \
+	 .num_regs = 20, .mask = ~ARIZONA_EQ1_B1_MODE }) }
+
+#define ARIZONA_LHPF_CONTROL(xname, xbase)                    \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname,   \
+	.info = snd_soc_bytes_info, .get = snd_soc_bytes_get, \
+	.put = arizona_lhpf_coeff_put, .private_value =       \
+	((unsigned long)&(struct soc_bytes) { .base = xbase,  \
+	 .num_regs = 1 }) }
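+/*
+ * Both macros build bytes controls whose put handlers validate the
+ * coefficients and reject values that would leave the filter unstable.
+ */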
+
 #define ARIZONA_RATE_ENUM_SIZE 4
 extern const char *arizona_rate_text[ARIZONA_RATE_ENUM_SIZE];
 extern const int arizona_rate_val[ARIZONA_RATE_ENUM_SIZE];
@@ -229,6 +243,11 @@
 			 struct snd_kcontrol *kcontrol,
 			 int event);
 
+extern int arizona_eq_coeff_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol);
+extern int arizona_lhpf_coeff_put(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol);
+
 extern int arizona_set_sysclk(struct snd_soc_codec *codec, int clk_id,
 			      int source, unsigned int freq, int dir);
 
@@ -242,7 +261,6 @@
 	int id;
 	unsigned int base;
 	unsigned int vco_mult;
-	struct completion ok;
 
 	unsigned int fout;
 	int sync_src;
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index 8f40025..44c30fe 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -74,33 +74,8 @@
 static bool cs35l32_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS35L32_DEVID_AB:
-	case CS35L32_DEVID_CD:
-	case CS35L32_DEVID_E:
-	case CS35L32_FAB_ID:
-	case CS35L32_REV_ID:
-	case CS35L32_PWRCTL1:
-	case CS35L32_PWRCTL2:
-	case CS35L32_CLK_CTL:
-	case CS35L32_BATT_THRESHOLD:
-	case CS35L32_VMON:
-	case CS35L32_BST_CPCP_CTL:
-	case CS35L32_IMON_SCALING:
-	case CS35L32_AUDIO_LED_MNGR:
-	case CS35L32_ADSP_CTL:
-	case CS35L32_CLASSD_CTL:
-	case CS35L32_PROTECT_CTL:
-	case CS35L32_INT_MASK_1:
-	case CS35L32_INT_MASK_2:
-	case CS35L32_INT_MASK_3:
-	case CS35L32_INT_STATUS_1:
-	case CS35L32_INT_STATUS_2:
-	case CS35L32_INT_STATUS_3:
-	case CS35L32_LED_STATUS:
-	case CS35L32_FLASH_MODE:
-	case CS35L32_MOVIE_MODE:
-	case CS35L32_FLASH_TIMER:
-	case CS35L32_FLASH_INHIBIT:
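+	/* Runs of consecutive registers are matched with case ranges */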
+	case CS35L32_DEVID_AB ... CS35L32_AUDIO_LED_MNGR:
+	case CS35L32_ADSP_CTL ... CS35L32_FLASH_INHIBIT:
 		return true;
 	default:
 		return false;
@@ -110,15 +85,8 @@
 static bool cs35l32_volatile_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS35L32_DEVID_AB:
-	case CS35L32_DEVID_CD:
-	case CS35L32_DEVID_E:
-	case CS35L32_FAB_ID:
-	case CS35L32_REV_ID:
-	case CS35L32_INT_STATUS_1:
-	case CS35L32_INT_STATUS_2:
-	case CS35L32_INT_STATUS_3:
-	case CS35L32_LED_STATUS:
+	case CS35L32_DEVID_AB ... CS35L32_REV_ID:
+	case CS35L32_INT_STATUS_1 ... CS35L32_LED_STATUS:
 		return true;
 	default:
 		return false;
@@ -128,10 +96,7 @@
 static bool cs35l32_precious_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS35L32_INT_STATUS_1:
-	case CS35L32_INT_STATUS_2:
-	case CS35L32_INT_STATUS_3:
-	case CS35L32_LED_STATUS:
+	case CS35L32_INT_STATUS_1 ... CS35L32_LED_STATUS:
 		return true;
 	default:
 		return false;
@@ -276,7 +241,7 @@
 };
 
 /* Current and threshold powerup sequence Pg37 in datasheet */
-static const struct reg_default cs35l32_monitor_patch[] = {
+static const struct reg_sequence cs35l32_monitor_patch[] = {
 
 	{ 0x00, 0x99 },
 	{ 0x48, 0x17 },
@@ -441,8 +406,7 @@
 	if (IS_ERR(cs35l32->reset_gpio))
 		return PTR_ERR(cs35l32->reset_gpio);
 
-	if (cs35l32->reset_gpio)
-		gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
+	gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
 
 	/* initialize codec */
 	ret = regmap_read(cs35l32->regmap, CS35L32_DEVID_AB, &reg);
@@ -536,8 +500,7 @@
 	snd_soc_unregister_codec(&i2c_client->dev);
 
 	/* Hold down reset */
-	if (cs35l32->reset_gpio)
-		gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
+	gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
 
 	return 0;
 }
@@ -551,8 +514,7 @@
 	regcache_mark_dirty(cs35l32->regmap);
 
 	/* Hold down reset */
-	if (cs35l32->reset_gpio)
-		gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
+	gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
 
 	/* remove power */
 	regulator_bulk_disable(ARRAY_SIZE(cs35l32->supplies),
@@ -575,8 +537,7 @@
 		return ret;
 	}
 
-	if (cs35l32->reset_gpio)
-		gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
+	gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
 
 	regcache_cache_only(cs35l32->regmap, false);
 	regcache_sync(cs35l32->regmap);
@@ -607,7 +568,6 @@
 static struct i2c_driver cs35l32_i2c_driver = {
 	.driver = {
 		   .name = "cs35l32",
-		   .owner = THIS_MODULE,
 		   .pm = &cs35l32_runtime_pm,
 		   .of_match_table = cs35l32_of_match,
 		   },
diff --git a/sound/soc/codecs/cs35l32.h b/sound/soc/codecs/cs35l32.h
index 31ab804..1d6c250 100644
--- a/sound/soc/codecs/cs35l32.h
+++ b/sound/soc/codecs/cs35l32.h
@@ -80,7 +80,7 @@
 #define CS35L32_GAIN_MGR_MASK		0x08
 #define CS35L32_ADSP_SHARE_MASK		0x08
 #define CS35L32_ADSP_DATACFG_MASK	0x30
-#define CS35L32_SDOUT_3ST		0x80
+#define CS35L32_SDOUT_3ST		0x08
 #define CS35L32_BATT_REC_MASK		0x0E
 #define CS35L32_BATT_THRESH_MASK	0x30
 
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index 8e36198..55db19d 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -60,23 +60,7 @@
 static bool cs4265_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS4265_PWRCTL:
-	case CS4265_DAC_CTL:
-	case CS4265_ADC_CTL:
-	case CS4265_MCLK_FREQ:
-	case CS4265_SIG_SEL:
-	case CS4265_CHB_PGA_CTL:
-	case CS4265_CHA_PGA_CTL:
-	case CS4265_ADC_CTL2:
-	case CS4265_DAC_CHA_VOL:
-	case CS4265_DAC_CHB_VOL:
-	case CS4265_DAC_CTL2:
-	case CS4265_SPDIF_CTL1:
-	case CS4265_SPDIF_CTL2:
-	case CS4265_INT_MASK:
-	case CS4265_STATUS_MODE_MSB:
-	case CS4265_STATUS_MODE_LSB:
-	case CS4265_CHIP_ID:
+	case CS4265_CHIP_ID ... CS4265_SPDIF_CTL2:
 		return true;
 	default:
 		return false;
@@ -658,7 +642,6 @@
 static struct i2c_driver cs4265_i2c_driver = {
 	.driver = {
 		.name = "cs4265",
-		.owner = THIS_MODULE,
 		.of_match_table = cs4265_of_match,
 	},
 	.id_table = cs4265_id,
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index e6d4ff9..e07807d 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -751,7 +751,6 @@
 static struct i2c_driver cs4270_i2c_driver = {
 	.driver = {
 		.name = "cs4270",
-		.owner = THIS_MODULE,
 		.of_match_table = cs4270_of_match,
 	},
 	.id_table = cs4270_id,
diff --git a/sound/soc/codecs/cs4271-i2c.c b/sound/soc/codecs/cs4271-i2c.c
index b264da0..dcb3223 100644
--- a/sound/soc/codecs/cs4271-i2c.c
+++ b/sound/soc/codecs/cs4271-i2c.c
@@ -48,7 +48,6 @@
 static struct i2c_driver cs4271_i2c_driver = {
 	.driver = {
 		.name = "cs4271",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(cs4271_dt_ids),
 	},
 	.probe = cs4271_i2c_probe,
diff --git a/sound/soc/codecs/cs42l51-i2c.c b/sound/soc/codecs/cs42l51-i2c.c
index c40428f..9bad478 100644
--- a/sound/soc/codecs/cs42l51-i2c.c
+++ b/sound/soc/codecs/cs42l51-i2c.c
@@ -45,7 +45,6 @@
 static struct i2c_driver cs42l51_i2c_driver = {
 	.driver = {
 		.name = "cs42l51",
-		.owner = THIS_MODULE,
 		.of_match_table = cs42l51_of_match,
 	},
 	.probe = cs42l51_i2c_probe,
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 4de52c9..47b97fc 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -110,58 +110,7 @@
 static bool cs42l52_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS42L52_CHIP:
-	case CS42L52_PWRCTL1:
-	case CS42L52_PWRCTL2:
-	case CS42L52_PWRCTL3:
-	case CS42L52_CLK_CTL:
-	case CS42L52_IFACE_CTL1:
-	case CS42L52_IFACE_CTL2:
-	case CS42L52_ADC_PGA_A:
-	case CS42L52_ADC_PGA_B:
-	case CS42L52_ANALOG_HPF_CTL:
-	case CS42L52_ADC_HPF_FREQ:
-	case CS42L52_ADC_MISC_CTL:
-	case CS42L52_PB_CTL1:
-	case CS42L52_MISC_CTL:
-	case CS42L52_PB_CTL2:
-	case CS42L52_MICA_CTL:
-	case CS42L52_MICB_CTL:
-	case CS42L52_PGAA_CTL:
-	case CS42L52_PGAB_CTL:
-	case CS42L52_PASSTHRUA_VOL:
-	case CS42L52_PASSTHRUB_VOL:
-	case CS42L52_ADCA_VOL:
-	case CS42L52_ADCB_VOL:
-	case CS42L52_ADCA_MIXER_VOL:
-	case CS42L52_ADCB_MIXER_VOL:
-	case CS42L52_PCMA_MIXER_VOL:
-	case CS42L52_PCMB_MIXER_VOL:
-	case CS42L52_BEEP_FREQ:
-	case CS42L52_BEEP_VOL:
-	case CS42L52_BEEP_TONE_CTL:
-	case CS42L52_TONE_CTL:
-	case CS42L52_MASTERA_VOL:
-	case CS42L52_MASTERB_VOL:
-	case CS42L52_HPA_VOL:
-	case CS42L52_HPB_VOL:
-	case CS42L52_SPKA_VOL:
-	case CS42L52_SPKB_VOL:
-	case CS42L52_ADC_PCM_MIXER:
-	case CS42L52_LIMITER_CTL1:
-	case CS42L52_LIMITER_CTL2:
-	case CS42L52_LIMITER_AT_RATE:
-	case CS42L52_ALC_CTL:
-	case CS42L52_ALC_RATE:
-	case CS42L52_ALC_THRESHOLD:
-	case CS42L52_NOISE_GATE_CTL:
-	case CS42L52_CLK_STATUS:
-	case CS42L52_BATT_COMPEN:
-	case CS42L52_BATT_LEVEL:
-	case CS42L52_SPK_STATUS:
-	case CS42L52_TEM_CTL:
-	case CS42L52_THE_FOLDBACK:
-	case CS42L52_CHARGE_PUMP:
+	case CS42L52_CHIP ... CS42L52_CHARGE_PUMP:
 		return true;
 	default:
 		return false;
@@ -196,11 +145,10 @@
 
 static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0);
 
-static const unsigned int limiter_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(limiter_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
-	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0),
-};
+	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0)
+);
 
 static const char * const cs42l52_adca_text[] = {
 	"Input1A", "Input2A", "Input3A", "Input4A", "PGA Input Left"};
@@ -919,7 +867,7 @@
 			SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_U20_3LE | \
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_U24_LE)
 
-static struct snd_soc_dai_ops cs42l52_ops = {
+static const struct snd_soc_dai_ops cs42l52_ops = {
 	.hw_params	= cs42l52_pcm_hw_params,
 	.digital_mute	= cs42l52_digital_mute,
 	.set_fmt	= cs42l52_set_fmt,
@@ -1118,7 +1066,7 @@
 };
 
 /* Current and threshold powerup sequence Pg37 */
-static const struct reg_default cs42l52_threshold_patch[] = {
+static const struct reg_sequence cs42l52_threshold_patch[] = {
 
 	{ 0x00, 0x99 },
 	{ 0x3E, 0xBA },
@@ -1285,7 +1233,6 @@
 static struct i2c_driver cs42l52_i2c_driver = {
 	.driver = {
 		.name = "cs42l52",
-		.owner = THIS_MODULE,
 		.of_match_table = cs42l52_of_match,
 	},
 	.id_table = cs42l52_id,
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index 1e11ba4..7cd5f76 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -115,52 +115,7 @@
 static bool cs42l56_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS42L56_CHIP_ID_1:
-	case CS42L56_CHIP_ID_2:
-	case CS42L56_PWRCTL_1:
-	case CS42L56_PWRCTL_2:
-	case CS42L56_CLKCTL_1:
-	case CS42L56_CLKCTL_2:
-	case CS42L56_SERIAL_FMT:
-	case CS42L56_CLASSH_CTL:
-	case CS42L56_MISC_CTL:
-	case CS42L56_INT_STATUS:
-	case CS42L56_PLAYBACK_CTL:
-	case CS42L56_DSP_MUTE_CTL:
-	case CS42L56_ADCA_MIX_VOLUME:
-	case CS42L56_ADCB_MIX_VOLUME:
-	case CS42L56_PCMA_MIX_VOLUME:
-	case CS42L56_PCMB_MIX_VOLUME:
-	case CS42L56_ANAINPUT_ADV_VOLUME:
-	case CS42L56_DIGINPUT_ADV_VOLUME:
-	case CS42L56_MASTER_A_VOLUME:
-	case CS42L56_MASTER_B_VOLUME:
-	case CS42L56_BEEP_FREQ_ONTIME:
-	case CS42L56_BEEP_FREQ_OFFTIME:
-	case CS42L56_BEEP_TONE_CFG:
-	case CS42L56_TONE_CTL:
-	case CS42L56_CHAN_MIX_SWAP:
-	case CS42L56_AIN_REFCFG_ADC_MUX:
-	case CS42L56_HPF_CTL:
-	case CS42L56_MISC_ADC_CTL:
-	case CS42L56_GAIN_BIAS_CTL:
-	case CS42L56_PGAA_MUX_VOLUME:
-	case CS42L56_PGAB_MUX_VOLUME:
-	case CS42L56_ADCA_ATTENUATOR:
-	case CS42L56_ADCB_ATTENUATOR:
-	case CS42L56_ALC_EN_ATTACK_RATE:
-	case CS42L56_ALC_RELEASE_RATE:
-	case CS42L56_ALC_THRESHOLD:
-	case CS42L56_NOISE_GATE_CTL:
-	case CS42L56_ALC_LIM_SFT_ZC:
-	case CS42L56_AMUTE_HPLO_MUX:
-	case CS42L56_HPA_VOLUME:
-	case CS42L56_HPB_VOLUME:
-	case CS42L56_LOA_VOLUME:
-	case CS42L56_LOB_VOLUME:
-	case CS42L56_LIM_THRESHOLD_CTL:
-	case CS42L56_LIM_CTL_RELEASE_RATE:
-	case CS42L56_LIM_ATTACK_RATE:
+	case CS42L56_CHIP_ID_1 ... CS42L56_LIM_ATTACK_RATE:
 		return true;
 	default:
 		return false;
@@ -185,21 +140,18 @@
 static DECLARE_TLV_DB_SCALE(preamp_tlv, 0, 1000, 0);
 static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
 
-static const unsigned int ngnb_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(ngnb_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(-8200, 600, 0),
-	2, 5, TLV_DB_SCALE_ITEM(-7600, 300, 0),
-};
-static const unsigned int ngb_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+	2, 5, TLV_DB_SCALE_ITEM(-7600, 300, 0)
+);
+static const DECLARE_TLV_DB_RANGE(ngb_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-6400, 600, 0),
-	3, 7, TLV_DB_SCALE_ITEM(-4600, 300, 0),
-};
-static const unsigned int alc_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+	3, 7, TLV_DB_SCALE_ITEM(-4600, 300, 0)
+);
+static const DECLARE_TLV_DB_RANGE(alc_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
-	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0),
-};
+	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0)
+);
 
 static const char * const beep_config_text[] = {
 	"Off", "Single", "Multiple", "Continuous"
@@ -989,7 +941,7 @@
 			SNDRV_PCM_FMTBIT_S32_LE)
 
 
-static struct snd_soc_dai_ops cs42l56_ops = {
+static const struct snd_soc_dai_ops cs42l56_ops = {
 	.hw_params	= cs42l56_pcm_hw_params,
 	.digital_mute	= cs42l56_digital_mute,
 	.set_fmt	= cs42l56_set_dai_fmt,
@@ -1408,7 +1360,6 @@
 static struct i2c_driver cs42l56_i2c_driver = {
 	.driver = {
 		.name = "cs42l56",
-		.owner = THIS_MODULE,
 		.of_match_table = cs42l56_of_match,
 	},
 	.id_table = cs42l56_id,
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index b7853b9..42a8fd4 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -153,111 +153,18 @@
 static bool cs42l73_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS42L73_DEVID_AB:
-	case CS42L73_DEVID_CD:
-	case CS42L73_DEVID_E:
-	case CS42L73_REVID:
-	case CS42L73_PWRCTL1:
-	case CS42L73_PWRCTL2:
-	case CS42L73_PWRCTL3:
-	case CS42L73_CPFCHC:
-	case CS42L73_OLMBMSDC:
-	case CS42L73_DMMCC:
-	case CS42L73_XSPC:
-	case CS42L73_XSPMMCC:
-	case CS42L73_ASPC:
-	case CS42L73_ASPMMCC:
-	case CS42L73_VSPC:
-	case CS42L73_VSPMMCC:
-	case CS42L73_VXSPFS:
-	case CS42L73_MIOPC:
-	case CS42L73_ADCIPC:
-	case CS42L73_MICAPREPGAAVOL:
-	case CS42L73_MICBPREPGABVOL:
-	case CS42L73_IPADVOL:
-	case CS42L73_IPBDVOL:
-	case CS42L73_PBDC:
-	case CS42L73_HLADVOL:
-	case CS42L73_HLBDVOL:
-	case CS42L73_SPKDVOL:
-	case CS42L73_ESLDVOL:
-	case CS42L73_HPAAVOL:
-	case CS42L73_HPBAVOL:
-	case CS42L73_LOAAVOL:
-	case CS42L73_LOBAVOL:
-	case CS42L73_STRINV:
-	case CS42L73_XSPINV:
-	case CS42L73_ASPINV:
-	case CS42L73_VSPINV:
-	case CS42L73_LIMARATEHL:
-	case CS42L73_LIMRRATEHL:
-	case CS42L73_LMAXHL:
-	case CS42L73_LIMARATESPK:
-	case CS42L73_LIMRRATESPK:
-	case CS42L73_LMAXSPK:
-	case CS42L73_LIMARATEESL:
-	case CS42L73_LIMRRATEESL:
-	case CS42L73_LMAXESL:
-	case CS42L73_ALCARATE:
-	case CS42L73_ALCRRATE:
-	case CS42L73_ALCMINMAX:
-	case CS42L73_NGCAB:
-	case CS42L73_ALCNGMC:
-	case CS42L73_MIXERCTL:
-	case CS42L73_HLAIPAA:
-	case CS42L73_HLBIPBA:
-	case CS42L73_HLAXSPAA:
-	case CS42L73_HLBXSPBA:
-	case CS42L73_HLAASPAA:
-	case CS42L73_HLBASPBA:
-	case CS42L73_HLAVSPMA:
-	case CS42L73_HLBVSPMA:
-	case CS42L73_XSPAIPAA:
-	case CS42L73_XSPBIPBA:
-	case CS42L73_XSPAXSPAA:
-	case CS42L73_XSPBXSPBA:
-	case CS42L73_XSPAASPAA:
-	case CS42L73_XSPAASPBA:
-	case CS42L73_XSPAVSPMA:
-	case CS42L73_XSPBVSPMA:
-	case CS42L73_ASPAIPAA:
-	case CS42L73_ASPBIPBA:
-	case CS42L73_ASPAXSPAA:
-	case CS42L73_ASPBXSPBA:
-	case CS42L73_ASPAASPAA:
-	case CS42L73_ASPBASPBA:
-	case CS42L73_ASPAVSPMA:
-	case CS42L73_ASPBVSPMA:
-	case CS42L73_VSPAIPAA:
-	case CS42L73_VSPBIPBA:
-	case CS42L73_VSPAXSPAA:
-	case CS42L73_VSPBXSPBA:
-	case CS42L73_VSPAASPAA:
-	case CS42L73_VSPBASPBA:
-	case CS42L73_VSPAVSPMA:
-	case CS42L73_VSPBVSPMA:
-	case CS42L73_MMIXCTL:
-	case CS42L73_SPKMIPMA:
-	case CS42L73_SPKMXSPA:
-	case CS42L73_SPKMASPA:
-	case CS42L73_SPKMVSPMA:
-	case CS42L73_ESLMIPMA:
-	case CS42L73_ESLMXSPA:
-	case CS42L73_ESLMASPA:
-	case CS42L73_ESLMVSPMA:
-	case CS42L73_IM1:
-	case CS42L73_IM2:
+	case CS42L73_DEVID_AB ... CS42L73_DEVID_E:
+	case CS42L73_REVID ... CS42L73_IM2:
 		return true;
 	default:
 		return false;
 	}
 }
 
-static const unsigned int hpaloa_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(hpaloa_tlv,
 	0, 13, TLV_DB_SCALE_ITEM(-7600, 200, 0),
-	14, 75, TLV_DB_SCALE_ITEM(-4900, 100, 0),
-};
+	14, 75, TLV_DB_SCALE_ITEM(-4900, 100, 0)
+);
 
 static DECLARE_TLV_DB_SCALE(adc_boost_tlv, 0, 2500, 0);
 
@@ -267,11 +174,10 @@
 
 static DECLARE_TLV_DB_SCALE(micpga_tlv, -600, 50, 0);
 
-static const unsigned int limiter_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(limiter_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
-	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0),
-};
+	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(attn_tlv, -6300, 100, 1);
 
@@ -1236,8 +1142,8 @@
 	struct snd_soc_codec *codec = dai->codec;
 	int id = dai->id;
 
-	return snd_soc_update_bits(codec, CS42L73_SPC(id),
-					0x7F, tristate << 7);
+	return snd_soc_update_bits(codec, CS42L73_SPC(id), CS42L73_SP_3ST,
+				   tristate << 7);
 }
 
 static const struct snd_pcm_hw_constraint_list constraints_12_24 = {
@@ -1491,7 +1397,6 @@
 static struct i2c_driver cs42l73_i2c_driver = {
 	.driver = {
 		   .name = "cs42l73",
-		   .owner = THIS_MODULE,
 		   .of_match_table = cs42l73_of_match,
 		   },
 	.id_table = cs42l73_id,
diff --git a/sound/soc/codecs/cs42xx8-i2c.c b/sound/soc/codecs/cs42xx8-i2c.c
index 657dce2..800c1d5 100644
--- a/sound/soc/codecs/cs42xx8-i2c.c
+++ b/sound/soc/codecs/cs42xx8-i2c.c
@@ -20,7 +20,7 @@
 static int cs42xx8_i2c_probe(struct i2c_client *i2c,
 			     const struct i2c_device_id *id)
 {
-	u32 ret = cs42xx8_probe(&i2c->dev,
+	int ret = cs42xx8_probe(&i2c->dev,
 			devm_regmap_init_i2c(i2c, &cs42xx8_regmap_config));
 	if (ret)
 		return ret;
@@ -49,8 +49,8 @@
 static struct i2c_driver cs42xx8_i2c_driver = {
 	.driver = {
 		.name = "cs42xx8",
-		.owner = THIS_MODULE,
 		.pm = &cs42xx8_pm,
+		.of_match_table = cs42xx8_of_match,
 	},
 	.probe = cs42xx8_i2c_probe,
 	.remove = cs42xx8_i2c_remove,
diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c
index e1d4686..d562e1b 100644
--- a/sound/soc/codecs/cs42xx8.c
+++ b/sound/soc/codecs/cs42xx8.c
@@ -425,7 +425,7 @@
 };
 EXPORT_SYMBOL_GPL(cs42888_data);
 
-static const struct of_device_id cs42xx8_of_match[] = {
+const struct of_device_id cs42xx8_of_match[] = {
 	{ .compatible = "cirrus,cs42448", .data = &cs42448_data, },
 	{ .compatible = "cirrus,cs42888", .data = &cs42888_data, },
 	{ /* sentinel */ }
@@ -435,16 +435,24 @@
 
 int cs42xx8_probe(struct device *dev, struct regmap *regmap)
 {
-	const struct of_device_id *of_id = of_match_device(cs42xx8_of_match, dev);
+	const struct of_device_id *of_id;
 	struct cs42xx8_priv *cs42xx8;
 	int ret, val, i;
 
+	if (IS_ERR(regmap)) {
+		ret = PTR_ERR(regmap);
+		dev_err(dev, "failed to allocate regmap: %d\n", ret);
+		return ret;
+	}
+
 	cs42xx8 = devm_kzalloc(dev, sizeof(*cs42xx8), GFP_KERNEL);
 	if (cs42xx8 == NULL)
 		return -ENOMEM;
 
+	cs42xx8->regmap = regmap;
 	dev_set_drvdata(dev, cs42xx8);
 
+	of_id = of_match_device(cs42xx8_of_match, dev);
 	if (of_id)
 		cs42xx8->drvdata = of_id->data;
 
@@ -482,13 +490,6 @@
 	/* Make sure hardware reset done */
 	msleep(5);
 
-	cs42xx8->regmap = regmap;
-	if (IS_ERR(cs42xx8->regmap)) {
-		ret = PTR_ERR(cs42xx8->regmap);
-		dev_err(dev, "failed to allocate regmap: %d\n", ret);
-		goto err_enable;
-	}
-
 	/*
 	 * We haven't marked the chip revision as volatile due to
 	 * sharing a register with the right input volume; explicitly
diff --git a/sound/soc/codecs/cs42xx8.h b/sound/soc/codecs/cs42xx8.h
index b2c10e5..d36c61b 100644
--- a/sound/soc/codecs/cs42xx8.h
+++ b/sound/soc/codecs/cs42xx8.h
@@ -22,6 +22,7 @@
 extern const struct cs42xx8_driver_data cs42448_data;
 extern const struct cs42xx8_driver_data cs42888_data;
 extern const struct regmap_config cs42xx8_regmap_config;
+extern const struct of_device_id cs42xx8_of_match[];
 int cs42xx8_probe(struct device *dev, struct regmap *regmap);
 
 /* CS42888 register map */
diff --git a/sound/soc/codecs/cs4349.c b/sound/soc/codecs/cs4349.c
new file mode 100644
index 0000000..0ac8fc5
--- /dev/null
+++ b/sound/soc/codecs/cs4349.c
@@ -0,0 +1,392 @@
+/*
+ * cs4349.c  --  CS4349 ALSA Soc Audio driver
+ *
+ * Copyright 2015 Cirrus Logic, Inc.
+ *
+ * Authors: Tim Howe <Tim.Howe@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include "cs4349.h"
+
+
+static const struct reg_default cs4349_reg_defaults[] = {
+	{ 2, 0x00 },	/* r02	- Mode Control */
+	{ 3, 0x09 },	/* r03	- Volume, Mixing and Inversion Control */
+	{ 4, 0x81 },	/* r04	- Mute Control */
+	{ 5, 0x00 },	/* r05	- Channel A Volume Control */
+	{ 6, 0x00 },	/* r06	- Channel B Volume Control */
+	{ 7, 0xB1 },	/* r07	- Ramp and Filter Control */
+	{ 8, 0x1C },	/* r08	- Misc. Control */
+};
+
+/* Private data for the CS4349 */
+struct cs4349_private {
+	struct regmap			*regmap;
+	struct gpio_desc		*reset_gpio;
+	unsigned int			mode;
+	int				rate;
+};
+
+static bool cs4349_readable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case CS4349_CHIPID ... CS4349_MISC:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool cs4349_writeable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case CS4349_MODE ... CS4349_MISC:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static int cs4349_set_dai_fmt(struct snd_soc_dai *codec_dai,
+			      unsigned int format)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	struct cs4349_private *cs4349 = snd_soc_codec_get_drvdata(codec);
+	unsigned int fmt;
+
+	fmt = format & SND_SOC_DAIFMT_FORMAT_MASK;
+
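+	/*
+	 * Only I2S, left-justified and right-justified interface formats
+	 * are supported; the cached choice is applied in hw_params().
+	 */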
+	switch (fmt) {
+	case SND_SOC_DAIFMT_I2S:
+	case SND_SOC_DAIFMT_LEFT_J:
+	case SND_SOC_DAIFMT_RIGHT_J:
+		cs4349->mode = fmt;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cs4349_pcm_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct cs4349_private *cs4349 = snd_soc_codec_get_drvdata(codec);
+	int fmt, ret;
+
+	cs4349->rate = params_rate(params);
+
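+	/*
+	 * Map the cached DAI format onto the DIF field of the mode
+	 * register; right-justified modes also encode the sample width.
+	 */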
+	switch (cs4349->mode) {
+	case SND_SOC_DAIFMT_I2S:
+		fmt = DIF_I2S;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		fmt = DIF_LEFT_JST;
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		switch (params_width(params)) {
+		case 16:
+			fmt = DIF_RGHT_JST16;
+			break;
+		case 24:
+			fmt = DIF_RGHT_JST24;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = snd_soc_update_bits(codec, CS4349_MODE, DIF_MASK,
+				  MODE_FORMAT(fmt));
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int cs4349_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	int reg;
+
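+	/* Set or clear the channel A and B mute bits together */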
+	reg = 0;
+	if (mute)
+		reg = MUTE_AB_MASK;
+
+	return snd_soc_update_bits(codec, CS4349_MUTE, MUTE_AB_MASK, reg);
+}
+
+static DECLARE_TLV_DB_SCALE(dig_tlv, -12750, 50, 0);
+
+static const char * const chan_mix_texts[] = {
+	"Mute", "MuteA", "MuteA SwapB", "MuteA MonoB", "SwapA MuteB",
+	"BothR", "Swap", "SwapA MonoB", "MuteB", "Normal", "BothL",
+	"MonoB", "MonoA MuteB", "MonoA", "MonoA SwapB", "Mono",
+	/* Normal == Channel A = Left, Channel B = Right */
+};
+
+static const char * const fm_texts[] = {
+	"Auto", "Single", "Double", "Quad",
+};
+
+static const char * const deemph_texts[] = {
+	"None", "44.1k", "48k", "32k",
+};
+
+static const char * const softr_zeroc_texts[] = {
+	"Immediate", "Zero Cross", "Soft Ramp", "SR on ZC",
+};
+
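+/*
+ * Raw register values for the de-emphasis field (bits 3:2) and the
+ * soft ramp/zero cross (SCZ) field (bits 7:6), matching the texts
+ * above.
+ */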
+static int deemph_values[] = {
+	0, 4, 8, 12,
+};
+
+static int softr_zeroc_values[] = {
+	0, 64, 128, 192,
+};
+
+static const struct soc_enum chan_mix_enum =
+	SOC_ENUM_SINGLE(CS4349_VMI, 0,
+			ARRAY_SIZE(chan_mix_texts),
+			chan_mix_texts);
+
+static const struct soc_enum fm_mode_enum =
+	SOC_ENUM_SINGLE(CS4349_MODE, 0,
+			ARRAY_SIZE(fm_texts),
+			fm_texts);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(deemph_enum, CS4349_MODE, 0, DEM_MASK,
+				deemph_texts, deemph_values);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(softr_zeroc_enum, CS4349_RMPFLT, 0,
+				SR_ZC_MASK, softr_zeroc_texts,
+				softr_zeroc_values);
+
+static const struct snd_kcontrol_new cs4349_snd_controls[] = {
+	SOC_DOUBLE_R_TLV("Master Playback Volume",
+			 CS4349_VOLA, CS4349_VOLB, 0, 0xFF, 1, dig_tlv),
+	SOC_ENUM("Functional Mode", fm_mode_enum),
+	SOC_ENUM("De-Emphasis Control", deemph_enum),
+	SOC_ENUM("Soft Ramp Zero Cross Control", softr_zeroc_enum),
+	SOC_ENUM("Channel Mixer", chan_mix_enum),
+	SOC_SINGLE("VolA = VolB Switch", CS4349_VMI, 7, 1, 0),
+	SOC_SINGLE("InvertA Switch", CS4349_VMI, 6, 1, 0),
+	SOC_SINGLE("InvertB Switch", CS4349_VMI, 5, 1, 0),
+	SOC_SINGLE("Auto-Mute Switch", CS4349_MUTE, 7, 1, 0),
+	SOC_SINGLE("MUTEC A = B Switch", CS4349_MUTE, 5, 1, 0),
+	SOC_SINGLE("Soft Ramp Up Switch", CS4349_RMPFLT, 5, 1, 0),
+	SOC_SINGLE("Soft Ramp Down Switch", CS4349_RMPFLT, 4, 1, 0),
+	SOC_SINGLE("Slow Roll Off Filter Switch", CS4349_RMPFLT, 2, 1, 0),
+	SOC_SINGLE("Freeze Switch", CS4349_MISC, 5, 1, 0),
+	SOC_SINGLE("Popguard Switch", CS4349_MISC, 4, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget cs4349_dapm_widgets[] = {
+	SND_SOC_DAPM_DAC("HiFi DAC", NULL, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_OUTPUT("OutputA"),
+	SND_SOC_DAPM_OUTPUT("OutputB"),
+};
+
+static const struct snd_soc_dapm_route cs4349_routes[] = {
+	{"DAC Playback", NULL, "OutputA"},
+	{"DAC Playback", NULL, "OutputB"},
+
+	{"OutputA", NULL, "HiFi DAC"},
+	{"OutputB", NULL, "HiFi DAC"},
+};
+
+#define CS4349_PCM_FORMATS (SNDRV_PCM_FMTBIT_S8  | \
+			SNDRV_PCM_FMTBIT_S16_LE  | SNDRV_PCM_FMTBIT_S16_BE  | \
+			SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S18_3BE | \
+			SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE | \
+			SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_3BE | \
+			SNDRV_PCM_FMTBIT_S24_LE  | SNDRV_PCM_FMTBIT_S24_BE  | \
+			SNDRV_PCM_FMTBIT_S32_LE)
+
+#define CS4349_PCM_RATES SNDRV_PCM_RATE_8000_192000
+
+static const struct snd_soc_dai_ops cs4349_dai_ops = {
+	.hw_params	= cs4349_pcm_hw_params,
+	.set_fmt	= cs4349_set_dai_fmt,
+	.digital_mute	= cs4349_digital_mute,
+};
+
+static struct snd_soc_dai_driver cs4349_dai = {
+	.name = "cs4349_hifi",
+	.playback = {
+		.stream_name	= "DAC Playback",
+		.channels_min	= 1,
+		.channels_max	= 2,
+		.rates		= CS4349_PCM_RATES,
+		.formats	= CS4349_PCM_FORMATS,
+	},
+	.ops = &cs4349_dai_ops,
+	.symmetric_rates = 1,
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_cs4349 = {
+	.controls		= cs4349_snd_controls,
+	.num_controls		= ARRAY_SIZE(cs4349_snd_controls),
+
+	.dapm_widgets		= cs4349_dapm_widgets,
+	.num_dapm_widgets	= ARRAY_SIZE(cs4349_dapm_widgets),
+	.dapm_routes		= cs4349_routes,
+	.num_dapm_routes	= ARRAY_SIZE(cs4349_routes),
+};
+
+static const struct regmap_config cs4349_regmap = {
+	.reg_bits		= 8,
+	.val_bits		= 8,
+
+	.max_register		= CS4349_MISC,
+	.reg_defaults		= cs4349_reg_defaults,
+	.num_reg_defaults	= ARRAY_SIZE(cs4349_reg_defaults),
+	.readable_reg		= cs4349_readable_register,
+	.writeable_reg		= cs4349_writeable_register,
+	.cache_type		= REGCACHE_RBTREE,
+};
+
+static int cs4349_i2c_probe(struct i2c_client *client,
+				      const struct i2c_device_id *id)
+{
+	struct cs4349_private *cs4349;
+	int ret;
+
+	cs4349 = devm_kzalloc(&client->dev, sizeof(*cs4349), GFP_KERNEL);
+	if (!cs4349)
+		return -ENOMEM;
+
+	cs4349->regmap = devm_regmap_init_i2c(client, &cs4349_regmap);
+	if (IS_ERR(cs4349->regmap)) {
+		ret = PTR_ERR(cs4349->regmap);
+		dev_err(&client->dev, "regmap_init() failed: %d\n", ret);
+		return ret;
+	}
+
+	/* Reset the Device */
+	cs4349->reset_gpio = devm_gpiod_get_optional(&client->dev,
+		"reset", GPIOD_OUT_LOW);
+	if (IS_ERR(cs4349->reset_gpio))
+		return PTR_ERR(cs4349->reset_gpio);
+
+	gpiod_set_value_cansleep(cs4349->reset_gpio, 1);
+
+	i2c_set_clientdata(client, cs4349);
+
+	return snd_soc_register_codec(&client->dev, &soc_codec_dev_cs4349,
+		&cs4349_dai, 1);
+}
+
+static int cs4349_i2c_remove(struct i2c_client *client)
+{
+	struct cs4349_private *cs4349 = i2c_get_clientdata(client);
+
+	snd_soc_unregister_codec(&client->dev);
+
+	/* Hold down reset */
+	gpiod_set_value_cansleep(cs4349->reset_gpio, 0);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
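+/*
+ * Runtime suspend powers the DAC down, freezes the register cache and
+ * asserts reset; resume releases reset and re-syncs the cache.
+ */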
+static int cs4349_runtime_suspend(struct device *dev)
+{
+	struct cs4349_private *cs4349 = dev_get_drvdata(dev);
+	int ret;
+
+	ret = regmap_update_bits(cs4349->regmap, CS4349_MISC, PWR_DWN, PWR_DWN);
+	if (ret < 0)
+		return ret;
+
+	regcache_cache_only(cs4349->regmap, true);
+
+	/* Hold down reset */
+	gpiod_set_value_cansleep(cs4349->reset_gpio, 0);
+
+	return 0;
+}
+
+static int cs4349_runtime_resume(struct device *dev)
+{
+	struct cs4349_private *cs4349 = dev_get_drvdata(dev);
+	int ret;
+
+	ret = regmap_update_bits(cs4349->regmap, CS4349_MISC, PWR_DWN, 0);
+	if (ret < 0)
+		return ret;
+
+	gpiod_set_value_cansleep(cs4349->reset_gpio, 1);
+
+	regcache_cache_only(cs4349->regmap, false);
+	regcache_sync(cs4349->regmap);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops cs4349_runtime_pm = {
+	SET_RUNTIME_PM_OPS(cs4349_runtime_suspend, cs4349_runtime_resume,
+			   NULL)
+};
+
+static const struct of_device_id cs4349_of_match[] = {
+	{ .compatible = "cirrus,cs4349", },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, cs4349_of_match);
+
+static const struct i2c_device_id cs4349_i2c_id[] = {
+	{"cs4349", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, cs4349_i2c_id);
+
+static struct i2c_driver cs4349_i2c_driver = {
+	.driver = {
+		.name		= "cs4349",
+		.of_match_table	= cs4349_of_match,
+	},
+	.id_table	= cs4349_i2c_id,
+	.probe		= cs4349_i2c_probe,
+	.remove		= cs4349_i2c_remove,
+};
+
+module_i2c_driver(cs4349_i2c_driver);
+
+MODULE_AUTHOR("Tim Howe <tim.howe@cirrus.com>");
+MODULE_DESCRIPTION("Cirrus Logic CS4349 ALSA SoC Codec Driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs4349.h b/sound/soc/codecs/cs4349.h
new file mode 100644
index 0000000..d58c06a
--- /dev/null
+++ b/sound/soc/codecs/cs4349.h
@@ -0,0 +1,136 @@
+/*
+ * ALSA SoC CS4349 codec driver
+ *
+ * Copyright 2015 Cirrus Logic, Inc.
+ *
+ * Author: Tim Howe <Tim.Howe@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#ifndef __CS4349_H__
+#define __CS4349_H__
+
+/* CS4349 registers addresses */
+#define CS4349_CHIPID		0x01	/* Device and Rev ID, Read Only */
+#define CS4349_MODE		0x02	/* Mode Control */
+#define CS4349_VMI		0x03	/* Volume, Mixing, Inversion Control */
+#define CS4349_MUTE		0x04	/* Mute Control */
+#define CS4349_VOLA		0x05	/* DAC Channel A Volume Control */
+#define CS4349_VOLB		0x06	/* DAC Channel B Volume Control */
+#define CS4349_RMPFLT		0x07	/* Ramp and Filter Control */
+#define CS4349_MISC		0x08	/* Power Down, Freeze Control, Pop Stop */
+
+#define CS4349_I2C_INCR		0x80
+
+
+/* Device and Revision ID */
+#define CS4349_REVA		0xF0	/* Rev A */
+#define CS4349_REVB		0xF1	/* Rev B */
+#define CS4349_REVC2		0xFF	/* Rev C2 */
+
+
+/*
+ * PDN_DONE poll maximum.
+ * If soft ramp is enabled, powering down the system takes much longer.
+ */
+#define PDN_POLL_MAX		900
+
+
+/* Bitfield Definitions */
+
+/* CS4349_MODE */
+/* (Digital Interface Format, De-Emphasis Control, Functional Mode) */
+#define DIF2			(1 << 6)
+#define DIF1			(1 << 5)
+#define DIF0			(1 << 4)
+#define DEM1			(1 << 3)
+#define DEM0			(1 << 2)
+#define FM1			(1 << 1)
+#define DIF_LEFT_JST		0x00
+#define DIF_I2S			0x01
+#define DIF_RGHT_JST16		0x02
+#define DIF_RGHT_JST24		0x03
+#define DIF_TDM0		0x04
+#define DIF_TDM1		0x05
+#define DIF_TDM2		0x06
+#define DIF_TDM3		0x07
+#define DIF_MASK		0x70
+#define MODE_FORMAT(x)		(((x) & 7) << 4)
+#define DEM_MASK		0x0C
+#define NO_DEM			0x00
+#define DEM_441			0x04
+#define DEM_48K			0x08
+#define DEM_32K			0x0C
+#define FM_AUTO			0x00
+#define FM_SNGL			0x01
+#define FM_DBL			0x02
+#define FM_QUAD			0x03
+#define FM_SNGL_MIN		30000
+#define FM_SNGL_MAX		54000
+#define FM_DBL_MAX		108000
+#define FM_QUAD_MAX		216000
+#define FM_MASK			0x03
+
+/* CS4349_VMI (VMI = Volume, Mixing and Inversion Controls) */
+#define VOLBISA			(1 << 7)
+#define VOLAISB			(1 << 7)
+/* INVERT_A only available for Left Jstfd, Right Jstfd16 and Right Jstfd24 */
+#define INVERT_A		(1 << 6)
+/* INVERT_B only available for Left Jstfd, Right Jstfd16 and Right Jstfd24 */
+#define INVERT_B		(1 << 5)
+#define ATAPI3			(1 << 3)
+#define ATAPI2			(1 << 2)
+#define ATAPI1			(1 << 1)
+#define ATAPI0			(1 << 0)
+#define MUTEAB			0x00
+#define MUTEA_RIGHTB		0x01
+#define MUTEA_LEFTB		0x02
+#define MUTEA_SUMLRDIV2B	0x03
+#define RIGHTA_MUTEB		0x04
+#define RIGHTA_RIGHTB		0x05
+#define RIGHTA_LEFTB		0x06
+#define RIGHTA_SUMLRDIV2B	0x07
+#define LEFTA_MUTEB		0x08
+#define LEFTA_RIGHTB		0x09	/* Default */
+#define LEFTA_LEFTB		0x0A
+#define LEFTA_SUMLRDIV2B	0x0B
+#define SUMLRDIV2A_MUTEB	0x0C
+#define SUMLRDIV2A_RIGHTB	0x0D
+#define SUMLRDIV2A_LEFTB	0x0E
+#define SUMLRDIV2_AB		0x0F
+#define CHMIX_MASK		0x0F
+
+/* CS4349_MUTE */
+#define AUTOMUTE		(1 << 7)
+#define MUTEC_AB		(1 << 5)
+#define MUTE_A			(1 << 4)
+#define MUTE_B			(1 << 3)
+#define MUTE_AB_MASK		0x18
+
+/* CS4349_RMPFLT (Ramp and Filter Control) */
+#define SCZ1			(1 << 7)
+#define SCZ0			(1 << 6)
+#define RMP_UP			(1 << 5)
+#define RMP_DN			(1 << 4)
+#define FILT_SEL		(1 << 2)
+#define IMMDT_CHNG		0x31
+#define ZEROCRSS		0x71
+#define SOFT_RMP		0xB1
+#define SFTRMP_ZEROCRSS		0xF1
+#define SR_ZC_MASK		0xC0
+
+/* CS4349_MISC */
+#define PWR_DWN			(1 << 7)
+#define FREEZE			(1 << 5)
+#define POPG_EN			(1 << 4)
+
+#endif	/* __CS4349_H__ */
diff --git a/sound/soc/codecs/da7210.c b/sound/soc/codecs/da7210.c
index 21810e5..7dc52fe 100644
--- a/sound/soc/codecs/da7210.c
+++ b/sound/soc/codecs/da7210.c
@@ -267,33 +267,29 @@
  *
  * Reserved area are considered as "mute".
  */
-static const unsigned int hp_out_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(hp_out_tlv,
 	0x0, 0x10, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* -54 dB to +15 dB */
-	0x11, 0x3f, TLV_DB_SCALE_ITEM(-5400, 150, 0),
-};
+	0x11, 0x3f, TLV_DB_SCALE_ITEM(-5400, 150, 0)
+);
 
-static const unsigned int lineout_vol_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(lineout_vol_tlv,
 	0x0, 0x10, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* -54dB to 15dB */
 	0x11, 0x3f, TLV_DB_SCALE_ITEM(-5400, 150, 0)
-};
+);
 
-static const unsigned int mono_vol_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(mono_vol_tlv,
 	0x0, 0x2, TLV_DB_SCALE_ITEM(-1800, 0, 1),
 	/* -18dB to 6dB */
 	0x3, 0x7, TLV_DB_SCALE_ITEM(-1800, 600, 0)
-};
+);
 
-static const unsigned int aux1_vol_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(aux1_vol_tlv,
 	0x0, 0x10, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* -48dB to 21dB */
 	0x11, 0x3f, TLV_DB_SCALE_ITEM(-4800, 150, 0)
-};
+);
 
 static const DECLARE_TLV_DB_SCALE(eq_gain_tlv, -1050, 150, 0);
 static const DECLARE_TLV_DB_SCALE(adc_eq_master_gain_tlv, -1800, 600, 1);
@@ -680,7 +676,7 @@
 	int master;
 };
 
-static struct reg_default da7210_reg_defaults[] = {
+static const struct reg_default da7210_reg_defaults[] = {
 	{ 0x00, 0x00 },
 	{ 0x01, 0x11 },
 	{ 0x03, 0x00 },
@@ -1182,7 +1178,7 @@
 
 #if IS_ENABLED(CONFIG_I2C)
 
-static struct reg_default da7210_regmap_i2c_patch[] = {
+static const struct reg_sequence da7210_regmap_i2c_patch[] = {
 
 	/* System controller master disable */
 	{ DA7210_STARTUP1, 0x00 },
@@ -1259,7 +1255,6 @@
 static struct i2c_driver da7210_i2c_driver = {
 	.driver = {
 		.name = "da7210",
-		.owner = THIS_MODULE,
 	},
 	.probe		= da7210_i2c_probe,
 	.remove		= da7210_i2c_remove,
@@ -1269,7 +1264,7 @@
 
 #if defined(CONFIG_SPI_MASTER)
 
-static struct reg_default da7210_regmap_spi_patch[] = {
+static const struct reg_sequence da7210_regmap_spi_patch[] = {
 	/* Dummy read to give two pulses over nCS for SPI */
 	{ DA7210_AUX2, 0x00 },
 	{ DA7210_AUX2, 0x00 },
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 238e48a..a9c86ef 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -28,27 +28,24 @@
 
 
 /* Gain and Volume */
-static const unsigned int aux_vol_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(aux_vol_tlv,
 	/* -54dB */
 	0x0, 0x11, TLV_DB_SCALE_ITEM(-5400, 0, 0),
 	/* -52.5dB to 15dB */
 	0x12, 0x3f, TLV_DB_SCALE_ITEM(-5250, 150, 0)
-};
+);
 
-static const unsigned int digital_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(digital_gain_tlv,
 	0x0, 0x07, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* -78dB to 12dB */
 	0x08, 0x7f, TLV_DB_SCALE_ITEM(-7800, 75, 0)
-};
+);
 
-static const unsigned int alc_analog_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(alc_analog_gain_tlv,
 	0x0, 0x0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* 0dB to 36dB */
 	0x01, 0x07, TLV_DB_SCALE_ITEM(0, 600, 0)
-};
+);
 
 static const DECLARE_TLV_DB_SCALE(mic_vol_tlv, -600, 600, 0);
 static const DECLARE_TLV_DB_SCALE(mixin_gain_tlv, -450, 150, 0);
@@ -954,7 +951,7 @@
 	{"LINE", NULL, "Lineout PGA"},
 };
 
-static struct reg_default da7213_reg_defaults[] = {
+static const struct reg_default da7213_reg_defaults[] = {
 	{ DA7213_DIG_ROUTING_DAI, 0x10 },
 	{ DA7213_SR, 0x0A },
 	{ DA7213_REFERENCES, 0x80 },
@@ -1585,7 +1582,6 @@
 static struct i2c_driver da7213_i2c_driver = {
 	.driver = {
 		.name = "da7213",
-		.owner = THIS_MODULE,
 	},
 	.probe		= da7213_i2c_probe,
 	.remove		= da7213_remove,
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
index 2075236..1d5a89c 100644
--- a/sound/soc/codecs/da732x.c
+++ b/sound/soc/codecs/da732x.c
@@ -43,7 +43,7 @@
 /*
  * da732x register cache - default settings
  */
-static struct reg_default da732x_reg_cache[] = {
+static const struct reg_default da732x_reg_cache[] = {
 	{ DA732X_REG_REF1		, 0x02 },
 	{ DA732X_REG_BIAS_EN		, 0x80 },
 	{ DA732X_REG_BIAS1		, 0x00 },
@@ -1196,13 +1196,7 @@
 #define	DA732X_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
 
-static struct snd_soc_dai_ops da732x_dai1_ops = {
-	.hw_params	= da732x_hw_params,
-	.set_fmt	= da732x_set_dai_fmt,
-	.set_sysclk	= da732x_set_dai_sysclk,
-};
-
-static struct snd_soc_dai_ops da732x_dai2_ops = {
+static const struct snd_soc_dai_ops da732x_dai_ops = {
 	.hw_params	= da732x_hw_params,
 	.set_fmt	= da732x_set_dai_fmt,
 	.set_sysclk	= da732x_set_dai_sysclk,
@@ -1227,7 +1221,7 @@
 			.rates = DA732X_RATES,
 			.formats = DA732X_FORMATS,
 		},
-		.ops = &da732x_dai1_ops,
+		.ops = &da732x_dai_ops,
 	},
 	{
 		.name	= "DA732X_AIFB",
@@ -1247,7 +1241,7 @@
 			.rates = DA732X_RATES,
 			.formats = DA732X_FORMATS,
 		},
-		.ops = &da732x_dai2_ops,
+		.ops = &da732x_dai_ops,
 	},
 };
 
@@ -1572,7 +1566,6 @@
 static struct i2c_driver da732x_i2c_driver = {
 	.driver		= {
 		.name	= "da7320",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= da732x_i2c_probe,
 	.remove		= da732x_i2c_remove,
diff --git a/sound/soc/codecs/da9055.c b/sound/soc/codecs/da9055.c
index 66bb446..0b2ede8 100644
--- a/sound/soc/codecs/da9055.c
+++ b/sound/soc/codecs/da9055.c
@@ -289,26 +289,23 @@
 
 /* Gain and Volume */
 
-static const unsigned int aux_vol_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(aux_vol_tlv,
 	0x0, 0x10, TLV_DB_SCALE_ITEM(-5400, 0, 0),
 	/* -54dB to 15dB */
 	0x11, 0x3f, TLV_DB_SCALE_ITEM(-5400, 150, 0)
-};
+);
 
-static const unsigned int digital_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(digital_gain_tlv,
 	0x0, 0x07, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* -78dB to 12dB */
 	0x08, 0x7f, TLV_DB_SCALE_ITEM(-7800, 75, 0)
-};
+);
 
-static const unsigned int alc_analog_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(alc_analog_gain_tlv,
 	0x0, 0x0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* 0dB to 36dB */
 	0x01, 0x07, TLV_DB_SCALE_ITEM(0, 600, 0)
-};
+);
 
 static const DECLARE_TLV_DB_SCALE(mic_vol_tlv, -600, 600, 0);
 static const DECLARE_TLV_DB_SCALE(mixin_gain_tlv, -450, 150, 0);
@@ -948,7 +945,7 @@
 	struct da9055_platform_data *pdata;
 };
 
-static struct reg_default da9055_reg_defaults[] = {
+static const struct reg_default da9055_reg_defaults[] = {
 	{ 0x21, 0x10 },
 	{ 0x22, 0x0A },
 	{ 0x23, 0x00 },
@@ -1533,12 +1530,12 @@
 	{ .compatible = "dlg,da9055-codec", },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, da9055_of_match);
 
 /* I2C codec control layer */
 static struct i2c_driver da9055_i2c_driver = {
 	.driver = {
 		.name = "da9055-codec",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(da9055_of_match),
 	},
 	.probe		= da9055_i2c_probe,
diff --git a/sound/soc/codecs/gtm601.c b/sound/soc/codecs/gtm601.c
new file mode 100644
index 0000000..0b80052
--- /dev/null
+++ b/sound/soc/codecs/gtm601.c
@@ -0,0 +1,99 @@
+/*
+ * This is a simple driver for the GTM601 Voice PCM interface
+ *
+ * Copyright (C) 2015 Goldelico GmbH
+ *
+ * Author: Marek Belisko <marek@goldelico.com>
+ *
+ * Based on wm8727.c driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/ac97_codec.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+
+static const struct snd_soc_dapm_widget gtm601_dapm_widgets[] = {
+	SND_SOC_DAPM_OUTPUT("AOUT"),
+	SND_SOC_DAPM_INPUT("AIN"),
+};
+
+static const struct snd_soc_dapm_route gtm601_dapm_routes[] = {
+	{ "AOUT", NULL, "Playback" },
+	{ "Capture", NULL, "AIN" },
+};
+
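+/*
+ * The modem exposes a fixed 8 kHz, 16-bit mono voice PCM interface and
+ * has no control bus, hence no regmap or bus ops here.
+ */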
+static struct snd_soc_dai_driver gtm601_dai = {
+	.name = "gtm601",
+	.playback = {
+		.stream_name = "Playback",
+		.channels_min = 1,
+		.channels_max = 1,
+		.rates = SNDRV_PCM_RATE_8000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 1,
+		.channels_max = 1,
+		.rates = SNDRV_PCM_RATE_8000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+};
+
+static const struct snd_soc_codec_driver soc_codec_dev_gtm601 = {
+	.dapm_widgets = gtm601_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(gtm601_dapm_widgets),
+	.dapm_routes = gtm601_dapm_routes,
+	.num_dapm_routes = ARRAY_SIZE(gtm601_dapm_routes),
+};
+
+static int gtm601_platform_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_codec(&pdev->dev,
+			&soc_codec_dev_gtm601, &gtm601_dai, 1);
+}
+
+static int gtm601_platform_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+	return 0;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id gtm601_codec_of_match[] = {
+	{ .compatible = "option,gtm601", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, gtm601_codec_of_match);
+#endif
+
+static struct platform_driver gtm601_codec_driver = {
+	.driver = {
+		.name = "gtm601",
+		.of_match_table = of_match_ptr(gtm601_codec_of_match),
+	},
+	.probe = gtm601_platform_probe,
+	.remove = gtm601_platform_remove,
+};
+
+module_platform_driver(gtm601_codec_driver);
+
+MODULE_DESCRIPTION("ASoC gtm601 driver");
+MODULE_AUTHOR("Marek Belisko <marek@goldelico.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gtm601");
diff --git a/sound/soc/codecs/ics43432.c b/sound/soc/codecs/ics43432.c
new file mode 100644
index 0000000..dd850b9
--- /dev/null
+++ b/sound/soc/codecs/ics43432.c
@@ -0,0 +1,81 @@
+/*
+ * I2S MEMS microphone driver for InvenSense ICS-43432
+ *
+ * - Non-configurable.
+ * - I2S interface, 64 BCLKs per frame, 32 bits per channel, 24-bit data
+ *
+ * Copyright (c) 2015 Axis Communications AB
+ *
+ * Licensed under GPL v2.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#define ICS43432_RATE_MIN 7190 /* Hz, from data sheet */
+#define ICS43432_RATE_MAX 52800 /* Hz, from data sheet */
+
+#define ICS43432_FORMATS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
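+/*
+ * The microphone is a clock slave and accepts any rate within the data
+ * sheet window, hence SNDRV_PCM_RATE_CONTINUOUS plus rate_min/rate_max.
+ */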
+static struct snd_soc_dai_driver ics43432_dai = {
+	.name = "ics43432-hifi",
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min = ICS43432_RATE_MIN,
+		.rate_max = ICS43432_RATE_MAX,
+		.rates = SNDRV_PCM_RATE_CONTINUOUS,
+		.formats = ICS43432_FORMATS,
+	},
+};
+
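+/* The part has no registers and no controls; an empty codec driver suffices */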
+static struct snd_soc_codec_driver ics43432_codec_driver = {
+};
+
+static int ics43432_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_codec(&pdev->dev, &ics43432_codec_driver,
+			&ics43432_dai, 1);
+}
+
+static int ics43432_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id ics43432_ids[] = {
+	{ .compatible = "invensense,ics43432", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ics43432_ids);
+#endif
+
+static struct platform_driver ics43432_driver = {
+	.driver = {
+		.name = "ics43432",
+		.of_match_table = of_match_ptr(ics43432_ids),
+	},
+	.probe = ics43432_probe,
+	.remove = ics43432_remove,
+};
+
+module_platform_driver(ics43432_driver);
+
+MODULE_DESCRIPTION("ASoC ICS43432 driver");
+MODULE_AUTHOR("Ricard Wanderlof <ricardw@axis.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/isabelle.c b/sound/soc/codecs/isabelle.c
index ebd90283c..be44837 100644
--- a/sound/soc/codecs/isabelle.c
+++ b/sound/soc/codecs/isabelle.c
@@ -33,7 +33,7 @@
 
 
 /* Register default values for ISABELLE driver. */
-static struct reg_default isabelle_reg_defs[] = {
+static const struct reg_default isabelle_reg_defs[] = {
 	{ 0, 0x00 },
 	{ 1, 0x00 },
 	{ 2, 0x00 },
@@ -1016,25 +1016,25 @@
 #define ISABELLE_FORMATS (SNDRV_PCM_FMTBIT_S20_3LE |\
 			SNDRV_PCM_FMTBIT_S32_LE)
 
-static struct snd_soc_dai_ops isabelle_hs_dai_ops = {
+static const struct snd_soc_dai_ops isabelle_hs_dai_ops = {
 	.hw_params	= isabelle_hw_params,
 	.set_fmt	= isabelle_set_dai_fmt,
 	.digital_mute	= isabelle_hs_mute,
 };
 
-static struct snd_soc_dai_ops isabelle_hf_dai_ops = {
+static const struct snd_soc_dai_ops isabelle_hf_dai_ops = {
 	.hw_params	= isabelle_hw_params,
 	.set_fmt	= isabelle_set_dai_fmt,
 	.digital_mute	= isabelle_hf_mute,
 };
 
-static struct snd_soc_dai_ops isabelle_line_dai_ops = {
+static const struct snd_soc_dai_ops isabelle_line_dai_ops = {
 	.hw_params	= isabelle_hw_params,
 	.set_fmt	= isabelle_set_dai_fmt,
 	.digital_mute	= isabelle_line_mute,
 };
 
-static struct snd_soc_dai_ops isabelle_ul_dai_ops = {
+static const struct snd_soc_dai_ops isabelle_ul_dai_ops = {
 	.hw_params	= isabelle_hw_params,
 	.set_fmt	= isabelle_set_dai_fmt,
 };
@@ -1149,7 +1149,6 @@
 static struct i2c_driver isabelle_i2c_driver = {
 	.driver = {
 		.name = "isabelle",
-		.owner = THIS_MODULE,
 	},
 	.probe = isabelle_i2c_probe,
 	.remove = isabelle_i2c_remove,
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index 9363fdb..1f5ab99 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -78,11 +78,10 @@
 	struct regmap *regmap;
 };
 
-static const unsigned int jz4740_mic_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(jz4740_mic_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(0, 600, 0),
-	3, 3, TLV_DB_SCALE_ITEM(2000, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(2000, 0, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(jz4740_out_tlv, 0, 200, 0);
 static const DECLARE_TLV_DB_SCALE(jz4740_in_tlv, -3450, 150, 0);
diff --git a/sound/soc/codecs/lm4857.c b/sound/soc/codecs/lm4857.c
index 99ffc49..558de10 100644
--- a/sound/soc/codecs/lm4857.c
+++ b/sound/soc/codecs/lm4857.c
@@ -142,7 +142,6 @@
 static struct i2c_driver lm4857_i2c_driver = {
 	.driver = {
 		.name = "lm4857",
-		.owner = THIS_MODULE,
 	},
 	.probe = lm4857_i2c_probe,
 	.id_table = lm4857_i2c_id,
diff --git a/sound/soc/codecs/lm49453.c b/sound/soc/codecs/lm49453.c
index 6600aa0..9af5640 100644
--- a/sound/soc/codecs/lm49453.c
+++ b/sound/soc/codecs/lm49453.c
@@ -30,7 +30,7 @@
 #include <asm/div64.h>
 #include "lm49453.h"
 
-static struct reg_default lm49453_reg_defs[] = {
+static const struct reg_default lm49453_reg_defs[] = {
 	{ 0, 0x00 },
 	{ 1, 0x00 },
 	{ 2, 0x00 },
@@ -188,7 +188,6 @@
 /* codec private data */
 struct lm49453_priv {
 	struct regmap *regmap;
-	int fs_rate;
 };
 
 /* capture path controls */
@@ -1112,13 +1111,10 @@
 			     struct snd_soc_dai *dai)
 {
 	struct snd_soc_codec *codec = dai->codec;
-	struct lm49453_priv *lm49453 = snd_soc_codec_get_drvdata(codec);
 	u16 clk_div = 0;
 
-	lm49453->fs_rate = params_rate(params);
-
 	/* Setting DAC clock dividers based on substream sample rate. */
-	switch (lm49453->fs_rate) {
+	switch (params_rate(params)) {
 	case 8000:
 	case 16000:
 	case 32000:
@@ -1291,35 +1287,35 @@
 #define LM49453_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
 			 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
 
-static struct snd_soc_dai_ops lm49453_headset_dai_ops = {
+static const struct snd_soc_dai_ops lm49453_headset_dai_ops = {
 	.hw_params	= lm49453_hw_params,
 	.set_sysclk	= lm49453_set_dai_sysclk,
 	.set_fmt	= lm49453_set_dai_fmt,
 	.digital_mute	= lm49453_hp_mute,
 };
 
-static struct snd_soc_dai_ops lm49453_speaker_dai_ops = {
+static const struct snd_soc_dai_ops lm49453_speaker_dai_ops = {
 	.hw_params	= lm49453_hw_params,
 	.set_sysclk	= lm49453_set_dai_sysclk,
 	.set_fmt	= lm49453_set_dai_fmt,
 	.digital_mute	= lm49453_ls_mute,
 };
 
-static struct snd_soc_dai_ops lm49453_haptic_dai_ops = {
+static const struct snd_soc_dai_ops lm49453_haptic_dai_ops = {
 	.hw_params	= lm49453_hw_params,
 	.set_sysclk	= lm49453_set_dai_sysclk,
 	.set_fmt	= lm49453_set_dai_fmt,
 	.digital_mute	= lm49453_ha_mute,
 };
 
-static struct snd_soc_dai_ops lm49453_ep_dai_ops = {
+static const struct snd_soc_dai_ops lm49453_ep_dai_ops = {
 	.hw_params	= lm49453_hw_params,
 	.set_sysclk	= lm49453_set_dai_sysclk,
 	.set_fmt	= lm49453_set_dai_fmt,
 	.digital_mute	= lm49453_ep_mute,
 };
 
-static struct snd_soc_dai_ops lm49453_lineout_dai_ops = {
+static const struct snd_soc_dai_ops lm49453_lineout_dai_ops = {
 	.hw_params	= lm49453_hw_params,
 	.set_sysclk	= lm49453_set_dai_sysclk,
 	.set_fmt	= lm49453_set_dai_fmt,
@@ -1460,7 +1456,6 @@
 static struct i2c_driver lm49453_i2c_driver = {
 	.driver = {
 		.name = "lm49453",
-		.owner = THIS_MODULE,
 	},
 	.probe = lm49453_i2c_probe,
 	.remove = lm49453_i2c_remove,
diff --git a/sound/soc/codecs/max9768.c b/sound/soc/codecs/max9768.c
index e1c196a..5b82e26 100644
--- a/sound/soc/codecs/max9768.c
+++ b/sound/soc/codecs/max9768.c
@@ -35,7 +35,7 @@
 	u32 flags;
 };
 
-static struct reg_default max9768_default_regs[] = {
+static const struct reg_default max9768_default_regs[] = {
 	{ 0, 0 },
 	{ 3,  MAX9768_CTRL_FILTERLESS},
 };
@@ -43,8 +43,8 @@
 static int max9768_get_gpio(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol)
 {
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct max9768 *max9768 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
+	struct max9768 *max9768 = snd_soc_component_get_drvdata(c);
 	int val = gpio_get_value_cansleep(max9768->mute_gpio);
 
 	ucontrol->value.integer.value[0] = !val;
@@ -55,16 +55,15 @@
 static int max9768_set_gpio(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol)
 {
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct max9768 *max9768 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
+	struct max9768 *max9768 = snd_soc_component_get_drvdata(c);
 
 	gpio_set_value_cansleep(max9768->mute_gpio, !ucontrol->value.integer.value[0]);
 
 	return 0;
 }
 
-static const unsigned int volume_tlv[] = {
-	TLV_DB_RANGE_HEAD(43),
+static const DECLARE_TLV_DB_RANGE(volume_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(-16150, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(-9280, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(-9030, 0, 0),
@@ -107,8 +106,8 @@
 	51, 57, TLV_DB_SCALE_ITEM(290, 50, 0),
 	58, 58, TLV_DB_SCALE_ITEM(650, 0, 0),
 	59, 62, TLV_DB_SCALE_ITEM(700, 60, 0),
-	63, 63, TLV_DB_SCALE_ITEM(950, 0, 0),
-};
+	63, 63, TLV_DB_SCALE_ITEM(950, 0, 0)
+);
 
 static const struct snd_kcontrol_new max9768_volume[] = {
 	SOC_SINGLE_TLV("Playback Volume", MAX9768_VOL, 0, 63, 0, volume_tlv),
@@ -130,19 +129,20 @@
 	{ "OUT-", NULL, "IN" },
 };
 
-static int max9768_probe(struct snd_soc_codec *codec)
+static int max9768_probe(struct snd_soc_component *component)
 {
-	struct max9768 *max9768 = snd_soc_codec_get_drvdata(codec);
+	struct max9768 *max9768 = snd_soc_component_get_drvdata(component);
 	int ret;
 
 	if (max9768->flags & MAX9768_FLAG_CLASSIC_PWM) {
-		ret = snd_soc_write(codec, MAX9768_CTRL, MAX9768_CTRL_PWM);
+		ret = regmap_write(max9768->regmap, MAX9768_CTRL,
+			MAX9768_CTRL_PWM);
 		if (ret)
 			return ret;
 	}
 
 	if (gpio_is_valid(max9768->mute_gpio)) {
-		ret = snd_soc_add_codec_controls(codec, max9768_mute,
+		ret = snd_soc_add_component_controls(component, max9768_mute,
 				ARRAY_SIZE(max9768_mute));
 		if (ret)
 			return ret;
@@ -151,7 +151,7 @@
 	return 0;
 }
 
-static struct snd_soc_codec_driver max9768_codec_driver = {
+static struct snd_soc_component_driver max9768_component_driver = {
 	.probe = max9768_probe,
 	.controls = max9768_volume,
 	.num_controls = ARRAY_SIZE(max9768_volume),
@@ -183,11 +183,13 @@
 
 	if (pdata) {
 		/* Mute on powerup to avoid clicks */
-		err = gpio_request_one(pdata->mute_gpio, GPIOF_INIT_HIGH, "MAX9768 Mute");
+		err = devm_gpio_request_one(&client->dev, pdata->mute_gpio,
+				GPIOF_INIT_HIGH, "MAX9768 Mute");
 		max9768->mute_gpio = err ?: pdata->mute_gpio;
 
 		/* Activate chip by releasing shutdown, enables I2C */
-		err = gpio_request_one(pdata->shdn_gpio, GPIOF_INIT_HIGH, "MAX9768 Shutdown");
+		err = devm_gpio_request_one(&client->dev, pdata->shdn_gpio,
+				GPIOF_INIT_HIGH, "MAX9768 Shutdown");
 		max9768->shdn_gpio = err ?: pdata->shdn_gpio;
 
 		max9768->flags = pdata->flags;
@@ -199,38 +201,12 @@
 	i2c_set_clientdata(client, max9768);
 
 	max9768->regmap = devm_regmap_init_i2c(client, &max9768_i2c_regmap_config);
-	if (IS_ERR(max9768->regmap)) {
-		err = PTR_ERR(max9768->regmap);
-		goto err_gpio_free;
-	}
+	if (IS_ERR(max9768->regmap))
+		return PTR_ERR(max9768->regmap);
 
-	err = snd_soc_register_codec(&client->dev, &max9768_codec_driver, NULL, 0);
-	if (err)
-		goto err_gpio_free;
-
-	return 0;
-
- err_gpio_free:
-	if (gpio_is_valid(max9768->shdn_gpio))
-		gpio_free(max9768->shdn_gpio);
-	if (gpio_is_valid(max9768->mute_gpio))
-		gpio_free(max9768->mute_gpio);
-
-	return err;
-}
-
-static int max9768_i2c_remove(struct i2c_client *client)
-{
-	struct max9768 *max9768 = i2c_get_clientdata(client);
-
-	snd_soc_unregister_codec(&client->dev);
-
-	if (gpio_is_valid(max9768->shdn_gpio))
-		gpio_free(max9768->shdn_gpio);
-	if (gpio_is_valid(max9768->mute_gpio))
-		gpio_free(max9768->mute_gpio);
-
-	return 0;
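+	/* Everything is devm-managed, so no i2c remove callback is needed */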
+	return devm_snd_soc_register_component(&client->dev,
+		&max9768_component_driver, NULL, 0);
 }
 
 static const struct i2c_device_id max9768_i2c_id[] = {
@@ -242,10 +217,8 @@
 static struct i2c_driver max9768_i2c_driver = {
 	.driver = {
 		.name = "max9768",
-		.owner = THIS_MODULE,
 	},
 	.probe = max9768_i2c_probe,
-	.remove = max9768_i2c_remove,
 	.id_table = max9768_i2c_id,
 };
 module_i2c_driver(max9768_i2c_driver);
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index d0f4534..20dcc49 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -258,292 +258,36 @@
 	{ 0xc9, 0x00 }, /* C9 DAI2 biquad */
 };
 
-static struct {
-       int readable;
-       int writable;
-       int vol;
-} max98088_access[M98088_REG_CNT] = {
-       { 0xFF, 0xFF, 1 }, /* 00 IRQ status */
-       { 0xFF, 0x00, 1 }, /* 01 MIC status */
-       { 0xFF, 0x00, 1 }, /* 02 jack status */
-       { 0x1F, 0x1F, 1 }, /* 03 battery voltage */
-       { 0xFF, 0xFF, 0 }, /* 04 */
-       { 0xFF, 0xFF, 0 }, /* 05 */
-       { 0xFF, 0xFF, 0 }, /* 06 */
-       { 0xFF, 0xFF, 0 }, /* 07 */
-       { 0xFF, 0xFF, 0 }, /* 08 */
-       { 0xFF, 0xFF, 0 }, /* 09 */
-       { 0xFF, 0xFF, 0 }, /* 0A */
-       { 0xFF, 0xFF, 0 }, /* 0B */
-       { 0xFF, 0xFF, 0 }, /* 0C */
-       { 0xFF, 0xFF, 0 }, /* 0D */
-       { 0xFF, 0xFF, 0 }, /* 0E */
-       { 0xFF, 0xFF, 0 }, /* 0F interrupt enable */
-
-       { 0xFF, 0xFF, 0 }, /* 10 master clock */
-       { 0xFF, 0xFF, 0 }, /* 11 DAI1 clock mode */
-       { 0xFF, 0xFF, 0 }, /* 12 DAI1 clock control */
-       { 0xFF, 0xFF, 0 }, /* 13 DAI1 clock control */
-       { 0xFF, 0xFF, 0 }, /* 14 DAI1 format */
-       { 0xFF, 0xFF, 0 }, /* 15 DAI1 clock */
-       { 0xFF, 0xFF, 0 }, /* 16 DAI1 config */
-       { 0xFF, 0xFF, 0 }, /* 17 DAI1 TDM */
-       { 0xFF, 0xFF, 0 }, /* 18 DAI1 filters */
-       { 0xFF, 0xFF, 0 }, /* 19 DAI2 clock mode */
-       { 0xFF, 0xFF, 0 }, /* 1A DAI2 clock control */
-       { 0xFF, 0xFF, 0 }, /* 1B DAI2 clock control */
-       { 0xFF, 0xFF, 0 }, /* 1C DAI2 format */
-       { 0xFF, 0xFF, 0 }, /* 1D DAI2 clock */
-       { 0xFF, 0xFF, 0 }, /* 1E DAI2 config */
-       { 0xFF, 0xFF, 0 }, /* 1F DAI2 TDM */
-
-       { 0xFF, 0xFF, 0 }, /* 20 DAI2 filters */
-       { 0xFF, 0xFF, 0 }, /* 21 data config */
-       { 0xFF, 0xFF, 0 }, /* 22 DAC mixer */
-       { 0xFF, 0xFF, 0 }, /* 23 left ADC mixer */
-       { 0xFF, 0xFF, 0 }, /* 24 right ADC mixer */
-       { 0xFF, 0xFF, 0 }, /* 25 left HP mixer */
-       { 0xFF, 0xFF, 0 }, /* 26 right HP mixer */
-       { 0xFF, 0xFF, 0 }, /* 27 HP control */
-       { 0xFF, 0xFF, 0 }, /* 28 left REC mixer */
-       { 0xFF, 0xFF, 0 }, /* 29 right REC mixer */
-       { 0xFF, 0xFF, 0 }, /* 2A REC control */
-       { 0xFF, 0xFF, 0 }, /* 2B left SPK mixer */
-       { 0xFF, 0xFF, 0 }, /* 2C right SPK mixer */
-       { 0xFF, 0xFF, 0 }, /* 2D SPK control */
-       { 0xFF, 0xFF, 0 }, /* 2E sidetone */
-       { 0xFF, 0xFF, 0 }, /* 2F DAI1 playback level */
-
-       { 0xFF, 0xFF, 0 }, /* 30 DAI1 playback level */
-       { 0xFF, 0xFF, 0 }, /* 31 DAI2 playback level */
-       { 0xFF, 0xFF, 0 }, /* 32 DAI2 playbakc level */
-       { 0xFF, 0xFF, 0 }, /* 33 left ADC level */
-       { 0xFF, 0xFF, 0 }, /* 34 right ADC level */
-       { 0xFF, 0xFF, 0 }, /* 35 MIC1 level */
-       { 0xFF, 0xFF, 0 }, /* 36 MIC2 level */
-       { 0xFF, 0xFF, 0 }, /* 37 INA level */
-       { 0xFF, 0xFF, 0 }, /* 38 INB level */
-       { 0xFF, 0xFF, 0 }, /* 39 left HP volume */
-       { 0xFF, 0xFF, 0 }, /* 3A right HP volume */
-       { 0xFF, 0xFF, 0 }, /* 3B left REC volume */
-       { 0xFF, 0xFF, 0 }, /* 3C right REC volume */
-       { 0xFF, 0xFF, 0 }, /* 3D left SPK volume */
-       { 0xFF, 0xFF, 0 }, /* 3E right SPK volume */
-       { 0xFF, 0xFF, 0 }, /* 3F MIC config */
-
-       { 0xFF, 0xFF, 0 }, /* 40 MIC threshold */
-       { 0xFF, 0xFF, 0 }, /* 41 excursion limiter filter */
-       { 0xFF, 0xFF, 0 }, /* 42 excursion limiter threshold */
-       { 0xFF, 0xFF, 0 }, /* 43 ALC */
-       { 0xFF, 0xFF, 0 }, /* 44 power limiter threshold */
-       { 0xFF, 0xFF, 0 }, /* 45 power limiter config */
-       { 0xFF, 0xFF, 0 }, /* 46 distortion limiter config */
-       { 0xFF, 0xFF, 0 }, /* 47 audio input */
-       { 0xFF, 0xFF, 0 }, /* 48 microphone */
-       { 0xFF, 0xFF, 0 }, /* 49 level control */
-       { 0xFF, 0xFF, 0 }, /* 4A bypass switches */
-       { 0xFF, 0xFF, 0 }, /* 4B jack detect */
-       { 0xFF, 0xFF, 0 }, /* 4C input enable */
-       { 0xFF, 0xFF, 0 }, /* 4D output enable */
-       { 0xFF, 0xFF, 0 }, /* 4E bias control */
-       { 0xFF, 0xFF, 0 }, /* 4F DAC power */
-
-       { 0xFF, 0xFF, 0 }, /* 50 DAC power */
-       { 0xFF, 0xFF, 0 }, /* 51 system */
-       { 0xFF, 0xFF, 0 }, /* 52 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 53 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 54 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 55 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 56 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 57 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 58 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 59 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 5A DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 5B DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 5C DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 5D DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 5E DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 5F DAI1 EQ2 */
-
-       { 0xFF, 0xFF, 0 }, /* 60 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 61 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 62 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 63 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 64 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 65 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 66 DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 67 DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 68 DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 69 DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6A DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6B DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6C DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6D DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6E DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6F DAI1 EQ3 */
-
-       { 0xFF, 0xFF, 0 }, /* 70 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 71 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 72 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 73 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 74 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 75 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 76 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 77 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 78 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 79 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 7A DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 7B DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 7C DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 7D DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 7E DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 7F DAI1 EQ5 */
-
-       { 0xFF, 0xFF, 0 }, /* 80 DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 81 DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 82 DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 83 DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 84 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 85 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 86 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 87 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 88 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 89 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 8A DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 8B DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 8C DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 8D DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 8E DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 8F DAI2 EQ2 */
-
-       { 0xFF, 0xFF, 0 }, /* 90 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 91 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 92 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 93 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 94 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 95 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 96 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 97 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 98 DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 99 DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9A DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9B DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9C DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9D DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9E DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9F DAI2 EQ3 */
-
-       { 0xFF, 0xFF, 0 }, /* A0 DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* A1 DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* A2 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A3 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A4 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A5 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A6 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A7 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A8 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A9 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* AA DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* AB DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* AC DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* AD DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* AE DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* AF DAI2 EQ5 */
-
-       { 0xFF, 0xFF, 0 }, /* B0 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B1 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B2 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B3 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B4 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B5 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B6 DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* B7 DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* B8 DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* B9 DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BA DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BB DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BC DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BD DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BE DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BF DAI1 biquad */
-
-       { 0xFF, 0xFF, 0 }, /* C0 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C1 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C2 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C3 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C4 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C5 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C6 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C7 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C8 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C9 DAI2 biquad */
-       { 0x00, 0x00, 0 }, /* CA */
-       { 0x00, 0x00, 0 }, /* CB */
-       { 0x00, 0x00, 0 }, /* CC */
-       { 0x00, 0x00, 0 }, /* CD */
-       { 0x00, 0x00, 0 }, /* CE */
-       { 0x00, 0x00, 0 }, /* CF */
-
-       { 0x00, 0x00, 0 }, /* D0 */
-       { 0x00, 0x00, 0 }, /* D1 */
-       { 0x00, 0x00, 0 }, /* D2 */
-       { 0x00, 0x00, 0 }, /* D3 */
-       { 0x00, 0x00, 0 }, /* D4 */
-       { 0x00, 0x00, 0 }, /* D5 */
-       { 0x00, 0x00, 0 }, /* D6 */
-       { 0x00, 0x00, 0 }, /* D7 */
-       { 0x00, 0x00, 0 }, /* D8 */
-       { 0x00, 0x00, 0 }, /* D9 */
-       { 0x00, 0x00, 0 }, /* DA */
-       { 0x00, 0x00, 0 }, /* DB */
-       { 0x00, 0x00, 0 }, /* DC */
-       { 0x00, 0x00, 0 }, /* DD */
-       { 0x00, 0x00, 0 }, /* DE */
-       { 0x00, 0x00, 0 }, /* DF */
-
-       { 0x00, 0x00, 0 }, /* E0 */
-       { 0x00, 0x00, 0 }, /* E1 */
-       { 0x00, 0x00, 0 }, /* E2 */
-       { 0x00, 0x00, 0 }, /* E3 */
-       { 0x00, 0x00, 0 }, /* E4 */
-       { 0x00, 0x00, 0 }, /* E5 */
-       { 0x00, 0x00, 0 }, /* E6 */
-       { 0x00, 0x00, 0 }, /* E7 */
-       { 0x00, 0x00, 0 }, /* E8 */
-       { 0x00, 0x00, 0 }, /* E9 */
-       { 0x00, 0x00, 0 }, /* EA */
-       { 0x00, 0x00, 0 }, /* EB */
-       { 0x00, 0x00, 0 }, /* EC */
-       { 0x00, 0x00, 0 }, /* ED */
-       { 0x00, 0x00, 0 }, /* EE */
-       { 0x00, 0x00, 0 }, /* EF */
-
-       { 0x00, 0x00, 0 }, /* F0 */
-       { 0x00, 0x00, 0 }, /* F1 */
-       { 0x00, 0x00, 0 }, /* F2 */
-       { 0x00, 0x00, 0 }, /* F3 */
-       { 0x00, 0x00, 0 }, /* F4 */
-       { 0x00, 0x00, 0 }, /* F5 */
-       { 0x00, 0x00, 0 }, /* F6 */
-       { 0x00, 0x00, 0 }, /* F7 */
-       { 0x00, 0x00, 0 }, /* F8 */
-       { 0x00, 0x00, 0 }, /* F9 */
-       { 0x00, 0x00, 0 }, /* FA */
-       { 0x00, 0x00, 0 }, /* FB */
-       { 0x00, 0x00, 0 }, /* FC */
-       { 0x00, 0x00, 0 }, /* FD */
-       { 0x00, 0x00, 0 }, /* FE */
-       { 0xFF, 0x00, 1 }, /* FF */
-};
-
 static bool max98088_readable_register(struct device *dev, unsigned int reg)
 {
-       return max98088_access[reg].readable;
+	switch (reg) {
+	case M98088_REG_00_IRQ_STATUS ... 0xC9:
+	case M98088_REG_FF_REV_ID:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool max98088_writeable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case M98088_REG_03_BATTERY_VOLTAGE ... 0xC9:
+		return true;
+	default:
+		return false;
+	}
 }
 
 static bool max98088_volatile_register(struct device *dev, unsigned int reg)
 {
-       return max98088_access[reg].vol;
+	switch (reg) {
+	case M98088_REG_00_IRQ_STATUS ... M98088_REG_03_BATTERY_VOLTAGE:
+	case M98088_REG_FF_REV_ID:
+		return true;
+	default:
+		return false;
+	}
 }
 
 static const struct regmap_config max98088_regmap = {
@@ -551,6 +295,7 @@
 	.val_bits = 8,
 
 	.readable_reg = max98088_readable_register,
+	.writeable_reg = max98088_writeable_register,
 	.volatile_reg = max98088_volatile_register,
 	.max_register = 0xff,
 
@@ -680,29 +425,26 @@
        return 0;
 }
 
-static const unsigned int max98088_micboost_tlv[] = {
-       TLV_DB_RANGE_HEAD(2),
-       0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
-       2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
-};
+static const DECLARE_TLV_DB_RANGE(max98088_micboost_tlv,
+	0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
+	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0)
+);
 
-static const unsigned int max98088_hp_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98088_hp_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6700, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-4000, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1700, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(-400, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0)
+);
 
-static const unsigned int max98088_spk_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98088_spk_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6200, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-3500, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1200, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(100, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0)
+);
 
 static const struct snd_kcontrol_new max98088_snd_controls[] = {
 
@@ -2011,7 +1753,6 @@
 static struct i2c_driver max98088_i2c_driver = {
 	.driver = {
 		.name = "max98088",
-		.owner = THIS_MODULE,
 	},
 	.probe  = max98088_i2c_probe,
 	.remove = max98088_i2c_remove,
diff --git a/sound/soc/codecs/max98088.h b/sound/soc/codecs/max98088.h
index be89a4f..efa39bf 100644
--- a/sound/soc/codecs/max98088.h
+++ b/sound/soc/codecs/max98088.h
@@ -16,7 +16,7 @@
  */
 #define M98088_REG_00_IRQ_STATUS            0x00
 #define M98088_REG_01_MIC_STATUS            0x01
-#define M98088_REG_02_JACK_STAUS            0x02
+#define M98088_REG_02_JACK_STATUS           0x02
 #define M98088_REG_03_BATTERY_VOLTAGE       0x03
 #define M98088_REG_0F_IRQ_ENABLE            0x0F
 #define M98088_REG_10_SYS_CLK               0x10
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 78268f05..584aab8 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -267,75 +267,8 @@
 static bool max98090_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case M98090_REG_DEVICE_STATUS:
-	case M98090_REG_JACK_STATUS:
-	case M98090_REG_INTERRUPT_S:
-	case M98090_REG_RESERVED:
-	case M98090_REG_LINE_INPUT_CONFIG:
-	case M98090_REG_LINE_INPUT_LEVEL:
-	case M98090_REG_INPUT_MODE:
-	case M98090_REG_MIC1_INPUT_LEVEL:
-	case M98090_REG_MIC2_INPUT_LEVEL:
-	case M98090_REG_MIC_BIAS_VOLTAGE:
-	case M98090_REG_DIGITAL_MIC_ENABLE:
-	case M98090_REG_DIGITAL_MIC_CONFIG:
-	case M98090_REG_LEFT_ADC_MIXER:
-	case M98090_REG_RIGHT_ADC_MIXER:
-	case M98090_REG_LEFT_ADC_LEVEL:
-	case M98090_REG_RIGHT_ADC_LEVEL:
-	case M98090_REG_ADC_BIQUAD_LEVEL:
-	case M98090_REG_ADC_SIDETONE:
-	case M98090_REG_SYSTEM_CLOCK:
-	case M98090_REG_CLOCK_MODE:
-	case M98090_REG_CLOCK_RATIO_NI_MSB:
-	case M98090_REG_CLOCK_RATIO_NI_LSB:
-	case M98090_REG_CLOCK_RATIO_MI_MSB:
-	case M98090_REG_CLOCK_RATIO_MI_LSB:
-	case M98090_REG_MASTER_MODE:
-	case M98090_REG_INTERFACE_FORMAT:
-	case M98090_REG_TDM_CONTROL:
-	case M98090_REG_TDM_FORMAT:
-	case M98090_REG_IO_CONFIGURATION:
-	case M98090_REG_FILTER_CONFIG:
-	case M98090_REG_DAI_PLAYBACK_LEVEL:
-	case M98090_REG_DAI_PLAYBACK_LEVEL_EQ:
-	case M98090_REG_LEFT_HP_MIXER:
-	case M98090_REG_RIGHT_HP_MIXER:
-	case M98090_REG_HP_CONTROL:
-	case M98090_REG_LEFT_HP_VOLUME:
-	case M98090_REG_RIGHT_HP_VOLUME:
-	case M98090_REG_LEFT_SPK_MIXER:
-	case M98090_REG_RIGHT_SPK_MIXER:
-	case M98090_REG_SPK_CONTROL:
-	case M98090_REG_LEFT_SPK_VOLUME:
-	case M98090_REG_RIGHT_SPK_VOLUME:
-	case M98090_REG_DRC_TIMING:
-	case M98090_REG_DRC_COMPRESSOR:
-	case M98090_REG_DRC_EXPANDER:
-	case M98090_REG_DRC_GAIN:
-	case M98090_REG_RCV_LOUTL_MIXER:
-	case M98090_REG_RCV_LOUTL_CONTROL:
-	case M98090_REG_RCV_LOUTL_VOLUME:
-	case M98090_REG_LOUTR_MIXER:
-	case M98090_REG_LOUTR_CONTROL:
-	case M98090_REG_LOUTR_VOLUME:
-	case M98090_REG_JACK_DETECT:
-	case M98090_REG_INPUT_ENABLE:
-	case M98090_REG_OUTPUT_ENABLE:
-	case M98090_REG_LEVEL_CONTROL:
-	case M98090_REG_DSP_FILTER_ENABLE:
-	case M98090_REG_BIAS_CONTROL:
-	case M98090_REG_DAC_CONTROL:
-	case M98090_REG_ADC_CONTROL:
-	case M98090_REG_DEVICE_SHUTDOWN:
-	case M98090_REG_EQUALIZER_BASE ... M98090_REG_EQUALIZER_BASE + 0x68:
-	case M98090_REG_RECORD_BIQUAD_BASE ... M98090_REG_RECORD_BIQUAD_BASE + 0x0E:
-	case M98090_REG_DMIC3_VOLUME:
-	case M98090_REG_DMIC4_VOLUME:
-	case M98090_REG_DMIC34_BQ_PREATTEN:
-	case M98090_REG_RECORD_TDM_SLOT:
-	case M98090_REG_SAMPLE_RATE:
-	case M98090_REG_DMIC34_BIQUAD_BASE ... M98090_REG_DMIC34_BIQUAD_BASE + 0x0E:
+	case M98090_REG_DEVICE_STATUS ... M98090_REG_INTERRUPT_S:
+	case M98090_REG_LINE_INPUT_CONFIG ... 0xD1:
 	case M98090_REG_REVISION_ID:
 		return true;
 	default:
@@ -360,22 +293,20 @@
 	return ret;
 }
 
-static const unsigned int max98090_micboost_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(max98090_micboost_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
-	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
-};
+	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(max98090_mic_tlv, 0, 100, 0);
 
 static const DECLARE_TLV_DB_SCALE(max98090_line_single_ended_tlv,
 	-600, 600, 0);
 
-static const unsigned int max98090_line_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(max98090_line_tlv,
 	0, 3, TLV_DB_SCALE_ITEM(-600, 300, 0),
-	4, 5, TLV_DB_SCALE_ITEM(1400, 600, 0),
-};
+	4, 5, TLV_DB_SCALE_ITEM(1400, 600, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(max98090_avg_tlv, 0, 600, 0);
 static const DECLARE_TLV_DB_SCALE(max98090_av_tlv, -1200, 100, 0);
@@ -391,38 +322,34 @@
 static const DECLARE_TLV_DB_SCALE(max98090_drcexp_tlv, -6600, 100, 0);
 static const DECLARE_TLV_DB_SCALE(max98090_sdg_tlv, 50, 200, 0);
 
-static const unsigned int max98090_mixout_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(max98090_mixout_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(-1200, 250, 0),
-	2, 3, TLV_DB_SCALE_ITEM(-600, 600, 0),
-};
+	2, 3, TLV_DB_SCALE_ITEM(-600, 600, 0)
+);
 
-static const unsigned int max98090_hp_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98090_hp_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6700, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-4000, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1700, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(-400, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0)
+);
 
-static const unsigned int max98090_spk_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98090_spk_tlv,
 	0, 4, TLV_DB_SCALE_ITEM(-4800, 400, 0),
 	5, 10, TLV_DB_SCALE_ITEM(-2900, 300, 0),
 	11, 14, TLV_DB_SCALE_ITEM(-1200, 200, 0),
 	15, 29, TLV_DB_SCALE_ITEM(-500, 100, 0),
-	30, 39, TLV_DB_SCALE_ITEM(950, 50, 0),
-};
+	30, 39, TLV_DB_SCALE_ITEM(950, 50, 0)
+);
 
-static const unsigned int max98090_rcv_lout_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98090_rcv_lout_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6200, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-3500, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1200, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(100, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0)
+);
 
 static int max98090_get_enab_tlv(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_value *ucontrol)
@@ -850,6 +777,22 @@
 	return 0;
 }
 
+static int max98090_shdn_event(struct snd_soc_dapm_widget *w,
+				 struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct max98090_priv *max98090 = snd_soc_codec_get_drvdata(codec);
+
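+	/*
+	 * Only latch the request here; the actual SHDN cycling happens in
+	 * the DAPM sequence notifier once the full sequence has run.
+	 */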
+	if (event & SND_SOC_DAPM_POST_PMU)
+		max98090->shdn_pending = true;
+
+	return 0;
+}
+
 static const char *mic1_mux_text[] = { "IN12", "IN56" };
 
 static SOC_ENUM_SINGLE_DECL(mic1_mux_enum,
@@ -1158,9 +1098,11 @@
 	SND_SOC_DAPM_SUPPLY("SDOEN", M98090_REG_IO_CONFIGURATION,
 		M98090_SDOEN_SHIFT, 0, NULL, 0),
 	SND_SOC_DAPM_SUPPLY("DMICL_ENA", M98090_REG_DIGITAL_MIC_ENABLE,
-		 M98090_DIGMICL_SHIFT, 0, NULL, 0),
+		 M98090_DIGMICL_SHIFT, 0, max98090_shdn_event,
+			SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_SUPPLY("DMICR_ENA", M98090_REG_DIGITAL_MIC_ENABLE,
-		 M98090_DIGMICR_SHIFT, 0, NULL, 0),
+		 M98090_DIGMICR_SHIFT, 0, max98090_shdn_event,
+			 SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_SUPPLY("AHPF", M98090_REG_FILTER_CONFIG,
 		M98090_AHPF_SHIFT, 0, NULL, 0),
 
@@ -1205,10 +1147,12 @@
 		&max98090_right_adc_mixer_controls[0],
 		ARRAY_SIZE(max98090_right_adc_mixer_controls)),
 
-	SND_SOC_DAPM_ADC("ADCL", NULL, M98090_REG_INPUT_ENABLE,
-		M98090_ADLEN_SHIFT, 0),
-	SND_SOC_DAPM_ADC("ADCR", NULL, M98090_REG_INPUT_ENABLE,
-		M98090_ADREN_SHIFT, 0),
+	SND_SOC_DAPM_ADC_E("ADCL", NULL, M98090_REG_INPUT_ENABLE,
+		M98090_ADLEN_SHIFT, 0, max98090_shdn_event,
+		SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_ADC_E("ADCR", NULL, M98090_REG_INPUT_ENABLE,
+		M98090_ADREN_SHIFT, 0, max98090_shdn_event,
+		SND_SOC_DAPM_POST_PMU),
 
 	SND_SOC_DAPM_AIF_OUT("AIFOUTL", "HiFi Capture", 0,
 		SND_SOC_NOPM, 0, 0),
@@ -1801,10 +1745,13 @@
 		if (IS_ERR(max98090->mclk))
 			break;
 
-		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON)
+		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON) {
 			clk_disable_unprepare(max98090->mclk);
-		else
-			clk_prepare_enable(max98090->mclk);
+		} else {
+			ret = clk_prepare_enable(max98090->mclk);
+			if (ret)
+				return ret;
+		}
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
@@ -2383,7 +2330,7 @@
 #define MAX98090_RATES SNDRV_PCM_RATE_8000_96000
 #define MAX98090_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
 
-static struct snd_soc_dai_ops max98090_dai_ops = {
+static const struct snd_soc_dai_ops max98090_dai_ops = {
 	.set_sysclk = max98090_dai_set_sysclk,
 	.set_fmt = max98090_dai_set_fmt,
 	.set_tdm_slot = max98090_set_tdm_slot,
@@ -2536,9 +2483,30 @@
 	return 0;
 }
 
+static void max98090_seq_notifier(struct snd_soc_dapm_context *dapm,
+	enum snd_soc_dapm_type event, int subseq)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
+	struct max98090_priv *max98090 = snd_soc_codec_get_drvdata(codec);
+
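+	/*
+	 * Toggle the part through shutdown once per DAPM sequence so the
+	 * new ADC/digital-mic setup is latched; allow 40 ms to settle.
+	 */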
+	if (max98090->shdn_pending) {
+		snd_soc_update_bits(codec, M98090_REG_DEVICE_SHUTDOWN,
+				M98090_SHDNN_MASK, 0);
+		msleep(40);
+		snd_soc_update_bits(codec, M98090_REG_DEVICE_SHUTDOWN,
+				M98090_SHDNN_MASK, M98090_SHDNN_MASK);
+		max98090->shdn_pending = false;
+	}
+}
+
 static struct snd_soc_codec_driver soc_codec_dev_max98090 = {
 	.probe   = max98090_probe,
 	.remove  = max98090_remove,
+	.seq_notifier = max98090_seq_notifier,
 	.set_bias_level = max98090_set_bias_level,
 };
 
@@ -2714,7 +2678,6 @@
 static struct i2c_driver max98090_i2c_driver = {
 	.driver = {
 		.name = "max98090",
-		.owner = THIS_MODULE,
 		.pm = &max98090_pm,
 		.of_match_table = of_match_ptr(max98090_of_match),
 		.acpi_match_table = ACPI_PTR(max98090_acpi_match),
diff --git a/sound/soc/codecs/max98090.h b/sound/soc/codecs/max98090.h
index 21ff743..bc610d9 100644
--- a/sound/soc/codecs/max98090.h
+++ b/sound/soc/codecs/max98090.h
@@ -1543,6 +1543,7 @@
 	unsigned int pa2en;
 	unsigned int sidetone;
 	bool master;
+	bool shdn_pending;
 };
 
 int max98090_mic_detect(struct snd_soc_codec *codec,
diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
index 9a46d3d..1fedac5 100644
--- a/sound/soc/codecs/max98095.c
+++ b/sound/soc/codecs/max98095.c
@@ -202,300 +202,36 @@
 	{ 0xff, 0x00 }, /* FF */
 };
 
-static struct {
-	int readable;
-	int writable;
-} max98095_access[M98095_REG_CNT] = {
-	{ 0x00, 0x00 }, /* 00 */
-	{ 0xFF, 0x00 }, /* 01 */
-	{ 0xFF, 0x00 }, /* 02 */
-	{ 0xFF, 0x00 }, /* 03 */
-	{ 0xFF, 0x00 }, /* 04 */
-	{ 0xFF, 0x00 }, /* 05 */
-	{ 0xFF, 0x00 }, /* 06 */
-	{ 0xFF, 0x00 }, /* 07 */
-	{ 0xFF, 0x00 }, /* 08 */
-	{ 0xFF, 0x00 }, /* 09 */
-	{ 0xFF, 0x00 }, /* 0A */
-	{ 0xFF, 0x00 }, /* 0B */
-	{ 0xFF, 0x00 }, /* 0C */
-	{ 0xFF, 0x00 }, /* 0D */
-	{ 0xFF, 0x00 }, /* 0E */
-	{ 0xFF, 0x9F }, /* 0F */
-	{ 0xFF, 0xFF }, /* 10 */
-	{ 0xFF, 0xFF }, /* 11 */
-	{ 0xFF, 0xFF }, /* 12 */
-	{ 0xFF, 0xFF }, /* 13 */
-	{ 0xFF, 0xFF }, /* 14 */
-	{ 0xFF, 0xFF }, /* 15 */
-	{ 0xFF, 0xFF }, /* 16 */
-	{ 0xFF, 0xFF }, /* 17 */
-	{ 0xFF, 0xFF }, /* 18 */
-	{ 0xFF, 0xFF }, /* 19 */
-	{ 0xFF, 0xFF }, /* 1A */
-	{ 0xFF, 0xFF }, /* 1B */
-	{ 0xFF, 0xFF }, /* 1C */
-	{ 0xFF, 0xFF }, /* 1D */
-	{ 0xFF, 0x77 }, /* 1E */
-	{ 0xFF, 0x77 }, /* 1F */
-	{ 0xFF, 0x77 }, /* 20 */
-	{ 0xFF, 0x77 }, /* 21 */
-	{ 0xFF, 0x77 }, /* 22 */
-	{ 0xFF, 0x77 }, /* 23 */
-	{ 0xFF, 0xFF }, /* 24 */
-	{ 0xFF, 0x7F }, /* 25 */
-	{ 0xFF, 0x31 }, /* 26 */
-	{ 0xFF, 0xFF }, /* 27 */
-	{ 0xFF, 0xFF }, /* 28 */
-	{ 0xFF, 0xFF }, /* 29 */
-	{ 0xFF, 0xF7 }, /* 2A */
-	{ 0xFF, 0x2F }, /* 2B */
-	{ 0xFF, 0xEF }, /* 2C */
-	{ 0xFF, 0xFF }, /* 2D */
-	{ 0xFF, 0xFF }, /* 2E */
-	{ 0xFF, 0xFF }, /* 2F */
-	{ 0xFF, 0xFF }, /* 30 */
-	{ 0xFF, 0xFF }, /* 31 */
-	{ 0xFF, 0xFF }, /* 32 */
-	{ 0xFF, 0xFF }, /* 33 */
-	{ 0xFF, 0xF7 }, /* 34 */
-	{ 0xFF, 0x2F }, /* 35 */
-	{ 0xFF, 0xCF }, /* 36 */
-	{ 0xFF, 0xFF }, /* 37 */
-	{ 0xFF, 0xFF }, /* 38 */
-	{ 0xFF, 0xFF }, /* 39 */
-	{ 0xFF, 0xFF }, /* 3A */
-	{ 0xFF, 0xFF }, /* 3B */
-	{ 0xFF, 0xFF }, /* 3C */
-	{ 0xFF, 0xFF }, /* 3D */
-	{ 0xFF, 0xF7 }, /* 3E */
-	{ 0xFF, 0x2F }, /* 3F */
-	{ 0xFF, 0xCF }, /* 40 */
-	{ 0xFF, 0xFF }, /* 41 */
-	{ 0xFF, 0x77 }, /* 42 */
-	{ 0xFF, 0xFF }, /* 43 */
-	{ 0xFF, 0xFF }, /* 44 */
-	{ 0xFF, 0xFF }, /* 45 */
-	{ 0xFF, 0xFF }, /* 46 */
-	{ 0xFF, 0xFF }, /* 47 */
-	{ 0xFF, 0xFF }, /* 48 */
-	{ 0xFF, 0x0F }, /* 49 */
-	{ 0xFF, 0xFF }, /* 4A */
-	{ 0xFF, 0xFF }, /* 4B */
-	{ 0xFF, 0x3F }, /* 4C */
-	{ 0xFF, 0x3F }, /* 4D */
-	{ 0xFF, 0x3F }, /* 4E */
-	{ 0xFF, 0xFF }, /* 4F */
-	{ 0xFF, 0x7F }, /* 50 */
-	{ 0xFF, 0x7F }, /* 51 */
-	{ 0xFF, 0x0F }, /* 52 */
-	{ 0xFF, 0x3F }, /* 53 */
-	{ 0xFF, 0x3F }, /* 54 */
-	{ 0xFF, 0x3F }, /* 55 */
-	{ 0xFF, 0xFF }, /* 56 */
-	{ 0xFF, 0xFF }, /* 57 */
-	{ 0xFF, 0xBF }, /* 58 */
-	{ 0xFF, 0x1F }, /* 59 */
-	{ 0xFF, 0xBF }, /* 5A */
-	{ 0xFF, 0x1F }, /* 5B */
-	{ 0xFF, 0xBF }, /* 5C */
-	{ 0xFF, 0x3F }, /* 5D */
-	{ 0xFF, 0x3F }, /* 5E */
-	{ 0xFF, 0x7F }, /* 5F */
-	{ 0xFF, 0x7F }, /* 60 */
-	{ 0xFF, 0x47 }, /* 61 */
-	{ 0xFF, 0x9F }, /* 62 */
-	{ 0xFF, 0x9F }, /* 63 */
-	{ 0xFF, 0x9F }, /* 64 */
-	{ 0xFF, 0x9F }, /* 65 */
-	{ 0xFF, 0x9F }, /* 66 */
-	{ 0xFF, 0xBF }, /* 67 */
-	{ 0xFF, 0xBF }, /* 68 */
-	{ 0xFF, 0xFF }, /* 69 */
-	{ 0xFF, 0xFF }, /* 6A */
-	{ 0xFF, 0x7F }, /* 6B */
-	{ 0xFF, 0xF7 }, /* 6C */
-	{ 0xFF, 0xFF }, /* 6D */
-	{ 0xFF, 0xFF }, /* 6E */
-	{ 0xFF, 0x1F }, /* 6F */
-	{ 0xFF, 0xF7 }, /* 70 */
-	{ 0xFF, 0xFF }, /* 71 */
-	{ 0xFF, 0xFF }, /* 72 */
-	{ 0xFF, 0x1F }, /* 73 */
-	{ 0xFF, 0xF7 }, /* 74 */
-	{ 0xFF, 0xFF }, /* 75 */
-	{ 0xFF, 0xFF }, /* 76 */
-	{ 0xFF, 0x1F }, /* 77 */
-	{ 0xFF, 0xF7 }, /* 78 */
-	{ 0xFF, 0xFF }, /* 79 */
-	{ 0xFF, 0xFF }, /* 7A */
-	{ 0xFF, 0x1F }, /* 7B */
-	{ 0xFF, 0xF7 }, /* 7C */
-	{ 0xFF, 0xFF }, /* 7D */
-	{ 0xFF, 0xFF }, /* 7E */
-	{ 0xFF, 0x1F }, /* 7F */
-	{ 0xFF, 0xF7 }, /* 80 */
-	{ 0xFF, 0xFF }, /* 81 */
-	{ 0xFF, 0xFF }, /* 82 */
-	{ 0xFF, 0x1F }, /* 83 */
-	{ 0xFF, 0x7F }, /* 84 */
-	{ 0xFF, 0x0F }, /* 85 */
-	{ 0xFF, 0xD8 }, /* 86 */
-	{ 0xFF, 0xFF }, /* 87 */
-	{ 0xFF, 0xEF }, /* 88 */
-	{ 0xFF, 0xFE }, /* 89 */
-	{ 0xFF, 0xFE }, /* 8A */
-	{ 0xFF, 0xFF }, /* 8B */
-	{ 0xFF, 0xFF }, /* 8C */
-	{ 0xFF, 0x3F }, /* 8D */
-	{ 0xFF, 0xFF }, /* 8E */
-	{ 0xFF, 0x3F }, /* 8F */
-	{ 0xFF, 0x8F }, /* 90 */
-	{ 0xFF, 0xFF }, /* 91 */
-	{ 0xFF, 0x3F }, /* 92 */
-	{ 0xFF, 0xFF }, /* 93 */
-	{ 0xFF, 0xFF }, /* 94 */
-	{ 0xFF, 0x0F }, /* 95 */
-	{ 0xFF, 0x3F }, /* 96 */
-	{ 0xFF, 0x8C }, /* 97 */
-	{ 0x00, 0x00 }, /* 98 */
-	{ 0x00, 0x00 }, /* 99 */
-	{ 0x00, 0x00 }, /* 9A */
-	{ 0x00, 0x00 }, /* 9B */
-	{ 0x00, 0x00 }, /* 9C */
-	{ 0x00, 0x00 }, /* 9D */
-	{ 0x00, 0x00 }, /* 9E */
-	{ 0x00, 0x00 }, /* 9F */
-	{ 0x00, 0x00 }, /* A0 */
-	{ 0x00, 0x00 }, /* A1 */
-	{ 0x00, 0x00 }, /* A2 */
-	{ 0x00, 0x00 }, /* A3 */
-	{ 0x00, 0x00 }, /* A4 */
-	{ 0x00, 0x00 }, /* A5 */
-	{ 0x00, 0x00 }, /* A6 */
-	{ 0x00, 0x00 }, /* A7 */
-	{ 0x00, 0x00 }, /* A8 */
-	{ 0x00, 0x00 }, /* A9 */
-	{ 0x00, 0x00 }, /* AA */
-	{ 0x00, 0x00 }, /* AB */
-	{ 0x00, 0x00 }, /* AC */
-	{ 0x00, 0x00 }, /* AD */
-	{ 0x00, 0x00 }, /* AE */
-	{ 0x00, 0x00 }, /* AF */
-	{ 0x00, 0x00 }, /* B0 */
-	{ 0x00, 0x00 }, /* B1 */
-	{ 0x00, 0x00 }, /* B2 */
-	{ 0x00, 0x00 }, /* B3 */
-	{ 0x00, 0x00 }, /* B4 */
-	{ 0x00, 0x00 }, /* B5 */
-	{ 0x00, 0x00 }, /* B6 */
-	{ 0x00, 0x00 }, /* B7 */
-	{ 0x00, 0x00 }, /* B8 */
-	{ 0x00, 0x00 }, /* B9 */
-	{ 0x00, 0x00 }, /* BA */
-	{ 0x00, 0x00 }, /* BB */
-	{ 0x00, 0x00 }, /* BC */
-	{ 0x00, 0x00 }, /* BD */
-	{ 0x00, 0x00 }, /* BE */
-	{ 0x00, 0x00 }, /* BF */
-	{ 0x00, 0x00 }, /* C0 */
-	{ 0x00, 0x00 }, /* C1 */
-	{ 0x00, 0x00 }, /* C2 */
-	{ 0x00, 0x00 }, /* C3 */
-	{ 0x00, 0x00 }, /* C4 */
-	{ 0x00, 0x00 }, /* C5 */
-	{ 0x00, 0x00 }, /* C6 */
-	{ 0x00, 0x00 }, /* C7 */
-	{ 0x00, 0x00 }, /* C8 */
-	{ 0x00, 0x00 }, /* C9 */
-	{ 0x00, 0x00 }, /* CA */
-	{ 0x00, 0x00 }, /* CB */
-	{ 0x00, 0x00 }, /* CC */
-	{ 0x00, 0x00 }, /* CD */
-	{ 0x00, 0x00 }, /* CE */
-	{ 0x00, 0x00 }, /* CF */
-	{ 0x00, 0x00 }, /* D0 */
-	{ 0x00, 0x00 }, /* D1 */
-	{ 0x00, 0x00 }, /* D2 */
-	{ 0x00, 0x00 }, /* D3 */
-	{ 0x00, 0x00 }, /* D4 */
-	{ 0x00, 0x00 }, /* D5 */
-	{ 0x00, 0x00 }, /* D6 */
-	{ 0x00, 0x00 }, /* D7 */
-	{ 0x00, 0x00 }, /* D8 */
-	{ 0x00, 0x00 }, /* D9 */
-	{ 0x00, 0x00 }, /* DA */
-	{ 0x00, 0x00 }, /* DB */
-	{ 0x00, 0x00 }, /* DC */
-	{ 0x00, 0x00 }, /* DD */
-	{ 0x00, 0x00 }, /* DE */
-	{ 0x00, 0x00 }, /* DF */
-	{ 0x00, 0x00 }, /* E0 */
-	{ 0x00, 0x00 }, /* E1 */
-	{ 0x00, 0x00 }, /* E2 */
-	{ 0x00, 0x00 }, /* E3 */
-	{ 0x00, 0x00 }, /* E4 */
-	{ 0x00, 0x00 }, /* E5 */
-	{ 0x00, 0x00 }, /* E6 */
-	{ 0x00, 0x00 }, /* E7 */
-	{ 0x00, 0x00 }, /* E8 */
-	{ 0x00, 0x00 }, /* E9 */
-	{ 0x00, 0x00 }, /* EA */
-	{ 0x00, 0x00 }, /* EB */
-	{ 0x00, 0x00 }, /* EC */
-	{ 0x00, 0x00 }, /* ED */
-	{ 0x00, 0x00 }, /* EE */
-	{ 0x00, 0x00 }, /* EF */
-	{ 0x00, 0x00 }, /* F0 */
-	{ 0x00, 0x00 }, /* F1 */
-	{ 0x00, 0x00 }, /* F2 */
-	{ 0x00, 0x00 }, /* F3 */
-	{ 0x00, 0x00 }, /* F4 */
-	{ 0x00, 0x00 }, /* F5 */
-	{ 0x00, 0x00 }, /* F6 */
-	{ 0x00, 0x00 }, /* F7 */
-	{ 0x00, 0x00 }, /* F8 */
-	{ 0x00, 0x00 }, /* F9 */
-	{ 0x00, 0x00 }, /* FA */
-	{ 0x00, 0x00 }, /* FB */
-	{ 0x00, 0x00 }, /* FC */
-	{ 0x00, 0x00 }, /* FD */
-	{ 0x00, 0x00 }, /* FE */
-	{ 0xFF, 0x00 }, /* FF */
-};
-
 static bool max98095_readable(struct device *dev, unsigned int reg)
 {
-	if (reg >= M98095_REG_CNT)
-		return 0;
-	return max98095_access[reg].readable != 0;
+	switch (reg) {
+	case M98095_001_HOST_INT_STS ... M98095_097_PWR_SYS:
+	case M98095_0FF_REV_ID:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool max98095_writeable(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case M98095_00F_HOST_CFG ... M98095_097_PWR_SYS:
+		return true;
+	default:
+		return false;
+	}
 }
 
 static bool max98095_volatile(struct device *dev, unsigned int reg)
 {
-	if (reg > M98095_REG_MAX_CACHED)
-		return 1;
-
 	switch (reg) {
-	case M98095_000_HOST_DATA:
-	case M98095_001_HOST_INT_STS:
-	case M98095_002_HOST_RSP_STS:
-	case M98095_003_HOST_CMD_STS:
-	case M98095_004_CODEC_STS:
-	case M98095_005_DAI1_ALC_STS:
-	case M98095_006_DAI2_ALC_STS:
-	case M98095_007_JACK_AUTO_STS:
-	case M98095_008_JACK_MANUAL_STS:
-	case M98095_009_JACK_VBAT_STS:
-	case M98095_00A_ACC_ADC_STS:
-	case M98095_00B_MIC_NG_AGC_STS:
-	case M98095_00C_SPK_L_VOLT_STS:
-	case M98095_00D_SPK_R_VOLT_STS:
-	case M98095_00E_TEMP_SENSOR_STS:
-		return 1;
+	case M98095_000_HOST_DATA ... M98095_00E_TEMP_SENSOR_STS:
+	case M98095_REG_MAX_CACHED + 1 ... M98095_0FF_REV_ID:
+		return true;
+	default:
+		return false;
 	}
-
-	return 0;
 }
 
 static const struct regmap_config max98095_regmap = {
@@ -508,6 +244,7 @@
 	.cache_type = REGCACHE_RBTREE,
 
 	.readable_reg = max98095_readable,
+	.writeable_reg = max98095_writeable,
 	.volatile_reg = max98095_volatile,
 };
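The per-register access table that the old code indexed is replaced by switch-based regmap callbacks. As a rough sketch of the pattern, with hypothetical FOO_* register names that are not part of this patch, regmap consults these predicates on every access and GCC's case-range extension keeps a contiguous register bank to a single line:

	static bool foo_readable(struct device *dev, unsigned int reg)
	{
		switch (reg) {
		case FOO_FIRST_CTRL ... FOO_LAST_CTRL:	/* contiguous bank */
		case FOO_REV_ID:			/* lone register */
			return true;
		default:
			return false;
		}
	}

	static const struct regmap_config foo_regmap = {
		.reg_bits	= 8,
		.val_bits	= 8,
		.readable_reg	= foo_readable,
		.cache_type	= REGCACHE_RBTREE,
	};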
 
@@ -661,48 +398,43 @@
 	return 0;
 }
 
-static const unsigned int max98095_micboost_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(max98095_micboost_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
-	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
-};
+	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(max98095_mic_tlv, 0, 100, 0);
 static const DECLARE_TLV_DB_SCALE(max98095_adc_tlv, -1200, 100, 0);
 static const DECLARE_TLV_DB_SCALE(max98095_adcboost_tlv, 0, 600, 0);
 
-static const unsigned int max98095_hp_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98095_hp_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6700, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-4000, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1700, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(-400, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0)
+);
 
-static const unsigned int max98095_spk_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(max98095_spk_tlv,
 	0, 10, TLV_DB_SCALE_ITEM(-5900, 400, 0),
 	11, 18, TLV_DB_SCALE_ITEM(-1700, 200, 0),
 	19, 27, TLV_DB_SCALE_ITEM(-200, 100, 0),
-	28, 39, TLV_DB_SCALE_ITEM(650, 50, 0),
-};
+	28, 39, TLV_DB_SCALE_ITEM(650, 50, 0)
+);
 
-static const unsigned int max98095_rcv_lout_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98095_rcv_lout_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6200, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-3500, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1200, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(100, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0)
+);
 
-static const unsigned int max98095_lin_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(max98095_lin_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-600, 300, 0),
 	3, 3, TLV_DB_SCALE_ITEM(300, 1100, 0),
-	4, 5, TLV_DB_SCALE_ITEM(1400, 600, 0),
-};
+	4, 5, TLV_DB_SCALE_ITEM(1400, 600, 0)
+);
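DECLARE_TLV_DB_RANGE() sizes the range array from its own argument list, so the hand-counted TLV_DB_RANGE_HEAD(n) prefix, which could silently drift out of sync with the items, disappears. A minimal sketch with made-up values and hypothetical FOO_* names:

	/* steps 0-7 span -79..-51 dB with step 0 muted, 8-15 span -47..-26 dB */
	static const DECLARE_TLV_DB_RANGE(foo_out_tlv,
		0, 7,  TLV_DB_SCALE_ITEM(-7900, 400, 1),
		8, 15, TLV_DB_SCALE_ITEM(-4700, 300, 0)
	);

	SOC_SINGLE_TLV("Foo Playback Volume", FOO_VOL_REG, 0, 15, 0, foo_out_tlv),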
 
 static const struct snd_kcontrol_new max98095_snd_controls[] = {
 
@@ -1653,10 +1385,13 @@
 		if (IS_ERR(max98095->mclk))
 			break;
 
-		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON)
+		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON) {
 			clk_disable_unprepare(max98095->mclk);
-		else
-			clk_prepare_enable(max98095->mclk);
+		} else {
+			ret = clk_prepare_enable(max98095->mclk);
+			if (ret)
+				return ret;
+		}
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
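clk_prepare_enable() can fail, for instance when the clock's parent cannot be prepared, so its return value now aborts the bias transition instead of being dropped. The checked pattern, sketched with a hypothetical priv and dev:

	ret = clk_prepare_enable(priv->mclk);
	if (ret) {
		dev_err(dev, "failed to enable MCLK: %d\n", ret);
		return ret;
	}
	/* ... later, on the opposite transition ... */
	clk_disable_unprepare(priv->mclk);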
@@ -2431,7 +2166,6 @@
 static struct i2c_driver max98095_i2c_driver = {
 	.driver = {
 		.name = "max98095",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(max98095_of_match),
 	},
 	.probe  = max98095_i2c_probe,
diff --git a/sound/soc/codecs/max98357a.c b/sound/soc/codecs/max98357a.c
index 3a2fda0..f5e3dce 100644
--- a/sound/soc/codecs/max98357a.c
+++ b/sound/soc/codecs/max98357a.c
@@ -31,6 +31,9 @@
 {
 	struct gpio_desc *sdmode = snd_soc_dai_get_drvdata(dai);
 
+	if (!sdmode)
+		return 0;
+
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
 	case SNDRV_PCM_TRIGGER_RESUME:
@@ -48,24 +51,21 @@
 }
 
 static const struct snd_soc_dapm_widget max98357a_dapm_widgets[] = {
-	SND_SOC_DAPM_DAC("SDMode", NULL, SND_SOC_NOPM, 0, 0),
 	SND_SOC_DAPM_OUTPUT("Speaker"),
 };
 
 static const struct snd_soc_dapm_route max98357a_dapm_routes[] = {
-	{"Speaker", NULL, "SDMode"},
+	{"Speaker", NULL, "HiFi Playback"},
 };
 
 static int max98357a_codec_probe(struct snd_soc_codec *codec)
 {
 	struct gpio_desc *sdmode;
 
-	sdmode = devm_gpiod_get(codec->dev, "sdmode", GPIOD_OUT_LOW);
-	if (IS_ERR(sdmode)) {
-		dev_err(codec->dev, "%s() unable to get sdmode GPIO: %ld\n",
-				__func__, PTR_ERR(sdmode));
+	sdmode = devm_gpiod_get_optional(codec->dev, "sdmode", GPIOD_OUT_LOW);
+	if (IS_ERR(sdmode))
 		return PTR_ERR(sdmode);
-	}
+
 	snd_soc_codec_set_drvdata(codec, sdmode);
 
 	return 0;
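devm_gpiod_get_optional() returns NULL rather than an error when no "sdmode" mapping exists, so boards that hard-wire SD_MODE can simply omit the property, while real failures such as -EPROBE_DEFER still propagate. The consumer must then tolerate a NULL descriptor, which is what the early return added to the trigger callback above provides. Roughly, with hypothetical names:

	desc = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(desc))
		return PTR_ERR(desc);		/* e.g. -EPROBE_DEFER */
	if (desc)
		gpiod_set_value(desc, 1);	/* only touch a real pin */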
@@ -79,7 +79,7 @@
 	.num_dapm_routes	= ARRAY_SIZE(max98357a_dapm_routes),
 };
 
-static struct snd_soc_dai_ops max98357a_dai_ops = {
+static const struct snd_soc_dai_ops max98357a_dai_ops = {
 	.trigger	= max98357a_daiops_trigger,
 };
 
@@ -104,15 +104,8 @@
 
 static int max98357a_platform_probe(struct platform_device *pdev)
 {
-	int ret;
-
-	ret = snd_soc_register_codec(&pdev->dev, &max98357a_codec_driver,
+	return snd_soc_register_codec(&pdev->dev, &max98357a_codec_driver,
 			&max98357a_dai_driver, 1);
-	if (ret)
-		dev_err(&pdev->dev, "%s() error registering codec driver: %d\n",
-				__func__, ret);
-
-	return ret;
 }
 
 static int max98357a_platform_remove(struct platform_device *pdev)
diff --git a/sound/soc/codecs/max9850.c b/sound/soc/codecs/max9850.c
index 481d58f1..c14a79d 100644
--- a/sound/soc/codecs/max9850.c
+++ b/sound/soc/codecs/max9850.c
@@ -67,13 +67,12 @@
 	.cache_type = REGCACHE_RBTREE,
 };
 
-static const unsigned int max9850_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(max9850_tlv,
 	0x18, 0x1f, TLV_DB_SCALE_ITEM(-7450, 400, 0),
 	0x20, 0x33, TLV_DB_SCALE_ITEM(-4150, 200, 0),
 	0x34, 0x37, TLV_DB_SCALE_ITEM(-150, 100, 0),
-	0x38, 0x3f, TLV_DB_SCALE_ITEM(250, 50, 0),
-};
+	0x38, 0x3f, TLV_DB_SCALE_ITEM(250, 50, 0)
+);
 
 static const struct snd_kcontrol_new max9850_controls[] = {
 SOC_SINGLE_TLV("Headphone Volume", MAX9850_VOLUME, 0, 0x3f, 1, max9850_tlv),
@@ -352,7 +351,6 @@
 static struct i2c_driver max9850_i2c_driver = {
 	.driver = {
 		.name = "max9850",
-		.owner = THIS_MODULE,
 	},
 	.probe = max9850_i2c_probe,
 	.remove = max9850_i2c_remove,
diff --git a/sound/soc/codecs/max9877.c b/sound/soc/codecs/max9877.c
index 29549cd..61cc18e 100644
--- a/sound/soc/codecs/max9877.c
+++ b/sound/soc/codecs/max9877.c
@@ -20,9 +20,7 @@
 
 #include "max9877.h"
 
-static struct regmap *regmap;
-
-static struct reg_default max9877_regs[] = {
+static const struct reg_default max9877_regs[] = {
 	{ 0, 0x40 },
 	{ 1, 0x00 },
 	{ 2, 0x00 },
@@ -30,19 +28,17 @@
 	{ 4, 0x49 },
 };
 
-static const unsigned int max9877_pgain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(max9877_pgain_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(0, 900, 0),
-	2, 2, TLV_DB_SCALE_ITEM(2000, 0, 0),
-};
+	2, 2, TLV_DB_SCALE_ITEM(2000, 0, 0)
+);
 
-static const unsigned int max9877_output_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(max9877_output_tlv,
 	0, 7, TLV_DB_SCALE_ITEM(-7900, 400, 1),
 	8, 15, TLV_DB_SCALE_ITEM(-4700, 300, 0),
 	16, 23, TLV_DB_SCALE_ITEM(-2300, 200, 0),
-	24, 31, TLV_DB_SCALE_ITEM(-700, 100, 0),
-};
+	24, 31, TLV_DB_SCALE_ITEM(-700, 100, 0)
+);
 
 static const char *max9877_out_mode[] = {
 	"INA -> SPK",
@@ -123,7 +119,7 @@
 	{ "HPR", NULL, "SHDN" },
 };
 
-static const struct snd_soc_codec_driver max9877_codec = {
+static const struct snd_soc_component_driver max9877_component_driver = {
 	.controls = max9877_controls,
 	.num_controls = ARRAY_SIZE(max9877_controls),
 
@@ -145,6 +141,7 @@
 static int max9877_i2c_probe(struct i2c_client *client,
 			     const struct i2c_device_id *id)
 {
+	struct regmap *regmap;
 	int i;
 
 	regmap = devm_regmap_init_i2c(client, &max9877_regmap);
@@ -155,14 +152,8 @@
 	for (i = 0; i < ARRAY_SIZE(max9877_regs); i++)
 		regmap_write(regmap, max9877_regs[i].reg, max9877_regs[i].def);
 
-	return snd_soc_register_codec(&client->dev, &max9877_codec, NULL, 0);
-}
-
-static int max9877_i2c_remove(struct i2c_client *client)
-{
-	snd_soc_unregister_codec(&client->dev);
-
-	return 0;
+	return devm_snd_soc_register_component(&client->dev,
+			&max9877_component_driver, NULL, 0);
 }
 
 static const struct i2c_device_id max9877_i2c_id[] = {
@@ -174,10 +165,8 @@
 static struct i2c_driver max9877_i2c_driver = {
 	.driver = {
 		.name = "max9877",
-		.owner = THIS_MODULE,
 	},
 	.probe = max9877_i2c_probe,
-	.remove = max9877_i2c_remove,
 	.id_table = max9877_i2c_id,
 };
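With devm_snd_soc_register_component() the registration is bound to the device's lifetime, so the core unregisters it automatically on unbind and the explicit .remove callback plus snd_soc_unregister_codec() call become dead weight. The shape of the conversion, as a hypothetical sketch:

	static int foo_i2c_probe(struct i2c_client *client,
				 const struct i2c_device_id *id)
	{
		/* ... regmap setup ... */
		return devm_snd_soc_register_component(&client->dev,
				&foo_component_driver, NULL, 0);
	}
	/* no foo_i2c_remove() and no .remove member are needed */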
 
diff --git a/sound/soc/codecs/max98925.c b/sound/soc/codecs/max98925.c
index aad6642..5990de3 100644
--- a/sound/soc/codecs/max98925.c
+++ b/sound/soc/codecs/max98925.c
@@ -271,8 +271,6 @@
 			break;
 		}
 	}
-	dev_dbg(codec->dev, "%s: sample rate is %d, returning %d\n",
-				__func__, rate_table[i].rate, *value);
 	return ret;
 }
 
@@ -432,7 +430,7 @@
 	struct snd_soc_codec *codec = dai->codec;
 	struct max98925_priv *max98925 = snd_soc_codec_get_drvdata(codec);
 
-	switch (snd_pcm_format_width(params_format(params))) {
+	switch (params_width(params)) {
 	case 16:
 		regmap_update_bits(max98925->regmap,
 				MAX98925_FORMAT,
@@ -523,7 +521,6 @@
 	struct max98925_priv *max98925 = snd_soc_codec_get_drvdata(codec);
 
 	max98925->codec = codec;
-	codec->control_data = max98925->regmap;
 	regmap_write(max98925->regmap, MAX98925_GLOBAL_ENABLE, 0x00);
 	/* It's not the default but we need to set DAI_DLY */
 	regmap_write(max98925->regmap,
@@ -639,7 +636,6 @@
 static struct i2c_driver max98925_i2c_driver = {
 	.driver = {
 		.name = "max98925",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(max98925_of_match),
 		.pm = NULL,
 	},
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 3d44fc5..3e770cb 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -650,14 +650,14 @@
 #define MC13783_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
 	SNDRV_PCM_FMTBIT_S24_LE)
 
-static struct snd_soc_dai_ops mc13783_ops_dac = {
+static const struct snd_soc_dai_ops mc13783_ops_dac = {
 	.hw_params	= mc13783_pcm_hw_params_dac,
 	.set_fmt	= mc13783_set_fmt_async,
 	.set_sysclk	= mc13783_set_sysclk_dac,
 	.set_tdm_slot	= mc13783_set_tdm_slot_dac,
 };
 
-static struct snd_soc_dai_ops mc13783_ops_codec = {
+static const struct snd_soc_dai_ops mc13783_ops_codec = {
 	.hw_params	= mc13783_pcm_hw_params_codec,
 	.set_fmt	= mc13783_set_fmt_async,
 	.set_sysclk	= mc13783_set_sysclk_codec,
@@ -698,7 +698,7 @@
 	},
 };
 
-static struct snd_soc_dai_ops mc13783_ops_sync = {
+static const struct snd_soc_dai_ops mc13783_ops_sync = {
 	.hw_params	= mc13783_pcm_hw_params_sync,
 	.set_fmt	= mc13783_set_fmt_sync,
 	.set_sysclk	= mc13783_set_sysclk_sync,
diff --git a/sound/soc/codecs/ml26124.c b/sound/soc/codecs/ml26124.c
index b74118e..f561c78 100644
--- a/sound/soc/codecs/ml26124.c
+++ b/sound/soc/codecs/ml26124.c
@@ -199,7 +199,7 @@
 	{12288000, 48000, 0xc, 0x0, 0x30, 0x0, 0x4},
 };
 
-static struct reg_default ml26124_reg[] = {
+static const struct reg_default ml26124_reg[] = {
 	/* CLOCK control Register */
 	{0x00, 0x00 },	/* Sampling Rate */
 	{0x02, 0x00},	/* PLL NL */
@@ -597,7 +597,6 @@
 static struct i2c_driver ml26124_i2c_driver = {
 	.driver = {
 		.name = "ml26124",
-		.owner = THIS_MODULE,
 	},
 	.probe = ml26124_i2c_probe,
 	.remove = ml26124_i2c_remove,
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index e7ba557..5832523 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -95,17 +95,22 @@
 	struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
 	int i = 0, val = -1, enable = 0;
 
-	if (priv->deemph)
-		for (i = 0; i < ARRAY_SIZE(pcm1681_deemph); i++)
-			if (pcm1681_deemph[i] == priv->rate)
+	if (priv->deemph) {
+		for (i = 0; i < ARRAY_SIZE(pcm1681_deemph); i++) {
+			if (pcm1681_deemph[i] == priv->rate) {
 				val = i;
+				break;
+			}
+		}
+	}
 
 	if (val != -1) {
 		regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
 				   PCM1681_DEEMPH_RATE_MASK, val << 3);
 		enable = 1;
-	} else
+	} else {
 		enable = 0;
+	}
 
 	/* enable/disable deemphasis functionality */
 	return regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
@@ -330,7 +335,6 @@
 static struct i2c_driver pcm1681_i2c_driver = {
 	.driver = {
 		.name	= "pcm1681",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(pcm1681_dt_ids),
 	},
 	.id_table	= pcm1681_i2c_id,
diff --git a/sound/soc/codecs/pcm512x-i2c.c b/sound/soc/codecs/pcm512x-i2c.c
index dcdfac0..dbff416 100644
--- a/sound/soc/codecs/pcm512x-i2c.c
+++ b/sound/soc/codecs/pcm512x-i2c.c
@@ -67,7 +67,6 @@
 	.id_table	= pcm512x_i2c_id,
 	.driver		= {
 		.name	= "pcm512x",
-		.owner	= THIS_MODULE,
 		.of_match_table = pcm512x_of_match,
 		.pm     = &pcm512x_pm_ops,
 	},
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index de16429..047c489 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -1117,7 +1117,7 @@
 		params_rate(params),
 		params_channels(params));
 
-	switch (snd_pcm_format_width(params_format(params))) {
+	switch (params_width(params)) {
 	case 16:
 		alen = PCM512x_ALEN_16;
 		break;
@@ -1132,7 +1132,7 @@
 		break;
 	default:
 		dev_err(codec->dev, "Bad frame size: %d\n",
-			snd_pcm_format_width(params_format(params)));
+			params_width(params));
 		return -EINVAL;
 	}
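params_width() is the <sound/pcm_params.h> shorthand for the expression it replaces here; it is defined roughly as

	static inline int params_width(const struct snd_pcm_hw_params *p)
	{
		return snd_pcm_format_width(params_format(p));
	}

so the switch still sees the sample width in bits, or a negative errno for an unknown format.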
 
diff --git a/sound/soc/codecs/rl6231.c b/sound/soc/codecs/rl6231.c
index 56650d6..aca479f 100644
--- a/sound/soc/codecs/rl6231.c
+++ b/sound/soc/codecs/rl6231.c
@@ -11,38 +11,98 @@
  */
 
 #include <linux/module.h>
+#include <linux/regmap.h>
 
 #include "rl6231.h"
 
 /**
- * rl6231_calc_dmic_clk - Calculate the parameter of dmic.
+ * rl6231_get_pre_div - Return the value of pre divider.
+ *
+ * @map: map for setting.
+ * @reg: register.
+ * @sft: shift.
+ *
+ * Return the value of the pre divider decoded from the given register
+ * field, or a negative error code for an unexpected field value.
+ */
+int rl6231_get_pre_div(struct regmap *map, unsigned int reg, int sft)
+{
+	int pd, val;
+
+	regmap_read(map, reg, &val);
+
+	val = (val >> sft) & 0x7;
+
+	switch (val) {
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+		pd = val + 1;
+		break;
+	case 4:
+		pd = 6;
+		break;
+	case 5:
+		pd = 8;
+		break;
+	case 6:
+		pd = 12;
+		break;
+	case 7:
+		pd = 16;
+		break;
+	default:
+		pd = -EINVAL;
+		break;
+	}
+
+	return pd;
+}
+EXPORT_SYMBOL_GPL(rl6231_get_pre_div);
+
+/**
+ * rl6231_calc_dmic_clk - Calculate the frequency divider parameter of dmic.
  *
  * @rate: base clock rate.
  *
- * Choose dmic clock between 1MHz and 3MHz.
- * It is better for clock to approximate 3MHz.
+ * Choose the divider parameter that gives the highest possible DMIC
+ * frequency in the 1 MHz - 3 MHz range.
  */
 int rl6231_calc_dmic_clk(int rate)
 {
-	int div[] = {2, 3, 4, 6, 8, 12}, idx = -EINVAL;
-	int i, red, bound, temp;
+	int div[] = {2, 3, 4, 6, 8, 12};
+	int i;
 
-	red = 3000000 * 12;
-	for (i = 0; i < ARRAY_SIZE(div); i++) {
-		bound = div[i] * 3000000;
-		if (rate > bound)
-			continue;
-		temp = bound - rate;
-		if (temp < red) {
-			red = temp;
-			idx = i;
-		}
+	if (rate < 1000000 * div[0]) {
+		pr_warn("Base clock rate %d is too low\n", rate);
+		return -EINVAL;
 	}
 
-	return idx;
+	for (i = 0; i < ARRAY_SIZE(div); i++) {
+		/* find divider that gives DMIC frequency below 3MHz */
+		if (3000000 * div[i] >= rate)
+			return i;
+	}
+
+	pr_warn("Base clock rate %d is too high\n", rate);
+	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(rl6231_calc_dmic_clk);
 
+struct pll_calc_map {
+	unsigned int pll_in;
+	unsigned int pll_out;
+	int k;
+	int n;
+	int m;
+	bool m_bp;
+};
+
+static const struct pll_calc_map pll_preset_table[] = {
+	{19200000,  24576000,  3, 30, 3, false},
+};
+
 /**
  * rl6231_pll_calc - Calculate PLL M/N/K code.
  * @freq_in: external clock provided to codec.
@@ -57,7 +117,7 @@
 	const unsigned int freq_out, struct rl6231_pll_code *pll_code)
 {
 	int max_n = RL6231_PLL_N_MAX, max_m = RL6231_PLL_M_MAX;
-	int k, red, n_t, pll_out, in_t, out_t;
+	int i, k, red, n_t, pll_out, in_t, out_t;
 	int n = 0, m = 0, m_t = 0;
 	int red_t = abs(freq_out - freq_in);
 	bool bypass = false;
@@ -65,6 +125,18 @@
 	if (RL6231_PLL_INP_MAX < freq_in || RL6231_PLL_INP_MIN > freq_in)
 		return -EINVAL;
 
+	for (i = 0; i < ARRAY_SIZE(pll_preset_table); i++) {
+		if (freq_in == pll_preset_table[i].pll_in &&
+			freq_out == pll_preset_table[i].pll_out) {
+			k = pll_preset_table[i].k;
+			m = pll_preset_table[i].m;
+			n = pll_preset_table[i].n;
+			bypass = pll_preset_table[i].m_bp;
+			pr_debug("Use preset PLL parameter table\n");
+			goto code_find;
+		}
+	}
+
 	k = 100000000 / freq_out - 2;
 	if (k > RL6231_PLL_K_MAX)
 		k = RL6231_PLL_K_MAX;
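Two details worth spelling out. The rewritten rl6231_calc_dmic_clk() returns the first, i.e. smallest, divider that keeps the DMIC clock at or below 3 MHz. For a 24.576 MHz base clock, 3 MHz * 8 is still below 24.576 MHz but 3 MHz * 12 is not, so index 5 (divider 12) is chosen and the DMIC runs at 24.576 / 12 = 2.048 MHz, inside the 1-3 MHz window:

	/* hedged usage sketch */
	idx = rl6231_calc_dmic_clk(24576000);	/* -> 5, i.e. div[5] == 12 */

The preset table covers a combination the generic search presumably cannot reach exactly: assuming the usual RL6231 relation Fout = Fin * (N + 2) / ((M + 2) * (K + 2)), the single entry gives 19.2 MHz * 32 / (5 * 5) = 24.576 MHz with no error.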
diff --git a/sound/soc/codecs/rl6231.h b/sound/soc/codecs/rl6231.h
index 0f7b057..4c77b44 100644
--- a/sound/soc/codecs/rl6231.h
+++ b/sound/soc/codecs/rl6231.h
@@ -30,5 +30,6 @@
 int rl6231_pll_calc(const unsigned int freq_in,
 	const unsigned int freq_out, struct rl6231_pll_code *pll_code);
 int rl6231_get_clk_info(int sclk, int rate);
+int rl6231_get_pre_div(struct regmap *map, unsigned int reg, int sft);
 
 #endif /* __RL6231_H__ */
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 5c43e26..bd93658 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -38,7 +38,7 @@
 #define RT288_VENDOR_ID 0x10ec0288
 
 struct rt286_priv {
-	struct reg_default *index_cache;
+	const struct reg_default *index_cache;
 	int index_cache_size;
 	struct regmap *regmap;
 	struct snd_soc_codec *codec;
@@ -50,7 +50,7 @@
 	int clk_id;
 };
 
-static struct reg_default rt286_index_def[] = {
+static const struct reg_default rt286_index_def[] = {
 	{ 0x01, 0xaaaa },
 	{ 0x02, 0x8aaa },
 	{ 0x03, 0x0002 },
@@ -1108,7 +1108,7 @@
 };
 MODULE_DEVICE_TABLE(acpi, rt286_acpi_match);
 
-static struct dmi_system_id force_combo_jack_table[] = {
+static const struct dmi_system_id force_combo_jack_table[] = {
 	{
 		.ident = "Intel Wilson Beach",
 		.matches = {
@@ -1118,7 +1118,7 @@
 	{ }
 };
 
-static struct dmi_system_id dmi_dell_dino[] = {
+static const struct dmi_system_id dmi_dell_dino[] = {
 	{
 		.ident = "Dell Dino",
 		.matches = {
@@ -1157,7 +1157,7 @@
 	}
 	if (val != RT286_VENDOR_ID && val != RT288_VENDOR_ID) {
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt286\n", val);
+			"Device with ID register %#x is not rt286\n", val);
 		return -ENODEV;
 	}
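The %#x conversion prints the 0x prefix itself, so the message reads "... ID register 0x10ec0286 ..." instead of a bare hex string (note that %#x prints a plain 0 for a zero value):

	pr_info("id %#x\n", 0x10ec0288);	/* prints: id 0x10ec0288 */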
 
@@ -1259,7 +1259,6 @@
 static struct i2c_driver rt286_i2c_driver = {
 	.driver = {
 		   .name = "rt286",
-		   .owner = THIS_MODULE,
 		   .acpi_match_table = ACPI_PTR(rt286_acpi_match),
 		   },
 	.probe = rt286_i2c_probe,
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
new file mode 100644
index 0000000..3c2f0f8
--- /dev/null
+++ b/sound/soc/codecs/rt298.c
@@ -0,0 +1,1271 @@
+/*
+ * rt298.c  --  RT298 ALSA SoC audio codec driver
+ *
+ * Copyright 2015 Realtek Semiconductor Corp.
+ * Author: Bard Liao <bardliao@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <sound/jack.h>
+#include <linux/workqueue.h>
+#include <sound/rt298.h>
+#include <sound/hda_verbs.h>
+
+#include "rl6347a.h"
+#include "rt298.h"
+
+#define RT298_VENDOR_ID 0x10ec0298
+
+struct rt298_priv {
+	struct reg_default *index_cache;
+	int index_cache_size;
+	struct regmap *regmap;
+	struct snd_soc_codec *codec;
+	struct rt298_platform_data pdata;
+	struct i2c_client *i2c;
+	struct snd_soc_jack *jack;
+	struct delayed_work jack_detect_work;
+	int sys_clk;
+	int clk_id;
+	int is_hp_in;
+};
+
+static struct reg_default rt298_index_def[] = {
+	{ 0x01, 0xaaaa },
+	{ 0x02, 0x8aaa },
+	{ 0x03, 0x0002 },
+	{ 0x04, 0xaf01 },
+	{ 0x08, 0x000d },
+	{ 0x09, 0xd810 },
+	{ 0x0a, 0x0120 },
+	{ 0x0b, 0x0000 },
+	{ 0x0d, 0x2800 },
+	{ 0x0f, 0x0000 },
+	{ 0x19, 0x0a17 },
+	{ 0x20, 0x0020 },
+	{ 0x33, 0x0208 },
+	{ 0x46, 0x0300 },
+	{ 0x49, 0x0004 },
+	{ 0x4f, 0x50e9 },
+	{ 0x50, 0x2000 },
+	{ 0x63, 0x2902 },
+	{ 0x67, 0x1111 },
+	{ 0x68, 0x1016 },
+	{ 0x69, 0x273f },
+};
+#define INDEX_CACHE_SIZE ARRAY_SIZE(rt298_index_def)
+
+static const struct reg_default rt298_reg[] = {
+	{ 0x00170500, 0x00000400 },
+	{ 0x00220000, 0x00000031 },
+	{ 0x00239000, 0x0000007f },
+	{ 0x0023a000, 0x0000007f },
+	{ 0x00270500, 0x00000400 },
+	{ 0x00370500, 0x00000400 },
+	{ 0x00870500, 0x00000400 },
+	{ 0x00920000, 0x00000031 },
+	{ 0x00935000, 0x000000c3 },
+	{ 0x00936000, 0x000000c3 },
+	{ 0x00970500, 0x00000400 },
+	{ 0x00b37000, 0x00000097 },
+	{ 0x00b37200, 0x00000097 },
+	{ 0x00b37300, 0x00000097 },
+	{ 0x00c37000, 0x00000000 },
+	{ 0x00c37100, 0x00000080 },
+	{ 0x01270500, 0x00000400 },
+	{ 0x01370500, 0x00000400 },
+	{ 0x01371f00, 0x411111f0 },
+	{ 0x01439000, 0x00000080 },
+	{ 0x0143a000, 0x00000080 },
+	{ 0x01470700, 0x00000000 },
+	{ 0x01470500, 0x00000400 },
+	{ 0x01470c00, 0x00000000 },
+	{ 0x01470100, 0x00000000 },
+	{ 0x01837000, 0x00000000 },
+	{ 0x01870500, 0x00000400 },
+	{ 0x02050000, 0x00000000 },
+	{ 0x02139000, 0x00000080 },
+	{ 0x0213a000, 0x00000080 },
+	{ 0x02170100, 0x00000000 },
+	{ 0x02170500, 0x00000400 },
+	{ 0x02170700, 0x00000000 },
+	{ 0x02270100, 0x00000000 },
+	{ 0x02370100, 0x00000000 },
+	{ 0x01870700, 0x00000020 },
+	{ 0x00830000, 0x000000c3 },
+	{ 0x00930000, 0x000000c3 },
+	{ 0x01270700, 0x00000000 },
+};
+
+static bool rt298_volatile_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case 0 ... 0xff:
+	case RT298_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID):
+	case RT298_GET_HP_SENSE:
+	case RT298_GET_MIC1_SENSE:
+	case RT298_PROC_COEF:
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_MIC1, 0):
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_SPK_OUT, 0):
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_HP_OUT, 0):
+		return true;
+	default:
+		return true;
+	}
+}
+
+static bool rt298_readable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case 0 ... 0xff:
+	case RT298_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID):
+	case RT298_GET_HP_SENSE:
+	case RT298_GET_MIC1_SENSE:
+	case RT298_SET_AUDIO_POWER:
+	case RT298_SET_HPO_POWER:
+	case RT298_SET_SPK_POWER:
+	case RT298_SET_DMIC1_POWER:
+	if (!rt298)
+	case RT298_HPO_MUX:
+	case RT298_ADC0_MUX:
+	case RT298_ADC1_MUX:
+	case RT298_SET_MIC1:
+	case RT298_SET_PIN_HPO:
+	case RT298_SET_PIN_SPK:
+	case RT298_SET_PIN_DMIC1:
+	case RT298_SPK_EAPD:
+	case RT298_SET_AMP_GAIN_HPO:
+	case RT298_SET_DMIC2_DEFAULT:
+	case RT298_DACL_GAIN:
+	case RT298_DACR_GAIN:
+	case RT298_ADCL_GAIN:
+	case RT298_ADCR_GAIN:
+	case RT298_MIC_GAIN:
+	case RT298_SPOL_GAIN:
+	case RT298_SPOR_GAIN:
+	case RT298_HPOL_GAIN:
+	case RT298_HPOR_GAIN:
+	case RT298_F_DAC_SWITCH:
+	case RT298_F_RECMIX_SWITCH:
+	case RT298_REC_MIC_SWITCH:
+	case RT298_REC_I2S_SWITCH:
+	case RT298_REC_LINE_SWITCH:
+	case RT298_REC_BEEP_SWITCH:
+	case RT298_DAC_FORMAT:
+	case RT298_ADC_FORMAT:
+	case RT298_COEF_INDEX:
+	case RT298_PROC_COEF:
+	case RT298_SET_AMP_GAIN_ADC_IN1:
+	case RT298_SET_AMP_GAIN_ADC_IN2:
+	case RT298_SET_POWER(RT298_DAC_OUT1):
+	case RT298_SET_POWER(RT298_DAC_OUT2):
+	case RT298_SET_POWER(RT298_ADC_IN1):
+	case RT298_SET_POWER(RT298_ADC_IN2):
+	case RT298_SET_POWER(RT298_DMIC2):
+	case RT298_SET_POWER(RT298_MIC1):
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_MIC1, 0):
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_SPK_OUT, 0):
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_HP_OUT, 0):
+		return true;
+	default:
+		return false;
+	}
+}
+
+#ifdef CONFIG_PM
+static void rt298_index_sync(struct snd_soc_codec *codec)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+	int i;
+
+	for (i = 0; i < INDEX_CACHE_SIZE; i++) {
+		snd_soc_write(codec, rt298->index_cache[i].reg,
+				  rt298->index_cache[i].def);
+	}
+}
+#endif
+
+static int rt298_support_power_controls[] = {
+	RT298_DAC_OUT1,
+	RT298_DAC_OUT2,
+	RT298_ADC_IN1,
+	RT298_ADC_IN2,
+	RT298_MIC1,
+	RT298_DMIC1,
+	RT298_DMIC2,
+	RT298_SPK_OUT,
+	RT298_HP_OUT,
+};
+#define RT298_POWER_REG_LEN ARRAY_SIZE(rt298_support_power_controls)
+
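+/*
+ * Query headphone and mic presence.  In combo-jack mode (pdata.cbj_en)
+ * the jack is biased and CBJ_CTRL2 sampled at two bias levels to tell a
+ * headset mic from plain headphones; otherwise the pin-sense verbs are
+ * read directly.  Returns -1 when the headphone state is unchanged since
+ * the last call, -EINVAL without a codec, 0 otherwise.
+ */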
+static int rt298_jack_detect(struct rt298_priv *rt298, bool *hp, bool *mic)
+{
+	struct snd_soc_dapm_context *dapm;
+	unsigned int val, buf;
+
+	*hp = false;
+	*mic = false;
+
+	if (!rt298->codec)
+		return -EINVAL;
+
+	dapm = snd_soc_codec_get_dapm(rt298->codec);
+
+	if (rt298->pdata.cbj_en) {
+		regmap_read(rt298->regmap, RT298_GET_HP_SENSE, &buf);
+		*hp = buf & 0x80000000;
+		if (*hp == rt298->is_hp_in)
+			return -1;
+		rt298->is_hp_in = *hp;
+		if (*hp) {
+			/* power on HV,VERF */
+			regmap_update_bits(rt298->regmap,
+				RT298_DC_GAIN, 0x200, 0x200);
+
+			snd_soc_dapm_force_enable_pin(dapm, "HV");
+			snd_soc_dapm_force_enable_pin(dapm, "VREF");
+			/* power LDO1 */
+			snd_soc_dapm_force_enable_pin(dapm, "LDO1");
+			snd_soc_dapm_sync(dapm);
+
+			regmap_write(rt298->regmap, RT298_SET_MIC1, 0x24);
+			msleep(50);
+
+			regmap_update_bits(rt298->regmap,
+				RT298_CBJ_CTRL1, 0xfcc0, 0xd400);
+			msleep(300);
+			regmap_read(rt298->regmap, RT298_CBJ_CTRL2, &val);
+
+			if (0x0070 == (val & 0x0070)) {
+				*mic = true;
+			} else {
+				regmap_update_bits(rt298->regmap,
+					RT298_CBJ_CTRL1, 0xfcc0, 0xe400);
+				msleep(300);
+				regmap_read(rt298->regmap,
+					RT298_CBJ_CTRL2, &val);
+				if (0x0070 == (val & 0x0070))
+					*mic = true;
+				else
+					*mic = false;
+			}
+			regmap_update_bits(rt298->regmap,
+				RT298_DC_GAIN, 0x200, 0x0);
+
+		} else {
+			*mic = false;
+			regmap_write(rt298->regmap, RT298_SET_MIC1, 0x20);
+		}
+	} else {
+		regmap_read(rt298->regmap, RT298_GET_HP_SENSE, &buf);
+		*hp = buf & 0x80000000;
+		regmap_read(rt298->regmap, RT298_GET_MIC1_SENSE, &buf);
+		*mic = buf & 0x80000000;
+	}
+
+	snd_soc_dapm_disable_pin(dapm, "HV");
+	snd_soc_dapm_disable_pin(dapm, "VREF");
+	if (!*hp)
+		snd_soc_dapm_disable_pin(dapm, "LDO1");
+	snd_soc_dapm_sync(dapm);
+
+	pr_debug("*hp = %d *mic = %d\n", *hp, *mic);
+
+	return 0;
+}
+
+static void rt298_jack_detect_work(struct work_struct *work)
+{
+	struct rt298_priv *rt298 =
+		container_of(work, struct rt298_priv, jack_detect_work.work);
+	int status = 0;
+	bool hp = false;
+	bool mic = false;
+
+	if (rt298_jack_detect(rt298, &hp, &mic) < 0)
+		return;
+
+	if (hp == true)
+		status |= SND_JACK_HEADPHONE;
+
+	if (mic == true)
+		status |= SND_JACK_MICROPHONE;
+
+	snd_soc_jack_report(rt298->jack, status,
+		SND_JACK_MICROPHONE | SND_JACK_HEADPHONE);
+}
+
+int rt298_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	rt298->jack = jack;
+
+	/* Send an initial empty report */
+	snd_soc_jack_report(rt298->jack, 0,
+		SND_JACK_MICROPHONE | SND_JACK_HEADPHONE);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt298_mic_detect);
+
+static int is_mclk_mode(struct snd_soc_dapm_widget *source,
+			 struct snd_soc_dapm_widget *sink)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	if (rt298->clk_id == RT298_SCLK_S_MCLK)
+		return 1;
+	else
+		return 0;
+}
+
+static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -6350, 50, 0);
+static const DECLARE_TLV_DB_SCALE(mic_vol_tlv, 0, 1000, 0);
+
+static const struct snd_kcontrol_new rt298_snd_controls[] = {
+	SOC_DOUBLE_R_TLV("DAC0 Playback Volume", RT298_DACL_GAIN,
+			    RT298_DACR_GAIN, 0, 0x7f, 0, out_vol_tlv),
+	SOC_DOUBLE_R_TLV("ADC0 Capture Volume", RT298_ADCL_GAIN,
+			    RT298_ADCR_GAIN, 0, 0x7f, 0, out_vol_tlv),
+	SOC_SINGLE_TLV("AMIC Volume", RT298_MIC_GAIN,
+			    0, 0x3, 0, mic_vol_tlv),
+	SOC_DOUBLE_R("Speaker Playback Switch", RT298_SPOL_GAIN,
+			    RT298_SPOR_GAIN, RT298_MUTE_SFT, 1, 1),
+};
+
+/* Digital Mixer */
+static const struct snd_kcontrol_new rt298_front_mix[] = {
+	SOC_DAPM_SINGLE("DAC Switch",  RT298_F_DAC_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+	SOC_DAPM_SINGLE("RECMIX Switch", RT298_F_RECMIX_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+};
+
+/* Analog Input Mixer */
+static const struct snd_kcontrol_new rt298_rec_mix[] = {
+	SOC_DAPM_SINGLE("Mic1 Switch", RT298_REC_MIC_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+	SOC_DAPM_SINGLE("I2S Switch", RT298_REC_I2S_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+	SOC_DAPM_SINGLE("Line1 Switch", RT298_REC_LINE_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+	SOC_DAPM_SINGLE("Beep Switch", RT298_REC_BEEP_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new spo_enable_control =
+	SOC_DAPM_SINGLE("Switch", RT298_SET_PIN_SPK,
+			RT298_SET_PIN_SFT, 1, 0);
+
+static const struct snd_kcontrol_new hpol_enable_control =
+	SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT298_HPOL_GAIN,
+			RT298_MUTE_SFT, 1, 1);
+
+static const struct snd_kcontrol_new hpor_enable_control =
+	SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT298_HPOR_GAIN,
+			RT298_MUTE_SFT, 1, 1);
+
+/* ADC0 source */
+static const char * const rt298_adc_src[] = {
+	"Mic", "RECMIX", "Dmic"
+};
+
+static const int rt298_adc_values[] = {
+	0, 4, 5,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(
+	rt298_adc0_enum, RT298_ADC0_MUX, RT298_ADC_SEL_SFT,
+	RT298_ADC_SEL_MASK, rt298_adc_src, rt298_adc_values);
+
+static const struct snd_kcontrol_new rt298_adc0_mux =
+	SOC_DAPM_ENUM("ADC 0 source", rt298_adc0_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(
+	rt298_adc1_enum, RT298_ADC1_MUX, RT298_ADC_SEL_SFT,
+	RT298_ADC_SEL_MASK, rt298_adc_src, rt298_adc_values);
+
+static const struct snd_kcontrol_new rt298_adc1_mux =
+	SOC_DAPM_ENUM("ADC 1 source", rt298_adc1_enum);
+
+static const char * const rt298_dac_src[] = {
+	"Front", "Surround"
+};
+/* HP-OUT source */
+static SOC_ENUM_SINGLE_DECL(rt298_hpo_enum, RT298_HPO_MUX,
+				0, rt298_dac_src);
+
+static const struct snd_kcontrol_new rt298_hpo_mux =
+SOC_DAPM_ENUM("HPO source", rt298_hpo_enum);
+
+/* SPK-OUT source */
+static SOC_ENUM_SINGLE_DECL(rt298_spo_enum, RT298_SPK_MUX,
+				0, rt298_dac_src);
+
+static const struct snd_kcontrol_new rt298_spo_mux =
+SOC_DAPM_ENUM("SPO source", rt298_spo_enum);
+
+static int rt298_spk_event(struct snd_soc_dapm_widget *w,
+			    struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_write(codec,
+			RT298_SPK_EAPD, RT298_SET_EAPD_HIGH);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_write(codec,
+			RT298_SPK_EAPD, RT298_SET_EAPD_LOW);
+		break;
+
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static int rt298_set_dmic1_event(struct snd_soc_dapm_widget *w,
+				  struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_write(codec, RT298_SET_PIN_DMIC1, 0x20);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_write(codec, RT298_SET_PIN_DMIC1, 0);
+		break;
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static int rt298_adc_event(struct snd_soc_dapm_widget *w,
+			     struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	unsigned int nid;
+
+	nid = (w->reg >> 20) & 0xff;
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_update_bits(codec,
+			VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, nid, 0),
+			0x7080, 0x7000);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_update_bits(codec,
+			VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, nid, 0),
+			0x7080, 0x7080);
+		break;
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static int rt298_mic1_event(struct snd_soc_dapm_widget *w,
+			     struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec,
+			RT298_A_BIAS_CTRL3, 0xc000, 0x8000);
+		snd_soc_update_bits(codec,
+			RT298_A_BIAS_CTRL2, 0xc000, 0x8000);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec,
+			RT298_A_BIAS_CTRL3, 0xc000, 0x0000);
+		snd_soc_update_bits(codec,
+			RT298_A_BIAS_CTRL2, 0xc000, 0x0000);
+		break;
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static int rt298_vref_event(struct snd_soc_dapm_widget *w,
+			     struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec,
+			RT298_CBJ_CTRL1, 0x0400, 0x0000);
+		mdelay(50);
+		break;
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget rt298_dapm_widgets[] = {
+
+	SND_SOC_DAPM_SUPPLY_S("HV", 1, RT298_POWER_CTRL1,
+		12, 1, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("VREF", RT298_POWER_CTRL1,
+		0, 1, rt298_vref_event, SND_SOC_DAPM_PRE_PMU),
+	SND_SOC_DAPM_SUPPLY_S("BG_MBIAS", 1, RT298_POWER_CTRL2,
+		1, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("LDO1", 1, RT298_POWER_CTRL2,
+		2, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("LDO2", 1, RT298_POWER_CTRL2,
+		3, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("VREF1", 1, RT298_POWER_CTRL2,
+		4, 1, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("LV", 2, RT298_POWER_CTRL1,
+		13, 1, NULL, 0),
+
+	SND_SOC_DAPM_SUPPLY("MCLK MODE", RT298_PLL_CTRL1,
+		5, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("MIC1 Input Buffer", SND_SOC_NOPM,
+		0, 0, rt298_mic1_event, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	/* Input Lines */
+	SND_SOC_DAPM_INPUT("DMIC1 Pin"),
+	SND_SOC_DAPM_INPUT("DMIC2 Pin"),
+	SND_SOC_DAPM_INPUT("MIC1"),
+	SND_SOC_DAPM_INPUT("LINE1"),
+	SND_SOC_DAPM_INPUT("Beep"),
+
+	/* DMIC */
+	SND_SOC_DAPM_PGA_E("DMIC1", RT298_SET_POWER(RT298_DMIC1), 0, 1,
+		NULL, 0, rt298_set_dmic1_event,
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_PGA("DMIC2", RT298_SET_POWER(RT298_DMIC2), 0, 1,
+		NULL, 0),
+	SND_SOC_DAPM_SUPPLY("DMIC Receiver", SND_SOC_NOPM,
+		0, 0, NULL, 0),
+
+	/* REC Mixer */
+	SND_SOC_DAPM_MIXER("RECMIX", SND_SOC_NOPM, 0, 0,
+		rt298_rec_mix, ARRAY_SIZE(rt298_rec_mix)),
+
+	/* ADCs */
+	SND_SOC_DAPM_ADC("ADC 0", NULL, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_ADC("ADC 1", NULL, SND_SOC_NOPM, 0, 0),
+
+	/* ADC Mux */
+	SND_SOC_DAPM_MUX_E("ADC 0 Mux", RT298_SET_POWER(RT298_ADC_IN1), 0, 1,
+		&rt298_adc0_mux, rt298_adc_event, SND_SOC_DAPM_PRE_PMD |
+		SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_MUX_E("ADC 1 Mux", RT298_SET_POWER(RT298_ADC_IN2), 0, 1,
+		&rt298_adc1_mux, rt298_adc_event, SND_SOC_DAPM_PRE_PMD |
+		SND_SOC_DAPM_POST_PMU),
+
+	/* Audio Interface */
+	SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("AIF2RX", "AIF2 Playback", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF2TX", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0),
+
+	/* Output Side */
+	/* DACs */
+	SND_SOC_DAPM_DAC("DAC 0", NULL, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_DAC("DAC 1", NULL, SND_SOC_NOPM, 0, 0),
+
+	/* Output Mux */
+	SND_SOC_DAPM_MUX("SPK Mux", SND_SOC_NOPM, 0, 0, &rt298_spo_mux),
+	SND_SOC_DAPM_MUX("HPO Mux", SND_SOC_NOPM, 0, 0, &rt298_hpo_mux),
+
+	SND_SOC_DAPM_SUPPLY("HP Power", RT298_SET_PIN_HPO,
+		RT298_SET_PIN_SFT, 0, NULL, 0),
+
+	/* Output Mixer */
+	SND_SOC_DAPM_MIXER("Front", RT298_SET_POWER(RT298_DAC_OUT1), 0, 1,
+			rt298_front_mix, ARRAY_SIZE(rt298_front_mix)),
+	SND_SOC_DAPM_PGA("Surround", RT298_SET_POWER(RT298_DAC_OUT2), 0, 1,
+			NULL, 0),
+
+	/* Output Pga */
+	SND_SOC_DAPM_SWITCH_E("SPO", SND_SOC_NOPM, 0, 0,
+		&spo_enable_control, rt298_spk_event,
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_SWITCH("HPO L", SND_SOC_NOPM, 0, 0,
+		&hpol_enable_control),
+	SND_SOC_DAPM_SWITCH("HPO R", SND_SOC_NOPM, 0, 0,
+		&hpor_enable_control),
+
+	/* Output Lines */
+	SND_SOC_DAPM_OUTPUT("SPOL"),
+	SND_SOC_DAPM_OUTPUT("SPOR"),
+	SND_SOC_DAPM_OUTPUT("HPO Pin"),
+	SND_SOC_DAPM_OUTPUT("SPDIF"),
+};
+
+static const struct snd_soc_dapm_route rt298_dapm_routes[] = {
+
+	{"ADC 0", NULL, "MCLK MODE", is_mclk_mode},
+	{"ADC 1", NULL, "MCLK MODE", is_mclk_mode},
+	{"Front", NULL, "MCLK MODE", is_mclk_mode},
+	{"Surround", NULL, "MCLK MODE", is_mclk_mode},
+
+	{"HP Power", NULL, "LDO1"},
+	{"HP Power", NULL, "LDO2"},
+	{"HP Power", NULL, "LV"},
+	{"HP Power", NULL, "VREF1"},
+	{"HP Power", NULL, "BG_MBIAS"},
+
+	{"MIC1", NULL, "LDO1"},
+	{"MIC1", NULL, "LDO2"},
+	{"MIC1", NULL, "HV"},
+	{"MIC1", NULL, "LV"},
+	{"MIC1", NULL, "VREF"},
+	{"MIC1", NULL, "VREF1"},
+	{"MIC1", NULL, "BG_MBIAS"},
+	{"MIC1", NULL, "MIC1 Input Buffer"},
+
+	{"SPO", NULL, "LDO1"},
+	{"SPO", NULL, "LDO2"},
+	{"SPO", NULL, "HV"},
+	{"SPO", NULL, "LV"},
+	{"SPO", NULL, "VREF"},
+	{"SPO", NULL, "VREF1"},
+	{"SPO", NULL, "BG_MBIAS"},
+
+	{"DMIC1", NULL, "DMIC1 Pin"},
+	{"DMIC2", NULL, "DMIC2 Pin"},
+	{"DMIC1", NULL, "DMIC Receiver"},
+	{"DMIC2", NULL, "DMIC Receiver"},
+
+	{"RECMIX", "Beep Switch", "Beep"},
+	{"RECMIX", "Line1 Switch", "LINE1"},
+	{"RECMIX", "Mic1 Switch", "MIC1"},
+
+	{"ADC 0 Mux", "Dmic", "DMIC1"},
+	{"ADC 0 Mux", "RECMIX", "RECMIX"},
+	{"ADC 0 Mux", "Mic", "MIC1"},
+	{"ADC 1 Mux", "Dmic", "DMIC2"},
+	{"ADC 1 Mux", "RECMIX", "RECMIX"},
+	{"ADC 1 Mux", "Mic", "MIC1"},
+
+	{"ADC 0", NULL, "ADC 0 Mux"},
+	{"ADC 1", NULL, "ADC 1 Mux"},
+
+	{"AIF1TX", NULL, "ADC 0"},
+	{"AIF2TX", NULL, "ADC 1"},
+
+	{"DAC 0", NULL, "AIF1RX"},
+	{"DAC 1", NULL, "AIF2RX"},
+
+	{"Front", "DAC Switch", "DAC 0"},
+	{"Front", "RECMIX Switch", "RECMIX"},
+
+	{"Surround", NULL, "DAC 1"},
+
+	{"SPK Mux", "Front", "Front"},
+	{"SPK Mux", "Surround", "Surround"},
+
+	{"HPO Mux", "Front", "Front"},
+	{"HPO Mux", "Surround", "Surround"},
+
+	{"SPO", "Switch", "SPK Mux"},
+	{"HPO L", "Switch", "HPO Mux"},
+	{"HPO R", "Switch", "HPO Mux"},
+	{"HPO L", NULL, "HP Power"},
+	{"HPO R", NULL, "HP Power"},
+
+	{"SPOL", NULL, "SPO"},
+	{"SPOR", NULL, "SPO"},
+	{"HPO Pin", NULL, "HPO L"},
+	{"HPO Pin", NULL, "HPO R"},
+};
+
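+/*
+ * Only 44.1 kHz and 48 kHz streams are supported, and the rate must match
+ * the clock family of the configured sysclk (11.2896/22.5792 MHz for
+ * 44.1 kHz, 12.288/24.576 MHz for 48 kHz).  The sample width programs
+ * both the I2S data-length code and the DAC/ADC stream format.
+ */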
+static int rt298_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+	unsigned int val = 0;
+	int d_len_code;
+
+	switch (params_rate(params)) {
+	/* bit 14 0:48K 1:44.1K */
+	case 44100:
+	case 48000:
+		break;
+	default:
+		dev_err(codec->dev, "Unsupported sample rate %d\n",
+					params_rate(params));
+		return -EINVAL;
+	}
+	switch (rt298->sys_clk) {
+	case 12288000:
+	case 24576000:
+		if (params_rate(params) != 48000) {
+			dev_err(codec->dev, "Sample rate %d does not match sysclk %d\n",
+					params_rate(params), rt298->sys_clk);
+			return -EINVAL;
+		}
+		break;
+	case 11289600:
+	case 22579200:
+		if (params_rate(params) != 44100) {
+			dev_err(codec->dev, "Sample rate %d does not match sysclk %d\n",
+					params_rate(params), rt298->sys_clk);
+			return -EINVAL;
+		}
+		break;
+	}
+
+	if (params_channels(params) <= 16) {
+		/* bit 3:0 Number of Channel */
+		val |= (params_channels(params) - 1);
+	} else {
+		dev_err(codec->dev, "Unsupported channels %d\n",
+					params_channels(params));
+		return -EINVAL;
+	}
+
+	d_len_code = 0;
+	switch (params_width(params)) {
+	/* bit 6:4 Bits per Sample */
+	case 16:
+		d_len_code = 0;
+		val |= (0x1 << 4);
+		break;
+	case 32:
+		d_len_code = 2;
+		val |= (0x4 << 4);
+		break;
+	case 20:
+		d_len_code = 1;
+		val |= (0x2 << 4);
+		break;
+	case 24:
+		d_len_code = 2;
+		val |= (0x3 << 4);
+		break;
+	case 8:
+		d_len_code = 3;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec,
+		RT298_I2S_CTRL1, 0x0018, d_len_code << 3);
+	dev_dbg(codec->dev, "format val = 0x%x\n", val);
+
+	snd_soc_update_bits(codec, RT298_DAC_FORMAT, 0x407f, val);
+	snd_soc_update_bits(codec, RT298_ADC_FORMAT, 0x407f, val);
+
+	return 0;
+}
+
+static int rt298_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct snd_soc_codec *codec = dai->codec;
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x800, 0x800);
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x800, 0x0);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x300, 0x0);
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x300, 0x1 << 8);
+		break;
+	case SND_SOC_DAIFMT_DSP_A:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x300, 0x2 << 8);
+		break;
+	case SND_SOC_DAIFMT_DSP_B:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x300, 0x3 << 8);
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* bit 15 Stream Type 0:PCM 1:Non-PCM */
+	snd_soc_update_bits(codec, RT298_DAC_FORMAT, 0x8000, 0);
+	snd_soc_update_bits(codec, RT298_ADC_FORMAT, 0x8000, 0);
+
+	return 0;
+}
+
+static int rt298_set_dai_sysclk(struct snd_soc_dai *dai,
+				int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s freq=%d\n", __func__, freq);
+
+	if (RT298_SCLK_S_MCLK == clk_id) {
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x0100, 0x0);
+		snd_soc_update_bits(codec,
+			RT298_PLL_CTRL1, 0x20, 0x20);
+	} else {
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x0100, 0x0100);
+		snd_soc_update_bits(codec,
+			RT298_PLL_CTRL, 0x4, 0x4);
+		snd_soc_update_bits(codec,
+			RT298_PLL_CTRL1, 0x20, 0x0);
+	}
+
+	switch (freq) {
+	case 19200000:
+		if (RT298_SCLK_S_MCLK == clk_id) {
+			dev_err(codec->dev, "Should not use MCLK\n");
+			return -EINVAL;
+		}
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x40, 0x40);
+		break;
+	case 24000000:
+		if (RT298_SCLK_S_MCLK == clk_id) {
+			dev_err(codec->dev, "Should not use MCLK\n");
+			return -EINVAL;
+		}
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x40, 0x0);
+		break;
+	case 12288000:
+	case 11289600:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x8, 0x0);
+		snd_soc_update_bits(codec,
+			RT298_CLK_DIV, 0xfc1e, 0x0004);
+		break;
+	case 24576000:
+	case 22579200:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x8, 0x8);
+		snd_soc_update_bits(codec,
+			RT298_CLK_DIV, 0xfc1e, 0x5406);
+		break;
+	default:
+		dev_err(codec->dev, "Unsupported system clock\n");
+		return -EINVAL;
+	}
+
+	rt298->sys_clk = freq;
+	rt298->clk_id = clk_id;
+
+	return 0;
+}
+
+static int rt298_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
+{
+	struct snd_soc_codec *codec = dai->codec;
+
+	dev_dbg(codec->dev, "%s ratio=%d\n", __func__, ratio);
+	if (50 == ratio)
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x1000, 0x1000);
+	else
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x1000, 0x0);
+
+	return 0;
+}
+
+static int rt298_set_bias_level(struct snd_soc_codec *codec,
+				 enum snd_soc_bias_level level)
+{
+	switch (level) {
+	case SND_SOC_BIAS_PREPARE:
+		if (SND_SOC_BIAS_STANDBY ==
+			snd_soc_codec_get_bias_level(codec)) {
+			snd_soc_write(codec,
+				RT298_SET_AUDIO_POWER, AC_PWRST_D0);
+			snd_soc_update_bits(codec, 0x0d, 0x200, 0x200);
+			snd_soc_update_bits(codec, 0x52, 0x80, 0x0);
+			mdelay(20);
+			snd_soc_update_bits(codec, 0x0d, 0x200, 0x0);
+			snd_soc_update_bits(codec, 0x52, 0x80, 0x80);
+		}
+		break;
+
+	case SND_SOC_BIAS_ON:
+		mdelay(30);
+		snd_soc_update_bits(codec,
+			RT298_CBJ_CTRL1, 0x0400, 0x0400);
+
+		break;
+
+	case SND_SOC_BIAS_STANDBY:
+		snd_soc_write(codec,
+			RT298_SET_AUDIO_POWER, AC_PWRST_D3);
+		snd_soc_update_bits(codec,
+			RT298_CBJ_CTRL1, 0x0400, 0x0000);
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static irqreturn_t rt298_irq(int irq, void *data)
+{
+	struct rt298_priv *rt298 = data;
+	bool hp = false;
+	bool mic = false;
+	int ret, status = 0;
+
+	ret = rt298_jack_detect(rt298, &hp, &mic);
+
+	/* Clear IRQ */
+	regmap_update_bits(rt298->regmap, RT298_IRQ_CTRL, 0x1, 0x1);
+
+	if (ret == 0) {
+		if (hp == true)
+			status |= SND_JACK_HEADPHONE;
+
+		if (mic == true)
+			status |= SND_JACK_MICROPHONE;
+
+		snd_soc_jack_report(rt298->jack, status,
+			SND_JACK_MICROPHONE | SND_JACK_HEADPHONE);
+
+		pm_wakeup_event(&rt298->i2c->dev, 300);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int rt298_probe(struct snd_soc_codec *codec)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	rt298->codec = codec;
+
+	if (rt298->i2c->irq) {
+		regmap_update_bits(rt298->regmap,
+					RT298_IRQ_CTRL, 0x2, 0x2);
+
+		INIT_DELAYED_WORK(&rt298->jack_detect_work,
+					rt298_jack_detect_work);
+		schedule_delayed_work(&rt298->jack_detect_work,
+					msecs_to_jiffies(1250));
+	}
+
+	return 0;
+}
+
+static int rt298_remove(struct snd_soc_codec *codec)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	cancel_delayed_work_sync(&rt298->jack_detect_work);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int rt298_suspend(struct snd_soc_codec *codec)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	rt298->is_hp_in = -1;
+	regcache_cache_only(rt298->regmap, true);
+	regcache_mark_dirty(rt298->regmap);
+
+	return 0;
+}
+
+static int rt298_resume(struct snd_soc_codec *codec)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	regcache_cache_only(rt298->regmap, false);
+	rt298_index_sync(codec);
+	regcache_sync(rt298->regmap);
+
+	return 0;
+}
+#else
+#define rt298_suspend NULL
+#define rt298_resume NULL
+#endif
+
+#define RT298_STEREO_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+#define RT298_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
+
+static const struct snd_soc_dai_ops rt298_aif_dai_ops = {
+	.hw_params = rt298_hw_params,
+	.set_fmt = rt298_set_dai_fmt,
+	.set_sysclk = rt298_set_dai_sysclk,
+	.set_bclk_ratio = rt298_set_bclk_ratio,
+};
+
+static struct snd_soc_dai_driver rt298_dai[] = {
+	{
+		.name = "rt298-aif1",
+		.id = RT298_AIF1,
+		.playback = {
+			.stream_name = "AIF1 Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT298_STEREO_RATES,
+			.formats = RT298_FORMATS,
+		},
+		.capture = {
+			.stream_name = "AIF1 Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT298_STEREO_RATES,
+			.formats = RT298_FORMATS,
+		},
+		.ops = &rt298_aif_dai_ops,
+		.symmetric_rates = 1,
+	},
+	{
+		.name = "rt298-aif2",
+		.id = RT298_AIF2,
+		.playback = {
+			.stream_name = "AIF2 Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT298_STEREO_RATES,
+			.formats = RT298_FORMATS,
+		},
+		.capture = {
+			.stream_name = "AIF2 Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT298_STEREO_RATES,
+			.formats = RT298_FORMATS,
+		},
+		.ops = &rt298_aif_dai_ops,
+		.symmetric_rates = 1,
+	},
+
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_rt298 = {
+	.probe = rt298_probe,
+	.remove = rt298_remove,
+	.suspend = rt298_suspend,
+	.resume = rt298_resume,
+	.set_bias_level = rt298_set_bias_level,
+	.idle_bias_off = true,
+	.controls = rt298_snd_controls,
+	.num_controls = ARRAY_SIZE(rt298_snd_controls),
+	.dapm_widgets = rt298_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(rt298_dapm_widgets),
+	.dapm_routes = rt298_dapm_routes,
+	.num_dapm_routes = ARRAY_SIZE(rt298_dapm_routes),
+};
+
+static const struct regmap_config rt298_regmap = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.max_register = 0x02370100,
+	.volatile_reg = rt298_volatile_register,
+	.readable_reg = rt298_readable_register,
+	.reg_write = rl6347a_hw_write,
+	.reg_read = rl6347a_hw_read,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_defaults = rt298_reg,
+	.num_reg_defaults = ARRAY_SIZE(rt298_reg),
+};
+
+static const struct i2c_device_id rt298_i2c_id[] = {
+	{"rt298", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, rt298_i2c_id);
+
+static const struct acpi_device_id rt298_acpi_match[] = {
+	{ "INT343A", 0 },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, rt298_acpi_match);
+
+static int rt298_i2c_probe(struct i2c_client *i2c,
+			   const struct i2c_device_id *id)
+{
+	struct rt298_platform_data *pdata = dev_get_platdata(&i2c->dev);
+	struct rt298_priv *rt298;
+	struct device *dev = &i2c->dev;
+	const struct acpi_device_id *acpiid;
+	int i, ret;
+
+	rt298 = devm_kzalloc(&i2c->dev,	sizeof(*rt298),
+				GFP_KERNEL);
+	if (NULL == rt298)
+		return -ENOMEM;
+
+	rt298->regmap = devm_regmap_init(&i2c->dev, NULL, i2c, &rt298_regmap);
+	if (IS_ERR(rt298->regmap)) {
+		ret = PTR_ERR(rt298->regmap);
+		dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+			ret);
+		return ret;
+	}
+
+	regmap_read(rt298->regmap,
+		RT298_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &ret);
+	if (ret != RT298_VENDOR_ID) {
+		dev_err(&i2c->dev,
+			"Device with ID register %#x is not rt298\n", ret);
+		return -ENODEV;
+	}
+
+	rt298->index_cache = rt298_index_def;
+	rt298->index_cache_size = INDEX_CACHE_SIZE;
+	rt298->i2c = i2c;
+	i2c_set_clientdata(i2c, rt298);
+
+	/* restore codec default */
+	for (i = 0; i < INDEX_CACHE_SIZE; i++)
+		regmap_write(rt298->regmap, rt298->index_cache[i].reg,
+				rt298->index_cache[i].def);
+	for (i = 0; i < ARRAY_SIZE(rt298_reg); i++)
+		regmap_write(rt298->regmap, rt298_reg[i].reg,
+				rt298_reg[i].def);
+
+	if (pdata)
+		rt298->pdata = *pdata;
+
+	/* enable jack combo mode on supported devices */
+	acpiid = acpi_match_device(dev->driver->acpi_match_table, dev);
+	if (acpiid) {
+		rt298->pdata = *(struct rt298_platform_data *)
+				acpiid->driver_data;
+	}
+
+	/* VREF Charging */
+	regmap_update_bits(rt298->regmap, 0x04, 0x80, 0x80);
+	regmap_update_bits(rt298->regmap, 0x1b, 0x860, 0x860);
+	/* Vref2 */
+	regmap_update_bits(rt298->regmap, 0x08, 0x20, 0x20);
+
+	regmap_write(rt298->regmap, RT298_SET_AUDIO_POWER, AC_PWRST_D3);
+
+	for (i = 0; i < RT298_POWER_REG_LEN; i++)
+		regmap_write(rt298->regmap,
+			RT298_SET_POWER(rt298_support_power_controls[i]),
+			AC_PWRST_D1);
+
+	if (!rt298->pdata.cbj_en) {
+		regmap_write(rt298->regmap, RT298_CBJ_CTRL2, 0x0000);
+		regmap_write(rt298->regmap, RT298_MIC1_DET_CTRL, 0x0816);
+		regmap_update_bits(rt298->regmap,
+					RT298_CBJ_CTRL1, 0xf000, 0xb000);
+	} else {
+		regmap_update_bits(rt298->regmap,
+					RT298_CBJ_CTRL1, 0xf000, 0x5000);
+	}
+
+	mdelay(10);
+
+	if (!rt298->pdata.gpio2_en)
+		regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0x4000);
+	else
+		regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0);
+
+	mdelay(10);
+
+	regmap_write(rt298->regmap, RT298_MISC_CTRL1, 0x0000);
+	regmap_update_bits(rt298->regmap,
+				RT298_WIND_FILTER_CTRL, 0x0082, 0x0082);
+	regmap_update_bits(rt298->regmap, RT298_IRQ_CTRL, 0x2, 0x2);
+	rt298->is_hp_in = -1;
+
+	if (rt298->i2c->irq) {
+		ret = request_threaded_irq(rt298->i2c->irq, NULL, rt298_irq,
+			IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "rt298", rt298);
+		if (ret != 0) {
+			dev_err(&i2c->dev,
+				"Failed to request IRQ: %d\n", ret);
+			return ret;
+		}
+	}
+
+	ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt298,
+				     rt298_dai, ARRAY_SIZE(rt298_dai));
+
+	return ret;
+}
+
+static int rt298_i2c_remove(struct i2c_client *i2c)
+{
+	struct rt298_priv *rt298 = i2c_get_clientdata(i2c);
+
+	if (i2c->irq)
+		free_irq(i2c->irq, rt298);
+	snd_soc_unregister_codec(&i2c->dev);
+
+	return 0;
+}
+
+static struct i2c_driver rt298_i2c_driver = {
+	.driver = {
+		   .name = "rt298",
+		   .acpi_match_table = ACPI_PTR(rt298_acpi_match),
+		   },
+	.probe = rt298_i2c_probe,
+	.remove = rt298_i2c_remove,
+	.id_table = rt298_i2c_id,
+};
+
+module_i2c_driver(rt298_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC RT298 driver");
+MODULE_AUTHOR("Bard Liao <bardliao@realtek.com>");
+MODULE_LICENSE("GPL");
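rt298 talks to its registers through HDA-style verbs, so the regmap is built from driver-supplied accessors (rl6347a_hw_write/rl6347a_hw_read) and a NULL bus. The general shape of that pattern, with hypothetical names:

	static int foo_reg_read(void *context, unsigned int reg, unsigned int *val)
	{
		struct i2c_client *client = context;

		/* compose and send the device's wire format via client here */
		dev_dbg(&client->dev, "read %#x\n", reg);
		*val = 0;
		return 0;
	}

	static int foo_reg_write(void *context, unsigned int reg, unsigned int val)
	{
		/* mirror of foo_reg_read for writes */
		return 0;
	}

	static const struct regmap_config foo_regmap = {
		.reg_bits	= 32,
		.val_bits	= 32,
		.reg_read	= foo_reg_read,
		.reg_write	= foo_reg_write,
		.cache_type	= REGCACHE_RBTREE,
	};

	map = devm_regmap_init(dev, NULL /* no generic bus */, client, &foo_regmap);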
diff --git a/sound/soc/codecs/rt298.h b/sound/soc/codecs/rt298.h
new file mode 100644
index 0000000..31da162
--- /dev/null
+++ b/sound/soc/codecs/rt298.h
@@ -0,0 +1,206 @@
+/*
+ * rt298.h  --  RT298 ALSA SoC audio driver
+ *
+ * Copyright 2011 Realtek Microelectronics
+ * Author: Johnny Hsu <johnnyhsu@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT298_H__
+#define __RT298_H__
+
+#define VERB_CMD(V, N, D) ((N << 20) | (V << 8) | D)
+
+#define RT298_AUDIO_FUNCTION_GROUP			0x01
+#define RT298_DAC_OUT1					0x02
+#define RT298_DAC_OUT2					0x03
+#define RT298_DIG_CVT					0x06
+#define RT298_ADC_IN1					0x09
+#define RT298_ADC_IN2					0x08
+#define RT298_MIXER_IN					0x0b
+#define RT298_MIXER_OUT1				0x0c
+#define RT298_MIXER_OUT2				0x0d
+#define RT298_DMIC1					0x12
+#define RT298_DMIC2					0x13
+#define RT298_SPK_OUT					0x14
+#define RT298_MIC1					0x18
+#define RT298_LINE1					0x1a
+#define RT298_BEEP					0x1d
+#define RT298_SPDIF					0x1e
+#define RT298_VENDOR_REGISTERS				0x20
+#define RT298_HP_OUT					0x21
+#define RT298_MIXER_IN1					0x22
+#define RT298_MIXER_IN2					0x23
+
+#define RT298_SET_PIN_SFT				6
+#define RT298_SET_PIN_ENABLE				0x40
+#define RT298_SET_PIN_DISABLE				0
+#define RT298_SET_EAPD_HIGH				0x2
+#define RT298_SET_EAPD_LOW				0
+
+#define RT298_MUTE_SFT					7
+
+/* Verb commands */
+#define RT298_GET_PARAM(NID, PARAM) VERB_CMD(AC_VERB_PARAMETERS, NID, PARAM)
+#define RT298_SET_POWER(NID) VERB_CMD(AC_VERB_SET_POWER_STATE, NID, 0)
+#define RT298_SET_AUDIO_POWER RT298_SET_POWER(RT298_AUDIO_FUNCTION_GROUP)
+#define RT298_SET_HPO_POWER RT298_SET_POWER(RT298_HP_OUT)
+#define RT298_SET_SPK_POWER RT298_SET_POWER(RT298_SPK_OUT)
+#define RT298_SET_DMIC1_POWER RT298_SET_POWER(RT298_DMIC1)
+#define RT298_SPK_MUX\
+	VERB_CMD(AC_VERB_SET_CONNECT_SEL, RT298_SPK_OUT, 0)
+#define RT298_HPO_MUX\
+	VERB_CMD(AC_VERB_SET_CONNECT_SEL, RT298_HP_OUT, 0)
+#define RT298_ADC0_MUX\
+	VERB_CMD(AC_VERB_SET_CONNECT_SEL, RT298_MIXER_IN1, 0)
+#define RT298_ADC1_MUX\
+	VERB_CMD(AC_VERB_SET_CONNECT_SEL, RT298_MIXER_IN2, 0)
+#define RT298_SET_MIC1\
+	VERB_CMD(AC_VERB_SET_PIN_WIDGET_CONTROL, RT298_MIC1, 0)
+#define RT298_SET_PIN_HPO\
+	VERB_CMD(AC_VERB_SET_PIN_WIDGET_CONTROL, RT298_HP_OUT, 0)
+#define RT298_SET_PIN_SPK\
+	VERB_CMD(AC_VERB_SET_PIN_WIDGET_CONTROL, RT298_SPK_OUT, 0)
+#define RT298_SET_PIN_DMIC1\
+	VERB_CMD(AC_VERB_SET_PIN_WIDGET_CONTROL, RT298_DMIC1, 0)
+#define RT298_SET_PIN_SPDIF\
+	VERB_CMD(AC_VERB_SET_PIN_WIDGET_CONTROL, RT298_SPDIF, 0)
+#define RT298_SET_PIN_DIG_CVT\
+	VERB_CMD(AC_VERB_SET_DIGI_CONVERT_1, RT298_DIG_CVT, 0)
+#define RT298_SPK_EAPD\
+	VERB_CMD(AC_VERB_SET_EAPD_BTLENABLE, RT298_SPK_OUT, 0)
+#define RT298_SET_AMP_GAIN_HPO\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_HP_OUT, 0)
+#define RT298_SET_AMP_GAIN_ADC_IN1\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_ADC_IN1, 0)
+#define RT298_SET_AMP_GAIN_ADC_IN2\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_ADC_IN2, 0)
+#define RT298_GET_HP_SENSE\
+	VERB_CMD(AC_VERB_GET_PIN_SENSE, RT298_HP_OUT, 0)
+#define RT298_GET_MIC1_SENSE\
+	VERB_CMD(AC_VERB_GET_PIN_SENSE, RT298_MIC1, 0)
+#define RT298_SET_DMIC2_DEFAULT\
+	VERB_CMD(AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, RT298_DMIC2, 0)
+#define RT298_SET_SPDIF_DEFAULT\
+	VERB_CMD(AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, RT298_SPDIF, 0)
+#define RT298_DACL_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_DAC_OUT1, 0xa000)
+#define RT298_DACR_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_DAC_OUT1, 0x9000)
+#define RT298_ADCL_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_ADC_IN1, 0x6000)
+#define RT298_ADCR_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_ADC_IN1, 0x5000)
+#define RT298_MIC_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIC1, 0x7000)
+#define RT298_SPOL_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_SPK_OUT, 0xa000)
+#define RT298_SPOR_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_SPK_OUT, 0x9000)
+#define RT298_HPOL_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_HP_OUT, 0xa000)
+#define RT298_HPOR_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_HP_OUT, 0x9000)
+#define RT298_F_DAC_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_OUT1, 0x7000)
+#define RT298_F_RECMIX_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_OUT1, 0x7100)
+#define RT298_REC_MIC_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_IN, 0x7000)
+#define RT298_REC_I2S_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_IN, 0x7100)
+#define RT298_REC_LINE_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_IN, 0x7200)
+#define RT298_REC_BEEP_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_IN, 0x7300)
+#define RT298_DAC_FORMAT\
+	VERB_CMD(AC_VERB_SET_STREAM_FORMAT, RT298_DAC_OUT1, 0)
+#define RT298_ADC_FORMAT\
+	VERB_CMD(AC_VERB_SET_STREAM_FORMAT, RT298_ADC_IN1, 0)
+#define RT298_COEF_INDEX\
+	VERB_CMD(AC_VERB_SET_COEF_INDEX, RT298_VENDOR_REGISTERS, 0)
+#define RT298_PROC_COEF\
+	VERB_CMD(AC_VERB_SET_PROC_COEF, RT298_VENDOR_REGISTERS, 0)
+
+/* Index registers */
+#define RT298_A_BIAS_CTRL1	0x01
+#define RT298_A_BIAS_CTRL2	0x02
+#define RT298_POWER_CTRL1	0x03
+#define RT298_A_BIAS_CTRL3	0x04
+#define RT298_POWER_CTRL2	0x08
+#define RT298_I2S_CTRL1		0x09
+#define RT298_I2S_CTRL2		0x0a
+#define RT298_CLK_DIV		0x0b
+#define RT298_DC_GAIN		0x0d
+#define RT298_POWER_CTRL3	0x0f
+#define RT298_MIC1_DET_CTRL	0x19
+#define RT298_MISC_CTRL1	0x20
+#define RT298_IRQ_CTRL		0x33
+#define RT298_WIND_FILTER_CTRL	0x46
+#define RT298_PLL_CTRL1		0x49
+#define RT298_CBJ_CTRL1		0x4f
+#define RT298_CBJ_CTRL2		0x50
+#define RT298_PLL_CTRL		0x63
+#define RT298_DEPOP_CTRL1	0x66
+#define RT298_DEPOP_CTRL2	0x67
+#define RT298_DEPOP_CTRL3	0x68
+#define RT298_DEPOP_CTRL4	0x69
+
+/* SPDIF (0x06) */
+#define RT298_SPDIF_SEL_SFT	0
+#define RT298_SPDIF_SEL_PCM0	0
+#define RT298_SPDIF_SEL_PCM1	1
+#define RT298_SPDIF_SEL_SPOUT	2
+#define RT298_SPDIF_SEL_PP	3
+
+/* RECMIX (0x0b) */
+#define RT298_M_REC_BEEP_SFT	0
+#define RT298_M_REC_LINE1_SFT	1
+#define RT298_M_REC_MIC1_SFT	2
+#define RT298_M_REC_I2S_SFT	3
+
+/* Front (0x0c) */
+#define RT298_M_FRONT_DAC_SFT	0
+#define RT298_M_FRONT_REC_SFT	1
+
+/* SPK-OUT (0x14) */
+#define RT298_M_SPK_MUX_SFT	14
+#define RT298_SPK_SEL_MASK	0x1
+#define RT298_SPK_SEL_SFT	0
+#define RT298_SPK_SEL_F		0
+#define RT298_SPK_SEL_S		1
+
+/* HP-OUT (0x21) */
+#define RT298_M_HP_MUX_SFT	14
+#define RT298_HP_SEL_MASK	0x1
+#define RT298_HP_SEL_SFT	0
+#define RT298_HP_SEL_F		0
+#define RT298_HP_SEL_S		1
+
+/* ADC (0x22) (0x23) */
+#define RT298_ADC_SEL_MASK	0x7
+#define RT298_ADC_SEL_SFT	0
+#define RT298_ADC_SEL_SURR	0
+#define RT298_ADC_SEL_FRONT	1
+#define RT298_ADC_SEL_DMIC	2
+#define RT298_ADC_SEL_BEEP	4
+#define RT298_ADC_SEL_LINE1	5
+#define RT298_ADC_SEL_I2S	6
+#define RT298_ADC_SEL_MIC1	7
+
+#define RT298_SCLK_S_MCLK	0
+#define RT298_SCLK_S_PLL	1
+
+enum {
+	RT298_AIF1,
+	RT298_AIF2,
+	RT298_AIFS,
+};
+
+int rt298_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack);
+
+#endif /* __RT298_H__ */
+
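The fixed AC_VERB_SET_AMP_GAIN_MUTE payloads above are not arbitrary: per the HDA specification (an assumption about the spec, not something this patch states), bit 15 of the 16-bit payload selects the output amp, bit 14 the input amp, bits 13/12 the left/right channel, and bits 11:8 index a mixer input. That is why the DAC/SPO/HPO gain macros use 0xa000 for left and 0x9000 for right, while the record-mixer switches step through 0x7000..0x7300. A standalone sketch composing those payloads:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t out_left  = (1u << 15) | (1u << 13);             /* 0xa000 */
	uint16_t out_right = (1u << 15) | (1u << 12);             /* 0x9000 */
	uint16_t in_mix0   = (1u << 14) | (3u << 12) | (0u << 8); /* 0x7000 */
	uint16_t in_mix1   = (1u << 14) | (3u << 12) | (1u << 8); /* 0x7100 */

	printf("%#x %#x %#x %#x\n", out_left, out_right, in_mix0, in_mix1);
	return 0;
}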
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index 058167c..1be2bab 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -174,16 +174,15 @@
 static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -95625, 375, 0);
 static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52}dB */
-static unsigned int mic_bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(mic_bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 static int rt5631_dmic_get(struct snd_kcontrol *kcontrol,
 		struct snd_ctl_elem_value *ucontrol)
@@ -1725,7 +1724,6 @@
 static struct i2c_driver rt5631_i2c_driver = {
 	.driver = {
 		.name = "rt5631",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(rt5631_i2c_dt_ids),
 	},
 	.probe = rt5631_i2c_probe,
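The TLV conversion in this hunk recurs in the rt5640, rt5645, rt5651, rt5670, rt5677, sgtl5000 and ssm2602 hunks below: DECLARE_TLV_DB_RANGE derives the item count from its argument list, so the manually maintained TLV_DB_RANGE_HEAD(n) count, and the miscount risk it carries, goes away. A minimal sketch of declaring such a table and attaching it to a control; the register, shift and control name are placeholders, not values from these drivers:

#include <sound/soc.h>
#include <sound/tlv.h>

/* two ranges: step 0 maps to 0 dB, step 1 to +20 dB */
static const DECLARE_TLV_DB_RANGE(example_bst_tlv,
	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0)
);

static const struct snd_kcontrol_new example_controls[] = {
	/* placeholder register 0x0d, shift 12, max step 1, no invert */
	SOC_SINGLE_TLV("Example Boost Volume", 0x0d, 12, 1, 0,
		       example_bst_tlv),
};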
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index 9bc78e5..e1ceeb8 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -51,7 +51,7 @@
 	  .window_len = 0x1, },
 };
 
-static const struct reg_default init_list[] = {
+static const struct reg_sequence init_list[] = {
 	{RT5640_PR_BASE + 0x3d,	0x3600},
 	{RT5640_PR_BASE + 0x12,	0x0aa8},
 	{RT5640_PR_BASE + 0x14,	0x0aaa},
@@ -347,16 +347,15 @@
 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
-static unsigned int bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 /* Interface data select */
 static const char * const rt5640_data_select[] = {
@@ -459,10 +458,11 @@
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
-	int idx = -EINVAL;
+	int idx, rate;
 
-	idx = rl6231_calc_dmic_clk(rt5640->sysclk);
-
+	rate = rt5640->sysclk / rl6231_get_pre_div(rt5640->regmap,
+		RT5640_ADDA_CLK1, RT5640_I2S_PD1_SFT);
+	idx = rl6231_calc_dmic_clk(rate);
 	if (idx < 0)
 		dev_err(codec->dev, "Failed to set DMIC clock\n");
 	else
@@ -984,6 +984,35 @@
 	return 0;
 }
 
+static int rt5640_lout_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		hp_amp_power_on(codec);
+		snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
+			RT5640_PWR_LM, RT5640_PWR_LM);
+		snd_soc_update_bits(codec, RT5640_OUTPUT,
+			RT5640_L_MUTE | RT5640_R_MUTE, 0);
+		break;
+
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_update_bits(codec, RT5640_OUTPUT,
+			RT5640_L_MUTE | RT5640_R_MUTE,
+			RT5640_L_MUTE | RT5640_R_MUTE);
+		snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
+			RT5640_PWR_LM, 0);
+		break;
+
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
 static int rt5640_hp_power_event(struct snd_soc_dapm_widget *w,
 			   struct snd_kcontrol *kcontrol, int event)
 {
@@ -1179,13 +1208,16 @@
 		0, rt5640_spo_l_mix, ARRAY_SIZE(rt5640_spo_l_mix)),
 	SND_SOC_DAPM_MIXER("SPOR MIX", SND_SOC_NOPM, 0,
 		0, rt5640_spo_r_mix, ARRAY_SIZE(rt5640_spo_r_mix)),
-	SND_SOC_DAPM_MIXER("LOUT MIX", RT5640_PWR_ANLG1, RT5640_PWR_LM_BIT, 0,
+	SND_SOC_DAPM_MIXER("LOUT MIX", SND_SOC_NOPM, 0, 0,
 		rt5640_lout_mix, ARRAY_SIZE(rt5640_lout_mix)),
 	SND_SOC_DAPM_SUPPLY_S("Improve HP Amp Drv", 1, SND_SOC_NOPM,
 		0, 0, rt5640_hp_power_event, SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0,
 		rt5640_hp_event,
 		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_PGA_S("LOUT amp", 1, SND_SOC_NOPM, 0, 0,
+		rt5640_lout_event,
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_SUPPLY("HP L Amp", RT5640_PWR_ANLG1,
 		RT5640_PWR_HP_L_BIT, 0, NULL, 0),
 	SND_SOC_DAPM_SUPPLY("HP R Amp", RT5640_PWR_ANLG1,
@@ -1500,8 +1532,10 @@
 	{"HP R Playback", "Switch", "HP Amp"},
 	{"HPOL", NULL, "HP L Playback"},
 	{"HPOR", NULL, "HP R Playback"},
-	{"LOUTL", NULL, "LOUT MIX"},
-	{"LOUTR", NULL, "LOUT MIX"},
+
+	{"LOUT amp", NULL, "LOUT MIX"},
+	{"LOUTL", NULL, "LOUT amp"},
+	{"LOUTR", NULL, "LOUT amp"},
 };
 
 static const struct snd_soc_dapm_route rt5640_specific_dapm_routes[] = {
@@ -2207,7 +2241,7 @@
 	regmap_read(rt5640->regmap, RT5640_VENDOR_ID2, &val);
 	if (val != RT5640_DEVICE_ID) {
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt5640/39\n", val);
+			"Device with ID register %#x is not rt5640/39\n", val);
 		return -ENODEV;
 	}
 
@@ -2242,7 +2276,6 @@
 static struct i2c_driver rt5640_i2c_driver = {
 	.driver = {
 		.name = "rt5640",
-		.owner = THIS_MODULE,
 		.acpi_match_table = ACPI_PTR(rt5640_acpi_match),
 		.of_match_table = of_match_ptr(rt5640_of_match),
 	},
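The DMIC clock hunk above no longer feeds raw sysclk into the divider calculation; it first divides by the I2S pre-divider read back from RT5640_ADDA_CLK1 (the rt5645, rt5651, rt5670 and rt5677 hunks below make the same change against their clock registers). A user-space sketch of the arithmetic; the divider table and the 3.072 MHz ceiling are assumptions about rl6231_calc_dmic_clk, not taken from this patch:

#include <stdio.h>

static const unsigned int dmic_divs[] = { 2, 3, 4, 6, 8, 12 };

/* assumed rule: smallest divider keeping the DMIC clock at or below 3.072 MHz */
static int pick_dmic_div(unsigned int rate)
{
	unsigned int i;

	for (i = 0; i < sizeof(dmic_divs) / sizeof(dmic_divs[0]); i++)
		if (rate / dmic_divs[i] <= 3072000)
			return i;
	return -1;
}

int main(void)
{
	/* e.g. 24.576 MHz sysclk with an I2S pre-divider of 2 */
	unsigned int rate = 24576000 / 2;
	int idx = pick_dmic_div(rate);

	if (idx >= 0)
		printf("index %d -> DMIC clock %u Hz\n",
		       idx, rate / dmic_divs[idx]);
	return 0;
}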
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 961bd7e..4972bf3 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -21,6 +21,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/acpi.h>
 #include <linux/dmi.h>
+#include <linux/regulator/consumer.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -54,7 +55,7 @@
 	},
 };
 
-static const struct reg_default init_list[] = {
+static const struct reg_sequence init_list[] = {
 	{RT5645_PR_BASE + 0x3d,	0x3600},
 	{RT5645_PR_BASE + 0x1c,	0xfd20},
 	{RT5645_PR_BASE + 0x20,	0x611f},
@@ -63,7 +64,7 @@
 };
 #define RT5645_INIT_REG_LEN ARRAY_SIZE(init_list)
 
-static const struct reg_default rt5650_init_list[] = {
+static const struct reg_sequence rt5650_init_list[] = {
 	{0xf6,	0x0100},
 };
 
@@ -223,6 +224,39 @@
 	{ 0xff, 0x6308 },
 };
 
+static const char *const rt5645_supply_names[] = {
+	"avdd",
+	"cpvdd",
+};
+
+struct rt5645_priv {
+	struct snd_soc_codec *codec;
+	struct rt5645_platform_data pdata;
+	struct regmap *regmap;
+	struct i2c_client *i2c;
+	struct gpio_desc *gpiod_hp_det;
+	struct snd_soc_jack *hp_jack;
+	struct snd_soc_jack *mic_jack;
+	struct snd_soc_jack *btn_jack;
+	struct delayed_work jack_detect_work;
+	struct regulator_bulk_data supplies[ARRAY_SIZE(rt5645_supply_names)];
+
+	int codec_type;
+	int sysclk;
+	int sysclk_src;
+	int lrck[RT5645_AIFS];
+	int bclk[RT5645_AIFS];
+	int master[RT5645_AIFS];
+
+	int pll_src;
+	int pll_in;
+	int pll_out;
+
+	int jack_type;
+	bool en_button_func;
+	bool hp_on;
+};
+
 static int rt5645_reset(struct snd_soc_codec *codec)
 {
 	return snd_soc_write(codec, RT5645_RESET, 0);
@@ -360,6 +394,7 @@
 	case RT5645_DEPOP_M1:
 	case RT5645_DEPOP_M2:
 	case RT5645_DEPOP_M3:
+	case RT5645_CHARGE_PUMP:
 	case RT5645_MICBIAS:
 	case RT5645_A_JD_CTRL1:
 	case RT5645_VAD_CTRL4:
@@ -424,16 +459,15 @@
 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
-static unsigned int bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 static const struct snd_kcontrol_new rt5645_snd_controls[] = {
 	/* Speaker Output Volume */
@@ -510,10 +544,11 @@
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
-	int idx = -EINVAL;
+	int idx, rate;
 
-	idx = rl6231_calc_dmic_clk(rt5645->sysclk);
-
+	rate = rt5645->sysclk / rl6231_get_pre_div(rt5645->regmap,
+		RT5645_ADDA_CLK1, RT5645_I2S_PD1_SFT);
+	idx = rl6231_calc_dmic_clk(rate);
 	if (idx < 0)
 		dev_err(codec->dev, "Failed to set DMIC clock\n");
 	else
@@ -1331,15 +1366,23 @@
 	if (on) {
 		if (hp_amp_power_count <= 0) {
 			if (rt5645->codec_type == CODEC_TYPE_RT5650) {
+				snd_soc_write(codec, RT5645_DEPOP_M2, 0x3100);
 				snd_soc_write(codec, RT5645_CHARGE_PUMP,
 					0x0e06);
-				snd_soc_write(codec, RT5645_DEPOP_M1, 0x001d);
+				snd_soc_write(codec, RT5645_DEPOP_M1, 0x000d);
+				regmap_write(rt5645->regmap, RT5645_PR_BASE +
+					RT5645_HP_DCC_INT1, 0x9f01);
+				msleep(20);
+				snd_soc_update_bits(codec, RT5645_DEPOP_M1,
+					RT5645_HP_CO_MASK, RT5645_HP_CO_EN);
 				regmap_write(rt5645->regmap, RT5645_PR_BASE +
 					0x3e, 0x7400);
 				snd_soc_write(codec, RT5645_DEPOP_M3, 0x0737);
 				regmap_write(rt5645->regmap, RT5645_PR_BASE +
 					RT5645_MAMP_INT_REG2, 0xfc00);
 				snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140);
+				mdelay(5);
+				rt5645->hp_on = true;
 			} else {
 				/* depop parameters */
 				snd_soc_update_bits(codec, RT5645_DEPOP_M2,
@@ -1553,6 +1596,27 @@
 	return 0;
 }
 
+static int rt5650_hp_event(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *k, int  event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		if (rt5645->hp_on) {
+			msleep(100);
+			rt5645->hp_on = false;
+		}
+		break;
+
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
 static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
 	SND_SOC_DAPM_SUPPLY("LDO2", RT5645_PWR_MIXER,
 		RT5645_PWR_LDO2_BIT, 0, NULL, 0),
@@ -1697,15 +1761,6 @@
 	SND_SOC_DAPM_PGA("IF1_ADC4", SND_SOC_NOPM, 0, 0, NULL, 0),
 
 	/* IF1 2 Mux */
-	SND_SOC_DAPM_MUX("RT5645 IF1 ADC1 Swap Mux", SND_SOC_NOPM,
-		0, 0, &rt5645_if1_adc1_in_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 ADC2 Swap Mux", SND_SOC_NOPM,
-		0, 0, &rt5645_if1_adc2_in_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 ADC3 Swap Mux", SND_SOC_NOPM,
-		0, 0, &rt5645_if1_adc3_in_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 ADC Mux", SND_SOC_NOPM,
-		0, 0, &rt5645_if1_adc_in_mux),
-
 	SND_SOC_DAPM_MUX("IF2 ADC Mux", SND_SOC_NOPM,
 		0, 0, &rt5645_if2_adc_in_mux),
 
@@ -1716,14 +1771,6 @@
 	SND_SOC_DAPM_PGA("IF1 DAC1", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("IF1 DAC2", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("IF1 DAC3", SND_SOC_NOPM, 0, 0, NULL, 0),
-	SND_SOC_DAPM_MUX("RT5645 IF1 DAC1 L Mux", SND_SOC_NOPM, 0, 0,
-		&rt5645_if1_dac0_tdm_sel_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 DAC1 R Mux", SND_SOC_NOPM, 0, 0,
-		&rt5645_if1_dac1_tdm_sel_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 DAC2 L Mux", SND_SOC_NOPM, 0, 0,
-		&rt5645_if1_dac2_tdm_sel_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 DAC2 R Mux", SND_SOC_NOPM, 0, 0,
-		&rt5645_if1_dac3_tdm_sel_mux),
 	SND_SOC_DAPM_PGA("IF1 ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("IF1 ADC L", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("IF1 ADC R", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -1854,6 +1901,26 @@
 	SND_SOC_DAPM_OUTPUT("PDM1R"),
 	SND_SOC_DAPM_OUTPUT("SPOL"),
 	SND_SOC_DAPM_OUTPUT("SPOR"),
+	SND_SOC_DAPM_POST("DAPM_POST", rt5650_hp_event),
+};
+
+static const struct snd_soc_dapm_widget rt5645_specific_dapm_widgets[] = {
+	SND_SOC_DAPM_MUX("RT5645 IF1 DAC1 L Mux", SND_SOC_NOPM, 0, 0,
+		&rt5645_if1_dac0_tdm_sel_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 DAC1 R Mux", SND_SOC_NOPM, 0, 0,
+		&rt5645_if1_dac1_tdm_sel_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 DAC2 L Mux", SND_SOC_NOPM, 0, 0,
+		&rt5645_if1_dac2_tdm_sel_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 DAC2 R Mux", SND_SOC_NOPM, 0, 0,
+		&rt5645_if1_dac3_tdm_sel_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 ADC Mux", SND_SOC_NOPM,
+		0, 0, &rt5645_if1_adc_in_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 ADC1 Swap Mux", SND_SOC_NOPM,
+		0, 0, &rt5645_if1_adc1_in_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 ADC2 Swap Mux", SND_SOC_NOPM,
+		0, 0, &rt5645_if1_adc2_in_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 ADC3 Swap Mux", SND_SOC_NOPM,
+		0, 0, &rt5645_if1_adc3_in_mux),
 };
 
 static const struct snd_soc_dapm_widget rt5650_specific_dapm_widgets[] = {
@@ -2642,7 +2709,7 @@
 
 	switch (level) {
 	case SND_SOC_BIAS_PREPARE:
-		if (SND_SOC_BIAS_STANDBY == codec->dapm.bias_level) {
+		if (SND_SOC_BIAS_STANDBY == snd_soc_codec_get_bias_level(codec)) {
 			snd_soc_update_bits(codec, RT5645_PWR_ANLG1,
 				RT5645_PWR_VREF1 | RT5645_PWR_MB |
 				RT5645_PWR_BG | RT5645_PWR_VREF2,
@@ -2686,94 +2753,15 @@
 	return 0;
 }
 
-static int rt5650_calibration(struct rt5645_priv *rt5645)
-{
-	int val, i;
-	int ret = -1;
-
-	regcache_cache_bypass(rt5645->regmap, true);
-	regmap_write(rt5645->regmap, RT5645_RESET, 0);
-	regmap_write(rt5645->regmap, RT5645_GEN_CTRL3, 0x0800);
-	regmap_write(rt5645->regmap, RT5645_PR_BASE + RT5645_CHOP_DAC_ADC,
-		0x3600);
-	regmap_write(rt5645->regmap, RT5645_PR_BASE + 0x25, 0x7000);
-	regmap_write(rt5645->regmap, RT5645_I2S1_SDP, 0x8008);
-	/* headset type */
-	regmap_write(rt5645->regmap, RT5645_GEN_CTRL1, 0x2061);
-	regmap_write(rt5645->regmap, RT5645_CHARGE_PUMP, 0x0006);
-	regmap_write(rt5645->regmap, RT5645_PWR_ANLG1, 0x2012);
-	regmap_write(rt5645->regmap, RT5645_PWR_MIXER, 0x0002);
-	regmap_write(rt5645->regmap, RT5645_PWR_VOL, 0x0020);
-	regmap_write(rt5645->regmap, RT5645_JD_CTRL3, 0x00f0);
-	regmap_write(rt5645->regmap, RT5645_IN1_CTRL1, 0x0006);
-	regmap_write(rt5645->regmap, RT5645_IN1_CTRL2, 0x1827);
-	regmap_write(rt5645->regmap, RT5645_IN1_CTRL2, 0x0827);
-	msleep(400);
-	/* Inline command */
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M1, 0x0001);
-	regmap_write(rt5645->regmap, RT5650_4BTN_IL_CMD2, 0xc000);
-	regmap_write(rt5645->regmap, RT5650_4BTN_IL_CMD1, 0x0008);
-	/* Calbration */
-	regmap_write(rt5645->regmap, RT5645_GLB_CLK, 0x8000);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M1, 0x0000);
-	regmap_write(rt5645->regmap, RT5650_4BTN_IL_CMD2, 0xc000);
-	regmap_write(rt5645->regmap, RT5650_4BTN_IL_CMD1, 0x0008);
-	regmap_write(rt5645->regmap, RT5645_PWR_DIG2, 0x8800);
-	regmap_write(rt5645->regmap, RT5645_PWR_ANLG1, 0xe8fa);
-	regmap_write(rt5645->regmap, RT5645_PWR_ANLG2, 0x8c04);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M2, 0x3100);
-	regmap_write(rt5645->regmap, RT5645_CHARGE_PUMP, 0x0e06);
-	regmap_write(rt5645->regmap, RT5645_BASS_BACK, 0x8a13);
-	regmap_write(rt5645->regmap, RT5645_GEN_CTRL3, 0x0820);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M1, 0x000d);
-	/* Power on and Calbration */
-	regmap_write(rt5645->regmap, RT5645_PR_BASE + RT5645_HP_DCC_INT1,
-		0x9f01);
-	msleep(200);
-	for (i = 0; i < 5; i++) {
-		regmap_read(rt5645->regmap, RT5645_PR_BASE + 0x7a, &val);
-		if (val != 0 && val != 0x3f3f) {
-			ret = 0;
-			break;
-		}
-		msleep(50);
-	}
-	pr_debug("%s: PR-7A = 0x%x\n", __func__, val);
-
-	/* mute */
-	regmap_write(rt5645->regmap, RT5645_PR_BASE + 0x3e, 0x7400);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M3, 0x0737);
-	regmap_write(rt5645->regmap, RT5645_PR_BASE + RT5645_MAMP_INT_REG2,
-		0xfc00);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M2, 0x1140);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M1, 0x0000);
-	regmap_write(rt5645->regmap, RT5645_GEN_CTRL2, 0x4020);
-	regmap_write(rt5645->regmap, RT5645_PWR_ANLG2, 0x0006);
-	regmap_write(rt5645->regmap, RT5645_PWR_DIG2, 0x0000);
-	msleep(350);
-
-	regcache_cache_bypass(rt5645->regmap, false);
-
-	return ret;
-}
-
 static void rt5645_enable_push_button_irq(struct snd_soc_codec *codec,
 	bool enable)
 {
-	struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
 
 	if (enable) {
-		snd_soc_dapm_mutex_lock(&codec->dapm);
-		snd_soc_dapm_force_enable_pin_unlocked(&codec->dapm,
-							"ADC L power");
-		snd_soc_dapm_force_enable_pin_unlocked(&codec->dapm,
-							"ADC R power");
-		snd_soc_dapm_force_enable_pin_unlocked(&codec->dapm,
-							"LDO2");
-		snd_soc_dapm_force_enable_pin_unlocked(&codec->dapm,
-							"Mic Det Power");
-		snd_soc_dapm_sync_unlocked(&codec->dapm);
-		snd_soc_dapm_mutex_unlock(&codec->dapm);
+		snd_soc_dapm_force_enable_pin(dapm, "ADC L power");
+		snd_soc_dapm_force_enable_pin(dapm, "ADC R power");
+		snd_soc_dapm_sync(dapm);
 
 		snd_soc_update_bits(codec,
 					RT5645_INT_IRQ_ST, 0x8, 0x8);
@@ -2786,36 +2774,26 @@
 		snd_soc_update_bits(codec, RT5650_4BTN_IL_CMD2, 0x8000, 0x0);
 		snd_soc_update_bits(codec, RT5645_INT_IRQ_ST, 0x8, 0x0);
 
-		snd_soc_dapm_mutex_lock(&codec->dapm);
-		snd_soc_dapm_disable_pin_unlocked(&codec->dapm,
-							"ADC L power");
-		snd_soc_dapm_disable_pin_unlocked(&codec->dapm,
-							"ADC R power");
-		if (rt5645->pdata.jd_mode == 0)
-			snd_soc_dapm_disable_pin_unlocked(&codec->dapm,
-								"LDO2");
-		snd_soc_dapm_disable_pin_unlocked(&codec->dapm,
-							"Mic Det Power");
-		snd_soc_dapm_sync_unlocked(&codec->dapm);
-		snd_soc_dapm_mutex_unlock(&codec->dapm);
+		snd_soc_dapm_disable_pin(dapm, "ADC L power");
+		snd_soc_dapm_disable_pin(dapm, "ADC R power");
+		snd_soc_dapm_sync(dapm);
 	}
 }
 
 static int rt5645_jack_detect(struct snd_soc_codec *codec, int jack_insert)
 {
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
 	struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
 	unsigned int val;
 
 	if (jack_insert) {
 		regmap_write(rt5645->regmap, RT5645_CHARGE_PUMP, 0x0006);
 
-		if (codec->component.card->instantiated) {
-			/* for jack type detect */
-			snd_soc_dapm_force_enable_pin(&codec->dapm, "LDO2");
-			snd_soc_dapm_force_enable_pin(&codec->dapm,
-				"Mic Det Power");
-			snd_soc_dapm_sync(&codec->dapm);
-		} else {
+		/* for jack type detect */
+		snd_soc_dapm_force_enable_pin(dapm, "LDO2");
+		snd_soc_dapm_force_enable_pin(dapm, "Mic Det Power");
+		snd_soc_dapm_sync(dapm);
+		if (!dapm->card->instantiated) {
 			/* Power up necessary bits for JD if dapm is
 			   not ready yet */
 			regmap_update_bits(rt5645->regmap, RT5645_PWR_ANLG1,
@@ -2828,14 +2806,15 @@
 		}
 
 		regmap_write(rt5645->regmap, RT5645_JD_CTRL3, 0x00f0);
-		regmap_write(rt5645->regmap, RT5645_IN1_CTRL1, 0x0006);
-		regmap_update_bits(rt5645->regmap,
-				   RT5645_IN1_CTRL2, 0x1000, 0x1000);
+		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
+			RT5645_CBJ_MN_JD, RT5645_CBJ_MN_JD);
+		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1,
+			RT5645_CBJ_BST1_EN, RT5645_CBJ_BST1_EN);
 		msleep(100);
-		regmap_update_bits(rt5645->regmap,
-				   RT5645_IN1_CTRL2, 0x1000, 0x0000);
+		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
+			RT5645_CBJ_MN_JD, 0);
 
-		msleep(450);
+		msleep(600);
 		regmap_read(rt5645->regmap, RT5645_IN1_CTRL3, &val);
 		val &= 0x7;
 		dev_dbg(codec->dev, "val = %d\n", val);
@@ -2846,43 +2825,46 @@
 				rt5645_enable_push_button_irq(codec, true);
 			}
 		} else {
-			if (codec->component.card->instantiated) {
-				snd_soc_dapm_disable_pin(&codec->dapm,
-					"Mic Det Power");
-				snd_soc_dapm_sync(&codec->dapm);
-			} else
-				regmap_update_bits(rt5645->regmap,
-					RT5645_PWR_VOL, RT5645_PWR_MIC_DET, 0);
+			snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
+			snd_soc_dapm_sync(dapm);
 			rt5645->jack_type = SND_JACK_HEADPHONE;
 		}
 
+		snd_soc_update_bits(codec, RT5645_CHARGE_PUMP, 0x0300, 0x0200);
+		snd_soc_write(codec, RT5645_DEPOP_M1, 0x001d);
+		snd_soc_write(codec, RT5645_DEPOP_M1, 0x0001);
 	} else { /* jack out */
 		rt5645->jack_type = 0;
+
+		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
+			RT5645_CBJ_MN_JD, RT5645_CBJ_MN_JD);
+		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1,
+			RT5645_CBJ_BST1_EN, 0);
+
 		if (rt5645->en_button_func)
 			rt5645_enable_push_button_irq(codec, false);
-		else {
-			if (codec->component.card->instantiated) {
-				if (rt5645->pdata.jd_mode == 0)
-					snd_soc_dapm_disable_pin(&codec->dapm,
-						"LDO2");
-				snd_soc_dapm_disable_pin(&codec->dapm,
-					"Mic Det Power");
-				snd_soc_dapm_sync(&codec->dapm);
-			} else {
-				if (rt5645->pdata.jd_mode == 0)
-					regmap_update_bits(rt5645->regmap,
-						RT5645_PWR_MIXER,
-						RT5645_PWR_LDO2, 0);
-				regmap_update_bits(rt5645->regmap,
-					RT5645_PWR_VOL, RT5645_PWR_MIC_DET, 0);
-			}
-		}
+
+		if (rt5645->pdata.jd_mode == 0)
+			snd_soc_dapm_disable_pin(dapm, "LDO2");
+		snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
+		snd_soc_dapm_sync(dapm);
 	}
 
 	return rt5645->jack_type;
 }
 
-static int rt5645_irq_detection(struct rt5645_priv *rt5645);
+static int rt5645_button_detect(struct snd_soc_codec *codec)
+{
+	int btn_type, val;
+
+	val = snd_soc_read(codec, RT5650_4BTN_IL_CMD1);
+	pr_debug("val=0x%x\n", val);
+	btn_type = val & 0xfff0;
+	snd_soc_write(codec, RT5650_4BTN_IL_CMD1, val);
+
+	return btn_type;
+}
+
 static irqreturn_t rt5645_irq(int irq, void *data);
 
 int rt5645_set_jack_detect(struct snd_soc_codec *codec,
@@ -2913,38 +2895,10 @@
 {
 	struct rt5645_priv *rt5645 =
 		container_of(work, struct rt5645_priv, jack_detect_work.work);
-
-	rt5645_irq_detection(rt5645);
-}
-
-static irqreturn_t rt5645_irq(int irq, void *data)
-{
-	struct rt5645_priv *rt5645 = data;
-
-	queue_delayed_work(system_power_efficient_wq,
-			   &rt5645->jack_detect_work, msecs_to_jiffies(250));
-
-	return IRQ_HANDLED;
-}
-
-static int rt5645_button_detect(struct snd_soc_codec *codec)
-{
-	int btn_type, val;
-
-	val = snd_soc_read(codec, RT5650_4BTN_IL_CMD1);
-	pr_debug("val=0x%x\n", val);
-	btn_type = val & 0xfff0;
-	snd_soc_write(codec, RT5650_4BTN_IL_CMD1, val);
-
-	return btn_type;
-}
-
-static int rt5645_irq_detection(struct rt5645_priv *rt5645)
-{
 	int val, btn_type, gpio_state = 0, report = 0;
 
 	if (!rt5645->codec)
-		return -EINVAL;
+		return;
 
 	switch (rt5645->pdata.jd_mode) {
 	case 0: /* Not using rt5645 JD */
@@ -2958,7 +2912,7 @@
 				    report, SND_JACK_HEADPHONE);
 		snd_soc_jack_report(rt5645->mic_jack,
 				    report, SND_JACK_MICROPHONE);
-		return report;
+		return;
 	case 1: /* 2 port */
 		val = snd_soc_read(rt5645->codec, RT5645_A_JD_CTRL1) & 0x0070;
 		break;
@@ -3040,27 +2994,39 @@
 		snd_soc_jack_report(rt5645->btn_jack,
 			report, SND_JACK_BTN_0 | SND_JACK_BTN_1 |
 				SND_JACK_BTN_2 | SND_JACK_BTN_3);
+}
 
-	return report;
+static irqreturn_t rt5645_irq(int irq, void *data)
+{
+	struct rt5645_priv *rt5645 = data;
+
+	queue_delayed_work(system_power_efficient_wq,
+			   &rt5645->jack_detect_work, msecs_to_jiffies(250));
+
+	return IRQ_HANDLED;
 }
 
 static int rt5645_probe(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
 	struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
 
 	rt5645->codec = codec;
 
 	switch (rt5645->codec_type) {
 	case CODEC_TYPE_RT5645:
-		snd_soc_dapm_add_routes(&codec->dapm,
+		snd_soc_dapm_new_controls(dapm,
+			rt5645_specific_dapm_widgets,
+			ARRAY_SIZE(rt5645_specific_dapm_widgets));
+		snd_soc_dapm_add_routes(dapm,
 			rt5645_specific_dapm_routes,
 			ARRAY_SIZE(rt5645_specific_dapm_routes));
 		break;
 	case CODEC_TYPE_RT5650:
-		snd_soc_dapm_new_controls(&codec->dapm,
+		snd_soc_dapm_new_controls(dapm,
 			rt5650_specific_dapm_widgets,
 			ARRAY_SIZE(rt5650_specific_dapm_widgets));
-		snd_soc_dapm_add_routes(&codec->dapm,
+		snd_soc_dapm_add_routes(dapm,
 			rt5650_specific_dapm_routes,
 			ARRAY_SIZE(rt5650_specific_dapm_routes));
 		break;
@@ -3070,9 +3036,9 @@
 
 	/* for JD function */
 	if (rt5645->pdata.jd_mode) {
-		snd_soc_dapm_force_enable_pin(&codec->dapm, "JD Power");
-		snd_soc_dapm_force_enable_pin(&codec->dapm, "LDO2");
-		snd_soc_dapm_sync(&codec->dapm);
+		snd_soc_dapm_force_enable_pin(dapm, "JD Power");
+		snd_soc_dapm_force_enable_pin(dapm, "LDO2");
+		snd_soc_dapm_sync(dapm);
 	}
 
 	return 0;
@@ -3113,7 +3079,7 @@
 #define RT5645_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
 
-static struct snd_soc_dai_ops rt5645_aif_dai_ops = {
+static const struct snd_soc_dai_ops rt5645_aif_dai_ops = {
 	.hw_params = rt5645_hw_params,
 	.set_fmt = rt5645_set_dai_fmt,
 	.set_sysclk = rt5645_set_dai_sysclk,
@@ -3224,7 +3190,7 @@
 	return 1;
 }
 
-static struct dmi_system_id dmi_platform_intel_braswell[] = {
+static const struct dmi_system_id dmi_platform_intel_braswell[] = {
 	{
 		.ident = "Intel Strago",
 		.callback = strago_quirk_cb,
@@ -3232,6 +3198,13 @@
 			DMI_MATCH(DMI_PRODUCT_NAME, "Strago"),
 		},
 	},
+	{
+		.ident = "Google Celes",
+		.callback = strago_quirk_cb,
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
+		},
+	},
 	{ }
 };
 
@@ -3254,7 +3227,7 @@
 {
 	struct rt5645_platform_data *pdata = dev_get_platdata(&i2c->dev);
 	struct rt5645_priv *rt5645;
-	int ret;
+	int ret, i;
 	unsigned int val;
 
 	rt5645 = devm_kzalloc(&i2c->dev, sizeof(struct rt5645_priv),
@@ -3288,6 +3261,24 @@
 		return ret;
 	}
 
+	for (i = 0; i < ARRAY_SIZE(rt5645->supplies); i++)
+		rt5645->supplies[i].supply = rt5645_supply_names[i];
+
+	ret = devm_regulator_bulk_get(&i2c->dev,
+				      ARRAY_SIZE(rt5645->supplies),
+				      rt5645->supplies);
+	if (ret) {
+		dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(rt5645->supplies),
+				    rt5645->supplies);
+	if (ret) {
+		dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
+		return ret;
+	}
+
 	regmap_read(rt5645->regmap, RT5645_VENDOR_ID2, &val);
 
 	switch (val) {
@@ -3299,16 +3290,10 @@
 		break;
 	default:
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt5645 or rt5650\n",
+			"Device with ID register %#x is not rt5645 or rt5650\n",
 			val);
-		return -ENODEV;
-	}
-
-	if (rt5645->codec_type == CODEC_TYPE_RT5650) {
-		ret = rt5650_calibration(rt5645);
-
-		if (ret < 0)
-			pr_err("calibration failed!\n");
+		ret = -ENODEV;
+		goto err_enable;
 	}
 
 	regmap_write(rt5645->regmap, RT5645_RESET, 0);
@@ -3398,8 +3383,6 @@
 		regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL3,
 				   RT5645_IRQ_CLK_GATE_CTRL,
 				   RT5645_IRQ_CLK_GATE_CTRL);
-		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1,
-				   RT5645_CBJ_BST1_EN, RT5645_CBJ_BST1_EN);
 		regmap_update_bits(rt5645->regmap, RT5645_MICBIAS,
 				   RT5645_IRQ_CLK_INT, RT5645_IRQ_CLK_INT);
 		regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
@@ -3439,12 +3422,25 @@
 		ret = request_threaded_irq(rt5645->i2c->irq, NULL, rt5645_irq,
 			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
 			| IRQF_ONESHOT, "rt5645", rt5645);
-		if (ret)
+		if (ret) {
 			dev_err(&i2c->dev, "Failed to reguest IRQ: %d\n", ret);
+			goto err_enable;
+		}
 	}
 
-	return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5645,
-				      rt5645_dai, ARRAY_SIZE(rt5645_dai));
+	ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5645,
+				     rt5645_dai, ARRAY_SIZE(rt5645_dai));
+	if (ret)
+		goto err_irq;
+
+	return 0;
+
+err_irq:
+	if (rt5645->i2c->irq)
+		free_irq(rt5645->i2c->irq, rt5645);
+err_enable:
+	regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies);
+	return ret;
 }
 
 static int rt5645_i2c_remove(struct i2c_client *i2c)
@@ -3457,18 +3453,31 @@
 	cancel_delayed_work_sync(&rt5645->jack_detect_work);
 
 	snd_soc_unregister_codec(&i2c->dev);
+	regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies);
 
 	return 0;
 }
 
+static void rt5645_i2c_shutdown(struct i2c_client *i2c)
+{
+	struct rt5645_priv *rt5645 = i2c_get_clientdata(i2c);
+
+	regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL3,
+		RT5645_RING2_SLEEVE_GND, RT5645_RING2_SLEEVE_GND);
+	regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2, RT5645_CBJ_MN_JD,
+		RT5645_CBJ_MN_JD);
+	regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1, RT5645_CBJ_BST1_EN,
+		0);
+}
+
 static struct i2c_driver rt5645_i2c_driver = {
 	.driver = {
 		.name = "rt5645",
-		.owner = THIS_MODULE,
 		.acpi_match_table = ACPI_PTR(rt5645_acpi_match),
 	},
 	.probe = rt5645_i2c_probe,
-	.remove   = rt5645_i2c_remove,
+	.remove = rt5645_i2c_remove,
+	.shutdown = rt5645_i2c_shutdown,
 	.id_table = rt5645_i2c_id,
 };
 module_i2c_driver(rt5645_i2c_driver);
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 278bb9f..0e4cfc6 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -2115,6 +2115,7 @@
 #define RT5645_JD_PSV_MODE			(0x1 << 12)
 #define RT5645_IRQ_CLK_GATE_CTRL		(0x1 << 11)
 #define RT5645_MICINDET_MANU			(0x1 << 7)
+#define RT5645_RING2_SLEEVE_GND			(0x1 << 5)
 
 /* Vendor ID (0xfd) */
 #define RT5645_VER_C				0x2
@@ -2181,32 +2182,6 @@
 int rt5645_sel_asrc_clk_src(struct snd_soc_codec *codec,
 		unsigned int filter_mask, unsigned int clk_src);
 
-struct rt5645_priv {
-	struct snd_soc_codec *codec;
-	struct rt5645_platform_data pdata;
-	struct regmap *regmap;
-	struct i2c_client *i2c;
-	struct gpio_desc *gpiod_hp_det;
-	struct snd_soc_jack *hp_jack;
-	struct snd_soc_jack *mic_jack;
-	struct snd_soc_jack *btn_jack;
-	struct delayed_work jack_detect_work;
-
-	int codec_type;
-	int sysclk;
-	int sysclk_src;
-	int lrck[RT5645_AIFS];
-	int bclk[RT5645_AIFS];
-	int master[RT5645_AIFS];
-
-	int pll_src;
-	int pll_in;
-	int pll_out;
-
-	int jack_type;
-	bool en_button_func;
-};
-
 int rt5645_set_jack_detect(struct snd_soc_codec *codec,
 	struct snd_soc_jack *hp_jack, struct snd_soc_jack *mic_jack,
 	struct snd_soc_jack *btn_jack);
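Now that struct rt5645_priv is private to rt5645.c, machine drivers reach jack detection only through rt5645_set_jack_detect. A hedged sketch of a card init callback wiring up the three jacks; the jack names and the init hook are illustrative, modeled on ASoC machine drivers of this era rather than copied from one:

#include <sound/jack.h>
#include <sound/soc.h>
#include "rt5645.h"

static struct snd_soc_jack hp_jack, mic_jack, btn_jack;

static int example_card_init(struct snd_soc_pcm_runtime *rtd)
{
	int ret;

	ret = snd_soc_card_jack_new(rtd->card, "Headphone Jack",
				    SND_JACK_HEADPHONE, &hp_jack, NULL, 0);
	if (ret)
		return ret;
	ret = snd_soc_card_jack_new(rtd->card, "Mic Jack",
				    SND_JACK_MICROPHONE, &mic_jack, NULL, 0);
	if (ret)
		return ret;
	ret = snd_soc_card_jack_new(rtd->card, "Buttons",
				    SND_JACK_BTN_0 | SND_JACK_BTN_1 |
				    SND_JACK_BTN_2 | SND_JACK_BTN_3,
				    &btn_jack, NULL, 0);
	if (ret)
		return ret;

	return rt5645_set_jack_detect(rtd->codec, &hp_jack, &mic_jack,
				      &btn_jack);
}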
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index a3506e1..1d40318 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -46,7 +46,7 @@
 	  .window_len = 0x1, },
 };
 
-static struct reg_default init_list[] = {
+static const struct reg_sequence init_list[] = {
 	{RT5651_PR_BASE + 0x3d,	0x3e00},
 };
 
@@ -292,16 +292,15 @@
 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
-static unsigned int bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 /* Interface data select */
 static const char * const rt5651_data_select[] = {
@@ -378,10 +377,11 @@
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct rt5651_priv *rt5651 = snd_soc_codec_get_drvdata(codec);
-	int idx = -EINVAL;
+	int idx, rate;
 
-	idx = rl6231_calc_dmic_clk(rt5651->sysclk);
-
+	rate = rt5651->sysclk / rl6231_get_pre_div(rt5651->regmap,
+		RT5651_ADDA_CLK1, RT5651_I2S_PD1_SFT);
+	idx = rl6231_calc_dmic_clk(rate);
 	if (idx < 0)
 		dev_err(codec->dev, "Failed to set DMIC clock\n");
 	else
@@ -1769,7 +1769,7 @@
 	regmap_read(rt5651->regmap, RT5651_DEVICE_ID, &ret);
 	if (ret != RT5651_DEVICE_ID_VALUE) {
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt5651\n", ret);
+			"Device with ID register %#x is not rt5651\n", ret);
 		return -ENODEV;
 	}
 
@@ -1806,7 +1806,6 @@
 static struct i2c_driver rt5651_i2c_driver = {
 	.driver = {
 		.name = "rt5651",
-		.owner = THIS_MODULE,
 	},
 	.probe = rt5651_i2c_probe,
 	.remove   = rt5651_i2c_remove,
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index a9123d4..49a9e70 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -51,7 +51,7 @@
 	  .window_len = 0x1, },
 };
 
-static const struct reg_default init_list[] = {
+static const struct reg_sequence init_list[] = {
 	{ RT5670_PR_BASE + 0x14, 0x9a8a },
 	{ RT5670_PR_BASE + 0x38, 0x3ba1 },
 	{ RT5670_PR_BASE + 0x3d, 0x3640 },
@@ -592,16 +592,15 @@
 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
-static unsigned int bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 /* Interface data select */
 static const char * const rt5670_data_select[] = {
@@ -683,10 +682,11 @@
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
-	int idx = -EINVAL;
+	int idx, rate;
 
-	idx = rl6231_calc_dmic_clk(rt5670->sysclk);
-
+	rate = rt5670->sysclk / rl6231_get_pre_div(rt5670->regmap,
+		RT5670_ADDA_CLK1, RT5670_I2S_PD1_SFT);
+	idx = rl6231_calc_dmic_clk(rate);
 	if (idx < 0)
 		dev_err(codec->dev, "Failed to set DMIC clock\n");
 	else
@@ -2720,7 +2720,7 @@
 #define RT5670_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
 
-static struct snd_soc_dai_ops rt5670_aif_dai_ops = {
+static const struct snd_soc_dai_ops rt5670_aif_dai_ops = {
 	.hw_params = rt5670_hw_params,
 	.set_fmt = rt5670_set_dai_fmt,
 	.set_sysclk = rt5670_set_dai_sysclk,
@@ -2863,7 +2863,7 @@
 	regmap_read(rt5670->regmap, RT5670_VENDOR_ID2, &val);
 	if (val != RT5670_DEVICE_ID) {
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt5670/72\n", val);
+			"Device with ID register %#x is not rt5670/72\n", val);
 		return -ENODEV;
 	}
 
@@ -3043,7 +3043,6 @@
 static struct i2c_driver rt5670_i2c_driver = {
 	.driver = {
 		.name = "rt5670",
-		.owner = THIS_MODULE,
 		.acpi_match_table = ACPI_PTR(rt5670_acpi_match),
 	},
 	.probe = rt5670_i2c_probe,
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
index ef6348c..3505aaf 100644
--- a/sound/soc/codecs/rt5677-spi.c
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -31,84 +31,197 @@
 
 #include "rt5677-spi.h"
 
-static struct spi_device *g_spi;
+#define RT5677_SPI_BURST_LEN	240
+#define RT5677_SPI_HEADER	5
+#define RT5677_SPI_FREQ		6000000
 
-/**
- * rt5677_spi_write - Write data to SPI.
- * @txbuf: Data Buffer for writing.
- * @len: Data length.
- *
- *
- * Returns true for success.
+/* The AddressPhase and DataPhase of SPI commands are MSB first on the wire.
+ * DataPhase word size of 16-bit commands is 2 bytes.
+ * DataPhase word size of 32-bit commands is 4 bytes.
+ * DataPhase word size of burst commands is 8 bytes.
+ * The DSP CPU is little-endian.
  */
-int rt5677_spi_write(u8 *txbuf, size_t len)
+#define RT5677_SPI_WRITE_BURST	0x5
+#define RT5677_SPI_READ_BURST	0x4
+#define RT5677_SPI_WRITE_32	0x3
+#define RT5677_SPI_READ_32	0x2
+#define RT5677_SPI_WRITE_16	0x1
+#define RT5677_SPI_READ_16	0x0
+
+static struct spi_device *g_spi;
+static DEFINE_MUTEX(spi_mutex);
+
+/* Select a suitable transfer command for the next transfer to ensure
+ * the transfer address is always naturally aligned while minimizing
+ * the total number of transfers required.
+ *
+ * 3 transfer commands are available:
+ * RT5677_SPI_READ/WRITE_16:	Transfer 2 bytes
+ * RT5677_SPI_READ/WRITE_32:	Transfer 4 bytes
+ * RT5677_SPI_READ/WRITE_BURST:	Transfer any multiple of 8 bytes
+ *
+ * For example, reading 260 bytes at 0x60030002 uses the following commands:
+ * 0x60030002 RT5677_SPI_READ_16	2 bytes
+ * 0x60030004 RT5677_SPI_READ_32	4 bytes
+ * 0x60030008 RT5677_SPI_READ_BURST	240 bytes
+ * 0x600300F8 RT5677_SPI_READ_BURST	8 bytes
+ * 0x60030100 RT5677_SPI_READ_32	4 bytes
+ * 0x60030104 RT5677_SPI_READ_16	2 bytes
+ *
+ * Input:
+ * @read: true for read commands; false for write commands
+ * @align: alignment of the next transfer address
+ * @remain: number of bytes remaining to transfer
+ *
+ * Output:
+ * @len: number of bytes to transfer with the selected command
+ * Returns the selected command
+ */
+static u8 rt5677_spi_select_cmd(bool read, u32 align, u32 remain, u32 *len)
 {
-	int status;
+	u8 cmd;
 
-	status = spi_write(g_spi, txbuf, len);
+	if (align == 2 || align == 6 || remain == 2) {
+		cmd = RT5677_SPI_READ_16;
+		*len = 2;
+	} else if (align == 4 || remain <= 6) {
+		cmd = RT5677_SPI_READ_32;
+		*len = 4;
+	} else {
+		cmd = RT5677_SPI_READ_BURST;
+		*len = min_t(u32, remain & ~7, RT5677_SPI_BURST_LEN);
+	}
+	return read ? cmd : cmd + 1;
+}
 
-	if (status)
-		dev_err(&g_spi->dev, "rt5677_spi_write error %d\n", status);
+/* Copy dstlen bytes from src to dst, while reversing byte order for each word.
+ * If srclen < dstlen, the remainder is zero-padded.
+ */
+static void rt5677_spi_reverse(u8 *dst, u32 dstlen, const u8 *src, u32 srclen)
+{
+	u32 w, i, si;
+	u32 word_size = min_t(u32, dstlen, 8);
 
+	for (w = 0; w < dstlen; w += word_size) {
+		for (i = 0; i < word_size; i++) {
+			si = w + word_size - i - 1;
+			dst[w + i] = si < srclen ? src[si] : 0;
+		}
+	}
+}
+
+/* Read DSP address space using SPI. addr and len have to be 2-byte aligned. */
+int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
+{
+	u32 offset;
+	int status = 0;
+	struct spi_transfer t[2];
+	struct spi_message m;
+	/* +4 bytes is for the DummyPhase following the AddressPhase */
+	u8 header[RT5677_SPI_HEADER + 4];
+	u8 body[RT5677_SPI_BURST_LEN];
+	u8 spi_cmd;
+	u8 *cb = rxbuf;
+
+	if (!g_spi)
+		return -ENODEV;
+
+	if ((addr & 1) || (len & 1)) {
+		dev_err(&g_spi->dev, "Bad read align 0x%x(%zu)\n", addr, len);
+		return -EACCES;
+	}
+
+	memset(t, 0, sizeof(t));
+	t[0].tx_buf = header;
+	t[0].len = sizeof(header);
+	t[0].speed_hz = RT5677_SPI_FREQ;
+	t[1].rx_buf = body;
+	t[1].speed_hz = RT5677_SPI_FREQ;
+	spi_message_init_with_transfers(&m, t, ARRAY_SIZE(t));
+
+	for (offset = 0; offset < len; offset += t[1].len) {
+		spi_cmd = rt5677_spi_select_cmd(true, (addr + offset) & 7,
+				len - offset, &t[1].len);
+
+		/* Construct SPI message header */
+		header[0] = spi_cmd;
+		header[1] = ((addr + offset) & 0xff000000) >> 24;
+		header[2] = ((addr + offset) & 0x00ff0000) >> 16;
+		header[3] = ((addr + offset) & 0x0000ff00) >> 8;
+		header[4] = ((addr + offset) & 0x000000ff) >> 0;
+
+		mutex_lock(&spi_mutex);
+		status |= spi_sync(g_spi, &m);
+		mutex_unlock(&spi_mutex);
+
+		/* Copy data back to caller buffer */
+		rt5677_spi_reverse(cb + offset, t[1].len, body, t[1].len);
+	}
+	return status;
+}
+EXPORT_SYMBOL_GPL(rt5677_spi_read);
+
+/* Write DSP address space using SPI. addr has to be 2-byte aligned.
+ * If len is not 2-byte aligned, an extra byte of zero is written at the end
+ * as padding.
+ */
+int rt5677_spi_write(u32 addr, const void *txbuf, size_t len)
+{
+	u32 offset, len_with_pad = len;
+	int status = 0;
+	struct spi_transfer t;
+	struct spi_message m;
+	/* +1 byte is for the DummyPhase following the DataPhase */
+	u8 buf[RT5677_SPI_HEADER + RT5677_SPI_BURST_LEN + 1];
+	u8 *body = buf + RT5677_SPI_HEADER;
+	u8 spi_cmd;
+	const u8 *cb = txbuf;
+
+	if (!g_spi)
+		return -ENODEV;
+
+	if (addr & 1) {
+		dev_err(&g_spi->dev, "Bad write align 0x%x(%zu)\n", addr, len);
+		return -EACCES;
+	}
+
+	if (len & 1)
+		len_with_pad = len + 1;
+
+	memset(&t, 0, sizeof(t));
+	t.tx_buf = buf;
+	t.speed_hz = RT5677_SPI_FREQ;
+	spi_message_init_with_transfers(&m, &t, 1);
+
+	for (offset = 0; offset < len_with_pad;) {
+		spi_cmd = rt5677_spi_select_cmd(false, (addr + offset) & 7,
+				len_with_pad - offset, &t.len);
+
+		/* Construct SPI message header */
+		buf[0] = spi_cmd;
+		buf[1] = ((addr + offset) & 0xff000000) >> 24;
+		buf[2] = ((addr + offset) & 0x00ff0000) >> 16;
+		buf[3] = ((addr + offset) & 0x0000ff00) >> 8;
+		buf[4] = ((addr + offset) & 0x000000ff) >> 0;
+
+		/* Fetch data from caller buffer */
+		rt5677_spi_reverse(body, t.len, cb + offset, len - offset);
+		offset += t.len;
+		t.len += RT5677_SPI_HEADER + 1;
+
+		mutex_lock(&spi_mutex);
+		status |= spi_sync(g_spi, &m);
+		mutex_unlock(&spi_mutex);
+	}
 	return status;
 }
 EXPORT_SYMBOL_GPL(rt5677_spi_write);
 
-/**
- * rt5677_spi_burst_write - Write data to SPI by rt5677 dsp memory address.
- * @addr: Start address.
- * @txbuf: Data Buffer for writng.
- * @len: Data length, it must be a multiple of 8.
- *
- *
- * Returns true for success.
- */
-int rt5677_spi_burst_write(u32 addr, const struct firmware *fw)
+int rt5677_spi_write_firmware(u32 addr, const struct firmware *fw)
 {
-	u8 spi_cmd = RT5677_SPI_CMD_BURST_WRITE;
-	u8 *write_buf;
-	unsigned int i, end, offset = 0;
-
-	write_buf = kmalloc(RT5677_SPI_BUF_LEN + 6, GFP_KERNEL);
-
-	if (write_buf == NULL)
-		return -ENOMEM;
-
-	while (offset < fw->size) {
-		if (offset + RT5677_SPI_BUF_LEN <= fw->size)
-			end = RT5677_SPI_BUF_LEN;
-		else
-			end = fw->size % RT5677_SPI_BUF_LEN;
-
-		write_buf[0] = spi_cmd;
-		write_buf[1] = ((addr + offset) & 0xff000000) >> 24;
-		write_buf[2] = ((addr + offset) & 0x00ff0000) >> 16;
-		write_buf[3] = ((addr + offset) & 0x0000ff00) >> 8;
-		write_buf[4] = ((addr + offset) & 0x000000ff) >> 0;
-
-		for (i = 0; i < end; i += 8) {
-			write_buf[i + 12] = fw->data[offset + i + 0];
-			write_buf[i + 11] = fw->data[offset + i + 1];
-			write_buf[i + 10] = fw->data[offset + i + 2];
-			write_buf[i +  9] = fw->data[offset + i + 3];
-			write_buf[i +  8] = fw->data[offset + i + 4];
-			write_buf[i +  7] = fw->data[offset + i + 5];
-			write_buf[i +  6] = fw->data[offset + i + 6];
-			write_buf[i +  5] = fw->data[offset + i + 7];
-		}
-
-		write_buf[end + 5] = spi_cmd;
-
-		rt5677_spi_write(write_buf, end + 6);
-
-		offset += RT5677_SPI_BUF_LEN;
-	}
-
-	kfree(write_buf);
-
-	return 0;
+	return rt5677_spi_write(addr, fw->data, fw->size);
 }
-EXPORT_SYMBOL_GPL(rt5677_spi_burst_write);
+EXPORT_SYMBOL_GPL(rt5677_spi_write_firmware);
 
 static int rt5677_spi_probe(struct spi_device *spi)
 {
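The selection rule in rt5677_spi_select_cmd can be sanity-checked against the 260-byte walk in its comment. A small user-space replica of just the length choice; the 240-byte cap mirrors RT5677_SPI_BURST_LEN, and this is an illustration, not driver code:

#include <stdio.h>

/* replica of the driver's length choice for the next transfer */
static unsigned int select_len(unsigned int align, unsigned int remain)
{
	if (align == 2 || align == 6 || remain == 2)
		return 2;
	if (align == 4 || remain <= 6)
		return 4;
	return (remain & ~7u) < 240 ? (remain & ~7u) : 240;
}

int main(void)
{
	unsigned int addr = 0x60030002, len = 260, off = 0;

	/* prints the 2, 4, 240, 8, 4, 2 sequence from the comment above */
	while (off < len) {
		unsigned int n = select_len((addr + off) & 7, len - off);

		printf("0x%08x  %3u bytes\n", addr + off, n);
		off += n;
	}
	return 0;
}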
diff --git a/sound/soc/codecs/rt5677-spi.h b/sound/soc/codecs/rt5677-spi.h
index ec41b2b..662db16 100644
--- a/sound/soc/codecs/rt5677-spi.h
+++ b/sound/soc/codecs/rt5677-spi.h
@@ -12,10 +12,8 @@
 #ifndef __RT5677_SPI_H__
 #define __RT5677_SPI_H__
 
-#define RT5677_SPI_BUF_LEN 240
-#define RT5677_SPI_CMD_BURST_WRITE 0x05
-
-int rt5677_spi_write(u8 *txbuf, size_t len);
-int rt5677_spi_burst_write(u32 addr, const struct firmware *fw);
+int rt5677_spi_read(u32 addr, void *rxbuf, size_t len);
+int rt5677_spi_write(u32 addr, const void *txbuf, size_t len);
+int rt5677_spi_write_firmware(u32 addr, const struct firmware *fw);
 
 #endif /* __RT5677_SPI_H__ */
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 31d969a..b4cd7e3 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -15,13 +15,12 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/pm.h>
-#include <linux/of_gpio.h>
 #include <linux/regmap.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
 #include <linux/spi/spi.h>
 #include <linux/firmware.h>
-#include <linux/gpio.h>
+#include <linux/property.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -54,7 +53,7 @@
 	},
 };
 
-static const struct reg_default init_list[] = {
+static const struct reg_sequence init_list[] = {
 	{RT5677_ASRC_12,	0x0018},
 	{RT5677_PR_BASE + 0x3d,	0x364d},
 	{RT5677_PR_BASE + 0x17,	0x4fc0},
@@ -746,14 +745,14 @@
 		ret = request_firmware(&rt5677->fw1, RT5677_FIRMWARE1,
 			codec->dev);
 		if (ret == 0) {
-			rt5677_spi_burst_write(0x50000000, rt5677->fw1);
+			rt5677_spi_write_firmware(0x50000000, rt5677->fw1);
 			release_firmware(rt5677->fw1);
 		}
 
 		ret = request_firmware(&rt5677->fw2, RT5677_FIRMWARE2,
 			codec->dev);
 		if (ret == 0) {
-			rt5677_spi_burst_write(0x60000000, rt5677->fw2);
+			rt5677_spi_write_firmware(0x60000000, rt5677->fw2);
 			release_firmware(rt5677->fw2);
 		}
 
@@ -789,16 +788,15 @@
 static const DECLARE_TLV_DB_SCALE(st_vol_tlv, -4650, 150, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
-static unsigned int bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol,
 		struct snd_ctl_elem_value *ucontrol)
@@ -917,8 +915,11 @@
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
-	int idx = rl6231_calc_dmic_clk(rt5677->lrck[RT5677_AIF1] << 8);
+	int idx, rate;
 
+	rate = rt5677->sysclk / rl6231_get_pre_div(rt5677->regmap,
+		RT5677_CLK_TREE_CTRL1, RT5677_I2S_PD1_SFT);
+	idx = rl6231_calc_dmic_clk(rate);
 	if (idx < 0)
 		dev_err(codec->dev, "Failed to set DMIC clock\n");
 	else
@@ -4764,10 +4765,8 @@
 	struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
 
 	regmap_write(rt5677->regmap, RT5677_RESET, 0x10ec);
-	if (gpio_is_valid(rt5677->pow_ldo2))
-		gpio_set_value_cansleep(rt5677->pow_ldo2, 0);
-	if (gpio_is_valid(rt5677->reset_pin))
-		gpio_set_value_cansleep(rt5677->reset_pin, 0);
+	gpiod_set_value_cansleep(rt5677->pow_ldo2, 0);
+	gpiod_set_value_cansleep(rt5677->reset_pin, 0);
 
 	return 0;
 }
@@ -4781,10 +4780,8 @@
 		regcache_cache_only(rt5677->regmap, true);
 		regcache_mark_dirty(rt5677->regmap);
 
-		if (gpio_is_valid(rt5677->pow_ldo2))
-			gpio_set_value_cansleep(rt5677->pow_ldo2, 0);
-		if (gpio_is_valid(rt5677->reset_pin))
-			gpio_set_value_cansleep(rt5677->reset_pin, 0);
+		gpiod_set_value_cansleep(rt5677->pow_ldo2, 0);
+		gpiod_set_value_cansleep(rt5677->reset_pin, 0);
 	}
 
 	return 0;
@@ -4795,12 +4792,9 @@
 	struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
 
 	if (!rt5677->dsp_vad_en) {
-		if (gpio_is_valid(rt5677->pow_ldo2))
-			gpio_set_value_cansleep(rt5677->pow_ldo2, 1);
-		if (gpio_is_valid(rt5677->reset_pin))
-			gpio_set_value_cansleep(rt5677->reset_pin, 1);
-		if (gpio_is_valid(rt5677->pow_ldo2) ||
-		    gpio_is_valid(rt5677->reset_pin))
+		gpiod_set_value_cansleep(rt5677->pow_ldo2, 1);
+		gpiod_set_value_cansleep(rt5677->reset_pin, 1);
+		if (rt5677->pow_ldo2 || rt5677->reset_pin)
 			msleep(10);
 
 		regcache_cache_only(rt5677->regmap, false);
@@ -4863,7 +4857,7 @@
 #define RT5677_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
 
-static struct snd_soc_dai_ops rt5677_aif_dai_ops = {
+static const struct snd_soc_dai_ops rt5677_aif_dai_ops = {
 	.hw_params = rt5677_hw_params,
 	.set_fmt = rt5677_set_dai_fmt,
 	.set_sysclk = rt5677_set_dai_sysclk,
@@ -5024,45 +5018,29 @@
 };
 MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id);
 
-static int rt5677_parse_dt(struct rt5677_priv *rt5677, struct device_node *np)
+static void rt5677_read_device_properties(struct rt5677_priv *rt5677,
+		struct device *dev)
 {
-	rt5677->pdata.in1_diff = of_property_read_bool(np,
-					"realtek,in1-differential");
-	rt5677->pdata.in2_diff = of_property_read_bool(np,
-					"realtek,in2-differential");
-	rt5677->pdata.lout1_diff = of_property_read_bool(np,
-					"realtek,lout1-differential");
-	rt5677->pdata.lout2_diff = of_property_read_bool(np,
-					"realtek,lout2-differential");
-	rt5677->pdata.lout3_diff = of_property_read_bool(np,
-					"realtek,lout3-differential");
+	rt5677->pdata.in1_diff = device_property_read_bool(dev,
+			"realtek,in1-differential");
+	rt5677->pdata.in2_diff = device_property_read_bool(dev,
+			"realtek,in2-differential");
+	rt5677->pdata.lout1_diff = device_property_read_bool(dev,
+			"realtek,lout1-differential");
+	rt5677->pdata.lout2_diff = device_property_read_bool(dev,
+			"realtek,lout2-differential");
+	rt5677->pdata.lout3_diff = device_property_read_bool(dev,
+			"realtek,lout3-differential");
 
-	rt5677->pow_ldo2 = of_get_named_gpio(np,
-					"realtek,pow-ldo2-gpio", 0);
-	rt5677->reset_pin = of_get_named_gpio(np,
-					"realtek,reset-gpio", 0);
+	device_property_read_u8_array(dev, "realtek,gpio-config",
+			rt5677->pdata.gpio_config, RT5677_GPIO_NUM);
 
-	/*
-	 * POW_LDO2 is optional (it may be statically tied on the board).
-	 * -ENOENT means that the property doesn't exist, i.e. there is no
-	 * GPIO, so is not an error. Any other error code means the property
-	 * exists, but could not be parsed.
-	 */
-	if (!gpio_is_valid(rt5677->pow_ldo2) &&
-			(rt5677->pow_ldo2 != -ENOENT))
-		return rt5677->pow_ldo2;
-	if (!gpio_is_valid(rt5677->reset_pin) &&
-			(rt5677->reset_pin != -ENOENT))
-		return rt5677->reset_pin;
-
-	of_property_read_u8_array(np, "realtek,gpio-config",
-		rt5677->pdata.gpio_config, RT5677_GPIO_NUM);
-
-	of_property_read_u32(np, "realtek,jd1-gpio", &rt5677->pdata.jd1_gpio);
-	of_property_read_u32(np, "realtek,jd2-gpio", &rt5677->pdata.jd2_gpio);
-	of_property_read_u32(np, "realtek,jd3-gpio", &rt5677->pdata.jd3_gpio);
-
-	return 0;
+	device_property_read_u32(dev, "realtek,jd1-gpio",
+			&rt5677->pdata.jd1_gpio);
+	device_property_read_u32(dev, "realtek,jd2-gpio",
+			&rt5677->pdata.jd2_gpio);
+	device_property_read_u32(dev, "realtek,jd3-gpio",
+			&rt5677->pdata.jd3_gpio);
 }
 
 static struct regmap_irq rt5677_irqs[] = {
@@ -5145,43 +5123,29 @@
 
 	if (pdata)
 		rt5677->pdata = *pdata;
+	else
+		rt5677_read_device_properties(rt5677, &i2c->dev);
 
-	if (i2c->dev.of_node) {
-		ret = rt5677_parse_dt(rt5677, i2c->dev.of_node);
-		if (ret) {
-			dev_err(&i2c->dev, "Failed to parse device tree: %d\n",
-				ret);
-			return ret;
-		}
-	} else {
-		rt5677->pow_ldo2 = -EINVAL;
-		rt5677->reset_pin = -EINVAL;
+	/* pow-ldo2 and reset are optional. The codec pins may be statically
+	 * connected on the board without gpios. If the gpio device property
+	 * isn't specified, devm_gpiod_get_optional returns NULL.
+	 */
+	rt5677->pow_ldo2 = devm_gpiod_get_optional(&i2c->dev,
+			"realtek,pow-ldo2", GPIOD_OUT_HIGH);
+	if (IS_ERR(rt5677->pow_ldo2)) {
+		ret = PTR_ERR(rt5677->pow_ldo2);
+		dev_err(&i2c->dev, "Failed to request POW_LDO2: %d\n", ret);
+		return ret;
+	}
+	rt5677->reset_pin = devm_gpiod_get_optional(&i2c->dev,
+			"realtek,reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(rt5677->reset_pin)) {
+		ret = PTR_ERR(rt5677->reset_pin);
+		dev_err(&i2c->dev, "Failed to request RESET: %d\n", ret);
+		return ret;
 	}
 
-	if (gpio_is_valid(rt5677->pow_ldo2)) {
-		ret = devm_gpio_request_one(&i2c->dev, rt5677->pow_ldo2,
-					    GPIOF_OUT_INIT_HIGH,
-					    "RT5677 POW_LDO2");
-		if (ret < 0) {
-			dev_err(&i2c->dev, "Failed to request POW_LDO2 %d: %d\n",
-				rt5677->pow_ldo2, ret);
-			return ret;
-		}
-	}
-
-	if (gpio_is_valid(rt5677->reset_pin)) {
-		ret = devm_gpio_request_one(&i2c->dev, rt5677->reset_pin,
-					    GPIOF_OUT_INIT_HIGH,
-					    "RT5677 RESET");
-		if (ret < 0) {
-			dev_err(&i2c->dev, "Failed to request RESET %d: %d\n",
-				rt5677->reset_pin, ret);
-			return ret;
-		}
-	}
-
-	if (gpio_is_valid(rt5677->pow_ldo2) ||
-	    gpio_is_valid(rt5677->reset_pin)) {
+	if (rt5677->pow_ldo2 || rt5677->reset_pin) {
 		/* Wait a while until I2C bus becomes available. The datasheet
 		 * does not specify the exact time we should wait, but the startup
 		 * sequence mentions at least a few milliseconds.
@@ -5209,7 +5173,7 @@
 	regmap_read(rt5677->regmap, RT5677_VENDOR_ID2, &val);
 	if (val != RT5677_DEVICE_ID) {
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt5677\n", val);
+			"Device with ID register %#x is not rt5677\n", val);
 		return -ENODEV;
 	}
 
@@ -5273,7 +5237,6 @@
 static struct i2c_driver rt5677_i2c_driver = {
 	.driver = {
 		.name = "rt5677",
-		.owner = THIS_MODULE,
 	},
 	.probe = rt5677_i2c_probe,
 	.remove   = rt5677_i2c_remove,
diff --git a/sound/soc/codecs/rt5677.h b/sound/soc/codecs/rt5677.h
index 7eca38a..d46855a 100644
--- a/sound/soc/codecs/rt5677.h
+++ b/sound/soc/codecs/rt5677.h
@@ -14,6 +14,7 @@
 
 #include <sound/rt5677.h>
 #include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
 
 /* Info */
 #define RT5677_RESET				0x00
@@ -1775,8 +1776,8 @@
 	int pll_src;
 	int pll_in;
 	int pll_out;
-	int pow_ldo2; /* POW_LDO2 pin */
-	int reset_pin; /* RESET pin */
+	struct gpio_desc *pow_ldo2; /* POW_LDO2 pin */
+	struct gpio_desc *reset_pin; /* RESET pin */
 	enum rt5677_type type;
 #ifdef CONFIG_GPIOLIB
 	struct gpio_chip gpio_chip;
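The rt5677 hunks above can drop every gpio_is_valid() check because devm_gpiod_get_optional returns NULL when the property is simply absent, the gpiod_* accessors accept a NULL descriptor as a no-op, and only a genuine lookup failure yields an ERR_PTR. A condensed sketch of the pattern; the "example-enable" name is a placeholder:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_get_enable_gpio(struct device *dev)
{
	struct gpio_desc *enable;

	/* NULL if the property is absent, ERR_PTR on genuine failure */
	enable = devm_gpiod_get_optional(dev, "example-enable",
					 GPIOD_OUT_HIGH);
	if (IS_ERR(enable))
		return PTR_ERR(enable);

	gpiod_set_value_cansleep(enable, 0);	/* no-op when enable is NULL */
	return 0;
}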
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index e673f6c..bfda25e 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -406,11 +406,10 @@
 static const DECLARE_TLV_DB_SCALE(capture_6db_attenuate, -600, 600, 0);
 
 /* tlv for mic gain, 0db 20db 30db 40db */
-static const unsigned int mic_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(mic_gain_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
-	1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0),
-};
+	1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0)
+);
 
 /* tlv for hp volume, -51.5db to 12.0db, step .5db */
 static const DECLARE_TLV_DB_SCALE(headphone_volume, -5150, 50, 0);
@@ -1601,7 +1600,6 @@
 static struct i2c_driver sgtl5000_i2c_driver = {
 	.driver = {
 		   .name = "sgtl5000",
-		   .owner = THIS_MODULE,
 		   .of_match_table = sgtl5000_dt_ids,
 		   },
 	.probe = sgtl5000_i2c_probe,
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
index 3e72964..a8402d0 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -208,7 +208,7 @@
 	return err;
 }
 
-static struct snd_soc_dai_ops si476x_dai_ops = {
+static const struct snd_soc_dai_ops si476x_dai_ops = {
 	.hw_params	= si476x_codec_hw_params,
 	.set_fmt	= si476x_codec_set_dai_fmt,
 };
diff --git a/sound/soc/codecs/sirf-audio-codec.c b/sound/soc/codecs/sirf-audio-codec.c
index 29cb442..6bfd25c 100644
--- a/sound/soc/codecs/sirf-audio-codec.c
+++ b/sound/soc/codecs/sirf-audio-codec.c
@@ -370,11 +370,11 @@
 	return 0;
 }
 
-struct snd_soc_dai_ops sirf_audio_codec_dai_ops = {
+static const struct snd_soc_dai_ops sirf_audio_codec_dai_ops = {
 	.trigger = sirf_audio_codec_trigger,
 };
 
-struct snd_soc_dai_driver sirf_audio_codec_dai = {
+static struct snd_soc_dai_driver sirf_audio_codec_dai = {
 	.name = "sirf-audio-codec",
 	.playback = {
 		.stream_name = "Playback",
diff --git a/sound/soc/codecs/ssm2518.c b/sound/soc/codecs/ssm2518.c
index f30de76..ddb0203 100644
--- a/sound/soc/codecs/ssm2518.c
+++ b/sound/soc/codecs/ssm2518.c
@@ -806,6 +806,14 @@
 	return 0;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id ssm2518_dt_ids[] = {
+	{ .compatible = "adi,ssm2518", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ssm2518_dt_ids);
+#endif
+
 static const struct i2c_device_id ssm2518_i2c_ids[] = {
 	{ "ssm2518", 0 },
 	{ }
@@ -815,7 +823,7 @@
 static struct i2c_driver ssm2518_driver = {
 	.driver = {
 		.name = "ssm2518",
-		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(ssm2518_dt_ids),
 	},
 	.probe = ssm2518_i2c_probe,
 	.remove = ssm2518_i2c_remove,
diff --git a/sound/soc/codecs/ssm2602-i2c.c b/sound/soc/codecs/ssm2602-i2c.c
index 0d9779d..173ba85 100644
--- a/sound/soc/codecs/ssm2602-i2c.c
+++ b/sound/soc/codecs/ssm2602-i2c.c
@@ -52,7 +52,6 @@
 static struct i2c_driver ssm2602_i2c_driver = {
 	.driver = {
 		.name = "ssm2602",
-		.owner = THIS_MODULE,
 		.of_match_table = ssm2602_of_match,
 	},
 	.probe = ssm2602_i2c_probe,
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 69a773a..4452fea 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -75,11 +75,10 @@
 			ssm2602_deemph),
 };
 
-static const unsigned int ssm260x_outmix_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(ssm260x_outmix_tlv,
 	0, 47, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 0),
-	48, 127, TLV_DB_SCALE_ITEM(-7400, 100, 0),
-};
+	48, 127, TLV_DB_SCALE_ITEM(-7400, 100, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(ssm260x_inpga_tlv, -3450, 150, 0);
 static const DECLARE_TLV_DB_SCALE(ssm260x_sidetone_tlv, -1500, 300, 0);
diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
index 84a4f5a..e619d56 100644
--- a/sound/soc/codecs/ssm4567.c
+++ b/sound/soc/codecs/ssm4567.c
@@ -10,6 +10,7 @@
  * Licensed under the GPL-2.
  */
 
+#include <linux/acpi.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/i2c.h>
@@ -173,6 +174,12 @@
 	SND_SOC_DAPM_SWITCH("Amplifier Boost", SSM4567_REG_POWER_CTRL, 3, 1,
 		&ssm4567_amplifier_boost_control),
 
+	SND_SOC_DAPM_SIGGEN("Sense"),
+
+	SND_SOC_DAPM_PGA("Current Sense", SSM4567_REG_POWER_CTRL, 4, 1, NULL, 0),
+	SND_SOC_DAPM_PGA("Voltage Sense", SSM4567_REG_POWER_CTRL, 5, 1, NULL, 0),
+	SND_SOC_DAPM_PGA("VBAT Sense", SSM4567_REG_POWER_CTRL, 6, 1, NULL, 0),
+
 	SND_SOC_DAPM_OUTPUT("OUT"),
 };
 
@@ -180,6 +187,13 @@
 	{ "OUT", NULL, "Amplifier Boost" },
 	{ "Amplifier Boost", "Switch", "DAC" },
 	{ "OUT", NULL, "DAC" },
+
+	{ "Current Sense", NULL, "Sense" },
+	{ "Voltage Sense", NULL, "Sense" },
+	{ "VBAT Sense", NULL, "Sense" },
+	{ "Capture Sense", NULL, "Current Sense" },
+	{ "Capture Sense", NULL, "Voltage Sense" },
+	{ "Capture Sense", NULL, "VBAT Sense" },
 };
 
 static int ssm4567_hw_params(struct snd_pcm_substream *substream,
@@ -387,6 +401,14 @@
 		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
 			SNDRV_PCM_FMTBIT_S32,
 	},
+	.capture = {
+		.stream_name = "Capture Sense",
+		.channels_min = 1,
+		.channels_max = 1,
+		.rates = SNDRV_PCM_RATE_8000_192000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+			SNDRV_PCM_FMTBIT_S32,
+	},
 	.ops = &ssm4567_dai_ops,
 };
 
@@ -456,10 +478,20 @@
 };
 MODULE_DEVICE_TABLE(i2c, ssm4567_i2c_ids);
 
+#ifdef CONFIG_ACPI
+
+static const struct acpi_device_id ssm4567_acpi_match[] = {
+	{ "INT343B", 0 },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, ssm4567_acpi_match);
+
+#endif
+
 static struct i2c_driver ssm4567_driver = {
 	.driver = {
 		.name = "ssm4567",
-		.owner = THIS_MODULE,
+		.acpi_match_table = ACPI_PTR(ssm4567_acpi_match),
 	},
 	.probe = ssm4567_i2c_probe,
 	.remove = ssm4567_i2c_remove,
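
In the ssm4567 hunks, ACPI_PTR() plays the same role for ACPI that
of_match_ptr() plays for OF (NULL without CONFIG_ACPI), and the new sense
path is fed from a SIGGEN rather than a physical input so that DAPM powers
the current/voltage/VBAT sense blocks exactly while the "Capture Sense"
stream is active. A reduced sketch of that routing idea (register and bit
positions here are placeholders):

	static const struct snd_soc_dapm_widget widgets[] = {
		SND_SOC_DAPM_SIGGEN("Sense"),
		/* invert flag set: the register bit is a power-down bit */
		SND_SOC_DAPM_PGA("Voltage Sense", FOO_REG_POWER, 5, 1, NULL, 0),
	};

	static const struct snd_soc_dapm_route routes[] = {
		{ "Voltage Sense", NULL, "Sense" },
		/* the capture stream widget pulls the PGA (and SIGGEN) up */
		{ "Capture Sense", NULL, "Voltage Sense" },
	};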
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index 60eff36..a9844b2 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -1144,7 +1144,6 @@
 static struct i2c_driver sta32x_i2c_driver = {
 	.driver = {
 		.name = "sta32x",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(st32x_dt_ids),
 	},
 	.probe =    sta32x_i2c_probe,
diff --git a/sound/soc/codecs/sta350.c b/sound/soc/codecs/sta350.c
index bd819a3..33a4612 100644
--- a/sound/soc/codecs/sta350.c
+++ b/sound/soc/codecs/sta350.c
@@ -1264,7 +1264,6 @@
 static struct i2c_driver sta350_i2c_driver = {
 	.driver = {
 		.name = "sta350",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(st350_dt_ids),
 	},
 	.probe =    sta350_i2c_probe,
diff --git a/sound/soc/codecs/sta529.c b/sound/soc/codecs/sta529.c
index 4f70378..2cdaca9 100644
--- a/sound/soc/codecs/sta529.c
+++ b/sound/soc/codecs/sta529.c
@@ -339,9 +339,6 @@
 	struct sta529 *sta529;
 	int ret;
 
-	if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
-		return -EINVAL;
-
 	sta529 = devm_kzalloc(&i2c->dev, sizeof(struct sta529), GFP_KERNEL);
 	if (!sta529)
 		return -ENOMEM;
@@ -379,7 +376,6 @@
 static struct i2c_driver sta529_i2c_driver = {
 	.driver = {
 		.name = "sta529",
-		.owner = THIS_MODULE,
 	},
 	.probe		= sta529_i2c_probe,
 	.remove		= sta529_i2c_remove,
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index ed4cca7..0945c51 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -28,6 +28,9 @@
 
 #include "stac9766.h"
 
+#define STAC9766_VENDOR_ID 0x83847666
+#define STAC9766_VENDOR_ID_MASK 0xffffffff
+
 /*
  * STAC9766 register cache
  */
@@ -239,45 +242,12 @@
 	return 0;
 }
 
-static int stac9766_reset(struct snd_soc_codec *codec, int try_warm)
-{
-	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
-
-	if (try_warm && soc_ac97_ops->warm_reset) {
-		soc_ac97_ops->warm_reset(ac97);
-		if (stac9766_ac97_read(codec, 0) == stac9766_reg[0])
-			return 1;
-	}
-
-	soc_ac97_ops->reset(ac97);
-	if (soc_ac97_ops->warm_reset)
-		soc_ac97_ops->warm_reset(ac97);
-	if (stac9766_ac97_read(codec, 0) != stac9766_reg[0])
-		return -EIO;
-	return 0;
-}
-
 static int stac9766_codec_resume(struct snd_soc_codec *codec)
 {
 	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
-	u16 id, reset;
 
-	reset = 0;
-	/* give the codec an AC97 warm reset to start the link */
-reset:
-	if (reset > 5) {
-		dev_err(codec->dev, "Failed to resume\n");
-		return -EIO;
-	}
-	ac97->bus->ops->warm_reset(ac97);
-	id = soc_ac97_ops->read(ac97, AC97_VENDOR_ID2);
-	if (id != 0x4c13) {
-		stac9766_reset(codec, 0);
-		reset++;
-		goto reset;
-	}
-
-	return 0;
+	return snd_ac97_reset(ac97, true, STAC9766_VENDOR_ID,
+		STAC9766_VENDOR_ID_MASK);
 }
 
 static const struct snd_soc_dai_ops stac9766_dai_ops_analog = {
@@ -330,28 +300,15 @@
 static int stac9766_codec_probe(struct snd_soc_codec *codec)
 {
 	struct snd_ac97 *ac97;
-	int ret = 0;
 
-	ac97 = snd_soc_new_ac97_codec(codec);
+	ac97 = snd_soc_new_ac97_codec(codec, STAC9766_VENDOR_ID,
+			STAC9766_VENDOR_ID_MASK);
 	if (IS_ERR(ac97))
 		return PTR_ERR(ac97);
 
 	snd_soc_codec_set_drvdata(codec, ac97);
 
-	/* do a cold reset for the controller and then try
-	 * a warm reset followed by an optional cold reset for codec */
-	stac9766_reset(codec, 0);
-	ret = stac9766_reset(codec, 1);
-	if (ret < 0) {
-		dev_err(codec->dev, "Failed to reset: AC97 link error\n");
-		goto codec_err;
-	}
-
 	return 0;
-
-codec_err:
-	snd_soc_free_ac97_codec(ac97);
-	return ret;
 }
 
 static int stac9766_codec_remove(struct snd_soc_codec *codec)
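
The stac9766 rework above delegates the reset dance to the snd_ac97_reset()
helper this kernel gained. Roughly, the helper tries a warm reset first when
try_warm is set, verifies the vendor ID registers against id/id_mask, and
falls back to a cold reset before giving up; the return convention below is
my reading of the new helper rather than something stated in this diff:

	/* >= 0 on success (1: warm reset sufficed, 0: cold reset used) */
	ret = snd_ac97_reset(ac97, true, STAC9766_VENDOR_ID,
			     STAC9766_VENDOR_ID_MASK);
	if (ret < 0)
		return ret;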
diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c
new file mode 100644
index 0000000..160d61a
--- /dev/null
+++ b/sound/soc/codecs/sti-sas.c
@@ -0,0 +1,628 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/mfd/syscon.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+/* supported chip IDs */
+#define CHIPID_STIH416 0
+#define CHIPID_STIH407 1
+
+/* DAC definitions */
+
+/* stih416 DAC registers */
+/* sysconf 2517: Audio-DAC-Control */
+#define STIH416_AUDIO_DAC_CTRL 0x00000814
+/* sysconf 2519: Audio-Glue-Control */
+#define STIH416_AUDIO_GLUE_CTRL 0x0000081C
+
+#define STIH416_DAC_NOT_STANDBY	0x3
+#define STIH416_DAC_SOFTMUTE	0x4
+#define STIH416_DAC_ANA_NOT_PWR	0x5
+#define STIH416_DAC_NOT_PNDBG	0x6
+
+#define STIH416_DAC_NOT_STANDBY_MASK	BIT(STIH416_DAC_NOT_STANDBY)
+#define STIH416_DAC_SOFTMUTE_MASK	BIT(STIH416_DAC_SOFTMUTE)
+#define STIH416_DAC_ANA_NOT_PWR_MASK	BIT(STIH416_DAC_ANA_NOT_PWR)
+#define STIH416_DAC_NOT_PNDBG_MASK	BIT(STIH416_DAC_NOT_PNDBG)
+
+/* stih407 DAC registers */
+/* sysconf 5041: Audio-Glue-Control */
+#define STIH407_AUDIO_GLUE_CTRL 0x000000A4
+/* sysconf 5042: Audio-DAC-Control */
+#define STIH407_AUDIO_DAC_CTRL 0x000000A8
+
+/* DAC definitions */
+#define STIH407_DAC_SOFTMUTE		0x0
+#define STIH407_DAC_STANDBY_ANA		0x1
+#define STIH407_DAC_STANDBY		0x2
+
+#define STIH407_DAC_SOFTMUTE_MASK	BIT(STIH407_DAC_SOFTMUTE)
+#define STIH407_DAC_STANDBY_ANA_MASK    BIT(STIH407_DAC_STANDBY_ANA)
+#define STIH407_DAC_STANDBY_MASK        BIT(STIH407_DAC_STANDBY)
+
+/* SPDIF definitions */
+#define SPDIF_BIPHASE_ENABLE		0x6
+#define SPDIF_BIPHASE_IDLE		0x7
+
+#define SPDIF_BIPHASE_ENABLE_MASK	BIT(SPDIF_BIPHASE_ENABLE)
+#define SPDIF_BIPHASE_IDLE_MASK		BIT(SPDIF_BIPHASE_IDLE)
+
+enum {
+	STI_SAS_DAI_SPDIF_OUT,
+	STI_SAS_DAI_ANALOG_OUT,
+};
+
+static const struct reg_default stih416_sas_reg_defaults[] = {
+	{ STIH416_AUDIO_GLUE_CTRL, 0x00000040 },
+	{ STIH416_AUDIO_DAC_CTRL, 0x00000000 },
+};
+
+static const struct reg_default stih407_sas_reg_defaults[] = {
+	{ STIH407_AUDIO_DAC_CTRL, 0x00000000 },
+	{ STIH407_AUDIO_GLUE_CTRL, 0x00000040 },
+};
+
+struct sti_dac_audio {
+	struct regmap *regmap;
+	struct regmap *virt_regmap;
+	struct regmap_field  **field;
+	struct reset_control *rst;
+	int mclk;
+};
+
+struct sti_spdif_audio {
+	struct regmap *regmap;
+	struct regmap_field  **field;
+	int mclk;
+};
+
+/* device data structure */
+struct sti_sas_dev_data {
+	const int chipid; /* IC version */
+	const struct regmap_config *regmap;
+	const struct snd_soc_dai_ops *dac_ops;  /* DAC function callbacks */
+	const struct snd_soc_dapm_widget *dapm_widgets; /* DAPM widgets */
+	const int num_dapm_widgets; /* number of DAPM widgets */
+	const struct snd_soc_dapm_route *dapm_routes; /* DAPM routes */
+	const int num_dapm_routes; /* number of DAPM routes */
+};
+
+/* driver data structure */
+struct sti_sas_data {
+	struct device *dev;
+	const struct sti_sas_dev_data *dev_data;
+	struct sti_dac_audio dac;
+	struct sti_spdif_audio spdif;
+};
+
+/* Read a register from the sysconf reg bank */
+static int sti_sas_read_reg(void *context, unsigned int reg,
+			    unsigned int *value)
+{
+	struct sti_sas_data *drvdata = context;
+	int status;
+	u32 val;
+
+	status = regmap_read(drvdata->dac.regmap, reg, &val);
+	*value = (unsigned int)val;
+
+	return status;
+}
+
+/* Write a register to the sysconf reg bank */
+static int sti_sas_write_reg(void *context, unsigned int reg,
+			     unsigned int value)
+{
+	struct sti_sas_data *drvdata = context;
+	int status;
+
+	status = regmap_write(drvdata->dac.regmap, reg, value);
+
+	return status;
+}
+
+static int sti_sas_init_sas_registers(struct snd_soc_codec *codec,
+				      struct sti_sas_data *data)
+{
+	int ret;
+	/*
+	 * DAC and SPDIF are activated by default
+	 * put them in IDLE to save power
+	 */
+
+	/* Initialise bi-phase formatter to disabled */
+	ret = snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
+				  SPDIF_BIPHASE_ENABLE_MASK, 0);
+
+	if (!ret)
+		/* Initialise bi-phase formatter idle value to 0 */
+		ret = snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
+					  SPDIF_BIPHASE_IDLE_MASK, 0);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to update SPDIF registers\n");
+		return ret;
+	}
+
+	/* Init DAC configuration */
+	switch (data->dev_data->chipid) {
+	case CHIPID_STIH407:
+		/* init configuration */
+		ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+					  STIH407_DAC_STANDBY_MASK,
+					  STIH407_DAC_STANDBY_MASK);
+
+		if (!ret)
+			ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+						  STIH407_DAC_STANDBY_ANA_MASK,
+						  STIH407_DAC_STANDBY_ANA_MASK);
+		if (!ret)
+			ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+						  STIH407_DAC_SOFTMUTE_MASK,
+						  STIH407_DAC_SOFTMUTE_MASK);
+		break;
+	case CHIPID_STIH416:
+		ret = snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
+					  STIH416_DAC_NOT_STANDBY_MASK, 0);
+		if (!ret)
+			ret = snd_soc_update_bits(codec,
+						  STIH416_AUDIO_DAC_CTRL,
+						  STIH416_DAC_ANA_NOT_PWR_MASK, 0);
+		if (!ret)
+			ret = snd_soc_update_bits(codec,
+						  STIH416_AUDIO_DAC_CTRL,
+						  STIH416_DAC_NOT_PNDBG_MASK,
+						  0);
+		if (!ret)
+			ret = snd_soc_update_bits(codec,
+						  STIH416_AUDIO_DAC_CTRL,
+						  STIH416_DAC_SOFTMUTE_MASK,
+						  STIH416_DAC_SOFTMUTE_MASK);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to update DAC registers\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+/*
+ * DAC
+ */
+static int sti_sas_dac_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	/* Sanity check only */
+	if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) {
+		dev_err(dai->codec->dev,
+			"%s: ERROR: Unsupported master mask 0x%x\n",
+			__func__, fmt & SND_SOC_DAIFMT_MASTER_MASK);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int stih416_dac_probe(struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
+	struct sti_dac_audio *dac = &drvdata->dac;
+
+	/* Get reset control */
+	dac->rst = devm_reset_control_get(codec->dev, "dac_rst");
+	if (IS_ERR(dac->rst)) {
+		dev_err(dai->codec->dev,
+			"%s: ERROR: DAC reset control not defined!\n",
+			__func__);
+		dac->rst = NULL;
+		return -EFAULT;
+	}
+	/* Put the DAC into reset */
+	reset_control_assert(dac->rst);
+
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget stih416_sas_dapm_widgets[] = {
+	SND_SOC_DAPM_PGA("DAC bandgap", STIH416_AUDIO_DAC_CTRL,
+			 STIH416_DAC_NOT_PNDBG, 0, NULL, 0),
+	SND_SOC_DAPM_OUT_DRV("DAC standby ana", STIH416_AUDIO_DAC_CTRL,
+			     STIH416_DAC_ANA_NOT_PWR, 0, NULL, 0),
+	SND_SOC_DAPM_DAC("DAC standby",  "dac_p", STIH416_AUDIO_DAC_CTRL,
+			 STIH416_DAC_NOT_STANDBY, 0),
+	SND_SOC_DAPM_OUTPUT("DAC Output"),
+};
+
+static const struct snd_soc_dapm_widget stih407_sas_dapm_widgets[] = {
+	SND_SOC_DAPM_OUT_DRV("DAC standby ana", STIH407_AUDIO_DAC_CTRL,
+			     STIH407_DAC_STANDBY_ANA, 1, NULL, 0),
+	SND_SOC_DAPM_DAC("DAC standby",  "dac_p", STIH407_AUDIO_DAC_CTRL,
+			 STIH407_DAC_STANDBY, 1),
+	SND_SOC_DAPM_OUTPUT("DAC Output"),
+};
+
+static const struct snd_soc_dapm_route stih416_sas_route[] = {
+	{"DAC Output", NULL, "DAC bandgap"},
+	{"DAC Output", NULL, "DAC standby ana"},
+	{"DAC standby ana", NULL, "DAC standby"},
+};
+
+static const struct snd_soc_dapm_route stih407_sas_route[] = {
+	{"DAC Output", NULL, "DAC standby ana"},
+	{"DAC standby ana", NULL, "DAC standby"},
+};
+
+static int stih416_sas_dac_mute(struct snd_soc_dai *dai, int mute, int stream)
+{
+	struct snd_soc_codec *codec = dai->codec;
+
+	if (mute) {
+		return snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
+					    STIH416_DAC_SOFTMUTE_MASK,
+					    STIH416_DAC_SOFTMUTE_MASK);
+	} else {
+		return snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
+					    STIH416_DAC_SOFTMUTE_MASK, 0);
+	}
+}
+
+static int stih407_sas_dac_mute(struct snd_soc_dai *dai, int mute, int stream)
+{
+	struct snd_soc_codec *codec = dai->codec;
+
+	if (mute) {
+		return snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+					    STIH407_DAC_SOFTMUTE_MASK,
+					    STIH407_DAC_SOFTMUTE_MASK);
+	} else {
+		return snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+					    STIH407_DAC_SOFTMUTE_MASK,
+					    0);
+	}
+}
+
+/*
+ * SPDIF
+ */
+static int sti_sas_spdif_set_fmt(struct snd_soc_dai *dai,
+				 unsigned int fmt)
+{
+	if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) {
+		dev_err(dai->codec->dev,
+			"%s: ERROR: Unsupported master mask 0x%x\n",
+			__func__, fmt & SND_SOC_DAIFMT_MASTER_MASK);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * sti_sas_spdif_trigger:
+ * The trigger function ensures that the bi-phase formatter (BPF) is
+ * disabled before the CPU DAI is stopped.
+ * This is mandatory to avoid stalling the BPF.
+ */
+static int sti_sas_spdif_trigger(struct snd_pcm_substream *substream, int cmd,
+				 struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		return snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
+					    SPDIF_BIPHASE_ENABLE_MASK,
+					    SPDIF_BIPHASE_ENABLE_MASK);
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		return snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
+					    SPDIF_BIPHASE_ENABLE_MASK,
+					    0);
+	default:
+		return -EINVAL;
+	}
+}
+
+static bool sti_sas_volatile_register(struct device *dev, unsigned int reg)
+{
+	if (reg == STIH407_AUDIO_GLUE_CTRL || reg == STIH416_AUDIO_GLUE_CTRL)
+		return true;
+
+	return false;
+}
+
+/*
+ * CODEC DAIS
+ */
+
+/*
+ * sti_sas_set_sysclk:
+ * Record the MCLK input frequency so that the MCLK/FS ratio can be checked
+ */
+static int sti_sas_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+			      unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
+
+	if (dir == SND_SOC_CLOCK_OUT)
+		return 0;
+
+	if (clk_id != 0)
+		return -EINVAL;
+
+	switch (dai->id) {
+	case STI_SAS_DAI_SPDIF_OUT:
+		drvdata->spdif.mclk = freq;
+		break;
+
+	case STI_SAS_DAI_ANALOG_OUT:
+		drvdata->dac.mclk = freq;
+		break;
+	}
+
+	return 0;
+}
+
+static int sti_sas_prepare(struct snd_pcm_substream *substream,
+			   struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+
+	switch (dai->id) {
+	case STI_SAS_DAI_SPDIF_OUT:
+		if ((drvdata->spdif.mclk / runtime->rate) != 128) {
+			dev_err(codec->dev, "unexpected mclk-fs ratio\n");
+			return -EINVAL;
+		}
+		break;
+	case STI_SAS_DAI_ANALOG_OUT:
+		if ((drvdata->dac.mclk / runtime->rate) != 256) {
+			dev_err(codec->dev, "unexpected mclk-fs ratio\n");
+			return -EINVAL;
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops stih416_dac_ops = {
+	.set_fmt = sti_sas_dac_set_fmt,
+	.mute_stream = stih416_sas_dac_mute,
+	.prepare = sti_sas_prepare,
+	.set_sysclk = sti_sas_set_sysclk,
+};
+
+static const struct snd_soc_dai_ops stih407_dac_ops = {
+	.set_fmt = sti_sas_dac_set_fmt,
+	.mute_stream = stih407_sas_dac_mute,
+	.prepare = sti_sas_prepare,
+	.set_sysclk = sti_sas_set_sysclk,
+};
+
+static const struct regmap_config stih407_sas_regmap = {
+	.reg_bits = 32,
+	.val_bits = 32,
+
+	.max_register = STIH407_AUDIO_DAC_CTRL,
+	.reg_defaults = stih407_sas_reg_defaults,
+	.num_reg_defaults = ARRAY_SIZE(stih407_sas_reg_defaults),
+	.volatile_reg = sti_sas_volatile_register,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_read = sti_sas_read_reg,
+	.reg_write = sti_sas_write_reg,
+};
+
+static const struct regmap_config stih416_sas_regmap = {
+	.reg_bits = 32,
+	.val_bits = 32,
+
+	.max_register = STIH416_AUDIO_GLUE_CTRL,
+	.reg_defaults = stih416_sas_reg_defaults,
+	.num_reg_defaults = ARRAY_SIZE(stih416_sas_reg_defaults),
+	.volatile_reg = sti_sas_volatile_register,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_read = sti_sas_read_reg,
+	.reg_write = sti_sas_write_reg,
+};
+
+static const struct sti_sas_dev_data stih416_data = {
+	.chipid = CHIPID_STIH416,
+	.regmap = &stih416_sas_regmap,
+	.dac_ops = &stih416_dac_ops,
+	.dapm_widgets = stih416_sas_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(stih416_sas_dapm_widgets),
+	.dapm_routes =	stih416_sas_route,
+	.num_dapm_routes = ARRAY_SIZE(stih416_sas_route),
+};
+
+static const struct sti_sas_dev_data stih407_data = {
+	.chipid = CHIPID_STIH407,
+	.regmap = &stih407_sas_regmap,
+	.dac_ops = &stih407_dac_ops,
+	.dapm_widgets = stih407_sas_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(stih407_sas_dapm_widgets),
+	.dapm_routes =	stih407_sas_route,
+	.num_dapm_routes = ARRAY_SIZE(stih407_sas_route),
+};
+
+static struct snd_soc_dai_driver sti_sas_dai[] = {
+	{
+		.name = "sas-dai-spdif-out",
+		.id = STI_SAS_DAI_SPDIF_OUT,
+		.playback = {
+			.stream_name = "spdif_p",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+				 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 |
+				 SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |
+				 SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = (struct snd_soc_dai_ops[]) {
+			{
+				.set_fmt = sti_sas_spdif_set_fmt,
+				.trigger = sti_sas_spdif_trigger,
+				.set_sysclk = sti_sas_set_sysclk,
+				.prepare = sti_sas_prepare,
+			}
+		},
+	},
+	{
+		.name = "sas-dai-dac",
+		.id = STI_SAS_DAI_ANALOG_OUT,
+		.playback = {
+			.stream_name = "dac_p",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+	},
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int sti_sas_resume(struct snd_soc_codec *codec)
+{
+	struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
+
+	return sti_sas_init_sas_registers(codec, drvdata);
+}
+#else
+#define sti_sas_resume NULL
+#endif
+
+static int sti_sas_codec_probe(struct snd_soc_codec *codec)
+{
+	struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
+	int ret;
+
+	ret = sti_sas_init_sas_registers(codec, drvdata);
+
+	return ret;
+}
+
+static struct snd_soc_codec_driver sti_sas_driver = {
+	.probe = sti_sas_codec_probe,
+	.resume = sti_sas_resume,
+};
+
+static const struct of_device_id sti_sas_dev_match[] = {
+	{
+		.compatible = "st,stih416-sas-codec",
+		.data = &stih416_data,
+	},
+	{
+		.compatible = "st,stih407-sas-codec",
+		.data = &stih407_data,
+	},
+	{},
+};
+
+static int sti_sas_driver_probe(struct platform_device *pdev)
+{
+	struct device_node *pnode = pdev->dev.of_node;
+	struct sti_sas_data *drvdata;
+	const struct of_device_id *of_id;
+
+	/* Allocate device structure */
+	drvdata = devm_kzalloc(&pdev->dev, sizeof(struct sti_sas_data),
+			       GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	/* Populate data structure depending on compatibility */
+	of_id = of_match_node(sti_sas_dev_match, pnode);
+	if (!of_id->data) {
+		dev_err(&pdev->dev, "data associated with device is missing\n");
+		return -EINVAL;
+	}
+
+	drvdata->dev_data = (const struct sti_sas_dev_data *)of_id->data;
+
+	/* Initialise device structure */
+	drvdata->dev = &pdev->dev;
+
+	/* Request the DAC & SPDIF registers memory region */
+	drvdata->dac.virt_regmap = devm_regmap_init(&pdev->dev, NULL, drvdata,
+						    drvdata->dev_data->regmap);
+	if (IS_ERR(drvdata->dac.virt_regmap)) {
+		dev_err(&pdev->dev, "audio registers not enabled\n");
+		return PTR_ERR(drvdata->dac.virt_regmap);
+	}
+
+	/* Request the syscon region */
+	drvdata->dac.regmap =
+		syscon_regmap_lookup_by_phandle(pnode, "st,syscfg");
+	if (IS_ERR(drvdata->dac.regmap)) {
+		dev_err(&pdev->dev, "syscon registers not available\n");
+		return PTR_ERR(drvdata->dac.regmap);
+	}
+	drvdata->spdif.regmap = drvdata->dac.regmap;
+
+	/* Set DAC dai probe */
+	if (drvdata->dev_data->chipid == CHIPID_STIH416)
+		sti_sas_dai[STI_SAS_DAI_ANALOG_OUT].probe = stih416_dac_probe;
+
+	sti_sas_dai[STI_SAS_DAI_ANALOG_OUT].ops = drvdata->dev_data->dac_ops;
+
+	/* Set DAPM widgets and routes */
+	sti_sas_driver.dapm_widgets = drvdata->dev_data->dapm_widgets;
+	sti_sas_driver.num_dapm_widgets = drvdata->dev_data->num_dapm_widgets;
+
+	sti_sas_driver.dapm_routes = drvdata->dev_data->dapm_routes;
+	sti_sas_driver.num_dapm_routes = drvdata->dev_data->num_dapm_routes;
+
+	/* Store context */
+	dev_set_drvdata(&pdev->dev, drvdata);
+
+	return snd_soc_register_codec(&pdev->dev, &sti_sas_driver,
+					sti_sas_dai,
+					ARRAY_SIZE(sti_sas_dai));
+}
+
+static int sti_sas_driver_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+
+	return 0;
+}
+
+static struct platform_driver sti_sas_platform_driver = {
+	.driver = {
+		.name = "sti-sas-codec",
+		.of_match_table = sti_sas_dev_match,
+	},
+	.probe = sti_sas_driver_probe,
+	.remove = sti_sas_driver_remove,
+};
+
+module_platform_driver(sti_sas_platform_driver);
+
+MODULE_DESCRIPTION("Audio codec driver for STMicroelectronics STi platforms");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
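
The new sti-sas driver above deserves a remark: the codec has no I2C/SPI
registers at all. Its controls live in a system configuration (syscon) bank,
so the driver builds a regmap with custom reg_read/reg_write callbacks that
simply forward to the syscon regmap obtained via the st,syscfg phandle; that
indirection is what lets snd_soc_update_bits() and the regcache operate on
sysconf registers. Condensed to its core (types as in the file above):

	static int bridge_read(void *context, unsigned int reg,
			       unsigned int *val)
	{
		struct sti_sas_data *drvdata = context;

		/* forward to the syscon bank */
		return regmap_read(drvdata->dac.regmap, reg, val);
	}

	/* registered with devm_regmap_init(dev, NULL, drvdata, &cfg),
	 * where cfg.reg_read/.reg_write point at the bridge callbacks */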
diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
index 4f25a7d..e3a0bca 100644
--- a/sound/soc/codecs/tas2552.c
+++ b/sound/soc/codecs/tas2552.c
@@ -38,7 +38,7 @@
 
 #include "tas2552.h"
 
-static struct reg_default tas2552_reg_defs[] = {
+static const struct reg_default tas2552_reg_defs[] = {
 	{TAS2552_CFG_1, 0x22},
 	{TAS2552_CFG_3, 0x80},
 	{TAS2552_DOUT, 0x00},
@@ -493,8 +493,7 @@
 	regcache_cache_only(tas2552->regmap, true);
 	regcache_mark_dirty(tas2552->regmap);
 
-	if (tas2552->enable_gpio)
-		gpiod_set_value(tas2552->enable_gpio, 0);
+	gpiod_set_value(tas2552->enable_gpio, 0);
 
 	return 0;
 }
@@ -503,8 +502,7 @@
 {
 	struct tas2552_data *tas2552 = dev_get_drvdata(dev);
 
-	if (tas2552->enable_gpio)
-		gpiod_set_value(tas2552->enable_gpio, 1);
+	gpiod_set_value(tas2552->enable_gpio, 1);
 
 	tas2552_sw_shutdown(tas2552, 0);
 
@@ -520,7 +518,7 @@
 			   NULL)
 };
 
-static struct snd_soc_dai_ops tas2552_speaker_dai_ops = {
+static const struct snd_soc_dai_ops tas2552_speaker_dai_ops = {
 	.hw_params	= tas2552_hw_params,
 	.prepare	= tas2552_prepare,
 	.set_sysclk	= tas2552_set_dai_sysclk,
@@ -585,8 +583,7 @@
 		return ret;
 	}
 
-	if (tas2552->enable_gpio)
-		gpiod_set_value(tas2552->enable_gpio, 1);
+	gpiod_set_value(tas2552->enable_gpio, 1);
 
 	ret = pm_runtime_get_sync(codec->dev);
 	if (ret < 0) {
@@ -610,8 +607,7 @@
 	return 0;
 
 probe_fail:
-	if (tas2552->enable_gpio)
-		gpiod_set_value(tas2552->enable_gpio, 0);
+	gpiod_set_value(tas2552->enable_gpio, 0);
 
 	regulator_bulk_disable(ARRAY_SIZE(tas2552->supplies),
 					tas2552->supplies);
@@ -624,8 +620,7 @@
 
 	pm_runtime_put(codec->dev);
 
-	if (tas2552->enable_gpio)
-		gpiod_set_value(tas2552->enable_gpio, 0);
+	gpiod_set_value(tas2552->enable_gpio, 0);
 
 	return 0;
 };
@@ -769,7 +764,6 @@
 static struct i2c_driver tas2552_i2c_driver = {
 	.driver = {
 		.name = "tas2552",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(tas2552_of_match),
 		.pm = &tas2552_pm,
 	},
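
The dropped NULL checks in the tas2552 hunks rely on a property of the gpiod
consumer API: gpiod_set_value() is a no-op for a NULL descriptor, and NULL
is exactly what devm_gpiod_get_optional() returns when the line is not
described. A sketch of the contract being relied on (the con_id is an
assumption, not read out of tas2552.c):

	/* optional line: NULL, not an error, when absent */
	tas2552->enable_gpio = devm_gpiod_get_optional(dev, "enable",
						       GPIOD_OUT_LOW);
	if (IS_ERR(tas2552->enable_gpio))
		return PTR_ERR(tas2552->enable_gpio);

	gpiod_set_value(tas2552->enable_gpio, 1);	/* safe if NULL */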
diff --git a/sound/soc/codecs/tas2552.h b/sound/soc/codecs/tas2552.h
index 5746f8f..e34752b 100644
--- a/sound/soc/codecs/tas2552.h
+++ b/sound/soc/codecs/tas2552.h
@@ -42,7 +42,7 @@
 #define TAS2552_BOOST_APT_CTRL		0x14
 #define TAS2552_VER_NUM			0x16
 #define TAS2552_VBAT_DATA		0x19
-#define TAS2552_MAX_REG			0x20
+#define TAS2552_MAX_REG			TAS2552_VBAT_DATA
 
 /* CFG1 Register Masks */
 #define TAS2552_DEV_RESET		(1 << 0)
diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c
index 32942be..d49d25d 100644
--- a/sound/soc/codecs/tas5086.c
+++ b/sound/soc/codecs/tas5086.c
@@ -266,10 +266,14 @@
 	struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
 	int i, val = 0;
 
-	if (priv->deemph)
-		for (i = 0; i < ARRAY_SIZE(tas5086_deemph); i++)
-			if (tas5086_deemph[i] == priv->rate)
+	if (priv->deemph) {
+		for (i = 0; i < ARRAY_SIZE(tas5086_deemph); i++) {
+			if (tas5086_deemph[i] == priv->rate) {
 				val = i;
+				break;
+			}
+		}
+	}
 
 	return regmap_update_bits(priv->regmap, TAS5086_SYS_CONTROL_1,
 				  TAS5086_DEEMPH_MASK, val);
@@ -994,7 +998,6 @@
 static struct i2c_driver tas5086_i2c_driver = {
 	.driver = {
 		.name	= "tas5086",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(tas5086_dt_ids),
 	},
 	.id_table	= tas5086_i2c_id,
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
index 85bcc37..39307ad 100644
--- a/sound/soc/codecs/tas571x.c
+++ b/sound/soc/codecs/tas571x.c
@@ -179,7 +179,7 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
 			if (!IS_ERR(priv->mclk)) {
 				ret = clk_prepare_enable(priv->mclk);
 				if (ret) {
diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c
index aab0af6..cb5310d 100644
--- a/sound/soc/codecs/tfa9879.c
+++ b/sound/soc/codecs/tfa9879.c
@@ -160,7 +160,7 @@
 	return 0;
 }
 
-static struct reg_default tfa9879_regs[] = {
+static const struct reg_default tfa9879_regs[] = {
 	{ TFA9879_DEVICE_CONTROL,	0x0000 }, /* 0x00 */
 	{ TFA9879_SERIAL_INTERFACE_1,	0x0a18 }, /* 0x01 */
 	{ TFA9879_PCM_IOM2_FORMAT_1,	0x0007 }, /* 0x02 */
@@ -314,7 +314,6 @@
 static struct i2c_driver tfa9879_i2c_driver = {
 	.driver = {
 		.name = "tfa9879",
-		.owner = THIS_MODULE,
 	},
 	.probe = tfa9879_i2c_probe,
 	.remove = tfa9879_i2c_remove,
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index c4c960f..ee4def4 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -1121,7 +1121,7 @@
 	.num_dapm_routes	= ARRAY_SIZE(aic31xx_audio_map),
 };
 
-static struct snd_soc_dai_ops aic31xx_dai_ops = {
+static const struct snd_soc_dai_ops aic31xx_dai_ops = {
 	.hw_params	= aic31xx_hw_params,
 	.set_sysclk	= aic31xx_set_dai_sysclk,
 	.set_fmt	= aic31xx_set_dai_fmt,
@@ -1283,7 +1283,6 @@
 static struct i2c_driver aic31xx_i2c_driver = {
 	.driver = {
 		.name	= "tlv320aic31xx-codec",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(tlv320aic31xx_of_match),
 	},
 	.probe		= aic31xx_i2c_probe,
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index ad6cb90..f2d3191 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -871,7 +871,6 @@
 static struct i2c_driver aic32x4_i2c_driver = {
 	.driver = {
 		.name = "tlv320aic32x4",
-		.owner = THIS_MODULE,
 		.of_match_table = aic32x4_of_id,
 	},
 	.probe =    aic32x4_i2c_probe,
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index a7cf19b..1a82b19 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1668,7 +1668,7 @@
 };
 MODULE_DEVICE_TABLE(i2c, aic3x_i2c_id);
 
-static const struct reg_default aic3007_class_d[] = {
+static const struct reg_sequence aic3007_class_d[] = {
 	/* Class-D speaker driver init; datasheet p. 46 */
 	{ AIC3X_PAGE_SELECT, 0x0D },
 	{ 0xD, 0x0D },
@@ -1825,7 +1825,6 @@
 static struct i2c_driver aic3x_i2c_driver = {
 	.driver = {
 		.name = "tlv320aic3x-codec",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(tlv320aic3x_of_match),
 	},
 	.probe	= aic3x_i2c_probe,
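
The aic3007_class_d retyping above (and the matching wm2200/wm5100 hunks
below) moves register patches from struct reg_default to struct
reg_sequence. The two share the reg/def layout, but reg_sequence is the type
regmap_multi_reg_write() and regmap_register_patch() take: it permits the
same register to appear twice (see the repeated page selects in
aic3007_class_d) and carries an optional post-write delay. As I understand
this kernel's regmap API:

	struct reg_sequence {
		unsigned int reg;
		unsigned int def;
		unsigned int delay_us;	/* optional delay after the write */
	};

	static const struct reg_sequence init_seq[] = {
		{ AIC3X_PAGE_SELECT, 0x0D },
		{ 0x0D, 0x0D },		/* duplicate registers are legal */
	};

	ret = regmap_multi_reg_write(regmap, init_seq, ARRAY_SIZE(init_seq));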
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index d67a311..781398fb 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -1585,7 +1585,6 @@
 static struct i2c_driver tlv320dac33_i2c_driver = {
 	.driver = {
 		.name = "tlv320dac33-codec",
-		.owner = THIS_MODULE,
 	},
 	.probe		= dac33_i2c_probe,
 	.remove		= dac33_i2c_remove,
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index 6fac9e0..11d85c5 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -259,8 +259,7 @@
  * TPA6130 volume. From -59.5 to 4 dB with increasing step size when going
  * down in gain.
  */
-static const unsigned int tpa6130_tlv[] = {
-	TLV_DB_RANGE_HEAD(10),
+static const DECLARE_TLV_DB_RANGE(tpa6130_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(-5950, 600, 0),
 	2, 3, TLV_DB_SCALE_ITEM(-5000, 250, 0),
 	4, 5, TLV_DB_SCALE_ITEM(-4550, 160, 0),
@@ -270,8 +269,8 @@
 	12, 13, TLV_DB_SCALE_ITEM(-3040, 180, 0),
 	14, 20, TLV_DB_SCALE_ITEM(-2710, 110, 0),
 	21, 37, TLV_DB_SCALE_ITEM(-1960, 74, 0),
-	38, 63, TLV_DB_SCALE_ITEM(-720, 45, 0),
-};
+	38, 63, TLV_DB_SCALE_ITEM(-720, 45, 0)
+);
 
 static const struct snd_kcontrol_new tpa6130a2_controls[] = {
 	SOC_SINGLE_EXT_TLV("TPA6130A2 Headphone Playback Volume",
@@ -280,12 +279,11 @@
 		       tpa6130_tlv),
 };
 
-static const unsigned int tpa6140_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(tpa6140_tlv,
 	0, 8, TLV_DB_SCALE_ITEM(-5900, 400, 0),
 	9, 16, TLV_DB_SCALE_ITEM(-2500, 200, 0),
-	17, 31, TLV_DB_SCALE_ITEM(-1000, 100, 0),
-};
+	17, 31, TLV_DB_SCALE_ITEM(-1000, 100, 0)
+);
 
 static const struct snd_kcontrol_new tpa6140a2_controls[] = {
 	SOC_SINGLE_EXT_TLV("TPA6140A2 Headphone Playback Volume",
@@ -488,7 +486,6 @@
 static struct i2c_driver tpa6130a2_i2c_driver = {
 	.driver = {
 		.name = "tpa6130a2",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(tpa6130a2_of_match),
 	},
 	.probe = tpa6130a2_probe,
diff --git a/sound/soc/codecs/ts3a227e.c b/sound/soc/codecs/ts3a227e.c
index 12232d7..4356843 100644
--- a/sound/soc/codecs/ts3a227e.c
+++ b/sound/soc/codecs/ts3a227e.c
@@ -23,11 +23,13 @@
 #include "ts3a227e.h"
 
 struct ts3a227e {
+	struct device *dev;
 	struct regmap *regmap;
 	struct snd_soc_jack *jack;
 	bool plugged;
 	bool mic_present;
 	unsigned int buttons_held;
+	int irq;
 };
 
 /* Button values to be reported on the jack */
@@ -189,16 +191,28 @@
 	struct ts3a227e *ts3a227e = (struct ts3a227e *)data;
 	struct regmap *regmap = ts3a227e->regmap;
 	unsigned int int_reg, kp_int_reg, acc_reg, i;
+	struct device *dev = ts3a227e->dev;
+	int ret;
 
 	/* Check for plug/unplug. */
-	regmap_read(regmap, TS3A227E_REG_INTERRUPT, &int_reg);
+	ret = regmap_read(regmap, TS3A227E_REG_INTERRUPT, &int_reg);
+	if (ret) {
+		dev_err(dev, "failed to clear interrupt ret=%d\n", ret);
+		return IRQ_NONE;
+	}
+
 	if (int_reg & (DETECTION_COMPLETE_EVENT | INS_REM_EVENT)) {
 		regmap_read(regmap, TS3A227E_REG_ACCESSORY_STATUS, &acc_reg);
 		ts3a227e_new_jack_state(ts3a227e, acc_reg);
 	}
 
 	/* Report any key events. */
-	regmap_read(regmap, TS3A227E_REG_KP_INTERRUPT, &kp_int_reg);
+	ret = regmap_read(regmap, TS3A227E_REG_KP_INTERRUPT, &kp_int_reg);
+	if (ret) {
+		dev_err(dev, "failed to clear key interrupt ret=%d\n", ret);
+		return IRQ_NONE;
+	}
+
 	for (i = 0; i < TS3A227E_NUM_BUTTONS; i++) {
 		if (kp_int_reg & PRESS_MASK(i))
 			ts3a227e->buttons_held |= (1 << i);
@@ -283,6 +297,8 @@
 		return -ENOMEM;
 
 	i2c_set_clientdata(i2c, ts3a227e);
+	ts3a227e->dev = dev;
+	ts3a227e->irq = i2c->irq;
 
 	ts3a227e->regmap = devm_regmap_init_i2c(i2c, &ts3a227e_regmap_config);
 	if (IS_ERR(ts3a227e->regmap))
@@ -320,6 +336,32 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int ts3a227e_suspend(struct device *dev)
+{
+	struct ts3a227e *ts3a227e = dev_get_drvdata(dev);
+
+	dev_dbg(ts3a227e->dev, "suspend disable irq\n");
+	disable_irq(ts3a227e->irq);
+
+	return 0;
+}
+
+static int ts3a227e_resume(struct device *dev)
+{
+	struct ts3a227e *ts3a227e = dev_get_drvdata(dev);
+
+	dev_dbg(ts3a227e->dev, "resume enable irq\n");
+	enable_irq(ts3a227e->irq);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops ts3a227e_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(ts3a227e_suspend, ts3a227e_resume)
+};
+
 static const struct i2c_device_id ts3a227e_i2c_ids[] = {
 	{ "ts3a227e", 0 },
 	{ }
@@ -335,7 +377,7 @@
 static struct i2c_driver ts3a227e_driver = {
 	.driver = {
 		.name = "ts3a227e",
-		.owner = THIS_MODULE,
+		.pm = &ts3a227e_pm,
 		.of_match_table = of_match_ptr(ts3a227e_of_match),
 	},
 	.probe = ts3a227e_i2c_probe,
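
The new ts3a227e PM ops bracket system sleep with disable_irq()/enable_irq()
so that a jack interrupt can neither fire over a suspended I2C bus nor race
a handler that is still running (disable_irq() waits for handlers to
finish). One common way to spell the same pairing, sketched with placeholder
names:

	static int foo_suspend(struct device *dev)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);

		disable_irq(priv->irq);	/* waits for a running handler */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);

		enable_irq(priv->irq);
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);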
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index 90f5f04..2713e18 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -524,12 +524,11 @@
 	SOC_DAPM_SINGLE("Switch", TWL4030_REG_VDL_APGA_CTL, 2, 1, 0);
 
 /* Digital bypass gain, mute instead of -30dB */
-static const unsigned int twl4030_dapm_dbypass_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(twl4030_dapm_dbypass_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(-3000, 600, 1),
 	2, 3, TLV_DB_SCALE_ITEM(-2400, 0, 0),
-	4, 7, TLV_DB_SCALE_ITEM(-1800, 600, 0),
-};
+	4, 7, TLV_DB_SCALE_ITEM(-1800, 600, 0)
+);
 
 /* Digital bypass left (TX1L -> RX2L) */
 static const struct snd_kcontrol_new twl4030_dapm_dbypassl_control =
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index 913edf2..e190263 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -37,74 +37,53 @@
 
 	struct snd_pcm_substream *master_substream;
 	struct snd_pcm_substream *slave_substream;
+
+	struct regmap *regmap;
+	struct uda134x_platform_data *pd;
 };
 
-/* In-data addresses are hard-coded into the reg-cache values */
-static const char uda134x_reg[UDA134X_REGS_NUM] = {
-	/* Extended address registers */
-	0x04, 0x04, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
-	/* Status, data regs */
-	0x00, 0x83, 0x00, 0x40, 0x80, 0xC0, 0x00,
+static const struct reg_default uda134x_reg_defaults[] = {
+	{ UDA134X_EA000, 0x04 },
+	{ UDA134X_EA001, 0x04 },
+	{ UDA134X_EA010, 0x04 },
+	{ UDA134X_EA011, 0x00 },
+	{ UDA134X_EA100, 0x00 },
+	{ UDA134X_EA101, 0x00 },
+	{ UDA134X_EA110, 0x00 },
+	{ UDA134X_EA111, 0x00 },
+	{ UDA134X_STATUS0, 0x00 },
+	{ UDA134X_STATUS1, 0x03 },
+	{ UDA134X_DATA000, 0x00 },
+	{ UDA134X_DATA001, 0x00 },
+	{ UDA134X_DATA010, 0x00 },
+	{ UDA134X_DATA011, 0x00 },
+	{ UDA134X_DATA1, 0x00 },
 };
 
 /*
- * The codec has no support for reading its registers except for peak level...
- */
-static inline unsigned int uda134x_read_reg_cache(struct snd_soc_codec *codec,
-	unsigned int reg)
-{
-	u8 *cache = codec->reg_cache;
-
-	if (reg >= UDA134X_REGS_NUM)
-		return -1;
-	return cache[reg];
-}
-
-/*
- * Write the register cache
- */
-static inline void uda134x_write_reg_cache(struct snd_soc_codec *codec,
-	u8 reg, unsigned int value)
-{
-	u8 *cache = codec->reg_cache;
-
-	if (reg >= UDA134X_REGS_NUM)
-		return;
-	cache[reg] = value;
-}
-
-/*
  * Write to the uda134x registers
  *
  */
-static int uda134x_write(struct snd_soc_codec *codec, unsigned int reg,
+static int uda134x_regmap_write(void *context, unsigned int reg,
 	unsigned int value)
 {
+	struct uda134x_platform_data *pd = context;
 	int ret;
 	u8 addr;
 	u8 data = value;
-	struct uda134x_platform_data *pd = codec->control_data;
-
-	pr_debug("%s reg: %02X, value:%02X\n", __func__, reg, value);
-
-	if (reg >= UDA134X_REGS_NUM) {
-		printk(KERN_ERR "%s unknown register: reg: %u",
-		       __func__, reg);
-		return -EINVAL;
-	}
-
-	uda134x_write_reg_cache(codec, reg, value);
 
 	switch (reg) {
 	case UDA134X_STATUS0:
 	case UDA134X_STATUS1:
 		addr = UDA134X_STATUS_ADDR;
+		data |= (reg - UDA134X_STATUS0) << 7;
 		break;
 	case UDA134X_DATA000:
 	case UDA134X_DATA001:
 	case UDA134X_DATA010:
 	case UDA134X_DATA011:
 		addr = UDA134X_DATA0_ADDR;
+		data |= (reg - UDA134X_DATA000) << 6;
 		break;
 	case UDA134X_DATA1:
 		addr = UDA134X_DATA1_ADDR;
@@ -133,27 +112,28 @@
 
 static inline void uda134x_reset(struct snd_soc_codec *codec)
 {
-	u8 reset_reg = uda134x_read_reg_cache(codec, UDA134X_STATUS0);
-	uda134x_write(codec, UDA134X_STATUS0, reset_reg | (1<<6));
+	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
+	unsigned int mask = 1<<6;
+
+	regmap_update_bits(uda134x->regmap, UDA134X_STATUS0, mask, mask);
 	msleep(1);
-	uda134x_write(codec, UDA134X_STATUS0, reset_reg & ~(1<<6));
+	regmap_update_bits(uda134x->regmap, UDA134X_STATUS0, mask, 0);
 }
 
 static int uda134x_mute(struct snd_soc_dai *dai, int mute)
 {
-	struct snd_soc_codec *codec = dai->codec;
-	u8 mute_reg = uda134x_read_reg_cache(codec, UDA134X_DATA010);
+	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(dai->codec);
+	unsigned int mask = 1<<2;
+	unsigned int val;
 
 	pr_debug("%s mute: %d\n", __func__, mute);
 
 	if (mute)
-		mute_reg |= (1<<2);
+		val = mask;
 	else
-		mute_reg &= ~(1<<2);
+		val = 0;
 
-	uda134x_write(codec, UDA134X_DATA010, mute_reg);
-
-	return 0;
+	return regmap_update_bits(uda134x->regmap, UDA134X_DATA010, mask, val);
 }
 
 static int uda134x_startup(struct snd_pcm_substream *substream,
@@ -205,7 +185,7 @@
 {
 	struct snd_soc_codec *codec = dai->codec;
 	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
-	u8 hw_params;
+	unsigned int hw_params = 0;
 
 	if (substream == uda134x->slave_substream) {
 		pr_debug("%s ignoring hw_params for slave substream\n",
@@ -213,10 +193,6 @@
 		return 0;
 	}
 
-	hw_params = uda134x_read_reg_cache(codec, UDA134X_STATUS0);
-	hw_params &= STATUS0_SYSCLK_MASK;
-	hw_params &= STATUS0_DAIFMT_MASK;
-
 	pr_debug("%s sysclk: %d, rate:%d\n", __func__,
 		 uda134x->sysclk, params_rate(params));
 
@@ -267,9 +243,8 @@
 		return -EINVAL;
 	}
 
-	uda134x_write(codec, UDA134X_STATUS0, hw_params);
-
-	return 0;
+	return regmap_update_bits(uda134x->regmap, UDA134X_STATUS0,
+		STATUS0_SYSCLK_MASK | STATUS0_DAIFMT_MASK, hw_params);
 }
 
 static int uda134x_set_dai_sysclk(struct snd_soc_dai *codec_dai,
@@ -324,10 +299,8 @@
 static int uda134x_set_bias_level(struct snd_soc_codec *codec,
 				  enum snd_soc_bias_level level)
 {
-	struct uda134x_platform_data *pd = codec->control_data;
-	int i;
-	u8 *cache = codec->reg_cache;
-
+	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
+	struct uda134x_platform_data *pd = uda134x->pd;
 	pr_debug("%s bias level %d\n", __func__, level);
 
 	switch (level) {
@@ -337,17 +310,17 @@
 		/* power on */
 		if (pd->power) {
 			pd->power(1);
-			/* Sync reg_cache with the hardware */
-			for (i = 0; i < ARRAY_SIZE(uda134x_reg); i++)
-				codec->driver->write(codec, i, *cache++);
+			regcache_sync(uda134x->regmap);
 		}
 		break;
 	case SND_SOC_BIAS_STANDBY:
 		break;
 	case SND_SOC_BIAS_OFF:
 		/* power off */
-		if (pd->power)
+		if (pd->power) {
 			pd->power(0);
+			regcache_mark_dirty(uda134x->regmap);
+		}
 		break;
 	}
 	return 0;
@@ -478,21 +451,14 @@
 static int uda134x_soc_probe(struct snd_soc_codec *codec)
 {
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
-	struct uda134x_priv *uda134x;
-	struct uda134x_platform_data *pd = codec->component.card->dev->platform_data;
+	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
+	struct uda134x_platform_data *pd = uda134x->pd;
 	const struct snd_soc_dapm_widget *widgets;
 	unsigned num_widgets;
-
 	int ret;
 
 	printk(KERN_INFO "UDA134X SoC Audio Codec\n");
 
-	if (!pd) {
-		printk(KERN_ERR "UDA134X SoC codec: "
-		       "missing L3 bitbang function\n");
-		return -ENODEV;
-	}
-
 	switch (pd->model) {
 	case UDA134X_UDA1340:
 	case UDA134X_UDA1341:
@@ -506,13 +472,6 @@
 		return -EINVAL;
 	}
 
-	uda134x = kzalloc(sizeof(struct uda134x_priv), GFP_KERNEL);
-	if (uda134x == NULL)
-		return -ENOMEM;
-	snd_soc_codec_set_drvdata(codec, uda134x);
-
-	codec->control_data = pd;
-
 	if (pd->power)
 		pd->power(1);
 
@@ -530,7 +489,6 @@
 	if (ret) {
 		printk(KERN_ERR "%s failed to register dapm controls: %d",
 			__func__, ret);
-		kfree(uda134x);
 		return ret;
 	}
 
@@ -551,36 +509,19 @@
 	default:
 		printk(KERN_ERR "%s unknown codec type: %d",
 			__func__, pd->model);
-		kfree(uda134x);
 		return -EINVAL;
 	}
 
 	if (ret < 0) {
 		printk(KERN_ERR "UDA134X: failed to register controls\n");
-		kfree(uda134x);
 		return ret;
 	}
 
 	return 0;
 }
 
-/* power down chip */
-static int uda134x_soc_remove(struct snd_soc_codec *codec)
-{
-	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
-
-	kfree(uda134x);
-	return 0;
-}
-
 static struct snd_soc_codec_driver soc_codec_dev_uda134x = {
 	.probe =        uda134x_soc_probe,
-	.remove =       uda134x_soc_remove,
-	.reg_cache_size = sizeof(uda134x_reg),
-	.reg_word_size = sizeof(u8),
-	.reg_cache_default = uda134x_reg,
-	.reg_cache_step = 1,
-	.read = uda134x_read_reg_cache,
 	.set_bias_level = uda134x_set_bias_level,
 	.suspend_bias_off = true,
 
@@ -590,8 +531,39 @@
 	.num_dapm_routes = ARRAY_SIZE(uda134x_dapm_routes),
 };
 
+static const struct regmap_config uda134x_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = UDA134X_DATA1,
+	.reg_defaults = uda134x_reg_defaults,
+	.num_reg_defaults = ARRAY_SIZE(uda134x_reg_defaults),
+	.cache_type = REGCACHE_RBTREE,
+
+	.reg_write = uda134x_regmap_write,
+};
+
 static int uda134x_codec_probe(struct platform_device *pdev)
 {
+	struct uda134x_platform_data *pd = pdev->dev.platform_data;
+	struct uda134x_priv *uda134x;
+
+	if (!pd) {
+		dev_err(&pdev->dev, "Missing L3 bitbang function\n");
+		return -ENODEV;
+	}
+
+	uda134x = devm_kzalloc(&pdev->dev, sizeof(*uda134x), GFP_KERNEL);
+	if (!uda134x)
+		return -ENOMEM;
+
+	uda134x->pd = pd;
+	platform_set_drvdata(pdev, uda134x);
+
+	uda134x->regmap = devm_regmap_init(&pdev->dev, NULL, pd,
+		&uda134x_regmap_config);
+	if (IS_ERR(uda134x->regmap))
+		return PTR_ERR(uda134x->regmap);
+
 	return snd_soc_register_codec(&pdev->dev,
 			&soc_codec_dev_uda134x, &uda134x_dai, 1);
 }
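
The uda134x conversion above retires the legacy ASoC register cache
(.reg_cache_size/.read plus hand-rolled cache replay) in favour of a regmap
with a write-only bus: the chip cannot be read back, so no .reg_read is
provided and REGCACHE_RBTREE answers every read, while
regcache_mark_dirty()/regcache_sync() replace the manual register replay
across power transitions. A minimal write-only regmap of that shape, with
placeholder names:

	static const struct reg_default foo_defaults[] = {
		{ 0x00, 0x04 },
	};

	static int foo_reg_write(void *context, unsigned int reg,
				 unsigned int value)
	{
		/* push <reg, value> out over the write-only control bus */
		return 0;
	}

	static const struct regmap_config foo_regmap_cfg = {
		.reg_bits = 8,
		.val_bits = 8,
		.max_register = 0x00,
		.cache_type = REGCACHE_RBTREE,	/* serves all reads */
		.reg_defaults = foo_defaults,
		.num_reg_defaults = ARRAY_SIZE(foo_defaults),
		.reg_write = foo_reg_write,	/* deliberately no .reg_read */
	};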
diff --git a/sound/soc/codecs/uda134x.h b/sound/soc/codecs/uda134x.h
index 9faae06..e41ab38 100644
--- a/sound/soc/codecs/uda134x.h
+++ b/sound/soc/codecs/uda134x.h
@@ -26,8 +26,6 @@
 #define UDA134X_DATA011 13
 #define UDA134X_DATA1   14
 
-#define UDA134X_REGS_NUM 15
-
 #define STATUS0_DAIFMT_MASK (~(7<<1))
 #define STATUS0_SYSCLK_MASK (~(3<<4))
 
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 6e159f5..35f0469 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -269,12 +269,11 @@
  * from -66 dB in 0.5 dB steps (2 dB steps, really) and
  * from -52 dB in 0.25 dB steps
  */
-static const unsigned int mvol_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(mvol_tlv,
 	0, 15, TLV_DB_SCALE_ITEM(-8200, 100, 1),
 	16, 43, TLV_DB_SCALE_ITEM(-6600, 50, 0),
-	44, 252, TLV_DB_SCALE_ITEM(-5200, 25, 0),
-};
+	44, 252, TLV_DB_SCALE_ITEM(-5200, 25, 0)
+);
 
 /*
  * from -72 dB in 1.5 dB steps (6 dB steps really),
@@ -282,13 +281,12 @@
  * from -60 dB in 0.5 dB steps (2 dB steps really) and
  * from -46 dB in 0.25 dB steps
  */
-static const unsigned int vc_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(vc_tlv,
 	0, 7, TLV_DB_SCALE_ITEM(-7800, 150, 1),
 	8, 15, TLV_DB_SCALE_ITEM(-6600, 75, 0),
 	16, 43, TLV_DB_SCALE_ITEM(-6000, 50, 0),
-	44, 228, TLV_DB_SCALE_ITEM(-4600, 25, 0),
-};
+	44, 228, TLV_DB_SCALE_ITEM(-4600, 25, 0)
+);
 
 /* from 0 to 6 dB in 2 dB steps if SPF mode != flat */
 static DECLARE_TLV_DB_SCALE(tr_tlv, 0, 200, 0);
@@ -810,7 +808,6 @@
 static struct i2c_driver uda1380_i2c_driver = {
 	.driver = {
 		.name =  "uda1380-codec",
-		.owner = THIS_MODULE,
 	},
 	.probe =    uda1380_i2c_probe,
 	.remove =   uda1380_i2c_remove,
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index 6560a66..f2c6ad4 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -953,7 +953,7 @@
 		trigger = IRQF_TRIGGER_FALLING;
 	trigger |= IRQF_ONESHOT;
 
-	ret = request_threaded_irq(irq, NULL, wm0010_irq, trigger | IRQF_ONESHOT,
+	ret = request_threaded_irq(irq, NULL, wm0010_irq, trigger,
 				   "wm0010", wm0010);
 	if (ret) {
 		dev_err(wm0010->dev, "Failed to request IRQ %d: %d\n",
@@ -1003,7 +1003,6 @@
 static struct spi_driver wm0010_spi_driver = {
 	.driver = {
 		.name	= "wm0010",
-		.bus 	= &spi_bus_type,
 		.owner	= THIS_MODULE,
 	},
 	.probe		= wm0010_spi_probe,
diff --git a/sound/soc/codecs/wm1250-ev1.c b/sound/soc/codecs/wm1250-ev1.c
index 048f005..ec45c5b 100644
--- a/sound/soc/codecs/wm1250-ev1.c
+++ b/sound/soc/codecs/wm1250-ev1.c
@@ -251,7 +251,6 @@
 static struct i2c_driver wm1250_ev1_i2c_driver = {
 	.driver = {
 		.name = "wm1250-ev1",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm1250_ev1_probe,
 	.remove =   wm1250_ev1_remove,
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 21d5402..786abd0 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -942,7 +942,6 @@
 static struct i2c_driver wm2000_i2c_driver = {
 	.driver = {
 		.name = "wm2000",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm2000_i2c_probe,
 	.remove = wm2000_i2c_remove,
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index c830832..fd1439e 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -166,7 +166,7 @@
 	{ .type = WMFW_ADSP1_ZM, .base = WM2200_DSP2_ZM_BASE },
 };
 
-static struct reg_default wm2200_reg_defaults[] = {
+static const struct reg_default wm2200_reg_defaults[] = {
 	{ 0x000B, 0x0000 },   /* R11    - Tone Generator 1 */
 	{ 0x0102, 0x0000 },   /* R258   - Clocking 3 */
 	{ 0x0103, 0x0011 },   /* R259   - Clocking 4 */
@@ -897,7 +897,7 @@
 	}
 }
 
-static const struct reg_default wm2200_reva_patch[] = {
+static const struct reg_sequence wm2200_reva_patch[] = {
 	{ 0x07, 0x0003 },
 	{ 0x102, 0x0200 },
 	{ 0x203, 0x0084 },
@@ -1702,7 +1702,7 @@
 	int *bclk_rates;
 
 	/* Data sizes if not using TDM */
-	wl = snd_pcm_format_width(params_format(params));
+	wl = params_width(params);
 	if (wl < 0)
 		return wl;
 	fl = snd_soc_params_to_frame_size(params);
@@ -2481,7 +2481,7 @@
 }
 #endif
 
-static struct dev_pm_ops wm2200_pm = {
+static const struct dev_pm_ops wm2200_pm = {
 	SET_RUNTIME_PM_OPS(wm2200_runtime_suspend, wm2200_runtime_resume,
 			   NULL)
 };
@@ -2495,7 +2495,6 @@
 static struct i2c_driver wm2200_i2c_driver = {
 	.driver = {
 		.name = "wm2200",
-		.owner = THIS_MODULE,
 		.pm = &wm2200_pm,
 	},
 	.probe =    wm2200_i2c_probe,
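
The wl computation change in wm2200 (and wm5100 below) is purely cosmetic:
params_width() is a <sound/pcm_params.h> helper defined, to my reading, as
exactly the composition it replaces:

	static inline int params_width(const struct snd_pcm_hw_params *p)
	{
		return snd_pcm_format_width(params_format(p));
	}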
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index 4c10cd8..c2cdcae 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -1247,7 +1247,7 @@
 	{ "PWM2", NULL, "PWM2 Driver" },
 };
 
-static const struct reg_default wm5100_reva_patches[] = {
+static const struct reg_sequence wm5100_reva_patches[] = {
 	{ WM5100_AUDIO_IF_1_10, 0 },
 	{ WM5100_AUDIO_IF_1_11, 1 },
 	{ WM5100_AUDIO_IF_1_12, 2 },
@@ -1408,7 +1408,7 @@
 	base = dai->driver->base;
 
 	/* Data sizes if not using TDM */
-	wl = snd_pcm_format_width(params_format(params));
+	wl = params_width(params);
 	if (wl < 0)
 		return wl;
 	fl = snd_soc_params_to_frame_size(params);
@@ -2570,13 +2570,11 @@
 
 		if (irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
 			ret = request_threaded_irq(i2c->irq, NULL,
-						   wm5100_edge_irq,
-						   irq_flags | IRQF_ONESHOT,
+						   wm5100_edge_irq, irq_flags,
 						   "wm5100", wm5100);
 		else
 			ret = request_threaded_irq(i2c->irq, NULL, wm5100_irq,
-						   irq_flags | IRQF_ONESHOT,
-						   "wm5100",
+						   irq_flags, "wm5100",
 						   wm5100);
 
 		if (ret != 0) {
@@ -2708,7 +2706,7 @@
 }
 #endif
 
-static struct dev_pm_ops wm5100_pm = {
+static const struct dev_pm_ops wm5100_pm = {
 	SET_RUNTIME_PM_OPS(wm5100_runtime_suspend, wm5100_runtime_resume,
 			   NULL)
 };
@@ -2722,7 +2720,6 @@
 static struct i2c_driver wm5100_i2c_driver = {
 	.driver = {
 		.name = "wm5100",
-		.owner = THIS_MODULE,
 		.pm = &wm5100_pm,
 	},
 	.probe =    wm5100_i2c_probe,
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index d097f09e5..64637d1 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -788,8 +788,7 @@
 ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
 
-SND_SOC_BYTES("EQ1 Coefficients", ARIZONA_EQ1_3, 19),
-SOC_SINGLE("EQ1 Mode Switch", ARIZONA_EQ1_2, ARIZONA_EQ1_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ1 Coefficients", ARIZONA_EQ1_2),
 SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
@@ -801,8 +800,7 @@
 SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ2 Coefficients", ARIZONA_EQ2_3, 19),
-SOC_SINGLE("EQ2 Mode Switch", ARIZONA_EQ2_2, ARIZONA_EQ2_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ2 Coefficients", ARIZONA_EQ2_2),
 SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT,
@@ -814,8 +812,7 @@
 SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ3 Coefficients", ARIZONA_EQ3_3, 19),
-SOC_SINGLE("EQ3 Mode Switch", ARIZONA_EQ3_2, ARIZONA_EQ3_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ3 Coefficients", ARIZONA_EQ3_2),
 SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT,
@@ -827,8 +824,7 @@
 SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ4 Coefficients", ARIZONA_EQ4_3, 19),
-SOC_SINGLE("EQ4 Mode Switch", ARIZONA_EQ4_2, ARIZONA_EQ4_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ4 Coefficients", ARIZONA_EQ4_2),
 SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT,
@@ -851,10 +847,10 @@
 ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE),
 
-SND_SOC_BYTES("LHPF1 Coefficients", ARIZONA_HPLPF1_2, 1),
-SND_SOC_BYTES("LHPF2 Coefficients", ARIZONA_HPLPF2_2, 1),
-SND_SOC_BYTES("LHPF3 Coefficients", ARIZONA_HPLPF3_2, 1),
-SND_SOC_BYTES("LHPF4 Coefficients", ARIZONA_HPLPF4_2, 1),
+ARIZONA_LHPF_CONTROL("LHPF1 Coefficients", ARIZONA_HPLPF1_2),
+ARIZONA_LHPF_CONTROL("LHPF2 Coefficients", ARIZONA_HPLPF2_2),
+ARIZONA_LHPF_CONTROL("LHPF3 Coefficients", ARIZONA_HPLPF3_2),
+ARIZONA_LHPF_CONTROL("LHPF4 Coefficients", ARIZONA_HPLPF4_2),
 
 ARIZONA_MIXER_CONTROLS("DSP1L", ARIZONA_DSP1LMIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("DSP1R", ARIZONA_DSP1RMIX_INPUT_1_SOURCE),
@@ -1883,7 +1879,7 @@
 	ret = snd_soc_add_codec_controls(codec,
 					 arizona_adsp2_rate_controls, 1);
 	if (ret)
-		return ret;
+		goto err_adsp2_codec_probe;
 
 	arizona_init_spk(codec);
 	arizona_init_gpio(codec);
@@ -1893,6 +1889,11 @@
 	priv->core.arizona->dapm = dapm;
 
 	return 0;
+
+err_adsp2_codec_probe:
+	wm_adsp2_codec_remove(&priv->core.adsp[0], codec);
+
+	return ret;
 }
 
 static int wm5102_codec_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 709fcc6..9756578 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -131,6 +131,25 @@
 	{ 0x33fb, 0xfe00 },
 };
 
+static const struct reg_default wm5110_sysclk_reve_patch[] = {
+	{ 0x3270, 0xE410 },
+	{ 0x3271, 0x3078 },
+	{ 0x3272, 0xE410 },
+	{ 0x3273, 0x3070 },
+	{ 0x3274, 0xE410 },
+	{ 0x3275, 0x3066 },
+	{ 0x3276, 0xE410 },
+	{ 0x3277, 0x3056 },
+	{ 0x327A, 0xE414 },
+	{ 0x327B, 0x3078 },
+	{ 0x327C, 0xE414 },
+	{ 0x327D, 0x3070 },
+	{ 0x327E, 0xE414 },
+	{ 0x327F, 0x3066 },
+	{ 0x3280, 0xE414 },
+	{ 0x3281, 0x3056 },
+};
+
 static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
 			    struct snd_kcontrol *kcontrol, int event)
 {
@@ -146,7 +165,9 @@
 		patch_size = ARRAY_SIZE(wm5110_sysclk_revd_patch);
 		break;
 	default:
-		return 0;
+		patch = wm5110_sysclk_reve_patch;
+		patch_size = ARRAY_SIZE(wm5110_sysclk_reve_patch);
+		break;
 	}
 
 	switch (event) {
@@ -164,6 +185,249 @@
 	return 0;
 }
 
+static const struct reg_sequence wm5110_no_dre_left_enable[] = {
+	{ 0x3024, 0xE410 },
+	{ 0x3025, 0x0056 },
+	{ 0x301B, 0x0224 },
+	{ 0x301F, 0x4263 },
+	{ 0x3021, 0x5291 },
+	{ 0x3030, 0xE410 },
+	{ 0x3031, 0x3066 },
+	{ 0x3032, 0xE410 },
+	{ 0x3033, 0x3070 },
+	{ 0x3034, 0xE410 },
+	{ 0x3035, 0x3078 },
+	{ 0x3036, 0xE410 },
+	{ 0x3037, 0x3080 },
+	{ 0x3038, 0xE410 },
+	{ 0x3039, 0x3080 },
+};
+
+static const struct reg_sequence wm5110_dre_left_enable[] = {
+	{ 0x3024, 0x0231 },
+	{ 0x3025, 0x0B00 },
+	{ 0x301B, 0x0227 },
+	{ 0x301F, 0x4266 },
+	{ 0x3021, 0x5294 },
+	{ 0x3030, 0xE231 },
+	{ 0x3031, 0x0266 },
+	{ 0x3032, 0x8231 },
+	{ 0x3033, 0x4B15 },
+	{ 0x3034, 0x8231 },
+	{ 0x3035, 0x0B15 },
+	{ 0x3036, 0xE231 },
+	{ 0x3037, 0x5294 },
+	{ 0x3038, 0x0231 },
+	{ 0x3039, 0x0B00 },
+};
+
+static const struct reg_sequence wm5110_no_dre_right_enable[] = {
+	{ 0x3074, 0xE414 },
+	{ 0x3075, 0x0056 },
+	{ 0x306B, 0x0224 },
+	{ 0x306F, 0x4263 },
+	{ 0x3071, 0x5291 },
+	{ 0x3080, 0xE414 },
+	{ 0x3081, 0x3066 },
+	{ 0x3082, 0xE414 },
+	{ 0x3083, 0x3070 },
+	{ 0x3084, 0xE414 },
+	{ 0x3085, 0x3078 },
+	{ 0x3086, 0xE414 },
+	{ 0x3087, 0x3080 },
+	{ 0x3088, 0xE414 },
+	{ 0x3089, 0x3080 },
+};
+
+static const struct reg_sequence wm5110_dre_right_enable[] = {
+	{ 0x3074, 0x0231 },
+	{ 0x3075, 0x0B00 },
+	{ 0x306B, 0x0227 },
+	{ 0x306F, 0x4266 },
+	{ 0x3071, 0x5294 },
+	{ 0x3080, 0xE231 },
+	{ 0x3081, 0x0266 },
+	{ 0x3082, 0x8231 },
+	{ 0x3083, 0x4B17 },
+	{ 0x3084, 0x8231 },
+	{ 0x3085, 0x0B17 },
+	{ 0x3086, 0xE231 },
+	{ 0x3087, 0x5294 },
+	{ 0x3088, 0x0231 },
+	{ 0x3089, 0x0B00 },
+};
+
+static int wm5110_hp_pre_enable(struct snd_soc_dapm_widget *w)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+	struct arizona *arizona = priv->arizona;
+	unsigned int val = snd_soc_read(codec, ARIZONA_DRE_ENABLE);
+	const struct reg_sequence *wseq;
+	int nregs;
+
+	switch (w->shift) {
+	case ARIZONA_OUT1L_ENA_SHIFT:
+		if (val & ARIZONA_DRE1L_ENA_MASK) {
+			wseq = wm5110_dre_left_enable;
+			nregs = ARRAY_SIZE(wm5110_dre_left_enable);
+		} else {
+			wseq = wm5110_no_dre_left_enable;
+			nregs = ARRAY_SIZE(wm5110_no_dre_left_enable);
+			priv->out_up_delay += 10;
+		}
+		break;
+	case ARIZONA_OUT1R_ENA_SHIFT:
+		if (val & ARIZONA_DRE1R_ENA_MASK) {
+			wseq = wm5110_dre_right_enable;
+			nregs = ARRAY_SIZE(wm5110_dre_right_enable);
+		} else {
+			wseq = wm5110_no_dre_right_enable;
+			nregs = ARRAY_SIZE(wm5110_no_dre_right_enable);
+			priv->out_up_delay += 10;
+		}
+		break;
+	default:
+		return 0;
+	}
+
+	return regmap_multi_reg_write(arizona->regmap, wseq, nregs);
+}
+
+static int wm5110_hp_pre_disable(struct snd_soc_dapm_widget *w)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+	unsigned int val = snd_soc_read(codec, ARIZONA_DRE_ENABLE);
+
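+	/* With DRE off, pulse the matching spare trigger and add extra
+	 * power-down delay */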
+	switch (w->shift) {
+	case ARIZONA_OUT1L_ENA_SHIFT:
+		if (!(val & ARIZONA_DRE1L_ENA_MASK)) {
+			snd_soc_update_bits(codec, ARIZONA_SPARE_TRIGGERS,
+					    ARIZONA_WS_TRG1, ARIZONA_WS_TRG1);
+			snd_soc_update_bits(codec, ARIZONA_SPARE_TRIGGERS,
+					    ARIZONA_WS_TRG1, 0);
+			priv->out_down_delay += 27;
+		}
+		break;
+	case ARIZONA_OUT1R_ENA_SHIFT:
+		if (!(val & ARIZONA_DRE1R_ENA_MASK)) {
+			snd_soc_update_bits(codec, ARIZONA_SPARE_TRIGGERS,
+					    ARIZONA_WS_TRG2, ARIZONA_WS_TRG2);
+			snd_soc_update_bits(codec, ARIZONA_SPARE_TRIGGERS,
+					    ARIZONA_WS_TRG2, 0);
+			priv->out_down_delay += 27;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int wm5110_hp_ev(struct snd_soc_dapm_widget *w,
+			struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	switch (priv->arizona->rev) {
+	case 0 ... 3:
+		break;
+	default:
+		switch (event) {
+		case SND_SOC_DAPM_PRE_PMU:
+			wm5110_hp_pre_enable(w);
+			break;
+		case SND_SOC_DAPM_PRE_PMD:
+			wm5110_hp_pre_disable(w);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+
+	return arizona_hp_ev(w, kcontrol, event);
+}
+
+static int wm5110_clear_pga_volume(struct arizona *arizona, int output)
+{
+	struct reg_sequence clear_pga = {
+		ARIZONA_OUTPUT_PATH_CONFIG_1L + output * 4, 0x80
+	};
+	int ret;
+
+	ret = regmap_multi_reg_write_bypassed(arizona->regmap, &clear_pga, 1);
+	if (ret)
+		dev_err(arizona->dev, "Failed to clear PGA (0x%x): %d\n",
+			clear_pga.reg, ret);
+
+	return ret;
+}
+
+static int wm5110_put_dre(struct snd_kcontrol *kcontrol,
+			  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+	struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	unsigned int ena, dre;
+	unsigned int mask = (0x1 << mc->shift) | (0x1 << mc->rshift);
+	unsigned int lnew = (!!ucontrol->value.integer.value[0]) << mc->shift;
+	unsigned int rnew = (!!ucontrol->value.integer.value[1]) << mc->rshift;
+	unsigned int lold, rold;
+	unsigned int lena, rena;
+	int ret;
+
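+	/* hold the DAPM lock so the output power state can't change under us */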
+	snd_soc_dapm_mutex_lock(dapm);
+
+	ret = regmap_read(arizona->regmap, ARIZONA_OUTPUT_ENABLES_1, &ena);
+	if (ret) {
+		dev_err(arizona->dev, "Failed to read output state: %d\n", ret);
+		goto err;
+	}
+	ret = regmap_read(arizona->regmap, ARIZONA_DRE_ENABLE, &dre);
+	if (ret) {
+		dev_err(arizona->dev, "Failed to read DRE state: %d\n", ret);
+		goto err;
+	}
+
+	lold = dre & (1 << mc->shift);
+	rold = dre & (1 << mc->rshift);
+	/* The output enables are channel-swapped relative to the DRE enables */
+	lena = ena & (1 << mc->rshift);
+	rena = ena & (1 << mc->shift);
+
+	if ((lena && lnew != lold) || (rena && rnew != rold)) {
+		dev_err(arizona->dev, "Can't change DRE on active outputs\n");
+		ret = -EBUSY;
+		goto err;
+	}
+
+	ret = regmap_update_bits(arizona->regmap, ARIZONA_DRE_ENABLE,
+				 mask, lnew | rnew);
+	if (ret) {
+		dev_err(arizona->dev, "Failed to set DRE: %d\n", ret);
+		goto err;
+	}
+
+	/* Force reset of PGA volumes, if turning DRE off */
+	if (!lnew && lold)
+		wm5110_clear_pga_volume(arizona, mc->shift);
+
+	if (!rnew && rold)
+		wm5110_clear_pga_volume(arizona, mc->rshift);
+
+err:
+	snd_soc_dapm_mutex_unlock(dapm);
+
+	return ret;
+}
+
 static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
 static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
 static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
@@ -247,8 +511,7 @@
 ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
 
-SND_SOC_BYTES("EQ1 Coefficients", ARIZONA_EQ1_3, 19),
-SOC_SINGLE("EQ1 Mode Switch", ARIZONA_EQ1_2, ARIZONA_EQ1_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ1 Coefficients", ARIZONA_EQ1_2),
 SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
@@ -260,8 +523,7 @@
 SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ2 Coefficients", ARIZONA_EQ2_3, 19),
-SOC_SINGLE("EQ2 Mode Switch", ARIZONA_EQ2_2, ARIZONA_EQ2_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ2 Coefficients", ARIZONA_EQ2_2),
 SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT,
@@ -273,8 +535,7 @@
 SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ3 Coefficients", ARIZONA_EQ3_3, 19),
-SOC_SINGLE("EQ3 Mode Switch", ARIZONA_EQ3_2, ARIZONA_EQ3_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ3 Coefficients", ARIZONA_EQ3_2),
 SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT,
@@ -286,8 +547,7 @@
 SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ4 Coefficients", ARIZONA_EQ4_3, 19),
-SOC_SINGLE("EQ4 Mode Switch", ARIZONA_EQ4_2, ARIZONA_EQ4_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ4 Coefficients", ARIZONA_EQ4_2),
 SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT,
@@ -314,10 +574,10 @@
 ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE),
 
-SND_SOC_BYTES("LHPF1 Coefficients", ARIZONA_HPLPF1_2, 1),
-SND_SOC_BYTES("LHPF2 Coefficients", ARIZONA_HPLPF2_2, 1),
-SND_SOC_BYTES("LHPF3 Coefficients", ARIZONA_HPLPF3_2, 1),
-SND_SOC_BYTES("LHPF4 Coefficients", ARIZONA_HPLPF4_2, 1),
+ARIZONA_LHPF_CONTROL("LHPF1 Coefficients", ARIZONA_HPLPF1_2),
+ARIZONA_LHPF_CONTROL("LHPF2 Coefficients", ARIZONA_HPLPF2_2),
+ARIZONA_LHPF_CONTROL("LHPF3 Coefficients", ARIZONA_HPLPF3_2),
+ARIZONA_LHPF_CONTROL("LHPF4 Coefficients", ARIZONA_HPLPF4_2),
 
 SOC_ENUM("LHPF1 Mode", arizona_lhpf1_mode),
 SOC_ENUM("LHPF2 Mode", arizona_lhpf2_mode),
@@ -409,12 +669,15 @@
 SOC_DOUBLE("SPKDAT2 Switch", ARIZONA_PDM_SPK2_CTRL_1, ARIZONA_SPK2L_MUTE_SHIFT,
 	   ARIZONA_SPK2R_MUTE_SHIFT, 1, 1),
 
-SOC_DOUBLE("HPOUT1 DRE Switch", ARIZONA_DRE_ENABLE,
-	   ARIZONA_DRE1L_ENA_SHIFT, ARIZONA_DRE1R_ENA_SHIFT, 1, 0),
-SOC_DOUBLE("HPOUT2 DRE Switch", ARIZONA_DRE_ENABLE,
-	   ARIZONA_DRE2L_ENA_SHIFT, ARIZONA_DRE2R_ENA_SHIFT, 1, 0),
-SOC_DOUBLE("HPOUT3 DRE Switch", ARIZONA_DRE_ENABLE,
-	   ARIZONA_DRE3L_ENA_SHIFT, ARIZONA_DRE3R_ENA_SHIFT, 1, 0),
+SOC_DOUBLE_EXT("HPOUT1 DRE Switch", ARIZONA_DRE_ENABLE,
+	   ARIZONA_DRE1L_ENA_SHIFT, ARIZONA_DRE1R_ENA_SHIFT, 1, 0,
+	   snd_soc_get_volsw, wm5110_put_dre),
+SOC_DOUBLE_EXT("HPOUT2 DRE Switch", ARIZONA_DRE_ENABLE,
+	   ARIZONA_DRE2L_ENA_SHIFT, ARIZONA_DRE2R_ENA_SHIFT, 1, 0,
+	   snd_soc_get_volsw, wm5110_put_dre),
+SOC_DOUBLE_EXT("HPOUT3 DRE Switch", ARIZONA_DRE_ENABLE,
+	   ARIZONA_DRE3L_ENA_SHIFT, ARIZONA_DRE3R_ENA_SHIFT, 1, 0,
+	   snd_soc_get_volsw, wm5110_put_dre),
 
 SOC_ENUM("Output Ramp Up", arizona_out_vi_ramp),
 SOC_ENUM("Output Ramp Down", arizona_out_vd_ramp),
@@ -904,11 +1167,11 @@
 		    ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX2_ENA_SHIFT, 0),
 
 SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM,
-		   ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
+		   ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, wm5110_hp_ev,
 		   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
 		   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
 SND_SOC_DAPM_PGA_E("OUT1R", SND_SOC_NOPM,
-		   ARIZONA_OUT1R_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
+		   ARIZONA_OUT1R_ENA_SHIFT, 0, NULL, 0, wm5110_hp_ev,
 		   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
 		   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
 SND_SOC_DAPM_PGA_E("OUT2L", ARIZONA_OUTPUT_ENABLES_1,
@@ -1611,18 +1874,24 @@
 	for (i = 0; i < WM5110_NUM_ADSP; ++i) {
 		ret = wm_adsp2_codec_probe(&priv->core.adsp[i], codec);
 		if (ret)
-			return ret;
+			goto err_adsp2_codec_probe;
 	}
 
 	ret = snd_soc_add_codec_controls(codec,
 					 arizona_adsp2_rate_controls,
 					 WM5110_NUM_ADSP);
 	if (ret)
-		return ret;
+		goto err_adsp2_codec_probe;
 
 	snd_soc_dapm_disable_pin(dapm, "HAPTICS");
 
 	return 0;
+
+err_adsp2_codec_probe:
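+	/* unwind the ADSP cores that probed successfully */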
+	for (--i; i >= 0; --i)
+		wm_adsp2_codec_remove(&priv->core.adsp[i], codec);
+
+	return ret;
 }
 
 static int wm5110_codec_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 41c62c1..ffbf3df 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -394,11 +394,10 @@
 static DECLARE_TLV_DB_SCALE(adc_pcm_tlv, -12700, 50, 1);
 static DECLARE_TLV_DB_SCALE(out_mix_tlv, -1500, 300, 1);
 
-static const unsigned int capture_sd_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(capture_sd_tlv,
 	0, 12, TLV_DB_SCALE_ITEM(-3600, 300, 1),
-	13, 15, TLV_DB_SCALE_ITEM(0, 0, 0),
-};
+	13, 15, TLV_DB_SCALE_ITEM(0, 0, 0)
+);
 
 static const struct snd_kcontrol_new wm8350_snd_controls[] = {
 	SOC_ENUM("Playback Deemphasis", wm8350_enum[0]),
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index d755508..b1d346a 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -370,10 +370,7 @@
 }
 
 /* INMIX dB values */
-static const unsigned int in_mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0,7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
-};
+static const DECLARE_TLV_DB_SCALE(in_mix_tlv, -1200, 600, 0);
 
 /* Left In PGA Connections */
 static const struct snd_kcontrol_new wm8400_dapm_lin12_pga_controls[] = {
diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c
index dac5beb..b098a83 100644
--- a/sound/soc/codecs/wm8510.c
+++ b/sound/soc/codecs/wm8510.c
@@ -598,6 +598,7 @@
 	{ .compatible = "wlf,wm8510" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, wm8510_of_match);
 
 static const struct regmap_config wm8510_regmap = {
 	.reg_bits = 7,
@@ -690,7 +691,6 @@
 static struct i2c_driver wm8510_i2c_driver = {
 	.driver = {
 		.name = "wm8510",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8510_of_match,
 	},
 	.probe =    wm8510_i2c_probe,
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
index 43ea8ae..aa287a3 100644
--- a/sound/soc/codecs/wm8523.c
+++ b/sound/soc/codecs/wm8523.c
@@ -430,6 +430,7 @@
 	{ .compatible = "wlf,wm8523" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, wm8523_of_match);
 
 static const struct regmap_config wm8523_regmap = {
 	.reg_bits = 8,
@@ -534,7 +535,6 @@
 static struct i2c_driver wm8523_i2c_driver = {
 	.driver = {
 		.name = "wm8523",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8523_of_match,
 	},
 	.probe =    wm8523_i2c_probe,
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index 759a792..66602bf 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -916,6 +916,7 @@
 	{ .compatible = "wlf,wm8580" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, wm8580_of_match);
 
 static const struct regmap_config wm8580_regmap = {
 	.reg_bits = 7,
@@ -978,7 +979,6 @@
 static struct i2c_driver wm8580_i2c_driver = {
 	.driver = {
 		.name = "wm8580",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8580_of_match,
 	},
 	.probe =    wm8580_i2c_probe,
diff --git a/sound/soc/codecs/wm8711.c b/sound/soc/codecs/wm8711.c
index cc8251f..44b9e0a 100644
--- a/sound/soc/codecs/wm8711.c
+++ b/sound/soc/codecs/wm8711.c
@@ -478,7 +478,6 @@
 static struct i2c_driver wm8711_i2c_driver = {
 	.driver = {
 		.name = "wm8711",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8711_of_match,
 	},
 	.probe =    wm8711_i2c_probe,
diff --git a/sound/soc/codecs/wm8728.c b/sound/soc/codecs/wm8728.c
index f1a173e..cd7b024 100644
--- a/sound/soc/codecs/wm8728.c
+++ b/sound/soc/codecs/wm8728.c
@@ -319,7 +319,6 @@
 static struct i2c_driver wm8728_i2c_driver = {
 	.driver = {
 		.name = "wm8728",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8728_of_match,
 	},
 	.probe =    wm8728_i2c_probe,
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 915ea11..ace8645 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -79,12 +79,7 @@
 	return reg == WM8731_RESET;
 }
 
-static bool wm8731_writeable(struct device *dev, unsigned int reg)
-{
-	return reg <= WM8731_RESET;
-}
-
-#define wm8731_reset(c)	snd_soc_write(c, WM8731_RESET, 0)
+#define wm8731_reset(m)	regmap_write(m, WM8731_RESET, 0)
 
 static const char *wm8731_input_select[] = {"Line In", "Mic"};
 
@@ -496,8 +491,11 @@
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
-		if (wm8731->mclk)
-			clk_prepare_enable(wm8731->mclk);
+		if (wm8731->mclk) {
+			ret = clk_prepare_enable(wm8731->mclk);
+			if (ret)
+				return ret;
+		}
 		break;
 	case SND_SOC_BIAS_PREPARE:
 		break;
@@ -571,69 +569,63 @@
 	.symmetric_rates = 1,
 };
 
-static int wm8731_probe(struct snd_soc_codec *codec)
+static int wm8731_request_supplies(struct device *dev,
+		struct wm8731_priv *wm8731)
 {
-	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
 	int ret = 0, i;
 
 	for (i = 0; i < ARRAY_SIZE(wm8731->supplies); i++)
 		wm8731->supplies[i].supply = wm8731_supply_names[i];
 
-	ret = devm_regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8731->supplies),
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(wm8731->supplies),
 				 wm8731->supplies);
 	if (ret != 0) {
-		dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+		dev_err(dev, "Failed to request supplies: %d\n", ret);
 		return ret;
 	}
 
 	ret = regulator_bulk_enable(ARRAY_SIZE(wm8731->supplies),
 				    wm8731->supplies);
 	if (ret != 0) {
-		dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+		dev_err(dev, "Failed to enable supplies: %d\n", ret);
 		return ret;
 	}
 
-	ret = wm8731_reset(codec);
+	return 0;
+}
+
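+/*
+ * One-time hardware setup, called from the I2C/SPI probe paths before
+ * the codec is registered.
+ */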
+static int wm8731_hw_init(struct device *dev, struct wm8731_priv *wm8731)
+{
+	int ret = 0;
+
+	ret = wm8731_reset(wm8731->regmap);
 	if (ret < 0) {
-		dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
+		dev_err(dev, "Failed to issue reset: %d\n", ret);
 		goto err_regulator_enable;
 	}
 
-	snd_soc_codec_force_bias_level(codec, SND_SOC_BIAS_STANDBY);
+	/* Clear POWEROFF, keep everything else disabled */
+	regmap_write(wm8731->regmap, WM8731_PWR, 0x7f);
 
 	/* Latch the update bits */
-	snd_soc_update_bits(codec, WM8731_LOUT1V, 0x100, 0);
-	snd_soc_update_bits(codec, WM8731_ROUT1V, 0x100, 0);
-	snd_soc_update_bits(codec, WM8731_LINVOL, 0x100, 0);
-	snd_soc_update_bits(codec, WM8731_RINVOL, 0x100, 0);
+	regmap_update_bits(wm8731->regmap, WM8731_LOUT1V, 0x100, 0);
+	regmap_update_bits(wm8731->regmap, WM8731_ROUT1V, 0x100, 0);
+	regmap_update_bits(wm8731->regmap, WM8731_LINVOL, 0x100, 0);
+	regmap_update_bits(wm8731->regmap, WM8731_RINVOL, 0x100, 0);
 
 	/* Disable bypass path by default */
-	snd_soc_update_bits(codec, WM8731_APANA, 0x8, 0);
+	regmap_update_bits(wm8731->regmap, WM8731_APANA, 0x8, 0);
 
-	/* Regulators will have been enabled by bias management */
-	regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
-
-	return 0;
+	regcache_mark_dirty(wm8731->regmap);
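+	/* deliberately fall through: the supplies are disabled again on
+	 * success too */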
 
 err_regulator_enable:
+	/* Regulators will be enabled by bias management */
 	regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
 
 	return ret;
 }
 
-/* power down chip */
-static int wm8731_remove(struct snd_soc_codec *codec)
-{
-	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
-
-	regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
-
-	return 0;
-}
-
 static struct snd_soc_codec_driver soc_codec_dev_wm8731 = {
-	.probe =	wm8731_probe,
-	.remove =	wm8731_remove,
 	.set_bias_level = wm8731_set_bias_level,
 	.suspend_bias_off = true,
 
@@ -658,7 +650,6 @@
 
 	.max_register = WM8731_RESET,
 	.volatile_reg = wm8731_volatile,
-	.writeable_reg = wm8731_writeable,
 
 	.cache_type = REGCACHE_RBTREE,
 	.reg_defaults = wm8731_reg_defaults,
@@ -690,6 +681,12 @@
 
 	mutex_init(&wm8731->lock);
 
+	spi_set_drvdata(spi, wm8731);
+
+	ret = wm8731_request_supplies(&spi->dev, wm8731);
+	if (ret != 0)
+		return ret;
+
 	wm8731->regmap = devm_regmap_init_spi(spi, &wm8731_regmap);
 	if (IS_ERR(wm8731->regmap)) {
 		ret = PTR_ERR(wm8731->regmap);
@@ -698,7 +695,9 @@
 		return ret;
 	}
 
-	spi_set_drvdata(spi, wm8731);
+	ret = wm8731_hw_init(&spi->dev, wm8731);
+	if (ret != 0)
+		return ret;
 
 	ret = snd_soc_register_codec(&spi->dev,
 			&soc_codec_dev_wm8731, &wm8731_dai, 1);
@@ -754,6 +753,12 @@
 
 	mutex_init(&wm8731->lock);
 
+	i2c_set_clientdata(i2c, wm8731);
+
+	ret = wm8731_request_supplies(&i2c->dev, wm8731);
+	if (ret != 0)
+		return ret;
+
 	wm8731->regmap = devm_regmap_init_i2c(i2c, &wm8731_regmap);
 	if (IS_ERR(wm8731->regmap)) {
 		ret = PTR_ERR(wm8731->regmap);
@@ -762,7 +767,9 @@
 		return ret;
 	}
 
-	i2c_set_clientdata(i2c, wm8731);
+	ret = wm8731_hw_init(&i2c->dev, wm8731);
+	if (ret != 0)
+		return ret;
 
 	ret = snd_soc_register_codec(&i2c->dev,
 			&soc_codec_dev_wm8731, &wm8731_dai, 1);
@@ -789,7 +796,6 @@
 static struct i2c_driver wm8731_i2c_driver = {
 	.driver = {
 		.name = "wm8731",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8731_of_match,
 	},
 	.probe =    wm8731_i2c_probe,
diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
index 6ad606f..e4a03d9 100644
--- a/sound/soc/codecs/wm8737.c
+++ b/sound/soc/codecs/wm8737.c
@@ -79,13 +79,12 @@
 	return snd_soc_write(codec, WM8737_RESET, 0);
 }
 
-static const unsigned int micboost_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(micboost_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(1300, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(1800, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2800, 0, 0),
-	3, 3, TLV_DB_SCALE_ITEM(3300, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(3300, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(pga_tlv, -9750, 50, 1);
 static const DECLARE_TLV_DB_SCALE(adc_tlv, -600, 600, 0);
 static const DECLARE_TLV_DB_SCALE(ng_tlv, -7800, 600, 0);
@@ -657,7 +656,6 @@
 static struct i2c_driver wm8737_i2c_driver = {
 	.driver = {
 		.name = "wm8737",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8737_of_match,
 	},
 	.probe =    wm8737_i2c_probe,
diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
index b346237..de42c03 100644
--- a/sound/soc/codecs/wm8741.c
+++ b/sound/soc/codecs/wm8741.c
@@ -61,25 +61,6 @@
 	{ 32, 0x0002 },     /* R32 - ADDITONAL_CONTROL_1 */
 };
 
-static bool wm8741_readable(struct device *dev, unsigned int reg)
-{
-	switch (reg) {
-	case WM8741_DACLLSB_ATTENUATION:
-	case WM8741_DACLMSB_ATTENUATION:
-	case WM8741_DACRLSB_ATTENUATION:
-	case WM8741_DACRMSB_ATTENUATION:
-	case WM8741_VOLUME_CONTROL:
-	case WM8741_FORMAT_CONTROL:
-	case WM8741_FILTER_CONTROL:
-	case WM8741_MODE_CONTROL_1:
-	case WM8741_MODE_CONTROL_2:
-	case WM8741_ADDITIONAL_CONTROL_1:
-		return true;
-	default:
-		return false;
-	}
-}
-
 static int wm8741_reset(struct snd_soc_codec *codec)
 {
 	return snd_soc_write(codec, WM8741_RESET, 0);
@@ -278,51 +259,38 @@
 	switch (freq) {
 	case 0:
 		wm8741->sysclk_constraints = NULL;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 11289600:
 		wm8741->sysclk_constraints = &constraints_11289;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 12288000:
 		wm8741->sysclk_constraints = &constraints_12288;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 16384000:
 		wm8741->sysclk_constraints = &constraints_16384;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 16934400:
 		wm8741->sysclk_constraints = &constraints_16934;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 18432000:
 		wm8741->sysclk_constraints = &constraints_18432;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 22579200:
 	case 33868800:
 		wm8741->sysclk_constraints = &constraints_22579;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 24576000:
 		wm8741->sysclk_constraints = &constraints_24576;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 36864000:
 		wm8741->sysclk_constraints = &constraints_36864;
-		wm8741->sysclk = freq;
-		return 0;
+		break;
+	default:
+		return -EINVAL;
 	}
-	return -EINVAL;
+
+	wm8741->sysclk = freq;
+	return 0;
 }
 
 static int wm8741_set_dai_fmt(struct snd_soc_dai *codec_dai,
@@ -554,8 +522,6 @@
 	.reg_defaults = wm8741_reg_defaults,
 	.num_reg_defaults = ARRAY_SIZE(wm8741_reg_defaults),
 	.cache_type = REGCACHE_RBTREE,
-
-	.readable_reg = wm8741_readable,
 };
 
 static int wm8741_set_pdata(struct device *dev, struct wm8741_priv *wm8741)
@@ -633,7 +599,6 @@
 static struct i2c_driver wm8741_i2c_driver = {
 	.driver = {
 		.name = "wm8741",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8741_of_match,
 	},
 	.probe =    wm8741_i2c_probe,
diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c
index 56d89b0..873933a 100644
--- a/sound/soc/codecs/wm8750.c
+++ b/sound/soc/codecs/wm8750.c
@@ -826,7 +826,6 @@
 static struct i2c_driver wm8750_i2c_driver = {
 	.driver = {
 		.name = "wm8750",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8750_of_match,
 	},
 	.probe =    wm8750_i2c_probe,
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index feb2997a..a801c6d 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -138,11 +138,6 @@
 	return reg == WM8753_RESET;
 }
 
-static bool wm8753_writeable(struct device *dev, unsigned int reg)
-{
-	return reg <= WM8753_ADCTL2;
-}
-
 /* codec private data */
 struct wm8753_priv {
 	struct regmap *regmap;
@@ -276,12 +271,11 @@
 static const DECLARE_TLV_DB_SCALE(mic_preamp_tlv, 1200, 600, 0);
 static const DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 1);
 static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
-static const unsigned int out_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(out_tlv,
 	/* 0000000 - 0101111 = "Analogue mute" */
 	0, 48, TLV_DB_SCALE_ITEM(-25500, 0, 0),
-	48, 127, TLV_DB_SCALE_ITEM(-7300, 100, 0),
-};
+	48, 127, TLV_DB_SCALE_ITEM(-7300, 100, 0)
+);
 static const DECLARE_TLV_DB_SCALE(mix_tlv, -1500, 300, 0);
 static const DECLARE_TLV_DB_SCALE(voice_mix_tlv, -1200, 300, 0);
 static const DECLARE_TLV_DB_SCALE(pga_tlv, -1725, 75, 0);
@@ -1510,7 +1504,6 @@
 	.val_bits = 9,
 
 	.max_register = WM8753_ADCTL2,
-	.writeable_reg = wm8753_writeable,
 	.volatile_reg = wm8753_volatile,
 
 	.cache_type = REGCACHE_RBTREE,
@@ -1609,7 +1602,6 @@
 static struct i2c_driver wm8753_i2c_driver = {
 	.driver = {
 		.name = "wm8753",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8753_of_match,
 	},
 	.probe =    wm8753_i2c_probe,
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index ece9b44..183c9a4 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -265,7 +265,7 @@
 	}
 
 	/* Set word length */
-	switch (snd_pcm_format_width(params_format(params))) {
+	switch (params_width(params)) {
 	case 16:
 		iface = 0;
 		break;
@@ -280,7 +280,7 @@
 		break;
 	default:
 		dev_err(codec->dev, "Unsupported sample size: %i\n",
-			snd_pcm_format_width(params_format(params)));
+			params_width(params));
 		return -EINVAL;
 	}
 
@@ -536,7 +536,6 @@
 static struct i2c_driver wm8776_i2c_driver = {
 	.driver = {
 		.name = "wm8776",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8776_of_match,
 	},
 	.probe =    wm8776_i2c_probe,
diff --git a/sound/soc/codecs/wm8804-i2c.c b/sound/soc/codecs/wm8804-i2c.c
index 6596f5f..f27464c 100644
--- a/sound/soc/codecs/wm8804-i2c.c
+++ b/sound/soc/codecs/wm8804-i2c.c
@@ -49,7 +49,6 @@
 static struct i2c_driver wm8804_i2c_driver = {
 	.driver = {
 		.name = "wm8804",
-		.owner = THIS_MODULE,
 		.pm = &wm8804_pm,
 		.of_match_table = wm8804_of_match,
 	},
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index c195c2e..8d91470 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -98,7 +98,7 @@
 WM8804_REGULATOR_EVENT(1)
 
 static const char *txsrc_text[] = { "S/PDIF RX", "AIF" };
-static const SOC_ENUM_SINGLE_DECL(txsrc, WM8804_SPDTX4, 6, txsrc_text);
+static SOC_ENUM_SINGLE_DECL(txsrc, WM8804_SPDTX4, 6, txsrc_text);
 
 static const struct snd_kcontrol_new wm8804_tx_source_mux[] = {
 	SOC_DAPM_ENUM_EXT("Input Source", txsrc,
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index f3759ec..98900aa 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -1312,7 +1312,6 @@
 static struct i2c_driver wm8900_i2c_driver = {
 	.driver = {
 		.name = "wm8900",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8900_i2c_probe,
 	.remove =   wm8900_i2c_remove,
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index b5322c1..b011253 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -2193,7 +2193,6 @@
 static struct i2c_driver wm8903_i2c_driver = {
 	.driver = {
 		.name = "wm8903",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8903_of_match,
 	},
 	.probe =    wm8903_i2c_probe,
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 265a4a5..b783743 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1837,7 +1837,9 @@
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
-		clk_prepare_enable(wm8904->mclk);
+		ret = clk_prepare_enable(wm8904->mclk);
+		if (ret)
+			return ret;
 		break;
 
 	case SND_SOC_BIAS_PREPARE:
@@ -2292,7 +2294,6 @@
 static struct i2c_driver wm8904_i2c_driver = {
 	.driver = {
 		.name = "wm8904",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(wm8904_of_match),
 	},
 	.probe =    wm8904_i2c_probe,
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index 98ef0ba..f6f9395 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -787,7 +787,6 @@
 static struct i2c_driver wm8940_i2c_driver = {
 	.driver = {
 		.name = "wm8940",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8940_i2c_probe,
 	.remove =   wm8940_i2c_remove,
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index 2d591c2..12e4435 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -1009,7 +1009,6 @@
 static struct i2c_driver wm8955_i2c_driver = {
 	.driver = {
 		.name = "wm8955",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8955_i2c_probe,
 	.remove =   wm8955_i2c_remove,
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 94c5c46..e3b7d0c 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -48,6 +48,9 @@
 #define WM8960_DISOP     0x40
 #define WM8960_DRES_MASK 0x30
 
+static bool is_pll_freq_available(unsigned int source, unsigned int target);
+static int wm8960_set_pll(struct snd_soc_codec *codec,
+		unsigned int freq_in, unsigned int freq_out);
 /*
  * wm8960 register cache
  * We can't read the WM8960 register space when we are
@@ -126,9 +129,12 @@
 	struct snd_soc_dapm_widget *rout1;
 	struct snd_soc_dapm_widget *out3;
 	bool deemph;
-	int playback_fs;
+	int lrclk;
 	int bclk;
 	int sysclk;
+	int clk_id;
+	int freq_in;
+	bool is_stream_in_use[2];
 	struct wm8960_data pdata;
 };
 
@@ -164,8 +170,8 @@
 	if (wm8960->deemph) {
 		best = 1;
 		for (i = 2; i < ARRAY_SIZE(deemph_settings); i++) {
-			if (abs(deemph_settings[i] - wm8960->playback_fs) <
-			    abs(deemph_settings[best] - wm8960->playback_fs))
+			if (abs(deemph_settings[i] - wm8960->lrclk) <
+			    abs(deemph_settings[best] - wm8960->lrclk))
 				best = i;
 		}
 
@@ -565,6 +571,9 @@
 	{  8000, 5 },
 };
 
+/* -1 for reserved value */
+static const int sysclk_divs[] = { 1, -1, 2, -1 };
+
 /* Multiply 256 for internal 256 div */
 static const int dac_divs[] = { 256, 384, 512, 768, 1024, 1408, 1536 };
 
@@ -574,61 +583,110 @@
 	120, 160, 220, 240, 320, 320, 320
 };
 
-static void wm8960_configure_clocking(struct snd_soc_codec *codec,
-		bool tx, int lrclk)
+static int wm8960_configure_clocking(struct snd_soc_codec *codec)
 {
 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+	int sysclk, bclk, lrclk, freq_out, freq_in;
 	u16 iface1 = snd_soc_read(codec, WM8960_IFACE1);
-	u16 iface2 = snd_soc_read(codec, WM8960_IFACE2);
-	u32 sysclk;
-	int i, j;
+	int i, j, k;
 
 	if (!(iface1 & (1<<6))) {
 		dev_dbg(codec->dev,
 			"Codec is slave mode, no need to configure clock\n");
-		return;
+		return 0;
 	}
 
-	if (!wm8960->sysclk) {
-		dev_dbg(codec->dev, "No SYSCLK configured\n");
-		return;
+	if (wm8960->clk_id != WM8960_SYSCLK_MCLK && !wm8960->freq_in) {
+		dev_err(codec->dev, "No MCLK configured\n");
+		return -EINVAL;
 	}
 
-	if (!wm8960->bclk || !lrclk) {
-		dev_dbg(codec->dev, "No audio clocks configured\n");
-		return;
+	freq_in = wm8960->freq_in;
+	bclk = wm8960->bclk;
+	lrclk = wm8960->lrclk;
+	/*
+	 * In sysclk auto mode, check whether the MCLK can provide sysclk
+	 * directly. If it can, use the MCLK to provide sysclk; otherwise
+	 * automatically select an available PLL output frequency and set
+	 * up the PLL.
+	 */
+	if (wm8960->clk_id == WM8960_SYSCLK_AUTO) {
+		/* disable the PLL and use MCLK to provide sysclk */
+		wm8960_set_pll(codec, 0, 0);
+		freq_out = freq_in;
+	} else if (wm8960->sysclk) {
+		freq_out = wm8960->sysclk;
+	} else {
+		dev_err(codec->dev, "No SYSCLK configured\n");
+		return -EINVAL;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(dac_divs); ++i) {
-		if (wm8960->sysclk == lrclk * dac_divs[i]) {
-			for (j = 0; j < ARRAY_SIZE(bclk_divs); ++j) {
-				sysclk = wm8960->bclk * bclk_divs[j] / 10;
-				if (wm8960->sysclk == sysclk)
+	/* check whether the wanted sysclk frequency is directly achievable */
+	for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
+		if (sysclk_divs[i] == -1)
+			continue;
+		sysclk = freq_out / sysclk_divs[i];
+		for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
+			if (sysclk == dac_divs[j] * lrclk) {
+				for (k = 0; k < ARRAY_SIZE(bclk_divs); ++k)
+					if (sysclk == bclk * bclk_divs[k] / 10)
+						break;
+				if (k != ARRAY_SIZE(bclk_divs))
 					break;
 			}
-			if(j != ARRAY_SIZE(bclk_divs))
+		}
+		if (j != ARRAY_SIZE(dac_divs))
+			break;
+	}
+
+	if (i != ARRAY_SIZE(sysclk_divs)) {
+		goto configure_clock;
+	} else if (wm8960->clk_id != WM8960_SYSCLK_AUTO) {
+		dev_err(codec->dev, "failed to configure clock\n");
+		return -EINVAL;
+	}
+	/* find an available PLL output frequency and set up the PLL */
+	for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
+		if (sysclk_divs[i] == -1)
+			continue;
+		for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
+			sysclk = lrclk * dac_divs[j];
+			freq_out = sysclk * sysclk_divs[i];
+
+			for (k = 0; k < ARRAY_SIZE(bclk_divs); ++k) {
+				if (sysclk == bclk * bclk_divs[k] / 10 &&
+				    is_pll_freq_available(freq_in, freq_out)) {
+					wm8960_set_pll(codec,
+						       freq_in, freq_out);
+					break;
+				} else {
+					continue;
+				}
+			}
+			if (k != ARRAY_SIZE(bclk_divs))
 				break;
 		}
+		if (j != ARRAY_SIZE(dac_divs))
+			break;
 	}
 
-	if (i == ARRAY_SIZE(dac_divs)) {
-		dev_err(codec->dev, "Unsupported sysclk %d\n", wm8960->sysclk);
-		return;
+	if (i == ARRAY_SIZE(sysclk_divs)) {
+		dev_err(codec->dev, "failed to configure clock\n");
+		return -EINVAL;
 	}
 
-	/*
-	 * configure frame clock. If ADCLRC configure as GPIO pin, DACLRC
-	 * pin is used as a frame clock for ADCs and DACs.
-	 */
-	if (iface2 & (1<<6))
-		snd_soc_update_bits(codec, WM8960_CLOCK1, 0x7 << 3, i << 3);
-	else if (tx)
-		snd_soc_update_bits(codec, WM8960_CLOCK1, 0x7 << 3, i << 3);
-	else if (!tx)
-		snd_soc_update_bits(codec, WM8960_CLOCK1, 0x7 << 6, i << 6);
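+	/* i, j and k now index sysclk_divs, dac_divs and bclk_divs respectively */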
+configure_clock:
+	/* configure the sysclk divider */
+	snd_soc_update_bits(codec, WM8960_CLOCK1, 3 << 1, i << 1);
+
+	/* configure frame clock */
+	snd_soc_update_bits(codec, WM8960_CLOCK1, 0x7 << 3, j << 3);
+	snd_soc_update_bits(codec, WM8960_CLOCK1, 0x7 << 6, j << 6);
 
 	/* configure bit clock */
-	snd_soc_update_bits(codec, WM8960_CLOCK2, 0xf, j);
+	snd_soc_update_bits(codec, WM8960_CLOCK2, 0xf, k);
+
+	return 0;
 }
 
 static int wm8960_hw_params(struct snd_pcm_substream *substream,
@@ -667,9 +725,9 @@
 		return -EINVAL;
 	}
 
+	wm8960->lrclk = params_rate(params);
 	/* Update filters for the new rate */
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		wm8960->playback_fs = params_rate(params);
+	if (tx) {
 		wm8960_set_deemph(codec);
 	} else {
 		for (i = 0; i < ARRAY_SIZE(alc_rates); i++)
@@ -682,7 +740,23 @@
 	/* set iface */
 	snd_soc_write(codec, WM8960_IFACE1, iface);
 
-	wm8960_configure_clocking(codec, tx, params_rate(params));
+	wm8960->is_stream_in_use[tx] = true;
+
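+	/* reconfigure clocks only while fully powered and the other stream
+	 * is idle */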
+	if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON &&
+	    !wm8960->is_stream_in_use[!tx])
+		return wm8960_configure_clocking(codec);
+
+	return 0;
+}
+
+static int wm8960_hw_free(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+	bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+
+	wm8960->is_stream_in_use[tx] = false;
 
 	return 0;
 }
@@ -702,6 +776,7 @@
 				      enum snd_soc_bias_level level)
 {
 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+	u16 pm2 = snd_soc_read(codec, WM8960_POWER2);
 	int ret;
 
 	switch (level) {
@@ -721,11 +796,22 @@
 				}
 			}
 
+			ret = wm8960_configure_clocking(codec);
+			if (ret)
+				return ret;
+
 			/* Set VMID to 2x50k */
 			snd_soc_update_bits(codec, WM8960_POWER1, 0x180, 0x80);
 			break;
 
 		case SND_SOC_BIAS_ON:
+			/*
+			 * In sysclk auto mode, disable the PLL if it is
+			 * still enabled.
+			 */
+			if (wm8960->clk_id == WM8960_SYSCLK_AUTO && (pm2 & 0x1))
+				wm8960_set_pll(codec, 0, 0);
+
 			if (!IS_ERR(wm8960->mclk))
 				clk_disable_unprepare(wm8960->mclk);
 			break;
@@ -780,6 +866,7 @@
 					 enum snd_soc_bias_level level)
 {
 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+	u16 pm2 = snd_soc_read(codec, WM8960_POWER2);
 	int reg, ret;
 
 	switch (level) {
@@ -831,9 +918,21 @@
 					return ret;
 				}
 			}
+
+			ret = wm8960_configure_clocking(codec);
+			if (ret)
+				return ret;
+
 			break;
 
 		case SND_SOC_BIAS_ON:
+			/*
+			 * In sysclk auto mode, disable the PLL if it is
+			 * still enabled.
+			 */
+			if (wm8960->clk_id == WM8960_SYSCLK_AUTO && (pm2 & 0x1))
+				wm8960_set_pll(codec, 0, 0);
+
 			if (!IS_ERR(wm8960->mclk))
 				clk_disable_unprepare(wm8960->mclk);
 
@@ -892,6 +991,28 @@
 	u32 k:24;
 };
 
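+/* The PLL runs at 4x the target rate; N (= 4 * target / source) must land
+ * in 6..12, halving the input once if N would otherwise be too small */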
+static bool is_pll_freq_available(unsigned int source, unsigned int target)
+{
+	unsigned int Ndiv;
+
+	if (source == 0 || target == 0)
+		return false;
+
+	/* Scale up target to PLL operating frequency */
+	target *= 4;
+	Ndiv = target / source;
+
+	if (Ndiv < 6) {
+		source >>= 1;
+		Ndiv = target / source;
+	}
+
+	if ((Ndiv < 6) || (Ndiv > 12))
+		return false;
+
+	return true;
+}
+
 /* The size in bits of the pll divide multiplied by 10
  * to allow rounding later */
 #define FIXED_PLL_SIZE ((1 << 24) * 10)
@@ -943,10 +1064,9 @@
 	return 0;
 }
 
-static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
-		int source, unsigned int freq_in, unsigned int freq_out)
+static int wm8960_set_pll(struct snd_soc_codec *codec,
+		unsigned int freq_in, unsigned int freq_out)
 {
-	struct snd_soc_codec *codec = codec_dai->codec;
 	u16 reg;
 	static struct _pll_div pll_div;
 	int ret;
@@ -986,6 +1106,20 @@
 	return 0;
 }
 
+static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
+		int source, unsigned int freq_in, unsigned int freq_out)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+
+	wm8960->freq_in = freq_in;
+
+	if (pll_id == WM8960_SYSCLK_AUTO)
+		return 0;
+
+	return wm8960_set_pll(codec, freq_in, freq_out);
+}
+
 static int wm8960_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
 		int div_id, int div)
 {
@@ -1043,11 +1177,14 @@
 		snd_soc_update_bits(codec, WM8960_CLOCK1,
 					0x1, WM8960_SYSCLK_PLL);
 		break;
+	case WM8960_SYSCLK_AUTO:
+		break;
 	default:
 		return -EINVAL;
 	}
 
 	wm8960->sysclk = freq;
+	wm8960->clk_id = clk_id;
 
 	return 0;
 }
@@ -1060,6 +1197,7 @@
 
 static const struct snd_soc_dai_ops wm8960_dai_ops = {
 	.hw_params = wm8960_hw_params,
+	.hw_free = wm8960_hw_free,
 	.digital_mute = wm8960_mute,
 	.set_fmt = wm8960_set_dai_fmt,
 	.set_clkdiv = wm8960_set_dai_clkdiv,
@@ -1216,7 +1354,6 @@
 static struct i2c_driver wm8960_i2c_driver = {
 	.driver = {
 		.name = "wm8960",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8960_of_match,
 	},
 	.probe =    wm8960_i2c_probe,
diff --git a/sound/soc/codecs/wm8960.h b/sound/soc/codecs/wm8960.h
index 2d8163d..ab3220d 100644
--- a/sound/soc/codecs/wm8960.h
+++ b/sound/soc/codecs/wm8960.h
@@ -82,6 +82,7 @@
 
 #define WM8960_SYSCLK_MCLK		(0 << 0)
 #define WM8960_SYSCLK_PLL		(1 << 0)
+#define WM8960_SYSCLK_AUTO		(2 << 0)
 
 #define WM8960_DAC_DIV_1		(0 << 3)
 #define WM8960_DAC_DIV_1_5		(1 << 3)
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index a057662..e30446a 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -331,13 +331,12 @@
 static const DECLARE_TLV_DB_SCALE(hp_sec_tlv, -700, 100, 0);
 static const DECLARE_TLV_DB_SCALE(adc_tlv, -7200, 75, 1);
 static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 300, 0);
-static unsigned int boost_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(boost_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0,  0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(13, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(20, 0, 0),
-	3, 3, TLV_DB_SCALE_ITEM(29, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(29, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(pga_tlv, -2325, 75, 0);
 
 static const struct snd_kcontrol_new wm8961_snd_controls[] = {
@@ -982,7 +981,6 @@
 static struct i2c_driver wm8961_i2c_driver = {
 	.driver = {
 		.name = "wm8961",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8961_i2c_probe,
 	.remove =   wm8961_i2c_remove,
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index c5748fd..b4eb975 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -113,7 +113,7 @@
 WM8962_REGULATOR_EVENT(6)
 WM8962_REGULATOR_EVENT(7)
 
-static struct reg_default wm8962_reg[] = {
+static const struct reg_default wm8962_reg[] = {
 	{ 0, 0x009F },   /* R0     - Left Input volume */
 	{ 1, 0x049F },   /* R1     - Right Input volume */
 	{ 2, 0x0000 },   /* R2     - HPOUTL volume */
@@ -1456,14 +1456,13 @@
 
 static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0);
 static const DECLARE_TLV_DB_SCALE(mixin_tlv, -1500, 300, 0);
-static const unsigned int mixinpga_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(mixinpga_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(0, 600, 0),
 	2, 2, TLV_DB_SCALE_ITEM(1300, 1300, 0),
 	3, 4, TLV_DB_SCALE_ITEM(1800, 200, 0),
 	5, 5, TLV_DB_SCALE_ITEM(2400, 0, 0),
-	6, 7, TLV_DB_SCALE_ITEM(2700, 300, 0),
-};
+	6, 7, TLV_DB_SCALE_ITEM(2700, 300, 0)
+);
 static const DECLARE_TLV_DB_SCALE(beep_tlv, -9600, 600, 1);
 static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
 static const DECLARE_TLV_DB_SCALE(st_tlv, -3600, 300, 0);
@@ -1471,11 +1470,10 @@
 static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
 static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
 static const DECLARE_TLV_DB_SCALE(hp_tlv, -700, 100, 0);
-static const unsigned int classd_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(classd_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
-	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
-};
+	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
 
 static int wm8962_dsp2_write_config(struct snd_soc_codec *codec)
@@ -3495,7 +3493,7 @@
 };
 
 /* Improve power consumption for IN4 DC measurement mode */
-static const struct reg_default wm8962_dc_measure[] = {
+static const struct reg_sequence wm8962_dc_measure[] = {
 	{ 0xfd, 0x1 },
 	{ 0xcc, 0x40 },
 	{ 0xfd, 0 },
@@ -3859,7 +3857,7 @@
 }
 #endif
 
-static struct dev_pm_ops wm8962_pm = {
+static const struct dev_pm_ops wm8962_pm = {
 	SET_RUNTIME_PM_OPS(wm8962_runtime_suspend, wm8962_runtime_resume, NULL)
 };
 
@@ -3878,7 +3876,6 @@
 static struct i2c_driver wm8962_i2c_driver = {
 	.driver = {
 		.name = "wm8962",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8962_of_match,
 		.pm = &wm8962_pm,
 	},
diff --git a/sound/soc/codecs/wm8971.c b/sound/soc/codecs/wm8971.c
index b51184c..2cdde32 100644
--- a/sound/soc/codecs/wm8971.c
+++ b/sound/soc/codecs/wm8971.c
@@ -710,7 +710,6 @@
 static struct i2c_driver wm8971_i2c_driver = {
 	.driver = {
 		.name = "wm8971",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8971_i2c_probe,
 	.remove =   wm8971_i2c_remove,
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index 33b16a7..0a60677 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -634,7 +634,6 @@
 static struct i2c_driver wm8974_i2c_driver = {
 	.driver = {
 		.name = "wm8974",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8974_i2c_probe,
 	.remove =   wm8974_i2c_remove,
diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c
index cfc8cdf..d36d600 100644
--- a/sound/soc/codecs/wm8978.c
+++ b/sound/soc/codecs/wm8978.c
@@ -1072,7 +1072,6 @@
 static struct i2c_driver wm8978_i2c_driver = {
 	.driver = {
 		.name = "wm8978",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8978_i2c_probe,
 	.remove =   wm8978_i2c_remove,
diff --git a/sound/soc/codecs/wm8983.c b/sound/soc/codecs/wm8983.c
index 2fdd2c6..f3193fb 100644
--- a/sound/soc/codecs/wm8983.c
+++ b/sound/soc/codecs/wm8983.c
@@ -84,66 +84,6 @@
 	{ 0x3D, 0x0000 },      /* R61 - BIAS CTRL */
 };
 
-static const struct wm8983_reg_access {
-	u16 read; /* Mask of readable bits */
-	u16 write; /* Mask of writable bits */
-} wm8983_access_masks[WM8983_MAX_REGISTER + 1] = {
-	[0x00] = { 0x0000, 0x01FF }, /* R0  - Software Reset */
-	[0x01] = { 0x0000, 0x01FF }, /* R1  - Power management 1 */
-	[0x02] = { 0x0000, 0x01FF }, /* R2  - Power management 2 */
-	[0x03] = { 0x0000, 0x01EF }, /* R3  - Power management 3 */
-	[0x04] = { 0x0000, 0x01FF }, /* R4  - Audio Interface */
-	[0x05] = { 0x0000, 0x003F }, /* R5  - Companding control */
-	[0x06] = { 0x0000, 0x01FD }, /* R6  - Clock Gen control */
-	[0x07] = { 0x0000, 0x000F }, /* R7  - Additional control */
-	[0x08] = { 0x0000, 0x003F }, /* R8  - GPIO Control */
-	[0x09] = { 0x0000, 0x0070 }, /* R9  - Jack Detect Control 1 */
-	[0x0A] = { 0x0000, 0x004F }, /* R10 - DAC Control */
-	[0x0B] = { 0x0000, 0x01FF }, /* R11 - Left DAC digital Vol */
-	[0x0C] = { 0x0000, 0x01FF }, /* R12 - Right DAC digital vol */
-	[0x0D] = { 0x0000, 0x00FF }, /* R13 - Jack Detect Control 2 */
-	[0x0E] = { 0x0000, 0x01FB }, /* R14 - ADC Control */
-	[0x0F] = { 0x0000, 0x01FF }, /* R15 - Left ADC Digital Vol */
-	[0x10] = { 0x0000, 0x01FF }, /* R16 - Right ADC Digital Vol */
-	[0x12] = { 0x0000, 0x017F }, /* R18 - EQ1 - low shelf */
-	[0x13] = { 0x0000, 0x017F }, /* R19 - EQ2 - peak 1 */
-	[0x14] = { 0x0000, 0x017F }, /* R20 - EQ3 - peak 2 */
-	[0x15] = { 0x0000, 0x017F }, /* R21 - EQ4 - peak 3 */
-	[0x16] = { 0x0000, 0x007F }, /* R22 - EQ5 - high shelf */
-	[0x18] = { 0x0000, 0x01FF }, /* R24 - DAC Limiter 1 */
-	[0x19] = { 0x0000, 0x007F }, /* R25 - DAC Limiter 2 */
-	[0x1B] = { 0x0000, 0x01FF }, /* R27 - Notch Filter 1 */
-	[0x1C] = { 0x0000, 0x017F }, /* R28 - Notch Filter 2 */
-	[0x1D] = { 0x0000, 0x017F }, /* R29 - Notch Filter 3 */
-	[0x1E] = { 0x0000, 0x017F }, /* R30 - Notch Filter 4 */
-	[0x20] = { 0x0000, 0x01BF }, /* R32 - ALC control 1 */
-	[0x21] = { 0x0000, 0x00FF }, /* R33 - ALC control 2 */
-	[0x22] = { 0x0000, 0x01FF }, /* R34 - ALC control 3 */
-	[0x23] = { 0x0000, 0x000F }, /* R35 - Noise Gate */
-	[0x24] = { 0x0000, 0x001F }, /* R36 - PLL N */
-	[0x25] = { 0x0000, 0x003F }, /* R37 - PLL K 1 */
-	[0x26] = { 0x0000, 0x01FF }, /* R38 - PLL K 2 */
-	[0x27] = { 0x0000, 0x01FF }, /* R39 - PLL K 3 */
-	[0x29] = { 0x0000, 0x000F }, /* R41 - 3D control */
-	[0x2A] = { 0x0000, 0x01E7 }, /* R42 - OUT4 to ADC */
-	[0x2B] = { 0x0000, 0x01BF }, /* R43 - Beep control */
-	[0x2C] = { 0x0000, 0x0177 }, /* R44 - Input ctrl */
-	[0x2D] = { 0x0000, 0x01FF }, /* R45 - Left INP PGA gain ctrl */
-	[0x2E] = { 0x0000, 0x01FF }, /* R46 - Right INP PGA gain ctrl */
-	[0x2F] = { 0x0000, 0x0177 }, /* R47 - Left ADC BOOST ctrl */
-	[0x30] = { 0x0000, 0x0177 }, /* R48 - Right ADC BOOST ctrl */
-	[0x31] = { 0x0000, 0x007F }, /* R49 - Output ctrl */
-	[0x32] = { 0x0000, 0x01FF }, /* R50 - Left mixer ctrl */
-	[0x33] = { 0x0000, 0x01FF }, /* R51 - Right mixer ctrl */
-	[0x34] = { 0x0000, 0x01FF }, /* R52 - LOUT1 (HP) volume ctrl */
-	[0x35] = { 0x0000, 0x01FF }, /* R53 - ROUT1 (HP) volume ctrl */
-	[0x36] = { 0x0000, 0x01FF }, /* R54 - LOUT2 (SPK) volume ctrl */
-	[0x37] = { 0x0000, 0x01FF }, /* R55 - ROUT2 (SPK) volume ctrl */
-	[0x38] = { 0x0000, 0x004F }, /* R56 - OUT3 mixer ctrl */
-	[0x39] = { 0x0000, 0x00FF }, /* R57 - OUT4 (MONO) mix ctrl */
-	[0x3D] = { 0x0000, 0x0100 }  /* R61 - BIAS CTRL */
-};
-
 /* vol/gain update regs */
 static const int vol_update_regs[] = {
 	WM8983_LEFT_DAC_DIGITAL_VOL,
@@ -605,12 +545,19 @@
 	return 0;
 }
 
-static bool wm8983_readable(struct device *dev, unsigned int reg)
+static bool wm8983_writeable(struct device *dev, unsigned int reg)
 {
-	if (reg > WM8983_MAX_REGISTER)
-		return 0;
-
-	return wm8983_access_masks[reg].read != 0;
+	switch (reg) {
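+	/* contiguous writeable ranges; the gaps between them are reserved
+	 * addresses */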
+	case WM8983_SOFTWARE_RESET ... WM8983_RIGHT_ADC_DIGITAL_VOL:
+	case WM8983_EQ1_LOW_SHELF ... WM8983_DAC_LIMITER_2:
+	case WM8983_NOTCH_FILTER_1 ... WM8983_NOTCH_FILTER_4:
+	case WM8983_ALC_CONTROL_1 ... WM8983_PLL_K_3:
+	case WM8983_3D_CONTROL ... WM8983_OUT4_MONO_MIX_CTRL:
+	case WM8983_BIAS_CTRL:
+		return true;
+	default:
+		return false;
+	}
 }
 
 static int wm8983_dac_mute(struct snd_soc_dai *dai, int mute)
@@ -1048,8 +995,9 @@
 	.reg_defaults = wm8983_defaults,
 	.num_reg_defaults = ARRAY_SIZE(wm8983_defaults),
 	.cache_type = REGCACHE_RBTREE,
+	.max_register = WM8983_MAX_REGISTER,
 
-	.readable_reg = wm8983_readable,
+	.writeable_reg = wm8983_writeable,
 };
 
 #if defined(CONFIG_SPI_MASTER)
@@ -1133,7 +1081,6 @@
 static struct i2c_driver wm8983_i2c_driver = {
 	.driver = {
 		.name = "wm8983",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm8983_i2c_probe,
 	.remove = wm8983_i2c_remove,
diff --git a/sound/soc/codecs/wm8985.c b/sound/soc/codecs/wm8985.c
index 8a85f50..9c3c151 100644
--- a/sound/soc/codecs/wm8985.c
+++ b/sound/soc/codecs/wm8985.c
@@ -1144,7 +1144,6 @@
 static struct i2c_driver wm8985_i2c_driver = {
 	.driver = {
 		.name = "wm8985",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm8985_i2c_probe,
 	.remove = wm8985_i2c_remove,
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
index f13a995..c88ce99 100644
--- a/sound/soc/codecs/wm8988.c
+++ b/sound/soc/codecs/wm8988.c
@@ -919,7 +919,6 @@
 static struct i2c_driver wm8988_i2c_driver = {
 	.driver = {
 		.name = "wm8988",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8988_i2c_probe,
 	.remove =   wm8988_i2c_remove,
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 1993fd2..23ecd30 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -418,10 +418,7 @@
 }
 
 /* INMIX dB values */
-static const unsigned int in_mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
-};
+static const DECLARE_TLV_DB_SCALE(in_mix_tlv, -1200, 600, 0);
 
 /* Left In PGA Connections */
 static const struct snd_kcontrol_new wm8990_dapm_lin12_pga_controls[] = {
@@ -1356,7 +1353,6 @@
 static struct i2c_driver wm8990_i2c_driver = {
 	.driver = {
 		.name = "wm8990",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8990_i2c_probe,
 	.remove =   wm8990_i2c_remove,
diff --git a/sound/soc/codecs/wm8991.c b/sound/soc/codecs/wm8991.c
index 44a6777..c9ee0ac 100644
--- a/sound/soc/codecs/wm8991.c
+++ b/sound/soc/codecs/wm8991.c
@@ -111,45 +111,14 @@
 	}
 }
 
-static const unsigned int rec_mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 7, TLV_DB_LINEAR_ITEM(-1500, 600),
-};
-
-static const unsigned int in_pga_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 0x1F, TLV_DB_LINEAR_ITEM(-1650, 3000),
-};
-
-static const unsigned int out_mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 7, TLV_DB_LINEAR_ITEM(0, -2100),
-};
-
-static const unsigned int out_pga_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 127, TLV_DB_LINEAR_ITEM(-7300, 600),
-};
-
-static const unsigned int out_omix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 7, TLV_DB_LINEAR_ITEM(-600, 0),
-};
-
-static const unsigned int out_dac_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 255, TLV_DB_LINEAR_ITEM(-7163, 0),
-};
-
-static const unsigned int in_adc_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 255, TLV_DB_LINEAR_ITEM(-7163, 1763),
-};
-
-static const unsigned int out_sidetone_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 31, TLV_DB_LINEAR_ITEM(-3600, 0),
-};
+static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
+static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000);
+static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, 0, -2100);
+static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600);
+static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0);
+static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0);
+static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763);
+static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0);
 
 static int wm899x_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
 				      struct snd_ctl_elem_value *ucontrol)
@@ -429,10 +398,7 @@
 }
 
 /* INMIX dB values */
-static const unsigned int in_mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 7, TLV_DB_LINEAR_ITEM(-1200, 600),
-};
+static const DECLARE_TLV_DB_LINEAR(in_mix_tlv, -1200, 600);
 
 /* Left In PGA Connections */
 static const struct snd_kcontrol_new wm8991_dapm_lin12_pga_controls[] = {
@@ -1363,7 +1329,6 @@
 static struct i2c_driver wm8991_i2c_driver = {
 	.driver = {
 		.name = "wm8991",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm8991_i2c_probe,
 	.remove = wm8991_i2c_remove,
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 8a8db86..8668c4c 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -41,7 +41,7 @@
 	"SPKVDD",
 };
 
-static struct reg_default wm8993_reg_defaults[] = {
+static const struct reg_default wm8993_reg_defaults[] = {
 	{ 1,   0x0000 },     /* R1   - Power Management (1) */
 	{ 2,   0x6000 },     /* R2   - Power Management (2) */
 	{ 3,   0x0000 },     /* R3   - Power Management (3) */
@@ -628,11 +628,10 @@
 static const DECLARE_TLV_DB_SCALE(drc_comp_threash, -4500, 75, 0);
 static const DECLARE_TLV_DB_SCALE(drc_comp_amp, -2250, 75, 0);
 static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0);
-static const unsigned int drc_max_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(drc_max_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0),
-	3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(drc_qr_tlv, 1200, 600, 0);
 static const DECLARE_TLV_DB_SCALE(drc_startup_tlv, -1800, 300, 0);
 static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
@@ -1595,7 +1594,7 @@
 #endif
 
 /* Tune DC servo configuration */
-static struct reg_default wm8993_regmap_patch[] = {
+static const struct reg_sequence wm8993_regmap_patch[] = {
 	{ 0x44, 3 },
 	{ 0x56, 3 },
 	{ 0x44, 0 },
@@ -1742,7 +1741,6 @@
 static struct i2c_driver wm8993_i2c_driver = {
 	.driver = {
 		.name = "wm8993",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8993_i2c_probe,
 	.remove =   wm8993_i2c_remove,
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 962e1d3..2ccbb32 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -1942,14 +1942,16 @@
 	{ "AIF2ADCDAT", NULL, "AIF2ADC Mux" },
 
 	/* AIF3 output */
-	{ "AIF3ADCDAT", "AIF1ADCDAT", "AIF1ADC1L" },
-	{ "AIF3ADCDAT", "AIF1ADCDAT", "AIF1ADC1R" },
-	{ "AIF3ADCDAT", "AIF1ADCDAT", "AIF1ADC2L" },
-	{ "AIF3ADCDAT", "AIF1ADCDAT", "AIF1ADC2R" },
-	{ "AIF3ADCDAT", "AIF2ADCDAT", "AIF2ADCL" },
-	{ "AIF3ADCDAT", "AIF2ADCDAT", "AIF2ADCR" },
-	{ "AIF3ADCDAT", "AIF2DACDAT", "AIF2DACL" },
-	{ "AIF3ADCDAT", "AIF2DACDAT", "AIF2DACR" },
+	{ "AIF3ADC Mux", "AIF1ADCDAT", "AIF1ADC1L" },
+	{ "AIF3ADC Mux", "AIF1ADCDAT", "AIF1ADC1R" },
+	{ "AIF3ADC Mux", "AIF1ADCDAT", "AIF1ADC2L" },
+	{ "AIF3ADC Mux", "AIF1ADCDAT", "AIF1ADC2R" },
+	{ "AIF3ADC Mux", "AIF2ADCDAT", "AIF2ADCL" },
+	{ "AIF3ADC Mux", "AIF2ADCDAT", "AIF2ADCR" },
+	{ "AIF3ADC Mux", "AIF2DACDAT", "AIF2DACL" },
+	{ "AIF3ADC Mux", "AIF2DACDAT", "AIF2DACR" },
+
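+	/* the mux output is what actually drives the AIF3 data pin */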
+	{ "AIF3ADCDAT", NULL, "AIF3ADC Mux" },
 
 	/* Loopback */
 	{ "AIF1 Loopback", "ADCDAT", "AIF1ADCDAT" },
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
index 505b65f..eda52a9 100644
--- a/sound/soc/codecs/wm8995.c
+++ b/sound/soc/codecs/wm8995.c
@@ -2298,7 +2298,6 @@
 static struct i2c_driver wm8995_i2c_driver = {
 	.driver = {
 		.name = "wm8995",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm8995_i2c_probe,
 	.remove = wm8995_i2c_remove,
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index 3dd063f..f7ccd9f 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -117,7 +117,7 @@
 WM8996_REGULATOR_EVENT(1)
 WM8996_REGULATOR_EVENT(2)
 
-static struct reg_default wm8996_reg[] = {
+static const struct reg_default wm8996_reg[] = {
 	{ WM8996_POWER_MANAGEMENT_1, 0x0 },
 	{ WM8996_POWER_MANAGEMENT_2, 0x0 },
 	{ WM8996_POWER_MANAGEMENT_3, 0x0 },
@@ -1780,7 +1780,7 @@
 	wm8996->rx_rate[dai->id] = params_rate(params);
 
 	/* Needs looking at for TDM */
-	bits = snd_pcm_format_width(params_format(params));
+	bits = params_width(params);
 	if (bits < 0)
 		return bits;
 	aifdata |= (bits << WM8996_AIF1TX_WL_SHIFT) | bits;
@@ -2647,12 +2647,10 @@
 		if (irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
 			ret = request_threaded_irq(i2c->irq, NULL,
 						   wm8996_edge_irq,
-						   irq_flags | IRQF_ONESHOT,
-						   "wm8996", codec);
+						   irq_flags, "wm8996", codec);
 		else
 			ret = request_threaded_irq(i2c->irq, NULL, wm8996_irq,
-						   irq_flags | IRQF_ONESHOT,
-						   "wm8996", codec);
+						   irq_flags, "wm8996", codec);
 
 		if (ret == 0) {
 			/* Unmask the interrupt */
@@ -3100,7 +3098,6 @@
 static struct i2c_driver wm8996_i2c_driver = {
 	.driver = {
 		.name = "wm8996",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8996_i2c_probe,
 	.remove =   wm8996_i2c_remove,
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 4134dc7..b4dba3a 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -174,8 +174,7 @@
 ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
 
-SND_SOC_BYTES("EQ1 Coefficients", ARIZONA_EQ1_3, 19),
-SOC_SINGLE("EQ1 Mode Switch", ARIZONA_EQ1_2, ARIZONA_EQ1_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ1 Coefficients", ARIZONA_EQ1_2),
 SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
@@ -187,8 +186,7 @@
 SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ2 Coefficients", ARIZONA_EQ2_3, 19),
-SOC_SINGLE("EQ2 Mode Switch", ARIZONA_EQ2_2, ARIZONA_EQ2_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ2 Coefficients", ARIZONA_EQ2_2),
 SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT,
@@ -200,8 +198,7 @@
 SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ3 Coefficients", ARIZONA_EQ3_3, 19),
-SOC_SINGLE("EQ3 Mode Switch", ARIZONA_EQ3_2, ARIZONA_EQ3_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ3 Coefficients", ARIZONA_EQ3_2),
 SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT,
@@ -213,8 +210,7 @@
 SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ4 Coefficients", ARIZONA_EQ4_3, 19),
-SOC_SINGLE("EQ4 Mode Switch", ARIZONA_EQ4_2, ARIZONA_EQ4_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ4 Coefficients", ARIZONA_EQ4_2),
 SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT,
@@ -242,10 +238,10 @@
 SOC_ENUM("LHPF3 Mode", arizona_lhpf3_mode),
 SOC_ENUM("LHPF4 Mode", arizona_lhpf4_mode),
 
-SND_SOC_BYTES("LHPF1 Coefficients", ARIZONA_HPLPF1_2, 1),
-SND_SOC_BYTES("LHPF2 Coefficients", ARIZONA_HPLPF2_2, 1),
-SND_SOC_BYTES("LHPF3 Coefficients", ARIZONA_HPLPF3_2, 1),
-SND_SOC_BYTES("LHPF4 Coefficients", ARIZONA_HPLPF4_2, 1),
+ARIZONA_LHPF_CONTROL("LHPF1 Coefficients", ARIZONA_HPLPF1_2),
+ARIZONA_LHPF_CONTROL("LHPF2 Coefficients", ARIZONA_HPLPF2_2),
+ARIZONA_LHPF_CONTROL("LHPF3 Coefficients", ARIZONA_HPLPF3_2),
+ARIZONA_LHPF_CONTROL("LHPF4 Coefficients", ARIZONA_HPLPF4_2),
 
 SOC_ENUM("ISRC1 FSL", arizona_isrc_fsl[0]),
 SOC_ENUM("ISRC2 FSL", arizona_isrc_fsl[1]),
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 8a8b1c0..ccb3b15 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -30,7 +30,7 @@
 #include <sound/wm9081.h>
 #include "wm9081.h"
 
-static struct reg_default wm9081_reg[] = {
+static const struct reg_default wm9081_reg[] = {
 	{  2, 0x00B9 },     /* R2  - Analogue Lineout */
 	{  3, 0x00B9 },     /* R3  - Analogue Speaker PGA */
 	{  4, 0x0001 },     /* R4  - VMID Control */
@@ -243,13 +243,12 @@
 static const DECLARE_TLV_DB_SCALE(drc_in_tlv, -4500, 75, 0);
 static const DECLARE_TLV_DB_SCALE(drc_out_tlv, -2250, 75, 0);
 static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0);
-static unsigned int drc_max_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(drc_max_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(1200, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(1800, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
-	3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(drc_qr_tlv, 1200, 600, 0);
 static const DECLARE_TLV_DB_SCALE(drc_startup_tlv, -300, 50, 0);
 
@@ -1378,7 +1377,6 @@
 static struct i2c_driver wm9081_i2c_driver = {
 	.driver = {
 		.name = "wm9081",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm9081_i2c_probe,
 	.remove =   wm9081_i2c_remove,
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index 13d23fc..5d73729 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -162,23 +162,20 @@
 		dev_err(codec->dev, "Timed out waiting for DC Servo\n");
 }
 
-static const unsigned int in_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(in_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(-600, 0, 0),
 	1, 3, TLV_DB_SCALE_ITEM(-350, 350, 0),
-	4, 6, TLV_DB_SCALE_ITEM(600, 600, 0),
-};
-static const unsigned int mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+	4, 6, TLV_DB_SCALE_ITEM(600, 600, 0)
+);
+static const DECLARE_TLV_DB_RANGE(mix_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0),
-	3, 3, TLV_DB_SCALE_ITEM(0, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(0, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0);
-static const unsigned int spkboost_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(spkboost_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
-	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
-};
+	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0)
+);
 
 static const struct snd_kcontrol_new wm9090_controls[] = {
 SOC_SINGLE_TLV("IN1A Volume", WM9090_IN1_LINE_INPUT_A_VOLUME, 0, 6, 0,
@@ -636,7 +633,6 @@
 static struct i2c_driver wm9090_i2c_driver = {
 	.driver = {
 		.name = "wm9090",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm9090_i2c_probe,
 	.remove = wm9090_i2c_remove,
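
Several hunks above replace hand-rolled TLV range tables with
DECLARE_TLV_DB_RANGE(). The macro derives the TLV_DB_RANGE_HEAD() item
count from its argument list at compile time, so the count can no longer
drift out of sync with the entries. Roughly, the two spellings below are
equivalent (a sketch; see include/sound/tlv.h for the real definition):

	/* old style: the count in TLV_DB_RANGE_HEAD() is maintained by hand */
	static const unsigned int example_tlv[] = {
		TLV_DB_RANGE_HEAD(2),
		0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0),
		3, 3, TLV_DB_SCALE_ITEM(0, 0, 0),
	};

	/* new style: the item count is computed from the arguments */
	static const DECLARE_TLV_DB_RANGE(example_tlv,
		0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0),
		3, 3, TLV_DB_SCALE_ITEM(0, 0, 0)
	);
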
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index 5cc457e..744842c 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -22,6 +22,9 @@
 
 #include "wm9705.h"
 
+#define WM9705_VENDOR_ID 0x574d4c05
+#define WM9705_VENDOR_ID_MASK 0xffffffff
+
 /*
  * WM9705 register cache
  */
@@ -293,21 +296,6 @@
 	}
 };
 
-static int wm9705_reset(struct snd_soc_codec *codec)
-{
-	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
-
-	if (soc_ac97_ops->reset) {
-		soc_ac97_ops->reset(ac97);
-		if (ac97_read(codec, 0) == wm9705_reg[0])
-			return 0; /* Success */
-	}
-
-	dev_err(codec->dev, "Failed to reset: AC97 link error\n");
-
-	return -EIO;
-}
-
 #ifdef CONFIG_PM
 static int wm9705_soc_suspend(struct snd_soc_codec *codec)
 {
@@ -324,7 +312,8 @@
 	int i, ret;
 	u16 *cache = codec->reg_cache;
 
-	ret = wm9705_reset(codec);
+	ret = snd_ac97_reset(ac97, true, WM9705_VENDOR_ID,
+		WM9705_VENDOR_ID_MASK);
 	if (ret < 0)
 		return ret;
 
@@ -342,30 +331,17 @@
 static int wm9705_soc_probe(struct snd_soc_codec *codec)
 {
 	struct snd_ac97 *ac97;
-	int ret = 0;
 
-	ac97 = snd_soc_alloc_ac97_codec(codec);
+	ac97 = snd_soc_new_ac97_codec(codec, WM9705_VENDOR_ID,
+		WM9705_VENDOR_ID_MASK);
 	if (IS_ERR(ac97)) {
-		ret = PTR_ERR(ac97);
 		dev_err(codec->dev, "Failed to register AC97 codec\n");
-		return ret;
+		return PTR_ERR(ac97);
 	}
 
-	ret = wm9705_reset(codec);
-	if (ret)
-		goto err_put_device;
-
-	ret = device_add(&ac97->dev);
-	if (ret)
-		goto err_put_device;
-
 	snd_soc_codec_set_drvdata(codec, ac97);
 
 	return 0;
-
-err_put_device:
-	put_device(&ac97->dev);
-	return ret;
 }
 
 static int wm9705_soc_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 1fda104..488a922 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -23,6 +23,9 @@
 #include <sound/tlv.h>
 #include "wm9712.h"
 
+#define WM9712_VENDOR_ID 0x574d4c12
+#define WM9712_VENDOR_ID_MASK 0xffffffff
+
 struct wm9712_priv {
 	struct snd_ac97 *ac97;
 	unsigned int hp_mixer[2];
@@ -613,35 +616,14 @@
 	return 0;
 }
 
-static int wm9712_reset(struct snd_soc_codec *codec, int try_warm)
-{
-	struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
-
-	if (try_warm && soc_ac97_ops->warm_reset) {
-		soc_ac97_ops->warm_reset(wm9712->ac97);
-		if (ac97_read(codec, 0) == wm9712_reg[0])
-			return 1;
-	}
-
-	soc_ac97_ops->reset(wm9712->ac97);
-	if (soc_ac97_ops->warm_reset)
-		soc_ac97_ops->warm_reset(wm9712->ac97);
-	if (ac97_read(codec, 0) != wm9712_reg[0])
-		goto err;
-	return 0;
-
-err:
-	dev_err(codec->dev, "Failed to reset: AC97 link error\n");
-	return -EIO;
-}
-
 static int wm9712_soc_resume(struct snd_soc_codec *codec)
 {
 	struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
 	int i, ret;
 	u16 *cache = codec->reg_cache;
 
-	ret = wm9712_reset(codec, 1);
+	ret = snd_ac97_reset(wm9712->ac97, true, WM9712_VENDOR_ID,
+		WM9712_VENDOR_ID_MASK);
 	if (ret < 0)
 		return ret;
 
@@ -663,31 +645,20 @@
 static int wm9712_soc_probe(struct snd_soc_codec *codec)
 {
 	struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
+	int ret;
 
-	wm9712->ac97 = snd_soc_alloc_ac97_codec(codec);
+	wm9712->ac97 = snd_soc_new_ac97_codec(codec, WM9712_VENDOR_ID,
+		WM9712_VENDOR_ID_MASK);
 	if (IS_ERR(wm9712->ac97)) {
 		ret = PTR_ERR(wm9712->ac97);
 		dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
 		return ret;
 	}
 
-	ret = wm9712_reset(codec, 0);
-	if (ret < 0)
-		goto err_put_device;
-
-	ret = device_add(&wm9712->ac97->dev);
-	if (ret)
-		goto err_put_device;
-
 	/* set alc mux to none */
 	ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000);
 
 	return 0;
-
-err_put_device:
-	put_device(&wm9712->ac97->dev);
-	return ret;
 }
 
 static int wm9712_soc_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 89cd2d6..4083a51 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -29,6 +29,9 @@
 
 #include "wm9713.h"
 
+#define WM9713_VENDOR_ID 0x574d4c13
+#define WM9713_VENDOR_ID_MASK 0xffffffff
+
 struct wm9713_priv {
 	struct snd_ac97 *ac97;
 	u32 pll_in; /* PLL input frequency */
@@ -116,11 +119,10 @@
 static const DECLARE_TLV_DB_SCALE(out_tlv, -4650, 150, 0);
 static const DECLARE_TLV_DB_SCALE(main_tlv, -3450, 150, 0);
 static const DECLARE_TLV_DB_SCALE(misc_tlv, -1500, 300, 0);
-static unsigned int mic_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const  DECLARE_TLV_DB_RANGE(mic_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0),
-	3, 3, TLV_DB_SCALE_ITEM(3000, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(3000, 0, 0)
+);
 
 static const struct snd_kcontrol_new wm9713_snd_ac97_controls[] = {
 SOC_DOUBLE_TLV("Speaker Playback Volume", AC97_MASTER, 8, 0, 31, 1, out_tlv),
@@ -1123,28 +1125,6 @@
 	},
 };
 
-int wm9713_reset(struct snd_soc_codec *codec, int try_warm)
-{
-	struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
-
-	if (try_warm && soc_ac97_ops->warm_reset) {
-		soc_ac97_ops->warm_reset(wm9713->ac97);
-		if (ac97_read(codec, 0) == wm9713_reg[0])
-			return 1;
-	}
-
-	soc_ac97_ops->reset(wm9713->ac97);
-	if (soc_ac97_ops->warm_reset)
-		soc_ac97_ops->warm_reset(wm9713->ac97);
-	if (ac97_read(codec, 0) != wm9713_reg[0]) {
-		dev_err(codec->dev, "Failed to reset: AC97 link error\n");
-		return -EIO;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(wm9713_reset);
-
 static int wm9713_set_bias_level(struct snd_soc_codec *codec,
 				 enum snd_soc_bias_level level)
 {
@@ -1196,7 +1176,8 @@
 	int i, ret;
 	u16 *cache = codec->reg_cache;
 
-	ret = wm9713_reset(codec, 1);
+	ret = snd_ac97_reset(wm9713->ac97, true, WM9713_VENDOR_ID,
+		WM9713_VENDOR_ID_MASK);
 	if (ret < 0)
 		return ret;
 
@@ -1222,32 +1203,18 @@
 static int wm9713_soc_probe(struct snd_soc_codec *codec)
 {
 	struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
-	int ret = 0, reg;
+	int reg;
 
-	wm9713->ac97 = snd_soc_alloc_ac97_codec(codec);
+	wm9713->ac97 = snd_soc_new_ac97_codec(codec, WM9713_VENDOR_ID,
+		WM9713_VENDOR_ID_MASK);
 	if (IS_ERR(wm9713->ac97))
 		return PTR_ERR(wm9713->ac97);
 
-	/* do a cold reset for the controller and then try
-	 * a warm reset followed by an optional cold reset for codec */
-	wm9713_reset(codec, 0);
-	ret = wm9713_reset(codec, 1);
-	if (ret < 0)
-		goto err_put_device;
-
-	ret = device_add(&wm9713->ac97->dev);
-	if (ret)
-		goto err_put_device;
-
 	/* unmute the adc - move to kcontrol */
 	reg = ac97_read(codec, AC97_CD) & 0x7fff;
 	ac97_write(codec, AC97_CD, reg);
 
 	return 0;
-
-err_put_device:
-	put_device(&wm9713->ac97->dev);
-	return ret;
 }
 
 static int wm9713_soc_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm9713.h b/sound/soc/codecs/wm9713.h
index 793da86..53df11b 100644
--- a/sound/soc/codecs/wm9713.h
+++ b/sound/soc/codecs/wm9713.h
@@ -45,6 +45,4 @@
 #define WM9713_DAI_AC97_AUX		1
 #define WM9713_DAI_PCM_VOICE	2
 
-int wm9713_reset(struct snd_soc_codec *codec,  int try_warm);
-
 #endif
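
The wm9705/wm9712/wm9713 hunks above drop their driver-local reset helpers
in favour of the core snd_ac97_reset()/snd_soc_new_ac97_codec(), which take
the expected vendor ID plus mask and verify the part after resetting it. A
minimal probe sketch built only from the calls visible in the hunks above:

	static int example_soc_probe(struct snd_soc_codec *codec)
	{
		struct snd_ac97 *ac97;

		/* registers the AC'97 device; the core resets it and
		 * checks the vendor ID against the given mask */
		ac97 = snd_soc_new_ac97_codec(codec, WM9705_VENDOR_ID,
					      WM9705_VENDOR_ID_MASK);
		if (IS_ERR(ac97))
			return PTR_ERR(ac97);

		snd_soc_codec_set_drvdata(codec, ac97);
		return 0;
	}
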
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index fd86bd1..624b3b9 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -38,11 +38,10 @@
 static const DECLARE_TLV_DB_SCALE(outmix_tlv, -2100, 300, 0);
 static const DECLARE_TLV_DB_SCALE(spkmixout_tlv, -1800, 600, 1);
 static const DECLARE_TLV_DB_SCALE(outpga_tlv, -5700, 100, 0);
-static const unsigned int spkboost_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(spkboost_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
-	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
-};
+	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(line_tlv, -600, 600, 0);
 
 static const char *speaker_ref_text[] = {
diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c
index 56cb4d9..ec98548 100644
--- a/sound/soc/davinci/davinci-i2s.c
+++ b/sound/soc/davinci/davinci-i2s.c
@@ -651,23 +651,15 @@
 static int davinci_i2s_probe(struct platform_device *pdev)
 {
 	struct davinci_mcbsp_dev *dev;
-	struct resource *mem, *ioarea, *res;
+	struct resource *mem, *res;
+	void __iomem *io_base;
 	int *dma;
 	int ret;
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem) {
-		dev_err(&pdev->dev, "no mem resource?\n");
-		return -ENODEV;
-	}
-
-	ioarea = devm_request_mem_region(&pdev->dev, mem->start,
-					 resource_size(mem),
-					 pdev->name);
-	if (!ioarea) {
-		dev_err(&pdev->dev, "McBSP region already claimed\n");
-		return -EBUSY;
-	}
+	io_base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(io_base))
+		return PTR_ERR(io_base);
 
 	dev = devm_kzalloc(&pdev->dev, sizeof(struct davinci_mcbsp_dev),
 			   GFP_KERNEL);
@@ -679,12 +671,7 @@
 		return -ENODEV;
 	clk_enable(dev->clk);
 
-	dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (!dev->base) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
-		goto err_release_clk;
-	}
+	dev->base = io_base;
 
 	dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK].addr =
 	    (dma_addr_t)(mem->start + DAVINCI_MCBSP_DXR_REG);
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index b960e62..add6bb9 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1613,7 +1613,7 @@
 static int davinci_mcasp_probe(struct platform_device *pdev)
 {
 	struct snd_dmaengine_dai_dma_data *dma_data;
-	struct resource *mem, *ioarea, *res, *dat;
+	struct resource *mem, *res, *dat;
 	struct davinci_mcasp_pdata *pdata;
 	struct davinci_mcasp *mcasp;
 	char *irq_name;
@@ -1648,22 +1648,12 @@
 		}
 	}
 
-	ioarea = devm_request_mem_region(&pdev->dev, mem->start,
-			resource_size(mem), pdev->name);
-	if (!ioarea) {
-		dev_err(&pdev->dev, "Audio region already claimed\n");
-		return -EBUSY;
-	}
+	mcasp->base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(mcasp->base))
+		return PTR_ERR(mcasp->base);
 
 	pm_runtime_enable(&pdev->dev);
 
-	mcasp->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (!mcasp->base) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
-		goto err;
-	}
-
 	mcasp->op_mode = pdata->op_mode;
 	/* sanity check for tdm slots parameter */
 	if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE) {
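
Both davinci conversions above lean on devm_ioremap_resource() subsuming
the request_mem_region()/ioremap() pair: it validates the resource
(including a NULL one from platform_get_resource()), claims the region,
maps it and logs its own errors, which is why the open-coded checks can
go. The resulting probe pattern, as used in both hunks:

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	io_base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(io_base))
		return PTR_ERR(io_base);
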
diff --git a/sound/soc/davinci/davinci-vcif.c b/sound/soc/davinci/davinci-vcif.c
index fabd05f..c77d921 100644
--- a/sound/soc/davinci/davinci-vcif.c
+++ b/sound/soc/davinci/davinci-vcif.c
@@ -231,8 +231,9 @@
 
 	dev_set_drvdata(&pdev->dev, davinci_vcif_dev);
 
-	ret = snd_soc_register_component(&pdev->dev, &davinci_vcif_component,
-					 &davinci_vcif_dai, 1);
+	ret = devm_snd_soc_register_component(&pdev->dev,
+					      &davinci_vcif_component,
+					      &davinci_vcif_dai, 1);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "could not register dai\n");
 		return ret;
@@ -241,23 +242,14 @@
 	ret = edma_pcm_platform_register(&pdev->dev);
 	if (ret) {
 		dev_err(&pdev->dev, "register PCM failed: %d\n", ret);
-		snd_soc_unregister_component(&pdev->dev);
 		return ret;
 	}
 
 	return 0;
 }
 
-static int davinci_vcif_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_component(&pdev->dev);
-
-	return 0;
-}
-
 static struct platform_driver davinci_vcif_driver = {
 	.probe		= davinci_vcif_probe,
-	.remove		= davinci_vcif_remove,
 	.driver		= {
 		.name	= "davinci-vcif",
 	},
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index e1aa3834..883087f 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -182,7 +182,7 @@
 		);
 	} else {
 		if (np) {
-			/* The eukrea,asoc-tlv320 driver was explicitely
+			/* The eukrea,asoc-tlv320 driver was explicitly
 			 * requested (through the device tree).
 			 */
 			dev_err(&pdev->dev,
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index de43887..5aeb6ed 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -23,6 +23,7 @@
 
 #include "../codecs/sgtl5000.h"
 #include "../codecs/wm8962.h"
+#include "../codecs/wm8960.h"
 
 #define RX 0
 #define TX 1
@@ -407,6 +408,7 @@
 	struct fsl_asoc_card_priv *priv;
 	struct i2c_client *codec_dev;
 	struct clk *codec_clk;
+	const char *codec_dai_name;
 	u32 width;
 	int ret;
 
@@ -459,6 +461,7 @@
 
 	/* Diversify the card configurations */
 	if (of_device_is_compatible(np, "fsl,imx-audio-cs42888")) {
+		codec_dai_name = "cs42888";
 		priv->card.set_bias_level = NULL;
 		priv->cpu_priv.sysclk_freq[TX] = priv->codec_priv.mclk_freq;
 		priv->cpu_priv.sysclk_freq[RX] = priv->codec_priv.mclk_freq;
@@ -467,14 +470,22 @@
 		priv->cpu_priv.slot_width = 32;
 		priv->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
 	} else if (of_device_is_compatible(np, "fsl,imx-audio-sgtl5000")) {
+		codec_dai_name = "sgtl5000";
 		priv->codec_priv.mclk_id = SGTL5000_SYSCLK;
 		priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
 	} else if (of_device_is_compatible(np, "fsl,imx-audio-wm8962")) {
+		codec_dai_name = "wm8962";
 		priv->card.set_bias_level = fsl_asoc_card_set_bias_level;
 		priv->codec_priv.mclk_id = WM8962_SYSCLK_MCLK;
 		priv->codec_priv.fll_id = WM8962_SYSCLK_FLL;
 		priv->codec_priv.pll_id = WM8962_FLL;
 		priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
+	} else if (of_device_is_compatible(np, "fsl,imx-audio-wm8960")) {
+		codec_dai_name = "wm8960-hifi";
+		priv->card.set_bias_level = fsl_asoc_card_set_bias_level;
+		priv->codec_priv.fll_id = WM8960_SYSCLK_AUTO;
+		priv->codec_priv.pll_id = WM8960_SYSCLK_AUTO;
+		priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
 	} else {
 		dev_err(&pdev->dev, "unknown Device Tree compatible\n");
 		return -EINVAL;
@@ -521,7 +532,7 @@
 	/* Normal DAI Link */
 	priv->dai_link[0].cpu_of_node = cpu_np;
 	priv->dai_link[0].codec_of_node = codec_np;
-	priv->dai_link[0].codec_dai_name = codec_dev->name;
+	priv->dai_link[0].codec_dai_name = codec_dai_name;
 	priv->dai_link[0].platform_of_node = cpu_np;
 	priv->dai_link[0].dai_fmt = priv->dai_fmt;
 	priv->card.num_links = 1;
@@ -530,7 +541,7 @@
 		/* DPCM DAI Links only if ASRC exists */
 		priv->dai_link[1].cpu_of_node = asrc_np;
 		priv->dai_link[1].platform_of_node = asrc_np;
-		priv->dai_link[2].codec_dai_name = codec_dev->name;
+		priv->dai_link[2].codec_dai_name = codec_dai_name;
 		priv->dai_link[2].codec_of_node = codec_np;
 		priv->dai_link[2].cpu_of_node = cpu_np;
 		priv->dai_link[2].dai_fmt = priv->dai_fmt;
@@ -578,6 +589,7 @@
 	{ .compatible = "fsl,imx-audio-cs42888", },
 	{ .compatible = "fsl,imx-audio-sgtl5000", },
 	{ .compatible = "fsl,imx-audio-wm8962", },
+	{ .compatible = "fsl,imx-audio-wm8960", },
 	{}
 };
 
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index c068494..9f087d4 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -931,14 +931,29 @@
 static int fsl_asrc_runtime_resume(struct device *dev)
 {
 	struct fsl_asrc *asrc_priv = dev_get_drvdata(dev);
-	int i;
+	int i, ret;
 
-	clk_prepare_enable(asrc_priv->mem_clk);
-	clk_prepare_enable(asrc_priv->ipg_clk);
-	for (i = 0; i < ASRC_CLK_MAX_NUM; i++)
-		clk_prepare_enable(asrc_priv->asrck_clk[i]);
+	ret = clk_prepare_enable(asrc_priv->mem_clk);
+	if (ret)
+		return ret;
+	ret = clk_prepare_enable(asrc_priv->ipg_clk);
+	if (ret)
+		goto disable_mem_clk;
+	for (i = 0; i < ASRC_CLK_MAX_NUM; i++) {
+		ret = clk_prepare_enable(asrc_priv->asrck_clk[i]);
+		if (ret)
+			goto disable_asrck_clk;
+	}
 
 	return 0;
+
+disable_asrck_clk:
+	for (i--; i >= 0; i--)
+		clk_disable_unprepare(asrc_priv->asrck_clk[i]);
+	clk_disable_unprepare(asrc_priv->ipg_clk);
+disable_mem_clk:
+	clk_disable_unprepare(asrc_priv->mem_clk);
+	return ret;
 }
 
 static int fsl_asrc_runtime_suspend(struct device *dev)
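
The fsl_asrc resume path above now unwinds on failure: clocks that were
already enabled are disabled again in reverse order before the error is
returned (the fsl_spdif hunk further down adopts the same pattern). A
generic, self-contained sketch of the idiom (names hypothetical):

	#include <linux/clk.h>

	static int enable_clocks(struct clk *clks[], int n)
	{
		int i, ret;

		for (i = 0; i < n; i++) {
			ret = clk_prepare_enable(clks[i]);
			if (ret)
				goto unwind;
		}
		return 0;

	unwind:
		/* disable in reverse order, newest first */
		while (--i >= 0)
			clk_disable_unprepare(clks[i]);
		return ret;
	}
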
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 45eee13..837979e 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -839,7 +839,7 @@
 		return ret;
 	}
 
-	ret = imx_pcm_dma_init(pdev);
+	ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);
 	if (ret)
 		dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
 
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 5c73bea..a18fd92 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -791,7 +791,7 @@
 		return ret;
 
 	if (sai->sai_on_imx)
-		return imx_pcm_dma_init(pdev);
+		return imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
 	else
 		return devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
 }
diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
index 0662809..b95fbc3 100644
--- a/sound/soc/fsl/fsl_sai.h
+++ b/sound/soc/fsl/fsl_sai.h
@@ -13,7 +13,8 @@
 
 #define FSL_SAI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
 			 SNDRV_PCM_FMTBIT_S20_3LE |\
-			 SNDRV_PCM_FMTBIT_S24_LE)
+			 SNDRV_PCM_FMTBIT_S24_LE |\
+			 SNDRV_PCM_FMTBIT_S32_LE)
 
 /* SAI Register Map Register */
 #define FSL_SAI_TCSR	0x00 /* SAI Transmit Control */
@@ -45,7 +46,7 @@
 #define FSL_SAI_xFR(tx)		(tx ? FSL_SAI_TFR : FSL_SAI_RFR)
 #define FSL_SAI_xMR(tx)		(tx ? FSL_SAI_TMR : FSL_SAI_RMR)
 
-/* SAI Transmit/Recieve Control Register */
+/* SAI Transmit/Receive Control Register */
 #define FSL_SAI_CSR_TERE	BIT(31)
 #define FSL_SAI_CSR_FR		BIT(25)
 #define FSL_SAI_CSR_SR		BIT(24)
@@ -67,10 +68,10 @@
 #define FSL_SAI_CSR_FRIE	BIT(8)
 #define FSL_SAI_CSR_FRDE	BIT(0)
 
-/* SAI Transmit and Recieve Configuration 1 Register */
+/* SAI Transmit and Receive Configuration 1 Register */
 #define FSL_SAI_CR1_RFW_MASK	0x1f
 
-/* SAI Transmit and Recieve Configuration 2 Register */
+/* SAI Transmit and Receive Configuration 2 Register */
 #define FSL_SAI_CR2_SYNC	BIT(30)
 #define FSL_SAI_CR2_MSEL_MASK	(0x3 << 26)
 #define FSL_SAI_CR2_MSEL_BUS	0
@@ -82,12 +83,12 @@
 #define FSL_SAI_CR2_BCD_MSTR	BIT(24)
 #define FSL_SAI_CR2_DIV_MASK	0xff
 
-/* SAI Transmit and Recieve Configuration 3 Register */
+/* SAI Transmit and Receive Configuration 3 Register */
 #define FSL_SAI_CR3_TRCE	BIT(16)
 #define FSL_SAI_CR3_WDFL(x)	(x)
 #define FSL_SAI_CR3_WDFL_MASK	0x1f
 
-/* SAI Transmit and Recieve Configuration 4 Register */
+/* SAI Transmit and Receive Configuration 4 Register */
 #define FSL_SAI_CR4_FRSZ(x)	(((x) - 1) << 16)
 #define FSL_SAI_CR4_FRSZ_MASK	(0x1f << 16)
 #define FSL_SAI_CR4_SYWD(x)	(((x) - 1) << 8)
@@ -97,7 +98,7 @@
 #define FSL_SAI_CR4_FSP		BIT(1)
 #define FSL_SAI_CR4_FSD_MSTR	BIT(0)
 
-/* SAI Transmit and Recieve Configuration 5 Register */
+/* SAI Transmit and Receive Configuration 5 Register */
 #define FSL_SAI_CR5_WNW(x)	(((x) - 1) << 24)
 #define FSL_SAI_CR5_WNW_MASK	(0x1f << 24)
 #define FSL_SAI_CR5_W0W(x)	(((x) - 1) << 16)
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 8e93221..ab729f2 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -454,7 +454,8 @@
 	struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
 	struct platform_device *pdev = spdif_priv->pdev;
 	struct regmap *regmap = spdif_priv->regmap;
-	u32 scr, mask, i;
+	u32 scr, mask;
+	int i;
 	int ret;
 
 	/* Reset module and interrupts only for first initialization */
@@ -482,13 +483,18 @@
 		mask = SCR_TXFIFO_AUTOSYNC_MASK | SCR_TXFIFO_CTRL_MASK |
 			SCR_TXSEL_MASK | SCR_USRC_SEL_MASK |
 			SCR_TXFIFO_FSEL_MASK;
-		for (i = 0; i < SPDIF_TXRATE_MAX; i++)
-			clk_prepare_enable(spdif_priv->txclk[i]);
+		for (i = 0; i < SPDIF_TXRATE_MAX; i++) {
+			ret = clk_prepare_enable(spdif_priv->txclk[i]);
+			if (ret)
+				goto disable_txclk;
+		}
 	} else {
 		scr = SCR_RXFIFO_FSEL_IF8 | SCR_RXFIFO_AUTOSYNC;
 		mask = SCR_RXFIFO_FSEL_MASK | SCR_RXFIFO_AUTOSYNC_MASK|
 			SCR_RXFIFO_CTL_MASK | SCR_RXFIFO_OFF_MASK;
-		clk_prepare_enable(spdif_priv->rxclk);
+		ret = clk_prepare_enable(spdif_priv->rxclk);
+		if (ret)
+			goto err;
 	}
 	regmap_update_bits(regmap, REG_SPDIF_SCR, mask, scr);
 
@@ -497,6 +503,9 @@
 
 	return 0;
 
+disable_txclk:
+	for (i--; i >= 0; i--)
+		clk_disable_unprepare(spdif_priv->txclk[i]);
 err:
 	clk_disable_unprepare(spdif_priv->coreclk);
 
@@ -707,7 +716,7 @@
 	return ret;
 }
 
-/* Q-subcode infomation. The byte size is SPDIF_UBITS_SIZE/8 */
+/* Q-subcode information. The byte size is SPDIF_UBITS_SIZE/8 */
 static int fsl_spdif_qinfo(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_info *uinfo)
 {
@@ -739,7 +748,7 @@
 	return ret;
 }
 
-/* Valid bit infomation */
+/* Valid bit information */
 static int fsl_spdif_vbit_info(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_info *uinfo)
 {
@@ -767,7 +776,7 @@
 	return 0;
 }
 
-/* DPLL lock infomation */
+/* DPLL lock information */
 static int fsl_spdif_rxrate_info(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_info *uinfo)
 {
@@ -1255,7 +1264,7 @@
 		return ret;
 	}
 
-	ret = imx_pcm_dma_init(pdev);
+	ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
 	if (ret)
 		dev_err(&pdev->dev, "imx_pcm_dma_init failed: %d\n", ret);
 
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index c0b940e..8ec6fb2 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -156,7 +156,7 @@
  *
  * @dbg_stats: Debugging statistics
  *
- * @soc: SoC specifc data
+ * @soc: SoC specific data
  */
 struct fsl_ssi_private {
 	struct regmap *regs;
@@ -900,14 +900,16 @@
 		scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
 		break;
 	default:
-		return -EINVAL;
+		if (!fsl_ssi_is_ac97(ssi_private))
+			return -EINVAL;
 	}
 
 	stcr |= strcr;
 	srcr |= strcr;
 
-	if (ssi_private->cpu_dai_drv.symmetric_rates) {
-		/* Need to clear RXDIR when using SYNC mode */
+	if (ssi_private->cpu_dai_drv.symmetric_rates
+			|| fsl_ssi_is_ac97(ssi_private)) {
+		/* Need to clear RXDIR when using SYNC or AC97 mode */
 		srcr &= ~CCSR_SSI_SRCR_RXDIR;
 		scr |= CCSR_SSI_SCR_SYN;
 	}
@@ -1101,6 +1103,7 @@
 
 static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
 	.bus_control = true,
+	.probe = fsl_ssi_dai_probe,
 	.playback = {
 		.stream_name = "AC97 Playback",
 		.channels_min = 2,
@@ -1127,10 +1130,17 @@
 	struct regmap *regs = fsl_ac97_data->regs;
 	unsigned int lreg;
 	unsigned int lval;
+	int ret;
 
 	if (reg > 0x7f)
 		return;
 
+	ret = clk_prepare_enable(fsl_ac97_data->clk);
+	if (ret) {
+		pr_err("ac97 write clk_prepare_enable failed: %d\n",
+			ret);
+		return;
+	}
 
 	lreg = reg <<  12;
 	regmap_write(regs, CCSR_SSI_SACADD, lreg);
@@ -1141,6 +1151,8 @@
 	regmap_update_bits(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_RDWR_MASK,
 			CCSR_SSI_SACNT_WR);
 	udelay(100);
+
+	clk_disable_unprepare(fsl_ac97_data->clk);
 }
 
 static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
@@ -1151,6 +1163,14 @@
 	unsigned short val = -1;
 	u32 reg_val;
 	unsigned int lreg;
+	int ret;
+
+	ret = clk_prepare_enable(fsl_ac97_data->clk);
+	if (ret) {
+		pr_err("ac97 read clk_prepare_enable failed: %d\n",
+			ret);
+		return -1;
+	}
 
 	lreg = (reg & 0x7f) <<  12;
 	regmap_write(regs, CCSR_SSI_SACADD, lreg);
@@ -1162,6 +1182,8 @@
 	regmap_read(regs, CCSR_SSI_SACDAT, &reg_val);
 	val = (reg_val >> 4) & 0xffff;
 
+	clk_disable_unprepare(fsl_ac97_data->clk);
+
 	return val;
 }
 
@@ -1210,7 +1232,7 @@
 		}
 	}
 
-	/* For those SLAVE implementations, we ingore non-baudclk cases
+	/* For those SLAVE implementations, we ignore non-baudclk cases
 	 * and, instead, abandon MASTER mode that needs baud clock.
 	 */
 	ssi_private->baudclk = devm_clk_get(&pdev->dev, "baud");
@@ -1257,7 +1279,7 @@
 		if (ret)
 			goto error_pcm;
 	} else {
-		ret = imx_pcm_dma_init(pdev);
+		ret = imx_pcm_dma_init(pdev, IMX_SSI_DMABUF_SIZE);
 		if (ret)
 			goto error_pcm;
 	}
@@ -1320,7 +1342,11 @@
 
 		fsl_ac97_data = ssi_private;
 
-		snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
+		ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
+		if (ret) {
+			dev_err(&pdev->dev, "could not set AC'97 ops\n");
+			return ret;
+		}
 	} else {
 		/* Initialize this copy of the CPU DAI driver structure */
 		memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
@@ -1357,7 +1383,9 @@
 
 	/* Are the RX and the TX clocks locked? */
 	if (!of_find_property(np, "fsl,ssi-asynchronous", NULL)) {
-		ssi_private->cpu_dai_drv.symmetric_rates = 1;
+		if (!fsl_ssi_is_ac97(ssi_private))
+			ssi_private->cpu_dai_drv.symmetric_rates = 1;
+
 		ssi_private->cpu_dai_drv.symmetric_channels = 1;
 		ssi_private->cpu_dai_drv.symmetric_samplebits = 1;
 	}
@@ -1434,6 +1462,27 @@
 		_fsl_ssi_set_dai_fmt(&pdev->dev, ssi_private,
 				     ssi_private->dai_fmt);
 
+	if (fsl_ssi_is_ac97(ssi_private)) {
+		u32 ssi_idx;
+
+		ret = of_property_read_u32(np, "cell-index", &ssi_idx);
+		if (ret) {
+			dev_err(&pdev->dev, "cannot get SSI index property\n");
+			goto error_sound_card;
+		}
+
+		ssi_private->pdev =
+			platform_device_register_data(NULL,
+					"ac97-codec", ssi_idx, NULL, 0);
+		if (IS_ERR(ssi_private->pdev)) {
+			ret = PTR_ERR(ssi_private->pdev);
+			dev_err(&pdev->dev,
+				"failed to register AC97 codec platform: %d\n",
+				ret);
+			goto error_sound_card;
+		}
+	}
+
 	return 0;
 
 error_sound_card:
@@ -1458,6 +1507,9 @@
 	if (ssi_private->soc->imx)
 		fsl_ssi_imx_clean(pdev, ssi_private);
 
+	if (fsl_ssi_is_ac97(ssi_private))
+		snd_soc_set_ac97_ops(NULL);
+
 	return 0;
 }
 
diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c
index 0db94f49..1fc01ed 100644
--- a/sound/soc/fsl/imx-pcm-dma.c
+++ b/sound/soc/fsl/imx-pcm-dma.c
@@ -40,7 +40,7 @@
 		SNDRV_PCM_INFO_MMAP_VALID |
 		SNDRV_PCM_INFO_PAUSE |
 		SNDRV_PCM_INFO_RESUME,
-	.buffer_bytes_max = IMX_SSI_DMABUF_SIZE,
+	.buffer_bytes_max = IMX_DEFAULT_DMABUF_SIZE,
 	.period_bytes_min = 128,
 	.period_bytes_max = 65535, /* Limited by SDMA engine */
 	.periods_min = 2,
@@ -52,13 +52,30 @@
 	.pcm_hardware = &imx_pcm_hardware,
 	.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
 	.compat_filter_fn = filter,
-	.prealloc_buffer_size = IMX_SSI_DMABUF_SIZE,
+	.prealloc_buffer_size = IMX_DEFAULT_DMABUF_SIZE,
 };
 
-int imx_pcm_dma_init(struct platform_device *pdev)
+int imx_pcm_dma_init(struct platform_device *pdev, size_t size)
 {
+	struct snd_dmaengine_pcm_config *config;
+	struct snd_pcm_hardware *pcm_hardware;
+
+	config = devm_kzalloc(&pdev->dev,
+			sizeof(struct snd_dmaengine_pcm_config), GFP_KERNEL);
+	*config = imx_dmaengine_pcm_config;
+	if (size)
+		config->prealloc_buffer_size = size;
+
+	pcm_hardware = devm_kzalloc(&pdev->dev,
+			sizeof(struct snd_pcm_hardware), GFP_KERNEL);
+	*pcm_hardware = imx_pcm_hardware;
+	if (size)
+		pcm_hardware->buffer_bytes_max = size;
+
+	config->pcm_hardware = pcm_hardware;
+
 	return devm_snd_dmaengine_pcm_register(&pdev->dev,
-		&imx_dmaengine_pcm_config,
+		config,
 		SND_DMAENGINE_PCM_FLAG_COMPAT);
 }
 EXPORT_SYMBOL_GPL(imx_pcm_dma_init);
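
One caveat in the new imx_pcm_dma_init(): the two devm_kzalloc() results
are dereferenced without a NULL check. A defensive variant of that section
would read (a sketch, not part of the patch):

	config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;
	*config = imx_dmaengine_pcm_config;
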
diff --git a/sound/soc/fsl/imx-pcm.h b/sound/soc/fsl/imx-pcm.h
index c79cb27..133c4470a 100644
--- a/sound/soc/fsl/imx-pcm.h
+++ b/sound/soc/fsl/imx-pcm.h
@@ -20,6 +20,11 @@
  */
 #define IMX_SSI_DMABUF_SIZE	(64 * 1024)
 
+#define IMX_DEFAULT_DMABUF_SIZE	(64 * 1024)
+#define IMX_SAI_DMABUF_SIZE	(64 * 1024)
+#define IMX_SPDIF_DMABUF_SIZE	(64 * 1024)
+#define IMX_ESAI_DMABUF_SIZE	(256 * 1024)
+
 static inline void
 imx_pcm_dma_params_init_data(struct imx_dma_data *dma_data,
 	int dma, enum sdma_peripheral_type peripheral_type)
@@ -39,9 +44,9 @@
 };
 
 #if IS_ENABLED(CONFIG_SND_SOC_IMX_PCM_DMA)
-int imx_pcm_dma_init(struct platform_device *pdev);
+int imx_pcm_dma_init(struct platform_device *pdev, size_t size);
 #else
-static inline int imx_pcm_dma_init(struct platform_device *pdev)
+static inline int imx_pcm_dma_init(struct platform_device *pdev, size_t size)
 {
 	return -ENODEV;
 }
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index 461ce27..48b2d24 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -603,7 +603,7 @@
 	ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
 
 	ssi->fiq_init = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
-	ssi->dma_init = imx_pcm_dma_init(pdev);
+	ssi->dma_init = imx_pcm_dma_init(pdev, IMX_SSI_DMABUF_SIZE);
 
 	if (ssi->fiq_init && ssi->dma_init) {
 		ret = ssi->fiq_init;
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index d555493..3ff76d4 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -76,6 +76,7 @@
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
 	struct simple_dai_props *dai_props =
 		&priv->dai_props[rtd - rtd->card->rtd];
@@ -91,8 +92,16 @@
 		mclk = params_rate(params) * mclk_fs;
 		ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
 					     SND_SOC_CLOCK_IN);
+		if (ret && ret != -ENOTSUPP)
+			goto err;
+
+		ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk,
+					     SND_SOC_CLOCK_OUT);
+		if (ret && ret != -ENOTSUPP)
+			goto err;
 	}
 
+err:
 	return ret;
 }
 
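The simple-card change above pushes the mclk to both ends of the link but
deliberately tolerates -ENOTSUPP, which is what snd_soc_dai_set_sysclk()
returns when a DAI provides no set_sysclk hook. The idiom, isolated:

	ret = snd_soc_dai_set_sysclk(dai, 0, mclk, SND_SOC_CLOCK_IN);
	if (ret && ret != -ENOTSUPP)
		return ret;	/* a real error, not merely "unsupported" */
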
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index f3060a4..05fde5e6e 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -26,14 +26,9 @@
 	depends on ACPI
 
 config SND_SOC_INTEL_SST
-	tristate "ASoC support for Intel(R) Smart Sound Technology"
+	tristate
 	select SND_SOC_INTEL_SST_ACPI if ACPI
 	depends on (X86 || COMPILE_TEST)
-	depends on DW_DMAC_CORE
-	help
-          This adds support for Intel(R) Smart Sound Technology (SST).
-          Say Y if you have such a device
-          If unsure select "N".
 
 config SND_SOC_INTEL_SST_ACPI
 	tristate
@@ -46,8 +41,9 @@
 
 config SND_SOC_INTEL_HASWELL_MACH
 	tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
-	depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C && \
-		   I2C_DESIGNWARE_PLATFORM
+	depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
+	depends on DW_DMAC_CORE
+	select SND_SOC_INTEL_SST
 	select SND_SOC_INTEL_HASWELL
 	select SND_SOC_RT5640
 	help
@@ -58,7 +54,9 @@
 
 config SND_SOC_INTEL_BYT_RT5640_MACH
 	tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
-	depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C
+	depends on X86_INTEL_LPSS && I2C
+	depends on DW_DMAC_CORE
+	select SND_SOC_INTEL_SST
 	select SND_SOC_INTEL_BAYTRAIL
 	select SND_SOC_RT5640
 	help
@@ -67,7 +65,9 @@
 
 config SND_SOC_INTEL_BYT_MAX98090_MACH
 	tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
-	depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C
+	depends on X86_INTEL_LPSS && I2C
+	depends on DW_DMAC_CORE
+	select SND_SOC_INTEL_SST
 	select SND_SOC_INTEL_BAYTRAIL
 	select SND_SOC_MAX98090
 	help
@@ -76,8 +76,10 @@
 
 config SND_SOC_INTEL_BROADWELL_MACH
 	tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
-	depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && DW_DMAC && \
+	depends on X86_INTEL_LPSS && I2C && DW_DMAC && \
 		   I2C_DESIGNWARE_PLATFORM
+	depends on DW_DMAC_CORE
+	select SND_SOC_INTEL_SST
 	select SND_SOC_INTEL_HASWELL
 	select SND_SOC_RT286
 	help
@@ -132,3 +134,8 @@
       This adds support for the ASoC machine driver for Intel(R) Cherrytrail & Braswell
       platforms with the MAX98090 audio codec; it can also support the TI jack chip as an aux device.
       If unsure select "N".
+
+config SND_SOC_INTEL_SKYLAKE
+	tristate
+	select SND_HDA_EXT_CORE
+	select SND_SOC_INTEL_SST
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index 6de5d5c..2b45435 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -5,6 +5,7 @@
 obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += haswell/
 obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/
 obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/
+obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += skylake/
 
 # Machine support
 obj-$(CONFIG_SND_SOC) += boards/
diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c
index 31e9b9e..d55388e 100644
--- a/sound/soc/intel/atom/sst-atom-controls.c
+++ b/sound/soc/intel/atom/sst-atom-controls.c
@@ -132,7 +132,7 @@
 			      sizeof(cmd.header) + cmd.header.length);
 }
 
-int sst_slot_enum_info(struct snd_kcontrol *kcontrol,
+static int sst_slot_enum_info(struct snd_kcontrol *kcontrol,
 		       struct snd_ctl_elem_info *uinfo)
 {
 	struct sst_enum *e = (struct sst_enum *)kcontrol->private_value;
@@ -1298,7 +1298,7 @@
 		dev_dbg(dai->dev, "Stream name=%s\n",
 				dai->playback_widget->name);
 		w = dai->playback_widget;
-		list_for_each_entry(p, &w->sinks, list_source) {
+		snd_soc_dapm_widget_for_each_sink_path(w, p) {
 			if (p->connected && !p->connected(w, p->sink))
 				continue;
 
@@ -1317,7 +1317,7 @@
 		dev_dbg(dai->dev, "Stream name=%s\n",
 				dai->capture_widget->name);
 		w = dai->capture_widget;
-		list_for_each_entry(p, &w->sources, list_sink) {
+		snd_soc_dapm_widget_for_each_source_path(w, p) {
 			if (p->connected && !p->connected(w, p->sink))
 				continue;
 
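The two loops above switch from open-coding the DAPM path lists to the new
widget path iterator helpers. As the removed lines show, the helpers are
thin wrappers over the same lists; roughly (a sketch, see
include/sound/soc-dapm.h for the authoritative definitions):

	#define snd_soc_dapm_widget_for_each_sink_path(w, p) \
		list_for_each_entry(p, &(w)->sinks, list_source)

	#define snd_soc_dapm_widget_for_each_source_path(w, p) \
		list_for_each_entry(p, &(w)->sources, list_sink)
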
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 641ebe6..683e501 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -33,7 +33,6 @@
 
 struct sst_device *sst;
 static DEFINE_MUTEX(sst_lock);
-extern struct snd_compr_ops sst_platform_compr_ops;
 
 int sst_register_dsp(struct sst_device *dev)
 {
diff --git a/sound/soc/intel/atom/sst-mfld-platform.h b/sound/soc/intel/atom/sst-mfld-platform.h
index 2409b23..cb32cc7 100644
--- a/sound/soc/intel/atom/sst-mfld-platform.h
+++ b/sound/soc/intel/atom/sst-mfld-platform.h
@@ -25,6 +25,7 @@
 #include "sst-atom-controls.h"
 
 extern struct sst_device *sst;
+extern struct snd_compr_ops sst_platform_compr_ops;
 
 #define SST_MONO		1
 #define SST_STEREO		2
diff --git a/sound/soc/intel/atom/sst/sst_drv_interface.c b/sound/soc/intel/atom/sst/sst_drv_interface.c
index 0e0e4d9..ce689c5 100644
--- a/sound/soc/intel/atom/sst/sst_drv_interface.c
+++ b/sound/soc/intel/atom/sst/sst_drv_interface.c
@@ -151,6 +151,7 @@
 		usage_count = GET_USAGE_COUNT(dev);
 		dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
 		if (ret < 0) {
+			pm_runtime_put_sync(dev);
 			dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret);
 			return ret;
 		}
@@ -204,8 +205,10 @@
 	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
 
 	retval = pm_runtime_get_sync(ctx->dev);
-	if (retval < 0)
+	if (retval < 0) {
+		pm_runtime_put_sync(ctx->dev);
 		return retval;
+	}
 
 	str_id = sst_get_stream(ctx, str_params);
 	if (str_id > 0) {
@@ -672,8 +675,10 @@
 	if (NULL == bytes)
 		return -EINVAL;
 	ret_val = pm_runtime_get_sync(ctx->dev);
-	if (ret_val < 0)
+	if (ret_val < 0) {
+		pm_runtime_put_sync(ctx->dev);
 		return ret_val;
+	}
 
 	ret_val = sst_send_byte_stream_mrfld(ctx, bytes);
 	sst_pm_runtime_put(ctx);
diff --git a/sound/soc/intel/atom/sst/sst_ipc.c b/sound/soc/intel/atom/sst/sst_ipc.c
index 5a27861..3dc7358 100644
--- a/sound/soc/intel/atom/sst/sst_ipc.c
+++ b/sound/soc/intel/atom/sst/sst_ipc.c
@@ -352,10 +352,9 @@
 	 * copy from mailbox
 	 **/
 	if (msg_high.part.large) {
-		data = kzalloc(msg_low, GFP_KERNEL);
+		data = kmemdup((void *)msg->mailbox_data, msg_low, GFP_KERNEL);
 		if (!data)
 			return;
-		memcpy(data, (void *) msg->mailbox_data, msg_low);
 		/* Copy command id so that we can use to put sst to reset */
 		dsp_hdr = (struct ipc_dsp_hdr *)data;
 		cmd_id = dsp_hdr->cmd_id;
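
The sst_ipc hunk above folds the kzalloc()+memcpy() pair into kmemdup(),
which allocates and copies in one call; the error handling is unchanged:

	/* returns NULL on allocation failure, just like kzalloc() */
	data = kmemdup(src, len, GFP_KERNEL);
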
diff --git a/sound/soc/intel/boards/byt-max98090.c b/sound/soc/intel/boards/byt-max98090.c
index 7ab8cc9..d9f81b8 100644
--- a/sound/soc/intel/boards/byt-max98090.c
+++ b/sound/soc/intel/boards/byt-max98090.c
@@ -126,6 +126,7 @@
 
 static struct snd_soc_card byt_max98090_card = {
 	.name = "byt-max98090",
+	.owner = THIS_MODULE,
 	.dai_link = byt_max98090_dais,
 	.num_links = ARRAY_SIZE(byt_max98090_dais),
 	.dapm_widgets = byt_max98090_widgets,
diff --git a/sound/soc/intel/boards/byt-rt5640.c b/sound/soc/intel/boards/byt-rt5640.c
index ae89b9b9..de9788a 100644
--- a/sound/soc/intel/boards/byt-rt5640.c
+++ b/sound/soc/intel/boards/byt-rt5640.c
@@ -197,6 +197,7 @@
 
 static struct snd_soc_card byt_rt5640_card = {
 	.name = "byt-rt5640",
+	.owner = THIS_MODULE,
 	.dai_link = byt_rt5640_dais,
 	.num_links = ARRAY_SIZE(byt_rt5640_dais),
 	.dapm_widgets = byt_rt5640_widgets,
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 7f55d59..c445312 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -185,6 +185,7 @@
 /* SoC card */
 static struct snd_soc_card snd_soc_card_byt = {
 	.name = "baytrailcraudio",
+	.owner = THIS_MODULE,
 	.dai_link = byt_dailink,
 	.num_links = ARRAY_SIZE(byt_dailink),
 	.dapm_widgets = byt_dapm_widgets,
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index 70f8321..49f4869 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -104,21 +104,17 @@
 static int cht_ti_jack_event(struct notifier_block *nb,
 		unsigned long event, void *data)
 {
-
 	struct snd_soc_jack *jack = (struct snd_soc_jack *)data;
-	struct snd_soc_dai *codec_dai = jack->card->rtd->codec_dai;
-	struct snd_soc_codec *codec = codec_dai->codec;
+	struct snd_soc_dapm_context *dapm = &jack->card->dapm;
 
 	if (event & SND_JACK_MICROPHONE) {
-
-		snd_soc_dapm_force_enable_pin(&codec->dapm, "SHDN");
-		snd_soc_dapm_force_enable_pin(&codec->dapm, "MICBIAS");
-		snd_soc_dapm_sync(&codec->dapm);
+		snd_soc_dapm_force_enable_pin(dapm, "SHDN");
+		snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
+		snd_soc_dapm_sync(dapm);
 	} else {
-
-		snd_soc_dapm_disable_pin(&codec->dapm, "MICBIAS");
-		snd_soc_dapm_disable_pin(&codec->dapm, "SHDN");
-		snd_soc_dapm_sync(&codec->dapm);
+		snd_soc_dapm_disable_pin(dapm, "MICBIAS");
+		snd_soc_dapm_disable_pin(dapm, "SHDN");
+		snd_soc_dapm_sync(dapm);
 	}
 
 	return 0;
@@ -279,6 +275,7 @@
 /* SoC card */
 static struct snd_soc_card snd_soc_card_cht = {
 	.name = "chtmax98090",
+	.owner = THIS_MODULE,
 	.dai_link = cht_dailink,
 	.num_links = ARRAY_SIZE(cht_dailink),
 	.aux_dev = &cht_max98090_headset_dev,
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index bdcaf46..7be8461 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -305,6 +305,7 @@
 /* SoC card */
 static struct snd_soc_card snd_soc_card_chtrt5645 = {
 	.name = "chtrt5645",
+	.owner = THIS_MODULE,
 	.dai_link = cht_dailink,
 	.num_links = ARRAY_SIZE(cht_dailink),
 	.dapm_widgets = cht_dapm_widgets,
@@ -317,6 +318,7 @@
 
 static struct snd_soc_card snd_soc_card_chtrt5650 = {
 	.name = "chtrt5650",
+	.owner = THIS_MODULE,
 	.dai_link = cht_dailink,
 	.num_links = ARRAY_SIZE(cht_dailink),
 	.dapm_widgets = cht_dapm_widgets,
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index 2c9cc5b..23fe040 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -323,6 +323,7 @@
 /* SoC card */
 static struct snd_soc_card snd_soc_card_cht = {
 	.name = "cherrytrailcraudio",
+	.owner = THIS_MODULE,
 	.dai_link = cht_dailink,
 	.num_links = ARRAY_SIZE(cht_dailink),
 	.dapm_widgets = cht_dapm_widgets,
diff --git a/sound/soc/intel/common/sst-dsp-priv.h b/sound/soc/intel/common/sst-dsp-priv.h
index 396d545..cbd568e 100644
--- a/sound/soc/intel/common/sst-dsp-priv.h
+++ b/sound/soc/intel/common/sst-dsp-priv.h
@@ -22,6 +22,8 @@
 #include <linux/interrupt.h>
 #include <linux/firmware.h>
 
+#include "../skylake/skl-sst-dsp.h"
+
 struct sst_mem_block;
 struct sst_module;
 struct sst_fw;
@@ -258,6 +260,8 @@
  */
 struct sst_dsp {
 
+	/* Shared for all platforms */
+
 	/* runtime */
 	struct sst_dsp_device *sst_dev;
 	spinlock_t spinlock;	/* IPC locking */
@@ -268,10 +272,6 @@
 	int irq;
 	u32 id;
 
-	/* list of free and used ADSP memory blocks */
-	struct list_head used_block_list;
-	struct list_head free_block_list;
-
 	/* operations */
 	struct sst_ops *ops;
 
@@ -284,6 +284,12 @@
 	/* mailbox */
 	struct sst_mailbox mailbox;
 
+	/* HSW/Byt data */
+
+	/* list of free and used ADSP memory blocks */
+	struct list_head used_block_list;
+	struct list_head free_block_list;
+
 	/* SST FW files loaded and their modules */
 	struct list_head module_list;
 	struct list_head fw_list;
@@ -299,6 +305,15 @@
 	/* DMA FW loading */
 	struct sst_dma *dma;
 	bool fw_use_dma;
+
+	/* SKL data */
+
+	/* To allocate CL dma buffers */
+	struct skl_dsp_loader_ops dsp_ops;
+	struct skl_dsp_fw_ops fw_ops;
+	int sst_state;
+	struct skl_cl_dev cl_dev;
+	u32 intr_status;
 };
 
 /* Size optimised DRAM/IRAM memcpy */
diff --git a/sound/soc/intel/common/sst-dsp.c b/sound/soc/intel/common/sst-dsp.c
index 64e9421..a627236 100644
--- a/sound/soc/intel/common/sst-dsp.c
+++ b/sound/soc/intel/common/sst-dsp.c
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
+#include <linux/delay.h>
 
 #include "sst-dsp.h"
 #include "sst-dsp-priv.h"
@@ -196,6 +197,22 @@
 }
 EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits64_unlocked);
 
+/* This is for register bits with attribute RWC (read/write-1-to-clear) */
+void sst_dsp_shim_update_bits_forced_unlocked(struct sst_dsp *sst, u32 offset,
+				u32 mask, u32 value)
+{
+	unsigned int old, new;
+	u32 ret;
+
+	ret = sst_dsp_shim_read_unlocked(sst, offset);
+
+	old = ret;
+	new = (old & (~mask)) | (value & mask);
+
+	sst_dsp_shim_write_unlocked(sst, offset, new);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits_forced_unlocked);
+
 int sst_dsp_shim_update_bits(struct sst_dsp *sst, u32 offset,
 				u32 mask, u32 value)
 {
@@ -222,6 +239,60 @@
 }
 EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits64);
 
+/* This is for register bits with attribute RWC (read/write-1-to-clear) */
+void sst_dsp_shim_update_bits_forced(struct sst_dsp *sst, u32 offset,
+				u32 mask, u32 value)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sst->spinlock, flags);
+	sst_dsp_shim_update_bits_forced_unlocked(sst, offset, mask, value);
+	spin_unlock_irqrestore(&sst->spinlock, flags);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits_forced);
+
+int sst_dsp_register_poll(struct sst_dsp *ctx, u32 offset, u32 mask,
+			 u32 target, u32 timeout, char *operation)
+{
+	int time, ret;
+	u32 reg;
+	bool done = false;
+
+	/*
+	 * We poll for a couple of milliseconds using mdelay(); if that is
+	 * not successful, fall back to a longer sleep using usleep_range()
+	 */
+
+	/* check if set state successful */
+	for (time = 0; time < 5; time++) {
+		if ((sst_dsp_shim_read_unlocked(ctx, offset) & mask) == target) {
+			done = true;
+			break;
+		}
+		mdelay(1);
+	}
+
+	if (!done) {
+		/* sleeping in 10ms steps so adjust timeout value */
+		timeout /= 10;
+
+		for (time = 0; time < timeout; time++) {
+			if ((sst_dsp_shim_read_unlocked(ctx, offset) & mask) == target)
+				break;
+
+			usleep_range(5000, 10000);
+		}
+	}
+
+	reg = sst_dsp_shim_read_unlocked(ctx, offset);
+	dev_info(ctx->dev, "FW Poll Status: reg=%#x %s %s\n", reg, operation,
+			(time < timeout) ? "successful" : "timedout");
+	ret = time < timeout ? 0 : -ETIME;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_register_poll);
+
 void sst_dsp_dump(struct sst_dsp *sst)
 {
 	if (sst->ops->dump)
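
A hypothetical call site for the new sst_dsp_register_poll() helper (the
register and mask names below are invented for illustration; only the
helper itself comes from the hunk above):

	/* wait up to 300 ms for the DSP cores to leave reset */
	ret = sst_dsp_register_poll(dsp, EXAMPLE_REG_ADSPCS,
				    EXAMPLE_CRST_MASK, 0,
				    300, "unset reset");
	if (ret < 0)
		dev_err(dsp->dev, "DSP cores did not leave reset\n");
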
diff --git a/sound/soc/intel/common/sst-dsp.h b/sound/soc/intel/common/sst-dsp.h
index 96aeb25..1f45f18 100644
--- a/sound/soc/intel/common/sst-dsp.h
+++ b/sound/soc/intel/common/sst-dsp.h
@@ -230,6 +230,8 @@
 u64 sst_dsp_shim_read64(struct sst_dsp *sst, u32 offset);
 int sst_dsp_shim_update_bits64(struct sst_dsp *sst, u32 offset,
 				u64 mask, u64 value);
+void sst_dsp_shim_update_bits_forced(struct sst_dsp *sst, u32 offset,
+				u32 mask, u32 value);
 
 /* SHIM Read / Write Unlocked for callers already holding sst lock */
 void sst_dsp_shim_write_unlocked(struct sst_dsp *sst, u32 offset, u32 value);
@@ -240,6 +242,8 @@
 u64 sst_dsp_shim_read64_unlocked(struct sst_dsp *sst, u32 offset);
 int sst_dsp_shim_update_bits64_unlocked(struct sst_dsp *sst, u32 offset,
 					u64 mask, u64 value);
+void sst_dsp_shim_update_bits_forced_unlocked(struct sst_dsp *sst, u32 offset,
+				u32 mask, u32 value);
 
 /* Internal generic low-level SST IO functions - can be overidden */
 void sst_shim32_write(void __iomem *addr, u32 offset, u32 value);
@@ -278,6 +282,8 @@
 void sst_dsp_outbox_write(struct sst_dsp *dsp, void *message, size_t bytes);
 void sst_dsp_outbox_read(struct sst_dsp *dsp, void *message, size_t bytes);
 void sst_dsp_mailbox_dump(struct sst_dsp *dsp, size_t bytes);
+int sst_dsp_register_poll(struct sst_dsp  *dsp, u32 offset, u32 mask,
+		 u32 expected_value, u32 timeout, char *operation);
 
 /* Debug */
 void sst_dsp_dump(struct sst_dsp *sst);
diff --git a/sound/soc/intel/skylake/Makefile b/sound/soc/intel/skylake/Makefile
new file mode 100644
index 0000000..27db221
--- /dev/null
+++ b/sound/soc/intel/skylake/Makefile
@@ -0,0 +1,9 @@
+snd-soc-skl-objs := skl.o skl-pcm.o skl-nhlt.o skl-messages.o
+
+obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl.o
+
+# Skylake IPC Support
+snd-soc-skl-ipc-objs := skl-sst-ipc.o skl-sst-dsp.o skl-sst-cldma.o \
+		skl-sst.o
+
+obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl-ipc.o
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
new file mode 100644
index 0000000..826d4fd
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -0,0 +1,884 @@
+/*
+ *  skl-messages.c - HDA DSP interface for FW registration, Pipe and Module
+ *  configurations
+ *
+ *  Copyright (C) 2015 Intel Corp
+ *  Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
+ *	   Jeeja KP <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include "skl-sst-dsp.h"
+#include "skl-sst-ipc.h"
+#include "skl.h"
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+#include "skl-topology.h"
+#include "skl-tplg-interface.h"
+
+static int skl_alloc_dma_buf(struct device *dev,
+		struct snd_dma_buffer *dmab, size_t size)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+
+	if (!bus)
+		return -ENODEV;
+
+	return bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, size, dmab);
+}
+
+static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+
+	if (!bus)
+		return -ENODEV;
+
+	bus->io_ops->dma_free_pages(bus, dmab);
+
+	return 0;
+}
+
+int skl_init_dsp(struct skl *skl)
+{
+	void __iomem *mmio_base;
+	struct hdac_ext_bus *ebus = &skl->ebus;
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	int irq = bus->irq;
+	struct skl_dsp_loader_ops loader_ops;
+	int ret;
+
+	loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
+	loader_ops.free_dma_buf = skl_free_dma_buf;
+
+	/* enable ppcap interrupt */
+	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
+	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);
+
+	/* read the BAR of the ADSP MMIO */
+	mmio_base = pci_ioremap_bar(skl->pci, 4);
+	if (mmio_base == NULL) {
+		dev_err(bus->dev, "ioremap error\n");
+		return -ENXIO;
+	}
+
+	ret = skl_sst_dsp_init(bus->dev, mmio_base, irq,
+			loader_ops, &skl->skl_sst);
+
+	dev_dbg(bus->dev, "dsp registration status=%d\n", ret);
+
+	return ret;
+}
+
+void skl_free_dsp(struct skl *skl)
+{
+	struct hdac_ext_bus *ebus = &skl->ebus;
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct skl_sst *ctx = skl->skl_sst;
+
+	/* disable ppcap interrupt */
+	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
+
+	skl_sst_dsp_cleanup(bus->dev, ctx);
+	if (ctx->dsp->addr.lpe)
+		iounmap(ctx->dsp->addr.lpe);
+}
+
+int skl_suspend_dsp(struct skl *skl)
+{
+	struct skl_sst *ctx = skl->skl_sst;
+	int ret;
+
+	/* if ppcap is not supported return 0 */
+	if (!skl->ebus.ppcap)
+		return 0;
+
+	ret = skl_dsp_sleep(ctx->dsp);
+	if (ret < 0)
+		return ret;
+
+	/* disable ppcap interrupt */
+	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
+	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, false);
+
+	return 0;
+}
+
+int skl_resume_dsp(struct skl *skl)
+{
+	struct skl_sst *ctx = skl->skl_sst;
+
+	/* if ppcap is not supported return 0 */
+	if (!skl->ebus.ppcap)
+		return 0;
+
+	/* enable ppcap interrupt */
+	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
+	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);
+
+	return skl_dsp_wake(ctx->dsp);
+}
+
+enum skl_bitdepth skl_get_bit_depth(int params)
+{
+	switch (params) {
+	case 8:
+		return SKL_DEPTH_8BIT;
+
+	case 16:
+		return SKL_DEPTH_16BIT;
+
+	case 24:
+		return SKL_DEPTH_24BIT;
+
+	case 32:
+		return SKL_DEPTH_32BIT;
+
+	default:
+		return SKL_DEPTH_INVALID;
+
+	}
+}
+
+static u32 skl_create_channel_map(enum skl_ch_cfg ch_cfg)
+{
+	u32 config;
+
+	switch (ch_cfg) {
+	case SKL_CH_CFG_MONO:
+		config = (0xFFFFFFF0 | SKL_CHANNEL_LEFT);
+		break;
+
+	case SKL_CH_CFG_STEREO:
+		config = (0xFFFFFF00 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_RIGHT << 4));
+		break;
+
+	case SKL_CH_CFG_2_1:
+		config = (0xFFFFF000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_RIGHT << 4)
+			| (SKL_CHANNEL_LFE << 8));
+		break;
+
+	case SKL_CH_CFG_3_0:
+		config = (0xFFFFF000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_CENTER << 4)
+			| (SKL_CHANNEL_RIGHT << 8));
+		break;
+
+	case SKL_CH_CFG_3_1:
+		config = (0xFFFF0000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_CENTER << 4)
+			| (SKL_CHANNEL_RIGHT << 8)
+			| (SKL_CHANNEL_LFE << 12));
+		break;
+
+	case SKL_CH_CFG_QUATRO:
+		config = (0xFFFF0000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_RIGHT << 4)
+			| (SKL_CHANNEL_LEFT_SURROUND << 8)
+			| (SKL_CHANNEL_RIGHT_SURROUND << 12));
+		break;
+
+	case SKL_CH_CFG_4_0:
+		config = (0xFFFF0000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_CENTER << 4)
+			| (SKL_CHANNEL_RIGHT << 8)
+			| (SKL_CHANNEL_CENTER_SURROUND << 12));
+		break;
+
+	case SKL_CH_CFG_5_0:
+		config = (0xFFF00000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_CENTER << 4)
+			| (SKL_CHANNEL_RIGHT << 8)
+			| (SKL_CHANNEL_LEFT_SURROUND << 12)
+			| (SKL_CHANNEL_RIGHT_SURROUND << 16));
+		break;
+
+	case SKL_CH_CFG_5_1:
+		config = (0xFF000000 | SKL_CHANNEL_CENTER
+			| (SKL_CHANNEL_LEFT << 4)
+			| (SKL_CHANNEL_RIGHT << 8)
+			| (SKL_CHANNEL_LEFT_SURROUND << 12)
+			| (SKL_CHANNEL_RIGHT_SURROUND << 16)
+			| (SKL_CHANNEL_LFE << 20));
+		break;
+
+	case SKL_CH_CFG_DUAL_MONO:
+		config = (0xFFFFFF00 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_LEFT << 4));
+		break;
+
+	case SKL_CH_CFG_I2S_DUAL_STEREO_0:
+		config = (0xFFFFFF00 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_RIGHT << 4));
+		break;
+
+	case SKL_CH_CFG_I2S_DUAL_STEREO_1:
+		config = (0xFFFF00FF | (SKL_CHANNEL_LEFT << 8)
+			| (SKL_CHANNEL_RIGHT << 12));
+		break;
+
+	default:
+		config = 0xFFFFFFFF;
+		break;
+
+	}
+
+	return config;
+}
+
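+/*
+ * Worked example for the channel-map packing above: each mapped channel
+ * occupies one 4-bit slot, starting at the least significant nibble, and
+ * unused slots stay 0xF. Assuming (hypothetically) SKL_CHANNEL_LEFT == 0
+ * and SKL_CHANNEL_RIGHT == 1, SKL_CH_CFG_STEREO packs to
+ *
+ *	0xFFFFFF00 | 0 | (1 << 4) == 0xFFFFFF10
+ *
+ * so slot 0 carries the left channel and slot 1 the right channel.
+ */
+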
+/*
+ * Each module in the DSP expects a base module configuration, which consists
+ * of PCM format information calculated in the driver and resource values
+ * read from the widget information passed through the topology binary.
+ * This is sent when we create a module with the INIT_INSTANCE IPC msg
+ */
+static void skl_set_base_module_format(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_base_cfg *base_cfg)
+{
+	struct skl_module_fmt *format = &mconfig->in_fmt;
+
+	base_cfg->audio_fmt.number_of_channels = (u8)format->channels;
+
+	base_cfg->audio_fmt.s_freq = format->s_freq;
+	base_cfg->audio_fmt.bit_depth = format->bit_depth;
+	base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
+	base_cfg->audio_fmt.ch_cfg = format->ch_cfg;
+
+	dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
+			format->bit_depth, format->valid_bit_depth,
+			format->ch_cfg);
+
+	base_cfg->audio_fmt.channel_map = skl_create_channel_map(
+					base_cfg->audio_fmt.ch_cfg);
+
+	base_cfg->audio_fmt.interleaving = SKL_INTERLEAVING_PER_CHANNEL;
+
+	base_cfg->cps = mconfig->mcps;
+	base_cfg->ibs = mconfig->ibs;
+	base_cfg->obs = mconfig->obs;
+}
+
+/*
+ * Copies copier capabilities into copier module and updates copier module
+ * config size.
+ */
+static void skl_copy_copier_caps(struct skl_module_cfg *mconfig,
+				struct skl_cpr_cfg *cpr_mconfig)
+{
+	if (mconfig->formats_config.caps_size == 0)
+		return;
+
+	memcpy(cpr_mconfig->gtw_cfg.config_data,
+			mconfig->formats_config.caps,
+			mconfig->formats_config.caps_size);
+
+	cpr_mconfig->gtw_cfg.config_length =
+			(mconfig->formats_config.caps_size) / 4;
+}
+
+/*
+ * Calculate the gateway settings required for the copier module: the type
+ * of gateway and the index of the gateway to use
+ */
+static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_cpr_cfg *cpr_mconfig)
+{
+	union skl_connector_node_id node_id = {0};
+	struct skl_pipe_params *params = mconfig->pipe->p_params;
+
+	switch (mconfig->dev_type) {
+	case SKL_DEVICE_BT:
+		node_id.node.dma_type =
+			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
+			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
+			SKL_DMA_I2S_LINK_INPUT_CLASS;
+		node_id.node.vindex = params->host_dma_id +
+					(mconfig->vbus_id << 3);
+		break;
+
+	case SKL_DEVICE_I2S:
+		node_id.node.dma_type =
+			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
+			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
+			SKL_DMA_I2S_LINK_INPUT_CLASS;
+		node_id.node.vindex = params->host_dma_id +
+					 (mconfig->time_slot << 1) +
+					 (mconfig->vbus_id << 3);
+		break;
+
+	case SKL_DEVICE_DMIC:
+		node_id.node.dma_type = SKL_DMA_DMIC_LINK_INPUT_CLASS;
+		node_id.node.vindex = mconfig->vbus_id +
+					 (mconfig->time_slot);
+		break;
+
+	case SKL_DEVICE_HDALINK:
+		node_id.node.dma_type =
+			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
+			SKL_DMA_HDA_LINK_OUTPUT_CLASS :
+			SKL_DMA_HDA_LINK_INPUT_CLASS;
+		node_id.node.vindex = params->link_dma_id;
+		break;
+
+	default:
+		node_id.node.dma_type =
+			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
+			SKL_DMA_HDA_HOST_OUTPUT_CLASS :
+			SKL_DMA_HDA_HOST_INPUT_CLASS;
+		node_id.node.vindex = params->host_dma_id;
+		break;
+	}
+
+	cpr_mconfig->gtw_cfg.node_id = node_id.val;
+
+	if (SKL_CONN_SOURCE == mconfig->hw_conn_type)
+		cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->obs;
+	else
+		cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->ibs;
+
+	cpr_mconfig->cpr_feature_mask = 0;
+	cpr_mconfig->gtw_cfg.config_length  = 0;
+
+	skl_copy_copier_caps(mconfig, cpr_mconfig);
+}
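+
+/*
+ * Worked example (hypothetical values): for an I2S source with
+ * host_dma_id = 2, time_slot = 1 and vbus_id = 1, the vindex computed
+ * above is 2 + (1 << 1) + (1 << 3) = 12, i.e. the DMA id, time slot and
+ * bus id are packed into a single index field.
+ */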
+
+static void skl_setup_out_format(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_audio_data_format *out_fmt)
+{
+	struct skl_module_fmt *format = &mconfig->out_fmt;
+
+	out_fmt->number_of_channels = (u8)format->channels;
+	out_fmt->s_freq = format->s_freq;
+	out_fmt->bit_depth = format->bit_depth;
+	out_fmt->valid_bit_depth = format->valid_bit_depth;
+	out_fmt->ch_cfg = format->ch_cfg;
+
+	out_fmt->channel_map = skl_create_channel_map(out_fmt->ch_cfg);
+	out_fmt->interleaving = SKL_INTERLEAVING_PER_CHANNEL;
+
+	dev_dbg(ctx->dev, "copier out format chan=%d fre=%d bitdepth=%d\n",
+		out_fmt->number_of_channels, format->s_freq, format->bit_depth);
+}
+
+/*
+ * DSP needs SRC module for frequency conversion, SRC takes base module
+ * configuration and the target frequency as extra parameter passed as src
+ * config
+ */
+static void skl_set_src_format(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_src_module_cfg *src_mconfig)
+{
+	struct skl_module_fmt *fmt = &mconfig->out_fmt;
+
+	skl_set_base_module_format(ctx, mconfig,
+		(struct skl_base_cfg *)src_mconfig);
+
+	src_mconfig->src_cfg = fmt->s_freq;
+}
+
+/*
+ * The DSP needs an updown module to do channel conversion. The updown
+ * module takes the base module configuration and the channel configuration.
+ * It also takes coefficients; for now the firmware defaults are applied.
+ */
+static void skl_set_updown_mixer_format(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_up_down_mixer_cfg *mixer_mconfig)
+{
+	struct skl_module_fmt *fmt = &mconfig->out_fmt;
+	int i = 0;
+
+	skl_set_base_module_format(ctx,	mconfig,
+		(struct skl_base_cfg *)mixer_mconfig);
+	mixer_mconfig->out_ch_cfg = fmt->ch_cfg;
+
+	/* Select F/W default coefficient */
+	mixer_mconfig->coeff_sel = 0x0;
+
+	/* User coeff, don't care since we are selecting F/W defaults */
+	for (i = 0; i < UP_DOWN_MIXER_MAX_COEFF; i++)
+		mixer_mconfig->coeff[i] = 0xDEADBEEF;
+}
+
+/*
+ * 'copier' is a DSP-internal module which copies data from host DMA (HDA
+ * host DMA) or a link (HDA link, SSP, PDM).
+ * Here we calculate the copier module parameters, like PCM format, output
+ * format and gateway settings.
+ * copier_module_config is sent as the input buffer with the INIT_INSTANCE
+ * IPC msg.
+ */
+static void skl_set_copier_format(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_cpr_cfg *cpr_mconfig)
+{
+	struct skl_audio_data_format *out_fmt = &cpr_mconfig->out_fmt;
+	struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)cpr_mconfig;
+
+	skl_set_base_module_format(ctx, mconfig, base_cfg);
+
+	skl_setup_out_format(ctx, mconfig, out_fmt);
+	skl_setup_cpr_gateway_cfg(ctx, mconfig, cpr_mconfig);
+}
+
+static u16 skl_get_module_param_size(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig)
+{
+	u16 param_size;
+
+	switch (mconfig->m_type) {
+	case SKL_MODULE_TYPE_COPIER:
+		param_size = sizeof(struct skl_cpr_cfg);
+		param_size += mconfig->formats_config.caps_size;
+		return param_size;
+
+	case SKL_MODULE_TYPE_SRCINT:
+		return sizeof(struct skl_src_module_cfg);
+
+	case SKL_MODULE_TYPE_UPDWMIX:
+		return sizeof(struct skl_up_down_mixer_cfg);
+
+	default:
+		/*
+		 * return only base cfg when no specific module type is
+		 * specified
+		 */
+		return sizeof(struct skl_base_cfg);
+	}
+}
+
+/*
+ * DSP firmware supports various modules like copier, SRC, updown etc.
+ * These modules required various parameters to be calculated and sent for
+ * the module initialization to DSP. By default a generic module needs only
+ * base module format configuration
+ */
+
+static int skl_set_module_format(struct skl_sst *ctx,
+			struct skl_module_cfg *module_config,
+			u16 *module_config_size,
+			void **param_data)
+{
+	u16 param_size;
+
+	param_size = skl_get_module_param_size(ctx, module_config);
+
+	*param_data = kzalloc(param_size, GFP_KERNEL);
+	if (NULL == *param_data)
+		return -ENOMEM;
+
+	*module_config_size = param_size;
+
+	switch (module_config->m_type) {
+	case SKL_MODULE_TYPE_COPIER:
+		skl_set_copier_format(ctx, module_config, *param_data);
+		break;
+
+	case SKL_MODULE_TYPE_SRCINT:
+		skl_set_src_format(ctx, module_config, *param_data);
+		break;
+
+	case SKL_MODULE_TYPE_UPDWMIX:
+		skl_set_updown_mixer_format(ctx, module_config, *param_data);
+		break;
+
+	default:
+		skl_set_base_module_format(ctx, module_config, *param_data);
+		break;
+	}
+
+	dev_dbg(ctx->dev, "Module id=%d config size: %d bytes\n",
+			module_config->id.module_id, param_size);
+	print_hex_dump(KERN_DEBUG, "Module params:", DUMP_PREFIX_OFFSET, 8, 4,
+			*param_data, param_size, false);
+	return 0;
+}
+
+static int skl_get_queue_index(struct skl_module_pin *mpin,
+				struct skl_module_inst_id id, int max)
+{
+	int i;
+
+	for (i = 0; i < max; i++)  {
+		if (mpin[i].id.module_id == id.module_id &&
+			mpin[i].id.instance_id == id.instance_id)
+			return i;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Allocates a queue for each module.
+ * If dynamic, the pin_index is allocated from 0 to max_pin.
+ * If static, the pin_index is fixed based on the module_id and instance id.
+ */
+static int skl_alloc_queue(struct skl_module_pin *mpin,
+			struct skl_module_inst_id id, int max)
+{
+	int i;
+
+	/*
+	 * if the pin is dynamic, find the first free pin; otherwise find
+	 * the pin matching the module and instance id, as the topology
+	 * ensures a unique pin is assigned to it, so there is no need to
+	 * allocate/free
+	 */
+	for (i = 0; i < max; i++)  {
+		if (mpin[i].is_dynamic) {
+			if (!mpin[i].in_use) {
+				mpin[i].in_use = true;
+				mpin[i].id.module_id = id.module_id;
+				mpin[i].id.instance_id = id.instance_id;
+				return i;
+			}
+		} else {
+			if (mpin[i].id.module_id == id.module_id &&
+				mpin[i].id.instance_id == id.instance_id)
+				return i;
+		}
+	}
+
+	return -EINVAL;
+}
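+
+/*
+ * Example (hypothetical): with two dynamic pins, binding module A and then
+ * module B hands out indexes 0 and 1 in order; with static pins the same
+ * index is returned on every call for a given (module_id, instance_id)
+ * pair, so in_use is never set.
+ */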
+
+static void skl_free_queue(struct skl_module_pin *mpin, int q_index)
+{
+	if (mpin[q_index].is_dynamic) {
+		mpin[q_index].in_use = false;
+		mpin[q_index].id.module_id = 0;
+		mpin[q_index].id.instance_id = 0;
+	}
+}
+
+/*
+ * A module needs to be instantiated in the DSP. A module is part of a
+ * collection of modules referred to as a PIPE.
+ * We first calculate the module format, based on the module type, and
+ * then invoke the DSP by sending the INIT_INSTANCE IPC using the IPC
+ * helper.
+ */
+int skl_init_module(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig, char *param)
+{
+	u16 module_config_size = 0;
+	void *param_data = NULL;
+	int ret;
+	struct skl_ipc_init_instance_msg msg;
+
+	dev_dbg(ctx->dev, "%s: module_id = %d instance=%d\n", __func__,
+		 mconfig->id.module_id, mconfig->id.instance_id);
+
+	if (mconfig->pipe->state != SKL_PIPE_CREATED) {
+		dev_err(ctx->dev, "Pipe not created, state=%d pipe_id=%d\n",
+				 mconfig->pipe->state, mconfig->pipe->ppl_id);
+		return -EIO;
+	}
+
+	ret = skl_set_module_format(ctx, mconfig,
+			&module_config_size, &param_data);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to set module format ret=%d\n", ret);
+		return ret;
+	}
+
+	msg.module_id = mconfig->id.module_id;
+	msg.instance_id = mconfig->id.instance_id;
+	msg.ppl_instance_id = mconfig->pipe->ppl_id;
+	msg.param_data_size = module_config_size;
+	msg.core_id = mconfig->core_id;
+
+	ret = skl_ipc_init_instance(&ctx->ipc, &msg, param_data);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to init instance ret=%d\n", ret);
+		kfree(param_data);
+		return ret;
+	}
+	mconfig->m_state = SKL_MODULE_INIT_DONE;
+
+	return ret;
+}
+
+static void skl_dump_bind_info(struct skl_sst *ctx,
+	struct skl_module_cfg *src_module, struct skl_module_cfg *dst_module)
+{
+	dev_dbg(ctx->dev, "%s: src module_id = %d  src_instance=%d\n",
+		__func__, src_module->id.module_id, src_module->id.instance_id);
+	dev_dbg(ctx->dev, "%s: dst_module=%d dst_instance=%d\n", __func__,
+		 dst_module->id.module_id, dst_module->id.instance_id);
+
+	dev_dbg(ctx->dev, "src_module state = %d dst module state = %d\n",
+		src_module->m_state, dst_module->m_state);
+}
+
+/*
+ * On module freeup, we need to unbind the module from the modules it is
+ * already bound to.
+ * Find the allocated pin and unbind it using the bind_unbind IPC.
+ */
+int skl_unbind_modules(struct skl_sst *ctx,
+			struct skl_module_cfg *src_mcfg,
+			struct skl_module_cfg *dst_mcfg)
+{
+	int ret;
+	struct skl_ipc_bind_unbind_msg msg;
+	struct skl_module_inst_id src_id = src_mcfg->id;
+	struct skl_module_inst_id dst_id = dst_mcfg->id;
+	int in_max = dst_mcfg->max_in_queue;
+	int out_max = src_mcfg->max_out_queue;
+	int src_index, dst_index;
+
+	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
+
+	if (src_mcfg->m_state != SKL_MODULE_BIND_DONE)
+		return 0;
+
+	/*
+	 * for an inter-pipe unbind, send the unbind only if both modules
+	 * are in the BIND state
+	 */
+	if ((src_mcfg->pipe->ppl_id != dst_mcfg->pipe->ppl_id) &&
+				dst_mcfg->m_state != SKL_MODULE_BIND_DONE)
+		return 0;
+	else if (src_mcfg->m_state < SKL_MODULE_INIT_DONE &&
+				 dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
+		return 0;
+
+	/* get src queue index */
+	src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
+	if (src_index < 0)
+		return -EINVAL;
+
+	msg.src_queue = src_mcfg->m_out_pin[src_index].pin_index;
+
+	/* get dst queue index */
+	dst_index  = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
+	if (dst_index < 0)
+		return -EINVAL;
+
+	msg.dst_queue = dst_mcfg->m_in_pin[dst_index].pin_index;
+
+	msg.module_id = src_mcfg->id.module_id;
+	msg.instance_id = src_mcfg->id.instance_id;
+	msg.dst_module_id = dst_mcfg->id.module_id;
+	msg.dst_instance_id = dst_mcfg->id.instance_id;
+	msg.bind = false;
+
+	ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
+	if (!ret) {
+		src_mcfg->m_state = SKL_MODULE_UNINIT;
+		/* free the queues only if the unbind succeeded */
+		skl_free_queue(src_mcfg->m_out_pin, src_index);
+		skl_free_queue(dst_mcfg->m_in_pin, dst_index);
+	}
+
+	return ret;
+}
+
+/*
+ * Once a module is instantiated it needs to be 'bound' to the other
+ * modules in the pipeline. For binding we need to find the module pins
+ * which are bound together.
+ * This function finds the pins and then sends the bind_unbind IPC message
+ * to the DSP using the IPC helper.
+ */
+int skl_bind_modules(struct skl_sst *ctx,
+			struct skl_module_cfg *src_mcfg,
+			struct skl_module_cfg *dst_mcfg)
+{
+	int ret;
+	struct skl_ipc_bind_unbind_msg msg;
+	struct skl_module_inst_id src_id = src_mcfg->id;
+	struct skl_module_inst_id dst_id = dst_mcfg->id;
+	int in_max = dst_mcfg->max_in_queue;
+	int out_max = src_mcfg->max_out_queue;
+	int src_index, dst_index;
+
+	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
+
+	if (src_mcfg->m_state < SKL_MODULE_INIT_DONE &&
+		dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
+		return 0;
+
+	src_index = skl_alloc_queue(src_mcfg->m_out_pin, dst_id, out_max);
+	if (src_index < 0)
+		return -EINVAL;
+
+	msg.src_queue = src_mcfg->m_out_pin[src_index].pin_index;
+	dst_index = skl_alloc_queue(dst_mcfg->m_in_pin, src_id, in_max);
+	if (dst_index < 0) {
+		skl_free_queue(src_mcfg->m_out_pin, src_index);
+		return -EINVAL;
+	}
+
+	msg.dst_queue = dst_mcfg->m_in_pin[dst_index].pin_index;
+
+	dev_dbg(ctx->dev, "src queue = %d dst queue =%d\n",
+			 msg.src_queue, msg.dst_queue);
+
+	msg.module_id = src_mcfg->id.module_id;
+	msg.instance_id = src_mcfg->id.instance_id;
+	msg.dst_module_id = dst_mcfg->id.module_id;
+	msg.dst_instance_id = dst_mcfg->id.instance_id;
+	msg.bind = true;
+
+	ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
+
+	if (!ret) {
+		src_mcfg->m_state = SKL_MODULE_BIND_DONE;
+	} else {
+		/* error case: if the IPC fails, clear the queue indexes */
+		skl_free_queue(src_mcfg->m_out_pin, src_index);
+		skl_free_queue(dst_mcfg->m_in_pin, dst_index);
+	}
+
+	return ret;
+}
+
+static int skl_set_pipe_state(struct skl_sst *ctx, struct skl_pipe *pipe,
+	enum skl_ipc_pipeline_state state)
+{
+	dev_dbg(ctx->dev, "%s: pipe_state = %d\n", __func__, state);
+
+	return skl_ipc_set_pipeline_state(&ctx->ipc, pipe->ppl_id, state);
+}
+
+/*
+ * A pipeline is a collection of modules. Before a module is instantiated,
+ * a pipeline needs to be created for it.
+ * This function creates a pipeline by sending the create pipeline IPC
+ * message to the FW.
+ */
+int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe)
+{
+	int ret;
+
+	dev_dbg(ctx->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);
+
+	ret = skl_ipc_create_pipeline(&ctx->ipc, pipe->memory_pages,
+				pipe->pipe_priority, pipe->ppl_id);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to create pipeline\n");
+		return ret;
+	}
+
+	pipe->state = SKL_PIPE_CREATED;
+
+	return 0;
+}
+
+/*
+ * A pipeline needs to be deleted on cleanup. If a pipeline is running,
+ * pause it first and then delete it.
+ * The pipe delete is done by sending the delete pipeline IPC; the DSP
+ * will stop the DMA engines and release the resources.
+ */
+int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
+{
+	int ret;
+
+	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
+
+	/* If the pipe is started, pause it first */
+	if (pipe->state >= SKL_PIPE_STARTED) {
+		/* pipe must be paused before it can be deleted */
+		ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
+		if (ret < 0) {
+			dev_err(ctx->dev, "Failed to stop pipeline\n");
+			return ret;
+		}
+
+		pipe->state = SKL_PIPE_PAUSED;
+	}
+
+	/* If the pipe was not created in FW, do not try to delete it */
+	if (pipe->state < SKL_PIPE_CREATED)
+		return 0;
+
+	ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
+	if (ret < 0)
+		dev_err(ctx->dev, "Failed to delete pipeline\n");
+
+	return ret;
+}
+
+/*
+ * A pipeline is also a scheduling entity in the DSP which can be run or
+ * stopped.
+ * To process data, the pipe needs to be run by sending the set pipe
+ * state IPC to the DSP.
+ */
+int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
+{
+	int ret;
+
+	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
+
+	/* If the pipe was not created in FW, do not try to run it */
+	if (pipe->state < SKL_PIPE_CREATED)
+		return 0;
+
+	/* Pipe has to be paused before it is started */
+	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to pause pipe\n");
+		return ret;
+	}
+
+	pipe->state = SKL_PIPE_PAUSED;
+
+	ret = skl_set_pipe_state(ctx, pipe, PPL_RUNNING);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to start pipe\n");
+		return ret;
+	}
+
+	pipe->state = SKL_PIPE_STARTED;
+
+	return 0;
+}
+
+/*
+ * Stop the pipeline by sending the set pipe state IPC.
+ * The DSP doesn't implement stop, so we always send the pause message.
+ */
+int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
+{
+	int ret;
+
+	dev_dbg(ctx->dev, "In %s pipe=%d\n", __func__, pipe->ppl_id);
+
+	/* If the pipe was not started in FW, do not try to stop it */
+	if (pipe->state < SKL_PIPE_PAUSED)
+		return 0;
+
+	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
+	if (ret < 0) {
+		dev_dbg(ctx->dev, "Failed to stop pipe\n");
+		return ret;
+	}
+
+	pipe->state = SKL_PIPE_CREATED;
+
+	return 0;
+}
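+
+/*
+ * Pipeline state flow implied by the helpers above (sketch):
+ *
+ *	create: INVALID -> CREATED
+ *	run:    CREATED -> PAUSED -> STARTED
+ *	stop:   STARTED -> CREATED (FW has no stop, only pause)
+ *	delete: pause first if started, then delete in FW
+ */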
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
new file mode 100644
index 0000000..13036b1
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -0,0 +1,140 @@
+/*
+ *  skl-nhlt.c - Intel SKL Platform NHLT parsing
+ *
+ *  Copyright (C) 2015 Intel Corp
+ *  Author: Sanjiv Kumar <sanjiv.kumar@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#include "skl.h"
+
+/* Unique identification for getting NHLT blobs */
+static u8 OSC_UUID[16] = {0x6E, 0x88, 0x9F, 0xA6, 0xEB, 0x6C, 0x94, 0x45,
+				0xA4, 0x1F, 0x7B, 0x5D, 0xCE, 0x24, 0xC5, 0x53};
+
+#define DSDT_NHLT_PATH "\\_SB.PCI0.HDAS"
+
+void __iomem *skl_nhlt_init(struct device *dev)
+{
+	acpi_handle handle;
+	union acpi_object *obj;
+	struct nhlt_resource_desc  *nhlt_ptr = NULL;
+
+	if (ACPI_FAILURE(acpi_get_handle(NULL, DSDT_NHLT_PATH, &handle))) {
+		dev_err(dev, "Requested NHLT device not found\n");
+		return NULL;
+	}
+
+	obj = acpi_evaluate_dsm(handle, OSC_UUID, 1, 1, NULL);
+	if (obj && obj->type == ACPI_TYPE_BUFFER) {
+		nhlt_ptr = (struct nhlt_resource_desc  *)obj->buffer.pointer;
+
+		return ioremap_cache(nhlt_ptr->min_addr, nhlt_ptr->length);
+	}
+
+	dev_err(dev, "device specific method to extract NHLT blob failed\n");
+	return NULL;
+}
+
+void skl_nhlt_free(void __iomem *addr)
+{
+	iounmap(addr);
+}
+
+static struct nhlt_specific_cfg *skl_get_specific_cfg(
+		struct device *dev, struct nhlt_fmt *fmt,
+		u8 no_ch, u32 rate, u16 bps)
+{
+	struct nhlt_specific_cfg *sp_config;
+	struct wav_fmt *wfmt;
+	struct nhlt_fmt_cfg *fmt_config = fmt->fmt_config;
+	int i;
+
+	dev_dbg(dev, "Format count =%d\n", fmt->fmt_count);
+
+	for (i = 0; i < fmt->fmt_count; i++) {
+		wfmt = &fmt_config->fmt_ext.fmt;
+		dev_dbg(dev, "ch=%d fmt=%d s_rate=%d\n", wfmt->channels,
+			 wfmt->bits_per_sample, wfmt->samples_per_sec);
+		if (wfmt->channels == no_ch && wfmt->samples_per_sec == rate &&
+					wfmt->bits_per_sample == bps) {
+			sp_config = &fmt_config->config;
+
+			return sp_config;
+		}
+
+		fmt_config = (struct nhlt_fmt_cfg *)(fmt_config->config.caps +
+						fmt_config->config.size);
+	}
+
+	return NULL;
+}
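+
+/*
+ * Note on the walk above: nhlt_fmt_cfg records are variable-length (a
+ * wav_fmt_ext header followed by a size-prefixed caps blob), so the next
+ * record starts at config.caps + config.size rather than at a fixed
+ * stride.
+ */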
+
+static void dump_config(struct device *dev, u32 instance_id, u8 linktype,
+		u8 s_fmt, u8 num_channels, u32 s_rate, u8 dirn, u16 bps)
+{
+	dev_dbg(dev, "Input configuration\n");
+	dev_dbg(dev, "ch=%d fmt=%d s_rate=%d\n", num_channels, s_fmt, s_rate);
+	dev_dbg(dev, "vbus_id=%d link_type=%d\n", instance_id, linktype);
+	dev_dbg(dev, "bits_per_sample=%d\n", bps);
+}
+
+static bool skl_check_ep_match(struct device *dev, struct nhlt_endpoint *epnt,
+				u32 instance_id, u8 link_type, u8 dirn)
+{
+	dev_dbg(dev, "vbus_id=%d link_type=%d dir=%d\n",
+		epnt->virtual_bus_id, epnt->linktype, epnt->direction);
+
+	if ((epnt->virtual_bus_id == instance_id) &&
+			(epnt->linktype == link_type) &&
+			(epnt->direction == dirn))
+		return true;
+	else
+		return false;
+}
+
+struct nhlt_specific_cfg
+*skl_get_ep_blob(struct skl *skl, u32 instance, u8 link_type,
+			u8 s_fmt, u8 num_ch, u32 s_rate, u8 dirn)
+{
+	struct nhlt_fmt *fmt;
+	struct nhlt_endpoint *epnt;
+	struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+	struct device *dev = bus->dev;
+	struct nhlt_specific_cfg *sp_config;
+	struct nhlt_acpi_table *nhlt = (struct nhlt_acpi_table *)skl->nhlt;
+	u16 bps = num_ch * s_fmt;
+	u8 j;
+
+	dump_config(dev, instance, link_type, s_fmt, num_ch, s_rate, dirn, bps);
+
+	epnt = (struct nhlt_endpoint *)nhlt->desc;
+
+	dev_dbg(dev, "endpoint count =%d\n", nhlt->endpoint_count);
+
+	for (j = 0; j < nhlt->endpoint_count; j++) {
+		if (skl_check_ep_match(dev, epnt, instance, link_type, dirn)) {
+			fmt = (struct nhlt_fmt *)(epnt->config.caps +
+						 epnt->config.size);
+			sp_config = skl_get_specific_cfg(dev, fmt, num_ch, s_rate, bps);
+			if (sp_config)
+				return sp_config;
+		}
+
+		epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
+	}
+
+	return NULL;
+}
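+
+/*
+ * NHLT layout walked above (sketch): the table is a header followed by
+ * endpoint_count variable-length endpoints; each endpoint embeds a
+ * specific-config blob after which its format list starts. Advancing is
+ * done purely via the embedded length/size fields:
+ *
+ *	next_ep  = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
+ *	fmt_list = (struct nhlt_fmt *)(epnt->config.caps + epnt->config.size);
+ */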
diff --git a/sound/soc/intel/skylake/skl-nhlt.h b/sound/soc/intel/skylake/skl-nhlt.h
new file mode 100644
index 0000000..3769f9f
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-nhlt.h
@@ -0,0 +1,106 @@
+/*
+ *  skl-nhlt.h - Intel HDA Platform NHLT header
+ *
+ *  Copyright (C) 2015 Intel Corp
+ *  Author: Sanjiv Kumar <sanjiv.kumar@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#ifndef __SKL_NHLT_H__
+#define __SKL_NHLT_H__
+
+#include <linux/acpi.h>
+
+struct wav_fmt {
+	u16 fmt_tag;
+	u16 channels;
+	u32 samples_per_sec;
+	u32 avg_bytes_per_sec;
+	u16 block_align;
+	u16 bits_per_sample;
+	u16 cb_size;
+} __packed;
+
+struct wav_fmt_ext {
+	struct wav_fmt fmt;
+	union samples {
+		u16 valid_bits_per_sample;
+		u16 samples_per_block;
+		u16 reserved;
+	} sample;
+	u32 channel_mask;
+	u8 sub_fmt[16];
+} __packed;
+
+enum nhlt_link_type {
+	NHLT_LINK_HDA = 0,
+	NHLT_LINK_DSP = 1,
+	NHLT_LINK_DMIC = 2,
+	NHLT_LINK_SSP = 3,
+	NHLT_LINK_INVALID
+};
+
+enum nhlt_device_type {
+	NHLT_DEVICE_BT = 0,
+	NHLT_DEVICE_DMIC = 1,
+	NHLT_DEVICE_I2S = 4,
+	NHLT_DEVICE_INVALID
+};
+
+struct nhlt_specific_cfg {
+	u32 size;
+	u8 caps[0];
+} __packed;
+
+struct nhlt_fmt_cfg {
+	struct wav_fmt_ext fmt_ext;
+	struct nhlt_specific_cfg config;
+} __packed;
+
+struct nhlt_fmt {
+	u8 fmt_count;
+	struct nhlt_fmt_cfg fmt_config[0];
+} __packed;
+
+struct nhlt_endpoint {
+	u32  length;
+	u8   linktype;
+	u8   instance_id;
+	u16  vendor_id;
+	u16  device_id;
+	u16  revision_id;
+	u32  subsystem_id;
+	u8   device_type;
+	u8   direction;
+	u8   virtual_bus_id;
+	struct nhlt_specific_cfg config;
+} __packed;
+
+struct nhlt_acpi_table {
+	struct acpi_table_header header;
+	u8 endpoint_count;
+	struct nhlt_endpoint desc[0];
+} __packed;
+
+struct nhlt_resource_desc  {
+	u32 extra;
+	u16 flags;
+	u64 addr_spc_gra;
+	u64 min_addr;
+	u64 max_addr;
+	u64 addr_trans_offset;
+	u64 length;
+} __packed;
+
+#endif
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
new file mode 100644
index 0000000..7d617bf
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -0,0 +1,916 @@
+/*
+ *  skl-pcm.c - ASoC HDA Platform driver file implementing PCM functionality
+ *
+ *  Copyright (C) 2014-2015 Intel Corp
+ *  Author:  Jeeja KP <jeeja.kp@intel.com>
+ *
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "skl.h"
+
+#define HDA_MONO 1
+#define HDA_STEREO 2
+
+static struct snd_pcm_hardware azx_pcm_hw = {
+	.info =			(SNDRV_PCM_INFO_MMAP |
+				 SNDRV_PCM_INFO_INTERLEAVED |
+				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				 SNDRV_PCM_INFO_MMAP_VALID |
+				 SNDRV_PCM_INFO_PAUSE |
+				 SNDRV_PCM_INFO_SYNC_START |
+				 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
+				 SNDRV_PCM_INFO_HAS_LINK_ATIME |
+				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
+	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
+	.rates =		SNDRV_PCM_RATE_48000,
+	.rate_min =		48000,
+	.rate_max =		48000,
+	.channels_min =		2,
+	.channels_max =		2,
+	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
+	.period_bytes_min =	128,
+	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
+	.periods_min =		2,
+	.periods_max =		AZX_MAX_FRAG,
+	.fifo_size =		0,
+};
+
+static inline
+struct hdac_ext_stream *get_hdac_ext_stream(struct snd_pcm_substream *substream)
+{
+	return substream->runtime->private_data;
+}
+
+static struct hdac_ext_bus *get_bus_ctx(struct snd_pcm_substream *substream)
+{
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+	struct hdac_stream *hstream = hdac_stream(stream);
+	struct hdac_bus *bus = hstream->bus;
+
+	return hbus_to_ebus(bus);
+}
+
+static int skl_substream_alloc_pages(struct hdac_ext_bus *ebus,
+				 struct snd_pcm_substream *substream,
+				 size_t size)
+{
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+
+	hdac_stream(stream)->bufsize = 0;
+	hdac_stream(stream)->period_bytes = 0;
+	hdac_stream(stream)->format_val = 0;
+
+	return snd_pcm_lib_malloc_pages(substream, size);
+}
+
+static int skl_substream_free_pages(struct hdac_bus *bus,
+				struct snd_pcm_substream *substream)
+{
+	return snd_pcm_lib_free_pages(substream);
+}
+
+static void skl_set_pcm_constraints(struct hdac_ext_bus *ebus,
+				 struct snd_pcm_runtime *runtime)
+{
+	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+
+	/* avoid wrap-around with wall-clock */
+	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
+				     20, 178000000);
+}
+
+static enum hdac_ext_stream_type skl_get_host_stream_type(struct hdac_ext_bus *ebus)
+{
+	if (ebus->ppcap)
+		return HDAC_EXT_STREAM_TYPE_HOST;
+	else
+		return HDAC_EXT_STREAM_TYPE_COUPLED;
+}
+
+static int skl_pcm_open(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct hdac_ext_stream *stream;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct skl_dma_params *dma_params;
+	int ret;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+	ret = pm_runtime_get_sync(dai->dev);
+	if (ret < 0)
+		return ret;
+
+	stream = snd_hdac_ext_stream_assign(ebus, substream,
+					skl_get_host_stream_type(ebus));
+	if (stream == NULL)
+		return -EBUSY;
+
+	skl_set_pcm_constraints(ebus, runtime);
+
+	/*
+	 * disable WALLCLOCK timestamps for capture streams
+	 * until we figure out how to handle digital inputs
+	 */
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
+		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
+	}
+
+	runtime->private_data = stream;
+
+	dma_params = kzalloc(sizeof(*dma_params), GFP_KERNEL);
+	if (!dma_params)
+		return -ENOMEM;
+
+	dma_params->stream_tag = hdac_stream(stream)->stream_tag;
+	snd_soc_dai_set_dma_data(dai, substream, dma_params);
+
+	dev_dbg(dai->dev, "stream tag set in dma params=%d\n",
+				 dma_params->stream_tag);
+	snd_pcm_set_sync(substream);
+
+	return 0;
+}
+
+static int skl_get_format(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct skl_dma_params *dma_params;
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	int format_val = 0;
+
+	if (ebus->ppcap) {
+		struct snd_pcm_runtime *runtime = substream->runtime;
+
+		format_val = snd_hdac_calc_stream_format(runtime->rate,
+						runtime->channels,
+						runtime->format,
+						32, 0);
+	} else {
+		struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+		dma_params = snd_soc_dai_get_dma_data(codec_dai, substream);
+		if (dma_params)
+			format_val = dma_params->format;
+	}
+
+	return format_val;
+}
+
+static int skl_pcm_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+	unsigned int format_val;
+	int err;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+	if (hdac_stream(stream)->prepared) {
+		dev_dbg(dai->dev, "stream is already prepared, returning\n");
+		return 0;
+	}
+
+	format_val = skl_get_format(substream, dai);
+	dev_dbg(dai->dev, "stream_tag=%d formatvalue=%d\n",
+				hdac_stream(stream)->stream_tag, format_val);
+	snd_hdac_stream_reset(hdac_stream(stream));
+
+	err = snd_hdac_stream_set_params(hdac_stream(stream), format_val);
+	if (err < 0)
+		return err;
+
+	err = snd_hdac_stream_setup(hdac_stream(stream));
+	if (err < 0)
+		return err;
+
+	hdac_stream(stream)->prepared = 1;
+
+	return err;
+}
+
+static int skl_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int ret, dma_id;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+	ret = skl_substream_alloc_pages(ebus, substream,
+					  params_buffer_bytes(params));
+	if (ret < 0)
+		return ret;
+
+	dev_dbg(dai->dev, "rate=%d, ch=%d, format=%d\n",
+			runtime->rate, runtime->channels, runtime->format);
+
+	dma_id = hdac_stream(stream)->stream_tag - 1;
+	dev_dbg(dai->dev, "dma_id=%d\n", dma_id);
+
+	return 0;
+}
+
+static void skl_pcm_close(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct skl_dma_params *dma_params = NULL;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+
+	snd_hdac_ext_stream_release(stream, skl_get_host_stream_type(ebus));
+
+	dma_params = snd_soc_dai_get_dma_data(dai, substream);
+	/*
+	 * now set this to NULL as the dma_params are freed below
+	 */
+	snd_soc_dai_set_dma_data(dai, substream, NULL);
+
+	pm_runtime_mark_last_busy(dai->dev);
+	pm_runtime_put_autosuspend(dai->dev);
+	kfree(dma_params);
+}
+
+static int skl_pcm_hw_free(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+
+	snd_hdac_stream_cleanup(hdac_stream(stream));
+	hdac_stream(stream)->prepared = 0;
+
+	return skl_substream_free_pages(ebus_to_hbus(ebus), substream);
+}
+
+static int skl_link_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct hdac_ext_stream *link_dev;
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct skl_dma_params *dma_params;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	int dma_id;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+	link_dev = snd_hdac_ext_stream_assign(ebus, substream,
+					HDAC_EXT_STREAM_TYPE_LINK);
+	if (!link_dev)
+		return -EBUSY;
+
+	snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
+
+	/* set the stream tag in the codec dai dma params  */
+	dma_params = (struct skl_dma_params *)
+			snd_soc_dai_get_dma_data(codec_dai, substream);
+	if (dma_params)
+		dma_params->stream_tag =  hdac_stream(link_dev)->stream_tag;
+	snd_soc_dai_set_dma_data(codec_dai, substream, (void *)dma_params);
+	dma_id = hdac_stream(link_dev)->stream_tag - 1;
+
+	return 0;
+}
+
+static int skl_link_pcm_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct hdac_ext_stream *link_dev =
+			snd_soc_dai_get_dma_data(dai, substream);
+	unsigned int format_val = 0;
+	struct skl_dma_params *dma_params;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_pcm_hw_params *params;
+	struct snd_interval *channels, *rate;
+	struct hdac_ext_link *link;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+	if (link_dev->link_prepared) {
+		dev_dbg(dai->dev, "stream is already prepared, returning\n");
+		return 0;
+	}
+	params = devm_kzalloc(dai->dev, sizeof(*params), GFP_KERNEL);
+	if (params == NULL)
+		return -ENOMEM;
+
+	channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+	channels->min = channels->max = substream->runtime->channels;
+	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+	rate->min = rate->max = substream->runtime->rate;
+	snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
+					SNDRV_PCM_HW_PARAM_FIRST_MASK],
+					substream->runtime->format);
+
+	dma_params = (struct skl_dma_params *)
+			snd_soc_dai_get_dma_data(codec_dai, substream);
+	if (dma_params)
+		format_val = dma_params->format;
+	dev_dbg(dai->dev, "stream_tag=%d formatvalue=%d codec_dai_name=%s\n",
+			hdac_stream(link_dev)->stream_tag, format_val, codec_dai->name);
+
+	snd_hdac_ext_link_stream_reset(link_dev);
+
+	snd_hdac_ext_link_stream_setup(link_dev, format_val);
+
+	link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
+	if (!link)
+		return -EINVAL;
+
+	snd_hdac_ext_link_set_stream_id(link, hdac_stream(link_dev)->stream_tag);
+	link_dev->link_prepared = 1;
+
+	return 0;
+}
+
+static int skl_link_pcm_trigger(struct snd_pcm_substream *substream,
+	int cmd, struct snd_soc_dai *dai)
+{
+	struct hdac_ext_stream *link_dev =
+				snd_soc_dai_get_dma_data(dai, substream);
+
+	dev_dbg(dai->dev, "In %s cmd=%d\n", __func__, cmd);
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		snd_hdac_ext_link_stream_start(link_dev);
+		break;
+
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_STOP:
+		snd_hdac_ext_link_stream_clear(link_dev);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int skl_link_hw_free(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct hdac_ext_stream *link_dev =
+				snd_soc_dai_get_dma_data(dai, substream);
+	struct hdac_ext_link *link;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+
+	link_dev->link_prepared = 0;
+
+	link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
+	if (!link)
+		return -EINVAL;
+
+	snd_hdac_ext_link_clear_stream_id(link, hdac_stream(link_dev)->stream_tag);
+	snd_hdac_ext_stream_release(link_dev, HDAC_EXT_STREAM_TYPE_LINK);
+	return 0;
+}
+
+static int skl_hda_be_startup(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	return pm_runtime_get_sync(dai->dev);
+}
+
+static void skl_hda_be_shutdown(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	pm_runtime_mark_last_busy(dai->dev);
+	pm_runtime_put_autosuspend(dai->dev);
+}
+
+static struct snd_soc_dai_ops skl_pcm_dai_ops = {
+	.startup = skl_pcm_open,
+	.shutdown = skl_pcm_close,
+	.prepare = skl_pcm_prepare,
+	.hw_params = skl_pcm_hw_params,
+	.hw_free = skl_pcm_hw_free,
+};
+
+static struct snd_soc_dai_ops skl_dmic_dai_ops = {
+	.startup = skl_hda_be_startup,
+	.shutdown = skl_hda_be_shutdown,
+};
+
+static struct snd_soc_dai_ops skl_link_dai_ops = {
+	.startup = skl_hda_be_startup,
+	.prepare = skl_link_pcm_prepare,
+	.hw_params = skl_link_hw_params,
+	.hw_free = skl_link_hw_free,
+	.trigger = skl_link_pcm_trigger,
+	.shutdown = skl_hda_be_shutdown,
+};
+
+static struct snd_soc_dai_driver skl_platform_dai[] = {
+{
+	.name = "System Pin",
+	.ops = &skl_pcm_dai_ops,
+	.playback = {
+		.stream_name = "System Playback",
+		.channels_min = HDA_MONO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_8000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+	.capture = {
+		.stream_name = "System Capture",
+		.channels_min = HDA_MONO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = "Reference Pin",
+	.ops = &skl_pcm_dai_ops,
+	.capture = {
+		.stream_name = "Reference Capture",
+		.channels_min = HDA_MONO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = "Deepbuffer Pin",
+	.ops = &skl_pcm_dai_ops,
+	.playback = {
+		.stream_name = "Deepbuffer Playback",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = "LowLatency Pin",
+	.ops = &skl_pcm_dai_ops,
+	.playback = {
+		.stream_name = "Low Latency Playback",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+/* BE CPU  Dais */
+{
+	.name = "iDisp Pin",
+	.ops = &skl_link_dai_ops,
+	.playback = {
+		.stream_name = "iDisp Tx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = "DMIC01 Pin",
+	.ops = &skl_dmic_dai_ops,
+	.capture = {
+		.stream_name = "DMIC01 Rx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = "DMIC23 Pin",
+	.ops = &skl_dmic_dai_ops,
+	.capture = {
+		.stream_name = "DMIC23 Rx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = "HD-Codec Pin",
+	.ops = &skl_link_dai_ops,
+	.playback = {
+		.stream_name = "HD-Codec Tx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "HD-Codec Rx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = "HD-Codec-SPK Pin",
+	.ops = &skl_link_dai_ops,
+	.playback = {
+		.stream_name = "HD-Codec-SPK Tx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = "HD-Codec-AMIC Pin",
+	.ops = &skl_link_dai_ops,
+	.capture = {
+		.stream_name = "HD-Codec-AMIC Rx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+};
+
+static int skl_platform_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+
+	dev_dbg(rtd->cpu_dai->dev, "In %s:%s\n", __func__,
+					dai_link->cpu_dai_name);
+
+	runtime = substream->runtime;
+	snd_soc_set_runtime_hwparams(substream, &azx_pcm_hw);
+
+	return 0;
+}
+
+static int skl_pcm_trigger(struct snd_pcm_substream *substream,
+					int cmd)
+{
+	struct hdac_ext_bus *ebus = get_bus_ctx(substream);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct hdac_ext_stream *stream;
+	struct snd_pcm_substream *s;
+	bool start;
+	int sbits = 0;
+	unsigned long cookie;
+	struct hdac_stream *hstr;
+
+	stream = get_hdac_ext_stream(substream);
+	hstr = hdac_stream(stream);
+
+	dev_dbg(bus->dev, "In %s cmd=%d\n", __func__, cmd);
+
+	if (!hstr->prepared)
+		return -EPIPE;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		start = true;
+		break;
+
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_STOP:
+		start = false;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	snd_pcm_group_for_each_entry(s, substream) {
+		if (s->pcm->card != substream->pcm->card)
+			continue;
+		stream = get_hdac_ext_stream(s);
+		sbits |= 1 << hdac_stream(stream)->index;
+		snd_pcm_trigger_done(s, substream);
+	}
+
+	spin_lock_irqsave(&bus->reg_lock, cookie);
+
+	/* first, set SYNC bits of corresponding streams */
+	snd_hdac_stream_sync_trigger(hstr, true, sbits, AZX_REG_SSYNC);
+
+	snd_pcm_group_for_each_entry(s, substream) {
+		if (s->pcm->card != substream->pcm->card)
+			continue;
+		stream = get_hdac_ext_stream(s);
+		if (start)
+			snd_hdac_stream_start(hdac_stream(stream), true);
+		else
+			snd_hdac_stream_stop(hdac_stream(stream));
+	}
+	spin_unlock_irqrestore(&bus->reg_lock, cookie);
+
+	snd_hdac_stream_sync(hstr, start, sbits);
+
+	spin_lock_irqsave(&bus->reg_lock, cookie);
+
+	/* reset SYNC bits */
+	snd_hdac_stream_sync_trigger(hstr, false, sbits, AZX_REG_SSYNC);
+	if (start)
+		snd_hdac_stream_timecounter_init(hstr, sbits);
+	spin_unlock_irqrestore(&bus->reg_lock, cookie);
+
+	return 0;
+}
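+
+/*
+ * The sequence above follows the HDA synchronized-start procedure: set
+ * the SSYNC bits for every stream in the group, start or stop each
+ * stream's DMA, wait for the streams to report sync, then clear the
+ * SSYNC bits so the whole group runs sample-aligned.
+ */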
+
+static int skl_dsp_trigger(struct snd_pcm_substream *substream,
+		int cmd)
+{
+	struct hdac_ext_bus *ebus = get_bus_ctx(substream);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct hdac_ext_stream *stream;
+	int start;
+	unsigned long cookie;
+	struct hdac_stream *hstr;
+
+	dev_dbg(bus->dev, "In %s cmd=%d streamname=%s\n", __func__, cmd, cpu_dai->name);
+
+	stream = get_hdac_ext_stream(substream);
+	hstr = hdac_stream(stream);
+
+	if (!hstr->prepared)
+		return -EPIPE;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		start = 1;
+		break;
+
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_STOP:
+		start = 0;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&bus->reg_lock, cookie);
+
+	if (start)
+		snd_hdac_stream_start(hdac_stream(stream), true);
+	else
+		snd_hdac_stream_stop(hdac_stream(stream));
+
+	if (start)
+		snd_hdac_stream_timecounter_init(hstr, 0);
+
+	spin_unlock_irqrestore(&bus->reg_lock, cookie);
+
+	return 0;
+}
+
+static int skl_platform_pcm_trigger(struct snd_pcm_substream *substream,
+					int cmd)
+{
+	struct hdac_ext_bus *ebus = get_bus_ctx(substream);
+
+	if (ebus->ppcap)
+		return skl_dsp_trigger(substream, cmd);
+	else
+		return skl_pcm_trigger(substream, cmd);
+}
+
+/* calculate runtime delay from LPIB */
+static int skl_get_delay_from_lpib(struct hdac_ext_bus *ebus,
+				struct hdac_ext_stream *sstream,
+				unsigned int pos)
+{
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct hdac_stream *hstream = hdac_stream(sstream);
+	struct snd_pcm_substream *substream = hstream->substream;
+	int stream = substream->stream;
+	unsigned int lpib_pos = snd_hdac_stream_get_pos_lpib(hstream);
+	int delay;
+
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+		delay = pos - lpib_pos;
+	else
+		delay = lpib_pos - pos;
+
+	if (delay < 0) {
+		if (delay >= hstream->delay_negative_threshold)
+			delay = 0;
+		else
+			delay += hstream->bufsize;
+	}
+
+	if (delay >= hstream->period_bytes) {
+		dev_info(bus->dev,
+			 "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n",
+			 delay, hstream->period_bytes);
+		delay = 0;
+	}
+
+	return bytes_to_frames(substream->runtime, delay);
+}
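+
+/*
+ * Worked example (hypothetical values): for playback with pos = 0x1000
+ * and lpib_pos = 0xc00, the delay is 0x400 bytes of data already fetched
+ * by DMA but not yet played out on the link; a small negative difference
+ * is treated as jitter and clamped to 0, a large one as a buffer wrap.
+ */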
+
+static unsigned int skl_get_position(struct hdac_ext_stream *hstream,
+					int codec_delay)
+{
+	struct hdac_stream *hstr = hdac_stream(hstream);
+	struct snd_pcm_substream *substream = hstr->substream;
+	struct hdac_ext_bus *ebus = get_bus_ctx(substream);
+	unsigned int pos;
+	int delay;
+
+	/* use the position buffer as default */
+	pos = snd_hdac_stream_get_pos_posbuf(hdac_stream(hstream));
+
+	if (pos >= hdac_stream(hstream)->bufsize)
+		pos = 0;
+
+	if (substream->runtime) {
+		delay = skl_get_delay_from_lpib(ebus, hstream, pos)
+						 + codec_delay;
+		substream->runtime->delay += delay;
+	}
+
+	return pos;
+}
+
+static snd_pcm_uframes_t skl_platform_pcm_pointer
+			(struct snd_pcm_substream *substream)
+{
+	struct hdac_ext_stream *hstream = get_hdac_ext_stream(substream);
+
+	return bytes_to_frames(substream->runtime,
+			       skl_get_position(hstream, 0));
+}
+
+static u64 skl_adjust_codec_delay(struct snd_pcm_substream *substream,
+				u64 nsec)
+{
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	u64 codec_frames, codec_nsecs;
+
+	if (!codec_dai->driver->ops->delay)
+		return nsec;
+
+	codec_frames = codec_dai->driver->ops->delay(substream, codec_dai);
+	codec_nsecs = div_u64(codec_frames * 1000000000LL,
+			      substream->runtime->rate);
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		return nsec + codec_nsecs;
+
+	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
+}
+
+static int skl_get_time_info(struct snd_pcm_substream *substream,
+			struct timespec *system_ts, struct timespec *audio_ts,
+			struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
+			struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
+{
+	struct hdac_ext_stream *sstream = get_hdac_ext_stream(substream);
+	struct hdac_stream *hstr = hdac_stream(sstream);
+	u64 nsec;
+
+	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
+		(audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
+
+		snd_pcm_gettime(substream->runtime, system_ts);
+
+		nsec = timecounter_read(&hstr->tc);
+		nsec = div_u64(nsec, 3); /* can be optimized */
+		if (audio_tstamp_config->report_delay)
+			nsec = skl_adjust_codec_delay(substream, nsec);
+
+		*audio_ts = ns_to_timespec(nsec);
+
+		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
+		audio_tstamp_report->accuracy_report = 1; /* rest of struct is valid */
+		audio_tstamp_report->accuracy = 42; /* 24MHzWallClk == 42ns resolution */
+
+	} else {
+		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
+	}
+
+	return 0;
+}
+
+static struct snd_pcm_ops skl_platform_ops = {
+	.open = skl_platform_open,
+	.ioctl = snd_pcm_lib_ioctl,
+	.trigger = skl_platform_pcm_trigger,
+	.pointer = skl_platform_pcm_pointer,
+	.get_time_info =  skl_get_time_info,
+	.mmap = snd_pcm_lib_default_mmap,
+	.page = snd_pcm_sgbuf_ops_page,
+};
+
+static void skl_pcm_free(struct snd_pcm *pcm)
+{
+	snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+#define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)
+
+static int skl_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_dai *dai = rtd->cpu_dai;
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct snd_pcm *pcm = rtd->pcm;
+	unsigned int size;
+	int retval = 0;
+	struct skl *skl = ebus_to_skl(ebus);
+
+	if (dai->driver->playback.channels_min ||
+		dai->driver->capture.channels_min) {
+		/* buffer pre-allocation */
+		size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
+		if (size > MAX_PREALLOC_SIZE)
+			size = MAX_PREALLOC_SIZE;
+		retval = snd_pcm_lib_preallocate_pages_for_all(pcm,
+						SNDRV_DMA_TYPE_DEV_SG,
+						snd_dma_pci_data(skl->pci),
+						size, MAX_PREALLOC_SIZE);
+		if (retval) {
+			dev_err(dai->dev, "dma buffer allocation failed\n");
+			return retval;
+		}
+	}
+
+	return retval;
+}
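+
+/*
+ * E.g. with CONFIG_SND_HDA_PREALLOC_SIZE=64, 64 KB is pre-allocated per
+ * substream up front, while hw_params may later grow a buffer up to the
+ * 32 MB MAX_PREALLOC_SIZE cap.
+ */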
+
+static struct snd_soc_platform_driver skl_platform_drv = {
+	.ops		= &skl_platform_ops,
+	.pcm_new	= skl_pcm_new,
+	.pcm_free	= skl_pcm_free,
+};
+
+static const struct snd_soc_component_driver skl_component = {
+	.name           = "pcm",
+};
+
+int skl_platform_register(struct device *dev)
+{
+	int ret;
+
+	ret = snd_soc_register_platform(dev, &skl_platform_drv);
+	if (ret) {
+		dev_err(dev, "soc platform registration failed %d\n", ret);
+		return ret;
+	}
+	ret = snd_soc_register_component(dev, &skl_component,
+				skl_platform_dai,
+				ARRAY_SIZE(skl_platform_dai));
+	if (ret) {
+		dev_err(dev, "soc component registration failed %d\n", ret);
+		snd_soc_unregister_platform(dev);
+	}
+
+	return ret;
+}
+
+int skl_platform_unregister(struct device *dev)
+{
+	snd_soc_unregister_component(dev);
+	snd_soc_unregister_platform(dev);
+	return 0;
+}
diff --git a/sound/soc/intel/skylake/skl-sst-cldma.c b/sound/soc/intel/skylake/skl-sst-cldma.c
new file mode 100644
index 0000000..44748ba
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-cldma.c
@@ -0,0 +1,327 @@
+/*
+ * skl-sst-cldma.c - Code Loader DMA handler
+ *
+ * Copyright (C) 2015, Intel Corporation.
+ * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/kthread.h>
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+
+static void skl_cldma_int_enable(struct sst_dsp *ctx)
+{
+	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
+				SKL_ADSPIC_CL_DMA, SKL_ADSPIC_CL_DMA);
+}
+
+void skl_cldma_int_disable(struct sst_dsp *ctx)
+{
+	sst_dsp_shim_update_bits_unlocked(ctx,
+			SKL_ADSP_REG_ADSPIC, SKL_ADSPIC_CL_DMA, 0);
+}
+
+/* Code loader helper APIs */
+static void skl_cldma_setup_bdle(struct sst_dsp *ctx,
+		struct snd_dma_buffer *dmab_data,
+		u32 **bdlp, int size, int with_ioc)
+{
+	u32 *bdl = *bdlp;
+
+	ctx->cl_dev.frags = 0;
+	while (size > 0) {
+		phys_addr_t addr = virt_to_phys(dmab_data->area +
+				(ctx->cl_dev.frags * ctx->cl_dev.bufsize));
+
+		bdl[0] = cpu_to_le32(lower_32_bits(addr));
+		bdl[1] = cpu_to_le32(upper_32_bits(addr));
+
+		bdl[2] = cpu_to_le32(ctx->cl_dev.bufsize);
+
+		size -= ctx->cl_dev.bufsize;
+		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
+
+		bdl += 4;
+		ctx->cl_dev.frags++;
+	}
+}
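+
+/*
+ * Each BDL entry built above is four u32 words: buffer address low and
+ * high, buffer length, and an IOC flag in the last word, set only on the
+ * final fragment when with_ioc is true. E.g. a transfer of twice bufsize
+ * would produce two entries with IOC set on the second one.
+ */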
+
+/*
+ * Set up the controller:
+ * configure the registers to update the dma buffer address and
+ * enable interrupts.
+ * Note: stream 1 (FW_CL_STREAM_NUMBER) is used for the transfer
+ */
+static void skl_cldma_setup_controller(struct sst_dsp  *ctx,
+		struct snd_dma_buffer *dmab_bdl, unsigned int max_size,
+		u32 count)
+{
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL,
+			CL_SD_BDLPLBA(dmab_bdl->addr));
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU,
+			CL_SD_BDLPUBA(dmab_bdl->addr));
+
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, max_size);
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, count - 1);
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(1));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(1));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(1));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(FW_CL_STREAM_NUMBER));
+}
+
+static void skl_cldma_setup_spb(struct sst_dsp  *ctx,
+		unsigned int size, bool enable)
+{
+	if (enable)
+		sst_dsp_shim_update_bits_unlocked(ctx,
+				SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
+				CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
+				CL_SPBFIFO_SPBFCCTL_SPIBE(1));
+
+	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, size);
+}
+
+static void skl_cldma_cleanup_spb(struct sst_dsp  *ctx)
+{
+	sst_dsp_shim_update_bits_unlocked(ctx,
+			SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
+			CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
+			CL_SPBFIFO_SPBFCCTL_SPIBE(0));
+
+	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, 0);
+}
+
+static void skl_cldma_trigger(struct sst_dsp  *ctx, bool enable)
+{
+	if (enable)
+		sst_dsp_shim_update_bits_unlocked(ctx,
+			SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(1));
+	else
+		sst_dsp_shim_update_bits_unlocked(ctx,
+			SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(0));
+}
+
+static void skl_cldma_cleanup(struct sst_dsp  *ctx)
+{
+	skl_cldma_cleanup_spb(ctx);
+
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+				CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(0));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+				CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(0));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+				CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(0));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+				CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(0));
+
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL, CL_SD_BDLPLBA(0));
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU, 0);
+
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, 0);
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, 0);
+}
+
+static int skl_cldma_wait_interruptible(struct sst_dsp *ctx)
+{
+	int ret = 0;
+
+	if (!wait_event_timeout(ctx->cl_dev.wait_queue,
+				ctx->cl_dev.wait_condition,
+				msecs_to_jiffies(SKL_WAIT_TIMEOUT))) {
+		dev_err(ctx->dev, "%s: Wait timeout\n", __func__);
+		ret = -EIO;
+		goto cleanup;
+	}
+
+	dev_dbg(ctx->dev, "%s: Event wake\n", __func__);
+	if (ctx->cl_dev.wake_status != SKL_CL_DMA_BUF_COMPLETE) {
+		dev_err(ctx->dev, "%s: DMA Error\n", __func__);
+		ret = -EIO;
+	}
+
+cleanup:
+	ctx->cl_dev.wake_status = SKL_CL_DMA_STATUS_NONE;
+	return ret;
+}
+
+static void skl_cldma_stop(struct sst_dsp *ctx)
+{
+	ctx->cl_dev.ops.cl_trigger(ctx, false);
+}
+
+static void skl_cldma_fill_buffer(struct sst_dsp *ctx, unsigned int size,
+		const void *curr_pos, bool intr_enable, bool trigger)
+{
+	dev_dbg(ctx->dev, "Size: %x, intr_enable: %d\n", size, intr_enable);
+	dev_dbg(ctx->dev, "buf_pos_index:%d, trigger:%d\n",
+			ctx->cl_dev.dma_buffer_offset, trigger);
+	dev_dbg(ctx->dev, "spib position: %d\n", ctx->cl_dev.curr_spib_pos);
+
+	memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
+			curr_pos, size);
+
+	if (ctx->cl_dev.curr_spib_pos == ctx->cl_dev.bufsize)
+		ctx->cl_dev.dma_buffer_offset = 0;
+	else
+		ctx->cl_dev.dma_buffer_offset = ctx->cl_dev.curr_spib_pos;
+
+	ctx->cl_dev.wait_condition = false;
+
+	if (intr_enable)
+		skl_cldma_int_enable(ctx);
+
+	ctx->cl_dev.ops.cl_setup_spb(ctx, ctx->cl_dev.curr_spib_pos, trigger);
+	if (trigger)
+		ctx->cl_dev.ops.cl_trigger(ctx, true);
+}
+
+/*
+ * The CL DMA doesn't have any way to update the transfer status until a
+ * BDL buffer is fully transferred.
+ *
+ * So copying is divided into two parts:
+ * 1. Interrupt on buffer done, used while the size left to transfer is
+ *    larger than the ring buffer size.
+ * 2. Polling on a FW register, used once the data left to transfer no
+ *    longer fills the ring buffer. The caller takes care of polling the
+ *    required status register to identify the transfer status.
+ */
+static int
+skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin, u32 total_size)
+{
+	int ret = 0;
+	bool start = true;
+	unsigned int excess_bytes;
+	u32 size;
+	unsigned int bytes_left = total_size;
+	const void *curr_pos = bin;
+
+	if (!total_size)
+		return -EINVAL;
+
+	dev_dbg(ctx->dev, "%s: Total binary size: %u\n", __func__, bytes_left);
+
+	while (bytes_left) {
+		if (bytes_left > ctx->cl_dev.bufsize) {
+
+			/*
+			 * the DMA transfers only up to the write pointer
+			 * as updated in the SPIB
+			 */
+			if (ctx->cl_dev.curr_spib_pos == 0)
+				ctx->cl_dev.curr_spib_pos = ctx->cl_dev.bufsize;
+
+			size = ctx->cl_dev.bufsize;
+			skl_cldma_fill_buffer(ctx, size, curr_pos, true, start);
+
+			start = false;
+			ret = skl_cldma_wait_interruptible(ctx);
+			if (ret < 0) {
+				skl_cldma_stop(ctx);
+				return ret;
+			}
+
+		} else {
+			skl_cldma_int_disable(ctx);
+
+			if ((ctx->cl_dev.curr_spib_pos + bytes_left)
+							<= ctx->cl_dev.bufsize) {
+				ctx->cl_dev.curr_spib_pos += bytes_left;
+			} else {
+				excess_bytes = bytes_left -
+					(ctx->cl_dev.bufsize -
+					ctx->cl_dev.curr_spib_pos);
+				ctx->cl_dev.curr_spib_pos = excess_bytes;
+			}
+
+			size = bytes_left;
+			skl_cldma_fill_buffer(ctx, size,
+					curr_pos, false, start);
+		}
+		bytes_left -= size;
+		curr_pos = curr_pos + size;
+	}
+
+	return ret;
+}
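+
+/*
+ * Illustrative walk-through (sizes assumed, not from the code): with the
+ * default 32-page ring (128 KiB for 4 KiB pages), a 300 KiB image is
+ * copied as two full ring-sized chunks, each followed by a wait for the
+ * buffer-complete interrupt, and then a 44 KiB remainder sent through
+ * the polled path, where the SPIB write pointer wraps to 44 KiB and the
+ * caller polls the firmware status register for completion.
+ */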
+
+void skl_cldma_process_intr(struct sst_dsp *ctx)
+{
+	u8 cl_dma_intr_status;
+
+	cl_dma_intr_status =
+		sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_CL_SD_STS);
+
+	if (!(cl_dma_intr_status & SKL_CL_DMA_SD_INT_COMPLETE))
+		ctx->cl_dev.wake_status = SKL_CL_DMA_ERR;
+	else
+		ctx->cl_dev.wake_status = SKL_CL_DMA_BUF_COMPLETE;
+
+	ctx->cl_dev.wait_condition = true;
+	wake_up(&ctx->cl_dev.wait_queue);
+}
+
+int skl_cldma_prepare(struct sst_dsp *ctx)
+{
+	int ret;
+	u32 *bdl;
+
+	ctx->cl_dev.bufsize = SKL_MAX_BUFFER_SIZE;
+
+	/* Set up the CL DMA ops */
+	ctx->cl_dev.ops.cl_setup_bdle = skl_cldma_setup_bdle;
+	ctx->cl_dev.ops.cl_setup_controller = skl_cldma_setup_controller;
+	ctx->cl_dev.ops.cl_setup_spb = skl_cldma_setup_spb;
+	ctx->cl_dev.ops.cl_cleanup_spb = skl_cldma_cleanup_spb;
+	ctx->cl_dev.ops.cl_trigger = skl_cldma_trigger;
+	ctx->cl_dev.ops.cl_cleanup_controller = skl_cldma_cleanup;
+	ctx->cl_dev.ops.cl_copy_to_dmabuf = skl_cldma_copy_to_buf;
+	ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop;
+
+	/* Allocate buffer */
+	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
+			&ctx->cl_dev.dmab_data, ctx->cl_dev.bufsize);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Alloc buffer for base fw failed: %x", ret);
+		return ret;
+	}
+	/* Setup Code loader BDL */
+	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
+			&ctx->cl_dev.dmab_bdl, PAGE_SIZE);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Alloc buffer for blde failed: %x", ret);
+		ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
+		return ret;
+	}
+	bdl = (u32 *)ctx->cl_dev.dmab_bdl.area;
+
+	/* Set up the BDL entries */
+	ctx->cl_dev.ops.cl_setup_bdle(ctx, &ctx->cl_dev.dmab_data,
+			&bdl, ctx->cl_dev.bufsize, 1);
+	ctx->cl_dev.ops.cl_setup_controller(ctx, &ctx->cl_dev.dmab_bdl,
+			ctx->cl_dev.bufsize, ctx->cl_dev.frags);
+
+	ctx->cl_dev.curr_spib_pos = 0;
+	ctx->cl_dev.dma_buffer_offset = 0;
+	init_waitqueue_head(&ctx->cl_dev.wait_queue);
+
+	return ret;
+}
diff --git a/sound/soc/intel/skylake/skl-sst-cldma.h b/sound/soc/intel/skylake/skl-sst-cldma.h
new file mode 100644
index 0000000..99e4c86
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-cldma.h
@@ -0,0 +1,251 @@
+/*
+ * Intel Code Loader DMA support
+ *
+ * Copyright (C) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef SKL_SST_CLDMA_H_
+#define SKL_SST_CLDMA_H_
+
+#define FW_CL_STREAM_NUMBER		0x1
+
+#define DMA_ADDRESS_128_BITS_ALIGNMENT	7
+#define BDL_ALIGN(x)			((x) >> DMA_ADDRESS_128_BITS_ALIGNMENT)
+
+#define SKL_ADSPIC_CL_DMA			0x2
+#define SKL_ADSPIS_CL_DMA			0x2
+#define SKL_CL_DMA_SD_INT_DESC_ERR		0x10 /* Descriptor error interrupt */
+#define SKL_CL_DMA_SD_INT_FIFO_ERR		0x08 /* FIFO error interrupt */
+#define SKL_CL_DMA_SD_INT_COMPLETE		0x04 /* Buffer completion interrupt */
+
+/* Intel HD Audio Code Loader DMA Registers */
+
+#define HDA_ADSP_LOADER_BASE		0x80
+
+/* Stream Registers */
+#define SKL_ADSP_REG_CL_SD_CTL			(HDA_ADSP_LOADER_BASE + 0x00)
+#define SKL_ADSP_REG_CL_SD_STS			(HDA_ADSP_LOADER_BASE + 0x03)
+#define SKL_ADSP_REG_CL_SD_LPIB			(HDA_ADSP_LOADER_BASE + 0x04)
+#define SKL_ADSP_REG_CL_SD_CBL			(HDA_ADSP_LOADER_BASE + 0x08)
+#define SKL_ADSP_REG_CL_SD_LVI			(HDA_ADSP_LOADER_BASE + 0x0c)
+#define SKL_ADSP_REG_CL_SD_FIFOW		(HDA_ADSP_LOADER_BASE + 0x0e)
+#define SKL_ADSP_REG_CL_SD_FIFOSIZE		(HDA_ADSP_LOADER_BASE + 0x10)
+#define SKL_ADSP_REG_CL_SD_FORMAT		(HDA_ADSP_LOADER_BASE + 0x12)
+#define SKL_ADSP_REG_CL_SD_FIFOL		(HDA_ADSP_LOADER_BASE + 0x14)
+#define SKL_ADSP_REG_CL_SD_BDLPL		(HDA_ADSP_LOADER_BASE + 0x18)
+#define SKL_ADSP_REG_CL_SD_BDLPU		(HDA_ADSP_LOADER_BASE + 0x1c)
+
+/* CL: Software Position Based FIFO Capability Registers */
+#define SKL_ADSP_REG_CL_SPBFIFO			(HDA_ADSP_LOADER_BASE + 0x20)
+#define SKL_ADSP_REG_CL_SPBFIFO_SPBFCH		(SKL_ADSP_REG_CL_SPBFIFO + 0x0)
+#define SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL	(SKL_ADSP_REG_CL_SPBFIFO + 0x4)
+#define SKL_ADSP_REG_CL_SPBFIFO_SPIB		(SKL_ADSP_REG_CL_SPBFIFO + 0x8)
+#define SKL_ADSP_REG_CL_SPBFIFO_MAXFIFOS	(SKL_ADSP_REG_CL_SPBFIFO + 0xc)
+
+/* CL: Stream Descriptor x Control */
+
+/* Stream Reset */
+#define CL_SD_CTL_SRST_SHIFT		0
+#define CL_SD_CTL_SRST_MASK		(1 << CL_SD_CTL_SRST_SHIFT)
+#define CL_SD_CTL_SRST(x)		\
+			((x << CL_SD_CTL_SRST_SHIFT) & CL_SD_CTL_SRST_MASK)
+
+/* Stream Run */
+#define CL_SD_CTL_RUN_SHIFT		1
+#define CL_SD_CTL_RUN_MASK		(1 << CL_SD_CTL_RUN_SHIFT)
+#define CL_SD_CTL_RUN(x)		\
+			((x << CL_SD_CTL_RUN_SHIFT) & CL_SD_CTL_RUN_MASK)
+
+/* Interrupt On Completion Enable */
+#define CL_SD_CTL_IOCE_SHIFT		2
+#define CL_SD_CTL_IOCE_MASK		(1 << CL_SD_CTL_IOCE_SHIFT)
+#define CL_SD_CTL_IOCE(x)		\
+			((x << CL_SD_CTL_IOCE_SHIFT) & CL_SD_CTL_IOCE_MASK)
+
+/* FIFO Error Interrupt Enable */
+#define CL_SD_CTL_FEIE_SHIFT		3
+#define CL_SD_CTL_FEIE_MASK		(1 << CL_SD_CTL_FEIE_SHIFT)
+#define CL_SD_CTL_FEIE(x)		\
+			((x << CL_SD_CTL_FEIE_SHIFT) & CL_SD_CTL_FEIE_MASK)
+
+/* Descriptor Error Interrupt Enable */
+#define CL_SD_CTL_DEIE_SHIFT		4
+#define CL_SD_CTL_DEIE_MASK		(1 << CL_SD_CTL_DEIE_SHIFT)
+#define CL_SD_CTL_DEIE(x)		\
+			((x << CL_SD_CTL_DEIE_SHIFT) & CL_SD_CTL_DEIE_MASK)
+
+/* FIFO Limit Change */
+#define CL_SD_CTL_FIFOLC_SHIFT		5
+#define CL_SD_CTL_FIFOLC_MASK		(1 << CL_SD_CTL_FIFOLC_SHIFT)
+#define CL_SD_CTL_FIFOLC(x)		\
+			((x << CL_SD_CTL_FIFOLC_SHIFT) & CL_SD_CTL_FIFOLC_MASK)
+
+/* Stripe Control */
+#define CL_SD_CTL_STRIPE_SHIFT		16
+#define CL_SD_CTL_STRIPE_MASK		(0x3 << CL_SD_CTL_STRIPE_SHIFT)
+#define CL_SD_CTL_STRIPE(x)		\
+			((x << CL_SD_CTL_STRIPE_SHIFT) & CL_SD_CTL_STRIPE_MASK)
+
+/* Traffic Priority */
+#define CL_SD_CTL_TP_SHIFT		18
+#define CL_SD_CTL_TP_MASK		(1 << CL_SD_CTL_TP_SHIFT)
+#define CL_SD_CTL_TP(x)			\
+			((x << CL_SD_CTL_TP_SHIFT) & CL_SD_CTL_TP_MASK)
+
+/* Bidirectional Direction Control */
+#define CL_SD_CTL_DIR_SHIFT		19
+#define CL_SD_CTL_DIR_MASK		(1 << CL_SD_CTL_DIR_SHIFT)
+#define CL_SD_CTL_DIR(x)		\
+			((x << CL_SD_CTL_DIR_SHIFT) & CL_SD_CTL_DIR_MASK)
+
+/* Stream Number */
+#define CL_SD_CTL_STRM_SHIFT		20
+#define CL_SD_CTL_STRM_MASK		(0xf << CL_SD_CTL_STRM_SHIFT)
+#define CL_SD_CTL_STRM(x)		\
+			((x << CL_SD_CTL_STRM_SHIFT) & CL_SD_CTL_STRM_MASK)
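+
+/*
+ * For illustration only (mirrors what the controller setup code does;
+ * the exact combination is assumed): enabling the code-loader stream
+ * with buffer-completion and error interrupts could combine these
+ * fields as
+ *
+ *	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+ *		CL_SD_CTL_STRM_MASK | CL_SD_CTL_IOCE_MASK |
+ *		CL_SD_CTL_FEIE_MASK | CL_SD_CTL_DEIE_MASK,
+ *		CL_SD_CTL_STRM(FW_CL_STREAM_NUMBER) | CL_SD_CTL_IOCE(1) |
+ *		CL_SD_CTL_FEIE(1) | CL_SD_CTL_DEIE(1));
+ */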
+
+/* CL: Stream Descriptor x Status */
+
+/* Buffer Completion Interrupt Status */
+#define CL_SD_STS_BCIS(x)		CL_SD_CTL_IOCE(x)
+
+/* FIFO Error */
+#define CL_SD_STS_FIFOE(x)		CL_SD_CTL_FEIE(x)
+
+/* Descriptor Error */
+#define CL_SD_STS_DESE(x)		CL_SD_CTL_DEIE(x)
+
+/* FIFO Ready */
+#define CL_SD_STS_FIFORDY(x)	CL_SD_CTL_FIFOLC(x)
+
+
+/* CL: Stream Descriptor x Last Valid Index */
+#define CL_SD_LVI_SHIFT			0
+#define CL_SD_LVI_MASK			(0xff << CL_SD_LVI_SHIFT)
+#define CL_SD_LVI(x)			((x << CL_SD_LVI_SHIFT) & CL_SD_LVI_MASK)
+
+/* CL: Stream Descriptor x FIFO Eviction Watermark */
+#define CL_SD_FIFOW_SHIFT		0
+#define CL_SD_FIFOW_MASK		(0x7 << CL_SD_FIFOW_SHIFT)
+#define CL_SD_FIFOW(x)			\
+			((x << CL_SD_FIFOW_SHIFT) & CL_SD_FIFOW_MASK)
+
+/* CL: Stream Descriptor x Buffer Descriptor List Pointer Lower Base Address */
+
+/* Protect Bits */
+#define CL_SD_BDLPLBA_PROT_SHIFT	0
+#define CL_SD_BDLPLBA_PROT_MASK		(1 << CL_SD_BDLPLBA_PROT_SHIFT)
+#define CL_SD_BDLPLBA_PROT(x)		\
+		((x << CL_SD_BDLPLBA_PROT_SHIFT) & CL_SD_BDLPLBA_PROT_MASK)
+
+/* Buffer Descriptor List Lower Base Address */
+#define CL_SD_BDLPLBA_SHIFT		7
+#define CL_SD_BDLPLBA_MASK		(0x1ffffff << CL_SD_BDLPLBA_SHIFT)
+#define CL_SD_BDLPLBA(x)		\
+	((BDL_ALIGN(lower_32_bits(x)) << CL_SD_BDLPLBA_SHIFT) & CL_SD_BDLPLBA_MASK)
+
+/* Buffer Descriptor List Upper Base Address */
+#define CL_SD_BDLPUBA_SHIFT		0
+#define CL_SD_BDLPUBA_MASK		(0xffffffff << CL_SD_BDLPUBA_SHIFT)
+#define CL_SD_BDLPUBA(x)		\
+		((upper_32_bits(x) << CL_SD_BDLPUBA_SHIFT) & CL_SD_BDLPUBA_MASK)
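+
+/*
+ * Example with a hypothetical 64-bit BDL address of 0x1_2345_6780:
+ * CL_SD_BDLPLBA() masks off the low seven bits (the hardware requires
+ * 128-byte alignment) and yields 0x23456780 for BDLPL, while
+ * CL_SD_BDLPUBA() yields 0x1 for BDLPU.
+ */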
+
+/*
+ * Code Loader - Software Position Based FIFO
+ * Capability Registers x Software Position Based FIFO Header
+ */
+
+/* Next Capability Pointer */
+#define CL_SPBFIFO_SPBFCH_PTR_SHIFT	0
+#define CL_SPBFIFO_SPBFCH_PTR_MASK	(0xff << CL_SPBFIFO_SPBFCH_PTR_SHIFT)
+#define CL_SPBFIFO_SPBFCH_PTR(x)	\
+		((x << CL_SPBFIFO_SPBFCH_PTR_SHIFT) & CL_SPBFIFO_SPBFCH_PTR_MASK)
+
+/* Capability Identifier */
+#define CL_SPBFIFO_SPBFCH_ID_SHIFT	16
+#define CL_SPBFIFO_SPBFCH_ID_MASK	(0xfff << CL_SPBFIFO_SPBFCH_ID_SHIFT)
+#define CL_SPBFIFO_SPBFCH_ID(x)		\
+		((x << CL_SPBFIFO_SPBFCH_ID_SHIFT) & CL_SPBFIFO_SPBFCH_ID_MASK)
+
+/* Capability Version */
+#define CL_SPBFIFO_SPBFCH_VER_SHIFT	28
+#define CL_SPBFIFO_SPBFCH_VER_MASK	(0xf << CL_SPBFIFO_SPBFCH_VER_SHIFT)
+#define CL_SPBFIFO_SPBFCH_VER(x)	\
+	((x << CL_SPBFIFO_SPBFCH_VER_SHIFT) & CL_SPBFIFO_SPBFCH_VER_MASK)
+
+/* Software Position in Buffer Enable */
+#define CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT	0
+#define CL_SPBFIFO_SPBFCCTL_SPIBE_MASK	(1 << CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT)
+#define CL_SPBFIFO_SPBFCCTL_SPIBE(x)	\
+	((x << CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT) & CL_SPBFIFO_SPBFCCTL_SPIBE_MASK)
+
+/* SST IPC SKL defines */
+#define SKL_WAIT_TIMEOUT		500	/* 500 msec */
+#define SKL_MAX_BUFFER_SIZE		(32 * PAGE_SIZE)
+
+enum skl_cl_dma_wake_states {
+	SKL_CL_DMA_STATUS_NONE = 0,
+	SKL_CL_DMA_BUF_COMPLETE,
+	SKL_CL_DMA_ERR,	/* TODO: Expand the error states */
+};
+
+struct sst_dsp;
+
+struct skl_cl_dev_ops {
+	void (*cl_setup_bdle)(struct sst_dsp *ctx,
+			struct snd_dma_buffer *dmab_data,
+			u32 **bdlp, int size, int with_ioc);
+	void (*cl_setup_controller)(struct sst_dsp *ctx,
+			struct snd_dma_buffer *dmab_bdl,
+			unsigned int max_size, u32 page_count);
+	void (*cl_setup_spb)(struct sst_dsp  *ctx,
+			unsigned int size, bool enable);
+	void (*cl_cleanup_spb)(struct sst_dsp  *ctx);
+	void (*cl_trigger)(struct sst_dsp  *ctx, bool enable);
+	void (*cl_cleanup_controller)(struct sst_dsp  *ctx);
+	int (*cl_copy_to_dmabuf)(struct sst_dsp *ctx,
+			const void *bin, u32 size);
+	void (*cl_stop_dma)(struct sst_dsp *ctx);
+};
+
+/**
+ * struct skl_cl_dev - holds information for a code loader DMA transfer
+ *
+ * @dmab_data: data buffer
+ * @dmab_bdl: buffer descriptor list
+ * @bufsize: ring buffer size
+ * @frags: last valid buffer descriptor index in the BDL
+ * @curr_spib_pos: current position in the ring buffer
+ * @dma_buffer_offset: offset into the DMA buffer
+ * @ops: operations supported on the CL DMA
+ * @wait_queue: wait queue for DMA completion events
+ * @wake_status: DMA wake status
+ * @wait_condition: condition to wait on the wait queue
+ */
+struct skl_cl_dev {
+	struct snd_dma_buffer dmab_data;
+	struct snd_dma_buffer dmab_bdl;
+
+	unsigned int bufsize;
+	unsigned int frags;
+
+	unsigned int curr_spib_pos;
+	unsigned int dma_buffer_offset;
+	struct skl_cl_dev_ops ops;
+
+	wait_queue_head_t wait_queue;
+	int wake_status;
+	bool wait_condition;
+};
+
+#endif /* SKL_SST_CLDMA_H_ */
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c
new file mode 100644
index 0000000..94875b0
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-dsp.c
@@ -0,0 +1,342 @@
+/*
+ * skl-sst-dsp.c - SKL SST library generic functions
+ *
+ * Copyright (C) 2014-15, Intel Corporation.
+ * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
+ *	Jeeja KP <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <sound/pcm.h>
+
+#include "../common/sst-dsp.h"
+#include "../common/sst-ipc.h"
+#include "../common/sst-dsp-priv.h"
+#include "skl-sst-ipc.h"
+
+/* various timeout values */
+#define SKL_DSP_PU_TO		50
+#define SKL_DSP_PD_TO		50
+#define SKL_DSP_RESET_TO	50
+
+void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state)
+{
+	mutex_lock(&ctx->mutex);
+	ctx->sst_state = state;
+	mutex_unlock(&ctx->mutex);
+}
+
+static int skl_dsp_core_set_reset_state(struct sst_dsp  *ctx)
+{
+	int ret;
+
+	/* update bits */
+	sst_dsp_shim_update_bits_unlocked(ctx,
+			SKL_ADSP_REG_ADSPCS, SKL_ADSPCS_CRST_MASK,
+			SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK));
+
+	/* poll with timeout to check if operation successful */
+	ret = sst_dsp_register_poll(ctx,
+			SKL_ADSP_REG_ADSPCS,
+			SKL_ADSPCS_CRST_MASK,
+			SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK),
+			SKL_DSP_RESET_TO,
+			"Set reset");
+	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
+				SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) !=
+				SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) {
+		dev_err(ctx->dev, "Set reset state failed\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static int skl_dsp_core_unset_reset_state(struct sst_dsp  *ctx)
+{
+	int ret;
+
+	dev_dbg(ctx->dev, "In %s\n", __func__);
+
+	/* update bits */
+	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+					SKL_ADSPCS_CRST_MASK, 0);
+
+	/* poll with timeout to check if operation successful */
+	ret = sst_dsp_register_poll(ctx,
+			SKL_ADSP_REG_ADSPCS,
+			SKL_ADSPCS_CRST_MASK,
+			0,
+			SKL_DSP_RESET_TO,
+			"Unset reset");
+
+	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
+				 SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) != 0) {
+		dev_err(ctx->dev, "Unset reset state failed\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static bool is_skl_dsp_core_enable(struct sst_dsp  *ctx)
+{
+	int val;
+	bool is_enable;
+
+	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);
+
+	is_enable = ((val & SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) &&
+			(val & SKL_ADSPCS_SPA(SKL_DSP_CORES_MASK)) &&
+			!(val & SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) &&
+			!(val & SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK)));
+
+	dev_dbg(ctx->dev, "DSP core is enabled=%d\n", is_enable);
+	return is_enable;
+}
+
+static int skl_dsp_reset_core(struct sst_dsp *ctx)
+{
+	/* stall core */
+	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+			 sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) |
+				SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK));
+
+	/* set reset state */
+	return skl_dsp_core_set_reset_state(ctx);
+}
+
+static int skl_dsp_start_core(struct sst_dsp *ctx)
+{
+	int ret;
+
+	/* unset reset state */
+	ret = skl_dsp_core_unset_reset_state(ctx);
+	if (ret < 0) {
+		dev_dbg(ctx->dev, "dsp unset reset fails\n");
+		return ret;
+	}
+
+	/* run core */
+	dev_dbg(ctx->dev, "run core...\n");
+	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+			 sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
+				~SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK));
+
+	if (!is_skl_dsp_core_enable(ctx)) {
+		skl_dsp_reset_core(ctx);
+		dev_err(ctx->dev, "DSP core enable failed\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static int skl_dsp_core_power_up(struct sst_dsp  *ctx)
+{
+	int ret;
+
+	/* update bits */
+	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+			SKL_ADSPCS_SPA_MASK, SKL_ADSPCS_SPA(SKL_DSP_CORES_MASK));
+
+	/* poll with timeout to check if operation successful */
+	ret = sst_dsp_register_poll(ctx,
+			SKL_ADSP_REG_ADSPCS,
+			SKL_ADSPCS_CPA_MASK,
+			SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK),
+			SKL_DSP_PU_TO,
+			"Power up");
+
+	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
+			SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) !=
+			SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) {
+		dev_err(ctx->dev, "DSP core power up failed\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static int skl_dsp_core_power_down(struct sst_dsp  *ctx)
+{
+	/* update bits */
+	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+					SKL_ADSPCS_SPA_MASK, 0);
+
+	/* poll with timeout to check if operation successful */
+	return sst_dsp_register_poll(ctx,
+			SKL_ADSP_REG_ADSPCS,
+			SKL_ADSPCS_SPA_MASK,
+			0,
+			SKL_DSP_PD_TO,
+			"Power down");
+}
+
+static int skl_dsp_enable_core(struct sst_dsp  *ctx)
+{
+	int ret;
+
+	/* power up */
+	ret = skl_dsp_core_power_up(ctx);
+	if (ret < 0) {
+		dev_dbg(ctx->dev, "dsp core power up failed\n");
+		return ret;
+	}
+
+	return skl_dsp_start_core(ctx);
+}
+
+int skl_dsp_disable_core(struct sst_dsp  *ctx)
+{
+	int ret;
+
+	ret = skl_dsp_reset_core(ctx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "dsp core reset failed\n");
+		return ret;
+	}
+
+	/* power down core*/
+	ret = skl_dsp_core_power_down(ctx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "dsp core power down failed\n");
+		return ret;
+	}
+
+	if (is_skl_dsp_core_enable(ctx)) {
+		dev_err(ctx->dev, "DSP core disable failed\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
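+/*
+ * Boot policy, as implemented below: if the core already looks enabled,
+ * only cycle reset and run; otherwise do a full disable-then-enable so
+ * the power state is known-good before the stall bit is released.
+ */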
+int skl_dsp_boot(struct sst_dsp *ctx)
+{
+	int ret;
+
+	if (is_skl_dsp_core_enable(ctx)) {
+		dev_dbg(ctx->dev, "dsp core is already enabled, so reset the dap core\n");
+		ret = skl_dsp_reset_core(ctx);
+		if (ret < 0) {
+			dev_err(ctx->dev, "dsp reset failed\n");
+			return ret;
+		}
+
+		ret = skl_dsp_start_core(ctx);
+		if (ret < 0) {
+			dev_err(ctx->dev, "dsp start failed\n");
+			return ret;
+		}
+	} else {
+		dev_dbg(ctx->dev, "disable and enable to make sure DSP is invalid state\n");
+		ret = skl_dsp_disable_core(ctx);
+
+		if (ret < 0) {
+			dev_err(ctx->dev, "dsp disable core failes\n");
+			return ret;
+		}
+		ret = skl_dsp_enable_core(ctx);
+	}
+
+	return ret;
+}
+
+irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id)
+{
+	struct sst_dsp *ctx = dev_id;
+	u32 val;
+	irqreturn_t result = IRQ_NONE;
+
+	spin_lock(&ctx->spinlock);
+
+	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPIS);
+	ctx->intr_status = val;
+
+	if (val & SKL_ADSPIS_IPC) {
+		skl_ipc_int_disable(ctx);
+		result = IRQ_WAKE_THREAD;
+	}
+
+	if (val & SKL_ADSPIS_CL_DMA) {
+		skl_cldma_int_disable(ctx);
+		result = IRQ_WAKE_THREAD;
+	}
+
+	spin_unlock(&ctx->spinlock);
+
+	return result;
+}
+
+int skl_dsp_wake(struct sst_dsp *ctx)
+{
+	return ctx->fw_ops.set_state_D0(ctx);
+}
+EXPORT_SYMBOL_GPL(skl_dsp_wake);
+
+int skl_dsp_sleep(struct sst_dsp *ctx)
+{
+	return ctx->fw_ops.set_state_D3(ctx);
+}
+EXPORT_SYMBOL_GPL(skl_dsp_sleep);
+
+struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
+		struct sst_dsp_device *sst_dev, int irq)
+{
+	int ret;
+	struct sst_dsp *sst;
+
+	sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
+	if (sst == NULL)
+		return NULL;
+
+	spin_lock_init(&sst->spinlock);
+	mutex_init(&sst->mutex);
+	sst->dev = dev;
+	sst->sst_dev = sst_dev;
+	sst->irq = irq;
+	sst->ops = sst_dev->ops;
+	sst->thread_context = sst_dev->thread_context;
+
+	/* Initialise SST Audio DSP */
+	if (sst->ops->init) {
+		ret = sst->ops->init(sst, NULL);
+		if (ret < 0)
+			return NULL;
+	}
+
+	/* Register the ISR */
+	ret = request_threaded_irq(sst->irq, sst->ops->irq_handler,
+		sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
+	if (ret) {
+		dev_err(sst->dev, "unable to grab threaded IRQ %d, disabling device\n",
+			       sst->irq);
+		return NULL;
+	}
+
+	return sst;
+}
+
+void skl_dsp_free(struct sst_dsp *dsp)
+{
+	skl_ipc_int_disable(dsp);
+
+	free_irq(dsp->irq, dsp);
+	skl_dsp_disable_core(dsp);
+}
+EXPORT_SYMBOL_GPL(skl_dsp_free);
+
+bool is_skl_dsp_running(struct sst_dsp *ctx)
+{
+	return (ctx->sst_state == SKL_DSP_RUNNING);
+}
+EXPORT_SYMBOL_GPL(is_skl_dsp_running);
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h
new file mode 100644
index 0000000..6bfcef4
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-dsp.h
@@ -0,0 +1,145 @@
+/*
+ * Skylake SST DSP Support
+ *
+ * Copyright (C) 2014-15, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __SKL_SST_DSP_H__
+#define __SKL_SST_DSP_H__
+
+#include <linux/interrupt.h>
+#include <sound/memalloc.h>
+#include "skl-sst-cldma.h"
+
+struct sst_dsp;
+struct skl_sst;
+struct sst_dsp_device;
+
+/* Intel HD Audio General DSP Registers */
+#define SKL_ADSP_GEN_BASE		0x0
+#define SKL_ADSP_REG_ADSPCS		(SKL_ADSP_GEN_BASE + 0x04)
+#define SKL_ADSP_REG_ADSPIC		(SKL_ADSP_GEN_BASE + 0x08)
+#define SKL_ADSP_REG_ADSPIS		(SKL_ADSP_GEN_BASE + 0x0C)
+#define SKL_ADSP_REG_ADSPIC2		(SKL_ADSP_GEN_BASE + 0x10)
+#define SKL_ADSP_REG_ADSPIS2		(SKL_ADSP_GEN_BASE + 0x14)
+
+/* Intel HD Audio Inter-Processor Communication Registers */
+#define SKL_ADSP_IPC_BASE		0x40
+#define SKL_ADSP_REG_HIPCT		(SKL_ADSP_IPC_BASE + 0x00)
+#define SKL_ADSP_REG_HIPCTE		(SKL_ADSP_IPC_BASE + 0x04)
+#define SKL_ADSP_REG_HIPCI		(SKL_ADSP_IPC_BASE + 0x08)
+#define SKL_ADSP_REG_HIPCIE		(SKL_ADSP_IPC_BASE + 0x0C)
+#define SKL_ADSP_REG_HIPCCTL		(SKL_ADSP_IPC_BASE + 0x10)
+
+/*  HIPCI */
+#define SKL_ADSP_REG_HIPCI_BUSY		BIT(31)
+
+/* HIPCIE */
+#define SKL_ADSP_REG_HIPCIE_DONE	BIT(30)
+
+/* HIPCCTL */
+#define SKL_ADSP_REG_HIPCCTL_DONE	BIT(1)
+#define SKL_ADSP_REG_HIPCCTL_BUSY	BIT(0)
+
+/* HIPCT */
+#define SKL_ADSP_REG_HIPCT_BUSY		BIT(31)
+
+/* Intel HD Audio SRAM Window 1 */
+#define SKL_ADSP_SRAM1_BASE		0xA000
+
+#define SKL_ADSP_MMIO_LEN		0x10000
+
+#define SKL_ADSP_W0_STAT_SZ		0x800
+
+#define SKL_ADSP_W0_UP_SZ		0x800
+
+#define SKL_ADSP_W1_SZ			0x1000
+
+#define SKL_FW_STS_MASK			0xf
+
+#define SKL_FW_INIT			0x1
+#define SKL_FW_RFW_START		0xf
+
+#define SKL_ADSPIC_IPC			1
+#define SKL_ADSPIS_IPC			1
+
+/* ADSPCS - Audio DSP Control & Status */
+#define SKL_DSP_CORES		1
+#define SKL_DSP_CORE0_MASK	1
+#define SKL_DSP_CORES_MASK	((1 << SKL_DSP_CORES) - 1)
+
+/* Core Reset - asserted high */
+#define SKL_ADSPCS_CRST_SHIFT	0
+#define SKL_ADSPCS_CRST_MASK	(SKL_DSP_CORES_MASK << SKL_ADSPCS_CRST_SHIFT)
+#define SKL_ADSPCS_CRST(x)	((x << SKL_ADSPCS_CRST_SHIFT) & SKL_ADSPCS_CRST_MASK)
+
+/* Core run/stall - when set to '1' core is stalled */
+#define SKL_ADSPCS_CSTALL_SHIFT	8
+#define SKL_ADSPCS_CSTALL_MASK	(SKL_DSP_CORES_MASK <<	\
+					SKL_ADSPCS_CSTALL_SHIFT)
+#define SKL_ADSPCS_CSTALL(x)	((x << SKL_ADSPCS_CSTALL_SHIFT) &	\
+				SKL_ADSPCS_CSTALL_MASK)
+
+/* Set Power Active - when set to '1' turn cores on */
+#define SKL_ADSPCS_SPA_SHIFT	16
+#define SKL_ADSPCS_SPA_MASK	(SKL_DSP_CORES_MASK << SKL_ADSPCS_SPA_SHIFT)
+#define SKL_ADSPCS_SPA(x)	((x << SKL_ADSPCS_SPA_SHIFT) & SKL_ADSPCS_SPA_MASK)
+
+/* Current Power Active - power status of cores, set by hardware */
+#define SKL_ADSPCS_CPA_SHIFT	24
+#define SKL_ADSPCS_CPA_MASK	(SKL_DSP_CORES_MASK << SKL_ADSPCS_CPA_SHIFT)
+#define SKL_ADSPCS_CPA(x)	((x << SKL_ADSPCS_CPA_SHIFT) & SKL_ADSPCS_CPA_MASK)
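+
+/*
+ * Handshake implied by these fields: software requests power with SPA
+ * and polls until hardware mirrors it in CPA; reset and stall are
+ * driven through CRST and CSTALL the same way.  A core is considered
+ * up only when SPA and CPA are set while CRST and CSTALL are clear,
+ * which is what is_skl_dsp_core_enable() tests.
+ */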
+
+#define SST_DSP_POWER_D0	0x0  /* full On */
+#define SST_DSP_POWER_D3	0x3  /* Off */
+
+enum skl_dsp_states {
+	SKL_DSP_RUNNING = 1,
+	SKL_DSP_RESET,
+};
+
+struct skl_dsp_fw_ops {
+	int (*load_fw)(struct sst_dsp  *ctx);
+	/* FW module parser/loader */
+	int (*parse_fw)(struct sst_dsp *ctx);
+	int (*set_state_D0)(struct sst_dsp *ctx);
+	int (*set_state_D3)(struct sst_dsp *ctx);
+	unsigned int (*get_fw_errcode)(struct sst_dsp *ctx);
+};
+
+struct skl_dsp_loader_ops {
+	int (*alloc_dma_buf)(struct device *dev,
+		struct snd_dma_buffer *dmab, size_t size);
+	int (*free_dma_buf)(struct device *dev,
+		struct snd_dma_buffer *dmab);
+};
+
+void skl_cldma_process_intr(struct sst_dsp *ctx);
+void skl_cldma_int_disable(struct sst_dsp *ctx);
+int skl_cldma_prepare(struct sst_dsp *ctx);
+
+void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state);
+struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
+		struct sst_dsp_device *sst_dev, int irq);
+int skl_dsp_disable_core(struct sst_dsp  *ctx);
+bool is_skl_dsp_running(struct sst_dsp *ctx);
+irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id);
+int skl_dsp_wake(struct sst_dsp *ctx);
+int skl_dsp_sleep(struct sst_dsp *ctx);
+void skl_dsp_free(struct sst_dsp *dsp);
+
+int skl_dsp_boot(struct sst_dsp *ctx);
+int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
+		struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp);
+void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx);
+
+#endif /*__SKL_SST_DSP_H__*/
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
new file mode 100644
index 0000000..937a0a3
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -0,0 +1,771 @@
+/*
+ * skl-sst-ipc.c - Intel skl IPC Support
+ *
+ * Copyright (C) 2014-15, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <linux/device.h>
+
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+#include "skl-sst-dsp.h"
+#include "skl-sst-ipc.h"
+
+
+#define IPC_IXC_STATUS_BITS		24
+
+/* Global Message - Generic */
+#define IPC_GLB_TYPE_SHIFT		24
+#define IPC_GLB_TYPE_MASK		(0xf << IPC_GLB_TYPE_SHIFT)
+#define IPC_GLB_TYPE(x)			((x) << IPC_GLB_TYPE_SHIFT)
+
+/* Global Message - Reply */
+#define IPC_GLB_REPLY_STATUS_SHIFT	24
+#define IPC_GLB_REPLY_STATUS_MASK	((0x1 << IPC_GLB_REPLY_STATUS_SHIFT) - 1)
+#define IPC_GLB_REPLY_STATUS(x)		((x) << IPC_GLB_REPLY_STATUS_SHIFT)
+
+#define IPC_TIMEOUT_MSECS		3000
+
+#define IPC_EMPTY_LIST_SIZE		8
+
+#define IPC_MSG_TARGET_SHIFT		30
+#define IPC_MSG_TARGET_MASK		0x1
+#define IPC_MSG_TARGET(x)		(((x) & IPC_MSG_TARGET_MASK) \
+					<< IPC_MSG_TARGET_SHIFT)
+
+#define IPC_MSG_DIR_SHIFT		29
+#define IPC_MSG_DIR_MASK		0x1
+#define IPC_MSG_DIR(x)			(((x) & IPC_MSG_DIR_MASK) \
+					<< IPC_MSG_DIR_SHIFT)
+/* Global Notification Message */
+#define IPC_GLB_NOTIFY_TYPE_SHIFT	16
+#define IPC_GLB_NOTIFY_TYPE_MASK	0xFF
+#define IPC_GLB_NOTIFY_TYPE(x)		(((x) >> IPC_GLB_NOTIFY_TYPE_SHIFT) \
+					& IPC_GLB_NOTIFY_TYPE_MASK)
+
+#define IPC_GLB_NOTIFY_MSG_TYPE_SHIFT	24
+#define IPC_GLB_NOTIFY_MSG_TYPE_MASK	0x1F
+#define IPC_GLB_NOTIFY_MSG_TYPE(x)	(((x) >> IPC_GLB_NOTIFY_MSG_TYPE_SHIFT)	\
+						& IPC_GLB_NOTIFY_MSG_TYPE_MASK)
+
+#define IPC_GLB_NOTIFY_RSP_SHIFT	29
+#define IPC_GLB_NOTIFY_RSP_MASK		0x1
+#define IPC_GLB_NOTIFY_RSP_TYPE(x)	(((x) >> IPC_GLB_NOTIFY_RSP_SHIFT) \
+					& IPC_GLB_NOTIFY_RSP_MASK)
+
+/* Pipeline operations */
+
+/* Create pipeline message */
+#define IPC_PPL_MEM_SIZE_SHIFT		0
+#define IPC_PPL_MEM_SIZE_MASK		0x7FF
+#define IPC_PPL_MEM_SIZE(x)		(((x) & IPC_PPL_MEM_SIZE_MASK) \
+					<< IPC_PPL_MEM_SIZE_SHIFT)
+
+#define IPC_PPL_TYPE_SHIFT		11
+#define IPC_PPL_TYPE_MASK		0x1F
+#define IPC_PPL_TYPE(x)			(((x) & IPC_PPL_TYPE_MASK) \
+					<< IPC_PPL_TYPE_SHIFT)
+
+#define IPC_INSTANCE_ID_SHIFT		16
+#define IPC_INSTANCE_ID_MASK		0xFF
+#define IPC_INSTANCE_ID(x)		(((x) & IPC_INSTANCE_ID_MASK) \
+					<< IPC_INSTANCE_ID_SHIFT)
+
+/* Set pipeline state message */
+#define IPC_PPL_STATE_SHIFT		0
+#define IPC_PPL_STATE_MASK		0x1F
+#define IPC_PPL_STATE(x)		(((x) & IPC_PPL_STATE_MASK) \
+					<< IPC_PPL_STATE_SHIFT)
+
+/* Module operations primary register */
+#define IPC_MOD_ID_SHIFT		0
+#define IPC_MOD_ID_MASK		0xFFFF
+#define IPC_MOD_ID(x)		(((x) & IPC_MOD_ID_MASK) \
+					<< IPC_MOD_ID_SHIFT)
+
+#define IPC_MOD_INSTANCE_ID_SHIFT	16
+#define IPC_MOD_INSTANCE_ID_MASK	0xFF
+#define IPC_MOD_INSTANCE_ID(x)	(((x) & IPC_MOD_INSTANCE_ID_MASK) \
+					<< IPC_MOD_INSTANCE_ID_SHIFT)
+
+/* Init instance message extension register */
+#define IPC_PARAM_BLOCK_SIZE_SHIFT	0
+#define IPC_PARAM_BLOCK_SIZE_MASK	0xFFFF
+#define IPC_PARAM_BLOCK_SIZE(x)		(((x) & IPC_PARAM_BLOCK_SIZE_MASK) \
+					<< IPC_PARAM_BLOCK_SIZE_SHIFT)
+
+#define IPC_PPL_INSTANCE_ID_SHIFT	16
+#define IPC_PPL_INSTANCE_ID_MASK	0xFF
+#define IPC_PPL_INSTANCE_ID(x)		(((x) & IPC_PPL_INSTANCE_ID_MASK) \
+					<< IPC_PPL_INSTANCE_ID_SHIFT)
+
+#define IPC_CORE_ID_SHIFT		24
+#define IPC_CORE_ID_MASK		0x1F
+#define IPC_CORE_ID(x)			(((x) & IPC_CORE_ID_MASK) \
+					<< IPC_CORE_ID_SHIFT)
+
+/* Bind/Unbind message extension register */
+#define IPC_DST_MOD_ID_SHIFT		0
+#define IPC_DST_MOD_ID(x)		(((x) & IPC_MOD_ID_MASK) \
+					<< IPC_DST_MOD_ID_SHIFT)
+
+#define IPC_DST_MOD_INSTANCE_ID_SHIFT 16
+#define IPC_DST_MOD_INSTANCE_ID(x)	(((x) & IPC_MOD_INSTANCE_ID_MASK) \
+					<< IPC_DST_MOD_INSTANCE_ID_SHIFT)
+
+#define IPC_DST_QUEUE_SHIFT		24
+#define IPC_DST_QUEUE_MASK		0x7
+#define IPC_DST_QUEUE(x)		(((x) & IPC_DST_QUEUE_MASK) \
+					<< IPC_DST_QUEUE_SHIFT)
+
+#define IPC_SRC_QUEUE_SHIFT		27
+#define IPC_SRC_QUEUE_MASK		0x7
+#define IPC_SRC_QUEUE(x)		(((x) & IPC_SRC_QUEUE_MASK) \
+					<< IPC_SRC_QUEUE_SHIFT)
+
+/* Save pipeline message extension register */
+#define IPC_DMA_ID_SHIFT		0
+#define IPC_DMA_ID_MASK			0x1F
+#define IPC_DMA_ID(x)			(((x) & IPC_DMA_ID_MASK) \
+					<< IPC_DMA_ID_SHIFT)
+/* Large Config message extension register */
+#define IPC_DATA_OFFSET_SZ_SHIFT	0
+#define IPC_DATA_OFFSET_SZ_MASK		0xFFFFF
+#define IPC_DATA_OFFSET_SZ(x)		(((x) & IPC_DATA_OFFSET_SZ_MASK) \
+					<< IPC_DATA_OFFSET_SZ_SHIFT)
+#define IPC_DATA_OFFSET_SZ_CLEAR	~(IPC_DATA_OFFSET_SZ_MASK \
+					  << IPC_DATA_OFFSET_SZ_SHIFT)
+
+#define IPC_LARGE_PARAM_ID_SHIFT	20
+#define IPC_LARGE_PARAM_ID_MASK		0xFF
+#define IPC_LARGE_PARAM_ID(x)		(((x) & IPC_LARGE_PARAM_ID_MASK) \
+					<< IPC_LARGE_PARAM_ID_SHIFT)
+
+#define IPC_FINAL_BLOCK_SHIFT		28
+#define IPC_FINAL_BLOCK_MASK		0x1
+#define IPC_FINAL_BLOCK(x)		(((x) & IPC_FINAL_BLOCK_MASK) \
+					<< IPC_FINAL_BLOCK_SHIFT)
+
+#define IPC_INITIAL_BLOCK_SHIFT		29
+#define IPC_INITIAL_BLOCK_MASK		0x1
+#define IPC_INITIAL_BLOCK(x)		(((x) & IPC_INITIAL_BLOCK_MASK) \
+					<< IPC_INITIAL_BLOCK_SHIFT)
+#define IPC_INITIAL_BLOCK_CLEAR		~(IPC_INITIAL_BLOCK_MASK \
+					  << IPC_INITIAL_BLOCK_SHIFT)
+
+enum skl_ipc_msg_target {
+	IPC_FW_GEN_MSG = 0,
+	IPC_MOD_MSG = 1
+};
+
+enum skl_ipc_msg_direction {
+	IPC_MSG_REQUEST = 0,
+	IPC_MSG_REPLY = 1
+};
+
+/* Global Message Types */
+enum skl_ipc_glb_type {
+	IPC_GLB_GET_FW_VERSION = 0, /* Retrieves firmware version */
+	IPC_GLB_LOAD_MULTIPLE_MODS = 15,
+	IPC_GLB_UNLOAD_MULTIPLE_MODS = 16,
+	IPC_GLB_CREATE_PPL = 17,
+	IPC_GLB_DELETE_PPL = 18,
+	IPC_GLB_SET_PPL_STATE = 19,
+	IPC_GLB_GET_PPL_STATE = 20,
+	IPC_GLB_GET_PPL_CONTEXT_SIZE = 21,
+	IPC_GLB_SAVE_PPL = 22,
+	IPC_GLB_RESTORE_PPL = 23,
+	IPC_GLB_NOTIFY = 26,
+	IPC_GLB_MAX_IPC_MSG_NUMBER = 31 /* Maximum message number */
+};
+
+enum skl_ipc_glb_reply {
+	IPC_GLB_REPLY_SUCCESS = 0,
+
+	IPC_GLB_REPLY_UNKNOWN_MSG_TYPE = 1,
+	IPC_GLB_REPLY_ERROR_INVALID_PARAM = 2,
+
+	IPC_GLB_REPLY_BUSY = 3,
+	IPC_GLB_REPLY_PENDING = 4,
+	IPC_GLB_REPLY_FAILURE = 5,
+	IPC_GLB_REPLY_INVALID_REQUEST = 6,
+
+	IPC_GLB_REPLY_OUT_OF_MEMORY = 7,
+	IPC_GLB_REPLY_OUT_OF_MIPS = 8,
+
+	IPC_GLB_REPLY_INVALID_RESOURCE_ID = 9,
+	IPC_GLB_REPLY_INVALID_RESOURCE_STATE = 10,
+
+	IPC_GLB_REPLY_MOD_MGMT_ERROR = 100,
+	IPC_GLB_REPLY_MOD_LOAD_CL_FAILED = 101,
+	IPC_GLB_REPLY_MOD_LOAD_INVALID_HASH = 102,
+
+	IPC_GLB_REPLY_MOD_UNLOAD_INST_EXIST = 103,
+	IPC_GLB_REPLY_MOD_NOT_INITIALIZED = 104,
+
+	IPC_GLB_REPLY_INVALID_CONFIG_PARAM_ID = 120,
+	IPC_GLB_REPLY_INVALID_CONFIG_DATA_LEN = 121,
+	IPC_GLB_REPLY_GATEWAY_NOT_INITIALIZED = 140,
+	IPC_GLB_REPLY_GATEWAY_NOT_EXIST = 141,
+
+	IPC_GLB_REPLY_PPL_NOT_INITIALIZED = 160,
+	IPC_GLB_REPLY_PPL_NOT_EXIST = 161,
+	IPC_GLB_REPLY_PPL_SAVE_FAILED = 162,
+	IPC_GLB_REPLY_PPL_RESTORE_FAILED = 163,
+
+	IPC_MAX_STATUS = ((1<<IPC_IXC_STATUS_BITS)-1)
+};
+
+enum skl_ipc_notification_type {
+	IPC_GLB_NOTIFY_GLITCH = 0,
+	IPC_GLB_NOTIFY_OVERRUN = 1,
+	IPC_GLB_NOTIFY_UNDERRUN = 2,
+	IPC_GLB_NOTIFY_END_STREAM = 3,
+	IPC_GLB_NOTIFY_PHRASE_DETECTED = 4,
+	IPC_GLB_NOTIFY_RESOURCE_EVENT = 5,
+	IPC_GLB_NOTIFY_LOG_BUFFER_STATUS = 6,
+	IPC_GLB_NOTIFY_TIMESTAMP_CAPTURED = 7,
+	IPC_GLB_NOTIFY_FW_READY = 8
+};
+
+/* Module Message Types */
+enum skl_ipc_module_msg {
+	IPC_MOD_INIT_INSTANCE = 0,
+	IPC_MOD_CONFIG_GET = 1,
+	IPC_MOD_CONFIG_SET = 2,
+	IPC_MOD_LARGE_CONFIG_GET = 3,
+	IPC_MOD_LARGE_CONFIG_SET = 4,
+	IPC_MOD_BIND = 5,
+	IPC_MOD_UNBIND = 6,
+	IPC_MOD_SET_DX = 7
+};
+
+static void skl_ipc_tx_data_copy(struct ipc_message *msg, char *tx_data,
+		size_t tx_size)
+{
+	if (tx_size)
+		memcpy(msg->tx_data, tx_data, tx_size);
+}
+
+static bool skl_ipc_is_dsp_busy(struct sst_dsp *dsp)
+{
+	u32 hipci;
+
+	hipci = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCI);
+	return (hipci & SKL_ADSP_REG_HIPCI_BUSY);
+}
+
+/* Lock to be held by caller */
+static void skl_ipc_tx_msg(struct sst_generic_ipc *ipc, struct ipc_message *msg)
+{
+	struct skl_ipc_header *header = (struct skl_ipc_header *)(&msg->header);
+
+	if (msg->tx_size)
+		sst_dsp_outbox_write(ipc->dsp, msg->tx_data, msg->tx_size);
+	sst_dsp_shim_write_unlocked(ipc->dsp, SKL_ADSP_REG_HIPCIE,
+						header->extension);
+	sst_dsp_shim_write_unlocked(ipc->dsp, SKL_ADSP_REG_HIPCI,
+		header->primary | SKL_ADSP_REG_HIPCI_BUSY);
+}
+
+static struct ipc_message *skl_ipc_reply_get_msg(struct sst_generic_ipc *ipc,
+				u64 ipc_header)
+{
+	struct ipc_message *msg = NULL;
+	struct skl_ipc_header *header = (struct skl_ipc_header *)(&ipc_header);
+
+	if (list_empty(&ipc->rx_list)) {
+		dev_err(ipc->dev, "ipc: rx list is empty but received 0x%x\n",
+			header->primary);
+		goto out;
+	}
+
+	msg = list_first_entry(&ipc->rx_list, struct ipc_message, list);
+
+out:
+	return msg;
+
+}
+
+static int skl_ipc_process_notification(struct sst_generic_ipc *ipc,
+		struct skl_ipc_header header)
+{
+	struct skl_sst *skl = container_of(ipc, struct skl_sst, ipc);
+
+	if (IPC_GLB_NOTIFY_MSG_TYPE(header.primary)) {
+		switch (IPC_GLB_NOTIFY_TYPE(header.primary)) {
+
+		case IPC_GLB_NOTIFY_UNDERRUN:
+			dev_err(ipc->dev, "FW Underrun %x\n", header.primary);
+			break;
+
+		case IPC_GLB_NOTIFY_RESOURCE_EVENT:
+			dev_err(ipc->dev, "MCPS Budget Violation: %x\n",
+						header.primary);
+			break;
+
+		case IPC_GLB_NOTIFY_FW_READY:
+			skl->boot_complete = true;
+			wake_up(&skl->boot_wait);
+			break;
+
+		default:
+			dev_err(ipc->dev, "ipc: Unhandled error msg=%x",
+						header.primary);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
+		struct skl_ipc_header header)
+{
+	struct ipc_message *msg;
+	u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK;
+	u64 *ipc_header = (u64 *)(&header);
+
+	msg = skl_ipc_reply_get_msg(ipc, *ipc_header);
+	if (msg == NULL) {
+		dev_dbg(ipc->dev, "ipc: rx list is empty\n");
+		return;
+	}
+
+	/* first process the header */
+	switch (reply) {
+	case IPC_GLB_REPLY_SUCCESS:
+		dev_info(ipc->dev, "ipc FW reply %x: success\n", header.primary);
+		break;
+
+	case IPC_GLB_REPLY_OUT_OF_MEMORY:
+		dev_err(ipc->dev, "ipc fw reply: %x: no memory\n", header.primary);
+		msg->errno = -ENOMEM;
+		break;
+
+	case IPC_GLB_REPLY_BUSY:
+		dev_err(ipc->dev, "ipc fw reply: %x: Busy\n", header.primary);
+		msg->errno = -EBUSY;
+		break;
+
+	default:
+		dev_err(ipc->dev, "Unknown ipc reply: 0x%x", reply);
+		msg->errno = -EINVAL;
+		break;
+	}
+
+	if (reply != IPC_GLB_REPLY_SUCCESS) {
+		dev_err(ipc->dev, "ipc FW reply: reply=%d", reply);
+		dev_err(ipc->dev, "FW Error Code: %u\n",
+			ipc->dsp->fw_ops.get_fw_errcode(ipc->dsp));
+	}
+
+	list_del(&msg->list);
+	sst_ipc_tx_msg_reply_complete(ipc, msg);
+}
+
+irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
+{
+	struct sst_dsp *dsp = context;
+	struct skl_sst *skl = sst_dsp_get_thread_context(dsp);
+	struct sst_generic_ipc *ipc = &skl->ipc;
+	struct skl_ipc_header header = {0};
+	u32 hipcie, hipct, hipcte;
+	int ipc_irq = 0;
+
+	if (dsp->intr_status & SKL_ADSPIS_CL_DMA)
+		skl_cldma_process_intr(dsp);
+
+	/* Here we handle IPC interrupts only */
+	if (!(dsp->intr_status & SKL_ADSPIS_IPC))
+		return IRQ_NONE;
+
+	hipcie = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCIE);
+	hipct = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCT);
+
+	/* reply message from DSP */
+	if (hipcie & SKL_ADSP_REG_HIPCIE_DONE) {
+		sst_dsp_shim_update_bits(dsp, SKL_ADSP_REG_HIPCCTL,
+			SKL_ADSP_REG_HIPCCTL_DONE, 0);
+
+		/* clear DONE bit - tell DSP we have completed the operation */
+		sst_dsp_shim_update_bits_forced(dsp, SKL_ADSP_REG_HIPCIE,
+			SKL_ADSP_REG_HIPCIE_DONE, SKL_ADSP_REG_HIPCIE_DONE);
+
+		ipc_irq = 1;
+
+		/* unmask Done interrupt */
+		sst_dsp_shim_update_bits(dsp, SKL_ADSP_REG_HIPCCTL,
+			SKL_ADSP_REG_HIPCCTL_DONE, SKL_ADSP_REG_HIPCCTL_DONE);
+	}
+
+	/* New message from DSP */
+	if (hipct & SKL_ADSP_REG_HIPCT_BUSY) {
+		hipcte = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCTE);
+		header.primary = hipct;
+		header.extension = hipcte;
+		dev_dbg(dsp->dev, "IPC irq: Firmware respond primary:%x",
+						header.primary);
+		dev_dbg(dsp->dev, "IPC irq: Firmware respond extension:%x",
+						header.extension);
+
+		if (IPC_GLB_NOTIFY_RSP_TYPE(header.primary)) {
+			/* Handle Immediate reply from DSP Core */
+			skl_ipc_process_reply(ipc, header);
+		} else {
+			dev_dbg(dsp->dev, "IPC irq: Notification from firmware\n");
+			skl_ipc_process_notification(ipc, header);
+		}
+		/* clear  busy interrupt */
+		sst_dsp_shim_update_bits_forced(dsp, SKL_ADSP_REG_HIPCT,
+			SKL_ADSP_REG_HIPCT_BUSY, SKL_ADSP_REG_HIPCT_BUSY);
+		ipc_irq = 1;
+	}
+
+	if (ipc_irq == 0)
+		return IRQ_NONE;
+
+	skl_ipc_int_enable(dsp);
+
+	/* continue to send any remaining messages... */
+	queue_kthread_work(&ipc->kworker, &ipc->kwork);
+
+	return IRQ_HANDLED;
+}
+
+void skl_ipc_int_enable(struct sst_dsp *ctx)
+{
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_ADSPIC,
+			SKL_ADSPIC_IPC, SKL_ADSPIC_IPC);
+}
+
+void skl_ipc_int_disable(struct sst_dsp *ctx)
+{
+	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
+			SKL_ADSPIC_IPC, 0);
+}
+
+void skl_ipc_op_int_enable(struct sst_dsp *ctx)
+{
+	/* enable IPC DONE interrupt */
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_HIPCCTL,
+		SKL_ADSP_REG_HIPCCTL_DONE, SKL_ADSP_REG_HIPCCTL_DONE);
+
+	/* Enable IPC BUSY interrupt */
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_HIPCCTL,
+		SKL_ADSP_REG_HIPCCTL_BUSY, SKL_ADSP_REG_HIPCCTL_BUSY);
+}
+
+bool skl_ipc_int_status(struct sst_dsp *ctx)
+{
+	return sst_dsp_shim_read_unlocked(ctx,
+			SKL_ADSP_REG_ADSPIS) & SKL_ADSPIS_IPC;
+}
+
+int skl_ipc_init(struct device *dev, struct skl_sst *skl)
+{
+	struct sst_generic_ipc *ipc;
+	int err;
+
+	ipc = &skl->ipc;
+	ipc->dsp = skl->dsp;
+	ipc->dev = dev;
+
+	ipc->tx_data_max_size = SKL_ADSP_W1_SZ;
+	ipc->rx_data_max_size = SKL_ADSP_W0_UP_SZ;
+
+	err = sst_ipc_init(ipc);
+	if (err)
+		return err;
+
+	ipc->ops.tx_msg = skl_ipc_tx_msg;
+	ipc->ops.tx_data_copy = skl_ipc_tx_data_copy;
+	ipc->ops.is_dsp_busy = skl_ipc_is_dsp_busy;
+
+	return 0;
+}
+
+void skl_ipc_free(struct sst_generic_ipc *ipc)
+{
+	/* Disable IPC DONE interrupt */
+	sst_dsp_shim_update_bits(ipc->dsp, SKL_ADSP_REG_HIPCCTL,
+		SKL_ADSP_REG_HIPCCTL_DONE, 0);
+
+	/* Disable IPC BUSY interrupt */
+	sst_dsp_shim_update_bits(ipc->dsp, SKL_ADSP_REG_HIPCCTL,
+		SKL_ADSP_REG_HIPCCTL_BUSY, 0);
+
+	sst_ipc_fini(ipc);
+}
+
+int skl_ipc_create_pipeline(struct sst_generic_ipc *ipc,
+		u16 ppl_mem_size, u8 ppl_type, u8 instance_id)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_GLB_CREATE_PPL);
+	header.primary |= IPC_INSTANCE_ID(instance_id);
+	header.primary |= IPC_PPL_TYPE(ppl_type);
+	header.primary |= IPC_PPL_MEM_SIZE(ppl_mem_size);
+
+	dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: create pipeline fail, err: %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_create_pipeline);
+
+int skl_ipc_delete_pipeline(struct sst_generic_ipc *ipc, u8 instance_id)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_GLB_DELETE_PPL);
+	header.primary |= IPC_INSTANCE_ID(instance_id);
+
+	dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: delete pipeline failed, err %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_delete_pipeline);
+
+int skl_ipc_set_pipeline_state(struct sst_generic_ipc *ipc,
+		u8 instance_id, enum skl_ipc_pipeline_state state)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_GLB_SET_PPL_STATE);
+	header.primary |= IPC_INSTANCE_ID(instance_id);
+	header.primary |= IPC_PPL_STATE(state);
+
+	dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: set pipeline state failed, err: %d\n", ret);
+		return ret;
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_set_pipeline_state);
+
+int
+skl_ipc_save_pipeline(struct sst_generic_ipc *ipc, u8 instance_id, int dma_id)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_GLB_SAVE_PPL);
+	header.primary |= IPC_INSTANCE_ID(instance_id);
+
+	header.extension = IPC_DMA_ID(dma_id);
+	dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: save pipeline failed, err: %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_save_pipeline);
+
+int skl_ipc_restore_pipeline(struct sst_generic_ipc *ipc, u8 instance_id)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_GLB_RESTORE_PPL);
+	header.primary |= IPC_INSTANCE_ID(instance_id);
+
+	dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: restore  pipeline failed, err: %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_restore_pipeline);
+
+int skl_ipc_set_dx(struct sst_generic_ipc *ipc, u8 instance_id,
+		u16 module_id, struct skl_ipc_dxstate_info *dx)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_MOD_SET_DX);
+	header.primary |= IPC_MOD_INSTANCE_ID(instance_id);
+	header.primary |= IPC_MOD_ID(module_id);
+
+	dev_dbg(ipc->dev, "In %s primary =%x ext=%x\n", __func__,
+			 header.primary, header.extension);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header,
+				dx, sizeof(*dx), NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: set dx failed, err %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_set_dx);
+
+int skl_ipc_init_instance(struct sst_generic_ipc *ipc,
+		struct skl_ipc_init_instance_msg *msg, void *param_data)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+	u32 *buffer = (u32 *)param_data;
+	/* param_block_size must be in dwords */
+	u16 param_block_size = msg->param_data_size / sizeof(u32);
+
+	print_hex_dump(KERN_DEBUG, NULL, DUMP_PREFIX_NONE,
+		16, 4, buffer, msg->param_data_size, false);
+
+	header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_MOD_INIT_INSTANCE);
+	header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
+	header.primary |= IPC_MOD_ID(msg->module_id);
+
+	header.extension = IPC_CORE_ID(msg->core_id);
+	header.extension |= IPC_PPL_INSTANCE_ID(msg->ppl_instance_id);
+	header.extension |= IPC_PARAM_BLOCK_SIZE(param_block_size);
+
+	dev_dbg(ipc->dev, "In %s primary =%x ext=%x\n", __func__,
+			 header.primary, header.extension);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, param_data,
+			msg->param_data_size, NULL, 0);
+
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: init instance failed\n");
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_init_instance);
+
+int skl_ipc_bind_unbind(struct sst_generic_ipc *ipc,
+		struct skl_ipc_bind_unbind_msg *msg)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	u8 bind_unbind = msg->bind ? IPC_MOD_BIND : IPC_MOD_UNBIND;
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(bind_unbind);
+	header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
+	header.primary |= IPC_MOD_ID(msg->module_id);
+
+	header.extension = IPC_DST_MOD_ID(msg->dst_module_id);
+	header.extension |= IPC_DST_MOD_INSTANCE_ID(msg->dst_instance_id);
+	header.extension |= IPC_DST_QUEUE(msg->dst_queue);
+	header.extension |= IPC_SRC_QUEUE(msg->src_queue);
+
+	dev_dbg(ipc->dev, "In %s hdr=%x ext=%x\n", __func__, header.primary,
+			 header.extension);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: bind/unbind faileden");
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_bind_unbind);
+
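+/*
+ * Worked example (sizes illustrative): a 12 KiB parameter blob with
+ * SKL_ADSP_W1_SZ of 4 KiB goes out as three IPCs.  The first sets
+ * IPC_INITIAL_BLOCK(1) with DATA_OFFSET_SZ holding the total size;
+ * subsequent blocks clear INITIAL_BLOCK and reuse DATA_OFFSET_SZ as
+ * the running offset; the last block also sets IPC_FINAL_BLOCK(1).
+ */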
+int skl_ipc_set_large_config(struct sst_generic_ipc *ipc,
+		struct skl_ipc_large_config_msg *msg, u32 *param)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret = 0;
+	size_t sz_remaining, tx_size, data_offset;
+
+	header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_MOD_LARGE_CONFIG_SET);
+	header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
+	header.primary |= IPC_MOD_ID(msg->module_id);
+
+	header.extension = IPC_DATA_OFFSET_SZ(msg->param_data_size);
+	header.extension |= IPC_LARGE_PARAM_ID(msg->large_param_id);
+	header.extension |= IPC_FINAL_BLOCK(0);
+	header.extension |= IPC_INITIAL_BLOCK(1);
+
+	sz_remaining = msg->param_data_size;
+	data_offset = 0;
+	while (sz_remaining != 0) {
+		tx_size = sz_remaining > SKL_ADSP_W1_SZ
+				? SKL_ADSP_W1_SZ : sz_remaining;
+		if (tx_size == sz_remaining)
+			header.extension |= IPC_FINAL_BLOCK(1);
+
+		dev_dbg(ipc->dev, "In %s primary=%#x ext=%#x\n", __func__,
+			header.primary, header.extension);
+		dev_dbg(ipc->dev, "transmitting offset: %#x, size: %#x\n",
+			(unsigned)data_offset, (unsigned)tx_size);
+		ret = sst_ipc_tx_message_wait(ipc, *ipc_header,
+					  ((char *)param) + data_offset,
+					  tx_size, NULL, 0);
+		if (ret < 0) {
+			dev_err(ipc->dev,
+				"ipc: set large config fail, err: %d\n", ret);
+			return ret;
+		}
+		sz_remaining -= tx_size;
+		data_offset = msg->param_data_size - sz_remaining;
+
+		/* clear the fields */
+		header.extension &= IPC_INITIAL_BLOCK_CLEAR;
+		header.extension &= IPC_DATA_OFFSET_SZ_CLEAR;
+		/* fill the fields */
+		header.extension |= IPC_INITIAL_BLOCK(0);
+		header.extension |= IPC_DATA_OFFSET_SZ(data_offset);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_set_large_config);
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h
new file mode 100644
index 0000000..9f5f672
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-ipc.h
@@ -0,0 +1,125 @@
+/*
+ * Intel SKL IPC Support
+ *
+ * Copyright (C) 2014-15, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __SKL_IPC_H
+#define __SKL_IPC_H
+
+#include <linux/kthread.h>
+#include <linux/irqreturn.h>
+#include "../common/sst-ipc.h"
+
+struct sst_dsp;
+struct skl_sst;
+struct sst_generic_ipc;
+
+enum skl_ipc_pipeline_state {
+	PPL_INVALID_STATE =	0,
+	PPL_UNINITIALIZED =	1,
+	PPL_RESET =		2,
+	PPL_PAUSED =		3,
+	PPL_RUNNING =		4,
+	PPL_ERROR_STOP =	5,
+	PPL_SAVED =		6,
+	PPL_RESTORED =		7
+};
+
+struct skl_ipc_dxstate_info {
+	u32 core_mask;
+	u32 dx_mask;
+};
+
+struct skl_ipc_header {
+	u32 primary;
+	u32 extension;
+};
+
+struct skl_sst {
+	struct device *dev;
+	struct sst_dsp *dsp;
+
+	/* boot */
+	wait_queue_head_t boot_wait;
+	bool boot_complete;
+
+	/* IPC messaging */
+	struct sst_generic_ipc ipc;
+};
+
+struct skl_ipc_init_instance_msg {
+	u32 module_id;
+	u32 instance_id;
+	u16 param_data_size;
+	u8 ppl_instance_id;
+	u8 core_id;
+};
+
+struct skl_ipc_bind_unbind_msg {
+	u32 module_id;
+	u32 instance_id;
+	u32 dst_module_id;
+	u32 dst_instance_id;
+	u8 src_queue;
+	u8 dst_queue;
+	bool bind;
+};
+
+struct skl_ipc_large_config_msg {
+	u32 module_id;
+	u32 instance_id;
+	u32 large_param_id;
+	u32 param_data_size;
+};
+
+#define SKL_IPC_BOOT_MSECS		3000
+
+#define SKL_IPC_D3_MASK	0
+#define SKL_IPC_D0_MASK	3
+
+irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context);
+
+int skl_ipc_create_pipeline(struct sst_generic_ipc *sst_ipc,
+		u16 ppl_mem_size, u8 ppl_type, u8 instance_id);
+
+int skl_ipc_delete_pipeline(struct sst_generic_ipc *sst_ipc, u8 instance_id);
+
+int skl_ipc_set_pipeline_state(struct sst_generic_ipc *sst_ipc,
+		u8 instance_id,	enum skl_ipc_pipeline_state state);
+
+int skl_ipc_save_pipeline(struct sst_generic_ipc *ipc,
+		u8 instance_id, int dma_id);
+
+int skl_ipc_restore_pipeline(struct sst_generic_ipc *ipc, u8 instance_id);
+
+int skl_ipc_init_instance(struct sst_generic_ipc *sst_ipc,
+		struct skl_ipc_init_instance_msg *msg, void *param_data);
+
+int skl_ipc_bind_unbind(struct sst_generic_ipc *sst_ipc,
+		struct skl_ipc_bind_unbind_msg *msg);
+
+int skl_ipc_set_dx(struct sst_generic_ipc *ipc,
+		u8 instance_id, u16 module_id, struct skl_ipc_dxstate_info *dx);
+
+int skl_ipc_set_large_config(struct sst_generic_ipc *ipc,
+		struct skl_ipc_large_config_msg *msg, u32 *param);
+
+void skl_ipc_int_enable(struct sst_dsp *dsp);
+void skl_ipc_op_int_enable(struct sst_dsp *ctx);
+void skl_ipc_int_disable(struct sst_dsp *dsp);
+
+bool skl_ipc_int_status(struct sst_dsp *dsp);
+void skl_ipc_free(struct sst_generic_ipc *ipc);
+int skl_ipc_init(struct device *dev, struct skl_sst *skl);
+
+#endif /* __SKL_IPC_H */
diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c
new file mode 100644
index 0000000..c18ea51
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst.c
@@ -0,0 +1,280 @@
+/*
+ * skl-sst.c - HDA DSP library functions for SKL platform
+ *
+ * Copyright (C) 2014-15, Intel Corporation.
+ * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
+ *	Jeeja KP <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+#include "../common/sst-ipc.h"
+#include "skl-sst-ipc.h"
+
+#define SKL_BASEFW_TIMEOUT	300
+#define SKL_INIT_TIMEOUT	1000
+
+/* Intel HD Audio SRAM Window 0 */
+#define SKL_ADSP_SRAM0_BASE	0x8000
+
+/* Firmware status window */
+#define SKL_ADSP_FW_STATUS	SKL_ADSP_SRAM0_BASE
+#define SKL_ADSP_ERROR_CODE	(SKL_ADSP_FW_STATUS + 0x4)
+
+#define SKL_INSTANCE_ID		0
+#define SKL_BASE_FW_MODULE_ID	0
+
+static bool skl_check_fw_status(struct sst_dsp *ctx, u32 status)
+{
+	u32 cur_sts;
+
+	cur_sts = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS) & SKL_FW_STS_MASK;
+
+	return (cur_sts == status);
+}
+
+static int skl_transfer_firmware(struct sst_dsp *ctx,
+		const void *basefw, u32 base_fw_size)
+{
+	int ret = 0;
+
+	ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, basefw, base_fw_size);
+	if (ret < 0)
+		return ret;
+
+	ret = sst_dsp_register_poll(ctx,
+			SKL_ADSP_FW_STATUS,
+			SKL_FW_STS_MASK,
+			SKL_FW_RFW_START,
+			SKL_BASEFW_TIMEOUT,
+			"Firmware boot");
+
+	ctx->cl_dev.ops.cl_stop_dma(ctx);
+
+	return ret;
+}
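+
+/*
+ * Boot flow, as implemented below: after power-up the ROM posts
+ * SKL_FW_INIT in the low nibble of SKL_ADSP_FW_STATUS; the driver then
+ * streams the image over the CL DMA, polls the same register for
+ * SKL_FW_RFW_START, and finally waits for the FW_READY IPC
+ * notification before marking the DSP SKL_DSP_RUNNING.
+ */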
+
+static int skl_load_base_firmware(struct sst_dsp *ctx)
+{
+	int ret = 0, i;
+	const struct firmware *fw = NULL;
+	struct skl_sst *skl = ctx->thread_context;
+	u32 reg;
+
+	ret = request_firmware(&fw, "dsp_fw_release.bin", ctx->dev);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Request firmware failed %d\n", ret);
+		skl_dsp_disable_core(ctx);
+		return -EIO;
+	}
+
+	/* enable Interrupt */
+	skl_ipc_int_enable(ctx);
+	skl_ipc_op_int_enable(ctx);
+
+	/* check ROM Status */
+	for (i = SKL_INIT_TIMEOUT; i > 0; --i) {
+		if (skl_check_fw_status(ctx, SKL_FW_INIT)) {
+			dev_dbg(ctx->dev,
+				"ROM loaded, we can continue with FW loading\n");
+			break;
+		}
+		mdelay(1);
+	}
+	if (!i) {
+		reg = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS);
+		dev_err(ctx->dev,
+			"Timeout waiting for ROM init done, reg:0x%x\n", reg);
+		ret = -EIO;
+		goto skl_load_base_firmware_failed;
+	}
+
+	ret = skl_transfer_firmware(ctx, fw->data, fw->size);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Transfer firmware failed%d\n", ret);
+		goto skl_load_base_firmware_failed;
+	} else {
+		ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
+					msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
+		if (ret == 0) {
+			dev_err(ctx->dev, "DSP boot failed, FW Ready timed-out\n");
+			ret = -EIO;
+			goto skl_load_base_firmware_failed;
+		}
+
+		dev_dbg(ctx->dev, "Download firmware successful%d\n", ret);
+		skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
+	}
+	release_firmware(fw);
+
+	return 0;
+
+skl_load_base_firmware_failed:
+	skl_dsp_disable_core(ctx);
+	release_firmware(fw);
+	return ret;
+}
+
+static int skl_set_dsp_D0(struct sst_dsp *ctx)
+{
+	int ret;
+
+	ret = skl_load_base_firmware(ctx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "unable to load firmware\n");
+		return ret;
+	}
+
+	skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
+
+	return ret;
+}
+
+static int skl_set_dsp_D3(struct sst_dsp *ctx)
+{
+	int ret;
+	struct skl_ipc_dxstate_info dx;
+	struct skl_sst *skl = ctx->thread_context;
+
+	dev_dbg(ctx->dev, "In %s:\n", __func__);
+	mutex_lock(&ctx->mutex);
+	if (!is_skl_dsp_running(ctx)) {
+		mutex_unlock(&ctx->mutex);
+		return 0;
+	}
+	mutex_unlock(&ctx->mutex);
+
+	dx.core_mask = SKL_DSP_CORE0_MASK;
+	dx.dx_mask = SKL_IPC_D3_MASK;
+	ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID, SKL_BASE_FW_MODULE_ID, &dx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to set DSP to D3 state\n");
+		return ret;
+	}
+
+	ret = skl_dsp_disable_core(ctx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "disable dsp core failed ret: %d\n", ret);
+		ret = -EIO;
+	}
+	skl_dsp_set_state_locked(ctx, SKL_DSP_RESET);
+
+	return ret;
+}
+
+static unsigned int skl_get_errorcode(struct sst_dsp *ctx)
+{
+	 return sst_dsp_shim_read(ctx, SKL_ADSP_ERROR_CODE);
+}
+
+static struct skl_dsp_fw_ops skl_fw_ops = {
+	.set_state_D0 = skl_set_dsp_D0,
+	.set_state_D3 = skl_set_dsp_D3,
+	.load_fw = skl_load_base_firmware,
+	.get_fw_errcode = skl_get_errorcode,
+};
+
+static struct sst_ops skl_ops = {
+	.irq_handler = skl_dsp_sst_interrupt,
+	.write = sst_shim32_write,
+	.read = sst_shim32_read,
+	.ram_read = sst_memcpy_fromio_32,
+	.ram_write = sst_memcpy_toio_32,
+	.free = skl_dsp_free,
+};
+
+static struct sst_dsp_device skl_dev = {
+	.thread = skl_dsp_irq_thread_handler,
+	.ops = &skl_ops,
+};
+
+int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
+		struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp)
+{
+	struct skl_sst *skl;
+	struct sst_dsp *sst;
+	int ret;
+
+	skl = devm_kzalloc(dev, sizeof(*skl), GFP_KERNEL);
+	if (skl == NULL)
+		return -ENOMEM;
+
+	skl->dev = dev;
+	skl_dev.thread_context = skl;
+
+	skl->dsp = skl_dsp_ctx_init(dev, &skl_dev, irq);
+	if (!skl->dsp) {
+		dev_err(skl->dev, "%s: no device\n", __func__);
+		return -ENODEV;
+	}
+
+	sst = skl->dsp;
+
+	sst->addr.lpe = mmio_base;
+	sst->addr.shim = mmio_base;
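+	/* Set up the IPC mailboxes in the DSP SRAM windows */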
+	sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
+			SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
+
+	sst->dsp_ops = dsp_ops;
+	sst->fw_ops = skl_fw_ops;
+
+	ret = skl_ipc_init(dev, skl);
+	if (ret)
+		return ret;
+
+	skl->boot_complete = false;
+	init_waitqueue_head(&skl->boot_wait);
+
+	ret = skl_dsp_boot(sst);
+	if (ret < 0) {
+		dev_err(skl->dev, "Boot dsp core failed ret: %d", ret);
+		goto free_ipc;
+	}
+
+	ret = skl_cldma_prepare(sst);
+	if (ret < 0) {
+		dev_err(dev, "CL dma prepare failed : %d", ret);
+		goto free_ipc;
+	}
+
+	ret = sst->fw_ops.load_fw(sst);
+	if (ret < 0) {
+		dev_err(dev, "Load base fw failed: %d\n", ret);
+		goto free_ipc;
+	}
+
+	if (dsp)
+		*dsp = skl;
+
+	return 0;
+
+free_ipc:
+	skl_ipc_free(&skl->ipc);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_sst_dsp_init);
+
+void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
+{
+	skl_ipc_free(&ctx->ipc);
+	ctx->dsp->cl_dev.ops.cl_cleanup_controller(ctx->dsp);
+	ctx->dsp->ops->free(ctx->dsp);
+}
+EXPORT_SYMBOL_GPL(skl_sst_dsp_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Skylake IPC driver");
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
new file mode 100644
index 0000000..8c7767b
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -0,0 +1,286 @@
+/*
+ *  skl-topology.h - Intel HDA Platform topology header file
+ *
+ *  Copyright (C) 2014-15 Intel Corp
+ *  Author: Jeeja KP <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#ifndef __SKL_TOPOLOGY_H__
+#define __SKL_TOPOLOGY_H__
+
+#include <linux/types.h>
+
+#include <sound/hdaudio_ext.h>
+#include <sound/soc.h>
+#include "skl.h"
+#include "skl-tplg-interface.h"
+
+#define BITS_PER_BYTE 8
+#define MAX_TS_GROUPS 8
+#define MAX_DMIC_TS_GROUPS 4
+#define MAX_FIXED_DMIC_PARAMS_SIZE 727
+
+/* Maximum number of coefficients for the up/down mixer module */
+#define UP_DOWN_MIXER_MAX_COEFF		6
+
+enum skl_channel_index {
+	SKL_CHANNEL_LEFT = 0,
+	SKL_CHANNEL_RIGHT = 1,
+	SKL_CHANNEL_CENTER = 2,
+	SKL_CHANNEL_LEFT_SURROUND = 3,
+	SKL_CHANNEL_CENTER_SURROUND = 3,
+	SKL_CHANNEL_RIGHT_SURROUND = 4,
+	SKL_CHANNEL_LFE = 7,
+	SKL_CHANNEL_INVALID = 0xF,
+};
+
+enum skl_bitdepth {
+	SKL_DEPTH_8BIT = 8,
+	SKL_DEPTH_16BIT = 16,
+	SKL_DEPTH_24BIT = 24,
+	SKL_DEPTH_32BIT = 32,
+	SKL_DEPTH_INVALID
+};
+
+enum skl_interleaving {
+	/* [s1_ch1...s1_chN,...,sM_ch1...sM_chN] */
+	SKL_INTERLEAVING_PER_CHANNEL = 0,
+	/* [s1_ch1...sM_ch1,...,s1_chN...sM_chN] */
+	SKL_INTERLEAVING_PER_SAMPLE = 1,
+};
+
+enum skl_s_freq {
+	SKL_FS_8000 = 8000,
+	SKL_FS_11025 = 11025,
+	SKL_FS_12000 = 12000,
+	SKL_FS_16000 = 16000,
+	SKL_FS_22050 = 22050,
+	SKL_FS_24000 = 24000,
+	SKL_FS_32000 = 32000,
+	SKL_FS_44100 = 44100,
+	SKL_FS_48000 = 48000,
+	SKL_FS_64000 = 64000,
+	SKL_FS_88200 = 88200,
+	SKL_FS_96000 = 96000,
+	SKL_FS_128000 = 128000,
+	SKL_FS_176400 = 176400,
+	SKL_FS_192000 = 192000,
+	SKL_FS_INVALID
+};
+
+enum skl_widget_type {
+	SKL_WIDGET_VMIXER = 1,
+	SKL_WIDGET_MIXER = 2,
+	SKL_WIDGET_PGA = 3,
+	SKL_WIDGET_MUX = 4
+};
+
+struct skl_audio_data_format {
+	enum skl_s_freq s_freq;
+	enum skl_bitdepth bit_depth;
+	u32 channel_map;
+	enum skl_ch_cfg ch_cfg;
+	enum skl_interleaving interleaving;
+	u8 number_of_channels;
+	u8 valid_bit_depth;
+	u8 sample_type;
+	u8 reserved[1];
+} __packed;
+
+struct skl_base_cfg {
+	u32 cps;
+	u32 ibs;
+	u32 obs;
+	u32 is_pages;
+	struct skl_audio_data_format audio_fmt;
+};
+
+struct skl_cpr_gtw_cfg {
+	u32 node_id;
+	u32 dma_buffer_size;
+	u32 config_length;
+	/* not mandatory; required only for DMIC/I2S */
+	u32 config_data[1];
+} __packed;
+
+struct skl_cpr_cfg {
+	struct skl_base_cfg base_cfg;
+	struct skl_audio_data_format out_fmt;
+	u32 cpr_feature_mask;
+	struct skl_cpr_gtw_cfg gtw_cfg;
+} __packed;
+
+
+struct skl_src_module_cfg {
+	struct skl_base_cfg base_cfg;
+	enum skl_s_freq src_cfg;
+} __packed;
+
+struct skl_up_down_mixer_cfg {
+	struct skl_base_cfg base_cfg;
+	enum skl_ch_cfg out_ch_cfg;
+	/* This should be set to 1 if user coefficients are required */
+	u32 coeff_sel;
+	/* Pass the user coeff in this array */
+	s32 coeff[UP_DOWN_MIXER_MAX_COEFF];
+} __packed;
+
+enum skl_dma_type {
+	SKL_DMA_HDA_HOST_OUTPUT_CLASS = 0,
+	SKL_DMA_HDA_HOST_INPUT_CLASS = 1,
+	SKL_DMA_HDA_HOST_INOUT_CLASS = 2,
+	SKL_DMA_HDA_LINK_OUTPUT_CLASS = 8,
+	SKL_DMA_HDA_LINK_INPUT_CLASS = 9,
+	SKL_DMA_HDA_LINK_INOUT_CLASS = 0xA,
+	SKL_DMA_DMIC_LINK_INPUT_CLASS = 0xB,
+	SKL_DMA_I2S_LINK_OUTPUT_CLASS = 0xC,
+	SKL_DMA_I2S_LINK_INPUT_CLASS = 0xD,
+};
+
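+/* Packs the dual-mono flag, time slot and I2S instance into one DMA node byte */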
+union skl_ssp_dma_node {
+	u8 val;
+	struct {
+		u8 dual_mono:1;
+		u8 time_slot:3;
+		u8 i2s_instance:4;
+	} dma_node;
+};
+
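+/* Gateway node id: a virtual DMA index plus a skl_dma_type in a single word */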
+union skl_connector_node_id {
+	u32 val;
+	struct {
+		u32 vindex:8;
+		u32 dma_type:4;
+		u32 rsvd:20;
+	} node;
+};
+
+struct skl_module_fmt {
+	u32 channels;
+	u32 s_freq;
+	u32 bit_depth;
+	u32 valid_bit_depth;
+	u32 ch_cfg;
+};
+
+struct skl_module_inst_id {
+	u32 module_id;
+	u32 instance_id;
+};
+
+struct skl_module_pin {
+	struct skl_module_inst_id id;
+	u8 pin_index;
+	bool is_dynamic;
+	bool in_use;
+};
+
+struct skl_specific_cfg {
+	u32 caps_size;
+	u32 *caps;
+};
+
+enum skl_pipe_state {
+	SKL_PIPE_INVALID = 0,
+	SKL_PIPE_CREATED = 1,
+	SKL_PIPE_PAUSED = 2,
+	SKL_PIPE_STARTED = 3
+};
+
+struct skl_pipe_module {
+	struct snd_soc_dapm_widget *w;
+	struct list_head node;
+};
+
+struct skl_pipe_params {
+	u8 host_dma_id;
+	u8 link_dma_id;
+	u32 ch;
+	u32 s_freq;
+	u32 s_fmt;
+	u8 linktype;
+	int stream;
+};
+
+struct skl_pipe {
+	u8 ppl_id;
+	u8 pipe_priority;
+	u16 conn_type;
+	u32 memory_pages;
+	struct skl_pipe_params *p_params;
+	enum skl_pipe_state state;
+	struct list_head w_list;
+};
+
+enum skl_module_state {
+	SKL_MODULE_UNINIT = 0,
+	SKL_MODULE_INIT_DONE = 1,
+	SKL_MODULE_LOADED = 2,
+	SKL_MODULE_UNLOADED = 3,
+	SKL_MODULE_BIND_DONE = 4
+};
+
+struct skl_module_cfg {
+	struct skl_module_inst_id id;
+	struct skl_module_fmt in_fmt;
+	struct skl_module_fmt out_fmt;
+	u8 max_in_queue;
+	u8 max_out_queue;
+	u8 in_queue_mask;
+	u8 out_queue_mask;
+	u8 in_queue;
+	u8 out_queue;
+	u32 mcps;
+	u32 ibs;
+	u32 obs;
+	u8 is_loadable;
+	u8 core_id;
+	u8 dev_type;
+	u8 dma_id;
+	u8 time_slot;
+	u32 params_fixup;
+	u32 converter;
+	u32 vbus_id;
+	struct skl_module_pin *m_in_pin;
+	struct skl_module_pin *m_out_pin;
+	enum skl_module_type m_type;
+	enum skl_hw_conn_type  hw_conn_type;
+	enum skl_module_state m_state;
+	struct skl_pipe *pipe;
+	struct skl_specific_cfg formats_config;
+};
+
+int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe);
+
+int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
+
+int skl_pause_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
+
+int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
+
+int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
+
+int skl_init_module(struct skl_sst *ctx, struct skl_module_cfg *module_config,
+	char *param);
+
+int skl_bind_modules(struct skl_sst *ctx, struct skl_module_cfg
+	*src_module, struct skl_module_cfg *dst_module);
+
+int skl_unbind_modules(struct skl_sst *ctx, struct skl_module_cfg
+	*src_module, struct skl_module_cfg *dst_module);
+
+enum skl_bitdepth skl_get_bit_depth(int params);
+#endif
diff --git a/sound/soc/intel/skylake/skl-tplg-interface.h b/sound/soc/intel/skylake/skl-tplg-interface.h
new file mode 100644
index 0000000..a506898
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-tplg-interface.h
@@ -0,0 +1,88 @@
+/*
+ * skl-tplg-interface.h - Intel DSP FW private data interface
+ *
+ * Copyright (C) 2015 Intel Corp
+ * Author: Jeeja KP <jeeja.kp@intel.com>
+ *	    Nilofer, Samreen <samreen.nilofer@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __HDA_TPLG_INTERFACE_H__
+#define __HDA_TPLG_INTERFACE_H__
+
+/**
+ * enum skl_ch_cfg - channel configuration
+ *
+ * @SKL_CH_CFG_MONO:	One channel only
+ * @SKL_CH_CFG_STEREO:	L & R
+ * @SKL_CH_CFG_2_1:	L, R & LFE
+ * @SKL_CH_CFG_3_0:	L, C & R
+ * @SKL_CH_CFG_3_1:	L, C, R & LFE
+ * @SKL_CH_CFG_QUATRO:	L, R, Ls & Rs
+ * @SKL_CH_CFG_4_0:	L, C, R & Cs
+ * @SKL_CH_CFG_5_0:	L, C, R, Ls & Rs
+ * @SKL_CH_CFG_5_1:	L, C, R, Ls, Rs & LFE
+ * @SKL_CH_CFG_DUAL_MONO: One channel replicated in two
+ * @SKL_CH_CFG_I2S_DUAL_STEREO_0: Stereo(L,R) in 4 slots, 1st stream:[ L, R, -, - ]
+ * @SKL_CH_CFG_I2S_DUAL_STEREO_1: Stereo(L,R) in 4 slots, 2nd stream:[ -, -, L, R ]
+ * @SKL_CH_CFG_INVALID:	Invalid
+ */
+enum skl_ch_cfg {
+	SKL_CH_CFG_MONO = 0,
+	SKL_CH_CFG_STEREO = 1,
+	SKL_CH_CFG_2_1 = 2,
+	SKL_CH_CFG_3_0 = 3,
+	SKL_CH_CFG_3_1 = 4,
+	SKL_CH_CFG_QUATRO = 5,
+	SKL_CH_CFG_4_0 = 6,
+	SKL_CH_CFG_5_0 = 7,
+	SKL_CH_CFG_5_1 = 8,
+	SKL_CH_CFG_DUAL_MONO = 9,
+	SKL_CH_CFG_I2S_DUAL_STEREO_0 = 10,
+	SKL_CH_CFG_I2S_DUAL_STEREO_1 = 11,
+	SKL_CH_CFG_INVALID
+};
+
+enum skl_module_type {
+	SKL_MODULE_TYPE_MIXER = 0,
+	SKL_MODULE_TYPE_COPIER,
+	SKL_MODULE_TYPE_UPDWMIX,
+	SKL_MODULE_TYPE_SRCINT
+};
+
+enum skl_core_affinity {
+	SKL_AFFINITY_CORE_0 = 0,
+	SKL_AFFINITY_CORE_1,
+	SKL_AFFINITY_CORE_MAX
+};
+
+enum skl_pipe_conn_type {
+	SKL_PIPE_CONN_TYPE_NONE = 0,
+	SKL_PIPE_CONN_TYPE_FE,
+	SKL_PIPE_CONN_TYPE_BE
+};
+
+enum skl_hw_conn_type {
+	SKL_CONN_NONE = 0,
+	SKL_CONN_SOURCE = 1,
+	SKL_CONN_SINK = 2
+};
+
+enum skl_dev_type {
+	SKL_DEVICE_BT = 0x0,
+	SKL_DEVICE_DMIC = 0x1,
+	SKL_DEVICE_I2S = 0x2,
+	SKL_DEVICE_SLIMBUS = 0x3,
+	SKL_DEVICE_HDALINK = 0x4,
+	SKL_DEVICE_NONE
+};
+#endif
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
new file mode 100644
index 0000000..348d094
--- /dev/null
+++ b/sound/soc/intel/skylake/skl.c
@@ -0,0 +1,536 @@
+/*
+ *  skl.c - Implementation of ASoC Intel SKL HD Audio driver
+ *
+ *  Copyright (C) 2014-2015 Intel Corp
+ *  Author: Jeeja KP <jeeja.kp@intel.com>
+ *
+ *  Derived mostly from Intel HDA driver with following copyrights:
+ *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
+ *                     PeiSen Hou <pshou@realtek.com.tw>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <sound/pcm.h>
+#include "skl.h"
+
+/*
+ * initialize the PCI registers
+ */
+static void skl_update_pci_byte(struct pci_dev *pci, unsigned int reg,
+			    unsigned char mask, unsigned char val)
+{
+	unsigned char data;
+
+	pci_read_config_byte(pci, reg, &data);
+	data &= ~mask;
+	data |= (val & mask);
+	pci_write_config_byte(pci, reg, data);
+}
+
+static void skl_init_pci(struct skl *skl)
+{
+	struct hdac_ext_bus *ebus = &skl->ebus;
+
+	/*
+	 * Clear bits 0-2 of PCI register TCSEL (at offset 0x44)
+	 * TCSEL == Traffic Class Select Register, which sets PCI express QOS
+	 * Ensuring these bits are 0 clears playback static on some HD Audio
+	 * codecs.
+	 * The PCI register TCSEL is defined in the Intel manuals.
+	 */
+	dev_dbg(ebus_to_hbus(ebus)->dev, "Clearing TCSEL\n");
+	skl_update_pci_byte(skl->pci, AZX_PCIREG_TCSEL, 0x07, 0);
+}
+
+/* called from IRQ */
+static void skl_stream_update(struct hdac_bus *bus, struct hdac_stream *hstr)
+{
+	snd_pcm_period_elapsed(hstr->substream);
+}
+
+static irqreturn_t skl_interrupt(int irq, void *dev_id)
+{
+	struct hdac_ext_bus *ebus = dev_id;
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	u32 status;
+
+	if (!pm_runtime_active(bus->dev))
+		return IRQ_NONE;
+
+	spin_lock(&bus->reg_lock);
+
+	status = snd_hdac_chip_readl(bus, INTSTS);
+	if (status == 0 || status == 0xffffffff) {
+		spin_unlock(&bus->reg_lock);
+		return IRQ_NONE;
+	}
+
+	/* clear rirb int */
+	status = snd_hdac_chip_readb(bus, RIRBSTS);
+	if (status & RIRB_INT_MASK) {
+		if (status & RIRB_INT_RESPONSE)
+			snd_hdac_bus_update_rirb(bus);
+		snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
+	}
+
+	spin_unlock(&bus->reg_lock);
+
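+	/* Let the threaded handler run if stream interrupts are still pending */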
+	return snd_hdac_chip_readl(bus, INTSTS) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
+
+static irqreturn_t skl_threaded_handler(int irq, void *dev_id)
+{
+	struct hdac_ext_bus *ebus = dev_id;
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	u32 status;
+
+	status = snd_hdac_chip_readl(bus, INTSTS);
+
+	snd_hdac_bus_handle_stream_irq(bus, status, skl_stream_update);
+
+	return IRQ_HANDLED;
+}
+
+static int skl_acquire_irq(struct hdac_ext_bus *ebus, int do_disconnect)
+{
+	struct skl *skl = ebus_to_skl(ebus);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	int ret;
+
+	ret = request_threaded_irq(skl->pci->irq, skl_interrupt,
+			skl_threaded_handler,
+			IRQF_SHARED,
+			KBUILD_MODNAME, ebus);
+	if (ret) {
+		dev_err(bus->dev,
+			"unable to grab IRQ %d, disabling device\n",
+			skl->pci->irq);
+		return ret;
+	}
+
+	bus->irq = skl->pci->irq;
+	pci_intx(skl->pci, 1);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * power management
+ */
+static int skl_suspend(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+
+	snd_hdac_bus_stop_chip(bus);
+	snd_hdac_bus_enter_link_reset(bus);
+
+	return 0;
+}
+
+static int skl_resume(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct skl *hda = ebus_to_skl(ebus);
+
+	skl_init_pci(hda);
+
+	snd_hdac_bus_init_chip(bus, 1);
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int skl_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+
+	dev_dbg(bus->dev, "in %s\n", __func__);
+
+	/* enable controller wake up event */
+	snd_hdac_chip_updatew(bus, WAKEEN, 0, STATESTS_INT_MASK);
+
+	snd_hdac_bus_stop_chip(bus);
+	snd_hdac_bus_enter_link_reset(bus);
+
+	return 0;
+}
+
+static int skl_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct skl *hda = ebus_to_skl(ebus);
+	int status;
+
+	dev_dbg(bus->dev, "in %s\n", __func__);
+
+	/* Read STATESTS before controller reset */
+	status = snd_hdac_chip_readw(bus, STATESTS);
+
+	skl_init_pci(hda);
+	snd_hdac_bus_init_chip(bus, true);
+	/* disable controller Wake Up event */
+	snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, 0);
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops skl_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(skl_suspend, skl_resume)
+	SET_RUNTIME_PM_OPS(skl_runtime_suspend, skl_runtime_resume, NULL)
+};
+
+/*
+ * destructor
+ */
+static int skl_free(struct hdac_ext_bus *ebus)
+{
+	struct skl *skl  = ebus_to_skl(ebus);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+
+	skl->init_failed = 1; /* to be sure */
+
+	snd_hdac_ext_stop_streams(ebus);
+
+	if (bus->irq >= 0)
+		free_irq(bus->irq, (void *)bus);
+	if (bus->remap_addr)
+		iounmap(bus->remap_addr);
+
+	snd_hdac_bus_free_stream_pages(bus);
+	snd_hdac_stream_free_all(ebus);
+	snd_hdac_link_free_all(ebus);
+	pci_release_regions(skl->pci);
+	pci_disable_device(skl->pci);
+
+	snd_hdac_ext_bus_exit(ebus);
+
+	return 0;
+}
+
+static int skl_dmic_device_register(struct skl *skl)
+{
+	struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+	struct platform_device *pdev;
+	int ret;
+
+	/* SKL has one dmic port, so allocate dmic device for this */
+	pdev = platform_device_alloc("dmic-codec", -1);
+	if (!pdev) {
+		dev_err(bus->dev, "failed to allocate dmic device\n");
+		return -ENOMEM;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		dev_err(bus->dev, "failed to add dmic device: %d\n", ret);
+		platform_device_put(pdev);
+		return ret;
+	}
+	skl->dmic_dev = pdev;
+
+	return 0;
+}
+
+static void skl_dmic_device_unregister(struct skl *skl)
+{
+	if (skl->dmic_dev)
+		platform_device_unregister(skl->dmic_dev);
+}
+
+/*
+ * Probe the given codec address
+ */
+static int probe_codec(struct hdac_ext_bus *ebus, int addr)
+{
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
+		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
+	unsigned int res;
+
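+	/* Read the root node vendor ID; a valid reply means a codec is present */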
+	mutex_lock(&bus->cmd_mutex);
+	snd_hdac_bus_send_cmd(bus, cmd);
+	snd_hdac_bus_get_response(bus, addr, &res);
+	mutex_unlock(&bus->cmd_mutex);
+	if (res == -1)
+		return -EIO;
+	dev_dbg(bus->dev, "codec #%d probed OK\n", addr);
+
+	return snd_hdac_ext_bus_device_init(ebus, addr);
+}
+
+/* Codec initialization */
+static int skl_codec_create(struct hdac_ext_bus *ebus)
+{
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	int c, max_slots;
+
+	max_slots = HDA_MAX_CODECS;
+
+	/* First try to probe all given codec slots */
+	for (c = 0; c < max_slots; c++) {
+		if ((bus->codec_mask & (1 << c))) {
+			if (probe_codec(ebus, c) < 0) {
+				/*
+				 * Some BIOSen give you wrong codec addresses
+				 * that don't exist
+				 */
+				dev_warn(bus->dev,
+					 "Codec #%d probe error; disabling it...\n", c);
+				bus->codec_mask &= ~(1 << c);
+				/*
+				 * Worse, accessing a non-existing codec
+				 * often screws up the controller bus and
+				 * disturbs further communication. Thus if
+				 * an error occurs during probing, it is
+				 * better to reset the controller bus to
+				 * get back to a sane state.
+				 */
+				snd_hdac_bus_stop_chip(bus);
+				snd_hdac_bus_init_chip(bus, true);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static const struct hdac_bus_ops bus_core_ops = {
+	.command = snd_hdac_bus_send_cmd,
+	.get_response = snd_hdac_bus_get_response,
+};
+
+/*
+ * constructor
+ */
+static int skl_create(struct pci_dev *pci,
+		      const struct hdac_io_ops *io_ops,
+		      struct skl **rskl)
+{
+	struct skl *skl;
+	struct hdac_ext_bus *ebus;
+
+	int err;
+
+	*rskl = NULL;
+
+	err = pci_enable_device(pci);
+	if (err < 0)
+		return err;
+
+	skl = devm_kzalloc(&pci->dev, sizeof(*skl), GFP_KERNEL);
+	if (!skl) {
+		pci_disable_device(pci);
+		return -ENOMEM;
+	}
+	ebus = &skl->ebus;
+	snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops);
+	ebus->bus.use_posbuf = 1;
+	skl->pci = pci;
+
+	ebus->bus.bdl_pos_adj = 0;
+
+	*rskl = skl;
+
+	return 0;
+}
+
+static int skl_first_init(struct hdac_ext_bus *ebus)
+{
+	struct skl *skl = ebus_to_skl(ebus);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct pci_dev *pci = skl->pci;
+	int err;
+	unsigned short gcap;
+	int cp_streams, pb_streams, start_idx;
+
+	err = pci_request_regions(pci, "Skylake HD audio");
+	if (err < 0)
+		return err;
+
+	bus->addr = pci_resource_start(pci, 0);
+	bus->remap_addr = pci_ioremap_bar(pci, 0);
+	if (bus->remap_addr == NULL) {
+		dev_err(bus->dev, "ioremap error\n");
+		return -ENXIO;
+	}
+
+	snd_hdac_ext_bus_parse_capabilities(ebus);
+
+	if (skl_acquire_irq(ebus, 0) < 0)
+		return -EBUSY;
+
+	pci_set_master(pci);
+	synchronize_irq(bus->irq);
+
+	gcap = snd_hdac_chip_readw(bus, GCAP);
+	dev_dbg(bus->dev, "chipset global capabilities = 0x%x\n", gcap);
+
+	/* allow 64bit DMA address if supported by H/W */
+	if (!dma_set_mask(bus->dev, DMA_BIT_MASK(64))) {
+		dma_set_coherent_mask(bus->dev, DMA_BIT_MASK(64));
+	} else {
+		dma_set_mask(bus->dev, DMA_BIT_MASK(32));
+		dma_set_coherent_mask(bus->dev, DMA_BIT_MASK(32));
+	}
+
+	/* read number of streams from GCAP register */
+	cp_streams = (gcap >> 8) & 0x0f;
+	pb_streams = (gcap >> 12) & 0x0f;
+
+	if (!pb_streams && !cp_streams)
+		return -EIO;
+
+	ebus->num_streams = cp_streams + pb_streams;
+
+	/* initialize streams */
+	snd_hdac_ext_stream_init_all
+		(ebus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
+	start_idx = cp_streams;
+	snd_hdac_ext_stream_init_all
+		(ebus, start_idx, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);
+
+	err = snd_hdac_bus_alloc_stream_pages(bus);
+	if (err < 0)
+		return err;
+
+	/* initialize chip */
+	skl_init_pci(skl);
+
+	snd_hdac_bus_init_chip(bus, true);
+
+	/* codec detection */
+	if (!bus->codec_mask) {
+		dev_err(bus->dev, "no codecs found!\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int skl_probe(struct pci_dev *pci,
+		     const struct pci_device_id *pci_id)
+{
+	struct skl *skl;
+	struct hdac_ext_bus *ebus = NULL;
+	struct hdac_bus *bus = NULL;
+	int err;
+
+	/* we use ext core ops, so provide NULL for ops here */
+	err = skl_create(pci, NULL, &skl);
+	if (err < 0)
+		return err;
+
+	ebus = &skl->ebus;
+	bus = ebus_to_hbus(ebus);
+
+	err = skl_first_init(ebus);
+	if (err < 0)
+		goto out_free;
+
+	pci_set_drvdata(skl->pci, ebus);
+
+	/* check if dsp is there */
+	if (ebus->ppcap) {
+		/* TODO register with dsp IPC */
+		dev_dbg(bus->dev, "Register dsp\n");
+	}
+
+	if (ebus->mlcap)
+		snd_hdac_ext_bus_get_ml_capabilities(ebus);
+
+	/* create device for soc dmic */
+	err = skl_dmic_device_register(skl);
+	if (err < 0)
+		goto out_free;
+
+	/* register platform dai and controls */
+	err = skl_platform_register(bus->dev);
+	if (err < 0)
+		goto out_dmic_free;
+
+	/* create codec instances */
+	err = skl_codec_create(ebus);
+	if (err < 0)
+		goto out_unregister;
+
+	/* configure PM */
+	pm_runtime_set_autosuspend_delay(bus->dev, SKL_SUSPEND_DELAY);
+	pm_runtime_use_autosuspend(bus->dev);
+	pm_runtime_put_noidle(bus->dev);
+	pm_runtime_allow(bus->dev);
+
+	return 0;
+
+out_unregister:
+	skl_platform_unregister(bus->dev);
+out_dmic_free:
+	skl_dmic_device_unregister(skl);
+out_free:
+	skl->init_failed = 1;
+	skl_free(ebus);
+
+	return err;
+}
+
+static void skl_remove(struct pci_dev *pci)
+{
+	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+	struct skl *skl = ebus_to_skl(ebus);
+
+	if (pci_dev_run_wake(pci))
+		pm_runtime_get_noresume(&pci->dev);
+	pci_dev_put(pci);
+	skl_platform_unregister(&pci->dev);
+	skl_dmic_device_unregister(skl);
+	skl_free(ebus);
+	dev_set_drvdata(&pci->dev, NULL);
+}
+
+/* PCI IDs */
+static const struct pci_device_id skl_ids[] = {
+	/* Sunrise Point-LP */
+	{ PCI_DEVICE(0x8086, 0x9d70), 0},
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, skl_ids);
+
+/* pci_driver definition */
+static struct pci_driver skl_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = skl_ids,
+	.probe = skl_probe,
+	.remove = skl_remove,
+	.driver = {
+		.pm = &skl_pm,
+	},
+};
+module_pci_driver(skl_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Skylake ASoC HDA driver");
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
new file mode 100644
index 0000000..f7fdbb0
--- /dev/null
+++ b/sound/soc/intel/skylake/skl.h
@@ -0,0 +1,84 @@
+/*
+ *  skl.h - HD Audio skylake definitions.
+ *
+ *  Copyright (C) 2015 Intel Corp
+ *  Author: Jeeja KP <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#ifndef __SOUND_SOC_SKL_H
+#define __SOUND_SOC_SKL_H
+
+#include <sound/hda_register.h>
+#include <sound/hdaudio_ext.h>
+#include "skl-nhlt.h"
+
+#define SKL_SUSPEND_DELAY 2000
+
+/* Vendor Specific Registers */
+#define AZX_REG_VS_EM1			0x1000
+#define AZX_REG_VS_INRC			0x1004
+#define AZX_REG_VS_OUTRC		0x1008
+#define AZX_REG_VS_FIFOTRK		0x100C
+#define AZX_REG_VS_FIFOTRK2		0x1010
+#define AZX_REG_VS_EM2			0x1030
+#define AZX_REG_VS_EM3L			0x1038
+#define AZX_REG_VS_EM3U			0x103C
+#define AZX_REG_VS_EM4L			0x1040
+#define AZX_REG_VS_EM4U			0x1044
+#define AZX_REG_VS_LTRC			0x1048
+#define AZX_REG_VS_D0I3C		0x104A
+#define AZX_REG_VS_PCE			0x104B
+#define AZX_REG_VS_L2MAGC		0x1050
+#define AZX_REG_VS_L2LAHPT		0x1054
+#define AZX_REG_VS_SDXDPIB_XBASE	0x1084
+#define AZX_REG_VS_SDXDPIB_XINTERVAL	0x20
+#define AZX_REG_VS_SDXEFIFOS_XBASE	0x1094
+#define AZX_REG_VS_SDXEFIFOS_XINTERVAL	0x20
+
+struct skl {
+	struct hdac_ext_bus ebus;
+	struct pci_dev *pci;
+
+	unsigned int init_failed:1; /* delayed init failed */
+	struct platform_device *dmic_dev;
+
+	void __iomem *nhlt; /* nhlt ptr */
+	struct skl_sst *skl_sst; /* sst skl ctx */
+};
+
+#define skl_to_ebus(s)	(&(s)->ebus)
+#define ebus_to_skl(sbus) \
+	container_of(sbus, struct skl, ebus)
+
+/* to pass dai dma data */
+struct skl_dma_params {
+	u32 format;
+	u8 stream_tag;
+};
+
+int skl_platform_unregister(struct device *dev);
+int skl_platform_register(struct device *dev);
+
+void __iomem *skl_nhlt_init(struct device *dev);
+void skl_nhlt_free(void __iomem *addr);
+struct nhlt_specific_cfg *skl_get_ep_blob(struct skl *skl, u32 instance,
+			u8 link_type, u8 s_fmt, u8 no_ch, u32 s_rate, u8 dirn);
+
+int skl_init_dsp(struct skl *skl);
+void skl_free_dsp(struct skl *skl);
+int skl_suspend_dsp(struct skl *skl);
+int skl_resume_dsp(struct skl *skl);
+#endif /* __SOUND_SOC_SKL_H */
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index 4cf2245..dbfdfe9 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -148,10 +148,14 @@
 	dram = mv_mbus_dram_info();
 	addr = substream->dma_buffer.addr;
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
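+		/* Only one substream per direction can own the MBUS window */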
+		if (priv->substream_play)
+			return -EBUSY;
 		priv->substream_play = substream;
 		kirkwood_dma_conf_mbus_windows(priv->io,
 			KIRKWOOD_PLAYBACK_WIN, addr, dram);
 	} else {
+		if (priv->substream_rec)
+			return -EBUSY;
 		priv->substream_rec = substream;
 		kirkwood_dma_conf_mbus_windows(priv->io,
 			KIRKWOOD_RECORD_WIN, addr, dram);
diff --git a/sound/soc/mediatek/mt8173-max98090.c b/sound/soc/mediatek/mt8173-max98090.c
index 2d2536a..684e8a7 100644
--- a/sound/soc/mediatek/mt8173-max98090.c
+++ b/sound/soc/mediatek/mt8173-max98090.c
@@ -136,6 +136,7 @@
 
 static struct snd_soc_card mt8173_max98090_card = {
 	.name = "mt8173-max98090",
+	.owner = THIS_MODULE,
 	.dai_link = mt8173_max98090_dais,
 	.num_links = ARRAY_SIZE(mt8173_max98090_dais),
 	.controls = mt8173_max98090_controls,
@@ -202,7 +203,6 @@
 static struct platform_driver mt8173_max98090_driver = {
 	.driver = {
 		   .name = "mt8173-max98090",
-		   .owner = THIS_MODULE,
 		   .of_match_table = mt8173_max98090_dt_match,
 #ifdef CONFIG_PM
 		   .pm = &snd_soc_pm_ops,
diff --git a/sound/soc/mediatek/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
index 6f52eca..86cf975 100644
--- a/sound/soc/mediatek/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
@@ -191,6 +191,7 @@
 
 static struct snd_soc_card mt8173_rt5650_rt5676_card = {
 	.name = "mtk-rt5650-rt5676",
+	.owner = THIS_MODULE,
 	.dai_link = mt8173_rt5650_rt5676_dais,
 	.num_links = ARRAY_SIZE(mt8173_rt5650_rt5676_dais),
 	.codec_conf = mt8173_rt5650_rt5676_codec_conf,
@@ -269,7 +270,6 @@
 static struct platform_driver mt8173_rt5650_rt5676_driver = {
 	.driver = {
 		   .name = "mtk-rt5650-rt5676",
-		   .owner = THIS_MODULE,
 		   .of_match_table = mt8173_rt5650_rt5676_dt_match,
 #ifdef CONFIG_PM
 		   .pm = &snd_soc_pm_ops,
diff --git a/sound/soc/mediatek/mtk-afe-common.h b/sound/soc/mediatek/mtk-afe-common.h
index a88b175..cc4393c 100644
--- a/sound/soc/mediatek/mtk-afe-common.h
+++ b/sound/soc/mediatek/mtk-afe-common.h
@@ -98,12 +98,4 @@
 	const struct mtk_afe_irq_data *irqdata;
 };
 
-struct mtk_afe {
-	/* address for ioremap audio hardware register */
-	void __iomem *base_addr;
-	struct device *dev;
-	struct regmap *regmap;
-	struct mtk_afe_memif memif[MTK_AFE_MEMIF_NUM];
-	struct clk *clocks[MTK_CLK_NUM];
-};
 #endif
diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mtk-afe-pcm.c
index 9863da7..d190fe0 100644
--- a/sound/soc/mediatek/mtk-afe-pcm.c
+++ b/sound/soc/mediatek/mtk-afe-pcm.c
@@ -45,18 +45,21 @@
 /* Memory interface */
 #define AFE_DL1_BASE		0x0040
 #define AFE_DL1_CUR		0x0044
+#define AFE_DL1_END		0x0048
 #define AFE_DL2_BASE		0x0050
 #define AFE_DL2_CUR		0x0054
 #define AFE_AWB_BASE		0x0070
 #define AFE_AWB_CUR		0x007c
 #define AFE_VUL_BASE		0x0080
 #define AFE_VUL_CUR		0x008c
+#define AFE_VUL_END		0x0088
 #define AFE_DAI_BASE		0x0090
 #define AFE_DAI_CUR		0x009c
 #define AFE_MOD_PCM_BASE	0x0330
 #define AFE_MOD_PCM_CUR		0x033c
 #define AFE_HDMI_OUT_BASE	0x0374
 #define AFE_HDMI_OUT_CUR	0x0378
+#define AFE_HDMI_OUT_END	0x037c
 
 #define AFE_ADDA2_TOP_CON0	0x0600
 
@@ -127,6 +130,34 @@
 	AFE_TDM_CH_ZERO,
 };
 
+static const unsigned int mtk_afe_backup_list[] = {
+	AUDIO_TOP_CON0,
+	AFE_CONN1,
+	AFE_CONN2,
+	AFE_CONN7,
+	AFE_CONN8,
+	AFE_DAC_CON1,
+	AFE_DL1_BASE,
+	AFE_DL1_END,
+	AFE_VUL_BASE,
+	AFE_VUL_END,
+	AFE_HDMI_OUT_BASE,
+	AFE_HDMI_OUT_END,
+	AFE_HDMI_CONN0,
+	AFE_DAC_CON0,
+};
+
+struct mtk_afe {
+	/* address for ioremap audio hardware register */
+	void __iomem *base_addr;
+	struct device *dev;
+	struct regmap *regmap;
+	struct mtk_afe_memif memif[MTK_AFE_MEMIF_NUM];
+	struct clk *clocks[MTK_CLK_NUM];
+	unsigned int backup_regs[ARRAY_SIZE(mtk_afe_backup_list)];
+	bool suspended;
+};
+
 static const struct snd_pcm_hardware mtk_afe_hardware = {
 	.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 		 SNDRV_PCM_INFO_MMAP_VALID),
@@ -722,11 +753,53 @@
 
 };
 
+static int mtk_afe_runtime_suspend(struct device *dev);
+static int mtk_afe_runtime_resume(struct device *dev);
+
+static int mtk_afe_dai_suspend(struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	int i;
+
+	dev_dbg(afe->dev, "%s\n", __func__);
+	if (pm_runtime_status_suspended(afe->dev) || afe->suspended)
+		return 0;
+
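+	/* Save registers that are lost while the audio block is powered down */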
+	for (i = 0; i < ARRAY_SIZE(mtk_afe_backup_list); i++)
+		regmap_read(afe->regmap, mtk_afe_backup_list[i],
+			    &afe->backup_regs[i]);
+
+	afe->suspended = true;
+	mtk_afe_runtime_suspend(afe->dev);
+	return 0;
+}
+
+static int mtk_afe_dai_resume(struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	int i = 0;
+
+	dev_dbg(afe->dev, "%s\n", __func__);
+	if (pm_runtime_status_suspended(afe->dev) || !afe->suspended)
+		return 0;
+
+	mtk_afe_runtime_resume(afe->dev);
+
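+	/* Restore the registers saved in mtk_afe_dai_suspend() */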
+	for (i = 0; i < ARRAY_SIZE(mtk_afe_backup_list); i++)
+		regmap_write(afe->regmap, mtk_afe_backup_list[i],
+			     afe->backup_regs[i]);
+
+	afe->suspended = false;
+	return 0;
+}
+
 static struct snd_soc_dai_driver mtk_afe_pcm_dais[] = {
 	/* FE DAIs: memory interfaces to CPU */
 	{
 		.name = "DL1", /* downlink 1 */
 		.id = MTK_AFE_MEMIF_DL1,
+		.suspend = mtk_afe_dai_suspend,
+		.resume = mtk_afe_dai_resume,
 		.playback = {
 			.stream_name = "DL1",
 			.channels_min = 1,
@@ -738,6 +811,8 @@
 	}, {
 		.name = "VUL", /* voice uplink */
 		.id = MTK_AFE_MEMIF_VUL,
+		.suspend = mtk_afe_dai_suspend,
+		.resume = mtk_afe_dai_resume,
 		.capture = {
 			.stream_name = "VUL",
 			.channels_min = 1,
@@ -774,6 +849,8 @@
 	{
 		.name = "HDMI",
 		.id = MTK_AFE_MEMIF_HDMI,
+		.suspend = mtk_afe_dai_suspend,
+		.resume = mtk_afe_dai_resume,
 		.playback = {
 			.stream_name = "HDMI",
 			.channels_min = 2,
@@ -820,10 +897,6 @@
 };
 
 static const struct snd_soc_dapm_widget mtk_afe_pcm_widgets[] = {
-	/* Backend DAIs  */
-	SND_SOC_DAPM_AIF_IN("I2S Capture", NULL, 0, SND_SOC_NOPM, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("I2S Playback", NULL, 0, SND_SOC_NOPM, 0, 0),
-
 	/* inter-connections */
 	SND_SOC_DAPM_MIXER("I05", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_MIXER("I06", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -855,11 +928,6 @@
 	{ "O10", "I18 Switch", "I18" },
 };
 
-static const struct snd_soc_dapm_widget mtk_afe_hdmi_widgets[] = {
-	/* Backend DAIs  */
-	SND_SOC_DAPM_AIF_OUT("HDMIO Playback", NULL, 0, SND_SOC_NOPM, 0, 0),
-};
-
 static const struct snd_soc_dapm_route mtk_afe_hdmi_routes[] = {
 	{"HDMIO Playback", NULL, "HDMI"},
 };
@@ -874,8 +942,6 @@
 
 static const struct snd_soc_component_driver mtk_afe_hdmi_dai_component = {
 	.name = "mtk-afe-hdmi-dai",
-	.dapm_widgets = mtk_afe_hdmi_widgets,
-	.num_dapm_widgets = ARRAY_SIZE(mtk_afe_hdmi_widgets),
 	.dapm_routes = mtk_afe_hdmi_routes,
 	.num_dapm_routes = ARRAY_SIZE(mtk_afe_hdmi_routes),
 };
@@ -1220,7 +1286,6 @@
 static struct platform_driver mtk_afe_pcm_driver = {
 	.driver = {
 		   .name = "mtk-afe-pcm",
-		   .owner = THIS_MODULE,
 		   .of_match_table = mtk_afe_pcm_dt_match,
 		   .pm = &mtk_afe_pm_ops,
 	},
diff --git a/sound/soc/nuc900/nuc900-pcm.c b/sound/soc/nuc900/nuc900-pcm.c
index 5ae5ca1..e093261 100644
--- a/sound/soc/nuc900/nuc900-pcm.c
+++ b/sound/soc/nuc900/nuc900-pcm.c
@@ -308,13 +308,7 @@
 
 static int nuc900_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &nuc900_soc_platform);
-}
-
-static int nuc900_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev, &nuc900_soc_platform);
 }
 
 static struct platform_driver nuc900_pcm_driver = {
@@ -323,7 +317,6 @@
 	},
 
 	.probe = nuc900_soc_platform_probe,
-	.remove = nuc900_soc_platform_remove,
 };
 
 module_platform_driver(nuc900_pcm_driver);
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index 68a1252..c7563e2 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -965,25 +965,15 @@
 	mcbsp->free = true;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
-	if (!res) {
+	if (!res)
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-		if (!res) {
-			dev_err(mcbsp->dev, "invalid memory resource\n");
-			return -ENOMEM;
-		}
-	}
-	if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
-				     dev_name(&pdev->dev))) {
-		dev_err(mcbsp->dev, "memory region already claimed\n");
-		return -ENODEV;
-	}
+
+	mcbsp->io_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(mcbsp->io_base))
+		return PTR_ERR(mcbsp->io_base);
 
 	mcbsp->phys_base = res->start;
 	mcbsp->reg_cache_size = resource_size(res);
-	mcbsp->io_base = devm_ioremap(&pdev->dev, res->start,
-				      resource_size(res));
-	if (!mcbsp->io_base)
-		return -ENOMEM;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
 	if (!res)
diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c
index aeef25c..584b237 100644
--- a/sound/soc/omap/omap-hdmi-audio.c
+++ b/sound/soc/omap/omap-hdmi-audio.c
@@ -81,7 +81,15 @@
 	ret = snd_pcm_hw_constraint_step(substream->runtime, 0,
 					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128);
 	if (ret < 0) {
-		dev_err(dai->dev, "could not apply constraint\n");
+		dev_err(dai->dev, "Could not apply period constraint: %d\n",
+			ret);
+		return ret;
+	}
+	ret = snd_pcm_hw_constraint_step(substream->runtime, 0,
+					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 128);
+	if (ret < 0) {
+		dev_err(dai->dev, "Could not apply buffer constraint: %d\n",
+			ret);
 		return ret;
 	}
 
diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c
index 076bec6..732e749 100644
--- a/sound/soc/omap/omap3pandora.c
+++ b/sound/soc/omap/omap3pandora.c
@@ -154,8 +154,7 @@
 
 static int omap3pandora_out_init(struct snd_soc_pcm_runtime *rtd)
 {
-	struct snd_soc_codec *codec = rtd->codec;
-	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	struct snd_soc_dapm_context *dapm = &rtd->card->dapm;
 
 	/* All TWL4030 output pins are floating */
 	snd_soc_dapm_nc_pin(dapm, "EARPIECE");
@@ -174,8 +173,7 @@
 
 static int omap3pandora_in_init(struct snd_soc_pcm_runtime *rtd)
 {
-	struct snd_soc_codec *codec = rtd->codec;
-	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	struct snd_soc_dapm_context *dapm = &rtd->card->dapm;
 
 	/* Not connected */
 	snd_soc_dapm_nc_pin(dapm, "HSMIC");
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 1eb45dc..51e790d 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -232,13 +232,7 @@
 		mmp_pcm_hardware[SNDRV_PCM_STREAM_CAPTURE].period_bytes_max =
 						pdata->period_max_capture;
 	}
-	return snd_soc_register_platform(&pdev->dev, &mmp_soc_platform);
-}
-
-static int mmp_pcm_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev, &mmp_soc_platform);
 }
 
 static struct platform_driver mmp_pcm_driver = {
@@ -247,7 +241,6 @@
 	},
 
 	.probe = mmp_pcm_probe,
-	.remove = mmp_pcm_remove,
 };
 
 module_platform_driver(mmp_pcm_driver);
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index fbe2e93..3da485e 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -813,14 +813,8 @@
 
 static int asoc_ssp_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_component(&pdev->dev, &pxa_ssp_component,
-					  &pxa_ssp_dai, 1);
-}
-
-static int asoc_ssp_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_component(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_component(&pdev->dev, &pxa_ssp_component,
+					       &pxa_ssp_dai, 1);
 }
 
 static struct platform_driver asoc_ssp_driver = {
@@ -830,7 +824,6 @@
 	},
 
 	.probe = asoc_ssp_probe,
-	.remove = asoc_ssp_remove,
 };
 
 module_platform_driver(asoc_ssp_driver);
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index e68290c..6b4e400 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -367,19 +367,12 @@
 
 static int pxa2xx_i2s_drv_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_component(&pdev->dev, &pxa_i2s_component,
-					  &pxa_i2s_dai, 1);
-}
-
-static int pxa2xx_i2s_drv_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_component(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_component(&pdev->dev, &pxa_i2s_component,
+					       &pxa_i2s_dai, 1);
 }
 
 static struct platform_driver pxa2xx_i2s_driver = {
 	.probe = pxa2xx_i2s_drv_probe,
-	.remove = pxa2xx_i2s_drv_remove,
 
 	.driver = {
 		.name = "pxa2xx-i2s",
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index a51c9da..831ee37 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -124,13 +124,7 @@
 
 static int pxa2xx_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &pxa2xx_soc_platform);
-}
-
-static int pxa2xx_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev, &pxa2xx_soc_platform);
 }
 
 #ifdef CONFIG_OF
@@ -147,7 +141,6 @@
 	},
 
 	.probe = pxa2xx_soc_platform_probe,
-	.remove = pxa2xx_soc_platform_remove,
 };
 
 module_platform_driver(pxa_pcm_driver);
diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
index 807fedf..3cc252e 100644
--- a/sound/soc/qcom/Kconfig
+++ b/sound/soc/qcom/Kconfig
@@ -1,5 +1,6 @@
 config SND_SOC_QCOM
 	tristate "ASoC support for QCOM platforms"
+	depends on ARCH_QCOM || COMPILE_TEST
 	help
           Say Y or M if you want to add support to use audio devices
           in Qualcomm Technologies SOC-based platforms.
@@ -14,19 +15,17 @@
 
 config SND_SOC_LPASS_IPQ806X
 	tristate
-	depends on SND_SOC_QCOM
 	select SND_SOC_LPASS_CPU
 	select SND_SOC_LPASS_PLATFORM
 
 config SND_SOC_LPASS_APQ8016
 	tristate
-	depends on SND_SOC_QCOM
 	select SND_SOC_LPASS_CPU
 	select SND_SOC_LPASS_PLATFORM
 
 config SND_SOC_STORM
 	tristate "ASoC I2S support for Storm boards"
-	depends on SND_SOC_QCOM && (ARCH_QCOM || COMPILE_TEST)
+	depends on SND_SOC_QCOM
 	select SND_SOC_LPASS_IPQ806X
 	select SND_SOC_MAX98357A
 	help
@@ -35,7 +34,7 @@
 
 config SND_SOC_APQ8016_SBC
 	tristate "SoC Audio support for APQ8016 SBC platforms"
-	depends on SND_SOC_QCOM && (ARCH_QCOM || COMPILE_TEST)
+	depends on SND_SOC_QCOM
 	select SND_SOC_LPASS_APQ8016
 	help
           Support for Qualcomm Technologies LPASS audio block in
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index 23f3d59..97bc202 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -235,7 +235,7 @@
 	return ret;
 }
 
-struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
+const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
 	.set_sysclk	= lpass_cpu_daiops_set_sysclk,
 	.startup	= lpass_cpu_daiops_startup,
 	.shutdown	= lpass_cpu_daiops_shutdown,
diff --git a/sound/soc/qcom/lpass-ipq806x.c b/sound/soc/qcom/lpass-ipq806x.c
index 7356d3a..7a41679 100644
--- a/sound/soc/qcom/lpass-ipq806x.c
+++ b/sound/soc/qcom/lpass-ipq806x.c
@@ -73,7 +73,7 @@
 	return 0;
 }
 
-struct lpass_variant ipq806x_data = {
+static struct lpass_variant ipq806x_data = {
 	.i2sctrl_reg_base	= 0x0010,
 	.i2sctrl_reg_stride	= 0x04,
 	.i2s_ports		= 5,
diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
index d6e86c1..0b63e2e 100644
--- a/sound/soc/qcom/lpass.h
+++ b/sound/soc/qcom/lpass.h
@@ -93,6 +93,6 @@
 int asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev);
 int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev);
 int asoc_qcom_lpass_cpu_dai_probe(struct snd_soc_dai *dai);
-extern struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops;
+extern const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops;
 
 #endif /* __LPASS_H__ */
diff --git a/sound/soc/rockchip/Kconfig b/sound/soc/rockchip/Kconfig
index e181826..58bae8e 100644
--- a/sound/soc/rockchip/Kconfig
+++ b/sound/soc/rockchip/Kconfig
@@ -14,3 +14,22 @@
 	  Say Y or M if you want to add support for I2S driver for
 	  Rockchip I2S device. The device supports up to a maximum
 	  of 8 channels each for play and record.
+
+config SND_SOC_ROCKCHIP_MAX98090
+	tristate "ASoC support for Rockchip boards using a MAX98090 codec"
+	depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB
+	select SND_SOC_ROCKCHIP_I2S
+	select SND_SOC_MAX98090
+	select SND_SOC_TS3A227E
+	help
+	  Say Y or M here if you want to add support for SoC audio on Rockchip
+	  boards using the MAX98090 codec, such as Veyron.
+
+config SND_SOC_ROCKCHIP_RT5645
+	tristate "ASoC support for Rockchip boards using a RT5645/RT5650 codec"
+	depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB
+	select SND_SOC_ROCKCHIP_I2S
+	select SND_SOC_RT5645
+	help
+	  Say Y or M here if you want to add support for SoC audio on Rockchip
+	  boards using the RT5645/RT5650 codec, such as Veyron.
diff --git a/sound/soc/rockchip/Makefile b/sound/soc/rockchip/Makefile
index b921909..1bc1dc3 100644
--- a/sound/soc/rockchip/Makefile
+++ b/sound/soc/rockchip/Makefile
@@ -2,3 +2,9 @@
 snd-soc-i2s-objs := rockchip_i2s.o
 
 obj-$(CONFIG_SND_SOC_ROCKCHIP_I2S) += snd-soc-i2s.o
+
+snd-soc-rockchip-max98090-objs := rockchip_max98090.o
+snd-soc-rockchip-rt5645-objs := rockchip_rt5645.o
+
+obj-$(CONFIG_SND_SOC_ROCKCHIP_MAX98090) += snd-soc-rockchip-max98090.o
+obj-$(CONFIG_SND_SOC_ROCKCHIP_RT5645) += snd-soc-rockchip-rt5645.o
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index acb5be5..b936102 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -483,16 +483,14 @@
 		goto err_suspend;
 	}
 
-	ret = snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+	ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
 	if (ret) {
 		dev_err(&pdev->dev, "Could not register PCM\n");
-		goto err_pcm_register;
+		return ret;
 	}
 
 	return 0;
 
-err_pcm_register:
-	snd_dmaengine_pcm_unregister(&pdev->dev);
 err_suspend:
 	if (!pm_runtime_status_suspended(&pdev->dev))
 		i2s_runtime_suspend(&pdev->dev);
@@ -512,8 +510,6 @@
 
 	clk_disable_unprepare(i2s->mclk);
 	clk_disable_unprepare(i2s->hclk);
-	snd_dmaengine_pcm_unregister(&pdev->dev);
-	snd_soc_unregister_component(&pdev->dev);
 
 	return 0;
 }
diff --git a/sound/soc/rockchip/rockchip_max98090.c b/sound/soc/rockchip/rockchip_max98090.c
new file mode 100644
index 0000000..26567b1
--- /dev/null
+++ b/sound/soc/rockchip/rockchip_max98090.c
@@ -0,0 +1,236 @@
+/*
+ * Rockchip machine ASoC driver for boards using a MAX98090 CODEC.
+ *
+ * Copyright (c) 2014, ROCKCHIP CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "rockchip_i2s.h"
+#include "../codecs/ts3a227e.h"
+
+#define DRV_NAME "rockchip-snd-max98090"
+
+static struct snd_soc_jack headset_jack;
+static struct snd_soc_jack_pin headset_jack_pins[] = {
+	{
+		.pin = "Headset Jack",
+		.mask = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
+			SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+			SND_JACK_BTN_2 | SND_JACK_BTN_3,
+	},
+};
+
+static const struct snd_soc_dapm_widget rk_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Headphone", NULL),
+	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("Int Mic", NULL),
+	SND_SOC_DAPM_SPK("Speaker", NULL),
+};
+
+static const struct snd_soc_dapm_route rk_audio_map[] = {
+	{"IN34", NULL, "Headset Mic"},
+	{"IN34", NULL, "MICBIAS"},
+	{"MICBIAS", NULL, "Headset Mic"},
+	{"DMICL", NULL, "Int Mic"},
+	{"Headphone", NULL, "HPL"},
+	{"Headphone", NULL, "HPR"},
+	{"Speaker", NULL, "SPKL"},
+	{"Speaker", NULL, "SPKR"},
+};
+
+static const struct snd_kcontrol_new rk_mc_controls[] = {
+	SOC_DAPM_PIN_SWITCH("Headphone"),
+	SOC_DAPM_PIN_SWITCH("Headset Mic"),
+	SOC_DAPM_PIN_SWITCH("Int Mic"),
+	SOC_DAPM_PIN_SWITCH("Speaker"),
+};
+
+static int rk_aif1_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params)
+{
+	int ret = 0;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	int mclk;
+
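+	/* Pick an MCLK from the 12.288 MHz or 11.2896 MHz family matching the rate */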
+	switch (params_rate(params)) {
+	case 8000:
+	case 16000:
+	case 48000:
+	case 96000:
+		mclk = 12288000;
+		break;
+	case 44100:
+		mclk = 11289600;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk,
+				     SND_SOC_CLOCK_OUT);
+	if (ret < 0) {
+		dev_err(cpu_dai->dev, "Can't set cpu dai clock %d\n", ret);
+		return ret;
+	}
+
+	ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
+				     SND_SOC_CLOCK_IN);
+	if (ret < 0) {
+		dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int rk_init(struct snd_soc_pcm_runtime *runtime)
+{
+	/* Enable Headset and 4 Buttons Jack detection */
+	return snd_soc_card_jack_new(runtime->card, "Headset Jack",
+			       SND_JACK_HEADSET |
+			       SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+			       SND_JACK_BTN_2 | SND_JACK_BTN_3,
+			       &headset_jack,
+			       headset_jack_pins,
+			       ARRAY_SIZE(headset_jack_pins));
+}
+
+static int rk_98090_headset_init(struct snd_soc_component *component)
+{
+	return ts3a227e_enable_jack_detect(component, &headset_jack);
+}
+
+static struct snd_soc_ops rk_aif1_ops = {
+	.hw_params = rk_aif1_hw_params,
+};
+
+static struct snd_soc_aux_dev rk_98090_headset_dev = {
+	.name = "Headset Chip",
+	.init = rk_98090_headset_init,
+};
+
+static struct snd_soc_dai_link rk_dailink = {
+	.name = "max98090",
+	.stream_name = "Audio",
+	.codec_dai_name = "HiFi",
+	.init = rk_init,
+	.ops = &rk_aif1_ops,
+	/* set max98090 as slave */
+	.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+		SND_SOC_DAIFMT_CBS_CFS,
+};
+
+static struct snd_soc_card snd_soc_card_rk = {
+	.name = "ROCKCHIP-I2S",
+	.owner = THIS_MODULE,
+	.dai_link = &rk_dailink,
+	.num_links = 1,
+	.aux_dev = &rk_98090_headset_dev,
+	.num_aux_devs = 1,
+	.dapm_widgets = rk_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(rk_dapm_widgets),
+	.dapm_routes = rk_audio_map,
+	.num_dapm_routes = ARRAY_SIZE(rk_audio_map),
+	.controls = rk_mc_controls,
+	.num_controls = ARRAY_SIZE(rk_mc_controls),
+};
+
+static int snd_rk_mc_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct snd_soc_card *card = &snd_soc_card_rk;
+	struct device_node *np = pdev->dev.of_node;
+
+	/* register the soc card */
+	card->dev = &pdev->dev;
+
+	rk_dailink.codec_of_node = of_parse_phandle(np,
+			"rockchip,audio-codec", 0);
+	if (!rk_dailink.codec_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'rockchip,audio-codec' missing or invalid\n");
+		return -EINVAL;
+	}
+
+	rk_dailink.cpu_of_node = of_parse_phandle(np,
+			"rockchip,i2s-controller", 0);
+	if (!rk_dailink.cpu_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'rockchip,i2s-controller' missing or invalid\n");
+		return -EINVAL;
+	}
+
+	rk_dailink.platform_of_node = rk_dailink.cpu_of_node;
+
+	rk_98090_headset_dev.codec_of_node = of_parse_phandle(np,
+			"rockchip,headset-codec", 0);
+	if (!rk_98090_headset_dev.codec_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'rockchip,headset-codec' missing/invalid\n");
+		return -EINVAL;
+	}
+
+	ret = snd_soc_of_parse_card_name(card, "rockchip,model");
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Soc parse card name failed %d\n", ret);
+		return ret;
+	}
+
+	ret = devm_snd_soc_register_card(&pdev->dev, card);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Soc register card failed %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static const struct of_device_id rockchip_max98090_of_match[] = {
+	{ .compatible = "rockchip,rockchip-audio-max98090", },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, rockchip_max98090_of_match);
+
+static struct platform_driver snd_rk_mc_driver = {
+	.probe = snd_rk_mc_probe,
+	.driver = {
+		.name = DRV_NAME,
+		.pm = &snd_soc_pm_ops,
+		.of_match_table = rockchip_max98090_of_match,
+	},
+};
+
+module_platform_driver(snd_rk_mc_driver);
+
+MODULE_AUTHOR("jianqun <jay.xu@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip max98090 machine ASoC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/rockchip/rockchip_rt5645.c b/sound/soc/rockchip/rockchip_rt5645.c
new file mode 100644
index 0000000..68c62e4
--- /dev/null
+++ b/sound/soc/rockchip/rockchip_rt5645.c
@@ -0,0 +1,225 @@
+/*
+ * Rockchip machine ASoC driver for boards using a RT5645/RT5650 CODEC.
+ *
+ * Copyright (c) 2015, ROCKCHIP CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "rockchip_i2s.h"
+
+#define DRV_NAME "rockchip-snd-rt5645"
+
+static struct snd_soc_jack headset_jack;
+
+/* Jack detect via rt5645 driver. */
+extern int rt5645_set_jack_detect(struct snd_soc_codec *codec,
+	struct snd_soc_jack *hp_jack, struct snd_soc_jack *mic_jack,
+	struct snd_soc_jack *btn_jack);
+
+static const struct snd_soc_dapm_widget rk_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Headphones", NULL),
+	SND_SOC_DAPM_SPK("Speakers", NULL),
+	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("Int Mic", NULL),
+};
+
+static const struct snd_soc_dapm_route rk_audio_map[] = {
+	/* Input Lines */
+	{"DMIC L2", NULL, "Int Mic"},
+	{"DMIC R2", NULL, "Int Mic"},
+	{"RECMIXL", NULL, "Headset Mic"},
+	{"RECMIXR", NULL, "Headset Mic"},
+
+	/* Output Lines */
+	{"Headphones", NULL, "HPOR"},
+	{"Headphones", NULL, "HPOL"},
+	{"Speakers", NULL, "SPOL"},
+	{"Speakers", NULL, "SPOR"},
+};
+
+static const struct snd_kcontrol_new rk_mc_controls[] = {
+	SOC_DAPM_PIN_SWITCH("Headphones"),
+	SOC_DAPM_PIN_SWITCH("Speakers"),
+	SOC_DAPM_PIN_SWITCH("Headset Mic"),
+	SOC_DAPM_PIN_SWITCH("Int Mic"),
+};
+
+static int rk_aif1_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params)
+{
+	int ret = 0;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	int mclk;
+
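+	/* Pick an MCLK from the 12.288 MHz or 11.2896 MHz family matching the rate */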
+	switch (params_rate(params)) {
+	case 8000:
+	case 16000:
+	case 48000:
+	case 96000:
+		mclk = 12288000;
+		break;
+	case 44100:
+		mclk = 11289600;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk,
+				     SND_SOC_CLOCK_OUT);
+	if (ret < 0) {
+		dev_err(cpu_dai->dev, "Can't set cpu dai clock %d\n", ret);
+		return ret;
+	}
+
+	ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
+				     SND_SOC_CLOCK_IN);
+	if (ret < 0) {
+		dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int rk_init(struct snd_soc_pcm_runtime *runtime)
+{
+	struct snd_soc_card *card = runtime->card;
+	int ret;
+
+	/* Enable Headset and 4 Buttons Jack detection */
+	ret = snd_soc_card_jack_new(card, "Headset Jack",
+				    SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
+				    SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+				    SND_JACK_BTN_2 | SND_JACK_BTN_3,
+				    &headset_jack, NULL, 0);
+	if (ret) {
+		dev_err(card->dev, "New Headset Jack failed! (%d)\n", ret);
+		return ret;
+	}
+
+	return rt5645_set_jack_detect(runtime->codec,
+				     &headset_jack,
+				     &headset_jack,
+				     &headset_jack);
+}
+
+static struct snd_soc_ops rk_aif1_ops = {
+	.hw_params = rk_aif1_hw_params,
+};
+
+static struct snd_soc_dai_link rk_dailink = {
+	.name = "rt5645",
+	.stream_name = "rt5645 PCM",
+	.codec_dai_name = "rt5645-aif1",
+	.init = rk_init,
+	.ops = &rk_aif1_ops,
+	/* set rt5645 as slave */
+	.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+		SND_SOC_DAIFMT_CBS_CFS,
+};
+
+static struct snd_soc_card snd_soc_card_rk = {
+	.name = "I2S-RT5650",
+	.owner = THIS_MODULE,
+	.dai_link = &rk_dailink,
+	.num_links = 1,
+	.dapm_widgets = rk_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(rk_dapm_widgets),
+	.dapm_routes = rk_audio_map,
+	.num_dapm_routes = ARRAY_SIZE(rk_audio_map),
+	.controls = rk_mc_controls,
+	.num_controls = ARRAY_SIZE(rk_mc_controls),
+};
+
+static int snd_rk_mc_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct snd_soc_card *card = &snd_soc_card_rk;
+	struct device_node *np = pdev->dev.of_node;
+
+	/* register the soc card */
+	card->dev = &pdev->dev;
+
+	rk_dailink.codec_of_node = of_parse_phandle(np,
+			"rockchip,audio-codec", 0);
+	if (!rk_dailink.codec_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'rockchip,audio-codec' missing or invalid\n");
+		return -EINVAL;
+	}
+
+	rk_dailink.cpu_of_node = of_parse_phandle(np,
+			"rockchip,i2s-controller", 0);
+	if (!rk_dailink.cpu_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'rockchip,i2s-controller' missing or invalid\n");
+		return -EINVAL;
+	}
+
+	rk_dailink.platform_of_node = rk_dailink.cpu_of_node;
+
+	ret = snd_soc_of_parse_card_name(card, "rockchip,model");
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Soc parse card name failed %d\n", ret);
+		return ret;
+	}
+
+	ret = devm_snd_soc_register_card(&pdev->dev, card);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Soc register card failed %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static const struct of_device_id rockchip_rt5645_of_match[] = {
+	{ .compatible = "rockchip,rockchip-audio-rt5645", },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, rockchip_rt5645_of_match);
+
+static struct platform_driver snd_rk_mc_driver = {
+	.probe = snd_rk_mc_probe,
+	.driver = {
+		.name = DRV_NAME,
+		.pm = &snd_soc_pm_ops,
+		.of_match_table = rockchip_rt5645_of_match,
+	},
+};
+
+module_platform_driver(snd_rk_mc_driver);
+
+MODULE_AUTHOR("Xing Zheng <zhengxing@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip rt5645 machine ASoC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
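
The mclk table in rk_aif1_hw_params() above reduces to one rule: the master clock must divide the sample rate evenly, so the 8/16/48/96 kHz family shares 12.288 MHz while 44.1 kHz needs 11.2896 MHz. A standalone recomputation of that relationship (illustrative only, not driver code):

	/* Illustrative check: every supported rate divides its mclk evenly. */
	#include <stdio.h>

	int main(void)
	{
		const unsigned int rates[] = { 8000, 16000, 44100, 48000, 96000 };
		unsigned int i;

		for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
			unsigned int fs = rates[i];
			/* 44.1 kHz is the only rate outside the 8 kHz family */
			unsigned int mclk = (fs % 8000 == 0) ? 12288000 : 11289600;

			printf("rate %6u -> mclk %8u (%4ux fs)\n",
			       fs, mclk, mclk / fs);
		}
		return 0;
	}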
diff --git a/sound/soc/samsung/arndale_rt5631.c b/sound/soc/samsung/arndale_rt5631.c
index 8bf2e2c..ee1fda9 100644
--- a/sound/soc/samsung/arndale_rt5631.c
+++ b/sound/soc/samsung/arndale_rt5631.c
@@ -71,6 +71,7 @@
 
 static struct snd_soc_card arndale_rt5631 = {
 	.name = "Arndale RT5631",
+	.owner = THIS_MODULE,
 	.dai_link = arndale_rt5631_dai,
 	.num_links = ARRAY_SIZE(arndale_rt5631_dai),
 };
@@ -116,15 +117,6 @@
 	return ret;
 }
 
-static int arndale_audio_remove(struct platform_device *pdev)
-{
-	struct snd_soc_card *card = platform_get_drvdata(pdev);
-
-	snd_soc_unregister_card(card);
-
-	return 0;
-}
-
 static const struct of_device_id samsung_arndale_rt5631_of_match[] __maybe_unused = {
 	{ .compatible = "samsung,arndale-rt5631", },
 	{ .compatible = "samsung,arndale-alc5631", },
@@ -139,7 +131,6 @@
 		.of_match_table = of_match_ptr(samsung_arndale_rt5631_of_match),
 	},
 	.probe = arndale_audio_probe,
-	.remove = arndale_audio_remove,
 };
 
 module_platform_driver(arndale_audio_driver);
diff --git a/sound/soc/samsung/snow.c b/sound/soc/samsung/snow.c
index 7651dc9..07ce2cf 100644
--- a/sound/soc/samsung/snow.c
+++ b/sound/soc/samsung/snow.c
@@ -56,6 +56,7 @@
 
 static struct snd_soc_card snow_snd = {
 	.name = "Snow-I2S",
+	.owner = THIS_MODULE,
 	.dai_link = snow_dai,
 	.num_links = ARRAY_SIZE(snow_dai),
 
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index fd11404..8fad444 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -327,13 +327,7 @@
 
 static int sh7760_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &sh7760_soc_platform);
-}
-
-static int sh7760_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev, &sh7760_soc_platform);
 }
 
 static struct platform_driver sh7760_pcm_driver = {
@@ -342,7 +336,6 @@
 	},
 
 	.probe = sh7760_soc_platform_probe,
-	.remove = sh7760_soc_platform_remove,
 };
 
 module_platform_driver(sh7760_pcm_driver);
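
The sh7760 change above is the standard devm conversion: once registration goes through a device-managed helper, the core unwinds it automatically on unbind, which is why the .remove callback can be deleted outright. A hedged sketch of the resulting driver shape (all names illustrative):

	/* Sketch: with devm_* registration the .remove callback becomes
	 * unnecessary; teardown happens automatically on device unbind. */
	static int example_soc_platform_probe(struct platform_device *pdev)
	{
		return devm_snd_soc_register_platform(&pdev->dev,
						      &example_soc_platform);
	}

	static struct platform_driver example_pcm_driver = {
		.driver = {
			.name = "example-pcm-audio",
		},
		.probe = example_soc_platform_probe,
		/* no .remove: devm handles unregistration */
	};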
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 142c066..0215c78 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1911,7 +1911,6 @@
 
 static const struct platform_device_id fsi_id_table[] = {
 	{ "sh_fsi",	(kernel_ulong_t)&fsi1_core },
-	{ "sh_fsi2",	(kernel_ulong_t)&fsi2_core },
 	{},
 };
 MODULE_DEVICE_TABLE(platform, fsi_id_table);
diff --git a/sound/soc/sh/rcar/Makefile b/sound/soc/sh/rcar/Makefile
index f1b4451..8b25850 100644
--- a/sound/soc/sh/rcar/Makefile
+++ b/sound/soc/sh/rcar/Makefile
@@ -1,4 +1,4 @@
-snd-soc-rcar-objs	:= core.o gen.o dma.o src.o adg.o ssi.o dvc.o
+snd-soc-rcar-objs	:= core.o gen.o dma.o adg.o ssi.o src.o ctu.o mix.o dvc.o
 obj-$(CONFIG_SND_SOC_RCAR)	+= snd-soc-rcar.o
 
 snd-soc-rsrc-card-objs	:= rsrc-card.o
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index f1e5920..f3feed5 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -203,9 +203,9 @@
 }
 
 /*
- *	settting function
+ *	ADINR function
  */
-u32 rsnd_get_adinr(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
+u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
 {
 	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
 	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
@@ -227,6 +227,64 @@
 	return adinr;
 }
 
+u32 rsnd_get_adinr_chan(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
+{
+	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+	struct device *dev = rsnd_priv_to_dev(priv);
+	u32 chan = runtime->channels;
+
+	switch (chan) {
+	case 1:
+	case 2:
+	case 4:
+	case 6:
+	case 8:
+		break;
+	default:
+		dev_warn(dev, "unsupported channel count\n");
+		chan = 0;
+		break;
+	}
+
+	return chan;
+}
+
+/*
+ *	DALIGN function
+ */
+u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
+{
+	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
+	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
+	struct rsnd_mod *target = src ? src : ssi;
+	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+	u32 val = 0x76543210;
+	u32 mask = ~0;
+
+	mask <<= runtime->channels * 4;
+	val = val & mask;
+
+	switch (runtime->sample_bits) {
+	case 16:
+		val |= 0x67452301 & ~mask;
+		break;
+	case 32:
+		val |= 0x76543210 & ~mask;
+		break;
+	}
+
+	/*
+	 * Exchange channels on SRC if possible;
+	 * otherwise the R/L volume settings on DVC
+	 * would apply to the swapped channels
+	 */
+	if (mod == target)
+		return val;
+	else
+		return 0x76543210;
+}
+
 /*
  *	rsnd_dai functions
  */
@@ -242,9 +300,9 @@
 	if (val == __rsnd_mod_call_##func) {				\
 		called = 1;						\
 		ret = (mod)->ops->func(mod, io, param);			\
-		mod->status = (mod->status & ~mask) +			\
-			(add << __rsnd_mod_shift_##func);		\
 	}								\
+	mod->status = (mod->status & ~mask) +				\
+		(add << __rsnd_mod_shift_##func);			\
 	dev_dbg(dev, "%s[%d] 0x%08x %s\n",				\
 		rsnd_mod_name(mod), rsnd_mod_id(mod), mod->status,	\
 		called ? #func : "");					\
@@ -274,21 +332,21 @@
 static int rsnd_dai_connect(struct rsnd_mod *mod,
 			    struct rsnd_dai_stream *io)
 {
+	struct rsnd_priv *priv;
+	struct device *dev;
+
 	if (!mod)
 		return -EIO;
 
-	if (io->mod[mod->type]) {
-		struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
-		struct device *dev = rsnd_priv_to_dev(priv);
-
-		dev_err(dev, "%s[%d] is not empty\n",
-			rsnd_mod_name(mod),
-			rsnd_mod_id(mod));
-		return -EIO;
-	}
+	priv = rsnd_mod_to_priv(mod);
+	dev = rsnd_priv_to_dev(priv);
 
 	io->mod[mod->type] = mod;
 
+	dev_dbg(dev, "%s[%d] is connected to io (%s)\n",
+		rsnd_mod_name(mod), rsnd_mod_id(mod),
+		rsnd_io_is_play(io) ? "Playback" : "Capture");
+
 	return 0;
 }
 
@@ -517,7 +575,7 @@
 	.set_fmt	= rsnd_soc_dai_set_fmt,
 };
 
-#define rsnd_path_parse(priv, io, type)				\
+#define rsnd_path_add(priv, io, type)				\
 ({								\
 	struct rsnd_mod *mod;					\
 	int ret = 0;						\
@@ -533,7 +591,7 @@
 	ret;							\
 })
 
-#define rsnd_path_break(priv, io, type)				\
+#define rsnd_path_remove(priv, io, type)			\
 {								\
 	struct rsnd_mod *mod;					\
 	int id = -1;						\
@@ -547,6 +605,79 @@
 	}							\
 }
 
+void rsnd_path_parse(struct rsnd_priv *priv,
+		     struct rsnd_dai_stream *io)
+{
+	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
+	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
+	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
+	struct rsnd_mod *cmd;
+	struct device *dev = rsnd_priv_to_dev(priv);
+	u32 data;
+
+	/* Gen1 is not supported */
+	if (rsnd_is_gen1(priv))
+		return;
+
+	if (!mix && !dvc)
+		return;
+
+	if (mix) {
+		struct rsnd_dai *rdai;
+		int i;
+		u32 path[] = {
+			[0] = 0,
+			[1] = 1 << 0,
+			[2] = 0,
+			[3] = 0,
+			[4] = 0,
+			[5] = 1 << 8
+		};
+
+		/*
+		 * This assumes the integrator understands the data
+		 * path; impossible connections such as src2 + src5
+		 * are not checked here
+		 */
+		data = 0;
+		for_each_rsnd_dai(rdai, priv, i) {
+			io = &rdai->playback;
+			if (mix == rsnd_io_to_mod_mix(io))
+				data |= path[rsnd_mod_id(src)];
+
+			io = &rdai->capture;
+			if (mix == rsnd_io_to_mod_mix(io))
+				data |= path[rsnd_mod_id(src)];
+		}
+
+		/*
+		 * We can't use ctu = rsnd_io_to_mod_ctu() here, since
+		 * the IDs of dvc/mix are 0 or 1 (= same as the CMD
+		 * number) but CTU IDs are 0 - 7 (= CTU00 - CTU13)
+		 */
+		cmd = mix;
+	} else {
+		u32 path[] = {
+			[0] = 0x30000,
+			[1] = 0x30001,
+			[2] = 0x40000,
+			[3] = 0x10000,
+			[4] = 0x20000,
+			[5] = 0x40100
+		};
+
+		data = path[rsnd_mod_id(src)];
+
+		cmd = dvc;
+	}
+
+	dev_dbg(dev, "ctu/mix path = 0x%08x\n", data);
+
+	rsnd_mod_write(cmd, CMD_ROUTE_SLCT, data);
+
+	rsnd_mod_write(cmd, CMD_CTRL, 0x10);
+}
+
 static int rsnd_path_init(struct rsnd_priv *priv,
 			  struct rsnd_dai *rdai,
 			  struct rsnd_dai_stream *io)
@@ -564,18 +695,28 @@
 	 * using fixed path.
 	 */
 
-	/* SRC */
-	ret = rsnd_path_parse(priv, io, src);
+	/* SSI */
+	ret = rsnd_path_add(priv, io, ssi);
 	if (ret < 0)
 		return ret;
 
-	/* SSI */
-	ret = rsnd_path_parse(priv, io, ssi);
+	/* SRC */
+	ret = rsnd_path_add(priv, io, src);
+	if (ret < 0)
+		return ret;
+
+	/* CTU */
+	ret = rsnd_path_add(priv, io, ctu);
+	if (ret < 0)
+		return ret;
+
+	/* MIX */
+	ret = rsnd_path_add(priv, io, mix);
 	if (ret < 0)
 		return ret;
 
 	/* DVC */
-	ret = rsnd_path_parse(priv, io, dvc);
+	ret = rsnd_path_add(priv, io, dvc);
 	if (ret < 0)
 		return ret;
 
@@ -589,13 +730,15 @@
 	struct device_node *dai_node,	*dai_np;
 	struct device_node *ssi_node,	*ssi_np;
 	struct device_node *src_node,	*src_np;
+	struct device_node *ctu_node,	*ctu_np;
+	struct device_node *mix_node,	*mix_np;
 	struct device_node *dvc_node,	*dvc_np;
 	struct device_node *playback, *capture;
 	struct rsnd_dai_platform_info *dai_info;
 	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
 	struct device *dev = &pdev->dev;
 	int nr, i;
-	int dai_i, ssi_i, src_i, dvc_i;
+	int dai_i, ssi_i, src_i, ctu_i, mix_i, dvc_i;
 
 	if (!of_data)
 		return;
@@ -621,6 +764,8 @@
 
 	ssi_node = of_get_child_by_name(dev->of_node, "rcar_sound,ssi");
 	src_node = of_get_child_by_name(dev->of_node, "rcar_sound,src");
+	ctu_node = of_get_child_by_name(dev->of_node, "rcar_sound,ctu");
+	mix_node = of_get_child_by_name(dev->of_node, "rcar_sound,mix");
 	dvc_node = of_get_child_by_name(dev->of_node, "rcar_sound,dvc");
 
 #define mod_parse(name)							\
@@ -657,6 +802,8 @@
 
 			mod_parse(ssi);
 			mod_parse(src);
+			mod_parse(ctu);
+			mod_parse(mix);
 			mod_parse(dvc);
 
 			of_node_put(playback);
@@ -1033,8 +1180,8 @@
 		/*
 		 * remove SRC/DVC from DAI,
 		 */
-		rsnd_path_break(priv, io, src);
-		rsnd_path_break(priv, io, dvc);
+		rsnd_path_remove(priv, io, src);
+		rsnd_path_remove(priv, io, dvc);
 
 		/*
 		 * fallback
@@ -1069,6 +1216,8 @@
 		rsnd_dma_probe,
 		rsnd_ssi_probe,
 		rsnd_src_probe,
+		rsnd_ctu_probe,
+		rsnd_mix_probe,
 		rsnd_dvc_probe,
 		rsnd_adg_probe,
 		rsnd_dai_probe,
@@ -1164,6 +1313,8 @@
 			      struct rsnd_priv *priv) = {
 		rsnd_ssi_remove,
 		rsnd_src_remove,
+		rsnd_ctu_remove,
+		rsnd_mix_remove,
 		rsnd_dvc_remove,
 	};
 	int ret = 0, i;
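
rsnd_get_dalign() above encodes byte-lane placement as one nibble per lane: 0x76543210 is the identity layout and the 16-bit case swaps adjacent 16-bit halves (0x67452301), but only for the lanes the stream actually uses. A standalone recomputation of the values it produces for a two-channel stream (illustrative, not driver code):

	/* Illustrative: DALIGN values as computed by rsnd_get_dalign(). */
	#include <stdio.h>

	static unsigned int dalign(unsigned int channels, unsigned int sample_bits)
	{
		unsigned int val = 0x76543210;
		unsigned int mask = ~0U << (channels * 4);

		val &= mask;
		if (sample_bits == 16)
			val |= 0x67452301 & ~mask;	/* swap 16-bit halves */
		else
			val |= 0x76543210 & ~mask;	/* 32-bit: keep order */
		return val;
	}

	int main(void)
	{
		printf("2ch/16bit: 0x%08x\n", dalign(2, 16));	/* 0x76543201 */
		printf("2ch/32bit: 0x%08x\n", dalign(2, 32));	/* 0x76543210 */
		return 0;
	}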
diff --git a/sound/soc/sh/rcar/ctu.c b/sound/soc/sh/rcar/ctu.c
new file mode 100644
index 0000000..05498bb
--- /dev/null
+++ b/sound/soc/sh/rcar/ctu.c
@@ -0,0 +1,171 @@
+/*
+ * ctu.c
+ *
+ * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "rsnd.h"
+
+#define CTU_NAME_SIZE	16
+#define CTU_NAME "ctu"
+
+struct rsnd_ctu {
+	struct rsnd_ctu_platform_info *info; /* rcar_snd.h */
+	struct rsnd_mod mod;
+};
+
+#define rsnd_ctu_nr(priv) ((priv)->ctu_nr)
+#define for_each_rsnd_ctu(pos, priv, i)					\
+	for ((i) = 0;							\
+	     ((i) < rsnd_ctu_nr(priv)) &&				\
+		     ((pos) = (struct rsnd_ctu *)(priv)->ctu + i);	\
+	     i++)
+
+#define rsnd_ctu_initialize_lock(mod)	__rsnd_ctu_initialize_lock(mod, 1)
+#define rsnd_ctu_initialize_unlock(mod)	__rsnd_ctu_initialize_lock(mod, 0)
+static void __rsnd_ctu_initialize_lock(struct rsnd_mod *mod, u32 enable)
+{
+	rsnd_mod_write(mod, CTU_CTUIR, enable);
+}
+
+static int rsnd_ctu_init(struct rsnd_mod *mod,
+			 struct rsnd_dai_stream *io,
+			 struct rsnd_priv *priv)
+{
+	rsnd_mod_hw_start(mod);
+
+	rsnd_ctu_initialize_lock(mod);
+
+	rsnd_mod_write(mod, CTU_ADINR, rsnd_get_adinr_chan(mod, io));
+
+	rsnd_ctu_initialize_unlock(mod);
+
+	return 0;
+}
+
+static int rsnd_ctu_quit(struct rsnd_mod *mod,
+			 struct rsnd_dai_stream *io,
+			 struct rsnd_priv *priv)
+{
+	rsnd_mod_hw_stop(mod);
+
+	return 0;
+}
+
+static struct rsnd_mod_ops rsnd_ctu_ops = {
+	.name		= CTU_NAME,
+	.init		= rsnd_ctu_init,
+	.quit		= rsnd_ctu_quit,
+};
+
+struct rsnd_mod *rsnd_ctu_mod_get(struct rsnd_priv *priv, int id)
+{
+	if (WARN_ON(id < 0 || id >= rsnd_ctu_nr(priv)))
+		id = 0;
+
+	return &((struct rsnd_ctu *)(priv->ctu) + id)->mod;
+}
+
+static void rsnd_of_parse_ctu(struct platform_device *pdev,
+		       const struct rsnd_of_data *of_data,
+		       struct rsnd_priv *priv)
+{
+	struct device_node *node;
+	struct rsnd_ctu_platform_info *ctu_info;
+	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
+	struct device *dev = &pdev->dev;
+	int nr;
+
+	if (!of_data)
+		return;
+
+	node = of_get_child_by_name(dev->of_node, "rcar_sound,ctu");
+	if (!node)
+		return;
+
+	nr = of_get_child_count(node);
+	if (!nr)
+		goto rsnd_of_parse_ctu_end;
+
+	ctu_info = devm_kzalloc(dev,
+				sizeof(struct rsnd_ctu_platform_info) * nr,
+				GFP_KERNEL);
+	if (!ctu_info)
+		goto rsnd_of_parse_ctu_end;
+
+	info->ctu_info		= ctu_info;
+	info->ctu_info_nr	= nr;
+
+rsnd_of_parse_ctu_end:
+	of_node_put(node);
+
+}
+
+int rsnd_ctu_probe(struct platform_device *pdev,
+		   const struct rsnd_of_data *of_data,
+		   struct rsnd_priv *priv)
+{
+	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
+	struct device *dev = rsnd_priv_to_dev(priv);
+	struct rsnd_ctu *ctu;
+	struct clk *clk;
+	char name[CTU_NAME_SIZE];
+	int i, nr, ret;
+
+	/* This driver doesn't support Gen1 at this point */
+	if (rsnd_is_gen1(priv)) {
+		dev_warn(dev, "CTU is not supported on Gen1\n");
+		return -EINVAL;
+	}
+
+	rsnd_of_parse_ctu(pdev, of_data, priv);
+
+	nr = info->ctu_info_nr;
+	if (!nr)
+		return 0;
+
+	ctu = devm_kzalloc(dev, sizeof(*ctu) * nr, GFP_KERNEL);
+	if (!ctu)
+		return -ENOMEM;
+
+	priv->ctu_nr	= nr;
+	priv->ctu	= ctu;
+
+	for_each_rsnd_ctu(ctu, priv, i) {
+		/*
+		 * CTU00, CTU01, CTU02, CTU03 => CTU0
+		 * CTU10, CTU11, CTU12, CTU13 => CTU1
+		 */
+		snprintf(name, CTU_NAME_SIZE, "%s.%d",
+			 CTU_NAME, i / 4);
+
+		clk = devm_clk_get(dev, name);
+		if (IS_ERR(clk))
+			return PTR_ERR(clk);
+
+		ctu->info = &info->ctu_info[i];
+
+		ret = rsnd_mod_init(priv, &ctu->mod, &rsnd_ctu_ops,
+				    clk, RSND_MOD_CTU, i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+void rsnd_ctu_remove(struct platform_device *pdev,
+		     struct rsnd_priv *priv)
+{
+	struct rsnd_ctu *ctu;
+	int i;
+
+	for_each_rsnd_ctu(ctu, priv, i) {
+		rsnd_mod_quit(&ctu->mod);
+	}
+}
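
The probe loop above requests one clock per CTU unit rather than per channel: the i / 4 in the name maps the eight channels CTU00..CTU13 onto the clocks "ctu.0" and "ctu.1". A standalone illustration of that mapping:

	/* Illustrative: eight CTU instances grouped onto two clock names. */
	#include <stdio.h>

	int main(void)
	{
		int i;

		for (i = 0; i < 8; i++)
			printf("CTU%d%d -> clock \"ctu.%d\"\n",
			       i / 4, i % 4, i / 4);
		return 0;
	}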
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index d306e29..bfbb8a5 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -27,6 +27,15 @@
 	int dmapp_num;
 };
 
+struct rsnd_dma_ops {
+	char *name;
+	void (*start)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
+	void (*stop)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
+	int (*init)(struct rsnd_dai_stream *io, struct rsnd_dma *dma, int id,
+		    struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
+	void (*quit)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
+};
+
 #define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
 
 /*
@@ -168,7 +177,7 @@
 		dma_cap_set(DMA_SLAVE, mask);
 
 		dmaen->chan = dma_request_channel(mask, shdma_chan_filter,
-						  (void *)id);
+						  (void *)(uintptr_t)id);
 	}
 	if (IS_ERR_OR_NULL(dmaen->chan)) {
 		dmaen->chan = NULL;
@@ -182,7 +191,8 @@
 	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 
-	dev_dbg(dev, "dma : %pad -> %pad\n",
+	dev_dbg(dev, "%s %pad -> %pad\n",
+		dma->ops->name,
 		&cfg.src_addr, &cfg.dst_addr);
 
 	ret = dmaengine_slave_config(dmaen->chan, &cfg);
@@ -215,6 +225,7 @@
 }
 
 static struct rsnd_dma_ops rsnd_dmaen_ops = {
+	.name	= "audmac",
 	.start	= rsnd_dmaen_start,
 	.stop	= rsnd_dmaen_stop,
 	.init	= rsnd_dmaen_init,
@@ -360,6 +371,7 @@
 }
 
 static struct rsnd_dma_ops rsnd_dmapp_ops = {
+	.name	= "audmac-pp",
 	.start	= rsnd_dmapp_start,
 	.stop	= rsnd_dmapp_stop,
 	.init	= rsnd_dmapp_init,
@@ -414,7 +426,9 @@
 	phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
 	int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod);
 	int use_src = !!rsnd_io_to_mod_src(io);
-	int use_dvc = !!rsnd_io_to_mod_dvc(io);
+	int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
+		      !!rsnd_io_to_mod_mix(io) ||
+		      !!rsnd_io_to_mod_ctu(io);
 	int id = rsnd_mod_id(mod);
 	struct dma_addr {
 		dma_addr_t out_addr;
@@ -452,7 +466,7 @@
 	};
 
 	/* it shouldn't happen */
-	if (use_dvc && !use_src)
+	if (use_cmd && !use_src)
 		dev_err(dev, "DVC is selected without SRC\n");
 
 	/* use SSIU or SSI ? */
@@ -460,8 +474,8 @@
 		is_ssi++;
 
 	return (is_from) ?
-		dma_addrs[is_ssi][is_play][use_src + use_dvc].out_addr :
-		dma_addrs[is_ssi][is_play][use_src + use_dvc].in_addr;
+		dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
+		dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
 }
 
 static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
@@ -482,7 +496,7 @@
 	return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
 }
 
-#define MOD_MAX 4 /* MEM/SSI/SRC/DVC */
+#define MOD_MAX (RSND_MOD_MAX + 1) /* +Memory */
 static void rsnd_dma_of_path(struct rsnd_dma *dma,
 			     struct rsnd_dai_stream *io,
 			     int is_play,
@@ -492,55 +506,81 @@
 	struct rsnd_mod *this = rsnd_dma_to_mod(dma);
 	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
 	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
+	struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
+	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
 	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
 	struct rsnd_mod *mod[MOD_MAX];
-	int i, index;
+	struct rsnd_mod *mod_start, *mod_end;
+	struct rsnd_priv *priv = rsnd_mod_to_priv(this);
+	struct device *dev = rsnd_priv_to_dev(priv);
+	int nr, i;
 
+	if (!ssi)
+		return;
 
-	for (i = 0; i < MOD_MAX; i++)
+	nr = 0;
+	for (i = 0; i < MOD_MAX; i++) {
 		mod[i] = NULL;
-
-	/*
-	 * in play case...
-	 *
-	 * src -> dst
-	 *
-	 * mem -> SSI
-	 * mem -> SRC -> SSI
-	 * mem -> SRC -> DVC -> SSI
-	 */
-	mod[0] = NULL; /* for "mem" */
-	index = 1;
-	for (i = 1; i < MOD_MAX; i++) {
-		if (!src) {
-			mod[i] = ssi;
-		} else if (!dvc) {
-			mod[i] = src;
-			src = NULL;
-		} else {
-			if ((!is_play) && (this == src))
-				this = dvc;
-
-			mod[i] = (is_play) ? src : dvc;
-			i++;
-			mod[i] = (is_play) ? dvc : src;
-			src = NULL;
-			dvc = NULL;
-		}
-
-		if (mod[i] == this)
-			index = i;
-
-		if (mod[i] == ssi)
-			break;
+		nr += !!rsnd_io_to_mod(io, i);
 	}
 
-	if (is_play) {
-		*mod_from = mod[index - 1];
-		*mod_to   = mod[index];
+	/*
+	 * [S] -*-> [E]
+	 * [S] -*-> SRC -o-> [E]
+	 * [S] -*-> SRC -> DVC -o-> [E]
+	 * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
+	 *
+	 * playback	[S] = mem
+	 *		[E] = SSI
+	 *
+	 * capture	[S] = SSI
+	 *		[E] = mem
+	 *
+	 * -*->		Audio DMAC
+	 * -o->		Audio DMAC peri peri
+	 */
+	mod_start	= (is_play) ? NULL : ssi;
+	mod_end		= (is_play) ? ssi  : NULL;
+
+	mod[0] = mod_start;
+	for (i = 1; i < nr; i++) {
+		if (src) {
+			mod[i] = src;
+			src = NULL;
+		} else if (ctu) {
+			mod[i] = ctu;
+			ctu = NULL;
+		} else if (mix) {
+			mod[i] = mix;
+			mix = NULL;
+		} else if (dvc) {
+			mod[i] = dvc;
+			dvc = NULL;
+		}
+	}
+	mod[i] = mod_end;
+
+	/*
+	 *		| SSI | SRC |
+	 * -------------+-----+-----+
+	 *  is_play	|  o  |  *  |
+	 * !is_play	|  *  |  o  |
+	 */
+	if ((this == ssi) == (is_play)) {
+		*mod_from	= mod[nr - 1];
+		*mod_to		= mod[nr];
 	} else {
-		*mod_from = mod[index];
-		*mod_to   = mod[index - 1];
+		*mod_from	= mod[0];
+		*mod_to		= mod[1];
+	}
+
+	dev_dbg(dev, "module connection (this is %s[%d])\n",
+		rsnd_mod_name(this), rsnd_mod_id(this));
+	for (i = 0; i <= nr; i++) {
+		dev_dbg(dev, "  %s[%d]%s\n",
+		       rsnd_mod_name(mod[i]), rsnd_mod_id(mod[i]),
+		       (mod[i] == *mod_from) ? " from" :
+		       (mod[i] == *mod_to)   ? " to" : "");
 	}
 }
 
@@ -568,10 +608,11 @@
 
 int rsnd_dma_init(struct rsnd_dai_stream *io, struct rsnd_dma *dma, int id)
 {
-	struct rsnd_mod *mod_from;
-	struct rsnd_mod *mod_to;
+	struct rsnd_mod *mod_from = NULL;
+	struct rsnd_mod *mod_to = NULL;
 	struct rsnd_priv *priv = rsnd_io_to_priv(io);
 	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
+	struct device *dev = rsnd_priv_to_dev(priv);
 	int is_play = rsnd_io_is_play(io);
 
 	/*
@@ -598,6 +639,11 @@
 	if (rsnd_is_gen1(priv))
 		dma->ops = &rsnd_dmaen_ops;
 
+	dev_dbg(dev, "%s %s[%d] -> %s[%d]\n",
+		dma->ops->name,
+		rsnd_mod_name(mod_from), rsnd_mod_id(mod_from),
+		rsnd_mod_name(mod_to),   rsnd_mod_id(mod_to));
+
 	return dma->ops->init(io, dma, id, mod_from, mod_to);
 }
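
The rewritten rsnd_dma_of_path() above builds the mod[] chain once and then picks the from/to pair around whichever mod owns the DMA. As a worked example for playback over SSI + SRC + DVC (nr = 3), mod[] becomes { mem, SRC, DVC, SSI }: the SSI side gets the DVC -> SSI hop (Audio DMAC peri peri) and the SRC side gets the mem -> SRC hop (Audio DMAC). A standalone sketch of that selection (names illustrative):

	/* Illustrative: from/to selection for a playback SSI+SRC+DVC chain. */
	#include <stdio.h>

	int main(void)
	{
		const char *mod[] = { "mem", "SRC", "DVC", "SSI" };
		int nr = 3;	/* SSI, SRC and DVC are present on this io */

		/* the SSI-owned DMA (Audio DMAC peri peri): last hop */
		printf("SSI's DMA: %s -> %s\n", mod[nr - 1], mod[nr]);
		/* the SRC-owned DMA (Audio DMAC): memory hop */
		printf("SRC's DMA: %s -> %s\n", mod[0], mod[1]);
		return 0;
	}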
 
diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c
index 36fc020..5779638 100644
--- a/sound/soc/sh/rcar/dvc.c
+++ b/sound/soc/sh/rcar/dvc.c
@@ -24,6 +24,7 @@
 	struct rsnd_kctrl_cfg_s rdown;	/* Ramp Rate Down */
 };
 
+#define rsnd_dvc_nr(priv) ((priv)->dvc_nr)
 #define rsnd_dvc_of_node(priv) \
 	of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,dvc")
 
@@ -63,6 +64,19 @@
 	"0.125 dB/8192 steps",	 /* 10111 */
 };
 
+static void rsnd_dvc_soft_reset(struct rsnd_mod *mod)
+{
+	rsnd_mod_write(mod, DVC_SWRSR, 0);
+	rsnd_mod_write(mod, DVC_SWRSR, 1);
+}
+
+#define rsnd_dvc_initialize_lock(mod)	__rsnd_dvc_initialize_lock(mod, 1)
+#define rsnd_dvc_initialize_unlock(mod)	__rsnd_dvc_initialize_lock(mod, 0)
+static void __rsnd_dvc_initialize_lock(struct rsnd_mod *mod, u32 enable)
+{
+	rsnd_mod_write(mod, DVC_DVUIR, enable);
+}
+
 static void rsnd_dvc_volume_update(struct rsnd_dai_stream *io,
 				   struct rsnd_mod *mod)
 {
@@ -135,49 +149,24 @@
 	return 0;
 }
 
-static int rsnd_dvc_init(struct rsnd_mod *dvc_mod,
+static int rsnd_dvc_init(struct rsnd_mod *mod,
 			 struct rsnd_dai_stream *io,
 			 struct rsnd_priv *priv)
 {
-	struct rsnd_mod *src_mod = rsnd_io_to_mod_src(io);
-	struct device *dev = rsnd_priv_to_dev(priv);
-	int dvc_id = rsnd_mod_id(dvc_mod);
-	int src_id = rsnd_mod_id(src_mod);
-	u32 route[] = {
-		[0] = 0x30000,
-		[1] = 0x30001,
-		[2] = 0x40000,
-		[3] = 0x10000,
-		[4] = 0x20000,
-		[5] = 0x40100
-	};
+	rsnd_mod_hw_start(mod);
 
-	if (src_id >= ARRAY_SIZE(route)) {
-		dev_err(dev, "DVC%d isn't connected to SRC%d\n", dvc_id, src_id);
-		return -EINVAL;
-	}
+	rsnd_dvc_soft_reset(mod);
 
-	rsnd_mod_hw_start(dvc_mod);
+	rsnd_dvc_initialize_lock(mod);
 
-	/*
-	 * fixme
-	 * it doesn't support CTU/MIX
-	 */
-	rsnd_mod_write(dvc_mod, CMD_ROUTE_SLCT, route[src_id]);
+	rsnd_path_parse(priv, io);
 
-	rsnd_mod_write(dvc_mod, DVC_SWRSR, 0);
-	rsnd_mod_write(dvc_mod, DVC_SWRSR, 1);
-
-	rsnd_mod_write(dvc_mod, DVC_DVUIR, 1);
-
-	rsnd_mod_write(dvc_mod, DVC_ADINR, rsnd_get_adinr(dvc_mod, io));
+	rsnd_mod_write(mod, DVC_ADINR, rsnd_get_adinr_bit(mod, io));
 
 	/* ch0/ch1 Volume */
-	rsnd_dvc_volume_update(io, dvc_mod);
+	rsnd_dvc_volume_update(io, mod);
 
-	rsnd_mod_write(dvc_mod, DVC_DVUIR, 0);
-
-	rsnd_adg_set_cmd_timsel_gen2(dvc_mod, io);
+	rsnd_adg_set_cmd_timsel_gen2(mod, io);
 
 	return 0;
 }
@@ -195,6 +184,8 @@
 			  struct rsnd_dai_stream *io,
 			  struct rsnd_priv *priv)
 {
+	rsnd_dvc_initialize_unlock(mod);
+
 	rsnd_mod_write(mod, CMD_CTRL, 0x10);
 
 	return 0;
@@ -341,23 +332,21 @@
 	char name[RSND_DVC_NAME_SIZE];
 	int i, nr, ret;
 
-	rsnd_of_parse_dvc(pdev, of_data, priv);
-
-	nr = info->dvc_info_nr;
-	if (!nr)
-		return 0;
-
 	/* This driver doesn't support Gen1 at this point */
 	if (rsnd_is_gen1(priv)) {
 		dev_warn(dev, "CMD is not supported on Gen1\n");
 		return -EINVAL;
 	}
 
+	rsnd_of_parse_dvc(pdev, of_data, priv);
+
+	nr = info->dvc_info_nr;
+	if (!nr)
+		return 0;
+
 	dvc	= devm_kzalloc(dev, sizeof(*dvc) * nr, GFP_KERNEL);
-	if (!dvc) {
-		dev_err(dev, "CMD allocate failed\n");
+	if (!dvc)
 		return -ENOMEM;
-	}
 
 	priv->dvc_nr	= nr;
 	priv->dvc	= dvc;
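
rsnd_dvc_init() above now delegates CMD routing to rsnd_path_parse(), whose two tables appeared in the core.c hunk earlier: the DVC-only table yields one CMD_ROUTE_SLCT word per SRC id, while the MIX table ORs one contribution bit per SRC feeding the MIX. A standalone recomputation of two cases (illustrative, not driver code):

	/* Illustrative: CMD_ROUTE_SLCT values from the core.c path tables. */
	#include <stdio.h>

	int main(void)
	{
		/* DVC without MIX: one route word per SRC instance id */
		const unsigned int dvc_path[] = {
			0x30000, 0x30001, 0x40000, 0x10000, 0x20000, 0x40100
		};
		/* MIX: contribution bits OR-ed over every SRC feeding it */
		const unsigned int mix_path[] = { 0, 1 << 0, 0, 0, 0, 1 << 8 };

		printf("DVC fed by SRC2:        0x%05x\n", dvc_path[2]);
		printf("MIX fed by SRC1 + SRC5: 0x%05x\n",
		       mix_path[1] | mix_path[5]);
		return 0;
	}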
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 8c7dc51..f04d17b 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -103,6 +103,22 @@
 	regmap_fields_write(gen->regs[reg], rsnd_mod_id(mod), data);
 }
 
+void rsnd_force_write(struct rsnd_priv *priv,
+		      struct rsnd_mod *mod,
+		      enum rsnd_reg reg, u32 data)
+{
+	struct device *dev = rsnd_priv_to_dev(priv);
+	struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+
+	if (!rsnd_is_accessible_reg(priv, gen, reg))
+		return;
+
+	dev_dbg(dev, "w %s[%d] - %4d : %08x\n",
+		rsnd_mod_name(mod), rsnd_mod_id(mod), reg, data);
+
+	regmap_fields_force_write(gen->regs[reg], rsnd_mod_id(mod), data);
+}
+
 void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod,
 	       enum rsnd_reg reg, u32 mask, u32 data)
 {
@@ -200,12 +216,13 @@
 		/* FIXME: it needs SSI_MODE2/3 in the future */
 		RSND_GEN_M_REG(SSI_BUSIF_MODE,	0x0,	0x80),
 		RSND_GEN_M_REG(SSI_BUSIF_ADINR,	0x4,	0x80),
-		RSND_GEN_M_REG(BUSIF_DALIGN,	0x8,	0x80),
+		RSND_GEN_M_REG(SSI_BUSIF_DALIGN,0x8,	0x80),
 		RSND_GEN_M_REG(SSI_CTRL,	0x10,	0x80),
-		RSND_GEN_M_REG(INT_ENABLE,	0x18,	0x80),
+		RSND_GEN_M_REG(SSI_INT_ENABLE,	0x18,	0x80),
 	};
 	struct rsnd_regmap_field_conf conf_scu[] = {
 		RSND_GEN_M_REG(SRC_BUSIF_MODE,	0x0,	0x20),
+		RSND_GEN_M_REG(SRC_BUSIF_DALIGN,0x8,	0x20),
 		RSND_GEN_M_REG(SRC_ROUTE_MODE0,	0xc,	0x20),
 		RSND_GEN_M_REG(SRC_CTRL,	0x10,	0x20),
 		RSND_GEN_M_REG(SRC_INT_ENABLE0,	0x18,	0x20),
@@ -223,6 +240,18 @@
 		RSND_GEN_M_REG(SRC_SRCCR,	0x224,	0x40),
 		RSND_GEN_M_REG(SRC_BSDSR,	0x22c,	0x40),
 		RSND_GEN_M_REG(SRC_BSISR,	0x238,	0x40),
+		RSND_GEN_M_REG(CTU_CTUIR,	0x504,	0x100),
+		RSND_GEN_M_REG(CTU_ADINR,	0x508,	0x100),
+		RSND_GEN_M_REG(MIX_SWRSR,	0xd00,	0x40),
+		RSND_GEN_M_REG(MIX_MIXIR,	0xd04,	0x40),
+		RSND_GEN_M_REG(MIX_ADINR,	0xd08,	0x40),
+		RSND_GEN_M_REG(MIX_MIXMR,	0xd10,	0x40),
+		RSND_GEN_M_REG(MIX_MVPDR,	0xd14,	0x40),
+		RSND_GEN_M_REG(MIX_MDBAR,	0xd18,	0x40),
+		RSND_GEN_M_REG(MIX_MDBBR,	0xd1c,	0x40),
+		RSND_GEN_M_REG(MIX_MDBCR,	0xd20,	0x40),
+		RSND_GEN_M_REG(MIX_MDBDR,	0xd24,	0x40),
+		RSND_GEN_M_REG(MIX_MDBER,	0xd28,	0x40),
 		RSND_GEN_M_REG(DVC_SWRSR,	0xe00,	0x100),
 		RSND_GEN_M_REG(DVC_DVUIR,	0xe04,	0x100),
 		RSND_GEN_M_REG(DVC_ADINR,	0xe08,	0x100),
diff --git a/sound/soc/sh/rcar/mix.c b/sound/soc/sh/rcar/mix.c
new file mode 100644
index 0000000..0d5c102
--- /dev/null
+++ b/sound/soc/sh/rcar/mix.c
@@ -0,0 +1,200 @@
+/*
+ * mix.c
+ *
+ * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "rsnd.h"
+
+#define MIX_NAME_SIZE	16
+#define MIX_NAME "mix"
+
+struct rsnd_mix {
+	struct rsnd_mix_platform_info *info; /* rcar_snd.h */
+	struct rsnd_mod mod;
+};
+
+#define rsnd_mix_nr(priv) ((priv)->mix_nr)
+#define for_each_rsnd_mix(pos, priv, i)					\
+	for ((i) = 0;							\
+	     ((i) < rsnd_mix_nr(priv)) &&				\
+		     ((pos) = (struct rsnd_mix *)(priv)->mix + i);	\
+	     i++)
+
+static void rsnd_mix_soft_reset(struct rsnd_mod *mod)
+{
+	rsnd_mod_write(mod, MIX_SWRSR, 0);
+	rsnd_mod_write(mod, MIX_SWRSR, 1);
+}
+
+#define rsnd_mix_initialize_lock(mod)	__rsnd_mix_initialize_lock(mod, 1)
+#define rsnd_mix_initialize_unlock(mod)	__rsnd_mix_initialize_lock(mod, 0)
+static void __rsnd_mix_initialize_lock(struct rsnd_mod *mod, u32 enable)
+{
+	rsnd_mod_write(mod, MIX_MIXIR, enable);
+}
+
+static void rsnd_mix_volume_update(struct rsnd_dai_stream *io,
+				  struct rsnd_mod *mod)
+{
+
+	/* Disable MIX dB setting */
+	rsnd_mod_write(mod, MIX_MDBER, 0);
+
+	rsnd_mod_write(mod, MIX_MDBAR, 0);
+	rsnd_mod_write(mod, MIX_MDBBR, 0);
+	rsnd_mod_write(mod, MIX_MDBCR, 0);
+	rsnd_mod_write(mod, MIX_MDBDR, 0);
+
+	/* Enable MIX dB setting */
+	rsnd_mod_write(mod, MIX_MDBER, 1);
+}
+
+static int rsnd_mix_init(struct rsnd_mod *mod,
+			 struct rsnd_dai_stream *io,
+			 struct rsnd_priv *priv)
+{
+	rsnd_mod_hw_start(mod);
+
+	rsnd_mix_soft_reset(mod);
+
+	rsnd_mix_initialize_lock(mod);
+
+	rsnd_mod_write(mod, MIX_ADINR, rsnd_get_adinr_chan(mod, io));
+
+	rsnd_path_parse(priv, io);
+
+	/* volume step */
+	rsnd_mod_write(mod, MIX_MIXMR, 0);
+	rsnd_mod_write(mod, MIX_MVPDR, 0);
+
+	rsnd_mix_volume_update(io, mod);
+
+	rsnd_mix_initialize_unlock(mod);
+
+	return 0;
+}
+
+static int rsnd_mix_quit(struct rsnd_mod *mod,
+			 struct rsnd_dai_stream *io,
+			 struct rsnd_priv *priv)
+{
+	rsnd_mod_hw_stop(mod);
+
+	return 0;
+}
+
+static struct rsnd_mod_ops rsnd_mix_ops = {
+	.name		= MIX_NAME,
+	.init		= rsnd_mix_init,
+	.quit		= rsnd_mix_quit,
+};
+
+struct rsnd_mod *rsnd_mix_mod_get(struct rsnd_priv *priv, int id)
+{
+	if (WARN_ON(id < 0 || id >= rsnd_mix_nr(priv)))
+		id = 0;
+
+	return &((struct rsnd_mix *)(priv->mix) + id)->mod;
+}
+
+static void rsnd_of_parse_mix(struct platform_device *pdev,
+			      const struct rsnd_of_data *of_data,
+			      struct rsnd_priv *priv)
+{
+	struct device_node *node;
+	struct rsnd_mix_platform_info *mix_info;
+	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
+	struct device *dev = &pdev->dev;
+	int nr;
+
+	if (!of_data)
+		return;
+
+	node = of_get_child_by_name(dev->of_node, "rcar_sound,mix");
+	if (!node)
+		return;
+
+	nr = of_get_child_count(node);
+	if (!nr)
+		goto rsnd_of_parse_mix_end;
+
+	mix_info = devm_kzalloc(dev,
+				sizeof(struct rsnd_mix_platform_info) * nr,
+				GFP_KERNEL);
+	if (!mix_info)
+		goto rsnd_of_parse_mix_end;
+
+	info->mix_info		= mix_info;
+	info->mix_info_nr	= nr;
+
+rsnd_of_parse_mix_end:
+	of_node_put(node);
+
+}
+
+int rsnd_mix_probe(struct platform_device *pdev,
+		   const struct rsnd_of_data *of_data,
+		   struct rsnd_priv *priv)
+{
+	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
+	struct device *dev = rsnd_priv_to_dev(priv);
+	struct rsnd_mix *mix;
+	struct clk *clk;
+	char name[MIX_NAME_SIZE];
+	int i, nr, ret;
+
+	/* This driver doesn't support Gen1 at this point */
+	if (rsnd_is_gen1(priv)) {
+		dev_warn(dev, "MIX is not supported on Gen1\n");
+		return -EINVAL;
+	}
+
+	rsnd_of_parse_mix(pdev, of_data, priv);
+
+	nr = info->mix_info_nr;
+	if (!nr)
+		return 0;
+
+	mix	= devm_kzalloc(dev, sizeof(*mix) * nr, GFP_KERNEL);
+	if (!mix)
+		return -ENOMEM;
+
+	priv->mix_nr	= nr;
+	priv->mix	= mix;
+
+	for_each_rsnd_mix(mix, priv, i) {
+		snprintf(name, MIX_NAME_SIZE, "%s.%d",
+			 MIX_NAME, i);
+
+		clk = devm_clk_get(dev, name);
+		if (IS_ERR(clk))
+			return PTR_ERR(clk);
+
+		mix->info = &info->mix_info[i];
+
+		ret = rsnd_mod_init(priv, &mix->mod, &rsnd_mix_ops,
+				    clk, RSND_MOD_MIX, i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+void rsnd_mix_remove(struct platform_device *pdev,
+		     struct rsnd_priv *priv)
+{
+	struct rsnd_mix *mix;
+	int i;
+
+	for_each_rsnd_mix(mix, priv, i) {
+		rsnd_mod_quit(&mix->mod);
+	}
+}
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 09fcc54..7a0e52b 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -47,6 +47,18 @@
 	RSND_REG_SCU_SYS_STATUS0,
 	RSND_REG_SCU_SYS_INT_EN0,
 	RSND_REG_CMD_ROUTE_SLCT,
+	RSND_REG_CTU_CTUIR,
+	RSND_REG_CTU_ADINR,
+	RSND_REG_MIX_SWRSR,
+	RSND_REG_MIX_MIXIR,
+	RSND_REG_MIX_ADINR,
+	RSND_REG_MIX_MIXMR,
+	RSND_REG_MIX_MVPDR,
+	RSND_REG_MIX_MDBAR,
+	RSND_REG_MIX_MDBBR,
+	RSND_REG_MIX_MDBCR,
+	RSND_REG_MIX_MDBDR,
+	RSND_REG_MIX_MDBER,
 	RSND_REG_DVC_SWRSR,
 	RSND_REG_DVC_DVUIR,
 	RSND_REG_DVC_ADINR,
@@ -99,6 +111,7 @@
 	RSND_REG_SHARE26,
 	RSND_REG_SHARE27,
 	RSND_REG_SHARE28,
+	RSND_REG_SHARE29,
 
 	RSND_REG_MAX,
 };
@@ -119,7 +132,7 @@
 #define RSND_REG_SSI_CTRL		RSND_REG_SHARE02
 #define RSND_REG_SSI_BUSIF_MODE		RSND_REG_SHARE03
 #define RSND_REG_SSI_BUSIF_ADINR	RSND_REG_SHARE04
-#define RSND_REG_INT_ENABLE		RSND_REG_SHARE05
+#define RSND_REG_SSI_INT_ENABLE		RSND_REG_SHARE05
 #define RSND_REG_SRC_BSDSR		RSND_REG_SHARE06
 #define RSND_REG_SRC_BSISR		RSND_REG_SHARE07
 #define RSND_REG_DIV_EN			RSND_REG_SHARE08
@@ -136,13 +149,14 @@
 #define RSND_REG_AUDIO_CLK_SEL2		RSND_REG_SHARE19
 #define RSND_REG_CMD_CTRL		RSND_REG_SHARE20
 #define RSND_REG_CMDOUT_TIMSEL		RSND_REG_SHARE21
-#define RSND_REG_BUSIF_DALIGN		RSND_REG_SHARE22
+#define RSND_REG_SSI_BUSIF_DALIGN	RSND_REG_SHARE22
 #define RSND_REG_DVC_VRCTR		RSND_REG_SHARE23
 #define RSND_REG_DVC_VRPDR		RSND_REG_SHARE24
 #define RSND_REG_DVC_VRDBR		RSND_REG_SHARE25
 #define RSND_REG_SCU_SYS_STATUS1	RSND_REG_SHARE26
 #define RSND_REG_SCU_SYS_INT_EN1	RSND_REG_SHARE27
 #define RSND_REG_SRC_INT_ENABLE0	RSND_REG_SHARE28
+#define RSND_REG_SRC_BUSIF_DALIGN	RSND_REG_SHARE29
 
 struct rsnd_of_data;
 struct rsnd_priv;
@@ -157,27 +171,28 @@
 	rsnd_read(rsnd_mod_to_priv(m), m, RSND_REG_##r)
 #define rsnd_mod_write(m, r, d) \
 	rsnd_write(rsnd_mod_to_priv(m), m, RSND_REG_##r, d)
+#define rsnd_mod_force_write(m, r, d) \
+	rsnd_force_write(rsnd_mod_to_priv(m), m, RSND_REG_##r, d)
 #define rsnd_mod_bset(m, r, s, d) \
 	rsnd_bset(rsnd_mod_to_priv(m), m, RSND_REG_##r, s, d)
 
 u32 rsnd_read(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg);
 void rsnd_write(struct rsnd_priv *priv, struct rsnd_mod *mod,
 		enum rsnd_reg reg, u32 data);
+void rsnd_force_write(struct rsnd_priv *priv, struct rsnd_mod *mod,
+		enum rsnd_reg reg, u32 data);
 void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg,
 		    u32 mask, u32 data);
-u32 rsnd_get_adinr(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
+u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
+u32 rsnd_get_adinr_chan(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
+u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
+void rsnd_path_parse(struct rsnd_priv *priv,
+		     struct rsnd_dai_stream *io);
 
 /*
  *	R-Car DMA
  */
 struct rsnd_dma;
-struct rsnd_dma_ops {
-	void (*start)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
-	void (*stop)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
-	int (*init)(struct rsnd_dai_stream *io, struct rsnd_dma *dma, int id,
-		    struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
-	void (*quit)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
-};
 
 struct rsnd_dmaen {
 	struct dma_chan		*chan;
@@ -217,6 +232,8 @@
  */
 enum rsnd_mod_type {
 	RSND_MOD_DVC = 0,
+	RSND_MOD_MIX,
+	RSND_MOD_CTU,
 	RSND_MOD_SRC,
 	RSND_MOD_SSI,
 	RSND_MOD_MAX,
@@ -312,7 +329,7 @@
 
 #define rsnd_mod_to_priv(mod) ((mod)->priv)
 #define rsnd_mod_to_dma(mod) (&(mod)->dma)
-#define rsnd_mod_id(mod) ((mod)->id)
+#define rsnd_mod_id(mod) ((mod) ? (mod)->id : -1)
 #define rsnd_mod_hw_start(mod)	clk_enable((mod)->clk)
 #define rsnd_mod_hw_stop(mod)	clk_disable((mod)->clk)
 
@@ -345,9 +362,12 @@
 	int byte_per_period;
 	int next_period_byte;
 };
-#define rsnd_io_to_mod_ssi(io)	((io)->mod[RSND_MOD_SSI])
-#define rsnd_io_to_mod_src(io)	((io)->mod[RSND_MOD_SRC])
-#define rsnd_io_to_mod_dvc(io)	((io)->mod[RSND_MOD_DVC])
+#define rsnd_io_to_mod(io, i)	((i) < RSND_MOD_MAX ? (io)->mod[(i)] : NULL)
+#define rsnd_io_to_mod_ssi(io)	rsnd_io_to_mod((io), RSND_MOD_SSI)
+#define rsnd_io_to_mod_src(io)	rsnd_io_to_mod((io), RSND_MOD_SRC)
+#define rsnd_io_to_mod_ctu(io)	rsnd_io_to_mod((io), RSND_MOD_CTU)
+#define rsnd_io_to_mod_mix(io)	rsnd_io_to_mod((io), RSND_MOD_MIX)
+#define rsnd_io_to_mod_dvc(io)	rsnd_io_to_mod((io), RSND_MOD_DVC)
 #define rsnd_io_to_rdai(io)	((io)->rdai)
 #define rsnd_io_to_priv(io)	(rsnd_rdai_to_priv(rsnd_io_to_rdai(io)))
 #define rsnd_io_is_play(io)	(&rsnd_io_to_rdai(io)->playback == io)
@@ -437,12 +457,6 @@
 	void *gen;
 
 	/*
-	 * below value will be filled on rsnd_src_probe()
-	 */
-	void *src;
-	int src_nr;
-
-	/*
 	 * below value will be filled on rsnd_adg_probe()
 	 */
 	void *adg;
@@ -459,6 +473,24 @@
 	int ssi_nr;
 
 	/*
+	 * below value will be filled on rsnd_src_probe()
+	 */
+	void *src;
+	int src_nr;
+
+	/*
+	 * below value will be filled on rsnd_ctu_probe()
+	 */
+	void *ctu;
+	int ctu_nr;
+
+	/*
+	 * below value will be filled on rsnd_mix_probe()
+	 */
+	void *mix;
+	int mix_nr;
+
+	/*
 	 * below value will be filled on rsnd_dvc_probe()
 	 */
 	void *dvc;
@@ -531,6 +563,19 @@
 		     u32 max);
 
 /*
+ *	R-Car SSI
+ */
+int rsnd_ssi_probe(struct platform_device *pdev,
+		   const struct rsnd_of_data *of_data,
+		   struct rsnd_priv *priv);
+void rsnd_ssi_remove(struct platform_device *pdev,
+		     struct rsnd_priv *priv);
+struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id);
+int rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod);
+int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod);
+int rsnd_ssi_use_busif(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
+
+/*
  *	R-Car SRC
  */
 int rsnd_src_probe(struct platform_device *pdev,
@@ -550,20 +595,27 @@
 int rsnd_src_ssi_irq_enable(struct rsnd_mod *ssi_mod);
 int rsnd_src_ssi_irq_disable(struct rsnd_mod *ssi_mod);
 
-#define rsnd_src_nr(priv) ((priv)->src_nr)
-
 /*
- *	R-Car SSI
+ *	R-Car CTU
  */
-int rsnd_ssi_probe(struct platform_device *pdev,
+int rsnd_ctu_probe(struct platform_device *pdev,
 		   const struct rsnd_of_data *of_data,
 		   struct rsnd_priv *priv);
-void rsnd_ssi_remove(struct platform_device *pdev,
+
+void rsnd_ctu_remove(struct platform_device *pdev,
 		     struct rsnd_priv *priv);
-struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id);
-int rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod);
-int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod);
-int rsnd_ssi_use_busif(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
+struct rsnd_mod *rsnd_ctu_mod_get(struct rsnd_priv *priv, int id);
+
+/*
+ *	R-Car MIX
+ */
+int rsnd_mix_probe(struct platform_device *pdev,
+		   const struct rsnd_of_data *of_data,
+		   struct rsnd_priv *priv);
+
+void rsnd_mix_remove(struct platform_device *pdev,
+		     struct rsnd_priv *priv);
+struct rsnd_mod *rsnd_mix_mod_get(struct rsnd_priv *priv, int id);
 
 /*
  *	R-Car DVC
@@ -575,7 +627,4 @@
 		     struct rsnd_priv *priv);
 struct rsnd_mod *rsnd_dvc_mod_get(struct rsnd_priv *priv, int id);
 
-#define rsnd_dvc_nr(priv) ((priv)->dvc_nr)
-
-
 #endif
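
The rsnd_io_to_mod() and rsnd_mod_id() reworks above make optional mods safe to query: an absent mod now reads as id -1 instead of dereferencing NULL, which is what lets the new debug prints in dma.c walk the whole chain unguarded. A standalone sketch of the idea (struct and names illustrative):

	/* Illustrative: a NULL-tolerant accessor in the style of rsnd_mod_id(). */
	#include <stdio.h>

	struct mod_ex { int id; };

	#define mod_ex_id(mod) ((mod) ? (mod)->id : -1)

	int main(void)
	{
		struct mod_ex src = { .id = 2 };
		struct mod_ex *dvc = NULL;	/* optional mod, absent here */

		printf("src[%d] dvc[%d]\n", mod_ex_id(&src), mod_ex_id(dvc));
		return 0;
	}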
diff --git a/sound/soc/sh/rcar/rsrc-card.c b/sound/soc/sh/rcar/rsrc-card.c
index 84e9357..d61db9c 100644
--- a/sound/soc/sh/rcar/rsrc-card.c
+++ b/sound/soc/sh/rcar/rsrc-card.c
@@ -41,6 +41,7 @@
 static const struct of_device_id rsrc_card_of_match[] = {
 	{ .compatible = "renesas,rsrc-card,lager",	.data = &routes_of_ssi0_ak4642 },
 	{ .compatible = "renesas,rsrc-card,koelsch",	.data = &routes_of_ssi0_ak4642 },
+	{ .compatible = "renesas,rsrc-card", },
 	{},
 };
 MODULE_DEVICE_TABLE(of, rsrc_card_of_match);
@@ -242,8 +243,15 @@
 		snd_soc_of_get_dai_name(np, &dai_link->codec_dai_name);
 
 		/* additional name prefix */
-		priv->codec_conf.of_node	= dai_link->codec_of_node;
-		priv->codec_conf.name_prefix	= of_data->prefix;
+		if (of_data) {
+			priv->codec_conf.of_node = dai_link->codec_of_node;
+			priv->codec_conf.name_prefix = of_data->prefix;
+		} else {
+			snd_soc_of_parse_audio_prefix(&priv->snd_card,
+						      &priv->codec_conf,
+						      dai_link->codec_of_node,
+						      "audio-prefix");
+		}
 
 		/* set dai_name */
 		snprintf(dai_props->dai_name, DAI_NAME_NUM, "be.%s",
@@ -361,8 +369,14 @@
 	priv->snd_card.num_links		= num;
 	priv->snd_card.codec_conf		= &priv->codec_conf;
 	priv->snd_card.num_configs		= 1;
-	priv->snd_card.of_dapm_routes		= of_data->routes;
-	priv->snd_card.num_of_dapm_routes	= of_data->num_routes;
+
+	if (of_data) {
+		priv->snd_card.of_dapm_routes		= of_data->routes;
+		priv->snd_card.num_of_dapm_routes	= of_data->num_routes;
+	} else {
+		snd_soc_of_parse_audio_routing(&priv->snd_card,
+					       "audio-routing");
+	}
 
 	/* Parse the card name from DT */
 	snd_soc_of_parse_card_name(&priv->snd_card, "card-name");
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index c61c171..89a18e1 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -30,6 +30,7 @@
 
 #define RSND_SRC_NAME_SIZE 16
 
+#define rsnd_src_nr(priv) ((priv)->src_nr)
 #define rsnd_enable_sync_convert(src) ((src)->sen.val)
 #define rsnd_src_of_node(priv) \
 	of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,src")
@@ -117,6 +118,20 @@
 /*
  *		Gen1/Gen2 common functions
  */
+static void rsnd_src_soft_reset(struct rsnd_mod *mod)
+{
+	rsnd_mod_write(mod, SRC_SWRSR, 0);
+	rsnd_mod_write(mod, SRC_SWRSR, 1);
+}
+
+#define rsnd_src_initialize_lock(mod)	__rsnd_src_initialize_lock(mod, 1)
+#define rsnd_src_initialize_unlock(mod)	__rsnd_src_initialize_lock(mod, 0)
+static void __rsnd_src_initialize_lock(struct rsnd_mod *mod, u32 enable)
+{
+	rsnd_mod_write(mod, SRC_SRCIR, enable);
+}
+
 static struct dma_chan *rsnd_src_dma_req(struct rsnd_dai_stream *io,
 					 struct rsnd_mod *mod)
 {
@@ -133,7 +148,6 @@
 			int use_busif)
 {
 	struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
-	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
 	int ssi_id = rsnd_mod_id(ssi_mod);
 
 	/*
@@ -170,27 +184,14 @@
 	 * DMA settings for SSIU
 	 */
 	if (use_busif) {
-		u32 val = 0x76543210;
-		u32 mask = ~0;
+		u32 val = rsnd_get_dalign(ssi_mod, io);
 
 		rsnd_mod_write(ssi_mod, SSI_BUSIF_ADINR,
-			       rsnd_get_adinr(ssi_mod, io));
+			       rsnd_get_adinr_bit(ssi_mod, io));
 		rsnd_mod_write(ssi_mod, SSI_BUSIF_MODE,  1);
 		rsnd_mod_write(ssi_mod, SSI_CTRL, 0x1);
 
-		mask <<= runtime->channels * 4;
-		val = val & mask;
-
-		switch (runtime->sample_bits) {
-		case 16:
-			val |= 0x67452301 & ~mask;
-			break;
-		case 32:
-			val |= 0x76543210 & ~mask;
-			break;
-		}
-		rsnd_mod_write(ssi_mod, BUSIF_DALIGN, val);
-
+		rsnd_mod_write(ssi_mod, SSI_BUSIF_DALIGN, val);
 	}
 
 	return 0;
@@ -215,10 +216,9 @@
 		return 0;
 
 	/* enable SSI interrupt if Gen2 */
-	if (rsnd_ssi_is_dma_mode(ssi_mod))
-		rsnd_mod_write(ssi_mod, INT_ENABLE, 0x0e000000);
-	else
-		rsnd_mod_write(ssi_mod, INT_ENABLE, 0x0f000000);
+	rsnd_mod_write(ssi_mod, SSI_INT_ENABLE,
+		       rsnd_ssi_is_dma_mode(ssi_mod) ?
+		       0x0e000000 : 0x0f000000);
 
 	return 0;
 }
@@ -231,7 +231,7 @@
 		return 0;
 
 	/* disable SSI interrupt if Gen2 */
-	rsnd_mod_write(ssi_mod, INT_ENABLE, 0x00000000);
+	rsnd_mod_write(ssi_mod, SSI_INT_ENABLE, 0x00000000);
 
 	return 0;
 }
@@ -294,12 +294,8 @@
 	if (convert_rate)
 		fsrate = 0x0400000 / convert_rate * runtime->rate;
 
-	/* set/clear soft reset */
-	rsnd_mod_write(mod, SRC_SWRSR, 0);
-	rsnd_mod_write(mod, SRC_SWRSR, 1);
-
 	/* Set channel number and output bit length */
-	rsnd_mod_write(mod, SRC_ADINR, rsnd_get_adinr(mod, io));
+	rsnd_mod_write(mod, SRC_ADINR, rsnd_get_adinr_bit(mod, io));
 
 	/* Enable the initial value of IFS */
 	if (fsrate) {
@@ -358,17 +354,15 @@
 
 	rsnd_mod_hw_start(mod);
 
+	rsnd_src_soft_reset(mod);
+
+	rsnd_src_initialize_lock(mod);
+
 	src->err = 0;
 
 	/* reset sync convert_rate */
 	src->sync.val = 0;
 
-	/*
-	 * Initialize the operation of the SRC internal circuits
-	 * see rsnd_src_start()
-	 */
-	rsnd_mod_write(mod, SRC_SRCIR, 1);
-
 	return 0;
 }
 
@@ -395,11 +389,7 @@
 
 static int rsnd_src_start(struct rsnd_mod *mod)
 {
-	/*
-	 * Cancel the initialization and operate the SRC function
-	 * see rsnd_src_init()
-	 */
-	rsnd_mod_write(mod, SRC_SRCIR, 0);
+	rsnd_src_initialize_unlock(mod);
 
 	return 0;
 }
@@ -617,6 +607,14 @@
 		int_val = 0;
 	}
 
+	/*
+	 * WORKAROUND
+	 *
+	 * ignore the overflow error when rsnd_enable_sync_convert() is set
+	 */
+	if (rsnd_enable_sync_convert(src))
+		sys_int_val = sys_int_val & 0xffff;
+
 	rsnd_mod_write(mod, SRC_INT_ENABLE0, int_val);
 	rsnd_mod_bset(mod, SCU_SYS_INT_EN0, sys_int_mask, sys_int_val);
 	rsnd_mod_bset(mod, SCU_SYS_INT_EN1, sys_int_mask, sys_int_val);
@@ -632,11 +630,22 @@
 
 static bool rsnd_src_error_record_gen2(struct rsnd_mod *mod)
 {
-	u32 val = OUF_SRC(rsnd_mod_id(mod));
+	struct rsnd_src *src = rsnd_mod_to_src(mod);
+	u32 val0, val1;
 	bool ret = false;
 
-	if ((rsnd_mod_read(mod, SCU_SYS_STATUS0) & val) ||
-	    (rsnd_mod_read(mod, SCU_SYS_STATUS1) & val)) {
+	val0 = val1 = OUF_SRC(rsnd_mod_id(mod));
+
+	/*
+	 * WORKAROUND
+	 *
+	 * ignore the overflow error when rsnd_enable_sync_convert() is set
+	 */
+	if (rsnd_enable_sync_convert(src))
+		val0 = val0 & 0xffff;
+
+	if ((rsnd_mod_read(mod, SCU_SYS_STATUS0) & val0) ||
+	    (rsnd_mod_read(mod, SCU_SYS_STATUS1) & val1)) {
 		struct rsnd_src *src = rsnd_mod_to_src(mod);
 
 		src->err++;
@@ -652,7 +661,20 @@
 static int _rsnd_src_start_gen2(struct rsnd_mod *mod,
 				struct rsnd_dai_stream *io)
 {
-	u32 val = rsnd_io_to_mod_dvc(io) ? 0x01 : 0x11;
+	struct rsnd_src *src = rsnd_mod_to_src(mod);
+	u32 val;
+
+	val = rsnd_get_dalign(mod, io);
+
+	rsnd_mod_write(mod, SRC_BUSIF_DALIGN, val);
+
+	/*
+	 * WORKAROUND
+	 *
+	 * Enable SRC output if you want to use sync convert together with DVC
+	 */
+	val = (rsnd_io_to_mod_dvc(io) && !rsnd_enable_sync_convert(src)) ?
+		0x01 : 0x11;
 
 	rsnd_mod_write(mod, SRC_CTRL, val);
 
@@ -922,13 +944,6 @@
 		return 0;
 
 	/*
-	 * We can't use SRC sync convert
-	 * if it has DVC
-	 */
-	if (rsnd_io_to_mod_dvc(io))
-		return 0;
-
-	/*
 	 * enable sync convert
 	 */
 	ret = rsnd_kctrl_new_s(mod, io, rtd,
@@ -1047,10 +1062,8 @@
 		return 0;
 
 	src	= devm_kzalloc(dev, sizeof(*src) * nr, GFP_KERNEL);
-	if (!src) {
-		dev_err(dev, "SRC allocate failed\n");
+	if (!src)
 		return -ENOMEM;
-	}
 
 	priv->src_nr	= nr;
 	priv->src	= src;
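
src.c above adopts the same initialize-lock bracket that ctu.c, mix.c and dvc.c use: the module's init register is held at 1 while the block is programmed and released to 0 once the stream may run. A condensed sketch of the shared pattern (register and function names illustrative):

	/* Sketch: *IR = 1 freezes the block for setup, *IR = 0 releases it;
	 * each module wraps this pair in lock/unlock macros. */
	#define rsnd_example_initialize_lock(mod)	__rsnd_example_lock(mod, 1)
	#define rsnd_example_initialize_unlock(mod)	__rsnd_example_lock(mod, 0)
	static void __rsnd_example_lock(struct rsnd_mod *mod, u32 enable)
	{
		rsnd_mod_write(mod, EXAMPLE_IR, enable);
	}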
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 2fbe59f..d45b9a7 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -770,10 +770,8 @@
 	 */
 	nr	= info->ssi_info_nr;
 	ssi	= devm_kzalloc(dev, sizeof(*ssi) * nr, GFP_KERNEL);
-	if (!ssi) {
-		dev_err(dev, "SSI allocate failed\n");
+	if (!ssi)
 		return -ENOMEM;
-	}
 
 	priv->ssi	= ssi;
 	priv->ssi_nr	= nr;
diff --git a/sound/soc/sh/ssi.c b/sound/soc/sh/ssi.c
index ab13146..89ed1b1 100644
--- a/sound/soc/sh/ssi.c
+++ b/sound/soc/sh/ssi.c
@@ -385,14 +385,9 @@
 
 static int sh4_soc_dai_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_component(&pdev->dev, &sh4_ssi_component,
-					  sh4_ssi_dai, ARRAY_SIZE(sh4_ssi_dai));
-}
-
-static int sh4_soc_dai_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_component(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_component(&pdev->dev, &sh4_ssi_component,
+					       sh4_ssi_dai,
+					       ARRAY_SIZE(sh4_ssi_dai));
 }
 
 static struct platform_driver sh4_ssi_driver = {
@@ -401,7 +396,6 @@
 	},
 
 	.probe = sh4_soc_dai_probe,
-	.remove = sh4_soc_dai_remove,
 };
 
 module_platform_driver(sh4_ssi_driver);
diff --git a/sound/soc/soc-ac97.c b/sound/soc/soc-ac97.c
index 08d7259..d40efc9 100644
--- a/sound/soc/soc-ac97.c
+++ b/sound/soc/soc-ac97.c
@@ -85,10 +85,19 @@
 /**
  * snd_soc_new_ac97_codec - initialise AC97 device
  * @codec: audio codec
+ * @id: The expected device ID
+ * @id_mask: Mask that is applied to the device ID before comparing with @id
  *
  * Initialises AC97 codec resources for use by ad-hoc devices only.
+ *
+ * If @id is not 0 this function will reset the device, then read the ID from
+ * the device and check if it matches the expected ID. If it doesn't match, an
+ * error will be returned and the device will not be registered.
+ *
+ * Returns: an ERR_PTR() on failure or a valid snd_ac97 struct on success.
  */
-struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
+struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
+	unsigned int id, unsigned int id_mask)
 {
 	struct snd_ac97 *ac97;
 	int ret;
@@ -97,13 +106,24 @@
 	if (IS_ERR(ac97))
 		return ac97;
 
-	ret = device_add(&ac97->dev);
-	if (ret) {
-		put_device(&ac97->dev);
-		return ERR_PTR(ret);
+	if (id) {
+		ret = snd_ac97_reset(ac97, false, id, id_mask);
+		if (ret < 0) {
+			dev_err(codec->dev, "Failed to reset AC97 device: %d\n",
+				ret);
+			goto err_put_device;
+		}
 	}
 
+	ret = device_add(&ac97->dev);
+	if (ret)
+		goto err_put_device;
+
 	return ac97;
+
+err_put_device:
+	put_device(&ac97->dev);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(snd_soc_new_ac97_codec);
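
A hedged sketch of how a codec driver might call the extended snd_soc_new_ac97_codec() above; the id and id_mask values are placeholders, not taken from any real codec:

	/* Sketch only: verify the AC97 device ID during codec probe.
	 * 0x41430000 / 0xffff0000 are placeholder id/id_mask values. */
	static int example_codec_probe(struct snd_soc_codec *codec)
	{
		struct snd_ac97 *ac97;

		ac97 = snd_soc_new_ac97_codec(codec, 0x41430000, 0xffff0000);
		if (IS_ERR(ac97))
			return PTR_ERR(ac97);

		snd_soc_codec_set_drvdata(codec, ac97);
		return 0;
	}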
 
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 0e1e69c..6173d15 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -654,10 +654,12 @@
 
 	/* suspend all CODECs */
 	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
+		struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+
 		/* If there are paths active then the CODEC will be held with
 		 * bias _ON and should not be suspended. */
 		if (!codec->suspended) {
-			switch (codec->dapm.bias_level) {
+			switch (snd_soc_dapm_get_bias_level(dapm)) {
 			case SND_SOC_BIAS_STANDBY:
 				/*
 				 * If the CODEC is capable of idle
@@ -665,7 +667,7 @@
 				 * means it's doing something,
 				 * otherwise fall through.
 				 */
-				if (codec->dapm.idle_bias_off) {
+				if (dapm->idle_bias_off) {
 					dev_dbg(codec->dev,
 						"ASoC: idle_bias_off CODEC on over suspend\n");
 					break;
@@ -978,7 +980,7 @@
 
 static void soc_remove_component(struct snd_soc_component *component)
 {
-	if (!component->probed)
+	if (!component->card)
 		return;
 
 	/* This is a HACK and will be removed soon */
@@ -991,7 +993,7 @@
 	snd_soc_dapm_free(snd_soc_component_get_dapm(component));
 
 	soc_cleanup_component_debugfs(component);
-	component->probed = 0;
+	component->card = NULL;
 	module_put(component->dev->driver->owner);
 }
 
@@ -1102,16 +1104,26 @@
 	struct snd_soc_dai *dai;
 	int ret;
 
-	if (component->probed)
+	if (!strcmp(component->name, "snd-soc-dummy"))
 		return 0;
 
+	if (component->card) {
+		if (component->card != card) {
+			dev_err(component->dev,
+				"Trying to bind component to card \"%s\" but is already bound to card \"%s\"\n",
+				card->name, component->card->name);
+			return -ENODEV;
+		}
+		return 0;
+	}
+
+	if (!try_module_get(component->dev->driver->owner))
+		return -ENODEV;
+
 	component->card = card;
 	dapm->card = card;
 	soc_set_name_prefix(card, component);
 
-	if (!try_module_get(component->dev->driver->owner))
-		return -ENODEV;
-
 	soc_init_component_debugfs(component);
 
 	if (component->dapm_widgets) {
@@ -1155,7 +1167,6 @@
 		snd_soc_dapm_add_routes(dapm, component->dapm_routes,
 					component->num_dapm_routes);
 
-	component->probed = 1;
 	list_add(&dapm->list, &card->dapm_list);
 
 	/* This is a HACK and will be removed soon */
@@ -1166,6 +1177,7 @@
 
 err_probe:
 	soc_cleanup_component_debugfs(component);
+	component->card = NULL;
 	module_put(component->dev->driver->owner);
 
 	return ret;
@@ -1449,7 +1461,7 @@
 		rtd->dev_registered = 0;
 	}
 
-	if (component && component->probed)
+	if (component)
 		soc_remove_component(component);
 }
 
@@ -2128,7 +2140,7 @@
 /**
  * snd_soc_dai_set_bclk_ratio - configure BCLK to sample rate ratio.
  * @dai: DAI
- * @ratio Ratio of BCLK to Sample rate.
+ * @ratio: Ratio of BCLK to Sample rate.
  *
  * Configures the DAI for a preset BCLK to sample rate ratio.
  */
@@ -2652,10 +2664,7 @@
 	component->probe = component->driver->probe;
 	component->remove = component->driver->remove;
 
-	if (!component->dapm_ptr)
-		component->dapm_ptr = &component->dapm;
-
-	dapm = component->dapm_ptr;
+	dapm = &component->dapm;
 	dapm->dev = dev;
 	dapm->component = component;
 	dapm->bias_level = SND_SOC_BIAS_OFF;
@@ -2799,6 +2808,7 @@
 /**
  * snd_soc_unregister_component - Unregister a component from the ASoC core
  *
+ * @dev: The device to unregister
  */
 void snd_soc_unregister_component(struct device *dev)
 {
@@ -2839,7 +2849,7 @@
  * snd_soc_add_platform - Add a platform to the ASoC core
  * @dev: The parent device for the platform
  * @platform: The platform to add
- * @platform_driver: The driver for the platform
+ * @platform_drv: The driver for the platform
  */
 int snd_soc_add_platform(struct device *dev, struct snd_soc_platform *platform,
 		const struct snd_soc_platform_driver *platform_drv)
@@ -2878,7 +2888,8 @@
 /**
  * snd_soc_register_platform - Register a platform with the ASoC core
  *
- * @platform: platform to register
+ * @dev: The device for the platform
+ * @platform_drv: The driver for the platform
  */
 int snd_soc_register_platform(struct device *dev,
 		const struct snd_soc_platform_driver *platform_drv)
@@ -2939,7 +2950,7 @@
 /**
  * snd_soc_unregister_platform - Unregister a platform from the ASoC core
  *
- * @platform: platform to unregister
+ * @dev: platform to unregister
  */
 void snd_soc_unregister_platform(struct device *dev)
 {
@@ -3030,13 +3041,17 @@
 /**
  * snd_soc_register_codec - Register a codec with the ASoC core
  *
- * @codec: codec to register
+ * @dev: The parent device for this codec
+ * @codec_drv: Codec driver
+ * @dai_drv: The associated DAI driver
+ * @num_dai: Number of DAIs
  */
 int snd_soc_register_codec(struct device *dev,
 			   const struct snd_soc_codec_driver *codec_drv,
 			   struct snd_soc_dai_driver *dai_drv,
 			   int num_dai)
 {
+	struct snd_soc_dapm_context *dapm;
 	struct snd_soc_codec *codec;
 	struct snd_soc_dai *dai;
 	int ret, i;
@@ -3047,7 +3062,6 @@
 	if (codec == NULL)
 		return -ENOMEM;
 
-	codec->component.dapm_ptr = &codec->dapm;
 	codec->component.codec = codec;
 
 	ret = snd_soc_component_initialize(&codec->component,
@@ -3077,12 +3091,14 @@
 	if (codec_drv->read)
 		codec->component.read = snd_soc_codec_drv_read;
 	codec->component.ignore_pmdown_time = codec_drv->ignore_pmdown_time;
-	codec->dapm.idle_bias_off = codec_drv->idle_bias_off;
-	codec->dapm.suspend_bias_off = codec_drv->suspend_bias_off;
+
+	dapm = snd_soc_codec_get_dapm(codec);
+	dapm->idle_bias_off = codec_drv->idle_bias_off;
+	dapm->suspend_bias_off = codec_drv->suspend_bias_off;
 	if (codec_drv->seq_notifier)
-		codec->dapm.seq_notifier = codec_drv->seq_notifier;
+		dapm->seq_notifier = codec_drv->seq_notifier;
 	if (codec_drv->set_bias_level)
-		codec->dapm.set_bias_level = snd_soc_codec_set_bias_level;
+		dapm->set_bias_level = snd_soc_codec_set_bias_level;
 	codec->dev = dev;
 	codec->driver = codec_drv;
 	codec->component.val_bytes = codec_drv->reg_word_size;
@@ -3129,7 +3145,7 @@
 /**
  * snd_soc_unregister_codec - Unregister a codec from the ASoC core
  *
- * @codec: codec to unregister
+ * @dev: codec to unregister
  */
 void snd_soc_unregister_codec(struct device *dev)
 {
@@ -3304,6 +3320,26 @@
 }
 EXPORT_SYMBOL_GPL(snd_soc_of_parse_tdm_slot);
 
+void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+				   struct snd_soc_codec_conf *codec_conf,
+				   struct device_node *of_node,
+				   const char *propname)
+{
+	struct device_node *np = card->dev->of_node;
+	const char *str;
+	int ret;
+
+	ret = of_property_read_string(np, propname, &str);
+	if (ret < 0) {
+		/* having no prefix is not an error */
+		return;
+	}
+
+	codec_conf->of_node	= of_node;
+	codec_conf->name_prefix	= str;
+}
+EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_prefix);
+
 int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
 				   const char *propname)
 {
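
snd_soc_of_parse_audio_prefix() above reads a string property from the card's DT node; the rsrc-card.c hunk earlier in this series calls it with "audio-prefix". A hedged sketch pairing that call with the matching device-tree fragment (node contents illustrative):

	/*
	 * Card node in the device tree (illustrative):
	 *
	 *	sound {
	 *		compatible = "renesas,rsrc-card";
	 *		audio-prefix = "ak4642";
	 *	};
	 */
	snd_soc_of_parse_audio_prefix(&priv->snd_card, &priv->codec_conf,
				      dai_link->codec_of_node, "audio-prefix");
	/* DAPM routes can then refer to the codec pins as "ak4642 ..." */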
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index e0de807..f4bf21a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -47,6 +47,13 @@
 
 #define DAPM_UPDATE_STAT(widget, val) widget->dapm->card->dapm_stats.val++;
 
+#define SND_SOC_DAPM_DIR_REVERSE(x) (((x) == SND_SOC_DAPM_DIR_IN) ? \
+	SND_SOC_DAPM_DIR_OUT : SND_SOC_DAPM_DIR_IN)
+
+#define snd_soc_dapm_for_each_direction(dir) \
+	for ((dir) = SND_SOC_DAPM_DIR_IN; (dir) <= SND_SOC_DAPM_DIR_OUT; \
+		(dir)++)
+
 static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *wsource, struct snd_soc_dapm_widget *wsink,
 	const char *control,
@@ -167,45 +174,59 @@
 }
 
 /*
- * dapm_widget_invalidate_input_paths() - Invalidate the cached number of input
- *  paths
- * @w: The widget for which to invalidate the cached number of input paths
- *
- * The function resets the cached number of inputs for the specified widget and
- * all widgets that can be reached via outgoing paths from the widget.
- *
- * This function must be called if the number of input paths for a widget might
- * have changed. E.g. if the source state of a widget changes or a path is added
- * or activated with the widget as the sink.
+ * Common implementation for dapm_widget_invalidate_input_paths() and
+ * dapm_widget_invalidate_output_paths(). The function is inlined since the
+ * combined size of the two specialized functions is only marginally larger than
+ * the size of the generic function and at the same time the fast path of the
+ * specialized functions is significantly smaller than the generic function.
  */
-static void dapm_widget_invalidate_input_paths(struct snd_soc_dapm_widget *w)
+static __always_inline void dapm_widget_invalidate_paths(
+	struct snd_soc_dapm_widget *w, enum snd_soc_dapm_direction dir)
 {
-	struct snd_soc_dapm_widget *sink;
+	enum snd_soc_dapm_direction rdir = SND_SOC_DAPM_DIR_REVERSE(dir);
+	struct snd_soc_dapm_widget *node;
 	struct snd_soc_dapm_path *p;
 	LIST_HEAD(list);
 
 	dapm_assert_locked(w->dapm);
 
-	if (w->inputs == -1)
+	if (w->endpoints[dir] == -1)
 		return;
 
-	w->inputs = -1;
 	list_add_tail(&w->work_list, &list);
+	w->endpoints[dir] = -1;
 
 	list_for_each_entry(w, &list, work_list) {
-		list_for_each_entry(p, &w->sinks, list_source) {
+		snd_soc_dapm_widget_for_each_path(w, dir, p) {
 			if (p->is_supply || p->weak || !p->connect)
 				continue;
-			sink = p->sink;
-			if (sink->inputs != -1) {
-				sink->inputs = -1;
-				list_add_tail(&sink->work_list, &list);
+			node = p->node[rdir];
+			if (node->endpoints[dir] != -1) {
+				node->endpoints[dir] = -1;
+				list_add_tail(&node->work_list, &list);
 			}
 		}
 	}
 }
 
 /*
+ * dapm_widget_invalidate_input_paths() - Invalidate the cached number of
+ *  input paths
+ * @w: The widget for which to invalidate the cached number of input paths
+ *
+ * Resets the cached number of inputs for the specified widget and all widgets
+ * that can be reached via outgoing paths from the widget.
+ *
+ * This function must be called if the number of input paths for a widget might
+ * have changed. E.g. if the source state of a widget changes or a path is added
+ * or activated with the widget as the sink.
+ */
+static void dapm_widget_invalidate_input_paths(struct snd_soc_dapm_widget *w)
+{
+	dapm_widget_invalidate_paths(w, SND_SOC_DAPM_DIR_IN);
+}
+
+/*
  * dapm_widget_invalidate_output_paths() - Invalidate the cached number of
  *  output paths
  * @w: The widget for which to invalidate the cached number of output paths
@@ -219,29 +240,7 @@
  */
 static void dapm_widget_invalidate_output_paths(struct snd_soc_dapm_widget *w)
 {
-	struct snd_soc_dapm_widget *source;
-	struct snd_soc_dapm_path *p;
-	LIST_HEAD(list);
-
-	dapm_assert_locked(w->dapm);
-
-	if (w->outputs == -1)
-		return;
-
-	w->outputs = -1;
-	list_add_tail(&w->work_list, &list);
-
-	list_for_each_entry(w, &list, work_list) {
-		list_for_each_entry(p, &w->sources, list_sink) {
-			if (p->is_supply || p->weak || !p->connect)
-				continue;
-			source = p->source;
-			if (source->outputs != -1) {
-				source->outputs = -1;
-				list_add_tail(&source->work_list, &list);
-			}
-		}
-	}
+	dapm_widget_invalidate_paths(w, SND_SOC_DAPM_DIR_OUT);
 }
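Both invalidate helpers now collapse into one worklist walk over the direction-indexed edge lists. A compilable userspace model of that walk, with plain arrays standing in for the kernel's list_head machinery (none of this is kernel code):

#include <stdio.h>

enum dir { DIR_IN, DIR_OUT, NDIR };

struct widget {
	int endpoints[NDIR];		/* cached counts, -1 = invalid */
	struct widget *next[NDIR][4];	/* neighbours, NULL-terminated */
};

static void invalidate_paths(struct widget *start, enum dir d)
{
	struct widget *queue[16];
	int head = 0, tail = 0;

	if (start->endpoints[d] == -1)	/* already invalidated */
		return;
	start->endpoints[d] = -1;
	queue[tail++] = start;

	while (head < tail) {
		struct widget *w = queue[head++];

		for (int i = 0; w->next[d][i]; i++) {
			struct widget *n = w->next[d][i];

			if (n->endpoints[d] != -1) {	/* enqueue once */
				n->endpoints[d] = -1;
				queue[tail++] = n;
			}
		}
	}
}

int main(void)
{
	struct widget a = { { 2, 1 }, { { 0 }, { 0 } } };
	struct widget b = a;

	a.next[DIR_OUT][0] = &b;	/* a feeds b */
	invalidate_paths(&a, DIR_OUT);
	printf("%d %d\n", a.endpoints[DIR_OUT], b.endpoints[DIR_OUT]);
	return 0;			/* prints: -1 -1 */
}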
 
 /*
@@ -270,9 +269,9 @@
 	 * endpoints is either connected or disconnected that sum won't change,
 	 * so there is no need to re-check the path.
 	 */
-	if (p->source->inputs != 0)
+	if (p->source->endpoints[SND_SOC_DAPM_DIR_IN] != 0)
 		dapm_widget_invalidate_input_paths(p->sink);
-	if (p->sink->outputs != 0)
+	if (p->sink->endpoints[SND_SOC_DAPM_DIR_OUT] != 0)
 		dapm_widget_invalidate_output_paths(p->source);
 }
 
@@ -283,11 +282,11 @@
 	mutex_lock(&card->dapm_mutex);
 
 	list_for_each_entry(w, &card->widgets, list) {
-		if (w->is_sink || w->is_source) {
+		if (w->is_ep) {
 			dapm_mark_dirty(w, "Rechecking endpoints");
-			if (w->is_sink)
+			if (w->is_ep & SND_SOC_DAPM_EP_SINK)
 				dapm_widget_invalidate_output_paths(w);
-			if (w->is_source)
+			if (w->is_ep & SND_SOC_DAPM_EP_SOURCE)
 				dapm_widget_invalidate_input_paths(w);
 		}
 	}
@@ -894,7 +893,7 @@
 	/* add kcontrol */
 	for (i = 0; i < w->num_kcontrols; i++) {
 		/* match name */
-		list_for_each_entry(path, &w->sources, list_sink) {
+		snd_soc_dapm_widget_for_each_source_path(w, path) {
 			/* mixer/mux paths name must match control name */
 			if (path->name != (char *)w->kcontrol_news[i].name)
 				continue;
@@ -923,18 +922,18 @@
 static int dapm_new_mux(struct snd_soc_dapm_widget *w)
 {
 	struct snd_soc_dapm_context *dapm = w->dapm;
+	enum snd_soc_dapm_direction dir;
 	struct snd_soc_dapm_path *path;
-	struct list_head *paths;
 	const char *type;
 	int ret;
 
 	switch (w->id) {
 	case snd_soc_dapm_mux:
-		paths = &w->sources;
+		dir = SND_SOC_DAPM_DIR_OUT;
 		type = "mux";
 		break;
 	case snd_soc_dapm_demux:
-		paths = &w->sinks;
+		dir = SND_SOC_DAPM_DIR_IN;
 		type = "demux";
 		break;
 	default:
@@ -948,7 +947,7 @@
 		return -EINVAL;
 	}
 
-	if (list_empty(paths)) {
+	if (list_empty(&w->edges[dir])) {
 		dev_err(dapm->dev, "ASoC: %s %s has no paths\n", type, w->name);
 		return -EINVAL;
 	}
@@ -957,16 +956,9 @@
 	if (ret < 0)
 		return ret;
 
-	if (w->id == snd_soc_dapm_mux) {
-		list_for_each_entry(path, &w->sources, list_sink) {
-			if (path->name)
-				dapm_kcontrol_add_path(w->kcontrols[0], path);
-		}
-	} else {
-		list_for_each_entry(path, &w->sinks, list_source) {
-			if (path->name)
-				dapm_kcontrol_add_path(w->kcontrols[0], path);
-		}
+	snd_soc_dapm_widget_for_each_path(w, dir, path) {
+		if (path->name)
+			dapm_kcontrol_add_path(w->kcontrols[0], path);
 	}
 
 	return 0;
@@ -1032,43 +1024,79 @@
 	}
 }
 
-/* add widget to list if it's not already in the list */
-static int dapm_list_add_widget(struct snd_soc_dapm_widget_list **list,
-	struct snd_soc_dapm_widget *w)
+static int dapm_widget_list_create(struct snd_soc_dapm_widget_list **list,
+	struct list_head *widgets)
 {
-	struct snd_soc_dapm_widget_list *wlist;
-	int wlistsize, wlistentries, i;
+	struct snd_soc_dapm_widget *w;
+	struct list_head *it;
+	unsigned int size = 0;
+	unsigned int i = 0;
 
+	list_for_each(it, widgets)
+		size++;
+
+	*list = kzalloc(sizeof(**list) + size * sizeof(*w), GFP_KERNEL);
 	if (*list == NULL)
-		return -EINVAL;
-
-	wlist = *list;
-
-	/* is this widget already in the list */
-	for (i = 0; i < wlist->num_widgets; i++) {
-		if (wlist->widgets[i] == w)
-			return 0;
-	}
-
-	/* allocate some new space */
-	wlistentries = wlist->num_widgets + 1;
-	wlistsize = sizeof(struct snd_soc_dapm_widget_list) +
-			wlistentries * sizeof(struct snd_soc_dapm_widget *);
-	*list = krealloc(wlist, wlistsize, GFP_KERNEL);
-	if (*list == NULL) {
-		dev_err(w->dapm->dev, "ASoC: can't allocate widget list for %s\n",
-			w->name);
 		return -ENOMEM;
+
+	list_for_each_entry(w, widgets, work_list)
+		(*list)->widgets[i++] = w;
+
+	(*list)->num_widgets = i;
+
+	return 0;
+}
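dapm_widget_list_create() replaces the old krealloc()-per-widget growth with one counting pass and a single allocation whose flexible array member holds all the pointers. A standalone sketch of the same scheme (illustrative types, not the ALSA ones):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct widget { const char *name; };

struct widget_list {
	int num_widgets;
	struct widget *widgets[];	/* flexible array member */
};

static struct widget_list *widget_list_create(struct widget **src, int n)
{
	struct widget_list *l;

	/* one allocation: header plus n pointers */
	l = calloc(1, sizeof(*l) + n * sizeof(l->widgets[0]));
	if (!l)
		return NULL;
	memcpy(l->widgets, src, n * sizeof(l->widgets[0]));
	l->num_widgets = n;
	return l;
}

int main(void)
{
	struct widget a = { "DAC" }, b = { "Speaker" };
	struct widget *src[] = { &a, &b };
	struct widget_list *l = widget_list_create(src, 2);

	if (l)
		printf("%d widgets: %s, %s\n", l->num_widgets,
		       l->widgets[0]->name, l->widgets[1]->name);
	free(l);
	return 0;
}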
+
+/*
+ * Common implementation for is_connected_output_ep() and
+ * is_connected_input_ep(). The function is inlined since the combined size of
+ * the two specialized functions is only marginally larger than the size of the
+ * generic function and at the same time the fast path of the specialized
+ * functions is significantly smaller than the generic function.
+ */
+static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
+	struct list_head *list, enum snd_soc_dapm_direction dir,
+	int (*fn)(struct snd_soc_dapm_widget *, struct list_head *))
+{
+	enum snd_soc_dapm_direction rdir = SND_SOC_DAPM_DIR_REVERSE(dir);
+	struct snd_soc_dapm_path *path;
+	int con = 0;
+
+	if (widget->endpoints[dir] >= 0)
+		return widget->endpoints[dir];
+
+	DAPM_UPDATE_STAT(widget, path_checks);
+
+	/* do we need to add this widget to the list ? */
+	if (list)
+		list_add_tail(&widget->work_list, list);
+
+	if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
+		widget->endpoints[dir] = snd_soc_dapm_suspend_check(widget);
+		return widget->endpoints[dir];
 	}
-	wlist = *list;
 
-	/* insert the widget */
-	dev_dbg(w->dapm->dev, "ASoC: added %s in widget list pos %d\n",
-			w->name, wlist->num_widgets);
+	snd_soc_dapm_widget_for_each_path(widget, rdir, path) {
+		DAPM_UPDATE_STAT(widget, neighbour_checks);
 
-	wlist->widgets[wlist->num_widgets] = w;
-	wlist->num_widgets++;
-	return 1;
+		if (path->weak || path->is_supply)
+			continue;
+
+		if (path->walking)
+			return 1;
+
+		trace_snd_soc_dapm_path(widget, dir, path);
+
+		if (path->connect) {
+			path->walking = 1;
+			con += fn(path->node[dir], list);
+			path->walking = 0;
+		}
+	}
+
+	widget->endpoints[dir] = con;
+
+	return con;
 }
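A standalone model of the traversal above: memoized depth-first counting of reachable endpoints, with a per-path walking flag as the loop guard. It is simplified to one edge array per node, whereas the kernel version iterates the reverse-direction list and follows p->node[dir]:

#include <stdio.h>

struct node;

struct path {
	struct node *to;
	int connect;
	int walking;
};

struct node {
	int cached;		/* -1 = not yet computed */
	int is_ep;		/* terminal endpoint? */
	struct path *out[4];	/* NULL-terminated */
};

static int connected_eps(struct node *n)
{
	int con = 0;

	if (n->cached >= 0)	/* memoized result */
		return n->cached;
	if (n->is_ep)
		return n->cached = 1;

	for (int i = 0; n->out[i]; i++) {
		struct path *p = n->out[i];

		if (p->walking)	/* already on the stack: a loop */
			return 1;
		if (!p->connect)
			continue;
		p->walking = 1;
		con += connected_eps(p->to);
		p->walking = 0;
	}
	return n->cached = con;
}

int main(void)
{
	struct node ep = { -1, 1, { 0 } };
	struct path p = { &ep, 1, 0 };
	struct node mid = { -1, 0, { &p } };

	printf("%d\n", connected_eps(&mid));	/* prints: 1 */
	return 0;
}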
 
 /*
@@ -1076,57 +1104,10 @@
  * output widget. Returns number of complete paths.
  */
 static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
-	struct snd_soc_dapm_widget_list **list)
+	struct list_head *list)
 {
-	struct snd_soc_dapm_path *path;
-	int con = 0;
-
-	if (widget->outputs >= 0)
-		return widget->outputs;
-
-	DAPM_UPDATE_STAT(widget, path_checks);
-
-	if (widget->is_sink && widget->connected) {
-		widget->outputs = snd_soc_dapm_suspend_check(widget);
-		return widget->outputs;
-	}
-
-	list_for_each_entry(path, &widget->sinks, list_source) {
-		DAPM_UPDATE_STAT(widget, neighbour_checks);
-
-		if (path->weak || path->is_supply)
-			continue;
-
-		if (path->walking)
-			return 1;
-
-		trace_snd_soc_dapm_output_path(widget, path);
-
-		if (path->connect) {
-			path->walking = 1;
-
-			/* do we need to add this widget to the list ? */
-			if (list) {
-				int err;
-				err = dapm_list_add_widget(list, path->sink);
-				if (err < 0) {
-					dev_err(widget->dapm->dev,
-						"ASoC: could not add widget %s\n",
-						widget->name);
-					path->walking = 0;
-					return con;
-				}
-			}
-
-			con += is_connected_output_ep(path->sink, list);
-
-			path->walking = 0;
-		}
-	}
-
-	widget->outputs = con;
-
-	return con;
+	return is_connected_ep(widget, list, SND_SOC_DAPM_DIR_OUT,
+			is_connected_output_ep);
 }
 
 /*
@@ -1134,57 +1115,10 @@
  * input widget. Returns number of complete paths.
  */
 static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
-	struct snd_soc_dapm_widget_list **list)
+	struct list_head *list)
 {
-	struct snd_soc_dapm_path *path;
-	int con = 0;
-
-	if (widget->inputs >= 0)
-		return widget->inputs;
-
-	DAPM_UPDATE_STAT(widget, path_checks);
-
-	if (widget->is_source && widget->connected) {
-		widget->inputs = snd_soc_dapm_suspend_check(widget);
-		return widget->inputs;
-	}
-
-	list_for_each_entry(path, &widget->sources, list_sink) {
-		DAPM_UPDATE_STAT(widget, neighbour_checks);
-
-		if (path->weak || path->is_supply)
-			continue;
-
-		if (path->walking)
-			return 1;
-
-		trace_snd_soc_dapm_input_path(widget, path);
-
-		if (path->connect) {
-			path->walking = 1;
-
-			/* do we need to add this widget to the list ? */
-			if (list) {
-				int err;
-				err = dapm_list_add_widget(list, path->source);
-				if (err < 0) {
-					dev_err(widget->dapm->dev,
-						"ASoC: could not add widget %s\n",
-						widget->name);
-					path->walking = 0;
-					return con;
-				}
-			}
-
-			con += is_connected_input_ep(path->source, list);
-
-			path->walking = 0;
-		}
-	}
-
-	widget->inputs = con;
-
-	return con;
+	return is_connected_ep(widget, list, SND_SOC_DAPM_DIR_IN,
+			is_connected_input_ep);
 }
 
 /**
@@ -1204,7 +1138,9 @@
 {
 	struct snd_soc_card *card = dai->component->card;
 	struct snd_soc_dapm_widget *w;
+	LIST_HEAD(widgets);
 	int paths;
+	int ret;
 
 	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
 
@@ -1213,14 +1149,21 @@
 	 * to reset the cached number of inputs and outputs.
 	 */
 	list_for_each_entry(w, &card->widgets, list) {
-		w->inputs = -1;
-		w->outputs = -1;
+		w->endpoints[SND_SOC_DAPM_DIR_IN] = -1;
+		w->endpoints[SND_SOC_DAPM_DIR_OUT] = -1;
 	}
 
 	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
-		paths = is_connected_output_ep(dai->playback_widget, list);
+		paths = is_connected_output_ep(dai->playback_widget, &widgets);
 	else
-		paths = is_connected_input_ep(dai->capture_widget, list);
+		paths = is_connected_input_ep(dai->capture_widget, &widgets);
+
+	/* Drop starting point */
+	list_del(widgets.next);
+
+	ret = dapm_widget_list_create(list, &widgets);
+	if (ret)
+		paths = ret;
 
 	trace_snd_soc_dapm_connected(paths, stream);
 	mutex_unlock(&card->dapm_mutex);
@@ -1321,7 +1264,7 @@
 	DAPM_UPDATE_STAT(w, power_checks);
 
 	/* Check if one of our outputs is connected */
-	list_for_each_entry(path, &w->sinks, list_source) {
+	snd_soc_dapm_widget_for_each_sink_path(w, path) {
 		DAPM_UPDATE_STAT(w, neighbour_checks);
 
 		if (path->weak)
@@ -1745,12 +1688,12 @@
 	/* If we changed our power state perhaps our neighbours changed
 	 * also.
 	 */
-	list_for_each_entry(path, &w->sources, list_sink)
+	snd_soc_dapm_widget_for_each_source_path(w, path)
 		dapm_widget_set_peer_power(path->source, power, path->connect);
 
 	/* Supplies can't affect their outputs, only their inputs */
 	if (!w->is_supply) {
-		list_for_each_entry(path, &w->sinks, list_source)
+		snd_soc_dapm_widget_for_each_sink_path(w, path)
 			dapm_widget_set_peer_power(path->sink, power,
 						   path->connect);
 	}
@@ -1951,6 +1894,7 @@
 {
 	struct snd_soc_dapm_widget *w = file->private_data;
 	struct snd_soc_card *card = w->dapm->card;
+	enum snd_soc_dapm_direction dir, rdir;
 	char *buf;
 	int in, out;
 	ssize_t ret;
@@ -1987,25 +1931,21 @@
 				w->sname,
 				w->active ? "active" : "inactive");
 
-	list_for_each_entry(p, &w->sources, list_sink) {
-		if (p->connected && !p->connected(w, p->source))
-			continue;
+	snd_soc_dapm_for_each_direction(dir) {
+		rdir = SND_SOC_DAPM_DIR_REVERSE(dir);
+		snd_soc_dapm_widget_for_each_path(w, dir, p) {
+			if (p->connected && !p->connected(w, p->node[rdir]))
+				continue;
 
-		if (p->connect)
-			ret += snprintf(buf + ret, PAGE_SIZE - ret,
-					" in  \"%s\" \"%s\"\n",
-					p->name ? p->name : "static",
-					p->source->name);
-	}
-	list_for_each_entry(p, &w->sinks, list_source) {
-		if (p->connected && !p->connected(w, p->sink))
-			continue;
+			if (!p->connect)
+				continue;
 
-		if (p->connect)
 			ret += snprintf(buf + ret, PAGE_SIZE - ret,
-					" out \"%s\" \"%s\"\n",
+					" %s  \"%s\" \"%s\"\n",
+					(rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out",
 					p->name ? p->name : "static",
-					p->sink->name);
+					p->node[rdir]->name);
+		}
 	}
 
 	mutex_unlock(&card->dapm_mutex);
@@ -2223,14 +2163,16 @@
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_mixer_update_power);
 
-static ssize_t dapm_widget_show_codec(struct snd_soc_codec *codec, char *buf)
+static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
+	char *buf)
 {
+	struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cmpnt);
 	struct snd_soc_dapm_widget *w;
 	int count = 0;
 	char *state = "not set";
 
-	list_for_each_entry(w, &codec->component.card->widgets, list) {
-		if (w->dapm != &codec->dapm)
+	list_for_each_entry(w, &cmpnt->card->widgets, list) {
+		if (w->dapm != dapm)
 			continue;
 
 		/* only display widgets that burn power */
@@ -2258,7 +2200,7 @@
 		}
 	}
 
-	switch (codec->dapm.bias_level) {
+	switch (snd_soc_dapm_get_bias_level(dapm)) {
 	case SND_SOC_BIAS_ON:
 		state = "On";
 		break;
@@ -2287,8 +2229,9 @@
 	mutex_lock(&rtd->card->dapm_mutex);
 
 	for (i = 0; i < rtd->num_codecs; i++) {
-		struct snd_soc_codec *codec = rtd->codec_dais[i]->codec;
-		count += dapm_widget_show_codec(codec, buf + count);
+		struct snd_soc_component *cmpnt = rtd->codec_dais[i]->component;
+
+		count += dapm_widget_show_component(cmpnt, buf + count);
 	}
 
 	mutex_unlock(&rtd->card->dapm_mutex);
@@ -2305,37 +2248,43 @@
 
 static void dapm_free_path(struct snd_soc_dapm_path *path)
 {
-	list_del(&path->list_sink);
-	list_del(&path->list_source);
+	list_del(&path->list_node[SND_SOC_DAPM_DIR_IN]);
+	list_del(&path->list_node[SND_SOC_DAPM_DIR_OUT]);
 	list_del(&path->list_kcontrol);
 	list_del(&path->list);
 	kfree(path);
 }
 
+void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
+{
+	struct snd_soc_dapm_path *p, *next_p;
+	enum snd_soc_dapm_direction dir;
+
+	list_del(&w->list);
+	/*
+	 * Remove the source and sink paths associated with this widget.
+	 * While removing a path, drop the references to it from both the
+	 * source and sink widgets so that the path is freed only once.
+	 */
+	snd_soc_dapm_for_each_direction(dir) {
+		snd_soc_dapm_widget_for_each_path_safe(w, dir, p, next_p)
+			dapm_free_path(p);
+	}
+
+	kfree(w->kcontrols);
+	kfree_const(w->name);
+	kfree(w);
+}
+
 /* free all dapm widgets and resources */
 static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_widget *w, *next_w;
-	struct snd_soc_dapm_path *p, *next_p;
 
 	list_for_each_entry_safe(w, next_w, &dapm->card->widgets, list) {
 		if (w->dapm != dapm)
 			continue;
-		list_del(&w->list);
-		/*
-		 * remove source and sink paths associated to this widget.
-		 * While removing the path, remove reference to it from both
-		 * source and sink widgets so that path is removed only once.
-		 */
-		list_for_each_entry_safe(p, next_p, &w->sources, list_sink)
-			dapm_free_path(p);
-
-		list_for_each_entry_safe(p, next_p, &w->sinks, list_source)
-			dapm_free_path(p);
-
-		kfree(w->kcontrols);
-		kfree(w->name);
-		kfree(w);
+		snd_soc_dapm_free_widget(w);
 	}
 }
 
@@ -2441,20 +2390,22 @@
  */
 static void dapm_update_widget_flags(struct snd_soc_dapm_widget *w)
 {
+	enum snd_soc_dapm_direction dir;
 	struct snd_soc_dapm_path *p;
+	unsigned int ep;
 
 	switch (w->id) {
 	case snd_soc_dapm_input:
 		/* On a fully routed card a input is never a source */
 		if (w->dapm->card->fully_routed)
-			break;
-		w->is_source = 1;
-		list_for_each_entry(p, &w->sources, list_sink) {
+			return;
+		ep = SND_SOC_DAPM_EP_SOURCE;
+		snd_soc_dapm_widget_for_each_source_path(w, p) {
 			if (p->source->id == snd_soc_dapm_micbias ||
 				p->source->id == snd_soc_dapm_mic ||
 				p->source->id == snd_soc_dapm_line ||
 				p->source->id == snd_soc_dapm_output) {
-					w->is_source = 0;
+					ep = 0;
 					break;
 			}
 		}
@@ -2462,25 +2413,30 @@
 	case snd_soc_dapm_output:
 		/* On a fully routed card a output is never a sink */
 		if (w->dapm->card->fully_routed)
-			break;
-		w->is_sink = 1;
-		list_for_each_entry(p, &w->sinks, list_source) {
+			return;
+		ep = SND_SOC_DAPM_EP_SINK;
+		snd_soc_dapm_widget_for_each_sink_path(w, p) {
 			if (p->sink->id == snd_soc_dapm_spk ||
 				p->sink->id == snd_soc_dapm_hp ||
 				p->sink->id == snd_soc_dapm_line ||
 				p->sink->id == snd_soc_dapm_input) {
-					w->is_sink = 0;
+					ep = 0;
 					break;
 			}
 		}
 		break;
 	case snd_soc_dapm_line:
-		w->is_sink = !list_empty(&w->sources);
-		w->is_source = !list_empty(&w->sinks);
+		ep = 0;
+		snd_soc_dapm_for_each_direction(dir) {
+			if (!list_empty(&w->edges[dir]))
+				ep |= SND_SOC_DAPM_DIR_TO_EP(dir);
+		}
 		break;
 	default:
-		break;
+		return;
 	}
+
+	w->is_ep = ep;
 }
 
 static int snd_soc_dapm_check_dynamic_path(struct snd_soc_dapm_context *dapm,
@@ -2533,6 +2489,8 @@
 	int (*connected)(struct snd_soc_dapm_widget *source,
 			 struct snd_soc_dapm_widget *sink))
 {
+	struct snd_soc_dapm_widget *widgets[2];
+	enum snd_soc_dapm_direction dir;
 	struct snd_soc_dapm_path *path;
 	int ret;
 
@@ -2565,13 +2523,14 @@
 	if (!path)
 		return -ENOMEM;
 
-	path->source = wsource;
-	path->sink = wsink;
+	path->node[SND_SOC_DAPM_DIR_IN] = wsource;
+	path->node[SND_SOC_DAPM_DIR_OUT] = wsink;
+	widgets[SND_SOC_DAPM_DIR_IN] = wsource;
+	widgets[SND_SOC_DAPM_DIR_OUT] = wsink;
+
 	path->connected = connected;
 	INIT_LIST_HEAD(&path->list);
 	INIT_LIST_HEAD(&path->list_kcontrol);
-	INIT_LIST_HEAD(&path->list_source);
-	INIT_LIST_HEAD(&path->list_sink);
 
 	if (wsource->is_supply || wsink->is_supply)
 		path->is_supply = 1;
@@ -2609,14 +2568,13 @@
 	}
 
 	list_add(&path->list, &dapm->card->paths);
-	list_add(&path->list_sink, &wsink->sources);
-	list_add(&path->list_source, &wsource->sinks);
+	snd_soc_dapm_for_each_direction(dir)
+		list_add(&path->list_node[dir], &widgets[dir]->edges[dir]);
 
-	dapm_update_widget_flags(wsource);
-	dapm_update_widget_flags(wsink);
-
-	dapm_mark_dirty(wsource, "Route added");
-	dapm_mark_dirty(wsink, "Route added");
+	snd_soc_dapm_for_each_direction(dir) {
+		dapm_update_widget_flags(widgets[dir]);
+		dapm_mark_dirty(widgets[dir], "Route added");
+	}
 
 	if (dapm->card->instantiated && path->connect)
 		dapm_path_invalidate(path);
@@ -2864,7 +2822,7 @@
 		dev_warn(dapm->dev, "ASoC: Ignoring control for weak route %s->%s\n",
 			 route->source, route->sink);
 
-	list_for_each_entry(path, &source->sinks, list_source) {
+	snd_soc_dapm_widget_for_each_sink_path(source, path) {
 		if (path->sink == sink) {
 			path->weak = 1;
 			count++;
@@ -2918,7 +2876,7 @@
 
 /**
  * snd_soc_dapm_new_widgets - add new dapm widgets
- * @dapm: DAPM context
+ * @card: card to be checked for new dapm widgets
  *
  * Checks the codec for any new dapm widgets and creates them if found.
  *
@@ -3298,6 +3256,7 @@
 snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
 			 const struct snd_soc_dapm_widget *widget)
 {
+	enum snd_soc_dapm_direction dir;
 	struct snd_soc_dapm_widget *w;
 	const char *prefix;
 	int ret;
@@ -3344,7 +3303,7 @@
 	if (prefix)
 		w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
 	else
-		w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
+		w->name = kstrdup_const(widget->name, GFP_KERNEL);
 	if (w->name == NULL) {
 		kfree(w);
 		return NULL;
@@ -3352,27 +3311,27 @@
 
 	switch (w->id) {
 	case snd_soc_dapm_mic:
-		w->is_source = 1;
+		w->is_ep = SND_SOC_DAPM_EP_SOURCE;
 		w->power_check = dapm_generic_check_power;
 		break;
 	case snd_soc_dapm_input:
 		if (!dapm->card->fully_routed)
-			w->is_source = 1;
+			w->is_ep = SND_SOC_DAPM_EP_SOURCE;
 		w->power_check = dapm_generic_check_power;
 		break;
 	case snd_soc_dapm_spk:
 	case snd_soc_dapm_hp:
-		w->is_sink = 1;
+		w->is_ep = SND_SOC_DAPM_EP_SINK;
 		w->power_check = dapm_generic_check_power;
 		break;
 	case snd_soc_dapm_output:
 		if (!dapm->card->fully_routed)
-			w->is_sink = 1;
+			w->is_ep = SND_SOC_DAPM_EP_SINK;
 		w->power_check = dapm_generic_check_power;
 		break;
 	case snd_soc_dapm_vmid:
 	case snd_soc_dapm_siggen:
-		w->is_source = 1;
+		w->is_ep = SND_SOC_DAPM_EP_SOURCE;
 		w->power_check = dapm_always_on_check_power;
 		break;
 	case snd_soc_dapm_mux:
@@ -3406,14 +3365,14 @@
 	}
 
 	w->dapm = dapm;
-	INIT_LIST_HEAD(&w->sources);
-	INIT_LIST_HEAD(&w->sinks);
 	INIT_LIST_HEAD(&w->list);
 	INIT_LIST_HEAD(&w->dirty);
 	list_add_tail(&w->list, &dapm->card->widgets);
 
-	w->inputs = -1;
-	w->outputs = -1;
+	snd_soc_dapm_for_each_direction(dir) {
+		INIT_LIST_HEAD(&w->edges[dir]);
+		w->endpoints[dir] = -1;
+	}
 
 	/* machine layer sets up unconnected pins and insertions */
 	w->connected = 1;
@@ -3467,19 +3426,17 @@
 	int ret;
 
 	if (WARN_ON(!config) ||
-	    WARN_ON(list_empty(&w->sources) || list_empty(&w->sinks)))
+	    WARN_ON(list_empty(&w->edges[SND_SOC_DAPM_DIR_OUT]) ||
+		    list_empty(&w->edges[SND_SOC_DAPM_DIR_IN])))
 		return -EINVAL;
 
 	/* We only support a single source and sink, pick the first */
-	source_p = list_first_entry(&w->sources, struct snd_soc_dapm_path,
-				    list_sink);
-	sink_p = list_first_entry(&w->sinks, struct snd_soc_dapm_path,
-				  list_source);
-
-	if (WARN_ON(!source_p || !sink_p) ||
-	    WARN_ON(!sink_p->source || !source_p->sink) ||
-	    WARN_ON(!source_p->source || !sink_p->sink))
-		return -EINVAL;
+	source_p = list_first_entry(&w->edges[SND_SOC_DAPM_DIR_OUT],
+				    struct snd_soc_dapm_path,
+				    list_node[SND_SOC_DAPM_DIR_OUT]);
+	sink_p = list_first_entry(&w->edges[SND_SOC_DAPM_DIR_IN],
+				    struct snd_soc_dapm_path,
+				    list_node[SND_SOC_DAPM_DIR_IN]);
 
 	source = source_p->source->priv;
 	sink = sink_p->sink->priv;
@@ -3821,11 +3778,6 @@
 	for (i = 0; i < rtd->num_codecs; i++) {
 		struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
 
-		/* there is no point in connecting BE DAI links with dummies */
-		if (snd_soc_dai_is_dummy(codec_dai) ||
-			snd_soc_dai_is_dummy(cpu_dai))
-			continue;
-
 		/* connect BE DAI playback if widgets are valid */
 		if (codec_dai->playback_widget && cpu_dai->playback_widget) {
 			source = cpu_dai->playback_widget;
@@ -3856,6 +3808,7 @@
 	int event)
 {
 	struct snd_soc_dapm_widget *w;
+	unsigned int ep;
 
 	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
 		w = dai->playback_widget;
@@ -3865,12 +3818,22 @@
 	if (w) {
 		dapm_mark_dirty(w, "stream event");
 
+		if (w->id == snd_soc_dapm_dai_in) {
+			ep = SND_SOC_DAPM_EP_SOURCE;
+			dapm_widget_invalidate_input_paths(w);
+		} else {
+			ep = SND_SOC_DAPM_EP_SINK;
+			dapm_widget_invalidate_output_paths(w);
+		}
+
 		switch (event) {
 		case SND_SOC_DAPM_STREAM_START:
 			w->active = 1;
+			w->is_ep = ep;
 			break;
 		case SND_SOC_DAPM_STREAM_STOP:
 			w->active = 0;
+			w->is_ep = 0;
 			break;
 		case SND_SOC_DAPM_STREAM_SUSPEND:
 		case SND_SOC_DAPM_STREAM_RESUME:
@@ -3878,14 +3841,6 @@
 		case SND_SOC_DAPM_STREAM_PAUSE_RELEASE:
 			break;
 		}
-
-		if (w->id == snd_soc_dapm_dai_in) {
-			w->is_source = w->active;
-			dapm_widget_invalidate_input_paths(w);
-		} else {
-			w->is_sink = w->active;
-			dapm_widget_invalidate_output_paths(w);
-		}
 	}
 }
 
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 256b9c9..70e4b9d 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1231,24 +1231,17 @@
 }
 
 int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
-	int stream, struct snd_soc_dapm_widget_list **list_)
+	int stream, struct snd_soc_dapm_widget_list **list)
 {
 	struct snd_soc_dai *cpu_dai = fe->cpu_dai;
-	struct snd_soc_dapm_widget_list *list;
 	int paths;
 
-	list = kzalloc(sizeof(struct snd_soc_dapm_widget_list) +
-			sizeof(struct snd_soc_dapm_widget *), GFP_KERNEL);
-	if (list == NULL)
-		return -ENOMEM;
-
 	/* get number of valid DAI paths and their widgets */
-	paths = snd_soc_dapm_dai_get_connected_widgets(cpu_dai, stream, &list);
+	paths = snd_soc_dapm_dai_get_connected_widgets(cpu_dai, stream, list);
 
 	dev_dbg(fe->dev, "ASoC: found %d audio %s paths\n", paths,
 			stream ? "capture" : "playback");
 
-	*list_ = list;
 	return paths;
 }
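Not from this patch: a sketch of the call-site contract after this change, where fe is the front-end runtime and dpcm_path_put() is assumed to remain a plain kfree() of the list:

static int fe_count_playback_paths(struct snd_soc_pcm_runtime *fe)
{
	struct snd_soc_dapm_widget_list *list = NULL;
	int paths;

	paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list);
	if (paths > 0) {
		/* ... inspect list->widgets[0 .. num_widgets - 1] ... */
	}
	dpcm_path_put(&list);	/* safe even if the list stayed NULL */
	return paths;
}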
 
@@ -1306,7 +1299,12 @@
 
 		switch (list->widgets[i]->id) {
 		case snd_soc_dapm_dai_in:
+			if (stream != SNDRV_PCM_STREAM_PLAYBACK)
+				continue;
+			break;
 		case snd_soc_dapm_dai_out:
+			if (stream != SNDRV_PCM_STREAM_CAPTURE)
+				continue;
 			break;
 		default:
 			continue;
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 31068b8..69d01cd 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -45,12 +45,12 @@
 #define SOC_TPLG_PASS_VENDOR		1
 #define SOC_TPLG_PASS_MIXER		2
 #define SOC_TPLG_PASS_WIDGET		3
-#define SOC_TPLG_PASS_GRAPH		4
-#define SOC_TPLG_PASS_PINS		5
-#define SOC_TPLG_PASS_PCM_DAI		6
+#define SOC_TPLG_PASS_PCM_DAI		4
+#define SOC_TPLG_PASS_GRAPH		5
+#define SOC_TPLG_PASS_PINS		6
 
 #define SOC_TPLG_PASS_START	SOC_TPLG_PASS_MANIFEST
-#define SOC_TPLG_PASS_END	SOC_TPLG_PASS_PCM_DAI
+#define SOC_TPLG_PASS_END	SOC_TPLG_PASS_PINS
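A userspace sketch of why these numbers matter: blocks are processed in ascending pass order, so renumbering PCM_DAI below GRAPH and PINS means the DAI objects exist before anything that references them is wired up (the mapping from block headers to passes is simplified here):

#include <stdio.h>

enum pass { PASS_MANIFEST, PASS_VENDOR, PASS_MIXER, PASS_WIDGET,
	    PASS_PCM_DAI, PASS_GRAPH, PASS_PINS, NPASS };

struct block { enum pass type; const char *name; };

static void load_block(const struct block *b)
{
	printf("loading %s\n", b->name);
}

static void load_all(const struct block *blk, int nblk)
{
	for (int p = PASS_MANIFEST; p < NPASS; p++)
		for (int i = 0; i < nblk; i++)
			if (blk[i].type == p)
				load_block(&blk[i]);
}

int main(void)
{
	const struct block blk[] = {
		{ PASS_GRAPH,   "graph"   },
		{ PASS_PCM_DAI, "pcm/dai" },	/* now loads first */
	};

	load_all(blk, 2);
	return 0;
}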
 
 struct soc_tplg {
 	const struct firmware *fw;
@@ -66,10 +66,14 @@
 	u32 index;	/* current block index */
 	u32 req_index;	/* required index, only loaded/free matching blocks */
 
-	/* kcontrol operations */
+	/* vendor specific kcontrol operations */
 	const struct snd_soc_tplg_kcontrol_ops *io_ops;
 	int io_ops_count;
 
+	/* vendor specific bytes ext handlers, for TLV bytes controls */
+	const struct snd_soc_tplg_bytes_ext_ops *bytes_ext_ops;
+	int bytes_ext_ops_count;
+
 	/* optional fw loading callbacks to component drivers */
 	struct snd_soc_tplg_ops *ops;
 };
@@ -508,19 +512,70 @@
 /* bind a kcontrol to its IO handlers */
 static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
 	struct snd_kcontrol_new *k,
-	const struct snd_soc_tplg_kcontrol_ops *ops, int num_ops,
-	const struct snd_soc_tplg_kcontrol_ops *bops, int num_bops)
+	const struct soc_tplg *tplg)
 {
-	int i;
+	const struct snd_soc_tplg_kcontrol_ops *ops;
+	const struct snd_soc_tplg_bytes_ext_ops *ext_ops;
+	int num_ops, i;
 
-	/* try and map standard kcontrols handler first */
+	if (hdr->ops.info == SND_SOC_TPLG_CTL_BYTES
+		&& k->iface & SNDRV_CTL_ELEM_IFACE_MIXER
+		&& k->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE
+		&& k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
+		struct soc_bytes_ext *sbe;
+		struct snd_soc_tplg_bytes_control *be;
+
+		sbe = (struct soc_bytes_ext *)k->private_value;
+		be = container_of(hdr, struct snd_soc_tplg_bytes_control, hdr);
+
+		/* TLV bytes controls need standard kcontrol info handler,
+		 * TLV callback and extended put/get handlers.
+		 */
+		k->info = snd_soc_bytes_info;
+		k->tlv.c = snd_soc_bytes_tlv_callback;
+
+		ext_ops = tplg->bytes_ext_ops;
+		num_ops = tplg->bytes_ext_ops_count;
+		for (i = 0; i < num_ops; i++) {
+			if (!sbe->put && ext_ops[i].id == be->ext_ops.put)
+				sbe->put = ext_ops[i].put;
+			if (!sbe->get && ext_ops[i].id == be->ext_ops.get)
+				sbe->get = ext_ops[i].get;
+		}
+
+		if (sbe->put && sbe->get)
+			return 0;
+		else
+			return -EINVAL;
+	}
+
+	/* try and map vendor specific kcontrol handlers first */
+	ops = tplg->io_ops;
+	num_ops = tplg->io_ops_count;
 	for (i = 0; i < num_ops; i++) {
 
-		if (ops[i].id == hdr->ops.put)
+		if (k->put == NULL && ops[i].id == hdr->ops.put)
 			k->put = ops[i].put;
-		if (ops[i].id == hdr->ops.get)
+		if (k->get == NULL && ops[i].id == hdr->ops.get)
 			k->get = ops[i].get;
-		if (ops[i].id == hdr->ops.info)
+		if (k->info == NULL && ops[i].id == hdr->ops.info)
+			k->info = ops[i].info;
+	}
+
+	/* vendor specific handlers found ? */
+	if (k->put && k->get && k->info)
+		return 0;
+
+	/* none found so try standard kcontrol handlers */
+	ops = io_ops;
+	num_ops = ARRAY_SIZE(io_ops);
+	for (i = 0; i < num_ops; i++) {
+
+		if (k->put == NULL && ops[i].id == hdr->ops.put)
+			k->put = ops[i].put;
+		if (k->get == NULL && ops[i].id == hdr->ops.get)
+			k->get = ops[i].get;
+		if (k->info == NULL && ops[i].id == hdr->ops.info)
 			k->info = ops[i].info;
 	}
 
@@ -528,21 +583,6 @@
 	if (k->put && k->get && k->info)
 		return 0;
 
-	/* none found so try bespoke handlers */
-	for (i = 0; i < num_bops; i++) {
-
-		if (k->put == NULL && bops[i].id == hdr->ops.put)
-			k->put = bops[i].put;
-		if (k->get == NULL && bops[i].id == hdr->ops.get)
-			k->get = bops[i].get;
-		if (k->info == NULL && bops[i].id == hdr->ops.info)
-			k->info = bops[i].info;
-	}
-
-	/* bespoke handlers found ? */
-	if (k->put && k->get && k->info)
-		return 0;
-
 	/* nothing to bind */
 	return -EINVAL;
 }
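A compilable model of the new binding order: the vendor table is consulted first and the standard table only fills slots that are still empty. The types are stand-ins, and a single id replaces the separate get/put/info ids the real header carries:

#include <stdio.h>

typedef int (*op_fn)(void);

struct op_map { unsigned int id; op_fn get, put, info; };
struct kctl   { op_fn get, put, info; };

static void bind_from(struct kctl *k, unsigned int id,
		      const struct op_map *t, int n)
{
	for (int i = 0; i < n; i++) {
		if (t[i].id != id)
			continue;
		if (!k->get)
			k->get = t[i].get;
		if (!k->put)
			k->put = t[i].put;
		if (!k->info)
			k->info = t[i].info;
	}
}

static int bind_io(struct kctl *k, unsigned int id,
		   const struct op_map *vendor, int nvendor,
		   const struct op_map *std, int nstd)
{
	bind_from(k, id, vendor, nvendor);	/* vendor specific first */
	if (!k->get || !k->put || !k->info)
		bind_from(k, id, std, nstd);	/* then standard handlers */
	return (k->get && k->put && k->info) ? 0 : -1;
}

static int dummy_op(void) { return 0; }

int main(void)
{
	const struct op_map std[] = { { 1, dummy_op, dummy_op, dummy_op } };
	struct kctl k = { 0 };

	printf("%d\n", bind_io(&k, 1, NULL, 0, std, 1));	/* prints: 0 */
	return 0;
}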
@@ -609,9 +649,7 @@
 	if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE))
 		return 0;
 
-	if (tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
-		kc->tlv.c = snd_soc_bytes_tlv_callback;
-	} else {
+	if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK)) {
 		tplg_tlv = &tc->tlv;
 		switch (tplg_tlv->type) {
 		case SNDRV_CTL_TLVT_DB_SCALE:
@@ -682,8 +720,7 @@
 		INIT_LIST_HEAD(&sbe->dobj.list);
 
 		/* map io handlers */
-		err = soc_tplg_kcontrol_bind_io(&be->hdr, &kc, io_ops,
-			ARRAY_SIZE(io_ops), tplg->io_ops, tplg->io_ops_count);
+		err = soc_tplg_kcontrol_bind_io(&be->hdr, &kc, tplg);
 		if (err) {
 			soc_control_err(tplg, &be->hdr, be->hdr.name);
 			kfree(sbe);
@@ -777,8 +814,7 @@
 		INIT_LIST_HEAD(&sm->dobj.list);
 
 		/* map io handlers */
-		err = soc_tplg_kcontrol_bind_io(&mc->hdr, &kc, io_ops,
-			ARRAY_SIZE(io_ops), tplg->io_ops, tplg->io_ops_count);
+		err = soc_tplg_kcontrol_bind_io(&mc->hdr, &kc, tplg);
 		if (err) {
 			soc_control_err(tplg, &mc->hdr, mc->hdr.name);
 			kfree(sm);
@@ -855,12 +891,12 @@
 	if (ec->items > sizeof(*ec->values))
 		return -EINVAL;
 
-	se->dobj.control.dvalues =
-		kmalloc(ec->items * sizeof(u32), GFP_KERNEL);
+	se->dobj.control.dvalues = kmemdup(ec->values,
+					   ec->items * sizeof(u32),
+					   GFP_KERNEL);
 	if (!se->dobj.control.dvalues)
 		return -ENOMEM;
 
-	memcpy(se->dobj.control.dvalues, ec->values, ec->items * sizeof(u32));
 	return 0;
 }
 
@@ -950,8 +986,7 @@
 		}
 
 		/* map io handlers */
-		err = soc_tplg_kcontrol_bind_io(&ec->hdr, &kc, io_ops,
-			ARRAY_SIZE(io_ops), tplg->io_ops, tplg->io_ops_count);
+		err = soc_tplg_kcontrol_bind_io(&ec->hdr, &kc, tplg);
 		if (err) {
 			soc_control_err(tplg, &ec->hdr, ec->hdr.name);
 			kfree(se);
@@ -1093,7 +1128,7 @@
 	struct snd_soc_tplg_mixer_control *mc;
 	int i, err;
 
-	kc = kzalloc(sizeof(*kc) * num_kcontrols, GFP_KERNEL);
+	kc = kcalloc(num_kcontrols, sizeof(*kc), GFP_KERNEL);
 	if (kc == NULL)
 		return NULL;
 
@@ -1137,8 +1172,7 @@
 		INIT_LIST_HEAD(&sm->dobj.list);
 
 		/* map io handlers */
-		err = soc_tplg_kcontrol_bind_io(&mc->hdr, &kc[i], io_ops,
-			ARRAY_SIZE(io_ops), tplg->io_ops, tplg->io_ops_count);
+		err = soc_tplg_kcontrol_bind_io(&mc->hdr, &kc[i], tplg);
 		if (err) {
 			soc_control_err(tplg, &mc->hdr, mc->hdr.name);
 			kfree(sm);
@@ -1235,8 +1269,7 @@
 	}
 
 	/* map io handlers */
-	err = soc_tplg_kcontrol_bind_io(&ec->hdr, kc, io_ops,
-		ARRAY_SIZE(io_ops), tplg->io_ops, tplg->io_ops_count);
+	err = soc_tplg_kcontrol_bind_io(&ec->hdr, kc, tplg);
 	if (err) {
 		soc_control_err(tplg, &ec->hdr, ec->hdr.name);
 		goto err_se;
@@ -1274,7 +1307,7 @@
 	struct snd_kcontrol_new *kc;
 	int i, err;
 
-	kc = kzalloc(sizeof(*kc) * count, GFP_KERNEL);
+	kc = kcalloc(count, sizeof(*kc), GFP_KERNEL);
 	if (!kc)
 		return NULL;
 
@@ -1297,7 +1330,6 @@
 			"ASoC: adding bytes kcontrol %s with access 0x%x\n",
 			be->hdr.name, be->hdr.access);
 
-		memset(kc, 0, sizeof(*kc));
 		kc[i].name = be->hdr.name;
 		kc[i].private_value = (long)sbe;
 		kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
@@ -1307,9 +1339,7 @@
 		INIT_LIST_HEAD(&sbe->dobj.list);
 
 		/* map standard io handlers and check for external handlers */
-		err = soc_tplg_kcontrol_bind_io(&be->hdr, &kc[i], io_ops,
-				ARRAY_SIZE(io_ops), tplg->io_ops,
-				tplg->io_ops_count);
+		err = soc_tplg_kcontrol_bind_io(&be->hdr, &kc[i], tplg);
 		if (err) {
 			soc_control_err(tplg, &be->hdr, be->hdr.name);
 			kfree(sbe);
@@ -1737,6 +1767,8 @@
 	tplg.req_index = id;
 	tplg.io_ops = ops->io_ops;
 	tplg.io_ops_count = ops->io_ops_count;
+	tplg.bytes_ext_ops = ops->bytes_ext_ops;
+	tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count;
 
 	return soc_tplg_load(&tplg);
 }
@@ -1758,7 +1790,6 @@
 	u32 index)
 {
 	struct snd_soc_dapm_widget *w, *next_w;
-	struct snd_soc_dapm_path *p, *next_p;
 
 	list_for_each_entry_safe(w, next_w, &dapm->card->widgets, list) {
 
@@ -1770,31 +1801,9 @@
 		if (w->dobj.index != index &&
 			w->dobj.index != SND_SOC_TPLG_INDEX_ALL)
 			continue;
-
-		list_del(&w->list);
-
-		/*
-		 * remove source and sink paths associated to this widget.
-		 * While removing the path, remove reference to it from both
-		 * source and sink widgets so that path is removed only once.
-		 */
-		list_for_each_entry_safe(p, next_p, &w->sources, list_sink) {
-			list_del(&p->list_sink);
-			list_del(&p->list_source);
-			list_del(&p->list);
-			kfree(p);
-		}
-		list_for_each_entry_safe(p, next_p, &w->sinks, list_source) {
-			list_del(&p->list_sink);
-			list_del(&p->list_source);
-			list_del(&p->list);
-			kfree(p);
-		}
 		/* check and free and dynamic widget kcontrols */
 		snd_soc_tplg_widget_remove(w);
-		kfree(w->kcontrols);
-		kfree(w->name);
-		kfree(w);
+		snd_soc_dapm_free_widget(w);
 	}
 }
 EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all);
diff --git a/sound/soc/spear/spdif_in.c b/sound/soc/spear/spdif_in.c
index a402860..977a078 100644
--- a/sound/soc/spear/spdif_in.c
+++ b/sound/soc/spear/spdif_in.c
@@ -203,35 +203,25 @@
 	struct spdif_in_dev *host;
 	struct spear_spdif_platform_data *pdata;
 	struct resource *res, *res_fifo;
+	void __iomem *io_base;
 	int ret;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -EINVAL;
+	io_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(io_base))
+		return PTR_ERR(io_base);
 
 	res_fifo = platform_get_resource(pdev, IORESOURCE_IO, 0);
 	if (!res_fifo)
 		return -EINVAL;
 
-	if (!devm_request_mem_region(&pdev->dev, res->start,
-				resource_size(res), pdev->name)) {
-		dev_warn(&pdev->dev, "Failed to get memory resourse\n");
-		return -ENOENT;
-	}
-
 	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
 	if (!host) {
 		dev_warn(&pdev->dev, "kzalloc fail\n");
 		return -ENOMEM;
 	}
 
-	host->io_base = devm_ioremap(&pdev->dev, res->start,
-				resource_size(res));
-	if (!host->io_base) {
-		dev_warn(&pdev->dev, "ioremap failed\n");
-		return -ENOMEM;
-	}
-
+	host->io_base = io_base;
 	host->irq = platform_get_irq(pdev, 0);
 	if (host->irq < 0)
 		return -EINVAL;
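Not part of this patch: the generic shape of the idiom this hunk adopts. devm_ioremap_resource() validates the resource (including a NULL result from platform_get_resource()), requests the region, maps it, and unwinds all of it automatically on detach:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... use base; no explicit unmap or release needed ... */
	return 0;
}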
diff --git a/sound/soc/spear/spear_pcm.c b/sound/soc/spear/spear_pcm.c
index a7dc3c5..e8476da 100644
--- a/sound/soc/spear/spear_pcm.c
+++ b/sound/soc/spear/spear_pcm.c
@@ -44,7 +44,7 @@
 	*config = spear_dmaengine_pcm_config;
 	config->compat_filter_fn = filter;
 
-	return snd_dmaengine_pcm_register(dev, config,
+	return devm_snd_dmaengine_pcm_register(dev, config,
 		SND_DMAENGINE_PCM_FLAG_NO_DT |
 		SND_DMAENGINE_PCM_FLAG_COMPAT);
 }
diff --git a/sound/soc/sti/Kconfig b/sound/soc/sti/Kconfig
new file mode 100644
index 0000000..64a6900
--- /dev/null
+++ b/sound/soc/sti/Kconfig
@@ -0,0 +1,11 @@
+#
+# STM SoC audio configuration
+#
+menuconfig SND_SOC_STI
+	tristate "SoC Audio support for STI System-On-Chip"
+	depends on SND_SOC
+	depends on ARCH_STI || COMPILE_TEST
+	select SND_SOC_GENERIC_DMAENGINE_PCM
+	help
+		Say Y if you want to enable ASoC support for
+		any of the STI platforms (e.g. STIH416).
diff --git a/sound/soc/sti/Makefile b/sound/soc/sti/Makefile
new file mode 100644
index 0000000..4b188d2
--- /dev/null
+++ b/sound/soc/sti/Makefile
@@ -0,0 +1,4 @@
+# STI platform support
+snd-soc-sti-objs := sti_uniperif.o uniperif_player.o uniperif_reader.o
+
+obj-$(CONFIG_SND_SOC_STI) += snd-soc-sti.o
diff --git a/sound/soc/sti/sti_uniperif.c b/sound/soc/sti/sti_uniperif.c
new file mode 100644
index 0000000..39bcefe
--- /dev/null
+++ b/sound/soc/sti/sti_uniperif.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "uniperif.h"
+
+/*
+ * sti_uniperiph_dai_create_ctrl
+ * Creates the controls associated with the DAI and with the PCM device.
+ * The front end requests each control to be associated with a PCM device id.
+ */
+static int sti_uniperiph_dai_create_ctrl(struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *uni = priv->dai_data.uni;
+	struct snd_kcontrol_new *ctrl;
+	int i;
+
+	if (!uni->num_ctrls)
+		return 0;
+
+	for (i = 0; i < uni->num_ctrls; i++) {
+		/*
+		 * Several controls can share the same name; they are indexed
+		 * by the uniperipheral instance ID.
+		 */
+		ctrl = &uni->snd_ctrls[i];
+		ctrl->index = uni->info->id;
+		ctrl->device = uni->info->id;
+	}
+
+	return snd_soc_add_dai_controls(dai, uni->snd_ctrls, uni->num_ctrls);
+}
+
+/*
+ * DAI
+ */
+int sti_uniperiph_dai_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct snd_dmaengine_dai_dma_data *dma_data;
+	int transfer_size;
+
+	transfer_size = params_channels(params) * UNIPERIF_FIFO_FRAMES;
+
+	dma_data = snd_soc_dai_get_dma_data(dai, substream);
+	dma_data->maxburst = transfer_size;
+
+	return 0;
+}
+
+int sti_uniperiph_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+
+	priv->dai_data.uni->daifmt = fmt;
+
+	return 0;
+}
+
+static int sti_uniperiph_dai_suspend(struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *uni = priv->dai_data.uni;
+	int ret;
+
+	/* The uniperipheral should be in the stopped state */
+	if (uni->state != UNIPERIF_STATE_STOPPED) {
+		dev_err(uni->dev, "%s: invalid uni state (%d)",
+			__func__, (int)uni->state);
+		return -EBUSY;
+	}
+
+	/* Pinctrl: switch pinstate to sleep */
+	ret = pinctrl_pm_select_sleep_state(uni->dev);
+	if (ret)
+		dev_err(uni->dev, "%s: failed to select pinctrl state",
+			__func__);
+
+	return ret;
+}
+
+static int sti_uniperiph_dai_resume(struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *uni = priv->dai_data.uni;
+	int ret;
+
+	if (of_device_is_compatible(dai->dev->of_node, "st,sti-uni-player")) {
+		ret = uni_player_resume(uni);
+		if (ret)
+			return ret;
+	}
+
+	/* pinctrl: switch pinstate to default */
+	ret = pinctrl_pm_select_default_state(uni->dev);
+	if (ret)
+		dev_err(uni->dev, "%s: failed to select pinctrl state",
+			__func__);
+
+	return ret;
+}
+
+static int sti_uniperiph_dai_probe(struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct sti_uniperiph_dai *dai_data = &priv->dai_data;
+
+	/* DMA settings */
+	if (of_device_is_compatible(dai->dev->of_node, "st,sti-uni-player"))
+		snd_soc_dai_init_dma_data(dai, &dai_data->dma_data, NULL);
+	else
+		snd_soc_dai_init_dma_data(dai, NULL, &dai_data->dma_data);
+
+	dai_data->dma_data.addr = dai_data->uni->fifo_phys_address;
+	dai_data->dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+	return sti_uniperiph_dai_create_ctrl(dai);
+}
+
+static const struct snd_soc_dai_driver sti_uniperiph_dai_template = {
+	.probe = sti_uniperiph_dai_probe,
+	.suspend = sti_uniperiph_dai_suspend,
+	.resume = sti_uniperiph_dai_resume
+};
+
+static const struct snd_soc_component_driver sti_uniperiph_dai_component = {
+	.name = "sti_cpu_dai",
+};
+
+static int sti_uniperiph_cpu_dai_of(struct device_node *node,
+				    struct sti_uniperiph_data *priv)
+{
+	const char *str;
+	int ret;
+	struct device *dev = &priv->pdev->dev;
+	struct sti_uniperiph_dai *dai_data = &priv->dai_data;
+	struct snd_soc_dai_driver *dai = priv->dai;
+	struct snd_soc_pcm_stream *stream;
+	struct uniperif *uni;
+
+	uni = devm_kzalloc(dev, sizeof(*uni), GFP_KERNEL);
+	if (!uni)
+		return -ENOMEM;
+
+	*dai = sti_uniperiph_dai_template;
+	ret = of_property_read_string(node, "dai-name", &str);
+	if (ret < 0) {
+		dev_err(dev, "%s: dai name missing.\n", __func__);
+		return -EINVAL;
+	}
+	dai->name = str;
+
+	/* Get resources */
+	uni->mem_region = platform_get_resource(priv->pdev, IORESOURCE_MEM, 0);
+
+	if (!uni->mem_region) {
+		dev_err(dev, "Failed to get memory resource");
+		return -ENODEV;
+	}
+
+	uni->base = devm_ioremap_resource(dev, uni->mem_region);
+
+	if (IS_ERR(uni->base))
+		return PTR_ERR(uni->base);
+
+	uni->fifo_phys_address = uni->mem_region->start +
+				     UNIPERIF_FIFO_DATA_OFFSET(uni);
+
+	uni->irq = platform_get_irq(priv->pdev, 0);
+	if (uni->irq < 0) {
+		dev_err(dev, "Failed to get IRQ resource");
+		return -ENXIO;
+	}
+
+	dai_data->uni = uni;
+
+	if (of_device_is_compatible(node, "st,sti-uni-player")) {
+		uni_player_init(priv->pdev, uni);
+		stream = &dai->playback;
+	} else {
+		uni_reader_init(priv->pdev, uni);
+		stream = &dai->capture;
+	}
+	dai->ops = uni->dai_ops;
+
+	stream->stream_name = dai->name;
+	stream->channels_min = uni->hw->channels_min;
+	stream->channels_max = uni->hw->channels_max;
+	stream->rates = uni->hw->rates;
+	stream->formats = uni->hw->formats;
+
+	return 0;
+}
+
+static const struct snd_dmaengine_pcm_config dmaengine_pcm_config = {
+	.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
+};
+
+static int sti_uniperiph_probe(struct platform_device *pdev)
+{
+	struct sti_uniperiph_data *priv;
+	struct device_node *node = pdev->dev.of_node;
+	int ret;
+
+	/* Allocate the private data and the CPU_DAI array */
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	priv->dai = devm_kzalloc(&pdev->dev, sizeof(*priv->dai), GFP_KERNEL);
+	if (!priv->dai)
+		return -ENOMEM;
+
+	priv->pdev = pdev;
+
+	ret = sti_uniperiph_cpu_dai_of(node, priv);
+	if (ret < 0)
+		return ret;
+
+	dev_set_drvdata(&pdev->dev, priv);
+
+	ret = devm_snd_soc_register_component(&pdev->dev,
+					      &sti_uniperiph_dai_component,
+					      priv->dai, 1);
+	if (ret < 0)
+		return ret;
+
+	return devm_snd_dmaengine_pcm_register(&pdev->dev,
+					       &dmaengine_pcm_config, 0);
+}
+
+static const struct of_device_id snd_soc_sti_match[] = {
+	{ .compatible = "st,sti-uni-player", },
+	{ .compatible = "st,sti-uni-reader", },
+	{},
+};
+
+static struct platform_driver sti_uniperiph_driver = {
+	.driver = {
+		.name = "sti-uniperiph-dai",
+		.of_match_table = snd_soc_sti_match,
+	},
+	.probe = sti_uniperiph_probe,
+};
+module_platform_driver(sti_uniperiph_driver);
+
+MODULE_DESCRIPTION("uniperipheral DAI driver");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/sti/uniperif.h b/sound/soc/sti/uniperif.h
new file mode 100644
index 0000000..f0fd5a9
--- /dev/null
+++ b/sound/soc/sti/uniperif.h
@@ -0,0 +1,1229 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#ifndef __SND_ST_AUD_UNIPERIF_H
+#define __SND_ST_AUD_UNIPERIF_H
+
+#include <linux/regmap.h>
+
+#include <sound/dmaengine_pcm.h>
+
+/*
+ * Register access macros
+ */
+
+#define GET_UNIPERIF_REG(ip, offset, shift, mask) \
+	((readl_relaxed(ip->base + offset) >> shift) & mask)
+#define SET_UNIPERIF_REG(ip, offset, shift, mask, value) \
+	writel_relaxed(((readl_relaxed(ip->base + offset) & \
+	~(mask << shift)) | (((value) & mask) << shift)), ip->base + offset)
+#define SET_UNIPERIF_BIT_REG(ip, offset, shift, mask, value) \
+	writel_relaxed((((value) & mask) << shift), ip->base + offset)
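A userspace model of the accessor pattern used throughout this header: each field is an (offset, shift, mask) triple, GET extracts the field and SET performs a read-modify-write, here against a fake register file instead of ioremapped MMIO:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[0x400 / 4];

#define GET_FIELD(off, shift, mask) \
	((fake_regs[(off) / 4] >> (shift)) & (mask))
#define SET_FIELD(off, shift, mask, value) \
	(fake_regs[(off) / 4] = (fake_regs[(off) / 4] & \
		~((uint32_t)(mask) << (shift))) | \
		(((uint32_t)(value) & (mask)) << (shift)))

int main(void)
{
	SET_FIELD(0x0040, 5, 0x1, 1);	/* e.g. CONFIG.MEM_FMT = 16_16 */
	printf("%u\n", (unsigned int)GET_FIELD(0x0040, 5, 0x1));
	return 0;			/* prints: 1 */
}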
+
+/*
+ * AUD_UNIPERIF_SOFT_RST reg
+ */
+
+#define UNIPERIF_SOFT_RST_OFFSET(ip) 0x0000
+#define GET_UNIPERIF_SOFT_RST(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		readl_relaxed(ip->base + UNIPERIF_SOFT_RST_OFFSET(ip)) : 0)
+#define SET_UNIPERIF_SOFT_RST(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_SOFT_RST_OFFSET(ip))
+
+/* SOFT_RST */
+#define UNIPERIF_SOFT_RST_SOFT_RST_SHIFT(ip) 0x0
+#define UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip) 0x1
+#define SET_UNIPERIF_SOFT_RST_SOFT_RST(ip) \
+	SET_UNIPERIF_BIT_REG(ip, \
+		UNIPERIF_SOFT_RST_OFFSET(ip), \
+		UNIPERIF_SOFT_RST_SOFT_RST_SHIFT(ip), \
+		UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip), 1)
+#define GET_UNIPERIF_SOFT_RST_SOFT_RST(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_SOFT_RST_OFFSET(ip), \
+		UNIPERIF_SOFT_RST_SOFT_RST_SHIFT(ip), \
+		UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip))
+
+/*
+ * AUD_UNIPERIF_FIFO_DATA reg
+ */
+
+#define UNIPERIF_FIFO_DATA_OFFSET(ip) 0x0004
+#define SET_UNIPERIF_DATA(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_FIFO_DATA_OFFSET(ip))
+
+/*
+ * AUD_UNIPERIF_CHANNEL_STA_REGN reg
+ */
+
+#define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * n))
+#define GET_UNIPERIF_CHANNEL_STA_REGN(ip, n) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REGN(ip, n))
+#define SET_UNIPERIF_CHANNEL_STA_REGN(ip, n, value) \
+	writel_relaxed(value, ip->base + \
+			UNIPERIF_CHANNEL_STA_REGN(ip, n))
+
+#define UNIPERIF_CHANNEL_STA_REG0_OFFSET(ip) 0x0060
+#define GET_UNIPERIF_CHANNEL_STA_REG0(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG0_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG0(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG0_OFFSET(ip))
+
+#define UNIPERIF_CHANNEL_STA_REG1_OFFSET(ip) 0x0064
+#define GET_UNIPERIF_CHANNEL_STA_REG1(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG1_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG1(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG1_OFFSET(ip))
+
+#define UNIPERIF_CHANNEL_STA_REG2_OFFSET(ip) 0x0068
+#define GET_UNIPERIF_CHANNEL_STA_REG2(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG2_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG2(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG2_OFFSET(ip))
+
+#define UNIPERIF_CHANNEL_STA_REG3_OFFSET(ip) 0x006C
+#define GET_UNIPERIF_CHANNEL_STA_REG3(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG3_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG3(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG3_OFFSET(ip))
+
+#define UNIPERIF_CHANNEL_STA_REG4_OFFSET(ip) 0x0070
+#define GET_UNIPERIF_CHANNEL_STA_REG4(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG4_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG4(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG4_OFFSET(ip))
+
+#define UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip) 0x0074
+#define GET_UNIPERIF_CHANNEL_STA_REG5(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG5(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip))
+
+/*
+ *  AUD_UNIPERIF_ITS reg
+ */
+
+#define UNIPERIF_ITS_OFFSET(ip) 0x000C
+#define GET_UNIPERIF_ITS(ip) \
+	readl_relaxed(ip->base + UNIPERIF_ITS_OFFSET(ip))
+
+/* MEM_BLK_READ */
+#define UNIPERIF_ITS_MEM_BLK_READ_SHIFT(ip) 5
+#define UNIPERIF_ITS_MEM_BLK_READ_MASK(ip) \
+	(BIT(UNIPERIF_ITS_MEM_BLK_READ_SHIFT(ip)))
+
+/* FIFO_ERROR */
+#define UNIPERIF_ITS_FIFO_ERROR_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
+#define UNIPERIF_ITS_FIFO_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITS_FIFO_ERROR_SHIFT(ip)))
+
+/* DMA_ERROR */
+#define UNIPERIF_ITS_DMA_ERROR_SHIFT(ip) 9
+#define UNIPERIF_ITS_DMA_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITS_DMA_ERROR_SHIFT(ip)))
+
+/* UNDERFLOW_REC_DONE */
+#define UNIPERIF_ITS_UNDERFLOW_REC_DONE_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 12)
+#define UNIPERIF_ITS_UNDERFLOW_REC_DONE_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITS_UNDERFLOW_REC_DONE_SHIFT(ip))))
+
+/* UNDERFLOW_REC_FAILED */
+#define UNIPERIF_ITS_UNDERFLOW_REC_FAILED_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 13)
+#define UNIPERIF_ITS_UNDERFLOW_REC_FAILED_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITS_UNDERFLOW_REC_FAILED_SHIFT(ip))))
+
+/*
+ *  AUD_UNIPERIF_ITS_BCLR reg
+ */
+
+/* FIFO_ERROR */
+#define UNIPERIF_ITS_BCLR_FIFO_ERROR_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
+#define UNIPERIF_ITS_BCLR_FIFO_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITS_BCLR_FIFO_ERROR_SHIFT(ip)))
+#define SET_UNIPERIF_ITS_BCLR_FIFO_ERROR(ip) \
+	SET_UNIPERIF_ITS_BCLR(ip, \
+		UNIPERIF_ITS_BCLR_FIFO_ERROR_MASK(ip))
+
+#define UNIPERIF_ITS_BCLR_OFFSET(ip) 0x0010
+#define SET_UNIPERIF_ITS_BCLR(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_ITS_BCLR_OFFSET(ip))
+
+/*
+ *  AUD_UNIPERIF_ITM reg
+ */
+
+#define UNIPERIF_ITM_OFFSET(ip) 0x0018
+#define GET_UNIPERIF_ITM(ip) \
+	readl_relaxed(ip->base + UNIPERIF_ITM_OFFSET(ip))
+
+/* FIFO_ERROR */
+#define UNIPERIF_ITM_FIFO_ERROR_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
+#define UNIPERIF_ITM_FIFO_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITM_FIFO_ERROR_SHIFT(ip)))
+
+/* UNDERFLOW_REC_DONE */
+#define UNIPERIF_ITM_UNDERFLOW_REC_DONE_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 12)
+#define UNIPERIF_ITM_UNDERFLOW_REC_DONE_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITM_UNDERFLOW_REC_DONE_SHIFT(ip))))
+
+/* UNDERFLOW_REC_FAILED */
+#define UNIPERIF_ITM_UNDERFLOW_REC_FAILED_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 13)
+#define UNIPERIF_ITM_UNDERFLOW_REC_FAILED_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITM_UNDERFLOW_REC_FAILED_SHIFT(ip))))
+
+/*
+ *  AUD_UNIPERIF_ITM_BCLR reg
+ */
+
+#define UNIPERIF_ITM_BCLR_OFFSET(ip) 0x001c
+#define SET_UNIPERIF_ITM_BCLR(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_ITM_BCLR_OFFSET(ip))
+
+/* FIFO_ERROR */
+#define UNIPERIF_ITM_BCLR_FIFO_ERROR_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
+#define UNIPERIF_ITM_BCLR_FIFO_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITM_BCLR_FIFO_ERROR_SHIFT(ip)))
+#define SET_UNIPERIF_ITM_BCLR_FIFO_ERROR(ip) \
+	SET_UNIPERIF_ITM_BCLR(ip, \
+		UNIPERIF_ITM_BCLR_FIFO_ERROR_MASK(ip))
+
+/* DMA_ERROR */
+#define UNIPERIF_ITM_BCLR_DMA_ERROR_SHIFT(ip) 9
+#define UNIPERIF_ITM_BCLR_DMA_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITM_BCLR_DMA_ERROR_SHIFT(ip)))
+#define SET_UNIPERIF_ITM_BCLR_DMA_ERROR(ip) \
+	SET_UNIPERIF_ITM_BCLR(ip, \
+		UNIPERIF_ITM_BCLR_DMA_ERROR_MASK(ip))
+
+/*
+ *  AUD_UNIPERIF_ITM_BSET reg
+ */
+
+#define UNIPERIF_ITM_BSET_OFFSET(ip) 0x0020
+#define SET_UNIPERIF_ITM_BSET(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_ITM_BSET_OFFSET(ip))
+
+/* FIFO_ERROR */
+#define UNIPERIF_ITM_BSET_FIFO_ERROR_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
+#define UNIPERIF_ITM_BSET_FIFO_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITM_BSET_FIFO_ERROR_SHIFT(ip)))
+#define SET_UNIPERIF_ITM_BSET_FIFO_ERROR(ip) \
+	SET_UNIPERIF_ITM_BSET(ip, \
+		UNIPERIF_ITM_BSET_FIFO_ERROR_MASK(ip))
+
+/* MEM_BLK_READ */
+#define UNIPERIF_ITM_BSET_MEM_BLK_READ_SHIFT(ip) 5
+#define UNIPERIF_ITM_BSET_MEM_BLK_READ_MASK(ip) \
+	(BIT(UNIPERIF_ITM_BSET_MEM_BLK_READ_SHIFT(ip)))
+#define SET_UNIPERIF_ITM_BSET_MEM_BLK_READ(ip) \
+	SET_UNIPERIF_ITM_BSET(ip, \
+		UNIPERIF_ITM_BSET_MEM_BLK_READ_MASK(ip))
+
+/* DMA_ERROR */
+#define UNIPERIF_ITM_BSET_DMA_ERROR_SHIFT(ip) 9
+#define UNIPERIF_ITM_BSET_DMA_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITM_BSET_DMA_ERROR_SHIFT(ip)))
+#define SET_UNIPERIF_ITM_BSET_DMA_ERROR(ip) \
+	SET_UNIPERIF_ITM_BSET(ip, \
+		UNIPERIF_ITM_BSET_DMA_ERROR_MASK(ip))
+
+/* UNDERFLOW_REC_DONE */
+#define UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 12)
+#define UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_SHIFT(ip))))
+#define SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE(ip) \
+	SET_UNIPERIF_ITM_BSET(ip, \
+		UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_MASK(ip))
+
+/* UNDERFLOW_REC_FAILED */
+#define UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 13)
+#define UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_SHIFT(ip))))
+#define SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(ip) \
+	SET_UNIPERIF_ITM_BSET(ip, \
+		UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_MASK(ip))
+
+/*
+ * UNIPERIF_CONFIG reg
+ */
+
+#define UNIPERIF_CONFIG_OFFSET(ip) 0x0040
+#define GET_UNIPERIF_CONFIG(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CONFIG_OFFSET(ip))
+#define SET_UNIPERIF_CONFIG(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CONFIG_OFFSET(ip))
+
+/* PARITY_CNTR */
+#define UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip) 0
+#define UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_PARITY_CNTR(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip))
+#define SET_UNIPERIF_CONFIG_PARITY_CNTR_BY_HW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_PARITY_CNTR_BY_SW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip), 1)
+
+/* CHANNEL_STA_CNTR */
+#define UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip) 1
+#define UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip))
+#define SET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR_BY_SW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR_BY_HW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip),    \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip), 1)
+
+/* USER_DAT_CNTR */
+#define UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip) 2
+#define UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_USER_DAT_CNTR(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip))
+#define SET_UNIPERIF_CONFIG_USER_DAT_CNTR_BY_HW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip), 1)
+#define SET_UNIPERIF_CONFIG_USER_DAT_CNTR_BY_SW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip), 0)
+
+/* VALIDITY_DAT_CNTR */
+#define UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip) 3
+#define UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip))
+#define SET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_BY_SW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_BY_HW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip), 1)
+
+/* ONE_BIT_AUD_SUPPORT */
+#define UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip) 4
+#define UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_ONE_BIT_AUD(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip))
+#define SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_ONE_BIT_AUD_ENABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip), 1)
+
+/* MEMORY_FMT */
+#define UNIPERIF_CONFIG_MEM_FMT_SHIFT(ip) 5
+#define UNIPERIF_CONFIG_MEM_FMT_MASK(ip) 0x1
+#define VALUE_UNIPERIF_CONFIG_MEM_FMT_16_0(ip) 0
+#define VALUE_UNIPERIF_CONFIG_MEM_FMT_16_16(ip) 1
+#define GET_UNIPERIF_CONFIG_MEM_FMT(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_MEM_FMT_SHIFT(ip), \
+		UNIPERIF_CONFIG_MEM_FMT_MASK(ip))
+#define SET_UNIPERIF_CONFIG_MEM_FMT(ip, value)	\
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_MEM_FMT_SHIFT(ip), \
+		UNIPERIF_CONFIG_MEM_FMT_MASK(ip), value)
+#define SET_UNIPERIF_CONFIG_MEM_FMT_16_0(ip)   \
+	SET_UNIPERIF_CONFIG_MEM_FMT(ip, \
+		VALUE_UNIPERIF_CONFIG_MEM_FMT_16_0(ip))
+#define SET_UNIPERIF_CONFIG_MEM_FMT_16_16(ip) \
+	SET_UNIPERIF_CONFIG_MEM_FMT(ip, \
+		VALUE_UNIPERIF_CONFIG_MEM_FMT_16_16(ip))
+
+/* REPEAT_CHL_STS */
+#define UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip) 6
+#define UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_REPEAT_CHL_STS(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip))
+#define SET_UNIPERIF_CONFIG_REPEAT_CHL_STS_ENABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_REPEAT_CHL_STS_DISABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip), 1)
+
+/* BACK_STALL_REQ */
+#define UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 7 : -1)
+#define UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_BACK_STALL_REQ(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip))
+#define SET_UNIPERIF_CONFIG_BACK_STALL_REQ_DISABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_BACK_STALL_REQ_ENABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip), 1)
+
+/* FDMA_TRIGGER_LIMIT */
+#define UNIPERIF_CONFIG_DMA_TRIG_LIMIT_SHIFT(ip) 8
+#define UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(ip) 0x7F
+#define GET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_DMA_TRIG_LIMIT_SHIFT(ip), \
+		UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(ip))
+#define SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_DMA_TRIG_LIMIT_SHIFT(ip), \
+		UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(ip), value)
+
+/* CHL_STS_UPDATE */
+#define UNIPERIF_CONFIG_CHL_STS_UPDATE_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 16 : -1)
+#define UNIPERIF_CONFIG_CHL_STS_UPDATE_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_CHL_STS_UPDATE(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip),  \
+		UNIPERIF_CONFIG_CHL_STS_UPDATE_SHIFT(ip), \
+		UNIPERIF_CONFIG_CHL_STS_UPDATE_MASK(ip))
+#define SET_UNIPERIF_CONFIG_CHL_STS_UPDATE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_CHL_STS_UPDATE_SHIFT(ip), \
+		UNIPERIF_CONFIG_CHL_STS_UPDATE_MASK(ip), 1)
+
+/* IDLE_MOD */
+#define UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip) 18
+#define UNIPERIF_CONFIG_IDLE_MOD_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_IDLE_MOD(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_MASK(ip))
+#define SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_IDLE_MOD_ENABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_MASK(ip), 1)
+
+/* SUBFRAME_SELECTION */
+#define UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip) 19
+#define UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_SUBFRAME_SEL(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip))
+#define SET_UNIPERIF_CONFIG_SUBFRAME_SEL_SUBF1_SUBF0(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip), 1)
+#define SET_UNIPERIF_CONFIG_SUBFRAME_SEL_SUBF0_SUBF1(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip), 0)
+
+/* FULL_SW_CONTROL */
+#define UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip) 20
+#define UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_SPDIF_SW_CTRL(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip))
+#define SET_UNIPERIF_CONFIG_SPDIF_SW_CTRL_ENABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip), 1)
+#define SET_UNIPERIF_CONFIG_SPDIF_SW_CTRL_DISABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip), 0)
+
+/* MASTER_CLKEDGE */
+#define UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 24 : -1)
+#define UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_MSTR_CLKEDGE(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip))
+#define SET_UNIPERIF_CONFIG_MSTR_CLKEDGE_FALLING(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip), 1)
+#define SET_UNIPERIF_CONFIG_MSTR_CLKEDGE_RISING(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip), 0)
+
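+/*
+ * Illustrative use of the accessors above, assuming GET/SET_UNIPERIF_REG
+ * are the masked read-modify-write helpers defined earlier in this header:
+ *
+ *	SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(ip, 62);
+ *	limit = GET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(ip);	(reads back 62)
+ */
+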
+/*
+ * UNIPERIF_CTRL reg
+ */
+
+#define UNIPERIF_CTRL_OFFSET(ip) 0x0044
+#define GET_UNIPERIF_CTRL(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CTRL_OFFSET(ip))
+#define SET_UNIPERIF_CTRL(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CTRL_OFFSET(ip))
+
+/* OPERATION */
+#define UNIPERIF_CTRL_OPERATION_SHIFT(ip) 0
+#define UNIPERIF_CTRL_OPERATION_MASK(ip) 0x7
+#define GET_UNIPERIF_CTRL_OPERATION(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_OFF(ip) 0
+#define SET_UNIPERIF_CTRL_OPERATION_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_OFF(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PCM_NULL(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 1 : -1)
+#define SET_UNIPERIF_CTRL_OPERATION_MUTE_PCM_NULL(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PCM_NULL(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PAUSE_BURST(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 2 : -1)
+#define SET_UNIPERIF_CTRL_OPERATION_MUTE_PAUSE_BURST(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PAUSE_BURST(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_PCM_DATA(ip) 3
+#define SET_UNIPERIF_CTRL_OPERATION_PCM_DATA(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_PCM_DATA(ip))
+/* This is the same as above! */
+#define VALUE_UNIPERIF_CTRL_OPERATION_AUDIO_DATA(ip) 3
+#define SET_UNIPERIF_CTRL_OPERATION_AUDIO_DATA(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_AUDIO_DATA(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_ENC_DATA(ip) 4
+#define SET_UNIPERIF_CTRL_OPERATION_ENC_DATA(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_ENC_DATA(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_CD_DATA(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 5 : -1)
+#define SET_UNIPERIF_CTRL_OPERATION_CD_DATA(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_CD_DATA(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_STANDBY(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 7)
+#define SET_UNIPERIF_CTRL_OPERATION_STANDBY(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_STANDBY(ip))
+
+/* EXIT_STBY_ON_EOBLOCK */
+#define UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 3)
+#define UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip))
+#define SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip), 0)
+#define SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_ON(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip), 1)
+
+/* ROUNDING */
+#define UNIPERIF_CTRL_ROUNDING_SHIFT(ip) 4
+#define UNIPERIF_CTRL_ROUNDING_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_ROUNDING(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ROUNDING_SHIFT(ip), \
+		UNIPERIF_CTRL_ROUNDING_MASK(ip))
+#define SET_UNIPERIF_CTRL_ROUNDING_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ROUNDING_SHIFT(ip), \
+		UNIPERIF_CTRL_ROUNDING_MASK(ip), 0)
+#define SET_UNIPERIF_CTRL_ROUNDING_ON(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ROUNDING_SHIFT(ip), \
+		UNIPERIF_CTRL_ROUNDING_MASK(ip), 1)
+
+/* DIVIDER */
+#define UNIPERIF_CTRL_DIVIDER_SHIFT(ip) 5
+#define UNIPERIF_CTRL_DIVIDER_MASK(ip) 0xff
+#define GET_UNIPERIF_CTRL_DIVIDER(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_DIVIDER_SHIFT(ip), \
+		UNIPERIF_CTRL_DIVIDER_MASK(ip))
+#define SET_UNIPERIF_CTRL_DIVIDER(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_DIVIDER_SHIFT(ip), \
+		UNIPERIF_CTRL_DIVIDER_MASK(ip), value)
+
+/* BYTE_SWAP */
+#define UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 13 : -1)
+#define UNIPERIF_CTRL_BYTE_SWP_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_BYTE_SWP(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_MASK(ip))
+#define SET_UNIPERIF_CTRL_BYTE_SWP_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_MASK(ip), 0)
+#define SET_UNIPERIF_CTRL_BYTE_SWP_ON(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_MASK(ip), 1)
+
+/* ZERO_STUFFING_HW_SW */
+#define UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 14 : -1)
+#define UNIPERIF_CTRL_ZERO_STUFF_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_ZERO_STUFF(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_MASK(ip))
+#define SET_UNIPERIF_CTRL_ZERO_STUFF_HW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_MASK(ip), 1)
+#define SET_UNIPERIF_CTRL_ZERO_STUFF_SW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_MASK(ip), 0)
+
+/* SPDIF_LAT */
+#define UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 16 : -1)
+#define UNIPERIF_CTRL_SPDIF_LAT_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_SPDIF_LAT(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_MASK(ip))
+#define SET_UNIPERIF_CTRL_SPDIF_LAT_ON(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_MASK(ip), 1)
+#define SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_MASK(ip), 0)
+
+/* EN_SPDIF_FORMATTING */
+#define UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip) 17
+#define UNIPERIF_CTRL_SPDIF_FMT_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_SPDIF_FMT(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_MASK(ip))
+#define SET_UNIPERIF_CTRL_SPDIF_FMT_ON(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_MASK(ip), 1)
+#define SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_MASK(ip), 0)
+
+/* READER_OUT_SELECT */
+#define UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 18 : -1)
+#define UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_READER_OUT_SEL(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip))
+#define SET_UNIPERIF_CTRL_READER_OUT_SEL_IN_MEM(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip), 0)
+#define SET_UNIPERIF_CTRL_READER_OUT_SEL_ON_I2S_LINE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip), 1)
+
+/* UNDERFLOW_REC_WINDOW */
+#define UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip) 20
+#define UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_MASK(ip) 0xff
+#define GET_UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip), \
+		UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_MASK(ip))
+#define SET_UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip), \
+		UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_MASK(ip), value)
+
+/*
+ * UNIPERIF_I2S_FMT a.k.a UNIPERIF_FORMAT reg
+ */
+
+#define UNIPERIF_I2S_FMT_OFFSET(ip) 0x0048
+#define GET_UNIPERIF_I2S_FMT(ip) \
+	readl_relaxed(ip->base + UNIPERIF_I2S_FMT_OFFSET(ip))
+#define SET_UNIPERIF_I2S_FMT(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_I2S_FMT_OFFSET(ip))
+
+/* NBIT */
+#define UNIPERIF_I2S_FMT_NBIT_SHIFT(ip) 0
+#define UNIPERIF_I2S_FMT_NBIT_MASK(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_NBIT(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NBIT_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NBIT_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_NBIT_32(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NBIT_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NBIT_MASK(ip), 0)
+#define SET_UNIPERIF_I2S_FMT_NBIT_16(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NBIT_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NBIT_MASK(ip), 1)
+
+/* DATA_SIZE */
+#define UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip) 1
+#define UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip) 0x7
+#define GET_UNIPERIF_I2S_FMT_DATA_SIZE(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 0)
+#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_18(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 1)
+#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_20(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 2)
+#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_24(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 3)
+#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_28(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 4)
+#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 5)
+
+/* LR_POL */
+#define UNIPERIF_I2S_FMT_LR_POL_SHIFT(ip) 4
+#define UNIPERIF_I2S_FMT_LR_POL_MASK(ip) 0x1
+#define VALUE_UNIPERIF_I2S_FMT_LR_POL_LOW(ip) 0x0
+#define VALUE_UNIPERIF_I2S_FMT_LR_POL_HIG(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_LR_POL(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_LR_POL_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_LR_POL_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_LR_POL(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_LR_POL_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_LR_POL_MASK(ip), value)
+#define SET_UNIPERIF_I2S_FMT_LR_POL_LOW(ip) \
+	SET_UNIPERIF_I2S_FMT_LR_POL(ip, \
+		VALUE_UNIPERIF_I2S_FMT_LR_POL_LOW(ip))
+#define SET_UNIPERIF_I2S_FMT_LR_POL_HIG(ip) \
+	SET_UNIPERIF_I2S_FMT_LR_POL(ip, \
+		VALUE_UNIPERIF_I2S_FMT_LR_POL_HIG(ip))
+
+/* SCLK_EDGE */
+#define UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip) 5
+#define UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_SCLK_EDGE(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip), 0)
+#define SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip), 1)
+
+/* PADDING */
+#define UNIPERIF_I2S_FMT_PADDING_SHIFT(ip) 6
+#define UNIPERIF_I2S_FMT_PADDING_MASK(ip) 0x1
+#define VALUE_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(ip) 0x0
+#define VALUE_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_PADDING(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_PADDING_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_PADDING_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_PADDING(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_PADDING_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_PADDING_MASK(ip), value)
+#define SET_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(ip) \
+	SET_UNIPERIF_I2S_FMT_PADDING(ip, \
+		VALUE_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(ip))
+#define SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(ip) \
+	SET_UNIPERIF_I2S_FMT_PADDING(ip, \
+		VALUE_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(ip))
+
+/* ALIGN */
+#define UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip) 7
+#define UNIPERIF_I2S_FMT_ALIGN_MASK(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_ALIGN(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_ALIGN_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_ALIGN_MASK(ip), 0)
+#define SET_UNIPERIF_I2S_FMT_ALIGN_RIGHT(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_ALIGN_MASK(ip), 1)
+
+/* ORDER */
+#define UNIPERIF_I2S_FMT_ORDER_SHIFT(ip) 8
+#define UNIPERIF_I2S_FMT_ORDER_MASK(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_ORDER(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ORDER_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_ORDER_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_ORDER_LSB(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ORDER_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_ORDER_MASK(ip), 0)
+#define SET_UNIPERIF_I2S_FMT_ORDER_MSB(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ORDER_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_ORDER_MASK(ip), 1)
+
+/* NUM_CH */
+#define UNIPERIF_I2S_FMT_NUM_CH_SHIFT(ip) 9
+#define UNIPERIF_I2S_FMT_NUM_CH_MASK(ip) 0x7
+#define GET_UNIPERIF_I2S_FMT_NUM_CH(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NUM_CH_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NUM_CH_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_NUM_CH(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NUM_CH_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NUM_CH_MASK(ip), value)
+
+/* NO_OF_SAMPLES_TO_READ */
+#define UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_SHIFT(ip) 12
+#define UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_MASK(ip) 0xfffff
+#define GET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_MASK(ip), value)
+
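+/*
+ * Illustrative combination of the I2S format setters above for a 16-bit
+ * stereo stream (NUM_CH counts channel pairs, so stereo is 1, matching
+ * the runtime->channels / 2 programmed by the player code):
+ *
+ *	SET_UNIPERIF_I2S_FMT_NBIT_16(ip);
+ *	SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(ip);
+ *	SET_UNIPERIF_I2S_FMT_NUM_CH(ip, 1);
+ */
+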
+/*
+ * UNIPERIF_BIT_CONTROL reg
+ */
+
+#define UNIPERIF_BIT_CONTROL_OFFSET(ip)  \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 0x004c)
+#define GET_UNIPERIF_BIT_CONTROL(ip) \
+	readl_relaxed(ip->base + UNIPERIF_BIT_CONTROL_OFFSET(ip))
+#define SET_UNIPERIF_BIT_CONTROL(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_BIT_CONTROL_OFFSET(ip))
+
+/* CLR_UNDERFLOW_DURATION */
+#define UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_SHIFT(ip) 0
+#define UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_MASK(ip) 0x1
+#define GET_UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_BIT_CONTROL_OFFSET(ip), \
+		UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_SHIFT(ip), \
+		UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_MASK(ip))
+#define SET_UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_BIT_CONTROL_OFFSET(ip), \
+		UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_SHIFT(ip), \
+		UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_MASK(ip), 1)
+
+/* CHL_STS_UPDATE */
+#define UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_SHIFT(ip) 1
+#define UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_MASK(ip) 0x1
+#define GET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_BIT_CONTROL_OFFSET(ip), \
+		UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_SHIFT(ip), \
+		UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_MASK(ip))
+#define SET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_BIT_CONTROL_OFFSET(ip), \
+		UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_SHIFT(ip), \
+		UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_MASK(ip), 1)
+
+/*
+ * UNIPERIF_STATUS_1 reg
+ */
+
+#define UNIPERIF_STATUS_1_OFFSET(ip) 0x0050
+#define GET_UNIPERIF_STATUS_1(ip) \
+	readl_relaxed(ip->base + UNIPERIF_STATUS_1_OFFSET(ip))
+#define SET_UNIPERIF_STATUS_1(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_STATUS_1_OFFSET(ip))
+
+/* UNDERFLOW_DURATION */
+#define UNIPERIF_STATUS_1_UNDERFLOW_DURATION_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 0)
+#define UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip) 0xff
+#define GET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_STATUS_1_OFFSET(ip), \
+		UNIPERIF_STATUS_1_UNDERFLOW_DURATION_SHIFT(ip), \
+		UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip))
+#define SET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_STATUS_1_OFFSET(ip), \
+		UNIPERIF_STATUS_1_UNDERFLOW_DURATION_SHIFT(ip), \
+		UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip), value)
+
+/*
+ * AUD_UNIPERIF_CHANNEL_STA_REGN reg
+ */
+
+#define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * (n)))
+#define GET_UNIPERIF_CHANNEL_STA_REGN(ip, n) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REGN(ip, n))
+#define SET_UNIPERIF_CHANNEL_STA_REGN(ip, n, value) \
+	writel_relaxed(value, ip->base + \
+			UNIPERIF_CHANNEL_STA_REGN(ip, n))
+
+/*
+ * AUD_UNIPERIF_USER_VALIDITY reg
+ */
+
+#define UNIPERIF_USER_VALIDITY_OFFSET(ip) 0x0090
+#define GET_UNIPERIF_USER_VALIDITY(ip) \
+	readl_relaxed(ip->base + UNIPERIF_USER_VALIDITY_OFFSET(ip))
+#define SET_UNIPERIF_USER_VALIDITY(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_USER_VALIDITY_OFFSET(ip))
+
+/* VALIDITY_LEFT_AND_RIGHT */
+#define UNIPERIF_USER_VALIDITY_VALIDITY_LR_SHIFT(ip) 0
+#define UNIPERIF_USER_VALIDITY_VALIDITY_LR_MASK(ip) 0x3
+#define GET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_USER_VALIDITY_OFFSET(ip), \
+		UNIPERIF_USER_VALIDITY_VALIDITY_LR_SHIFT(ip), \
+		UNIPERIF_USER_VALIDITY_VALIDITY_LR_MASK(ip))
+#define SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_USER_VALIDITY_OFFSET(ip), \
+		UNIPERIF_USER_VALIDITY_VALIDITY_LR_SHIFT(ip), \
+		UNIPERIF_USER_VALIDITY_VALIDITY_LR_MASK(ip), \
+		value ? 0x3 : 0)
+
+/*
+ * UNIPERIF_DBG_STANDBY_LEFT_SP reg
+ */
+#define UNIPERIF_DBG_STANDBY_LEFT_SP_OFFSET(ip) 0x0150
+#define UNIPERIF_DBG_STANDBY_LEFT_SP_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 0)
+#define UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 0xFFFFFF)
+#define GET_UNIPERIF_DBG_STANDBY_LEFT_SP(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_OFFSET(ip), \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_SHIFT(ip), \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip))
+#define SET_UNIPERIF_DBG_STANDBY_LEFT_SP(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_OFFSET(ip), \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_SHIFT(ip), \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip), value)
+
+/*
+ * uniperipheral IP capabilities
+ */
+
+#define UNIPERIF_FIFO_SIZE		70 /* FIFO is 70 cells deep */
+#define UNIPERIF_FIFO_FRAMES		4  /* FDMA trigger limit in frames */
+
+/*
+ * Uniperipheral IP revisions
+ */
+enum uniperif_version {
+	SND_ST_UNIPERIF_VERSION_UNKNOWN,
+	/* SASG1 (Orly), Newman */
+	SND_ST_UNIPERIF_VERSION_C6AUD0_UNI_1_0,
+	/* SASC1, SASG2 (Orly2) */
+	SND_ST_UNIPERIF_VERSION_UNI_PLR_1_0,
+	/* SASC1, SASG2 (Orly2), TELSS, Cannes */
+	SND_ST_UNIPERIF_VERSION_UNI_RDR_1_0,
+	/* TELSS (SASC1) */
+	SND_ST_UNIPERIF_VERSION_TDM_PLR_1_0,
+	/* Cannes/Monaco */
+	SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
+};
+
+enum uniperif_type {
+	SND_ST_UNIPERIF_PLAYER_TYPE_NONE,
+	SND_ST_UNIPERIF_PLAYER_TYPE_HDMI,
+	SND_ST_UNIPERIF_PLAYER_TYPE_PCM,
+	SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF
+};
+
+enum uniperif_state {
+	UNIPERIF_STATE_STOPPED,
+	UNIPERIF_STATE_STARTED,
+	UNIPERIF_STATE_STANDBY,
+	UNIPERIF_STATE_UNDERFLOW,
+	UNIPERIF_STATE_OVERFLOW = UNIPERIF_STATE_UNDERFLOW,
+	UNIPERIF_STATE_XRUN
+};
+
+enum uniperif_iec958_encoding_mode {
+	UNIPERIF_IEC958_ENCODING_MODE_PCM,
+	UNIPERIF_IEC958_ENCODING_MODE_ENCODED
+};
+
+struct uniperif_info {
+	int id; /* instance value of the uniperipheral IP */
+	enum uniperif_type player_type;
+	int underflow_enabled;		/* Underflow recovery mode */
+};
+
+struct uniperif_iec958_settings {
+	enum uniperif_iec958_encoding_mode encoding_mode;
+	struct snd_aes_iec958 iec958;
+};
+
+struct uniperif {
+	/* System information */
+	struct uniperif_info *info;
+	struct device *dev;
+	int ver; /* IP version, used by register access macros */
+	struct regmap_field *clk_sel;
+
+	/* capabilities */
+	const struct snd_pcm_hardware *hw;
+
+	/* Resources */
+	struct resource *mem_region;
+	void __iomem *base;
+	unsigned long fifo_phys_address;
+	int irq;
+
+	/* Clocks */
+	struct clk *clk;
+	int mclk;
+	int clk_adj;
+
+	/* Runtime data */
+	enum uniperif_state state;
+
+	struct snd_pcm_substream *substream;
+
+	/* Specific to IEC958 player */
+	struct uniperif_iec958_settings stream_settings;
+	struct mutex ctrl_lock; /* For resources updated by stream and controls */
+
+	/* ALSA controls */
+	struct snd_kcontrol_new *snd_ctrls;
+	int num_ctrls;
+
+	/* dai properties */
+	unsigned int daifmt;
+
+	/* DAI callbacks */
+	const struct snd_soc_dai_ops *dai_ops;
+};
+
+struct sti_uniperiph_dai {
+	int stream;
+	struct uniperif *uni;
+	struct snd_dmaengine_dai_dma_data dma_data;
+};
+
+struct sti_uniperiph_data {
+	struct platform_device *pdev;
+	struct snd_soc_dai_driver *dai;
+	struct sti_uniperiph_dai dai_data;
+};
+
+/* uniperiph player */
+int uni_player_init(struct platform_device *pdev,
+		    struct uniperif *uni_player);
+int uni_player_resume(struct uniperif *player);
+
+/* uniperiph reader */
+int uni_reader_init(struct platform_device *pdev,
+		    struct uniperif *uni_reader);
+
+/* common */
+int sti_uniperiph_dai_set_fmt(struct snd_soc_dai *dai,
+			      unsigned int fmt);
+
+int sti_uniperiph_dai_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai);
+
+#endif
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
new file mode 100644
index 0000000..f6eefe1
--- /dev/null
+++ b/sound/soc/sti/uniperif_player.c
@@ -0,0 +1,1110 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+
+#include <sound/asoundef.h>
+#include <sound/soc.h>
+
+#include "uniperif.h"
+
+/*
+ * Some hardware-related definitions
+ */
+
+/* sys config registers definitions */
+#define SYS_CFG_AUDIO_GLUE 0xA4
+#define SYS_CFG_AUDI0_GLUE_PCM_CLKX 8
+
+/*
+ * Driver specific types.
+ */
+#define UNIPERIF_PLAYER_TYPE_IS_HDMI(p) \
+	((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_HDMI)
+#define UNIPERIF_PLAYER_TYPE_IS_PCM(p) \
+	((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_PCM)
+#define UNIPERIF_PLAYER_TYPE_IS_SPDIF(p) \
+	((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF)
+#define UNIPERIF_PLAYER_TYPE_IS_IEC958(p) \
+	(UNIPERIF_PLAYER_TYPE_IS_HDMI(p) || \
+		UNIPERIF_PLAYER_TYPE_IS_SPDIF(p))
+
+#define UNIPERIF_PLAYER_CLK_ADJ_MIN  (-999999)
+#define UNIPERIF_PLAYER_CLK_ADJ_MAX  1000000
+
+/*
+ * Note: snd_pcm_hardware is linked to the DMA controller but is declared
+ * here to integrate the DAI CPU capabilities in terms of rate and
+ * supported channels.
+ */
+static const struct snd_pcm_hardware uni_player_pcm_hw = {
+	.info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_MMAP_VALID,
+	.formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE,
+
+	.rates = SNDRV_PCM_RATE_CONTINUOUS,
+	.rate_min = 8000,
+	.rate_max = 192000,
+
+	.channels_min = 2,
+	.channels_max = 8,
+
+	.periods_min = 2,
+	.periods_max = 48,
+
+	.period_bytes_min = 128,
+	.period_bytes_max = 64 * PAGE_SIZE,
+	.buffer_bytes_max = 256 * PAGE_SIZE
+};
+
+static inline int reset_player(struct uniperif *player)
+{
+	int count = 10;
+
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) {
+		while (GET_UNIPERIF_SOFT_RST_SOFT_RST(player) && count) {
+			udelay(5);
+			count--;
+		}
+	}
+
+	if (!count) {
+		dev_err(player->dev, "Failed to reset uniperif");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * uni_player_irq_handler
+ * In case of error the audio stream is stopped; the stop action is
+ * protected via the PCM stream lock to avoid a race condition with the
+ * trigger callback.
+ */
+static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
+{
+	irqreturn_t ret = IRQ_NONE;
+	struct uniperif *player = dev_id;
+	unsigned int status;
+	unsigned int tmp;
+
+	if (player->state == UNIPERIF_STATE_STOPPED) {
+		/* Unexpected IRQ: do nothing */
+		return IRQ_NONE;
+	}
+
+	/* Get the interrupt status and clear it immediately */
+	status = GET_UNIPERIF_ITS(player);
+	SET_UNIPERIF_ITS_BCLR(player, status);
+
+	/* Check for fifo error (underrun) */
+	if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(player))) {
+		dev_err(player->dev, "FIFO underflow error detected");
+
+		/* Interrupt is informational only when underflow recovery is enabled */
+		if (player->info->underflow_enabled) {
+			/* Update state to underflow */
+			player->state = UNIPERIF_STATE_UNDERFLOW;
+
+		} else {
+			/* Disable the interrupt so it doesn't continually fire */
+			SET_UNIPERIF_ITM_BCLR_FIFO_ERROR(player);
+
+			/* Stop the player */
+			snd_pcm_stream_lock(player->substream);
+			snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
+			snd_pcm_stream_unlock(player->substream);
+		}
+
+		ret = IRQ_HANDLED;
+	}
+
+	/* Check for dma error (overrun) */
+	if (unlikely(status & UNIPERIF_ITS_DMA_ERROR_MASK(player))) {
+		dev_err(player->dev, "DMA error detected");
+
+		/* Disable the interrupt so it doesn't continually fire */
+		SET_UNIPERIF_ITM_BCLR_DMA_ERROR(player);
+
+		/* Stop the player */
+		snd_pcm_stream_lock(player->substream);
+		snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
+		snd_pcm_stream_unlock(player->substream);
+
+		ret = IRQ_HANDLED;
+	}
+
+	/* Check for underflow recovery done */
+	if (unlikely(status & UNIPERIF_ITM_UNDERFLOW_REC_DONE_MASK(player))) {
+		if (!player->info->underflow_enabled) {
+			dev_err(player->dev, "unexpected Underflow recovering");
+			return -EPERM;
+		}
+		/* Read the underflow recovery duration */
+		tmp = GET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(player);
+
+		/* Clear the underflow recovery duration */
+		SET_UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION(player);
+
+		/* Update state to started */
+		player->state = UNIPERIF_STATE_STARTED;
+
+		ret = IRQ_HANDLED;
+	}
+
+	/* Check if underflow recovery failed */
+	if (unlikely(status &
+		     UNIPERIF_ITM_UNDERFLOW_REC_FAILED_MASK(player))) {
+		dev_err(player->dev, "Underflow recovery failed");
+
+		/* Stop the player */
+		snd_pcm_stream_lock(player->substream);
+		snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
+		snd_pcm_stream_unlock(player->substream);
+
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+static int uni_player_clk_set_rate(struct uniperif *player, unsigned long rate)
+{
+	int rate_adjusted, rate_achieved, delta, ret;
+	int adjustment = player->clk_adj;
+
+	/*
+	 *             a
+	 * F = f + --------- * f = f + d
+	 *          1000000
+	 *
+	 *         a
+	 * d = --------- * f
+	 *      1000000
+	 *
+	 * where:
+	 *   f - nominal rate
+	 *   a - adjustment in ppm (parts per million)
+	 *   F - rate to be set in synthesizer
+	 *   d - delta (difference) between f and F
+	 */
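+	/*
+	 * Worked example: f = 48000 Hz and a = 100 ppm give
+	 * d = (48000 * 100 + 500000) / 1000000 = 5, so F = 48005 Hz.
+	 */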
+	if (adjustment < 0) {
+		/* div64_64 operates on unsigned values... */
+		delta = -1;
+		adjustment = -adjustment;
+	} else {
+		delta = 1;
+	}
+	/* Adding 500000 (half of 1000000) rounds the division to nearest */
+	delta *= (int)div64_u64((uint64_t)rate *
+				(uint64_t)adjustment + 500000, 1000000);
+	rate_adjusted = rate + delta;
+
+	/* Adjusted rate should never be == 0 */
+	if (!rate_adjusted)
+		return -EINVAL;
+
+	ret = clk_set_rate(player->clk, rate_adjusted);
+	if (ret < 0)
+		return ret;
+
+	rate_achieved = clk_get_rate(player->clk);
+	if (!rate_achieved)
+		/* A value of 0 means the clock or its parent is not valid */
+		return -EINVAL;
+
+	/*
+	 * Record the adjustment actually achieved, in ppm, so that the ALSA
+	 * adjustment control reports the real (rounded) value.
+	 */
+	delta = rate_achieved - rate;
+	if (delta < 0) {
+		/* div64_64 operates on unsigned values... */
+		delta = -delta;
+		adjustment = -1;
+	} else {
+		adjustment = 1;
+	}
+	/* rate/2 is added so the division rounds to nearest */
+	adjustment *= (int)div64_u64((uint64_t)delta * 1000000 + rate / 2,
+				     rate);
+	player->clk_adj = adjustment;
+	return 0;
+}
+
+static void uni_player_set_channel_status(struct uniperif *player,
+					  struct snd_pcm_runtime *runtime)
+{
+	int n;
+	unsigned int status;
+
+	/*
+	 * Some AVRs and TVs require the channel status to contain a correct
+	 * sampling frequency. If no sample rate is already specified, then
+	 * set one.
+	 */
+	mutex_lock(&player->ctrl_lock);
+	if (runtime && (player->stream_settings.iec958.status[3]
+					== IEC958_AES3_CON_FS_NOTID)) {
+		switch (runtime->rate) {
+		case 22050:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_22050;
+			break;
+		case 44100:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_44100;
+			break;
+		case 88200:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_88200;
+			break;
+		case 176400:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_176400;
+			break;
+		case 24000:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_24000;
+			break;
+		case 48000:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_48000;
+			break;
+		case 96000:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_96000;
+			break;
+		case 192000:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_192000;
+			break;
+		case 32000:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_32000;
+			break;
+		default:
+			/* Mark as sampling frequency not indicated */
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_NOTID;
+			break;
+		}
+	}
+
+	/*
+	 * Audio mode:
+	 * use the audio mode status to select PCM or encoded mode
+	 */
+	if (player->stream_settings.iec958.status[0] & IEC958_AES0_NONAUDIO)
+		player->stream_settings.encoding_mode =
+			UNIPERIF_IEC958_ENCODING_MODE_ENCODED;
+	else
+		player->stream_settings.encoding_mode =
+			UNIPERIF_IEC958_ENCODING_MODE_PCM;
+
+	if (player->stream_settings.encoding_mode ==
+		UNIPERIF_IEC958_ENCODING_MODE_PCM)
+		/* Clear user validity bits */
+		SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 0);
+	else
+		/* Set user validity bits */
+		SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 1);
+
+	/* Program the new channel status */
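+	/*
+	 * Each 32-bit STA register packs four consecutive status bytes;
+	 * for n = 0 this is status[0] | status[1] << 8 | status[2] << 16 |
+	 * status[3] << 24.
+	 */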
+	for (n = 0; n < 6; ++n) {
+		status  =
+	player->stream_settings.iec958.status[0 + (n * 4)] & 0xff;
+		status |=
+		player->stream_settings.iec958.status[1 + (n * 4)] << 8;
+		status |=
+		player->stream_settings.iec958.status[2 + (n * 4)] << 16;
+		status |=
+		player->stream_settings.iec958.status[3 + (n * 4)] << 24;
+		SET_UNIPERIF_CHANNEL_STA_REGN(player, n, status);
+	}
+	mutex_unlock(&player->ctrl_lock);
+
+	/* Update the channel status */
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+		SET_UNIPERIF_CONFIG_CHL_STS_UPDATE(player);
+	else
+		SET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(player);
+}
+
+static int uni_player_prepare_iec958(struct uniperif *player,
+				     struct snd_pcm_runtime *runtime)
+{
+	int clk_div;
+
+	clk_div = player->mclk / runtime->rate;
+
+	/* Oversampling must be a multiple of 128 (iec958 subframe is 32 bits) */
+	if ((clk_div % 128) || (clk_div <= 0)) {
+		dev_err(player->dev, "%s: invalid clk_div %d",
+			__func__, clk_div);
+		return -EINVAL;
+	}
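+	/*
+	 * Example: mclk = 12.288 MHz at rate = 48 kHz gives clk_div = 256
+	 * (2 * 128), so the divider programmed below is 2.
+	 */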
+
+	switch (runtime->format) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		/* 16/16 memory format */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_16(player);
+		/* 32-bit sub-frames on the wire (iec958) */
+		SET_UNIPERIF_I2S_FMT_NBIT_32(player);
+		/* Set 16-bit sample precision */
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(player);
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		/* 16/0 memory format */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_0(player);
+		/* 32-bits per sub-frame */
+		SET_UNIPERIF_I2S_FMT_NBIT_32(player);
+		/* Set 24-bit sample precision */
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_24(player);
+		break;
+	default:
+		dev_err(player->dev, "format not supported");
+		return -EINVAL;
+	}
+
+	/* Set parity to be calculated by the hardware */
+	SET_UNIPERIF_CONFIG_PARITY_CNTR_BY_HW(player);
+
+	/* Set channel status bits to be inserted by the hardware */
+	SET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR_BY_HW(player);
+
+	/* Set user data bits to be inserted by the hardware */
+	SET_UNIPERIF_CONFIG_USER_DAT_CNTR_BY_HW(player);
+
+	/* Set validity bits to be inserted by the hardware */
+	SET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_BY_HW(player);
+
+	/* Set full software control to disabled */
+	SET_UNIPERIF_CONFIG_SPDIF_SW_CTRL_DISABLE(player);
+
+	SET_UNIPERIF_CTRL_ZERO_STUFF_HW(player);
+
+	/* Update the channel status */
+	uni_player_set_channel_status(player, runtime);
+
+	/* Clear the user validity user bits */
+	SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 0);
+
+	/* Disable one-bit audio mode */
+	SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(player);
+
+	/* Enable consecutive frames repetition of Z preamble (not for HBRA) */
+	SET_UNIPERIF_CONFIG_REPEAT_CHL_STS_ENABLE(player);
+
+	/* Select SUBF1_SUBF0 ordering, i.e. swap the left/right channels */
+	SET_UNIPERIF_CONFIG_SUBFRAME_SEL_SUBF1_SUBF0(player);
+
+	/* Set data output as MSB first */
+	SET_UNIPERIF_I2S_FMT_ORDER_MSB(player);
+
+	if (player->stream_settings.encoding_mode ==
+				UNIPERIF_IEC958_ENCODING_MODE_ENCODED)
+		SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_ON(player);
+	else
+		SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_OFF(player);
+
+	SET_UNIPERIF_I2S_FMT_NUM_CH(player, runtime->channels / 2);
+
+	/* Set rounding to off */
+	SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
+
+	/* Set clock divisor */
+	SET_UNIPERIF_CTRL_DIVIDER(player, clk_div / 128);
+
+	/* Set the spdif latency to not wait before starting player */
+	SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
+
+	/*
+	 * On IPs older than UNI_PLR_TOP_1_0, ensure iec958 formatting is off
+	 * here; uni_player_start() will enable it at the same time as the
+	 * operation mode is set, to work around a silicon issue.
+	 */
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+		SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(player);
+	else
+		SET_UNIPERIF_CTRL_SPDIF_FMT_ON(player);
+
+	return 0;
+}
+
+static int uni_player_prepare_pcm(struct uniperif *player,
+				  struct snd_pcm_runtime *runtime)
+{
+	int output_frame_size, slot_width, clk_div;
+
+	/* Force slot width to 32 in I2S mode (HW constraint) */
+	if ((player->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
+		SND_SOC_DAIFMT_I2S) {
+		slot_width = 32;
+	} else {
+		switch (runtime->format) {
+		case SNDRV_PCM_FORMAT_S16_LE:
+			slot_width = 16;
+			break;
+		default:
+			slot_width = 32;
+			break;
+		}
+	}
+	output_frame_size = slot_width * runtime->channels;
+
+	clk_div = player->mclk / runtime->rate;
+	/*
+	 * For 32 bits subframe clk_div must be a multiple of 128,
+	 * for 16 bits must be a multiple of 64
+	 */
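+	/*
+	 * Example: I2S, stereo, mclk = 12.288 MHz, rate = 48 kHz gives
+	 * slot_width = 32, output_frame_size = 64 and clk_div = 256, a
+	 * multiple of 128; the divider below is then 256 / 128 = 2.
+	 */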
+	if ((slot_width == 32) && (clk_div % 128)) {
+		dev_err(player->dev, "%s: invalid clk_div", __func__);
+		return -EINVAL;
+	}
+
+	if ((slot_width == 16) && (clk_div % 64)) {
+		dev_err(player->dev, "%s: invalid clk_div", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Number of bits per subframe (which is one channel sample)
+	 * on output - Transfer 16 or 32 bits from FIFO
+	 */
+	switch (slot_width) {
+	case 32:
+		SET_UNIPERIF_I2S_FMT_NBIT_32(player);
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(player);
+		break;
+	case 16:
+		SET_UNIPERIF_I2S_FMT_NBIT_16(player);
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(player);
+		break;
+	default:
+		dev_err(player->dev, "subframe format not supported");
+		return -EINVAL;
+	}
+
+	/* Configure data memory format */
+	switch (runtime->format) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		/* One data word contains two samples */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_16(player);
+		break;
+
+	case SNDRV_PCM_FORMAT_S32_LE:
+		/*
+		 * Actually "16 bits/0 bits" means "32/28/24/20/18/16 bits
+		 * on the left than zeros (if less than 32 bytes)"... ;-)
+		 */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_0(player);
+		break;
+
+	default:
+		dev_err(player->dev, "format not supported");
+		return -EINVAL;
+	}
+
+	/* Set rounding to off */
+	SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
+
+	/* Set clock divisor */
+	SET_UNIPERIF_CTRL_DIVIDER(player, clk_div / (2 * output_frame_size));
+
+	/* Number of channels must be even */
+	if ((runtime->channels % 2) || (runtime->channels < 2) ||
+	    (runtime->channels > 10)) {
+		dev_err(player->dev, "%s: invalid nb of channels", __func__);
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_I2S_FMT_NUM_CH(player, runtime->channels / 2);
+
+	/* Set 1-bit audio format to disabled */
+	SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(player);
+
+	SET_UNIPERIF_I2S_FMT_ORDER_MSB(player);
+	SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(player);
+
+	/* No iec958 formatting, as we are outputting to a DAC */
+	SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(player);
+
+	return 0;
+}
+
+/*
+ * ALSA uniperipheral iec958 controls
+ */
+static int uni_player_ctl_iec958_info(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+	uinfo->count = 1;
+
+	return 0;
+}
+
+static int uni_player_ctl_iec958_get(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+	struct snd_aes_iec958 *iec958 = &player->stream_settings.iec958;
+
+	mutex_lock(&player->ctrl_lock);
+	ucontrol->value.iec958.status[0] = iec958->status[0];
+	ucontrol->value.iec958.status[1] = iec958->status[1];
+	ucontrol->value.iec958.status[2] = iec958->status[2];
+	ucontrol->value.iec958.status[3] = iec958->status[3];
+	mutex_unlock(&player->ctrl_lock);
+	return 0;
+}
+
+static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+	struct snd_aes_iec958 *iec958 =  &player->stream_settings.iec958;
+
+	mutex_lock(&player->ctrl_lock);
+	iec958->status[0] = ucontrol->value.iec958.status[0];
+	iec958->status[1] = ucontrol->value.iec958.status[1];
+	iec958->status[2] = ucontrol->value.iec958.status[2];
+	iec958->status[3] = ucontrol->value.iec958.status[3];
+	mutex_unlock(&player->ctrl_lock);
+
+	uni_player_set_channel_status(player, NULL);
+
+	return 0;
+}
+
+static struct snd_kcontrol_new uni_player_iec958_ctl = {
+	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
+	.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
+	.info = uni_player_ctl_iec958_info,
+	.get = uni_player_ctl_iec958_get,
+	.put = uni_player_ctl_iec958_put,
+};
+
+/*
+ * uniperif rate adjustment control
+ */
+static int snd_sti_clk_adjustment_info(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 1;
+	uinfo->value.integer.min = UNIPERIF_PLAYER_CLK_ADJ_MIN;
+	uinfo->value.integer.max = UNIPERIF_PLAYER_CLK_ADJ_MAX;
+	uinfo->value.integer.step = 1;
+
+	return 0;
+}
+
+static int snd_sti_clk_adjustment_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+
+	mutex_lock(&player->ctrl_lock);
+	ucontrol->value.integer.value[0] = player->clk_adj;
+	mutex_unlock(&player->ctrl_lock);
+
+	return 0;
+}
+
+static int snd_sti_clk_adjustment_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+	int ret = 0;
+
+	if ((ucontrol->value.integer.value[0] < UNIPERIF_PLAYER_CLK_ADJ_MIN) ||
+	    (ucontrol->value.integer.value[0] > UNIPERIF_PLAYER_CLK_ADJ_MAX))
+		return -EINVAL;
+
+	mutex_lock(&player->ctrl_lock);
+	player->clk_adj = ucontrol->value.integer.value[0];
+
+	if (player->mclk)
+		ret = uni_player_clk_set_rate(player, player->mclk);
+	mutex_unlock(&player->ctrl_lock);
+
+	return ret;
+}
+
+static struct snd_kcontrol_new uni_player_clk_adj_ctl = {
+	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
+	.name = "PCM Playback Oversampling Freq. Adjustment",
+	.info = snd_sti_clk_adjustment_info,
+	.get = snd_sti_clk_adjustment_get,
+	.put = snd_sti_clk_adjustment_put,
+};
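+/*
+ * From user space this is a standard ALSA integer control; for example
+ * (card index assumed for illustration):
+ *	amixer -c 0 cset name='PCM Playback Oversampling Freq. Adjustment' 100
+ * requests a +100 ppm adjustment.
+ */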
+
+static struct snd_kcontrol_new *snd_sti_pcm_ctl[] = {
+	&uni_player_clk_adj_ctl,
+};
+
+static struct snd_kcontrol_new *snd_sti_iec_ctl[] = {
+	&uni_player_iec958_ctl,
+	&uni_player_clk_adj_ctl,
+};
+
+static int uni_player_startup(struct snd_pcm_substream *substream,
+			      struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+
+	player->clk_adj = 0;
+
+	return 0;
+}
+
+static int uni_player_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+				 unsigned int freq, int dir)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+	int ret;
+
+	if (dir == SND_SOC_CLOCK_IN)
+		return 0;
+
+	if (clk_id != 0)
+		return -EINVAL;
+
+	mutex_lock(&player->ctrl_lock);
+	ret = uni_player_clk_set_rate(player, freq);
+	if (!ret)
+		player->mclk = freq;
+	mutex_unlock(&player->ctrl_lock);
+
+	return ret;
+}
+
+static int uni_player_prepare(struct snd_pcm_substream *substream,
+			      struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int transfer_size, trigger_limit;
+	int ret;
+
+	/* The player should be stopped */
+	if (player->state != UNIPERIF_STATE_STOPPED) {
+		dev_err(player->dev, "%s: invalid player state %d", __func__,
+			player->state);
+		return -EINVAL;
+	}
+
+	/* Calculate transfer size (in fifo cells and bytes) for frame count */
+	transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
+
+	/* Calculate number of empty cells available before asserting DREQ */
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) {
+		trigger_limit = UNIPERIF_FIFO_SIZE - transfer_size;
+	} else {
+		/*
+		 * Since SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0, the
+		 * FDMA_TRIGGER_LIMIT also controls when the state switches
+		 * from OFF or STANDBY to AUDIO DATA.
+		 */
+		trigger_limit = transfer_size;
+	}
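+	/*
+	 * Example: a stereo stream with UNIPERIF_FIFO_FRAMES = 4 gives
+	 * transfer_size = 8 cells; pre-UNI_PLR_TOP_1_0 IPs then use
+	 * trigger_limit = 70 - 8 = 62, newer ones use 8.
+	 */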
+
+	/* Trigger limit must be an even number */
+	if ((trigger_limit % 2) ||
+	    (trigger_limit != 1 && transfer_size % 2) ||
+	    (trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(player))) {
+		dev_err(player->dev, "invalid trigger limit %d", trigger_limit);
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(player, trigger_limit);
+
+	/* Uniperipheral setup depends on player type */
+	switch (player->info->player_type) {
+	case SND_ST_UNIPERIF_PLAYER_TYPE_HDMI:
+		ret = uni_player_prepare_iec958(player, runtime);
+		break;
+	case SND_ST_UNIPERIF_PLAYER_TYPE_PCM:
+		ret = uni_player_prepare_pcm(player, runtime);
+		break;
+	case SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF:
+		ret = uni_player_prepare_iec958(player, runtime);
+		break;
+	default:
+		dev_err(player->dev, "invalid player type");
+		return -EINVAL;
+	}
+
+	if (ret)
+		return ret;
+
+	switch (player->daifmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_LOW(player);
+		SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(player);
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_HIG(player);
+		SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(player);
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_LOW(player);
+		SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(player);
+		break;
+	case SND_SOC_DAIFMT_IB_IF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_HIG(player);
+		SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(player);
+		break;
+	}
+
+	switch (player->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(player);
+		SET_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(player);
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(player);
+		SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(player);
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		SET_UNIPERIF_I2S_FMT_ALIGN_RIGHT(player);
+		SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(player);
+		break;
+	default:
+		dev_err(player->dev, "format not supported");
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(player, 0);
+
+	/* Reset uniperipheral player */
+	SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
+
+	return reset_player(player);
+}
+
+static int uni_player_start(struct uniperif *player)
+{
+	int ret;
+
+	/* The player should be stopped */
+	if (player->state != UNIPERIF_STATE_STOPPED) {
+		dev_err(player->dev, "%s: invalid player state", __func__);
+		return -EINVAL;
+	}
+
+	ret = clk_prepare_enable(player->clk);
+	if (ret) {
+		dev_err(player->dev, "%s: Failed to enable clock", __func__);
+		return ret;
+	}
+
+	/* Clear any pending interrupts */
+	SET_UNIPERIF_ITS_BCLR(player, GET_UNIPERIF_ITS(player));
+
+	/* Set the interrupt mask */
+	SET_UNIPERIF_ITM_BSET_DMA_ERROR(player);
+	SET_UNIPERIF_ITM_BSET_FIFO_ERROR(player);
+
+	/* Enable underflow recovery interrupts */
+	if (player->info->underflow_enabled) {
+		SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE(player);
+		SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(player);
+	}
+
+	/* Reset uniperipheral player */
+	SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
+
+	ret = reset_player(player);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * The driver does not use the IEC61937 features of the uniperipheral
+	 * hardware. Instead it performs IEC61937 formatting in software and
+	 * inserts it directly into the audio data stream. As such, when
+	 * encoded mode is selected, linear PCM mode is still used, the only
+	 * differences being the channel status bits set for encoded mode and
+	 * the validity bits set.
+	 */
+	SET_UNIPERIF_CTRL_OPERATION_PCM_DATA(player);
+
+	/*
+	 * If iec958 formatting is required for hdmi or spdif, then it must be
+	 * enabled after the operation mode is set. If set prior to this, it
+	 * will not take effect and will hang the player.
+	 */
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 &&
+	    UNIPERIF_PLAYER_TYPE_IS_IEC958(player))
+		SET_UNIPERIF_CTRL_SPDIF_FMT_ON(player);
+
+	/* Force channel status update (no update if the clock is disabled) */
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+		SET_UNIPERIF_CONFIG_CHL_STS_UPDATE(player);
+	else
+		SET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(player);
+
+	/* Update state to started */
+	player->state = UNIPERIF_STATE_STARTED;
+
+	return 0;
+}
+
+static int uni_player_stop(struct uniperif *player)
+{
+	int ret;
+
+	/* The player should not be in stopped state */
+	if (player->state == UNIPERIF_STATE_STOPPED) {
+		dev_err(player->dev, "%s: invalid player state", __func__);
+		return -EINVAL;
+	}
+
+	/* Turn the player off */
+	SET_UNIPERIF_CTRL_OPERATION_OFF(player);
+
+	/* Soft reset the player */
+	SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
+
+	ret = reset_player(player);
+	if (ret < 0)
+		return ret;
+
+	/* Disable interrupts */
+	SET_UNIPERIF_ITM_BCLR(player, GET_UNIPERIF_ITM(player));
+
+	/* Disable clock */
+	clk_disable_unprepare(player->clk);
+
+	/* Update state to stopped and return */
+	player->state = UNIPERIF_STATE_STOPPED;
+
+	return 0;
+}
+
+int uni_player_resume(struct uniperif *player)
+{
+	int ret;
+
+	/* Select the frequency synthesizer clock */
+	if (player->clk_sel) {
+		ret = regmap_field_write(player->clk_sel, 1);
+		if (ret) {
+			dev_err(player->dev,
+				"%s: Failed to select freq synth clock",
+				__func__);
+			return ret;
+		}
+	}
+
+	SET_UNIPERIF_CONFIG_BACK_STALL_REQ_DISABLE(player);
+	SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
+	SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
+	SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(player);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uni_player_resume);
+
+static int uni_player_trigger(struct snd_pcm_substream *substream,
+			      int cmd, struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		return uni_player_start(player);
+	case SNDRV_PCM_TRIGGER_STOP:
+		return uni_player_stop(player);
+	case SNDRV_PCM_TRIGGER_RESUME:
+		return uni_player_resume(player);
+	default:
+		return -EINVAL;
+	}
+}
+
+static void uni_player_shutdown(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+
+	if (player->state != UNIPERIF_STATE_STOPPED)
+		/* Stop the player */
+		uni_player_stop(player);
+}
+
+static int uni_player_parse_dt_clk_glue(struct platform_device *pdev,
+					struct uniperif *player)
+{
+	int bit_offset;
+	struct device_node *node = pdev->dev.of_node;
+	struct regmap *regmap;
+
+	bit_offset = SYS_CFG_AUDI0_GLUE_PCM_CLKX + player->info->id;
+
+	regmap = syscon_regmap_lookup_by_phandle(node, "st,syscfg");
+
+	/* the lookup returns an ERR_PTR(), never NULL, on failure */
+	if (!IS_ERR(regmap)) {
+		struct reg_field regfield =
+			REG_FIELD(SYS_CFG_AUDIO_GLUE, bit_offset, bit_offset);
+
+		player->clk_sel = regmap_field_alloc(regmap, regfield);
+	} else {
+		dev_err(&pdev->dev, "sti-audio-clk-glue syscfg not found\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int uni_player_parse_dt(struct platform_device *pdev,
+			       struct uniperif *player)
+{
+	struct uniperif_info *info;
+	struct device *dev = &pdev->dev;
+	struct device_node *pnode = pdev->dev.of_node;
+	const char *mode;
+
+	/* Allocate memory for the info structure */
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	of_property_read_u32(pnode, "version", &player->ver);
+	if (player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
+		dev_err(dev, "Unknown uniperipheral version\n");
+		return -EINVAL;
+	}
+
+	/* Underflow recovery is only supported on later IP revisions */
+	if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+		info->underflow_enabled = 1;
+
+	of_property_read_u32(pnode, "uniperiph-id", &info->id);
+
+	/* Read the device mode property (missing mode means no player type) */
+	if (of_property_read_string(pnode, "mode", &mode))
+		mode = NULL;
+
+	if (mode && strcasecmp(mode, "hdmi") == 0)
+		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_HDMI;
+	else if (mode && strcasecmp(mode, "pcm") == 0)
+		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_PCM;
+	else if (mode && strcasecmp(mode, "spdif") == 0)
+		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF;
+	else
+		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_NONE;
+
+	/* Save the info structure */
+	player->info = info;
+
+	/* Get the PCM_CLK_SEL bit from audio-glue-ctrl SoC register */
+	if (uni_player_parse_dt_clk_glue(pdev, player))
+		return -EINVAL;
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops uni_player_dai_ops = {
+	.startup = uni_player_startup,
+	.shutdown = uni_player_shutdown,
+	.prepare = uni_player_prepare,
+	.trigger = uni_player_trigger,
+	.hw_params = sti_uniperiph_dai_hw_params,
+	.set_fmt = sti_uniperiph_dai_set_fmt,
+	.set_sysclk = uni_player_set_sysclk,
+};
+
+int uni_player_init(struct platform_device *pdev,
+		    struct uniperif *player)
+{
+	int ret = 0;
+
+	player->dev = &pdev->dev;
+	player->state = UNIPERIF_STATE_STOPPED;
+	player->hw = &uni_player_pcm_hw;
+	player->dai_ops = &uni_player_dai_ops;
+
+	ret = uni_player_parse_dt(pdev, player);
+	if (ret < 0) {
+		dev_err(player->dev, "Failed to parse DeviceTree");
+		return ret;
+	}
+
+	/* Get the uniperipheral clock */
+	player->clk = of_clk_get(pdev->dev.of_node, 0);
+	if (IS_ERR(player->clk)) {
+		dev_err(player->dev, "Failed to get clock");
+		return PTR_ERR(player->clk);
+	}
+
+	/* Select the frequency synthesizer clock */
+	if (player->clk_sel) {
+		ret = regmap_field_write(player->clk_sel, 1);
+		if (ret) {
+			dev_err(player->dev,
+				"%s: Failed to select freq synth clock",
+				__func__);
+			return ret;
+		}
+	}
+
+	ret = devm_request_irq(&pdev->dev, player->irq,
+			       uni_player_irq_handler, IRQF_SHARED,
+			       dev_name(&pdev->dev), player);
+	if (ret < 0)
+		return ret;
+
+	mutex_init(&player->ctrl_lock);
+
+	/* Ensure that the player is disabled by default */
+	SET_UNIPERIF_CONFIG_BACK_STALL_REQ_DISABLE(player);
+	SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
+	SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
+	SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(player);
+
+	if (UNIPERIF_PLAYER_TYPE_IS_IEC958(player)) {
+		/* Set default iec958 status bits */
+
+		/* Consumer, PCM, copyright, 2ch, mode 0 */
+		player->stream_settings.iec958.status[0] = 0x00;
+		/* Broadcast reception category */
+		player->stream_settings.iec958.status[1] =
+					IEC958_AES1_CON_GENERAL;
+		/* Do not take into account source or channel number */
+		player->stream_settings.iec958.status[2] =
+					IEC958_AES2_CON_SOURCE_UNSPEC;
+		/* Sampling frequency not indicated */
+		player->stream_settings.iec958.status[3] =
+					IEC958_AES3_CON_FS_NOTID;
+		/* Max sample word 24-bit, sample word length not indicated */
+		player->stream_settings.iec958.status[4] =
+					IEC958_AES4_CON_MAX_WORDLEN_24 |
+					IEC958_AES4_CON_WORDLEN_24_20;
+
+		player->num_ctrls = ARRAY_SIZE(snd_sti_iec_ctl);
+		player->snd_ctrls = snd_sti_iec_ctl[0];
+	} else {
+		player->num_ctrls = ARRAY_SIZE(snd_sti_pcm_ctl);
+		player->snd_ctrls = snd_sti_pcm_ctl[0];
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uni_player_init);
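
A minimal sketch of the DREQ trigger-limit arithmetic used by
uni_player_prepare() above; the helper is hypothetical and merely restates
the driver's logic with its own UNIPERIF_FIFO_* constants (an even channel
count is assumed, as enforced elsewhere in the driver):

	static int sketch_trigger_limit(int channels, bool is_top_1_0)
	{
		int transfer_size = channels * UNIPERIF_FIFO_FRAMES;

		if (!is_top_1_0)
			/* older IPs: DREQ asserts once this many cells are empty */
			return UNIPERIF_FIFO_SIZE - transfer_size;

		/* TOP_1_0 and later: the limit also gates the switch from
		 * OFF/STANDBY to AUDIO DATA, so it tracks the transfer size */
		return transfer_size;
	}
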
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
new file mode 100644
index 0000000..c502626
--- /dev/null
+++ b/sound/soc/sti/uniperif_reader.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <sound/soc.h>
+
+#include "uniperif.h"
+
+/*
+ * Note: snd_pcm_hardware is linked to the DMA controller but is declared here
+ * to integrate the unireader capability in terms of rate and supported channels
+ */
+static const struct snd_pcm_hardware uni_reader_pcm_hw = {
+	.info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_MMAP_VALID,
+	.formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE,
+
+	.rates = SNDRV_PCM_RATE_CONTINUOUS,
+	.rate_min = 8000,
+	.rate_max = 96000,
+
+	.channels_min = 2,
+	.channels_max = 8,
+
+	.periods_min = 2,
+	.periods_max = 48,
+
+	.period_bytes_min = 128,
+	.period_bytes_max = 64 * PAGE_SIZE,
+	.buffer_bytes_max = 256 * PAGE_SIZE
+};
+
+/*
+ * uni_reader_irq_handler
+ * In case of error the audio stream is stopped; the stop action is protected
+ * via the PCM stream lock to avoid a race condition with the trigger callback.
+ */
+static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id)
+{
+	irqreturn_t ret = IRQ_NONE;
+	struct uniperif *reader = dev_id;
+	unsigned int status;
+
+	if (reader->state == UNIPERIF_STATE_STOPPED) {
+		/* Unexpected IRQ: do nothing */
+		dev_warn(reader->dev, "unexpected IRQ\n");
+		return IRQ_HANDLED;
+	}
+
+	/* Get the interrupt status and clear it immediately */
+	status = GET_UNIPERIF_ITS(reader);
+	SET_UNIPERIF_ITS_BCLR(reader, status);
+
+	/* Check for fifo overflow error */
+	if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(reader))) {
+		dev_err(reader->dev, "FIFO error detected");
+
+		snd_pcm_stream_lock(reader->substream);
+		snd_pcm_stop(reader->substream, SNDRV_PCM_STATE_XRUN);
+		snd_pcm_stream_unlock(reader->substream);
+
+		return IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+static int uni_reader_prepare(struct snd_pcm_substream *substream,
+			      struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *reader = priv->dai_data.uni;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int transfer_size, trigger_limit;
+	int slot_width;
+	int count = 10;
+
+	/* The reader should be stopped */
+	if (reader->state != UNIPERIF_STATE_STOPPED) {
+		dev_err(reader->dev, "%s: invalid reader state %d", __func__,
+			reader->state);
+		return -EINVAL;
+	}
+
+	/* Calculate transfer size (in fifo cells and bytes) for frame count */
+	transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
+
+	/* Calculate number of empty cells available before asserting DREQ */
+	if (reader->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+		trigger_limit = UNIPERIF_FIFO_SIZE - transfer_size;
+	else
+		/*
+		 * Since SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
+		 * FDMA_TRIGGER_LIMIT also controls when the state switches
+		 * from OFF or STANDBY to AUDIO DATA.
+		 */
+		trigger_limit = transfer_size;
+
+	/* Trigger limit must be an even number */
+	if ((trigger_limit % 2) ||
+	    (trigger_limit != 1 && transfer_size % 2) ||
+	    (trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(reader))) {
+		dev_err(reader->dev, "invalid trigger limit %d", trigger_limit);
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(reader, trigger_limit);
+
+	switch (reader->daifmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_IB_IF:
+	case SND_SOC_DAIFMT_NB_IF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_HIG(reader);
+		break;
+	default:
+		SET_UNIPERIF_I2S_FMT_LR_POL_LOW(reader);
+	}
+
+	/* Force slot width to 32 in I2S mode */
+	if ((reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK)
+		== SND_SOC_DAIFMT_I2S) {
+		slot_width = 32;
+	} else {
+		switch (runtime->format) {
+		case SNDRV_PCM_FORMAT_S16_LE:
+			slot_width = 16;
+			break;
+		default:
+			slot_width = 32;
+			break;
+		}
+	}
+
+	/* Number of bits per subframe (i.e. one channel sample) on input. */
+	switch (slot_width) {
+	case 32:
+		SET_UNIPERIF_I2S_FMT_NBIT_32(reader);
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(reader);
+		break;
+	case 16:
+		SET_UNIPERIF_I2S_FMT_NBIT_16(reader);
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(reader);
+		break;
+	default:
+		dev_err(reader->dev, "subframe format not supported");
+		return -EINVAL;
+	}
+
+	/* Configure data memory format */
+	switch (runtime->format) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		/* One data word contains two samples */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_16(reader);
+		break;
+
+	case SNDRV_PCM_FORMAT_S32_LE:
+		/*
+		 * Actually "16 bits/0 bits" means "32/28/24/20/18/16 bits
+		 * on the MSB then zeros (if less than 32 bits)"...
+		 */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_0(reader);
+		break;
+
+	default:
+		dev_err(reader->dev, "format not supported");
+		return -EINVAL;
+	}
+
+	switch (reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader);
+		SET_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(reader);
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader);
+		SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(reader);
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		SET_UNIPERIF_I2S_FMT_ALIGN_RIGHT(reader);
+		SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(reader);
+		break;
+	default:
+		dev_err(reader->dev, "format not supported");
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_I2S_FMT_ORDER_MSB(reader);
+
+	/* Data clocking (changing) on the rising edge */
+	SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(reader);
+
+	/* Number of channels must be even and in the range 2..10 */
+	if ((runtime->channels % 2) || (runtime->channels < 2) ||
+	    (runtime->channels > 10)) {
+		dev_err(reader->dev, "%s: invalid number of channels",
+			__func__);
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_I2S_FMT_NUM_CH(reader, runtime->channels / 2);
+
+	/* Clear any pending interrupts */
+	SET_UNIPERIF_ITS_BCLR(reader, GET_UNIPERIF_ITS(reader));
+
+	SET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(reader, 0);
+
+	/* Set the interrupt mask */
+	SET_UNIPERIF_ITM_BSET_DMA_ERROR(reader);
+	SET_UNIPERIF_ITM_BSET_FIFO_ERROR(reader);
+	SET_UNIPERIF_ITM_BSET_MEM_BLK_READ(reader);
+
+	/* Enable underflow recovery interrupts */
+	if (reader->info->underflow_enabled) {
+		SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE(reader);
+		SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(reader);
+	}
+
+	/* Reset uniperipheral reader */
+	SET_UNIPERIF_SOFT_RST_SOFT_RST(reader);
+
+	while (GET_UNIPERIF_SOFT_RST_SOFT_RST(reader) && count) {
+		udelay(5);
+		count--;
+	}
+	if (!count) {
+		dev_err(reader->dev, "Failed to reset uniperif");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int uni_reader_start(struct uniperif *reader)
+{
+	/* The reader should be stopped */
+	if (reader->state != UNIPERIF_STATE_STOPPED) {
+		dev_err(reader->dev, "%s: invalid reader state", __func__);
+		return -EINVAL;
+	}
+
+	/* Enable reader interrupts (and clear possible stalled ones) */
+	SET_UNIPERIF_ITS_BCLR_FIFO_ERROR(reader);
+	SET_UNIPERIF_ITM_BSET_FIFO_ERROR(reader);
+
+	/* Launch the reader */
+	SET_UNIPERIF_CTRL_OPERATION_PCM_DATA(reader);
+
+	/* Update state to started */
+	reader->state = UNIPERIF_STATE_STARTED;
+	return 0;
+}
+
+static int uni_reader_stop(struct uniperif *reader)
+{
+	/* The reader should not be in stopped state */
+	if (reader->state == UNIPERIF_STATE_STOPPED) {
+		dev_err(reader->dev, "%s: invalid reader state", __func__);
+		return -EINVAL;
+	}
+
+	/* Turn the reader off */
+	SET_UNIPERIF_CTRL_OPERATION_OFF(reader);
+
+	/* Disable interrupts */
+	SET_UNIPERIF_ITM_BCLR(reader, GET_UNIPERIF_ITM(reader));
+
+	/* Update state to stopped and return */
+	reader->state = UNIPERIF_STATE_STOPPED;
+
+	return 0;
+}
+
+static int uni_reader_trigger(struct snd_pcm_substream *substream,
+			      int cmd, struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *reader = priv->dai_data.uni;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		return uni_reader_start(reader);
+	case SNDRV_PCM_TRIGGER_STOP:
+		return uni_reader_stop(reader);
+	default:
+		return -EINVAL;
+	}
+}
+
+static void uni_reader_shutdown(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *reader = priv->dai_data.uni;
+
+	if (reader->state != UNIPERIF_STATE_STOPPED) {
+		/* Stop the reader */
+		uni_reader_stop(reader);
+	}
+}
+
+static int uni_reader_parse_dt(struct platform_device *pdev,
+			       struct uniperif *reader)
+{
+	struct uniperif_info *info;
+	struct device_node *node = pdev->dev.of_node;
+
+	/* Allocate memory for the info structure */
+	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	of_property_read_u32(node, "version", &reader->ver);
+
+	/* Save the info structure */
+	reader->info = info;
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops uni_reader_dai_ops = {
+	.shutdown = uni_reader_shutdown,
+	.prepare = uni_reader_prepare,
+	.trigger = uni_reader_trigger,
+	.hw_params = sti_uniperiph_dai_hw_params,
+	.set_fmt = sti_uniperiph_dai_set_fmt,
+};
+
+int uni_reader_init(struct platform_device *pdev,
+		    struct uniperif *reader)
+{
+	int ret = 0;
+
+	reader->dev = &pdev->dev;
+	reader->state = UNIPERIF_STATE_STOPPED;
+	reader->hw = &uni_reader_pcm_hw;
+	reader->dai_ops = &uni_reader_dai_ops;
+
+	dev_dbg(reader->dev, "%s: enter\n", __func__);
+	ret = uni_reader_parse_dt(pdev, reader);
+	if (ret < 0) {
+		dev_err(reader->dev, "Failed to parse DeviceTree");
+		return ret;
+	}
+
+	ret = devm_request_irq(&pdev->dev, reader->irq,
+			       uni_reader_irq_handler, IRQF_SHARED,
+			       dev_name(&pdev->dev), reader);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to request IRQ");
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uni_reader_init);
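
Both prepare() paths above end by asserting SOFT_RST and polling until the
hardware clears it. A minimal sketch of that bounded-poll idiom, assuming a
readl()-accessible status register (the helper and its signature are
illustrative, not the driver's API):

	static int sketch_wait_reset_clear(void __iomem *reg, u32 mask)
	{
		int retries = 10;	/* ~50us worst case at 5us per step */

		while ((readl(reg) & mask) && retries--)
			udelay(5);

		return (readl(reg) & mask) ? -EIO : 0;
	}
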
diff --git a/sound/soc/tegra/tegra20_das.c b/sound/soc/tegra/tegra20_das.c
index f52600b..89add13 100644
--- a/sound/soc/tegra/tegra20_das.c
+++ b/sound/soc/tegra/tegra20_das.c
@@ -133,7 +133,7 @@
 
 static int tegra20_das_probe(struct platform_device *pdev)
 {
-	struct resource *res, *region;
+	struct resource *res;
 	void __iomem *regs;
 	int ret = 0;
 
@@ -149,24 +149,9 @@
 	das->dev = &pdev->dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "No memory resource\n");
-		ret = -ENODEV;
-		goto err;
-	}
-
-	region = devm_request_mem_region(&pdev->dev, res->start,
-					 resource_size(res), pdev->name);
-	if (!region) {
-		dev_err(&pdev->dev, "Memory region already claimed\n");
-		ret = -EBUSY;
-		goto err;
-	}
-
-	regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
-	if (!regs) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
+	regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(regs)) {
+		ret = PTR_ERR(regs);
 		goto err;
 	}
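
The same conversion repeats through the Tegra drivers below:
devm_ioremap_resource() folds the missing-resource check,
devm_request_mem_region() and devm_ioremap() into one call and reports
failure as an ERR_PTR (it also copes with a NULL resource, so the
platform_get_resource() result needs no separate check). A sketch of the
resulting probe shape, with a hypothetical helper name:

	static void __iomem *sketch_map_mmio(struct platform_device *pdev)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		/* NULL res, a busy region and ioremap failure all surface
		 * here as an ERR_PTR, already logged against the device */
		return devm_ioremap_resource(&pdev->dev, res);
	}
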
 
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
index 05f1c6e..14106fa 100644
--- a/sound/soc/tegra/tegra20_i2s.c
+++ b/sound/soc/tegra/tegra20_i2s.c
@@ -339,7 +339,7 @@
 static int tegra20_i2s_platform_probe(struct platform_device *pdev)
 {
 	struct tegra20_i2s *i2s;
-	struct resource *mem, *memregion;
+	struct resource *mem;
 	void __iomem *regs;
 	int ret;
 
@@ -362,24 +362,9 @@
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem) {
-		dev_err(&pdev->dev, "No memory resource\n");
-		ret = -ENODEV;
-		goto err_clk_put;
-	}
-
-	memregion = devm_request_mem_region(&pdev->dev, mem->start,
-					    resource_size(mem), DRV_NAME);
-	if (!memregion) {
-		dev_err(&pdev->dev, "Memory region already claimed\n");
-		ret = -EBUSY;
-		goto err_clk_put;
-	}
-
-	regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (!regs) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
+	regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(regs)) {
+		ret = PTR_ERR(regs);
 		goto err_clk_put;
 	}
 
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index 9141477..a0c3640 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -265,7 +265,7 @@
 static int tegra20_spdif_platform_probe(struct platform_device *pdev)
 {
 	struct tegra20_spdif *spdif;
-	struct resource *mem, *memregion, *dmareq;
+	struct resource *mem, *dmareq;
 	void __iomem *regs;
 	int ret;
 
@@ -273,45 +273,26 @@
 			     GFP_KERNEL);
 	if (!spdif) {
 		dev_err(&pdev->dev, "Can't allocate tegra20_spdif\n");
-		ret = -ENOMEM;
-		goto err;
+		return -ENOMEM;
 	}
 	dev_set_drvdata(&pdev->dev, spdif);
 
-	spdif->clk_spdif_out = clk_get(&pdev->dev, "spdif_out");
+	spdif->clk_spdif_out = devm_clk_get(&pdev->dev, "spdif_out");
 	if (IS_ERR(spdif->clk_spdif_out)) {
 		pr_err("Can't retrieve spdif clock\n");
 		ret = PTR_ERR(spdif->clk_spdif_out);
-		goto err;
+		return ret;
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem) {
-		dev_err(&pdev->dev, "No memory resource\n");
-		ret = -ENODEV;
-		goto err_clk_put;
-	}
+	regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
 
 	dmareq = platform_get_resource(pdev, IORESOURCE_DMA, 0);
 	if (!dmareq) {
 		dev_err(&pdev->dev, "No DMA resource\n");
-		ret = -ENODEV;
-		goto err_clk_put;
-	}
-
-	memregion = devm_request_mem_region(&pdev->dev, mem->start,
-					    resource_size(mem), DRV_NAME);
-	if (!memregion) {
-		dev_err(&pdev->dev, "Memory region already claimed\n");
-		ret = -EBUSY;
-		goto err_clk_put;
-	}
-
-	regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (!regs) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
-		goto err_clk_put;
+		return -ENODEV;
 	}
 
 	spdif->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
@@ -319,7 +300,7 @@
 	if (IS_ERR(spdif->regmap)) {
 		dev_err(&pdev->dev, "regmap init failed\n");
 		ret = PTR_ERR(spdif->regmap);
-		goto err_clk_put;
+		return ret;
 	}
 
 	spdif->playback_dma_data.addr = mem->start + TEGRA20_SPDIF_DATA_OUT;
@@ -335,7 +316,7 @@
 	}
 
 	ret = snd_soc_register_component(&pdev->dev, &tegra20_spdif_component,
-				   &tegra20_spdif_dai, 1);
+					 &tegra20_spdif_dai, 1);
 	if (ret) {
 		dev_err(&pdev->dev, "Could not register DAI: %d\n", ret);
 		ret = -ENOMEM;
@@ -357,16 +338,12 @@
 		tegra20_spdif_runtime_suspend(&pdev->dev);
 err_pm_disable:
 	pm_runtime_disable(&pdev->dev);
-err_clk_put:
-	clk_put(spdif->clk_spdif_out);
-err:
+
 	return ret;
 }
 
 static int tegra20_spdif_platform_remove(struct platform_device *pdev)
 {
-	struct tegra20_spdif *spdif = dev_get_drvdata(&pdev->dev);
-
 	pm_runtime_disable(&pdev->dev);
 	if (!pm_runtime_status_suspended(&pdev->dev))
 		tegra20_spdif_runtime_suspend(&pdev->dev);
@@ -374,8 +351,6 @@
 	tegra_pcm_platform_unregister(&pdev->dev);
 	snd_soc_unregister_component(&pdev->dev);
 
-	clk_put(spdif->clk_spdif_out);
-
 	return 0;
 }
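
The clk_get() to devm_clk_get() moves in this and the following drivers are
what let the error-unwinding labels and the clk_put() calls in remove() go
away: device-managed resources are released automatically when the device is
unbound. A sketch with illustrative names:

	static int sketch_probe(struct platform_device *pdev)
	{
		struct clk *clk = devm_clk_get(&pdev->dev, "spdif_out");

		if (IS_ERR(clk))
			return PTR_ERR(clk);	/* nothing to unwind */

		/* no clk_put() in remove(): devres drops the reference */
		return 0;
	}
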
 
diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c
index bc94e5d..fef3b9a 100644
--- a/sound/soc/tegra/tegra30_ahub.c
+++ b/sound/soc/tegra/tegra30_ahub.c
@@ -521,7 +521,7 @@
 	const struct tegra30_ahub_soc_data *soc_data;
 	struct reset_control *rst;
 	int i;
-	struct resource *res0, *res1, *region;
+	struct resource *res0, *res1;
 	void __iomem *regs_apbif, *regs_ahub;
 	int ret = 0;
 
@@ -549,103 +549,67 @@
 			dev_err(&pdev->dev, "Can't get reset %s\n",
 				configlink_mods[i].rst_name);
 			ret = PTR_ERR(rst);
-			goto err;
+			return ret;
 		}
 
 		ret = reset_control_deassert(rst);
 		reset_control_put(rst);
 		if (ret)
-			goto err;
+			return ret;
 	}
 
 	ahub = devm_kzalloc(&pdev->dev, sizeof(struct tegra30_ahub),
 			    GFP_KERNEL);
 	if (!ahub) {
 		dev_err(&pdev->dev, "Can't allocate tegra30_ahub\n");
-		ret = -ENOMEM;
-		goto err;
+		return -ENOMEM;
 	}
 	dev_set_drvdata(&pdev->dev, ahub);
 
 	ahub->soc_data = soc_data;
 	ahub->dev = &pdev->dev;
 
-	ahub->clk_d_audio = clk_get(&pdev->dev, "d_audio");
+	ahub->clk_d_audio = devm_clk_get(&pdev->dev, "d_audio");
 	if (IS_ERR(ahub->clk_d_audio)) {
 		dev_err(&pdev->dev, "Can't retrieve ahub d_audio clock\n");
 		ret = PTR_ERR(ahub->clk_d_audio);
-		goto err;
+		return ret;
 	}
 
-	ahub->clk_apbif = clk_get(&pdev->dev, "apbif");
+	ahub->clk_apbif = devm_clk_get(&pdev->dev, "apbif");
 	if (IS_ERR(ahub->clk_apbif)) {
 		dev_err(&pdev->dev, "Can't retrieve ahub apbif clock\n");
 		ret = PTR_ERR(ahub->clk_apbif);
-		goto err_clk_put_d_audio;
+		return ret;
 	}
 
 	res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res0) {
-		dev_err(&pdev->dev, "No apbif memory resource\n");
-		ret = -ENODEV;
-		goto err_clk_put_apbif;
-	}
+	regs_apbif = devm_ioremap_resource(&pdev->dev, res0);
+	if (IS_ERR(regs_apbif))
+		return PTR_ERR(regs_apbif);
 
-	region = devm_request_mem_region(&pdev->dev, res0->start,
-					 resource_size(res0), DRV_NAME);
-	if (!region) {
-		dev_err(&pdev->dev, "request region apbif failed\n");
-		ret = -EBUSY;
-		goto err_clk_put_apbif;
-	}
 	ahub->apbif_addr = res0->start;
 
-	regs_apbif = devm_ioremap(&pdev->dev, res0->start,
-				  resource_size(res0));
-	if (!regs_apbif) {
-		dev_err(&pdev->dev, "ioremap apbif failed\n");
-		ret = -ENOMEM;
-		goto err_clk_put_apbif;
-	}
-
 	ahub->regmap_apbif = devm_regmap_init_mmio(&pdev->dev, regs_apbif,
 					&tegra30_ahub_apbif_regmap_config);
 	if (IS_ERR(ahub->regmap_apbif)) {
 		dev_err(&pdev->dev, "apbif regmap init failed\n");
 		ret = PTR_ERR(ahub->regmap_apbif);
-		goto err_clk_put_apbif;
+		return ret;
 	}
 	regcache_cache_only(ahub->regmap_apbif, true);
 
 	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (!res1) {
-		dev_err(&pdev->dev, "No ahub memory resource\n");
-		ret = -ENODEV;
-		goto err_clk_put_apbif;
-	}
-
-	region = devm_request_mem_region(&pdev->dev, res1->start,
-					 resource_size(res1), DRV_NAME);
-	if (!region) {
-		dev_err(&pdev->dev, "request region ahub failed\n");
-		ret = -EBUSY;
-		goto err_clk_put_apbif;
-	}
-
-	regs_ahub = devm_ioremap(&pdev->dev, res1->start,
-				 resource_size(res1));
-	if (!regs_ahub) {
-		dev_err(&pdev->dev, "ioremap ahub failed\n");
-		ret = -ENOMEM;
-		goto err_clk_put_apbif;
-	}
+	regs_ahub = devm_ioremap_resource(&pdev->dev, res1);
+	if (IS_ERR(regs_ahub))
+		return PTR_ERR(regs_ahub);
 
 	ahub->regmap_ahub = devm_regmap_init_mmio(&pdev->dev, regs_ahub,
 					&tegra30_ahub_ahub_regmap_config);
 	if (IS_ERR(ahub->regmap_ahub)) {
 		dev_err(&pdev->dev, "ahub regmap init failed\n");
 		ret = PTR_ERR(ahub->regmap_ahub);
-		goto err_clk_put_apbif;
+		return ret;
 	}
 	regcache_cache_only(ahub->regmap_ahub, true);
 
@@ -662,12 +626,7 @@
 
 err_pm_disable:
 	pm_runtime_disable(&pdev->dev);
-err_clk_put_apbif:
-	clk_put(ahub->clk_apbif);
-err_clk_put_d_audio:
-	clk_put(ahub->clk_d_audio);
-	ahub = NULL;
-err:
+
 	return ret;
 }
 
@@ -680,11 +639,6 @@
 	if (!pm_runtime_status_suspended(&pdev->dev))
 		tegra30_ahub_runtime_suspend(&pdev->dev);
 
-	clk_put(ahub->clk_apbif);
-	clk_put(ahub->clk_d_audio);
-
-	ahub = NULL;
-
 	return 0;
 }
 
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index fe36375..8e55583 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -379,7 +379,7 @@
 	struct tegra30_i2s *i2s;
 	const struct of_device_id *match;
 	u32 cif_ids[2];
-	struct resource *mem, *memregion;
+	struct resource *mem;
 	void __iomem *regs;
 	int ret;
 
@@ -419,24 +419,9 @@
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem) {
-		dev_err(&pdev->dev, "No memory resource\n");
-		ret = -ENODEV;
-		goto err_clk_put;
-	}
-
-	memregion = devm_request_mem_region(&pdev->dev, mem->start,
-					    resource_size(mem), DRV_NAME);
-	if (!memregion) {
-		dev_err(&pdev->dev, "Memory region already claimed\n");
-		ret = -EBUSY;
-		goto err_clk_put;
-	}
-
-	regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (!regs) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
+	regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(regs)) {
+		ret = PTR_ERR(regs);
 		goto err_clk_put;
 	}
 
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index 88eacfd..a8f705b 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -411,13 +411,8 @@
 
 static int txx9aclc_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &txx9aclc_soc_platform);
-}
-
-static int txx9aclc_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev,
+					      &txx9aclc_soc_platform);
 }
 
 static struct platform_driver txx9aclc_pcm_driver = {
@@ -426,7 +421,6 @@
 	},
 
 	.probe = txx9aclc_soc_platform_probe,
-	.remove = txx9aclc_soc_platform_remove,
 };
 
 module_platform_driver(txx9aclc_pcm_driver);
diff --git a/sound/soc/ux500/ux500_msp_dai.c b/sound/soc/ux500/ux500_msp_dai.c
index 978f2d7..f5df08d 100644
--- a/sound/soc/ux500/ux500_msp_dai.c
+++ b/sound/soc/ux500/ux500_msp_dai.c
@@ -773,20 +773,22 @@
 	}
 	prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, (char *)pdev->name, 50);
 
-	drvdata->pclk = clk_get(&pdev->dev, "apb_pclk");
+	drvdata->pclk = devm_clk_get(&pdev->dev, "apb_pclk");
 	if (IS_ERR(drvdata->pclk)) {
 		ret = (int)PTR_ERR(drvdata->pclk);
-		dev_err(&pdev->dev, "%s: ERROR: clk_get of pclk failed (%d)!\n",
+		dev_err(&pdev->dev,
+			"%s: ERROR: devm_clk_get of pclk failed (%d)!\n",
 			__func__, ret);
-		goto err_pclk;
+		return ret;
 	}
 
-	drvdata->clk = clk_get(&pdev->dev, NULL);
+	drvdata->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(drvdata->clk)) {
 		ret = (int)PTR_ERR(drvdata->clk);
-		dev_err(&pdev->dev, "%s: ERROR: clk_get failed (%d)!\n",
+		dev_err(&pdev->dev,
+			"%s: ERROR: devm_clk_get failed (%d)!\n",
 			__func__, ret);
-		goto err_clk;
+		return ret;
 	}
 
 	ret = ux500_msp_i2s_init_msp(pdev, &drvdata->msp,
@@ -795,7 +797,7 @@
 		dev_err(&pdev->dev,
 			"%s: ERROR: Failed to init MSP-struct (%d)!",
 			__func__, ret);
-		goto err_init_msp;
+		return ret;
 	}
 	dev_set_drvdata(&pdev->dev, drvdata);
 
@@ -804,7 +806,7 @@
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Error: %s: Failed to register MSP%d!\n",
 			__func__, drvdata->msp->id);
-		goto err_init_msp;
+		return ret;
 	}
 
 	ret = ux500_pcm_register_platform(pdev);
@@ -819,13 +821,6 @@
 
 err_reg_plat:
 	snd_soc_unregister_component(&pdev->dev);
-err_init_msp:
-	clk_put(drvdata->clk);
-err_clk:
-	clk_put(drvdata->pclk);
-err_pclk:
-	devm_regulator_put(drvdata->reg_vape);
-
 	return ret;
 }
 
@@ -837,12 +832,8 @@
 
 	snd_soc_unregister_component(&pdev->dev);
 
-	devm_regulator_put(drvdata->reg_vape);
 	prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "ux500_msp_i2s");
 
-	clk_put(drvdata->clk);
-	clk_put(drvdata->pclk);
-
 	ux500_msp_i2s_cleanup_msp(pdev, drvdata->msp);
 
 	return 0;
diff --git a/sound/soc/xtensa/xtfpga-i2s.c b/sound/soc/xtensa/xtfpga-i2s.c
index 1cfb19e..8382ffa 100644
--- a/sound/soc/xtensa/xtfpga-i2s.c
+++ b/sound/soc/xtensa/xtfpga-i2s.c
@@ -75,7 +75,7 @@
 	 * stream in the pcm_close callback it synchronizes with the interrupt
 	 * handler by means of synchronize_rcu call.
 	 */
-	struct snd_pcm_substream *tx_substream;
+	struct snd_pcm_substream __rcu *tx_substream;
 	unsigned (*tx_fn)(struct xtfpga_i2s *i2s,
 			  struct snd_pcm_runtime *runtime,
 			  unsigned tx_ptr);
@@ -474,11 +474,6 @@
 						     card->dev, size, size);
 }
 
-static void xtfpga_pcm_free(struct snd_pcm *pcm)
-{
-	snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
 static const struct snd_pcm_ops xtfpga_pcm_ops = {
 	.open		= xtfpga_pcm_open,
 	.close		= xtfpga_pcm_close,
@@ -490,7 +485,6 @@
 
 static const struct snd_soc_platform_driver xtfpga_soc_platform = {
 	.pcm_new	= xtfpga_pcm_new,
-	.pcm_free	= xtfpga_pcm_free,
 	.ops		= &xtfpga_pcm_ops,
 };
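
The __rcu annotation documents the synchronization scheme the quoted comment
already describes. A sketch of the publish/read pattern it implies
(fragments only; the driver's real callbacks do more than this):

	/* reader side, e.g. in the interrupt handler */
	rcu_read_lock();
	tx = rcu_dereference(i2s->tx_substream);
	if (tx)
		snd_pcm_period_elapsed(tx);
	rcu_read_unlock();

	/* writer side, e.g. in pcm_close: unpublish, then wait out readers */
	RCU_INIT_POINTER(i2s->tx_substream, NULL);
	synchronize_rcu();
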
 
diff --git a/sound/soc/zte/zx296702-i2s.c b/sound/soc/zte/zx296702-i2s.c
index 1930c42..1cad93d 100644
--- a/sound/soc/zte/zx296702-i2s.c
+++ b/sound/soc/zte/zx296702-i2s.c
@@ -380,7 +380,7 @@
 	struct zx_i2s_info *zx_i2s;
 	int ret;
 
-	zx_i2s =  kzalloc(sizeof(*zx_i2s), GFP_KERNEL);
+	zx_i2s = devm_kzalloc(&pdev->dev, sizeof(*zx_i2s), GFP_KERNEL);
 	if (!zx_i2s)
 		return -ENOMEM;
 
@@ -401,8 +401,8 @@
 	writel_relaxed(0, zx_i2s->reg_base + ZX_I2S_FIFO_CTRL);
 	platform_set_drvdata(pdev, zx_i2s);
 
-	ret = snd_soc_register_component(&pdev->dev, &zx_i2s_component,
-					 &zx_i2s_dai, 1);
+	ret = devm_snd_soc_register_component(&pdev->dev, &zx_i2s_component,
+					      &zx_i2s_dai, 1);
 	if (ret) {
 		dev_err(&pdev->dev, "Register DAI failed: %d\n", ret);
 		return ret;
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 0450593..18f5664 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -365,13 +365,15 @@
 	}
 
 	mutex_init(&chip->mutex);
-	init_rwsem(&chip->shutdown_rwsem);
+	init_waitqueue_head(&chip->shutdown_wait);
 	chip->index = idx;
 	chip->dev = dev;
 	chip->card = card;
 	chip->setup = device_setup[idx];
 	chip->autoclock = autoclock;
-	chip->probing = 1;
+	atomic_set(&chip->active, 1); /* avoid autopm during probing */
+	atomic_set(&chip->usage_count, 0);
+	atomic_set(&chip->shutdown, 0);
 
 	chip->usb_id = USB_ID(le16_to_cpu(dev->descriptor.idVendor),
 			      le16_to_cpu(dev->descriptor.idProduct));
@@ -495,13 +497,13 @@
 	mutex_lock(&register_mutex);
 	for (i = 0; i < SNDRV_CARDS; i++) {
 		if (usb_chip[i] && usb_chip[i]->dev == dev) {
-			if (usb_chip[i]->shutdown) {
+			if (atomic_read(&usb_chip[i]->shutdown)) {
 				dev_err(&dev->dev, "USB device is in the shutdown state, cannot create a card instance\n");
 				err = -EIO;
 				goto __error;
 			}
 			chip = usb_chip[i];
-			chip->probing = 1;
+			atomic_inc(&chip->active); /* avoid autopm */
 			break;
 		}
 	}
@@ -561,8 +563,8 @@
 
 	usb_chip[chip->index] = chip;
 	chip->num_interfaces++;
-	chip->probing = 0;
 	usb_set_intfdata(intf, chip);
+	atomic_dec(&chip->active);
 	mutex_unlock(&register_mutex);
 	return 0;
 
@@ -570,7 +572,7 @@
 	if (chip) {
 		if (!chip->num_interfaces)
 			snd_card_free(chip->card);
-		chip->probing = 0;
+		atomic_dec(&chip->active);
 	}
 	mutex_unlock(&register_mutex);
 	return err;
@@ -585,23 +587,23 @@
 	struct snd_usb_audio *chip = usb_get_intfdata(intf);
 	struct snd_card *card;
 	struct list_head *p;
-	bool was_shutdown;
 
 	if (chip == (void *)-1L)
 		return;
 
 	card = chip->card;
-	down_write(&chip->shutdown_rwsem);
-	was_shutdown = chip->shutdown;
-	chip->shutdown = 1;
-	up_write(&chip->shutdown_rwsem);
 
 	mutex_lock(&register_mutex);
-	if (!was_shutdown) {
+	if (atomic_inc_return(&chip->shutdown) == 1) {
 		struct snd_usb_stream *as;
 		struct snd_usb_endpoint *ep;
 		struct usb_mixer_interface *mixer;
 
+		/* wait until all pending tasks done;
+		 * they are protected by snd_usb_lock_shutdown()
+		 */
+		wait_event(chip->shutdown_wait,
+			   !atomic_read(&chip->usage_count));
 		snd_card_disconnect(card);
 		/* release the pcm resources */
 		list_for_each_entry(as, &chip->pcm_list, list) {
@@ -631,28 +633,50 @@
 	}
 }
 
+/* lock the shutdown (disconnect) task and autoresume */
+int snd_usb_lock_shutdown(struct snd_usb_audio *chip)
+{
+	int err;
+
+	atomic_inc(&chip->usage_count);
+	if (atomic_read(&chip->shutdown)) {
+		err = -EIO;
+		goto error;
+	}
+	err = snd_usb_autoresume(chip);
+	if (err < 0)
+		goto error;
+	return 0;
+
+ error:
+	if (atomic_dec_and_test(&chip->usage_count))
+		wake_up(&chip->shutdown_wait);
+	return err;
+}
+
+/* autosuspend and unlock the shutdown */
+void snd_usb_unlock_shutdown(struct snd_usb_audio *chip)
+{
+	snd_usb_autosuspend(chip);
+	if (atomic_dec_and_test(&chip->usage_count))
+		wake_up(&chip->shutdown_wait);
+}
+
 #ifdef CONFIG_PM
 
 int snd_usb_autoresume(struct snd_usb_audio *chip)
 {
-	int err = -ENODEV;
-
-	down_read(&chip->shutdown_rwsem);
-	if (chip->probing || chip->in_pm)
-		err = 0;
-	else if (!chip->shutdown)
-		err = usb_autopm_get_interface(chip->pm_intf);
-	up_read(&chip->shutdown_rwsem);
-
-	return err;
+	if (atomic_read(&chip->shutdown))
+		return -EIO;
+	if (atomic_inc_return(&chip->active) == 1)
+		return usb_autopm_get_interface(chip->pm_intf);
+	return 0;
 }
 
 void snd_usb_autosuspend(struct snd_usb_audio *chip)
 {
-	down_read(&chip->shutdown_rwsem);
-	if (!chip->shutdown && !chip->probing && !chip->in_pm)
+	if (atomic_dec_and_test(&chip->active))
 		usb_autopm_put_interface(chip->pm_intf);
-	up_read(&chip->shutdown_rwsem);
 }
 
 static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
@@ -665,30 +689,20 @@
 	if (chip == (void *)-1L)
 		return 0;
 
-	if (!PMSG_IS_AUTO(message)) {
+	chip->autosuspended = !!PMSG_IS_AUTO(message);
+	if (!chip->autosuspended)
 		snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
-		if (!chip->num_suspended_intf++) {
-			list_for_each_entry(as, &chip->pcm_list, list) {
-				snd_pcm_suspend_all(as->pcm);
-				as->substream[0].need_setup_ep =
-					as->substream[1].need_setup_ep = true;
-			}
-			list_for_each(p, &chip->midi_list) {
-				snd_usbmidi_suspend(p);
-			}
+	if (!chip->num_suspended_intf++) {
+		list_for_each_entry(as, &chip->pcm_list, list) {
+			snd_pcm_suspend_all(as->pcm);
+			as->substream[0].need_setup_ep =
+				as->substream[1].need_setup_ep = true;
 		}
-	} else {
-		/*
-		 * otherwise we keep the rest of the system in the dark
-		 * to keep this transparent
-		 */
-		if (!chip->num_suspended_intf++)
-			chip->autosuspended = 1;
-	}
-
-	if (chip->num_suspended_intf == 1)
+		list_for_each(p, &chip->midi_list)
+			snd_usbmidi_suspend(p);
 		list_for_each_entry(mixer, &chip->mixer_list, list)
 			snd_usb_mixer_suspend(mixer);
+	}
 
 	return 0;
 }
@@ -705,7 +719,7 @@
 	if (--chip->num_suspended_intf)
 		return 0;
 
-	chip->in_pm = 1;
+	atomic_inc(&chip->active); /* avoid autopm */
 	/*
 	 * ALSA leaves material resumption to user space
 	 * we just notify and restart the mixers
@@ -725,7 +739,7 @@
 	chip->autosuspended = 0;
 
 err_out:
-	chip->in_pm = 0;
+	atomic_dec(&chip->active); /* allow autopm after this point */
 	return err;
 }
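
snd_usb_lock_shutdown()/snd_usb_unlock_shutdown() replace the
shutdown_rwsem pattern being removed throughout the rest of this series. A
sketch of the intended calling convention (do_transfer() is a placeholder,
not a real function):

	static int sketch_guarded_op(struct snd_usb_audio *chip)
	{
		int err;

		err = snd_usb_lock_shutdown(chip); /* bars disconnect, autoresumes */
		if (err < 0)
			return err;

		err = do_transfer(chip);	/* placeholder for the USB I/O */

		snd_usb_unlock_shutdown(chip);	/* may autosuspend again */
		return err;
	}
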
 
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 03b0744..e6f7189 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -355,8 +355,10 @@
 	if (unlikely(urb->status == -ENOENT ||		/* unlinked */
 		     urb->status == -ENODEV ||		/* device removed */
 		     urb->status == -ECONNRESET ||	/* unlinked */
-		     urb->status == -ESHUTDOWN ||	/* device disabled */
-		     ep->chip->shutdown))		/* device disconnected */
+		     urb->status == -ESHUTDOWN))	/* device disabled */
+		goto exit_clear;
+	/* device disconnected */
+	if (unlikely(atomic_read(&ep->chip->shutdown)))
 		goto exit_clear;
 
 	if (usb_pipeout(ep->pipe)) {
@@ -529,7 +531,7 @@
 {
 	unsigned int i;
 
-	if (!force && ep->chip->shutdown) /* to be sure... */
+	if (!force && atomic_read(&ep->chip->shutdown)) /* to be sure... */
 		return -EBADFD;
 
 	clear_bit(EP_FLAG_RUNNING, &ep->flags);
@@ -868,7 +870,7 @@
 	int err;
 	unsigned int i;
 
-	if (ep->chip->shutdown)
+	if (atomic_read(&ep->chip->shutdown))
 		return -EBADFD;
 
 	/* already running? */
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 6b3acba..f494dce 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -282,6 +282,21 @@
 	return val;
 }
 
+static int uac2_ctl_value_size(int val_type)
+{
+	switch (val_type) {
+	case USB_MIXER_S32:
+	case USB_MIXER_U32:
+		return 4;
+	case USB_MIXER_S16:
+	case USB_MIXER_U16:
+		return 2;
+	default:
+		return 1;
+	}
+	return 0; /* unreachable */
+}
+
 
 /*
  * retrieve a mixer value
@@ -296,14 +311,11 @@
 	int timeout = 10;
 	int idx = 0, err;
 
-	err = snd_usb_autoresume(chip);
+	err = snd_usb_lock_shutdown(chip);
 	if (err < 0)
 		return -EIO;
 
-	down_read(&chip->shutdown_rwsem);
 	while (timeout-- > 0) {
-		if (chip->shutdown)
-			break;
 		idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
 		if (snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), request,
 				    USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
@@ -319,8 +331,7 @@
 	err = -EINVAL;
 
  out:
-	up_read(&chip->shutdown_rwsem);
-	snd_usb_autosuspend(chip);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -328,14 +339,14 @@
 			    int validx, int *value_ret)
 {
 	struct snd_usb_audio *chip = cval->head.mixer->chip;
-	unsigned char buf[2 + 3 * sizeof(__u16)]; /* enough space for one range */
+	unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */
 	unsigned char *val;
 	int idx = 0, ret, size;
 	__u8 bRequest;
 
 	if (request == UAC_GET_CUR) {
 		bRequest = UAC2_CS_CUR;
-		size = sizeof(__u16);
+		size = uac2_ctl_value_size(cval->val_type);
 	} else {
 		bRequest = UAC2_CS_RANGE;
 		size = sizeof(buf);
@@ -343,21 +354,15 @@
 
 	memset(buf, 0, sizeof(buf));
 
-	ret = snd_usb_autoresume(chip) ? -EIO : 0;
+	ret = snd_usb_lock_shutdown(chip) ? -EIO : 0;
 	if (ret)
 		goto error;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		ret = -ENODEV;
-	} else {
-		idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
-		ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
+	idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
+	ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
 			      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
 			      validx, idx, buf, size);
-	}
-	up_read(&chip->shutdown_rwsem);
-	snd_usb_autosuspend(chip);
+	snd_usb_unlock_shutdown(chip);
 
 	if (ret < 0) {
 error:
@@ -446,7 +451,7 @@
 				int request, int validx, int value_set)
 {
 	struct snd_usb_audio *chip = cval->head.mixer->chip;
-	unsigned char buf[2];
+	unsigned char buf[4];
 	int idx = 0, val_len, err, timeout = 10;
 
 	validx += cval->idx_off;
@@ -454,8 +459,7 @@
 	if (cval->head.mixer->protocol == UAC_VERSION_1) {
 		val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
 	} else { /* UAC_VERSION_2 */
-		/* audio class v2 controls are always 2 bytes in size */
-		val_len = sizeof(__u16);
+		val_len = uac2_ctl_value_size(cval->val_type);
 
 		/* FIXME */
 		if (request != UAC_SET_CUR) {
@@ -469,13 +473,14 @@
 	value_set = convert_bytes_value(cval, value_set);
 	buf[0] = value_set & 0xff;
 	buf[1] = (value_set >> 8) & 0xff;
-	err = snd_usb_autoresume(chip);
+	buf[2] = (value_set >> 16) & 0xff;
+	buf[3] = (value_set >> 24) & 0xff;
+
+	err = snd_usb_lock_shutdown(chip);
 	if (err < 0)
 		return -EIO;
-	down_read(&chip->shutdown_rwsem);
+
 	while (timeout-- > 0) {
-		if (chip->shutdown)
-			break;
 		idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
 		if (snd_usb_ctl_msg(chip->dev,
 				    usb_sndctrlpipe(chip->dev, 0), request,
@@ -490,8 +495,7 @@
 	err = -EINVAL;
 
  out:
-	up_read(&chip->shutdown_rwsem);
-	snd_usb_autosuspend(chip);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -715,15 +719,21 @@
 				term->name = d->iTerminal;
 			} else { /* UAC_VERSION_2 */
 				struct uac2_input_terminal_descriptor *d = p1;
+
+				/* call recursively to verify that the
+				 * referenced clock entity is valid */
+				err = check_input_term(state, d->bCSourceID, term);
+				if (err < 0)
+					return err;
+
+				/* save input term properties after recursion,
+				 * to ensure they are not overridden by the
+				 * recursion calls */
+				term->id = id;
 				term->type = le16_to_cpu(d->wTerminalType);
 				term->channels = d->bNrChannels;
 				term->chconfig = le32_to_cpu(d->bmChannelConfig);
 				term->name = d->iTerminal;
-
-				/* call recursively to get the clock selectors */
-				err = check_input_term(state, d->bCSourceID, term);
-				if (err < 0)
-					return err;
 			}
 			return 0;
 		case UAC_FEATURE_UNIT: {
@@ -798,24 +808,25 @@
 /* feature unit control information */
 struct usb_feature_control_info {
 	const char *name;
-	unsigned int type;	/* control type (mute, volume, etc.) */
+	int type;	/* data type for uac1 */
+	int type_uac2;	/* data type for uac2 if different from uac1, else -1 */
 };
 
 static struct usb_feature_control_info audio_feature_info[] = {
-	{ "Mute",			USB_MIXER_INV_BOOLEAN },
-	{ "Volume",			USB_MIXER_S16 },
-	{ "Tone Control - Bass",	USB_MIXER_S8 },
-	{ "Tone Control - Mid",		USB_MIXER_S8 },
-	{ "Tone Control - Treble",	USB_MIXER_S8 },
-	{ "Graphic Equalizer",		USB_MIXER_S8 }, /* FIXME: not implemeted yet */
-	{ "Auto Gain Control",		USB_MIXER_BOOLEAN },
-	{ "Delay Control",		USB_MIXER_U16 }, /* FIXME: U32 in UAC2 */
-	{ "Bass Boost",			USB_MIXER_BOOLEAN },
-	{ "Loudness",			USB_MIXER_BOOLEAN },
+	{ "Mute",			USB_MIXER_INV_BOOLEAN, -1 },
+	{ "Volume",			USB_MIXER_S16, -1 },
+	{ "Tone Control - Bass",	USB_MIXER_S8, -1 },
+	{ "Tone Control - Mid",		USB_MIXER_S8, -1 },
+	{ "Tone Control - Treble",	USB_MIXER_S8, -1 },
+	{ "Graphic Equalizer",		USB_MIXER_S8, -1 }, /* FIXME: not implemented yet */
+	{ "Auto Gain Control",		USB_MIXER_BOOLEAN, -1 },
+	{ "Delay Control",		USB_MIXER_U16, USB_MIXER_U32 },
+	{ "Bass Boost",			USB_MIXER_BOOLEAN, -1 },
+	{ "Loudness",			USB_MIXER_BOOLEAN, -1 },
 	/* UAC2 specific */
-	{ "Input Gain Control",		USB_MIXER_S16 },
-	{ "Input Gain Pad Control",	USB_MIXER_S16 },
-	{ "Phase Inverter Control",	USB_MIXER_BOOLEAN },
+	{ "Input Gain Control",		USB_MIXER_S16, -1 },
+	{ "Input Gain Pad Control",	USB_MIXER_S16, -1 },
+	{ "Phase Inverter Control",	USB_MIXER_BOOLEAN, -1 },
 };
 
 /* private_free callback */
@@ -1215,6 +1226,7 @@
 			      int readonly_mask)
 {
 	struct uac_feature_unit_descriptor *desc = raw_desc;
+	struct usb_feature_control_info *ctl_info;
 	unsigned int len = 0;
 	int mapped_name = 0;
 	int nameid = uac_feature_unit_iFeature(desc);
@@ -1240,7 +1252,13 @@
 	snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid);
 	cval->control = control;
 	cval->cmask = ctl_mask;
-	cval->val_type = audio_feature_info[control-1].type;
+	ctl_info = &audio_feature_info[control-1];
+	if (state->mixer->protocol == UAC_VERSION_1)
+		cval->val_type = ctl_info->type;
+	else /* UAC_VERSION_2 */
+		cval->val_type = ctl_info->type_uac2 >= 0 ?
+			ctl_info->type_uac2 : ctl_info->type;
+
 	if (ctl_mask == 0) {
 		cval->channels = 1;	/* master channel */
 		cval->master_readonly = readonly_mask;
@@ -2522,7 +2540,7 @@
 		for (c = 0; c < MAX_CHANNELS; c++) {
 			if (!(cval->cmask & (1 << c)))
 				continue;
-			if (cval->cached & (1 << c)) {
+			if (cval->cached & (1 << (c + 1))) {
 				err = snd_usb_set_cur_mix_value(cval, c + 1, idx,
 							cval->cache_val[idx]);
 				if (err < 0)
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index d3268f0..3417ef3 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -33,6 +33,8 @@
 	USB_MIXER_U8,
 	USB_MIXER_S16,
 	USB_MIXER_U16,
+	USB_MIXER_S32,
+	USB_MIXER_U32,
 };
 
 typedef void (*usb_mixer_elem_dump_func_t)(struct snd_info_buffer *buffer,
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 337c317..d3608c0 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -308,11 +308,10 @@
 	struct snd_usb_audio *chip = mixer->chip;
 	int err;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		err = -ENODEV;
-		goto out;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
+
 	if (chip->usb_id == USB_ID(0x041e, 0x3042))
 		err = snd_usb_ctl_msg(chip->dev,
 			      usb_sndctrlpipe(chip->dev, 0), 0x24,
@@ -329,8 +328,7 @@
 			      usb_sndctrlpipe(chip->dev, 0), 0x24,
 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
 			      value, index + 2, NULL, 0);
- out:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -441,16 +439,15 @@
 
 	for (i = 0; jacks[i].name; ++i) {
 		snd_iprintf(buffer, "%s: ", jacks[i].name);
-		down_read(&mixer->chip->shutdown_rwsem);
-		if (mixer->chip->shutdown)
-			err = 0;
-		else
-			err = snd_usb_ctl_msg(mixer->chip->dev,
+		err = snd_usb_lock_shutdown(mixer->chip);
+		if (err < 0)
+			return;
+		err = snd_usb_ctl_msg(mixer->chip->dev,
 				      usb_rcvctrlpipe(mixer->chip->dev, 0),
 				      UAC_GET_MEM, USB_DIR_IN | USB_TYPE_CLASS |
 				      USB_RECIP_INTERFACE, 0,
 				      jacks[i].unitid << 8, buf, 3);
-		up_read(&mixer->chip->shutdown_rwsem);
+		snd_usb_unlock_shutdown(mixer->chip);
 		if (err == 3 && (buf[0] == 3 || buf[0] == 6))
 			snd_iprintf(buffer, "%02x %02x\n", buf[1], buf[2]);
 		else
@@ -481,11 +478,9 @@
 	int err;
 	unsigned char buf[2];
 
-	down_read(&chip->shutdown_rwsem);
-	if (mixer->chip->shutdown) {
-		err = -ENODEV;
-		goto out;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
 
 	buf[0] = 0x01;
 	buf[1] = value ? 0x02 : 0x01;
@@ -493,8 +488,7 @@
 		      usb_sndctrlpipe(chip->dev, 0), UAC_SET_CUR,
 		      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
 		      0x0400, 0x0e00, buf, 2);
- out:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -554,15 +548,14 @@
 	struct snd_usb_audio *chip = mixer->chip;
 	int err;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown)
-		err = -ENODEV;
-	else
-		err = snd_usb_ctl_msg(chip->dev,
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
+	err = snd_usb_ctl_msg(chip->dev,
 			      usb_sndctrlpipe(chip->dev, 0), 0x08,
 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
 			      50, 0, &status, 1);
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -623,11 +616,9 @@
 	int err;
 	unsigned char buff[3];
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		err = -ENODEV;
-		goto err;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
 
 	/* Prepare for magic command to toggle clock source */
 	err = snd_usb_ctl_msg(chip->dev,
@@ -683,7 +674,7 @@
 		goto err;
 
 err:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -778,15 +769,14 @@
 	unsigned int pval = list->kctl->private_value;
 	int err;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown)
-		err = -ENODEV;
-	else
-		err = usb_control_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0),
-				      (pval >> 16) & 0xff,
-				      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
-				      pval >> 24, pval & 0xffff, NULL, 0, 1000);
-	up_read(&chip->shutdown_rwsem);
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
+	err = usb_control_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0),
+			      (pval >> 16) & 0xff,
+			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+			      pval >> 24, pval & 0xffff, NULL, 0, 1000);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -944,18 +934,17 @@
 	value[0] = pval >> 24;
 	value[1] = 0;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown)
-		err = -ENODEV;
-	else
-		err = snd_usb_ctl_msg(chip->dev,
-				      usb_sndctrlpipe(chip->dev, 0),
-				      UAC_SET_CUR,
-				      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
-				      pval & 0xff00,
-				      snd_usb_ctrl_intf(chip) | ((pval & 0xff) << 8),
-				      value, 2);
-	up_read(&chip->shutdown_rwsem);
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
+	err = snd_usb_ctl_msg(chip->dev,
+			      usb_sndctrlpipe(chip->dev, 0),
+			      UAC_SET_CUR,
+			      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
+			      pval & 0xff00,
+			      snd_usb_ctrl_intf(chip) | ((pval & 0xff) << 8),
+			      value, 2);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -1519,11 +1508,9 @@
 	unsigned char data[3];
 	int rate;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		err = -ENODEV;
-		goto end;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
 
 	ucontrol->value.iec958.status[0] = kcontrol->private_value & 0xff;
 	ucontrol->value.iec958.status[1] = (kcontrol->private_value >> 8) & 0xff;
@@ -1551,7 +1538,7 @@
 
 	err = 0;
  end:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -1562,11 +1549,9 @@
 	u8 reg;
 	int err;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		err = -ENODEV;
-		goto end;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
 
 	reg = ((pval >> 4) & 0xf0) | (pval & 0x0f);
 	err = snd_usb_ctl_msg(chip->dev,
@@ -1594,7 +1579,7 @@
 		goto end;
 
  end:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -1650,11 +1635,9 @@
 	u8 reg = list->kctl->private_value;
 	int err;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		err = -ENODEV;
-		goto end;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
 
 	err = snd_usb_ctl_msg(chip->dev,
 			usb_sndctrlpipe(chip->dev, 0),
@@ -1665,8 +1648,7 @@
 			NULL,
 			0);
 
- end:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index b4ef410..cdac517 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -80,7 +80,7 @@
 	unsigned int hwptr_done;
 
 	subs = (struct snd_usb_substream *)substream->runtime->private_data;
-	if (subs->stream->chip->shutdown)
+	if (atomic_read(&subs->stream->chip->shutdown))
 		return SNDRV_PCM_POS_XRUN;
 	spin_lock(&subs->lock);
 	hwptr_done = subs->hwptr_done;
@@ -391,6 +391,20 @@
 	 */
 	attr = fmt->ep_attr & USB_ENDPOINT_SYNCTYPE;
 
+	if ((is_playback && (attr != USB_ENDPOINT_SYNC_ASYNC)) ||
+		(!is_playback && (attr != USB_ENDPOINT_SYNC_ADAPTIVE))) {
+
+		/*
+		 * In these modes the notion of sync_endpoint is irrelevant.
+		 * Reset pointers to avoid using stale data from previously
+		 * used settings, e.g. when configuration and endpoints were
+		 * changed
+		 */
+
+		subs->sync_endpoint = NULL;
+		subs->data_endpoint->sync_master = NULL;
+	}
+
 	err = set_sync_ep_implicit_fb_quirk(subs, dev, altsd, attr);
 	if (err < 0)
 		return err;
@@ -398,10 +412,17 @@
 	if (altsd->bNumEndpoints < 2)
 		return 0;
 
-	if ((is_playback && attr != USB_ENDPOINT_SYNC_ASYNC) ||
+	if ((is_playback && (attr == USB_ENDPOINT_SYNC_SYNC ||
+			     attr == USB_ENDPOINT_SYNC_ADAPTIVE)) ||
 	    (!is_playback && attr != USB_ENDPOINT_SYNC_ADAPTIVE))
 		return 0;
 
+	/*
+	 * In case of an illegal SYNC_NONE for an OUT endpoint, keep going and
+	 * try to find a sync endpoint anyway, as on the M-Audio Transit. If
+	 * none is found, fall back to SYNC mode and don't create a sync
+	 * endpoint.
+	 */
+
 	/* check sync-pipe endpoint */
 	/* ... and check descriptor size before accessing bSynchAddress
 	   because there is a version of the SB Audigy 2 NX firmware lacking
@@ -415,6 +436,8 @@
 			   get_endpoint(alts, 1)->bmAttributes,
 			   get_endpoint(alts, 1)->bLength,
 			   get_endpoint(alts, 1)->bSynchAddress);
+		if (is_playback && attr == USB_ENDPOINT_SYNC_NONE)
+			return 0;
 		return -EINVAL;
 	}
 	ep = get_endpoint(alts, 1)->bEndpointAddress;
@@ -425,6 +448,8 @@
 			"%d:%d : invalid sync pipe. is_playback %d, ep %02x, bSynchAddress %02x\n",
 			   fmt->iface, fmt->altsetting,
 			   is_playback, ep, get_endpoint(alts, 0)->bSynchAddress);
+		if (is_playback && attr == USB_ENDPOINT_SYNC_NONE)
+			return 0;
 		return -EINVAL;
 	}
 
@@ -436,8 +461,11 @@
 						   implicit_fb ?
 							SND_USB_ENDPOINT_TYPE_DATA :
 							SND_USB_ENDPOINT_TYPE_SYNC);
-	if (!subs->sync_endpoint)
+	if (!subs->sync_endpoint) {
+		if (is_playback && attr == USB_ENDPOINT_SYNC_NONE)
+			return 0;
 		return -EINVAL;
+	}
 
 	subs->data_endpoint->sync_master = subs->sync_endpoint;
 
@@ -707,12 +735,11 @@
 		return -EINVAL;
 	}
 
-	down_read(&subs->stream->chip->shutdown_rwsem);
-	if (subs->stream->chip->shutdown)
-		ret = -ENODEV;
-	else
-		ret = set_format(subs, fmt);
-	up_read(&subs->stream->chip->shutdown_rwsem);
+	ret = snd_usb_lock_shutdown(subs->stream->chip);
+	if (ret < 0)
+		return ret;
+	ret = set_format(subs, fmt);
+	snd_usb_unlock_shutdown(subs->stream->chip);
 	if (ret < 0)
 		return ret;
 
@@ -735,13 +762,12 @@
 	subs->cur_audiofmt = NULL;
 	subs->cur_rate = 0;
 	subs->period_bytes = 0;
-	down_read(&subs->stream->chip->shutdown_rwsem);
-	if (!subs->stream->chip->shutdown) {
+	if (!snd_usb_lock_shutdown(subs->stream->chip)) {
 		stop_endpoints(subs, true);
 		snd_usb_endpoint_deactivate(subs->sync_endpoint);
 		snd_usb_endpoint_deactivate(subs->data_endpoint);
+		snd_usb_unlock_shutdown(subs->stream->chip);
 	}
-	up_read(&subs->stream->chip->shutdown_rwsem);
 	return snd_pcm_lib_free_vmalloc_buffer(substream);
 }
 
@@ -763,11 +789,9 @@
 		return -ENXIO;
 	}
 
-	down_read(&subs->stream->chip->shutdown_rwsem);
-	if (subs->stream->chip->shutdown) {
-		ret = -ENODEV;
-		goto unlock;
-	}
+	ret = snd_usb_lock_shutdown(subs->stream->chip);
+	if (ret < 0)
+		return ret;
 	if (snd_BUG_ON(!subs->data_endpoint)) {
 		ret = -EIO;
 		goto unlock;
@@ -816,7 +840,7 @@
 		ret = start_endpoints(subs, true);
 
  unlock:
-	up_read(&subs->stream->chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(subs->stream->chip);
 	return ret;
 }
 
@@ -1218,9 +1242,11 @@
 
 	stop_endpoints(subs, true);
 
-	if (!as->chip->shutdown && subs->interface >= 0) {
+	if (subs->interface >= 0 &&
+	    !snd_usb_lock_shutdown(subs->stream->chip)) {
 		usb_set_interface(subs->dev, subs->interface, 0);
 		subs->interface = -1;
+		snd_usb_unlock_shutdown(subs->stream->chip);
 	}
 
 	subs->pcm_substream = NULL;
diff --git a/sound/usb/proc.c b/sound/usb/proc.c
index 5f761ab..0ac89e2 100644
--- a/sound/usb/proc.c
+++ b/sound/usb/proc.c
@@ -46,14 +46,14 @@
 static void proc_audio_usbbus_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
 {
 	struct snd_usb_audio *chip = entry->private_data;
-	if (!chip->shutdown)
+	if (!atomic_read(&chip->shutdown))
 		snd_iprintf(buffer, "%03d/%03d\n", chip->dev->bus->busnum, chip->dev->devnum);
 }
 
 static void proc_audio_usbid_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
 {
 	struct snd_usb_audio *chip = entry->private_data;
-	if (!chip->shutdown)
+	if (!atomic_read(&chip->shutdown))
 		snd_iprintf(buffer, "%04x:%04x\n", 
 			    USB_ID_VENDOR(chip->usb_id),
 			    USB_ID_PRODUCT(chip->usb_id));
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 91d0380..33a1764 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -37,11 +37,11 @@
 	struct usb_interface *pm_intf;
 	u32 usb_id;
 	struct mutex mutex;
-	struct rw_semaphore shutdown_rwsem;
-	unsigned int shutdown:1;
-	unsigned int probing:1;
-	unsigned int in_pm:1;
 	unsigned int autosuspended:1;	
+	atomic_t active;
+	atomic_t shutdown;
+	atomic_t usage_count;
+	wait_queue_head_t shutdown_wait;
 	unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
 	
 	int num_interfaces;
@@ -115,4 +115,7 @@
 #define combine_triple(s)  (combine_word(s) | ((unsigned int)(s)[2] << 16))
 #define combine_quad(s)    (combine_triple(s) | ((unsigned int)(s)[3] << 24))
 
+int snd_usb_lock_shutdown(struct snd_usb_audio *chip);
+void snd_usb_unlock_shutdown(struct snd_usb_audio *chip);
+
 #endif /* __USBAUDIO_H */
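
The usbaudio.h hunk above replaces the shutdown rwsem with an atomic
shutdown flag, a usage counter and a wait queue, and declares the
snd_usb_lock_shutdown()/snd_usb_unlock_shutdown() pair that the pcm.c
and proc.c hunks now rely on. A minimal sketch of how such a pair can
be built on those fields; the real implementation lives in
sound/usb/card.c and may differ (it also deals with autoresume):

	int snd_usb_lock_shutdown(struct snd_usb_audio *chip)
	{
		int err;

		atomic_inc(&chip->usage_count);
		if (atomic_read(&chip->shutdown)) {
			err = -EIO;
			goto error;
		}
		return 0;

	 error:
		if (atomic_dec_and_test(&chip->usage_count))
			wake_up(&chip->shutdown_wait);	/* unblocks disconnect */
		return err;
	}

	void snd_usb_unlock_shutdown(struct snd_usb_audio *chip)
	{
		if (atomic_dec_and_test(&chip->usage_count))
			wake_up(&chip->shutdown_wait);
	}
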
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 4fa4bc4..4252fc2 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -880,15 +880,26 @@
 }
 
 struct bpf_object *bpf_object__open_buffer(void *obj_buf,
-					   size_t obj_buf_sz)
+					   size_t obj_buf_sz,
+					   const char *name)
 {
+	char tmp_name[64];
+
 	/* param validation */
 	if (!obj_buf || obj_buf_sz <= 0)
 		return NULL;
 
-	pr_debug("loading object from buffer\n");
+	if (!name) {
+		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
+			 (unsigned long)obj_buf,
+			 (unsigned long)obj_buf_sz);
+		tmp_name[sizeof(tmp_name) - 1] = '\0';
+		name = tmp_name;
+	}
+	pr_debug("loading object '%s' from buffer\n",
+		 name);
 
-	return __bpf_object__open("[buffer]", obj_buf, obj_buf_sz);
+	return __bpf_object__open(name, obj_buf, obj_buf_sz);
 }
 
 int bpf_object__unload(struct bpf_object *obj)
@@ -975,6 +986,14 @@
 	return next;
 }
 
+const char *
+bpf_object__get_name(struct bpf_object *obj)
+{
+	if (!obj)
+		return NULL;
+	return obj->path;
+}
+
 struct bpf_program *
 bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
 {
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index ea8adc2..f16170c 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -28,12 +28,14 @@
 
 struct bpf_object *bpf_object__open(const char *path);
 struct bpf_object *bpf_object__open_buffer(void *obj_buf,
-					   size_t obj_buf_sz);
+					   size_t obj_buf_sz,
+					   const char *name);
 void bpf_object__close(struct bpf_object *object);
 
 /* Load/unload object into/from kernel */
 int bpf_object__load(struct bpf_object *obj);
 int bpf_object__unload(struct bpf_object *obj);
+const char *bpf_object__get_name(struct bpf_object *obj);
 
 struct bpf_object *bpf_object__next(struct bpf_object *prev);
 #define bpf_object__for_each_safe(pos, tmp)			\
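
A hedged usage sketch for the extended bpf_object__open_buffer() API;
the object name "my_prog" and the helper below are illustrative, and
passing NULL for the name falls back to an address/size-derived string
instead of the old fixed "[buffer]":

	#include <stdio.h>
	#include "libbpf.h"

	static void open_named(void *buf, size_t buf_sz)
	{
		struct bpf_object *obj;

		obj = bpf_object__open_buffer(buf, buf_sz, "my_prog");
		if (!obj)
			return;
		printf("opened object: %s\n", bpf_object__get_name(obj));
		bpf_object__close(obj);
	}
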
diff --git a/tools/net/bpf_jit_disasm.c b/tools/net/bpf_jit_disasm.c
index 618c2bc..2cd3d4c 100644
--- a/tools/net/bpf_jit_disasm.c
+++ b/tools/net/bpf_jit_disasm.c
@@ -22,9 +22,14 @@
 #include <string.h>
 #include <bfd.h>
 #include <dis-asm.h>
+#include <regex.h>
+#include <fcntl.h>
 #include <sys/klog.h>
 #include <sys/types.h>
-#include <regex.h>
+#include <sys/stat.h>
+
+#define CMD_ACTION_SIZE_BUFFER		10
+#define CMD_ACTION_READ_ALL		3
 
 static void get_exec_path(char *tpath, size_t size)
 {
@@ -87,20 +92,66 @@
 	bfd_close(bfdf);
 }
 
-static char *get_klog_buff(int *klen)
+static char *get_klog_buff(unsigned int *klen)
 {
-	int ret, len = klogctl(10, NULL, 0);
-	char *buff = malloc(len);
+	int ret, len;
+	char *buff;
 
-	assert(buff && klen);
-	ret = klogctl(3, buff, len);
-	assert(ret >= 0);
+	len = klogctl(CMD_ACTION_SIZE_BUFFER, NULL, 0);
+	buff = malloc(len);
+	if (!buff)
+		return NULL;
+
+	ret = klogctl(CMD_ACTION_READ_ALL, buff, len);
+	if (ret < 0) {
+		free(buff);
+		return NULL;
+	}
+
 	*klen = ret;
-
 	return buff;
 }
 
-static void put_klog_buff(char *buff)
+static char *get_flog_buff(const char *file, unsigned int *klen)
+{
+	int fd, ret, len;
+	struct stat fi;
+	char *buff;
+
+	fd = open(file, O_RDONLY);
+	if (fd < 0)
+		return NULL;
+
+	ret = fstat(fd, &fi);
+	if (ret < 0 || !S_ISREG(fi.st_mode))
+		goto out;
+
+	len = fi.st_size + 1;
+	buff = malloc(len);
+	if (!buff)
+		goto out;
+
+	memset(buff, 0, len);
+	ret = read(fd, buff, len - 1);
+	if (ret <= 0)
+		goto out_free;
+
+	close(fd);
+	*klen = ret;
+	return buff;
+out_free:
+	free(buff);
+out:
+	close(fd);
+	return NULL;
+}
+
+static char *get_log_buff(const char *file, unsigned int *klen)
+{
+	return file ? get_flog_buff(file, klen) : get_klog_buff(klen);
+}
+
+static void put_log_buff(char *buff)
 {
 	free(buff);
 }
@@ -138,8 +189,10 @@
 	ptr = haystack + off - (pmatch[0].rm_eo - pmatch[0].rm_so);
 	ret = sscanf(ptr, "flen=%d proglen=%d pass=%d image=%lx",
 		     &flen, &proglen, &pass, &base);
-	if (ret != 4)
+	if (ret != 4) {
+		regfree(&regex);
 		return 0;
+	}
 
 	tmp = ptr = haystack + off;
 	while ((ptr = strtok(tmp, "\n")) != NULL && ulen < ilen) {
@@ -169,31 +222,49 @@
 	return ulen;
 }
 
+static void usage(void)
+{
+	printf("Usage: bpf_jit_disasm [...]\n");
+	printf("       -o          Also display related opcodes (default: off).\n");
+	printf("       -f <file>   Read last image dump from file or stdin (default: klog).\n");
+	printf("       -h          Display this help.\n");
+}
+
 int main(int argc, char **argv)
 {
-	int len, klen, opcodes = 0;
-	char *kbuff;
+	unsigned int len, klen, opt, opcodes = 0;
 	static uint8_t image[32768];
+	char *kbuff, *file = NULL;
 
-	if (argc > 1) {
-		if (!strncmp("-o", argv[argc - 1], 2)) {
+	while ((opt = getopt(argc, argv, "of:")) != -1) {
+		switch (opt) {
+		case 'o':
 			opcodes = 1;
-		} else {
-			printf("usage: bpf_jit_disasm [-o: show opcodes]\n");
-			exit(0);
+			break;
+		case 'f':
+			file = optarg;
+			break;
+		default:
+			usage();
+			return -1;
 		}
 	}
 
 	bfd_init();
 	memset(image, 0, sizeof(image));
 
-	kbuff = get_klog_buff(&klen);
+	kbuff = get_log_buff(file, &klen);
+	if (!kbuff) {
+		fprintf(stderr, "Could not retrieve log buffer!\n");
+		return -1;
+	}
 
 	len = get_last_jit_image(kbuff, klen, image, sizeof(image));
 	if (len > 0)
 		get_asm_insns(image, len, opcodes);
+	else
+		fprintf(stderr, "No JIT image found!\n");
 
-	put_klog_buff(kbuff);
-
+	put_log_buff(kbuff);
 	return 0;
 }
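
The regfree() hunk above plugs a leak on the early-return path in
get_last_jit_image(). A hedged, self-contained sketch of the rule it
enforces (the pattern and helper below are illustrative, not the
tool's code):

	#include <regex.h>

	static int matches(const char *text)
	{
		regex_t regex;
		int hit = 0;

		if (regcomp(&regex, "flen=[0-9]+", REG_EXTENDED))
			return 0;
		if (regexec(&regex, text, 0, NULL, 0) == 0)
			hit = 1;
		regfree(&regex);	/* required on every exit path after regcomp() */
		return hit;
	}
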
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 347a273..2e9ce77 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -276,7 +276,11 @@
 --intr-regs::
 Capture machine state (registers) at interrupt, i.e., on counter overflows for
 each sample. List of captured registers depends on the architecture. This option
-is off by default.
+is off by default. It is possible to select the registers to sample using their
+symbolic names, e.g. on x86, ax, si. To list the available registers use
+--intr-regs=\?. To name registers, pass a comma separated list such as
+--intr-regs=ax,bx. The list of registers is architecture dependent.
+
 
 --running-time::
 Record running and enabled time for read events (:S)
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 614b2c7..dc3ec78 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -116,7 +116,7 @@
 --fields::
         Comma separated list of fields to print. Options are:
         comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
-	srcline, period, flags.
+	srcline, period, iregs, flags.
         Field list can be prepended with the type, trace, sw or hw,
         to indicate to which event type the field list applies.
         e.g., -f sw:comm,tid,time,ip,sym  and -f trace:time,cpu,trace
diff --git a/tools/perf/arch/sh/util/dwarf-regs.c b/tools/perf/arch/sh/util/dwarf-regs.c
index 0d0897f..f8dfa89 100644
--- a/tools/perf/arch/sh/util/dwarf-regs.c
+++ b/tools/perf/arch/sh/util/dwarf-regs.c
@@ -51,5 +51,5 @@
 /* Return architecture dependent register string (for kprobe-tracer) */
 const char *get_arch_regstr(unsigned int n)
 {
-	return (n <= SH_MAX_REGS) ? sh_regs_table[n] : NULL;
+	return (n < SH_MAX_REGS) ? sh_regs_table[n] : NULL;
 }
diff --git a/tools/perf/arch/sparc/util/dwarf-regs.c b/tools/perf/arch/sparc/util/dwarf-regs.c
index 92eda41..b704fdb 100644
--- a/tools/perf/arch/sparc/util/dwarf-regs.c
+++ b/tools/perf/arch/sparc/util/dwarf-regs.c
@@ -39,5 +39,5 @@
  */
 const char *get_arch_regstr(unsigned int n)
 {
-	return (n <= SPARC_MAX_REGS) ? sparc_regs_table[n] : NULL;
+	return (n < SPARC_MAX_REGS) ? sparc_regs_table[n] : NULL;
 }
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index 2c55e1b..ff63649f 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -2,6 +2,7 @@
 libperf-y += tsc.o
 libperf-y += pmu.o
 libperf-y += kvm-stat.o
+libperf-y += perf_regs.o
 
 libperf-$(CONFIG_DWARF) += dwarf-regs.o
 
diff --git a/tools/perf/arch/x86/util/dwarf-regs.c b/tools/perf/arch/x86/util/dwarf-regs.c
index be22dd4..a08de0a 100644
--- a/tools/perf/arch/x86/util/dwarf-regs.c
+++ b/tools/perf/arch/x86/util/dwarf-regs.c
@@ -71,5 +71,5 @@
 /* Return architecture dependent register string (for kprobe-tracer) */
 const char *get_arch_regstr(unsigned int n)
 {
-	return (n <= ARCH_MAX_REGS) ? arch_regs_table[n] : NULL;
+	return (n < ARCH_MAX_REGS) ? arch_regs_table[n] : NULL;
 }
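
The three dwarf-regs.c hunks above fix the same off-by-one: an N-entry
table has valid indices 0..N-1, so the bounds check must be strict. A
minimal illustration with hypothetical names:

	static const char *regs_table[3] = { "r0", "r1", "r2" };	/* N == 3 */

	static const char *lookup(unsigned int n)
	{
		/* "n <= 3" would let n == 3 read one entry past the end */
		return (n < 3) ? regs_table[n] : NULL;
	}
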
diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c
new file mode 100644
index 0000000..c5db14f
--- /dev/null
+++ b/tools/perf/arch/x86/util/perf_regs.c
@@ -0,0 +1,28 @@
+#include "../../perf.h"
+#include "../../util/perf_regs.h"
+
+const struct sample_reg sample_reg_masks[] = {
+	SMPL_REG(AX, PERF_REG_X86_AX),
+	SMPL_REG(BX, PERF_REG_X86_BX),
+	SMPL_REG(CX, PERF_REG_X86_CX),
+	SMPL_REG(DX, PERF_REG_X86_DX),
+	SMPL_REG(SI, PERF_REG_X86_SI),
+	SMPL_REG(DI, PERF_REG_X86_DI),
+	SMPL_REG(BP, PERF_REG_X86_BP),
+	SMPL_REG(SP, PERF_REG_X86_SP),
+	SMPL_REG(IP, PERF_REG_X86_IP),
+	SMPL_REG(FLAGS, PERF_REG_X86_FLAGS),
+	SMPL_REG(CS, PERF_REG_X86_CS),
+	SMPL_REG(SS, PERF_REG_X86_SS),
+#ifdef HAVE_ARCH_X86_64_SUPPORT
+	SMPL_REG(R8, PERF_REG_X86_R8),
+	SMPL_REG(R9, PERF_REG_X86_R9),
+	SMPL_REG(R10, PERF_REG_X86_R10),
+	SMPL_REG(R11, PERF_REG_X86_R11),
+	SMPL_REG(R12, PERF_REG_X86_R12),
+	SMPL_REG(R13, PERF_REG_X86_R13),
+	SMPL_REG(R14, PERF_REG_X86_R14),
+	SMPL_REG(R15, PERF_REG_X86_R15),
+#endif
+	SMPL_REG_END
+};
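
SMPL_REG() stringifies the register name and turns the PERF_REG_* index
into a one-bit mask. A compilable sketch of the expansion, assuming
PERF_REG_X86_AX is 0 as in the x86 uapi enum:

	#include <stdio.h>
	#include <stdint.h>

	struct sample_reg { const char *name; uint64_t mask; };
	#define SMPL_REG(n, b) { .name = #n, .mask = 1ULL << (b) }

	static const struct sample_reg demo = SMPL_REG(AX, 0);

	int main(void)
	{
		/* prints "AX -> mask 0x1" */
		printf("%s -> mask 0x%llx\n", demo.name,
		       (unsigned long long)demo.mask);
		return 0;
	}
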
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index a660022..142eeb3 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -27,8 +27,10 @@
 #include "util/cpumap.h"
 #include "util/thread_map.h"
 #include "util/data.h"
+#include "util/perf_regs.h"
 #include "util/auxtrace.h"
 #include "util/parse-branch-options.h"
+#include "util/parse-regs-options.h"
 
 #include <unistd.h>
 #include <sched.h>
@@ -279,7 +281,7 @@
 
 	evlist__for_each(evlist, pos) {
 try_again:
-		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
+		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
 			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
 				if (verbose)
 					ui__warning("%s\n", msg);
@@ -1080,8 +1082,9 @@
 		    "sample transaction flags (special events only)"),
 	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
 		    "use per-thread mmaps"),
-	OPT_BOOLEAN('I', "intr-regs", &record.opts.sample_intr_regs,
-		    "Sample machine registers on interrupt"),
+	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
+		    "sample selected machine registers on interrupt,"
+		    " use -I ? to list register names", parse_regs),
 	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
 		    "Record running/enabled time of read (:S) events"),
 	OPT_CALLBACK('k', "clockid", &record.opts,
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 4430340..eb51325 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -6,6 +6,7 @@
 #include "util/exec_cmd.h"
 #include "util/header.h"
 #include "util/parse-options.h"
+#include "util/perf_regs.h"
 #include "util/session.h"
 #include "util/tool.h"
 #include "util/symbol.h"
@@ -46,6 +47,7 @@
 	PERF_OUTPUT_SYMOFFSET       = 1U << 11,
 	PERF_OUTPUT_SRCLINE         = 1U << 12,
 	PERF_OUTPUT_PERIOD          = 1U << 13,
+	PERF_OUTPUT_IREGS	    = 1U << 14,
 };
 
 struct output_option {
@@ -66,6 +68,7 @@
 	{.str = "symoff", .field = PERF_OUTPUT_SYMOFFSET},
 	{.str = "srcline", .field = PERF_OUTPUT_SRCLINE},
 	{.str = "period", .field = PERF_OUTPUT_PERIOD},
+	{.str = "iregs", .field = PERF_OUTPUT_IREGS},
 };
 
 /* default set to maintain compatibility with current format */
@@ -255,6 +258,11 @@
 					PERF_OUTPUT_PERIOD))
 		return -EINVAL;
 
+	if (PRINT_FIELD(IREGS) &&
+		perf_evsel__check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS",
+					PERF_OUTPUT_IREGS))
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -352,6 +360,24 @@
 	return 0;
 }
 
+static void print_sample_iregs(union perf_event *event __maybe_unused,
+			  struct perf_sample *sample,
+			  struct thread *thread __maybe_unused,
+			  struct perf_event_attr *attr)
+{
+	struct regs_dump *regs = &sample->intr_regs;
+	uint64_t mask = attr->sample_regs_intr;
+	unsigned i = 0, r;
+
+	if (!regs)
+		return;
+
+	for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
+		u64 val = regs->regs[i++];
+		printf("%5s:0x%"PRIx64" ", perf_reg_name(r), val);
+	}
+}
+
 static void print_sample_start(struct perf_sample *sample,
 			       struct thread *thread,
 			       struct perf_evsel *evsel)
@@ -525,6 +551,9 @@
 				     PERF_MAX_STACK_DEPTH);
 	}
 
+	if (PRINT_FIELD(IREGS))
+		print_sample_iregs(event, sample, thread, attr);
+
 	printf("\n");
 }
 
@@ -1643,7 +1672,7 @@
 		     "comma separated output fields prepend with 'type:'. "
 		     "Valid types: hw,sw,trace,raw. "
 		     "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
-		     "addr,symoff,period,flags", parse_output_fields),
+		     "addr,symoff,period,iregs,flags", parse_output_fields),
 	OPT_BOOLEAN('a', "all-cpus", &system_wide,
 		    "system-wide collection from all CPUs"),
 	OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
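
print_sample_iregs() above depends on the kernel storing sampled
registers in ascending bit order of sample_regs_intr, so the i-th
stored value belongs to the i-th set bit. A hedged, self-contained
sketch of that mapping (names illustrative):

	#include <stdio.h>
	#include <stdint.h>
	#include <inttypes.h>

	static void print_regs(uint64_t mask, const uint64_t *vals,
			       const char *const names[64])
	{
		unsigned int i = 0, r;

		for (r = 0; r < 64; r++)
			if (mask & (1ULL << r))
				printf("%5s:0x%" PRIx64 " ", names[r], vals[i++]);
	}
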
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index cccb4cf..90129ac 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -54,7 +54,6 @@
 	bool	     sample_time_set;
 	bool	     callgraph_set;
 	bool	     period;
-	bool	     sample_intr_regs;
 	bool	     running_time;
 	bool	     full_auxtrace;
 	bool	     auxtrace_snapshot_mode;
@@ -64,6 +63,7 @@
 	unsigned int auxtrace_mmap_pages;
 	unsigned int user_freq;
 	u64          branch_stack;
+	u64	     sample_intr_regs;
 	u64	     default_interval;
 	u64	     user_interval;
 	size_t	     auxtrace_snapshot_size;
diff --git a/tools/perf/tests/llvm.c b/tools/perf/tests/llvm.c
index a337356..52d5597 100644
--- a/tools/perf/tests/llvm.c
+++ b/tools/perf/tests/llvm.c
@@ -26,7 +26,7 @@
 {
 	struct bpf_object *obj;
 
-	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz);
+	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, NULL);
 	if (!obj)
 		return -1;
 	bpf_object__close(obj);
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index e912856..349bc96 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -75,6 +75,7 @@
 libperf-y += srcline.o
 libperf-y += data.o
 libperf-$(CONFIG_X86) += tsc.o
+libperf-$(CONFIG_AUXTRACE) += tsc.o
 libperf-y += cloexec.o
 libperf-y += thread-stack.o
 libperf-$(CONFIG_AUXTRACE) += auxtrace.o
@@ -82,6 +83,7 @@
 libperf-$(CONFIG_AUXTRACE) += intel-pt.o
 libperf-$(CONFIG_AUXTRACE) += intel-bts.o
 libperf-y += parse-branch-options.o
+libperf-y += parse-regs-options.o
 
 libperf-$(CONFIG_LIBELF) += symbol-elf.o
 libperf-$(CONFIG_LIBELF) += probe-file.o
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 8d00039..d51a520 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1181,6 +1181,10 @@
 		if (evsel->filter == NULL)
 			continue;
 
+		/*
+		 * Filters only work for tracepoint events, which have no CPU
+		 * limit, so the evlist's and the evsel's cpu maps are the same.
+		 */
 		err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
 		if (err) {
 			*err_evsel = evsel;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index bac25f4..c53f791 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -787,7 +787,7 @@
 		perf_evsel__config_callgraph(evsel, opts, &callchain_param);
 
 	if (opts->sample_intr_regs) {
-		attr->sample_regs_intr = PERF_REGS_MASK;
+		attr->sample_regs_intr = opts->sample_intr_regs;
 		perf_evsel__set_sample_bit(evsel, REGS_INTR);
 	}
 
diff --git a/tools/perf/util/intel-pt-decoder/Build b/tools/perf/util/intel-pt-decoder/Build
index 240730d..2386322 100644
--- a/tools/perf/util/intel-pt-decoder/Build
+++ b/tools/perf/util/intel-pt-decoder/Build
@@ -4,6 +4,7 @@
 inat_tables_maps = util/intel-pt-decoder/x86-opcode-map.txt
 
 $(OUTPUT)util/intel-pt-decoder/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
+	$(call rule_mkdir)
 	@$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ || rm -f $@
 
 $(OUTPUT)util/intel-pt-decoder/intel-pt-insn-decoder.o: util/intel-pt-decoder/inat.c $(OUTPUT)util/intel-pt-decoder/inat-tables.c
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
index 9e4eb8f..d23138c 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
@@ -146,6 +146,9 @@
 		case 4:
 			intel_pt_insn->rel = bswap_32(insn->immediate.value);
 			break;
+		default:
+			intel_pt_insn->rel = 0;
+			break;
 		}
 #else
 		intel_pt_insn->rel = insn->immediate.value;
diff --git a/tools/perf/util/parse-regs-options.c b/tools/perf/util/parse-regs-options.c
new file mode 100644
index 0000000..4f2c1c2
--- /dev/null
+++ b/tools/perf/util/parse-regs-options.c
@@ -0,0 +1,71 @@
+#include "perf.h"
+#include "util/util.h"
+#include "util/debug.h"
+#include "util/parse-options.h"
+#include "util/parse-regs-options.h"
+
+int
+parse_regs(const struct option *opt, const char *str, int unset)
+{
+	uint64_t *mode = (uint64_t *)opt->value;
+	const struct sample_reg *r;
+	char *s, *os = NULL, *p;
+	int ret = -1;
+
+	if (unset)
+		return 0;
+
+	/*
+	 * cannot set it twice
+	 */
+	if (*mode)
+		return -1;
+
+	/* str may be NULL in case no arg is passed to -I */
+	if (str) {
+		/* because str is read-only */
+		s = os = strdup(str);
+		if (!s)
+			return -1;
+
+		for (;;) {
+			p = strchr(s, ',');
+			if (p)
+				*p = '\0';
+
+			if (!strcmp(s, "?")) {
+				fprintf(stderr, "available registers: ");
+				for (r = sample_reg_masks; r->name; r++) {
+					fprintf(stderr, "%s ", r->name);
+				}
+				fputc('\n', stderr);
+				/* just printing available regs */
+				goto error;
+			}
+			for (r = sample_reg_masks; r->name; r++) {
+				if (!strcasecmp(s, r->name))
+					break;
+			}
+			if (!r->name) {
+				ui__warning("unknown register %s,"
+					    " check man page\n", s);
+				goto error;
+			}
+
+			*mode |= r->mask;
+
+			if (!p)
+				break;
+
+			s = p + 1;
+		}
+	}
+	ret = 0;
+
+	/* default to all possible regs */
+	if (*mode == 0)
+		*mode = PERF_REGS_MASK;
+error:
+	free(os);
+	return ret;
+}
diff --git a/tools/perf/util/parse-regs-options.h b/tools/perf/util/parse-regs-options.h
new file mode 100644
index 0000000..7d762b1
--- /dev/null
+++ b/tools/perf/util/parse-regs-options.h
@@ -0,0 +1,5 @@
+#ifndef _PERF_PARSE_REGS_OPTIONS_H
+#define _PERF_PARSE_REGS_OPTIONS_H 1
+struct option;
+int parse_regs(const struct option *opt, const char *str, int unset);
+#endif /* _PERF_PARSE_REGS_OPTIONS_H */
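
As a worked example of what parse_regs() accumulates, "-I ax,ip" on x86
builds the mask below, assuming PERF_REG_X86_AX == 0 and
PERF_REG_X86_IP == 8 as in the uapi enum; a bare -I leaves the mask 0
and falls back to PERF_REGS_MASK:

	#include <stdint.h>

	static uint64_t demo_mask(void)
	{
		uint64_t mode = 0;

		mode |= 1ULL << 0;	/* "ax" matched in sample_reg_masks */
		mode |= 1ULL << 8;	/* "ip" */
		return mode;		/* 0x101, later copied to attr->sample_regs_intr */
	}
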
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
index 43168fb..885e8ac 100644
--- a/tools/perf/util/perf_regs.c
+++ b/tools/perf/util/perf_regs.c
@@ -2,6 +2,10 @@
 #include "perf_regs.h"
 #include "event.h"
 
+const struct sample_reg __weak sample_reg_masks[] = {
+	SMPL_REG_END
+};
+
 int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
 {
 	int i, idx = 0;
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
index 980dbf7..2984dcc 100644
--- a/tools/perf/util/perf_regs.h
+++ b/tools/perf/util/perf_regs.h
@@ -5,6 +5,15 @@
 
 struct regs_dump;
 
+struct sample_reg {
+	const char *name;
+	uint64_t mask;
+};
+#define SMPL_REG(n, b) { .name = #n, .mask = 1ULL << (b) }
+#define SMPL_REG_END { .name = NULL }
+
+extern const struct sample_reg sample_reg_masks[];
+
 #ifdef HAVE_PERF_REGS_SUPPORT
 #include <perf_regs.h>
 
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 24ae9e8..b8f12e0 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -20,6 +20,7 @@
 TARGETS += timers
 endif
 TARGETS += user
+TARGETS += static_keys
 TARGETS += vm
 TARGETS += x86
 #Please keep the TARGETS list alphabetically sorted
diff --git a/tools/testing/selftests/capabilities/.gitignore b/tools/testing/selftests/capabilities/.gitignore
new file mode 100644
index 0000000..b732dd0
--- /dev/null
+++ b/tools/testing/selftests/capabilities/.gitignore
@@ -0,0 +1,2 @@
+test_execve
+validate_cap
diff --git a/tools/testing/selftests/capabilities/Makefile b/tools/testing/selftests/capabilities/Makefile
new file mode 100644
index 0000000..8c8f0c1
--- /dev/null
+++ b/tools/testing/selftests/capabilities/Makefile
@@ -0,0 +1,18 @@
+all:
+
+include ../lib.mk
+
+.PHONY: all clean
+
+TARGETS := validate_cap test_execve
+TEST_PROGS := test_execve
+
+CFLAGS := -O2 -g -std=gnu99 -Wall
+
+all: $(TARGETS)
+
+clean:
+	$(RM) $(TARGETS)
+
+$(TARGETS): %: %.c
+	$(CC) -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lcap-ng -lrt -ldl
diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c
new file mode 100644
index 0000000..10a21a9
--- /dev/null
+++ b/tools/testing/selftests/capabilities/test_execve.c
@@ -0,0 +1,427 @@
+#define _GNU_SOURCE
+
+#include <cap-ng.h>
+#include <err.h>
+#include <linux/capability.h>
+#include <stdbool.h>
+#include <string.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <sched.h>
+#include <sys/mount.h>
+#include <limits.h>
+#include <libgen.h>
+#include <malloc.h>
+#include <sys/wait.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+
+#ifndef PR_CAP_AMBIENT
+#define PR_CAP_AMBIENT			47
+# define PR_CAP_AMBIENT_IS_SET		1
+# define PR_CAP_AMBIENT_RAISE		2
+# define PR_CAP_AMBIENT_LOWER		3
+# define PR_CAP_AMBIENT_CLEAR_ALL	4
+#endif
+
+static int nerrs;
+
+static void vmaybe_write_file(bool enoent_ok, char *filename, char *fmt, va_list ap)
+{
+	char buf[4096];
+	int fd;
+	ssize_t written;
+	int buf_len;
+
+	buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
+	if (buf_len < 0) {
+		err(1, "vsnprintf failed");
+	}
+	if (buf_len >= sizeof(buf)) {
+		errx(1, "vsnprintf output truncated");
+	}
+
+	fd = open(filename, O_WRONLY);
+	if (fd < 0) {
+		if ((errno == ENOENT) && enoent_ok)
+			return;
+		err(1, "open of %s failed", filename);
+	}
+	written = write(fd, buf, buf_len);
+	if (written != buf_len) {
+		if (written >= 0) {
+			errx(1, "short write to %s", filename);
+		} else {
+			err(1, "write to %s failed", filename);
+		}
+	}
+	if (close(fd) != 0) {
+		err(1, "close of %s failed", filename);
+	}
+}
+
+static void maybe_write_file(char *filename, char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	vmaybe_write_file(true, filename, fmt, ap);
+	va_end(ap);
+}
+
+static void write_file(char *filename, char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	vmaybe_write_file(false, filename, fmt, ap);
+	va_end(ap);
+}
+
+static bool create_and_enter_ns(uid_t inner_uid)
+{
+	uid_t outer_uid;
+	gid_t outer_gid;
+	int i;
+	bool have_outer_privilege;
+
+	outer_uid = getuid();
+	outer_gid = getgid();
+
+	/*
+	 * TODO: If we're already root, we could skip creating the userns.
+	 */
+
+	if (unshare(CLONE_NEWNS) == 0) {
+		printf("[NOTE]\tUsing global UIDs for tests\n");
+		if (prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0) != 0)
+			err(1, "PR_SET_KEEPCAPS");
+		if (setresuid(inner_uid, inner_uid, -1) != 0)
+			err(1, "setresuid");
+
+		// Re-enable effective caps
+		capng_get_caps_process();
+		for (i = 0; i < CAP_LAST_CAP; i++)
+			if (capng_have_capability(CAPNG_PERMITTED, i))
+				capng_update(CAPNG_ADD, CAPNG_EFFECTIVE, i);
+		if (capng_apply(CAPNG_SELECT_CAPS) != 0)
+			err(1, "capng_apply");
+
+		have_outer_privilege = true;
+	} else if (unshare(CLONE_NEWUSER | CLONE_NEWNS) == 0) {
+		printf("[NOTE]\tUsing a user namespace for tests\n");
+		maybe_write_file("/proc/self/setgroups", "deny");
+		write_file("/proc/self/uid_map", "%d %d 1", inner_uid, outer_uid);
+		write_file("/proc/self/gid_map", "0 %d 1", outer_gid);
+
+		have_outer_privilege = false;
+	} else {
+		errx(1, "must be root or be able to create a userns");
+	}
+
+	if (mount("none", "/", NULL, MS_REC | MS_PRIVATE, NULL) != 0)
+		err(1, "remount everything private");
+
+	return have_outer_privilege;
+}
+
+static void chdir_to_tmpfs(void)
+{
+	char cwd[PATH_MAX];
+	if (getcwd(cwd, sizeof(cwd)) != cwd)
+		err(1, "getcwd");
+
+	if (mount("private_tmp", ".", "tmpfs", 0, "mode=0777") != 0)
+		err(1, "mount private tmpfs");
+
+	if (chdir(cwd) != 0)
+		err(1, "chdir to private tmpfs");
+
+	if (umount2(".", MNT_DETACH) != 0)
+		err(1, "detach private tmpfs");
+}
+
+static void copy_fromat_to(int fromfd, const char *fromname, const char *toname)
+{
+	int from = openat(fromfd, fromname, O_RDONLY);
+	if (from == -1)
+		err(1, "open copy source");
+
+	int to = open(toname, O_CREAT | O_WRONLY | O_EXCL, 0700);
+	if (to == -1)
+		err(1, "open copy destination");
+
+	while (true) {
+		char buf[4096];
+		ssize_t sz = read(from, buf, sizeof(buf));
+		if (sz == 0)
+			break;
+		if (sz < 0)
+			err(1, "read");
+
+		if (write(to, buf, sz) != sz)
+			err(1, "write");	/* no short writes on tmpfs */
+	}
+
+	close(from);
+	close(to);
+}
+
+static bool fork_wait(void)
+{
+	pid_t child = fork();
+	if (child == 0) {
+		nerrs = 0;
+		return true;
+	} else if (child > 0) {
+		int status;
+		if (waitpid(child, &status, 0) != child ||
+		    !WIFEXITED(status)) {
+			printf("[FAIL]\tChild died\n");
+			nerrs++;
+		} else if (WEXITSTATUS(status) != 0) {
+			printf("[FAIL]\tChild failed\n");
+			nerrs++;
+		} else {
+			printf("[OK]\tChild succeeded\n");
+		}
+
+		return false;
+	} else {
+		err(1, "fork");
+	}
+}
+
+static void exec_other_validate_cap(const char *name,
+				    bool eff, bool perm, bool inh, bool ambient)
+{
+	execl(name, name, (eff ? "1" : "0"),
+	      (perm ? "1" : "0"), (inh ? "1" : "0"), (ambient ? "1" : "0"),
+	      NULL);
+	err(1, "execl");
+}
+
+static void exec_validate_cap(bool eff, bool perm, bool inh, bool ambient)
+{
+	exec_other_validate_cap("./validate_cap", eff, perm, inh, ambient);
+}
+
+static int do_tests(int uid, const char *our_path)
+{
+	bool have_outer_privilege = create_and_enter_ns(uid);
+
+	int ourpath_fd = open(our_path, O_RDONLY | O_DIRECTORY);
+	if (ourpath_fd == -1)
+		err(1, "open '%s'", our_path);
+
+	chdir_to_tmpfs();
+
+	copy_fromat_to(ourpath_fd, "validate_cap", "validate_cap");
+
+	if (have_outer_privilege) {
+		gid_t gid = getegid();
+
+		copy_fromat_to(ourpath_fd, "validate_cap",
+			       "validate_cap_suidroot");
+		if (chown("validate_cap_suidroot", 0, -1) != 0)
+			err(1, "chown");
+		if (chmod("validate_cap_suidroot", S_ISUID | 0700) != 0)
+			err(1, "chmod");
+
+		copy_fromat_to(ourpath_fd, "validate_cap",
+			       "validate_cap_suidnonroot");
+		if (chown("validate_cap_suidnonroot", uid + 1, -1) != 0)
+			err(1, "chown");
+		if (chmod("validate_cap_suidnonroot", S_ISUID | 0700) != 0)
+			err(1, "chmod");
+
+		copy_fromat_to(ourpath_fd, "validate_cap",
+			       "validate_cap_sgidroot");
+		if (chown("validate_cap_sgidroot", -1, 0) != 0)
+			err(1, "chown");
+		if (chmod("validate_cap_sgidroot", S_ISGID | 0710) != 0)
+			err(1, "chmod");
+
+		copy_fromat_to(ourpath_fd, "validate_cap",
+			       "validate_cap_sgidnonroot");
+		if (chown("validate_cap_sgidnonroot", -1, gid + 1) != 0)
+			err(1, "chown");
+		if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0)
+			err(1, "chmod");
+	}
+
+	capng_get_caps_process();
+
+	/* Make sure that i starts out clear */
+	capng_update(CAPNG_DROP, CAPNG_INHERITABLE, CAP_NET_BIND_SERVICE);
+	if (capng_apply(CAPNG_SELECT_CAPS) != 0)
+		err(1, "capng_apply");
+
+	if (uid == 0) {
+		printf("[RUN]\tRoot => ep\n");
+		if (fork_wait())
+			exec_validate_cap(true, true, false, false);
+	} else {
+		printf("[RUN]\tNon-root => no caps\n");
+		if (fork_wait())
+			exec_validate_cap(false, false, false, false);
+	}
+
+	printf("[OK]\tCheck cap_ambient manipulation rules\n");
+
+	/* We should not be able to add ambient caps yet. */
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, CAP_NET_BIND_SERVICE, 0, 0, 0) != -1 || errno != EPERM) {
+		if (errno == EINVAL)
+			printf("[FAIL]\tPR_CAP_AMBIENT_RAISE isn't supported\n");
+		else
+			printf("[FAIL]\tPR_CAP_AMBIENT_RAISE should have failed with EPERM on a non-inheritable cap\n");
+		return 1;
+	}
+	printf("[OK]\tPR_CAP_AMBIENT_RAISE failed on non-inheritable cap\n");
+
+	capng_update(CAPNG_ADD, CAPNG_INHERITABLE, CAP_NET_RAW);
+	capng_update(CAPNG_DROP, CAPNG_PERMITTED, CAP_NET_RAW);
+	capng_update(CAPNG_DROP, CAPNG_EFFECTIVE, CAP_NET_RAW);
+	if (capng_apply(CAPNG_SELECT_CAPS) != 0)
+		err(1, "capng_apply");
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, CAP_NET_RAW, 0, 0, 0) != -1 || errno != EPERM) {
+		printf("[FAIL]\tPR_CAP_AMBIENT_RAISE should have failed on a non-permitted cap\n");
+		return 1;
+	}
+	printf("[OK]\tPR_CAP_AMBIENT_RAISE failed on non-permitted cap\n");
+
+	capng_update(CAPNG_ADD, CAPNG_INHERITABLE, CAP_NET_BIND_SERVICE);
+	if (capng_apply(CAPNG_SELECT_CAPS) != 0)
+		err(1, "capng_apply");
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, CAP_NET_BIND_SERVICE, 0, 0, 0) != 0) {
+		printf("[FAIL]\tPR_CAP_AMBIENT_RAISE should have succeeded\n");
+		return 1;
+	}
+	printf("[OK]\tPR_CAP_AMBIENT_RAISE worked\n");
+
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET, CAP_NET_BIND_SERVICE, 0, 0, 0) != 1) {
+		printf("[FAIL]\tPR_CAP_AMBIENT_IS_SET is broken\n");
+		return 1;
+	}
+
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_CLEAR_ALL, 0, 0, 0, 0) != 0)
+		err(1, "PR_CAP_AMBIENT_CLEAR_ALL");
+
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET, CAP_NET_BIND_SERVICE, 0, 0, 0) != 0) {
+		printf("[FAIL]\tPR_CAP_AMBIENT_CLEAR_ALL didn't work\n");
+		return 1;
+	}
+
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, CAP_NET_BIND_SERVICE, 0, 0, 0) != 0)
+		err(1, "PR_CAP_AMBIENT_RAISE");
+
+	capng_update(CAPNG_DROP, CAPNG_INHERITABLE, CAP_NET_BIND_SERVICE);
+	if (capng_apply(CAPNG_SELECT_CAPS) != 0)
+		err(1, "capng_apply");
+
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET, CAP_NET_BIND_SERVICE, 0, 0, 0) != 0) {
+		printf("[FAIL]\tDropping I should have dropped A\n");
+		return 1;
+	}
+
+	printf("[OK]\tBasic manipulation appears to work\n");
+
+	capng_update(CAPNG_ADD, CAPNG_INHERITABLE, CAP_NET_BIND_SERVICE);
+	if (capng_apply(CAPNG_SELECT_CAPS) != 0)
+		err(1, "capng_apply");
+	if (uid == 0) {
+		printf("[RUN]\tRoot +i => eip\n");
+		if (fork_wait())
+			exec_validate_cap(true, true, true, false);
+	} else {
+		printf("[RUN]\tNon-root +i => i\n");
+		if (fork_wait())
+			exec_validate_cap(false, false, true, false);
+	}
+
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, CAP_NET_BIND_SERVICE, 0, 0, 0) != 0)
+		err(1, "PR_CAP_AMBIENT_RAISE");
+
+	printf("[RUN]\tUID %d +ia => eipa\n", uid);
+	if (fork_wait())
+		exec_validate_cap(true, true, true, true);
+
+	/* The remaining tests need real privilege */
+
+	if (!have_outer_privilege) {
+		printf("[SKIP]\tSUID/SGID tests (needs privilege)\n");
+		goto done;
+	}
+
+	if (uid == 0) {
+		printf("[RUN]\tRoot +ia, suidroot => eipa\n");
+		if (fork_wait())
+			exec_other_validate_cap("./validate_cap_suidroot",
+						true, true, true, true);
+
+		printf("[RUN]\tRoot +ia, suidnonroot => ip\n");
+		if (fork_wait())
+			exec_other_validate_cap("./validate_cap_suidnonroot",
+						false, true, true, false);
+
+		printf("[RUN]\tRoot +ia, sgidroot => eipa\n");
+		if (fork_wait())
+			exec_other_validate_cap("./validate_cap_sgidroot",
+						true, true, true, true);
+
+		if (fork_wait()) {
+			printf("[RUN]\tRoot, gid != 0, +ia, sgidroot => eip\n");
+			if (setresgid(1, 1, 1) != 0)
+				err(1, "setresgid");
+			exec_other_validate_cap("./validate_cap_sgidroot",
+						true, true, true, false);
+		}
+
+		printf("[RUN]\tRoot +ia, sgidnonroot => eip\n");
+		if (fork_wait())
+			exec_other_validate_cap("./validate_cap_sgidnonroot",
+						true, true, true, false);
+	} else {
+		printf("[RUN]\tNon-root +ia, sgidnonroot => i\n");
+		exec_other_validate_cap("./validate_cap_sgidnonroot",
+						false, false, true, false);
+
+		if (fork_wait()) {
+			printf("[RUN]\tNon-root +ia, sgidroot => i\n");
+			if (setresgid(1, 1, 1) != 0)
+				err(1, "setresgid");
+			exec_other_validate_cap("./validate_cap_sgidroot",
+						false, false, true, false);
+		}
+	}
+
+done:
+	return nerrs ? 1 : 0;
+}
+
+int main(int argc, char **argv)
+{
+	char *tmp1, *tmp2, *our_path;
+
+	/* Find our path */
+	tmp1 = strdup(argv[0]);
+	if (!tmp1)
+		err(1, "strdup");
+	tmp2 = dirname(tmp1);
+	our_path = strdup(tmp2);
+	if (!our_path)
+		err(1, "strdup");
+	free(tmp1);
+
+	if (fork_wait()) {
+		printf("[RUN]\t+++ Tests with uid == 0 +++\n");
+		return do_tests(0, our_path);
+	}
+
+	if (fork_wait()) {
+		printf("[RUN]\t+++ Tests with uid != 0 +++\n");
+		return do_tests(1, our_path);
+	}
+
+	return nerrs ? 1 : 0;
+}
diff --git a/tools/testing/selftests/capabilities/validate_cap.c b/tools/testing/selftests/capabilities/validate_cap.c
new file mode 100644
index 0000000..dd3c45f
--- /dev/null
+++ b/tools/testing/selftests/capabilities/validate_cap.c
@@ -0,0 +1,73 @@
+#include <cap-ng.h>
+#include <err.h>
+#include <linux/capability.h>
+#include <stdbool.h>
+#include <string.h>
+#include <stdio.h>
+#include <sys/prctl.h>
+#include <sys/auxv.h>
+
+#ifndef PR_CAP_AMBIENT
+#define PR_CAP_AMBIENT			47
+# define PR_CAP_AMBIENT_IS_SET		1
+# define PR_CAP_AMBIENT_RAISE		2
+# define PR_CAP_AMBIENT_LOWER		3
+# define PR_CAP_AMBIENT_CLEAR_ALL	4
+#endif
+
+#if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 19)
+# define HAVE_GETAUXVAL
+#endif
+
+static bool bool_arg(char **argv, int i)
+{
+	if (!strcmp(argv[i], "0"))
+		return false;
+	else if (!strcmp(argv[i], "1"))
+		return true;
+	else
+		errx(1, "wrong argv[%d]", i);
+}
+
+int main(int argc, char **argv)
+{
+	const char *atsec = "";
+
+	/*
+	 * Be careful just in case a setgid or setcapped copy of this
+	 * helper gets out.
+	 */
+
+	if (argc != 5)
+		errx(1, "wrong argc");
+
+#ifdef HAVE_GETAUXVAL
+	if (getauxval(AT_SECURE))
+		atsec = " (AT_SECURE is set)";
+	else
+		atsec = " (AT_SECURE is not set)";
+#endif
+
+	capng_get_caps_process();
+
+	if (capng_have_capability(CAPNG_EFFECTIVE, CAP_NET_BIND_SERVICE) != bool_arg(argv, 1)) {
+		printf("[FAIL]\tWrong effective state%s\n", atsec);
+		return 1;
+	}
+	if (capng_have_capability(CAPNG_PERMITTED, CAP_NET_BIND_SERVICE) != bool_arg(argv, 2)) {
+		printf("[FAIL]\tWrong permitted state%s\n", atsec);
+		return 1;
+	}
+	if (capng_have_capability(CAPNG_INHERITABLE, CAP_NET_BIND_SERVICE) != bool_arg(argv, 3)) {
+		printf("[FAIL]\tWrong inheritable state%s\n", atsec);
+		return 1;
+	}
+
+	if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET, CAP_NET_BIND_SERVICE, 0, 0, 0) != bool_arg(argv, 4)) {
+		printf("[FAIL]\tWrong ambient state%s\n", atsec);
+		return 1;
+	}
+
+	printf("[OK]\tCapabilities after execve were correct\n");
+	return 0;
+}
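
The rule both test programs above exercise: a capability can be raised
into the ambient set only if it is already in both the permitted and
inheritable sets, and dropping it from inheritable also drops it from
ambient. A minimal hedged sketch, assuming the PR_CAP_AMBIENT constants
are available (e.g. via fallback defines as above):

	#include <sys/prctl.h>

	static int raise_ambient(int cap)
	{
		/* fails with EPERM unless cap is in both pP and pI */
		return prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, cap, 0, 0);
	}
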
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index 08c2a36..4124593 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -19,6 +19,8 @@
  *   - PACKET_FANOUT_LB
  *   - PACKET_FANOUT_CPU
  *   - PACKET_FANOUT_ROLLOVER
+ *   - PACKET_FANOUT_CBPF
+ *   - PACKET_FANOUT_EBPF
  *
  * Todo:
  * - functionality: PACKET_FANOUT_FLAG_DEFRAG
@@ -44,7 +46,9 @@
 #include <arpa/inet.h>
 #include <errno.h>
 #include <fcntl.h>
+#include <linux/unistd.h>	/* for __NR_bpf */
 #include <linux/filter.h>
+#include <linux/bpf.h>
 #include <linux/if_packet.h>
 #include <net/ethernet.h>
 #include <netinet/ip.h>
@@ -91,6 +95,51 @@
 	return fd;
 }
 
+static void sock_fanout_set_ebpf(int fd)
+{
+	const int len_off = __builtin_offsetof(struct __sk_buff, len);
+	struct bpf_insn prog[] = {
+		{ BPF_ALU64 | BPF_MOV | BPF_X,   6, 1, 0, 0 },
+		{ BPF_LDX   | BPF_W   | BPF_MEM, 0, 6, len_off, 0 },
+		{ BPF_JMP   | BPF_JGE | BPF_K,   0, 0, 1, DATA_LEN },
+		{ BPF_JMP   | BPF_JA  | BPF_K,   0, 0, 4, 0 },
+		{ BPF_LD    | BPF_B   | BPF_ABS, 0, 0, 0, 0x50 },
+		{ BPF_JMP   | BPF_JEQ | BPF_K,   0, 0, 2, DATA_CHAR },
+		{ BPF_JMP   | BPF_JEQ | BPF_K,   0, 0, 1, DATA_CHAR_1 },
+		{ BPF_ALU   | BPF_MOV | BPF_K,   0, 0, 0, 0 },
+		{ BPF_JMP   | BPF_EXIT,          0, 0, 0, 0 }
+	};
+	char log_buf[512];
+	union bpf_attr attr;
+	int pfd;
+
+	memset(&attr, 0, sizeof(attr));
+	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+	attr.insns = (unsigned long) prog;
+	attr.insn_cnt = sizeof(prog) / sizeof(prog[0]);
+	attr.license = (unsigned long) "GPL";
+	attr.log_buf = (unsigned long) log_buf;
+	attr.log_size = sizeof(log_buf);
+	attr.log_level = 1;
+
+	pfd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
+	if (pfd < 0) {
+		perror("bpf");
+		fprintf(stderr, "bpf verifier:\n%s\n", log_buf);
+		exit(1);
+	}
+
+	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &pfd, sizeof(pfd))) {
+		perror("fanout data ebpf");
+		exit(1);
+	}
+
+	if (close(pfd)) {
+		perror("close ebpf");
+		exit(1);
+	}
+}
+
 static char *sock_fanout_open_ring(int fd)
 {
 	struct tpacket_req req = {
@@ -115,8 +164,8 @@
 
 	ring = mmap(0, req.tp_block_size * req.tp_block_nr,
 		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	if (!ring) {
-		fprintf(stderr, "packetsock ring mmap\n");
+	if (ring == MAP_FAILED) {
+		perror("packetsock ring mmap");
 		exit(1);
 	}
 
@@ -209,6 +258,7 @@
 {
 	const int expect0[] = { 0, 0 };
 	char *rings[2];
+	uint8_t type = typeflags & 0xFF;
 	int fds[2], fds_udp[2][2], ret;
 
 	fprintf(stderr, "test: datapath 0x%hx\n", typeflags);
@@ -219,6 +269,11 @@
 		fprintf(stderr, "ERROR: failed open\n");
 		exit(1);
 	}
+	if (type == PACKET_FANOUT_CBPF)
+		sock_setfilter(fds[0], SOL_PACKET, PACKET_FANOUT_DATA);
+	else if (type == PACKET_FANOUT_EBPF)
+		sock_fanout_set_ebpf(fds[0]);
+
 	rings[0] = sock_fanout_open_ring(fds[0]);
 	rings[1] = sock_fanout_open_ring(fds[1]);
 	pair_udp_open(fds_udp[0], PORT_BASE);
@@ -227,11 +282,11 @@
 
 	/* Send data, but not enough to overflow a queue */
 	pair_udp_send(fds_udp[0], 15);
-	pair_udp_send(fds_udp[1], 5);
+	pair_udp_send_char(fds_udp[1], 5, DATA_CHAR_1);
 	ret = sock_fanout_read(fds, rings, expect1);
 
 	/* Send more data, overflow the queue */
-	pair_udp_send(fds_udp[0], 15);
+	pair_udp_send_char(fds_udp[0], 15, DATA_CHAR_1);
 	/* TODO: ensure consistent order between expect1 and expect2 */
 	ret |= sock_fanout_read(fds, rings, expect2);
 
@@ -275,6 +330,7 @@
 	const int expect_rb[2][2]	= { { 15, 5 },  { 20, 15 } };
 	const int expect_cpu0[2][2]	= { { 20, 0 },  { 20, 0 } };
 	const int expect_cpu1[2][2]	= { { 0, 20 },  { 0, 20 } };
+	const int expect_bpf[2][2]	= { { 15, 5 },  { 15, 20 } };
 	int port_off = 2, tries = 5, ret;
 
 	test_control_single();
@@ -296,6 +352,11 @@
 	ret |= test_datapath(PACKET_FANOUT_ROLLOVER,
 			     port_off, expect_rb[0], expect_rb[1]);
 
+	ret |= test_datapath(PACKET_FANOUT_CBPF,
+			     port_off, expect_bpf[0], expect_bpf[1]);
+	ret |= test_datapath(PACKET_FANOUT_EBPF,
+			     port_off, expect_bpf[0], expect_bpf[1]);
+
 	set_cpuaffinity(0);
 	ret |= test_datapath(PACKET_FANOUT_CPU, port_off,
 			     expect_cpu0[0], expect_cpu0[1]);
diff --git a/tools/testing/selftests/net/psock_lib.h b/tools/testing/selftests/net/psock_lib.h
index 37da54a..24bc7ec 100644
--- a/tools/testing/selftests/net/psock_lib.h
+++ b/tools/testing/selftests/net/psock_lib.h
@@ -30,6 +30,7 @@
 
 #define DATA_LEN			100
 #define DATA_CHAR			'a'
+#define DATA_CHAR_1			'b'
 
 #define PORT_BASE			8000
 
@@ -37,29 +38,36 @@
 # define __maybe_unused		__attribute__ ((__unused__))
 #endif
 
-static __maybe_unused void pair_udp_setfilter(int fd)
+static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
 {
 	struct sock_filter bpf_filter[] = {
 		{ 0x80, 0, 0, 0x00000000 },  /* LD  pktlen		      */
-		{ 0x35, 0, 5, DATA_LEN   },  /* JGE DATA_LEN  [f goto nomatch]*/
+		{ 0x35, 0, 4, DATA_LEN   },  /* JGE DATA_LEN  [f goto nomatch]*/
 		{ 0x30, 0, 0, 0x00000050 },  /* LD  ip[80]		      */
-		{ 0x15, 0, 3, DATA_CHAR  },  /* JEQ DATA_CHAR [f goto nomatch]*/
-		{ 0x30, 0, 0, 0x00000051 },  /* LD  ip[81]		      */
-		{ 0x15, 0, 1, DATA_CHAR  },  /* JEQ DATA_CHAR [f goto nomatch]*/
+		{ 0x15, 1, 0, DATA_CHAR  },  /* JEQ DATA_CHAR   [t goto match]*/
+		{ 0x15, 0, 1, DATA_CHAR_1},  /* JEQ DATA_CHAR_1 [t goto match]*/
 		{ 0x06, 0, 0, 0x00000060 },  /* RET match	              */
 		{ 0x06, 0, 0, 0x00000000 },  /* RET no match		      */
 	};
 	struct sock_fprog bpf_prog;
 
+	if (lvl == SOL_PACKET && optnum == PACKET_FANOUT_DATA)
+		bpf_filter[5].code = 0x16;   /* RET A			      */
+
 	bpf_prog.filter = bpf_filter;
 	bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
-	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf_prog,
+	if (setsockopt(fd, lvl, optnum, &bpf_prog,
 		       sizeof(bpf_prog))) {
 		perror("setsockopt SO_ATTACH_FILTER");
 		exit(1);
 	}
 }
 
+static __maybe_unused void pair_udp_setfilter(int fd)
+{
+	sock_setfilter(fd, SOL_SOCKET, SO_ATTACH_FILTER);
+}
+
 static __maybe_unused void pair_udp_open(int fds[], uint16_t port)
 {
 	struct sockaddr_in saddr, daddr;
@@ -96,11 +104,11 @@
 	}
 }
 
-static __maybe_unused void pair_udp_send(int fds[], int num)
+static __maybe_unused void pair_udp_send_char(int fds[], int num, char payload)
 {
 	char buf[DATA_LEN], rbuf[DATA_LEN];
 
-	memset(buf, DATA_CHAR, sizeof(buf));
+	memset(buf, payload, sizeof(buf));
 	while (num--) {
 		/* Should really handle EINTR and EAGAIN */
 		if (write(fds[0], buf, sizeof(buf)) != sizeof(buf)) {
@@ -118,6 +126,11 @@
 	}
 }
 
+static __maybe_unused void pair_udp_send(int fds[], int num)
+{
+	pair_udp_send_char(fds, num, DATA_CHAR);
+}
+
 static __maybe_unused void pair_udp_close(int fds[])
 {
 	close(fds[0]);
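
A decoding note for the classic BPF filter above: 0x80 is LD pktlen,
0x30 LD byte, 0x35 JGE, 0x15 JEQ, 0x06 RET #k and the patched 0x16 is
RET A. For PACKET_FANOUT_DATA the filter's return value selects the
fanout socket (modulo the group size), and on the match path A still
holds ip[80], the payload byte:

	/* With a two-socket group:
	 *   'a' packets: RET A returns 0x61, 0x61 % 2 == 1 -> socket 1
	 *   'b' packets: RET A returns 0x62, 0x62 % 2 == 0 -> socket 0
	 * which is what the expect_bpf[][] counts in psock_fanout.c assume.
	 */
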
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index 41cc3ed..ee179e2 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -2,8 +2,9 @@
 	$(MAKE) -C ../
 
 TEST_PROGS := hugetlb_vs_thp_test subpage_prot
+TEST_FILES := tempfile
 
-all: $(TEST_PROGS) tempfile
+all: $(TEST_PROGS) $(TEST_FILES)
 
 $(TEST_PROGS): ../harness.c
 
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index c5abe7f..a004b4c 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -14,6 +14,7 @@
 #include <linux/filter.h>
 #include <sys/prctl.h>
 #include <sys/ptrace.h>
+#include <sys/types.h>
 #include <sys/user.h>
 #include <linux/prctl.h>
 #include <linux/ptrace.h>
@@ -82,7 +83,13 @@
 };
 #endif
 
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32))
+#else
+#error "wut? Unknown __BYTE_ORDER?!"
+#endif
 
 #define SIBLING_EXIT_UNKILLED	0xbadbeef
 #define SIBLING_EXIT_FAILURE	0xbadface
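
The byte-order guard above exists because seccomp_data.args[] entries
are __u64 while the filters in this test load 32 bits: args[0] starts
at byte 16, so its low word sits at +0 on little endian and +4 on big
endian. A small check one can compile:

	#include <stdio.h>
	#include <stddef.h>
	#include <linux/seccomp.h>

	int main(void)
	{
		/* expect 16; the 32-bit low word is at +0 (LE) or +4 (BE) */
		printf("args[0] at %zu\n",
		       offsetof(struct seccomp_data, args[0]));
		return 0;
	}
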
@@ -1199,6 +1206,10 @@
 # define ARCH_REGS	struct user_pt_regs
 # define SYSCALL_NUM	regs[8]
 # define SYSCALL_RET	regs[0]
+#elif defined(__powerpc__)
+# define ARCH_REGS	struct pt_regs
+# define SYSCALL_NUM	gpr[0]
+# define SYSCALL_RET	gpr[3]
 #else
 # error "Do not know how to find your architecture's registers and syscalls"
 #endif
@@ -1232,7 +1243,7 @@
 	ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov);
 	EXPECT_EQ(0, ret);
 
-#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__)
+#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || defined(__powerpc__)
 	{
 		regs.SYSCALL_NUM = syscall;
 	}
@@ -1396,6 +1407,8 @@
 #  define __NR_seccomp 383
 # elif defined(__aarch64__)
 #  define __NR_seccomp 277
+# elif defined(__powerpc__)
+#  define __NR_seccomp 358
 # else
 #  warning "seccomp syscall number unknown for this architecture"
 #  define __NR_seccomp 0xffff
diff --git a/tools/testing/selftests/static_keys/Makefile b/tools/testing/selftests/static_keys/Makefile
new file mode 100644
index 0000000..9cdadf3
--- /dev/null
+++ b/tools/testing/selftests/static_keys/Makefile
@@ -0,0 +1,8 @@
+# Makefile for static keys selftests
+
+# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
+all:
+
+TEST_PROGS := test_static_keys.sh
+
+include ../lib.mk
diff --git a/tools/testing/selftests/static_keys/test_static_keys.sh b/tools/testing/selftests/static_keys/test_static_keys.sh
new file mode 100644
index 0000000..1261e3f
--- /dev/null
+++ b/tools/testing/selftests/static_keys/test_static_keys.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+# Runs static keys kernel module tests
+
+if /sbin/modprobe -q test_static_key_base; then
+	if /sbin/modprobe -q test_static_keys; then
+		echo "static_key: ok"
+		/sbin/modprobe -q -r test_static_keys
+		/sbin/modprobe -q -r test_static_key_base
+	else
+		echo "static_key: [FAIL]"
+		/sbin/modprobe -q -r test_static_key_base
+	fi
+else
+	echo "static_key: [FAIL]"
+	exit 1
+fi
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 231b9a0..0d68547 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -8,10 +8,13 @@
 BINARIES += map_hugetlb
 BINARIES += thuge-gen
 BINARIES += transhuge-stress
+BINARIES += userfaultfd
 
 all: $(BINARIES)
 %: %.c
 	$(CC) $(CFLAGS) -o $@ $^ -lrt
+userfaultfd: userfaultfd.c
+	$(CC) $(CFLAGS) -O2 -o $@ $^ -lpthread
 
 TEST_PROGS := run_vmtests
 TEST_FILES := $(BINARIES)
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index 49ece11..831adeb 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -86,6 +86,17 @@
 	echo "[PASS]"
 fi
 
+echo "--------------------"
+echo "running userfaultfd"
+echo "--------------------"
+./userfaultfd 128 32
+if [ $? -ne 0 ]; then
+	echo "[FAIL]"
+	exitcode=1
+else
+	echo "[PASS]"
+fi
+
 #cleanup
 umount $mnt
 rm -rf $mnt
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
new file mode 100644
index 0000000..0c0b839
--- /dev/null
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -0,0 +1,636 @@
+/*
+ * Stress userfaultfd syscall.
+ *
+ *  Copyright (C) 2015  Red Hat, Inc.
+ *
+ *  This work is licensed under the terms of the GNU GPL, version 2. See
+ *  the COPYING file in the top-level directory.
+ *
+ * This test allocates two virtual areas and bounces the physical
+ * memory across the two virtual areas (from area_src to area_dst)
+ * using userfaultfd.
+ *
+ * There are three threads running per CPU:
+ *
+ * 1) one per-CPU thread takes a per-page pthread_mutex in a random
+ *    page of the area_dst (while the physical page may still be in
+ *    area_src), and increments a per-page counter in the same page,
+ *    and checks its value against a verification region.
+ *
+ * 2) another per-CPU thread handles the userfaults generated by
+ *    thread 1 above. userfaultfd blocking reads or poll() modes are
+ *    exercised interleaved.
+ *
+ * 3) one last per-CPU thread transfers the memory in the background
+ *    at maximum bandwidth (if not already transferred by thread
+ *    2). Each CPU thread takes care of transferring a portion of the
+ *    area.
+ *
+ * When all threads of type 3 completed the transfer, one bounce is
+ * complete. area_src and area_dst are then swapped. All threads are
+ * respawned and so the bounce is immediately restarted in the
+ * opposite direction.
+ *
+ * The per-CPU threads of type 1, by triggering userfaults inside
+ * pthread_mutex_lock, also verify the atomicity of the memory
+ * transfer (UFFDIO_COPY).
+ *
+ * The program takes two parameters: the amounts of physical memory in
+ * megabytes (MiB) of the area and the number of bounces to execute.
+ *
+ * # 100MiB 99999 bounces
+ * ./userfaultfd 100 99999
+ *
+ * # 1GiB 99 bounces
+ * ./userfaultfd 1000 99
+ *
+ * # 10MiB-~6GiB 999 bounces, continue forever unless an error triggers
+ * while ./userfaultfd $[RANDOM % 6000 + 10] 999; do true; done
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <time.h>
+#include <signal.h>
+#include <poll.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/ioctl.h>
+#include <pthread.h>
+#include "../../../../include/uapi/linux/userfaultfd.h"
+
+#ifdef __x86_64__
+#define __NR_userfaultfd 323
+#elif defined(__i386__)
+#define __NR_userfaultfd 359
+#elif defined(__powerpc__)
+#define __NR_userfaultfd 364
+#else
+#error "missing __NR_userfaultfd definition"
+#endif
+
+static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
+
+#define BOUNCE_RANDOM		(1<<0)
+#define BOUNCE_RACINGFAULTS	(1<<1)
+#define BOUNCE_VERIFY		(1<<2)
+#define BOUNCE_POLL		(1<<3)
+static int bounces;
+
+static unsigned long long *count_verify;
+static int uffd, finished, *pipefd;
+static char *area_src, *area_dst;
+static char *zeropage;
+pthread_attr_t attr;
+
+/* pthread_mutex_t starts at page offset 0 */
+#define area_mutex(___area, ___nr)					\
+	((pthread_mutex_t *) ((___area) + (___nr)*page_size))
+/*
+ * count is placed in the page after pthread_mutex_t naturally aligned
+ * to avoid non alignment faults on non-x86 archs.
+ */
+#define area_count(___area, ___nr)					\
+	((volatile unsigned long long *) ((unsigned long)		\
+				 ((___area) + (___nr)*page_size +	\
+				  sizeof(pthread_mutex_t) +		\
+				  sizeof(unsigned long long) - 1) &	\
+				 ~(unsigned long)(sizeof(unsigned long long) \
+						  -  1)))
+
+static int my_bcmp(char *str1, char *str2, size_t n)
+{
+	unsigned long i;
+	for (i = 0; i < n; i++)
+		if (str1[i] != str2[i])
+			return 1;
+	return 0;
+}
+
+static void *locking_thread(void *arg)
+{
+	unsigned long cpu = (unsigned long) arg;
+	struct random_data rand;
+	unsigned long page_nr = *(&(page_nr)); /* uninitialized warning */
+	int32_t rand_nr;
+	unsigned long long count;
+	char randstate[64];
+	unsigned int seed;
+	time_t start;
+
+	if (bounces & BOUNCE_RANDOM) {
+		seed = (unsigned int) time(NULL) - bounces;
+		if (!(bounces & BOUNCE_RACINGFAULTS))
+			seed += cpu;
+		bzero(&rand, sizeof(rand));
+		bzero(&randstate, sizeof(randstate));
+		if (initstate_r(seed, randstate, sizeof(randstate), &rand))
+			fprintf(stderr, "srandom_r error\n"), exit(1);
+	} else {
+		page_nr = -bounces;
+		if (!(bounces & BOUNCE_RACINGFAULTS))
+			page_nr += cpu * nr_pages_per_cpu;
+	}
+
+	while (!finished) {
+		if (bounces & BOUNCE_RANDOM) {
+			if (random_r(&rand, &rand_nr))
+				fprintf(stderr, "random_r 1 error\n"), exit(1);
+			page_nr = rand_nr;
+			if (sizeof(page_nr) > sizeof(rand_nr)) {
+				if (random_r(&rand, &rand_nr))
+					fprintf(stderr, "random_r 2 error\n"), exit(1);
+				page_nr |= ((unsigned long) rand_nr) << 32;
+			}
+		} else
+			page_nr += 1;
+		page_nr %= nr_pages;
+
+		start = time(NULL);
+		if (bounces & BOUNCE_VERIFY) {
+			count = *area_count(area_dst, page_nr);
+			if (!count)
+				fprintf(stderr,
+					"page_nr %lu wrong count %Lu %Lu\n",
+					page_nr, count,
+					count_verify[page_nr]), exit(1);
+
+
+			/*
+			 * We can't use bcmp (or memcmp) because that
+			 * returns 0 erroneously if the memory is
+			 * changing under it (even if the end of the
+			 * page is never changing and always
+			 * different).
+			 */
+#if 1
+			if (!my_bcmp(area_dst + page_nr * page_size, zeropage,
+				     page_size))
+				fprintf(stderr,
+					"my_bcmp page_nr %lu wrong count %Lu %Lu\n",
+					page_nr, count,
+					count_verify[page_nr]), exit(1);
+#else
+			unsigned long loops;
+
+			loops = 0;
+			/* uncomment the below line to test with mutex */
+			/* pthread_mutex_lock(area_mutex(area_dst, page_nr)); */
+			while (!bcmp(area_dst + page_nr * page_size, zeropage,
+				     page_size)) {
+				loops += 1;
+				if (loops > 10)
+					break;
+			}
+			/* uncomment below line to test with mutex */
+			/* pthread_mutex_unlock(area_mutex(area_dst, page_nr)); */
+			if (loops) {
+				fprintf(stderr,
+					"page_nr %lu all zero thread %lu %p %lu\n",
+					page_nr, cpu, area_dst + page_nr * page_size,
+					loops);
+				if (loops > 10)
+					exit(1);
+			}
+#endif
+		}
+
+		pthread_mutex_lock(area_mutex(area_dst, page_nr));
+		count = *area_count(area_dst, page_nr);
+		if (count != count_verify[page_nr]) {
+			fprintf(stderr,
+				"page_nr %lu memory corruption %Lu %Lu\n",
+				page_nr, count,
+				count_verify[page_nr]), exit(1);
+		}
+		count++;
+		*area_count(area_dst, page_nr) = count_verify[page_nr] = count;
+		pthread_mutex_unlock(area_mutex(area_dst, page_nr));
+
+		if (time(NULL) - start > 1)
+			fprintf(stderr,
+				"userfault too slow %ld "
+				"possible false positive with overcommit\n",
+				time(NULL) - start);
+	}
+
+	return NULL;
+}
+
+static int copy_page(unsigned long offset)
+{
+	struct uffdio_copy uffdio_copy;
+
+	if (offset >= nr_pages * page_size)
+		fprintf(stderr, "unexpected offset %lu\n",
+			offset), exit(1);
+	uffdio_copy.dst = (unsigned long) area_dst + offset;
+	uffdio_copy.src = (unsigned long) area_src + offset;
+	uffdio_copy.len = page_size;
+	uffdio_copy.mode = 0;
+	uffdio_copy.copy = 0;
+	if (ioctl(uffd, UFFDIO_COPY, &uffdio_copy)) {
+		/* real retval in ufdio_copy.copy */
+		if (uffdio_copy.copy != -EEXIST)
+			fprintf(stderr, "UFFDIO_COPY error %Ld\n",
+				uffdio_copy.copy), exit(1);
+	} else if (uffdio_copy.copy != page_size) {
+		fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n",
+			uffdio_copy.copy), exit(1);
+	} else
+		return 1;
+	return 0;
+}
+
+static void *uffd_poll_thread(void *arg)
+{
+	unsigned long cpu = (unsigned long) arg;
+	struct pollfd pollfd[2];
+	struct uffd_msg msg;
+	int ret;
+	unsigned long offset;
+	char tmp_chr;
+	unsigned long userfaults = 0;
+
+	pollfd[0].fd = uffd;
+	pollfd[0].events = POLLIN;
+	pollfd[1].fd = pipefd[cpu*2];
+	pollfd[1].events = POLLIN;
+
+	for (;;) {
+		ret = poll(pollfd, 2, -1);
+		if (!ret)
+			fprintf(stderr, "poll error %d\n", ret), exit(1);
+		if (ret < 0)
+			perror("poll"), exit(1);
+		if (pollfd[1].revents & POLLIN) {
+			if (read(pollfd[1].fd, &tmp_chr, 1) != 1)
+				fprintf(stderr, "read pipefd error\n"),
+					exit(1);
+			break;
+		}
+		if (!(pollfd[0].revents & POLLIN))
+			fprintf(stderr, "pollfd[0].revents %d\n",
+				pollfd[0].revents), exit(1);
+		ret = read(uffd, &msg, sizeof(msg));
+		if (ret < 0) {
+			if (errno == EAGAIN)
+				continue;
+			perror("nonblocking read error"), exit(1);
+		}
+		if (msg.event != UFFD_EVENT_PAGEFAULT)
+			fprintf(stderr, "unexpected msg event %u\n",
+				msg.event), exit(1);
+		if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
+			fprintf(stderr, "unexpected write fault\n"), exit(1);
+		offset = (char *)msg.arg.pagefault.address - area_dst;
+		offset &= ~(page_size-1);
+		if (copy_page(offset))
+			userfaults++;
+	}
+	return (void *)userfaults;
+}
+
+pthread_mutex_t uffd_read_mutex = PTHREAD_MUTEX_INITIALIZER;
+
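+/*
+ * Read-mode fault handler: blocking reads on the uffd deliver one
+ * uffd_msg per missing-page fault. The thread never returns on its
+ * own; stress() cancels it with pthread_cancel() once the bounce is
+ * done (read() is a cancellation point). The mutex handshake keeps
+ * the creator from proceeding until the counter pointer is set up.
+ */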
+static void *uffd_read_thread(void *arg)
+{
+	unsigned long *this_cpu_userfaults;
+	struct uffd_msg msg;
+	unsigned long offset;
+	int ret;
+
+	this_cpu_userfaults = (unsigned long *) arg;
+	*this_cpu_userfaults = 0;
+
+	pthread_mutex_unlock(&uffd_read_mutex);
+	/* from here cancellation is ok */
+
+	for (;;) {
+		ret = read(uffd, &msg, sizeof(msg));
+		if (ret != sizeof(msg)) {
+			if (ret < 0)
+				perror("blocking read error"), exit(1);
+			else
+				fprintf(stderr, "short read\n"), exit(1);
+		}
+		if (msg.event != UFFD_EVENT_PAGEFAULT)
+			fprintf(stderr, "unexpected msg event %u\n",
+				msg.event), exit(1);
+		if (bounces & BOUNCE_VERIFY &&
+		    msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
+			fprintf(stderr, "unexpected write fault\n"), exit(1);
+		offset = (char *)msg.arg.pagefault.address - area_dst;
+		offset &= ~(page_size-1);
+		if (copy_page(offset))
+			(*this_cpu_userfaults)++;
+	}
+	return NULL;
+}
+
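+/*
+ * Proactively transfer this cpu's slice of pages with UFFDIO_COPY,
+ * racing against the fault handlers: whoever loses the race simply
+ * gets -EEXIST from the kernel and the page is left intact.
+ */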
+static void *background_thread(void *arg)
+{
+	unsigned long cpu = (unsigned long) arg;
+	unsigned long page_nr;
+
+	for (page_nr = cpu * nr_pages_per_cpu;
+	     page_nr < (cpu+1) * nr_pages_per_cpu;
+	     page_nr++)
+		copy_page(page_nr * page_size);
+
+	return NULL;
+}
+
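+/*
+ * Run one bounce pass: per cpu, start a locking thread, a fault
+ * handler and a background copy thread; wait for the background
+ * copies, zap area_src, tear down the fault handlers and finally
+ * join the locking threads.
+ */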
+static int stress(unsigned long *userfaults)
+{
+	unsigned long cpu;
+	pthread_t locking_threads[nr_cpus];
+	pthread_t uffd_threads[nr_cpus];
+	pthread_t background_threads[nr_cpus];
+	void **_userfaults = (void **) userfaults;
+
+	finished = 0;
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		if (pthread_create(&locking_threads[cpu], &attr,
+				   locking_thread, (void *)cpu))
+			return 1;
+		if (bounces & BOUNCE_POLL) {
+			if (pthread_create(&uffd_threads[cpu], &attr,
+					   uffd_poll_thread, (void *)cpu))
+				return 1;
+		} else {
+			if (pthread_create(&uffd_threads[cpu], &attr,
+					   uffd_read_thread,
+					   &_userfaults[cpu]))
+				return 1;
+			pthread_mutex_lock(&uffd_read_mutex);
+		}
+		if (pthread_create(&background_threads[cpu], &attr,
+				   background_thread, (void *)cpu))
+			return 1;
+	}
+	for (cpu = 0; cpu < nr_cpus; cpu++)
+		if (pthread_join(background_threads[cpu], NULL))
+			return 1;
+
+	/*
+	 * Be strict and immediately zap area_src: the whole area has
+	 * already been transferred by the background threads. The
+	 * area_src could then be faulted back in racily by the still
+	 * running uffd threads, which would read zeropages after we
+	 * zapped it (but they're guaranteed to get -EEXIST from
+	 * UFFDIO_COPY without writing zero pages into area_dst,
+	 * because the background threads already completed).
+	 */
+	if (madvise(area_src, nr_pages * page_size, MADV_DONTNEED)) {
+		perror("madvise");
+		return 1;
+	}
+
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		char c;
+		if (bounces & BOUNCE_POLL) {
+			if (write(pipefd[cpu*2+1], &c, 1) != 1) {
+				fprintf(stderr, "pipefd write error\n");
+				return 1;
+			}
+			if (pthread_join(uffd_threads[cpu], &_userfaults[cpu]))
+				return 1;
+		} else {
+			if (pthread_cancel(uffd_threads[cpu]))
+				return 1;
+			if (pthread_join(uffd_threads[cpu], NULL))
+				return 1;
+		}
+	}
+
+	finished = 1;
+	for (cpu = 0; cpu < nr_cpus; cpu++)
+		if (pthread_join(locking_threads[cpu], NULL))
+			return 1;
+
+	return 0;
+}
+
+static int userfaultfd_stress(void)
+{
+	void *area;
+	char *tmp_area;
+	unsigned long nr;
+	struct uffdio_register uffdio_register;
+	struct uffdio_api uffdio_api;
+	unsigned long cpu;
+	int uffd_flags;
+	unsigned long userfaults[nr_cpus];
+
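+	/*
+	 * Allocate the source and destination areas page aligned, so
+	 * every page can be faulted and copied individually.
+	 */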
+	if (posix_memalign(&area, page_size, nr_pages * page_size)) {
+		fprintf(stderr, "out of memory\n");
+		return 1;
+	}
+	area_src = area;
+	if (posix_memalign(&area, page_size, nr_pages * page_size)) {
+		fprintf(stderr, "out of memory\n");
+		return 1;
+	}
+	area_dst = area;
+
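+	/*
+	 * Create the userfaultfd. O_NONBLOCK only sets the initial
+	 * file mode; each bounce re-toggles it with F_SETFL depending
+	 * on whether the poll or the blocking-read handler runs.
+	 */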
+	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+	if (uffd < 0) {
+		fprintf(stderr,
+			"userfaultfd syscall not available in this kernel\n");
+		return 1;
+	}
+	uffd_flags = fcntl(uffd, F_GETFL, NULL);
+
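+	/*
+	 * The UFFDIO_API handshake is mandatory: no other uffd ioctl
+	 * is accepted until the API version has been agreed upon.
+	 */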
+	uffdio_api.api = UFFD_API;
+	uffdio_api.features = 0;
+	if (ioctl(uffd, UFFDIO_API, &uffdio_api)) {
+		fprintf(stderr, "UFFDIO_API\n");
+		return 1;
+	}
+	if (uffdio_api.api != UFFD_API) {
+		fprintf(stderr, "UFFDIO_API error %Lu\n", uffdio_api.api);
+		return 1;
+	}
+
+	count_verify = malloc(nr_pages * sizeof(unsigned long long));
+	if (!count_verify) {
+		perror("count_verify");
+		return 1;
+	}
+
+	for (nr = 0; nr < nr_pages; nr++) {
+		*area_mutex(area_src, nr) = (pthread_mutex_t)
+			PTHREAD_MUTEX_INITIALIZER;
+		count_verify[nr] = *area_count(area_src, nr) = 1;
+	}
+
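+	/*
+	 * One pipe per cpu: stress() writes a byte into it to tell
+	 * the corresponding uffd_poll_thread to leave its poll loop.
+	 */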
+	pipefd = malloc(sizeof(int) * nr_cpus * 2);
+	if (!pipefd) {
+		perror("pipefd");
+		return 1;
+	}
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		if (pipe2(&pipefd[cpu*2], O_CLOEXEC | O_NONBLOCK)) {
+			perror("pipe");
+			return 1;
+		}
+	}
+
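+	/*
+	 * zeropage is the all-zero reference pattern used to detect
+	 * destination pages that were wrongly left filled with
+	 * zeroes.
+	 */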
+	if (posix_memalign(&area, page_size, page_size)) {
+		fprintf(stderr, "out of memory\n");
+		return 1;
+	}
+	zeropage = area;
+	bzero(zeropage, page_size);
+
+	pthread_mutex_lock(&uffd_read_mutex);
+
+	pthread_attr_init(&attr);
+	pthread_attr_setstacksize(&attr, 16*1024*1024);
+
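+	/*
+	 * Each loop iteration is one "bounce": area_dst is
+	 * repopulated from area_src through userfaults, then the two
+	 * areas swap roles. The low bits of the remaining bounce
+	 * count select the mode (random access, racing faults,
+	 * verification, poll), so the mix changes from pass to pass.
+	 */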
+	while (bounces--) {
+		unsigned long expected_ioctls;
+
+		printf("bounces: %d, mode:", bounces);
+		if (bounces & BOUNCE_RANDOM)
+			printf(" rnd");
+		if (bounces & BOUNCE_RACINGFAULTS)
+			printf(" racing");
+		if (bounces & BOUNCE_VERIFY)
+			printf(" ver");
+		if (bounces & BOUNCE_POLL)
+			printf(" poll");
+		printf(", ");
+		fflush(stdout);
+
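+		/*
+		 * The poll handler wants a nonblocking uffd (it only
+		 * reads after POLLIN); the read handler relies on
+		 * blocking reads instead.
+		 */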
+		if (bounces & BOUNCE_POLL)
+			fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
+		else
+			fcntl(uffd, F_SETFL, uffd_flags & ~O_NONBLOCK);
+
+		/* register */
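+		/*
+		 * Track missing pages on the whole destination area:
+		 * from now on, touching a not-yet-mapped page in this
+		 * range blocks the faulting thread until the page is
+		 * resolved through the uffd.
+		 */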
+		uffdio_register.range.start = (unsigned long) area_dst;
+		uffdio_register.range.len = nr_pages * page_size;
+		uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
+		if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) {
+			fprintf(stderr, "register failure\n");
+			return 1;
+		}
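+		/*
+		 * The kernel reports in uffdio_register.ioctls which
+		 * ioctls it supports on the registered range; for
+		 * anonymous memory wake, copy and zeropage must all
+		 * be available.
+		 */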
+		expected_ioctls = (1 << _UFFDIO_WAKE) |
+				  (1 << _UFFDIO_COPY) |
+				  (1 << _UFFDIO_ZEROPAGE);
+		if ((uffdio_register.ioctls & expected_ioctls) !=
+		    expected_ioctls) {
+			fprintf(stderr,
+				"unexpected missing ioctl for anon memory\n");
+			return 1;
+		}
+
+		/*
+		 * The madvise done previously isn't enough: some
+		 * uffd_thread could have read userfaults (one of
+		 * those already resolved by the background thread)
+		 * and may be in the middle of calling UFFDIO_COPY.
+		 * That UFFDIO_COPY will read the zapped area_src,
+		 * thereby faulting a zeropage back into it (such a
+		 * UFFDIO_COPY is perfectly safe in itself, as it'd
+		 * just return -EEXIST). The problem comes at the
+		 * next bounce though: those zeropage leftovers in
+		 * the area_src invalidate the previous MADV_DONTNEED
+		 * and, without this additional MADV_DONTNEED, would
+		 * lead to -EEXIST failures during the next bounce,
+		 * effectively leaving a zeropage in the area_dst.
+		 *
+		 * Try commenting out this madvise to see the memory
+		 * corruption being caught pretty quickly.
+		 *
+		 * khugepaged is only inhibited from collapsing the
+		 * area into THPs after the UFFDIO_REGISTER, so an
+		 * earlier MADV_DONTNEED could be undone by a THP
+		 * collapse; it has to be repeated here, after
+		 * registration.
+		 */
+		if (madvise(area_dst, nr_pages * page_size, MADV_DONTNEED)) {
+			perror("madvise 2");
+			return 1;
+		}
+
+		/* bounce pass */
+		if (stress(userfaults))
+			return 1;
+
+		/* unregister */
+		if (ioctl(uffd, UFFDIO_UNREGISTER, &uffdio_register.range)) {
+			fprintf(stderr, "unregister failure\n");
+			return 1;
+		}
+
+		/* verification */
+		if (bounces & BOUNCE_VERIFY) {
+			for (nr = 0; nr < nr_pages; nr++) {
+				if (my_bcmp(area_dst,
+					    area_dst + nr * page_size,
+					    sizeof(pthread_mutex_t))) {
+					fprintf(stderr,
+						"error mutex 2 %lu\n",
+						nr);
+					bounces = 0;
+				}
+				if (*area_count(area_dst, nr) != count_verify[nr]) {
+					fprintf(stderr,
+						"error area_count %Lu %Lu %lu\n",
+						*area_count(area_src, nr),
+						count_verify[nr],
+						nr);
+					bounces = 0;
+				}
+			}
+		}
+
+		/* prepare next bounce */
+		tmp_area = area_src;
+		area_src = area_dst;
+		area_dst = tmp_area;
+
+		printf("userfaults:");
+		for (cpu = 0; cpu < nr_cpus; cpu++)
+			printf(" %lu", userfaults[cpu]);
+		printf("\n");
+	}
+
+	return 0;
+}
+
+int main(int argc, char **argv)
+{
+	if (argc < 3)
+		fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1);
+	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+	page_size = sysconf(_SC_PAGE_SIZE);
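+	/*
+	 * Each page hosts a pthread_mutex_t at its start followed by
+	 * an aligned unsigned long long counter; bail out if the two
+	 * can't fit in a single page.
+	 */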
+	if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) >
+	    page_size)
+		fprintf(stderr, "Impossible to run this test\n"), exit(2);
+	nr_pages_per_cpu = atol(argv[1]) * 1024*1024 / page_size /
+		nr_cpus;
+	if (!nr_pages_per_cpu) {
+		fprintf(stderr, "invalid MiB\n");
+		fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1);
+	}
+	bounces = atoi(argv[2]);
+	if (bounces <= 0) {
+		fprintf(stderr, "invalid bounces\n");
+		fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1);
+	}
+	nr_pages = nr_pages_per_cpu * nr_cpus;
+	printf("nr_pages: %lu, nr_pages_per_cpu: %lu\n",
+	       nr_pages, nr_pages_per_cpu);
+	return userfaultfd_stress();
+}